-rw-r--r--.gitignore1
-rw-r--r--.mailmap1
-rw-r--r--CREDITS28
-rw-r--r--Documentation/ABI/obsolete/sysfs-block-zram119
-rw-r--r--Documentation/ABI/testing/sysfs-block-dm22
-rw-r--r--Documentation/ABI/testing/sysfs-block-zram25
-rw-r--r--Documentation/ABI/testing/sysfs-class-cxl2
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-flash80
-rw-r--r--Documentation/ABI/testing/sysfs-class-mtd10
-rw-r--r--Documentation/ABI/testing/sysfs-class-net8
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-queues8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-toshiba_acpi93
-rw-r--r--Documentation/ABI/testing/sysfs-platform-dell-laptop69
-rw-r--r--Documentation/CodingStyle162
-rw-r--r--Documentation/DocBook/crypto-API.tmpl860
-rw-r--r--Documentation/DocBook/drm.tmpl31
-rw-r--r--Documentation/DocBook/media/v4l/biblio.xml11
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml2
-rw-r--r--Documentation/DocBook/media/v4l/dev-sliced-vbi.xml2
-rw-r--r--Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml92
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml79
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml16
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml4
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml110
-rw-r--r--Documentation/DocBook/media/v4l/subdev-formats.xml1105
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml9
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-cropcap.xml9
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dqevent.xml121
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-crop.xml5
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml18
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-selection.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-querycap.xml8
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-queryctrl.xml12
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml13
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml13
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml11
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml111
-rw-r--r--Documentation/IPMI.txt5
-rw-r--r--Documentation/IRQ-domain.txt3
-rw-r--r--Documentation/Makefile2
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt21
-rw-r--r--Documentation/PCI/pci-error-recovery.txt2
-rw-r--r--Documentation/PCI/pcieaer-howto.txt4
-rw-r--r--Documentation/SubmittingPatches4
-rw-r--r--Documentation/acpi/enumeration.txt28
-rw-r--r--Documentation/acpi/gpio-properties.txt6
-rw-r--r--Documentation/arm/00-INDEX2
-rw-r--r--Documentation/arm/Booting9
-rw-r--r--Documentation/arm/Makefile1
-rw-r--r--Documentation/arm/Marvell/README5
-rw-r--r--Documentation/arm/README15
-rw-r--r--Documentation/arm/SH-Mobile/Makefile7
-rw-r--r--Documentation/arm/SH-Mobile/vrl4.c170
-rw-r--r--Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt29
-rw-r--r--Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt42
-rw-r--r--Documentation/arm/msm/gpiomux.txt176
-rw-r--r--Documentation/arm64/acpi_object_usage.txt593
-rw-r--r--Documentation/arm64/arm-acpi.txt505
-rw-r--r--Documentation/atomic_ops.txt45
-rw-r--r--Documentation/blackfin/Makefile2
-rw-r--r--Documentation/block/biodoc.txt36
-rw-r--r--Documentation/blockdev/nbd.txt48
-rw-r--r--Documentation/blockdev/zram.txt87
-rw-r--r--Documentation/cgroups/memory.txt8
-rw-r--r--Documentation/cma/debugfs.txt21
-rw-r--r--Documentation/cpu-hotplug.txt2
-rw-r--r--Documentation/crypto/crypto-API-userspace.txt205
-rw-r--r--Documentation/device-mapper/dm-crypt.txt4
-rw-r--r--Documentation/device-mapper/log-writes.txt140
-rw-r--r--Documentation/device-mapper/switch.txt4
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt3
-rw-r--r--Documentation/device-mapper/verity.txt21
-rw-r--r--Documentation/devicetree/bindings/arc/pct.txt20
-rw-r--r--Documentation/devicetree/bindings/arc/pmu.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/al,alpine.txt88
-rw-r--r--Documentation/devicetree/bindings/arm/altera.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/arch_timer.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/armada-39x.txt20
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt (renamed from Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method)0
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.txt (renamed from Documentation/devicetree/bindings/arm/bcm/bcm11351.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.txt (renamed from Documentation/devicetree/bindings/arm/bcm/bcm21664.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt (renamed from Documentation/devicetree/bindings/arm/bcm2835.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.txt (renamed from Documentation/devicetree/bindings/arm/bcm4708.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt (renamed from Documentation/devicetree/bindings/arm/bcm/bcm63138.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,brcmstb.txt (renamed from Documentation/devicetree/bindings/arm/brcm-brcmstb.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.txt (renamed from Documentation/devicetree/bindings/arm/bcm/cygnus.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/cci.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp52
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/exynos/power_domain.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/geniatech.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/marvell,kirkwood.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt84
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt40
-rw-r--r--Documentation/devicetree/bindings/arm/msm/timer.txt16
-rw-r--r--Documentation/devicetree/bindings/arm/omap/ctrl.txt79
-rw-r--r--Documentation/devicetree/bindings/arm/omap/l3-noc.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/omap/l4.txt26
-rw-r--r--Documentation/devicetree/bindings/arm/omap/prcm.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/shmobile.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt32
-rw-r--r--Documentation/devicetree/bindings/bus/brcm,bus-axi.txt (renamed from Documentation/devicetree/bindings/bus/bcma.txt)0
-rw-r--r--Documentation/devicetree/bindings/bus/omap-ocp2scp.txt3
-rw-r--r--Documentation/devicetree/bindings/bus/renesas,bsc.txt46
-rw-r--r--Documentation/devicetree/bindings/bus/simple-pm-bus.txt44
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,kona-ccu.txt (renamed from Documentation/devicetree/bindings/clock/bcm-kona-clock.txt)0
-rw-r--r--Documentation/devicetree/bindings/clock/exynos3250-clock.txt8
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5433-clock.txt462
-rw-r--r--Documentation/devicetree/bindings/clock/fujitsu,mb86s70-crg11.txt26
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-core-clock.txt9
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt15
-rw-r--r--Documentation/devicetree/bindings/clock/pistachio-clock.txt123
-rw-r--r--Documentation/devicetree/bindings/clock/pwm-clock.txt26
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt25
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si5351.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi.txt3
-rw-r--r--Documentation/devicetree/bindings/common-properties.txt60
-rw-r--r--Documentation/devicetree/bindings/cris/axis.txt9
-rw-r--r--Documentation/devicetree/bindings/cris/boards.txt8
-rw-r--r--Documentation/devicetree/bindings/cris/interrupts.txt23
-rw-r--r--Documentation/devicetree/bindings/crypto/img-hash.txt27
-rw-r--r--Documentation/devicetree/bindings/dma/apm-xgene-dma.txt47
-rw-r--r--Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt (renamed from Documentation/devicetree/bindings/dma/bcm2835-dma.txt)0
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/jz4780-dma.txt56
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_bam_dma.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/rcar-audmapp.txt29
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt37
-rw-r--r--Documentation/devicetree/bindings/drm/imx/ldb.txt62
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt18
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt (renamed from Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt)0
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-altera.txt43
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt30
-rw-r--r--Documentation/devicetree/bindings/gpio/mrvl-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/hwrng/brcm,iproc-rng200.txt12
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,kona-i2c.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-bcm-kona.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-davinci.txt3
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-digicolor.txt25
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-jz4780.txt35
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-xlp9xx.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt2
-rw-r--r--Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt108
-rw-r--r--Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt43
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt76
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt46
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/goodix.txt29
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/sun4i.txt22
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/sx8654.txt16
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt41
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt52
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt12
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt18
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt25
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt5
-rw-r--r--Documentation/devicetree/bindings/leds/common.txt6
-rw-r--r--Documentation/devicetree/bindings/leds/leds-gpio.txt12
-rw-r--r--Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt43
-rw-r--r--Documentation/devicetree/bindings/mailbox/arm-mhu.txt43
-rw-r--r--Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/mt9v032.txt39
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov2640.txt46
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov2659.txt38
-rw-r--r--Documentation/devicetree/bindings/media/ti,omap3isp.txt71
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.txt6
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/video.txt35
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-tc.txt33
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt71
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,video.txt55
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt75
-rw-r--r--Documentation/devicetree/bindings/mfd/arizona.txt41
-rw-r--r--Documentation/devicetree/bindings/mfd/axp20x.txt96
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm59056.txt (renamed from Documentation/devicetree/bindings/mfd/bcm590xx.txt)0
-rw-r--r--Documentation/devicetree/bindings/mfd/da9150.txt43
-rw-r--r--Documentation/devicetree/bindings/mfd/mt6397.txt70
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt19
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,tcsr.txt22
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom-rpm.txt218
-rw-r--r--Documentation/devicetree/bindings/mfd/sky81452.txt35
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt37
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/brcm,bmips.txt (renamed from Documentation/devicetree/bindings/mips/brcm/bmips.txt)0
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt11
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/soc.txt12
-rw-r--r--Documentation/devicetree/bindings/mips/img/pistachio.txt42
-rw-r--r--Documentation/devicetree/bindings/misc/brcm,kona-smc.txt (renamed from Documentation/devicetree/bindings/misc/smc.txt)0
-rw-r--r--Documentation/devicetree/bindings/misc/lis302.txt9
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt (renamed from Documentation/devicetree/bindings/mmc/kona-sdhci.txt)0
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt (renamed from Documentation/devicetree/bindings/mtd/m25p80.txt)13
-rw-r--r--Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/sunxi-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/net/apm-xgene-enet.txt7
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt (renamed from Documentation/devicetree/bindings/net/broadcom-sf2.txt)0
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcmgenet.txt (renamed from Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt)0
-rw-r--r--Documentation/devicetree/bindings/net/brcm,systemport.txt (renamed from Documentation/devicetree/bindings/net/broadcom-systemport.txt)0
-rw-r--r--Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt (renamed from Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt)0
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt6
-rw-r--r--Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt8
-rw-r--r--Documentation/devicetree/bindings/net/ieee802154/cc2520.txt4
-rw-r--r--Documentation/devicetree/bindings/net/keystone-netcp.txt34
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt5
-rw-r--r--Documentation/devicetree/bindings/net/nfc/nxp-nci.txt35
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt11
-rw-r--r--Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt47
-rw-r--r--Documentation/devicetree/bindings/panel/ampire,am800480r3tmqwa1h.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/auo,b101ean01.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/innolux,at043tn24.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/innolux,zj070na-01p.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/ortustech,com43h4m85ulc.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/samsung,ltn140at29-301.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/shelly,sca07010-bfn-lnn.txt7
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt (renamed from Documentation/devicetree/bindings/phy/bcm-phy.txt)0
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt98
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt132
-rw-r--r--Documentation/devicetree/bindings/pinctrl/marvell,armada-39x-pinctrl.txt78
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt166
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt39
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt145
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt2
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpc.txt59
-rw-r--r--Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt1
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt (renamed from Documentation/devicetree/bindings/pwm/bcm-kona-pwm.txt)0
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-pwm.txt12
-rw-r--r--Documentation/devicetree/bindings/reset/brcm,bcm21664-resetmgr.txt (renamed from Documentation/devicetree/bindings/arm/bcm/kona-resetmgr.txt)0
-rw-r--r--Documentation/devicetree/bindings/rtc/abracon,abx80x.txt30
-rw-r--r--Documentation/devicetree/bindings/rtc/digicolor-rtc.txt17
-rw-r--r--Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt5
-rw-r--r--Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt34
-rw-r--r--Documentation/devicetree/bindings/serial/atmel-usart.txt3
-rw-r--r--Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt (renamed from Documentation/devicetree/bindings/serial/bcm63xx-uart.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/omap_serial.txt20
-rw-r--r--Documentation/devicetree/bindings/soc/mediatek/pwrap.txt58
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/brcm,bcm2835-i2s.txt (renamed from Documentation/devicetree/bindings/sound/bcm2835-i2s.txt)0
-rw-r--r--Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/max98925.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/omap-twl4030.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt43
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt125
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt67
-rw-r--r--Documentation/devicetree/bindings/sound/storm.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/wm8804.txt7
-rw-r--r--Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt6
-rw-r--r--Documentation/devicetree/bindings/timer/brcm,kona-timer.txt (renamed from Documentation/devicetree/bindings/arm/bcm/kona-timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/unittest.txt44
-rw-r--r--Documentation/devicetree/bindings/usb/brcm,bcm3384-usb.txt (renamed from Documentation/devicetree/bindings/mips/brcm/usb.txt)0
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt12
-rw-r--r--Documentation/devicetree/bindings/video/atmel,lcdc.txt12
-rw-r--r--Documentation/devicetree/bindings/video/backlight/sky81452-backlight.txt29
-rw-r--r--Documentation/devicetree/bindings/video/ti,omap-dss.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.txt (renamed from Documentation/devicetree/bindings/arm/bcm/kona-wdt.txt)0
-rw-r--r--Documentation/devicetree/booting-without-of.txt28
-rw-r--r--Documentation/devicetree/of_unittest.txt (renamed from Documentation/devicetree/of_selftest.txt)28
-rw-r--r--Documentation/dma-buf-sharing.txt23
-rw-r--r--Documentation/driver-model/devres.txt5
-rw-r--r--Documentation/email-clients.txt11
-rw-r--r--Documentation/filesystems/Locking12
-rw-r--r--Documentation/filesystems/f2fs.txt6
-rw-r--r--Documentation/filesystems/nfs/nfs-rdma.txt9
-rw-r--r--Documentation/filesystems/porting12
-rw-r--r--Documentation/filesystems/proc.txt21
-rw-r--r--Documentation/filesystems/vfs.txt8
-rw-r--r--Documentation/filesystems/xfs.txt29
-rw-r--r--Documentation/gpio/board.txt41
-rw-r--r--Documentation/gpio/consumer.txt65
-rw-r--r--Documentation/i2c/slave-eeprom-backend14
-rw-r--r--Documentation/i2c/slave-interface179
-rw-r--r--Documentation/i2c/summary4
-rw-r--r--Documentation/i2o/README63
-rw-r--r--Documentation/i2o/ioctl394
-rw-r--r--Documentation/input/alps.txt13
-rw-r--r--Documentation/input/event-codes.txt2
-rw-r--r--Documentation/input/gpio-tilt.txt2
-rw-r--r--Documentation/input/iforce-protocol.txt2
-rw-r--r--Documentation/input/walkera0701.txt2
-rw-r--r--Documentation/input/yealink.txt2
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kasan.txt8
-rw-r--r--Documentation/kernel-parameters.txt80
-rw-r--r--Documentation/kernel-per-CPU-kthreads.txt34
-rw-r--r--Documentation/kmemcheck.txt4
-rw-r--r--Documentation/kprobes.txt4
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt18
-rw-r--r--Documentation/leds/leds-class-flash.txt22
-rw-r--r--Documentation/md-cluster.txt176
-rw-r--r--Documentation/memory-barriers.txt44
-rw-r--r--Documentation/memory-hotplug.txt47
-rw-r--r--Documentation/module-signing.txt6
-rw-r--r--Documentation/networking/can.txt20
-rw-r--r--Documentation/networking/filter.txt3
-rw-r--r--Documentation/networking/igb.txt4
-rw-r--r--Documentation/networking/ip-sysctl.txt49
-rw-r--r--Documentation/networking/ipvs-sysctl.txt21
-rw-r--r--Documentation/networking/ixgb.txt12
-rw-r--r--Documentation/networking/ixgbe.txt14
-rw-r--r--Documentation/networking/mpls-sysctl.txt29
-rw-r--r--Documentation/networking/packet_mmap.txt13
-rw-r--r--Documentation/networking/pktgen.txt62
-rw-r--r--Documentation/networking/rds.txt9
-rw-r--r--Documentation/networking/s2io.txt2
-rw-r--r--Documentation/networking/scaling.txt11
-rw-r--r--Documentation/networking/vxge.txt2
-rw-r--r--Documentation/pinctrl.txt25
-rw-r--r--Documentation/power/basic-pm-debugging.txt10
-rw-r--r--Documentation/powerpc/pci_iov_resource_on_powernv.txt301
-rw-r--r--Documentation/powerpc/transactional_memory.txt4
-rw-r--r--Documentation/printk-formats.txt59
-rw-r--r--Documentation/scheduler/completion.txt132
-rw-r--r--Documentation/security/Smack.txt129
-rw-r--r--Documentation/serial/tty.txt3
-rw-r--r--Documentation/sound/alsa/ControlNames.txt10
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt6
-rw-r--r--Documentation/sound/alsa/timestamping.txt200
-rw-r--r--Documentation/spi/spidev_test.c4
-rw-r--r--Documentation/sysctl/kernel.txt83
-rw-r--r--Documentation/sysctl/vm.txt11
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py79
-rw-r--r--Documentation/target/tcmu-design.txt43
-rw-r--r--Documentation/timers/NO_HZ.txt10
-rw-r--r--Documentation/trace/coresight.txt2
-rw-r--r--Documentation/video4linux/v4l2-controls.txt4
-rw-r--r--Documentation/video4linux/v4l2-framework.txt6
-rw-r--r--Documentation/video4linux/vivid.txt5
-rw-r--r--Documentation/virtual/kvm/api.txt17
-rw-r--r--Documentation/vm/cleancache.txt4
-rw-r--r--Documentation/vm/hugetlbpage.txt55
-rw-r--r--Documentation/vm/pagemap.txt3
-rw-r--r--Documentation/vm/transhuge.txt11
-rw-r--r--Documentation/vm/unevictable-lru.txt38
-rw-r--r--Documentation/vm/zsmalloc.txt70
-rw-r--r--Documentation/zh_CN/arm64/booting.txt54
-rw-r--r--Documentation/zh_CN/arm64/legacy_instructions.txt72
-rw-r--r--Documentation/zh_CN/arm64/memory.txt65
-rw-r--r--Kbuild21
-rw-r--r--MAINTAINERS380
-rw-r--r--Makefile32
-rw-r--r--README42
-rw-r--r--arch/Kconfig17
-rw-r--r--arch/alpha/Kconfig4
-rw-r--r--arch/alpha/include/asm/processor.h1
-rw-r--r--arch/alpha/include/asm/thread_info.h2
-rw-r--r--arch/arc/Kconfig.debug13
-rw-r--r--arch/arc/boot/dts/angel4.dts2
-rw-r--r--arch/arc/configs/nsimosci_defconfig19
-rw-r--r--arch/arc/include/asm/arcregs.h14
-rw-r--r--arch/arc/include/asm/atomic.h2
-rw-r--r--arch/arc/include/asm/bitops.h31
-rw-r--r--arch/arc/include/asm/perf_event.h70
-rw-r--r--arch/arc/include/asm/thread_info.h2
-rw-r--r--arch/arc/kernel/perf_event.c73
-rw-r--r--arch/arc/kernel/process.c11
-rw-r--r--arch/arc/kernel/setup.c5
-rw-r--r--arch/arc/kernel/signal.c14
-rw-r--r--arch/arc/kernel/traps.c4
-rw-r--r--arch/arc/kernel/troubleshoot.c3
-rw-r--r--arch/arc/kernel/unwind.c2
-rw-r--r--arch/arc/mm/cache_arc700.c4
-rw-r--r--arch/arc/mm/init.c9
-rw-r--r--arch/arm/Kconfig89
-rw-r--r--arch/arm/Kconfig.debug108
-rw-r--r--arch/arm/Makefile13
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/boot/compressed/Makefile15
-rw-r--r--arch/arm/boot/compressed/head-shmobile.S30
-rw-r--r--arch/arm/boot/compressed/head.S52
-rw-r--r--arch/arm/boot/compressed/mmcif-sh7372.c88
-rw-r--r--arch/arm/boot/compressed/sdhi-sh7372.c95
-rw-r--r--arch/arm/boot/compressed/sdhi-shmobile.c449
-rw-r--r--arch/arm/boot/compressed/sdhi-shmobile.h11
-rw-r--r--arch/arm/boot/dts/Makefile35
-rw-r--r--arch/arm/boot/dts/alpine-db.dts35
-rw-r--r--arch/arm/boot/dts/alpine.dtsi160
-rw-r--r--arch/arm/boot/dts/am335x-chiliboard.dts112
-rw-r--r--arch/arm/boot/dts/am335x-chilisom.dtsi239
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts11
-rw-r--r--arch/arm/boot/dts/am335x-nano.dts18
-rw-r--r--arch/arm/boot/dts/am33xx-clocks.dtsi2
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi87
-rw-r--r--arch/arm/boot/dts/am3517.dtsi2
-rw-r--r--arch/arm/boot/dts/am35xx-clocks.dtsi2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi91
-rw-r--r--arch/arm/boot/dts/am437x-idk-evm.dts22
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts4
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts84
-rw-r--r--arch/arm/boot/dts/am43xx-clocks.dtsi2
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts69
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts13
-rw-r--r--arch/arm/boot/dts/armada-370-mirabox.dts2
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn102.dts2
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn104.dts2
-rw-r--r--arch/arm/boot/dts/armada-370-rd.dts2
-rw-r--r--arch/arm/boot/dts/armada-370-synology-ds213j.dts3
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi11
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-375-db.dts2
-rw-r--r--arch/arm/boot/dts/armada-375.dtsi17
-rw-r--r--arch/arm/boot/dts/armada-385-db-ap.dts44
-rw-r--r--arch/arm/boot/dts/armada-388-db.dts4
-rw-r--r--arch/arm/boot/dts/armada-388-gp.dts5
-rw-r--r--arch/arm/boot/dts/armada-388-rd.dts12
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi25
-rw-r--r--arch/arm/boot/dts/armada-390.dtsi57
-rw-r--r--arch/arm/boot/dts/armada-398-db.dts153
-rw-r--r--arch/arm/boot/dts/armada-398.dtsi60
-rw-r--r--arch/arm/boot/dts/armada-39x.dtsi508
-rw-r--r--arch/arm/boot/dts/armada-xp-axpwifiap.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts3
-rw-r--r--arch/arm/boot/dts/armada-xp-linksys-mamba.dts393
-rw-r--r--arch/arm/boot/dts/armada-xp-matrix.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi1
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78460.dtsi1
-rw-r--r--arch/arm/boot/dts/armada-xp-netgear-rn2120.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts6
-rw-r--r--arch/arm/boot/dts/armada-xp-synology-ds414.dts3
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi8
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts14
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts241
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts55
-rw-r--r--arch/arm/boot/dts/at91rm9200.dtsi8
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9g25.dtsi1
-rw-r--r--arch/arm/boot/dts/at91sam9g25ek.dts18
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi11
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts18
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5_isi.dtsi46
-rw-r--r--arch/arm/boot/dts/at91sam9x5_macb0.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5_macb1.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5cm.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi48
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi78
-rw-r--r--arch/arm/boot/dts/bcm4708-netgear-r6250.dts4
-rw-r--r--arch/arm/boot/dts/bcm4709-netgear-r8000.dts77
-rw-r--r--arch/arm/boot/dts/bcm7445.dtsi14
-rw-r--r--arch/arm/boot/dts/bcm911360_entphn.dts13
-rw-r--r--arch/arm/boot/dts/bcm958300k.dts8
-rw-r--r--arch/arm/boot/dts/bcm958305k.dts53
-rw-r--r--arch/arm/boot/dts/dm8168-evm.dts14
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi60
-rw-r--r--arch/arm/boot/dts/dove-cubox.dts1
-rw-r--r--arch/arm/boot/dts/dove.dtsi63
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts8
-rw-r--r--arch/arm/boot/dts/dra7.dtsi192
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts8
-rw-r--r--arch/arm/boot/dts/dra72x.dtsi5
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi5
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi8
-rw-r--r--arch/arm/boot/dts/emev2-kzm9d.dts13
-rw-r--r--arch/arm/boot/dts/emev2.dtsi10
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi17
-rw-r--r--arch/arm/boot/dts/exynos5250-snow.dts58
-rw-r--r--arch/arm/boot/dts/exynos5250-spring.dts21
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts4
-rw-r--r--arch/arm/boot/dts/exynos5420-peach-pit.dts100
-rw-r--r--arch/arm/boot/dts/exynos5420-pinctrl.dtsi7
-rw-r--r--arch/arm/boot/dts/exynos5420-smdk5420.dts7
-rw-r--r--arch/arm/boot/dts/exynos5420-trip-points.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi11
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3.dts21
-rw-r--r--arch/arm/boot/dts/exynos5440-trip-points.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts72
-rw-r--r--arch/arm/boot/dts/hip04.dtsi1
-rw-r--r--arch/arm/boot/dts/imx23-olinuxino.dts4
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts58
-rw-r--r--arch/arm/boot/dts/imx25-pinfunc.h86
-rw-r--r--arch/arm/boot/dts/imx25.dtsi1
-rw-r--r--arch/arm/boot/dts/imx27.dtsi2
-rw-r--r--arch/arm/boot/dts/imx28-apf28.dts2
-rw-r--r--arch/arm/boot/dts/imx28-apf28dev.dts30
-rw-r--r--arch/arm/boot/dts/imx28.dtsi16
-rw-r--r--arch/arm/boot/dts/imx35.dtsi1
-rw-r--r--arch/arm/boot/dts/imx50.dtsi3
-rw-r--r--arch/arm/boot/dts/imx51.dtsi3
-rw-r--r--arch/arm/boot/dts/imx53.dtsi3
-rw-r--r--arch/arm/boot/dts/imx6dl-aristainetos_4.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-aristainetos_7.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-cubox-i.dts38
-rw-r--r--arch/arm/boot/dts/imx6dl-hummingboard.dts38
-rw-r--r--arch/arm/boot/dts/imx6q-cubox-i.dts38
-rw-r--r--arch/arm/boot/dts/imx6q-hummingboard.dts38
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi20
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi62
-rw-r--r--arch/arm/boot/dts/imx6qdl-hummingboard.dtsi101
-rw-r--r--arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi38
-rw-r--r--arch/arm/boot/dts/imx6qdl-microsom.dtsi38
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi34
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi49
-rw-r--r--arch/arm/boot/dts/imx6sl-warp.dts262
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi13
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb-reva.dts143
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dts603
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi562
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi9
-rw-r--r--arch/arm/boot/dts/kirkwood-nas2big.dts143
-rw-r--r--arch/arm/boot/dts/kirkwood-net2big.dts5
-rw-r--r--arch/arm/boot/dts/meson.dtsi20
-rw-r--r--arch/arm/boot/dts/meson6-atv1200.dts4
-rw-r--r--arch/arm/boot/dts/meson8-minix-neo-x8.dts128
-rw-r--r--arch/arm/boot/dts/meson8.dtsi68
-rw-r--r--arch/arm/boot/dts/mt6589.dtsi5
-rw-r--r--arch/arm/boot/dts/nspire-classic.dtsi5
-rw-r--r--arch/arm/boot/dts/nspire-cx.dts4
-rw-r--r--arch/arm/boot/dts/nspire.dtsi21
-rw-r--r--arch/arm/boot/dts/omap2420.dtsi80
-rw-r--r--arch/arm/boot/dts/omap2430-clocks.dtsi8
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi107
-rw-r--r--arch/arm/boot/dts/omap24xx-clocks.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts2
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts54
-rw-r--r--arch/arm/boot/dts/omap3-cm-t3517.dts10
-rw-r--r--arch/arm/boot/dts/omap3-cm-t3730.dts10
-rw-r--r--arch/arm/boot/dts/omap3-cm-t3x30.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000.dts1
-rw-r--r--arch/arm/boot/dts/omap3-evm-common.dtsi10
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-rev-f.dts9
-rw-r--r--arch/arm/boot/dts/omap3-igep0030-rev-g.dts9
-rw-r--r--arch/arm/boot/dts/omap3-lilly-a83x.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-n9.dts37
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts72
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-n950.dts37
-rw-r--r--arch/arm/boot/dts/omap3-overo-base.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-pandora-1ghz.dts70
-rw-r--r--arch/arm/boot/dts/omap3-pandora-600mhz.dts65
-rw-r--r--arch/arm/boot/dts/omap3-pandora-common.dtsi640
-rw-r--r--arch/arm/boot/dts/omap3-tao3530.dtsi12
-rw-r--r--arch/arm/boot/dts/omap3-zoom3.dts10
-rw-r--r--arch/arm/boot/dts/omap3.dtsi98
-rw-r--r--arch/arm/boot/dts/omap34xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/omap34xx.dtsi17
-rw-r--r--arch/arm/boot/dts/omap36xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/omap36xx.dtsi17
-rw-r--r--arch/arm/boot/dts/omap3xxx-clocks.dtsi13
-rw-r--r--arch/arm/boot/dts/omap4-cpu-thermal.dtsi4
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi10
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts11
-rw-r--r--arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi10
-rw-r--r--arch/arm/boot/dts/omap4.dtsi200
-rw-r--r--arch/arm/boot/dts/omap5.dtsi184
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi51
-rw-r--r--arch/arm/boot/dts/qcom-apq8074-dragonboard.dts2
-rw-r--r--arch/arm/boot/dts/qcom-apq8084-ifc6540.dts1
-rw-r--r--arch/arm/boot/dts/qcom-apq8084-mtp.dts1
-rw-r--r--arch/arm/boot/dts/qcom-apq8084.dtsi56
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi52
-rw-r--r--arch/arm/boot/dts/qcom-msm8660.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom-msm8960.dtsi15
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts2
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi56
-rw-r--r--arch/arm/boot/dts/qcom-pm8841.dtsi18
-rw-r--r--arch/arm/boot/dts/qcom-pm8941.dtsi18
-rw-r--r--arch/arm/boot/dts/qcom-pma8084.dtsi18
-rw-r--r--arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts156
-rw-r--r--arch/arm/boot/dts/r8a73a4-ape6evm.dts59
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi557
-rw-r--r--arch/arm/boot/dts/r8a7740.dtsi79
-rw-r--r--arch/arm/boot/dts/r8a7778-bockw.dts174
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi293
-rw-r--r--arch/arm/boot/dts/r8a7779-marzen.dts9
-rw-r--r--arch/arm/boot/dts/r8a7790-lager.dts87
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi259
-rw-r--r--arch/arm/boot/dts/r8a7791-henninger.dts11
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts87
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi278
-rw-r--r--arch/arm/boot/dts/r8a7794-alt.dts13
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi157
-rw-r--r--arch/arm/boot/dts/rk3188-radxarock.dts16
-rw-r--r--arch/arm/boot/dts/rk3288-evb-act8846.dts10
-rw-r--r--arch/arm/boot/dts/rk3288-evb-rk808.dts7
-rw-r--r--arch/arm/boot/dts/rk3288-evb.dtsi13
-rw-r--r--arch/arm/boot/dts/rk3288-firefly.dtsi20
-rw-r--r--arch/arm/boot/dts/rk3288-popmetal.dts447
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi35
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi36
-rw-r--r--arch/arm/boot/dts/sama5d35ek.dts2
-rw-r--r--arch/arm/boot/dts/sama5d3_can.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d3_emac.dtsi4
-rw-r--r--arch/arm/boot/dts/sama5d3_gmac.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d3_lcd.dtsi207
-rw-r--r--arch/arm/boot/dts/sama5d3_mci2.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d3_tcb1.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d3_uart.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi344
-rw-r--r--arch/arm/boot/dts/sh7372-mackerel.dts26
-rw-r--r--arch/arm/boot/dts/sh7372.dtsi35
-rw-r--r--arch/arm/boot/dts/sh73a0-kzm9g-reference.dts366
-rw-r--r--arch/arm/boot/dts/sh73a0-kzm9g.dts378
-rw-r--r--arch/arm/boot/dts/sh73a0.dtsi245
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi17
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi15
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts13
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi2421
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1.dts557
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi2023
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-big.dts2005
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi2049
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-blaze.dts1334
-rw-r--r--arch/arm/boot/dts/tegra124-nyan.dtsi695
-rw-r--r--arch/arm/boot/dts/tegra124.dtsi27
-rw-r--r--arch/arm/boot/dts/tegra30-beaver.dts1651
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts2
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca9.dts11
-rw-r--r--arch/arm/boot/dts/vf-colibri-eval-v3.dtsi31
-rw-r--r--arch/arm/boot/dts/vf-colibri.dtsi15
-rw-r--r--arch/arm/boot/dts/vf500.dtsi137
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi64
-rw-r--r--arch/arm/common/mcpm_entry.c202
-rw-r--r--arch/arm/configs/ape6evm_defconfig109
-rw-r--r--arch/arm/configs/at91_dt_defconfig5
-rw-r--r--arch/arm/configs/badge4_defconfig1
-rw-r--r--arch/arm/configs/exynos_defconfig20
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig4
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig4
-rw-r--r--arch/arm/configs/mackerel_defconfig157
-rw-r--r--arch/arm/configs/msm_defconfig121
-rw-r--r--arch/arm/configs/multi_v5_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig17
-rw-r--r--arch/arm/configs/mvebu_v7_defconfig2
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/omap1_defconfig16
-rw-r--r--arch/arm/configs/omap2plus_defconfig27
-rw-r--r--arch/arm/configs/qcom_defconfig9
-rw-r--r--arch/arm/configs/shmobile_defconfig9
-rw-r--r--arch/arm/configs/sunxi_defconfig1
-rw-r--r--arch/arm/crypto/Kconfig130
-rw-r--r--arch/arm/crypto/Makefile27
-rw-r--r--arch/arm/crypto/aes-ce-core.S518
-rw-r--r--arch/arm/crypto/aes-ce-glue.c524
-rw-r--r--arch/arm/crypto/aesbs-glue.c9
-rw-r--r--arch/arm/crypto/ghash-ce-core.S94
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c320
-rw-r--r--arch/arm/crypto/sha1-ce-core.S125
-rw-r--r--arch/arm/crypto/sha1-ce-glue.c96
-rw-r--r--arch/arm/crypto/sha1.h (renamed from arch/arm/include/asm/crypto/sha1.h)3
-rw-r--r--arch/arm/crypto/sha1_glue.c112
-rw-r--r--arch/arm/crypto/sha1_neon_glue.c137
-rw-r--r--arch/arm/crypto/sha2-ce-core.S125
-rw-r--r--arch/arm/crypto/sha2-ce-glue.c114
-rw-r--r--arch/arm/crypto/sha256-armv4.pl716
-rw-r--r--arch/arm/crypto/sha256-core.S_shipped2808
-rw-r--r--arch/arm/crypto/sha256_glue.c128
-rw-r--r--arch/arm/crypto/sha256_glue.h14
-rw-r--r--arch/arm/crypto/sha256_neon_glue.c101
-rw-r--r--arch/arm/include/asm/Kbuild2
-rw-r--r--arch/arm/include/asm/arm-cci.h42
-rw-r--r--arch/arm/include/asm/assembler.h3
-rw-r--r--arch/arm/include/asm/auxvec.h1
-rw-r--r--arch/arm/include/asm/cpuidle.h23
-rw-r--r--arch/arm/include/asm/cputype.h16
-rw-r--r--arch/arm/include/asm/dma-iommu.h2
-rw-r--r--arch/arm/include/asm/elf.h15
-rw-r--r--arch/arm/include/asm/futex.h2
-rw-r--r--arch/arm/include/asm/kvm_mmu.h10
-rw-r--r--arch/arm/include/asm/mcpm.h65
-rw-r--r--arch/arm/include/asm/mmu.h3
-rw-r--r--arch/arm/include/asm/pmu.h1
-rw-r--r--arch/arm/include/asm/seccomp.h11
-rw-r--r--arch/arm/include/asm/smp_plat.h1
-rw-r--r--arch/arm/include/asm/thread_info.h3
-rw-r--r--arch/arm/include/asm/uaccess.h10
-rw-r--r--arch/arm/include/asm/unified.h8
-rw-r--r--arch/arm/include/asm/vdso.h32
-rw-r--r--arch/arm/include/asm/vdso_datapage.h60
-rw-r--r--arch/arm/include/asm/word-at-a-time.h2
-rw-r--r--arch/arm/include/asm/xen/page.h1
-rw-r--r--arch/arm/include/debug/msm.S14
-rw-r--r--arch/arm/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm/include/uapi/asm/auxvec.h7
-rw-r--r--arch/arm/include/uapi/asm/kvm.h8
-rw-r--r--arch/arm/kernel/Makefile6
-rw-r--r--arch/arm/kernel/arthur.c94
-rw-r--r--arch/arm/kernel/asm-offsets.c6
-rw-r--r--arch/arm/kernel/bios32.c10
-rw-r--r--arch/arm/kernel/cpuidle.c133
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/head-nommu.S12
-rw-r--r--arch/arm/kernel/head.S14
-rw-r--r--arch/arm/kernel/hibernate.c6
-rw-r--r--arch/arm/kernel/hw_breakpoint.c2
-rw-r--r--arch/arm/kernel/machine_kexec.c3
-rw-r--r--arch/arm/kernel/module.c38
-rw-r--r--arch/arm/kernel/perf_event.c21
-rw-r--r--arch/arm/kernel/perf_event_cpu.c76
-rw-r--r--arch/arm/kernel/perf_event_v7.c525
-rw-r--r--arch/arm/kernel/process.c159
-rw-r--r--arch/arm/kernel/psci-call.S31
-rw-r--r--arch/arm/kernel/psci.c39
-rw-r--r--arch/arm/kernel/reboot.c155
-rw-r--r--arch/arm/kernel/reboot.h7
-rw-r--r--arch/arm/kernel/return_address.c4
-rw-r--r--arch/arm/kernel/setup.c44
-rw-r--r--arch/arm/kernel/signal.c13
-rw-r--r--arch/arm/kernel/sleep.S15
-rw-r--r--arch/arm/kernel/smp.c5
-rw-r--r--arch/arm/kernel/swp_emulate.c2
-rw-r--r--arch/arm/kernel/traps.c6
-rw-r--r--arch/arm/kernel/vdso.c337
-rw-r--r--arch/arm/kernel/vmlinux.lds.S18
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/kvm/mmu.c69
-rw-r--r--arch/arm/lib/clear_user.S2
-rw-r--r--arch/arm/lib/copy_to_user.S2
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S2
-rw-r--r--arch/arm/lib/delay.c6
-rw-r--r--arch/arm/mach-alpine/Kconfig12
-rw-r--r--arch/arm/mach-alpine/Makefile2
-rw-r--r--arch/arm/mach-alpine/alpine_cpu_pm.c70
-rw-r--r--arch/arm/mach-alpine/alpine_cpu_pm.h (renamed from arch/arm/mach-at91/include/mach/io.h)21
-rw-r--r--arch/arm/mach-alpine/alpine_cpu_resume.h38
-rw-r--r--arch/arm/mach-alpine/alpine_machine.c (renamed from arch/powerpc/platforms/cell/beat_interrupt.h)26
-rw-r--r--arch/arm/mach-alpine/platsmp.c49
-rw-r--r--arch/arm/mach-at91/Kconfig132
-rw-r--r--arch/arm/mach-at91/Makefile11
-rw-r--r--arch/arm/mach-at91/at91rm9200.c46
-rw-r--r--arch/arm/mach-at91/at91sam9.c76
-rw-r--r--arch/arm/mach-at91/generic.h7
-rw-r--r--arch/arm/mach-at91/include/mach/at91_dbgu.h63
-rw-r--r--arch/arm/mach-at91/include/mach/at91_matrix.h23
-rw-r--r--arch/arm/mach-at91/include/mach/at91_ramc.h4
-rw-r--r--arch/arm/mach-at91/include/mach/at91_st.h61
-rw-r--r--arch/arm/mach-at91/include/mach/at91rm9200.h103
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9260.h129
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9260_matrix.h80
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9261.h99
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9261_matrix.h64
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9263.h117
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9263_matrix.h129
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9_smc.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45.h143
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h153
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9n12.h65
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9n12_matrix.h53
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9rl.h105
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9rl_matrix.h96
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9x5.h71
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9x5_matrix.h53
-rw-r--r--arch/arm/mach-at91/include/mach/cpu.h216
-rw-r--r--arch/arm/mach-at91/include/mach/hardware.h134
-rw-r--r--arch/arm/mach-at91/include/mach/sama5d3.h86
-rw-r--r--arch/arm/mach-at91/include/mach/sama5d4.h33
-rw-r--r--arch/arm/mach-at91/include/mach/uncompress.h218
-rw-r--r--arch/arm/mach-at91/pm.c238
-rw-r--r--arch/arm/mach-at91/pm.h100
-rw-r--r--arch/arm/mach-at91/pm_slowclock.S335
-rw-r--r--arch/arm/mach-at91/pm_suspend.S337
-rw-r--r--arch/arm/mach-at91/sama5.c95
-rw-r--r--arch/arm/mach-at91/setup.c330
-rw-r--r--arch/arm/mach-at91/soc.c97
-rw-r--r--arch/arm/mach-at91/soc.h78
-rw-r--r--arch/arm/mach-bcm/bcm_cygnus.c2
-rw-r--r--arch/arm/mach-cns3xxx/pm.c1
-rw-r--r--arch/arm/mach-davinci/Kconfig11
-rw-r--r--arch/arm/mach-davinci/asp.h7
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c112
-rw-r--r--arch/arm/mach-davinci/cpuidle.c1
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c80
-rw-r--r--arch/arm/mach-davinci/dm646x.c24
-rw-r--r--arch/arm/mach-davinci/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-exynos/common.h8
-rw-r--r--arch/arm/mach-exynos/exynos.c30
-rw-r--r--arch/arm/mach-exynos/firmware.c33
-rw-r--r--arch/arm/mach-exynos/mcpm-exynos.c247
-rw-r--r--arch/arm/mach-exynos/platsmp.c62
-rw-r--r--arch/arm/mach-exynos/pm.c14
-rw-r--r--arch/arm/mach-exynos/pm_domains.c33
-rw-r--r--arch/arm/mach-exynos/regs-pmu.h3
-rw-r--r--arch/arm/mach-exynos/sleep.S31
-rw-r--r--arch/arm/mach-exynos/smc.h9
-rw-r--r--arch/arm/mach-exynos/suspend.c29
-rw-r--r--arch/arm/mach-gemini/common.h4
-rw-r--r--arch/arm/mach-gemini/reset.c4
-rw-r--r--arch/arm/mach-imx/Kconfig84
-rw-r--r--arch/arm/mach-imx/Makefile8
-rw-r--r--arch/arm/mach-imx/clk-imx25.c75
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c8
-rw-r--r--arch/arm/mach-imx/common.h7
-rw-r--r--arch/arm/mach-imx/cpu-imx25.c11
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c1
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sl.c1
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sx.c1
-rw-r--r--arch/arm/mach-imx/devices-imx25.h85
-rw-r--r--arch/arm/mach-imx/devices/Kconfig3
-rw-r--r--arch/arm/mach-imx/devices/Makefile1
-rw-r--r--arch/arm/mach-imx/devices/platform-fec.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-fb.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-i2c.c10
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-keypad.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-ssi.c9
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-uart.c12
-rw-r--r--arch/arm/mach-imx/devices/platform-imx2-wdt.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-imxdi_rtc.c42
-rw-r--r--arch/arm/mach-imx/devices/platform-mx2-camera.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-mxc-ehci.c7
-rw-r--r--arch/arm/mach-imx/devices/platform-mxc_nand.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c2
-rw-r--r--arch/arm/mach-imx/devices/platform-spi_imx.c11
-rw-r--r--arch/arm/mach-imx/ehci-imx25.c99
-rw-r--r--arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c310
-rw-r--r--arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c2
-rw-r--r--arch/arm/mach-imx/gpc.c353
-rw-r--r--arch/arm/mach-imx/hardware.h1
-rw-r--r--arch/arm/mach-imx/iomux-mx25.h524
-rw-r--r--arch/arm/mach-imx/iomux-mx3.h2
-rw-r--r--arch/arm/mach-imx/iomux-v3.c5
-rw-r--r--arch/arm/mach-imx/iomux-v3.h5
-rw-r--r--arch/arm/mach-imx/mach-cpuimx35.c2
-rw-r--r--arch/arm/mach-imx/mach-eukrea_cpuimx25.c172
-rw-r--r--arch/arm/mach-imx/mach-imx25.c (renamed from arch/arm/mach-imx/imx25-dt.c)20
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6sl.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6sx.c2
-rw-r--r--arch/arm/mach-imx/mach-mx25_3ds.c270
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm043.c2
-rw-r--r--arch/arm/mach-imx/mach-vpr200.c2
-rw-r--r--arch/arm/mach-imx/mm-imx25.c89
-rw-r--r--arch/arm/mach-imx/mx25.h117
-rw-r--r--arch/arm/mach-imx/pm-imx6.c6
-rw-r--r--arch/arm/mach-mediatek/Kconfig1
-rw-r--r--arch/arm/mach-meson/Kconfig3
-rw-r--r--arch/arm/mach-msm/Kconfig109
-rw-r--r--arch/arm/mach-msm/Makefile23
-rw-r--r--arch/arm/mach-msm/Makefile.boot3
-rw-r--r--arch/arm/mach-msm/board-halibut.c110
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c191
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c254
-rw-r--r--arch/arm/mach-msm/board-sapphire.c114
-rw-r--r--arch/arm/mach-msm/board-trout-gpio.c233
-rw-r--r--arch/arm/mach-msm/board-trout-mmc.c185
-rw-r--r--arch/arm/mach-msm/board-trout-panel.c292
-rw-r--r--arch/arm/mach-msm/board-trout.c111
-rw-r--r--arch/arm/mach-msm/board-trout.h162
-rw-r--r--arch/arm/mach-msm/clock-pcom.c176
-rw-r--r--arch/arm/mach-msm/clock-pcom.h145
-rw-r--r--arch/arm/mach-msm/clock.c28
-rw-r--r--arch/arm/mach-msm/clock.h43
-rw-r--r--arch/arm/mach-msm/common.h41
-rw-r--r--arch/arm/mach-msm/devices-msm7x00.c480
-rw-r--r--arch/arm/mach-msm/devices-msm7x30.c246
-rw-r--r--arch/arm/mach-msm/devices-qsd8x50.c388
-rw-r--r--arch/arm/mach-msm/devices.h53
-rw-r--r--arch/arm/mach-msm/dma.c298
-rw-r--r--arch/arm/mach-msm/gpiomux-8x50.c51
-rw-r--r--arch/arm/mach-msm/gpiomux-v1.h67
-rw-r--r--arch/arm/mach-msm/gpiomux.c111
-rw-r--r--arch/arm/mach-msm/gpiomux.h84
-rw-r--r--arch/arm/mach-msm/include/mach/clk.h31
-rw-r--r--arch/arm/mach-msm/include/mach/dma.h151
-rw-r--r--arch/arm/mach-msm/include/mach/entry-macro.S36
-rw-r--r--arch/arm/mach-msm/include/mach/hardware.h18
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-7x00.h75
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-7x30.h153
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-8x50.h88
-rw-r--r--arch/arm/mach-msm/include/mach/irqs.h37
-rw-r--r--arch/arm/mach-msm/include/mach/msm_gpiomux.h38
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x00.h108
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x30.h103
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x50.h125
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h53
-rw-r--r--arch/arm/mach-msm/include/mach/msm_smd.h109
-rw-r--r--arch/arm/mach-msm/include/mach/sirc.h98
-rw-r--r--arch/arm/mach-msm/include/mach/vreg.h29
-rw-r--r--arch/arm/mach-msm/io.c161
-rw-r--r--arch/arm/mach-msm/irq-vic.c363
-rw-r--r--arch/arm/mach-msm/irq.c151
-rw-r--r--arch/arm/mach-msm/last_radio_log.c71
-rw-r--r--arch/arm/mach-msm/proc_comm.c129
-rw-r--r--arch/arm/mach-msm/proc_comm.h258
-rw-r--r--arch/arm/mach-msm/sirc.c172
-rw-r--r--arch/arm/mach-msm/smd.c1034
-rw-r--r--arch/arm/mach-msm/smd_debug.c311
-rw-r--r--arch/arm/mach-msm/smd_private.h403
-rw-r--r--arch/arm/mach-msm/vreg.c220
-rw-r--r--arch/arm/mach-mvebu/Kconfig14
-rw-r--r--arch/arm/mach-mvebu/board-v7.c20
-rw-r--r--arch/arm/mach-mvebu/dove.c2
-rw-r--r--arch/arm/mach-mvebu/kirkwood.c2
-rw-r--r--arch/arm/mach-mvebu/platsmp-a9.c2
-rw-r--r--arch/arm/mach-mvebu/pmsu.c16
-rw-r--r--arch/arm/mach-omap1/pm.c51
-rw-r--r--arch/arm/mach-omap2/Kconfig41
-rw-r--r--arch/arm/mach-omap2/Makefile4
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c57
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c654
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c433
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c395
-rw-r--r--arch/arm/mach-omap2/clock.c111
-rw-r--r--arch/arm/mach-omap2/clock.h8
-rw-r--r--arch/arm/mach-omap2/cm.h2
-rw-r--r--arch/arm/mach-omap2/cm2xxx.c2
-rw-r--r--arch/arm/mach-omap2/cm2xxx.h2
-rw-r--r--arch/arm/mach-omap2/cm33xx.c2
-rw-r--r--arch/arm/mach-omap2/cm33xx.h3
-rw-r--r--arch/arm/mach-omap2/cm3xxx.c3
-rw-r--r--arch/arm/mach-omap2/cm3xxx.h2
-rw-r--r--arch/arm/mach-omap2/cm44xx.h3
-rw-r--r--arch/arm/mach-omap2/cm_common.c156
-rw-r--r--arch/arm/mach-omap2/cminst44xx.c6
-rw-r--r--arch/arm/mach-omap2/common.c1
-rw-r--r--arch/arm/mach-omap2/common.h3
-rw-r--r--arch/arm/mach-omap2/control.c201
-rw-r--r--arch/arm/mach-omap2/control.h10
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c1
-rw-r--r--arch/arm/mach-omap2/devices.c76
-rw-r--r--arch/arm/mach-omap2/display.c15
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c18
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c4
-rw-r--r--arch/arm/mach-omap2/id.c5
-rw-r--r--arch/arm/mach-omap2/io.c114
-rw-r--r--arch/arm/mach-omap2/mux.c2
-rw-r--r--arch/arm/mach-omap2/omap-secure.h7
-rw-r--r--arch/arm/mach-omap2/omap34xx.h36
-rw-r--r--arch/arm/mach-omap2/omap4-common.c69
-rw-r--r--arch/arm/mach-omap2/omap_device.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c68
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_43xx_data.c106
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c113
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c74
-rw-r--r--arch/arm/mach-omap2/pm24xx.c24
-rw-r--r--arch/arm/mach-omap2/pm34xx.c18
-rw-r--r--arch/arm/mach-omap2/prcm-common.h20
-rw-r--r--arch/arm/mach-omap2/prcm43xx.h4
-rw-r--r--arch/arm/mach-omap2/prm-regbits-34xx.h1
-rw-r--r--arch/arm/mach-omap2/prm-regbits-44xx.h1
-rw-r--r--arch/arm/mach-omap2/prm.h27
-rw-r--r--arch/arm/mach-omap2/prm2xxx.c6
-rw-r--r--arch/arm/mach-omap2/prm2xxx.h4
-rw-r--r--arch/arm/mach-omap2/prm33xx.c2
-rw-r--r--arch/arm/mach-omap2/prm33xx.h2
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c20
-rw-r--r--arch/arm/mach-omap2/prm3xxx.h7
-rw-r--r--arch/arm/mach-omap2/prm44xx.c70
-rw-r--r--arch/arm/mach-omap2/prm44xx.h1
-rw-r--r--arch/arm/mach-omap2/prm44xx_54xx.h8
-rw-r--r--arch/arm/mach-omap2/prm54xx.h1
-rw-r--r--arch/arm/mach-omap2/prm7xx.h2
-rw-r--r--arch/arm/mach-omap2/prm_common.c258
-rw-r--r--arch/arm/mach-omap2/prminst44xx.c38
-rw-r--r--arch/arm/mach-omap2/prminst44xx.h1
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S2
-rw-r--r--arch/arm/mach-omap2/timer.c13
-rw-r--r--arch/arm/mach-omap2/usb-tusb6010.c4
-rw-r--r--arch/arm/mach-omap2/vc.c12
-rw-r--r--arch/arm/mach-omap2/vc.h2
-rw-r--r--arch/arm/mach-omap2/vc3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/vc44xx_data.c1
-rw-r--r--arch/arm/mach-omap2/vp.h9
-rw-r--r--arch/arm/mach-omap2/vp3xxx_data.c4
-rw-r--r--arch/arm/mach-omap2/vp44xx_data.c4
-rw-r--r--arch/arm/mach-pxa/Kconfig9
-rw-r--r--arch/arm/mach-pxa/Makefile1
-rw-r--r--arch/arm/mach-pxa/include/mach/lubbock.h7
-rw-r--r--arch/arm/mach-pxa/include/mach/mainstone.h6
-rw-r--r--arch/arm/mach-pxa/lubbock.c108
-rw-r--r--arch/arm/mach-pxa/mainstone.c115
-rw-r--r--arch/arm/mach-pxa/pxa_cplds_irqs.c200
-rw-r--r--arch/arm/mach-qcom/Kconfig3
-rw-r--r--arch/arm/mach-qcom/Makefile3
-rw-r--r--arch/arm/mach-qcom/platsmp.c23
-rw-r--r--arch/arm/mach-qcom/scm-boot.c39
-rw-r--r--arch/arm/mach-qcom/scm.c326
-rw-r--r--arch/arm/mach-rockchip/platsmp.c4
-rw-r--r--arch/arm/mach-rockchip/pm.c21
-rw-r--r--arch/arm/mach-rockchip/pm.h10
-rw-r--r--arch/arm/mach-rockchip/rockchip.c19
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig19
-rw-r--r--arch/arm/mach-s3c24xx/Makefile3
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/pm-core.h24
-rw-r--r--arch/arm/mach-s3c24xx/pm-s3c2416.c3
-rw-r--r--arch/arm/mach-s3c24xx/pm.c6
-rw-r--r--arch/arm/mach-s3c24xx/s3c2410.c2
-rw-r--r--arch/arm/mach-s3c24xx/s3c2412.c2
-rw-r--r--arch/arm/mach-s3c24xx/s3c2416.c2
-rw-r--r--arch/arm/mach-s3c24xx/s3c2440.c4
-rw-r--r--arch/arm/mach-s3c24xx/s3c2442.c4
-rw-r--r--arch/arm/mach-s3c24xx/s3c244x.c7
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig4
-rw-r--r--arch/arm/mach-s3c64xx/Makefile3
-rw-r--r--arch/arm/mach-s3c64xx/cpuidle.c2
-rw-r--r--arch/arm/mach-s3c64xx/crag6410.h1
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/pm.c2
-rw-r--r--arch/arm/mach-s5pv210/sleep.S2
-rw-r--r--arch/arm/mach-shmobile/Kconfig66
-rw-r--r--arch/arm/mach-shmobile/Makefile22
-rw-r--r--arch/arm/mach-shmobile/Makefile.boot4
-rw-r--r--arch/arm/mach-shmobile/board-ape6evm-reference.c60
-rw-r--r--arch/arm/mach-shmobile/board-ape6evm.c306
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c23
-rw-r--r--arch/arm/mach-shmobile/board-bockw-reference.c2
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c14
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g-reference.c62
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c20
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c1522
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c10
-rw-r--r--arch/arm/mach-shmobile/clock-r8a73a4.c659
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c620
-rw-r--r--arch/arm/mach-shmobile/clock.c11
-rw-r--r--arch/arm/mach-shmobile/common.h10
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c37
-rw-r--r--arch/arm/mach-shmobile/entry-intc.S54
-rw-r--r--arch/arm/mach-shmobile/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-mackerel.txt93
-rw-r--r--arch/arm/mach-shmobile/include/mach/mmc-mackerel.h38
-rw-r--r--arch/arm/mach-shmobile/include/mach/mmc.h16
-rw-r--r--arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h21
-rw-r--r--arch/arm/mach-shmobile/include/mach/sdhi.h16
-rw-r--r--arch/arm/mach-shmobile/include/mach/system.h11
-rw-r--r--arch/arm/mach-shmobile/include/mach/uncompress.h19
-rw-r--r--arch/arm/mach-shmobile/include/mach/zboot.h5
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c672
-rw-r--r--arch/arm/mach-shmobile/pm-r8a7790.c82
-rw-r--r--arch/arm/mach-shmobile/pm-r8a7791.c73
-rw-r--r--arch/arm/mach-shmobile/pm-rcar-gen2.c115
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c549
-rw-r--r--arch/arm/mach-shmobile/r8a73a4.h17
-rw-r--r--arch/arm/mach-shmobile/r8a7790.h1
-rw-r--r--arch/arm/mach-shmobile/r8a7791.h1
-rw-r--r--arch/arm/mach-shmobile/rcar-gen2.h1
-rw-r--r--arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c147
-rw-r--r--arch/arm/mach-shmobile/setup-r8a73a4.c273
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c8
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c19
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c7
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c1016
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c19
-rw-r--r--arch/arm/mach-shmobile/sh7372.h84
-rw-r--r--arch/arm/mach-shmobile/sh73a0.h1
-rw-r--r--arch/arm/mach-shmobile/sleep-sh7372.S98
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c7
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7790.c7
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7791.c4
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c2
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c1
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra30.c1
-rw-r--r--arch/arm/mach-vexpress/Kconfig5
-rw-r--r--arch/arm/mach-vexpress/dcscb.c197
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c291
-rw-r--r--arch/arm/mm/Kconfig16
-rw-r--r--arch/arm/mm/alignment.c6
-rw-r--r--arch/arm/mm/cache-l2x0.c7
-rw-r--r--arch/arm/mm/cache-v7.S38
-rw-r--r--arch/arm/mm/dma-mapping.c129
-rw-r--r--arch/arm/mm/init.c52
-rw-r--r--arch/arm/mm/mmap.c16
-rw-r--r--arch/arm/mm/proc-arm1020.S6
-rw-r--r--arch/arm/mm/proc-arm1020e.S6
-rw-r--r--arch/arm/mm/proc-arm1022.S4
-rw-r--r--arch/arm/mm/proc-arm1026.S4
-rw-r--r--arch/arm/mm/proc-arm720.S4
-rw-r--r--arch/arm/mm/proc-arm740.S4
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S4
-rw-r--r--arch/arm/mm/proc-arm920.S4
-rw-r--r--arch/arm/mm/proc-arm922.S4
-rw-r--r--arch/arm/mm/proc-arm925.S7
-rw-r--r--arch/arm/mm/proc-arm926.S4
-rw-r--r--arch/arm/mm/proc-arm940.S30
-rw-r--r--arch/arm/mm/proc-arm946.S26
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S4
-rw-r--r--arch/arm/mm/proc-fa526.S4
-rw-r--r--arch/arm/mm/proc-feroceon.S4
-rw-r--r--arch/arm/mm/proc-macros.S28
-rw-r--r--arch/arm/mm/proc-mohawk.S4
-rw-r--r--arch/arm/mm/proc-sa110.S4
-rw-r--r--arch/arm/mm/proc-sa1100.S4
-rw-r--r--arch/arm/mm/proc-v6.S4
-rw-r--r--arch/arm/mm/proc-v7-2level.S12
-rw-r--r--arch/arm/mm/proc-v7.S56
-rw-r--r--arch/arm/mm/proc-v7m.S4
-rw-r--r--arch/arm/mm/proc-xsc3.S4
-rw-r--r--arch/arm/mm/proc-xscale.S4
-rw-r--r--arch/arm/net/bpf_jit_32.c42
-rw-r--r--arch/arm/nwfpe/entry.S2
-rw-r--r--arch/arm/plat-pxa/dma.c111
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h14
-rw-r--r--arch/arm/plat-samsung/pm-debug.c1
-rw-r--r--arch/arm/plat-samsung/pm.c20
-rw-r--r--arch/arm/tools/mach-types1
-rw-r--r--arch/arm/vdso/.gitignore3
-rw-r--r--arch/arm/vdso/Makefile74
-rw-r--r--arch/arm/vdso/datapage.S15
-rw-r--r--arch/arm/vdso/vdso.S (renamed from arch/arm64/kernel/cputable.c)30
-rw-r--r--arch/arm/vdso/vdso.lds.S87
-rw-r--r--arch/arm/vdso/vdsomunge.c201
-rw-r--r--arch/arm/vdso/vgettimeofday.c282
-rw-r--r--arch/arm/xen/enlighten.c104
-rw-r--r--arch/arm/xen/mm.c15
-rw-r--r--arch/arm64/Kconfig73
-rw-r--r--arch/arm64/Kconfig.debug2
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/Makefile3
-rw-r--r--arch/arm64/boot/dts/apm/apm-mustang.dts4
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi58
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi31
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts14
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-pinfunc.h682
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile5
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dts (renamed from arch/arm/mach-qcom/scm-boot.h)21
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi (renamed from arch/arm/mach-qcom/scm.h)30
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dts22
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi33
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi196
-rw-r--r--arch/arm64/boot/dts/sprd/Makefile5
-rw-r--r--arch/arm64/boot/dts/sprd/sc9836-openphone.dts49
-rw-r--r--arch/arm64/boot/dts/sprd/sc9836.dtsi129
-rw-r--r--arch/arm64/boot/dts/sprd/sharkl64.dtsi65
-rw-r--r--arch/arm64/boot/dts/xilinx/Makefile5
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts47
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi305
-rw-r--r--arch/arm64/configs/defconfig26
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S12
-rw-r--r--arch/arm64/crypto/aes-ce.S10
-rw-r--r--arch/arm64/crypto/aes-glue.c12
-rw-r--r--arch/arm64/crypto/crc32-arm64.c22
-rw-r--r--arch/arm64/crypto/sha1-ce-core.S33
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c152
-rw-r--r--arch/arm64/crypto/sha2-ce-core.S29
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c228
-rw-r--r--arch/arm64/include/asm/acenv.h18
-rw-r--r--arch/arm64/include/asm/acpi.h96
-rw-r--r--arch/arm64/include/asm/arm-cci.h (renamed from arch/arm64/include/asm/cputable.h)21
-rw-r--r--arch/arm64/include/asm/assembler.h48
-rw-r--r--arch/arm64/include/asm/barrier.h16
-rw-r--r--arch/arm64/include/asm/cpu_ops.h1
-rw-r--r--arch/arm64/include/asm/cpufeature.h18
-rw-r--r--arch/arm64/include/asm/cpuidle.h9
-rw-r--r--arch/arm64/include/asm/dma-mapping.h2
-rw-r--r--arch/arm64/include/asm/elf.h5
-rw-r--r--arch/arm64/include/asm/fixmap.h5
-rw-r--r--arch/arm64/include/asm/insn.h1
-rw-r--r--arch/arm64/include/asm/irq.h13
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h37
-rw-r--r--arch/arm64/include/asm/mmu_context.h43
-rw-r--r--arch/arm64/include/asm/page.h10
-rw-r--r--arch/arm64/include/asm/pci.h6
-rw-r--r--arch/arm64/include/asm/pgalloc.h8
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h13
-rw-r--r--arch/arm64/include/asm/pgtable-types.h12
-rw-r--r--arch/arm64/include/asm/pgtable.h8
-rw-r--r--arch/arm64/include/asm/pmu.h1
-rw-r--r--arch/arm64/include/asm/proc-fns.h9
-rw-r--r--arch/arm64/include/asm/processor.h6
-rw-r--r--arch/arm64/include/asm/psci.h3
-rw-r--r--arch/arm64/include/asm/smp.h5
-rw-r--r--arch/arm64/include/asm/smp_plat.h2
-rw-r--r--arch/arm64/include/asm/thread_info.h3
-rw-r--r--arch/arm64/include/asm/tlb.h4
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h8
-rw-r--r--arch/arm64/kernel/Makefile5
-rw-r--r--arch/arm64/kernel/acpi.c345
-rw-r--r--arch/arm64/kernel/alternative.c2
-rw-r--r--arch/arm64/kernel/asm-offsets.c5
-rw-r--r--arch/arm64/kernel/cpu_errata.c47
-rw-r--r--arch/arm64/kernel/cpu_ops.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c47
-rw-r--r--arch/arm64/kernel/cpuidle.c2
-rw-r--r--arch/arm64/kernel/cpuinfo.c1
-rw-r--r--arch/arm64/kernel/entry.S20
-rw-r--r--arch/arm64/kernel/entry32.S18
-rw-r--r--arch/arm64/kernel/head.S261
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c2
-rw-r--r--arch/arm64/kernel/insn.c81
-rw-r--r--arch/arm64/kernel/pci.c25
-rw-r--r--arch/arm64/kernel/perf_event.c83
-rw-r--r--arch/arm64/kernel/psci.c112
-rw-r--r--arch/arm64/kernel/setup.c76
-rw-r--r--arch/arm64/kernel/signal.c7
-rw-r--r--arch/arm64/kernel/smp.c7
-rw-r--r--arch/arm64/kernel/sys32.c1
-rw-r--r--arch/arm64/kernel/time.c7
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S17
-rw-r--r--arch/arm64/kvm/hyp-init.S25
-rw-r--r--arch/arm64/mm/dma-mapping.c9
-rw-r--r--arch/arm64/mm/dump.c2
-rw-r--r--arch/arm64/mm/init.c2
-rw-r--r--arch/arm64/mm/mmap.c20
-rw-r--r--arch/arm64/mm/mmu.c16
-rw-r--r--arch/arm64/mm/pageattr.c2
-rw-r--r--arch/arm64/mm/proc-macros.S10
-rw-r--r--arch/arm64/mm/proc.S3
-rw-r--r--arch/arm64/net/bpf_jit_comp.c2
-rw-r--r--arch/avr32/include/asm/thread_info.h3
-rw-r--r--arch/avr32/kernel/asm-offsets.c1
-rw-r--r--arch/blackfin/configs/BF518F-EZBRD_defconfig1
-rw-r--r--arch/blackfin/configs/BF527-TLL6527M_defconfig1
-rw-r--r--arch/blackfin/configs/BF533-EZKIT_defconfig1
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig1
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig3
-rw-r--r--arch/blackfin/configs/BF538-EZKIT_defconfig1
-rw-r--r--arch/blackfin/configs/BF561-ACVILON_defconfig1
-rw-r--r--arch/blackfin/configs/BF561-EZKIT-SMP_defconfig1
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig1
-rw-r--r--arch/blackfin/configs/BF609-EZKIT_defconfig1
-rw-r--r--arch/blackfin/configs/CM-BF527_defconfig1
-rw-r--r--arch/blackfin/configs/CM-BF533_defconfig1
-rw-r--r--arch/blackfin/configs/CM-BF537E_defconfig1
-rw-r--r--arch/blackfin/configs/CM-BF537U_defconfig1
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig1
-rw-r--r--arch/blackfin/configs/CM-BF561_defconfig1
-rw-r--r--arch/blackfin/configs/DNP5370_defconfig1
-rw-r--r--arch/blackfin/configs/IP0X_defconfig1
-rw-r--r--arch/blackfin/configs/PNAV-10_defconfig1
-rw-r--r--arch/blackfin/configs/SRV1_defconfig1
-rw-r--r--arch/blackfin/configs/TCM-BF518_defconfig1
-rw-r--r--arch/blackfin/configs/TCM-BF537_defconfig1
-rw-r--r--arch/blackfin/include/asm/io.h35
-rw-r--r--arch/blackfin/include/asm/thread_info.h11
-rw-r--r--arch/blackfin/include/uapi/asm/unistd.h12
-rw-r--r--arch/blackfin/kernel/asm-offsets.c6
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c1
-rw-r--r--arch/blackfin/kernel/kgdb.c25
-rw-r--r--arch/blackfin/kernel/setup.c2
-rw-r--r--arch/blackfin/kernel/signal.c6
-rw-r--r--arch/blackfin/kernel/traps.c1
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF525.h5
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF525.h4
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF542.h4
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF547.h4
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF542.h3
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF547.h3
-rw-r--r--arch/blackfin/mach-bf561/smp.c2
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c90
-rw-r--r--arch/blackfin/mach-bf609/clock.c7
-rw-r--r--arch/blackfin/mach-common/entry.S10
-rw-r--r--arch/blackfin/mach-common/pm.c2
-rw-r--r--arch/blackfin/mach-common/smp.c6
-rw-r--r--arch/c6x/Makefile2
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/dma-mapping.h8
-rw-r--r--arch/c6x/include/asm/flat.h12
-rw-r--r--arch/c6x/include/asm/setup.h1
-rw-r--r--arch/c6x/include/asm/thread_info.h2
-rw-r--r--arch/c6x/kernel/process.c1
-rw-r--r--arch/c6x/kernel/setup.c10
-rw-r--r--arch/c6x/kernel/time.c2
-rw-r--r--arch/c6x/platforms/cache.c2
-rw-r--r--arch/cris/Kconfig12
-rw-r--r--arch/cris/Makefile4
-rw-r--r--arch/cris/arch-v10/kernel/fasttimer.c85
-rw-r--r--arch/cris/arch-v10/kernel/setup.c58
-rw-r--r--arch/cris/arch-v10/kernel/signal.c2
-rw-r--r--arch/cris/arch-v32/kernel/Makefile1
-rw-r--r--arch/cris/arch-v32/kernel/entry.S42
-rw-r--r--arch/cris/arch-v32/kernel/fasttimer.c85
-rw-r--r--arch/cris/arch-v32/kernel/head.S32
-rw-r--r--arch/cris/arch-v32/kernel/irq.c31
-rw-r--r--arch/cris/arch-v32/kernel/setup.c67
-rw-r--r--arch/cris/arch-v32/kernel/signal.c7
-rw-r--r--arch/cris/arch-v32/kernel/smp.c358
-rw-r--r--arch/cris/arch-v32/kernel/time.c180
-rw-r--r--arch/cris/arch-v32/lib/Makefile2
-rw-r--r--arch/cris/arch-v32/lib/spinlock.S40
-rw-r--r--arch/cris/arch-v32/mm/init.c11
-rw-r--r--arch/cris/arch-v32/mm/mmu.S4
-rw-r--r--arch/cris/boot/dts/Makefile6
-rw-r--r--arch/cris/boot/dts/dev88.dts18
-rw-r--r--arch/cris/boot/dts/etraxfs.dtsi38
-rw-r--r--arch/cris/include/arch-v10/arch/atomic.h7
-rw-r--r--arch/cris/include/arch-v10/arch/system.h8
-rw-r--r--arch/cris/include/arch-v32/arch/atomic.h36
-rw-r--r--arch/cris/include/arch-v32/arch/processor.h3
-rw-r--r--arch/cris/include/arch-v32/arch/spinlock.h131
-rw-r--r--arch/cris/include/asm/Kbuild15
-rw-r--r--arch/cris/include/asm/atomic.h149
-rw-r--r--arch/cris/include/asm/bitops.h111
-rw-r--r--arch/cris/include/asm/cmpxchg.h53
-rw-r--r--arch/cris/include/asm/device.h7
-rw-r--r--arch/cris/include/asm/div64.h1
-rw-r--r--arch/cris/include/asm/elf.h2
-rw-r--r--arch/cris/include/asm/emergency-restart.h6
-rw-r--r--arch/cris/include/asm/futex.h6
-rw-r--r--arch/cris/include/asm/hardirq.h7
-rw-r--r--arch/cris/include/asm/irq_regs.h1
-rw-r--r--arch/cris/include/asm/kdebug.h1
-rw-r--r--arch/cris/include/asm/kmap_types.h10
-rw-r--r--arch/cris/include/asm/local.h1
-rw-r--r--arch/cris/include/asm/local64.h1
-rw-r--r--arch/cris/include/asm/percpu.h6
-rw-r--r--arch/cris/include/asm/smp.h10
-rw-r--r--arch/cris/include/asm/spinlock.h1
-rw-r--r--arch/cris/include/asm/thread_info.h2
-rw-r--r--arch/cris/include/asm/tlbflush.h7
-rw-r--r--arch/cris/include/asm/topology.h6
-rw-r--r--arch/cris/kernel/Makefile1
-rw-r--r--arch/cris/kernel/devicetree.c14
-rw-r--r--arch/cris/kernel/ptrace.c23
-rw-r--r--arch/cris/kernel/setup.c15
-rw-r--r--arch/cris/kernel/time.c2
-rw-r--r--arch/frv/include/asm/io.h5
-rw-r--r--arch/frv/include/asm/thread_info.h2
-rw-r--r--arch/frv/kernel/asm-offsets.c1
-rw-r--r--arch/frv/kernel/signal.c24
-rw-r--r--arch/hexagon/include/asm/thread_info.h2
-rw-r--r--arch/hexagon/kernel/process.c2
-rw-r--r--arch/ia64/Kconfig19
-rw-r--r--arch/ia64/include/asm/acpi.h6
-rw-r--r--arch/ia64/include/asm/page.h4
-rw-r--r--arch/ia64/include/asm/pgalloc.h4
-rw-r--r--arch/ia64/include/asm/pgtable.h12
-rw-r--r--arch/ia64/include/asm/thread_info.h2
-rw-r--r--arch/ia64/kernel/Makefile2
-rw-r--r--arch/ia64/kernel/acpi.c4
-rw-r--r--arch/ia64/kernel/iosapic.c2
-rw-r--r--arch/ia64/kernel/irq_ia64.c36
-rw-r--r--arch/ia64/kernel/ivt.S12
-rw-r--r--arch/ia64/kernel/machine_kexec.c4
-rw-r--r--arch/ia64/kernel/mca.c10
-rw-r--r--arch/ia64/kernel/msi_ia64.c10
-rw-r--r--arch/ia64/kernel/numa.c10
-rw-r--r--arch/ia64/kernel/perfmon.c2
-rw-r--r--arch/ia64/kernel/salinfo.c24
-rw-r--r--arch/ia64/kernel/setup.c11
-rw-r--r--arch/ia64/kernel/smp.c6
-rw-r--r--arch/ia64/kernel/smpboot.c42
-rw-r--r--arch/ia64/kernel/topology.c6
-rw-r--r--arch/ia64/mm/init.c25
-rw-r--r--arch/ia64/pci/pci.c5
-rw-r--r--arch/m32r/include/asm/asm-offsets.h1
-rw-r--r--arch/m32r/include/asm/io.h1
-rw-r--r--arch/m32r/include/asm/thread_info.h15
-rw-r--r--arch/m32r/kernel/asm-offsets.c15
-rw-r--r--arch/m32r/kernel/entry.S1
-rw-r--r--arch/m32r/kernel/signal.c14
-rw-r--r--arch/m32r/kernel/smp.c6
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/coldfire/m527x.c3
-rw-r--r--arch/m68k/include/asm/m527xsim.h2
-rw-r--r--arch/m68k/include/asm/m68360_pram.h4
-rw-r--r--arch/m68k/include/asm/thread_info.h2
-rw-r--r--arch/m68k/kernel/signal.c14
-rw-r--r--arch/metag/include/asm/processor.h1
-rw-r--r--arch/metag/include/asm/thread_info.h2
-rw-r--r--arch/metag/kernel/irq.c2
-rw-r--r--arch/metag/kernel/process.c10
-rw-r--r--arch/metag/kernel/smp.c5
-rw-r--r--arch/microblaze/include/asm/seccomp.h8
-rw-r--r--arch/microblaze/include/asm/thread_info.h2
-rw-r--r--arch/microblaze/kernel/cpu/mb.c149
-rw-r--r--arch/microblaze/kernel/signal.c11
-rw-r--r--arch/mips/Kbuild.platforms3
-rw-r--r--arch/mips/Kconfig124
-rw-r--r--arch/mips/Makefile25
-rw-r--r--arch/mips/ar7/platform.c5
-rw-r--r--arch/mips/ath79/common.h2
-rw-r--r--arch/mips/bcm3384/Platform7
-rw-r--r--arch/mips/bcm3384/dma.c81
-rw-r--r--arch/mips/bcm3384/irq.c193
-rw-r--r--arch/mips/bcm3384/setup.c97
-rw-r--r--arch/mips/bcm47xx/bcm47xx_private.h4
-rw-r--r--arch/mips/bcm47xx/board.c61
-rw-r--r--arch/mips/bcm47xx/buttons.c18
-rw-r--r--arch/mips/bcm47xx/leds.c10
-rw-r--r--arch/mips/bcm47xx/nvram.c77
-rw-r--r--arch/mips/bcm47xx/prom.c3
-rw-r--r--arch/mips/bcm47xx/serial.c8
-rw-r--r--arch/mips/bcm47xx/setup.c13
-rw-r--r--arch/mips/bcm47xx/sprom.c675
-rw-r--r--arch/mips/bcm47xx/time.c2
-rw-r--r--arch/mips/bcm63xx/irq.c4
-rw-r--r--arch/mips/bcm63xx/prom.c4
-rw-r--r--arch/mips/bcm63xx/setup.c4
-rw-r--r--arch/mips/bmips/Kconfig62
-rw-r--r--arch/mips/bmips/Makefile (renamed from arch/mips/bcm3384/Makefile)0
-rw-r--r--arch/mips/bmips/Platform7
-rw-r--r--arch/mips/bmips/dma.c117
-rw-r--r--arch/mips/bmips/irq.c38
-rw-r--r--arch/mips/bmips/setup.c194
-rw-r--r--arch/mips/boot/compressed/Makefile9
-rw-r--r--arch/mips/boot/compressed/decompress.c5
-rw-r--r--arch/mips/boot/dts/Makefile33
-rw-r--r--arch/mips/boot/dts/bcm3384.dtsi109
-rw-r--r--arch/mips/boot/dts/brcm/Makefile19
-rw-r--r--arch/mips/boot/dts/brcm/bcm3384_viper.dtsi108
-rw-r--r--arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi126
-rw-r--r--arch/mips/boot/dts/brcm/bcm6328.dtsi86
-rw-r--r--arch/mips/boot/dts/brcm/bcm6368.dtsi93
-rw-r--r--arch/mips/boot/dts/brcm/bcm7125.dtsi139
-rw-r--r--arch/mips/boot/dts/brcm/bcm7346.dtsi224
-rw-r--r--arch/mips/boot/dts/brcm/bcm7358.dtsi161
-rw-r--r--arch/mips/boot/dts/brcm/bcm7360.dtsi161
-rw-r--r--arch/mips/boot/dts/brcm/bcm7362.dtsi167
-rw-r--r--arch/mips/boot/dts/brcm/bcm7420.dtsi184
-rw-r--r--arch/mips/boot/dts/brcm/bcm7425.dtsi225
-rw-r--r--arch/mips/boot/dts/brcm/bcm93384wvg.dts (renamed from arch/mips/boot/dts/bcm93384wvg.dts)9
-rw-r--r--arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts25
-rw-r--r--arch/mips/boot/dts/brcm/bcm96368mvwg.dts31
-rw-r--r--arch/mips/boot/dts/brcm/bcm97125cbmb.dts31
-rw-r--r--arch/mips/boot/dts/brcm/bcm97346dbsmb.dts58
-rw-r--r--arch/mips/boot/dts/brcm/bcm97358svmb.dts34
-rw-r--r--arch/mips/boot/dts/brcm/bcm97360svmb.dts34
-rw-r--r--arch/mips/boot/dts/brcm/bcm97362svmb.dts34
-rw-r--r--arch/mips/boot/dts/brcm/bcm97420c.dts45
-rw-r--r--arch/mips/boot/dts/brcm/bcm97425svmb.dts60
-rw-r--r--arch/mips/boot/dts/brcm/bcm9ejtagprb.dts22
-rw-r--r--arch/mips/boot/dts/cavium-octeon/Makefile9
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts (renamed from arch/mips/boot/dts/octeon_3xxx.dts)12
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_68xx.dts (renamed from arch/mips/boot/dts/octeon_68xx.dts)0
-rw-r--r--arch/mips/boot/dts/lantiq/Makefile9
-rw-r--r--arch/mips/boot/dts/lantiq/danube.dtsi (renamed from arch/mips/boot/dts/danube.dtsi)0
-rw-r--r--arch/mips/boot/dts/lantiq/easy50712.dts (renamed from arch/mips/boot/dts/easy50712.dts)0
-rw-r--r--arch/mips/boot/dts/mti/Makefile9
-rw-r--r--arch/mips/boot/dts/mti/sead3.dts (renamed from arch/mips/boot/dts/sead3.dts)0
-rw-r--r--arch/mips/boot/dts/netlogic/Makefile13
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_evp.dts (renamed from arch/mips/boot/dts/xlp_evp.dts)0
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_fvp.dts (renamed from arch/mips/boot/dts/xlp_fvp.dts)0
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_gvp.dts (renamed from arch/mips/boot/dts/xlp_gvp.dts)0
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_rvp.dts77
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_svp.dts (renamed from arch/mips/boot/dts/xlp_svp.dts)0
-rw-r--r--arch/mips/boot/dts/ralink/Makefile12
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a.dtsi (renamed from arch/mips/boot/dts/mt7620a.dtsi)0
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a_eval.dts (renamed from arch/mips/boot/dts/mt7620a_eval.dts)0
-rw-r--r--arch/mips/boot/dts/ralink/rt2880.dtsi (renamed from arch/mips/boot/dts/rt2880.dtsi)0
-rw-r--r--arch/mips/boot/dts/ralink/rt2880_eval.dts (renamed from arch/mips/boot/dts/rt2880_eval.dts)0
-rw-r--r--arch/mips/boot/dts/ralink/rt3050.dtsi (renamed from arch/mips/boot/dts/rt3050.dtsi)0
-rw-r--r--arch/mips/boot/dts/ralink/rt3052_eval.dts (renamed from arch/mips/boot/dts/rt3052_eval.dts)0
-rw-r--r--arch/mips/boot/dts/ralink/rt3883.dtsi (renamed from arch/mips/boot/dts/rt3883.dtsi)0
-rw-r--r--arch/mips/boot/dts/ralink/rt3883_eval.dts (renamed from arch/mips/boot/dts/rt3883_eval.dts)0
-rw-r--r--arch/mips/cavium-octeon/crypto/Makefile5
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-crypto.c4
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-crypto.h163
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-md5.c8
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha1.c241
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha256.c280
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha512.c277
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-l2c.c45
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c83
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c19
-rw-r--r--arch/mips/cavium-octeon/octeon_boot.h23
-rw-r--r--arch/mips/cavium-octeon/setup.c10
-rw-r--r--arch/mips/cavium-octeon/smp.c4
-rw-r--r--arch/mips/configs/bmips_be_defconfig (renamed from arch/mips/configs/bcm3384_defconfig)11
-rw-r--r--arch/mips/configs/bmips_stb_defconfig88
-rw-r--r--arch/mips/configs/ip32_defconfig3
-rw-r--r--arch/mips/configs/lemote2f_defconfig1
-rw-r--r--arch/mips/configs/loongson3_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig439
-rw-r--r--arch/mips/configs/pistachio_defconfig336
-rw-r--r--arch/mips/dec/int-handler.S7
-rw-r--r--arch/mips/dec/setup.c16
-rw-r--r--arch/mips/include/asm/asm-eva.h137
-rw-r--r--arch/mips/include/asm/asmmacro-32.h96
-rw-r--r--arch/mips/include/asm/bitops.h7
-rw-r--r--arch/mips/include/asm/bmips.h16
-rw-r--r--arch/mips/include/asm/cacheflush.h38
-rw-r--r--arch/mips/include/asm/cdmm.h98
-rw-r--r--arch/mips/include/asm/cevt-r4k.h19
-rw-r--r--arch/mips/include/asm/checksum.h6
-rw-r--r--arch/mips/include/asm/cmpxchg.h11
-rw-r--r--arch/mips/include/asm/cpu-features.h48
-rw-r--r--arch/mips/include/asm/cpu-info.h2
-rw-r--r--arch/mips/include/asm/cpu-type.h1
-rw-r--r--arch/mips/include/asm/cpu.h8
-rw-r--r--arch/mips/include/asm/dma-mapping.h2
-rw-r--r--arch/mips/include/asm/elf.h16
-rw-r--r--arch/mips/include/asm/fpu.h7
-rw-r--r--arch/mips/include/asm/fpu_emulator.h6
-rw-r--r--arch/mips/include/asm/irq.h3
-rw-r--r--arch/mips/include/asm/mach-ar7/war.h24
-rw-r--r--arch/mips/include/asm/mach-ath25/dma-coherence.h14
-rw-r--r--arch/mips/include/asm/mach-ath25/war.h25
-rw-r--r--arch/mips/include/asm/mach-ath79/war.h24
-rw-r--r--arch/mips/include/asm/mach-au1x00/war.h24
-rw-r--r--arch/mips/include/asm/mach-bcm3384/war.h24
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx.h1
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h4
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/war.h24
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h14
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/war.h24
-rw-r--r--arch/mips/include/asm/mach-bmips/dma-coherence.h (renamed from arch/mips/include/asm/mach-bcm3384/dma-coherence.h)12
-rw-r--r--arch/mips/include/asm/mach-bmips/spaces.h18
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h4
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/mangle-port.h74
-rw-r--r--arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-cobalt/war.h24
-rw-r--r--arch/mips/include/asm/mach-dec/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-dec/war.h24
-rw-r--r--arch/mips/include/asm/mach-emma2rh/war.h24
-rw-r--r--arch/mips/include/asm/mach-generic/dma-coherence.h6
-rw-r--r--arch/mips/include/asm/mach-generic/war.h (renamed from arch/mips/include/asm/mach-ralink/war.h)6
-rw-r--r--arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h4
-rw-r--r--arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h4
-rw-r--r--arch/mips/include/asm/mach-ip32/mc146818rtc.h36
-rw-r--r--arch/mips/include/asm/mach-jazz/dma-coherence.h4
-rw-r--r--arch/mips/include/asm/mach-jazz/war.h24
-rw-r--r--arch/mips/include/asm/mach-jz4740/war.h24
-rw-r--r--arch/mips/include/asm/mach-lantiq/war.h23
-rw-r--r--arch/mips/include/asm/mach-lasat/war.h24
-rw-r--r--arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-loongson/dma-coherence.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/gpio.h15
-rw-r--r--arch/mips/include/asm/mach-loongson/loongson.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/war.h24
-rw-r--r--arch/mips/include/asm/mach-loongson1/war.h24
-rw-r--r--arch/mips/include/asm/mach-netlogic/multi-node.h9
-rw-r--r--arch/mips/include/asm/mach-netlogic/topology.h15
-rw-r--r--arch/mips/include/asm/mach-netlogic/war.h25
-rw-r--r--arch/mips/include/asm/mach-paravirt/war.h25
-rw-r--r--arch/mips/include/asm/mach-pistachio/gpio.h21
-rw-r--r--arch/mips/include/asm/mach-pistachio/irq.h18
-rw-r--r--arch/mips/include/asm/mach-pnx833x/war.h24
-rw-r--r--arch/mips/include/asm/mach-rm/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-tx39xx/war.h24
-rw-r--r--arch/mips/include/asm/mach-vr41xx/war.h24
-rw-r--r--arch/mips/include/asm/mips-boards/sead3-addr.h83
-rw-r--r--arch/mips/include/asm/mips-r2-to-r6-emul.h9
-rw-r--r--arch/mips/include/asm/mipsregs.h312
-rw-r--r--arch/mips/include/asm/netlogic/common.h21
-rw-r--r--arch/mips/include/asm/netlogic/mips-extns.h8
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h2
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/sys.h3
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/xlp.h3
-rw-r--r--arch/mips/include/asm/octeon/cvmx-address.h67
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h55
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootmem.h14
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fau.h22
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa.h7
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2c.h9
-rw-r--r--arch/mips/include/asm/octeon/cvmx-packet.h8
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko.h31
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h247
-rw-r--r--arch/mips/include/asm/octeon/cvmx-wqe.h71
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h8
-rw-r--r--arch/mips/include/asm/octeon/octeon.h2
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h3
-rw-r--r--arch/mips/include/asm/page.h2
-rw-r--r--arch/mips/include/asm/pci.h2
-rw-r--r--arch/mips/include/asm/pci/bridge.h1
-rw-r--r--arch/mips/include/asm/pgtable-32.h15
-rw-r--r--arch/mips/include/asm/pgtable-64.h10
-rw-r--r--arch/mips/include/asm/pgtable-bits.h96
-rw-r--r--arch/mips/include/asm/pgtable.h83
-rw-r--r--arch/mips/include/asm/r4kcache.h89
-rw-r--r--arch/mips/include/asm/seccomp.h7
-rw-r--r--arch/mips/include/asm/sgi/sgi.h15
-rw-r--r--arch/mips/include/asm/smp.h4
-rw-r--r--arch/mips/include/asm/spinlock.h2
-rw-r--r--arch/mips/include/asm/thread_info.h6
-rw-r--r--arch/mips/jz4740/Platform1
-rw-r--r--arch/mips/jz4740/time.c8
-rw-r--r--arch/mips/kernel/asm-offsets.c1
-rw-r--r--arch/mips/kernel/branch.c6
-rw-r--r--arch/mips/kernel/cevt-r4k.c33
-rw-r--r--arch/mips/kernel/cevt-txx9.c9
-rw-r--r--arch/mips/kernel/cpu-probe.c200
-rw-r--r--arch/mips/kernel/crash.c8
-rw-r--r--arch/mips/kernel/csrc-bcm1480.c12
-rw-r--r--arch/mips/kernel/csrc-ioasic.c13
-rw-r--r--arch/mips/kernel/csrc-r4k.c8
-rw-r--r--arch/mips/kernel/csrc-sb1250.c23
-rw-r--r--arch/mips/kernel/elf.c46
-rw-r--r--arch/mips/kernel/entry.S3
-rw-r--r--arch/mips/kernel/idle.c13
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c4
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c15
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c83
-rw-r--r--arch/mips/kernel/proc.c1
-rw-r--r--arch/mips/kernel/process.c12
-rw-r--r--arch/mips/kernel/prom.c5
-rw-r--r--arch/mips/kernel/ptrace.c10
-rw-r--r--arch/mips/kernel/r2300_switch.S7
-rw-r--r--arch/mips/kernel/r4k_switch.S7
-rw-r--r--arch/mips/kernel/reset.c25
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c2
-rw-r--r--arch/mips/kernel/smp-cmp.c4
-rw-r--r--arch/mips/kernel/smp-cps.c10
-rw-r--r--arch/mips/kernel/smp-mt.c4
-rw-r--r--arch/mips/kernel/smp.c36
-rw-r--r--arch/mips/kernel/traps.c283
-rw-r--r--arch/mips/kernel/unaligned.c346
-rw-r--r--arch/mips/kvm/emulate.c6
-rw-r--r--arch/mips/lantiq/prom.c2
-rw-r--r--arch/mips/lantiq/xway/vmmc.c1
-rw-r--r--arch/mips/lasat/sysctl.c15
-rw-r--r--arch/mips/lib/csum_partial.S38
-rw-r--r--arch/mips/loongson/common/Makefile1
-rw-r--r--arch/mips/loongson/common/env.c9
-rw-r--r--arch/mips/loongson/common/pci.c6
-rw-r--r--arch/mips/loongson/loongson-3/cop2-ex.c2
-rw-r--r--arch/mips/loongson/loongson-3/irq.c1
-rw-r--r--arch/mips/loongson/loongson-3/numa.c4
-rw-r--r--arch/mips/loongson/loongson-3/smp.c2
-rw-r--r--arch/mips/math-emu/Makefile15
-rw-r--r--arch/mips/math-emu/cp1emu.c288
-rw-r--r--arch/mips/math-emu/dp_add.c14
-rw-r--r--arch/mips/math-emu/dp_cmp.c13
-rw-r--r--arch/mips/math-emu/dp_div.c9
-rw-r--r--arch/mips/math-emu/dp_fsp.c16
-rw-r--r--arch/mips/math-emu/dp_mul.c9
-rw-r--r--arch/mips/math-emu/dp_simple.c55
-rw-r--r--arch/mips/math-emu/dp_sqrt.c13
-rw-r--r--arch/mips/math-emu/dp_sub.c14
-rw-r--r--arch/mips/math-emu/dsemul.c6
-rw-r--r--arch/mips/math-emu/ieee754.h111
-rw-r--r--arch/mips/math-emu/ieee754dp.c25
-rw-r--r--arch/mips/math-emu/ieee754dp.h1
-rw-r--r--arch/mips/math-emu/ieee754int.h5
-rw-r--r--arch/mips/math-emu/ieee754sp.c25
-rw-r--r--arch/mips/math-emu/ieee754sp.h1
-rw-r--r--arch/mips/math-emu/me-debugfs.c1
-rw-r--r--arch/mips/math-emu/sp_add.c14
-rw-r--r--arch/mips/math-emu/sp_cmp.c13
-rw-r--r--arch/mips/math-emu/sp_div.c9
-rw-r--r--arch/mips/math-emu/sp_fdp.c22
-rw-r--r--arch/mips/math-emu/sp_mul.c9
-rw-r--r--arch/mips/math-emu/sp_simple.c55
-rw-r--r--arch/mips/math-emu/sp_sqrt.c13
-rw-r--r--arch/mips/math-emu/sp_sub.c14
-rw-r--r--arch/mips/mm/c-r4k.c33
-rw-r--r--arch/mips/mm/cache.c39
-rw-r--r--arch/mips/mm/dma-default.c4
-rw-r--r--arch/mips/mm/init.c7
-rw-r--r--arch/mips/mm/mmap.c24
-rw-r--r--arch/mips/mm/page.c1
-rw-r--r--arch/mips/mm/tlb-r4k.c17
-rw-r--r--arch/mips/mm/tlbex.c116
-rw-r--r--arch/mips/mti-malta/malta-init.c4
-rw-r--r--arch/mips/mti-malta/malta-memory.c13
-rw-r--r--arch/mips/mti-malta/malta-time.c20
-rw-r--r--arch/mips/mti-sead3/Makefile7
-rw-r--r--arch/mips/mti-sead3/leds-sead3.c41
-rw-r--r--arch/mips/mti-sead3/sead3-ehci.c53
-rw-r--r--arch/mips/mti-sead3/sead3-i2c-dev.c33
-rw-r--r--arch/mips/mti-sead3/sead3-i2c-drv.c404
-rw-r--r--arch/mips/mti-sead3/sead3-i2c.c31
-rw-r--r--arch/mips/mti-sead3/sead3-init.c2
-rw-r--r--arch/mips/mti-sead3/sead3-leds.c79
-rw-r--r--arch/mips/mti-sead3/sead3-mtd.c53
-rw-r--r--arch/mips/mti-sead3/sead3-net.c57
-rw-r--r--arch/mips/mti-sead3/sead3-platform.c184
-rw-r--r--arch/mips/netlogic/Kconfig9
-rw-r--r--arch/mips/netlogic/common/irq.c10
-rw-r--r--arch/mips/netlogic/common/reset.S22
-rw-r--r--arch/mips/netlogic/common/smp.c25
-rw-r--r--arch/mips/netlogic/common/time.c1
-rw-r--r--arch/mips/netlogic/xlp/ahci-init-xlp2.c13
-rw-r--r--arch/mips/netlogic/xlp/ahci-init.c2
-rw-r--r--arch/mips/netlogic/xlp/dt.c10
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c57
-rw-r--r--arch/mips/netlogic/xlp/setup.c7
-rw-r--r--arch/mips/netlogic/xlp/usb-init-xlp2.c10
-rw-r--r--arch/mips/netlogic/xlp/wakeup.c10
-rw-r--r--arch/mips/oprofile/common.c1
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c15
-rw-r--r--arch/mips/paravirt/paravirt-smp.c2
-rw-r--r--arch/mips/pci/Makefile2
-rw-r--r--arch/mips/pci/msi-xlp.c19
-rw-r--r--arch/mips/pci/pci-ar2315.c1
-rw-r--r--arch/mips/pci/pci-octeon.c8
-rw-r--r--arch/mips/pci/pci-rt2880.c1
-rw-r--r--arch/mips/pci/pci.c5
-rw-r--r--arch/mips/pci/pcie-octeon.c8
-rw-r--r--arch/mips/pistachio/Makefile1
-rw-r--r--arch/mips/pistachio/Platform8
-rw-r--r--arch/mips/pistachio/init.c131
-rw-r--r--arch/mips/pistachio/irq.c28
-rw-r--r--arch/mips/pistachio/time.c52
-rw-r--r--arch/mips/power/Makefile2
-rw-r--r--arch/mips/power/hibernate.c10
-rw-r--r--arch/mips/power/hibernate_asm.S (renamed from arch/mips/power/hibernate.S)5
-rw-r--r--arch/mips/ralink/Kconfig5
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-klnuma.c10
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c8
-rw-r--r--arch/mips/sgi-ip32/ip32-platform.c53
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c131
-rw-r--r--arch/mn10300/include/asm/io.h5
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/kernel/asm-offsets.c2
-rw-r--r--arch/mn10300/kernel/signal.c20
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/shmparam.h21
-rw-r--r--arch/nios2/include/asm/thread_info.h2
-rw-r--r--arch/nios2/include/uapi/asm/ptrace.h2
-rw-r--r--arch/nios2/kernel/cpuinfo.c77
-rw-r--r--arch/nios2/kernel/entry.S71
-rw-r--r--arch/nios2/kernel/process.c1
-rw-r--r--arch/nios2/kernel/traps.c34
-rw-r--r--arch/nios2/mm/cacheflush.c55
-rw-r--r--arch/openrisc/include/asm/thread_info.h2
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/openrisc/kernel/setup.c50
-rw-r--r--arch/openrisc/kernel/signal.c2
-rw-r--r--arch/parisc/Kconfig5
-rw-r--r--arch/parisc/include/asm/Kbuild2
-rw-r--r--arch/parisc/include/asm/elf.h4
-rw-r--r--arch/parisc/include/asm/pgalloc.h8
-rw-r--r--arch/parisc/include/asm/pgtable.h16
-rw-r--r--arch/parisc/include/asm/scatterlist.h10
-rw-r--r--arch/parisc/include/asm/seccomp.h16
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/entry.S4
-rw-r--r--arch/parisc/kernel/head.S4
-rw-r--r--arch/parisc/kernel/irq.c4
-rw-r--r--arch/parisc/kernel/pci-dma.c8
-rw-r--r--arch/parisc/kernel/process.c10
-rw-r--r--arch/parisc/kernel/sys_parisc.c3
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/Kconfig14
-rw-r--r--arch/powerpc/Kconfig.debug9
-rw-r--r--arch/powerpc/Makefile4
-rw-r--r--arch/powerpc/boot/Makefile4
-rw-r--r--arch/powerpc/boot/crt0.S26
-rw-r--r--arch/powerpc/boot/dts/b4860emu.dts223
-rw-r--r--arch/powerpc/boot/dts/b4qds.dtsi17
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-post.dtsi60
-rw-r--r--arch/powerpc/boot/dts/fsl/b4si-post.dtsi89
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023si-post.dtsi37
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi65
-rw-r--r--arch/powerpc/boot/dts/fsl/t2081si-post.dtsi105
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-post.dtsi265
-rw-r--r--arch/powerpc/boot/dts/kmcoge4.dts15
-rw-r--r--arch/powerpc/boot/dts/oca4080.dts15
-rw-r--r--arch/powerpc/boot/dts/p1023rdb.dts18
-rw-r--r--arch/powerpc/boot/dts/p2041rdb.dts17
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts17
-rw-r--r--arch/powerpc/boot/dts/p4080ds.dts17
-rw-r--r--arch/powerpc/boot/dts/p5020ds.dts17
-rw-r--r--arch/powerpc/boot/dts/p5040ds.dts17
-rw-r--r--arch/powerpc/boot/dts/t104xqds.dtsi17
-rw-r--r--arch/powerpc/boot/dts/t104xrdb.dtsi14
-rw-r--r--arch/powerpc/boot/dts/t208xqds.dtsi19
-rw-r--r--arch/powerpc/boot/dts/t208xrdb.dtsi15
-rw-r--r--arch/powerpc/boot/dts/t4240qds.dts17
-rw-r--r--arch/powerpc/boot/dts/t4240rdb.dts15
-rw-r--r--arch/powerpc/boot/libfdt-wrapper.c6
-rw-r--r--arch/powerpc/boot/libfdt_env.h14
-rw-r--r--arch/powerpc/boot/of.h8
-rw-r--r--arch/powerpc/boot/planetcore.c33
-rw-r--r--arch/powerpc/boot/planetcore.h3
-rwxr-xr-xarch/powerpc/boot/wrapper2
-rw-r--r--arch/powerpc/configs/cell_defconfig3
-rw-r--r--arch/powerpc/configs/celleb_defconfig152
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig7
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig15
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig3
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig3
-rw-r--r--arch/powerpc/crypto/Makefile8
-rw-r--r--arch/powerpc/crypto/aes-spe-core.S351
-rw-r--r--arch/powerpc/crypto/aes-spe-glue.c512
-rw-r--r--arch/powerpc/crypto/aes-spe-keys.S283
-rw-r--r--arch/powerpc/crypto/aes-spe-modes.S630
-rw-r--r--arch/powerpc/crypto/aes-spe-regs.h42
-rw-r--r--arch/powerpc/crypto/aes-tab-4k.S331
-rw-r--r--arch/powerpc/crypto/md5-asm.S243
-rw-r--r--arch/powerpc/crypto/md5-glue.c165
-rw-r--r--arch/powerpc/crypto/sha1-spe-asm.S299
-rw-r--r--arch/powerpc/crypto/sha1-spe-glue.c210
-rw-r--r--arch/powerpc/crypto/sha256-spe-asm.S323
-rw-r--r--arch/powerpc/crypto/sha256-spe-glue.c275
-rw-r--r--arch/powerpc/include/asm/Kbuild4
-rw-r--r--arch/powerpc/include/asm/archrandom.h11
-rw-r--r--arch/powerpc/include/asm/asm-compat.h4
-rw-r--r--arch/powerpc/include/asm/cache.h3
-rw-r--r--arch/powerpc/include/asm/cputable.h8
-rw-r--r--arch/powerpc/include/asm/cputhreads.h2
-rw-r--r--arch/powerpc/include/asm/dbdma.h12
-rw-r--r--arch/powerpc/include/asm/dcr-native.h2
-rw-r--r--arch/powerpc/include/asm/device.h6
-rw-r--r--arch/powerpc/include/asm/div64.h1
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h4
-rw-r--r--arch/powerpc/include/asm/eeh.h29
-rw-r--r--arch/powerpc/include/asm/elf.h4
-rw-r--r--arch/powerpc/include/asm/firmware.h10
-rw-r--r--arch/powerpc/include/asm/iommu.h6
-rw-r--r--arch/powerpc/include/asm/irq_regs.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h7
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h49
-rw-r--r--arch/powerpc/include/asm/kvm_host.h49
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h2
-rw-r--r--arch/powerpc/include/asm/local64.h1
-rw-r--r--arch/powerpc/include/asm/machdep.h19
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h1
-rw-r--r--arch/powerpc/include/asm/mpc85xx.h1
-rw-r--r--arch/powerpc/include/asm/mpic.h17
-rw-r--r--arch/powerpc/include/asm/nmi.h4
-rw-r--r--arch/powerpc/include/asm/nvram.h50
-rw-r--r--arch/powerpc/include/asm/opal-api.h735
-rw-r--r--arch/powerpc/include/asm/opal.h770
-rw-r--r--arch/powerpc/include/asm/paca.h4
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h60
-rw-r--r--arch/powerpc/include/asm/pci.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h28
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h2
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h8
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h192
-rw-r--r--arch/powerpc/include/asm/rtas.h33
-rw-r--r--arch/powerpc/include/asm/seccomp.h10
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/smp.h5
-rw-r--r--arch/powerpc/include/asm/smu.h2
-rw-r--r--arch/powerpc/include/asm/swab.h26
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/thread_info.h2
-rw-r--r--arch/powerpc/include/asm/time.h3
-rw-r--r--arch/powerpc/include/asm/ucc_slow.h13
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/asm/vga.h4
-rw-r--r--arch/powerpc/include/asm/xics.h2
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/ptrace.h2
-rw-r--r--arch/powerpc/include/uapi/asm/seccomp.h16
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/Makefile4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c20
-rw-r--r--arch/powerpc/kernel/cacheinfo.c44
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S10
-rw-r--r--arch/powerpc/kernel/cputable.c4
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c11
-rw-r--r--arch/powerpc/kernel/eeh.c189
-rw-r--r--arch/powerpc/kernel/eeh_cache.c25
-rw-r--r--arch/powerpc/kernel/eeh_dev.c14
-rw-r--r--arch/powerpc/kernel/eeh_driver.c22
-rw-r--r--arch/powerpc/kernel/eeh_pe.c129
-rw-r--r--arch/powerpc/kernel/entry_64.S5
-rw-r--r--arch/powerpc/kernel/idle_power7.S3
-rw-r--r--arch/powerpc/kernel/io-workarounds.c10
-rw-r--r--arch/powerpc/kernel/mce.c4
-rw-r--r--arch/powerpc/kernel/mce_power.c53
-rw-r--r--arch/powerpc/kernel/nvram_64.c677
-rw-r--r--arch/powerpc/kernel/of_platform.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c57
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c9
-rw-r--r--arch/powerpc/kernel/pci_dn.c309
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c9
-rw-r--r--arch/powerpc/kernel/process.c9
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/prom_init.c4
-rw-r--r--arch/powerpc/kernel/rtas.c30
-rw-r--r--arch/powerpc/kernel/rtas_pci.c49
-rw-r--r--arch/powerpc/kernel/setup_64.c20
-rw-r--r--arch/powerpc/kernel/syscalls.c17
-rw-r--r--arch/powerpc/kernel/systbl.S2
-rw-r--r--arch/powerpc/kernel/systbl_chk.c2
-rw-r--r--arch/powerpc/kernel/time.c6
-rw-r--r--arch/powerpc/kernel/tm.S8
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/kernel/vector.S24
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S1
-rw-r--r--arch/powerpc/kvm/Kconfig16
-rw-r--r--arch/powerpc/kvm/book3s.c76
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c203
-rw-r--r--arch/powerpc/kvm/book3s_hv.c435
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c100
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c111
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c238
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S559
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c28
-rw-r--r--arch/powerpc/kvm/book3s_xics.c105
-rw-r--r--arch/powerpc/kvm/book3s_xics.h13
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c32
-rw-r--r--arch/powerpc/kvm/powerpc.c41
-rw-r--r--arch/powerpc/lib/alloc.c2
-rw-r--r--arch/powerpc/lib/copy_32.S127
-rw-r--r--arch/powerpc/lib/copypage_power7.S32
-rw-r--r--arch/powerpc/lib/copyuser_power7.S226
-rw-r--r--arch/powerpc/lib/crtsavres.S96
-rw-r--r--arch/powerpc/lib/ldstfp.S32
-rw-r--r--arch/powerpc/lib/locks.c1
-rw-r--r--arch/powerpc/lib/memcpy_power7.S226
-rw-r--r--arch/powerpc/lib/ppc_ksyms.c4
-rw-r--r--arch/powerpc/lib/rheap.c2
-rw-r--r--arch/powerpc/mm/Makefile1
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c2
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c3
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c55
-rw-r--r--arch/powerpc/mm/init_64.c1
-rw-r--r--arch/powerpc/mm/mem.c3
-rw-r--r--arch/powerpc/mm/mmap.c28
-rw-r--r--arch/powerpc/mm/mmu_decl.h2
-rw-r--r--arch/powerpc/mm/numa.c62
-rw-r--r--arch/powerpc/mm/pgtable_32.c18
-rw-r--r--arch/powerpc/mm/pgtable_64.c17
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c5
-rw-r--r--arch/powerpc/mm/tlb_hash64.c2
-rw-r--r--arch/powerpc/mm/vphn.c70
-rw-r--r--arch/powerpc/mm/vphn.h16
-rw-r--r--arch/powerpc/net/Makefile2
-rw-r--r--arch/powerpc/net/bpf_jit.h64
-rw-r--r--arch/powerpc/net/bpf_jit_asm.S (renamed from arch/powerpc/net/bpf_jit_64.S)70
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c46
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c13
-rw-r--r--arch/powerpc/perf/callchain.c26
-rw-r--r--arch/powerpc/perf/core-book3s.c17
-rw-r--r--arch/powerpc/perf/hv-24x7.c253
-rw-r--r--arch/powerpc/perf/hv-24x7.h8
-rw-r--r--arch/powerpc/platforms/85xx/common.c1
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c12
-rw-r--r--arch/powerpc/platforms/85xx/p1022_rdk.c4
-rw-r--r--arch/powerpc/platforms/85xx/smp.c4
-rw-r--r--arch/powerpc/platforms/Kconfig5
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype3
-rw-r--r--arch/powerpc/platforms/cell/Kconfig11
-rw-r--r--arch/powerpc/platforms/cell/Makefile15
-rw-r--r--arch/powerpc/platforms/cell/beat.c264
-rw-r--r--arch/powerpc/platforms/cell/beat.h39
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c445
-rw-r--r--arch/powerpc/platforms/cell/beat_hvCall.S285
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.c253
-rw-r--r--arch/powerpc/platforms/cell/beat_iommu.c115
-rw-r--r--arch/powerpc/platforms/cell/beat_spu_priv1.c205
-rw-r--r--arch/powerpc/platforms/cell/beat_syscall.h164
-rw-r--r--arch/powerpc/platforms/cell/beat_udbg.c98
-rw-r--r--arch/powerpc/platforms/cell/beat_wrapper.h290
-rw-r--r--arch/powerpc/platforms/cell/cell.h24
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.c500
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.h46
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc.h232
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_epci.c428
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c538
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_sio.c99
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_uhc.c95
-rw-r--r--arch/powerpc/platforms/cell/celleb_setup.c243
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c11
-rw-r--r--arch/powerpc/platforms/cell/setup.c5
-rw-r--r--arch/powerpc/platforms/cell/smp.c9
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c22
-rw-r--r--arch/powerpc/platforms/chrp/setup.c2
-rw-r--r--arch/powerpc/platforms/maple/maple.h2
-rw-r--r--arch/powerpc/platforms/maple/pci.c4
-rw-r--r--arch/powerpc/platforms/maple/setup.c2
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c6
-rw-r--r--arch/powerpc/platforms/pasemi/pasemi.h1
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c5
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c2
-rw-r--r--arch/powerpc/platforms/powermac/pci.c38
-rw-r--r--arch/powerpc/platforms/powermac/pic.c3
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h3
-rw-r--r--arch/powerpc/platforms/powermac/setup.c22
-rw-r--r--arch/powerpc/platforms/powermac/smp.c18
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig7
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c1149
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c1300
-rw-r--r--arch/powerpc/platforms/powernv/opal-dump.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-flash.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c10
-rw-r--r--arch/powerpc/platforms/powernv/opal-power.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor.c30
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S5
-rw-r--r--arch/powerpc/platforms/powernv/opal.c92
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c797
-rw-r--r--arch/powerpc/platforms/powernv/pci-p5ioc2.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c190
-rw-r--r--arch/powerpc/platforms/powernv/pci.h38
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h2
-rw-r--r--arch/powerpc/platforms/powernv/rng.c29
-rw-r--r--arch/powerpc/platforms/powernv/setup.c54
-rw-r--r--arch/powerpc/platforms/powernv/smp.c13
-rw-r--r--arch/powerpc/platforms/ps3/smp.c4
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c128
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c98
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c489
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c9
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c26
-rw-r--r--arch/powerpc/platforms/pseries/msi.c6
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c674
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c5
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h14
-rw-r--r--arch/powerpc/platforms/pseries/setup.c48
-rw-r--r--arch/powerpc/platforms/pseries/smp.c6
-rwxr-xr-xarch/powerpc/relocs_check.pl66
-rwxr-xr-xarch/powerpc/relocs_check.sh59
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c10
-rw-r--r--arch/powerpc/sysdev/dcr.c2
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c29
-rw-r--r--arch/powerpc/sysdev/fsl_msi.h2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c15
-rw-r--r--arch/powerpc/sysdev/mpic.c30
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_io.c25
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_slow.c5
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c4
-rw-r--r--arch/s390/Kbuild1
-rw-r--r--arch/s390/Kconfig89
-rw-r--r--arch/s390/Makefile16
-rw-r--r--arch/s390/boot/compressed/Makefile12
-rw-r--r--arch/s390/boot/compressed/head.S (renamed from arch/s390/boot/compressed/head64.S)0
-rw-r--r--arch/s390/boot/compressed/head31.S51
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S5
-rw-r--r--arch/s390/crypto/crypt_s390.h128
-rw-r--r--arch/s390/crypto/prng.c850
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c4
-rw-r--r--arch/s390/hypfs/inode.c22
-rw-r--r--arch/s390/include/asm/appldata.h24
-rw-r--r--arch/s390/include/asm/atomic.h95
-rw-r--r--arch/s390/include/asm/bitops.h28
-rw-r--r--arch/s390/include/asm/cmpxchg.h7
-rw-r--r--arch/s390/include/asm/cputime.h26
-rw-r--r--arch/s390/include/asm/ctl_reg.h14
-rw-r--r--arch/s390/include/asm/dma-mapping.h2
-rw-r--r--arch/s390/include/asm/elf.h16
-rw-r--r--arch/s390/include/asm/idals.h16
-rw-r--r--arch/s390/include/asm/irq.h1
-rw-r--r--arch/s390/include/asm/jump_label.h12
-rw-r--r--arch/s390/include/asm/kexec.h3
-rw-r--r--arch/s390/include/asm/livepatch.h43
-rw-r--r--arch/s390/include/asm/lowcore.h159
-rw-r--r--arch/s390/include/asm/mman.h2
-rw-r--r--arch/s390/include/asm/mmu.h4
-rw-r--r--arch/s390/include/asm/mmu_context.h7
-rw-r--r--arch/s390/include/asm/pci.h10
-rw-r--r--arch/s390/include/asm/percpu.h4
-rw-r--r--arch/s390/include/asm/perf_event.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h25
-rw-r--r--arch/s390/include/asm/pgtable.h292
-rw-r--r--arch/s390/include/asm/processor.h66
-rw-r--r--arch/s390/include/asm/ptrace.h4
-rw-r--r--arch/s390/include/asm/qdio.h10
-rw-r--r--arch/s390/include/asm/runtime_instr.h10
-rw-r--r--arch/s390/include/asm/rwsem.h81
-rw-r--r--arch/s390/include/asm/setup.h35
-rw-r--r--arch/s390/include/asm/sfp-util.h10
-rw-r--r--arch/s390/include/asm/sparsemem.h9
-rw-r--r--arch/s390/include/asm/switch_to.h21
-rw-r--r--arch/s390/include/asm/syscall.h2
-rw-r--r--arch/s390/include/asm/thread_info.h13
-rw-r--r--arch/s390/include/asm/tlb.h4
-rw-r--r--arch/s390/include/asm/tlbflush.h7
-rw-r--r--arch/s390/include/asm/types.h17
-rw-r--r--arch/s390/include/asm/uaccess.h1
-rw-r--r--arch/s390/include/asm/unistd.h8
-rw-r--r--arch/s390/include/asm/vdso.h2
-rw-r--r--arch/s390/kernel/Makefile24
-rw-r--r--arch/s390/kernel/asm-offsets.c5
-rw-r--r--arch/s390/kernel/base.S76
-rw-r--r--arch/s390/kernel/cache.c4
-rw-r--r--arch/s390/kernel/compat_signal.c14
-rw-r--r--arch/s390/kernel/cpcmd.c10
-rw-r--r--arch/s390/kernel/diag.c15
-rw-r--r--arch/s390/kernel/dis.c48
-rw-r--r--arch/s390/kernel/dumpstack.c26
-rw-r--r--arch/s390/kernel/early.c69
-rw-r--r--arch/s390/kernel/entry.S1005
-rw-r--r--arch/s390/kernel/entry64.S1059
-rw-r--r--arch/s390/kernel/ftrace.c12
-rw-r--r--arch/s390/kernel/head.S49
-rw-r--r--arch/s390/kernel/head31.S106
-rw-r--r--arch/s390/kernel/head_kdump.S8
-rw-r--r--arch/s390/kernel/ipl.c157
-rw-r--r--arch/s390/kernel/irq.c4
-rw-r--r--arch/s390/kernel/jump_label.c2
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/module.c12
-rw-r--r--arch/s390/kernel/nmi.c92
-rw-r--r--arch/s390/kernel/pgm_check.S22
-rw-r--r--arch/s390/kernel/process.c29
-rw-r--r--arch/s390/kernel/ptrace.c46
-rw-r--r--arch/s390/kernel/reipl.S133
-rw-r--r--arch/s390/kernel/reipl64.S155
-rw-r--r--arch/s390/kernel/relocate_kernel.S63
-rw-r--r--arch/s390/kernel/relocate_kernel64.S121
-rw-r--r--arch/s390/kernel/sclp.S10
-rw-r--r--arch/s390/kernel/setup.c72
-rw-r--r--arch/s390/kernel/signal.c24
-rw-r--r--arch/s390/kernel/smp.c36
-rw-r--r--arch/s390/kernel/suspend.c4
-rw-r--r--arch/s390/kernel/swsusp.S (renamed from arch/s390/kernel/swsusp_asm64.S)0
-rw-r--r--arch/s390/kernel/sys_s390.c49
-rw-r--r--arch/s390/kernel/syscalls.S716
-rw-r--r--arch/s390/kernel/topology.c2
-rw-r--r--arch/s390/kernel/traps.c155
-rw-r--r--arch/s390/kernel/uprobes.c4
-rw-r--r--arch/s390/kernel/vdso.c16
-rw-r--r--arch/s390/kernel/vmlinux.lds.S7
-rw-r--r--arch/s390/kvm/kvm-s390.c2
-rw-r--r--arch/s390/kvm/trace-s390.h7
-rw-r--r--arch/s390/lib/Makefile3
-rw-r--r--arch/s390/lib/div64.c147
-rw-r--r--arch/s390/lib/mem.S (renamed from arch/s390/lib/mem64.S)0
-rw-r--r--arch/s390/lib/mem32.S92
-rw-r--r--arch/s390/lib/qrnnd.S78
-rw-r--r--arch/s390/lib/uaccess.c136
-rw-r--r--arch/s390/lib/ucmpdi2.c26
-rw-r--r--arch/s390/math-emu/Makefile7
-rw-r--r--arch/s390/math-emu/math.c2255
-rw-r--r--arch/s390/mm/dump_pagetables.c24
-rw-r--r--arch/s390/mm/extmem.c14
-rw-r--r--arch/s390/mm/fault.c36
-rw-r--r--arch/s390/mm/gup.c4
-rw-r--r--arch/s390/mm/hugetlbpage.c66
-rw-r--r--arch/s390/mm/init.c5
-rw-r--r--arch/s390/mm/maccess.c70
-rw-r--r--arch/s390/mm/mem_detect.c4
-rw-r--r--arch/s390/mm/mmap.c59
-rw-r--r--arch/s390/mm/pageattr.c2
-rw-r--r--arch/s390/mm/pgtable.c150
-rw-r--r--arch/s390/mm/vmem.c10
-rw-r--r--arch/s390/net/bpf_jit.S197
-rw-r--r--arch/s390/net/bpf_jit.h58
-rw-r--r--arch/s390/net/bpf_jit_comp.c1780
-rw-r--r--arch/s390/oprofile/Makefile2
-rw-r--r--arch/s390/oprofile/init.c11
-rw-r--r--arch/s390/pci/pci.c9
-rw-r--r--arch/s390/pci/pci_debug.c39
-rw-r--r--arch/s390/pci/pci_dma.c8
-rw-r--r--arch/score/include/asm/thread_info.h2
-rw-r--r--arch/score/kernel/asm-offsets.c1
-rw-r--r--arch/sh/Kconfig4
-rw-r--r--arch/sh/boards/board-sh7757lcr.c9
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c9
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c20
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c10
-rw-r--r--arch/sh/boards/mach-migor/setup.c9
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c17
-rw-r--r--arch/sh/include/asm/mmu_context.h2
-rw-r--r--arch/sh/include/asm/thread_info.h2
-rw-r--r--arch/sh/kernel/asm-offsets.c1
-rw-r--r--arch/sh/kernel/dwarf.c18
-rw-r--r--arch/sh/kernel/irq.c2
-rw-r--r--arch/sh/kernel/signal_32.c22
-rw-r--r--arch/sh/kernel/signal_64.c25
-rw-r--r--arch/sh/kernel/smp.c6
-rw-r--r--arch/sparc/Kconfig4
-rw-r--r--arch/sparc/include/asm/iommu_64.h7
-rw-r--r--arch/sparc/include/asm/seccomp.h11
-rw-r--r--arch/sparc/include/asm/thread_info_32.h15
-rw-r--r--arch/sparc/include/asm/thread_info_64.h26
-rw-r--r--arch/sparc/kernel/iommu.c172
-rw-r--r--arch/sparc/kernel/iommu_common.h8
-rw-r--r--arch/sparc/kernel/ldc.c155
-rw-r--r--arch/sparc/kernel/mdesc.c22
-rw-r--r--arch/sparc/kernel/pci_sun4v.c183
-rw-r--r--arch/sparc/kernel/perf_event.c35
-rw-r--r--arch/sparc/kernel/time_32.c4
-rw-r--r--arch/sparc/kernel/traps_32.c1
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/tile/Kconfig6
-rw-r--r--arch/tile/gxio/mpipe.c4
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/ftrace.h2
-rw-r--r--arch/tile/include/asm/irq_work.h14
-rw-r--r--arch/tile/include/asm/smp.h1
-rw-r--r--arch/tile/include/asm/thread_info.h11
-rw-r--r--arch/tile/include/gxio/mpipe.h4
-rw-r--r--arch/tile/include/hv/hypervisor.h6
-rw-r--r--arch/tile/kernel/compat_signal.c20
-rw-r--r--arch/tile/kernel/ftrace.c6
-rw-r--r--arch/tile/kernel/mcount_64.S7
-rw-r--r--arch/tile/kernel/process.c12
-rw-r--r--arch/tile/kernel/ptrace.c22
-rw-r--r--arch/tile/kernel/setup.c25
-rw-r--r--arch/tile/kernel/signal.c9
-rw-r--r--arch/tile/kernel/single_step.c3
-rw-r--r--arch/tile/kernel/smp.c32
-rw-r--r--arch/tile/kernel/stack.c15
-rw-r--r--arch/tile/kernel/traps.c16
-rw-r--r--arch/tile/kernel/unaligned.c22
-rw-r--r--arch/tile/mm/elf.c47
-rw-r--r--arch/tile/mm/fault.c10
-rw-r--r--arch/tile/mm/init.c7
-rw-r--r--arch/um/Kconfig.um47
-rw-r--r--arch/um/Makefile6
-rw-r--r--arch/um/Makefile-ia641
-rw-r--r--arch/um/Makefile-ppc9
-rw-r--r--arch/um/include/asm/fixmap.h4
-rw-r--r--arch/um/include/asm/pgtable.h6
-rw-r--r--arch/um/include/asm/processor-generic.h8
-rw-r--r--arch/um/include/asm/smp.h26
-rw-r--r--arch/um/include/asm/thread_info.h2
-rw-r--r--arch/um/include/shared/as-layout.h1
-rw-r--r--arch/um/include/shared/os.h2
-rw-r--r--arch/um/include/shared/skas/proc_mm.h44
-rw-r--r--arch/um/include/shared/skas/skas.h3
-rw-r--r--arch/um/include/shared/skas_ptrace.h14
-rw-r--r--arch/um/kernel/Makefile4
-rw-r--r--arch/um/kernel/irq.c3
-rw-r--r--arch/um/kernel/kmsg_dump.c43
-rw-r--r--arch/um/kernel/mem.c66
-rw-r--r--arch/um/kernel/physmem.c41
-rw-r--r--arch/um/kernel/process.c11
-rw-r--r--arch/um/kernel/ptrace.c32
-rw-r--r--arch/um/kernel/reboot.c35
-rw-r--r--arch/um/kernel/skas/mmu.c68
-rw-r--r--arch/um/kernel/skas/process.c31
-rw-r--r--arch/um/kernel/smp.c238
-rw-r--r--arch/um/kernel/sysrq.c6
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/um/kernel/um_arch.c67
-rw-r--r--arch/um/os-Linux/process.c16
-rw-r--r--arch/um/os-Linux/skas/mem.c100
-rw-r--r--arch/um/os-Linux/skas/process.c202
-rw-r--r--arch/um/os-Linux/start_up.c154
-rw-r--r--arch/um/sys-ia64/Makefile11
-rw-r--r--arch/um/sys-ia64/sysdep/ptrace.h16
-rw-r--r--arch/um/sys-ia64/sysdep/sigcontext.h10
-rw-r--r--arch/um/sys-ia64/sysdep/skas_ptrace.h22
-rw-r--r--arch/um/sys-ia64/sysdep/syscalls.h10
-rw-r--r--arch/um/sys-ppc/Makefile65
-rw-r--r--arch/um/sys-ppc/asm/archparam.h8
-rw-r--r--arch/um/sys-ppc/asm/elf.h51
-rw-r--r--arch/um/sys-ppc/asm/processor.h15
-rw-r--r--arch/um/sys-ppc/misc.S111
-rw-r--r--arch/um/sys-ppc/miscthings.c42
-rw-r--r--arch/um/sys-ppc/ptrace.c58
-rw-r--r--arch/um/sys-ppc/ptrace_user.c29
-rw-r--r--arch/um/sys-ppc/shared/sysdep/ptrace.h93
-rw-r--r--arch/um/sys-ppc/shared/sysdep/sigcontext.h52
-rw-r--r--arch/um/sys-ppc/shared/sysdep/skas_ptrace.h22
-rw-r--r--arch/um/sys-ppc/shared/sysdep/syscalls.h43
-rw-r--r--arch/um/sys-ppc/sigcontext.c4
-rw-r--r--arch/um/sys-ppc/sysrq.c33
-rw-r--r--arch/unicore32/include/asm/thread_info.h3
-rw-r--r--arch/unicore32/kernel/asm-offsets.c1
-rw-r--r--arch/unicore32/kernel/signal.c7
-rw-r--r--arch/x86/Kconfig33
-rw-r--r--arch/x86/Kconfig.debug4
-rw-r--r--arch/x86/Makefile2
-rw-r--r--arch/x86/Makefile.um2
-rw-r--r--arch/x86/boot/compressed/eboot.c2
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c187
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c15
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c15
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c9
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c15
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c7
-rw-r--r--arch/x86/crypto/glue_helper.c1
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c15
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c15
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c15
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c9
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c2
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c139
-rw-r--r--arch/x86/crypto/sha256-avx-asm.S10
-rw-r--r--arch/x86/crypto/sha256-avx2-asm.S10
-rw-r--r--arch/x86/crypto/sha256-ssse3-asm.S10
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c193
-rw-r--r--arch/x86/crypto/sha512-avx-asm.S6
-rw-r--r--arch/x86/crypto/sha512-avx2-asm.S8
-rw-r--r--arch/x86/crypto/sha512-ssse3-asm.S6
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c202
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c15
-rw-r--r--arch/x86/ia32/ia32entry.S7
-rw-r--r--arch/x86/include/asm/cpu.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h11
-rw-r--r--arch/x86/include/asm/e820.h8
-rw-r--r--arch/x86/include/asm/elf.h3
-rw-r--r--arch/x86/include/asm/hypervisor.h2
-rw-r--r--arch/x86/include/asm/intel-mid.h3
-rw-r--r--arch/x86/include/asm/lguest.h7
-rw-r--r--arch/x86/include/asm/livepatch.h4
-rw-r--r--arch/x86/include/asm/page_types.h2
-rw-r--r--arch/x86/include/asm/paravirt.h8
-rw-r--r--arch/x86/include/asm/paravirt_types.h8
-rw-r--r--arch/x86/include/asm/pgalloc.h8
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h1
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h2
-rw-r--r--arch/x86/include/asm/pgtable.h8
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h1
-rw-r--r--arch/x86/include/asm/pgtable_types.h4
-rw-r--r--arch/x86/include/asm/pm-trace.h (renamed from arch/x86/include/asm/resume-trace.h)10
-rw-r--r--arch/x86/include/asm/processor.h3
-rw-r--r--arch/x86/include/asm/pvclock.h1
-rw-r--r--arch/x86/include/asm/seccomp.h21
-rw-r--r--arch/x86/include/asm/seccomp_32.h11
-rw-r--r--arch/x86/include/asm/seccomp_64.h17
-rw-r--r--arch/x86/include/asm/serial.h8
-rw-r--r--arch/x86/include/asm/smp.h2
-rw-r--r--arch/x86/include/asm/spinlock.h2
-rw-r--r--arch/x86/include/asm/thread_info.h3
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/include/uapi/asm/e820.h10
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h2
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h26
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c4
-rw-r--r--arch/x86/kernel/cpu/Makefile3
-rw-r--r--arch/x86/kernel/cpu/amd.c3
-rw-r--r--arch/x86/kernel/cpu/common.c39
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c4
-rw-r--r--arch/x86/kernel/cpu/intel_pt.h131
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event.c205
-rw-r--r--arch/x86/kernel/cpu/perf_event.h181
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c981
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_bts.c525
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c1379
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c39
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c321
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_pt.c1100
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_rapl.c95
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c3
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/e820.c26
-rw-r--r--arch/x86/kernel/early_printk.c6
-rw-r--r--arch/x86/kernel/entry_64.S9
-rw-r--r--arch/x86/kernel/i387.c8
-rw-r--r--arch/x86/kernel/kprobes/core.c9
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/paravirt.c6
-rw-r--r--arch/x86/kernel/pmem.c53
-rw-r--r--arch/x86/kernel/process.c14
-rw-r--r--arch/x86/kernel/process_64.c28
-rw-r--r--arch/x86/kernel/pvclock.c44
-rw-r--r--arch/x86/kernel/signal.c38
-rw-r--r--arch/x86/kernel/smpboot.c39
-rw-r--r--arch/x86/kernel/test_rodata.c2
-rw-r--r--arch/x86/kernel/traps.c2
-rw-r--r--arch/x86/kvm/assigned-dev.c2
-rw-r--r--arch/x86/kvm/lapic.c11
-rw-r--r--arch/x86/kvm/mmu.c20
-rw-r--r--arch/x86/kvm/vmx.c12
-rw-r--r--arch/x86/kvm/x86.c43
-rw-r--r--arch/x86/lguest/boot.c7
-rw-r--r--arch/x86/lguest/head_32.S30
-rw-r--r--arch/x86/lib/usercopy_64.c2
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/ioremap.c37
-rw-r--r--arch/x86/mm/mmap.c38
-rw-r--r--arch/x86/mm/pgtable.c79
-rw-r--r--arch/x86/net/bpf_jit_comp.c28
-rw-r--r--arch/x86/pci/acpi.c24
-rw-r--r--arch/x86/platform/intel-mid/Makefile1
-rw-r--r--arch/x86/platform/intel-mid/early_printk_intel_mid.c112
-rw-r--r--arch/x86/syscalls/Makefile9
-rw-r--r--arch/x86/um/Makefile1
-rw-r--r--arch/x86/um/asm/barrier.h11
-rw-r--r--arch/x86/um/asm/elf.h2
-rw-r--r--arch/x86/um/ldt.c227
-rw-r--r--arch/x86/um/shared/sysdep/faultinfo_32.h3
-rw-r--r--arch/x86/um/shared/sysdep/faultinfo_64.h3
-rw-r--r--arch/x86/um/shared/sysdep/skas_ptrace.h22
-rw-r--r--arch/x86/um/signal.c7
-rw-r--r--arch/x86/vdso/Makefile2
-rw-r--r--arch/x86/vdso/vclock_gettime.c34
-rw-r--r--arch/x86/xen/apic.c180
-rw-r--r--arch/x86/xen/enlighten.c117
-rw-r--r--arch/x86/xen/mmu.c221
-rw-r--r--arch/x86/xen/smp.c46
-rw-r--r--arch/x86/xen/suspend.c10
-rw-r--r--arch/x86/xen/trace.c50
-rw-r--r--arch/x86/xen/xen-head.S63
-rw-r--r--arch/xtensa/Kconfig30
-rw-r--r--arch/xtensa/boot/dts/xtfpga.dtsi64
-rw-r--r--arch/xtensa/configs/audio_kc705_defconfig142
-rw-r--r--arch/xtensa/include/asm/thread_info.h13
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h8
-rw-r--r--arch/xtensa/kernel/Makefile1
-rw-r--r--arch/xtensa/kernel/asm-offsets.c8
-rw-r--r--arch/xtensa/kernel/signal.c16
-rw-r--r--arch/xtensa/platforms/iss/network.c29
-rw-r--r--arch/xtensa/platforms/xtfpga/Makefile3
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/hardware.h6
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/lcd.h15
-rw-r--r--arch/xtensa/platforms/xtfpga/lcd.c55
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c34
-rw-r--r--block/blk-core.c21
-rw-r--r--block/blk-map.c6
-rw-r--r--block/blk-mq-sysfs.c1
-rw-r--r--block/blk-mq.c131
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/bounce.c2
-rw-r--r--block/elevator.c6
-rw-r--r--block/scsi_ioctl.c12
-rw-r--r--crypto/Kconfig142
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/ablk_helper.c3
-rw-r--r--crypto/af_alg.c18
-rw-r--r--crypto/algapi.c42
-rw-r--r--crypto/algif_aead.c663
-rw-r--r--crypto/algif_hash.c12
-rw-r--r--crypto/algif_rng.c6
-rw-r--r--crypto/algif_skcipher.c245
-rw-r--r--crypto/ansi_cprng.c6
-rw-r--r--crypto/api.c10
-rw-r--r--crypto/async_tx/async_pq.c19
-rw-r--r--crypto/cryptd.c49
-rw-r--r--crypto/crypto_user.c39
-rw-r--r--crypto/drbg.c64
-rw-r--r--crypto/mcryptd.c25
-rw-r--r--crypto/proc.c3
-rw-r--r--crypto/sha1_generic.c102
-rw-r--r--crypto/sha256_generic.c133
-rw-r--r--crypto/sha512_generic.c123
-rw-r--r--crypto/tcrypt.c4
-rw-r--r--crypto/testmgr.c24
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig10
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_platform.c2
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/acpi_processor.c7
-rw-r--r--drivers/acpi/acpica/acapps.h8
-rw-r--r--drivers/acpi/acpica/acglobal.h5
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h13
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h6
-rw-r--r--drivers/acpi/acpica/acstruct.h5
-rw-r--r--drivers/acpi/acpica/actables.h9
-rw-r--r--drivers/acpi/acpica/acutils.h22
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/dsopcode.c7
-rw-r--r--drivers/acpi/acpica/dsutils.c11
-rw-r--r--drivers/acpi/acpica/evgpe.c5
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c5
-rw-r--r--drivers/acpi/acpica/exdump.c4
-rw-r--r--drivers/acpi/acpica/exfldio.c10
-rw-r--r--drivers/acpi/acpica/exoparg3.c13
-rw-r--r--drivers/acpi/acpica/exregion.c17
-rw-r--r--drivers/acpi/acpica/hwgpe.c24
-rw-r--r--drivers/acpi/acpica/hwvalid.c16
-rw-r--r--drivers/acpi/acpica/nsdump.c12
-rw-r--r--drivers/acpi/acpica/psopcode.c8
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c227
-rw-r--r--drivers/acpi/acpica/tbdata.c35
-rw-r--r--drivers/acpi/acpica/tbinstal.c67
-rw-r--r--drivers/acpi/acpica/tbprint.c19
-rw-r--r--drivers/acpi/acpica/tbxfroot.c7
-rw-r--r--drivers/acpi/acpica/utaddress.c34
-rw-r--r--drivers/acpi/acpica/utbuffer.c8
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utosi.c1
-rw-r--r--drivers/acpi/acpica/utprint.c13
-rw-r--r--drivers/acpi/acpica/utstate.c34
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/battery.c4
-rw-r--r--drivers/acpi/blacklist.c8
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/ec.c125
-rw-r--r--drivers/acpi/glue.c4
-rw-r--r--drivers/acpi/gsi.c105
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/osl.c12
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c2
-rw-r--r--drivers/acpi/processor_core.c60
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c22
-rw-r--r--drivers/acpi/scan.c433
-rw-r--r--drivers/acpi/sleep.c17
-rw-r--r--drivers/acpi/sleep.h2
-rw-r--r--drivers/acpi/sysfs.c2
-rw-r--r--drivers/acpi/tables.c52
-rw-r--r--drivers/acpi/video.c36
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/amba/tegra-ahb.c78
-rw-r--r--drivers/ata/Kconfig10
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c103
-rw-r--r--drivers/ata/ahci_st.c49
-rw-r--r--drivers/ata/libahci.c3
-rw-r--r--drivers/ata/libata-core.c34
-rw-r--r--drivers/ata/libata-eh.c3
-rw-r--r--drivers/ata/pata_isapnp.c14
-rw-r--r--drivers/ata/pata_macio.c10
-rw-r--r--drivers/ata/pata_scc.c1110
-rw-r--r--drivers/ata/sata_svw.c11
-rw-r--r--drivers/atm/nicstar.c90
-rw-r--r--drivers/base/core.c51
-rw-r--r--drivers/base/dd.c14
-rw-r--r--drivers/base/devtmpfs.c32
-rw-r--r--drivers/base/memory.c21
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/domain.c70
-rw-r--r--drivers/base/power/main.c20
-rw-r--r--drivers/base/power/trace.c6
-rw-r--r--drivers/base/power/wakeup.c16
-rw-r--r--drivers/base/property.c198
-rw-r--r--drivers/bcma/Kconfig17
-rw-r--r--drivers/bcma/Makefile4
-rw-r--r--drivers/bcma/bcma_private.h67
-rw-r--r--drivers/bcma/driver_gpio.c27
-rw-r--r--drivers/bcma/driver_mips.c2
-rw-r--r--drivers/bcma/driver_pci.c53
-rw-r--r--drivers/bcma/driver_pci_host.c1
-rw-r--r--drivers/bcma/driver_pcie2.c29
-rw-r--r--drivers/bcma/host_pci.c77
-rw-r--r--drivers/bcma/main.c2
-rw-r--r--drivers/block/Kconfig11
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/drbd/drbd_debugfs.c8
-rw-r--r--drivers/block/drbd/drbd_main.c7
-rw-r--r--drivers/block/drbd/drbd_req.c3
-rw-r--r--drivers/block/loop.c300
-rw-r--r--drivers/block/nbd.c140
-rw-r--r--drivers/block/nvme-core.c159
-rw-r--r--drivers/block/nvme-scsi.c29
-rw-r--r--drivers/block/paride/pg.c4
-rw-r--r--drivers/block/pmem.c262
-rw-r--r--drivers/block/rbd.c31
-rw-r--r--drivers/block/swim3.c12
-rw-r--r--drivers/block/virtio_blk.c9
-rw-r--r--drivers/block/xen-blkback/blkback.c97
-rw-r--r--drivers/block/xen-blkback/common.h6
-rw-r--r--drivers/block/xen-blkback/xenbus.c43
-rw-r--r--drivers/block/xen-blkfront.c5
-rw-r--r--drivers/block/zram/zram_drv.c96
-rw-r--r--drivers/block/zram/zram_drv.h1
-rw-r--r--drivers/bluetooth/Kconfig41
-rw-r--r--drivers/bluetooth/Makefile4
-rw-r--r--drivers/bluetooth/ath3k.c1
-rw-r--r--drivers/bluetooth/bt3c_cs.c3
-rw-r--r--drivers/bluetooth/btbcm.c397
-rw-r--r--drivers/bluetooth/btbcm.h60
-rw-r--r--drivers/bluetooth/btintel.c101
-rw-r--r--drivers/bluetooth/btintel.h89
-rw-r--r--drivers/bluetooth/btmrvl_drv.h1
-rw-r--r--drivers/bluetooth/btmrvl_main.c14
-rw-r--r--drivers/bluetooth/btusb.c1080
-rw-r--r--drivers/bluetooth/hci_ath.c115
-rw-r--r--drivers/bluetooth/hci_bcm.c153
-rw-r--r--drivers/bluetooth/hci_bcsp.c18
-rw-r--r--drivers/bluetooth/hci_h4.c132
-rw-r--r--drivers/bluetooth/hci_h5.c16
-rw-r--r--drivers/bluetooth/hci_intel.c31
-rw-r--r--drivers/bluetooth/hci_ldisc.c99
-rw-r--r--drivers/bluetooth/hci_ll.c16
-rw-r--r--drivers/bluetooth/hci_uart.h54
-rw-r--r--drivers/bus/Kconfig86
-rw-r--r--drivers/bus/Makefile16
-rw-r--r--drivers/bus/arm-cci.c519
-rw-r--r--drivers/bus/imx-weim.c13
-rw-r--r--drivers/bus/mips_cdmm.c716
-rw-r--r--drivers/bus/omap-ocp2scp.c34
-rw-r--r--drivers/bus/omap_l3_noc.c5
-rw-r--r--drivers/bus/omap_l3_noc.h54
-rw-r--r--drivers/bus/simple-pm-bus.c58
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c118
-rw-r--r--drivers/char/hw_random/core.c79
-rw-r--r--drivers/char/hw_random/exynos-rng.c12
-rw-r--r--drivers/char/hw_random/iproc-rng200.c239
-rw-r--r--drivers/char/hw_random/msm-rng.c11
-rw-r--r--drivers/char/hw_random/octeon-rng.c4
-rw-r--r--drivers/char/hw_random/omap-rng.c23
-rw-r--r--drivers/char/hw_random/pasemi-rng.c2
-rw-r--r--drivers/char/hw_random/powernv-rng.c2
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c2
-rw-r--r--drivers/char/hw_random/pseries-rng.c4
-rw-r--r--drivers/char/hw_random/xgene-rng.c10
-rw-r--r--drivers/char/i8k.c16
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c20
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c213
-rw-r--r--drivers/char/mem.c22
-rw-r--r--drivers/char/misc.c20
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/tile-srom.c1
-rw-r--r--drivers/char/tpm/Kconfig20
-rw-r--r--drivers/char/tpm/Makefile2
-rw-r--r--drivers/char/tpm/st33zp24/Kconfig30
-rw-r--r--drivers/char/tpm/st33zp24/Makefile12
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c276
-rw-r--r--drivers/char/tpm/st33zp24/spi.c399
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c698
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.h37
-rw-r--r--drivers/char/tpm/tpm-chip.c66
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c911
-rw-r--r--drivers/char/tpm/tpm_infineon.c47
-rw-r--r--drivers/char/tpm/xen-tpmfront.c5
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/char/xillybus/xillybus_core.c2
-rw-r--r--drivers/char/xillybus/xillybus_of.c2
-rw-r--r--drivers/clk/Kconfig7
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/at91/clk-usb.c64
-rw-r--r--drivers/clk/bcm/clk-kona.c28
-rw-r--r--drivers/clk/bcm/clk-kona.h1
-rw-r--r--drivers/clk/clk-cdce706.c2
-rw-r--r--drivers/clk/clk-conf.c7
-rw-r--r--drivers/clk/clk-fractional-divider.c3
-rw-r--r--drivers/clk/clk-gpio-gate.c31
-rw-r--r--drivers/clk/clk-mb86s7x.c386
-rw-r--r--drivers/clk/clk-palmas.c2
-rw-r--r--drivers/clk/clk-pwm.c136
-rw-r--r--drivers/clk/clk-si5351.c73
-rw-r--r--drivers/clk/clk-si570.c2
-rw-r--r--drivers/clk/clk.c165
-rw-r--r--drivers/clk/clk.h3
-rw-r--r--drivers/clk/clkdev.c30
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c70
-rw-r--r--drivers/clk/hisilicon/clk-hix5hd2.c6
-rw-r--r--drivers/clk/mvebu/Kconfig4
-rw-r--r--drivers/clk/mvebu/Makefile1
-rw-r--r--drivers/clk/mvebu/armada-39x.c156
-rw-r--r--drivers/clk/mvebu/common.c17
-rw-r--r--drivers/clk/mvebu/common.h1
-rw-r--r--drivers/clk/mxs/clk-imx23.c12
-rw-r--r--drivers/clk/mxs/clk-imx28.c18
-rw-r--r--drivers/clk/pistachio/Makefile3
-rw-r--r--drivers/clk/pistachio/clk-pistachio.c329
-rw-r--r--drivers/clk/pistachio/clk-pll.c401
-rw-r--r--drivers/clk/pistachio/clk.c140
-rw-r--r--drivers/clk/pistachio/clk.h174
-rw-r--r--drivers/clk/pxa/clk-pxa.h2
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c3
-rw-r--r--drivers/clk/qcom/Kconfig9
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/clk-pll.c6
-rw-r--r--drivers/clk/qcom/clk-rcg.c188
-rw-r--r--drivers/clk/qcom/clk-rcg.h15
-rw-r--r--drivers/clk/qcom/clk-rcg2.c43
-rw-r--r--drivers/clk/qcom/common.c12
-rw-r--r--drivers/clk/qcom/common.h4
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c62
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c80
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c22
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c2868
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c32
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c30
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c35
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c12
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c168
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c49
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c134
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c2
-rw-r--r--drivers/clk/rockchip/clk.c3
-rw-r--r--drivers/clk/rockchip/clk.h4
-rw-r--r--drivers/clk/samsung/Makefile1
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c4
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c163
-rw-r--r--drivers/clk/samsung/clk-exynos4.c11
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c5423
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c56
-rw-r--r--drivers/clk/shmobile/Makefile1
-rw-r--r--drivers/clk/shmobile/clk-r8a7778.c143
-rw-r--r--drivers/clk/st/clkgen-fsyn.c2
-rw-r--r--drivers/clk/st/clkgen-mux.c8
-rw-r--r--drivers/clk/st/clkgen-pll.c4
-rw-r--r--drivers/clk/sunxi/Makefile1
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c185
-rw-r--r--drivers/clk/sunxi/clk-usb.c233
-rw-r--r--drivers/clk/tegra/clk-pll.c7
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c24
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c2
-rw-r--r--drivers/clk/tegra/clk-tegra114.c36
-rw-r--r--drivers/clk/tegra/clk-tegra124.c22
-rw-r--r--drivers/clk/tegra/clk-tegra30.c23
-rw-r--r--drivers/clk/tegra/clk.c16
-rw-r--r--drivers/clk/tegra/clk.h10
-rw-r--r--drivers/clk/ti/apll.c5
-rw-r--r--drivers/clk/ti/autoidle.c2
-rw-r--r--drivers/clk/ti/clk-3xxx-legacy.c16
-rw-r--r--drivers/clk/ti/clk-3xxx.c19
-rw-r--r--drivers/clk/ti/clk-44xx.c11
-rw-r--r--drivers/clk/ti/clk-54xx.c22
-rw-r--r--drivers/clk/ti/clk-7xx.c18
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c2
-rw-r--r--drivers/clk/ti/clk.c7
-rw-r--r--drivers/clk/ti/clockdomain.c2
-rw-r--r--drivers/clk/ti/composite.c2
-rw-r--r--drivers/clk/ti/divider.c4
-rw-r--r--drivers/clk/ti/dpll.c6
-rw-r--r--drivers/clk/ti/fapll.c270
-rw-r--r--drivers/clk/ti/gate.c4
-rw-r--r--drivers/clk/ti/interface.c2
-rw-r--r--drivers/clk/ti/mux.c4
-rw-r--r--drivers/clk/versatile/clk-versatile.c2
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c2
-rw-r--r--drivers/clk/zynq/clkc.c24
-rw-r--r--drivers/clocksource/Kconfig7
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c132
-rw-r--r--drivers/clocksource/dw_apb_timer.c3
-rw-r--r--drivers/clocksource/mips-gic-timer.c13
-rw-r--r--drivers/clocksource/timer-atmel-st.c (renamed from arch/arm/mach-at91/at91rm9200_time.c)117
-rw-r--r--drivers/cpufreq/Kconfig8
-rw-r--r--drivers/cpufreq/Kconfig.arm9
-rw-r--r--drivers/cpufreq/Kconfig.powerpc9
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/hisi-acpu-cpufreq.c42
-rw-r--r--drivers/cpufreq/intel_pstate.c59
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c47
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c (renamed from drivers/cpufreq/ppc-corenet-cpufreq.c)163
-rw-r--r--drivers/cpuidle/Kconfig7
-rw-r--r--drivers/cpuidle/Kconfig.arm28
-rw-r--r--drivers/cpuidle/Kconfig.arm6413
-rw-r--r--drivers/cpuidle/Makefile5
-rw-r--r--drivers/cpuidle/coupled.c6
-rw-r--r--drivers/cpuidle/cpuidle-arm.c (renamed from drivers/cpuidle/cpuidle-arm64.c)83
-rw-r--r--drivers/cpuidle/cpuidle-at91.c1
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c4
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c1
-rw-r--r--drivers/cpuidle/cpuidle-ux500.c1
-rw-r--r--drivers/cpuidle/cpuidle-zynq.c1
-rw-r--r--drivers/cpuidle/cpuidle.c16
-rw-r--r--drivers/cpuidle/governors/menu.c8
-rw-r--r--drivers/crypto/Kconfig25
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c6
-rw-r--r--drivers/crypto/atmel-aes.c26
-rw-r--r--drivers/crypto/atmel-sha.c37
-rw-r--r--drivers/crypto/atmel-tdes.c3
-rw-r--r--drivers/crypto/caam/caamhash.c1
-rw-r--r--drivers/crypto/caam/caamrng.c6
-rw-r--r--drivers/crypto/ccp/Makefile9
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c12
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c4
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c5
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c12
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h3
-rw-r--r--drivers/crypto/ccp/ccp-dev.c7
-rw-r--r--drivers/crypto/ccp/ccp-dev.h12
-rw-r--r--drivers/crypto/ccp/ccp-ops.c24
-rw-r--r--drivers/crypto/ccp/ccp-pci.c21
-rw-r--r--drivers/crypto/ccp/ccp-platform.c111
-rw-r--r--drivers/crypto/img-hash.c1029
-rw-r--r--drivers/crypto/mxs-dcp.c2
-rw-r--r--drivers/crypto/n2_core.c4
-rw-r--r--drivers/crypto/omap-aes.c14
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_engine.c35
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c21
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c5
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_strings.h10
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c66
-rw-r--r--drivers/crypto/qat/qat_common/adf_dev_mgr.c3
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c88
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c31
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_debug.c2
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hw.h2
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c9
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c6
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_admin.c3
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c3
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h6
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_isr.c15
-rw-r--r--drivers/crypto/sahara.c51
-rw-r--r--drivers/crypto/talitos.c17
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2
-rw-r--r--drivers/crypto/vmx/Kconfig8
-rw-r--r--drivers/crypto/vmx/Makefile19
-rw-r--r--drivers/crypto/vmx/aes.c139
-rw-r--r--drivers/crypto/vmx/aes_cbc.c184
-rw-r--r--drivers/crypto/vmx/aes_ctr.c167
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.h20
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl1930
-rw-r--r--drivers/crypto/vmx/ghash.c214
-rw-r--r--drivers/crypto/vmx/ghashp8-ppc.pl228
-rw-r--r--drivers/crypto/vmx/ppc-xlate.pl207
-rw-r--r--drivers/crypto/vmx/vmx.c88
-rw-r--r--drivers/devfreq/devfreq.c1
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c2
-rw-r--r--drivers/devfreq/tegra-devfreq.c509
-rw-r--r--drivers/dma-buf/dma-buf.c47
-rw-r--r--drivers/dma/Kconfig32
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/amba-pl08x.c11
-rw-r--r--drivers/dma/at_hdmac.c175
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/bestcomm/bestcomm.c4
-rw-r--r--drivers/dma/dma-jz4740.c12
-rw-r--r--drivers/dma/dma-jz4780.c877
-rw-r--r--drivers/dma/dmaengine.c22
-rw-r--r--drivers/dma/dw/Kconfig2
-rw-r--r--drivers/dma/dw/core.c18
-rw-r--r--drivers/dma/edma.c2
-rw-r--r--drivers/dma/fsl_raid.c904
-rw-r--r--drivers/dma/fsl_raid.h306
-rw-r--r--drivers/dma/hsu/Kconfig14
-rw-r--r--drivers/dma/hsu/Makefile5
-rw-r--r--drivers/dma/hsu/hsu.c495
-rw-r--r--drivers/dma/hsu/hsu.h118
-rw-r--r--drivers/dma/hsu/pci.c124
-rw-r--r--drivers/dma/img-mdc-dma.c6
-rw-r--r--drivers/dma/imx-sdma.c4
-rw-r--r--drivers/dma/ioat/dca.c4
-rw-r--r--drivers/dma/ioat/dma.c4
-rw-r--r--drivers/dma/ioat/dma.h4
-rw-r--r--drivers/dma/ioat/dma_v2.c4
-rw-r--r--drivers/dma/ioat/dma_v2.h4
-rw-r--r--drivers/dma/ioat/dma_v3.c4
-rw-r--r--drivers/dma/ioat/hw.h4
-rw-r--r--drivers/dma/ioat/pci.c4
-rw-r--r--drivers/dma/ioat/registers.h4
-rw-r--r--drivers/dma/iop-adma.c4
-rw-r--r--drivers/dma/k3dma.c8
-rw-r--r--drivers/dma/mmp_pdma.c2
-rw-r--r--drivers/dma/mmp_tdma.c2
-rw-r--r--drivers/dma/mpc512x_dma.c6
-rw-r--r--drivers/dma/mv_xor.c6
-rw-r--r--drivers/dma/mv_xor.h4
-rw-r--r--drivers/dma/of-dma.c1
-rw-r--r--drivers/dma/pch_dma.c1
-rw-r--r--drivers/dma/pl330.c26
-rw-r--r--drivers/dma/ppc4xx/adma.c4
-rw-r--r--drivers/dma/qcom_bam_dma.c44
-rw-r--r--drivers/dma/s3c24xx-dma.c13
-rw-r--r--drivers/dma/sa11x0-dma.c12
-rw-r--r--drivers/dma/sh/Kconfig15
-rw-r--r--drivers/dma/sh/Makefile2
-rw-r--r--drivers/dma/sh/rcar-audmapp.c376
-rw-r--r--drivers/dma/sh/shdma-base.c73
-rw-r--r--drivers/dma/sh/shdmac.c4
-rw-r--r--drivers/dma/sh/usb-dmac.c912
-rw-r--r--drivers/dma/sirf-dma.c2
-rw-r--r--drivers/dma/ste_dma40.c6
-rw-r--r--drivers/dma/sun6i-dma.c8
-rwxr-xr-xdrivers/dma/xgene-dma.c2089
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c2
-rw-r--r--drivers/extcon/Kconfig17
-rw-r--r--drivers/extcon/Makefile4
-rw-r--r--drivers/extcon/extcon-arizona.c50
-rw-r--r--drivers/extcon/extcon-max14577.c5
-rw-r--r--drivers/extcon/extcon-max77693.c37
-rw-r--r--drivers/extcon/extcon-max77843.c881
-rw-r--r--drivers/extcon/extcon-max8997.c5
-rw-r--r--drivers/extcon/extcon-rt8973a.c6
-rw-r--r--drivers/extcon/extcon-sm5502.c6
-rw-r--r--drivers/extcon/extcon-usb-gpio.c237
-rw-r--r--drivers/extcon/extcon.c (renamed from drivers/extcon/extcon-class.c)36
-rw-r--r--drivers/firewire/net.c13
-rw-r--r--drivers/firmware/Kconfig4
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/dmi_scan.c12
-rw-r--r--drivers/firmware/efi/runtime-map.c6
-rw-r--r--drivers/firmware/pcdp.c4
-rw-r--r--drivers/firmware/qcom_scm.c494
-rw-r--r--drivers/gpio/Kconfig733
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/devres.c107
-rw-r--r--drivers/gpio/gpio-adp5588.c8
-rw-r--r--drivers/gpio/gpio-altera.c374
-rw-r--r--drivers/gpio/gpio-arizona.c4
-rw-r--r--drivers/gpio/gpio-crystalcove.c7
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-f7188x.c47
-rw-r--r--drivers/gpio/gpio-ich.c6
-rw-r--r--drivers/gpio/gpio-kempld.c2
-rw-r--r--drivers/gpio/gpio-loongson.c (renamed from arch/mips/loongson/common/gpio.c)120
-rw-r--r--drivers/gpio/gpio-max7300.c4
-rw-r--r--drivers/gpio/gpio-max732x.c134
-rw-r--r--drivers/gpio/gpio-mb86s7x.c5
-rw-r--r--drivers/gpio/gpio-mc33880.c2
-rw-r--r--drivers/gpio/gpio-mcp23s08.c9
-rw-r--r--drivers/gpio/gpio-msm-v1.c714
-rw-r--r--drivers/gpio/gpio-mvebu.c24
-rw-r--r--drivers/gpio/gpio-omap.c179
-rw-r--r--drivers/gpio/gpio-pcf857x.c134
-rw-r--r--drivers/gpio/gpio-pxa.c3
-rw-r--r--drivers/gpio/gpio-rcar.c63
-rw-r--r--drivers/gpio/gpio-tb10x.c2
-rw-r--r--drivers/gpio/gpio-tc3589x.c3
-rw-r--r--drivers/gpio/gpio-vf610.c16
-rw-r--r--drivers/gpio/gpio-xgene-sb.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c88
-rw-r--r--drivers/gpio/gpiolib-of.c111
-rw-r--r--drivers/gpio/gpiolib-sysfs.c19
-rw-r--r--drivers/gpio/gpiolib.c299
-rw-r--r--drivers/gpio/gpiolib.h15
-rw-r--r--drivers/gpu/drm/Kconfig9
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c25
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c12
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h64
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c10
-rw-r--r--drivers/gpu/drm/armada/armada_output.h2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c311
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c40
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h62
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c4
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c41
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c640
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c1
-rw-r--r--drivers/gpu/drm/bridge/Kconfig11
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/dw_hdmi.c54
-rw-r--r--drivers/gpu/drm/bridge/ps8622.c684
-rw-r--r--drivers/gpu/drm/bridge/ptn3460.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c205
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c660
-rw-r--r--drivers/gpu/drm/drm_bridge.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c114
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c34
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c80
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c13
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c62
-rw-r--r--drivers/gpu/drm/drm_info.c1
-rw-r--r--drivers/gpu/drm/drm_ioc32.c2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c63
-rw-r--r--drivers/gpu/drm/drm_irq.c67
-rw-r--r--drivers/gpu/drm/drm_modes.c12
-rw-r--r--drivers/gpu/drm/drm_of.c10
-rw-r--r--drivers/gpu/drm/drm_pci.c1
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c39
-rw-r--r--drivers/gpu/drm/drm_prime.c12
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c2
-rw-r--r--drivers/gpu/drm/drm_sysfs.c61
-rw-r--r--drivers/gpu/drm/drm_trace.h1
-rw-r--r--drivers/gpu/drm/drm_vm.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c178
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c101
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c27
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h40
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c251
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.h15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c136
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c260
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c10
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c197
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c101
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c75
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c352
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c242
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c189
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h392
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c548
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c115
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c194
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1068
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h160
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c335
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c22
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c294
-rw-r--r--drivers/gpu/drm/i915/i915_params.c19
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h202
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c215
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c78
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h108
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c552
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c264
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h91
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c16
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c24
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c7
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c11
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c111
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1791
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c466
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c38
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h129
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c3
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c184
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c32
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c21
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c29
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c66
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c243
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h12
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c37
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c6
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c5
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1286
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c351
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h13
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c266
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c25
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c476
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c5
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c77
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c14
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c34
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h10
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c196
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c24
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c7
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/Kconfig11
-rw-r--r--drivers/gpu/drm/msm/Makefile5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c212
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h117
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h418
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c1993
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c705
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy.c352
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c34
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h399
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c102
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h18
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c343
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c86
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c315
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h75
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c83
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c200
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h75
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c64
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c4
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c100
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h29
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c25
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c79
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c173
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c1054
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc114
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h294
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h354
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h354
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h354
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h230
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h480
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c387
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c440
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c622
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c80
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c241
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c66
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c57
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c29
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c146
-rw-r--r--drivers/gpu/drm/panel/Kconfig1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c256
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/Kconfig8
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c28
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c17
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c177
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c55
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/cik.c136
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c25
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c131
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c53
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c23
-rw-r--r--drivers/gpu/drm/radeon/ni.c34
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c36
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h44
-rw-r--r--drivers/gpu/drm/radeon/nid.h47
-rw-r--r--drivers/gpu/drm/radeon/r600.c28
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h13
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h34
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c58
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c140
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c206
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c785
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c107
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h72
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c144
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c36
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c22
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c46
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h3
-rw-r--r--drivers/gpu/drm/radeon/si.c134
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c37
-rw-r--r--drivers/gpu/drm/radeon/sid.h1
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c28
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c25
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c14
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c29
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c400
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c71
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c65
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c358
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c18
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c422
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h69
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c9
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c30
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c7
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c17
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c152
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.c175
-rw-r--r--drivers/gpu/drm/sti/sti_drm_drv.c86
-rw-r--r--drivers/gpu/drm/sti/sti_drm_drv.h6
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c66
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c6
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c6
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c6
-rw-r--r--drivers/gpu/drm/tegra/dc.c105
-rw-r--r--drivers/gpu/drm/tegra/dc.h7
-rw-r--r--drivers/gpu/drm/tegra/drm.c21
-rw-r--r--drivers/gpu/drm/tegra/drm.h4
-rw-r--r--drivers/gpu/drm/tegra/gem.c10
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h2
-rw-r--r--drivers/gpu/drm/tegra/sor.c202
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c9
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c9
-rw-r--r--drivers/gpu/drm/vgem/Makefile4
-rw-r--r--drivers/gpu/drm/vgem/vgem_dma_buf.c94
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c364
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h (renamed from drivers/gpu/drm/i915/intel_dsi_cmd.h)46
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c4
-rw-r--r--drivers/gpu/host1x/syncpt.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c18
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c9
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c4
-rw-r--r--drivers/hid/Kconfig7
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-core.c33
-rw-r--r--drivers/hid/hid-cypress.c6
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-ids.h31
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-lenovo.c59
-rw-r--r--drivers/hid/hid-lg.c24
-rw-r--r--drivers/hid/hid-lg4ff.c458
-rw-r--r--drivers/hid/hid-lg4ff.h4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c245
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-plantronics.c132
-rw-r--r--drivers/hid/hid-prodikeys.c3
-rw-r--r--drivers/hid/hid-rmi.c15
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hid/hid-sjoy.c3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c12
-rw-r--r--drivers/hid/usbhid/hid-quirks.c7
-rw-r--r--drivers/hid/wacom.h6
-rw-r--r--drivers/hid/wacom_sys.c309
-rw-r--r--drivers/hid/wacom_wac.c422
-rw-r--r--drivers/hid/wacom_wac.h27
-rw-r--r--drivers/hv/channel.c125
-rw-r--r--drivers/hv/channel_mgmt.c223
-rw-r--r--drivers/hv/connection.c40
-rw-r--r--drivers/hv/hv.c34
-rw-r--r--drivers/hv/hv_balloon.c143
-rw-r--r--drivers/hv/hv_util.c13
-rw-r--r--drivers/hv/hyperv_vmbus.h31
-rw-r--r--drivers/hv/vmbus_drv.c136
-rw-r--r--drivers/hwmon/ina2xx.c17
-rw-r--r--drivers/hwmon/lm85.c26
-rw-r--r--drivers/hwmon/w83795.c8
-rw-r--r--drivers/hwtracing/coresight/Kconfig61
-rw-r--r--drivers/hwtracing/coresight/Makefile (renamed from drivers/coresight/Makefile)0
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c (renamed from drivers/coresight/coresight-etb10.c)4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-cp14.c (renamed from drivers/coresight/coresight-etm-cp14.c)0
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h (renamed from drivers/coresight/coresight-etm.h)0
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c (renamed from drivers/coresight/coresight-etm3x.c)0
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c (renamed from drivers/coresight/coresight-funnel.c)0
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h (renamed from drivers/coresight/coresight-priv.h)0
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c (renamed from drivers/coresight/coresight-replicator.c)2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c (renamed from drivers/coresight/coresight-tmc.c)60
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c (renamed from drivers/coresight/coresight-tpiu.c)0
-rw-r--r--drivers/hwtracing/coresight/coresight.c (renamed from drivers/coresight/coresight.c)4
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c (renamed from drivers/coresight/of_coresight.c)31
-rw-r--r--drivers/i2c/busses/Kconfig28
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-at91.c39
-rw-r--r--drivers/i2c/busses/i2c-axxia.c27
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c15
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c2
-rw-r--r--drivers/i2c/busses/i2c-cadence.c6
-rw-r--r--drivers/i2c/busses/i2c-cpm.c20
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c51
-rw-r--r--drivers/i2c/busses/i2c-davinci.c194
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c10
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c384
-rw-r--r--drivers/i2c/busses/i2c-dln2.c13
-rw-r--r--drivers/i2c/busses/i2c-i801.c51
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c14
-rw-r--r--drivers/i2c/busses/i2c-imx.c10
-rw-r--r--drivers/i2c/busses/i2c-ismt.c5
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c833
-rw-r--r--drivers/i2c/busses/i2c-mpc.c33
-rw-r--r--drivers/i2c/busses/i2c-mxs.c7
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c6
-rw-r--r--drivers/i2c/busses/i2c-opal.c24
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c42
-rw-r--r--drivers/i2c/busses/i2c-powermac.c10
-rw-r--r--drivers/i2c/busses/i2c-qup.c21
-rw-r--r--drivers/i2c/busses/i2c-rcar.c10
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c13
-rw-r--r--drivers/i2c/busses/i2c-tegra.c12
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c10
-rw-r--r--drivers/i2c/busses/i2c-wmt.c10
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c445
-rw-r--r--drivers/i2c/i2c-core.c82
-rw-r--r--drivers/i2c/i2c-mux.c8
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c18
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c6
-rw-r--r--drivers/ide/Kconfig9
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/cs5520.c2
-rw-r--r--drivers/ide/ide-pnp.c14
-rw-r--r--drivers/ide/pmac.c15
-rw-r--r--drivers/ide/scc_pata.c887
-rw-r--r--drivers/ide/setup-pci.c2
-rw-r--r--drivers/ide/sgiioc4.c4
-rw-r--r--drivers/idle/intel_idle.c68
-rw-r--r--drivers/iio/accel/mma9551_core.c21
-rw-r--r--drivers/iio/accel/mma9553.c18
-rw-r--r--drivers/iio/accel/st_accel_core.c1
-rw-r--r--drivers/iio/adc/axp288_adc.c12
-rw-r--r--drivers/iio/adc/cc10001_adc.c60
-rw-r--r--drivers/iio/adc/max1027.c2
-rw-r--r--drivers/iio/adc/mcp320x.c6
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c7
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c5
-rw-r--r--drivers/iio/adc/xilinx-xadc.h6
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c1
-rw-r--r--drivers/iio/kfifo_buf.c3
-rw-r--r--drivers/iio/light/hid-sensor-prox.c12
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/pressure/bmp280.c1
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c1
-rw-r--r--drivers/infiniband/core/addr.c13
-rw-r--r--drivers/infiniband/core/cm.c23
-rw-r--r--drivers/infiniband/core/cm_msgs.h4
-rw-r--r--drivers/infiniband/core/cma.c27
-rw-r--r--drivers/infiniband/core/iwpm_msg.c75
-rw-r--r--drivers/infiniband/core/iwpm_util.c208
-rw-r--r--drivers/infiniband/core/iwpm_util.h15
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/core/umem_odp.c14
-rw-r--r--drivers/infiniband/core/uverbs_main.c22
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c87
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c22
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c41
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h7
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h7
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mcast.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c19
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c16
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c457
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c9
-rw-r--r--drivers/infiniband/hw/mlx4/main.c40
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h14
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c7
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c44
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c14
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c107
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h5
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/user.h2
-rw-r--r--drivers/infiniband/hw/nes/nes.c1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c65
-rw-r--r--drivers/infiniband/hw/qib/qib.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c24
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c20
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c41
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c32
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h31
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c22
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c195
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c86
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c520
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c44
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h66
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c66
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c523
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c220
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c691
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h37
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c9
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c233
-rw-r--r--drivers/input/ff-core.c10
-rw-r--r--drivers/input/ff-memless.c18
-rw-r--r--drivers/input/input-mt.c26
-rw-r--r--drivers/input/joystick/xpad.c21
-rw-r--r--drivers/input/keyboard/Kconfig21
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/atkbd.c6
-rw-r--r--drivers/input/keyboard/bcm-keypad.c458
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c15
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/input/keyboard/ipaq-micro-keys.c168
-rw-r--r--drivers/input/keyboard/lm8333.c4
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c42
-rw-r--r--drivers/input/misc/Kconfig24
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/max77843-haptic.c358
-rw-r--r--drivers/input/misc/mma8450.c35
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c293
-rw-r--r--drivers/input/misc/pwm-beeper.c9
-rw-r--r--drivers/input/misc/regulator-haptic.c2
-rw-r--r--drivers/input/misc/tps65218-pwrbutton.c2
-rw-r--r--drivers/input/mouse/Kconfig12
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/alps.c381
-rw-r--r--drivers/input/mouse/alps.h78
-rw-r--r--drivers/input/mouse/cyapa.c4
-rw-r--r--drivers/input/mouse/elan_i2c.h5
-rw-r--r--drivers/input/mouse/elan_i2c_core.c53
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c37
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c12
-rw-r--r--drivers/input/mouse/elantech.c22
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/mouse/lifebook.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c57
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mouse/synaptics.c42
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/input/mouse/vmmouse.c508
-rw-r--r--drivers/input/mouse/vmmouse.h30
-rw-r--r--drivers/input/serio/gscps2.c1
-rw-r--r--drivers/input/serio/i8042.c28
-rw-r--r--drivers/input/touchscreen/Kconfig43
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c532
-rw-r--r--drivers/input/touchscreen/bcm_iproc_tsc.c522
-rw-r--r--drivers/input/touchscreen/chipone_icn8318.c316
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c6
-rw-r--r--drivers/input/touchscreen/elants_i2c.c14
-rw-r--r--drivers/input/touchscreen/goodix.c36
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c62
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c46
-rw-r--r--drivers/input/touchscreen/sur40.c434
-rw-r--r--drivers/input/touchscreen/sx8654.c286
-rw-r--r--drivers/input/touchscreen/tsc2007.c11
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c11
-rw-r--r--drivers/iommu/amd_iommu.c250
-rw-r--r--drivers/iommu/amd_iommu_types.h13
-rw-r--r--drivers/iommu/amd_iommu_v2.c3
-rw-r--r--drivers/iommu/arm-smmu.c201
-rw-r--r--drivers/iommu/exynos-iommu.c87
-rw-r--r--drivers/iommu/fsl_pamu_domain.c60
-rw-r--r--drivers/iommu/fsl_pamu_domain.h2
-rw-r--r--drivers/iommu/intel-iommu.c205
-rw-r--r--drivers/iommu/intel_irq_remapping.c17
-rw-r--r--drivers/iommu/io-pgtable-arm.c5
-rw-r--r--drivers/iommu/iommu.c26
-rw-r--r--drivers/iommu/ipmmu-vmsa.c41
-rw-r--r--drivers/iommu/msm_iommu.c73
-rw-r--r--drivers/iommu/omap-iommu.c49
-rw-r--r--drivers/iommu/rockchip-iommu.c44
-rw-r--r--drivers/iommu/shmobile-iommu.c39
-rw-r--r--drivers/iommu/tegra-gart.c88
-rw-r--r--drivers/iommu/tegra-smmu.c59
-rw-r--r--drivers/irqchip/Kconfig5
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c335
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c193
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c9
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c175
-rw-r--r--drivers/irqchip/irq-mips-gic.c46
-rw-r--r--drivers/irqchip/irq-tegra.c2
-rw-r--r--drivers/irqchip/irqchip.c3
-rw-r--r--drivers/isdn/gigaset/ev-layer.c365
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c33
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c2
-rw-r--r--drivers/isdn/mISDN/dsp_core.c4
-rw-r--r--drivers/isdn/mISDN/socket.c7
-rw-r--r--drivers/leds/Kconfig8
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class-flash.c82
-rw-r--r--drivers/leds/led-class.c96
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/leds/leds-lp8501.c2
-rw-r--r--drivers/leds/leds-lp8860.c14
-rw-r--r--drivers/leds/leds-pca963x.c2
-rw-r--r--drivers/leds/leds-pm8941-wled.c435
-rw-r--r--drivers/leds/leds-pwm.c3
-rw-r--r--drivers/lguest/hypercalls.c5
-rw-r--r--drivers/lguest/interrupts_and_traps.c105
-rw-r--r--drivers/lguest/lg.h2
-rw-r--r--drivers/lguest/lguest_user.c8
-rw-r--r--drivers/macintosh/rack-meter.c30
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/macintosh/via-pmu.c25
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/arm_mhu.c195
-rw-r--r--drivers/mailbox/pcc.c122
-rw-r--r--drivers/mcb/mcb-pci.c4
-rw-r--r--drivers/md/Kconfig43
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bitmap.c189
-rw-r--r--drivers/md/bitmap.h10
-rw-r--r--drivers/md/dm-cache-policy-mq.c251
-rw-r--r--drivers/md/dm-crypt.c13
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-ioctl.c17
-rw-r--r--drivers/md/dm-log-userspace-base.c91
-rw-r--r--drivers/md/dm-log-userspace-transfer.c5
-rw-r--r--drivers/md/dm-log-writes.c825
-rw-r--r--drivers/md/dm-mpath.c6
-rw-r--r--drivers/md/dm-sysfs.c43
-rw-r--r--drivers/md/dm-table.c71
-rw-r--r--drivers/md/dm-verity.c147
-rw-r--r--drivers/md/dm.c571
-rw-r--r--drivers/md/dm.h10
-rw-r--r--drivers/md/md-cluster.c965
-rw-r--r--drivers/md/md-cluster.h29
-rw-r--r--drivers/md/md.c386
-rw-r--r--drivers/md/md.h26
-rw-r--r--drivers/md/raid0.c51
-rw-r--r--drivers/md/raid1.c29
-rw-r--r--drivers/md/raid10.c8
-rw-r--r--drivers/md/raid5.c897
-rw-r--r--drivers/md/raid5.h59
-rw-r--r--drivers/media/Kconfig10
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c19
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c4
-rw-r--r--drivers/media/common/siano/sms-cards.c8
-rw-r--r--drivers/media/common/siano/sms-cards.h3
-rw-r--r--drivers/media/common/siano/smscoreapi.c164
-rw-r--r--drivers/media/common/siano/smscoreapi.h32
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c6
-rw-r--r--drivers/media/common/siano/smsdvb-main.c74
-rw-r--r--drivers/media/common/siano/smsir.c18
-rw-r--r--drivers/media/dvb-core/dmxdev.c11
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c30
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c124
-rw-r--r--drivers/media/dvb-core/dvb_net.c7
-rw-r--r--drivers/media/dvb-core/dvbdev.c144
-rw-r--r--drivers/media/dvb-core/dvbdev.h27
-rw-r--r--drivers/media/dvb-frontends/Kconfig8
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/a8293.h2
-rw-r--r--drivers/media/dvb-frontends/af9013.h2
-rw-r--r--drivers/media/dvb-frontends/atbm8830.h2
-rw-r--r--drivers/media/dvb-frontends/au8522.h2
-rw-r--r--drivers/media/dvb-frontends/bcm3510.h2
-rw-r--r--drivers/media/dvb-frontends/cx22700.h2
-rw-r--r--drivers/media/dvb-frontends/cx22702.h2
-rw-r--r--drivers/media/dvb-frontends/cx24110.h2
-rw-r--r--drivers/media/dvb-frontends/cx24113.h2
-rw-r--r--drivers/media/dvb-frontends/cx24116.h2
-rw-r--r--drivers/media/dvb-frontends/cx24117.h2
-rw-r--r--drivers/media/dvb-frontends/cx24123.h2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r.h2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c6
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_priv.h2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c2
-rw-r--r--drivers/media/dvb-frontends/dib0070.h2
-rw-r--r--drivers/media/dvb-frontends/dib0090.h2
-rw-r--r--drivers/media/dvb-frontends/dib3000.h2
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.h2
-rw-r--r--drivers/media/dvb-frontends/dib7000m.h2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.h2
-rw-r--r--drivers/media/dvb-frontends/dib8000.h2
-rw-r--r--drivers/media/dvb-frontends/dib9000.h2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx39xxj.h2
-rw-r--r--drivers/media/dvb-frontends/drxd.h2
-rw-r--r--drivers/media/dvb-frontends/drxk.h2
-rw-r--r--drivers/media/dvb-frontends/ds3000.h2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.h2
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.h2
-rw-r--r--drivers/media/dvb-frontends/ec100.h2
-rw-r--r--drivers/media/dvb-frontends/hd29l2.h2
-rw-r--r--drivers/media/dvb-frontends/isl6405.h2
-rw-r--r--drivers/media/dvb-frontends/isl6421.h2
-rw-r--r--drivers/media/dvb-frontends/isl6423.h2
-rw-r--r--drivers/media/dvb-frontends/itd1000.h2
-rw-r--r--drivers/media/dvb-frontends/ix2505v.h2
-rw-r--r--drivers/media/dvb-frontends/l64781.h2
-rw-r--r--drivers/media/dvb-frontends/lg2160.h2
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.h2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2144
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.h74
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.h2
-rw-r--r--drivers/media/dvb-frontends/lgs8gl5.h2
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.h2
-rw-r--r--drivers/media/dvb-frontends/lnbh24.h2
-rw-r--r--drivers/media/dvb-frontends/lnbp21.h2
-rw-r--r--drivers/media/dvb-frontends/lnbp22.h2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c2
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a16.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.h2
-rw-r--r--drivers/media/dvb-frontends/mn88472.h12
-rw-r--r--drivers/media/dvb-frontends/mn88473.h6
-rw-r--r--drivers/media/dvb-frontends/mt312.h2
-rw-r--r--drivers/media/dvb-frontends/mt352.h2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.h2
-rw-r--r--drivers/media/dvb-frontends/nxt6000.h2
-rw-r--r--drivers/media/dvb-frontends/or51132.h2
-rw-r--r--drivers/media/dvb-frontends/or51211.h2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1409.h2
-rw-r--r--drivers/media/dvb-frontends/s5h1411.h2
-rw-r--r--drivers/media/dvb-frontends/s5h1420.h2
-rw-r--r--drivers/media/dvb-frontends/s5h1432.h2
-rw-r--r--drivers/media/dvb-frontends/s921.h2
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si2165.h2
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h2
-rw-r--r--drivers/media/dvb-frontends/si21xx.h2
-rw-r--r--drivers/media/dvb-frontends/sp2.c5
-rw-r--r--drivers/media/dvb-frontends/sp8870.h2
-rw-r--r--drivers/media/dvb-frontends/sp887x.h2
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.h2
-rw-r--r--drivers/media/dvb-frontends/stb6000.h2
-rw-r--r--drivers/media/dvb-frontends/stb6100.h2
-rw-r--r--drivers/media/dvb-frontends/stv0288.h2
-rw-r--r--drivers/media/dvb-frontends/stv0297.h2
-rw-r--r--drivers/media/dvb-frontends/stv0299.h2
-rw-r--r--drivers/media/dvb-frontends/stv0367.h2
-rw-r--r--drivers/media/dvb-frontends/stv0900.h2
-rw-r--r--drivers/media/dvb-frontends/stv090x.h2
-rw-r--r--drivers/media/dvb-frontends/stv6110.h2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.h2
-rw-r--r--drivers/media/dvb-frontends/tda1002x.h4
-rw-r--r--drivers/media/dvb-frontends/tda10048.h2
-rw-r--r--drivers/media/dvb-frontends/tda1004x.h2
-rw-r--r--drivers/media/dvb-frontends/tda10071.h2
-rw-r--r--drivers/media/dvb-frontends/tda10071_priv.h2
-rw-r--r--drivers/media/dvb-frontends/tda10086.h2
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.h2
-rw-r--r--drivers/media/dvb-frontends/tda665x.h2
-rw-r--r--drivers/media/dvb-frontends/tda8083.h2
-rw-r--r--drivers/media/dvb-frontends/tda8261.h2
-rw-r--r--drivers/media/dvb-frontends/tda826x.h2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c302
-rw-r--r--drivers/media/dvb-frontends/ts2020.h27
-rw-r--r--drivers/media/dvb-frontends/tua6100.h2
-rw-r--r--drivers/media/dvb-frontends/ves1820.h2
-rw-r--r--drivers/media/dvb-frontends/ves1x93.h2
-rw-r--r--drivers/media/dvb-frontends/zl10036.h2
-rw-r--r--drivers/media/dvb-frontends/zl10039.h2
-rw-r--r--drivers/media/dvb-frontends/zl10353.h2
-rw-r--r--drivers/media/i2c/Kconfig11
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/ad9389b.c10
-rw-r--r--drivers/media/i2c/adv7180.c10
-rw-r--r--drivers/media/i2c/adv7343.c1
-rw-r--r--drivers/media/i2c/adv7511.c26
-rw-r--r--drivers/media/i2c/adv7604.c945
-rw-r--r--drivers/media/i2c/adv7842.c5
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c30
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.h11
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c16
-rw-r--r--drivers/media/i2c/mt9m032.c34
-rw-r--r--drivers/media/i2c/mt9p031.c81
-rw-r--r--drivers/media/i2c/mt9t001.c36
-rw-r--r--drivers/media/i2c/mt9v032.c115
-rw-r--r--drivers/media/i2c/noon010pc30.c17
-rw-r--r--drivers/media/i2c/ov2659.c1509
-rw-r--r--drivers/media/i2c/ov7670.c37
-rw-r--r--drivers/media/i2c/ov9650.c16
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c72
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c2
-rw-r--r--drivers/media/i2c/s5k4ecgx.c16
-rw-r--r--drivers/media/i2c/s5k5baf.c40
-rw-r--r--drivers/media/i2c/s5k6a3.c18
-rw-r--r--drivers/media/i2c/s5k6aa.c34
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c118
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c1
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c125
-rw-r--r--drivers/media/i2c/ths7303.c4
-rw-r--r--drivers/media/i2c/ths8200.c1
-rw-r--r--drivers/media/i2c/tvp514x.c13
-rw-r--r--drivers/media/i2c/tvp7002.c15
-rw-r--r--drivers/media/mmc/siano/smssdio.c17
-rw-r--r--drivers/media/pci/bt8xx/bt878.c12
-rw-r--r--drivers/media/pci/bt8xx/bt878.h11
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c73
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h6
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h3
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c27
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c58
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c66
-rw-r--r--drivers/media/pci/cx18/cx18-streams.h2
-rw-r--r--drivers/media/pci/cx23885/Kconfig1
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c3
-rw-r--r--drivers/media/pci/cx23885/altera-ci.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c30
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c1
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c22
-rw-r--r--drivers/media/pci/cx88/cx88-core.c18
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c3
-rw-r--r--drivers/media/pci/cx88/cx88-video.c61
-rw-r--r--drivers/media/pci/cx88/cx88.h17
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c159
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c113
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.h2
-rw-r--r--drivers/media/pci/meye/meye.c21
-rw-r--r--drivers/media/pci/meye/meye.h2
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c2
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c2
-rw-r--r--drivers/media/pci/saa7146/mxb.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/pci/smipcie/Kconfig2
-rw-r--r--drivers/media/pci/smipcie/smipcie.c12
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c35
-rw-r--r--drivers/media/pci/ttpci/av7110.h4
-rw-r--r--drivers/media/pci/ttpci/budget-av.c2
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/am437x/Kconfig2
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c60
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h3
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c348
-rw-r--r--drivers/media/platform/coda/Makefile2
-rw-r--r--drivers/media/platform/coda/coda-bit.c205
-rw-r--r--drivers/media/platform/coda/coda-common.c113
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c1
-rw-r--r--drivers/media/platform/coda/coda.h18
-rw-r--r--drivers/media/platform/coda/trace.h203
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c26
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c52
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c49
-rw-r--r--drivers/media/platform/davinci/vpif_display.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c22
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c28
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c33
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c16
-rw-r--r--drivers/media/platform/m2m-deinterlace.c21
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c62
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h8
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c1
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.h4
-rw-r--r--drivers/media/platform/omap3isp/isp.c563
-rw-r--r--drivers/media/platform/omap3isp/isp.h43
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c112
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c68
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c56
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c48
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c1
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c1
-rw-r--r--drivers/media/platform/omap3isp/isphist.c127
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c70
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c80
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c2
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h5
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c20
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c18
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c63
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h12
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c32
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h3
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c7
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c2
-rw-r--r--drivers/media/platform/sh_vou.c30
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c22
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c1
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c111
-rw-r--r--drivers/media/platform/via-camera.c15
-rw-r--r--drivers/media/platform/vim2m.c23
-rw-r--r--drivers/media/platform/vivid/vivid-core.c93
-rw-r--r--drivers/media/platform/vivid/vivid-core.h8
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c2
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c125
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c66
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c1082
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.h112
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c180
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c378
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c85
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c42
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c16
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c37
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h12
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c30
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c30
-rw-r--r--drivers/media/platform/xilinx/Kconfig23
-rw-r--r--drivers/media/platform/xilinx/Makefile5
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c766
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h109
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c931
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c323
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h238
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c669
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h49
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c380
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h42
-rw-r--r--drivers/media/radio/radio-wl1273.c27
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c14
-rw-r--r--drivers/media/radio/si4713/si4713.c18
-rw-r--r--drivers/media/radio/wl128x/Kconfig2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c28
-rw-r--r--drivers/media/rc/ene_ir.c13
-rw-r--r--drivers/media/rc/fintek-cir.c13
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c31
-rw-r--r--drivers/media/rc/img-ir/img-ir.h2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c8
-rw-r--r--drivers/media/rc/ite-cir.c13
-rw-r--r--drivers/media/rc/nuvoton-cir.c13
-rw-r--r--drivers/media/tuners/Kconfig8
-rw-r--r--drivers/media/tuners/Makefile1
-rw-r--r--drivers/media/tuners/fc0011.h2
-rw-r--r--drivers/media/tuners/fc0012.h2
-rw-r--r--drivers/media/tuners/fc0013.h2
-rw-r--r--drivers/media/tuners/fc2580.h2
-rw-r--r--drivers/media/tuners/m88ts2022.c579
-rw-r--r--drivers/media/tuners/m88ts2022.h54
-rw-r--r--drivers/media/tuners/m88ts2022_priv.h35
-rw-r--r--drivers/media/tuners/max2165.h2
-rw-r--r--drivers/media/tuners/mc44s803.h2
-rw-r--r--drivers/media/tuners/msi001.c2
-rw-r--r--drivers/media/tuners/mt2060.h2
-rw-r--r--drivers/media/tuners/mt2063.h2
-rw-r--r--drivers/media/tuners/mt20xx.h2
-rw-r--r--drivers/media/tuners/mt2131.h2
-rw-r--r--drivers/media/tuners/mt2266.h2
-rw-r--r--drivers/media/tuners/mxl5005s.h2
-rw-r--r--drivers/media/tuners/mxl5007t.h2
-rw-r--r--drivers/media/tuners/qt1010.h2
-rw-r--r--drivers/media/tuners/r820t.c29
-rw-r--r--drivers/media/tuners/r820t.h2
-rw-r--r--drivers/media/tuners/si2157.c25
-rw-r--r--drivers/media/tuners/si2157_priv.h1
-rw-r--r--drivers/media/tuners/tda18218.h2
-rw-r--r--drivers/media/tuners/tda18271.h2
-rw-r--r--drivers/media/tuners/tda827x.h2
-rw-r--r--drivers/media/tuners/tda8290.h2
-rw-r--r--drivers/media/tuners/tda9887.h2
-rw-r--r--drivers/media/tuners/tea5761.h2
-rw-r--r--drivers/media/tuners/tea5767.h2
-rw-r--r--drivers/media/tuners/tua9001.h2
-rw-r--r--drivers/media/tuners/tuner-simple.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/tuners/xc4000.h2
-rw-r--r--drivers/media/tuners/xc5000.c5
-rw-r--r--drivers/media/tuners/xc5000.h2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c104
-rw-r--r--drivers/media/usb/au0828/au0828.h4
-rw-r--r--drivers/media/usb/cx231xx/Kconfig1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c33
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c144
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c13
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c71
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c176
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h21
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c58
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c26
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c8
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig5
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c155
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c3
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c69
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c192
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c13
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c14
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c119
-rw-r--r--drivers/media/usb/em28xx/em28xx.h7
-rw-r--r--drivers/media/usb/gspca/ov534.c11
-rw-r--r--drivers/media/usb/gspca/topro.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c10
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c19
-rw-r--r--drivers/media/usb/hdpvr/hdpvr.h2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c84
-rw-r--r--drivers/media/usb/siano/smsusb.c136
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c17
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c6
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c59
-rw-r--r--drivers/media/usb/tm6000/tm6000.h4
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c94
-rw-r--r--drivers/media/usb/usbvision/usbvision.h4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c30
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c15
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c70
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/v4l2-core/tuner-core.c22
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c81
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c22
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c55
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c12
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c102
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c33
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c61
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c8
-rw-r--r--drivers/memory/Kconfig9
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/jz4780-nemc.c391
-rw-r--r--drivers/memory/omap-gpmc.c358
-rw-r--r--drivers/memstick/core/mspro_block.c3
-rw-r--r--drivers/mfd/Kconfig58
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/ab8500-debugfs.c196
-rw-r--r--drivers/mfd/arizona-core.c25
-rw-r--r--drivers/mfd/arizona-i2c.c2
-rw-r--r--drivers/mfd/arizona-irq.c1
-rw-r--r--drivers/mfd/arizona-spi.c2
-rw-r--r--drivers/mfd/axp20x.c30
-rw-r--r--drivers/mfd/cros_ec.c19
-rw-r--r--drivers/mfd/da9052-irq.c2
-rw-r--r--drivers/mfd/da9052-spi.c6
-rw-r--r--drivers/mfd/da9150-core.c2
-rw-r--r--drivers/mfd/dln2.c2
-rw-r--r--drivers/mfd/hi6421-pmic-core.c2
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c282
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c14
-rw-r--r--drivers/mfd/kempld-core.c11
-rw-r--r--drivers/mfd/lpc_ich.c174
-rw-r--r--drivers/mfd/max77693.c4
-rw-r--r--drivers/mfd/max77843.c243
-rw-r--r--drivers/mfd/mc13xxx-core.c6
-rw-r--r--drivers/mfd/menelaus.c29
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/mt6397-core.c227
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c103
-rw-r--r--drivers/mfd/qcom_rpm.c41
-rw-r--r--drivers/mfd/rk808.c3
-rw-r--r--drivers/mfd/rtl8411.c11
-rw-r--r--drivers/mfd/rts5209.c4
-rw-r--r--drivers/mfd/rts5227.c12
-rw-r--r--drivers/mfd/rts5229.c4
-rw-r--r--drivers/mfd/rts5249.c343
-rw-r--r--drivers/mfd/rtsx_gops.c37
-rw-r--r--drivers/mfd/rtsx_pcr.c109
-rw-r--r--drivers/mfd/rtsx_pcr.h8
-rw-r--r--drivers/mfd/sec-core.c7
-rw-r--r--drivers/mfd/sec-irq.c14
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/sky81452.c108
-rw-r--r--drivers/mfd/tc3589x.c9
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c13
-rw-r--r--drivers/mfd/tps65010.c4
-rw-r--r--drivers/mfd/twl4030-power.c2
-rw-r--r--drivers/mfd/twl6040.c1
-rw-r--r--drivers/mfd/vexpress-sysreg.c71
-rw-r--r--drivers/mfd/wm5102-tables.c3
-rw-r--r--drivers/misc/bh1780gli.c2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c12
-rw-r--r--drivers/misc/carma/carma-fpga.c2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c56
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c2
-rw-r--r--drivers/misc/mei/Makefile3
-rw-r--r--drivers/misc/mei/amthif.c424
-rw-r--r--drivers/misc/mei/bus.c105
-rw-r--r--drivers/misc/mei/client.c478
-rw-r--r--drivers/misc/mei/client.h42
-rw-r--r--drivers/misc/mei/debugfs.c21
-rw-r--r--drivers/misc/mei/hbm.c8
-rw-r--r--drivers/misc/mei/hw-me.c170
-rw-r--r--drivers/misc/mei/hw-me.h4
-rw-r--r--drivers/misc/mei/hw-txe.c2
-rw-r--r--drivers/misc/mei/init.c2
-rw-r--r--drivers/misc/mei/interrupt.c171
-rw-r--r--drivers/misc/mei/main.c147
-rw-r--r--drivers/misc/mei/mei-trace.c25
-rw-r--r--drivers/misc/mei/mei-trace.h74
-rw-r--r--drivers/misc/mei/mei_dev.h40
-rw-r--r--drivers/misc/mei/nfc.c43
-rw-r--r--drivers/misc/mei/pci-me.c5
-rw-r--r--drivers/misc/mei/pci-txe.c4
-rw-r--r--drivers/misc/mei/wd.c36
-rw-r--r--drivers/misc/mic/host/mic_boot.c14
-rw-r--r--drivers/misc/mic/host/mic_intr.c2
-rw-r--r--drivers/misc/sram.c19
-rw-r--r--drivers/misc/tifm_7xx1.c5
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c37
-rw-r--r--drivers/mmc/card/block.c50
-rw-r--r--drivers/mmc/card/mmc_test.c18
-rw-r--r--drivers/mmc/card/queue.c2
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/bus.c41
-rw-r--r--drivers/mmc/core/core.c1
-rw-r--r--drivers/mmc/core/mmc.c4
-rw-r--r--drivers/mmc/core/pwrseq.c2
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/host/Kconfig8
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/msm_sdcc.c1474
-rw-r--r--drivers/mmc/host/msm_sdcc.h256
-rw-r--r--drivers/mmc/host/mxcmmc.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c15
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c26
-rw-r--r--drivers/mmc/host/tmio_mmc.h4
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c10
-rw-r--r--drivers/mtd/Kconfig13
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c30
-rw-r--r--drivers/mtd/devices/block2mtd.c58
-rw-r--r--drivers/mtd/devices/docg3.c11
-rw-r--r--drivers/mtd/devices/m25p80.c21
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c4
-rw-r--r--drivers/mtd/maps/ts5500_flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/mtdcore.c52
-rw-r--r--drivers/mtd/mtdpart.c68
-rw-r--r--drivers/mtd/nand/atmel_nand.c26
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h3
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h1
-rw-r--r--drivers/mtd/nand/denali.c6
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c4
-rw-r--r--drivers/mtd/nand/fsmc_nand.c7
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c16
-rw-r--r--drivers/mtd/nand/mxc_nand.c146
-rw-r--r--drivers/mtd/nand/nand_base.c41
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c48
-rw-r--r--drivers/mtd/nand/s3c2410.c3
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/onenand/onenand_base.c12
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c11
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c61
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c6
-rw-r--r--drivers/mtd/tests/mtd_test.h12
-rw-r--r--drivers/mtd/tests/nandbiterrs.c4
-rw-r--r--drivers/mtd/tests/oobtest.c90
-rw-r--r--drivers/mtd/tests/pagetest.c10
-rw-r--r--drivers/mtd/tests/readtest.c7
-rw-r--r--drivers/mtd/tests/speedtest.c38
-rw-r--r--drivers/mtd/tests/stresstest.c9
-rw-r--r--drivers/mtd/tests/subpagetest.c29
-rw-r--r--drivers/mtd/tests/torturetest.c23
-rw-r--r--drivers/mtd/ubi/attach.c73
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mtd/ubi/build.c35
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/debug.c100
-rw-r--r--drivers/mtd/ubi/debug.h12
-rw-r--r--drivers/mtd/ubi/eba.c54
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c362
-rw-r--r--drivers/mtd/ubi/fastmap.c443
-rw-r--r--drivers/mtd/ubi/io.c6
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/mtd/ubi/ubi-media.h2
-rw-r--r--drivers/mtd/ubi/ubi.h85
-rw-r--r--drivers/mtd/ubi/wl.c587
-rw-r--r--drivers/mtd/ubi/wl.h28
-rw-r--r--drivers/net/arcnet/arcnet.c55
-rw-r--r--drivers/net/bonding/bond_3ad.c101
-rw-r--r--drivers/net/bonding/bond_main.c52
-rw-r--r--drivers/net/bonding/bond_procfs.c44
-rw-r--r--drivers/net/bonding/bonding_priv.h25
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c258
-rw-r--r--drivers/net/can/cc770/cc770_platform.c2
-rw-r--r--drivers/net/can/grcan.c2
-rw-r--r--drivers/net/can/led.c22
-rw-r--r--drivers/net/can/m_can/m_can.c4
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c11
-rw-r--r--drivers/net/can/usb/esd_usb2.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c2
-rw-r--r--drivers/net/can/xilinx_can.c9
-rw-r--r--drivers/net/dsa/Kconfig13
-rw-r--r--drivers/net/dsa/bcm_sf2.c155
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h15
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c199
-rw-r--r--drivers/net/dsa/mv88e6131.c182
-rw-r--r--drivers/net/dsa/mv88e6171.c202
-rw-r--r--drivers/net/dsa/mv88e6352.c307
-rw-r--r--drivers/net/dsa/mv88e6xxx.c989
-rw-r--r--drivers/net/dsa/mv88e6xxx.h245
-rw-r--r--drivers/net/ethernet/8390/etherh.c2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c16
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/altera/altera_msgdmahw.h6
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c76
-rw-r--r--drivers/net/ethernet/amd/Kconfig3
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c4
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c81
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c68
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c29
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c22
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c13
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h17
-rw-r--r--drivers/net/ethernet/apm/xgene/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c189
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h23
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c10
-rw-r--r--drivers/net/ethernet/apple/bmac.c32
-rw-r--r--drivers/net/ethernet/apple/mace.c48
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/arc/Kconfig5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_hw.h2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig10
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c498
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2_fw.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h141
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c125
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c6
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1106
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h50
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c45
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/brocade/Kconfig8
-rw-r--r--drivers/net/ethernet/brocade/Makefile2
-rw-r--r--drivers/net/ethernet/brocade/bna/Kconfig12
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h9
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c11
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c9
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h13
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h9
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c11
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c7
-rw-r--r--drivers/net/ethernet/cadence/Kconfig12
-rw-r--r--drivers/net/ethernet/cadence/Makefile1
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c481
-rw-r--r--drivers/net/ethernet/cadence/macb.c762
-rw-r--r--drivers/net/ethernet/cadence/macb.h40
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c20
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h60
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c915
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c122
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1772
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c154
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c736
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c8
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c203
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h23
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1205
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c17
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c204
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h32
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c23
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c98
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c18
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c10
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c20
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c776
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h9
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c81
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c25
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h10
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c95
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c53
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c76
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c52
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c54
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c63
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c33
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ptp.c17
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c354
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c93
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c519
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c936
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c98
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c489
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h42
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c647
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h17
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c59
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h50
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c463
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h42
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c183
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c44
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c77
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h128
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c83
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h103
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c21
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h53
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c394
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h123
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c43
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h83
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c122
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c293
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c95
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h285
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c155
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h84
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c236
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h98
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h105
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c189
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h17
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c106
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c343
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c281
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c121
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h145
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h107
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mad.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c38
-rw-r--r--drivers/net/ethernet/neterion/s2io.c10
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c19
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h19
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c259
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h23
-rw-r--r--drivers/net/ethernet/rocker/rocker.c860
-rw-r--r--drivers/net/ethernet/rocker/rocker.h32
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c12
-rw-r--r--drivers/net/ethernet/sfc/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c22
-rw-r--r--drivers/net/ethernet/sfc/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/siena.c3
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c2
-rw-r--r--drivers/net/ethernet/sfc/vfdi.h4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c46
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c12
-rw-r--r--drivers/net/ethernet/sun/sungem.c18
-rw-r--r--drivers/net/ethernet/sun/sunhme.c16
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c8
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/ti/cpts.c19
-rw-r--r--drivers/net/ethernet/ti/netcp.h5
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c27
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c1589
-rw-r--r--drivers/net/ethernet/tile/tilegx.c13
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c4
-rw-r--r--drivers/net/ethernet/via/via-rhine.c31
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/hamradio/6pack.c31
-rw-r--r--drivers/net/hamradio/baycom_epp.c3
-rw-r--r--drivers/net/hamradio/bpqether.c3
-rw-r--r--drivers/net/hamradio/dmascc.c3
-rw-r--r--drivers/net/hamradio/hdlcdrv.c3
-rw-r--r--drivers/net/hamradio/mkiss.c44
-rw-r--r--drivers/net/hamradio/scc.c3
-rw-r--r--drivers/net/hamradio/yam.c3
-rw-r--r--drivers/net/hyperv/hyperv_net.h44
-rw-r--r--drivers/net/hyperv/netvsc.c220
-rw-r--r--drivers/net/hyperv/netvsc_drv.c125
-rw-r--r--drivers/net/hyperv/rndis_filter.c19
-rw-r--r--drivers/net/ieee802154/at86rf230.c726
-rw-r--r--drivers/net/ieee802154/cc2520.c150
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c10
-rw-r--r--drivers/net/macvlan.c25
-rw-r--r--drivers/net/macvtap.c13
-rw-r--r--drivers/net/netconsole.c5
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c98
-rw-r--r--drivers/net/phy/at803x.c11
-rw-r--r--drivers/net/phy/bcm7xxx.c1
-rw-r--r--drivers/net/phy/broadcom.c14
-rw-r--r--drivers/net/phy/dp83640.c21
-rw-r--r--drivers/net/phy/fixed_phy.c29
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/phy/mdio-gpio.c21
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c62
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c2
-rw-r--r--drivers/net/phy/mdio-octeon.c2
-rw-r--r--drivers/net/phy/micrel.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c1
-rw-r--r--drivers/net/ppp/ppp_mppe.c36
-rw-r--r--drivers/net/ppp/pppoe.c32
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/sb1000.c15
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/cx82310_eth.c2
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/lg-vl600.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c132
-rw-r--r--drivers/net/usb/usbnet.c8
-rw-r--r--drivers/net/veth.c15
-rw-r--r--drivers/net/virtio_net.c16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c29
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h5
-rw-r--r--drivers/net/vxlan.c315
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c1
-rw-r--r--drivers/net/wireless/airo.c8
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c9
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.h1
-rw-r--r--drivers/net/wireless/ath/ath.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c101
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c171
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c132
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c33
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c24
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c77
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c599
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.h61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c178
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c103
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c142
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h33
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h279
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_aic.h168
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_mci.h310
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_wow.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c20
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c68
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c37
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c70
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c202
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c24
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c417
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h42
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h23
-rw-r--r--drivers/net/wireless/atmel.c16
-rw-r--r--drivers/net/wireless/b43/dma.c12
-rw-r--r--drivers/net/wireless/b43/main.c14
-rw-r--r--drivers/net/wireless/b43legacy/dma.c6
-rw-r--r--drivers/net/wireless/b43legacy/main.c8
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c174
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c310
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/flowring.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/pcie.c24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c308
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.h47
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac.h102
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h88
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h110
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h225
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c11
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h9
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c11
-rw-r--r--drivers/net/wireless/cw1200/sta.c6
-rw-r--r--drivers/net/wireless/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c5
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h33
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c8
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c6
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c15
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-data.h79
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-io.h155
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h202
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h97
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h81
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h438
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h54
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h244
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c410
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h72
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c226
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c75
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c32
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c55
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c63
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h47
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h34
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h249
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h64
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h147
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c206
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c41
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c538
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h207
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c327
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c91
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c9
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c289
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c155
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c673
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c67
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c101
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c281
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c27
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c236
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c63
-rw-r--r--drivers/net/wireless/libertas/cfg.c6
-rw-r--r--drivers/net/wireless/libertas/debugfs.c3
-rw-r--r--drivers/net/wireless/libertas/main.c4
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c109
-rw-r--r--drivers/net/wireless/mwifiex/11n.c18
-rw-r--r--drivers/net/wireless/mwifiex/11n.h32
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c16
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c202
-rw-r--r--drivers/net/wireless/mwifiex/decl.h11
-rw-r--r--drivers/net/wireless/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/mwifiex/init.c31
-rw-r--r--drivers/net/wireless/mwifiex/main.c98
-rw-r--r--drivers/net/wireless/mwifiex/main.h34
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c33
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h6
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c233
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h74
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c61
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c21
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c6
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c136
-rw-r--r--drivers/net/wireless/mwifiex/usb.c6
-rw-r--r--drivers/net/wireless/mwifiex/util.c30
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c50
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h2
-rw-r--r--drivers/net/wireless/mwl8k.c2
-rw-r--r--drivers/net/wireless/orinoco/Kconfig2
-rw-r--r--drivers/net/wireless/orinoco/airport.c2
-rw-r--r--drivers/net/wireless/orinoco/wext.c2
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/p54/main.c8
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c28
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c7
-rw-r--r--drivers/net/wireless/rtlwifi/base.h10
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h2
-rw-r--r--drivers/net/wireless/rtlwifi/core.c6
-rw-r--r--drivers/net/wireless/rtlwifi/core.h2
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h6
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/def.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/rf.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/def.h39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/rf.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/def.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/rf.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/def.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/hw.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/rf.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/trx.c16
-rw-r--r--drivers/net/wireless/rtlwifi/stats.c24
-rw-r--r--drivers/net/wireless/rtlwifi/stats.h1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c13
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c25
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c6
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c63
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c31
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c63
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h5
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h6
-rw-r--r--drivers/net/xen-netback/common.h4
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/net/xen-netback/netback.c8
-rw-r--r--drivers/net/xen-netback/xenbus.c57
-rw-r--r--drivers/net/xen-netfront.c11
-rw-r--r--drivers/nfc/Kconfig1
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/microread/i2c.c2
-rw-r--r--drivers/nfc/nfcmrvl/main.c4
-rw-r--r--drivers/nfc/nfcmrvl/usb.c18
-rw-r--r--drivers/nfc/nxp-nci/Kconfig25
-rw-r--r--drivers/nfc/nxp-nci/Makefile11
-rw-r--r--drivers/nfc/nxp-nci/core.c186
-rw-r--r--drivers/nfc/nxp-nci/firmware.c325
-rw-r--r--drivers/nfc/nxp-nci/i2c.c415
-rw-r--r--drivers/nfc/nxp-nci/nxp-nci.h89
-rw-r--r--drivers/nfc/pn533.c6
-rw-r--r--drivers/nfc/pn544/i2c.c7
-rw-r--r--drivers/nfc/port100.c36
-rw-r--r--drivers/nfc/st21nfca/st21nfca.c4
-rw-r--r--drivers/nfc/st21nfca/st21nfca_se.c11
-rw-r--r--drivers/nfc/st21nfcb/i2c.c9
-rw-r--r--drivers/nfc/st21nfcb/ndlc.c5
-rw-r--r--drivers/nfc/st21nfcb/st21nfcb_se.c16
-rw-r--r--drivers/of/Kconfig13
-rw-r--r--drivers/of/Makefile5
-rw-r--r--drivers/of/base.c95
-rw-r--r--drivers/of/fdt.c24
-rw-r--r--drivers/of/of_mdio.c3
-rw-r--r--drivers/of/of_net.c29
-rw-r--r--drivers/of/unittest-data/.gitignore2
-rw-r--r--drivers/of/unittest-data/Makefile7
-rw-r--r--drivers/of/unittest-data/tests-overlay.dtsi108
-rw-r--r--drivers/of/unittest.c806
-rw-r--r--drivers/oprofile/buffer_sync.c30
-rw-r--r--drivers/oprofile/oprofilefs.c16
-rw-r--r--drivers/parisc/ccio-dma.c60
-rw-r--r--drivers/parisc/eisa_enumerator.c2
-rw-r--r--drivers/parisc/iommu-helpers.h26
-rw-r--r--drivers/parisc/sba_iommu.c93
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/iov.c155
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/setup-bus.c95
-rw-r--r--drivers/pci/xen-pcifront.c5
-rw-r--r--drivers/pcmcia/Kconfig1
-rw-r--r--drivers/pcmcia/at91_cf.c13
-rw-r--r--drivers/pcmcia/omap_cf.c4
-rw-r--r--drivers/pcmcia/pd6729.c8
-rw-r--r--drivers/pcmcia/soc_common.c5
-rw-r--r--drivers/pcmcia/yenta_socket.c8
-rw-r--r--drivers/pinctrl/Kconfig36
-rw-r--r--drivers/pinctrl/Makefile6
-rw-r--r--drivers/pinctrl/bcm/Kconfig56
-rw-r--r--drivers/pinctrl/bcm/Makefile6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c (renamed from drivers/pinctrl/pinctrl-bcm281xx.c)4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c (renamed from drivers/pinctrl/pinctrl-bcm2835.c)28
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c907
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c1022
-rw-r--r--drivers/pinctrl/core.c10
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/devicetree.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c15
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c2
-rw-r--r--drivers/pinctrl/intel/Kconfig17
-rw-r--r--drivers/pinctrl/intel/Makefile2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c1149
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h128
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c336
-rw-r--r--drivers/pinctrl/mediatek/Kconfig26
-rw-r--r--drivers/pinctrl/mediatek/Makefile6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8135.c376
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8173.c455
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c1259
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.h229
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8135.h2114
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h1226
-rw-r--r--drivers/pinctrl/meson/Makefile2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c1050
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c899
-rw-r--r--drivers/pinctrl/mvebu/Kconfig4
-rw-r--r--drivers/pinctrl/mvebu/Makefile1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-375.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-39x.c432
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c42
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c2
-rw-r--r--drivers/pinctrl/nomadik/Kconfig2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/pinconf-generic.c54
-rw-r--r--drivers/pinctrl/pinctrl-amd.c869
-rw-r--r--drivers/pinctrl/pinctrl-amd.h261
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91.c60
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.h2
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c4
-rw-r--r--drivers/pinctrl/pinctrl-st.c85
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c28
-rw-r--r--drivers/pinctrl/pinctrl-tegra.h43
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c17
-rw-r--r--drivers/pinctrl/pinctrl-tegra124.c32
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c3
-rw-r--r--drivers/pinctrl/pinctrl-tegra210.c1588
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c116
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c2
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c15
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c154
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c79
-rw-r--r--drivers/pinctrl/sh-pfc/core.h7
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c31
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c91
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h37
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c26
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-vt8500.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8505.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8650.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8750.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8850.c2
-rw-r--r--drivers/platform/Kconfig3
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/chrome/Kconfig26
-rw-r--r--drivers/platform/chrome/Makefile3
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c35
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c274
-rw-r--r--drivers/platform/chrome/cros_ec_dev.h53
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c367
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c319
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c271
-rw-r--r--drivers/platform/mips/Kconfig30
-rw-r--r--drivers/platform/mips/Makefile2
-rw-r--r--drivers/platform/mips/acpi_init.c150
-rw-r--r--drivers/platform/mips/cpu_hwmon.c207
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/apple-gmux.c62
-rw-r--r--drivers/platform/x86/dell-laptop.c1089
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c6
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel_oaktrail.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c322
-rw-r--r--drivers/platform/x86/toshiba_acpi.c256
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c133
-rw-r--r--drivers/platform/x86/wmi.c5
-rw-r--r--drivers/pnp/base.h2
-rw-r--r--drivers/pnp/card.c25
-rw-r--r--drivers/pnp/core.c64
-rw-r--r--drivers/pnp/driver.c10
-rw-r--r--drivers/pnp/pnpacpi/core.c5
-rw-r--r--drivers/power/axp288_fuel_gauge.c1
-rw-r--r--drivers/power/bq27x00_battery.c8
-rw-r--r--drivers/power/collie_battery.c2
-rw-r--r--drivers/power/reset/Kconfig1
-rw-r--r--drivers/power/reset/at91-reset.c4
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c18
-rw-r--r--drivers/powercap/intel_rapl.c4
-rw-r--r--drivers/ps3/ps3-lpm.c4
-rw-r--r--drivers/ptp/ptp_chardev.c8
-rw-r--r--drivers/ptp/ptp_clock.c12
-rw-r--r--drivers/ptp/ptp_ixp46x.c8
-rw-r--r--drivers/ptp/ptp_pch.c8
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c4
-rw-r--r--drivers/pwm/pwm-img.c76
-rw-r--r--drivers/pwm/pwm-mxs.c8
-rw-r--r--drivers/pwm/pwm-pca9685.c2
-rw-r--r--drivers/pwm/pwm-samsung.c32
-rw-r--r--drivers/regulator/arizona-micsupp.c1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c1
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c15
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c1
-rw-r--r--drivers/rtc/Kconfig26
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/class.c6
-rw-r--r--drivers/rtc/hctosys.c6
-rw-r--r--drivers/rtc/interface.c5
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c307
-rw-r--r--drivers/rtc/rtc-armada38x.c26
-rw-r--r--drivers/rtc/rtc-at91rm9200.c4
-rw-r--r--drivers/rtc/rtc-cmos.c42
-rw-r--r--drivers/rtc/rtc-da9052.c97
-rw-r--r--drivers/rtc/rtc-digicolor.c227
-rw-r--r--drivers/rtc/rtc-ds1305.c6
-rw-r--r--drivers/rtc/rtc-ds1374.c8
-rw-r--r--drivers/rtc/rtc-ds1685.c9
-rw-r--r--drivers/rtc/rtc-ds3232.c6
-rw-r--r--drivers/rtc/rtc-efi-platform.c3
-rw-r--r--drivers/rtc/rtc-em3027.c11
-rw-r--r--drivers/rtc/rtc-hym8563.c12
-rw-r--r--drivers/rtc/rtc-m41t80.c6
-rw-r--r--drivers/rtc/rtc-max77686.c6
-rw-r--r--drivers/rtc/rtc-max8997.c8
-rw-r--r--drivers/rtc/rtc-mrst.c16
-rw-r--r--drivers/rtc/rtc-msm6242.c4
-rw-r--r--drivers/rtc/rtc-omap.c68
-rw-r--r--drivers/rtc/rtc-opal.c3
-rw-r--r--drivers/rtc/rtc-pcf8563.c7
-rw-r--r--drivers/rtc/rtc-s3c.c193
-rw-r--r--drivers/rtc/rtc-s5m.c94
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c66
-rw-r--r--drivers/rtc/rtc-tegra.c4
-rw-r--r--drivers/rtc/rtc-twl.c9
-rw-r--r--drivers/rtc/rtc-x1205.c4
-rw-r--r--drivers/s390/block/dasd.c55
-rw-r--r--drivers/s390/block/dasd_diag.h42
-rw-r--r--drivers/s390/block/dasd_eckd.c9
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c48
-rw-r--r--drivers/s390/char/sclp_sdias.c4
-rw-r--r--drivers/s390/char/zcore.c32
-rw-r--r--drivers/s390/cio/blacklist.c12
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/qdio.h7
-rw-r--r--drivers/s390/cio/qdio_setup.c3
-rw-r--r--drivers/s390/crypto/ap_bus.c24
-rw-r--r--drivers/s390/kvm/virtio_ccw.c10
-rw-r--r--drivers/s390/net/Kconfig13
-rw-r--r--drivers/s390/net/Makefile1
-rw-r--r--drivers/s390/net/claw.c3377
-rw-r--r--drivers/s390/net/claw.h348
-rw-r--r--drivers/s390/net/ctcm_mpc.c12
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/sbus/char/bbc_envctrl.c3
-rw-r--r--drivers/sbus/char/envctrl.c7
-rw-r--r--drivers/scsi/3w-9xxx.c57
-rw-r--r--drivers/scsi/3w-9xxx.h5
-rw-r--r--drivers/scsi/3w-sas.c50
-rw-r--r--drivers/scsi/3w-sas.h4
-rw-r--r--drivers/scsi/3w-xxxx.c42
-rw-r--r--drivers/scsi/3w-xxxx.h5
-rw-r--r--drivers/scsi/NCR5380.c10
-rw-r--r--drivers/scsi/aacraid/aachba.c410
-rw-r--r--drivers/scsi/aacraid/aacraid.h106
-rw-r--r--drivers/scsi/aacraid/commctrl.c10
-rw-r--r--drivers/scsi/aacraid/comminit.c106
-rw-r--r--drivers/scsi/aacraid/commsup.c96
-rw-r--r--drivers/scsi/aacraid/dpcsup.c13
-rw-r--r--drivers/scsi/aacraid/linit.c61
-rw-r--r--drivers/scsi/aacraid/rx.c14
-rw-r--r--drivers/scsi/aacraid/src.c438
-rw-r--r--drivers/scsi/aha1542.c1700
-rw-r--r--drivers/scsi/aha1542.h136
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c10
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c3
-rw-r--r--drivers/scsi/am53c974.c6
-rw-r--r--drivers/scsi/atari_NCR5380.c2
-rw-r--r--drivers/scsi/atari_scsi.c1
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/bfa/bfad.c22
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/g_NCR5380.c8
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/ipr.c319
-rw-r--r--drivers/scsi/ipr.h15
-rw-r--r--drivers/scsi/lpfc/lpfc.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c738
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c74
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h208
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c264
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c83
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c72
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/mac53c94.c10
-rw-r--r--drivers/scsi/mac_scsi.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c9
-rw-r--r--drivers/scsi/mesh.c14
-rw-r--r--drivers/scsi/mvsas/mv_sas.c5
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/qla2xxx/Kconfig3
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h16
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c79
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c31
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c174
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/scsi.c20
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_scan.c13
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/sd.c64
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sg.c15
-rw-r--r--drivers/scsi/storvsc_drv.c232
-rw-r--r--drivers/scsi/sun3_scsi.c1
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c36
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h26
-rw-r--r--drivers/scsi/ufs/ufshcd.c35
-rw-r--r--drivers/scsi/ufs/ufshcd.h9
-rw-r--r--drivers/scsi/xen-scsifront.c219
-rw-r--r--drivers/sh/pm_runtime.c7
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/mediatek/Kconfig11
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c975
-rw-r--r--drivers/soc/qcom/Kconfig1
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c152
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/spi/spi-bcm2835.c5
-rw-r--r--drivers/spi/spi-bitbang.c17
-rw-r--r--drivers/spi/spi-fsl-cpm.c40
-rw-r--r--drivers/spi/spi-fsl-espi.c45
-rw-r--r--drivers/spi/spi-omap2-mcspi.c16
-rw-r--r--drivers/spi/spi-rspi.c1
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/spmi/Kconfig1
-rw-r--r--drivers/spmi/spmi-pmic-arb.c319
-rw-r--r--drivers/spmi/spmi.c9
-rw-r--r--drivers/ssb/Kconfig1
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c2
-rw-r--r--drivers/ssb/driver_mipscore.c2
-rw-r--r--drivers/ssb/driver_pcicore.c9
-rw-r--r--drivers/ssb/main.c2
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/android/ion/ion.c9
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c18
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c16
-rw-r--r--drivers/staging/lustre/lustre/Kconfig1
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c28
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c8
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c39
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c51
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c49
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c83
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c59
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c37
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.h4
-rw-r--r--drivers/staging/media/mn88472/mn88472.c93
-rw-r--r--drivers/staging/media/mn88472/mn88472_priv.h2
-rw-r--r--drivers/staging/media/mn88473/mn88473.c133
-rw-r--r--drivers/staging/media/mn88473/mn88473_priv.h1
-rw-r--r--drivers/staging/media/omap4iss/Kconfig1
-rw-r--r--drivers/staging/media/omap4iss/iss.c11
-rw-r--r--drivers/staging/media/omap4iss/iss.h4
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c42
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.c12
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c48
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c58
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c52
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c8
-rw-r--r--drivers/staging/octeon/ethernet-tx.c5
-rw-r--r--drivers/staging/octeon/ethernet.c10
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c17
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c6
-rw-r--r--drivers/staging/sm750fb/sm750.c2
-rw-r--r--drivers/staging/unisys/include/timskmod.h1
-rw-r--r--drivers/staging/vt6655/card.c10
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/device_main.c42
-rw-r--r--drivers/staging/vt6656/rxtx.c14
-rw-r--r--drivers/target/Kconfig5
-rw-r--r--drivers/target/Makefile2
-rw-r--r--drivers/target/iscsi/Makefile1
-rw-r--r--drivers/target/iscsi/iscsi_target.c131
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c208
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c60
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c25
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c495
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.h84
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c1
-rw-r--r--drivers/target/loopback/tcm_loop.c242
-rw-r--r--drivers/target/loopback/tcm_loop.h1
-rw-r--r--drivers/target/sbp/sbp_target.c68
-rw-r--r--drivers/target/target_core_configfs.c192
-rw-r--r--drivers/target/target_core_fabric_configfs.c38
-rw-r--r--drivers/target/target_core_file.c261
-rw-r--r--drivers/target/target_core_iblock.c4
-rw-r--r--drivers/target/target_core_internal.h6
-rw-r--r--drivers/target/target_core_pr.c48
-rw-r--r--drivers/target/target_core_rd.c137
-rw-r--r--drivers/target/target_core_sbc.c109
-rw-r--r--drivers/target/target_core_spc.c16
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_tpg.c2
-rw-r--r--drivers/target/target_core_transport.c162
-rw-r--r--drivers/target/target_core_user.c52
-rw-r--r--drivers/target/target_core_xcopy.c46
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c89
-rw-r--r--drivers/thermal/intel_powerclamp.c89
-rw-r--r--drivers/thermal/rockchip_thermal.c2
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/tty/Kconfig47
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/goldfish.c2
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c18
-rw-r--r--drivers/tty/ipwireless/hardware.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c1303
-rw-r--r--drivers/tty/n_gsm.c17
-rw-r--r--drivers/tty/n_hdlc.c4
-rw-r--r--drivers/tty/n_tty.c22
-rw-r--r--drivers/tty/pty.c5
-rw-r--r--drivers/tty/serial/8250/8250.h23
-rw-r--r--drivers/tty/serial/8250/8250_core.c513
-rw-r--r--drivers/tty/serial/8250/8250_dw.c58
-rw-r--r--drivers/tty/serial/8250/8250_early.c74
-rw-r--r--drivers/tty/serial/8250/8250_em.c1
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c13
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c1
-rw-r--r--drivers/tty/serial/8250/8250_pci.c439
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig32
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c235
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c31
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c4
-rw-r--r--drivers/tty/serial/bfin_uart.c2
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/cpm_uart/Makefile2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/earlycon.c135
-rw-r--r--drivers/tty/serial/fsl_lpuart.c2
-rw-r--r--drivers/tty/serial/imx.c317
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c2
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c6
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/mfd.c1505
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c2
-rw-r--r--drivers/tty/serial/msm_serial.h9
-rw-r--r--drivers/tty/serial/msm_serial_hs.c1874
-rw-r--r--drivers/tty/serial/mxs-auart.c18
-rw-r--r--drivers/tty/serial/of_serial.c9
-rw-r--r--drivers/tty/serial/omap-serial.c12
-rw-r--r--drivers/tty/serial/pmac_zilog.c2
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/samsung.c5
-rw-r--r--drivers/tty/serial/sc16is7xx.c46
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c54
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c50
-rw-r--r--drivers/tty/serial/sh-sci.c88
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c2
-rw-r--r--drivers/tty/serial/sprd_serial.c6
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/uartlite.c13
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c254
-rw-r--r--drivers/tty/tty_buffer.c41
-rw-r--r--drivers/tty/tty_io.c24
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/tty/vt/vt.c74
-rw-r--r--drivers/tty/vt/vt_ioctl.c2
-rw-r--r--drivers/uio/uio.c12
-rw-r--r--drivers/usb/chipidea/debug.c6
-rw-r--r--drivers/usb/chipidea/otg_fsm.c4
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c94
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c8
-rw-r--r--drivers/usb/gadget/function/f_hid.c16
-rw-r--r--drivers/usb/gadget/function/f_uvc.c40
-rw-r--r--drivers/usb/gadget/function/u_serial.c5
-rw-r--r--drivers/usb/gadget/function/uvc.h3
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c79
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h4
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c8
-rw-r--r--drivers/usb/gadget/function/uvc_video.c3
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c10
-rw-r--r--drivers/usb/gadget/legacy/audio.c10
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c10
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c4
-rw-r--r--drivers/usb/gadget/legacy/ether.c12
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c10
-rw-r--r--drivers/usb/gadget/legacy/hid.c12
-rw-r--r--drivers/usb/gadget/legacy/inode.c10
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c6
-rw-r--r--drivers/usb/gadget/legacy/multi.c10
-rw-r--r--drivers/usb/gadget/legacy/ncm.c10
-rw-r--r--drivers/usb/gadget/legacy/nokia.c10
-rw-r--r--drivers/usb/gadget/legacy/printer.c8
-rw-r--r--drivers/usb/gadget/legacy/serial.c4
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c59
-rw-r--r--drivers/usb/gadget/legacy/webcam.c8
-rw-r--r--drivers/usb/gadget/legacy/zero.c4
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c4
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c4
-rw-r--r--drivers/usb/host/ehci-msm.c13
-rw-r--r--drivers/usb/host/xhci-ring.c7
-rw-r--r--drivers/usb/host/xhci-trace.h7
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/misc/ldusb.c10
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/visor.c2
-rw-r--r--drivers/usb/storage/uas-detect.h11
-rw-r--r--drivers/usb/storage/uas.c16
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/usb.c8
-rw-r--r--drivers/vfio/Kconfig6
-rw-r--r--drivers/vfio/Makefile4
-rw-r--r--drivers/vfio/pci/Kconfig1
-rw-r--r--drivers/vfio/pci/vfio_pci.c196
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c238
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h3
-rw-r--r--drivers/vfio/platform/Kconfig20
-rw-r--r--drivers/vfio/platform/Makefile8
-rw-r--r--drivers/vfio/platform/vfio_amba.c115
-rw-r--r--drivers/vfio/platform/vfio_platform.c103
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c521
-rw-r--r--drivers/vfio/platform/vfio_platform_irq.c336
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h85
-rw-r--r--drivers/vfio/vfio.c34
-rw-r--r--drivers/vfio/virqfd.c226
-rw-r--r--drivers/vhost/net.c10
-rw-r--r--drivers/vhost/scsi.c126
-rw-r--r--drivers/video/backlight/88pm860x_bl.c5
-rw-r--r--drivers/video/backlight/Kconfig10
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/sky81452-backlight.c353
-rw-r--r--drivers/video/fbdev/Kconfig4
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c4
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c24
-rw-r--r--drivers/video/fbdev/aty/radeon_monitor.c20
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c16
-rw-r--r--drivers/video/fbdev/aty/radeonfb.h4
-rw-r--r--drivers/video/fbdev/controlfb.c2
-rw-r--r--drivers/video/fbdev/core/fbmon.c4
-rw-r--r--drivers/video/fbdev/hyperv_fb.c6
-rw-r--r--drivers/video/fbdev/imsttfb.c6
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/nvidia/Makefile3
-rw-r--r--drivers/video/fbdev/nvidia/nv_of.c3
-rw-r--r--drivers/video/fbdev/nvidia/nv_proto.h8
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c4
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-dvi.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c11
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/core.c4
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc.c147
-rw-r--r--drivers/video/fbdev/omap2/dss/display.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/dsi.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.c3
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5_core.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/omapdss-boot-init.c7
-rw-r--r--drivers/video/fbdev/omap2/dss/rfbi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c4
-rw-r--r--drivers/video/fbdev/platinumfb.c2
-rw-r--r--drivers/video/fbdev/pm3fb.c6
-rw-r--r--drivers/video/fbdev/pxafb.c6
-rw-r--r--drivers/video/fbdev/riva/fbdev.c17
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c15
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/via/via_clock.c2
-rw-r--r--drivers/virtio/Kconfig10
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_balloon.c21
-rw-r--r--drivers/virtio/virtio_input.c384
-rw-r--r--drivers/virtio/virtio_mmio.c8
-rw-r--r--drivers/virtio/virtio_pci_modern.c123
-rw-r--r--drivers/w1/masters/mxc_w1.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/masters/w1-gpio.c2
-rw-r--r--drivers/watchdog/Kconfig4
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c61
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c27
-rw-r--r--drivers/watchdog/diag288_wdt.c20
-rw-r--r--drivers/watchdog/iTCO_wdt.c51
-rw-r--r--drivers/watchdog/octeon-wdt-main.c201
-rw-r--r--drivers/watchdog/pnx4008_wdt.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c21
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c4
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile3
-rw-r--r--drivers/xen/events/events_2l.c10
-rw-r--r--drivers/xen/events/events_base.c7
-rw-r--r--drivers/xen/gntdev.c28
-rw-r--r--drivers/xen/grant-table.c28
-rw-r--r--drivers/xen/manage.c9
-rw-r--r--drivers/xen/mcelog.c25
-rw-r--r--drivers/xen/pci.c15
-rw-r--r--drivers/xen/pcpu.c44
-rw-r--r--drivers/xen/privcmd.c117
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/tmem.c16
-rw-r--r--drivers/xen/xen-balloon.c45
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xen-pciback/conf_space.h2
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c17
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c4
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c164
-rw-r--r--drivers/xen/xenbus/xenbus_client.c387
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c29
-rw-r--r--drivers/xen/xlate_mmu.c143
-rw-r--r--firmware/ihex2fw.c1
-rw-r--r--fs/9p/acl.c4
-rw-r--r--fs/9p/v9fs.h1
-rw-r--r--fs/9p/v9fs_vfs.h4
-rw-r--r--fs/9p/vfs_addr.c87
-rw-r--r--fs/9p/vfs_dentry.c4
-rw-r--r--fs/9p/vfs_dir.c17
-rw-r--r--fs/9p/vfs_file.c326
-rw-r--r--fs/9p/vfs_inode.c34
-rw-r--r--fs/9p/vfs_inode_dotl.c16
-rw-r--r--fs/9p/vfs_super.c8
-rw-r--r--fs/9p/xattr.c80
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/Kconfig.binfmt3
-rw-r--r--fs/Makefile1
-rw-r--r--fs/adfs/dir_fplus.c1
-rw-r--r--fs/adfs/file.c2
-rw-r--r--fs/adfs/inode.c2
-rw-r--r--fs/adfs/super.c20
-rw-r--r--fs/affs/affs.h28
-rw-r--r--fs/affs/amigaffs.c10
-rw-r--r--fs/affs/file.c15
-rw-r--r--fs/affs/inode.c34
-rw-r--r--fs/affs/namei.c16
-rw-r--r--fs/affs/super.c43
-rw-r--r--fs/afs/dir.c42
-rw-r--r--fs/afs/file.c2
-rw-r--r--fs/afs/inode.c4
-rw-r--r--fs/afs/misc.c16
-rw-r--r--fs/afs/mntpt.c8
-rw-r--r--fs/afs/rxrpc.c5
-rw-r--r--fs/afs/super.c2
-rw-r--r--fs/afs/write.c1
-rw-r--r--fs/aio.c258
-rw-r--r--fs/autofs4/autofs_i.h6
-rw-r--r--fs/autofs4/expire.c2
-rw-r--r--fs/autofs4/inode.c6
-rw-r--r--fs/autofs4/root.c18
-rw-r--r--fs/autofs4/symlink.c2
-rw-r--r--fs/autofs4/waitq.c6
-rw-r--r--fs/befs/befs.h22
-rw-r--r--fs/befs/datastream.c4
-rw-r--r--fs/befs/io.c2
-rw-r--r--fs/befs/linuxvfs.c16
-rw-r--r--fs/befs/super.c4
-rw-r--r--fs/bfs/dir.c12
-rw-r--r--fs/bfs/file.c2
-rw-r--r--fs/bfs/inode.c1
-rw-r--r--fs/binfmt_elf.c31
-rw-r--r--fs/binfmt_misc.c46
-rw-r--r--fs/block_dev.c27
-rw-r--r--fs/btrfs/async-thread.c4
-rw-r--r--fs/btrfs/async-thread.h2
-rw-r--r--fs/btrfs/backref.c4
-rw-r--r--fs/btrfs/btrfs_inode.h14
-rw-r--r--fs/btrfs/check-integrity.c9
-rw-r--r--fs/btrfs/compression.c4
-rw-r--r--fs/btrfs/compression.h4
-rw-r--r--fs/btrfs/ctree.c62
-rw-r--r--fs/btrfs/ctree.h46
-rw-r--r--fs/btrfs/delayed-inode.c11
-rw-r--r--fs/btrfs/delayed-ref.c22
-rw-r--r--fs/btrfs/delayed-ref.h10
-rw-r--r--fs/btrfs/dev-replace.c6
-rw-r--r--fs/btrfs/disk-io.c570
-rw-r--r--fs/btrfs/disk-io.h4
-rw-r--r--fs/btrfs/export.c6
-rw-r--r--fs/btrfs/extent-tree.c561
-rw-r--r--fs/btrfs/extent_io.c78
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/file-item.c6
-rw-r--r--fs/btrfs/file.c97
-rw-r--r--fs/btrfs/free-space-cache.c351
-rw-r--r--fs/btrfs/free-space-cache.h9
-rw-r--r--fs/btrfs/inode-map.c2
-rw-r--r--fs/btrfs/inode.c227
-rw-r--r--fs/btrfs/ioctl.c58
-rw-r--r--fs/btrfs/lzo.c2
-rw-r--r--fs/btrfs/math.h6
-rw-r--r--fs/btrfs/ordered-data.c14
-rw-r--r--fs/btrfs/props.c2
-rw-r--r--fs/btrfs/qgroup.c348
-rw-r--r--fs/btrfs/qgroup.h3
-rw-r--r--fs/btrfs/raid56.c16
-rw-r--r--fs/btrfs/relocation.c11
-rw-r--r--fs/btrfs/scrub.c25
-rw-r--r--fs/btrfs/send.c83
-rw-r--r--fs/btrfs/super.c31
-rw-r--r--fs/btrfs/sysfs.c2
-rw-r--r--fs/btrfs/sysfs.h22
-rw-r--r--fs/btrfs/tests/qgroup-tests.c4
-rw-r--r--fs/btrfs/transaction.c54
-rw-r--r--fs/btrfs/transaction.h12
-rw-r--r--fs/btrfs/tree-log.c396
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/volumes.c155
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/btrfs/xattr.c69
-rw-r--r--fs/btrfs/zlib.c2
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/cachefiles/bind.c10
-rw-r--r--fs/cachefiles/interface.c6
-rw-r--r--fs/cachefiles/namei.c122
-rw-r--r--fs/cachefiles/rdwr.c14
-rw-r--r--fs/cachefiles/security.c6
-rw-r--r--fs/cachefiles/xattr.c22
-rw-r--r--fs/ceph/addr.c41
-rw-r--r--fs/ceph/caps.c53
-rw-r--r--fs/ceph/debugfs.c2
-rw-r--r--fs/ceph/dir.c108
-rw-r--r--fs/ceph/export.c28
-rw-r--r--fs/ceph/file.c31
-rw-r--r--fs/ceph/inode.c52
-rw-r--r--fs/ceph/mds_client.c85
-rw-r--r--fs/ceph/strings.c1
-rw-r--r--fs/ceph/super.c60
-rw-r--r--fs/ceph/super.h4
-rw-r--r--fs/ceph/xattr.c39
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifsfs.c14
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c6
-rw-r--r--fs/cifs/dir.c8
-rw-r--r--fs/cifs/file.c141
-rw-r--r--fs/cifs/inode.c32
-rw-r--r--fs/cifs/link.c12
-rw-r--r--fs/cifs/misc.c2
-rw-r--r--fs/cifs/readdir.c4
-rw-r--r--fs/cifs/smb1ops.c2
-rw-r--r--fs/cifs/smb2file.c4
-rw-r--r--fs/cifs/smb2misc.c4
-rw-r--r--fs/cifs/smb2ops.c10
-rw-r--r--fs/cifs/xattr.c22
-rw-r--r--fs/coda/cache.c4
-rw-r--r--fs/coda/dir.c22
-rw-r--r--fs/coda/file.c38
-rw-r--r--fs/coda/inode.c6
-rw-r--r--fs/coda/pioctl.c2
-rw-r--r--fs/coda/upcall.c4
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/configfs/dir.c70
-rw-r--r--fs/configfs/file.c4
-rw-r--r--fs/configfs/inode.c16
-rw-r--r--fs/configfs/mount.c2
-rw-r--r--fs/coredump.c2
-rw-r--r--fs/dax.c48
-rw-r--r--fs/dcache.c49
-rw-r--r--fs/debugfs/file.c2
-rw-r--r--fs/debugfs/inode.c58
-rw-r--r--fs/devpts/inode.c16
-rw-r--r--fs/direct-io.c51
-rw-r--r--fs/ecryptfs/crypto.c4
-rw-r--r--fs/ecryptfs/dentry.c6
-rw-r--r--fs/ecryptfs/file.c11
-rw-r--r--fs/ecryptfs/inode.c164
-rw-r--r--fs/ecryptfs/kthread.c2
-rw-r--r--fs/ecryptfs/main.c6
-rw-r--r--fs/ecryptfs/mmap.c2
-rw-r--r--fs/efivarfs/inode.c4
-rw-r--r--fs/efivarfs/super.c4
-rw-r--r--fs/efs/namei.c4
-rw-r--r--fs/exec.c91
-rw-r--r--fs/exofs/dir.c4
-rw-r--r--fs/exofs/file.c2
-rw-r--r--fs/exofs/inode.c6
-rw-r--r--fs/exofs/namei.c10
-rw-r--r--fs/exofs/super.c2
-rw-r--r--fs/exofs/symlink.c2
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h1
-rw-r--r--fs/ext2/file.c21
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c20
-rw-r--r--fs/ext2/namei.c24
-rw-r--r--fs/ext2/symlink.c2
-rw-r--r--fs/ext2/xattr.c4
-rw-r--r--fs/ext2/xattr_security.c4
-rw-r--r--fs/ext2/xattr_trusted.c4
-rw-r--r--fs/ext2/xattr_user.c4
-rw-r--r--fs/ext3/file.c2
-rw-r--r--fs/ext3/ialloc.c2
-rw-r--r--fs/ext3/inode.c18
-rw-r--r--fs/ext3/namei.c34
-rw-r--r--fs/ext3/super.c8
-rw-r--r--fs/ext3/symlink.c2
-rw-r--r--fs/ext3/xattr.c13
-rw-r--r--fs/ext3/xattr_security.c4
-rw-r--r--fs/ext3/xattr_trusted.c4
-rw-r--r--fs/ext3/xattr_user.c4
-rw-r--r--fs/ext4/Kconfig22
-rw-r--r--fs/ext4/Makefile4
-rw-r--r--fs/ext4/acl.c5
-rw-r--r--fs/ext4/balloc.c3
-rw-r--r--fs/ext4/bitmap.c1
-rw-r--r--fs/ext4/block_validity.c1
-rw-r--r--fs/ext4/crypto.c558
-rw-r--r--fs/ext4/crypto_fname.c719
-rw-r--r--fs/ext4/crypto_key.c166
-rw-r--r--fs/ext4/crypto_policy.c198
-rw-r--r--fs/ext4/dir.c81
-rw-r--r--fs/ext4/ext4.h185
-rw-r--r--fs/ext4/ext4_crypto.h156
-rw-r--r--fs/ext4/ext4_jbd2.c6
-rw-r--r--fs/ext4/extents.c106
-rw-r--r--fs/ext4/extents_status.c10
-rw-r--r--fs/ext4/file.c77
-rw-r--r--fs/ext4/fsync.c3
-rw-r--r--fs/ext4/hash.c1
-rw-r--r--fs/ext4/ialloc.c30
-rw-r--r--fs/ext4/indirect.c33
-rw-r--r--fs/ext4/inline.c20
-rw-r--r--fs/ext4/inode.c176
-rw-r--r--fs/ext4/ioctl.c86
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/namei.c621
-rw-r--r--fs/ext4/page-io.c48
-rw-r--r--fs/ext4/readpage.c328
-rw-r--r--fs/ext4/resize.c7
-rw-r--r--fs/ext4/super.c66
-rw-r--r--fs/ext4/symlink.c99
-rw-r--r--fs/ext4/xattr.c14
-rw-r--r--fs/ext4/xattr.h3
-rw-r--r--fs/ext4/xattr_security.c4
-rw-r--r--fs/ext4/xattr_trusted.c4
-rw-r--r--fs/ext4/xattr_user.c4
-rw-r--r--fs/f2fs/Kconfig2
-rw-r--r--fs/f2fs/acl.c14
-rw-r--r--fs/f2fs/checkpoint.c38
-rw-r--r--fs/f2fs/data.c759
-rw-r--r--fs/f2fs/debug.c22
-rw-r--r--fs/f2fs/dir.c93
-rw-r--r--fs/f2fs/f2fs.h175
-rw-r--r--fs/f2fs/file.c70
-rw-r--r--fs/f2fs/gc.c6
-rw-r--r--fs/f2fs/inline.c69
-rw-r--r--fs/f2fs/inode.c25
-rw-r--r--fs/f2fs/namei.c97
-rw-r--r--fs/f2fs/node.c18
-rw-r--r--fs/f2fs/node.h1
-rw-r--r--fs/f2fs/recovery.c76
-rw-r--r--fs/f2fs/segment.c17
-rw-r--r--fs/f2fs/segment.h3
-rw-r--r--fs/f2fs/super.c39
-rw-r--r--fs/f2fs/xattr.c14
-rw-r--r--fs/fat/cache.c2
-rw-r--r--fs/fat/dir.c4
-rw-r--r--fs/fat/fat.h5
-rw-r--r--fs/fat/fatent.c3
-rw-r--r--fs/fat/file.c10
-rw-r--r--fs/fat/inode.c23
-rw-r--r--fs/fat/misc.c4
-rw-r--r--fs/fat/namei_msdos.c10
-rw-r--r--fs/fat/namei_vfat.c16
-rw-r--r--fs/fat/nfs.c4
-rw-r--r--fs/file.c3
-rw-r--r--fs/file_table.c4
-rw-r--r--fs/freevxfs/vxfs_immed.c2
-rw-r--r--fs/fs_pin.c4
-rw-r--r--fs/fuse/control.c6
-rw-r--r--fs/fuse/cuse.c27
-rw-r--r--fs/fuse/dev.c64
-rw-r--r--fs/fuse/dir.c60
-rw-r--r--fs/fuse/file.c151
-rw-r--r--fs/fuse/fuse_i.h1
-rw-r--r--fs/fuse/inode.c4
-rw-r--r--fs/gfs2/acl.c6
-rw-r--r--fs/gfs2/aops.c24
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/dentry.c12
-rw-r--r--fs/gfs2/export.c8
-rw-r--r--fs/gfs2/file.c108
-rw-r--r--fs/gfs2/glock.c47
-rw-r--r--fs/gfs2/incore.h4
-rw-r--r--fs/gfs2/inode.c52
-rw-r--r--fs/gfs2/ops_fstype.c8
-rw-r--r--fs/gfs2/quota.c90
-rw-r--r--fs/gfs2/quota.h8
-rw-r--r--fs/gfs2/rgrp.c20
-rw-r--r--fs/gfs2/rgrp.h3
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/gfs2/xattr.c8
-rw-r--r--fs/hfs/attr.c6
-rw-r--r--fs/hfs/dir.c12
-rw-r--r--fs/hfs/inode.c14
-rw-r--r--fs/hfs/sysdep.c2
-rw-r--r--fs/hfsplus/bfind.c4
-rw-r--r--fs/hfsplus/catalog.c3
-rw-r--r--fs/hfsplus/dir.c16
-rw-r--r--fs/hfsplus/inode.c21
-rw-r--r--fs/hfsplus/ioctl.c14
-rw-r--r--fs/hfsplus/xattr.c90
-rw-r--r--fs/hfsplus/xattr.h22
-rw-r--r--fs/hfsplus/xattr_security.c38
-rw-r--r--fs/hfsplus/xattr_trusted.c37
-rw-r--r--fs/hfsplus/xattr_user.c35
-rw-r--r--fs/hostfs/hostfs.h6
-rw-r--r--fs/hostfs/hostfs_kern.c116
-rw-r--r--fs/hostfs/hostfs_user.c29
-rw-r--r--fs/hpfs/file.c2
-rw-r--r--fs/hpfs/inode.c2
-rw-r--r--fs/hpfs/namei.c8
-rw-r--r--fs/hppfs/hppfs.c20
-rw-r--r--fs/hugetlbfs/inode.c187
-rw-r--r--fs/inode.c20
-rw-r--r--fs/isofs/export.c2
-rw-r--r--fs/jbd2/recovery.c10
-rw-r--r--fs/jbd2/revoke.c18
-rw-r--r--fs/jbd2/transaction.c25
-rw-r--r--fs/jffs2/dir.c40
-rw-r--r--fs/jffs2/file.c2
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/security.c4
-rw-r--r--fs/jffs2/super.c4
-rw-r--r--fs/jffs2/symlink.c2
-rw-r--r--fs/jffs2/xattr.c5
-rw-r--r--fs/jffs2/xattr_trusted.c4
-rw-r--r--fs/jffs2/xattr_user.c4
-rw-r--r--fs/jfs/file.c4
-rw-r--r--fs/jfs/inode.c10
-rw-r--r--fs/jfs/jfs_metapage.c31
-rw-r--r--fs/jfs/jfs_metapage.h1
-rw-r--r--fs/jfs/namei.c18
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/jfs/symlink.c2
-rw-r--r--fs/jfs/xattr.c12
-rw-r--r--fs/kernfs/dir.c11
-rw-r--r--fs/kernfs/inode.c8
-rw-r--r--fs/libfs.c26
-rw-r--r--fs/lockd/svcsubs.c2
-rw-r--r--fs/locks.c94
-rw-r--r--fs/logfs/dir.c14
-rw-r--r--fs/logfs/file.c4
-rw-r--r--fs/minix/dir.c4
-rw-r--r--fs/minix/file.c4
-rw-r--r--fs/minix/inode.c4
-rw-r--r--fs/minix/namei.c10
-rw-r--r--fs/namei.c202
-rw-r--r--fs/namespace.c148
-rw-r--r--fs/ncpfs/dir.c48
-rw-r--r--fs/ncpfs/file.c88
-rw-r--r--fs/ncpfs/inode.c6
-rw-r--r--fs/ncpfs/ioctl.c8
-rw-r--r--fs/ncpfs/ncplib_kernel.c8
-rw-r--r--fs/ncpfs/ncplib_kernel.h2
-rw-r--r--fs/ncpfs/symlink.c2
-rw-r--r--fs/nfs/Kconfig2
-rw-r--r--fs/nfs/Makefile2
-rw-r--r--fs/nfs/blocklayout/blocklayout.c1
-rw-r--r--fs/nfs/blocklayout/dev.c2
-rw-r--r--fs/nfs/callback.c6
-rw-r--r--fs/nfs/client.c1
-rw-r--r--fs/nfs/delegation.c4
-rw-r--r--fs/nfs/dir.c63
-rw-r--r--fs/nfs/direct.c93
-rw-r--r--fs/nfs/file.c21
-rw-r--r--fs/nfs/filelayout/filelayout.c10
-rw-r--r--fs/nfs/filelayout/filelayoutdev.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c12
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c2
-rw-r--r--fs/nfs/getroot.c4
-rw-r--r--fs/nfs/inode.c54
-rw-r--r--fs/nfs/namespace.c10
-rw-r--r--fs/nfs/nfs3acl.c2
-rw-r--r--fs/nfs/nfs3proc.c12
-rw-r--r--fs/nfs/nfs42proc.c31
-rw-r--r--fs/nfs/nfs42xdr.c20
-rw-r--r--fs/nfs/nfs4client.c4
-rw-r--r--fs/nfs/nfs4file.c26
-rw-r--r--fs/nfs/nfs4idmap.c (renamed from fs/nfs/idmap.c)2
-rw-r--r--fs/nfs/nfs4idmap.h (renamed from include/linux/nfs_idmap.h)13
-rw-r--r--fs/nfs/nfs4namespace.c4
-rw-r--r--fs/nfs/nfs4proc.c86
-rw-r--r--fs/nfs/nfs4state.c6
-rw-r--r--fs/nfs/nfs4super.c7
-rw-r--r--fs/nfs/nfs4sysctl.c2
-rw-r--r--fs/nfs/nfs4trace.h4
-rw-r--r--fs/nfs/nfs4xdr.c22
-rw-r--r--fs/nfs/nfstrace.c3
-rw-r--r--fs/nfs/objlayout/objio_osd.c4
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/pnfs.c68
-rw-r--r--fs/nfs/pnfs.h28
-rw-r--r--fs/nfs/pnfs_dev.c21
-rw-r--r--fs/nfs/pnfs_nfs.c12
-rw-r--r--fs/nfs/proc.c4
-rw-r--r--fs/nfs/read.c10
-rw-r--r--fs/nfs/super.c10
-rw-r--r--fs/nfs/symlink.c2
-rw-r--r--fs/nfs/unlink.c20
-rw-r--r--fs/nfs/write.c31
-rw-r--r--fs/nfsd/Kconfig3
-rw-r--r--fs/nfsd/blocklayout.c11
-rw-r--r--fs/nfsd/export.c8
-rw-r--r--fs/nfsd/nfs2acl.c8
-rw-r--r--fs/nfsd/nfs3acl.c8
-rw-r--r--fs/nfsd/nfs3proc.c6
-rw-r--r--fs/nfsd/nfs3xdr.c16
-rw-r--r--fs/nfsd/nfs4acl.c54
-rw-r--r--fs/nfsd/nfs4callback.c119
-rw-r--r--fs/nfsd/nfs4proc.c24
-rw-r--r--fs/nfsd/nfs4recover.c22
-rw-r--r--fs/nfsd/nfs4state.c174
-rw-r--r--fs/nfsd/nfs4xdr.c26
-rw-r--r--fs/nfsd/nfsctl.c16
-rw-r--r--fs/nfsd/nfsd.h2
-rw-r--r--fs/nfsd/nfsfh.c20
-rw-r--r--fs/nfsd/nfsfh.h6
-rw-r--r--fs/nfsd/nfsproc.c4
-rw-r--r--fs/nfsd/nfsxdr.c2
-rw-r--r--fs/nfsd/state.h19
-rw-r--r--fs/nfsd/vfs.c62
-rw-r--r--fs/nfsd/xdr4.h6
-rw-r--r--fs/nilfs2/alloc.c5
-rw-r--r--fs/nilfs2/bmap.c48
-rw-r--r--fs/nilfs2/bmap.h13
-rw-r--r--fs/nilfs2/btree.c65
-rw-r--r--fs/nilfs2/cpfile.c58
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/nilfs2/direct.c17
-rw-r--r--fs/nilfs2/file.c2
-rw-r--r--fs/nilfs2/inode.c39
-rw-r--r--fs/nilfs2/mdt.c54
-rw-r--r--fs/nilfs2/mdt.h10
-rw-r--r--fs/nilfs2/namei.c18
-rw-r--r--fs/nilfs2/page.c24
-rw-r--r--fs/nilfs2/segment.c17
-rw-r--r--fs/nilfs2/super.c8
-rw-r--r--fs/nsfs.c4
-rw-r--r--fs/ntfs/Makefile2
-rw-r--r--fs/ntfs/file.c778
-rw-r--r--fs/ntfs/inode.c3
-rw-r--r--fs/ntfs/namei.c4
-rw-r--r--fs/ocfs2/alloc.c48
-rw-r--r--fs/ocfs2/aops.c178
-rw-r--r--fs/ocfs2/aops.h2
-rw-r--r--fs/ocfs2/cluster/masklog.h5
-rw-r--r--fs/ocfs2/dcache.c14
-rw-r--r--fs/ocfs2/dir.c15
-rw-r--r--fs/ocfs2/dir.h2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c13
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/dlmglue.c5
-rw-r--r--fs/ocfs2/export.c4
-rw-r--r--fs/ocfs2/file.c140
-rw-r--r--fs/ocfs2/inode.c6
-rw-r--r--fs/ocfs2/localalloc.c4
-rw-r--r--fs/ocfs2/namei.c20
-rw-r--r--fs/ocfs2/refcounttree.c12
-rw-r--r--fs/ocfs2/slot_map.c4
-rw-r--r--fs/ocfs2/stack_o2cb.c2
-rw-r--r--fs/ocfs2/stack_user.c8
-rw-r--r--fs/ocfs2/suballoc.c2
-rw-r--r--fs/ocfs2/super.c37
-rw-r--r--fs/ocfs2/xattr.c30
-rw-r--r--fs/omfs/dir.c10
-rw-r--r--fs/omfs/file.c4
-rw-r--r--fs/open.c17
-rw-r--r--fs/pipe.c5
-rw-r--r--fs/pnode.c60
-rw-r--r--fs/pnode.h7
-rw-r--r--fs/posix_acl.c8
-rw-r--r--fs/proc/array.c26
-rw-r--r--fs/proc/base.c114
-rw-r--r--fs/proc/fd.c33
-rw-r--r--fs/proc/generic.c4
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/namespaces.c4
-rw-r--r--fs/proc/proc_net.c2
-rw-r--r--fs/proc/proc_sysctl.c12
-rw-r--r--fs/proc/root.c2
-rw-r--r--fs/proc/self.c2
-rw-r--r--fs/proc/thread_self.c2
-rw-r--r--fs/pstore/inode.c13
-rw-r--r--fs/pstore/ram.c3
-rw-r--r--fs/qnx6/inode.c2
-rw-r--r--fs/quota/dquot.c161
-rw-r--r--fs/quota/quota.c217
-rw-r--r--fs/quota/quota_tree.c7
-rw-r--r--fs/quota/quota_v2.c12
-rw-r--r--fs/quota/quotaio_v2.h6
-rw-r--r--fs/ramfs/file-mmu.c2
-rw-r--r--fs/ramfs/file-nommu.c4
-rw-r--r--fs/read_write.c213
-rw-r--r--fs/reiserfs/dir.c4
-rw-r--r--fs/reiserfs/file.c2
-rw-r--r--fs/reiserfs/inode.c12
-rw-r--r--fs/reiserfs/namei.c12
-rw-r--r--fs/reiserfs/reiserfs.h1
-rw-r--r--fs/reiserfs/super.c6
-rw-r--r--fs/reiserfs/xattr.c126
-rw-r--r--fs/reiserfs/xattr.h2
-rw-r--r--fs/reiserfs/xattr_security.c10
-rw-r--r--fs/reiserfs/xattr_trusted.c10
-rw-r--r--fs/reiserfs/xattr_user.c4
-rw-r--r--fs/romfs/mmap-nommu.c1
-rw-r--r--fs/splice.c43
-rw-r--r--fs/squashfs/export.c2
-rw-r--r--fs/squashfs/xattr.c8
-rw-r--r--fs/stat.c6
-rw-r--r--fs/super.c2
-rw-r--r--fs/sysv/dir.c4
-rw-r--r--fs/sysv/file.c4
-rw-r--r--fs/sysv/itree.c2
-rw-r--r--fs/sysv/namei.c10
-rw-r--r--fs/sysv/symlink.c2
-rw-r--r--fs/tracefs/Makefile4
-rw-r--r--fs/tracefs/inode.c650
-rw-r--r--fs/ubifs/budget.c2
-rw-r--r--fs/ubifs/commit.c12
-rw-r--r--fs/ubifs/compress.c22
-rw-r--r--fs/ubifs/debug.c186
-rw-r--r--fs/ubifs/dir.c37
-rw-r--r--fs/ubifs/file.c24
-rw-r--r--fs/ubifs/io.c40
-rw-r--r--fs/ubifs/ioctl.c2
-rw-r--r--fs/ubifs/journal.c21
-rw-r--r--fs/ubifs/log.c4
-rw-r--r--fs/ubifs/lprops.c62
-rw-r--r--fs/ubifs/lpt.c59
-rw-r--r--fs/ubifs/lpt_commit.c34
-rw-r--r--fs/ubifs/master.c6
-rw-r--r--fs/ubifs/orphan.c26
-rw-r--r--fs/ubifs/recovery.c44
-rw-r--r--fs/ubifs/replay.c34
-rw-r--r--fs/ubifs/sb.c30
-rw-r--r--fs/ubifs/scan.c24
-rw-r--r--fs/ubifs/super.c107
-rw-r--r--fs/ubifs/tnc.c20
-rw-r--r--fs/ubifs/tnc_commit.c12
-rw-r--r--fs/ubifs/tnc_misc.c24
-rw-r--r--fs/ubifs/ubifs.h40
-rw-r--r--fs/ubifs/xattr.c28
-rw-r--r--fs/udf/balloc.c20
-rw-r--r--fs/udf/dir.c1
-rw-r--r--fs/udf/directory.c1
-rw-r--r--fs/udf/file.c32
-rw-r--r--fs/udf/inode.c12
-rw-r--r--fs/udf/misc.c1
-rw-r--r--fs/udf/namei.c26
-rw-r--r--fs/udf/partition.c1
-rw-r--r--fs/udf/super.c1
-rw-r--r--fs/udf/symlink.c1
-rw-r--r--fs/udf/truncate.c1
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/ufs/file.c2
-rw-r--r--fs/ufs/namei.c10
-rw-r--r--fs/ufs/super.c4
-rw-r--r--fs/ufs/symlink.c2
-rw-r--r--fs/ufs/truncate.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c104
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c150
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h6
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c554
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h13
-rw-r--r--fs/xfs/libxfs/xfs_btree.c24
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c8
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h14
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c39
-rw-r--r--fs/xfs/libxfs/xfs_format.h62
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c48
-rw-r--r--fs/xfs/libxfs/xfs_sb.c20
-rw-r--r--fs/xfs/xfs_aops.c283
-rw-r--r--fs/xfs/xfs_attr_inactive.c3
-rw-r--r--fs/xfs/xfs_attr_list.c9
-rw-r--r--fs/xfs/xfs_bmap_util.c164
-rw-r--r--fs/xfs/xfs_bmap_util.h2
-rw-r--r--fs/xfs/xfs_buf_item.c4
-rw-r--r--fs/xfs/xfs_discard.c2
-rw-r--r--fs/xfs/xfs_error.c2
-rw-r--r--fs/xfs/xfs_error.h8
-rw-r--r--fs/xfs/xfs_export.c2
-rw-r--r--fs/xfs/xfs_file.c203
-rw-r--r--fs/xfs/xfs_filestream.c4
-rw-r--r--fs/xfs/xfs_fsops.c20
-rw-r--r--fs/xfs/xfs_icache.c4
-rw-r--r--fs/xfs/xfs_inode.c558
-rw-r--r--fs/xfs/xfs_inode.h49
-rw-r--r--fs/xfs/xfs_ioctl.c25
-rw-r--r--fs/xfs/xfs_ioctl32.c12
-rw-r--r--fs/xfs/xfs_iomap.c3
-rw-r--r--fs/xfs/xfs_iops.c109
-rw-r--r--fs/xfs/xfs_iops.h2
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--fs/xfs/xfs_linux.h9
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--fs/xfs/xfs_mount.c918
-rw-r--r--fs/xfs/xfs_mount.h95
-rw-r--r--fs/xfs/xfs_mru_cache.c2
-rw-r--r--fs/xfs/xfs_pnfs.c7
-rw-r--r--fs/xfs/xfs_pnfs.h5
-rw-r--r--fs/xfs/xfs_qm.c13
-rw-r--r--fs/xfs/xfs_qm.h4
-rw-r--r--fs/xfs/xfs_qm_syscalls.c176
-rw-r--r--fs/xfs/xfs_quotaops.c117
-rw-r--r--fs/xfs/xfs_super.c134
-rw-r--r--fs/xfs/xfs_super.h2
-rw-r--r--fs/xfs/xfs_symlink.c58
-rw-r--r--fs/xfs/xfs_trace.h29
-rw-r--r--fs/xfs/xfs_trans.c234
-rw-r--r--fs/xfs/xfs_xattr.c6
-rw-r--r--include/acpi/acpi_bus.h4
-rw-r--r--include/acpi/acpi_io.h4
-rw-r--r--include/acpi/acpixf.h14
-rw-r--r--include/acpi/actbl2.h70
-rw-r--r--include/acpi/actypes.h51
-rw-r--r--include/acpi/platform/acenv.h1
-rw-r--r--include/acpi/processor.h6
-rw-r--r--include/asm-generic/dma-mapping-common.h5
-rw-r--r--include/asm-generic/gpio.h48
-rw-r--r--include/asm-generic/pgtable.h30
-rw-r--r--include/asm-generic/seccomp.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h18
-rw-r--r--include/crypto/algapi.h2
-rw-r--r--include/crypto/if_alg.h4
-rw-r--r--include/crypto/rng.h3
-rw-r--r--include/crypto/sha.h15
-rw-r--r--include/crypto/sha1_base.h106
-rw-r--r--include/crypto/sha256_base.h128
-rw-r--r--include/crypto/sha512_base.h131
-rw-r--r--include/drm/bridge/dw_hdmi.h5
-rw-r--r--include/drm/drmP.h20
-rw-r--r--include/drm/drm_atomic.h24
-rw-r--r--include/drm/drm_atomic_helper.h20
-rw-r--r--include/drm/drm_crtc.h21
-rw-r--r--include/drm/drm_crtc_helper.h9
-rw-r--r--include/drm/drm_dp_helper.h176
-rw-r--r--include/drm/drm_dp_mst_helper.h2
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_fb_helper.h19
-rw-r--r--include/drm/drm_gem.h14
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_panel.h5
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/drm_plane_helper.h12
-rw-r--r--include/drm/i915_pciids.h77
-rw-r--r--include/dt-bindings/clock/exynos3250.h61
-rw-r--r--include/dt-bindings/clock/exynos5433.h1403
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h5
-rw-r--r--include/dt-bindings/clock/pistachio-clk.h183
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8916.h156
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h62
-rw-r--r--include/dt-bindings/clock/r8a7778-clock.h71
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h3
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h3
-rw-r--r--include/dt-bindings/clock/sh73a0-clock.h3
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h2
-rw-r--r--include/dt-bindings/dma/jz4780-dma.h49
-rw-r--r--include/dt-bindings/gpio/meson8b-gpio.h32
-rw-r--r--include/dt-bindings/leds/common.h21
-rw-r--r--include/dt-bindings/media/omap3-isp.h22
-rw-r--r--include/dt-bindings/media/xilinx-vip.h39
-rw-r--r--include/dt-bindings/mfd/arizona.h (renamed from include/linux/mfd/arizona/gpio.h)67
-rw-r--r--include/dt-bindings/mfd/qcom-rpm.h6
-rw-r--r--include/dt-bindings/pinctrl/mt65xx.h40
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h15
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h4
-rw-r--r--include/dt-bindings/reset/qcom,gcc-msm8916.h108
-rw-r--r--include/linux/a.out.h67
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/acpi_irq.h10
-rw-r--r--include/linux/aio.h70
-rw-r--r--include/linux/arm-cci.h9
-rw-r--r--include/linux/async_tx.h3
-rw-r--r--include/linux/bcm47xx_nvram.h (renamed from arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h)19
-rw-r--r--include/linux/bcma/bcma.h21
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h11
-rw-r--r--include/linux/bcma/bcma_driver_gmac_cmn.h6
-rw-r--r--include/linux/bcma/bcma_driver_mips.h15
-rw-r--r--include/linux/bcma/bcma_driver_pci.h12
-rw-r--r--include/linux/bcma/bcma_driver_pcie2.h4
-rw-r--r--include/linux/bitmap.h16
-rw-r--r--include/linux/bitops.h4
-rw-r--r--include/linux/blk-mq.h9
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/bpf.h63
-rw-r--r--include/linux/brcmphy.h2
-rw-r--r--include/linux/can/dev.h2
-rw-r--r--include/linux/can/led.h6
-rw-r--r--include/linux/can/skb.h7
-rw-r--r--include/linux/capability.h29
-rw-r--r--include/linux/ceph/ceph_features.h16
-rw-r--r--include/linux/ceph/ceph_fs.h1
-rw-r--r--include/linux/ceph/debugfs.h8
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/osdmap.h5
-rw-r--r--include/linux/cleancache.h13
-rw-r--r--include/linux/clk-provider.h2
-rw-r--r--include/linux/clk/at91_pmc.h4
-rw-r--r--include/linux/clk/shmobile.h1
-rw-r--r--include/linux/clk/ti.h6
-rw-r--r--include/linux/clocksource.h6
-rw-r--r--include/linux/cma.h12
-rw-r--r--include/linux/compaction.h1
-rw-r--r--include/linux/compiler-gcc.h16
-rw-r--r--include/linux/compiler-intel.h3
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/console.h3
-rw-r--r--include/linux/context_tracking.h15
-rw-r--r--include/linux/context_tracking_state.h9
-rw-r--r--include/linux/cpu.h20
-rw-r--r--include/linux/cpumask.h176
-rw-r--r--include/linux/cred.h23
-rw-r--r--include/linux/crush/crush.h12
-rw-r--r--include/linux/crypto.h6
-rw-r--r--include/linux/dcache.h59
-rw-r--r--include/linux/dccp.h4
-rw-r--r--include/linux/devfreq-event.h2
-rw-r--r--include/linux/device-mapper.h5
-rw-r--r--include/linux/device.h16
-rw-r--r--include/linux/dma-buf.h34
-rw-r--r--include/linux/dma-mapping.h4
-rw-r--r--include/linux/dma/hsu.h48
-rw-r--r--include/linux/dma/xilinx_dma.h (renamed from include/linux/amba/xilinx_dma.h)0
-rw-r--r--include/linux/dmaengine.h36
-rw-r--r--include/linux/elf-randomize.h22
-rw-r--r--include/linux/etherdevice.h1
-rw-r--r--include/linux/f2fs_fs.h3
-rw-r--r--include/linux/falloc.h6
-rw-r--r--include/linux/filter.h7
-rw-r--r--include/linux/fixp-arith.h145
-rw-r--r--include/linux/fs.h117
-rw-r--r--include/linux/fs_pin.h2
-rw-r--r--include/linux/ftrace_event.h20
-rw-r--r--include/linux/fwnode.h27
-rw-r--r--include/linux/gfp.h18
-rw-r--r--include/linux/gpio/consumer.h77
-rw-r--r--include/linux/gpio/driver.h48
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hid.h2
-rw-r--r--include/linux/host1x.h1
-rw-r--r--include/linux/hsi/hsi.h6
-rw-r--r--include/linux/hugetlb.h20
-rw-r--r--include/linux/hw_random.h4
-rw-r--r--include/linux/hyperv.h37
-rw-r--r--include/linux/i2c.h59
-rw-r--r--include/linux/ieee802154.h14
-rw-r--r--include/linux/if_bridge.h1
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/if_pppox.h2
-rw-r--r--include/linux/if_vlan.h67
-rw-r--r--include/linux/inet_diag.h43
-rw-r--r--include/linux/intel-iommu.h18
-rw-r--r--include/linux/io.h10
-rw-r--r--include/linux/iommu-common.h51
-rw-r--r--include/linux/iommu.h33
-rw-r--r--include/linux/ioport.h8
-rw-r--r--include/linux/ipv6.h4
-rw-r--r--include/linux/irqchip/arm-gic-acpi.h31
-rw-r--r--include/linux/irqchip/arm-gic.h2
-rw-r--r--include/linux/irqchip/mips-gic.h7
-rw-r--r--include/linux/jhash.h17
-rw-r--r--include/linux/jz4780-nemc.h43
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/kconfig.h24
-rw-r--r--include/linux/kernel.h12
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/ksm.h17
-rw-r--r--include/linux/kvm_host.h3
-rw-r--r--include/linux/led-class-flash.h19
-rw-r--r--include/linux/leds.h5
-rw-r--r--include/linux/lguest.h4
-rw-r--r--include/linux/libata.h10
-rw-r--r--include/linux/livepatch.h8
-rw-r--r--include/linux/lockdep.h7
-rw-r--r--include/linux/memblock.h8
-rw-r--r--include/linux/memcontrol.h4
-rw-r--r--include/linux/memory_hotplug.h6
-rw-r--r--include/linux/mempool.h5
-rw-r--r--include/linux/mfd/arizona/core.h3
-rw-r--r--include/linux/mfd/arizona/pdata.h24
-rw-r--r--include/linux/mfd/cros_ec.h23
-rw-r--r--include/linux/mfd/max77693-private.h5
-rw-r--r--include/linux/mfd/max77693.h13
-rw-r--r--include/linux/mfd/max77843-private.h454
-rw-r--r--include/linux/mfd/menelaus.h7
-rw-r--r--include/linux/mfd/mt6397/core.h64
-rw-r--r--include/linux/mfd/mt6397/registers.h362
-rw-r--r--include/linux/mfd/rk808.h3
-rw-r--r--include/linux/mfd/rtsx_pci.h1116
-rw-r--r--include/linux/mfd/samsung/core.h9
-rw-r--r--include/linux/mfd/samsung/irq.h2
-rw-r--r--include/linux/mfd/samsung/rtc.h2
-rw-r--r--include/linux/mfd/sky81452.h31
-rw-r--r--include/linux/mfd/syscon/atmel-st.h49
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h1
-rw-r--r--include/linux/mfd/tc3589x.h23
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h1
-rw-r--r--include/linux/mfd/tmio.h2
-rw-r--r--include/linux/migrate.h5
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cmd.h26
-rw-r--r--include/linux/mlx4/device.h44
-rw-r--r--include/linux/mlx4/qp.h18
-rw-r--r--include/linux/mlx5/cmd.h2
-rw-r--r--include/linux/mlx5/cq.h7
-rw-r--r--include/linux/mlx5/device.h2
-rw-r--r--include/linux/mlx5/doorbell.h2
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2
-rw-r--r--include/linux/mlx5/qp.h2
-rw-r--r--include/linux/mlx5/srq.h2
-rw-r--r--include/linux/mm.h102
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/mmc/card.h14
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h10
-rw-r--r--include/linux/mmzone.h8
-rw-r--r--include/linux/mod_devicetable.h8
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/mtd/map.h54
-rw-r--r--include/linux/mtd/spi-nor.h5
-rw-r--r--include/linux/nbd.h46
-rw-r--r--include/linux/net.h12
-rw-r--r--include/linux/netdevice.h161
-rw-r--r--include/linux/netfilter.h103
-rw-r--r--include/linux/netfilter/ipset/ip_set.h5
-rw-r--r--include/linux/netfilter_arp/arp_tables.h3
-rw-r--r--include/linux/netfilter_bridge.h112
-rw-r--r--include/linux/netfilter_bridge/ebtables.h3
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h3
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h3
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/nfs4.h7
-rw-r--r--include/linux/nfs_fs.h6
-rw-r--r--include/linux/nfs_xdr.h6
-rw-r--r--include/linux/nilfs2_fs.h2
-rw-r--r--include/linux/nmi.h21
-rw-r--r--include/linux/nvme.h5
-rw-r--r--include/linux/of.h44
-rw-r--r--include/linux/of_fdt.h2
-rw-r--r--include/linux/of_graph.h20
-rw-r--r--include/linux/of_irq.h9
-rw-r--r--include/linux/of_mdio.h7
-rw-r--r--include/linux/of_net.h8
-rw-r--r--include/linux/omap-gpmc.h3
-rw-r--r--include/linux/oom.h3
-rw-r--r--include/linux/page-flags.h105
-rw-r--r--include/linux/pci.h15
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/perf_event.h121
-rw-r--r--include/linux/personality.h40
-rw-r--r--include/linux/phy_fixed.h9
-rw-r--r--include/linux/platform_data/dma-hsu.h25
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h3
-rw-r--r--include/linux/platform_data/i2c-davinci.h1
-rw-r--r--include/linux/platform_data/mmc-msm_sdcc.h27
-rw-r--r--include/linux/platform_data/msm_serial_hs.h49
-rw-r--r--include/linux/platform_data/nxp-nci.h27
-rw-r--r--include/linux/platform_data/serial-imx.h5
-rw-r--r--include/linux/platform_data/si5351.h4
-rw-r--r--include/linux/platform_data/sky81452-backlight.h46
-rw-r--r--include/linux/platform_data/st33zp24.h (renamed from include/linux/platform_data/tpm_stm_st33.h)21
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pm-trace.h (renamed from include/linux/resume-trace.h)9
-rw-r--r--include/linux/pm.h8
-rw-r--r--include/linux/pm_domain.h6
-rw-r--r--include/linux/pnp.h12
-rw-r--r--include/linux/printk.h5
-rw-r--r--include/linux/property.h44
-rw-r--r--include/linux/pstore.h1
-rw-r--r--include/linux/ptp_clock_kernel.h12
-rw-r--r--include/linux/qcom_scm.h28
-rw-r--r--include/linux/quota.h90
-rw-r--r--include/linux/quotaops.h14
-rw-r--r--include/linux/raid/pq.h1
-rw-r--r--include/linux/rcupdate.h40
-rw-r--r--include/linux/reboot.h3
-rw-r--r--include/linux/remoteproc.h2
-rw-r--r--include/linux/rhashtable.h528
-rw-r--r--include/linux/rmap.h8
-rw-r--r--include/linux/rtnetlink.h17
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/sched/rt.h7
-rw-r--r--include/linux/security.h15
-rw-r--r--include/linux/serial_8250.h20
-rw-r--r--include/linux/serial_core.h23
-rw-r--r--include/linux/serial_mfd.h47
-rw-r--r--include/linux/shdma-base.h1
-rw-r--r--include/linux/skbuff.h26
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/smpboot.h1
-rw-r--r--include/linux/sock_diag.h4
-rw-r--r--include/linux/socket.h8
-rw-r--r--include/linux/spi/at86rf230.h1
-rw-r--r--include/linux/spi/cc2520.h1
-rw-r--r--include/linux/srcu.h2
-rw-r--r--include/linux/stacktrace.h2
-rw-r--r--include/linux/stmmac.h2
-rw-r--r--include/linux/string_helpers.h10
-rw-r--r--include/linux/sunrpc/msg_prot.h8
-rw-r--r--include/linux/sunrpc/xprtrdma.h5
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/tcp.h13
-rw-r--r--include/linux/tracefs.h45
-rw-r--r--include/linux/tracepoint.h8
-rw-r--r--include/linux/tty.h3
-rw-r--r--include/linux/types.h6
-rw-r--r--include/linux/udp.h2
-rw-r--r--include/linux/uidgid.h16
-rw-r--r--include/linux/uio.h23
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/util_macros.h40
-rw-r--r--include/linux/vfio.h25
-rw-r--r--include/linux/vgaarb.h5
-rw-r--r--include/linux/virtio.h2
-rw-r--r--include/linux/virtio_config.h16
-rw-r--r--include/linux/virtio_ring.h23
-rw-r--r--include/linux/watchdog.h8
-rw-r--r--include/linux/wl12xx.h49
-rw-r--r--include/linux/zsmalloc.h1
-rw-r--r--include/media/adv7604.h83
-rw-r--r--include/media/davinci/vpfe_capture.h2
-rw-r--r--include/media/media-entity.h21
-rw-r--r--include/media/mt9p031.h2
-rw-r--r--include/media/omap3isp.h38
-rw-r--r--include/media/ov2659.h34
-rw-r--r--include/media/saa7146_vv.h4
-rw-r--r--include/media/v4l2-clk.h10
-rw-r--r--include/media/v4l2-dev.h1
-rw-r--r--include/media/v4l2-device.h2
-rw-r--r--include/media/v4l2-ioctl.h6
-rw-r--r--include/media/v4l2-of.h30
-rw-r--r--include/media/v4l2-subdev.h55
-rw-r--r--include/media/videobuf2-core.h20
-rw-r--r--include/net/9p/client.h8
-rw-r--r--include/net/9p/transport.h2
-rw-r--r--include/net/af_vsock.h4
-rw-r--r--include/net/arp.h20
-rw-r--r--include/net/ax25.h5
-rw-r--r--include/net/bluetooth/bluetooth.h45
-rw-r--r--include/net/bluetooth/hci.h59
-rw-r--r--include/net/bluetooth/hci_core.h233
-rw-r--r--include/net/bluetooth/mgmt.h101
-rw-r--r--include/net/bond_3ad.h29
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/cfg80211.h127
-rw-r--r--include/net/cfg802154.h2
-rw-r--r--include/net/codel.h10
-rw-r--r--include/net/compat.h2
-rw-r--r--include/net/dcbnl.h3
-rw-r--r--include/net/dn_neigh.h5
-rw-r--r--include/net/dsa.h27
-rw-r--r--include/net/dst_ops.h1
-rw-r--r--include/net/fib_rules.h11
-rw-r--r--include/net/genetlink.h4
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/inet6_connection_sock.h3
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_common.h7
-rw-r--r--include/net/inet_connection_sock.h36
-rw-r--r--include/net/inet_hashtables.h36
-rw-r--r--include/net/inet_sock.h30
-rw-r--r--include/net/inet_timewait_sock.h108
-rw-r--r--include/net/inetpeer.h3
-rw-r--r--include/net/ip.h15
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ip6_tunnel.h6
-rw-r--r--include/net/ip_fib.h70
-rw-r--r--include/net/ip_tunnels.h1
-rw-r--r--include/net/ip_vs.h69
-rw-r--r--include/net/ipv6.h14
-rw-r--r--include/net/iw_handler.h22
-rw-r--r--include/net/mac80211.h253
-rw-r--r--include/net/mac802154.h105
-rw-r--r--include/net/ndisc.h19
-rw-r--r--include/net/neighbour.h67
-rw-r--r--include/net/net_namespace.h58
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h8
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h13
-rw-r--r--include/net/netfilter/nf_conntrack.h5
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h48
-rw-r--r--include/net/netfilter/nf_queue.h6
-rw-r--r--include/net/netfilter/nf_tables.h621
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h5
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h5
-rw-r--r--include/net/netfilter/nft_meta.h4
-rw-r--r--include/net/netlink.h50
-rw-r--r--include/net/netns/generic.h2
-rw-r--r--include/net/netns/hash.h4
-rw-r--r--include/net/netns/ipv4.h13
-rw-r--r--include/net/netns/ipv6.h3
-rw-r--r--include/net/netns/mpls.h17
-rw-r--r--include/net/netns/x_tables.h1
-rw-r--r--include/net/nfc/hci.h4
-rw-r--r--include/net/nfc/nci_core.h5
-rw-r--r--include/net/nfc/nfc.h2
-rw-r--r--include/net/ping.h7
-rw-r--r--include/net/request_sock.h130
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/sch_generic.h4
-rw-r--r--include/net/sctp/sctp.h3
-rw-r--r--include/net/sock.h64
-rw-r--r--include/net/switchdev.h55
-rw-r--r--include/net/tc_act/tc_bpf.h6
-rw-r--r--include/net/tcp.h129
-rw-r--r--include/net/tcp_states.h4
-rw-r--r--include/net/udp.h24
-rw-r--r--include/net/udp_tunnel.h5
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/net/xfrm.h22
-rw-r--r--include/rdma/ib_addr.h3
-rw-r--r--include/rdma/ib_cm.h7
-rw-r--r--include/rdma/iw_portmap.h25
-rw-r--r--include/rxrpc/packet.h3
-rw-r--r--include/scsi/scsi_devinfo.h1
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--include/sound/ac97_codec.h4
-rw-r--r--include/sound/compress_driver.h4
-rw-r--r--include/sound/control.h2
-rw-r--r--include/sound/core.h3
-rw-r--r--include/sound/designware_i2s.h2
-rw-r--r--include/sound/emu10k1.h14
-rw-r--r--include/sound/hda_regmap.h217
-rw-r--r--include/sound/hdaudio.h247
-rw-r--r--include/sound/pcm.h66
-rw-r--r--include/sound/pcm_params.h7
-rw-r--r--include/sound/rt5670.h1
-rw-r--r--include/sound/seq_device.h46
-rw-r--r--include/sound/seq_kernel.h6
-rw-r--r--include/sound/simple_card.h1
-rw-r--r--include/sound/soc-dapm.h8
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc.h44
-rw-r--r--include/sound/spear_dma.h2
-rw-r--r--include/target/iscsi/iscsi_target_core.h15
-rw-r--r--include/target/target_core_base.h10
-rw-r--r--include/target/target_core_configfs.h6
-rw-r--r--include/target/target_core_fabric.h33
-rw-r--r--include/target/target_core_fabric_configfs.h5
-rw-r--r--include/trace/events/9p.h157
-rw-r--r--include/trace/events/btrfs.h8
-rw-r--r--include/trace/events/clk.h198
-rw-r--r--include/trace/events/cma.h66
-rw-r--r--include/trace/events/ext3.h18
-rw-r--r--include/trace/events/ext4.h22
-rw-r--r--include/trace/events/f2fs.h192
-rw-r--r--include/trace/events/filemap.h8
-rw-r--r--include/trace/events/intel-sst.h7
-rw-r--r--include/trace/events/irq.h39
-rw-r--r--include/trace/events/kmem.h42
-rw-r--r--include/trace/events/migrate.h42
-rw-r--r--include/trace/events/module.h4
-rw-r--r--include/trace/events/random.h10
-rw-r--r--include/trace/events/sunrpc.h62
-rw-r--r--include/trace/events/tlb.h30
-rw-r--r--include/trace/events/v4l2.h75
-rw-r--r--include/trace/events/vmscan.h8
-rw-r--r--include/trace/events/writeback.h33
-rw-r--r--include/trace/events/xen.h2
-rw-r--r--include/trace/ftrace.h41
-rw-r--r--include/uapi/asm-generic/errno.h11
-rw-r--r--include/uapi/drm/drm.h1
-rw-r--r--include/uapi/drm/drm_fourcc.h78
-rw-r--r--include/uapi/drm/drm_mode.h9
-rw-r--r--include/uapi/drm/i915_drm.h5
-rw-r--r--include/uapi/drm/nouveau_drm.h1
-rw-r--r--include/uapi/drm/radeon_drm.h4
-rw-r--r--include/uapi/drm/tegra_drm.h3
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/am437x-vpfe.h2
-rw-r--r--include/uapi/linux/bpf.h63
-rw-r--r--include/uapi/linux/can/raw.h1
-rw-r--r--include/uapi/linux/dcbnl.h66
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/falloc.h17
-rw-r--r--include/uapi/linux/filter.h10
-rw-r--r--include/uapi/linux/fou.h1
-rw-r--r--include/uapi/linux/if_addr.h2
-rw-r--r--include/uapi/linux/if_link.h14
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/inet_diag.h4
-rw-r--r--include/uapi/linux/input.h3
-rw-r--r--include/uapi/linux/ip_vs.h7
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/linux/media-bus-format.h28
-rw-r--r--include/uapi/linux/media.h52
-rw-r--r--include/uapi/linux/mpls.h10
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h80
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h2
-rw-r--r--include/uapi/linux/nfs4.h7
-rw-r--r--include/uapi/linux/nfs_idmap.h2
-rw-r--r--include/uapi/linux/nfsd/debug.h8
-rw-r--r--include/uapi/linux/nfsd/export.h3
-rw-r--r--include/uapi/linux/nl80211.h34
-rw-r--r--include/uapi/linux/perf_event.h115
-rw-r--r--include/uapi/linux/pkt_cls.h2
-rw-r--r--include/uapi/linux/quota.h6
-rw-r--r--include/uapi/linux/raid/md_p.h7
-rw-r--r--include/uapi/linux/raid/md_u.h1
-rw-r--r--include/uapi/linux/rtnetlink.h16
-rw-r--r--include/uapi/linux/serial_reg.h19
-rw-r--r--include/uapi/linux/target_core_user.h44
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/linux/tipc_netlink.h9
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h64
-rw-r--r--include/uapi/linux/v4l2-subdev.h12
-rw-r--r--include/uapi/linux/vfio.h2
-rw-r--r--include/uapi/linux/videodev2.h18
-rw-r--r--include/uapi/linux/virtio_balloon.h32
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/virtio_input.h76
-rw-r--r--include/uapi/linux/virtio_ring.h2
-rw-r--r--include/uapi/linux/xfrm.h2
-rw-r--r--include/uapi/linux/xilinx-v4l2-controls.h73
-rw-r--r--include/uapi/rdma/rdma_netlink.h1
-rw-r--r--include/uapi/sound/asequencer.h1
-rw-r--r--include/uapi/sound/asound.h41
-rw-r--r--include/uapi/sound/compress_offload.h2
-rw-r--r--include/uapi/sound/emu10k1.h3
-rw-r--r--include/uapi/sound/hdspm.h6
-rw-r--r--include/video/imx-ipu-v3.h2
-rw-r--r--include/video/omapdss.h7
-rw-r--r--include/video/samsung_fimd.h11
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--include/xen/interface/xen.h6
-rw-r--r--include/xen/xen-ops.h48
-rw-r--r--include/xen/xenbus.h20
-rw-r--r--init/Kconfig40
-rw-r--r--init/do_mounts.c9
-rw-r--r--init/main.c9
-rw-r--r--ipc/mqueue.c22
-rw-r--r--ipc/msg.c34
-rw-r--r--ipc/sem.c26
-rw-r--r--ipc/shm.c44
-rw-r--r--ipc/util.c6
-rw-r--r--kernel/Makefile10
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/audit.c49
-rw-r--r--kernel/audit.h3
-rw-r--r--kernel/audit_tree.c92
-rw-r--r--kernel/audit_watch.c14
-rw-r--r--kernel/auditsc.c15
-rw-r--r--kernel/bpf/Makefile3
-rw-r--r--kernel/bpf/arraymap.c6
-rw-r--r--kernel/bpf/core.c20
-rw-r--r--kernel/bpf/hashtab.c6
-rw-r--r--kernel/bpf/helpers.c30
-rw-r--r--kernel/bpf/syscall.c18
-rw-r--r--kernel/bpf/test_stub.c78
-rw-r--r--kernel/bpf/verifier.c185
-rw-r--r--kernel/capability.c35
-rw-r--r--kernel/cgroup.c6
-rw-r--r--kernel/context_tracking.c59
-rw-r--r--kernel/cpu.c38
-rw-r--r--kernel/cpuset.c18
-rw-r--r--kernel/cred.c3
-rw-r--r--kernel/events/core.c791
-rw-r--r--kernel/events/hw_breakpoint.c8
-rw-r--r--kernel/events/internal.h33
-rw-r--r--kernel/events/ring_buffer.c327
-rw-r--r--kernel/exec_domain.c137
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c145
-rw-r--r--kernel/gcov/base.c5
-rw-r--r--kernel/groups.c3
-rw-r--r--kernel/hung_task.c4
-rw-r--r--kernel/irq/dummychip.c1
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/livepatch/core.c69
-rw-r--r--kernel/locking/lockdep.c16
-rw-r--r--kernel/locking/rtmutex.c12
-rw-r--r--kernel/module.c12
-rw-r--r--kernel/params.c4
-rw-r--r--kernel/pid.c15
-rw-r--r--kernel/power/main.c2
-rw-r--r--kernel/power/suspend.c13
-rw-r--r--kernel/printk/printk.c57
-rw-r--r--kernel/ptrace.c39
-rw-r--r--kernel/rcu/rcutorture.c27
-rw-r--r--kernel/rcu/srcu.c19
-rw-r--r--kernel/rcu/tiny.c14
-rw-r--r--kernel/rcu/tree.c439
-rw-r--r--kernel/rcu/tree.h11
-rw-r--r--kernel/rcu/tree_plugin.h267
-rw-r--r--kernel/rcu/tree_trace.c4
-rw-r--r--kernel/rcu/update.c72
-rw-r--r--kernel/reboot.c53
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/resource.c32
-rw-r--r--kernel/sched/core.c71
-rw-r--r--kernel/sched/idle.c25
-rw-r--r--kernel/signal.c14
-rw-r--r--kernel/smp.c80
-rw-r--r--kernel/smpboot.c156
-rw-r--r--kernel/sys.c49
-rw-r--r--kernel/sys_ni.c14
-rw-r--r--kernel/sysctl.c61
-rw-r--r--kernel/time/clockevents.c8
-rw-r--r--kernel/trace/Kconfig36
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/bpf_trace.c222
-rw-r--r--kernel/trace/ftrace.c44
-rw-r--r--kernel/trace/ring_buffer.c10
-rw-r--r--kernel/trace/trace.c493
-rw-r--r--kernel/trace/trace.h4
-rw-r--r--kernel/trace/trace_entries.h6
-rw-r--r--kernel/trace/trace_events.c172
-rw-r--r--kernel/trace/trace_export.c2
-rw-r--r--kernel/trace/trace_functions_graph.c15
-rw-r--r--kernel/trace/trace_kprobe.c25
-rw-r--r--kernel/trace/trace_output.c3
-rw-r--r--kernel/trace/trace_probe.c19
-rw-r--r--kernel/trace/trace_probe.h12
-rw-r--r--kernel/trace/trace_stack.c4
-rw-r--r--kernel/trace/trace_stat.c10
-rw-r--r--kernel/trace/trace_uprobe.c17
-rw-r--r--kernel/watchdog.c327
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug48
-rw-r--r--lib/Kconfig.kasan8
-rw-r--r--lib/Makefile4
-rw-r--r--lib/bitmap.c30
-rw-r--r--lib/cpumask.c37
-rw-r--r--lib/devres.c28
-rw-r--r--lib/div64.c2
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/find_bit.c193
-rw-r--r--lib/find_last_bit.c49
-rw-r--r--lib/find_next_bit.c285
-rw-r--r--lib/iommu-common.c270
-rw-r--r--lib/ioremap.c53
-rw-r--r--lib/iov_iter.c83
-rw-r--r--lib/lru_cache.c9
-rw-r--r--lib/raid6/algos.c41
-rw-r--r--lib/raid6/altivec.uc1
-rw-r--r--lib/raid6/avx2.c3
-rw-r--r--lib/raid6/int.uc41
-rw-r--r--lib/raid6/mmx.c2
-rw-r--r--lib/raid6/neon.c1
-rw-r--r--lib/raid6/sse1.c2
-rw-r--r--lib/raid6/sse2.c227
-rw-r--r--lib/raid6/test/test.c51
-rw-r--r--lib/raid6/tilegx.uc1
-rw-r--r--lib/rhashtable.c1025
-rw-r--r--lib/sha1.c1
-rw-r--r--lib/string.c2
-rw-r--r--lib/string_helpers.c261
-rw-r--r--lib/test-hexdump.c10
-rw-r--r--lib/test-string_helpers.c40
-rw-r--r--lib/test_rhashtable.c58
-rw-r--r--lib/vsprintf.c352
-rw-r--r--mm/Kconfig6
-rw-r--r--mm/Makefile2
-rw-r--r--mm/cleancache.c276
-rw-r--r--mm/cma.c52
-rw-r--r--mm/cma.h24
-rw-r--r--mm/cma_debug.c205
-rw-r--r--mm/compaction.c75
-rw-r--r--mm/filemap.c130
-rw-r--r--mm/gup.c128
-rw-r--r--mm/huge_memory.c111
-rw-r--r--mm/hugetlb.c246
-rw-r--r--mm/hwpoison-inject.c13
-rw-r--r--mm/internal.h8
-rw-r--r--mm/kasan/kasan.c13
-rw-r--r--mm/kmemleak.c3
-rw-r--r--mm/ksm.c10
-rw-r--r--mm/memblock.c22
-rw-r--r--mm/memcontrol.c241
-rw-r--r--mm/memory-failure.c138
-rw-r--r--mm/memory.c419
-rw-r--r--mm/memory_hotplug.c37
-rw-r--r--mm/mempolicy.c8
-rw-r--r--mm/mempool.c127
-rw-r--r--mm/memtest.c (renamed from arch/x86/mm/memtest.c)16
-rw-r--r--mm/migrate.c40
-rw-r--r--mm/mlock.c131
-rw-r--r--mm/mmap.c25
-rw-r--r--mm/mremap.c25
-rw-r--r--mm/nommu.c4
-rw-r--r--mm/oom_kill.c9
-rw-r--r--mm/page-writeback.c28
-rw-r--r--mm/page_alloc.c253
-rw-r--r--mm/page_io.c7
-rw-r--r--mm/page_isolation.c3
-rw-r--r--mm/process_vm_access.c35
-rw-r--r--mm/rmap.c6
-rw-r--r--mm/shmem.c34
-rw-r--r--mm/slab.c22
-rw-r--r--mm/slob.c3
-rw-r--r--mm/slub.c32
-rw-r--r--mm/swap.c34
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c39
-rw-r--r--mm/util.c41
-rw-r--r--mm/vmalloc.c103
-rw-r--r--mm/zsmalloc.c971
-rw-r--r--net/6lowpan/Kconfig57
-rw-r--r--net/6lowpan/Makefile13
-rw-r--r--net/6lowpan/iphc.c200
-rw-r--r--net/6lowpan/nhc.c241
-rw-r--r--net/6lowpan/nhc.h146
-rw-r--r--net/6lowpan/nhc_dest.c28
-rw-r--r--net/6lowpan/nhc_fragment.c27
-rw-r--r--net/6lowpan/nhc_hop.c27
-rw-r--r--net/6lowpan/nhc_ipv6.c27
-rw-r--r--net/6lowpan/nhc_mobility.c27
-rw-r--r--net/6lowpan/nhc_routing.c27
-rw-r--r--net/6lowpan/nhc_udp.c157
-rw-r--r--net/802/fc.c21
-rw-r--r--net/802/fddi.c26
-rw-r--r--net/802/hippi.c28
-rw-r--r--net/8021q/vlan.c16
-rw-r--r--net/8021q/vlan_dev.c47
-rw-r--r--net/9p/client.c262
-rw-r--r--net/9p/protocol.c28
-rw-r--r--net/9p/trans_common.c42
-rw-r--r--net/9p/trans_common.h2
-rw-r--r--net/9p/trans_fd.c7
-rw-r--r--net/9p/trans_rdma.c52
-rw-r--r--net/9p/trans_virtio.c142
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/aarp.c6
-rw-r--r--net/appletalk/ddp.c7
-rw-r--r--net/atm/common.c7
-rw-r--r--net/atm/common.h7
-rw-r--r--net/atm/lec.c4
-rw-r--r--net/atm/mpoa_proc.c2
-rw-r--r--net/atm/signaling.c24
-rw-r--r--net/ax25/af_ax25.c7
-rw-r--r--net/ax25/ax25_ip.c30
-rw-r--r--net/batman-adv/gateway_client.c19
-rw-r--r--net/batman-adv/hard-interface.c5
-rw-r--r--net/bluetooth/Kconfig8
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/a2mp.c44
-rw-r--r--net/bluetooth/a2mp.h8
-rw-r--r--net/bluetooth/af_bluetooth.c20
-rw-r--r--net/bluetooth/bnep/bnep.h4
-rw-r--r--net/bluetooth/bnep/core.c70
-rw-r--r--net/bluetooth/bnep/netdev.c2
-rw-r--r--net/bluetooth/bnep/sock.c7
-rw-r--r--net/bluetooth/cmtp/capi.c2
-rw-r--r--net/bluetooth/cmtp/core.c15
-rw-r--r--net/bluetooth/hci_conn.c34
-rw-r--r--net/bluetooth/hci_core.c579
-rw-r--r--net/bluetooth/hci_debugfs.c98
-rw-r--r--net/bluetooth/hci_debugfs.h22
-rw-r--r--net/bluetooth/hci_event.c517
-rw-r--r--net/bluetooth/hci_request.c48
-rw-r--r--net/bluetooth/hci_request.h5
-rw-r--r--net/bluetooth/hci_sock.c329
-rw-r--r--net/bluetooth/hidp/core.c16
-rw-r--r--net/bluetooth/l2cap_core.c98
-rw-r--r--net/bluetooth/l2cap_sock.c18
-rw-r--r--net/bluetooth/mgmt.c3652
-rw-r--r--net/bluetooth/mgmt_util.c210
-rw-r--r--net/bluetooth/mgmt_util.h53
-rw-r--r--net/bluetooth/rfcomm/sock.c10
-rw-r--r--net/bluetooth/sco.c33
-rw-r--r--net/bluetooth/selftest.c35
-rw-r--r--net/bluetooth/smp.c415
-rw-r--r--net/bluetooth/smp.h1
-rw-r--r--net/bridge/br_device.c10
-rw-r--r--net/bridge/br_forward.c20
-rw-r--r--net/bridge/br_input.c33
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_multicast.c3
-rw-r--r--net/bridge/br_netfilter.c319
-rw-r--r--net/bridge/br_netlink.c135
-rw-r--r--net/bridge/br_nf_core.c1
-rw-r--r--net/bridge/br_private.h14
-rw-r--r--net/bridge/br_stp_bpdu.c5
-rw-r--r--net/bridge/br_sysfs_if.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c14
-rw-r--r--net/bridge/netfilter/ebtable_nat.c14
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c28
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c26
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c92
-rw-r--r--net/caif/caif_socket.c17
-rw-r--r--net/can/bcm.c9
-rw-r--r--net/can/raw.c63
-rw-r--r--net/ceph/ceph_common.c37
-rw-r--r--net/ceph/crush/crush.c14
-rw-r--r--net/ceph/crush/crush_ln_table.h166
-rw-r--r--net/ceph/crush/mapper.c118
-rw-r--r--net/ceph/debugfs.c24
-rw-r--r--net/ceph/messenger.c25
-rw-r--r--net/ceph/osdmap.c25
-rw-r--r--net/compat.c30
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c166
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/fib_rules.c25
-rw-r--r--net/core/filter.c483
-rw-r--r--net/core/link_watch.c4
-rw-r--r--net/core/neighbour.c112
-rw-r--r--net/core/net-sysfs.c125
-rw-r--r--net/core/net_namespace.c115
-rw-r--r--net/core/request_sock.c45
-rw-r--r--net/core/rtnetlink.c133
-rw-r--r--net/core/skbuff.c95
-rw-r--r--net/core/sock.c107
-rw-r--r--net/core/sock_diag.c37
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/dcb/dcbnl.c44
-rw-r--r--net/dccp/dccp.h12
-rw-r--r--net/dccp/diag.c7
-rw-r--r--net/dccp/ipv4.c100
-rw-r--r--net/dccp/ipv6.c87
-rw-r--r--net/dccp/minisocks.c27
-rw-r--r--net/dccp/probe.c3
-rw-r--r--net/dccp/proto.c7
-rw-r--r--net/dccp/timer.c24
-rw-r--r--net/decnet/af_decnet.c7
-rw-r--r--net/decnet/dn_neigh.c137
-rw-r--r--net/decnet/dn_nsp_in.c5
-rw-r--r--net/decnet/dn_route.c36
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c4
-rw-r--r--net/dsa/Kconfig7
-rw-r--r--net/dsa/dsa.c259
-rw-r--r--net/dsa/dsa_priv.h9
-rw-r--r--net/dsa/slave.c327
-rw-r--r--net/ethernet/eth.c38
-rw-r--r--net/ieee802154/6lowpan/core.c8
-rw-r--r--net/ieee802154/Makefile4
-rw-r--r--net/ieee802154/core.c6
-rw-r--r--net/ieee802154/nl-mac.c1
-rw-r--r--net/ieee802154/nl-phy.c5
-rw-r--r--net/ieee802154/nl802154.c2
-rw-r--r--net/ieee802154/rdev-ops.h85
-rw-r--r--net/ieee802154/socket.c21
-rw-r--r--net/ieee802154/sysfs.c49
-rw-r--r--net/ieee802154/trace.c7
-rw-r--r--net/ieee802154/trace.h247
-rw-r--r--net/ipv4/af_inet.c21
-rw-r--r--net/ipv4/arp.c150
-rw-r--r--net/ipv4/cipso_ipv4.c42
-rw-r--r--net/ipv4/devinet.c91
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c134
-rw-r--r--net/ipv4/fib_lookup.h4
-rw-r--r--net/ipv4/fib_rules.c39
-rw-r--r--net/ipv4/fib_semantics.c43
-rw-r--r--net/ipv4/fib_trie.c1767
-rw-r--r--net/ipv4/fou.c233
-rw-r--r--net/ipv4/geneve.c14
-rw-r--r--net/ipv4/gre_offload.c4
-rw-r--r--net/ipv4/icmp.c6
-rw-r--r--net/ipv4/igmp.c72
-rw-r--r--net/ipv4/inet_connection_sock.c231
-rw-r--r--net/ipv4/inet_diag.c464
-rw-r--r--net/ipv4/inet_fragment.c4
-rw-r--r--net/ipv4/inet_hashtables.c72
-rw-r--r--net/ipv4/inet_timewait_sock.c277
-rw-r--r--net/ipv4/ip_forward.c11
-rw-r--r--net/ipv4/ip_fragment.c14
-rw-r--r--net/ipv4/ip_gre.c14
-rw-r--r--net/ipv4/ip_input.c17
-rw-r--r--net/ipv4/ip_options.c2
-rw-r--r--net/ipv4/ip_output.c89
-rw-r--r--net/ipv4/ip_sockglue.c34
-rw-r--r--net/ipv4/ip_tunnel.c21
-rw-r--r--net/ipv4/ip_tunnel_core.c3
-rw-r--r--net/ipv4/ip_vti.c12
-rw-r--r--net/ipv4/ipcomp.c2
-rw-r--r--net/ipv4/ipconfig.c6
-rw-r--r--net/ipv4/ipip.c12
-rw-r--r--net/ipv4/ipmr.c81
-rw-r--r--net/ipv4/netfilter.c4
-rw-r--r--net/ipv4/netfilter/Kconfig38
-rw-r--r--net/ipv4/netfilter/arp_tables.c11
-rw-r--r--net/ipv4/netfilter/arptable_filter.c7
-rw-r--r--net/ipv4/netfilter/ip_tables.c13
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c17
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c17
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c6
-rw-r--r--net/ipv4/netfilter/iptable_filter.c8
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c19
-rw-r--r--net/ipv4/netfilter/iptable_nat.c29
-rw-r--r--net/ipv4/netfilter/iptable_raw.c7
-rw-r--r--net/ipv4/netfilter/iptable_security.c8
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c28
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c4
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_log_arp.c4
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c33
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c29
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c6
-rw-r--r--net/ipv4/netfilter/nf_tables_ipv4.c12
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c29
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c9
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c11
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c9
-rw-r--r--net/ipv4/ping.c18
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/raw.c20
-rw-r--r--net/ipv4/route.c45
-rw-r--r--net/ipv4/syncookies.c24
-rw-r--r--net/ipv4/sysctl_net_ipv4.c20
-rw-r--r--net/ipv4/tcp.c99
-rw-r--r--net/ipv4/tcp_cong.c2
-rw-r--r--net/ipv4/tcp_dctcp.c21
-rw-r--r--net/ipv4/tcp_diag.c6
-rw-r--r--net/ipv4/tcp_fastopen.c21
-rw-r--r--net/ipv4/tcp_illinois.c23
-rw-r--r--net/ipv4/tcp_input.c256
-rw-r--r--net/ipv4/tcp_ipv4.c224
-rw-r--r--net/ipv4/tcp_metrics.c208
-rw-r--r--net/ipv4/tcp_minisocks.c66
-rw-r--r--net/ipv4/tcp_offload.c4
-rw-r--r--net/ipv4/tcp_output.c239
-rw-r--r--net/ipv4/tcp_timer.c21
-rw-r--r--net/ipv4/tcp_vegas.c20
-rw-r--r--net/ipv4/tcp_vegas.h3
-rw-r--r--net/ipv4/tcp_westwood.c17
-rw-r--r--net/ipv4/udp.c44
-rw-r--r--net/ipv4/udp_diag.c24
-rw-r--r--net/ipv4/udp_impl.h4
-rw-r--r--net/ipv4/udp_offload.c4
-rw-r--r--net/ipv4/udp_tunnel.c4
-rw-r--r--net/ipv4/xfrm4_input.c7
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c2
-rw-r--r--net/ipv4/xfrm4_output.c12
-rw-r--r--net/ipv4/xfrm4_policy.c3
-rw-r--r--net/ipv6/addrconf.c433
-rw-r--r--net/ipv6/addrconf_core.c2
-rw-r--r--net/ipv6/addrlabel.c13
-rw-r--r--net/ipv6/af_inet6.c18
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/anycast.c22
-rw-r--r--net/ipv6/datagram.c6
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/exthdrs_core.c10
-rw-r--r--net/ipv6/fib6_rules.c21
-rw-r--r--net/ipv6/icmp.c13
-rw-r--r--net/ipv6/inet6_connection_sock.c23
-rw-r--r--net/ipv6/inet6_hashtables.c60
-rw-r--r--net/ipv6/ip6_fib.c4
-rw-r--r--net/ipv6/ip6_flowlabel.c25
-rw-r--r--net/ipv6/ip6_gre.c43
-rw-r--r--net/ipv6/ip6_input.c13
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c100
-rw-r--r--net/ipv6/ip6_tunnel.c42
-rw-r--r--net/ipv6/ip6_udp_tunnel.c5
-rw-r--r--net/ipv6/ip6_vti.c30
-rw-r--r--net/ipv6/ip6mr.c74
-rw-r--r--net/ipv6/ipv6_sockglue.c42
-rw-r--r--net/ipv6/mcast.c75
-rw-r--r--net/ipv6/ndisc.c36
-rw-r--r--net/ipv6/netfilter.c4
-rw-r--r--net/ipv6/netfilter/Kconfig18
-rw-r--r--net/ipv6/netfilter/ip6_tables.c18
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c3
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c6
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c8
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c19
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c29
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c8
-rw-r--r--net/ipv6/netfilter/ip6table_security.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c32
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c10
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c4
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c32
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c41
-rw-r--r--net/ipv6/netfilter/nf_tables_ipv6.c12
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c29
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c6
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c7
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c11
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c6
-rw-r--r--net/ipv6/output_core.c37
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/raw.c28
-rw-r--r--net/ipv6/reassembly.c8
-rw-r--r--net/ipv6/route.c45
-rw-r--r--net/ipv6/sit.c42
-rw-r--r--net/ipv6/syncookies.c12
-rw-r--r--net/ipv6/sysctl_net_ipv6.c18
-rw-r--r--net/ipv6/tcp_ipv6.c170
-rw-r--r--net/ipv6/tcpv6_offload.c4
-rw-r--r--net/ipv6/udp.c43
-rw-r--r--net/ipv6/udp_impl.h7
-rw-r--r--net/ipv6/udp_offload.c4
-rw-r--r--net/ipv6/xfrm6_input.c3
-rw-r--r--net/ipv6/xfrm6_mode_beet.c4
-rw-r--r--net/ipv6/xfrm6_output.c15
-rw-r--r--net/ipv6/xfrm6_policy.c7
-rw-r--r--net/ipx/af_ipx.c7
-rw-r--r--net/irda/af_irda.c29
-rw-r--r--net/iucv/af_iucv.c8
-rw-r--r--net/key/af_key.c8
-rw-r--r--net/l2tp/l2tp_eth.c2
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/l2tp/l2tp_netlink.c18
-rw-r--r--net/l2tp/l2tp_ppp.c7
-rw-r--r--net/llc/af_llc.c7
-rw-r--r--net/mac80211/aes_ccm.c12
-rw-r--r--net/mac80211/aes_gcm.c12
-rw-r--r--net/mac80211/aes_gmac.c4
-rw-r--r--net/mac80211/agg-rx.c8
-rw-r--r--net/mac80211/agg-tx.c58
-rw-r--r--net/mac80211/cfg.c99
-rw-r--r--net/mac80211/debugfs.c170
-rw-r--r--net/mac80211/debugfs_netdev.c2
-rw-r--r--net/mac80211/debugfs_sta.c134
-rw-r--r--net/mac80211/driver-ops.h24
-rw-r--r--net/mac80211/ht.c2
-rw-r--r--net/mac80211/ibss.c343
-rw-r--r--net/mac80211/ieee80211_i.h67
-rw-r--r--net/mac80211/iface.c39
-rw-r--r--net/mac80211/key.c1
-rw-r--r--net/mac80211/key.h2
-rw-r--r--net/mac80211/main.c16
-rw-r--r--net/mac80211/mesh.c5
-rw-r--r--net/mac80211/mesh_plink.c10
-rw-r--r--net/mac80211/mlme.c236
-rw-r--r--net/mac80211/pm.c28
-rw-r--r--net/mac80211/rc80211_minstrel.c125
-rw-r--r--net/mac80211/rc80211_minstrel.h49
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c125
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c223
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h5
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c216
-rw-r--r--net/mac80211/rx.c39
-rw-r--r--net/mac80211/scan.c25
-rw-r--r--net/mac80211/sta_info.c239
-rw-r--r--net/mac80211/sta_info.h62
-rw-r--r--net/mac80211/status.c82
-rw-r--r--net/mac80211/tdls.c155
-rw-r--r--net/mac80211/trace.c1
-rw-r--r--net/mac80211/trace.h63
-rw-r--r--net/mac80211/trace_msg.h53
-rw-r--r--net/mac80211/tx.c381
-rw-r--r--net/mac80211/util.c190
-rw-r--r--net/mac80211/vht.c4
-rw-r--r--net/mac80211/wpa.c13
-rw-r--r--net/mac802154/cfg.c9
-rw-r--r--net/mac802154/driver-ops.h4
-rw-r--r--net/mac802154/ieee802154_i.h3
-rw-r--r--net/mac802154/iface.c25
-rw-r--r--net/mac802154/llsec.c4
-rw-r--r--net/mac802154/main.c7
-rw-r--r--net/mac802154/util.c13
-rw-r--r--net/mpls/Kconfig21
-rw-r--r--net/mpls/Makefile3
-rw-r--r--net/mpls/af_mpls.c1142
-rw-r--r--net/mpls/internal.h55
-rw-r--r--net/netfilter/Kconfig23
-rw-r--r--net/netfilter/Makefile2
-rw-r--r--net/netfilter/core.c31
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c32
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c101
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c182
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c102
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c23
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c15
-rw-r--r--net/netfilter/nf_conntrack_acct.c8
-rw-r--r--net/netfilter/nf_conntrack_amanda.c10
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_internals.h11
-rw-r--r--net/netfilter/nf_log_common.c7
-rw-r--r--net/netfilter/nf_queue.c76
-rw-r--r--net/netfilter/nf_tables_api.c639
-rw-r--r--net/netfilter/nf_tables_core.c161
-rw-r--r--net/netfilter/nfnetlink_log.c35
-rw-r--r--net/netfilter/nfnetlink_queue_core.c69
-rw-r--r--net/netfilter/nft_bitwise.c37
-rw-r--r--net/netfilter/nft_byteorder.c40
-rw-r--r--net/netfilter/nft_cmp.c44
-rw-r--r--net/netfilter/nft_compat.c41
-rw-r--r--net/netfilter/nft_counter.c3
-rw-r--r--net/netfilter/nft_ct.c118
-rw-r--r--net/netfilter/nft_dynset.c265
-rw-r--r--net/netfilter/nft_expr_template.c94
-rw-r--r--net/netfilter/nft_exthdr.c23
-rw-r--r--net/netfilter/nft_hash.c327
-rw-r--r--net/netfilter/nft_immediate.c18
-rw-r--r--net/netfilter/nft_limit.c5
-rw-r--r--net/netfilter/nft_log.c4
-rw-r--r--net/netfilter/nft_lookup.c35
-rw-r--r--net/netfilter/nft_meta.c116
-rw-r--r--net/netfilter/nft_nat.c71
-rw-r--r--net/netfilter/nft_payload.c24
-rw-r--r--net/netfilter/nft_queue.c4
-rw-r--r--net/netfilter/nft_rbtree.c132
-rw-r--r--net/netfilter/nft_redir.c19
-rw-r--r--net/netfilter/nft_reject.c2
-rw-r--r--net/netfilter/nft_reject_inet.c13
-rw-r--r--net/netfilter/xt_TPROXY.c22
-rw-r--r--net/netfilter/xt_cgroup.c2
-rw-r--r--net/netfilter/xt_physdev.c35
-rw-r--r--net/netfilter/xt_set.c4
-rw-r--r--net/netfilter/xt_socket.c129
-rw-r--r--net/netfilter/xt_string.c3
-rw-r--r--net/netlabel/netlabel_mgmt.c20
-rw-r--r--net/netlabel/netlabel_unlabeled.c28
-rw-r--r--net/netlink/af_netlink.c100
-rw-r--r--net/netrom/af_netrom.c7
-rw-r--r--net/netrom/nr_dev.c31
-rw-r--r--net/nfc/llcp_sock.c8
-rw-r--r--net/nfc/nci/core.c11
-rw-r--r--net/nfc/netlink.c2
-rw-r--r--net/nfc/rawsock.c7
-rw-r--r--net/openvswitch/Kconfig1
-rw-r--r--net/openvswitch/datapath.c4
-rw-r--r--net/openvswitch/datapath.h4
-rw-r--r--net/openvswitch/flow.c4
-rw-r--r--net/openvswitch/flow_netlink.c10
-rw-r--r--net/openvswitch/vport-vxlan.c5
-rw-r--r--net/packet/af_packet.c71
-rw-r--r--net/packet/internal.h4
-rw-r--r--net/phonet/datagram.c8
-rw-r--r--net/phonet/pep.c8
-rw-r--r--net/phonet/socket.c6
-rw-r--r--net/rds/connection.c20
-rw-r--r--net/rds/ib_cm.c13
-rw-r--r--net/rds/rds.h8
-rw-r--r--net/rds/recv.c4
-rw-r--r--net/rds/send.c36
-rw-r--r--net/rds/tcp_connect.c1
-rw-r--r--net/rds/tcp_listen.c46
-rw-r--r--net/rose/af_rose.c7
-rw-r--r--net/rose/rose_dev.c53
-rw-r--r--net/rxrpc/af_rxrpc.c7
-rw-r--r--net/rxrpc/ar-input.c23
-rw-r--r--net/rxrpc/ar-internal.h12
-rw-r--r--net/rxrpc/ar-local.c98
-rw-r--r--net/rxrpc/ar-output.c73
-rw-r--r--net/rxrpc/ar-recvmsg.c6
-rw-r--r--net/sched/act_bpf.c298
-rw-r--r--net/sched/act_connmark.c2
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/cls_api.c13
-rw-r--r--net/sched/cls_basic.c6
-rw-r--r--net/sched/cls_bpf.c224
-rw-r--r--net/sched/cls_cgroup.c6
-rw-r--r--net/sched/cls_flow.c6
-rw-r--r--net/sched/cls_fw.c34
-rw-r--r--net/sched/cls_route.c26
-rw-r--r--net/sched/cls_rsvp.h12
-rw-r--r--net/sched/cls_tcindex.c6
-rw-r--r--net/sched/cls_u32.c25
-rw-r--r--net/sched/em_text.c3
-rw-r--r--net/sched/sch_api.c14
-rw-r--r--net/sched/sch_codel.c2
-rw-r--r--net/sched/sch_fq.c4
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_gred.c4
-rw-r--r--net/sched/sch_ingress.c9
-rw-r--r--net/sched/sch_netem.c3
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/socket.c13
-rw-r--r--net/sctp/sysctl.c4
-rw-r--r--net/socket.c184
-rw-r--r--net/sunrpc/Kconfig2
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c23
-rw-r--r--net/sunrpc/cache.c8
-rw-r--r--net/sunrpc/rpc_pipe.c32
-rw-r--r--net/sunrpc/sched.c4
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/xprt.c22
-rw-r--r--net/sunrpc/xprtrdma/Makefile3
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c208
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c353
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c94
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c87
-rw-r--r--net/sunrpc/xprtrdma/transport.c61
-rw-r--r--net/sunrpc/xprtrdma/verbs.c699
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h90
-rw-r--r--net/switchdev/switchdev.c217
-rw-r--r--net/tipc/Kconfig8
-rw-r--r--net/tipc/Makefile1
-rw-r--r--net/tipc/addr.c7
-rw-r--r--net/tipc/addr.h1
-rw-r--r--net/tipc/bcast.c95
-rw-r--r--net/tipc/bcast.h4
-rw-r--r--net/tipc/bearer.c30
-rw-r--r--net/tipc/bearer.h17
-rw-r--r--net/tipc/discover.c11
-rw-r--r--net/tipc/eth_media.c8
-rw-r--r--net/tipc/ib_media.c2
-rw-r--r--net/tipc/link.c866
-rw-r--r--net/tipc/link.h51
-rw-r--r--net/tipc/msg.c130
-rw-r--r--net/tipc/msg.h133
-rw-r--r--net/tipc/name_distr.c4
-rw-r--r--net/tipc/name_table.c4
-rw-r--r--net/tipc/node.c116
-rw-r--r--net/tipc/node.h18
-rw-r--r--net/tipc/server.c42
-rw-r--r--net/tipc/socket.c275
-rw-r--r--net/tipc/socket.h4
-rw-r--r--net/tipc/subscr.c23
-rw-r--r--net/tipc/udp_media.c448
-rw-r--r--net/unix/af_unix.c58
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/unix/garbage.c70
-rw-r--r--net/vmw_vsock/af_vsock.c20
-rw-r--r--net/vmw_vsock/vmci_transport.c3
-rw-r--r--net/wireless/Kconfig10
-rw-r--r--net/wireless/ibss.c4
-rw-r--r--net/wireless/mlme.c6
-rw-r--r--net/wireless/nl80211.c98
-rw-r--r--net/wireless/rdev-ops.h5
-rw-r--r--net/wireless/reg.c145
-rw-r--r--net/wireless/reg.h24
-rw-r--r--net/wireless/scan.c302
-rw-r--r--net/wireless/sme.c90
-rw-r--r--net/wireless/trace.h41
-rw-r--r--net/wireless/util.c171
-rw-r--r--net/wireless/wext-compat.c18
-rw-r--r--net/wireless/wext-compat.h6
-rw-r--r--net/wireless/wext-sme.c2
-rw-r--r--net/x25/af_x25.c6
-rw-r--r--net/xfrm/xfrm_input.c10
-rw-r--r--net/xfrm/xfrm_output.c16
-rw-r--r--net/xfrm/xfrm_state.c8
-rw-r--r--net/xfrm/xfrm_user.c5
-rw-r--r--samples/bpf/Makefile17
-rw-r--r--samples/bpf/bpf_helpers.h13
-rw-r--r--samples/bpf/bpf_load.c125
-rw-r--r--samples/bpf/bpf_load.h3
-rw-r--r--samples/bpf/libbpf.c14
-rw-r--r--samples/bpf/libbpf.h9
-rw-r--r--samples/bpf/sock_example.c2
-rw-r--r--samples/bpf/sockex1_kern.c8
-rw-r--r--samples/bpf/sockex1_user.c2
-rw-r--r--samples/bpf/sockex2_kern.c26
-rw-r--r--samples/bpf/sockex2_user.c11
-rw-r--r--samples/bpf/tcbpf1_kern.c67
-rw-r--r--samples/bpf/test_verifier.c108
-rw-r--r--samples/bpf/tracex1_kern.c50
-rw-r--r--samples/bpf/tracex1_user.c25
-rw-r--r--samples/bpf/tracex2_kern.c86
-rw-r--r--samples/bpf/tracex2_user.c95
-rw-r--r--samples/bpf/tracex3_kern.c89
-rw-r--r--samples/bpf/tracex3_user.c150
-rw-r--r--samples/bpf/tracex4_kern.c54
-rw-r--r--samples/bpf/tracex4_user.c69
-rwxr-xr-xsamples/pktgen/pktgen.conf-1-159
-rwxr-xr-xsamples/pktgen/pktgen.conf-1-1-flows67
-rwxr-xr-xsamples/pktgen/pktgen.conf-1-1-ip660
-rwxr-xr-xsamples/pktgen/pktgen.conf-1-1-ip6-rdos63
-rwxr-xr-xsamples/pktgen/pktgen.conf-1-1-rdos64
-rwxr-xr-xsamples/pktgen/pktgen.conf-1-269
-rwxr-xr-xsamples/pktgen/pktgen.conf-2-166
-rwxr-xr-xsamples/pktgen/pktgen.conf-2-273
-rw-r--r--samples/trace_events/trace-events-sample.h84
-rw-r--r--scripts/Makefile.dtbinst2
-rw-r--r--scripts/Makefile.fwinst2
-rw-r--r--scripts/Makefile.kasan8
-rw-r--r--scripts/Makefile.lib3
-rwxr-xr-xscripts/check_extable.sh146
-rwxr-xr-x[-rw-r--r--]scripts/checkkconfigsymbols.py147
-rwxr-xr-xscripts/checkpatch.pl164
-rw-r--r--scripts/coccinelle/misc/bugon.cocci2
-rw-r--r--scripts/coccinelle/misc/irqf_oneshot.cocci24
-rwxr-xr-xscripts/extract-ikconfig1
-rw-r--r--scripts/kallsyms.c29
-rw-r--r--scripts/kconfig/Makefile80
-rw-r--r--scripts/kconfig/conf.c8
-rw-r--r--scripts/kconfig/confdata.c5
-rw-r--r--scripts/kconfig/expr.c22
-rw-r--r--scripts/kconfig/expr.h5
-rw-r--r--scripts/kconfig/gconf.c29
-rw-r--r--scripts/kconfig/lkc.h14
-rw-r--r--scripts/kconfig/lkc_proto.h85
-rw-r--r--scripts/kconfig/mconf.c31
-rw-r--r--scripts/kconfig/menu.c4
-rwxr-xr-xscripts/kconfig/merge_config.sh27
-rw-r--r--scripts/kconfig/nconf.c5
-rw-r--r--scripts/kconfig/qconf.cc5
-rw-r--r--scripts/kconfig/symbol.c42
-rw-r--r--scripts/kconfig/util.c10
-rw-r--r--scripts/mod/devicetable-offsets.c3
-rw-r--r--scripts/mod/file2alias.c16
-rw-r--r--scripts/mod/modpost.c341
-rw-r--r--scripts/spelling.txt1
-rw-r--r--scripts/xen-hypercalls.sh12
-rw-r--r--security/Kconfig1
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--security/apparmor/file.c6
-rw-r--r--security/apparmor/lsm.c24
-rw-r--r--security/capability.c8
-rw-r--r--security/commoncap.c6
-rw-r--r--security/inode.c18
-rw-r--r--security/integrity/evm/evm_crypto.c4
-rw-r--r--security/integrity/evm/evm_main.c18
-rw-r--r--security/integrity/ima/ima_appraise.c10
-rw-r--r--security/keys/compat.c29
-rw-r--r--security/keys/internal.h5
-rw-r--r--security/keys/keyctl.c78
-rw-r--r--security/lsm_audit.c19
-rw-r--r--security/security.c59
-rw-r--r--security/selinux/avc.c6
-rw-r--r--security/selinux/hooks.c83
-rw-r--r--security/selinux/nlmsgtab.c13
-rw-r--r--security/selinux/selinuxfs.c2
-rw-r--r--security/selinux/ss/avtab.c72
-rw-r--r--security/selinux/ss/avtab.h8
-rw-r--r--security/selinux/ss/mls.c10
-rw-r--r--security/selinux/ss/services.c6
-rw-r--r--security/smack/smack.h8
-rw-r--r--security/smack/smack_access.c43
-rw-r--r--security/smack/smack_lsm.c156
-rw-r--r--security/smack/smack_netfilter.c8
-rw-r--r--security/smack/smackfs.c99
-rw-r--r--security/tomoyo/.gitignore2
-rw-r--r--security/tomoyo/Kconfig1
-rw-r--r--security/tomoyo/Makefile55
-rw-r--r--security/tomoyo/common.h4
-rw-r--r--security/tomoyo/condition.c2
-rw-r--r--security/tomoyo/file.c4
-rw-r--r--security/tomoyo/policy/exception_policy.conf.default2
-rw-r--r--security/tomoyo/realpath.c16
-rw-r--r--security/tomoyo/tomoyo.c5
-rw-r--r--security/tomoyo/util.c13
-rw-r--r--security/yama/Kconfig2
-rw-r--r--security/yama/yama_lsm.c13
-rw-r--r--sound/Kconfig2
-rw-r--r--sound/Makefile2
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c2
-rw-r--r--sound/core/control.c329
-rw-r--r--sound/core/device.c47
-rw-r--r--sound/core/hwdep.c4
-rw-r--r--sound/core/init.c5
-rw-r--r--sound/core/oss/mixer_oss.c4
-rw-r--r--sound/core/oss/pcm_oss.c1
-rw-r--r--sound/core/pcm.c105
-rw-r--r--sound/core/pcm_compat.c28
-rw-r--r--sound/core/pcm_dmaengine.c4
-rw-r--r--sound/core/pcm_lib.c88
-rw-r--r--sound/core/pcm_native.c82
-rw-r--r--sound/core/rawmidi.c8
-rw-r--r--sound/core/seq/oss/seq_oss.c22
-rw-r--r--sound/core/seq/oss/seq_oss_init.c4
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c5
-rw-r--r--sound/core/seq/oss/seq_oss_readq.c9
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c12
-rw-r--r--sound/core/seq/oss/seq_oss_synth.h4
-rw-r--r--sound/core/seq/seq_clientmgr.c1
-rw-r--r--sound/core/seq/seq_device.c571
-rw-r--r--sound/core/seq/seq_dummy.c6
-rw-r--r--sound/core/seq/seq_fifo.c4
-rw-r--r--sound/core/seq/seq_memory.c8
-rw-r--r--sound/core/seq/seq_midi.c36
-rw-r--r--sound/core/seq/seq_ports.c4
-rw-r--r--sound/core/seq/seq_prioq.c4
-rw-r--r--sound/core/seq/seq_queue.c4
-rw-r--r--sound/core/seq/seq_timer.c4
-rw-r--r--sound/core/sound.c14
-rw-r--r--sound/core/timer.c4
-rw-r--r--sound/drivers/opl3/opl3_seq.c34
-rw-r--r--sound/drivers/opl4/opl4_seq.c33
-rw-r--r--sound/firewire/amdtp.c8
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c2
-rw-r--r--sound/hda/Kconfig3
-rw-r--r--sound/hda/Makefile7
-rw-r--r--sound/hda/array.c49
-rw-r--r--sound/hda/hda_bus_type.c42
-rw-r--r--sound/hda/hdac_bus.c186
-rw-r--r--sound/hda/hdac_device.c599
-rw-r--r--sound/hda/hdac_regmap.c472
-rw-r--r--sound/hda/hdac_sysfs.c406
-rw-r--r--sound/hda/local.h23
-rw-r--r--sound/hda/trace.c6
-rw-r--r--sound/hda/trace.h62
-rw-r--r--sound/i2c/other/ak4113.c4
-rw-r--r--sound/isa/sb/emu8000_synth.c35
-rw-r--r--sound/isa/wavefront/wavefront_fx.c6
-rw-r--r--sound/isa/wavefront/wavefront_synth.c26
-rw-r--r--sound/mips/au1x00.c12
-rw-r--r--sound/oss/dev_table.c6
-rw-r--r--sound/oss/opl3.c4
-rw-r--r--sound/oss/sb_ess.c19
-rw-r--r--sound/oss/sb_midi.c6
-rw-r--r--sound/oss/sequencer.c12
-rw-r--r--sound/oss/sys_timer.c35
-rw-r--r--sound/oss/v_midi.c4
-rw-r--r--sound/pci/ac97/ac97_codec.c4
-rw-r--r--sound/pci/ac97/ac97_patch.c27
-rw-r--r--sound/pci/ad1889.c2
-rw-r--r--sound/pci/asihpi/asihpi.c2
-rw-r--r--sound/pci/atiixp.c2
-rw-r--r--sound/pci/azt3328.c7
-rw-r--r--sound/pci/cmipci.c2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.c16
-rw-r--r--sound/pci/emu10k1/emu10k1.c6
-rw-r--r--sound/pci/emu10k1/emu10k1_callback.c4
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c46
-rw-r--r--sound/pci/emu10k1/emu10k1_synth.c35
-rw-r--r--sound/pci/emu10k1/emumixer.c118
-rw-r--r--sound/pci/emu10k1/emupcm.c2
-rw-r--r--sound/pci/emu10k1/emuproc.c12
-rw-r--r--sound/pci/emu10k1/memory.c11
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/Makefile3
-rw-r--r--sound/pci/hda/hda_auto_parser.c33
-rw-r--r--sound/pci/hda/hda_beep.c37
-rw-r--r--sound/pci/hda/hda_beep.h1
-rw-r--r--sound/pci/hda/hda_bind.c273
-rw-r--r--sound/pci/hda/hda_codec.c2311
-rw-r--r--sound/pci/hda/hda_codec.h288
-rw-r--r--sound/pci/hda/hda_controller.c269
-rw-r--r--sound/pci/hda/hda_controller.h397
-rw-r--r--sound/pci/hda/hda_generic.c602
-rw-r--r--sound/pci/hda/hda_generic.h9
-rw-r--r--sound/pci/hda/hda_hwdep.c5
-rw-r--r--sound/pci/hda/hda_i915.c8
-rw-r--r--sound/pci/hda/hda_intel.c104
-rw-r--r--sound/pci/hda/hda_intel.h2
-rw-r--r--sound/pci/hda/hda_jack.c8
-rw-r--r--sound/pci/hda/hda_local.h123
-rw-r--r--sound/pci/hda/hda_priv.h406
-rw-r--r--sound/pci/hda/hda_proc.c80
-rw-r--r--sound/pci/hda/hda_sysfs.c62
-rw-r--r--sound/pci/hda/hda_tegra.c50
-rw-r--r--sound/pci/hda/hda_trace.h143
-rw-r--r--sound/pci/hda/local.h39
-rw-r--r--sound/pci/hda/patch_analog.c34
-rw-r--r--sound/pci/hda/patch_ca0110.c16
-rw-r--r--sound/pci/hda/patch_ca0132.c68
-rw-r--r--sound/pci/hda/patch_cirrus.c16
-rw-r--r--sound/pci/hda/patch_cmedia.c16
-rw-r--r--sound/pci/hda/patch_conexant.c41
-rw-r--r--sound/pci/hda/patch_hdmi.c99
-rw-r--r--sound/pci/hda/patch_realtek.c354
-rw-r--r--sound/pci/hda/patch_si3054.c37
-rw-r--r--sound/pci/hda/patch_sigmatel.c138
-rw-r--r--sound/pci/hda/patch_via.c778
-rw-r--r--sound/pci/hda/thinkpad_helper.c3
-rw-r--r--sound/pci/ice1712/wtm.c172
-rw-r--r--sound/pci/intel8x0.c6
-rw-r--r--sound/pci/rme9652/hdspm.c141
-rw-r--r--sound/pci/via82xx.c2
-rw-r--r--sound/ppc/pmac.c58
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/atmel/Kconfig9
-rw-r--r--sound/soc/atmel/Makefile2
-rw-r--r--sound/soc/atmel/atmel-pcm-dma.c4
-rw-r--r--sound/soc/atmel/atmel-pcm-pdc.c79
-rw-r--r--sound/soc/atmel/atmel-pcm.c121
-rw-r--r--sound/soc/atmel/atmel-pcm.h5
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c111
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.h1
-rw-r--r--sound/soc/codecs/Kconfig22
-rw-r--r--sound/soc/codecs/Makefile6
-rw-r--r--sound/soc/codecs/ab8500-codec.c2
-rw-r--r--sound/soc/codecs/adau1977.c17
-rw-r--r--sound/soc/codecs/ak4554.c2
-rw-r--r--sound/soc/codecs/ak4642.c41
-rw-r--r--sound/soc/codecs/arizona.c13
-rw-r--r--sound/soc/codecs/cs35l32.c19
-rw-r--r--sound/soc/codecs/cs4265.c19
-rw-r--r--sound/soc/codecs/cs4271.c4
-rw-r--r--sound/soc/codecs/cx20442.c4
-rw-r--r--sound/soc/codecs/max98090.c17
-rw-r--r--sound/soc/codecs/max98357a.c11
-rw-r--r--sound/soc/codecs/max98925.c655
-rw-r--r--sound/soc/codecs/max98925.h832
-rw-r--r--sound/soc/codecs/pcm512x.c189
-rw-r--r--sound/soc/codecs/rt286.c40
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/rt5645.c103
-rw-r--r--sound/soc/codecs/rt5645.h2
-rw-r--r--sound/soc/codecs/rt5670.c213
-rw-r--r--sound/soc/codecs/rt5670.h10
-rw-r--r--sound/soc/codecs/rt5677.c212
-rw-r--r--sound/soc/codecs/rt5677.h85
-rw-r--r--sound/soc/codecs/sn95031.c14
-rw-r--r--sound/soc/codecs/sn95031.h3
-rw-r--r--sound/soc/codecs/sta350.c30
-rw-r--r--sound/soc/codecs/tas2552.c13
-rw-r--r--sound/soc/codecs/tfa9879.c4
-rw-r--r--sound/soc/codecs/tlv320aic23-i2c.c4
-rw-r--r--sound/soc/codecs/wm2200.c9
-rw-r--r--sound/soc/codecs/wm5100.c7
-rw-r--r--sound/soc/codecs/wm5102.c1
-rw-r--r--sound/soc/codecs/wm8350.c25
-rw-r--r--sound/soc/codecs/wm8731.c34
-rw-r--r--sound/soc/codecs/wm8741.c8
-rw-r--r--sound/soc/codecs/wm8753.c73
-rw-r--r--sound/soc/codecs/wm8804-i2c.c65
-rw-r--r--sound/soc/codecs/wm8804-spi.c57
-rw-r--r--sound/soc/codecs/wm8804.c534
-rw-r--r--sound/soc/codecs/wm8804.h8
-rw-r--r--sound/soc/codecs/wm8971.c99
-rw-r--r--sound/soc/codecs/wm8996.c12
-rw-r--r--sound/soc/codecs/wm_adsp.c13
-rw-r--r--sound/soc/davinci/Kconfig18
-rw-r--r--sound/soc/davinci/Makefile2
-rw-r--r--sound/soc/davinci/davinci-evm.c17
-rw-r--r--sound/soc/davinci/davinci-i2s.c67
-rw-r--r--sound/soc/davinci/davinci-mcasp.c335
-rw-r--r--sound/soc/davinci/davinci-pcm.c861
-rw-r--r--sound/soc/davinci/davinci-pcm.h41
-rw-r--r--sound/soc/davinci/davinci-vcif.c55
-rw-r--r--sound/soc/fsl/Kconfig4
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c6
-rw-r--r--sound/soc/fsl/fsl_ssi.c34
-rw-r--r--sound/soc/fsl/imx-es8328.c6
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c2
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c2
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c2
-rw-r--r--sound/soc/fsl/wm1133-ev1.c15
-rw-r--r--sound/soc/generic/simple-card.c50
-rw-r--r--sound/soc/intel/Makefile42
-rw-r--r--sound/soc/intel/atom/Makefile7
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c (renamed from sound/soc/intel/sst-atom-controls.c)0
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.h (renamed from sound/soc/intel/sst-atom-controls.h)0
-rw-r--r--sound/soc/intel/atom/sst-mfld-dsp.h (renamed from sound/soc/intel/sst-mfld-dsp.h)0
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-compress.c (renamed from sound/soc/intel/sst-mfld-platform-compress.c)0
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c (renamed from sound/soc/intel/sst-mfld-platform-pcm.c)60
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform.h (renamed from sound/soc/intel/sst-mfld-platform.h)1
-rw-r--r--sound/soc/intel/atom/sst/Makefile (renamed from sound/soc/intel/sst/Makefile)0
-rw-r--r--sound/soc/intel/atom/sst/sst.c (renamed from sound/soc/intel/sst/sst.c)130
-rw-r--r--sound/soc/intel/atom/sst/sst.h (renamed from sound/soc/intel/sst/sst.h)12
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c (renamed from sound/soc/intel/sst/sst_acpi.c)4
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c (renamed from sound/soc/intel/sst/sst_drv_interface.c)69
-rw-r--r--sound/soc/intel/atom/sst/sst_ipc.c (renamed from sound/soc/intel/sst/sst_ipc.c)2
-rw-r--r--sound/soc/intel/atom/sst/sst_loader.c (renamed from sound/soc/intel/sst/sst_loader.c)12
-rw-r--r--sound/soc/intel/atom/sst/sst_pci.c (renamed from sound/soc/intel/sst/sst_pci.c)0
-rw-r--r--sound/soc/intel/atom/sst/sst_pvt.c (renamed from sound/soc/intel/sst/sst_pvt.c)26
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c (renamed from sound/soc/intel/sst/sst_stream.c)2
-rw-r--r--sound/soc/intel/baytrail/Makefile4
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-dsp.c (renamed from sound/soc/intel/sst-baytrail-dsp.c)4
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c (renamed from sound/soc/intel/sst-baytrail-ipc.c)363
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.h (renamed from sound/soc/intel/sst-baytrail-ipc.h)0
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-pcm.c (renamed from sound/soc/intel/sst-baytrail-pcm.c)4
-rw-r--r--sound/soc/intel/boards/Makefile15
-rw-r--r--sound/soc/intel/boards/broadwell.c (renamed from sound/soc/intel/broadwell.c)50
-rw-r--r--sound/soc/intel/boards/byt-max98090.c (renamed from sound/soc/intel/byt-max98090.c)13
-rw-r--r--sound/soc/intel/boards/byt-rt5640.c (renamed from sound/soc/intel/byt-rt5640.c)4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c (renamed from sound/soc/intel/bytcr_dpcm_rt5640.c)8
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c (renamed from sound/soc/intel/cht_bsw_rt5645.c)20
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c (renamed from sound/soc/intel/cht_bsw_rt5672.c)108
-rw-r--r--sound/soc/intel/boards/haswell.c (renamed from sound/soc/intel/haswell.c)10
-rw-r--r--sound/soc/intel/boards/mfld_machine.c (renamed from sound/soc/intel/mfld_machine.c)24
-rw-r--r--sound/soc/intel/common/Makefile7
-rw-r--r--sound/soc/intel/common/sst-acpi.c (renamed from sound/soc/intel/sst-acpi.c)1
-rw-r--r--sound/soc/intel/common/sst-dsp-priv.h (renamed from sound/soc/intel/sst-dsp-priv.h)13
-rw-r--r--sound/soc/intel/common/sst-dsp.c (renamed from sound/soc/intel/sst-dsp.c)0
-rw-r--r--sound/soc/intel/common/sst-dsp.h (renamed from sound/soc/intel/sst-dsp.h)2
-rw-r--r--sound/soc/intel/common/sst-firmware.c (renamed from sound/soc/intel/sst-firmware.c)10
-rw-r--r--sound/soc/intel/common/sst-ipc.c294
-rw-r--r--sound/soc/intel/common/sst-ipc.h91
-rw-r--r--sound/soc/intel/haswell/Makefile4
-rw-r--r--sound/soc/intel/haswell/sst-haswell-dsp.c (renamed from sound/soc/intel/sst-haswell-dsp.c)9
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c (renamed from sound/soc/intel/sst-haswell-ipc.c)793
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.h (renamed from sound/soc/intel/sst-haswell-ipc.h)53
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c (renamed from sound/soc/intel/sst-haswell-pcm.c)140
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c84
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c2
-rw-r--r--sound/soc/nuc900/nuc900-audio.h3
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c31
-rw-r--r--sound/soc/omap/Kconfig4
-rw-r--r--sound/soc/omap/ams-delta.c4
-rw-r--r--sound/soc/omap/n810.c23
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c10
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c2
-rw-r--r--sound/soc/omap/omap-pcm.c21
-rw-r--r--sound/soc/omap/omap-twl4030.c12
-rw-r--r--sound/soc/omap/rx51.c6
-rw-r--r--sound/soc/pxa/hx4700.c11
-rw-r--r--sound/soc/pxa/palm27x.c11
-rw-r--r--sound/soc/pxa/ttc-dkb.c15
-rw-r--r--sound/soc/pxa/z2.c10
-rw-r--r--sound/soc/qcom/Kconfig25
-rw-r--r--sound/soc/qcom/Makefile11
-rw-r--r--sound/soc/qcom/lpass-cpu.c491
-rw-r--r--sound/soc/qcom/lpass-lpaif-ipq806x.h172
-rw-r--r--sound/soc/qcom/lpass-platform.c526
-rw-r--r--sound/soc/qcom/lpass.h51
-rw-r--r--sound/soc/qcom/storm.c162
-rw-r--r--sound/soc/samsung/h1940_uda1380.c9
-rw-r--r--sound/soc/samsung/littlemill.c12
-rw-r--r--sound/soc/samsung/lowland.c14
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c9
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c4
-rw-r--r--sound/soc/samsung/smartq_wm8987.c11
-rw-r--r--sound/soc/samsung/speyside.c14
-rw-r--r--sound/soc/samsung/tobermory.c13
-rw-r--r--sound/soc/sh/Kconfig6
-rw-r--r--sound/soc/sh/fsi.c89
-rw-r--r--sound/soc/sh/rcar/Makefile7
-rw-r--r--sound/soc/sh/rcar/adg.c4
-rw-r--r--sound/soc/sh/rcar/core.c278
-rw-r--r--sound/soc/sh/rcar/dma.c617
-rw-r--r--sound/soc/sh/rcar/dvc.c45
-rw-r--r--sound/soc/sh/rcar/gen.c152
-rw-r--r--sound/soc/sh/rcar/rsnd.h92
-rw-r--r--sound/soc/sh/rcar/rsrc-card.c512
-rw-r--r--sound/soc/sh/rcar/src.c250
-rw-r--r--sound/soc/sh/rcar/ssi.c73
-rw-r--r--sound/soc/soc-core.c128
-rw-r--r--sound/soc/soc-dapm.c182
-rw-r--r--sound/soc/soc-jack.c42
-rw-r--r--sound/soc/soc-pcm.c17
-rw-r--r--sound/soc/tegra/tegra_alc5632.c14
-rw-r--r--sound/soc/tegra/tegra_max98090.c26
-rw-r--r--sound/soc/tegra/tegra_rt5640.c10
-rw-r--r--sound/soc/tegra/tegra_rt5677.c20
-rw-r--r--sound/soc/tegra/tegra_wm8903.c21
-rw-r--r--sound/soc/tegra/tegra_wm9712.c6
-rw-r--r--sound/soc/ux500/mop500_ab8500.c36
-rw-r--r--sound/synth/emux/emux_oss.c11
-rw-r--r--sound/synth/emux/emux_seq.c29
-rw-r--r--sound/usb/format.c5
-rw-r--r--sound/usb/quirks-table.h30
-rw-r--r--sound/usb/quirks.c41
-rw-r--r--tools/build/Build.include81
-rw-r--r--tools/build/Documentation/Build.txt139
-rw-r--r--tools/build/Makefile.build130
-rw-r--r--tools/build/Makefile.feature171
-rw-r--r--tools/build/feature/.gitignore (renamed from tools/perf/config/feature-checks/.gitignore)1
-rw-r--r--tools/build/feature/Makefile (renamed from tools/perf/config/feature-checks/Makefile)31
-rw-r--r--tools/build/feature/test-all.c (renamed from tools/perf/config/feature-checks/test-all.c)19
-rw-r--r--tools/build/feature/test-backtrace.c (renamed from tools/perf/config/feature-checks/test-backtrace.c)0
-rw-r--r--tools/build/feature/test-bionic.c (renamed from tools/perf/config/feature-checks/test-bionic.c)0
-rw-r--r--tools/build/feature/test-compile.c (renamed from tools/perf/config/feature-checks/test-compile.c)0
-rw-r--r--tools/build/feature/test-cplus-demangle.c (renamed from tools/perf/config/feature-checks/test-cplus-demangle.c)0
-rw-r--r--tools/build/feature/test-dwarf.c (renamed from tools/perf/config/feature-checks/test-dwarf.c)0
-rw-r--r--tools/build/feature/test-fortify-source.c (renamed from tools/perf/config/feature-checks/test-fortify-source.c)0
-rw-r--r--tools/build/feature/test-glibc.c (renamed from tools/perf/config/feature-checks/test-glibc.c)0
-rw-r--r--tools/build/feature/test-gtk2-infobar.c (renamed from tools/perf/config/feature-checks/test-gtk2-infobar.c)0
-rw-r--r--tools/build/feature/test-gtk2.c (renamed from tools/perf/config/feature-checks/test-gtk2.c)0
-rw-r--r--tools/build/feature/test-hello.c (renamed from tools/perf/config/feature-checks/test-hello.c)0
-rw-r--r--tools/build/feature/test-libaudit.c (renamed from tools/perf/config/feature-checks/test-libaudit.c)0
-rw-r--r--tools/build/feature/test-libbabeltrace.c9
-rw-r--r--tools/build/feature/test-libbfd.c (renamed from tools/perf/config/feature-checks/test-libbfd.c)0
-rw-r--r--tools/build/feature/test-libdw-dwarf-unwind.c (renamed from tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c)0
-rw-r--r--tools/build/feature/test-libelf-getphdrnum.c (renamed from tools/perf/config/feature-checks/test-libelf-getphdrnum.c)0
-rw-r--r--tools/build/feature/test-libelf-mmap.c (renamed from tools/perf/config/feature-checks/test-libelf-mmap.c)0
-rw-r--r--tools/build/feature/test-libelf.c (renamed from tools/perf/config/feature-checks/test-libelf.c)0
-rw-r--r--tools/build/feature/test-libnuma.c (renamed from tools/perf/config/feature-checks/test-libnuma.c)0
-rw-r--r--tools/build/feature/test-libperl.c (renamed from tools/perf/config/feature-checks/test-libperl.c)0
-rw-r--r--tools/build/feature/test-libpython-version.c (renamed from tools/perf/config/feature-checks/test-libpython-version.c)0
-rw-r--r--tools/build/feature/test-libpython.c (renamed from tools/perf/config/feature-checks/test-libpython.c)0
-rw-r--r--tools/build/feature/test-libslang.c (renamed from tools/perf/config/feature-checks/test-libslang.c)0
-rw-r--r--tools/build/feature/test-libunwind-debug-frame.c (renamed from tools/perf/config/feature-checks/test-libunwind-debug-frame.c)0
-rw-r--r--tools/build/feature/test-libunwind.c (renamed from tools/perf/config/feature-checks/test-libunwind.c)0
-rw-r--r--tools/build/feature/test-lzma.c10
-rw-r--r--tools/build/feature/test-pthread-attr-setaffinity-np.c (renamed from tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c)4
-rw-r--r--tools/build/feature/test-stackprotector-all.c (renamed from tools/perf/config/feature-checks/test-stackprotector-all.c)0
-rw-r--r--tools/build/feature/test-sync-compare-and-swap.c (renamed from tools/perf/config/feature-checks/test-sync-compare-and-swap.c)0
-rw-r--r--tools/build/feature/test-timerfd.c (renamed from tools/perf/config/feature-checks/test-timerfd.c)0
-rw-r--r--tools/build/feature/test-zlib.c (renamed from tools/perf/config/feature-checks/test-zlib.c)0
-rw-r--r--tools/build/tests/ex/Build8
-rw-r--r--tools/build/tests/ex/Makefile23
-rw-r--r--tools/build/tests/ex/a.c5
-rw-r--r--tools/build/tests/ex/arch/Build2
-rw-r--r--tools/build/tests/ex/arch/e.c5
-rw-r--r--tools/build/tests/ex/arch/f.c5
-rw-r--r--tools/build/tests/ex/b.c5
-rw-r--r--tools/build/tests/ex/c.c5
-rw-r--r--tools/build/tests/ex/d.c5
-rw-r--r--tools/build/tests/ex/empty/Build0
-rw-r--r--tools/build/tests/ex/ex.c19
-rwxr-xr-xtools/build/tests/run.sh42
-rw-r--r--tools/hv/Makefile2
-rw-r--r--tools/hv/hv_vss_daemon.c10
-rw-r--r--tools/lib/api/Build2
-rw-r--r--tools/lib/api/Makefile58
-rw-r--r--tools/lib/api/fd/Build1
-rw-r--r--tools/lib/api/fs/Build4
-rw-r--r--tools/lib/api/fs/debugfs.c69
-rw-r--r--tools/lib/api/fs/debugfs.h13
-rw-r--r--tools/lib/api/fs/findfs.c63
-rw-r--r--tools/lib/api/fs/findfs.h23
-rw-r--r--tools/lib/api/fs/tracefs.c78
-rw-r--r--tools/lib/api/fs/tracefs.h21
-rw-r--r--tools/lib/lockdep/Build1
-rw-r--r--tools/lib/lockdep/Makefile135
-rw-r--r--tools/lib/lockdep/uinclude/linux/kernel.h3
-rw-r--r--tools/lib/traceevent/Build17
-rw-r--r--tools/lib/traceevent/Makefile169
-rw-r--r--tools/lib/traceevent/event-parse.c307
-rw-r--r--tools/lib/traceevent/event-parse.h24
-rw-r--r--tools/lib/traceevent/event-plugin.c60
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c12
-rw-r--r--tools/lib/traceevent/kbuffer.h1
-rw-r--r--tools/lib/traceevent/parse-filter.c2
-rw-r--r--tools/lib/traceevent/trace-seq.c13
-rw-r--r--tools/net/bpf_exp.l6
-rw-r--r--tools/net/bpf_exp.y23
-rw-r--r--tools/perf/.gitignore1
-rw-r--r--tools/perf/Build44
-rw-r--r--tools/perf/Documentation/Build.txt49
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt24
-rw-r--r--tools/perf/Documentation/perf-data.txt40
-rw-r--r--tools/perf/Documentation/perf-diff.txt8
-rw-r--r--tools/perf/Documentation/perf-kmem.txt12
-rw-r--r--tools/perf/Documentation/perf-list.txt7
-rw-r--r--tools/perf/Documentation/perf-probe.txt16
-rw-r--r--tools/perf/Documentation/perf-record.txt30
-rw-r--r--tools/perf/Documentation/perf-report.txt5
-rw-r--r--tools/perf/Documentation/perf-script.txt6
-rw-r--r--tools/perf/Documentation/perf-trace.txt6
-rw-r--r--tools/perf/Documentation/perf.txt7
-rw-r--r--tools/perf/MANIFEST1
-rw-r--r--tools/perf/Makefile4
-rw-r--r--tools/perf/Makefile.perf622
-rw-r--r--tools/perf/arch/Build2
-rw-r--r--tools/perf/arch/arm/Build2
-rw-r--r--tools/perf/arch/arm/Makefile11
-rw-r--r--tools/perf/arch/arm/tests/Build2
-rw-r--r--tools/perf/arch/arm/util/Build4
-rw-r--r--tools/perf/arch/arm64/Build1
-rw-r--r--tools/perf/arch/arm64/Makefile4
-rw-r--r--tools/perf/arch/arm64/util/Build2
-rw-r--r--tools/perf/arch/powerpc/Build1
-rw-r--r--tools/perf/arch/powerpc/Makefile3
-rw-r--r--tools/perf/arch/powerpc/util/Build4
-rw-r--r--tools/perf/arch/s390/Build1
-rw-r--r--tools/perf/arch/s390/Makefile3
-rw-r--r--tools/perf/arch/s390/util/Build4
-rw-r--r--tools/perf/arch/sh/Build1
-rw-r--r--tools/perf/arch/sh/Makefile1
-rw-r--r--tools/perf/arch/sh/util/Build1
-rw-r--r--tools/perf/arch/sparc/Build1
-rw-r--r--tools/perf/arch/sparc/Makefile1
-rw-r--r--tools/perf/arch/sparc/util/Build1
-rw-r--r--tools/perf/arch/x86/Build2
-rw-r--r--tools/perf/arch/x86/Makefile15
-rw-r--r--tools/perf/arch/x86/tests/Build2
-rw-r--r--tools/perf/arch/x86/util/Build8
-rw-r--r--tools/perf/bench/Build11
-rw-r--r--tools/perf/bench/futex-requeue.c15
-rw-r--r--tools/perf/bench/numa.c12
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-buildid-cache.c107
-rw-r--r--tools/perf/builtin-buildid-list.c2
-rw-r--r--tools/perf/builtin-data.c123
-rw-r--r--tools/perf/builtin-diff.c6
-rw-r--r--tools/perf/builtin-evlist.c2
-rw-r--r--tools/perf/builtin-help.c17
-rw-r--r--tools/perf/builtin-inject.c16
-rw-r--r--tools/perf/builtin-kmem.c538
-rw-r--r--tools/perf/builtin-kvm.c15
-rw-r--r--tools/perf/builtin-list.c28
-rw-r--r--tools/perf/builtin-lock.c7
-rw-r--r--tools/perf/builtin-mem.c7
-rw-r--r--tools/perf/builtin-probe.c19
-rw-r--r--tools/perf/builtin-record.c108
-rw-r--r--tools/perf/builtin-report.c18
-rw-r--r--tools/perf/builtin-sched.c101
-rw-r--r--tools/perf/builtin-script.c34
-rw-r--r--tools/perf/builtin-stat.c98
-rw-r--r--tools/perf/builtin-timechart.c12
-rw-r--r--tools/perf/builtin-top.c12
-rw-r--r--tools/perf/builtin-trace.c285
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/command-list.txt1
-rw-r--r--tools/perf/config/Makefile293
-rw-r--r--tools/perf/config/Makefile.arch29
-rw-r--r--tools/perf/config/utilities.mak3
-rw-r--r--tools/perf/perf-completion.sh94
-rw-r--r--tools/perf/perf.c29
-rw-r--r--tools/perf/perf.h5
-rw-r--r--tools/perf/scripts/Build2
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build3
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Build3
-rw-r--r--tools/perf/tests/Build43
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/base-stat2
-rw-r--r--tools/perf/tests/builtin-test.c9
-rw-r--r--tools/perf/tests/dso-data.c22
-rw-r--r--tools/perf/tests/kmod-path.c73
-rw-r--r--tools/perf/tests/make2
-rw-r--r--tools/perf/tests/open-syscall-all-cpus.c7
-rw-r--r--tools/perf/tests/open-syscall.c7
-rw-r--r--tools/perf/tests/parse-events.c53
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/ui/Build14
-rw-r--r--tools/perf/ui/browsers/Build10
-rw-r--r--tools/perf/ui/browsers/annotate.c8
-rw-r--r--tools/perf/ui/browsers/hists.c88
-rw-r--r--tools/perf/ui/gtk/Build9
-rw-r--r--tools/perf/ui/tui/Build4
-rw-r--r--tools/perf/util/Build145
-rw-r--r--tools/perf/util/annotate.c32
-rw-r--r--tools/perf/util/build-id.c209
-rw-r--r--tools/perf/util/build-id.h8
-rw-r--r--tools/perf/util/cache.h1
-rw-r--r--tools/perf/util/callchain.c8
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/cloexec.c6
-rw-r--r--tools/perf/util/cloexec.h6
-rw-r--r--tools/perf/util/data-convert-bt.c857
-rw-r--r--tools/perf/util/data-convert-bt.h8
-rw-r--r--tools/perf/util/db-export.c4
-rw-r--r--tools/perf/util/db-export.h3
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/debug.h1
-rw-r--r--tools/perf/util/dso.c159
-rw-r--r--tools/perf/util/dso.h49
-rw-r--r--tools/perf/util/dwarf-aux.c29
-rw-r--r--tools/perf/util/dwarf-aux.h3
-rw-r--r--tools/perf/util/event.c190
-rw-r--r--tools/perf/util/event.h1
-rw-r--r--tools/perf/util/evlist.c49
-rw-r--r--tools/perf/util/evlist.h14
-rw-r--r--tools/perf/util/evsel.c354
-rw-r--r--tools/perf/util/evsel.h11
-rw-r--r--tools/perf/util/header.c35
-rw-r--r--tools/perf/util/hist.c11
-rw-r--r--tools/perf/util/hist.h11
-rw-r--r--tools/perf/util/kvm-stat.h1
-rw-r--r--tools/perf/util/lzma.c95
-rw-r--r--tools/perf/util/machine.c332
-rw-r--r--tools/perf/util/machine.h2
-rw-r--r--tools/perf/util/map.c20
-rw-r--r--tools/perf/util/map.h6
-rw-r--r--tools/perf/util/ordered-events.c65
-rw-r--r--tools/perf/util/ordered-events.h19
-rw-r--r--tools/perf/util/parse-events.c242
-rw-r--r--tools/perf/util/parse-events.h13
-rw-r--r--tools/perf/util/parse-events.l2
-rw-r--r--tools/perf/util/parse-options.c32
-rw-r--r--tools/perf/util/parse-options.h2
-rw-r--r--tools/perf/util/probe-event.c427
-rw-r--r--tools/perf/util/probe-event.h6
-rw-r--r--tools/perf/util/probe-finder.c104
-rw-r--r--tools/perf/util/probe-finder.h4
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/scripting-engines/Build6
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c10
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c21
-rw-r--r--tools/perf/util/session.c320
-rw-r--r--tools/perf/util/session.h18
-rw-r--r--tools/perf/util/setup.py2
-rw-r--r--tools/perf/util/sort.c9
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/symbol-elf.c87
-rw-r--r--tools/perf/util/symbol-minimal.c7
-rw-r--r--tools/perf/util/symbol.c65
-rw-r--r--tools/perf/util/symbol.h8
-rw-r--r--tools/perf/util/target.c7
-rw-r--r--tools/perf/util/thread.c15
-rw-r--r--tools/perf/util/thread.h24
-rw-r--r--tools/perf/util/tool.h8
-rw-r--r--tools/perf/util/trace-event-parse.c12
-rw-r--r--tools/perf/util/trace-event-scripting.c1
-rw-r--r--tools/perf/util/trace-event.h6
-rw-r--r--tools/perf/util/unwind-libunwind.c8
-rw-r--r--tools/perf/util/util.c67
-rw-r--r--tools/perf/util/util.h6
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/cpupower/utils/helpers/pci.c11
-rw-r--r--tools/power/x86/turbostat/Makefile6
-rw-r--r--tools/power/x86/turbostat/turbostat.8138
-rw-r--r--tools/power/x86/turbostat/turbostat.c436
-rw-r--r--tools/testing/selftests/powerpc/Makefile16
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h33
-rw-r--r--tools/testing/selftests/powerpc/harness.c47
-rw-r--r--tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c8
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.c47
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.h1
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/Makefile24
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/check.S100
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/common.h6
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S81
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall-asm.S27
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall.c121
-rw-r--r--tools/testing/selftests/powerpc/utils.h3
-rw-r--r--tools/testing/selftests/powerpc/vphn/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile15
-rw-r--r--tools/testing/selftests/powerpc/vphn/test-vphn.c410
l---------tools/testing/selftests/powerpc/vphn/vphn.c1
l---------tools/testing/selftests/powerpc/vphn/vphn.h1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon1
-rw-r--r--tools/testing/selftests/vm/hugetlbfstest.c4
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c6
-rw-r--r--tools/testing/selftests/x86/Makefile55
-rwxr-xr-xtools/testing/selftests/x86/check_cc.sh16
-rw-r--r--tools/testing/selftests/x86/run_x86_tests.sh11
-rw-r--r--tools/testing/selftests/x86/single_step_syscall.c181
-rw-r--r--tools/testing/selftests/x86/trivial_32bit_program.c4
-rw-r--r--tools/testing/selftests/x86/trivial_64bit_program.c18
-rw-r--r--tools/thermal/tmon/Makefile8
-rw-r--r--tools/vm/Makefile2
-rw-r--r--virt/kvm/arm/vgic.c5
-rw-r--r--virt/kvm/kvm_main.c1
8028 files changed, 332703 insertions, 172769 deletions
diff --git a/.gitignore b/.gitignore
index acb6afe6b7a3..4ad4a98b884b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,7 @@
*.order
*.elf
*.bin
+*.tar
*.gz
*.bz2
*.lzma
diff --git a/.mailmap b/.mailmap
index 0d971cfb0772..6287004040e7 100644
--- a/.mailmap
+++ b/.mailmap
@@ -100,6 +100,7 @@ Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
Rémi Denis-Courmont <rdenis@simphalempin.com>
+Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
diff --git a/CREDITS b/CREDITS
index 843e17647f3b..ec7e6c7fdd1b 100644
--- a/CREDITS
+++ b/CREDITS
@@ -508,6 +508,10 @@ E: paul@paulbristow.net
W: http://paulbristow.net/linux/idefloppy.html
D: Maintainer of IDE/ATAPI floppy driver
+N: Stefano Brivio
+E: stefano.brivio@polimi.it
+D: Broadcom B43 driver
+
N: Dominik Brodowski
E: linux@brodo.de
W: http://www.brodo.de/
@@ -2045,6 +2049,10 @@ D: pirq addr, CS5535 alsa audio driver
S: Gurgaon, India
S: Kuala Lumpur, Malaysia
+N: Mohit Kumar
+D: ST Microelectronics SPEAr13xx PCI host bridge driver
+D: Synopsys Designware PCI host bridge driver
+
N: Gabor Kuti
M: seasons@falcon.sch.bme.hu
M: seasons@makosteszta.sote.hu
@@ -3008,6 +3016,19 @@ W: http://www.qsl.net/dl1bke/
D: Generic Z8530 driver, AX.25 DAMA slave implementation
D: Several AX.25 hacks
+N: Ricardo Ribalda Delgado
+E: ricardo.ribalda@gmail.com
+W: http://ribalda.com
+D: PLX USB338x driver
+D: PCA9634 driver
+D: Option GTM671WFS
+D: Fintek F81216A
+D: Various kernel hacks
+S: Qtechnology A/S
+S: Valby Langgade 142
+S: 2500 Valby
+S: Denmark
+
N: Francois-Rene Rideau
E: fare@tunes.org
W: http://www.tunes.org/~fare
@@ -3688,6 +3709,13 @@ N: Dirk Verworner
D: Co-author of German book ``Linux-Kernel-Programmierung''
D: Co-founder of Berlin Linux User Group
+N: Andrew Victor
+E: linux@maxim.org.za
+W: http://maxim.org.za/at91_26.html
+D: First maintainer of Atmel ARM-based SoC, aka AT91
+D: Introduced support for at91rm9200, the first chip of AT91 family
+S: South Africa
+
N: Riku Voipio
E: riku.voipio@iki.fi
D: Author of PCA9532 LED and Fintek f75375s hwmon driver
diff --git a/Documentation/ABI/obsolete/sysfs-block-zram b/Documentation/ABI/obsolete/sysfs-block-zram
new file mode 100644
index 000000000000..720ea92cfb2e
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-block-zram
@@ -0,0 +1,119 @@
+What: /sys/block/zram<id>/num_reads
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The num_reads file is read-only and specifies the number of
+ reads (failed or successful) done on this device.
+ Now accessible via zram<id>/stat node.
+
+What: /sys/block/zram<id>/num_writes
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The num_writes file is read-only and specifies the number of
+ writes (failed or successful) done on this device.
+ Now accessible via zram<id>/stat node.
+
+What: /sys/block/zram<id>/invalid_io
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The invalid_io file is read-only and specifies the number of
+ non-page-size-aligned I/O requests issued to this device.
+ Now accessible via zram<id>/io_stat node.
+
+What: /sys/block/zram<id>/failed_reads
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The failed_reads file is read-only and specifies the number of
+ failed reads that have happened on this device.
+ Now accessible via zram<id>/io_stat node.
+
+What: /sys/block/zram<id>/failed_writes
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The failed_writes file is read-only and specifies the number of
+ failed writes that have happened on this device.
+ Now accessible via zram<id>/io_stat node.
+
+What: /sys/block/zram<id>/notify_free
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The notify_free file is read-only. Depending on device usage
+ scenario it may account a) the number of pages freed because
+ of swap slot free notifications or b) the number of pages freed
+ because of REQ_DISCARD requests sent by bio. The former ones
+ are sent to a swap block device when a swap slot is freed, which
+ implies that this disk is being used as a swap disk. The latter
+ ones are sent by filesystem mounted with discard option,
+ whenever some data blocks are getting discarded.
+ Now accessible via zram<id>/io_stat node.
+
+What: /sys/block/zram<id>/zero_pages
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The zero_pages file is read-only and specifies number of zero
+ filled pages written to this disk. No memory is allocated for
+ such pages.
+ Now accessible via zram<id>/mm_stat node.
+
+What: /sys/block/zram<id>/orig_data_size
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The orig_data_size file is read-only and specifies uncompressed
+ size of data stored in this disk. This excludes zero-filled
+ pages (zero_pages) since no memory is allocated for them.
+ Unit: bytes
+ Now accessible via zram<id>/mm_stat node.
+
+What: /sys/block/zram<id>/compr_data_size
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The compr_data_size file is read-only and specifies compressed
+ size of data stored in this disk. So, compression ratio can be
+ calculated using orig_data_size and this statistic.
+ Unit: bytes
+ Now accessible via zram<id>/mm_stat node.
+
+What: /sys/block/zram<id>/mem_used_total
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The mem_used_total file is read-only and specifies the amount
+ of memory, including allocator fragmentation and metadata
+ overhead, allocated for this disk. So, allocator space
+ efficiency can be calculated using compr_data_size and this
+ statistic.
+ Unit: bytes
+ Now accessible via zram<id>/mm_stat node.
+
+What: /sys/block/zram<id>/mem_used_max
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The mem_used_max file is read/write and specifies the maximum
+ amount of memory zram has consumed to store compressed data.
+ To reset the value, write "0"; writing any other value
+ returns -EINVAL.
+ Unit: bytes
+ Downgraded to write-only node: so it's possible to set new
+ value only; its current value is stored in zram<id>/mm_stat
+ node.
+
+What: /sys/block/zram<id>/mem_limit
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The mem_limit file is read/write and specifies the maximum
+ amount of memory ZRAM can use to store the compressed data.
+ The limit can be changed at run time, and "0" disables the
+ limit, which is the initial state. Unit: bytes
+ Downgraded to write-only node: so it's possible to set new
+ value only; its current value is stored in zram<id>/mm_stat
+ node.
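
For illustration only (not part of this patch), a minimal user-space sketch of the write semantics described above, assuming a zram0 device exists and the caller has sufficient privileges:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int sysfs_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;	/* e.g. mem_used_max rejects anything but "0" */
		}
		return close(fd);
	}

	int main(void)
	{
		/* cap compressed data at 64 MB; "0" would remove the limit */
		sysfs_write("/sys/block/zram0/mem_limit", "67108864");
		/* reset the maximum-memory-used watermark */
		sysfs_write("/sys/block/zram0/mem_used_max", "0");
		return 0;
	}
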
diff --git a/Documentation/ABI/testing/sysfs-block-dm b/Documentation/ABI/testing/sysfs-block-dm
index 87ca5691e29b..f9f2339b9a0a 100644
--- a/Documentation/ABI/testing/sysfs-block-dm
+++ b/Documentation/ABI/testing/sysfs-block-dm
@@ -23,3 +23,25 @@ Description: Device-mapper device suspend state.
Contains the value 1 while the device is suspended.
Otherwise it contains 0. Read-only attribute.
Users: util-linux, device-mapper udev rules
+
+What: /sys/block/dm-<num>/dm/rq_based_seq_io_merge_deadline
+Date: March 2015
+KernelVersion: 4.1
+Contact: dm-devel@redhat.com
+Description: Allow control over how long a request that is a
+ reasonable merge candidate can be queued on the request
+ queue. The resolution of this deadline is in
+ microseconds (ranging from 1 to 100000 usecs).
+ Setting this attribute to 0 (the default) will disable
+ request-based DM's merge heuristic and associated extra
+ accounting. This attribute is not applicable to
+ bio-based DM devices so it will only ever report 0 for
+ them.
+
+What: /sys/block/dm-<num>/dm/use_blk_mq
+Date: March 2015
+KernelVersion: 4.1
+Contact: dm-devel@redhat.com
+Description: Request-based Device-mapper blk-mq I/O path mode.
+ Contains the value 1 if the device is using blk-mq.
+ Otherwise it contains 0. Read-only attribute.
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index a6148eaf91e5..2e69e83bf510 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -141,3 +141,28 @@ Description:
amount of memory ZRAM can use to store the compressed data. The
limit could be changed in run time and "0" means disable the
limit. No limit is the initial state. Unit: bytes
+
+What: /sys/block/zram<id>/compact
+Date: August 2015
+Contact: Minchan Kim <minchan@kernel.org>
+Description:
+ The compact file is write-only and triggers compaction of the
+ allocator zram uses. The allocator moves some objects so that
+ it can free fragmented space.
+
+What: /sys/block/zram<id>/io_stat
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The io_stat file is read-only and accumulates device's I/O
+ statistics not accounted by block layer. For example,
+ failed_reads, failed_writes, etc. File format is similar to
+ block layer statistics file format.
+
+What: /sys/block/zram<id>/mm_stat
+Date: August 2015
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The mm_stat file is read-only and represents device's mm
+ statistics (orig_data_size, compr_data_size, etc.) in a format
+ similar to block layer statistics file format.
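
For illustration only (not part of this patch), a hedged user-space sketch that triggers compaction and then reads the mm_stat counters, assuming a zram0 device:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];
		ssize_t n;
		int fd;

		fd = open("/sys/block/zram0/compact", O_WRONLY);
		if (fd >= 0) {
			write(fd, "1", 1);	/* any write triggers compaction */
			close(fd);
		}

		fd = open("/sys/block/zram0/mm_stat", O_RDONLY);
		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			/* orig_data_size compr_data_size mem_used_total ... */
			fputs(buf, stdout);
		}
		close(fd);
		return 0;
	}
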
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 3680364b4048..d46bba801aac 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -100,7 +100,7 @@ Description: read only
Hexadecimal value of the device ID found in this AFU
configuration record.
-What: /sys/class/cxl/<afu>/cr<config num>/vendor
+What: /sys/class/cxl/<afu>/cr<config num>/class
Date: February 2015
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
diff --git a/Documentation/ABI/testing/sysfs-class-led-flash b/Documentation/ABI/testing/sysfs-class-led-flash
new file mode 100644
index 000000000000..220a0270b47b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-flash
@@ -0,0 +1,80 @@
+What: /sys/class/leds/<led>/flash_brightness
+Date: March 2015
+KernelVersion: 4.0
+Contact: Jacek Anaszewski <j.anaszewski@samsung.com>
+Description: read/write
+ Set the brightness of this LED in the flash strobe mode, in
+ microamperes. The file is created only for the flash LED devices
+ that support setting flash brightness.
+
+ The value is between 0 and
+ /sys/class/leds/<led>/max_flash_brightness.
+
+What: /sys/class/leds/<led>/max_flash_brightness
+Date: March 2015
+KernelVersion: 4.0
+Contact: Jacek Anaszewski <j.anaszewski@samsung.com>
+Description: read only
+ Maximum brightness level for this LED in the flash strobe mode,
+ in microamperes.
+
+What: /sys/class/leds/<led>/flash_timeout
+Date: March 2015
+KernelVersion: 4.0
+Contact: Jacek Anaszewski <j.anaszewski@samsung.com>
+Description: read/write
+ Hardware timeout for flash, in microseconds. The flash strobe
+ is stopped after this period of time has passed from the start
+ of the strobe. The file is created only for the flash LED
+ devices that support setting flash timeout.
+
+What: /sys/class/leds/<led>/max_flash_timeout
+Date: March 2015
+KernelVersion: 4.0
+Contact: Jacek Anaszewski <j.anaszewski@samsung.com>
+Description: read only
+ Maximum flash timeout for this LED, in microseconds.
+
+What: /sys/class/leds/<led>/flash_strobe
+Date: March 2015
+KernelVersion: 4.0
+Contact: Jacek Anaszewski <j.anaszewski@samsung.com>
+Description: read/write
+ Flash strobe state. When written with 1 it triggers flash strobe
+ and when written with 0 it turns the flash off.
+
+ On read 1 means that flash is currently strobing and 0 means
+ that flash is off.
+
+What: /sys/class/leds/<led>/flash_fault
+Date: March 2015
+KernelVersion: 4.0
+Contact: Jacek Anaszewski <j.anaszewski@samsung.com>
+Description: read only
+ Space separated list of flash faults that may have occurred.
+ Flash faults are re-read after strobing the flash. Possible
+ flash faults:
+
+ * led-over-voltage - flash controller voltage to the flash LED
+ has exceeded the limit specific to the flash controller
+ * flash-timeout-exceeded - the flash strobe was still on when
+ the timeout set by the user has expired; not all flash
+ controllers may set this in all such conditions
+ * controller-over-temperature - the flash controller has
+ overheated
+ * controller-short-circuit - the short circuit protection
+ of the flash controller has been triggered
+ * led-power-supply-over-current - current in the LED power
+ supply has exceeded the limit specific to the flash
+ controller
+ * indicator-led-fault - the flash controller has detected
+ a short or open circuit condition on the indicator LED
+ * led-under-voltage - flash controller voltage to the flash
+ LED has been below the minimum limit specific to
+ the flash
+ * controller-under-voltage - the input voltage of the flash
+ controller is below the limit under which strobing the
+ flash at full current will not be possible;
+ the condition persists until this flag is no longer set
+ * led-over-temperature - the temperature of the LED has exceeded
+ its allowed upper limit
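
For illustration only (not part of this patch), a hedged sketch that programs a flash timeout and strobes the LED through the attributes above; the LED name "flash" is a placeholder, and only devices exposing these optional attributes will accept the writes:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void led_write(const char *attr, const char *val)
	{
		char path[128];
		int fd;

		snprintf(path, sizeof(path), "/sys/class/leds/flash/%s", attr);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return;
		write(fd, val, strlen(val));
		close(fd);
	}

	int main(void)
	{
		led_write("flash_brightness", "500000");	/* 500 mA, in microamperes */
		led_write("flash_timeout", "100000");		/* 100 ms, in microseconds */
		led_write("flash_strobe", "1");			/* start the strobe */
		return 0;
	}
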
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index 76ee192f80a0..3b5c3bca9186 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -222,3 +222,13 @@ Description:
The number of blocks that are marked as reserved, if any, in
this partition. These are typically used to store the in-flash
bad block table (BBT).
+
+What: /sys/class/mtd/mtdX/offset
+Date: March 2015
+KernelVersion: 4.1
+Contact: linux-mtd@lists.infradead.org
+Description:
+ For a partition, the offset of that partition from the start
+ of the master device in bytes. This attribute is absent on
+ main devices, so it can be used to distinguish between
+ partitions and devices that aren't partitions.
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index beb8ec4dabbc..5ecfd72ba684 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -188,6 +188,14 @@ Description:
Indicates the interface unique physical port identifier within
the NIC, as a string.
+What: /sys/class/net/<iface>/phys_port_name
+Date: March 2015
+KernelVersion: 4.0
+Contact: netdev@vger.kernel.org
+Description:
+ Indicates the interface physical port name within the NIC,
+ as a string.
+
What: /sys/class/net/<iface>/speed
Date: October 2009
KernelVersion: 2.6.33
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
index 5e9aeb91d355..0c0df91b1516 100644
--- a/Documentation/ABI/testing/sysfs-class-net-queues
+++ b/Documentation/ABI/testing/sysfs-class-net-queues
@@ -24,6 +24,14 @@ Description:
Indicates the number of transmit timeout events seen by this
network interface transmit queue.
+What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
+Date: March 2015
+KernelVersion: 4.1
+Contact: netdev@vger.kernel.org
+Description:
+ A max rate, in Mbps, set for the queue. A value of zero means
+ the rate limit is disabled, which is the default.
+
What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
Date: November 2010
KernelVersion: 2.6.38
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
index b3f6a2ac5007..db197a879580 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
+++ b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
@@ -1,7 +1,7 @@
-What: /sys/module/hid_logitech/drivers/hid:logitech/<dev>/range.
+What: /sys/bus/hid/drivers/logitech/<dev>/range
Date: July 2011
KernelVersion: 3.2
-Contact: Michal Malý <madcatxster@gmail.com>
+Contact: Michal Malý <madcatxster@devoid-pointer.net>
Description: Display minimum, maximum and current range of the steering
wheel. Writing a value within min and max boundaries sets the
range of the wheel.
@@ -9,7 +9,7 @@ Description: Display minimum, maximum and current range of the steering
What: /sys/bus/hid/drivers/logitech/<dev>/alternate_modes
Date: Feb 2015
KernelVersion: 4.1
-Contact: Michal Malý <madcatxster@gmail.com>
+Contact: Michal Malý <madcatxster@devoid-pointer.net>
Description: Displays a set of alternate modes supported by a wheel. Each
mode is listed as follows:
Tag: Mode Name
@@ -45,7 +45,7 @@ Description: Displays a set of alternate modes supported by a wheel. Each
What: /sys/bus/hid/drivers/logitech/<dev>/real_id
Date: Feb 2015
KernelVersion: 4.1
-Contact: Michal Malý <madcatxster@gmail.com>
+Contact: Michal Malý <madcatxster@devoid-pointer.net>
Description: Displays the real model of the wheel regardless of any
alternate mode the wheel might be switched to.
It is a read-only value.
diff --git a/Documentation/ABI/testing/sysfs-driver-toshiba_acpi b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi
index ca9c71a531c5..eed922ef42e5 100644
--- a/Documentation/ABI/testing/sysfs-driver-toshiba_acpi
+++ b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi
@@ -8,9 +8,11 @@ Description: This file controls the keyboard backlight operation mode, valid
* 0x2 -> AUTO (also called TIMER)
* 0x8 -> ON
* 0x10 -> OFF
- Note that the kernel 3.16 onwards this file accepts all listed
+ Note that from kernel 3.16 onwards this file accepts all listed
parameters, kernel 3.15 only accepts the first two (FN-Z and
AUTO).
+ Also note that toggling this value on type 1 devices requires
+ a reboot for changes to take effect.
Users: KToshiba
What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_timeout
@@ -67,15 +69,72 @@ Description: This file shows the current keyboard backlight type,
* 2 -> Type 2, supporting modes TIMER, ON and OFF
Users: KToshiba
+What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_sleep_charge
+Date: January 23, 2015
+KernelVersion: 4.0
+Contact: Azael Avalos <coproscefalo@gmail.com>
+Description: This file controls the USB Sleep & Charge charging mode, which
+ can be:
+ * 0 -> Disabled (0x00)
+ * 1 -> Alternate (0x09)
+ * 2 -> Auto (0x21)
+ * 3 -> Typical (0x11)
+ Note that from kernel 4.1 onwards this file accepts all listed
+ values; kernel 4.0 only supports the first three.
+ Note that this feature only works when connected to power; if
+ you want to use it under battery, see the entry named
+ "sleep_functions_on_battery"
+Users: KToshiba
+
+What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/sleep_functions_on_battery
+Date: January 23, 2015
+KernelVersion: 4.0
+Contact: Azael Avalos <coproscefalo@gmail.com>
+Description: This file controls the USB Sleep Functions under battery and
+ sets the level at which they will be disabled. Accepted
+ values are:
+ * 0 -> Disabled
+ * 1-100 -> Battery level to disable sleep functions
+ Currently it prints two values, the first one indicates if the
+ feature is enabled or disabled, while the second one shows the
+ current battery level set.
+ Note that when the value is set to disabled, the sleep function
+ will only work when connected to power.
+Users: KToshiba
+
+What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_rapid_charge
+Date: January 23, 2015
+KernelVersion: 4.0
+Contact: Azael Avalos <coproscefalo@gmail.com>
+Description: This file controls the USB Rapid Charge state, which can be:
+ * 0 -> Disabled
+ * 1 -> Enabled
+ Note that toggling this value requires a reboot for changes to
+ take effect.
+Users: KToshiba
+
+What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_sleep_music
+Date: January 23, 2015
+KernelVersion: 4.0
+Contact: Azael Avalos <coproscefalo@gmail.com>
+Description: This file controls the Sleep & Music state, whose values can be:
+ * 0 -> Disabled
+ * 1 -> Enabled
+ Note that this feature only works when connected to power; if
+ you want to use it under battery, see the entry named
+ "sleep_functions_on_battery"
+Users: KToshiba
+
What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/version
-Date: February, 2015
-KernelVersion: 3.20
+Date: February 12, 2015
+KernelVersion: 4.0
Contact: Azael Avalos <coproscefalo@gmail.com>
Description: This file shows the current version of the driver
+Users: KToshiba
What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/fan
-Date: February, 2015
-KernelVersion: 3.20
+Date: February 12, 2015
+KernelVersion: 4.0
Contact: Azael Avalos <coproscefalo@gmail.com>
Description: This file controls the state of the internal fan, valid
values are:
@@ -83,8 +142,8 @@ Description: This file controls the state of the internal fan, valid
* 1 -> ON
What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_function_keys
-Date: February, 2015
-KernelVersion: 3.20
+Date: February 12, 2015
+KernelVersion: 4.0
Contact: Azael Avalos <coproscefalo@gmail.com>
Description: This file controls the Special Functions (hotkeys) operation
mode, valid values are:
@@ -94,21 +153,29 @@ Description: This file controls the Special Functions (hotkeys) operation
and the hotkeys are accessed via FN-F{1-12}.
In the "Special Functions" mode, the F{1-12} keys trigger the
hotkey and the F{1-12} keys are accessed via FN-F{1-12}.
+ Note that toggling this value requires a reboot for changes to
+ take effect.
+Users: KToshiba
What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/panel_power_on
-Date: February, 2015
-KernelVersion: 3.20
+Date: February 12, 2015
+KernelVersion: 4.0
Contact: Azael Avalos <coproscefalo@gmail.com>
Description: This file controls whether the laptop should turn ON whenever
the LID is opened, valid values are:
* 0 -> Disabled
* 1 -> Enabled
+ Note that toggling this value requires a reboot for changes to
+ take effect.
+Users: KToshiba
What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_three
-Date: February, 2015
-KernelVersion: 3.20
+Date: February 12, 2015
+KernelVersion: 4.0
Contact: Azael Avalos <coproscefalo@gmail.com>
-Description: This file controls whether the USB 3 functionality, valid
- values are:
+Description: This file controls the USB 3 functionality, valid values are:
* 0 -> Disabled (Acts as a regular USB 2)
* 1 -> Enabled (Full USB 3 functionality)
+ Note that toggling this value requires a reboot for changes to
+ take effect.
+Users: KToshiba
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
new file mode 100644
index 000000000000..8c6a0b8e1131
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dell-laptop
@@ -0,0 +1,69 @@
+What: /sys/class/leds/dell::kbd_backlight/als_enabled
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+ This file allows controlling the automatic keyboard
+ illumination mode on some systems that have an ambient
+ light sensor. Write 1 to this file to enable the auto
+ mode, 0 to disable it.
+
+What: /sys/class/leds/dell::kbd_backlight/als_setting
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+ This file allows specifying the on/off threshold value,
+ as reported by the ambient light sensor.
+
+What: /sys/class/leds/dell::kbd_backlight/start_triggers
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+ This file allows controlling the input triggers that
+ turn the keyboard backlight illumination back on after it
+ has been disabled because of inactivity.
+ Read the file to see the triggers available. The ones
+ enabled are preceded by '+', those disabled by '-'.
+
+ To enable a trigger, write its name preceded by '+' to
+ this file. To disable a trigger, write its name preceded
+ by '-' instead.
+
+ For example, to enable the keyboard as trigger run:
+ echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+ To disable it:
+ echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+
+ Note that not all the available triggers can be configured.
+
+What: /sys/class/leds/dell::kbd_backlight/stop_timeout
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+ This file allows specifying the interval after which the
+ keyboard illumination is disabled because of inactivity.
+ The timeouts are expressed in seconds, minutes, hours and
+ days, for which the symbols are 's', 'm', 'h' and 'd'
+ respectively.
+
+ To configure the timeout, write to this file a value along
+ with any of the above units. If no unit is specified, the value
+ is assumed to be expressed in seconds.
+
+ For example, to set the timeout to 10 minutes run:
+ echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
+
+ Note that when this file is read, the returned value might be
+ expressed in a different unit than the one used when the timeout
+ was set.
+
+ Also note that only some timeouts are supported and that
+ some systems might fall back to a specific timeout in case
+ an invalid timeout is written to this file.
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 449a8a19fc21..f4b78eafd92a 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -13,7 +13,7 @@ and NOT read it. Burn them, it's a great symbolic gesture.
Anyway, here goes:
- Chapter 1: Indentation
+ Chapter 1: Indentation
Tabs are 8 characters, and thus indentations are also 8 characters.
There are heretic movements that try to make indentations 4 (or even 2!)
@@ -56,7 +56,6 @@ instead of "double-indenting" the "case" labels. E.g.:
break;
}
-
Don't put multiple statements on a single line unless you have
something to hide:
@@ -156,25 +155,25 @@ comments on.
Do not unnecessarily use braces where a single statement will do.
-if (condition)
- action();
+ if (condition)
+ action();
and
-if (condition)
- do_this();
-else
- do_that();
+ if (condition)
+ do_this();
+ else
+ do_that();
This does not apply if only one branch of a conditional statement is a single
statement; in the latter case use braces in both branches:
-if (condition) {
- do_this();
- do_that();
-} else {
- otherwise();
-}
+ if (condition) {
+ do_this();
+ do_that();
+ } else {
+ otherwise();
+ }
3.1: Spaces
@@ -186,8 +185,11 @@ although they are not required in the language, as in: "sizeof info" after
"struct fileinfo info;" is declared).
So use a space after these keywords:
+
if, switch, case, for, do, while
+
but not with sizeof, typeof, alignof, or __attribute__. E.g.,
+
s = sizeof(struct file);
Do not add spaces around (inside) parenthesized expressions. This example is
@@ -209,12 +211,15 @@ such as any of these:
= + - < > * / % | & ^ <= >= == != ? :
but no space after unary operators:
+
& * + - ~ ! sizeof typeof alignof __attribute__ defined
no space before the postfix increment & decrement unary operators:
+
++ --
no space after the prefix increment & decrement unary operators:
+
++ --
and no space around the '.' and "->" structure member operators.
@@ -268,13 +273,11 @@ See chapter 6 (Functions).
Chapter 5: Typedefs
Please don't use things like "vps_t".
-
It's a _mistake_ to use typedef for structures and pointers. When you see a
vps_t a;
in the source, what does it mean?
-
In contrast, if it says
struct virtual_container *a;
@@ -372,11 +375,11 @@ In source files, separate functions with one blank line. If the function is
exported, the EXPORT* macro for it should follow immediately after the closing
function brace line. E.g.:
-int system_is_up(void)
-{
- return system_state == SYSTEM_RUNNING;
-}
-EXPORT_SYMBOL(system_is_up);
+ int system_is_up(void)
+ {
+ return system_state == SYSTEM_RUNNING;
+ }
+ EXPORT_SYMBOL(system_is_up);
In function prototypes, include parameter names with their data types.
Although this is not required by the C language, it is preferred in Linux
@@ -405,34 +408,34 @@ The rationale for using gotos is:
modifications are prevented
- saves the compiler work to optimize redundant code away ;)
-int fun(int a)
-{
- int result = 0;
- char *buffer;
-
- buffer = kmalloc(SIZE, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- if (condition1) {
- while (loop1) {
- ...
+ int fun(int a)
+ {
+ int result = 0;
+ char *buffer;
+
+ buffer = kmalloc(SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if (condition1) {
+ while (loop1) {
+ ...
+ }
+ result = 1;
+ goto out_buffer;
}
- result = 1;
- goto out_buffer;
+ ...
+ out_buffer:
+ kfree(buffer);
+ return result;
}
- ...
-out_buffer:
- kfree(buffer);
- return result;
-}
A common type of bug to be aware of is "one err bugs", which look like this:
-err:
- kfree(foo->bar);
- kfree(foo);
- return ret;
+ err:
+ kfree(foo->bar);
+ kfree(foo);
+ return ret;
The bug in this code is that on some exit paths "foo" is NULL. Normally the
fix for this is to split it up into two error labels "err_bar:" and "err_foo:".
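
For illustration only (not part of the document), the two-label form could look like the sketch below; do_something(), SIZE and struct foo are placeholders:

	int fun(void)
	{
		struct foo *foo;
		int ret;

		foo = kmalloc(sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->bar = kmalloc(SIZE, GFP_KERNEL);
		if (!foo->bar) {
			ret = -ENOMEM;
			goto err_foo;
		}

		ret = do_something(foo);
		if (ret)
			goto err_bar;

		return 0;

	err_bar:
		kfree(foo->bar);
	err_foo:
		kfree(foo);
		return ret;
	}
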
@@ -503,9 +506,9 @@ values. To do the latter, you can stick the following in your .emacs file:
(defun c-lineup-arglist-tabs-only (ignored)
"Line up argument lists by tabs, not spaces"
(let* ((anchor (c-langelem-pos c-syntactic-element))
- (column (c-langelem-2nd-pos c-syntactic-element))
- (offset (- (1+ column) anchor))
- (steps (floor offset c-basic-offset)))
+ (column (c-langelem-2nd-pos c-syntactic-element))
+ (offset (- (1+ column) anchor))
+ (steps (floor offset c-basic-offset)))
(* (max steps 1)
c-basic-offset)))
@@ -612,7 +615,7 @@ have a reference count on it, you almost certainly have a bug.
Names of macros defining constants and labels in enums are capitalized.
-#define CONSTANT 0x12345
+ #define CONSTANT 0x12345
Enums are preferred when defining several related constants.
@@ -623,28 +626,28 @@ Generally, inline functions are preferable to macros resembling functions.
Macros with multiple statements should be enclosed in a do - while block:
-#define macrofun(a, b, c) \
- do { \
- if (a == 5) \
- do_this(b, c); \
- } while (0)
+ #define macrofun(a, b, c) \
+ do { \
+ if (a == 5) \
+ do_this(b, c); \
+ } while (0)
Things to avoid when using macros:
1) macros that affect control flow:
-#define FOO(x) \
- do { \
- if (blah(x) < 0) \
- return -EBUGGERED; \
- } while(0)
+ #define FOO(x) \
+ do { \
+ if (blah(x) < 0) \
+ return -EBUGGERED; \
+ } while(0)
is a _very_ bad idea. It looks like a function call but exits the "calling"
function; don't break the internal parsers of those who will read the code.
2) macros that depend on having a local variable with a magic name:
-#define FOO(val) bar(index, val)
+ #define FOO(val) bar(index, val)
might look like a good thing, but it's confusing as hell when one reads the
code and it's prone to breakage from seemingly innocent changes.
@@ -656,8 +659,21 @@ bite you if somebody e.g. turns FOO into an inline function.
must enclose the expression in parentheses. Beware of similar issues with
macros using parameters.
-#define CONSTANT 0x4000
-#define CONSTEXP (CONSTANT | 3)
+ #define CONSTANT 0x4000
+ #define CONSTEXP (CONSTANT | 3)
+
+5) namespace collisions when defining local variables in macros resembling
+functions:
+
+#define FOO(x) \
+({ \
+	typeof(x) ret; \
+	ret = calc_ret(x); \
+	(ret); \
+})
+
+ret is a common name for a local variable - __foo_ret is less likely
+to collide with an existing variable.
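
For illustration only (not part of the document), the same macro written with the prefixed local name suggested above:

	#define FOO(x)				\
	({					\
		typeof(x) __foo_ret;		\
		__foo_ret = calc_ret(x);	\
		__foo_ret;			\
	})
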
The cpp manual deals with macros exhaustively. The gcc internals manual also
covers RTL which is used frequently with assembly language in the kernel.
@@ -796,11 +812,11 @@ you should use, rather than explicitly coding some variant of them yourself.
For example, if you need to calculate the length of an array, take advantage
of the macro
- #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
Similarly, if you need to calculate the size of some structure member, use
- #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+ #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
There are also min() and max() macros that do strict type checking if you
need them. Feel free to peruse that header file to see what else is already
@@ -813,19 +829,19 @@ Some editors can interpret configuration information embedded in source files,
indicated with special markers. For example, emacs interprets lines marked
like this:
--*- mode: c -*-
+ -*- mode: c -*-
Or like this:
-/*
-Local Variables:
-compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
-End:
-*/
+ /*
+ Local Variables:
+ compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
+ End:
+ */
Vim interprets markers that look like this:
-/* vim:set sw=8 noet */
+ /* vim:set sw=8 noet */
Do not include any of these in source files. People have their own personal
editor configurations, and your source files should not override them. This
@@ -902,9 +918,9 @@ At the end of any non-trivial #if or #ifdef block (more than a few lines),
place a comment after the #endif on the same line, noting the conditional
expression used. For instance:
-#ifdef CONFIG_SOMETHING
-...
-#endif /* CONFIG_SOMETHING */
+ #ifdef CONFIG_SOMETHING
+ ...
+ #endif /* CONFIG_SOMETHING */
Appendix I: References
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index 04a8c24ead47..efc8d90a9a3f 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -509,6 +509,270 @@
select it due to the used type and mask field.
</para>
</sect1>
+
+ <sect1><title>Internal Structure of Kernel Crypto API</title>
+
+ <para>
+ The kernel crypto API has an internal structure where a cipher
+ implementation may use many layers and indirections. This section
+ shall help to clarify how the kernel crypto API uses
+ various components to implement the complete cipher.
+ </para>
+
+ <para>
+ The following subsections explain the internal structure based
+ on existing cipher implementations. The first section addresses
+ the most complex scenario where all other scenarios form a logical
+ subset.
+ </para>
+
+ <sect2><title>Generic AEAD Cipher Structure</title>
+
+ <para>
+ The following ASCII art decomposes the kernel crypto API layers
+ when using the AEAD cipher with the automated IV generation. The
+ shown example is used by the IPSEC layer.
+ </para>
+
+ <para>
+ For other use cases of AEAD ciphers, the ASCII art applies as
+ well, but the caller may not use the GIVCIPHER interface. In
+ this case, the caller must generate the IV.
+ </para>
+
+ <para>
+ The depicted example decomposes the AEAD cipher of GCM(AES) based
+ on the generic C implementations (gcm.c, aes-generic.c, ctr.c,
+ ghash-generic.c, seqiv.c). The generic implementation serves as an
+ example showing the complete logic of the kernel crypto API.
+ </para>
+
+ <para>
+ It is possible that some streamlined cipher implementations (like
+ AES-NI) provide implementations merging aspects which in the view
+ of the kernel crypto API cannot be decomposed into layers any more.
+ In case of the AES-NI implementation, the CTR mode, the GHASH
+ implementation and the AES cipher are all merged into one cipher
+ implementation registered with the kernel crypto API. In this case,
+ the concept described by the following ASCII art applies too. However,
+ the decomposition of GCM into the individual sub-components
+ by the kernel crypto API is not done any more.
+ </para>
+
+ <para>
+ Each block in the following ASCII art is an independent cipher
+ instance obtained from the kernel crypto API. Each block
+ is accessed by the caller or by other blocks using the API functions
+ defined by the kernel crypto API for the cipher implementation type.
+ </para>
+
+ <para>
+ The blocks below indicate the cipher type as well as the specific
+ logic implemented in the cipher.
+ </para>
+
+ <para>
+ The ASCII art picture also indicates the call structure, i.e. who
+ calls which component. The arrows point to the invoked block
+ where the caller uses the API applicable to the cipher type
+ specified for the block.
+ </para>
+
+ <programlisting>
+<![CDATA[
+kernel crypto API | IPSEC Layer
+ |
++-----------+ |
+| | (1)
+| givcipher | <----------------------------------- esp_output
+| (seqiv) | ---+
++-----------+ |
+ | (2)
++-----------+ |
+| | <--+ (2)
+| aead | <----------------------------------- esp_input
+| (gcm) | ------------+
++-----------+ |
+ | (3) | (5)
+ v v
++-----------+ +-----------+
+| | | |
+| ablkcipher| | ahash |
+| (ctr) | ---+ | (ghash) |
++-----------+ | +-----------+
+ |
++-----------+ | (4)
+| | <--+
+| cipher |
+| (aes) |
++-----------+
+]]>
+ </programlisting>
+
+ <para>
+ The following call sequence is applicable when the IPSEC layer
+ triggers an encryption operation with the esp_output function. During
+ configuration, the administrator set up the use of rfc4106(gcm(aes)) as
+ the cipher for ESP. The following call sequence is now depicted in the
+ ASCII art above:
+ </para>
+
+ <orderedlist>
+ <listitem>
+ <para>
+ esp_output() invokes crypto_aead_givencrypt() to trigger an encryption
+ operation of the GIVCIPHER implementation.
+ </para>
+
+ <para>
+ In case of GCM, the SEQIV implementation is registered as GIVCIPHER
+ in crypto_rfc4106_alloc().
+ </para>
+
+ <para>
+ The SEQIV performs its operation to generate an IV where the core
+ function is seqiv_geniv().
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Now, SEQIV uses the AEAD API function calls to invoke the associated
+ AEAD cipher. In our case, during the instantiation of SEQIV, the
+ cipher handle for GCM is provided to SEQIV. This means that SEQIV
+ invokes AEAD cipher operations with the GCM cipher handle.
+ </para>
+
+ <para>
+ During instantiation of the GCM handle, the CTR(AES) and GHASH
+ ciphers are instantiated. The cipher handles for CTR(AES) and GHASH
+ are retained for later use.
+ </para>
+
+ <para>
+ The GCM implementation is responsible for invoking the CTR mode AES and
+ the GHASH cipher in the right manner to implement the GCM
+ specification.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ The GCM AEAD cipher type implementation now invokes the ABLKCIPHER API
+ with the instantiated CTR(AES) cipher handle.
+ </para>
+
+ <para>
+ During instantiation of the CTR(AES) cipher, the CIPHER type
+ implementation of AES is instantiated. The cipher handle for AES is
+ retained.
+ </para>
+
+ <para>
+ That means that the ABLKCIPHER implementation of CTR(AES) only
+ implements the CTR block chaining mode. After performing the block
+ chaining operation, the CIPHER implementation of AES is invoked.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ The ABLKCIPHER of CTR(AES) now invokes the CIPHER API with the AES
+ cipher handle to encrypt one block.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ The GCM AEAD implementation also invokes the GHASH cipher
+ implementation via the AHASH API.
+ </para>
+ </listitem>
+ </orderedlist>
+
+ <para>
+ When the IPSEC layer triggers the esp_input() function, the same call
+ sequence is followed with the only difference that the operation starts
+ with step (2).
+ </para>
+ </sect2>
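
For illustration only, a hedged kernel-side sketch of allocating and using the decomposed AEAD cipher described above; it is not part of this patch, the helper function is hypothetical, and associated-data setup plus asynchronous completion handling are omitted for brevity:

	#include <crypto/aead.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int example_gcm_encrypt(const u8 *key, unsigned int keylen,
				       struct scatterlist *sg,
				       unsigned int len, u8 *iv)
	{
		struct crypto_aead *tfm;
		struct aead_request *req;
		int ret;

		/* request a synchronous implementation of the AEAD cipher */
		tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_aead_setkey(tfm, key, keylen);
		if (!ret)
			ret = crypto_aead_setauthsize(tfm, 16);
		if (ret)
			goto out_free_tfm;

		req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		/* in-place encryption of len bytes, IV supplied by the caller */
		aead_request_set_crypt(req, sg, sg, len, iv);
		ret = crypto_aead_encrypt(req);

		aead_request_free(req);
	out_free_tfm:
		crypto_free_aead(tfm);
		return ret;
	}
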
+
+ <sect2><title>Generic Block Cipher Structure</title>
+ <para>
+ Generic block ciphers follow the same concept as depicted with the ASCII
+ art picture above.
+ </para>
+
+ <para>
+ For example, CBC(AES) is implemented with cbc.c, and aes-generic.c. The
+ ASCII art picture above applies as well with the difference that only
+ step (4) is used and the ABLKCIPHER block chaining mode is CBC.
+ </para>
+ </sect2>
+
+ <sect2><title>Generic Keyed Message Digest Structure</title>
+ <para>
+ Keyed message digest implementations again follow the same concept as
+ depicted in the ASCII art picture above.
+ </para>
+
+ <para>
+ For example, HMAC(SHA256) is implemented with hmac.c and
+ sha256_generic.c. The following ASCII art illustrates the
+ implementation:
+ </para>
+
+ <programlisting>
+<![CDATA[
+kernel crypto API | Caller
+ |
++-----------+ (1) |
+| | <------------------ some_function
+| ahash |
+| (hmac) | ---+
++-----------+ |
+ | (2)
++-----------+ |
+| | <--+
+| shash |
+| (sha256) |
++-----------+
+]]>
+ </programlisting>
+
+ <para>
+ The following call sequence is applicable when a caller triggers
+ an HMAC operation:
+ </para>
+
+ <orderedlist>
+ <listitem>
+ <para>
+ The AHASH API functions are invoked by the caller. The HMAC
+ implementation performs its operation as needed.
+ </para>
+
+ <para>
+ During initialization of the HMAC cipher, the SHASH cipher type of
+ SHA256 is instantiated. The cipher handle for the SHA256 instance is
+ retained.
+ </para>
+
+ <para>
+ At some point, the HMAC implementation requires a SHA256
+ operation, for which the SHA256 cipher handle is used.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ The HMAC instance now invokes the SHASH API with the SHA256
+ cipher handle to calculate the message digest.
+ </para>
+ </listitem>
+ </orderedlist>
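+
+   <para>
+    The call chain above is triggered as soon as a caller allocates the
+    composite cipher name. The following is a hedged kernel-space sketch
+    only (the helper name is hypothetical and error handling is reduced to
+    a minimum); it would instantiate the hmac.c / sha256_generic.c stack
+    and compute one keyed message digest:
+   </para>
+
+   <programlisting>
+<![CDATA[
+#include <crypto/hash.h>
+#include <linux/err.h>
+
+static int example_hmac_sha256(const u8 *key, unsigned int keylen,
+			       const u8 *data, unsigned int len, u8 *out)
+{
+	struct crypto_shash *tfm;
+	int ret;
+
+	/* instantiates HMAC (hmac.c) wrapping the SHASH of SHA-256 */
+	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = crypto_shash_setkey(tfm, key, keylen);
+	if (!ret) {
+		SHASH_DESC_ON_STACK(desc, tfm);
+
+		desc->tfm = tfm;
+		desc->flags = 0;	/* no special request flags */
+		ret = crypto_shash_digest(desc, data, len, out);
+	}
+
+	crypto_free_shash(tfm);
+	return ret;
+}
+]]>
+   </programlisting>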
+ </sect2>
+ </sect1>
</chapter>
<chapter id="Development"><title>Developing Cipher Algorithms</title>
@@ -808,6 +1072,602 @@
</sect1>
</chapter>
+ <chapter id="User"><title>User Space Interface</title>
+ <sect1><title>Introduction</title>
+ <para>
+     The concepts of the kernel crypto API visible to kernel space are fully
+     applicable to the user space interface as well. Therefore, the
+     high-level discussion of the kernel crypto API for the in-kernel use
+     cases applies here as well.
+ </para>
+
+ <para>
+ The major difference, however, is that user space can only act as a
+ consumer and never as a provider of a transformation or cipher algorithm.
+ </para>
+
+ <para>
+ The following covers the user space interface exported by the kernel
+     crypto API. A working example of this description is libkcapi, which
+     can be obtained from [1]. That library can be used by user space
+ applications that require cryptographic services from the kernel.
+ </para>
+
+ <para>
+     Some aspects of the in-kernel crypto API do not apply to user
+     space, however. This includes the difference between synchronous
+     and asynchronous invocations: the user space API calls are fully
+     synchronous.
+ </para>
+
+ <para>
+ [1] http://www.chronox.de/libkcapi.html
+ </para>
+
+ </sect1>
+
+ <sect1><title>User Space API General Remarks</title>
+ <para>
+ The kernel crypto API is accessible from user space. Currently,
+ the following ciphers are accessible:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>Message digest including keyed message digest (HMAC, CMAC)</para>
+ </listitem>
+
+ <listitem>
+ <para>Symmetric ciphers</para>
+ </listitem>
+
+ <listitem>
+ <para>AEAD ciphers</para>
+ </listitem>
+
+ <listitem>
+ <para>Random Number Generators</para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+    The interface is provided via a socket of the AF_ALG address family.
+    In addition, the setsockopt level to be used is SOL_ALG. In case the
+    user space header files do not export these constants yet, use the
+    following macros:
+ </para>
+
+ <programlisting>
+#ifndef AF_ALG
+#define AF_ALG 38
+#endif
+#ifndef SOL_ALG
+#define SOL_ALG 279
+#endif
+ </programlisting>
+
+ <para>
+    A cipher is accessed with the same name as used for the in-kernel
+    API calls. This includes the generic vs. unique naming schema for
+    ciphers as well as the enforcement of priorities for generic names.
+ </para>
+
+ <para>
+ To interact with the kernel crypto API, a socket must be
+ created by the user space application. User space invokes the cipher
+ operation with the send()/write() system call family. The result of the
+ cipher operation is obtained with the read()/recv() system call family.
+ </para>
+
+ <para>
+    The following description assumes that the socket descriptor
+    is already opened by the user space application and discusses only
+    the kernel crypto API specific invocations.
+ </para>
+
+ <para>
+    To initialize the socket interface, the following sequence has to
+    be performed by the consumer (a minimal code sketch follows the list):
+ </para>
+
+ <orderedlist>
+ <listitem>
+ <para>
+ Create a socket of type AF_ALG with the struct sockaddr_alg
+ parameter specified below for the different cipher types.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+      Invoke bind with the socket descriptor.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Invoke accept with the socket descriptor. The accept system call
+ returns a new file descriptor that is to be used to interact with
+ the particular cipher instance. When invoking send/write or recv/read
+ system calls to send data to the kernel or obtain data from the
+ kernel, the file descriptor returned by accept must be used.
+ </para>
+ </listitem>
+ </orderedlist>
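+
+   <para>
+    The three steps above can be sketched as follows. This is a hedged
+    fragment only: error handling is omitted and the "hash" / "sha1"
+    values are just the example used in the message digest section below.
+   </para>
+
+   <programlisting>
+<![CDATA[
+#include <sys/socket.h>
+#include <linux/if_alg.h>	/* struct sockaddr_alg */
+
+int tfmfd, opfd;
+struct sockaddr_alg sa = {
+	.salg_family = AF_ALG,
+	.salg_type   = "hash",	/* cipher type, see the following sections */
+	.salg_name   = "sha1"	/* cipher name */
+};
+
+tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);		/* step 1 */
+bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));	/* step 2 */
+opfd = accept(tfmfd, NULL, 0);				/* step 3 */
+/* opfd is used for the send/write and recv/read system calls */
+]]>
+   </programlisting>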
+ </sect1>
+
+  <sect1><title>In-place Cipher Operation</title>
+   <para>
+    Just like the in-kernel operation of the kernel crypto API, the user
+    space interface allows the cipher operation in-place. That means that
+    the input buffer used for the send/write system call and the output
+    buffer used by the read/recv system call may be one and the same.
+    This is of particular interest for symmetric cipher operations where
+    copying the output data to its final destination can be avoided.
+ </para>
+
+ <para>
+    If, on the other hand, a consumer wants to maintain the plaintext and
+    the ciphertext in different memory locations, all it needs to do is
+    provide different memory pointers for the input and the output of the
+    cipher operation.
+ </para>
+ </sect1>
+
+ <sect1><title>Message Digest API</title>
+ <para>
+    The message digest type to be used is selected when invoking the
+    bind syscall. bind requires the caller to provide a filled
+    struct sockaddr_alg data structure. This data structure must be
+    filled as follows:
+ </para>
+
+ <programlisting>
+struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash", /* this selects the hash logic in the kernel */
+ .salg_name = "sha1" /* this is the cipher name */
+};
+ </programlisting>
+
+ <para>
+    The salg_type value "hash" applies to both message digests and keyed
+    message digests; a keyed message digest is selected via the
+    appropriate salg_name, such as "hmac(sha1)". Please see the
+    setsockopt interface below for how the key of a keyed message digest
+    is set.
+ </para>
+
+ <para>
+ Using the send() system call, the application provides the data that
+ should be processed with the message digest. The send system call
+ allows the following flags to be specified:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ MSG_MORE: If this flag is set, the send system call acts like a
+ message digest update function where the final hash is not
+ yet calculated. If the flag is not set, the send system call
+ calculates the final message digest immediately.
+ </para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+ With the recv() system call, the application can read the message
+ digest from the kernel crypto API. If the buffer is too small for the
+ message digest, the flag MSG_TRUNC is set by the kernel.
+ </para>
+
+ <para>
+    In order to set a message digest key, the calling application must use
+    the setsockopt() option ALG_SET_KEY. If the key is not set, the HMAC
+    operation is performed without the initial HMAC state change caused by
+    the key.
+ </para>
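+
+   <para>
+    Putting the pieces together, a hedged sketch of a SHA-1 calculation
+    (opfd is the descriptor returned by accept() for the "hash" / "sha1"
+    socket shown above; the data buffers are assumed to be set up by the
+    caller and error handling is omitted):
+   </para>
+
+   <programlisting>
+char digest[20];	/* SHA-1 produces a 160 bit message digest */
+
+/* one-shot: no MSG_MORE, the digest is finalized immediately */
+send(opfd, data, datalen, 0);
+read(opfd, digest, sizeof(digest));
+
+/* multi-part: keep the hash state open with MSG_MORE */
+send(opfd, part1, part1len, MSG_MORE);
+send(opfd, part2, part2len, 0);
+read(opfd, digest, sizeof(digest));
+   </programlisting>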
+ </sect1>
+
+ <sect1><title>Symmetric Cipher API</title>
+ <para>
+ The operation is very similar to the message digest discussion.
+    During initialization, the struct sockaddr_alg data structure must be
+ filled as follows:
+ </para>
+
+ <programlisting>
+struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "skcipher", /* this selects the symmetric cipher */
+ .salg_name = "cbc(aes)" /* this is the cipher name */
+};
+ </programlisting>
+
+ <para>
+ Before data can be sent to the kernel using the write/send system
+ call family, the consumer must set the key. The key setting is
+ described with the setsockopt invocation below.
+ </para>
+
+ <para>
+    Using the sendmsg() system call, the application provides the data
+    that should be processed for encryption or decryption. In addition,
+    the IV is specified with the data structure provided by the sendmsg()
+    system call.
+ </para>
+
+ <para>
+    The struct msghdr parameter of the sendmsg system call carries
+    ancillary data in the form of struct cmsghdr entries. See recv(2) and
+    cmsg(3) for more information on how the cmsghdr data structure is used
+    together with the send/recv system call family. The cmsghdr entries
+    hold the following information, each specified with a separate header
+    instance:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ specification of the cipher operation type with one of these flags:
+ </para>
+ <itemizedlist>
+ <listitem>
+ <para>ALG_OP_ENCRYPT - encryption of data</para>
+ </listitem>
+ <listitem>
+ <para>ALG_OP_DECRYPT - decryption of data</para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+
+ <listitem>
+ <para>
+ specification of the IV information marked with the flag ALG_SET_IV
+ </para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+ The send system call family allows the following flag to be specified:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ MSG_MORE: If this flag is set, the send system call acts like a
+ cipher update function where more input data is expected
+ with a subsequent invocation of the send system call.
+ </para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+ Note: The kernel reports -EINVAL for any unexpected data. The caller
+ must make sure that all data matches the constraints given in
+ /proc/crypto for the selected cipher.
+ </para>
+
+ <para>
+ With the recv() system call, the application can read the result of
+ the cipher operation from the kernel crypto API. The output buffer
+    must be large enough to hold all blocks of the encrypted or
+    decrypted data. If the output buffer is smaller, only as many
+    blocks as fit into that buffer are returned.
+ </para>
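+
+   <para>
+    A hedged sketch of a single cbc(aes) encryption request using the
+    cmsghdr entries described above: error handling and the ALG_SET_KEY
+    setsockopt call are omitted; opfd, the plaintext/ciphertext buffers
+    and the 16 byte iv buffer are assumed to be set up by the caller;
+    ptlen must be a multiple of the 16 byte AES block size; the ALG_*
+    constants come from linux/if_alg.h, AF_ALG/SOL_ALG as defined in the
+    general remarks above.
+   </para>
+
+   <programlisting>
+<![CDATA[
+#include <sys/socket.h>
+#include <linux/if_alg.h>
+#include <stddef.h>
+#include <string.h>
+
+struct msghdr msg = { 0 };
+struct cmsghdr *cmsg;
+struct af_alg_iv *ivm;
+struct iovec iov = { .iov_base = plaintext, .iov_len = ptlen };
+char cbuf[CMSG_SPACE(sizeof(__u32)) +
+	  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
+
+msg.msg_control = cbuf;
+msg.msg_controllen = sizeof(cbuf);
+msg.msg_iov = &iov;
+msg.msg_iovlen = 1;
+
+/* first cmsghdr entry: the cipher operation type */
+cmsg = CMSG_FIRSTHDR(&msg);
+cmsg->cmsg_level = SOL_ALG;
+cmsg->cmsg_type = ALG_SET_OP;
+cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
+*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
+
+/* second cmsghdr entry: the 16 byte IV for cbc(aes) */
+cmsg = CMSG_NXTHDR(&msg, cmsg);
+cmsg->cmsg_level = SOL_ALG;
+cmsg->cmsg_type = ALG_SET_IV;
+cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
+ivm = (struct af_alg_iv *)CMSG_DATA(cmsg);
+ivm->ivlen = 16;
+memcpy(ivm->iv, iv, 16);
+
+sendmsg(opfd, &msg, 0);		/* submit plaintext, IV and operation */
+read(opfd, ciphertext, ptlen);	/* retrieve the ciphertext */
+]]>
+   </programlisting>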
+ </sect1>
+
+ <sect1><title>AEAD Cipher API</title>
+ <para>
+ The operation is very similar to the symmetric cipher discussion.
+    During initialization, the struct sockaddr_alg data structure must be
+ filled as follows:
+ </para>
+
+ <programlisting>
+struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "aead", /* this selects the AEAD cipher */
+ .salg_name = "gcm(aes)" /* this is the cipher name */
+};
+ </programlisting>
+
+ <para>
+ Before data can be sent to the kernel using the write/send system
+ call family, the consumer must set the key. The key setting is
+ described with the setsockopt invocation below.
+ </para>
+
+ <para>
+    In addition, before data can be sent to the kernel using the
+    write/send system call family, the consumer must set the
+    authentication tag size via the setsockopt invocation described
+    below.
+ </para>
+
+ <para>
+    Using the sendmsg() system call, the application provides the data
+    that should be processed for encryption or decryption. In addition,
+    the IV is specified with the data structure provided by the sendmsg()
+    system call.
+ </para>
+
+ <para>
+    The struct msghdr parameter of the sendmsg system call carries
+    ancillary data in the form of struct cmsghdr entries. See recv(2) and
+    cmsg(3) for more information on how the cmsghdr data structure is used
+    together with the send/recv system call family. The cmsghdr entries
+    hold the following information, each specified with a separate header
+    instance:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ specification of the cipher operation type with one of these flags:
+ </para>
+ <itemizedlist>
+ <listitem>
+ <para>ALG_OP_ENCRYPT - encryption of data</para>
+ </listitem>
+ <listitem>
+ <para>ALG_OP_DECRYPT - decryption of data</para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+
+ <listitem>
+ <para>
+ specification of the IV information marked with the flag ALG_SET_IV
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+     specification of the length of the associated authentication data
+     (AAD) with the flag ALG_SET_AEAD_ASSOCLEN. The AAD itself is sent to
+     the kernel together with the plaintext / ciphertext. See below for
+     the memory structure.
+ </para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+ The send system call family allows the following flag to be specified:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ MSG_MORE: If this flag is set, the send system call acts like a
+ cipher update function where more input data is expected
+ with a subsequent invocation of the send system call.
+ </para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+ Note: The kernel reports -EINVAL for any unexpected data. The caller
+ must make sure that all data matches the constraints given in
+ /proc/crypto for the selected cipher.
+ </para>
+
+ <para>
+ With the recv() system call, the application can read the result of
+ the cipher operation from the kernel crypto API. The output buffer
+    must be at least as large as defined by the memory structure below.
+    If the output buffer is smaller, the cipher operation is not
+    performed.
+
+ <para>
+    The authenticated decryption operation may indicate an integrity error.
+    Such a breach of integrity is reported with the -EBADMSG error code.
+ </para>
+
+ <sect2><title>AEAD Memory Structure</title>
+ <para>
+ The AEAD cipher operates with the following information that
+ is communicated between user and kernel space as one data stream:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>plaintext or ciphertext</para>
+ </listitem>
+
+ <listitem>
+ <para>associated authentication data (AAD)</para>
+ </listitem>
+
+ <listitem>
+ <para>authentication tag</para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+    The sizes of the AAD and the authentication tag are provided with
+    the sendmsg and setsockopt calls described above. As the kernel knows
+    the size of the entire data stream, it is able to calculate the
+    right offsets of the data components within the stream.
+ </para>
+
+ <para>
+ The user space caller must arrange the aforementioned information
+ in the following order:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ AEAD encryption input: AAD || plaintext
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ AEAD decryption input: AAD || ciphertext || authentication tag
+ </para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+    The output buffer the user space caller provides must be at least
+    large enough to hold the following data (a small size-arithmetic
+    sketch follows the lists):
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ AEAD encryption output: ciphertext || authentication tag
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ AEAD decryption output: plaintext
+ </para>
+ </listitem>
+ </itemizedlist>
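+
+   <para>
+    The resulting buffer sizes can be sketched with a small arithmetic
+    example (the 16 byte tag length corresponds to the value set with
+    ALG_SET_AEAD_AUTHSIZE, the AAD length to the value announced with
+    ALG_SET_AEAD_ASSOCLEN; all numbers are examples only):
+   </para>
+
+   <programlisting>
+size_t aadlen = 16;   /* announced via ALG_SET_AEAD_ASSOCLEN */
+size_t taglen = 16;   /* set via setsockopt ALG_SET_AEAD_AUTHSIZE */
+size_t ptlen  = 256;  /* plaintext length */
+
+/* encryption: input is AAD || plaintext, output is ciphertext || tag */
+size_t enc_inlen  = aadlen + ptlen;
+size_t enc_outlen = ptlen + taglen;
+
+/* decryption: input is AAD || ciphertext || tag, output is plaintext */
+size_t dec_inlen  = aadlen + ptlen + taglen;
+size_t dec_outlen = ptlen;
+   </programlisting>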
+ </sect2>
+ </sect1>
+
+ <sect1><title>Random Number Generator API</title>
+ <para>
+ Again, the operation is very similar to the other APIs.
+    During initialization, the struct sockaddr_alg data structure must be
+ filled as follows:
+ </para>
+
+ <programlisting>
+struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "rng", /* this selects the random number generator */
+ .salg_name = "drbg_nopr_sha256" /* this is the cipher name */
+};
+ </programlisting>
+
+ <para>
+ Depending on the RNG type, the RNG must be seeded. The seed is provided
+ using the setsockopt interface to set the key. For example, the
+ ansi_cprng requires a seed. The DRBGs do not require a seed, but
+ may be seeded.
+ </para>
+
+ <para>
+ Using the read()/recvmsg() system calls, random numbers can be obtained.
+ The kernel generates at most 128 bytes in one call. If user space
+ requires more data, multiple calls to read()/recvmsg() must be made.
+ </para>
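+
+   <para>
+    A hedged sketch that obtains random bytes from the opfd descriptor
+    bound to the "rng" type as shown above (error handling omitted):
+   </para>
+
+   <programlisting>
+unsigned char rnd[64];
+ssize_t got;
+
+/* at most 128 bytes are returned per call */
+got = read(opfd, rnd, sizeof(rnd));
+   </programlisting>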
+
+ <para>
+ WARNING: The user space caller may invoke the initially mentioned
+ accept system call multiple times. In this case, the returned file
+ descriptors have the same state.
+ </para>
+
+ </sect1>
+
+ <sect1><title>Zero-Copy Interface</title>
+ <para>
+    In addition to the send/write/read/recv system call family, the AF_ALG
+ interface can be accessed with the zero-copy interface of splice/vmsplice.
+ As the name indicates, the kernel tries to avoid a copy operation into
+ kernel space.
+ </para>
+
+ <para>
+ The zero-copy operation requires data to be aligned at the page boundary.
+    Non-aligned data can be used as well, but may require additional
+    operations by the kernel which would defeat the speed gains obtained
+    from the zero-copy interface.
+ </para>
+
+ <para>
+    The system-inherent limit for the size of one zero-copy operation is
+ 16 pages. If more data is to be sent to AF_ALG, user space must slice
+ the input into segments with a maximum size of 16 pages.
+ </para>
+
+ <para>
+ Zero-copy can be used with the following code example (a complete working
+ example is provided with libkcapi):
+ </para>
+
+ <programlisting>
+int pipes[2];
+ssize_t ret;
+
+pipe(pipes);
+/* input data in iov; vmsplice() gifts the pages to the pipe */
+ret = vmsplice(pipes[1], iov, iovlen, SPLICE_F_GIFT);
+/* opfd is the file descriptor returned from the accept() system call */
+splice(pipes[0], NULL, opfd, NULL, ret, 0);
+read(opfd, out, outlen);
+ </programlisting>
+
+ </sect1>
+
+ <sect1><title>Setsockopt Interface</title>
+ <para>
+ In addition to the read/recv and send/write system call handling
+    In addition to the read/recv and send/write system call handling
+    to send and retrieve data subject to the cipher operation, a consumer
+    also needs to set the additional information for the cipher operation.
+    This additional information is set using the setsockopt system call
+    that must be invoked with the file descriptor of the bound cipher
+    socket (i.e. the file descriptor obtained from socket() and passed to
+    bind(), before accept() is called).
+
+ <para>
+ Each setsockopt invocation must use the level SOL_ALG.
+ </para>
+
+ <para>
+    The setsockopt interface allows setting the following data using
+    the mentioned optname (a short code sketch follows the list):
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>
+ ALG_SET_KEY -- Setting the key. Key setting is applicable to:
+ </para>
+ <itemizedlist>
+ <listitem>
+ <para>the skcipher cipher type (symmetric ciphers)</para>
+ </listitem>
+ <listitem>
+ <para>the hash cipher type (keyed message digests)</para>
+ </listitem>
+ <listitem>
+ <para>the AEAD cipher type</para>
+ </listitem>
+ <listitem>
+ <para>the RNG cipher type to provide the seed</para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+
+ <listitem>
+ <para>
+ ALG_SET_AEAD_AUTHSIZE -- Setting the authentication tag size
+     for AEAD ciphers. For an encryption operation, the authentication
+     tag of the given size will be generated. For a decryption operation,
+     the provided ciphertext is assumed to contain an authentication tag
+     of the given size (see the section about the AEAD memory structure
+     above).
+ </para>
+ </listitem>
+ </itemizedlist>
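+
+   <para>
+    A hedged sketch of both options for an AEAD cipher (tfmfd is the
+    AF_ALG socket that was bound with bind(), as described above; the
+    key is an example only and error handling is omitted):
+   </para>
+
+   <programlisting>
+unsigned char key[16] = { 0x00, };  /* example 128 bit AES key */
+
+setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
+
+/* AEAD only: request/expect a 16 byte authentication tag; the size is
+   passed via the option length, the option value is unused */
+setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
+   </programlisting>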
+
+ </sect1>
+
+  <sect1><title>User Space API Example</title>
+   <para>
+    Please see [1] for libkcapi which provides an easy-to-use wrapper
+    around the aforementioned AF_ALG kernel interface. [1] also contains
+    a test application that invokes all libkcapi API calls.
+ </para>
+
+ <para>
+ [1] http://www.chronox.de/libkcapi.html
+ </para>
+
+ </sect1>
+
+ </chapter>
+
<chapter id="API"><title>Programming Interface</title>
<sect1><title>Block Cipher Context Data Structures</title>
!Pinclude/linux/crypto.h Block Cipher Context Data Structures
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 03f1985a4bd1..9765a4c0829d 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1293,7 +1293,7 @@ int max_width, max_height;</synopsis>
</para>
<para>
If a page flip can be successfully scheduled the driver must set the
- <code>drm_crtc-&lt;fb</code> field to the new framebuffer pointed to
+ <code>drm_crtc-&gt;fb</code> field to the new framebuffer pointed to
by <code>fb</code>. This is important so that the reference counting
on framebuffers stays balanced.
</para>
@@ -3979,6 +3979,11 @@ int num_ioctls;</synopsis>
!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
</sect2>
+ <sect2>
+ <title>Intel GVT-g Guest Support(vGPU)</title>
+!Pdrivers/gpu/drm/i915/i915_vgpu.c Intel GVT-g guest support
+!Idrivers/gpu/drm/i915/i915_vgpu.c
+ </sect2>
</sect1>
<sect1>
<title>Display Hardware Handling</title>
@@ -4048,6 +4053,17 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/intel_fbc.c
</sect2>
<sect2>
+ <title>Display Refresh Rate Switching (DRRS)</title>
+!Pdrivers/gpu/drm/i915/intel_dp.c Display Refresh Rate Switching (DRRS)
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_set_drrs_state
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_enable
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_disable
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_invalidate
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_flush
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_drrs_init
+
+ </sect2>
+ <sect2>
<title>DPIO</title>
!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
<table id="dpiox2">
@@ -4168,7 +4184,7 @@ int num_ioctls;</synopsis>
<sect2>
<title>Buffer Object Eviction</title>
<para>
- This section documents the interface function for evicting buffer
+ This section documents the interface functions for evicting buffer
objects to make space available in the virtual gpu address spaces.
Note that this is mostly orthogonal to shrinking buffer objects
caches, which has the goal to make main memory (shared with the gpu
@@ -4176,6 +4192,17 @@ int num_ioctls;</synopsis>
</para>
!Idrivers/gpu/drm/i915/i915_gem_evict.c
</sect2>
+ <sect2>
+ <title>Buffer Object Memory Shrinking</title>
+ <para>
+ This section documents the interface function for shrinking memory
+ usage of buffer object caches. Shrinking is used to make main memory
+ available. Note that this is mostly orthogonal to evicting buffer
+ objects, which has the goal to make space in gpu virtual address
+ spaces.
+ </para>
+!Idrivers/gpu/drm/i915/i915_gem_shrinker.c
+ </sect2>
</sect1>
<sect1>
diff --git a/Documentation/DocBook/media/v4l/biblio.xml b/Documentation/DocBook/media/v4l/biblio.xml
index 7ff01a23c2fe..fdee6b3f3eca 100644
--- a/Documentation/DocBook/media/v4l/biblio.xml
+++ b/Documentation/DocBook/media/v4l/biblio.xml
@@ -1,14 +1,13 @@
<bibliography>
<title>References</title>
- <biblioentry id="eia608">
- <abbrev>EIA&nbsp;608-B</abbrev>
+ <biblioentry id="cea608">
+ <abbrev>CEA&nbsp;608-E</abbrev>
<authorgroup>
- <corpauthor>Electronic Industries Alliance (<ulink
-url="http://www.eia.org">http://www.eia.org</ulink>)</corpauthor>
+ <corpauthor>Consumer Electronics Association (<ulink
+url="http://www.ce.org">http://www.ce.org</ulink>)</corpauthor>
</authorgroup>
- <title>EIA 608-B "Recommended Practice for Line 21 Data
-Service"</title>
+ <title>CEA-608-E R-2014 "Line 21 Data Services"</title>
</biblioentry>
<biblioentry id="en300294">
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 350dfb3d71ea..a0aef85d33c1 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2491,7 +2491,7 @@ that used it. It was originally scheduled for removal in 2.6.35.
</listitem>
<listitem>
<para>Added <constant>V4L2_EVENT_CTRL_CH_RANGE</constant> control event
- changes flag. See <xref linkend="changes-flags"/>.</para>
+ changes flag. See <xref linkend="ctrl-changes-flags"/>.</para>
</listitem>
</orderedlist>
</section>
diff --git a/Documentation/DocBook/media/v4l/dev-sliced-vbi.xml b/Documentation/DocBook/media/v4l/dev-sliced-vbi.xml
index 7a8bf3011ee9..0aec62ed2bf8 100644
--- a/Documentation/DocBook/media/v4l/dev-sliced-vbi.xml
+++ b/Documentation/DocBook/media/v4l/dev-sliced-vbi.xml
@@ -254,7 +254,7 @@ ETS&nbsp;300&nbsp;231, lsb first transmitted.</entry>
<row>
<entry><constant>V4L2_SLICED_CAPTION_525</constant></entry>
<entry>0x1000</entry>
- <entry><xref linkend="eia608" /></entry>
+ <entry><xref linkend="cea608" /></entry>
<entry>NTSC line 21, 284 (second field 21)</entry>
<entry>Two bytes in transmission order, including parity
bit, lsb first transmitted.</entry>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml b/Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml
index 116c301656e0..5872f8bbf774 100644
--- a/Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml
+++ b/Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml
@@ -143,86 +143,28 @@
<row>
<entry></entry>
<entry>struct</entry>
- <entry><structfield>v4l</structfield></entry>
+ <entry><structfield>dev</structfield></entry>
<entry></entry>
- <entry>Valid for V4L sub-devices and nodes only.</entry>
+ <entry>Valid for (sub-)devices that create a single device node.</entry>
</row>
<row>
<entry></entry>
<entry></entry>
<entry>__u32</entry>
<entry><structfield>major</structfield></entry>
- <entry>V4L device node major number. For V4L sub-devices with no
- device node, set by the driver to 0.</entry>
+ <entry>Device node major number.</entry>
</row>
<row>
<entry></entry>
<entry></entry>
<entry>__u32</entry>
<entry><structfield>minor</structfield></entry>
- <entry>V4L device node minor number. For V4L sub-devices with no
- device node, set by the driver to 0.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct</entry>
- <entry><structfield>fb</structfield></entry>
- <entry></entry>
- <entry>Valid for frame buffer nodes only.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>major</structfield></entry>
- <entry>Frame buffer device node major number.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>minor</structfield></entry>
- <entry>Frame buffer device node minor number.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct</entry>
- <entry><structfield>alsa</structfield></entry>
- <entry></entry>
- <entry>Valid for ALSA devices only.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>card</structfield></entry>
- <entry>ALSA card number</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>device</structfield></entry>
- <entry>ALSA device number</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>subdevice</structfield></entry>
- <entry>ALSA sub-device number</entry>
- </row>
- <row>
- <entry></entry>
- <entry>int</entry>
- <entry><structfield>dvb</structfield></entry>
- <entry></entry>
- <entry>DVB card number</entry>
+ <entry>Device node minor number.</entry>
</row>
<row>
<entry></entry>
<entry>__u8</entry>
- <entry><structfield>raw</structfield>[180]</entry>
+ <entry><structfield>raw</structfield>[184]</entry>
<entry></entry>
<entry></entry>
</row>
@@ -253,8 +195,24 @@
<entry>ALSA card</entry>
</row>
<row>
- <entry><constant>MEDIA_ENT_T_DEVNODE_DVB</constant></entry>
- <entry>DVB card</entry>
+ <entry><constant>MEDIA_ENT_T_DEVNODE_DVB_FE</constant></entry>
+ <entry>DVB frontend devnode</entry>
+ </row>
+ <row>
+ <entry><constant>MEDIA_ENT_T_DEVNODE_DVB_DEMUX</constant></entry>
+ <entry>DVB demux devnode</entry>
+ </row>
+ <row>
+ <entry><constant>MEDIA_ENT_T_DEVNODE_DVB_DVR</constant></entry>
+ <entry>DVB DVR devnode</entry>
+ </row>
+ <row>
+ <entry><constant>MEDIA_ENT_T_DEVNODE_DVB_CA</constant></entry>
+ <entry>DVB CAM devnode</entry>
+ </row>
+ <row>
+ <entry><constant>MEDIA_ENT_T_DEVNODE_DVB_NET</constant></entry>
+ <entry>DVB network devnode</entry>
</row>
<row>
<entry><constant>MEDIA_ENT_T_V4L2_SUBDEV</constant></entry>
@@ -282,6 +240,10 @@
it in some digital video standard, with appropriate embedded timing
signals.</entry>
</row>
+ <row>
+ <entry><constant>MEDIA_ENT_T_V4L2_SUBDEV_TUNER</constant></entry>
+ <entry>TV and/or radio tuner</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml b/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
index 6ab4f0f3db64..b60fb935b91b 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
@@ -303,45 +303,6 @@ for a pixel lie next to each other in memory.</para>
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-PIX-FMT-BGR666">
- <entry><constant>V4L2_PIX_FMT_BGR666</constant></entry>
- <entry>'BGRH'</entry>
- <entry></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- </row>
<row id="V4L2-PIX-FMT-BGR24">
<entry><constant>V4L2_PIX_FMT_BGR24</constant></entry>
<entry>'BGR3'</entry>
@@ -404,6 +365,46 @@ for a pixel lie next to each other in memory.</para>
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
+ <row id="V4L2-PIX-FMT-BGR666">
+ <entry><constant>V4L2_PIX_FMT_BGR666</constant></entry>
+ <entry>'BGRH'</entry>
+ <entry></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ </row>
<row id="V4L2-PIX-FMT-ABGR32">
<entry><constant>V4L2_PIX_FMT_ABGR32</constant></entry>
<entry>'AR24'</entry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml b/Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml
index 19727ab4c757..7803b8c41b45 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml
@@ -38,10 +38,10 @@ columns and rows.</para>
</row>
<row>
<entry>start&nbsp;+&nbsp;4:</entry>
- <entry>R<subscript>10</subscript></entry>
- <entry>B<subscript>11</subscript></entry>
- <entry>R<subscript>12</subscript></entry>
- <entry>B<subscript>13</subscript></entry>
+ <entry>B<subscript>10</subscript></entry>
+ <entry>G<subscript>11</subscript></entry>
+ <entry>B<subscript>12</subscript></entry>
+ <entry>G<subscript>13</subscript></entry>
</row>
<row>
<entry>start&nbsp;+&nbsp;8:</entry>
@@ -52,10 +52,10 @@ columns and rows.</para>
</row>
<row>
<entry>start&nbsp;+&nbsp;12:</entry>
- <entry>R<subscript>30</subscript></entry>
- <entry>B<subscript>31</subscript></entry>
- <entry>R<subscript>32</subscript></entry>
- <entry>B<subscript>33</subscript></entry>
+ <entry>B<subscript>30</subscript></entry>
+ <entry>G<subscript>31</subscript></entry>
+ <entry>B<subscript>32</subscript></entry>
+ <entry>G<subscript>33</subscript></entry>
</row>
</tbody>
</tgroup>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml
index 30aa63581fe3..a8cc102cde4f 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml
@@ -38,7 +38,7 @@
<title>Byte Order.</title>
<para>Each cell is one byte.
<informaltable frame="topbot" colsep="1" rowsep="1">
- <tgroup cols="5" align="center" border="1">
+ <tgroup cols="5" align="center">
<colspec align="left" colwidth="2*" />
<tbody valign="top">
<row>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
index 60308f1eefdf..e781cc61786c 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
@@ -29,12 +29,12 @@ and Cr planes have half as many pad bytes after their rows. In other
words, two Cx rows (including padding) is exactly as long as one Y row
(including padding).</para>
- <para><constant>V4L2_PIX_FMT_NV12M</constant> is intended to be
+ <para><constant>V4L2_PIX_FMT_YUV420M</constant> is intended to be
used only in drivers and applications that support the multi-planar API,
described in <xref linkend="planar-apis"/>. </para>
<example>
- <title><constant>V4L2_PIX_FMT_YVU420M</constant> 4 &times; 4
+ <title><constant>V4L2_PIX_FMT_YUV420M</constant> 4 &times; 4
pixel image</title>
<formalpara>
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index 5e0352c50324..fcde4e20205e 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -80,9 +80,9 @@ padding bytes after the last line of an image cross a system page
boundary. Input devices may write padding bytes, the value is
undefined. Output devices ignore the contents of padding
bytes.</para><para>When the image format is planar the
-<structfield>bytesperline</structfield> value applies to the largest
+<structfield>bytesperline</structfield> value applies to the first
plane and is divided by the same factor as the
-<structfield>width</structfield> field for any smaller planes. For
+<structfield>width</structfield> field for the other planes. For
example the Cb and Cr planes of a YUV 4:2:0 image have half as many
padding bytes following each line as the Y plane. To avoid ambiguities
drivers must return a <structfield>bytesperline</structfield> value
@@ -182,14 +182,14 @@ see <xref linkend="colorspaces" />.</entry>
</entry>
</row>
<row>
- <entry>__u16</entry>
+ <entry>__u32</entry>
<entry><structfield>bytesperline</structfield></entry>
<entry>Distance in bytes between the leftmost pixels in two adjacent
lines. See &v4l2-pix-format;.</entry>
</row>
<row>
<entry>__u16</entry>
- <entry><structfield>reserved[7]</structfield></entry>
+ <entry><structfield>reserved[6]</structfield></entry>
<entry>Reserved for future extensions. Should be zeroed by the
application.</entry>
</row>
@@ -483,8 +483,8 @@ is the Y'CbCr encoding identifier (&v4l2-ycbcr-encoding;) to specify non-standar
Y'CbCr encodings and the third is the quantization identifier (&v4l2-quantization;)
to specify non-standard quantization methods. Most of the time only the colorspace
field of &v4l2-pix-format; or &v4l2-pix-format-mplane; needs to be filled in. Note
-that the default R'G'B' quantization is always full range for all colorspaces,
-so this won't be mentioned explicitly for each colorspace description.</para>
+that the default R'G'B' quantization is full range for all colorspaces except for
+BT.2020 which uses limited range R'G'B' quantization.</para>
<table pgwide="1" frame="none" id="v4l2-colorspace">
<title>V4L2 Colorspaces</title>
@@ -598,7 +598,8 @@ so this won't be mentioned explicitly for each colorspace description.</para>
<row>
<entry><constant>V4L2_QUANTIZATION_DEFAULT</constant></entry>
<entry>Use the default quantization encoding as defined by the colorspace.
-This is always full range for R'G'B' and usually limited range for Y'CbCr.</entry>
+This is always full range for R'G'B' (except for the BT.2020 colorspace) and usually
+limited range for Y'CbCr.</entry>
</row>
<row>
<entry><constant>V4L2_QUANTIZATION_FULL_RANGE</constant></entry>
@@ -620,8 +621,8 @@ is mapped to [16&hellip;235]. Cb and Cr are mapped from [-0.5&hellip;0.5] to [16
<section>
<title>Detailed Colorspace Descriptions</title>
- <section>
- <title id="col-smpte-170m">Colorspace SMPTE 170M (<constant>V4L2_COLORSPACE_SMPTE170M</constant>)</title>
+ <section id="col-smpte-170m">
+ <title>Colorspace SMPTE 170M (<constant>V4L2_COLORSPACE_SMPTE170M</constant>)</title>
<para>The <xref linkend="smpte170m" /> standard defines the colorspace used by NTSC and PAL and by SDTV
in general. The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>.
The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and
@@ -666,8 +667,7 @@ as the SMPTE C set, so this colorspace is sometimes called SMPTE C as well.</par
<variablelist>
<varlistentry>
<term>The transfer function defined for SMPTE 170M is the same as the
-one defined in Rec. 709. Normally L is in the range [0&hellip;1], but for the extended
-gamut xvYCC encoding values outside that range are allowed.</term>
+one defined in Rec. 709.</term>
<listitem>
<para>L' = -1.099(-L)<superscript>0.45</superscript>&nbsp;+&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&le;&nbsp;-0.018</para>
<para>L' = 4.5L&nbsp;for&nbsp;-0.018&nbsp;&lt;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
@@ -702,29 +702,10 @@ defined in the <xref linkend="itu601" /> standard and this colorspace is sometim
though BT.601 does not mention any color primaries.</para>
<para>The default quantization is limited range, but full range is possible although
rarely seen.</para>
- <para>The <constant>V4L2_YCBCR_ENC_601</constant> encoding as described above is the
-default for this colorspace, but it can be overridden with <constant>V4L2_YCBCR_ENC_709</constant>,
-in which case the Rec. 709 Y'CbCr encoding is used.</para>
- <variablelist>
- <varlistentry>
- <term>The xvYCC 601 encoding (<constant>V4L2_YCBCR_ENC_XV601</constant>, <xref linkend="xvycc" />) is similar
-to the BT.601 encoding, but it allows for R', G' and B' values that are outside the range
-[0&hellip;1]. The resulting Y', Cb and Cr values are scaled and offset:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;255)</para>
- <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B')</para>
- <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B')</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are clamped
-to the range [-0.5&hellip;0.5]. The non-standard xvYCC 709 encoding can also be used by selecting
-<constant>V4L2_YCBCR_ENC_XV709</constant>. The xvYCC encodings always use full range
-quantization.</para>
</section>
- <section>
- <title id="col-rec709">Colorspace Rec. 709 (<constant>V4L2_COLORSPACE_REC709</constant>)</title>
+ <section id="col-rec709">
+ <title>Colorspace Rec. 709 (<constant>V4L2_COLORSPACE_REC709</constant>)</title>
<para>The <xref linkend="itu709" /> standard defines the colorspace used by HDTV in general. The default
Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_709</constant>. The default Y'CbCr quantization is
limited range. The chromaticities of the primary colors and the white reference are:</para>
@@ -803,26 +784,39 @@ rarely seen.</para>
<para>The <constant>V4L2_YCBCR_ENC_709</constant> encoding described above is the default
for this colorspace, but it can be overridden with <constant>V4L2_YCBCR_ENC_601</constant>, in which
case the BT.601 Y'CbCr encoding is used.</para>
+ <para>Two additional extended gamut Y'CbCr encodings are also possible with this colorspace:</para>
<variablelist>
<varlistentry>
<term>The xvYCC 709 encoding (<constant>V4L2_YCBCR_ENC_XV709</constant>, <xref linkend="xvycc" />)
is similar to the Rec. 709 encoding, but it allows for R', G' and B' values that are outside the range
[0&hellip;1]. The resulting Y', Cb and Cr values are scaled and offset:</term>
<listitem>
- <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.2126R'&nbsp;+&nbsp;0.7152G'&nbsp;+&nbsp;0.0722B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;255)</para>
- <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(-0.1146R'&nbsp;-&nbsp;0.3854G'&nbsp;+&nbsp;0.5B')</para>
- <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.4542G'&nbsp;-&nbsp;0.0458B')</para>
+ <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.2126R'&nbsp;+&nbsp;0.7152G'&nbsp;+&nbsp;0.0722B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;256)</para>
+ <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(-0.1146R'&nbsp;-&nbsp;0.3854G'&nbsp;+&nbsp;0.5B')</para>
+ <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.4542G'&nbsp;-&nbsp;0.0458B')</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>The xvYCC 601 encoding (<constant>V4L2_YCBCR_ENC_XV601</constant>, <xref linkend="xvycc" />) is similar
+to the BT.601 encoding, but it allows for R', G' and B' values that are outside the range
+[0&hellip;1]. The resulting Y', Cb and Cr values are scaled and offset:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;256)</para>
+ <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B')</para>
+ <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B')</para>
</listitem>
</varlistentry>
</variablelist>
<para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are clamped
-to the range [-0.5&hellip;0.5]. The non-standard xvYCC 601 encoding can also be used by
-selecting <constant>V4L2_YCBCR_ENC_XV601</constant>. The xvYCC encodings always use full
-range quantization.</para>
+to the range [-0.5&hellip;0.5]. The non-standard xvYCC 709 or xvYCC 601 encodings can be used by
+selecting <constant>V4L2_YCBCR_ENC_XV709</constant> or <constant>V4L2_YCBCR_ENC_XV601</constant>.
+The xvYCC encodings always use full range quantization.</para>
</section>
- <section>
- <title id="col-srgb">Colorspace sRGB (<constant>V4L2_COLORSPACE_SRGB</constant>)</title>
+ <section id="col-srgb">
+ <title>Colorspace sRGB (<constant>V4L2_COLORSPACE_SRGB</constant>)</title>
<para>The <xref linkend="srgb" /> standard defines the colorspace used by most webcams and computer graphics. The
default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_SYCC</constant>. The default Y'CbCr quantization
is full range. The chromaticities of the primary colors and the white reference are:</para>
@@ -898,8 +892,8 @@ encoding, it is not. The <constant>V4L2_YCBCR_ENC_XV601</constant> scales and of
values before quantization, but this encoding does not do that.</para>
</section>
- <section>
- <title id="col-adobergb">Colorspace Adobe RGB (<constant>V4L2_COLORSPACE_ADOBERGB</constant>)</title>
+ <section id="col-adobergb">
+ <title>Colorspace Adobe RGB (<constant>V4L2_COLORSPACE_ADOBERGB</constant>)</title>
<para>The <xref linkend="adobergb" /> standard defines the colorspace used by computer graphics
that use the AdobeRGB colorspace. This is also known as the <xref linkend="oprgb" /> standard.
The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>. The default Y'CbCr
@@ -970,12 +964,12 @@ clamped to the range [-0.5&hellip;0.5]. This transform is identical to one defin
SMPTE 170M/BT.601. The Y'CbCr quantization is limited range.</para>
</section>
- <section>
- <title id="col-bt2020">Colorspace BT.2020 (<constant>V4L2_COLORSPACE_BT2020</constant>)</title>
+ <section id="col-bt2020">
+ <title>Colorspace BT.2020 (<constant>V4L2_COLORSPACE_BT2020</constant>)</title>
<para>The <xref linkend="itu2020" /> standard defines the colorspace used by Ultra-high definition
television (UHDTV). The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_BT2020</constant>.
-The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and
-the white reference are:</para>
+The default R'G'B' quantization is limited range (!), and so is the default Y'CbCr quantization.
+The chromaticities of the primary colors and the white reference are:</para>
<table frame="none">
<title>BT.2020 Chromaticities</title>
<tgroup cols="3" align="left">
@@ -1032,7 +1026,7 @@ the white reference are:</para>
<term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
following <constant>V4L2_YCBCR_ENC_BT2020</constant> encoding:</term>
<listitem>
- <para>Y'&nbsp;=&nbsp;0.2627R'&nbsp;+&nbsp;0.6789G'&nbsp;+&nbsp;0.0593B'</para>
+ <para>Y'&nbsp;=&nbsp;0.2627R'&nbsp;+&nbsp;0.6780G'&nbsp;+&nbsp;0.0593B'</para>
<para>Cb&nbsp;=&nbsp;-0.1396R'&nbsp;-&nbsp;0.3604G'&nbsp;+&nbsp;0.5B'</para>
<para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4598G'&nbsp;-&nbsp;0.0402B'</para>
</listitem>
@@ -1046,7 +1040,7 @@ clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range
<varlistentry>
<term>Luma:</term>
<listitem>
- <para>Yc'&nbsp;=&nbsp;(0.2627R&nbsp;+&nbsp;0.6789G&nbsp;+&nbsp;0.0593B)'</para>
+ <para>Yc'&nbsp;=&nbsp;(0.2627R&nbsp;+&nbsp;0.6780G&nbsp;+&nbsp;0.0593B)'</para>
</listitem>
</varlistentry>
</variablelist>
@@ -1054,7 +1048,7 @@ clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range
<varlistentry>
<term>B'&nbsp;-&nbsp;Yc'&nbsp;&le;&nbsp;0:</term>
<listitem>
- <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Y')&nbsp;/&nbsp;1.9404</para>
+ <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.9404</para>
</listitem>
</varlistentry>
</variablelist>
@@ -1062,7 +1056,7 @@ clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range
<varlistentry>
<term>B'&nbsp;-&nbsp;Yc'&nbsp;&gt;&nbsp;0:</term>
<listitem>
- <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Y')&nbsp;/&nbsp;1.5816</para>
+ <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.5816</para>
</listitem>
</varlistentry>
</variablelist>
@@ -1086,8 +1080,8 @@ clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range
clamped to the range [-0.5&hellip;0.5]. The Yc'CbcCrc quantization is limited range.</para>
</section>
- <section>
- <title id="col-smpte-240m">Colorspace SMPTE 240M (<constant>V4L2_COLORSPACE_SMPTE240M</constant>)</title>
+ <section id="col-smpte-240m">
+ <title>Colorspace SMPTE 240M (<constant>V4L2_COLORSPACE_SMPTE240M</constant>)</title>
<para>The <xref linkend="smpte240m" /> standard was an interim standard used during the early days of HDTV (1988-1998).
It has been superseded by Rec. 709. The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_SMPTE240M</constant>.
The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and the
@@ -1159,8 +1153,8 @@ following <constant>V4L2_YCBCR_ENC_SMPTE240M</constant> encoding:</term>
clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.</para>
</section>
- <section>
- <title id="col-sysm">Colorspace NTSC 1953 (<constant>V4L2_COLORSPACE_470_SYSTEM_M</constant>)</title>
+ <section id="col-sysm">
+ <title>Colorspace NTSC 1953 (<constant>V4L2_COLORSPACE_470_SYSTEM_M</constant>)</title>
<para>This standard defines the colorspace used by NTSC in 1953. In practice this
colorspace is obsolete and SMPTE 170M should be used instead. The default Y'CbCr encoding
is <constant>V4L2_YCBCR_ENC_601</constant>. The default Y'CbCr quantization is limited range.
@@ -1237,8 +1231,8 @@ clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range
This transform is identical to one defined in SMPTE 170M/BT.601.</para>
</section>
- <section>
- <title id="col-sysbg">Colorspace EBU Tech. 3213 (<constant>V4L2_COLORSPACE_470_SYSTEM_BG</constant>)</title>
+ <section id="col-sysbg">
+ <title>Colorspace EBU Tech. 3213 (<constant>V4L2_COLORSPACE_470_SYSTEM_BG</constant>)</title>
<para>The <xref linkend="tech3213" /> standard defines the colorspace used by PAL/SECAM in 1975. In practice this
colorspace is obsolete and SMPTE 170M should be used instead. The default Y'CbCr encoding
is <constant>V4L2_YCBCR_ENC_601</constant>. The default Y'CbCr quantization is limited range.
@@ -1311,8 +1305,8 @@ clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range
This transform is identical to one defined in SMPTE 170M/BT.601.</para>
</section>
- <section>
- <title id="col-jpeg">Colorspace JPEG (<constant>V4L2_COLORSPACE_JPEG</constant>)</title>
+ <section id="col-jpeg">
+ <title>Colorspace JPEG (<constant>V4L2_COLORSPACE_JPEG</constant>)</title>
<para>This colorspace defines the colorspace used by most (Motion-)JPEG formats. The chromaticities
of the primary colors and the white reference are identical to sRGB. The Y'CbCr encoding is
<constant>V4L2_YCBCR_ENC_601</constant> with full range quantization where
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index c5ea868e3909..2588ad781242 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -91,7 +91,9 @@ see <xref linkend="colorspaces" />.</entry>
<listitem><para>For formats where the total number of bits per pixel is smaller
than the number of bus samples per pixel times the bus width, a padding
value stating if the bytes are padded in their most high order bits
- (PADHI) or low order bits (PADLO).</para></listitem>
+ (PADHI) or low order bits (PADLO). A "C" prefix is used for component-wise
+ padding in the most high order bits (CPADHI) or low order bits (CPADLO)
+ of each separate component.</para></listitem>
<listitem><para>For formats where the number of bus samples per pixel is larger
than 1, an endianness value stating if the pixel is transferred MSB first
(BE) or LSB first (LE).</para></listitem>
@@ -192,6 +194,24 @@ see <xref linkend="colorspaces" />.</entry>
</row>
</thead>
<tbody valign="top">
+ <row id="MEDIA-BUS-FMT-RGB444-1X12">
+ <entry>MEDIA_BUS_FMT_RGB444_1X12</entry>
+ <entry>0x1016</entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
<row id="MEDIA-BUS-FMT-RGB444-2X8-PADHI-BE">
<entry>MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE</entry>
<entry>0x1001</entry>
@@ -304,6 +324,28 @@ see <xref linkend="colorspaces" />.</entry>
<entry>g<subscript>4</subscript></entry>
<entry>g<subscript>3</subscript></entry>
</row>
+ <row id="MEDIA-BUS-FMT-RGB565-1X16">
+ <entry>MEDIA_BUS_FMT_RGB565_1X16</entry>
+ <entry>0x1017</entry>
+ <entry></entry>
+ &dash-ent-16;
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
<row id="MEDIA-BUS-FMT-BGR565-2X8-BE">
<entry>MEDIA_BUS_FMT_BGR565_2X8_BE</entry>
<entry>0x1005</entry>
@@ -440,6 +482,126 @@ see <xref linkend="colorspaces" />.</entry>
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
+ <row id="MEDIA-BUS-FMT-RBG888-1X24">
+ <entry>MEDIA_BUS_FMT_RBG888_1X24</entry>
+ <entry>0x100e</entry>
+ <entry></entry>
+ &dash-ent-8;
+ <entry>r<subscript>7</subscript></entry>
+ <entry>r<subscript>6</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>b<subscript>7</subscript></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ </row>
+ <row id="MEDIA-BUS-FMT-RGB666-1X24_CPADHI">
+ <entry>MEDIA_BUS_FMT_RGB666_1X24_CPADHI</entry>
+ <entry>0x1015</entry>
+ <entry></entry>
+ &dash-ent-8;
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
+ <row id="MEDIA-BUS-FMT-BGR888-1X24">
+ <entry>MEDIA_BUS_FMT_BGR888_1X24</entry>
+ <entry>0x1013</entry>
+ <entry></entry>
+ &dash-ent-8;
+ <entry>b<subscript>7</subscript></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>r<subscript>7</subscript></entry>
+ <entry>r<subscript>6</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ </row>
+ <row id="MEDIA-BUS-FMT-GBR888-1X24">
+ <entry>MEDIA_BUS_FMT_GBR888_1X24</entry>
+ <entry>0x1014</entry>
+ <entry></entry>
+ &dash-ent-8;
+ <entry>g<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>7</subscript></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ <entry>r<subscript>7</subscript></entry>
+ <entry>r<subscript>6</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ </row>
<row id="MEDIA-BUS-FMT-RGB888-1X24">
<entry>MEDIA_BUS_FMT_RGB888_1X24</entry>
<entry>0x100a</entry>
@@ -579,6 +741,298 @@ see <xref linkend="colorspaces" />.</entry>
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
+ <row id="MEDIA-BUS-FMT-RGB888-1X32-PADHI">
+ <entry>MEDIA_BUS_FMT_RGB888_1X32_PADHI</entry>
+ <entry>0x100f</entry>
+ <entry></entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>0</entry>
+ <entry>r<subscript>7</subscript></entry>
+ <entry>r<subscript>6</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>7</subscript></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
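+
+    <para>As a hypothetical sketch only (not part of the format definitions
+    above), an application could discover which of these media bus codes a
+    sub-device pad supports by enumerating them; the open sub-device file
+    descriptor fd and the pad index are assumptions:</para>
+
+    <programlisting>
+struct v4l2_subdev_mbus_code_enum mbus_code;
+
+memset(&amp;mbus_code, 0, sizeof(mbus_code));
+mbus_code.pad = 0;	/* hypothetical pad index */
+
+/* Enumerate codes until the driver returns EINVAL. */
+while (ioctl(fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &amp;mbus_code) == 0) {
+	printf("supported media bus code: 0x%04x\n", mbus_code.code);
+	mbus_code.index++;
+}
+    </programlisting>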
+
+ <para>On LVDS buses, each sample is usually transferred serialized in
+ seven time slots per pixel clock, on three (18-bit) or four (24-bit)
+ differential data pairs in parallel. The remaining bits are used for
+ control signals as defined by the SPWG/PSWG/VESA or JEIDA standards.
+ For example, the 24-bit RGB format serialized in seven time slots on
+ four lanes using the JEIDA-defined bit mapping is named
+ <constant>MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA</constant>.
+ </para>
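+
+    <para>A minimal sketch, assuming an already opened sub-device file
+    descriptor fd, of how such an LVDS code could be requested; the pad
+    index and frame size are placeholders:</para>
+
+    <programlisting>
+struct v4l2_subdev_format fmt;
+
+memset(&amp;fmt, 0, sizeof(fmt));
+fmt.pad = 0;				/* hypothetical pad index */
+fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+fmt.format.width = 1280;		/* placeholder frame size */
+fmt.format.height = 800;
+fmt.format.code = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+fmt.format.field = V4L2_FIELD_NONE;
+
+if (ioctl(fd, VIDIOC_SUBDEV_S_FMT, &amp;fmt) &lt; 0)
+	perror("VIDIOC_SUBDEV_S_FMT");
+    </programlisting>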
+
+ <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-rgb-lvds">
+ <title>LVDS RGB formats</title>
+ <tgroup cols="8">
+ <colspec colname="id" align="left" />
+ <colspec colname="code" align="center" />
+ <colspec colname="slot" align="center" />
+ <colspec colname="lane" />
+ <colspec colnum="5" colname="l03" align="center" />
+ <colspec colnum="6" colname="l02" align="center" />
+ <colspec colnum="7" colname="l01" align="center" />
+ <colspec colnum="8" colname="l00" align="center" />
+ <spanspec namest="l03" nameend="l00" spanname="l0" />
+ <thead>
+ <row>
+ <entry>Identifier</entry>
+ <entry>Code</entry>
+ <entry></entry>
+ <entry></entry>
+ <entry spanname="l0">Data organization</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>Timeslot</entry>
+ <entry>Lane</entry>
+ <entry>3</entry>
+ <entry>2</entry>
+ <entry>1</entry>
+ <entry>0</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row id="MEDIA-BUS-FMT-RGB666-1X7X3-SPWG">
+ <entry>MEDIA_BUS_FMT_RGB666_1X7X3_SPWG</entry>
+ <entry>0x1010</entry>
+ <entry>0</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>d</entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>1</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>d</entry>
+ <entry>b<subscript>0</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>2</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>d</entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>3</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>4</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>5</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>6</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ </row>
+ <row id="MEDIA-BUS-FMT-RGB888-1X7X4-SPWG">
+ <entry>MEDIA_BUS_FMT_RGB888_1X7X4_SPWG</entry>
+ <entry>0x1011</entry>
+ <entry>0</entry>
+ <entry></entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>1</entry>
+ <entry></entry>
+ <entry>b<subscript>7</subscript></entry>
+ <entry>d</entry>
+ <entry>b<subscript>0</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>2</entry>
+ <entry></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>d</entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>3</entry>
+ <entry></entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>4</entry>
+ <entry></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>5</entry>
+ <entry></entry>
+ <entry>r<subscript>7</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>6</entry>
+ <entry></entry>
+ <entry>r<subscript>6</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ </row>
+ <row id="MEDIA-BUS-FMT-RGB888-1X7X4-JEIDA">
+ <entry>MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA</entry>
+ <entry>0x1012</entry>
+ <entry>0</entry>
+ <entry></entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>1</entry>
+ <entry></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>d</entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>r<subscript>7</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>2</entry>
+ <entry></entry>
+ <entry>b<subscript>0</subscript></entry>
+ <entry>d</entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>r<subscript>6</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>3</entry>
+ <entry></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>b<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>4</entry>
+ <entry></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>5</entry>
+ <entry></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry>6</entry>
+ <entry></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -2188,11 +2642,15 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-UYVY8-1X16">
- <entry>MEDIA_BUS_FMT_UYVY8_1X16</entry>
- <entry>0x200f</entry>
+ <row id="MEDIA-BUS-FMT-UYVY12-2X12">
+ <entry>MEDIA_BUS_FMT_UYVY12_2X12</entry>
+ <entry>0x201c</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>u<subscript>11</subscript></entry>
+ <entry>u<subscript>10</subscript></entry>
+ <entry>u<subscript>9</subscript></entry>
+ <entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2201,6 +2659,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2214,7 +2682,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>v<subscript>11</subscript></entry>
+ <entry>v<subscript>10</subscript></entry>
+ <entry>v<subscript>9</subscript></entry>
+ <entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2223,6 +2695,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>2</subscript></entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2232,11 +2714,15 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-VYUY8-1X16">
- <entry>MEDIA_BUS_FMT_VYUY8_1X16</entry>
- <entry>0x2010</entry>
+ <row id="MEDIA-BUS-FMT-VYUY12-2X12">
+ <entry>MEDIA_BUS_FMT_VYUY12_2X12</entry>
+ <entry>0x201d</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>v<subscript>11</subscript></entry>
+ <entry>v<subscript>10</subscript></entry>
+ <entry>v<subscript>9</subscript></entry>
+ <entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2245,6 +2731,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>2</subscript></entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2258,7 +2754,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>u<subscript>11</subscript></entry>
+ <entry>u<subscript>10</subscript></entry>
+ <entry>u<subscript>9</subscript></entry>
+ <entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2267,6 +2767,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2276,11 +2786,15 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-YUYV8-1X16">
- <entry>MEDIA_BUS_FMT_YUYV8_1X16</entry>
- <entry>0x2011</entry>
+ <row id="MEDIA-BUS-FMT-YUYV12-2X12">
+ <entry>MEDIA_BUS_FMT_YUYV12_2X12</entry>
+ <entry>0x201e</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2289,6 +2803,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>u<subscript>11</subscript></entry>
+ <entry>u<subscript>10</subscript></entry>
+ <entry>u<subscript>9</subscript></entry>
+ <entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2302,7 +2826,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2311,6 +2839,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>v<subscript>11</subscript></entry>
+ <entry>v<subscript>10</subscript></entry>
+ <entry>v<subscript>9</subscript></entry>
+ <entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2320,11 +2858,15 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-YVYU8-1X16">
- <entry>MEDIA_BUS_FMT_YVYU8_1X16</entry>
- <entry>0x2012</entry>
+ <row id="MEDIA-BUS-FMT-YVYU12-2X12">
+ <entry>MEDIA_BUS_FMT_YVYU12_2X12</entry>
+ <entry>0x201f</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2333,6 +2875,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-20;
+ <entry>v<subscript>11</subscript></entry>
+ <entry>v<subscript>10</subscript></entry>
+ <entry>v<subscript>9</subscript></entry>
+ <entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2346,29 +2898,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YDYUYDYV8-1X16">
- <entry>MEDIA_BUS_FMT_YDYUYDYV8_1X16</entry>
- <entry>0x2014</entry>
- <entry></entry>
- &dash-ent-16;
+ &dash-ent-20;
+ <entry>y<subscript>11</subscript></entry>
+ <entry>y<subscript>10</subscript></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2377,28 +2911,16 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
</row>
<row>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
+ &dash-ent-20;
+ <entry>u<subscript>11</subscript></entry>
+ <entry>u<subscript>10</subscript></entry>
+ <entry>u<subscript>9</subscript></entry>
+ <entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2408,57 +2930,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
+ <row id="MEDIA-BUS-FMT-UYVY8-1X16">
+ <entry>MEDIA_BUS_FMT_UYVY8_1X16</entry>
+ <entry>0x200f</entry>
<entry></entry>
&dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY10-1X20">
- <entry>MEDIA_BUS_FMT_UYVY10_1X20</entry>
- <entry>0x201a</entry>
- <entry></entry>
- &dash-ent-12;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2467,8 +2943,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2482,9 +2956,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-12;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2493,8 +2965,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>2</subscript></entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2504,13 +2974,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-VYUY10-1X20">
- <entry>MEDIA_BUS_FMT_VYUY10_1X20</entry>
- <entry>0x201b</entry>
+ <row id="MEDIA-BUS-FMT-VYUY8-1X16">
+ <entry>MEDIA_BUS_FMT_VYUY8_1X16</entry>
+ <entry>0x2010</entry>
<entry></entry>
- &dash-ent-12;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2519,8 +2987,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>2</subscript></entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2534,9 +3000,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-12;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2545,8 +3009,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2556,13 +3018,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-YUYV10-1X20">
- <entry>MEDIA_BUS_FMT_YUYV10_1X20</entry>
- <entry>0x200d</entry>
+ <row id="MEDIA-BUS-FMT-YUYV8-1X16">
+ <entry>MEDIA_BUS_FMT_YUYV8_1X16</entry>
+ <entry>0x2011</entry>
<entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2571,8 +3031,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2586,9 +3044,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2597,8 +3053,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2608,13 +3062,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-YVYU10-1X20">
- <entry>MEDIA_BUS_FMT_YVYU10_1X20</entry>
- <entry>0x200e</entry>
+ <row id="MEDIA-BUS-FMT-YVYU8-1X16">
+ <entry>MEDIA_BUS_FMT_YVYU8_1X16</entry>
+ <entry>0x2012</entry>
<entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2623,8 +3075,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2638,9 +3088,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2649,8 +3097,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2660,14 +3106,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-YUV10-1X30">
- <entry>MEDIA_BUS_FMT_YUV10_1X30</entry>
- <entry>0x2016</entry>
+ <row id="MEDIA-BUS-FMT-YDYUYDYV8-1X16">
+ <entry>MEDIA_BUS_FMT_YDYUYDYV8_1X16</entry>
+ <entry>0x2014</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2676,39 +3119,20 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
</row>
- <row id="MEDIA-BUS-FMT-AYUV8-1X32">
- <entry>MEDIA_BUS_FMT_AYUV8_1X32</entry>
- <entry>0x2017</entry>
+ <row>
<entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2725,6 +3149,42 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-16;
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-16;
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2734,13 +3194,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-UYVY12-2X12">
- <entry>MEDIA_BUS_FMT_UYVY12_2X12</entry>
- <entry>0x201c</entry>
+ <row id="MEDIA-BUS-FMT-UYVY10-1X20">
+ <entry>MEDIA_BUS_FMT_UYVY10_1X20</entry>
+ <entry>0x201a</entry>
<entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>u<subscript>9</subscript></entry>
<entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
@@ -2751,14 +3209,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2774,9 +3224,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2787,14 +3235,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>2</subscript></entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2806,13 +3246,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-VYUY12-2X12">
- <entry>MEDIA_BUS_FMT_VYUY12_2X12</entry>
- <entry>0x201d</entry>
+ <row id="MEDIA-BUS-FMT-VYUY10-1X20">
+ <entry>MEDIA_BUS_FMT_VYUY10_1X20</entry>
+ <entry>0x201b</entry>
<entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2823,14 +3261,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>2</subscript></entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2846,9 +3276,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>u<subscript>9</subscript></entry>
<entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
@@ -2859,14 +3287,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2878,13 +3298,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-YUYV12-2X12">
- <entry>MEDIA_BUS_FMT_YUYV12_2X12</entry>
- <entry>0x201e</entry>
+ <row id="MEDIA-BUS-FMT-YUYV10-1X20">
+ <entry>MEDIA_BUS_FMT_YUYV10_1X20</entry>
+ <entry>0x200d</entry>
<entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2895,14 +3313,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
<entry>u<subscript>9</subscript></entry>
<entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
@@ -2918,9 +3328,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2931,14 +3339,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2950,13 +3350,11 @@ see <xref linkend="colorspaces" />.</entry>
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="MEDIA-BUS-FMT-YVYU12-2X12">
- <entry>MEDIA_BUS_FMT_YVYU12_2X12</entry>
- <entry>0x201f</entry>
+ <row id="MEDIA-BUS-FMT-YVYU10-1X20">
+ <entry>MEDIA_BUS_FMT_YVYU10_1X20</entry>
+ <entry>0x200e</entry>
<entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2967,14 +3365,6 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2990,9 +3380,7 @@ see <xref linkend="colorspaces" />.</entry>
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -3003,16 +3391,67 @@ see <xref linkend="colorspaces" />.</entry>
<entry>y<subscript>2</subscript></entry>
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
+ <entry>u<subscript>9</subscript></entry>
+ <entry>u<subscript>8</subscript></entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
</row>
- <row>
- <entry></entry>
+ <row id="MEDIA-BUS-FMT-VUY8-1X24">
+ <entry>MEDIA_BUS_FMT_VUY8_1X24</entry>
+ <entry>0x2024</entry>
<entry></entry>
+ &dash-ent-8;
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ </row>
+ <row id="MEDIA-BUS-FMT-YUV8-1X24">
+ <entry>MEDIA_BUS_FMT_YUV8_1X24</entry>
+ <entry>0x2025</entry>
<entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
+ &dash-ent-8;
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -3021,6 +3460,14 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>2</subscript></entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
</row>
<row id="MEDIA-BUS-FMT-UYVY12-1X24">
<entry>MEDIA_BUS_FMT_UYVY12_1X24</entry>
@@ -3262,6 +3709,80 @@ see <xref linkend="colorspaces" />.</entry>
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
+ <row id="MEDIA-BUS-FMT-YUV10-1X30">
+ <entry>MEDIA_BUS_FMT_YUV10_1X30</entry>
+ <entry>0x2016</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>u<subscript>9</subscript></entry>
+ <entry>u<subscript>8</subscript></entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
+ <entry>v<subscript>9</subscript></entry>
+ <entry>v<subscript>8</subscript></entry>
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
+ </row>
+ <row id="MEDIA-BUS-FMT-AYUV8-1X32">
+ <entry>MEDIA_BUS_FMT_AYUV8_1X32</entry>
+ <entry>0x2017</entry>
+ <entry></entry>
+ <entry>a<subscript>7</subscript></entry>
+ <entry>a<subscript>6</subscript></entry>
+ <entry>a<subscript>5</subscript></entry>
+ <entry>a<subscript>4</subscript></entry>
+ <entry>a<subscript>3</subscript></entry>
+ <entry>a<subscript>2</subscript></entry>
+ <entry>a<subscript>1</subscript></entry>
+ <entry>a<subscript>0</subscript></entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index ac0f8d9d2a49..e98caa1c39bd 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -136,6 +136,7 @@ Remote Controller chapter.</contrib>
<year>2012</year>
<year>2013</year>
<year>2014</year>
+ <year>2015</year>
<holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab,
Pawel Osciak</holder>
@@ -152,6 +153,14 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>3.21</revnumber>
+ <date>2015-02-13</date>
+ <authorinitials>mcc</authorinitials>
+ <revremark>Fix documentation for media controller device nodes and add support for DVB device nodes.
+Add support for Tuner sub-device.
+ </revremark>
+ </revision>
+ <revision>
<revnumber>3.19</revnumber>
<date>2014-12-05</date>
<authorinitials>hv</authorinitials>
diff --git a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml b/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
index 1f5ed64cd75a..50cb940cbe5c 100644
--- a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
@@ -59,6 +59,11 @@ constant except when switching the video standard. Remember this
switch can occur implicit when switching the video input or
output.</para>
+<para>Do not use the multiplanar buffer types. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
+instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>
+and use <constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> instead of
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>.</para>
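+
+<para>A minimal sketch, assuming an already opened capture device file
+descriptor fd, of querying the cropping capabilities with the single-planar
+buffer type:</para>
+
+<programlisting>
+struct v4l2_cropcap cropcap;
+
+memset(&amp;cropcap, 0, sizeof(cropcap));
+cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* never the _MPLANE variant */
+
+if (ioctl(fd, VIDIOC_CROPCAP, &amp;cropcap) == 0)
+	printf("default rect %ux%u, pixel aspect %u/%u\n",
+	       cropcap.defrect.width, cropcap.defrect.height,
+	       cropcap.pixelaspect.numerator, cropcap.pixelaspect.denominator);
+</programlisting>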
+
<para>This ioctl must be implemented for video capture or output devices that
support cropping and/or scaling and/or have non-square pixels, and for overlay devices.</para>
@@ -73,9 +78,7 @@ support cropping and/or scaling and/or have non-square pixels, and for overlay d
<entry>Type of the data stream, set by the application.
Only these types are valid here:
<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant> and
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> and
<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>. See <xref linkend="v4l2-buf-type" />.</entry>
</row>
<row>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
index b036f8963353..50ccd33948c1 100644
--- a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
@@ -64,7 +64,7 @@
<entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry></entry>
- <entry>Type of the event.</entry>
+ <entry>Type of the event, see <xref linkend="event-type" />.</entry>
</row>
<row>
<entry>union</entry>
@@ -154,6 +154,113 @@
</tgroup>
</table>
+ <table frame="none" pgwide="1" id="event-type">
+ <title>Event Types</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_EVENT_ALL</constant></entry>
+ <entry>0</entry>
+ <entry>All events. V4L2_EVENT_ALL is valid only with
+ VIDIOC_UNSUBSCRIBE_EVENT, to unsubscribe from all events at once.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_VSYNC</constant></entry>
+ <entry>1</entry>
+ <entry>This event is triggered on the vertical sync.
+ This event has a &v4l2-event-vsync; associated with it.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_EOS</constant></entry>
+ <entry>2</entry>
+ <entry>This event is triggered when the end of a stream is reached.
+ This is typically used with MPEG decoders to report to the application
+ when the last of the MPEG stream has been decoded.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_CTRL</constant></entry>
+ <entry>3</entry>
+ <entry><para>This event requires that the <structfield>id</structfield>
+ matches the control ID from which you want to receive events.
+ This event is triggered if the control's value changes, if a
+ button control is pressed or if the control's flags change.
+ This event has a &v4l2-event-ctrl; associated with it. This struct
+ contains much of the same information as &v4l2-queryctrl; and
+ &v4l2-control;.</para>
+
+ <para>If the event is generated due to a call to &VIDIOC-S-CTRL; or
+ &VIDIOC-S-EXT-CTRLS;, then the event will <emphasis>not</emphasis> be sent to
+ the file handle that called the ioctl function. This prevents
+ nasty feedback loops. If you <emphasis>do</emphasis> want to get the
+ event, then set the <constant>V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK</constant>
+ flag.
+ </para>
+
+ <para>This event type will ensure that no information is lost when
+ more events are raised than there is room internally. In that
+ case the &v4l2-event-ctrl; of the second-oldest event is kept,
+ but the <structfield>changes</structfield> field of the
+ second-oldest event is ORed with the <structfield>changes</structfield>
+ field of the oldest event.</para>
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_FRAME_SYNC</constant></entry>
+ <entry>4</entry>
+ <entry>
+ <para>Triggered immediately when the reception of a
+ frame has begun. This event has a
+ &v4l2-event-frame-sync; associated with it.</para>
+
+ <para>If the hardware needs to be stopped in the case of a
+ buffer underrun it might not be able to generate this event.
+ In such cases the <structfield>frame_sequence</structfield>
+ field in &v4l2-event-frame-sync; will not be incremented. This
+ causes two consecutive frame sequence numbers to be separated
+ by n times the frame interval.</para>
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_SOURCE_CHANGE</constant></entry>
+ <entry>5</entry>
+ <entry>
+ <para>This event is triggered when a source parameter change is
+ detected during runtime by the video device. It can be a
+ runtime resolution change triggered by a video decoder or the
+ format change happening on an input connector.
+ This event requires that the <structfield>id</structfield>
+ matches the input index (when used with a video device node)
+ or the pad index (when used with a subdevice node) from which
+ you want to receive events.</para>
+
+ <para>This event has a &v4l2-event-src-change; associated
+ with it. The <structfield>changes</structfield> bitfield denotes
+ what has changed for the subscribed pad. If multiple events
+ what has changed for the subscribed pad. If multiple events
+ occurred before the application could dequeue them, then the
+ <structfield>changes</structfield> field will contain the ORed
+ value of all the events generated.</para>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_MOTION_DET</constant></entry>
+ <entry>6</entry>
+ <entry>
+ <para>Triggered whenever the motion detection state for one or more of the regions
+ changes. This event has a &v4l2-event-motion-det; associated with it.</para>
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_PRIVATE_START</constant></entry>
+ <entry>0x08000000</entry>
+ <entry>Base event number for driver-private events.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
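+
+    <para>A minimal sketch of subscribing to one of the event types above
+    and dequeuing it once <function>poll()</function> reports an exceptional
+    condition on the file descriptor; the chosen event type is only an
+    example:</para>
+
+    <programlisting>
+struct v4l2_event_subscription sub;
+struct v4l2_event ev;
+
+memset(&amp;sub, 0, sizeof(sub));
+sub.type = V4L2_EVENT_EOS;	/* any type from the table above */
+
+if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &amp;sub) &lt; 0)
+	perror("VIDIOC_SUBSCRIBE_EVENT");
+
+/* Later, once poll() reports POLLPRI on fd: */
+if (ioctl(fd, VIDIOC_DQEVENT, &amp;ev) == 0 &amp;&amp; ev.type == V4L2_EVENT_EOS)
+	printf("end of stream reached\n");
+    </programlisting>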
+
<table frame="none" pgwide="1" id="v4l2-event-vsync">
<title>struct <structname>v4l2_event_vsync</structname></title>
<tgroup cols="3">
@@ -177,7 +284,7 @@
<entry>__u32</entry>
<entry><structfield>changes</structfield></entry>
<entry></entry>
- <entry>A bitmask that tells what has changed. See <xref linkend="changes-flags" />.</entry>
+ <entry>A bitmask that tells what has changed. See <xref linkend="ctrl-changes-flags" />.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -309,8 +416,8 @@
</tgroup>
</table>
- <table pgwide="1" frame="none" id="changes-flags">
- <title>Changes</title>
+ <table pgwide="1" frame="none" id="ctrl-changes-flags">
+ <title>Control Changes</title>
<tgroup cols="3">
&cs-def;
<tbody valign="top">
@@ -318,9 +425,9 @@
<entry><constant>V4L2_EVENT_CTRL_CH_VALUE</constant></entry>
<entry>0x0001</entry>
<entry>This control event was triggered because the value of the control
- changed. Special case: if a button control is pressed, then this
- event is sent as well, even though there is not explicit value
- associated with a button control.</entry>
+ changed. Special cases: volatile controls do not generate this event;
+ if a control has the <constant>V4L2_CTRL_FLAG_EXECUTE_ON_WRITE</constant>
+ flag set, then this event is sent as well, regardless of its value.</entry>
</row>
<row>
<entry><constant>V4L2_EVENT_CTRL_CH_FLAGS</constant></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-crop.xml b/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
index 75c6a93de3c1..e6c4efb9e8b4 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
@@ -70,6 +70,11 @@ structure or returns the &EINVAL; if cropping is not supported.</para>
<constant>VIDIOC_S_CROP</constant> ioctl with a pointer to this
structure.</para>
+<para>Do not use the multiplanar buffer types. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
+instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>
+and use <constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> instead of
+<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>.</para>
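+
+<para>A minimal sketch, assuming an already opened capture device file
+descriptor fd, of reading the current cropping rectangle with the
+single-planar buffer type and shrinking it (the halving is an arbitrary
+example):</para>
+
+<programlisting>
+struct v4l2_crop crop;
+
+memset(&amp;crop, 0, sizeof(crop));
+crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* not the _MPLANE variant */
+
+if (ioctl(fd, VIDIOC_G_CROP, &amp;crop) == 0) {
+	crop.c.width /= 2;	/* arbitrary new cropping width */
+	if (ioctl(fd, VIDIOC_S_CROP, &amp;crop) &lt; 0)
+		perror("VIDIOC_S_CROP");
+}
+</programlisting>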
+
<para>The driver first adjusts the requested dimensions against
hardware limits, &ie; the bounds given by the capture/output window,
and it rounds to the closest possible values of horizontal and
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
index c4336577ff06..764b635ed4cf 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
@@ -318,10 +318,20 @@ can't generate such frequencies, then the flag will also be cleared.
</row>
<row>
<entry>V4L2_DV_FL_HALF_LINE</entry>
- <entry>Specific to interlaced formats: if set, then field 1 (aka the odd field)
-is really one half-line longer and field 2 (aka the even field) is really one half-line
-shorter, so each field has exactly the same number of half-lines. Whether half-lines can be
-detected or used depends on the hardware.
+ <entry>Specific to interlaced formats: if set, then the vertical frontporch
+of field 1 (aka the odd field) is really one half-line longer and the vertical backporch
+of field 2 (aka the even field) is really one half-line shorter, so each field has exactly
+the same number of half-lines. Whether half-lines can be detected or used depends on
+the hardware.
+ </entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_FL_IS_CE_VIDEO</entry>
+ <entry>If set, then this is a Consumer Electronics (CE) video format.
+Such formats differ from other formats (commonly called IT formats) in that if
+R'G'B' encoding is used then by default the R'G'B' values use limited range
+(i.e. 16-235) as opposed to full range (i.e. 0-255). All formats defined in CEA-861
+except for the 640x480p59.94 format are CE formats.
</entry>
</row>
</tbody>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml b/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
index 20460730b02c..77607cc19688 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
@@ -240,9 +240,9 @@ where padding bytes after the last line of an image cross a system
page boundary. Capture devices may write padding bytes, the value is
undefined. Output devices ignore the contents of padding
bytes.</para><para>When the image format is planar the
-<structfield>bytesperline</structfield> value applies to the largest
+<structfield>bytesperline</structfield> value applies to the first
plane and is divided by the same factor as the
-<structfield>width</structfield> field for any smaller planes. For
+<structfield>width</structfield> field for the other planes. For
example the Cb and Cr planes of a YUV 4:2:0 image have half as many
padding bytes following each line as the Y plane. To avoid ambiguities
drivers must return a <structfield>bytesperline</structfield> value
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
index 9c04ac8661b1..0bb5c060db27 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
@@ -60,8 +60,8 @@
<para>To query the cropping (composing) rectangle set &v4l2-selection;
<structfield> type </structfield> field to the respective buffer type.
-Do not use multiplanar buffers. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
-instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>. Use
+Do not use the multiplanar buffer types. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
+instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant> and use
<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> instead of
<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>. The next step is
setting the value of &v4l2-selection; <structfield>target</structfield> field
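A minimal sketch of the query described above (not from the patch; fd is an open capture
device node, includes assumed):

    struct v4l2_selection sel = {
        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,  /* never the _MPLANE type */
        .target = V4L2_SEL_TGT_CROP,
    };

    if (ioctl(fd, VIDIOC_G_SELECTION, &sel) == 0)
        printf("crop: %ux%u@(%d,%d)\n",
               sel.r.width, sel.r.height, sel.r.left, sel.r.top);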
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml b/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
index bd015d1563ff..d05623c55403 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
@@ -205,7 +205,7 @@ ETS&nbsp;300&nbsp;231, lsb first transmitted.</entry>
<row>
<entry><constant>V4L2_SLICED_CAPTION_525</constant></entry>
<entry>0x1000</entry>
- <entry><xref linkend="eia608" /></entry>
+ <entry><xref linkend="cea608" /></entry>
<entry>NTSC line 21, 284 (second field 21)</entry>
<entry>Two bytes in transmission order, including parity
bit, lsb first transmitted.</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-querycap.xml b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
index d0c5e604f014..20fda75a012d 100644
--- a/Documentation/DocBook/media/v4l/vidioc-querycap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
@@ -102,10 +102,10 @@ The bus_info must start with "PCI:" for PCI boards, "PCIe:" for PCI Express boar
<entry>__u32</entry>
<entry><structfield>version</structfield></entry>
<entry><para>Version number of the driver.</para>
-<para>Starting on kernel 3.1, the version reported is provided per
-V4L2 subsystem, following the same Kernel numberation scheme. However, it
-should not always return the same version as the kernel, if, for example,
-an stable or distribution-modified kernel uses the V4L2 stack from a
+<para>Starting with kernel 3.1, the version reported is provided by the
+V4L2 subsystem following the kernel numbering scheme. However, it
+may not always return the same version as the kernel if, for example,
+a stable or distribution-modified kernel uses the V4L2 stack from a
newer kernel.</para>
<para>The version number is formatted using the
<constant>KERNEL_VERSION()</constant> macro:</para></entry>
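For example, a hedged user-space check (fd open on the device, KERNEL_VERSION() coming
from <linux/version.h>):

    struct v4l2_capability cap;

    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
        cap.version >= KERNEL_VERSION(3, 1, 0))
        /* The version field follows the kernel numbering scheme. */
        printf("V4L2 core %u.%u.%u\n",
               (cap.version >> 16) & 0xff,
               (cap.version >> 8) & 0xff,
               cap.version & 0xff);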
diff --git a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
index 2bd98fd7a4e5..dc83ad70f8dc 100644
--- a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
@@ -600,7 +600,9 @@ writing a value will cause the device to carry out a given action
changes continuously. A typical example would be the current gain value if the device
is in auto-gain mode. In such a case the hardware calculates the gain value based on
the lighting conditions which can change over time. Note that setting a new value for
-a volatile control will have no effect. The new value will just be ignored.</entry>
+a volatile control will have no effect and no <constant>V4L2_EVENT_CTRL_CH_VALUE</constant>
+will be sent, unless the <constant>V4L2_CTRL_FLAG_EXECUTE_ON_WRITE</constant> flag
+(see below) is also set. Otherwise the new value will just be ignored.</entry>
</row>
<row>
<entry><constant>V4L2_CTRL_FLAG_HAS_PAYLOAD</constant></entry>
@@ -610,6 +612,14 @@ using one of the pointer fields of &v4l2-ext-control;. This flag is set for cont
that are an array, string, or have a compound type. In all cases you have to set a
pointer to memory containing the payload of the control.</entry>
</row>
+ <row>
+ <entry><constant>V4L2_CTRL_FLAG_EXECUTE_ON_WRITE</constant></entry>
+ <entry>0x0200</entry>
+ <entry>The value provided to the control will be propagated to the driver
+even if it remains constant. This is required when the control represents an action
+on the hardware, for example clearing an error flag or triggering the flash. All
+controls of type <constant>V4L2_CTRL_TYPE_BUTTON</constant> have this flag set.</entry>
+ </row>
</tbody>
</tgroup>
</table>
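A short sketch of how an application might inspect these two flags together
(V4L2_CID_GAIN is only an example control; fd and includes assumed):

    struct v4l2_queryctrl qc = { .id = V4L2_CID_GAIN };

    if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0 &&
        (qc.flags & V4L2_CTRL_FLAG_VOLATILE) &&
        !(qc.flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE))
        /* Writes to this control are ignored and generate no event. */
        printf("%s: read-only in practice\n", qc.name);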
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
index 2f8f4f0a0235..cff59f5cbf04 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
@@ -67,9 +67,9 @@
<para>To enumerate frame intervals applications initialize the
<structfield>index</structfield>, <structfield>pad</structfield>,
- <structfield>code</structfield>, <structfield>width</structfield> and
- <structfield>height</structfield> fields of
- &v4l2-subdev-frame-interval-enum; and call the
+ <structfield>which</structfield>, <structfield>code</structfield>,
+ <structfield>width</structfield> and <structfield>height</structfield>
+ fields of &v4l2-subdev-frame-interval-enum; and call the
<constant>VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL</constant> ioctl with a pointer
to this structure. Drivers fill the rest of the structure or return
an &EINVAL; if one of the input fields is invalid. All frame intervals are
@@ -123,7 +123,12 @@
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[9]</entry>
+ <entry><structfield>which</structfield></entry>
+ <entry>Frame intervals to be enumerated, from &v4l2-subdev-format-whence;.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[8]</entry>
<entry>Reserved for future extensions. Applications and drivers must
set the array to zero.</entry>
</row>
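A hedged sketch of the enumeration with the new which field (fd is an open subdevice node,
<linux/v4l2-subdev.h> assumed; the media bus code and frame size are arbitrary example
values):

    struct v4l2_subdev_frame_interval_enum fie = {
        .pad    = 0,
        .which  = V4L2_SUBDEV_FORMAT_ACTIVE,
        .code   = MEDIA_BUS_FMT_UYVY8_2X8,
        .width  = 1280,
        .height = 720,
    };

    /* index starts at 0; the loop stops when the driver returns EINVAL. */
    while (ioctl(fd, VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL, &fie) == 0) {
        printf("%u/%u\n", fie.interval.numerator, fie.interval.denominator);
        fie.index++;
    }

The same which initialization applies to the VIDIOC_SUBDEV_ENUM_FRAME_SIZE and
VIDIOC_SUBDEV_ENUM_MBUS_CODE enumerations documented below, so no separate sketch is
given for those.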
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
index 79ce42b7c60c..abd545ede67a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
@@ -61,9 +61,9 @@
ioctl.</para>
<para>To enumerate frame sizes applications initialize the
- <structfield>pad</structfield>, <structfield>code</structfield> and
- <structfield>index</structfield> fields of the
- &v4l2-subdev-mbus-code-enum; and call the
+ <structfield>pad</structfield>, <structfield>which</structfield>,
+ <structfield>code</structfield> and <structfield>index</structfield>
+ fields of the &v4l2-subdev-frame-size-enum; and call the
<constant>VIDIOC_SUBDEV_ENUM_FRAME_SIZE</constant> ioctl with a pointer to
the structure. Drivers fill the minimum and maximum frame sizes or return
an &EINVAL; if one of the input parameters is invalid.</para>
@@ -127,7 +127,12 @@
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[9]</entry>
+ <entry><structfield>which</structfield></entry>
+ <entry>Frame sizes to be enumerated, from &v4l2-subdev-format-whence;.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[8]</entry>
<entry>Reserved for future extensions. Applications and drivers must
set the array to zero.</entry>
</row>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
index a6b3432449f6..0bcb278fd062 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
@@ -56,8 +56,8 @@
</note>
<para>To enumerate media bus formats available at a given sub-device pad
- applications initialize the <structfield>pad</structfield> and
- <structfield>index</structfield> fields of &v4l2-subdev-mbus-code-enum; and
+ applications initialize the <structfield>pad</structfield>, <structfield>which</structfield>
+ and <structfield>index</structfield> fields of &v4l2-subdev-mbus-code-enum; and
call the <constant>VIDIOC_SUBDEV_ENUM_MBUS_CODE</constant> ioctl with a
pointer to this structure. Drivers fill the rest of the structure or return
an &EINVAL; if either the <structfield>pad</structfield> or
@@ -93,7 +93,12 @@
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[9]</entry>
+ <entry><structfield>which</structfield></entry>
+ <entry>Media bus format codes to be enumerated, from &v4l2-subdev-format-whence;.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[8]</entry>
<entry>Reserved for future extensions. Applications and drivers must
set the array to zero.</entry>
</row>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml b/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
index d7c9365ecdbe..d0332f610929 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
@@ -60,7 +60,9 @@
<row>
<entry>__u32</entry>
<entry><structfield>type</structfield></entry>
- <entry>Type of the event.</entry>
+ <entry>Type of the event, see <xref linkend="event-type" />. Note that
+<constant>V4L2_EVENT_ALL</constant> can be used with VIDIOC_UNSUBSCRIBE_EVENT
+for unsubscribing all events at once.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -84,113 +86,6 @@
</tgroup>
</table>
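For instance, unsubscribing from everything at once might look like this (a sketch only;
fd and includes assumed):

    struct v4l2_event_subscription sub = { .type = V4L2_EVENT_ALL };

    /* V4L2_EVENT_ALL is only valid when unsubscribing. */
    if (ioctl(fd, VIDIOC_UNSUBSCRIBE_EVENT, &sub) == -1)
        perror("VIDIOC_UNSUBSCRIBE_EVENT");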
- <table frame="none" pgwide="1" id="event-type">
- <title>Event Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_EVENT_ALL</constant></entry>
- <entry>0</entry>
- <entry>All events. V4L2_EVENT_ALL is valid only for
- VIDIOC_UNSUBSCRIBE_EVENT for unsubscribing all events at once.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_VSYNC</constant></entry>
- <entry>1</entry>
- <entry>This event is triggered on the vertical sync.
- This event has a &v4l2-event-vsync; associated with it.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_EOS</constant></entry>
- <entry>2</entry>
- <entry>This event is triggered when the end of a stream is reached.
- This is typically used with MPEG decoders to report to the application
- when the last of the MPEG stream has been decoded.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_CTRL</constant></entry>
- <entry>3</entry>
- <entry><para>This event requires that the <structfield>id</structfield>
- matches the control ID from which you want to receive events.
- This event is triggered if the control's value changes, if a
- button control is pressed or if the control's flags change.
- This event has a &v4l2-event-ctrl; associated with it. This struct
- contains much of the same information as &v4l2-queryctrl; and
- &v4l2-control;.</para>
-
- <para>If the event is generated due to a call to &VIDIOC-S-CTRL; or
- &VIDIOC-S-EXT-CTRLS;, then the event will <emphasis>not</emphasis> be sent to
- the file handle that called the ioctl function. This prevents
- nasty feedback loops. If you <emphasis>do</emphasis> want to get the
- event, then set the <constant>V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK</constant>
- flag.
- </para>
-
- <para>This event type will ensure that no information is lost when
- more events are raised than there is room internally. In that
- case the &v4l2-event-ctrl; of the second-oldest event is kept,
- but the <structfield>changes</structfield> field of the
- second-oldest event is ORed with the <structfield>changes</structfield>
- field of the oldest event.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_FRAME_SYNC</constant></entry>
- <entry>4</entry>
- <entry>
- <para>Triggered immediately when the reception of a
- frame has begun. This event has a
- &v4l2-event-frame-sync; associated with it.</para>
-
- <para>If the hardware needs to be stopped in the case of a
- buffer underrun it might not be able to generate this event.
- In such cases the <structfield>frame_sequence</structfield>
- field in &v4l2-event-frame-sync; will not be incremented. This
- causes two consecutive frame sequence numbers to have n times
- frame interval in between them.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_SOURCE_CHANGE</constant></entry>
- <entry>5</entry>
- <entry>
- <para>This event is triggered when a source parameter change is
- detected during runtime by the video device. It can be a
- runtime resolution change triggered by a video decoder or the
- format change happening on an input connector.
- This event requires that the <structfield>id</structfield>
- matches the input index (when used with a video device node)
- or the pad index (when used with a subdevice node) from which
- you want to receive events.</para>
-
- <para>This event has a &v4l2-event-src-change; associated
- with it. The <structfield>changes</structfield> bitfield denotes
- what has changed for the subscribed pad. If multiple events
- occurred before application could dequeue them, then the changes
- will have the ORed value of all the events generated.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_MOTION_DET</constant></entry>
- <entry>6</entry>
- <entry>
- <para>Triggered whenever the motion detection state for one or more of the regions
- changes. This event has a &v4l2-event-motion-det; associated with it.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_PRIVATE_START</constant></entry>
- <entry>0x08000000</entry>
- <entry>Base event number for driver-private events.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
<table pgwide="1" frame="none" id="event-flags">
<title>Event Flags</title>
<tgroup cols="3">
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 653d5d739d7f..31d1d658827f 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -505,7 +505,10 @@ at module load time (for a module) with:
The addresses are normal I2C addresses. The adapter is the string
name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
-It is *NOT* i2c-<n> itself.
+It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
+spaces, so if the name is "This is an I2C chip" you can say
+adapter_name=ThisisanI2Cchip. This is because it's hard to pass in
+spaces in kernel parameters.
The debug flags are bit flags for each BMC found, they are:
IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 39cfa72732ff..3a8e15cba816 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -95,8 +95,7 @@ since it doesn't need to allocate a table as large as the largest
hwirq number. The disadvantage is that hwirq to IRQ number lookup is
dependent on how many entries are in the table.
-Very few drivers should need this mapping. At the moment, powerpc
-iseries is the only user.
+Very few drivers should need this mapping.
==== No Map ====
irq_domain_add_nomap()
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 6883a1b9b351..bc0548201755 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -1,4 +1,4 @@
-subdir-y := accounting arm auxdisplay blackfin connector \
+subdir-y := accounting auxdisplay blackfin connector \
filesystems filesystems ia64 laptops mic misc-devices \
networking pcmcia prctl ptp spi timers vDSO video4linux \
watchdog
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 0d920d54536d..1179850f453c 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -353,7 +353,7 @@ retry:
rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
maxvec, maxvec);
/*
- * -ENOSPC is the only error code allowed to be analized
+ * -ENOSPC is the only error code allowed to be analyzed
*/
if (rc == -ENOSPC) {
if (maxvec == 1)
@@ -370,7 +370,7 @@ retry:
return rc;
}
-Note how pci_enable_msix_range() return value is analized for a fallback -
+Note how pci_enable_msix_range() return value is analyzed for a fallback -
any error code other than -ENOSPC indicates a fatal error and should not
be retried.
@@ -486,7 +486,7 @@ during development.
If your device supports both MSI-X and MSI capabilities, you should use
the MSI-X facilities in preference to the MSI facilities. As mentioned
above, MSI-X supports any number of interrupts between 1 and 2048.
-In constrast, MSI is restricted to a maximum of 32 interrupts (and
+In contrast, MSI is restricted to a maximum of 32 interrupts (and
must be a power of two). In addition, the MSI interrupt vectors must
be allocated consecutively, so the system might not be able to allocate
as many vectors for MSI as it could for MSI-X. On some platforms, MSI
@@ -501,18 +501,9 @@ necessary to disable interrupts (Linux guarantees the same interrupt will
not be re-entered). If a device uses multiple interrupts, the driver
must disable interrupts while the lock is held. If the device sends
a different interrupt, the driver will deadlock trying to recursively
-acquire the spinlock.
-
-There are two solutions. The first is to take the lock with
-spin_lock_irqsave() or spin_lock_irq() (see
-Documentation/DocBook/kernel-locking). The second is to specify
-IRQF_DISABLED to request_irq() so that the kernel runs the entire
-interrupt routine with interrupts disabled.
-
-If your MSI interrupt routine does not hold the lock for the whole time
-it is running, the first solution may be best. The second solution is
-normally preferred as it avoids making two transitions from interrupt
-disabled to enabled and back again.
+acquire the spinlock. Such deadlocks can be avoided by using
+spin_lock_irqsave() or spin_lock_irq() which disable local interrupts
+and acquire the lock (see Documentation/DocBook/kernel-locking).
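A minimal kernel-side sketch of that advice (the foo_* names are hypothetical):

    static irqreturn_t foo_msi_handler(int irq, void *data)
    {
        struct foo_dev *foo = data;
        unsigned long flags;

        /* Disable local interrupts while the shared lock is held so that
         * another MSI vector cannot preempt us and deadlock on the same lock. */
        spin_lock_irqsave(&foo->lock, flags);
        /* ... service the device ... */
        spin_unlock_irqrestore(&foo->lock, flags);

        return IRQ_HANDLED;
    }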
4.6 How to tell whether MSI/MSI-X is enabled on a device
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index 898ded24510d..ac26869c7db4 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -256,7 +256,7 @@ STEP 4: Slot Reset
------------------
In response to a return value of PCI_ERS_RESULT_NEED_RESET, the
-the platform will peform a slot reset on the requesting PCI device(s).
+the platform will perform a slot reset on the requesting PCI device(s).
The actual steps taken by a platform to perform a slot reset
will be platform-dependent. Upon completion of slot reset, the
platform will call the device slot_reset() callback.
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index 26d3d945c3c2..b4987c0bcb20 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -66,8 +66,8 @@ hardware (mostly chipsets) has root ports that cannot obtain the reporting
source ID. nosourceid=n by default.
2.3 AER error output
-When a PCI-E AER error is captured, an error message will be outputed to
-console. If it's a correctable error, it is outputed as a warning.
+When a PCI-E AER error is captured, an error message will be outputted to
+console. If it's a correctable error, it is outputted as a warning.
Otherwise, it is printed as an error. So users could choose different
log level to filter out correctable error messages.
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 447671bd2927..b03a832a08e2 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -614,8 +614,8 @@ The canonical patch message body contains the following:
- An empty line.
- - The body of the explanation, which will be copied to the
- permanent changelog to describe this patch.
+ - The body of the explanation, line wrapped at 75 columns, which will
+ be copied to the permanent changelog to describe this patch.
- The "Signed-off-by:" lines, described above, which will
also go in the changelog.
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index 9b121a569ab4..15dfce708ebf 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -253,9 +253,14 @@ input driver:
GPIO support
~~~~~~~~~~~~
ACPI 5 introduced two new resources to describe GPIO connections: GpioIo
-and GpioInt. These resources are used be used to pass GPIO numbers used by
-the device to the driver. For example:
+and GpioInt. These resources can be used to pass GPIO numbers used by
+the device to the driver. ACPI 5.1 extended this with _DSD (Device
+Specific Data) which made it possible to name the GPIOs among other things.
+For example:
+
+Device (DEV)
+{
Method (_CRS, 0, NotSerialized)
{
Name (SBUF, ResourceTemplate()
@@ -285,6 +290,18 @@ the device to the driver. For example:
Return (SBUF)
}
+ // ACPI 5.1 _DSD used for naming the GPIOs
+ Name (_DSD, Package ()
+ {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package ()
+ {
+ Package () {"power-gpios", Package() {^DEV, 0, 0, 0 }},
+ Package () {"irq-gpios", Package() {^DEV, 1, 0, 0 }},
+ }
+ })
+ ...
+
These GPIO numbers are controller relative and path "\\_SB.PCI0.GPI0"
specifies the path to the controller. In order to use these GPIOs in Linux
we need to translate them to the corresponding Linux GPIO descriptors.
@@ -300,11 +317,11 @@ a code like this:
struct gpio_desc *irq_desc, *power_desc;
- irq_desc = gpiod_get_index(dev, NULL, 1);
+ irq_desc = gpiod_get(dev, "irq");
if (IS_ERR(irq_desc))
/* handle error */
- power_desc = gpiod_get_index(dev, NULL, 0);
+ power_desc = gpiod_get(dev, "power");
if (IS_ERR(power_desc))
/* handle error */
@@ -313,6 +330,9 @@ a code like this:
There are also devm_* versions of these functions which release the
descriptors once the device is released.
+See Documentation/acpi/gpio-properties.txt for more information about the
+_DSD binding related to GPIOs.
+
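A sketch of the devm_* variant mentioned above, reusing the "power" and "irq" names from
the _DSD package (hypothetical driver; <linux/gpio/consumer.h>, <linux/platform_device.h>
and <linux/err.h> assumed). It mirrors the two-argument form used above; newer kernels
also take a gpiod_flags argument such as GPIOD_ASIS:

    static int foo_probe(struct platform_device *pdev)
    {
        struct gpio_desc *power, *irq;

        power = devm_gpiod_get(&pdev->dev, "power");
        if (IS_ERR(power))
            return PTR_ERR(power);

        irq = devm_gpiod_get(&pdev->dev, "irq");
        if (IS_ERR(irq))
            return PTR_ERR(irq);

        /* Both descriptors are released automatically with the device. */
        return 0;
    }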
MFD devices
~~~~~~~~~~~
The MFD devices register their children as platform devices. For the child
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt
index ae36fcf86dc7..f35dad11f0de 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/acpi/gpio-properties.txt
@@ -1,9 +1,9 @@
_DSD Device Properties Related to GPIO
--------------------------------------
-With the release of ACPI 5.1 and the _DSD configuration objecte names
-can finally be given to GPIOs (and other things as well) returned by
-_CRS. Previously, we were only able to use an integer index to find
+With the release of ACPI 5.1, the _DSD configuration object finally
+allows names to be given to GPIOs (and other things as well) returned
+by _CRS. Previously, we were only able to use an integer index to find
the corresponding GPIO, which is pretty error prone (it depends on
the _CRS output ordering, for example).
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 8edb9007844e..dea011c8d7c7 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -10,8 +10,6 @@ IXP4xx
- Intel IXP4xx Network processor.
Makefile
- Build sourcefiles as part of the Documentation-build for arm
-msm/
- - MSM specific documentation
Netwinder
- Netwinder specific documentation
Porting
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 371814a36719..83c1df2fc758 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -58,13 +58,18 @@ serial format options as described in
--------------------------
Existing boot loaders: OPTIONAL
-New boot loaders: MANDATORY
+New boot loaders: MANDATORY except for DT-only platforms
The boot loader should detect the machine type it's running on by some
method. Whether this is a hard coded value or some algorithm that
looks at the connected hardware is beyond the scope of this document.
The boot loader must ultimately be able to provide a MACH_TYPE_xxx
-value to the kernel. (see linux/arch/arm/tools/mach-types).
+value to the kernel. (see linux/arch/arm/tools/mach-types). This
+should be passed to the kernel in register r1.
+
+For DT-only platforms, the machine type will be determined by device
+tree. Set the machine type to all ones (~0). This is not strictly
+necessary, but ensures that it will not match any existing types.
4. Setup boot data
------------------
diff --git a/Documentation/arm/Makefile b/Documentation/arm/Makefile
deleted file mode 100644
index 732c77050cff..000000000000
--- a/Documentation/arm/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := SH-Mobile
diff --git a/Documentation/arm/Marvell/README b/Documentation/arm/Marvell/README
index 17453794fca5..18a775d10172 100644
--- a/Documentation/arm/Marvell/README
+++ b/Documentation/arm/Marvell/README
@@ -96,6 +96,11 @@ EBU Armada family
88F6820
88F6828
+ Armada 390/398 Flavors:
+ 88F6920
+ 88F6928
+ Product infos: http://www.marvell.com/embedded-processors/armada-39x/
+
Armada XP Flavors:
MV78230
MV78260
diff --git a/Documentation/arm/README b/Documentation/arm/README
index aea34095cdcf..9d1e5b2c92e6 100644
--- a/Documentation/arm/README
+++ b/Documentation/arm/README
@@ -185,13 +185,20 @@ Kernel entry (head.S)
board devices are used, or the device is setup, and provides that
machine specific "personality."
- This fine-grained machine specific selection is controlled by the machine
- type ID, which acts both as a run-time and a compile-time code selection
- method.
+ For platforms that support device tree (DT), the machine selection is
+ controlled at runtime by passing the device tree blob to the kernel. At
+ compile-time, support for the machine type must be selected. This allows for
+ a single multiplatform kernel build to be used for several machine types.
- You can register a new machine via the web site at:
+ For platforms that do not use device tree, this machine selection is
+ controlled by the machine type ID, which acts both as a run-time and a
+ compile-time code selection method. You can register a new machine via the
+ web site at:
<http://www.arm.linux.org.uk/developer/machines/>
+ Note: Please do not register a machine type for DT-only platforms. If your
+ platform is DT-only, you do not need a registered machine type.
+
---
Russell King (15/03/2004)
diff --git a/Documentation/arm/SH-Mobile/Makefile b/Documentation/arm/SH-Mobile/Makefile
deleted file mode 100644
index bca8a7ef6bbe..000000000000
--- a/Documentation/arm/SH-Mobile/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# List of programs to build
-hostprogs-y := vrl4
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_vrl4.o += -I$(objtree)/usr/include -I$(srctree)/tools/include
diff --git a/Documentation/arm/SH-Mobile/vrl4.c b/Documentation/arm/SH-Mobile/vrl4.c
deleted file mode 100644
index f4cd8ad4e720..000000000000
--- a/Documentation/arm/SH-Mobile/vrl4.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * vrl4 format generator
- *
- * Copyright (C) 2010 Simon Horman
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-/*
- * usage: vrl4 < zImage > out
- * dd if=out of=/dev/sdx bs=512 seek=1 # Write the image to sector 1
- *
- * Reads a zImage from stdin and writes a vrl4 image to stdout.
- * In practice this means writing a padded vrl4 header to stdout followed
- * by the zImage.
- *
- * The padding places the zImage at ALIGN bytes into the output.
- * The vrl4 uses ALIGN + START_BASE as the start_address.
- * This is where the mask ROM will jump to after verifying the header.
- *
- * The header sets copy_size to min(sizeof(zImage), MAX_BOOT_PROG_LEN) + ALIGN.
- * That is, the mask ROM will load the padded header (ALIGN bytes)
- * And then MAX_BOOT_PROG_LEN bytes of the image, or the entire image,
- * whichever is smaller.
- *
- * The zImage is not modified in any way.
- */
-
-#define _BSD_SOURCE
-#include <endian.h>
-#include <unistd.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <errno.h>
-#include <tools/endian.h>
-
-struct hdr {
- uint32_t magic1;
- uint32_t reserved1;
- uint32_t magic2;
- uint32_t reserved2;
- uint16_t copy_size;
- uint16_t boot_options;
- uint32_t reserved3;
- uint32_t start_address;
- uint32_t reserved4;
- uint32_t reserved5;
- char reserved6[308];
-};
-
-#define DECLARE_HDR(h) \
- struct hdr (h) = { \
- .magic1 = htole32(0xea000000), \
- .reserved1 = htole32(0x56), \
- .magic2 = htole32(0xe59ff008), \
- .reserved3 = htole16(0x1) }
-
-/* Align to 512 bytes, the MMCIF sector size */
-#define ALIGN_BITS 9
-#define ALIGN (1 << ALIGN_BITS)
-
-#define START_BASE 0xe55b0000
-
-/*
- * With an alignment of 512 the header uses the first sector.
- * There is a 128 sector (64kbyte) limit on the data loaded by the mask ROM.
- * So there are 127 sectors left for the boot programme. But in practice
- * Only a small portion of a zImage is needed, 16 sectors should be more
- * than enough.
- *
- * Note that this sets how much of the zImage is copied by the mask ROM.
- * The entire zImage is present after the header and is loaded
- * by the code in the boot program (which is the first portion of the zImage).
- */
-#define MAX_BOOT_PROG_LEN (16 * 512)
-
-#define ROUND_UP(x) ((x + ALIGN - 1) & ~(ALIGN - 1))
-
-static ssize_t do_read(int fd, void *buf, size_t count)
-{
- size_t offset = 0;
- ssize_t l;
-
- while (offset < count) {
- l = read(fd, buf + offset, count - offset);
- if (!l)
- break;
- if (l < 0) {
- if (errno == EAGAIN || errno == EWOULDBLOCK)
- continue;
- perror("read");
- return -1;
- }
- offset += l;
- }
-
- return offset;
-}
-
-static ssize_t do_write(int fd, const void *buf, size_t count)
-{
- size_t offset = 0;
- ssize_t l;
-
- while (offset < count) {
- l = write(fd, buf + offset, count - offset);
- if (l < 0) {
- if (errno == EAGAIN || errno == EWOULDBLOCK)
- continue;
- perror("write");
- return -1;
- }
- offset += l;
- }
-
- return offset;
-}
-
-static ssize_t write_zero(int fd, size_t len)
-{
- size_t i = len;
-
- while (i--) {
- const char x = 0;
- if (do_write(fd, &x, 1) < 0)
- return -1;
- }
-
- return len;
-}
-
-int main(void)
-{
- DECLARE_HDR(hdr);
- char boot_program[MAX_BOOT_PROG_LEN];
- size_t aligned_hdr_len, alligned_prog_len;
- ssize_t prog_len;
-
- prog_len = do_read(0, boot_program, sizeof(boot_program));
- if (prog_len <= 0)
- return -1;
-
- aligned_hdr_len = ROUND_UP(sizeof(hdr));
- hdr.start_address = htole32(START_BASE + aligned_hdr_len);
- alligned_prog_len = ROUND_UP(prog_len);
- hdr.copy_size = htole16(aligned_hdr_len + alligned_prog_len);
-
- if (do_write(1, &hdr, sizeof(hdr)) < 0)
- return -1;
- if (write_zero(1, aligned_hdr_len - sizeof(hdr)) < 0)
- return -1;
-
- if (do_write(1, boot_program, prog_len) < 0)
- return 1;
-
- /* Write out the rest of the kernel */
- while (1) {
- prog_len = do_read(0, boot_program, sizeof(boot_program));
- if (prog_len < 0)
- return 1;
- if (prog_len == 0)
- break;
- if (do_write(1, boot_program, prog_len) < 0)
- return 1;
- }
-
- return 0;
-}
diff --git a/Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt b/Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt
deleted file mode 100644
index efff8ae2713d..000000000000
--- a/Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-ROM-able zImage boot from MMC
------------------------------
-
-An ROM-able zImage compiled with ZBOOT_ROM_MMCIF may be written to MMC and
-SuperH Mobile ARM will to boot directly from the MMCIF hardware block.
-
-This is achieved by the mask ROM loading the first portion of the image into
-MERAM and then jumping to it. This portion contains loader code which
-copies the entire image to SDRAM and jumps to it. From there the zImage
-boot code proceeds as normal, uncompressing the image into its final
-location and then jumping to it.
-
-This code has been tested on an AP4EB board using the developer 1A eMMC
-boot mode which is configured using the following jumper settings.
-The board used for testing required a patched mask ROM in order for
-this mode to function.
-
- 8 7 6 5 4 3 2 1
- x|x|x|x|x| |x|
-S4 -+-+-+-+-+-+-+-
- | | | | |x| |x on
-
-The zImage must be written to the MMC card at sector 1 (512 bytes) in
-vrl4 format. A utility vrl4 is supplied to accomplish this.
-
-e.g.
- vrl4 < zImage | dd of=/dev/sdX bs=512 seek=1
-
-A dual-voltage MMC 4.0 card was used for testing.
diff --git a/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt b/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt
deleted file mode 100644
index 441959846e1a..000000000000
--- a/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-ROM-able zImage boot from eSD
------------------------------
-
-An ROM-able zImage compiled with ZBOOT_ROM_SDHI may be written to eSD and
-SuperH Mobile ARM will to boot directly from the SDHI hardware block.
-
-This is achieved by the mask ROM loading the first portion of the image into
-MERAM and then jumping to it. This portion contains loader code which
-copies the entire image to SDRAM and jumps to it. From there the zImage
-boot code proceeds as normal, uncompressing the image into its final
-location and then jumping to it.
-
-This code has been tested on an mackerel board using the developer 1A eSD
-boot mode which is configured using the following jumper settings.
-
- 8 7 6 5 4 3 2 1
- x|x|x|x| |x|x|
-S4 -+-+-+-+-+-+-+-
- | | | |x| | |x on
-
-The eSD card needs to be present in SDHI slot 1 (CN7).
-As such S1 and S33 also need to be configured as per
-the notes in arch/arm/mach-shmobile/board-mackerel.c.
-
-A partial zImage must be written to physical partition #1 (boot)
-of the eSD at sector 0 in vrl4 format. A utility vrl4 is supplied to
-accomplish this.
-
-e.g.
- vrl4 < zImage | dd of=/dev/sdX bs=512 count=17
-
-A full copy of _the same_ zImage should be written to physical partition #1
-(boot) of the eSD at sector 0. This should _not_ be in vrl4 format.
-
- vrl4 < zImage | dd of=/dev/sdX bs=512
-
-Note: The commands above assume that the physical partition has been
-switched. No such facility currently exists in the Linux Kernel.
-
-Physical partitions are described in the eSD specification. At the time of
-writing they are not the same as partitions that are typically configured
-using fdisk and visible through /proc/partitions
diff --git a/Documentation/arm/msm/gpiomux.txt b/Documentation/arm/msm/gpiomux.txt
deleted file mode 100644
index 67a81620adf6..000000000000
--- a/Documentation/arm/msm/gpiomux.txt
+++ /dev/null
@@ -1,176 +0,0 @@
-This document provides an overview of the msm_gpiomux interface, which
-is used to provide gpio pin multiplexing and configuration on mach-msm
-targets.
-
-History
-=======
-
-The first-generation API for gpio configuration & multiplexing on msm
-is the function gpio_tlmm_config(). This function has a few notable
-shortcomings, which led to its deprecation and replacement by gpiomux:
-
-The 'disable' parameter: Setting the second parameter to
-gpio_tlmm_config to GPIO_CFG_DISABLE tells the peripheral
-processor in charge of the subsystem to perform a look-up into a
-low-power table and apply the low-power/sleep setting for the pin.
-As the msm family evolved this became problematic. Not all pins
-have sleep settings, not all peripheral processors will accept requests
-to apply said sleep settings, and not all msm targets have their gpio
-subsystems managed by a peripheral processor. In order to get consistent
-behavior on all targets, drivers are forced to ignore this parameter,
-rendering it useless.
-
-The 'direction' flag: for all mux-settings other than raw-gpio (0),
-the output-enable bit of a gpio is hard-wired to a known
-input (usually VDD or ground). For those settings, the direction flag
-is meaningless at best, and deceptive at worst. In addition, using the
-direction flag to change output-enable (OE) directly can cause trouble in
-gpiolib, which has no visibility into gpio direction changes made
-in this way. Direction control in gpio mode should be made through gpiolib.
-
-Key Features of gpiomux
-=======================
-
-- A consistent interface across all generations of msm. Drivers can expect
-the same results on every target.
-- gpiomux plays nicely with gpiolib. Functions that should belong to gpiolib
-are left to gpiolib and not duplicated here. gpiomux is written with the
-intent that gpio_chips will call gpiomux reference-counting methods
-from their request() and free() hooks, providing full integration.
-- Tabular configuration. Instead of having to call gpio_tlmm_config
-hundreds of times, gpio configuration is placed in a single table.
-- Per-gpio sleep. Each gpio is individually reference counted, allowing only
-those lines which are in use to be put in high-power states.
-- 0 means 'do nothing': all flags are designed so that the default memset-zero
-equates to a sensible default of 'no configuration', preventing users
-from having to provide hundreds of 'no-op' configs for unused or
-unwanted lines.
-
-Usage
-=====
-
-To use gpiomux, provide configuration information for relevant gpio lines
-in the msm_gpiomux_configs table. Since a 0 equates to "unconfigured",
-only those lines to be managed by gpiomux need to be specified. Here
-is a completely fictional example:
-
-struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
- [12] = {
- .active = GPIOMUX_VALID | GPIOMUX_DRV_8MA | GPIOMUX_FUNC_1,
- .suspended = GPIOMUX_VALID | GPIOMUX_PULL_DOWN,
- },
- [34] = {
- .suspended = GPIOMUX_VALID | GPIOMUX_PULL_DOWN,
- },
-};
-
-To indicate that a gpio is in use, call msm_gpiomux_get() to increase
-its reference count. To decrease the reference count, call msm_gpiomux_put().
-
-The effect of this configuration is as follows:
-
-When the system boots, gpios 12 and 34 will be initialized with their
-'suspended' configurations. All other gpios, which were left unconfigured,
-will not be touched.
-
-When msm_gpiomux_get() is called on gpio 12 to raise its reference count
-above 0, its active configuration will be applied. Since no other gpio
-line has a valid active configuration, msm_gpiomux_get() will have no
-effect on any other line.
-
-When msm_gpiomux_put() is called on gpio 12 or 34 to drop their reference
-count to 0, their suspended configurations will be applied.
-Since no other gpio line has a valid suspended configuration, no other
-gpio line will be effected by msm_gpiomux_put(). Since gpio 34 has no valid
-active configuration, this is effectively a no-op for gpio 34 as well,
-with one small caveat, see the section "About Output-Enable Settings".
-
-All of the GPIOMUX_VALID flags may seem like unnecessary overhead, but
-they address some important issues. As unused entries (all those
-except 12 and 34) are zero-filled, gpiomux needs a way to distinguish
-the used fields from the unused. In addition, the all-zero pattern
-is a valid configuration! Therefore, gpiomux defines an additional bit
-which is used to indicate when a field is used. This has the pleasant
-side-effect of allowing calls to msm_gpiomux_write to use '0' to indicate
-that a value should not be changed:
-
- msm_gpiomux_write(0, GPIOMUX_VALID, 0);
-
-replaces the active configuration of gpio 0 with an all-zero configuration,
-but leaves the suspended configuration as it was.
-
-Static Configurations
-=====================
-
-To install a static configuration, which is applied at boot and does
-not change after that, install a configuration with a suspended component
-but no active component, as in the previous example:
-
- [34] = {
- .suspended = GPIOMUX_VALID | GPIOMUX_PULL_DOWN,
- },
-
-The suspended setting is applied during boot, and the lack of any valid
-active setting prevents any other setting from being applied at runtime.
-If other subsystems attempting to access the line is a concern, one could
-*really* anchor the configuration down by calling msm_gpiomux_get on the
-line at initialization to move the line into active mode. With the line
-held, it will never be re-suspended, and with no valid active configuration,
-no new configurations will be applied.
-
-But then, if having other subsystems grabbing for the line is truly a concern,
-it should be reserved with gpio_request instead, which carries an implicit
-msm_gpiomux_get.
-
-gpiomux and gpiolib
-===================
-
-It is expected that msm gpio_chips will call msm_gpiomux_get() and
-msm_gpiomux_put() from their request and free hooks, like this fictional
-example:
-
-static int request(struct gpio_chip *chip, unsigned offset)
-{
- return msm_gpiomux_get(chip->base + offset);
-}
-
-static void free(struct gpio_chip *chip, unsigned offset)
-{
- msm_gpiomux_put(chip->base + offset);
-}
-
- ...somewhere in a gpio_chip declaration...
- .request = request,
- .free = free,
-
-This provides important functionality:
-- It guarantees that a gpio line will have its 'active' config applied
- when the line is requested, and will not be suspended while the line
- remains requested; and
-- It guarantees that gpio-direction settings from gpiolib behave sensibly.
- See "About Output-Enable Settings."
-
-This mechanism allows for "auto-request" of gpiomux lines via gpiolib
-when it is suitable. Drivers wishing more exact control are, of course,
-free to also use msm_gpiomux_set and msm_gpiomux_get.
-
-About Output-Enable Settings
-============================
-
-Some msm targets do not have the ability to query the current gpio
-configuration setting. This means that changes made to the output-enable
-(OE) bit by gpiolib cannot be consistently detected and preserved by gpiomux.
-Therefore, when gpiomux applies a configuration setting, any direction
-settings which may have been applied by gpiolib are lost and the default
-input settings are re-applied.
-
-For this reason, drivers should not assume that gpio direction settings
-continue to hold if they free and then re-request a gpio. This seems like
-common sense - after all, anybody could have obtained the line in the
-meantime - but it needs saying.
-
-This also means that calls to msm_gpiomux_write will reset the OE bit,
-which means that if the gpio line is held by a client of gpiolib and
-msm_gpiomux_write is called, the direction setting has been lost and
-gpiolib's internal state has been broken.
-Release gpio lines before reconfiguring them.
diff --git a/Documentation/arm64/acpi_object_usage.txt b/Documentation/arm64/acpi_object_usage.txt
new file mode 100644
index 000000000000..a6e1a1805e51
--- /dev/null
+++ b/Documentation/arm64/acpi_object_usage.txt
@@ -0,0 +1,593 @@
+ACPI Tables
+-----------
+The expectations of individual ACPI tables are discussed in the list that
+follows.
+
+If a section number is used, it refers to a section number in the ACPI
+specification where the object is defined. If "Signature Reserved" is used,
+the table signature (the first four bytes of the table) is the only portion
+of the table recognized by the specification, and the actual table is defined
+outside of the UEFI Forum (see Section 5.2.6 of the specification).
+
+For ACPI on arm64, tables also fall into the following categories:
+
+ -- Required: DSDT, FADT, GTDT, MADT, MCFG, RSDP, SPCR, XSDT
+
+ -- Recommended: BERT, EINJ, ERST, HEST, SSDT
+
+ -- Optional: BGRT, CPEP, CSRT, DRTM, ECDT, FACS, FPDT, MCHI, MPST,
+ MSCT, RASF, SBST, SLIT, SPMI, SRAT, TCPA, TPM2, UEFI
+
+ -- Not supported: BOOT, DBG2, DBGP, DMAR, ETDT, HPET, IBFT, IVRS,
+ LPIT, MSDM, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+
+
+Table Usage for ARMv8 Linux
+----- ----------------------------------------------------------------
+BERT Section 18.3 (signature == "BERT")
+ == Boot Error Record Table ==
+ Must be supplied if RAS support is provided by the platform. It
+ is recommended this table be supplied.
+
+BOOT Signature Reserved (signature == "BOOT")
+ == simple BOOT flag table ==
+ Microsoft only table, will not be supported.
+
+BGRT Section 5.2.22 (signature == "BGRT")
+ == Boot Graphics Resource Table ==
+ Optional, not currently supported, with no real use-case for an
+ ARM server.
+
+CPEP Section 5.2.18 (signature == "CPEP")
+ == Corrected Platform Error Polling table ==
+ Optional, not currently supported, and not recommended until such
+ time as ARM-compatible hardware is available, and the specification
+ suitably modified.
+
+CSRT Signature Reserved (signature == "CSRT")
+ == Core System Resources Table ==
+ Optional, not currently supported.
+
+DBG2 Signature Reserved (signature == "DBG2")
+ == DeBuG port table 2 ==
+ Microsoft only table, will not be supported.
+
+DBGP Signature Reserved (signature == "DBGP")
+ == DeBuG Port table ==
+ Microsoft only table, will not be supported.
+
+DSDT Section 5.2.11.1 (signature == "DSDT")
+ == Differentiated System Description Table ==
+ A DSDT is required; see also SSDT.
+
+ ACPI tables contain only one DSDT but can contain one or more SSDTs,
+ which are optional. Each SSDT can only add to the ACPI namespace,
+ but cannot modify or replace anything in the DSDT.
+
+DMAR Signature Reserved (signature == "DMAR")
+ == DMA Remapping table ==
+ x86 only table, will not be supported.
+
+DRTM Signature Reserved (signature == "DRTM")
+ == Dynamic Root of Trust for Measurement table ==
+ Optional, not currently supported.
+
+ECDT Section 5.2.16 (signature == "ECDT")
+ == Embedded Controller Description Table ==
+ Optional, not currently supported, but could be used on ARM if and
+ only if one uses the GPE_BIT field to represent an IRQ number, since
+ there are no GPE blocks defined in hardware reduced mode. This would
+ need to be modified in the ACPI specification.
+
+EINJ Section 18.6 (signature == "EINJ")
+ == Error Injection table ==
+ This table is very useful for testing platform response to error
+ conditions; it allows one to inject an error into the system as
+ if it had actually occurred. However, this table should not be
+ shipped with a production system; it should be dynamically loaded
+ and executed with the ACPICA tools only during testing.
+
+ERST Section 18.5 (signature == "ERST")
+ == Error Record Serialization Table ==
+ On a platform that supports RAS, this table must be supplied if it is not
+ UEFI-based; if it is UEFI-based, this table may be supplied. When this
+ table is not present, UEFI run time service will be utilized to save
+ and retrieve hardware error information to and from a persistent store.
+
+ETDT Signature Reserved (signature == "ETDT")
+ == Event Timer Description Table ==
+ Obsolete table, will not be supported.
+
+FACS Section 5.2.10 (signature == "FACS")
+ == Firmware ACPI Control Structure ==
+ It is unlikely that this table will be terribly useful. If it is
+ provided, the Global Lock will NOT be used since it is not part of
+ the hardware reduced profile, and only 64-bit address fields will
+ be considered valid.
+
+FADT Section 5.2.9 (signature == "FACP")
+ == Fixed ACPI Description Table ==
+ Required for arm64.
+
+ The HW_REDUCED_ACPI flag must be set. All of the fields that are
+ to be ignored when HW_REDUCED_ACPI is set are expected to be set to
+ zero.
+
+ If an FACS table is provided, the X_FIRMWARE_CTRL field is to be
+ used, not FIRMWARE_CTRL.
+
+ If PSCI is used (as is recommended), make sure that ARM_BOOT_ARCH is
+ filled in properly -- that the PSCI_COMPLIANT flag is set and that
+ PSCI_USE_HVC is set or unset as needed (see table 5-37).
+
+ For the DSDT that is also required, the X_DSDT field is to be used,
+ not the DSDT field.
+
+FPDT Section 5.2.23 (signature == "FPDT")
+ == Firmware Performance Data Table ==
+ Optional, not currently supported.
+
+GTDT Section 5.2.24 (signature == "GTDT")
+ == Generic Timer Description Table ==
+ Required for arm64.
+
+HEST Section 18.3.2 (signature == "HEST")
+ == Hardware Error Source Table ==
+ Until further error source types are defined, use only types 6 (AER
+ Root Port), 7 (AER Endpoint), 8 (AER Bridge), or 9 (Generic Hardware
+ Error Source). Firmware first error handling is possible if and only
+ if Trusted Firmware is being used on arm64.
+
+ Must be supplied if RAS support is provided by the platform. It
+ is recommended this table be supplied.
+
+HPET Signature Reserved (signature == "HPET")
+ == High Precision Event timer Table ==
+ x86 only table, will not be supported.
+
+IBFT Signature Reserved (signature == "IBFT")
+ == iSCSI Boot Firmware Table ==
+ Microsoft defined table, support TBD.
+
+IVRS Signature Reserved (signature == "IVRS")
+ == I/O Virtualization Reporting Structure ==
+ x86_64 (AMD) only table, will not be supported.
+
+LPIT Signature Reserved (signature == "LPIT")
+ == Low Power Idle Table ==
+ x86 only table as of ACPI 5.1; future versions have been adapted for
+ use with ARM and will be recommended in order to support ACPI power
+ management.
+
+MADT Section 5.2.12 (signature == "APIC")
+ == Multiple APIC Description Table ==
+ Required for arm64. Only the GIC interrupt controller structures
+ should be used (types 0xA - 0xE).
+
+MCFG Signature Reserved (signature == "MCFG")
+ == Memory-mapped ConFiGuration space ==
+ If the platform supports PCI/PCIe, an MCFG table is required.
+
+MCHI Signature Reserved (signature == "MCHI")
+ == Management Controller Host Interface table ==
+ Optional, not currently supported.
+
+MPST Section 5.2.21 (signature == "MPST")
+ == Memory Power State Table ==
+ Optional, not currently supported.
+
+MSDM Signature Reserved (signature == "MSDM")
+ == Microsoft Data Management table ==
+ Microsoft only table, will not be supported.
+
+MSCT Section 5.2.19 (signature == "MSCT")
+ == Maximum System Characteristic Table ==
+ Optional, not currently supported.
+
+RASF Section 5.2.20 (signature == "RASF")
+ == RAS Feature table ==
+ Optional, not currently supported.
+
+RSDP Section 5.2.5 (signature == "RSD PTR")
+ == Root System Description PoinTeR ==
+ Required for arm64.
+
+RSDT Section 5.2.7 (signature == "RSDT")
+ == Root System Description Table ==
+ Since this table can only provide 32-bit addresses, it is deprecated
+ on arm64, and will not be used.
+
+SBST Section 5.2.14 (signature == "SBST")
+ == Smart Battery Subsystem Table ==
+ Optional, not currently supported.
+
+SLIC Signature Reserved (signature == "SLIC")
+ == Software LIcensing table ==
+ Microsoft only table, will not be supported.
+
+SLIT Section 5.2.17 (signature == "SLIT")
+ == System Locality distance Information Table ==
+ Optional in general, but required for NUMA systems.
+
+SPCR Signature Reserved (signature == "SPCR")
+ == Serial Port Console Redirection table ==
+ Required for arm64.
+
+SPMI Signature Reserved (signature == "SPMI")
+ == Server Platform Management Interface table ==
+ Optional, not currently supported.
+
+SRAT Section 5.2.16 (signature == "SRAT")
+ == System Resource Affinity Table ==
+ Optional, but if used, only the GICC Affinity structures are read.
+ To support NUMA, this table is required.
+
+SSDT Section 5.2.11.2 (signature == "SSDT")
+ == Secondary System Description Table ==
+ These tables are a continuation of the DSDT; these are recommended
+ for use with devices that can be added to a running system, but can
+ also serve the purpose of dividing up device descriptions into more
+ manageable pieces.
+
+ An SSDT can only ADD to the ACPI namespace. It cannot modify or
+ replace existing device descriptions already in the namespace.
+
+ These tables are optional, however. ACPI tables should contain only
+ one DSDT but can contain many SSDTs.
+
+TCPA Signature Reserved (signature == "TCPA")
+ == Trusted Computing Platform Alliance table ==
+ Optional, not currently supported, and may need changes to fully
+ interoperate with arm64.
+
+TPM2 Signature Reserved (signature == "TPM2")
+ == Trusted Platform Module 2 table ==
+ Optional, not currently supported, and may need changes to fully
+ interoperate with arm64.
+
+UEFI Signature Reserved (signature == "UEFI")
+ == UEFI ACPI data table ==
+ Optional, not currently supported. No known use case for arm64,
+ at present.
+
+WAET Signature Reserved (signature == "WAET")
+ == Windows ACPI Emulated devices Table ==
+ Microsoft only table, will not be supported.
+
+WDAT Signature Reserved (signature == "WDAT")
+ == Watch Dog Action Table ==
+ Microsoft only table, will not be supported.
+
+WDRT Signature Reserved (signature == "WDRT")
+ == Watch Dog Resource Table ==
+ Microsoft only table, will not be supported.
+
+WPBT Signature Reserved (signature == "WPBT")
+ == Windows Platform Binary Table ==
+ Microsoft only table, will not be supported.
+
+XSDT Section 5.2.8 (signature == "XSDT")
+ == eXtended System Description Table ==
+ Required for arm64.
+
+
+ACPI Objects
+------------
+The expectations on individual ACPI objects are discussed in the list that
+follows:
+
+Name Section Usage for ARMv8 Linux
+---- ------------ -------------------------------------------------
+_ADR 6.1.1 Use as needed.
+
+_BBN 6.5.5 Use as needed; PCI-specific.
+
+_BDN 6.5.3 Optional; not likely to be used on arm64.
+
+_CCA 6.2.17 This method should be defined for all bus masters
+ on arm64. While cache coherency is assumed, making
+ it explicit ensures the kernel will set up DMA as
+ it should.
+
+_CDM 6.2.1 Optional, to be used only for processor devices.
+
+_CID 6.1.2 Use as needed.
+
+_CLS 6.1.3 Use as needed.
+
+_CRS 6.2.2 Required on arm64.
+
+_DCK 6.5.2 Optional; not likely to be used on arm64.
+
+_DDN 6.1.4 This field can be used for a device name. However,
+ it is meant for DOS device names (e.g., COM1), so be
+ careful of its use across OSes.
+
+_DEP 6.5.8 Use as needed.
+
+_DIS 6.2.3 Optional, for power management use.
+
+_DLM 5.7.5 Optional.
+
+_DMA 6.2.4 Optional.
+
+_DSD 6.2.5 To be used with caution. If this object is used, try
+ to use it within the constraints already defined by the
+ Device Properties UUID. Only in rare circumstances
+ should it be necessary to create a new _DSD UUID.
+
+ In either case, submit the _DSD definition along with
+ any driver patches for discussion, especially when
+ device properties are used. A driver will not be
+ considered complete without a corresponding _DSD
+ description. Once approved by kernel maintainers,
+ the UUID or device properties must then be registered
+ with the UEFI Forum; this may cause some iteration as
+ more than one OS will be registering entries.
+
+_DSM Do not use this method. It is not standardized, the
+ return values are not well documented, and it is
+ currently a frequent source of error.
+
+_DSW 7.2.1 Use as needed; power management specific.
+
+_EDL 6.3.1 Optional.
+
+_EJD 6.3.2 Optional.
+
+_EJx 6.3.3 Optional.
+
+_FIX 6.2.7 x86 specific, not used on arm64.
+
+\_GL 5.7.1 This object is not to be used in hardware reduced
+ mode, and therefore should not be used on arm64.
+
+_GLK 6.5.7 This object requires a global lock be defined; there
+ is no global lock on arm64 since it runs in hardware
+ reduced mode. Hence, do not use this object on arm64.
+
+\_GPE 5.3.1 This namespace is for x86 use only. Do not use it
+ on arm64.
+
+_GSB 6.2.7 Optional.
+
+_HID 6.1.5 Use as needed. This is the primary object to use in
+ device probing, though _CID and _CLS may also be used.
+
+_HPP 6.2.8 Optional, PCI specific.
+
+_HPX 6.2.9 Optional, PCI specific.
+
+_HRV 6.1.6 Optional, use as needed to clarify device behavior; in
+ some cases, this may be easier to use than _DSD.
+
+_INI 6.5.1 Not required, but can be useful in setting up devices
+ when UEFI leaves them in a state that may not be what
+ the driver expects before it starts probing.
+
+_IRC 7.2.15 Use as needed; power management specific.
+
+_LCK 6.3.4 Optional.
+
+_MAT 6.2.10 Optional; see also the MADT.
+
+_MLS 6.1.7 Optional, but highly recommended for use in
+ internationalization.
+
+_OFF 7.1.2 It is recommended to define this method for any device
+ that can be turned on or off.
+
+_ON 7.1.3 It is recommended to define this method for any device
+ that can be turned on or off.
+
+\_OS 5.7.3 This method will return "Linux" by default (this is
+ the value of the macro ACPI_OS_NAME on Linux). The
+ command line parameter acpi_os=<string> can be used
+ to set it to some other value.
+
+_OSC 6.2.11 This method can be a global method in ACPI (i.e.,
+ \_SB._OSC), or it may be associated with a specific
+ device (e.g., \_SB.DEV0._OSC), or both. When used
+ as a global method, only capabilities published in
+ the ACPI specification are allowed. When used as
+ a device-specific method, the process described for
+ using _DSD MUST be used to create an _OSC definition;
+ out-of-process use of _OSC is not allowed. That is,
+ submit the device-specific _OSC usage description as
+ part of the kernel driver submission, get it approved
+ by the kernel community, then register it with the
+ UEFI Forum.
+
+\_OSI 5.7.2 Deprecated on ARM64. Any invocation of this method
+ will print a warning on the console and return false.
+ That is, as far as ACPI firmware is concerned, _OSI
+ cannot be used to determine what sort of system is
+ being used or what functionality is provided. The
+ _OSC method is to be used instead.
+
+_OST 6.3.5 Optional.
+
+_PDC 8.4.1 Deprecated, do not use on arm64.
+
+\_PIC 5.8.1 The method should not be used. On arm64, the only
+ interrupt model available is GIC.
+
+_PLD 6.1.8 Optional.
+
+\_PR 5.3.1 This namespace is for x86 use only on legacy systems.
+ Do not use it on arm64.
+
+_PRS 6.2.12 Optional.
+
+_PRT 6.2.13 Required as part of the definition of all PCI root
+ devices.
+
+_PRW 7.2.13 Use as needed; power management specific.
+
+_PRx 7.2.8-11 Use as needed; power management specific. If _PR0 is
+ defined, _PR3 must also be defined.
+
+_PSC 7.2.6 Use as needed; power management specific.
+
+_PSE 7.2.7 Use as needed; power management specific.
+
+_PSW 7.2.14 Use as needed; power management specific.
+
+_PSx 7.2.2-5 Use as needed; power management specific. If _PS0 is
+ defined, _PS3 must also be defined. If clocks or
+ regulators need adjusting to be consistent with power
+ usage, change them in these methods.
+
+\_PTS 7.3.1 Use as needed; power management specific.
+
+_PXM 6.2.14 Optional.
+
+_REG 6.5.4 Use as needed.
+
+\_REV 5.7.4 Always returns the latest version of ACPI supported.
+
+_RMV 6.3.6 Optional.
+
+\_SB 5.3.1 Required on arm64; all devices must be defined in this
+ namespace.
+
+_SEG 6.5.6 Use as needed; PCI-specific.
+
+\_SI 5.3.1, Optional.
+ 9.1
+
+_SLI 6.2.15 Optional; recommended when SLIT table is in use.
+
+_STA 6.3.7, It is recommended to define this method for any device
+ 7.1.4 that can be turned on or off.
+
+_SRS 6.2.16 Optional; see also _PRS.
+
+_STR 6.1.10 Recommended for conveying device names to end users;
+ this is preferred over using _DDN.
+
+_SUB 6.1.9 Use as needed; _HID or _CID are preferred.
+
+_SUN 6.1.11 Optional.
+
+\_Sx 7.3.2 Use as needed; power management specific.
+
+_SxD 7.2.16-19 Use as needed; power management specific.
+
+_SxW 7.2.20-24 Use as needed; power management specific.
+
+_SWS 7.3.3 Use as needed; power management specific; this may
+ require specification changes for use on arm64.
+
+\_TTS 7.3.4 Use as needed; power management specific.
+
+\_TZ 5.3.1 Optional.
+
+_UID 6.1.12 Recommended for distinguishing devices of the same
+ class; define it if at all possible.
+
+\_WAK 7.3.5 Use as needed; power management specific.
+
+
+ACPI Event Model
+----------------
+Do not use GPE block devices; these are not supported in the hardware reduced
+profile used by arm64. Since there are no GPE blocks defined for use on ARM
+platforms, GPIO-signaled interrupts should be used for creating system events.
+
+
+ACPI Processor Control
+----------------------
+Section 8 of the ACPI specification is currently undergoing change that
+should be completed in the 6.0 version of the specification. Processor
+performance control will be handled differently for arm64 at that point
+in time. Processor aggregator devices (section 8.5) will not be used,
+for example; another, similar mechanism will be used instead.
+
+While UEFI constrains what we can say until the release of 6.0, it is
+recommended that CPPC (8.4.5) be used as the primary model. This will
+still be useful into the future. C-states and P-states will still be
+provided, but most of the current design work appears to favor CPPC.
+
+Further, it is essential that the ARMv8 SoC provide a fully functional
+implementation of PSCI; this will be the only mechanism supported by ACPI
+to control CPU power state (including secondary CPU booting).
+
+More details will be provided on the release of the ACPI 6.0 specification.
+
+
+ACPI System Address Map Interfaces
+----------------------------------
+In Section 15 of the ACPI specification, several methods are mentioned as
+possible mechanisms for conveying memory resource information to the kernel.
+For arm64, we will only support UEFI for booting with ACPI, hence the UEFI
+GetMemoryMap() boot service is the only mechanism that will be used.
+
+
+ACPI Platform Error Interfaces (APEI)
+-------------------------------------
+The APEI tables supported are described above.
+
+APEI requires the equivalent of an SCI and an NMI on ARMv8. The SCI is used
+to notify the OSPM of errors that have occurred but can be corrected, so
+that the system can continue operating correctly, even if possibly degraded. The NMI is
+used to indicate fatal errors that cannot be corrected, and require immediate
+attention.
+
+Since there is no direct equivalent of the x86 SCI or NMI, arm64 handles
+these slightly differently. The SCI is handled as a normal GPIO-signaled
+interrupt; given that these are corrected (or correctable) errors being
+reported, this is sufficient. The NMI is emulated as the highest priority
+GPIO-signaled interrupt possible. This implies some caution must be used
+since there could be interrupts at higher privilege levels or even interrupts
+at the same priority as the emulated NMI. In Linux, this should not be the
+case but one should be aware it could happen.
+
+
+ACPI Objects Not Supported on ARM64
+-----------------------------------
+While this may change in the future, there are several classes of objects
+that can be defined, but are not currently of general interest to ARM servers.
+
+These are not supported:
+
+ -- Section 9.2: ambient light sensor devices
+
+ -- Section 9.3: battery devices
+
+ -- Section 9.4: lids (e.g., laptop lids)
+
+ -- Section 9.8.2: IDE controllers
+
+ -- Section 9.9: floppy controllers
+
+ -- Section 9.10: GPE block devices
+
+ -- Section 9.15: PC/AT RTC/CMOS devices
+
+ -- Section 9.16: user presence detection devices
+
+ -- Section 9.17: I/O APIC devices; all GICs must be enumerable via MADT
+
+ -- Section 9.18: time and alarm devices (see 9.15)
+
+
+ACPI Objects Not Yet Implemented
+--------------------------------
+While these objects have x86 equivalents, and they do make some sense in ARM
+servers, there is either no hardware available at present, or in some cases
+there may not yet be a non-ARM implementation. Hence, they are currently not
+implemented though that may change in the future.
+
+Not yet implemented are:
+
+ -- Section 10: power source and power meter devices
+
+ -- Section 11: thermal management
+
+ -- Section 12: embedded controllers interface
+
+ -- Section 13: SMBus interfaces
+
+ -- Section 17: NUMA support (prototypes have been submitted for
+ review)
diff --git a/Documentation/arm64/arm-acpi.txt b/Documentation/arm64/arm-acpi.txt
new file mode 100644
index 000000000000..570a4f8e1a01
--- /dev/null
+++ b/Documentation/arm64/arm-acpi.txt
@@ -0,0 +1,505 @@
+ACPI on ARMv8 Servers
+---------------------
+ACPI can be used for ARMv8 general purpose servers designed to follow
+the ARM SBSA (Server Base System Architecture) [0] and SBBR (Server
+Base Boot Requirements) [1] specifications. Please note that the SBBR
+can be retrieved simply by visiting [1], but the SBSA is currently only
+available to those with an ARM login due to ARM IP licensing concerns.
+
+The ARMv8 kernel implements the reduced hardware model of ACPI version
+5.1 or later. Links to the specification and all external documents
+it refers to are managed by the UEFI Forum. The specification is
+available at http://www.uefi.org/specifications and documents referenced
+by the specification can be found via http://www.uefi.org/acpi.
+
+If an ARMv8 system does not meet the requirements of the SBSA and SBBR,
+or cannot be described using the mechanisms defined in the required ACPI
+specifications, then ACPI may not be a good fit for the hardware.
+
+While the documents mentioned above set out the requirements for building
+industry-standard ARMv8 servers, they also apply to more than one operating
+system. The purpose of this document is to describe the interaction between
+ACPI and Linux only, on an ARMv8 system -- that is, what Linux expects of
+ACPI and what ACPI can expect of Linux.
+
+
+Why ACPI on ARM?
+----------------
+Before examining the details of the interface between ACPI and Linux, it is
+useful to understand why ACPI is being used. Several technologies already
+exist in Linux for describing non-enumerable hardware, after all. In this
+section we summarize a blog post [2] from Grant Likely that outlines the
+reasoning behind ACPI on ARMv8 servers. In fact, a good portion of the
+summary text below is taken almost directly from that post.
+
+The short form of the rationale for ACPI on ARM is:
+
+-- ACPI’s bytecode (AML) allows the platform to encode hardware behavior,
+ while DT explicitly does not support this. For hardware vendors, being
+ able to encode behavior is a key tool used in supporting operating
+ system releases on new hardware.
+
+-- ACPI’s OSPM defines a power management model that constrains what the
+ platform is allowed to do into a specific model, while still providing
+ flexibility in hardware design.
+
+-- In the enterprise server environment, ACPI has established bindings (such
+ as for RAS) which are currently used in production systems. DT does not.
+ Such bindings could be defined in DT at some point, but doing so means ARM
+ and x86 would end up using completely different code paths in both firmware
+ and the kernel.
+
+-- Choosing a single interface to describe the abstraction between a platform
+ and an OS is important. Hardware vendors would not be required to implement
+ both DT and ACPI if they want to support multiple operating systems. And,
+ agreeing on a single interface instead of being fragmented into per OS
+ interfaces makes for better interoperability overall.
+
+-- The new ACPI governance process works well and Linux is now at the same
+ table as hardware vendors and other OS vendors. In fact, there is no
+ longer any reason to feel that ACPI only belongs to Windows or that
+ Linux is in any way secondary to Microsoft in this arena. The move of
+ ACPI governance into the UEFI forum has significantly opened up the
+ specification development process, and currently, a large portion of the
+ changes being made to ACPI is being driven by Linux.
+
+Key to the use of ACPI is the support model. For servers in general, the
+responsibility for hardware behaviour cannot solely be the domain of the
+kernel, but rather must be split between the platform and the kernel, in
+order to allow for orderly change over time. ACPI frees the OS from needing
+to understand all the minute details of the hardware so that the OS doesn’t
+need to be ported to each and every device individually. It allows the
+hardware vendors to take responsibility for power management behaviour without
+depending on an OS release cycle which is not under their control.
+
+ACPI is also important because hardware and OS vendors have already worked
+out the mechanisms for supporting a general purpose computing ecosystem. The
+infrastructure is in place, the bindings are in place, and the processes are
+in place. DT does exactly what Linux needs it to when working with vertically
+integrated devices, but there are no good processes for supporting what the
+server vendors need. Linux could potentially get there with DT, but doing so
+really just duplicates something that already works. ACPI already does what
+the hardware vendors need, Microsoft won’t collaborate on DT, and hardware
+vendors would still end up providing two completely separate firmware
+interfaces -- one for Linux and one for Windows.
+
+
+Kernel Compatibility
+--------------------
+One of the primary motivations for ACPI is standardization, and using that
+to provide backward compatibility for Linux kernels. In the server market,
+software and hardware are often used for long periods. ACPI allows the
+kernel and firmware to agree on a consistent abstraction that can be
+maintained over time, even as hardware or software change. As long as the
+abstraction is supported, systems can be updated without necessarily having
+to replace the kernel.
+
+When a Linux driver or subsystem is first implemented using ACPI, it by
+definition ends up requiring a specific version of the ACPI specification
+-- its baseline. ACPI firmware must continue to work, even though it may
+not be optimal, with the earliest kernel version that first provides support
+for that baseline version of ACPI. There may be a need for additional drivers,
+but adding new functionality (e.g., CPU power management) should not break
+older kernel versions. Further, ACPI firmware must also work with the most
+recent version of the kernel.
+
+
+Relationship with Device Tree
+-----------------------------
+ACPI support in drivers and subsystems for ARMv8 should never be mutually
+exclusive with DT support at compile time.
+
+At boot time the kernel will only use one description method depending on
+parameters passed from the bootloader (including kernel bootargs).
+
+Regardless of whether DT or ACPI is used, the kernel must always be capable
+of booting with either scheme (in kernels with both schemes enabled at compile
+time).
+
+
+Booting using ACPI tables
+-------------------------
+The only defined method for passing ACPI tables to the kernel on ARMv8
+is via the UEFI system configuration table. Just so it is explicit, this
+means that ACPI is only supported on platforms that boot via UEFI.
+
+When an ARMv8 system boots, it can either have DT information, ACPI tables,
+or in some very unusual cases, both. If no command line parameters are used,
+the kernel will try to use DT for device enumeration; if there is no DT
+present, the kernel will try to use ACPI tables, but only if they are present.
+If neither is available, the kernel will not boot. If acpi=force is used
+on the command line, the kernel will attempt to use ACPI tables first, but
+fall back to DT if there are no ACPI tables present. The basic idea is that
+the kernel will not fail to boot unless it absolutely has no other choice.
+
+Processing of ACPI tables may be disabled by passing acpi=off on the kernel
+command line; this is the default behavior.
+
+In order for the kernel to load and use ACPI tables, the UEFI implementation
+MUST set the ACPI_20_TABLE_GUID to point to the RSDP table (the table with
+the ACPI signature "RSD PTR "). If this pointer is incorrect and acpi=force
+is used, the kernel will disable ACPI and try to use DT to boot instead; the
+kernel has, in effect, determined that ACPI tables are not present at that
+point.
+
+If the pointer to the RSDP table is correct, the table will be mapped into
+the kernel by the ACPI core, using the address provided by UEFI.
+
+The ACPI core will then locate and map in all other ACPI tables provided by
+using the addresses in the RSDP table to find the XSDT (eXtended System
+Description Table). The XSDT in turn provides the addresses to all other
+ACPI tables provided by the system firmware; the ACPI core will then traverse
+this table and map in the tables listed.
+
+The ACPI core will ignore any provided RSDT (Root System Description Table).
+RSDTs have been deprecated and are ignored on arm64 since they only allow
+for 32-bit addresses.
+
+Further, the ACPI core will only use the 64-bit address fields in the FADT
+(Fixed ACPI Description Table). Any 32-bit address fields in the FADT will
+be ignored on arm64.
+
+Hardware reduced mode (see Section 4.1 of the ACPI 5.1 specification) will
+be enforced by the ACPI core on arm64. Doing so allows the ACPI core to
+run less complex code since it no longer has to provide support for legacy
+hardware from other architectures. Any fields that are not to be used for
+hardware reduced mode must be set to zero.
+
+For the ACPI core to operate properly, and in turn provide the information
+the kernel needs to configure devices, it expects to find the following
+tables (all section numbers refer to the ACPI 5.1 specification):
+
+ -- RSDP (Root System Description Pointer), section 5.2.5
+
+ -- XSDT (eXtended System Description Table), section 5.2.8
+
+ -- FADT (Fixed ACPI Description Table), section 5.2.9
+
+ -- DSDT (Differentiated System Description Table), section
+ 5.2.11.1
+
+ -- MADT (Multiple APIC Description Table), section 5.2.12
+
+ -- GTDT (Generic Timer Description Table), section 5.2.24
+
+ -- If PCI is supported, the MCFG (Memory mapped ConFiGuration
+ Table), section 5.2.6, specifically Table 5-31.
+
+If the above tables are not all present, the kernel may or may not be
+able to boot properly since it may not be able to configure all of the
+devices available.
+
+
+ACPI Detection
+--------------
+Drivers should determine their probe() type by checking for a null
+value for ACPI_HANDLE, or checking .of_node, or other information in
+the device structure. This is detailed further in the "Driver
+Recommendations" section.
+
+In non-driver code, if the presence of ACPI needs to be detected at
+runtime, then check the value of acpi_disabled. If CONFIG_ACPI is not
+set, acpi_disabled will always be 1.
+
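+As a rough illustration only (the function name below is made up for this
+example and does not exist in the kernel), non-driver code might check for
+ACPI at runtime like this:
+
+#include <linux/acpi.h>
+
+static int __init example_platform_init(void)
+{
+	if (acpi_disabled) {
+		/* ACPI is not in use; rely on DT-provided information */
+		return 0;
+	}
+
+	/* ACPI tables are present and in use; do ACPI-specific setup */
+	return 0;
+}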
+
+Device Enumeration
+------------------
+Device descriptions in ACPI should use standard recognized ACPI interfaces.
+These may contain less information than is typically provided via a Device
+Tree description for the same device. This is also one of the reasons that
+ACPI can be useful -- the driver takes into account that it may have less
+detailed information about the device and uses sensible defaults instead.
+If done properly in the driver, the hardware can change and improve over
+time without the driver having to change at all.
+
+Clocks provide an excellent example. In DT, clocks need to be specified
+and the drivers need to take them into account. In ACPI, the assumption
+is that UEFI will leave the device in a reasonable default state, including
+any clock settings. If for some reason the driver needs to change a clock
+value, this can be done in an ACPI method; all the driver needs to do is
+invoke the method and not concern itself with what the method needs to do
+to change the clock. Changing the hardware can then take place over time
+by changing what the ACPI method does, and not the driver.
+
+In DT, the parameters needed by the driver to set up clocks as in the example
+above are known as "bindings"; in ACPI, these are known as "Device Properties"
+and are provided to a driver via the _DSD object.
+
+ACPI tables are described with a formal language called ASL, the ACPI
+Source Language (section 19 of the specification). This means that there
+are always multiple ways to describe the same thing -- including device
+properties. For example, device properties could use an ASL construct
+that looks like this: Name(KEY0, "value0"). An ACPI device driver would
+then retrieve the value of the property by evaluating the KEY0 object.
+However, using Name() this way has multiple problems: (1) ACPI limits
+names ("KEY0") to four characters unlike DT; (2) there is no industry
+wide registry that maintains a list of names, minimzing re-use; (3)
+there is also no registry for the definition of property values ("value0"),
+again making re-use difficult; and (4) how does one maintain backward
+compatibility as new hardware comes out? The _DSD method was created
+to solve precisely these sorts of problems; Linux drivers should ALWAYS
+use the _DSD method for device properties and nothing else.
+
+The _DSM object (ACPI Section 9.14.1) could also be used for conveying
+device properties to a driver. Linux drivers should only expect it to
+be used if _DSD cannot represent the data required, and there is no way
+to create a new UUID for the _DSD object. Note that there is even less
+regulation of the use of _DSM than there is of _DSD. Drivers that depend
+on the contents of _DSM objects will be more difficult to maintain over
+time because of this; as of this writing, the use of _DSM is the cause
+of quite a few firmware problems and is not recommended.
+
+Drivers should look for device properties in the _DSD object ONLY; the _DSD
+object is described in the ACPI specification section 6.2.5, but this only
+describes how to define the structure of an object returned via _DSD, and
+how specific data structures are defined by specific UUIDs. Linux should
+only use the _DSD Device Properties UUID [5]:
+
+ -- UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301
+
+ -- http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+
+The UEFI Forum provides a mechanism for registering device properties [4]
+so that they may be used across all operating systems supporting ACPI.
+Device properties that have not been registered with the UEFI Forum should
+not be used.
+
+Before creating new device properties, check to be sure that they have not
+been defined before and either registered in the Linux kernel documentation
+as DT bindings, or the UEFI Forum as device properties. While we do not want
+to simply move all DT bindings into ACPI device properties, we can learn from
+what has been previously defined.
+
+If it is necessary to define a new device property, or if it makes sense to
+synthesize the definition of a binding so it can be used in any firmware,
+both DT bindings and ACPI device properties for device drivers have review
+processes. Use them both. When the driver itself is submitted for review
+to the Linux mailing lists, the device property definitions needed must be
+submitted at the same time. A driver that supports ACPI and uses device
+properties will not be considered complete without their definitions. Once
+the device property has been accepted by the Linux community, it must be
+registered with the UEFI Forum [4], which will review it again for consistency
+within the registry. This may require iteration. The UEFI Forum, though,
+will always be the canonical site for device property definitions.
+
+It may make sense to provide notice to the UEFI Forum that there is the
+intent to register a previously unused device property name as a means of
+reserving the name for later use. Other operating system vendors will
+also be submitting registration requests and this may help smooth the
+process.
+
+Once registration and review have been completed, the kernel provides an
+interface for looking up device properties in a manner independent of
+whether DT or ACPI is being used. This API should be used [6]; it can
+eliminate some duplication of code paths in driver probing functions and
+discourage divergence between DT bindings and ACPI device properties.
+
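+To illustrate (the property name "clock-frequency" and the helper function
+below are hypothetical, shown only to demonstrate the API), a driver can read
+a property the same way whether it was supplied by a DT binding or by _DSD:
+
+#include <linux/property.h>
+
+static int example_get_clock(struct device *dev, u32 *freq)
+{
+	/* Works for both DT bindings and ACPI _DSD device properties */
+	return device_property_read_u32(dev, "clock-frequency", freq);
+}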
+
+Programmable Power Control Resources
+------------------------------------
+Programmable power control resources include such resources as voltage/current
+providers (regulators) and clock sources.
+
+With ACPI, the kernel clock and regulator framework is not expected to be used
+at all.
+
+The kernel assumes that power control of these resources is represented with
+Power Resource Objects (ACPI section 7.1). The ACPI core will then handle
+correctly enabling and disabling resources as they are needed. In order to
+get that to work, ACPI assumes each device has defined D-states and that these
+can be controlled through the optional ACPI methods _PS0, _PS1, _PS2, and _PS3;
+in ACPI, _PS0 is the method to invoke to turn a device full on, and _PS3 is for
+turning a device full off.
+
+There are two options for using those Power Resources. They can:
+
+ -- be managed in a _PSx method which gets called on entry to power
+ state Dx.
+
+ -- be declared separately as power resources with their own _ON and _OFF
+ methods. They are then tied back to D-states for a particular device
+ via _PRx which specifies which power resources a device needs to be on
+ while in Dx. The kernel then tracks the number of devices using a power
+ resource and calls _ON/_OFF as needed.
+
+The kernel ACPI code will also assume that the _PSx methods follow the normal
+ACPI rules for such methods:
+
+ -- If either _PS0 or _PS3 is implemented, then the other method must also
+ be implemented.
+
+ -- If a device requires usage or setup of a power resource when on, the ASL
+ should arrange for it to be allocated/enabled in the _PS0 method.
+
+ -- Resources allocated or enabled in the _PS0 method should be disabled
+ or de-allocated in the _PS3 method.
+
+ -- Firmware will leave the resources in a reasonable state before handing
+ over control to the kernel.
+
+Such code in _PSx methods will of course be very platform specific. But,
+this allows the driver to abstract out the interface for operating the device
+and avoid having to read special non-standard values from ACPI tables. Further,
+abstracting the use of these resources allows the hardware to change over time
+without requiring updates to the driver.
+
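+As a minimal sketch only -- assuming the device's ACPI description provides
+_PSx methods or power resources as described above, and with all names below
+being illustrative -- a driver normally lets runtime PM trigger those
+transitions rather than evaluating the methods itself:
+
+#include <linux/pm_runtime.h>
+
+static int example_do_io(struct device *dev)
+{
+	int ret;
+
+	/*
+	 * The ACPI core evaluates _PS0 (and any _ON methods for power
+	 * resources) as needed when the device is runtime-resumed; the
+	 * driver does not touch clocks or regulators directly.
+	 */
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
+
+	/* ... access the device ... */
+
+	/* May lead to _PS3/_OFF once the device goes idle. */
+	pm_runtime_put(dev);
+	return 0;
+}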
+
+Clocks
+------
+ACPI makes the assumption that clocks are initialized by the firmware --
+UEFI, in this case -- to some working value before control is handed over
+to the kernel. This has implications for devices such as UARTs, or SoC-driven
+LCD displays, for example.
+
+When the kernel boots, the clocks are assumed to be set to reasonable
+working values. If for some reason the frequency needs to change -- e.g.,
+throttling for power management -- the device driver should expect that
+process to be abstracted out into some ACPI method that can be invoked
+(please see the ACPI specification for further recommendations on standard
+methods to be expected). The only exceptions to this are CPU clocks where
+CPPC provides a much richer interface than ACPI methods. If the clocks
+are not set, there is no direct way for Linux to control them.
+
+If an SoC vendor wants to provide fine-grained control of the system clocks,
+they could do so by providing ACPI methods that could be invoked by Linux
+drivers. However, this is NOT recommended and Linux drivers should NOT use
+such methods, even if they are provided. Such methods are not currently
+standardized in the ACPI specification, and using them could tie a kernel
+to a very specific SoC, or tie an SoC to a very specific version of the
+kernel, both of which we are trying to avoid.
+
+
+Driver Recommendations
+----------------------
+DO NOT remove any DT handling when adding ACPI support for a driver. The
+same device may be used on many different systems.
+
+DO try to structure the driver so that it is data-driven. That is, set up
+a struct containing internal per-device state based on defaults and whatever
+else must be discovered by the driver probe function. Then, have the rest
+of the driver operate off of the contents of that struct. Doing so should
+allow most divergence between ACPI and DT functionality to be kept local to
+the probe function instead of being scattered throughout the driver. For
+example:
+
+static int device_probe_dt(struct platform_device *pdev)
+{
+ /* DT specific functionality */
+ ...
+}
+
+static int device_probe_acpi(struct platform_device *pdev)
+{
+ /* ACPI specific functionality */
+ ...
+}
+
+static int device_probe(struct platform_device *pdev)
+{
+ ...
+ struct device_node *node = pdev->dev.of_node;
+ ...
+
+ if (node)
+ ret = device_probe_dt(pdev);
+ else if (ACPI_HANDLE(&pdev->dev))
+ ret = device_probe_acpi(pdev);
+ else
+ /* other initialization */
+ ...
+ /* Continue with any generic probe operations */
+ ...
+}
+
+DO keep the MODULE_DEVICE_TABLE entries together in the driver to make it
+clear the different names the driver is probed for, both from DT and from
+ACPI:
+
+static struct of_device_id virtio_mmio_match[] = {
+ { .compatible = "virtio,mmio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+ { "LNRO0005", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+
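+As a brief sketch (the probe/remove callbacks and driver name here are
+illustrative), both ID tables are then referenced from the same
+platform_driver so a single driver can bind through either DT or ACPI:
+
+static struct platform_driver virtio_mmio_driver = {
+	.probe		= virtio_mmio_probe,
+	.remove		= virtio_mmio_remove,
+	.driver		= {
+		.name	= "virtio-mmio",
+		.of_match_table	= virtio_mmio_match,
+		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
+	},
+};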
+
+ASWG
+----
+The ACPI specification changes regularly. During the year 2014, for instance,
+version 5.1 was released and version 6.0 substantially completed, with most of
+the changes being driven by ARM-specific requirements. Proposed changes are
+presented and discussed in the ASWG (ACPI Specification Working Group) which
+is a part of the UEFI Forum.
+
+Participation in this group is open to all UEFI members. Please see
+http://www.uefi.org/workinggroup for details on group membership.
+
+It is the intent of the ARMv8 ACPI kernel code to follow the ACPI specification
+as closely as possible, and to only implement functionality that complies with
+the released standards from UEFI ASWG. As a practical matter, there will be
+vendors that provide bad ACPI tables or violate the standards in some way.
+If this is because of errors, quirks and fixups may be necessary, but will
+be avoided if possible. If there are features missing from ACPI that preclude
+it from being used on a platform, ECRs (Engineering Change Requests) should be
+submitted to ASWG and go through the normal approval process; for those that
+are not UEFI members, many other members of the Linux community are and would
+likely be willing to assist in submitting ECRs.
+
+
+Linux Code
+----------
+Individual items specific to Linux on ARM, contained in the Linux
+source code, are in the list that follows:
+
+ACPI_OS_NAME This macro defines the string to be returned when
+ an ACPI method invokes the _OS method. On ARM64
+ systems, this macro will be "Linux" by default.
+ The command line parameter acpi_os=<string>
+ can be used to set it to some other value. The
+ default value for other architectures is "Microsoft
+ Windows NT", for example.
+
+ACPI Objects
+------------
+Detailed expectations for ACPI tables and objects are listed in the file
+Documentation/arm64/acpi_object_usage.txt.
+
+
+References
+----------
+[0] http://silver.arm.com -- document ARM-DEN-0029, or newer
+ "Server Base System Architecture", version 2.3, dated 27 Mar 2014
+
+[1] http://infocenter.arm.com/help/topic/com.arm.doc.den0044a/Server_Base_Boot_Requirements.pdf
+ Document ARM-DEN-0044A, or newer: "Server Base Boot Requirements, System
+ Software on ARM Platforms", dated 16 Aug 2014
+
+[2] http://www.secretlab.ca/archives/151, 10 Jan 2015, Copyright (c) 2015,
+ Linaro Ltd., written by Grant Likely. A copy of the verbatim text (apart
+ from formatting) is also in Documentation/arm64/why_use_acpi.txt.
+
+[3] AMD ACPI for Seattle platform documentation:
+ http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2012/10/Seattle_ACPI_Guide.pdf
+
+[4] http://www.uefi.org/acpi -- please see the link for the "ACPI _DSD Device
+ Property Registry Instructions"
+
+[5] http://www.uefi.org/acpi -- please see the link for the "_DSD (Device
+ Specific Data) Implementation Guide"
+
+[6] Kernel code for the unified device property interface can be found in
+ include/linux/property.h and drivers/base/property.c.
+
+
+Authors
+-------
+Al Stone <al.stone@linaro.org>
+Graeme Gregory <graeme.gregory@linaro.org>
+Hanjun Guo <hanjun.guo@linaro.org>
+
+Grant Likely <grant.likely@linaro.org>, for the "Why ACPI on ARM?" section
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 183e41bdcb69..dab6da3382d9 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -201,11 +201,11 @@ These routines add 1 and subtract 1, respectively, from the given
atomic_t and return the new counter value after the operation is
performed.
-Unlike the above routines, it is required that explicit memory
-barriers are performed before and after the operation. It must be
-done such that all memory operations before and after the atomic
-operation calls are strongly ordered with respect to the atomic
-operation itself.
+Unlike the above routines, it is required that these primitives
+include explicit memory barriers that are performed before and after
+the operation. It must be done such that all memory operations before
+and after the atomic operation calls are strongly ordered with respect
+to the atomic operation itself.
For example, it should behave as if a smp_mb() call existed both
before and after the atomic operation.
@@ -233,21 +233,21 @@ These two routines increment and decrement by 1, respectively, the
given atomic counter. They return a boolean indicating whether the
resulting counter value was zero or not.
-It requires explicit memory barrier semantics around the operation as
-above.
+Again, these primitives provide explicit memory barrier semantics around
+the atomic operation.
int atomic_sub_and_test(int i, atomic_t *v);
This is identical to atomic_dec_and_test() except that an explicit
-decrement is given instead of the implicit "1". It requires explicit
-memory barrier semantics around the operation.
+decrement is given instead of the implicit "1". This primitive must
+provide explicit memory barrier semantics around the operation.
int atomic_add_negative(int i, atomic_t *v);
-The given increment is added to the given atomic counter value. A
-boolean is return which indicates whether the resulting counter value
-is negative. It requires explicit memory barrier semantics around the
-operation.
+The given increment is added to the given atomic counter value. A boolean
+is returned which indicates whether the resulting counter value is negative.
+This primitive must provide explicit memory barrier semantics around
+the operation.
Then:
@@ -257,7 +257,7 @@ This performs an atomic exchange operation on the atomic variable v, setting
the given new value. It returns the old value that the atomic variable v had
just before the operation.
-atomic_xchg requires explicit memory barriers around the operation.
+atomic_xchg must provide explicit memory barriers around the operation.
int atomic_cmpxchg(atomic_t *v, int old, int new);
@@ -266,7 +266,7 @@ with the given old and new values. Like all atomic_xxx operations,
atomic_cmpxchg will only satisfy its atomicity semantics as long as all
other accesses of *v are performed through atomic_xxx operations.
-atomic_cmpxchg requires explicit memory barriers around the operation.
+atomic_cmpxchg must provide explicit memory barriers around the operation.
The semantics for atomic_cmpxchg are the same as those defined for 'cas'
below.
@@ -279,8 +279,8 @@ If the atomic value v is not equal to u, this function adds a to v, and
returns non zero. If v is equal to u then it returns zero. This is done as
an atomic operation.
-atomic_add_unless requires explicit memory barriers around the operation
-unless it fails (returns 0).
+atomic_add_unless must provide explicit memory barriers around the
+operation unless it fails (returns 0).
atomic_inc_not_zero, equivalent to atomic_add_unless(v, 1, 0)
@@ -460,9 +460,9 @@ the return value into an int. There are other places where things
like this occur as well.
These routines, like the atomic_t counter operations returning values,
-require explicit memory barrier semantics around their execution. All
-memory operations before the atomic bit operation call must be made
-visible globally before the atomic bit operation is made visible.
+must provide explicit memory barrier semantics around their execution.
+All memory operations before the atomic bit operation call must be
+made visible globally before the atomic bit operation is made visible.
Likewise, the atomic bit operation must be visible globally before any
subsequent memory operation is made visible. For example:
@@ -536,8 +536,9 @@ except that two underscores are prefixed to the interface name.
These non-atomic variants also do not require any special memory
barrier semantics.
-The routines xchg() and cmpxchg() need the same exact memory barriers
-as the atomic and bit operations returning values.
+The routines xchg() and cmpxchg() must provide the same exact
+memory-barrier semantics as the atomic and bit operations returning
+values.
Spinlocks and rwlocks have memory barrier expectations as well.
The rule to follow is simple:
diff --git a/Documentation/blackfin/Makefile b/Documentation/blackfin/Makefile
index 03f78059d6f5..6782c58fbc29 100644
--- a/Documentation/blackfin/Makefile
+++ b/Documentation/blackfin/Makefile
@@ -1,5 +1,5 @@
ifneq ($(CONFIG_BLACKFIN),)
-ifneq ($(CONFIG_BFIN_GPTIMERS,)
+ifneq ($(CONFIG_BFIN_GPTIMERS),)
obj-m := gptimers-example.o
endif
endif
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 5aabc08de811..fd12c0d835fd 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -48,8 +48,7 @@ Description of Contents:
- Highmem I/O support
- I/O scheduler modularization
1.2 Tuning based on high level requirements/capabilities
- 1.2.1 I/O Barriers
- 1.2.2 Request Priority/Latency
+ 1.2.1 Request Priority/Latency
1.3 Direct access/bypass to lower layers for diagnostics and special
device operations
1.3.1 Pre-built commands
@@ -255,29 +254,12 @@ some control over i/o ordering.
What kind of support exists at the generic block layer for this ?
The flags and rw fields in the bio structure can be used for some tuning
-from above e.g indicating that an i/o is just a readahead request, or for
-marking barrier requests (discussed next), or priority settings (currently
-unused). As far as user applications are concerned they would need an
-additional mechanism either via open flags or ioctls, or some other upper
-level mechanism to communicate such settings to block.
-
-1.2.1 I/O Barriers
-
-There is a way to enforce strict ordering for i/os through barriers.
-All requests before a barrier point must be serviced before the barrier
-request and any other requests arriving after the barrier will not be
-serviced until after the barrier has completed. This is useful for higher
-level control on write ordering, e.g flushing a log of committed updates
-to disk before the corresponding updates themselves.
-
-A flag in the bio structure, BIO_BARRIER is used to identify a barrier i/o.
-The generic i/o scheduler would make sure that it places the barrier request and
-all other requests coming after it after all the previous requests in the
-queue. Barriers may be implemented in different ways depending on the
-driver. For more details regarding I/O barriers, please read barrier.txt
-in this directory.
-
-1.2.2 Request Priority/Latency
+from above e.g indicating that an i/o is just a readahead request, or priority
+settings (currently unused). As far as user applications are concerned they
+would need an additional mechanism either via open flags or ioctls, or some
+other upper level mechanism to communicate such settings to block.
+
+1.2.1 Request Priority/Latency
Todo/Under discussion:
Arjan's proposed request priority scheme allows higher levels some broad
@@ -906,8 +888,8 @@ queue and specific I/O schedulers. Unless stated otherwise, elevator is used
to refer to both parts and I/O scheduler to specific I/O schedulers.
Block layer implements generic dispatch queue in block/*.c.
-The generic dispatch queue is responsible for properly ordering barrier
-requests, requeueing, handling non-fs requests and all other subtleties.
+The generic dispatch queue is responsible for requeueing, handling non-fs
+requests and all other subtleties.
Specific I/O schedulers are responsible for ordering normal filesystem
requests. They can also choose to delay certain requests to improve
diff --git a/Documentation/blockdev/nbd.txt b/Documentation/blockdev/nbd.txt
index 271e607304da..db242ea2bce8 100644
--- a/Documentation/blockdev/nbd.txt
+++ b/Documentation/blockdev/nbd.txt
@@ -1,17 +1,31 @@
- Network Block Device (TCP version)
-
- What is it: With this compiled in the kernel (or as a module), Linux
- can use a remote server as one of its block devices. So every time
- the client computer wants to read, e.g., /dev/nb0, it sends a
- request over TCP to the server, which will reply with the data read.
- This can be used for stations with low disk space (or even diskless)
- to borrow disk space from another computer.
- Unlike NFS, it is possible to put any filesystem on it, etc.
-
- For more information, or to download the nbd-client and nbd-server
- tools, go to http://nbd.sf.net/.
-
- The nbd kernel module need only be installed on the client
- system, as the nbd-server is completely in userspace. In fact,
- the nbd-server has been successfully ported to other operating
- systems, including Windows.
+Network Block Device (TCP version)
+==================================
+
+1) Overview
+-----------
+
+What is it: With this compiled in the kernel (or as a module), Linux
+can use a remote server as one of its block devices. So every time
+the client computer wants to read, e.g., /dev/nb0, it sends a
+request over TCP to the server, which will reply with the data read.
+This can be used for stations with low disk space (or even diskless)
+to borrow disk space from another computer.
+Unlike NFS, it is possible to put any filesystem on it, etc.
+
+For more information, or to download the nbd-client and nbd-server
+tools, go to http://nbd.sf.net/.
+
+The nbd kernel module need only be installed on the client
+system, as the nbd-server is completely in userspace. In fact,
+the nbd-server has been successfully ported to other operating
+systems, including Windows.
+
+A) NBD parameters
+-----------------
+
+max_part
+ Number of partitions per device (default: 0).
+
+nbds_max
+ Number of block devices that should be initialized (default: 16).
+
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 7fcf9c6592ec..48a183e29988 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -98,20 +98,79 @@ size of the disk when not in use so a huge zram is wasteful.
mount /dev/zram1 /tmp
7) Stats:
- Per-device statistics are exported as various nodes under
- /sys/block/zram<id>/
- disksize
- num_reads
- num_writes
- failed_reads
- failed_writes
- invalid_io
- notify_free
- zero_pages
- orig_data_size
- compr_data_size
- mem_used_total
- mem_used_max
+Per-device statistics are exported as various nodes under /sys/block/zram<id>/
+
+A brief description of the exported device attributes follows. For more details please
+read Documentation/ABI/testing/sysfs-block-zram.
+
+Name access description
+---- ------ -----------
+disksize RW show and set the device's disk size
+initstate RO shows the initialization state of the device
+reset WO trigger device reset
+num_reads RO the number of reads
+failed_reads RO the number of failed reads
+num_writes RO the number of writes
+failed_writes RO the number of failed writes
+invalid_io RO the number of non-page-size-aligned I/O requests
+max_comp_streams RW the number of possible concurrent compress operations
+comp_algorithm RW show and change the compression algorithm
+notify_free RO the number of notifications to free pages (either
+ slot free notifications or REQ_DISCARD requests)
+zero_pages RO the number of zero filled pages written to this disk
+orig_data_size RO uncompressed size of data stored in this disk
+compr_data_size RO compressed size of data stored in this disk
+mem_used_total RO the amount of memory allocated for this disk
+mem_used_max RW the maximum amount of memory zram has consumed to
+ store compressed data
+mem_limit RW the maximum amount of memory ZRAM can use to store
+ the compressed data
+num_migrated RO the number of objects migrated by compaction
+
+
+WARNING
+=======
+Per-stat sysfs attributes are considered to be deprecated.
+The basic strategy is:
+-- the existing RW nodes will be downgraded to WO nodes (in linux 4.11)
+-- deprecated RO sysfs nodes will eventually be removed (in linux 4.11)
+
+The list of deprecated attributes can be found here:
+Documentation/ABI/obsolete/sysfs-block-zram
+
+Basically, every attribute that has its own read accessible sysfs node
+(e.g. num_reads) *AND* is accessible via one of the stat files (zram<id>/stat
+or zram<id>/io_stat or zram<id>/mm_stat) is considered to be deprecated.
+
+User space is advised to use the following files to read the device statistics.
+
+File /sys/block/zram<id>/stat
+
+Represents block layer statistics. Read Documentation/block/stat.txt for
+details.
+
+File /sys/block/zram<id>/io_stat
+
+The stat file represents the device's I/O statistics not accounted by the
+block layer and, thus, not available in the zram<id>/stat file. It consists of a
+single line of text and contains the following stats separated by
+whitespace:
+ failed_reads
+ failed_writes
+ invalid_io
+ notify_free
+
+File /sys/block/zram<id>/mm_stat
+
+The stat file represents the device's mm statistics. It consists of a single
+line of text and contains the following stats separated by whitespace:
+ orig_data_size
+ compr_data_size
+ mem_used_total
+ mem_limit
+ mem_used_max
+ zero_pages
+ num_migrated
8) Deactivate:
swapoff /dev/zram0
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index a22df3ad35ff..f456b4315e86 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -275,11 +275,6 @@ When oom event notifier is registered, event will be delivered.
2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM)
-WARNING: Current implementation lacks reclaim support. That means allocation
- attempts will fail when close to the limit even if there are plenty of
- kmem available for reclaim. That makes this option unusable in real
- life so DO NOT SELECT IT unless for development purposes.
-
With the Kernel memory extension, the Memory Controller is able to limit
the amount of kernel memory used by the system. Kernel memory is fundamentally
different than user memory, since it can't be swapped out, which makes it
@@ -345,6 +340,9 @@ set:
In this case, the admin could set up K so that the sum of all groups is
never greater than the total memory, and freely set U at the cost of his
QoS.
+ WARNING: In the current implementation, memory reclaim will NOT be
+ triggered for a cgroup when it hits K while staying below U, which makes
+ this setup impractical.
U != 0, K >= U:
Since kmem charges will also be fed to the user counter and reclaim will be
diff --git a/Documentation/cma/debugfs.txt b/Documentation/cma/debugfs.txt
new file mode 100644
index 000000000000..6cef20a8cedc
--- /dev/null
+++ b/Documentation/cma/debugfs.txt
@@ -0,0 +1,21 @@
+The CMA debugfs interface is useful to retrieve basic information out of the
+different CMA areas and to test allocation/release in each of the areas.
+
+Each CMA zone is represented by a directory under <debugfs>/cma/, indexed by
+the kernel's CMA index. So the first CMA zone would be:
+
+ <debugfs>/cma/cma-0
+
+The structure of the files created under that directory is as follows:
+
+ - [RO] base_pfn: The base PFN (Page Frame Number) of the zone.
+ - [RO] count: Amount of memory in the CMA area.
+ - [RO] order_per_bit: Order of pages represented by one bit.
+ - [RO] bitmap: The bitmap of page states in the zone.
+ - [WO] alloc: Allocate N pages from that CMA area. For example:
+
+ echo 5 > <debugfs>/cma/cma-2/alloc
+
+would try to allocate 5 pages from the cma-2 area.
+
+ - [WO] free: Free N pages from that CMA area, similar to the above.
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a0b005d2bd95..f9ad5e048b11 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -108,7 +108,7 @@ Never use anything other than cpumask_t to represent bitmap of CPUs.
for_each_possible_cpu - Iterate over cpu_possible_mask
for_each_online_cpu - Iterate over cpu_online_mask
for_each_present_cpu - Iterate over cpu_present_mask
- for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
+ for_each_cpu(x,mask) - Iterate over CPUs in an arbitrary cpumask.
#include <linux/cpu.h>
get_online_cpus() and put_online_cpus():
diff --git a/Documentation/crypto/crypto-API-userspace.txt b/Documentation/crypto/crypto-API-userspace.txt
deleted file mode 100644
index ac619cd90300..000000000000
--- a/Documentation/crypto/crypto-API-userspace.txt
+++ /dev/null
@@ -1,205 +0,0 @@
-Introduction
-============
-
-The concepts of the kernel crypto API visible to kernel space is fully
-applicable to the user space interface as well. Therefore, the kernel crypto API
-high level discussion for the in-kernel use cases applies here as well.
-
-The major difference, however, is that user space can only act as a consumer
-and never as a provider of a transformation or cipher algorithm.
-
-The following covers the user space interface exported by the kernel crypto
-API. A working example of this description is libkcapi that can be obtained from
-[1]. That library can be used by user space applications that require
-cryptographic services from the kernel.
-
-Some details of the in-kernel kernel crypto API aspects do not
-apply to user space, however. This includes the difference between synchronous
-and asynchronous invocations. The user space API call is fully synchronous.
-In addition, only a subset of all cipher types are available as documented
-below.
-
-
-User space API general remarks
-==============================
-
-The kernel crypto API is accessible from user space. Currently, the following
-ciphers are accessible:
-
- * Message digest including keyed message digest (HMAC, CMAC)
-
- * Symmetric ciphers
-
-Note, AEAD ciphers are currently not supported via the symmetric cipher
-interface.
-
-The interface is provided via Netlink using the type AF_ALG. In addition, the
-setsockopt option type is SOL_ALG. In case the user space header files do not
-export these flags yet, use the following macros:
-
-#ifndef AF_ALG
-#define AF_ALG 38
-#endif
-#ifndef SOL_ALG
-#define SOL_ALG 279
-#endif
-
-A cipher is accessed with the same name as done for the in-kernel API calls.
-This includes the generic vs. unique naming schema for ciphers as well as the
-enforcement of priorities for generic names.
-
-To interact with the kernel crypto API, a Netlink socket must be created by
-the user space application. User space invokes the cipher operation with the
-send/write system call family. The result of the cipher operation is obtained
-with the read/recv system call family.
-
-The following API calls assume that the Netlink socket descriptor is already
-opened by the user space application and discusses only the kernel crypto API
-specific invocations.
-
-To initialize a Netlink interface, the following sequence has to be performed
-by the consumer:
-
- 1. Create a socket of type AF_ALG with the struct sockaddr_alg parameter
- specified below for the different cipher types.
-
- 2. Invoke bind with the socket descriptor
-
- 3. Invoke accept with the socket descriptor. The accept system call
- returns a new file descriptor that is to be used to interact with
- the particular cipher instance. When invoking send/write or recv/read
- system calls to send data to the kernel or obtain data from the
- kernel, the file descriptor returned by accept must be used.
-
-In-place cipher operation
-=========================
-
-Just like the in-kernel operation of the kernel crypto API, the user space
-interface allows the cipher operation in-place. That means that the input buffer
-used for the send/write system call and the output buffer used by the read/recv
-system call may be one and the same. This is of particular interest for
-symmetric cipher operations where a copying of the output data to its final
-destination can be avoided.
-
-If a consumer on the other hand wants to maintain the plaintext and the
-ciphertext in different memory locations, all a consumer needs to do is to
-provide different memory pointers for the encryption and decryption operation.
-
-Message digest API
-==================
-
-The message digest type to be used for the cipher operation is selected when
-invoking the bind syscall. bind requires the caller to provide a filled
-struct sockaddr data structure. This data structure must be filled as follows:
-
-struct sockaddr_alg sa = {
- .salg_family = AF_ALG,
- .salg_type = "hash", /* this selects the hash logic in the kernel */
- .salg_name = "sha1" /* this is the cipher name */
-};
-
-The salg_type value "hash" applies to message digests and keyed message digests.
-Though, a keyed message digest is referenced by the appropriate salg_name.
-Please see below for the setsockopt interface that explains how the key can be
-set for a keyed message digest.
-
-Using the send() system call, the application provides the data that should be
-processed with the message digest. The send system call allows the following
-flags to be specified:
-
- * MSG_MORE: If this flag is set, the send system call acts like a
- message digest update function where the final hash is not
- yet calculated. If the flag is not set, the send system call
- calculates the final message digest immediately.
-
-With the recv() system call, the application can read the message digest from
-the kernel crypto API. If the buffer is too small for the message digest, the
-flag MSG_TRUNC is set by the kernel.
-
-In order to set a message digest key, the calling application must use the
-setsockopt() option of ALG_SET_KEY. If the key is not set the HMAC operation is
-performed without the initial HMAC state change caused by the key.
-
-
-Symmetric cipher API
-====================
-
-The operation is very similar to the message digest discussion. During
-initialization, the struct sockaddr data structure must be filled as follows:
-
-struct sockaddr_alg sa = {
- .salg_family = AF_ALG,
- .salg_type = "skcipher", /* this selects the symmetric cipher */
- .salg_name = "cbc(aes)" /* this is the cipher name */
-};
-
-Before data can be sent to the kernel using the write/send system call family,
-the consumer must set the key. The key setting is described with the setsockopt
-invocation below.
-
-Using the sendmsg() system call, the application provides the data that should
-be processed for encryption or decryption. In addition, the IV is specified
-with the data structure provided by the sendmsg() system call.
-
-The sendmsg system call parameter of struct msghdr is embedded into the
-struct cmsghdr data structure. See recv(2) and cmsg(3) for more information
-on how the cmsghdr data structure is used together with the send/recv system
-call family. That cmsghdr data structure holds the following information
-specified with a separate header instances:
-
- * specification of the cipher operation type with one of these flags:
- ALG_OP_ENCRYPT - encryption of data
- ALG_OP_DECRYPT - decryption of data
-
- * specification of the IV information marked with the flag ALG_SET_IV
-
-The send system call family allows the following flag to be specified:
-
- * MSG_MORE: If this flag is set, the send system call acts like a
- cipher update function where more input data is expected
- with a subsequent invocation of the send system call.
-
-Note: The kernel reports -EINVAL for any unexpected data. The caller must
-make sure that all data matches the constraints given in /proc/crypto for the
-selected cipher.
-
-With the recv() system call, the application can read the result of the
-cipher operation from the kernel crypto API. The output buffer must be at least
-as large as to hold all blocks of the encrypted or decrypted data. If the output
-data size is smaller, only as many blocks are returned that fit into that
-output buffer size.
-
-Setsockopt interface
-====================
-
-In addition to the read/recv and send/write system call handling to send and
-retrieve data subject to the cipher operation, a consumer also needs to set
-the additional information for the cipher operation. This additional information
-is set using the setsockopt system call that must be invoked with the file
-descriptor of the open cipher (i.e. the file descriptor returned by the
-accept system call).
-
-Each setsockopt invocation must use the level SOL_ALG.
-
-The setsockopt interface allows setting the following data using the mentioned
-optname:
-
- * ALG_SET_KEY -- Setting the key. Key setting is applicable to:
-
- - the skcipher cipher type (symmetric ciphers)
-
- - the hash cipher type (keyed message digests)
-
-User space API example
-======================
-
-Please see [1] for libkcapi which provides an easy-to-use wrapper around the
-aforementioned Netlink kernel interface. [1] also contains a test application
-that invokes all libkcapi API calls.
-
-[1] http://www.chronox.de/libkcapi.html
-
-Author
-======
-
-Stephan Mueller <smueller@chronox.de>
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index ad697781f9ac..692171fe9da0 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -5,7 +5,7 @@ Device-Mapper's "crypt" target provides transparent encryption of block devices
using the kernel crypto API.
For a more detailed description of supported parameters see:
-http://code.google.com/p/cryptsetup/wiki/DMCrypt
+https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt
Parameters: <cipher> <key> <iv_offset> <device path> \
<offset> [<#opt_params> <opt_params>]
@@ -80,7 +80,7 @@ Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
encryption with dm-crypt using the 'cryptsetup' utility, see
-http://code.google.com/p/cryptsetup/
+https://gitlab.com/cryptsetup/cryptsetup
[[
#!/bin/sh
diff --git a/Documentation/device-mapper/log-writes.txt b/Documentation/device-mapper/log-writes.txt
new file mode 100644
index 000000000000..c10f30c9b534
--- /dev/null
+++ b/Documentation/device-mapper/log-writes.txt
@@ -0,0 +1,140 @@
+dm-log-writes
+=============
+
+This target takes 2 devices, one to pass all IO to normally, and one to log all
+of the write operations to. This is intended for file system developers wishing
+to verify the integrity of metadata or data as the file system is written to.
+There is a log_write_entry written for every WRITE request and the target is
+able to take arbitrary data from userspace to insert into the log. The data
+that is in the WRITE requests is copied into the log to make the replay happen
+exactly as it happened originally.
+
+Log Ordering
+============
+
+We log things in order of completion once we are sure the write is no longer in
+cache. This means that normal WRITE requests are not actually logged until the
+next REQ_FLUSH request. This makes it easier for userspace to replay the
+log in a way that correlates to what is on disk rather than what is in cache,
+which in turn makes it easier to detect improper waiting/flushing.
+
+This works by attaching all WRITE requests to a list once the write completes.
+Once we see a REQ_FLUSH request we splice this list onto the request and once
+the FLUSH request completes we log all of the WRITEs and then the FLUSH. Only
+completed WRITEs, at the time the REQ_FLUSH is issued, are added in order to
+simulate the worst case scenario with regard to power failures. Consider the
+following example (W means write, C means complete):
+
+W1,W2,W3,C3,C2,Wflush,C1,Cflush
+
+The log would show the following
+
+W3,W2,flush,W1....
+
+Again, this is to simulate what is actually on disk; it allows us to detect
+cases where a power failure at a particular point in time would create an
+inconsistent file system.
+
+Any REQ_FUA requests bypass this flushing mechanism and are logged as soon as
+they complete as those requests will obviously bypass the device cache.
+
+Any REQ_DISCARD requests are treated like WRITE requests. Otherwise we would
+have all the DISCARD requests, and then the WRITE requests and then the FLUSH
+request. Consider the following example:
+
+WRITE block 1, DISCARD block 1, FLUSH
+
+If we logged DISCARD when it completed, the replay would look like this
+
+DISCARD 1, WRITE 1, FLUSH
+
+which isn't quite what happened and wouldn't be caught during the log replay.
+
+Target interface
+================
+
+i) Constructor
+
+ log-writes <dev_path> <log_dev_path>
+
+ dev_path : Device that all of the IO will go to normally.
+ log_dev_path : Device where the log entries are written to.
+
+ii) Status
+
+ <#logged entries> <highest allocated sector>
+
+ #logged entries : Number of logged entries
+ highest allocated sector : Highest allocated sector
+
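+ For illustration, querying a log-writes device named "log" (as created in
+ the examples below) with dmsetup might produce output like the following.
+ The leading "0 <length> log-writes" is added by dmsetup itself and the
+ numbers are hypothetical; the two trailing fields are the ones described
+ above:
+
+ dmsetup status log
+ 0 104857600 log-writes 2 8192
+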
+iii) Messages
+
+ mark <description>
+
+ You can use a dmsetup message to set an arbitrary mark in a log.
+ For example, say you want to fsck a file system after every
+ write. First you need to replay up to the mkfs mark to make sure
+ you're fsck'ing something reasonable, so you would do something
+ like this:
+
+ mkfs.btrfs -f /dev/mapper/log
+ dmsetup message log 0 mark mkfs
+ <run test>
+
+ This would allow you to replay the log up to the mkfs mark and
+ then replay from that point on doing the fsck check in the
+ interval that you want.
+
+ Every log has a mark at the end labeled "dm-log-writes-end".
+
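+ For instance, with the replay tool described in the next section, the
+ built-in end mark can be used to replay everything that was logged. This
+ is a hypothetical invocation using only the flags shown elsewhere in this
+ document and the device names from the examples below:
+
+ replay-log --log /dev/sdc --replay /dev/sdb --end-mark dm-log-writes-end
+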
+Userspace component
+===================
+
+There is a userspace tool that will replay the log for you in various ways.
+It can be found here: https://github.com/josefbacik/log-writes
+
+Example usage
+=============
+
+Say you want to test fsync on your file system. You would do something like
+this:
+
+TABLE="0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc"
+dmsetup create log --table "$TABLE"
+mkfs.btrfs -f /dev/mapper/log
+dmsetup message log 0 mark mkfs
+
+mount /dev/mapper/log /mnt/btrfs-test
+<some test that does fsync at the end>
+dmsetup message log 0 mark fsync
+md5sum /mnt/btrfs-test/foo
+umount /mnt/btrfs-test
+
+dmsetup remove log
+replay-log --log /dev/sdc --replay /dev/sdb --end-mark fsync
+mount /dev/sdb /mnt/btrfs-test
+md5sum /mnt/btrfs-test/foo
+<verify md5sum's are correct>
+
+Another option is to do a complicated file system operation and verify the file
+system is consistent during the entire operation. You could do this with:
+
+TABLE="0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc"
+dmsetup create log --table "$TABLE"
+mkfs.btrfs -f /dev/mapper/log
+dmsetup message log 0 mark mkfs
+
+mount /dev/mapper/log /mnt/btrfs-test
+<fsstress to dirty the fs>
+btrfs filesystem balance /mnt/btrfs-test
+umount /mnt/btrfs-test
+dmsetup remove log
+
+replay-log --log /dev/sdc --replay /dev/sdb --end-mark mkfs
+btrfsck /dev/sdb
+replay-log --log /dev/sdc --replay /dev/sdb --start-mark mkfs \
+ --fsck "btrfsck /dev/sdb" --check fua
+
+That will replay the log until it sees a FUA request, run the fsck command,
+and, if the fsck passes, replay on to the next FUA, repeating until the replay
+is complete or the fsck command exits abnormally.
diff --git a/Documentation/device-mapper/switch.txt b/Documentation/device-mapper/switch.txt
index 8897d0494838..424835e57f27 100644
--- a/Documentation/device-mapper/switch.txt
+++ b/Documentation/device-mapper/switch.txt
@@ -47,8 +47,8 @@ consume far too much memory.
Using this device-mapper switch target we can now build a two-layer
device hierarchy:
- Upper Tier – Determine which array member the I/O should be sent to.
- Lower Tier – Load balance amongst paths to a particular member.
+ Upper Tier - Determine which array member the I/O should be sent to.
+ Lower Tier - Load balance amongst paths to a particular member.
The lower tier consists of a single dm multipath device for each member.
Each of these multipath devices contains the set of paths directly to
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 2f5173500bd9..4f67578b2954 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -380,9 +380,6 @@ then you'll have no access to blocks mapped beyond the end. If you
load a target that is bigger than before, then extra blocks will be
provisioned as and when needed.
-If you wish to reduce the size of your thin device and potentially
-regain some space then send the 'trim' message to the pool.
-
ii) Status
<nr mapped sectors> <highest mapped sector>
diff --git a/Documentation/device-mapper/verity.txt b/Documentation/device-mapper/verity.txt
index 9884681535ee..e15bc1a0fb98 100644
--- a/Documentation/device-mapper/verity.txt
+++ b/Documentation/device-mapper/verity.txt
@@ -11,6 +11,7 @@ Construction Parameters
<data_block_size> <hash_block_size>
<num_data_blocks> <hash_start_block>
<algorithm> <digest> <salt>
+ [<#opt_params> <opt_params>]
<version>
This is the type of the on-disk hash format.
@@ -62,6 +63,22 @@ Construction Parameters
<salt>
The hexadecimal encoding of the salt value.
+<#opt_params>
+ Number of optional parameters. If there are no optional parameters,
+ the optional parameters section can be skipped or #opt_params can be zero.
+ Otherwise #opt_params is the number of following arguments (a table sketch
+ showing their placement follows these parameter descriptions).
+
+ Example of optional parameters section:
+ 1 ignore_corruption
+
+ignore_corruption
+ Log corrupted blocks, but allow read operations to proceed normally.
+
+restart_on_corruption
+ Restart the system when a corrupted block is discovered. This option is
+ not compatible with ignore_corruption and requires user space support to
+ avoid restart loops.
+
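+ A minimal, hypothetical sketch of where the optional parameters are
+ appended in a dmsetup table for this target. The device paths, sizes,
+ <root_hash> and <salt> below are placeholders, not values taken from this
+ document:
+
+ # 1 GiB of 4096-byte data blocks, hash area starting at hash block 1;
+ # "1 ignore_corruption" after the salt logs corrupted blocks instead of
+ # failing the read.
+ TABLE="0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256"
+ TABLE="$TABLE <root_hash> <salt> 1 ignore_corruption"
+ dmsetup create vroot --readonly --table "$TABLE"
+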
Theory of operation
===================
@@ -125,7 +142,7 @@ block boundary) are the hash blocks which are stored a depth at a time
The full specification of kernel parameters and on-disk metadata format
is available at the cryptsetup project's wiki page
- http://code.google.com/p/cryptsetup/wiki/DMVerity
+ https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity
Status
======
@@ -142,7 +159,7 @@ Set up a device:
A command line tool veritysetup is available to compute or verify
the hash tree or activate the kernel device. This is available from
-the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+the cryptsetup upstream repository https://gitlab.com/cryptsetup/cryptsetup/
(as a libcryptsetup extension).
Create hash on the device:
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
new file mode 100644
index 000000000000..7b9588444f20
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -0,0 +1,20 @@
+* ARC Performance Counters
+
+The ARC700 can be configured with a pipeline performance monitor for counting
+CPU and cache events like cache misses and hits. Like a conventional PCT, there
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
+
+Note that:
+ * The ARC 700 PCT does not support interrupts; although HW events may be
+ counted, the HW events themselves cannot serve as a trigger for a sample.
+
+Required properties:
+
+- compatible : should contain
+ "snps,arc700-pct"
+
+Example:
+
+pmu {
+ compatible = "snps,arc700-pct";
+};
diff --git a/Documentation/devicetree/bindings/arc/pmu.txt b/Documentation/devicetree/bindings/arc/pmu.txt
deleted file mode 100644
index 49d517340de3..000000000000
--- a/Documentation/devicetree/bindings/arc/pmu.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-* ARC Performance Monitor Unit
-
-The ARC 700 can be configured with a pipeline performance monitor for counting
-CPU and cache events like cache misses and hits.
-
-Note that:
- * ARC 700 refers to a family of ARC processor cores;
- - There is only one type of PMU available for the whole family;
- - The PMU may support different sets of events; supported events are probed
- at boot time, as required by the reference manual.
-
- * The ARC 700 PMU does not support interrupts; although HW events may be
- counted, the HW events themselves cannot serve as a trigger for a sample.
-
-Required properties:
-
-- compatible : should contain
- "snps,arc700-pmu"
-
-Example:
-
-pmu {
- compatible = "snps,arc700-pmu";
-};
diff --git a/Documentation/devicetree/bindings/arm/al,alpine.txt b/Documentation/devicetree/bindings/arm/al,alpine.txt
new file mode 100644
index 000000000000..f404a4f9b165
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/al,alpine.txt
@@ -0,0 +1,88 @@
+Annapurna Labs Alpine Platform Device Tree Bindings
+---------------------------------------------------------------
+
+Boards in the Alpine family shall have the following properties:
+
+* Required root node properties:
+compatible: must contain "al,alpine"
+
+* Example:
+
+/ {
+ model = "Annapurna Labs Alpine Dev Board";
+ compatible = "al,alpine";
+
+ ...
+};
+
+* CPU node:
+
+The Alpine platform includes Cortex-A15 cores.
+enable-method: must be "al,alpine-smp" to allow SMP [1]
+
+Example:
+
+cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "al,alpine-smp";
+
+ cpu@0 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <3>;
+ };
+};
+
+
+* Alpine CPU resume registers
+
+The CPU resume registers are used to define the required resume address after
+reset.
+
+Properties:
+- compatible : Should contain "al,alpine-cpu-resume".
+- reg : Offset and length of the register set for the device
+
+Example:
+
+cpu_resume {
+ compatible = "al,alpine-cpu-resume";
+ reg = <0xfbff5ed0 0x30>;
+};
+
+* Alpine System-Fabric Service Registers
+
+The System-Fabric Service Registers allow various operations on the CPU and
+system fabric, like powering CPUs off.
+
+Properties:
+- compatible : Should contain "al,alpine-sysfabric-service" and "syscon".
+- reg : Offset and length of the register set for the device
+
+Example:
+
+nb_service {
+ compatible = "al,alpine-sysfabric-service", "syscon";
+ reg = <0xfb070000 0x10000>;
+};
+
+[1] arm/cpu-enable-method/al,alpine-smp
diff --git a/Documentation/devicetree/bindings/arm/altera.txt b/Documentation/devicetree/bindings/arm/altera.txt
new file mode 100644
index 000000000000..558735aacca8
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/altera.txt
@@ -0,0 +1,14 @@
+Altera's SoCFPGA platform device tree bindings
+---------------------------------------------
+
+Boards with Cyclone 5 SoC:
+Required root node properties:
+compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+
+Boards with Arria 5 SoC:
+Required root node properties:
+compatible = "altr,socfpga-arria5", "altr,socfpga";
+
+Boards with Arria 10 SoC:
+Required root node properties:
+compatible = "altr,socfpga-arria10", "altr,socfpga";
diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt
index 8fe815046140..973884a1bacf 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.txt
+++ b/Documentation/devicetree/bindings/arm/amlogic.txt
@@ -8,3 +8,7 @@ Boards with the Amlogic Meson6 SoC shall have the following properties:
Boards with the Amlogic Meson8 SoC shall have the following properties:
Required root node property:
compatible: "amlogic,meson8";
+
+Board compatible values:
+ - "geniatech,atv1200"
+ - "minix,neo-x8"
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index 256b4d8bab7b..e774128935d5 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -17,7 +17,10 @@ to deliver its interrupts via SPIs.
- interrupts : Interrupt list for secure, non-secure, virtual and
hypervisor timers, in that order.
-- clock-frequency : The frequency of the main counter, in Hz. Optional.
+- clock-frequency : The frequency of the main counter, in Hz. Should be present
+ only where necessary to work around broken firmware which does not configure
+ CNTFRQ on all CPUs to a uniform correct value. Use of this property is
+ strongly discouraged; fix your firmware unless absolutely impossible.
- always-on : a boolean property. If present, the timer is powered through an
always-on power domain, therefore it never loses context.
@@ -46,7 +49,8 @@ Example:
- compatible : Should at least contain "arm,armv7-timer-mem".
-- clock-frequency : The frequency of the main counter, in Hz. Optional.
+- clock-frequency : The frequency of the main counter, in Hz. Should be present
+ only when firmware has not configured the MMIO CNTFRQ registers.
- reg : The control frame base address.
diff --git a/Documentation/devicetree/bindings/arm/armada-39x.txt b/Documentation/devicetree/bindings/arm/armada-39x.txt
new file mode 100644
index 000000000000..53d4ff9ea8ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-39x.txt
@@ -0,0 +1,20 @@
+Marvell Armada 39x Platforms Device Tree Bindings
+-------------------------------------------------
+
+Boards with a SoC of the Marvell Armada 39x family shall have the
+following property:
+
+Required root node property:
+
+ - compatible: must contain "marvell,armada390"
+
+In addition, boards using the Marvell Armada 398 SoC shall have the
+following property before the previous one:
+
+Required root node property:
+
+ - compatible: must contain "marvell,armada398"
+
+Example:
+
+compatible = "marvell,a398-db", "marvell,armada398", "marvell,armada390";
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index ad319f84f560..2e99b5b57350 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -46,10 +46,12 @@ PIT Timer required properties:
shared across all System Controller members.
System Timer (ST) required properties:
-- compatible: Should be "atmel,at91rm9200-st"
+- compatible: Should be "atmel,at91rm9200-st", "syscon", "simple-mfd"
- reg: Should contain registers location and length
- interrupts: Should contain interrupt for the ST which is the IRQ line
shared across all System Controller members.
+Its subnodes can be:
+- watchdog: compatible should be "atmel,at91rm9200-wdt"
TC/TCLIB Timer required properties:
- compatible: Should be "atmel,<chip>-tcb".
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt
index 8240c023e202..8240c023e202 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt
diff --git a/Documentation/devicetree/bindings/arm/bcm/bcm11351.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.txt
index 0ff6560e6094..0ff6560e6094 100644
--- a/Documentation/devicetree/bindings/arm/bcm/bcm11351.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.txt
diff --git a/Documentation/devicetree/bindings/arm/bcm/bcm21664.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.txt
index e0774255e1a6..e0774255e1a6 100644
--- a/Documentation/devicetree/bindings/arm/bcm/bcm21664.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.txt
diff --git a/Documentation/devicetree/bindings/arm/bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
index ac683480c486..ac683480c486 100644
--- a/Documentation/devicetree/bindings/arm/bcm2835.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
diff --git a/Documentation/devicetree/bindings/arm/bcm4708.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.txt
index 6b0f49f6f499..6b0f49f6f499 100644
--- a/Documentation/devicetree/bindings/arm/bcm4708.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.txt
diff --git a/Documentation/devicetree/bindings/arm/bcm/bcm63138.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
index bd49987a8812..bd49987a8812 100644
--- a/Documentation/devicetree/bindings/arm/bcm/bcm63138.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
diff --git a/Documentation/devicetree/bindings/arm/brcm-brcmstb.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,brcmstb.txt
index 430608ec09f0..430608ec09f0 100644
--- a/Documentation/devicetree/bindings/arm/brcm-brcmstb.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,brcmstb.txt
diff --git a/Documentation/devicetree/bindings/arm/bcm/cygnus.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.txt
index 4c77169bb534..4c77169bb534 100644
--- a/Documentation/devicetree/bindings/arm/bcm/cygnus.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.txt
diff --git a/Documentation/devicetree/bindings/arm/cci.txt b/Documentation/devicetree/bindings/arm/cci.txt
index f28d82bbbc56..3c5c631328d3 100644
--- a/Documentation/devicetree/bindings/arm/cci.txt
+++ b/Documentation/devicetree/bindings/arm/cci.txt
@@ -94,8 +94,11 @@ specific to ARM.
- compatible
Usage: required
Value type: <string>
- Definition: must be "arm,cci-400-pmu"
-
+ Definition: Must contain one of:
+ "arm,cci-400-pmu,r0"
+ "arm,cci-400-pmu,r1"
+ "arm,cci-400-pmu" - DEPRECATED, permitted only where OS has
+ secure acces to CCI registers
- reg:
Usage: required
Value type: Integer cells. A register entry, expressed
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index a3089359aaa6..88602b75418e 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -61,7 +61,6 @@ Example:
compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0 0x20010000 0 0x1000>;
- coresight-default-sink;
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
port {
diff --git a/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp b/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
new file mode 100644
index 000000000000..c2e0cc5e4cfd
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
@@ -0,0 +1,52 @@
+========================================================
+Secondary CPU enable-method "al,alpine-smp" binding
+========================================================
+
+This document describes the "al,alpine-smp" method for
+enabling secondary CPUs. To apply to all CPUs, a single
+"al,alpine-smp" enable method should be defined in the
+"cpus" node.
+
+Enable method name: "al,alpine-smp"
+Compatible machines: "al,alpine"
+Compatible CPUs: "arm,cortex-a15"
+Related properties: (none)
+
+Note:
+This enable method requires valid nodes compatible with
+"al,alpine-cpu-resume" and "al,alpine-nb-service"[1].
+
+Example:
+
+cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "al,alpine-smp";
+
+ cpu@0 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <3>;
+ };
+};
+
+--
+[1] arm/al,alpine.txt
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 8b9e0a95de31..6aa331d11c5e 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -192,6 +192,7 @@ nodes to be present and contain the properties described below.
"brcm,brahma-b15"
"marvell,armada-375-smp"
"marvell,armada-380-smp"
+ "marvell,armada-390-smp"
"marvell,armada-xp-smp"
"qcom,gcc-msm8660"
"qcom,kpss-acc-v1"
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index 1e097037349c..5da38c5ed476 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -22,6 +22,9 @@ Optional Properties:
- pclkN, clkN: Pairs of parent of input clock and input clock to the
devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
are supported currently.
+ - asbN: Clocks required by asynchronous bridges (ASB) present in
+ the power domain. These clocks should be enabled during power
+ domain on/off operations.
- power-domains: phandle pointing to the parent power domain, for more details
see Documentation/devicetree/bindings/power/power_domain.txt
diff --git a/Documentation/devicetree/bindings/arm/geniatech.txt b/Documentation/devicetree/bindings/arm/geniatech.txt
deleted file mode 100644
index 74ccba40b73b..000000000000
--- a/Documentation/devicetree/bindings/arm/geniatech.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Geniatech platforms device tree bindings
--------------------------------------------
-
-Geniatech ATV1200
- - compatible = "geniatech,atv1200"
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index 1e0d21201d3a..2da059a4790c 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -18,6 +18,8 @@ Main node required properties:
"arm,arm11mp-gic"
"brcm,brahma-b15-gic"
"arm,arm1176jzf-devchip-gic"
+ "qcom,msm-8660-qgic"
+ "qcom,msm-qgic2"
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The type shall be a <u32> and the value shall be 3.
diff --git a/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt b/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
index 925ecbf6e7b7..4f40ff3fee4b 100644
--- a/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
+++ b/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
@@ -42,6 +42,7 @@ board. Currently known boards are:
"lacie,cloudbox"
"lacie,inetspace_v2"
"lacie,laplug"
+"lacie,nas2big"
"lacie,netspace_lite_v2"
"lacie,netspace_max_v2"
"lacie,netspace_mini_v2"
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
new file mode 100644
index 000000000000..06df04cc827a
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
@@ -0,0 +1,84 @@
+QCOM Idle States for cpuidle driver
+
+ARM provides an idle-state node to define the cpuidle states, as defined in [1].
+cpuidle-qcom is the cpuidle driver for Qualcomm SoCs and uses these idle
+states. Idle states have different enter/exit latency and residency values.
+The idle states supported by the QCOM SoC are defined as -
+
+ * Standby
+ * Retention
+ * Standalone Power Collapse (Standalone PC or SPC)
+ * Power Collapse (PC)
+
+Standby: Standby does a little more in addition to architectural clock gating.
+When the WFI instruction is executed the ARM core would gate its internal
+clocks. In addition to gating the clocks, QCOM cpus use this instruction as a
+trigger to execute the SPM state machine. The SPM state machine waits for the
+interrupt to trigger the core back into the active state. This triggers the
+cache hierarchy to enter standby states when all cpus are idle. An interrupt
+brings the SPM state machine out of its wait; the next step is to ensure that
+the cache hierarchy is also out of standby, and then the cpu is allowed to
+resume execution. This state is defined as a generic ARM WFI state by the ARM
+cpuidle driver and is not defined in the DT. The SPM state machine should be
+configured to execute this state by default and after executing every other
+state below.
+
+Retention: Retention is a low power state where the core is clock gated and
+the memory and the registers associated with the core are retained. The
+voltage may be reduced to the minimum value needed to keep the processor
+registers active. The SPM should be configured to execute the retention
+sequence and wait for an interrupt before restoring the cpu to the execution
+state. Retention may have a slightly higher latency than Standby.
+
+Standalone PC: A cpu can power down and warmboot if there is sufficient time
+between the point it enters idle and the next known wake up. SPC mode is used
+to indicate a core entering a power down state without consulting any other
+cpu or the system resources. This helps save power only on that core. The SPM
+sequence for this idle state is programmed to power down the supply to the
+core, wait for the interrupt, restore power to the core, and ensure the
+system state including the cache hierarchy is ready before allowing the core
+to resume. Applying power and resetting the core causes the core to warmboot
+back into the Elevation Level (EL), which trampolines control back to the
+kernel. Entering a power down state for the cpu needs to be done by trapping
+into an EL. Failing to do so would result in a crash enforced by the warm boot
+code in the EL for the SoC. On SoCs with a write-back L1 cache, the cache has
+to be flushed in s/w before powering down the core.
+
+Power Collapse: This state is similar to the SPC mode, but distinguishes
+itself in that the cpu acknowledges and permits the SoC to enter deeper sleep
+modes. In a hierarchical power domain SoC, this means L2 and other caches can
+be flushed, system bus, clocks - lowered, and SoC main XO clock gated and
+voltages reduced, provided all cpus enter this state. Since the span of low
+power modes possible at this state is vast, the exit latency and the residency
+of this low power mode would be considered high, even though at the cpu level
+this is essentially a cpu power down. The SPM in this state may also handshake
+with the Resource power manager (RPM) processor in the SoC to indicate a
+complete application processor subsystem shut down.
+
+The idle states for QCOM SoCs are distinguished by the compatible property of
+the idle-states device node.
+
+The devicetree representation of the idle state should be -
+
+Required properties:
+
+- compatible: Must contain "arm,idle-state" and one of:
+ "qcom,idle-state-ret"
+ "qcom,idle-state-spc"
+ "qcom,idle-state-pc"
+
+Other required and optional properties are specified in [1].
+
+Example:
+
+ idle-states {
+ CPU_SPC: spc {
+ compatible = "qcom,idle-state-spc", "arm,idle-state";
+ entry-latency-us = <150>;
+ exit-latency-us = <200>;
+ min-residency-us = <2000>;
+ };
+ };
+
+[1]. Documentation/devicetree/bindings/arm/idle-states.txt
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt b/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
index 1505fb8e131a..ae4afc6dcfe0 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
@@ -2,22 +2,31 @@ SPM AVS Wrapper 2 (SAW2)
The SAW2 is a wrapper around the Subsystem Power Manager (SPM) and the
Adaptive Voltage Scaling (AVS) hardware. The SPM is a programmable
-micro-controller that transitions a piece of hardware (like a processor or
+power-controller that transitions a piece of hardware (like a processor or
subsystem) into and out of low power modes via a direct connection to
the PMIC. It can also be wired up to interact with other processors in the
system, notifying them when a low power state is entered or exited.
+Multiple revisions of the SAW hardware are supported using these Device Nodes.
+SAW2 revisions differ in the register offset and configuration data. Also, the
+same revision of the SAW in different SoCs may have different configuration
+data due to differences in hardware capabilities. Hence the SoC name, the
+version of the SAW hardware in that SoC, and the distinction between cpu (big
+or Little) or cache may be needed to uniquely identify the SAW register
+configuration and initialization data. The compatible string is used to
+indicate this parameter.
+
PROPERTIES
- compatible:
Usage: required
Value type: <string>
- Definition: shall contain "qcom,saw2". A more specific value should be
- one of:
- "qcom,saw2-v1"
- "qcom,saw2-v1.1"
- "qcom,saw2-v2"
- "qcom,saw2-v2.1"
+ Definition: Must have
+ "qcom,saw2"
+ A more specific value could be one of:
+ "qcom,apq8064-saw2-v1.1-cpu"
+ "qcom,msm8974-saw2-v2.1-cpu"
+ "qcom,apq8084-saw2-v2.1-cpu"
- reg:
Usage: required
@@ -26,10 +35,23 @@ PROPERTIES
the register region. An optional second element specifies
the base address and size of the alias register region.
+- regulator:
+ Usage: optional
+ Value type: boolean
+ Definition: Indicates that this SPM device acts as a regulator
+ device for the core (CPU or Cache) the SPM is attached
+ to.
-Example:
+Example 1:
- regulator@2099000 {
+ power-controller@2099000 {
compatible = "qcom,saw2";
reg = <0x02099000 0x1000>, <0x02009000 0x1000>;
+ regulator;
+ };
+
+Example 2:
+ saw0: power-controller@f9089000 {
+ compatible = "qcom,apq8084-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf9089000 0x1000>, <0xf9009000 0x1000>;
};
diff --git a/Documentation/devicetree/bindings/arm/msm/timer.txt b/Documentation/devicetree/bindings/arm/msm/timer.txt
index 74607b6c1117..5e10c345548f 100644
--- a/Documentation/devicetree/bindings/arm/msm/timer.txt
+++ b/Documentation/devicetree/bindings/arm/msm/timer.txt
@@ -9,11 +9,17 @@ Properties:
"qcom,scss-timer" - scorpion subsystem
- interrupts : Interrupts for the debug timer, the first general purpose
- timer, and optionally a second general purpose timer in that
- order.
+ timer, optionally a second general purpose timer, and optionally
+ two watchdog interrupts, in that order.
- reg : Specifies the base address of the timer registers.
+- clocks: Reference to the parent clocks, one per output clock. The parents
+ must appear in the same order as the clock names.
+
+- clock-names: The name of the clocks as free-form strings. They should be in
+ the same order as the clocks.
+
- clock-frequency : The frequency of the debug timer and the general purpose
timer(s) in Hz in that order.
@@ -29,9 +35,13 @@ Example:
compatible = "qcom,scss-timer", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
- <1 3 0x301>;
+ <1 3 0x301>,
+ <1 4 0x301>,
+ <1 5 0x301>;
reg = <0x0200a000 0x100>;
clock-frequency = <19200000>,
<32768>;
+ clocks = <&sleep_clk>;
+ clock-names = "sleep";
cpu-offset = <0x40000>;
};
diff --git a/Documentation/devicetree/bindings/arm/omap/ctrl.txt b/Documentation/devicetree/bindings/arm/omap/ctrl.txt
new file mode 100644
index 000000000000..3a4e5901ce31
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/omap/ctrl.txt
@@ -0,0 +1,79 @@
+OMAP Control Module bindings
+
+The Control Module contains miscellaneous features that vary with the SoC type.
+Pin control is one common feature, and it has specialized support
+described in [1]. Typically some clock nodes are also under the control module.
+Syscon is used to share register level access with drivers external to
+the control module driver itself.
+
+See [2] for documentation about clock/clockdomain nodes.
+
+[1] Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
+[2] Documentation/devicetree/bindings/clock/ti/*
+
+Required properties:
+- compatible: Must be one of:
+ "ti,am3-scm"
+ "ti,am4-scm"
+ "ti,dm814-scrm"
+ "ti,dm816-scrm"
+ "ti,omap2-scm"
+ "ti,omap3-scm"
+ "ti,omap4-scm-core"
+ "ti,omap4-scm-padconf-core"
+ "ti,omap5-scm-core"
+ "ti,omap5-scm-padconf-core"
+ "ti,dra7-scm-core"
+- reg: Contains Control Module register address range
+ (base address and length)
+
+Optional properties:
+- clocks: clocks for this module
+- clockdomains: clockdomains for this module
+
+Examples:
+
+scm: scm@2000 {
+ compatible = "ti,omap3-scm", "simple-bus";
+ reg = <0x2000 0x2000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x2000>;
+
+ omap3_pmx_core: pinmux@30 {
+ compatible = "ti,omap3-padconf",
+ "pinctrl-single";
+ reg = <0x30 0x230>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0xff1f>;
+ };
+
+ scm_conf: scm_conf@270 {
+ compatible = "syscon";
+ reg = <0x270 0x330>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ scm_clockdomains: clockdomains {
+ };
+};
+
+&scm_clocks {
+ mcbsp5_mux_fck: mcbsp5_mux_fck {
+ #clock-cells = <0>;
+ compatible = "ti,composite-mux-clock";
+ clocks = <&core_96m_fck>, <&mcbsp_clks>;
+ ti,bit-shift = <4>;
+ reg = <0x02d8>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
index 974624ea68f6..161448da959d 100644
--- a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
+++ b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
@@ -6,6 +6,7 @@ provided by Arteris.
Required properties:
- compatible : Should be "ti,omap3-l3-smx" for OMAP3 family
Should be "ti,omap4-l3-noc" for OMAP4 family
+ Should be "ti,omap5-l3-noc" for OMAP5 family
Should be "ti,dra7-l3-noc" for DRA7 family
Should be "ti,am4372-l3-noc" for AM43 family
- reg: Contains L3 register address range for each noc domain.
diff --git a/Documentation/devicetree/bindings/arm/omap/l4.txt b/Documentation/devicetree/bindings/arm/omap/l4.txt
new file mode 100644
index 000000000000..b4f8a16e7e3b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/omap/l4.txt
@@ -0,0 +1,26 @@
+L4 interconnect bindings
+
+These bindings describe the OMAP SoCs L4 interconnect bus.
+
+Required properties:
+- compatible : Should be "ti,omap2-l4" for OMAP2 family l4 core bus
+ Should be "ti,omap2-l4-wkup" for OMAP2 family l4 wkup bus
+ Should be "ti,omap3-l4-core" for OMAP3 family l4 core bus
+ Should be "ti,omap4-l4-cfg" for OMAP4 family l4 cfg bus
+ Should be "ti,omap4-l4-wkup" for OMAP4 family l4 wkup bus
+ Should be "ti,omap5-l4-cfg" for OMAP5 family l4 cfg bus
+ Should be "ti,omap5-l4-wkup" for OMAP5 family l4 wkup bus
+ Should be "ti,dra7-l4-cfg" for DRA7 family l4 cfg bus
+ Should be "ti,dra7-l4-wkup" for DRA7 family l4 wkup bus
+ Should be "ti,am3-l4-wkup" for AM33xx family l4 wkup bus
+ Should be "ti,am4-l4-wkup" for AM43xx family l4 wkup bus
+- ranges : contains the IO map range for the bus
+
+Examples:
+
+l4: l4@48000000 {
+ compatible "ti,omap2-l4", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x48000000 0x100000>;
+};
diff --git a/Documentation/devicetree/bindings/arm/omap/prcm.txt b/Documentation/devicetree/bindings/arm/omap/prcm.txt
index 79074dac684a..3eb6d7afff14 100644
--- a/Documentation/devicetree/bindings/arm/omap/prcm.txt
+++ b/Documentation/devicetree/bindings/arm/omap/prcm.txt
@@ -10,14 +10,10 @@ documentation about the individual clock/clockdomain nodes.
Required properties:
- compatible: Must be one of:
"ti,am3-prcm"
- "ti,am3-scrm"
"ti,am4-prcm"
- "ti,am4-scrm"
"ti,omap2-prcm"
- "ti,omap2-scrm"
"ti,omap3-prm"
"ti,omap3-cm"
- "ti,omap3-scrm"
"ti,omap4-cm1"
"ti,omap4-prm"
"ti,omap4-cm2"
@@ -29,6 +25,8 @@ Required properties:
"ti,dra7-prm"
"ti,dra7-cm-core-aon"
"ti,dra7-cm-core"
+ "ti,dm814-prcm"
+ "ti,dm816-prcm"
- reg: Contains PRCM module register address range
(base address and length)
- clocks: clocks for this module
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 75ef91d08f3b..3b5f5d1088c6 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -18,12 +18,21 @@ Required properties:
"arm,arm11mpcore-pmu"
"arm,arm1176-pmu"
"arm,arm1136-pmu"
+ "qcom,scorpion-pmu"
+ "qcom,scorpion-mp-pmu"
"qcom,krait-pmu"
- interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu
interrupt (PPI) then 1 interrupt should be specified.
Optional properties:
+- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
+ to CPU nodes corresponding directly to the affinity of
+ the SPIs listed in the interrupts property.
+
+ This property should be present when there is more than
+ a single SPI.
+
- qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
events.
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index 6809e4e51ed2..60d4a1e0a9b5 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -22,3 +22,7 @@ Rockchip platforms device tree bindings
- compatible = "firefly,firefly-rk3288", "rockchip,rk3288";
or
- compatible = "firefly,firefly-rk3288-beta", "rockchip,rk3288";
+
+- ChipSPARK PopMetal-RK3288 board:
+ Required root node properties:
+ - compatible = "chipspark,popmetal-rk3288", "rockchip,rk3288";
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index 51147cb5c036..c4f19b2e7dd9 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -7,8 +7,6 @@ SoCs:
compatible = "renesas,emev2"
- RZ/A1H (R7S72100)
compatible = "renesas,r7s72100"
- - SH-Mobile AP4 (R8A73720/SH7372)
- compatible = "renesas,sh7372"
- SH-Mobile AG5 (R8A73A00/SH73A0)
compatible = "renesas,sh73a0"
- R-Mobile APE6 (R8A73A40)
@@ -37,8 +35,6 @@ Boards:
compatible = "renesas,alt", "renesas,r8a7794"
- APE6-EVM
compatible = "renesas,ape6evm", "renesas,r8a73a4"
- - APE6-EVM - Reference Device Tree Implementation
- compatible = "renesas,ape6evm-reference", "renesas,r8a73a4"
- Atmark Techno Armadillo-800 EVA
compatible = "renesas,armadillo800eva"
- BOCK-W
@@ -57,12 +53,8 @@ Boards:
compatible = "renesas,kzm9d", "renesas,emev2"
- Kyoto Microcomputer Co. KZM-A9-GT
compatible = "renesas,kzm9g", "renesas,sh73a0"
- - Kyoto Microcomputer Co. KZM-A9-GT - Reference Device Tree Implementation
- compatible = "renesas,kzm9g-reference", "renesas,sh73a0"
- Lager (RTP0RC7790SEB00010S)
compatible = "renesas,lager", "renesas,r8a7790"
- - Mackerel (R0P7372LC0016RL, AP4 EVM 2nd)
- compatible = "renesas,mackerel"
- Marzen
compatible = "renesas,marzen", "renesas,r8a7779"
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
index 067c9790062f..9a4295b54539 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
@@ -5,9 +5,12 @@ Required properties:
Tegra30, must contain "nvidia,tegra30-ahb". Otherwise, must contain
'"nvidia,<chip>-ahb", "nvidia,tegra30-ahb"' where <chip> is tegra124,
tegra132, or tegra210.
-- reg : Should contain 1 register ranges(address and length)
+- reg : Should contain 1 register range (address and length). For
+ Tegra20, Tegra30, and Tegra114 chips, the value must be <0x6000c004
+ 0x10c>. For Tegra124, Tegra132 and Tegra210 chips, the value should
+ be <0x6000c000 0x150>.
-Example:
+Example (for a Tegra20 chip):
ahb: ahb@6000c004 {
compatible = "nvidia,tegra20-ahb";
reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt
new file mode 100644
index 000000000000..ea670a5d7ee3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt
@@ -0,0 +1,32 @@
+NVIDIA Tegra Activity Monitor
+
+The activity monitor block collects statistics about the behaviour of other
+components in the system. This information can be used to derive the rate at
+which the external memory needs to be clocked in order to serve all requests
+from the monitored clients.
+
+Required properties:
+- compatible: should be "nvidia,tegra<chip>-actmon"
+- reg: offset and length of the register set for the device
+- interrupts: standard interrupt property
+- clocks: Must contain a phandle and clock specifier pair for each entry in
+clock-names. See ../../clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - actmon
+ - emc
+- resets: Must contain an entry for each entry in reset-names. See
+../../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - actmon
+
+Example:
+ actmon@6000c800 {
+ compatible = "nvidia,tegra124-actmon";
+ reg = <0x0 0x6000c800 0x0 0x400>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA124_CLK_ACTMON>,
+ <&tegra_car TEGRA124_CLK_EMC>;
+ clock-names = "actmon", "emc";
+ resets = <&tegra_car 119>;
+ reset-names = "actmon";
+ };
diff --git a/Documentation/devicetree/bindings/bus/bcma.txt b/Documentation/devicetree/bindings/bus/brcm,bus-axi.txt
index edd44d802139..edd44d802139 100644
--- a/Documentation/devicetree/bindings/bus/bcma.txt
+++ b/Documentation/devicetree/bindings/bus/brcm,bus-axi.txt
diff --git a/Documentation/devicetree/bindings/bus/omap-ocp2scp.txt b/Documentation/devicetree/bindings/bus/omap-ocp2scp.txt
index 63dd8051521c..18729f6fe1e5 100644
--- a/Documentation/devicetree/bindings/bus/omap-ocp2scp.txt
+++ b/Documentation/devicetree/bindings/bus/omap-ocp2scp.txt
@@ -1,7 +1,8 @@
* OMAP OCP2SCP - ocp interface to scp interface
properties:
-- compatible : Should be "ti,omap-ocp2scp"
+- compatible : Should be "ti,am437x-ocp2scp" for AM437x processor
+ Should be "ti,omap-ocp2scp" for all others
- reg : Address and length of the register set for the device
- #address-cells, #size-cells : Must be present if the device has sub-nodes
- ranges : the child address space are mapped 1:1 onto the parent address space
diff --git a/Documentation/devicetree/bindings/bus/renesas,bsc.txt b/Documentation/devicetree/bindings/bus/renesas,bsc.txt
new file mode 100644
index 000000000000..90e947269437
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/renesas,bsc.txt
@@ -0,0 +1,46 @@
+Renesas Bus State Controller (BSC)
+==================================
+
+The Renesas Bus State Controller (BSC, sometimes called "LBSC within Bus
+Bridge", or "External Bus Interface") can be found in several Renesas ARM SoCs.
+It provides an external bus for connecting multiple external devices to the
+SoC, driving several chip select lines, e.g. for NOR FLASH, Ethernet and USB.
+
+While the BSC is a fairly simple memory-mapped bus, it may be part of a PM
+domain, and may have a gateable functional clock.
+Before a device connected to the BSC can be accessed, the PM domain
+containing the BSC must be powered on, and the functional clock
+driving the BSC must be enabled.
+
+The bindings for the BSC extend the bindings for "simple-pm-bus".
+
+
+Required properties
+ - compatible: Must contain an SoC-specific value, and "renesas,bsc" and
+ "simple-pm-bus" as fallbacks.
+ SoC-specific values can be:
+ "renesas,bsc-r8a73a4" for R-Mobile APE6 (r8a73a4)
+ "renesas,bsc-sh73a0" for SH-Mobile AG5 (sh73a0)
+ - #address-cells, #size-cells, ranges: Must describe the mapping between
+ parent address and child address spaces.
+ - reg: Must contain the base address and length to access the bus controller.
+
+Optional properties:
+ - interrupts: Must contain a reference to the BSC interrupt, if available.
+ - clocks: Must contain a reference to the functional clock, if available.
+ - power-domains: Must contain a reference to the PM domain, if available.
+
+
+Example:
+
+ bsc: bus@fec10000 {
+ compatible = "renesas,bsc-sh73a0", "renesas,bsc",
+ "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x20000000>;
+ reg = <0xfec10000 0x400>;
+ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&zb_clk>;
+ power-domains = <&pd_a4s>;
+ };
diff --git a/Documentation/devicetree/bindings/bus/simple-pm-bus.txt b/Documentation/devicetree/bindings/bus/simple-pm-bus.txt
new file mode 100644
index 000000000000..d032237512c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/simple-pm-bus.txt
@@ -0,0 +1,44 @@
+Simple Power-Managed Bus
+========================
+
+A Simple Power-Managed Bus is a transparent bus that doesn't need a real
+driver, as it's typically initialized by the boot loader.
+
+However, its bus controller is part of a PM domain, or under the control of a
+functional clock. Hence, the bus controller's PM domain and/or clock must be
+enabled for child devices connected to the bus (either on-SoC or externally)
+to function.
+
+While "simple-pm-bus" follows the "simple-bus" set of properties, as specified
+in ePAPR, it is not an extension of "simple-bus".
+
+
+Required properties:
+ - compatible: Must contain at least "simple-pm-bus".
+ Must not contain "simple-bus".
+ It's recommended to let this be preceded by one or more
+ vendor-specific compatible values.
+ - #address-cells, #size-cells, ranges: Must describe the mapping between
+ parent address and child address spaces.
+
+Optional platform-specific properties for clock or PM domain control (at least
+one of them is required):
+ - clocks: Must contain a reference to the functional clock(s),
+ - power-domains: Must contain a reference to the PM domain.
+Please refer to the binding documentation for the clock and/or PM domain
+providers for more details.
+
+
+Example:
+
+ bsc: bus@fec10000 {
+ compatible = "renesas,bsc-sh73a0", "renesas,bsc",
+ "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x20000000>;
+ reg = <0xfec10000 0x400>;
+ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&zb_clk>;
+ power-domains = <&pd_a4s>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/bcm-kona-clock.txt b/Documentation/devicetree/bindings/clock/brcm,kona-ccu.txt
index 5286e260fcae..5286e260fcae 100644
--- a/Documentation/devicetree/bindings/clock/bcm-kona-clock.txt
+++ b/Documentation/devicetree/bindings/clock/brcm,kona-ccu.txt
diff --git a/Documentation/devicetree/bindings/clock/exynos3250-clock.txt b/Documentation/devicetree/bindings/clock/exynos3250-clock.txt
index f57d9dd9ea85..f1738b88c225 100644
--- a/Documentation/devicetree/bindings/clock/exynos3250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos3250-clock.txt
@@ -9,6 +9,8 @@ Required Properties:
- "samsung,exynos3250-cmu" - controller compatible with Exynos3250 SoC.
- "samsung,exynos3250-cmu-dmc" - controller compatible with
Exynos3250 SoC for Dynamic Memory Controller domain.
+ - "samsung,exynos3250-cmu-isp" - ISP block clock controller compatible
+ with Exynos3250 SoC.
- reg: physical base address of the controller and length of memory mapped
region.
@@ -36,6 +38,12 @@ Example 1: Examples of clock controller nodes are listed below.
#clock-cells = <1>;
};
+ cmu_isp: clock-controller@10048000 {
+ compatible = "samsung,exynos3250-cmu-isp";
+ reg = <0x10048000 0x1000>;
+ #clock-cells = <1>;
+ };
+
Example 2: UART controller node that consumes the clock generated by the clock
controller. Refer to the standard clock bindings for information
about 'clocks' and 'clock-names' property.
diff --git a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
new file mode 100644
index 000000000000..63379b04e052
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
@@ -0,0 +1,462 @@
+* Samsung Exynos5433 CMU (Clock Management Units)
+
+The Exynos5433 clock controller generates and supplies clocks to various
+controllers within the Exynos5433 SoC.
+
+Required Properties:
+
+- compatible: should be one of the following.
+ - "samsung,exynos5433-cmu-top" - clock controller compatible for CMU_TOP
+ which generates clocks for IMEM/FSYS/G3D/GSCL/HEVC/MSCL/G2D/MFC/PERIC/PERIS
+ domains and bus clocks.
+ - "samsung,exynos5433-cmu-cpif" - clock controller compatible for CMU_CPIF
+ which generates clocks for LLI (Low Latency Interface) IP.
+ - "samsung,exynos5433-cmu-mif" - clock controller compatible for CMU_MIF
+ which generates clocks for DRAM Memory Controller domain.
+ - "samsung,exynos5433-cmu-peric" - clock controller compatible for CMU_PERIC
+ which generates clocks for UART/I2C/SPI/I2S/PCM/SPDIF/PWM/SLIMBUS IPs.
+ - "samsung,exynos5433-cmu-peris" - clock controller compatible for CMU_PERIS
+ which generates clocks for PMU/TMU/MCT/WDT/RTC/SECKEY/TZPC IPs.
+ - "samsung,exynos5433-cmu-fsys" - clock controller compatible for CMU_FSYS
+ which generates clocks for USB/UFS/SDMMC/TSI/PDMA IPs.
+ - "samsung,exynos5433-cmu-g2d" - clock controller compatible for CMU_G2D
+ which generates clocks for G2D/MDMA IPs.
+ - "samsung,exynos5433-cmu-disp" - clock controller compatible for CMU_DISP
+ which generates clocks for Display (DECON/HDMI/DSIM/MIXER) IPs.
+ - "samsung,exynos5433-cmu-aud" - clock controller compatible for CMU_AUD
+ which generates clocks for Cortex-A5/BUS/AUDIO clocks.
+ - "samsung,exynos5433-cmu-bus0", "samsung,exynos5433-cmu-bus1"
+ and "samsung,exynos5433-cmu-bus2" - clock controller compatible for CMU_BUS
+ which generates the global data bus clock and global peripheral bus clock.
+ - "samsung,exynos5433-cmu-g3d" - clock controller compatible for CMU_G3D
+ which generates clocks for 3D Graphics Engine IP.
+ - "samsung,exynos5433-cmu-gscl" - clock controller compatible for CMU_GSCL
+ which generates clocks for GSCALER IPs.
+ - "samsung,exynos5433-cmu-apollo"- clock controller compatible for CMU_APOLLO
+ which generates clocks for Cortex-A53 Quad-core processor.
+ - "samsung,exynos5433-cmu-atlas" - clock controller compatible for CMU_ATLAS
+ which generates clocks for Cortex-A57 Quad-core processor, CoreSight and
+ L2 cache controller.
+ - "samsung,exynos5433-cmu-mscl" - clock controller compatible for CMU_MSCL
+ which generates clocks for M2M (Memory to Memory) scaler and JPEG IPs.
+ - "samsung,exynos5433-cmu-mfc" - clock controller compatible for CMU_MFC
+ which generates clocks for MFC(Multi-Format Codec) IP.
+ - "samsung,exynos5433-cmu-hevc" - clock controller compatible for CMU_HEVC
+ which generates clocks for HEVC(High Efficiency Video Codec) decoder IP.
+ - "samsung,exynos5433-cmu-isp" - clock controller compatible for CMU_ISP
+ which generates clocks for FIMC-ISP/DRC/SCLC/DIS/3DNR IPs.
+ - "samsung,exynos5433-cmu-cam0" - clock controller compatible for CMU_CAM0
+ which generates clocks for MIPI_CSIS{0|1}/FIMC_LITE_{A|B|D}/FIMC_3AA{0|1}
+ IPs.
+ - "samsung,exynos5433-cmu-cam1" - clock controller compatible for CMU_CAM1
+ which generates clocks for Cortex-A5/MIPI_CSIS2/FIMC-LITE_C/FIMC-FD IPs.
+
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+- #clock-cells: should be 1.
+
+- clocks: list of the clock controller input clock identifiers,
+ from common clock bindings. Please refer to the next section
+ to find the input clocks for a given controller.
+
+- clock-names: list of the clock controller input clock names,
+ as described in clock-bindings.txt.
+
+ Input clocks for top clock controller:
+ - oscclk
+ - sclk_mphy_pll
+ - sclk_mfc_pll
+ - sclk_bus_pll
+
+ Input clocks for cpif clock controller:
+ - oscclk
+
+ Input clocks for mif clock controller:
+ - oscclk
+ - sclk_mphy_pll
+
+ Input clocks for fsys clock controller:
+ - oscclk
+ - sclk_ufs_mphy
+ - div_aclk_fsys_200
+ - sclk_pcie_100_fsys
+ - sclk_ufsunipro_fsys
+ - sclk_mmc2_fsys
+ - sclk_mmc1_fsys
+ - sclk_mmc0_fsys
+ - sclk_usbhost30_fsys
+ - sclk_usbdrd30_fsys
+
+ Input clocks for g2d clock controller:
+ - oscclk
+ - aclk_g2d_266
+ - aclk_g2d_400
+
+ Input clocks for disp clock controller:
+ - oscclk
+ - sclk_dsim1_disp
+ - sclk_dsim0_disp
+ - sclk_dsd_disp
+ - sclk_decon_tv_eclk_disp
+ - sclk_decon_vclk_disp
+ - sclk_decon_eclk_disp
+ - sclk_decon_tv_vclk_disp
+ - aclk_disp_333
+
+ Input clocks for bus0 clock controller:
+ - aclk_bus0_400
+
+ Input clocks for bus1 clock controller:
+ - aclk_bus1_400
+
+ Input clocks for bus2 clock controller:
+ - oscclk
+ - aclk_bus2_400
+
+ Input clocks for g3d clock controller:
+ - oscclk
+ - aclk_g3d_400
+
+ Input clocks for gscl clock controller:
+ - oscclk
+ - aclk_gscl_111
+ - aclk_gscl_333
+
+ Input clocks for apollo clock controller:
+ - oscclk
+ - sclk_bus_pll_apollo
+
+ Input clocks for atlas clock controller:
+ - oscclk
+ - sclk_bus_pll_atlas
+
+ Input clocks for mscl clock controller:
+ - oscclk
+ - sclk_jpeg_mscl
+ - aclk_mscl_400
+
+ Input clocks for mfc clock controller:
+ - oscclk
+ - aclk_mfc_400
+
+ Input clocks for hevc clock controller:
+ - oscclk
+ - aclk_hevc_400
+
+ Input clocks for isp clock controller:
+ - oscclk
+ - aclk_isp_dis_400
+ - aclk_isp_400
+
+ Input clocks for cam0 clock controller:
+ - oscclk
+ - aclk_cam0_333
+ - aclk_cam0_400
+ - aclk_cam0_552
+
+ Input clocks for cam1 clock controller:
+ - oscclk
+ - sclk_isp_uart_cam1
+ - sclk_isp_spi1_cam1
+ - sclk_isp_spi0_cam1
+ - aclk_cam1_333
+ - aclk_cam1_400
+ - aclk_cam1_552
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume.
+
+All available clocks are defined as preprocessor macros in
+dt-bindings/clock/exynos5433.h header and can be used in device
+tree sources.
+
+Example 1: Examples of 'oscclk' source clock node are listed below.
+
+ xxti: xxti {
+ compatible = "fixed-clock";
+ clock-output-names = "oscclk";
+ #clock-cells = <0>;
+ };
+
+Example 2: Examples of clock controller nodes are listed below.
+
+ cmu_top: clock-controller@10030000 {
+ compatible = "samsung,exynos5433-cmu-top";
+ reg = <0x10030000 0x0c04>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "sclk_mphy_pll",
+ "sclk_mfc_pll",
+ "sclk_bus_pll";
+ clocks = <&xxti>,
+ <&cmu_cpif CLK_SCLK_MPHY_PLL>,
+ <&cmu_mif CLK_SCLK_MFC_PLL>,
+ <&cmu_mif CLK_SCLK_BUS_PLL>;
+ };
+
+ cmu_cpif: clock-controller@10fc0000 {
+ compatible = "samsung,exynos5433-cmu-cpif";
+ reg = <0x10fc0000 0x0c04>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk";
+ clocks = <&xxti>;
+ };
+
+ cmu_mif: clock-controller@105b0000 {
+ compatible = "samsung,exynos5433-cmu-mif";
+ reg = <0x105b0000 0x100c>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "sclk_mphy_pll";
+ clocks = <&xxti>,
+ <&cmu_cpif CLK_SCLK_MPHY_PLL>;
+ };
+
+ cmu_peric: clock-controller@14c80000 {
+ compatible = "samsung,exynos5433-cmu-peric";
+ reg = <0x14c80000 0x0b08>;
+ #clock-cells = <1>;
+ };
+
+ cmu_peris: clock-controller@10040000 {
+ compatible = "samsung,exynos5433-cmu-peris";
+ reg = <0x10040000 0x0b20>;
+ #clock-cells = <1>;
+ };
+
+ cmu_fsys: clock-controller@156e0000 {
+ compatible = "samsung,exynos5433-cmu-fsys";
+ reg = <0x156e0000 0x0b04>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "sclk_ufs_mphy",
+ "div_aclk_fsys_200",
+ "sclk_pcie_100_fsys",
+ "sclk_ufsunipro_fsys",
+ "sclk_mmc2_fsys",
+ "sclk_mmc1_fsys",
+ "sclk_mmc0_fsys",
+ "sclk_usbhost30_fsys",
+ "sclk_usbdrd30_fsys";
+ clocks = <&xxti>,
+ <&cmu_cpif CLK_SCLK_UFS_MPHY>,
+ <&cmu_top CLK_DIV_ACLK_FSYS_200>,
+ <&cmu_top CLK_SCLK_PCIE_100_FSYS>,
+ <&cmu_top CLK_SCLK_UFSUNIPRO_FSYS>,
+ <&cmu_top CLK_SCLK_MMC2_FSYS>,
+ <&cmu_top CLK_SCLK_MMC1_FSYS>,
+ <&cmu_top CLK_SCLK_MMC0_FSYS>,
+ <&cmu_top CLK_SCLK_USBHOST30_FSYS>,
+ <&cmu_top CLK_SCLK_USBDRD30_FSYS>;
+ };
+
+ cmu_g2d: clock-controller@12460000 {
+ compatible = "samsung,exynos5433-cmu-g2d";
+ reg = <0x12460000 0x0b08>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "aclk_g2d_266",
+ "aclk_g2d_400";
+ clocks = <&xxti>,
+ <&cmu_top CLK_ACLK_G2D_266>,
+ <&cmu_top CLK_ACLK_G2D_400>;
+ };
+
+ cmu_disp: clock-controller@13b90000 {
+ compatible = "samsung,exynos5433-cmu-disp";
+ reg = <0x13b90000 0x0c04>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "sclk_dsim1_disp",
+ "sclk_dsim0_disp",
+ "sclk_dsd_disp",
+ "sclk_decon_tv_eclk_disp",
+ "sclk_decon_vclk_disp",
+ "sclk_decon_eclk_disp",
+ "sclk_decon_tv_vclk_disp",
+ "aclk_disp_333";
+ clocks = <&xxti>,
+ <&cmu_mif CLK_SCLK_DSIM1_DISP>,
+ <&cmu_mif CLK_SCLK_DSIM0_DISP>,
+ <&cmu_mif CLK_SCLK_DSD_DISP>,
+ <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>,
+ <&cmu_mif CLK_SCLK_DECON_VCLK_DISP>,
+ <&cmu_mif CLK_SCLK_DECON_ECLK_DISP>,
+ <&cmu_mif CLK_SCLK_DECON_TV_VCLK_DISP>,
+ <&cmu_mif CLK_ACLK_DISP_333>;
+ };
+
+ cmu_aud: clock-controller@114c0000 {
+ compatible = "samsung,exynos5433-cmu-aud";
+ reg = <0x114c0000 0x0b04>;
+ #clock-cells = <1>;
+ };
+
+ cmu_bus0: clock-controller@13600000 {
+ compatible = "samsung,exynos5433-cmu-bus0";
+ reg = <0x13600000 0x0b04>;
+ #clock-cells = <1>;
+
+ clock-names = "aclk_bus0_400";
+ clocks = <&cmu_top CLK_ACLK_BUS0_400>;
+ };
+
+ cmu_bus1: clock-controller@14800000 {
+ compatible = "samsung,exynos5433-cmu-bus1";
+ reg = <0x14800000 0x0b04>;
+ #clock-cells = <1>;
+
+ clock-names = "aclk_bus1_400";
+ clocks = <&cmu_top CLK_ACLK_BUS1_400>;
+ };
+
+ cmu_bus2: clock-controller@13400000 {
+ compatible = "samsung,exynos5433-cmu-bus2";
+ reg = <0x13400000 0x0b04>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "aclk_bus2_400";
+ clocks = <&xxti>, <&cmu_mif CLK_ACLK_BUS2_400>;
+ };
+
+ cmu_g3d: clock-controller@14aa0000 {
+ compatible = "samsung,exynos5433-cmu-g3d";
+ reg = <0x14aa0000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "aclk_g3d_400";
+ clocks = <&xxti>, <&cmu_top CLK_ACLK_G3D_400>;
+ };
+
+ cmu_gscl: clock-controller@13cf0000 {
+ compatible = "samsung,exynos5433-cmu-gscl";
+ reg = <0x13cf0000 0x0b10>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "aclk_gscl_111",
+ "aclk_gscl_333";
+ clocks = <&xxti>,
+ <&cmu_top CLK_ACLK_GSCL_111>,
+ <&cmu_top CLK_ACLK_GSCL_333>;
+ };
+
+ cmu_apollo: clock-controller@11900000 {
+ compatible = "samsung,exynos5433-cmu-apollo";
+ reg = <0x11900000 0x1088>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "sclk_bus_pll_apollo";
+ clocks = <&xxti>, <&cmu_mif CLK_SCLK_BUS_PLL_APOLLO>;
+ };
+
+ cmu_atlas: clock-controller@11800000 {
+ compatible = "samsung,exynos5433-cmu-atlas";
+ reg = <0x11800000 0x1088>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "sclk_bus_pll_atlas";
+ clocks = <&xxti>, <&cmu_mif CLK_SCLK_BUS_PLL_ATLAS>;
+ };
+
+ cmu_mscl: clock-controller@105d0000 {
+ compatible = "samsung,exynos5433-cmu-mscl";
+ reg = <0x105d0000 0x0b10>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "sclk_jpeg_mscl",
+ "aclk_mscl_400";
+ clocks = <&xxti>,
+ <&cmu_top CLK_SCLK_JPEG_MSCL>,
+ <&cmu_top CLK_ACLK_MSCL_400>;
+ };
+
+ cmu_mfc: clock-controller@15280000 {
+ compatible = "samsung,exynos5433-cmu-mfc";
+ reg = <0x15280000 0x0b08>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "aclk_mfc_400";
+ clocks = <&xxti>, <&cmu_top CLK_ACLK_MFC_400>;
+ };
+
+ cmu_hevc: clock-controller@14f80000 {
+ compatible = "samsung,exynos5433-cmu-hevc";
+ reg = <0x14f80000 0x0b08>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk", "aclk_hevc_400";
+ clocks = <&xxti>, <&cmu_top CLK_ACLK_HEVC_400>;
+ };
+
+ cmu_isp: clock-controller@146d0000 {
+ compatible = "samsung,exynos5433-cmu-isp";
+ reg = <0x146d0000 0x0b0c>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "aclk_isp_dis_400",
+ "aclk_isp_400";
+ clocks = <&xxti>,
+ <&cmu_top CLK_ACLK_ISP_DIS_400>,
+ <&cmu_top CLK_ACLK_ISP_400>;
+ };
+
+ cmu_cam0: clock-controller@120d0000 {
+ compatible = "samsung,exynos5433-cmu-cam0";
+ reg = <0x120d0000 0x0b0c>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "aclk_cam0_333",
+ "aclk_cam0_400",
+ "aclk_cam0_552";
+ clocks = <&xxti>,
+ <&cmu_top CLK_ACLK_CAM0_333>,
+ <&cmu_top CLK_ACLK_CAM0_400>,
+ <&cmu_top CLK_ACLK_CAM0_552>;
+ };
+
+ cmu_cam1: clock-controller@145d0000 {
+ compatible = "samsung,exynos5433-cmu-cam1";
+ reg = <0x145d0000 0x0b08>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "sclk_isp_uart_cam1",
+ "sclk_isp_spi1_cam1",
+ "sclk_isp_spi0_cam1",
+ "aclk_cam1_333",
+ "aclk_cam1_400",
+ "aclk_cam1_552";
+ clocks = <&xxti>,
+ <&cmu_top CLK_SCLK_ISP_UART_CAM1>,
+ <&cmu_top CLK_SCLK_ISP_SPI1_CAM1>,
+ <&cmu_top CLK_SCLK_ISP_SPI0_CAM1>,
+ <&cmu_top CLK_ACLK_CAM1_333>,
+ <&cmu_top CLK_ACLK_CAM1_400>,
+ <&cmu_top CLK_ACLK_CAM1_552>;
+ };
+
+Example 3: UART controller node that consumes the clock generated by the clock
+ controller.
+
+ serial_0: serial@14C10000 {
+ compatible = "samsung,exynos5433-uart";
+ reg = <0x14C10000 0x100>;
+ interrupts = <0 421 0>;
+ clocks = <&cmu_peric CLK_PCLK_UART0>,
+ <&cmu_peric CLK_SCLK_UART0>;
+ clock-names = "uart", "clk_uart_baud0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_bus>;
+ status = "disabled";
+ };
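+
+For illustration only (not part of this binding): a consumer driver for a node
+like serial_0 above might look up these clocks by the names given in its
+clock-names property. The sketch below is a hypothetical example; the function
+and variable names are made up.
+
+	#include <linux/clk.h>
+	#include <linux/err.h>
+	#include <linux/platform_device.h>
+
+	static int example_uart_probe(struct platform_device *pdev)
+	{
+		struct clk *pclk, *baud;
+
+		/* "uart" maps to CLK_PCLK_UART0, "clk_uart_baud0" to CLK_SCLK_UART0 */
+		pclk = devm_clk_get(&pdev->dev, "uart");
+		if (IS_ERR(pclk))
+			return PTR_ERR(pclk);
+
+		baud = devm_clk_get(&pdev->dev, "clk_uart_baud0");
+		if (IS_ERR(baud))
+			return PTR_ERR(baud);
+
+		/* Enable the baud clock; error handling kept minimal for brevity. */
+		return clk_prepare_enable(baud);
+	}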
diff --git a/Documentation/devicetree/bindings/clock/fujitsu,mb86s70-crg11.txt b/Documentation/devicetree/bindings/clock/fujitsu,mb86s70-crg11.txt
new file mode 100644
index 000000000000..332396265689
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fujitsu,mb86s70-crg11.txt
@@ -0,0 +1,26 @@
+Fujitsu CRG11 clock driver bindings
+-----------------------------------
+
+Required properties :
+- compatible : Shall contain "fujitsu,mb86s70-crg11"
+- #clock-cells : Shall be 3 {cntrlr domain port}
+
+The consumer specifies the desired clock by pointing to the controller's
+phandle followed by the {cntrlr domain port} cells.
+
+Example:
+
+ clock: crg11 {
+ compatible = "fujitsu,mb86s70-crg11";
+ #clock-cells = <3>;
+ };
+
+ mhu: mhu0@2b1f0000 {
+ #mbox-cells = <1>;
+ compatible = "arm,mhu";
+ reg = <0 0x2B1F0000 0x1000>;
+ interrupts = <0 36 4>, /* LP Non-Sec */
+ <0 35 4>, /* HP Non-Sec */
+ <0 37 4>; /* Secure */
+ clocks = <&clock 0 2 1>; /* Cntrlr:0 Domain:2 Port:1 */
+ clock-names = "clk";
+ };
diff --git a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
index dc5ea5b22da9..670c2af3e931 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
@@ -23,6 +23,14 @@ The following is a list of provided IDs and clock names on Armada 380/385:
2 = l2clk (L2 Cache clock)
3 = ddrclk (DDR clock)
+The following is a list of provided IDs and clock names on Armada 39x:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = nbclk (Coherent Fabric clock)
+ 3 = hclk (SDRAM Controller Internal Clock)
+ 4 = dclk (SDRAM Interface Clock)
+ 5 = refclk (Reference Clock)
+
The following is a list of provided IDs and clock names on Kirkwood and Dove:
0 = tclk (Internal Bus clock)
1 = cpuclk (CPU0 clock)
@@ -39,6 +47,7 @@ Required properties:
"marvell,armada-370-core-clock" - For Armada 370 SoC core clocks
"marvell,armada-375-core-clock" - For Armada 375 SoC core clocks
"marvell,armada-380-core-clock" - For Armada 380/385 SoC core clocks
+ "marvell,armada-390-core-clock" - For Armada 39x SoC core clocks
"marvell,armada-xp-core-clock" - For Armada XP SoC core clocks
"marvell,dove-core-clock" - for Dove SoC core clocks
"marvell,kirkwood-core-clock" - for Kirkwood SoC (except mv88f6180)
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
index 76477be742b2..31c7c0c1ce8f 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -1,6 +1,6 @@
* Gated Clock bindings for Marvell EBU SoCs
-Marvell Armada 370/375/380/385/XP, Dove and Kirkwood allow some
+Marvell Armada 370/375/380/385/39x/XP, Dove and Kirkwood allow some
peripheral clocks to be gated to save some power. The clock consumer
should specify the desired clock by having the clock ID in its
"clocks" phandle cell. The clock ID is directly mapped to the
@@ -77,6 +77,18 @@ ID Clock Peripheral
28 xor1 XOR 1
30 sata1 SATA 1
+The following is a list of provided IDs for Armada 39x:
+ID Clock Peripheral
+-----------------------------------
+5 pex1 PCIe 1
+6 pex2 PCIe 2
+7 pex3 PCIe 3
+8 pex0 PCIe 0
+9 usb3h0 USB3 Host 0
+17 sdio SDIO
+22 xor0 XOR 0
+28 xor1 XOR 1
+
The following is a list of provided IDs for Armada XP:
ID Clock Peripheral
-----------------------------------
@@ -152,6 +164,7 @@ Required properties:
"marvell,armada-370-gating-clock" - for Armada 370 SoC clock gating
"marvell,armada-375-gating-clock" - for Armada 375 SoC clock gating
"marvell,armada-380-gating-clock" - for Armada 380/385 SoC clock gating
+ "marvell,armada-390-gating-clock" - for Armada 39x SoC clock gating
"marvell,armada-xp-gating-clock" - for Armada XP SoC clock gating
"marvell,dove-gating-clock" - for Dove SoC clock gating
"marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating
diff --git a/Documentation/devicetree/bindings/clock/pistachio-clock.txt b/Documentation/devicetree/bindings/clock/pistachio-clock.txt
new file mode 100644
index 000000000000..868db499eed2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/pistachio-clock.txt
@@ -0,0 +1,123 @@
+Imagination Technologies Pistachio SoC clock controllers
+========================================================
+
+Pistachio has four clock controllers (core clock, peripheral clock, peripheral
+general control, and top general control) which are instantiated individually
+from the device-tree.
+
+External clocks:
+----------------
+
+There are three external inputs to the clock controllers which should be
+defined with the following clock-output-names:
+- "xtal": External 52Mhz oscillator (required)
+- "audio_clk_in": Alternate audio reference clock (optional)
+- "enet_clk_in": Alternate ethernet PHY clock (optional)
+
+Core clock controller:
+----------------------
+
+The core clock controller generates clocks for the CPU, RPU (WiFi + BT
+co-processor), audio, and several peripherals.
+
+Required properties:
+- compatible: Must be "img,pistachio-clk".
+- reg: Must contain the base address and length of the core clock controller.
+- #clock-cells: Must be 1. The single cell is the clock identifier.
+ See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
+- clocks: Must contain an entry for each clock in clock-names.
+- clock-names: Must include "xtal" (see "External clocks") as well as
+ "audio_clk_in_gate" and "enet_clk_in_gate", which are generated by the
+ top-level general control.
+
+Example:
+ clk_core: clock-controller@18144000 {
+ compatible = "img,pistachio-clk";
+ reg = <0x18144000 0x800>;
+ clocks = <&xtal>, <&cr_top EXT_CLK_AUDIO_IN>,
+ <&cr_top EXT_CLK_ENET_IN>;
+ clock-names = "xtal", "audio_clk_in_gate", "enet_clk_in_gate";
+
+ #clock-cells = <1>;
+ };
+
+Peripheral clock controller:
+----------------------------
+
+The peripheral clock controller generates clocks for the DDR, ROM, and other
+peripherals. The peripheral system clock ("periph_sys") generated by the core
+clock controller is the input clock to the peripheral clock controller.
+
+Required properties:
+- compatible: Must be "img,pistachio-clk-periph".
+- reg: Must contain the base address and length of the peripheral clock
+ controller.
+- #clock-cells: Must be 1. The single cell is the clock identifier.
+ See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
+- clocks: Must contain an entry for each clock in clock-names.
+- clock-names: Must include "periph_sys", the peripheral system clock generated
+ by the core clock controller.
+
+Example:
+ clk_periph: clock-controller@18144800 {
+ compatible = "img,pistachio-clk-periph";
+ reg = <0x18144800 0x800>;
+ clocks = <&clk_core CLK_PERIPH_SYS>;
+ clock-names = "periph_sys";
+
+ #clock-cells = <1>;
+ };
+
+Peripheral general control:
+---------------------------
+
+The peripheral general control block generates system interface clocks and
+resets for various peripherals. It also contains miscellaneous peripheral
+control registers. The system clock ("sys") generated by the peripheral clock
+controller is the input clock to this block.
+
+Required properties:
+- compatible: Must include "img,pistachio-cr-periph" and "syscon".
+- reg: Must contain the base address and length of the peripheral general
+ control registers.
+- #clock-cells: Must be 1. The single cell is the clock identifier.
+ See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
+- clocks: Must contain an entry for each clock in clock-names.
+- clock-names: Must include "sys", the system clock generated by the peripheral
+ clock controller.
+
+Example:
+ cr_periph: syscon@18148000 {
+ compatible = "img,pistachio-cr-periph", "syscon";
+ reg = <0x18148000 0x1000>;
+ clocks = <&clk_periph PERIPH_CLK_PERIPH_SYS>;
+ clock-names = "sys";
+
+ #clock-cells = <1>;
+ };
+
+Top-level general control:
+--------------------------
+
+The top-level general control block contains miscellaneous control registers and
+gates for the external clocks "audio_clk_in" and "enet_clk_in".
+
+Required properties:
+- compatible: Must include "img,pistachio-cr-top" and "syscon".
+- reg: Must contain the base address and length of the top-level
+ control registers.
+- clocks: Must contain an entry for each clock in clock-names.
+- clock-names: Two optional clocks, "audio_clk_in" and "enet_clk_in" (see
+ "External clocks").
+- #clock-cells: Must be 1. The single cell is the clock identifier.
+ See dt-bindings/clock/pistachio-clk.h for the list of valid identifiers.
+
+Example:
+ cr_top: syscon@18149000 {
+ compatible = "img,pistachio-cr-top", "syscon";
+ reg = <0x18149000 0x200>;
+ clocks = <&audio_refclk>, <&ext_enet_in>;
+ clock-names = "audio_clk_in", "enet_clk_in";
+
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/pwm-clock.txt b/Documentation/devicetree/bindings/clock/pwm-clock.txt
new file mode 100644
index 000000000000..83db876b3b90
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/pwm-clock.txt
@@ -0,0 +1,26 @@
+Binding for an external clock signal driven by a PWM pin.
+
+This binding uses the common clock binding[1] and the common PWM binding[2].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/pwm/pwm.txt
+
+Required properties:
+- compatible : shall be "pwm-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- pwms : from common PWM binding; this determines the clock frequency
+ via the period given in the PWM specifier.
+
+Optional properties:
+- clock-output-names : From common clock binding.
+- clock-frequency : Exact output frequency, in case the PWM period
+ is not exact but was rounded to nanoseconds.
+
+Example:
+ clock {
+ compatible = "pwm-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "mipi_mclk";
+ pwms = <&pwm2 0 40>; /* 1 / 40 ns = 25 MHz */
+ };
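+
+For reference, an illustrative helper (an assumption, not part of the binding)
+showing the arithmetic in the comment above: the clock rate is the reciprocal
+of the PWM period.
+
+	#include <linux/time.h>	/* NSEC_PER_SEC */
+
+	/* A 40 ns period gives 1 / 40 ns = 25000000 Hz, matching clock-frequency. */
+	static unsigned long pwm_clock_rate(unsigned int period_ns)
+	{
+		return NSEC_PER_SEC / period_ns;
+	}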
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index aba3d254e037..54c23f34f194 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -8,6 +8,7 @@ Required properties :
"qcom,gcc-apq8084"
"qcom,gcc-ipq8064"
"qcom,gcc-msm8660"
+ "qcom,gcc-msm8916"
"qcom,gcc-msm8960"
"qcom,gcc-msm8974"
"qcom,gcc-msm8974pro"
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt
new file mode 100644
index 000000000000..2f3747fdcf1c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt
@@ -0,0 +1,25 @@
+* Renesas R8A7778 Clock Pulse Generator (CPG)
+
+The CPG generates core clocks for the R8A7778. It includes two PLLs and
+several fixed-ratio dividers.
+
+Required Properties:
+
+ - compatible: Must be "renesas,r8a7778-cpg-clocks"
+ - reg: Base address and length of the memory resource used by the CPG
+ - #clock-cells: Must be 1
+ - clock-output-names: The names of the clocks. Supported clocks are
+ "plla", "pllb", "b", "out", "p", "s", and "s1".
+
+
+Example
+-------
+
+ cpg_clocks: cpg_clocks@ffc80000 {
+ compatible = "renesas,r8a7778-cpg-clocks";
+ reg = <0xffc80000 0x80>;
+ #clock-cells = <1>;
+ clocks = <&extal_clk>;
+ clock-output-names = "plla", "pllb", "b",
+ "out", "p", "s", "s1";
+ };
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5351.txt b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
index c40711e8e8f7..28b28309f535 100644
--- a/Documentation/devicetree/bindings/clock/silabs,si5351.txt
+++ b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
@@ -17,7 +17,8 @@ Required properties:
- #clock-cells: from common clock binding; shall be set to 1.
- clocks: from common clock binding; list of parent clock
handles, shall be xtal reference clock or xtal and clkin for
- si5351c only.
+ si5351c only. Corresponding clock input names are "xtal" and
+ "clkin" respectively.
- #address-cells: shall be set to 1.
- #size-cells: shall be set to 0.
@@ -71,6 +72,7 @@ i2c-master-node {
/* connect xtal input to 25MHz reference */
clocks = <&ref25>;
+ clock-names = "xtal";
/* connect xtal input as source of pll0 and pll1 */
silabs,pll-source = <0 0>, <1 0>;
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index 60b44285250d..4fa11af3d378 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -20,6 +20,7 @@ Required properties:
"allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
"allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
"allwinner,sun4i-a10-ahb-clk" - for the AHB clock
+ "allwinner,sun5i-a13-ahb-clk" - for the AHB clock on A13
"allwinner,sun9i-a80-ahb-clk" - for the AHB bus clocks on A80
"allwinner,sun4i-a10-ahb-gates-clk" - for the AHB gates on A10
"allwinner,sun5i-a13-ahb-gates-clk" - for the AHB gates on A13
@@ -66,6 +67,8 @@ Required properties:
"allwinner,sun4i-a10-usb-clk" - for usb gates + resets on A10 / A20
"allwinner,sun5i-a13-usb-clk" - for usb gates + resets on A13
"allwinner,sun6i-a31-usb-clk" - for usb gates + resets on A31
+ "allwinner,sun9i-a80-usb-mod-clk" - for usb gates + resets on A80
+ "allwinner,sun9i-a80-usb-phy-clk" - for usb phy gates + resets on A80
Required properties for all clocks:
- reg : shall be the control register address for the clock.
diff --git a/Documentation/devicetree/bindings/common-properties.txt b/Documentation/devicetree/bindings/common-properties.txt
new file mode 100644
index 000000000000..3193979b1d05
--- /dev/null
+++ b/Documentation/devicetree/bindings/common-properties.txt
@@ -0,0 +1,60 @@
+Common properties
+
+The ePAPR specification does not define any properties related to hardware
+byteswapping, but endianness issues show up frequently in porting Linux to
+different machine types. This document attempts to provide a consistent
+way of handling byteswapping across drivers.
+
+Optional properties:
+ - big-endian: Boolean; force big endian register accesses
+ unconditionally (e.g. ioread32be/iowrite32be). Use this if you
+ know the peripheral always needs to be accessed in BE mode.
+ - little-endian: Boolean; force little endian register accesses
+ unconditionally (e.g. readl/writel). Use this if you know the
+ peripheral always needs to be accessed in LE mode.
+ - native-endian: Boolean; always use register accesses matched to the
+ endianness of the kernel binary (e.g. LE vmlinux -> readl/writel,
+ BE vmlinux -> ioread32be/iowrite32be). In this case no byteswaps
+ will ever be performed. Use this if the hardware "self-adjusts"
+ register endianness based on the CPU's configured endianness.
+
+If a binding supports these properties, then the binding should also
+specify the default behavior if none of these properties are present.
+In such cases, little-endian is the preferred default, but it is not
+a requirement. The of_device_is_big_endian() and of_fdt_is_big_endian()
+helper functions do assume that little-endian is the default, because
+most existing (PCI-based) drivers implicitly default to LE by using
+readl/writel for MMIO accesses.
+
+Examples:
+Scenario 1 : CPU in LE mode & device in LE mode.
+dev: dev@40031000 {
+ compatible = "name";
+ reg = <0x40031000 0x1000>;
+ ...
+ native-endian;
+};
+
+Scenario 2 : CPU in LE mode & device in BE mode.
+dev: dev@40031000 {
+ compatible = "name";
+ reg = <0x40031000 0x1000>;
+ ...
+ big-endian;
+};
+
+Scenario 3 : CPU in BE mode & device in BE mode.
+dev: dev@40031000 {
+ compatible = "name";
+ reg = <0x40031000 0x1000>;
+ ...
+ native-endian;
+};
+
+Scenario 4 : CPU in BE mode & device in LE mode.
+dev: dev@40031000 {
+ compatible = "name";
+ reg = <0x40031000 0x1000>;
+ ...
+ little-endian;
+};
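+
+Driver-side illustration (a sketch assuming the little-endian default described
+above; the helper name example_read32 is made up): of_device_is_big_endian()
+already folds the three properties together, so an MMIO accessor can be chosen
+like this.
+
+	#include <linux/io.h>
+	#include <linux/of.h>
+
+	/* "big-endian", or "native-endian" on a BE kernel, selects the BE accessor. */
+	static u32 example_read32(struct device_node *np, void __iomem *addr)
+	{
+		if (of_device_is_big_endian(np))
+			return ioread32be(addr);
+		return readl(addr);	/* little-endian default */
+	}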
diff --git a/Documentation/devicetree/bindings/cris/axis.txt b/Documentation/devicetree/bindings/cris/axis.txt
new file mode 100644
index 000000000000..d209ca2a47c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/cris/axis.txt
@@ -0,0 +1,9 @@
+Axis Communications AB
+ARTPEC series SoC Device Tree Bindings
+
+
+CRISv32 based SoCs are ETRAX FS and ARTPEC-3:
+
+ - compatible = "axis,crisv32";
+
+
diff --git a/Documentation/devicetree/bindings/cris/boards.txt b/Documentation/devicetree/bindings/cris/boards.txt
new file mode 100644
index 000000000000..533dd273ccf7
--- /dev/null
+++ b/Documentation/devicetree/bindings/cris/boards.txt
@@ -0,0 +1,8 @@
+Boards based on the CRIS SoCs:
+
+Required root node properties:
+ - compatible: should be one or more of the following:
+ - "axis,dev88" - for Axis devboard 88 with ETRAX FS
+
+Optional:
+
diff --git a/Documentation/devicetree/bindings/cris/interrupts.txt b/Documentation/devicetree/bindings/cris/interrupts.txt
new file mode 100644
index 000000000000..e8b123b0a5e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/cris/interrupts.txt
@@ -0,0 +1,23 @@
+* CRISv32 Interrupt Controller
+
+Interrupt controller for the CRISv32 SoCs.
+
+Main node required properties:
+
+- compatible : should be:
+ "axis,crisv32-intc"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The type shall be a <u32> and the value shall be 1.
+- reg: physical base address and size of the intc registers map.
+
+Example:
+
+ intc: interrupt-controller {
+ compatible = "axis,crisv32-intc";
+ reg = <0xb001c000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+
diff --git a/Documentation/devicetree/bindings/crypto/img-hash.txt b/Documentation/devicetree/bindings/crypto/img-hash.txt
new file mode 100644
index 000000000000..91a3d757d641
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/img-hash.txt
@@ -0,0 +1,27 @@
+Imagination Technologies hardware hash accelerator
+
+The hash accelerator provides hardware hashing acceleration for
+SHA1, SHA224, SHA256 and MD5 hashes.
+
+Required properties:
+
+- compatible : "img,hash-accelerator"
+- reg : Offset and length of the register set for the module, and the DMA port
+- interrupts : The designated IRQ line for the hashing module.
+- dmas : DMA specifier as per Documentation/devicetree/bindings/dma/dma.txt
+- dma-names : Should be "tx"
+- clocks : Clock specifiers
+- clock-names : "sys" Used to clock the hash block registers
+ "hash" Used to clock data through the accelerator
+
+Example:
+
+ hash: hash@18149600 {
+ compatible = "img,hash-accelerator";
+ reg = <0x18149600 0x100>, <0x18101100 0x4>;
+ interrupts = <GIC_SHARED 59 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dma 8 0xffffffff 0>;
+ dma-names = "tx";
+ clocks = <&cr_periph SYS_CLK_HASH>, <&clk_periph PERIPH_CLK_ROM>;
+ clock-names = "sys", "hash";
+ };
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
new file mode 100644
index 000000000000..d3058768b23d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -0,0 +1,47 @@
+Applied Micro X-Gene SoC DMA nodes
+
+DMA nodes are defined to describe on-chip DMA interfaces in
+APM X-Gene SoC.
+
+Required properties for DMA interfaces:
+- compatible: Should be "apm,xgene-dma".
+- device_type: set to "dma".
+- reg: Address and length of the register set for the device.
+ It contains the register address spaces in the following order:
+ 1st - DMA control and status register address space.
+ 2nd - Descriptor ring control and status register address space.
+ 3rd - Descriptor ring command register address space.
+ 4th - SoC efuse register address space.
+- interrupts: The DMA engine has 5 interrupt sources. The 1st interrupt is the
+ DMA error reporting interrupt. The 2nd, 3rd, 4th and 5th interrupts are the
+ completion interrupts, one per DMA channel.
+- clocks: Reference to the clock entry.
+
+Optional properties:
+- dma-coherent : Present if dma operations are coherent
+
+Example:
+ dmaclk: dmaclk@1f27c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f27c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "dmaclk";
+ };
+
+ dma: dma@1f270000 {
+ compatible = "apm,xgene-storm-dma";
+ device_type = "dma";
+ reg = <0x0 0x1f270000 0x0 0x10000>,
+ <0x0 0x1f200000 0x0 0x10000>,
+ <0x0 0x1b008000 0x0 0x2000>,
+ <0x0 0x1054a000 0x0 0x100>;
+ interrupts = <0x0 0x82 0x4>,
+ <0x0 0xb8 0x4>,
+ <0x0 0xb9 0x4>,
+ <0x0 0xba 0x4>,
+ <0x0 0xbb 0x4>;
+ dma-coherent;
+ clocks = <&dmaclk 0>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
index 1396078d15ac..1396078d15ac 100644
--- a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
+++ b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
index a4873e5e3e36..e30e184f50c7 100644
--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
80 81 68 69
70 71 72 73
74 75 76 77>;
- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
"saif0", "saif1", "i2c0", "i2c1",
"auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
"auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
new file mode 100644
index 000000000000..f25feee62b15
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
@@ -0,0 +1,56 @@
+* Ingenic JZ4780 DMA Controller
+
+Required properties:
+
+- compatible: Should be "ingenic,jz4780-dma"
+- reg: Should contain the DMA controller registers location and length.
+- interrupts: Should contain the interrupt specifier of the DMA controller.
+- interrupt-parent: Should be the phandle of the interrupt controller that
+ delivers interrupts to the DMA controller.
+- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
+- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
+ DMA clients (see below).
+
+Optional properties:
+
+- ingenic,reserved-channels: Bitmask of channels to reserve for devices that
+ need a specific channel. These channels will only be assigned when explicitly
+ requested by a client. The primary use for this is channels 0 and 1, which
+ can be configured to have special behaviour for NAND/BCH when using
+ programmable firmware.
+
+Example:
+
+dma: dma@13420000 {
+ compatible = "ingenic,jz4780-dma";
+ reg = <0x13420000 0x10000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <10>;
+
+ clocks = <&cgu JZ4780_CLK_PDMA>;
+
+ #dma-cells = <2>;
+
+ ingenic,reserved-channels = <0x3>;
+};
+
+DMA clients must use the format described in dma.txt, giving a phandle to the
+DMA controller plus the following 2 integer cells:
+
+1. Request type: The DMA request type for transfers to/from the device on
+ the allocated channel, as defined in the SoC documentation.
+
+2. Channel: If set to 0xffffffff, any available channel will be allocated for
+ the client. Otherwise, the exact channel specified will be used. The channel
+ should be reserved on the DMA controller using the ingenic,reserved-channels
+ property.
+
+Example:
+
+uart0: serial@10030000 {
+ ...
+ dmas = <&dma 0x14 0xffffffff
+ &dma 0x15 0xffffffff>;
+ dma-names = "tx", "rx";
+ ...
+};
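+
+For illustration only (not part of the binding; the function name is made up):
+a client driver such as the uart0 example above could request the channels
+listed in its dma-names property.
+
+	#include <linux/device.h>
+	#include <linux/dmaengine.h>
+	#include <linux/errno.h>
+
+	static int example_request_dma(struct device *dev,
+				       struct dma_chan **tx, struct dma_chan **rx)
+	{
+		/* The names match the dma-names property of the client node. */
+		*tx = dma_request_slave_channel(dev, "tx");
+		*rx = dma_request_slave_channel(dev, "rx");
+		if (!*tx || !*rx)
+			return -ENODEV;	/* simplified: no release of a partial grab */
+		return 0;
+	}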
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
index f8c3311b7153..1c9d48ea4914 100644
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: must be one of the following:
* "qcom,bam-v1.4.0" for MSM8974, APQ8074 and APQ8084
* "qcom,bam-v1.3.0" for APQ8064, IPQ8064 and MSM8960
+ * "qcom,bam-v1.7.0" for MSM8916
- reg: Address range for DMA registers
- interrupts: Should contain the one interrupt shared by all channels
- #dma-cells: must be <1>, the cell in the dmas property of the client device
diff --git a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
deleted file mode 100644
index 61bca509d7b9..000000000000
--- a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* R-Car Audio DMAC peri peri Device Tree bindings
-
-Required properties:
-- compatible: should be "renesas,rcar-audmapp"
-- #dma-cells: should be <1>, see "dmas" property below
-
-Example:
- audmapp: audio-dma-pp@0xec740000 {
- compatible = "renesas,rcar-audmapp";
- #dma-cells = <1>;
-
- reg = <0 0xec740000 0 0x200>;
- };
-
-
-* DMA client
-
-Required properties:
-- dmas: a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs.
- where SRS/DRS are specified in the SoC manual.
- It will be written into PDMACHCR as high 16-bit parts.
-- dma-names: a list of DMA channel names, one per "dmas" entry
-
-Example:
-
- dmas = <&audmapp 0x2d00
- &audmapp 0x3700>;
- dma-names = "src0_ssiu0",
- "dvc0_ssiu0";
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
new file mode 100644
index 000000000000..040f365954cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -0,0 +1,37 @@
+* Renesas USB DMA Controller Device Tree bindings
+
+Required Properties:
+- compatible: must contain "renesas,usb-dmac"
+- reg: base address and length of the registers block for the DMAC
+- interrupts: interrupt specifiers for the DMAC, one for each entry in
+ interrupt-names.
+- interrupt-names: one entry per channel, named "ch%u", where %u is the
+ channel number ranging from zero to the number of channels minus one.
+- clocks: a list of phandle + clock-specifier pairs.
+- #dma-cells: must be <1>, the cell specifies the channel number of the DMAC
+ port connected to the DMA client.
+- dma-channels: number of DMA channels
+
+Example: R8A7790 (R-Car H2) USB-DMACs
+
+ usb_dmac0: dma-controller@e65a0000 {
+ compatible = "renesas,usb-dmac";
+ reg = <0 0xe65a0000 0 0x100>;
+ interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
+ 0 109 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1";
+ clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>;
+ #dma-cells = <1>;
+ dma-channels = <2>;
+ };
+
+ usb_dmac1: dma-controller@e65b0000 {
+ compatible = "renesas,usb-dmac";
+ reg = <0 0xe65b0000 0 0x100>;
+ interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH
+ 0 110 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1";
+ clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>;
+ #dma-cells = <1>;
+ dma-channels = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/imx/ldb.txt b/Documentation/devicetree/bindings/drm/imx/ldb.txt
index 443bcb6134d5..9a21366436f6 100644
--- a/Documentation/devicetree/bindings/drm/imx/ldb.txt
+++ b/Documentation/devicetree/bindings/drm/imx/ldb.txt
@@ -44,23 +44,30 @@ Optional properties:
LVDS Channel
============
-Each LVDS Channel has to contain a display-timings node that describes the
-video timings for the connected LVDS display. For detailed information, also
-have a look at Documentation/devicetree/bindings/video/display-timing.txt.
+Each LVDS Channel has to contain either an of-graph link to a panel device node
+or a display-timings node that describes the video timings for the connected
+LVDS display, together with the fsl,data-mapping and fsl,data-width properties.
Required properties:
- reg : should be <0> or <1>
+ - port: Input and output port nodes with endpoint definitions as defined in
+ Documentation/devicetree/bindings/graph.txt.
+ On i.MX5, the internal two-input-multiplexer is used. Due to hardware
+ limitations, only one input port (port@[0,1]) can be used for each channel
+ (lvds-channel@[0,1], respectively).
+ On i.MX6, there should be four input ports (port@[0-3]) that correspond
+ to the four LVDS multiplexer inputs.
+ A single output port (port@2 on i.MX5, port@4 on i.MX6) must be connected
+ to a panel input port. Optionally, the output port can be left out if
+ display-timings are used instead.
+
+Optional properties (required if display-timings are used):
+ - display-timings : A node that describes the display timings as defined in
+ Documentation/devicetree/bindings/video/display-timing.txt.
- fsl,data-mapping : should be "spwg" or "jeida"
This describes how the color bits are laid out in the
serialized LVDS signal.
- fsl,data-width : should be <18> or <24>
- - port: A port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- On i.MX5, the internal two-input-multiplexer is used.
- Due to hardware limitations, only one port (port@[0,1])
- can be used for each channel (lvds-channel@[0,1], respectively)
- On i.MX6, there should be four ports (port@[0-3]) that correspond
- to the four LVDS multiplexer inputs.
example:
@@ -73,23 +80,21 @@ ldb: ldb@53fa8008 {
#size-cells = <0>;
compatible = "fsl,imx53-ldb";
gpr = <&gpr>;
- clocks = <&clks 122>, <&clks 120>,
- <&clks 115>, <&clks 116>,
- <&clks 123>, <&clks 85>;
+ clocks = <&clks IMX5_CLK_LDB_DI0_SEL>,
+ <&clks IMX5_CLK_LDB_DI1_SEL>,
+ <&clks IMX5_CLK_IPU_DI0_SEL>,
+ <&clks IMX5_CLK_IPU_DI1_SEL>,
+ <&clks IMX5_CLK_LDB_DI0_GATE>,
+ <&clks IMX5_CLK_LDB_DI1_GATE>;
clock-names = "di0_pll", "di1_pll",
"di0_sel", "di1_sel",
"di0", "di1";
+ /* Using an of-graph endpoint link to connect the panel */
lvds-channel@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
- fsl,data-mapping = "spwg";
- fsl,data-width = <24>;
-
- display-timings {
- /* ... */
- };
port@0 {
reg = <0>;
@@ -98,8 +103,17 @@ ldb: ldb@53fa8008 {
remote-endpoint = <&ipu_di0_lvds0>;
};
};
+
+ port@2 {
+ reg = <2>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
};
+ /* Using display-timings and fsl,data-mapping/width instead */
lvds-channel@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -120,3 +134,13 @@ ldb: ldb@53fa8008 {
};
};
};
+
+panel: lvds-panel {
+ /* ... */
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
new file mode 100644
index 000000000000..af0b903de293
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -0,0 +1,18 @@
+USB GPIO Extcon device
+
+This is a virtual device used to generate USB cable states from the USB ID pin
+connected to a GPIO pin.
+
+Required properties:
+- compatible: Should be "linux,extcon-usb-gpio"
+- id-gpio: gpio for USB ID pin. See gpio binding.
+
+Example: An extcon-usb-gpio node, as used in dra7-evm.dts, is listed below:
+ extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&gpio6 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ &omap_dwc3_1 {
+ extcon = <&extcon_usb1>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt b/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
index 4a63bc96b687..4a63bc96b687 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt
+++ b/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
diff --git a/Documentation/devicetree/bindings/gpio/gpio-altera.txt b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
new file mode 100644
index 000000000000..12f50149e1ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
@@ -0,0 +1,43 @@
+Altera GPIO controller bindings
+
+Required properties:
+- compatible:
+ - "altr,pio-1.0"
+- reg: Physical base address and length of the controller's registers.
+- #gpio-cells : Should be 2
+ - The first cell is the gpio offset number.
+ - The second cell is reserved and is currently unused.
+- gpio-controller : Marks the device node as a GPIO controller.
+- interrupt-controller: Mark the device node as an interrupt controller
+- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
+ - The first cell is the GPIO offset number within the GPIO controller.
+- interrupts: Specify the interrupt.
+- altr,interrupt-trigger: Specifies the interrupt trigger type with which the
+ GPIO hardware is synthesized. This field is required if the Altera GPIO
+ controller in use has IRQ support enabled, because the interrupt type is
+ hardware synthesized rather than software controlled. Required if the GPIO
+ is used as an interrupt controller. The value is defined in
+ <dt-bindings/interrupt-controller/irq.h>
+ Only the following flags are supported:
+ IRQ_TYPE_EDGE_RISING
+ IRQ_TYPE_EDGE_FALLING
+ IRQ_TYPE_EDGE_BOTH
+ IRQ_TYPE_LEVEL_HIGH
+
+Optional properties:
+- altr,ngpio: Width of the GPIO bank. This defines how many pins the
+ GPIO device has. Ranges between 1-32. Optional and defaults to 32 if not
+ specified.
+
+Example:
+
+gpio_altr: gpio@0xff200000 {
+ compatible = "altr,pio-1.0";
+ reg = <0xff200000 0x10>;
+ interrupts = <0 45 4>;
+ altr,ngpio = <32>;
+ altr,interrupt-trigger = <IRQ_TYPE_EDGE_RISING>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index f7a158d85862..5788d5cf1252 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -116,6 +116,29 @@ Every GPIO controller node must contain both an empty "gpio-controller"
property, and a #gpio-cells integer property, which indicates the number of
cells in a gpio-specifier.
+The GPIO chip may contain GPIO hog definitions. GPIO hogging is a mechanism
+providing automatic GPIO request and configuration as part of the
+gpio-controller's driver probe function.
+
+Each GPIO hog definition is represented as a child node of the GPIO controller.
+Required properties:
+- gpio-hog: A property specifying that this child node represents a GPIO hog.
+- gpios: Store the GPIO information (id, flags, ...). Shall contain the
+ number of cells specified in its parent node (GPIO controller
+ node).
+Only one of the following properties is applied: they are scanned in the order
+shown below, and when multiple properties are present the first match is taken
+as the intended configuration.
+- input: A property specifying to set the GPIO direction as input.
+- output-low: A property specifying to set the GPIO direction as output with
+ the value low.
+- output-high: A property specifying to set the GPIO direction as output with
+ the value high.
+
+Optional properties:
+- line-name: The GPIO label name. If not present the node name is used.
+
Example of two SOC GPIO banks defined as gpio-controller nodes:
qe_pio_a: gpio-controller@1400 {
@@ -123,6 +146,13 @@ Example of two SOC GPIO banks defined as gpio-controller nodes:
reg = <0x1400 0x18>;
gpio-controller;
#gpio-cells = <2>;
+
+ line_b {
+ gpio-hog;
+ gpios = <6 0>;
+ output-low;
+ line-name = "foo-bar-gpio";
+ };
};
qe_pio_e: gpio-controller@1460 {
diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
index 67a2e4e414a5..98d198396956 100644
--- a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
@@ -12,7 +12,7 @@ Required properties:
gpio_mux.
- interrupt-names : Should be the names of irq resources. Each interrupt
uses its own interrupt name, so there should be as many interrupt names
- as referenced interrups.
+ as referenced interrupts.
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells: Specifies the number of cells needed to encode an
interrupt source.
diff --git a/Documentation/devicetree/bindings/hwrng/brcm,iproc-rng200.txt b/Documentation/devicetree/bindings/hwrng/brcm,iproc-rng200.txt
new file mode 100644
index 000000000000..e25a456664b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwrng/brcm,iproc-rng200.txt
@@ -0,0 +1,12 @@
+HWRNG support for the iproc-rng200 driver
+
+Required properties:
+- compatible : "brcm,iproc-rng200"
+- reg : base address and size of control register block
+
+Example:
+
+rng {
+ compatible = "brcm,iproc-rng200";
+ reg = <0x18032000 0x28>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-bcm-kona.txt b/Documentation/devicetree/bindings/i2c/brcm,kona-i2c.txt
index 1b87b741fa8e..1b87b741fa8e 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-bcm-kona.txt
+++ b/Documentation/devicetree/bindings/i2c/brcm,kona-i2c.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
index 2dc935b4113d..a4e1cbc810c1 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
@@ -10,6 +10,9 @@ Required properties:
Recommended properties :
- interrupts : standard interrupt property.
- clock-frequency : desired I2C bus clock frequency in Hz.
+- ti,has-pfunc: boolean; if defined, it indicates that the SoC supports PFUNC
+ registers. PFUNC registers allow switching the I2C pins to function as
+ GPIOs, so they can be toggled manually.
Example (enbw_cmc board):
i2c@1c22000 {
diff --git a/Documentation/devicetree/bindings/i2c/i2c-digicolor.txt b/Documentation/devicetree/bindings/i2c/i2c-digicolor.txt
new file mode 100644
index 000000000000..457a098d4f7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-digicolor.txt
@@ -0,0 +1,25 @@
+Conexant Digicolor I2C controller
+
+Required properties:
+ - compatible: must be "cnxt,cx92755-i2c"
+ - reg: physical address and length of the device registers
+ - interrupts: a single interrupt specifier
+ - clocks: clock for the device
+ - #address-cells: should be <1>
+ - #size-cells: should be <0>
+
+Optional properties:
+- clock-frequency: the desired I2C bus clock frequency in Hz; in
+ absence of this property the default value is used (100 kHz).
+
+Example:
+
+ i2c: i2c@f0000120 {
+ compatible = "cnxt,cx92755-i2c";
+ reg = <0xf0000120 0x10>;
+ interrupts = <28>;
+ clocks = <&main_clk>;
+ clock-frequency = <100000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
new file mode 100644
index 000000000000..231e4cc4008c
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
@@ -0,0 +1,35 @@
+* Ingenic JZ4780 I2C Bus controller
+
+Required properties:
+- compatible: should be "ingenic,jz4780-i2c"
+- reg: Should contain the address & size of the I2C controller registers.
+- interrupts: Should specify the interrupt provided by parent.
+- clocks: Should contain a single clock specifier for the JZ4780 I2C clock.
+- clock-frequency: desired I2C bus clock frequency in Hz.
+
+Recommended properties:
+- pinctrl-names: should be "default";
+- pinctrl-0: phandle to pinctrl function
+
+Optional properties:
+- interrupt-parent: Should be the phandle of the interrupt controller that
+ delivers interrupts to the I2C block.
+
+Example
+
+/ {
+ i2c4: i2c4@0x10054000 {
+ compatible = "ingenic,jz4780-i2c";
+ reg = <0x10054000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <56>;
+
+ clocks = <&cgu JZ4780_CLK_SMB4>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c4_data>;
+
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xlp9xx.txt b/Documentation/devicetree/bindings/i2c/i2c-xlp9xx.txt
new file mode 100644
index 000000000000..f818ef507ab7
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-xlp9xx.txt
@@ -0,0 +1,22 @@
+Device tree configuration for the I2C controller on the XLP9xx/5xx SoC
+
+Required properties:
+- compatible : should be "netlogic,xlp980-i2c"
+- reg : bus address start and address range size of device
+- interrupts : interrupt number
+
+Optional properties:
+- clock-frequency : frequency of bus clock in Hz
+ Defaults to 100 kHz when the property is not specified
+
+Example:
+
+i2c0: i2c@113100 {
+ compatible = "netlogic,xlp980-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x113100 0x100>;
+ clock-frequency = <400000>;
+ interrupts = <30>;
+ interrupt-parent = <&pic>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index aaa8325004d2..ad0c4ac916dd 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -77,6 +77,7 @@ nxp,pca9556 Octal SMBus and I2C registered interface
nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset
nxp,pcf8563 Real-time clock/calendar
nxp,pcf85063 Tiny Real-Time Clock
+oki,ml86v7667 OKI ML86V7667 video decoder
ovti,ov5642 OV5642: Color CMOS QSXGA (5-megapixel) Image Sensor with OmniBSI and Embedded TrueFocus
pericom,pt7c4338 Real-time Clock Module
plx,pex8648 48-Lane, 12-Port PCI Express Gen 2 (5.0 GT/s) Switch
@@ -89,6 +90,7 @@ ricoh,rv5c386 I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
ricoh,rv5c387a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
sii,s35390a 2-wire CMOS real-time clock
+skyworks,sky81452 Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
st-micro,24c256 i2c serial eeprom (24cxx)
stm,m41t00 Serial Access TIMEKEEPER
stm,m41t62 Serial real-time clock (RTC) with alarm
diff --git a/Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt b/Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt
new file mode 100644
index 000000000000..b77f50bd6403
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/brcm,bcm-keypad.txt
@@ -0,0 +1,108 @@
+* Broadcom Keypad Controller device tree bindings
+
+Broadcom Keypad controller is used to interface a SoC with a matrix-type
+keypad device. The keypad controller supports multiple row and column lines.
+A key can be placed at each intersection of a unique row and a unique column.
+The keypad controller can sense a key-press and key-release and report the
+event using an interrupt to the CPU.
+
+This binding is based on the matrix-keymap binding with the following
+changes:
+
+keypad,num-rows and keypad,num-columns are required.
+
+Required SoC Specific Properties:
+- compatible: should be "brcm,bcm-keypad"
+
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+- interrupts: The interrupt number to the CPU.
+
+Board Specific Properties:
+- keypad,num-rows: Number of row lines connected to the keypad
+ controller.
+
+- keypad,num-columns: Number of column lines connected to the
+ keypad controller.
+
+- col-debounce-filter-period: The debounce period for the Column filter.
+
+ KEYPAD_DEBOUNCE_1_ms = 0
+ KEYPAD_DEBOUNCE_2_ms = 1
+ KEYPAD_DEBOUNCE_4_ms = 2
+ KEYPAD_DEBOUNCE_8_ms = 3
+ KEYPAD_DEBOUNCE_16_ms = 4
+ KEYPAD_DEBOUNCE_32_ms = 5
+ KEYPAD_DEBOUNCE_64_ms = 6
+ KEYPAD_DEBOUNCE_128_ms = 7
+
+- status-debounce-filter-period: The debounce period for the Status filter.
+
+ KEYPAD_DEBOUNCE_1_ms = 0
+ KEYPAD_DEBOUNCE_2_ms = 1
+ KEYPAD_DEBOUNCE_4_ms = 2
+ KEYPAD_DEBOUNCE_8_ms = 3
+ KEYPAD_DEBOUNCE_16_ms = 4
+ KEYPAD_DEBOUNCE_32_ms = 5
+ KEYPAD_DEBOUNCE_64_ms = 6
+ KEYPAD_DEBOUNCE_128_ms = 7
+
+- row-output-enabled: An optional property indicating whether the rows or the
+ columns are used as outputs. If specified, the rows are used as the outputs;
+ otherwise the columns are.
+
+- pull-up-enabled: An optional property indicating the keypad scan mode.
+ If specified, the keypad scan pull-up is enabled.
+
+- autorepeat: Boolean, Enable auto repeat feature of Linux input
+ subsystem (optional).
+
+- linux,keymap: The keymap for keys as described in the binding document
+ devicetree/bindings/input/matrix-keymap.txt.
+
+Example:
+#include "dt-bindings/input/input.h"
+
+/ {
+ keypad: keypad@180ac000 {
+ /* Required SoC specific properties */
+ compatible = "brcm,bcm-keypad";
+
+ /* Required Board specific properties */
+ keypad,num-rows = <5>;
+ keypad,num-columns = <5>;
+ status = "okay";
+
+ linux,keymap = <MATRIX_KEY(0x00, 0x02, KEY_F) /* key_forward */
+ MATRIX_KEY(0x00, 0x03, KEY_HOME) /* key_home */
+ MATRIX_KEY(0x00, 0x04, KEY_M) /* key_message */
+ MATRIX_KEY(0x01, 0x00, KEY_A) /* key_contacts */
+ MATRIX_KEY(0x01, 0x01, KEY_1) /* key_1 */
+ MATRIX_KEY(0x01, 0x02, KEY_2) /* key_2 */
+ MATRIX_KEY(0x01, 0x03, KEY_3) /* key_3 */
+ MATRIX_KEY(0x01, 0x04, KEY_S) /* key_speaker */
+ MATRIX_KEY(0x02, 0x00, KEY_P) /* key_phone */
+ MATRIX_KEY(0x02, 0x01, KEY_4) /* key_4 */
+ MATRIX_KEY(0x02, 0x02, KEY_5) /* key_5 */
+ MATRIX_KEY(0x02, 0x03, KEY_6) /* key_6 */
+ MATRIX_KEY(0x02, 0x04, KEY_VOLUMEUP) /* key_vol_up */
+ MATRIX_KEY(0x03, 0x00, KEY_C) /* key_call_log */
+ MATRIX_KEY(0x03, 0x01, KEY_7) /* key_7 */
+ MATRIX_KEY(0x03, 0x02, KEY_8) /* key_8 */
+ MATRIX_KEY(0x03, 0x03, KEY_9) /* key_9 */
+ MATRIX_KEY(0x03, 0x04, KEY_VOLUMEDOWN) /* key_vol_down */
+ MATRIX_KEY(0x04, 0x00, KEY_H) /* key_headset */
+ MATRIX_KEY(0x04, 0x01, KEY_KPASTERISK) /* key_* */
+ MATRIX_KEY(0x04, 0x02, KEY_0) /* key_0 */
+ MATRIX_KEY(0x04, 0x03, KEY_GRAVE) /* key_# */
+ MATRIX_KEY(0x04, 0x04, KEY_MUTE) /* key_mute */
+ >;
+
+ /* Optional board specific properties */
+ col-debounce-filter-period = <5>;
+ row-output-enabled;
+ pull-up-enabled;
+
+ };
+};
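+
+For reference, each linux,keymap cell packs the row, the column and the Linux
+key code into one 32-bit value. The definition below is assumed to match the
+MATRIX_KEY() macro from the dt-bindings input headers; check the header in
+your tree.
+
+	#define MATRIX_KEY(row, col, code) \
+		((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF))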
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
new file mode 100644
index 000000000000..07bf55f6e0b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
@@ -0,0 +1,43 @@
+Qualcomm PM8941 PMIC Power Key
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,pm8941-pwrkey"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: base address of registers for block
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: key change interrupt; The format of the specifier is
+ defined by the binding document describing the node's
+ interrupt parent.
+
+- debounce:
+ Usage: optional
+ Value type: <u32>
+ Definition: time in microseconds that the key must be pressed or released
+ for the state change interrupt to trigger.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <empty>
+ Definition: presence of this property indicates that the KPDPWR_N pin
+ should be configured for pull up.
+
+EXAMPLE
+
+ pwrkey@800 {
+ compatible = "qcom,pm8941-pwrkey";
+ reg = <0x800>;
+ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt b/Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt
new file mode 100644
index 000000000000..34e3382a0659
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt
@@ -0,0 +1,76 @@
+* Broadcom's IPROC Touchscreen Controller
+
+Required properties:
+- compatible: must be "brcm,iproc-touchscreen"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- clocks: The clock provided by the SoC to drive the tsc
+- clock-names: name for the clock
+- interrupts: The touchscreen controller's interrupt
+
+Optional properties:
+- scanning_period: Time between scans. Each step is 1024 us. Valid 1-256.
+- debounce_timeout: Each step is 512 us. Valid 0-255
+- settling_timeout: The settling duration (in ms) is the amount of time
+ the tsc waits to allow the voltage to settle after
+ turning on the drivers in detection mode.
+ Valid values: 0-11
+ 0 = 0.008 ms
+ 1 = 0.01 ms
+ 2 = 0.02 ms
+ 3 = 0.04 ms
+ 4 = 0.08 ms
+ 5 = 0.16 ms
+ 6 = 0.32 ms
+ 7 = 0.64 ms
+ 8 = 1.28 ms
+ 9 = 2.56 ms
+ 10 = 5.12 ms
+ 11 = 10.24 ms
+- touch_timeout: The continuous number of scan periods in which touch is
+ not detected before the controller returns to idle state.
+ Valid values 0-255.
+- average_data: Number of data samples which are averaged before a final
+ data point is placed into the FIFO
+ Valid values 0-7
+ 0 = 1 sample
+ 1 = 2 samples
+ 2 = 4 samples
+ 3 = 8 samples
+ 4 = 16 samples
+ 5 = 32 samples
+ 6 = 64 samples
+ 7 = 128 samples
+- fifo_threshold: Interrupt is generated whenever the number of fifo
+ entries exceeds this value
+ Valid values 0-31
+- touchscreen-size-x: horizontal resolution of touchscreen (in pixels)
+- touchscreen-size-y: vertical resolution of touchscreen (in pixels)
+- touchscreen-fuzz-x: horizontal noise value of the absolute input
+ device (in pixels)
+- touchscreen-fuzz-y: vertical noise value of the absolute input
+ device (in pixels)
+- touchscreen-inverted-x: X axis is inverted (boolean)
+- touchscreen-inverted-y: Y axis is inverted (boolean)
+
+Example:
+
+ touchscreen: tsc@0x180A6000 {
+ compatible = "brcm,iproc-touchscreen";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x180A6000 0x40>;
+ clocks = <&adc_clk>;
+ clock-names = "tsc_clk";
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+
+ scanning_period = <5>;
+ debounce_timeout = <40>;
+ settling_timeout = <7>;
+ touch_timeout = <10>;
+ average_data = <5>;
+ fifo_threshold = <1>;
+ /* Touchscreen is rotated 180 degrees. */
+ touchscreen-inverted-x;
+ touchscreen-inverted-y;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt b/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
new file mode 100644
index 000000000000..d11f8d615b5d
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
@@ -0,0 +1,46 @@
+* ChipOne icn8318 I2C touchscreen controller
+
+Required properties:
+ - compatible : "chipone,icn8318"
+ - reg : I2C slave address of the chip (0x40)
+ - interrupt-parent : a phandle pointing to the interrupt controller
+ serving the interrupt for this chip
+ - interrupts : interrupt specification for the icn8318 interrupt
+ - wake-gpios : GPIO specification for the WAKE input
+ - touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
+ - touchscreen-size-y : vertical resolution of touchscreen (in pixels)
+
+Optional properties:
+ - pinctrl-names : should be "default"
+ - pinctrl-0: : a phandle pointing to the pin settings for the
+ control gpios
+ - touchscreen-fuzz-x : horizontal noise value of the absolute input
+ device (in pixels)
+ - touchscreen-fuzz-y : vertical noise value of the absolute input
+ device (in pixels)
+ - touchscreen-inverted-x : X axis is inverted (boolean)
+ - touchscreen-inverted-y : Y axis is inverted (boolean)
+ - touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
+ Swapping is done after inverting the axis
+
+Example:
+
+i2c@00000000 {
+ /* ... */
+
+ chipone_icn8318@40 {
+ compatible = "chipone,icn8318";
+ reg = <0x40>;
+ interrupt-parent = <&pio>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* EINT9 (PG9) */
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_wake_pin_p66>;
+ wake-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ };
+
+ /* ... */
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
new file mode 100644
index 000000000000..8ba98eec765b
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
@@ -0,0 +1,29 @@
+Device tree bindings for Goodix GT9xx series touchscreen controller
+
+Required properties:
+
+ - compatible : Should be "goodix,gt911"
+ or "goodix,gt9110"
+ or "goodix,gt912"
+ or "goodix,gt927"
+ or "goodix,gt9271"
+ or "goodix,gt928"
+ or "goodix,gt967"
+ - reg : I2C address of the chip. Should be 0x5d or 0x14
+ - interrupt-parent : Interrupt controller to which the chip is connected
+ - interrupts : Interrupt to which the chip is connected
+
+Example:
+
+ i2c@00000000 {
+ /* ... */
+
+ gt928@5d {
+ compatible = "goodix,gt928";
+ reg = <0x5d>;
+ interrupt-parent = <&gpio>;
+ interrupts = <0 0>;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt b/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt
index 433332d3b2ba..89abecd938cb 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sun4i.txt
@@ -2,14 +2,27 @@ sun4i resistive touchscreen controller
--------------------------------------
Required properties:
- - compatible: "allwinner,sun4i-a10-ts" or "allwinner,sun6i-a31-ts"
+ - compatible: "allwinner,sun4i-a10-ts", "allwinner,sun5i-a13-ts" or
+ "allwinner,sun6i-a31-ts"
- reg: mmio address range of the chip
- interrupts: interrupt to which the chip is connected
- #thermal-sensor-cells: shall be 0
Optional properties:
- - allwinner,ts-attached: boolean indicating that an actual touchscreen is
- attached to the controller
+ - allwinner,ts-attached : boolean indicating that an actual touchscreen
+ is attached to the controller
+ - allwinner,tp-sensitive-adjust : integer (4 bits)
+ adjust sensitivity of pen down detection
+ between 0 (least sensitive) and 15
+ (defaults to 15)
+ - allwinner,filter-type : integer (2 bits)
+ select median and averaging filter
+ samples used for median / averaging filter
+ 0: 4/2
+ 1: 5/3
+ 2: 8/4
+ 3: 16/8
+ (defaults to 1)
Example:
@@ -19,4 +32,7 @@ Example:
interrupts = <29>;
allwinner,ts-attached;
#thermal-sensor-cells = <0>;
+ /* sensitive/noisy touch panel */
+ allwinner,tp-sensitive-adjust = <0>;
+ allwinner,filter-type = <3>;
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
new file mode 100644
index 000000000000..5aaa6b3aa90c
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
@@ -0,0 +1,16 @@
+* Semtech SX8654 I2C Touchscreen Controller
+
+Required properties:
+- compatible: must be "semtech,sx8654"
+- reg: i2c slave address
+- interrupt-parent: the phandle for the interrupt controller
+- interrupts: touch controller interrupt
+
+Example:
+
+ sx8654@48 {
+ compatible = "semtech,sx8654";
+ reg = <0x48>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
index d8e06163c54e..ac23caf518ad 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
@@ -16,6 +16,8 @@ Optional properties for Touchscreens:
controller)
- touchscreen-inverted-x : X axis is inverted (boolean)
- touchscreen-inverted-y : Y axis is inverted (boolean)
+ - touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
+ Swapping is done after inverting the axis
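+
+As a hedged sketch (the controller node, compatible string and sizes are
+purely illustrative), a panel whose axes are inverted and then swapped could
+be described as:
+
+	touchscreen@4a {
+		compatible = "vendor,example-ts";
+		reg = <0x4a>;
+		touchscreen-size-x = <1024>;
+		touchscreen-size-y = <600>;
+		touchscreen-inverted-x;
+		touchscreen-inverted-y;
+		touchscreen-swapped-x-y;
+	};
+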
Deprecated properties for Touchscreens:
- x-size : deprecated name for touchscreen-size-x
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
new file mode 100644
index 000000000000..8f48aad50868
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
@@ -0,0 +1,41 @@
+Broadcom BCM3380-style Level 1 / Level 2 interrupt controller
+
+This interrupt controller shows up in various forms on many BCM338x/BCM63xx
+chipsets. It has the following properties:
+
+- outputs a single interrupt signal to its interrupt controller parent
+
+- contains one or more enable/status word pairs, which often appear at
+ different offsets in different blocks
+
+- no atomic set/clear operations
+
+Required properties:
+
+- compatible: should be "brcm,bcm3380-l2-intc"
+- reg: specifies one or more enable/status pairs, in the following format:
+ <enable_reg 0x4 status_reg 0x4>...
+- interrupt-controller: identifies the node as an interrupt controller
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+ source, should be 1.
+- interrupt-parent: specifies the phandle to the parent interrupt controller
+ this one is cascaded from
+- interrupts: specifies the interrupt line in the interrupt-parent controller
+ node, valid values depend on the type of parent interrupt controller
+
+Optional properties:
+
+- brcm,irq-can-wake: if present, this means the L2 controller can be used as a
+ wakeup source for system suspend/resume.
+
+Example:
+
+irq0_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x10000024 0x4 0x1000002c 0x4>,
+ <0x10000020 0x4 0x10000028 0x4>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
new file mode 100644
index 000000000000..cc217b22dccd
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
@@ -0,0 +1,52 @@
+Broadcom BCM7038-style Level 1 interrupt controller
+
+This block is a first level interrupt controller that is typically connected
+directly to one of the HW INT lines on each CPU. Every BCM7xxx set-top chip
+since BCM7038 has contained this hardware.
+
+Key elements of the hardware design include:
+
+- 64, 96, 128, or 160 incoming level IRQ lines
+
+- Most onchip peripherals are wired directly to an L1 input
+
+- A separate instance of the register set for each CPU, allowing individual
+ peripheral IRQs to be routed to any CPU
+
+- Atomic mask/unmask operations
+
+- No polarity/level/edge settings
+
+- No FIFO or priority encoder logic; software is expected to read all
+ 2-5 status words to determine which IRQs are pending
+
+Required properties:
+
+- compatible: should be "brcm,bcm7038-l1-intc"
+- reg: specifies the base physical address and size of the registers;
+ the number of supported IRQs is inferred from the size argument
+- interrupt-controller: identifies the node as an interrupt controller
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+ source, should be 1.
+- interrupt-parent: specifies the phandle to the parent interrupt controller(s)
+ this one is cascaded from
+- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
+ node; valid values depend on the type of parent interrupt controller
+
+If multiple reg ranges and interrupt-parent entries are present on an SMP
+system, the driver will allow IRQ SMP affinity to be set up through the
+/proc/irq/ interface. In the simplest possible configuration, only one
+reg range and one interrupt-parent is needed.
+
+Example:
+
+periph_intc: periph_intc@1041a400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x1041a400 0x30 0x1041a600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
index bae1f2187226..44a9bb15dd56 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
@@ -13,8 +13,7 @@ Such an interrupt controller has the following hardware design:
or if they will output an interrupt signal at this 2nd level interrupt
controller, in particular for UARTs
-- typically has one 32-bit enable word and one 32-bit status word, but on
- some hardware may have more than one enable/status pair
+- has one 32-bit enable word and one 32-bit status word
- no atomic set/clear operations
@@ -53,9 +52,7 @@ The typical hardware layout for this controller is represented below:
Required properties:
- compatible: should be "brcm,bcm7120-l2-intc"
-- reg: specifies the base physical address and size of the registers;
- multiple pairs may be specified, with the first pair handling IRQ offsets
- 0..31 and the second pair handling 32..63
+- reg: specifies the base physical address and size of the registers
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
@@ -66,10 +63,7 @@ Required properties:
- brcm,int-map-mask: 32-bits bit mask describing how many and which interrupts
are wired to this 2nd level interrupt controller, and how they match their
respective interrupt parents. Should match exactly the number of interrupts
- specified in the 'interrupts' property, multiplied by the number of
- enable/status register pairs implemented by this controller. For
- multiple parent IRQs with multiple enable/status words, this looks like:
- <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
+ specified in the 'interrupts' property.
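+
+As a hedged fragment (the IRQ numbers and mask values are only illustrative),
+a controller cascaded to two parent interrupts carries one mask per parent:
+
+	interrupts = <24>, <25>;
+	brcm,int-map-mask = <0x25c>, <0x7000>;
+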
Optional properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt
new file mode 100644
index 000000000000..d4de980e55fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-mx.txt
@@ -0,0 +1,18 @@
+* Xtensa Interrupt Distributor and Programmable Interrupt Controller (MX)
+
+Required properties:
+- compatible: Should be "cdns,xtensa-mx".
+
+Remaining properties have exact same meaning as in Xtensa PIC
+(see cdns,xtensa-pic.txt).
+
+Examples:
+ pic: pic {
+ compatible = "cdns,xtensa-mx";
+ /* one cell: internal irq number,
+ * two cells: second cell == 0: internal irq number
+ * second cell == 1: external irq number
+ */
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt
new file mode 100644
index 000000000000..026ef4cfc1d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/cdns,xtensa-pic.txt
@@ -0,0 +1,25 @@
+* Xtensa built-in Programmable Interrupt Controller (PIC)
+
+Required properties:
+- compatible: Should be "cdns,xtensa-pic".
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: The number of cells to define the interrupts.
+ It may be either 1 or 2.
+ When it's 1, the first cell is the internal IRQ number.
+ When it's 2, the first cell is the IRQ number, and the second cell
+ specifies whether it's internal (0) or external (1).
+  Peripherals are usually connected to a fixed external IRQ, but for different
+  core variants it may be mapped to different internal IRQs.
+ IRQ sensitivity and priority are fixed for each core variant and may not be
+ changed at runtime.
+
+Examples:
+ pic: pic {
+ compatible = "cdns,xtensa-pic";
+ /* one cell: internal irq number,
+ * two cells: second cell == 0: internal irq number
+ * second cell == 1: external irq number
+ */
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
index 5a65478e5d40..aae4c384ee1f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
@@ -27,8 +27,13 @@ Optional properties:
Required properties for timer sub-node:
- compatible : Should be "mti,gic-timer".
- interrupts : Interrupt for the GIC local timer.
+
+Optional properties for timer sub-node:
+- clocks : GIC timer operating clock.
- clock-frequency : Clock frequency at which the GIC timers operate.
+Note that one of clocks or clock-frequency must be specified.
+
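+A hedged sketch of the timer sub-node using the clocks alternative (the
+&gic_clk phandle name is only illustrative):
+
+	timer {
+		compatible = "mti,gic-timer";
+		interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+		clocks = <&gic_clk>;
+	};
+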
Example:
gic: interrupt-controller@1bdc0000 {
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 34811c57db69..747c53805eec 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -14,8 +14,10 @@ Optional properties for child nodes:
- led-sources : List of device current outputs the LED is connected to. The
outputs are identified by the numbers that must be defined
in the LED device binding documentation.
-- label : The label for this LED. If omitted, the label is
- taken from the node name (excluding the unit address).
+- label : The label for this LED. If omitted, the label is taken from the node
+ name (excluding the unit address). It has to uniquely identify
+ a device, i.e. no other LED class device can be assigned the same
+ label.
- linux,default-trigger : This parameter, if present, is a
string defining the trigger assigned to the LED. Current triggers are:
diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.txt b/Documentation/devicetree/bindings/leds/leds-gpio.txt
index f77148f372ea..fea1ebfe24a9 100644
--- a/Documentation/devicetree/bindings/leds/leds-gpio.txt
+++ b/Documentation/devicetree/bindings/leds/leds-gpio.txt
@@ -26,16 +26,18 @@ LED sub-node properties:
Examples:
+#include <dt-bindings/gpio/gpio.h>
+
leds {
compatible = "gpio-leds";
hdd {
label = "IDE Activity";
- gpios = <&mcu_pio 0 1>; /* Active low */
+ gpios = <&mcu_pio 0 GPIO_ACTIVE_LOW>;
linux,default-trigger = "ide-disk";
};
fault {
- gpios = <&mcu_pio 1 0>;
+ gpios = <&mcu_pio 1 GPIO_ACTIVE_HIGH>;
/* Keep LED on if BIOS detected hardware fault */
default-state = "keep";
};
@@ -44,11 +46,11 @@ leds {
run-control {
compatible = "gpio-leds";
red {
- gpios = <&mpc8572 6 0>;
+ gpios = <&mpc8572 6 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
green {
- gpios = <&mpc8572 7 0>;
+ gpios = <&mpc8572 7 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
};
@@ -57,7 +59,7 @@ leds {
compatible = "gpio-leds";
charger-led {
- gpios = <&gpio1 2 0>;
+ gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "max8903-charger-charging";
retain-state-suspended;
};
diff --git a/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt b/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
new file mode 100644
index 000000000000..a85a964d61f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
@@ -0,0 +1,43 @@
+Binding for Qualcomm PM8941 WLED driver
+
+Required properties:
+- compatible: should be "qcom,pm8941-wled"
+- reg: slave address
+
+Optional properties:
+- label: The label for this led
+ See Documentation/devicetree/bindings/leds/common.txt
+- linux,default-trigger: Default trigger assigned to the LED
+ See Documentation/devicetree/bindings/leds/common.txt
+- qcom,cs-out: bool; enable current sink output
+- qcom,cabc: bool; enable content adaptive backlight control
+- qcom,ext-gen: bool; use externally generated modulator signal to dim
+- qcom,current-limit: mA; per-string current limit; value from 0 to 25
+ default: 20mA
+- qcom,current-boost-limit: mA; boost current limit; one of:
+ 105, 385, 525, 805, 980, 1260, 1400, 1680
+ default: 805mA
+- qcom,switching-freq: kHz; switching frequency; one of:
+ 600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
+ 1600, 1920, 2400, 3200, 4800, 9600,
+ default: 1600kHz
+- qcom,ovp: V; Over-voltage protection limit; one of:
+ 27, 29, 32, 35
+ default: 29V
+- qcom,num-strings: #; number of led strings attached; value from 1 to 3
+ default: 2
+
+Example:
+
+pm8941-wled@d800 {
+ compatible = "qcom,pm8941-wled";
+ reg = <0xd800>;
+ label = "backlight";
+
+ qcom,cs-out;
+ qcom,current-limit = <20>;
+ qcom,current-boost-limit = <805>;
+ qcom,switching-freq = <1600>;
+ qcom,ovp = <29>;
+ qcom,num-strings = <2>;
+};
diff --git a/Documentation/devicetree/bindings/mailbox/arm-mhu.txt b/Documentation/devicetree/bindings/mailbox/arm-mhu.txt
new file mode 100644
index 000000000000..4971f03f0b33
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/arm-mhu.txt
@@ -0,0 +1,43 @@
+ARM MHU Mailbox Driver
+======================
+
+The ARM Message Handling Unit (MHU) is a mailbox controller that has
+3 independent channels/links to communicate with remote processor(s).
+MHU links are hardwired on a platform. A link raises an interrupt for any
+received data. However, there is no specified way of knowing if the sent
+data has been read by the remote. This driver assumes the sender polls the
+STAT register and that the remote clears it after having read the data.
+The last channel is specified to be a 'Secure' resource and hence can't be
+used by Linux running in the Non-Secure world.
+
+Mailbox Device Node:
+====================
+
+Required properties:
+--------------------
+- compatible: Shall be "arm,mhu" & "arm,primecell"
+- reg: Contains the mailbox register address range (base
+ address and length)
+- #mbox-cells:		Shall be 1 - the index of the channel needed.
+- interrupts: Contains the interrupt information corresponding to
+ each of the 3 links of MHU.
+
+Example:
+--------
+
+ mhu: mailbox@2b1f0000 {
+ #mbox-cells = <1>;
+ compatible = "arm,mhu", "arm,primecell";
+ reg = <0 0x2b1f0000 0x1000>;
+ interrupts = <0 36 4>, /* LP-NonSecure */
+ <0 35 4>, /* HP-NonSecure */
+ <0 37 4>; /* Secure */
+ clocks = <&clock 0 2 1>;
+ clock-names = "apb_pclk";
+ };
+
+ mhu_client: scb@2e000000 {
+ compatible = "fujitsu,mb86s70-scb-1.0";
+ reg = <0 0x2e000000 0x4000>;
+ mboxes = <&mhu 1>; /* HP-NonSecure */
+ };
diff --git a/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
index bf52ed4a5067..4ef45636ebde 100644
--- a/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
+++ b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible : should be one of:
"samsung,s5pv210-jpeg", "samsung,exynos4210-jpeg",
- "samsung,exynos3250-jpeg";
+ "samsung,exynos3250-jpeg", "samsung,exynos5420-jpeg";
- reg : address and length of the JPEG codec IP register set;
- interrupts : specifies the JPEG codec IP interrupt;
- clock-names : should contain:
diff --git a/Documentation/devicetree/bindings/media/i2c/mt9v032.txt b/Documentation/devicetree/bindings/media/i2c/mt9v032.txt
new file mode 100644
index 000000000000..202565313e82
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/mt9v032.txt
@@ -0,0 +1,39 @@
+* Aptina 1/3-Inch WVGA CMOS Digital Image Sensor
+
+The Aptina MT9V032 is a 1/3-inch CMOS active pixel digital image sensor with
+an active array size of 752H x 480V. It is programmable through a simple
+two-wire serial interface.
+
+Required Properties:
+
+- compatible: value should be either one among the following
+ (a) "aptina,mt9v022" for MT9V022 color sensor
+ (b) "aptina,mt9v022m" for MT9V022 monochrome sensor
+ (c) "aptina,mt9v024" for MT9V024 color sensor
+ (d) "aptina,mt9v024m" for MT9V024 monochrome sensor
+ (e) "aptina,mt9v032" for MT9V032 color sensor
+ (f) "aptina,mt9v032m" for MT9V032 monochrome sensor
+ (g) "aptina,mt9v034" for MT9V034 color sensor
+ (h) "aptina,mt9v034m" for MT9V034 monochrome sensor
+
+Optional Properties:
+
+- link-frequencies: List of allowed link frequencies in Hz. Each frequency is
+ expressed as a 64-bit big-endian integer.
+
+For further reading on the port node, refer to
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+ mt9v032@5c {
+ compatible = "aptina,mt9v032";
+ reg = <0x5c>;
+
+ port {
+ mt9v032_out: endpoint {
+ link-frequencies = /bits/ 64
+ <13000000 26600000 27000000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2640.txt b/Documentation/devicetree/bindings/media/i2c/ov2640.txt
new file mode 100644
index 000000000000..c429b5bdcaa0
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov2640.txt
@@ -0,0 +1,46 @@
+* Omnivision OV2640 CMOS sensor
+
+The Omnivision OV2640 sensor supports multiple output resolutions, such as
+CIF, SVGA and UXGA. It can also output YUV422/420, RGB565/555 or raw RGB
+formats.
+
+Required Properties:
+- compatible: should be "ovti,ov2640"
+- clocks: reference to the xvclk input clock.
+- clock-names: should be "xvclk".
+
+Optional Properties:
+- resetb-gpios: reference to the GPIO connected to the resetb pin, if any.
+- pwdn-gpios: reference to the GPIO connected to the pwdn pin, if any.
+
+The device node must contain one 'port' child node for its digital output
+video port, in accordance with the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+ i2c1: i2c@f0018000 {
+ ov2640: camera@0x30 {
+ compatible = "ovti,ov2640";
+ reg = <0x30>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck1 &pinctrl_ov2640_pwdn &pinctrl_ov2640_resetb>;
+
+ resetb-gpios = <&pioE 24 GPIO_ACTIVE_LOW>;
+ pwdn-gpios = <&pioE 29 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&pck1>;
+ clock-names = "xvclk";
+
+ assigned-clocks = <&pck1>;
+ assigned-clock-rates = <25000000>;
+
+ port {
+ ov2640_0: endpoint {
+ remote-endpoint = <&isi_0>;
+ bus-width = <8>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2659.txt b/Documentation/devicetree/bindings/media/i2c/ov2659.txt
new file mode 100644
index 000000000000..cabc7d827dfb
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov2659.txt
@@ -0,0 +1,38 @@
+* OV2659 1/5-Inch 2Mp SOC Camera
+
+The Omnivision OV2659 is a 1/5-inch SOC camera, with an active array size of
+1632H x 1212V. It is programmable through an SCCB interface. The OV2659
+sensor supports multiple output resolutions, such as UXGA, SVGA and 720p.
+It can also output YUV422, RGB565/555 or raw RGB formats.
+
+Required Properties:
+- compatible: Must be "ovti,ov2659"
+- reg: I2C slave address
+- clocks: reference to the xvclk input clock.
+- clock-names: should be "xvclk".
+- link-frequencies: target pixel clock frequency.
+
+For further reading on the port node, refer to
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+ i2c0@1c22000 {
+ ...
+ ...
+ ov2659@30 {
+ compatible = "ovti,ov2659";
+ reg = <0x30>;
+
+ clocks = <&clk_ov2659 0>;
+ clock-names = "xvclk";
+
+ port {
+ ov2659_0: endpoint {
+ remote-endpoint = <&vpfe_ep>;
+ link-frequencies = /bits/ 64 <70000000>;
+ };
+ };
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/media/ti,omap3isp.txt b/Documentation/devicetree/bindings/media/ti,omap3isp.txt
new file mode 100644
index 000000000000..ac23de855641
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/ti,omap3isp.txt
@@ -0,0 +1,71 @@
+OMAP 3 ISP Device Tree bindings
+===============================
+
+The DT definitions can be found in include/dt-bindings/media/omap3-isp.h.
+
+Required properties
+===================
+
+compatible : must contain "ti,omap3-isp"
+
+reg : the two register sets (physical address and length) for the
+ ISP. The first set contains the core ISP registers up to
+ the end of the SBL block. The second set contains the
+ CSI PHYs and receivers registers.
+interrupts : the ISP interrupt specifier
+iommus : phandle and IOMMU specifier for the IOMMU that serves the ISP
+syscon : the phandle and register offset to the Complex I/O or CSI-PHY
+ register
+ti,phy-type : 0 -- OMAP3ISP_PHY_TYPE_COMPLEX_IO (e.g. 3430)
+ 1 -- OMAP3ISP_PHY_TYPE_CSIPHY (e.g. 3630)
+#clock-cells : Must be 1 --- the ISP provides two external clocks,
+ cam_xclka and cam_xclkb, at indices 0 and 1,
+ respectively. Please find more information on common
+ clock bindings in ../clock/clock-bindings.txt.
+
+Port nodes (optional)
+---------------------
+
+More documentation on these bindings is available in
+video-interfaces.txt in the same directory.
+
+reg : The interface:
+ 0 - parallel (CCDC)
+ 1 - CSIPHY1 -- CSI2C / CCP2B on 3630;
+ CSI1 -- CSIb on 3430
+ 2 - CSIPHY2 -- CSI2A / CCP2B on 3630;
+ CSI2 -- CSIa on 3430
+
+Optional properties
+===================
+
+vdd-csiphy1-supply : voltage supply of the CSI-2 PHY 1
+vdd-csiphy2-supply : voltage supply of the CSI-2 PHY 2
+
+Endpoint nodes
+--------------
+
+lane-polarities : lane polarity (required on CSI-2)
+ 0 -- not inverted; 1 -- inverted
+data-lanes : an array of data lanes from 1 to 3. The length can
+ be either 1 or 2. (required on CSI-2)
+clock-lanes : the clock lane (from 1 to 3). (required on CSI-2)
+
+
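+A hedged sketch of a CSIPHY2 port with a CSI-2 sensor endpoint (the &cam_out
+label, the lane routing and the polarities are purely illustrative):
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@2 {
+			reg = <2>;	/* CSIPHY2 */
+
+			csi2a_ep: endpoint {
+				remote-endpoint = <&cam_out>;
+				clock-lanes = <2>;
+				data-lanes = <1 3>;
+				lane-polarities = <0 0 0>;
+			};
+		};
+	};
+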
+Example
+=======
+
+ isp@480bc000 {
+ compatible = "ti,omap3-isp";
+ reg = <0x480bc000 0x12fc
+ 0x480bd800 0x0600>;
+ interrupts = <24>;
+ iommus = <&mmu_isp>;
+ syscon = <&scm_conf 0x2f0>;
+ ti,phy-type = <OMAP3ISP_PHY_TYPE_CSIPHY>;
+ #clock-cells = <1>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index 571b4c60665f..9cd2a369125d 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -106,6 +106,12 @@ Optional endpoint properties
- link-frequencies: Allowed data bus frequencies. For MIPI CSI-2, for
instance, this is the actual frequency of the bus, not bits per clock per
lane value. An array of 64-bit unsigned integers.
+- lane-polarities: an array of polarities of the lanes starting from the clock
+ lane and followed by the data lanes in the same order as in data-lanes.
+ Valid values are 0 (normal) and 1 (inverted). The length of the array
+ should be the combined length of data-lanes and clock-lanes properties.
+ If the lane-polarities property is omitted, the value must be interpreted
+ as 0 (normal). This property is valid for serial busses only.
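+
+  As a hedged illustration (the endpoint and remote-endpoint names are
+  hypothetical), a CSI-2 endpoint with the clock lane in position 0, two data
+  lanes, and the second data lane inverted could be described as:
+
+	endpoint {
+		remote-endpoint = <&sensor_out>;
+		clock-lanes = <0>;
+		data-lanes = <1 2>;
+		lane-polarities = <0 0 1>;
+	};
+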
Example
diff --git a/Documentation/devicetree/bindings/media/xilinx/video.txt b/Documentation/devicetree/bindings/media/xilinx/video.txt
new file mode 100644
index 000000000000..cbd46fa0988f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/video.txt
@@ -0,0 +1,35 @@
+DT bindings for Xilinx video IP cores
+-------------------------------------
+
+Xilinx video IP cores process video streams by acting as video sinks and/or
+sources. They are connected by links through their input and output ports,
+creating a video pipeline.
+
+Each video IP core is represented by an AMBA bus child node in the device
+tree using bindings documented in this directory. Connections between the IP
+cores are represented as defined in ../video-interfaces.txt.
+
+The whole pipeline is represented by an AMBA bus child node in the device
+tree using bindings documented in ./xlnx,video.txt.
+
+Common properties
+-----------------
+
+The following properties are common to all Xilinx video IP cores.
+
+- xlnx,video-format: This property represents a video format transmitted on an
+ AXI bus between video IP cores, using its VF code as defined in "AXI4-Stream
+ Video IP and System Design Guide" [UG934]. How the format relates to the IP
+  core is described in the IP core bindings documentation.
+
+- xlnx,video-width: This property qualifies the video format with the sample
+ width expressed as a number of bits per pixel component. All components must
+ use the same width.
+
+- xlnx,cfa-pattern: When the video format is set to Mono/Sensor, this property
+ describes the sensor's color filter array pattern. Supported values are
+ "bggr", "gbrg", "grbg", "rggb" and "mono". If not specified, the pattern
+ defaults to "mono".
+
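+As a hedged fragment (the XVIP_VF_MONO_SENSOR code name is assumed here; the
+width and CFA pattern are only illustrative), the common properties typically
+appear together in an IP core port description:
+
+	xlnx,video-format = <XVIP_VF_MONO_SENSOR>;
+	xlnx,video-width = <8>;
+	xlnx,cfa-pattern = "rggb";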
+
+[UG934] http://www.xilinx.com/support/documentation/ip_documentation/axi_videoip/v1_0/ug934_axi_videoIP.pdf
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tc.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tc.txt
new file mode 100644
index 000000000000..2aed3b4a6cf1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tc.txt
@@ -0,0 +1,33 @@
+Xilinx Video Timing Controller (VTC)
+------------------------------------
+
+The Video Timing Controller is a general purpose video timing generator and
+detector.
+
+Required properties:
+
+ - compatible: Must be "xlnx,v-tc-6.1".
+
+ - reg: Physical base address and length of the registers set for the device.
+
+ - clocks: Must contain a clock specifier for the VTC core and timing
+ interfaces clock.
+
+Optional properties:
+
+ - xlnx,detector: The VTC has a timing detector
+ - xlnx,generator: The VTC has a timing generator
+
+ At least one of the xlnx,detector and xlnx,generator properties must be
+ specified.
+
+
+Example:
+
+ vtc: vtc@43c40000 {
+ compatible = "xlnx,v-tc-6.1";
+ reg = <0x43c40000 0x10000>;
+
+ clocks = <&clkc 15>;
+ xlnx,generator;
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
new file mode 100644
index 000000000000..9dd86b3db937
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
@@ -0,0 +1,71 @@
+Xilinx Video Test Pattern Generator (TPG)
+-----------------------------------------
+
+Required properties:
+
+- compatible: Must contain at least one of
+
+ "xlnx,v-tpg-5.0" (TPG version 5.0)
+ "xlnx,v-tpg-6.0" (TPG version 6.0)
+
+ TPG versions backward-compatible with previous versions should list all
+ compatible versions in the newer to older order.
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- xlnx,video-format, xlnx,video-width: Video format and width, as defined in
+ video.txt.
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+ The TPG has a single output port numbered 0.
+
+Optional properties:
+
+- xlnx,vtc: A phandle referencing the Video Timing Controller that generates
+ video timings for the TPG test patterns.
+
+- timing-gpios: Specifier for a GPIO that controls the timing mux at the TPG
+ input. The GPIO active level corresponds to the selection of VTC-generated
+ video timings.
+
+The xlnx,vtc and timing-gpios properties are mandatory when the TPG is
+synthesized with two ports and forbidden when synthesized with one port.
+
+Example:
+
+ tpg_0: tpg@40050000 {
+ compatible = "xlnx,v-tpg-6.0", "xlnx,v-tpg-5.0";
+ reg = <0x40050000 0x10000>;
+ clocks = <&clkc 15>;
+
+ xlnx,vtc = <&vtc_3>;
+ timing-gpios = <&ps7_gpio_0 55 GPIO_ACTIVE_LOW>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ tpg_in: endpoint {
+ remote-endpoint = <&adv7611_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ tpg1_out: endpoint {
+ remote-endpoint = <&switch_in0>;
+ };
+			};
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,video.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,video.txt
new file mode 100644
index 000000000000..5a0227023608
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,video.txt
@@ -0,0 +1,55 @@
+Xilinx Video IP Pipeline (VIPP)
+-------------------------------
+
+General concept
+---------------
+
+A Xilinx video IP pipeline processes video streams through one or more Xilinx
+video IP cores. Each video IP core is represented as documented in video.txt
+and the IP core specific documentation, xlnx,v-*.txt, in this directory. The
+DT node of the VIPP is the top-level node of the pipeline and defines the
+mappings between DMAs and the video IP cores.
+
+Required properties:
+
+- compatible: Must be "xlnx,video".
+
+- dmas, dma-names: List of one DMA specifier and identifier string (as defined
+ in Documentation/devicetree/bindings/dma/dma.txt) per port. Each port
+ requires a DMA channel with the identifier string set to "port" followed by
+ the port index.
+
+- ports: Video port, using the DT bindings defined in ../video-interfaces.txt.
+
+Required port properties:
+
+- direction: should be either "input" or "output" depending on the direction
+ of stream.
+
+Example:
+
+ video_cap {
+ compatible = "xlnx,video";
+ dmas = <&vdma_1 1>, <&vdma_3 1>;
+ dma-names = "port0", "port1";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ direction = "input";
+ vcap0_in0: endpoint {
+ remote-endpoint = <&scaler0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ direction = "input";
+ vcap0_in1: endpoint {
+ remote-endpoint = <&switch_out1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt b/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt
new file mode 100644
index 000000000000..f936b5589b19
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt
@@ -0,0 +1,75 @@
+* Ingenic JZ4780 NAND/external memory controller (NEMC)
+
+This file documents the device tree bindings for the NEMC external memory
+controller in the Ingenic JZ4780 SoC.
+
+Required properties:
+- compatible: Should be set to one of:
+ "ingenic,jz4780-nemc" (JZ4780)
+- reg: Should specify the NEMC controller registers location and length.
+- clocks: Clock for the NEMC controller.
+- #address-cells: Must be set to 2.
+- #size-cells: Must be set to 1.
+- ranges: A set of ranges for each bank describing the physical memory layout.
+ Each should specify the following 4 integer values:
+
+ <cs number> 0 <physical address of mapping> <size of mapping>
+
+Each child of the NEMC node describes a device connected to the NEMC.
+
+Required child node properties:
+- reg: Should contain at least one register specifier, given in the following
+ format:
+
+ <cs number> <offset> <size>
+
+ Multiple registers can be specified across multiple banks. This is needed,
+ for example, for packaged NAND devices with multiple dies. Such devices
+ should be grouped into a single node.
+
+Optional child node properties:
+- ingenic,nemc-bus-width: Specifies the bus width in bits. Defaults to 8 bits.
+- ingenic,nemc-tAS: Address setup time in nanoseconds.
+- ingenic,nemc-tAH: Address hold time in nanoseconds.
+- ingenic,nemc-tBP: Burst pitch time in nanoseconds.
+- ingenic,nemc-tAW: Access wait time in nanoseconds.
+- ingenic,nemc-tSTRV: Static memory recovery time in nanoseconds.
+
+If a child node references multiple banks in its "reg" property, the same value
+for all optional parameters will be configured for all banks. If any optional
+parameters are omitted, they will be left unchanged from whatever they are
+configured to when the NEMC device is probed (which may be the reset value as
+given in the hardware reference manual, or a value configured by the boot
+loader).
+
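+As a hedged illustration (the chip selects and sizes are hypothetical), a
+two-die NAND package wired to both CS1 and CS2 groups its banks in a single
+child node:
+
+	nand@1 {
+		compatible = "ingenic,jz4780-nand";
+		reg = <1 0 0x1000000>,
+		      <2 0 0x1000000>;
+	};
+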
+Example (NEMC node with a NAND child device attached at CS1):
+
+nemc: nemc@13410000 {
+ compatible = "ingenic,jz4780-nemc";
+ reg = <0x13410000 0x10000>;
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ ranges = <1 0 0x1b000000 0x1000000
+ 2 0 0x1a000000 0x1000000
+ 3 0 0x19000000 0x1000000
+ 4 0 0x18000000 0x1000000
+ 5 0 0x17000000 0x1000000
+ 6 0 0x16000000 0x1000000>;
+
+ clocks = <&cgu JZ4780_CLK_NEMC>;
+
+ nand: nand@1 {
+ compatible = "ingenic,jz4780-nand";
+ reg = <1 0 0x1000000>;
+
+ ingenic,nemc-tAS = <10>;
+ ingenic,nemc-tAH = <5>;
+ ingenic,nemc-tBP = <10>;
+ ingenic,nemc-tAW = <15>;
+ ingenic,nemc-tSTRV = <100>;
+
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index 7bd1273f571a..7665aa95979f 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -8,6 +8,7 @@ Required properties:
- compatible : One of the following chip-specific strings:
"wlf,wm5102"
"wlf,wm5110"
+ "wlf,wm8280"
"wlf,wm8997"
- reg : I2C slave address when connected using I2C, chip select number when
using SPI.
@@ -26,21 +27,27 @@ Required properties:
- #gpio-cells : Must be 2. The first cell is the pin number and the
second cell is used to specify optional parameters (currently unused).
- - AVDD-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply (wm5102, wm5110),
- CPVDD-supply, SPKVDDL-supply (wm5102, wm5110), SPKVDDR-supply (wm5102,
- wm5110), SPKVDD-supply (wm8997) : Power supplies for the device, as covered
- in Documentation/devicetree/bindings/regulator/regulator.txt
+ - AVDD-supply, DBVDD1-supply, CPVDD-supply : Power supplies for the device,
+ as covered in Documentation/devicetree/bindings/regulator/regulator.txt
+
+ - DBVDD2-supply, DBVDD3-supply : Additional databus power supplies (wm5102,
+ wm5110, wm8280)
+
+ - SPKVDDL-supply, SPKVDDR-supply : Speaker driver power supplies (wm5102,
+ wm5110, wm8280)
+
+ - SPKVDD-supply : Speaker driver power supply (wm8997)
Optional properties:
- wlf,reset : GPIO specifier for the GPIO controlling /RESET
- wlf,ldoena : GPIO specifier for the GPIO controlling LDOENA
- - wlf,gpio-defaults : A list of GPIO configuration register values. If
- absent, no configuration of these registers is performed. If any
- entry has a value that is out of range for a 16 bit register then
- the chip default will be used. If present exactly five values must
- be specified.
+ - wlf,gpio-defaults : A list of GPIO configuration register values. Defines
+   for the appropriate values can be found in <dt-bindings/mfd/arizona.h>. If
+   absent, no configuration of these registers is performed. If any entry has
+   a value that is out of range for a 16 bit register then the chip default
+   will be used. If present, exactly five values must be specified.
- wlf,inmode : A list of INn_MODE register values, where n is the number
of input signals. Valid values are 0 (Differential), 1 (Single-ended) and
@@ -49,6 +56,12 @@ Optional properties:
input signals. If fewer values are given than the number of input signals,
elements that have not been specified are set to 0 by default.
+ - wlf,dmic-ref : DMIC reference voltage source for each input; can be
+   selected from either MICVDD or one of the MICBIASes. Defines
+   (ARIZONA_DMIC_xxxx) are provided in <dt-bindings/mfd/arizona.h>. If
+   present, the number of values should be less than or equal to the
+   number of inputs; unspecified inputs will use the chip default.
+
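+   A hedged fragment for a device with two inputs (the ARIZONA_DMIC_* define
+   names are assumed from <dt-bindings/mfd/arizona.h>; the values are only
+   illustrative):
+
+	wlf,inmode = <0 1>;	/* IN1 differential, IN2 single-ended */
+	wlf,dmic-ref = <ARIZONA_DMIC_MICVDD ARIZONA_DMIC_MICBIAS1>;
+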
- DCVDD-supply, MICVDD-supply : Power supplies, only need to be specified if
they are being externally supplied. As covered in
Documentation/devicetree/bindings/regulator/regulator.txt
@@ -73,10 +86,10 @@ codec: wm5102@1a {
#gpio-cells = <2>;
wlf,gpio-defaults = <
- 0x00000000 /* AIF1TXLRCLK */
- 0xffffffff
- 0xffffffff
- 0xffffffff
- 0xffffffff
+ ARIZONA_GP_FN_TXLRCLK
+ ARIZONA_GP_DEFAULT
+ ARIZONA_GP_DEFAULT
+ ARIZONA_GP_DEFAULT
+ ARIZONA_GP_DEFAULT
>;
};
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
new file mode 100644
index 000000000000..98685f291a72
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -0,0 +1,96 @@
+AXP202/AXP209 device tree bindings
+
+Current members of the axp20x family:
+axp202 (X-Powers)
+axp209 (X-Powers)
+
+Required properties:
+- compatible: "x-powers,axp202" or "x-powers,axp209"
+- reg: The I2C slave address for the AXP chip
+- interrupt-parent: The parent interrupt controller
+- interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
+- interrupt-controller: axp20x has its own internal IRQs
+- #interrupt-cells: Should be set to 1
+
+Optional properties:
+- x-powers,dcdc-freq: defines the working frequency of the DC-DC converters
+		      in kHz (range: 750-1875). Default: 1.5 MHz
+- <input>-supply: a phandle to the regulator supply node. May be omitted if
+ inputs are unregulated, such as using the IPSOUT output
+ from the PMIC.
+
+- regulators: A node that houses a sub-node for each regulator. Regulators
+ not used but preferred to be managed by the OS should be
+ listed as well.
+ See Documentation/devicetree/bindings/regulator/regulator.txt
+ for more information on standard regulator bindings.
+
+Optional properties for DCDC regulators:
+- x-powers,dcdc-workmode: 1 for PWM mode, 0 for AUTO (PWM/PFM) mode
+ Default: Current hardware setting
+ The DCDC regulators work in a mixed PWM/PFM mode,
+ using PFM under light loads and switching to PWM
+ for heavier loads. Forcing PWM mode trades efficiency
+ under light loads for lower output noise. This
+ probably makes sense for HiFi audio related
+ applications that aren't battery constrained.
+
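+As a hedged fragment (the dcdc2 node name matches the example below; the
+value 1 selects forced PWM mode):
+
+	vdd_cpu: dcdc2 {
+		x-powers,dcdc-workmode = <1>;	/* force PWM */
+		/* ... standard regulator properties ... */
+	};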
+
+AXP202/AXP209 regulators, type, and corresponding input supply names:
+
+Regulator Type Supply Name Notes
+--------- ---- ----------- -----
+DCDC2 : DC-DC buck : vin2-supply
+DCDC3 : DC-DC buck : vin3-supply
+LDO1 : LDO : acin-supply : always on
+LDO2 : LDO : ldo24in-supply : shared supply
+LDO3 : LDO : ldo3in-supply
+LDO4 : LDO : ldo24in-supply : shared supply
+LDO5 : LDO : ldo5in-supply
+
+Example:
+
+axp209: pmic@34 {
+ compatible = "x-powers,axp209";
+ reg = <0x34>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ regulators {
+ x-powers,dcdc-freq = <1500>;
+
+ vdd_cpu: dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ vdd_int_dll: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-int-dll";
+ };
+
+ vdd_rtc: ldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-rtc";
+ };
+
+ avcc: ldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "avcc";
+ };
+
+ ldo3 {
+ /* unused but preferred to be managed by OS */
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/bcm590xx.txt b/Documentation/devicetree/bindings/mfd/brcm,bcm59056.txt
index be51a15e05f9..be51a15e05f9 100644
--- a/Documentation/devicetree/bindings/mfd/bcm590xx.txt
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm59056.txt
diff --git a/Documentation/devicetree/bindings/mfd/da9150.txt b/Documentation/devicetree/bindings/mfd/da9150.txt
new file mode 100644
index 000000000000..d0588eaa0d71
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9150.txt
@@ -0,0 +1,43 @@
+Dialog Semiconductor DA9150 Combined Charger/Fuel-Gauge MFD bindings
+
+DA9150 consists of a group of sub-devices:
+
+Device Description
+------ -----------
+da9150-gpadc : General Purpose ADC
+da9150-charger : Battery Charger
+
+======
+
+Required properties:
+- compatible : Should be "dlg,da9150"
+- reg: Specifies the I2C slave address
+- interrupt-parent: Specifies the phandle of the interrupt controller to which
+  the IRQs from the da9150 are delivered.
+- interrupts: IRQ line info for da9150 chip.
+- interrupt-controller: da9150 has internal IRQs (own IRQ domain).
+ (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
+ further information relating to interrupt properties)
+
+Sub-devices:
+- da9150-gpadc: See Documentation/devicetree/bindings/iio/adc/da9150-gpadc.txt
+- da9150-charger: See Documentation/devicetree/bindings/power/da9150-charger.txt
+
+
+Example:
+
+ charger_fg: da9150@58 {
+ compatible = "dlg,da9150";
+ reg = <0x58>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+
+ gpadc: da9150-gpadc {
+ ...
+ };
+
+ da9150-charger {
+ ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
new file mode 100644
index 000000000000..15043e652699
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -0,0 +1,70 @@
+MediaTek MT6397 Multifunction Device Driver
+
+MT6397 is a multifunction device with the following sub modules:
+- Regulator
+- RTC
+- Audio codec
+- GPIO
+- Clock
+
+It is interfaced to the host controller over SPI through a proprietary hardware
+block called the PMIC wrapper, or pwrap. The MT6397 MFD is a child device of
+the pwrap. See the following for pwrap node definitions:
+Documentation/devicetree/bindings/soc/pwrap.txt
+
+This document describes the binding for the MFD device and its sub-modules.
+
+Required properties:
+compatible: "mediatek,mt6397"
+
+Optional subnodes:
+
+- rtc
+ Required properties:
+ - compatible: "mediatek,mt6397-rtc"
+- regulators
+ Required properties:
+ - compatible: "mediatek,mt6397-regulator"
+ see Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
+- codec
+ Required properties:
+ - compatible: "mediatek,mt6397-codec"
+- clk
+ Required properties:
+ - compatible: "mediatek,mt6397-clk"
+
+Example:
+ pwrap: pwrap@1000f000 {
+ compatible = "mediatek,mt8135-pwrap";
+
+ ...
+
+ pmic {
+ compatible = "mediatek,mt6397";
+
+ codec: mt6397codec {
+ compatible = "mediatek,mt6397-codec";
+ };
+
+ regulators {
+ compatible = "mediatek,mt6397-regulator";
+
+ mt6397_vpca15_reg: buck_vpca15 {
+ regulator-compatible = "buck_vpca15";
+ regulator-name = "vpca15";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vgp4_reg: ldo_vgp4 {
+ regulator-compatible = "ldo_vgp4";
+ regulator-name = "vgp4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
index 7182b8857f57..6ac06c1b9aec 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
@@ -15,10 +15,21 @@ each. A function can consume one or more of these fixed-size register regions.
Required properties:
- compatible: Should contain one of:
- "qcom,pm8941"
- "qcom,pm8841"
- "qcom,pma8084"
- or generalized "qcom,spmi-pmic".
+ "qcom,pm8941",
+ "qcom,pm8841",
+ "qcom,pma8084",
+ "qcom,pm8019",
+ "qcom,pm8226",
+ "qcom,pm8110",
+ "qcom,pma8084",
+ "qcom,pmi8962",
+ "qcom,pmd9635",
+ "qcom,pm8994",
+ "qcom,pmi8994",
+ "qcom,pm8916",
+ "qcom,pm8004",
+ "qcom,pm8909",
+ or generalized "qcom,spmi-pmic".
- reg: Specifies the SPMI USID slave address for this device.
For more information see:
Documentation/devicetree/bindings/spmi/spmi.txt
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.txt b/Documentation/devicetree/bindings/mfd/qcom,tcsr.txt
new file mode 100644
index 000000000000..e90519d566a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.txt
@@ -0,0 +1,22 @@
+QCOM Top Control and Status Register
+
+Qualcomm devices have a set of registers that provide various control and status
+functions for their peripherals. This node is intended to allow access to these
+registers via syscon.
+
+Required properties:
+- compatible: Should contain:
+ "qcom,tcsr-ipq8064", "syscon" for IPQ8064
+ "qcom,tcsr-apq8064", "syscon" for APQ8064
+ "qcom,tcsr-msm8660", "syscon" for MSM8660
+ "qcom,tcsr-msm8960", "syscon" for MSM8960
+ "qcom,tcsr-msm8974", "syscon" for MSM8974
+ "qcom,tcsr-apq8084", "syscon" for APQ8084
+ "qcom,tcsr-msm8916", "syscon" for MSM8916
+- reg: Address range for TCSR registers
+
+Example:
+ tcsr: syscon@1a400000 {
+ compatible = "qcom,tcsr-msm8960", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
index 85e31980017a..5e97a9593ad7 100644
--- a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
@@ -12,6 +12,7 @@ frequencies.
"qcom,rpm-apq8064"
"qcom,rpm-msm8660"
"qcom,rpm-msm8960"
+ "qcom,rpm-ipq8064"
- reg:
Usage: required
@@ -31,16 +32,6 @@ frequencies.
Value type: <string-array>
Definition: must be the three strings "ack", "err" and "wakeup", in order
-- #address-cells:
- Usage: required
- Value type: <u32>
- Definition: must be 1
-
-- #size-cells:
- Usage: required
- Value type: <u32>
- Definition: must be 0
-
- qcom,ipc:
Usage: required
Value type: <prop-encoded-array>
@@ -52,6 +43,188 @@ frequencies.
- u32 representing the ipc bit within the register
+= SUBNODES
+
+The RPM exposes resources to its subnodes. The below bindings specify the set
+of valid subnodes that can operate on these resources.
+
+== Regulators
+
+Regulator nodes are identified by their compatible:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,rpm-pm8058-regulators"
+ "qcom,rpm-pm8901-regulators"
+ "qcom,rpm-pm8921-regulators"
+
+- vdd_l0_l1_lvs-supply:
+- vdd_l2_l11_l12-supply:
+- vdd_l3_l4_l5-supply:
+- vdd_l6_l7-supply:
+- vdd_l8-supply:
+- vdd_l9-supply:
+- vdd_l10-supply:
+- vdd_l13_l16-supply:
+- vdd_l14_l15-supply:
+- vdd_l17_l18-supply:
+- vdd_l19_l20-supply:
+- vdd_l21-supply:
+- vdd_l22-supply:
+- vdd_l23_l24_l25-supply:
+- vdd_ncp-supply:
+- vdd_s0-supply:
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+ Usage: optional (pm8058 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+- lvs0_in-supply:
+- lvs1_in-supply:
+- lvs2_in-supply:
+- lvs3_in-supply:
+- mvs_in-supply:
+- vdd_l0-supply:
+- vdd_l1-supply:
+- vdd_l2-supply:
+- vdd_l3-supply:
+- vdd_l4-supply:
+- vdd_l5-supply:
+- vdd_l6-supply:
+- vdd_s0-supply:
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+ Usage: optional (pm8901 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+- vdd_l1_l2_l12_l18-supply:
+- vdd_l3_l15_l17-supply:
+- vdd_l4_l14-supply:
+- vdd_l5_l8_l16-supply:
+- vdd_l6_l7-supply:
+- vdd_l9_l11-supply:
+- vdd_l10_l22-supply:
+- vdd_l21_l23_l29-supply:
+- vdd_l24-supply:
+- vdd_l25-supply:
+- vdd_l26-supply:
+- vdd_l27-supply:
+- vdd_l28-supply:
+- vdd_ncp-supply:
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_s7-supply:
+- vdd_s8-supply:
+- vin_5vs-supply:
+- vin_lvs1_3_6-supply:
+- vin_lvs2-supply:
+- vin_lvs4_5_7-supply:
+ Usage: optional (pm8921 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+The regulator node houses sub-nodes for each regulator within the device. Each
+sub-node is identified using the node's name, with valid values listed for each
+of the pmics below.
+
+pm8058:
+ l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15,
+ l16, l17, l18, l19, l20, l21, l22, l23, l24, l25, s0, s1, s2, s3, s4,
+ lvs0, lvs1, ncp
+
+pm8901:
+ l0, l1, l2, l3, l4, l5, l6, s0, s1, s2, s3, s4, lvs0, lvs1, lvs2, lvs3,
+ mvs
+
+pm8921:
+ s1, s2, s3, s4, s7, s8, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
+ l12, l14, l15, l16, l17, l18, l21, l22, l23, l24, l25, l26, l27, l28,
+ l29, lvs1, lvs2, lvs3, lvs4, lvs5, lvs6, lvs7, usb-switch, hdmi-switch,
+ ncp
+
+The content of each sub-node is defined by the standard binding for regulators -
+see regulator.txt - with additional custom properties described below:
+
+=== Switch-mode Power Supply regulator custom properties
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <empty>
+ Definition: enable pull down of the regulator when inactive
+
+- qcom,switch-mode-frequency:
+ Usage: required
+ Value type: <u32>
+ Definition: Frequency (Hz) of the switch-mode power supply;
+ must be one of:
+ 19200000, 9600000, 6400000, 4800000, 3840000, 3200000,
+ 2740000, 2400000, 2130000, 1920000, 1750000, 1600000,
+ 1480000, 1370000, 1280000, 1200000
+
+- qcom,force-mode:
+ Usage: optional (default if no other qcom,force-mode is specified)
+ Value type: <u32>
+	Definition: indicates that the regulator should be forced to a
+ particular mode, valid values are:
+ QCOM_RPM_FORCE_MODE_NONE - do not force any mode
+ QCOM_RPM_FORCE_MODE_LPM - force into low power mode
+ QCOM_RPM_FORCE_MODE_HPM - force into high power mode
+ QCOM_RPM_FORCE_MODE_AUTO - allow regulator to automatically
+ select its own mode based on
+ realtime current draw, only for:
+ pm8921 smps and ftsmps
+
+- qcom,power-mode-hysteretic:
+ Usage: optional
+ Value type: <empty>
+ Definition: select that the power supply should operate in hysteretic
+ mode, instead of the default pwm mode
+
+=== Low-dropout regulator custom properties
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <empty>
+ Definition: enable pull down of the regulator when inactive
+
+- qcom,force-mode:
+ Usage: optional
+ Value type: <u32>
+	Definition: indicates that the regulator should be forced to a
+		particular mode, valid values are:
+ QCOM_RPM_FORCE_MODE_NONE - do not force any mode
+ QCOM_RPM_FORCE_MODE_LPM - force into low power mode
+ QCOM_RPM_FORCE_MODE_HPM - force into high power mode
+ QCOM_RPM_FORCE_MODE_BYPASS - set regulator to use bypass
+ mode, i.e. to act as a switch
+ and not regulate, only for:
+ pm8921 pldo, nldo and nldo1200
+
+=== Negative Charge Pump custom properties
+
+- qcom,switch-mode-frequency:
+ Usage: required
+ Value type: <u32>
+	Definition: Frequency (Hz) of the switch-mode power supply;
+ must be one of:
+ 19200000, 9600000, 6400000, 4800000, 3840000, 3200000,
+ 2740000, 2400000, 2130000, 1920000, 1750000, 1600000,
+ 1480000, 1370000, 1280000, 1200000
+
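+As a hedged sketch (the voltage values are only illustrative; the frequency
+is taken from the list above), an ncp sub-node inside the pm8921 regulators
+node could look like:
+
+	ncp {
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+
+		qcom,switch-mode-frequency = <1920000>;
+	};
+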
= EXAMPLE
#include <dt-bindings/mfd/qcom-rpm.h>
@@ -64,7 +237,28 @@ frequencies.
interrupts = <0 19 0>, <0 21 0>, <0 22 0>;
interrupt-names = "ack", "err", "wakeup";
- #address-cells = <1>;
- #size-cells = <0>;
+ regulators {
+ compatible = "qcom,rpm-pm8921-regulators";
+ vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+
+ s1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+
+ bias-pull-down;
+
+ qcom,switch-mode-frequency = <3200000>;
+ };
+
+ pm8921_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+
+ qcom,force-mode = <QCOM_RPM_FORCE_MODE_AUTO>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/mfd/sky81452.txt b/Documentation/devicetree/bindings/mfd/sky81452.txt
new file mode 100644
index 000000000000..35181794aa24
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/sky81452.txt
@@ -0,0 +1,35 @@
+SKY81452 bindings
+
+Required properties:
+- compatible : Must be "skyworks,sky81452"
+- reg : I2C slave address
+
+Required child nodes:
+- backlight : container node for backlight following the binding
+ in video/backlight/sky81452-backlight.txt
+- regulator : container node for regulators following the binding
+ in regulator/sky81452-regulator.txt
+
+Example:
+
+ sky81452@2c {
+ compatible = "skyworks,sky81452";
+ reg = <0x2c>;
+
+ backlight {
+ compatible = "skyworks,sky81452-backlight";
+ name = "pwm-backlight";
+ led-sources = <0 1 2 3 6>;
+ skyworks,ignore-pwm;
+ skyworks,phase-shift;
+ skyworks,current-limit = <2300>;
+ };
+
+ regulator {
+ lout {
+ regulator-name = "sky81452-lout";
+ regulator-min-microvolt = <4500000>;
+ regulator-max-microvolt = <8000000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt b/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt
deleted file mode 100644
index d4e0141d3620..000000000000
--- a/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-* Interrupt Controller
-
-Properties:
-- compatible: "brcm,bcm3384-intc"
-
- Compatibility with BCM3384 and possibly other BCM33xx/BCM63xx SoCs.
-
-- reg: Address/length pairs for each mask/status register set. Length must
- be 8. If multiple register sets are specified, the first set will
- handle IRQ offsets 0..31, the second set 32..63, and so on.
-
-- interrupt-controller: This is an interrupt controller.
-
-- #interrupt-cells: Must be <1>. Just a simple IRQ offset; no level/edge
- or polarity configuration is possible with this controller.
-
-- interrupt-parent: This controller is cascaded from a MIPS CPU HW IRQ, or
- from another INTC.
-
-- interrupts: The IRQ on the parent controller.
-
-Example:
- periph_intc: periph_intc@14e00038 {
- compatible = "brcm,bcm3384-intc";
-
- /*
- * IRQs 0..31: mask reg 0x14e00038, status reg 0x14e0003c
- * IRQs 32..63: mask reg 0x14e00340, status reg 0x14e00344
- */
- reg = <0x14e00038 0x8 0x14e00340 0x8>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&cpu_intc>;
- interrupts = <4>;
- };
diff --git a/Documentation/devicetree/bindings/mips/brcm/bmips.txt b/Documentation/devicetree/bindings/mips/brcm/brcm,bmips.txt
index 8ef71b4085ca..8ef71b4085ca 100644
--- a/Documentation/devicetree/bindings/mips/brcm/bmips.txt
+++ b/Documentation/devicetree/bindings/mips/brcm/brcm,bmips.txt
diff --git a/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt b/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt
deleted file mode 100644
index 8a139cb3c0b5..000000000000
--- a/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-* Broadcom cable/DSL platforms
-
-SoCs:
-
-Required properties:
-- compatible: "brcm,bcm3384", "brcm,bcm33843"
-
-Boards:
-
-Required properties:
-- compatible: "brcm,bcm93384wvg"
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt
new file mode 100644
index 000000000000..7bab90cc4a7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt
@@ -0,0 +1,12 @@
+* Broadcom cable/DSL/settop platforms
+
+Required properties:
+
+- compatible: "brcm,bcm3384", "brcm,bcm33843"
+ "brcm,bcm3384-viper", "brcm,bcm33843-viper"
+ "brcm,bcm6328", "brcm,bcm6368",
+ "brcm,bcm7125", "brcm,bcm7346", "brcm,bcm7358", "brcm,bcm7360",
+ "brcm,bcm7362", "brcm,bcm7420", "brcm,bcm7425"
+
+The experimental -viper variants are for running Linux on the 3384's
+BMIPS4355 cable modem CPU instead of the BMIPS5000 application processor.
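+
+Example (a minimal sketch of a board-level root node; the board compatible
+"brcm,bcm93384wvg" is carried over from the previous cable/DSL binding and
+the cell sizes are assumptions):
+
+	/ {
+		compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};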
diff --git a/Documentation/devicetree/bindings/mips/img/pistachio.txt b/Documentation/devicetree/bindings/mips/img/pistachio.txt
new file mode 100644
index 000000000000..a736d889c2b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/img/pistachio.txt
@@ -0,0 +1,42 @@
+Imagination Pistachio SoC
+=========================
+
+Required properties:
+--------------------
+ - compatible: Must include "img,pistachio".
+
+CPU nodes:
+----------
+A "cpus" node is required. Required properties:
+ - #address-cells: Must be 1.
+ - #size-cells: Must be 0.
+A CPU sub-node is also required for at least CPU 0. Since the topology may
+be probed via CPS, it is not necessary to specify secondary CPUs. Required
+properties:
+ - device_type: Must be "cpu".
+ - compatible: Must be "mti,interaptiv".
+ - reg: CPU number.
+ - clocks: Must include the CPU clock. See ../../clock/clock-bindings.txt for
+ details on clock bindings.
+Example:
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "mti,interaptiv";
+ reg = <0>;
+ clocks = <&clk_core CLK_MIPS>;
+ };
+ };
+
+
+Boot protocol:
+--------------
+In accordance with the MIPS UHI specification[1], the bootloader must pass the
+following arguments to the kernel:
+ - $a0: -2.
+ - $a1: KSEG0 address of the flattened device-tree blob.
+
+[1] http://prplfoundation.org/wiki/MIPS_documentation
diff --git a/Documentation/devicetree/bindings/misc/smc.txt b/Documentation/devicetree/bindings/misc/brcm,kona-smc.txt
index 6c9f176f3571..6c9f176f3571 100644
--- a/Documentation/devicetree/bindings/misc/smc.txt
+++ b/Documentation/devicetree/bindings/misc/brcm,kona-smc.txt
diff --git a/Documentation/devicetree/bindings/misc/lis302.txt b/Documentation/devicetree/bindings/misc/lis302.txt
index 6def86f6b053..2a19bff9693f 100644
--- a/Documentation/devicetree/bindings/misc/lis302.txt
+++ b/Documentation/devicetree/bindings/misc/lis302.txt
@@ -46,11 +46,18 @@ Optional properties for all bus drivers:
interrupt 2
- st,wakeup-{x,y,z}-{lo,hi}: set wakeup condition on x/y/z axis for
upper/lower limit
+ - st,wakeup-threshold: set wakeup threshold
+ - st,wakeup2-{x,y,z}-{lo,hi}: set wakeup condition on x/y/z axis for
+ upper/lower limit for second wakeup
+ engine.
+ - st,wakeup2-threshold: set wakeup threshold for second wakeup
+ engine.
- st,highpass-cutoff-hz=: 1, 2, 4 or 8 for 1Hz, 2Hz, 4Hz or 8Hz of
highpass cut-off frequency
- st,hipass{1,2}-disable: disable highpass 1/2.
- st,default-rate=: set the default rate
- - st,axis-{x,y,z}=: set the axis to map to the three coordinates
+ - st,axis-{x,y,z}=: set the axis to map to the three coordinates.
+ Negative values can be used for inverted axis.
- st,{min,max}-limit-{x,y,z} set the min/max limits for x/y/z axis
(used by self-test)
diff --git a/Documentation/devicetree/bindings/mmc/kona-sdhci.txt b/Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt
index aaba2483b4ff..aaba2483b4ff 100644
--- a/Documentation/devicetree/bindings/mmc/kona-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt
diff --git a/Documentation/devicetree/bindings/mtd/m25p80.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index 4611aa83531b..2bee68103b01 100644
--- a/Documentation/devicetree/bindings/mtd/m25p80.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -3,10 +3,13 @@
Required properties:
- #address-cells, #size-cells : Must be present if the device has sub-nodes
representing partitions.
-- compatible : Should be the manufacturer and the name of the chip. Bear in mind
- the DT binding is not Linux-only, but in case of Linux, see the
- "spi_nor_ids" table in drivers/mtd/spi-nor/spi-nor.c for the list
- of supported chips.
+- compatible : May include a device-specific string consisting of the
+ manufacturer and name of the chip. Bear in mind the DT binding
+ is not Linux-only, but in case of Linux, see the "m25p_ids"
+ table in drivers/mtd/devices/m25p80.c for the list of supported
+ chips.
+ Must also include "jedec,spi-nor" for any SPI NOR flash that can
+ be identified by the JEDEC READ ID opcode (0x9F).
- reg : Chip-Select number
- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
@@ -22,7 +25,7 @@ Example:
flash: m25p80@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "spansion,m25p80";
+ compatible = "spansion,m25p80", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <40000000>;
m25p,fast-read;
diff --git a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
index de8b517a5521..4f833e3c4f51 100644
--- a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
@@ -14,7 +14,7 @@ Optional properties:
- marvell,nand-enable-arbiter: Set to enable the bus arbiter
- marvell,nand-keep-config: Set to keep the NAND controller config as set
by the bootloader
- - num-cs: Number of chipselect lines to usw
+ - num-cs: Number of chipselect lines to use
- nand-on-flash-bbt: boolean to enable on flash bbt option if
not present false
- nand-ecc-strength: number of bits to correct per ECC step
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
index 0273adb8638c..086d6f44c4b9 100644
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -21,7 +21,7 @@ Optional properties:
- nand-ecc-mode : one of the supported ECC modes ("hw", "hw_syndrome", "soft",
"soft_bch" or "none")
-see Documentation/devicetree/mtd/nand.txt for generic bindings.
+see Documentation/devicetree/bindings/mtd/nand.txt for generic bindings.
Examples:
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index 6151999c5dca..f55aa280d34f 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -14,7 +14,11 @@ Required properties for all the ethernet interfaces:
- "enet_csr": Ethernet control and status register address space
- "ring_csr": Descriptor ring control and status register address space
- "ring_cmd": Descriptor ring command register address space
-- interrupts: Ethernet main interrupt
+- interrupts: Two interrupt specifiers can be specified.
+ - First is the Rx interrupt. This irq is mandatory.
+ - Second is the Tx completion interrupt.
+ This is supported only on SGMII based 1GbE and 10GbE interfaces.
+- port-id: Port number (0 or 1)
- clocks: Reference to the clock entry.
- local-mac-address: MAC address assigned to this device
- phy-connection-type: Interface type between ethernet device and PHY device
@@ -49,6 +53,7 @@ Example:
<0x0 0X10000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x3c 0x4>;
+ port-id = <0>;
clocks = <&menetclk 0>;
local-mac-address = [00 01 73 00 00 01];
phy-connection-type = "rgmii";
diff --git a/Documentation/devicetree/bindings/net/broadcom-sf2.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 30d487597ecb..30d487597ecb 100644
--- a/Documentation/devicetree/bindings/net/broadcom-sf2.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
diff --git a/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
index 451fef26b4df..451fef26b4df 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
diff --git a/Documentation/devicetree/bindings/net/broadcom-systemport.txt b/Documentation/devicetree/bindings/net/brcm,systemport.txt
index 877da34145b0..877da34145b0 100644
--- a/Documentation/devicetree/bindings/net/broadcom-systemport.txt
+++ b/Documentation/devicetree/bindings/net/brcm,systemport.txt
diff --git a/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
index ab0bb4247d14..ab0bb4247d14 100644
--- a/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 3fc360523bc9..41b3f3f864e8 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -19,6 +19,12 @@ The following properties are common to the Ethernet controllers:
- phy: the same as "phy-handle" property, not recommended for new bindings.
- phy-device: the same as "phy-handle" property, not recommended for new
bindings.
+- rx-fifo-depth: the size of the controller's receive fifo in bytes. This
+ is used for components that can have configurable receive fifo sizes,
+ and is useful for determining certain configuration settings such as
+ flow control thresholds.
+- tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
+ is used for components that can have configurable fifo sizes.
Child nodes of the Ethernet controller are typically the individual PHY devices
connected via the MDIO bus (sometimes the MDIO bus controller is separate).
diff --git a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
index d3bbdded4cbe..168f1be50912 100644
--- a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
+++ b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
@@ -6,11 +6,14 @@ Required properties:
- spi-max-frequency: maximal bus speed, should be set to 7500000 depends
sync or async operation mode
- reg: the chipselect index
- - interrupts: the interrupt generated by the device
+ - interrupts: the interrupt generated by the device. A non-high-level
+ trigger type can cause deadlocks while handling the ISR.
Optional properties:
- reset-gpio: GPIO spec for the rstn pin
- sleep-gpio: GPIO spec for the slp_tr pin
+ - xtal-trim: u8 value for fine tuning the internal capacitance
+ arrays of xtal pins: 0 = +0 pF, 0xf = +4.5 pF
Example:
@@ -18,6 +21,7 @@ Example:
compatible = "atmel,at86rf231";
spi-max-frequency = <7500000>;
reg = <0>;
- interrupts = <19 1>;
+ interrupts = <19 4>;
interrupt-parent = <&gpio3>;
+ xtal-trim = /bits/ 8 <0x06>;
};
diff --git a/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt b/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
index 0071883c08d8..fb6d49f184ed 100644
--- a/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
+++ b/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
@@ -13,11 +13,15 @@ Required properties:
- cca-gpio: GPIO spec for the CCA pin
- vreg-gpio: GPIO spec for the VREG pin
- reset-gpio: GPIO spec for the RESET pin
+Optional properties:
+ - amplified: include if the CC2520 is connected to a CC2591 amplifier
+
Example:
cc2520@0 {
compatible = "ti,cc2520";
reg = <0>;
spi-max-frequency = <4000000>;
+ amplified;
pinctrl-names = "default";
pinctrl-0 = <&cc2520_cape_pins>;
fifo-gpio = <&gpio1 18 0>;
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index f9c07710478d..d0e6fa38f335 100644
--- a/Documentation/devicetree/bindings/net/keystone-netcp.txt
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -49,6 +49,7 @@ Required properties:
- compatible: Should be "ti,netcp-1.0"
- clocks: phandle to the reference clocks for the subsystem.
- dma-id: Navigator packet dma instance id.
+- ranges: address range of NetCP (includes Ethernet SS, PA and SA)
Optional properties:
- reg: register location and the size for the following register
@@ -64,10 +65,30 @@ NetCP device properties: Device specification for NetCP sub-modules.
1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
Required properties:
- label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
+- compatible: Must be one of the following:
+	"ti,netcp-gbe" for 1GbE on NetCP 1.4
+	"ti,netcp-gbe-5" for 1GbE on NetCP 1.5 (N=5)
+	"ti,netcp-gbe-9" for 1GbE on NetCP 1.5 (N=9)
+	"ti,netcp-gbe-2" for 1GbE on NetCP 1.5 (N=2)
+	"ti,netcp-xgbe" for 10 GbE
+
- reg: register location and the size for the following register
regions in the specified order.
- - subsystem registers
- - serdes registers
+ - switch subsystem registers
+ - sgmii port3/4 module registers (only for NetCP 1.4)
+ - switch module registers
+ - serdes registers (only for 10G)
+
+ For NetCP 1.4 ethss, the register order is:
+ index #0 - switch subsystem registers
+ index #1 - sgmii port3/4 module registers
+ index #2 - switch module registers
+
+ For NetCP 1.5 ethss (9 port, 5 port and 2 port), the register order is:
+ index #0 - switch subsystem registers
+ index #1 - switch module registers
+ index #2 - serdes registers
+
- tx-channel: the navigator packet dma channel name for tx.
- tx-queue: the navigator queue number associated with the tx dma channel.
- interfaces: specification for each of the switch port to be registered as a
@@ -120,14 +141,13 @@ Optional properties:
Example binding:
-netcp: netcp@2090000 {
+netcp: netcp@2000000 {
reg = <0x2620110 0x8>;
reg-names = "efuse";
compatible = "ti,netcp-1.0";
#address-cells = <1>;
#size-cells = <1>;
- ranges;
-
+ ranges = <0 0x2000000 0xfffff>;
clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
dma-coherent;
/* big-endian; */
@@ -137,9 +157,9 @@ netcp: netcp@2090000 {
#address-cells = <1>;
#size-cells = <1>;
ranges;
- gbe@0x2090000 {
+ gbe@90000 {
label = "netcp-gbe";
- reg = <0x2090000 0xf00>;
+ reg = <0x90000 0x300>, <0x90400 0x400>, <0x90800 0x700>;
/* enable-ale; */
tx-queue = <648>;
tx-channel = <8>;
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index aaa696414f57..ba19d671e808 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -2,10 +2,13 @@
Required properties:
- compatible: Should be "cdns,[<chip>-]{macb|gem}"
- Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
+ Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs or the 10/100Mbit IP
+ available on sama5d3 SoCs.
Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
+ Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
+ Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs.
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
- phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
new file mode 100644
index 000000000000..5b6cd9b3f628
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
@@ -0,0 +1,35 @@
+* NXP Semiconductors NXP NCI NFC Controllers
+
+Required properties:
+- compatible: Should be "nxp,nxp-nci-i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- enable-gpios: Output GPIO pin used for enabling/disabling the chip
+- firmware-gpios: Output GPIO pin used to enter firmware download mode
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
+
+&i2c2 {
+
+ status = "okay";
+
+ npc100: npc100@29 {
+
+ compatible = "nxp,nxp-nci-i2c";
+
+ reg = <0x29>;
+ clock-frequency = <100000>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <29 GPIO_ACTIVE_HIGH>;
+
+ enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
+ firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 8ca65cec52ae..f34fc3c81a75 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -35,15 +35,18 @@ Optional properties:
- reset-names: Should contain the reset signal name "stmmaceth", if a
reset phandle is given
- max-frame-size: See ethernet.txt file in the same directory
-- clocks: If present, the first clock should be the GMAC main clock,
- further clocks may be specified in derived bindings.
+- clocks: If present, the first clock should be the GMAC main clock and
+ the second clock should be the peripheral's register interface clock. Further
+ clocks may be specified in derived bindings.
- clock-names: One name for each entry in the clocks property, the
- first one should be "stmmaceth".
+ first one should be "stmmaceth" and the second one should be "pclk".
- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
available this clock is used for programming the Timestamp Addend Register.
If not passed then the system clock will be used and this is fine on some
platforms.
- snps,burst_len: The AXI burst length value of the AXI BUS MODE register.
+- tx-fifo-depth: See ethernet.txt file in the same directory
+- rx-fifo-depth: See ethernet.txt file in the same directory
Examples:
@@ -58,6 +61,8 @@ Examples:
phy-mode = "gmii";
snps,multicast-filter-bins = <256>;
snps,perfect-filter-entries = <128>;
+ rx-fifo-depth = <16384>;
+ tx-fifo-depth = <16384>;
clocks = <&clock>;
clock-names = "stmmaceth";
};
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
new file mode 100644
index 000000000000..2a3d90de18ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
@@ -0,0 +1,47 @@
+TI Wilink 6/7/8 (wl12xx/wl18xx) SDIO devices
+
+This node provides properties for controlling the wilink wireless device. The
+node is expected to be specified as a child node to the SDIO controller that
+connects the device to the system.
+
+Required properties:
+ - compatible: should be one of the following:
+ * "ti,wl1271"
+ * "ti,wl1273"
+ * "ti,wl1281"
+ * "ti,wl1283"
+ * "ti,wl1801"
+ * "ti,wl1805"
+ * "ti,wl1807"
+ * "ti,wl1831"
+ * "ti,wl1835"
+ * "ti,wl1837"
+ - interrupts : specifies attributes for the out-of-band interrupt.
+
+Optional properties:
+ - interrupt-parent : the phandle for the interrupt controller to which the
+ device interrupts are connected.
+ - ref-clock-frequency : ref clock frequency in Hz
+ - tcxo-clock-frequency : tcxo clock frequency in Hz
+
+Note: the *-clock-frequency properties assume internal clocks. In the case of an
+external clock, new bindings (for parsing the clock nodes) have to be added.
+
+Example:
+
+&mmc3 {
+ status = "okay";
+ vmmc-supply = <&wlan_en_reg>;
+ bus-width = <4>;
+ cap-power-off-card;
+ keep-power-in-suspend;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1835";
+ reg = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/panel/ampire,am800480r3tmqwa1h.txt b/Documentation/devicetree/bindings/panel/ampire,am800480r3tmqwa1h.txt
new file mode 100644
index 000000000000..83e2cae1cc1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/ampire,am800480r3tmqwa1h.txt
@@ -0,0 +1,7 @@
+Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "ampire,am800480r3tmqwa1h"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
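+
+Example (a minimal sketch; the supply and backlight phandles are assumptions
+and the optional properties come from the simple-panel binding):
+
+	panel {
+		compatible = "ampire,am800480r3tmqwa1h";
+		power-supply = <&panel_vcc_reg>;
+		backlight = <&backlight>;
+	};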
diff --git a/Documentation/devicetree/bindings/panel/auo,b101ean01.txt b/Documentation/devicetree/bindings/panel/auo,b101ean01.txt
new file mode 100644
index 000000000000..3590b0741619
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b101ean01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101ean01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,at043tn24.txt b/Documentation/devicetree/bindings/panel/innolux,at043tn24.txt
new file mode 100644
index 000000000000..4104226b61bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,at043tn24.txt
@@ -0,0 +1,7 @@
+Innolux AT043TN24 4.3" WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,at043tn24"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,zj070na-01p.txt b/Documentation/devicetree/bindings/panel/innolux,zj070na-01p.txt
new file mode 100644
index 000000000000..824f87f1526d
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,zj070na-01p.txt
@@ -0,0 +1,7 @@
+Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,zj070na-01p"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/ortustech,com43h4m85ulc.txt b/Documentation/devicetree/bindings/panel/ortustech,com43h4m85ulc.txt
new file mode 100644
index 000000000000..de19e9398618
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/ortustech,com43h4m85ulc.txt
@@ -0,0 +1,7 @@
+OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
+
+Required properties:
+- compatible: should be "ortustech,com43h4m85ulc"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/samsung,ltn140at29-301.txt b/Documentation/devicetree/bindings/panel/samsung,ltn140at29-301.txt
new file mode 100644
index 000000000000..e7f969d891cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/samsung,ltn140at29-301.txt
@@ -0,0 +1,7 @@
+Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,ltn140at29-301"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/shelly,sca07010-bfn-lnn.txt b/Documentation/devicetree/bindings/panel/shelly,sca07010-bfn-lnn.txt
new file mode 100644
index 000000000000..fc1ea9e26c94
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/shelly,sca07010-bfn-lnn.txt
@@ -0,0 +1,7 @@
+Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "shelly,sca07010-bfn-lnn"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/phy/bcm-phy.txt b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt
index 3dc8b3d2ffbb..3dc8b3d2ffbb 100644
--- a/Documentation/devicetree/bindings/phy/bcm-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt
new file mode 100644
index 000000000000..6540ca56be5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt
@@ -0,0 +1,98 @@
+Broadcom Cygnus GPIO/PINCONF Controller
+
+Required properties:
+
+- compatible:
+ Must be "brcm,cygnus-ccm-gpio", "brcm,cygnus-asiu-gpio", or
+ "brcm,cygnus-crmu-gpio"
+
+- reg:
+ Define the base and range of the I/O address space that contains the Cygnus
+GPIO/PINCONF controller registers
+
+- #gpio-cells:
+ Must be two. The first cell is the GPIO pin number (within the
+controller's pin space) and the second cell is used for the following:
+ bit[0]: polarity (0 for active high and 1 for active low)
+
+- gpio-controller:
+ Specifies that the node is a GPIO controller
+
+Optional properties:
+
+- interrupts:
+ Interrupt ID
+
+- interrupt-controller:
+ Specifies that the node is an interrupt controller
+
+- pinmux:
+ Specifies the phandle to the IOMUX device, where pins can be individually
+muxed to GPIO
+
+Supported generic PINCONF properties in child nodes:
+
+- pins:
+ The list of pins (within the controller's own pin space) that properties
+in the node apply to. Pin names are "gpio-<pin>"
+
+- bias-disable:
+ Disable pin bias
+
+- bias-pull-up:
+ Enable internal pull up resistor
+
+- bias-pull-down:
+ Enable internal pull down resistor
+
+- drive-strength:
+ Valid drive strength values include 2, 4, 6, 8, 10, 12, 14, 16 (mA)
+
+Example:
+ gpio_ccm: gpio@1800a000 {
+ compatible = "brcm,cygnus-ccm-gpio";
+ reg = <0x1800a000 0x50>,
+ <0x0301d164 0x20>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+
+ touch_pins: touch_pins {
+ pwr: pwr {
+ pins = "gpio-0";
+ drive-strength = <16>;
+ };
+
+ event: event {
+ pins = "gpio-1";
+ bias-pull-up;
+ };
+ };
+ };
+
+ gpio_asiu: gpio@180a5000 {
+ compatible = "brcm,cygnus-asiu-gpio";
+ reg = <0x180a5000 0x668>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ };
+
+ /*
+ * Touchscreen that uses the CCM GPIO 0 and 1
+ */
+ tsc {
+ ...
+ ...
+ gpio-pwr = <&gpio_ccm 0 0>;
+ gpio-event = <&gpio_ccm 1 0>;
+ };
+
+ /* Bluetooth that uses the ASIU GPIO 5, with polarity inverted */
+ bluetooth {
+ ...
+ ...
+ bcm,rfkill-bank-sel = <&gpio_asiu 5 1>;
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt
new file mode 100644
index 000000000000..3600d5c6c4d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt
@@ -0,0 +1,132 @@
+Broadcom Cygnus IOMUX Controller
+
+The Cygnus IOMUX controller supports group based mux configuration. In
+addition, certain pins can be muxed to GPIO function individually.
+
+Required properties:
+
+- compatible:
+ Must be "brcm,cygnus-pinmux"
+
+- reg:
+ Define the base and range of the I/O address space that contains the Cygnus
+IOMUX registers
+
+Properties in subnodes:
+
+- function:
+ The mux function to select
+
+- groups:
+ The list of groups to select with a given function
+
+For more details, refer to
+Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+For example:
+
+ pinmux: pinmux@0x0301d0c8 {
+ compatible = "brcm,cygnus-pinmux";
+ reg = <0x0301d0c8 0x1b0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_default>;
+
+ i2s0_default: i2s0_default {
+ mux {
+ function = "i2s0";
+ groups = "i2s0_0_grp", "i2s0_1_grp";
+ };
+ };
+ };
+
+List of supported functions and groups in Cygnus:
+
+"i2s0": "i2s0_0_grp", "i2s0_1_grp"
+
+"i2s1": "i2s1_0_grp", "i2s1_1_grp"
+
+"i2s2": "i2s2_0_grp", "i2s2_1_grp", "i2s2_2_grp", "i2s2_3_grp", "i2s2_4_grp"
+
+"spdif": "spdif_grp"
+
+"pwm0": "pwm0_grp"
+
+"pwm1": "pwm1_grp"
+
+"pwm2": "pwm2_grp"
+
+"pwm3": "pwm3_grp"
+
+"pwm4": "pwm4_grp"
+
+"pwm5": "pwm5_grp"
+
+"key": "key0_grp", "key1_grp", "key2_grp", "key3_grp", "key4_grp", "key5_grp",
+"key6_grp", "key7_grp", "key8_grp", "key9_grp", "key10_grp", "key11_grp",
+"key12_grp", "key13_grp", "key14_grp", "key15_grp"
+
+"audio_dte": "audio_dte0_grp", "audio_dte1_grp", "audio_dte2_grp", "audio_dte3_grp"
+
+"smart_card0": "smart_card0_grp", "smart_card0_fcb_grp"
+
+"smart_card1": "smart_card1_grp", "smart_card1_fcb_grp"
+
+"spi0": "spi0_grp"
+
+"spi1": "spi1_grp"
+
+"spi2": "spi2_grp"
+
+"spi3": "spi3_grp"
+
+"spi4": "spi4_0_grp", "spi4_1_grp"
+
+"spi5": "spi5_grp"
+
+"sw_led0": "sw_led0_0_grp", "sw_led0_1_grp"
+
+"sw_led1": "sw_led1_grp"
+
+"sw_led2": "sw_led2_0_grp", "sw_led2_1_grp"
+
+"d1w": "d1w_grp"
+
+"lcd": "lcd_grp"
+
+"sram": "sram_0_grp", "sram_1_grp"
+
+"uart0": "uart0_grp"
+
+"uart1": "uart1_grp", "uart1_dte_grp"
+
+"uart2": "uart2_grp"
+
+"uart3": "uart3_grp"
+
+"uart4": "uart4_grp"
+
+"qspi": "qspi_0_grp", "qspi_1_grp"
+
+"nand": "nand_grp"
+
+"sdio0": "sdio0_grp", "sdio0_cd_grp", "sdio0_mmc_grp"
+
+"sdio1": "sdio1_data_0_grp", "sdio1_data_1_grp", "sdio1_cd_grp",
+"sdio1_led_grp", "sdio1_mmc_grp"
+
+"can0": "can0_grp"
+
+"can1": "can1_grp"
+
+"cam": "cam_led_grp", "cam_0_grp", "cam_1_grp"
+
+"bsc1": "bsc1_grp"
+
+"pcie_clkreq": "pcie_clkreq_grp"
+
+"usb0_oc": "usb0_oc_grp"
+
+"usb1_oc": "usb1_oc_grp"
+
+"usb2_oc": "usb2_oc_grp"
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-39x-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-39x-pinctrl.txt
new file mode 100644
index 000000000000..5b1a9dc004f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-39x-pinctrl.txt
@@ -0,0 +1,78 @@
+* Marvell Armada 39x SoC pinctrl driver for mpp
+
+Please refer to marvell,mvebu-pinctrl.txt in this directory for common binding
+part and usage.
+
+Required properties:
+- compatible: "marvell,88f6920-pinctrl", "marvell,88f6928-pinctrl"
+ depending on the specific variant of the SoC being used.
+- reg: register specifier of MPP registers
+
+Available mpp pins/groups and functions:
+Note: brackets (x) are not part of the mpp name for marvell,function and are
+given only to provide a more detailed description in this document.
+
+name pins functions
+================================================================================
+mpp0 0 gpio, ua0(rxd)
+mpp1 1 gpio, ua0(txd)
+mpp2 2 gpio, i2c0(sck)
+mpp3 3 gpio, i2c0(sda)
+mpp4 4 gpio, ua1(txd), ua0(rts), smi(mdc)
+mpp5 5 gpio, ua1(rxd), ua0(cts), smi(mdio)
+mpp6 6 gpio, dev(cs3), xsmi(mdio)
+mpp7 7 gpio, dev(ad9), xsmi(mdc)
+mpp8 8 gpio, dev(ad10), ptp(trig)
+mpp9 9 gpio, dev(ad11), ptp(clk)
+mpp10 10 gpio, dev(ad12), ptp(event)
+mpp11 11 gpio, dev(ad13), led(clk)
+mpp12 12 gpio, pcie0(rstout), dev(ad14), led(stb)
+mpp13 13 gpio, dev(ad15), led(data)
+mpp14 14 gpio, m(vtt), dev(wen1), ua1(txd)
+mpp15 15 gpio, pcie0(rstout), spi0(mosi), i2c1(sck)
+mpp16 16 gpio, m(decc), spi0(miso), i2c1(sda)
+mpp17 17 gpio, ua1(rxd), spi0(sck), smi(mdio)
+mpp18 18 gpio, ua1(txd), spi0(cs0), i2c2(sck)
+mpp19 19 gpio, sata1(present) [1], ua0(cts), ua1(rxd), i2c2(sda)
+mpp20 20 gpio, sata0(present) [1], ua0(rts), ua1(txd), smi(mdc)
+mpp21 21 gpio, spi0(cs1), sata0(present) [1], sd(cmd), dev(bootcs), ge(rxd0)
+mpp22 22 gpio, spi0(mosi), dev(ad0)
+mpp23 23 gpio, spi0(sck), dev(ad2)
+mpp24 24 gpio, spi0(miso), ua0(cts), ua1(rxd), sd(d4), dev(readyn)
+mpp25 25 gpio, spi0(cs0), ua0(rts), ua1(txd), sd(d5), dev(cs0)
+mpp26 26 gpio, spi0(cs2), i2c1(sck), sd(d6), dev(cs1)
+mpp27 27 gpio, spi0(cs3), i2c1(sda), sd(d7), dev(cs2), ge(txclkout)
+mpp28 28 gpio, sd(clk), dev(ad5), ge(txd0)
+mpp29 29 gpio, dev(ale0), ge(txd1)
+mpp30 30 gpio, dev(oen), ge(txd2)
+mpp31 31 gpio, dev(ale1), ge(txd3)
+mpp32 32 gpio, dev(wen0), ge(txctl)
+mpp33 33 gpio, m(decc), dev(ad3)
+mpp34 34 gpio, dev(ad1)
+mpp35 35 gpio, ref(clk), dev(a1)
+mpp36 36 gpio, dev(a0)
+mpp37 37 gpio, sd(d3), dev(ad8), ge(rxclk)
+mpp38 38 gpio, ref(clk), sd(d0), dev(ad4), ge(rxd1)
+mpp39 39 gpio, i2c1(sck), ua0(cts), sd(d1), dev(a2), ge(rxd2)
+mpp40 40 gpio, i2c1(sda), ua0(rts), sd(d2), dev(ad6), ge(rxd3)
+mpp41 41 gpio, ua1(rxd), ua0(cts), spi1(cs3), dev(burstn), nd(rbn0), ge(rxctl)
+mpp42 42 gpio, ua1(txd), ua0(rts), dev(ad7)
+mpp43 43 gpio, pcie0(clkreq), m(vtt), m(decc), spi1(cs2), dev(clkout), nd(rbn1)
+mpp44 44 gpio, sata0(present) [1], sata1(present) [1], led(clk)
+mpp45 45 gpio, ref(clk), pcie0(rstout), ua1(rxd)
+mpp46 46 gpio, ref(clk), pcie0(rstout), ua1(txd), led(stb)
+mpp47 47 gpio, sata0(present) [1], sata1(present) [1], led(data)
+mpp48 48 gpio, sata0(present) [1], m(vtt), tdm(pclk) [1], audio(mclk) [1], sd(d4), pcie0(clkreq), ua1(txd)
+mpp49 49 gpio, tdm(fsync) [1], audio(lrclk) [1], sd(d5), ua2(rxd)
+mpp50 50 gpio, pcie0(rstout), tdm(drx) [1], audio(extclk) [1], sd(cmd), ua2(rxd)
+mpp51 51 gpio, tdm(dtx) [1], audio(sdo) [1], m(decc), ua2(txd)
+mpp52 52 gpio, pcie0(rstout), tdm(intn) [1], audio(sdi) [1], sd(d6), i2c3(sck)
+mpp53 53 gpio, sata1(present) [1], sata0(present) [1], tdm(rstn) [1], audio(bclk) [1], sd(d7), i2c3(sda)
+mpp54 54 gpio, sata0(present) [1], sata1(present) [1], pcie0(rstout), sd(d3), ua3(txd)
+mpp55 55 gpio, ua1(cts), spi1(cs1), sd(d0), ua1(rxd), ua3(rxd)
+mpp56 56 gpio, ua1(rts), m(decc), spi1(mosi), ua1(txd)
+mpp57 57 gpio, spi1(sck), sd(clk), ua1(txd)
+mpp58 58 gpio, i2c1(sck), pcie2(clkreq), spi1(miso), sd(d1), ua1(rxd)
+mpp59 59 gpio, pcie0(rstout), i2c1(sda), spi1(cs0), sd(d2)
+
+[1]: only available on 88F6928
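+
+Example (a minimal sketch following the common marvell,mvebu-pinctrl binding;
+the register offset, labels and node names are assumptions):
+
+	pinctrl@18000 {
+		compatible = "marvell,88f6928-pinctrl";
+		reg = <0x18000 0x20>;
+
+		uart0_pins: uart0-pins {
+			marvell,pins = "mpp0", "mpp1";
+			marvell,function = "ua0";
+		};
+	};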
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 17e7240c6998..3f6a524cc5ff 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -1,7 +1,7 @@
== Amlogic Meson pinmux controller ==
Required properties for the root node:
- - compatible: "amlogic,meson8-pinctrl"
+ - compatible: "amlogic,meson8-pinctrl" or "amlogic,meson8b-pinctrl"
- reg: address and size of registers controlling irq functionality
=== GPIO sub-nodes ===
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
new file mode 100644
index 000000000000..a62d82d5fbe9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
@@ -0,0 +1,166 @@
+NVIDIA Tegra210 pinmux controller
+
+Required properties:
+- compatible: "nvidia,tegra210-pinmux"
+- reg: Should contain a list of base address and size pairs for:
+ - first entry: The APB_MISC_GP_*_PADCTRL registers (pad control)
+ - second entry: The PINMUX_AUX_* registers (pinmux)
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+Tegra's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, tristate, drive strength, etc.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function or tristate parameter. For this
+reason, even seemingly boolean values are actually tristates in this binding:
+unspecified, off, or on. Unspecified is represented as an absent property,
+and off/on are represented as integer values 0 and 1.
+
+See the TRM to determine which properties and values apply to each pin/group.
+Macro values for property values are defined in
+include/dt-bindings/pinctrl/pinctrl-tegra.h.
+
+Required subnode-properties:
+- nvidia,pins : An array of strings. Each string contains the name of a pin or
+ group. Valid values for these names are listed below.
+
+Optional subnode-properties:
+- nvidia,function: A string containing the name of the function to mux to the
+ pin or group.
+- nvidia,pull: Integer, representing the pull-down/up to apply to the pin.
+ 0: none, 1: down, 2: up.
+- nvidia,tristate: Integer.
+ 0: drive, 1: tristate.
+- nvidia,enable-input: Integer. Enable the pin's input path.
+ enable: TEGRA_PIN_ENABLE and
+ disable or output only: TEGRA_PIN_DISABLE.
+- nvidia,open-drain: Integer.
+ enable: TEGRA_PIN_ENABLE.
+ disable: TEGRA_PIN_DISABLE.
+- nvidia,lock: Integer. Lock the pin configuration against further changes
+ until reset.
+ enable: TEGRA_PIN_ENABLE.
+ disable: TEGRA_PIN_DISABLE.
+- nvidia,io-hv: Integer. Select high-voltage receivers.
+ normal: TEGRA_PIN_DISABLE
+ high: TEGRA_PIN_ENABLE
+- nvidia,high-speed-mode: Integer. Enable high speed mode for the pins.
+ normal: TEGRA_PIN_DISABLE
+ high: TEGRA_PIN_ENABLE
+- nvidia,schmitt: Integer. Enables Schmitt Trigger on the input.
+ normal: TEGRA_PIN_DISABLE
+ high: TEGRA_PIN_ENABLE
+- nvidia,drive-type: Integer. Valid range 0...3.
+- nvidia,pull-down-strength: Integer. Controls drive strength. 0 is weakest.
+ The range of valid values depends on the pingroup. See "CAL_DRVDN" in the
+ Tegra TRM.
+- nvidia,pull-up-strength: Integer. Controls drive strength. 0 is weakest.
+ The range of valid values depends on the pingroup. See "CAL_DRVUP" in the
+ Tegra TRM.
+- nvidia,slew-rate-rising: Integer. Controls rising signal slew rate. 0 is
+ fastest. The range of valid values depends on the pingroup. See
+ "DRVDN_SLWR" in the Tegra TRM.
+- nvidia,slew-rate-falling: Integer. Controls falling signal slew rate. 0 is
+ fastest. The range of valid values depends on the pingroup. See
+ "DRVUP_SLWF" in the Tegra TRM.
+
+Valid values for pin and group names (nvidia,pins) are:
+
+ Mux groups:
+
+ These correspond to Tegra PINMUX_AUX_* (pinmux) registers. Any property
+ that exists in those registers may be set for the following pin names.
+
+ In Tegra210, many pins also have a dedicated APB_MISC_GP_*_PADCTRL
+ register. Where that is true, any property that exists in that register
+ may also be set on the following pin names.
+
+ als_prox_int_px3, ap_ready_pv5, ap_wake_bt_ph3, ap_wake_nfc_ph7,
+ aud_mclk_pbb0, batt_bcl, bt_rst_ph4, bt_wake_ap_ph5, button_home_py1,
+ button_power_on_px5, button_slide_sw_py0, button_vol_down_px7,
+ button_vol_up_px6, cam1_mclk_ps0, cam1_pwdn_ps7, cam1_strobe_pt1,
+ cam2_mclk_ps1, cam2_pwdn_pt0, cam_af_en_ps5, cam_flash_en_ps6,
+ cam_i2c_scl_ps2, cam_i2c_sda_ps3, cam_rst_ps4, clk_32k_in,
+ clk_32k_out_py5, clk_req, core_pwr_req, cpu_pwr_req, dap1_din_pb1,
+ dap1_dout_pb2, dap1_fs_pb0, dap1_sclk_pb3, dap2_din_paa2, dap2_dout_paa3,
+ dap2_fs_paa0, dap2_sclk_paa1, dap4_din_pj5, dap4_dout_pj6, dap4_fs_pj4,
+ dap4_sclk_pj7, dmic1_clk_pe0, dmic1_dat_pe1, dmic2_clk_pe2, dmic2_dat_pe3,
+ dmic3_clk_pe4, dmic3_dat_pe5, dp_hpd0_pcc6, dvfs_clk_pbb2, dvfs_pwm_pbb1,
+ gen1_i2c_scl_pj1, gen1_i2c_sda_pj0, gen2_i2c_scl_pj2, gen2_i2c_sda_pj3,
+ gen3_i2c_scl_pf0, gen3_i2c_sda_pf1, gpio_x1_aud_pbb3, gpio_x3_aud_pbb4,
+ gps_en_pi2, gps_rst_pi3, hdmi_cec_pcc0, hdmi_int_dp_hpd_pcc1, jtag_rtck,
+ lcd_bl_en_pv1, lcd_bl_pwm_pv0, lcd_gpio1_pv3, lcd_gpio2_pv4, lcd_rst_pv2,
+ lcd_te_py2, modem_wake_ap_px0, motion_int_px2, nfc_en_pi0, nfc_int_pi1,
+ pa6, pcc7, pe6, pe7, pex_l0_clkreq_n_pa1, pex_l0_rst_n_pa0,
+ pex_l1_clkreq_n_pa4, pex_l1_rst_n_pa3, pex_wake_n_pa2, ph6, pk0, pk1, pk2,
+ pk3, pk4, pk5, pk6, pk7, pl0, pl1, pwr_i2c_scl_py3, pwr_i2c_sda_py4,
+ pwr_int_n, pz0, pz1, pz2, pz3, pz4, pz5, qspi_cs_n_pee1, qspi_io0_pee2,
+ qspi_io1_pee3, qspi_io2_pee4, qspi_io3_pee5, qspi_sck_pee0,
+ sata_led_active_pa5, sdmmc1_clk_pm0, sdmmc1_cmd_pm1, sdmmc1_dat0_pm5,
+ sdmmc1_dat1_pm4, sdmmc1_dat2_pm3, sdmmc1_dat3_pm2, sdmmc3_clk_pp0,
+ sdmmc3_cmd_pp1, sdmmc3_dat0_pp5, sdmmc3_dat1_pp4, sdmmc3_dat2_pp3,
+ sdmmc3_dat3_pp2, shutdown, spdif_in_pcc3, spdif_out_pcc2, spi1_cs0_pc3,
+ spi1_cs1_pc4, spi1_miso_pc1, spi1_mosi_pc0, spi1_sck_pc2, spi2_cs0_pb7,
+ spi2_cs1_pdd0, spi2_miso_pb5, spi2_mosi_pb4, spi2_sck_pb6, spi4_cs0_pc6,
+ spi4_miso_pd0, spi4_mosi_pc7, spi4_sck_pc5, temp_alert_px4, touch_clk_pv7,
+ touch_int_px1, touch_rst_pv6, uart1_cts_pu3, uart1_rts_pu2, uart1_rx_pu1,
+ uart1_tx_pu0, uart2_cts_pg3, uart2_rts_pg2, uart2_rx_pg1, uart2_tx_pg0,
+ uart3_cts_pd4, uart3_rts_pd3, uart3_rx_pd2, uart3_tx_pd1, uart4_cts_pi7,
+ uart4_rts_pi6, uart4_rx_pi5, uart4_tx_pi4, usb_vbus_en0_pcc4,
+ usb_vbus_en1_pcc5, wifi_en_ph0, wifi_rst_ph1, wifi_wake_ap_ph2
+
+ Drive groups:
+
+ These correspond to the Tegra APB_MISC_GP_*_PADCTRL (pad control)
+ registers. Note that where one of these registers controls a single pin
+ for which a PINMUX_AUX_* exists, see the list above for the pin name to
+ use when configuring the pinmux.
+
+ pa6, pcc7, pe6, pe7, ph6, pk0, pk1, pk2, pk3, pk4, pk5, pk6, pk7, pl0, pl1,
+ pz0, pz1, pz2, pz3, pz4, pz5, sdmmc1, sdmmc2, sdmmc3, sdmmc4
+
+Valid values for nvidia,functions are:
+
+ aud, bcl, blink, ccla, cec, cldvfs, clk, core, cpu, displaya, displayb,
+ dmic1, dmic2, dmic3, dp, dtv, extperiph3, i2c1, i2c2, i2c3, i2cpmu, i2cvi,
+ i2s1, i2s2, i2s3, i2s4a, i2s4b, i2s5a, i2s5b, iqc0, iqc1, jtag, pe, pe0,
+ pe1, pmi, pwm0, pwm1, pwm2, pwm3, qspi, rsvd0, rsvd1, rsvd2, rsvd3, sata,
+ sdmmc1, sdmmc3, shutdown, soc, sor0, sor1, spdif, spi1, spi2, spi3, spi4,
+ sys, touch, uart, uarta, uartb, uartc, uartd, usb, vgp1, vgp2, vgp3, vgp4,
+ vgp5, vgp6, vimclk, vimclk2
+
+Example:
+
+ pinmux: pinmux@70000800 {
+ compatible = "nvidia,tegra210-pinmux";
+ reg = <0x0 0x700008d4 0x0 0x2a8>, /* Pad control registers */
+ <0x0 0x70003000 0x0 0x1000>; /* Mux registers */
+
+ pinctrl-names = "boot";
+ pinctrl-0 = <&state_boot>;
+
+ state_boot: pinmux {
+ gen1_i2c_scl_pj1 {
+ nvidia,pins = "gen1_i2c_scl_pj1";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 47d84b6ee91b..b73c96d24f59 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -38,7 +38,7 @@ property exists to define the pin configuration. Each state may also be
assigned a name. When names are used, another property exists to map from
those names to the integer IDs.
-Each client device's own binding determines the set of states the must be
+Each client device's own binding determines the set of states that must be
defined in its device tree node, and whether to define the set of state
IDs that must be provided, or whether to define the set of state names that
must be provided.
@@ -133,16 +133,27 @@ pin multiplexing nodes:
function - the mux function to select
groups - the list of groups to select with this function
+ (either this or "pins" must be specified)
+pins - the list of pins to select with this function (either
+ this or "groups" must be specified)
Example:
state_0_node_a {
- function = "uart0";
- groups = "u0rxtx", "u0rtscts";
+ uart0 {
+ function = "uart0";
+ groups = "u0rxtx", "u0rtscts";
+ };
};
state_1_node_a {
- function = "spi0";
- groups = "spi0pins";
+ spi0 {
+ function = "spi0";
+ groups = "spi0pins";
+ };
+};
+state_2_node_a {
+ function = "i2c0";
+ pins = "mfio29", "mfio30";
};
== Generic pin configuration node content ==
@@ -188,16 +199,22 @@ slew-rate - set the slew rate
For example:
state_0_node_a {
- pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
- bias-pull-up;
+ cts_rxd {
+ pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
+ bias-pull-up;
+ };
};
state_1_node_a {
- pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
- output-high;
+ rts_txd {
+ pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
+ output-high;
+ };
};
state_2_node_a {
- group = "foo-group";
- bias-pull-up;
+ foo {
+ group = "foo-group";
+ bias-pull-up;
+ };
};
Some of the generic properties take arguments. For those that do, the
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
new file mode 100644
index 000000000000..5868a0f7255d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
@@ -0,0 +1,145 @@
+* Mediatek MT65XX Pin Controller
+
+The Mediatek's Pin controller is used to control SoC pins.
+
+Required properties:
+- compatible: value should be either of the following.
+ (a) "mediatek,mt8135-pinctrl", compatible with mt8135 pinctrl.
+- mediatek,pctl-regmap: Should be a phandle of the syscfg node.
+- pins-are-numbered: Specify that the subnodes use numbered pinmux to
+ specify pins.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells: number of cells in the GPIO specifier. Since the generic GPIO
+ binding is used, the number of cells must be 2. See the gpio binding
+ representation below for a description of the particular cells.
+
+ Eg: <&pio 6 0>
+ <[phandle of the gpio controller node]
+ [line number within the gpio controller]
+ [flags]>
+
+ Values for gpio specifier:
+ - Line number: a value between 0 and 202.
+ - Flags: bit field of flags, as defined in <dt-bindings/gpio/gpio.h>.
+ Only the following flags are supported:
+ 0 - GPIO_ACTIVE_HIGH
+ 1 - GPIO_ACTIVE_LOW
+- reg: physical address base for EINT registers
+- interrupt-controller: Marks the device node as an interrupt controller
+- #interrupt-cells: Should be two.
+- interrupts : The interrupt outputs from the controller.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices.
+
+Subnode format
+A pinctrl node should contain at least one subnode representing the
+pinctrl groups available on the machine. Each subnode will list the
+pins it needs, and how they should be configured, with regard to muxer
+configuration, pullups, drive strength, input enable/disable and input schmitt.
+
+ node {
+ pinmux = <PIN_NUMBER_PINMUX>;
+ GENERIC_PINCONFIG;
+ };
+
+Required properties:
+- pinmux: integer array, represents the gpio pin number and mux setting.
+ The supported pin numbers and mux settings vary between SoCs and are
+ defined as macros in boot/dts/<soc>-pinfunc.h directly.
+
+Optional properties:
+- GENERIC_PINCONFIG: is the generic pinconfig options to use, bias-disable,
+ bias-pull-down, bias-pull-up, input-enable, input-disable, output-low, output-high,
+ input-schmitt-enable, input-schmitt-disable and drive-strength are valid.
+
+ Some special pins have extra pull-up strength: both R0 and R1 pull-up
+ resistors are available, but the user only needs to set R1R0 to 00, 01, 10
+ or 11. When configuring bias-pull-up, arguments are therefore supported for
+ those special pins. Macros such as MTK_PUPD_SET_R1R0_00 have been defined
+ for this usage; see dt-bindings/pinctrl/mt65xx.h.
+
+ When configuring drive-strength, arguments such as MTK_DRIVE_4mA,
+ MTK_DRIVE_6mA, etc. are supported. See dt-bindings/pinctrl/mt65xx.h.
+
+Examples:
+
+#include "mt8135-pinfunc.h"
+
+...
+{
+ syscfg_pctl_a: syscfg_pctl_a@10005000 {
+ compatible = "mediatek,mt8135-pctl-a-syscfg", "syscon";
+ reg = <0 0x10005000 0 0x1000>;
+ };
+
+ syscfg_pctl_b: syscfg_pctl_b@1020C020 {
+ compatible = "mediatek,mt8135-pctl-b-syscfg", "syscon";
+ reg = <0 0x1020C020 0 0x1000>;
+ };
+
+ pinctrl@1000b000 {
+ compatible = "mediatek,mt8135-pinctrl";
+ reg = <0 0x1000B000 0 0x1000>;
+ mediatek,pctl-regmap = <&syscfg_pctl_a &syscfg_pctl_b>;
+ pins-are-numbered;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+
+ i2c0_pins_a: i2c0@0 {
+ pins1 {
+ pinmux = <MT8135_PIN_100_SDA0__FUNC_SDA0>,
+ <MT8135_PIN_101_SCL0__FUNC_SCL0>;
+ bias-disable;
+ };
+ };
+
+ i2c1_pins_a: i2c1@0 {
+ pins {
+ pinmux = <MT8135_PIN_195_SDA1__FUNC_SDA1>,
+ <MT8135_PIN_196_SCL1__FUNC_SCL1>;
+ bias-pull-up = <55>;
+ };
+ };
+
+ i2c2_pins_a: i2c2@0 {
+ pins1 {
+ pinmux = <MT8135_PIN_193_SDA2__FUNC_SDA2>;
+ bias-pull-down;
+ };
+
+ pins2 {
+ pinmux = <MT8135_PIN_49_WATCHDOG__FUNC_GPIO49>;
+ bias-pull-up;
+ };
+ };
+
+ i2c3_pins_a: i2c3@0 {
+ pins1 {
+ pinmux = <MT8135_PIN_40_DAC_CLK__FUNC_GPIO40>,
+ <MT8135_PIN_41_DAC_WS__FUNC_GPIO41>;
+ bias-pull-up = <55>;
+ };
+
+ pins2 {
+ pinmux = <MT8135_PIN_35_SCL3__FUNC_SCL3>,
+ <MT8135_PIN_36_SDA3__FUNC_SDA3>;
+ output-low;
+ bias-pull-up = <55>;
+ };
+
+ pins3 {
+ pinmux = <MT8135_PIN_57_JTCK__FUNC_GPIO57>,
+ <MT8135_PIN_60_JTDI__FUNC_JTDI>;
+ drive-strength = <32>;
+ };
+ };
+
+ ...
+ }
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 7ed08048516a..1ae63c0acd40 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -10,6 +10,7 @@ PMIC's from Qualcomm.
"qcom,pm8018-gpio"
"qcom,pm8038-gpio"
"qcom,pm8058-gpio"
+ "qcom,pm8916-gpio"
"qcom,pm8917-gpio"
"qcom,pm8921-gpio"
"qcom,pm8941-gpio"
@@ -74,6 +75,7 @@ to specify in a pin configuration subnode:
gpio1-gpio6 for pm8018
gpio1-gpio12 for pm8038
gpio1-gpio40 for pm8058
+ gpio1-gpio4 for pm8916
gpio1-gpio38 for pm8917
gpio1-gpio44 for pm8921
gpio1-gpio36 for pm8941
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index 854774b194ed..ed19991aad35 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -8,6 +8,7 @@ of PMIC's from Qualcomm.
Value type: <string>
Definition: Should contain one of:
"qcom,pm8841-mpp",
+ "qcom,pm8916-mpp",
"qcom,pm8941-mpp",
"qcom,pma8084-mpp",
@@ -67,6 +68,7 @@ to specify in a pin configuration subnode:
Definition: List of MPP pins affected by the properties specified in
this subnode. Valid pins are:
mpp1-mpp4 for pm8841
+ mpp1-mpp4 for pm8916
mpp1-mpp8 for pm8941
mpp1-mpp4 for pma8084
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
new file mode 100644
index 000000000000..65cc0345747d
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
@@ -0,0 +1,59 @@
+Freescale i.MX General Power Controller
+=======================================
+
+The i.MX6Q General Power Control (GPC) block contains DVFS load tracking
+counters and Power Gating Control (PGC) for the CPU and PU (GPU/VPU) power
+domains.
+
+Required properties:
+- compatible: Should be "fsl,imx6q-gpc" or "fsl,imx6sl-gpc"
+- reg: should be register base and length as documented in the
+ datasheet
+- interrupts: Should contain GPC interrupt request 1
+- pu-supply: Link to the LDO regulator powering the PU power domain
+- clocks: Clock phandles to devices in the PU power domain that need
+ to be enabled during domain power-up for reset propagation.
+- #power-domain-cells: Should be 1, see below:
+
+The gpc node is a power-controller as documented by the generic power domain
+bindings in Documentation/devicetree/bindings/power/power_domain.txt.
+
+Example:
+
+ gpc: gpc@020dc000 {
+ compatible = "fsl,imx6q-gpc";
+ reg = <0x020dc000 0x4000>;
+ interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
+ <0 90 IRQ_TYPE_LEVEL_HIGH>;
+ pu-supply = <&reg_pu>;
+ clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
+ <&clks IMX6QDL_CLK_GPU3D_SHADER>,
+ <&clks IMX6QDL_CLK_GPU2D_CORE>,
+ <&clks IMX6QDL_CLK_GPU2D_AXI>,
+ <&clks IMX6QDL_CLK_OPENVG_AXI>,
+ <&clks IMX6QDL_CLK_VPU_AXI>;
+ #power-domain-cells = <1>;
+ };
+
+
+Specifying power domain for IP modules
+======================================
+
+IP cores belonging to a power domain should contain a 'power-domains' property
+that is a phandle pointing to the gpc device node and a DOMAIN_INDEX specifying
+the power domain the device belongs to.
+
+Example of a device that is part of the PU power domain:
+
+ vpu: vpu@02040000 {
+ reg = <0x02040000 0x3c000>;
+ /* ... */
+ power-domains = <&gpc 1>;
+ /* ... */
+ };
+
+The following DOMAIN_INDEX values are valid for i.MX6Q:
+ARM_DOMAIN 0
+PU_DOMAIN 1
+The following additional DOMAIN_INDEX value is valid for i.MX6SL:
+DISPLAY_DOMAIN 2
diff --git a/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt b/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
index cc3b1f0a9b1a..beda7d2efc30 100644
--- a/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
+++ b/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
@@ -11,6 +11,7 @@ Required properties:
- compatible: Should be "renesas,sysc-<soctype>", "renesas,sysc-rmobile" as
fallback.
Examples with soctypes are:
+ - "renesas,sysc-r8a73a4" (R-Mobile APE6)
- "renesas,sysc-r8a7740" (R-Mobile A1)
- "renesas,sysc-sh73a0" (SH-Mobile AG5)
- reg: Two address start and address range blocks for the device:
diff --git a/Documentation/devicetree/bindings/pwm/bcm-kona-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt
index 8eae9fe7841c..8eae9fe7841c 100644
--- a/Documentation/devicetree/bindings/pwm/bcm-kona-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-pwm.txt
index b50d7a6d9d7f..e00c2e9f484d 100644
--- a/Documentation/devicetree/bindings/pwm/imx-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/imx-pwm.txt
@@ -1,10 +1,17 @@
Freescale i.MX PWM controller
Required properties:
-- compatible: should be "fsl,<soc>-pwm"
+- compatible : should be "fsl,<soc>-pwm" and one of the following
+ compatible strings:
+ - "fsl,imx1-pwm" for PWM compatible with the one integrated on i.MX1
+ - "fsl,imx27-pwm" for PWM compatible with the one integrated on i.MX27
- reg: physical base address and length of the controller's registers
- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
the cells format.
+- clocks : Clock specifiers for both ipg and per clocks.
+- clock-names : Clock names should include both "ipg" and "per"
+See the clock consumer binding,
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
- interrupts: The interrupt for the pwm controller
Example:
@@ -13,5 +20,8 @@ pwm1: pwm@53fb4000 {
#pwm-cells = <2>;
compatible = "fsl,imx53-pwm", "fsl,imx27-pwm";
reg = <0x53fb4000 0x4000>;
+ clocks = <&clks IMX5_CLK_PWM1_IPG_GATE>,
+ <&clks IMX5_CLK_PWM1_HF_GATE>;
+ clock-names = "ipg", "per";
interrupts = <61>;
};
diff --git a/Documentation/devicetree/bindings/arm/bcm/kona-resetmgr.txt b/Documentation/devicetree/bindings/reset/brcm,bcm21664-resetmgr.txt
index 93f31ca1ef4b..93f31ca1ef4b 100644
--- a/Documentation/devicetree/bindings/arm/bcm/kona-resetmgr.txt
+++ b/Documentation/devicetree/bindings/reset/brcm,bcm21664-resetmgr.txt
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
new file mode 100644
index 000000000000..be789685a1c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
@@ -0,0 +1,30 @@
+Abracon ABX80X I2C ultra low power RTC/Alarm chip
+
+The Abracon ABX80X family consists of the ab0801, ab0803, ab0804, ab0805, ab1801,
+ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805
+is the superset of ab180x.
+
+Required properties:
+
+ - "compatible": should one of:
+ "abracon,abx80x"
+ "abracon,ab0801"
+ "abracon,ab0803"
+ "abracon,ab0804"
+ "abracon,ab0805"
+ "abracon,ab1801"
+ "abracon,ab1803"
+ "abracon,ab1804"
+ "abracon,ab1805"
+ Using "abracon,abx80x" will enable chip autodetection.
+ - "reg": I2C bus address of the device
+
+Optional properties:
+
+The abx804 and abx805 have a trickle charger that is able to charge the
+connected battery or supercap. Both the following properties have to be defined
+and valid to enable charging:
+
+ - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
+ - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
+ resistor, the other values are in ohm.
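+
+Example (an illustrative sketch; the parent I2C bus, the 0x69 address and the
+trickle charger settings below are assumptions, not requirements of this
+binding):
+
+	rtc@69 {
+		compatible = "abracon,abx80x";	/* let the driver autodetect the chip */
+		reg = <0x69>;			/* assumed I2C address */
+
+		/* optional trickle charger setup (abx804/abx805 only) */
+		abracon,tc-diode = "schottky";
+		abracon,tc-resistor = <3>;
+	};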
diff --git a/Documentation/devicetree/bindings/rtc/digicolor-rtc.txt b/Documentation/devicetree/bindings/rtc/digicolor-rtc.txt
new file mode 100644
index 000000000000..d464986012cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/digicolor-rtc.txt
@@ -0,0 +1,17 @@
+Conexant Digicolor Real Time Clock controller
+
+This binding currently supports the CX92755 SoC.
+
+Required properties:
+- compatible: should be "cnxt,cx92755-rtc"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: rtc alarm interrupt
+
+Example:
+
+ rtc@f0000c30 {
+ compatible = "cnxt,cx92755-rtc";
+ reg = <0xf0000c30 0x18>;
+ interrupts = <25>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt b/Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt
index b800070fe6e9..fa6a94226669 100644
--- a/Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt
@@ -7,6 +7,11 @@ Required properties:
region.
- interrupts: rtc alarm interrupt
+Optional properties:
+- stmp,crystal-freq: override crystal frequency as determined from fuse bits.
+ Only <32000> and <32768> are possible for the hardware. Use <0> for
+ "no crystal".
+
Example:
rtc@80056000 {
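+		/* Optional, illustrative: override the fused crystal frequency */
+		stmp,crystal-freq = <32768>;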
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
new file mode 100644
index 000000000000..158b0165e01c
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
@@ -0,0 +1,34 @@
+* STMicroelectronics SAS. ST33ZP24 TPM SoC
+
+Required properties:
+- compatible: Should be "st,st33zp24-spi".
+- spi-max-frequency: Maximum SPI frequency (<= 10000000).
+
+Optional ST33ZP24 Properties:
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
+If set, power must be present when the platform is going into sleep/hibernate mode.
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST33ZP24 on SPI4):
+
+&mcspi4 {
+
+ status = "okay";
+
+ st33zp24@0 {
+
+ compatible = "st,st33zp24-spi";
+
+ spi-max-frequency = <10000000>;
+
+ interrupt-parent = <&gpio5>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+
+ lpcpd-gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/serial/atmel-usart.txt
index a6391e70a8fd..90787aa2e648 100644
--- a/Documentation/devicetree/bindings/serial/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/serial/atmel-usart.txt
@@ -1,9 +1,10 @@
* Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
Required properties:
-- compatible: Should be "atmel,<chip>-usart"
+- compatible: Should be "atmel,<chip>-usart" or "atmel,<chip>-dbgu"
The compatible <chip> indicated will be the first SoC to support an
additional mode or an USART new feature.
+ For the dbgu UART, use "atmel,<chip>-dbgu", "atmel,<chip>-usart"
- reg: Should contain registers location and length
- interrupts: Should contain interrupt
- clock-names: tuple listing input clock names.
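+
+Example for a DBGU port (an illustrative sketch; the register address,
+interrupt, clock phandle and clock name below are assumptions for an
+AT91SAM9260-class SoC, not mandated by this binding):
+
+	dbgu: serial@fffff200 {
+		compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+		reg = <0xfffff200 0x200>;
+		interrupts = <1>;
+		clocks = <&mck>;
+		clock-names = "usart";
+	};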
diff --git a/Documentation/devicetree/bindings/serial/bcm63xx-uart.txt b/Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt
index 5c52e5eef16d..5c52e5eef16d 100644
--- a/Documentation/devicetree/bindings/serial/bcm63xx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
index 342eedd10050..54c2a155c783 100644
--- a/Documentation/devicetree/bindings/serial/omap_serial.txt
+++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
@@ -4,7 +4,27 @@ Required properties:
- compatible : should be "ti,omap2-uart" for OMAP2 controllers
- compatible : should be "ti,omap3-uart" for OMAP3 controllers
- compatible : should be "ti,omap4-uart" for OMAP4 controllers
+- reg : address and length of the register space
+- interrupts or interrupts-extended : Should contain the uart interrupt
+ specifier or both the interrupt
+ controller phandle and interrupt
+ specifier.
- ti,hwmods : Must be "uart<n>", n being the instance number (1-based)
Optional properties:
- clock-frequency : frequency of the clock input to the UART
+- dmas : DMA specifier, consisting of a phandle to the DMA controller
+ node and a DMA channel number.
+- dma-names : "rx" for receive channel, "tx" for transmit channel.
+
+Example:
+
+ uart4: serial@49042000 {
+ compatible = "ti,omap3-uart";
+ reg = <0x49042000 0x400>;
+ interrupts = <80>;
+ dmas = <&sdma 81 &sdma 82>;
+ dma-names = "tx", "rx";
+ ti,hwmods = "uart4";
+ clock-frequency = <48000000>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
new file mode 100644
index 000000000000..ddeb5b6a53c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
@@ -0,0 +1,58 @@
+MediaTek PMIC Wrapper Driver
+
+This document describes the binding for the MediaTek PMIC wrapper.
+
+On MediaTek SoCs the PMIC is connected via SPI. The SPI master interface
+is not directly visible to the CPU, but only through the PMIC wrapper
+inside the SoC. The communication between the SoC and the PMIC can
+optionally be encrypted. Also, a non-standard Dual IO SPI mode can be
+used to increase speed.
+
+IP Pairing
+
+On MT8135, the pins of some SoC-internal peripherals can be on the PMIC.
+The signals of these pins are routed over the SPI bus using the pwrap
+bridge. In the binding description below, the properties needed for bridging
+are marked with "IP Pairing". These are optional on SoCs which do not support
+IP Pairing.
+
+Required properties in the pwrap device node:
+- compatible:
+ "mediatek,mt8135-pwrap" for MT8135 SoCs
+ "mediatek,mt8173-pwrap" for MT8173 SoCs
+- interrupts: IRQ for pwrap in SOC
+- reg-names: Must include the following entries:
+ "pwrap": Main registers base
+ "pwrap-bridge": bridge base (IP Pairing)
+- reg: Must contain an entry for each entry in reg-names.
+- reset-names: Must include the following entries:
+ "pwrap"
+ "pwrap-bridge" (IP Pairing)
+- resets: Must contain an entry for each entry in reset-names.
+- clock-names: Must include the following entries:
+ "spi": SPI bus clock
+ "wrap": Main module clock
+- clocks: Must contain an entry for each entry in clock-names.
+
+Optional properties:
+- pmic: Mediatek PMIC MFD is the child device of pwrap
+ See the following for child node definitions:
+ Documentation/devicetree/bindings/mfd/mt6397.txt
+
+Example:
+ pwrap: pwrap@1000f000 {
+ compatible = "mediatek,mt8135-pwrap";
+ reg = <0 0x1000f000 0 0x1000>,
+ <0 0x11017000 0 0x1000>;
+ reg-names = "pwrap", "pwrap-bridge";
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&infracfg MT8135_INFRA_PMIC_WRAP_RST>,
+ <&pericfg MT8135_PERI_PWRAP_BRIDGE_SW_RST>;
+ reset-names = "pwrap", "pwrap-bridge";
+ clocks = <&clk26m>, <&clk26m>;
+ clock-names = "spi", "wrap";
+
+ pmic {
+ compatible = "mediatek,mt6397";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt
index 4ce24d425bf1..2f5ede39bea2 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,gsbi.txt
@@ -6,7 +6,8 @@ configuration settings. The mode setting will govern the input/output mode of
the 4 GSBI IOs.
Required properties:
-- compatible: must contain "qcom,gsbi-v1.0.0" for APQ8064/IPQ8064
+- compatible: Should contain "qcom,gsbi-v1.0.0"
+- cell-index: Should contain the GSBI index
- reg: Address range for GSBI registers
- clocks: required clock
- clock-names: must contain "iface" entry
@@ -16,6 +17,8 @@ Required properties:
Optional properties:
- qcom,crci : indicates CRCI MUX value for QUP CRCI ports. Please reference
dt-bindings/soc/qcom,gsbi.h for valid CRCI mux values.
+- syscon-tcsr: indicates the phandle of the TCSR syscon node. Required if a
+  child node uses DMA.
Required properties if child node exists:
- #address-cells: Must be 1
@@ -39,6 +42,7 @@ Example for APQ8064:
gsbi4@16300000 {
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <4>;
reg = <0x16300000 0x100>;
clocks = <&gcc GSBI4_H_CLK>;
clock-names = "iface";
@@ -48,22 +52,24 @@ Example for APQ8064:
qcom,mode = <GSBI_PROT_I2C_UART>;
qcom,crci = <GSBI_CRCI_QUP>;
+ syscon-tcsr = <&tcsr>;
+
/* child nodes go under here */
i2c_qup4: i2c@16380000 {
- compatible = "qcom,i2c-qup-v1.1.1";
- reg = <0x16380000 0x1000>;
- interrupts = <0 153 0>;
+ compatible = "qcom,i2c-qup-v1.1.1";
+ reg = <0x16380000 0x1000>;
+ interrupts = <0 153 0>;
- clocks = <&gcc GSBI4_QUP_CLK>, <&gcc GSBI4_H_CLK>;
- clock-names = "core", "iface";
+ clocks = <&gcc GSBI4_QUP_CLK>, <&gcc GSBI4_H_CLK>;
+ clock-names = "core", "iface";
- clock-frequency = <200000>;
+ clock-frequency = <200000>;
- #address-cells = <1>;
- #size-cells = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
- };
+ };
uart4: serial@16340000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
@@ -76,3 +82,7 @@ Example for APQ8064:
};
};
+ tcsr: syscon@1a400000 {
+ compatible = "qcom,apq8064-tcsr", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/bcm2835-i2s.txt b/Documentation/devicetree/bindings/sound/brcm,bcm2835-i2s.txt
index 65783de0aedf..65783de0aedf 100644
--- a/Documentation/devicetree/bindings/sound/bcm2835-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/brcm,bcm2835-i2s.txt
diff --git a/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt b/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt
index b41433386e2f..b623d50004fb 100644
--- a/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt
@@ -1,7 +1,7 @@
Ingenic JZ4740 I2S controller
Required properties:
-- compatible : "ingenic,jz4740-i2s"
+- compatible : "ingenic,jz4740-i2s" or "ingenic,jz4780-i2s"
- reg : I2S registers location and length
- clocks : AIC and I2S PLL clock specifiers.
- clock-names: "aic" and "i2s"
diff --git a/Documentation/devicetree/bindings/sound/max98925.txt b/Documentation/devicetree/bindings/sound/max98925.txt
new file mode 100644
index 000000000000..27be63e2aa0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98925.txt
@@ -0,0 +1,22 @@
+max98925 audio CODEC
+
+This device supports I2C.
+
+Required properties:
+
+ - compatible : "maxim,max98925"
+
+ - vmon-slot-no : slot number used to send voltage information
+
+ - imon-slot-no : slot number used to send current information
+
+ - reg : the I2C address of the device for I2C
+
+Example:
+
+codec: max98925@1a {
+ compatible = "maxim,max98925";
+ vmon-slot-no = <0>;
+ imon-slot-no = <2>;
+ reg = <0x1a>;
+};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
index c949abc2992f..c3495beba358 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
@@ -18,6 +18,7 @@ Required properties:
* Headphones
* Speakers
* Mic Jack
+ * Int Mic
- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
connected to the CODEC.
diff --git a/Documentation/devicetree/bindings/sound/omap-twl4030.txt b/Documentation/devicetree/bindings/sound/omap-twl4030.txt
index 1ab6bc8404d5..f6a715e4ef43 100644
--- a/Documentation/devicetree/bindings/sound/omap-twl4030.txt
+++ b/Documentation/devicetree/bindings/sound/omap-twl4030.txt
@@ -4,9 +4,9 @@ Required properties:
- compatible: "ti,omap-twl4030"
- ti,model: Name of the sound card (for example "omap3beagle")
- ti,mcbsp: phandle for the McBSP node
-- ti,codec: phandle for the twl4030 audio node
Optional properties:
+- ti,codec: phandle for the twl4030 audio node
- ti,mcbsp-voice: phandle for the McBSP node connected to the voice port of twl
- ti, jack-det-gpio: Jack detect GPIO
- ti,audio-routing: List of connections between audio components.
@@ -59,5 +59,4 @@ sound {
ti,model = "omap3beagle";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt
new file mode 100644
index 000000000000..e00732dac939
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt
@@ -0,0 +1,43 @@
+* Qualcomm Technologies LPASS CPU DAI
+
+This node models the Qualcomm Technologies Low-Power Audio SubSystem (LPASS).
+
+Required properties:
+
+- compatible : "qcom,lpass-cpu"
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names : A list which must include the following entries:
+ * "ahbix-clk"
+ * "mi2s-osr-clk"
+ * "mi2s-bit-clk"
+- interrupts : Must contain an entry for each entry in
+ interrupt-names.
+- interrupt-names : A list which must include the following entries:
+ * "lpass-irq-lpaif"
+- pinctrl-N : One property must exist for each entry in
+ pinctrl-names. See ../pinctrl/pinctrl-bindings.txt
+ for details of the property values.
+- pinctrl-names : Must contain a "default" entry.
+- reg : Must contain an address for each entry in reg-names.
+- reg-names : A list which must include the following entries:
+ * "lpass-lpaif"
+
+Optional properties:
+
+- qcom,adsp : Phandle for the audio DSP node
+
+Example:
+
+lpass@28100000 {
+ compatible = "qcom,lpass-cpu";
+ clocks = <&lcc AHBIX_CLK>, <&lcc MI2S_OSR_CLK>, <&lcc MI2S_BIT_CLK>;
+ clock-names = "ahbix-clk", "mi2s-osr-clk", "mi2s-bit-clk";
+ interrupts = <0 85 1>;
+ interrupt-names = "lpass-irq-lpaif";
+ pinctrl-names = "default", "idle";
+ pinctrl-0 = <&mi2s_default>;
+ pinctrl-1 = <&mi2s_idle>;
+ reg = <0x28100000 0x10000>;
+ reg-names = "lpass-lpaif";
+ qcom,adsp = <&adsp>;
+};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 2dd690bc19cc..f316ce1f214a 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -29,9 +29,17 @@ SSI subnode properties:
- shared-pin : if shared clock pin
- pio-transfer : use PIO transfer mode
- no-busif : BUSIF is not ussed when [mem -> SSI] via DMA case
+- dmas : Should contain Audio DMAC entries
+- dma-names : SSI case "rx" (=playback), "tx" (=capture)
+ SSIU case "rxu" (=playback), "txu" (=capture)
SRC subnode properties:
-no properties at this point
+- dmas : Should contain Audio DMAC entries
+- dma-names : "rx" (=playback), "tx" (=capture)
+
+DVC subnode properties:
+- dmas : Should contain an Audio DMAC entry
+- dma-names : "tx" (=playback/capture)
DAI subnode properties:
- playback : list of playback modules
@@ -45,56 +53,145 @@ rcar_sound: rcar_sound@ec500000 {
reg = <0 0xec500000 0 0x1000>, /* SCU */
<0 0xec5a0000 0 0x100>, /* ADG */
<0 0xec540000 0 0x1000>, /* SSIU */
- <0 0xec541000 0 0x1280>; /* SSI */
+ <0 0xec541000 0 0x1280>, /* SSI */
+ <0 0xec740000 0 0x200>; /* Audio DMAC peri peri*/
+ reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+ clocks = <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI9>, <&mstp10_clks R8A7790_CLK_SSI8>,
+ <&mstp10_clks R8A7790_CLK_SSI7>, <&mstp10_clks R8A7790_CLK_SSI6>,
+ <&mstp10_clks R8A7790_CLK_SSI5>, <&mstp10_clks R8A7790_CLK_SSI4>,
+ <&mstp10_clks R8A7790_CLK_SSI3>, <&mstp10_clks R8A7790_CLK_SSI2>,
+ <&mstp10_clks R8A7790_CLK_SSI1>, <&mstp10_clks R8A7790_CLK_SSI0>,
+ <&mstp10_clks R8A7790_CLK_SCU_SRC9>, <&mstp10_clks R8A7790_CLK_SCU_SRC8>,
+ <&mstp10_clks R8A7790_CLK_SCU_SRC7>, <&mstp10_clks R8A7790_CLK_SCU_SRC6>,
+ <&mstp10_clks R8A7790_CLK_SCU_SRC5>, <&mstp10_clks R8A7790_CLK_SCU_SRC4>,
+ <&mstp10_clks R8A7790_CLK_SCU_SRC3>, <&mstp10_clks R8A7790_CLK_SCU_SRC2>,
+ <&mstp10_clks R8A7790_CLK_SCU_SRC1>, <&mstp10_clks R8A7790_CLK_SCU_SRC0>,
+ <&mstp10_clks R8A7790_CLK_SCU_DVC0>, <&mstp10_clks R8A7790_CLK_SCU_DVC1>,
+ <&audio_clk_a>, <&audio_clk_b>, <&audio_clk_c>, <&m2_clk>;
+ clock-names = "ssi-all",
+ "ssi.9", "ssi.8", "ssi.7", "ssi.6", "ssi.5",
+ "ssi.4", "ssi.3", "ssi.2", "ssi.1", "ssi.0",
+ "src.9", "src.8", "src.7", "src.6", "src.5",
+ "src.4", "src.3", "src.2", "src.1", "src.0",
+ "dvc.0", "dvc.1",
+ "clk_a", "clk_b", "clk_c", "clk_i";
rcar_sound,dvc {
- dvc0: dvc@0 { };
- dvc1: dvc@1 { };
+ dvc0: dvc@0 {
+ dmas = <&audma0 0xbc>;
+ dma-names = "tx";
+ };
+ dvc1: dvc@1 {
+ dmas = <&audma0 0xbe>;
+ dma-names = "tx";
+ };
};
rcar_sound,src {
- src0: src@0 { };
- src1: src@1 { };
- src2: src@2 { };
- src3: src@3 { };
- src4: src@4 { };
- src5: src@5 { };
- src6: src@6 { };
- src7: src@7 { };
- src8: src@8 { };
- src9: src@9 { };
+ src0: src@0 {
+ interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x85>, <&audma1 0x9a>;
+ dma-names = "rx", "tx";
+ };
+ src1: src@1 {
+ interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x87>, <&audma1 0x9c>;
+ dma-names = "rx", "tx";
+ };
+ src2: src@2 {
+ interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x89>, <&audma1 0x9e>;
+ dma-names = "rx", "tx";
+ };
+ src3: src@3 {
+ interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+ dma-names = "rx", "tx";
+ };
+ src4: src@4 {
+ interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+ dma-names = "rx", "tx";
+ };
+ src5: src@5 {
+ interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+ dma-names = "rx", "tx";
+ };
+ src6: src@6 {
+ interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x91>, <&audma1 0xb4>;
+ dma-names = "rx", "tx";
+ };
+ src7: src@7 {
+ interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x93>, <&audma1 0xb6>;
+ dma-names = "rx", "tx";
+ };
+ src8: src@8 {
+ interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x95>, <&audma1 0xb8>;
+ dma-names = "rx", "tx";
+ };
+ src9: src@9 {
+ interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x97>, <&audma1 0xba>;
+ dma-names = "rx", "tx";
+ };
};
rcar_sound,ssi {
ssi0: ssi@0 {
interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi1: ssi@1 {
interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi2: ssi@2 {
interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi3: ssi@3 {
interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi4: ssi@4 {
interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi5: ssi@5 {
interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi6: ssi@6 {
interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi7: ssi@7 {
interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi8: ssi@8 {
interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
ssi9: ssi@9 {
interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
+ dma-names = "rx", "tx", "rxu", "txu";
};
};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
new file mode 100644
index 000000000000..c64155027288
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
@@ -0,0 +1,67 @@
+Renesas Sampling Rate Convert Sound Card:
+
+Renesas Sampling Rate Convert Sound Card specifies audio DAI connections of SoC <-> codec.
+
+Required properties:
+
+- compatible : "renesas,rsrc-card,<board>"
+ Examples with boards are:
+ - "renesas,rsrc-card,lager"
+ - "renesas,rsrc-card,koelsch"
+Optional properties:
+
+- card_name : User specified audio sound card name, one string
+ property.
+- cpu : CPU sub-node
+- codec : CODEC sub-node
+
+Optional subnode properties:
+
+- format : CPU/CODEC common audio format.
+ "i2s", "right_j", "left_j" , "dsp_a"
+ "dsp_b", "ac97", "pdm", "msb", "lsb"
+- frame-master : Indicates dai-link frame master.
+ phandle to a cpu or codec subnode.
+- bitclock-master : Indicates dai-link bit clock master.
+ phandle to a cpu or codec subnode.
+- bitclock-inversion : bool property. Add this if the
+ dai-link uses bit clock inversion.
+- frame-inversion : bool property. Add this if the
+ dai-link uses frame clock inversion.
+- convert-rate : platform specified sampling rate to convert to
+
+Required CPU/CODEC subnodes properties:
+
+- sound-dai : phandle and port of CPU/CODEC
+
+Optional CPU/CODEC subnodes properties:
+
+- clocks / system-clock-frequency : specify the subnode's clock if needed.
+                                    It can be specified via "clocks" if the
+                                    system has a clock node (= common clock),
+                                    or via "system-clock-frequency" (if the
+                                    system doesn't support common clock).
+                                    If a clock is specified, it is
+                                    enabled with clk_prepare_enable()
+                                    in dai startup() and disabled with
+                                    clk_disable_unprepare() in dai
+                                    shutdown().
+
+Example
+
+sound {
+ compatible = "renesas,rsrc-card,lager";
+
+ card-name = "rsnd-ak4643";
+ format = "left_j";
+ bitclock-master = <&sndcodec>;
+ frame-master = <&sndcodec>;
+
+ sndcpu: cpu {
+ sound-dai = <&rcar_sound>;
+ };
+
+ sndcodec: codec {
+ sound-dai = <&ak4643>;
+ system-clock-frequency = <11289600>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/storm.txt b/Documentation/devicetree/bindings/sound/storm.txt
new file mode 100644
index 000000000000..062a4c185fa9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/storm.txt
@@ -0,0 +1,23 @@
+* Sound complex for Storm boards
+
+Models a soundcard for Storm boards with the Qualcomm Technologies IPQ806x SOC
+connected to a MAX98357A DAC via I2S.
+
+Required properties:
+
+- compatible : "google,storm-audio"
+- cpu : Phandle of the CPU DAI
+- codec : Phandle of the codec DAI
+
+Optional properties:
+
+- qcom,model : The user-visible name of this sound card.
+
+Example:
+
+sound {
+ compatible = "google,storm-audio";
+ qcom,model = "ipq806x-storm";
+ cpu = <&lpass_cpu>;
+ codec = <&max98357a>;
+};
diff --git a/Documentation/devicetree/bindings/sound/wm8804.txt b/Documentation/devicetree/bindings/sound/wm8804.txt
index 4d3a56f38adc..6fd124b16496 100644
--- a/Documentation/devicetree/bindings/sound/wm8804.txt
+++ b/Documentation/devicetree/bindings/sound/wm8804.txt
@@ -10,6 +10,13 @@ Required properties:
- reg : the I2C address of the device for I2C, the chip select
number for SPI.
+ - PVDD-supply, DVDD-supply : Power supplies for the device, as covered
+ in Documentation/devicetree/bindings/regulator/regulator.txt
+
+Optional properties:
+
+ - wlf,reset-gpio: A GPIO specifier for the GPIO controlling the reset pin
+
Example:
codec: wm8804@1a {
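+		/* Illustrative additions: the regulator and GPIO phandles
+		 * below are board-specific assumptions, not defined by this
+		 * binding.
+		 */
+		PVDD-supply = <&reg_3v3>;
+		DVDD-supply = <&reg_3v3>;
+		wlf,reset-gpio = <&gpio0 7 GPIO_ACTIVE_LOW>;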
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
index 715d0998af8e..e16b9b5afc70 100644
--- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
@@ -1,6 +1,6 @@
Qualcomm SPMI Controller (PMIC Arbiter)
-The SPMI PMIC Arbiter is found on the Snapdragon 800 Series. It is an SPMI
+The SPMI PMIC Arbiter is found on Snapdragon chipsets. It is an SPMI
controller with wrapping arbitration logic to allow for multiple on-chip
devices to control a single SPMI master.
@@ -19,6 +19,10 @@ Required properties:
"core" - core registers
"intr" - interrupt controller registers
"cnfg" - configuration registers
+ Registers used only for V2 PMIC Arbiter:
+ "chnls" - tx-channel per virtual slave registers.
+ "obsrvr" - rx-channel (called observer) per virtual slave registers.
+
- reg : address + size pairs describing the PMIC arb register sets; order must
correspond with the order of entries in reg-names
- #address-cells : must be set to 2
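+
+Illustrative reg/reg-names ordering for a V2 PMIC Arbiter (the addresses and
+sizes below are assumptions, and other required properties are omitted from
+this sketch):
+
+	spmi@200f000 {
+		compatible = "qcom,spmi-pmic-arb";
+		reg = <0x0200f000 0x1000>,	/* assumed addresses */
+		      <0x02400000 0x800000>,
+		      <0x02c00000 0x800000>,
+		      <0x03800000 0x200000>,
+		      <0x0200a000 0x2100>;
+		reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+		/* interrupts, #address-cells, etc. omitted for brevity */
+	};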
diff --git a/Documentation/devicetree/bindings/arm/bcm/kona-timer.txt b/Documentation/devicetree/bindings/timer/brcm,kona-timer.txt
index 39adf54b4388..39adf54b4388 100644
--- a/Documentation/devicetree/bindings/arm/bcm/kona-timer.txt
+++ b/Documentation/devicetree/bindings/timer/brcm,kona-timer.txt
diff --git a/Documentation/devicetree/bindings/unittest.txt b/Documentation/devicetree/bindings/unittest.txt
index 8933211f32f9..3bf58c20fe94 100644
--- a/Documentation/devicetree/bindings/unittest.txt
+++ b/Documentation/devicetree/bindings/unittest.txt
@@ -1,60 +1,60 @@
-1) OF selftest platform device
+1) OF unittest platform device
-** selftest
+** unittest
Required properties:
-- compatible: must be "selftest"
+- compatible: must be "unittest"
All other properties are optional.
Example:
- selftest {
- compatible = "selftest";
+ unittest {
+ compatible = "unittest";
status = "okay";
};
-2) OF selftest i2c adapter platform device
+2) OF unittest i2c adapter platform device
** platform device unittest adapter
Required properties:
-- compatible: must be selftest-i2c-bus
+- compatible: must be unittest-i2c-bus
-Children nodes contain selftest i2c devices.
+Child nodes contain unittest i2c devices.
Example:
- selftest-i2c-bus {
- compatible = "selftest-i2c-bus";
+ unittest-i2c-bus {
+ compatible = "unittest-i2c-bus";
status = "okay";
};
-3) OF selftest i2c device
+3) OF unittest i2c device
-** I2C selftest device
+** I2C unittest device
Required properties:
-- compatible: must be selftest-i2c-dev
+- compatible: must be unittest-i2c-dev
All other properties are optional
Example:
- selftest-i2c-dev {
- compatible = "selftest-i2c-dev";
+ unittest-i2c-dev {
+ compatible = "unittest-i2c-dev";
status = "okay";
};
-4) OF selftest i2c mux device
+4) OF unittest i2c mux device
-** I2C selftest mux
+** I2C unittest mux
Required properties:
-- compatible: must be selftest-i2c-mux
+- compatible: must be unittest-i2c-mux
-Children nodes contain selftest i2c bus nodes per channel.
+Child nodes contain unittest i2c bus nodes per channel.
Example:
- selftest-i2c-mux {
- compatible = "selftest-i2c-mux";
+ unittest-i2c-mux {
+ compatible = "unittest-i2c-mux";
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
@@ -64,7 +64,7 @@ Example:
#size-cells = <0>;
i2c-dev {
reg = <8>;
- compatible = "selftest-i2c-dev";
+ compatible = "unittest-i2c-dev";
status = "okay";
};
};
diff --git a/Documentation/devicetree/bindings/mips/brcm/usb.txt b/Documentation/devicetree/bindings/usb/brcm,bcm3384-usb.txt
index 452c45c7bf29..452c45c7bf29 100644
--- a/Documentation/devicetree/bindings/mips/brcm/usb.txt
+++ b/Documentation/devicetree/bindings/usb/brcm,bcm3384-usb.txt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index fae26d014aaf..80339192c93e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -11,18 +11,22 @@ adapteva Adapteva, Inc.
adh AD Holdings Plc.
adi Analog Devices, Inc.
aeroflexgaisler Aeroflex Gaisler AB
+al Annapurna Labs
allwinner Allwinner Technology Co., Ltd.
alphascale AlphaScale Integrated Circuits Systems, Inc.
altr Altera Corp.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
amd Advanced Micro Devices (AMD), Inc.
amlogic Amlogic, Inc.
+ampire Ampire Co., Ltd.
ams AMS AG
amstaos AMS-Taos Inc.
apm Applied Micro Circuits Corporation (APM)
+aptina Aptina Imaging
arasan Arasan Chip Systems
arm ARM Ltd.
armadeus ARMadeus Systems SARL
+artesyn Artesyn Embedded Technologies Inc.
asahi-kasei Asahi Kasei Corp.
atmel Atmel Corporation
auo AU Optronics Corporation
@@ -37,6 +41,7 @@ capella Capella Microsystems, Inc
cavium Cavium, Inc.
cdns Cadence Design Systems Inc.
chipidea Chipidea, Inc
+chipone ChipOne
chipspark ChipSPARK
chrp Common Hardware Reference Platform
chunghwa Chunghwa Picture Tubes Ltd.
@@ -78,7 +83,9 @@ geniatech Geniatech, Inc.
giantplus Giantplus Technology Co., Ltd.
globalscale Globalscale Technologies, Inc.
gmt Global Mixed-mode Technology, Inc.
+goodix Shenzhen Huiding Technology Co., Ltd.
google Google, Inc.
+grinn Grinn
gumstix Gumstix, Inc.
gw Gateworks Corporation
hannstar HannStar Display Corporation
@@ -114,6 +121,7 @@ merrii Merrii Technology Co., Ltd.
micrel Micrel Inc.
microchip Microchip Technology Inc.
micron Micron Technology Inc.
+minix MINIX Technology Ltd.
mitsubishi Mitsubishi Electric Corporation
mosaixtech Mosaix Technologies, Inc.
moxa Moxa
@@ -125,6 +133,7 @@ mxicy Macronix International Co., Ltd.
national National Semiconductor
neonode Neonode Inc.
netgear NETGEAR
+netlogic Broadcom Corporation (formerly NetLogic Microsystems)
newhaven Newhaven Display International
nintendo Nintendo
nokia Nokia
@@ -132,6 +141,7 @@ nvidia NVIDIA
nxp NXP Semiconductors
onnn ON Semiconductor Corp.
opencores OpenCores.org
+ortustech Ortus Technology Co., Ltd.
ovti OmniVision Technologies
panasonic Panasonic Corporation
parade Parade Technologies Inc.
@@ -167,6 +177,7 @@ sii Seiko Instruments, Inc.
silergy Silergy Corp.
sirf SiRF Technology, Inc.
sitronix Sitronix Technology Corporation
+skyworks Skyworks Solutions, Inc.
smsc Standard Microsystems Corporation
snps Synopsys, Inc.
solidrun SolidRun
@@ -194,6 +205,7 @@ voipac Voipac Technologies s.r.o.
winbond Winbond Electronics corp.
wlf Wolfson Microelectronics
wm Wondermedia Technologies, Inc.
+x-powers X-Powers
xes Extreme Engineering Solutions (X-ES)
xillybus Xillybus Ltd.
xlnx Xilinx
diff --git a/Documentation/devicetree/bindings/video/atmel,lcdc.txt b/Documentation/devicetree/bindings/video/atmel,lcdc.txt
index f059dd0b3d28..ecb8da063d07 100644
--- a/Documentation/devicetree/bindings/video/atmel,lcdc.txt
+++ b/Documentation/devicetree/bindings/video/atmel,lcdc.txt
@@ -10,7 +10,9 @@ Required properties:
"atmel,at91sam9g45es-lcdc" ,
"atmel,at91sam9rl-lcdc" ,
"atmel,at32ap-lcdc"
-- reg : Should contain 1 register ranges(address and length)
+- reg : Should contain 1 register range (address and length).
+ Can contain an additional register range (address and length)
+ for fixed framebuffer memory. Useful for dedicated memories.
- interrupts : framebuffer controller interrupt
- display: a phandle pointing to the display node
@@ -38,6 +40,14 @@ Example:
};
+Example for fixed framebuffer memory:
+
+ fb0: fb@0x00500000 {
+ compatible = "atmel,at91sam9263-lcdc";
+ reg = <0x00700000 0x1000 0x70000000 0x200000>;
+ [...]
+ };
+
Atmel LCDC Display
-----------------------------------------------------
Required properties (as per of_videomode_helper):
diff --git a/Documentation/devicetree/bindings/video/backlight/sky81452-backlight.txt b/Documentation/devicetree/bindings/video/backlight/sky81452-backlight.txt
new file mode 100644
index 000000000000..8bf2940f54bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/backlight/sky81452-backlight.txt
@@ -0,0 +1,29 @@
+SKY81452-backlight bindings
+
+Required properties:
+- compatible : Must be "skyworks,sky81452-backlight"
+
+Optional properties:
+- name : Name of backlight device. Default is 'lcd-backlight'.
+- gpios : GPIO to use for the EN pin.
+ See Documentation/devicetree/bindings/gpio/gpio.txt
+- led-sources : List of enabled channels from 0 to 5.
+ See Documentation/devicetree/bindings/leds/common.txt
+- skyworks,ignore-pwm : Ignore the PWM input.
+- skyworks,dpwm-mode : Enable DPWM dimming mode; otherwise analog dimming is used.
+- skyworks,phase-shift : Enable phase shift mode.
+- skyworks,short-detection-threshold-volt
+ : Should be one of 4, 5, 6 or 7V.
+- skyworks,current-limit-mA
+ : Should be either 2300mA or 2750mA.
+
+Example:
+
+ backlight {
+ compatible = "skyworks,sky81452-backlight";
+ name = "pwm-backlight";
+ led-sources = <0 1 2 5>;
+ skyworks,ignore-pwm;
+ skyworks,phase-shift;
+ skyworks,current-limit-mA = <2300>;
+ };
diff --git a/Documentation/devicetree/bindings/video/ti,omap-dss.txt b/Documentation/devicetree/bindings/video/ti,omap-dss.txt
index d5f1a3fe3109..e1ef29569338 100644
--- a/Documentation/devicetree/bindings/video/ti,omap-dss.txt
+++ b/Documentation/devicetree/bindings/video/ti,omap-dss.txt
@@ -25,8 +25,8 @@ Video Ports
-----------
The DSS Core and the encoders have video port outputs. The structure of the
-video ports is described in Documentation/devicetree/bindings/video/video-
-ports.txt, and the properties for the ports and endpoints for each encoder are
+video ports is described in Documentation/devicetree/bindings/graph.txt,
+and the properties for the ports and endpoints for each encoder are
described in the SoC's DSS binding documentation.
The video ports are used to describe the connections to external hardware, like
diff --git a/Documentation/devicetree/bindings/arm/bcm/kona-wdt.txt b/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.txt
index 2b86a00e351d..2b86a00e351d 100644
--- a/Documentation/devicetree/bindings/arm/bcm/kona-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.txt
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 77685185cf3b..e49e423268c0 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -15,6 +15,7 @@ Table of Contents
1) Entry point for arch/arm
2) Entry point for arch/powerpc
3) Entry point for arch/x86
+ 4) Entry point for arch/mips/bmips
II - The DT block format
1) Header
@@ -288,6 +289,33 @@ it with special cases.
or initrd address. It simply holds information which can not be retrieved
otherwise like interrupt routing or a list of devices behind an I2C bus.
+4) Entry point for arch/mips/bmips
+----------------------------------
+
+ Some bootloaders only support a single entry point, at the start of the
+ kernel image. Other bootloaders will jump to the ELF start address.
+ Both schemes are supported; CONFIG_BOOT_RAW=y and CONFIG_NO_EXCEPT_FILL=y,
+ so the first instruction immediately jumps to kernel_entry().
+
+ Similar to the arch/arm case (b), a DT-aware bootloader is expected to
+ set up the following registers:
+
+ a0 : 0
+
+ a1 : 0xffffffff
+
+ a2 : Physical pointer to the device tree block (defined in chapter
+ II) in RAM. The device tree can be located anywhere in the first
+ 512MB of the physical address space (0x00000000 - 0x1fffffff),
+ aligned on a 64 bit boundary.
+
+ Legacy bootloaders do not use this convention, and they do not pass in a
+ DT block. In this case, Linux will look for a builtin DTB, selected via
+ CONFIG_DT_*.
+
+ This convention is defined for 32-bit systems only, as there are not
+ currently any 64-bit BMIPS implementations.
+
II - The DT block format
========================
diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_unittest.txt
index 57a808b588bf..3e4e7d48ae93 100644
--- a/Documentation/devicetree/of_selftest.txt
+++ b/Documentation/devicetree/of_unittest.txt
@@ -1,11 +1,11 @@
-Open Firmware Device Tree Selftest
+Open Firmware Device Tree Unittest
----------------------------------
Author: Gaurav Minocha <gaurav.minocha.os@gmail.com>
1. Introduction
-This document explains how the test data required for executing OF selftest
+This document explains how the test data required for executing OF unittest
is attached to the live tree dynamically, independent of the machine's
architecture.
@@ -22,31 +22,31 @@ most of the device drivers in various use cases.
2. Test-data
-The Device Tree Source file (drivers/of/testcase-data/testcases.dts) contains
+The Device Tree Source file (drivers/of/unittest-data/testcases.dts) contains
the test data required for executing the unit tests automated in
-drivers/of/selftests.c. Currently, following Device Tree Source Include files
-(.dtsi) are included in testcase.dts:
+drivers/of/unittest.c. Currently, the following Device Tree Source Include files
+(.dtsi) are included in testcases.dts:
-drivers/of/testcase-data/tests-interrupts.dtsi
-drivers/of/testcase-data/tests-platform.dtsi
-drivers/of/testcase-data/tests-phandle.dtsi
-drivers/of/testcase-data/tests-match.dtsi
+drivers/of/unittest-data/tests-interrupts.dtsi
+drivers/of/unittest-data/tests-platform.dtsi
+drivers/of/unittest-data/tests-phandle.dtsi
+drivers/of/unittest-data/tests-match.dtsi
When the kernel is build with OF_SELFTEST enabled, then the following make rule
$(obj)/%.dtb: $(src)/%.dts FORCE
$(call if_changed_dep, dtc)
-is used to compile the DT source file (testcase.dts) into a binary blob
-(testcase.dtb), also referred as flattened DT.
+is used to compile the DT source file (testcases.dts) into a binary blob
+(testcases.dtb), also referred to as a flattened DT.
After that, using the following rule the binary blob above is wrapped as an
-assembly file (testcase.dtb.S).
+assembly file (testcases.dtb.S).
$(obj)/%.dtb.S: $(obj)/%.dtb
$(call cmd, dt_S_dtb)
-The assembly file is compiled into an object file (testcase.dtb.o), and is
+The assembly file is compiled into an object file (testcases.dtb.o), and is
linked into the kernel image.
@@ -98,7 +98,7 @@ child11 -> sibling12 -> sibling13 -> sibling14 -> null
Figure 1: Generic structure of un-flattened device tree
-Before executing OF selftest, it is required to attach the test data to
+Before executing OF unittest, it is required to attach the test data to
machine's device tree (if present). So, when selftest_data_add() is called,
at first it reads the flattened device tree data linked into the kernel image
via the following kernel symbols:
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index bb9753b635a3..480c8de3c2c4 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -49,25 +49,26 @@ The dma_buf buffer sharing API usage contains the following steps:
The buffer exporter announces its wish to export a buffer. In this, it
connects its own private buffer data, provides implementation for operations
that can be performed on the exported dma_buf, and flags for the file
- associated with this buffer.
+ associated with this buffer. All these fields are filled in struct
+ dma_buf_export_info, defined via the DEFINE_DMA_BUF_EXPORT_INFO macro.
Interface:
- struct dma_buf *dma_buf_export_named(void *priv, struct dma_buf_ops *ops,
- size_t size, int flags,
- const char *exp_name)
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info)
+ struct dma_buf *dma_buf_export(struct dma_buf_export_info *exp_info)
- If this succeeds, dma_buf_export_named allocates a dma_buf structure, and
+ If this succeeds, dma_buf_export allocates a dma_buf structure, and
returns a pointer to the same. It also associates an anonymous file with this
buffer, so it can be exported. On failure to allocate the dma_buf object,
it returns NULL.
- 'exp_name' is the name of exporter - to facilitate information while
- debugging.
+   'exp_name' in struct dma_buf_export_info is the name of the exporter, kept
+   to aid debugging. It is set to KBUILD_MODNAME by default, so exporters
+   don't have to provide a specific name if they don't wish to.
+
+   The DEFINE_DMA_BUF_EXPORT_INFO macro defines a struct dma_buf_export_info,
+   zeroes it out and pre-populates exp_name in it.
- Exporting modules which do not wish to provide any specific name may use the
- helper define 'dma_buf_export()', with the same arguments as above, but
- without the last argument; a KBUILD_MODNAME pre-processor directive will be
- inserted in place of 'exp_name' instead.
2. Userspace gets a handle to pass around to potential buffer-users
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 6d1e8eeb5990..831a5363f6be 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -276,6 +276,7 @@ IOMAP
devm_ioport_unmap()
devm_ioremap()
devm_ioremap_nocache()
+ devm_ioremap_wc()
devm_ioremap_resource() : checks resource, requests memory region, ioremaps
devm_iounmap()
pcim_iomap()
@@ -289,6 +290,10 @@ IRQ
devm_request_irq()
devm_request_threaded_irq()
+LED
+ devm_led_classdev_register()
+ devm_led_classdev_unregister()
+
MDIO
devm_mdiobus_alloc()
devm_mdiobus_alloc_size()
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index eede6088f978..c7d49b885559 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -211,7 +211,7 @@ Thunderbird (GUI)
Thunderbird is an Outlook clone that likes to mangle text, but there are ways
to coerce it into behaving.
-- Allows use of an external editor:
+- Allow use of an external editor:
The easiest thing to do with Thunderbird and patches is to use an
"external editor" extension and then just use your favorite $EDITOR
for reading/merging patches into the body text. To do this, download
@@ -219,6 +219,15 @@ to coerce it into behaving.
View->Toolbars->Customize... and finally just click on it when in the
Compose dialog.
+ Please note that "external editor" requires that your editor must not
+ fork, or in other words, the editor must not return before closing.
+ You may have to pass additional flags or change the settings of your
+ editor. Most notably if you are using gvim then you must pass the -f
+ option to gvim by putting "/usr/bin/gvim -f" (if the binary is in
+ /usr/bin) to the text editor field in "external editor" settings. If you
+ are using some other editor then please read its manual to find out how
+ to do this.
+
To beat some sense out of the internal editor, do this:
- Edit your Thunderbird config settings so that it won't use format=flowed.
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index f91926f2f482..0a926e2ba3ab 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -196,7 +196,7 @@ prototypes:
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
- int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+ int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
int (*migratepage)(struct address_space *, struct page *, struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
@@ -429,8 +429,6 @@ prototypes:
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
@@ -525,6 +523,7 @@ prototypes:
void (*close)(struct vm_area_struct*);
int (*fault)(struct vm_area_struct*, struct vm_fault *);
int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
+ int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);
int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
locking rules:
@@ -534,6 +533,7 @@ close: yes
fault: yes can return with page locked
map_pages: yes
page_mkwrite: yes can return with page locked
+pfn_mkwrite: yes
access: yes
->fault() is called when a previously not present pte is about
@@ -560,6 +560,12 @@ the page has been truncated, the filesystem should not look up a new page
like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
will cause the VM to retry the fault.
+ ->pfn_mkwrite() is the same as page_mkwrite but when the pte is
+VM_PFNMAP or VM_MIXEDMAP with a page-less entry. Expected return is
+VM_FAULT_NOPAGE. Or one of the VM_FAULT_ERROR types. The default behavior
+after this call is to make the pte read-write, unless pfn_mkwrite returns
+an error.
+
->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace. This function is needed only for
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index dac11d7fef27..e9e750e59efc 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -140,6 +140,12 @@ nobarrier This option can be used if underlying storage guarantees
fastboot This option is used when a system wants to reduce mount
time as much as possible, even though normal performance
can be sacrificed.
+extent_cache           Enable an rb-tree based extent cache. It caches the
+                       extents that map contiguous logical addresses to
+                       contiguous physical addresses per inode, increasing
+                       the cache hit ratio.
+noinline_data          Disable the inline data feature; the inline data
+                       feature is enabled by default.
================================================================================
DEBUGFS ENTRIES
diff --git a/Documentation/filesystems/nfs/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt
index 724043858b08..95c13aa575ff 100644
--- a/Documentation/filesystems/nfs/nfs-rdma.txt
+++ b/Documentation/filesystems/nfs/nfs-rdma.txt
@@ -187,8 +187,10 @@ Check RDMA and NFS Setup
To further test the InfiniBand software stack, use IPoIB (this
assumes you have two IB hosts named host1 and host2):
- host1$ ifconfig ib0 a.b.c.x
- host2$ ifconfig ib0 a.b.c.y
+ host1$ ip link set dev ib0 up
+ host1$ ip address add dev ib0 a.b.c.x
+ host2$ ip link set dev ib0 up
+ host2$ ip address add dev ib0 a.b.c.y
host1$ ping a.b.c.y
host2$ ping a.b.c.x
@@ -229,7 +231,8 @@ NFS/RDMA Setup
$ modprobe ib_mthca
$ modprobe ib_ipoib
- $ ifconfig ib0 a.b.c.d
+ $ ip li set dev ib0 up
+ $ ip addr add dev ib0 a.b.c.d
NOTE: use unique addresses for the client and server
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index fa2db081505e..e69274de8d0c 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -471,3 +471,15 @@ in your dentry operations instead.
[mandatory]
f_dentry is gone; use f_path.dentry, or, better yet, see if you can avoid
it entirely.
+--
+[mandatory]
+ never call ->read() and ->write() directly; use __vfs_{read,write} or
+ wrappers; instead of checking for ->write or ->read being NULL, look for
+ FMODE_CAN_{WRITE,READ} in file->f_mode.
+--
+[mandatory]
+ do _not_ use new_sync_{read,write} for ->read/->write; leave it NULL
+ instead.
+--
+[mandatory]
+ ->aio_read/->aio_write are gone. Use ->read_iter/->write_iter.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a07ba61662ed..c3b6b301d8b0 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -200,12 +200,12 @@ contains details information about the process itself. Its fields are
explained in Table 1-4.
(for SMP CONFIG users)
-For making accounting scalable, RSS related information are handled in
-asynchronous manner and the vaule may not be very precise. To see a precise
+For making accounting scalable, RSS related information is handled in an
+asynchronous manner and the value may not be very precise. To see a precise
snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
It's slow but very precise.
-Table 1-2: Contents of the status files (as of 2.6.30-rc7)
+Table 1-2: Contents of the status files (as of 3.20.0)
..............................................................................
Field Content
Name filename of the executable
@@ -213,6 +213,7 @@ Table 1-2: Contents of the status files (as of 2.6.30-rc7)
in an uninterruptible wait, Z is zombie,
T is traced or stopped)
Tgid thread group ID
+ Ngid NUMA group ID (0 if none)
Pid process id
PPid process id of the parent process
TracerPid PID of process tracing this process (0 if not)
@@ -220,6 +221,10 @@ Table 1-2: Contents of the status files (as of 2.6.30-rc7)
Gid Real, effective, saved set, and file system GIDs
FDSize number of file descriptor slots currently allocated
Groups supplementary group list
+ NStgid descendant namespace thread group ID hierarchy
+ NSpid descendant namespace process ID hierarchy
+ NSpgid descendant namespace process group ID hierarchy
+ NSsid descendant namespace session ID hierarchy
VmPeak peak virtual memory size
VmSize total program size
VmLck locked memory size
@@ -1255,9 +1260,9 @@ Various pieces of information about kernel activity are available in the
since the system first booted. For a quick look, simply cat the file:
> cat /proc/stat
- cpu 2255 34 2290 22625563 6290 127 456 0 0
- cpu0 1132 34 1441 11311718 3675 127 438 0 0
- cpu1 1123 0 849 11313845 2614 0 18 0 0
+ cpu 2255 34 2290 22625563 6290 127 456 0 0 0
+ cpu0 1132 34 1441 11311718 3675 127 438 0 0 0
+ cpu1 1123 0 849 11313845 2614 0 18 0 0 0
intr 114930548 113199788 3 0 5 263 0 4 [... lots more numbers ...]
ctxt 1990473
btime 1062191376
@@ -1704,6 +1709,10 @@ A typical output is
flags: 0100002
mnt_id: 19
+All locks associated with a file descriptor are shown in its fdinfo too.
+
+lock: 1: FLOCK ADVISORY WRITE 359 00:13:11691 0 EOF
+
The files such as eventfd, fsnotify, signalfd, epoll among the regular pos/flags
pair provide additional information particular to the objects they represent.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 966b22829f3b..5d833b32bbcd 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -590,7 +590,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct page *, struct page *);
int (*launder_page) (struct page *);
@@ -804,8 +804,6 @@ struct file_operations {
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
@@ -838,14 +836,10 @@ otherwise noted.
read: called by read(2) and related system calls
- aio_read: vectored, possibly asynchronous read
-
read_iter: possibly asynchronous read with iov_iter as destination
write: called by write(2) and related system calls
- aio_write: vectored, possibly asynchronous write
-
write_iter: possibly asynchronous write with iov_iter as source
iterate: called when the VFS needs to read the directory contents
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 0bfafe108357..5a5a05582b58 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -228,30 +228,19 @@ default behaviour.
Deprecated Mount Options
========================
- delaylog/nodelaylog
- Delayed logging is the only logging method that XFS supports
- now, so these mount options are now ignored.
-
- Due for removal in 3.12.
-
- ihashsize=value
- In memory inode hashes have been removed, so this option has
- no function as of August 2007. Option is deprecated.
-
- Due for removal in 3.12.
+None at present.
- irixsgid
- This behaviour is now controlled by a sysctl, so the mount
- option is ignored.
- Due for removal in 3.12.
+Removed Mount Options
+=====================
- osyncisdsync
- osyncisosync
- O_SYNC and O_DSYNC are fully supported, so there is no need
- for these options any more.
+ Name Removed
+ ---- -------
+ delaylog/nodelaylog v3.20
+ ihashsize v3.20
+ irixsgid v3.20
+ osyncisdsync/osyncisosync v3.20
- Due for removal in 3.12.
sysctls
=======
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 8b35f51fe7b6..b80606de545a 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -50,10 +50,43 @@ gpiod_is_active_low(power) will be true).
ACPI
----
-ACPI does not support function names for GPIOs. Therefore, only the "idx"
-argument of gpiod_get_index() is useful to discriminate between GPIOs assigned
-to a device. The "con_id" argument can still be set for debugging purposes (it
-will appear under error messages as well as debug and sysfs nodes).
+ACPI also supports function names for GPIOs in a similar fashion to DT.
+The above DT example can be converted to an equivalent ACPI description
+with the help of _DSD (Device Specific Data), introduced in ACPI 5.1:
+
+ Device (FOO) {
+ Name (_CRS, ResourceTemplate () {
+ GpioIo (Exclusive, ..., IoRestrictionOutputOnly,
+ "\\_SB.GPI0") {15} // red
+ GpioIo (Exclusive, ..., IoRestrictionOutputOnly,
+ "\\_SB.GPI0") {16} // green
+ GpioIo (Exclusive, ..., IoRestrictionOutputOnly,
+ "\\_SB.GPI0") {17} // blue
+ GpioIo (Exclusive, ..., IoRestrictionOutputOnly,
+ "\\_SB.GPI0") {1} // power
+ })
+
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () {
+ "led-gpios",
+ Package () {
+ ^FOO, 0, 0, 1,
+ ^FOO, 1, 0, 1,
+ ^FOO, 2, 0, 1,
+ }
+ },
+ Package () {
+ "power-gpios",
+ Package () {^FOO, 3, 0, 0},
+ },
+ }
+ })
+ }
+
+For more information about the ACPI GPIO bindings see
+Documentation/acpi/gpio-properties.txt.
Platform Data
-------------
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index d85fbae451ea..c21c1313f09e 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -58,7 +58,6 @@ pattern where a GPIO is optional, the gpiod_get_optional() and
gpiod_get_index_optional() functions can be used. These functions return NULL
instead of -ENOENT if no GPIO has been assigned to the requested function:
-
struct gpio_desc *gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
@@ -68,6 +67,27 @@ instead of -ENOENT if no GPIO has been assigned to the requested function:
unsigned int index,
enum gpiod_flags flags)
+For a function using multiple GPIOs all of those can be obtained with one call:
+
+ struct gpio_descs *gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+
+This function returns a struct gpio_descs which contains an array of
+descriptors:
+
+ struct gpio_descs {
+ unsigned int ndescs;
+ struct gpio_desc *desc[];
+ }
+
+The following function returns NULL instead of -ENOENT if no GPIOs have been
+assigned to the requested function:
+
+ struct gpio_descs *gpiod_get_array_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+
Device-managed variants of these functions are also defined:
struct gpio_desc *devm_gpiod_get(struct device *dev, const char *con_id,
@@ -82,20 +102,37 @@ Device-managed variants of these functions are also defined:
const char *con_id,
enum gpiod_flags flags)
- struct gpio_desc * devm_gpiod_get_index_optional(struct device *dev,
+ struct gpio_desc *devm_gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags)
+ struct gpio_descs *devm_gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+
+ struct gpio_descs *devm_gpiod_get_array_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+
A GPIO descriptor can be disposed of using the gpiod_put() function:
void gpiod_put(struct gpio_desc *desc)
-It is strictly forbidden to use a descriptor after calling this function. The
-device-managed variant is, unsurprisingly:
+For an array of GPIOs this function can be used:
+
+ void gpiod_put_array(struct gpio_descs *descs)
+
+It is strictly forbidden to use a descriptor after calling these functions.
+It is also not allowed to individually release descriptors (using gpiod_put())
+from an array acquired with gpiod_get_array().
+
+The device-managed variants are, unsurprisingly:
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
+ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
+
Using GPIOs
===========
@@ -222,6 +259,26 @@ GPIOs belonging to the same bank or chip simultaneously if supported by the
corresponding chip driver. In that case a significantly improved performance
can be expected. If simultaneous setting is not possible the GPIOs will be set
sequentially.
+
+The gpiod_set_array() functions take three arguments:
+ * array_size - the number of array elements
+ * desc_array - an array of GPIO descriptors
+ * value_array - an array of values to assign to the GPIOs
+
+The descriptor array can be obtained using the gpiod_get_array() function
+or one of its variants. If the group of descriptors returned by that function
+matches the desired group of GPIOs, those GPIOs can be set by simply using
+the struct gpio_descs returned by gpiod_get_array():
+
+ struct gpio_descs *my_gpio_descs = gpiod_get_array(...);
+ gpiod_set_array(my_gpio_descs->ndescs, my_gpio_descs->desc,
+ my_gpio_values);
+
+It is also possible to set a completely arbitrary array of descriptors. The
+descriptors may be obtained using any combination of gpiod_get() and
+gpiod_get_array(). Afterwards the array of descriptors has to be set up
+manually before it can be used with gpiod_set_array().
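+
+A minimal sketch of such a manual setup follows; the "enable" and "reset"
+function names and the fixed array size are assumptions for illustration
+only, and error handling is omitted:
+
+	struct gpio_desc *my_descs[2];
+	int my_values[2] = { 1, 0 };
+
+	my_descs[0] = gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+	my_descs[1] = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+
+	gpiod_set_array(2, my_descs, my_values);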
+
Note that for optimal performance GPIOs belonging to the same chip should be
contiguous within the array of descriptors.
diff --git a/Documentation/i2c/slave-eeprom-backend b/Documentation/i2c/slave-eeprom-backend
new file mode 100644
index 000000000000..c8444ef82acf
--- /dev/null
+++ b/Documentation/i2c/slave-eeprom-backend
@@ -0,0 +1,14 @@
+Linux I2C slave eeprom backend
+==============================
+
+by Wolfram Sang <wsa@sang-engineering.com> in 2014-15
+
+This is a proof-of-concept backend which acts like an EEPROM on the connected
+I2C bus. The memory contents can be modified from userspace via this file
+located in sysfs:
+
+ /sys/bus/i2c/devices/<device-directory>/slave-eeprom
+
+As of 2015, Linux doesn't support poll on binary sysfs files, so there is no
+notification when another master changes the content.
+
diff --git a/Documentation/i2c/slave-interface b/Documentation/i2c/slave-interface
new file mode 100644
index 000000000000..389bb5d61854
--- /dev/null
+++ b/Documentation/i2c/slave-interface
@@ -0,0 +1,179 @@
+Linux I2C slave interface description
+=====================================
+
+by Wolfram Sang <wsa@sang-engineering.com> in 2014-15
+
+Linux can also be an I2C slave if the I2C controller has slave support.
+Besides this HW requirement, one also needs a software backend providing the
+actual functionality. An example for this is the slave-eeprom driver, which
+acts as a dual memory driver. While another I2C master on the bus can access it
+like a regular EEPROM, the Linux I2C slave can access the content via sysfs and
+retrieve/provide information as needed. The software backend driver and the I2C
+bus driver communicate via events. Here is a small graph visualizing the data
+flow and the means by which data is transported. The dotted line marks only one
+example. The backend could also use e.g. a character device, be in-kernel
+only, or something completely different:
+
+
+                e.g. sysfs        I2C slave events        I/O registers
+  +-----------+   v    +---------+     v     +--------+  v  +------------+
+  | Userspace +........+ Backend +-----------+ Driver +-----+ Controller |
+  +-----------+        +---------+           +--------+     +------------+
+                                                                | |
+  ----------------------------------------------------------------+--  I2C
+  --------------------------------------------------------------+----  Bus
+
+Note: Technically, there is also the I2C core between the backend and the
+driver. However, at the time of writing, this layer is transparent.
+
+
+User manual
+===========
+
+I2C slave backends behave like standard I2C clients. So, you can instantiate
+them as described in the document 'instantiating-devices'. A quick example
+for instantiating the slave-eeprom driver from userspace:
+
+ # echo 0-0064 > /sys/bus/i2c/drivers/i2c-slave-eeprom/bind
+
+Each backend should come with separate documentation to describe its specific
+behaviour and setup.
+
+
+Developer manual
+================
+
+I2C slave events
+----------------
+
+The bus driver sends an event to the backend using the following function:
+
+ ret = i2c_slave_event(client, event, &val)
+
+'client' describes the i2c slave device. 'event' is one of the special event
+types described hereafter. 'val' holds an u8 value for the data byte to be
+read/written and is thus bidirectional. The pointer to val must always be
+provided even if val is not used for an event, i.e. don't use NULL here. 'ret'
+is the return value from the backend. Mandatory events must be provided by the
+bus drivers and must be checked for by backend drivers.
+
+Event types:
+
+* I2C_SLAVE_WRITE_REQUESTED (mandatory)
+
+'val': unused
+'ret': always 0
+
+Another I2C master wants to write data to us. This event should be sent once
+our own address and the write bit have been detected. The data has not arrived yet, so
+there is nothing to process or return. Wakeup or initialization probably needs
+to be done, though.
+
+* I2C_SLAVE_READ_REQUESTED (mandatory)
+
+'val': backend returns first byte to be sent
+'ret': always 0
+
+Another I2C master wants to read data from us. This event should be sent once
+our own address and the read bit have been detected. After returning, the bus driver
+should transmit the first byte.
+
+* I2C_SLAVE_WRITE_RECEIVED (mandatory)
+
+'val': bus driver delivers received byte
+'ret': 0 if the byte should be acked, some errno if the byte should be nacked
+
+Another I2C master has sent a byte to us which needs to be set in 'val'. If 'ret'
+is zero, the bus driver should ack this byte. If 'ret' is an errno, then the byte
+should be nacked.
+
+* I2C_SLAVE_READ_PROCESSED (mandatory)
+
+'val': backend returns next byte to be sent
+'ret': always 0
+
+The bus driver requests the next byte to be sent to another I2C master in
+'val'. Important: This does not mean that the previous byte has been acked, it
+only means that the previous byte is being shifted out to the bus! To ensure
+seamless transmission, most hardware requests the next byte while the previous
+one is still being shifted out. If the master sends NACK and stops reading
+after the byte currently being shifted out, the byte requested here is never
+used. It will very likely need to be sent again on the next
+I2C_SLAVE_READ_REQUESTED, depending a bit on your backend, though.
+
+* I2C_SLAVE_STOP (mandatory)
+
+'val': unused
+'ret': always 0
+
+A stop condition was received. This can happen anytime and the backend should
+reset its state machine for I2C transfers to be able to receive new requests.
+
+
+Software backends
+-----------------
+
+If you want to write a software backend:
+
+* use a standard i2c_driver and its matching mechanisms
+* write the slave_callback which handles the above slave events
+ (best using a state machine)
+* register this callback via i2c_slave_register()
+
+Check the i2c-slave-eeprom driver as an example.
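+
+A hedged skeleton of such a callback, for a hypothetical backend exposing a
+single 8-bit register (not a real driver), might look like:
+
+	static int my_slave_cb(struct i2c_client *client,
+			       enum i2c_slave_event event, u8 *val)
+	{
+		struct my_backend *priv = i2c_get_clientdata(client);
+
+		switch (event) {
+		case I2C_SLAVE_WRITE_RECEIVED:
+			priv->reg = *val;	/* returning 0 acks the byte */
+			break;
+		case I2C_SLAVE_READ_REQUESTED:
+		case I2C_SLAVE_READ_PROCESSED:
+			*val = priv->reg;	/* next byte to be shifted out */
+			break;
+		case I2C_SLAVE_WRITE_REQUESTED:
+		case I2C_SLAVE_STOP:
+			break;			/* nothing to prepare/reset here */
+		}
+
+		return 0;
+	}
+
+It would then be registered from the backend's probe() with
+i2c_slave_register(client, my_slave_cb) and removed again with
+i2c_slave_unregister(client).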
+
+
+Bus driver support
+------------------
+
+If you want to add slave support to the bus driver:
+
+* implement calls to register/unregister the slave and add those to the
+ struct i2c_algorithm. When registering, you probably need to set the i2c
+ slave address and enable slave-specific interrupts. If you use runtime PM, you
+ should use pm_runtime_forbid() because your device usually needs to be powered
+ on at all times to be able to detect its slave address. When unregistering, do
+ the inverse of the above.
+
+* Catch the slave interrupts and send appropriate i2c_slave_events to the backend.
+
+Check the i2c-rcar driver as an example.
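+
+A hedged sketch of just these hooks (the my_* names are made up and the
+hardware handling is left out):
+
+	static int my_i2c_reg_slave(struct i2c_client *slave)
+	{
+		/* program the slave address, enable slave interrupts, ... */
+		return 0;
+	}
+
+	static int my_i2c_unreg_slave(struct i2c_client *slave)
+	{
+		/* disable slave interrupts, clear the slave address, ... */
+		return 0;
+	}
+
+	static const struct i2c_algorithm my_i2c_algo = {
+		.master_xfer	= my_i2c_master_xfer,
+		.functionality	= my_i2c_functionality,
+		.reg_slave	= my_i2c_reg_slave,
+		.unreg_slave	= my_i2c_unreg_slave,
+	};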
+
+
+About ACK/NACK
+--------------
+
+It is good behaviour to always ACK the address phase, so the master knows if a
+device is basically present or if it mysteriously disappeared. Using NACK to
+state being busy is troublesome. SMBus requires the address phase to always be
+ACKed, while the I2C specification is more relaxed. Most I2C controllers also
+automatically ACK when detecting their slave addresses, so there is no option
+to NACK them. For those reasons, this API does not support NACK in the address
+phase.
+
+Currently, there is no slave event to report if the master did ACK or NACK a
+byte when it reads from us. We could make this an optional event if the need
+arises. However, such cases should be extremely rare because the master is
+expected to send STOP after that, and we have an event for that. Also, keep in
+mind that not all I2C controllers are able to report that event.
+
+
+About buffers
+-------------
+
+During development of this API, the question of using buffers instead of just
+bytes came up. Such an extension might be possible, but its usefulness is
+unclear at the time of writing. Some points to keep in mind when using buffers:
+
+* Buffers should be opt-in and slave drivers will always have to support
+ byte-based transactions as the ultimate fallback because this is how the
+ majority of HW works.
+
+* For backends simulating hardware registers, buffers are not helpful because
+ on writes an action should be immediately triggered. For reads, the data in
+ the buffer might get stale.
+
+* A master can send STOP at any time. For partially transferred buffers, this
+ means additional code to handle this exception. Such code tends to be
+ error-prone.
+
diff --git a/Documentation/i2c/summary b/Documentation/i2c/summary
index 13ab076dcd92..809541ab352f 100644
--- a/Documentation/i2c/summary
+++ b/Documentation/i2c/summary
@@ -41,7 +41,3 @@ integrated than Algorithm and Adapter.
For a given configuration, you will need a driver for your I2C bus, and
drivers for your I2C devices (usually one driver for each device).
-
-At this time, Linux only operates I2C (or SMBus) in master mode; you can't
-use these APIs to make a Linux system behave as a slave/device, either to
-speak a custom protocol or to emulate some other device.
diff --git a/Documentation/i2o/README b/Documentation/i2o/README
deleted file mode 100644
index ee91e2626ff0..000000000000
--- a/Documentation/i2o/README
+++ /dev/null
@@ -1,63 +0,0 @@
-
- Linux I2O Support (c) Copyright 1999 Red Hat Software
- and others.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version
- 2 of the License, or (at your option) any later version.
-
-AUTHORS (so far)
-
-Alan Cox, Building Number Three Ltd.
- Core code, SCSI and Block OSMs
-
-Steve Ralston, LSI Logic Corp.
- Debugging SCSI and Block OSM
-
-Deepak Saxena, Intel Corp.
- Various core/block extensions
- /proc interface, bug fixes
- Ioctl interfaces for control
- Debugging LAN OSM
-
-Philip Rumpf
- Fixed assorted dumb SMP locking bugs
-
-Juha Sievanen, University of Helsinki Finland
- LAN OSM code
- /proc interface to LAN class
- Bug fixes
- Core code extensions
-
-Auvo Häkkinen, University of Helsinki Finland
- LAN OSM code
- /Proc interface to LAN class
- Bug fixes
- Core code extensions
-
-Taneli Vähäkangas, University of Helsinki Finland
- Fixes to i2o_config
-
-CREDITS
-
- This work was made possible by
-
-Red Hat Software
- Funding for the Building #3 part of the project
-
-Symbios Logic (Now LSI)
- Host adapters, hints, known to work platforms when I hit
- compatibility problems
-
-BoxHill Corporation
- Loan of initial FibreChannel disk array used for development work.
-
-European Commission
- Funding the work done by the University of Helsinki
-
-SysKonnect
- Loan of FDDI and Gigabit Ethernet cards
-
-ASUSTeK
- Loan of I2O motherboard
diff --git a/Documentation/i2o/ioctl b/Documentation/i2o/ioctl
deleted file mode 100644
index 27c3c5493116..000000000000
--- a/Documentation/i2o/ioctl
+++ /dev/null
@@ -1,394 +0,0 @@
-
-Linux I2O User Space Interface
-rev 0.3 - 04/20/99
-
-=============================================================================
-Originally written by Deepak Saxena(deepak@plexity.net)
-Currently maintained by Deepak Saxena(deepak@plexity.net)
-=============================================================================
-
-I. Introduction
-
-The Linux I2O subsystem provides a set of ioctl() commands that can be
-utilized by user space applications to communicate with IOPs and devices
-on individual IOPs. This document defines the specific ioctl() commands
-that are available to the user and provides examples of their uses.
-
-This document assumes the reader is familiar with or has access to the
-I2O specification as no I2O message parameters are outlined. For information
-on the specification, see http://www.i2osig.org
-
-This document and the I2O user space interface are currently maintained
-by Deepak Saxena. Please send all comments, errata, and bug fixes to
-deepak@csociety.purdue.edu
-
-II. IOP Access
-
-Access to the I2O subsystem is provided through the device file named
-/dev/i2o/ctl. This file is a character file with major number 10 and minor
-number 166. It can be created through the following command:
-
- mknod /dev/i2o/ctl c 10 166
-
-III. Determining the IOP Count
-
- SYNOPSIS
-
- ioctl(fd, I2OGETIOPS, int *count);
-
- u8 count[MAX_I2O_CONTROLLERS];
-
- DESCRIPTION
-
- This function returns the system's active IOP table. count should
- point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon
- returning, each entry will contain a non-zero value if the given
- IOP unit is active, and NULL if it is inactive or non-existent.
-
- RETURN VALUE.
-
- Returns 0 if no errors occur, and -1 otherwise. If an error occurs,
- errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
-
-IV. Getting Hardware Resource Table
-
- SYNOPSIS
-
- ioctl(fd, I2OHRTGET, struct i2o_cmd_hrt *hrt);
-
- struct i2o_cmd_hrtlct
- {
- u32 iop; /* IOP unit number */
- void *resbuf; /* Buffer for result */
- u32 *reslen; /* Buffer length in bytes */
- };
-
- DESCRIPTION
-
- This function returns the Hardware Resource Table of the IOP specified
- by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of
- the data is written into *(hrt->reslen).
-
- RETURNS
-
- This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ENOBUFS Buffer not large enough. If this occurs, the required
- buffer length is written into *(hrt->reslen)
-
-V. Getting Logical Configuration Table
-
- SYNOPSIS
-
- ioctl(fd, I2OLCTGET, struct i2o_cmd_lct *lct);
-
- struct i2o_cmd_hrtlct
- {
- u32 iop; /* IOP unit number */
- void *resbuf; /* Buffer for result */
- u32 *reslen; /* Buffer length in bytes */
- };
-
- DESCRIPTION
-
- This function returns the Logical Configuration Table of the IOP specified
- by lct->iop in the buffer pointed to by lct->resbuf. The actual size of
- the data is written into *(lct->reslen).
-
- RETURNS
-
- This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ENOBUFS Buffer not large enough. If this occurs, the required
- buffer length is written into *(lct->reslen)
-
-VI. Setting Parameters
-
- SYNOPSIS
-
- ioctl(fd, I2OPARMSET, struct i2o_parm_setget *ops);
-
- struct i2o_cmd_psetget
- {
- u32 iop; /* IOP unit number */
- u32 tid; /* Target device TID */
- void *opbuf; /* Operation List buffer */
- u32 oplen; /* Operation List buffer length in bytes */
- void *resbuf; /* Result List buffer */
- u32 *reslen; /* Result List buffer length in bytes */
- };
-
- DESCRIPTION
-
- This function posts a UtilParamsSet message to the device identified
- by ops->iop and ops->tid. The operation list for the message is
- sent through the ops->opbuf buffer, and the result list is written
- into the buffer pointed to by ops->resbuf. The number of bytes
- written is placed into *(ops->reslen).
-
- RETURNS
-
- The return value is the size in bytes of the data written into
- ops->resbuf if no errors occur. If an error occurs, -1 is returned
- and errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ENOBUFS Buffer not large enough. If this occurs, the required
- buffer length is written into *(ops->reslen)
- ETIMEDOUT Timeout waiting for reply message
- ENOMEM Kernel memory allocation error
-
- A return value of 0 does not mean that the value was actually
- changed properly on the IOP. The user should check the result
- list to determine the specific status of the transaction.
-
-VII. Getting Parameters
-
- SYNOPSIS
-
- ioctl(fd, I2OPARMGET, struct i2o_parm_setget *ops);
-
- struct i2o_parm_setget
- {
- u32 iop; /* IOP unit number */
- u32 tid; /* Target device TID */
- void *opbuf; /* Operation List buffer */
- u32 oplen; /* Operation List buffer length in bytes */
- void *resbuf; /* Result List buffer */
- u32 *reslen; /* Result List buffer length in bytes */
- };
-
- DESCRIPTION
-
- This function posts a UtilParamsGet message to the device identified
- by ops->iop and ops->tid. The operation list for the message is
- sent through the ops->opbuf buffer, and the result list is written
- into the buffer pointed to by ops->resbuf. The actual size of data
- written is placed into *(ops->reslen).
-
- RETURNS
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ENOBUFS Buffer not large enough. If this occurs, the required
- buffer length is written into *(ops->reslen)
- ETIMEDOUT Timeout waiting for reply message
- ENOMEM Kernel memory allocation error
-
- A return value of 0 does not mean that the value was actually
- properly retrieved. The user should check the result list
- to determine the specific status of the transaction.
-
-VIII. Downloading Software
-
- SYNOPSIS
-
- ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw);
-
- struct i2o_sw_xfer
- {
- u32 iop; /* IOP unit number */
- u8 flags; /* DownloadFlags field */
- u8 sw_type; /* Software type */
- u32 sw_id; /* Software ID */
- void *buf; /* Pointer to software buffer */
- u32 *swlen; /* Length of software buffer */
- u32 *maxfrag; /* Number of fragments */
- u32 *curfrag; /* Current fragment number */
- };
-
- DESCRIPTION
-
- This function downloads a software fragment pointed by sw->buf
- to the iop identified by sw->iop. The DownloadFlags, SwID, SwType
- and SwSize fields of the ExecSwDownload message are filled in with
- the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen).
-
- The fragments _must_ be sent in order and be 8K in size. The last
- fragment _may_ be shorter, however. The kernel will compute its
- size based on information in the sw->swlen field.
-
- Please note that SW transfers can take a long time.
-
- RETURNS
-
- This function returns 0 no errors occur. If an error occurs, -1
- is returned and errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ETIMEDOUT Timeout waiting for reply message
- ENOMEM Kernel memory allocation error
-
-IX. Uploading Software
-
- SYNOPSIS
-
- ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw);
-
- struct i2o_sw_xfer
- {
- u32 iop; /* IOP unit number */
- u8 flags; /* UploadFlags */
- u8 sw_type; /* Software type */
- u32 sw_id; /* Software ID */
- void *buf; /* Pointer to software buffer */
- u32 *swlen; /* Length of software buffer */
- u32 *maxfrag; /* Number of fragments */
- u32 *curfrag; /* Current fragment number */
- };
-
- DESCRIPTION
-
- This function uploads a software fragment from the IOP identified
- by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields.
- The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload
- message are filled in with the values of sw->flags, sw->sw_id,
- sw->sw_type and *(sw->swlen).
-
- The fragments _must_ be requested in order and be 8K in size. The
- user is responsible for allocating memory pointed by sw->buf. The
- last fragment _may_ be shorter.
-
- Please note that SW transfers can take a long time.
-
- RETURNS
-
- This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ETIMEDOUT Timeout waiting for reply message
- ENOMEM Kernel memory allocation error
-
-X. Removing Software
-
- SYNOPSIS
-
- ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw);
-
- struct i2o_sw_xfer
- {
- u32 iop; /* IOP unit number */
- u8 flags; /* RemoveFlags */
- u8 sw_type; /* Software type */
- u32 sw_id; /* Software ID */
- void *buf; /* Unused */
- u32 *swlen; /* Length of the software data */
- u32 *maxfrag; /* Unused */
- u32 *curfrag; /* Unused */
- };
-
- DESCRIPTION
-
- This function removes software from the IOP identified by sw->iop.
- The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message
- are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and
- *(sw->swlen). Give zero in *(sw->len) if the value is unknown. IOP uses
- *(sw->swlen) value to verify correct identication of the module to remove.
- The actual size of the module is written into *(sw->swlen).
-
- RETURNS
-
- This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ETIMEDOUT Timeout waiting for reply message
- ENOMEM Kernel memory allocation error
-
-X. Validating Configuration
-
- SYNOPSIS
-
- ioctl(fd, I2OVALIDATE, int *iop);
- u32 iop;
-
- DESCRIPTION
-
- This function posts an ExecConfigValidate message to the controller
- identified by iop. This message indicates that the current
- configuration is accepted. The iop changes the status of suspect drivers
- to valid and may delete old drivers from its store.
-
- RETURNS
-
- This function returns 0 if no erro occur. If an error occurs, -1 is
- returned and errno is set appropriately:
-
- ETIMEDOUT Timeout waiting for reply message
- ENXIO Invalid IOP number
-
-XI. Configuration Dialog
-
- SYNOPSIS
-
- ioctl(fd, I2OHTML, struct i2o_html *htquery);
- struct i2o_html
- {
- u32 iop; /* IOP unit number */
- u32 tid; /* Target device ID */
- u32 page; /* HTML page */
- void *resbuf; /* Buffer for reply HTML page */
- u32 *reslen; /* Length in bytes of reply buffer */
- void *qbuf; /* Pointer to HTTP query string */
- u32 qlen; /* Length in bytes of query string buffer */
- };
-
- DESCRIPTION
-
- This function posts an UtilConfigDialog message to the device identified
- by htquery->iop and htquery->tid. The requested HTML page number is
- provided by the htquery->page field, and the resultant data is stored
- in the buffer pointed to by htquery->resbuf. If there is an HTTP query
- string that is to be sent to the device, it should be sent in the buffer
- pointed to by htquery->qbuf. If there is no query string, this field
- should be set to NULL. The actual size of the reply received is written
- into *(htquery->reslen).
-
- RETURNS
-
- This function returns 0 if no error occur. If an error occurs, -1
- is returned and errno is set appropriately:
-
- EFAULT Invalid user space pointer was passed
- ENXIO Invalid IOP number
- ENOBUFS Buffer not large enough. If this occurs, the required
- buffer length is written into *(ops->reslen)
- ETIMEDOUT Timeout waiting for reply message
- ENOMEM Kernel memory allocation error
-
-XII. Events
-
- In the process of determining this. Current idea is to have use
- the select() interface to allow user apps to periodically poll
- the /dev/i2o/ctl device for events. When select() notifies the user
- that an event is available, the user would call read() to retrieve
- a list of all the events that are pending for the specific device.
-
-=============================================================================
-Revision History
-=============================================================================
-
-Rev 0.1 - 04/01/99
-- Initial revision
-
-Rev 0.2 - 04/06/99
-- Changed return values to match UNIX ioctl() standard. Only return values
- are 0 and -1. All errors are reported through errno.
-- Added summary of proposed possible event interfaces
-
-Rev 0.3 - 04/20/99
-- Changed all ioctls() to use pointers to user data instead of actual data
-- Updated error values to match the code
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index 92ae734c00c3..c86f2f1ae4f6 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -58,7 +58,7 @@ To exit command mode, PSMOUSE_CMD_SETSTREAM (EA) is sent to the touchpad.
While in command mode, register addresses can be set by first sending a
specific command, either EC for v3 devices or F5 for v4 devices. Then the
address is sent one nibble at a time, where each nibble is encoded as a
-command with optional data. This enoding differs slightly between the v3 and
+command with optional data. This encoding differs slightly between the v3 and
v4 protocols.
Once an address has been set, the addressed register can be read by sending
@@ -94,6 +94,10 @@ PS/2 packet format
Note that the device never signals overflow condition.
+For protocol version 2 devices, when the trackpoint is used and no fingers
+are on the touchpad, the M R L bits signal the combined status of both the
+pointing stick and touchpad buttons.
+
ALPS Absolute Mode - Protocol Version 1
--------------------------------------
@@ -107,7 +111,7 @@ ALPS Absolute Mode - Protocol Version 1
ALPS Absolute Mode - Protocol Version 2
---------------------------------------
- byte 0: 1 ? ? ? 1 ? ? ?
+ byte 0: 1 ? ? ? 1 PSM PSR PSL
byte 1: 0 x6 x5 x4 x3 x2 x1 x0
byte 2: 0 x10 x9 x8 x7 ? fin ges
byte 3: 0 y9 y8 y7 1 M R L
@@ -115,7 +119,8 @@ ALPS Absolute Mode - Protocol Version 2
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick.
+the DualPoint Stick. For non-interleaved DualPoint devices the pointing stick
+buttons are reported separately in the PSM, PSR and PSL bits.
Dualpoint device -- interleaved packet format
---------------------------------------------
@@ -139,7 +144,7 @@ ALPS Absolute Mode - Protocol Version 3
---------------------------------------
ALPS protocol version 3 has three different packet formats. The first two are
-associated with touchpad events, and the third is associatd with trackstick
+associated with touchpad events, and the third is associated with trackstick
events.
The first type is the touchpad position packet.
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 96705616f582..3f0f5ce3338b 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -229,7 +229,7 @@ such device to feedback.
EV_PWR:
----------
EV_PWR events are a special type of event used specifically for power
-mangement. Its usage is not well defined. To be addressed later.
+management. Its usage is not well defined. To be addressed later.
Device properties:
=================
diff --git a/Documentation/input/gpio-tilt.txt b/Documentation/input/gpio-tilt.txt
index 06d60c3ff5e7..2cdfd9bcb1af 100644
--- a/Documentation/input/gpio-tilt.txt
+++ b/Documentation/input/gpio-tilt.txt
@@ -28,7 +28,7 @@ Example:
--------
Example configuration for a single TS1003 tilt switch that rotates around
-one axis in 4 steps and emitts the current tilt via two GPIOs.
+one axis in 4 steps and emits the current tilt via two GPIOs.
static int sg060_tilt_enable(struct device *dev) {
/* code to enable the sensors */
diff --git a/Documentation/input/iforce-protocol.txt b/Documentation/input/iforce-protocol.txt
index 2d5fbfd6023e..66287151c54a 100644
--- a/Documentation/input/iforce-protocol.txt
+++ b/Documentation/input/iforce-protocol.txt
@@ -97,7 +97,7 @@ LEN= 0e
*** Attack and fade ***
OP= 02
LEN= 08
-00-01 Address where to store the parameteres
+00-01 Address where to store the parameters
02-03 Duration of attack (little endian encoding, in ms)
04 Level at end of attack. Signed byte.
05-06 Duration of fade.
diff --git a/Documentation/input/walkera0701.txt b/Documentation/input/walkera0701.txt
index 561385d38482..49e3ac60dcef 100644
--- a/Documentation/input/walkera0701.txt
+++ b/Documentation/input/walkera0701.txt
@@ -91,7 +91,7 @@ absolute binary value. (10 bits per channel). Next nibble is checksum for
first ten nibbles.
Next nibbles 12 .. 21 represents four channels (not all channels can be
-directly controlled from TX). Binary representations ar the same as in first
+directly controlled from TX). Binary representations are the same as in first
four channels. In nibbles 22 and 23 is a special magic number. Nibble 24 is
checksum for nibbles 12..23.
diff --git a/Documentation/input/yealink.txt b/Documentation/input/yealink.txt
index 5360e434486c..8277b76ec506 100644
--- a/Documentation/input/yealink.txt
+++ b/Documentation/input/yealink.txt
@@ -93,7 +93,7 @@ Format description:
Format specifier
'8' : Generic 7 segment digit with individual addressable segments
- Reduced capability 7 segm digit, when segments are hard wired together.
+ Reduced capability 7 segment digit, when segments are hard wired together.
'1' : 2 segments digit only able to produce a 1.
'e' : Most significant day of the month digit,
able to produce at least 1 2 3.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 8136e1fd30fd..51f4221657bf 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -321,6 +321,7 @@ Code Seq#(hex) Include File Comments
0xDB 00-0F drivers/char/mwave/mwavepub.h
0xDD 00-3F ZFCP device driver see drivers/s390/scsi/
<mailto:aherrman@de.ibm.com>
+0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver
0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development)
<mailto:thomas@winischhofer.net>
0xF4 00-1F video/mbxfb.h mbxfb
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
index 092fc10961fe..4692241789b1 100644
--- a/Documentation/kasan.txt
+++ b/Documentation/kasan.txt
@@ -9,7 +9,9 @@ a fast and comprehensive solution for finding use-after-free and out-of-bounds
bugs.
KASan uses compile-time instrumentation for checking every memory access,
-therefore you will need a certain version of GCC > 4.9.2
+therefore you will need a gcc version of 4.9.2 or later. KASan can detect
+out-of-bounds accesses to stack or global variables, but only if gcc 5.0 or
+later was used to build the kernel.
Currently KASan is supported only for x86_64 architecture and requires that the
kernel be built with the SLUB allocator.
@@ -23,8 +25,8 @@ To enable KASAN configure kernel with:
and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline/inline
is compiler instrumentation types. The former produces smaller binary the
-latter is 1.1 - 2 times faster. Inline instrumentation requires GCC 5.0 or
-latter.
+latter is 1.1 - 2 times faster. Inline instrumentation requires a gcc version
+of 5.0 or later.
Currently KASAN works only with the SLUB memory allocator.
For better bug detection and nicer report, enable CONFIG_STACKTRACE and put
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 01aa47d3b6ab..61ab1628a057 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -165,7 +165,7 @@ multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
bytes respectively. Such letter suffixes can also be entirely omitted.
- acpi= [HW,ACPI,X86]
+ acpi= [HW,ACPI,X86,ARM64]
Advanced Configuration and Power Interface
Format: { force | off | strict | noirq | rsdt }
force -- enable ACPI if default was off
@@ -175,6 +175,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
strictly ACPI specification compliant.
rsdt -- prefer RSDT over (default) XSDT
copy_dsdt -- copy DSDT to memory
+ For ARM64, ONLY "acpi=off" or "acpi=force" are available
See also Documentation/power/runtime_pm.txt, pci=noacpi
@@ -713,10 +714,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
+ uart[8250],mmio32,<addr>[,options]
+ uart[8250],0x<addr>[,options]
Start an early, polled-mode console on the 8250/16550
UART at the specified I/O port or MMIO address,
- switching to the matching ttyS device later. The
- options are the same as for ttyS, above.
+ switching to the matching ttyS device later.
+ MMIO inter-register address stride is either 8-bit
+ (mmio) or 32-bit (mmio32).
+ If none of [io|mmio|mmio32], <addr> is assumed to be
+ equivalent to 'mmio'. 'options' are specified in the
+ same format described for ttyS above; if unspecified,
+ the h/w is not re-initialized.
+
hvc<n> Use the hypervisor console device <n>. This is for
both Xen and PowerPC hypervisors.
@@ -928,6 +937,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Enable debug messages at boot time. See
Documentation/dynamic-debug-howto.txt for details.
+ eagerfpu= [X86]
+ on enable eager fpu restore
+ off disable eager fpu restore
+ auto selects the default scheme, which automatically
+ enables eagerfpu restore for xsaveopt.
+
early_ioremap_debug [KNL]
Enable debug messages in early_ioremap support. This
is useful for tracking down temporary early mappings
@@ -944,11 +959,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
uart[8250],mmio32,<addr>[,options]
+ uart[8250],0x<addr>[,options]
Start an early, polled-mode console on the 8250/16550
UART at the specified I/O port or MMIO address.
MMIO inter-register address stride is either 8-bit
(mmio) or 32-bit (mmio32).
- The options are the same as for ttyS, above.
+ If none of [io|mmio|mmio32], <addr> is assumed to be
+ equivalent to 'mmio'. 'options' are specified in the
+ same format described for "console=ttyS<n>"; if
+ unspecified, the h/w is not initialized.
pl011,<addr>
Start an early, polled-mode console on a pl011 serial
@@ -1966,6 +1985,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
or
memmap=0x10000$0x18690000
+ memmap=nn[KMG]!ss[KMG]
+ [KNL,X86] Mark specific memory as protected.
+ Region of memory to be used, from ss to ss+nn.
+ The memory region may be marked as e820 type 12 (0xc)
+ and is NVDIMM or ADR memory.
+
memory_corruption_check=0/1 [X86]
Some BIOSes seem to corrupt the first 64k of
memory when doing things like suspend/resume.
@@ -1989,7 +2014,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
seconds. Use this parameter to check at some
other rate. 0 disables periodic checking.
- memtest= [KNL,X86] Enable memtest
+ memtest= [KNL,X86,ARM] Enable memtest
Format: <integer>
default : 0 <disable>
Specifies the number of memtest passes to be
@@ -2236,8 +2261,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nmi_watchdog= [KNL,BUGS=X86] Debugging features for SMP kernels
Format: [panic,][nopanic,][num]
- Valid num: 0
+ Valid num: 0 or 1
0 - turn nmi_watchdog off
+ 1 - turn nmi_watchdog on
When panic is specified, panic when an NMI watchdog
timeout occurs (or 'nopanic' to override the opposite
default).
@@ -2316,12 +2342,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noexec32=off: disable non-executable mappings
read implies executable mappings
- nofpu [SH] Disable hardware FPU at boot time.
+ nofpu [MIPS,SH] Disable hardware FPU at boot time.
nofxsr [BUGS=X86-32] Disables x86 floating point extended
register save and restore. The kernel will only save
legacy floating-point registers on task switch.
+ nohugeiomap [KNL,x86] Disable kernel huge I/O mappings.
+
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
@@ -2341,12 +2369,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
parameter, xsave area per process might occupy more
memory on xsaves enabled systems.
- eagerfpu= [X86]
- on enable eager fpu restore
- off disable eager fpu restore
- auto selects the default scheme, which automatically
- enables eagerfpu restore for xsaveopt.
-
nohlt [BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
wfi(ARM) instruction doesn't work correctly and not to
use it. This is also useful when using JTAG debugger.
@@ -2464,7 +2486,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nousb [USB] Disable the USB subsystem
- nowatchdog [KNL] Disable the lockup detector (NMI watchdog).
+ nowatchdog [KNL] Disable both lockup detectors, i.e.
+ soft-lockup and NMI watchdog (hard-lockup).
nowb [ARM]
@@ -2969,6 +2992,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Set maximum number of finished RCU callbacks to
process in one batch.
+ rcutree.gp_init_delay= [KNL]
+ Set the number of jiffies to delay each step of
+ RCU grace-period initialization. This only has
+ effect when CONFIG_RCU_TORTURE_TEST_SLOW_INIT is
+ set.
+
rcutree.rcu_fanout_leaf= [KNL]
Increase the number of CPUs assigned to each
leaf rcu_node structure. Useful for very large
@@ -2992,11 +3021,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
value is one, and maximum value is HZ.
rcutree.kthread_prio= [KNL,BOOT]
- Set the SCHED_FIFO priority of the RCU
- per-CPU kthreads (rcuc/N). This value is also
- used for the priority of the RCU boost threads
- (rcub/N). Valid values are 1-99 and the default
- is 1 (the least-favored priority).
+ Set the SCHED_FIFO priority of the RCU per-CPU
+ kthreads (rcuc/N). This value is also used for
+ the priority of the RCU boost threads (rcub/N)
+ and for the RCU grace-period kthreads (rcu_bh,
+ rcu_preempt, and rcu_sched). If RCU_BOOST is
+ set, valid values are 1-99 and the default is 1
+ (the least-favored priority). Otherwise, when
+ RCU_BOOST is not set, valid values are 0-99 and
+ the default is zero (non-realtime operation).
rcutree.rcu_nocb_leader_stride= [KNL]
Set the number of NOCB kthread groups, which
@@ -3463,6 +3496,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
improve throughput, but will also increase the
amount of memory reserved for use by the client.
+ suspend.pm_test_delay=
+ [SUSPEND]
+ Sets the number of seconds to remain in a suspend test
+ mode before resuming the system (see
+ /sys/power/pm_test). Only available when CONFIG_PM_DEBUG
+ is set. Default value is 5.
+
swapaccount=[0|1]
[KNL] Enable accounting of swap in memory resource
controller if no parameter or 1 is given or disable
@@ -3747,6 +3787,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
READ_CAPACITY_16 command);
f = NO_REPORT_OPCODES (don't use report opcodes
command, uas only);
+ g = MAX_SECTORS_240 (don't transfer more than
+ 240 sectors at a time, uas only);
h = CAPACITY_HEURISTICS (decrease the
reported device capacity by one
sector if the number is odd);
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index f3cd299fcc41..f4cbfe0ba108 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -190,20 +190,24 @@ To reduce its OS jitter, do any of the following:
on each CPU, including cs_dbs_timer() and od_dbs_timer().
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
- d. It is not possible to entirely get rid of OS jitter
- from vmstat_update() on CONFIG_SMP=y systems, but you
- can decrease its frequency by writing a large value
- to /proc/sys/vm/stat_interval. The default value is
- HZ, for an interval of one second. Of course, larger
- values will make your virtual-memory statistics update
- more slowly. Of course, you can also run your workload
- at a real-time priority, thus preempting vmstat_update(),
+ d. As of v3.18, Christoph Lameter's on-demand vmstat workers
+ commit prevents OS jitter due to vmstat_update() on
+ CONFIG_SMP=y systems. Before v3.18, it is not possible
+ to entirely get rid of the OS jitter, but you can
+ decrease its frequency by writing a large value to
+ /proc/sys/vm/stat_interval. The default value is HZ,
+ for an interval of one second. Of course, larger values
+ will make your virtual-memory statistics update more
+ slowly. Of course, you can also run your workload at
+ a real-time priority, thus preempting vmstat_update(),
but if your workload is CPU-bound, this is a bad idea.
However, there is an RFC patch from Christoph Lameter
(based on an earlier one from Gilad Ben-Yossef) that
reduces or even eliminates vmstat overhead for some
workloads at https://lkml.org/lkml/2013/9/4/379.
- e. If running on high-end powerpc servers, build with
+ e. Boot with "elevator=noop" to avoid workqueue use by
+ the block layer.
+ f. If running on high-end powerpc servers, build with
CONFIG_PPC_RTAS_DAEMON=n. This prevents the RTAS
daemon from running on each CPU every second or so.
(This will require editing Kconfig files and will defeat
@@ -211,12 +215,12 @@ To reduce its OS jitter, do any of the following:
due to the rtas_event_scan() function.
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
- f. If running on Cell Processor, build your kernel with
+ g. If running on Cell Processor, build your kernel with
CBE_CPUFREQ_SPU_GOVERNOR=n to avoid OS jitter from
spu_gov_work().
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
- g. If running on PowerMAC, build your kernel with
+ h. If running on PowerMAC, build your kernel with
CONFIG_PMAC_RACKMETER=n to disable the CPU-meter,
avoiding OS jitter from rackmeter_do_timer().
@@ -258,8 +262,12 @@ Purpose: Detect software lockups on each CPU.
To reduce its OS jitter, do at least one of the following:
1. Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
kthreads from being created in the first place.
-2. Echo a zero to /proc/sys/kernel/watchdog to disable the
+2. Boot with "nosoftlockup=0", which will also prevent these kthreads
+ from being created. Other related watchdog and softlockup boot
+ parameters may be found in Documentation/kernel-parameters.txt
+ and Documentation/watchdog/watchdog-parameters.txt.
+3. Echo a zero to /proc/sys/kernel/watchdog to disable the
watchdog timer.
-3. Echo a large number of /proc/sys/kernel/watchdog_thresh in
+4. Echo a large number to /proc/sys/kernel/watchdog_thresh in
order to reduce the frequency of OS jitter due to the watchdog
timer down to a level that is acceptable for your workload.
diff --git a/Documentation/kmemcheck.txt b/Documentation/kmemcheck.txt
index a41bdebbe87b..80aae85d8da6 100644
--- a/Documentation/kmemcheck.txt
+++ b/Documentation/kmemcheck.txt
@@ -82,8 +82,8 @@ menu to even appear in "menuconfig". These are:
o CONFIG_DEBUG_PAGEALLOC=n
- This option is located under "Kernel hacking" / "Debug page memory
- allocations".
+ This option is located under "Kernel hacking" / "Memory Debugging"
+ / "Debug page memory allocations".
In addition, I highly recommend turning on CONFIG_DEBUG_INFO=y. This is also
located under "Kernel hacking". With this, you will be able to get line number
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 1488b6525eb6..1f9b3e2b98ae 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -305,8 +305,8 @@ architectures:
3. Configuring Kprobes
When configuring the kernel using make menuconfig/xconfig/oldconfig,
-ensure that CONFIG_KPROBES is set to "y". Under "Instrumentation
-Support", look for "Kprobes".
+ensure that CONFIG_KPROBES is set to "y". Under "General setup", look
+for "Kprobes".
So that you can load and unload Kprobes-based instrumentation modules,
make sure "Loadable module support" (CONFIG_MODULES) and "Module
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index fc04c14de4bb..72a150d8f3df 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -1355,6 +1355,24 @@ Sysfs notes:
rfkill controller switch "tpacpi_uwb_sw": refer to
Documentation/rfkill.txt for details.
+Adaptive keyboard
+-----------------
+
+sysfs device attribute: adaptive_kbd_mode
+
+This sysfs attribute controls the keyboard "face" that will be shown on the
+Lenovo X1 Carbon 2nd gen (2014)'s adaptive keyboard. The value can be read
+and set.
+
+1 = Home mode
+2 = Web-browser mode
+3 = Web-conference mode
+4 = Function mode
+5 = Layflat mode
+
+For more details about which buttons will appear depending on the mode, please
+review the laptop's user guide:
+http://www.lenovo.com/shop/americas/content/user_guides/x1carbon_2_ug_en.pdf
Multiple Commands, Module Parameters
------------------------------------
diff --git a/Documentation/leds/leds-class-flash.txt b/Documentation/leds/leds-class-flash.txt
new file mode 100644
index 000000000000..19bb67355424
--- /dev/null
+++ b/Documentation/leds/leds-class-flash.txt
@@ -0,0 +1,22 @@
+
+Flash LED handling under Linux
+==============================
+
+Some LED devices provide two modes - torch and flash. In the LED subsystem
+those modes are supported by LED class (see Documentation/leds/leds-class.txt)
+and LED Flash class respectively. The torch mode related features are enabled
+by default and the flash ones only if a driver declares them by setting the
+LED_DEV_CAP_FLASH flag.
+
+In order to enable the support for flash LEDs CONFIG_LEDS_CLASS_FLASH symbol
+must be defined in the kernel config. A LED Flash class driver must be
+registered in the LED subsystem with the led_classdev_flash_register() function.
+
+The following sysfs attributes are exposed for controlling flash LED devices:
+(see Documentation/ABI/testing/sysfs-class-led-flash)
+ - flash_brightness
+ - max_flash_brightness
+ - flash_timeout
+ - max_flash_timeout
+ - flash_strobe
+ - flash_fault
diff --git a/Documentation/md-cluster.txt b/Documentation/md-cluster.txt
new file mode 100644
index 000000000000..de1af7db3355
--- /dev/null
+++ b/Documentation/md-cluster.txt
@@ -0,0 +1,176 @@
+The cluster MD is a shared-device RAID for a cluster.
+
+
+1. On-disk format
+
+A separate write-intent bitmap is used for each cluster node.
+The bitmaps record all writes that may have been started on that node,
+and may not yet have finished. The on-disk layout is:
+
+0                    4k                     8k                    12k
+-------------------------------------------------------------------
+| idle                | md super            | bm super [0] + bits |
+| bm bits[0, contd]   | bm super[1] + bits  | bm bits[1, contd]   |
+| bm super[2] + bits  | bm bits [2, contd]  | bm super[3] + bits  |
+| bm bits [3, contd]  |                     |                     |
+
+During "normal" functioning we assume the filesystem ensures that only one
+node writes to any given block at a time, so a write
+request will
+ - set the appropriate bit (if not already set)
+ - commit the write to all mirrors
+ - schedule the bit to be cleared after a timeout.
+
+Reads are just handled normally. It is up to the filesystem to
+ensure one node doesn't read from a location where another node (or the same
+node) is writing.
+
+
+2. DLM Locks for management
+
+There are two locks for managing the device:
+
+2.1 Bitmap lock resource (bm_lockres)
+
+ The bm_lockres protects individual node bitmaps. They are named in the
+ form bitmap001 for node 1, bitmap002 for node 2, and so on. When a node
+ joins the cluster, it acquires the lock in PW mode and it stays so
+ during the lifetime the node is part of the cluster. The lock resource
+ number is based on the slot number returned by the DLM subsystem. Since
+ DLM starts node count from one and bitmap slots start from zero, one is
+ subtracted from the DLM slot number to arrive at the bitmap slot number.
+
+3. Communication
+
+Each node has to communicate with other nodes when starting or ending
+resync, and metadata superblock updates.
+
+3.1 Message Types
+
+ There are three types of messages which are passed:
+
+ 3.1.1 METADATA_UPDATED: informs other nodes that the metadata has been
+ updated, and the node must re-read the md superblock. This is performed
+ synchronously.
+
+ 3.1.2 RESYNC: informs other nodes that a resync is initiated or ended
+ so that each node may suspend or resume the region.
+
+3.2 Communication mechanism
+
+ The DLM LVB is used to communicate within nodes of the cluster. There
+ are three resources used for the purpose:
+
+ 3.2.1 Token: The resource which protects the entire communication
+ system. The node having the token resource is allowed to
+ communicate.
+
+ 3.2.2 Message: The lock resource which carries the data to
+ communicate.
+
+ 3.2.3 Ack: The resource which, when acquired, means the message has been
+ acknowledged by all nodes in the cluster. The BAST of the resource
+ is used to inform the receiving node that a node wants to communicate.
+
+The algorithm is:
+
+ 1. receive status
+
+ sender receiver receiver
+ ACK:CR ACK:CR ACK:CR
+
+ 2. sender get EX of TOKEN
+ sender get EX of MESSAGE
+ sender receiver receiver
+ TOKEN:EX ACK:CR ACK:CR
+ MESSAGE:EX
+ ACK:CR
+
+ Sender checks that it still needs to send a message. Messages received
+ or other events that happened while waiting for the TOKEN may have made
+ this message inappropriate or redundant.
+
+ 3. sender write LVB.
+ sender down-convert MESSAGE from EX to CR
+ sender try to get EX of ACK
+ [ wait until all receivers have *processed* the MESSAGE ]
+
+ [ triggered by bast of ACK ]
+ receiver get CR of MESSAGE
+ receiver read LVB
+ receiver processes the message
+ [ wait finish ]
+ receiver release ACK
+
+ sender receiver receiver
+ TOKEN:EX MESSAGE:CR MESSAGE:CR
+ MESSAGE:CR
+ ACK:EX
+
+ 4. triggered by grant of EX on ACK (indicating all receivers have processed
+ message)
+ sender down-convert ACK from EX to CR
+ sender release MESSAGE
+ sender release TOKEN
+ receiver upconvert to EX of MESSAGE
+ receiver get CR of ACK
+ receiver release MESSAGE
+
+ sender receiver receiver
+ ACK:CR ACK:CR ACK:CR
+
+
+4. Handling Failures
+
+4.1 Node Failure
+ When a node fails, the DLM informs the cluster with the slot. The node
+ starts a cluster recovery thread. The cluster recovery thread:
+ - acquires the bitmap<number> lock of the failed node
+ - opens the bitmap
+ - reads the bitmap of the failed node
+ - copies the set bitmap to local node
+ - cleans the bitmap of the failed node
+ - releases bitmap<number> lock of the failed node
+ - initiates resync of the bitmap on the current node
+
+ The resync process is the regular md resync. However, in a clustered
+ environment when a resync is performed, it needs to tell other nodes
+ of the areas which are suspended. Before a resync starts, the node
+ sends out RESYNC_START with the (lo,hi) range of the area which needs
+ to be suspended. Each node maintains a suspend_list, which contains
+ the list of ranges which are currently suspended. On receiving
+ RESYNC_START, the node adds the range to the suspend_list. Similarly,
+ when the node performing resync finishes, it sends RESYNC_FINISHED
+ to other nodes and other nodes remove the corresponding entry from
+ the suspend_list.
+
+ A helper function, should_suspend(), can be used to check if a particular
+ I/O range should be suspended or not.
+
+4.2 Device Failure
+ Device failures are handled and communicated with the metadata update
+ routine.
+
+5. Adding a new Device
+For adding a new device, it is necessary that all nodes "see" the new device
+to be added. For this, the following algorithm is used:
+
+ 1. Node 1 issues mdadm --manage /dev/mdX --add /dev/sdYY which issues
+ ioctl(ADD_NEW_DISC with disc.state set to MD_DISK_CLUSTER_ADD)
+ 2. Node 1 sends NEWDISK with uuid and slot number
+ 3. Other nodes issue kobject_uevent_env with uuid and slot number
+ (Steps 4,5 could be a udev rule)
+ 4. In userspace, the node searches for the disk, perhaps
+ using blkid -t SUB_UUID=""
+ 5. Other nodes issue either of the following depending on whether the disk
+ was found:
+ ioctl(ADD_NEW_DISK with disc.state set to MD_DISK_CANDIDATE and
+ disc.number set to slot number)
+ ioctl(CLUSTERED_DISK_NACK)
+ 6. Other nodes drop lock on no-new-devs (CR) if device is found
+ 7. Node 1 attempts EX lock on no-new-devs
+ 8. If node 1 gets the lock, it sends METADATA_UPDATED after unmarking the disk
+ as SpareLocal
+ 9. If not (get no-new-dev lock), it fails the operation and sends METADATA_UPDATED
+ 10. Other nodes get the information whether a disk is added or not
+ by the following METADATA_UPDATED.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index ca2387ef27ab..f95746189b5d 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -592,9 +592,9 @@ See also the subsection on "Cache Coherency" for a more thorough example.
CONTROL DEPENDENCIES
--------------------
-A control dependency requires a full read memory barrier, not simply a data
-dependency barrier to make it work correctly. Consider the following bit of
-code:
+A load-load control dependency requires a full read memory barrier, not
+simply a data dependency barrier to make it work correctly. Consider the
+following bit of code:
q = ACCESS_ONCE(a);
if (q) {
@@ -615,14 +615,15 @@ case what's actually required is:
}
However, stores are not speculated. This means that ordering -is- provided
-in the following example:
+for load-store control dependencies, as in the following example:
q = ACCESS_ONCE(a);
if (q) {
ACCESS_ONCE(b) = p;
}
-Please note that ACCESS_ONCE() is not optional! Without the
+Control dependencies pair normally with other types of barriers.
+That said, please note that ACCESS_ONCE() is not optional! Without the
ACCESS_ONCE(), the compiler might combine the load from 'a' with other loads from
'a', and the store to 'b' with other stores to 'b', with possible highly
counterintuitive effects on ordering.
@@ -813,6 +814,8 @@ In summary:
barrier() can help to preserve your control dependency. Please
see the Compiler Barrier section for more information.
+ (*) Control dependencies pair normally with other types of barriers.
+
(*) Control dependencies do -not- provide transitivity. If you
need transitivity, use smp_mb().
@@ -823,14 +826,14 @@ SMP BARRIER PAIRING
When dealing with CPU-CPU interactions, certain types of memory barrier should
always be paired. A lack of appropriate pairing is almost certainly an error.
-General barriers pair with each other, though they also pair with
-most other types of barriers, albeit without transitivity. An acquire
-barrier pairs with a release barrier, but both may also pair with other
-barriers, including of course general barriers. A write barrier pairs
-with a data dependency barrier, an acquire barrier, a release barrier,
-a read barrier, or a general barrier. Similarly a read barrier or a
-data dependency barrier pairs with a write barrier, an acquire barrier,
-a release barrier, or a general barrier:
+General barriers pair with each other, though they also pair with most
+other types of barriers, albeit without transitivity. An acquire barrier
+pairs with a release barrier, but both may also pair with other barriers,
+including of course general barriers. A write barrier pairs with a data
+dependency barrier, a control dependency, an acquire barrier, a release
+barrier, a read barrier, or a general barrier. Similarly a read barrier,
+control dependency, or a data dependency barrier pairs with a write
+barrier, an acquire barrier, a release barrier, or a general barrier:
CPU 1 CPU 2
=============== ===============
@@ -850,6 +853,19 @@ Or:
<data dependency barrier>
y = *x;
+Or even:
+
+ CPU 1 CPU 2
+ =============== ===============================
+ r1 = ACCESS_ONCE(y);
+ <general barrier>
+ ACCESS_ONCE(y) = 1; if (r2 = ACCESS_ONCE(x)) {
+ <implicit control dependency>
+ ACCESS_ONCE(y) = 1;
+ }
+
+ assert(r1 == 0 || r2 == 0);
+
Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.
@@ -1711,7 +1727,7 @@ There are some more advanced barrier functions:
}
The dma_rmb() allows us guarantee the device has released ownership
- before we read the data from the descriptor, and he dma_wmb() allows
+ before we read the data from the descriptor, and the dma_wmb() allows
us to guarantee the data is written to the descriptor before the device
can see it now has ownership. The wmb() is needed to guarantee that the
cache coherent memory writes have completed before attempting a write to
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index ea03abfc97e9..ce2cfcf35c27 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -149,7 +149,7 @@ For example, assume 1GiB memory block size. A device for a memory starting at
(0x100000000 / 1Gib = 4)
This device covers address range [0x100000000 ... 0x140000000)
-Under each memory block, you can see 4 files:
+Under each memory block, you can see 5 files:
/sys/devices/system/memory/memoryXXX/phys_index
/sys/devices/system/memory/memoryXXX/phys_device
@@ -359,38 +359,51 @@ Need more implementation yet....
--------------------------------
8. Memory hotplug event notifier
--------------------------------
-Memory hotplug has event notifier. There are 6 types of notification.
+Hotplugging events are sent to a notification queue.
-MEMORY_GOING_ONLINE
+There are six types of notification defined in include/linux/memory.h:
+
+MEM_GOING_ONLINE
Generated before new memory becomes available in order to be able to
prepare subsystems to handle memory. The page allocator is still unable
to allocate from the new memory.
-MEMORY_CANCEL_ONLINE
+MEM_CANCEL_ONLINE
Generated if MEM_GOING_ONLINE fails.
-MEMORY_ONLINE
+MEM_ONLINE
Generated when memory has successfully been brought online. The callback may
allocate pages from the new memory.
-MEMORY_GOING_OFFLINE
+MEM_GOING_OFFLINE
Generated to begin the process of offlining memory. Allocations are no
longer possible from the memory but some of the memory to be offlined
is still in use. The callback can be used to free memory known to a
subsystem from the indicated memory block.
-MEMORY_CANCEL_OFFLINE
+MEM_CANCEL_OFFLINE
Generated if MEM_GOING_OFFLINE fails. Memory is available again from
the memory block that we attempted to offline.
-MEMORY_OFFLINE
+MEM_OFFLINE
Generated after offlining memory is complete.
-A callback routine can be registered by
+A callback routine can be registered by calling
+
hotplug_memory_notifier(callback_func, priority)
-The second argument of callback function (action) is event types of above.
-The third argument is passed by pointer of struct memory_notify.
+Callback functions with higher values of priority are called before callback
+functions with lower values.
+
+A callback function must have the following prototype:
+
+ int callback_func(
+ struct notifier_block *self, unsigned long action, void *arg);
+
+The first argument of the callback function (self) is a pointer to the
+notifier block in the notifier chain that refers to the callback function
+itself.
+The second argument (action) is one of the event types described above.
+The third argument (arg) passes a pointer to struct memory_notify.
struct memory_notify {
unsigned long start_pfn;
@@ -412,6 +425,18 @@ node loses all memory. If this is -1, then nodemask status is not changed.
If status_changed_nid* >= 0, callback should create/discard structures for the
node if necessary.
+The callback routine shall return one of the values
+NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD, or NOTIFY_STOP
+defined in include/linux/notifier.h.
+
+NOTIFY_DONE and NOTIFY_OK have no effect on further processing.
+
+NOTIFY_BAD is used as a response to the MEM_GOING_ONLINE, MEM_GOING_OFFLINE,
+MEM_ONLINE, or MEM_OFFLINE action to cancel hotplugging. It stops
+further processing of the notification queue.
+
+NOTIFY_STOP stops further processing of the notification queue.
+
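+For illustration, a minimal sketch of such a callback and its registration
+might look like the following (foo_mem_callback and foo_mem_init are made-up
+example names, and the priority 0 is arbitrary):
+
+	#include <linux/memory.h>
+	#include <linux/notifier.h>
+
+	static int foo_mem_callback(struct notifier_block *self,
+				    unsigned long action, void *arg)
+	{
+		struct memory_notify *mn = arg;
+
+		switch (action) {
+		case MEM_GOING_ONLINE:
+			pr_info("memory block going online, node %d\n",
+				mn->status_change_nid);
+			break;
+		case MEM_OFFLINE:
+			/* tear down whatever was set up for this memory block */
+			break;
+		}
+		return NOTIFY_OK;
+	}
+
+	static int __init foo_mem_init(void)
+	{
+		return hotplug_memory_notifier(foo_mem_callback, 0);
+	}
+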
--------------
9. Future Work
--------------
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
index 09c2382ad055..c72702ec1ded 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/module-signing.txt
@@ -119,9 +119,9 @@ Most notably, in the x509.genkey file, the req_distinguished_name section
should be altered from the default:
[ req_distinguished_name ]
- O = Magrathea
- CN = Glacier signing key
- emailAddress = slartibartfast@magrathea.h2g2
+ #O = Unspecified company
+ CN = Build time autogenerated kernel key
+ #emailAddress = unspecified.user@unspecified.company
The generated RSA key size can also be set with:
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 0a2859a8ee7e..5abad1e921ca 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -22,7 +22,8 @@ This file contains
4.1.3 RAW socket option CAN_RAW_LOOPBACK
4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
4.1.5 RAW socket option CAN_RAW_FD_FRAMES
- 4.1.6 RAW socket returned message flags
+ 4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
+ 4.1.7 RAW socket returned message flags
4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
4.2.1 Broadcast Manager operations
4.2.2 Broadcast Manager message flags
@@ -601,7 +602,22 @@ solution for a couple of reasons:
CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
- 4.1.6 RAW socket returned message flags
+ 4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
+
+ The CAN_RAW socket can set multiple CAN identifier specific filters that
+ lead to multiple filters in the af_can.c filter processing. These filters
+ are independent of each other, which leads to logically OR'ed filters when
+ applied (see 4.1.1).
+
+ This socket option joins the given CAN filters so that only CAN frames
+ that match *all* given CAN filters are passed to user space. The semantics
+ of the applied filters are therefore changed to a logical AND.
+
+ This is especially useful when the filterset is a combination of filters
+ where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
+ CAN ID ranges from the incoming traffic.
+
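+ For illustration, a minimal sketch (assuming a CAN_RAW socket 's' and the
+ headers set up as in section 4.1.1; the filter values are arbitrary):
+
+	struct can_filter rfilter[2];
+	int join_filters = 1;
+
+	rfilter[0].can_id   = 0x100;		/* pass IDs 0x100..0x1FF ... */
+	rfilter[0].can_mask = 0x700;
+	rfilter[1].can_id   = 0x123 | CAN_INV_FILTER; /* ... except ID 0x123 */
+	rfilter[1].can_mask = CAN_SFF_MASK;
+
+	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
+	setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS,
+		   &join_filters, sizeof(join_filters));
+
+ With CAN_RAW_JOIN_FILTERS enabled, a frame is only delivered if it matches
+ the ID range filter AND the inverted single-ID filter, i.e. IDs 0x100..0x1FF
+ except 0x123.
+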
+ 4.1.7 RAW socket returned message flags
When using recvmsg() call, the msg->msg_flags may contain following flags:
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 9930ecfbb465..135581f015e1 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -280,7 +280,8 @@ Possible BPF extensions are shown in the following table:
rxhash skb->hash
cpu raw_smp_processor_id()
vlan_tci skb_vlan_tag_get(skb)
- vlan_pr skb_vlan_tag_present(skb)
+ vlan_avail skb_vlan_tag_present(skb)
+ vlan_tpid skb->vlan_proto
rand prandom_u32()
These extensions can also be prefixed with '#'.
diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt
index 43d3549366a0..15534fdd09a8 100644
--- a/Documentation/networking/igb.txt
+++ b/Documentation/networking/igb.txt
@@ -42,10 +42,10 @@ Additional Configurations
Jumbo Frames
------------
Jumbo Frames support is enabled by changing the MTU to a value larger than
- the default of 1500. Use the ifconfig command to increase the MTU size.
+ the default of 1500. Use the ip command to increase the MTU size.
For example:
- ifconfig eth<x> mtu 9000 up
+ ip link set dev eth<x> mtu 9000
This setting is not saved across reboots.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 1b8c964b0d17..071fb18dc57c 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -388,6 +388,16 @@ tcp_mtu_probing - INTEGER
1 - Disabled by default, enabled when an ICMP black hole detected
2 - Always enabled, use initial MSS of tcp_base_mss.
+tcp_probe_interval - INTEGER
+ Controls how often to start TCP Packetization-Layer Path MTU
+ Discovery reprobe. The default is reprobing every 10 minutes as
+ per RFC4821.
+
+tcp_probe_threshold - INTEGER
+ Controls when TCP Packetization-Layer Path MTU Discovery probing
+ will stop with respect to the width of the search range in bytes.
+ The default is 8 bytes.
+
tcp_no_metrics_save - BOOLEAN
By default, TCP saves various connection metrics in the route cache
when the connection closes, so that connections established in the
@@ -1116,11 +1126,23 @@ arp_accept - BOOLEAN
gratuitous arp frame, the arp table will be updated regardless
if this setting is on or off.
+mcast_solicit - INTEGER
+ The maximum number of multicast probes in INCOMPLETE state,
+ when the associated hardware address is unknown. Defaults
+ to 3.
+
+ucast_solicit - INTEGER
+ The maximum number of unicast probes in PROBE state, when
+ the hardware address is being reconfirmed. Defaults to 3.
app_solicit - INTEGER
The maximum number of probes to send to the user space ARP daemon
via netlink before dropping back to multicast probes (see
- mcast_solicit). Defaults to 0.
+ mcast_resolicit). Defaults to 0.
+
+mcast_resolicit - INTEGER
+ The maximum number of multicast probes after unicast and
+ app probes in PROBE state. Defaults to 0.
disable_policy - BOOLEAN
Disable IPSEC policy (SPD) for this interface
@@ -1198,6 +1220,17 @@ anycast_src_echo_reply - BOOLEAN
FALSE: disabled
Default: FALSE
+idgen_delay - INTEGER
+ Controls the delay in seconds before retrying stable privacy
+ address generation after a DAD conflict has been detected.
+ Default: 1 (as specified in RFC7217)
+
+idgen_retries - INTEGER
+ Controls the number of retries to generate a stable privacy
+ address if a DAD conflict is detected.
+ Default: 3 (as specified in RFC7217)
+
mld_qrv - INTEGER
Controls the MLD query robustness variable (see RFC3810 9.1).
Default: 2 (as specified by RFC3810 9.1)
@@ -1518,6 +1551,20 @@ use_optimistic - BOOLEAN
0: disabled (default)
1: enabled
+stable_secret - IPv6 address
+ This IPv6 address will be used as a secret to generate IPv6
+ addresses for link-local addresses and autoconfigured
+ ones. All addresses generated after setting this secret will
+ be stable privacy ones by default. This can be changed via the
+ addrgenmode setting of ip-link. conf/default/stable_secret is used
+ as the secret for the namespace; the interface-specific ones can
+ override that. Writes to conf/all/stable_secret are refused.
+
+ It is recommended to generate this secret during installation
+ of a system and keep it stable after that.
+
+ By default the stable secret is unset.
+
icmp/*:
ratelimit - INTEGER
Limit the maximal rates for sending ICMPv6 packets.
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
index 7a3c04729591..3ba709531adb 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -22,6 +22,27 @@ backup_only - BOOLEAN
If set, disable the director function while the server is
in backup mode to avoid packet loops for DR/TUN methods.
+conn_reuse_mode - INTEGER
+ 1 - default
+
+ Controls how ipvs will deal with connections that are detected
+ to reuse a port. It is a bitmap, with the values being:
+
+ 0: disable any special handling on port reuse. The new
+ connection will be delivered to the same real server that was
+ servicing the previous connection. This will effectively
+ disable expire_nodest_conn.
+
+ bit 1: enable rescheduling of new connections when it is safe.
+ That is, whenever expire_nodest_conn applies and, for TCP sockets,
+ when the connection is in TIME_WAIT state (which is only possible
+ if you use NAT mode).
+
+ bit 2: the same as bit 1, plus rescheduling of TCP connections that
+ are in FIN_WAIT state, as this is the last state seen by the load
+ balancer in Direct Routing mode. This bit helps when adding new
+ real servers to a very busy cluster.
+
conntrack - BOOLEAN
0 - disabled (default)
not 0 - enabled
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index 1e0c045e89f7..9b4a10a1cf50 100644
--- a/Documentation/networking/ixgb.txt
+++ b/Documentation/networking/ixgb.txt
@@ -39,7 +39,7 @@ Channel Bonding documentation can be found in the Linux kernel source:
The driver information previously displayed in the /proc filesystem is not
supported in this release. Alternatively, you can use ethtool (version 1.6
-or later), lspci, and ifconfig to obtain the same information.
+or later), lspci, and iproute2 to obtain the same information.
Instructions on updating ethtool can be found in the section "Additional
Configurations" later in this document.
@@ -90,7 +90,7 @@ select m for "Intel(R) PRO/10GbE support" located at:
3. Assign an IP address to the interface by entering the following, where
x is the interface number:
- ifconfig ethx <IP_address>
+ ip addr add <IP_address> dev ethx
4. Verify that the interface works. Enter the following, where <IP_address>
is the IP address for another machine on the same subnet as the interface
@@ -177,7 +177,7 @@ NOTE: These changes are only suggestions, and serve as a starting point for
tuning your network performance.
The changes are made in three major ways, listed in order of greatest effect:
-- Use ifconfig to modify the mtu (maximum transmission unit) and the txqueuelen
+- Use ip link to modify the mtu (maximum transmission unit) and the txqueuelen
parameter.
- Use sysctl to modify /proc parameters (essentially kernel tuning)
- Use setpci to modify the MMRBC field in PCI-X configuration space to increase
@@ -202,7 +202,7 @@ setpci -d 8086:1a48 e6.b=2e
# to change as well.
# set the txqueuelen
# your ixgb adapter should be loaded as eth1 for this to work, change if needed
-ifconfig eth1 mtu 9000 txqueuelen 1000 up
+ip li set dev eth1 mtu 9000 txqueuelen 1000 up
# call the sysctl utility to modify /proc/sys entries
sysctl -p ./sysctl_ixgb.conf
- END ixgb_perf.sh
@@ -297,10 +297,10 @@ Additional Configurations
------------
The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
enabled by changing the MTU to a value larger than the default of 1500.
- The maximum value for the MTU is 16114. Use the ifconfig command to
+ The maximum value for the MTU is 16114. Use the ip command to
increase the MTU size. For example:
- ifconfig ethx mtu 9000 up
+ ip li set dev ethx mtu 9000
The maximum MTU setting for Jumbo Frames is 16114. This value coincides
with the maximum Jumbo Frames size of 16128.
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
index 0ace6e776ac8..6f0cb57b59c6 100644
--- a/Documentation/networking/ixgbe.txt
+++ b/Documentation/networking/ixgbe.txt
@@ -70,10 +70,10 @@ Avago 1000BASE-T SFP ABCU-5710RZ
82599-based adapters support all passive and active limiting direct attach
cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
-Laser turns off for SFP+ when ifconfig down
+Laser turns off for SFP+ when device is down
-------------------------------------------
-"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
-"ifconfig up" turns on the laser.
+"ip link set down" turns off the laser for 82599-based SFP+ fiber adapters.
+"ip link set up" turns on the laser.
82598-BASED ADAPTERS
@@ -213,13 +213,13 @@ Additional Configurations
------------
The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
enabled by changing the MTU to a value larger than the default of 1500.
- The maximum value for the MTU is 16110. Use the ifconfig command to
+ The maximum value for the MTU is 16110. Use the ip command to
increase the MTU size. For example:
- ifconfig ethx mtu 9000 up
+ ip link set dev ethx mtu 9000
- The maximum MTU setting for Jumbo Frames is 16110. This value coincides
- with the maximum Jumbo Frames size of 16128.
+ The maximum MTU setting for Jumbo Frames is 9710. This value coincides
+ with the maximum Jumbo Frames size of 9728.
Generic Receive Offload, aka GRO
--------------------------------
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
new file mode 100644
index 000000000000..9ed15f86c17c
--- /dev/null
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -0,0 +1,29 @@
+/proc/sys/net/mpls/* Variables:
+
+platform_labels - INTEGER
+ Number of entries in the platform label table. It is not
+ possible to configure forwarding for label values equal to or
+ greater than the number of platform labels.
+
+ A dense utilization of the entries in the platform label table
+ is possible and expected as the platform labels are locally
+ allocated.
+
+ If the number of platform label table entries is set to 0 no
+ label will be recognized by the kernel and mpls forwarding
+ will be disabled.
+
+ Reducing this value will remove all label routing entries that
+ no longer fit in the table.
+
+ Possible values: 0 - 1048575
+ Default: 0
+
+conf/<interface>/input - BOOL
+ Control whether packets can be input on this interface.
+
+ If disabled, packets will be discarded without further
+ processing.
+
+ 0 - disabled (default)
+ not 0 - enabled
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index a6d7cb91069e..daa015af16a0 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -440,9 +440,10 @@ and the following flags apply:
+++ Capture process:
from include/linux/if_packet.h
- #define TP_STATUS_COPY 2
- #define TP_STATUS_LOSING 4
- #define TP_STATUS_CSUMNOTREADY 8
+ #define TP_STATUS_COPY (1 << 1)
+ #define TP_STATUS_LOSING (1 << 2)
+ #define TP_STATUS_CSUMNOTREADY (1 << 3)
+ #define TP_STATUS_CSUM_VALID (1 << 7)
TP_STATUS_COPY : This flag indicates that the frame (and associated
meta information) has been truncated because it's
@@ -466,6 +467,12 @@ TP_STATUS_CSUMNOTREADY: currently it's used for outgoing IP packets which
reading the packet we should not try to check the
checksum.
+TP_STATUS_CSUM_VALID : This flag indicates that at least the transport
+ header checksum of the packet has already been
+ validated on the kernel side. If the flag is not set
+ then we are free to check the checksum by ourselves
+ provided that TP_STATUS_CSUMNOTREADY is also not set.
+
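+A minimal sketch of how user space might evaluate these flags when walking
+the RX ring (the 'frame' pointer name is hypothetical for the current ring
+slot; a TPACKET_V1 ring, i.e. struct tpacket_hdr from <linux/if_packet.h>,
+is assumed):
+
+	struct tpacket_hdr *hdr = frame;
+
+	if (hdr->tp_status & TP_STATUS_CSUM_VALID) {
+		/* transport header checksum already validated by the kernel */
+	} else if (!(hdr->tp_status & TP_STATUS_CSUMNOTREADY)) {
+		/* full checksum is present in the packet data; verify it here */
+	} else {
+		/* checksum not ready (e.g. outgoing packet); do not check it */
+	}
+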
for convenience there are also the following defines:
#define TP_STATUS_KERNEL 0
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 6915c6b27869..0344f1d45b37 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -3,13 +3,11 @@
HOWTO for the linux packet generator
------------------------------------
-Date: 041221
-
-Enable CONFIG_NET_PKTGEN to compile and build pktgen.o either in kernel
-or as module. Module is preferred. insmod pktgen if needed. Once running
-pktgen creates a thread on each CPU where each thread has affinity to its CPU.
-Monitoring and controlling is done via /proc. Easiest to select a suitable
-a sample script and configure.
+Enable CONFIG_NET_PKTGEN to compile and build pktgen either in-kernel
+or as a module. A module is preferred; modprobe pktgen if needed. Once
+running, pktgen creates a thread for each CPU with affinity to that CPU.
+Monitoring and controlling is done via /proc. It is easiest to select a
+suitable sample script and configure that.
On a dual CPU:
@@ -27,7 +25,7 @@ For monitoring and control pktgen creates:
Tuning NIC for max performance
==============================
-The default NIC setting are (likely) not tuned for pktgen's artificial
+The default NIC settings are (likely) not tuned for pktgen's artificial
overload type of benchmarking, as this could hurt the normal use-case.
Specifically increasing the TX ring buffer in the NIC:
@@ -35,20 +33,20 @@ Specifically increasing the TX ring buffer in the NIC:
A larger TX ring can improve pktgen's performance, while it can hurt
in the general case, 1) because the TX ring buffer might get larger
-than the CPUs L1/L2 cache, 2) because it allow more queueing in the
+than the CPU's L1/L2 cache, 2) because it allows more queueing in the
NIC HW layer (which is bad for bufferbloat).
-One should be careful to conclude, that packets/descriptors in the HW
+One should hesitate to conclude that packets/descriptors in the HW
TX ring cause delay. Drivers usually delay cleaning up the
-ring-buffers (for various performance reasons), thus packets stalling
-the TX ring, might just be waiting for cleanup.
+ring-buffers for various performance reasons, and packets stalling
+the TX ring might just be waiting for cleanup.
-This cleanup issues is specifically the case, for the driver ixgbe
-(Intel 82599 chip). This driver (ixgbe) combine TX+RX ring cleanups,
+This cleanup issue is specifically the case for the driver ixgbe
+(Intel 82599 chip). This driver (ixgbe) combines TX+RX ring cleanups,
and the cleanup interval is affected by the ethtool --coalesce setting
of parameter "rx-usecs".
-For ixgbe use e.g "30" resulting in approx 33K interrupts/sec (1/30*10^6):
+For ixgbe use e.g. "30" resulting in approx 33K interrupts/sec (1/30*10^6):
# ethtool -C ethX rx-usecs 30
@@ -60,15 +58,16 @@ Running:
Stopped: eth1
Result: OK: max_before_softirq=10000
-Most important the devices assigned to thread. Note! A device can only belong
-to one thread.
+Most important are the devices assigned to the thread. Note that a
+device can only belong to one thread.
Viewing devices
===============
-Parm section holds configured info. Current hold running stats.
-Result is printed after run or after interruption. Example:
+The Params section holds configured information. The Current section
+holds running statistics. The Result is printed after a run or after
+interruption. Example:
/proc/net/pktgen/eth1
@@ -93,7 +92,8 @@ Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
Configuring threads and devices
================================
-This is done via the /proc interface easiest done via pgset in the scripts
+This is done via the /proc interface, and most easily done via pgset
+as defined in the sample scripts.
Examples:
@@ -192,10 +192,11 @@ Examples:
pgset "rate 300M" set rate to 300 Mb/s
pgset "ratep 1000000" set rate to 1Mpps
-Example scripts
-===============
+Sample scripts
+==============
-A collection of small tutorial scripts for pktgen is in examples dir.
+A collection of small tutorial scripts for pktgen is in the
+samples/pktgen directory:
pktgen.conf-1-1 # 1 CPU 1 dev
pktgen.conf-1-2 # 1 CPU 2 dev
@@ -206,25 +207,26 @@ pktgen.conf-1-1-ip6 # 1 CPU 1 dev ipv6
pktgen.conf-1-1-ip6-rdos # 1 CPU 1 dev ipv6 w. route DoS
pktgen.conf-1-1-flows # 1 CPU 1 dev multiple flows.
-Run in shell: ./pktgen.conf-X-Y It does all the setup including sending.
+Run in shell: ./pktgen.conf-X-Y
+This does all the setup including sending.
Interrupt affinity
===================
-Note when adding devices to a specific CPU there good idea to also assign
-/proc/irq/XX/smp_affinity so the TX-interrupts gets bound to the same CPU.
-as this reduces cache bouncing when freeing skb's.
+Note that when adding devices to a specific CPU it is a good idea to
+also assign /proc/irq/XX/smp_affinity so that the TX interrupts are bound
+to the same CPU. This reduces cache bouncing when freeing skbs.
Enable IPsec
============
-Default IPsec transformation with ESP encapsulation plus Transport mode
-could be enabled by simply setting:
+Default IPsec transformation with ESP encapsulation plus transport mode
+can be enabled by simply setting:
pgset "flag IPSEC"
pgset "flows 1"
To avoid breaking existing testbed scripts for using AH type and tunnel mode,
-user could use "pgset spi SPI_VALUE" to specify which formal of transformation
+you can use "pgset spi SPI_VALUE" to specify which transformation mode
to employ.
diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt
index c67077cbeb80..e1a3d59bbe0f 100644
--- a/Documentation/networking/rds.txt
+++ b/Documentation/networking/rds.txt
@@ -62,11 +62,10 @@ Socket Interface
================
AF_RDS, PF_RDS, SOL_RDS
- These constants haven't been assigned yet, because RDS isn't in
- mainline yet. Currently, the kernel module assigns some constant
- and publishes it to user space through two sysctl files
- /proc/sys/net/rds/pf_rds
- /proc/sys/net/rds/sol_rds
+ AF_RDS and PF_RDS are the domain types to be used with socket(2)
+ to create RDS sockets. SOL_RDS is the socket level to be used
+ with setsockopt(2) and getsockopt(2) for RDS-specific socket
+ options.
fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
This creates a new, unbound RDS socket.
diff --git a/Documentation/networking/s2io.txt b/Documentation/networking/s2io.txt
index d2a9f43b5546..0362a42f7cf4 100644
--- a/Documentation/networking/s2io.txt
+++ b/Documentation/networking/s2io.txt
@@ -38,7 +38,7 @@ The corresponding adapter's LED will blink multiple times.
3. Features supported:
a. Jumbo frames. Xframe I/II supports MTU up to 9600 bytes,
-modifiable using ifconfig command.
+modifiable using ip command.
b. Offloads. Supports checksum offload(TCP/UDP/IP) on transmit
and receive, TSO.
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 99ca40e8e810..59f4db2a0c85 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -282,7 +282,7 @@ following is true:
- The current CPU's queue head counter >= the recorded tail counter
value in rps_dev_flow[i]
-- The current CPU is unset (equal to RPS_NO_CPU)
+- The current CPU is unset (>= nr_cpu_ids)
- The current CPU is offline
After this check, the packet is sent to the (possibly updated) current
@@ -421,6 +421,15 @@ best CPUs to share a given queue are probably those that share the cache
with the CPU that processes transmit completions for that queue
(transmit interrupts).
+Per TX Queue rate limitation:
+=============================
+
+These are hardware-implemented rate-limitation mechanisms. Currently only
+a max-rate attribute is supported, set by writing a Mbps value to
+
+/sys/class/net/<dev>/queues/tx-<n>/tx_maxrate
+
+A value of zero means disabled, and this is the default.
Further Information
===================
diff --git a/Documentation/networking/vxge.txt b/Documentation/networking/vxge.txt
index bb76c667a476..abfec245f97c 100644
--- a/Documentation/networking/vxge.txt
+++ b/Documentation/networking/vxge.txt
@@ -39,7 +39,7 @@ iii) PCI-SIG's I/O Virtualization
iv) Jumbo frames
X3100 Series supports MTU up to 9600 bytes, modifiable using
- ifconfig command.
+ ip command.
v) Offloads supported: (Enabled by default)
Checksum offload (TCP/UDP/IP) on transmit and receive paths
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index b8f2147b96dd..a9b47163bb5d 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -72,7 +72,6 @@ static struct pinctrl_desc foo_desc = {
.name = "foo",
.pins = foo_pins,
.npins = ARRAY_SIZE(foo_pins),
- .maxpin = 63,
.owner = THIS_MODULE,
};
@@ -164,8 +163,8 @@ static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
}
static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned ** const pins,
- unsigned * const num_pins)
+ const unsigned **pins,
+ unsigned *num_pins)
{
*pins = (unsigned *) foo_groups[selector].pins;
*num_pins = foo_groups[selector].num_pins;
@@ -570,9 +569,8 @@ is possible to perform the requested mux setting, poke the hardware so that
this happens.
Pinmux drivers are required to supply a few callback functions, some are
-optional. Usually the enable() and disable() functions are implemented,
-writing values into some certain registers to activate a certain mux setting
-for a certain pin.
+optional. Usually the set_mux() function is implemented, writing values into
+certain registers to activate a certain mux setting for a certain pin.
A simple driver for the above example will work by setting bits 0, 1, 2, 3 or 4
into some register named MUX to select a certain function with a certain
@@ -683,12 +681,12 @@ static const struct foo_pmx_func foo_functions[] = {
},
};
-int foo_get_functions_count(struct pinctrl_dev *pctldev)
+static int foo_get_functions_count(struct pinctrl_dev *pctldev)
{
return ARRAY_SIZE(foo_functions);
}
-const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
+static const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
{
return foo_functions[selector].name;
}
@@ -702,7 +700,7 @@ static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-int foo_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+static int foo_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
u8 regbit = (1 << selector + group);
@@ -711,7 +709,7 @@ int foo_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-struct pinmux_ops foo_pmxops = {
+static struct pinmux_ops foo_pmxops = {
.get_functions_count = foo_get_functions_count,
.get_function_name = foo_get_fname,
.get_function_groups = foo_get_groups,
@@ -1266,7 +1264,7 @@ The semantics of the pinctrl APIs are:
Usually the pin control core handles the get/put pair and calls out to the
device driver's bookkeeping operations, like checking available functions and
-the associated pins, whereas the enable/disable pass on to the pin controller
+the associated pins, whereas select_state passes on to the pin controller
driver which takes care of activating and/or deactivating the mux setting by
quickly poking some registers.
@@ -1363,8 +1361,9 @@ function, but with different named in the mapping as described under
"Advanced mapping" above. So that for an SPI device, we have two states named
"pos-A" and "pos-B".
-This snippet first muxes the function in the pins defined by group A, enables
-it, disables and releases it, and muxes it in on the pins defined by group B:
+This snippet first initializes a state object for both groups (in foo_probe()),
+then muxes the function in the pins defined by group A, and finally muxes it in
+on the pins defined by group B:
#include <linux/pinctrl/consumer.h>
diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
index edeecd447d23..b96098ccfe69 100644
--- a/Documentation/power/basic-pm-debugging.txt
+++ b/Documentation/power/basic-pm-debugging.txt
@@ -75,12 +75,14 @@ you should do the following:
# echo platform > /sys/power/disk
# echo disk > /sys/power/state
-Then, the kernel will try to freeze processes, suspend devices, wait 5 seconds,
-resume devices and thaw processes. If "platform" is written to
+Then, the kernel will try to freeze processes, suspend devices, wait a few
+seconds (5 by default, but configurable by the suspend.pm_test_delay module
+parameter), resume devices and thaw processes. If "platform" is written to
/sys/power/pm_test , then after suspending devices the kernel will additionally
invoke the global control methods (eg. ACPI global control methods) used to
-prepare the platform firmware for hibernation. Next, it will wait 5 seconds and
-invoke the platform (eg. ACPI) global methods used to cancel hibernation etc.
+prepare the platform firmware for hibernation. Next, it will wait a
+configurable number of seconds and invoke the platform (eg. ACPI) global
+methods used to cancel hibernation etc.
Writing "none" to /sys/power/pm_test causes the kernel to switch to the normal
hibernation/suspend operations. Also, when open for reading, /sys/power/pm_test
diff --git a/Documentation/powerpc/pci_iov_resource_on_powernv.txt b/Documentation/powerpc/pci_iov_resource_on_powernv.txt
new file mode 100644
index 000000000000..b55c5cd83f8d
--- /dev/null
+++ b/Documentation/powerpc/pci_iov_resource_on_powernv.txt
@@ -0,0 +1,301 @@
+Wei Yang <weiyang@linux.vnet.ibm.com>
+Benjamin Herrenschmidt <benh@au1.ibm.com>
+Bjorn Helgaas <bhelgaas@google.com>
+26 Aug 2014
+
+This document describes the hardware requirements for PCI MMIO resource
+sizing and assignment on PowerKVM and how the generic PCI code handles these
+requirements. The first two sections describe the concepts of Partitionable
+Endpoints and the implementation on P8 (IODA2). The next two sections talk
+about considerations for enabling SR-IOV on IODA2.
+
+1. Introduction to Partitionable Endpoints
+
+A Partitionable Endpoint (PE) is a way to group the various resources
+associated with a device or a set of devices to provide isolation between
+partitions (i.e., filtering of DMA, MSIs etc.) and to provide a mechanism
+to freeze a device that is causing errors in order to limit the possibility
+of propagation of bad data.
+
+There is thus, in HW, a table of PE states that contains a pair of "frozen"
+state bits (one for MMIO and one for DMA, they get set together but can be
+cleared independently) for each PE.
+
+When a PE is frozen, all stores in any direction are dropped and all loads
+return all 1's value. MSIs are also blocked. There's a bit more state that
+captures things like the details of the error that caused the freeze etc., but
+that's not critical.
+
+The interesting part is how the various PCIe transactions (MMIO, DMA, ...)
+are matched to their corresponding PEs.
+
+The following section provides a rough description of what we have on P8
+(IODA2). Keep in mind that this is all per PHB (PCI host bridge). Each PHB
+is a completely separate HW entity that replicates the entire logic, so has
+its own set of PEs, etc.
+
+2. Implementation of Partitionable Endpoints on P8 (IODA2)
+
+P8 supports up to 256 Partitionable Endpoints per PHB.
+
+ * Inbound
+
+ For DMA, MSIs and inbound PCIe error messages, we have a table (in
+ memory but accessed in HW by the chip) that provides a direct
+ correspondence between a PCIe RID (bus/dev/fn) and a PE number.
+ We call this the RTT.
+
+ - For DMA we then provide an entire address space for each PE that can
+ contain two "windows", depending on the value of PCI address bit 59.
+ Each window can be configured to be remapped via a "TCE table" (IOMMU
+ translation table), which has various configurable characteristics
+ not described here.
+
+ - For MSIs, we have two windows in the address space (one at the top of
+ the 32-bit space and one much higher) which, via a combination of the
+ address and MSI value, will result in one of the 2048 interrupts per
+ bridge being triggered. There's a PE# in the interrupt controller
+ descriptor table as well which is compared with the PE# obtained from
+ the RTT to "authorize" the device to emit that specific interrupt.
+
+ - Error messages just use the RTT.
+
+ * Outbound. That's where the tricky part is.
+
+ Like other PCI host bridges, the Power8 IODA2 PHB supports "windows"
+ from the CPU address space to the PCI address space. There is one M32
+ window and sixteen M64 windows. They have different characteristics.
+ First what they have in common: they forward a configurable portion of
+ the CPU address space to the PCIe bus and must be a naturally aligned
+ power of two in size. The rest is different:
+
+ - The M32 window:
+
+ * Is limited to 4GB in size.
+
+ * Drops the top bits of the address (above the size) and replaces
+ them with a configurable value. This is typically used to generate
+ 32-bit PCIe accesses. We configure that window at boot from FW and
+ don't touch it from Linux; it's usually set to forward a 2GB
+ portion of address space from the CPU to PCIe
+ 0x8000_0000..0xffff_ffff. (Note: The top 64KB are actually
+ reserved for MSIs but this is not a problem at this point; we just
+ need to ensure Linux doesn't assign anything there, the M32 logic
+ ignores that however and will forward in that space if we try).
+
+ * It is divided into 256 segments of equal size. A table in the chip
+ maps each segment to a PE#. That allows portions of the MMIO space
+ to be assigned to PEs on a segment granularity. For a 2GB window,
+ the segment granularity is 2GB/256 = 8MB.
+
+ Now, this is the "main" window we use in Linux today (excluding
+ SR-IOV). We basically use the trick of forcing the bridge MMIO windows
+ onto a segment alignment/granularity so that the space behind a bridge
+ can be assigned to a PE.
+
+ Ideally we would like to be able to have individual functions in PEs
+ but that would mean using a completely different address allocation
+ scheme where individual function BARs can be "grouped" to fit in one or
+ more segments.
+
+ - The M64 windows:
+
+ * Must be at least 256MB in size.
+
+ * Do not translate addresses (the address on PCIe is the same as the
+ address on the PowerBus). There is a way to also set the top 14
+ bits which are not conveyed by PowerBus but we don't use this.
+
+ * Can be configured to be segmented. When not segmented, we can
+ specify the PE# for the entire window. When segmented, a window
+ has 256 segments; however, there is no table for mapping a segment
+ to a PE#. The segment number *is* the PE#.
+
+ * Support overlaps. If an address is covered by multiple windows,
+ there's a defined ordering for which window applies.
+
+ We have code (fairly new compared to the M32 stuff) that exploits that
+ for large BARs in 64-bit space:
+
+ We configure an M64 window to cover the entire region of address space
+ that has been assigned by FW for the PHB (about 64GB, ignore the space
+ for the M32, it comes out of a different "reserve"). We configure it
+ as segmented.
+
+ Then we do the same thing as with M32, using the bridge alignment
+ trick, to match to those giant segments.
+
+ Since we cannot remap, we have two additional constraints:
+
+ - We do the PE# allocation *after* the 64-bit space has been assigned
+ because the addresses we use directly determine the PE#. We then
+ update the M32 PE# for the devices that use both 32-bit and 64-bit
+ spaces or assign the remaining PE# to 32-bit only devices.
+
+ - We cannot "group" segments in HW, so if a device ends up using more
+ than one segment, we end up with more than one PE#. There is a HW
+ mechanism to make the freeze state cascade to "companion" PEs but
+ that only works for PCIe error messages (typically used so that if
+ you freeze a switch, it freezes all its children). So we do it in
+ SW. We lose a bit of effectiveness of EEH in that case, but that's
+ the best we found. So when any of the PEs freezes, we freeze the
+ other ones for that "domain". We thus introduce the concept of
+ "master PE" which is the one used for DMA, MSIs, etc., and "secondary
+ PEs" that are used for the remaining M64 segments.
+
+ We would like to investigate using additional M64 windows in "single
+ PE" mode to overlay over specific BARs to work around some of that, for
+ example for devices with very large BARs, e.g., GPUs. It would make
+ sense, but we haven't done it yet.
+
+3. Considerations for SR-IOV on PowerKVM
+
+ * SR-IOV Background
+
+ The PCIe SR-IOV feature allows a single Physical Function (PF) to
+ support several Virtual Functions (VFs). Registers in the PF's SR-IOV
+ Capability control the number of VFs and whether they are enabled.
+
+ When VFs are enabled, they appear in Configuration Space like normal
+ PCI devices, but the BARs in VF config space headers are unusual. For
+ a non-VF device, software uses BARs in the config space header to
+ discover the BAR sizes and assign addresses for them. For VF devices,
+ software uses VF BAR registers in the *PF* SR-IOV Capability to
+ discover sizes and assign addresses. The BARs in the VF's config space
+ header are read-only zeros.
+
+ When a VF BAR in the PF SR-IOV Capability is programmed, it sets the
+ base address for all the corresponding VF(n) BARs. For example, if the
+ PF SR-IOV Capability is programmed to enable eight VFs, and it has a
+ 1MB VF BAR0, the address in that VF BAR sets the base of an 8MB region.
+ This region is divided into eight contiguous 1MB regions, each of which
+ is a BAR0 for one of the VFs. Note that even though the VF BAR
+ describes an 8MB region, the alignment requirement is for a single VF,
+ i.e., 1MB in this example.
+
+ There are several strategies for isolating VFs in PEs:
+
+ - M32 window: There's one M32 window, and it is split into 256
+ equally-sized segments. The finest granularity possible is a 256MB
+ window with 1MB segments. VF BARs that are 1MB or larger could be
+ mapped to separate PEs in this window. Each segment can be
+ individually mapped to a PE via the lookup table, so this is quite
+ flexible, but it works best when all the VF BARs are the same size. If
+ they are different sizes, the entire window has to be small enough that
+ the segment size matches the smallest VF BAR, which means larger VF
+ BARs span several segments.
+
+ - Non-segmented M64 window: A non-segmented M64 window is mapped entirely
+ to a single PE, so it could only isolate one VF.
+
+ - Single segmented M64 windows: A segmented M64 window could be used just
+ like the M32 window, but the segments can't be individually mapped to
+ PEs (the segment number is the PE#), so there isn't as much
+ flexibility. A VF with multiple BARs would have to be in a "domain" of
+ multiple PEs, which is not as well isolated as a single PE.
+
+ - Multiple segmented M64 windows: As usual, each window is split into 256
+ equally-sized segments, and the segment number is the PE#. But if we
+ use several M64 windows, they can be set to different base addresses
+ and different segment sizes. If we have VFs that each have a 1MB BAR
+ and a 32MB BAR, we could use one M64 window to assign 1MB segments and
+ another M64 window to assign 32MB segments.
+
+ Finally, the plan is to use M64 windows for SR-IOV, which will be described
+ in more detail in the next two sections. For a given VF BAR, we need to
+ effectively reserve the entire 256 segments (256 * VF BAR size) and
+ position the VF BAR to start at the beginning of a free range of
+ segments/PEs inside that M64 window.
+
+ The goal is of course to be able to give a separate PE for each VF.
+
+ The IODA2 platform has 16 M64 windows, which are used to map MMIO
+ range to PE#. Each M64 window defines one MMIO range and this range is
+ divided into 256 segments, with each segment corresponding to one PE.
+
+ We chose to leverage this M64 window to map VFs to individual PEs, since
+ SR-IOV VF BARs are all the same size.
+
+ But doing so introduces another problem: total_VFs is usually smaller
+ than the number of M64 window segments, so if we map one VF BAR directly
+ to one M64 window, some part of the M64 window will map to another
+ device's MMIO range.
+
+ IODA supports 256 PEs, so segmented windows contain 256 segments. If
+ total_VFs is less than 256, we have the situation shown in Figure 1.0, where
+ segments [total_VFs, 255] of the M64 window may map to some MMIO range on
+ other devices:
+
+ 0 1 total_VFs - 1
+ +------+------+- -+------+------+
+ | | | ... | | |
+ +------+------+- -+------+------+
+
+ VF(n) BAR space
+
+ 0 1 total_VFs - 1 255
+ +------+------+- -+------+------+- -+------+------+
+ | | | ... | | | ... | | |
+ +------+------+- -+------+------+- -+------+------+
+
+ M64 window
+
+ Figure 1.0 Direct map VF(n) BAR space
+
+ Our current solution is to allocate 256 segments even if the VF(n) BAR
+ space doesn't need that much, as shown in Figure 1.1:
+
+ 0 1 total_VFs - 1 255
+ +------+------+- -+------+------+- -+------+------+
+ | | | ... | | | ... | | |
+ +------+------+- -+------+------+- -+------+------+
+
+ VF(n) BAR space + extra
+
+ 0 1 total_VFs - 1 255
+ +------+------+- -+------+------+- -+------+------+
+ | | | ... | | | ... | | |
+ +------+------+- -+------+------+- -+------+------+
+
+ M64 window
+
+ Figure 1.1 Map VF(n) BAR space + extra
+
+ Allocating the extra space ensures that the entire M64 window will be
+ assigned to this one SR-IOV device and none of the space will be
+ available for other devices. Note that this only expands the space
+ reserved in software; there are still only total_VFs VFs, and they only
+ respond to segments [0, total_VFs - 1]. There's nothing in hardware that
+ responds to segments [total_VFs, 255].
+
+4. Implications for the Generic PCI Code
+
+The PCIe SR-IOV spec requires that the base of the VF(n) BAR space be
+aligned to the size of an individual VF BAR.
+
+In IODA2, the MMIO address determines the PE#. If the address is in an M32
+window, we can set the PE# by updating the table that translates segments
+to PE#s. Similarly, if the address is in an unsegmented M64 window, we can
+set the PE# for the window. But if it's in a segmented M64 window, the
+segment number is the PE#.
+
+Therefore, the only way to control the PE# for a VF is to change the base
+of the VF(n) BAR space in the VF BAR. If the PCI core allocates the exact
+amount of space required for the VF(n) BAR space, the VF BAR value is fixed
+and cannot be changed.
+
+On the other hand, if the PCI core allocates additional space, the VF BAR
+value can be changed as long as the entire VF(n) BAR space remains inside
+the space allocated by the core.
+
+Ideally the segment size will be the same as an individual VF BAR size.
+Then each VF will be in its own PE. The VF BARs (and therefore the PE#s)
+are contiguous. If VF0 is in PE(x), then VF(n) is in PE(x+n). If we
+allocate 256 segments, there are (256 - numVFs) choices for the PE# of VF0.
+
+If the segment size is smaller than the VF BAR size, it will take several
+segments to cover a VF BAR, and a VF will be in several PEs. This is
+possible, but the isolation isn't as good, and it reduces the number of PE#
+choices because instead of consuming only numVFs segments, the VF(n) BAR
+space will consume (numVFs * n) segments. That means there aren't as many
+available segments for adjusting the base of the VF(n) BAR space.
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index 9791e98ab49c..ded69794a5c0 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -174,7 +174,7 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the
kernel aborted a transaction:
TM_CAUSE_RESCHED Thread was rescheduled.
- TM_CAUSE_TLBI Software TLB invalide.
+ TM_CAUSE_TLBI Software TLB invalidate.
TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort
transactions for consistency will use this.
@@ -185,7 +185,7 @@ kernel aborted a transaction:
These can be checked by the user program's abort handler as TEXASR[0:7]. If
bit 7 is set, it indicates that the error is consider persistent. For example
-a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.q
+a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.
GDB
===
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 5a615c14f75d..2216eb187c21 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -8,6 +8,21 @@ If variable is of Type, use printk format specifier:
unsigned long long %llu or %llx
size_t %zu or %zx
ssize_t %zd or %zx
+ s32 %d or %x
+ u32 %u or %x
+ s64 %lld or %llx
+ u64 %llu or %llx
+
+If <type> is dependent on a config option for its size (e.g., sector_t,
+blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
+format specifier of its largest possible type and explicitly cast to it.
+Example:
+
+ printk("test: sector number/total blocks: %llu/%llu\n",
+ (unsigned long long)sector, (unsigned long long)blockcount);
+
+Reminder: sizeof() result is of type size_t.
+
Raw pointer value SHOULD be printed with %p. The kernel supports
the following extended format specifiers for pointer types:
@@ -54,6 +69,7 @@ Struct Resources:
For printing struct resources. The 'R' and 'r' specifiers result in a
printed resource with ('R') or without ('r') a decoded flags member.
+ Passed by reference.
Physical addresses types phys_addr_t:
@@ -132,6 +148,8 @@ MAC/FDDI addresses:
specifier to use reversed byte order suitable for visual interpretation
of Bluetooth addresses which are in the little endian order.
+ Passed by reference.
+
IPv4 addresses:
%pI4 1.2.3.4
@@ -146,6 +164,8 @@ IPv4 addresses:
host, network, big or little endian order addresses respectively. Where
no specifier is provided the default network/big endian order is used.
+ Passed by reference.
+
IPv6 addresses:
%pI6 0001:0002:0003:0004:0005:0006:0007:0008
@@ -160,6 +180,8 @@ IPv6 addresses:
print a compressed IPv6 address as described by
http://tools.ietf.org/html/rfc5952
+ Passed by reference.
+
IPv4/IPv6 addresses (generic, with port, flowinfo, scope):
%pIS 1.2.3.4 or 0001:0002:0003:0004:0005:0006:0007:0008
@@ -186,6 +208,8 @@ IPv4/IPv6 addresses (generic, with port, flowinfo, scope):
specifiers can be used as well and are ignored in case of an IPv6
address.
+ Passed by reference.
+
Further examples:
%pISfc 1.2.3.4 or [1:2:3:4:5:6:7:8]/123456789
@@ -204,9 +228,11 @@ UUID/GUID addresses:
lower ('l') or upper case ('L') hex characters - and big endian order
in lower ('b') or upper case ('B') hex characters.
- Where no additional specifiers are used the default little endian
+ Where no additional specifiers are used the default big endian
order with lower case hex characters will be printed.
+ Passed by reference.
+
dentry names:
%pd{,2,3,4}
%pD{,2,3,4}
@@ -216,6 +242,8 @@ dentry names:
equivalent of %s dentry->d_name.name we used to use, %pd<n> prints
n last components. %pD does the same thing for struct file.
+ Passed by reference.
+
struct va_format:
%pV
@@ -231,23 +259,30 @@ struct va_format:
Do not use this feature without some mechanism to verify the
correctness of the format string and va_list arguments.
-u64 SHOULD be printed with %llu/%llx:
+ Passed by reference.
- printk("%llu", u64_var);
+struct clk:
-s64 SHOULD be printed with %lld/%llx:
+ %pC pll1
+ %pCn pll1
+ %pCr 1560000000
- printk("%lld", s64_var);
+ For printing struct clk structures. '%pC' and '%pCn' print the name
+ (Common Clock Framework) or address (legacy clock framework) of the
+ structure; '%pCr' prints the current clock rate.
-If <type> is dependent on a config option for its size (e.g., sector_t,
-blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
-format specifier of its largest possible type and explicitly cast to it.
-Example:
+ Passed by reference.
- printk("test: sector number/total blocks: %llu/%llu\n",
- (unsigned long long)sector, (unsigned long long)blockcount);
+bitmap and its derivatives such as cpumask and nodemask:
-Reminder: sizeof() result is of type size_t.
+ %*pb 0779
+ %*pbl 0,3-6,8-10
+
+ For printing a bitmap and its derivatives such as cpumask and nodemask,
+ %*pb outputs the bitmap with the field width as the number of bits, and
+ %*pbl outputs the bitmap as a range list with the field width as the
+ number of bits.
+
+ Passed by reference.
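+
+	A minimal usage sketch (assuming the cpumask_pr_args() helper from
+	<linux/cpumask.h>, which expands to the field width and bitmap
+	pointer that %*pb/%*pbl expect):
+
+		printk("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
+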
Thank you for your cooperation and attention.
diff --git a/Documentation/scheduler/completion.txt b/Documentation/scheduler/completion.txt
index f77651eca31e..2622bc7a188b 100644
--- a/Documentation/scheduler/completion.txt
+++ b/Documentation/scheduler/completion.txt
@@ -7,24 +7,24 @@ Introduction:
-------------
If you have one or more threads of execution that must wait for some process
-to have reached a point or a specific state, completions can provide a race
-free solution to this problem. Semantically they are somewhat like a
-pthread_barriers and have similar use-cases.
+to have reached a point or a specific state, completions can provide a
+race-free solution to this problem. Semantically they are somewhat like a
+pthread_barrier and have similar use-cases.
-Completions are a code synchronization mechanism that is preferable to any
+Completions are a code synchronization mechanism which is preferable to any
misuse of locks. Any time you think of using yield() or some quirky
-msleep(1); loop to allow something else to proceed, you probably want to
+msleep(1) loop to allow something else to proceed, you probably want to
look into using one of the wait_for_completion*() calls instead. The
-advantage of using completions is clear intent of the code but also more
+advantage of using completions is clear intent of the code, but also more
efficient code as both threads can continue until the result is actually
needed.
Completions are built on top of the generic event infrastructure in Linux,
-with the event reduced to a simple flag appropriately called "done" in
-struct completion, that tells the waiting threads of execution if they
+with the event reduced to a simple flag (appropriately called "done") in
+struct completion that tells the waiting threads of execution if they
can continue safely.
-As completions are scheduling related the code is found in
+As completions are scheduling related, the code is found in
kernel/sched/completion.c - for details on completion design and
implementation see completions-design.txt
@@ -32,9 +32,9 @@ implementation see completions-design.txt
Usage:
------
-There are three parts to the using completions, the initialization of the
+There are three parts to using completions, the initialization of the
struct completion, the waiting part through a call to one of the variants of
-wait_for_completion() and the signaling side through a call to complete(),
+wait_for_completion() and the signaling side through a call to complete()
or complete_all(). Further there are some helper functions for checking the
state of completions.
@@ -50,7 +50,7 @@ handling of completions is:
providing the wait queue to place tasks on for waiting and the flag for
indicating the state of affairs.
-Completions should be named to convey the intent of the waiter. A good
+Completions should be named to convey the intent of the waiter. A good
example is:
wait_for_completion(&early_console_added);
@@ -73,7 +73,7 @@ the default state to "not available", that is, "done" is set to 0.
The re-initialization function, reinit_completion(), simply resets the
done element to "not available", thus again to 0, without touching the
-wait queue. Calling init_completion() on the same completions object is
+wait queue. Calling init_completion() twice on the same completion object is
most likely a bug as it re-initializes the queue to an empty queue and
enqueued tasks could get "lost" - use reinit_completion() in that case.
@@ -87,10 +87,17 @@ initialization should always use:
DECLARE_COMPLETION_ONSTACK(setup_done)
suitable for automatic/local variables on the stack and will make lockdep
-happy. Note also that one needs to making *sure* the completion passt to
+happy. Note also that one needs to make *sure* the completion passed to
work threads remains in-scope, and no references remain to on-stack data
when the initiating function returns.
+Using on-stack completions for code that calls any of the _timeout or
+_interruptible/_killable variants is not advisable as they will require
+additional synchronization to prevent the on-stack completion object in
+the timeout/signal cases from going out of scope. Consider using dynamically
+allocated completions when intending to use the _interruptible/_killable
+or _timeout variants of wait_for_completion().
+
Waiting for completions:
------------------------
@@ -99,34 +106,38 @@ For a thread of execution to wait for some concurrent work to finish, it
calls wait_for_completion() on the initialized completion structure.
A typical usage scenario is:
- structure completion setup_done;
+ struct completion setup_done;
init_completion(&setup_done);
- initialze_work(...,&setup_done,...)
+ initialize_work(...,&setup_done,...)
/* run non-dependent code */ /* do setup */
- wait_for_completion(&seupt_done); complete(setup_done)
+ wait_for_completion(&setup_done); complete(setup_done)
-This is not implying any temporal order of wait_for_completion() and the
+This is not implying any temporal order on wait_for_completion() and the
call to complete() - if the call to complete() happened before the call
to wait_for_completion() then the waiting side simply will continue
-immediately as all dependencies are satisfied.
+immediately as all dependencies are satisfied; if not, it will block until
+completion is signaled by complete().
-Note that wait_for_completion() is calling spin_lock_irq/spin_unlock_irq
+Note that wait_for_completion() is calling spin_lock_irq()/spin_unlock_irq(),
so it can only be called safely when you know that interrupts are enabled.
-Calling it from hard-irq context will result in hard to detect spurious
-enabling of interrupts.
+Calling it from hard-irq or irqs-off atomic contexts will result in
+hard-to-detect spurious enabling of interrupts.
wait_for_completion():
void wait_for_completion(struct completion *done):
-The default behavior is to wait without a timeout and mark the task as
+The default behavior is to wait without a timeout and to mark the task as
uninterruptible. wait_for_completion() and its variants are only safe
-in soft-interrupt or process context but not in hard-irq context.
+in process context (as they can sleep) but not in atomic context, in
+interrupt context, with irqs disabled or with preemption disabled - see also
+try_wait_for_completion() below for handling completion in atomic/interrupt
+context.
+
As all variants of wait_for_completion() can (obviously) block for a long
-time, you probably don't want to call this with held locks - see also
-try_wait_for_completion() below.
+time, you probably don't want to call this with held mutexes.
Variants available:
@@ -141,43 +152,44 @@ A common problem that occurs is to have unclean assignment of return types,
so care should be taken with assigning return-values to variables of proper
type. Checking for the specific meaning of return values also has been found
to be quite inaccurate e.g. constructs like
-if(!wait_for_completion_interruptible_timeout(...)) would execute the same
+if (!wait_for_completion_interruptible_timeout(...)) would execute the same
code path for successful completion and for the interrupted case - which is
probably not what you want.
int wait_for_completion_interruptible(struct completion *done)
-marking the task TASK_INTERRUPTIBLE. If a signal was received while waiting.
-It will return -ERESTARTSYS and 0 otherwise.
+This function marks the task TASK_INTERRUPTIBLE. If a signal was received
+while waiting it will return -ERESTARTSYS; 0 otherwise.
unsigned long wait_for_completion_timeout(struct completion *done,
unsigned long timeout)
-The task is marked as TASK_UNINTERRUPTIBLE and will wait at most timeout
-(in jiffies). If timeout occurs it return 0 else the remaining time in
-jiffies (but at least 1). Timeouts are preferably passed by msecs_to_jiffies()
-or usecs_to_jiffies(). If the returned timeout value is deliberately ignored
-a comment should probably explain why (e.g. see drivers/mfd/wm8350-core.c
-wm8350_read_auxadc())
+The task is marked as TASK_UNINTERRUPTIBLE and will wait at most 'timeout'
+(in jiffies). If timeout occurs it returns 0 else the remaining time in
+jiffies (but at least 1). Timeouts are preferably calculated with
+msecs_to_jiffies() or usecs_to_jiffies(). If the returned timeout value is
+deliberately ignored a comment should probably explain why (e.g. see
+drivers/mfd/wm8350-core.c wm8350_read_auxadc())
long wait_for_completion_interruptible_timeout(
struct completion *done, unsigned long timeout)
-passing a timeout in jiffies and marking the task as TASK_INTERRUPTIBLE. If a
-signal was received it will return -ERESTARTSYS, 0 if completion timed-out and
-the remaining time in jiffies if completion occurred.
+This function passes a timeout in jiffies and marks the task as
+TASK_INTERRUPTIBLE. If a signal was received it will return -ERESTARTSYS;
+otherwise it returns 0 if the completion timed out or the remaining time in
+jiffies if completion occurred.
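+
+A minimal sketch of handling all three outcomes (assuming a completion
+named setup_done and an illustrative 100 ms budget):
+
+	long t;
+
+	t = wait_for_completion_interruptible_timeout(&setup_done,
+						      msecs_to_jiffies(100));
+	if (t == -ERESTARTSYS)
+		return t;		/* interrupted by a signal */
+	if (t == 0)
+		return -ETIMEDOUT;	/* timed out, no completion */
+	/* t > 0: completed, t is the remaining time in jiffies */
+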
-Further variants include _killable which passes TASK_KILLABLE as the
-designated tasks state and will return a -ERESTARTSYS if interrupted or
-else 0 if completions was achieved as well as a _timeout variant.
+Further variants include _killable which uses TASK_KILLABLE as the
+designated tasks state and will return -ERESTARTSYS if it is interrupted or
+else 0 if completion was achieved. There is a _timeout variant as well:
long wait_for_completion_killable(struct completion *done)
long wait_for_completion_killable_timeout(struct completion *done,
unsigned long timeout)
-The _io variants wait_for_completion_io behave the same as the non-_io
+The _io variants wait_for_completion_io() behave the same as the non-_io
variants, except for accounting waiting time as waiting on IO, which has
-an impact on how scheduling is calculated.
+an impact on how the task is accounted in scheduling stats.
void wait_for_completion_io(struct completion *done)
unsigned long wait_for_completion_io_timeout(struct completion *done
@@ -187,13 +199,13 @@ an impact on how scheduling is calculated.
Signaling completions:
----------------------
-A thread of execution that wants to signal that the conditions for
-continuation have been achieved calls complete() to signal exactly one
-of the waiters that it can continue.
+A thread that wants to signal that the conditions for continuation have been
+achieved calls complete() to signal exactly one of the waiters that it can
+continue.
void complete(struct completion *done)
-or calls complete_all to signal all current and future waiters.
+or calls complete_all() to signal all current and future waiters.
void complete_all(struct completion *done)
@@ -205,32 +217,32 @@ wakeup order is the same in which they were enqueued (FIFO order).
If complete() is called multiple times then this will allow for that number
of waiters to continue - each call to complete() will simply increment the
done element. Calling complete_all() multiple times is a bug though. Both
-complete() and complete_all() can be called in hard-irq context safely.
+complete() and complete_all() can be called in hard-irq/atomic context safely.
There only can be one thread calling complete() or complete_all() on a
-particular struct completions at any time - serialized through the wait
+particular struct completion at any time - serialized through the wait
queue spinlock. Any such concurrent calls to complete() or complete_all()
probably are a design bug.
Signaling completion from hard-irq context is fine as it will appropriately
-lock with spin_lock_irqsave/spin_unlock_irqrestore.
+lock with spin_lock_irqsave/spin_unlock_irqrestore and it will never sleep.
try_wait_for_completion()/completion_done():
--------------------------------------------
-The try_wait_for_completion will not put the thread on the wait queue but
-rather returns false if it would need to enqueue (block) the thread, else it
-consumes any posted completions and returns true.
+The try_wait_for_completion() function will not put the thread on the wait
+queue but rather returns false if it would need to enqueue (block) the thread,
+else it consumes one posted completion and returns true.
- bool try_wait_for_completion(struct completion *done)
+ bool try_wait_for_completion(struct completion *done)
-Finally to check state of a completions without changing it in any way is
-provided by completion_done() returning false if there are any posted
-completion that was not yet consumed by waiters implying that there are
-waiters and true otherwise;
+Finally, to check the state of a completion without changing it in any way,
+call completion_done(), which returns false if there are no posted
+completions that were not yet consumed by waiters (implying that there are
+waiters) and true otherwise;
- bool completion_done(struct completion *done)
+ bool completion_done(struct completion *done)
Both try_wait_for_completion() and completion_done() are safe to be called in
-hard-irq context.
+hard-irq or atomic context.
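+
+A short sketch of non-blocking use from atomic context (the demo_dev
+structure and its members are hypothetical):
+
+	/* e.g. called from a timer callback or a hard-irq handler */
+	static void demo_poll(struct demo_dev *dev)
+	{
+		/* consumes one posted completion, never blocks */
+		if (try_wait_for_completion(&dev->data_ready))
+			schedule_work(&dev->process_work);
+
+		/* peek without consuming: has complete() been called? */
+		if (completion_done(&dev->setup_done))
+			dev->setup_finished = true;
+	}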
diff --git a/Documentation/security/Smack.txt b/Documentation/security/Smack.txt
index b6ef7e9dba30..abc82f85215b 100644
--- a/Documentation/security/Smack.txt
+++ b/Documentation/security/Smack.txt
@@ -33,11 +33,18 @@ The current git repository for Smack user space is:
git://github.com/smack-team/smack.git
This should make and install on most modern distributions.
-There are three commands included in smackutil:
+There are five commands included in smackutil:
-smackload - properly formats data for writing to /smack/load
-smackcipso - properly formats data for writing to /smack/cipso
chsmack - display or set Smack extended attribute values
+smackctl - load the Smack access rules
+smackaccess - report if a process with one label has access
+ to an object with another
+
+These two commands are obsolete with the introduction of
+the smackfs/load2 and smackfs/cipso2 interfaces.
+
+smackload - properly formats data for writing to smackfs/load
+smackcipso - properly formats data for writing to smackfs/cipso
In keeping with the intent of Smack, configuration data is
minimal and not strictly required. The most important
@@ -47,9 +54,9 @@ of this, but it can be manually as well.
Add this line to /etc/fstab:
- smackfs /smack smackfs smackfsdef=* 0 0
+ smackfs /sys/fs/smackfs smackfs defaults 0 0
-and create the /smack directory for mounting.
+The /sys/fs/smackfs directory is created by the kernel.
Smack uses extended attributes (xattrs) to store labels on filesystem
objects. The attributes are stored in the extended attribute security
@@ -92,13 +99,13 @@ There are multiple ways to set a Smack label on a file:
# attr -S -s SMACK64 -V "value" path
# chsmack -a value path
-A process can see the smack label it is running with by
+A process can see the Smack label it is running with by
reading /proc/self/attr/current. A process with CAP_MAC_ADMIN
-can set the process smack by writing there.
+can set the process Smack by writing there.
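+
+A minimal userspace sketch of reading the current label directly
+(error handling kept short):
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char label[256];
+		ssize_t len;
+		int fd = open("/proc/self/attr/current", O_RDONLY);
+
+		if (fd < 0)
+			return 1;
+		len = read(fd, label, sizeof(label) - 1);
+		close(fd);
+		if (len < 0)
+			return 1;
+		label[len] = '\0';
+		printf("running with Smack label \"%s\"\n", label);
+		return 0;
+	}
+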
Most Smack configuration is accomplished by writing to files
-in the smackfs filesystem. This pseudo-filesystem is usually
-mounted on /smack.
+in the smackfs filesystem. This pseudo-filesystem is mounted
+on /sys/fs/smackfs.
access
This interface reports whether a subject with the specified
@@ -206,23 +213,30 @@ onlycap
file or cleared by writing "-" to the file.
ptrace
This is used to define the current ptrace policy
- 0 - default: this is the policy that relies on smack access rules.
+ 0 - default: this is the policy that relies on Smack access rules.
For the PTRACE_READ a subject needs to have a read access on
object. For the PTRACE_ATTACH a read-write access is required.
1 - exact: this is the policy that limits PTRACE_ATTACH. Attach is
only allowed when subject's and object's labels are equal.
- PTRACE_READ is not affected. Can be overriden with CAP_SYS_PTRACE.
+ PTRACE_READ is not affected. Can be overridden with CAP_SYS_PTRACE.
2 - draconian: this policy behaves like the 'exact' above with an
- exception that it can't be overriden with CAP_SYS_PTRACE.
+ exception that it can't be overridden with CAP_SYS_PTRACE.
revoke-subject
Writing a Smack label here sets the access to '-' for all access
rules with that subject label.
+unconfined
+ If the kernel is configured with CONFIG_SECURITY_SMACK_BRINGUP
+ a process with CAP_MAC_ADMIN can write a label into this interface.
+ Thereafter, accesses that involve that label will be logged and
+ the access permitted if it wouldn't be otherwise. Note that this
+ is dangerous and can ruin the proper labeling of your system.
+ It should never be used in production.
You can add access rules in /etc/smack/accesses. They take the form:
subjectlabel objectlabel access
-access is a combination of the letters rwxa which specify the
+access is a combination of the letters rwxatb which specify the
kind of access permitted a subject with subjectlabel on an
object with objectlabel. If there is no rule no access is allowed.
@@ -318,8 +332,9 @@ each of the subject and the object.
Labels
-Smack labels are ASCII character strings, one to twenty-three characters in
-length. Single character labels using special characters, that being anything
+Smack labels are ASCII character strings. They can be up to 255 characters
+long, but keeping them to twenty-three characters is recommended.
+Single character labels using special characters, that being anything
other than a letter or digit, are reserved for use by the Smack development
team. Smack labels are unstructured, case sensitive, and the only operation
ever performed on them is comparison for equality. Smack labels cannot
@@ -335,10 +350,9 @@ There are some predefined labels:
? Pronounced "huh", a single question mark character.
@ Pronounced "web", a single at sign character.
-Every task on a Smack system is assigned a label. System tasks, such as
-init(8) and systems daemons, are run with the floor ("_") label. User tasks
-are assigned labels according to the specification found in the
-/etc/smack/user configuration file.
+Every task on a Smack system is assigned a label. The Smack label
+of a process will usually be assigned by the system initialization
+mechanism.
Access Rules
@@ -393,6 +407,7 @@ describe access modes:
w: indicates that write access should be granted.
x: indicates that execute access should be granted.
t: indicates that the rule requests transmutation.
+ b: indicates that the rule should be reported for bring-up.
Uppercase values for the specification letters are allowed as well.
Access mode specifications can be in any order. Examples of acceptable rules
@@ -402,6 +417,7 @@ are:
Secret Unclass R
Manager Game x
User HR w
+ Snap Crackle rwxatb
New Old rRrRr
Closed Off -
@@ -413,7 +429,7 @@ Examples of unacceptable rules are:
Spaces are not allowed in labels. Since a subject always has access to files
with the same label specifying a rule for that case is pointless. Only
-valid letters (rwxatRWXAT) and the dash ('-') character are allowed in
+valid letters (rwxatbRWXATB) and the dash ('-') character are allowed in
access specifications. The dash is a placeholder, so "a-r" is the same
as "ar". A lone dash is used to specify that no access should be allowed.
@@ -462,16 +478,11 @@ receiver. The receiver is not required to have read access to the sender.
Setting Access Rules
The configuration file /etc/smack/accesses contains the rules to be set at
-system startup. The contents are written to the special file /smack/load.
-Rules can be written to /smack/load at any time and take effect immediately.
-For any pair of subject and object labels there can be only one rule, with the
-most recently specified overriding any earlier specification.
-
-The program smackload is provided to ensure data is formatted
-properly when written to /smack/load. This program reads lines
-of the form
-
- subjectlabel objectlabel mode.
+system startup. The contents are written to the special file
+/sys/fs/smackfs/load2. Rules can be added at any time and take effect
+immediately. For any pair of subject and object labels there can be only
+one rule, with the most recently specified overriding any earlier
+specification.
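+
+Rules can also be written programmatically. A minimal sketch (the labels
+are only examples, and the writer needs CAP_MAC_ADMIN):
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		const char rule[] = "TopSecret Secret rx";
+		int fd = open("/sys/fs/smackfs/load2", O_WRONLY);
+		int ret = 0;
+
+		if (fd < 0)
+			return 1;
+		if (write(fd, rule, strlen(rule)) < 0)
+			ret = 1;
+		close(fd);
+		return ret;
+	}
+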
Task Attribute
@@ -488,7 +499,10 @@ only be changed by a process with privilege.
Privilege
-A process with CAP_MAC_OVERRIDE is privileged.
+A process with CAP_MAC_OVERRIDE or CAP_MAC_ADMIN is privileged.
+CAP_MAC_OVERRIDE allows the process access to objects it would
+be denied otherwise. CAP_MAC_ADMIN allows a process to change
+Smack data, including rules and attributes.
Smack Networking
@@ -510,14 +524,14 @@ intervention. Unlabeled packets that come into the system will be given the
ambient label.
Smack requires configuration in the case where packets from a system that is
-not smack that speaks CIPSO may be encountered. Usually this will be a Trusted
+not Smack that speaks CIPSO may be encountered. Usually this will be a Trusted
Solaris system, but there are other, less widely deployed systems out there.
CIPSO provides 3 important values, a Domain Of Interpretation (DOI), a level,
and a category set with each packet. The DOI is intended to identify a group
of systems that use compatible labeling schemes, and the DOI specified on the
-smack system must match that of the remote system or packets will be
-discarded. The DOI is 3 by default. The value can be read from /smack/doi and
-can be changed by writing to /smack/doi.
+Smack system must match that of the remote system or packets will be
+discarded. The DOI is 3 by default. The value can be read from
+/sys/fs/smackfs/doi and can be changed by writing to /sys/fs/smackfs/doi.
The label and category set are mapped to a Smack label as defined in
/etc/smack/cipso.
@@ -539,15 +553,13 @@ The ":" and "," characters are permitted in a Smack label but have no special
meaning.
The mapping of Smack labels to CIPSO values is defined by writing to
-/smack/cipso. Again, the format of data written to this special file
-is highly restrictive, so the program smackcipso is provided to
-ensure the writes are done properly. This program takes mappings
-on the standard input and sends them to /smack/cipso properly.
+/sys/fs/smackfs/cipso2.
In addition to explicit mappings Smack supports direct CIPSO mappings. One
CIPSO level is used to indicate that the category set passed in the packet is
in fact an encoding of the Smack label. The level used is 250 by default. The
-value can be read from /smack/direct and changed by writing to /smack/direct.
+value can be read from /sys/fs/smackfs/direct and changed by writing to
+/sys/fs/smackfs/direct.
Socket Attributes
@@ -565,8 +577,8 @@ sockets.
Smack Netlabel Exceptions
You will often find that your labeled application has to talk to the outside,
-unlabeled world. To do this there's a special file /smack/netlabel where you can
-add some exceptions in the form of :
+unlabeled world. To do this there's a special file /sys/fs/smackfs/netlabel
+where you can add some exceptions in the form of :
@IP1 LABEL1 or
@IP2/MASK LABEL2
@@ -574,22 +586,22 @@ It means that your application will have unlabeled access to @IP1 if it has
write access on LABEL1, and access to the subnet @IP2/MASK if it has write
access on LABEL2.
-Entries in the /smack/netlabel file are matched by longest mask first, like in
-classless IPv4 routing.
+Entries in the /sys/fs/smackfs/netlabel file are matched by longest mask
+first, like in classless IPv4 routing.
A special label '@' and an option '-CIPSO' can be used there :
@ means Internet, any application with any label has access to it
-CIPSO means standard CIPSO networking
If you don't know what CIPSO is and don't plan to use it, you can just do :
-echo 127.0.0.1 -CIPSO > /smack/netlabel
-echo 0.0.0.0/0 @ > /smack/netlabel
+echo 127.0.0.1 -CIPSO > /sys/fs/smackfs/netlabel
+echo 0.0.0.0/0 @ > /sys/fs/smackfs/netlabel
If you use CIPSO on your 192.168.0.0/16 local network and need also unlabeled
Internet access, you can have :
-echo 127.0.0.1 -CIPSO > /smack/netlabel
-echo 192.168.0.0/16 -CIPSO > /smack/netlabel
-echo 0.0.0.0/0 @ > /smack/netlabel
+echo 127.0.0.1 -CIPSO > /sys/fs/smackfs/netlabel
+echo 192.168.0.0/16 -CIPSO > /sys/fs/smackfs/netlabel
+echo 0.0.0.0/0 @ > /sys/fs/smackfs/netlabel
Writing Applications for Smack
@@ -676,7 +688,7 @@ Smack auditing
If you want Smack auditing of security events, you need to set CONFIG_AUDIT
in your kernel configuration.
By default, all denied events will be audited. You can change this behavior by
-writing a single character to the /smack/logging file :
+writing a single character to the /sys/fs/smackfs/logging file :
0 : no logging
1 : log denied (default)
2 : log accepted
@@ -686,3 +698,20 @@ Events are logged as 'key=value' pairs, for each event you at least will get
the subject, the object, the rights requested, the action, the kernel function
that triggered the event, plus other pairs depending on the type of event
audited.
+
+Bringup Mode
+
+Bringup mode provides logging features that can make application
+configuration and system bringup easier. Configure the kernel with
+CONFIG_SECURITY_SMACK_BRINGUP to enable these features. When bringup
+mode is enabled accesses that succeed due to rules marked with the "b"
+access mode will be logged. When a new label is introduced for processes,
+rules can be added aggressively, marked with the "b". The logging allows
+tracking of which rules actually get used for that label.
+
+Another feature of bringup mode is the "unconfined" option. Writing
+a label to /sys/fs/smackfs/unconfined makes subjects with that label
+able to access any object, and objects with that label accessible to
+all subjects. Any access that is granted because a label is unconfined
+is logged. This feature is dangerous, as files and directories may
+be created in places they couldn't if the policy were being enforced.
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
index 1e52d67d0abf..dbe6623fed1c 100644
--- a/Documentation/serial/tty.txt
+++ b/Documentation/serial/tty.txt
@@ -198,6 +198,9 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
TTY_OTHER_CLOSED Device is a pty and the other side has closed.
+TTY_OTHER_DONE Device is a pty and the other side has closed and
+ all pending input processing has been completed.
+
TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
smaller chunks.
diff --git a/Documentation/sound/alsa/ControlNames.txt b/Documentation/sound/alsa/ControlNames.txt
index 79a6127863ca..3fc1cf50d28e 100644
--- a/Documentation/sound/alsa/ControlNames.txt
+++ b/Documentation/sound/alsa/ControlNames.txt
@@ -71,11 +71,11 @@ SOURCE:
HDMI/DP (either HDMI or DisplayPort)
Exceptions (deprecated):
- [Digital] Capture Source
- [Digital] Capture Switch (aka input gain switch)
- [Digital] Capture Volume (aka input gain volume)
- [Digital] Playback Switch (aka output gain switch)
- [Digital] Playback Volume (aka output gain volume)
+ [Analogue|Digital] Capture Source
+ [Analogue|Digital] Capture Switch (aka input gain switch)
+ [Analogue|Digital] Capture Volume (aka input gain volume)
+ [Analogue|Digital] Playback Switch (aka output gain switch)
+ [Analogue|Digital] Playback Volume (aka output gain volume)
Tone Control - Switch
Tone Control - Bass
Tone Control - Treble
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 42a0a39b77e6..e7193aac669c 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -466,7 +466,11 @@ The generic parser supports the following hints:
- add_jack_modes (bool): add "xxx Jack Mode" enum controls to each
I/O jack for allowing to change the headphone amp and mic bias VREF
capabilities
-- power_down_unused (bool): power down the unused widgets
+- power_save_node (bool): advanced power management for each widget,
+ controlling the power state (D0/D3) of each widget node depending on
+ the actual pin and stream states
+- power_down_unused (bool): power down the unused widgets, a subset of
+ power_save_node, and will be dropped in the future
- add_hp_mic (bool): add the headphone to capture source if possible
- hp_mic_detect (bool): enable/disable the hp/mic shared input for a
single built-in mic case; default true
diff --git a/Documentation/sound/alsa/timestamping.txt b/Documentation/sound/alsa/timestamping.txt
new file mode 100644
index 000000000000..0b191a23f534
--- /dev/null
+++ b/Documentation/sound/alsa/timestamping.txt
@@ -0,0 +1,200 @@
+The ALSA API can provide two different system timestamps:
+
+- Trigger_tstamp is the system time snapshot taken when the .trigger
+callback is invoked. This snapshot is taken by the ALSA core in the
+general case, but specific hardware may have synchronization
+capabilities or conversely may only be able to provide a correct
+estimate with a delay. In the latter two cases, the low-level driver
+is responsible for updating the trigger_tstamp at the most appropriate
+and precise moment. Applications should not rely solely on the first
+trigger_tstamp but update their internal calculations if the driver
+provides a refined estimate with a delay.
+
+- tstamp is the current system timestamp updated during the last
+event or application query.
+The difference (tstamp - trigger_tstamp) defines the elapsed time.
+
+The ALSA API provides two basic pieces of information, avail
+and delay, which combined with the trigger and current system
+timestamps allow applications to keep track of the 'fullness' of
+the ring buffer and the amount of queued samples.
+
+The use of these different pointers and time information depends on
+the application needs:
+
+- 'avail' reports how much can be written in the ring buffer
+- 'delay' reports the time it will take to hear a new sample after all
+queued samples have been played out.
+
+When timestamps are enabled, the avail/delay information is reported
+along with a snapshot of system time. Applications can select from
+CLOCK_REALTIME (NTP corrections including going backwards),
+CLOCK_MONOTONIC (NTP corrections but never going backwards),
+CLOCK_MONOTONIC_RAW (without NTP corrections) and change the mode
+dynamically with sw_params.
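+
+A minimal alsa-lib sketch (userspace, <alsa/asoundlib.h>) of enabling
+timestamps and selecting the clock, assuming an already opened and
+configured handle 'pcm' and an alsa-lib recent enough to provide
+snd_pcm_sw_params_set_tstamp_type():
+
+	snd_pcm_sw_params_t *sw;
+
+	snd_pcm_sw_params_alloca(&sw);
+	snd_pcm_sw_params_current(pcm, sw);
+	/* report a system timestamp with every status/delay update */
+	snd_pcm_sw_params_set_tstamp_mode(pcm, sw, SND_PCM_TSTAMP_ENABLE);
+	/* pick the clock source */
+	snd_pcm_sw_params_set_tstamp_type(pcm, sw,
+					  SND_PCM_TSTAMP_TYPE_MONOTONIC);
+	snd_pcm_sw_params(pcm, sw);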
+
+
+The ALSA API also provides an audio_tstamp which reflects the passage
+of time as measured by different components of audio hardware. In
+ascii-art, this could be represented as follows (for the playback
+case):
+
+
+--------------------------------------------------------------> time
+ ^ ^ ^ ^ ^
+ | | | | |
+ analog link dma app FullBuffer
+ time time time time time
+ | | | | |
+ |< codec delay >|<--hw delay-->|<queued samples>|<---avail->|
+ |<----------------- delay---------------------->| |
+ |<----ring buffer length---->|
+
+The analog time is taken at the last stage of the playback, as close
+as possible to the actual transducer.
+
+The link time is taken at the output of the SOC/chipset as the samples
+are pushed on a link. The link time can be directly measured if
+supported in hardware by sample counters or wallclocks (e.g. with
+HDAudio 24MHz or PTP clock for networked solutions) or indirectly
+estimated (e.g. with the frame counter in USB).
+
+The DMA time is measured using counters - typically the least reliable
+of all measurements due to the bursty nature of DMA transfers.
+
+The app time corresponds to the time tracked by an application after
+writing in the ring buffer.
+
+The application can query what the hardware supports, define which
+audio time it wants reported by selecting the relevant settings in
+audio_tstamp_config fields, get an estimate of the timestamp
+accuracy. It can also request the delay-to-analog be included in the
+measurement. Direct access to the link time is very interesting on
+platforms that provide an embedded DSP; measuring directly the link
+time with dedicated hardware, possibly synchronized with system time,
+removes the need to keep track of internal DSP processing times and
+latency.
+
+In case the application requests an audio tstamp that is not supported
+in hardware/low-level driver, the type is overridden as DEFAULT and the
+timestamp will report the DMA time based on the hw_pointer value.
+
+For backwards compatibility with previous implementations that did not
+provide timestamp selection, with a zero-valued COMPAT timestamp type
+the results will default to the HDAudio wall clock for playback
+streams and to the DMA time (hw_ptr) in all other cases.
+
+The audio timestamp accuracy can be returned to user-space, so that
+appropriate decisions are made:
+
+- for dma time (default), the granularity of the transfers can be
+ inferred from the steps between updates and in turn provide
+ information on how much the application pointer can be rewound
+ safely.
+
+- the link time can be used to track long-term drifts between audio
+ and system time using the (tstamp-trigger_tstamp)/audio_tstamp
+ ratio, the precision helps define how much smoothing/low-pass
+ filtering is required. The link time can be either reset on startup
+ or reported as is (the latter being useful to compare progress of
+ different streams - but may require the wallclock to be always
+ running and not wrap-around during idle periods). If supported in
+ hardware, the absolute link time could also be used to define a
+ precise start time (patches WIP)
+
+- including the delay in the audio timestamp may
+ counter-intuitively not increase the precision of timestamps, e.g. if a
+ codec includes variable-latency DSP processing or a chain of
+ hardware components the delay is typically not known with precision.
+
+The accuracy is reported in nanosecond units (using an unsigned 32-bit
+word), which gives a max precision of 4.29s, more than enough for
+audio applications...
+
+Due to the varied nature of timestamping needs, even for a single
+application, the audio_tstamp_config can be changed dynamically. In
+the STATUS ioctl, the parameters are read-only and do not allow for
+any application selection. To work around this limitation without
+impacting legacy applications, a new STATUS_EXT ioctl is introduced
+with read/write parameters. ALSA-lib will be modified to make use of
+STATUS_EXT and effectively deprecate STATUS.
+
+The ALSA API only allows for a single audio timestamp to be reported
+at a time. This is a conscious design decision: reading the audio
+timestamps from hardware registers or from IPC takes time, and the more
+timestamps are read the more imprecise the combined measurements
+are. To avoid any interpretation issues, a single (system, audio)
+timestamp is reported. Applications that need different timestamps
+will be required to issue multiple queries and perform an
+interpolation of the results.
+
+In some hardware-specific configuration, the system timestamp is
+latched by a low-level audio subsystem, and the information provided
+back to the driver. Due to potential delays in the communication with
+the hardware, there is a risk of misalignment with the avail and delay
+information. To make sure applications are not confused, a
+driver_timestamp field is added in the snd_pcm_status structure; this
+timestamp shows when the information is put together by the driver
+before returning from the STATUS and STATUS_EXT ioctl. In most cases
+this driver_timestamp will be identical to the regular system tstamp.
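+
+A minimal alsa-lib sketch of reading one (system, audio) timestamp pair
+together with the delay (again assuming an opened handle 'pcm' and an
+alsa-lib that provides snd_pcm_status_get_audio_htstamp()):
+
+	snd_pcm_status_t *status;
+	snd_htimestamp_t trigger_ts, system_ts, audio_ts;
+	snd_pcm_sframes_t delay;
+
+	snd_pcm_status_alloca(&status);
+	if (snd_pcm_status(pcm, status) == 0) {
+		snd_pcm_status_get_trigger_htstamp(status, &trigger_ts);
+		snd_pcm_status_get_htstamp(status, &system_ts);
+		snd_pcm_status_get_audio_htstamp(status, &audio_ts);
+		delay = snd_pcm_status_get_delay(status);
+		/* (system_ts - trigger_ts) is the elapsed system time,
+		 * audio_ts is the audio time reported by the driver */
+	}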
+
+Examples of timestamping with HDAudio:
+
+1. DMA timestamp, no compensation for DMA+analog delay
+$ ./audio_time -p --ts_type=1
+playback: systime: 341121338 nsec, audio time 342000000 nsec, systime delta -878662
+playback: systime: 426236663 nsec, audio time 427187500 nsec, systime delta -950837
+playback: systime: 597080580 nsec, audio time 598000000 nsec, systime delta -919420
+playback: systime: 682059782 nsec, audio time 683020833 nsec, systime delta -961051
+playback: systime: 852896415 nsec, audio time 853854166 nsec, systime delta -957751
+playback: systime: 937903344 nsec, audio time 938854166 nsec, systime delta -950822
+
+2. DMA timestamp, compensation for DMA+analog delay
+$ ./audio_time -p --ts_type=1 -d
+playback: systime: 341053347 nsec, audio time 341062500 nsec, systime delta -9153
+playback: systime: 426072447 nsec, audio time 426062500 nsec, systime delta 9947
+playback: systime: 596899518 nsec, audio time 596895833 nsec, systime delta 3685
+playback: systime: 681915317 nsec, audio time 681916666 nsec, systime delta -1349
+playback: systime: 852741306 nsec, audio time 852750000 nsec, systime delta -8694
+
+3. link timestamp, compensation for DMA+analog delay
+$ ./audio_time -p --ts_type=2 -d
+playback: systime: 341060004 nsec, audio time 341062791 nsec, systime delta -2787
+playback: systime: 426242074 nsec, audio time 426244875 nsec, systime delta -2801
+playback: systime: 597080992 nsec, audio time 597084583 nsec, systime delta -3591
+playback: systime: 682084512 nsec, audio time 682088291 nsec, systime delta -3779
+playback: systime: 852936229 nsec, audio time 852940916 nsec, systime delta -4687
+playback: systime: 938107562 nsec, audio time 938112708 nsec, systime delta -5146
+
+Example 1 shows that the timestamp at the DMA level is close to 1ms
+ahead of the actual playback time (as a side note, this sort of
+measurement can help define rewind safeguards). Compensating for the
+DMA-link delay in example 2 helps remove the hardware buffering, but
+the information is still very jittery, with up to one sample of
+error. In example 3 where the timestamps are measured with the link
+wallclock, the timestamps show a monotonic behavior and a lower
+dispersion.
+
+Examples 3 and 4 are with USB audio class. Example 3 shows a high
+offset between audio time and system time due to buffering. Example 4
+shows how compensating for the delay exposes a 1ms accuracy (due to
+the use of the frame counter by the driver).
+
+Example 3: DMA timestamp, no compensation for delay, delta of ~5ms
+$ ./audio_time -p -Dhw:1 -t1
+playback: systime: 120174019 nsec, audio time 125000000 nsec, systime delta -4825981
+playback: systime: 245041136 nsec, audio time 250000000 nsec, systime delta -4958864
+playback: systime: 370106088 nsec, audio time 375000000 nsec, systime delta -4893912
+playback: systime: 495040065 nsec, audio time 500000000 nsec, systime delta -4959935
+playback: systime: 620038179 nsec, audio time 625000000 nsec, systime delta -4961821
+playback: systime: 745087741 nsec, audio time 750000000 nsec, systime delta -4912259
+playback: systime: 870037336 nsec, audio time 875000000 nsec, systime delta -4962664
+
+Example 4: DMA timestamp, compensation for delay, delay of ~1ms
+$ ./audio_time -p -Dhw:1 -t1 -d
+playback: systime: 120190520 nsec, audio time 120000000 nsec, systime delta 190520
+playback: systime: 245036740 nsec, audio time 244000000 nsec, systime delta 1036740
+playback: systime: 370034081 nsec, audio time 369000000 nsec, systime delta 1034081
+playback: systime: 495159907 nsec, audio time 494000000 nsec, systime delta 1159907
+playback: systime: 620098824 nsec, audio time 619000000 nsec, systime delta 1098824
+playback: systime: 745031847 nsec, audio time 744000000 nsec, systime delta 1031847
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index 94f574b0fdb2..135b3f592b83 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -80,7 +80,7 @@ static void hex_dump(const void *src, size_t length, size_t line_size, char *pre
* Unescape - process hexadecimal escape character
* converts shell input "\x23" -> 0x23
*/
-int unespcape(char *_dst, char *_src, size_t len)
+static int unescape(char *_dst, char *_src, size_t len)
{
int ret = 0;
char *src = _src;
@@ -304,7 +304,7 @@ int main(int argc, char *argv[])
size = strlen(input_tx+1);
tx = malloc(size);
rx = malloc(size);
- size = unespcape((char *)tx, input_tx, size);
+ size = unescape((char *)tx, input_tx, size);
transfer(fd, tx, rx, size);
free(rx);
free(tx);
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 83ab25660fc9..c831001c45f1 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -77,12 +77,14 @@ show up in /proc/sys/kernel:
- shmmax [ sysv ipc ]
- shmmni
- softlockup_all_cpu_backtrace
+- soft_watchdog
- stop-a [ SPARC only ]
- sysrq ==> Documentation/sysrq.txt
- sysctl_writes_strict
- tainted
- threads-max
- unknown_nmi_panic
+- watchdog
- watchdog_thresh
- version
@@ -417,16 +419,23 @@ successful IPC object allocation.
nmi_watchdog:
-Enables/Disables the NMI watchdog on x86 systems. When the value is
-non-zero the NMI watchdog is enabled and will continuously test all
-online cpus to determine whether or not they are still functioning
-properly. Currently, passing "nmi_watchdog=" parameter at boot time is
-required for this function to work.
+This parameter can be used to control the NMI watchdog
+(i.e. the hard lockup detector) on x86 systems.
-If LAPIC NMI watchdog method is in use (nmi_watchdog=2 kernel
-parameter), the NMI watchdog shares registers with oprofile. By
-disabling the NMI watchdog, oprofile may have more registers to
-utilize.
+ 0 - disable the hard lockup detector
+ 1 - enable the hard lockup detector
+
+The hard lockup detector monitors each CPU for its ability to respond to
+timer interrupts. The mechanism utilizes CPU performance counter registers
+that are programmed to generate Non-Maskable Interrupts (NMIs) periodically
+while a CPU is busy. Hence, the alternative name 'NMI watchdog'.
+
+The NMI watchdog is disabled by default if the kernel is running as a guest
+in a KVM virtual machine. This default can be overridden by adding
+
+ nmi_watchdog=1
+
+to the guest kernel command line (see Documentation/kernel-parameters.txt).
==============================================================
@@ -816,6 +825,22 @@ NMI.
==============================================================
+soft_watchdog
+
+This parameter can be used to control the soft lockup detector.
+
+ 0 - disable the soft lockup detector
+ 1 - enable the soft lockup detector
+
+The soft lockup detector monitors CPUs for threads that are hogging the CPUs
+without rescheduling voluntarily, and thus prevent the 'watchdog/N' threads
+from running. The mechanism depends on the CPUs' ability to respond to timer
+interrupts, which are needed for the 'watchdog/N' threads to be woken up by
+the watchdog timer function, otherwise the NMI watchdog - if enabled - can
+detect a hard lockup condition.
+
+==============================================================
+
tainted:
Non-zero if the kernel has been tainted. Numeric values, which
@@ -847,6 +872,27 @@ can be ORed together:
==============================================================
+threads-max
+
+This value controls the maximum number of threads that can be created
+using fork().
+
+During initialization the kernel sets this value such that even if the
+maximum number of threads is created, the thread structures occupy only
+a part (1/8th) of the available RAM pages.
+
+The minimum value that can be written to threads-max is 20.
+The maximum value that can be written to threads-max is given by the
+constant FUTEX_TID_MASK (0x3fffffff).
+If a value outside of this range is written to threads-max an error
+EINVAL occurs.
+
+The value written is checked against the available RAM pages. If the
+thread structures would occupy too much (more than 1/8th) of the
+available RAM pages threads-max is reduced accordingly.
+
+==============================================================
+
unknown_nmi_panic:
The value in this file affects behavior of handling NMI. When the
@@ -858,6 +904,25 @@ example. If a system hangs up, try pressing the NMI switch.
==============================================================
+watchdog:
+
+This parameter can be used to disable or enable the soft lockup detector
+_and_ the NMI watchdog (i.e. the hard lockup detector) at the same time.
+
+ 0 - disable both lockup detectors
+ 1 - enable both lockup detectors
+
+The soft lockup detector and the NMI watchdog can also be disabled or
+enabled individually, using the soft_watchdog and nmi_watchdog parameters.
+If the watchdog parameter is read, for example by executing
+
+ cat /proc/sys/kernel/watchdog
+
+the output of this command (0 or 1) shows the logical OR of soft_watchdog
+and nmi_watchdog.
+
+==============================================================
+
watchdog_thresh:
This value can be used to control the frequency of hrtimer and NMI
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 902b4574acfb..9832ec52f859 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -21,6 +21,7 @@ Currently, these files are in /proc/sys/vm:
- admin_reserve_kbytes
- block_dump
- compact_memory
+- compact_unevictable_allowed
- dirty_background_bytes
- dirty_background_ratio
- dirty_bytes
@@ -106,6 +107,16 @@ huge pages although processes will also directly compact memory as required.
==============================================================
+compact_unevictable_allowed
+
+Available only when CONFIG_COMPACTION is set. When set to 1, compaction is
+allowed to examine the unevictable lru (mlocked pages) for pages to compact.
+This should be used on systems where stalls for minor page faults are an
+acceptable trade for large contiguous free memory. Set to 0 to prevent
+compaction from moving pages that are unevictable. Default value is 1.
+
+==============================================================
+
dirty_background_bytes
Contains the amount of dirty memory at which the background kernel
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 2b47704f75cb..2ba71cea0172 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -237,8 +237,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
- buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
- buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
+ buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
@@ -309,8 +308,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
- buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
- buf += " &tpg->se_tpg, (void *)tpg,\n"
+ buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
+ buf += " &tpg->se_tpg, tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
@@ -370,7 +369,10 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " NULL,\n"
buf += "};\n\n"
- buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
+ buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
+ buf += " .module = THIS_MODULE,\n"
+ buf += " .name = " + fabric_mod_name + ",\n"
+ buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
@@ -413,75 +415,18 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
- buf += "};\n\n"
-
- buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
- buf += "{\n"
- buf += " struct target_fabric_configfs *fabric;\n"
- buf += " int ret;\n\n"
- buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
- buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
- buf += " utsname()->machine);\n"
- buf += " /*\n"
- buf += " * Register the top level struct config_item_type with TCM core\n"
- buf += " */\n"
- buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
- buf += " if (IS_ERR(fabric)) {\n"
- buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
- buf += " return PTR_ERR(fabric);\n"
- buf += " }\n"
- buf += " /*\n"
- buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
- buf += " */\n"
- buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
- buf += " /*\n"
- buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
- buf += " */\n"
- buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
- buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
- buf += " /*\n"
- buf += " * Register the fabric for use within TCM\n"
- buf += " */\n"
- buf += " ret = target_fabric_configfs_register(fabric);\n"
- buf += " if (ret < 0) {\n"
- buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
- buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
- buf += " return ret;\n"
- buf += " }\n"
- buf += " /*\n"
- buf += " * Setup our local pointer to *fabric\n"
- buf += " */\n"
- buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
- buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
- buf += " return 0;\n"
- buf += "};\n\n"
- buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
- buf += "{\n"
- buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
- buf += " return;\n\n"
- buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
- buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
- buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
+ buf += "\n"
+ buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
- buf += " int ret;\n\n"
- buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
- buf += " if (ret < 0)\n"
- buf += " return ret;\n\n"
- buf += " return 0;\n"
+ buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
+
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
- buf += " " + fabric_mod_name + "_deregister_configfs();\n"
+ buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
index 5518465290bf..43e94ea6d2ca 100644
--- a/Documentation/target/tcmu-design.txt
+++ b/Documentation/target/tcmu-design.txt
@@ -138,27 +138,40 @@ signals the kernel via a 4-byte write(). When cmd_head equals
cmd_tail, the ring is empty -- no commands are currently waiting to be
processed by userspace.
-TCMU commands start with a common header containing "len_op", a 32-bit
-value that stores the length, as well as the opcode in the lowest
-unused bits. Currently only two opcodes are defined, TCMU_OP_PAD and
-TCMU_OP_CMD. When userspace encounters a command with PAD opcode, it
-should skip ahead by the bytes in "length". (The kernel inserts PAD
-entries to ensure each CMD entry fits contigously into the circular
-buffer.)
-
-When userspace handles a CMD, it finds the SCSI CDB (Command Data
-Block) via tcmu_cmd_entry.req.cdb_off. This is an offset from the
-start of the overall shared memory region, not the entry. The data
-in/out buffers are accessible via tht req.iov[] array. Note that
-each iov.iov_base is also an offset from the start of the region.
-
-TCMU currently does not support BIDI operations.
+TCMU commands are 8-byte aligned. They start with a common header
+containing "len_op", a 32-bit value that stores the length, as well as
+the opcode in the lowest unused bits. It also contains cmd_id and
+flags fields for setting by the kernel (kflags) and userspace
+(uflags).
+
+Currently only two opcodes are defined, TCMU_OP_CMD and TCMU_OP_PAD.
+
+When the opcode is CMD, the entry in the command ring is a struct
+tcmu_cmd_entry. Userspace finds the SCSI CDB (Command Data Block) via
+tcmu_cmd_entry.req.cdb_off. This is an offset from the start of the
+overall shared memory region, not the entry. The data in/out buffers
+are accessible via the req.iov[] array. iov_cnt contains the number of
+entries in iov[] needed to describe either the Data-In or Data-Out
+buffers. For bidirectional commands, iov_cnt specifies how many iovec
+entries cover the Data-Out area, and iov_bidi_count specifies how many
+iovec entries immediately after that in iov[] cover the Data-In
+area. Just like other fields, iov.iov_base is an offset from the start
+of the region.
When completing a command, userspace sets rsp.scsi_status, and
rsp.sense_buffer if necessary. Userspace then increments
mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
kernel via the UIO method, a 4-byte write to the file descriptor.
+When the opcode is PAD, userspace only updates cmd_tail as above --
+it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry
+is contiguous within the command ring.)
+
+More opcodes may be added in the future. If userspace encounters an
+opcode it does not handle, it must set UNKNOWN_OP bit (bit 0) in
+hdr.uflags, update cmd_tail, and proceed with processing additional
+commands, if any.
+
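+A rough sketch of the userspace processing loop described above (field
+names follow struct tcmu_mailbox and struct tcmu_cmd_entry in
+include/uapi/linux/target_core_user.h; handle_scsi_cmd() is a placeholder,
+and the tcmu_hdr_get_op()/tcmu_hdr_get_len() helpers come from that same
+header - check it for their exact form, some versions take a pointer to
+the header rather than the raw len_op value):
+
+	struct tcmu_mailbox *mb = map;	/* start of the shared region */
+	void *cmdr = (void *)mb + mb->cmdr_off;
+
+	while (mb->cmd_tail != mb->cmd_head) {
+		struct tcmu_cmd_entry *ent = cmdr + mb->cmd_tail;
+		int op = tcmu_hdr_get_op(ent->hdr.len_op);
+
+		if (op == TCMU_OP_CMD) {
+			/* CDB lives at map + ent->req.cdb_off */
+			handle_scsi_cmd(map, ent);
+		} else if (op != TCMU_OP_PAD) {
+			/* unknown opcode: flag it and skip the entry */
+			ent->hdr.uflags |= 0x1;	/* UNKNOWN_OP, bit 0 */
+		}
+
+		mb->cmd_tail = (mb->cmd_tail +
+				tcmu_hdr_get_len(ent->hdr.len_op)) %
+			       mb->cmdr_size;
+	}
+	/* finally, signal the kernel with a 4-byte write to the UIO fd */
+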
The Data Area:
This is shared-memory space after the command ring. The organization
diff --git a/Documentation/timers/NO_HZ.txt b/Documentation/timers/NO_HZ.txt
index cca122f25120..6eaf576294f3 100644
--- a/Documentation/timers/NO_HZ.txt
+++ b/Documentation/timers/NO_HZ.txt
@@ -158,13 +158,9 @@ not come for free:
to the need to inform kernel subsystems (such as RCU) about
the change in mode.
-3. POSIX CPU timers on adaptive-tick CPUs may miss their deadlines
- (perhaps indefinitely) because they currently rely on
- scheduling-tick interrupts. This will likely be fixed in
- one of two ways: (1) Prevent CPUs with POSIX CPU timers from
- entering adaptive-tick mode, or (2) Use hrtimers or other
- adaptive-ticks-immune mechanism to cause the POSIX CPU timer to
- fire properly.
+3. POSIX CPU timers prevent CPUs from entering adaptive-tick mode.
+ Real-time applications needing to take actions based on CPU time
+ consumption need to use other means of doing so.
4. If there are more perf events pending than the hardware can
accommodate, they are normally round-robined so as to collect
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
index 02361552a3ea..77d14d51a670 100644
--- a/Documentation/trace/coresight.txt
+++ b/Documentation/trace/coresight.txt
@@ -14,7 +14,7 @@ document is concerned with the latter.
HW assisted tracing is becoming increasingly useful when dealing with systems
that have many SoCs and other components like GPU and DMA engines. ARM has
developed a HW assisted tracing solution by means of different components, each
-being added to a design at systhesis time to cater to specific tracing needs.
+being added to a design at synthesis time to cater to specific tracing needs.
Compoments are generally categorised as source, link and sinks and are
(usually) discovered using the AMBA bus.
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index 0f84ce8c9a7b..5517db602f37 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -344,7 +344,9 @@ implement g_volatile_ctrl like this:
}
Note that you use the 'new value' union as well in g_volatile_ctrl. In general
-controls that need to implement g_volatile_ctrl are read-only controls.
+controls that need to implement g_volatile_ctrl are read-only controls. If they
+are not, a V4L2_EVENT_CTRL_CH_VALUE will not be generated when the control
+changes.
To mark a control as volatile you have to set V4L2_CTRL_FLAG_VOLATILE:
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index f586e29ce221..59e619f9bbf5 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -793,8 +793,8 @@ video_register_device_no_warn() instead.
Whenever a device node is created some attributes are also created for you.
If you look in /sys/class/video4linux you see the devices. Go into e.g.
-video0 and you will see 'name', 'debug' and 'index' attributes. The 'name'
-attribute is the 'name' field of the video_device struct. The 'debug' attribute
+video0 and you will see 'name', 'dev_debug' and 'index' attributes. The 'name'
+attribute is the 'name' field of the video_device struct. The 'dev_debug' attribute
can be used to enable core debugging. See the next section for more detailed
information on this.
@@ -821,7 +821,7 @@ unregister the device if the registration failed.
video device debugging
----------------------
-The 'debug' attribute that is created for each video, vbi, radio or swradio
+The 'dev_debug' attribute that is created for each video, vbi, radio or swradio
device in /sys/class/video4linux/<devX>/ allows you to enable logging of
file operations.
diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt
index 6cfc8541a362..cd4b5a1ac529 100644
--- a/Documentation/video4linux/vivid.txt
+++ b/Documentation/video4linux/vivid.txt
@@ -912,6 +912,11 @@ looped to the video input provided that:
sequence and field counting in struct v4l2_buffer on the capture side may not
be 100% accurate.
+- field settings V4L2_FIELD_SEQ_TB/BT are not supported. While it is possible to
+ implement this, it would mean a lot of work to get this right. Since these
+ field values are rarely used the decision was made not to implement this for
+ now.
+
- on the input side the "Standard Signal Mode" for the S-Video input or the
"DV Timings Signal Mode" for the HDMI input should be configured so that a
valid signal is passed to the video input.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index bc9f6fe44e27..9fa2bf8c3f6f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3573,3 +3573,20 @@ struct {
@ar - access register number
KVM handlers should exit to userspace with rc = -EREMOTE.
+
+
+8. Other capabilities.
+----------------------
+
+This section lists capabilities that give information about other
+features of the KVM implementation.
+
+8.1 KVM_CAP_PPC_HWRNG
+
+Architectures: ppc
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel has an implementation of the
+H_RANDOM hypercall backed by a hardware random-number generator.
+If present, the kernel H_RANDOM handler can be enabled for guest use
+with the KVM_CAP_PPC_ENABLE_HCALL capability.
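+
+A minimal sketch of probing for the capability from userspace, using the
+standard KVM_CHECK_EXTENSION ioctl on the /dev/kvm file descriptor:
+
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	int kvm_has_ppc_hwrng(void)
+	{
+		int ret, fd = open("/dev/kvm", O_RDWR);
+
+		if (fd < 0)
+			return 0;
+		ret = ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HWRNG);
+		close(fd);
+		return ret > 0;
+	}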
diff --git a/Documentation/vm/cleancache.txt b/Documentation/vm/cleancache.txt
index 01d76282444e..e4b49df7a048 100644
--- a/Documentation/vm/cleancache.txt
+++ b/Documentation/vm/cleancache.txt
@@ -28,9 +28,7 @@ IMPLEMENTATION OVERVIEW
A cleancache "backend" that provides transcendent memory registers itself
to the kernel's cleancache "frontend" by calling cleancache_register_ops,
passing a pointer to a cleancache_ops structure with funcs set appropriately.
-Note that cleancache_register_ops returns the previous settings so that
-chaining can be performed if desired. The functions provided must conform to
-certain semantics as follows:
+The functions provided must conform to certain semantics as follows:
Most important, cleancache is "ephemeral". Pages which are copied into
cleancache have an indefinite lifetime which is completely unknowable
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index f2d3a100fe38..030977fb8d2d 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -267,21 +267,34 @@ call, then it is required that system administrator mount a file system of
type hugetlbfs:
mount -t hugetlbfs \
- -o uid=<value>,gid=<value>,mode=<value>,size=<value>,nr_inodes=<value> \
- none /mnt/huge
+ -o uid=<value>,gid=<value>,mode=<value>,pagesize=<value>,size=<value>,\
+ min_size=<value>,nr_inodes=<value> none /mnt/huge
This command mounts a (pseudo) filesystem of type hugetlbfs on the directory
/mnt/huge. Any files created on /mnt/huge uses huge pages. The uid and gid
options sets the owner and group of the root of the file system. By default
the uid and gid of the current process are taken. The mode option sets the
mode of root of file system to value & 01777. This value is given in octal.
-By default the value 0755 is picked. The size option sets the maximum value of
-memory (huge pages) allowed for that filesystem (/mnt/huge). The size is
-rounded down to HPAGE_SIZE. The option nr_inodes sets the maximum number of
-inodes that /mnt/huge can use. If the size or nr_inodes option is not
-provided on command line then no limits are set. For size and nr_inodes
-options, you can use [G|g]/[M|m]/[K|k] to represent giga/mega/kilo. For
-example, size=2K has the same meaning as size=2048.
+By default the value 0755 is picked. If the platform supports multiple huge
+page sizes, the pagesize option can be used to specify the huge page size and
+associated pool. pagesize is specified in bytes. If pagesize is not specified
+the platform's default huge page size and associated pool will be used. The
+size option sets the maximum value of memory (huge pages) allowed for that
+filesystem (/mnt/huge). The size option can be specified in bytes, or as a
+percentage of the specified huge page pool (nr_hugepages). The size is
+rounded down to HPAGE_SIZE boundary. The min_size option sets the minimum
+value of memory (huge pages) allowed for the filesystem. min_size can be
+specified in the same way as size, either bytes or a percentage of the
+huge page pool. At mount time, the number of huge pages specified by
+min_size are reserved for use by the filesystem. If there are not enough
+free huge pages available, the mount will fail. As huge pages are allocated
+to the filesystem and freed, the reserve count is adjusted so that the sum
+of allocated and reserved huge pages is always at least min_size. The option
+nr_inodes sets the maximum number of inodes that /mnt/huge can use. If the
+size, min_size or nr_inodes option is not provided on command line then
+no limits are set. For pagesize, size, min_size and nr_inodes options, you
+can use [G|g]/[M|m]/[K|k] to represent giga/mega/kilo. For example, size=2K
+has the same meaning as size=2048.
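+
+For example, on a platform with 2MB huge pages, a mount limited to half of
+the 2MB pool, with a tenth of the pool reserved up front, might look like
+(the values are illustrative only):
+
+mount -t hugetlbfs -o pagesize=2M,size=50%,min_size=10%,nr_inodes=64 \
+	none /mnt/huge
+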
While read system calls are supported on files that reside on hugetlb
file systems, write system calls are not.
@@ -289,15 +302,23 @@ file systems, write system calls are not.
Regular chown, chgrp, and chmod commands (with right permissions) could be
used to change the file attributes on hugetlbfs.
-Also, it is important to note that no such mount command is required if the
+Also, it is important to note that no such mount command is required if
applications are going to use only shmat/shmget system calls or mmap with
-MAP_HUGETLB. Users who wish to use hugetlb page via shared memory segment
-should be a member of a supplementary group and system admin needs to
-configure that gid into /proc/sys/vm/hugetlb_shm_group. It is possible for
-same or different applications to use any combination of mmaps and shm*
-calls, though the mount of filesystem will be required for using mmap calls
-without MAP_HUGETLB. For an example of how to use mmap with MAP_HUGETLB see
-map_hugetlb.c.
+MAP_HUGETLB. For an example of how to use mmap with MAP_HUGETLB see map_hugetlb
+below.
+
+Users who wish to use hugetlb memory via shared memory segment should be a
+member of a supplementary group and system admin needs to configure that gid
+into /proc/sys/vm/hugetlb_shm_group. It is possible for same or different
+applications to use any combination of mmaps and shm* calls, though the mount of
+filesystem will be required for using mmap calls without MAP_HUGETLB.
+
+Syscalls that operate on memory backed by hugetlb pages only have their lengths
+aligned to the native page size of the processor; they will normally fail with
+errno set to EINVAL or exclude hugetlb pages that extend beyond the length if
+not hugepage aligned. For example, munmap(2) will fail if memory is backed by
+a hugetlb page and the length is smaller than the hugepage size.
+
Examples
========
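For illustration, one way the mount options described above might be combined
on a platform with 2MB huge pages; this is only a sketch, and the uid, mode,
page size, percentages and inode count below are arbitrary example values:

    echo 512 > /proc/sys/vm/nr_hugepages        # populate the default huge page pool
    mount -t hugetlbfs \
        -o uid=1000,gid=1000,mode=0770,pagesize=2M,size=50%,min_size=10%,nr_inodes=64 \
        none /mnt/huge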
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 6fbd55ef6b45..6bfbc172cdb9 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -131,7 +131,8 @@ Short descriptions to the page flags:
13. SWAPCACHE page is mapped to swap space, ie. has an associated swap entry
14. SWAPBACKED page is backed by swap/RAM
-The page-types tool in this directory can be used to query the above flags.
+The page-types tool in the tools/vm directory can be used to query the
+above flags.
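As a quick sketch of using the tool from its relocated directory (the make
invocation assumes the tools/vm Makefile present in kernels of this era, and
page-types needs root to read /proc/kpageflags):

    make -C tools/vm page-types
    ./tools/vm/page-types            # summarise flag combinations for all pages
    ./tools/vm/page-types -p $$      # restrict the walk to the current shell's pages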
Using pagemap to do something useful:
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 6b31cfbe2a9a..8143b9e8373d 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -159,6 +159,17 @@ for each pass:
/sys/kernel/mm/transparent_hugepage/khugepaged/full_scans
+max_ptes_none specifies how many extra small pages (that are
+not already mapped) can be allocated when collapsing a group
+of small pages into one large page.
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
+
+A higher value means that additional unmapped small pages may be
+allocated, so programs can end up using more memory. A lower value
+reduces the performance gain from THP collapses. The CPU time spent
+evaluating max_ptes_none is negligible and can be ignored.
+
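A short usage sketch of the knob documented above; the value 0 simply forbids
allocating any extra pages during collapse, and 511 is only the usual default
for 2MB huge pages (512 PTEs per huge page minus one), so treat both as
example values:

    cat /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none      # often 511 by default
    echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none # collapse only fully mapped ranges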
== Boot parameter ==
You can change the sysfs boot time defaults of Transparent Hugepage
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
index 744f82f86c58..3be0bfc4738d 100644
--- a/Documentation/vm/unevictable-lru.txt
+++ b/Documentation/vm/unevictable-lru.txt
@@ -22,6 +22,7 @@ CONTENTS
- Filtering special vmas.
- munlock()/munlockall() system call handling.
- Migrating mlocked pages.
+ - Compacting mlocked pages.
- mmap(MAP_LOCKED) system call handling.
- munmap()/exit()/exec() system call handling.
- try_to_unmap().
@@ -317,7 +318,7 @@ If the VMA passes some filtering as described in "Filtering Special Vmas"
below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
off a subset of the VMA if the range does not cover the entire VMA. Once the
VMA has been merged or split or neither, mlock_fixup() will call
-__mlock_vma_pages_range() to fault in the pages via get_user_pages() and to
+populate_vma_page_range() to fault in the pages via get_user_pages() and to
mark the pages as mlocked via mlock_vma_page().
Note that the VMA being mlocked might be mapped with PROT_NONE. In this case,
@@ -327,7 +328,7 @@ fault path or in vmscan.
Also note that a page returned by get_user_pages() could be truncated or
migrated out from under us, while we're trying to mlock it. To detect this,
-__mlock_vma_pages_range() checks page_mapping() after acquiring the page lock.
+populate_vma_page_range() checks page_mapping() after acquiring the page lock.
If the page is still associated with its mapping, we'll go ahead and call
mlock_vma_page(). If the mapping is gone, we just unlock the page and move on.
In the worst case, this will result in a page mapped in a VM_LOCKED VMA
@@ -392,7 +393,7 @@ ignored for munlock.
If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
specified range. The range is then munlocked via the function
-__mlock_vma_pages_range() - the same function used to mlock a VMA range -
+populate_vma_page_range() - the same function used to mlock a VMA range -
passing a flag to indicate that munlock() is being performed.
Because the VMA access protections could have been changed to PROT_NONE after
@@ -402,7 +403,7 @@ get_user_pages() was enhanced to accept a flag to ignore the permissions when
fetching the pages - all of which should be resident as a result of previous
mlocking.
-For munlock(), __mlock_vma_pages_range() unlocks individual pages by calling
+For munlock(), populate_vma_page_range() unlocks individual pages by calling
munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked
flag using TestClearPageMlocked(). As with mlock_vma_page(),
munlock_vma_page() use the Test*PageMlocked() function to handle the case where
@@ -450,6 +451,17 @@ list because of a race between munlock and migration, page migration uses the
putback_lru_page() function to add migrated pages back to the LRU.
+COMPACTING MLOCKED PAGES
+------------------------
+
+The unevictable LRU can be scanned for compactable regions and the default
+behavior is to do so. /proc/sys/vm/compact_unevictable_allowed controls
+this behavior (see Documentation/sysctl/vm.txt). Once scanning of the
+unevictable LRU is enabled, the work of compaction is mostly handled by
+the page migration code and the same work flow as described in MIGRATING
+MLOCKED PAGES will apply.
+
+
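For illustration, the sysctl named above can be inspected and toggled as
follows; per the new section the default is to allow scanning:

    cat /proc/sys/vm/compact_unevictable_allowed       # 1: compaction may scan the unevictable LRU (default)
    echo 0 > /proc/sys/vm/compact_unevictable_allowed  # keep mlocked pages where they are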
mmap(MAP_LOCKED) SYSTEM CALL HANDLING
-------------------------------------
@@ -463,21 +475,11 @@ populate the page table.
To mlock a range of memory under the unevictable/mlock infrastructure, the
mmap() handler and task address space expansion functions call
-mlock_vma_pages_range() specifying the vma and the address range to mlock.
-mlock_vma_pages_range() filters VMAs like mlock_fixup(), as described above in
-"Filtering Special VMAs". It will clear the VM_LOCKED flag, which will have
-already been set by the caller, in filtered VMAs. Thus these VMA's need not be
-visited for munlock when the region is unmapped.
-
-For "normal" VMAs, mlock_vma_pages_range() calls __mlock_vma_pages_range() to
-fault/allocate the pages and mlock them. Again, like mlock_fixup(),
-mlock_vma_pages_range() downgrades the mmap semaphore to read mode before
-attempting to fault/allocate and mlock the pages and "upgrades" the semaphore
-back to write mode before returning.
-
-The callers of mlock_vma_pages_range() will have already added the memory range
+populate_vma_page_range() specifying the vma and the address range to mlock.
+
+The callers of populate_vma_page_range() will have already added the memory range
to be mlocked to the task's "locked_vm". To account for filtered VMAs,
-mlock_vma_pages_range() returns the number of pages NOT mlocked. All of the
+populate_vma_page_range() returns the number of pages NOT mlocked. All of the
callers then subtract a non-negative return value from the task's locked_vm. A
negative return value represent an error - for example, from get_user_pages()
attempting to fault in a VMA with PROT_NONE access. In this case, we leave the
diff --git a/Documentation/vm/zsmalloc.txt b/Documentation/vm/zsmalloc.txt
new file mode 100644
index 000000000000..64ed63c4f69d
--- /dev/null
+++ b/Documentation/vm/zsmalloc.txt
@@ -0,0 +1,70 @@
+zsmalloc
+--------
+
+This allocator is designed for use with zram. Thus, the allocator is
+supposed to work well under low memory conditions. In particular, it
+never attempts higher order page allocation which is very likely to
+fail under memory pressure. On the other hand, if we just use single
+(0-order) pages, it would suffer from very high fragmentation --
+any object of size PAGE_SIZE/2 or larger would occupy an entire page.
+This was one of the major issues with its predecessor (xvmalloc).
+
+To overcome these issues, zsmalloc allocates a bunch of 0-order pages
+and links them together using various 'struct page' fields. These linked
+pages act as a single higher-order page i.e. an object can span 0-order
+page boundaries. The code refers to these linked pages as a single entity
+called zspage.
+
+For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
+since this satisfies the requirements of all its current users (in the
+worst case, a page is incompressible and is thus stored "as-is", i.e. in
+uncompressed form). For allocation requests larger than this size, failure
+is returned (see zs_malloc).
+
+Additionally, zs_malloc() does not return a dereferenceable pointer.
+Instead, it returns an opaque handle (unsigned long) which encodes actual
+location of the allocated object. The reason for this indirection is that
+zsmalloc does not keep zspages permanently mapped since that would cause
+issues on 32-bit systems where the VA region for kernel space mappings
+is very small. So, before using the allocated memory, the object has to
+be mapped using zs_map_object() to get a usable pointer and subsequently
+unmapped using zs_unmap_object().
+
+stat
+----
+
+With CONFIG_ZSMALLOC_STAT, we can see zsmalloc internal information via
+/sys/kernel/debug/zsmalloc/<user name>. Here is a sample of stat output:
+
+# cat /sys/kernel/debug/zsmalloc/zram0/classes
+
+ class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage
+ ..
+ ..
+ 9 176 0 1 186 129 8 4
+ 10 192 1 0 2880 2872 135 3
+ 11 208 0 1 819 795 42 2
+ 12 224 0 1 219 159 12 4
+ ..
+ ..
+
+
+class: index
+size: the object size stored by zspages of this class
+almost_empty: the number of ZS_ALMOST_EMPTY zspages (see below)
+almost_full: the number of ZS_ALMOST_FULL zspages (see below)
+obj_allocated: the number of objects allocated
+obj_used: the number of objects allocated to the user
+pages_used: the number of pages allocated for the class
+pages_per_zspage: the number of 0-order pages to make a zspage
+
+We assign a zspage to the ZS_ALMOST_EMPTY fullness group when:
+	n <= N / f, where
+n = number of allocated objects
+N = total number of objects the zspage can store
+f = fullness_threshold_frac (i.e. 4 at the moment)
+
+Similarly, we assign zspage to:
+ ZS_ALMOST_FULL when n > N / f
+ ZS_EMPTY when n == 0
+ ZS_FULL when n == N
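As a brief illustration, with zram as the zsmalloc user and CONFIG_ZSMALLOC_STAT
enabled, a class table like the one above can be produced as follows (the 512M
disksize is an arbitrary example value):

    modprobe zram
    echo 512M > /sys/block/zram0/disksize
    cat /sys/kernel/debug/zsmalloc/zram0/classes

Working through the fullness rule above with f = 4: a zspage that can hold
N = 8 objects is ZS_EMPTY at n = 0, ZS_ALMOST_EMPTY while 0 < n <= 2,
ZS_ALMOST_FULL once n > 2, and ZS_FULL at n = 8.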
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt
index 6f6d956ac1c9..7cd36af11e71 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/zh_CN/arm64/booting.txt
@@ -15,6 +15,8 @@ Chinese translation of Documentation/arm64/booting.txt
communicating in English, you can also ask the Chinese maintainer for help. If this
translation is outdated or has problems, please contact the Chinese maintainer.
+The Git checkout point for this translation is: bc465aa9d045feb0e13b4a8f32cc33c1943f62d6
+
English maintainer: Will Deacon <will.deacon@arm.com>
Chinese maintainer: 傅炜 Fu Wei <wefu@redhat.com>
Chinese translator: 傅炜 Fu Wei <wefu@redhat.com>
@@ -88,22 +90,44 @@ The AArch64 kernel currently provides no decompressor, so if a compressed Image
  u32 code0;			/* Executable code */
  u32 code1;			/* Executable code */
-  u64 text_offset;		/* Image load offset */
-  u64 res0	= 0;		/* reserved */
-  u64 res1	= 0;		/* reserved */
+  u64 text_offset;		/* Image load offset, little endian */
+  u64 image_size;		/* Effective Image size, little endian */
+  u64 flags;			/* kernel flags, little endian */
  u64 res2	= 0;		/* reserved */
  u64 res3	= 0;		/* reserved */
  u64 res4	= 0;		/* reserved */
  u32 magic	= 0x644d5241;	/* Magic number, little endian, "ARM\x64" */
-  u32 res5	= 0;		/* reserved */
+  u32 res5;			/* reserved (used for PE COFF offset) */
Image header notes:
+- As of v3.17, all fields are little endian unless stated otherwise.
+
- code0/code1 are responsible for branching to stext.
-The image must be placed at the specified offset (currently 0x80000) from the
-start of system RAM, and the start of system RAM must be aligned to 2MB.
+- When booting via EFI, code0/code1 are initially skipped.
+  res5 is an offset to the PE header, and the PE header contains the EFI boot
+  entry point (efi_stub_entry). When the stub has done its work, it jumps to
+  code0 to resume the normal boot process.
+
+- Prior to v3.17, the endianness of text_offset was not specified. In those
+  cases image_size is zero and text_offset is 0x80000 in the kernel's endianness.
+  Where image_size is non-zero, text_offset is little endian and is a valid
+  value that should be used by the bootloader.
+  Where image_size is zero, text_offset can be assumed to be 0x80000.
+
+- The flags field (introduced in v3.17) is a 64-bit little-endian field, encoded as follows:
+  Bit 0:	kernel endianness. 1 means big endian, 0 means little endian.
+  Bits 1-63:	reserved.
+
+- When image_size is zero, the bootloader should attempt to keep as much
+  memory as possible free for direct use by the kernel immediately after the
+  end of the kernel image. The amount of space required varies with the
+  selected kernel features and has no effective limit.
+
+The kernel image must be placed text_offset bytes from a 2MB-aligned base
+address close to the start of usable system RAM and called there. Memory below
+that base address is currently unusable by Linux, so it is strongly recommended
+that the start of system RAM be used as this base. At least image_size bytes
+from the start of the image must be freed up for the kernel.
+
+Any memory described to the kernel (even before the 2MB-aligned base address),
+which is not marked as reserved from the kernel (e.g. by a memreserve region in
+the device tree (dtb)), will be considered available to the kernel.
Before jumping into the kernel, the following conditions must be met:
@@ -124,8 +148,12 @@ The AArch64 kernel currently provides no decompressor, so if a compressed Image
- Caches, MMUs
  The MMU must be off.
  The instruction cache may be on or off.
-  The data cache must be off and invalidated.
-  External caches (if present) must be configured and disabled.
+  The memory region corresponding to the loaded kernel image must be cleaned
+  to the Point of Coherency (PoC). In the presence of a system cache or other
+  coherent masters with caches enabled, this typically requires cache
+  maintenance by virtual address rather than by set/way operations.
+  System caches which respect the architected cache maintenance by virtual
+  address operations must be configured and may be enabled.
+  System caches which do not respect architected cache maintenance by virtual
+  address operations (not recommended) must be configured and disabled.
+
+  *Translator's note: for PoC and cache-related details, see the ARMv8
+  Architecture Reference Manual, ARM DDI 0487A.
- Architected timers
  CNTFRQ must be programmed with the timer frequency, and CNTVOFF must be programmed with a consistent value on all CPUs
@@ -141,6 +169,14 @@ The AArch64 kernel currently provides no decompressor, so if a compressed Image
  All architecturally writable system registers at the exception level where
  the kernel image will be entered must be initialised by software at a higher
  exception level to prevent execution in an UNKNOWN state.
+  For systems with a GICv3 interrupt controller:
+  - If currently in EL3:
+    ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
+    ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
+  - If the kernel runs in EL1:
+    ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.
+    ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
+
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must jump into the kernel at the same exception level.
@@ -170,7 +206,7 @@ The AArch64 kernel currently provides no decompressor, so if a compressed Image
ARM DEN 0022A: Power State Coordination Interface System Software on ARM processors), using the
CPU_ON call described there to bring the CPUs into the kernel.
-  *Translator's note: at the time this document was translated, that document had been updated to ARM DEN 0022B.
+  *Translator's note: ARM DEN 0022A has since been updated to ARM DEN 0022C.
The device tree must contain a 'psci' node; please refer to the following document:
Documentation/devicetree/bindings/arm/psci.txt
diff --git a/Documentation/zh_CN/arm64/legacy_instructions.txt b/Documentation/zh_CN/arm64/legacy_instructions.txt
new file mode 100644
index 000000000000..68362a1ab717
--- /dev/null
+++ b/Documentation/zh_CN/arm64/legacy_instructions.txt
@@ -0,0 +1,72 @@
+Chinese translated version of Documentation/arm64/legacy_instructions.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Punit Agrawal <punit.agrawal@arm.com>
+ Suzuki K. Poulose <suzuki.poulose@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+---------------------------------------------------------------------
+Chinese translation of Documentation/arm64/legacy_instructions.txt
+
+If you want to comment on or update the content of this document, please
+contact the maintainer of the original document directly. If you have
+difficulty communicating in English, you can also ask the Chinese maintainer
+for help. If this translation is outdated or has problems, please contact the
+Chinese maintainer.
+
+The Git checkout point for this translation is: bc465aa9d045feb0e13b4a8f32cc33c1943f62d6
+
+English maintainer: Punit Agrawal <punit.agrawal@arm.com>
+            Suzuki K. Poulose <suzuki.poulose@arm.com>
+Chinese maintainer: 傅炜 Fu Wei <wefu@redhat.com>
+Chinese translator: 傅炜 Fu Wei <wefu@redhat.com>
+Chinese proofreader: 傅炜 Fu Wei <wefu@redhat.com>
+
+The translated text follows
+---------------------------------------------------------------------
+The arm64 port of the Linux kernel provides infrastructure to support
+emulation of instructions which have been deprecated or obsoleted in the
+architecture. The infrastructure code uses undefined instruction hooks to
+support emulation. Where available, it also allows the instructions to be
+enabled in hardware.
+
+The emulation mode can be controlled by writing to sysctl nodes
+(/proc/sys/abi). The different execution behaviours and the corresponding
+values of the sysctl nodes are explained below:
+
+* Undef (undefined)
+  Value: 0
+  Generates an undefined instruction abort. This is the default behaviour for
+  instructions, such as SWP, that have been obsoleted in the architecture.
+
+* Emulate
+  Value: 1
+  Uses software emulation. To aid software migration, use of the emulated
+  instructions is tracked and rate-limited warnings are issued. This is the
+  default behaviour for instructions, such as the CP15 barriers, that are
+  being deprecated in the architecture.
+
+* Hardware Execution
+  Value: 2
+  Although marked as deprecated, some implementations may support enabling or
+  disabling hardware execution of these instructions. Using hardware execution
+  generally gives better performance, but at the loss of runtime statistics
+  about the use of the deprecated instructions.
+
+The default execution mode depends on the status of the instruction in the
+architecture. Deprecated instructions should default to Emulate, while
+obsoleted instructions must default to Undef.
+
+Note: the emulation of an instruction may not work in all cases. See the notes
+on the individual instructions for further details.
+
+Supported legacy instructions
+-------------
+* SWP{B}
+Node: /proc/sys/abi/swp
+Status: Obsolete
+Default execution mode: Undef (0)
+
+* CP15 Barriers
+Node: /proc/sys/abi/cp15_barrier
+Status: Deprecated, use not recommended
+Default execution mode: Emulate (1)
+
+* SETEND
+Node: /proc/sys/abi/setend
+Status: Deprecated, use not recommended
+Default execution mode: Emulate (1)*
+Note: to enable this feature, all CPUs in the system must support mixed-endian
+operation at EL0. If a new CPU that does not support mixed-endian operation is
+hotplugged into the system after the feature has been enabled, applications may
+see unexpected results.
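For illustration, the sysctl nodes listed above can be driven as follows; the
value meanings follow the Undef/Emulate/Hardware Execution table in the text:

    cat /proc/sys/abi/cp15_barrier        # 1 = Emulate, the default for deprecated instructions
    echo 2 > /proc/sys/abi/cp15_barrier   # hardware execution, where the implementation supports it
    cat /proc/sys/abi/swp                 # 0 = Undef, the default for the obsolete SWP/SWPB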
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt
index a782704c1cb5..19b3a52d5d94 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/zh_CN/arm64/memory.txt
@@ -15,6 +15,8 @@ Chinese translation of Documentation/arm64/memory.txt
communicating in English, you can also ask the Chinese maintainer for help. If this
translation is outdated or has problems, please contact the Chinese maintainer.
+The Git checkout point for this translation is: bc465aa9d045feb0e13b4a8f32cc33c1943f62d6
+
English maintainer: Catalin Marinas <catalin.marinas@arm.com>
Chinese maintainer: 傅炜 Fu Wei <wefu@redhat.com>
Chinese translator: 傅炜 Fu Wei <wefu@redhat.com>
@@ -26,69 +28,53 @@ Chinese translation of Documentation/arm64/memory.txt
===========================
Author: Catalin Marinas <catalin.marinas@arm.com>
-Date  : 20 February 2012
This document describes the virtual memory layout used by the AArch64 Linux
kernel. The architecture allows 4 levels of translation tables with a 4KB page size and 3 levels with a 64KB page size.
-AArch64 Linux uses a 3-level translation table configuration with a 4KB page
-size, giving both user and kernel a 39-bit (512GB) virtual address space. With
-a 64KB page size, only 2 levels of translation tables are used, but the memory layout is the same.
+AArch64 Linux uses either 3 or 4 levels of translation tables with a 4KB page size, giving user and kernel
+a 39-bit (512GB) or 48-bit (256TB) virtual address space respectively.
+With a 64KB page size, only 2 levels of translation tables are used, giving a 42-bit (4TB) virtual address space, but the memory layout is the same.
-User addresses have bits 63:39 set to 0 while kernel addresses have the same bits set to 1. TTBRx
+User addresses have bits 63:48 set to 0 while kernel addresses have the same bits set to 1. TTBRx
selection is given by bit 63 of the virtual address. swapper_pg_dir contains only kernel (global) mappings,
-while the user pgd contains only user (non-global) mappings. The swapper_pgd_dir address is written to
+while the user pgd contains only user (non-global) mappings. The swapper_pg_dir address is written to
TTBR1 and never written to TTBR0.
-AArch64 Linux memory layout with 4KB pages:
+AArch64 Linux memory layout with 4KB pages + 3 levels of translation tables:
Start			End			Size		Use
-----------------------------------------------------------------------
0000000000000000	0000007fffffffff	 512GB		user space
+ffffff8000000000	ffffffffffffffff	 512GB		kernel space
-ffffff8000000000	ffffffbbfffeffff	~240GB		vmalloc
-
-ffffffbbffff0000	ffffffbbffffffff	  64KB		[guard page]
-
-ffffffbc00000000	ffffffbdffffffff	   8GB		vmemmap
-
-ffffffbe00000000	ffffffbffbbfffff	  ~8GB		[guard page, future vmmemap]
-ffffffbffbc00000	ffffffbffbdfffff	   2MB		earlyprintk device
+AArch64 Linux memory layout with 4KB pages + 4 levels of translation tables:
-ffffffbffbe00000	ffffffbffbe0ffff	  64KB		PCI I/O space
-
-ffffffbffbe10000	ffffffbcffffffff	  ~2MB		[guard page]
-
-ffffffbffc000000	ffffffbfffffffff	  64MB		modules
-
-ffffffc000000000	ffffffffffffffff	 256GB		kernel logical memory map
+Start			End			Size		Use
+-----------------------------------------------------------------------
+0000000000000000	0000ffffffffffff	 256TB		user space
+ffff000000000000	ffffffffffffffff	 256TB		kernel space
-AArch64 Linux memory layout with 64KB pages:
+AArch64 Linux memory layout with 64KB pages + 2 levels of translation tables:
Start			End			Size		Use
-----------------------------------------------------------------------
0000000000000000	000003ffffffffff	   4TB		user space
+fffffc0000000000	ffffffffffffffff	   4TB		kernel space
-fffffc0000000000	fffffdfbfffeffff	  ~2TB		vmalloc
-
-fffffdfbffff0000	fffffdfbffffffff	  64KB		[guard page]
-
-fffffdfc00000000	fffffdfdffffffff	   8GB		vmemmap
-
-fffffdfe00000000	fffffdfffbbfffff	  ~8GB		[guard page, future vmmemap]
-fffffdfffbc00000	fffffdfffbdfffff	   2MB		earlyprintk device
+AArch64 Linux memory layout with 64KB pages + 3 levels of translation tables:
-fffffdfffbe00000	fffffdfffbe0ffff	  64KB		PCI I/O space
-
-fffffdfffbe10000	fffffdfffbffffff	  ~2MB		[guard page]
+Start			End			Size		Use
+-----------------------------------------------------------------------
+0000000000000000	0000ffffffffffff	 256TB		user space
+ffff000000000000	ffffffffffffffff	 256TB		kernel space
-fffffdfffc000000	fffffdffffffffff	  64MB		modules
-fffffe0000000000	ffffffffffffffff	   2TB		kernel logical memory map
+For a more detailed view of the kernel virtual memory layout, please see the kernel boot messages.
Translation table lookup with 4KB pages:
@@ -102,7 +88,7 @@ fffffe0000000000	ffffffffffffffff	   2TB		kernel logical memory map
 | | | | +-> [20:12] L3 index
 | | | +-----------> [29:21] L2 index
 | | +---------------------> [38:30] L1 index
- | +-------------------------------> [47:39] L0 index (not used)
+ | +-------------------------------> [47:39] L0 index
 +-------------------------------------------------> [63] TTBR0/1
@@ -115,10 +101,11 @@ fffffe0000000000	ffffffffffffffff	   2TB		kernel logical memory map
 | | | | v
 | | | | [15:0] offset within page
 | | | +----------> [28:16] L3 index
- | | +--------------------------> [41:29] L2 index (only bits 38:29 used)
- | +-------------------------------> [47:42] L1 index (not used)
+ | | +--------------------------> [41:29] L2 index
+ | +-------------------------------> [47:42] L1 index
 +-------------------------------------------------> [63] TTBR0/1
+
When using KVM, the hypervisor maps kernel pages in EL2 at a fixed offset from the
kernel virtual addresses (the top 24 bits of the kernel virtual address set to zero):
diff --git a/Kbuild b/Kbuild
index ab8ded92e870..6f0d82a9245d 100644
--- a/Kbuild
+++ b/Kbuild
@@ -13,8 +13,9 @@ define sed-y
s:->::; p;}"
endef
-quiet_cmd_offsets = GEN $@
-define cmd_offsets
+# Use filechk to avoid rebuilds when a header changes, but the resulting file
+# does not
+define filechk_offsets
(set -e; \
echo "#ifndef $2"; \
echo "#define $2"; \
@@ -24,9 +25,9 @@ define cmd_offsets
echo " * This file was generated by Kbuild"; \
echo " */"; \
echo ""; \
- sed -ne $(sed-y) $<; \
+ sed -ne $(sed-y); \
echo ""; \
- echo "#endif" ) > $@
+ echo "#endif" )
endef
#####
@@ -35,16 +36,15 @@ endef
bounds-file := include/generated/bounds.h
always := $(bounds-file)
-targets := $(bounds-file) kernel/bounds.s
+targets := kernel/bounds.s
# We use internal kbuild rules to avoid the "is up to date" message from make
kernel/bounds.s: kernel/bounds.c FORCE
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
-$(obj)/$(bounds-file): kernel/bounds.s Kbuild
- $(Q)mkdir -p $(dir $@)
- $(call cmd,offsets,__LINUX_BOUNDS_H__)
+$(obj)/$(bounds-file): kernel/bounds.s FORCE
+ $(call filechk,offsets,__LINUX_BOUNDS_H__)
#####
# 2) Generate asm-offsets.h
@@ -53,7 +53,6 @@ $(obj)/$(bounds-file): kernel/bounds.s Kbuild
offsets-file := include/generated/asm-offsets.h
always += $(offsets-file)
-targets += $(offsets-file)
targets += arch/$(SRCARCH)/kernel/asm-offsets.s
# We use internal kbuild rules to avoid the "is up to date" message from make
@@ -62,8 +61,8 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
-$(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild
- $(call cmd,offsets,__ASM_OFFSETS_H__)
+$(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
+ $(call filechk,offsets,__ASM_OFFSETS_H__)
#####
# 3) Check for missing system calls
diff --git a/MAINTAINERS b/MAINTAINERS
index 38579ac581ce..10f1dacef8ac 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -569,6 +569,12 @@ L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
F: drivers/mailbox/mailbox-altera.c
+ALTERA PIO DRIVER
+M: Tien Hock Loh <thloh@altera.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-altera.c
+
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Vince Bridgers <vbridger@opensource.altera.com>
L: netdev@vger.kernel.org
@@ -625,16 +631,16 @@ F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h
AMD KFD
-M: Oded Gabbay <oded.gabbay@amd.com>
-L: dri-devel@lists.freedesktop.org
-T: git git://people.freedesktop.org/~gabbayo/linux.git
-S: Supported
-F: drivers/gpu/drm/amd/amdkfd/
+M: Oded Gabbay <oded.gabbay@amd.com>
+L: dri-devel@lists.freedesktop.org
+T: git git://people.freedesktop.org/~gabbayo/linux.git
+S: Supported
+F: drivers/gpu/drm/amd/amdkfd/
F: drivers/gpu/drm/amd/include/cik_structs.h
F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h
-F: drivers/gpu/drm/radeon/radeon_kfd.c
-F: drivers/gpu/drm/radeon/radeon_kfd.h
-F: include/uapi/linux/kfd_ioctl.h
+F: drivers/gpu/drm/radeon/radeon_kfd.c
+F: drivers/gpu/drm/radeon/radeon_kfd.h
+F: include/uapi/linux/kfd_ioctl.h
AMD MICROCODE UPDATE SUPPORT
M: Borislav Petkov <bp@alien8.de>
@@ -880,12 +886,16 @@ S: Maintained
F: drivers/media/rc/meson-ir.c
N: meson[x68]
+ARM/Annapurna Labs ALPINE ARCHITECTURE
+M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
+S: Maintained
+F: arch/arm/mach-alpine/
+
ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
-M: Andrew Victor <linux@maxim.org.za>
M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://maxim.org.za/at91_26.html
W: http://www.linux4sam.org
S: Supported
F: arch/arm/mach-at91/
@@ -952,7 +962,7 @@ ARM/CORESIGHT FRAMEWORK AND DRIVERS
M: Mathieu Poirier <mathieu.poirier@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/coresight/*
+F: drivers/hwtracing/coresight/*
F: Documentation/trace/coresight.txt
F: Documentation/devicetree/bindings/arm/coresight.txt
F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
@@ -964,7 +974,7 @@ S: Maintained
ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://git.berlios.de/gemini-board
+T: git git://github.com/ulli-kroll/linux.git
S: Maintained
F: arch/arm/mach-gemini/
@@ -979,6 +989,12 @@ F: drivers/clocksource/timer-prima2.c
F: drivers/clocksource/timer-atlas7.c
N: [^a-z]sirf
+ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
+M: Baruch Siach <baruch@tkos.co.il>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+N: digicolor
+
ARM/EBSA110 MACHINE SUPPORT
M: Russell King <linux@arm.linux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1177,7 +1193,7 @@ ARM/MAGICIAN MACHINE SUPPORT
M: Philipp Zabel <philipp.zabel@gmail.com>
S: Maintained
-ARM/Marvell Armada 370 and Armada XP SOC support
+ARM/Marvell Kirkwood and Armada 370, 375, 38x, XP SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Gregory Clement <gregory.clement@free-electrons.com>
@@ -1186,12 +1202,17 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
F: drivers/rtc/rtc-armada38x.c
+F: arch/arm/boot/dts/armada*
+F: arch/arm/boot/dts/kirkwood*
+
ARM/Marvell Berlin SoC support
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-berlin/
+F: arch/arm/boot/dts/berlin*
+
ARM/Marvell Dove/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
@@ -1204,6 +1225,9 @@ F: arch/arm/mach-dove/
F: arch/arm/mach-mv78xx0/
F: arch/arm/mach-orion5x/
F: arch/arm/plat-orion/
+F: arch/arm/boot/dts/dove*
+F: arch/arm/boot/dts/orion5x*
+
ARM/Orion SoC/Technologic Systems TS-78xx platform support
M: Alexander Clouter <alex@digriz.org.uk>
@@ -1215,6 +1239,7 @@ F: arch/arm/mach-orion5x/ts78xx-*
ARM/Mediatek SoC support
M: Matthias Brugger <matthias.bgg@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/mt6*
F: arch/arm/boot/dts/mt8*
@@ -1254,22 +1279,6 @@ L: openmoko-kernel@lists.openmoko.org (subscribers-only)
W: http://wiki.openmoko.org/wiki/Neo_FreeRunner
S: Supported
-ARM/QUALCOMM MSM MACHINE SUPPORT
-M: David Brown <davidb@codeaurora.org>
-M: Daniel Walker <dwalker@fifo99.com>
-M: Bryan Huntsman <bryanh@codeaurora.org>
-L: linux-arm-msm@vger.kernel.org
-F: arch/arm/mach-msm/
-F: drivers/video/fbdev/msm/
-F: drivers/mmc/host/msm_sdcc.c
-F: drivers/mmc/host/msm_sdcc.h
-F: drivers/tty/serial/msm_serial.h
-F: drivers/tty/serial/msm_serial.c
-F: drivers/*/pm8???-*
-F: drivers/mfd/ssbi.c
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
-S: Maintained
-
ARM/TOSA MACHINE SUPPORT
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: Dirk Opfer <dirk@opfer-online.de>
@@ -1327,6 +1336,11 @@ L: linux-soc@vger.kernel.org
S: Maintained
F: arch/arm/mach-qcom/
F: drivers/soc/qcom/
+F: drivers/tty/serial/msm_serial.h
+F: drivers/tty/serial/msm_serial.c
+F: drivers/*/pm8???-*
+F: drivers/mfd/ssbi.c
+F: drivers/firmware/qcom_scm.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/linux-qcom.git
ARM/RADISYS ENP2611 MACHINE SUPPORT
@@ -1365,6 +1379,7 @@ N: rockchip
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
+M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
@@ -1425,11 +1440,9 @@ F: arch/arm/boot/dts/emev2*
F: arch/arm/boot/dts/r7s*
F: arch/arm/boot/dts/r8a*
F: arch/arm/boot/dts/sh*
-F: arch/arm/configs/ape6evm_defconfig
F: arch/arm/configs/armadillo800eva_defconfig
F: arch/arm/configs/bockw_defconfig
F: arch/arm/configs/kzm9g_defconfig
-F: arch/arm/configs/mackerel_defconfig
F: arch/arm/configs/marzen_defconfig
F: arch/arm/configs/shmobile_defconfig
F: arch/arm/include/debug/renesas-scif.S
@@ -1440,9 +1453,10 @@ ARM/SOCFPGA ARCHITECTURE
M: Dinh Nguyen <dinguyen@opensource.altera.com>
S: Maintained
F: arch/arm/mach-socfpga/
+F: arch/arm/boot/dts/socfpga*
+F: arch/arm/configs/socfpga_defconfig
W: http://www.rocketboards.org
-T: git://git.rocketboards.org/linux-socfpga.git
-T: git://git.rocketboards.org/linux-socfpga-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
M: Dinh Nguyen <dinguyen@opensource.altera.com>
@@ -1764,7 +1778,7 @@ S: Supported
F: drivers/tty/serial/atmel_serial.c
ATMEL Audio ALSA driver
-M: Bo Shen <voice.shen@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@atmel.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/atmel
@@ -1821,7 +1835,7 @@ S: Supported
F: drivers/spi/spi-atmel.*
ATMEL SSC DRIVER
-M: Bo Shen <voice.shen@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@atmel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel-ssc.c
@@ -1915,16 +1929,14 @@ S: Maintained
F: drivers/media/radio/radio-aztech*
B43 WIRELESS DRIVER
-M: Stefano Brivio <stefano.brivio@polimi.it>
L: linux-wireless@vger.kernel.org
L: b43-dev@lists.infradead.org
W: http://wireless.kernel.org/en/users/Drivers/b43
-S: Maintained
+S: Odd Fixes
F: drivers/net/wireless/b43/
B43LEGACY WIRELESS DRIVER
M: Larry Finger <Larry.Finger@lwfinger.net>
-M: Stefano Brivio <stefano.brivio@polimi.it>
L: linux-wireless@vger.kernel.org
L: b43-dev@lists.infradead.org
W: http://wireless.kernel.org/en/users/Drivers/b43
@@ -1932,7 +1944,7 @@ S: Maintained
F: drivers/net/wireless/b43legacy/
BACKLIGHT CLASS/SUBSYSTEM
-M: Jingoo Han <jg1.han@samsung.com>
+M: Jingoo Han <jingoohan1@gmail.com>
M: Lee Jones <lee.jones@linaro.org>
S: Maintained
F: drivers/video/backlight/
@@ -1967,10 +1979,10 @@ F: Documentation/filesystems/befs.txt
F: fs/befs/
BECKHOFF CX5020 ETHERCAT MASTER DRIVER
-M: Dariusz Marcinkiewicz <reksio@newterm.pl>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/ec_bhf.c
+M: Dariusz Marcinkiewicz <reksio@newterm.pl>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/ec_bhf.c
BFS FILE SYSTEM
M: "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk>
@@ -2119,8 +2131,9 @@ S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
-M: Christian Daudt <bcm@fixthebug.org>
M: Florian Fainelli <f.fainelli@gmail.com>
+M: Ray Jui <rjui@broadcom.com>
+M: Scott Branden <sbranden@broadcom.com>
L: bcm-kernel-feedback-list@broadcom.com
T: git git://github.com/broadcom/mach-bcm
S: Maintained
@@ -2171,7 +2184,6 @@ S: Maintained
F: drivers/usb/gadget/udc/bcm63xx_udc.*
BROADCOM BCM7XXX ARM ARCHITECTURE
-M: Marc Carino <marc.ceeeee@gmail.com>
M: Brian Norris <computersforpeace@gmail.com>
M: Gregory Fong <gregory.0xf0@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
@@ -2525,6 +2537,13 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/chipidea/
+CHIPONE ICN8318 I2C TOUCHSCREEN DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
+F: drivers/input/touchscreen/chipone_icn8318.c
+
CHROME HARDWARE PLATFORM SUPPORT
M: Olof Johansson <olof@lixom.net>
S: Maintained
@@ -2565,6 +2584,7 @@ F: include/linux/cleancache.h
CLK API
M: Russell King <linux@arm.linux.org.uk>
+L: linux-clk@vger.kernel.org
S: Maintained
F: include/linux/clk.h
@@ -2625,7 +2645,7 @@ F: drivers/media/platform/coda/
COMMON CLK FRAMEWORK
M: Mike Turquette <mturquette@linaro.org>
M: Stephen Boyd <sboyd@codeaurora.org>
-L: linux-kernel@vger.kernel.org
+L: linux-clk@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
S: Maintained
F: drivers/clk/
@@ -2818,6 +2838,7 @@ L: linux-crypto@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
S: Maintained
F: Documentation/crypto/
+F: Documentation/DocBook/crypto-API.tmpl
F: arch/*/crypto/
F: crypto/
F: drivers/crypto/
@@ -2888,11 +2909,11 @@ S: Supported
F: drivers/net/ethernet/chelsio/cxgb3/
CXGB3 ISCSI DRIVER (CXGB3I)
-M: Karen Xie <kxie@chelsio.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.chelsio.com
-S: Supported
-F: drivers/scsi/cxgbi/cxgb3i
+M: Karen Xie <kxie@chelsio.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.chelsio.com
+S: Supported
+F: drivers/scsi/cxgbi/cxgb3i
CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
M: Steve Wise <swise@chelsio.com>
@@ -2909,11 +2930,11 @@ S: Supported
F: drivers/net/ethernet/chelsio/cxgb4/
CXGB4 ISCSI DRIVER (CXGB4I)
-M: Karen Xie <kxie@chelsio.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.chelsio.com
-S: Supported
-F: drivers/scsi/cxgbi/cxgb4i
+M: Karen Xie <kxie@chelsio.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.chelsio.com
+S: Supported
+F: drivers/scsi/cxgbi/cxgb4i
CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
M: Steve Wise <swise@chelsio.com>
@@ -3060,10 +3081,16 @@ F: drivers/net/fddi/defxx.*
DELL LAPTOP DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
+M: Pali Rohár <pali.rohar@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/dell-laptop.c
+DELL LAPTOP FREEFALL DRIVER
+M: Pali Rohár <pali.rohar@gmail.com>
+S: Maintained
+F: drivers/platform/x86/dell-smo8800.c
+
DELL LAPTOP SMM DRIVER
M: Guenter Roeck <linux@roeck-us.net>
S: Maintained
@@ -3078,6 +3105,7 @@ F: drivers/firmware/dcdbas.*
DELL WMI EXTRAS DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
+M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
F: drivers/platform/x86/dell-wmi.c
@@ -3265,12 +3293,6 @@ F: drivers/firmware/dmi-id.c
F: drivers/firmware/dmi_scan.c
F: include/linux/dmi.h
-DOCKING STATION DRIVER
-M: Shaohua Li <shaohua.li@intel.com>
-L: linux-acpi@vger.kernel.org
-S: Supported
-F: drivers/acpi/dock.c
-
DOCUMENTATION
M: Jonathan Corbet <corbet@lwn.net>
L: linux-doc@vger.kernel.org
@@ -3278,7 +3300,9 @@ S: Maintained
F: Documentation/
X: Documentation/ABI/
X: Documentation/devicetree/
-X: Documentation/[a-z][a-z]_[A-Z][A-Z]/
+X: Documentation/acpi
+X: Documentation/power
+X: Documentation/spi
T: git git://git.lwn.net/linux-2.6.git docs-next
DOUBLETALK DRIVER
@@ -3402,9 +3426,15 @@ T: git git://people.freedesktop.org/~airlied/linux
S: Supported
F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
-F: include/linux/platform_data/rcar-du.h
F: include/linux/platform_data/shmob_drm.h
+DRM DRIVERS FOR ROCKCHIP
+M: Mark Yao <mark.yao@rock-chips.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/rockchip/
+F: Documentation/devicetree/bindings/video/rockchip*
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -3897,7 +3927,7 @@ F: drivers/extcon/
F: Documentation/extcon/
EXYNOS DP DRIVER
-M: Jingoo Han <jg1.han@samsung.com>
+M: Jingoo Han <jingoohan1@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/exynos/exynos_dp*
@@ -4324,6 +4354,15 @@ S: Supported
F: drivers/phy/
F: include/linux/phy/
+GENERIC PM DOMAINS
+M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: Kevin Hilman <khilman@kernel.org>
+M: Ulf Hansson <ulf.hansson@linaro.org>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: drivers/base/power/domain*.c
+F: include/linux/pm_domain.h
+
GENERIC UIO DRIVER FOR PCI DEVICES
M: "Michael S. Tsirkin" <mst@redhat.com>
L: kvm@vger.kernel.org
@@ -4337,21 +4376,20 @@ F: scripts/get_maintainer.pl
GFS2 FILE SYSTEM
M: Steven Whitehouse <swhiteho@redhat.com>
+M: Bob Peterson <rpeterso@redhat.com>
L: cluster-devel@redhat.com
W: http://sources.redhat.com/cluster/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
S: Supported
F: Documentation/filesystems/gfs2*.txt
F: fs/gfs2/
F: include/uapi/linux/gfs2_ondisk.h
GIGASET ISDN DRIVERS
-M: Hansjoerg Lipp <hjlipp@web.de>
-M: Tilman Schmidt <tilman@imap.cc>
+M: Paul Bolle <pebolle@tiscali.nl>
L: gigaset307x-common@lists.sourceforge.net
W: http://gigaset307x.sourceforge.net/
-S: Maintained
+S: Odd Fixes
F: Documentation/isdn/README.gigaset
F: drivers/isdn/gigaset/
F: include/uapi/linux/gigaset_dev.h
@@ -4450,7 +4488,7 @@ S: Maintained
F: block/partitions/efi.*
STK1160 USB VIDEO CAPTURE DRIVER
-M: Ezequiel Garcia <elezegarcia@gmail.com>
+M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
@@ -4498,7 +4536,7 @@ M: Jean Delvare <jdelvare@suse.de>
M: Guenter Roeck <linux@roeck-us.net>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
-T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
@@ -4605,6 +4643,18 @@ F: drivers/hid/
F: include/linux/hid*
F: include/uapi/linux/hid*
+HID SENSOR HUB DRIVERS
+M: Jiri Kosina <jkosina@suse.cz>
+M: Jonathan Cameron <jic23@kernel.org>
+M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+L: linux-input@vger.kernel.org
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/hid/hid-sensor*
+F: drivers/hid/hid-sensor-*
+F: drivers/iio/*/hid-*
+F: include/linux/hid-sensor-*
+
HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
@@ -4993,6 +5043,11 @@ W: http://industrypack.sourceforge.net
S: Maintained
F: drivers/ipack/
+INGENIC JZ4780 DMA Driver
+M: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
+S: Maintained
+F: drivers/dma/dma-jz4780.c
+
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
@@ -5013,17 +5068,19 @@ S: Orphan
F: drivers/video/fbdev/imsttfb.c
INFINIBAND SUBSYSTEM
-M: Roland Dreier <roland@kernel.org>
+M: Doug Ledford <dledford@redhat.com>
M: Sean Hefty <sean.hefty@intel.com>
M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
S: Supported
F: Documentation/infiniband/
F: drivers/infiniband/
F: include/uapi/linux/if_infiniband.h
+F: include/uapi/rdma/
+F: include/rdma/
INOTIFY
M: John McCutchan <john@johnmccutchan.com>
@@ -5206,7 +5263,7 @@ F: arch/x86/kernel/tboot.c
INTEL WIRELESS WIMAX CONNECTION 2400
M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
M: linux-wimax@intel.com
-L: wimax@linuxwimax.org (subscribers-only)
+L: wimax@linuxwimax.org (subscribers-only)
S: Supported
W: http://linuxwimax.org
F: Documentation/wimax/README.i2400m
@@ -5284,6 +5341,13 @@ F: drivers/char/ipmi/
F: include/linux/ipmi*
F: include/uapi/linux/ipmi*
+QCOM AUDIO (ASoC) DRIVERS
+M: Patrick Lai <plai@codeaurora.org>
+M: Banajit Goswami <bgoswami@codeaurora.org>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Supported
+F: sound/soc/qcom/
+
IPS SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
L: linux-scsi@vger.kernel.org
@@ -5769,6 +5833,7 @@ F: drivers/scsi/53c700*
LED SUBSYSTEM
M: Bryan Wu <cooloney@gmail.com>
M: Richard Purdie <rpurdie@rpsys.net>
+M: Jacek Anaszewski <j.anaszewski@samsung.com>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
S: Maintained
@@ -5902,7 +5967,7 @@ F: arch/powerpc/platforms/512x/
F: arch/powerpc/platforms/52xx/
LINUX FOR POWERPC EMBEDDED PPC4XX
-M: Alistair Popple <alistair@popple.id.au>
+M: Alistair Popple <alistair@popple.id.au>
M: Matt Porter <mporter@kernel.crashing.org>
W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
@@ -6135,16 +6200,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/dvb-frontends/m88rs2000*
-M88TS2022 MEDIA DRIVER
-M: Antti Palosaari <crope@iki.fi>
-L: linux-media@vger.kernel.org
-W: http://linuxtv.org/
-W: http://palosaari.fi/linux/
-Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
-S: Maintained
-F: drivers/media/tuners/m88ts2022*
-
MA901 MASTERKIT USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -6337,6 +6392,7 @@ F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Amir Vadai <amirv@mellanox.com>
+M: Ido Shamay <idos@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@@ -6374,7 +6430,7 @@ S: Supported
F: drivers/watchdog/mena21_wdt.c
MEN CHAMELEON BUS (mcb)
-M: Johannes Thumshirn <johannes.thumshirn@men.de>
+M: Johannes Thumshirn <johannes.thumshirn@men.de>
S: Supported
F: drivers/mcb/
F: include/linux/mcb.h
@@ -6567,6 +6623,7 @@ M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/mt9v032.txt
F: drivers/media/i2c/mt9v032.c
F: include/media/mt9v032.h
@@ -6922,6 +6979,17 @@ T: git git://git.rocketboards.org/linux-socfpga-next.git
S: Maintained
F: arch/nios2/
+NOKIA N900 POWER SUPPLY DRIVERS
+M: Pali Rohár <pali.rohar@gmail.com>
+S: Maintained
+F: include/linux/power/bq2415x_charger.h
+F: include/linux/power/bq27x00_battery.h
+F: include/linux/power/isp1704_charger.h
+F: drivers/power/bq2415x_charger.c
+F: drivers/power/bq27x00_battery.c
+F: drivers/power/isp1704_charger.c
+F: drivers/power/rx51_battery.c
+
NTB DRIVER
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
@@ -6956,6 +7024,13 @@ S: Supported
F: drivers/block/nvme*
F: include/linux/nvme.h
+NXP-NCI NFC DRIVER
+M: Clément Perrochaud <clement.perrochaud@effinnov.com>
+R: Charles Gorand <charles.gorand@effinnov.com>
+L: linux-nfc@lists.01.org (moderated for non-subscribers)
+S: Supported
+F: drivers/nfc/nxp-nci
+
NXP TDA998X DRM DRIVER
M: Russell King <rmk+kernel@arm.linux.org.uk>
S: Supported
@@ -6977,6 +7052,8 @@ Q: http://patchwork.kernel.org/project/linux-omap/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
S: Maintained
F: arch/arm/*omap*/
+F: arch/arm/configs/omap1_defconfig
+F: arch/arm/configs/omap2plus_defconfig
F: drivers/i2c/busses/i2c-omap.c
F: drivers/irqchip/irq-omap-intc.c
F: drivers/mfd/*omap*.c
@@ -7107,6 +7184,7 @@ OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/media/ti,omap3isp.txt
F: drivers/media/platform/omap3isp/
F: drivers/staging/media/omap4iss/
@@ -7206,6 +7284,15 @@ F: Documentation/devicetree/
F: arch/*/boot/dts/
F: include/dt-bindings/
+OPEN FIRMWARE AND DEVICE TREE OVERLAYS
+M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+L: devicetree@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/dynamic-resolution-notes.txt
+F: Documentation/devicetree/overlay-notes.txt
+F: drivers/of/overlay.c
+F: drivers/of/resolver.c
+
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
W: http://openrisc.net
@@ -7491,7 +7578,7 @@ S: Maintained
F: drivers/pci/host/*rcar*
PCI DRIVER FOR SAMSUNG EXYNOS
-M: Jingoo Han <jg1.han@samsung.com>
+M: Jingoo Han <jingoohan1@gmail.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -7499,8 +7586,7 @@ S: Maintained
F: drivers/pci/host/pci-exynos.c
PCI DRIVER FOR SYNOPSIS DESIGNWARE
-M: Mohit Kumar <mohit.kumar@st.com>
-M: Jingoo Han <jg1.han@samsung.com>
+M: Jingoo Han <jingoohan1@gmail.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/host/*designware*
@@ -7514,9 +7600,8 @@ F: Documentation/devicetree/bindings/pci/host-generic-pci.txt
F: drivers/pci/host/pci-host-generic.c
PCIE DRIVER FOR ST SPEAR13XX
-M: Mohit Kumar <mohit.kumar@st.com>
L: linux-pci@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/pci/host/*spear*
PCMCIA SUBSYSTEM
@@ -7923,10 +8008,10 @@ L: rtc-linux@googlegroups.com
S: Maintained
QAT DRIVER
-M: Tadeusz Struk <tadeusz.struk@intel.com>
-L: qat-linux@intel.com
-S: Supported
-F: drivers/crypto/qat/
+M: Tadeusz Struk <tadeusz.struk@intel.com>
+L: qat-linux@intel.com
+S: Supported
+F: drivers/crypto/qat/
QIB DRIVER
M: Mike Marciniszyn <infinipath@intel.com>
@@ -8077,11 +8162,17 @@ S: Maintained
F: drivers/net/wireless/rt2x00/
RAMDISK RAM BLOCK DEVICE DRIVER
-M: Nick Piggin <npiggin@kernel.dk>
+M: Jens Axboe <axboe@kernel.dk>
S: Maintained
F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
+PERSISTENT MEMORY DRIVER
+M: Ross Zwisler <ross.zwisler@linux.intel.com>
+L: linux-nvdimm@lists.01.org
+S: Supported
+F: drivers/block/pmem.c
+
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
S: Maintained
@@ -8153,6 +8244,7 @@ X: kernel/torture.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
+M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
L: rtc-linux@googlegroups.com
Q: http://patchwork.ozlabs.org/project/rtc-linux/list/
S: Maintained
@@ -8364,7 +8456,6 @@ F: block/partitions/ibm.c
S390 NETWORK DRIVERS
M: Ursula Braun <ursula.braun@de.ibm.com>
-M: Frank Blaschka <blaschka@linux.vnet.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -8451,7 +8542,7 @@ S: Supported
F: sound/soc/samsung/
SAMSUNG FRAMEBUFFER DRIVER
-M: Jingoo Han <jg1.han@samsung.com>
+M: Jingoo Han <jingoohan1@gmail.com>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/s3c-fb.c
@@ -8624,11 +8715,9 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <JBottomley@parallels.com>
+M: "James E.J. Bottomley" <JBottomley@odin.com>
L: linux-scsi@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-pending-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
S: Maintained
F: drivers/scsi/
F: include/scsi/
@@ -8758,15 +8847,25 @@ W: http://www.emulex.com
S: Supported
F: drivers/scsi/be2iscsi/
-SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
-M: Sathya Perla <sathya.perla@emulex.com>
-M: Subbu Seetharaman <subbu.seetharaman@emulex.com>
-M: Ajit Khaparde <ajit.khaparde@emulex.com>
+Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
+M: Sathya Perla <sathya.perla@avagotech.com>
+M: Ajit Khaparde <ajit.khaparde@avagotech.com>
+M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
+M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
L: netdev@vger.kernel.org
W: http://www.emulex.com
S: Supported
F: drivers/net/ethernet/emulex/benet/
+EMULEX ONECONNECT ROCE DRIVER
+M: Selvin Xavier <selvin.xavier@emulex.com>
+M: Devesh Sharma <devesh.sharma@emulex.com>
+M: Mitesh Ahuja <mitesh.ahuja@emulex.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.emulex.com
+S: Supported
+F: drivers/infiniband/hw/ocrdma/
+
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
M: Shradha Shah <sshah@solarflare.com>
@@ -8940,6 +9039,16 @@ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
S: Maintained
F: drivers/media/platform/am437x/
+OV2659 OMNIVISION SENSOR DRIVER
+M: Lad, Prabhakar <prabhakar.csengg@gmail.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
+S: Maintained
+F: drivers/media/i2c/ov2659.c
+F: include/media/ov2659.h
+
SIS 190 ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -9840,7 +9949,7 @@ F: include/linux/wl12xx.h
TIPC NETWORK LAYER
M: Jon Maloy <jon.maloy@ericsson.com>
-M: Allan Stephens <allan.stephens@windriver.com>
+M: Ying Xue <ying.xue@windriver.com>
L: netdev@vger.kernel.org (core kernel code)
L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
W: http://tipc.sourceforge.net/
@@ -9889,10 +9998,23 @@ S: Maintained
F: drivers/platform/x86/topstar-laptop.c
TOSHIBA ACPI EXTRAS DRIVER
+M: Azael Avalos <coproscefalo@gmail.com>
L: platform-driver-x86@vger.kernel.org
-S: Orphan
+S: Maintained
F: drivers/platform/x86/toshiba_acpi.c
+TOSHIBA BLUETOOTH DRIVER
+M: Azael Avalos <coproscefalo@gmail.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/toshiba_bluetooth.c
+
+TOSHIBA HDD ACTIVE PROTECTION SENSOR DRIVER
+M: Azael Avalos <coproscefalo@gmail.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/toshiba_haps.c
+
TOSHIBA SMM DRIVER
M: Jonathan Buzzard <jonathan@buzzard.org.uk>
L: tlinux-users@tce.toshiba-dme.co.jp
@@ -9944,6 +10066,7 @@ F: drivers/media/pci/tw68/
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
M: Marcel Selhorst <tpmdd@selhorst.net>
+R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
W: http://tpmdd.sourceforge.net
L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
Q: git git://github.com/PeterHuewe/linux-tpmdd.git
@@ -10097,11 +10220,11 @@ F: include/linux/cdrom.h
F: include/uapi/linux/cdrom.h
UNISYS S-PAR DRIVERS
-M: Benjamin Romer <benjamin.romer@unisys.com>
-M: David Kershner <david.kershner@unisys.com>
-L: sparmaintainer@unisys.com (Unisys internal)
-S: Supported
-F: drivers/staging/unisys/
+M: Benjamin Romer <benjamin.romer@unisys.com>
+M: David Kershner <david.kershner@unisys.com>
+L: sparmaintainer@unisys.com (Unisys internal)
+S: Supported
+F: drivers/staging/unisys/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
M: Vinayak Holikatti <vinholikatti@gmail.com>
@@ -10448,7 +10571,6 @@ F: include/linux/virtio_console.h
F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
-M: Rusty Russell <rusty@rustcorp.com.au>
M: "Michael S. Tsirkin" <mst@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
@@ -10468,6 +10590,12 @@ S: Maintained
F: drivers/vhost/
F: include/uapi/linux/vhost.h
+VIRTIO INPUT DRIVER
+M: Gerd Hoffmann <kraxel@redhat.com>
+S: Maintained
+F: drivers/virtio/virtio_input.c
+F: include/uapi/linux/virtio_input.h
+
VIA RHINE NETWORK DRIVER
M: Roger Luethi <rl@hellgate.ch>
S: Maintained
@@ -10543,6 +10671,14 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/misc/vmw_balloon.c
+VMWARE VMMOUSE SUBDRIVER
+M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
+M: "VMware, Inc." <pv-drivers@vmware.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/input/mouse/vmmouse.c
+F: drivers/input/mouse/vmmouse.h
+
VMWARE VMXNET3 ETHERNET DRIVER
M: Shreyas Bhatewara <sbhatewara@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
@@ -10658,7 +10794,7 @@ F: drivers/media/rc/winbond-cir.c
WIMAX STACK
M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
M: linux-wimax@intel.com
-L: wimax@linuxwimax.org (subscribers-only)
+L: wimax@linuxwimax.org (subscribers-only)
S: Supported
W: http://linuxwimax.org
F: Documentation/wimax/README.wimax
@@ -10868,6 +11004,16 @@ L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/uartlite.c
+XILINX VIDEO IP CORES
+M: Hyun Kwon <hyun.kwon@xilinx.com>
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: Documentation/devicetree/bindings/media/xilinx/
+F: drivers/media/platform/xilinx/
+F: include/uapi/linux/xilinx-v4l2-controls.h
+
XILLYBUS DRIVER
M: Eli Billauer <eli.billauer@gmail.com>
L: linux-kernel@vger.kernel.org
@@ -10932,6 +11078,7 @@ F: drivers/media/pci/zoran/
ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER
M: Minchan Kim <minchan@kernel.org>
M: Nitin Gupta <ngupta@vflare.org>
+R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/block/zram/
@@ -10949,6 +11096,7 @@ L: linux-mm@kvack.org
S: Maintained
F: mm/zsmalloc.c
F: include/linux/zsmalloc.h
+F: Documentation/vm/zsmalloc.txt
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjennings@variantweb.net>
diff --git a/Makefile b/Makefile
index 9b76ce1e08bb..dc20bcb9b271 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
-PATCHLEVEL = 0
+PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc4
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
@@ -10,9 +10,10 @@ NAME = Hurr durr I'ma sheep
# Comments in this file are targeted only to the developer, do not
# expect to learn how to build the kernel reading this file.
-# Do not use make's built-in rules and variables
-# (this increases performance and avoids hard-to-debug behaviour);
-MAKEFLAGS += -rR
+# o Do not use make's built-in rules and variables
+# (this increases performance and avoids hard-to-debug behaviour);
+# o Look for make include files relative to root of kernel src
+MAKEFLAGS += -rR --include-dir=$(CURDIR)
# Avoid funny character set dependencies
unexport LC_ALL
@@ -344,12 +345,9 @@ endif
export COMPILER
endif
-# Look for make include files relative to root of kernel src
-MAKEFLAGS += --include-dir=$(srctree)
-
# We need some generic definitions (do not try to remake the file).
-$(srctree)/scripts/Kbuild.include: ;
-include $(srctree)/scripts/Kbuild.include
+scripts/Kbuild.include: ;
+include scripts/Kbuild.include
# Make variables (CC, etc...)
AS = $(CROSS_COMPILE)as
@@ -533,7 +531,7 @@ ifeq ($(config-targets),1)
# Read arch specific Makefile to set KBUILD_DEFCONFIG as needed.
# KBUILD_DEFCONFIG may point out an alternative default configuration
# used for 'make defconfig'
-include $(srctree)/arch/$(SRCARCH)/Makefile
+include arch/$(SRCARCH)/Makefile
export KBUILD_DEFCONFIG KBUILD_KCONFIG
config: scripts_basic outputmakefile FORCE
@@ -609,7 +607,7 @@ endif # $(dot-config)
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux
-include $(srctree)/arch/$(SRCARCH)/Makefile
+include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -782,8 +780,8 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
-include $(srctree)/scripts/Makefile.kasan
-include $(srctree)/scripts/Makefile.extrawarn
+include scripts/Makefile.kasan
+include scripts/Makefile.extrawarn
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
KBUILD_CPPFLAGS += $(KCPPFLAGS)
@@ -1027,12 +1025,6 @@ headerdep:
$(srctree)/scripts/headerdep.pl -I$(srctree)/include
# ---------------------------------------------------------------------------
-
-PHONY += depend dep
-depend dep:
- @echo '*** Warning: make $@ is unnecessary now.'
-
-# ---------------------------------------------------------------------------
# Firmware install
INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware
export INSTALL_FW_PATH
diff --git a/README b/README
index a24ec89ba442..69c68fb4a109 100644
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
- Linux kernel release 3.x <http://kernel.org/>
+ Linux kernel release 4.x <http://kernel.org/>
-These are the release notes for Linux version 3. Read them carefully,
+These are the release notes for Linux version 4. Read them carefully,
as they tell you what this is all about, explain how to install the
kernel, and what to do if something goes wrong.
@@ -62,11 +62,7 @@ INSTALLING the kernel source:
directory where you have permissions (eg. your home directory) and
unpack it:
- gzip -cd linux-3.X.tar.gz | tar xvf -
-
- or
-
- bzip2 -dc linux-3.X.tar.bz2 | tar xvf -
+ xz -cd linux-4.X.tar.xz | tar xvf -
Replace "X" with the version number of the latest kernel.
@@ -75,16 +71,12 @@ INSTALLING the kernel source:
files. They should match the library, and not get messed up by
whatever the kernel-du-jour happens to be.
- - You can also upgrade between 3.x releases by patching. Patches are
- distributed in the traditional gzip and the newer bzip2 format. To
- install by patching, get all the newer patch files, enter the
- top level directory of the kernel source (linux-3.X) and execute:
-
- gzip -cd ../patch-3.x.gz | patch -p1
-
- or
+ - You can also upgrade between 4.x releases by patching. Patches are
+ distributed in the xz format. To install by patching, get all the
+ newer patch files, enter the top level directory of the kernel source
+ (linux-4.X) and execute:
- bzip2 -dc ../patch-3.x.bz2 | patch -p1
+ xz -cd ../patch-4.x.xz | patch -p1
Replace "x" for all versions bigger than the version "X" of your current
source tree, _in_order_, and you should be ok. You may want to remove
@@ -92,13 +84,13 @@ INSTALLING the kernel source:
that there are no failed patches (some-file-name# or some-file-name.rej).
If there are, either you or I have made a mistake.
- Unlike patches for the 3.x kernels, patches for the 3.x.y kernels
+ Unlike patches for the 4.x kernels, patches for the 4.x.y kernels
(also known as the -stable kernels) are not incremental but instead apply
- directly to the base 3.x kernel. For example, if your base kernel is 3.0
- and you want to apply the 3.0.3 patch, you must not first apply the 3.0.1
- and 3.0.2 patches. Similarly, if you are running kernel version 3.0.2 and
- want to jump to 3.0.3, you must first reverse the 3.0.2 patch (that is,
- patch -R) _before_ applying the 3.0.3 patch. You can read more on this in
+ directly to the base 4.x kernel. For example, if your base kernel is 4.0
+ and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1
+ and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and
+ want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is,
+ patch -R) _before_ applying the 4.0.3 patch. You can read more on this in
Documentation/applying-patches.txt
Alternatively, the script patch-kernel can be used to automate this
@@ -120,7 +112,7 @@ INSTALLING the kernel source:
SOFTWARE REQUIREMENTS
- Compiling and running the 3.x kernels requires up-to-date
+ Compiling and running the 4.x kernels requires up-to-date
versions of various software packages. Consult
Documentation/Changes for the minimum version numbers required
and how to get updates for these packages. Beware that using
@@ -137,12 +129,12 @@ BUILD directory for the kernel:
place for the output files (including .config).
Example:
- kernel source code: /usr/src/linux-3.X
+ kernel source code: /usr/src/linux-4.X
build directory: /home/name/build/kernel
To configure and build the kernel, use:
- cd /usr/src/linux-3.X
+ cd /usr/src/linux-4.X
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install
diff --git a/arch/Kconfig b/arch/Kconfig
index 05d7a8a458d5..a65eafb24997 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -32,7 +32,7 @@ config HAVE_OPROFILE
config OPROFILE_NMI_TIMER
def_bool y
- depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+ depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64
config KPROBES
bool "Kprobes"
@@ -446,6 +446,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
config HAVE_ARCH_TRANSPARENT_HUGEPAGE
bool
+config HAVE_ARCH_HUGE_VMAP
+ bool
+
config HAVE_ARCH_SOFT_DIRTY
bool
@@ -484,6 +487,18 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
This spares a stack switch and improves cache usage on softirq
processing.
+config PGTABLE_LEVELS
+ int
+ default 2
+
+config ARCH_HAS_ELF_RANDOMIZE
+ bool
+ help
+ An architecture supports choosing randomized locations for
+ stack, mmap, brk, and ET_DYN. Defined functions:
+ - arch_mmap_rnd()
+ - arch_randomize_brk()
+
#
# ABI hall of shame
#
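
The ARCH_HAS_ELF_RANDOMIZE help text above names arch_mmap_rnd() and arch_randomize_brk() as the hooks an architecture must provide. As a rough, hypothetical sketch (the 8-bit entropy width and the use of get_random_int() are assumptions for illustration, not part of this patch), arch_mmap_rnd() boils down to returning a page-aligned random offset:

/*
 * Illustrative only: the shape of an arch_mmap_rnd() implementation as
 * described by the ARCH_HAS_ELF_RANDOMIZE help text.  The 8 bits of
 * entropy and get_random_int() are example choices, not taken from
 * this patch.
 */
#include <linux/random.h>
#include <linux/mm.h>

unsigned long arch_mmap_rnd(void)
{
	/* a few random bits, shifted up so the offset stays page aligned */
	unsigned long rnd = get_random_int() & ((1UL << 8) - 1);

	return rnd << PAGE_SHIFT;
}
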
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b7ff9a318c31..bf9e9d3b3792 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -76,6 +76,10 @@ config GENERIC_ISA_DMA
bool
default y
+config PGTABLE_LEVELS
+ int
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index b4cf03690394..43a7559c448b 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -44,6 +44,7 @@ struct task_struct;
extern unsigned long thread_saved_pc(struct task_struct *);
/* Do necessary setup to start up a newly executed thread. */
+struct pt_regs;
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
/* Free all resources held by a thread. */
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index d5b98ab514bb..32e920a83ae5 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -18,7 +18,6 @@ struct thread_info {
unsigned int flags; /* low level flags */
unsigned int ieee_state; /* see fpu.h */
- struct exec_domain *exec_domain; /* execution domain */
mm_segment_t addr_limit; /* thread address space */
unsigned cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -35,7 +34,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
index a7fc0da25650..ff6a4b5ce927 100644
--- a/arch/arc/Kconfig.debug
+++ b/arch/arc/Kconfig.debug
@@ -2,19 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config EARLY_PRINTK
- bool "Early printk" if EMBEDDED
- default y
- help
- Write kernel log output directly into the VGA buffer or to a serial
- port.
-
- This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized. For normal operation
- it is not recommended because it looks ugly and doesn't cooperate
- with klogd/syslogd or the X server. You should normally N here,
- unless you want to debug such a crash.
-
config 16KSTACKS
bool "Use 16Kb for kernel stacks instead of 8Kb"
help
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
index 757e0c62c4f9..3b076fbd8366 100644
--- a/arch/arc/boot/dts/angel4.dts
+++ b/arch/arc/boot/dts/angel4.dts
@@ -64,7 +64,7 @@
};
arcpmu0: pmu {
- compatible = "snps,arc700-pmu";
+ compatible = "snps,arc700-pct";
};
};
};
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 278dacf2a3f9..d2ac4e56ba1d 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -2,6 +2,9 @@ CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -9,7 +12,7 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
@@ -21,12 +24,9 @@ CONFIG_MODULES=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_PLAT_FPGA_LEGACY=y
-# CONFIG_ARC_IDE is not set
-# CONFIG_ARCTANGENT_EMAC is not set
# CONFIG_ARC_HAS_RTSC is not set
CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -39,23 +39,23 @@ CONFIG_INET=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_CYPRESS is not set
# CONFIG_MOUSE_PS2_TRACKPOINT is not set
CONFIG_MOUSE_PS2_TOUCHKIT=y
-# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_ARC_PS2=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_ARC=y
-CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
@@ -72,4 +72,3 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index be33db8a2ee3..e2b1b1211b0d 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -30,6 +30,7 @@
#define ARC_REG_D_UNCACH_BCR 0x6A
#define ARC_REG_BPU_BCR 0xc0
#define ARC_REG_ISA_CFG_BCR 0xc1
+#define ARC_REG_RTT_BCR 0xF2
#define ARC_REG_SMART_BCR 0xFF
/* status32 Bits Positions */
@@ -50,11 +51,7 @@
* [15: 8] = Exception Cause Code
* [ 7: 0] = Exception Parameters (for certain types only)
*/
-#define ECR_VEC_MASK 0xff0000
-#define ECR_CODE_MASK 0x00ff00
-#define ECR_PARAM_MASK 0x0000ff
-
-/* Exception Cause Vector Values */
+#define ECR_V_MEM_ERR 0x01
#define ECR_V_INSN_ERR 0x02
#define ECR_V_MACH_CHK 0x20
#define ECR_V_ITLB_MISS 0x21
@@ -62,7 +59,8 @@
#define ECR_V_PROTV 0x23
#define ECR_V_TRAP 0x25
-/* Protection Violation Exception Cause Code Values */
+/* DTLB Miss and Protection Violation Cause Codes */
+
#define ECR_C_PROTV_INST_FETCH 0x00
#define ECR_C_PROTV_LOAD 0x01
#define ECR_C_PROTV_STORE 0x02
@@ -173,11 +171,11 @@
} \
}
-#define WRITE_BCR(reg, into) \
+#define WRITE_AUX(reg, into) \
{ \
unsigned int tmp; \
if (sizeof(tmp) == sizeof(into)) { \
- tmp = (*(unsigned int *)(into)); \
+ tmp = (*(unsigned int *)&(into)); \
write_aux_reg(reg, tmp); \
} else { \
extern void bogus_undefined(void); \
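
The WRITE_AUX() hunk above fixes the macro to take the address of its argument before reinterpreting it as an unsigned int, so a 32-bit bitfield struct is punned through its storage rather than through its (invalid) value. A minimal sketch of the usage pattern, with a hypothetical struct layout and register number:

/*
 * Sketch of the WRITE_AUX() pattern fixed above: a 32-bit wide bitfield
 * struct is punned to an unsigned int via its address and written to an
 * aux register.  The struct layout and the register value are examples;
 * write_aux_reg() is the accessor used in the hunk, declared here only
 * so the sketch stands alone.
 */
void write_aux_reg(unsigned int reg, unsigned int val);

struct bcr_example {
	unsigned int ver:8, cfg:24;	/* hypothetical 32-bit layout */
};

static void write_example_bcr(struct bcr_example val)
{
	/* &val, not val: reinterpret the struct's storage, not its value */
	unsigned int tmp = *(unsigned int *)&val;

	write_aux_reg(0xF2 /* example register number */, tmp);
}
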
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 067551b6920a..9917a45fc430 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
atomic_ops_unlock(flags); \
}
-#define ATOMIC_OP_RETURN(op, c_op) \
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 1a5bf07eefe2..4051e9525939 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -32,6 +32,20 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *m)
m += nr >> 5;
+ /*
+ * ARC ISA micro-optimization:
+ *
+ * Instructions dealing with bitpos only consider lower 5 bits (0-31)
+ * e.g (x << 33) is handled like (x << 1) by ASL instruction
+ * (mem pointer still needs adjustment to point to next word)
+ *
+ * Hence the masking to clamp @nr arg can be elided in general.
+ *
+ * However if @nr is a constant (above assumed it in a register),
+ * and greater than 31, gcc can optimize away (x << 33) to 0,
+ * as overflow, given the 32-bit ISA. Thus masking needs to be done
+ * for constant @nr, but no code is generated due to const prop.
+ */
if (__builtin_constant_p(nr))
nr &= 0x1f;
@@ -374,29 +388,20 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *m)
* This routine doesn't need to be atomic.
*/
static inline int
-__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
- return ((1UL << (nr & 31)) &
- (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int
-__test_bit(unsigned int nr, const volatile unsigned long *addr)
+test_bit(unsigned int nr, const volatile unsigned long *addr)
{
unsigned long mask;
addr += nr >> 5;
- /* ARC700 only considers 5 bits in bit-fiddling insn */
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
mask = 1 << nr;
return ((mask & *addr) != 0);
}
-#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
- __constant_test_bit((nr), (addr)) : \
- __test_bit((nr), (addr)))
-
/*
* Count the number of zeros, starting from MSB
* Helper for fls( ) friends
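
The comment added in this hunk explains why the bit position only needs explicit masking when @nr is a compile-time constant: the ARC shift instructions ignore all but the low five bits at run time, but the compiler would fold an over-wide constant shift to zero. A stand-alone sketch of the same clamp:

/*
 * Illustration of the clamp described above: a run-time @nr needs no
 * masking because the hardware only looks at bits [4:0] of the shift
 * amount, but a constant @nr > 31 must be clamped so constant folding
 * does not turn the mask into 0.
 */
static inline unsigned long example_bit_mask(unsigned int nr)
{
	if (__builtin_constant_p(nr))
		nr &= 0x1f;		/* keep constant folding honest */

	return 1UL << nr;		/* hardware masks nr % 32 at run time */
}
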
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index cbf755e32a03..2b8880e953a2 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -54,29 +54,13 @@ struct arc_reg_cc_build {
#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
-#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_LDC (PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_STC (PERF_COUNT_HW_MAX + 7)
+
+#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 8)
/*
- * The "generalized" performance events seem to really be a copy
- * of the available events on x86 processors; the mapping to ARC
- * events is not always possible 1-to-1. Fortunately, there doesn't
- * seem to be an exact definition for these events, so we can cheat
- * a bit where necessary.
- *
- * In particular, the following PERF events may behave a bit differently
- * compared to other architectures:
- *
- * PERF_COUNT_HW_CPU_CYCLES
- * Cycles not in halted state
- *
- * PERF_COUNT_HW_REF_CPU_CYCLES
- * Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES
- * for now as we don't do Dynamic Voltage/Frequency Scaling (yet)
- *
- * PERF_COUNT_HW_BUS_CYCLES
- * Unclear what this means, Intel uses 0x013c, which according to
- * their datasheet means "unhalted reference cycles". It sounds similar
- * to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it.
+ * Some ARC pct quirks:
*
* PERF_COUNT_HW_STALLED_CYCLES_BACKEND
* PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
@@ -91,21 +75,38 @@ struct arc_reg_cc_build {
* Note that I$ cache misses aren't counted by either of the two!
*/
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build)
+ * Below is the static map between perf generic/arc specific event_id and
+ * h/w condition names.
+ * At the time of probe, we loop thru each index and find it's name to
+ * complete the mapping of perf event_id to h/w index as latter is needed
+ * to program the counter really
+ */
static const char * const arc_pmu_ev_hw_map[] = {
+ /* count cycles */
[PERF_COUNT_HW_CPU_CYCLES] = "crun",
[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
[PERF_COUNT_HW_BUS_CYCLES] = "crun",
- [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
- [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
- [PERF_COUNT_ARC_DCLM] = "dclm",
- [PERF_COUNT_ARC_DCSM] = "dcsm",
- [PERF_COUNT_ARC_ICM] = "icm",
- [PERF_COUNT_ARC_BPOK] = "bpok",
- [PERF_COUNT_ARC_EDTLB] = "edtlb",
- [PERF_COUNT_ARC_EITLB] = "eitlb",
+
+ /* counts condition */
+ [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
+
+ [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */
+
+ [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */
+ [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */
+ [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
+ [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
+ [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
};
#define C(_x) PERF_COUNT_HW_CACHE_##_x
@@ -114,11 +115,11 @@ static const char * const arc_pmu_ev_hw_map[] = {
static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
},
[C(OP_PREFETCH)] = {
@@ -128,7 +129,7 @@ static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
[C(L1I)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
[C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
},
[C(OP_WRITE)] = {
@@ -156,9 +157,10 @@ static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
[C(DTLB)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
},
+ /* DTLB LD/ST Miss not segregated by h/w*/
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
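
The comment block above describes the two-step scheme: a static table maps perf event ids to fixed condition names, and the probe code later resolves each name to the hardware index that actually programs the counter. A reduced sketch of that resolution loop, with read_cc_name() standing in (as an assumption) for the ARC_REG_CC_INDEX / ARC_REG_CC_NAME0/1 aux register reads done in perf_event.c:

/*
 * Reduced sketch of the probe-time mapping: walk every hardware
 * condition index, read its name, and record the index against any perf
 * event id whose entry in the name table matches.  read_cc_name() is a
 * hypothetical stand-in for the aux register accesses in the real code.
 */
#include <string.h>

static void map_events(int n_conditions, int *ev_hw_idx,
		       const char *const *ev_hw_map, int n_events,
		       const char *(*read_cc_name)(int idx))
{
	int i, j;

	for (i = 0; i < n_events; i++)
		ev_hw_idx[i] = -1;			/* unmapped by default */

	for (j = 0; j < n_conditions; j++) {
		const char *name = read_cc_name(j);

		for (i = 0; i < n_events; i++)
			if (ev_hw_map[i] && !strcmp(ev_hw_map[i], name))
				ev_hw_idx[i] = j;	/* event i is counted by condition j */
	}
}
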
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 1163a1838ac1..aca0d5a45c7b 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -43,7 +43,6 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct task_struct *task; /* main task structure */
mm_segment_t addr_limit; /* thread address space */
- struct exec_domain *exec_domain;/* execution domain */
__u32 cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
};
@@ -56,7 +55,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index ae1c485cbc68..fd2ec50102f2 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -16,6 +16,7 @@
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
+#include <asm/stacktrace.h>
struct arc_pmu {
struct pmu pmu;
@@ -25,6 +26,46 @@ struct arc_pmu {
int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
};
+struct arc_callchain_trace {
+ int depth;
+ void *perf_stuff;
+};
+
+static int callchain_trace(unsigned int addr, void *data)
+{
+ struct arc_callchain_trace *ctrl = data;
+ struct perf_callchain_entry *entry = ctrl->perf_stuff;
+ perf_callchain_store(entry, addr);
+
+ if (ctrl->depth++ < 3)
+ return 0;
+
+ return -1;
+}
+
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ struct arc_callchain_trace ctrl = {
+ .depth = 0,
+ .perf_stuff = entry,
+ };
+
+ arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
+}
+
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ /*
+ * User stack can't be unwound trivially with kernel dwarf unwinder
+ * So for now just record the user PC
+ */
+ perf_callchain_store(entry, instruction_pointer(regs));
+}
+
+static struct arc_pmu *arc_pmu;
+
/* read counter #idx; note that counter# != event# on ARC! */
static uint64_t arc_pmu_read_counter(int idx)
{
@@ -47,7 +88,6 @@ static uint64_t arc_pmu_read_counter(int idx)
static void arc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
- struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
uint64_t prev_raw_count, new_raw_count;
int64_t delta;
@@ -89,13 +129,16 @@ static int arc_pmu_cache_event(u64 config)
if (ret == CACHE_OP_UNSUPPORTED)
return -ENOENT;
+ pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
+ cache_type, cache_op, cache_result, ret,
+ arc_pmu_ev_hw_map[ret]);
+
return ret;
}
/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
- struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
struct hw_perf_event *hwc = &event->hw;
int ret;
@@ -106,8 +149,9 @@ static int arc_pmu_event_init(struct perf_event *event)
if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
return -ENOENT;
hwc->config = arc_pmu->ev_hw_idx[event->attr.config];
- pr_debug("initializing event %d with cfg %d\n",
- (int) event->attr.config, (int) hwc->config);
+ pr_debug("init event %d with h/w %d \'%s\'\n",
+ (int) event->attr.config, (int) hwc->config,
+ arc_pmu_ev_hw_map[event->attr.config]);
return 0;
case PERF_TYPE_HW_CACHE:
ret = arc_pmu_cache_event(event->attr.config);
@@ -183,8 +227,6 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
static void arc_pmu_del(struct perf_event *event, int flags)
{
- struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
-
arc_pmu_stop(event, PERF_EF_UPDATE);
__clear_bit(event->hw.idx, arc_pmu->used_mask);
@@ -194,7 +236,6 @@ static void arc_pmu_del(struct perf_event *event, int flags)
/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
- struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -247,10 +288,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
BUG_ON(pct_bcr.c > ARC_PMU_MAX_HWEVENTS);
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
- if (!cc_bcr.v) {
- pr_err("Performance counters exist, but no countable conditions?\n");
- return -ENODEV;
- }
+ BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
@@ -263,19 +301,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
arc_pmu->n_counters, arc_pmu->counter_size, cc_bcr.c);
cc_name.str[8] = 0;
- for (i = 0; i < PERF_COUNT_HW_MAX; i++)
+ for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
arc_pmu->ev_hw_idx[i] = -1;
+ /* loop thru all available h/w condition indexes */
for (j = 0; j < cc_bcr.c; j++) {
write_aux_reg(ARC_REG_CC_INDEX, j);
cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
+
+ /* See if it has been mapped to a perf event_id */
for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
if (arc_pmu_ev_hw_map[i] &&
!strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
strlen(arc_pmu_ev_hw_map[i])) {
- pr_debug("mapping %d to idx %d with name %s\n",
- i, j, cc_name.str);
+ pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+ i, cc_name.str, j);
arc_pmu->ev_hw_idx[i] = j;
}
}
@@ -302,7 +343,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id arc_pmu_match[] = {
- { .compatible = "snps,arc700-pmu" },
+ { .compatible = "snps,arc700-pct" },
{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
@@ -310,7 +351,7 @@ MODULE_DEVICE_TABLE(of, arc_pmu_match);
static struct platform_driver arc_pmu_driver = {
.driver = {
- .name = "arc700-pmu",
+ .name = "arc700-pct",
.of_match_table = of_match_ptr(arc_pmu_match),
},
.probe = arc_pmu_device_probe,
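
The new perf_callchain_kernel() above drives arc_unwind_core() with a callback that records return addresses and stops after a small number of frames. A minimal sketch of the same depth-limited callback pattern, with hypothetical names in place of perf_callchain_store() and struct perf_callchain_entry:

/*
 * Sketch of the depth-limited unwind callback used above: store each
 * return address and ask the unwinder to stop (non-zero return) once a
 * fixed number of frames has been recorded.  MAX_FRAMES and the frames[]
 * array are illustrative only.
 */
#define MAX_FRAMES 4

struct trace_ctl {
	int depth;
	unsigned long frames[MAX_FRAMES];
};

static int trace_cb(unsigned int addr, void *data)
{
	struct trace_ctl *ctl = data;

	ctl->frames[ctl->depth] = addr;

	if (++ctl->depth < MAX_FRAMES)
		return 0;	/* keep unwinding */

	return -1;		/* depth reached: stop the unwinder */
}
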
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 98c00a2d4dd9..e095c557afdd 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -49,7 +49,10 @@ void arch_cpu_idle(void)
asmlinkage void ret_from_fork(void);
-/* Layout of Child kernel mode stack as setup at the end of this function is
+/*
+ * Copy architecture-specific thread state
+ *
+ * Layout of Child kernel mode stack as setup at the end of this function is
*
* | ... |
* | ... |
@@ -81,7 +84,7 @@ asmlinkage void ret_from_fork(void);
* ------------------ <===== END of PAGE
*/
int copy_thread(unsigned long clone_flags,
- unsigned long usp, unsigned long arg,
+ unsigned long usp, unsigned long kthread_arg,
struct task_struct *p)
{
struct pt_regs *c_regs; /* child's pt_regs */
@@ -112,7 +115,7 @@ int copy_thread(unsigned long clone_flags,
if (unlikely(p->flags & PF_KTHREAD)) {
memset(c_regs, 0, sizeof(struct pt_regs));
- c_callee->r13 = arg; /* argument to kernel thread */
+ c_callee->r13 = kthread_arg;
c_callee->r14 = usp; /* function */
return 0;
@@ -155,8 +158,6 @@ int copy_thread(unsigned long clone_flags,
*/
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
{
- set_fs(USER_DS); /* user space */
-
regs->sp = usp;
regs->ret = pc;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 900f68a70088..1d167c6df8ca 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -120,7 +120,10 @@ static void read_arc_build_cfg_regs(void)
READ_BCR(ARC_REG_SMART_BCR, bcr);
cpu->extn.smart = bcr.ver ? 1 : 0;
- cpu->extn.debug = cpu->extn.ap | cpu->extn.smart;
+ READ_BCR(ARC_REG_RTT_BCR, bcr);
+ cpu->extn.rtt = bcr.ver ? 1 : 0;
+
+ cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
}
static const struct cpuinfo_data arc_cpu_tbl[] = {
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index edda76fae83f..2251fb4bbfd7 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -171,18 +171,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
return frame;
}
-/*
- * translate the signal
- */
-static inline int map_sig(int sig)
-{
- struct thread_info *thread = current_thread_info();
- if (thread->exec_domain && thread->exec_domain->signal_invmap
- && sig < 32)
- sig = thread->exec_domain->signal_invmap[sig];
- return sig;
-}
-
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
@@ -231,7 +219,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
return err;
/* #1 arg to the user Signal handler */
- regs->r0 = map_sig(ksig->sig);
+ regs->r0 = ksig->sig;
/* setup PC of user space signal handler */
regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 3eadfdabc322..c927aa84e652 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -42,7 +42,7 @@ void die(const char *str, struct pt_regs *regs, unsigned long address)
* -for kernel, chk if due to copy_(to|from)_user, otherwise die()
*/
static noinline int
-handle_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
+unhandled_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
{
if (user_mode(regs)) {
struct task_struct *tsk = current;
@@ -71,7 +71,7 @@ int name(unsigned long address, struct pt_regs *regs) \
.si_code = sicode, \
.si_addr = (void __user *)address, \
}; \
- return handle_exception(str, regs, &info);\
+ return unhandled_exception(str, regs, &info);\
}
/*
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 1badf9b84b51..e00a01879025 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -52,7 +52,7 @@ static void show_callee_regs(struct callee_regs *cregs)
print_reg_file(&(cregs->r13), 13);
}
-void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
{
struct path path;
char *path_nm = NULL;
@@ -77,7 +77,6 @@ void print_task_path_n_nm(struct task_struct *tsk, char *buf)
done:
pr_info("Path: %s\n", path_nm);
}
-EXPORT_SYMBOL(print_task_path_n_nm);
static void show_faulting_vma(unsigned long address, char *buf)
{
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index e550b117ec4f..93c6ea52b671 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -841,7 +841,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
break;
case DW_CFA_GNU_window_save:
default:
- unw_debug("UNKNOW OPCODE 0x%x\n", opcode);
+ unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
result = 0;
break;
}
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 8c3a3e02ba92..12b2100db073 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -266,7 +266,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
* Machine specific helpers for Entire D-Cache or Per Line ops
*/
-static unsigned int __before_dc_op(const int op)
+static inline unsigned int __before_dc_op(const int op)
{
unsigned int reg = reg;
@@ -284,7 +284,7 @@ static unsigned int __before_dc_op(const int op)
return reg;
}
-static void __after_dc_op(const int op, unsigned int reg)
+static inline void __after_dc_op(const int op, unsigned int reg)
{
if (op & OP_FLUSH) /* flush / flush-n-inv both wait */
while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 523412369f70..d44eedd8c322 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -71,7 +71,7 @@ early_param("initrd", early_initrd);
*/
void __init setup_arch_memory(void)
{
- unsigned long zones_size[MAX_NR_ZONES] = { 0, 0 };
+ unsigned long zones_size[MAX_NR_ZONES];
unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
init_mm.start_code = (unsigned long)_text;
@@ -90,7 +90,7 @@ void __init setup_arch_memory(void)
/*------------- externs in mm need setting up ---------------*/
/* first page of system - kernel .vector starts here */
- min_low_pfn = PFN_DOWN(CONFIG_LINUX_LINK_BASE);
+ min_low_pfn = ARCH_PFN_OFFSET;
/* Last usable page of low mem (no HIGHMEM yet for ARC port) */
max_low_pfn = max_pfn = PFN_DOWN(end_mem);
@@ -111,7 +111,7 @@ void __init setup_arch_memory(void)
/*-------------- node setup --------------------------------*/
memset(zones_size, 0, sizeof(zones_size));
- zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
+ zones_size[ZONE_NORMAL] = max_mapnr;
/*
* We can't use the helper free_area_init(zones[]) because it uses
@@ -123,6 +123,8 @@ void __init setup_arch_memory(void)
zones_size, /* num pages per zone */
min_low_pfn, /* first pfn of node */
NULL); /* NO holes */
+
+ high_memory = (void *)end_mem;
}
/*
@@ -133,7 +135,6 @@ void __init setup_arch_memory(void)
*/
void __init mem_init(void)
{
- high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);
free_all_bootmem();
mem_init_print_info(NULL);
}
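
The init.c hunks above size ZONE_NORMAL from max_mapnr, take min_low_pfn from ARCH_PFN_OFFSET, and set high_memory at the end of setup_arch_memory(). The arithmetic behind those values, with illustrative numbers (the max_mapnr relation is assumed from the surrounding ARC mm code, not shown in this hunk):

/*
 * Worked example of the pfn bookkeeping in setup_arch_memory().  The
 * link base, RAM size and page size are example values, and
 * max_mapnr = max_low_pfn - min_low_pfn is an assumption.
 */
#include <stdio.h>

int main(void)
{
	unsigned long link_base  = 0x80000000UL;	/* CONFIG_LINUX_LINK_BASE, example */
	unsigned long mem_sz     = 256UL << 20;		/* 256 MB of RAM, example */
	unsigned long page_shift = 12;			/* 4 KB pages */

	unsigned long end_mem     = link_base + mem_sz;
	unsigned long min_low_pfn = link_base >> page_shift;	/* ARCH_PFN_OFFSET */
	unsigned long max_low_pfn = end_mem >> page_shift;	/* PFN_DOWN(end_mem) */
	unsigned long max_mapnr   = max_low_pfn - min_low_pfn;	/* pages in ZONE_NORMAL */

	printf("ZONE_NORMAL spans %lu pages, high_memory = %#lx\n",
	       max_mapnr, end_mem);
	return 0;
}
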
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf4c0c99aa25..45df48ba0b12 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1,8 +1,8 @@
config ARM
bool
default y
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -21,6 +21,7 @@ config ARM
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
@@ -286,6 +287,11 @@ config GENERIC_BUG
def_bool y
depends on BUG
+config PGTABLE_LEVELS
+ int
+ default 3 if ARM_LPAE
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -356,19 +362,6 @@ config ARCH_VERSATILE
help
This enables support for ARM Ltd Versatile board.
-config ARCH_AT91
- bool "Atmel AT91"
- select ARCH_REQUIRE_GPIOLIB
- select CLKDEV_LOOKUP
- select IRQ_DOMAIN
- select NEED_MACH_IO_H if PCCARD
- select PINCTRL
- select PINCTRL_AT91
- select USE_OF
- help
- This enables support for systems based on Atmel
- AT91RM9200, AT91SAM9 and SAMA5 processors.
-
config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
select ARCH_REQUIRE_GPIOLIB
@@ -626,18 +619,6 @@ config ARCH_PXA
help
Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
-config ARCH_MSM
- bool "Qualcomm MSM (non-multiplatform)"
- select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLK
- select GENERIC_CLOCKEVENTS
- help
- Support for Qualcomm MSM/QSD based systems. This runs on the
- apps processor of the MSM/QSD and depends on a shared memory
- interface to the modem processor which runs the baseband
- stack and controls some vital subsystems
- (clock and power control, etc).
-
config ARCH_SHMOBILE_LEGACY
bool "Renesas ARM SoCs (non-multiplatform)"
select ARCH_SHMOBILE
@@ -647,7 +628,6 @@ config ARCH_SHMOBILE_LEGACY
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
- select HAVE_MACH_CLKDEV
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
select MULTI_IRQ_HANDLER
@@ -845,6 +825,8 @@ config ARCH_VIRT
#
source "arch/arm/mach-mvebu/Kconfig"
+source "arch/arm/mach-alpine/Kconfig"
+
source "arch/arm/mach-asm9260/Kconfig"
source "arch/arm/mach-at91/Kconfig"
@@ -891,8 +873,6 @@ source "arch/arm/mach-ks8695/Kconfig"
source "arch/arm/mach-meson/Kconfig"
-source "arch/arm/mach-msm/Kconfig"
-
source "arch/arm/mach-moxart/Kconfig"
source "arch/arm/mach-mv78xx0/Kconfig"
@@ -1058,7 +1038,7 @@ config ARM_ERRATA_430973
depends on CPU_V7
help
This option enables the workaround for the 430973 Cortex-A8
- (r1p0..r1p2) erratum. If a code sequence containing an ARM/Thumb
+ r1p* erratum. If a code sequence containing an ARM/Thumb
interworking branch is replaced with another code sequence at the
same virtual address, whether due to self-modifying code or virtual
to physical address re-mapping, Cortex-A8 does not recover from the
@@ -1127,6 +1107,7 @@ config ARM_ERRATA_742231
config ARM_ERRATA_643719
bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
depends on CPU_V7 && SMP
+ default y
help
This option enables the workaround for the 643719 Cortex-A9 (prior to
r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
@@ -1344,7 +1325,7 @@ config SMP
If you don't know what to do here, say N.
config SMP_ON_UP
- bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
+ bool "Allow booting SMP kernel on uniprocessor systems"
depends on SMP && !XIP_KERNEL && MMU
default y
help
@@ -1516,7 +1497,7 @@ config HZ_FIXED
int
default 200 if ARCH_EBSA110 || ARCH_S3C24XX || \
ARCH_S5PV210 || ARCH_EXYNOS4
- default AT91_TIMER_HZ if ARCH_AT91
+ default 128 if SOC_AT91RM9200
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE_LEGACY
default 0
@@ -1849,35 +1830,6 @@ config ZBOOT_ROM
Say Y here if you intend to execute your compressed kernel image
(zImage) directly from ROM or flash. If unsure, say N.
-choice
- prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
- depends on ZBOOT_ROM && ARCH_SH7372
- default ZBOOT_ROM_NONE
- help
- Include experimental SD/MMC loading code in the ROM-able zImage.
- With this enabled it is possible to write the ROM-able zImage
- kernel image to an MMC or SD card and boot the kernel straight
- from the reset vector. At reset the processor Mask ROM will load
- the first part of the ROM-able zImage which in turn loads the
- rest the kernel image to RAM.
-
-config ZBOOT_ROM_NONE
- bool "No SD/MMC loader in zImage (EXPERIMENTAL)"
- help
- Do not load image from SD or MMC
-
-config ZBOOT_ROM_MMCIF
- bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
- help
- Load image from MMCIF hardware block.
-
-config ZBOOT_ROM_SH_MOBILE_SDHI
- bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)"
- help
- Load image from SDHI hardware block
-
-endchoice
-
config ARM_APPENDED_DTB
bool "Use appended device tree blob to zImage (EXPERIMENTAL)"
depends on OF
@@ -2126,16 +2078,6 @@ menu "Userspace binary formats"
source "fs/Kconfig.binfmt"
-config ARTHUR
- tristate "RISC OS personality"
- depends on !AEABI
- help
- Say Y here to include the kernel code necessary if you want to run
- Acorn RISC OS/Arthur binaries under Linux. This code is still very
- experimental; if this sounds frightening, say N and sleep in peace.
- You can also say M here to compile this support as a module (which
- will be called arthur).
-
endmenu
menu "Power management options"
@@ -2161,6 +2103,8 @@ source "net/Kconfig"
source "drivers/Kconfig"
+source "drivers/firmware/Kconfig"
+
source "fs/Kconfig"
source "arch/arm/Kconfig.debug"
@@ -2168,6 +2112,9 @@ source "arch/arm/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
+if CRYPTO
+source "arch/arm/crypto/Kconfig"
+endif
source "lib/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 970de7518341..0c12ffb155a2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -93,6 +93,14 @@ choice
prompt "Kernel low-level debugging port"
depends on DEBUG_LL
+ config DEBUG_ALPINE_UART0
+ bool "Kernel low-level debugging messages via Alpine UART0"
+ depends on ARCH_ALPINE
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Alpine based platforms.
+
config DEBUG_ASM9260_UART
bool "Kernel low-level debugging via asm9260 UART"
depends on MACH_ASM9260
@@ -448,25 +456,6 @@ choice
Say Y here if you want kernel low-level debugging support
on MMP UART3.
- config DEBUG_MSM_UART
- bool "Kernel low-level debugging messages via MSM UART"
- depends on ARCH_MSM
- help
- Say Y here if you want the debug print routines to direct
- their output to the serial port on MSM devices.
-
- ARCH DEBUG_UART_PHYS DEBUG_UART_VIRT #
- MSM7X00A, QSD8X50 0xa9a00000 0xe1000000 UART1
- MSM7X00A, QSD8X50 0xa9b00000 0xe1000000 UART2
- MSM7X00A, QSD8X50 0xa9c00000 0xe1000000 UART3
-
- MSM7X30 0xaca00000 0xe1000000 UART1
- MSM7X30 0xacb00000 0xe1000000 UART2
- MSM7X30 0xacc00000 0xe1000000 UART3
-
- Please adjust DEBUG_UART_PHYS and DEBUG_UART_BASE configuration
- options based on your needs.
-
config DEBUG_QCOM_UARTDM
bool "Kernel low-level debugging messages via QCOM UARTDM"
depends on ARCH_QCOM
@@ -806,7 +795,7 @@ choice
via SCIF2 on Renesas R-Car H1 (R8A7779).
config DEBUG_RCAR_GEN2_SCIF0
- bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7793)"
+ bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7793"
depends on ARCH_R8A7790 || ARCH_R8A7791 || ARCH_R8A7793
help
Say Y here if you want kernel low-level debugging support
@@ -821,12 +810,11 @@ choice
via SCIF2 on Renesas R-Car E2 (R8A7794).
config DEBUG_RMOBILE_SCIFA0
- bool "Kernel low-level debugging messages via SCIFA0 on R8A73A4/SH7372"
- depends on ARCH_R8A73A4 || ARCH_SH7372
+ bool "Kernel low-level debugging messages via SCIFA0 on R8A73A4"
+ depends on ARCH_R8A73A4
help
Say Y here if you want kernel low-level debugging support
- via SCIFA0 on Renesas R-Mobile APE6 (R8A73A4) or SH-Mobile
- AP4 (SH7372).
+ via SCIFA0 on Renesas R-Mobile APE6 (R8A73A4).
config DEBUG_RMOBILE_SCIFA1
bool "Kernel low-level debugging messages via SCIFA1 on R8A7740"
@@ -1295,7 +1283,7 @@ config DEBUG_LL_INCLUDE
DEBUG_IMX6SL_UART || \
DEBUG_IMX6SX_UART
default "debug/ks8695.S" if DEBUG_KS8695_UART
- default "debug/msm.S" if DEBUG_MSM_UART || DEBUG_QCOM_UARTDM
+ default "debug/msm.S" if DEBUG_QCOM_UARTDM
default "debug/netx.S" if DEBUG_NETX_UART
default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
default "debug/renesas-scif.S" if DEBUG_R7S72100_SCIF2
@@ -1388,7 +1376,6 @@ config DEBUG_UART_PHYS
default 0x80230000 if DEBUG_PICOXCELL_UART
default 0x808c0000 if ARCH_EP93XX
default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
- default 0xa9a00000 if DEBUG_MSM_UART
default 0xb0060000 if DEBUG_SIRFPRIMA2_UART1
default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
default 0xc0013000 if DEBUG_U300_UART
@@ -1417,6 +1404,7 @@ config DEBUG_UART_PHYS
default 0xf8b00000 if DEBUG_HIX5HD2_UART
default 0xf991e000 if DEBUG_QCOM_UARTDM
default 0xfcb00000 if DEBUG_HI3620_UART
+ default 0xfd883000 if DEBUG_ALPINE_UART0
default 0xfe800000 if ARCH_IOP32X
default 0xff690000 if DEBUG_RK32_UART2
default 0xffc02000 if DEBUG_SOCFPGA_UART
@@ -1433,7 +1421,7 @@ config DEBUG_UART_PHYS
DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
DEBUG_LL_UART_EFM32 || \
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
- DEBUG_MSM_UART || DEBUG_NETX_UART || \
+ DEBUG_NETX_UART || \
DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \
DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \
DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
@@ -1446,7 +1434,6 @@ config DEBUG_UART_VIRT
hex "Virtual base address of debug UART"
default 0xe0000a00 if DEBUG_NETX_UART
default 0xe0010fe0 if ARCH_RPC
- default 0xe1000000 if DEBUG_MSM_UART
default 0xf0000be0 if ARCH_EBSA110
default 0xf0010000 if DEBUG_ASM9260_UART
default 0xf01fb000 if DEBUG_NOMADIK_UART
@@ -1483,6 +1470,7 @@ config DEBUG_UART_VIRT
default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
default 0xfd000000 if ARCH_SPEAR13XX
default 0xfd012000 if ARCH_MV78XX0
+ default 0xfd883000 if DEBUG_ALPINE_UART0
default 0xfde12000 if ARCH_DOVE
default 0xfe012000 if ARCH_ORION5X
default 0xf31004c0 if DEBUG_MESON_UARTAO
@@ -1526,7 +1514,7 @@ config DEBUG_UART_VIRT
default DEBUG_UART_PHYS if !MMU
depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
- DEBUG_MSM_UART || DEBUG_NETX_UART || \
+ DEBUG_NETX_UART || \
DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
@@ -1543,7 +1531,7 @@ config DEBUG_UART_8250_WORD
depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
depends on DEBUG_UART_8250_SHIFT >= 2
default y if DEBUG_PICOXCELL_UART || DEBUG_SOCFPGA_UART || \
- ARCH_KEYSTONE || \
+ ARCH_KEYSTONE || DEBUG_ALPINE_UART0 || \
DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
DEBUG_DAVINCI_DA8XX_UART2 || \
DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \
@@ -1556,7 +1544,7 @@ config DEBUG_UART_8250_FLOW_CONTROL
config DEBUG_UNCOMPRESS
bool
- depends on ARCH_MULTIPLATFORM || ARCH_MSM || PLAT_SAMSUNG
+ depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG
default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
(!DEBUG_TEGRA_UART || !ZBOOT_ROM)
help
@@ -1573,7 +1561,8 @@ config DEBUG_UNCOMPRESS
config UNCOMPRESS_INCLUDE
string
default "debug/uncompress.h" if ARCH_MULTIPLATFORM || ARCH_MSM || \
- PLAT_SAMSUNG || ARCH_EFM32
+ PLAT_SAMSUNG || ARCH_EFM32 || \
+ ARCH_SHMOBILE_LEGACY
default "mach/uncompress.h"
config EARLY_PRINTK
@@ -1610,59 +1599,6 @@ config DEBUG_SET_MODULE_RONX
against certain classes of kernel exploits.
If in doubt, say "N".
-menuconfig CORESIGHT
- bool "CoreSight Tracing Support"
- select ARM_AMBA
- help
- This framework provides a kernel interface for the CoreSight debug
- and trace drivers to register themselves with. It's intended to build
- a topological view of the CoreSight components based on a DT
- specification and configure the right serie of components when a
- trace source gets enabled.
-
-if CORESIGHT
-config CORESIGHT_LINKS_AND_SINKS
- bool "CoreSight Link and Sink drivers"
- help
- This enables support for CoreSight link and sink drivers that are
- responsible for transporting and collecting the trace data
- respectively. Link and sinks are dynamically aggregated with a trace
- entity at run time to form a complete trace path.
-
-config CORESIGHT_LINK_AND_SINK_TMC
- bool "Coresight generic TMC driver"
- depends on CORESIGHT_LINKS_AND_SINKS
- help
- This enables support for the Trace Memory Controller driver. Depending
- on its configuration the device can act as a link (embedded trace router
- - ETR) or sink (embedded trace FIFO). The driver complies with the
- generic implementation of the component without special enhancement or
- added features.
-
-config CORESIGHT_SINK_TPIU
- bool "Coresight generic TPIU driver"
- depends on CORESIGHT_LINKS_AND_SINKS
- help
- This enables support for the Trace Port Interface Unit driver, responsible
- for bridging the gap between the on-chip coresight components and a trace
- port collection engine, typically connected to an external host for use
- case capturing more traces than the on-board coresight memory can handle.
-
-config CORESIGHT_SINK_ETBV10
- bool "Coresight ETBv1.0 driver"
- depends on CORESIGHT_LINKS_AND_SINKS
- help
- This enables support for the Embedded Trace Buffer version 1.0 driver
- that complies with the generic implementation of the component without
- special enhancement or added features.
+source "drivers/hwtracing/coresight/Kconfig"
-config CORESIGHT_SOURCE_ETM3X
- bool "CoreSight Embedded Trace Macrocell 3.x driver"
- select CORESIGHT_LINKS_AND_SINKS
- help
- This driver provides support for processor ETM3.x and PTM1.x modules,
- which allows tracing the instructions that a processor is executing
- This is primarily useful for instruction level tracing. Depending
- the ETM version data tracing may also be available.
-endif
endmenu
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index eb7bb511f853..985227cbbd1b 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -13,7 +13,7 @@
# Ensure linker flags are correct
LDFLAGS :=
-LDFLAGS_vmlinux :=-p --no-undefined -X
+LDFLAGS_vmlinux :=-p --no-undefined -X --pic-veneer
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
@@ -136,13 +136,13 @@ textofs-$(CONFIG_PM_H1940) := 0x00108000
ifeq ($(CONFIG_ARCH_SA1100),y)
textofs-$(CONFIG_SA1111) := 0x00208000
endif
-textofs-$(CONFIG_ARCH_MSM7X30) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
# Machine directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
+machine-$(CONFIG_ARCH_ALPINE) += alpine
machine-$(CONFIG_ARCH_AT91) += at91
machine-$(CONFIG_ARCH_AXXIA) += axxia
machine-$(CONFIG_ARCH_BCM) += bcm
@@ -171,7 +171,6 @@ machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx
machine-$(CONFIG_ARCH_MESON) += meson
machine-$(CONFIG_ARCH_MMP) += mmp
machine-$(CONFIG_ARCH_MOXART) += moxart
-machine-$(CONFIG_ARCH_MSM) += msm
machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0
machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_MXC) += imx
@@ -264,6 +263,7 @@ core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ)
core-$(CONFIG_VFP) += arch/arm/vfp/
core-$(CONFIG_XEN) += arch/arm/xen/
core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
+core-$(CONFIG_VDSO) += arch/arm/vdso/
# If we have a machine-specific directory, then include it in the build.
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
@@ -321,6 +321,12 @@ dtbs: prepare scripts
dtbs_install:
$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
+PHONY += vdso_install
+vdso_install:
+ifeq ($(CONFIG_VDSO),y)
+ $(Q)$(MAKE) $(build)=arch/arm/vdso $@
+endif
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
@@ -345,4 +351,5 @@ define archhelp
echo ' Install using (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
+ echo ' vdso_install - Install unstripped vdso.so to $$(INSTALL_MOD_PATH)/vdso'
endef
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index ec2f8065f955..9eca7aee927f 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -12,7 +12,7 @@
#
ifneq ($(MACHINE),)
-include $(srctree)/$(MACHINE)/Makefile.boot
+include $(MACHINE)/Makefile.boot
endif
# Note: the following conditions must always be true:
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 3ea230aa94b7..6e1fb2b2ecc7 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -6,21 +6,6 @@
OBJS =
-# Ensure that MMCIF loader code appears early in the image
-# to minimise that number of bocks that have to be read in
-# order to load it.
-ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
-OBJS += mmcif-sh7372.o
-endif
-
-# Ensure that SDHI loader code appears early in the image
-# to minimise that number of bocks that have to be read in
-# order to load it.
-ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y)
-OBJS += sdhi-shmobile.o
-OBJS += sdhi-sh7372.o
-endif
-
AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
HEAD = head.o
OBJS += misc.o decompress.o
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S
index e7f80928949c..22a75259faa3 100644
--- a/arch/arm/boot/compressed/head-shmobile.S
+++ b/arch/arm/boot/compressed/head-shmobile.S
@@ -25,36 +25,6 @@
/* load board-specific initialization code */
#include <mach/zboot.h>
-#if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI)
- /* Load image from MMC/SD */
- adr sp, __tmp_stack + 256
- ldr r0, __image_start
- ldr r1, __image_end
- subs r1, r1, r0
- ldr r0, __load_base
- bl mmc_loader
-
- /* Jump to loaded code */
- ldr r0, __loaded
- ldr r1, __image_start
- sub r0, r0, r1
- ldr r1, __load_base
- add pc, r0, r1
-
-__image_start:
- .long _start
-__image_end:
- .long _got_end
-__load_base:
- .long MEMORY_START + 0x02000000 @ Load at 32Mb into SDRAM
-__loaded:
- .long __continue
- .align
-__tmp_stack:
- .space 256
-__continue:
-#endif /* CONFIG_ZBOOT_ROM_MMC || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */
-
adr r0, dtb_info
ldmia r0, {r1, r3, r4, r5, r7}
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index c41a793b519c..2c45b5709fa4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -10,8 +10,11 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/v7m.h>
+
+ AR_CLASS( .arch armv7-a )
+ M_CLASS( .arch armv7-m )
- .arch armv7-a
/*
* Debugging stuff
*
@@ -114,7 +117,12 @@
* sort out different calling conventions
*/
.align
- .arm @ Always enter in ARM state
+ /*
+ * Always enter in ARM state for CPUs that support the ARM ISA.
+ * As of today (2014) that's exactly the members of the A and R
+ * classes.
+ */
+ AR_CLASS( .arm )
start:
.type start,#function
.rept 7
@@ -132,14 +140,15 @@ start:
THUMB( .thumb )
1:
- ARM_BE8( setend be ) @ go BE8 if compiled for BE8
- mrs r9, cpsr
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
+ AR_CLASS( mrs r9, cpsr )
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install @ get into SVC mode, reversibly
#endif
mov r7, r1 @ save architecture ID
mov r8, r2 @ save atags pointer
+#ifndef CONFIG_CPU_V7M
/*
* Booting from Angel - need to enter SVC mode and disable
* FIQs/IRQs (numeric definitions from angel arm.h source).
@@ -155,6 +164,7 @@ not_angel:
safe_svcmode_maskall r0
msr spsr_cxsf, r9 @ Save the CPU boot mode in
@ SPSR
+#endif
/*
* Note that some cache flushing and other stuff may
* be needed here - is there an Angel SWI call for this?
@@ -168,9 +178,26 @@ not_angel:
.text
#ifdef CONFIG_AUTO_ZRELADDR
- @ determine final kernel image address
+ /*
+ * Find the start of physical memory. As we are executing
+ * without the MMU on, we are in the physical address space.
+ * We just need to get rid of any offset by aligning the
+ * address.
+ *
+ * This alignment is a balance between the requirements of
+ * different platforms - we have chosen 128MB to allow
+ * platforms which align the start of their physical memory
+ * to 128MB to use this feature, while allowing the zImage
+ * to be placed within the first 128MB of memory on other
+ * platforms. Increasing the alignment means we place
+ * stricter alignment requirements on the start of physical
+ * memory, but relaxing it means that we break people who
+ * are already placing their zImage in (eg) the top 64MB
+ * of this range.
+ */
mov r4, pc
and r4, r4, #0xf8000000
+ /* Determine final kernel image address. */
add r4, r4, #TEXT_OFFSET
#else
ldr r4, =zreladdr
@@ -810,6 +837,16 @@ __common_mmu_cache_on:
call_cache_fn: adr r12, proc_types
#ifdef CONFIG_CPU_CP15
mrc p15, 0, r9, c0, c0 @ get processor ID
+#elif defined(CONFIG_CPU_V7M)
+ /*
+ * On v7-M the processor id is located in the V7M_SCB_CPUID
+ * register, but as cache handling is IMPLEMENTATION DEFINED on
+ * v7-M (if existent at all) we just return early here.
+ * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
+ * __armv7_mmu_cache_{on,off,flush}) would be selected which
+ * use cp15 registers that are not implemented on v7-M.
+ */
+ bx lr
#else
ldr r9, =CONFIG_PROCESSOR_ID
#endif
@@ -1310,8 +1347,9 @@ __hyp_reentry_vectors:
__enter_kernel:
mov r0, #0 @ must be 0
- ARM( mov pc, r4 ) @ call kernel
- THUMB( bx r4 ) @ entry point is always ARM
+ ARM( mov pc, r4 ) @ call kernel
+ M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
+ THUMB( bx r4 ) @ entry point is always ARM for A/R classes
reloc_code_end:
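
The comment added to head.S explains that with AUTO_ZRELADDR the zImage derives the start of physical memory by rounding its own program counter down to a 128 MB boundary and then adding TEXT_OFFSET. The same computation in C, with example values:

/*
 * C equivalent of the AUTO_ZRELADDR fixup above: round the current
 * physical execution address down to 128 MB, then add TEXT_OFFSET.
 * The pc and TEXT_OFFSET values below are examples only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pc          = 0x4a0081f0UL;	/* example: zImage running in RAM */
	unsigned long text_offset = 0x00008000UL;	/* typical ARM TEXT_OFFSET */

	unsigned long phys_base   = pc & 0xf8000000UL;	/* the 'and r4, r4, #0xf8000000' */
	unsigned long kernel_addr = phys_base + text_offset;

	printf("RAM base %#lx, kernel image at %#lx\n", phys_base, kernel_addr);
	return 0;
}
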
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
deleted file mode 100644
index 672ae95db5c3..000000000000
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * sh7372 MMCIF loader
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2010 Simon Horman
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/boot.h>
-#include <mach/mmc.h>
-
-#define MMCIF_BASE (void __iomem *)0xe6bd0000
-
-#define PORT84CR (void __iomem *)0xe6050054
-#define PORT85CR (void __iomem *)0xe6050055
-#define PORT86CR (void __iomem *)0xe6050056
-#define PORT87CR (void __iomem *)0xe6050057
-#define PORT88CR (void __iomem *)0xe6050058
-#define PORT89CR (void __iomem *)0xe6050059
-#define PORT90CR (void __iomem *)0xe605005a
-#define PORT91CR (void __iomem *)0xe605005b
-#define PORT92CR (void __iomem *)0xe605005c
-#define PORT99CR (void __iomem *)0xe6050063
-
-#define SMSTPCR3 (void __iomem *)0xe615013c
-
-/* SH7372 specific MMCIF loader
- *
- * loads the zImage from an MMC card starting from block 1.
- *
- * The image must be start with a vrl4 header and
- * the zImage must start at offset 512 of the image. That is,
- * at block 2 (=byte 1024) on the media
- *
- * Use the following line to write the vrl4 formated zImage
- * to an MMC card
- * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
- */
-asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
-{
- mmc_init_progress();
- mmc_update_progress(MMC_PROGRESS_ENTER);
-
- /* Initialise MMC
- * registers: PORT84CR-PORT92CR
- * (MMCD0_0-MMCD0_7,MMCCMD0 Control)
- * value: 0x04 - select function 4
- */
- __raw_writeb(0x04, PORT84CR);
- __raw_writeb(0x04, PORT85CR);
- __raw_writeb(0x04, PORT86CR);
- __raw_writeb(0x04, PORT87CR);
- __raw_writeb(0x04, PORT88CR);
- __raw_writeb(0x04, PORT89CR);
- __raw_writeb(0x04, PORT90CR);
- __raw_writeb(0x04, PORT91CR);
- __raw_writeb(0x04, PORT92CR);
-
- /* Initialise MMC
- * registers: PORT99CR (MMCCLK0 Control)
- * value: 0x10 | 0x04 - enable output | select function 4
- */
- __raw_writeb(0x14, PORT99CR);
-
- /* Enable clock to MMC hardware block */
- __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 12), SMSTPCR3);
-
- mmc_update_progress(MMC_PROGRESS_INIT);
-
- /* setup MMCIF hardware */
- sh_mmcif_boot_init(MMCIF_BASE);
-
- mmc_update_progress(MMC_PROGRESS_LOAD);
-
- /* load kernel via MMCIF interface */
- sh_mmcif_boot_do_read(MMCIF_BASE, 2, /* Kernel is at block 2 */
- (len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, buf);
-
-
- /* Disable clock to MMC hardware block */
- __raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
-
- mmc_update_progress(MMC_PROGRESS_DONE);
-}
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
deleted file mode 100644
index d279294f2381..000000000000
--- a/arch/arm/boot/compressed/sdhi-sh7372.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SuperH Mobile SDHI
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2010 Kuninori Morimoto
- * Copyright (C) 2010 Simon Horman
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Parts inspired by u-boot
- */
-
-#include <linux/io.h>
-#include <mach/mmc.h>
-#include <linux/mmc/boot.h>
-#include <linux/mmc/tmio.h>
-
-#include "sdhi-shmobile.h"
-
-#define PORT179CR 0xe60520b3
-#define PORT180CR 0xe60520b4
-#define PORT181CR 0xe60520b5
-#define PORT182CR 0xe60520b6
-#define PORT183CR 0xe60520b7
-#define PORT184CR 0xe60520b8
-
-#define SMSTPCR3 0xe615013c
-
-#define CR_INPUT_ENABLE 0x10
-#define CR_FUNCTION1 0x01
-
-#define SDHI1_BASE (void __iomem *)0xe6860000
-#define SDHI_BASE SDHI1_BASE
-
-/* SuperH Mobile SDHI loader
- *
- * loads the zImage from an SD card starting from block 0
- * on physical partition 1
- *
- * The image must be start with a vrl4 header and
- * the zImage must start at offset 512 of the image. That is,
- * at block 1 (=byte 512) of physical partition 1
- *
- * Use the following line to write the vrl4 formatted zImage
- * to an SD card
- * # dd if=vrl4.out of=/dev/sdx bs=512
- */
-asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
-{
- int high_capacity;
-
- mmc_init_progress();
-
- mmc_update_progress(MMC_PROGRESS_ENTER);
- /* Initialise SDHI1 */
- /* PORT184CR: GPIO_FN_SDHICMD1 Control */
- __raw_writeb(CR_FUNCTION1, PORT184CR);
- /* PORT179CR: GPIO_FN_SDHICLK1 Control */
- __raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR);
- /* PORT181CR: GPIO_FN_SDHID1_3 Control */
- __raw_writeb(CR_FUNCTION1, PORT183CR);
- /* PORT182CR: GPIO_FN_SDHID1_2 Control */
- __raw_writeb(CR_FUNCTION1, PORT182CR);
- /* PORT183CR: GPIO_FN_SDHID1_1 Control */
- __raw_writeb(CR_FUNCTION1, PORT181CR);
- /* PORT180CR: GPIO_FN_SDHID1_0 Control */
- __raw_writeb(CR_FUNCTION1, PORT180CR);
-
- /* Enable clock to SDHI1 hardware block */
- __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3);
-
- /* setup SDHI hardware */
- mmc_update_progress(MMC_PROGRESS_INIT);
- high_capacity = sdhi_boot_init(SDHI_BASE);
- if (high_capacity < 0)
- goto err;
-
- mmc_update_progress(MMC_PROGRESS_LOAD);
- /* load kernel */
- if (sdhi_boot_do_read(SDHI_BASE, high_capacity,
- 0, /* Kernel is at block 1 */
- (len + TMIO_BBS - 1) / TMIO_BBS, buf))
- goto err;
-
- /* Disable clock to SDHI1 hardware block */
- __raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);
-
- mmc_update_progress(MMC_PROGRESS_DONE);
-
- return;
-err:
- for(;;);
-}
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c
deleted file mode 100644
index bd3d46980955..000000000000
--- a/arch/arm/boot/compressed/sdhi-shmobile.c
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- * SuperH Mobile SDHI
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2010 Kuninori Morimoto
- * Copyright (C) 2010 Simon Horman
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Parts inspired by u-boot
- */
-
-#include <linux/io.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/mmc.h>
-#include <linux/mmc/sd.h>
-#include <linux/mmc/tmio.h>
-#include <mach/sdhi.h>
-
-#define OCR_FASTBOOT (1<<29)
-#define OCR_HCS (1<<30)
-#define OCR_BUSY (1<<31)
-
-#define RESP_CMD12 0x00000030
-
-static inline u16 sd_ctrl_read16(void __iomem *base, int addr)
-{
- return __raw_readw(base + addr);
-}
-
-static inline u32 sd_ctrl_read32(void __iomem *base, int addr)
-{
- return __raw_readw(base + addr) |
- __raw_readw(base + addr + 2) << 16;
-}
-
-static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val)
-{
- __raw_writew(val, base + addr);
-}
-
-static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val)
-{
- __raw_writew(val, base + addr);
- __raw_writew(val >> 16, base + addr + 2);
-}
-
-#define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL | \
- TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT | \
- TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN | \
- TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS | \
- TMIO_STAT_ILL_FUNC)
-
-static int sdhi_intr(void __iomem *base)
-{
- unsigned long state = sd_ctrl_read32(base, CTL_STATUS);
-
- if (state & ALL_ERROR) {
- sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR);
- sd_ctrl_write32(base, CTL_IRQ_MASK,
- ALL_ERROR |
- sd_ctrl_read32(base, CTL_IRQ_MASK));
- return -EINVAL;
- }
- if (state & TMIO_STAT_CMDRESPEND) {
- sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
- sd_ctrl_write32(base, CTL_IRQ_MASK,
- TMIO_STAT_CMDRESPEND |
- sd_ctrl_read32(base, CTL_IRQ_MASK));
- return 0;
- }
- if (state & TMIO_STAT_RXRDY) {
- sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY);
- sd_ctrl_write32(base, CTL_IRQ_MASK,
- TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN |
- sd_ctrl_read32(base, CTL_IRQ_MASK));
- return 0;
- }
- if (state & TMIO_STAT_DATAEND) {
- sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND);
- sd_ctrl_write32(base, CTL_IRQ_MASK,
- TMIO_STAT_DATAEND |
- sd_ctrl_read32(base, CTL_IRQ_MASK));
- return 0;
- }
-
- return -EAGAIN;
-}
-
-static int sdhi_boot_wait_resp_end(void __iomem *base)
-{
- int err = -EAGAIN, timeout = 10000000;
-
- while (timeout--) {
- err = sdhi_intr(base);
- if (err != -EAGAIN)
- break;
- udelay(1);
- }
-
- return err;
-}
-
-/* SDHI_CLK_CTRL */
-#define CLK_MMC_ENABLE (1 << 8)
-#define CLK_MMC_INIT (1 << 6) /* clk / 256 */
-
-static void sdhi_boot_mmc_clk_stop(void __iomem *base)
-{
- sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000);
- msleep(10);
- sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE &
- sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
- msleep(10);
-}
-
-static void sdhi_boot_mmc_clk_start(void __iomem *base)
-{
- sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE |
- sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
- msleep(10);
- sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE);
- msleep(10);
-}
-
-static void sdhi_boot_reset(void __iomem *base)
-{
- sd_ctrl_write16(base, CTL_RESET_SD, 0x0000);
- msleep(10);
- sd_ctrl_write16(base, CTL_RESET_SD, 0x0001);
- msleep(10);
-}
-
-/* Set MMC clock / power.
- * Note: This controller uses a simple divider scheme and therefore cannot
- * run an MMC card at full speed (20MHz). The maximum clock is 24MHz on SD,
- * but as MMC won't run that fast, it has to be clocked at 12MHz, which is
- * the next slowest setting.
- */
-static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios)
-{
- if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY)
- return -EBUSY;
-
- if (ios->clock)
- sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL,
- ios->clock | CLK_MMC_ENABLE);
-
- /* Power sequence - OFF -> ON -> UP */
- switch (ios->power_mode) {
- case MMC_POWER_OFF: /* power down SD bus */
- sdhi_boot_mmc_clk_stop(base);
- break;
- case MMC_POWER_ON: /* power up SD bus */
- break;
- case MMC_POWER_UP: /* start bus clock */
- sdhi_boot_mmc_clk_start(base);
- break;
- }
-
- switch (ios->bus_width) {
- case MMC_BUS_WIDTH_1:
- sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0);
- break;
- case MMC_BUS_WIDTH_4:
- sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0);
- break;
- }
-
- /* Let things settle. delay taken from winCE driver */
- udelay(140);
-
- return 0;
-}
-
-/* These are the bitmasks the tmio chip requires to implement the MMC response
- * types. Note that R1 and R6 are the same in this scheme. */
-#define RESP_NONE 0x0300
-#define RESP_R1 0x0400
-#define RESP_R1B 0x0500
-#define RESP_R2 0x0600
-#define RESP_R3 0x0700
-#define DATA_PRESENT 0x0800
-#define TRANSFER_READ 0x1000
-
-static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd)
-{
- int err, c = cmd->opcode;
-
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_NONE: c |= RESP_NONE; break;
- case MMC_RSP_R1: c |= RESP_R1; break;
- case MMC_RSP_R1B: c |= RESP_R1B; break;
- case MMC_RSP_R2: c |= RESP_R2; break;
- case MMC_RSP_R3: c |= RESP_R3; break;
- default:
- return -EINVAL;
- }
-
- /* No interrupts so this may not be cleared */
- sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
-
- sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND |
- sd_ctrl_read32(base, CTL_IRQ_MASK));
- sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg);
- sd_ctrl_write16(base, CTL_SD_CMD, c);
-
-
- sd_ctrl_write32(base, CTL_IRQ_MASK,
- ~(TMIO_STAT_CMDRESPEND | ALL_ERROR) &
- sd_ctrl_read32(base, CTL_IRQ_MASK));
-
- err = sdhi_boot_wait_resp_end(base);
- if (err)
- return err;
-
- cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE);
-
- return 0;
-}
-
-static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity,
- unsigned long block, unsigned short *buf)
-{
- int err, i;
-
- /* CMD17 - Read */
- {
- struct mmc_command cmd;
-
- cmd.opcode = MMC_READ_SINGLE_BLOCK | \
- TRANSFER_READ | DATA_PRESENT;
- if (high_capacity)
- cmd.arg = block;
- else
- cmd.arg = block * TMIO_BBS;
- cmd.flags = MMC_RSP_R1;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
- }
-
- sd_ctrl_write32(base, CTL_IRQ_MASK,
- ~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY |
- TMIO_STAT_TXUNDERRUN) &
- sd_ctrl_read32(base, CTL_IRQ_MASK));
- err = sdhi_boot_wait_resp_end(base);
- if (err)
- return err;
-
- sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS);
- for (i = 0; i < TMIO_BBS / sizeof(*buf); i++)
- *buf++ = sd_ctrl_read16(base, RESP_CMD12);
-
- err = sdhi_boot_wait_resp_end(base);
- if (err)
- return err;
-
- return 0;
-}
-
-int sdhi_boot_do_read(void __iomem *base, int high_capacity,
- unsigned long offset, unsigned short count,
- unsigned short *buf)
-{
- unsigned long i;
- int err = 0;
-
- for (i = 0; i < count; i++) {
- err = sdhi_boot_do_read_single(base, high_capacity, offset + i,
- buf + (i * TMIO_BBS /
- sizeof(*buf)));
- if (err)
- return err;
- }
-
- return 0;
-}
-
-#define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34)
-
-int sdhi_boot_init(void __iomem *base)
-{
- bool sd_v2 = false, sd_v1_0 = false;
- unsigned short cid;
- int err, high_capacity = 0;
-
- sdhi_boot_mmc_clk_stop(base);
- sdhi_boot_reset(base);
-
- /* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */
- {
- struct mmc_ios ios;
- ios.power_mode = MMC_POWER_ON;
- ios.bus_width = MMC_BUS_WIDTH_1;
- ios.clock = CLK_MMC_INIT;
- err = sdhi_boot_mmc_set_ios(base, &ios);
- if (err)
- return err;
- }
-
- /* CMD0 */
- {
- struct mmc_command cmd;
- msleep(1);
- cmd.opcode = MMC_GO_IDLE_STATE;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_NONE;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
- msleep(2);
- }
-
- /* CMD8 - Test for SD version 2 */
- {
- struct mmc_command cmd;
- cmd.opcode = SD_SEND_IF_COND;
- cmd.arg = (VOLTAGES != 0) << 8 | 0xaa;
- cmd.flags = MMC_RSP_R1;
- err = sdhi_boot_request(base, &cmd); /* Ignore error */
- if ((cmd.resp[0] & 0xff) == 0xaa)
- sd_v2 = true;
- }
-
- /* CMD55 - Get OCR (SD) */
- {
- int timeout = 1000;
- struct mmc_command cmd;
-
- cmd.arg = 0;
-
- do {
- cmd.opcode = MMC_APP_CMD;
- cmd.flags = MMC_RSP_R1;
- cmd.arg = 0;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- break;
-
- cmd.opcode = SD_APP_OP_COND;
- cmd.flags = MMC_RSP_R3;
- cmd.arg = (VOLTAGES & 0xff8000);
- if (sd_v2)
- cmd.arg |= OCR_HCS;
- cmd.arg |= OCR_FASTBOOT;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- break;
-
- msleep(1);
- } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);
-
- if (!err && timeout) {
- if (!sd_v2)
- sd_v1_0 = true;
- high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
- }
- }
-
- /* CMD1 - Get OCR (MMC) */
- if (!sd_v2 && !sd_v1_0) {
- int timeout = 1000;
- struct mmc_command cmd;
-
- do {
- cmd.opcode = MMC_SEND_OP_COND;
- cmd.arg = VOLTAGES | OCR_HCS;
- cmd.flags = MMC_RSP_R3;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
-
- msleep(1);
- } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);
-
- if (!timeout)
- return -EAGAIN;
-
- high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
- }
-
- /* CMD2 - Get CID */
- {
- struct mmc_command cmd;
- cmd.opcode = MMC_ALL_SEND_CID;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R2;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
- }
-
- /* CMD3
- * MMC: Set the relative address
- * SD: Get the relative address
- * Also puts the card into the standby state
- */
- {
- struct mmc_command cmd;
- cmd.opcode = MMC_SET_RELATIVE_ADDR;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R1;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
- cid = cmd.resp[0] >> 16;
- }
-
- /* CMD9 - Get CSD */
- {
- struct mmc_command cmd;
- cmd.opcode = MMC_SEND_CSD;
- cmd.arg = cid << 16;
- cmd.flags = MMC_RSP_R2;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
- }
-
- /* CMD7 - Select the card */
- {
- struct mmc_command cmd;
- cmd.opcode = MMC_SELECT_CARD;
- //cmd.arg = rca << 16;
- cmd.arg = cid << 16;
- //cmd.flags = MMC_RSP_R1B;
- cmd.flags = MMC_RSP_R1;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
- }
-
- /* CMD16 - Set the block size */
- {
- struct mmc_command cmd;
- cmd.opcode = MMC_SET_BLOCKLEN;
- cmd.arg = TMIO_BBS;
- cmd.flags = MMC_RSP_R1;
- err = sdhi_boot_request(base, &cmd);
- if (err)
- return err;
- }
-
- return high_capacity;
-}
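
Throughout sdhi_intr() and sdhi_boot_request() above, the same register convention is used: a status bit is acknowledged by writing its complement to CTL_STATUS, and an interrupt source is masked by setting, or unmasked by clearing, its bit in CTL_IRQ_MASK via read-modify-write. A condensed sketch with hypothetical helper names (the removed driver open-codes these sequences):

	static void sdhi_ack(void __iomem *base, u32 bits)
	{
		/* writing the complement clears only the given status bits */
		sd_ctrl_write32(base, CTL_STATUS, ~bits);
	}

	static void sdhi_mask(void __iomem *base, u32 bits)
	{
		/* a set bit in CTL_IRQ_MASK masks that interrupt source */
		sd_ctrl_write32(base, CTL_IRQ_MASK,
				bits | sd_ctrl_read32(base, CTL_IRQ_MASK));
	}

	static void sdhi_unmask(void __iomem *base, u32 bits)
	{
		/* clearing a bit unmasks it before the polling loop */
		sd_ctrl_write32(base, CTL_IRQ_MASK,
				~bits & sd_ctrl_read32(base, CTL_IRQ_MASK));
	}
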
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h
deleted file mode 100644
index 92eaa09f985e..000000000000
--- a/arch/arm/boot/compressed/sdhi-shmobile.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef SDHI_MOBILE_H
-#define SDHI_MOBILE_H
-
-#include <linux/compiler.h>
-
-int sdhi_boot_do_read(void __iomem *base, int high_capacity,
- unsigned long offset, unsigned short count,
- unsigned short *buf);
-int sdhi_boot_init(void __iomem *base);
-
-#endif
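
Read together with sdhi-sh7372.c above, the intended use of this small interface is: sdhi_boot_init() probes and selects the card, returning a negative error code on failure or a high-capacity flag (0 or 1) on success, and that flag is then passed to sdhi_boot_do_read(). A usage sketch under that assumption, where base, buf, offset and count are supplied by the caller:

	/* Usage sketch only; mirrors the flow of mmc_loader() in
	 * sdhi-sh7372.c above. */
	int high_capacity = sdhi_boot_init(base);

	if (high_capacity < 0)
		return high_capacity;	/* card initialisation failed */

	/* read "count" TMIO_BBS-sized blocks starting at "offset" into buf */
	return sdhi_boot_do_read(base, high_capacity, offset, count, buf);
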
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index a1c776b8dcec..86217db2937a 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -1,5 +1,7 @@
ifeq ($(CONFIG_OF),y)
+dtb-$(CONFIG_ARCH_ALPINE) += \
+ alpine-db.dtb
dtb-$(CONFIG_MACH_ASM9260) += \
alphascale-asm9260-devkit.dtb
# Keep at91 dtb files sorted alphabetically for each SoC
@@ -42,6 +44,7 @@ dtb-$(CONFIG_SOC_SAM_V7) += \
sama5d34ek.dtb \
sama5d35ek.dtb \
sama5d36ek.dtb \
+ at91-sama5d4_xplained.dtb \
at91-sama5d4ek.dtb
dtb-$(CONFIG_ARCH_ATLAS6) += \
atlas6-evb.dtb
@@ -59,13 +62,15 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm4708-netgear-r6300-v2.dtb \
bcm47081-asus-rt-n18u.dtb \
bcm47081-buffalo-wzr-600dhp2.dtb \
- bcm47081-buffalo-wzr-900dhp.dtb
+ bcm47081-buffalo-wzr-900dhp.dtb \
+ bcm4709-netgear-r8000.dtb
dtb-$(CONFIG_ARCH_BCM_63XX) += \
bcm963138dvt.dtb
dtb-$(CONFIG_ARCH_BCM_CYGNUS) += \
bcm911360_entphn.dtb \
bcm911360k.dtb \
- bcm958300k.dtb
+ bcm958300k.dtb \
+ bcm958305k.dtb
dtb-$(CONFIG_ARCH_BCM_MOBILE) += \
bcm28155-ap.dtb \
bcm21664-garnet.dtb
@@ -165,6 +170,7 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \
kirkwood-lsxhl.dtb \
kirkwood-mplcec4.dtb \
kirkwood-mv88f6281gtw-ge.dtb \
+ kirkwood-nas2big.dtb \
kirkwood-net2big.dtb \
kirkwood-net5big.dtb \
kirkwood-netgear_readynas_duo_v2.dtb \
@@ -199,6 +205,8 @@ dtb-$(CONFIG_ARCH_LPC32XX) += \
ea3250.dtb phy3250.dtb
dtb-$(CONFIG_MACH_MESON6) += \
meson6-atv1200.dtb
+dtb-$(CONFIG_MACH_MESON8) += \
+ meson8-minix-neo-x8.dtb
dtb-$(CONFIG_ARCH_MMP) += \
pxa168-aspenite.dtb \
pxa910-dkb.dtb \
@@ -299,9 +307,11 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-wandboard.dtb \
imx6q-wandboard-revb1.dtb
dtb-$(CONFIG_SOC_IMX6SL) += \
- imx6sl-evk.dtb
+ imx6sl-evk.dtb \
+ imx6sl-warp.dtb
dtb-$(CONFIG_SOC_IMX6SX) += \
imx6sx-sabreauto.dtb \
+ imx6sx-sdb-reva.dtb \
imx6sx-sdb.dtb
dtb-$(CONFIG_SOC_LS1021A) += \
ls1021a-qds.dtb \
@@ -386,6 +396,8 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
omap3-overo-storm-tobi.dtb \
omap3-overo-summit.dtb \
omap3-overo-tobi.dtb \
+ omap3-pandora-600mhz.dtb \
+ omap3-pandora-1ghz.dtb \
omap3-sbc-t3517.dtb \
omap3-sbc-t3530.dtb \
omap3-sbc-t3730.dtb \
@@ -401,7 +413,8 @@ dtb-$(CONFIG_SOC_AM33XX) += \
am335x-evmsk.dtb \
am335x-nano.dtb \
am335x-pepper.dtb \
- am335x-lxm.dtb
+ am335x-lxm.dtb \
+ am335x-chiliboard.dtb
dtb-$(CONFIG_ARCH_OMAP4) += \
omap4-duovero-parlor.dtb \
omap4-panda.dtb \
@@ -464,25 +477,23 @@ dtb-$(CONFIG_ARCH_S5PV210) += \
s5pv210-smdkv210.dtb \
s5pv210-torbreck.dtb
dtb-$(CONFIG_ARCH_SHMOBILE_LEGACY) += \
- r8a73a4-ape6evm.dtb \
- r8a73a4-ape6evm-reference.dtb \
r8a7740-armadillo800eva.dtb \
r8a7778-bockw.dtb \
r8a7778-bockw-reference.dtb \
r8a7779-marzen.dtb \
- sh7372-mackerel.dtb \
- sh73a0-kzm9g.dtb \
- sh73a0-kzm9g-reference.dtb
+ sh73a0-kzm9g.dtb
dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += \
emev2-kzm9d.dtb \
r7s72100-genmai.dtb \
r8a73a4-ape6evm.dtb \
r8a7740-armadillo800eva.dtb \
+ r8a7778-bockw.dtb \
r8a7779-marzen.dtb \
r8a7790-lager.dtb \
r8a7791-henninger.dtb \
r8a7791-koelsch.dtb \
- r8a7794-alt.dtb
+ r8a7794-alt.dtb \
+ sh73a0-kzm9g.dtb
dtb-$(CONFIG_ARCH_SOCFPGA) += \
socfpga_arria5_socdk.dtb \
socfpga_arria10_socdk.dtb \
@@ -577,6 +588,7 @@ dtb-$(CONFIG_ARCH_TEGRA_114_SOC) += \
dtb-$(CONFIG_ARCH_TEGRA_124_SOC) += \
tegra124-jetson-tk1.dtb \
tegra124-nyan-big.dtb \
+ tegra124-nyan-blaze.dtb \
tegra124-venice2.dtb
dtb-$(CONFIG_ARCH_U300) += \
ste-u300.dtb
@@ -624,11 +636,14 @@ dtb-$(CONFIG_MACH_ARMADA_38X) += \
armada-388-db.dtb \
armada-388-gp.dtb \
armada-388-rd.dtb
+dtb-$(CONFIG_MACH_ARMADA_39X) += \
+ armada-398-db.dtb
dtb-$(CONFIG_MACH_ARMADA_XP) += \
armada-xp-axpwifiap.dtb \
armada-xp-db.dtb \
armada-xp-gp.dtb \
armada-xp-lenovo-ix4-300d.dtb \
+ armada-xp-linksys-mamba.dtb \
armada-xp-matrix.dtb \
armada-xp-netgear-rn2120.dtb \
armada-xp-openblocks-ax3-4.dtb \
diff --git a/arch/arm/boot/dts/alpine-db.dts b/arch/arm/boot/dts/alpine-db.dts
new file mode 100644
index 000000000000..dfb5a0802273
--- /dev/null
+++ b/arch/arm/boot/dts/alpine-db.dts
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Alternatively, redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+/dts-v1/;
+
+#include "alpine.dtsi"
+
+/ {
+ model = "Annapurna Labs Alpine Dev Board";
+ /* no need for anything outside SOC */
+};
+
diff --git a/arch/arm/boot/dts/alpine.dtsi b/arch/arm/boot/dts/alpine.dtsi
new file mode 100644
index 000000000000..9af2d60e9a7f
--- /dev/null
+++ b/arch/arm/boot/dts/alpine.dtsi
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Alternatively, redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "skeleton64.dtsi"
+
+/ {
+ /* SOC compatibility */
+ compatible = "al,alpine";
+
+ /* CPU Configuration */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "al,alpine-smp";
+
+ cpu@0 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <0>;
+ clock-frequency = <0>; /* Filled by loader */
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <1>;
+ clock-frequency = <0>; /* Filled by loader */
+ };
+
+ cpu@2 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <2>;
+ clock-frequency = <0>; /* Filled by loader */
+ };
+
+ cpu@3 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <3>;
+ clock-frequency = <0>; /* Filled by loader */
+ };
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ ranges;
+
+ arch-timer {
+ compatible = "arm,cortex-a15-timer",
+ "arm,armv7-timer";
+ interrupts =
+ <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ clock-frequency = <0>; /* Filled by loader */
+ };
+
+ /* Interrupt Controller */
+ gic: gic@fb001000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ #size-cells = <0>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xfb001000 0x0 0x1000>,
+ <0x0 0xfb002000 0x0 0x2000>,
+ <0x0 0xfb004000 0x0 0x1000>,
+ <0x0 0xfb006000 0x0 0x2000>;
+ interrupts =
+ <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ /* CPU Resume registers */
+ cpu-resume@fbff5ec0 {
+ compatible = "al,alpine-cpu-resume";
+ reg = <0x0 0xfbff5ec0 0x0 0x30>;
+ };
+
+ /* North Bridge Service Registers */
+ sysfabric-service@fb070000 {
+ compatible = "al,alpine-sysfabric-service", "syscon";
+ reg = <0x0 0xfb070000 0x0 0x10000>;
+ };
+
+ /* Performance Monitor Unit */
+ pmu {
+ compatible = "arm,cortex-a15-pmu";
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ uart0:uart@fd883000 {
+ compatible = "ns16550a";
+ reg = <0x0 0xfd883000 0x0 0x1000>;
+ clock-frequency = <0>; /* Filled by loader */
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uart1:uart@0xfd884000 {
+ compatible = "ns16550a";
+ reg = <0x0 0xfd884000 0x0 0x1000>;
+ clock-frequency = <0>; /* Filled by loader */
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ /* Internal PCIe Controller */
+ pcie-internal@0xfbc00000 {
+ compatible = "pci-host-ecam-generic";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ reg = <0x0 0xfbc00000 0x0 0x100000>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ /* Add legacy interrupts for SATA devices only */
+ interrupt-map = <0x4000 0 0 1 &gic 0 43 4>,
+ <0x4800 0 0 1 &gic 0 44 4>;
+
+ /* 32 bit non prefetchable memory space */
+ ranges = <0x02000000 0x0 0xfe000000 0x0 0xfe000000 0x0 0x1000000>;
+
+ bus-range = <0x00 0x00>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-chiliboard.dts b/arch/arm/boot/dts/am335x-chiliboard.dts
new file mode 100644
index 000000000000..310da20a8aa7
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-chiliboard.dts
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2015 Jablotron s.r.o. -- http://www.jablotron.com/
+ * Author: Rostislav Lisovy <lisovy@jablotron.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+#include "am335x-chilisom.dtsi"
+
+/ {
+ model = "AM335x Chiliboard";
+ compatible = "grinn,am335x-chiliboard", "grinn,am335x-chilisom",
+ "ti,am33xx";
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_gpio_pins>;
+
+ led0 {
+ label = "led0";
+ gpios = <&gpio3 7 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1 {
+ label = "led1";
+ gpios = <&gpio3 8 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ };
+ };
+};
+
+&am33xx_pinmux {
+ usb1_drvvbus: usb1_drvvbus {
+ pinctrl-single,pins = <
+ 0x234 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* usb1_drvvbus.usb1_drvvbus */
+ >;
+ };
+
+ sd_pins: pinmux_sd_card {
+ pinctrl-single,pins = <
+ 0xf0 (PIN_INPUT | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
+ 0xf4 (PIN_INPUT | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
+ 0xf8 (PIN_INPUT | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
+ 0xfc (PIN_INPUT | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
+ 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */
+ 0x104 (PIN_INPUT | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
+ 0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ >;
+ };
+
+ led_gpio_pins: led_gpio_pins {
+ pinctrl-single,pins = <
+ 0x1e4 (PIN_OUTPUT | MUX_MODE7) /* emu0.gpio3_7 */
+ 0x1e8 (PIN_OUTPUT | MUX_MODE7) /* emu1.gpio3_8 */
+ >;
+ };
+};
+
+&ldo4_reg {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+};
+
+/* Ethernet */
+&cpsw_emac0 {
+ phy_id = <&davinci_mdio>, <0>;
+ phy-mode = "rmii";
+};
+
+&phy_sel {
+ rmii-clock-ext;
+};
+
+/* USB */
+&usb {
+ status = "okay";
+};
+
+&usb_ctrl_mod {
+ status = "okay";
+};
+
+&usb1_phy {
+ status = "okay";
+};
+
+&usb1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_drvvbus>;
+
+ status = "okay";
+ dr_mode = "host";
+};
+
+&cppi41dma {
+ status = "okay";
+};
+
+/* microSD */
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd_pins>;
+ vmmc-supply = <&ldo4_reg>;
+ bus-width = <0x4>;
+ cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
new file mode 100644
index 000000000000..7e9a34dffe21
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2015 Jablotron s.r.o. -- http://www.jablotron.com/
+ * Author: Rostislav Lisovy <lisovy@jablotron.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "am33xx.dtsi"
+
+/ {
+ model = "Grinn AM335x ChiliSOM";
+ compatible = "grinn,am335x-chilisom", "ti,am33xx";
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&dcdc2_reg>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+ };
+};
+
+&am33xx_pinmux {
+ pinctrl-names = "default";
+
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1 */
+ 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs */
+ 0x110 (PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
+ 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txen.rmii1_txen */
+ 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
+ 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
+ 0x13c (PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
+ 0x140 (PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
+ 0x144 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii1_ref_clk.rmii_ref_clk */
+ >;
+ };
+
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x144 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* mdio_data.mdio_data */
+ 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)
+ /* mdio_clk.mdio_clk */
+ 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0)
+ >;
+ };
+
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ nandflash_pins: nandflash_pins {
+ pinctrl-single,pins = <
+ 0x00 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
+ 0x04 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
+ 0x08 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
+ 0x0c (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
+ 0x10 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
+ 0x14 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
+ 0x18 (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
+ 0x1c (PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
+
+ 0x70 (PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
+ 0x7c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
+ 0x90 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
+ 0x94 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
+ 0x98 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_wen.gpmc_wen */
+ 0x9c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ >;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tps: tps@24 {
+ reg = <0x24>;
+ };
+
+};
+
+/include/ "tps65217.dtsi"
+
+&tps {
+ regulators {
+ dcdc1_reg: regulator@0 {
+ regulator-name = "vdds_dpr";
+ regulator-always-on;
+ };
+
+ dcdc2_reg: regulator@1 {
+ /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+ regulator-name = "vdd_mpu";
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <1325000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc3_reg: regulator@2 {
+ /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: regulator@3 {
+ regulator-name = "vio,vrtc,vdds";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: regulator@4 {
+ regulator-name = "vdd_3v3aux";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: regulator@5 {
+ regulator-name = "vdd_1v8";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: regulator@6 {
+ regulator-name = "vdd_3v3d";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+};
+
+/* Ethernet MAC */
+&mac {
+ slaves = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cpsw_default>;
+ pinctrl-1 = <&cpsw_sleep>;
+ status = "okay";
+};
+
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&davinci_mdio_default>;
+ pinctrl-1 = <&davinci_mdio_sleep>;
+ status = "okay";
+};
+
+/* NAND Flash */
+&elm {
+ status = "okay";
+};
+
+&gpmc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&nandflash_pins>;
+ ranges = <0 0 0x08000000 0x01000000>; /* CS0 0 @addr 0x08000000, size 0x01000000 */
+ nand@0,0 {
+ reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+ ti,nand-ecc-opt = "bch8";
+ ti,elm-id = <&elm>;
+ nand-bus-width = <8>;
+ gpmc,device-width = <1>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-on-ns = <0>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,wait-on-read = "true";
+ gpmc,wait-on-write = "true";
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,clk-activation-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index df5fee6b6b4b..87fc7a35e802 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -15,6 +15,7 @@
#include "am33xx.dtsi"
#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/ {
model = "TI AM335x EVM-SK";
@@ -647,6 +648,16 @@
cap-power-off-card;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1271";
+ reg = <2>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */
+ ref-clock-frequency = <38400000>;
+ };
};
&mcasp1 {
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index a3466455b171..5ed4ca6eaf55 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -213,7 +213,9 @@
pinctrl-0 = <&i2c0_pins>;
gpio@20 {
- compatible = "mcp,mcp23017";
+ compatible = "microchip,mcp23017";
+ gpio-controller;
+ #gpio-cells = <2>;
reg = <0x20>;
};
@@ -222,7 +224,7 @@
};
eeprom@53 {
- compatible = "mcp,24c02";
+ compatible = "microchip,24c02";
reg = <0x53>;
pagesize = <8>;
};
@@ -297,8 +299,8 @@
| |-->0x004FFFFF-> Kernel end
| |-->0x00500000-> File system start
| |
- | |-->0x014FFFFF-> File system end
- | |-->0x01500000-> User data start
+ | |-->0x01FFFFFF-> File system end
+ | |-->0x02000000-> User data start
| |
| |-->0x03FFFFFF-> User data end
| |-->0x04000000-> Data storage start
@@ -327,12 +329,12 @@
partition@4 {
label = "rootfs";
- reg = <0x00500000 0x01000000>; /* 16MB */
+ reg = <0x00500000 0x01b00000>; /* 27MB */
};
partition@5 {
label = "user";
- reg = <0x01500000 0x02b00000>; /* 43MB */
+ reg = <0x02000000 0x02000000>; /* 32MB */
};
partition@6 {
@@ -343,7 +345,7 @@
};
&mac {
- dual_emac = <1>;
+ dual_emac;
status = "okay";
};
@@ -353,11 +355,13 @@
&cpsw_emac0 {
phy_id = <&davinci_mdio>, <0>;
+ phy-mode = "mii";
dual_emac_res_vlan = <1>;
};
&cpsw_emac1 {
phy_id = <&davinci_mdio>, <1>;
+ phy-mode = "mii";
dual_emac_res_vlan = <2>;
};
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index 071b56aa0c7e..afb4b3a7bab4 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-&scrm_clocks {
+&scm_clocks {
sys_clkin_ck: sys_clkin_ck {
#clock-cells = <0>;
compatible = "ti,mux-clock";
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index acd37057bca9..21fcc440fc1a 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -83,20 +83,6 @@
};
};
- am33xx_control_module: control_module@4a002000 {
- compatible = "syscon";
- reg = <0x44e10000 0x7fc>;
- };
-
- am33xx_pinmux: pinmux@44e10800 {
- compatible = "pinctrl-single";
- reg = <0x44e10800 0x0238>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-single,register-width = <32>;
- pinctrl-single,function-mask = <0x7f>;
- };
-
/*
* XXX: Use a flat representation of the AM33XX interconnect.
* The real AM33XX interconnect network is quite complex. Since
@@ -111,37 +97,58 @@
ranges;
ti,hwmods = "l3_main";
- prcm: prcm@44e00000 {
- compatible = "ti,am3-prcm";
- reg = <0x44e00000 0x4000>;
-
- prcm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
+ l4_wkup: l4_wkup@44c00000 {
+ compatible = "ti,am3-l4-wkup", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x44c00000 0x280000>;
- prcm_clockdomains: clockdomains {
- };
- };
+ prcm: prcm@200000 {
+ compatible = "ti,am3-prcm";
+ reg = <0x200000 0x4000>;
- scrm: scrm@44e10000 {
- compatible = "ti,am3-scrm";
- reg = <0x44e10000 0x2000>;
+ prcm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- scrm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
+ prcm_clockdomains: clockdomains {
+ };
};
- scrm_clockdomains: clockdomains {
+ scm: scm@210000 {
+ compatible = "ti,am3-scm", "simple-bus";
+ reg = <0x210000 0x2000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x210000 0x2000>;
+
+ am33xx_pinmux: pinmux@800 {
+ compatible = "pinctrl-single";
+ reg = <0x800 0x238>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x7f>;
+ };
+
+ scm_conf: scm_conf@0 {
+ compatible = "syscon";
+ reg = <0x0 0x800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ scm_clockdomains: clockdomains {
+ };
};
};
- cm: syscon@44e10000 {
- compatible = "ti,am33xx-controlmodule", "syscon";
- reg = <0x44e10000 0x800>;
- };
-
intc: interrupt-controller@48200000 {
compatible = "ti,am33xx-intc";
interrupt-controller;
@@ -350,7 +357,7 @@
reg = <0x481cc000 0x2000>;
clocks = <&dcan0_fck>;
clock-names = "fck";
- syscon-raminit = <&am33xx_control_module 0x644 0>;
+ syscon-raminit = <&scm_conf 0x644 0>;
interrupts = <52>;
status = "disabled";
};
@@ -361,7 +368,7 @@
reg = <0x481d0000 0x2000>;
clocks = <&dcan1_fck>;
clock-names = "fck";
- syscon-raminit = <&am33xx_control_module 0x644 1>;
+ syscon-raminit = <&scm_conf 0x644 1>;
interrupts = <55>;
status = "disabled";
};
@@ -720,7 +727,7 @@
*/
interrupts = <40 41 42 43>;
ranges;
- syscon = <&cm>;
+ syscon = <&scm_conf>;
status = "disabled";
davinci_mdio: mdio@4a101000 {
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index c90724bded10..f164dce08755 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -31,7 +31,7 @@
status = "disabled";
reg = <0x5c000000 0x30000>;
interrupts = <67 68 69 70>;
- syscon = <&omap3_scm_general>;
+ syscon = <&scm_conf>;
ti,davinci-ctrl-reg-offset = <0x10000>;
ti,davinci-ctrl-mod-reg-offset = <0>;
ti,davinci-ctrl-ram-offset = <0x20000>;
diff --git a/arch/arm/boot/dts/am35xx-clocks.dtsi b/arch/arm/boot/dts/am35xx-clocks.dtsi
index df489d310b50..518b8fde88b0 100644
--- a/arch/arm/boot/dts/am35xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am35xx-clocks.dtsi
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-&scrm_clocks {
+&scm_clocks {
emac_ick: emac_ick {
#clock-cells = <0>;
compatible = "ti,am35xx-gate-clock";
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 8a099bc10c1e..c80a3e233792 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -66,22 +66,6 @@
cache-level = <2>;
};
- am43xx_control_module: control_module@4a002000 {
- compatible = "syscon";
- reg = <0x44e10000 0x7f4>;
- };
-
- am43xx_pinmux: pinmux@44e10800 {
- compatible = "ti,am437-padconf", "pinctrl-single";
- reg = <0x44e10800 0x31c>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <32>;
- pinctrl-single,function-mask = <0xffffffff>;
- };
-
ocp {
compatible = "ti,am4372-l3-noc", "simple-bus";
#address-cells = <1>;
@@ -93,29 +77,58 @@
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- prcm: prcm@44df0000 {
- compatible = "ti,am4-prcm";
- reg = <0x44df0000 0x11000>;
-
- prcm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
+ l4_wkup: l4_wkup@44c00000 {
+ compatible = "ti,am4-l4-wkup", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x44c00000 0x287000>;
- prcm_clockdomains: clockdomains {
- };
- };
+ prcm: prcm@1f0000 {
+ compatible = "ti,am4-prcm";
+ reg = <0x1f0000 0x11000>;
- scrm: scrm@44e10000 {
- compatible = "ti,am4-scrm";
- reg = <0x44e10000 0x2000>;
+ prcm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- scrm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
+ prcm_clockdomains: clockdomains {
+ };
};
- scrm_clockdomains: clockdomains {
+ scm: scm@210000 {
+ compatible = "ti,am4-scm", "simple-bus";
+ reg = <0x210000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x210000 0x4000>;
+
+ am43xx_pinmux: pinmux@800 {
+ compatible = "ti,am437-padconf",
+ "pinctrl-single";
+ reg = <0x800 0x31c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0xffffffff>;
+ };
+
+ scm_conf: scm_conf@0 {
+ compatible = "syscon";
+ reg = <0x0 0x800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ scm_clockdomains: clockdomains {
+ };
};
};
@@ -796,7 +809,7 @@
};
ocp2scp0: ocp2scp@483a8000 {
- compatible = "ti,omap-ocp2scp";
+ compatible = "ti,am437x-ocp2scp", "ti,omap-ocp2scp";
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -815,7 +828,7 @@
};
ocp2scp1: ocp2scp@483e8000 {
- compatible = "ti,omap-ocp2scp";
+ compatible = "ti,am437x-ocp2scp", "ti,omap-ocp2scp";
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -893,7 +906,7 @@
};
hdq: hdq@48347000 {
- compatible = "ti,am43xx-hdq";
+ compatible = "ti,am4372-hdq";
reg = <0x48347000 0x1000>;
interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&func_12m_clk>;
@@ -942,7 +955,7 @@
clocks = <&dcan0_fck>;
clock-names = "fck";
reg = <0x481cc000 0x2000>;
- syscon-raminit = <&am43xx_control_module 0x644 0>;
+ syscon-raminit = <&scm_conf 0x644 0>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -953,7 +966,7 @@
clocks = <&dcan1_fck>;
clock-names = "fck";
reg = <0x481d0000 0x2000>;
- syscon-raminit = <&am43xx_control_module 0x644 1>;
+ syscon-raminit = <&scm_conf 0x644 1>;
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index 0198f5a62b96..378344271746 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -133,6 +133,20 @@
>;
};
+ i2c2_pins_default: i2c2_pins_default {
+ pinctrl-single,pins = <
+ 0x1e8 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE3) /* cam1_data1.i2c2_scl */
+ 0x1ec (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE3) /* cam1_data0.i2c2_sda */
+ >;
+ };
+
+ i2c2_pins_sleep: i2c2_pins_sleep {
+ pinctrl-single,pins = <
+ 0x1e8 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x1ec (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
mmc1_pins_default: pinmux_mmc1_pins_default {
pinctrl-single,pins = <
0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */
@@ -263,6 +277,14 @@
};
};
+&i2c2 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c2_pins_default>;
+ pinctrl-1 = <&i2c2_pins_sleep>;
+ clock-frequency = <100000>;
+};
+
&epwmss0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 8ae29c955c11..c17097d2c167 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -49,7 +49,7 @@
pinctrl-0 = <&matrix_keypad_pins>;
debounce-delay-ms = <5>;
- col-scan-delay-us = <1500>;
+ col-scan-delay-us = <5>;
row-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH /* Bank5, pin5 */
&gpio5 6 GPIO_ACTIVE_HIGH>; /* Bank5, pin6 */
@@ -473,7 +473,7 @@
interrupt-parent = <&gpio0>;
interrupts = <31 0>;
- wake-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
touchscreen-size-x = <480>;
touchscreen-size-y = <272>;
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 1d7109196872..795d68af6df9 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -69,7 +69,48 @@
};
};
- am43xx_pinmux: pinmux@44e10800 {
+ matrix_keypad: matrix_keypad@0 {
+ compatible = "gpio-matrix-keypad";
+ debounce-delay-ms = <5>;
+ col-scan-delay-us = <2>;
+
+ row-gpios = <&gpio0 12 GPIO_ACTIVE_HIGH /* Bank0, pin12 */
+ &gpio0 13 GPIO_ACTIVE_HIGH /* Bank0, pin13 */
+ &gpio0 14 GPIO_ACTIVE_HIGH /* Bank0, pin14 */
+ &gpio0 15 GPIO_ACTIVE_HIGH>; /* Bank0, pin15 */
+
+ col-gpios = <&gpio3 9 GPIO_ACTIVE_HIGH /* Bank3, pin9 */
+ &gpio3 10 GPIO_ACTIVE_HIGH /* Bank3, pin10 */
+ &gpio2 18 GPIO_ACTIVE_HIGH /* Bank2, pin18 */
+ &gpio2 19 GPIO_ACTIVE_HIGH>; /* Bank2, pin19 */
+
+ linux,keymap = <0x00000201 /* P1 */
+ 0x01000204 /* P4 */
+ 0x02000207 /* P7 */
+ 0x0300020a /* NUMERIC_STAR */
+ 0x00010202 /* P2 */
+ 0x01010205 /* P5 */
+ 0x02010208 /* P8 */
+ 0x03010200 /* P0 */
+ 0x00020203 /* P3 */
+ 0x01020206 /* P6 */
+ 0x02020209 /* P9 */
+ 0x0302020b /* NUMERIC_POUND */
+ 0x00030067 /* UP */
+ 0x0103006a /* RIGHT */
+ 0x0203006c /* DOWN */
+ 0x03030069>; /* LEFT */
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&ecap0 0 50000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 51 53 56 62 75 101 152 255>;
+ default-brightness-level = <8>;
+ };
+};
+
+&am43xx_pinmux {
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
@@ -279,47 +320,6 @@
0x204 (DS0_PULL_UP_DOWN_EN | INPUT_EN | MUX_MODE7)
>;
};
- };
-
- matrix_keypad: matrix_keypad@0 {
- compatible = "gpio-matrix-keypad";
- debounce-delay-ms = <5>;
- col-scan-delay-us = <2>;
-
- row-gpios = <&gpio0 12 GPIO_ACTIVE_HIGH /* Bank0, pin12 */
- &gpio0 13 GPIO_ACTIVE_HIGH /* Bank0, pin13 */
- &gpio0 14 GPIO_ACTIVE_HIGH /* Bank0, pin14 */
- &gpio0 15 GPIO_ACTIVE_HIGH>; /* Bank0, pin15 */
-
- col-gpios = <&gpio3 9 GPIO_ACTIVE_HIGH /* Bank3, pin9 */
- &gpio3 10 GPIO_ACTIVE_HIGH /* Bank3, pin10 */
- &gpio2 18 GPIO_ACTIVE_HIGH /* Bank2, pin18 */
- &gpio2 19 GPIO_ACTIVE_HIGH>; /* Bank2, pin19 */
-
- linux,keymap = <0x00000201 /* P1 */
- 0x01000204 /* P4 */
- 0x02000207 /* P7 */
- 0x0300020a /* NUMERIC_STAR */
- 0x00010202 /* P2 */
- 0x01010205 /* P5 */
- 0x02010208 /* P8 */
- 0x03010200 /* P0 */
- 0x00020203 /* P3 */
- 0x01020206 /* P6 */
- 0x02020209 /* P9 */
- 0x0302020b /* NUMERIC_POUND */
- 0x00030067 /* UP */
- 0x0103006a /* RIGHT */
- 0x0203006c /* DOWN */
- 0x03030069>; /* LEFT */
- };
-
- backlight {
- compatible = "pwm-backlight";
- pwms = <&ecap0 0 50000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 51 53 56 62 75 101 152 255>;
- default-brightness-level = <8>;
- };
};
&mmc1 {
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index cfb49686ab6a..d0c0dfa4ec48 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-&scrm_clocks {
+&scm_clocks {
sys_clkin_ck: sys_clkin_ck {
#clock-cells = <0>;
compatible = "ti,mux-clock";
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index bd48dba16748..7128fad991ac 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -8,7 +8,6 @@
/dts-v1/;
#include "dra74x.dtsi"
-#include <dt-bindings/clk/ti-dra7-atl.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -19,6 +18,7 @@
aliases {
rtc0 = &mcp_rtc;
rtc1 = &tps659038_rtc;
+ rtc2 = &rtc;
};
memory {
@@ -84,9 +84,10 @@
gpio_fan: gpio_fan {
/* Based on 5v 500mA AFB02505HHB */
compatible = "gpio-fan";
- gpios = <&tps659038_gpio 1 GPIO_ACTIVE_HIGH>;
+ gpios = <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0>,
<13000 1>;
+ #cooling-cells = <2>;
};
extcon_usb1: extcon_usb1 {
@@ -130,8 +131,8 @@
uart3_pins_default: uart3_pins_default {
pinctrl-single,pins = <
- 0x248 (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd.rxd */
- 0x24c (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd.txd */
+ 0x3f8 (PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */
+ 0x3fc (PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */
>;
};
@@ -442,6 +443,7 @@
pinctrl-0 = <&tmp102_pins_default>;
interrupt-parent = <&gpio7>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ #thermal-sensor-cells = <1>;
};
};
@@ -454,7 +456,7 @@
mcp_rtc: rtc@6f {
compatible = "microchip,mcp7941x";
reg = <0x6f>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_LOW>; /* IRQ_SYS_1N */
+ interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */
pinctrl-names = "default";
pinctrl-0 = <&mcp79410_pins_default>;
@@ -477,7 +479,7 @@
&uart3 {
status = "okay";
interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
- <&dra7_pmx_core 0x248>;
+ <&dra7_pmx_core 0x3f8>;
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins_default>;
@@ -548,6 +550,61 @@
pinctrl-0 = <&usb1_pins>;
};
+&omap_dwc3_1 {
+ extcon = <&extcon_usb1>;
+};
+
+&omap_dwc3_2 {
+ extcon = <&extcon_usb2>;
+};
+
&usb2 {
dr_mode = "peripheral";
};
+
+&cpu_trips {
+ cpu_alert1: cpu_alert1 {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "active";
+ };
+};
+
+&cpu_cooling_maps {
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&gpio_fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+};
+
+&thermal_zones {
+ board_thermal: board_thermal {
+ polling-delay-passive = <1250>; /* milliseconds */
+ polling-delay = <1500>; /* milliseconds */
+
+ /* sensor ID */
+ thermal-sensors = <&tmp102 0>;
+
+ board_trips: trips {
+ board_alert0: board_alert {
+ temperature = <40000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "active";
+ };
+
+ board_crit: board_crit {
+ temperature = <105000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ board_cooling_maps: cooling-maps {
+ map0 {
+ trip = <&board_alert0>;
+ cooling-device =
+ <&gpio_fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index e993c46bd472..19f3bf271915 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -45,6 +45,15 @@
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Note: this Device Tree assumes that the bootloader has remapped the
+ * internal registers to 0xf1000000 (instead of the default
+ * 0xd0000000). The 0xf1000000 is the default used by the recent,
+ * DT-capable, U-Boot bootloaders provided by Marvell. Some earlier
+ * boards were delivered with an older version of the bootloader that
+ * left internal registers mapped at 0xd0000000. If you are in this
+ * situation, you should either update your bootloader (the preferred
+ * solution) or adjust the Device Tree below.
*/
/dts-v1/;
@@ -55,7 +64,7 @@
compatible = "marvell,a370-db", "marvell,armada370", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
@@ -64,7 +73,7 @@
};
soc {
- ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
internal-regs {
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index b10ceb488efe..0f40d5da28c3 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -51,7 +51,7 @@
compatible = "globalscale,mirabox", "marvell,armada370", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
index 3f8cc3845a5e..a31207860f34 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
@@ -53,7 +53,7 @@
compatible = "netgear,readynas-102", "marvell,armada370", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn104.dts b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
index 99eb8a014ac6..00540f292979 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn104.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
@@ -53,7 +53,7 @@
compatible = "netgear,readynas-104", "marvell,armada370", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
index 6ae36a38beb2..19475e68b8e9 100644
--- a/arch/arm/boot/dts/armada-370-rd.dts
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -64,7 +64,7 @@
compatible = "marvell,a370-rd", "marvell,armada370", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-370-synology-ds213j.dts b/arch/arm/boot/dts/armada-370-synology-ds213j.dts
index 59f74e66963f..b42b767763aa 100644
--- a/arch/arm/boot/dts/armada-370-synology-ds213j.dts
+++ b/arch/arm/boot/dts/armada-370-synology-ds213j.dts
@@ -67,8 +67,7 @@
"marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
- stdout-path = &uart0;
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 8a322ad57e5f..ec96f0b36346 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -59,8 +59,8 @@
compatible = "marvell,armada-370-xp";
aliases {
- eth0 = &eth0;
- eth1 = &eth1;
+ serial0 = &uart0;
+ serial1 = &uart1;
};
cpus {
@@ -73,6 +73,11 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts-extended = <&mpic 3>;
+ };
+
soc {
#address-cells = <2>;
#size-cells = <1>;
@@ -223,7 +228,7 @@
<0x20250 0x8>;
};
- mpic: interrupt-controller@20000 {
+ mpic: interrupt-controller@20a00 {
compatible = "marvell,mpic";
#interrupt-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 27397f151def..00b50db57c9c 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -129,6 +129,7 @@
compatible = "marvell,aurora-outer-cache";
reg = <0x08000 0x1000>;
cache-id-part = <0x100>;
+ cache-level = <2>;
cache-unified;
wt-override;
};
@@ -232,7 +233,7 @@
reg = <0x18330 0x4>;
};
- interrupt-controller@20000 {
+ interrupt-controller@20a00 {
reg = <0x20a00 0x1d0>, <0x21870 0x58>;
};
diff --git a/arch/arm/boot/dts/armada-375-db.dts b/arch/arm/boot/dts/armada-375-db.dts
index 0440891425c0..4eabc9c21f8d 100644
--- a/arch/arm/boot/dts/armada-375-db.dts
+++ b/arch/arm/boot/dts/armada-375-db.dts
@@ -55,7 +55,7 @@
compatible = "marvell,a375-db", "marvell,armada375";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index ba3c57e0af72..f076ff856d8b 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -60,8 +60,8 @@
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
- ethernet0 = &eth0;
- ethernet1 = &eth1;
+ serial0 = &uart0;
+ serial1 = &uart1;
};
clocks {
@@ -69,7 +69,7 @@
mainpll: mainpll {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <2000000000>;
+ clock-frequency = <1000000000>;
};
/* 25 MHz reference crystal */
refclk: oscillator {
@@ -96,6 +96,11 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts-extended = <&mpic 3>;
+ };
+
soc {
compatible = "marvell,armada375-mbus", "simple-bus";
#address-cells = <2>;
@@ -276,7 +281,7 @@
status = "disabled";
};
- serial@12000 {
+ uart0: serial@12000 {
compatible = "snps,dw-apb-uart";
reg = <0x12000 0x100>;
reg-shift = <2>;
@@ -286,7 +291,7 @@
status = "disabled";
};
- serial@12100 {
+ uart1: serial@12100 {
compatible = "snps,dw-apb-uart";
reg = <0x12100 0x100>;
reg-shift = <2>;
@@ -394,7 +399,7 @@
reg = <0x20000 0x100>, <0x20180 0x20>;
};
- mpic: interrupt-controller@20000 {
+ mpic: interrupt-controller@20a00 {
compatible = "marvell,mpic";
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
index 57b9119fb3e0..7219ac3a3d90 100644
--- a/arch/arm/boot/dts/armada-385-db-ap.dts
+++ b/arch/arm/boot/dts/armada-385-db-ap.dts
@@ -49,8 +49,7 @@
compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x";
chosen {
- bootargs = "console=ttyS0,115200";
- stdout-path = &uart1;
+ stdout-path = "serial1:115200n8";
};
memory {
@@ -126,6 +125,13 @@
status = "okay";
};
+ pinctrl@18000 {
+ xhci0_vbus_pins: xhci0-vbus-pins {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ };
+
ethernet@30000 {
status = "okay";
phy = <&phy2>;
@@ -150,6 +156,24 @@
phy = <&phy0>;
phy-mode = "rgmii-id";
};
+
+ nfc: flash@d0000 {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ num-cs = <1>;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ marvell,nand-keep-config;
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ };
+
+ usb3@f0000 {
+ status = "okay";
+ usb-phy = <&usb3_phy>;
+ };
};
pcie-controller {
@@ -175,4 +199,20 @@
};
};
};
+
+ usb3_phy: usb3_phy {
+ compatible = "usb-nop-xceiv";
+ vcc-supply = <&reg_xhci0_vbus>;
+ };
+
+ reg_xhci0_vbus: xhci0-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&xhci0_vbus_pins>;
+ regulator-name = "xhci0-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ };
};
diff --git a/arch/arm/boot/dts/armada-388-db.dts b/arch/arm/boot/dts/armada-388-db.dts
index 16512efcd32c..51d1623de53e 100644
--- a/arch/arm/boot/dts/armada-388-db.dts
+++ b/arch/arm/boot/dts/armada-388-db.dts
@@ -54,7 +54,7 @@
"marvell,armada385", "marvell,armada380";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
@@ -99,7 +99,7 @@
phy-mode = "rgmii-id";
};
- usb@50000 {
+ usb@58000 {
status = "ok";
};
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index 590b383db323..78514ab0b47a 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -48,8 +48,7 @@
compatible = "marvell,a385-gp", "marvell,armada388", "marvell,armada380";
chosen {
- bootargs = "console=ttyS0,115200";
- stdout-path = &uart0;
+ stdout-path = "serial0:115200n8";
};
memory {
@@ -135,7 +134,7 @@
};
/* CON4 */
- usb@50000 {
+ usb@58000 {
vcc-supply = <&reg_usb2_0_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/armada-388-rd.dts b/arch/arm/boot/dts/armada-388-rd.dts
index d99baac72081..1dc6e2341cc2 100644
--- a/arch/arm/boot/dts/armada-388-rd.dts
+++ b/arch/arm/boot/dts/armada-388-rd.dts
@@ -55,7 +55,7 @@
"marvell,armada385","marvell,armada380";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
@@ -85,6 +85,16 @@
clock-frequency = <100000>;
};
+ sdhci@d8000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhci_pins>;
+ broken-cd;
+ no-1-8-v;
+ wp-inverted;
+ bus-width = <8>;
+ status = "okay";
+ };
+
serial@12000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 1dff30a81e24..218a2acd36e5 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -59,9 +59,13 @@
aliases {
gpio0 = &gpio0;
gpio1 = &gpio1;
- ethernet0 = &eth0;
- ethernet1 = &eth1;
- ethernet2 = &eth2;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts-extended = <&mpic 3>;
};
soc {
@@ -216,7 +220,7 @@
status = "disabled";
};
- serial@12100 {
+ uart1: serial@12100 {
compatible = "snps,dw-apb-uart";
reg = <0x12100 0x100>;
reg-shift = <2>;
@@ -368,7 +372,7 @@
reg = <0x20000 0x100>, <0x20180 0x20>;
};
- mpic: interrupt-controller@20000 {
+ mpic: interrupt-controller@20a00 {
compatible = "marvell,mpic";
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
#interrupt-cells = <1>;
@@ -435,7 +439,7 @@
status = "disabled";
};
- usb@50000 {
+ usb@58000 {
compatible = "marvell,orion-ehci";
reg = <0x58000 0x500>;
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
@@ -548,8 +552,11 @@
sdhci@d8000 {
compatible = "marvell,armada-380-sdhci";
- reg = <0xd8000 0x1000>, <0xdc000 0x100>;
- interrupts = <0 25 0x4>;
+ reg-names = "sdhci", "mbus", "conf-sdio3";
+ reg = <0xd8000 0x1000>,
+ <0xdc000 0x100>,
+ <0x18454 0x4>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 17>;
mrvl,clk-delay-cycles = <0x1F>;
status = "disabled";
@@ -578,7 +585,7 @@
mainpll: mainpll {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <2000000000>;
+ clock-frequency = <1000000000>;
};
/* 25 MHz reference crystal */
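The sdhci hunk above also rewrites the raw interrupt specifier with the shared GIC macros; the two forms encode identical cells, since <dt-bindings/interrupt-controller/arm-gic.h> defines GIC_SPI as 0 and <dt-bindings/interrupt-controller/irq.h> defines IRQ_TYPE_LEVEL_HIGH as 4:

    /* before: bare numbers */
    interrupts = <0 25 0x4>;

    /* after: same three cells, spelled with the binding macros (SPI 25, level high) */
    interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;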
diff --git a/arch/arm/boot/dts/armada-390.dtsi b/arch/arm/boot/dts/armada-390.dtsi
new file mode 100644
index 000000000000..094e39c66039
--- /dev/null
+++ b/arch/arm/boot/dts/armada-390.dtsi
@@ -0,0 +1,57 @@
+/*
+ * Device Tree Include file for Marvell Armada 390 SoC.
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "armada-39x.dtsi"
+
+/ {
+ soc {
+ internal-regs {
+ pinctrl@18000 {
+ compatible = "marvell,mv88f6920-pinctrl";
+ reg = <0x18000 0x20>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-398-db.dts b/arch/arm/boot/dts/armada-398-db.dts
new file mode 100644
index 000000000000..bbf83756c43c
--- /dev/null
+++ b/arch/arm/boot/dts/armada-398-db.dts
@@ -0,0 +1,153 @@
+/*
+ * Device Tree Include file for Marvell Armada 398 Development Board
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "armada-398.dtsi"
+
+/ {
+ model = "Marvell Armada 398 Development Board";
+ compatible = "marvell,a398-db", "marvell,armada398", "marvell,armada390";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000>; /* 2 GB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+ MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000>;
+
+ internal-regs {
+ spi@10680 {
+ status = "okay";
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "n25q128a13";
+ reg = <0>;
+ spi-max-frequency = <108000000>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x400000>;
+ };
+
+ partition@400000 {
+ label = "Filesystem";
+ reg = <0x400000 0x1000000>;
+ };
+ };
+ };
+
+ i2c@11000 {
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ clock-frequency = <100000>;
+ };
+
+ serial@12000 {
+ pinctrl-0 = <&uart0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
+
+ serial@12100 {
+ pinctrl-0 = <&uart1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
+
+ flash@d0000 {
+ status = "okay";
+ pinctrl-0 = <&nand_pins>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ marvell,nand-keep-config;
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x800000>;
+ };
+ partition@800000 {
+ label = "Linux";
+ reg = <0x800000 0x800000>;
+ };
+ partition@1000000 {
+ label = "Filesystem";
+ reg = <0x1000000 0x3f000000>;
+ };
+ };
+ };
+
+ pcie-controller {
+ status = "okay";
+
+ pcie@1,0 {
+ status = "okay";
+ };
+
+ pcie@2,0 {
+ status = "okay";
+ };
+
+ pcie@3,0 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-398.dtsi b/arch/arm/boot/dts/armada-398.dtsi
new file mode 100644
index 000000000000..fdc25914e3a3
--- /dev/null
+++ b/arch/arm/boot/dts/armada-398.dtsi
@@ -0,0 +1,60 @@
+/*
+ * Device Tree Include file for Marvell Armada 398 SoC.
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "armada-39x.dtsi"
+
+/ {
+ compatible = "marvell,armada398", "marvell,armada390";
+
+ soc {
+ internal-regs {
+ pinctrl@18000 {
+ compatible = "marvell,mv88f6928-pinctrl";
+ reg = <0x18000 0x20>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
new file mode 100644
index 000000000000..ecd1318109ba
--- /dev/null
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -0,0 +1,508 @@
+/*
+ * Device Tree Include file for Marvell Armada 39x family of SoCs.
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "skeleton.dtsi"
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+/ {
+ model = "Marvell Armada 39x family SoC";
+ compatible = "marvell,armada390";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "marvell,armada-390-smp";
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ };
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ };
+ };
+
+ soc {
+ compatible = "marvell,armada390-mbus", "marvell,armadaxp-mbus",
+ "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ controller = <&mbusc>;
+ interrupt-parent = <&gic>;
+ pcie-mem-aperture = <0xe0000000 0x8000000>;
+ pcie-io-aperture = <0xe8000000 0x100000>;
+
+ bootrom {
+ compatible = "marvell,bootrom";
+ reg = <MBUS_ID(0x01, 0x1d) 0 0x200000>;
+ };
+
+ internal-regs {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+ L2: cache-controller@8000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x8000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ scu@c000 {
+ compatible = "arm,cortex-a9-scu";
+ reg = <0xc000 0x100>;
+ };
+
+ timer@c600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0xc600 0x20>;
+ interrupts = <GIC_PPI 13 (IRQ_TYPE_EDGE_RISING | GIC_CPU_MASK_SIMPLE(2))>;
+ clocks = <&coreclk 2>;
+ };
+
+ gic: interrupt-controller@d000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #size-cells = <0>;
+ interrupt-controller;
+ reg = <0xd000 0x1000>,
+ <0xc100 0x100>;
+ };
+
+ spi0: spi@10600 {
+ compatible = "marvell,orion-spi";
+ reg = <0x10600 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ spi1: spi@10680 {
+ compatible = "marvell,orion-spi";
+ reg = <0x10680 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@11000 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ timeout-ms = <1000>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@11100 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11100 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ timeout-ms = <1000>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@11200 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11200 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ timeout-ms = <1000>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@11300 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11300 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ timeout-ms = <1000>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ uart0: serial@12000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x12000 0x100>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg-io-width = <1>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ uart1: serial@12100 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x12100 0x100>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg-io-width = <1>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ uart2: serial@12200 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x12200 0x100>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg-io-width = <1>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ uart3: serial@12300 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x12300 0x100>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ reg-io-width = <1>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ pinctrl@18000 {
+ i2c0_pins: i2c0-pins {
+ marvell,pins = "mpp2", "mpp3";
+ marvell,function = "i2c0";
+ };
+
+ uart0_pins: uart0-pins {
+ marvell,pins = "mpp0", "mpp1";
+ marvell,function = "ua0";
+ };
+
+ uart1_pins: uart1-pins {
+ marvell,pins = "mpp19", "mpp20";
+ marvell,function = "ua1";
+ };
+
+ spi1_pins: spi1-pins {
+ marvell,pins = "mpp56", "mpp57", "mpp58", "mpp59";
+ marvell,function = "spi1";
+ };
+
+ nand_pins: nand-pins {
+ marvell,pins = "mpp22", "mpp34", "mpp23", "mpp33",
+ "mpp38", "mpp28", "mpp40", "mpp42",
+ "mpp35", "mpp36", "mpp25", "mpp30",
+ "mpp32";
+ marvell,function = "dev";
+ };
+ };
+
+ system-controller@18200 {
+ compatible = "marvell,armada-390-system-controller",
+ "marvell,armada-370-xp-system-controller";
+ reg = <0x18200 0x100>;
+ };
+
+ gateclk: clock-gating-control@18220 {
+ compatible = "marvell,armada-390-gating-clock";
+ reg = <0x18220 0x4>;
+ clocks = <&coreclk 0>;
+ #clock-cells = <1>;
+ };
+
+ coreclk: mvebu-sar@18600 {
+ compatible = "marvell,armada-390-core-clock";
+ reg = <0x18600 0x04>;
+ #clock-cells = <1>;
+ };
+
+ mbusc: mbus-controller@20000 {
+ compatible = "marvell,mbus-controller";
+ reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+ };
+
+ mpic: interrupt-controller@20a00 {
+ compatible = "marvell,mpic";
+ reg = <0x20a00 0x2d0>, <0x21070 0x58>;
+ #interrupt-cells = <1>;
+ #size-cells = <1>;
+ interrupt-controller;
+ msi-controller;
+ interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ timer@20300 {
+ compatible = "marvell,armada-380-timer",
+ "marvell,armada-xp-timer";
+ reg = <0x20300 0x30>, <0x21040 0x30>;
+ interrupts-extended = <&gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <&mpic 5>,
+ <&mpic 6>;
+ clocks = <&coreclk 2>, <&coreclk 5>;
+ clock-names = "nbclk", "fixed";
+ };
+
+ cpurst@20800 {
+ compatible = "marvell,armada-370-cpu-reset";
+ reg = <0x20800 0x10>;
+ };
+
+ pmsu@22000 {
+ compatible = "marvell,armada-390-pmsu",
+ "marvell,armada-380-pmsu";
+ reg = <0x22000 0x1000>;
+ };
+
+ xor@60800 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60800 0x100
+ 0x60a00 0x100>;
+ clocks = <&gateclk 22>;
+ status = "okay";
+
+ xor00 {
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor01 {
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
+
+ xor@60900 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60900 0x100
+ 0x60b00 0x100>;
+ clocks = <&gateclk 28>;
+ status = "okay";
+
+ xor10 {
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor11 {
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
+
+ flash@d0000 {
+ compatible = "marvell,armada370-nand";
+ reg = <0xd0000 0x54>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coredivclk 0>;
+ status = "disabled";
+ };
+
+ sdhci@d8000 {
+ compatible = "marvell,armada-380-sdhci";
+ reg = <0xd8000 0x1000>, <0xdc000 0x100>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 17>;
+ mrvl,clk-delay-cycles = <0x1F>;
+ status = "disabled";
+ };
+
+ coredivclk: clock@e4250 {
+ compatible = "marvell,armada-390-corediv-clock",
+ "marvell,armada-380-corediv-clock";
+ reg = <0xe4250 0xc>;
+ #clock-cells = <1>;
+ clocks = <&mainpll>;
+ clock-output-names = "nand";
+ };
+ };
+
+ pcie-controller {
+ compatible = "marvell,armada-370-pcie";
+ status = "disabled";
+ device_type = "pci";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ msi-parent = <&mpic>;
+ bus-range = <0x00 0xff>;
+
+ ranges =
+ <0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000
+ 0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000
+ 0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000
+ 0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000
+ 0x82000000 0x1 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 0 MEM */
+ 0x81000000 0x1 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 0 IO */
+ 0x82000000 0x2 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 1 MEM */
+ 0x81000000 0x2 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 1 IO */
+ 0x82000000 0x3 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 2 MEM */
+ 0x81000000 0x3 0 MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 2 IO */
+ 0x82000000 0x4 0 MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 3 MEM */
+ 0x81000000 0x4 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 3 IO */>;
+
+ /*
+ * This port can be either x4 or x1. When
+ * configured in x4 by the bootloader, then
+ * pcie@4,0 is not available.
+ */
+ pcie@1,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+ 0x81000000 0 0 0x81000000 0x1 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ marvell,pcie-port = <0>;
+ marvell,pcie-lane = <0>;
+ clocks = <&gateclk 8>;
+ status = "disabled";
+ };
+
+ /* x1 port */
+ pcie@2,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+ 0x81000000 0 0 0x81000000 0x2 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <0>;
+ clocks = <&gateclk 5>;
+ status = "disabled";
+ };
+
+ /* x1 port */
+ pcie@3,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+ 0x81000000 0 0 0x81000000 0x3 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ marvell,pcie-port = <2>;
+ marvell,pcie-lane = <0>;
+ clocks = <&gateclk 6>;
+ status = "disabled";
+ };
+
+ /*
+ * x1 port only available when pcie@1,0 is
+ * configured as a x1 port
+ */
+ pcie@4,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
+ 0x81000000 0 0 0x81000000 0x4 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ marvell,pcie-port = <3>;
+ marvell,pcie-lane = <0>;
+ clocks = <&gateclk 7>;
+ status = "disabled";
+ };
+ };
+ };
+
+ clocks {
+ /* 2 GHz fixed main PLL */
+ mainpll: mainpll {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000000>;
+ };
+ };
+};
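For readers of the new armada-39x.dtsi above, the MBUS_ID() helper simply packs the MBUS target and attribute IDs into the top two bytes of the first address cell, so the window addresses can be read as plain numbers. Worked expansion of the two IDs used in the bootrom and internal-regs ranges:

    #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))

    MBUS_ID(0x01, 0x1d)  =  (0x01 << 24) | (0x1d << 16)  =  0x011d0000   /* bootrom window */
    MBUS_ID(0xf0, 0x01)  =  (0xf0 << 24) | (0x01 << 16)  =  0xf0010000   /* internal registers */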
diff --git a/arch/arm/boot/dts/armada-xp-axpwifiap.dts b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
index c1fbab243609..dfd782b44e50 100644
--- a/arch/arm/boot/dts/armada-xp-axpwifiap.dts
+++ b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
@@ -59,7 +59,7 @@
compatible = "marvell,rd-axpwifiap", "marvell,armadaxp-mv78230", "marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index 48bdafe17526..103782407618 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -64,7 +64,7 @@
compatible = "marvell,axp-db", "marvell,armadaxp-mv78460", "marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 206aebba01be..565227eacf06 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -65,7 +65,7 @@
compatible = "marvell,axp-gp", "marvell,armadaxp-mv78460", "marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index 5fb3c8b687cf..06a6a6c1fdf7 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -54,8 +54,7 @@
"marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
- stdout-path = &uart0;
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
new file mode 100644
index 000000000000..a2cf2154dcdb
--- /dev/null
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -0,0 +1,393 @@
+/*
+ * Device Tree file for the Linksys WRT1900AC (Mamba).
+ *
+ * Note: this board is shipped with a new generation boot loader that
+ * remaps internal registers at 0xf1000000. Therefore, if earlyprintk
+ * is used, the CONFIG_DEBUG_MVEBU_UART0_ALTERNATE option should be
+ * used.
+ *
+ * Copyright (C) 2014 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Based on armada-xp-axpwifiap.dts:
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "armada-xp-mv78230.dtsi"
+
+/ {
+ model = "Linksys WRT1900AC";
+ compatible = "linksys,mamba", "marvell,armadaxp-mv78230",
+ "marvell,armadaxp", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000 0x10000000>; /* 256MB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000>;
+
+ pcie-controller {
+ status = "okay";
+
+ /* Etron EJ168 USB 3.0 controller */
+ pcie@1,0 {
+ /* Port 0, Lane 0 */
+ status = "okay";
+ };
+
+ /* First mini-PCIe port */
+ pcie@2,0 {
+ /* Port 0, Lane 1 */
+ status = "okay";
+ };
+
+ /* Second mini-PCIe port */
+ pcie@3,0 {
+ /* Port 0, Lane 3 */
+ status = "okay";
+ };
+ };
+
+ internal-regs {
+
+ /* J10: VCC, NC, RX, NC, TX, GND */
+ serial@12000 {
+ status = "okay";
+ };
+
+ sata@a0000 {
+ nr-ports = <1>;
+ status = "okay";
+ };
+
+ ethernet@70000 {
+ pinctrl-0 = <&ge0_rgmii_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ phy-mode = "rgmii-id";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ ethernet@74000 {
+ pinctrl-0 = <&ge1_rgmii_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ phy-mode = "rgmii-id";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ /* USB part of the eSATA/USB 2.0 port */
+ usb@50000 {
+ status = "okay";
+ };
+
+ i2c@11000 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ tmp421@4c {
+ compatible = "ti,tmp421";
+ reg = <0x4c>;
+ };
+
+ tlc59116@68 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #gpio-cells = <2>;
+ compatible = "ti,tlc59116";
+ reg = <0x68>;
+
+ wan_amber@0 {
+ label = "mamba:amber:wan";
+ reg = <0x0>;
+ };
+
+ wan_white@1 {
+ label = "mamba:white:wan";
+ reg = <0x1>;
+ };
+
+ wlan_2g@2 {
+ label = "mamba:white:wlan_2g";
+ reg = <0x2>;
+ };
+
+ wlan_5g@3 {
+ label = "mamba:white:wlan_5g";
+ reg = <0x3>;
+ };
+
+ esata@4 {
+ label = "mamba:white:esata";
+ reg = <0x4>;
+ };
+
+ usb2@5 {
+ label = "mamba:white:usb2";
+ reg = <0x5>;
+ };
+
+ usb3_1@6 {
+ label = "mamba:white:usb3_1";
+ reg = <0x6>;
+ };
+
+ usb3_2@7 {
+ label = "mamba:white:usb3_2";
+ reg = <0x7>;
+ };
+
+ wps_white@8 {
+ label = "mamba:white:wps";
+ reg = <0x8>;
+ };
+
+ wps_amber@9 {
+ label = "mamba:amber:wps";
+ reg = <0x9>;
+ };
+ };
+ };
+
+ nand@d0000 {
+ status = "okay";
+ num-cs = <1>;
+ marvell,nand-keep-config;
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>; /* 1MB */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u_env";
+ reg = <0x100000 0x40000>; /* 256KB */
+ };
+
+ partition@140000 {
+ label = "s_env";
+ reg = <0x140000 0x40000>; /* 256KB */
+ };
+
+ partition@900000 {
+ label = "devinfo";
+ reg = <0x900000 0x100000>; /* 1MB */
+ read-only;
+ };
+
+ /* kernel1 overlaps with rootfs1 by design */
+ partition@a00000 {
+ label = "kernel1";
+ reg = <0xa00000 0x2800000>; /* 40MB */
+ };
+
+ partition@d00000 {
+ label = "rootfs1";
+ reg = <0xd00000 0x2500000>; /* 37MB */
+ };
+
+ /* kernel2 overlaps with rootfs2 by design */
+ partition@3200000 {
+ label = "kernel2";
+ reg = <0x3200000 0x2800000>; /* 40MB */
+ };
+
+ partition@3500000 {
+ label = "rootfs2";
+ reg = <0x3500000 0x2500000>; /* 37MB */
+ };
+
+ /*
+ * 38MB, last MB is for the BBT, not writable
+ */
+ partition@5a00000 {
+ label = "syscfg";
+ reg = <0x5a00000 0x2600000>;
+ };
+
+ /*
+ * Unused area between "s_env" and "devinfo".
+ * Moved here because otherwise the renumbered
+ * partitions would break the bootloader
+ * supplied bootargs
+ */
+ partition@180000 {
+ label = "unused_area";
+ reg = <0x180000 0x780000>; /* 7.5MB */
+ };
+ };
+
+ spi0: spi@10600 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "everspin,mr25h256";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <40000000>;
+ };
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&keys_pin>;
+ pinctrl-names = "default";
+
+ button@1 {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ button@2 {
+ label = "Factory Reset Button";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&power_led_pin>;
+ pinctrl-names = "default";
+
+ power {
+ label = "mamba:white:power";
+ gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ gpio_fan {
+ /* SUNON HA4010V4-0000-C99 */
+ compatible = "gpio-fan";
+ gpios = <&gpio0 24 0>;
+
+ gpio-fan,speed-map = <0 0
+ 4500 1>;
+ };
+
+ dsa@0 {
+ compatible = "marvell,dsa";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ dsa,ethernet = <&eth0>;
+ dsa,mii-bus = <&mdio>;
+
+ switch@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0>; /* MDIO address 0, switch 0 in tree */
+
+ port@0 {
+ reg = <0>;
+ label = "lan4";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan3";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan1";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "internet";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ };
+ };
+ };
+};
+
+&pinctrl {
+
+ keys_pin: keys-pin {
+ marvell,pins = "mpp32", "mpp33";
+ marvell,function = "gpio";
+ };
+
+ power_led_pin: power-led-pin {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+
+ gpio_fan_pin: gpio-fan-pin {
+ marvell,pins = "mpp24";
+ marvell,function = "gpio";
+ };
+};
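One property in the Mamba file that is easy to misread is gpio-fan,speed-map: per the gpio-fan binding it is a list of <RPM gpio-state> pairs, so the flattened <0 0 4500 1> above means the fan is stopped when GPIO 24 is deasserted and spins at a nominal 4500 RPM when it is asserted. Spelled out pair by pair:

    gpio_fan {
            compatible = "gpio-fan";
            gpios = <&gpio0 24 0>;
            /* <speed-in-RPM  control-value>: 0 RPM at state 0, 4500 RPM at state 1 */
            gpio-fan,speed-map = <0    0
                                  4500 1>;
    };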
diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts
index 56f958eb1ede..f894bc83e957 100644
--- a/arch/arm/boot/dts/armada-xp-matrix.dts
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@ -52,7 +52,7 @@
compatible = "marvell,axp-matrix", "marvell,armadaxp-mv78460", "marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 4a7cbed79b07..8479fdc9e9c2 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -57,7 +57,6 @@
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
- eth3 = &eth3;
};
cpus {
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index 36ce63a96cc9..661d54c81580 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -57,7 +57,6 @@
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
- eth3 = &eth3;
};
diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
index 99cb9a8401b4..1516fc2627f9 100644
--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
@@ -53,7 +53,7 @@
compatible = "netgear,readynas-2120", "marvell,armadaxp-mv78230", "marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index 0c76d9f05fd0..990e8a2100f0 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -54,7 +54,7 @@
compatible = "plathome,openblocks-ax3-4", "marvell,armadaxp-mv78260", "marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
+ stdout-path = "serial0:115200n8";
};
memory {
@@ -105,6 +105,10 @@
};
internal-regs {
+ rtc@10300 {
+ /* No crystal connected to the internal RTC */
+ status = "disabled";
+ };
serial@12000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/armada-xp-synology-ds414.dts b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
index e9fb225169aa..6063428fa6a0 100644
--- a/arch/arm/boot/dts/armada-xp-synology-ds414.dts
+++ b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
@@ -67,8 +67,7 @@
"marvell,armadaxp", "marvell,armada-370-xp";
chosen {
- bootargs = "console=ttyS0,115200 earlyprintk";
- stdout-path = &uart0;
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 82917236a2fb..013d63f69e36 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -57,7 +57,8 @@
compatible = "marvell,armadaxp", "marvell,armada-370-xp";
aliases {
- eth2 = &eth2;
+ serial2 = &uart2;
+ serial3 = &uart3;
};
soc {
@@ -78,6 +79,7 @@
compatible = "marvell,aurora-system-cache";
reg = <0x08000 0x1000>;
cache-id-part = <0x100>;
+ cache-level = <2>;
cache-unified;
wt-override;
};
@@ -149,11 +151,11 @@
cpuclk: clock-complex@18700 {
#clock-cells = <1>;
compatible = "marvell,armada-xp-cpu-clock";
- reg = <0x18700 0xA0>, <0x1c054 0x10>;
+ reg = <0x18700 0x24>, <0x1c054 0x10>;
clocks = <&coreclk 1>;
};
- interrupt-controller@20000 {
+ interrupt-controller@20a00 {
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
};
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index fec1fca2ad66..9991240b7438 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -167,7 +167,13 @@
macb1: ethernet@f802c000 {
phy-mode = "rmii";
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ };
};
dbgu: serial@ffffee00 {
@@ -188,6 +194,11 @@
<AT91_PIOA 19 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
};
+ pinctrl_key_gpio: key_gpio_0 {
+ atmel,pins =
+ <AT91_PIOE 29 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+
pinctrl_mmc0_cd: mmc0_cd {
atmel,pins =
<AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
@@ -276,6 +287,9 @@
gpio_keys {
compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_key_gpio>;
+
bp3 {
label = "PB_USER";
gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
new file mode 100644
index 000000000000..c740e1a2a3a5
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -0,0 +1,241 @@
+/*
+ * at91-sama5d4_xplained.dts - Device Tree file for SAMA5D4 Xplained board
+ *
+ * Copyright (C) 2015 Atmel,
+ * 2015 Josh Wu <josh.wu@atmel.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/dts-v1/;
+#include "sama5d4.dtsi"
+
+/ {
+ model = "Atmel SAMA5D4 Xplained";
+ compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 ignore_loglevel earlyprintk";
+ };
+
+ memory {
+ reg = <0x20000000 0x20000000>;
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ main_clock: clock@0 {
+ compatible = "atmel,osc", "fixed-clock";
+ clock-frequency = <12000000>;
+ };
+
+ slow_xtal {
+ clock-frequency = <32768>;
+ };
+
+ main_xtal {
+ clock-frequency = <12000000>;
+ };
+ };
+
+ ahb {
+ apb {
+ spi0: spi@f8010000 {
+ cs-gpios = <&pioC 3 0>, <0>, <0>, <0>;
+ status = "okay";
+ m25p80@0 {
+ compatible = "atmel,at25df321a";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
+ };
+
+ i2c0: i2c@f8014000 {
+ status = "okay";
+ };
+
+ macb0: ethernet@f8020000 {
+ phy-mode = "rmii";
+ status = "okay";
+
+ phy0: ethernet-phy@1 {
+ interrupt-parent = <&pioE>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ reg = <1>;
+ };
+ };
+
+ mmc1: mmc@fc000000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>;
+ status = "okay";
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ cd-gpios = <&pioE 3 0>;
+ };
+ };
+
+ usart3: serial@fc00c000 {
+ status = "okay";
+ };
+
+ usart4: serial@fc010000 {
+ status = "okay";
+ };
+
+ adc0: adc@fc034000 {
+ atmel,adc-vref = <3300>;
+ status = "okay";
+ };
+
+ watchdog@fc068640 {
+ status = "okay";
+ };
+
+ pinctrl@fc06a000 {
+ board {
+ pinctrl_mmc1_cd: mmc1_cd {
+ atmel,pins =
+ <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+ pinctrl_usba_vbus: usba_vbus {
+ atmel,pins =
+ <AT91_PIOE 31 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
+ };
+ pinctrl_key_gpio: key_gpio_0 {
+ atmel,pins =
+ <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+ };
+ };
+ };
+
+ usb0: gadget@00400000 {
+ atmel,vbus-gpio = <&pioE 31 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usba_vbus>;
+ status = "okay";
+ };
+
+ usb1: ohci@00500000 {
+ num-ports = <3>;
+ atmel,vbus-gpio = <0
+ &pioE 11 GPIO_ACTIVE_HIGH
+ &pioE 14 GPIO_ACTIVE_HIGH
+ >;
+ status = "okay";
+ };
+
+ usb2: ehci@00600000 {
+ status = "okay";
+ };
+
+ nand0: nand@80000000 {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "hw";
+ nand-on-flash-bbt;
+ atmel,has-pmecc;
+ status = "okay";
+
+ at91bootstrap@0 {
+ label = "at91bootstrap";
+ reg = <0x0 0x40000>;
+ };
+
+ bootloader@40000 {
+ label = "bootloader";
+ reg = <0x40000 0x80000>;
+ };
+
+ bootloaderenv@c0000 {
+ label = "bootloader env";
+ reg = <0xc0000 0xc0000>;
+ };
+
+ dtb@180000 {
+ label = "device tree";
+ reg = <0x180000 0x80000>;
+ };
+
+ kernel@200000 {
+ label = "kernel";
+ reg = <0x200000 0x600000>;
+ };
+
+ rootfs@800000 {
+ label = "rootfs";
+ reg = <0x800000 0x0f800000>;
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_key_gpio>;
+
+ pb_user1 {
+ label = "pb_user1";
+ gpios = <&pioE 8 GPIO_ACTIVE_HIGH>;
+ linux,code = <0x100>;
+ gpio-key,wakeup;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ status = "okay";
+
+ d8 {
+ label = "d8";
+ gpios = <&pioD 30 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+ };
+
+ d10 {
+ label = "d10";
+ gpios = <&pioE 15 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 9198b719d0ef..89ef4a540db5 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -115,6 +115,10 @@
};
};
+ ssc0: ssc@f8008000 {
+ status = "okay";
+ };
+
spi0: spi@f8010000 {
cs-gpios = <&pioC 3 0>, <0>, <0>, <0>;
status = "okay";
@@ -127,6 +131,13 @@
i2c0: i2c@f8014000 {
status = "okay";
+
+ wm8904: codec@1a {
+ compatible = "wlf,wm8904";
+ reg = <0x1a>;
+ clocks = <&pck2>;
+ clock-names = "mclk";
+ };
};
macb0: ethernet@f8020000 {
@@ -171,6 +182,10 @@
atmel,pins =
<AT91_PIOE 6 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
+ pinctrl_pck2_as_audio_mck: pck2_as_audio_mck {
+ atmel,pins =
+ <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+ };
pinctrl_usba_vbus: usba_vbus {
atmel,pins =
<AT91_PIOE 31 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
@@ -244,8 +259,6 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_key_gpio>;
@@ -257,4 +270,42 @@
gpio-key,wakeup;
};
};
+
+ leds {
+ compatible = "gpio-leds";
+ status = "okay";
+
+ d8 {
+ label = "d8";
+ /* PE28, conflicts with usart4 rts pin */
+ gpios = <&pioE 28 GPIO_ACTIVE_LOW>;
+ };
+
+ d9 {
+ label = "d9";
+ gpios = <&pioE 9 GPIO_ACTIVE_HIGH>;
+ };
+
+ d10 {
+ label = "d10";
+ gpios = <&pioE 8 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ sound {
+ compatible = "atmel,asoc-wm8904";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck2_as_audio_mck>;
+
+ atmel,model = "wm8904 @ SAMA5D4EK";
+ atmel,audio-routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "IN1L", "Line In Jack",
+ "IN1R", "Line In Jack";
+
+ atmel,ssc-controller = <&ssc0>;
+ atmel,audio-codec = <&wm8904>;
+ };
};
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 21c2b504f977..4fb333bd1f85 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -356,9 +356,13 @@
};
st: timer@fffffd00 {
- compatible = "atmel,at91rm9200-st";
+ compatible = "atmel,at91rm9200-st", "syscon", "simple-mfd";
reg = <0xfffffd00 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+
+ watchdog {
+ compatible = "atmel,at91rm9200-wdt";
+ };
};
rtc: rtc@fffffe00 {
@@ -830,7 +834,7 @@
};
dbgu: serial@fffff200 {
- compatible = "atmel,at91rm9200-usart";
+ compatible = "atmel,at91rm9200-dbgu", "atmel,at91rm9200-usart";
reg = <0xfffff200 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index e7f0a4ae271c..d88fe62a2b2e 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -753,7 +753,7 @@
};
dbgu: serial@fffff200 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
@@ -842,7 +842,7 @@
};
macb0: ethernet@fffc4000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xfffc4000 0x100>;
interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index d55fdf2487ef..bf8d1856a55a 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -276,7 +276,7 @@
};
dbgu: serial@fffff200 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index fce301c4e9d6..111889b556cf 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -762,7 +762,7 @@
};
dbgu: serial@ffffee00 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xffffee00 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
@@ -845,7 +845,7 @@
};
macb0: ethernet@fffbc000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xfffbc000 0x100>;
interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
index 17b879990914..a7da0dd0c98f 100644
--- a/arch/arm/boot/dts/at91sam9g25.dtsi
+++ b/arch/arm/boot/dts/at91sam9g25.dtsi
@@ -7,6 +7,7 @@
*/
#include "at91sam9x5.dtsi"
+#include "at91sam9x5_isi.dtsi"
#include "at91sam9x5_usart3.dtsi"
#include "at91sam9x5_macb0.dtsi"
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 1e4c49c584d3..707fd4ea58f5 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -16,10 +16,28 @@
ahb {
apb {
+ spi0: spi@f0000000 {
+ status = "disabled";
+ };
+
+ mmc1: mmc@f000c000 {
+ status = "disabled";
+ };
+
+ i2c0: i2c@f8010000 {
+ ov2640: camera@0x30 {
+ status = "okay";
+ };
+ };
+
macb0: ethernet@f802c000 {
phy-mode = "rmii";
status = "okay";
};
+
+ isi: isi@f8048000 {
+ status = "okay";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 488af63d5174..70e59c5ceb2f 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -893,7 +893,7 @@
};
dbgu: serial@ffffee00 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xffffee00 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
@@ -956,7 +956,7 @@
};
macb0: ethernet@fffbc000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xfffbc000 0x100>;
interrupts = <25 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 0c53a375ba99..a9e35dfc12d9 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -757,7 +757,7 @@
};
dbgu: serial@fffff200 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
@@ -912,6 +912,15 @@
clocks = <&pwm_clk>;
status = "disabled";
};
+
+ usb1: gadget@f803c000 {
+ compatible = "atmel,at91sam9260-udc";
+ reg = <0xf803c000 0x4000>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH 2>;
+ clocks = <&udphs_clk>, <&udpck>;
+ clock-names = "pclk", "hclk";
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index 9575c0d895c9..6e067c8a3502 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -108,6 +108,13 @@
<AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>;
};
};
+
+ usb1 {
+ pinctrl_usb1_vbus_sense: usb1_vbus_sense {
+ atmel,pins =
+ <AT91_PIOB 16 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PB16 gpio usb vbus sense, no pull up and deglitch */
+ };
+ };
};
spi0: spi@f0000000 {
@@ -120,9 +127,20 @@
};
};
+ usb1: gadget@f803c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_vbus_sense>;
+ atmel,vbus-gpio = <&pioB 16 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
watchdog@fffffe40 {
status = "okay";
};
+
+ rtc@fffffeb0 {
+ status = "okay";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index 40f645b8fe25..ebfd5ce9cb38 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -377,7 +377,7 @@
};
dbgu: serial@fffff200 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index d221179d0f1a..3aa56ae3410a 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -860,7 +860,7 @@
};
dbgu: serial@fffff200 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9x5_isi.dtsi b/arch/arm/boot/dts/at91sam9x5_isi.dtsi
index 98bc877a68ef..8fc45ca4dcb5 100644
--- a/arch/arm/boot/dts/at91sam9x5_isi.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5_isi.dtsi
@@ -13,6 +13,37 @@
/ {
ahb {
apb {
+ pinctrl@fffff400 {
+ isi {
+ pinctrl_isi_data_0_7: isi-0-data-0-7 {
+ atmel,pins =
+ <AT91_PIOC 0 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D0, conflicts with LCDDAT0 */
+ AT91_PIOC 1 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D1, conflicts with LCDDAT1 */
+ AT91_PIOC 2 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D2, conflicts with LCDDAT2 */
+ AT91_PIOC 3 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D3, conflicts with LCDDAT3 */
+ AT91_PIOC 4 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D4, conflicts with LCDDAT4 */
+ AT91_PIOC 5 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D5, conflicts with LCDDAT5 */
+ AT91_PIOC 6 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D6, conflicts with LCDDAT6 */
+ AT91_PIOC 7 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D7, conflicts with LCDDAT7 */
+ AT91_PIOC 12 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_PCK, conflicts with LCDDAT12 */
+ AT91_PIOC 14 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_HSYNC, conflicts with LCDDAT14 */
+ AT91_PIOC 13 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* ISI_VSYNC, conflicts with LCDDAT13 */
+ };
+
+ pinctrl_isi_data_8_9: isi-0-data-8-9 {
+ atmel,pins =
+ <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D8, conflicts with LCDDAT8 */
+ AT91_PIOC 9 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* ISI_D9, conflicts with LCDDAT9 */
+ };
+
+ pinctrl_isi_data_10_11: isi-0-data-10-11 {
+ atmel,pins =
+ <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE /* ISI_D10, conflicts with LCDDAT10 */
+ AT91_PIOC 11 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* ISI_D11, conflicts with LCDDAT11 */
+ };
+ };
+ };
+
pmc: pmc@fffffc00 {
periphck {
isi_clk: isi_clk {
@@ -21,6 +52,21 @@
};
};
};
+
+ isi: isi@f8048000 {
+ compatible = "atmel,at91sam9g45-isi";
+ reg = <0xf8048000 0x4000>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_isi_data_0_7>;
+ clocks = <&isi_clk>;
+ clock-names = "isi_clk";
+ status = "disabled";
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/at91sam9x5_macb0.dtsi b/arch/arm/boot/dts/at91sam9x5_macb0.dtsi
index 57e89d1d0325..73d7e30965ba 100644
--- a/arch/arm/boot/dts/at91sam9x5_macb0.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5_macb0.dtsi
@@ -53,7 +53,7 @@
};
macb0: ethernet@f802c000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9x5_macb1.dtsi b/arch/arm/boot/dts/at91sam9x5_macb1.dtsi
index 663676c02861..d81980c40c7d 100644
--- a/arch/arm/boot/dts/at91sam9x5_macb1.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5_macb1.dtsi
@@ -41,7 +41,7 @@
};
macb1: ethernet@f8030000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf8030000 0x100>;
interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
index 229d6c24a9c4..26112ebd15fc 100644
--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
@@ -42,6 +42,10 @@
};
};
};
+
+ rtc@fffffeb0 {
+ status = "okay";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index bd16bd360272..cc83a37a7311 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -59,6 +59,16 @@
status = "okay";
};
+ isi: isi@f8048000 {
+ status = "disabled";
+ port {
+ isi_0: endpoint@0 {
+ remote-endpoint = <&ov2640_0>;
+ bus-width = <8>;
+ };
+ };
+ };
+
i2c0: i2c@f8010000 {
status = "okay";
@@ -66,9 +76,47 @@
compatible = "wm8731";
reg = <0x1a>;
};
+
+ ov2640: camera@30 {
+ compatible = "ovti,ov2640";
+ reg = <0x30>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
+ resetb-gpios = <&pioA 7 GPIO_ACTIVE_LOW>;
+ pwdn-gpios = <&pioA 13 GPIO_ACTIVE_HIGH>;
+ clocks = <&pck0>;
+ clock-names = "xvclk";
+ assigned-clocks = <&pck0>;
+ assigned-clock-rates = <25000000>;
+ status = "disabled";
+
+ port {
+ ov2640_0: endpoint {
+ remote-endpoint = <&isi_0>;
+ bus-width = <8>;
+ };
+ };
+ };
};
pinctrl@fffff400 {
+ camera_sensor {
+ pinctrl_pck0_as_isi_mck: pck0_as_isi_mck-0 {
+ atmel,pins =
+ <AT91_PIOC 15 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* ISI_MCK */
+ };
+
+ pinctrl_sensor_power: sensor_power-0 {
+ atmel,pins =
+ <AT91_PIOA 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
+
+ pinctrl_sensor_reset: sensor_reset-0 {
+ atmel,pins =
+ <AT91_PIOA 7 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
+ };
+
mmc0 {
pinctrl_board_mmc0: mmc0-board {
atmel,pins =
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index ff5fb6ab0b97..7b52c33ea69a 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -54,6 +54,42 @@
/include/ "bcm-cygnus-clock.dtsi"
+ pinctrl: pinctrl@0301d0c8 {
+ compatible = "brcm,cygnus-pinmux";
+ reg = <0x0301d0c8 0x30>,
+ <0x0301d24c 0x2c>;
+ };
+
+ gpio_crmu: gpio@03024800 {
+ compatible = "brcm,cygnus-crmu-gpio";
+ reg = <0x03024800 0x50>,
+ <0x03024008 0x18>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ gpio_ccm: gpio@1800a000 {
+ compatible = "brcm,cygnus-ccm-gpio";
+ reg = <0x1800a000 0x50>,
+ <0x0301d164 0x20>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ };
+
+ gpio_asiu: gpio@180a5000 {
+ compatible = "brcm,cygnus-asiu-gpio";
+ reg = <0x180a5000 0x668>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ pinmux = <&pinctrl>;
+
+ interrupt-controller;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
amba {
#address-cells = <1>;
#size-cells = <1>;
@@ -90,6 +126,48 @@
status = "disabled";
};
+ pcie0: pcie@18012000 {
+ compatible = "brcm,iproc-pcie";
+ reg = <0x18012000 0x1000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+
+ linux,pci-domain = <0>;
+
+ bus-range = <0x00 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x81000000 0 0 0x28000000 0 0x00010000
+ 0x82000000 0 0x20000000 0x20000000 0 0x04000000>;
+
+ status = "disabled";
+ };
+
+ pcie1: pcie@18013000 {
+ compatible = "brcm,iproc-pcie";
+ reg = <0x18013000 0x1000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+
+ linux,pci-domain = <1>;
+
+ bus-range = <0x00 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x81000000 0 0 0x48000000 0 0x00010000
+ 0x82000000 0 0x40000000 0x40000000 0 0x04000000>;
+
+ status = "disabled";
+ };
+
uart0: serial@18020000 {
compatible = "snps,dw-apb-uart";
reg = <0x18020000 0x100>;
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
index f18c9d9b2f2c..2ed9e5794785 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
@@ -45,13 +45,13 @@
power0 {
label = "bcm53xx:green:power";
gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
+ linux,default-trigger = "default-on";
};
power1 {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-on";
+ linux,default-trigger = "default-off";
};
usb {
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
new file mode 100644
index 000000000000..ea26dd3ec03a
--- /dev/null
+++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
@@ -0,0 +1,77 @@
+/*
+ * Broadcom BCM470X / BCM5301X ARM platform code.
+ * DTS for Netgear R8000
+ *
+ * Copyright (C) 2015 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+/dts-v1/;
+
+#include "bcm4708.dtsi"
+
+/ {
+ compatible = "netgear,r8000", "brcm,bcm4709", "brcm,bcm4708";
+ model = "Netgear R8000 (BCM4709)";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ reg = <0x00000000 0x08000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power0 {
+ label = "bcm53xx:white:power";
+ gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
+ power1 {
+ label = "bcm53xx:amber:power";
+ gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-off";
+ };
+
+ 5ghz-1 {
+ label = "bcm53xx:white:5ghz-1";
+ gpios = <&chipcommon 12 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-off";
+ };
+
+ 2ghz {
+ label = "bcm53xx:white:2ghz";
+ gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rfkill {
+ label = "WiFi";
+ linux,code = <KEY_RFKILL>;
+ gpios = <&chipcommon 4 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 5 GPIO_ACTIVE_LOW>;
+ };
+
+ restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi
index 0ca0f4e523d0..39ac7840d7ee 100644
--- a/arch/arm/boot/dts/bcm7445.dtsi
+++ b/arch/arm/boot/dts/bcm7445.dtsi
@@ -76,7 +76,7 @@
reg-shift = <2>;
reg-io-width = <4>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clock-frequency = <0x4d3f640>;
+ clock-frequency = <81000000>;
};
sun_top_ctrl: syscon@404000 {
@@ -96,6 +96,18 @@
"syscon";
reg = <0x452000 0x100>;
};
+
+ irq0_intc: interrupt-controller@40a780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <1>;
+ reg = <0x40a780 0x8>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 0x45 0x0>,
+ <GIC_SPI 0x43 0x0>;
+ brcm,int-map-mask = <0x25c>, <0x7000000>;
+ brcm,int-fwd-mask = <0x70000>;
+ };
};
smpboot {
diff --git a/arch/arm/boot/dts/bcm911360_entphn.dts b/arch/arm/boot/dts/bcm911360_entphn.dts
index d2ee95280548..7db484323fd6 100644
--- a/arch/arm/boot/dts/bcm911360_entphn.dts
+++ b/arch/arm/boot/dts/bcm911360_entphn.dts
@@ -33,6 +33,7 @@
/dts-v1/;
#include "bcm-cygnus.dtsi"
+#include "dt-bindings/input/input.h"
/ {
model = "Cygnus Enterprise Phone (BCM911360_ENTPHN)";
@@ -50,4 +51,16 @@
uart3: serial@18023000 {
status = "okay";
};
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hook {
+ label = "HOOK";
+ linux,code = <KEY_O>;
+ gpios = <&gpio_asiu 48 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/bcm958300k.dts b/arch/arm/boot/dts/bcm958300k.dts
index f1bb36f3975c..c9eb8565eac5 100644
--- a/arch/arm/boot/dts/bcm958300k.dts
+++ b/arch/arm/boot/dts/bcm958300k.dts
@@ -47,6 +47,14 @@
bootargs = "console=ttyS0,115200";
};
+ pcie0: pcie@18012000 {
+ status = "okay";
+ };
+
+ pcie1: pcie@18013000 {
+ status = "okay";
+ };
+
uart3: serial@18023000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958305k.dts b/arch/arm/boot/dts/bcm958305k.dts
new file mode 100644
index 000000000000..56b429abbedb
--- /dev/null
+++ b/arch/arm/boot/dts/bcm958305k.dts
@@ -0,0 +1,53 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Broadcom Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-cygnus.dtsi"
+
+/ {
+ model = "Cygnus Wireless Audio (BCM958305K)";
+ compatible = "brcm,bcm58305", "brcm,cygnus";
+
+ aliases {
+ serial0 = &uart3;
+ };
+
+ chosen {
+ stdout-path = &uart3;
+ bootargs = "console=ttyS0,115200";
+ };
+
+ uart3: serial@18023000 {
+ status = "okay";
+ };
+};
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index afe678f6d2e9..169a85578fc9 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -29,10 +29,10 @@
&dm816x_pinmux {
mcspi1_pins: pinmux_mcspi1_pins {
pinctrl-single,pins = <
- DM816X_IOPAD(0x0a94, PIN_INPUT | MUX_MODE0) /* SPI_SCLK */
- DM816X_IOPAD(0x0a98, PIN_OUTPUT | MUX_MODE0) /* SPI_SCS0 */
- DM816X_IOPAD(0x0aa8, PIN_INPUT | MUX_MODE0) /* SPI_D0 */
- DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */
+ DM816X_IOPAD(0x0a94, MUX_MODE0) /* SPI_SCLK */
+ DM816X_IOPAD(0x0a98, MUX_MODE0) /* SPI_SCS0 */
+ DM816X_IOPAD(0x0aa8, MUX_MODE0) /* SPI_D0 */
+ DM816X_IOPAD(0x0aac, MUX_MODE0) /* SPI_D1 */
>;
};
@@ -52,13 +52,13 @@
usb0_pins: pinmux_usb0_pins {
pinctrl-single,pins = <
- DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
+ DM816X_IOPAD(0x0d04, MUX_MODE0) /* USB0_DRVVBUS */
>;
};
- usb1_pins: pinmux_usb0_pins {
+ usb1_pins: pinmux_usb1_pins {
pinctrl-single,pins = <
- DM816X_IOPAD(0x0d04, MUX_MODE0) /* USB1_DRVVBUS */
+ DM816X_IOPAD(0x0d08, MUX_MODE0) /* USB1_DRVVBUS */
>;
};
};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index f35715bc6992..de8427be830a 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -396,6 +396,29 @@
mentor,num-eps = <16>;
mentor,ram-bits = <12>;
mentor,power = <500>;
+
+ dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+ &cppi41dma 2 0 &cppi41dma 3 0
+ &cppi41dma 4 0 &cppi41dma 5 0
+ &cppi41dma 6 0 &cppi41dma 7 0
+ &cppi41dma 8 0 &cppi41dma 9 0
+ &cppi41dma 10 0 &cppi41dma 11 0
+ &cppi41dma 12 0 &cppi41dma 13 0
+ &cppi41dma 14 0 &cppi41dma 0 1
+ &cppi41dma 1 1 &cppi41dma 2 1
+ &cppi41dma 3 1 &cppi41dma 4 1
+ &cppi41dma 5 1 &cppi41dma 6 1
+ &cppi41dma 7 1 &cppi41dma 8 1
+ &cppi41dma 9 1 &cppi41dma 10 1
+ &cppi41dma 11 1 &cppi41dma 12 1
+ &cppi41dma 13 1 &cppi41dma 14 1>;
+ dma-names =
+ "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+ "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+ "rx14", "rx15",
+ "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+ "tx14", "tx15";
};
usb1: usb@47401800 {
@@ -413,6 +436,43 @@
mentor,num-eps = <16>;
mentor,ram-bits = <12>;
mentor,power = <500>;
+
+ dmas = <&cppi41dma 15 0 &cppi41dma 16 0
+ &cppi41dma 17 0 &cppi41dma 18 0
+ &cppi41dma 19 0 &cppi41dma 20 0
+ &cppi41dma 21 0 &cppi41dma 22 0
+ &cppi41dma 23 0 &cppi41dma 24 0
+ &cppi41dma 25 0 &cppi41dma 26 0
+ &cppi41dma 27 0 &cppi41dma 28 0
+ &cppi41dma 29 0 &cppi41dma 15 1
+ &cppi41dma 16 1 &cppi41dma 17 1
+ &cppi41dma 18 1 &cppi41dma 19 1
+ &cppi41dma 20 1 &cppi41dma 21 1
+ &cppi41dma 22 1 &cppi41dma 23 1
+ &cppi41dma 24 1 &cppi41dma 25 1
+ &cppi41dma 26 1 &cppi41dma 27 1
+ &cppi41dma 28 1 &cppi41dma 29 1>;
+ dma-names =
+ "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+ "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+ "rx14", "rx15",
+ "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+ "tx14", "tx15";
+ };
+
+ cppi41dma: dma-controller@47402000 {
+ compatible = "ti,am3359-cppi41";
+ reg = <0x47400000 0x1000
+ 0x47402000 0x1000
+ 0x47403000 0x1000
+ 0x47404000 0x4000>;
+ reg-names = "glue", "controller", "scheduler", "queuemgr";
+ interrupts = <17>;
+ interrupt-names = "glue";
+ #dma-cells = <2>;
+ #dma-channels = <30>;
+ #dma-requests = <256>;
};
};
diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts
index aae7efc09b0b..e6fa251e17b9 100644
--- a/arch/arm/boot/dts/dove-cubox.dts
+++ b/arch/arm/boot/dts/dove-cubox.dts
@@ -87,6 +87,7 @@
/* connect xtal input to 25MHz reference */
clocks = <&ref25>;
+ clock-names = "xtal";
/* connect xtal input as source of pll0 and pll1 */
silabs,pll-source = <0 0>, <1 0>;
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index a5441d5482a6..9ad829523a13 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -1,5 +1,8 @@
/include/ "skeleton.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
/ {
@@ -61,7 +64,7 @@
0x82000000 0x2 0x0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 Mem */
0x81000000 0x2 0x0 MBUS_ID(0x08, 0xe0) 0 1 0>; /* Port 1.0 I/O */
- pcie-port@0 {
+ pcie0: pcie-port@0 {
device_type = "pci";
status = "disabled";
assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
@@ -79,7 +82,7 @@
interrupt-map = <0 0 0 0 &intc 16>;
};
- pcie-port@1 {
+ pcie1: pcie-port@1 {
device_type = "pci";
status = "disabled";
assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
@@ -154,7 +157,7 @@
uart2: serial@12200 {
compatible = "ns16550a";
- reg = <0x12000 0x100>;
+ reg = <0x12200 0x100>;
reg-shift = <2>;
interrupts = <9>;
clocks = <&core_clk 0>;
@@ -163,7 +166,7 @@
uart3: serial@12300 {
compatible = "ns16550a";
- reg = <0x12100 0x100>;
+ reg = <0x12300 0x100>;
reg-shift = <2>;
interrupts = <10>;
clocks = <&core_clk 0>;
@@ -448,6 +451,11 @@
marvell,function = "gpio";
};
+ pmx_pcie1_clkreq: pmx-pcie1-clkreq {
+ marvell,pins = "mpp9";
+ marvell,function = "pex1";
+ };
+
pmx_gpio_10: pmx-gpio-10 {
marvell,pins = "mpp10";
marvell,function = "gpio";
@@ -458,6 +466,11 @@
marvell,function = "gpio";
};
+ pmx_pcie0_clkreq: pmx-pcie0-clkreq {
+ marvell,pins = "mpp11";
+ marvell,function = "pex0";
+ };
+
pmx_gpio_12: pmx-gpio-12 {
marvell,pins = "mpp12";
marvell,function = "gpio";
@@ -563,6 +576,18 @@
marvell,function = "gpio";
};
+ pmx_spi1_4_7: pmx-spi1-4-7 {
+ marvell,pins = "mpp4", "mpp5",
+ "mpp6", "mpp7";
+ marvell,function = "spi1";
+ };
+
+ pmx_spi1_20_23: pmx-spi1-20-23 {
+ marvell,pins = "mpp20", "mpp21",
+ "mpp22", "mpp23";
+ marvell,function = "spi1";
+ };
+
pmx_uart1: pmx-uart1 {
marvell,pins = "mpp_uart1";
marvell,function = "uart1";
@@ -582,6 +607,36 @@
marvell,pins = "mpp_nand";
marvell,function = "gpo";
};
+
+ pmx_i2c1: pmx-i2c1 {
+ marvell,pins = "mpp17", "mpp19";
+ marvell,function = "twsi";
+ };
+
+ pmx_i2c2: pmx-i2c2 {
+ marvell,pins = "mpp_audio1";
+ marvell,function = "twsi";
+ };
+
+ pmx_ssp_i2c2: pmx-ssp-i2c2 {
+ marvell,pins = "mpp_audio1";
+ marvell,function = "ssp/twsi";
+ };
+
+ pmx_i2cmux_0: pmx-i2cmux-0 {
+ marvell,pins = "twsi";
+ marvell,function = "twsi-opt1";
+ };
+
+ pmx_i2cmux_1: pmx-i2cmux-1 {
+ marvell,pins = "twsi";
+ marvell,function = "twsi-opt2";
+ };
+
+ pmx_i2cmux_2: pmx-i2cmux-2 {
+ marvell,pins = "twsi";
+ marvell,function = "twsi-opt3";
+ };
};
core_clk: core-clocks@d0214 {
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index b1bd06c6c2a8..aa465904f6cc 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -541,6 +541,14 @@
};
};
+&omap_dwc3_1 {
+ extcon = <&extcon_usb1>;
+};
+
+&omap_dwc3_2 {
+ extcon = <&extcon_usb2>;
+};
+
&usb1 {
dr_mode = "peripheral";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index a0afce7ad482..f03a091cd076 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -102,17 +102,101 @@
interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- prm: prm@4ae06000 {
- compatible = "ti,dra7-prm";
- reg = <0x4ae06000 0x3000>;
- interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ l4_cfg: l4@4a000000 {
+ compatible = "ti,dra7-l4-cfg", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4a000000 0x22c000>;
- prm_clocks: clocks {
+ scm: scm@2000 {
+ compatible = "ti,dra7-scm-core", "simple-bus";
+ reg = <0x2000 0x2000>;
#address-cells = <1>;
- #size-cells = <0>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x2000>;
+
+ scm_conf: scm_conf@0 {
+ compatible = "syscon";
+ reg = <0x0 0x1400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pbias_regulator: pbias_regulator {
+ compatible = "ti,pbias-omap";
+ reg = <0xe00 0x4>;
+ syscon = <&scm_conf>;
+ pbias_mmc_reg: pbias_mmc_omap5 {
+ regulator-name = "pbias_mmc_omap5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
+
+ dra7_pmx_core: pinmux@1400 {
+ compatible = "ti,dra7-padconf",
+ "pinctrl-single";
+ reg = <0x1400 0x0464>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x3fffffff>;
+ };
+ };
+
+ cm_core_aon: cm_core_aon@5000 {
+ compatible = "ti,dra7-cm-core-aon";
+ reg = <0x5000 0x2000>;
+
+ cm_core_aon_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cm_core_aon_clockdomains: clockdomains {
+ };
+ };
+
+ cm_core: cm_core@8000 {
+ compatible = "ti,dra7-cm-core";
+ reg = <0x8000 0x3000>;
+
+ cm_core_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cm_core_clockdomains: clockdomains {
+ };
};
+ };
- prm_clockdomains: clockdomains {
+ l4_wkup: l4@4ae00000 {
+ compatible = "ti,dra7-l4-wkup", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4ae00000 0x3f000>;
+
+ counter32k: counter@4000 {
+ compatible = "ti,omap-counter32k";
+ reg = <0x4000 0x40>;
+ ti,hwmods = "counter_32k";
+ };
+
+ prm: prm@6000 {
+ compatible = "ti,dra7-prm";
+ reg = <0x6000 0x3000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+
+ prm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ prm_clockdomains: clockdomains {
+ };
};
};
@@ -185,36 +269,16 @@
};
};
- cm_core_aon: cm_core_aon@4a005000 {
- compatible = "ti,dra7-cm-core-aon";
- reg = <0x4a005000 0x2000>;
-
- cm_core_aon_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- cm_core_aon_clockdomains: clockdomains {
- };
- };
-
- cm_core: cm_core@4a008000 {
- compatible = "ti,dra7-cm-core";
- reg = <0x4a008000 0x3000>;
-
- cm_core_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- cm_core_clockdomains: clockdomains {
- };
- };
-
- counter32k: counter@4ae04000 {
- compatible = "ti,omap-counter32k";
- reg = <0x4ae04000 0x40>;
- ti,hwmods = "counter_32k";
+ bandgap: bandgap@4a0021e0 {
+ reg = <0x4a0021e0 0xc
+ 0x4a00232c 0xc
+ 0x4a002380 0x2c
+ 0x4a0023C0 0x3c
+ 0x4a002564 0x8
+ 0x4a002574 0x50>;
+ compatible = "ti,dra752-bandgap";
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ #thermal-sensor-cells = <1>;
};
dra7_ctrl_core: ctrl_core@4a002000 {
@@ -227,28 +291,6 @@
reg = <0x4a002e00 0x7c>;
};
- pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
- reg = <0 0x4>;
- syscon = <&dra7_ctrl_general>;
- pbias_mmc_reg: pbias_mmc_omap5 {
- regulator-name = "pbias_mmc_omap5";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3000000>;
- };
- };
-
- dra7_pmx_core: pinmux@4a003400 {
- compatible = "ti,dra7-padconf", "pinctrl-single";
- reg = <0x4a003400 0x0464>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <32>;
- pinctrl-single,function-mask = <0x3fffffff>;
- };
-
sdma: dma-controller@4a056000 {
compatible = "ti,omap4430-sdma";
reg = <0x4a056000 0x1000>;
@@ -666,7 +708,6 @@
reg = <0x48820000 0x80>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer5";
- ti,timer-dsp;
};
timer6: timer@48822000 {
@@ -674,8 +715,6 @@
reg = <0x48822000 0x80>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer6";
- ti,timer-dsp;
- ti,timer-pwm;
};
timer7: timer@48824000 {
@@ -683,7 +722,6 @@
reg = <0x48824000 0x80>;
interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer7";
- ti,timer-dsp;
};
timer8: timer@48826000 {
@@ -691,8 +729,6 @@
reg = <0x48826000 0x80>;
interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer8";
- ti,timer-dsp;
- ti,timer-pwm;
};
timer9: timer@4803e000 {
@@ -714,7 +750,6 @@
reg = <0x48088000 0x80>;
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "timer11";
- ti,timer-pwm;
};
timer13: timer@48828000 {
@@ -876,7 +911,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07ddc 0x4>, <0x4ae07de0 0x4>,
- <0x4ae06014 0x4>, <0x4a003b20 0x8>,
+ <0x4ae06014 0x4>, <0x4a003b20 0xc>,
<0x4ae0c158 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -909,7 +944,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07e34 0x4>, <0x4ae07e24 0x4>,
- <0x4ae06010 0x4>, <0x4a0025cc 0x8>,
+ <0x4ae06010 0x4>, <0x4a0025cc 0xc>,
<0x4a002470 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -942,7 +977,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07e30 0x4>, <0x4ae07e20 0x4>,
- <0x4ae06010 0x4>, <0x4a0025e0 0x8>,
+ <0x4ae06010 0x4>, <0x4a0025e0 0xc>,
<0x4a00246c 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -975,7 +1010,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07de4 0x4>, <0x4ae07de8 0x4>,
- <0x4ae06010 0x4>, <0x4a003b08 0x8>,
+ <0x4ae06010 0x4>, <0x4a003b08 0xc>,
<0x4ae0c154 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -1168,7 +1203,7 @@
status = "disabled";
};
- rtc@48838000 {
+ rtc: rtc@48838000 {
compatible = "ti,am3352-rtc";
reg = <0x48838000 0x100>;
interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
@@ -1419,7 +1454,7 @@
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan1";
reg = <0x4ae3c000 0x2000>;
- syscon-raminit = <&dra7_ctrl_core 0x558 0>;
+ syscon-raminit = <&scm_conf 0x558 0>;
interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dcan1_sys_clk_mux>;
status = "disabled";
@@ -1429,12 +1464,23 @@
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan2";
reg = <0x48480000 0x2000>;
- syscon-raminit = <&dra7_ctrl_core 0x558 1>;
+ syscon-raminit = <&scm_conf 0x558 1>;
interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&sys_clkin1>;
status = "disabled";
};
};
+
+ thermal_zones: thermal-zones {
+ #include "omap4-cpu-thermal.dtsi"
+ #include "omap5-gpu-thermal.dtsi"
+ #include "omap5-core-thermal.dtsi"
+ };
+
+};
+
+&cpu_thermal {
+ polling-delay = <500>; /* milliseconds */
};
/include/ "dra7xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index daf28110d487..ce0390f081d9 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -377,6 +377,14 @@
phy-supply = <&ldo4_reg>;
};
+&omap_dwc3_1 {
+ extcon = <&extcon_usb1>;
+};
+
+&omap_dwc3_2 {
+ extcon = <&extcon_usb2>;
+};
+
&usb1 {
dr_mode = "peripheral";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index f7fb0d0ef25a..03d742f8d572 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -20,6 +20,11 @@
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0>;
+
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <2>;
+ #cooling-cells = <2>; /* min followed by max */
};
};
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 00eeed789b4b..cc560a70926f 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -31,6 +31,11 @@
clock-names = "cpu";
clock-latency = <300000>; /* From omap-cpufreq driver */
+
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <2>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu@1 {
device_type = "cpu";
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 99b09a44e269..3b933f74d000 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1493,6 +1493,14 @@
ti,dividers = <1>, <8>;
};
+ clkout2_clk: clkout2_clk {
+ #clock-cells = <0>;
+ compatible = "ti,gate-clock";
+ clocks = <&clkoutmux2_clk_mux>;
+ ti,bit-shift = <8>;
+ reg = <0x06b0>;
+ };
+
l3init_960m_gfclk: l3init_960m_gfclk {
#clock-cells = <0>;
compatible = "ti,gate-clock";
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index 667d323e80a3..19446273e4a7 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -94,3 +94,16 @@
vdd33a-supply = <&reg_3p3v>;
};
};
+
+&pfc {
+ uart1_pins: uart@e1030000 {
+ renesas,groups = "uart1_ctrl", "uart1_data";
+ renesas,function = "uart1";
+ };
+};
+
+&uart1 {
+ pinctrl-0 = <&uart1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
index cc7bfe0ba40a..bb45694d91bc 100644
--- a/arch/arm/boot/dts/emev2.dtsi
+++ b/arch/arm/boot/dts/emev2.dtsi
@@ -169,12 +169,18 @@
clock-names = "sclk";
};
+ pfc: pfc@e0140200 {
+ compatible = "renesas,pfc-emev2";
+ reg = <0xe0140200 0x100>;
+ };
+
gpio0: gpio@e0050000 {
compatible = "renesas,em-gio";
reg = <0xe0050000 0x2c>, <0xe0050040 0x20>;
interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>,
<0 68 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&pfc 0 0 32>;
#gpio-cells = <2>;
ngpios = <32>;
interrupt-controller;
@@ -186,6 +192,7 @@
interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>,
<0 70 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&pfc 0 32 32>;
#gpio-cells = <2>;
ngpios = <32>;
interrupt-controller;
@@ -197,6 +204,7 @@
interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>,
<0 72 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&pfc 0 64 32>;
#gpio-cells = <2>;
ngpios = <32>;
interrupt-controller;
@@ -208,6 +216,7 @@
interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>,
<0 74 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&pfc 0 96 32>;
#gpio-cells = <2>;
ngpios = <32>;
interrupt-controller;
@@ -219,6 +228,7 @@
interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>,
<0 76 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&pfc 0 128 31>;
#gpio-cells = <2>;
ngpios = <31>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 14ab515aa83c..e3bfb11c6ef8 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -176,6 +176,10 @@
compatible = "samsung,exynos3250-cmu";
reg = <0x10030000 0x20000>;
#clock-cells = <1>;
+ assigned-clocks = <&cmu CLK_MOUT_ACLK_400_MCUISP_SUB>,
+ <&cmu CLK_MOUT_ACLK_266_SUB>;
+ assigned-clock-parents = <&cmu CLK_FIN_PLL>,
+ <&cmu CLK_FIN_PLL>;
};
cmu_dmc: clock-controller@105C0000 {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index adb4f6a97a1d..d6b49e5b32e9 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -9,6 +9,7 @@
#include <dt-bindings/sound/samsung-i2s.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/maxim,max77686.h>
#include "exynos4412.dtsi"
/ {
@@ -75,10 +76,18 @@
};
};
+ emmc_pwrseq: pwrseq {
+ pinctrl-0 = <&sd1_cd>;
+ pinctrl-names = "default";
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpk1 2 1>;
+ };
+
mmc@12550000 {
pinctrl-0 = <&sd4_clk &sd4_cmd &sd4_bus4 &sd4_bus8>;
pinctrl-names = "default";
vmmc-supply = <&ldo20_reg &buck8_reg>;
+ mmc-pwrseq = <&emmc_pwrseq>;
status = "okay";
num-slots = <1>;
@@ -97,6 +106,8 @@
rtc@10070000 {
status = "okay";
+ clocks = <&clock CLK_RTC>, <&max77686 MAX77686_CLK_AP>;
+ clock-names = "rtc", "rtc_src";
};
g2d@10800000 {
@@ -472,6 +483,12 @@
};
};
+/* RSTN signal for eMMC */
+&sd1_cd {
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+};
+
&pinctrl_1 {
gpio_power_key: power_key {
samsung,pins = "gpx1-3";
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index b9aeec430527..1eca97ee4bd6 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -29,6 +29,7 @@
chosen {
bootargs = "console=tty1";
+ stdout-path = "serial3:115200n8";
};
gpio-keys {
@@ -183,7 +184,20 @@
powerdown-gpios = <&gpy2 5 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpx1 5 GPIO_ACTIVE_HIGH>;
edid-emulation = <5>;
- panel = <&panel>;
+
+ ports {
+ port@0 {
+ bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+
+ port@1 {
+ bridge_in: endpoint {
+ remote-endpoint = <&dp_out>;
+ };
+ };
+ };
};
};
@@ -228,6 +242,20 @@
compatible = "auo,b116xw03";
power-supply = <&fet6>;
backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&bridge_out>;
+ };
+ };
+ };
+
+ mmc3_pwrseq: mmc3_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpx0 2 GPIO_ACTIVE_LOW>, /* WIFI_RSTn */
+ <&gpx0 1 GPIO_ACTIVE_LOW>; /* WIFI_EN */
+ clocks = <&max77686 MAX77686_CLK_PMIC>;
+ clock-names = "ext_clock";
};
};
@@ -242,7 +270,14 @@
samsung,link-rate = <0x0a>;
samsung,lane-count = <2>;
samsung,hpd-gpio = <&gpx0 7 GPIO_ACTIVE_HIGH>;
- bridge = <&ptn3460>;
+
+ ports {
+ port@0 {
+ dp_out: endpoint {
+ remote-endpoint = <&bridge_in>;
+ };
+ };
+ };
};
&ehci {
@@ -531,17 +566,34 @@
status = "okay";
num-slots = <1>;
broken-cd;
+ cap-sdio-irq;
+ keep-power-in-suspend;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <2 3>;
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd3_clk &sd3_cmd &sd3_bus4>;
+ pinctrl-0 = <&sd3_clk &sd3_cmd &sd3_bus4 &wifi_en &wifi_rst>;
bus-width = <4>;
cap-sd-highspeed;
+ mmc-pwrseq = <&mmc3_pwrseq>;
};
&pinctrl_0 {
+ wifi_en: wifi-en {
+ samsung,pins = "gpx0-1";
+ samsung,pin-function = <1>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ wifi_rst: wifi-rst {
+ samsung,pins = "gpx0-2";
+ samsung,pin-function = <1>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
power_key_irq: power-key-irq {
samsung,pins = "gpx1-3";
samsung,pin-function = <0xf>;
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index f02775487cd4..d03f9b8d376d 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -25,6 +25,7 @@
chosen {
bootargs = "console=tty1";
+ stdout-path = "serial3:115200n8";
};
gpio-keys {
@@ -429,7 +430,6 @@
&mmc_0 {
status = "okay";
num-slots = <1>;
- supports-highspeed;
broken-cd;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
@@ -437,11 +437,8 @@
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>;
-
- slot@0 {
- reg = <0>;
- bus-width = <8>;
- };
+ bus-width = <8>;
+ cap-mmc-highspeed;
};
/*
@@ -451,7 +448,6 @@
&mmc_1 {
status = "okay";
num-slots = <1>;
- supports-highspeed;
broken-cd;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
@@ -459,11 +455,8 @@
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
-
- slot@0 {
- reg = <0>;
- bus-width = <4>;
- };
+ bus-width = <4>;
+ cap-sd-highspeed;
};
&pinctrl_0 {
@@ -490,7 +483,7 @@
power_key_irq: power-key-irq {
samsung,pins = "gpx1-3";
- samsung,pin-function = <0>;
+ samsung,pin-function = <0xf>;
samsung,pin-pud = <0>;
samsung,pin-drv = <0>;
};
@@ -518,7 +511,7 @@
lid_irq: lid-irq {
samsung,pins = "gpx3-5";
- samsung,pin-function = <0>;
+ samsung,pin-function = <0xf>;
samsung,pin-pud = <0>;
samsung,pin-drv = <0>;
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 77f656eb8e6b..257e2f10525d 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -143,7 +143,7 @@
compatible = "samsung,exynos4210-mct";
reg = <0x101C0000 0x800>;
interrupt-controller;
- #interrups-cells = <2>;
+ #interrupt-cells = <2>;
interrupt-parent = <&mct_map>;
interrupts = <0 0>, <1 0>, <2 0>, <3 0>,
<4 0>, <5 0>;
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index db2c1c4cd900..b82b6fa15f48 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -55,7 +55,7 @@
samsung,dw-mshc-sdr-timing = <0 4>;
samsung,dw-mshc-ddr-timing = <0 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_bus8>;
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8>;
vmmc-supply = <&ldo10_reg>;
bus-width = <8>;
cap-mmc-highspeed;
@@ -68,7 +68,7 @@
samsung,dw-mshc-sdr-timing = <2 3>;
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
vmmc-supply = <&ldo19_reg>;
vqmmc-supply = <&ldo13_reg>;
bus-width = <4>;
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index c47bb70665c1..146e71118a72 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -43,6 +43,10 @@
pinctrl-names = "default";
};
+ chosen {
+ stdout-path = "serial3:115200n8";
+ };
+
fixed-rate-clocks {
oscclk {
compatible = "samsung,exynos5420-oscclk";
@@ -118,6 +122,19 @@
compatible = "auo,b116xw03";
power-supply = <&tps65090_fet6>;
backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&bridge_out>;
+ };
+ };
+ };
+
+ mmc1_pwrseq: mmc1_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpx0 0 GPIO_ACTIVE_LOW>; /* WIFI_EN */
+ clocks = <&max77802 MAX77802_CLK_32K_CP>;
+ clock-names = "ext_clock";
};
};
@@ -137,7 +154,14 @@
samsung,link-rate = <0x06>;
samsung,lane-count = <2>;
samsung,hpd-gpio = <&gpx2 6 0>;
- bridge = <&ps8625>;
+
+ ports {
+ port@0 {
+ dp_out: endpoint {
+ remote-endpoint = <&bridge_in>;
+ };
+ };
+ };
};
&fimd {
@@ -581,6 +605,8 @@
interrupt-parent = <&gpx0>;
pinctrl-names = "default";
pinctrl-0 = <&max98090_irq>;
+ clocks = <&pmu_system_controller 0>;
+ clock-names = "mclk";
};
light-sensor@44 {
@@ -595,8 +621,22 @@
sleep-gpios = <&gpx3 5 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpy7 7 GPIO_ACTIVE_HIGH>;
lane-count = <2>;
- panel = <&panel>;
use-external-pwm;
+
+ ports {
+ port@0 {
+ bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+
+ port@1 {
+ bridge_in: endpoint {
+ remote-endpoint = <&dp_out>;
+ };
+ };
+ };
+
};
};
@@ -659,11 +699,33 @@
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <0 4>;
samsung,dw-mshc-ddr-timing = <0 2>;
+ samsung,dw-mshc-hs400-timing = <0 2>;
+ samsung,read-strobe-delay = <90>;
pinctrl-names = "default";
- pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_bus8>;
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_rclk>;
bus-width = <8>;
};
+&mmc_1 {
+ status = "okay";
+ num-slots = <1>;
+ broken-cd;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ card-detect-delay = <200>;
+ clock-frequency = <400000000>;
+ samsung,dw-mshc-ciu-div = <1>;
+ samsung,dw-mshc-sdr-timing = <0 1>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_int>, <&sd1_bus1>,
+ <&sd1_bus4>, <&sd1_bus8>, <&wifi_en>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ mmc-pwrseq = <&mmc1_pwrseq>;
+ vqmmc-supply = <&buck10_reg>;
+};
+
&mmc_2 {
status = "okay";
num-slots = <1>;
@@ -674,7 +736,7 @@
samsung,dw-mshc-sdr-timing = <2 3>;
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
bus-width = <4>;
};
@@ -683,6 +745,13 @@
pinctrl-names = "default";
pinctrl-0 = <&mask_tpm_reset>;
+ wifi_en: wifi-en {
+ samsung,pins = "gpx0-0";
+ samsung,pin-function = <1>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
max98090_irq: max98090-irq {
samsung,pins = "gpx0-2";
samsung,pin-function = <0>;
@@ -770,6 +839,29 @@
};
};
+&pinctrl_1 {
+ /* Adjust WiFi drive strengths lower for EMI */
+ sd1_clk: sd1-clk {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_cmd: sd1-cmd {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus1: sd1-bus-width1 {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus4: sd1-bus-width4 {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus8: sd1-bus-width8 {
+ samsung,pin-drv = <2>;
+ };
+};
+
&pinctrl_2 {
pmic_dvs_2: pmic-dvs-2 {
samsung,pins = "gpj4-2";
diff --git a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
index ba686e40eac7..8b153166ebdb 100644
--- a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
@@ -201,6 +201,13 @@
samsung,pin-drv = <3>;
};
+ sd0_rclk: sd0-rclk {
+ samsung,pins = "gpc0-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <1>;
+ samsung,pin-drv = <3>;
+ };
+
sd1_cmd: sd1-cmd {
samsung,pins = "gpc1-1";
samsung,pin-function = <2>;
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index 8be3d7b489ff..9103f2381a6d 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -80,8 +80,11 @@
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <0 4>;
samsung,dw-mshc-ddr-timing = <0 2>;
+ samsung,dw-mshc-hs400-timing = <0 2>;
+ samsung,read-strobe-delay = <90>;
pinctrl-names = "default";
- pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_bus8>;
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8
+ &sd0_rclk>;
bus-width = <8>;
cap-mmc-highspeed;
};
@@ -93,7 +96,7 @@
samsung,dw-mshc-sdr-timing = <2 3>;
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
bus-width = <4>;
cap-sd-highspeed;
};
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
index 5d31fc140823..2180a0152c9b 100644
--- a/arch/arm/boot/dts/exynos5420-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
@@ -28,7 +28,7 @@ trips {
type = "active";
};
cpu-crit-0 {
- temperature = <1200000>; /* millicelsius */
+ temperature = <120000>; /* millicelsius */
hysteresis = <0>; /* millicelsius */
type = "critical";
};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index b3d2d53820e3..45317538bbae 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -221,7 +221,7 @@
compatible = "samsung,exynos4210-mct";
reg = <0x101C0000 0x800>;
interrupt-controller;
- #interrups-cells = <1>;
+ #interrupt-cells = <1>;
interrupt-parent = <&mct_map>;
interrupts = <0>, <1>, <2>, <3>, <4>, <5>, <6>, <7>,
<8>, <9>, <10>, <11>;
@@ -251,6 +251,8 @@
compatible = "samsung,exynos4210-pd";
reg = <0x10044000 0x20>;
#power-domain-cells = <0>;
+ clocks = <&clock CLK_GSCL0>, <&clock CLK_GSCL1>;
+ clock-names = "asb0", "asb1";
};
isp_pd: power-domain@10044020 {
@@ -283,9 +285,11 @@
<&clock CLK_MOUT_SW_ACLK300>,
<&clock CLK_MOUT_USER_ACLK300_DISP1>,
<&clock CLK_MOUT_SW_ACLK400>,
- <&clock CLK_MOUT_USER_ACLK400_DISP1>;
+ <&clock CLK_MOUT_USER_ACLK400_DISP1>,
+ <&clock CLK_FIMD1>, <&clock CLK_MIXER>;
clock-names = "oscclk", "pclk0", "clk0",
- "pclk1", "clk1", "pclk2", "clk2";
+ "pclk1", "clk1", "pclk2", "clk2",
+ "asb0", "asb1";
};
pinctrl_0: pinctrl@13400000 {
@@ -532,6 +536,7 @@
clock-names = "dp";
phys = <&dp_phy>;
phy-names = "dp";
+ power-domains = <&disp_pd>;
};
mipi_phy: video-phy@10040714 {
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3.dts b/arch/arm/boot/dts/exynos5422-odroidxu3.dts
index a519c863248d..edc25cf1d717 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3.dts
@@ -264,6 +264,13 @@
};
};
+ emmc_pwrseq: pwrseq {
+ pinctrl-0 = <&emmc_nrst_pin>;
+ pinctrl-names = "default";
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpd1 0 1>;
+ };
+
i2c_2: i2c@12C80000 {
samsung,i2c-sda-delay = <100>;
samsung,i2c-max-bus-freq = <66000>;
@@ -298,13 +305,14 @@
&mmc_0 {
status = "okay";
+ mmc-pwrseq = <&emmc_pwrseq>;
broken-cd;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <0 4>;
samsung,dw-mshc-ddr-timing = <0 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_bus8>;
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8>;
bus-width = <8>;
cap-mmc-highspeed;
};
@@ -316,7 +324,7 @@
samsung,dw-mshc-sdr-timing = <0 4>;
samsung,dw-mshc-ddr-timing = <0 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
bus-width = <4>;
cap-sd-highspeed;
};
@@ -330,6 +338,15 @@
};
};
+&pinctrl_1 {
+ emmc_nrst_pin: emmc-nrst {
+ samsung,pins = "gpd1-0";
+ samsung,pin-function = <0>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+};
+
&usbdrd_dwc3_0 {
dr_mode = "host";
};
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
index 48adfa8f4300..356e963edf11 100644
--- a/arch/arm/boot/dts/exynos5440-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
@@ -18,7 +18,7 @@ trips {
type = "active";
};
cpu-crit-0 {
- temperature = <1050000>; /* millicelsius */
+ temperature = <105000>; /* millicelsius */
hysteresis = <0>; /* millicelsius */
type = "critical";
};
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 06737c60d333..02eb8b15374f 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -42,6 +42,10 @@
pinctrl-names = "default";
};
+ chosen {
+ stdout-path = "serial3:115200n8";
+ };
+
fixed-rate-clocks {
oscclk {
compatible = "samsung,exynos5420-oscclk";
@@ -119,6 +123,13 @@
power-supply = <&tps65090_fet6>;
backlight = <&backlight>;
};
+
+ mmc1_pwrseq: mmc1_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpx0 0 GPIO_ACTIVE_LOW>; /* WIFI_EN */
+ clocks = <&max77802 MAX77802_CLK_32K_CP>;
+ clock-names = "ext_clock";
+ };
};
&adc {
@@ -581,6 +592,8 @@
interrupt-parent = <&gpx0>;
pinctrl-names = "default";
pinctrl-0 = <&max98091_irq>;
+ clocks = <&pmu_system_controller 0>;
+ clock-names = "mclk";
};
light-sensor@44 {
@@ -641,18 +654,41 @@
num-slots = <1>;
broken-cd;
mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
cap-mmc-highspeed;
non-removable;
card-detect-delay = <200>;
- clock-frequency = <400000000>;
+ clock-frequency = <800000000>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <0 4>;
samsung,dw-mshc-ddr-timing = <0 2>;
+ samsung,dw-mshc-hs400-timing = <0 2>;
+ samsung,read-strobe-delay = <90>;
pinctrl-names = "default";
- pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_bus8>;
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_rclk>;
bus-width = <8>;
};
+&mmc_1 {
+ status = "okay";
+ num-slots = <1>;
+ broken-cd;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ card-detect-delay = <200>;
+ clock-frequency = <400000000>;
+ samsung,dw-mshc-ciu-div = <1>;
+ samsung,dw-mshc-sdr-timing = <0 1>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_int>, <&sd1_bus1>,
+ <&sd1_bus4>, <&sd1_bus8>, <&wifi_en>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ mmc-pwrseq = <&mmc1_pwrseq>;
+ vqmmc-supply = <&buck10_reg>;
+};
+
&mmc_2 {
status = "okay";
num-slots = <1>;
@@ -663,7 +699,7 @@
samsung,dw-mshc-sdr-timing = <2 3>;
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
- pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
bus-width = <4>;
};
@@ -672,6 +708,13 @@
pinctrl-names = "default";
pinctrl-0 = <&mask_tpm_reset>;
+ wifi_en: wifi-en {
+ samsung,pins = "gpx0-0";
+ samsung,pin-function = <1>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
max98091_irq: max98091-irq {
samsung,pins = "gpx0-2";
samsung,pin-function = <0>;
@@ -759,6 +802,29 @@
};
};
+&pinctrl_1 {
+ /* Adjust WiFi drive strengths lower for EMI */
+ sd1_clk: sd1-clk {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_cmd: sd1-cmd {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus1: sd1-bus-width1 {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus4: sd1-bus-width4 {
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus8: sd1-bus-width8 {
+ samsung,pin-drv = <2>;
+ };
+};
+
&pinctrl_2 {
pmic_dvs_2: pmic-dvs-2 {
samsung,pins = "gpj4-2";
diff --git a/arch/arm/boot/dts/hip04.dtsi b/arch/arm/boot/dts/hip04.dtsi
index 238814596a87..44044f275115 100644
--- a/arch/arm/boot/dts/hip04.dtsi
+++ b/arch/arm/boot/dts/hip04.dtsi
@@ -275,7 +275,6 @@
compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0 0xe3c42000 0 0x1000>;
- coresight-default-sink;
clocks = <&clk_375m>;
clock-names = "apb_pclk";
port {
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
index 7e6eef2488e8..82045398bf1f 100644
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -12,6 +12,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "imx23.dtsi"
/ {
@@ -93,6 +94,7 @@
ahb@80080000 {
usb0: usb@80080000 {
+ dr_mode = "host";
vbus-supply = <&reg_usb0_vbus>;
status = "okay";
};
@@ -122,7 +124,7 @@
user {
label = "green";
- gpios = <&gpio2 1 1>;
+ gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
};
};
};
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index 9c21b1583762..dd45e6971bc3 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -75,6 +75,27 @@
mux-int-port = <1>;
mux-ext-port = <4>;
};
+
+ wvga: display {
+ model = "CLAA057VC01CW";
+ bits-per-pixel = <16>;
+ fsl,pcr = <0xfa208b80>;
+ bus-width = <18>;
+ native-mode = <&wvga_timings>;
+ display-timings {
+ wvga_timings: 640x480 {
+ hactive = <640>;
+ vactive = <480>;
+ hback-porch = <45>;
+ hfront-porch = <114>;
+ hsync-len = <1>;
+ vback-porch = <33>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
+ clock-frequency = <25200000>;
+ };
+ };
+ };
};
&audmux {
@@ -190,6 +211,33 @@
>;
};
+ pinctrl_lcd: lcdgrp {
+ fsl,pins = <
+ MX25_PAD_LD0__LD0 0xe0
+ MX25_PAD_LD1__LD1 0xe0
+ MX25_PAD_LD2__LD2 0xe0
+ MX25_PAD_LD3__LD3 0xe0
+ MX25_PAD_LD4__LD4 0xe0
+ MX25_PAD_LD5__LD5 0xe0
+ MX25_PAD_LD6__LD6 0xe0
+ MX25_PAD_LD7__LD7 0xe0
+ MX25_PAD_LD8__LD8 0xe0
+ MX25_PAD_LD9__LD9 0xe0
+ MX25_PAD_LD10__LD10 0xe0
+ MX25_PAD_LD11__LD11 0xe0
+ MX25_PAD_LD12__LD12 0xe0
+ MX25_PAD_LD13__LD13 0xe0
+ MX25_PAD_LD14__LD14 0xe0
+ MX25_PAD_LD15__LD15 0xe0
+ MX25_PAD_GPIO_E__LD16 0xe0
+ MX25_PAD_GPIO_F__LD17 0xe0
+ MX25_PAD_HSYNC__HSYNC 0xe0
+ MX25_PAD_VSYNC__VSYNC 0xe0
+ MX25_PAD_LSCLK__LSCLK 0xe0
+ MX25_PAD_OE_ACD__OE_ACD 0xe0
+ MX25_PAD_CONTRAST__CONTRAST 0xe0
+ >;
+ };
pinctrl_uart1: uart1grp {
fsl,pins = <
@@ -202,6 +250,16 @@
};
};
+&lcdc {
+ display = <&wvga>;
+ fsl,lpccr = <0x00a903ff>;
+ fsl,lscr1 = <0x00120300>;
+ fsl,dmacr = <0x00020010>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd>;
+ status = "okay";
+};
+
&nfc {
nand-on-flash-bbt;
status = "okay";
diff --git a/arch/arm/boot/dts/imx25-pinfunc.h b/arch/arm/boot/dts/imx25-pinfunc.h
index 88eebb15da6a..7c4b9f2f9aad 100644
--- a/arch/arm/boot/dts/imx25-pinfunc.h
+++ b/arch/arm/boot/dts/imx25-pinfunc.h
@@ -17,48 +17,69 @@
* <mux_reg conf_reg input_reg mux_mode input_val>
*/
+#define MX25_PAD_TDO__TDO 0x000 0x3e8 0x000 0x00 0x000
+
#define MX25_PAD_A10__A10 0x008 0x000 0x000 0x00 0x000
#define MX25_PAD_A10__GPIO_4_0 0x008 0x000 0x000 0x05 0x000
#define MX25_PAD_A13__A13 0x00c 0x22C 0x000 0x00 0x000
#define MX25_PAD_A13__GPIO_4_1 0x00c 0x22C 0x000 0x05 0x000
+#define MX25_PAD_A13__LCDC_CLS 0x00c 0x22C 0x000 0x07 0x000
#define MX25_PAD_A14__A14 0x010 0x230 0x000 0x10 0x000
#define MX25_PAD_A14__GPIO_2_0 0x010 0x230 0x000 0x15 0x000
+#define MX25_PAD_A14__SIM1_CLK1 0x010 0x230 0x000 0x16 0x000
+#define MX25_PAD_A14__LCDC_SPL 0x010 0x230 0x000 0x17 0x000
#define MX25_PAD_A15__A15 0x014 0x234 0x000 0x10 0x000
#define MX25_PAD_A15__GPIO_2_1 0x014 0x234 0x000 0x15 0x000
+#define MX25_PAD_A15__SIM1_RST1 0x014 0x234 0x000 0x16 0x000
+#define MX25_PAD_A15__LCDC_PS 0x014 0x234 0x000 0x17 0x000
#define MX25_PAD_A16__A16 0x018 0x000 0x000 0x10 0x000
#define MX25_PAD_A16__GPIO_2_2 0x018 0x000 0x000 0x15 0x000
+#define MX25_PAD_A16__SIM1_VEN1 0x018 0x000 0x000 0x16 0x000
+#define MX25_PAD_A16__LCDC_REV 0x018 0x000 0x000 0x17 0x000
#define MX25_PAD_A17__A17 0x01c 0x238 0x000 0x10 0x000
#define MX25_PAD_A17__GPIO_2_3 0x01c 0x238 0x000 0x15 0x000
+#define MX25_PAD_A17__SIM1_TX 0x01c 0x238 0x554 0x16 0x000
+#define MX25_PAD_A17__FEC_TX_ERR 0x01c 0x238 0x000 0x17 0x000
#define MX25_PAD_A18__A18 0x020 0x23c 0x000 0x10 0x000
#define MX25_PAD_A18__GPIO_2_4 0x020 0x23c 0x000 0x15 0x000
+#define MX25_PAD_A18__SIM1_PD1 0x020 0x23c 0x550 0x16 0x000
#define MX25_PAD_A18__FEC_COL 0x020 0x23c 0x504 0x17 0x000
#define MX25_PAD_A19__A19 0x024 0x240 0x000 0x10 0x000
-#define MX25_PAD_A19__FEC_RX_ER 0x024 0x240 0x518 0x17 0x000
#define MX25_PAD_A19__GPIO_2_5 0x024 0x240 0x000 0x15 0x000
+#define MX25_PAD_A19__SIM1_RX1 0x024 0x240 0x54c 0x16 0x000
+#define MX25_PAD_A19__FEC_RX_ERR 0x024 0x240 0x518 0x17 0x000
#define MX25_PAD_A20__A20 0x028 0x244 0x000 0x10 0x000
#define MX25_PAD_A20__GPIO_2_6 0x028 0x244 0x000 0x15 0x000
+#define MX25_PAD_A20__SIM2_CLK1 0x028 0x244 0x000 0x16 0x000
#define MX25_PAD_A20__FEC_RDATA2 0x028 0x244 0x50c 0x17 0x000
#define MX25_PAD_A21__A21 0x02c 0x248 0x000 0x10 0x000
#define MX25_PAD_A21__GPIO_2_7 0x02c 0x248 0x000 0x15 0x000
+#define MX25_PAD_A21__SIM2_RST1 0x02c 0x248 0x000 0x16 0x000
#define MX25_PAD_A21__FEC_RDATA3 0x02c 0x248 0x510 0x17 0x000
#define MX25_PAD_A22__A22 0x030 0x000 0x000 0x10 0x000
#define MX25_PAD_A22__GPIO_2_8 0x030 0x000 0x000 0x15 0x000
+#define MX25_PAD_A22__SIM2_VEN1 0x030 0x000 0x000 0x16 0x000
+#define MX25_PAD_A22__FEC_TDATA2 0x030 0x000 0x000 0x17 0x000
#define MX25_PAD_A23__A23 0x034 0x24c 0x000 0x10 0x000
#define MX25_PAD_A23__GPIO_2_9 0x034 0x24c 0x000 0x15 0x000
+#define MX25_PAD_A23__SIM2_TX1 0x034 0x24c 0x560 0x16 0x000
+#define MX25_PAD_A23__FEC_TDATA3 0x034 0x24c 0x000 0x17 0x000
#define MX25_PAD_A24__A24 0x038 0x250 0x000 0x10 0x000
#define MX25_PAD_A24__GPIO_2_10 0x038 0x250 0x000 0x15 0x000
+#define MX25_PAD_A24__SIM2_PD1 0x038 0x250 0x55c 0x16 0x000
#define MX25_PAD_A24__FEC_RX_CLK 0x038 0x250 0x514 0x17 0x000
#define MX25_PAD_A25__A25 0x03c 0x254 0x000 0x10 0x000
@@ -133,20 +154,25 @@
#define MX25_PAD_D15__D15 0x088 0x280 0x000 0x00 0x000
#define MX25_PAD_D15__LD16 0x088 0x280 0x000 0x01 0x000
#define MX25_PAD_D15__GPIO_4_5 0x088 0x280 0x000 0x05 0x000
+#define MX25_PAD_D15__SDHC1_DAT7 0x088 0x280 0x4d8 0x06 0x000
#define MX25_PAD_D14__D14 0x08c 0x284 0x000 0x00 0x000
#define MX25_PAD_D14__LD17 0x08c 0x284 0x000 0x01 0x000
#define MX25_PAD_D14__GPIO_4_6 0x08c 0x284 0x000 0x05 0x000
+#define MX25_PAD_D14__SDHC1_DAT6 0x08c 0x284 0x4d4 0x06 0x000
#define MX25_PAD_D13__D13 0x090 0x288 0x000 0x00 0x000
#define MX25_PAD_D13__LD18 0x090 0x288 0x000 0x01 0x000
#define MX25_PAD_D13__GPIO_4_7 0x090 0x288 0x000 0x05 0x000
+#define MX25_PAD_D13__SDHC1_DAT5 0x090 0x288 0x4d0 0x06 0x000
#define MX25_PAD_D12__D12 0x094 0x28c 0x000 0x00 0x000
#define MX25_PAD_D12__GPIO_4_8 0x094 0x28c 0x000 0x05 0x000
+#define MX25_PAD_D12__SDHC1_DAT4 0x094 0x28c 0x4cc 0x06 0x000
#define MX25_PAD_D11__D11 0x098 0x290 0x000 0x00 0x000
#define MX25_PAD_D11__GPIO_4_9 0x098 0x290 0x000 0x05 0x000
+#define MX25_PAD_D11__USBOTG_PWR 0x098 0x290 0x000 0x06 0x000
#define MX25_PAD_D10__D10 0x09c 0x294 0x000 0x00 0x000
#define MX25_PAD_D10__GPIO_4_10 0x09c 0x294 0x000 0x05 0x000
@@ -212,26 +238,33 @@
#define MX25_PAD_LD8__LD8 0x0e8 0x2e0 0x000 0x10 0x000
#define MX25_PAD_LD8__FEC_TX_ERR 0x0e8 0x2e0 0x000 0x15 0x000
+#define MX25_PAD_LD8__SDHC2_CMD 0x0e8 0x2e0 0x4e0 0x06 0x000
#define MX25_PAD_LD9__LD9 0x0ec 0x2e4 0x000 0x10 0x000
#define MX25_PAD_LD9__FEC_COL 0x0ec 0x2e4 0x504 0x15 0x001
+#define MX25_PAD_LD9__SDHC2_CLK 0x0ec 0x2e4 0x4dc 0x06 0x000
#define MX25_PAD_LD10__LD10 0x0f0 0x2e8 0x000 0x10 0x000
-#define MX25_PAD_LD10__FEC_RX_ER 0x0f0 0x2e8 0x518 0x15 0x001
+#define MX25_PAD_LD10__FEC_RX_ERR 0x0f0 0x2e8 0x518 0x15 0x001
#define MX25_PAD_LD11__LD11 0x0f4 0x2ec 0x000 0x10 0x000
#define MX25_PAD_LD11__FEC_RDATA2 0x0f4 0x2ec 0x50c 0x15 0x001
+#define MX25_PAD_LD11__SDHC2_DAT1 0x0f4 0x2ec 0x4e8 0x06 0x000
#define MX25_PAD_LD12__LD12 0x0f8 0x2f0 0x000 0x10 0x000
+#define MX25_PAD_LD12__CSPI2_MOSI 0x0f8 0x2f0 0x4a0 0x02 0x000
#define MX25_PAD_LD12__FEC_RDATA3 0x0f8 0x2f0 0x510 0x15 0x001
#define MX25_PAD_LD13__LD13 0x0fc 0x2f4 0x000 0x10 0x000
+#define MX25_PAD_LD13__CSPI2_MISO 0x0fc 0x2f4 0x49c 0x02 0x000
#define MX25_PAD_LD13__FEC_TDATA2 0x0fc 0x2f4 0x000 0x15 0x000
#define MX25_PAD_LD14__LD14 0x100 0x2f8 0x000 0x10 0x000
+#define MX25_PAD_LD14__CSPI2_SCLK 0x100 0x2f8 0x494 0x02 0x000
#define MX25_PAD_LD14__FEC_TDATA3 0x100 0x2f8 0x000 0x15 0x000
#define MX25_PAD_LD15__LD15 0x104 0x2fc 0x000 0x10 0x000
+#define MX25_PAD_LD15__CSPI2_RDY 0x104 0x2fc 0x498 0x02 0x000
#define MX25_PAD_LD15__FEC_RX_CLK 0x104 0x2fc 0x514 0x15 0x001
#define MX25_PAD_HSYNC__HSYNC 0x108 0x300 0x000 0x10 0x000
@@ -244,6 +277,7 @@
#define MX25_PAD_LSCLK__GPIO_1_24 0x110 0x308 0x000 0x15 0x000
#define MX25_PAD_OE_ACD__OE_ACD 0x114 0x30c 0x000 0x10 0x000
+#define MX25_PAD_OE_ACD__CSPI2_SS0 0x114 0x30c 0x4a4 0x02 0x000
#define MX25_PAD_OE_ACD__GPIO_1_25 0x114 0x30c 0x000 0x15 0x000
#define MX25_PAD_CONTRAST__CONTRAST 0x118 0x310 0x000 0x10 0x000
@@ -257,26 +291,31 @@
#define MX25_PAD_CSI_D2__CSI_D2 0x120 0x318 0x000 0x10 0x000
#define MX25_PAD_CSI_D2__UART5_RXD_MUX 0x120 0x318 0x578 0x11 0x001
+#define MX25_PAD_CSI_D2__SIM1_CLK0 0x120 0x318 0x000 0x04 0x000
#define MX25_PAD_CSI_D2__GPIO_1_27 0x120 0x318 0x000 0x15 0x000
#define MX25_PAD_CSI_D2__CSPI3_MOSI 0x120 0x318 0x000 0x17 0x000
#define MX25_PAD_CSI_D3__CSI_D3 0x124 0x31c 0x000 0x10 0x000
#define MX25_PAD_CSI_D3__UART5_TXD_MUX 0x124 0x31c 0x000 0x11 0x000
+#define MX25_PAD_CSI_D3__SIM1_RST0 0x124 0x31c 0x000 0x04 0x000
#define MX25_PAD_CSI_D3__GPIO_1_28 0x124 0x31c 0x000 0x15 0x000
#define MX25_PAD_CSI_D3__CSPI3_MISO 0x124 0x31c 0x4b4 0x17 0x001
#define MX25_PAD_CSI_D4__CSI_D4 0x128 0x320 0x000 0x10 0x000
#define MX25_PAD_CSI_D4__UART5_RTS 0x128 0x320 0x574 0x11 0x001
+#define MX25_PAD_CSI_D4__SIM1_VEN0 0x128 0x320 0x000 0x04 0x000
#define MX25_PAD_CSI_D4__GPIO_1_29 0x128 0x320 0x000 0x15 0x000
#define MX25_PAD_CSI_D4__CSPI3_SCLK 0x128 0x320 0x000 0x17 0x000
#define MX25_PAD_CSI_D5__CSI_D5 0x12c 0x324 0x000 0x10 0x000
-#define MX25_PAD_CSI_D5__UART5_CTS 0x12c 0x324 0x000 0x11 0x001
+#define MX25_PAD_CSI_D5__UART5_CTS 0x12c 0x324 0x000 0x11 0x000
+#define MX25_PAD_CSI_D5__SIM1_TX0 0x12c 0x324 0x000 0x04 0x000
#define MX25_PAD_CSI_D5__GPIO_1_30 0x12c 0x324 0x000 0x15 0x000
#define MX25_PAD_CSI_D5__CSPI3_RDY 0x12c 0x324 0x000 0x17 0x000
#define MX25_PAD_CSI_D6__CSI_D6 0x130 0x328 0x000 0x10 0x000
#define MX25_PAD_CSI_D6__SDHC2_CMD 0x130 0x328 0x4e0 0x12 0x001
+#define MX25_PAD_CSI_D6__SIM1_PD0 0x130 0x328 0x000 0x04 0x000
#define MX25_PAD_CSI_D6__GPIO_1_31 0x130 0x328 0x000 0x15 0x000
#define MX25_PAD_CSI_D7__CSI_D7 0x134 0x32c 0x000 0x10 0x000
@@ -284,32 +323,32 @@
#define MX25_PAD_CSI_D7__GPIO_1_6 0x134 0x32c 0x000 0x15 0x000
#define MX25_PAD_CSI_D8__CSI_D8 0x138 0x330 0x000 0x10 0x000
-#define MX25_PAD_CSI_D8__AUD6_RXC 0x138 0x330 0x000 0x12 0x001
+#define MX25_PAD_CSI_D8__AUD6_RXC 0x138 0x330 0x000 0x12 0x000
#define MX25_PAD_CSI_D8__GPIO_1_7 0x138 0x330 0x000 0x15 0x000
#define MX25_PAD_CSI_D8__CSPI3_SS2 0x138 0x330 0x4c4 0x17 0x000
#define MX25_PAD_CSI_D9__CSI_D9 0x13c 0x334 0x000 0x10 0x000
-#define MX25_PAD_CSI_D9__AUD6_RXFS 0x13c 0x334 0x000 0x12 0x001
+#define MX25_PAD_CSI_D9__AUD6_RXFS 0x13c 0x334 0x000 0x12 0x000
#define MX25_PAD_CSI_D9__GPIO_4_21 0x13c 0x334 0x000 0x15 0x000
#define MX25_PAD_CSI_D9__CSPI3_SS3 0x13c 0x334 0x4c8 0x17 0x000
#define MX25_PAD_CSI_MCLK__CSI_MCLK 0x140 0x338 0x000 0x10 0x000
-#define MX25_PAD_CSI_MCLK__AUD6_TXD 0x140 0x338 0x000 0x11 0x001
+#define MX25_PAD_CSI_MCLK__AUD6_TXD 0x140 0x338 0x000 0x11 0x000
#define MX25_PAD_CSI_MCLK__SDHC2_DAT0 0x140 0x338 0x4e4 0x12 0x001
#define MX25_PAD_CSI_MCLK__GPIO_1_8 0x140 0x338 0x000 0x15 0x000
#define MX25_PAD_CSI_VSYNC__CSI_VSYNC 0x144 0x33c 0x000 0x10 0x000
-#define MX25_PAD_CSI_VSYNC__AUD6_RXD 0x144 0x33c 0x000 0x11 0x001
+#define MX25_PAD_CSI_VSYNC__AUD6_RXD 0x144 0x33c 0x000 0x11 0x000
#define MX25_PAD_CSI_VSYNC__SDHC2_DAT1 0x144 0x33c 0x4e8 0x12 0x001
#define MX25_PAD_CSI_VSYNC__GPIO_1_9 0x144 0x33c 0x000 0x15 0x000
#define MX25_PAD_CSI_HSYNC__CSI_HSYNC 0x148 0x340 0x000 0x10 0x000
-#define MX25_PAD_CSI_HSYNC__AUD6_TXC 0x148 0x340 0x000 0x11 0x001
+#define MX25_PAD_CSI_HSYNC__AUD6_TXC 0x148 0x340 0x000 0x11 0x000
#define MX25_PAD_CSI_HSYNC__SDHC2_DAT2 0x148 0x340 0x4ec 0x12 0x001
#define MX25_PAD_CSI_HSYNC__GPIO_1_10 0x148 0x340 0x000 0x15 0x000
#define MX25_PAD_CSI_PIXCLK__CSI_PIXCLK 0x14c 0x344 0x000 0x10 0x000
-#define MX25_PAD_CSI_PIXCLK__AUD6_TXFS 0x14c 0x344 0x000 0x11 0x001
+#define MX25_PAD_CSI_PIXCLK__AUD6_TXFS 0x14c 0x344 0x000 0x11 0x000
#define MX25_PAD_CSI_PIXCLK__SDHC2_DAT3 0x14c 0x344 0x4f0 0x12 0x001
#define MX25_PAD_CSI_PIXCLK__GPIO_1_11 0x14c 0x344 0x000 0x15 0x000
@@ -369,8 +408,8 @@
#define MX25_PAD_UART2_RTS__CC1 0x188 0x380 0x000 0x13 0x000
#define MX25_PAD_UART2_RTS__GPIO_4_28 0x188 0x380 0x000 0x15 0x000
-#define MX25_PAD_UART2_CTS__FEC_RX_ER 0x18c 0x384 0x518 0x12 0x002
#define MX25_PAD_UART2_CTS__UART2_CTS 0x18c 0x384 0x000 0x10 0x000
+#define MX25_PAD_UART2_CTS__FEC_RX_ERR 0x18c 0x384 0x518 0x12 0x002
#define MX25_PAD_UART2_CTS__GPIO_4_29 0x18c 0x384 0x000 0x15 0x000
#define MX25_PAD_SD1_CMD__SD1_CMD 0x190 0x388 0x000 0x10 0x000
@@ -392,11 +431,11 @@
#define MX25_PAD_SD1_DATA1__GPIO_2_26 0x19c 0x394 0x000 0x15 0x000
#define MX25_PAD_SD1_DATA2__SD1_DATA2 0x1a0 0x398 0x000 0x10 0x000
-#define MX25_PAD_SD1_DATA2__FEC_RX_CLK 0x1a0 0x398 0x514 0x15 0x002
+#define MX25_PAD_SD1_DATA2__FEC_RX_CLK 0x1a0 0x398 0x514 0x12 0x002
#define MX25_PAD_SD1_DATA2__GPIO_2_27 0x1a0 0x398 0x000 0x15 0x000
#define MX25_PAD_SD1_DATA3__SD1_DATA3 0x1a4 0x39c 0x000 0x10 0x000
-#define MX25_PAD_SD1_DATA3__FEC_CRS 0x1a4 0x39c 0x508 0x10 0x002
+#define MX25_PAD_SD1_DATA3__FEC_CRS 0x1a4 0x39c 0x508 0x12 0x002
#define MX25_PAD_SD1_DATA3__GPIO_2_28 0x1a4 0x39c 0x000 0x15 0x000
#define MX25_PAD_KPP_ROW0__KPP_ROW0 0x1a8 0x3a0 0x000 0x10 0x000
@@ -410,7 +449,7 @@
#define MX25_PAD_KPP_ROW2__GPIO_2_31 0x1b0 0x3a8 0x000 0x15 0x000
#define MX25_PAD_KPP_ROW3__KPP_ROW3 0x1b4 0x3ac 0x000 0x10 0x000
-#define MX25_PAD_KPP_ROW3__CSI_LD1 0x1b4 0x3ac 0x48c 0x13 0x002
+#define MX25_PAD_KPP_ROW3__CSI_D1 0x1b4 0x3ac 0x48c 0x13 0x002
#define MX25_PAD_KPP_ROW3__GPIO_3_0 0x1b4 0x3ac 0x000 0x15 0x000
#define MX25_PAD_KPP_COL0__KPP_COL0 0x1b8 0x3b0 0x000 0x10 0x000
@@ -455,9 +494,18 @@
#define MX25_PAD_FEC_RDATA0__GPIO_3_10 0x1dc 0x3d4 0x000 0x15 0x000
#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 0x1e0 0x3d8 0x000 0x10 0x000
+/*
+ * According to the i.MX25 Reference manual (IMX25RM, Rev. 2,
+ * 01/2011) this is CAN1_TX but that's wrong.
+ */
+#define MX25_PAD_FEC_RDATA1__CAN2_TX 0x1e0 0x3d8 0x000 0x14 0x000
#define MX25_PAD_FEC_RDATA1__GPIO_3_11 0x1e0 0x3d8 0x000 0x15 0x000
#define MX25_PAD_FEC_RX_DV__FEC_RX_DV 0x1e4 0x3dc 0x000 0x10 0x000
+/*
+ * According to the i.MX25 Reference manual (IMX25RM, Rev. 2,
+ * 01/2011) this is CAN1_RX but that's wrong.
+ */
#define MX25_PAD_FEC_RX_DV__CAN2_RX 0x1e4 0x3dc 0x484 0x14 0x000
#define MX25_PAD_FEC_RX_DV__GPIO_3_12 0x1e4 0x3dc 0x000 0x15 0x000
@@ -471,30 +519,34 @@
#define MX25_PAD_DE_B__DE_B 0x1f0 0x3ec 0x000 0x10 0x000
#define MX25_PAD_DE_B__GPIO_2_20 0x1f0 0x3ec 0x000 0x15 0x000
-#define MX25_PAD_TDO__TDO 0x000 0x3e8 0x000 0x00 0x000
-
#define MX25_PAD_GPIO_A__GPIO_A 0x1f4 0x3f0 0x000 0x10 0x000
#define MX25_PAD_GPIO_A__CAN1_TX 0x1f4 0x3f0 0x000 0x16 0x000
#define MX25_PAD_GPIO_A__USBOTG_PWR 0x1f4 0x3f0 0x000 0x12 0x000
#define MX25_PAD_GPIO_B__GPIO_B 0x1f8 0x3f4 0x000 0x10 0x000
-#define MX25_PAD_GPIO_B__CAN1_RX 0x1f8 0x3f4 0x480 0x16 0x001
#define MX25_PAD_GPIO_B__USBOTG_OC 0x1f8 0x3f4 0x57c 0x12 0x001
+#define MX25_PAD_GPIO_B__CAN1_RX 0x1f8 0x3f4 0x480 0x16 0x001
#define MX25_PAD_GPIO_C__GPIO_C 0x1fc 0x3f8 0x000 0x10 0x000
+#define MX25_PAD_GPIO_C__PWM4_PWMO 0x1fc 0x3f8 0x000 0x11 0x000
+#define MX25_PAD_GPIO_C__I2C2_SCL 0x1fc 0x3f8 0x51c 0x12 0x001
+#define MX25_PAD_GPIO_C__KPP_COL4 0x1fc 0x3f8 0x52c 0x13 0x001
#define MX25_PAD_GPIO_C__CAN2_TX 0x1fc 0x3f8 0x000 0x16 0x000
#define MX25_PAD_GPIO_D__GPIO_D 0x200 0x3fc 0x000 0x10 0x000
+#define MX25_PAD_GPIO_D__I2C2_SDA 0x200 0x3fc 0x520 0x12 0x001
#define MX25_PAD_GPIO_D__CAN2_RX 0x200 0x3fc 0x484 0x16 0x001
#define MX25_PAD_GPIO_E__GPIO_E 0x204 0x400 0x000 0x10 0x000
#define MX25_PAD_GPIO_E__I2C3_CLK 0x204 0x400 0x524 0x11 0x002
#define MX25_PAD_GPIO_E__LD16 0x204 0x400 0x000 0x12 0x000
#define MX25_PAD_GPIO_E__AUD7_TXD 0x204 0x400 0x000 0x14 0x000
+#define MX25_PAD_GPIO_E__UART4_RXD 0x204 0x400 0x570 0x16 0x002
#define MX25_PAD_GPIO_F__GPIO_F 0x208 0x404 0x000 0x10 0x000
#define MX25_PAD_GPIO_F__LD17 0x208 0x404 0x000 0x12 0x000
#define MX25_PAD_GPIO_F__AUD7_TXC 0x208 0x404 0x000 0x14 0x000
+#define MX25_PAD_GPIO_F__UART4_TXD 0x208 0x404 0x000 0x16 0x000
#define MX25_PAD_EXT_ARMCLK__EXT_ARMCLK 0x20c 0x000 0x000 0x10 0x000
#define MX25_PAD_EXT_ARMCLK__GPIO_3_15 0x20c 0x000 0x000 0x15 0x000
@@ -505,6 +557,7 @@
#define MX25_PAD_VSTBY_REQ__VSTBY_REQ 0x214 0x408 0x000 0x10 0x000
#define MX25_PAD_VSTBY_REQ__AUD7_TXFS 0x214 0x408 0x000 0x14 0x000
#define MX25_PAD_VSTBY_REQ__GPIO_3_17 0x214 0x408 0x000 0x15 0x000
+
#define MX25_PAD_VSTBY_ACK__VSTBY_ACK 0x218 0x40c 0x000 0x10 0x000
#define MX25_PAD_VSTBY_ACK__GPIO_3_18 0x218 0x40c 0x000 0x15 0x000
@@ -517,6 +570,7 @@
#define MX25_PAD_BOOT_MODE0__BOOT_MODE0 0x224 0x000 0x000 0x00 0x000
#define MX25_PAD_BOOT_MODE0__GPIO_4_30 0x224 0x000 0x000 0x05 0x000
+
#define MX25_PAD_BOOT_MODE1__BOOT_MODE1 0x228 0x000 0x000 0x00 0x000
#define MX25_PAD_BOOT_MODE1__GPIO_4_31 0x228 0x000 0x000 0x05 0x000
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index e4d3aecc4ed2..677f81d9dcd5 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -428,6 +428,7 @@
pwm4: pwm@53fc8000 {
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ #pwm-cells = <2>;
reg = <0x53fc8000 0x4000>;
clocks = <&clks 108>, <&clks 52>;
clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 4b063b68db44..6951b66d1ab7 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -488,6 +488,7 @@
interrupts = <54>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
fsl,usbmisc = <&usbmisc 1>;
+ dr_mode = "host";
status = "disabled";
};
@@ -497,6 +498,7 @@
interrupts = <55>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
fsl,usbmisc = <&usbmisc 2>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx28-apf28.dts b/arch/arm/boot/dts/imx28-apf28.dts
index 7198fe3798c6..070e59cbdd8b 100644
--- a/arch/arm/boot/dts/imx28-apf28.dts
+++ b/arch/arm/boot/dts/imx28-apf28.dts
@@ -78,7 +78,7 @@
phy-mode = "rmii";
pinctrl-names = "default";
pinctrl-0 = <&mac0_pins_a>;
- phy-reset-gpios = <&gpio4 13 0>;
+ phy-reset-gpios = <&gpio4 13 GPIO_ACTIVE_LOW>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/imx28-apf28dev.dts b/arch/arm/boot/dts/imx28-apf28dev.dts
index 1f38a052ad4b..7ac4f1af16ac 100644
--- a/arch/arm/boot/dts/imx28-apf28dev.dts
+++ b/arch/arm/boot/dts/imx28-apf28dev.dts
@@ -110,6 +110,13 @@
};
};
};
+
+ can0: can@80032000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins_a>;
+ xceiver-supply = <&reg_can0_vcc>;
+ status = "okay";
+ };
};
apbx@80040000 {
@@ -130,6 +137,13 @@
status = "okay";
};
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_pins_a>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+ };
+
usbphy0: usbphy@8007c000 {
status = "okay";
};
@@ -143,7 +157,8 @@
ahb@80080000 {
usb0: usb@80080000 {
pinctrl-names = "default";
- pinctrl-0 = <&usb0_otg_apf28dev>;
+ pinctrl-0 = <&usb0_otg_apf28dev
+ &usb0_id_pins_b>;
vbus-supply = <&reg_usb0_vbus>;
status = "okay";
};
@@ -156,7 +171,7 @@
phy-mode = "rmii";
pinctrl-names = "default";
pinctrl-0 = <&mac1_pins_a>;
- phy-reset-gpios = <&gpio0 23 0>;
+ phy-reset-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
status = "okay";
};
};
@@ -175,6 +190,14 @@
gpio = <&gpio1 23 1>;
enable-active-high;
};
+
+ reg_can0_vcc: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "can0_vcc";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
};
leds {
@@ -200,8 +223,9 @@
user-button {
label = "User button";
- gpios = <&gpio0 17 0>;
+ gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
linux,code = <0x100>;
+ gpio-key,wakeup;
};
};
};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 47f68ac868d4..4e073e854742 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -829,6 +829,19 @@
fsl,pull-up = <MXS_PULL_DISABLE>;
};
+ spi3_pins_b: spi3@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP3_SCK__SSP3_SCK
+ MX28_PAD_SSP3_MOSI__SSP3_CMD
+ MX28_PAD_SSP3_MISO__SSP3_D0
+ MX28_PAD_SSP3_SS0__SSP3_D3
+ >;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+
usb0_pins_a: usb0@0 {
reg = <0>;
fsl,pinmux-ids = <
@@ -900,7 +913,7 @@
80 81 68 69
70 71 72 73
74 75 76 77>;
- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
"saif0", "saif1", "i2c0", "i2c1",
"auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
"auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
@@ -1197,6 +1210,7 @@
interrupts = <92>;
clocks = <&clks 61>;
fsl,usbphy = <&usbphy1>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 6932928f3b45..b6478e97d6a7 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -318,6 +318,7 @@
clocks = <&clks 73>;
fsl,usbmisc = <&usbmisc 1>;
fsl,usbphy = <&usbphy1>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index 620b0f030591..e2457138311f 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -197,6 +197,7 @@
reg = <0x53f80200 0x0200>;
interrupts = <14>;
clocks = <&clks IMX5_CLK_USB_PHY2_GATE>;
+ dr_mode = "host";
status = "disabled";
};
@@ -205,6 +206,7 @@
reg = <0x53f80400 0x0200>;
interrupts = <16>;
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
+ dr_mode = "host";
status = "disabled";
};
@@ -213,6 +215,7 @@
reg = <0x53f80600 0x0200>;
interrupts = <17>;
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index c0116cffc513..f46fe9bf0bcb 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -265,6 +265,7 @@
interrupts = <14>;
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
fsl,usbmisc = <&usbmisc 1>;
+ dr_mode = "host";
status = "disabled";
};
@@ -274,6 +275,7 @@
interrupts = <16>;
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
fsl,usbmisc = <&usbmisc 2>;
+ dr_mode = "host";
status = "disabled";
};
@@ -283,6 +285,7 @@
interrupts = <17>;
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
fsl,usbmisc = <&usbmisc 3>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index ff4fa7ecacd8..c3e3ca9362fb 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -309,6 +309,7 @@
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
fsl,usbmisc = <&usbmisc 1>;
fsl,usbphy = <&usbphy1>;
+ dr_mode = "host";
status = "disabled";
};
@@ -318,6 +319,7 @@
interrupts = <16>;
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
fsl,usbmisc = <&usbmisc 2>;
+ dr_mode = "host";
status = "disabled";
};
@@ -327,6 +329,7 @@
interrupts = <17>;
clocks = <&clks IMX5_CLK_USBOH3_GATE>;
fsl,usbmisc = <&usbmisc 3>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6dl-aristainetos_4.dts b/arch/arm/boot/dts/imx6dl-aristainetos_4.dts
index 9cd06e5e59f0..d4c4a22db488 100644
--- a/arch/arm/boot/dts/imx6dl-aristainetos_4.dts
+++ b/arch/arm/boot/dts/imx6dl-aristainetos_4.dts
@@ -83,3 +83,7 @@
&ipu1_di0_disp0 {
remote-endpoint = <&display0_in>;
};
+
+&pwm1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6dl-aristainetos_7.dts b/arch/arm/boot/dts/imx6dl-aristainetos_7.dts
index b413e24288dc..15203f0e9725 100644
--- a/arch/arm/boot/dts/imx6dl-aristainetos_7.dts
+++ b/arch/arm/boot/dts/imx6dl-aristainetos_7.dts
@@ -72,3 +72,7 @@
&ipu1_di0_disp0 {
remote-endpoint = <&display0_in>;
};
+
+&pwm3 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6dl-cubox-i.dts b/arch/arm/boot/dts/imx6dl-cubox-i.dts
index 58aa8f2b0f26..e0b7fe8e18f8 100644
--- a/arch/arm/boot/dts/imx6dl-cubox-i.dts
+++ b/arch/arm/boot/dts/imx6dl-cubox-i.dts
@@ -1,5 +1,43 @@
/*
* Copyright (C) 2014 Russell King
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index 44a0e6736bb1..7369d2d7da3e 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -1,6 +1,44 @@
/*
* Copyright (C) 2014 Rabeeh Khoury (rabeeh@solid-run.com)
* Based on dt work by Russell King
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q-cubox-i.dts b/arch/arm/boot/dts/imx6q-cubox-i.dts
index 9efd8b0c8011..670bd8c4c847 100644
--- a/arch/arm/boot/dts/imx6q-cubox-i.dts
+++ b/arch/arm/boot/dts/imx6q-cubox-i.dts
@@ -1,5 +1,43 @@
/*
* Copyright (C) 2014 Russell King
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q-hummingboard.dts b/arch/arm/boot/dts/imx6q-hummingboard.dts
index c2bf8476ce45..0f6044553a24 100644
--- a/arch/arm/boot/dts/imx6q-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6q-hummingboard.dts
@@ -1,6 +1,44 @@
/*
* Copyright (C) 2014 Rabeeh Khoury (rabeeh@solid-run.com)
* Based on dt work by Russell King
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 93ec79bb6b35..399103b8e2c9 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -294,19 +294,21 @@
};
&mipi_dsi {
- port@2 {
- reg = <2>;
+ ports {
+ port@2 {
+ reg = <2>;
- mipi_mux_2: endpoint {
- remote-endpoint = <&ipu2_di0_mipi>;
+ mipi_mux_2: endpoint {
+ remote-endpoint = <&ipu2_di0_mipi>;
+ };
};
- };
- port@3 {
- reg = <3>;
+ port@3 {
+ reg = <3>;
- mipi_mux_3: endpoint {
- remote-endpoint = <&ipu2_di1_mipi>;
+ mipi_mux_3: endpoint {
+ remote-endpoint = <&ipu2_di1_mipi>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 6a524ca011e7..d033bb182060 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -1,8 +1,48 @@
/*
* Copyright (C) 2014 Russell King
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
#include "imx6qdl-microsom.dtsi"
#include "imx6qdl-microsom-ar8035.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
/ {
ir_recv: ir-receiver {
@@ -66,6 +106,18 @@
spdif-controller = <&spdif>;
spdif-out;
};
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&pinctrl_gpio_key>;
+ pinctrl-names = "default";
+
+ button_0 {
+ label = "Button 0";
+ gpios = <&gpio3 8 GPIO_ACTIVE_LOW>;
+ linux,code = <BTN_0>;
+ };
+ };
};
&hdmi {
@@ -170,9 +222,19 @@
MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
>;
};
+
+ pinctrl_gpio_key: gpio-key {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_DA8__GPIO3_IO08 0x17059
+ >;
+ };
};
};
+&pwm1 {
+ status = "okay";
+};
+
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cubox_i_spdif>;
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62841e85a91e..151a3db2aea9 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -1,5 +1,43 @@
/*
* Copyright (C) 2013,2014 Russell King
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
#include "imx6qdl-microsom.dtsi"
#include "imx6qdl-microsom-ar8035.dtsi"
@@ -50,6 +88,19 @@
};
};
+ sound-sgtl5000 {
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ compatible = "fsl,imx-audio-sgtl5000";
+ model = "On-board Codec";
+ mux-ext-port = <5>;
+ mux-int-port = <1>;
+ ssi-controller = <&ssi1>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "On-board SPDIF";
@@ -59,6 +110,10 @@
};
};
+&audmux {
+ status = "okay";
+};
+
&can1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hummingboard_flexcan1>;
@@ -75,16 +130,24 @@
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
-
- /*
- * Not fitted on Carrier-1 board... yet
status = "okay";
+ /* Pro baseboard model */
rtc: pcf8523@68 {
compatible = "nxp,pcf8523";
reg = <0x68>;
};
- */
+
+ /* Pro baseboard model */
+ sgtl5000: sgtl5000@0a {
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ compatible = "fsl,sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_sgtl5000>;
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
};
&i2c2 {
@@ -129,6 +192,20 @@
>;
};
+ pinctrl_hummingboard_pwm1: pwm1grp {
+ fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b1>;
+ };
+
+ pinctrl_hummingboard_sgtl5000: hummingboard-sgtl5000 {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0
+ MX6QDL_PAD_KEY_COL0__AUD5_TXC 0x130b0
+ MX6QDL_PAD_KEY_ROW0__AUD5_TXD 0x110b0
+ MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0
+ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
+ >;
+ };
+
pinctrl_hummingboard_spdif: hummingboard-spdif {
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
@@ -168,12 +245,28 @@
};
};
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_pwm1>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ status = "okay";
+};
+
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hummingboard_spdif>;
status = "okay";
};
+&ssi1 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
&usbh1 {
disable-over-current;
vbus-supply = <&reg_usbh1_vbus>;
diff --git a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
index db9f45b2c573..4a1820309cdb 100644
--- a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
@@ -3,6 +3,44 @@
*
* This describes the hookup for an AR8035 to the iMX6 on the SolidRun
* MicroSOM.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
&fec {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx6qdl-microsom.dtsi b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
index 79eac6849d4c..349f82be816e 100644
--- a/arch/arm/boot/dts/imx6qdl-microsom.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
@@ -1,5 +1,43 @@
/*
* Copyright (C) 2013,2014 Russell King
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
&iomuxc {
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 19cc269a08d4..1ce6133b67f5 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -31,6 +31,7 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio4 15 0>;
+ enable-active-high;
};
reg_usb_h1_vbus: regulator@1 {
@@ -40,6 +41,7 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 0 0>;
+ enable-active-high;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 009abd69385d..3b24b12651b2 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -182,6 +182,33 @@
};
};
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ max7310_a: gpio@30 {
+ compatible = "maxim,max7310";
+ reg = <0x30>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ max7310_b: gpio@32 {
+ compatible = "maxim,max7310";
+ reg = <0x32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ max7310_c: gpio@34 {
+ compatible = "maxim,max7310";
+ reg = <0x34>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
@@ -265,6 +292,13 @@
>;
};
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
pinctrl_pwm3: pwm1grp {
fsl,pins = <
MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index d6c69ec44314..f74a8ded515f 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -53,6 +53,7 @@
interrupt-controller;
reg = <0x00a01000 0x1000>,
<0x00a00100 0x100>;
+ interrupt-parent = <&intc>;
};
clocks {
@@ -82,7 +83,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
- interrupt-parent = <&intc>;
+ interrupt-parent = <&gpc>;
ranges;
dma_apbh: dma-apbh@00110000 {
@@ -122,6 +123,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x00a00600 0x20>;
interrupts = <1 13 0xf01>;
+ interrupt-parent = <&intc>;
clocks = <&clks IMX6QDL_CLK_TWD>;
};
@@ -357,6 +359,7 @@
clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_PWM1>;
clock-names = "ipg", "per";
+ status = "disabled";
};
pwm2: pwm@02084000 {
@@ -367,6 +370,7 @@
clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_PWM2>;
clock-names = "ipg", "per";
+ status = "disabled";
};
pwm3: pwm@02088000 {
@@ -377,6 +381,7 @@
clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_PWM3>;
clock-names = "ipg", "per";
+ status = "disabled";
};
pwm4: pwm@0208c000 {
@@ -387,6 +392,7 @@
clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_PWM4>;
clock-names = "ipg", "per";
+ status = "disabled";
};
can1: flexcan@02090000 {
@@ -598,7 +604,7 @@
regulator-name = "vddpu";
regulator-min-microvolt = <725000>;
regulator-max-microvolt = <1450000>;
- regulator-always-on;
+ regulator-enable-ramp-delay = <150>;
anatop-reg-offset = <0x140>;
anatop-vol-bit-shift = <9>;
anatop-vol-bit-width = <5>;
@@ -658,7 +664,7 @@
#size-cells = <1>;
ranges = <0 0x020cc000 0x4000>;
- snvs-rtc-lp@34 {
+ snvs_rtc: snvs-rtc-lp@34 {
compatible = "fsl,sec-v4.0-mon-rtc-lp";
reg = <0x34 0x58>;
interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
@@ -693,8 +699,19 @@
gpc: gpc@020dc000 {
compatible = "fsl,imx6q-gpc";
reg = <0x020dc000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
<0 90 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&intc>;
+ pu-supply = <&reg_pu>;
+ clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
+ <&clks IMX6QDL_CLK_GPU3D_SHADER>,
+ <&clks IMX6QDL_CLK_GPU2D_CORE>,
+ <&clks IMX6QDL_CLK_GPU2D_AXI>,
+ <&clks IMX6QDL_CLK_OPENVG_AXI>,
+ <&clks IMX6QDL_CLK_VPU_AXI>;
+ #power-domain-cells = <1>;
};
gpr: iomuxc-gpr@020e0000 {
@@ -845,6 +862,7 @@
clocks = <&clks IMX6QDL_CLK_USBOH3>;
fsl,usbphy = <&usbphy2>;
fsl,usbmisc = <&usbmisc 1>;
+ dr_mode = "host";
status = "disabled";
};
@@ -854,6 +872,7 @@
interrupts = <0 41 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_USBOH3>;
fsl,usbmisc = <&usbmisc 2>;
+ dr_mode = "host";
status = "disabled";
};
@@ -863,6 +882,7 @@
interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_USBOH3>;
fsl,usbmisc = <&usbmisc 3>;
+ dr_mode = "host";
status = "disabled";
};
@@ -1022,19 +1042,24 @@
reg = <0x021e0000 0x4000>;
status = "disabled";
- port@0 {
- reg = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
- mipi_mux_0: endpoint {
- remote-endpoint = <&ipu1_di0_mipi>;
+ mipi_mux_0: endpoint {
+ remote-endpoint = <&ipu1_di0_mipi>;
+ };
};
- };
- port@1 {
- reg = <1>;
+ port@1 {
+ reg = <1>;
- mipi_mux_1: endpoint {
- remote-endpoint = <&ipu1_di1_mipi>;
+ mipi_mux_1: endpoint {
+ remote-endpoint = <&ipu1_di1_mipi>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx6sl-warp.dts b/arch/arm/boot/dts/imx6sl-warp.dts
new file mode 100644
index 000000000000..64f7decf1fdc
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sl-warp.dts
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2014, 2015 O.S. Systems Software LTDA.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "imx6sl.dtsi"
+
+/ {
+ model = "WaRP Board";
+ compatible = "warp,imx6sl-warp", "fsl,imx6sl";
+
+ memory {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg_usb_otg1_vbus: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "usb_otg1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 0 0>;
+ enable-active-high;
+ };
+
+ reg_usb_otg2_vbus: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "usb_otg2_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 2 0>;
+ enable-active-high;
+ };
+
+ reg_1p8v: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ usdhc3_pwrseq: usdhc3_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>, /* WL_REG_ON */
+ <&gpio3 25 GPIO_ACTIVE_LOW>, /* BT_REG_ON */
+ <&gpio4 4 GPIO_ACTIVE_LOW>, /* BT_WAKE */
+ <&gpio4 6 GPIO_ACTIVE_LOW>; /* BT_RST_N */
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&usbotg1 {
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usbotg2 {
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <4>;
+ non-removable;
+ keep-power-in-suspend;
+ enable-sdio-wakeup;
+ mmc-pwrseq = <&usdhc3_pwrseq>;
+ status = "okay";
+};
+
+&iomuxc {
+ imx6sl-warp {
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x41b0b1
+ MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x41b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6SL_PAD_EPDC_D12__UART2_RX_DATA 0x41b0b1
+ MX6SL_PAD_EPDC_D13__UART2_TX_DATA 0x41b0b1
+ MX6SL_PAD_EPDC_D14__UART2_RTS_B 0x4130B1
+ MX6SL_PAD_EPDC_D15__UART2_CTS_B 0x4130B1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6SL_PAD_AUD_RXC__UART3_RX_DATA 0x41b0b1
+ MX6SL_PAD_AUD_RXC__UART3_TX_DATA 0x41b0b1
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6SL_PAD_SD2_CMD__SD2_CMD 0x417059
+ MX6SL_PAD_SD2_CLK__SD2_CLK 0x410059
+ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x417059
+ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x417059
+ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x417059
+ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x417059
+ MX6SL_PAD_SD2_DAT4__SD2_DATA4 0x417059
+ MX6SL_PAD_SD2_DAT5__SD2_DATA5 0x417059
+ MX6SL_PAD_SD2_DAT6__SD2_DATA6 0x417059
+ MX6SL_PAD_SD2_DAT7__SD2_DATA7 0x417059
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD2_CMD__SD2_CMD 0x4170b9
+ MX6SL_PAD_SD2_CLK__SD2_CLK 0x4100b9
+ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x4170b9
+ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x4170b9
+ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x4170b9
+ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x4170b9
+ MX6SL_PAD_SD2_DAT4__SD2_DATA4 0x4170b9
+ MX6SL_PAD_SD2_DAT5__SD2_DATA5 0x4170b9
+ MX6SL_PAD_SD2_DAT6__SD2_DATA6 0x4170b9
+ MX6SL_PAD_SD2_DAT7__SD2_DATA7 0x4170b9
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD2_CMD__SD2_CMD 0x4170f9
+ MX6SL_PAD_SD2_CLK__SD2_CLK 0x4100f9
+ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x4170f9
+ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x4170f9
+ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x4170f9
+ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x4170f9
+ MX6SL_PAD_SD2_DAT4__SD2_DATA4 0x4170f9
+ MX6SL_PAD_SD2_DAT5__SD2_DATA5 0x4170f9
+ MX6SL_PAD_SD2_DAT6__SD2_DATA6 0x4170f9
+ MX6SL_PAD_SD2_DAT7__SD2_DATA7 0x4170f9
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6SL_PAD_SD3_CMD__SD3_CMD 0x417059
+ MX6SL_PAD_SD3_CLK__SD3_CLK 0x410059
+ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x417059
+ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x417059
+ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x417059
+ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x417059
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD3_CMD__SD3_CMD 0x4170b9
+ MX6SL_PAD_SD3_CLK__SD3_CLK 0x4100b9
+ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x4170b9
+ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x4170b9
+ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x4170b9
+ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x4170b9
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ fsl,pins = <
+ MX6SL_PAD_SD3_CMD__SD3_CMD 0x4170f9
+ MX6SL_PAD_SD3_CLK__SD3_CLK 0x4100f9
+ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x4170f9
+ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x4170f9
+ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x4170f9
+ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x4170f9
+ >;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 36ab8e054cee..a78e715e3982 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -72,6 +72,7 @@
interrupt-controller;
reg = <0x00a01000 0x1000>,
<0x00a00100 0x100>;
+ interrupt-parent = <&intc>;
};
clocks {
@@ -95,7 +96,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
- interrupt-parent = <&intc>;
+ interrupt-parent = <&gpc>;
ranges;
ocram: sram@00900000 {
@@ -568,7 +569,7 @@
#size-cells = <1>;
ranges = <0 0x020cc000 0x4000>;
- snvs-rtc-lp@34 {
+ snvs_rtc: snvs-rtc-lp@34 {
compatible = "fsl,sec-v4.0-mon-rtc-lp";
reg = <0x34 0x58>;
interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
@@ -603,7 +604,14 @@
gpc: gpc@020dc000 {
compatible = "fsl,imx6sl-gpc", "fsl,imx6q-gpc";
reg = <0x020dc000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&intc>;
+ pu-supply = <&reg_pu>;
+ clocks = <&clks IMX6SL_CLK_GPU2D_OVG>,
+ <&clks IMX6SL_CLK_GPU2D_PODF>;
+ #power-domain-cells = <1>;
};
gpr: iomuxc-gpr@020e0000 {
@@ -699,6 +707,7 @@
interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_USBOH3>;
fsl,usbmisc = <&usbmisc 2>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts
new file mode 100644
index 000000000000..c76b87cba275
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "imx6sx-sdb.dtsi"
+
+/ {
+ model = "Freescale i.MX6 SoloX SDB RevA Board";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic: pfuze100@08 {
+ compatible = "fsl,pfuze100";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&qspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi2>;
+ status = "okay";
+
+ flash0: s25fl128s@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25fl128s";
+ spi-max-frequency = <66000000>;
+ };
+
+ flash1: s25fl128s@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25fl128s";
+ spi-max-frequency = <66000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 32f07d6b4042..0bfc4e7865b2 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -1,197 +1,40 @@
/*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-/dts-v1/;
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include "imx6sx.dtsi"
+#include "imx6sx-sdb.dtsi"
/ {
- model = "Freescale i.MX6 SoloX SDB Board";
- compatible = "fsl,imx6sx-sdb", "fsl,imx6sx";
-
- chosen {
- stdout-path = &uart1;
- };
-
- memory {
- reg = <0x80000000 0x40000000>;
- };
-
- backlight {
- compatible = "pwm-backlight";
- pwms = <&pwm3 0 5000000>;
- brightness-levels = <0 4 8 16 32 64 128 255>;
- default-brightness-level = <6>;
- };
-
- gpio-keys {
- compatible = "gpio-keys";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_gpio_keys>;
-
- volume-up {
- label = "Volume Up";
- gpios = <&gpio1 18 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_VOLUMEUP>;
- };
-
- volume-down {
- label = "Volume Down";
- gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_VOLUMEDOWN>;
- };
- };
-
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- vcc_sd3: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_vcc_sd3>;
- regulator-name = "VCC_SD3";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- gpio = <&gpio2 11 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_usb_otg1_vbus: regulator@1 {
- compatible = "regulator-fixed";
- reg = <1>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usb_otg1>;
- regulator-name = "usb_otg1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_usb_otg2_vbus: regulator@2 {
- compatible = "regulator-fixed";
- reg = <2>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usb_otg2>;
- regulator-name = "usb_otg2_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_psu_5v: regulator@3 {
- compatible = "regulator-fixed";
- reg = <3>;
- regulator-name = "PSU-5V0";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
-
- reg_lcd_3v3: regulator@4 {
- compatible = "regulator-fixed";
- reg = <4>;
- regulator-name = "lcd-3v3";
- gpio = <&gpio3 27 0>;
- enable-active-high;
- };
-
- reg_peri_3v3: regulator@5 {
- compatible = "regulator-fixed";
- reg = <5>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_peri_3v3>;
- regulator-name = "peri_3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- regulator-always-on;
- };
-
- reg_enet_3v3: regulator@6 {
- compatible = "regulator-fixed";
- reg = <6>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet_3v3>;
- regulator-name = "enet_3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
- };
- };
-
- sound {
- compatible = "fsl,imx6sx-sdb-wm8962", "fsl,imx-audio-wm8962";
- model = "wm8962-audio";
- ssi-controller = <&ssi2>;
- audio-codec = <&codec>;
- audio-routing =
- "Headphone Jack", "HPOUTL",
- "Headphone Jack", "HPOUTR",
- "Ext Spk", "SPKOUTL",
- "Ext Spk", "SPKOUTR",
- "AMIC", "MICBIAS",
- "IN3R", "AMIC";
- mux-int-port = <2>;
- mux-ext-port = <6>;
- };
-};
-
-&audmux {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_audmux>;
- status = "okay";
+ model = "Freescale i.MX6 SoloX SDB RevB Board";
};
-&fec1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet1>;
- phy-supply = <&reg_enet_3v3>;
- phy-mode = "rgmii";
- phy-handle = <&ethphy1>;
- status = "okay";
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- ethphy1: ethernet-phy@1 {
- reg = <1>;
- };
-
- ethphy2: ethernet-phy@2 {
- reg = <2>;
- };
- };
+&cpu0 {
+ operating-points = <
+ /* kHz uV */
+ 996000 1250000
+ 792000 1175000
+ 396000 1175000
+ >;
+ fsl,soc-operating-points = <
+ /* ARM kHz SOC uV */
+ 996000 1250000
+ 792000 1175000
+ 396000 1175000
+ >;
};
-&fec2 {
+&i2c1 {
+ clock-frequency = <100000>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet2>;
- phy-mode = "rgmii";
- phy-handle = <&ethphy2>;
+ pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
-};
-
-&i2c1 {
- clock-frequency = <100000>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1>;
- status = "okay";
pmic: pfuze100@08 {
- compatible = "fsl,pfuze100";
+ compatible = "fsl,pfuze200";
reg = <0x08>;
regulators {
@@ -203,14 +46,6 @@
regulator-ramp-delay = <6250>;
};
- sw1c_reg: sw1c {
- regulator-min-microvolt = <300000>;
- regulator-max-microvolt = <1875000>;
- regulator-boot-on;
- regulator-always-on;
- regulator-ramp-delay = <6250>;
- };
-
sw2_reg: sw2 {
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <3300000>;
@@ -232,11 +67,6 @@
regulator-always-on;
};
- sw4_reg: sw4 {
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <3300000>;
- };
-
swbst_reg: swbst {
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5150000>;
@@ -292,401 +122,24 @@
};
};
-&i2c4 {
- clock-frequency = <100000>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c4>;
- status = "okay";
-
- codec: wm8962@1a {
- compatible = "wlf,wm8962";
- reg = <0x1a>;
- clocks = <&clks IMX6SX_CLK_AUDIO>;
- DCVDD-supply = <&vgen4_reg>;
- DBVDD-supply = <&vgen4_reg>;
- AVDD-supply = <&vgen4_reg>;
- CPVDD-supply = <&vgen4_reg>;
- MICVDD-supply = <&vgen3_reg>;
- PLLVDD-supply = <&vgen4_reg>;
- SPKVDD1-supply = <&reg_psu_5v>;
- SPKVDD2-supply = <&reg_psu_5v>;
- };
-};
-
-&lcdif1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_lcd>;
- lcd-supply = <&reg_lcd_3v3>;
- display = <&display0>;
- status = "okay";
-
- display0: display0 {
- bits-per-pixel = <16>;
- bus-width = <24>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <33500000>;
- hactive = <800>;
- vactive = <480>;
- hback-porch = <89>;
- hfront-porch = <164>;
- vback-porch = <23>;
- vfront-porch = <10>;
- hsync-len = <10>;
- vsync-len = <10>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
- };
- };
-};
-
-&pwm3 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pwm3>;
- status = "okay";
-};
-
-&snvs_poweroff {
- status = "okay";
-};
-
&qspi2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_qspi2>;
status = "okay";
- flash0: s25fl128s@0 {
- reg = <0>;
+ flash0: n25q256a@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "spansion,s25fl128s";
- spi-max-frequency = <66000000>;
+ compatible = "micron,n25q256a";
+ spi-max-frequency = <29000000>;
+ reg = <0>;
};
- flash1: s25fl128s@1 {
- reg = <1>;
+ flash1: n25q256a@1 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "spansion,s25fl128s";
- spi-max-frequency = <66000000>;
- };
-};
-
-&ssi2 {
- status = "okay";
-};
-
-&uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1>;
- status = "okay";
-};
-
-&uart5 { /* for bluetooth */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart5>;
- fsl,uart-has-rtscts;
- status = "okay";
-};
-
-&usbotg1 {
- vbus-supply = <&reg_usb_otg1_vbus>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usb_otg1_id>;
- status = "okay";
-};
-
-&usbotg2 {
- vbus-supply = <&reg_usb_otg2_vbus>;
- dr_mode = "host";
- status = "okay";
-};
-
-&usdhc2 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc2>;
- non-removable;
- no-1-8-v;
- keep-power-in-suspend;
- enable-sdio-wakeup;
- status = "okay";
-};
-
-&usdhc3 {
- pinctrl-names = "default", "state_100mhz", "state_200mhz";
- pinctrl-0 = <&pinctrl_usdhc3>;
- pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
- pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
- bus-width = <8>;
- cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
- wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
- keep-power-in-suspend;
- enable-sdio-wakeup;
- vmmc-supply = <&vcc_sd3>;
- status = "okay";
-};
-
-&usdhc4 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc4>;
- cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
- wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
- status = "okay";
-};
-
-&iomuxc {
- imx6x-sdb {
- pinctrl_audmux: audmuxgrp {
- fsl,pins = <
- MX6SX_PAD_CSI_DATA00__AUDMUX_AUD6_TXC 0x130b0
- MX6SX_PAD_CSI_DATA01__AUDMUX_AUD6_TXFS 0x130b0
- MX6SX_PAD_CSI_HSYNC__AUDMUX_AUD6_TXD 0x120b0
- MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD 0x130b0
- MX6SX_PAD_CSI_PIXCLK__AUDMUX_MCLK 0x130b0
- >;
- };
-
- pinctrl_enet1: enet1grp {
- fsl,pins = <
- MX6SX_PAD_ENET1_MDIO__ENET1_MDIO 0xa0b1
- MX6SX_PAD_ENET1_MDC__ENET1_MDC 0xa0b1
- MX6SX_PAD_RGMII1_TXC__ENET1_RGMII_TXC 0xa0b1
- MX6SX_PAD_RGMII1_TD0__ENET1_TX_DATA_0 0xa0b1
- MX6SX_PAD_RGMII1_TD1__ENET1_TX_DATA_1 0xa0b1
- MX6SX_PAD_RGMII1_TD2__ENET1_TX_DATA_2 0xa0b1
- MX6SX_PAD_RGMII1_TD3__ENET1_TX_DATA_3 0xa0b1
- MX6SX_PAD_RGMII1_TX_CTL__ENET1_TX_EN 0xa0b1
- MX6SX_PAD_RGMII1_RXC__ENET1_RX_CLK 0x3081
- MX6SX_PAD_RGMII1_RD0__ENET1_RX_DATA_0 0x3081
- MX6SX_PAD_RGMII1_RD1__ENET1_RX_DATA_1 0x3081
- MX6SX_PAD_RGMII1_RD2__ENET1_RX_DATA_2 0x3081
- MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
- MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
- MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
- >;
- };
-
- pinctrl_enet_3v3: enet3v3grp {
- fsl,pins = <
- MX6SX_PAD_ENET2_COL__GPIO2_IO_6 0x80000000
- >;
- };
-
- pinctrl_enet2: enet2grp {
- fsl,pins = <
- MX6SX_PAD_RGMII2_TXC__ENET2_RGMII_TXC 0xa0b9
- MX6SX_PAD_RGMII2_TD0__ENET2_TX_DATA_0 0xa0b1
- MX6SX_PAD_RGMII2_TD1__ENET2_TX_DATA_1 0xa0b1
- MX6SX_PAD_RGMII2_TD2__ENET2_TX_DATA_2 0xa0b1
- MX6SX_PAD_RGMII2_TD3__ENET2_TX_DATA_3 0xa0b1
- MX6SX_PAD_RGMII2_TX_CTL__ENET2_TX_EN 0xa0b1
- MX6SX_PAD_RGMII2_RXC__ENET2_RX_CLK 0x3081
- MX6SX_PAD_RGMII2_RD0__ENET2_RX_DATA_0 0x3081
- MX6SX_PAD_RGMII2_RD1__ENET2_RX_DATA_1 0x3081
- MX6SX_PAD_RGMII2_RD2__ENET2_RX_DATA_2 0x3081
- MX6SX_PAD_RGMII2_RD3__ENET2_RX_DATA_3 0x3081
- MX6SX_PAD_RGMII2_RX_CTL__ENET2_RX_EN 0x3081
- >;
- };
-
- pinctrl_gpio_keys: gpio_keysgrp {
- fsl,pins = <
- MX6SX_PAD_CSI_DATA04__GPIO1_IO_18 0x17059
- MX6SX_PAD_CSI_DATA05__GPIO1_IO_19 0x17059
- >;
- };
-
- pinctrl_i2c1: i2c1grp {
- fsl,pins = <
- MX6SX_PAD_GPIO1_IO01__I2C1_SDA 0x4001b8b1
- MX6SX_PAD_GPIO1_IO00__I2C1_SCL 0x4001b8b1
- >;
- };
-
- pinctrl_i2c4: i2c4grp {
- fsl,pins = <
- MX6SX_PAD_CSI_DATA07__I2C4_SDA 0x4001b8b1
- MX6SX_PAD_CSI_DATA06__I2C4_SCL 0x4001b8b1
- >;
- };
-
- pinctrl_lcd: lcdgrp {
- fsl,pins = <
- MX6SX_PAD_LCD1_DATA00__LCDIF1_DATA_0 0x4001b0b0
- MX6SX_PAD_LCD1_DATA01__LCDIF1_DATA_1 0x4001b0b0
- MX6SX_PAD_LCD1_DATA02__LCDIF1_DATA_2 0x4001b0b0
- MX6SX_PAD_LCD1_DATA03__LCDIF1_DATA_3 0x4001b0b0
- MX6SX_PAD_LCD1_DATA04__LCDIF1_DATA_4 0x4001b0b0
- MX6SX_PAD_LCD1_DATA05__LCDIF1_DATA_5 0x4001b0b0
- MX6SX_PAD_LCD1_DATA06__LCDIF1_DATA_6 0x4001b0b0
- MX6SX_PAD_LCD1_DATA07__LCDIF1_DATA_7 0x4001b0b0
- MX6SX_PAD_LCD1_DATA08__LCDIF1_DATA_8 0x4001b0b0
- MX6SX_PAD_LCD1_DATA09__LCDIF1_DATA_9 0x4001b0b0
- MX6SX_PAD_LCD1_DATA10__LCDIF1_DATA_10 0x4001b0b0
- MX6SX_PAD_LCD1_DATA11__LCDIF1_DATA_11 0x4001b0b0
- MX6SX_PAD_LCD1_DATA12__LCDIF1_DATA_12 0x4001b0b0
- MX6SX_PAD_LCD1_DATA13__LCDIF1_DATA_13 0x4001b0b0
- MX6SX_PAD_LCD1_DATA14__LCDIF1_DATA_14 0x4001b0b0
- MX6SX_PAD_LCD1_DATA15__LCDIF1_DATA_15 0x4001b0b0
- MX6SX_PAD_LCD1_DATA16__LCDIF1_DATA_16 0x4001b0b0
- MX6SX_PAD_LCD1_DATA17__LCDIF1_DATA_17 0x4001b0b0
- MX6SX_PAD_LCD1_DATA18__LCDIF1_DATA_18 0x4001b0b0
- MX6SX_PAD_LCD1_DATA19__LCDIF1_DATA_19 0x4001b0b0
- MX6SX_PAD_LCD1_DATA20__LCDIF1_DATA_20 0x4001b0b0
- MX6SX_PAD_LCD1_DATA21__LCDIF1_DATA_21 0x4001b0b0
- MX6SX_PAD_LCD1_DATA22__LCDIF1_DATA_22 0x4001b0b0
- MX6SX_PAD_LCD1_DATA23__LCDIF1_DATA_23 0x4001b0b0
- MX6SX_PAD_LCD1_CLK__LCDIF1_CLK 0x4001b0b0
- MX6SX_PAD_LCD1_ENABLE__LCDIF1_ENABLE 0x4001b0b0
- MX6SX_PAD_LCD1_VSYNC__LCDIF1_VSYNC 0x4001b0b0
- MX6SX_PAD_LCD1_HSYNC__LCDIF1_HSYNC 0x4001b0b0
- MX6SX_PAD_LCD1_RESET__GPIO3_IO_27 0x4001b0b0
- >;
- };
-
- pinctrl_peri_3v3: peri3v3grp {
- fsl,pins = <
- MX6SX_PAD_QSPI1A_DATA0__GPIO4_IO_16 0x80000000
- >;
- };
-
- pinctrl_pwm3: pwm3grp-1 {
- fsl,pins = <
- MX6SX_PAD_SD1_DATA2__PWM3_OUT 0x110b0
- >;
- };
-
- pinctrl_qspi2: qspi2grp {
- fsl,pins = <
- MX6SX_PAD_NAND_WP_B__QSPI2_A_DATA_0 0x70f1
- MX6SX_PAD_NAND_READY_B__QSPI2_A_DATA_1 0x70f1
- MX6SX_PAD_NAND_CE0_B__QSPI2_A_DATA_2 0x70f1
- MX6SX_PAD_NAND_CE1_B__QSPI2_A_DATA_3 0x70f1
- MX6SX_PAD_NAND_CLE__QSPI2_A_SCLK 0x70f1
- MX6SX_PAD_NAND_ALE__QSPI2_A_SS0_B 0x70f1
- MX6SX_PAD_NAND_DATA01__QSPI2_B_DATA_0 0x70f1
- MX6SX_PAD_NAND_DATA00__QSPI2_B_DATA_1 0x70f1
- MX6SX_PAD_NAND_WE_B__QSPI2_B_DATA_2 0x70f1
- MX6SX_PAD_NAND_RE_B__QSPI2_B_DATA_3 0x70f1
- MX6SX_PAD_NAND_DATA02__QSPI2_B_SCLK 0x70f1
- MX6SX_PAD_NAND_DATA03__QSPI2_B_SS0_B 0x70f1
- >;
- };
-
- pinctrl_vcc_sd3: vccsd3grp {
- fsl,pins = <
- MX6SX_PAD_KEY_COL1__GPIO2_IO_11 0x17059
- >;
- };
-
- pinctrl_uart1: uart1grp {
- fsl,pins = <
- MX6SX_PAD_GPIO1_IO04__UART1_TX 0x1b0b1
- MX6SX_PAD_GPIO1_IO05__UART1_RX 0x1b0b1
- >;
- };
-
- pinctrl_uart5: uart5grp {
- fsl,pins = <
- MX6SX_PAD_KEY_ROW3__UART5_RX 0x1b0b1
- MX6SX_PAD_KEY_COL3__UART5_TX 0x1b0b1
- MX6SX_PAD_KEY_ROW2__UART5_CTS_B 0x1b0b1
- MX6SX_PAD_KEY_COL2__UART5_RTS_B 0x1b0b1
- >;
- };
-
- pinctrl_usb_otg1: usbotg1grp {
- fsl,pins = <
- MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9 0x10b0
- >;
- };
-
- pinctrl_usb_otg1_id: usbotg1idgrp {
- fsl,pins = <
- MX6SX_PAD_GPIO1_IO10__ANATOP_OTG1_ID 0x17059
- >;
- };
-
- pinctrl_usb_otg2: usbot2ggrp {
- fsl,pins = <
- MX6SX_PAD_GPIO1_IO12__GPIO1_IO_12 0x10b0
- >;
- };
-
- pinctrl_usdhc2: usdhc2grp {
- fsl,pins = <
- MX6SX_PAD_SD2_CMD__USDHC2_CMD 0x17059
- MX6SX_PAD_SD2_CLK__USDHC2_CLK 0x10059
- MX6SX_PAD_SD2_DATA0__USDHC2_DATA0 0x17059
- MX6SX_PAD_SD2_DATA1__USDHC2_DATA1 0x17059
- MX6SX_PAD_SD2_DATA2__USDHC2_DATA2 0x17059
- MX6SX_PAD_SD2_DATA3__USDHC2_DATA3 0x17059
- >;
- };
-
- pinctrl_usdhc3: usdhc3grp {
- fsl,pins = <
- MX6SX_PAD_SD3_CMD__USDHC3_CMD 0x17059
- MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x10059
- MX6SX_PAD_SD3_DATA0__USDHC3_DATA0 0x17059
- MX6SX_PAD_SD3_DATA1__USDHC3_DATA1 0x17059
- MX6SX_PAD_SD3_DATA2__USDHC3_DATA2 0x17059
- MX6SX_PAD_SD3_DATA3__USDHC3_DATA3 0x17059
- MX6SX_PAD_SD3_DATA4__USDHC3_DATA4 0x17059
- MX6SX_PAD_SD3_DATA5__USDHC3_DATA5 0x17059
- MX6SX_PAD_SD3_DATA6__USDHC3_DATA6 0x17059
- MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x17059
- MX6SX_PAD_KEY_COL0__GPIO2_IO_10 0x17059 /* CD */
- MX6SX_PAD_KEY_ROW0__GPIO2_IO_15 0x17059 /* WP */
- >;
- };
-
- pinctrl_usdhc3_100mhz: usdhc3grp-100mhz {
- fsl,pins = <
- MX6SX_PAD_SD3_CMD__USDHC3_CMD 0x170b9
- MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x100b9
- MX6SX_PAD_SD3_DATA0__USDHC3_DATA0 0x170b9
- MX6SX_PAD_SD3_DATA1__USDHC3_DATA1 0x170b9
- MX6SX_PAD_SD3_DATA2__USDHC3_DATA2 0x170b9
- MX6SX_PAD_SD3_DATA3__USDHC3_DATA3 0x170b9
- MX6SX_PAD_SD3_DATA4__USDHC3_DATA4 0x170b9
- MX6SX_PAD_SD3_DATA5__USDHC3_DATA5 0x170b9
- MX6SX_PAD_SD3_DATA6__USDHC3_DATA6 0x170b9
- MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x170b9
- >;
- };
-
- pinctrl_usdhc3_200mhz: usdhc3grp-200mhz {
- fsl,pins = <
- MX6SX_PAD_SD3_CMD__USDHC3_CMD 0x170f9
- MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x100f9
- MX6SX_PAD_SD3_DATA0__USDHC3_DATA0 0x170f9
- MX6SX_PAD_SD3_DATA1__USDHC3_DATA1 0x170f9
- MX6SX_PAD_SD3_DATA2__USDHC3_DATA2 0x170f9
- MX6SX_PAD_SD3_DATA3__USDHC3_DATA3 0x170f9
- MX6SX_PAD_SD3_DATA4__USDHC3_DATA4 0x170f9
- MX6SX_PAD_SD3_DATA5__USDHC3_DATA5 0x170f9
- MX6SX_PAD_SD3_DATA6__USDHC3_DATA6 0x170f9
- MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x170f9
- >;
- };
-
- pinctrl_usdhc4: usdhc4grp {
- fsl,pins = <
- MX6SX_PAD_SD4_CMD__USDHC4_CMD 0x17059
- MX6SX_PAD_SD4_CLK__USDHC4_CLK 0x10059
- MX6SX_PAD_SD4_DATA0__USDHC4_DATA0 0x17059
- MX6SX_PAD_SD4_DATA1__USDHC4_DATA1 0x17059
- MX6SX_PAD_SD4_DATA2__USDHC4_DATA2 0x17059
- MX6SX_PAD_SD4_DATA3__USDHC4_DATA3 0x17059
- MX6SX_PAD_SD4_DATA7__GPIO6_IO_21 0x17059 /* CD */
- MX6SX_PAD_SD4_DATA6__GPIO6_IO_20 0x17059 /* WP */
- >;
- };
+ compatible = "micron,n25q256a";
+ spi-max-frequency = <29000000>;
+ reg = <1>;
};
};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
new file mode 100644
index 000000000000..cef04cef3a80
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "imx6sx.dtsi"
+
+/ {
+ model = "Freescale i.MX6 SoloX SDB Board";
+ compatible = "fsl,imx6sx-sdb", "fsl,imx6sx";
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ memory {
+ reg = <0x80000000 0x40000000>;
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm3 0 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_keys>;
+
+ volume-up {
+ label = "Volume Up";
+ gpios = <&gpio1 18 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ volume-down {
+ label = "Volume Down";
+ gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vcc_sd3: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_vcc_sd3>;
+ regulator-name = "VCC_SD3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ gpio = <&gpio2 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_otg1_vbus: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_otg1>;
+ regulator-name = "usb_otg1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_otg2_vbus: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_otg2>;
+ regulator-name = "usb_otg2_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_psu_5v: regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <3>;
+ regulator-name = "PSU-5V0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_lcd_3v3: regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <4>;
+ regulator-name = "lcd-3v3";
+ gpio = <&gpio3 27 0>;
+ enable-active-high;
+ };
+
+ reg_peri_3v3: regulator@5 {
+ compatible = "regulator-fixed";
+ reg = <5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_peri_3v3>;
+ regulator-name = "peri_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_enet_3v3: regulator@6 {
+ compatible = "regulator-fixed";
+ reg = <6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_3v3>;
+ regulator-name = "enet_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx6sx-sdb-wm8962", "fsl,imx-audio-wm8962";
+ model = "wm8962-audio";
+ ssi-controller = <&ssi2>;
+ audio-codec = <&codec>;
+ audio-routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "Ext Spk", "SPKOUTL",
+ "Ext Spk", "SPKOUTR",
+ "AMIC", "MICBIAS",
+ "IN3R", "AMIC";
+ mux-int-port = <2>;
+ mux-ext-port = <6>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>;
+ phy-supply = <&reg_enet_3v3>;
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy2>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ codec: wm8962@1a {
+ compatible = "wlf,wm8962";
+ reg = <0x1a>;
+ clocks = <&clks IMX6SX_CLK_AUDIO>;
+ DCVDD-supply = <&vgen4_reg>;
+ DBVDD-supply = <&vgen4_reg>;
+ AVDD-supply = <&vgen4_reg>;
+ CPVDD-supply = <&vgen4_reg>;
+ MICVDD-supply = <&vgen3_reg>;
+ PLLVDD-supply = <&vgen4_reg>;
+ SPKVDD1-supply = <&reg_psu_5v>;
+ SPKVDD2-supply = <&reg_psu_5v>;
+ };
+};
+
+&lcdif1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd>;
+ lcd-supply = <&reg_lcd_3v3>;
+ display = <&display0>;
+ status = "okay";
+
+ display0: display0 {
+ bits-per-pixel = <16>;
+ bus-width = <24>;
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <33500000>;
+ hactive = <800>;
+ vactive = <480>;
+ hback-porch = <89>;
+ hfront-porch = <164>;
+ vback-porch = <23>;
+ vfront-porch = <10>;
+ hsync-len = <10>;
+ vsync-len = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+ status = "okay";
+};
+
+&snvs_poweroff {
+ status = "okay";
+};
+
+&ssi2 {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart5 { /* for bluetooth */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
+
+&usbotg1 {
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_otg1_id>;
+ status = "okay";
+};
+
+&usbotg2 {
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ non-removable;
+ no-1-8-v;
+ keep-power-in-suspend;
+ enable-sdio-wakeup;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+ wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+ keep-power-in-suspend;
+ enable-sdio-wakeup;
+ vmmc-supply = <&vcc_sd3>;
+ status = "okay";
+};
+
+&usdhc4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4>;
+ cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
+ wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&iomuxc {
+ imx6x-sdb {
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6SX_PAD_CSI_DATA00__AUDMUX_AUD6_TXC 0x130b0
+ MX6SX_PAD_CSI_DATA01__AUDMUX_AUD6_TXFS 0x130b0
+ MX6SX_PAD_CSI_HSYNC__AUDMUX_AUD6_TXD 0x120b0
+ MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD 0x130b0
+ MX6SX_PAD_CSI_PIXCLK__AUDMUX_MCLK 0x130b0
+ >;
+ };
+
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX6SX_PAD_ENET1_MDIO__ENET1_MDIO 0xa0b1
+ MX6SX_PAD_ENET1_MDC__ENET1_MDC 0xa0b1
+ MX6SX_PAD_RGMII1_TXC__ENET1_RGMII_TXC 0xa0b1
+ MX6SX_PAD_RGMII1_TD0__ENET1_TX_DATA_0 0xa0b1
+ MX6SX_PAD_RGMII1_TD1__ENET1_TX_DATA_1 0xa0b1
+ MX6SX_PAD_RGMII1_TD2__ENET1_TX_DATA_2 0xa0b1
+ MX6SX_PAD_RGMII1_TD3__ENET1_TX_DATA_3 0xa0b1
+ MX6SX_PAD_RGMII1_TX_CTL__ENET1_TX_EN 0xa0b1
+ MX6SX_PAD_RGMII1_RXC__ENET1_RX_CLK 0x3081
+ MX6SX_PAD_RGMII1_RD0__ENET1_RX_DATA_0 0x3081
+ MX6SX_PAD_RGMII1_RD1__ENET1_RX_DATA_1 0x3081
+ MX6SX_PAD_RGMII1_RD2__ENET1_RX_DATA_2 0x3081
+ MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
+ MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
+ MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
+ >;
+ };
+
+ pinctrl_enet_3v3: enet3v3grp {
+ fsl,pins = <
+ MX6SX_PAD_ENET2_COL__GPIO2_IO_6 0x80000000
+ >;
+ };
+
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX6SX_PAD_RGMII2_TXC__ENET2_RGMII_TXC 0xa0b9
+ MX6SX_PAD_RGMII2_TD0__ENET2_TX_DATA_0 0xa0b1
+ MX6SX_PAD_RGMII2_TD1__ENET2_TX_DATA_1 0xa0b1
+ MX6SX_PAD_RGMII2_TD2__ENET2_TX_DATA_2 0xa0b1
+ MX6SX_PAD_RGMII2_TD3__ENET2_TX_DATA_3 0xa0b1
+ MX6SX_PAD_RGMII2_TX_CTL__ENET2_TX_EN 0xa0b1
+ MX6SX_PAD_RGMII2_RXC__ENET2_RX_CLK 0x3081
+ MX6SX_PAD_RGMII2_RD0__ENET2_RX_DATA_0 0x3081
+ MX6SX_PAD_RGMII2_RD1__ENET2_RX_DATA_1 0x3081
+ MX6SX_PAD_RGMII2_RD2__ENET2_RX_DATA_2 0x3081
+ MX6SX_PAD_RGMII2_RD3__ENET2_RX_DATA_3 0x3081
+ MX6SX_PAD_RGMII2_RX_CTL__ENET2_RX_EN 0x3081
+ >;
+ };
+
+ pinctrl_gpio_keys: gpio_keysgrp {
+ fsl,pins = <
+ MX6SX_PAD_CSI_DATA04__GPIO1_IO_18 0x17059
+ MX6SX_PAD_CSI_DATA05__GPIO1_IO_19 0x17059
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6SX_PAD_GPIO1_IO01__I2C1_SDA 0x4001b8b1
+ MX6SX_PAD_GPIO1_IO00__I2C1_SCL 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX6SX_PAD_CSI_DATA07__I2C4_SDA 0x4001b8b1
+ MX6SX_PAD_CSI_DATA06__I2C4_SCL 0x4001b8b1
+ >;
+ };
+
+ pinctrl_lcd: lcdgrp {
+ fsl,pins = <
+ MX6SX_PAD_LCD1_DATA00__LCDIF1_DATA_0 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA01__LCDIF1_DATA_1 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA02__LCDIF1_DATA_2 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA03__LCDIF1_DATA_3 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA04__LCDIF1_DATA_4 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA05__LCDIF1_DATA_5 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA06__LCDIF1_DATA_6 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA07__LCDIF1_DATA_7 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA08__LCDIF1_DATA_8 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA09__LCDIF1_DATA_9 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA10__LCDIF1_DATA_10 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA11__LCDIF1_DATA_11 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA12__LCDIF1_DATA_12 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA13__LCDIF1_DATA_13 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA14__LCDIF1_DATA_14 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA15__LCDIF1_DATA_15 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA16__LCDIF1_DATA_16 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA17__LCDIF1_DATA_17 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA18__LCDIF1_DATA_18 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA19__LCDIF1_DATA_19 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA20__LCDIF1_DATA_20 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA21__LCDIF1_DATA_21 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA22__LCDIF1_DATA_22 0x4001b0b0
+ MX6SX_PAD_LCD1_DATA23__LCDIF1_DATA_23 0x4001b0b0
+ MX6SX_PAD_LCD1_CLK__LCDIF1_CLK 0x4001b0b0
+ MX6SX_PAD_LCD1_ENABLE__LCDIF1_ENABLE 0x4001b0b0
+ MX6SX_PAD_LCD1_VSYNC__LCDIF1_VSYNC 0x4001b0b0
+ MX6SX_PAD_LCD1_HSYNC__LCDIF1_HSYNC 0x4001b0b0
+ MX6SX_PAD_LCD1_RESET__GPIO3_IO_27 0x4001b0b0
+ >;
+ };
+
+ pinctrl_peri_3v3: peri3v3grp {
+ fsl,pins = <
+ MX6SX_PAD_QSPI1A_DATA0__GPIO4_IO_16 0x80000000
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp-1 {
+ fsl,pins = <
+ MX6SX_PAD_SD1_DATA2__PWM3_OUT 0x110b0
+ >;
+ };
+
+ pinctrl_qspi2: qspi2grp {
+ fsl,pins = <
+ MX6SX_PAD_NAND_WP_B__QSPI2_A_DATA_0 0x70f1
+ MX6SX_PAD_NAND_READY_B__QSPI2_A_DATA_1 0x70f1
+ MX6SX_PAD_NAND_CE0_B__QSPI2_A_DATA_2 0x70f1
+ MX6SX_PAD_NAND_CE1_B__QSPI2_A_DATA_3 0x70f1
+ MX6SX_PAD_NAND_CLE__QSPI2_A_SCLK 0x70f1
+ MX6SX_PAD_NAND_ALE__QSPI2_A_SS0_B 0x70f1
+ MX6SX_PAD_NAND_DATA01__QSPI2_B_DATA_0 0x70f1
+ MX6SX_PAD_NAND_DATA00__QSPI2_B_DATA_1 0x70f1
+ MX6SX_PAD_NAND_WE_B__QSPI2_B_DATA_2 0x70f1
+ MX6SX_PAD_NAND_RE_B__QSPI2_B_DATA_3 0x70f1
+ MX6SX_PAD_NAND_DATA02__QSPI2_B_SCLK 0x70f1
+ MX6SX_PAD_NAND_DATA03__QSPI2_B_SS0_B 0x70f1
+ >;
+ };
+
+ pinctrl_vcc_sd3: vccsd3grp {
+ fsl,pins = <
+ MX6SX_PAD_KEY_COL1__GPIO2_IO_11 0x17059
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6SX_PAD_GPIO1_IO04__UART1_TX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO05__UART1_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6SX_PAD_KEY_ROW3__UART5_RX 0x1b0b1
+ MX6SX_PAD_KEY_COL3__UART5_TX 0x1b0b1
+ MX6SX_PAD_KEY_ROW2__UART5_CTS_B 0x1b0b1
+ MX6SX_PAD_KEY_COL2__UART5_RTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_usb_otg1: usbotg1grp {
+ fsl,pins = <
+ MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9 0x10b0
+ >;
+ };
+
+ pinctrl_usb_otg1_id: usbotg1idgrp {
+ fsl,pins = <
+ MX6SX_PAD_GPIO1_IO10__ANATOP_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usb_otg2: usbot2ggrp {
+ fsl,pins = <
+ MX6SX_PAD_GPIO1_IO12__GPIO1_IO_12 0x10b0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6SX_PAD_SD2_CMD__USDHC2_CMD 0x17059
+ MX6SX_PAD_SD2_CLK__USDHC2_CLK 0x10059
+ MX6SX_PAD_SD2_DATA0__USDHC2_DATA0 0x17059
+ MX6SX_PAD_SD2_DATA1__USDHC2_DATA1 0x17059
+ MX6SX_PAD_SD2_DATA2__USDHC2_DATA2 0x17059
+ MX6SX_PAD_SD2_DATA3__USDHC2_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6SX_PAD_SD3_CMD__USDHC3_CMD 0x17059
+ MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x10059
+ MX6SX_PAD_SD3_DATA0__USDHC3_DATA0 0x17059
+ MX6SX_PAD_SD3_DATA1__USDHC3_DATA1 0x17059
+ MX6SX_PAD_SD3_DATA2__USDHC3_DATA2 0x17059
+ MX6SX_PAD_SD3_DATA3__USDHC3_DATA3 0x17059
+ MX6SX_PAD_SD3_DATA4__USDHC3_DATA4 0x17059
+ MX6SX_PAD_SD3_DATA5__USDHC3_DATA5 0x17059
+ MX6SX_PAD_SD3_DATA6__USDHC3_DATA6 0x17059
+ MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x17059
+ MX6SX_PAD_KEY_COL0__GPIO2_IO_10 0x17059 /* CD */
+ MX6SX_PAD_KEY_ROW0__GPIO2_IO_15 0x17059 /* WP */
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp-100mhz {
+ fsl,pins = <
+ MX6SX_PAD_SD3_CMD__USDHC3_CMD 0x170b9
+ MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x100b9
+ MX6SX_PAD_SD3_DATA0__USDHC3_DATA0 0x170b9
+ MX6SX_PAD_SD3_DATA1__USDHC3_DATA1 0x170b9
+ MX6SX_PAD_SD3_DATA2__USDHC3_DATA2 0x170b9
+ MX6SX_PAD_SD3_DATA3__USDHC3_DATA3 0x170b9
+ MX6SX_PAD_SD3_DATA4__USDHC3_DATA4 0x170b9
+ MX6SX_PAD_SD3_DATA5__USDHC3_DATA5 0x170b9
+ MX6SX_PAD_SD3_DATA6__USDHC3_DATA6 0x170b9
+ MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp-200mhz {
+ fsl,pins = <
+ MX6SX_PAD_SD3_CMD__USDHC3_CMD 0x170f9
+ MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x100f9
+ MX6SX_PAD_SD3_DATA0__USDHC3_DATA0 0x170f9
+ MX6SX_PAD_SD3_DATA1__USDHC3_DATA1 0x170f9
+ MX6SX_PAD_SD3_DATA2__USDHC3_DATA2 0x170f9
+ MX6SX_PAD_SD3_DATA3__USDHC3_DATA3 0x170f9
+ MX6SX_PAD_SD3_DATA4__USDHC3_DATA4 0x170f9
+ MX6SX_PAD_SD3_DATA5__USDHC3_DATA5 0x170f9
+ MX6SX_PAD_SD3_DATA6__USDHC3_DATA6 0x170f9
+ MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x170f9
+ >;
+ };
+
+ pinctrl_usdhc4: usdhc4grp {
+ fsl,pins = <
+ MX6SX_PAD_SD4_CMD__USDHC4_CMD 0x17059
+ MX6SX_PAD_SD4_CLK__USDHC4_CLK 0x10059
+ MX6SX_PAD_SD4_DATA0__USDHC4_DATA0 0x17059
+ MX6SX_PAD_SD4_DATA1__USDHC4_DATA1 0x17059
+ MX6SX_PAD_SD4_DATA2__USDHC4_DATA2 0x17059
+ MX6SX_PAD_SD4_DATA3__USDHC4_DATA3 0x17059
+ MX6SX_PAD_SD4_DATA7__GPIO6_IO_21 0x17059 /* CD */
+ MX6SX_PAD_SD4_DATA6__GPIO6_IO_20 0x17059 /* WP */
+ >;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 7a24fee1e7ae..708175d59b9c 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -88,6 +88,7 @@
interrupt-controller;
reg = <0x00a01000 0x1000>,
<0x00a00100 0x100>;
+ interrupt-parent = <&intc>;
};
clocks {
@@ -131,7 +132,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
- interrupt-parent = <&intc>;
+ interrupt-parent = <&gpc>;
ranges;
pmu {
@@ -666,7 +667,7 @@
#size-cells = <1>;
ranges = <0 0x020cc000 0x4000>;
- snvs-rtc-lp@34 {
+ snvs_rtc: snvs-rtc-lp@34 {
compatible = "fsl,sec-v4.0-mon-rtc-lp";
reg = <0x34 0x58>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
@@ -700,7 +701,10 @@
gpc: gpc@020dc000 {
compatible = "fsl,imx6sx-gpc", "fsl,imx6q-gpc";
reg = <0x020dc000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&intc>;
};
iomuxc: iomuxc@020e0000 {
@@ -763,6 +767,7 @@
fsl,usbmisc = <&usbmisc 2>;
phy_type = "hsic";
fsl,anatop = <&anatop>;
+ dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/kirkwood-nas2big.dts b/arch/arm/boot/dts/kirkwood-nas2big.dts
new file mode 100644
index 000000000000..7427ec50b829
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-nas2big.dts
@@ -0,0 +1,143 @@
+/*
+ * Device Tree file for LaCie 2Big NAS
+ *
+ * Copyright (C) 2015 Seagate
+ *
+ * Author: Simon Guinot <simon.guinot@sequanux.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+#include "kirkwood-netxbig.dtsi"
+
+/ {
+ model = "LaCie 2Big NAS";
+ compatible = "lacie,nas2big", "lacie,netxbig", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ stdout-path = &uart0;
+ };
+
+ mbus {
+ pcie-controller {
+ status = "okay";
+
+ pcie@1,0 {
+ status = "okay";
+ };
+ };
+ };
+
+ ocp@f1000000 {
+ rtc@10300 {
+ /* The on-chip RTC is not powered (no supercap). */
+ status = "disabled";
+ };
+ spi@10600 {
+ /*
+ * A NAND flash is used here instead of the SPI flash
+ * found on the other netxbig-compatible boards.
+ */
+ status = "disabled";
+ };
+ };
+
+ fan {
+ /*
+ * An I2C fan controller (GMT G762) is used, but its alarm
+ * signal is wired to a separate GPIO.
+ */
+ compatible = "gpio-fan";
+ alarm-gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;
+ };
+
+ regulators: regulators {
+ status = "okay";
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+
+ regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "hdd1power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 17 GPIO_ACTIVE_HIGH>;
+ };
+ clocks {
+ g762_clk: g762-oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+ };
+};
+
+&mdio {
+ status = "okay";
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ /*
+ * An external I2C RTC (Dallas DS1337S+) is used. This allows
+ * the board to be powered up on an RTC alarm. The external RTC
+ * can be kept powered even when the SoC is off.
+ */
+ rtc@68 {
+ compatible = "dallas,ds1307";
+ reg = <0x68>;
+ interrupts = <43>;
+ };
+ g762@3e {
+ compatible = "gmt,g762";
+ reg = <0x3e>;
+ clocks = <&g762_clk>;
+ };
+};
+
+&nand {
+ chip-delay = <50>;
+ status = "okay";
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x100000 0x1000000>;
+ };
+
+ partition@1100000 {
+ label = "root";
+ reg = <0x1100000 0x8000000>;
+ };
+
+ partition@9100000 {
+ label = "unused";
+ reg = <0x9100000 0x6f00000>;
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-net2big.dts b/arch/arm/boot/dts/kirkwood-net2big.dts
index 53dc37a3b687..13a44773b6df 100644
--- a/arch/arm/boot/dts/kirkwood-net2big.dts
+++ b/arch/arm/boot/dts/kirkwood-net2big.dts
@@ -27,6 +27,11 @@
device_type = "memory";
reg = <0x00000000 0x10000000>;
};
+
+ fan {
+ compatible = "gpio-fan";
+ alarm-gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;
+ };
};
&regulators {
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index b67ede515bcd..548441384d2a 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -150,5 +150,25 @@
interrupts = <0 15 1>;
status = "disabled";
};
+
+ spifc: spi@c1108c80 {
+ compatible = "amlogic,meson6-spifc";
+ reg = <0xc1108c80 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk81>;
+ status = "disabled";
+ };
+
+ ethmac: ethernet@c9410000 {
+ compatible = "amlogic,meson6-dwmac", "snps,dwmac";
+ reg = <0xc9410000 0x10000
+ 0xc1108108 0x4>;
+ interrupts = <0 8 1>;
+ interrupt-names = "macirq";
+ clocks = <&clk81>;
+ clock-names = "stmmaceth";
+ status = "disabled";
+ };
};
}; /* end of / */
diff --git a/arch/arm/boot/dts/meson6-atv1200.dts b/arch/arm/boot/dts/meson6-atv1200.dts
index d7d351a68944..1237faa63ce6 100644
--- a/arch/arm/boot/dts/meson6-atv1200.dts
+++ b/arch/arm/boot/dts/meson6-atv1200.dts
@@ -64,3 +64,7 @@
&uart_AO {
status = "okay";
};
+
+&ethmac {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/meson8-minix-neo-x8.dts b/arch/arm/boot/dts/meson8-minix-neo-x8.dts
new file mode 100644
index 000000000000..4f536bb1f002
--- /dev/null
+++ b/arch/arm/boot/dts/meson8-minix-neo-x8.dts
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "meson8.dtsi"
+
+/ {
+ model = "MINIX NEO-X8";
+ compatible = "minix,neo-x8", "amlogic,meson8";
+
+ aliases {
+ serial0 = &uart_AO;
+ };
+
+ memory {
+ reg = <0x40000000 0x80000000>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ blue {
+ label = "x8:blue:power";
+ gpios = <&gpio_ao GPIO_TEST_N GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&i2c_AO {
+ status = "okay";
+ pinctrl-0 = <&i2c_ao_pins>;
+ pinctrl-names = "default";
+
+ pmic@32 {
+ compatible = "ricoh,rn5t618";
+ reg = <0x32>;
+
+ regulators {
+ };
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&spifc {
+ status = "okay";
+ pinctrl-0 = <&spi_nor_pins>;
+ pinctrl-names = "default";
+
+ spi-flash@0 {
+ compatible = "mxicy,mx25l1606e";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+ spi-max-frequency = <30000000>;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ label = "env";
+ reg = <0x100000 0x10000>;
+ };
+ };
+};
+
+&ir_receiver {
+ status = "okay";
+ pinctrl-0 = <&ir_recv_pins>;
+ pinctrl-names = "default";
+};
+
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index 1f442a7fe03b..a2ddcb8c545a 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -43,6 +43,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <dt-bindings/gpio/meson8-gpio.h>
/include/ "meson.dtsi"
/ {
@@ -89,4 +90,71 @@
compatible = "fixed-clock";
clock-frequency = <141666666>;
};
+
+ pinctrl: pinctrl@c1109880 {
+ compatible = "amlogic,meson8-pinctrl";
+ reg = <0xc1109880 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gpio: banks@c11080b0 {
+ reg = <0xc11080b0 0x28>,
+ <0xc11080e8 0x18>,
+ <0xc1108120 0x18>,
+ <0xc1108030 0x30>;
+ reg-names = "mux", "pull", "pull-enable", "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio_ao: ao-bank@c1108030 {
+ reg = <0xc8100014 0x4>,
+ <0xc810002c 0x4>,
+ <0xc8100024 0x8>;
+ reg-names = "mux", "pull", "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ uart_ao_a_pins: uart_ao_a {
+ mux {
+ groups = "uart_tx_ao_a", "uart_rx_ao_a";
+ function = "uart_ao";
+ };
+ };
+
+ i2c_ao_pins: i2c_mst_ao {
+ mux {
+ groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
+ function = "i2c_mst_ao";
+ };
+ };
+
+ spi_nor_pins: nor {
+ mux {
+ groups = "nor_d", "nor_q", "nor_c", "nor_cs";
+ function = "nor";
+ };
+ };
+
+ ir_recv_pins: remote {
+ mux {
+ groups = "remote_input";
+ function = "remote";
+ };
+ };
+
+ eth_pins: ethernet {
+ mux {
+ groups = "eth_tx_clk_50m", "eth_tx_en",
+ "eth_txd1", "eth_txd0",
+ "eth_rx_clk_in", "eth_rx_dv",
+ "eth_rxd1", "eth_rxd0", "eth_mdio",
+ "eth_mdc";
+ function = "ethernet";
+ };
+ };
+ };
+
}; /* end of / */
diff --git a/arch/arm/boot/dts/mt6589.dtsi b/arch/arm/boot/dts/mt6589.dtsi
index 106b61b10030..88b3cb128698 100644
--- a/arch/arm/boot/dts/mt6589.dtsi
+++ b/arch/arm/boot/dts/mt6589.dtsi
@@ -138,5 +138,10 @@
clocks = <&uart_clk>;
status = "disabled";
};
+
+ wdt: watchdog@010000000 {
+ compatible = "mediatek,mt6589-wdt";
+ reg = <0x10000000 0x44>;
+ };
};
};
diff --git a/arch/arm/boot/dts/nspire-classic.dtsi b/arch/arm/boot/dts/nspire-classic.dtsi
index 9565199bce7a..4907c5085d4b 100644
--- a/arch/arm/boot/dts/nspire-classic.dtsi
+++ b/arch/arm/boot/dts/nspire-classic.dtsi
@@ -51,6 +51,11 @@
compatible = "lsi,nspire-classic-ahb-divider";
};
+
+&vbus_reg {
+ gpio = <&gpio 5 0>;
+};
+
/ {
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/nspire-cx.dts b/arch/arm/boot/dts/nspire-cx.dts
index 375b924f60d8..08e0b81b3385 100644
--- a/arch/arm/boot/dts/nspire-cx.dts
+++ b/arch/arm/boot/dts/nspire-cx.dts
@@ -69,6 +69,10 @@
0x0709001d 0x070a0033 >;
};
+&vbus_reg {
+ gpio = <&gpio 2 0>;
+};
+
/ {
model = "TI-NSPIRE CX";
compatible = "ti,nspire-cx";
diff --git a/arch/arm/boot/dts/nspire.dtsi b/arch/arm/boot/dts/nspire.dtsi
index a22ffe633b49..390c91aea16d 100644
--- a/arch/arm/boot/dts/nspire.dtsi
+++ b/arch/arm/boot/dts/nspire.dtsi
@@ -54,6 +54,20 @@
clocks = <&ahb_clk>;
};
+ usb_phy: usb_phy {
+ compatible = "usb-nop-xceiv";
+ };
+
+ vbus_reg: vbus_reg {
+ compatible = "regulator-fixed";
+
+ regulator-name = "USB VBUS output";
+ regulator-type = "voltage";
+
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
ahb {
compatible = "simple-bus";
#address-cells = <1>;
@@ -65,8 +79,12 @@
};
usb0: usb@B0000000 {
+ compatible = "lsi,zevio-usb";
reg = <0xB0000000 0x1000>;
interrupts = <8>;
+
+ usb-phy = <&usb_phy>;
+ vbus-supply = <&vbus_reg>;
};
usb1: usb@B4000000 {
@@ -105,8 +123,11 @@
ranges;
gpio: gpio@90000000 {
+ compatible = "lsi,zevio-gpio";
reg = <0x90000000 0x1000>;
interrupts = <7>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
fast_timer: timer@90010000 {
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index e2b2e93d7b61..5b9a376cc31e 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -14,47 +14,65 @@
compatible = "ti,omap2420", "ti,omap2";
ocp {
- prcm: prcm@48008000 {
- compatible = "ti,omap2-prcm";
- reg = <0x48008000 0x1000>;
+ l4: l4@48000000 {
+ compatible = "ti,omap2-l4", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x48000000 0x100000>;
- prcm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
+ prcm: prcm@8000 {
+ compatible = "ti,omap2-prcm";
+ reg = <0x8000 0x1000>;
- prcm_clockdomains: clockdomains {
- };
- };
+ prcm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- scrm: scrm@48000000 {
- compatible = "ti,omap2-scrm";
- reg = <0x48000000 0x1000>;
+ prcm_clockdomains: clockdomains {
+ };
+ };
- scrm_clocks: clocks {
+ scm: scm@0 {
+ compatible = "ti,omap2-scm", "simple-bus";
+ reg = <0x0 0x1000>;
#address-cells = <1>;
- #size-cells = <0>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x1000>;
+
+ omap2420_pmx: pinmux@30 {
+ compatible = "ti,omap2420-padconf",
+ "pinctrl-single";
+ reg = <0x30 0x0113>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <8>;
+ pinctrl-single,function-mask = <0x3f>;
+ };
+
+ scm_conf: scm_conf@270 {
+ compatible = "syscon";
+ reg = <0x270 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ scm_clockdomains: clockdomains {
+ };
};
- scrm_clockdomains: clockdomains {
+ counter32k: counter@4000 {
+ compatible = "ti,omap-counter32k";
+ reg = <0x4000 0x20>;
+ ti,hwmods = "counter_32k";
};
};
- counter32k: counter@48004000 {
- compatible = "ti,omap-counter32k";
- reg = <0x48004000 0x20>;
- ti,hwmods = "counter_32k";
- };
-
- omap2420_pmx: pinmux@48000030 {
- compatible = "ti,omap2420-padconf", "pinctrl-single";
- reg = <0x48000030 0x0113>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-single,register-width = <8>;
- pinctrl-single,function-mask = <0x3f>;
- };
-
gpio1: gpio@48018000 {
compatible = "ti,omap2-gpio";
reg = <0x48018000 0x200>;
diff --git a/arch/arm/boot/dts/omap2430-clocks.dtsi b/arch/arm/boot/dts/omap2430-clocks.dtsi
index 805f75df1cf2..93fed68839b9 100644
--- a/arch/arm/boot/dts/omap2430-clocks.dtsi
+++ b/arch/arm/boot/dts/omap2430-clocks.dtsi
@@ -8,12 +8,12 @@
* published by the Free Software Foundation.
*/
-&scrm_clocks {
+&scm_clocks {
mcbsp3_mux_fck: mcbsp3_mux_fck {
#clock-cells = <0>;
compatible = "ti,composite-mux-clock";
clocks = <&func_96m_ck>, <&mcbsp_clks>;
- reg = <0x02e8>;
+ reg = <0x78>;
};
mcbsp3_fck: mcbsp3_fck {
@@ -27,7 +27,7 @@
compatible = "ti,composite-mux-clock";
clocks = <&func_96m_ck>, <&mcbsp_clks>;
ti,bit-shift = <2>;
- reg = <0x02e8>;
+ reg = <0x78>;
};
mcbsp4_fck: mcbsp4_fck {
@@ -41,7 +41,7 @@
compatible = "ti,composite-mux-clock";
clocks = <&func_96m_ck>, <&mcbsp_clks>;
ti,bit-shift = <4>;
- reg = <0x02e8>;
+ reg = <0x78>;
};
mcbsp5_fck: mcbsp5_fck {
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 0dc8de2782b1..11a7963be003 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -14,60 +14,73 @@
compatible = "ti,omap2430", "ti,omap2";
ocp {
- prcm: prcm@49006000 {
- compatible = "ti,omap2-prcm";
- reg = <0x49006000 0x1000>;
+ l4_wkup: l4_wkup@49000000 {
+ compatible = "ti,omap2-l4-wkup", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x49000000 0x31000>;
- prcm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
+ prcm: prcm@6000 {
+ compatible = "ti,omap2-prcm";
+ reg = <0x6000 0x1000>;
- prcm_clockdomains: clockdomains {
- };
- };
-
- scrm: scrm@49002000 {
- compatible = "ti,omap2-scrm";
- reg = <0x49002000 0x1000>;
+ prcm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- scrm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
+ prcm_clockdomains: clockdomains {
+ };
};
- scrm_clockdomains: clockdomains {
+ scm: scm@2000 {
+ compatible = "ti,omap2-scm", "simple-bus";
+ reg = <0x2000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x1000>;
+
+ omap2430_pmx: pinmux@30 {
+ compatible = "ti,omap2430-padconf",
+ "pinctrl-single";
+ reg = <0x30 0x0154>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <8>;
+ pinctrl-single,function-mask = <0x3f>;
+ };
+
+ scm_conf: scm_conf@270 {
+ compatible = "syscon";
+ reg = <0x270 0x240>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ pbias_regulator: pbias_regulator {
+ compatible = "ti,pbias-omap";
+ reg = <0x230 0x4>;
+ syscon = <&scm_conf>;
+ pbias_mmc_reg: pbias_mmc_omap2430 {
+ regulator-name = "pbias_mmc_omap2430";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
+
+ scm_clockdomains: clockdomains {
+ };
};
- };
-
- counter32k: counter@49020000 {
- compatible = "ti,omap-counter32k";
- reg = <0x49020000 0x20>;
- ti,hwmods = "counter_32k";
- };
-
- omap2430_pmx: pinmux@49002030 {
- compatible = "ti,omap2430-padconf", "pinctrl-single";
- reg = <0x49002030 0x0154>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-single,register-width = <8>;
- pinctrl-single,function-mask = <0x3f>;
- };
-
- omap2_scm_general: tisyscon@49002270 {
- compatible = "syscon";
- reg = <0x49002270 0x240>;
- };
- pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
- reg = <0x230 0x4>;
- syscon = <&omap2_scm_general>;
- pbias_mmc_reg: pbias_mmc_omap2430 {
- regulator-name = "pbias_mmc_omap2430";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3000000>;
+ counter32k: counter@20000 {
+ compatible = "ti,omap-counter32k";
+ reg = <0x20000 0x20>;
+ ti,hwmods = "counter_32k";
};
};
diff --git a/arch/arm/boot/dts/omap24xx-clocks.dtsi b/arch/arm/boot/dts/omap24xx-clocks.dtsi
index a1365ca926eb..63965b876973 100644
--- a/arch/arm/boot/dts/omap24xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap24xx-clocks.dtsi
@@ -7,13 +7,13 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-&scrm_clocks {
+&scm_clocks {
mcbsp1_mux_fck: mcbsp1_mux_fck {
#clock-cells = <0>;
compatible = "ti,composite-mux-clock";
clocks = <&func_96m_ck>, <&mcbsp_clks>;
ti,bit-shift = <2>;
- reg = <0x0274>;
+ reg = <0x4>;
};
mcbsp1_fck: mcbsp1_fck {
@@ -27,7 +27,7 @@
compatible = "ti,composite-mux-clock";
clocks = <&func_96m_ck>, <&mcbsp_clks>;
ti,bit-shift = <6>;
- reg = <0x0274>;
+ reg = <0x4>;
};
mcbsp2_fck: mcbsp2_fck {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 25f7b0a22114..7c4dca122a91 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -60,7 +60,6 @@
ti,model = "omap3beagle";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
gpio_keys {
@@ -150,7 +149,6 @@
compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0x5401b000 0x1000>;
- coresight-default-sink;
clocks = <&emu_src_ck>;
clock-names = "apb_pclk";
port {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index c792391ef090..a5474113cd50 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -71,7 +71,6 @@
ti,model = "omap3beagle";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
gpio_keys {
@@ -145,7 +144,6 @@
compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0x5401b000 0x1000>;
- coresight-default-sink;
clocks = <&emu_src_ck>;
clock-names = "apb_pclk";
port {
@@ -379,3 +377,55 @@
};
};
};
+
+&gpmc {
+ status = "ok";
+ ranges = <0 0 0x30000000 0x1000000>; /* CS0 space, 16MB */
+
+ /* Chip select 0 */
+ nand@0,0 {
+ reg = <0 0 4>; /* NAND I/O window, 4 bytes */
+ interrupts = <20>;
+ ti,nand-ecc-opt = "ham1";
+ nand-bus-width = <16>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ gpmc,device-width = <2>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <36>;
+ gpmc,cs-wr-off-ns = <36>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <24>;
+ gpmc,adv-wr-off-ns = <36>;
+ gpmc,oe-on-ns = <6>;
+ gpmc,oe-off-ns = <48>;
+ gpmc,we-on-ns = <6>;
+ gpmc,we-off-ns = <30>;
+ gpmc,rd-cycle-ns = <72>;
+ gpmc,wr-cycle-ns = <72>;
+ gpmc,access-ns = <54>;
+ gpmc,wr-access-ns = <30>;
+
+ partition@0 {
+ label = "X-Loader";
+ reg = <0 0x80000>;
+ };
+ partition@80000 {
+ label = "U-Boot";
+ reg = <0x80000 0x1e0000>;
+ };
+ partition@1c0000 {
+ label = "U-Boot Env";
+ reg = <0x260000 0x20000>;
+ };
+ partition@280000 {
+ label = "Kernel";
+ reg = <0x280000 0x400000>;
+ };
+ partition@780000 {
+ label = "Filesystem";
+ reg = <0x680000 0xf980000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-cm-t3517.dts b/arch/arm/boot/dts/omap3-cm-t3517.dts
index 0ab748cf7749..f5b5a1d96cd7 100644
--- a/arch/arm/boot/dts/omap3-cm-t3517.dts
+++ b/arch/arm/boot/dts/omap3-cm-t3517.dts
@@ -133,6 +133,16 @@
non-removable;
bus-width = <4>;
cap-power-off-card;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1271";
+ reg = <2>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; /* gpio 145 */
+ ref-clock-frequency = <38400000>;
+ };
};
&dss {
diff --git a/arch/arm/boot/dts/omap3-cm-t3730.dts b/arch/arm/boot/dts/omap3-cm-t3730.dts
index 46eadb21b5ef..2294f5b0aa10 100644
--- a/arch/arm/boot/dts/omap3-cm-t3730.dts
+++ b/arch/arm/boot/dts/omap3-cm-t3730.dts
@@ -73,6 +73,16 @@
non-removable;
bus-width = <4>;
cap-power-off-card;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1271";
+ reg = <2>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; /* gpio 136 */
+ ref-clock-frequency = <38400000>;
+ };
};
&dss {
diff --git a/arch/arm/boot/dts/omap3-cm-t3x30.dtsi b/arch/arm/boot/dts/omap3-cm-t3x30.dtsi
index d9e92b654f85..046cd7733c4f 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x30.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x30.dtsi
@@ -16,7 +16,6 @@
ti,model = "cm-t35";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
};
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index 169037e5ff53..134d3f27a8ec 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -48,7 +48,6 @@
ti,model = "devkit8000";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
ti,audio-routing =
"Ext Spk", "PREDRIVEL",
"Ext Spk", "PREDRIVER",
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index 127f3e7c10c4..346552b94d9f 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -106,6 +106,16 @@
non-removable;
bus-width = <4>;
cap-power-off-card;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1271";
+ reg = <2>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 149 */
+ ref-clock-frequency = <38400000>;
+ };
};
&twl_gpio {
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index fb3a69604ed5..b9f68817bd6e 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -46,7 +46,6 @@
ti,model = "gta04";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
spi_lcd {
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 8a63ad2286aa..d5e5cd449b16 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -22,7 +22,6 @@
compatible = "ti,omap-twl4030";
ti,model = "igep2";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
vdd33: regulator-vdd33 {
diff --git a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
index cc8bd0cd8cf8..72f7cdc091fb 100644
--- a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
+++ b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
@@ -42,4 +42,13 @@
vmmc-supply = <&lbep5clwmc_wlen>;
bus-width = <4>;
non-removable;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1835";
+ reg = <2>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; /* gpio 177 */
+ };
};
diff --git a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
index 9326b282c94a..b899e341874a 100644
--- a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
+++ b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
@@ -64,4 +64,13 @@
vmmc-supply = <&lbep5clwmc_wlen>;
bus-width = <4>;
non-removable;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1835";
+ reg = <2>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; /* gpio 136 */
+ };
};
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index e81fb651d5d0..e63133304a34 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -38,7 +38,6 @@
ti,model = "lilly-a83x";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
reg_vcc3: vcc3 {
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index 9938b5dc1909..f2e213931e09 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -16,3 +16,40 @@
model = "Nokia N9";
compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
};
+
+&i2c2 {
+ smia_1: camera@10 {
+ compatible = "nokia,smia";
+ reg = <0x10>;
+ /* No reset gpio */
+ vana-supply = <&vaux3>;
+ clocks = <&isp 0>;
+ clock-frequency = <9600000>;
+ nokia,nvm-size = <(16 * 64)>;
+ port {
+ smia_1_1: endpoint {
+ link-frequencies = /bits/ 64 <199200000 210000000 499200000>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ remote-endpoint = <&csi2a_ep>;
+ };
+ };
+ };
+};
+
+&isp {
+ vdd-csiphy1-supply = <&vaux2>;
+ vdd-csiphy2-supply = <&vaux2>;
+ ports {
+ port@2 {
+ reg = <2>;
+ csi2a_ep: endpoint {
+ remote-endpoint = <&smia_1_1>;
+ clock-lanes = <2>;
+ data-lanes = <1 3>;
+ crc = <1>;
+ lane-polarities = <1 1 1>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index db80f9d376fa..5c16145920ea 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -9,9 +9,23 @@
/dts-v1/;
-#include "omap34xx-hs.dtsi"
+#include "omap34xx.dtsi"
#include <dt-bindings/input/input.h>
+/*
+ * The default secure signed bootloader (Nokia X-Loader) does not enable the
+ * L3 firewall for the OMAP AES HW crypto block. When the kernel tries to
+ * access the AES block's memory, it receives "Unhandled fault: external abort
+ * on non-linefetch" and crashes. Until omap-aes.c and omap_hwmod_3xxx_data.c
+ * are fixed so this no longer crashes, OMAP AES support is disabled for all
+ * Nokia N900 devices. An "unofficial" bootloader that enables AES in the L3
+ * firewall exists, but it is not widely used, so AES is disabled here to
+ * prevent the kernel crash. There is also no runtime detection of whether AES
+ * is enabled in the L3 firewall.
+ */
+&aes {
+ status = "disabled";
+};
+
/ {
model = "Nokia N900";
compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
@@ -484,6 +498,8 @@
DRVDD-supply = <&vmmc2>;
IOVDD-supply = <&vio>;
DVDD-supply = <&vio>;
+
+ ai3x-micbias-vg = <1>;
};
tlv320aic3x_aux: tlv320aic3x@19 {
@@ -495,6 +511,8 @@
DRVDD-supply = <&vmmc2>;
IOVDD-supply = <&vio>;
DVDD-supply = <&vio>;
+
+ ai3x-micbias-vg = <2>;
};
tsl2563: tsl2563@29 {
@@ -609,6 +627,58 @@
pinctrl-0 = <&i2c3_pins>;
clock-frequency = <400000>;
+
+ lis302dl: lis3lv02d@1d {
+ compatible = "st,lis3lv02d";
+ reg = <0x1d>;
+
+ Vdd-supply = <&vaux1>;
+ Vdd_IO-supply = <&vio>;
+
+ interrupt-parent = <&gpio6>;
+ interrupts = <21 20>; /* 181 and 180 */
+
+ /* click flags */
+ st,click-single-x;
+ st,click-single-y;
+ st,click-single-z;
+
+ /* Limits are 0.5g * value */
+ st,click-threshold-x = <8>;
+ st,click-threshold-y = <8>;
+ st,click-threshold-z = <10>;
+
+ /* Click must be longer than time limit */
+ st,click-time-limit = <9>;
+
+ /* Kind of debounce filter */
+ st,click-latency = <50>;
+
+ /* Interrupt line 2 for click detection */
+ st,irq2-click;
+
+ st,wakeup-x-hi;
+ st,wakeup-y-hi;
+ st,wakeup-threshold = <(800/18)>; /* millig-value / 18 to get HW values */
+
+ st,wakeup2-z-hi;
+ st,wakeup2-threshold = <(900/18)>; /* millig-value / 18 to get HW values */
+
+ st,hipass1-disable;
+ st,hipass2-disable;
+
+ st,axis-x = <1>; /* LIS3_DEV_X */
+ st,axis-y = <(-2)>; /* LIS3_INV_DEV_Y */
+ st,axis-z = <(-3)>; /* LIS3_INV_DEV_Z */
+
+ st,min-limit-x = <(-32)>;
+ st,min-limit-y = <3>;
+ st,min-limit-z = <3>;
+
+ st,max-limit-x = <(-3)>;
+ st,max-limit-y = <32>;
+ st,max-limit-z = <32>;
+ };
};
&mmc1 {
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index c41db94ee9c2..800b379d368d 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*/
-#include "omap36xx-hs.dtsi"
+#include "omap36xx.dtsi"
/ {
cpus {
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index 261c5589bfa3..0885b34d5d7d 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -16,3 +16,40 @@
model = "Nokia N950";
compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
};
+
+&i2c2 {
+ smia_1: camera@10 {
+ compatible = "nokia,smia";
+ reg = <0x10>;
+ /* No reset gpio */
+ vana-supply = <&vaux3>;
+ clocks = <&isp 0>;
+ clock-frequency = <9600000>;
+ nokia,nvm-size = <(16 * 64)>;
+ port {
+ smia_1_1: endpoint {
+ link-frequencies = /bits/ 64 <210000000 333600000 398400000>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ remote-endpoint = <&csi2a_ep>;
+ };
+ };
+ };
+};
+
+&isp {
+ vdd-csiphy1-supply = <&vaux2>;
+ vdd-csiphy2-supply = <&vaux2>;
+ ports {
+ port@2 {
+ reg = <2>;
+ csi2a_ep: endpoint {
+ remote-endpoint = <&smia_1_1>;
+ clock-lanes = <2>;
+ data-lanes = <3 1>;
+ crc = <1>;
+ lane-polarities = <1 1 1>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index d36bf0250a05..18e1649681c1 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -27,7 +27,6 @@
ti,model = "overo";
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
/* HS USB Port 2 Power */
diff --git a/arch/arm/boot/dts/omap3-pandora-1ghz.dts b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
new file mode 100644
index 000000000000..9619a28dfd7d
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2015
+ * Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * device tree for OpenPandora 1GHz with DM3730
+ */
+
+/dts-v1/;
+
+#include "omap36xx.dtsi"
+#include "omap3-pandora-common.dtsi"
+
+/ {
+ model = "Pandora Handheld Console 1GHz";
+
+ compatible = "ti,omap36xx", "ti,omap3";
+};
+
+&omap3_pmx_core2 {
+
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &hsusb2_2_pins
+ &control_pins
+ >;
+
+ hsusb2_2_pins: pinmux_hsusb2_2_pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
+ OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
+ OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
+ OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
+ OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
+ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
+ >;
+ };
+
+ mmc3_pins: pinmux_mmc3_pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
+ OMAP3630_CORE2_IOPAD(0x25da, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_ctl.sdmmc3_cmd */
+ OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
+ OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
+ OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+ OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
+ >;
+ };
+
+ control_pins: pinmux_control_pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25dc, PIN_INPUT_PULLDOWN | MUX_MODE4) /* etk_d0.gpio_14 = HP_SHUTDOWN */
+ OMAP3630_CORE2_IOPAD(0x25de, PIN_OUTPUT | MUX_MODE4) /* etk_d1.gpio_15 = BT_SHUTDOWN */
+ OMAP3630_CORE2_IOPAD(0x25e0, PIN_OUTPUT | MUX_MODE4) /* etk_d2.gpio_16 = RESET_USB_HOST */
+ OMAP3630_CORE2_IOPAD(0x25ea, PIN_INPUT | MUX_MODE4) /* etk_d7.gpio_21 = WIFI IRQ */
+ OMAP3630_CORE2_IOPAD(0x25ec, PIN_OUTPUT | MUX_MODE4) /* etk_d8.gpio_22 = MSECURE */
+ OMAP3630_CORE2_IOPAD(0x25ee, PIN_OUTPUT | MUX_MODE4) /* etk_d9.gpio_23 = WIFI_POWER */
+ OMAP3_WKUP_IOPAD(0x2a54, PIN_INPUT | MUX_MODE4) /* reserved.gpio_127 = MMC2_WP */
+ OMAP3_WKUP_IOPAD(0x2a56, PIN_INPUT | MUX_MODE4) /* reserved.gpio_126 = MMC1_WP */
+ OMAP3_WKUP_IOPAD(0x2a58, PIN_OUTPUT | MUX_MODE4) /* reserved.gpio_128 = LED_MMC1 */
+ OMAP3_WKUP_IOPAD(0x2a5a, PIN_OUTPUT | MUX_MODE4) /* reserved.gpio_129 = LED_MMC2 */
+
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-pandora-600mhz.dts b/arch/arm/boot/dts/omap3-pandora-600mhz.dts
new file mode 100644
index 000000000000..fb803a70a2bb
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-pandora-600mhz.dts
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015
+ * Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * device tree for OpenPandora with OMAP3530
+ */
+
+/dts-v1/;
+
+#include "omap34xx.dtsi"
+#include "omap3-pandora-common.dtsi"
+
+/ {
+ model = "Pandora Handheld Console";
+
+ compatible = "ti,omap3";
+};
+
+&omap3_pmx_core2 {
+
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &hsusb2_2_pins
+ &control_pins
+ >;
+
+ hsusb2_2_pins: pinmux_hsusb2_2_pins {
+ pinctrl-single,pins = <
+ OMAP3430_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
+ OMAP3430_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
+ OMAP3430_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
+ OMAP3430_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
+ OMAP3430_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
+ OMAP3430_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
+ >;
+ };
+
+ mmc3_pins: pinmux_mmc3_pins {
+ pinctrl-single,pins = <
+ OMAP3430_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
+ OMAP3430_CORE2_IOPAD(0x25da, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_ctl.sdmmc3_cmd */
+ OMAP3430_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
+ OMAP3430_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
+ OMAP3430_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+ OMAP3430_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
+ >;
+ };
+
+ control_pins: pinmux_control_pins {
+ pinctrl-single,pins = <
+ OMAP3430_CORE2_IOPAD(0x25dc, PIN_INPUT_PULLDOWN | MUX_MODE4) /* etk_d0.gpio_14 = HP_SHUTDOWN */
+ OMAP3430_CORE2_IOPAD(0x25de, PIN_OUTPUT | MUX_MODE4) /* etk_d1.gpio_15 = BT_SHUTDOWN */
+ OMAP3430_CORE2_IOPAD(0x25e0, PIN_OUTPUT | MUX_MODE4) /* etk_d2.gpio_16 = RESET_USB_HOST */
+ OMAP3430_CORE2_IOPAD(0x25ea, PIN_INPUT | MUX_MODE4) /* etk_d7.gpio_21 = WIFI IRQ */
+ OMAP3430_CORE2_IOPAD(0x25ec, PIN_OUTPUT | MUX_MODE4) /* etk_d8.gpio_22 = MSECURE */
+ OMAP3430_CORE2_IOPAD(0x25ee, PIN_OUTPUT | MUX_MODE4) /* etk_d9.gpio_23 = WIFI_POWER */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
new file mode 100644
index 000000000000..782ab1ff1d08
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -0,0 +1,640 @@
+/*
+ * Copyright (C) 2015
+ * Nikolaus Schaller <hns@goldelico.com>
+ *
+ * Common device tree include for OpenPandora devices.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/input/input.h>
+
+/ {
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vcc>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+ };
+
+ aliases {
+ display0 = &lcd;
+ };
+
+ tv: connector@1 {
+ compatible = "connector-analog-tv";
+ label = "tv";
+
+ port {
+ tv_connector_in: endpoint {
+ remote-endpoint = <&venc_out>;
+ };
+ };
+ };
+
+ gpio-leds {
+
+ compatible = "gpio-leds";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ led@1 {
+ label = "pandora::sd1";
+ gpios = <&gpio5 0 GPIO_ACTIVE_HIGH>; /* GPIO_128 */
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led@2 {
+ label = "pandora::sd2";
+ gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>; /* GPIO_129 */
+ linux,default-trigger = "mmc1";
+ default-state = "off";
+ };
+
+ led@3 {
+ label = "pandora::bluetooth";
+ gpios = <&gpio5 30 GPIO_ACTIVE_HIGH>; /* GPIO_158 */
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led@4 {
+ label = "pandora::wifi";
+ gpios = <&gpio5 31 GPIO_ACTIVE_HIGH>; /* GPIO_159 */
+ linux,default-trigger = "mmc2";
+ default-state = "off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&button_pins>;
+
+ up-button {
+ label = "up";
+ linux,code = <KEY_UP>;
+ gpios = <&gpio4 14 GPIO_ACTIVE_LOW>; /* GPIO_110 */
+ gpio-key,wakeup;
+ };
+
+ down-button {
+ label = "down";
+ linux,code = <KEY_DOWN>;
+ gpios = <&gpio4 7 GPIO_ACTIVE_LOW>; /* GPIO_103 */
+ gpio-key,wakeup;
+ };
+
+ left-button {
+ label = "left";
+ linux,code = <KEY_LEFT>;
+ gpios = <&gpio4 0 GPIO_ACTIVE_LOW>; /* GPIO_96 */
+ gpio-key,wakeup;
+ };
+
+ right-button {
+ label = "right";
+ linux,code = <KEY_RIGHT>;
+ gpios = <&gpio4 2 GPIO_ACTIVE_LOW>; /* GPIO_98 */
+ gpio-key,wakeup;
+ };
+
+ pageup-button {
+ label = "game 1";
+ linux,code = <KEY_PAGEUP>;
+ gpios = <&gpio4 13 GPIO_ACTIVE_LOW>; /* GPIO_109 */
+ gpio-key,wakeup;
+ };
+
+ pagedown-button {
+ label = "game 3";
+ linux,code = <KEY_PAGEDOWN>;
+ gpios = <&gpio4 10 GPIO_ACTIVE_LOW>; /* GPIO_106 */
+ gpio-key,wakeup;
+ };
+
+ home-button {
+ label = "game 4";
+ linux,code = <KEY_HOME>;
+ gpios = <&gpio4 5 GPIO_ACTIVE_LOW>; /* GPIO_101 */
+ gpio-key,wakeup;
+ };
+
+ end-button {
+ label = "game 2";
+ linux,code = <KEY_END>;
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* GPIO_111 */
+ gpio-key,wakeup;
+ };
+
+ right-shift {
+ label = "l";
+ linux,code = <KEY_RIGHTSHIFT>;
+ gpios = <&gpio4 6 GPIO_ACTIVE_LOW>; /* GPIO_102 */
+ gpio-key,wakeup;
+ };
+
+ kp-plus {
+ label = "l2";
+ linux,code = <KEY_KPPLUS>;
+ gpios = <&gpio4 1 GPIO_ACTIVE_LOW>; /* GPIO_97 */
+ gpio-key,wakeup;
+ };
+
+ right-ctrl {
+ label = "r";
+ linux,code = <KEY_RIGHTCTRL>;
+ gpios = <&gpio4 9 GPIO_ACTIVE_LOW>; /* GPIO_105 */
+ gpio-key,wakeup;
+ };
+
+ kp-minus {
+ label = "r2";
+ linux,code = <KEY_KPMINUS>;
+ gpios = <&gpio4 11 GPIO_ACTIVE_LOW>; /* GPIO_107 */
+ gpio-key,wakeup;
+ };
+
+ left-ctrl {
+ label = "ctrl";
+ linux,code = <KEY_LEFTCTRL>;
+ gpios = <&gpio4 8 GPIO_ACTIVE_LOW>; /* GPIO_104 */
+ gpio-key,wakeup;
+ };
+
+ menu {
+ label = "menu";
+ linux,code = <KEY_MENU>;
+ gpios = <&gpio4 3 GPIO_ACTIVE_LOW>; /* GPIO_99 */
+ gpio-key,wakeup;
+ };
+
+ hold {
+ label = "hold";
+ linux,code = <KEY_COFFEE>;
+ gpios = <&gpio6 16 GPIO_ACTIVE_LOW>; /* GPIO_176 */
+ gpio-key,wakeup;
+ };
+
+ left-alt {
+ label = "alt";
+ linux,code = <KEY_LEFTALT>;
+ gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>; /* GPIO_100 */
+ gpio-key,wakeup;
+ };
+
+ lid {
+ label = "lid";
+ linux,code = <0x00>; /* SW_LID lid shut */
+ linux,input-type = <0x05>; /* EV_SW */
+ gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>; /* GPIO_108 */
+ };
+ };
+};
+
+&omap3_pmx_core {
+
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
+ OMAP3_CORE1_IOPAD(0x2146, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_cmd.sdmmc1_cmd */
+ OMAP3_CORE1_IOPAD(0x2148, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat0.sdmmc1_dat0 */
+ OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
+ OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
+ OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
+ >;
+ };
+
+ mmc2_pins: pinmux_mmc2_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
+ OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
+ OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
+ OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+ OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
+ OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
+ OMAP3_CORE1_IOPAD(0x2164, PIN_OUTPUT_PULLUP | MUX_MODE1) /* sdmmc2_dat4.sdmmc2_dirdat0 */
+ OMAP3_CORE1_IOPAD(0x2166, PIN_OUTPUT_PULLUP | MUX_MODE1) /* sdmmc2_dat5.sdmmc2_dirdat1 */
+ OMAP3_CORE1_IOPAD(0x2168, PIN_OUTPUT_PULLUP | MUX_MODE1) /* sdmmc2_dat6.sdmmc2_dircmd */
+ OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE1) /* sdmmc2_dat7.sdmmc2_clkin */
+ >;
+ };
+
+ dss_dpi_pins: pinmux_dss_dpi_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x20d4, PIN_OUTPUT | MUX_MODE0) /* dss_pclk.dss_pclk */
+ OMAP3_CORE1_IOPAD(0x20d6, PIN_OUTPUT | MUX_MODE0) /* dss_hsync.dss_hsync */
+ OMAP3_CORE1_IOPAD(0x20d8, PIN_OUTPUT | MUX_MODE0) /* dss_vsync.dss_vsync */
+ OMAP3_CORE1_IOPAD(0x20da, PIN_OUTPUT | MUX_MODE0) /* dss_acbias.dss_acbias */
+ OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE0) /* dss_data0.dss_data0 */
+ OMAP3_CORE1_IOPAD(0x20de, PIN_OUTPUT | MUX_MODE0) /* dss_data1.dss_data1 */
+ OMAP3_CORE1_IOPAD(0x20e0, PIN_OUTPUT | MUX_MODE0) /* dss_data2.dss_data2 */
+ OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE0) /* dss_data3.dss_data3 */
+ OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE0) /* dss_data4.dss_data4 */
+ OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE0) /* dss_data5.dss_data5 */
+ OMAP3_CORE1_IOPAD(0x20e8, PIN_OUTPUT | MUX_MODE0) /* dss_data6.dss_data6 */
+ OMAP3_CORE1_IOPAD(0x20ea, PIN_OUTPUT | MUX_MODE0) /* dss_data7.dss_data7 */
+ OMAP3_CORE1_IOPAD(0x20ec, PIN_OUTPUT | MUX_MODE0) /* dss_data8.dss_data8 */
+ OMAP3_CORE1_IOPAD(0x20ee, PIN_OUTPUT | MUX_MODE0) /* dss_data9.dss_data9 */
+ OMAP3_CORE1_IOPAD(0x20f0, PIN_OUTPUT | MUX_MODE0) /* dss_data10.dss_data10 */
+ OMAP3_CORE1_IOPAD(0x20f2, PIN_OUTPUT | MUX_MODE0) /* dss_data11.dss_data11 */
+ OMAP3_CORE1_IOPAD(0x20f4, PIN_OUTPUT | MUX_MODE0) /* dss_data12.dss_data12 */
+ OMAP3_CORE1_IOPAD(0x20f6, PIN_OUTPUT | MUX_MODE0) /* dss_data13.dss_data13 */
+ OMAP3_CORE1_IOPAD(0x20f8, PIN_OUTPUT | MUX_MODE0) /* dss_data14.dss_data14 */
+ OMAP3_CORE1_IOPAD(0x20fa, PIN_OUTPUT | MUX_MODE0) /* dss_data15.dss_data15 */
+ OMAP3_CORE1_IOPAD(0x20fc, PIN_OUTPUT | MUX_MODE0) /* dss_data16.dss_data16 */
+ OMAP3_CORE1_IOPAD(0x20fe, PIN_OUTPUT | MUX_MODE0) /* dss_data17.dss_data17 */
+ OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE0) /* dss_data18.dss_data18 */
+ OMAP3_CORE1_IOPAD(0x2102, PIN_OUTPUT | MUX_MODE0) /* dss_data19.dss_data19 */
+ OMAP3_CORE1_IOPAD(0x2104, PIN_OUTPUT | MUX_MODE0) /* dss_data20.dss_data20 */
+ OMAP3_CORE1_IOPAD(0x2106, PIN_OUTPUT | MUX_MODE0) /* dss_data21.dss_data21 */
+ OMAP3_CORE1_IOPAD(0x2108, PIN_OUTPUT | MUX_MODE0) /* dss_data22.dss_data22 */
+ OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE0) /* dss_data23.dss_data23 */
+ OMAP3_CORE1_IOPAD(0x218e, PIN_OUTPUT | MUX_MODE4) /* GPIO_157 = lcd reset */
+ >;
+ };
+
+ uart3_pins: pinmux_uart3_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | PIN_OFF_WAKEUPENABLE | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+ OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
+ >;
+ };
+
+ led_pins: pinmux_leds_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2154, PIN_OUTPUT | MUX_MODE4) /* GPIO_128 */
+ OMAP3_CORE1_IOPAD(0x2156, PIN_OUTPUT | MUX_MODE4) /* GPIO_129 */
+ OMAP3_CORE1_IOPAD(0x2190, PIN_OUTPUT | MUX_MODE4) /* GPIO_158 */
+ OMAP3_CORE1_IOPAD(0x2192, PIN_OUTPUT | MUX_MODE4) /* GPIO_159 */
+ >;
+ };
+
+ button_pins: pinmux_button_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE4) /* GPIO_96 */
+ OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE4) /* GPIO_97 */
+ OMAP3_CORE1_IOPAD(0x2114, PIN_INPUT | MUX_MODE4) /* GPIO_98 */
+ OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE4) /* GPIO_99 */
+ OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE4) /* GPIO_100 */
+ OMAP3_CORE1_IOPAD(0x211a, PIN_INPUT | MUX_MODE4) /* GPIO_101 */
+ OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE4) /* GPIO_102 */
+ OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE4) /* GPIO_103 */
+ OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE4) /* GPIO_104 */
+ OMAP3_CORE1_IOPAD(0x2122, PIN_INPUT | MUX_MODE4) /* GPIO_105 */
+ OMAP3_CORE1_IOPAD(0x2124, PIN_INPUT | MUX_MODE4) /* GPIO_106 */
+ OMAP3_CORE1_IOPAD(0x2126, PIN_INPUT | MUX_MODE4) /* GPIO_107 */
+ OMAP3_CORE1_IOPAD(0x2128, PIN_INPUT | MUX_MODE4) /* GPIO_108 */
+ OMAP3_CORE1_IOPAD(0x212a, PIN_INPUT | MUX_MODE4) /* GPIO_109 */
+ OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT | MUX_MODE4) /* GPIO_110 */
+ OMAP3_CORE1_IOPAD(0x212e, PIN_INPUT | MUX_MODE4) /* GPIO_111 */
+ OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4) /* GPIO_176 */
+ >;
+ };
+
+ penirq_pins: pinmux_penirq_pins {
+ pinctrl-single,pins = <
+ /* this pad can be enabled to wake up the CPU from suspend on a pen touch */
+ OMAP3_CORE1_IOPAD(0x210c, PIN_INPUT | MUX_MODE4) /* GPIO_94 */
+ >;
+ };
+
+};
+
+&omap3_pmx_core2 {
+ /* The pins are defined in the CPU-specific file that includes this one;
+ * use either OMAP3430_CORE2_IOPAD() or OMAP3630_CORE2_IOPAD()
+ */
+};
+
+&i2c1 {
+ clock-frequency = <2600000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+
+ twl_power: power {
+ compatible = "ti,twl4030-power-reset";
+ ti,use_poweroff;
+ };
+
+ twl_audio: audio {
+ compatible = "ti,twl4030-audio";
+
+ codec {
+ ti,ramp_delay_value = <3>;
+ };
+ };
+ };
+};
+
+#include "twl4030.dtsi"
+#include "twl4030_omap3.dtsi"
+
+&twl_keypad {
+ keypad,num-rows = <8>;
+ keypad,num-columns = <6>;
+ linux,keymap = <
+ MATRIX_KEY(0, 0, KEY_9)
+ MATRIX_KEY(0, 1, KEY_8)
+ MATRIX_KEY(0, 2, KEY_I)
+ MATRIX_KEY(0, 3, KEY_J)
+ MATRIX_KEY(0, 4, KEY_N)
+ MATRIX_KEY(0, 5, KEY_M)
+ MATRIX_KEY(1, 0, KEY_0)
+ MATRIX_KEY(1, 1, KEY_7)
+ MATRIX_KEY(1, 2, KEY_U)
+ MATRIX_KEY(1, 3, KEY_H)
+ MATRIX_KEY(1, 4, KEY_B)
+ MATRIX_KEY(1, 5, KEY_SPACE)
+ MATRIX_KEY(2, 0, KEY_BACKSPACE)
+ MATRIX_KEY(2, 1, KEY_6)
+ MATRIX_KEY(2, 2, KEY_Y)
+ MATRIX_KEY(2, 3, KEY_G)
+ MATRIX_KEY(2, 4, KEY_V)
+ MATRIX_KEY(2, 5, KEY_FN)
+ MATRIX_KEY(3, 0, KEY_O)
+ MATRIX_KEY(3, 1, KEY_5)
+ MATRIX_KEY(3, 2, KEY_T)
+ MATRIX_KEY(3, 3, KEY_F)
+ MATRIX_KEY(3, 4, KEY_C)
+ MATRIX_KEY(4, 0, KEY_P)
+ MATRIX_KEY(4, 1, KEY_4)
+ MATRIX_KEY(4, 2, KEY_R)
+ MATRIX_KEY(4, 3, KEY_D)
+ MATRIX_KEY(4, 4, KEY_X)
+ MATRIX_KEY(5, 0, KEY_K)
+ MATRIX_KEY(5, 1, KEY_3)
+ MATRIX_KEY(5, 2, KEY_E)
+ MATRIX_KEY(5, 3, KEY_S)
+ MATRIX_KEY(5, 4, KEY_Z)
+ MATRIX_KEY(6, 0, KEY_L)
+ MATRIX_KEY(6, 1, KEY_2)
+ MATRIX_KEY(6, 2, KEY_W)
+ MATRIX_KEY(6, 3, KEY_A)
+ MATRIX_KEY(6, 4, KEY_RIGHTBRACE)
+ MATRIX_KEY(7, 0, KEY_ENTER)
+ MATRIX_KEY(7, 1, KEY_1)
+ MATRIX_KEY(7, 2, KEY_Q)
+ MATRIX_KEY(7, 3, KEY_LEFTSHIFT)
+ MATRIX_KEY(7, 4, KEY_LEFTBRACE)
+ >;
+};
+
+/* backup battery charger */
+&charger {
+ ti,bb-uvolt = <3200000>;
+ ti,bb-uamp = <150>;
+};
+
+/* MMC2 */
+&vmmc2 {
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <3150000>;
+};
+
+/* LCD */
+&vaux1 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+};
+
+/* USB Host PHY */
+&vaux2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+};
+
+/* available on expansion connector */
+&vaux3 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
+/* ADS7846 and nubs */
+&vaux4 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
+/* power audio DAC and LID sensor */
+&vsim {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ /* no clients on this bus, so the clock could be disabled */
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+
+ bq27500@55 {
+ compatible = "ti,bq27500";
+ reg = <0x55>;
+ };
+
+};
+
+&usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+ phys = <&usb2_phy>;
+ phy-names = "usb2-phy";
+ mode = <3>;
+ power = <50>;
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ vmmc-supply = <&vmmc1>;
+ bus-width = <4>;
+ cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio4 30 GPIO_ACTIVE_LOW>; /* GPIO_126 */
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ vmmc-supply = <&vmmc2>;
+ bus-width = <4>;
+ cd-gpios = <&twl_gpio 1 GPIO_ACTIVE_HIGH>;
+ wp-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* GPIO_127 */
+};
+
+/* bluetooth */
+&uart1 {
+};
+
+/* spare (expansion connector) */
+&uart2 {
+};
+
+/* console (expansion connector) */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+ interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>;
+};
+
+&usbhshost {
+ port2-mode = "ehci-phy";
+};
+
+&gpmc {
+ ranges = <0 0 0x30000000 0x1000000>; /* CS0: 16MB for NAND */
+
+ nand@0,0 {
+ reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+ nand-bus-width = <16>;
+ ti,nand-ecc-opt = "sw";
+
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+ gpmc,device-width = <2>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* u-boot uses mtdparts=nand:512k(xloader),1920k(uboot),128k(uboot-env),10m(boot),-(rootfs) */
+
+ x-loader@0 {
+ label = "xloader";
+ reg = <0 0x80000>;
+ };
+
+ bootloaders@80000 {
+ label = "uboot";
+ reg = <0x80000 0x1e0000>;
+ };
+
+ bootloaders_env@260000 {
+ label = "uboot-env";
+ reg = <0x260000 0x20000>;
+ };
+
+ kernel@280000 {
+ label = "boot";
+ reg = <0x280000 0xa00000>;
+ };
+
+ filesystem@680000 {
+ label = "rootfs";
+ reg = <0xc80000 0>; /* 0 = MTDPART_SIZ_FULL */
+ };
+ };
+};
+
+&mcspi1 {
+ tsc2046@0 {
+ reg = <0>; /* CS0 */
+ compatible = "ti,tsc2046";
+ spi-max-frequency = <1000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&penirq_pins>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <30 0>; /* GPIO_94 */
+ pendown-gpio = <&gpio3 30 0>;
+ vcc-supply = <&vaux4>;
+
+ ti,x-min = /bits/ 16 <0>;
+ ti,x-max = /bits/ 16 <8000>;
+ ti,y-min = /bits/ 16 <0>;
+ ti,y-max = /bits/ 16 <4800>;
+ ti,x-plate-ohms = /bits/ 16 <40>;
+ ti,pressure-max = /bits/ 16 <255>;
+
+ linux,wakeup;
+ };
+
+ lcd: lcd@1 {
+ reg = <1>; /* CS1 */
+ compatible = "omapdss,tpo,td043mtea1";
+ spi-max-frequency = <100000>;
+ spi-cpol;
+ spi-cpha;
+
+ label = "lcd";
+ reset-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>; /* GPIO_157 */
+ vcc-supply = <&vaux1>;
+
+ port {
+ lcd_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+ };
+
+
+};
+
+/* n/a - used as GPIOs */
+&mcbsp1 {
+};
+
+/* audio DAC */
+&mcbsp2 {
+};
+
+/* bluetooth */
+&mcbsp3 {
+};
+
+/* to twl4030 */
+&mcbsp4 {
+};
+
+&venc {
+ status = "ok";
+
+ vdda-supply = <&vdac>;
+
+ port {
+ venc_out: endpoint {
+ remote-endpoint = <&tv_connector_in>;
+ ti,channels = <2>;
+ };
+ };
+};
+
+&dss {
+ pinctrl-names = "default";
+ pinctrl-0 = < &dss_dpi_pins >;
+
+ status = "ok";
+ vdds_dsi-supply = <&vpll2>;
+
+ port {
+ dpi_out: endpoint {
+ remote-endpoint = <&lcd_in>;
+ data-lines = <24>;
+ };
+ };
+};
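The NAND layout above can be cross-checked against the u-boot mtdparts comment (a worked check, not part of the patch):

	512k  (xloader)   = 0x080000  -> uboot starts at 0x080000
	1920k (uboot)     = 0x1e0000  -> uboot-env starts at 0x260000
	128k  (uboot-env) = 0x020000  -> boot starts at 0x280000
	10m   (boot)      = 0xa00000  -> rootfs starts at 0xc80000, size 0 = MTDPART_SIZ_FULL

Note that the last partition's unit-address (@680000) does not match its reg offset of 0xc80000; the offset used by the partition parser comes from reg.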
diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi
index e89820a6776e..7bd8d9a4f67f 100644
--- a/arch/arm/boot/dts/omap3-tao3530.dtsi
+++ b/arch/arm/boot/dts/omap3-tao3530.dtsi
@@ -8,7 +8,16 @@
*/
/dts-v1/;
-#include "omap34xx-hs.dtsi"
+#include "omap34xx.dtsi"
+
+/* Secure omaps have some devices inaccessible depending on the firmware */
+&aes {
+ status = "disabled";
+};
+
+&sham {
+ status = "disabled";
+};
/ {
cpus {
@@ -45,7 +54,6 @@
/* McBSP2 is used for onboard sound, same as on beagle */
ti,mcbsp = <&mcbsp2>;
- ti,codec = <&twl_audio>;
};
/* Regulator to enable/switch the vcc of the Wifi module */
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
index 6644f516a42b..131448d86e67 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -195,6 +195,16 @@
cap-power-off-card;
pinctrl-names = "default";
pinctrl-0 = <&mmc3_pins &mmc3_2_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1271";
+ reg = <2>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 162 */
+ ref-clock-frequency = <26000000>;
+ };
};
&uart1 {
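The wlcore child added above follows the generic SDIO child-node binding: #address-cells = <1> / #size-cells = <0> on the MMC controller let the WLAN function be addressed by its SDIO function number, which is what reg = <2> encodes, while the interrupt is delivered out-of-band through a GPIO line. A minimal sketch of the shape (the enclosing &mmc3 label is assumed here, since the hunk context does not show it):

	&mmc3 {
		#address-cells = <1>;
		#size-cells = <0>;
		wlcore: wlcore@2 {
			compatible = "ti,wl1271";
			reg = <2>;				/* SDIO function number */
			interrupt-parent = <&gpio6>;
			interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;	/* out-of-band IRQ, gpio 162 */
			ref-clock-frequency = <26000000>;
		};
	};

The same pattern is repeated below for the panda, sdp and var-som boards.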
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 3fdc84fddb70..69a40cfc1f29 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -87,6 +87,60 @@
ranges;
ti,hwmods = "l3_main";
+ l4_core: l4@48000000 {
+ compatible = "ti,omap3-l4-core", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x48000000 0x1000000>;
+
+ scm: scm@2000 {
+ compatible = "ti,omap3-scm", "simple-bus";
+ reg = <0x2000 0x2000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x2000>;
+
+ omap3_pmx_core: pinmux@30 {
+ compatible = "ti,omap3-padconf",
+ "pinctrl-single";
+ reg = <0x30 0x238>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0xff1f>;
+ };
+
+ scm_conf: scm_conf@270 {
+ compatible = "syscon";
+ reg = <0x270 0x330>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ scm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ scm_clockdomains: clockdomains {
+ };
+
+ omap3_pmx_wkup: pinmux@a00 {
+ compatible = "ti,omap3-padconf",
+ "pinctrl-single";
+ reg = <0xa00 0x5c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0xff1f>;
+ };
+ };
+ };
+
aes: aes@480c5000 {
compatible = "ti,omap3-aes";
ti,hwmods = "aes";
@@ -123,19 +177,6 @@
};
};
- scrm: scrm@48002000 {
- compatible = "ti,omap3-scrm";
- reg = <0x48002000 0x2000>;
-
- scrm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- scrm_clockdomains: clockdomains {
- };
- };
-
counter32k: counter@48320000 {
compatible = "ti,omap-counter32k";
reg = <0x48320000 0x20>;
@@ -161,37 +202,10 @@
dma-requests = <96>;
};
- omap3_pmx_core: pinmux@48002030 {
- compatible = "ti,omap3-padconf", "pinctrl-single";
- reg = <0x48002030 0x0238>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <16>;
- pinctrl-single,function-mask = <0xff1f>;
- };
-
- omap3_pmx_wkup: pinmux@48002a00 {
- compatible = "ti,omap3-padconf", "pinctrl-single";
- reg = <0x48002a00 0x5c>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <16>;
- pinctrl-single,function-mask = <0xff1f>;
- };
-
- omap3_scm_general: tisyscon@48002270 {
- compatible = "syscon";
- reg = <0x48002270 0x2f0>;
- };
-
pbias_regulator: pbias_regulator {
compatible = "ti,pbias-omap";
reg = <0x2b0 0x4>;
- syscon = <&omap3_scm_general>;
+ syscon = <&scm_conf>;
pbias_mmc_reg: pbias_mmc_omap2430 {
regulator-name = "pbias_mmc_omap2430";
regulator-min-microvolt = <1800000>;
@@ -442,6 +456,7 @@
};
mmu_isp: mmu@480bd400 {
+ #iommu-cells = <0>;
compatible = "ti,omap2-iommu";
reg = <0x480bd400 0x80>;
interrupts = <24>;
@@ -450,6 +465,7 @@
};
mmu_iva: mmu@5d000000 {
+ #iommu-cells = <0>;
compatible = "ti,omap2-iommu";
reg = <0x5d000000 0x80>;
interrupts = <28>;
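A sanity check on the new l4 hierarchy (not part of the patch): the nested ranges properties compose, so the relocated pinmux nodes still decode to the same absolute addresses as the nodes removed further down in this hunk:

	0x48000000 (l4@48000000) + 0x2000 (scm@2000) + 0x30  = 0x48002030  /* old omap3_pmx_core */
	0x48000000 (l4@48000000) + 0x2000 (scm@2000) + 0xa00 = 0x48002a00  /* old omap3_pmx_wkup */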
diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi
deleted file mode 100644
index 1ff626489546..000000000000
--- a/arch/arm/boot/dts/omap34xx-hs.dtsi
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Disabled modules for secure omaps */
-
-#include "omap34xx.dtsi"
-
-/* Secure omaps have some devices inaccessible depending on the firmware */
-&aes {
- status = "disabled";
-};
-
-&sham {
- status = "disabled";
-};
-
-&timer12 {
- status = "disabled";
-};
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index 3819c1e91591..4f6b2d5b1902 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -8,6 +8,8 @@
* kind, whether express or implied.
*/
+#include <dt-bindings/media/omap3-isp.h>
+
#include "omap3.dtsi"
/ {
@@ -37,6 +39,21 @@
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0xff1f>;
};
+
+ isp: isp@480bc000 {
+ compatible = "ti,omap3-isp";
+ reg = <0x480bc000 0x12fc
+ 0x480bd800 0x017c>;
+ interrupts = <24>;
+ iommus = <&mmu_isp>;
+ syscon = <&scm_conf 0xdc>;
+ ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>;
+ #clock-cells = <1>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi b/arch/arm/boot/dts/omap36xx-hs.dtsi
deleted file mode 100644
index 2c7febb0e016..000000000000
--- a/arch/arm/boot/dts/omap36xx-hs.dtsi
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Disabled modules for secure omaps */
-
-#include "omap36xx.dtsi"
-
-/* Secure omaps have some devices inaccessible depending on the firmware */
-&aes {
- status = "disabled";
-};
-
-&sham {
- status = "disabled";
-};
-
-&timer12 {
- status = "disabled";
-};
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index 541704a59a5a..86253de5a97a 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -8,6 +8,8 @@
* kind, whether express or implied.
*/
+#include <dt-bindings/media/omap3-isp.h>
+
#include "omap3.dtsi"
/ {
@@ -69,6 +71,21 @@
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0xff1f>;
};
+
+ isp: isp@480bc000 {
+ compatible = "ti,omap3-isp";
+ reg = <0x480bc000 0x12fc
+ 0x480bd800 0x0600>;
+ interrupts = <24>;
+ iommus = <&mmu_isp>;
+ syscon = <&scm_conf 0x2f0>;
+ ti,phy-type = <OMAP3ISP_PHY_TYPE_CSIPHY>;
+ #clock-cells = <1>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/omap3xxx-clocks.dtsi b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
index 5c375003bad1..bbba5bdc4bc9 100644
--- a/arch/arm/boot/dts/omap3xxx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
@@ -79,13 +79,14 @@
clock-div = <1>;
};
};
-&scrm_clocks {
+
+&scm_clocks {
mcbsp5_mux_fck: mcbsp5_mux_fck {
#clock-cells = <0>;
compatible = "ti,composite-mux-clock";
clocks = <&core_96m_fck>, <&mcbsp_clks>;
ti,bit-shift = <4>;
- reg = <0x02d8>;
+ reg = <0x68>;
};
mcbsp5_fck: mcbsp5_fck {
@@ -99,7 +100,7 @@
compatible = "ti,composite-mux-clock";
clocks = <&core_96m_fck>, <&mcbsp_clks>;
ti,bit-shift = <2>;
- reg = <0x0274>;
+ reg = <0x04>;
};
mcbsp1_fck: mcbsp1_fck {
@@ -113,7 +114,7 @@
compatible = "ti,composite-mux-clock";
clocks = <&per_96m_fck>, <&mcbsp_clks>;
ti,bit-shift = <6>;
- reg = <0x0274>;
+ reg = <0x04>;
};
mcbsp2_fck: mcbsp2_fck {
@@ -126,7 +127,7 @@
#clock-cells = <0>;
compatible = "ti,composite-mux-clock";
clocks = <&per_96m_fck>, <&mcbsp_clks>;
- reg = <0x02d8>;
+ reg = <0x68>;
};
mcbsp3_fck: mcbsp3_fck {
@@ -140,7 +141,7 @@
compatible = "ti,composite-mux-clock";
clocks = <&per_96m_fck>, <&mcbsp_clks>;
ti,bit-shift = <2>;
- reg = <0x02d8>;
+ reg = <0x68>;
};
mcbsp4_fck: mcbsp4_fck {
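The new reg values in this hunk are the old absolute SCM offsets rebased onto the scm_conf syscon, which the omap3.dtsi change above places at offset 0x270 within the SCM block:

	0x0274 - 0x270 = 0x04
	0x02d8 - 0x270 = 0x68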
diff --git a/arch/arm/boot/dts/omap4-cpu-thermal.dtsi b/arch/arm/boot/dts/omap4-cpu-thermal.dtsi
index cb9458feb2e3..ab7f87ae96f0 100644
--- a/arch/arm/boot/dts/omap4-cpu-thermal.dtsi
+++ b/arch/arm/boot/dts/omap4-cpu-thermal.dtsi
@@ -18,7 +18,7 @@ cpu_thermal: cpu_thermal {
/* sensor ID */
thermal-sensors = <&bandgap 0>;
- trips {
+ cpu_trips: trips {
cpu_alert0: cpu_alert {
temperature = <100000>; /* millicelsius */
hysteresis = <2000>; /* millicelsius */
@@ -31,7 +31,7 @@ cpu_thermal: cpu_thermal {
};
};
- cooling-maps {
+ cpu_cooling_maps: cooling-maps {
map0 {
trip = <&cpu_alert0>;
cooling-device =
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 7c15fb2e2fe4..f1507bc8737e 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -448,6 +448,16 @@
non-removable;
bus-width = <4>;
cap-power-off-card;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1271";
+ reg = <2>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 53 */
+ ref-clock-frequency = <38400000>;
+ };
};
&emif1 {
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 8aca8dae968a..dac86ed7481f 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -485,6 +485,17 @@
non-removable;
bus-width = <4>;
cap-power-off-card;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1281";
+ reg = <2>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 53 */
+ ref-clock-frequency = <26000000>;
+ tcxo-clock-frequency = <26000000>;
+ };
};
&emif1 {
diff --git a/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi b/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
index cc66af419236..9bceeb7e1f03 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
@@ -65,4 +65,14 @@
bus-width = <4>;
cap-power-off-card;
status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1271";
+ reg = <2>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; /* gpio 41 */
+ ref-clock-frequency = <38400000>;
+ };
};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index f2091d1c9c36..f884d6adb71e 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -124,99 +124,141 @@
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- cm1: cm1@4a004000 {
- compatible = "ti,omap4-cm1";
- reg = <0x4a004000 0x2000>;
-
- cm1_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
+ l4_cfg: l4@4a000000 {
+ compatible = "ti,omap4-l4-cfg", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4a000000 0x1000000>;
- cm1_clockdomains: clockdomains {
- };
- };
+ cm1: cm1@4000 {
+ compatible = "ti,omap4-cm1";
+ reg = <0x4000 0x2000>;
- prm: prm@4a306000 {
- compatible = "ti,omap4-prm";
- reg = <0x4a306000 0x3000>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ cm1_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- prm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
+ cm1_clockdomains: clockdomains {
+ };
};
- prm_clockdomains: clockdomains {
- };
- };
+ cm2: cm2@8000 {
+ compatible = "ti,omap4-cm2";
+ reg = <0x8000 0x3000>;
- cm2: cm2@4a008000 {
- compatible = "ti,omap4-cm2";
- reg = <0x4a008000 0x3000>;
+ cm2_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- cm2_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
+ cm2_clockdomains: clockdomains {
+ };
};
- cm2_clockdomains: clockdomains {
+ omap4_scm_core: scm@2000 {
+ compatible = "ti,omap4-scm-core", "simple-bus";
+ reg = <0x2000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x1000>;
+
+ scm_conf: scm_conf@0 {
+ compatible = "syscon";
+ reg = <0x0 0x800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
- };
-
- scrm: scrm@4a30a000 {
- compatible = "ti,omap4-scrm";
- reg = <0x4a30a000 0x2000>;
- scrm_clocks: clocks {
+ omap4_padconf_core: scm@100000 {
+ compatible = "ti,omap4-scm-padconf-core",
+ "simple-bus";
#address-cells = <1>;
- #size-cells = <0>;
+ #size-cells = <1>;
+ ranges = <0 0x100000 0x1000>;
+
+ omap4_pmx_core: pinmux@40 {
+ compatible = "ti,omap4-padconf",
+ "pinctrl-single";
+ reg = <0x40 0x0196>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0x7fff>;
+ };
+
+ omap4_padconf_global: omap4_padconf_global@5a0 {
+ compatible = "syscon";
+ reg = <0x5a0 0x170>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pbias_regulator: pbias_regulator {
+ compatible = "ti,pbias-omap";
+ reg = <0x60 0x4>;
+ syscon = <&omap4_padconf_global>;
+ pbias_mmc_reg: pbias_mmc_omap4 {
+ regulator-name = "pbias_mmc_omap4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
};
- scrm_clockdomains: clockdomains {
- };
- };
-
- counter32k: counter@4a304000 {
- compatible = "ti,omap-counter32k";
- reg = <0x4a304000 0x20>;
- ti,hwmods = "counter_32k";
- };
-
- omap4_pmx_core: pinmux@4a100040 {
- compatible = "ti,omap4-padconf", "pinctrl-single";
- reg = <0x4a100040 0x0196>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <16>;
- pinctrl-single,function-mask = <0x7fff>;
- };
- omap4_pmx_wkup: pinmux@4a31e040 {
- compatible = "ti,omap4-padconf", "pinctrl-single";
- reg = <0x4a31e040 0x0038>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <16>;
- pinctrl-single,function-mask = <0x7fff>;
- };
-
- omap4_padconf_global: tisyscon@4a1005a0 {
- compatible = "syscon";
- reg = <0x4a1005a0 0x170>;
- };
-
- pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
- reg = <0x60 0x4>;
- syscon = <&omap4_padconf_global>;
- pbias_mmc_reg: pbias_mmc_omap4 {
- regulator-name = "pbias_mmc_omap4";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3000000>;
+ l4_wkup: l4@300000 {
+ compatible = "ti,omap4-l4-wkup", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x300000 0x40000>;
+
+ counter32k: counter@4000 {
+ compatible = "ti,omap-counter32k";
+ reg = <0x4000 0x20>;
+ ti,hwmods = "counter_32k";
+ };
+
+ prm: prm@6000 {
+ compatible = "ti,omap4-prm";
+ reg = <0x6000 0x3000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+
+ prm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ prm_clockdomains: clockdomains {
+ };
+ };
+
+ scrm: scrm@a000 {
+ compatible = "ti,omap4-scrm";
+ reg = <0xa000 0x2000>;
+
+ scrm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ scrm_clockdomains: clockdomains {
+ };
+ };
+
+ omap4_pmx_wkup: pinmux@1e040 {
+ compatible = "ti,omap4-padconf",
+ "pinctrl-single";
+ reg = <0x1e040 0x0038>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0x7fff>;
+ };
};
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 77b5f70d0ebc..7d24ae0306b5 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -128,7 +128,7 @@
* hierarchy.
*/
ocp {
- compatible = "ti,omap4-l3-noc", "simple-bus";
+ compatible = "ti,omap5-l3-noc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -139,99 +139,141 @@
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- prm: prm@4ae06000 {
- compatible = "ti,omap5-prm";
- reg = <0x4ae06000 0x3000>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ l4_cfg: l4@4a000000 {
+ compatible = "ti,omap5-l4-cfg", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4a000000 0x22a000>;
- prm_clocks: clocks {
+ scm_core: scm@2000 {
+ compatible = "ti,omap5-scm-core", "simple-bus";
+ reg = <0x2000 0x1000>;
#address-cells = <1>;
- #size-cells = <0>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x800>;
+
+ scm_conf: scm_conf@0 {
+ compatible = "syscon";
+ reg = <0x0 0x800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
- prm_clockdomains: clockdomains {
+ scm_padconf_core: scm@2800 {
+ compatible = "ti,omap5-scm-padconf-core",
+ "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x2800 0x800>;
+
+ omap5_pmx_core: pinmux@40 {
+ compatible = "ti,omap5-padconf",
+ "pinctrl-single";
+ reg = <0x40 0x01b6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0x7fff>;
+ };
+
+ omap5_padconf_global: omap5_padconf_global@5a0 {
+ compatible = "syscon";
+ reg = <0x5a0 0xec>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pbias_regulator: pbias_regulator {
+ compatible = "ti,pbias-omap";
+ reg = <0x60 0x4>;
+ syscon = <&omap5_padconf_global>;
+ pbias_mmc_reg: pbias_mmc_omap5 {
+ regulator-name = "pbias_mmc_omap5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
+ };
};
- };
- cm_core_aon: cm_core_aon@4a004000 {
- compatible = "ti,omap5-cm-core-aon";
- reg = <0x4a004000 0x2000>;
+ cm_core_aon: cm_core_aon@4000 {
+ compatible = "ti,omap5-cm-core-aon";
+ reg = <0x4000 0x2000>;
- cm_core_aon_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
+ cm_core_aon_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- cm_core_aon_clockdomains: clockdomains {
+ cm_core_aon_clockdomains: clockdomains {
+ };
};
- };
- scrm: scrm@4ae0a000 {
- compatible = "ti,omap5-scrm";
- reg = <0x4ae0a000 0x2000>;
+ cm_core: cm_core@8000 {
+ compatible = "ti,omap5-cm-core";
+ reg = <0x8000 0x3000>;
- scrm_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
- };
+ cm_core_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- scrm_clockdomains: clockdomains {
+ cm_core_clockdomains: clockdomains {
+ };
};
};
- cm_core: cm_core@4a008000 {
- compatible = "ti,omap5-cm-core";
- reg = <0x4a008000 0x3000>;
+ l4_wkup: l4@4ae00000 {
+ compatible = "ti,omap5-l4-wkup", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4ae00000 0x2b000>;
- cm_core_clocks: clocks {
- #address-cells = <1>;
- #size-cells = <0>;
+ counter32k: counter@4000 {
+ compatible = "ti,omap-counter32k";
+ reg = <0x4000 0x40>;
+ ti,hwmods = "counter_32k";
};
- cm_core_clockdomains: clockdomains {
+ prm: prm@6000 {
+ compatible = "ti,omap5-prm";
+ reg = <0x6000 0x3000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+
+ prm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ prm_clockdomains: clockdomains {
+ };
};
- };
- counter32k: counter@4ae04000 {
- compatible = "ti,omap-counter32k";
- reg = <0x4ae04000 0x40>;
- ti,hwmods = "counter_32k";
- };
+ scrm: scrm@a000 {
+ compatible = "ti,omap5-scrm";
+ reg = <0xa000 0x2000>;
- omap5_pmx_core: pinmux@4a002840 {
- compatible = "ti,omap5-padconf", "pinctrl-single";
- reg = <0x4a002840 0x01b6>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <16>;
- pinctrl-single,function-mask = <0x7fff>;
- };
- omap5_pmx_wkup: pinmux@4ae0c840 {
- compatible = "ti,omap5-padconf", "pinctrl-single";
- reg = <0x4ae0c840 0x0038>;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- pinctrl-single,register-width = <16>;
- pinctrl-single,function-mask = <0x7fff>;
- };
+ scrm_clocks: clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
- omap5_padconf_global: tisyscon@4a002da0 {
- compatible = "syscon";
- reg = <0x4A002da0 0xec>;
- };
+ scrm_clockdomains: clockdomains {
+ };
+ };
- pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
- reg = <0x60 0x4>;
- syscon = <&omap5_padconf_global>;
- pbias_mmc_reg: pbias_mmc_omap5 {
- regulator-name = "pbias_mmc_omap5";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3000000>;
+ omap5_pmx_wkup: pinmux@c840 {
+ compatible = "ti,omap5-padconf",
+ "pinctrl-single";
+ reg = <0xc840 0x0038>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0x7fff>;
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index b3154c071652..6c1511263a55 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -23,6 +23,7 @@
next-level-cache = <&L2>;
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@1 {
@@ -33,6 +34,7 @@
next-level-cache = <&L2>;
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@2 {
@@ -43,6 +45,7 @@
next-level-cache = <&L2>;
qcom,acc = <&acc2>;
qcom,saw = <&saw2>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@3 {
@@ -53,12 +56,23 @@
next-level-cache = <&L2>;
qcom,acc = <&acc3>;
qcom,saw = <&saw3>;
+ cpu-idle-states = <&CPU_SPC>;
};
L2: l2-cache {
compatible = "cache";
cache-level = <2>;
};
+
+ idle-states {
+ CPU_SPC: spc {
+ compatible = "qcom,idle-state-spc",
+ "arm,idle-state";
+ entry-latency-us = <400>;
+ exit-latency-us = <900>;
+ min-residency-us = <3000>;
+ };
+ };
};
cpu-pmu {
@@ -139,26 +153,26 @@
reg = <0x020b8000 0x1000>, <0x02008000 0x1000>;
};
- saw0: regulator@2089000 {
- compatible = "qcom,saw2";
+ saw0: power-controller@2089000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x02089000 0x1000>, <0x02009000 0x1000>;
regulator;
};
- saw1: regulator@2099000 {
- compatible = "qcom,saw2";
+ saw1: power-controller@2099000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x02099000 0x1000>, <0x02009000 0x1000>;
regulator;
};
- saw2: regulator@20a9000 {
- compatible = "qcom,saw2";
+ saw2: power-controller@20a9000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x020a9000 0x1000>, <0x02009000 0x1000>;
regulator;
};
- saw3: regulator@20b9000 {
- compatible = "qcom,saw2";
+ saw3: power-controller@20b9000 {
+ compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x020b9000 0x1000>, <0x02009000 0x1000>;
regulator;
};
@@ -166,6 +180,7 @@
gsbi1: gsbi@12440000 {
status = "disabled";
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <1>;
reg = <0x12440000 0x100>;
clocks = <&gcc GSBI1_H_CLK>;
clock-names = "iface";
@@ -173,6 +188,8 @@
#size-cells = <1>;
ranges;
+ syscon-tcsr = <&tcsr>;
+
i2c1: i2c@12460000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x12460000 0x1000>;
@@ -187,6 +204,7 @@
gsbi2: gsbi@12480000 {
status = "disabled";
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <2>;
reg = <0x12480000 0x100>;
clocks = <&gcc GSBI2_H_CLK>;
clock-names = "iface";
@@ -194,6 +212,8 @@
#size-cells = <1>;
ranges;
+ syscon-tcsr = <&tcsr>;
+
i2c2: i2c@124a0000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x124a0000 0x1000>;
@@ -208,6 +228,7 @@
gsbi7: gsbi@16600000 {
status = "disabled";
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <7>;
reg = <0x16600000 0x100>;
clocks = <&gcc GSBI7_H_CLK>;
clock-names = "iface";
@@ -215,6 +236,8 @@
#size-cells = <1>;
ranges;
+ syscon-tcsr = <&tcsr>;
+
serial@16640000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16640000 0x1000>,
@@ -239,6 +262,13 @@
#reset-cells = <1>;
};
+ lcc: clock-controller@28000000 {
+ compatible = "qcom,lcc-apq8064";
+ reg = <0x28000000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
mmcc: clock-controller@4000000 {
compatible = "qcom,mmcc-apq8064";
reg = <0x4000000 0x1000>;
@@ -349,5 +379,10 @@
pinctrl-0 = <&sdc4_gpios>;
};
};
+
+ tcsr: syscon@1a400000 {
+ compatible = "qcom,tcsr-apq8064", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
};
};
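For reference, the idle-state wiring added here is the standard arm,idle-state pattern: each cpu node references the shared CPU_SPC node via cpu-idle-states, and the latency and residency figures are in microseconds, so this SPC state costs about 400 us to enter and 900 us to exit and only pays off for idle periods of 3 ms or longer.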
diff --git a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
index 47370494d0f8..d484d08163e9 100644
--- a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
@@ -1,4 +1,6 @@
#include "qcom-msm8974.dtsi"
+#include "qcom-pm8841.dtsi"
+#include "qcom-pm8941.dtsi"
/ {
model = "Qualcomm APQ8074 Dragonboard";
diff --git a/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts b/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
index c9ff10821ad9..f7725b96612c 100644
--- a/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
+++ b/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
@@ -1,4 +1,5 @@
#include "qcom-apq8084.dtsi"
+#include "qcom-pma8084.dtsi"
/ {
model = "Qualcomm APQ8084/IFC6540";
diff --git a/arch/arm/boot/dts/qcom-apq8084-mtp.dts b/arch/arm/boot/dts/qcom-apq8084-mtp.dts
index 8ecec58a9ff6..cb43acfc5d1d 100644
--- a/arch/arm/boot/dts/qcom-apq8084-mtp.dts
+++ b/arch/arm/boot/dts/qcom-apq8084-mtp.dts
@@ -1,4 +1,5 @@
#include "qcom-apq8084.dtsi"
+#include "qcom-pma8084.dtsi"
/ {
model = "Qualcomm APQ 8084-MTP";
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index 1f130bc16858..7084010ee61b 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -21,6 +21,8 @@
enable-method = "qcom,kpss-acc-v2";
next-level-cache = <&L2>;
qcom,acc = <&acc0>;
+ qcom,saw = <&saw0>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@1 {
@@ -30,6 +32,8 @@
enable-method = "qcom,kpss-acc-v2";
next-level-cache = <&L2>;
qcom,acc = <&acc1>;
+ qcom,saw = <&saw1>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@2 {
@@ -39,6 +43,8 @@
enable-method = "qcom,kpss-acc-v2";
next-level-cache = <&L2>;
qcom,acc = <&acc2>;
+ qcom,saw = <&saw2>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@3 {
@@ -48,6 +54,8 @@
enable-method = "qcom,kpss-acc-v2";
next-level-cache = <&L2>;
qcom,acc = <&acc3>;
+ qcom,saw = <&saw3>;
+ cpu-idle-states = <&CPU_SPC>;
};
L2: l2-cache {
@@ -55,6 +63,16 @@
cache-level = <2>;
qcom,saw = <&saw_l2>;
};
+
+ idle-states {
+ CPU_SPC: spc {
+ compatible = "qcom,idle-state-spc",
+ "arm,idle-state";
+ entry-latency-us = <150>;
+ exit-latency-us = <200>;
+ min-residency-us = <2000>;
+ };
+ };
};
cpu-pmu {
@@ -144,7 +162,27 @@
};
};
- saw_l2: regulator@f9012000 {
+ saw0: power-controller@f9089000 {
+ compatible = "qcom,apq8084-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf9089000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw1: power-controller@f9099000 {
+ compatible = "qcom,apq8084-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf9099000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw2: power-controller@f90a9000 {
+ compatible = "qcom,apq8084-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf90a9000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw3: power-controller@f90b9000 {
+ compatible = "qcom,apq8084-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf90b9000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw_l2: power-controller@f9012000 {
compatible = "qcom,saw2";
reg = <0xf9012000 0x1000>;
regulator;
@@ -226,5 +264,21 @@
clock-names = "core", "iface";
status = "disabled";
};
+
+ spmi_bus: spmi@fc4cf000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg-names = "core", "intr", "cnfg";
+ reg = <0xfc4cf000 0x1000>,
+ <0xfc4cb000 0x1000>,
+ <0xfc4ca000 0x1000>;
+ interrupt-names = "periph_irq";
+ interrupts = <0 190 0>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index cb225dafe97c..9f727d8eadf6 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -2,6 +2,7 @@
#include "skeleton.dtsi"
#include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
#include <dt-bindings/soc/qcom,gsbi.h>
/ {
@@ -60,12 +61,35 @@
};
};
+ clocks {
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
ranges;
compatible = "simple-bus";
+ lpass@28100000 {
+ compatible = "qcom,lpass-cpu";
+ status = "disabled";
+ clocks = <&lcc AHBIX_CLK>,
+ <&lcc MI2S_OSR_CLK>,
+ <&lcc MI2S_BIT_CLK>;
+ clock-names = "ahbix-clk",
+ "mi2s-osr-clk",
+ "mi2s-bit-clk";
+ interrupts = <0 85 1>;
+ interrupt-names = "lpass-irq-lpaif";
+ reg = <0x28100000 0x10000>;
+ reg-names = "lpass-lpaif";
+ };
+
qcom_pinmux: pinmux@800000 {
compatible = "qcom,ipq8064-pinctrl";
reg = <0x800000 0x4000>;
@@ -89,10 +113,14 @@
compatible = "qcom,kpss-timer", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
- <1 3 0x301>;
+ <1 3 0x301>,
+ <1 4 0x301>,
+ <1 5 0x301>;
reg = <0x0200a000 0x100>;
clock-frequency = <25000000>,
<32768>;
+ clocks = <&sleep_clk>;
+ clock-names = "sleep";
cpu-offset = <0x80000>;
};
@@ -120,6 +148,7 @@
gsbi2: gsbi@12480000 {
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <2>;
reg = <0x12480000 0x100>;
clocks = <&gcc GSBI2_H_CLK>;
clock-names = "iface";
@@ -128,6 +157,8 @@
ranges;
status = "disabled";
+ syscon-tcsr = <&tcsr>;
+
serial@12490000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x12490000 0x1000>,
@@ -155,6 +186,7 @@
gsbi4: gsbi@16300000 {
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <4>;
reg = <0x16300000 0x100>;
clocks = <&gcc GSBI4_H_CLK>;
clock-names = "iface";
@@ -163,6 +195,8 @@
ranges;
status = "disabled";
+ syscon-tcsr = <&tcsr>;
+
serial@16340000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16340000 0x1000>,
@@ -189,6 +223,7 @@
gsbi5: gsbi@1a200000 {
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <5>;
reg = <0x1a200000 0x100>;
clocks = <&gcc GSBI5_H_CLK>;
clock-names = "iface";
@@ -197,6 +232,8 @@
ranges;
status = "disabled";
+ syscon-tcsr = <&tcsr>;
+
serial@1a240000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x1a240000 0x1000>,
@@ -279,5 +316,18 @@
#clock-cells = <1>;
#reset-cells = <1>;
};
+
+ tcsr: syscon@1a400000 {
+ compatible = "qcom,tcsr-ipq8064", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
+
+ lcc: clock-controller@28000000 {
+ compatible = "qcom,lcc-ipq8064";
+ reg = <0x28000000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index 0affd6193f56..20bbd19b996e 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -82,6 +82,7 @@
gsbi12: gsbi@19c00000 {
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <12>;
reg = <0x19c00000 0x100>;
clocks = <&gcc GSBI12_H_CLK>;
clock-names = "iface";
@@ -89,6 +90,8 @@
#size-cells = <1>;
ranges;
+ syscon-tcsr = <&tcsr>;
+
serial@19c40000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x19c40000 0x1000>,
@@ -196,6 +199,11 @@
vmmc-supply = <&vsdcc_fixed>;
};
};
+
+ tcsr: syscon@1a400000 {
+ compatible = "qcom,tcsr-msm8660", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index e1b0d5cd9e3c..a02b984cc68d 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -91,6 +91,13 @@
reg = <0x900000 0x4000>;
};
+ lcc: clock-controller@28000000 {
+ compatible = "qcom,lcc-msm8960";
+ reg = <0x28000000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
clock-controller@4000000 {
compatible = "qcom,mmcc-msm8960";
reg = <0x4000000 0x1000>;
@@ -122,6 +129,7 @@
gsbi5: gsbi@16400000 {
compatible = "qcom,gsbi-v1.0.0";
+ cell-index = <5>;
reg = <0x16400000 0x100>;
clocks = <&gcc GSBI5_H_CLK>;
clock-names = "iface";
@@ -129,6 +137,8 @@
#size-cells = <1>;
ranges;
+ syscon-tcsr = <&tcsr>;
+
serial@16440000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16440000 0x1000>,
@@ -238,5 +248,10 @@
vmmc-supply = <&vsdcc_fixed>;
};
};
+
+ tcsr: syscon@1a400000 {
+ compatible = "qcom,tcsr-msm8960", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index cccc21b7c8fd..bd35b0674ff6 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -1,4 +1,6 @@
#include "qcom-msm8974.dtsi"
+#include "qcom-pm8841.dtsi"
+#include "qcom-pm8941.dtsi"
/ {
model = "Sony Xperia Z1";
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index e265ec16a787..37b47b5538b8 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -21,6 +21,8 @@
reg = <0>;
next-level-cache = <&L2>;
qcom,acc = <&acc0>;
+ qcom,saw = <&saw0>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@1 {
@@ -30,6 +32,8 @@
reg = <1>;
next-level-cache = <&L2>;
qcom,acc = <&acc1>;
+ qcom,saw = <&saw1>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@2 {
@@ -39,6 +43,8 @@
reg = <2>;
next-level-cache = <&L2>;
qcom,acc = <&acc2>;
+ qcom,saw = <&saw2>;
+ cpu-idle-states = <&CPU_SPC>;
};
cpu@3 {
@@ -48,6 +54,8 @@
reg = <3>;
next-level-cache = <&L2>;
qcom,acc = <&acc3>;
+ qcom,saw = <&saw3>;
+ cpu-idle-states = <&CPU_SPC>;
};
L2: l2-cache {
@@ -55,6 +63,16 @@
cache-level = <2>;
qcom,saw = <&saw_l2>;
};
+
+ idle-states {
+ CPU_SPC: spc {
+ compatible = "qcom,idle-state-spc",
+ "arm,idle-state";
+ entry-latency-us = <150>;
+ exit-latency-us = <200>;
+ min-residency-us = <2000>;
+ };
+ };
};
cpu-pmu {
@@ -144,7 +162,27 @@
};
};
- saw_l2: regulator@f9012000 {
+ saw0: power-controller@f9089000 {
+ compatible = "qcom,msm8974-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf9089000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw1: power-controller@f9099000 {
+ compatible = "qcom,msm8974-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf9099000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw2: power-controller@f90a9000 {
+ compatible = "qcom,msm8974-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf90a9000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw3: power-controller@f90b9000 {
+ compatible = "qcom,msm8974-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf90b9000 0x1000>, <0xf9009000 0x1000>;
+ };
+
+ saw_l2: power-controller@f9012000 {
compatible = "qcom,saw2";
reg = <0xf9012000 0x1000>;
regulator;
@@ -247,5 +285,21 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ spmi_bus: spmi@fc4cf000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg-names = "core", "intr", "cnfg";
+ reg = <0xfc4cf000 0x1000>,
+ <0xfc4cb000 0x1000>,
+ <0xfc4ca000 0x1000>;
+ interrupt-names = "periph_irq";
+ interrupts = <0 190 0>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-pm8841.dtsi b/arch/arm/boot/dts/qcom-pm8841.dtsi
new file mode 100644
index 000000000000..73813cc118f9
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-pm8841.dtsi
@@ -0,0 +1,18 @@
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+ usid4: pm8841@4 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x4 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ usid5: pm8841@5 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x5 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi
new file mode 100644
index 000000000000..24c5088acea2
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-pm8941.dtsi
@@ -0,0 +1,18 @@
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+ usid0: pm8941@0 {
+ compatible ="qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ usid1: pm8941@1 {
+ compatible ="qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-pma8084.dtsi b/arch/arm/boot/dts/qcom-pma8084.dtsi
new file mode 100644
index 000000000000..a5a4fe695a46
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-pma8084.dtsi
@@ -0,0 +1,18 @@
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+ usid0: pma8084@0 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ usid1: pma8084@1 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
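These PMIC fragments are meant to be included by a board dts after the SoC dtsi that provides &spmi_bus, as the dragonboard, Xperia and APQ8084 board changes above do. reg takes two cells because the SPMI arbiter declares #address-cells = <2>: the first cell is the slave's unique ID and the second is the address type, SPMI_USID from dt-bindings/spmi/spmi.h. A minimal board-side sketch following the includes used in this series:

	#include "qcom-apq8084.dtsi"	/* provides spmi_bus */
	#include "qcom-pma8084.dtsi"	/* adds the PMA8084 slaves at USID 0 and 1 */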
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts b/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts
deleted file mode 100644
index b3d8f844b57a..000000000000
--- a/arch/arm/boot/dts/r8a73a4-ape6evm-reference.dts
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Device Tree Source for the APE6EVM board
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/dts-v1/;
-#include "r8a73a4.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-
-/ {
- model = "APE6EVM";
- compatible = "renesas,ape6evm-reference", "renesas,r8a73a4";
-
- aliases {
- serial0 = &scifa0;
- };
-
- chosen {
- bootargs = "ignore_loglevel rw";
- stdout-path = &scifa0;
- };
-
- memory@40000000 {
- device_type = "memory";
- reg = <0 0x40000000 0 0x40000000>;
- };
-
- memory@200000000 {
- device_type = "memory";
- reg = <2 0x00000000 0 0x40000000>;
- };
-
- vcc_mmc0: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "MMC0 Vcc";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-always-on;
- };
-
- vcc_sdhi0: regulator@1 {
- compatible = "regulator-fixed";
-
- regulator-name = "SDHI0 Vcc";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&pfc 76 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- /* Common 3.3V rail, used by several devices on APE6EVM */
- ape6evm_fixed_3v3: regulator@2 {
- compatible = "regulator-fixed";
- regulator-name = "3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- lbsc {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0 0x20000000>;
- };
-};
-
-&i2c5 {
- status = "okay";
- vdd_dvfs: max8973@1b {
- compatible = "maxim,max8973";
- reg = <0x1b>;
-
- regulator-min-microvolt = <935000>;
- regulator-max-microvolt = <1200000>;
- regulator-boot-on;
- regulator-always-on;
- };
-};
-
-&cpu0 {
- cpu0-supply = <&vdd_dvfs>;
- operating-points = <
- /* kHz uV */
- 1950000 1115000
- 1462500 995000
- >;
- voltage-tolerance = <1>; /* 1% */
-};
-
-&cmt1 {
- status = "okay";
-};
-
-&pfc {
- scifa0_pins: serial0 {
- renesas,groups = "scifa0_data";
- renesas,function = "scifa0";
- };
-
- mmc0_pins: mmc {
- renesas,groups = "mmc0_data8", "mmc0_ctrl";
- renesas,function = "mmc0";
- };
-
- sdhi0_pins: sd0 {
- renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd";
- renesas,function = "sdhi0";
- };
-
- sdhi1_pins: sd1 {
- renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
- renesas,function = "sdhi1";
- };
-};
-
-&mmcif0 {
- vmmc-supply = <&vcc_mmc0>;
- bus-width = <8>;
- non-removable;
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins>;
- status = "okay";
-};
-
-&scifa0 {
- pinctrl-0 = <&scifa0_pins>;
- pinctrl-names = "default";
-
- status = "okay";
-};
-
-&sdhi0 {
- vmmc-supply = <&vcc_sdhi0>;
- bus-width = <4>;
- toshiba,mmc-wrprotect-disable;
- pinctrl-names = "default";
- pinctrl-0 = <&sdhi0_pins>;
- status = "okay";
-};
-
-&sdhi1 {
- vmmc-supply = <&ape6evm_fixed_3v3>;
- bus-width = <4>;
- broken-cd;
- toshiba,mmc-wrprotect-disable;
- pinctrl-names = "default";
- pinctrl-0 = <&sdhi1_pins>;
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index 0d50bef01234..81a38ceee098 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -22,7 +22,7 @@
};
chosen {
- bootargs = "console=ttySC0,115200 ignore_loglevel root=/dev/nfs ip=dhcp rw";
+ bootargs = "ignore_loglevel root=/dev/nfs ip=dhcp rw";
stdout-path = &scifa0;
};
@@ -72,50 +72,30 @@
regulator-always-on;
};
- lbsc {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0 0x20000000>;
-
- ethernet@8000000 {
- compatible = "smsc,lan9220", "smsc,lan9115";
- reg = <0x08000000 0x1000>;
- interrupt-parent = <&irqc1>;
- interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
- phy-mode = "mii";
- reg-io-width = <4>;
- smsc,irq-active-high;
- smsc,irq-push-pull;
- vdd33a-supply = <&ape6evm_fixed_3v3>;
- vddvario-supply = <&ape6evm_fixed_1v8>;
- };
- };
-
leds {
compatible = "gpio-leds";
led1 {
- gpios = <&pfc 28 GPIO_ACTIVE_LOW>;
+ gpios = <&pfc 28 GPIO_ACTIVE_HIGH>;
label = "GNSS_EN";
};
led2 {
- gpios = <&pfc 126 GPIO_ACTIVE_LOW>;
+ gpios = <&pfc 126 GPIO_ACTIVE_HIGH>;
label = "NFC_NRST";
};
led3 {
- gpios = <&pfc 132 GPIO_ACTIVE_LOW>;
+ gpios = <&pfc 132 GPIO_ACTIVE_HIGH>;
label = "GNSS_NRST";
};
led4 {
- gpios = <&pfc 232 GPIO_ACTIVE_LOW>;
+ gpios = <&pfc 232 GPIO_ACTIVE_HIGH>;
label = "BT_WAKEUP";
};
led5 {
- gpios = <&pfc 250 GPIO_ACTIVE_LOW>;
+ gpios = <&pfc 250 GPIO_ACTIVE_HIGH>;
label = "STROBE";
};
led6 {
- gpios = <&pfc 288 GPIO_ACTIVE_LOW>;
+ gpios = <&pfc 288 GPIO_ACTIVE_HIGH>;
label = "BBRESETOUT";
};
};
@@ -123,10 +103,14 @@
keyboard {
compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&keyboard_pins>;
+
zero-key {
gpios = <&pfc 324 GPIO_ACTIVE_LOW>;
linux,code = <KEY_0>;
label = "S16";
+ gpio-key,wakeup;
};
menu-key {
@@ -184,6 +168,21 @@
voltage-tolerance = <1>; /* 1% */
};
+&bsc {
+ ethernet@8000000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0x08000000 0x1000>;
+ interrupt-parent = <&irqc1>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ phy-mode = "mii";
+ reg-io-width = <4>;
+ smsc,irq-active-high;
+ smsc,irq-push-pull;
+ vdd33a-supply = <&ape6evm_fixed_3v3>;
+ vddvario-supply = <&ape6evm_fixed_1v8>;
+ };
+};
+
&cmt1 {
status = "okay";
};
@@ -208,6 +207,12 @@
renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
renesas,function = "sdhi1";
};
+
+ keyboard_pins: keyboard {
+ renesas,pins = "PORT324", "PORT325", "PORT326", "PORT327",
+ "PORT328", "PORT329";
+ bias-pull-up;
+ };
};
&mmcif0 {
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 38136d9f6d95..0fd889f88109 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -9,6 +9,7 @@
* kind, whether express or implied.
*/
+#include <dt-bindings/clock/r8a73a4-clock.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -27,9 +28,15 @@
compatible = "arm,cortex-a15";
reg = <0>;
clock-frequency = <1500000000>;
+ power-domains = <&pd_a2sl>;
};
};
+ ptm {
+ compatible = "arm,coresight-etm3x";
+ power-domains = <&pd_d4>;
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -41,11 +48,13 @@
dbsc1: memory-controller@e6790000 {
compatible = "renesas,dbsc-r8a73a4";
reg = <0 0xe6790000 0 0x10000>;
+ power-domains = <&pd_a3bc>;
};
dbsc2: memory-controller@e67a0000 {
compatible = "renesas,dbsc-r8a73a4";
reg = <0 0xe67a0000 0 0x10000>;
+ power-domains = <&pd_a3bc>;
};
dmac: dma-multiplexer {
@@ -87,38 +96,19 @@
"ch8", "ch9", "ch10", "ch11",
"ch12", "ch13", "ch14", "ch15",
"ch16", "ch17", "ch18", "ch19";
+ clocks = <&mstp2_clks R8A73A4_CLK_DMAC>;
+ power-domains = <&pd_a3sp>;
};
};
- pfc: pfc@e6050000 {
- compatible = "renesas,pfc-r8a73a4";
- reg = <0 0xe6050000 0 0x9000>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupts-extended =
- <&irqc0 0 0>, <&irqc0 1 0>, <&irqc0 2 0>, <&irqc0 3 0>,
- <&irqc0 4 0>, <&irqc0 5 0>, <&irqc0 6 0>, <&irqc0 7 0>,
- <&irqc0 8 0>, <&irqc0 9 0>, <&irqc0 10 0>, <&irqc0 11 0>,
- <&irqc0 12 0>, <&irqc0 13 0>, <&irqc0 14 0>, <&irqc0 15 0>,
- <&irqc0 16 0>, <&irqc0 17 0>, <&irqc0 18 0>, <&irqc0 19 0>,
- <&irqc0 20 0>, <&irqc0 21 0>, <&irqc0 22 0>, <&irqc0 23 0>,
- <&irqc0 24 0>, <&irqc0 25 0>, <&irqc0 26 0>, <&irqc0 27 0>,
- <&irqc0 28 0>, <&irqc0 29 0>, <&irqc0 30 0>, <&irqc0 31 0>,
- <&irqc1 0 0>, <&irqc1 1 0>, <&irqc1 2 0>, <&irqc1 3 0>,
- <&irqc1 4 0>, <&irqc1 5 0>, <&irqc1 6 0>, <&irqc1 7 0>,
- <&irqc1 8 0>, <&irqc1 9 0>, <&irqc1 10 0>, <&irqc1 11 0>,
- <&irqc1 12 0>, <&irqc1 13 0>, <&irqc1 14 0>, <&irqc1 15 0>,
- <&irqc1 16 0>, <&irqc1 17 0>, <&irqc1 18 0>, <&irqc1 19 0>,
- <&irqc1 20 0>, <&irqc1 21 0>, <&irqc1 22 0>, <&irqc1 23 0>,
- <&irqc1 24 0>, <&irqc1 25 0>;
- };
-
i2c5: i2c@e60b0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe60b0000 0 0x428>;
interrupts = <0 179 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp4_clks R8A73A4_CLK_IIC5>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -127,6 +117,9 @@
compatible = "renesas,cmt-48-r8a73a4", "renesas,cmt-48-gen2";
reg = <0 0xe6130000 0 0x1004>;
interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
+ clock-names = "fck";
+ power-domains = <&pd_c5>;
renesas,channels-mask = <0xff>;
@@ -170,6 +163,7 @@
<0 29 IRQ_TYPE_LEVEL_HIGH>,
<0 30 IRQ_TYPE_LEVEL_HIGH>,
<0 31 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&pd_c4>;
};
irqc1: interrupt-controller@e61c0200 {
@@ -203,6 +197,31 @@
<0 55 IRQ_TYPE_LEVEL_HIGH>,
<0 56 IRQ_TYPE_LEVEL_HIGH>,
<0 57 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&pd_c4>;
+ };
+
+ pfc: pfc@e6050000 {
+ compatible = "renesas,pfc-r8a73a4";
+ reg = <0 0xe6050000 0 0x9000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts-extended =
+ <&irqc0 0 0>, <&irqc0 1 0>, <&irqc0 2 0>, <&irqc0 3 0>,
+ <&irqc0 4 0>, <&irqc0 5 0>, <&irqc0 6 0>, <&irqc0 7 0>,
+ <&irqc0 8 0>, <&irqc0 9 0>, <&irqc0 10 0>, <&irqc0 11 0>,
+ <&irqc0 12 0>, <&irqc0 13 0>, <&irqc0 14 0>, <&irqc0 15 0>,
+ <&irqc0 16 0>, <&irqc0 17 0>, <&irqc0 18 0>, <&irqc0 19 0>,
+ <&irqc0 20 0>, <&irqc0 21 0>, <&irqc0 22 0>, <&irqc0 23 0>,
+ <&irqc0 24 0>, <&irqc0 25 0>, <&irqc0 26 0>, <&irqc0 27 0>,
+ <&irqc0 28 0>, <&irqc0 29 0>, <&irqc0 30 0>, <&irqc0 31 0>,
+ <&irqc1 0 0>, <&irqc1 1 0>, <&irqc1 2 0>, <&irqc1 3 0>,
+ <&irqc1 4 0>, <&irqc1 5 0>, <&irqc1 6 0>, <&irqc1 7 0>,
+ <&irqc1 8 0>, <&irqc1 9 0>, <&irqc1 10 0>, <&irqc1 11 0>,
+ <&irqc1 12 0>, <&irqc1 13 0>, <&irqc1 14 0>, <&irqc1 15 0>,
+ <&irqc1 16 0>, <&irqc1 17 0>, <&irqc1 18 0>, <&irqc1 19 0>,
+ <&irqc1 20 0>, <&irqc1 21 0>, <&irqc1 22 0>, <&irqc1 23 0>,
+ <&irqc1 24 0>, <&irqc1 25 0>;
+ power-domains = <&pd_c5>;
};
thermal@e61f0000 {
@@ -210,6 +229,8 @@
reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>,
<0 0xe61f0200 0 0x38>, <0 0xe61f0300 0 0x38>;
interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks R8A73A4_CLK_THERMAL>;
+ power-domains = <&pd_c5>;
};
i2c0: i2c@e6500000 {
@@ -218,6 +239,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6500000 0 0x428>;
interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_IIC0>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -227,6 +250,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6510000 0 0x428>;
interrupts = <0 175 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_IIC1>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -236,6 +261,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6520000 0 0x428>;
interrupts = <0 176 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_IIC2>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -245,6 +272,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6530000 0 0x428>;
interrupts = <0 177 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp4_clks R8A73A4_CLK_IIC3>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -254,6 +283,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6540000 0 0x428>;
interrupts = <0 178 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp4_clks R8A73A4_CLK_IIC4>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -263,6 +294,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6550000 0 0x428>;
interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_IIC6>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -272,6 +305,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6560000 0 0x428>;
interrupts = <0 185 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_IIC7>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -281,6 +316,8 @@
compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
reg = <0 0xe6570000 0 0x428>;
interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks R8A73A4_CLK_IIC8>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -288,6 +325,9 @@
compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
reg = <0 0xe6c20000 0 0x100>;
interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A73A4_CLK_SCIFB0>;
+ clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -295,6 +335,9 @@
compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
reg = <0 0xe6c30000 0 0x100>;
interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A73A4_CLK_SCIFB1>;
+ clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -302,6 +345,9 @@
compatible = "renesas,scifa-r8a73a4", "renesas,scifa";
reg = <0 0xe6c40000 0 0x100>;
interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A73A4_CLK_SCIFA0>;
+ clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -309,6 +355,9 @@
compatible = "renesas,scifa-r8a73a4", "renesas,scifa";
reg = <0 0xe6c50000 0 0x100>;
interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A73A4_CLK_SCIFA1>;
+ clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -316,6 +365,9 @@
compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
reg = <0 0xe6ce0000 0 0x100>;
interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A73A4_CLK_SCIFB2>;
+ clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -323,6 +375,9 @@
compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
reg = <0 0xe6cf0000 0 0x100>;
interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A73A4_CLK_SCIFB3>;
+ clock-names = "sci_ick";
+ power-domains = <&pd_c4>;
status = "disabled";
};
@@ -330,6 +385,8 @@
compatible = "renesas,sdhi-r8a73a4";
reg = <0 0xee100000 0 0x100>;
interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_SDHI0>;
+ power-domains = <&pd_a3sp>;
cap-sd-highspeed;
status = "disabled";
};
@@ -338,6 +395,8 @@
compatible = "renesas,sdhi-r8a73a4";
reg = <0 0xee120000 0 0x100>;
interrupts = <0 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_SDHI1>;
+ power-domains = <&pd_a3sp>;
cap-sd-highspeed;
status = "disabled";
};
@@ -346,6 +405,8 @@
compatible = "renesas,sdhi-r8a73a4";
reg = <0 0xee140000 0 0x100>;
interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_SDHI2>;
+ power-domains = <&pd_a3sp>;
cap-sd-highspeed;
status = "disabled";
};
@@ -354,6 +415,8 @@
compatible = "renesas,sh-mmcif";
reg = <0 0xee200000 0 0x80>;
interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_MMCIF0>;
+ power-domains = <&pd_a3sp>;
reg-io-width = <4>;
status = "disabled";
};
@@ -362,6 +425,8 @@
compatible = "renesas,sh-mmcif";
reg = <0 0xee220000 0 0x80>;
interrupts = <0 170 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A73A4_CLK_MMCIF1>;
+ power-domains = <&pd_a3sp>;
reg-io-width = <4>;
status = "disabled";
};
@@ -377,4 +442,450 @@
<0 0xf1006000 0 0x2000>;
interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
+
+ bsc: bus@fec10000 {
+ compatible = "renesas,bsc-r8a73a4", "renesas,bsc",
+ "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x20000000>;
+ reg = <0 0xfec10000 0 0x400>;
+ clocks = <&zb_clk>;
+ power-domains = <&pd_c4>;
+ };
+
+ clocks {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* External root clocks */
+ extalr_clk: extalr_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "extalr";
+ };
+ extal1_clk: extal1_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "extal1";
+ };
+ extal2_clk: extal2_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ clock-output-names = "extal2";
+ };
+ fsiack_clk: fsiack_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ clock-output-names = "fsiack";
+ };
+ fsibck_clk: fsibck_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ clock-output-names = "fsibck";
+ };
+
+ /* Special CPG clocks */
+ cpg_clocks: cpg_clocks@e6150000 {
+ compatible = "renesas,r8a73a4-cpg-clocks";
+ reg = <0 0xe6150000 0 0x10000>;
+ clocks = <&extal1_clk>, <&extal2_clk>;
+ #clock-cells = <1>;
+ clock-output-names = "main", "pll0", "pll1", "pll2",
+ "pll2s", "pll2h", "z", "z2",
+ "i", "m3", "b", "m1", "m2",
+ "zx", "zs", "hp";
+ };
+
+ /* Variable factor clocks (DIV6) */
+ zb_clk: zb_clk@e6150010 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150010 0 4>;
+ clocks = <&pll1_div2_clk>, <0>,
+ <&cpg_clocks R8A73A4_CLK_PLL2S>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "zb";
+ };
+ sdhi0_clk: sdhi0_clk@e6150074 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150074 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>;
+ #clock-cells = <0>;
+ clock-output-names = "sdhi0ck";
+ };
+ sdhi1_clk: sdhi1_clk@e6150078 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150078 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>;
+ #clock-cells = <0>;
+ clock-output-names = "sdhi1ck";
+ };
+ sdhi2_clk: sdhi2_clk@e615007c {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe615007c 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>;
+ #clock-cells = <0>;
+ clock-output-names = "sdhi2ck";
+ };
+ mmc0_clk: mmc0_clk@e6150240 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150240 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>;
+ #clock-cells = <0>;
+ clock-output-names = "mmc0";
+ };
+ mmc1_clk: mmc1_clk@e6150244 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150244 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>;
+ #clock-cells = <0>;
+ clock-output-names = "mmc1";
+ };
+ vclk1_clk: vclk1_clk@e6150008 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150008 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vclk1";
+ };
+ vclk2_clk: vclk2_clk@e615000c {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe615000c 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vclk2";
+ };
+ vclk3_clk: vclk3_clk@e615001c {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe615001c 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vclk3";
+ };
+ vclk4_clk: vclk4_clk@e6150014 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150014 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vclk4";
+ };
+ vclk5_clk: vclk5_clk@e6150034 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150034 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <0>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vclk5";
+ };
+ fsia_clk: fsia_clk@e6150018 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150018 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <&fsiack_clk>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "fsia";
+ };
+ fsib_clk: fsib_clk@e6150090 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150090 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <&fsibck_clk>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "fsib";
+ };
+ mp_clk: mp_clk@e6150080 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150080 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <&extal2_clk>, <&extal2_clk>;
+ #clock-cells = <0>;
+ clock-output-names = "mp";
+ };
+ m4_clk: m4_clk@e6150098 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150098 0 4>;
+ clocks = <&cpg_clocks R8A73A4_CLK_PLL2S>;
+ #clock-cells = <0>;
+ clock-output-names = "m4";
+ };
+ hsi_clk: hsi_clk@e615026c {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe615026c 0 4>;
+ clocks = <&cpg_clocks R8A73A4_CLK_PLL2H>, <&pll1_div2_clk>,
+ <&cpg_clocks R8A73A4_CLK_PLL2S>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "hsi";
+ };
+ spuv_clk: spuv_clk@e6150094 {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0 0xe6150094 0 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+ <&extal2_clk>, <&extal2_clk>;
+ #clock-cells = <0>;
+ clock-output-names = "spuv";
+ };
+
+ /* Fixed factor clocks */
+ main_div2_clk: main_div2_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A73A4_CLK_MAIN>;
+ #clock-cells = <0>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ clock-output-names = "main_div2";
+ };
+ pll0_div2_clk: pll0_div2_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A73A4_CLK_PLL0>;
+ #clock-cells = <0>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ clock-output-names = "pll0_div2";
+ };
+ pll1_div2_clk: pll1_div2_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A73A4_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ clock-output-names = "pll1_div2";
+ };
+ extal1_div2_clk: extal1_div2_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&extal1_clk>;
+ #clock-cells = <0>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ clock-output-names = "extal1_div2";
+ };
+
+ /* Gate clocks */
+ mstp2_clks: mstp2_clks@e6150138 {
+ compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150138 0 4>, <0 0xe6150040 0 4>;
+ clocks = <&mp_clk>, <&mp_clk>, <&mp_clk>, <&mp_clk>,
+ <&mp_clk>, <&mp_clk>, <&cpg_clocks R8A73A4_CLK_HP>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A73A4_CLK_SCIFA0 R8A73A4_CLK_SCIFA1
+ R8A73A4_CLK_SCIFB0 R8A73A4_CLK_SCIFB1
+ R8A73A4_CLK_SCIFB2 R8A73A4_CLK_SCIFB3
+ R8A73A4_CLK_DMAC
+ >;
+ clock-output-names =
+ "scifa0", "scifa1", "scifb0", "scifb1",
+ "scifb2", "scifb3", "dmac";
+ };
+ mstp3_clks: mstp3_clks@e615013c {
+ compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
+ clocks = <&cpg_clocks R8A73A4_CLK_HP>, <&mmc1_clk>,
+ <&sdhi2_clk>, <&sdhi1_clk>, <&sdhi0_clk>,
+ <&mmc0_clk>, <&cpg_clocks R8A73A4_CLK_HP>,
+ <&cpg_clocks R8A73A4_CLK_HP>, <&cpg_clocks
+ R8A73A4_CLK_HP>, <&cpg_clocks
+ R8A73A4_CLK_HP>, <&extalr_clk>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A73A4_CLK_IIC2 R8A73A4_CLK_MMCIF1
+ R8A73A4_CLK_SDHI2 R8A73A4_CLK_SDHI1
+ R8A73A4_CLK_SDHI0 R8A73A4_CLK_MMCIF0
+ R8A73A4_CLK_IIC6 R8A73A4_CLK_IIC7
+ R8A73A4_CLK_IIC0 R8A73A4_CLK_IIC1
+ R8A73A4_CLK_CMT1
+ >;
+ clock-output-names =
+ "iic2", "mmcif1", "sdhi2", "sdhi1", "sdhi0",
+ "mmcif0", "iic6", "iic7", "iic0", "iic1",
+ "cmt1";
+ };
+ mstp4_clks: mstp4_clks@e6150140 {
+ compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
+ clocks = <&main_div2_clk>, <&cpg_clocks R8A73A4_CLK_HP>,
+ <&cpg_clocks R8A73A4_CLK_HP>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A73A4_CLK_IIC5 R8A73A4_CLK_IIC4
+ R8A73A4_CLK_IIC3
+ >;
+ clock-output-names =
+ "iic5", "iic4", "iic3";
+ };
+ mstp5_clks: mstp5_clks@e6150144 {
+ compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150144 0 4>, <0 0xe615003c 0 4>;
+ clocks = <&extal2_clk>, <&cpg_clocks R8A73A4_CLK_HP>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A73A4_CLK_THERMAL R8A73A4_CLK_IIC8
+ >;
+ clock-output-names =
+ "thermal", "iic8";
+ };
+ };
+
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,sysc-r8a73a4", "renesas,sysc-rmobile";
+ reg = <0 0xe6180000 0 0x8000>, <0 0xe6188000 0 0x8000>;
+
+ pm-domains {
+ pd_c5: c5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_c4: c4@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a3sg: a3sg@16 {
+ reg = <16>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a3ex: a3ex@17 {
+ reg = <17>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a3sp: a3sp@18 {
+ reg = <18>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a2us: a2us@19 {
+ reg = <19>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ pd_a3sm: a3sm@20 {
+ reg = <20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a2sl: a2sl@21 {
+ reg = <21>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ pd_a3km: a3km@22 {
+ reg = <22>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a2kl: a2kl@23 {
+ reg = <23>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ pd_c4ma: c4ma@1 {
+ reg = <1>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_c4cl: c4cl@2 {
+ reg = <2>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_d4: d4@3 {
+ reg = <3>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4bc: a4bc@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a3bc: a3bc@5 {
+ reg = <5>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ pd_a4l: a4l@6 {
+ reg = <6>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4lc: a4lc@7 {
+ reg = <7>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4mp: a4mp@8 {
+ reg = <8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a3mp: a3mp@9 {
+ reg = <9>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a3vc: a3vc@10 {
+ reg = <10>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ pd_a4sf: a4sf@11 {
+ reg = <11>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a3r: a3r@12 {
+ reg = <12>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a2rv: a2rv@13 {
+ reg = <13>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a2is: a2is@14 {
+ reg = <14>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 8a092605d641..83c1c3ca1b8f 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -431,6 +431,18 @@
clock-frequency = <27000000>;
clock-output-names = "dv";
};
+ fmsick_clk: fmsick_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "fmsick";
+ };
+ fmsock_clk: fmsock_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "fmsock";
+ };
fsiack_clk: fsiack_clk {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -459,13 +471,78 @@
};
/* Variable factor clocks (DIV6) */
+ vclk1_clk: vclk1_clk@e6150008 {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe6150008 4>;
+ clocks = <&pllc1_div2_clk>, <0>, <&dv_clk>,
+ <&cpg_clocks R8A7740_CLK_USB24S>,
+ <&extal1_div2_clk>, <&extalr_clk>, <0>,
+ <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vclk1";
+ };
+ vclk2_clk: vclk2_clk@e615000c {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe615000c 4>;
+ clocks = <&pllc1_div2_clk>, <0>, <&dv_clk>,
+ <&cpg_clocks R8A7740_CLK_USB24S>,
+ <&extal1_div2_clk>, <&extalr_clk>, <0>,
+ <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vclk2";
+ };
+ fmsi_clk: fmsi_clk@e6150010 {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe6150010 4>;
+ clocks = <&pllc1_div2_clk>, <&fmsick_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "fmsi";
+ };
+ fmso_clk: fmso_clk@e6150014 {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe6150014 4>;
+ clocks = <&pllc1_div2_clk>, <&fmsock_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "fmso";
+ };
+ fsia_clk: fsia_clk@e6150018 {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe6150018 4>;
+ clocks = <&pllc1_div2_clk>, <&fsiack_clk>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "fsia";
+ };
sub_clk: sub_clk@e6150080 {
compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150080 4>;
- clocks = <&pllc1_div2_clk>;
+ clocks = <&pllc1_div2_clk>,
+ <&cpg_clocks R8A7740_CLK_USB24S>, <0>, <0>;
#clock-cells = <0>;
clock-output-names = "sub";
};
+ spu_clk: spu_clk@e6150084 {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe6150084 4>;
+ clocks = <&pllc1_div2_clk>,
+ <&cpg_clocks R8A7740_CLK_USB24S>, <0>, <0>;
+ #clock-cells = <0>;
+ clock-output-names = "spu";
+ };
+ vou_clk: vou_clk@e6150088 {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe6150088 4>;
+ clocks = <&pllc1_div2_clk>, <&extal1_clk>, <&dv_clk>,
+ <0>;
+ #clock-cells = <0>;
+ clock-output-names = "vou";
+ };
+ stpro_clk: stpro_clk@e615009c {
+ compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe615009c 4>;
+ clocks = <&cpg_clocks R8A7740_CLK_PLLC0>;
+ #clock-cells = <0>;
+ clock-output-names = "stpro";
+ };
/* Fixed factor clocks */
pllc1_div2_clk: pllc1_div2_clk {
diff --git a/arch/arm/boot/dts/r8a7778-bockw.dts b/arch/arm/boot/dts/r8a7778-bockw.dts
index 46a884d45175..787fa6f9f46d 100644
--- a/arch/arm/boot/dts/r8a7778-bockw.dts
+++ b/arch/arm/boot/dts/r8a7778-bockw.dts
@@ -16,17 +16,191 @@
/dts-v1/;
#include "r8a7778.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "bockw";
compatible = "renesas,bockw", "renesas,r8a7778";
+ aliases {
+ serial0 = &scif0;
+ };
+
chosen {
bootargs = "console=ttySC0,115200 ignore_loglevel ip=dhcp root=/dev/nfs rw";
+ stdout-path = &scif0;
};
memory {
device_type = "memory";
reg = <0x60000000 0x10000000>;
};
+
+ fixedregulator3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&sndcodec>;
+ simple-audio-card,frame-master = <&sndcodec>;
+
+ sndcpu: simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+
+ sndcodec: simple-audio-card,codec {
+ sound-dai = <&ak4643>;
+ system-clock-frequency = <11289600>;
+ };
+ };
+};
+
+&bsc {
+ ethernet@18300000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0x18300000 0x1000>;
+
+ phy-mode = "mii";
+ interrupt-parent = <&irqpin>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ reg-io-width = <4>;
+ vddvario-supply = <&fixedregulator3v3>;
+ vdd33a-supply = <&fixedregulator3v3>;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <33333333>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ ak4643: sound-codec@12 {
+ compatible = "asahi-kasei,ak4643";
+ #sound-dai-cells = <0>;
+ reg = <0x12>;
+ };
+
+ camera@41 {
+ compatible = "oki,ml86v7667";
+ reg = <0x41>;
+ };
+
+ camera@43 {
+ compatible = "oki,ml86v7667";
+ reg = <0x43>;
+ };
+
+ rx8581: rtc@51 {
+ compatible = "epson,rx8581";
+ reg = <0x51>;
+ };
+};
+
+&mmcif {
+ pinctrl-0 = <&mmc_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&fixedregulator3v3>;
+ bus-width = <8>;
+ broken-cd;
+ status = "okay";
+};
+
+&irqpin {
+ status = "okay";
+};
+
+&tmu0 {
+ status = "okay";
+};
+
+&pfc {
+ scif0_pins: serial0 {
+ renesas,groups = "scif0_data_a", "scif0_ctrl";
+ renesas,function = "scif0";
+ };
+
+ mmc_pins: mmc {
+ renesas,groups = "mmc_data8", "mmc_ctrl";
+ renesas,function = "mmc";
+ };
+
+ sdhi0_pins: sd0 {
+ renesas,groups = "sdhi0_data4", "sdhi0_ctrl",
+ "sdhi0_cd";
+ renesas,function = "sdhi0";
+ };
+
+ hspi0_pins: hspi0 {
+ renesas,groups = "hspi0_a";
+ renesas,function = "hspi0";
+ };
+
+ usb0_pins: usb0 {
+ renesas,groups = "usb0";
+ renesas,function = "usb0";
+ };
+
+ usb1_pins: usb1 {
+ renesas,groups = "usb1";
+ renesas,function = "usb1";
+ };
+
+ vin0_pins: vin0 {
+ renesas,groups = "vin0_data8", "vin0_clk";
+ renesas,function = "vin0";
+ };
+
+ vin1_pins: vin1 {
+ renesas,groups = "vin1_data8", "vin1_clk";
+ renesas,function = "vin1";
+ };
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&fixedregulator3v3>;
+ bus-width = <4>;
+ status = "okay";
+ wp-gpios = <&gpio3 18 GPIO_ACTIVE_HIGH>;
+};
+
+&hspi0 {
+ pinctrl-0 = <&hspi0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ flash: flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25fl008k";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ m25p,fast-read;
+
+ partition@0 {
+ label = "data(spi)";
+ reg = <0x00000000 0x00100000>;
+ };
+ };
+};
+
+&scif0 {
+ pinctrl-0 = <&scif0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index ef8533910029..868f97309533 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -16,6 +16,7 @@
/include/ "skeleton.dtsi"
+#include <dt-bindings/clock/r8a7778-clock.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
@@ -40,6 +41,24 @@
spi2 = &hspi2;
};
+ bsc: bus@1c000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x1c000000>;
+ };
+
+ ether: ethernet@fde00000 {
+ compatible = "renesas,ether-r8a7778";
+ reg = <0xfde00000 0x400>;
+ interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7778_CLK_ETHER>;
+ phy-mode = "rmii";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@fe438000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
@@ -132,6 +151,7 @@
compatible = "renesas,i2c-r8a7778";
reg = <0xffc70000 0x1000>;
interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_I2C0>;
status = "disabled";
};
@@ -141,6 +161,7 @@
compatible = "renesas,i2c-r8a7778";
reg = <0xffc71000 0x1000>;
interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_I2C1>;
status = "disabled";
};
@@ -150,6 +171,7 @@
compatible = "renesas,i2c-r8a7778";
reg = <0xffc72000 0x1000>;
interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_I2C2>;
status = "disabled";
};
@@ -159,6 +181,7 @@
compatible = "renesas,i2c-r8a7778";
reg = <0xffc73000 0x1000>;
interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_I2C3>;
status = "disabled";
};
@@ -168,6 +191,8 @@
interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
<0 33 IRQ_TYPE_LEVEL_HIGH>,
<0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_TMU0>;
+ clock-names = "fck";
#renesas,channels = <3>;
@@ -180,6 +205,8 @@
interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>,
<0 37 IRQ_TYPE_LEVEL_HIGH>,
<0 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_TMU1>;
+ clock-names = "fck";
#renesas,channels = <3>;
@@ -192,16 +219,75 @@
interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>,
<0 41 IRQ_TYPE_LEVEL_HIGH>,
<0 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_TMU2>;
+ clock-names = "fck";
#renesas,channels = <3>;
status = "disabled";
};
+ rcar_sound: sound@ffd90000 {
+ #sound-dai-cells = <1>;
+ compatible = "renesas,rcar_sound-r8a7778", "renesas,rcar_sound-gen1";
+ reg = <0xffd90000 0x1000>, /* SRU */
+ <0xffd91000 0x1240>, /* SSI */
+ <0xfffe0000 0x24>; /* ADG */
+ clocks = <&mstp3_clks R8A7778_CLK_SSI8>,
+ <&mstp3_clks R8A7778_CLK_SSI7>,
+ <&mstp3_clks R8A7778_CLK_SSI6>,
+ <&mstp3_clks R8A7778_CLK_SSI5>,
+ <&mstp3_clks R8A7778_CLK_SSI4>,
+ <&mstp0_clks R8A7778_CLK_SSI3>,
+ <&mstp0_clks R8A7778_CLK_SSI2>,
+ <&mstp0_clks R8A7778_CLK_SSI1>,
+ <&mstp0_clks R8A7778_CLK_SSI0>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC8>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC7>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC6>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC5>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC4>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC3>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC2>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC1>,
+ <&mstp5_clks R8A7778_CLK_SRU_SRC0>,
+ <&audio_clk_a>, <&audio_clk_b>, <&audio_clk_c>,
+ <&cpg_clocks R8A7778_CLK_S1>;
+ clock-names = "ssi.8", "ssi.7", "ssi.6", "ssi.5", "ssi.4",
+ "ssi.3", "ssi.2", "ssi.1", "ssi.0",
+ "src.8", "src.7", "src.6", "src.5", "src.4",
+ "src.3", "src.2", "src.1", "src.0",
+ "clk_a", "clk_b", "clk_c", "clk_i";
+
+ status = "disabled";
+
+ rcar_sound,src {
+ src3: src@3 { };
+ src4: src@4 { };
+ src5: src@5 { };
+ src6: src@6 { };
+ src7: src@7 { };
+ src8: src@8 { };
+ src9: src@9 { };
+ };
+
+ rcar_sound,ssi {
+ ssi3: ssi@3 { interrupts = <0 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi4: ssi@4 { interrupts = <0 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi5: ssi@5 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi6: ssi@6 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi7: ssi@7 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi8: ssi@8 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi9: ssi@9 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ };
+ };
+
scif0: serial@ffe40000 {
compatible = "renesas,scif-r8a7778", "renesas,scif";
reg = <0xffe40000 0x100>;
interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_SCIF0>;
+ clock-names = "sci_ick";
status = "disabled";
};
@@ -209,6 +295,8 @@
compatible = "renesas,scif-r8a7778", "renesas,scif";
reg = <0xffe41000 0x100>;
interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_SCIF1>;
+ clock-names = "sci_ick";
status = "disabled";
};
@@ -216,6 +304,8 @@
compatible = "renesas,scif-r8a7778", "renesas,scif";
reg = <0xffe42000 0x100>;
interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_SCIF2>;
+ clock-names = "sci_ick";
status = "disabled";
};
@@ -223,6 +313,8 @@
compatible = "renesas,scif-r8a7778", "renesas,scif";
reg = <0xffe43000 0x100>;
interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_SCIF3>;
+ clock-names = "sci_ick";
status = "disabled";
};
@@ -230,6 +322,8 @@
compatible = "renesas,scif-r8a7778", "renesas,scif";
reg = <0xffe44000 0x100>;
interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_SCIF4>;
+ clock-names = "sci_ick";
status = "disabled";
};
@@ -237,6 +331,8 @@
compatible = "renesas,scif-r8a7778", "renesas,scif";
reg = <0xffe45000 0x100>;
interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_SCIF5>;
+ clock-names = "sci_ick";
status = "disabled";
};
@@ -244,6 +340,7 @@
compatible = "renesas,sh-mmcif";
reg = <0xffe4e000 0x100>;
interrupts = <0 61 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7778_CLK_MMC>;
status = "disabled";
};
@@ -251,6 +348,7 @@
compatible = "renesas,sdhi-r8a7778";
reg = <0xffe4c000 0x100>;
interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7778_CLK_SDHI0>;
status = "disabled";
};
@@ -258,6 +356,7 @@
compatible = "renesas,sdhi-r8a7778";
reg = <0xffe4d000 0x100>;
interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7778_CLK_SDHI1>;
status = "disabled";
};
@@ -265,6 +364,7 @@
compatible = "renesas,sdhi-r8a7778";
reg = <0xffe4f000 0x100>;
interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7778_CLK_SDHI2>;
status = "disabled";
};
@@ -272,6 +372,7 @@
compatible = "renesas,hspi-r8a7778", "renesas,hspi";
reg = <0xfffc7000 0x18>;
interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -281,6 +382,7 @@
compatible = "renesas,hspi-r8a7778", "renesas,hspi";
reg = <0xfffc8000 0x18>;
interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -290,8 +392,199 @@
compatible = "renesas,hspi-r8a7778", "renesas,hspi";
reg = <0xfffc6000 0x18>;
interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* External input clock */
+ extal_clk: extal_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "extal";
+ };
+
+ /* Special CPG clocks */
+ cpg_clocks: cpg_clocks@ffc80000 {
+ compatible = "renesas,r8a7778-cpg-clocks";
+ reg = <0xffc80000 0x80>;
+ #clock-cells = <1>;
+ clocks = <&extal_clk>;
+ clock-output-names = "plla", "pllb", "b",
+ "out", "p", "s", "s1";
+ };
+
+ /* Audio clocks; frequencies are set by boards if applicable. */
+ audio_clk_a: audio_clk_a {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-output-names = "audio_clk_a";
+ };
+ audio_clk_b: audio_clk_b {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-output-names = "audio_clk_b";
+ };
+ audio_clk_c: audio_clk_c {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-output-names = "audio_clk_c";
+ };
+
+ /* Fixed ratio clocks */
+ g_clk: g_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
+ #clock-cells = <0>;
+ clock-div = <12>;
+ clock-mult = <1>;
+ clock-output-names = "g";
+ };
+ i_clk: i_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
+ #clock-cells = <0>;
+ clock-div = <1>;
+ clock-mult = <1>;
+ clock-output-names = "i";
+ };
+ s3_clk: s3_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
+ #clock-cells = <0>;
+ clock-div = <4>;
+ clock-mult = <1>;
+ clock-output-names = "s3";
+ };
+ s4_clk: s4_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
+ #clock-cells = <0>;
+ clock-div = <8>;
+ clock-mult = <1>;
+ clock-output-names = "s4";
+ };
+ z_clk: z_clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7778_CLK_PLLB>;
+ #clock-cells = <0>;
+ clock-div = <1>;
+ clock-mult = <1>;
+ clock-output-names = "z";
+ };
+
+ /* Gate clocks */
+ mstp0_clks: mstp0_clks@ffc80030 {
+ compatible = "renesas,r8a7778-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0xffc80030 4>;
+ clocks = <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_S>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7778_CLK_I2C0 R8A7778_CLK_I2C1
+ R8A7778_CLK_I2C2 R8A7778_CLK_I2C3
+ R8A7778_CLK_SCIF0 R8A7778_CLK_SCIF1
+ R8A7778_CLK_SCIF2 R8A7778_CLK_SCIF3
+ R8A7778_CLK_SCIF4 R8A7778_CLK_SCIF5
+ R8A7778_CLK_TMU0 R8A7778_CLK_TMU1
+ R8A7778_CLK_TMU2 R8A7778_CLK_SSI0
+ R8A7778_CLK_SSI1 R8A7778_CLK_SSI2
+ R8A7778_CLK_SSI3 R8A7778_CLK_SRU
+ R8A7778_CLK_HSPI
+ >;
+ clock-output-names =
+ "i2c0", "i2c1", "i2c2", "i2c3", "scif0",
+ "scif1", "scif2", "scif3", "scif4", "scif5",
+ "tmu0", "tmu1", "tmu2", "ssi0", "ssi1",
+ "ssi2", "ssi3", "sru", "hspi";
+ };
+ mstp1_clks: mstp1_clks@ffc80034 {
+ compatible = "renesas,r8a7778-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0xffc80034 4>, <0xffc80044 4>;
+ clocks = <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_S>,
+ <&cpg_clocks R8A7778_CLK_S>,
+ <&cpg_clocks R8A7778_CLK_P>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7778_CLK_ETHER R8A7778_CLK_VIN0
+ R8A7778_CLK_VIN1 R8A7778_CLK_USB
+ >;
+ clock-output-names =
+ "ether", "vin0", "vin1", "usb";
+ };
+ mstp3_clks: mstp3_clks@ffc8003c {
+ compatible = "renesas,r8a7778-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0xffc8003c 4>;
+ clocks = <&s4_clk>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7778_CLK_MMC R8A7778_CLK_SDHI0
+ R8A7778_CLK_SDHI1 R8A7778_CLK_SDHI2
+ R8A7778_CLK_SSI4 R8A7778_CLK_SSI5
+ R8A7778_CLK_SSI6 R8A7778_CLK_SSI7
+ R8A7778_CLK_SSI8
+ >;
+ clock-output-names =
+ "mmc", "sdhi0", "sdhi1", "sdhi2", "ssi4",
+ "ssi5", "ssi6", "ssi7", "ssi8";
+ };
+ mstp5_clks: mstp5_clks@ffc80054 {
+ compatible = "renesas,r8a7778-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0xffc80054 4>;
+ clocks = <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_P>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7778_CLK_SRU_SRC0 R8A7778_CLK_SRU_SRC1
+ R8A7778_CLK_SRU_SRC2 R8A7778_CLK_SRU_SRC3
+ R8A7778_CLK_SRU_SRC4 R8A7778_CLK_SRU_SRC5
+ R8A7778_CLK_SRU_SRC6 R8A7778_CLK_SRU_SRC7
+ R8A7778_CLK_SRU_SRC8
+ >;
+ clock-output-names =
+ "sru-src0", "sru-src1", "sru-src2",
+ "sru-src3", "sru-src4", "sru-src5",
+ "sru-src6", "sru-src7", "sru-src8";
+ };
+ };
};
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index e83d40e24bcd..540756cdf391 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -122,6 +122,12 @@
};
};
};
+
+ x3_clk: x3-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <65000000>;
+ };
};
&du {
@@ -129,6 +135,9 @@
pinctrl-names = "default";
status = "okay";
+ clocks = <&mstp1_clks R8A7779_CLK_DU>, <&x3_clk>;
+ clock-names = "du", "dclkin.0";
+
ports {
port@0 {
endpoint {
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 0c3b6783b72a..aaa4f258e279 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -222,6 +222,29 @@
};
};
};
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&adv7511_out>;
+ };
+ };
+ };
+
+ x2_clk: x2-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <148500000>;
+ };
+
+ x13_clk: x13-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <148500000>;
+ };
};
&du {
@@ -229,12 +252,26 @@
pinctrl-names = "default";
status = "okay";
+ clocks = <&mstp7_clks R8A7790_CLK_DU0>,
+ <&mstp7_clks R8A7790_CLK_DU1>,
+ <&mstp7_clks R8A7790_CLK_DU2>,
+ <&mstp7_clks R8A7790_CLK_LVDS0>,
+ <&mstp7_clks R8A7790_CLK_LVDS1>,
+ <&x13_clk>, <&x2_clk>;
+ clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1",
+ "dclkin.0", "dclkin.1";
+
ports {
port@0 {
endpoint {
remote-endpoint = <&adv7123_in>;
};
};
+ port@1 {
+ endpoint {
+ remote-endpoint = <&adv7511_in>;
+ };
+ };
port@2 {
lvds_connector: endpoint {
};
@@ -506,6 +543,38 @@
};
};
};
+
+ hdmi@39 {
+ compatible = "adi,adv7511w";
+ reg = <0x39>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+ adi,input-style = <1>;
+ adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7511_in: endpoint {
+ remote-endpoint = <&du_out_lvds0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7511_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
};
&iic3 {
@@ -513,9 +582,27 @@
pinctrl-0 = <&iic3_pins>;
status = "okay";
+ pmic@58 {
+ compatible = "dlg,da9063";
+ reg = <0x58>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+ };
+
+ wdt {
+ compatible = "dlg,da9063-watchdog";
+ };
+ };
+
vdd_dvfs: regulator@68 {
compatible = "dlg,da9210";
reg = <0x68>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 4b38fc920114..4bb2f4c17321 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1,6 +1,7 @@
/*
* Device Tree Source for the r8a7790 SoC
*
+ * Copyright (C) 2015 Renesas Electronics Corporation
* Copyright (C) 2013-2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded Inc.
*
@@ -369,13 +370,6 @@
dma-channels = <13>;
};
- audmapp: dma-controller@ec740000 {
- compatible = "renesas,rcar-audmapp";
- #dma-cells = <1>;
-
- reg = <0 0xec740000 0 0x200>;
- };
-
i2c0: i2c@e6508000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -493,17 +487,21 @@
sdhi0: sd@ee100000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee100000 0 0x200>;
+ reg = <0 0xee100000 0 0x328>;
interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI0>;
+ dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx";
status = "disabled";
};
sdhi1: sd@ee120000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee120000 0 0x200>;
+ reg = <0 0xee120000 0 0x328>;
interrupts = <0 166 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI1>;
+ dmas = <&dmac1 0xc9>, <&dmac1 0xca>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -512,6 +510,8 @@
reg = <0 0xee140000 0 0x100>;
interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI2>;
+ dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -520,6 +520,8 @@
reg = <0 0xee160000 0 0x100>;
interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI3>;
+ dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -792,6 +794,26 @@
};
};
+ can0: can@e6e80000 {
+ compatible = "renesas,can-r8a7790";
+ reg = <0 0xe6e80000 0 0x1000>;
+ interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7790_CLK_RCAN0>,
+ <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ status = "disabled";
+ };
+
+ can1: can@e6e88000 {
+ compatible = "renesas,can-r8a7790";
+ reg = <0 0xe6e88000 0 0x1000>;
+ interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7790_CLK_RCAN1>,
+ <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ status = "disabled";
+ };
+
clocks {
#address-cells = <2>;
#size-cells = <2>;
@@ -838,16 +860,34 @@
clock-output-names = "audio_clk_c";
};
+ /* External USB clock - can be overridden by the board */
+ usb_extal_clk: usb_extal_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ clock-output-names = "usb_extal";
+ };
+
+ /* External CAN clock */
+ can_clk: can_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ clock-output-names = "can_clk";
+ status = "disabled";
+ };
+
/* Special CPG clocks */
cpg_clocks: cpg_clocks@e6150000 {
compatible = "renesas,r8a7790-cpg-clocks",
"renesas,rcar-gen2-cpg-clocks";
reg = <0 0xe6150000 0 0x1000>;
- clocks = <&extal_clk>;
+ clocks = <&extal_clk &usb_extal_clk>;
#clock-cells = <1>;
clock-output-names = "main", "pll0", "pll1", "pll3",
"lb", "qspi", "sdh", "sd0", "sd1",
- "z";
+ "z", "rcan", "adsp";
};
/* Variable factor clocks */
@@ -1121,18 +1161,21 @@
mstp5_clks: mstp5_clks@e6150144 {
compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150144 0 4>, <0 0xe615003c 0 4>;
- clocks = <&hp_clk>, <&hp_clk>, <&extal_clk>, <&p_clk>;
+ clocks = <&hp_clk>, <&hp_clk>, <&cpg_clocks R8A7790_CLK_ADSP>,
+ <&extal_clk>, <&p_clk>;
#clock-cells = <1>;
clock-indices = <
R8A7790_CLK_AUDIO_DMAC0 R8A7790_CLK_AUDIO_DMAC1
- R8A7790_CLK_THERMAL R8A7790_CLK_PWM
+ R8A7790_CLK_ADSP_MOD R8A7790_CLK_THERMAL
+ R8A7790_CLK_PWM
>;
- clock-output-names = "audmac0", "audmac1", "thermal", "pwm";
+ clock-output-names = "audmac0", "audmac1", "adsp_mod",
+ "thermal", "pwm";
};
mstp7_clks: mstp7_clks@e615014c {
compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
- clocks = <&mp_clk>, <&mp_clk>, <&zs_clk>, <&zs_clk>, <&p_clk>,
+ clocks = <&mp_clk>, <&hp_clk>, <&zs_clk>, <&zs_clk>, <&p_clk>,
<&p_clk>, <&zx_clk>, <&zx_clk>, <&zx_clk>, <&zx_clk>,
<&zx_clk>;
#clock-cells = <1>;
@@ -1410,7 +1453,10 @@
reg = <0 0xec500000 0 0x1000>, /* SCU */
<0 0xec5a0000 0 0x100>, /* ADG */
<0 0xec540000 0 0x1000>, /* SSIU */
- <0 0xec541000 0 0x1280>; /* SSI */
+ <0 0xec541000 0 0x1280>, /* SSI */
+ <0 0xec740000 0 0x200>; /* Audio DMAC peri peri*/
+ reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
clocks = <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&mstp10_clks R8A7790_CLK_SSI9>, <&mstp10_clks R8A7790_CLK_SSI8>,
<&mstp10_clks R8A7790_CLK_SSI7>, <&mstp10_clks R8A7790_CLK_SSI6>,
@@ -1435,34 +1481,171 @@
status = "disabled";
rcar_sound,dvc {
- dvc0: dvc@0 { };
- dvc1: dvc@1 { };
+ dvc0: dvc@0 {
+ dmas = <&audma0 0xbc>;
+ dma-names = "tx";
+ };
+ dvc1: dvc@1 {
+ dmas = <&audma0 0xbe>;
+ dma-names = "tx";
+ };
};
rcar_sound,src {
- src0: src@0 { interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>; };
- src1: src@1 { interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>; };
- src2: src@2 { interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>; };
- src3: src@3 { interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>; };
- src4: src@4 { interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>; };
- src5: src@5 { interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>; };
- src6: src@6 { interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>; };
- src7: src@7 { interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>; };
- src8: src@8 { interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>; };
- src9: src@9 { interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>; };
+ src0: src@0 {
+ interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x85>, <&audma1 0x9a>;
+ dma-names = "rx", "tx";
+ };
+ src1: src@1 {
+ interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x87>, <&audma1 0x9c>;
+ dma-names = "rx", "tx";
+ };
+ src2: src@2 {
+ interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x89>, <&audma1 0x9e>;
+ dma-names = "rx", "tx";
+ };
+ src3: src@3 {
+ interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+ dma-names = "rx", "tx";
+ };
+ src4: src@4 {
+ interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+ dma-names = "rx", "tx";
+ };
+ src5: src@5 {
+ interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+ dma-names = "rx", "tx";
+ };
+ src6: src@6 {
+ interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x91>, <&audma1 0xb4>;
+ dma-names = "rx", "tx";
+ };
+ src7: src@7 {
+ interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x93>, <&audma1 0xb6>;
+ dma-names = "rx", "tx";
+ };
+ src8: src@8 {
+ interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x95>, <&audma1 0xb8>;
+ dma-names = "rx", "tx";
+ };
+ src9: src@9 {
+ interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x97>, <&audma1 0xba>;
+ dma-names = "rx", "tx";
+ };
};
rcar_sound,ssi {
- ssi0: ssi@0 { interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>; };
- ssi1: ssi@1 { interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>; };
- ssi2: ssi@2 { interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>; };
- ssi3: ssi@3 { interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>; };
- ssi4: ssi@4 { interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>; };
- ssi5: ssi@5 { interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>; };
- ssi6: ssi@6 { interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>; };
- ssi7: ssi@7 { interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>; };
- ssi8: ssi@8 { interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>; };
- ssi9: ssi@9 { interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi0: ssi@0 {
+ interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi1: ssi@1 {
+ interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi2: ssi@2 {
+ interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi3: ssi@3 {
+ interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi4: ssi@4 {
+ interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi5: ssi@5 {
+ interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi6: ssi@6 {
+ interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi7: ssi@7 {
+ interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi8: ssi@8 {
+ interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi9: ssi@9 {
+ interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
};
};
+
+ ipmmu_sy0: mmu@e6280000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6280000 0 0x1000>;
+ interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
+ <0 224 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_sy1: mmu@e6290000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6290000 0 0x1000>;
+ interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_ds: mmu@e6740000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6740000 0 0x1000>;
+ interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
+ <0 199 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_mp: mmu@ec680000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xec680000 0 0x1000>;
+ interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_mx: mmu@fe951000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xfe951000 0 0x1000>;
+ interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_rt: mmu@ffc80000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xffc80000 0 0x1000>;
+ interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/r8a7791-henninger.dts b/arch/arm/boot/dts/r8a7791-henninger.dts
index d2ebf11f9881..e33e4047b0b0 100644
--- a/arch/arm/boot/dts/r8a7791-henninger.dts
+++ b/arch/arm/boot/dts/r8a7791-henninger.dts
@@ -141,6 +141,11 @@
renesas,groups = "vin0_data8", "vin0_clk";
renesas,function = "vin0";
};
+
+ can0_pins: can0 {
+ renesas,groups = "can0_data";
+ renesas,function = "can0";
+ };
};
&scif0 {
@@ -307,3 +312,9 @@
};
};
};
+
+&can0 {
+ pinctrl-0 = <&can0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index a3c27807f6c5..824ddab9c3ad 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -258,6 +258,29 @@
system-clock-frequency = <11289600>;
};
};
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&adv7511_out>;
+ };
+ };
+ };
+
+ x2_clk: x2-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <148500000>;
+ };
+
+ x13_clk: x13-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <148500000>;
+ };
};
&du {
@@ -265,7 +288,19 @@
pinctrl-names = "default";
status = "okay";
+ clocks = <&mstp7_clks R8A7791_CLK_DU0>,
+ <&mstp7_clks R8A7791_CLK_DU1>,
+ <&mstp7_clks R8A7791_CLK_LVDS0>,
+ <&x13_clk>, <&x2_clk>;
+ clock-names = "du.0", "du.1", "lvds.0",
+ "dclkin.0", "dclkin.1";
+
ports {
+ port@0 {
+ endpoint {
+ remote-endpoint = <&adv7511_in>;
+ };
+ };
port@1 {
lvds_connector: endpoint {
};
@@ -284,7 +319,7 @@
};
du_pins: du {
- renesas,groups = "du_rgb666", "du_sync", "du_clk_out_0";
+ renesas,groups = "du_rgb666", "du_sync", "du_disp", "du_clk_out_0";
renesas,function = "du";
};
@@ -506,6 +541,38 @@
};
};
+ hdmi@39 {
+ compatible = "adi,adv7511w";
+ reg = <0x39>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+ adi,input-style = <1>;
+ adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7511_in: endpoint {
+ remote-endpoint = <&du_out_rgb>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7511_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
+
eeprom@50 {
compatible = "renesas,24c02";
reg = <0x50>;
@@ -517,9 +584,27 @@
status = "okay";
clock-frequency = <100000>;
+ pmic@58 {
+ compatible = "dlg,da9063";
+ reg = <0x58>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+ };
+
+ wdt {
+ compatible = "dlg,da9063-watchdog";
+ };
+ };
+
vdd_dvfs: regulator@68 {
compatible = "dlg,da9210";
reg = <0x68>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index e35812a0d8d4..4696062f6dde 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1,7 +1,7 @@
/*
* Device Tree Source for the r8a7791 SoC
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
* Copyright (C) 2013-2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded Inc.
*
@@ -357,13 +357,6 @@
dma-channels = <13>;
};
- audmapp: dma-controller@ec740000 {
- compatible = "renesas,rcar-audmapp";
- #dma-cells = <1>;
-
- reg = <0 0xec740000 0 0x200>;
- };
-
/* The memory map in the User's Manual maps the cores to bus numbers */
i2c0: i2c@e6508000 {
#address-cells = <1>;
@@ -482,9 +475,11 @@
sdhi0: sd@ee100000 {
compatible = "renesas,sdhi-r8a7791";
- reg = <0 0xee100000 0 0x200>;
+ reg = <0 0xee100000 0 0x328>;
interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_SDHI0>;
+ dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -493,6 +488,8 @@
reg = <0 0xee140000 0 0x100>;
interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_SDHI1>;
+ dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -501,6 +498,8 @@
reg = <0 0xee160000 0 0x100>;
interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_SDHI2>;
+ dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -816,6 +815,26 @@
};
};
+ can0: can@e6e80000 {
+ compatible = "renesas,can-r8a7791";
+ reg = <0 0xe6e80000 0 0x1000>;
+ interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
+ <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ status = "disabled";
+ };
+
+ can1: can@e6e88000 {
+ compatible = "renesas,can-r8a7791";
+ reg = <0 0xe6e88000 0 0x1000>;
+ interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7791_CLK_RCAN1>,
+ <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ status = "disabled";
+ };
+
clocks {
#address-cells = <2>;
#size-cells = <2>;
@@ -862,31 +881,50 @@
status = "disabled";
};
+ /* External USB clock - can be overridden by the board */
+ usb_extal_clk: usb_extal_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ clock-output-names = "usb_extal";
+ };
+
+ /* External CAN clock */
+ can_clk: can_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ clock-output-names = "can_clk";
+ status = "disabled";
+ };
+
/* Special CPG clocks */
cpg_clocks: cpg_clocks@e6150000 {
compatible = "renesas,r8a7791-cpg-clocks",
"renesas,rcar-gen2-cpg-clocks";
reg = <0 0xe6150000 0 0x1000>;
- clocks = <&extal_clk>;
+ clocks = <&extal_clk &usb_extal_clk>;
#clock-cells = <1>;
clock-output-names = "main", "pll0", "pll1", "pll3",
- "lb", "qspi", "sdh", "sd0", "z";
+ "lb", "qspi", "sdh", "sd0", "z",
+ "rcan", "adsp";
};
/* Variable factor clocks */
- sd1_clk: sd2_clk@e6150078 {
+ sd2_clk: sd2_clk@e6150078 {
compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
reg = <0 0xe6150078 0 4>;
clocks = <&pll1_div2_clk>;
#clock-cells = <0>;
- clock-output-names = "sd1";
+ clock-output-names = "sd2";
};
- sd2_clk: sd3_clk@e615026c {
+ sd3_clk: sd3_clk@e615026c {
compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
reg = <0 0xe615026c 0 4>;
clocks = <&pll1_div2_clk>;
#clock-cells = <0>;
- clock-output-names = "sd2";
+ clock-output-names = "sd3";
};
mmc0_clk: mmc0_clk@e6150240 {
compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
@@ -1107,7 +1145,7 @@
mstp3_clks: mstp3_clks@e615013c {
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
- clocks = <&cp_clk>, <&sd2_clk>, <&sd1_clk>, <&cpg_clocks R8A7791_CLK_SD0>,
+ clocks = <&cp_clk>, <&sd3_clk>, <&sd2_clk>, <&cpg_clocks R8A7791_CLK_SD0>,
<&mmc0_clk>, <&hp_clk>, <&mp_clk>, <&hp_clk>, <&mp_clk>, <&rclk_clk>,
<&hp_clk>, <&hp_clk>;
#clock-cells = <1>;
@@ -1125,18 +1163,21 @@
mstp5_clks: mstp5_clks@e6150144 {
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150144 0 4>, <0 0xe615003c 0 4>;
- clocks = <&hp_clk>, <&hp_clk>, <&extal_clk>, <&p_clk>;
+ clocks = <&hp_clk>, <&hp_clk>, <&cpg_clocks R8A7791_CLK_ADSP>,
+ <&extal_clk>, <&p_clk>;
#clock-cells = <1>;
clock-indices = <
R8A7791_CLK_AUDIO_DMAC0 R8A7791_CLK_AUDIO_DMAC1
- R8A7791_CLK_THERMAL R8A7791_CLK_PWM
+ R8A7791_CLK_ADSP_MOD R8A7791_CLK_THERMAL
+ R8A7791_CLK_PWM
>;
- clock-output-names = "audmac0", "audmac1", "thermal", "pwm";
+ clock-output-names = "audmac0", "audmac1", "adsp_mod",
+ "thermal", "pwm";
};
mstp7_clks: mstp7_clks@e615014c {
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
- clocks = <&mp_clk>, <&mp_clk>, <&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
+ clocks = <&mp_clk>, <&hp_clk>, <&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
<&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
<&zx_clk>, <&zx_clk>, <&zx_clk>;
#clock-cells = <1>;
@@ -1154,7 +1195,7 @@
mstp8_clks: mstp8_clks@e6150990 {
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150990 0 4>, <0 0xe61509a0 0 4>;
- clocks = <&zg_clk>, <&hp_clk>, <&zg_clk>, <&zg_clk>,
+ clocks = <&zx_clk>, <&hp_clk>, <&zg_clk>, <&zg_clk>,
<&zg_clk>, <&p_clk>, <&zs_clk>, <&zs_clk>;
#clock-cells = <1>;
clock-indices = <
@@ -1384,6 +1425,66 @@
status = "disabled";
};
+ ipmmu_sy0: mmu@e6280000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6280000 0 0x1000>;
+ interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
+ <0 224 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_sy1: mmu@e6290000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6290000 0 0x1000>;
+ interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_ds: mmu@e6740000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6740000 0 0x1000>;
+ interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
+ <0 199 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_mp: mmu@ec680000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xec680000 0 0x1000>;
+ interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_mx: mmu@fe951000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xfe951000 0 0x1000>;
+ interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_rt: mmu@ffc80000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xffc80000 0 0x1000>;
+ interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_gp: mmu@e62a0000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe62a0000 0 0x1000>;
+ interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
+ <0 261 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
rcar_sound: rcar_sound@ec500000 {
/*
* #sound-dai-cells is required
@@ -1395,7 +1496,10 @@
reg = <0 0xec500000 0 0x1000>, /* SCU */
<0 0xec5a0000 0 0x100>, /* ADG */
<0 0xec540000 0 0x1000>, /* SSIU */
- <0 0xec541000 0 0x1280>; /* SSI */
+ <0 0xec541000 0 0x1280>, /* SSI */
+ <0 0xec740000 0 0x200>; /* Audio DMAC peri peri */
+ reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
clocks = <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&mstp10_clks R8A7791_CLK_SSI9>, <&mstp10_clks R8A7791_CLK_SSI8>,
<&mstp10_clks R8A7791_CLK_SSI7>, <&mstp10_clks R8A7791_CLK_SSI6>,
@@ -1420,34 +1524,120 @@
status = "disabled";
rcar_sound,dvc {
- dvc0: dvc@0 { };
- dvc1: dvc@1 { };
+ dvc0: dvc@0 {
+ dmas = <&audma0 0xbc>;
+ dma-names = "tx";
+ };
+ dvc1: dvc@1 {
+ dmas = <&audma0 0xbe>;
+ dma-names = "tx";
+ };
};
rcar_sound,src {
- src0: src@0 { interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>; };
- src1: src@1 { interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>; };
- src2: src@2 { interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>; };
- src3: src@3 { interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>; };
- src4: src@4 { interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>; };
- src5: src@5 { interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>; };
- src6: src@6 { interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>; };
- src7: src@7 { interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>; };
- src8: src@8 { interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>; };
- src9: src@9 { interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>; };
+ src0: src@0 {
+ interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x85>, <&audma1 0x9a>;
+ dma-names = "rx", "tx";
+ };
+ src1: src@1 {
+ interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x87>, <&audma1 0x9c>;
+ dma-names = "rx", "tx";
+ };
+ src2: src@2 {
+ interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x89>, <&audma1 0x9e>;
+ dma-names = "rx", "tx";
+ };
+ src3: src@3 {
+ interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+ dma-names = "rx", "tx";
+ };
+ src4: src@4 {
+ interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+ dma-names = "rx", "tx";
+ };
+ src5: src@5 {
+ interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+ dma-names = "rx", "tx";
+ };
+ src6: src@6 {
+ interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x91>, <&audma1 0xb4>;
+ dma-names = "rx", "tx";
+ };
+ src7: src@7 {
+ interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x93>, <&audma1 0xb6>;
+ dma-names = "rx", "tx";
+ };
+ src8: src@8 {
+ interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x95>, <&audma1 0xb8>;
+ dma-names = "rx", "tx";
+ };
+ src9: src@9 {
+ interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x97>, <&audma1 0xba>;
+ dma-names = "rx", "tx";
+ };
};
rcar_sound,ssi {
- ssi0: ssi@0 { interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>; };
- ssi1: ssi@1 { interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>; };
- ssi2: ssi@2 { interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>; };
- ssi3: ssi@3 { interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>; };
- ssi4: ssi@4 { interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>; };
- ssi5: ssi@5 { interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>; };
- ssi6: ssi@6 { interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>; };
- ssi7: ssi@7 { interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>; };
- ssi8: ssi@8 { interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>; };
- ssi9: ssi@9 { interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi0: ssi@0 {
+ interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi1: ssi@1 {
+ interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi2: ssi@2 {
+ interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi3: ssi@3 {
+ interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi4: ssi@4 {
+ interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi5: ssi@5 {
+ interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi6: ssi@6 {
+ interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi7: ssi@7 {
+ interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi8: ssi@8 {
+ interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi9: ssi@9 {
+ interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index 0d848e605071..928cfa641475 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -43,6 +43,19 @@
status = "okay";
};
+&ether {
+ phy-handle = <&phy1>;
+ renesas,ether-link-active-low;
+ status = "okay";
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ micrel,led-mode = <1>;
+ };
+};
+
&scif2 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 8f78da5ef10b..7a3ffa51a8bf 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -107,6 +107,66 @@
<0 17 IRQ_TYPE_LEVEL_HIGH>;
};
+ dmac0: dma-controller@e6700000 {
+ compatible = "renesas,rcar-dmac";
+ reg = <0 0xe6700000 0 0x20000>;
+ interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
+ 0 200 IRQ_TYPE_LEVEL_HIGH
+ 0 201 IRQ_TYPE_LEVEL_HIGH
+ 0 202 IRQ_TYPE_LEVEL_HIGH
+ 0 203 IRQ_TYPE_LEVEL_HIGH
+ 0 204 IRQ_TYPE_LEVEL_HIGH
+ 0 205 IRQ_TYPE_LEVEL_HIGH
+ 0 206 IRQ_TYPE_LEVEL_HIGH
+ 0 207 IRQ_TYPE_LEVEL_HIGH
+ 0 208 IRQ_TYPE_LEVEL_HIGH
+ 0 209 IRQ_TYPE_LEVEL_HIGH
+ 0 210 IRQ_TYPE_LEVEL_HIGH
+ 0 211 IRQ_TYPE_LEVEL_HIGH
+ 0 212 IRQ_TYPE_LEVEL_HIGH
+ 0 213 IRQ_TYPE_LEVEL_HIGH
+ 0 214 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&mstp2_clks R8A7794_CLK_SYS_DMAC0>;
+ clock-names = "fck";
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
+
+ dmac1: dma-controller@e6720000 {
+ compatible = "renesas,rcar-dmac";
+ reg = <0 0xe6720000 0 0x20000>;
+ interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
+ 0 216 IRQ_TYPE_LEVEL_HIGH
+ 0 217 IRQ_TYPE_LEVEL_HIGH
+ 0 218 IRQ_TYPE_LEVEL_HIGH
+ 0 219 IRQ_TYPE_LEVEL_HIGH
+ 0 308 IRQ_TYPE_LEVEL_HIGH
+ 0 309 IRQ_TYPE_LEVEL_HIGH
+ 0 310 IRQ_TYPE_LEVEL_HIGH
+ 0 311 IRQ_TYPE_LEVEL_HIGH
+ 0 312 IRQ_TYPE_LEVEL_HIGH
+ 0 313 IRQ_TYPE_LEVEL_HIGH
+ 0 314 IRQ_TYPE_LEVEL_HIGH
+ 0 315 IRQ_TYPE_LEVEL_HIGH
+ 0 316 IRQ_TYPE_LEVEL_HIGH
+ 0 317 IRQ_TYPE_LEVEL_HIGH
+ 0 318 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&mstp2_clks R8A7794_CLK_SYS_DMAC1>;
+ clock-names = "fck";
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
+
scifa0: serial@e6c40000 {
compatible = "renesas,scifa-r8a7794", "renesas,scifa";
reg = <0 0xe6c40000 0 64>;
@@ -269,6 +329,41 @@
status = "disabled";
};
+ ether: ethernet@ee700000 {
+ compatible = "renesas,ether-r8a7794";
+ reg = <0 0xee700000 0 0x400>;
+ interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7794_CLK_ETHER>;
+ phy-mode = "rmii";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhi0: sd@ee100000 {
+ compatible = "renesas,sdhi-r8a7794";
+ reg = <0 0xee100000 0 0x200>;
+ interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7794_CLK_SDHI0>;
+ status = "disabled";
+ };
+
+ sdhi1: sd@ee140000 {
+ compatible = "renesas,sdhi-r8a7794";
+ reg = <0 0xee140000 0 0x100>;
+ interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7794_CLK_SDHI1>;
+ status = "disabled";
+ };
+
+ sdhi2: sd@ee160000 {
+ compatible = "renesas,sdhi-r8a7794";
+ reg = <0 0xee160000 0 0x100>;
+ interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7794_CLK_SDHI2>;
+ status = "disabled";
+ };
+
clocks {
#address-cells = <2>;
#size-cells = <2>;
@@ -294,19 +389,19 @@
"lb", "qspi", "sdh", "sd0", "z";
};
/* Variable factor clocks */
- sd1_clk: sd2_clk@e6150078 {
+ sd2_clk: sd2_clk@e6150078 {
compatible = "renesas,r8a7794-div6-clock", "renesas,cpg-div6-clock";
reg = <0 0xe6150078 0 4>;
clocks = <&pll1_div2_clk>;
#clock-cells = <0>;
- clock-output-names = "sd1";
+ clock-output-names = "sd2";
};
- sd2_clk: sd3_clk@e615007c {
+ sd3_clk: sd3_clk@e615026c {
compatible = "renesas,r8a7794-div6-clock", "renesas,cpg-div6-clock";
- reg = <0 0xe615007c 0 4>;
+ reg = <0 0xe615026c 0 4>;
clocks = <&pll1_div2_clk>;
#clock-cells = <0>;
- clock-output-names = "sd2";
+ clock-output-names = "sd3";
};
mmc0_clk: mmc0_clk@e6150240 {
compatible = "renesas,r8a7794-div6-clock", "renesas,cpg-div6-clock";
@@ -518,7 +613,7 @@
mstp3_clks: mstp3_clks@e615013c {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
- clocks = <&sd2_clk>, <&sd1_clk>, <&cpg_clocks R8A7794_CLK_SD0>,
+ clocks = <&sd3_clk>, <&sd2_clk>, <&cpg_clocks R8A7794_CLK_SD0>,
<&mmc0_clk>, <&rclk_clk>, <&hp_clk>, <&hp_clk>;
#clock-cells = <1>;
clock-indices = <
@@ -585,4 +680,54 @@
clock-output-names = "scifa3", "scifa4", "scifa5";
};
};
+
+ ipmmu_sy0: mmu@e6280000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6280000 0 0x1000>;
+ interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
+ <0 224 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_sy1: mmu@e6290000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6290000 0 0x1000>;
+ interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_ds: mmu@e6740000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe6740000 0 0x1000>;
+ interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
+ <0 199 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_mp: mmu@ec680000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xec680000 0 0x1000>;
+ interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
+
+ ipmmu_mx: mmu@fe951000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xfe951000 0 0x1000>;
+ interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
+ <0 221 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_gp: mmu@e62a0000 {
+ compatible = "renesas,ipmmu-vmsa";
+ reg = <0 0xe62a0000 0 0x1000>;
+ interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
+ <0 261 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index 9a09579b8309..bdf85701987d 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -103,6 +103,14 @@
regulator-always-on;
regulator-boot-on;
};
+
+ vsys: vsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ };
};
&emac {
@@ -148,6 +156,14 @@
pinctrl-names = "default";
pinctrl-0 = <&act8846_dvs0_ctl>;
+ vp1-supply = <&vsys>;
+ vp2-supply = <&vsys>;
+ vp3-supply = <&vsys>;
+ vp4-supply = <&vsys>;
+ inl1-supply = <&vcc_io>;
+ inl2-supply = <&vsys>;
+ inl3-supply = <&vsys>;
+
regulators {
vcc_ddr: REG1 {
regulator-name = "VCC_DDR";
diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
index d7b8bbc0c25f..1687e8336994 100644
--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
+++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
@@ -33,6 +33,7 @@
regulator-max-microvolt = <1350000>;
regulator-always-on;
regulator-boot-on;
+ vin-supply = <&vcc_sys>;
};
vdd_gpu: syr828@41 {
@@ -43,6 +44,7 @@
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
+ vin-supply = <&vcc_sys>;
};
hym8563@51 {
@@ -64,6 +66,14 @@
reg = <0x5a>;
status = "okay";
+ vp1-supply = <&vcc_sys>;
+ vp2-supply = <&vcc_sys>;
+ vp3-supply = <&vcc_sys>;
+ vp4-supply = <&vcc_sys>;
+ inl1-supply = <&vcc_io>;
+ inl2-supply = <&vcc_sys>;
+ inl3-supply = <&vcc_20>;
+
regulators {
vcc_ddr: REG1 {
regulator-name = "VCC_DDR";
diff --git a/arch/arm/boot/dts/rk3288-evb-rk808.dts b/arch/arm/boot/dts/rk3288-evb-rk808.dts
index a1c294bf7fed..f62ea78754a9 100644
--- a/arch/arm/boot/dts/rk3288-evb-rk808.dts
+++ b/arch/arm/boot/dts/rk3288-evb-rk808.dts
@@ -43,9 +43,16 @@
#clock-cells = <1>;
clock-output-names = "xin32k", "rk808-clkout2";
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc6-supply = <&vcc_sys>;
+ vcc7-supply = <&vcc_sys>;
vcc8-supply = <&vcc_18>;
vcc9-supply = <&vcc_io>;
vcc10-supply = <&vcc_io>;
+ vcc11-supply = <&vcc_sys>;
vcc12-supply = <&vcc_io>;
vddio-supply = <&vccio_pmu>;
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 5e895a514a0b..4a457518d861 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -103,6 +103,15 @@
regulator-always-on;
regulator-boot-on;
};
+
+ vcc_sys: vsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
&emmc {
@@ -238,6 +247,10 @@
};
};
+&usbphy {
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index e6f873abbe0d..b54dd78580c1 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -179,6 +179,22 @@
status = "okay";
};
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ clock_in_out = "input";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>, <&phy_rst>, <&phy_pmeb>, <&phy_int>;
+ phy-supply = <&vcc_lan>;
+ phy-mode = "rgmii";
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-gpio = <&gpio4 8 GPIO_ACTIVE_LOW>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "ok";
+};
+
&hdmi {
ddc-i2c-bus = <&i2c5>;
status = "okay";
@@ -459,6 +475,10 @@
status = "okay";
};
+&usbphy {
+ status = "okay";
+};
+
&usb_host1 {
pinctrl-names = "default";
pinctrl-0 = <&usbhub_rst>;
diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts
new file mode 100644
index 000000000000..d081f0e0da36
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-popmetal.dts
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2014, 2015 Andy Yan <andy.yan@rock-chips.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "rk3288.dtsi"
+
+/ {
+ model = "PopMetal-RK3288";
+ compatible = "chipspark,popmetal-rk3288", "rockchip,rk3288";
+
+ memory {
+ reg = <0 0x80000000>;
+ };
+
+ ext_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ #clock-cells = <0>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwrbtn>;
+
+ button@0 {
+ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ linux,code = <116>;
+ label = "GPIO Key Power";
+ linux,input-type = <1>;
+ gpio-key,wakeup = <1>;
+ debounce-interval = <100>;
+ };
+ };
+
+ ir: ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_int>;
+ };
+
+ vcc_sys: vsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&cpu0 {
+ cpu0-supply = <&vdd_cpu>;
+};
+
+&emmc {
+ broken-cd;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ disable-wp; /* wp not hooked up */
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+ status = "okay";
+};
+
+&gmac {
+ phy-supply = <&vcc_lan>;
+ phy-mode = "rgmii";
+ clock_in_out = "input";
+ snps,reset-gpio = <&gpio4 7 0>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "ok";
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c5>;
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int &global_pwroff>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk808-clkout2";
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc6-supply = <&vcc_sys>;
+ vcc7-supply = <&vcc_sys>;
+ vcc8-supply = <&vcc_18>;
+ vcc9-supply = <&vcc_io>;
+ vcc10-supply = <&vcc_io>;
+ vcc11-supply = <&vcc_sys>;
+ vcc12-supply = <&vcc_io>;
+ vddio-supply = <&vcc_io>;
+
+ regulators {
+ vdd_cpu: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-name = "vdd_arm";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-name = "vdd_gpu";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_ddr";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_io";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_lan: LDO_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_lan";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_sd: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_10: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd_10";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc18_lcd: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18_lcd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ ldo5: LDO_REG5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "ldo5";
+ };
+
+ vdd10_lcd: LDO_REG6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd10_lcd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_18: LDO_REG7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_18";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca_codec: LDO_REG8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcca_codec";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_wl: SWITCH_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_wl";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_lcd: SWITCH_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_lcd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ ak8963: ak8963@0d {
+ compatible = "asahi-kasei,ak8975";
+ reg = <0x0d>;
+ interrupt-parent = <&gpio8>;
+ interrupts = <1 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&comp_int>;
+ };
+
+ l3g4200d: l3g4200d@68 {
+ compatible = "st,l3g4200d-gyro";
+ st,drdy-int-pin = <2>;
+ reg = <0x6b>;
+ };
+
+ mma8452: mma8452@1d {
+ compatible = "fsl,mma8452";
+ reg = <0x1d>;
+ interrupt-parent = <&gpio8>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gsensor_int>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&pinctrl {
+ ak8963 {
+ comp_int: comp-int {
+ rockchip,pins = <8 1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ buttons {
+ pwrbtn: pwrbtn {
+ rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ ir {
+ ir_int: ir-int {
+ rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ mma8452 {
+ gsensor_int: gsensor-int {
+ rockchip,pins = <8 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index eccc78d3220b..165968d51d8f 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -420,6 +420,8 @@
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_USBHOST0>;
clock-names = "usbhost";
+ phys = <&usbphy1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -432,6 +434,8 @@
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_USBHOST1>;
clock-names = "otg";
+ phys = <&usbphy2>;
+ phy-names = "usb2-phy";
status = "disabled";
};
@@ -442,6 +446,8 @@
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_OTG0>;
clock-names = "otg";
+ phys = <&usbphy0>;
+ phy-names = "usb2-phy";
status = "disabled";
};
@@ -698,6 +704,35 @@
interrupts = <GIC_PPI 9 0xf04>;
};
+ usbphy: phy {
+ compatible = "rockchip,rk3288-usb-phy";
+ rockchip,grf = <&grf>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ usbphy0: usb-phy0 {
+ #phy-cells = <0>;
+ reg = <0x320>;
+ clocks = <&cru SCLK_OTGPHY0>;
+ clock-names = "phyclk";
+ };
+
+ usbphy1: usb-phy1 {
+ #phy-cells = <0>;
+ reg = <0x334>;
+ clocks = <&cru SCLK_OTGPHY1>;
+ clock-names = "phyclk";
+ };
+
+ usbphy2: usb-phy2 {
+ #phy-cells = <0>;
+ reg = <0x348>;
+ clocks = <&cru SCLK_OTGPHY2>;
+ clock-names = "phyclk";
+ };
+ };
+
pinctrl: pinctrl {
compatible = "rockchip,rk3288-pinctrl";
rockchip,grf = <&grf>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 367af53c1b84..57ab8587f7b9 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -26,6 +26,7 @@
serial2 = &usart1;
serial3 = &usart2;
serial4 = &usart3;
+ serial5 = &uart0;
gpio0 = &pioA;
gpio1 = &pioB;
gpio2 = &pioC;
@@ -206,6 +207,17 @@
status = "disabled";
};
+ uart0: serial@f0024000 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0xf0024000 0x100>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ clocks = <&uart0_clk>;
+ clock-names = "usart";
+ status = "disabled";
+ };
+
pwm0: pwm@f002c000 {
compatible = "atmel,sama5d3-pwm";
reg = <0xf002c000 0x300>;
@@ -439,7 +451,7 @@
};
dbgu: serial@ffffee00 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xffffee00 0x200>;
interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
dmas = <&dma1 2 AT91_DMA_CFG_PER_ID(13)>,
@@ -764,6 +776,22 @@
};
};
+ uart0 {
+ pinctrl_uart0: uart0-0 {
+ atmel,pins =
+ <AT91_PIOC 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* conflicts with PWMFI2, ISI_D8 */
+ AT91_PIOC 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>; /* conflicts with ISI_PCK */
+ };
+ };
+
+ uart1 {
+ pinctrl_uart1: uart1-0 {
+ atmel,pins =
+ <AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_NONE /* conflicts with TWD0, ISI_VSYNC */
+ AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>; /* conflicts with TWCK0, ISI_HSYNC */
+ };
+ };
+
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
@@ -1098,6 +1126,12 @@
atmel,clk-output-range = <0 66000000>;
};
+ uart0_clk: uart0_clk {
+ #clock-cells = <0>;
+ reg = <16>;
+ atmel,clk-output-range = <0 66000000>;
+ };
+
twi0_clk: twi0_clk {
reg = <18>;
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts
index 9089c7c6cea8..d9a9aca1ccfd 100644
--- a/arch/arm/boot/dts/sama5d35ek.dts
+++ b/arch/arm/boot/dts/sama5d35ek.dts
@@ -44,8 +44,6 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pb_user1 {
label = "pb_user1";
diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
index eaf41451ad0c..c5a3772741bf 100644
--- a/arch/arm/boot/dts/sama5d3_can.dtsi
+++ b/arch/arm/boot/dts/sama5d3_can.dtsi
@@ -1,5 +1,5 @@
/*
- * at91sama5d3_can.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * sama5d3_can.dtsi - Device Tree Include file for SAMA5D3 SoC with
* CAN support
*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index fe2af9276312..7cb235ef0fb6 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -1,5 +1,5 @@
/*
- * at91sama5d3_emac.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * sama5d3_emac.dtsi - Device Tree Include file for SAMA5D3 SoC with
* Ethernet.
*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
@@ -41,7 +41,7 @@
};
macb1: ethernet@f802c000 {
- compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sama5d3_gmac.dtsi b/arch/arm/boot/dts/sama5d3_gmac.dtsi
index de5ed59fb446..23f225fbb756 100644
--- a/arch/arm/boot/dts/sama5d3_gmac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_gmac.dtsi
@@ -1,5 +1,5 @@
/*
- * at91sama5d3_gmac.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * sama5d3_gmac.dtsi - Device Tree Include file for SAMA5D3 SoC with
* Gigabit Ethernet.
*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
diff --git a/arch/arm/boot/dts/sama5d3_lcd.dtsi b/arch/arm/boot/dts/sama5d3_lcd.dtsi
index 85d302701565..be7cfefc6c31 100644
--- a/arch/arm/boot/dts/sama5d3_lcd.dtsi
+++ b/arch/arm/boot/dts/sama5d3_lcd.dtsi
@@ -1,5 +1,5 @@
/*
- * at91sama5d3_lcd.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * sama5d3_lcd.dtsi - Device Tree Include file for SAMA5D3 SoC with
* LCD support
*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
@@ -13,40 +13,183 @@
/ {
ahb {
apb {
+ hlcdc: hlcdc@f0030000 {
+ compatible = "atmel,sama5d3-hlcdc";
+ reg = <0xf0030000 0x2000>;
+ interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+ clock-names = "periph_clk","sys_clk", "slow_clk";
+ status = "disabled";
+
+ hlcdc-display-controller {
+ compatible = "atmel,hlcdc-display-controller";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ };
+
+ hlcdc_pwm: hlcdc-pwm {
+ compatible = "atmel,hlcdc-pwm";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_pwm>;
+ #pwm-cells = <3>;
+ };
+ };
+
pinctrl@fffff200 {
lcd {
- pinctrl_lcd: lcd-0 {
+ pinctrl_lcd_base: lcd-base-0 {
+ atmel,pins =
+ <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDVSYNC */
+ AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDHSYNC */
+ AT91_PIOA 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDDISP */
+ AT91_PIOA 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDDEN */
+ AT91_PIOA 28 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDPCK */
+ };
+
+ pinctrl_lcd_pwm: lcd-pwm-0 {
+ atmel,pins = <AT91_PIOA 24 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDPWM */
+ };
+
+ pinctrl_lcd_rgb444: lcd-rgb-0 {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD11 pin */
+ };
+
+ pinctrl_lcd_rgb565: lcd-rgb-1 {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD15 pin */
+ };
+
+ pinctrl_lcd_rgb666: lcd-rgb-2 {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD15 pin */
+ AT91_PIOA 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD16 pin */
+ AT91_PIOA 17 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD17 pin */
+ };
+
+ pinctrl_lcd_rgb666_alt: lcd-rgb-2-alt {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD15 pin */
+ AT91_PIOC 14 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD16 pin */
+ AT91_PIOC 13 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* LCDD17 pin */
+ };
+
+ pinctrl_lcd_rgb888: lcd-rgb-3 {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD15 pin */
+ AT91_PIOA 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD16 pin */
+ AT91_PIOA 17 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD17 pin */
+ AT91_PIOA 18 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD18 pin */
+ AT91_PIOA 19 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD19 pin */
+ AT91_PIOA 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD20 pin */
+ AT91_PIOA 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD21 pin */
+ AT91_PIOA 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD22 pin */
+ AT91_PIOA 23 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD23 pin */
+ };
+
+ pinctrl_lcd_rgb888_alt: lcd-rgb-3-alt {
atmel,pins =
- <AT91_PIOA 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA24 periph A LCDPWM */
- AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA26 periph A LCDVSYNC */
- AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA27 periph A LCDHSYNC */
- AT91_PIOA 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA25 periph A LCDDISP */
- AT91_PIOA 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA29 periph A LCDDEN */
- AT91_PIOA 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA28 periph A LCDPCK */
- AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA0 periph A LCDD0 pin */
- AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA1 periph A LCDD1 pin */
- AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA2 periph A LCDD2 pin */
- AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA3 periph A LCDD3 pin */
- AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA4 periph A LCDD4 pin */
- AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA5 periph A LCDD5 pin */
- AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA6 periph A LCDD6 pin */
- AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA7 periph A LCDD7 pin */
- AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA8 periph A LCDD8 pin */
- AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA9 periph A LCDD9 pin */
- AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA10 periph A LCDD10 pin */
- AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA11 periph A LCDD11 pin */
- AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA12 periph A LCDD12 pin */
- AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA13 periph A LCDD13 pin */
- AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA14 periph A LCDD14 pin */
- AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA15 periph A LCDD15 pin */
- AT91_PIOC 14 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC14 periph C LCDD16 pin */
- AT91_PIOC 13 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC13 periph C LCDD17 pin */
- AT91_PIOC 12 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC12 periph C LCDD18 pin */
- AT91_PIOC 11 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC11 periph C LCDD19 pin */
- AT91_PIOC 10 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC10 periph C LCDD20 pin */
- AT91_PIOC 15 AT91_PERIPH_C AT91_PINCTRL_NONE /* PC15 periph C LCDD21 pin */
- AT91_PIOE 27 AT91_PERIPH_C AT91_PINCTRL_NONE /* PE27 periph C LCDD22 pin */
- AT91_PIOE 28 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* PE28 periph C LCDD23 pin */
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD15 pin */
+ AT91_PIOC 14 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD16 pin */
+ AT91_PIOC 13 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD17 pin */
+ AT91_PIOC 12 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD18 pin */
+ AT91_PIOC 11 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD19 pin */
+ AT91_PIOC 10 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD20 pin */
+ AT91_PIOC 15 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD21 pin */
+ AT91_PIOE 27 AT91_PERIPH_C AT91_PINCTRL_NONE /* LCDD22 pin */
+ AT91_PIOE 28 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* LCDD23 pin */
};
};
};
diff --git a/arch/arm/boot/dts/sama5d3_mci2.dtsi b/arch/arm/boot/dts/sama5d3_mci2.dtsi
index 1b02208ea6ff..026b252f09b3 100644
--- a/arch/arm/boot/dts/sama5d3_mci2.dtsi
+++ b/arch/arm/boot/dts/sama5d3_mci2.dtsi
@@ -1,5 +1,5 @@
/*
- * at91sama5d3_mci2.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * sama5d3_mci2.dtsi - Device Tree Include file for SAMA5D3 SoC with
* 3 MMC ports
*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
index 02848453ca0c..f7fa58fe09f1 100644
--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
@@ -1,5 +1,5 @@
/*
- * at91sama5d3_tcb1.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * sama5d3_tcb1.dtsi - Device Tree Include file for SAMA5D3 SoC with
* 2 TC blocks.
*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
index 7a8d4c6115f7..2511d748867b 100644
--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
+++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
@@ -1,5 +1,5 @@
/*
- * at91sama5d3_uart.dtsi - Device Tree Include file for AT91SAM9x5 SoC with
+ * sama5d3_uart.dtsi - Device Tree Include file for SAMA5D3 SoC with
* UART support
*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 4303874889c6..6b1bb58f9c0b 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -64,9 +64,13 @@
gpio2 = &pioC;
gpio3 = &pioD;
gpio4 = &pioE;
+ pwm0 = &pwm0;
+ ssc0 = &ssc0;
+ ssc1 = &ssc1;
tcb0 = &tcb0;
tcb1 = &tcb1;
i2c0 = &i2c0;
+ i2c1 = &i2c1;
i2c2 = &i2c2;
};
cpus {
@@ -310,6 +314,34 @@
#size-cells = <1>;
ranges;
+ hlcdc: hlcdc@f0000000 {
+ compatible = "atmel,sama5d4-hlcdc";
+ reg = <0xf0000000 0x4000>;
+ interrupts = <51 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+ clock-names = "periph_clk","sys_clk", "slow_clk";
+ status = "disabled";
+
+ hlcdc-display-controller {
+ compatible = "atmel,hlcdc-display-controller";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ };
+
+ hlcdc_pwm: hlcdc-pwm {
+ compatible = "atmel,hlcdc-pwm";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_pwm>;
+ #pwm-cells = <3>;
+ };
+ };
+
dma1: dma-controller@f0004000 {
compatible = "atmel,sama5d4-dma";
reg = <0xf0004000 0x200>;
@@ -319,6 +351,21 @@
clock-names = "dma_clk";
};
+ isi: isi@f0008000 {
+ compatible = "atmel,at91sam9g45-isi";
+ reg = <0xf0008000 0x4000>;
+ interrupts = <52 IRQ_TYPE_LEVEL_HIGH 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_isi_data_0_7>;
+ clocks = <&isi_clk>;
+ clock-names = "isi_clk";
+ status = "disabled";
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
ramc0: ramc@f0010000 {
compatible = "atmel,sama5d3-ddramc";
reg = <0xf0010000 0x200>;
@@ -800,6 +847,33 @@
clock-names = "mci_clk";
};
+ ssc0: ssc@f8008000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xf8008000 0x4000>;
+ interrupts = <48 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(26))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(27))>;
+ dma-names = "tx", "rx";
+ clocks = <&ssc0_clk>;
+ clock-names = "pclk";
+ status = "disabled";
+ };
+
+ pwm0: pwm@f800c000 {
+ compatible = "atmel,sama5d3-pwm";
+ reg = <0xf800c000 0x300>;
+ interrupts = <43 IRQ_TYPE_LEVEL_HIGH 4>;
+ #pwm-cells = <3>;
+ clocks = <&pwm_clk>;
+ status = "disabled";
+ };
+
spi0: spi@f8010000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -839,6 +913,25 @@
status = "disabled";
};
+ i2c1: i2c@f8018000 {
+ compatible = "atmel,at91sam9x5-i2c";
+ reg = <0xf8018000 0x4000>;
+ interrupts = <33 IRQ_TYPE_LEVEL_HIGH 6>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+ AT91_XDMAC_DT_PERID(4)>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+ AT91_XDMAC_DT_PERID(5)>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&twi1_clk>;
+ status = "disabled";
+ };
+
tcb0: timer@f801c000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf801c000 0x100>;
@@ -853,6 +946,8 @@
interrupts = <54 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_macb0_rmii>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&macb0_clk>, <&macb0_clk>;
clock-names = "hclk", "pclk";
status = "disabled";
@@ -953,6 +1048,24 @@
status = "disabled";
};
+ ssc1: ssc@fc014000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xfc014000 0x4000>;
+ interrupts = <49 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(28))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(29))>;
+ dma-names = "tx", "rx";
+ clocks = <&ssc1_clk>;
+ clock-names = "pclk";
+ status = "disabled";
+ };
+
tcb1: timer@fc020000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xfc020000 0x100>;
@@ -1008,6 +1121,46 @@
};
};
+ aes@fc044000 {
+ compatible = "atmel,at91sam9g46-aes";
+ reg = <0xfc044000 0x100>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+ AT91_XDMAC_DT_PERID(41)>,
+ <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+ AT91_XDMAC_DT_PERID(40)>;
+ dma-names = "tx", "rx";
+ clocks = <&aes_clk>;
+ clock-names = "aes_clk";
+ status = "disabled";
+ };
+
+ tdes@fc04c000 {
+ compatible = "atmel,at91sam9g46-tdes";
+ reg = <0xfc04c000 0x100>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+ AT91_XDMAC_DT_PERID(42)>,
+ <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+ AT91_XDMAC_DT_PERID(43)>;
+ dma-names = "tx", "rx";
+ clocks = <&tdes_clk>;
+ clock-names = "tdes_clk";
+ status = "disabled";
+ };
+
+ sha@fc050000 {
+ compatible = "atmel,at91sam9g46-sha";
+ reg = <0xfc050000 0x100>;
+ interrupts = <15 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+ AT91_XDMAC_DT_PERID(44)>;
+ dma-names = "tx";
+ clocks = <&sha_clk>;
+ clock-names = "sha_clk";
+ status = "disabled";
+ };
+
rstc@fc068600 {
compatible = "atmel,at91sam9g45-rstc";
reg = <0xfc068600 0x10>;
@@ -1064,7 +1217,7 @@
};
dbgu: serial@fc069000 {
- compatible = "atmel,at91sam9260-usart";
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfc069000 0x200>;
interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
@@ -1190,6 +1343,14 @@
};
};
+ i2c1 {
+ pinctrl_i2c1: i2c1-0 {
+ atmel,pins =
+ <AT91_PIOE 29 AT91_PERIPH_C AT91_PINCTRL_NONE /* TWD1, conflicts with UART0 RX and DIBP */
+ AT91_PIOE 30 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* TWCK1, conflicts with UART0 TX and DIBN */
+ };
+ };
+
i2c2 {
pinctrl_i2c2: i2c2-0 {
atmel,pins =
@@ -1198,6 +1359,155 @@
};
};
+ isi {
+ pinctrl_isi_data_0_7: isi-0-data-0-7 {
+ atmel,pins =
+ <AT91_PIOC 19 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D0 */
+ AT91_PIOC 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D1 */
+ AT91_PIOC 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D2 */
+ AT91_PIOC 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D3 */
+ AT91_PIOC 23 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D4 */
+ AT91_PIOC 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D5 */
+ AT91_PIOC 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D6 */
+ AT91_PIOC 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* ISI_D7 */
+ AT91_PIOB 1 AT91_PERIPH_C AT91_PINCTRL_NONE /* ISI_PCK, conflict with G0_RXCK */
+ AT91_PIOB 3 AT91_PERIPH_C AT91_PINCTRL_NONE /* ISI_VSYNC */
+ AT91_PIOB 4 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* ISI_HSYNC */
+ };
+ pinctrl_isi_data_8_9: isi-0-data-8-9 {
+ atmel,pins =
+ <AT91_PIOC 0 AT91_PERIPH_C AT91_PINCTRL_NONE /* ISI_D8, conflicts with SPI0_MISO, PWMH2 */
+ AT91_PIOC 1 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* ISI_D9, conflicts with SPI0_MOSI, PWML2 */
+ };
+ pinctrl_isi_data_10_11: isi-0-data-10-11 {
+ atmel,pins =
+ <AT91_PIOC 2 AT91_PERIPH_C AT91_PINCTRL_NONE /* ISI_D10, conflicts with SPI0_SPCK, PWMH3 */
+ AT91_PIOC 3 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* ISI_D11, conflicts with SPI0_NPCS0, PWML3 */
+ };
+ };
+
+ lcd {
+ pinctrl_lcd_base: lcd-base-0 {
+ atmel,pins =
+ <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDVSYNC */
+ AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDHSYNC */
+ AT91_PIOA 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDDEN */
+ AT91_PIOA 28 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDPCK */
+ };
+ pinctrl_lcd_pwm: lcd-pwm-0 {
+ atmel,pins = <AT91_PIOA 24 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDPWM */
+ };
+ pinctrl_lcd_rgb444: lcd-rgb-0 {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD11 pin */
+ };
+ pinctrl_lcd_rgb565: lcd-rgb-1 {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD15 pin */
+ };
+ pinctrl_lcd_rgb666: lcd-rgb-2 {
+ atmel,pins =
+ <AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD15 pin */
+ AT91_PIOA 18 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD18 pin */
+ AT91_PIOA 19 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD19 pin */
+ AT91_PIOA 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD20 pin */
+ AT91_PIOA 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD21 pin */
+ AT91_PIOA 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD22 pin */
+ AT91_PIOA 23 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD23 pin */
+ };
+ pinctrl_lcd_rgb777: lcd-rgb-3 {
+ atmel,pins =
+ /* LCDDAT0 conflicts with TMS */
+ <AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ /* LCDDAT8 conflicts with TCK */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD15 pin */
+ /* LCDDAT16 conflicts with NTRST */
+ AT91_PIOA 17 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD17 pin */
+ AT91_PIOA 18 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD18 pin */
+ AT91_PIOA 19 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD19 pin */
+ AT91_PIOA 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD20 pin */
+ AT91_PIOA 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD21 pin */
+ AT91_PIOA 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD22 pin */
+ AT91_PIOA 23 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD23 pin */
+ };
+ pinctrl_lcd_rgb888: lcd-rgb-4 {
+ atmel,pins =
+ <AT91_PIOA 0 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD0 pin */
+ AT91_PIOA 1 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD1 pin */
+ AT91_PIOA 2 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD2 pin */
+ AT91_PIOA 3 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD3 pin */
+ AT91_PIOA 4 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD4 pin */
+ AT91_PIOA 5 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD5 pin */
+ AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD6 pin */
+ AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD7 pin */
+ AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD8 pin */
+ AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD9 pin */
+ AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD10 pin */
+ AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD11 pin */
+ AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD12 pin */
+ AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD13 pin */
+ AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD14 pin */
+ AT91_PIOA 15 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD15 pin */
+ AT91_PIOA 16 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD16 pin */
+ AT91_PIOA 17 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD17 pin */
+ AT91_PIOA 18 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD18 pin */
+ AT91_PIOA 19 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD19 pin */
+ AT91_PIOA 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD20 pin */
+ AT91_PIOA 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD21 pin */
+ AT91_PIOA 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* LCDD22 pin */
+ AT91_PIOA 23 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* LCDD23 pin */
+ };
+ };
+
macb0 {
pinctrl_macb0_rmii: macb0_rmii-0 {
atmel,pins =
@@ -1281,6 +1591,38 @@
};
};
+ ssc0 {
+ pinctrl_ssc0_tx: ssc0_tx {
+ atmel,pins =
+ <AT91_PIOB 27 AT91_PERIPH_B AT91_PINCTRL_NONE /* TK0 */
+ AT91_PIOB 31 AT91_PERIPH_B AT91_PINCTRL_NONE /* TF0 */
+ AT91_PIOB 28 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* TD0 */
+ };
+
+ pinctrl_ssc0_rx: ssc0_rx {
+ atmel,pins =
+ <AT91_PIOB 26 AT91_PERIPH_B AT91_PINCTRL_NONE /* RK0 */
+ AT91_PIOB 30 AT91_PERIPH_B AT91_PINCTRL_NONE /* RF0 */
+ AT91_PIOB 29 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* RD0 */
+ };
+ };
+
+ ssc1 {
+ pinctrl_ssc1_tx: ssc1_tx {
+ atmel,pins =
+ <AT91_PIOC 19 AT91_PERIPH_B AT91_PINCTRL_NONE /* TK1 */
+ AT91_PIOC 20 AT91_PERIPH_B AT91_PINCTRL_NONE /* TF1 */
+ AT91_PIOC 21 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* TD1 */
+ };
+
+ pinctrl_ssc1_rx: ssc1_rx {
+ atmel,pins =
+ <AT91_PIOC 24 AT91_PERIPH_B AT91_PINCTRL_NONE /* RK1 */
+ AT91_PIOC 22 AT91_PERIPH_B AT91_PINCTRL_NONE /* RF1 */
+ AT91_PIOC 23 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* RD1 */
+ };
+ };
+
usart2 {
pinctrl_usart2: usart2-0 {
atmel,pins =
diff --git a/arch/arm/boot/dts/sh7372-mackerel.dts b/arch/arm/boot/dts/sh7372-mackerel.dts
deleted file mode 100644
index a759a276c9a9..000000000000
--- a/arch/arm/boot/dts/sh7372-mackerel.dts
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Device Tree Source for the mackerel board
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/dts-v1/;
-#include "sh7372.dtsi"
-
-/ {
- model = "Mackerel (AP4 EVM 2nd)";
- compatible = "renesas,mackerel";
-
- chosen {
- bootargs = "console=tty0, console=ttySC0,115200 earlyprintk=sh-sci.0,115200 root=/dev/nfs nfsroot=,tcp,v3 ip=dhcp mem=240m rw";
- };
-
- memory {
- device_type = "memory";
- reg = <0x40000000 0x10000000>;
- };
-};
diff --git a/arch/arm/boot/dts/sh7372.dtsi b/arch/arm/boot/dts/sh7372.dtsi
deleted file mode 100644
index f863a10cb1b2..000000000000
--- a/arch/arm/boot/dts/sh7372.dtsi
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Device Tree Source for the sh7372 SoC
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/include/ "skeleton.dtsi"
-
-/ {
- compatible = "renesas,sh7372";
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "arm,cortex-a8";
- device_type = "cpu";
- reg = <0x0>;
- clock-frequency = <800000000>;
- };
- };
-
- pfc: pfc@e6050000 {
- compatible = "renesas,pfc-sh7372";
- reg = <0xe6050000 0x8000>,
- <0xe605801c 0x1c>;
- gpio-controller;
- #gpio-cells = <2>;
- };
-};
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts b/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
deleted file mode 100644
index 6d32c87632d4..000000000000
--- a/arch/arm/boot/dts/sh73a0-kzm9g-reference.dts
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Device Tree Source for the KZM-A9-GT board
- *
- * Copyright (C) 2012 Horms Solutions Ltd.
- *
- * Based on sh73a0-kzm9g.dts
- * Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/dts-v1/;
-#include "sh73a0.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-
-/ {
- model = "KZM-A9-GT";
- compatible = "renesas,kzm9g-reference", "renesas,sh73a0";
-
- aliases {
- serial4 = &scifa4;
- };
-
- cpus {
- cpu@0 {
- cpu0-supply = <&vdd_dvfs>;
- operating-points = <
- /* kHz uV */
- 1196000 1315000
- 598000 1175000
- 398667 1065000
- >;
- voltage-tolerance = <1>; /* 1% */
- };
- };
-
- chosen {
- bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel rw";
- stdout-path = &scifa4;
- };
-
- memory {
- device_type = "memory";
- reg = <0x41000000 0x1e800000>;
- };
-
- reg_1p8v: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "fixed-1.8V";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- reg_3p3v: regulator@1 {
- compatible = "regulator-fixed";
- regulator-name = "fixed-3.3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vmmc_sdhi0: regulator@2 {
- compatible = "regulator-fixed";
- regulator-name = "SDHI0 Vcc";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pfc 15 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- vmmc_sdhi2: regulator@3 {
- compatible = "regulator-fixed";
- regulator-name = "SDHI2 Vcc";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pfc 14 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- lan9220@10000000 {
- compatible = "smsc,lan9220", "smsc,lan9115";
- reg = <0x10000000 0x100>;
- phy-mode = "mii";
- interrupt-parent = <&irqpin0>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
- reg-io-width = <4>;
- smsc,irq-push-pull;
- smsc,save-mac-address;
- vddvario-supply = <&reg_1p8v>;
- vdd33a-supply = <&reg_3p3v>;
- };
-
- leds {
- compatible = "gpio-leds";
- led1 {
- gpios = <&pfc 20 GPIO_ACTIVE_LOW>;
- label = "LED1";
- };
- led2 {
- gpios = <&pfc 21 GPIO_ACTIVE_LOW>;
- label = "LED2";
- };
- led3 {
- gpios = <&pfc 22 GPIO_ACTIVE_LOW>;
- label = "LED3";
- };
- led4 {
- gpios = <&pfc 23 GPIO_ACTIVE_LOW>;
- label = "LED4";
- };
- };
-
- keyboard {
- compatible = "gpio-keys";
-
- back-key {
- gpios = <&pcf8575 8 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_BACK>;
- label = "SW3";
- };
-
- right-key {
- gpios = <&pcf8575 9 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_RIGHT>;
- label = "SW2-R";
- };
-
- left-key {
- gpios = <&pcf8575 10 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_LEFT>;
- label = "SW2-L";
- };
-
- enter-key {
- gpios = <&pcf8575 11 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_ENTER>;
- label = "SW2-P";
- };
-
- up-key {
- gpios = <&pcf8575 12 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_UP>;
- label = "SW2-U";
- };
-
- down-key {
- gpios = <&pcf8575 13 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_DOWN>;
- label = "SW2-D";
- };
-
- home-key {
- gpios = <&pcf8575 14 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_HOME>;
- label = "SW1";
- };
- };
-
- sound {
- compatible = "simple-audio-card";
- simple-audio-card,format = "left_j";
- simple-audio-card,cpu {
- sound-dai = <&sh_fsi2 0>;
- };
- simple-audio-card,codec {
- sound-dai = <&ak4648>;
- bitclock-master;
- frame-master;
- system-clock-frequency = <11289600>;
- };
- };
-};
-
-&cmt1 {
- status = "okay";
-};
-
-&extal2_clk {
- clock-frequency = <48000000>;
-};
-
-&i2c0 {
- status = "okay";
- as3711@40 {
- compatible = "ams,as3711";
- reg = <0x40>;
-
- regulators {
- vdd_dvfs: sd1 {
- regulator-name = "1.315V CPU";
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-boot-on;
- };
- sd2 {
- regulator-name = "1.8V";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
- sd4 {
- regulator-name = "1.215V";
- regulator-min-microvolt = <1215000>;
- regulator-max-microvolt = <1235000>;
- regulator-always-on;
- regulator-boot-on;
- };
- ldo2 {
- regulator-name = "2.8V CPU";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-always-on;
- regulator-boot-on;
- };
- ldo3 {
- regulator-name = "3.0V CPU";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-always-on;
- regulator-boot-on;
- };
- ldo4 {
- regulator-name = "2.8V";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-always-on;
- regulator-boot-on;
- };
- ldo5 {
- regulator-name = "2.8V #2";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-always-on;
- regulator-boot-on;
- };
- ldo7 {
- regulator-name = "1.15V CPU";
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- regulator-always-on;
- regulator-boot-on;
- };
- ldo8 {
- regulator-name = "1.15V CPU #2";
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- regulator-always-on;
- regulator-boot-on;
- };
- };
- };
-
- ak4648: ak4648@12 {
- #sound-dai-cells = <0>;
- compatible = "asahi-kasei,ak4648";
- reg = <0x12>;
- };
-};
-
-&i2c3 {
- pinctrl-0 = <&i2c3_pins>;
- pinctrl-names = "default";
- status = "okay";
-
- pcf8575: gpio@20 {
- compatible = "nxp,pcf8575";
- reg = <0x20>;
- interrupt-parent = <&irqpin2>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-};
-
-&mmcif {
- pinctrl-0 = <&mmcif_pins>;
- pinctrl-names = "default";
-
- bus-width = <8>;
- vmmc-supply = <&reg_1p8v>;
- status = "okay";
-};
-
-&pfc {
- i2c3_pins: i2c3 {
- renesas,groups = "i2c3_1";
- renesas,function = "i2c3";
- };
-
- mmcif_pins: mmc {
- mux {
- renesas,groups = "mmc0_data8_0", "mmc0_ctrl_0";
- renesas,function = "mmc0";
- };
- cfg {
- renesas,groups = "mmc0_data8_0";
- renesas,pins = "PORT279";
- bias-pull-up;
- };
- };
-
- scifa4_pins: serial4 {
- renesas,groups = "scifa4_data", "scifa4_ctrl";
- renesas,function = "scifa4";
- };
-
- sdhi0_pins: sd0 {
- renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd", "sdhi0_wp";
- renesas,function = "sdhi0";
- };
-
- sdhi2_pins: sd2 {
- renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
- renesas,function = "sdhi2";
- };
-
- fsia_pins: sounda {
- renesas,groups = "fsia_mclk_in", "fsia_sclk_in",
- "fsia_data_in", "fsia_data_out";
- renesas,function = "fsia";
- };
-};
-
-&scifa4 {
- pinctrl-0 = <&scifa4_pins>;
- pinctrl-names = "default";
-
- status = "okay";
-};
-
-&sdhi0 {
- pinctrl-0 = <&sdhi0_pins>;
- pinctrl-names = "default";
-
- vmmc-supply = <&vmmc_sdhi0>;
- bus-width = <4>;
- status = "okay";
-};
-
-&sdhi2 {
- pinctrl-0 = <&sdhi2_pins>;
- pinctrl-names = "default";
-
- vmmc-supply = <&vmmc_sdhi2>;
- bus-width = <4>;
- broken-cd;
- status = "okay";
-};
-
-&sh_fsi2 {
- pinctrl-0 = <&fsia_pins>;
- pinctrl-names = "default";
-
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g.dts b/arch/arm/boot/dts/sh73a0-kzm9g.dts
index 27c5f426d172..022ba505f573 100644
--- a/arch/arm/boot/dts/sh73a0-kzm9g.dts
+++ b/arch/arm/boot/dts/sh73a0-kzm9g.dts
@@ -1,6 +1,9 @@
/*
* Device Tree Source for the KZM-A9-GT board
*
+ * Copyright (C) 2012 Horms Solutions Ltd.
+ *
+ * Based on sh73a0-kzm9g.dts
* Copyright (C) 2012 Renesas Solutions Corp.
*
* This file is licensed under the terms of the GNU General Public License
@@ -10,17 +13,388 @@
/dts-v1/;
#include "sh73a0.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/ {
model = "KZM-A9-GT";
compatible = "renesas,kzm9g", "renesas,sh73a0";
+ aliases {
+ serial4 = &scifa4;
+ };
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vdd_dvfs>;
+ operating-points = <
+ /* kHz uV */
+ 1196000 1315000
+ 598000 1175000
+ 398667 1065000
+ >;
+ voltage-tolerance = <1>; /* 1% */
+ };
+ };
+
chosen {
- bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel earlyprintk=sh-sci.4,115200 rw";
+ bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel rw";
+ stdout-path = &scifa4;
};
memory {
device_type = "memory";
- reg = <0x41000000 0x1e800000>;
+ reg = <0x40000000 0x20000000>;
+ };
+
+ reg_1p8v: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_3p3v: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vmmc_sdhi0: regulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pfc 15 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vmmc_sdhi2: regulator@3 {
+ compatible = "regulator-fixed";
+ regulator-name = "SDHI2 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pfc 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led1 {
+ gpios = <&pfc 20 GPIO_ACTIVE_LOW>;
+ label = "LED1";
+ };
+ led2 {
+ gpios = <&pfc 21 GPIO_ACTIVE_LOW>;
+ label = "LED2";
+ };
+ led3 {
+ gpios = <&pfc 22 GPIO_ACTIVE_LOW>;
+ label = "LED3";
+ };
+ led4 {
+ gpios = <&pfc 23 GPIO_ACTIVE_LOW>;
+ label = "LED4";
+ };
+ };
+
+ keyboard {
+ compatible = "gpio-keys";
+
+ back-key {
+ gpios = <&pcf8575 8 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_BACK>;
+ label = "SW3";
+ };
+
+ right-key {
+ gpios = <&pcf8575 9 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_RIGHT>;
+ label = "SW2-R";
+ };
+
+ left-key {
+ gpios = <&pcf8575 10 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_LEFT>;
+ label = "SW2-L";
+ };
+
+ enter-key {
+ gpios = <&pcf8575 11 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_ENTER>;
+ label = "SW2-P";
+ };
+
+ up-key {
+ gpios = <&pcf8575 12 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_UP>;
+ label = "SW2-U";
+ };
+
+ down-key {
+ gpios = <&pcf8575 13 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_DOWN>;
+ label = "SW2-D";
+ };
+
+ home-key {
+ gpios = <&pcf8575 14 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_HOME>;
+ label = "SW1";
+ };
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,cpu {
+ sound-dai = <&sh_fsi2 0>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&ak4648>;
+ bitclock-master;
+ frame-master;
+ system-clock-frequency = <11289600>;
+ };
+ };
+};
+
+&bsc {
+ ethernet@10000000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0x10000000 0x100>;
+ phy-mode = "mii";
+ interrupt-parent = <&irqpin0>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ reg-io-width = <4>;
+ smsc,irq-push-pull;
+ smsc,save-mac-address;
+ vddvario-supply = <&reg_1p8v>;
+ vdd33a-supply = <&reg_3p3v>;
+ };
+};
+
+&cmt1 {
+ status = "okay";
+};
+
+&extal2_clk {
+ clock-frequency = <48000000>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ compass@c {
+ compatible = "asahi-kasei,ak8975";
+ reg = <0x0c>;
+ interrupt-parent = <&irqpin3>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ ak4648: codec@12 {
+ compatible = "asahi-kasei,ak4648";
+ reg = <0x12>;
+ #sound-dai-cells = <0>;
+ };
+
+ accelerometer@1d {
+ compatible = "adi,adxl34x";
+ reg = <0x1d>;
+ interrupt-parent = <&irqpin3>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
+ <3 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ rtc@32 {
+ compatible = "ricoh,r2025sd";
+ reg = <0x32>;
+ };
+
+ as3711@40 {
+ compatible = "ams,as3711";
+ reg = <0x40>;
+
+ regulators {
+ vdd_dvfs: sd1 {
+ regulator-name = "1.315V CPU";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ sd2 {
+ regulator-name = "1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ sd4 {
+ regulator-name = "1.215V";
+ regulator-min-microvolt = <1215000>;
+ regulator-max-microvolt = <1235000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ ldo2 {
+ regulator-name = "2.8V CPU";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ ldo3 {
+ regulator-name = "3.0V CPU";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ ldo4 {
+ regulator-name = "2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ ldo5 {
+ regulator-name = "2.8V #2";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ ldo7 {
+ regulator-name = "1.15V CPU";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ ldo8 {
+ regulator-name = "1.15V CPU #2";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ touchscreen@55 {
+ compatible = "sitronix,st1232";
+ reg = <0x55>;
+ interrupt-parent = <&irqpin1>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&i2c3 {
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ pcf8575: gpio@20 {
+ compatible = "nxp,pcf8575";
+ reg = <0x20>;
+ interrupt-parent = <&irqpin2>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&mmcif {
+ pinctrl-0 = <&mmcif_pins>;
+ pinctrl-names = "default";
+
+ bus-width = <8>;
+ vmmc-supply = <&reg_1p8v>;
+ status = "okay";
+};
+
+&pfc {
+ i2c3_pins: i2c3 {
+ renesas,groups = "i2c3_1";
+ renesas,function = "i2c3";
+ };
+
+ mmcif_pins: mmc {
+ mux {
+ renesas,groups = "mmc0_data8_0", "mmc0_ctrl_0";
+ renesas,function = "mmc0";
+ };
+ cfg {
+ renesas,groups = "mmc0_data8_0";
+ renesas,pins = "PORT279";
+ bias-pull-up;
+ };
+ };
+
+ scifa4_pins: serial4 {
+ renesas,groups = "scifa4_data", "scifa4_ctrl";
+ renesas,function = "scifa4";
+ };
+
+ sdhi0_pins: sd0 {
+ renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd", "sdhi0_wp";
+ renesas,function = "sdhi0";
+ };
+
+ sdhi2_pins: sd2 {
+ renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
+ renesas,function = "sdhi2";
+ };
+
+ fsia_pins: sounda {
+ renesas,groups = "fsia_mclk_in", "fsia_sclk_in",
+ "fsia_data_in", "fsia_data_out";
+ renesas,function = "fsia";
+ };
+};
+
+&scifa4 {
+ pinctrl-0 = <&scifa4_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&vmmc_sdhi0>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&sdhi2 {
+ pinctrl-0 = <&sdhi2_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&vmmc_sdhi2>;
+ bus-width = <4>;
+ broken-cd;
+ status = "okay";
+};
+
+&sh_fsi2 {
+ pinctrl-0 = <&fsia_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 2dfd5b44255d..45b539ce4d35 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -11,6 +11,7 @@
/include/ "skeleton.dtsi"
#include <dt-bindings/clock/sh73a0-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
@@ -26,15 +27,24 @@
compatible = "arm,cortex-a9";
reg = <0>;
clock-frequency = <1196000000>;
+ power-domains = <&pd_a2sl>;
};
cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
clock-frequency = <1196000000>;
+ power-domains = <&pd_a2sl>;
};
};
+ timer@f0000600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0xf0000600 0x20>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&twd_clk>;
+ };
+
gic: interrupt-controller@f0001000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
@@ -49,6 +59,7 @@
interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>,
<0 38 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "sec", "temp";
+ power-domains = <&pd_a4bc1>;
};
sbsc1: memory-controller@fe400000 {
@@ -57,6 +68,7 @@
interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>,
<0 36 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "sec", "temp";
+ power-domains = <&pd_a4bc0>;
};
pmu {
@@ -69,11 +81,12 @@
compatible = "renesas,cmt-48-sh73a0", "renesas,cmt-48";
reg = <0xe6138000 0x200>;
interrupts = <0 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks SH73A0_CLK_CMT1>;
+ clock-names = "fck";
+ power-domains = <&pd_c5>;
renesas,channels-mask = <0x3f>;
- clocks = <&mstp3_clks SH73A0_CLK_CMT1>;
- clock-names = "fck";
status = "disabled";
};
@@ -94,6 +107,9 @@
0 6 IRQ_TYPE_LEVEL_HIGH
0 7 IRQ_TYPE_LEVEL_HIGH
0 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
+ power-domains = <&pd_a4s>;
+ control-parent;
};
irqpin1: irqpin@e6900004 {
@@ -113,6 +129,8 @@
0 14 IRQ_TYPE_LEVEL_HIGH
0 15 IRQ_TYPE_LEVEL_HIGH
0 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
+ power-domains = <&pd_a4s>;
control-parent;
};
@@ -133,6 +151,9 @@
0 22 IRQ_TYPE_LEVEL_HIGH
0 23 IRQ_TYPE_LEVEL_HIGH
0 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
+ power-domains = <&pd_a4s>;
+ control-parent;
};
irqpin3: irqpin@e690000c {
@@ -152,6 +173,9 @@
0 30 IRQ_TYPE_LEVEL_HIGH
0 31 IRQ_TYPE_LEVEL_HIGH
0 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
+ power-domains = <&pd_a4s>;
+ control-parent;
};
i2c0: i2c@e6820000 {
@@ -164,6 +188,7 @@
0 169 IRQ_TYPE_LEVEL_HIGH
0 170 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks SH73A0_CLK_IIC0>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -177,6 +202,7 @@
0 53 IRQ_TYPE_LEVEL_HIGH
0 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks SH73A0_CLK_IIC1>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -190,6 +216,7 @@
0 173 IRQ_TYPE_LEVEL_HIGH
0 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks SH73A0_CLK_IIC2>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -203,6 +230,7 @@
0 185 IRQ_TYPE_LEVEL_HIGH
0 186 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp4_clks SH73A0_CLK_IIC3>;
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -216,6 +244,7 @@
0 189 IRQ_TYPE_LEVEL_HIGH
0 190 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp4_clks SH73A0_CLK_IIC4>;
+ power-domains = <&pd_c5>;
status = "disabled";
};
@@ -225,6 +254,7 @@
interrupts = <0 140 IRQ_TYPE_LEVEL_HIGH
0 141 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks SH73A0_CLK_MMCIF0>;
+ power-domains = <&pd_a3sp>;
reg-io-width = <4>;
status = "disabled";
};
@@ -236,6 +266,7 @@
0 84 IRQ_TYPE_LEVEL_HIGH
0 85 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks SH73A0_CLK_SDHI0>;
+ power-domains = <&pd_a3sp>;
cap-sd-highspeed;
status = "disabled";
};
@@ -247,6 +278,7 @@
interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH
0 89 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks SH73A0_CLK_SDHI1>;
+ power-domains = <&pd_a3sp>;
toshiba,mmc-wrprotect-disable;
cap-sd-highspeed;
status = "disabled";
@@ -258,6 +290,7 @@
interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH
0 105 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks SH73A0_CLK_SDHI2>;
+ power-domains = <&pd_a3sp>;
toshiba,mmc-wrprotect-disable;
cap-sd-highspeed;
status = "disabled";
@@ -269,6 +302,7 @@
interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFA0>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -278,6 +312,7 @@
interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFA1>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -287,6 +322,7 @@
interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFA2>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -296,6 +332,7 @@
interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFA3>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -305,6 +342,7 @@
interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFA4>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -314,6 +352,7 @@
interrupts = <0 79 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFA5>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -323,6 +362,7 @@
interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks SH73A0_CLK_SCIFA6>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -332,6 +372,7 @@
interrupts = <0 143 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFA7>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -341,6 +382,7 @@
interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks SH73A0_CLK_SCIFB>;
clock-names = "sci_ick";
+ power-domains = <&pd_a3sp>;
status = "disabled";
};
@@ -359,6 +401,117 @@
<&irqpin2 4 0>, <&irqpin2 5 0>, <&irqpin2 6 0>, <&irqpin2 7 0>,
<&irqpin3 0 0>, <&irqpin3 1 0>, <&irqpin3 2 0>, <&irqpin3 3 0>,
<&irqpin3 4 0>, <&irqpin3 5 0>, <&irqpin3 6 0>, <&irqpin3 7 0>;
+ power-domains = <&pd_c5>;
+ };
+
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,sysc-sh73a0", "renesas,sysc-rmobile";
+ reg = <0xe6180000 0x8000>, <0xe6188000 0x8000>;
+
+ pm-domains {
+ pd_c5: c5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_c4: c4@0 {
+ reg = <0>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_d4: d4@1 {
+ reg = <1>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4bc0: a4bc0@4 {
+ reg = <4>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4bc1: a4bc1@5 {
+ reg = <5>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4lc0: a4lc0@6 {
+ reg = <6>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4lc1: a4lc1@7 {
+ reg = <7>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a4mp: a4mp@8 {
+ reg = <8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a3mp: a3mp@9 {
+ reg = <9>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a3vc: a3vc@10 {
+ reg = <10>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ pd_a4rm: a4rm@12 {
+ reg = <12>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a3r: a3r@13 {
+ reg = <13>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a2rv: a2rv@14 {
+ reg = <14>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+
+ pd_a4s: a4s@16 {
+ reg = <16>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a3sp: a3sp@17 {
+ reg = <17>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a3sg: a3sg@18 {
+ reg = <18>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_a3sm: a3sm@19 {
+ reg = <19>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a2sl: a2sl@20 {
+ reg = <20>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+ };
+ };
};
sh_fsi2: sound@ec230000 {
@@ -366,9 +519,22 @@
compatible = "renesas,fsi2-sh73a0", "renesas,sh_fsi2";
reg = <0xec230000 0x400>;
interrupts = <0 146 0x4>;
+ power-domains = <&pd_a4mp>;
status = "disabled";
};
+ bsc: bus@fec10000 {
+ compatible = "renesas,bsc-sh73a0", "renesas,bsc",
+ "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x20000000>;
+ reg = <0xfec10000 0x400>;
+ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&zb_clk>;
+ power-domains = <&pd_a4s>;
+ };
+
clocks {
#address-cells = <1>;
#size-cells = <1>;
@@ -426,133 +592,159 @@
vclk1_clk: vclk1_clk@e6150008 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150008 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&extcki_clk>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <&cpg_clocks SH73A0_CLK_MAIN>,
+ <0>;
#clock-cells = <0>;
clock-output-names = "vclk1";
};
vclk2_clk: vclk2_clk@e615000c {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe615000c 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&extcki_clk>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <&cpg_clocks SH73A0_CLK_MAIN>,
+ <0>;
#clock-cells = <0>;
clock-output-names = "vclk2";
};
vclk3_clk: vclk3_clk@e615001c {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe615001c 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&extcki_clk>, <&extal2_clk>, <&main_div2_clk>,
+ <&extalr_clk>, <&cpg_clocks SH73A0_CLK_MAIN>,
+ <0>;
#clock-cells = <0>;
clock-output-names = "vclk3";
};
zb_clk: zb_clk@e6150010 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150010 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <0>,
+ <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
#clock-cells = <0>;
clock-output-names = "zb";
};
flctl_clk: flctl_clk@e6150014 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150014 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <0>,
+ <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
#clock-cells = <0>;
clock-output-names = "flctlck";
};
sdhi0_clk: sdhi0_clk@e6150074 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150074 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&pll1_div13_clk>, <0>;
#clock-cells = <0>;
clock-output-names = "sdhi0ck";
};
sdhi1_clk: sdhi1_clk@e6150078 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150078 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&pll1_div13_clk>, <0>;
#clock-cells = <0>;
clock-output-names = "sdhi1ck";
};
sdhi2_clk: sdhi2_clk@e615007c {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe615007c 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&pll1_div13_clk>, <0>;
#clock-cells = <0>;
clock-output-names = "sdhi2ck";
};
fsia_clk: fsia_clk@e6150018 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150018 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&fsiack_clk>, <&fsiack_clk>;
#clock-cells = <0>;
clock-output-names = "fsia";
};
fsib_clk: fsib_clk@e6150090 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150090 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&fsibck_clk>, <&fsibck_clk>;
#clock-cells = <0>;
clock-output-names = "fsib";
};
sub_clk: sub_clk@e6150080 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150080 4>;
- clocks = <&extal2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&extal2_clk>, <&extal2_clk>;
#clock-cells = <0>;
clock-output-names = "sub";
};
spua_clk: spua_clk@e6150084 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150084 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&extal2_clk>, <&extal2_clk>;
#clock-cells = <0>;
clock-output-names = "spua";
};
spuv_clk: spuv_clk@e6150094 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150094 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&extal2_clk>, <&extal2_clk>;
#clock-cells = <0>;
clock-output-names = "spuv";
};
msu_clk: msu_clk@e6150088 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150088 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <0>,
+ <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
#clock-cells = <0>;
clock-output-names = "msu";
};
hsi_clk: hsi_clk@e615008c {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe615008c 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&pll1_div7_clk>, <0>;
#clock-cells = <0>;
clock-output-names = "hsi";
};
mfg1_clk: mfg1_clk@e6150098 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150098 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <0>,
+ <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
#clock-cells = <0>;
clock-output-names = "mfg1";
};
mfg2_clk: mfg2_clk@e615009c {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe615009c 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <0>,
+ <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
#clock-cells = <0>;
clock-output-names = "mfg2";
};
dsit_clk: dsit_clk@e6150060 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150060 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <0>,
+ <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
#clock-cells = <0>;
clock-output-names = "dsit";
};
dsi0p_clk: dsi0p_clk@e6150064 {
compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
reg = <0xe6150064 4>;
- clocks = <&pll1_div2_clk>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
+ <&cpg_clocks SH73A0_CLK_MAIN>, <&extal2_clk>,
+ <&extcki_clk>, <0>, <0>, <0>;
#clock-cells = <0>;
clock-output-names = "dsi0pck";
};
@@ -695,5 +887,16 @@
clock-output-names =
"iic3", "iic4", "keysc";
};
+ mstp5_clks: mstp5_clks@e6150144 {
+ compatible = "renesas,sh73a0-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0xe6150144 4>, <0xe615003c 4>;
+ clocks = <&cpg_clocks SH73A0_CLK_HP>;
+ #clock-cells = <1>;
+ clock-indices = <
+ SH73A0_CLK_INTCA0
+ >;
+ clock-output-names =
+ "intca0";
+ };
};
};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index bfd3f1c734b8..2201cd5da3bb 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -1017,23 +1017,6 @@
status = "disabled";
};
- vmmci: regulator-gpio {
- compatible = "regulator-gpio";
-
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2900000>;
- regulator-name = "mmci-reg";
- regulator-type = "voltage";
-
- startup-delay-us = <100>;
- enable-active-high;
-
- states = <1800000 0x1
- 2900000 0x0>;
-
- status = "disabled";
- };
-
mcde@a0350000 {
compatible = "stericsson,mcde";
reg = <0xa0350000 0x1000>, /* MCDE */
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index bf8f0eddc2c0..744c1e3a744d 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -111,6 +111,21 @@
pinctrl-1 = <&i2c3_sleep_mode>;
};
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+ enable-active-high;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+ };
+
// External Micro SD slot
sdi0_per1@80126000 {
arm,primecell-periphid = <0x10480180>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 206826a855c0..1bc84ebdccaa 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -146,8 +146,21 @@
};
vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
gpios = <&gpio7 4 0x4>;
enable-gpio = <&gpio6 25 0x4>;
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+ enable-active-high;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
};
// External Micro SD slot
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi b/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
new file mode 100644
index 000000000000..2c5cede686dc
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
@@ -0,0 +1,2421 @@
+/ {
+ clock@0,60006000 {
+ emc-timings-3 {
+ nvidia,ram-code = <3>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-20400000 {
+ clock-frequency = <20400000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-40800000 {
+ clock-frequency = <40800000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-68000000 {
+ clock-frequency = <68000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-102000000 {
+ clock-frequency = <102000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-204000000 {
+ clock-frequency = <204000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-300000000 {
+ clock-frequency = <300000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C>;
+ clock-names = "emc-parent";
+ };
+ timing-396000000 {
+ clock-frequency = <396000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M>;
+ clock-names = "emc-parent";
+ };
+ timing-528000000 {
+ clock-frequency = <528000000>;
+ nvidia,parent-clock-frequency = <528000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-600000000 {
+ clock-frequency = <600000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-792000000 {
+ clock-frequency = <792000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-924000000 {
+ clock-frequency = <924000000>;
+ nvidia,parent-clock-frequency = <924000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ };
+ };
+
+ emc@0,7001b000 {
+ emc-timings-3 {
+ nvidia,ram-code = <3>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000005
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000060
+ 0x00000000
+ 0x00000018
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000007
+ 0x0000000f
+ 0x00000005
+ 0x00000005
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000064
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000e0e
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000007
+ 0x00000000
+ 0x00000042
+ 0x000e000e
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x800001c5
+ 0x0000000a
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000
+ 0x00000005
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000005
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x0000009a
+ 0x00000000
+ 0x00000026
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000007
+ 0x0000000f
+ 0x00000006
+ 0x00000006
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x000000a0
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000e0e
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x0000000b
+ 0x00000000
+ 0x00000042
+ 0x000e000e
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x8000023a
+ 0x0000000a
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000001
+ 0x0000000a
+ 0x00000000
+ 0x00000001
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000005
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000134
+ 0x00000000
+ 0x0000004d
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000008
+ 0x0000000f
+ 0x0000000c
+ 0x0000000c
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000013f
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000e0e
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000015
+ 0x00000000
+ 0x00000042
+ 0x000e000e
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x80000370
+ 0x0000000a
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000003
+ 0x00000011
+ 0x00000000
+ 0x00000002
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000005
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000202
+ 0x00000000
+ 0x00000080
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x0000000f
+ 0x0000000f
+ 0x00000013
+ 0x00000013
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000001
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000213
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000e0e
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000022
+ 0x00000000
+ 0x00000042
+ 0x000e000e
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x8000050e
+ 0x0000000a
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000004
+ 0x0000001a
+ 0x00000000
+ 0x00000003
+ 0x00000001
+ 0x00000004
+ 0x0000000a
+ 0x00000005
+ 0x0000000b
+ 0x00000001
+ 0x00000001
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000304
+ 0x00000000
+ 0x000000c1
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000018
+ 0x0000000f
+ 0x0000001c
+ 0x0000001c
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000031c
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000e0e
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000033
+ 0x00000000
+ 0x00000042
+ 0x000e000e
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x80000713
+ 0x0000000a
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008cd>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000009
+ 0x00000035
+ 0x00000000
+ 0x00000006
+ 0x00000002
+ 0x00000005
+ 0x0000000a
+ 0x00000005
+ 0x0000000b
+ 0x00000002
+ 0x00000002
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000004
+ 0x00000006
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x0000000d
+ 0x0000000f
+ 0x00000011
+ 0x00000607
+ 0x00000000
+ 0x00000181
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000032
+ 0x0000000f
+ 0x00000038
+ 0x00000038
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000006
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000638
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00080000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00008000
+ 0x00000000
+ 0x00000000
+ 0x00008000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00090000
+ 0x00090000
+ 0x00090000
+ 0x00090000
+ 0x00009000
+ 0x00009000
+ 0x00009000
+ 0x00009000
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000707
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000066
+ 0x00000000
+ 0x00000100
+ 0x000e000e
+ 0x00000000
+ 0x00000003
+ 0x0000d2b3
+ 0x80000d22
+ 0x0000000a
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x000008d5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000321>;
+ nvidia,emc-mrs-wait-cnt = <0x0173000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000000d
+ 0x0000004d
+ 0x00000000
+ 0x00000009
+ 0x00000003
+ 0x00000004
+ 0x00000008
+ 0x00000002
+ 0x00000009
+ 0x00000003
+ 0x00000003
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000005
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000007
+ 0x00020000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x0000000e
+ 0x00000010
+ 0x00000012
+ 0x000008e4
+ 0x00000000
+ 0x00000239
+ 0x00000001
+ 0x00000008
+ 0x00000001
+ 0x00000000
+ 0x0000004b
+ 0x0000000e
+ 0x00000052
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000008
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000924
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0x002c00a0
+ 0x00008000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00098000
+ 0x00098000
+ 0x00000000
+ 0x00098000
+ 0x00098000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00050000
+ 0x00050000
+ 0x00050000
+ 0x00050000
+ 0x00005000
+ 0x00005000
+ 0x00005000
+ 0x00005000
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000096
+ 0x00000000
+ 0x00000100
+ 0x0173000e
+ 0x00000000
+ 0x00000003
+ 0x000052a3
+ 0x800012d7
+ 0x00000009
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x00000895>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000521>;
+ nvidia,emc-mrs-wait-cnt = <0x015b000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000011
+ 0x00000066
+ 0x00000000
+ 0x0000000c
+ 0x00000004
+ 0x00000004
+ 0x00000008
+ 0x00000002
+ 0x0000000a
+ 0x00000004
+ 0x00000004
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000005
+ 0x00000002
+ 0x00000000
+ 0x00000001
+ 0x00000008
+ 0x00020000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000000f
+ 0x00000010
+ 0x00000012
+ 0x00000bd1
+ 0x00000000
+ 0x000002f4
+ 0x00000001
+ 0x00000008
+ 0x00000001
+ 0x00000000
+ 0x00000063
+ 0x0000000f
+ 0x0000006c
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x0000000b
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000c11
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0x002c00a0
+ 0x00008000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00070000
+ 0x00070000
+ 0x00000000
+ 0x00070000
+ 0x00070000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00038000
+ 0x00038000
+ 0x00038000
+ 0x00038000
+ 0x00003800
+ 0x00003800
+ 0x00003800
+ 0x00003800
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x000000c6
+ 0x00000000
+ 0x00000100
+ 0x015b000e
+ 0x00000000
+ 0x00000003
+ 0x000052a3
+ 0x8000188b
+ 0x00000009
+ >;
+ };
+
+ timing-528000000 {
+ clock-frequency = <528000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000941>;
+ nvidia,emc-mrs-wait-cnt = <0x0139000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0123133d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000018
+ 0x00000088
+ 0x00000000
+ 0x00000010
+ 0x00000006
+ 0x00000006
+ 0x00000009
+ 0x00000002
+ 0x0000000d
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000001
+ 0x00000009
+ 0x00030000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000010
+ 0x00000012
+ 0x00000014
+ 0x00000fd6
+ 0x00000000
+ 0x000003f5
+ 0x00000002
+ 0x0000000b
+ 0x00000001
+ 0x00000000
+ 0x00000085
+ 0x00000012
+ 0x00000090
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000010
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00001017
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0xe01200b1
+ 0x00008000
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00054000
+ 0x00054000
+ 0x00000000
+ 0x00054000
+ 0x00054000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000000c
+ 0x0000000c
+ 0x0000000c
+ 0x0000000c
+ 0x0000000c
+ 0x0000000c
+ 0x0000000c
+ 0x0000000c
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000100
+ 0x0139000e
+ 0x00000000
+ 0x00000003
+ 0x000042a0
+ 0x80002062
+ 0x0000000a
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200010>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000b61>;
+ nvidia,emc-mrs-wait-cnt = <0x0127000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0121113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000001b
+ 0x0000009b
+ 0x00000000
+ 0x00000013
+ 0x00000007
+ 0x00000007
+ 0x0000000b
+ 0x00000003
+ 0x00000010
+ 0x00000007
+ 0x00000007
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000000a
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x0000000b
+ 0x00070000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000002
+ 0x00000012
+ 0x00000016
+ 0x00000018
+ 0x00001208
+ 0x00000000
+ 0x00000482
+ 0x00000002
+ 0x0000000d
+ 0x00000001
+ 0x00000000
+ 0x00000097
+ 0x00000015
+ 0x000000a3
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000013
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00001248
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0xe00e00b1
+ 0x00008000
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00048000
+ 0x00048000
+ 0x00000000
+ 0x00048000
+ 0x00048000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000000d
+ 0x0000000d
+ 0x0000000d
+ 0x0000000d
+ 0x0000000d
+ 0x0000000d
+ 0x0000000d
+ 0x0000000d
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000100
+ 0x0127000e
+ 0x00000000
+ 0x00000003
+ 0x000040a0
+ 0x800024aa
+ 0x0000000e
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200018>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000d71>;
+ nvidia,emc-mrs-wait-cnt = <0x00f7000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040000>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0120113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000024
+ 0x000000cd
+ 0x00000000
+ 0x00000019
+ 0x0000000a
+ 0x00000008
+ 0x0000000d
+ 0x00000004
+ 0x00000013
+ 0x0000000a
+ 0x0000000a
+ 0x00000004
+ 0x00000002
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x0000000b
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x0000000d
+ 0x00080000
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x00000014
+ 0x00000018
+ 0x0000001a
+ 0x000017e2
+ 0x00000000
+ 0x000005f8
+ 0x00000003
+ 0x00000011
+ 0x00000001
+ 0x00000000
+ 0x000000c7
+ 0x00000018
+ 0x000000d7
+ 0x00000200
+ 0x00000005
+ 0x00000006
+ 0x00000005
+ 0x00000019
+ 0x00000000
+ 0x00000008
+ 0x00000008
+ 0x00001822
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0xe00700b1
+ 0x00008000
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x007fc008
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00034000
+ 0x00034000
+ 0x00000000
+ 0x00034000
+ 0x00034000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000000
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x61861820
+ 0x00514514
+ 0x00514514
+ 0x61861800
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000100
+ 0x00f7000e
+ 0x00000000
+ 0x00000004
+ 0x00004080
+ 0x80003012
+ 0x0000000f
+ >;
+ };
+
+ timing-924000000 {
+ clock-frequency = <924000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430303>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200020>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000f15>;
+ nvidia,emc-mrs-wait-cnt = <0x00cd000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040000>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0120113d>;
+ nvidia,emc-zcal-cnt-long = <0x0000004c>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000002b
+ 0x000000f0
+ 0x00000000
+ 0x0000001e
+ 0x0000000b
+ 0x00000009
+ 0x0000000f
+ 0x00000005
+ 0x00000016
+ 0x0000000b
+ 0x0000000b
+ 0x00000004
+ 0x00000002
+ 0x00000000
+ 0x00000007
+ 0x00000007
+ 0x0000000d
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x0000000f
+ 0x000a0000
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x00000016
+ 0x0000001a
+ 0x0000001c
+ 0x00001be7
+ 0x00000000
+ 0x000006f9
+ 0x00000004
+ 0x00000015
+ 0x00000001
+ 0x00000000
+ 0x000000e7
+ 0x0000001b
+ 0x000000fb
+ 0x00000200
+ 0x00000006
+ 0x00000007
+ 0x00000006
+ 0x0000001e
+ 0x00000000
+ 0x0000000a
+ 0x0000000a
+ 0x00001c28
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab898
+ 0xe00400b1
+ 0x00008000
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x007f800a
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0002c000
+ 0x0002c000
+ 0x00000000
+ 0x0002c000
+ 0x0002c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000004
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000000
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x5d75d720
+ 0x00514514
+ 0x00514514
+ 0x5d75d700
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000128
+ 0x00cd000e
+ 0x00000000
+ 0x00000004
+ 0x00004080
+ 0x800037ea
+ 0x00000011
+ >;
+ };
+
+ };
+ };
+
+ memory-controller@0,70019000 {
+ emc-timings-3 {
+ nvidia,ram-code = <3>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emem-configuration = <
+ 0x40040001
+ 0x8000000a
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000003
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0502
+ 0x77e30303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emem-configuration = <
+ 0x40020001
+ 0x80000012
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000003
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0502
+ 0x76230303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emem-configuration = <
+ 0xa0000001
+ 0x80000017
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000003
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0502
+ 0x74a30303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000001
+ 0x8000001e
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000003
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0502
+ 0x74230403
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000001
+ 0x80000026
+ 0x00000001
+ 0x00000001
+ 0x00000003
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000003
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0503
+ 0x73c30504
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emem-configuration = <
+ 0x01000003
+ 0x80000040
+ 0x00000001
+ 0x00000001
+ 0x00000004
+ 0x00000002
+ 0x00000003
+ 0x00000001
+ 0x00000003
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040203
+ 0x000a0504
+ 0x73840a05
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000004
+ 0x80000040
+ 0x00000001
+ 0x00000002
+ 0x00000007
+ 0x00000004
+ 0x00000004
+ 0x00000001
+ 0x00000002
+ 0x00000007
+ 0x00000002
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040202
+ 0x000b0607
+ 0x77450e08
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emem-configuration = <
+ 0x0f000005
+ 0x80000040
+ 0x00000001
+ 0x00000002
+ 0x00000009
+ 0x00000005
+ 0x00000006
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000002
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040202
+ 0x000d0709
+ 0x7586120a
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-528000000 {
+ clock-frequency = <528000000>;
+
+ nvidia,emem-configuration = <
+ 0x0f000007
+ 0x80000040
+ 0x00000002
+ 0x00000003
+ 0x0000000c
+ 0x00000007
+ 0x00000008
+ 0x00000001
+ 0x00000002
+ 0x00000009
+ 0x00000002
+ 0x00000002
+ 0x00000005
+ 0x00000006
+ 0x06050202
+ 0x0010090c
+ 0x7428180d
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000009
+ 0x80000040
+ 0x00000003
+ 0x00000004
+ 0x0000000e
+ 0x00000009
+ 0x0000000a
+ 0x00000001
+ 0x00000003
+ 0x0000000b
+ 0x00000002
+ 0x00000002
+ 0x00000005
+ 0x00000007
+ 0x07050202
+ 0x00130b0e
+ 0x73a91b0f
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emem-configuration = <
+ 0x0e00000b
+ 0x80000040
+ 0x00000004
+ 0x00000005
+ 0x00000013
+ 0x0000000c
+ 0x0000000d
+ 0x00000002
+ 0x00000003
+ 0x0000000c
+ 0x00000002
+ 0x00000002
+ 0x00000006
+ 0x00000008
+ 0x08060202
+ 0x00170e13
+ 0x736c2414
+ 0x70000f02
+ 0x001f0000
+ >;
+ };
+
+ timing-924000000 {
+ clock-frequency = <924000000>;
+
+ nvidia,emem-configuration = <
+ 0x0e00000d
+ 0x80000040
+ 0x00000005
+ 0x00000006
+ 0x00000016
+ 0x0000000e
+ 0x0000000f
+ 0x00000002
+ 0x00000004
+ 0x0000000e
+ 0x00000002
+ 0x00000002
+ 0x00000006
+ 0x00000009
+ 0x09060202
+ 0x001a1016
+ 0x734e2a17
+ 0x70000f02
+ 0x001f0000
+ >;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index dbfaba09703a..ed8a8acd3d34 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -3,6 +3,8 @@
#include <dt-bindings/input/input.h>
#include "tegra124.dtsi"
+#include "tegra124-jetson-tk1-emc.dtsi"
+
/ {
model = "NVIDIA Tegra124 Jetson TK1";
compatible = "nvidia,jetson-tk1", "nvidia,tegra124";
@@ -60,35 +62,35 @@
nvidia,pins = "clk_32k_out_pa0";
nvidia,function = "soc";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
uart3_cts_n_pa1 {
nvidia,pins = "uart3_cts_n_pa1";
- nvidia,function = "uartc";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap2_fs_pa2 {
nvidia,pins = "dap2_fs_pa2";
nvidia,function = "i2s1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap2_sclk_pa3 {
nvidia,pins = "dap2_sclk_pa3";
nvidia,function = "i2s1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap2_din_pa4 {
nvidia,pins = "dap2_din_pa4";
nvidia,function = "i2s1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
dap2_dout_pa5 {
@@ -96,14 +98,14 @@
nvidia,function = "i2s1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
sdmmc3_clk_pa6 {
nvidia,pins = "sdmmc3_clk_pa6";
nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
sdmmc3_cmd_pa7 {
nvidia,pins = "sdmmc3_cmd_pa7";
@@ -116,14 +118,14 @@
nvidia,pins = "pb0";
nvidia,function = "uartd";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pb1 {
nvidia,pins = "pb1";
nvidia,function = "uartd";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
sdmmc3_dat3_pb4 {
@@ -156,9 +158,9 @@
};
uart3_rts_n_pc0 {
nvidia,pins = "uart3_rts_n_pc0";
- nvidia,function = "uartc";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
uart2_txd_pc2 {
@@ -172,7 +174,7 @@
nvidia,pins = "uart2_rxd_pc3";
nvidia,function = "irda";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
gen1_i2c_scl_pc4 {
@@ -194,44 +196,39 @@
pc7 {
nvidia,pins = "pc7";
nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pg0 {
nvidia,pins = "pg0";
- nvidia,function = "rsvd1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pg1 {
nvidia,pins = "pg1";
- nvidia,function = "rsvd1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pg2 {
nvidia,pins = "pg2";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pg3 {
nvidia,pins = "pg3";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pg4 {
nvidia,pins = "pg4";
- nvidia,function = "spi4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pg5 {
nvidia,pins = "pg5";
@@ -251,7 +248,7 @@
nvidia,pins = "pg7";
nvidia,function = "spi4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
ph0 {
@@ -270,7 +267,6 @@
};
ph2 {
nvidia,pins = "ph2";
- nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -278,57 +274,53 @@
ph3 {
nvidia,pins = "ph3";
nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ph4 {
nvidia,pins = "ph4";
- nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
ph5 {
nvidia,pins = "ph5";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ph6 {
nvidia,pins = "ph6";
nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ph7 {
nvidia,pins = "ph7";
- nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pi0 {
nvidia,pins = "pi0";
- nvidia,function = "rsvd1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pi1 {
nvidia,pins = "pi1";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_ENABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pi2 {
nvidia,pins = "pi2";
nvidia,function = "rsvd4";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pi3 {
@@ -341,22 +333,21 @@
pi4 {
nvidia,pins = "pi4";
nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pi5 {
nvidia,pins = "pi5";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pi6 {
nvidia,pins = "pi6";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pi7 {
@@ -368,23 +359,22 @@
};
pj0 {
nvidia,pins = "pj0";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pj2 {
nvidia,pins = "pj2";
nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
uart2_cts_n_pj5 {
nvidia,pins = "uart2_cts_n_pj5";
nvidia,function = "uartb";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
uart2_rts_n_pj6 {
@@ -403,35 +393,32 @@
};
pk0 {
nvidia,pins = "pk0";
- nvidia,function = "soc";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pk1 {
nvidia,pins = "pk1";
- nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pk2 {
nvidia,pins = "pk2";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pk3 {
nvidia,pins = "pk3";
nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pk4 {
nvidia,pins = "pk4";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -439,13 +426,12 @@
spdif_out_pk5 {
nvidia,pins = "spdif_out_pk5";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
spdif_in_pk6 {
nvidia,pins = "spdif_in_pk6";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -459,17 +445,17 @@
};
dap1_fs_pn0 {
nvidia,pins = "dap1_fs_pn0";
- nvidia,function = "i2s0";
+ nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap1_din_pn1 {
nvidia,pins = "dap1_din_pn1";
- nvidia,function = "i2s0";
+ nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap1_dout_pn2 {
nvidia,pins = "dap1_dout_pn2";
@@ -480,108 +466,104 @@
};
dap1_sclk_pn3 {
nvidia,pins = "dap1_sclk_pn3";
- nvidia,function = "i2s0";
+ nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
usb_vbus_en0_pn4 {
nvidia,pins = "usb_vbus_en0_pn4";
nvidia,function = "usb";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
- nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
};
usb_vbus_en1_pn5 {
nvidia,pins = "usb_vbus_en1_pn5";
nvidia,function = "usb";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
- nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
};
hdmi_int_pn7 {
nvidia,pins = "hdmi_int_pn7";
- nvidia,function = "rsvd1";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
};
ulpi_data7_po0 {
nvidia,pins = "ulpi_data7_po0";
nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ulpi_data0_po1 {
nvidia,pins = "ulpi_data0_po1";
- nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
ulpi_data1_po2 {
nvidia,pins = "ulpi_data1_po2";
nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ulpi_data2_po3 {
nvidia,pins = "ulpi_data2_po3";
nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ulpi_data3_po4 {
nvidia,pins = "ulpi_data3_po4";
- nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
ulpi_data4_po5 {
nvidia,pins = "ulpi_data4_po5";
nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ulpi_data5_po6 {
nvidia,pins = "ulpi_data5_po6";
nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ulpi_data6_po7 {
nvidia,pins = "ulpi_data6_po7";
nvidia,function = "ulpi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap3_fs_pp0 {
nvidia,pins = "dap3_fs_pp0";
nvidia,function = "i2s2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap3_din_pp1 {
nvidia,pins = "dap3_din_pp1";
nvidia,function = "i2s2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap3_dout_pp2 {
nvidia,pins = "dap3_dout_pp2";
- nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -595,91 +577,87 @@
};
dap4_fs_pp4 {
nvidia,pins = "dap4_fs_pp4";
- nvidia,function = "i2s3";
+ nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap4_din_pp5 {
nvidia,pins = "dap4_din_pp5";
- nvidia,function = "i2s3";
+ nvidia,function = "rsvd3";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap4_dout_pp6 {
nvidia,pins = "dap4_dout_pp6";
- nvidia,function = "i2s3";
+ nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap4_sclk_pp7 {
nvidia,pins = "dap4_sclk_pp7";
- nvidia,function = "i2s3";
+ nvidia,function = "rsvd3";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_col0_pq0 {
nvidia,pins = "kb_col0_pq0";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_col1_pq1 {
nvidia,pins = "kb_col1_pq1";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_col2_pq2 {
nvidia,pins = "kb_col2_pq2";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_col3_pq3 {
nvidia,pins = "kb_col3_pq3";
- nvidia,function = "kbc";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_ENABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_col4_pq4 {
nvidia,pins = "kb_col4_pq4";
nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_col5_pq5 {
nvidia,pins = "kb_col5_pq5";
- nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_col6_pq6 {
nvidia,pins = "kb_col6_pq6";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_col7_pq7 {
nvidia,pins = "kb_col7_pq7";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row0_pr0 {
nvidia,pins = "kb_row0_pr0";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -687,121 +665,115 @@
kb_row1_pr1 {
nvidia,pins = "kb_row1_pr1";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row2_pr2 {
nvidia,pins = "kb_row2_pr2";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row3_pr3 {
nvidia,pins = "kb_row3_pr3";
- nvidia,function = "sys";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row4_pr4 {
nvidia,pins = "kb_row4_pr4";
- nvidia,function = "rsvd3";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_row5_pr5 {
nvidia,pins = "kb_row5_pr5";
nvidia,function = "rsvd3";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row6_pr6 {
nvidia,pins = "kb_row6_pr6";
nvidia,function = "displaya_alt";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_row7_pr7 {
nvidia,pins = "kb_row7_pr7";
- nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_row8_ps0 {
nvidia,pins = "kb_row8_ps0";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row9_ps1 {
nvidia,pins = "kb_row9_ps1";
- nvidia,function = "rsvd2";
+ nvidia,function = "uarta";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row10_ps2 {
nvidia,pins = "kb_row10_ps2";
- nvidia,function = "rsvd2";
+ nvidia,function = "uarta";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_row11_ps3 {
nvidia,pins = "kb_row11_ps3";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row12_ps4 {
nvidia,pins = "kb_row12_ps4";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row13_ps5 {
nvidia,pins = "kb_row13_ps5";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row14_ps6 {
nvidia,pins = "kb_row14_ps6";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row15_ps7 {
nvidia,pins = "kb_row15_ps7";
- nvidia,function = "soc";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
kb_row16_pt0 {
nvidia,pins = "kb_row16_pt0";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
kb_row17_pt1 {
nvidia,pins = "kb_row17_pt1";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
gen2_i2c_scl_pt5 {
nvidia,pins = "gen2_i2c_scl_pt5";
@@ -828,72 +800,63 @@
};
pu0 {
nvidia,pins = "pu0";
- nvidia,function = "rsvd4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pu1 {
nvidia,pins = "pu1";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pu2 {
nvidia,pins = "pu2";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pu3 {
nvidia,pins = "pu3";
- nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pu4 {
nvidia,pins = "pu4";
- nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pu5 {
nvidia,pins = "pu5";
- nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pu6 {
nvidia,pins = "pu6";
- nvidia,function = "rsvd3";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pv0 {
nvidia,pins = "pv0";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pv1 {
nvidia,pins = "pv1";
- nvidia,function = "rsvd1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
sdmmc3_cd_n_pv2 {
nvidia,pins = "sdmmc3_cd_n_pv2";
nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
sdmmc1_wp_n_pv3 {
@@ -922,16 +885,16 @@
gpio_w2_aud_pw2 {
nvidia,pins = "gpio_w2_aud_pw2";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
gpio_w3_aud_pw3 {
nvidia,pins = "gpio_w3_aud_pw3";
nvidia,function = "spi6";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap_mclk1_pw4 {
nvidia,pins = "dap_mclk1_pw4";
@@ -949,17 +912,17 @@
};
uart3_txd_pw6 {
nvidia,pins = "uart3_txd_pw6";
- nvidia,function = "uartc";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
uart3_rxd_pw7 {
nvidia,pins = "uart3_rxd_pw7";
- nvidia,function = "uartc";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dvfs_pwm_px0 {
nvidia,pins = "dvfs_pwm_px0";
@@ -970,10 +933,9 @@
};
gpio_x1_aud_px1 {
nvidia,pins = "gpio_x1_aud_px1";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
dvfs_clk_px2 {
nvidia,pins = "dvfs_clk_px2";
@@ -985,34 +947,32 @@
gpio_x3_aud_px3 {
nvidia,pins = "gpio_x3_aud_px3";
nvidia,function = "rsvd4";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
gpio_x4_aud_px4 {
nvidia,pins = "gpio_x4_aud_px4";
- nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
gpio_x5_aud_px5 {
nvidia,pins = "gpio_x5_aud_px5";
nvidia,function = "rsvd4";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
gpio_x6_aud_px6 {
nvidia,pins = "gpio_x6_aud_px6";
nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
gpio_x7_aud_px7 {
nvidia,pins = "gpio_x7_aud_px7";
- nvidia,function = "rsvd1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -1027,8 +987,8 @@
ulpi_dir_py1 {
nvidia,pins = "ulpi_dir_py1";
nvidia,function = "spi1";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
ulpi_nxt_py2 {
@@ -1048,44 +1008,44 @@
sdmmc1_dat3_py4 {
nvidia,pins = "sdmmc1_dat3_py4";
nvidia,function = "sdmmc1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
sdmmc1_dat2_py5 {
nvidia,pins = "sdmmc1_dat2_py5";
nvidia,function = "sdmmc1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
sdmmc1_dat1_py6 {
nvidia,pins = "sdmmc1_dat1_py6";
nvidia,function = "sdmmc1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
sdmmc1_dat0_py7 {
nvidia,pins = "sdmmc1_dat0_py7";
- nvidia,function = "sdmmc1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
sdmmc1_clk_pz0 {
nvidia,pins = "sdmmc1_clk_pz0";
- nvidia,function = "sdmmc1";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
sdmmc1_cmd_pz1 {
nvidia,pins = "sdmmc1_cmd_pz1";
nvidia,function = "sdmmc1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pwr_i2c_scl_pz6 {
nvidia,pins = "pwr_i2c_scl_pz6";
@@ -1184,7 +1144,6 @@
};
pbb3 {
nvidia,pins = "pbb3";
- nvidia,function = "vgp3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -1198,21 +1157,18 @@
};
pbb5 {
nvidia,pins = "pbb5";
- nvidia,function = "rsvd3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pbb6 {
nvidia,pins = "pbb6";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pbb7 {
nvidia,pins = "pbb7";
- nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -1226,15 +1182,13 @@
};
pcc1 {
nvidia,pins = "pcc1";
- nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pcc2 {
nvidia,pins = "pcc2";
- nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
@@ -1248,8 +1202,8 @@
clk2_req_pcc5 {
nvidia,pins = "clk2_req_pcc5";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pex_l0_rst_n_pdd1 {
@@ -1262,15 +1216,15 @@
pex_l0_clkreq_n_pdd2 {
nvidia,pins = "pex_l0_clkreq_n_pdd2";
nvidia,function = "pe0";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pex_wake_n_pdd3 {
nvidia,pins = "pex_wake_n_pdd3";
nvidia,function = "pe";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pex_l1_rst_n_pdd5 {
@@ -1283,8 +1237,8 @@
pex_l1_clkreq_n_pdd6 {
nvidia,pins = "pex_l1_clkreq_n_pdd6";
nvidia,function = "pe1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
clk3_out_pee0 {
@@ -1297,13 +1251,12 @@
clk3_req_pee1 {
nvidia,pins = "clk3_req_pee1";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
dap_mclk1_req_pee2 {
nvidia,pins = "dap_mclk1_req_pee2";
- nvidia,function = "sata";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -1314,7 +1267,7 @@
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
- nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
};
sdmmc3_clk_lb_out_pee4 {
nvidia,pins = "sdmmc3_clk_lb_out_pee4";
@@ -1333,24 +1286,24 @@
dp_hpd_pff0 {
nvidia,pins = "dp_hpd_pff0";
nvidia,function = "dp";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
usb_vbus_en2_pff1 {
nvidia,pins = "usb_vbus_en2_pff1";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
nvidia,open-drain = <TEGRA_PIN_DISABLE>;
};
pff2 {
nvidia,pins = "pff2";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
nvidia,open-drain = <TEGRA_PIN_DISABLE>;
};
core_pwr_req {
@@ -1362,7 +1315,7 @@
};
cpu_pwr_req {
nvidia,pins = "cpu_pwr_req";
- nvidia,function = "rsvd2";
+ nvidia,function = "cpu";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
@@ -1371,7 +1324,7 @@
nvidia,pins = "pwr_int_n";
nvidia,function = "pmi";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
reset_out_n {
@@ -1379,7 +1332,7 @@
nvidia,function = "reset_out_n";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
owr {
nvidia,pins = "owr";
@@ -1391,9 +1344,9 @@
};
clk_32k_in {
nvidia,pins = "clk_32k_in";
- nvidia,function = "rsvd2";
+ nvidia,function = "clk";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
jtag_rtck {
diff --git a/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi b/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
new file mode 100644
index 000000000000..1a5748d05dda
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
@@ -0,0 +1,2023 @@
+/ {
+ clock@0,60006000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-20400000 {
+ clock-frequency = <20400000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-40800000 {
+ clock-frequency = <40800000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-68000000 {
+ clock-frequency = <68000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-102000000 {
+ clock-frequency = <102000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-204000000 {
+ clock-frequency = <204000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-300000000 {
+ clock-frequency = <300000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C>;
+ clock-names = "emc-parent";
+ };
+ timing-396000000 {
+ clock-frequency = <396000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M>;
+ clock-names = "emc-parent";
+ };
+ /* TODO: Add 528MHz frequency */
+ timing-600000000 {
+ clock-frequency = <600000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-792000000 {
+ clock-frequency = <792000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ };
+ };
+
+ emc@0,7001b000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000060
+ 0x00000000
+ 0x00000018
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000007
+ 0x0000000f
+ 0x00000005
+ 0x00000005
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000064
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000303
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000007
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x800001c5
+ 0x0000000a
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000
+ 0x00000005
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x0000009a
+ 0x00000000
+ 0x00000026
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000007
+ 0x0000000f
+ 0x00000006
+ 0x00000006
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x000000a0
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000303
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x0000000b
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x8000023a
+ 0x0000000a
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000001
+ 0x0000000a
+ 0x00000000
+ 0x00000001
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000134
+ 0x00000000
+ 0x0000004d
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000008
+ 0x0000000f
+ 0x0000000c
+ 0x0000000c
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000013f
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000303
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000015
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x80000370
+ 0x0000000a
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000003
+ 0x00000011
+ 0x00000000
+ 0x00000002
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000202
+ 0x00000000
+ 0x00000080
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x0000000f
+ 0x0000000f
+ 0x00000013
+ 0x00000013
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000001
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000213
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000303
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000022
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x8000050e
+ 0x0000000a
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000004
+ 0x0000001a
+ 0x00000000
+ 0x00000003
+ 0x00000001
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000001
+ 0x00000001
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000304
+ 0x00000000
+ 0x000000c1
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000018
+ 0x0000000f
+ 0x0000001c
+ 0x0000001c
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000003
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000031c
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000303
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000033
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x80000713
+ 0x0000000a
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x0000088d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000009
+ 0x00000035
+ 0x00000000
+ 0x00000007
+ 0x00000002
+ 0x00000005
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000002
+ 0x00000002
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000004
+ 0x00000006
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x0000000d
+ 0x0000000f
+ 0x00000011
+ 0x00000607
+ 0x00000000
+ 0x00000181
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000032
+ 0x0000000f
+ 0x00000038
+ 0x00000038
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000007
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000638
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00004000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00090000
+ 0x00090000
+ 0x00094000
+ 0x00094000
+ 0x00009400
+ 0x00009000
+ 0x00009000
+ 0x00009000
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000303
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000066
+ 0x00000000
+ 0x00000100
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000d2b3
+ 0x80000d22
+ 0x0000000a
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x000008d5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000321>;
+ nvidia,emc-mrs-wait-cnt = <0x0174000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000000d
+ 0x0000004c
+ 0x00000000
+ 0x00000009
+ 0x00000003
+ 0x00000004
+ 0x00000008
+ 0x00000002
+ 0x00000009
+ 0x00000003
+ 0x00000003
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000005
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000007
+ 0x00020000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x0000000e
+ 0x00000010
+ 0x00000012
+ 0x000008e4
+ 0x00000000
+ 0x00000239
+ 0x00000001
+ 0x00000008
+ 0x00000001
+ 0x00000000
+ 0x0000004a
+ 0x0000000e
+ 0x00000051
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000009
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000924
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0x002c00a0
+ 0x00008000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00098000
+ 0x00098000
+ 0x00000000
+ 0x00098000
+ 0x00098000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00060000
+ 0x00060000
+ 0x00060000
+ 0x00060000
+ 0x00006000
+ 0x00006000
+ 0x00006000
+ 0x00006000
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000101
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000096
+ 0x00000000
+ 0x00000100
+ 0x0174000c
+ 0x00000000
+ 0x00000003
+ 0x000052a3
+ 0x800012d7
+ 0x00000009
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x00000895>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000521>;
+ nvidia,emc-mrs-wait-cnt = <0x015b000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000012
+ 0x00000065
+ 0x00000000
+ 0x0000000c
+ 0x00000004
+ 0x00000005
+ 0x00000008
+ 0x00000002
+ 0x0000000a
+ 0x00000004
+ 0x00000004
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000005
+ 0x00000002
+ 0x00000000
+ 0x00000001
+ 0x00000008
+ 0x00020000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000000f
+ 0x00000010
+ 0x00000012
+ 0x00000bd1
+ 0x00000000
+ 0x000002f4
+ 0x00000001
+ 0x00000008
+ 0x00000001
+ 0x00000000
+ 0x00000063
+ 0x0000000f
+ 0x0000006b
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x0000000d
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000c11
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0x002c00a0
+ 0x00008000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00070000
+ 0x00070000
+ 0x00000000
+ 0x00070000
+ 0x00070000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00048000
+ 0x00048000
+ 0x00048000
+ 0x00048000
+ 0x00004800
+ 0x00004800
+ 0x00004800
+ 0x00004800
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000101
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x000000c6
+ 0x00000000
+ 0x00000100
+ 0x015b000c
+ 0x00000000
+ 0x00000003
+ 0x000052a3
+ 0x8000188b
+ 0x00000009
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200010>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000b61>;
+ nvidia,emc-mrs-wait-cnt = <0x0128000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0121113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000001c
+ 0x0000009a
+ 0x00000000
+ 0x00000013
+ 0x00000007
+ 0x00000007
+ 0x0000000b
+ 0x00000003
+ 0x00000010
+ 0x00000007
+ 0x00000007
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000000a
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x0000000b
+ 0x00070000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000002
+ 0x00000012
+ 0x00000016
+ 0x00000018
+ 0x00001208
+ 0x00000000
+ 0x00000482
+ 0x00000002
+ 0x0000000d
+ 0x00000001
+ 0x00000000
+ 0x00000096
+ 0x00000015
+ 0x000000a2
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000015
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00001249
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0xe00e00b1
+ 0x00008000
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00048000
+ 0x00048000
+ 0x00000000
+ 0x00048000
+ 0x00048000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x00000004
+ 0x00000002
+ 0x00000005
+ 0x00000006
+ 0x00000003
+ 0x00000006
+ 0x00000005
+ 0x00000004
+ 0x00000004
+ 0x00000002
+ 0x00000005
+ 0x00000006
+ 0x00000003
+ 0x00000006
+ 0x00000005
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000101
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000100
+ 0x0128000c
+ 0x00000000
+ 0x00000003
+ 0x000040a0
+ 0x800024aa
+ 0x0000000e
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0080089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200418>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000d71>;
+ nvidia,emc-mrs-wait-cnt = <0x00f8000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040000>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0120113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000025
+ 0x000000cc
+ 0x00000000
+ 0x0000001a
+ 0x00000009
+ 0x00000008
+ 0x0000000d
+ 0x00000004
+ 0x00000013
+ 0x00000009
+ 0x00000009
+ 0x00000003
+ 0x00000002
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x0000000b
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x0000000d
+ 0x00080000
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x00000014
+ 0x00000018
+ 0x0000001a
+ 0x000017e2
+ 0x00000000
+ 0x000005f8
+ 0x00000003
+ 0x00000011
+ 0x00000001
+ 0x00000000
+ 0x000000c6
+ 0x00000018
+ 0x000000d6
+ 0x00000200
+ 0x00000005
+ 0x00000006
+ 0x00000005
+ 0x0000001d
+ 0x00000000
+ 0x00000008
+ 0x00000008
+ 0x00001822
+ 0x00000000
+ 0x80000005
+ 0x00000000
+ 0x104ab198
+ 0xe00700b1
+ 0x00008000
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000005
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00034000
+ 0x00034000
+ 0x00000000
+ 0x00034000
+ 0x00034000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000008
+ 0x00000008
+ 0x00000005
+ 0x00000009
+ 0x00000009
+ 0x00000007
+ 0x00000009
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000005
+ 0x00000009
+ 0x00000009
+ 0x00000007
+ 0x00000009
+ 0x00000008
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000101
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x61861820
+ 0x00514514
+ 0x00514514
+ 0x61861800
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000100
+ 0x00f8000c
+ 0x00000007
+ 0x00000004
+ 0x00004080
+ 0x80003012
+ 0x0000000f
+ >;
+ };
+
+ };
+ };
+
+ memory-controller@0,70019000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emem-configuration = <
+ 0x40040001
+ 0x8000000a
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x77e30303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emem-configuration = <
+ 0x40020001
+ 0x80000012
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x76230303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emem-configuration = <
+ 0xa0000001
+ 0x80000017
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x74a30303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000001
+ 0x8000001e
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x74230403
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000001
+ 0x80000026
+ 0x00000001
+ 0x00000001
+ 0x00000003
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0403
+ 0x73c30504
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emem-configuration = <
+ 0x01000003
+ 0x80000040
+ 0x00000001
+ 0x00000001
+ 0x00000005
+ 0x00000002
+ 0x00000004
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040203
+ 0x000a0405
+ 0x73840a06
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000004
+ 0x80000040
+ 0x00000001
+ 0x00000002
+ 0x00000007
+ 0x00000004
+ 0x00000005
+ 0x00000001
+ 0x00000002
+ 0x00000007
+ 0x00000002
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040202
+ 0x000b0607
+ 0x77450e08
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emem-configuration = <
+ 0x0f000005
+ 0x80000040
+ 0x00000001
+ 0x00000002
+ 0x00000009
+ 0x00000005
+ 0x00000007
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000002
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040202
+ 0x000d0709
+ 0x7586120a
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000009
+ 0x80000040
+ 0x00000003
+ 0x00000004
+ 0x0000000e
+ 0x00000009
+ 0x0000000b
+ 0x00000001
+ 0x00000003
+ 0x0000000b
+ 0x00000002
+ 0x00000002
+ 0x00000005
+ 0x00000007
+ 0x07050202
+ 0x00130b0e
+ 0x73a91b0f
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emem-configuration = <
+ 0x0e00000b
+ 0x80000040
+ 0x00000004
+ 0x00000005
+ 0x00000013
+ 0x0000000c
+ 0x0000000f
+ 0x00000002
+ 0x00000003
+ 0x0000000c
+ 0x00000002
+ 0x00000002
+ 0x00000006
+ 0x00000008
+ 0x08060202
+ 0x00160d13
+ 0x734c2414
+ 0x70000f02
+ 0x001f0000
+ >;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts
index 004e8e4e1c04..2d21253ea4e3 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-big.dts
@@ -1,46 +1,29 @@
/dts-v1/;
-#include <dt-bindings/input/input.h>
-#include "tegra124.dtsi"
+#include "tegra124-nyan.dtsi"
+
+#include "tegra124-nyan-big-emc.dtsi"
/ {
model = "Acer Chromebook 13 CB5-311";
compatible = "google,nyan-big", "nvidia,tegra124";
- aliases {
- rtc0 = "/i2c@0,7000d000/pmic@40";
- rtc1 = "/rtc@0,7000e000";
- serial0 = &uarta;
- };
+ panel: panel {
+ compatible = "auo,b133xtn01";
- memory {
- reg = <0x0 0x80000000 0x0 0x80000000>;
+ backlight = <&backlight>;
+ ddc-i2c-bus = <&dpaux>;
};
- host1x@0,50000000 {
- hdmi@0,54280000 {
- status = "okay";
-
- vdd-supply = <&vdd_3v3_hdmi>;
- pll-supply = <&vdd_hdmi_pll>;
- hdmi-supply = <&vdd_5v0_hdmi>;
-
- nvidia,ddc-i2c-bus = <&hdmi_ddc>;
- nvidia,hpd-gpio =
- <&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>;
- };
-
- sor@0,54540000 {
- status = "okay";
-
- nvidia,dpaux = <&dpaux>;
- nvidia,panel = <&panel>;
- };
+ sdhci@0,700b0400 { /* SD Card on this bus */
+ wp-gpios = <&gpio TEGRA_GPIO(Q, 4) GPIO_ACTIVE_LOW>;
+ };
- dpaux@0,545c0000 {
- vdd-supply = <&vdd_3v3_panel>;
- status = "okay";
- };
+ sound {
+ compatible = "nvidia,tegra-audio-max98090-nyan-big",
+ "nvidia,tegra-audio-max98090-nyan",
+ "nvidia,tegra-audio-max98090";
+ nvidia,model = "GoogleNyanBig";
};
pinmux@0,70000868 {
@@ -48,1092 +31,1308 @@
pinctrl-0 = <&pinmux_default>;
pinmux_default: common {
- dap_mclk1_pw4 {
- nvidia,pins = "dap_mclk1_pw4";
- nvidia,function = "extperiph1";
+ clk_32k_out_pa0 {
+ nvidia,pins = "clk_32k_out_pa0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_cts_n_pa1 {
+ nvidia,pins = "uart3_cts_n_pa1";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2";
+ nvidia,function = "i2s1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_sclk_pa3 {
+ nvidia,pins = "dap2_sclk_pa3";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
dap2_din_pa4 {
nvidia,pins = "dap2_din_pa4";
nvidia,function = "i2s1";
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
dap2_dout_pa5 {
- nvidia,pins = "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3";
+ nvidia,pins = "dap2_dout_pa5";
nvidia,function = "i2s1";
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- dvfs_pwm_px0 {
- nvidia,pins = "dvfs_pwm_px0",
- "dvfs_clk_px2";
- nvidia,function = "cldvfs";
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ sdmmc3_clk_pa6 {
+ nvidia,pins = "sdmmc3_clk_pa6";
+ nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- ulpi_clk_py0 {
- nvidia,pins = "ulpi_clk_py0",
- "ulpi_nxt_py2",
- "ulpi_stp_py3";
- nvidia,function = "spi1";
+ sdmmc3_cmd_pa7 {
+ nvidia,pins = "sdmmc3_cmd_pa7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pb0 {
+ nvidia,pins = "pb0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pb1 {
+ nvidia,pins = "pb1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_dat3_pb4 {
+ nvidia,pins = "sdmmc3_dat3_pb4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat2_pb5 {
+ nvidia,pins = "sdmmc3_dat2_pb5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat1_pb6 {
+ nvidia,pins = "sdmmc3_dat1_pb6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat0_pb7 {
+ nvidia,pins = "sdmmc3_dat0_pb7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_rts_n_pc0 {
+ nvidia,pins = "uart3_rts_n_pc0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_txd_pc2 {
+ nvidia,pins = "uart2_txd_pc2";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rxd_pc3 {
+ nvidia,pins = "uart2_rxd_pc3";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gen1_i2c_scl_pc4 {
+ nvidia,pins = "gen1_i2c_scl_pc4";
+ nvidia,function = "i2c1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
};
- ulpi_dir_py1 {
- nvidia,pins = "ulpi_dir_py1";
- nvidia,function = "spi1";
+ gen1_i2c_sda_pc5 {
+ nvidia,pins = "gen1_i2c_sda_pc5";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pc7 {
+ nvidia,pins = "pc7";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- cam_i2c_scl_pbb1 {
- nvidia,pins = "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2";
- nvidia,function = "i2c3";
+ pg0 {
+ nvidia,pins = "pg0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pg1 {
+ nvidia,pins = "pg1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,lock = <TEGRA_PIN_DISABLE>;
- nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- gen2_i2c_scl_pt5 {
- nvidia,pins = "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6";
- nvidia,function = "i2c2";
+ pg2 {
+ nvidia,pins = "pg2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pg3 {
+ nvidia,pins = "pg3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,lock = <TEGRA_PIN_DISABLE>;
- nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pg4 {
- nvidia,pins = "pg4",
- "pg5",
- "pg6",
- "pi3";
+ nvidia,pins = "pg4";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg5 {
+ nvidia,pins = "pg5";
nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg6 {
+ nvidia,pins = "pg6";
+ nvidia,function = "spi4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
pg7 {
nvidia,pins = "pg7";
nvidia,function = "spi4";
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ph0 {
+ nvidia,pins = "ph0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ph1 {
nvidia,pins = "ph1";
nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph2 {
+ nvidia,pins = "ph2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- pk0 {
- nvidia,pins = "pk0",
- "kb_row15_ps7",
- "clk_32k_out_pa0";
- nvidia,function = "soc";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ ph3 {
+ nvidia,pins = "ph3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph4 {
+ nvidia,pins = "ph4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- sdmmc1_clk_pz0 {
- nvidia,pins = "sdmmc1_clk_pz0";
- nvidia,function = "sdmmc1";
+ ph5 {
+ nvidia,pins = "ph5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph6 {
+ nvidia,pins = "ph6";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- sdmmc1_cmd_pz1 {
- nvidia,pins = "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4";
- nvidia,function = "sdmmc1";
+ ph7 {
+ nvidia,pins = "ph7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi0 {
+ nvidia,pins = "pi0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi1 {
+ nvidia,pins = "pi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi2 {
+ nvidia,pins = "pi2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi3 {
+ nvidia,pins = "pi3";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi4 {
+ nvidia,pins = "pi4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi5 {
+ nvidia,pins = "pi5";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- sdmmc3_clk_pa6 {
- nvidia,pins = "sdmmc3_clk_pa6";
- nvidia,function = "sdmmc3";
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ pi6 {
+ nvidia,pins = "pi6";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- sdmmc3_cmd_pa7 {
- nvidia,pins = "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "kb_col4_pq4",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_cd_n_pv2";
- nvidia,function = "sdmmc3";
+ pi7 {
+ nvidia,pins = "pi7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pj0 {
+ nvidia,pins = "pj0";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- sdmmc4_clk_pcc4 {
- nvidia,pins = "sdmmc4_clk_pcc4";
- nvidia,function = "sdmmc4";
+ pj2 {
+ nvidia,pins = "pj2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_cts_n_pj5 {
+ nvidia,pins = "uart2_cts_n_pj5";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rts_n_pj6 {
+ nvidia,pins = "uart2_rts_n_pj6";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pj7 {
+ nvidia,pins = "pj7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pk0 {
+ nvidia,pins = "pk0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk1 {
+ nvidia,pins = "pk1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- sdmmc4_cmd_pt7 {
- nvidia,pins = "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7";
- nvidia,function = "sdmmc4";
+ pk2 {
+ nvidia,pins = "pk2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pk3 {
+ nvidia,pins = "pk3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk4 {
+ nvidia,pins = "pk4";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- pwr_i2c_scl_pz6 {
- nvidia,pins = "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7";
- nvidia,function = "i2cpwr";
+ spdif_out_pk5 {
+ nvidia,pins = "spdif_out_pk5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_in_pk6 {
+ nvidia,pins = "spdif_in_pk6";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk7 {
+ nvidia,pins = "pk7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap1_fs_pn0 {
+ nvidia,pins = "dap1_fs_pn0";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_din_pn1 {
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_dout_pn2 {
+ nvidia,pins = "dap1_dout_pn2";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_sclk_pn3 {
+ nvidia,pins = "dap1_sclk_pn3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ usb_vbus_en0_pn4 {
+ nvidia,pins = "usb_vbus_en0_pn4";
+ nvidia,function = "usb";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,lock = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
nvidia,open-drain = <TEGRA_PIN_ENABLE>;
};
- jtag_rtck {
- nvidia,pins = "jtag_rtck";
- nvidia,function = "rtck";
+ usb_vbus_en1_pn5 {
+ nvidia,pins = "usb_vbus_en1_pn5";
+ nvidia,function = "usb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ hdmi_int_pn7 {
+ nvidia,pins = "hdmi_int_pn7";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data7_po0 {
+ nvidia,pins = "ulpi_data7_po0";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data0_po1 {
+ nvidia,pins = "ulpi_data0_po1";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data1_po2 {
+ nvidia,pins = "ulpi_data1_po2";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data2_po3 {
+ nvidia,pins = "ulpi_data2_po3";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data3_po4 {
+ nvidia,pins = "ulpi_data3_po4";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data4_po5 {
+ nvidia,pins = "ulpi_data4_po5";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data5_po6 {
+ nvidia,pins = "ulpi_data5_po6";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data6_po7 {
+ nvidia,pins = "ulpi_data6_po7";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_fs_pp0 {
+ nvidia,pins = "dap3_fs_pp0";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_din_pp1 {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_dout_pp2 {
+ nvidia,pins = "dap3_dout_pp2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_sclk_pp3 {
+ nvidia,pins = "dap3_sclk_pp3";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_fs_pp4 {
+ nvidia,pins = "dap4_fs_pp4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_din_pp5 {
+ nvidia,pins = "dap4_din_pp5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_dout_pp6 {
+ nvidia,pins = "dap4_dout_pp6";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_sclk_pp7 {
+ nvidia,pins = "dap4_sclk_pp7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col0_pq0 {
+ nvidia,pins = "kb_col0_pq0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col1_pq1 {
+ nvidia,pins = "kb_col1_pq1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col2_pq2 {
+ nvidia,pins = "kb_col2_pq2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col3_pq3 {
+ nvidia,pins = "kb_col3_pq3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col4_pq4 {
+ nvidia,pins = "kb_col4_pq4";
+ nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- clk_32k_in {
- nvidia,pins = "clk_32k_in";
- nvidia,function = "clk";
+ kb_col5_pq5 {
+ nvidia,pins = "kb_col5_pq5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col6_pq6 {
+ nvidia,pins = "kb_col6_pq6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col7_pq7 {
+ nvidia,pins = "kb_col7_pq7";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- core_pwr_req {
- nvidia,pins = "core_pwr_req";
- nvidia,function = "pwron";
+ kb_row0_pr0 {
+ nvidia,pins = "kb_row0_pr0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row1_pr1 {
+ nvidia,pins = "kb_row1_pr1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- cpu_pwr_req {
- nvidia,pins = "cpu_pwr_req";
- nvidia,function = "cpu";
+ kb_row2_pr2 {
+ nvidia,pins = "kb_row2_pr2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row3_pr3 {
+ nvidia,pins = "kb_row3_pr3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row4_pr4 {
+ nvidia,pins = "kb_row4_pr4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- pwr_int_n {
- nvidia,pins = "pwr_int_n";
- nvidia,function = "pmi";
+ kb_row5_pr5 {
+ nvidia,pins = "kb_row5_pr5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row6_pr6 {
+ nvidia,pins = "kb_row6_pr6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row7_pr7 {
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ };
+ kb_row8_ps0 {
+ nvidia,pins = "kb_row8_ps0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row9_ps1 {
+ nvidia,pins = "kb_row9_ps1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- reset_out_n {
- nvidia,pins = "reset_out_n";
- nvidia,function = "reset_out_n";
+ kb_row10_ps2 {
+ nvidia,pins = "kb_row10_ps2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row11_ps3 {
+ nvidia,pins = "kb_row11_ps3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row12_ps4 {
+ nvidia,pins = "kb_row12_ps4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- clk3_out_pee0 {
- nvidia,pins = "clk3_out_pee0";
- nvidia,function = "extperiph3";
+ kb_row13_ps5 {
+ nvidia,pins = "kb_row13_ps5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row14_ps6 {
+ nvidia,pins = "kb_row14_ps6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row15_ps7 {
+ nvidia,pins = "kb_row15_ps7";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- gen1_i2c_sda_pc5 {
- nvidia,pins = "gen1_i2c_sda_pc5",
- "gen1_i2c_scl_pc4";
- nvidia,function = "i2c1";
+ kb_row16_pt0 {
+ nvidia,pins = "kb_row16_pt0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row17_pt1 {
+ nvidia,pins = "kb_row17_pt1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_scl_pt5 {
+ nvidia,pins = "gen2_i2c_scl_pt5";
+ nvidia,function = "i2c2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,lock = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
nvidia,open-drain = <TEGRA_PIN_ENABLE>;
};
- hdmi_cec_pee3 {
- nvidia,pins = "hdmi_cec_pee3";
- nvidia,function = "cec";
+ gen2_i2c_sda_pt6 {
+ nvidia,pins = "gen2_i2c_sda_pt6";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_cmd_pt7 {
+ nvidia,pins = "sdmmc4_cmd_pt7";
+ nvidia,function = "sdmmc4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,lock = <TEGRA_PIN_DISABLE>;
- nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- hdmi_int_pn7 {
- nvidia,pins = "hdmi_int_pn7";
+ pu0 {
+ nvidia,pins = "pu0";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu1 {
+ nvidia,pins = "pu1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu2 {
+ nvidia,pins = "pu2";
nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu3 {
+ nvidia,pins = "pu3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu4 {
+ nvidia,pins = "pu4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu5 {
+ nvidia,pins = "pu5";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu6 {
+ nvidia,pins = "pu6";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pv0 {
+ nvidia,pins = "pv0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pv1 {
+ nvidia,pins = "pv1";
+ nvidia,function = "rsvd1";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_cd_n_pv2 {
+ nvidia,pins = "sdmmc3_cd_n_pv2";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_wp_n_pv3 {
+ nvidia,pins = "sdmmc1_wp_n_pv3";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
ddc_scl_pv4 {
- nvidia,pins = "ddc_scl_pv4",
- "ddc_sda_pv5";
+ nvidia,pins = "ddc_scl_pv4";
nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+ ddc_sda_pv5 {
+ nvidia,pins = "ddc_sda_pv5";
+ nvidia,function = "i2c4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,lock = <TEGRA_PIN_DISABLE>;
- nvidia,rcv-sel = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
};
- kb_row10_ps2 {
- nvidia,pins = "kb_row10_ps2";
- nvidia,function = "uarta";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ gpio_w2_aud_pw2 {
+ nvidia,pins = "gpio_w2_aud_pw2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_w3_aud_pw3 {
+ nvidia,pins = "gpio_w3_aud_pw3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- kb_row9_ps1 {
- nvidia,pins = "kb_row9_ps1";
- nvidia,function = "uarta";
+ dap_mclk1_pw4 {
+ nvidia,pins = "dap_mclk1_pw4";
+ nvidia,function = "extperiph1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- usb_vbus_en0_pn4 {
- nvidia,pins = "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5";
- nvidia,function = "usb";
+ clk2_out_pw5 {
+ nvidia,pins = "clk2_out_pw5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_txd_pw6 {
+ nvidia,pins = "uart3_txd_pw6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rxd_pw7 {
+ nvidia,pins = "uart3_rxd_pw7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dvfs_pwm_px0 {
+ nvidia,pins = "dvfs_pwm_px0";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x1_aud_px1 {
+ nvidia,pins = "gpio_x1_aud_px1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dvfs_clk_px2 {
+ nvidia,pins = "dvfs_clk_px2";
+ nvidia,function = "cldvfs";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,lock = <TEGRA_PIN_DISABLE>;
- nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- drive_sdio1 {
- nvidia,pins = "drive_sdio1";
- nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
- nvidia,schmitt = <TEGRA_PIN_DISABLE>;
- nvidia,pull-down-strength = <36>;
- nvidia,pull-up-strength = <20>;
- nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_SLOW>;
- nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_SLOW>;
- };
- drive_sdio3 {
- nvidia,pins = "drive_sdio3";
- nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
- nvidia,schmitt = <TEGRA_PIN_DISABLE>;
- nvidia,pull-down-strength = <22>;
- nvidia,pull-up-strength = <36>;
- nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
- nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
- };
- drive_gma {
- nvidia,pins = "drive_gma";
- nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
- nvidia,schmitt = <TEGRA_PIN_DISABLE>;
- nvidia,pull-down-strength = <2>;
- nvidia,pull-up-strength = <1>;
- nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
- nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
- nvidia,drive-type = <1>;
- };
- codec_irq_l {
- nvidia,pins = "ph4";
- nvidia,function = "gmi";
+ gpio_x3_aud_px3 {
+ nvidia,pins = "gpio_x3_aud_px3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x4_aud_px4 {
+ nvidia,pins = "gpio_x4_aud_px4";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- lcd_bl_en {
- nvidia,pins = "ph2";
+ gpio_x5_aud_px5 {
+ nvidia,pins = "gpio_x5_aud_px5";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x6_aud_px6 {
+ nvidia,pins = "gpio_x6_aud_px6";
nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x7_aud_px7 {
+ nvidia,pins = "gpio_x7_aud_px7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- touch_irq_l {
- nvidia,pins = "gpio_w3_aud_pw3";
- nvidia,function = "spi6";
+ ulpi_clk_py0 {
+ nvidia,pins = "ulpi_clk_py0";
+ nvidia,function = "spi1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- tpm_davint_l {
- nvidia,pins = "ph6";
- nvidia,function = "gmi";
+ ulpi_dir_py1 {
+ nvidia,pins = "ulpi_dir_py1";
+ nvidia,function = "spi1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- ts_irq_l {
- nvidia,pins = "pk2";
- nvidia,function = "gmi";
+ ulpi_nxt_py2 {
+ nvidia,pins = "ulpi_nxt_py2";
+ nvidia,function = "spi1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- ts_reset_l {
- nvidia,pins = "pk4";
- nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ ulpi_stp_py3 {
+ nvidia,pins = "ulpi_stp_py3";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- ts_shdn_l {
- nvidia,pins = "pk1";
- nvidia,function = "gmi";
+ sdmmc1_dat3_py4 {
+ nvidia,pins = "sdmmc1_dat3_py4";
+ nvidia,function = "sdmmc1";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- ph7 {
- nvidia,pins = "ph7";
- nvidia,function = "gmi";
- nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ sdmmc1_dat2_py5 {
+ nvidia,pins = "sdmmc1_dat2_py5";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- kb_col0_ap {
- nvidia,pins = "kb_col0_pq0";
- nvidia,function = "rsvd4";
+ sdmmc1_dat1_py6 {
+ nvidia,pins = "sdmmc1_dat1_py6";
+ nvidia,function = "sdmmc1";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- lid_open {
- nvidia,pins = "kb_row4_pr4";
- nvidia,function = "rsvd3";
+ sdmmc1_dat0_py7 {
+ nvidia,pins = "sdmmc1_dat0_py7";
+ nvidia,function = "sdmmc1";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- en_vdd_sd {
- nvidia,pins = "kb_row0_pr0";
- nvidia,function = "rsvd4";
+ sdmmc1_clk_pz0 {
+ nvidia,pins = "sdmmc1_clk_pz0";
+ nvidia,function = "sdmmc1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- ac_ok {
- nvidia,pins = "pj0";
- nvidia,function = "gmi";
+ sdmmc1_cmd_pz1 {
+ nvidia,pins = "sdmmc1_cmd_pz1";
+ nvidia,function = "sdmmc1";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- sensor_irq_l {
- nvidia,pins = "pi6";
- nvidia,function = "gmi";
+ pwr_i2c_scl_pz6 {
+ nvidia,pins = "pwr_i2c_scl_pz6";
+ nvidia,function = "i2cpwr";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
};
- wifi_en {
- nvidia,pins = "gpio_x7_aud_px7";
- nvidia,function = "rsvd4";
+ pwr_i2c_sda_pz7 {
+ nvidia,pins = "pwr_i2c_sda_pz7";
+ nvidia,function = "i2cpwr";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat0_paa0 {
+ nvidia,pins = "sdmmc4_dat0_paa0";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat1_paa1 {
+ nvidia,pins = "sdmmc4_dat1_paa1";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat2_paa2 {
+ nvidia,pins = "sdmmc4_dat2_paa2";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat3_paa3 {
+ nvidia,pins = "sdmmc4_dat3_paa3";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat4_paa4 {
+ nvidia,pins = "sdmmc4_dat4_paa4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat5_paa5 {
+ nvidia,pins = "sdmmc4_dat5_paa5";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat6_paa6 {
+ nvidia,pins = "sdmmc4_dat6_paa6";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat7_paa7 {
+ nvidia,pins = "sdmmc4_dat7_paa7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pbb0 {
+ nvidia,pins = "pbb0";
+ nvidia,function = "vgp6";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- en_vdd_bl {
- nvidia,pins = "dap3_dout_pp2";
- nvidia,function = "i2s2";
+ cam_i2c_scl_pbb1 {
+ nvidia,pins = "cam_i2c_scl_pbb1";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam_i2c_sda_pbb2 {
+ nvidia,pins = "cam_i2c_sda_pbb2";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pbb3 {
+ nvidia,pins = "pbb3";
+ nvidia,function = "vgp3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb4 {
+ nvidia,pins = "pbb4";
+ nvidia,function = "vgp4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb5 {
+ nvidia,pins = "pbb5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb6 {
+ nvidia,pins = "pbb6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb7 {
+ nvidia,pins = "pbb7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ cam_mclk_pcc0 {
+ nvidia,pins = "cam_mclk_pcc0";
+ nvidia,function = "vi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc1 {
+ nvidia,pins = "pcc1";
+ nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc2 {
+ nvidia,pins = "pcc2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc4_clk_pcc4 {
+ nvidia,pins = "sdmmc4_clk_pcc4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_req_pcc5 {
+ nvidia,pins = "clk2_req_pcc5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- en_vdd_hdmi {
- nvidia,pins = "spdif_in_pk6";
- nvidia,function = "spdif";
+ pex_l0_rst_n_pdd1 {
+ nvidia,pins = "pex_l0_rst_n_pdd1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l0_clkreq_n_pdd2 {
+ nvidia,pins = "pex_l0_clkreq_n_pdd2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_wake_n_pdd3 {
+ nvidia,pins = "pex_wake_n_pdd3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l1_rst_n_pdd5 {
+ nvidia,pins = "pex_l1_rst_n_pdd5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l1_clkreq_n_pdd6 {
+ nvidia,pins = "pex_l1_clkreq_n_pdd6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_out_pee0 {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_req_pee1 {
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_req_pee2 {
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ hdmi_cec_pee3 {
+ nvidia,pins = "hdmi_cec_pee3";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_clk_lb_out_pee4 {
+ nvidia,pins = "sdmmc3_clk_lb_out_pee4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- soc_warm_reset_l {
- nvidia,pins = "pi5";
- nvidia,function = "gmi";
+ sdmmc3_clk_lb_in_pee5 {
+ nvidia,pins = "sdmmc3_clk_lb_in_pee5";
+ nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- hp_det_l {
- nvidia,pins = "pi7";
- nvidia,function = "rsvd1";
+ dp_hpd_pff0 {
+ nvidia,pins = "dp_hpd_pff0";
+ nvidia,function = "dp";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- mic_det_l {
- nvidia,pins = "kb_row7_pr7";
+ usb_vbus_en2_pff1 {
+ nvidia,pins = "usb_vbus_en2_pff1";
nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pff2 {
+ nvidia,pins = "pff2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ core_pwr_req {
+ nvidia,pins = "core_pwr_req";
+ nvidia,function = "pwron";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ cpu_pwr_req {
+ nvidia,pins = "cpu_pwr_req";
+ nvidia,function = "cpu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pwr_int_n {
+ nvidia,pins = "pwr_int_n";
+ nvidia,function = "pmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- };
- };
-
- serial@0,70006000 {
- /* Debug connector on the bottom of the board near SD card. */
- status = "okay";
- };
-
- pwm@0,7000a000 {
- status = "okay";
- };
-
- i2c@0,7000c000 {
- status = "okay";
- clock-frequency = <100000>;
-
- acodec: audio-codec@10 {
- compatible = "maxim,max98090";
- reg = <0x10>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(H, 4) GPIO_ACTIVE_HIGH>;
- };
-
- temperature-sensor@4c {
- compatible = "ti,tmp451";
- reg = <0x4c>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
-
- #thermal-sensor-cells = <1>;
- };
- };
-
- i2c@0,7000c400 {
- status = "okay";
- clock-frequency = <100000>;
- };
-
- i2c@0,7000c500 {
- status = "okay";
- clock-frequency = <400000>;
-
- tpm@20 {
- compatible = "infineon,slb9645tt";
- reg = <0x20>;
- };
- };
-
- hdmi_ddc: i2c@0,7000c700 {
- status = "okay";
- clock-frequency = <100000>;
- };
-
- i2c@0,7000d000 {
- status = "okay";
- clock-frequency = <400000>;
-
- pmic: pmic@40 {
- compatible = "ams,as3722";
- reg = <0x40>;
- interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
-
- ams,system-power-controller;
-
- #interrupt-cells = <2>;
- interrupt-controller;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&as3722_default>;
-
- as3722_default: pinmux {
- gpio0 {
- pins = "gpio0";
- function = "gpio";
- bias-pull-down;
- };
-
- gpio1 {
- pins = "gpio1";
- function = "gpio";
- bias-pull-up;
- };
-
- gpio2_4_7 {
- pins = "gpio2", "gpio4", "gpio7";
- function = "gpio";
- bias-pull-up;
- };
-
- gpio3_6 {
- pins = "gpio3", "gpio6";
- bias-high-impedance;
- };
-
- gpio5 {
- pins = "gpio5";
- function = "clk32k-out";
- bias-pull-down;
- };
+ reset_out_n {
+ nvidia,pins = "reset_out_n";
+ nvidia,function = "reset_out_n";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
-
- regulators {
- vsup-sd2-supply = <&vdd_5v0_sys>;
- vsup-sd3-supply = <&vdd_5v0_sys>;
- vsup-sd4-supply = <&vdd_5v0_sys>;
- vsup-sd5-supply = <&vdd_5v0_sys>;
- vin-ldo0-supply = <&vdd_1v35_lp0>;
- vin-ldo1-6-supply = <&vdd_3v3_run>;
- vin-ldo2-5-7-supply = <&vddio_1v8>;
- vin-ldo3-4-supply = <&vdd_3v3_sys>;
- vin-ldo9-10-supply = <&vdd_5v0_sys>;
- vin-ldo11-supply = <&vdd_3v3_run>;
-
- sd0 {
- regulator-name = "+VDD_CPU_AP";
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1350000>;
- regulator-min-microamp = <3500000>;
- regulator-max-microamp = <3500000>;
- regulator-always-on;
- regulator-boot-on;
- ams,ext-control = <2>;
- };
-
- sd1 {
- regulator-name = "+VDD_CORE";
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1350000>;
- regulator-min-microamp = <2500000>;
- regulator-max-microamp = <4000000>;
- regulator-always-on;
- regulator-boot-on;
- ams,ext-control = <1>;
- };
-
- vdd_1v35_lp0: sd2 {
- regulator-name = "+1.35V_LP0(sd2)";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- sd3 {
- regulator-name = "+1.35V_LP0(sd3)";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_1v05_run: sd4 {
- regulator-name = "+1.05V_RUN";
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- };
-
- vddio_1v8: sd5 {
- regulator-name = "+1.8V_VDDIO";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- sd6 {
- regulator-name = "+VDD_GPU_AP";
- regulator-min-microvolt = <650000>;
- regulator-max-microvolt = <1200000>;
- regulator-min-microamp = <3500000>;
- regulator-max-microamp = <3500000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- ldo0 {
- regulator-name = "+1.05V_RUN_AVDD";
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- regulator-boot-on;
- regulator-always-on;
- ams,ext-control = <1>;
- };
-
- ldo1 {
- regulator-name = "+1.8V_RUN_CAM";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- ldo2 {
- regulator-name = "+1.2V_GEN_AVDD";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- ldo3 {
- regulator-name = "+1.00V_LP0_VDD_RTC";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- regulator-boot-on;
- regulator-always-on;
- ams,enable-tracking;
- };
-
- vdd_run_cam: ldo4 {
- regulator-name = "+3.3V_RUN_CAM";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- };
-
- ldo5 {
- regulator-name = "+1.2V_RUN_CAM_FRONT";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- vddio_sdmmc3: ldo6 {
- regulator-name = "+VDDIO_SDMMC3";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- };
-
- ldo7 {
- regulator-name = "+1.05V_RUN_CAM_REAR";
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- };
-
- ldo9 {
- regulator-name = "+2.8V_RUN_TOUCH";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- };
-
- ldo10 {
- regulator-name = "+2.8V_RUN_CAM_AF";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- };
-
- ldo11 {
- regulator-name = "+1.8V_RUN_VPP_FUSE";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
+ owr {
+ nvidia,pins = "owr";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
};
- };
- };
-
- spi@0,7000d400 {
- status = "okay";
-
- cros_ec: cros-ec@0 {
- compatible = "google,cros-ec-spi";
- spi-max-frequency = <3000000>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(C, 7) IRQ_TYPE_LEVEL_LOW>;
- reg = <0>;
-
- google,cros-ec-spi-msg-delay = <2000>;
-
- i2c-tunnel {
- compatible = "google,cros-ec-i2c-tunnel";
- #address-cells = <1>;
- #size-cells = <0>;
-
- google,remote-bus = <0>;
-
- charger: bq24735@9 {
- compatible = "ti,bq24735";
- reg = <0x9>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(J, 0)
- GPIO_ACTIVE_HIGH>;
- ti,ac-detect-gpios = <&gpio
- TEGRA_GPIO(J, 0)
- GPIO_ACTIVE_HIGH>;
- };
-
- battery: sbs-battery@b {
- compatible = "sbs,sbs-battery";
- reg = <0xb>;
- sbs,i2c-retry-count = <2>;
- sbs,poll-retry-count = <10>;
- power-supplies = <&charger>;
- };
+ clk_32k_in {
+ nvidia,pins = "clk_32k_in";
+ nvidia,function = "clk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ jtag_rtck {
+ nvidia,pins = "jtag_rtck";
+ nvidia,function = "rtck";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
};
};
-
- spi@0,7000da00 {
- status = "okay";
- spi-max-frequency = <25000000>;
-
- flash@0 {
- compatible = "winbond,w25q32dw";
- reg = <0>;
- };
- };
-
- pmc@0,7000e400 {
- nvidia,invert-interrupt;
- nvidia,suspend-mode = <0>;
- nvidia,cpu-pwr-good-time = <500>;
- nvidia,cpu-pwr-off-time = <300>;
- nvidia,core-pwr-good-time = <641 3845>;
- nvidia,core-pwr-off-time = <61036>;
- nvidia,core-power-req-active-high;
- nvidia,sys-clock-req-active-high;
- };
-
- hda@0,70030000 {
- status = "okay";
- };
-
- sdhci@0,700b0000 { /* WiFi/BT on this bus */
- status = "okay";
- power-gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_HIGH>;
- bus-width = <4>;
- no-1-8-v;
- non-removable;
- };
-
- sdhci@0,700b0400 { /* SD Card on this bus */
- status = "okay";
- cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
- power-gpios = <&gpio TEGRA_GPIO(R, 0) GPIO_ACTIVE_HIGH>;
- wp-gpios = <&gpio TEGRA_GPIO(Q, 4) GPIO_ACTIVE_LOW>;
- bus-width = <4>;
- no-1-8-v;
- vqmmc-supply = <&vddio_sdmmc3>;
- };
-
- sdhci@0,700b0600 { /* eMMC on this bus */
- status = "okay";
- bus-width = <8>;
- no-1-8-v;
- non-removable;
- };
-
- ahub@0,70300000 {
- i2s@0,70301100 {
- status = "okay";
- };
- };
-
- usb@0,7d000000 { /* Rear external USB port. */
- status = "okay";
- };
-
- usb-phy@0,7d000000 {
- status = "okay";
- vbus-supply = <&vdd_usb1_vbus>;
- };
-
- usb@0,7d004000 { /* Internal webcam. */
- status = "okay";
- };
-
- usb-phy@0,7d004000 {
- status = "okay";
- vbus-supply = <&vdd_run_cam>;
- };
-
- usb@0,7d008000 { /* Left external USB port. */
- status = "okay";
- };
-
- usb-phy@0,7d008000 {
- status = "okay";
- vbus-supply = <&vdd_usb3_vbus>;
- };
-
- backlight: backlight {
- compatible = "pwm-backlight";
-
- enable-gpios = <&gpio TEGRA_GPIO(H, 2) GPIO_ACTIVE_HIGH>;
- power-supply = <&vdd_led>;
- pwms = <&pwm 1 1000000>;
-
- default-brightness-level = <224>;
- brightness-levels =
- < 0 1 2 3 4 5 6 7
- 8 9 10 11 12 13 14 15
- 16 17 18 19 20 21 22 23
- 24 25 26 27 28 29 30 31
- 32 33 34 35 36 37 38 39
- 40 41 42 43 44 45 46 47
- 48 49 50 51 52 53 54 55
- 56 57 58 59 60 61 62 63
- 64 65 66 67 68 69 70 71
- 72 73 74 75 76 77 78 79
- 80 81 82 83 84 85 86 87
- 88 89 90 91 92 93 94 95
- 96 97 98 99 100 101 102 103
- 104 105 106 107 108 109 110 111
- 112 113 114 115 116 117 118 119
- 120 121 122 123 124 125 126 127
- 128 129 130 131 132 133 134 135
- 136 137 138 139 140 141 142 143
- 144 145 146 147 148 149 150 151
- 152 153 154 155 156 157 158 159
- 160 161 162 163 164 165 166 167
- 168 169 170 171 172 173 174 175
- 176 177 178 179 180 181 182 183
- 184 185 186 187 188 189 190 191
- 192 193 194 195 196 197 198 199
- 200 201 202 203 204 205 206 207
- 208 209 210 211 212 213 214 215
- 216 217 218 219 220 221 222 223
- 224 225 226 227 228 229 230 231
- 232 233 234 235 236 237 238 239
- 240 241 242 243 244 245 246 247
- 248 249 250 251 252 253 254 255
- 256>;
- };
-
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- clk32k_in: clock@0 {
- compatible = "fixed-clock";
- reg = <0>;
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- lid {
- label = "Lid";
- gpios = <&gpio TEGRA_GPIO(R, 4) GPIO_ACTIVE_LOW>;
- linux,input-type = <5>;
- linux,code = <KEY_RESERVED>;
- debounce-interval = <1>;
- gpio-key,wakeup;
- };
-
- power {
- label = "Power";
- gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
- linux,code = <KEY_POWER>;
- debounce-interval = <30>;
- gpio-key,wakeup;
- };
- };
-
- panel: panel {
- compatible = "auo,b133xtn01";
-
- backlight = <&backlight>;
- ddc-i2c-bus = <&dpaux>;
- };
-
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- vdd_mux: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- regulator-name = "+VDD_MUX";
- regulator-min-microvolt = <12000000>;
- regulator-max-microvolt = <12000000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_5v0_sys: regulator@1 {
- compatible = "regulator-fixed";
- reg = <1>;
- regulator-name = "+5V_SYS";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- regulator-boot-on;
- vin-supply = <&vdd_mux>;
- };
-
- vdd_3v3_sys: regulator@2 {
- compatible = "regulator-fixed";
- reg = <2>;
- regulator-name = "+3.3V_SYS";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- vin-supply = <&vdd_mux>;
- };
-
- vdd_3v3_run: regulator@3 {
- compatible = "regulator-fixed";
- reg = <3>;
- regulator-name = "+3.3V_RUN";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- gpio = <&pmic 1 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_3v3_sys>;
- };
-
- vdd_3v3_hdmi: regulator@4 {
- compatible = "regulator-fixed";
- reg = <4>;
- regulator-name = "+3.3V_AVDD_HDMI_AP_GATED";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&vdd_3v3_run>;
- };
-
- vdd_led: regulator@5 {
- compatible = "regulator-fixed";
- reg = <5>;
- regulator-name = "+VDD_LED";
- gpio = <&gpio TEGRA_GPIO(P, 2) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_mux>;
- };
-
- vdd_5v0_ts: regulator@6 {
- compatible = "regulator-fixed";
- reg = <6>;
- regulator-name = "+5V_VDD_TS_SW";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-boot-on;
- gpio = <&gpio TEGRA_GPIO(K, 1) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_5v0_sys>;
- };
-
- vdd_usb1_vbus: regulator@7 {
- compatible = "regulator-fixed";
- reg = <7>;
- regulator-name = "+5V_USB_HS";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio TEGRA_GPIO(N, 4) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- gpio-open-drain;
- vin-supply = <&vdd_5v0_sys>;
- };
-
- vdd_usb3_vbus: regulator@8 {
- compatible = "regulator-fixed";
- reg = <8>;
- regulator-name = "+5V_USB_SS";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio TEGRA_GPIO(N, 5) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- gpio-open-drain;
- vin-supply = <&vdd_5v0_sys>;
- };
-
- vdd_3v3_panel: regulator@9 {
- compatible = "regulator-fixed";
- reg = <9>;
- regulator-name = "+3.3V_PANEL";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pmic 4 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_3v3_run>;
- };
-
- vdd_3v3_lp0: regulator@10 {
- compatible = "regulator-fixed";
- reg = <10>;
- regulator-name = "+3.3V_LP0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- /*
- * TODO: find a way to wire this up with the USB EHCI
- * controllers so that it can be enabled on demand.
- */
- regulator-always-on;
- gpio = <&pmic 2 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_3v3_sys>;
- };
-
- vdd_hdmi_pll: regulator@11 {
- compatible = "regulator-fixed";
- reg = <11>;
- regulator-name = "+1.05V_RUN_AVDD_HDMI_PLL";
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- gpio = <&gpio TEGRA_GPIO(H, 7) GPIO_ACTIVE_LOW>;
- vin-supply = <&vdd_1v05_run>;
- };
-
- vdd_5v0_hdmi: regulator@12 {
- compatible = "regulator-fixed";
- reg = <12>;
- regulator-name = "+5V_HDMI_CON";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio TEGRA_GPIO(K, 6) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_5v0_sys>;
- };
- };
-
- sound {
- compatible = "nvidia,tegra-audio-max98090-nyan-big",
- "nvidia,tegra-audio-max98090";
- nvidia,model = "Acer Chromebook 13";
-
- nvidia,audio-routing =
- "Headphones", "HPR",
- "Headphones", "HPL",
- "Speakers", "SPKR",
- "Speakers", "SPKL",
- "Mic Jack", "MICBIAS",
- "DMICL", "Int Mic",
- "DMICR", "Int Mic",
- "IN34", "Mic Jack";
-
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&acodec>;
-
- clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
- <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-
- nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(I, 7) GPIO_ACTIVE_HIGH>;
- nvidia,mic-det-gpios =
- <&gpio TEGRA_GPIO(R, 7) GPIO_ACTIVE_HIGH>;
- };
};
-
-#include "cros-ec-keyboard.dtsi"
diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi b/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
new file mode 100644
index 000000000000..9ecd108f56cf
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
@@ -0,0 +1,2049 @@
+/ {
+ clock@0,60006000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-20400000 {
+ clock-frequency = <20400000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-40800000 {
+ clock-frequency = <40800000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-68000000 {
+ clock-frequency = <68000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-102000000 {
+ clock-frequency = <102000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-204000000 {
+ clock-frequency = <204000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-300000000 {
+ clock-frequency = <300000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C>;
+ clock-names = "emc-parent";
+ };
+ timing-396000000 {
+ clock-frequency = <396000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M>;
+ clock-names = "emc-parent";
+ };
+ /* TODO: Add 528MHz frequency */
+ timing-600000000 {
+ clock-frequency = <600000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-792000000 {
+ clock-frequency = <792000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ };
+ };
+
+ emc@0,7001b000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000060
+ 0x00000000
+ 0x00000018
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000007
+ 0x0000000f
+ 0x00000005
+ 0x00000005
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000064
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000007
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x800001c5
+ 0x0000000a
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000
+ 0x00000005
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x0000009a
+ 0x00000000
+ 0x00000026
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000007
+ 0x0000000f
+ 0x00000006
+ 0x00000006
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x000000a0
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x0000000b
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x8000023a
+ 0x0000000a
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000001
+ 0x0000000a
+ 0x00000000
+ 0x00000001
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000134
+ 0x00000000
+ 0x0000004d
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000008
+ 0x0000000f
+ 0x0000000c
+ 0x0000000c
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000013f
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000015
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x80000370
+ 0x0000000a
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000003
+ 0x00000011
+ 0x00000000
+ 0x00000002
+ 0x00000000
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000202
+ 0x00000000
+ 0x00000080
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x0000000f
+ 0x0000000f
+ 0x00000013
+ 0x00000013
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000001
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000213
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000022
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x8000050e
+ 0x0000000a
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000004
+ 0x0000001a
+ 0x00000000
+ 0x00000003
+ 0x00000001
+ 0x00000004
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000001
+ 0x00000001
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x0000000c
+ 0x0000000d
+ 0x0000000f
+ 0x00000304
+ 0x00000000
+ 0x000000c1
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000018
+ 0x0000000f
+ 0x0000001c
+ 0x0000001c
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000003
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000031c
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x000fc000
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x0000fc00
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000033
+ 0x00000000
+ 0x00000042
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000f2f3
+ 0x80000713
+ 0x0000000a
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x0000088d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000c000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000009
+ 0x00000035
+ 0x00000000
+ 0x00000007
+ 0x00000002
+ 0x00000005
+ 0x0000000a
+ 0x00000003
+ 0x0000000b
+ 0x00000002
+ 0x00000002
+ 0x00000003
+ 0x00000003
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000006
+ 0x00000002
+ 0x00000000
+ 0x00000004
+ 0x00000006
+ 0x00010000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000003
+ 0x0000000d
+ 0x0000000f
+ 0x00000011
+ 0x00000607
+ 0x00000000
+ 0x00000181
+ 0x00000002
+ 0x00000002
+ 0x00000001
+ 0x00000000
+ 0x00000032
+ 0x0000000f
+ 0x00000038
+ 0x00000038
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000007
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000638
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x106aa298
+ 0x002c00a0
+ 0x00008000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00064000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x0000c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00090000
+ 0x00090000
+ 0x00090000
+ 0x00090000
+ 0x00009000
+ 0x00009000
+ 0x00009000
+ 0x00009000
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000505
+ 0x81f1f108
+ 0x07070004
+ 0x0000003f
+ 0x016eeeee
+ 0x51451400
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000066
+ 0x00000000
+ 0x00000100
+ 0x000c000c
+ 0x00000000
+ 0x00000003
+ 0x0000d2b3
+ 0x80000d22
+ 0x0000000a
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x000008d5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000321>;
+ nvidia,emc-mrs-wait-cnt = <0x0174000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000000d
+ 0x0000004c
+ 0x00000000
+ 0x00000009
+ 0x00000003
+ 0x00000004
+ 0x00000008
+ 0x00000002
+ 0x00000009
+ 0x00000003
+ 0x00000003
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000005
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000007
+ 0x00020000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x0000000e
+ 0x00000010
+ 0x00000012
+ 0x000008e4
+ 0x00000000
+ 0x00000239
+ 0x00000001
+ 0x00000008
+ 0x00000001
+ 0x00000000
+ 0x0000004a
+ 0x0000000e
+ 0x00000051
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000009
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000924
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0x002c00a0
+ 0x00008000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00090000
+ 0x00090000
+ 0x00000000
+ 0x00090000
+ 0x00090000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00060000
+ 0x00060000
+ 0x00060000
+ 0x00060000
+ 0x00006000
+ 0x00006000
+ 0x00006000
+ 0x00006000
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000202
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x00000096
+ 0x00000000
+ 0x00000100
+ 0x0174000c
+ 0x00000000
+ 0x00000003
+ 0x000052a3
+ 0x800012d7
+ 0x00000009
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x00000895>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000521>;
+ nvidia,emc-mrs-wait-cnt = <0x015b000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000012
+ 0x00000065
+ 0x00000000
+ 0x0000000c
+ 0x00000004
+ 0x00000005
+ 0x00000008
+ 0x00000002
+ 0x0000000a
+ 0x00000004
+ 0x00000004
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x00000003
+ 0x00000005
+ 0x00000002
+ 0x00000000
+ 0x00000001
+ 0x00000008
+ 0x00020000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0000000f
+ 0x00000010
+ 0x00000012
+ 0x00000bd1
+ 0x00000000
+ 0x000002f4
+ 0x00000001
+ 0x00000008
+ 0x00000001
+ 0x00000000
+ 0x00000063
+ 0x0000000f
+ 0x0000006b
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x0000000d
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x00000c11
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0x002c00a0
+ 0x00008000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00030000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00068000
+ 0x00068000
+ 0x00000000
+ 0x00068000
+ 0x00068000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00058000
+ 0x00058000
+ 0x00058000
+ 0x00058000
+ 0x00005800
+ 0x00005800
+ 0x00005800
+ 0x00005800
+ 0x10000280
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc081
+ 0x00000202
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0000003f
+ 0x000000c6
+ 0x00000000
+ 0x00000100
+ 0x015b000c
+ 0x00000000
+ 0x00000003
+ 0x000052a3
+ 0x8000188b
+ 0x00000009
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200010>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000b61>;
+ nvidia,emc-mrs-wait-cnt = <0x0128000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0121113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000001c
+ 0x0000009a
+ 0x00000000
+ 0x00000013
+ 0x00000007
+ 0x00000007
+ 0x0000000b
+ 0x00000003
+ 0x00000010
+ 0x00000007
+ 0x00000007
+ 0x00000002
+ 0x00000002
+ 0x00000000
+ 0x00000005
+ 0x00000005
+ 0x0000000a
+ 0x00000002
+ 0x00000000
+ 0x00000003
+ 0x0000000b
+ 0x00070000
+ 0x00000003
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000002
+ 0x00000012
+ 0x00000016
+ 0x00000018
+ 0x00001208
+ 0x00000000
+ 0x00000482
+ 0x00000002
+ 0x0000000d
+ 0x00000001
+ 0x00000000
+ 0x00000096
+ 0x00000015
+ 0x000000a2
+ 0x00000200
+ 0x00000004
+ 0x00000005
+ 0x00000004
+ 0x00000015
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x00001248
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0xe00e00b1
+ 0x00008000
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x0000000a
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00040000
+ 0x00040000
+ 0x00000000
+ 0x00040000
+ 0x00040000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000004
+ 0x00000004
+ 0x00000001
+ 0x00000005
+ 0x00000007
+ 0x00000004
+ 0x00000006
+ 0x00000007
+ 0x00000004
+ 0x00000004
+ 0x00000001
+ 0x00000005
+ 0x00000007
+ 0x00000004
+ 0x00000006
+ 0x00000007
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000202
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x51451420
+ 0x00514514
+ 0x00514514
+ 0x51451400
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000100
+ 0x0128000c
+ 0x00000000
+ 0x00000003
+ 0x000040a0
+ 0x800024a9
+ 0x0000000e
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200018>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000d71>;
+ nvidia,emc-mrs-wait-cnt = <0x00f8000c>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040000>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0120113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000025
+ 0x000000cc
+ 0x00000000
+ 0x0000001a
+ 0x00000009
+ 0x00000008
+ 0x0000000d
+ 0x00000004
+ 0x00000013
+ 0x00000009
+ 0x00000009
+ 0x00000003
+ 0x00000002
+ 0x00000000
+ 0x00000006
+ 0x00000006
+ 0x0000000b
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x0000000d
+ 0x00080000
+ 0x00000004
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x00000014
+ 0x00000018
+ 0x0000001a
+ 0x000017e2
+ 0x00000000
+ 0x000005f8
+ 0x00000003
+ 0x00000011
+ 0x00000001
+ 0x00000000
+ 0x000000c6
+ 0x00000018
+ 0x000000d6
+ 0x00000200
+ 0x00000005
+ 0x00000006
+ 0x00000005
+ 0x0000001d
+ 0x00000000
+ 0x00000008
+ 0x00000008
+ 0x00001822
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x104ab098
+ 0xe00700b1
+ 0x00008000
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000008
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x0002c000
+ 0x0002c000
+ 0x00000000
+ 0x0002c000
+ 0x0002c000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000000
+ 0x00000008
+ 0x00000008
+ 0x00000005
+ 0x00000008
+ 0x0000000a
+ 0x00000008
+ 0x0000000a
+ 0x0000000a
+ 0x00000008
+ 0x00000008
+ 0x00000005
+ 0x00000008
+ 0x0000000a
+ 0x00000008
+ 0x0000000a
+ 0x0000000a
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x0000000e
+ 0x100002a0
+ 0x00000000
+ 0x00111111
+ 0x00000000
+ 0x00000000
+ 0x77ffc085
+ 0x00000202
+ 0x81f1f108
+ 0x07070004
+ 0x00000000
+ 0x016eeeee
+ 0x61861820
+ 0x00492492
+ 0x00492492
+ 0x61861800
+ 0x0606003f
+ 0x00000000
+ 0x00000000
+ 0x00000100
+ 0x00f8000c
+ 0x00000000
+ 0x00000004
+ 0x00004080
+ 0x80003012
+ 0x0000000f
+ >;
+ };
+
+ };
+ };
+
+ memory-controller@0,70019000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emem-configuration = <
+ 0x40040001
+ 0x8000000a
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x77e30303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emem-configuration = <
+ 0x40020001
+ 0x80000012
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x76230303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emem-configuration = <
+ 0xa0000001
+ 0x80000017
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x74a30303
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000001
+ 0x8000001e
+ 0x00000001
+ 0x00000001
+ 0x00000002
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0402
+ 0x74230403
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000001
+ 0x80000026
+ 0x00000001
+ 0x00000001
+ 0x00000003
+ 0x00000000
+ 0x00000002
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000003
+ 0x00000006
+ 0x06030203
+ 0x000a0403
+ 0x73c30504
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emem-configuration = <
+ 0x01000003
+ 0x80000040
+ 0x00000001
+ 0x00000001
+ 0x00000005
+ 0x00000002
+ 0x00000004
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000003
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040203
+ 0x000a0405
+ 0x73840a06
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000004
+ 0x80000040
+ 0x00000001
+ 0x00000002
+ 0x00000007
+ 0x00000004
+ 0x00000005
+ 0x00000001
+ 0x00000002
+ 0x00000007
+ 0x00000002
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040202
+ 0x000b0607
+ 0x77450e08
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emem-configuration = <
+ 0x0f000005
+ 0x80000040
+ 0x00000001
+ 0x00000002
+ 0x00000009
+ 0x00000005
+ 0x00000007
+ 0x00000001
+ 0x00000002
+ 0x00000008
+ 0x00000002
+ 0x00000002
+ 0x00000004
+ 0x00000006
+ 0x06040202
+ 0x000d0709
+ 0x7586120a
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-528000000 {
+ clock-frequency = <528000000>;
+
+ nvidia,emem-configuration = <
+ 0x0f000007
+ 0x80000040
+ 0x00000002
+ 0x00000003
+ 0x0000000d
+ 0x00000008
+ 0x0000000a
+ 0x00000001
+ 0x00000002
+ 0x00000009
+ 0x00000002
+ 0x00000002
+ 0x00000005
+ 0x00000006
+ 0x06050202
+ 0x0010090d
+ 0x7428180e
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000009
+ 0x80000040
+ 0x00000003
+ 0x00000004
+ 0x0000000e
+ 0x00000009
+ 0x0000000b
+ 0x00000001
+ 0x00000003
+ 0x0000000b
+ 0x00000002
+ 0x00000002
+ 0x00000005
+ 0x00000007
+ 0x07050202
+ 0x00130b0e
+ 0x73a91b0f
+ 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emem-configuration = <
+ 0x0e00000b
+ 0x80000040
+ 0x00000004
+ 0x00000005
+ 0x00000013
+ 0x0000000c
+ 0x0000000f
+ 0x00000002
+ 0x00000003
+ 0x0000000c
+ 0x00000002
+ 0x00000002
+ 0x00000006
+ 0x00000008
+ 0x08060202
+ 0x00160d13
+ 0x734c2414
+ 0x70000f02
+ 0x001f0000
+ >;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze.dts b/arch/arm/boot/dts/tegra124-nyan-blaze.dts
new file mode 100644
index 000000000000..0d30c514ffad
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-nyan-blaze.dts
@@ -0,0 +1,1334 @@
+/dts-v1/;
+
+#include "tegra124-nyan.dtsi"
+
+#include "tegra124-nyan-blaze-emc.dtsi"
+
+/ {
+ model = "HP Chromebook 14";
+ compatible = "google,nyan-blaze", "google,nyan", "nvidia,tegra124";
+
+ panel: panel {
+ compatible = "samsung,ltn140at29-301";
+
+ backlight = <&backlight>;
+ ddc-i2c-bus = <&dpaux>;
+ };
+
+ sound {
+ compatible = "nvidia,tegra-audio-max98090-nyan-blaze",
+ "nvidia,tegra-audio-max98090-nyan",
+ "nvidia,tegra-audio-max98090";
+ nvidia,model = "GoogleNyanBlaze";
+ };
+
+ pinmux@0,70000868 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_default>;
+
+ pinmux_default: common {
+ clk_32k_out_pa0 {
+ nvidia,pins = "clk_32k_out_pa0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_cts_n_pa1 {
+ nvidia,pins = "uart3_cts_n_pa1";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_sclk_pa3 {
+ nvidia,pins = "dap2_sclk_pa3";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_din_pa4 {
+ nvidia,pins = "dap2_din_pa4";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_dout_pa5 {
+ nvidia,pins = "dap2_dout_pa5";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_clk_pa6 {
+ nvidia,pins = "sdmmc3_clk_pa6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_cmd_pa7 {
+ nvidia,pins = "sdmmc3_cmd_pa7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pb0 {
+ nvidia,pins = "pb0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pb1 {
+ nvidia,pins = "pb1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_dat3_pb4 {
+ nvidia,pins = "sdmmc3_dat3_pb4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat2_pb5 {
+ nvidia,pins = "sdmmc3_dat2_pb5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat1_pb6 {
+ nvidia,pins = "sdmmc3_dat1_pb6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat0_pb7 {
+ nvidia,pins = "sdmmc3_dat0_pb7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_rts_n_pc0 {
+ nvidia,pins = "uart3_rts_n_pc0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_txd_pc2 {
+ nvidia,pins = "uart2_txd_pc2";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rxd_pc3 {
+ nvidia,pins = "uart2_rxd_pc3";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gen1_i2c_scl_pc4 {
+ nvidia,pins = "gen1_i2c_scl_pc4";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen1_i2c_sda_pc5 {
+ nvidia,pins = "gen1_i2c_sda_pc5";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pc7 {
+ nvidia,pins = "pc7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pg0 {
+ nvidia,pins = "pg0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pg1 {
+ nvidia,pins = "pg1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pg2 {
+ nvidia,pins = "pg2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pg3 {
+ nvidia,pins = "pg3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pg4 {
+ nvidia,pins = "pg4";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg5 {
+ nvidia,pins = "pg5";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg6 {
+ nvidia,pins = "pg6";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg7 {
+ nvidia,pins = "pg7";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ph0 {
+ nvidia,pins = "ph0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph1 {
+ nvidia,pins = "ph1";
+ nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph2 {
+ nvidia,pins = "ph2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ph3 {
+ nvidia,pins = "ph3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph4 {
+ nvidia,pins = "ph4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ph5 {
+ nvidia,pins = "ph5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph6 {
+ nvidia,pins = "ph6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ph7 {
+ nvidia,pins = "ph7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi0 {
+ nvidia,pins = "pi0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi1 {
+ nvidia,pins = "pi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi2 {
+ nvidia,pins = "pi2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi3 {
+ nvidia,pins = "pi3";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi4 {
+ nvidia,pins = "pi4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi5 {
+ nvidia,pins = "pi5";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi6 {
+ nvidia,pins = "pi6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi7 {
+ nvidia,pins = "pi7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pj0 {
+ nvidia,pins = "pj0";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pj2 {
+ nvidia,pins = "pj2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_cts_n_pj5 {
+ nvidia,pins = "uart2_cts_n_pj5";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rts_n_pj6 {
+ nvidia,pins = "uart2_rts_n_pj6";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pj7 {
+ nvidia,pins = "pj7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pk0 {
+ nvidia,pins = "pk0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk1 {
+ nvidia,pins = "pk1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk2 {
+ nvidia,pins = "pk2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pk3 {
+ nvidia,pins = "pk3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk4 {
+ nvidia,pins = "pk4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_out_pk5 {
+ nvidia,pins = "spdif_out_pk5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_in_pk6 {
+ nvidia,pins = "spdif_in_pk6";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk7 {
+ nvidia,pins = "pk7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap1_fs_pn0 {
+ nvidia,pins = "dap1_fs_pn0";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_din_pn1 {
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_dout_pn2 {
+ nvidia,pins = "dap1_dout_pn2";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_sclk_pn3 {
+ nvidia,pins = "dap1_sclk_pn3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ usb_vbus_en0_pn4 {
+ nvidia,pins = "usb_vbus_en0_pn4";
+ nvidia,function = "usb";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ usb_vbus_en1_pn5 {
+ nvidia,pins = "usb_vbus_en1_pn5";
+ nvidia,function = "usb";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ hdmi_int_pn7 {
+ nvidia,pins = "hdmi_int_pn7";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data7_po0 {
+ nvidia,pins = "ulpi_data7_po0";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data0_po1 {
+ nvidia,pins = "ulpi_data0_po1";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data1_po2 {
+ nvidia,pins = "ulpi_data1_po2";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data2_po3 {
+ nvidia,pins = "ulpi_data2_po3";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data3_po4 {
+ nvidia,pins = "ulpi_data3_po4";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data4_po5 {
+ nvidia,pins = "ulpi_data4_po5";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data5_po6 {
+ nvidia,pins = "ulpi_data5_po6";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data6_po7 {
+ nvidia,pins = "ulpi_data6_po7";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_fs_pp0 {
+ nvidia,pins = "dap3_fs_pp0";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_din_pp1 {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_dout_pp2 {
+ nvidia,pins = "dap3_dout_pp2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_sclk_pp3 {
+ nvidia,pins = "dap3_sclk_pp3";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_fs_pp4 {
+ nvidia,pins = "dap4_fs_pp4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_din_pp5 {
+ nvidia,pins = "dap4_din_pp5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_dout_pp6 {
+ nvidia,pins = "dap4_dout_pp6";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_sclk_pp7 {
+ nvidia,pins = "dap4_sclk_pp7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col0_pq0 {
+ nvidia,pins = "kb_col0_pq0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col1_pq1 {
+ nvidia,pins = "kb_col1_pq1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col2_pq2 {
+ nvidia,pins = "kb_col2_pq2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col3_pq3 {
+ nvidia,pins = "kb_col3_pq3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col4_pq4 {
+ nvidia,pins = "kb_col4_pq4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col5_pq5 {
+ nvidia,pins = "kb_col5_pq5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col6_pq6 {
+ nvidia,pins = "kb_col6_pq6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col7_pq7 {
+ nvidia,pins = "kb_col7_pq7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row0_pr0 {
+ nvidia,pins = "kb_row0_pr0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row1_pr1 {
+ nvidia,pins = "kb_row1_pr1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row2_pr2 {
+ nvidia,pins = "kb_row2_pr2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row3_pr3 {
+ nvidia,pins = "kb_row3_pr3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row4_pr4 {
+ nvidia,pins = "kb_row4_pr4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row5_pr5 {
+ nvidia,pins = "kb_row5_pr5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row6_pr6 {
+ nvidia,pins = "kb_row6_pr6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row7_pr7 {
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row8_ps0 {
+ nvidia,pins = "kb_row8_ps0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row9_ps1 {
+ nvidia,pins = "kb_row9_ps1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row10_ps2 {
+ nvidia,pins = "kb_row10_ps2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row11_ps3 {
+ nvidia,pins = "kb_row11_ps3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row12_ps4 {
+ nvidia,pins = "kb_row12_ps4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row13_ps5 {
+ nvidia,pins = "kb_row13_ps5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row14_ps6 {
+ nvidia,pins = "kb_row14_ps6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row15_ps7 {
+ nvidia,pins = "kb_row15_ps7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row16_pt0 {
+ nvidia,pins = "kb_row16_pt0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row17_pt1 {
+ nvidia,pins = "kb_row17_pt1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_scl_pt5 {
+ nvidia,pins = "gen2_i2c_scl_pt5";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_sda_pt6 {
+ nvidia,pins = "gen2_i2c_sda_pt6";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_cmd_pt7 {
+ nvidia,pins = "sdmmc4_cmd_pt7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu0 {
+ nvidia,pins = "pu0";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu1 {
+ nvidia,pins = "pu1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu2 {
+ nvidia,pins = "pu2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu3 {
+ nvidia,pins = "pu3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu4 {
+ nvidia,pins = "pu4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu5 {
+ nvidia,pins = "pu5";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu6 {
+ nvidia,pins = "pu6";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pv0 {
+ nvidia,pins = "pv0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pv1 {
+ nvidia,pins = "pv1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_cd_n_pv2 {
+ nvidia,pins = "sdmmc3_cd_n_pv2";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_wp_n_pv3 {
+ nvidia,pins = "sdmmc1_wp_n_pv3";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ddc_scl_pv4 {
+ nvidia,pins = "ddc_scl_pv4";
+ nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+ ddc_sda_pv5 {
+ nvidia,pins = "ddc_sda_pv5";
+ nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_w2_aud_pw2 {
+ nvidia,pins = "gpio_w2_aud_pw2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_w3_aud_pw3 {
+ nvidia,pins = "gpio_w3_aud_pw3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap_mclk1_pw4 {
+ nvidia,pins = "dap_mclk1_pw4";
+ nvidia,function = "extperiph1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk2_out_pw5 {
+ nvidia,pins = "clk2_out_pw5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_txd_pw6 {
+ nvidia,pins = "uart3_txd_pw6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rxd_pw7 {
+ nvidia,pins = "uart3_rxd_pw7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dvfs_pwm_px0 {
+ nvidia,pins = "dvfs_pwm_px0";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x1_aud_px1 {
+ nvidia,pins = "gpio_x1_aud_px1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dvfs_clk_px2 {
+ nvidia,pins = "dvfs_clk_px2";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x3_aud_px3 {
+ nvidia,pins = "gpio_x3_aud_px3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x4_aud_px4 {
+ nvidia,pins = "gpio_x4_aud_px4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gpio_x5_aud_px5 {
+ nvidia,pins = "gpio_x5_aud_px5";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x6_aud_px6 {
+ nvidia,pins = "gpio_x6_aud_px6";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x7_aud_px7 {
+ nvidia,pins = "gpio_x7_aud_px7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_clk_py0 {
+ nvidia,pins = "ulpi_clk_py0";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_dir_py1 {
+ nvidia,pins = "ulpi_dir_py1";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_nxt_py2 {
+ nvidia,pins = "ulpi_nxt_py2";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_stp_py3 {
+ nvidia,pins = "ulpi_stp_py3";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_dat3_py4 {
+ nvidia,pins = "sdmmc1_dat3_py4";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat2_py5 {
+ nvidia,pins = "sdmmc1_dat2_py5";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat1_py6 {
+ nvidia,pins = "sdmmc1_dat1_py6";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat0_py7 {
+ nvidia,pins = "sdmmc1_dat0_py7";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_clk_pz0 {
+ nvidia,pins = "sdmmc1_clk_pz0";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_cmd_pz1 {
+ nvidia,pins = "sdmmc1_cmd_pz1";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pwr_i2c_scl_pz6 {
+ nvidia,pins = "pwr_i2c_scl_pz6";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pwr_i2c_sda_pz7 {
+ nvidia,pins = "pwr_i2c_sda_pz7";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat0_paa0 {
+ nvidia,pins = "sdmmc4_dat0_paa0";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat1_paa1 {
+ nvidia,pins = "sdmmc4_dat1_paa1";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat2_paa2 {
+ nvidia,pins = "sdmmc4_dat2_paa2";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat3_paa3 {
+ nvidia,pins = "sdmmc4_dat3_paa3";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat4_paa4 {
+ nvidia,pins = "sdmmc4_dat4_paa4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat5_paa5 {
+ nvidia,pins = "sdmmc4_dat5_paa5";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat6_paa6 {
+ nvidia,pins = "sdmmc4_dat6_paa6";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat7_paa7 {
+ nvidia,pins = "sdmmc4_dat7_paa7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pbb0 {
+ nvidia,pins = "pbb0";
+ nvidia,function = "vgp6";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ cam_i2c_scl_pbb1 {
+ nvidia,pins = "cam_i2c_scl_pbb1";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam_i2c_sda_pbb2 {
+ nvidia,pins = "cam_i2c_sda_pbb2";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pbb3 {
+ nvidia,pins = "pbb3";
+ nvidia,function = "vgp3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb4 {
+ nvidia,pins = "pbb4";
+ nvidia,function = "vgp4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb5 {
+ nvidia,pins = "pbb5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb6 {
+ nvidia,pins = "pbb6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb7 {
+ nvidia,pins = "pbb7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ cam_mclk_pcc0 {
+ nvidia,pins = "cam_mclk_pcc0";
+ nvidia,function = "vi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc1 {
+ nvidia,pins = "pcc1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc2 {
+ nvidia,pins = "pcc2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc4_clk_pcc4 {
+ nvidia,pins = "sdmmc4_clk_pcc4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_req_pcc5 {
+ nvidia,pins = "clk2_req_pcc5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l0_rst_n_pdd1 {
+ nvidia,pins = "pex_l0_rst_n_pdd1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l0_clkreq_n_pdd2 {
+ nvidia,pins = "pex_l0_clkreq_n_pdd2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_wake_n_pdd3 {
+ nvidia,pins = "pex_wake_n_pdd3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l1_rst_n_pdd5 {
+ nvidia,pins = "pex_l1_rst_n_pdd5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l1_clkreq_n_pdd6 {
+ nvidia,pins = "pex_l1_clkreq_n_pdd6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_out_pee0 {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_req_pee1 {
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_req_pee2 {
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ hdmi_cec_pee3 {
+ nvidia,pins = "hdmi_cec_pee3";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_clk_lb_out_pee4 {
+ nvidia,pins = "sdmmc3_clk_lb_out_pee4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_clk_lb_in_pee5 {
+ nvidia,pins = "sdmmc3_clk_lb_in_pee5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dp_hpd_pff0 {
+ nvidia,pins = "dp_hpd_pff0";
+ nvidia,function = "dp";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ usb_vbus_en2_pff1 {
+ nvidia,pins = "usb_vbus_en2_pff1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pff2 {
+ nvidia,pins = "pff2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ core_pwr_req {
+ nvidia,pins = "core_pwr_req";
+ nvidia,function = "pwron";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ cpu_pwr_req {
+ nvidia,pins = "cpu_pwr_req";
+ nvidia,function = "cpu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pwr_int_n {
+ nvidia,pins = "pwr_int_n";
+ nvidia,function = "pmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ reset_out_n {
+ nvidia,pins = "reset_out_n";
+ nvidia,function = "reset_out_n";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ owr {
+ nvidia,pins = "owr";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+ clk_32k_in {
+ nvidia,pins = "clk_32k_in";
+ nvidia,function = "clk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ jtag_rtck {
+ nvidia,pins = "jtag_rtck";
+ nvidia,function = "rtck";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
new file mode 100644
index 000000000000..a9aec23e06f2
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -0,0 +1,695 @@
+#include <dt-bindings/input/input.h>
+#include "tegra124.dtsi"
+
+/ {
+ aliases {
+ rtc0 = "/i2c@0,7000d000/pmic@40";
+ rtc1 = "/rtc@0,7000e000";
+ serial0 = &uarta;
+ };
+
+ memory {
+ reg = <0x0 0x80000000 0x0 0x80000000>;
+ };
+
+ host1x@0,50000000 {
+ hdmi@0,54280000 {
+ status = "okay";
+
+ vdd-supply = <&vdd_3v3_hdmi>;
+ pll-supply = <&vdd_hdmi_pll>;
+ hdmi-supply = <&vdd_5v0_hdmi>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio =
+ <&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>;
+ };
+
+ sor@0,54540000 {
+ status = "okay";
+
+ nvidia,dpaux = <&dpaux>;
+ nvidia,panel = <&panel>;
+ };
+
+ dpaux@0,545c0000 {
+ vdd-supply = <&vdd_3v3_panel>;
+ status = "okay";
+ };
+ };
+
+ serial@0,70006000 {
+ /* Debug connector on the bottom of the board near SD card. */
+ status = "okay";
+ };
+
+ pwm@0,7000a000 {
+ status = "okay";
+ };
+
+ i2c@0,7000c000 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ acodec: audio-codec@10 {
+ compatible = "maxim,max98090";
+ reg = <0x10>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(H, 4) GPIO_ACTIVE_HIGH>;
+ };
+
+ temperature-sensor@4c {
+ compatible = "ti,tmp451";
+ reg = <0x4c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+
+ #thermal-sensor-cells = <1>;
+ };
+ };
+
+ i2c@0,7000c400 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ trackpad@15 {
+ compatible = "elan,ekth3000";
+ reg = <0x15>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(W, 3) IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+ };
+
+ i2c@0,7000c500 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tpm@20 {
+ compatible = "infineon,slb9645tt";
+ reg = <0x20>;
+ };
+ };
+
+ hdmi_ddc: i2c@0,7000c700 {
+ status = "okay";
+ clock-frequency = <100000>;
+ };
+
+ i2c@0,7000d000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ pmic: pmic@40 {
+ compatible = "ams,as3722";
+ reg = <0x40>;
+ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+
+ ams,system-power-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&as3722_default>;
+
+ as3722_default: pinmux {
+ gpio0 {
+ pins = "gpio0";
+ function = "gpio";
+ bias-pull-down;
+ };
+
+ gpio1 {
+ pins = "gpio1";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ gpio2_4_7 {
+ pins = "gpio2", "gpio4", "gpio7";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ gpio3_6 {
+ pins = "gpio3", "gpio6";
+ bias-high-impedance;
+ };
+
+ gpio5 {
+ pins = "gpio5";
+ function = "clk32k-out";
+ bias-pull-down;
+ };
+ };
+
+ regulators {
+ vsup-sd2-supply = <&vdd_5v0_sys>;
+ vsup-sd3-supply = <&vdd_5v0_sys>;
+ vsup-sd4-supply = <&vdd_5v0_sys>;
+ vsup-sd5-supply = <&vdd_5v0_sys>;
+ vin-ldo0-supply = <&vdd_1v35_lp0>;
+ vin-ldo1-6-supply = <&vdd_3v3_run>;
+ vin-ldo2-5-7-supply = <&vddio_1v8>;
+ vin-ldo3-4-supply = <&vdd_3v3_sys>;
+ vin-ldo9-10-supply = <&vdd_5v0_sys>;
+ vin-ldo11-supply = <&vdd_3v3_run>;
+
+ sd0 {
+ regulator-name = "+VDD_CPU_AP";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-min-microamp = <3500000>;
+ regulator-max-microamp = <3500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ams,ext-control = <2>;
+ };
+
+ sd1 {
+ regulator-name = "+VDD_CORE";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-min-microamp = <2500000>;
+ regulator-max-microamp = <4000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ams,ext-control = <1>;
+ };
+
+ vdd_1v35_lp0: sd2 {
+ regulator-name = "+1.35V_LP0(sd2)";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ sd3 {
+ regulator-name = "+1.35V_LP0(sd3)";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v05_run: sd4 {
+ regulator-name = "+1.05V_RUN";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ vddio_1v8: sd5 {
+ regulator-name = "+1.8V_VDDIO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ sd6 {
+ regulator-name = "+VDD_GPU_AP";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microamp = <3500000>;
+ regulator-max-microamp = <3500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo0 {
+ regulator-name = "+1.05V_RUN_AVDD";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ams,ext-control = <1>;
+ };
+
+ ldo1 {
+ regulator-name = "+1.8V_RUN_CAM";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo2 {
+ regulator-name = "+1.2V_GEN_AVDD";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3 {
+ regulator-name = "+1.00V_LP0_VDD_RTC";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ams,enable-tracking;
+ };
+
+ vdd_run_cam: ldo4 {
+ regulator-name = "+3.3V_RUN_CAM";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ ldo5 {
+ regulator-name = "+1.2V_RUN_CAM_FRONT";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vddio_sdmmc3: ldo6 {
+ regulator-name = "+VDDIO_SDMMC3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo7 {
+ regulator-name = "+1.05V_RUN_CAM_REAR";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ ldo9 {
+ regulator-name = "+2.8V_RUN_TOUCH";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ ldo10 {
+ regulator-name = "+2.8V_RUN_CAM_AF";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ ldo11 {
+ regulator-name = "+1.8V_RUN_VPP_FUSE";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+
+ spi@0,7000d400 {
+ status = "okay";
+
+ cros_ec: cros-ec@0 {
+ compatible = "google,cros-ec-spi";
+ spi-max-frequency = <3000000>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(C, 7) IRQ_TYPE_LEVEL_LOW>;
+ reg = <0>;
+
+ google,cros-ec-spi-msg-delay = <2000>;
+
+ i2c-tunnel {
+ compatible = "google,cros-ec-i2c-tunnel";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ google,remote-bus = <0>;
+
+ charger: bq24735@9 {
+ compatible = "ti,bq24735";
+ reg = <0x9>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(J, 0)
+ GPIO_ACTIVE_HIGH>;
+ ti,ac-detect-gpios = <&gpio
+ TEGRA_GPIO(J, 0)
+ GPIO_ACTIVE_HIGH>;
+ };
+
+ battery: sbs-battery@b {
+ compatible = "sbs,sbs-battery";
+ reg = <0xb>;
+ sbs,i2c-retry-count = <2>;
+ sbs,poll-retry-count = <10>;
+ power-supplies = <&charger>;
+ };
+ };
+ };
+ };
+
+ spi@0,7000da00 {
+ status = "okay";
+ spi-max-frequency = <25000000>;
+
+ flash@0 {
+ compatible = "winbond,w25q32dw";
+ spi-max-frequency = <25000000>;
+ reg = <0>;
+ };
+ };
+
+ pmc@0,7000e400 {
+ nvidia,invert-interrupt;
+ nvidia,suspend-mode = <0>;
+ nvidia,cpu-pwr-good-time = <500>;
+ nvidia,cpu-pwr-off-time = <300>;
+ nvidia,core-pwr-good-time = <641 3845>;
+ nvidia,core-pwr-off-time = <61036>;
+ nvidia,core-power-req-active-high;
+ nvidia,sys-clock-req-active-high;
+ };
+
+ hda@0,70030000 {
+ status = "okay";
+ };
+
+ sdhci0_pwrseq: sdhci0_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+
+ reset-gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>;
+ };
+
+ sdhci@0,700b0000 { /* WiFi/BT on this bus */
+ status = "okay";
+ bus-width = <4>;
+ no-1-8-v;
+ non-removable;
+ mmc-pwrseq = <&sdhci0_pwrseq>;
+ vmmc-supply = <&vdd_3v3_lp0>;
+ vqmmc-supply = <&vddio_1v8>;
+ keep-power-in-suspend;
+ };
+
+ sdhci@0,700b0400 { /* SD Card on this bus */
+ status = "okay";
+ cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
+ power-gpios = <&gpio TEGRA_GPIO(R, 0) GPIO_ACTIVE_HIGH>;
+ bus-width = <4>;
+ no-1-8-v;
+ vqmmc-supply = <&vddio_sdmmc3>;
+ };
+
+ sdhci@0,700b0600 { /* eMMC on this bus */
+ status = "okay";
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ };
+
+ ahub@0,70300000 {
+ i2s@0,70301100 {
+ status = "okay";
+ };
+ };
+
+ usb@0,7d000000 { /* Rear external USB port. */
+ status = "okay";
+ };
+
+ usb-phy@0,7d000000 {
+ status = "okay";
+ vbus-supply = <&vdd_usb1_vbus>;
+ };
+
+ usb@0,7d004000 { /* Internal webcam. */
+ status = "okay";
+ };
+
+ usb-phy@0,7d004000 {
+ status = "okay";
+ vbus-supply = <&vdd_run_cam>;
+ };
+
+ usb@0,7d008000 { /* Left external USB port. */
+ status = "okay";
+ };
+
+ usb-phy@0,7d008000 {
+ status = "okay";
+ vbus-supply = <&vdd_usb3_vbus>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+
+ enable-gpios = <&gpio TEGRA_GPIO(H, 2) GPIO_ACTIVE_HIGH>;
+ power-supply = <&vdd_led>;
+ pwms = <&pwm 1 1000000>;
+
+ default-brightness-level = <224>;
+ brightness-levels =
+ < 0 1 2 3 4 5 6 7
+ 8 9 10 11 12 13 14 15
+ 16 17 18 19 20 21 22 23
+ 24 25 26 27 28 29 30 31
+ 32 33 34 35 36 37 38 39
+ 40 41 42 43 44 45 46 47
+ 48 49 50 51 52 53 54 55
+ 56 57 58 59 60 61 62 63
+ 64 65 66 67 68 69 70 71
+ 72 73 74 75 76 77 78 79
+ 80 81 82 83 84 85 86 87
+ 88 89 90 91 92 93 94 95
+ 96 97 98 99 100 101 102 103
+ 104 105 106 107 108 109 110 111
+ 112 113 114 115 116 117 118 119
+ 120 121 122 123 124 125 126 127
+ 128 129 130 131 132 133 134 135
+ 136 137 138 139 140 141 142 143
+ 144 145 146 147 148 149 150 151
+ 152 153 154 155 156 157 158 159
+ 160 161 162 163 164 165 166 167
+ 168 169 170 171 172 173 174 175
+ 176 177 178 179 180 181 182 183
+ 184 185 186 187 188 189 190 191
+ 192 193 194 195 196 197 198 199
+ 200 201 202 203 204 205 206 207
+ 208 209 210 211 212 213 214 215
+ 216 217 218 219 220 221 222 223
+ 224 225 226 227 228 229 230 231
+ 232 233 234 235 236 237 238 239
+ 240 241 242 243 244 245 246 247
+ 248 249 250 251 252 253 254 255
+ 256>;
+ };
+
+ clocks {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clk32k_in: clock@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ lid {
+ label = "Lid";
+ gpios = <&gpio TEGRA_GPIO(R, 4) GPIO_ACTIVE_LOW>;
+ linux,input-type = <5>;
+ linux,code = <KEY_RESERVED>;
+ debounce-interval = <1>;
+ gpio-key,wakeup;
+ };
+
+ power {
+ label = "Power";
+ gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ debounce-interval = <30>;
+ gpio-key,wakeup;
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd_mux: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "+VDD_MUX";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_5v0_sys: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "+5V_SYS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd_mux>;
+ };
+
+ vdd_3v3_sys: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "+3.3V_SYS";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd_mux>;
+ };
+
+ vdd_3v3_run: regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <3>;
+ regulator-name = "+3.3V_RUN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&pmic 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_sys>;
+ };
+
+ vdd_3v3_hdmi: regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <4>;
+ regulator-name = "+3.3V_AVDD_HDMI_AP_GATED";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vdd_3v3_run>;
+ };
+
+ vdd_led: regulator@5 {
+ compatible = "regulator-fixed";
+ reg = <5>;
+ regulator-name = "+VDD_LED";
+ gpio = <&gpio TEGRA_GPIO(P, 2) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_mux>;
+ };
+
+ vdd_5v0_ts: regulator@6 {
+ compatible = "regulator-fixed";
+ reg = <6>;
+ regulator-name = "+5V_VDD_TS_SW";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ gpio = <&gpio TEGRA_GPIO(K, 1) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_usb1_vbus: regulator@7 {
+ compatible = "regulator-fixed";
+ reg = <7>;
+ regulator-name = "+5V_USB_HS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(N, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ gpio-open-drain;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_usb3_vbus: regulator@8 {
+ compatible = "regulator-fixed";
+ reg = <8>;
+ regulator-name = "+5V_USB_SS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(N, 5) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ gpio-open-drain;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_3v3_panel: regulator@9 {
+ compatible = "regulator-fixed";
+ reg = <9>;
+ regulator-name = "+3.3V_PANEL";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pmic 4 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_run>;
+ };
+
+ vdd_3v3_lp0: regulator@10 {
+ compatible = "regulator-fixed";
+ reg = <10>;
+ regulator-name = "+3.3V_LP0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /*
+ * TODO: find a way to wire this up with the USB EHCI
+ * controllers so that it can be enabled on demand.
+ */
+ regulator-always-on;
+ gpio = <&pmic 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_sys>;
+ };
+
+ vdd_hdmi_pll: regulator@11 {
+ compatible = "regulator-fixed";
+ reg = <11>;
+ regulator-name = "+1.05V_RUN_AVDD_HDMI_PLL";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ gpio = <&gpio TEGRA_GPIO(H, 7) GPIO_ACTIVE_LOW>;
+ vin-supply = <&vdd_1v05_run>;
+ };
+
+ vdd_5v0_hdmi: regulator@12 {
+ compatible = "regulator-fixed";
+ reg = <12>;
+ regulator-name = "+5V_HDMI_CON";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(K, 6) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+ };
+
+ sound {
+ nvidia,audio-routing =
+ "Headphones", "HPR",
+ "Headphones", "HPL",
+ "Speakers", "SPKR",
+ "Speakers", "SPKL",
+ "Mic Jack", "MICBIAS",
+ "DMICL", "Int Mic",
+ "DMICR", "Int Mic",
+ "IN34", "Mic Jack";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&acodec>;
+
+ clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
+ <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(I, 7) GPIO_ACTIVE_HIGH>;
+ nvidia,mic-det-gpios =
+ <&gpio TEGRA_GPIO(R, 7) GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+};
+
+#include "cros-ec-keyboard.dtsi"
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index db85695aa7aa..13cc7ca5e031 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -220,6 +220,7 @@
reg = <0x0 0x60006000 0x0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
+ nvidia,external-memory-controller = <&emc>;
};
flow-controller@0,60007000 {
@@ -227,6 +228,17 @@
reg = <0x0 0x60007000 0x0 0x1000>;
};
+ actmon@0,6000c800 {
+ compatible = "nvidia,tegra124-actmon";
+ reg = <0x0 0x6000c800 0x0 0x400>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA124_CLK_ACTMON>,
+ <&tegra_car TEGRA124_CLK_EMC>;
+ clock-names = "actmon", "emc";
+ resets = <&tegra_car 119>;
+ reset-names = "actmon";
+ };
+
gpio: gpio@0,6000d000 {
compatible = "nvidia,tegra124-gpio", "nvidia,tegra30-gpio";
reg = <0x0 0x6000d000 0x0 0x1000>;
@@ -582,6 +594,13 @@
#iommu-cells = <1>;
};
+ emc: emc@0,7001b000 {
+ compatible = "nvidia,tegra124-emc";
+ reg = <0x0 0x7001b000 0x0 0x1000>;
+
+ nvidia,memory-controller = <&mc>;
+ };
+
sata@0,70020000 {
compatible = "nvidia,tegra124-ahci";
@@ -807,7 +826,7 @@
<&tegra_car TEGRA124_CLK_PLL_U>,
<&tegra_car TEGRA124_CLK_USBD>;
clock-names = "reg", "pll_u", "utmi-pads";
- resets = <&tegra_car 59>, <&tegra_car 22>;
+ resets = <&tegra_car 22>, <&tegra_car 22>;
reset-names = "usb", "utmi-pads";
nvidia,hssync-start-delay = <0>;
nvidia,idle-wait-delay = <17>;
@@ -819,6 +838,7 @@
nvidia,hssquelch-level = <2>;
nvidia,hsdiscon-level = <5>;
nvidia,xcvr-hsslew = <12>;
+ nvidia,has-utmi-pad-registers;
status = "disabled";
};
@@ -843,7 +863,7 @@
<&tegra_car TEGRA124_CLK_PLL_U>,
<&tegra_car TEGRA124_CLK_USBD>;
clock-names = "reg", "pll_u", "utmi-pads";
- resets = <&tegra_car 22>, <&tegra_car 22>;
+ resets = <&tegra_car 58>, <&tegra_car 22>;
reset-names = "usb", "utmi-pads";
nvidia,hssync-start-delay = <0>;
nvidia,idle-wait-delay = <17>;
@@ -855,7 +875,6 @@
nvidia,hssquelch-level = <2>;
nvidia,hsdiscon-level = <5>;
nvidia,xcvr-hsslew = <12>;
- nvidia,has-utmi-pad-registers;
status = "disabled";
};
@@ -880,7 +899,7 @@
<&tegra_car TEGRA124_CLK_PLL_U>,
<&tegra_car TEGRA124_CLK_USBD>;
clock-names = "reg", "pll_u", "utmi-pads";
- resets = <&tegra_car 58>, <&tegra_car 22>;
+ resets = <&tegra_car 59>, <&tegra_car 22>;
reset-names = "usb", "utmi-pads";
nvidia,hssync-start-delay = <0>;
nvidia,idle-wait-delay = <17>;
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index 6b157eeabcc5..3dede3934446 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -62,71 +62,1652 @@
pinctrl-0 = <&state_default>;
state_default: pinmux {
- sdmmc1_clk_pz0 {
- nvidia,pins = "sdmmc1_clk_pz0";
- nvidia,function = "sdmmc1";
+ clk_32k_out_pa0 {
+ nvidia,pins = "clk_32k_out_pa0";
+ nvidia,function = "blink";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- sdmmc1_cmd_pz1 {
- nvidia,pins = "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4";
- nvidia,function = "sdmmc1";
- nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ uart3_cts_n_pa1 {
+ nvidia,pins = "uart3_cts_n_pa1";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_sclk_pa3 {
+ nvidia,pins = "dap2_sclk_pa3";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_din_pa4 {
+ nvidia,pins = "dap2_din_pa4";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_dout_pa5 {
+ nvidia,pins = "dap2_dout_pa5";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
sdmmc3_clk_pa6 {
nvidia,pins = "sdmmc3_clk_pa6";
nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
sdmmc3_cmd_pa7 {
- nvidia,pins = "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4";
+ nvidia,pins = "sdmmc3_cmd_pa7";
nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- sdmmc4_clk_pcc4 {
- nvidia,pins = "sdmmc4_clk_pcc4",
- "sdmmc4_rst_n_pcc3";
+ gmi_a17_pb0 {
+ nvidia,pins = "gmi_a17_pb0";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_a18_pb1 {
+ nvidia,pins = "gmi_a18_pb1";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_pwr0_pb2 {
+ nvidia,pins = "lcd_pwr0_pb2";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_pclk_pb3 {
+ nvidia,pins = "lcd_pclk_pb3";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat3_pb4 {
+ nvidia,pins = "sdmmc3_dat3_pb4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat2_pb5 {
+ nvidia,pins = "sdmmc3_dat2_pb5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat1_pb6 {
+ nvidia,pins = "sdmmc3_dat1_pb6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat0_pb7 {
+ nvidia,pins = "sdmmc3_dat0_pb7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_rts_n_pc0 {
+ nvidia,pins = "uart3_rts_n_pc0";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_pwr1_pc1 {
+ nvidia,pins = "lcd_pwr1_pc1";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_txd_pc2 {
+ nvidia,pins = "uart2_txd_pc2";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rxd_pc3 {
+ nvidia,pins = "uart2_rxd_pc3";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gen1_i2c_scl_pc4 {
+ nvidia,pins = "gen1_i2c_scl_pc4";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen1_i2c_sda_pc5 {
+ nvidia,pins = "gen1_i2c_sda_pc5";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_pwr2_pc6 {
+ nvidia,pins = "lcd_pwr2_pc6";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_wp_n_pc7 {
+ nvidia,pins = "gmi_wp_n_pc7";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat5_pd0 {
+ nvidia,pins = "sdmmc3_dat5_pd0";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat4_pd1 {
+ nvidia,pins = "sdmmc3_dat4_pd1";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_dc1_pd2 {
+ nvidia,pins = "lcd_dc1_pd2";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat6_pd3 {
+ nvidia,pins = "sdmmc3_dat6_pd3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat7_pd4 {
+ nvidia,pins = "sdmmc3_dat7_pd4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d1_pd5 {
+ nvidia,pins = "vi_d1_pd5";
+ nvidia,function = "sdmmc2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_vsync_pd6 {
+ nvidia,pins = "vi_vsync_pd6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_hsync_pd7 {
+ nvidia,pins = "vi_hsync_pd7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d0_pe0 {
+ nvidia,pins = "lcd_d0_pe0";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d1_pe1 {
+ nvidia,pins = "lcd_d1_pe1";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d2_pe2 {
+ nvidia,pins = "lcd_d2_pe2";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d3_pe3 {
+ nvidia,pins = "lcd_d3_pe3";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d4_pe4 {
+ nvidia,pins = "lcd_d4_pe4";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d5_pe5 {
+ nvidia,pins = "lcd_d5_pe5";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d6_pe6 {
+ nvidia,pins = "lcd_d6_pe6";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d7_pe7 {
+ nvidia,pins = "lcd_d7_pe7";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d8_pf0 {
+ nvidia,pins = "lcd_d8_pf0";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d9_pf1 {
+ nvidia,pins = "lcd_d9_pf1";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d10_pf2 {
+ nvidia,pins = "lcd_d10_pf2";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d11_pf3 {
+ nvidia,pins = "lcd_d11_pf3";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d12_pf4 {
+ nvidia,pins = "lcd_d12_pf4";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d13_pf5 {
+ nvidia,pins = "lcd_d13_pf5";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d14_pf6 {
+ nvidia,pins = "lcd_d14_pf6";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d15_pf7 {
+ nvidia,pins = "lcd_d15_pf7";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_ad0_pg0 {
+ nvidia,pins = "gmi_ad0_pg0";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad1_pg1 {
+ nvidia,pins = "gmi_ad1_pg1";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad2_pg2 {
+ nvidia,pins = "gmi_ad2_pg2";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad3_pg3 {
+ nvidia,pins = "gmi_ad3_pg3";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad4_pg4 {
+ nvidia,pins = "gmi_ad4_pg4";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad5_pg5 {
+ nvidia,pins = "gmi_ad5_pg5";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad6_pg6 {
+ nvidia,pins = "gmi_ad6_pg6";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad7_pg7 {
+ nvidia,pins = "gmi_ad7_pg7";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad8_ph0 {
+ nvidia,pins = "gmi_ad8_ph0";
+ nvidia,function = "pwm0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad9_ph1 {
+ nvidia,pins = "gmi_ad9_ph1";
+ nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad10_ph2 {
+ nvidia,pins = "gmi_ad10_ph2";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad11_ph3 {
+ nvidia,pins = "gmi_ad11_ph3";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_ad12_ph4 {
+ nvidia,pins = "gmi_ad12_ph4";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_ad13_ph5 {
+ nvidia,pins = "gmi_ad13_ph5";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_ad14_ph6 {
+ nvidia,pins = "gmi_ad14_ph6";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_wr_n_pi0 {
+ nvidia,pins = "gmi_wr_n_pi0";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_oe_n_pi1 {
+ nvidia,pins = "gmi_oe_n_pi1";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_dqs_pi2 {
+ nvidia,pins = "gmi_dqs_pi2";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_iordy_pi5 {
+ nvidia,pins = "gmi_iordy_pi5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_cs7_n_pi6 {
+ nvidia,pins = "gmi_cs7_n_pi6";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_wait_pi7 {
+ nvidia,pins = "gmi_wait_pi7";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_de_pj1 {
+ nvidia,pins = "lcd_de_pj1";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_hsync_pj3 {
+ nvidia,pins = "lcd_hsync_pj3";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_vsync_pj4 {
+ nvidia,pins = "lcd_vsync_pj4";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_cts_n_pj5 {
+ nvidia,pins = "uart2_cts_n_pj5";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_rts_n_pj6 {
+ nvidia,pins = "uart2_rts_n_pj6";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_a16_pj7 {
+ nvidia,pins = "gmi_a16_pj7";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_adv_n_pk0 {
+ nvidia,pins = "gmi_adv_n_pk0";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_clk_pk1 {
+ nvidia,pins = "gmi_clk_pk1";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gmi_cs2_n_pk3 {
+ nvidia,pins = "gmi_cs2_n_pk3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_cs3_n_pk4 {
+ nvidia,pins = "gmi_cs3_n_pk4";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_out_pk5 {
+ nvidia,pins = "spdif_out_pk5";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_in_pk6 {
+ nvidia,pins = "spdif_in_pk6";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gmi_a19_pk7 {
+ nvidia,pins = "gmi_a19_pk7";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d2_pl0 {
+ nvidia,pins = "vi_d2_pl0";
+ nvidia,function = "sdmmc2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d3_pl1 {
+ nvidia,pins = "vi_d3_pl1";
+ nvidia,function = "sdmmc2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d4_pl2 {
+ nvidia,pins = "vi_d4_pl2";
+ nvidia,function = "vi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ vi_d5_pl3 {
+ nvidia,pins = "vi_d5_pl3";
+ nvidia,function = "sdmmc2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d6_pl4 {
+ nvidia,pins = "vi_d6_pl4";
+ nvidia,function = "vi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ vi_d7_pl5 {
+ nvidia,pins = "vi_d7_pl5";
+ nvidia,function = "sdmmc2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d8_pl6 {
+ nvidia,pins = "vi_d8_pl6";
+ nvidia,function = "sdmmc2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d9_pl7 {
+ nvidia,pins = "vi_d9_pl7";
+ nvidia,function = "sdmmc2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d16_pm0 {
+ nvidia,pins = "lcd_d16_pm0";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d17_pm1 {
+ nvidia,pins = "lcd_d17_pm1";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d18_pm2 {
+ nvidia,pins = "lcd_d18_pm2";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d19_pm3 {
+ nvidia,pins = "lcd_d19_pm3";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d20_pm4 {
+ nvidia,pins = "lcd_d20_pm4";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d21_pm5 {
+ nvidia,pins = "lcd_d21_pm5";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d22_pm6 {
+ nvidia,pins = "lcd_d22_pm6";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_d23_pm7 {
+ nvidia,pins = "lcd_d23_pm7";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap1_fs_pn0 {
+ nvidia,pins = "dap1_fs_pn0";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap1_din_pn1 {
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap1_dout_pn2 {
+ nvidia,pins = "dap1_dout_pn2";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap1_sclk_pn3 {
+ nvidia,pins = "dap1_sclk_pn3";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_cs0_n_pn4 {
+ nvidia,pins = "lcd_cs0_n_pn4";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_sdout_pn5 {
+ nvidia,pins = "lcd_sdout_pn5";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_dc0_pn6 {
+ nvidia,pins = "lcd_dc0_pn6";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ hdmi_int_pn7 {
+ nvidia,pins = "hdmi_int_pn7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_data7_po0 {
+ nvidia,pins = "ulpi_data7_po0";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data0_po1 {
+ nvidia,pins = "ulpi_data0_po1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data1_po2 {
+ nvidia,pins = "ulpi_data1_po2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_data2_po3 {
+ nvidia,pins = "ulpi_data2_po3";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_data3_po4 {
+ nvidia,pins = "ulpi_data3_po4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_data4_po5 {
+ nvidia,pins = "ulpi_data4_po5";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_data5_po6 {
+ nvidia,pins = "ulpi_data5_po6";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_data6_po7 {
+ nvidia,pins = "ulpi_data6_po7";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap3_fs_pp0 {
+ nvidia,pins = "dap3_fs_pp0";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap3_din_pp1 {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap3_dout_pp2 {
+ nvidia,pins = "dap3_dout_pp2";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap3_sclk_pp3 {
+ nvidia,pins = "dap3_sclk_pp3";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap4_fs_pp4 {
+ nvidia,pins = "dap4_fs_pp4";
+ nvidia,function = "i2s3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap4_din_pp5 {
+ nvidia,pins = "dap4_din_pp5";
+ nvidia,function = "i2s3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap4_dout_pp6 {
+ nvidia,pins = "dap4_dout_pp6";
+ nvidia,function = "i2s3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap4_sclk_pp7 {
+ nvidia,pins = "dap4_sclk_pp7";
+ nvidia,function = "i2s3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col0_pq0 {
+ nvidia,pins = "kb_col0_pq0";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col1_pq1 {
+ nvidia,pins = "kb_col1_pq1";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col2_pq2 {
+ nvidia,pins = "kb_col2_pq2";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col3_pq3 {
+ nvidia,pins = "kb_col3_pq3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col4_pq4 {
+ nvidia,pins = "kb_col4_pq4";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col5_pq5 {
+ nvidia,pins = "kb_col5_pq5";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col6_pq6 {
+ nvidia,pins = "kb_col6_pq6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_col7_pq7 {
+ nvidia,pins = "kb_col7_pq7";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row0_pr0 {
+ nvidia,pins = "kb_row0_pr0";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row1_pr1 {
+ nvidia,pins = "kb_row1_pr1";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row2_pr2 {
+ nvidia,pins = "kb_row2_pr2";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row3_pr3 {
+ nvidia,pins = "kb_row3_pr3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row4_pr4 {
+ nvidia,pins = "kb_row4_pr4";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row5_pr5 {
+ nvidia,pins = "kb_row5_pr5";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row6_pr6 {
+ nvidia,pins = "kb_row6_pr6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row7_pr7 {
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row8_ps0 {
+ nvidia,pins = "kb_row8_ps0";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row9_ps1 {
+ nvidia,pins = "kb_row9_ps1";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row10_ps2 {
+ nvidia,pins = "kb_row10_ps2";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row11_ps3 {
+ nvidia,pins = "kb_row11_ps3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row12_ps4 {
+ nvidia,pins = "kb_row12_ps4";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row13_ps5 {
+ nvidia,pins = "kb_row13_ps5";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row14_ps6 {
+ nvidia,pins = "kb_row14_ps6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ kb_row15_ps7 {
+ nvidia,pins = "kb_row15_ps7";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_pclk_pt0 {
+ nvidia,pins = "vi_pclk_pt0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_mclk_pt1 {
+ nvidia,pins = "vi_mclk_pt1";
+ nvidia,function = "vi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d10_pt2 {
+ nvidia,pins = "vi_d10_pt2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d11_pt3 {
+ nvidia,pins = "vi_d11_pt3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ vi_d0_pt4 {
+ nvidia,pins = "vi_d0_pt4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_scl_pt5 {
+ nvidia,pins = "gen2_i2c_scl_pt5";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_sda_pt6 {
+ nvidia,pins = "gen2_i2c_sda_pt6";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_cmd_pt7 {
+ nvidia,pins = "sdmmc4_cmd_pt7";
nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu0 {
+ nvidia,pins = "pu0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu1 {
+ nvidia,pins = "pu1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu2 {
+ nvidia,pins = "pu2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu3 {
+ nvidia,pins = "pu3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pu4 {
+ nvidia,pins = "pu4";
+ nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu5 {
+ nvidia,pins = "pu5";
+ nvidia,function = "pwm2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu6 {
+ nvidia,pins = "pu6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ jtag_rtck_pu7 {
+ nvidia,pins = "jtag_rtck_pu7";
+ nvidia,function = "rtck";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pv0 {
+ nvidia,pins = "pv0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pv2 {
+ nvidia,pins = "pv2";
+ nvidia,function = "owr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pv3 {
+ nvidia,pins = "pv3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ddc_scl_pv4 {
+ nvidia,pins = "ddc_scl_pv4";
+ nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ddc_sda_pv5 {
+ nvidia,pins = "ddc_sda_pv5";
+ nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ crt_hsync_pv6 {
+ nvidia,pins = "crt_hsync_pv6";
+ nvidia,function = "crt";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ crt_vsync_pv7 {
+ nvidia,pins = "crt_vsync_pv7";
+ nvidia,function = "crt";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_cs1_n_pw0 {
+ nvidia,pins = "lcd_cs1_n_pw0";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_m1_pw1 {
+ nvidia,pins = "lcd_m1_pw1";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ spi2_cs1_n_pw2 {
+ nvidia,pins = "spi2_cs1_n_pw2";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk1_out_pw4 {
+ nvidia,pins = "clk1_out_pw4";
+ nvidia,function = "extperiph1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_out_pw5 {
+ nvidia,pins = "clk2_out_pw5";
+ nvidia,function = "extperiph2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_txd_pw6 {
+ nvidia,pins = "uart3_txd_pw6";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rxd_pw7 {
+ nvidia,pins = "uart3_rxd_pw7";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ spi2_sck_px2 {
+ nvidia,pins = "spi2_sck_px2";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ spi1_mosi_px4 {
+ nvidia,pins = "spi1_mosi_px4";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ spi1_sck_px5 {
+ nvidia,pins = "spi1_sck_px5";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ spi1_cs0_n_px6 {
+ nvidia,pins = "spi1_cs0_n_px6";
+ nvidia,function = "spi1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ spi1_miso_px7 {
+ nvidia,pins = "spi1_miso_px7";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_clk_py0 {
+ nvidia,pins = "ulpi_clk_py0";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_dir_py1 {
+ nvidia,pins = "ulpi_dir_py1";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_nxt_py2 {
+ nvidia,pins = "ulpi_nxt_py2";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_stp_py3 {
+ nvidia,pins = "ulpi_stp_py3";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_dat3_py4 {
+ nvidia,pins = "sdmmc1_dat3_py4";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat2_py5 {
+ nvidia,pins = "sdmmc1_dat2_py5";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat1_py6 {
+ nvidia,pins = "sdmmc1_dat1_py6";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat0_py7 {
+ nvidia,pins = "sdmmc1_dat0_py7";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_clk_pz0 {
+ nvidia,pins = "sdmmc1_clk_pz0";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_cmd_pz1 {
+ nvidia,pins = "sdmmc1_cmd_pz1";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_sdin_pz2 {
+ nvidia,pins = "lcd_sdin_pz2";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_wr_n_pz3 {
+ nvidia,pins = "lcd_wr_n_pz3";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ lcd_sck_pz4 {
+ nvidia,pins = "lcd_sck_pz4";
+ nvidia,function = "displaya";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sys_clk_req_pz5 {
+ nvidia,pins = "sys_clk_req_pz5";
+ nvidia,function = "sysclk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pwr_i2c_scl_pz6 {
+ nvidia,pins = "pwr_i2c_scl_pz6";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pwr_i2c_sda_pz7 {
+ nvidia,pins = "pwr_i2c_sda_pz7";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
};
sdmmc4_dat0_paa0 {
- nvidia,pins = "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7";
+ nvidia,pins = "sdmmc4_dat0_paa0";
nvidia,function = "sdmmc4";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- dap2_fs_pa2 {
- nvidia,pins = "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5";
- nvidia,function = "i2s1";
+ sdmmc4_dat1_paa1 {
+ nvidia,pins = "sdmmc4_dat1_paa1";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat2_paa2 {
+ nvidia,pins = "sdmmc4_dat2_paa2";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat3_paa3 {
+ nvidia,pins = "sdmmc4_dat3_paa3";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat4_paa4 {
+ nvidia,pins = "sdmmc4_dat4_paa4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat5_paa5 {
+ nvidia,pins = "sdmmc4_dat5_paa5";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat6_paa6 {
+ nvidia,pins = "sdmmc4_dat6_paa6";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat7_paa7 {
+ nvidia,pins = "sdmmc4_dat7_paa7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pbb0 {
+ nvidia,pins = "pbb0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ cam_i2c_scl_pbb1 {
+ nvidia,pins = "cam_i2c_scl_pbb1";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ cam_i2c_sda_pbb2 {
+ nvidia,pins = "cam_i2c_sda_pbb2";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pbb3 {
+ nvidia,pins = "pbb3";
+ nvidia,function = "vgp3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pbb4 {
+ nvidia,pins = "pbb4";
+ nvidia,function = "vgp4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pbb5 {
+ nvidia,pins = "pbb5";
+ nvidia,function = "vgp5";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pbb6 {
+ nvidia,pins = "pbb6";
+ nvidia,function = "vgp6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pbb7 {
+ nvidia,pins = "pbb7";
+ nvidia,function = "i2s4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ cam_mclk_pcc0 {
+ nvidia,pins = "cam_mclk_pcc0";
+ nvidia,function = "vi_alt3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pcc1 {
+ nvidia,pins = "pcc1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pcc2 {
+ nvidia,pins = "pcc2";
+ nvidia,function = "i2s4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_rst_n_pcc3 {
+ nvidia,pins = "sdmmc4_rst_n_pcc3";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_clk_pcc4 {
+ nvidia,pins = "sdmmc4_clk_pcc4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_req_pcc5 {
+ nvidia,pins = "clk2_req_pcc5";
+ nvidia,function = "dap";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l2_rst_n_pcc6 {
+ nvidia,pins = "pex_l2_rst_n_pcc6";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l2_clkreq_n_pcc7 {
+ nvidia,pins = "pex_l2_clkreq_n_pcc7";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l0_prsnt_n_pdd0 {
+ nvidia,pins = "pex_l0_prsnt_n_pdd0";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l0_rst_n_pdd1 {
+ nvidia,pins = "pex_l0_rst_n_pdd1";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l0_clkreq_n_pdd2 {
+ nvidia,pins = "pex_l0_clkreq_n_pdd2";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_wake_n_pdd3 {
+ nvidia,pins = "pex_wake_n_pdd3";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pex_l1_prsnt_n_pdd4 {
- nvidia,pins = "pex_l1_prsnt_n_pdd4",
- "pex_l1_clkreq_n_pdd6";
+ nvidia,pins = "pex_l1_prsnt_n_pdd4";
+ nvidia,function = "pcie";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_rst_n_pdd5 {
+ nvidia,pins = "pex_l1_rst_n_pdd5";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l1_clkreq_n_pdd6 {
+ nvidia,pins = "pex_l1_clkreq_n_pdd6";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l2_prsnt_n_pdd7 {
+ nvidia,pins = "pex_l2_prsnt_n_pdd7";
+ nvidia,function = "pcie";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk3_out_pee0 {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "extperiph3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_req_pee1 {
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "dev3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk1_req_pee2 {
+ nvidia,pins = "clk1_req_pee2";
+ nvidia,function = "dap";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ hdmi_cec_pee3 {
+ nvidia,pins = "hdmi_cec_pee3";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ owr {
+ nvidia,pins = "owr";
+ nvidia,function = "owr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
sdio3 {
nvidia,pins = "drive_sdio3";
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 33920df03640..107395c32d82 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -191,6 +191,7 @@
compatible = "arm,cortex-a15-pmu";
interrupts = <0 68 4>,
<0 69 4>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
};
oscclk6a: oscclk6a {
@@ -362,7 +363,6 @@
compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0 0x20010000 0 0x1000>;
- coresight-default-sink;
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
port {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index 23662b5a5e9d..d949facba376 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -33,28 +33,28 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ A9_0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
next-level-cache = <&L2>;
};
- cpu@1 {
+ A9_1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
next-level-cache = <&L2>;
};
- cpu@2 {
+ A9_2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <2>;
next-level-cache = <&L2>;
};
- cpu@3 {
+ A9_3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <3>;
@@ -170,6 +170,7 @@
compatible = "arm,pl310-cache";
reg = <0x1e00a000 0x1000>;
interrupts = <0 43 4>;
+ cache-unified;
cache-level = <2>;
arm,data-latency = <1 1 1>;
arm,tag-latency = <1 1 1>;
@@ -181,6 +182,8 @@
<0 61 4>,
<0 62 4>,
<0 63 4>;
+ interrupt-affinity = <&A9_0>, <&A9_1>, <&A9_2>, <&A9_3>;
+
};
dcc {
diff --git a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
index 36cafbfa1bfa..606753eb72c8 100644
--- a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
@@ -12,6 +12,12 @@
bootargs = "console=ttyLP0,115200";
};
+ clk16m: clk16m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <16000000>;
+ };
+
regulators {
compatible = "simple-bus";
#address-cells = <1>;
@@ -47,6 +53,21 @@
status = "okay";
};
+&dspi1 {
+ status = "okay";
+
+ mcp2515can: can@0 {
+ compatible = "microchip,mcp2515";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can_int>;
+ reg = <0>;
+ clocks = <&clk16m>;
+ spi-max-frequency = <10000000>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <11 GPIO_ACTIVE_LOW>;
+ };
+};
+
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
@@ -94,3 +115,13 @@
&usbh1 {
vbus-supply = <&usbh_vbus_reg>;
};
+
+&iomuxc {
+ vf610-colibri {
+ pinctrl_can_int: can_int {
+ fsl,pins = <
+ VF610_PAD_PTB21__GPIO_43 0x22ed
+ >;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/vf-colibri.dtsi b/arch/arm/boot/dts/vf-colibri.dtsi
index 5c2b7320856d..fbef0828e930 100644
--- a/arch/arm/boot/dts/vf-colibri.dtsi
+++ b/arch/arm/boot/dts/vf-colibri.dtsi
@@ -23,6 +23,12 @@
status = "okay";
};
+&dspi1 {
+ bus-num = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dspi1>;
+};
+
&edma0 {
status = "okay";
};
@@ -107,6 +113,15 @@
>;
};
+ pinctrl_dspi1: dspi1grp {
+ fsl,pins = <
+ VF610_PAD_PTD5__DSPI1_CS0 0x33e2
+ VF610_PAD_PTD6__DSPI1_SIN 0x33e1
+ VF610_PAD_PTD7__DSPI1_SOUT 0x33e2
+ VF610_PAD_PTD8__DSPI1_SCK 0x33e2
+ >;
+ };
+
pinctrl_esdhc1: esdhc1grp {
fsl,pins = <
VF610_PAD_PTA24__ESDHC1_CLK 0x31ef
diff --git a/arch/arm/boot/dts/vf500.dtsi b/arch/arm/boot/dts/vf500.dtsi
index 1dbf8d2d1ddf..e976d2fa1527 100644
--- a/arch/arm/boot/dts/vf500.dtsi
+++ b/arch/arm/boot/dts/vf500.dtsi
@@ -24,14 +24,13 @@
};
soc {
- interrupt-parent = <&intc>;
-
aips-bus@40000000 {
intc: interrupt-controller@40002000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
interrupt-controller;
+ interrupt-parent = <&intc>;
reg = <0x40003000 0x1000>,
<0x40002100 0x100>;
};
@@ -40,145 +39,17 @@
compatible = "arm,cortex-a9-global-timer";
reg = <0x40002200 0x20>;
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&intc>;
clocks = <&clks VF610_CLK_PLATFORM_BUS>;
};
};
};
};
-&adc0 {
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&adc1 {
- interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&can0 {
- interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&can1 {
- interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&dspi0 {
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&edma0 {
- interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma-tx", "edma-err";
-};
-
-&edma1 {
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma-tx", "edma-err";
-};
-
-&esdhc1 {
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&fec0 {
- interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&fec1 {
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&ftm {
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&gpio0 {
- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&gpio1 {
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&gpio2 {
- interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&gpio3 {
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&gpio4 {
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&i2c0 {
- interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&pit {
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&qspi0 {
- interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&sai2 {
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&snvsrtc {
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&src {
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&uart0 {
- interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&uart1 {
- interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&uart2 {
- interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&uart3 {
- interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&uart4 {
- interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&uart5 {
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&usbdev0 {
- interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&usbh1 {
- interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&usbphy0 {
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-&usbphy1 {
- interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+&mscm_ir {
+ interrupt-parent = <&intc>;
};
&wdoga5 {
- interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index a29c7ce15eaf..4aa335166be7 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -54,6 +54,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
+ interrupt-parent = <&mscm_ir>;
ranges;
aips0: aips-bus@40000000 {
@@ -62,6 +63,19 @@
#size-cells = <1>;
ranges;
+ mscm_cpucfg: cpucfg@40001000 {
+ compatible = "fsl,vf610-mscm-cpucfg", "syscon";
+ reg = <0x40001000 0x800>;
+ };
+
+ mscm_ir: interrupt-controller@40001800 {
+ compatible = "fsl,vf610-mscm-ir";
+ reg = <0x40001800 0x400>;
+ fsl,cpucfg = <&mscm_cpucfg>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
edma0: dma-controller@40018000 {
#dma-cells = <2>;
compatible = "fsl,vf610-edma";
@@ -69,6 +83,9 @@
<0x40024000 0x1000>,
<0x40025000 0x1000>;
dma-channels = <32>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>,
+ <9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma-tx", "edma-err";
clock-names = "dmamux0", "dmamux1";
clocks = <&clks VF610_CLK_DMAMUX0>,
<&clks VF610_CLK_DMAMUX1>;
@@ -78,6 +95,7 @@
can0: flexcan@40020000 {
compatible = "fsl,vf610-flexcan";
reg = <0x40020000 0x4000>;
+ interrupts = <58 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_FLEXCAN0>,
<&clks VF610_CLK_FLEXCAN0>;
clock-names = "ipg", "per";
@@ -87,6 +105,7 @@
uart0: serial@40027000 {
compatible = "fsl,vf610-lpuart";
reg = <0x40027000 0x1000>;
+ interrupts = <61 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_UART0>;
clock-names = "ipg";
dmas = <&edma0 0 2>,
@@ -98,6 +117,7 @@
uart1: serial@40028000 {
compatible = "fsl,vf610-lpuart";
reg = <0x40028000 0x1000>;
+ interrupts = <62 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_UART1>;
clock-names = "ipg";
dmas = <&edma0 0 4>,
@@ -109,6 +129,7 @@
uart2: serial@40029000 {
compatible = "fsl,vf610-lpuart";
reg = <0x40029000 0x1000>;
+ interrupts = <63 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_UART2>;
clock-names = "ipg";
dmas = <&edma0 0 6>,
@@ -120,6 +141,7 @@
uart3: serial@4002a000 {
compatible = "fsl,vf610-lpuart";
reg = <0x4002a000 0x1000>;
+ interrupts = <64 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_UART3>;
clock-names = "ipg";
dmas = <&edma0 0 8>,
@@ -133,15 +155,29 @@
#size-cells = <0>;
compatible = "fsl,vf610-dspi";
reg = <0x4002c000 0x1000>;
+ interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_DSPI0>;
clock-names = "dspi";
spi-num-chipselects = <5>;
status = "disabled";
};
+ dspi1: dspi1@4002d000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,vf610-dspi";
+ reg = <0x4002d000 0x1000>;
+ interrupts = <68 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks VF610_CLK_DSPI1>;
+ clock-names = "dspi";
+ spi-num-chipselects = <5>;
+ status = "disabled";
+ };
+
sai2: sai@40031000 {
compatible = "fsl,vf610-sai";
reg = <0x40031000 0x1000>;
+ interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_SAI2>;
clock-names = "sai";
dma-names = "tx", "rx";
@@ -153,6 +189,7 @@
pit: pit@40037000 {
compatible = "fsl,vf610-pit";
reg = <0x40037000 0x1000>;
+ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_PIT>;
clock-names = "pit";
};
@@ -186,6 +223,7 @@
adc0: adc@4003b000 {
compatible = "fsl,vf610-adc";
reg = <0x4003b000 0x1000>;
+ interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_ADC0>;
clock-names = "adc";
status = "disabled";
@@ -194,6 +232,7 @@
wdoga5: wdog@4003e000 {
compatible = "fsl,vf610-wdt", "fsl,imx21-wdt";
reg = <0x4003e000 0x1000>;
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_WDT>;
clock-names = "wdog";
status = "disabled";
@@ -204,6 +243,7 @@
#size-cells = <0>;
compatible = "fsl,vf610-qspi";
reg = <0x40044000 0x1000>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_QSPI0_EN>,
<&clks VF610_CLK_QSPI0>;
clock-names = "qspi_en", "qspi";
@@ -213,7 +253,6 @@
iomuxc: iomuxc@40048000 {
compatible = "fsl,vf610-iomuxc";
reg = <0x40048000 0x1000>;
- #gpio-range-cells = <3>;
};
gpio0: gpio@40049000 {
@@ -221,6 +260,7 @@
reg = <0x40049000 0x1000 0x400ff000 0x40>;
gpio-controller;
#gpio-cells = <2>;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-ranges = <&iomuxc 0 0 32>;
@@ -231,6 +271,7 @@
reg = <0x4004a000 0x1000 0x400ff040 0x40>;
gpio-controller;
#gpio-cells = <2>;
+ interrupts = <108 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-ranges = <&iomuxc 0 32 32>;
@@ -241,6 +282,7 @@
reg = <0x4004b000 0x1000 0x400ff080 0x40>;
gpio-controller;
#gpio-cells = <2>;
+ interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-ranges = <&iomuxc 0 64 32>;
@@ -251,6 +293,7 @@
reg = <0x4004c000 0x1000 0x400ff0c0 0x40>;
gpio-controller;
#gpio-cells = <2>;
+ interrupts = <110 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-ranges = <&iomuxc 0 96 32>;
@@ -261,6 +304,7 @@
reg = <0x4004d000 0x1000 0x400ff100 0x40>;
gpio-controller;
#gpio-cells = <2>;
+ interrupts = <111 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-ranges = <&iomuxc 0 128 7>;
@@ -274,6 +318,7 @@
usbphy0: usbphy@40050800 {
compatible = "fsl,vf610-usbphy";
reg = <0x40050800 0x400>;
+ interrupts = <50 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_USBPHY0>;
fsl,anatop = <&anatop>;
status = "disabled";
@@ -282,6 +327,7 @@
usbphy1: usbphy@40050c00 {
compatible = "fsl,vf610-usbphy";
reg = <0x40050c00 0x400>;
+ interrupts = <51 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_USBPHY1>;
fsl,anatop = <&anatop>;
status = "disabled";
@@ -292,6 +338,7 @@
#size-cells = <0>;
compatible = "fsl,vf610-i2c";
reg = <0x40066000 0x1000>;
+ interrupts = <71 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_I2C0>;
clock-names = "ipg";
dmas = <&edma0 0 50>,
@@ -311,6 +358,7 @@
usbdev0: usb@40034000 {
compatible = "fsl,vf610-usb", "fsl,imx27-usb";
reg = <0x40034000 0x800>;
+ interrupts = <75 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_USBC0>;
fsl,usbphy = <&usbphy0>;
fsl,usbmisc = <&usbmisc0 0>;
@@ -329,6 +377,7 @@
src: src@4006e000 {
compatible = "fsl,vf610-src", "syscon";
reg = <0x4006e000 0x1000>;
+ interrupts = <96 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -345,6 +394,9 @@
<0x400a1000 0x1000>,
<0x400a2000 0x1000>;
dma-channels = <32>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>,
+ <11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma-tx", "edma-err";
clock-names = "dmamux0", "dmamux1";
clocks = <&clks VF610_CLK_DMAMUX2>,
<&clks VF610_CLK_DMAMUX3>;
@@ -360,6 +412,7 @@
snvsrtc: snvs-rtc-lp@34 {
compatible = "fsl,sec-v4.0-mon-rtc-lp";
reg = <0x34 0x58>;
+ interrupts = <100 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_SNVS>;
clock-names = "snvs-rtc";
};
@@ -368,6 +421,7 @@
uart4: serial@400a9000 {
compatible = "fsl,vf610-lpuart";
reg = <0x400a9000 0x1000>;
+ interrupts = <65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_UART4>;
clock-names = "ipg";
status = "disabled";
@@ -376,6 +430,7 @@
uart5: serial@400aa000 {
compatible = "fsl,vf610-lpuart";
reg = <0x400aa000 0x1000>;
+ interrupts = <66 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_UART5>;
clock-names = "ipg";
status = "disabled";
@@ -384,6 +439,7 @@
adc1: adc@400bb000 {
compatible = "fsl,vf610-adc";
reg = <0x400bb000 0x1000>;
+ interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_ADC1>;
clock-names = "adc";
status = "disabled";
@@ -392,6 +448,7 @@
esdhc1: esdhc@400b2000 {
compatible = "fsl,imx53-esdhc";
reg = <0x400b2000 0x1000>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_IPG_BUS>,
<&clks VF610_CLK_PLATFORM_BUS>,
<&clks VF610_CLK_ESDHC1>;
@@ -402,6 +459,7 @@
usbh1: usb@400b4000 {
compatible = "fsl,vf610-usb", "fsl,imx27-usb";
reg = <0x400b4000 0x800>;
+ interrupts = <76 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_USBC1>;
fsl,usbphy = <&usbphy1>;
fsl,usbmisc = <&usbmisc1 0>;
@@ -420,6 +478,7 @@
ftm: ftm@400b8000 {
compatible = "fsl,ftm-timer";
reg = <0x400b8000 0x1000 0x400b9000 0x1000>;
+ interrupts = <44 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ftm-evt", "ftm-src",
"ftm-evt-counter-en", "ftm-src-counter-en";
clocks = <&clks VF610_CLK_FTM2>,
@@ -432,6 +491,7 @@
fec0: ethernet@400d0000 {
compatible = "fsl,mvf600-fec";
reg = <0x400d0000 0x1000>;
+ interrupts = <78 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_ENET0>,
<&clks VF610_CLK_ENET0>,
<&clks VF610_CLK_ENET>;
@@ -442,6 +502,7 @@
fec1: ethernet@400d1000 {
compatible = "fsl,mvf600-fec";
reg = <0x400d1000 0x1000>;
+ interrupts = <79 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_ENET1>,
<&clks VF610_CLK_ENET1>,
<&clks VF610_CLK_ENET>;
@@ -452,6 +513,7 @@
can1: flexcan@400d4000 {
compatible = "fsl,vf610-flexcan";
reg = <0x400d4000 0x4000>;
+ interrupts = <59 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_FLEXCAN1>,
<&clks VF610_CLK_FLEXCAN1>;
clock-names = "ipg", "per";
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 3c165fc2dce2..5f8a52ac7edf 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -55,22 +55,81 @@ bool mcpm_is_available(void)
return (platform_ops) ? true : false;
}
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+static inline bool mcpm_cluster_unused(unsigned int cluster)
+{
+ int i, cnt;
+ for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
+ cnt |= mcpm_cpu_use_count[cluster][i];
+ return !cnt;
+}
+
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
+ bool cpu_is_down, cluster_is_down;
+ int ret = 0;
+
if (!platform_ops)
return -EUNATCH; /* try not to shadow power_up errors */
might_sleep();
- return platform_ops->power_up(cpu, cluster);
+
+ /* backward compatibility callback */
+ if (platform_ops->power_up)
+ return platform_ops->power_up(cpu, cluster);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&mcpm_lock);
+
+ cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
+ cluster_is_down = mcpm_cluster_unused(cluster);
+
+ mcpm_cpu_use_count[cluster][cpu]++;
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up before it had a chance
+ * to actually make itself down.
+ * Any other value is a bug.
+ */
+ BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
+ mcpm_cpu_use_count[cluster][cpu] != 2);
+
+ if (cluster_is_down)
+ ret = platform_ops->cluster_powerup(cluster);
+ if (cpu_is_down && !ret)
+ ret = platform_ops->cpu_powerup(cpu, cluster);
+
+ arch_spin_unlock(&mcpm_lock);
+ local_irq_enable();
+ return ret;
}
typedef void (*phys_reset_t)(unsigned long);
void mcpm_cpu_power_down(void)
{
+ unsigned int mpidr, cpu, cluster;
+ bool cpu_going_down, last_man;
phys_reset_t phys_reset;
- if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
- return;
+ if (WARN_ON_ONCE(!platform_ops))
+ return;
BUG_ON(!irqs_disabled());
/*
@@ -79,28 +138,65 @@ void mcpm_cpu_power_down(void)
*/
setup_mm_for_reboot();
- platform_ops->power_down();
+ /* backward compatibility callback */
+ if (platform_ops->power_down) {
+ platform_ops->power_down();
+ goto not_dead;
+ }
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+ __mcpm_cpu_going_down(cpu, cluster);
+ arch_spin_lock(&mcpm_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+
+ mcpm_cpu_use_count[cluster][cpu]--;
+ BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
+ mcpm_cpu_use_count[cluster][cpu] != 1);
+ cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
+ last_man = mcpm_cluster_unused(cluster);
+
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ platform_ops->cpu_powerdown_prepare(cpu, cluster);
+ platform_ops->cluster_powerdown_prepare(cluster);
+ arch_spin_unlock(&mcpm_lock);
+ platform_ops->cluster_cache_disable();
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ if (cpu_going_down)
+ platform_ops->cpu_powerdown_prepare(cpu, cluster);
+ arch_spin_unlock(&mcpm_lock);
+ /*
+ * If cpu_going_down is false here, that means a power_up
+ * request raced ahead of us. Even if we do not want to
+ * shut this CPU down, the caller still expects execution
+ * to return through the system resume entry path, like
+ * when the WFI is aborted due to a new IRQ or the like..
+ * So let's continue with cache cleaning in all cases.
+ */
+ platform_ops->cpu_cache_disable();
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ if (cpu_going_down)
+ wfi();
+
+not_dead:
/*
* It is possible for a power_up request to happen concurrently
* with a power_down request for the same CPU. In this case the
- * power_down method might not be able to actually enter a
- * powered down state with the WFI instruction if the power_up
- * method has removed the required reset condition. The
- * power_down method is then allowed to return. We must perform
- * a re-entry in the kernel as if the power_up method just had
- * deasserted reset on the CPU.
- *
- * To simplify race issues, the platform specific implementation
- * must accommodate for the possibility of unordered calls to
- * power_down and power_up with a usage count. Therefore, if a
- * call to power_up is issued for a CPU that is not down, then
- * the next call to power_down must not attempt a full shutdown
- * but only do the minimum (normally disabling L1 cache and CPU
- * coherency) and return just as if a concurrent power_up request
- * had happened as described above.
+ * CPU might not be able to actually enter a powered down state
+ * with the WFI instruction if the power_up request has removed
+ * the required reset condition. We must perform a re-entry in
+ * the kernel as if the power_up method just had deasserted reset
+ * on the CPU.
*/
-
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
phys_reset(virt_to_phys(mcpm_entry_point));
@@ -125,26 +221,66 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
void mcpm_cpu_suspend(u64 expected_residency)
{
- phys_reset_t phys_reset;
-
- if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+ if (WARN_ON_ONCE(!platform_ops))
return;
- BUG_ON(!irqs_disabled());
- /* Very similar to mcpm_cpu_power_down() */
- setup_mm_for_reboot();
- platform_ops->suspend(expected_residency);
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
- phys_reset(virt_to_phys(mcpm_entry_point));
- BUG();
+ /* backward compatibility callback */
+ if (platform_ops->suspend) {
+ phys_reset_t phys_reset;
+ BUG_ON(!irqs_disabled());
+ setup_mm_for_reboot();
+ platform_ops->suspend(expected_residency);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+ BUG();
+ }
+
+ /* Some platforms might have to enable special resume modes, etc. */
+ if (platform_ops->cpu_suspend_prepare) {
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ arch_spin_lock(&mcpm_lock);
+ platform_ops->cpu_suspend_prepare(cpu, cluster);
+ arch_spin_unlock(&mcpm_lock);
+ }
+ mcpm_cpu_power_down();
}
int mcpm_cpu_powered_up(void)
{
+ unsigned int mpidr, cpu, cluster;
+ bool cpu_was_down, first_man;
+ unsigned long flags;
+
if (!platform_ops)
return -EUNATCH;
- if (platform_ops->powered_up)
+
+ /* backward compatibility callback */
+ if (platform_ops->powered_up) {
platform_ops->powered_up();
+ return 0;
+ }
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ local_irq_save(flags);
+ arch_spin_lock(&mcpm_lock);
+
+ cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
+ first_man = mcpm_cluster_unused(cluster);
+
+ if (first_man && platform_ops->cluster_is_up)
+ platform_ops->cluster_is_up(cluster);
+ if (cpu_was_down)
+ mcpm_cpu_use_count[cluster][cpu] = 1;
+ if (platform_ops->cpu_is_up)
+ platform_ops->cpu_is_up(cpu, cluster);
+
+ arch_spin_unlock(&mcpm_lock);
+ local_irq_restore(flags);
+
return 0;
}
@@ -334,8 +470,10 @@ int __init mcpm_sync_init(
}
mpidr = read_cpuid_mpidr();
this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- for_each_online_cpu(i)
+ for_each_online_cpu(i) {
+ mcpm_cpu_use_count[this_cluster][i] = 1;
mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+ }
mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
sync_cache_w(&mcpm_sync);
diff --git a/arch/arm/configs/ape6evm_defconfig b/arch/arm/configs/ape6evm_defconfig
deleted file mode 100644
index 9e9a72e3d30f..000000000000
--- a/arch/arm/configs/ape6evm_defconfig
+++ /dev/null
@@ -1,109 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-CONFIG_ARCH_SHMOBILE_LEGACY=y
-CONFIG_ARCH_R8A73A4=y
-CONFIG_MACH_APE6EVM=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_CPU_BPREDICT_DISABLE=y
-CONFIG_PL310_ERRATA_588369=y
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_SMP=y
-CONFIG_SCHED_MC=y
-CONFIG_HAVE_ARM_ARCH_TIMER=y
-CONFIG_NR_CPUS=8
-CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
-CONFIG_HIGHPTE=y
-# CONFIG_HW_PERF_EVENTS is not set
-# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-CONFIG_BINFMT_MISC=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6_SIT is not set
-CONFIG_NETFILTER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FW_LOADER_USER_HELPER is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_CADENCE is not set
-CONFIG_SMC91X=y
-CONFIG_SMSC911X=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=12
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_SH_MOBILE=y
-CONFIG_GPIO_SH_PFC=y
-CONFIG_GPIOLIB=y
-# CONFIG_HWMON is not set
-CONFIG_THERMAL=y
-CONFIG_RCAR_THERMAL=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_REGULATOR_GPIO=y
-CONFIG_REGULATOR_MAX8973=y
-# CONFIG_HID is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_SDHI=y
-CONFIG_MMC_SH_MMCIF=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_DMADEVICES=y
-CONFIG_SH_DMAE=y
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_ENABLE_DEFAULT_TRACERS=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=y
-CONFIG_CRYPTO_TWOFISH=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=y
-CONFIG_CRC7=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 811e72bbe642..bcef49a21801 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -13,10 +13,13 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_MULTI_V4T=y
+CONFIG_ARCH_MULTI_V5=y
+# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_AT91=y
+CONFIG_SOC_SAM_V4_V5=y
CONFIG_SOC_AT91RM9200=y
CONFIG_SOC_AT91SAM9=y
-CONFIG_AT91_TIMER_HZ=128
CONFIG_AEABI=y
CONFIG_UACCESS_WITH_MEMCPY=y
CONFIG_ZBOOT_ROM_TEXT=0x0
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 0494c8f229a2..d59009878312 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -12,7 +12,6 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_FPE_NWFPE=y
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
-CONFIG_ARTHUR=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 1d8935359fd0..d034c96c039b 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -26,6 +26,8 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_EXYNOS_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_PM=y
@@ -34,6 +36,14 @@ CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=y
+CONFIG_MWIFIEX=y
+CONFIG_MWIFIEX_SDIO=y
CONFIG_RFKILL_REGULATOR=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -91,9 +101,11 @@ CONFIG_CHARGER_MAX77693=y
CONFIG_CHARGER_TPS65090=y
CONFIG_HWMON=y
CONFIG_SENSORS_LM90=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_THERMAL=y
CONFIG_THERMAL=y
CONFIG_EXYNOS_THERMAL=y
-CONFIG_EXYNOS_THERMAL_CORE=y
+CONFIG_THERMAL_EMULATION=y
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MFD_CROS_EC=y
@@ -118,6 +130,7 @@ CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_TPS65090=y
CONFIG_DRM=y
+CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_BRIDGE=y
CONFIG_DRM_PTN3460=y
CONFIG_DRM_PS8622=y
@@ -171,10 +184,11 @@ CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_S3C=y
CONFIG_DMADEVICES=y
CONFIG_PL330_DMA=y
+CONFIG_CHROME_PLATFORMS=y
+CONFIG_CROS_EC_CHARDEV=y
CONFIG_COMMON_CLK_MAX77686=y
CONFIG_COMMON_CLK_MAX77802=y
CONFIG_COMMON_CLK_S2MPS11=y
-CONFIG_EXYNOS_IOMMU=y
CONFIG_EXTCON=y
CONFIG_EXTCON_MAX14577=y
CONFIG_EXTCON_MAX77693=y
@@ -197,6 +211,8 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index e6b0007355f8..d3a8018639de 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -24,9 +24,8 @@ CONFIG_ARCH_MXC=y
CONFIG_MACH_SCB9328=y
CONFIG_MACH_APF9328=y
CONFIG_MACH_MX21ADS=y
-CONFIG_MACH_MX25_3DS=y
CONFIG_MACH_EUKREA_CPUIMX25SD=y
-CONFIG_MACH_IMX25_DT=y
+CONFIG_SOC_IMX25=y
CONFIG_MACH_MX27ADS=y
CONFIG_MACH_MX27_3DS=y
CONFIG_MACH_IMX27_VISSTRIM_M10=y
@@ -177,6 +176,7 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index cf1e71e2f60a..fdeb1c83dcb5 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -71,6 +71,9 @@ CONFIG_IPV6=y
CONFIG_NETFILTER=y
CONFIG_CAN=y
CONFIG_CAN_FLEXCAN=y
+CONFIG_BT=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_3WIRE=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_RFKILL=y
@@ -168,6 +171,7 @@ CONFIG_SPI=y
CONFIG_SPI_IMX=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_MC9S08DZ60=y
+CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_STMPE=y
CONFIG_POWER_SUPPLY=y
CONFIG_POWER_RESET=y
diff --git a/arch/arm/configs/mackerel_defconfig b/arch/arm/configs/mackerel_defconfig
deleted file mode 100644
index 05a529311b4d..000000000000
--- a/arch/arm/configs/mackerel_defconfig
+++ /dev/null
@@ -1,157 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-# CONFIG_NET_NS is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_SHMOBILE_LEGACY=y
-CONFIG_ARCH_SH7372=y
-CONFIG_MACH_MACKEREL=y
-CONFIG_MEMORY_SIZE=0x10000000
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_FORCE_MAX_ZONEORDER=15
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_KEXEC=y
-CONFIG_VFP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_ARM_INTEGRATOR=y
-CONFIG_MTD_BLOCK2MTD=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=8
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_SH_MOBILE=y
-# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
-CONFIG_REGULATOR=y
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_SH_MOBILE_LCDC=y
-CONFIG_FB_SH_MOBILE_HDMI=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_VERBOSE_PROCFS is not set
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-CONFIG_SND_SOC_SH4_FSI=y
-CONFIG_USB=y
-CONFIG_USB_RENESAS_USBHS_HCD=y
-CONFIG_USB_RENESAS_USBHS=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_RENESAS_USBHS_UDC=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHI=y
-CONFIG_MMC_SH_MMCIF=y
-CONFIG_DMADEVICES=y
-CONFIG_SH_DMAE=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=y
-CONFIG_NLS_CODEPAGE_775=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_CODEPAGE_852=y
-CONFIG_NLS_CODEPAGE_855=y
-CONFIG_NLS_CODEPAGE_857=y
-CONFIG_NLS_CODEPAGE_860=y
-CONFIG_NLS_CODEPAGE_861=y
-CONFIG_NLS_CODEPAGE_862=y
-CONFIG_NLS_CODEPAGE_863=y
-CONFIG_NLS_CODEPAGE_864=y
-CONFIG_NLS_CODEPAGE_865=y
-CONFIG_NLS_CODEPAGE_866=y
-CONFIG_NLS_CODEPAGE_869=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=y
-CONFIG_NLS_ISO8859_3=y
-CONFIG_NLS_ISO8859_4=y
-CONFIG_NLS_ISO8859_5=y
-CONFIG_NLS_ISO8859_6=y
-CONFIG_NLS_ISO8859_7=y
-CONFIG_NLS_ISO8859_9=y
-CONFIG_NLS_ISO8859_13=y
-CONFIG_NLS_ISO8859_14=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_KOI8_R=y
-CONFIG_NLS_KOI8_U=y
-CONFIG_NLS_UTF8=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_ARM_UNWIND is not set
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/arm/configs/msm_defconfig b/arch/arm/configs/msm_defconfig
deleted file mode 100644
index dd18c9e527d6..000000000000
--- a/arch/arm/configs/msm_defconfig
+++ /dev/null
@@ -1,121 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_ARCH_MSM=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
-CONFIG_HIGHPTE=y
-CONFIG_CLEANCACHE=y
-CONFIG_AUTO_ZRELADDR=y
-CONFIG_VFP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_CFG80211=y
-CONFIG_RFKILL=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_SLIP=y
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_USB_USBNET=y
-# CONFIG_USB_NET_AX8817X is not set
-# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_MOUSE_PS2 is not set
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_SPI=y
-CONFIG_DEBUG_GPIO=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_THERMAL=y
-CONFIG_REGULATOR=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_FB=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_DYNAMIC_MINORS=y
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_ACM=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_DEBUG_FILES=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_RTC_CLASS=y
-CONFIG_STAGING=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_CIFS=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-# CONFIG_DETECT_HUNG_TASK is not set
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_TIMER_STATS=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 9d56781a8f80..f69a459f4f92 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -13,7 +13,7 @@ CONFIG_ARCH_MVEBU=y
CONFIG_MACH_KIRKWOOD=y
CONFIG_MACH_NETXBIG=y
CONFIG_ARCH_MXC=y
-CONFIG_MACH_IMX25_DT=y
+CONFIG_SOC_IMX25=y
CONFIG_MACH_IMX27_DT=y
CONFIG_ARCH_U300=y
CONFIG_PCI_MVEBU=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 06075b6d2463..0ca4a3eaf65d 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -12,10 +12,12 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_CMDLINE_PARTITION=y
CONFIG_ARCH_VIRT=y
+CONFIG_ARCH_ALPINE=y
CONFIG_ARCH_MVEBU=y
CONFIG_MACH_ARMADA_370=y
CONFIG_MACH_ARMADA_375=y
CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
CONFIG_MACH_ARMADA_XP=y
CONFIG_MACH_DOVE=y
CONFIG_ARCH_BCM=y
@@ -37,11 +39,14 @@ CONFIG_ARCH_HIP04=y
CONFIG_ARCH_KEYSTONE=y
CONFIG_ARCH_MESON=y
CONFIG_ARCH_MXC=y
+CONFIG_SOC_IMX50=y
CONFIG_SOC_IMX51=y
CONFIG_SOC_IMX53=y
CONFIG_SOC_IMX6Q=y
CONFIG_SOC_IMX6SL=y
+CONFIG_SOC_IMX6SX=y
CONFIG_SOC_VF610=y
+CONFIG_SOC_LS1021A=y
CONFIG_ARCH_OMAP3=y
CONFIG_ARCH_OMAP4=y
CONFIG_SOC_OMAP5=y
@@ -91,6 +96,7 @@ CONFIG_ARCH_WM8850=y
CONFIG_ARCH_ZYNQ=y
CONFIG_TRUSTED_FOUNDATIONS=y
CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_KEYSTONE=y
CONFIG_PCI_MSI=y
CONFIG_PCI_MVEBU=y
@@ -133,6 +139,9 @@ CONFIG_CAN_BCM=y
CONFIG_CAN_DEV=y
CONFIG_CAN_XILINXCAN=y
CONFIG_CAN_MCP251X=y
+CONFIG_BT=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_RFKILL=y
@@ -200,6 +209,8 @@ CONFIG_USB_NET_SMSC95XX=y
CONFIG_BRCMFMAC=m
CONFIG_RT2X00=m
CONFIG_RT2800USB=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
@@ -208,6 +219,7 @@ CONFIG_KEYBOARD_SPEAR=y
CONFIG_KEYBOARD_ST_KEYSCAN=y
CONFIG_KEYBOARD_CROS_EC=y
CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_ELAN_I2C=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_TOUCHSCREEN_ST1232=m
@@ -308,6 +320,7 @@ CONFIG_BATTERY_SBS=y
CONFIG_CHARGER_TPS65090=y
CONFIG_POWER_RESET_AS3722=y
CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_POWER_RESET_KEYSTONE=y
CONFIG_POWER_RESET_SUN6I=y
CONFIG_POWER_RESET_RMOBILE=y
@@ -505,7 +518,6 @@ CONFIG_DW_DMAC=y
CONFIG_MV_XOR=y
CONFIG_TEGRA20_APB_DMA=y
CONFIG_SH_DMAE=y
-CONFIG_RCAR_AUDMAC_PP=m
CONFIG_RCAR_DMAC=y
CONFIG_STE_DMA40=y
CONFIG_SIRF_DMA=y
@@ -533,6 +545,8 @@ CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_ARM_TEGRA_DEVFREQ=m
CONFIG_MEMORY=y
CONFIG_TI_AEMIF=y
CONFIG_IIO=y
@@ -550,6 +564,7 @@ CONFIG_PHY_MIPHY365X=y
CONFIG_PHY_STIH41X_USB=y
CONFIG_PHY_STIH407_USB=y
CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index 73673e95f23c..cacc9f4055a7 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -5,6 +5,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -12,6 +13,7 @@ CONFIG_ARCH_MVEBU=y
CONFIG_MACH_ARMADA_370=y
CONFIG_MACH_ARMADA_375=y
CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
CONFIG_MACH_ARMADA_XP=y
CONFIG_MACH_DOVE=y
CONFIG_PCI=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index c7906c2fd645..b47e7c6628c9 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -149,6 +149,7 @@ CONFIG_EXT4_FS=y
CONFIG_FSCACHE=m
CONFIG_FSCACHE_STATS=y
CONFIG_CACHEFILES=m
+CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index a7dce674f1be..0c8a78734536 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -34,7 +33,6 @@ CONFIG_ARCH_OMAP16XX=y
CONFIG_MACH_OMAP_INNOVATOR=y
CONFIG_MACH_OMAP_H2=y
CONFIG_MACH_OMAP_H3=y
-CONFIG_MACH_OMAP_HTCWIZARD=y
CONFIG_MACH_HERALD=y
CONFIG_MACH_OMAP_OSK=y
CONFIG_MACH_OMAP_PERSEUS2=y
@@ -55,7 +53,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=1f03 rootfstype=jffs2"
@@ -80,8 +77,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IPV6=y
CONFIG_NETFILTER=y
CONFIG_BT=y
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=y
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=y
@@ -92,11 +87,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
# CONFIG_PROC_EVENTS is not set
CONFIG_MTD=y
-CONFIG_MTD_DEBUG=y
-CONFIG_MTD_DEBUG_VERBOSE=3
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -113,11 +104,9 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
CONFIG_USB_CATC=y
CONFIG_USB_KAWETH=y
@@ -158,7 +147,6 @@ CONFIG_SPI_OMAP_UWIRE=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_OMAP_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
@@ -168,7 +156,6 @@ CONFIG_FB_OMAP_LCDC_EXTERNAL=y
CONFIG_FB_OMAP_LCDC_HWA742=y
CONFIG_FB_OMAP_MANUAL_UPDATE=y
CONFIG_FB_OMAP_LCD_MIPID=y
-CONFIG_FB_OMAP_BOOTLOADER_INIT=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -194,7 +181,6 @@ CONFIG_SND_OMAP_SOC=y
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_PHY=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
@@ -261,9 +247,7 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 8e108599e1af..3743ca221d40 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -1,3 +1,4 @@
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
@@ -86,17 +87,33 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
+CONFIG_PHONET=m
CONFIG_CAN=m
CONFIG_CAN_C_CAN=m
CONFIG_CAN_C_CAN_PLATFORM=m
CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_CFG80211=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
+CONFIG_AF_RXRPC=m
+CONFIG_RXKAD=m
CONFIG_MAC80211=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -163,6 +180,7 @@ CONFIG_USB_EPSON2888=y
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_KC2190=y
+CONFIG_USB_CDC_PHONET=m
CONFIG_LIBERTAS=m
CONFIG_LIBERTAS_USB=m
CONFIG_LIBERTAS_SDIO=m
@@ -209,6 +227,10 @@ CONFIG_I2C_CHARDEV=y
CONFIG_SPI=y
CONFIG_SPI_OMAP24XX=y
CONFIG_SPI_TI_QSPI=m
+CONFIG_HSI=m
+CONFIG_OMAP_SSI=m
+CONFIG_NOKIA_MODEM=m
+CONFIG_SSI_PROTOCOL=m
CONFIG_PINCTRL_SINGLE=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
@@ -334,6 +356,7 @@ CONFIG_USB_CONFIGFS_ECM=y
CONFIG_USB_CONFIGFS_ECM_SUBSET=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_PHONET=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_LB_SS=y
CONFIG_USB_CONFIGFS_F_FS=y
@@ -342,6 +365,7 @@ CONFIG_USB_CONFIGFS_F_UAC2=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_ZERO=m
+CONFIG_USB_G_NOKIA=m
CONFIG_MMC=y
CONFIG_SDIO_UART=y
CONFIG_MMC_OMAP=y
@@ -349,6 +373,7 @@ CONFIG_MMC_OMAP_HS=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=m
CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_PWM=m
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_ONESHOT=m
@@ -368,6 +393,7 @@ CONFIG_TI_EDMA=y
CONFIG_DMA_OMAP=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXTCON=m
+CONFIG_EXTCON_USB_GPIO=m
CONFIG_EXTCON_PALMAS=m
CONFIG_TI_EMIF=m
CONFIG_PWM=y
@@ -390,6 +416,7 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_SUMMARY=y
CONFIG_JFFS2_FS_XATTR=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 8c7da3319d82..d2f2babfd47a 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -97,9 +97,9 @@ CONFIG_PINCTRL_APQ8084=y
CONFIG_PINCTRL_IPQ8064=y
CONFIG_PINCTRL_MSM8960=y
CONFIG_PINCTRL_MSM8X74=y
+CONFIG_GPIOLIB=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
-CONFIG_POWER_SUPPLY=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_MSM=y
CONFIG_THERMAL=y
@@ -125,7 +125,7 @@ CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_MMC=y
-CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
@@ -134,14 +134,15 @@ CONFIG_RTC_CLASS=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_BAM_DMA=y
CONFIG_STAGING=y
-CONFIG_QCOM_GSBI=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_APQ_MMCC_8084=y
-CONFIG_IPQ_GCC_806X=y
+CONFIG_IPQ_LCC_806X=y
CONFIG_MSM_GCC_8660=y
+CONFIG_MSM_LCC_8960=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
CONFIG_MSM_IOMMU=y
+CONFIG_QCOM_GSBI=y
CONFIG_PHY_QCOM_APQ8064_SATA=y
CONFIG_PHY_QCOM_IPQ806X_SATA=y
CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index b17036088726..b58618e2d13c 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -12,7 +12,9 @@ CONFIG_SLAB=y
CONFIG_ARCH_SHMOBILE_MULTI=y
CONFIG_ARCH_EMEV2=y
CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R8A73A4=y
CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7778=y
CONFIG_ARCH_R8A7779=y
CONFIG_ARCH_R8A7790=y
CONFIG_ARCH_R8A7791=y
@@ -92,7 +94,6 @@ CONFIG_INPUT_ADXL34X=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_EM=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=20
@@ -109,6 +110,9 @@ CONFIG_SPI_SH_HSPI=y
CONFIG_GPIO_EM=y
CONFIG_GPIO_RCAR=y
CONFIG_GPIO_PCF857X=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_RMOBILE=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
@@ -121,6 +125,7 @@ CONFIG_REGULATOR=y
CONFIG_REGULATOR_AS3711=y
CONFIG_REGULATOR_DA9210=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_MAX8973=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
@@ -133,6 +138,7 @@ CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_RENESAS_VSP1=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7180=y
+CONFIG_VIDEO_ML86V7667=y
CONFIG_DRM=y
CONFIG_DRM_RCAR_DU=y
CONFIG_FB_SH_MOBILE_LCDC=y
@@ -167,6 +173,7 @@ CONFIG_LEDS_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RS5C372=y
CONFIG_RTC_DRV_S35390A=y
+CONFIG_RTC_DRV_RX8581=y
CONFIG_DMADEVICES=y
CONFIG_SH_DMAE=y
CONFIG_RCAR_DMAC=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 8f6a5702b696..8ecba00dcd83 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -107,6 +107,7 @@ CONFIG_RTC_DRV_SUN6I=y
CONFIG_RTC_DRV_SUNXI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
CONFIG_EXT4_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
new file mode 100644
index 000000000000..8da2207b0072
--- /dev/null
+++ b/arch/arm/crypto/Kconfig
@@ -0,0 +1,130 @@
+
+menuconfig ARM_CRYPTO
+ bool "ARM Accelerated Cryptographic Algorithms"
+ depends on ARM
+ help
+ Say Y here to choose from a selection of cryptographic algorithms
+ implemented using ARM specific CPU features or instructions.
+
+if ARM_CRYPTO
+
+config CRYPTO_SHA1_ARM
+ tristate "SHA1 digest algorithm (ARM-asm)"
+ select CRYPTO_SHA1
+ select CRYPTO_HASH
+ help
+ SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+ using optimized ARM assembler.
+
+config CRYPTO_SHA1_ARM_NEON
+ tristate "SHA1 digest algorithm (ARM NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_SHA1_ARM
+ select CRYPTO_SHA1
+ select CRYPTO_HASH
+ help
+ SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+ using optimized ARM NEON assembly, when NEON instructions are
+ available.
+
+config CRYPTO_SHA1_ARM_CE
+ tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_SHA1_ARM
+ select CRYPTO_HASH
+ help
+ SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+ using special ARMv8 Crypto Extensions.
+
+config CRYPTO_SHA2_ARM_CE
+ tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_SHA256_ARM
+ select CRYPTO_HASH
+ help
+ SHA-256 secure hash standard (DFIPS 180-2) implemented
+ using special ARMv8 Crypto Extensions.
+
+config CRYPTO_SHA256_ARM
+ tristate "SHA-224/256 digest algorithm (ARM-asm and NEON)"
+ select CRYPTO_HASH
+ depends on !CPU_V7M
+ help
+ SHA-256 secure hash standard (DFIPS 180-2) implemented
+ using optimized ARM assembler and NEON, when available.
+
+config CRYPTO_SHA512_ARM_NEON
+ tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_SHA512
+ select CRYPTO_HASH
+ help
+ SHA-512 secure hash standard (DFIPS 180-2) implemented
+ using ARM NEON instructions, when available.
+
+ This version of SHA implements a 512 bit hash with 256 bits of
+ security against collision attacks.
+
+ This code also includes SHA-384, a 384 bit hash with 192 bits
+ of security against collision attacks.
+
+config CRYPTO_AES_ARM
+ tristate "AES cipher algorithms (ARM-asm)"
+ depends on ARM
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ help
+ Use optimized AES assembler routines for ARM platforms.
+
+ AES cipher algorithms (FIPS-197). AES uses the Rijndael
+ algorithm.
+
+ Rijndael appears to be consistently a very good performer in
+ both hardware and software across a wide range of computing
+ environments regardless of its use in feedback or non-feedback
+ modes. Its key setup time is excellent, and its key agility is
+ good. Rijndael's very low memory requirements make it very well
+ suited for restricted-space environments, in which it also
+ demonstrates excellent performance. Rijndael's operations are
+ among the easiest to defend against power and timing attacks.
+
+ The AES specifies three key sizes: 128, 192 and 256 bits
+
+ See <http://csrc.nist.gov/encryption/aes/> for more information.
+
+config CRYPTO_AES_ARM_BS
+ tristate "Bit sliced AES using NEON instructions"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES_ARM
+ select CRYPTO_ABLK_HELPER
+ help
+ Use a faster and more secure NEON-based implementation of AES in CBC,
+ CTR and XTS modes.
+
+ Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
+ and for XTS mode encryption, CBC and XTS mode decryption speedup is
+ around 25%. (CBC encryption speed is not affected by this driver.)
+ This implementation does not rely on any lookup tables so it is
+ believed to be invulnerable to cache timing attacks.
+
+config CRYPTO_AES_ARM_CE
+ tristate "Accelerated AES using ARMv8 Crypto Extensions"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_ABLK_HELPER
+ help
+ Use an implementation of AES in CBC, CTR and XTS modes that uses
+ ARMv8 Crypto Extensions
+
+config CRYPTO_GHASH_ARM_CE
+ tristate "PMULL-accelerated GHASH using ARMv8 Crypto Extensions"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_CRYPTD
+ help
+ Use an implementation of GHASH (used by the GCM AEAD chaining mode)
+ that uses the 64x64-to-128-bit polynomial multiplication (vmull.p64)
+ instruction that is part of the ARMv8 Crypto Extensions.
+
+endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index b48fa341648d..6ea828241fcb 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -6,13 +6,35 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
+obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
+ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
+
+ifneq ($(ce-obj-y)$(ce-obj-m),)
+ifeq ($(call as-instr,.fpu crypto-neon-fp-armv8,y,n),y)
+obj-y += $(ce-obj-y)
+obj-m += $(ce-obj-m)
+else
+$(warning These ARMv8 Crypto Extensions modules need binutils 2.23 or higher)
+$(warning $(ce-obj-y) $(ce-obj-m))
+endif
+endif
+
aes-arm-y := aes-armv4.o aes_glue.o
aes-arm-bs-y := aesbs-core.o aesbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
+sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
+sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
+sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
+sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
+aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
+ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
@@ -20,4 +42,7 @@ quiet_cmd_perl = PERL $@
$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
$(call cmd,perl)
-.PRECIOUS: $(obj)/aesbs-core.S
+$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
+ $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
new file mode 100644
index 000000000000..8cfa468ee570
--- /dev/null
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -0,0 +1,518 @@
+/*
+ * aes-ce-core.S - AES in ECB/CBC/CTR/XTS mode using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .fpu crypto-neon-fp-armv8
+ .align 3
+
+ .macro enc_round, state, key
+ aese.8 \state, \key
+ aesmc.8 \state, \state
+ .endm
+
+ .macro dec_round, state, key
+ aesd.8 \state, \key
+ aesimc.8 \state, \state
+ .endm
+
+ .macro enc_dround, key1, key2
+ enc_round q0, \key1
+ enc_round q0, \key2
+ .endm
+
+ .macro dec_dround, key1, key2
+ dec_round q0, \key1
+ dec_round q0, \key2
+ .endm
+
+ .macro enc_fround, key1, key2, key3
+ enc_round q0, \key1
+ aese.8 q0, \key2
+ veor q0, q0, \key3
+ .endm
+
+ .macro dec_fround, key1, key2, key3
+ dec_round q0, \key1
+ aesd.8 q0, \key2
+ veor q0, q0, \key3
+ .endm
+
+ .macro enc_dround_3x, key1, key2
+ enc_round q0, \key1
+ enc_round q1, \key1
+ enc_round q2, \key1
+ enc_round q0, \key2
+ enc_round q1, \key2
+ enc_round q2, \key2
+ .endm
+
+ .macro dec_dround_3x, key1, key2
+ dec_round q0, \key1
+ dec_round q1, \key1
+ dec_round q2, \key1
+ dec_round q0, \key2
+ dec_round q1, \key2
+ dec_round q2, \key2
+ .endm
+
+ .macro enc_fround_3x, key1, key2, key3
+ enc_round q0, \key1
+ enc_round q1, \key1
+ enc_round q2, \key1
+ aese.8 q0, \key2
+ aese.8 q1, \key2
+ aese.8 q2, \key2
+ veor q0, q0, \key3
+ veor q1, q1, \key3
+ veor q2, q2, \key3
+ .endm
+
+ .macro dec_fround_3x, key1, key2, key3
+ dec_round q0, \key1
+ dec_round q1, \key1
+ dec_round q2, \key1
+ aesd.8 q0, \key2
+ aesd.8 q1, \key2
+ aesd.8 q2, \key2
+ veor q0, q0, \key3
+ veor q1, q1, \key3
+ veor q2, q2, \key3
+ .endm
+
+ .macro do_block, dround, fround
+ cmp r3, #12 @ which key size?
+ vld1.8 {q10-q11}, [ip]!
+ \dround q8, q9
+ vld1.8 {q12-q13}, [ip]!
+ \dround q10, q11
+ vld1.8 {q10-q11}, [ip]!
+ \dround q12, q13
+ vld1.8 {q12-q13}, [ip]!
+ \dround q10, q11
+ blo 0f @ AES-128: 10 rounds
+ vld1.8 {q10-q11}, [ip]!
+ beq 1f @ AES-192: 12 rounds
+ \dround q12, q13
+ vld1.8 {q12-q13}, [ip]
+ \dround q10, q11
+0: \fround q12, q13, q14
+ bx lr
+
+1: \dround q12, q13
+ \fround q10, q11, q14
+ bx lr
+ .endm
+
+ /*
+ * Internal, non-AAPCS compliant functions that implement the core AES
+ * transforms. These should preserve all registers except q0 - q2 and ip
+ * Arguments:
+ * q0 : first in/output block
+ * q1 : second in/output block (_3x version only)
+ * q2 : third in/output block (_3x version only)
+ * q8 : first round key
+ * q9 : second round key
+ * ip : address of 3rd round key
+ * q14 : final round key
+ * r3 : number of rounds
+ */
+ .align 6
+aes_encrypt:
+ add ip, r2, #32 @ 3rd round key
+.Laes_encrypt_tweak:
+ do_block enc_dround, enc_fround
+ENDPROC(aes_encrypt)
+
+ .align 6
+aes_decrypt:
+ add ip, r2, #32 @ 3rd round key
+ do_block dec_dround, dec_fround
+ENDPROC(aes_decrypt)
+
+ .align 6
+aes_encrypt_3x:
+ add ip, r2, #32 @ 3rd round key
+ do_block enc_dround_3x, enc_fround_3x
+ENDPROC(aes_encrypt_3x)
+
+ .align 6
+aes_decrypt_3x:
+ add ip, r2, #32 @ 3rd round key
+ do_block dec_dround_3x, dec_fround_3x
+ENDPROC(aes_decrypt_3x)
+
+ .macro prepare_key, rk, rounds
+ add ip, \rk, \rounds, lsl #4
+ vld1.8 {q8-q9}, [\rk] @ load first 2 round keys
+ vld1.8 {q14}, [ip] @ load last round key
+ .endm
+
+ /*
+ * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks)
+ * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks)
+ */
+ENTRY(ce_aes_ecb_encrypt)
+ push {r4, lr}
+ ldr r4, [sp, #8]
+ prepare_key r2, r3
+.Lecbencloop3x:
+ subs r4, r4, #3
+ bmi .Lecbenc1x
+ vld1.8 {q0-q1}, [r1, :64]!
+ vld1.8 {q2}, [r1, :64]!
+ bl aes_encrypt_3x
+ vst1.8 {q0-q1}, [r0, :64]!
+ vst1.8 {q2}, [r0, :64]!
+ b .Lecbencloop3x
+.Lecbenc1x:
+ adds r4, r4, #3
+ beq .Lecbencout
+.Lecbencloop:
+ vld1.8 {q0}, [r1, :64]!
+ bl aes_encrypt
+ vst1.8 {q0}, [r0, :64]!
+ subs r4, r4, #1
+ bne .Lecbencloop
+.Lecbencout:
+ pop {r4, pc}
+ENDPROC(ce_aes_ecb_encrypt)
+
+ENTRY(ce_aes_ecb_decrypt)
+ push {r4, lr}
+ ldr r4, [sp, #8]
+ prepare_key r2, r3
+.Lecbdecloop3x:
+ subs r4, r4, #3
+ bmi .Lecbdec1x
+ vld1.8 {q0-q1}, [r1, :64]!
+ vld1.8 {q2}, [r1, :64]!
+ bl aes_decrypt_3x
+ vst1.8 {q0-q1}, [r0, :64]!
+ vst1.8 {q2}, [r0, :64]!
+ b .Lecbdecloop3x
+.Lecbdec1x:
+ adds r4, r4, #3
+ beq .Lecbdecout
+.Lecbdecloop:
+ vld1.8 {q0}, [r1, :64]!
+ bl aes_decrypt
+ vst1.8 {q0}, [r0, :64]!
+ subs r4, r4, #1
+ bne .Lecbdecloop
+.Lecbdecout:
+ pop {r4, pc}
+ENDPROC(ce_aes_ecb_decrypt)
+
+ /*
+ * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 iv[])
+ * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 iv[])
+ */
+ENTRY(ce_aes_cbc_encrypt)
+ push {r4-r6, lr}
+ ldrd r4, r5, [sp, #16]
+ vld1.8 {q0}, [r5]
+ prepare_key r2, r3
+.Lcbcencloop:
+ vld1.8 {q1}, [r1, :64]! @ get next pt block
+ veor q0, q0, q1 @ ..and xor with iv
+ bl aes_encrypt
+ vst1.8 {q0}, [r0, :64]!
+ subs r4, r4, #1
+ bne .Lcbcencloop
+ vst1.8 {q0}, [r5]
+ pop {r4-r6, pc}
+ENDPROC(ce_aes_cbc_encrypt)
+
+ENTRY(ce_aes_cbc_decrypt)
+ push {r4-r6, lr}
+ ldrd r4, r5, [sp, #16]
+ vld1.8 {q6}, [r5] @ keep iv in q6
+ prepare_key r2, r3
+.Lcbcdecloop3x:
+ subs r4, r4, #3
+ bmi .Lcbcdec1x
+ vld1.8 {q0-q1}, [r1, :64]!
+ vld1.8 {q2}, [r1, :64]!
+ vmov q3, q0
+ vmov q4, q1
+ vmov q5, q2
+ bl aes_decrypt_3x
+ veor q0, q0, q6
+ veor q1, q1, q3
+ veor q2, q2, q4
+ vmov q6, q5
+ vst1.8 {q0-q1}, [r0, :64]!
+ vst1.8 {q2}, [r0, :64]!
+ b .Lcbcdecloop3x
+.Lcbcdec1x:
+ adds r4, r4, #3
+ beq .Lcbcdecout
+ vmov q15, q14 @ preserve last round key
+.Lcbcdecloop:
+ vld1.8 {q0}, [r1, :64]! @ get next ct block
+ veor q14, q15, q6 @ combine prev ct with last key
+ vmov q6, q0
+ bl aes_decrypt
+ vst1.8 {q0}, [r0, :64]!
+ subs r4, r4, #1
+ bne .Lcbcdecloop
+.Lcbcdecout:
+ vst1.8 {q6}, [r5] @ keep iv in q6
+ pop {r4-r6, pc}
+ENDPROC(ce_aes_cbc_decrypt)
+
+ /*
+ * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 ctr[])
+ */
+ENTRY(ce_aes_ctr_encrypt)
+ push {r4-r6, lr}
+ ldrd r4, r5, [sp, #16]
+ vld1.8 {q6}, [r5] @ load ctr
+ prepare_key r2, r3
+ vmov r6, s27 @ keep swabbed ctr in r6
+ rev r6, r6
+ cmn r6, r4 @ 32 bit overflow?
+ bcs .Lctrloop
+.Lctrloop3x:
+ subs r4, r4, #3
+ bmi .Lctr1x
+ add r6, r6, #1
+ vmov q0, q6
+ vmov q1, q6
+ rev ip, r6
+ add r6, r6, #1
+ vmov q2, q6
+ vmov s7, ip
+ rev ip, r6
+ add r6, r6, #1
+ vmov s11, ip
+ vld1.8 {q3-q4}, [r1, :64]!
+ vld1.8 {q5}, [r1, :64]!
+ bl aes_encrypt_3x
+ veor q0, q0, q3
+ veor q1, q1, q4
+ veor q2, q2, q5
+ rev ip, r6
+ vst1.8 {q0-q1}, [r0, :64]!
+ vst1.8 {q2}, [r0, :64]!
+ vmov s27, ip
+ b .Lctrloop3x
+.Lctr1x:
+ adds r4, r4, #3
+ beq .Lctrout
+.Lctrloop:
+ vmov q0, q6
+ bl aes_encrypt
+ subs r4, r4, #1
+ bmi .Lctrhalfblock @ blocks < 0 means 1/2 block
+ vld1.8 {q3}, [r1, :64]!
+ veor q3, q0, q3
+ vst1.8 {q3}, [r0, :64]!
+
+ adds r6, r6, #1 @ increment BE ctr
+ rev ip, r6
+ vmov s27, ip
+ bcs .Lctrcarry
+ teq r4, #0
+ bne .Lctrloop
+.Lctrout:
+ vst1.8 {q6}, [r5]
+ pop {r4-r6, pc}
+
+.Lctrhalfblock:
+ vld1.8 {d1}, [r1, :64]
+ veor d0, d0, d1
+ vst1.8 {d0}, [r0, :64]
+ pop {r4-r6, pc}
+
+.Lctrcarry:
+ .irp sreg, s26, s25, s24
+ vmov ip, \sreg @ load next word of ctr
+ rev ip, ip @ ... to handle the carry
+ adds ip, ip, #1
+ rev ip, ip
+ vmov \sreg, ip
+ bcc 0f
+ .endr
+0: teq r4, #0
+ beq .Lctrout
+ b .Lctrloop
+ENDPROC(ce_aes_ctr_encrypt)
+
+ /*
+ * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+ * int blocks, u8 iv[], u8 const rk2[], int first)
+ * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+ * int blocks, u8 iv[], u8 const rk2[], int first)
+ */
+
+ .macro next_tweak, out, in, const, tmp
+ vshr.s64 \tmp, \in, #63
+ vand \tmp, \tmp, \const
+ vadd.u64 \out, \in, \in
+ vext.8 \tmp, \tmp, \tmp, #8
+ veor \out, \out, \tmp
+ .endm
+
+ .align 3
+.Lxts_mul_x:
+ .quad 1, 0x87
+
+ce_aes_xts_init:
+ vldr d14, .Lxts_mul_x
+ vldr d15, .Lxts_mul_x + 8
+
+ ldrd r4, r5, [sp, #16] @ load args
+ ldr r6, [sp, #28]
+ vld1.8 {q0}, [r5] @ load iv
+ teq r6, #1 @ start of a block?
+ bxne lr
+
+ @ Encrypt the IV in q0 with the second AES key. This should only
+ @ be done at the start of a block.
+ ldr r6, [sp, #24] @ load AES key 2
+ prepare_key r6, r3
+ add ip, r6, #32 @ 3rd round key of key 2
+ b .Laes_encrypt_tweak @ tail call
+ENDPROC(ce_aes_xts_init)
+
+ENTRY(ce_aes_xts_encrypt)
+ push {r4-r6, lr}
+
+ bl ce_aes_xts_init @ run shared prologue
+ prepare_key r2, r3
+ vmov q3, q0
+
+ teq r6, #0 @ start of a block?
+ bne .Lxtsenc3x
+
+.Lxtsencloop3x:
+ next_tweak q3, q3, q7, q6
+.Lxtsenc3x:
+ subs r4, r4, #3
+ bmi .Lxtsenc1x
+ vld1.8 {q0-q1}, [r1, :64]! @ get 3 pt blocks
+ vld1.8 {q2}, [r1, :64]!
+ next_tweak q4, q3, q7, q6
+ veor q0, q0, q3
+ next_tweak q5, q4, q7, q6
+ veor q1, q1, q4
+ veor q2, q2, q5
+ bl aes_encrypt_3x
+ veor q0, q0, q3
+ veor q1, q1, q4
+ veor q2, q2, q5
+ vst1.8 {q0-q1}, [r0, :64]! @ write 3 ct blocks
+ vst1.8 {q2}, [r0, :64]!
+ vmov q3, q5
+ teq r4, #0
+ beq .Lxtsencout
+ b .Lxtsencloop3x
+.Lxtsenc1x:
+ adds r4, r4, #3
+ beq .Lxtsencout
+.Lxtsencloop:
+ vld1.8 {q0}, [r1, :64]!
+ veor q0, q0, q3
+ bl aes_encrypt
+ veor q0, q0, q3
+ vst1.8 {q0}, [r0, :64]!
+ subs r4, r4, #1
+ beq .Lxtsencout
+ next_tweak q3, q3, q7, q6
+ b .Lxtsencloop
+.Lxtsencout:
+ vst1.8 {q3}, [r5]
+ pop {r4-r6, pc}
+ENDPROC(ce_aes_xts_encrypt)
+
+
+ENTRY(ce_aes_xts_decrypt)
+ push {r4-r6, lr}
+
+ bl ce_aes_xts_init @ run shared prologue
+ prepare_key r2, r3
+ vmov q3, q0
+
+ teq r6, #0 @ start of a block?
+ bne .Lxtsdec3x
+
+.Lxtsdecloop3x:
+ next_tweak q3, q3, q7, q6
+.Lxtsdec3x:
+ subs r4, r4, #3
+ bmi .Lxtsdec1x
+ vld1.8 {q0-q1}, [r1, :64]! @ get 3 ct blocks
+ vld1.8 {q2}, [r1, :64]!
+ next_tweak q4, q3, q7, q6
+ veor q0, q0, q3
+ next_tweak q5, q4, q7, q6
+ veor q1, q1, q4
+ veor q2, q2, q5
+ bl aes_decrypt_3x
+ veor q0, q0, q3
+ veor q1, q1, q4
+ veor q2, q2, q5
+ vst1.8 {q0-q1}, [r0, :64]! @ write 3 pt blocks
+ vst1.8 {q2}, [r0, :64]!
+ vmov q3, q5
+ teq r4, #0
+ beq .Lxtsdecout
+ b .Lxtsdecloop3x
+.Lxtsdec1x:
+ adds r4, r4, #3
+ beq .Lxtsdecout
+.Lxtsdecloop:
+ vld1.8 {q0}, [r1, :64]!
+ veor q0, q0, q3
+ add ip, r2, #32 @ 3rd round key
+ bl aes_decrypt
+ veor q0, q0, q3
+ vst1.8 {q0}, [r0, :64]!
+ subs r4, r4, #1
+ beq .Lxtsdecout
+ next_tweak q3, q3, q7, q6
+ b .Lxtsdecloop
+.Lxtsdecout:
+ vst1.8 {q3}, [r5]
+ pop {r4-r6, pc}
+ENDPROC(ce_aes_xts_decrypt)
+
+ /*
+ * u32 ce_aes_sub(u32 input) - use the aese instruction to perform the
+ * AES sbox substitution on each byte in
+ * 'input'
+ */
+ENTRY(ce_aes_sub)
+ vdup.32 q1, r0
+ veor q0, q0, q0
+ aese.8 q0, q1
+ vmov r0, s0
+ bx lr
+ENDPROC(ce_aes_sub)
+
+ /*
+ * void ce_aes_invert(u8 *dst, u8 *src) - perform the Inverse MixColumns
+ * operation on round key *src
+ */
+ENTRY(ce_aes_invert)
+ vld1.8 {q0}, [r1]
+ aesimc.8 q0, q0
+ vst1.8 {q0}, [r0]
+ bx lr
+ENDPROC(ce_aes_invert)
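
A minimal C sketch of what the next_tweak macro above computes may help when reading the NEON version: it multiplies the 128-bit XTS tweak by x in GF(2^128), with the .Lxts_mul_x constant {1, 0x87} supplying both the cross-lane carry and the reduction term. The function name and the two-u64 representation below are illustrative only, not part of the patch.

#include <stdint.h>

/*
 * Multiply a 128-bit XTS tweak by x in GF(2^128), reducing modulo
 * x^128 + x^7 + x^2 + x + 1 (hence the 0x87 constant). t[0] holds
 * bits 0..63 and t[1] bits 64..127, matching the little-endian layout
 * of the tweak block.
 */
static void xts_next_tweak(uint64_t t[2])
{
	uint64_t carry = t[1] >> 63;		/* bit 127, about to shift out */

	t[1] = (t[1] << 1) | (t[0] >> 63);	/* shift the 128-bit value left by one */
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);/* fold the reduction term back in */
}

In the assembly, vshr.s64/vand build the conditional {1, 0x87} correction in one step and vext.8 swaps the two halves so each correction lands in the right lane; the arithmetic is the same.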
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
new file mode 100644
index 000000000000..b445a5d56f43
--- /dev/null
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -0,0 +1,524 @@
+/*
+ * aes-ce-glue.c - wrapper code for ARMv8 AES
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+/* defined in aes-ce-core.S */
+asmlinkage u32 ce_aes_sub(u32 input);
+asmlinkage void ce_aes_invert(void *dst, void *src);
+
+asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks);
+asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks);
+
+asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 iv[]);
+asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 iv[]);
+
+asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 ctr[]);
+
+asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
+ int rounds, int blocks, u8 iv[],
+ u8 const rk2[], int first);
+asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
+ int rounds, int blocks, u8 iv[],
+ u8 const rk2[], int first);
+
+struct aes_block {
+ u8 b[AES_BLOCK_SIZE];
+};
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+ /*
+ * # of rounds specified by AES:
+ * 128 bit key 10 rounds
+ * 192 bit key 12 rounds
+ * 256 bit key 14 rounds
+ * => n byte key => 6 + (n/4) rounds
+ */
+ return 6 + ctx->key_length / 4;
+}
+
+static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len)
+{
+ /*
+ * The AES key schedule round constants
+ */
+ static u8 const rcon[] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
+ };
+
+ u32 kwords = key_len / sizeof(u32);
+ struct aes_block *key_enc, *key_dec;
+ int i, j;
+
+ if (key_len != AES_KEYSIZE_128 &&
+ key_len != AES_KEYSIZE_192 &&
+ key_len != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key_enc, in_key, key_len);
+ ctx->key_length = key_len;
+
+ kernel_neon_begin();
+ for (i = 0; i < sizeof(rcon); i++) {
+ u32 *rki = ctx->key_enc + (i * kwords);
+ u32 *rko = rki + kwords;
+
+ rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
+ rko[0] = rko[0] ^ rki[0] ^ rcon[i];
+ rko[1] = rko[0] ^ rki[1];
+ rko[2] = rko[1] ^ rki[2];
+ rko[3] = rko[2] ^ rki[3];
+
+ if (key_len == AES_KEYSIZE_192) {
+ if (i >= 7)
+ break;
+ rko[4] = rko[3] ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ } else if (key_len == AES_KEYSIZE_256) {
+ if (i >= 6)
+ break;
+ rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ rko[6] = rko[5] ^ rki[6];
+ rko[7] = rko[6] ^ rki[7];
+ }
+ }
+
+ /*
+ * Generate the decryption keys for the Equivalent Inverse Cipher.
+ * This involves reversing the order of the round keys, and applying
+ * the Inverse Mix Columns transformation on all but the first and
+ * the last one.
+ */
+ key_enc = (struct aes_block *)ctx->key_enc;
+ key_dec = (struct aes_block *)ctx->key_dec;
+ j = num_rounds(ctx);
+
+ key_dec[0] = key_enc[j];
+ for (i = 1, j--; j > 0; i++, j--)
+ ce_aes_invert(key_dec + i, key_enc + j);
+ key_dec[i] = key_enc[0];
+
+ kernel_neon_end();
+ return 0;
+}
+
+static int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = ce_aes_expandkey(ctx, in_key, key_len);
+ if (!ret)
+ return 0;
+
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+
+struct crypto_aes_xts_ctx {
+ struct crypto_aes_ctx key1;
+ struct crypto_aes_ctx __aligned(8) key2;
+};
+
+static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
+ if (!ret)
+ ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
+ key_len / 2);
+ if (!ret)
+ return 0;
+
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, num_rounds(ctx), blocks);
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
+ walk.iv);
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_dec, num_rounds(ctx), blocks,
+ walk.iv);
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err, blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+
+ kernel_neon_begin();
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
+ walk.iv);
+ nbytes -= blocks * AES_BLOCK_SIZE;
+ if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
+ break;
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (nbytes) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 __aligned(8) tail[AES_BLOCK_SIZE];
+
+ /*
+ * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
+ * to tell aes_ctr_encrypt() to only read half a block.
+ */
+ blocks = (nbytes <= 8) ? -1 : 1;
+
+ ce_aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc,
+ num_rounds(ctx), blocks, walk.iv);
+ memcpy(tdst, tail, nbytes);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = num_rounds(&ctx->key1);
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key1.key_enc, rounds, blocks,
+ walk.iv, (u8 *)ctx->key2.key_enc, first);
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = num_rounds(&ctx->key1);
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key1.key_dec, rounds, blocks,
+ walk.iv, (u8 *)ctx->key2.key_enc, first);
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static struct crypto_alg aes_algs[] = { {
+ .cra_name = "__ecb-aes-ce",
+ .cra_driver_name = "__driver-ecb-aes-ce",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ce_aes_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ },
+}, {
+ .cra_name = "__cbc-aes-ce",
+ .cra_driver_name = "__driver-cbc-aes-ce",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ce_aes_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ },
+}, {
+ .cra_name = "__ctr-aes-ce",
+ .cra_driver_name = "__driver-ctr-aes-ce",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ce_aes_setkey,
+ .encrypt = ctr_encrypt,
+ .decrypt = ctr_encrypt,
+ },
+}, {
+ .cra_name = "__xts-aes-ce",
+ .cra_driver_name = "__driver-xts-aes-ce",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_set_key,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
+ },
+}, {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+} };
+
+static int __init aes_init(void)
+{
+ if (!(elf_hwcap2 & HWCAP2_AES))
+ return -ENODEV;
+ return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+static void __exit aes_exit(void)
+{
+ crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+module_init(aes_init);
+module_exit(aes_exit);
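
The round counts that the glue code passes down to the assembly (and that the cmp r3, #12 dispatch in aes-ce-core.S branches on) come straight from the key length, as the comment in num_rounds() above notes. A trivial standalone sketch of that mapping, for illustration only:

#include <assert.h>

/* Mirror of num_rounds() above: FIPS-197 round counts from the key length in bytes. */
static int aes_rounds(unsigned int key_len)
{
	return 6 + key_len / 4;
}

int main(void)
{
	assert(aes_rounds(16) == 10);	/* AES-128 */
	assert(aes_rounds(24) == 12);	/* AES-192 */
	assert(aes_rounds(32) == 14);	/* AES-256 */
	return 0;
}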
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 15468fbbdea3..6d685298690e 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -301,7 +301,8 @@ static struct crypto_alg aesbs_algs[] = { {
.cra_name = "__cbc-aes-neonbs",
.cra_driver_name = "__driver-cbc-aes-neonbs",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.cra_alignmask = 7,
@@ -319,7 +320,8 @@ static struct crypto_alg aesbs_algs[] = { {
.cra_name = "__ctr-aes-neonbs",
.cra_driver_name = "__driver-ctr-aes-neonbs",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
.cra_alignmask = 7,
@@ -337,7 +339,8 @@ static struct crypto_alg aesbs_algs[] = { {
.cra_name = "__xts-aes-neonbs",
.cra_driver_name = "__driver-xts-aes-neonbs",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.cra_alignmask = 7,
diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S
new file mode 100644
index 000000000000..f6ab8bcc9efe
--- /dev/null
+++ b/arch/arm/crypto/ghash-ce-core.S
@@ -0,0 +1,94 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
+ *
+ * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ SHASH .req q0
+ SHASH2 .req q1
+ T1 .req q2
+ T2 .req q3
+ MASK .req q4
+ XL .req q5
+ XM .req q6
+ XH .req q7
+ IN1 .req q7
+
+ SHASH_L .req d0
+ SHASH_H .req d1
+ SHASH2_L .req d2
+ T1_L .req d4
+ MASK_L .req d8
+ XL_L .req d10
+ XL_H .req d11
+ XM_L .req d12
+ XM_H .req d13
+ XH_L .req d14
+
+ .text
+ .fpu crypto-neon-fp-armv8
+
+ /*
+ * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ * struct ghash_key const *k, const char *head)
+ */
+ENTRY(pmull_ghash_update)
+ vld1.64 {SHASH}, [r3]
+ vld1.64 {XL}, [r1]
+ vmov.i8 MASK, #0xe1
+ vext.8 SHASH2, SHASH, SHASH, #8
+ vshl.u64 MASK, MASK, #57
+ veor SHASH2, SHASH2, SHASH
+
+ /* do the head block first, if supplied */
+ ldr ip, [sp]
+ teq ip, #0
+ beq 0f
+ vld1.64 {T1}, [ip]
+ teq r0, #0
+ b 1f
+
+0: vld1.64 {T1}, [r2]!
+ subs r0, r0, #1
+
+1: /* multiply XL by SHASH in GF(2^128) */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ vrev64.8 T1, T1
+#endif
+ vext.8 T2, XL, XL, #8
+ vext.8 IN1, T1, T1, #8
+ veor T1, T1, T2
+ veor XL, XL, IN1
+
+ vmull.p64 XH, SHASH_H, XL_H @ a1 * b1
+ veor T1, T1, XL
+ vmull.p64 XL, SHASH_L, XL_L @ a0 * b0
+ vmull.p64 XM, SHASH2_L, T1_L @ (a1 + a0)(b1 + b0)
+
+ vext.8 T1, XL, XH, #8
+ veor T2, XL, XH
+ veor XM, XM, T1
+ veor XM, XM, T2
+ vmull.p64 T2, XL_L, MASK_L
+
+ vmov XH_L, XM_H
+ vmov XM_H, XL_L
+
+ veor XL, XM, T2
+ vext.8 T2, XL, XL, #8
+ vmull.p64 XL, XL_L, MASK_L
+ veor T2, T2, XH
+ veor XL, XL, T2
+
+ bne 0b
+
+ vst1.64 {XL}, [r1]
+ bx lr
+ENDPROC(pmull_ghash_update)
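
The three vmull.p64 instructions above implement one 128x128-bit carry-less multiplication Karatsuba-style: a1*b1, a0*b0 and (a1+a0)*(b1+b0), with the middle term recovered by XORing the outer products back in. A bit-serial C sketch of that structure follows; it is a reference illustration only (the final reduction modulo x^128 + x^7 + x^2 + x + 1, which the MASK_L multiplications perform, is omitted, and all names are made up for this sketch).

#include <stdint.h>

/* Bit-serial 64x64 -> 128-bit carry-less multiply (reference, not fast). */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint64_t h = 0, l = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (b & (1ULL << i)) {
			l ^= a << i;
			if (i)
				h ^= a >> (64 - i);
		}
	}
	*hi = h;
	*lo = l;
}

/*
 * Karatsuba split as used by pmull_ghash_update: three 64x64 multiplies
 * instead of four. The 256-bit product ends up in x[0..3], least
 * significant word first, still unreduced.
 */
static void clmul128(const uint64_t a[2], const uint64_t b[2], uint64_t x[4])
{
	uint64_t hh[2], ll[2], mm[2];

	clmul64(a[1], b[1], &hh[1], &hh[0]);			/* a1 * b1  (XH) */
	clmul64(a[0], b[0], &ll[1], &ll[0]);			/* a0 * b0  (XL) */
	clmul64(a[0] ^ a[1], b[0] ^ b[1], &mm[1], &mm[0]);	/* (a1+a0)(b1+b0)  (XM) */

	mm[0] ^= ll[0] ^ hh[0];		/* recover the true middle term */
	mm[1] ^= ll[1] ^ hh[1];

	x[0] = ll[0];
	x[1] = ll[1] ^ mm[0];
	x[2] = hh[0] ^ mm[1];
	x[3] = hh[1];
}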
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
new file mode 100644
index 000000000000..03a39fe29246
--- /dev/null
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -0,0 +1,320 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
+ *
+ * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/cryptd.h>
+#include <crypto/internal/hash.h>
+#include <crypto/gf128mul.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_key {
+ u64 a;
+ u64 b;
+};
+
+struct ghash_desc_ctx {
+ u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
+ u8 buf[GHASH_BLOCK_SIZE];
+ u32 count;
+};
+
+struct ghash_async_ctx {
+ struct cryptd_ahash *cryptd_tfm;
+};
+
+asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k, const char *head);
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ *ctx = (struct ghash_desc_ctx){};
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+ ctx->count += len;
+
+ if ((partial + len) >= GHASH_BLOCK_SIZE) {
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ int blocks;
+
+ if (partial) {
+ int p = GHASH_BLOCK_SIZE - partial;
+
+ memcpy(ctx->buf + partial, src, p);
+ src += p;
+ len -= p;
+ }
+
+ blocks = len / GHASH_BLOCK_SIZE;
+ len %= GHASH_BLOCK_SIZE;
+
+ kernel_neon_begin();
+ pmull_ghash_update(blocks, ctx->digest, src, key,
+ partial ? ctx->buf : NULL);
+ kernel_neon_end();
+ src += blocks * GHASH_BLOCK_SIZE;
+ partial = 0;
+ }
+ if (len)
+ memcpy(ctx->buf + partial, src, len);
+ return 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+ if (partial) {
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+
+ memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
+ kernel_neon_begin();
+ pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
+ kernel_neon_end();
+ }
+ put_unaligned_be64(ctx->digest[1], dst);
+ put_unaligned_be64(ctx->digest[0], dst + 8);
+
+ *ctx = (struct ghash_desc_ctx){};
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ghash_key *key = crypto_shash_ctx(tfm);
+ u64 a, b;
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* perform multiplication by 'x' in GF(2^128) */
+ b = get_unaligned_be64(inkey);
+ a = get_unaligned_be64(inkey + 8);
+
+ key->a = (a << 1) | (b >> 63);
+ key->b = (b << 1) | (a >> 63);
+
+ if (b >> 63)
+ key->b ^= 0xc200000000000000UL;
+
+ return 0;
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "__driver-ghash-ce",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_key),
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int ghash_async_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ if (!may_use_simd()) {
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_init(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
+
+ desc->tfm = child;
+ desc->flags = req->base.flags;
+ return crypto_shash_init(desc);
+ }
+}
+
+static int ghash_async_update(struct ahash_request *req)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+
+ if (!may_use_simd()) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_update(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ return shash_ahash_update(req, desc);
+ }
+}
+
+static int ghash_async_final(struct ahash_request *req)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+
+ if (!may_use_simd()) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_final(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ return crypto_shash_final(desc, req->result);
+ }
+}
+
+static int ghash_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ if (!may_use_simd()) {
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_digest(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
+
+ desc->tfm = child;
+ desc->flags = req->base.flags;
+ return shash_ahash_digest(req, desc);
+ }
+}
+
+static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_ahash *child = &ctx->cryptd_tfm->base;
+ int err;
+
+ crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
+ & CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
+ & CRYPTO_TFM_RES_MASK);
+
+ return err;
+}
+
+static int ghash_async_init_tfm(struct crypto_tfm *tfm)
+{
+ struct cryptd_ahash *cryptd_tfm;
+ struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ cryptd_tfm = cryptd_alloc_ahash("__driver-ghash-ce",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ctx->cryptd_tfm = cryptd_tfm;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(&cryptd_tfm->base));
+
+ return 0;
+}
+
+static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ cryptd_free_ahash(ctx->cryptd_tfm);
+}
+
+static struct ahash_alg ghash_async_alg = {
+ .init = ghash_async_init,
+ .update = ghash_async_update,
+ .final = ghash_async_final,
+ .setkey = ghash_async_setkey,
+ .digest = ghash_async_digest,
+ .halg.digestsize = GHASH_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_type = &crypto_ahash_type,
+ .cra_ctxsize = sizeof(struct ghash_async_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = ghash_async_init_tfm,
+ .cra_exit = ghash_async_exit_tfm,
+ },
+};
+
+static int __init ghash_ce_mod_init(void)
+{
+ int err;
+
+ if (!(elf_hwcap2 & HWCAP2_PMULL))
+ return -ENODEV;
+
+ err = crypto_register_shash(&ghash_alg);
+ if (err)
+ return err;
+ err = crypto_register_ahash(&ghash_async_alg);
+ if (err)
+ goto err_shash;
+
+ return 0;
+
+err_shash:
+ crypto_unregister_shash(&ghash_alg);
+ return err;
+}
+
+static void __exit ghash_ce_mod_exit(void)
+{
+ crypto_unregister_ahash(&ghash_async_alg);
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_ce_mod_init);
+module_exit(ghash_ce_mod_exit);
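
Callers never ask for "__driver-ghash-ce" or "ghash-ce" by name; they allocate the generic "ghash" and the crypto core picks the highest-priority usable provider (300 here, above the generic software implementation). A minimal usage sketch, with the function and buffer names invented for illustration; error handling is kept short and the asynchronous -EINPROGRESS completion path that an async tfm may take from crypto_ahash_digest() is not shown.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ghash_one_shot(const u8 key[16], const void *buf,
			  unsigned int len, u8 digest[16])
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);		/* buf must be linearly mapped */
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_ahash_digest(req);		/* may return -EINPROGRESS */

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}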
diff --git a/arch/arm/crypto/sha1-ce-core.S b/arch/arm/crypto/sha1-ce-core.S
new file mode 100644
index 000000000000..b623f51ccbcf
--- /dev/null
+++ b/arch/arm/crypto/sha1-ce-core.S
@@ -0,0 +1,125 @@
+/*
+ * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .fpu crypto-neon-fp-armv8
+
+ k0 .req q0
+ k1 .req q1
+ k2 .req q2
+ k3 .req q3
+
+ ta0 .req q4
+ ta1 .req q5
+ tb0 .req q5
+ tb1 .req q4
+
+ dga .req q6
+ dgb .req q7
+ dgbs .req s28
+
+ dg0 .req q12
+ dg1a0 .req q13
+ dg1a1 .req q14
+ dg1b0 .req q14
+ dg1b1 .req q13
+
+ .macro add_only, op, ev, rc, s0, dg1
+ .ifnb \s0
+ vadd.u32 tb\ev, q\s0, \rc
+ .endif
+ sha1h.32 dg1b\ev, dg0
+ .ifb \dg1
+ sha1\op\().32 dg0, dg1a\ev, ta\ev
+ .else
+ sha1\op\().32 dg0, \dg1, ta\ev
+ .endif
+ .endm
+
+ .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
+ sha1su0.32 q\s0, q\s1, q\s2
+ add_only \op, \ev, \rc, \s1, \dg1
+ sha1su1.32 q\s0, q\s3
+ .endm
+
+ .align 6
+.Lsha1_rcon:
+ .word 0x5a827999, 0x5a827999, 0x5a827999, 0x5a827999
+ .word 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1
+ .word 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc
+ .word 0xca62c1d6, 0xca62c1d6, 0xca62c1d6, 0xca62c1d6
+
+ /*
+ * void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ * int blocks);
+ */
+ENTRY(sha1_ce_transform)
+ /* load round constants */
+ adr ip, .Lsha1_rcon
+ vld1.32 {k0-k1}, [ip, :128]!
+ vld1.32 {k2-k3}, [ip, :128]
+
+ /* load state */
+ vld1.32 {dga}, [r0]
+ vldr dgbs, [r0, #16]
+
+ /* load input */
+0: vld1.32 {q8-q9}, [r1]!
+ vld1.32 {q10-q11}, [r1]!
+ subs r2, r2, #1
+
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ vrev32.8 q8, q8
+ vrev32.8 q9, q9
+ vrev32.8 q10, q10
+ vrev32.8 q11, q11
+#endif
+
+ vadd.u32 ta0, q8, k0
+ vmov dg0, dga
+
+ add_update c, 0, k0, 8, 9, 10, 11, dgb
+ add_update c, 1, k0, 9, 10, 11, 8
+ add_update c, 0, k0, 10, 11, 8, 9
+ add_update c, 1, k0, 11, 8, 9, 10
+ add_update c, 0, k1, 8, 9, 10, 11
+
+ add_update p, 1, k1, 9, 10, 11, 8
+ add_update p, 0, k1, 10, 11, 8, 9
+ add_update p, 1, k1, 11, 8, 9, 10
+ add_update p, 0, k1, 8, 9, 10, 11
+ add_update p, 1, k2, 9, 10, 11, 8
+
+ add_update m, 0, k2, 10, 11, 8, 9
+ add_update m, 1, k2, 11, 8, 9, 10
+ add_update m, 0, k2, 8, 9, 10, 11
+ add_update m, 1, k2, 9, 10, 11, 8
+ add_update m, 0, k3, 10, 11, 8, 9
+
+ add_update p, 1, k3, 11, 8, 9, 10
+ add_only p, 0, k3, 9
+ add_only p, 1, k3, 10
+ add_only p, 0, k3, 11
+ add_only p, 1
+
+ /* update state */
+ vadd.u32 dga, dga, dg0
+ vadd.u32 dgb, dgb, dg1a0
+ bne 0b
+
+ /* store new state */
+ vst1.32 {dga}, [r0]
+ vstr dgbs, [r0, #16]
+ bx lr
+ENDPROC(sha1_ce_transform)
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
new file mode 100644
index 000000000000..80bc2fcd241a
--- /dev/null
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -0,0 +1,96 @@
+/*
+ * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha1_base.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+#include "sha1.h"
+
+MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks);
+
+static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ if (!may_use_simd() ||
+ (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
+ return sha1_update_arm(desc, data, len);
+
+ kernel_neon_begin();
+ sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd())
+ return sha1_finup_arm(desc, data, len, out);
+
+ kernel_neon_begin();
+ if (len)
+ sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ sha1_base_do_finalize(desc, sha1_ce_transform);
+ kernel_neon_end();
+
+ return sha1_base_finish(desc, out);
+}
+
+static int sha1_ce_final(struct shash_desc *desc, u8 *out)
+{
+ return sha1_ce_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg alg = {
+ .init = sha1_base_init,
+ .update = sha1_ce_update,
+ .final = sha1_ce_final,
+ .finup = sha1_ce_finup,
+ .descsize = sizeof(struct sha1_state),
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha1_ce_mod_init(void)
+{
+ if (!(elf_hwcap2 & HWCAP2_SHA1))
+ return -ENODEV;
+ return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_ce_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(sha1_ce_mod_init);
+module_exit(sha1_ce_mod_fini);
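
The "sha1-ce" driver is likewise selected by priority rather than by name: allocating a plain "sha1" shash picks whichever registered provider has the highest priority, so this driver (priority 200) is preferred to the plain ARM asm and generic versions when the Crypto Extensions are available. A minimal synchronous sketch, with the function name and buffers invented for illustration:

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

static int sha1_one_shot(const u8 *data, unsigned int len,
			 u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}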
diff --git a/arch/arm/include/asm/crypto/sha1.h b/arch/arm/crypto/sha1.h
index 75e6a417416b..ffd8bd08b1a7 100644
--- a/arch/arm/include/asm/crypto/sha1.h
+++ b/arch/arm/crypto/sha1.h
@@ -7,4 +7,7 @@
extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
unsigned int len);
+extern int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
+
#endif
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index e31b0440c613..6fc73bf8766d 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -22,127 +22,47 @@
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
+#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
-#include <asm/crypto/sha1.h>
+#include "sha1.h"
asmlinkage void sha1_block_data_order(u32 *digest,
const unsigned char *data, unsigned int rounds);
-
-static int sha1_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
-
-
-static int __sha1_update(struct sha1_state *sctx, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- unsigned int done = 0;
-
- sctx->count += len;
-
- if (partial) {
- done = SHA1_BLOCK_SIZE - partial;
- memcpy(sctx->buffer + partial, data, done);
- sha1_block_data_order(sctx->state, sctx->buffer, 1);
- }
-
- if (len - done >= SHA1_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
- sha1_block_data_order(sctx->state, data + done, rounds);
- done += rounds * SHA1_BLOCK_SIZE;
- }
-
- memcpy(sctx->buffer, data + done, len - done);
- return 0;
-}
-
-
int sha1_update_arm(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
- int res;
+ /* make sure casting to sha1_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
- /* Handle the fast case right here */
- if (partial + len < SHA1_BLOCK_SIZE) {
- sctx->count += len;
- memcpy(sctx->buffer + partial, data, len);
- return 0;
- }
- res = __sha1_update(sctx, data, len, partial);
- return res;
+ return sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_block_data_order);
}
EXPORT_SYMBOL_GPL(sha1_update_arm);
-
-/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 and append length */
- index = sctx->count % SHA1_BLOCK_SIZE;
- padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
- /* We need to fill a whole block for __sha1_update() */
- if (padlen <= 56) {
- sctx->count += padlen;
- memcpy(sctx->buffer + index, padding, padlen);
- } else {
- __sha1_update(sctx, padding, padlen, index);
- }
- __sha1_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
- return 0;
+ sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order);
+ return sha1_base_finish(desc, out);
}
-
-static int sha1_export(struct shash_desc *desc, void *out)
+int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
+ sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_block_data_order);
+ return sha1_final(desc, out);
}
-
-
-static int sha1_import(struct shash_desc *desc, const void *in)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
+EXPORT_SYMBOL_GPL(sha1_finup_arm);
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_init,
+ .init = sha1_base_init,
.update = sha1_update_arm,
.final = sha1_final,
- .export = sha1_export,
- .import = sha1_import,
+ .finup = sha1_finup_arm,
.descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
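The rewrite above drops the open-coded buffering, padding and export/import handling in favour of the generic sha1_base helpers, which only need a block function. A simplified userspace sketch of the buffering pattern those helpers implement (illustrative only, not the actual <crypto/sha1_base.h> code):

/*
 * Simplified sketch of the buffering that sha1_base_do_update() performs
 * with the block function it is handed; the real helper also tracks the
 * message bit count and does the finalization padding.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef void (block_fn_sketch)(uint32_t state[5], const uint8_t *src,
			       size_t blocks);

struct sha1_sketch {
	uint32_t state[5];
	uint64_t count;
	uint8_t buffer[64];
};

static void do_update_sketch(struct sha1_sketch *s, const uint8_t *data,
			     size_t len, block_fn_sketch *block_fn)
{
	size_t partial = s->count % 64;

	s->count += len;

	if (partial) {
		size_t fill = 64 - partial;

		if (len < fill) {			/* still no full block */
			memcpy(s->buffer + partial, data, len);
			return;
		}
		memcpy(s->buffer + partial, data, fill);
		block_fn(s->state, s->buffer, 1);	/* flush the buffer */
		data += fill;
		len -= fill;
	}

	if (len >= 64) {				/* bulk data, no copying */
		size_t blocks = len / 64;

		block_fn(s->state, data, blocks);
		data += blocks * 64;
		len -= blocks * 64;
	}

	memcpy(s->buffer, data, len);			/* stash the tail */
}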
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 0b0083757d47..4e22f122f966 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -25,147 +25,60 @@
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
-#include <asm/byteorder.h>
+#include <crypto/sha1_base.h>
#include <asm/neon.h>
#include <asm/simd.h>
-#include <asm/crypto/sha1.h>
+#include "sha1.h"
asmlinkage void sha1_transform_neon(void *state_h, const char *data,
unsigned int rounds);
-
-static int sha1_neon_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
-
-static int __sha1_neon_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int done = 0;
-
- sctx->count += len;
-
- if (partial) {
- done = SHA1_BLOCK_SIZE - partial;
- memcpy(sctx->buffer + partial, data, done);
- sha1_transform_neon(sctx->state, sctx->buffer, 1);
- }
-
- if (len - done >= SHA1_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
-
- sha1_transform_neon(sctx->state, data + done, rounds);
- done += rounds * SHA1_BLOCK_SIZE;
- }
-
- memcpy(sctx->buffer, data + done, len - done);
-
- return 0;
-}
-
static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
- int res;
- /* Handle the fast case right here */
- if (partial + len < SHA1_BLOCK_SIZE) {
- sctx->count += len;
- memcpy(sctx->buffer + partial, data, len);
+ if (!may_use_simd() ||
+ (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
+ return sha1_update_arm(desc, data, len);
- return 0;
- }
-
- if (!may_use_simd()) {
- res = sha1_update_arm(desc, data, len);
- } else {
- kernel_neon_begin();
- res = __sha1_neon_update(desc, data, len, partial);
- kernel_neon_end();
- }
-
- return res;
-}
-
-
-/* Add padding and return the message digest. */
-static int sha1_neon_final(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 and append length */
- index = sctx->count % SHA1_BLOCK_SIZE;
- padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
- if (!may_use_simd()) {
- sha1_update_arm(desc, padding, padlen);
- sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits));
- } else {
- kernel_neon_begin();
- /* We need to fill a whole block for __sha1_neon_update() */
- if (padlen <= 56) {
- sctx->count += padlen;
- memcpy(sctx->buffer + index, padding, padlen);
- } else {
- __sha1_neon_update(desc, padding, padlen, index);
- }
- __sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56);
- kernel_neon_end();
- }
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
+ kernel_neon_begin();
+ sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_transform_neon);
+ kernel_neon_end();
return 0;
}
-static int sha1_neon_export(struct shash_desc *desc, void *out)
+static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
+ if (!may_use_simd())
+ return sha1_finup_arm(desc, data, len, out);
- memcpy(out, sctx, sizeof(*sctx));
+ kernel_neon_begin();
+ if (len)
+ sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_transform_neon);
+ sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon);
+ kernel_neon_end();
- return 0;
+ return sha1_base_finish(desc, out);
}
-static int sha1_neon_import(struct shash_desc *desc, const void *in)
+static int sha1_neon_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
+ return sha1_neon_finup(desc, NULL, 0, out);
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_neon_init,
+ .init = sha1_base_init,
.update = sha1_neon_update,
.final = sha1_neon_final,
- .export = sha1_neon_export,
- .import = sha1_neon_import,
+ .finup = sha1_neon_finup,
.descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-neon",
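The removed sha1_neon_final() carried its own copy of the SHA-1 finalization rules (append 0x80, pad to 56 mod 64, then append the 64-bit big-endian bit length), which sha1_base_do_finalize() now performs centrally. A stand-alone check of the pad-length arithmetic that code used (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define SHA1_BLOCK_SIZE 64

/* Same formula as the removed code: padding bytes including the 0x80
 * marker but excluding the trailing 8-byte length field. */
static unsigned int sha1_padlen(uint64_t count)
{
	unsigned int index = count % SHA1_BLOCK_SIZE;

	return (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE + 56) - index);
}

int main(void)
{
	assert(sha1_padlen(0)  == 56);	/* empty message: 0x80 + 55 zeros     */
	assert(sha1_padlen(55) == 1);	/* only the 0x80 byte still fits      */
	assert(sha1_padlen(56) == 64);	/* length no longer fits: extra block */
	assert(sha1_padlen(64) == 56);	/* block-aligned message              */
	return 0;
}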
diff --git a/arch/arm/crypto/sha2-ce-core.S b/arch/arm/crypto/sha2-ce-core.S
new file mode 100644
index 000000000000..87ec11a5f405
--- /dev/null
+++ b/arch/arm/crypto/sha2-ce-core.S
@@ -0,0 +1,125 @@
+/*
+ * sha2-ce-core.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .fpu crypto-neon-fp-armv8
+
+ k0 .req q7
+ k1 .req q8
+ rk .req r3
+
+ ta0 .req q9
+ ta1 .req q10
+ tb0 .req q10
+ tb1 .req q9
+
+ dga .req q11
+ dgb .req q12
+
+ dg0 .req q13
+ dg1 .req q14
+ dg2 .req q15
+
+ .macro add_only, ev, s0
+ vmov dg2, dg0
+ .ifnb \s0
+ vld1.32 {k\ev}, [rk, :128]!
+ .endif
+ sha256h.32 dg0, dg1, tb\ev
+ sha256h2.32 dg1, dg2, tb\ev
+ .ifnb \s0
+ vadd.u32 ta\ev, q\s0, k\ev
+ .endif
+ .endm
+
+ .macro add_update, ev, s0, s1, s2, s3
+ sha256su0.32 q\s0, q\s1
+ add_only \ev, \s1
+ sha256su1.32 q\s0, q\s2, q\s3
+ .endm
+
+ .align 6
+.Lsha256_rcon:
+ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+
+ /*
+ * void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+ * int blocks);
+ */
+ENTRY(sha2_ce_transform)
+ /* load state */
+ vld1.32 {dga-dgb}, [r0]
+
+ /* load input */
+0: vld1.32 {q0-q1}, [r1]!
+ vld1.32 {q2-q3}, [r1]!
+ subs r2, r2, #1
+
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ vrev32.8 q0, q0
+ vrev32.8 q1, q1
+ vrev32.8 q2, q2
+ vrev32.8 q3, q3
+#endif
+
+ /* load first round constant */
+ adr rk, .Lsha256_rcon
+ vld1.32 {k0}, [rk, :128]!
+
+ vadd.u32 ta0, q0, k0
+ vmov dg0, dga
+ vmov dg1, dgb
+
+ add_update 1, 0, 1, 2, 3
+ add_update 0, 1, 2, 3, 0
+ add_update 1, 2, 3, 0, 1
+ add_update 0, 3, 0, 1, 2
+ add_update 1, 0, 1, 2, 3
+ add_update 0, 1, 2, 3, 0
+ add_update 1, 2, 3, 0, 1
+ add_update 0, 3, 0, 1, 2
+ add_update 1, 0, 1, 2, 3
+ add_update 0, 1, 2, 3, 0
+ add_update 1, 2, 3, 0, 1
+ add_update 0, 3, 0, 1, 2
+
+ add_only 1, 1
+ add_only 0, 2
+ add_only 1, 3
+ add_only 0
+
+ /* update state */
+ vadd.u32 dga, dga, dg0
+ vadd.u32 dgb, dgb, dg1
+ bne 0b
+
+ /* store new state */
+ vst1.32 {dga-dgb}, [r0]
+ bx lr
+ENDPROC(sha2_ce_transform)
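The .Lsha256_rcon table above, like the K256 table in the generated code further down, holds the 64 SHA-256 round constants: the first 32 fractional bits of the cube roots of the first 64 primes. A small program to regenerate them for cross-checking (illustrative only; link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int found = 0;

	for (int n = 2; found < 64; n++) {
		int prime = 1;

		for (int d = 2; d * d <= n; d++)
			if (n % d == 0) {
				prime = 0;
				break;
			}
		if (!prime)
			continue;

		/* first 32 fractional bits of cbrt(prime) */
		double c = cbrt((double)n);
		uint32_t k = (uint32_t)((c - floor(c)) * 4294967296.0);

		printf("0x%08x%s", k, (++found % 4) ? ", " : ",\n");
	}
	return 0;
}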
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
new file mode 100644
index 000000000000..0755b2d657f3
--- /dev/null
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -0,0 +1,114 @@
+/*
+ * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/hwcap.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+
+#include "sha256_glue.h"
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+ int blocks);
+
+static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ if (!may_use_simd() ||
+ (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
+ return crypto_sha256_arm_update(desc, data, len);
+
+ kernel_neon_begin();
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha2_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd())
+ return crypto_sha256_arm_finup(desc, data, len, out);
+
+ kernel_neon_begin();
+ if (len)
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+ kernel_neon_end();
+
+ return sha256_base_finish(desc, out);
+}
+
+static int sha2_ce_final(struct shash_desc *desc, u8 *out)
+{
+ return sha2_ce_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg algs[] = { {
+ .init = sha224_base_init,
+ .update = sha2_ce_update,
+ .final = sha2_ce_final,
+ .finup = sha2_ce_finup,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .init = sha256_base_init,
+ .update = sha2_ce_update,
+ .final = sha2_ce_final,
+ .finup = sha2_ce_finup,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init sha2_ce_mod_init(void)
+{
+ if (!(elf_hwcap2 & HWCAP2_SHA2))
+ return -ENODEV;
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha2_ce_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha2_ce_mod_init);
+module_exit(sha2_ce_mod_fini);
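Both entries register under the generic names "sha224" and "sha256" with cra_priority 300, so callers that allocate the hash by name pick up the Crypto Extensions driver ahead of the lower-priority implementations whenever HWCAP2_SHA2 is present. A hedged sketch of such a caller using the long-standing shash API (error handling trimmed; small details such as the desc->flags field vary between kernel versions):

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

static int sha256_once(struct crypto_shash *tfm, const u8 *data,
		       unsigned int len, u8 out[SHA256_DIGEST_SIZE])
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	desc->flags = 0;	/* field present in kernels of this era */
	return crypto_shash_digest(desc, data, len, out);
}

static int sha256_digest_example(const u8 *data, unsigned int len,
				 u8 out[SHA256_DIGEST_SIZE])
{
	/* "sha256" resolves to the highest-priority registered driver,
	 * i.e. sha256-ce (priority 300) when HWCAP2_SHA2 is set. */
	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = sha256_once(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}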
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
new file mode 100644
index 000000000000..fac0533ea633
--- /dev/null
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -0,0 +1,716 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPL terms is granted.
+# ====================================================================
+
+# SHA256 block procedure for ARMv4. May 2007.
+
+# Performance is ~2x better than gcc 3.4 generated code and in "abso-
+# lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
+# byte [on single-issue Xscale PXA250 core].
+
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 22% improvement on
+# Cortex A8 core and ~20 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+# September 2013.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process one
+# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+# code (meaning that latter performs sub-optimally, nothing was done
+# about it).
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$ctx="r0"; $t0="r0";
+$inp="r1"; $t4="r1";
+$len="r2"; $t1="r2";
+$T1="r3"; $t3="r3";
+$A="r4";
+$B="r5";
+$C="r6";
+$D="r7";
+$E="r8";
+$F="r9";
+$G="r10";
+$H="r11";
+@V=($A,$B,$C,$D,$E,$F,$G,$H);
+$t2="r12";
+$Ktbl="r14";
+
+@Sigma0=( 2,13,22);
+@Sigma1=( 6,11,25);
+@sigma0=( 7,18, 3);
+@sigma1=(17,19,10);
+
+sub BODY_00_15 {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___ if ($i<16);
+#if __ARM_ARCH__>=7
+ @ ldr $t1,[$inp],#4 @ $i
+# if $i==15
+ str $inp,[sp,#17*4] @ make room for $t4
+# endif
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
+# ifndef __ARMEB__
+ rev $t1,$t1
+# endif
+#else
+ @ ldrb $t1,[$inp,#3] @ $i
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
+ ldrb $t2,[$inp,#2]
+ ldrb $t0,[$inp,#1]
+ orr $t1,$t1,$t2,lsl#8
+ ldrb $t2,[$inp],#4
+ orr $t1,$t1,$t0,lsl#16
+# if $i==15
+ str $inp,[sp,#17*4] @ make room for $t4
+# endif
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+ orr $t1,$t1,$t2,lsl#24
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
+#endif
+___
+$code.=<<___;
+ ldr $t2,[$Ktbl],#4 @ *K256++
+ add $h,$h,$t1 @ h+=X[i]
+ str $t1,[sp,#`$i%16`*4]
+ eor $t1,$f,$g
+ add $h,$h,$t0,ror#$Sigma1[0] @ h+=Sigma1(e)
+ and $t1,$t1,$e
+ add $h,$h,$t2 @ h+=K256[i]
+ eor $t1,$t1,$g @ Ch(e,f,g)
+ eor $t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
+ add $h,$h,$t1 @ h+=Ch(e,f,g)
+#if $i==31
+ and $t2,$t2,#0xff
+ cmp $t2,#0xf2 @ done?
+#endif
+#if $i<15
+# if __ARM_ARCH__>=7
+ ldr $t1,[$inp],#4 @ prefetch
+# else
+ ldrb $t1,[$inp,#3]
+# endif
+ eor $t2,$a,$b @ a^b, b^c in next round
+#else
+ ldr $t1,[sp,#`($i+2)%16`*4] @ from future BODY_16_xx
+ eor $t2,$a,$b @ a^b, b^c in next round
+ ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx
+#endif
+ eor $t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]` @ Sigma0(a)
+ and $t3,$t3,$t2 @ (b^c)&=(a^b)
+ add $d,$d,$h @ d+=h
+ eor $t3,$t3,$b @ Maj(a,b,c)
+ add $h,$h,$t0,ror#$Sigma0[0] @ h+=Sigma0(a)
+ @ add $h,$h,$t3 @ h+=Maj(a,b,c)
+___
+ ($t2,$t3)=($t3,$t2);
+}
+
+sub BODY_16_XX {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___;
+ @ ldr $t1,[sp,#`($i+1)%16`*4] @ $i
+ @ ldr $t4,[sp,#`($i+14)%16`*4]
+ mov $t0,$t1,ror#$sigma0[0]
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
+ mov $t2,$t4,ror#$sigma1[0]
+ eor $t0,$t0,$t1,ror#$sigma0[1]
+ eor $t2,$t2,$t4,ror#$sigma1[1]
+ eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1])
+ ldr $t1,[sp,#`($i+0)%16`*4]
+ eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14])
+ ldr $t4,[sp,#`($i+9)%16`*4]
+
+ add $t2,$t2,$t0
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]` @ from BODY_00_15
+ add $t1,$t1,$t2
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
+ add $t1,$t1,$t4 @ X[i]
+___
+ &BODY_00_15(@_);
+}
+
+$code=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code 32
+#else
+.syntax unified
+# ifdef __thumb2__
+# define adrl adr
+.thumb
+# else
+.code 32
+# endif
+#endif
+
+.type K256,%object
+.align 5
+K256:
+.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size K256,.-K256
+.word 0 @ terminator
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha256_block_data_order
+#endif
+.align 5
+
+.global sha256_block_data_order
+.type sha256_block_data_order,%function
+sha256_block_data_order:
+#if __ARM_ARCH__<7
+ sub r3,pc,#8 @ sha256_block_data_order
+#else
+ adr r3,sha256_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA256
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
+ add $len,$inp,$len,lsl#6 @ len to point at the end of inp
+ stmdb sp!,{$ctx,$inp,$len,r4-r11,lr}
+ ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
+ sub $Ktbl,r3,#256+32 @ K256
+ sub sp,sp,#16*4 @ alloca(X[16])
+.Loop:
+# if __ARM_ARCH__>=7
+ ldr $t1,[$inp],#4
+# else
+ ldrb $t1,[$inp,#3]
+# endif
+ eor $t3,$B,$C @ magic
+ eor $t2,$t2,$t2
+___
+for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=".Lrounds_16_xx:\n";
+for (;$i<32;$i++) { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+#if __ARM_ARCH__>=7
+ ite eq @ Thumb2 thing, sanity check in ARM
+#endif
+ ldreq $t3,[sp,#16*4] @ pull ctx
+ bne .Lrounds_16_xx
+
+ add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
+ ldr $t0,[$t3,#0]
+ ldr $t1,[$t3,#4]
+ ldr $t2,[$t3,#8]
+ add $A,$A,$t0
+ ldr $t0,[$t3,#12]
+ add $B,$B,$t1
+ ldr $t1,[$t3,#16]
+ add $C,$C,$t2
+ ldr $t2,[$t3,#20]
+ add $D,$D,$t0
+ ldr $t0,[$t3,#24]
+ add $E,$E,$t1
+ ldr $t1,[$t3,#28]
+ add $F,$F,$t2
+ ldr $inp,[sp,#17*4] @ pull inp
+ ldr $t2,[sp,#18*4] @ pull inp+len
+ add $G,$G,$t0
+ add $H,$H,$t1
+ stmia $t3,{$A,$B,$C,$D,$E,$F,$G,$H}
+ cmp $inp,$t2
+ sub $Ktbl,$Ktbl,#256 @ rewind Ktbl
+ bne .Loop
+
+ add sp,sp,#`16+3`*4 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r11,pc}
+#else
+ ldmia sp!,{r4-r11,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size sha256_block_data_order,.-sha256_block_data_order
+___
+######################################################################
+# NEON stuff
+#
+{{{
+my @X=map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
+my $Xfer=$t4;
+my $j=0;
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+sub Xupdate()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ &vext_8 ($T0,@X[0],@X[1],4); # X[1..4]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vext_8 ($T1,@X[2],@X[3],4); # X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T2,$T0,$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T1,$T0,$sigma0[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T2,$T0,32-$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T3,$T0,$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T1,$T1,$T2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T3,$T0,32-$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T1,$T1,$T3); # sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T5,&Dhi(@X[3]),$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4); # sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T5,&Dlo(@X[0]),$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$T0}","[$Ktbl,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4); # sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 ($T0,$T0,@X[0]);
+ while($#insns>=2) { eval(shift(@insns)); }
+ &vst1_32 ("{$T0}","[$Xfer,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xpreload()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$T0}","[$Ktbl,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vrev32_8 (@X[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 ($T0,$T0,@X[0]);
+ foreach (@insns) { eval; } # remaining instructions
+ &vst1_32 ("{$T0}","[$Xfer,:128]!");
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub body_00_15 () {
+ (
+ '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+ '&add ($h,$h,$t1)', # h+=X[i]+K[i]
+ '&eor ($t1,$f,$g)',
+ '&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+ '&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past
+ '&and ($t1,$t1,$e)',
+ '&eor ($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e)
+ '&eor ($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+ '&eor ($t1,$t1,$g)', # Ch(e,f,g)
+ '&add ($h,$h,$t2,"ror#$Sigma1[0]")', # h+=Sigma1(e)
+ '&eor ($t2,$a,$b)', # a^b, b^c in next round
+ '&eor ($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a)
+ '&add ($h,$h,$t1)', # h+=Ch(e,f,g)
+ '&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'.
+ '&ldr ($t1,"[$Ktbl]") if ($j==15);'.
+ '&ldr ($t1,"[sp,#64]") if ($j==31)',
+ '&and ($t3,$t3,$t2)', # (b^c)&=(a^b)
+ '&add ($d,$d,$h)', # d+=h
+ '&add ($h,$h,$t0,"ror#$Sigma0[0]");'. # h+=Sigma0(a)
+ '&eor ($t3,$t3,$b)', # Maj(a,b,c)
+ '$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+ )
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.global sha256_block_data_order_neon
+.type sha256_block_data_order_neon,%function
+.align 4
+sha256_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+
+ sub $H,sp,#16*4+16
+ adrl $Ktbl,K256
+ bic $H,$H,#15 @ align for 128-bit stores
+ mov $t2,sp
+ mov sp,$H @ alloca
+ add $len,$inp,$len,lsl#6 @ len to point at the end of inp
+
+ vld1.8 {@X[0]},[$inp]!
+ vld1.8 {@X[1]},[$inp]!
+ vld1.8 {@X[2]},[$inp]!
+ vld1.8 {@X[3]},[$inp]!
+ vld1.32 {$T0},[$Ktbl,:128]!
+ vld1.32 {$T1},[$Ktbl,:128]!
+ vld1.32 {$T2},[$Ktbl,:128]!
+ vld1.32 {$T3},[$Ktbl,:128]!
+ vrev32.8 @X[0],@X[0] @ yes, even on
+ str $ctx,[sp,#64]
+ vrev32.8 @X[1],@X[1] @ big-endian
+ str $inp,[sp,#68]
+ mov $Xfer,sp
+ vrev32.8 @X[2],@X[2]
+ str $len,[sp,#72]
+ vrev32.8 @X[3],@X[3]
+ str $t2,[sp,#76] @ save original sp
+ vadd.i32 $T0,$T0,@X[0]
+ vadd.i32 $T1,$T1,@X[1]
+ vst1.32 {$T0},[$Xfer,:128]!
+ vadd.i32 $T2,$T2,@X[2]
+ vst1.32 {$T1},[$Xfer,:128]!
+ vadd.i32 $T3,$T3,@X[3]
+ vst1.32 {$T2},[$Xfer,:128]!
+ vst1.32 {$T3},[$Xfer,:128]!
+
+ ldmia $ctx,{$A-$H}
+ sub $Xfer,$Xfer,#64
+ ldr $t1,[sp,#0]
+ eor $t2,$t2,$t2
+ eor $t3,$B,$C
+ b .L_00_48
+
+.align 4
+.L_00_48:
+___
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+$code.=<<___;
+ teq $t1,#0 @ check for K256 terminator
+ ldr $t1,[sp,#0]
+ sub $Xfer,$Xfer,#64
+ bne .L_00_48
+
+ ldr $inp,[sp,#68]
+ ldr $t0,[sp,#72]
+ sub $Ktbl,$Ktbl,#256 @ rewind $Ktbl
+ teq $inp,$t0
+ it eq
+ subeq $inp,$inp,#64 @ avoid SEGV
+ vld1.8 {@X[0]},[$inp]! @ load next input block
+ vld1.8 {@X[1]},[$inp]!
+ vld1.8 {@X[2]},[$inp]!
+ vld1.8 {@X[3]},[$inp]!
+ it ne
+ strne $inp,[sp,#68]
+ mov $Xfer,sp
+___
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+$code.=<<___;
+ ldr $t0,[$t1,#0]
+ add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
+ ldr $t2,[$t1,#4]
+ ldr $t3,[$t1,#8]
+ ldr $t4,[$t1,#12]
+ add $A,$A,$t0 @ accumulate
+ ldr $t0,[$t1,#16]
+ add $B,$B,$t2
+ ldr $t2,[$t1,#20]
+ add $C,$C,$t3
+ ldr $t3,[$t1,#24]
+ add $D,$D,$t4
+ ldr $t4,[$t1,#28]
+ add $E,$E,$t0
+ str $A,[$t1],#4
+ add $F,$F,$t2
+ str $B,[$t1],#4
+ add $G,$G,$t3
+ str $C,[$t1],#4
+ add $H,$H,$t4
+ str $D,[$t1],#4
+ stmia $t1,{$E-$H}
+
+ ittte ne
+ movne $Xfer,sp
+ ldrne $t1,[sp,#0]
+ eorne $t2,$t2,$t2
+ ldreq sp,[sp,#76] @ restore original sp
+ itt ne
+ eorne $t3,$B,$C
+ bne .L_00_48
+
+ ldmia sp!,{r4-r12,pc}
+.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+___
+}}}
+######################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
+my @MSG=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
+my $Ktbl="r3";
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+
+# ifdef __thumb2__
+# define INST(a,b,c,d) .byte c,d|0xc,a,b
+# else
+# define INST(a,b,c,d) .byte a,b,c,d
+# endif
+
+.type sha256_block_data_order_armv8,%function
+.align 5
+sha256_block_data_order_armv8:
+.LARMv8:
+ vld1.32 {$ABCD,$EFGH},[$ctx]
+# ifdef __thumb2__
+ adr $Ktbl,.LARMv8
+ sub $Ktbl,$Ktbl,#.LARMv8-K256
+# else
+ adrl $Ktbl,K256
+# endif
+ add $len,$inp,$len,lsl#6 @ len to point at the end of inp
+
+.Loop_v8:
+ vld1.8 {@MSG[0]-@MSG[1]},[$inp]!
+ vld1.8 {@MSG[2]-@MSG[3]},[$inp]!
+ vld1.32 {$W0},[$Ktbl]!
+ vrev32.8 @MSG[0],@MSG[0]
+ vrev32.8 @MSG[1],@MSG[1]
+ vrev32.8 @MSG[2],@MSG[2]
+ vrev32.8 @MSG[3],@MSG[3]
+ vmov $ABCD_SAVE,$ABCD @ offload
+ vmov $EFGH_SAVE,$EFGH
+ teq $inp,$len
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+ vld1.32 {$W1},[$Ktbl]!
+ vadd.i32 $W0,$W0,@MSG[0]
+ sha256su0 @MSG[0],@MSG[1]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+ sha256su1 @MSG[0],@MSG[2],@MSG[3]
+___
+ ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ vld1.32 {$W1},[$Ktbl]!
+ vadd.i32 $W0,$W0,@MSG[0]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ vld1.32 {$W0},[$Ktbl]!
+ vadd.i32 $W1,$W1,@MSG[1]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ vld1.32 {$W1},[$Ktbl]
+ vadd.i32 $W0,$W0,@MSG[2]
+ sub $Ktbl,$Ktbl,#256-16 @ rewind
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ vadd.i32 $W1,$W1,@MSG[3]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ vadd.i32 $ABCD,$ABCD,$ABCD_SAVE
+ vadd.i32 $EFGH,$EFGH,$EFGH_SAVE
+ it ne
+ bne .Loop_v8
+
+ vst1.32 {$ABCD,$EFGH},[$ctx]
+
+ ret @ bx lr
+.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+open SELF,$0;
+while(<SELF>) {
+ next if (/^#!/);
+ last if (!s/^#/@/ and !/^$/);
+ print;
+}
+close SELF;
+
+{ my %opcode = (
+ "sha256h" => 0xf3000c40, "sha256h2" => 0xf3100c40,
+ "sha256su0" => 0xf3ba03c0, "sha256su1" => 0xf3200c40 );
+
+ sub unsha256 {
+ my ($mnemonic,$arg)=@_;
+
+ if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+ my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+ |(($2&7)<<17)|(($2&8)<<4)
+ |(($3&7)<<1) |(($3&8)<<2);
+ # since ARMv7 instructions are always encoded little-endian.
+ # correct solution is to use .inst directive, but older
+ # assemblers don't implement it:-(
+ sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
+ $word&0xff,($word>>8)&0xff,
+ ($word>>16)&0xff,($word>>24)&0xff,
+ $mnemonic,$arg;
+ }
+ }
+}
+
+foreach (split($/,$code)) {
+
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;
+
+ s/\bret\b/bx lr/go or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+
+close STDOUT; # enforce flush
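Inside the kernel the OPENSSL_armcap_P probing above is compiled out (__KERNEL__ is defined), so choosing between sha256_block_data_order and sha256_block_data_order_neon is left to C glue code. A hedged sketch of that dispatch, not the actual glue from this series:

#include <crypto/sha.h>
#include <linux/linkage.h>
#include <asm/neon.h>
#include <asm/simd.h>

/* Entry points emitted by sha256-armv4.pl (third argument is a block
 * count; the asm multiplies it by 64 itself). */
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
					unsigned int num_blks);
asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
					     unsigned int num_blks);

static void sha256_blocks_sketch(struct sha256_state *sst, const u8 *src,
				 int blocks)
{
	if (may_use_simd()) {
		kernel_neon_begin();
		sha256_block_data_order_neon(sst->state, src, blocks);
		kernel_neon_end();
	} else {
		sha256_block_data_order(sst->state, src, blocks);
	}
}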
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
new file mode 100644
index 000000000000..555a1a8eec90
--- /dev/null
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -0,0 +1,2808 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Permission to use under GPL terms is granted.
+@ ====================================================================
+
+@ SHA256 block procedure for ARMv4. May 2007.
+
+@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
+@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
+@ byte [on single-issue Xscale PXA250 core].
+
+@ July 2010.
+@
+@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
+@ Cortex A8 core and ~20 cycles per processed byte.
+
+@ February 2011.
+@
+@ Profiler-assisted and platform-specific optimization resulted in 16%
+@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+@ September 2013.
+@
+@ Add NEON implementation. On Cortex A8 it was measured to process one
+@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+@ code (meaning that latter performs sub-optimally, nothing was done
+@ about it).
+
+@ May 2014.
+@
+@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code 32
+#else
+.syntax unified
+# ifdef __thumb2__
+# define adrl adr
+.thumb
+# else
+.code 32
+# endif
+#endif
+
+.type K256,%object
+.align 5
+K256:
+.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size K256,.-K256
+.word 0 @ terminator
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha256_block_data_order
+#endif
+.align 5
+
+.global sha256_block_data_order
+.type sha256_block_data_order,%function
+sha256_block_data_order:
+#if __ARM_ARCH__<7
+ sub r3,pc,#8 @ sha256_block_data_order
+#else
+ adr r3,sha256_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA256
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
+ stmdb sp!,{r0,r1,r2,r4-r11,lr}
+ ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
+ sub r14,r3,#256+32 @ K256
+ sub sp,sp,#16*4 @ alloca(X[16])
+.Loop:
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ magic
+ eor r12,r12,r12
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 0
+# if 0==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 0
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 0==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#0*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 0==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 0<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#2*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#15*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 1
+# if 1==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 1
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 1==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#1*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 1==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 1<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#3*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#0*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 2
+# if 2==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 2
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 2==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#2*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 2==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 2<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#4*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#1*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 3
+# if 3==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 3
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 3==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#3*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 3==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 3<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#5*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#2*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 4
+# if 4==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 4
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 4==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#4*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 4==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 4<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#6*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#3*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 5
+# if 5==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 5
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 5==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#5*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 5==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 5<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#7*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#4*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 6
+# if 6==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 6
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 6==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#6*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 6==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 6<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#8*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#5*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 7
+# if 7==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 7
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 7==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#7*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 7==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 7<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#9*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#6*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 8
+# if 8==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 8
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 8==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#8*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 8==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 8<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#10*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#7*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 9
+# if 9==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 9
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 9==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#9*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 9==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 9<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#11*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#8*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 10
+# if 10==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 10
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 10==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#10*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 10==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 10<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#12*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#9*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 11
+# if 11==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 11
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 11==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#11*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 11==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 11<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#13*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#10*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 12
+# if 12==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 12
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 12==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#12*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 12==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 12<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#14*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#11*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 13
+# if 13==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 13
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 13==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#13*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 13==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 13<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#15*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#12*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 14
+# if 14==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 14
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 14==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#14*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 14==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 14<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#0*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#13*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 15
+# if 15==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
+ rev r2,r2
+# endif
+#else
+ @ ldrb r2,[r1,#3] @ 15
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 15==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#15*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 15==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 15<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#1*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#14*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+.Lrounds_16_xx:
+ @ ldr r2,[sp,#1*4] @ 16
+ @ ldr r1,[sp,#14*4]
+ mov r0,r2,ror#7
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#0*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#9*4]
+
+ add r12,r12,r0
+ eor r0,r8,r8,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#0*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 16==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 16<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#2*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#15*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#2*4] @ 17
+ @ ldr r1,[sp,#15*4]
+ mov r0,r2,ror#7
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#1*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#10*4]
+
+ add r3,r3,r0
+ eor r0,r7,r7,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#1*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 17==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 17<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#3*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#0*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#3*4] @ 18
+ @ ldr r1,[sp,#0*4]
+ mov r0,r2,ror#7
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#2*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#11*4]
+
+ add r12,r12,r0
+ eor r0,r6,r6,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#2*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 18==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 18<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#4*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#1*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#4*4] @ 19
+ @ ldr r1,[sp,#1*4]
+ mov r0,r2,ror#7
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#3*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#12*4]
+
+ add r3,r3,r0
+ eor r0,r5,r5,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#3*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 19==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 19<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#5*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#2*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#5*4] @ 20
+ @ ldr r1,[sp,#2*4]
+ mov r0,r2,ror#7
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#4*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#13*4]
+
+ add r12,r12,r0
+ eor r0,r4,r4,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#4*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 20==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 20<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#6*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#3*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#6*4] @ 21
+ @ ldr r1,[sp,#3*4]
+ mov r0,r2,ror#7
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#5*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#14*4]
+
+ add r3,r3,r0
+ eor r0,r11,r11,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#5*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 21==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 21<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#7*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#4*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#7*4] @ 22
+ @ ldr r1,[sp,#4*4]
+ mov r0,r2,ror#7
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#6*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#15*4]
+
+ add r12,r12,r0
+ eor r0,r10,r10,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#6*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 22==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 22<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#8*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#5*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#8*4] @ 23
+ @ ldr r1,[sp,#5*4]
+ mov r0,r2,ror#7
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#7*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#0*4]
+
+ add r3,r3,r0
+ eor r0,r9,r9,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#7*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 23==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 23<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#9*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#6*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#9*4] @ 24
+ @ ldr r1,[sp,#6*4]
+ mov r0,r2,ror#7
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#8*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#1*4]
+
+ add r12,r12,r0
+ eor r0,r8,r8,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#8*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 24==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 24<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#10*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#7*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#10*4] @ 25
+ @ ldr r1,[sp,#7*4]
+ mov r0,r2,ror#7
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#9*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#2*4]
+
+ add r3,r3,r0
+ eor r0,r7,r7,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#9*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 25==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 25<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#11*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#8*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#11*4] @ 26
+ @ ldr r1,[sp,#8*4]
+ mov r0,r2,ror#7
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#10*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#3*4]
+
+ add r12,r12,r0
+ eor r0,r6,r6,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#10*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 26==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 26<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#12*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#9*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#12*4] @ 27
+ @ ldr r1,[sp,#9*4]
+ mov r0,r2,ror#7
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#11*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#4*4]
+
+ add r3,r3,r0
+ eor r0,r5,r5,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#11*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 27==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 27<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#13*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#10*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#13*4] @ 28
+ @ ldr r1,[sp,#10*4]
+ mov r0,r2,ror#7
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#12*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#5*4]
+
+ add r12,r12,r0
+ eor r0,r4,r4,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#12*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 28==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 28<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#14*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#11*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#14*4] @ 29
+ @ ldr r1,[sp,#11*4]
+ mov r0,r2,ror#7
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#13*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#6*4]
+
+ add r3,r3,r0
+ eor r0,r11,r11,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#13*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 29==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 29<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#15*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#12*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#15*4] @ 30
+ @ ldr r1,[sp,#12*4]
+ mov r0,r2,ror#7
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#14*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#7*4]
+
+ add r12,r12,r0
+ eor r0,r10,r10,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#14*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 30==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 30<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#0*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#13*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#0*4] @ 31
+ @ ldr r1,[sp,#13*4]
+ mov r0,r2,ror#7
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#15*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#8*4]
+
+ add r3,r3,r0
+ eor r0,r9,r9,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#15*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 31==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 31<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#1*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#14*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ ite eq @ Thumb2 thing, sanity check in ARM
+#endif
+ ldreq r3,[sp,#16*4] @ pull ctx
+ bne .Lrounds_16_xx
+
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldr r0,[r3,#0]
+ ldr r2,[r3,#4]
+ ldr r12,[r3,#8]
+ add r4,r4,r0
+ ldr r0,[r3,#12]
+ add r5,r5,r2
+ ldr r2,[r3,#16]
+ add r6,r6,r12
+ ldr r12,[r3,#20]
+ add r7,r7,r0
+ ldr r0,[r3,#24]
+ add r8,r8,r2
+ ldr r2,[r3,#28]
+ add r9,r9,r12
+ ldr r1,[sp,#17*4] @ pull inp
+ ldr r12,[sp,#18*4] @ pull inp+len
+ add r10,r10,r0
+ add r11,r11,r2
+ stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
+ cmp r1,r12
+ sub r14,r14,#256 @ rewind Ktbl
+ bne .Loop
+
+ add sp,sp,#19*4 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r11,pc}
+#else
+ ldmia sp!,{r4-r11,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size sha256_block_data_order,.-sha256_block_data_order
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.global sha256_block_data_order_neon
+.type sha256_block_data_order_neon,%function
+.align 4
+sha256_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+
+ sub r11,sp,#16*4+16
+ adrl r14,K256
+ bic r11,r11,#15 @ align for 128-bit stores
+ mov r12,sp
+ mov sp,r11 @ alloca
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
+
+ vld1.8 {q0},[r1]!
+ vld1.8 {q1},[r1]!
+ vld1.8 {q2},[r1]!
+ vld1.8 {q3},[r1]!
+ vld1.32 {q8},[r14,:128]!
+ vld1.32 {q9},[r14,:128]!
+ vld1.32 {q10},[r14,:128]!
+ vld1.32 {q11},[r14,:128]!
+ vrev32.8 q0,q0 @ yes, even on
+ str r0,[sp,#64]
+ vrev32.8 q1,q1 @ big-endian
+ str r1,[sp,#68]
+ mov r1,sp
+ vrev32.8 q2,q2
+ str r2,[sp,#72]
+ vrev32.8 q3,q3
+ str r12,[sp,#76] @ save original sp
+ vadd.i32 q8,q8,q0
+ vadd.i32 q9,q9,q1
+ vst1.32 {q8},[r1,:128]!
+ vadd.i32 q10,q10,q2
+ vst1.32 {q9},[r1,:128]!
+ vadd.i32 q11,q11,q3
+ vst1.32 {q10},[r1,:128]!
+ vst1.32 {q11},[r1,:128]!
+
+ ldmia r0,{r4-r11}
+ sub r1,r1,#64
+ ldr r2,[sp,#0]
+ eor r12,r12,r12
+ eor r3,r5,r6
+ b .L_00_48
+
+.align 4
+.L_00_48:
+ vext.8 q8,q0,q1,#4
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ vext.8 q9,q2,q3,#4
+ add r4,r4,r12
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vadd.i32 q0,q0,q9
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#4]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ veor q9,q9,q10
+ add r10,r10,r2
+ vsli.32 q11,q8,#14
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ vshr.u32 d24,d7,#17
+ add r11,r11,r3
+ and r2,r2,r7
+ veor q9,q9,q11
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ vsli.32 d24,d7,#15
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ vshr.u32 d25,d7,#10
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ vadd.i32 q0,q0,q9
+ add r10,r10,r2
+ ldr r2,[sp,#8]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r6,r6,r10
+ vshr.u32 d24,d7,#19
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ vsli.32 d24,d7,#13
+ add r9,r9,r2
+ eor r2,r7,r8
+ veor d25,d25,d24
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ vadd.i32 d0,d0,d25
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ vshr.u32 d24,d0,#17
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ vsli.32 d24,d0,#15
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ vshr.u32 d25,d0,#10
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#12]
+ and r3,r3,r12
+ vshr.u32 d24,d0,#19
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ vld1.32 {q8},[r14,:128]!
+ add r8,r8,r2
+ vsli.32 d24,d0,#13
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ veor d25,d25,d24
+ add r9,r9,r3
+ and r2,r2,r5
+ vadd.i32 d1,d1,d25
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ vadd.i32 q8,q8,q0
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#16]
+ and r12,r12,r3
+ add r4,r4,r8
+ vst1.32 {q8},[r1,:128]!
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vext.8 q8,q1,q2,#4
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ vext.8 q9,q3,q0,#4
+ add r8,r8,r12
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vadd.i32 q1,q1,q9
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#20]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ veor q9,q9,q10
+ add r6,r6,r2
+ vsli.32 q11,q8,#14
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ vshr.u32 d24,d1,#17
+ add r7,r7,r3
+ and r2,r2,r11
+ veor q9,q9,q11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ vsli.32 d24,d1,#15
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ vshr.u32 d25,d1,#10
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ vadd.i32 q1,q1,q9
+ add r6,r6,r2
+ ldr r2,[sp,#24]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r10,r10,r6
+ vshr.u32 d24,d1,#19
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ vsli.32 d24,d1,#13
+ add r5,r5,r2
+ eor r2,r11,r4
+ veor d25,d25,d24
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ vadd.i32 d2,d2,d25
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ vshr.u32 d24,d2,#17
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ vsli.32 d24,d2,#15
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ vshr.u32 d25,d2,#10
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#28]
+ and r3,r3,r12
+ vshr.u32 d24,d2,#19
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ vld1.32 {q8},[r14,:128]!
+ add r4,r4,r2
+ vsli.32 d24,d2,#13
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ veor d25,d25,d24
+ add r5,r5,r3
+ and r2,r2,r9
+ vadd.i32 d3,d3,d25
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ vadd.i32 q8,q8,q1
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[sp,#32]
+ and r12,r12,r3
+ add r8,r8,r4
+ vst1.32 {q8},[r1,:128]!
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ vext.8 q8,q2,q3,#4
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ vext.8 q9,q0,q1,#4
+ add r4,r4,r12
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vadd.i32 q2,q2,q9
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#36]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ veor q9,q9,q10
+ add r10,r10,r2
+ vsli.32 q11,q8,#14
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ vshr.u32 d24,d3,#17
+ add r11,r11,r3
+ and r2,r2,r7
+ veor q9,q9,q11
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ vsli.32 d24,d3,#15
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ vshr.u32 d25,d3,#10
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ vadd.i32 q2,q2,q9
+ add r10,r10,r2
+ ldr r2,[sp,#40]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r6,r6,r10
+ vshr.u32 d24,d3,#19
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ vsli.32 d24,d3,#13
+ add r9,r9,r2
+ eor r2,r7,r8
+ veor d25,d25,d24
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ vadd.i32 d4,d4,d25
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ vshr.u32 d24,d4,#17
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ vsli.32 d24,d4,#15
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ vshr.u32 d25,d4,#10
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#44]
+ and r3,r3,r12
+ vshr.u32 d24,d4,#19
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ vld1.32 {q8},[r14,:128]!
+ add r8,r8,r2
+ vsli.32 d24,d4,#13
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ veor d25,d25,d24
+ add r9,r9,r3
+ and r2,r2,r5
+ vadd.i32 d5,d5,d25
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ vadd.i32 q8,q8,q2
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#48]
+ and r12,r12,r3
+ add r4,r4,r8
+ vst1.32 {q8},[r1,:128]!
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vext.8 q8,q3,q0,#4
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ vext.8 q9,q1,q2,#4
+ add r8,r8,r12
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vadd.i32 q3,q3,q9
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#52]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ veor q9,q9,q10
+ add r6,r6,r2
+ vsli.32 q11,q8,#14
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ vshr.u32 d24,d5,#17
+ add r7,r7,r3
+ and r2,r2,r11
+ veor q9,q9,q11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ vsli.32 d24,d5,#15
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ vshr.u32 d25,d5,#10
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ vadd.i32 q3,q3,q9
+ add r6,r6,r2
+ ldr r2,[sp,#56]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r10,r10,r6
+ vshr.u32 d24,d5,#19
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ vsli.32 d24,d5,#13
+ add r5,r5,r2
+ eor r2,r11,r4
+ veor d25,d25,d24
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ vadd.i32 d6,d6,d25
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ vshr.u32 d24,d6,#17
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ vsli.32 d24,d6,#15
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ vshr.u32 d25,d6,#10
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#60]
+ and r3,r3,r12
+ vshr.u32 d24,d6,#19
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ vld1.32 {q8},[r14,:128]!
+ add r4,r4,r2
+ vsli.32 d24,d6,#13
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ veor d25,d25,d24
+ add r5,r5,r3
+ and r2,r2,r9
+ vadd.i32 d7,d7,d25
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ vadd.i32 q8,q8,q3
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[r14]
+ and r12,r12,r3
+ add r8,r8,r4
+ vst1.32 {q8},[r1,:128]!
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ teq r2,#0 @ check for K256 terminator
+ ldr r2,[sp,#0]
+ sub r1,r1,#64
+ bne .L_00_48
+
+ ldr r1,[sp,#68]
+ ldr r0,[sp,#72]
+ sub r14,r14,#256 @ rewind r14
+ teq r1,r0
+ it eq
+ subeq r1,r1,#64 @ avoid SEGV
+ vld1.8 {q0},[r1]! @ load next input block
+ vld1.8 {q1},[r1]!
+ vld1.8 {q2},[r1]!
+ vld1.8 {q3},[r1]!
+ it ne
+ strne r1,[sp,#68]
+ mov r1,sp
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vrev32.8 q0,q0
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vadd.i32 q8,q8,q0
+ ldr r2,[sp,#4]
+ and r3,r3,r12
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ add r10,r10,r2
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3
+ and r2,r2,r7
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ add r10,r10,r2
+ ldr r2,[sp,#8]
+ and r12,r12,r3
+ add r6,r6,r10
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ add r9,r9,r2
+ eor r2,r7,r8
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ ldr r2,[sp,#12]
+ and r3,r3,r12
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ add r8,r8,r2
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3
+ and r2,r2,r5
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#16]
+ and r12,r12,r3
+ add r4,r4,r8
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vst1.32 {q8},[r1,:128]!
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vrev32.8 q1,q1
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vadd.i32 q8,q8,q1
+ ldr r2,[sp,#20]
+ and r3,r3,r12
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ add r6,r6,r2
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3
+ and r2,r2,r11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ add r6,r6,r2
+ ldr r2,[sp,#24]
+ and r12,r12,r3
+ add r10,r10,r6
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ add r5,r5,r2
+ eor r2,r11,r4
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ ldr r2,[sp,#28]
+ and r3,r3,r12
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ add r4,r4,r2
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3
+ and r2,r2,r9
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[sp,#32]
+ and r12,r12,r3
+ add r8,r8,r4
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ vst1.32 {q8},[r1,:128]!
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vrev32.8 q2,q2
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vadd.i32 q8,q8,q2
+ ldr r2,[sp,#36]
+ and r3,r3,r12
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ add r10,r10,r2
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3
+ and r2,r2,r7
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ add r10,r10,r2
+ ldr r2,[sp,#40]
+ and r12,r12,r3
+ add r6,r6,r10
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ add r9,r9,r2
+ eor r2,r7,r8
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ ldr r2,[sp,#44]
+ and r3,r3,r12
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ add r8,r8,r2
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3
+ and r2,r2,r5
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#48]
+ and r12,r12,r3
+ add r4,r4,r8
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vst1.32 {q8},[r1,:128]!
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vrev32.8 q3,q3
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vadd.i32 q8,q8,q3
+ ldr r2,[sp,#52]
+ and r3,r3,r12
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ add r6,r6,r2
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3
+ and r2,r2,r11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ add r6,r6,r2
+ ldr r2,[sp,#56]
+ and r12,r12,r3
+ add r10,r10,r6
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ add r5,r5,r2
+ eor r2,r11,r4
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ ldr r2,[sp,#60]
+ and r3,r3,r12
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ add r4,r4,r2
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3
+ and r2,r2,r9
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[sp,#64]
+ and r12,r12,r3
+ add r8,r8,r4
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ vst1.32 {q8},[r1,:128]!
+ ldr r0,[r2,#0]
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldr r12,[r2,#4]
+ ldr r3,[r2,#8]
+ ldr r1,[r2,#12]
+ add r4,r4,r0 @ accumulate
+ ldr r0,[r2,#16]
+ add r5,r5,r12
+ ldr r12,[r2,#20]
+ add r6,r6,r3
+ ldr r3,[r2,#24]
+ add r7,r7,r1
+ ldr r1,[r2,#28]
+ add r8,r8,r0
+ str r4,[r2],#4
+ add r9,r9,r12
+ str r5,[r2],#4
+ add r10,r10,r3
+ str r6,[r2],#4
+ add r11,r11,r1
+ str r7,[r2],#4
+ stmia r2,{r8-r11}
+
+ ittte ne
+ movne r1,sp
+ ldrne r2,[sp,#0]
+ eorne r12,r12,r12
+ ldreq sp,[sp,#76] @ restore original sp
+ itt ne
+ eorne r3,r5,r6
+ bne .L_00_48
+
+ ldmia sp!,{r4-r12,pc}
+.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+
+# ifdef __thumb2__
+# define INST(a,b,c,d) .byte c,d|0xc,a,b
+# else
+# define INST(a,b,c,d) .byte a,b,c,d
+# endif
+
+.type sha256_block_data_order_armv8,%function
+.align 5
+sha256_block_data_order_armv8:
+.LARMv8:
+ vld1.32 {q0,q1},[r0]
+# ifdef __thumb2__
+ adr r3,.LARMv8
+ sub r3,r3,#.LARMv8-K256
+# else
+ adrl r3,K256
+# endif
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
+
+.Loop_v8:
+ vld1.8 {q8-q9},[r1]!
+ vld1.8 {q10-q11},[r1]!
+ vld1.32 {q12},[r3]!
+ vrev32.8 q8,q8
+ vrev32.8 q9,q9
+ vrev32.8 q10,q10
+ vrev32.8 q11,q11
+ vmov q14,q0 @ offload
+ vmov q15,q1
+ teq r1,r2
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q10
+ INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q11
+ INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q10
+ INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q11
+ INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q10
+ INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q11
+ INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+
+ vld1.32 {q13},[r3]
+ vadd.i32 q12,q12,q10
+ sub r3,r3,#256-16 @ rewind
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+
+ vadd.i32 q13,q13,q11
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+
+ vadd.i32 q0,q0,q14
+ vadd.i32 q1,q1,q15
+ it ne
+ bne .Loop_v8
+
+ vst1.32 {q0,q1},[r0]
+
+ bx lr @ bx lr
+.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
+.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
new file mode 100644
index 000000000000..a84e869ef900
--- /dev/null
+++ b/arch/arm/crypto/sha256_glue.c
@@ -0,0 +1,128 @@
+/*
+ * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
+ * using optimized ARM assembler and NEON instructions.
+ *
+ * Copyright © 2015 Google Inc.
+ *
+ * This file is based on sha256_ssse3_glue.c:
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+
+#include "sha256_glue.h"
+
+asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+ unsigned int num_blks);
+
+int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ /* make sure casting to sha256_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+
+ return sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+}
+EXPORT_SYMBOL(crypto_sha256_arm_update);
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_finish(desc, out);
+}
+
+int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+ return sha256_final(desc, out);
+}
+EXPORT_SYMBOL(crypto_sha256_arm_finup);
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = sha256_base_init,
+ .update = crypto_sha256_arm_update,
+ .final = sha256_final,
+ .finup = crypto_sha256_arm_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-asm",
+ .cra_priority = 150,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_base_init,
+ .update = crypto_sha256_arm_update,
+ .final = sha256_final,
+ .finup = crypto_sha256_arm_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-asm",
+ .cra_priority = 150,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init sha256_mod_init(void)
+{
+ int res = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+
+ if (res < 0)
+ return res;
+
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
+ res = crypto_register_shashes(sha256_neon_algs,
+ ARRAY_SIZE(sha256_neon_algs));
+
+ if (res < 0)
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ }
+
+ return res;
+}
+
+static void __exit sha256_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
+ crypto_unregister_shashes(sha256_neon_algs,
+ ARRAY_SIZE(sha256_neon_algs));
+}
+
+module_init(sha256_mod_init);
+module_exit(sha256_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON");
+
+MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/arm/crypto/sha256_glue.h b/arch/arm/crypto/sha256_glue.h
new file mode 100644
index 000000000000..7cf0bf786ada
--- /dev/null
+++ b/arch/arm/crypto/sha256_glue.h
@@ -0,0 +1,14 @@
+#ifndef _CRYPTO_SHA256_GLUE_H
+#define _CRYPTO_SHA256_GLUE_H
+
+#include <linux/crypto.h>
+
+extern struct shash_alg sha256_neon_algs[2];
+
+int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+#endif /* _CRYPTO_SHA256_GLUE_H */
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
new file mode 100644
index 000000000000..39ccd658817e
--- /dev/null
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -0,0 +1,101 @@
+/*
+ * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
+ * using NEON instructions.
+ *
+ * Copyright © 2015 Google Inc.
+ *
+ * This file is based on sha512_neon_glue.c:
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
+#include <asm/byteorder.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+
+#include "sha256_glue.h"
+
+asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
+ unsigned int num_blks);
+
+static int sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ if (!may_use_simd() ||
+ (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
+ return crypto_sha256_arm_update(desc, data, len);
+
+ kernel_neon_begin();
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order_neon);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd())
+ return crypto_sha256_arm_finup(desc, data, len, out);
+
+ kernel_neon_begin();
+ if (len)
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order_neon);
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha256_block_data_order_neon);
+ kernel_neon_end();
+
+ return sha256_base_finish(desc, out);
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+ return sha256_finup(desc, NULL, 0, out);
+}
+
+struct shash_alg sha256_neon_algs[] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = sha256_base_init,
+ .update = sha256_update,
+ .final = sha256_final,
+ .finup = sha256_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-neon",
+ .cra_priority = 250,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_base_init,
+ .update = sha256_update,
+ .final = sha256_final,
+ .finup = sha256_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-neon",
+ .cra_priority = 250,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
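[Editor's sketch, not part of the patch: once the "sha256-asm" and "sha256-neon" transforms above are registered, in-kernel users reach them by name through the generic shash API, and the crypto core picks the highest-priority match (sha256-neon at 250, then sha256-asm at 150, then the generic C driver). The helper name example_sha256_digest() below is hypothetical; error handling is abbreviated.]

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

static int example_sha256_digest(const u8 *data, unsigned int len,
				 u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	/* "sha256" resolves to the highest-priority registered driver */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* no special request flags */
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}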
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index fe74c0d1e485..3c4596d0ce6c 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,6 +1,5 @@
-generic-y += auxvec.h
generic-y += bitsperlong.h
generic-y += cputime.h
generic-y += current.h
@@ -22,6 +21,7 @@ generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
generic-y += scatterlist.h
+generic-y += seccomp.h
generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
diff --git a/arch/arm/include/asm/arm-cci.h b/arch/arm/include/asm/arm-cci.h
new file mode 100644
index 000000000000..fe77f7ab7e6b
--- /dev/null
+++ b/arch/arm/include/asm/arm-cci.h
@@ -0,0 +1,42 @@
+/*
+ * arch/arm/include/asm/arm-cci.h
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_CCI_H
+#define __ASM_ARM_CCI_H
+
+#ifdef CONFIG_MCPM
+#include <asm/mcpm.h>
+
+/*
+ * We don't have a reliable way of detecting whether we
+ * have access to secure-only registers, unless mcpm is
+ * registered.
+ */
+static inline bool platform_has_secure_cci_access(void)
+{
+ return mcpm_is_available();
+}
+
+#else
+static inline bool platform_has_secure_cci_access(void)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index f67fd3afebdf..186270b3e194 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -237,6 +237,9 @@
.pushsection ".alt.smp.init", "a" ;\
.long 9998b ;\
9997: instr ;\
+ .if . - 9997b == 2 ;\
+ nop ;\
+ .endif ;\
.if . - 9997b != 4 ;\
.error "ALT_UP() content must assemble to exactly 4 bytes";\
.endif ;\
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
new file mode 100644
index 000000000000..fbd388c46299
--- /dev/null
+++ b/arch/arm/include/asm/auxvec.h
@@ -0,0 +1 @@
+#include <uapi/asm/auxvec.h>
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index af319ac4960c..0f8424924902 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ARM_CPUIDLE_H
#define __ASM_ARM_CPUIDLE_H
+#include <asm/proc-fns.h>
+
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
@@ -25,4 +27,25 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
*/
#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
+struct device_node;
+
+struct cpuidle_ops {
+ int (*suspend)(int cpu, unsigned long arg);
+ int (*init)(struct device_node *, int cpu);
+};
+
+struct of_cpuidle_method {
+ const char *method;
+ struct cpuidle_ops *ops;
+};
+
+#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
+ __used __section(__cpuidle_method_of_table) \
+ = { .method = _method, .ops = _ops }
+
+extern int arm_cpuidle_suspend(int index);
+
+extern int arm_cpuidle_init(int cpu);
+
#endif
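[Editor's sketch, not part of the patch: a platform enable-method backend would supply a struct cpuidle_ops and register it through the new CPUIDLE_METHOD_OF_DECLARE() table declared above. The foo_* names and the "foo,idle" method string are hypothetical; a real backend fills the suspend/init hooks with platform-specific code.]

#include <linux/of.h>
#include <asm/cpuidle.h>

static int foo_cpuidle_suspend(int cpu, unsigned long arg)
{
	/* enter the platform low-power state selected by 'arg' */
	return 0;
}

static int foo_cpuidle_init(struct device_node *np, int cpu)
{
	/* parse any platform idle properties for 'cpu' from 'np' */
	return 0;
}

static struct cpuidle_ops foo_cpuidle_ops = {
	.suspend = foo_cpuidle_suspend,
	.init	 = foo_cpuidle_init,
};

CPUIDLE_METHOD_OF_DECLARE(foo_idle, "foo,idle", &foo_cpuidle_ops);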
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 819777d0e91f..85e374f873ac 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -253,4 +253,20 @@ static inline int cpu_is_pj4(void)
#else
#define cpu_is_pj4() 0
#endif
+
+static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
+ int field)
+{
+ int feature = (features >> field) & 15;
+
+ /* feature registers are signed values */
+ if (feature > 8)
+ feature -= 16;
+
+ return feature;
+}
+
+#define cpuid_feature_extract(reg, field) \
+ cpuid_feature_extract_field(read_cpuid_ext(reg), field)
+
#endif
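[Editor's sketch, not part of the patch: the new cpuid_feature_extract() helper returns a sign-extended 4-bit field, so a "not implemented" value of 0xf reads as -1 rather than 15. A caller testing for hardware integer divide might look like the sketch below, which mirrors existing in-kernel usage; field 24 of ID_ISAR0 holds the divide bits, and HWCAP_IDIVA/HWCAP_IDIVT are existing hwcap flags.]

#include <asm/cputype.h>
#include <asm/hwcap.h>

static void example_detect_idiv(void)
{
	int block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);

	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;	/* SDIV/UDIV in ARM state */
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;	/* SDIV/UDIV in Thumb state */
}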
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 8e3fcb924db6..2ef282f96651 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -25,7 +25,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index afb9cafd3786..d2315ffd8f12 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -1,7 +1,9 @@
#ifndef __ASMARM_ELF_H
#define __ASMARM_ELF_H
+#include <asm/auxvec.h>
#include <asm/hwcap.h>
+#include <asm/vdso_datapage.h>
/*
* ELF register definitions..
@@ -115,7 +117,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -125,11 +127,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
#ifdef CONFIG_MMU
+#ifdef CONFIG_VDSO
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)current->mm->context.vdso); \
+} while (0)
+#endif
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
int arch_setup_additional_pages(struct linux_binprm *, int);
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 53e69dae796f..4e78065a16aa 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -13,7 +13,7 @@
" .align 3\n" \
" .long 1b, 4f, 2b, 4f\n" \
" .popsection\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, " err_reg "\n" \
" b 3b\n" \
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 4cf48c3aca13..405aa1883307 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -269,6 +269,16 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+ return false;
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+ pgd_t *hyp_pgd,
+ pgd_t *merged_hyp_pgd,
+ unsigned long hyp_idmap_start) { }
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 3446f6a1d9fa..50b378f59e08 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -171,12 +171,73 @@ void mcpm_cpu_suspend(u64 expected_residency);
int mcpm_cpu_powered_up(void);
/*
- * Platform specific methods used in the implementation of the above API.
+ * Platform specific callbacks used in the implementation of the above API.
+ *
+ * cpu_powerup:
+ * Make given CPU runnable. Called with MCPM lock held and IRQs disabled.
+ * The given cluster is assumed to be set up (cluster_powerup would have
+ * been called beforehand). Must return 0 for success or negative error code.
+ *
+ * cluster_powerup:
+ * Set up power for given cluster. Called with MCPM lock held and IRQs
+ * disabled. Called before first cpu_powerup when cluster is down. Must
+ * return 0 for success or negative error code.
+ *
+ * cpu_suspend_prepare:
+ * Special suspend configuration. Called on target CPU with MCPM lock held
+ * and IRQs disabled. This callback is optional. If provided, it is called
+ * before cpu_powerdown_prepare.
+ *
+ * cpu_powerdown_prepare:
+ * Configure given CPU for power down. Called on target CPU with MCPM lock
+ * held and IRQs disabled. Power down must be effective only at the next WFI instruction.
+ *
+ * cluster_powerdown_prepare:
+ * Configure given cluster for power down. Called on one CPU from target
+ * cluster with MCPM lock held and IRQs disabled. A cpu_powerdown_prepare
+ * for each CPU in the cluster has happened when this occurs.
+ *
+ * cpu_cache_disable:
+ * Clean and disable the CPU level cache for the calling CPU. Called with
+ * IRQs disabled. The CPU is no longer cache coherent with the rest of the
+ * system when this returns.
+ *
+ * cluster_cache_disable:
+ * Clean and disable the cluster wide cache as well as the CPU level cache
+ * for the calling CPU. No call to cpu_cache_disable will happen for this
+ * CPU. Called with IRQs disabled and only when all the other CPUs are done
+ * with their own cpu_cache_disable. The cluster is no longer cache coherent
+ * with the rest of the system when this returns.
+ *
+ * cpu_is_up:
+ * Called on given CPU after it has been powered up or resumed. The MCPM lock
+ * is held and IRQs disabled. This callback is optional.
+ *
+ * cluster_is_up:
+ * Called by the first CPU to be powered up or resumed in given cluster.
+ * The MCPM lock is held and IRQs disabled. This callback is optional. If
+ * provided, it is called before cpu_is_up for that CPU.
+ *
+ * wait_for_powerdown:
+ * Wait until given CPU is powered down. This is called in sleeping context.
+ * Some reasonable timeout must be considered. Must return 0 for success or
+ * negative error code.
*/
struct mcpm_platform_ops {
+ int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
+ int (*cluster_powerup)(unsigned int cluster);
+ void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
+ void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
+ void (*cluster_powerdown_prepare)(unsigned int cluster);
+ void (*cpu_cache_disable)(void);
+ void (*cluster_cache_disable)(void);
+ void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
+ void (*cluster_is_up)(unsigned int cluster);
+ int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
+
+ /* deprecated callbacks */
int (*power_up)(unsigned int cpu, unsigned int cluster);
void (*power_down)(void);
- int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
void (*suspend)(u64);
void (*powered_up)(void);
};
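
A sketch of how a platform backend might populate the new callback set documented above (illustrative only, not part of the patch: the my_* names are invented, only a subset of the callbacks is shown, and mcpm_platform_ops_register() is assumed from the same header):

#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/mcpm.h>

static int my_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	/* Kick the platform power controller; the cluster is already up. */
	return 0;
}

static int my_cluster_powerup(unsigned int cluster)
{
	/* Enable cluster power and clocks before the first CPU comes up. */
	return 0;
}

static void my_cpu_cache_disable(void)
{
	/* Clean and disable the CPU-level cache for the calling CPU. */
	v7_exit_coherency_flush(louis);
}

static const struct mcpm_platform_ops my_mcpm_ops = {
	.cpu_powerup		= my_cpu_powerup,
	.cluster_powerup	= my_cluster_powerup,
	.cpu_cache_disable	= my_cpu_cache_disable,
	/* cpu_is_up, cluster_is_up, cpu_suspend_prepare are optional */
};

static int __init my_mcpm_init(void)
{
	return mcpm_platform_ops_register(&my_mcpm_ops);
}
early_initcall(my_mcpm_init);

A real backend would also provide the powerdown_prepare, cluster_cache_disable and wait_for_powerdown callbacks; this fragment only shows the registration pattern.
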
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 64fd15159b7d..a5b47421059d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -11,6 +11,9 @@ typedef struct {
#endif
unsigned int vmalloc_seq;
unsigned long sigpage;
+#ifdef CONFIG_VDSO
+ unsigned long vdso;
+#endif
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b1596bd59129..675e4ab79f68 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -92,6 +92,7 @@ struct pmu_hw_events {
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
+ int *irq_affinity;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct perf_event *event);
diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h
deleted file mode 100644
index 52b156b341f5..000000000000
--- a/arch/arm/include/asm/seccomp.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_ARM_SECCOMP_H
-#define _ASM_ARM_SECCOMP_H
-
-#include <linux/unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#endif /* _ASM_ARM_SECCOMP_H */
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 0ad7d490ee6f..993e5224d8f7 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -104,6 +104,7 @@ static inline u32 mpidr_hash_size(void)
return 1 << mpidr_hash.bits;
}
+extern int platform_can_secondary_boot(void);
extern int platform_can_cpu_hotplug(void);
#endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 72812a1f3d1c..bd32eded3e50 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -23,7 +23,6 @@
#ifndef __ASSEMBLY__
struct task_struct;
-struct exec_domain;
#include <asm/types.h>
#include <asm/domain.h>
@@ -53,7 +52,6 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => bug */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
struct cpu_context_save cpu_context; /* cpu context */
@@ -73,7 +71,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index ce0786efd26c..74b17d09ef7a 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -315,7 +315,7 @@ do { \
__asm__ __volatile__( \
"1: " TUSER(ldrb) " %1,[%2],#0\n" \
"2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
@@ -351,7 +351,7 @@ do { \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1,[%2],#0\n" \
"2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
@@ -397,7 +397,7 @@ do { \
__asm__ __volatile__( \
"1: " TUSER(strb) " %1,[%2],#0\n" \
"2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
@@ -430,7 +430,7 @@ do { \
__asm__ __volatile__( \
"1: " TUSER(str) " %1,[%2],#0\n" \
"2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
@@ -458,7 +458,7 @@ do { \
THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, %3\n" \
" b 3b\n" \
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index b88beaba6b4a..200f9a7cd623 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -24,6 +24,14 @@
.syntax unified
#endif
+#ifdef CONFIG_CPU_V7M
+#define AR_CLASS(x...)
+#define M_CLASS(x...) x
+#else
+#define AR_CLASS(x...) x
+#define M_CLASS(x...)
+#endif
+
#ifdef CONFIG_THUMB2_KERNEL
#if __GNUC__ < 4
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
new file mode 100644
index 000000000000..d0295f1dd1a3
--- /dev/null
+++ b/arch/arm/include/asm/vdso.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
+#ifdef CONFIG_VDSO
+
+void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
+
+extern char vdso_start, vdso_end;
+
+extern unsigned int vdso_total_pages;
+
+#else /* CONFIG_VDSO */
+
+static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
+{
+}
+
+#define vdso_total_pages 0
+
+#endif /* CONFIG_VDSO */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
new file mode 100644
index 000000000000..9be259442fca
--- /dev/null
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -0,0 +1,60 @@
+/*
+ * Adapted from arm64 version.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_VDSO_DATAPAGE_H
+#define __ASM_VDSO_DATAPAGE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+
+/* Try to be cache-friendly on systems that don't implement the
+ * generic timer: fit the unconditionally updated fields in the first
+ * 32 bytes.
+ */
+struct vdso_data {
+ u32 seq_count; /* sequence count - odd during updates */
+ u16 tk_is_cntvct; /* fall back to syscall if false */
+ u16 cs_shift; /* clocksource shift */
+ u32 xtime_coarse_sec; /* coarse time */
+ u32 xtime_coarse_nsec;
+
+ u32 wtm_clock_sec; /* wall to monotonic offset */
+ u32 wtm_clock_nsec;
+ u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
+ u32 cs_mult; /* clocksource multiplier */
+
+ u64 cs_cycle_last; /* last cycle value */
+ u64 cs_mask; /* clocksource mask */
+
+ u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
+ u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
+ u32 tz_dsttime;
+};
+
+union vdso_data_store {
+ struct vdso_data data;
+ u8 page[PAGE_SIZE];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_DATAPAGE_H */
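
The seq_count field above implies the usual vDSO sequence-count protocol: readers sample the counter, spin while it is odd, read the fields, then retry if the counter changed. A sketch of the reader side (illustrative only; the helper names are not taken from the patch):

#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>

static u32 vdso_read_begin(const struct vdso_data *vdata)
{
	u32 seq;

	/* An odd count means the kernel is mid-update: wait it out. */
	while ((seq = READ_ONCE(vdata->seq_count)) & 1)
		cpu_relax();

	smp_rmb();	/* order the count read before the data reads */
	return seq;
}

static int vdso_read_retry(const struct vdso_data *vdata, u32 start)
{
	smp_rmb();	/* order the data reads before the count re-read */
	return READ_ONCE(vdata->seq_count) != start;
}
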
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index a6d0a29861e7..5831dce4b51c 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -71,7 +71,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
asm(
"1: ldr %0, [%2]\n"
"2:\n"
- " .pushsection .fixup,\"ax\"\n"
+ " .pushsection .text.fixup,\"ax\"\n"
" .align 2\n"
"3: and %1, %2, #0x3\n"
" bic %2, %2, #0x3\n"
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 2f7e6ff67d51..0b579b2f4e0e 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
bool xen_arch_need_swiotlb(struct device *dev,
unsigned long pfn,
unsigned long mfn);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index e55a9426b496..b03024fa671f 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -16,24 +16,17 @@
*/
.macro addruart, rp, rv, tmp
-#ifdef CONFIG_DEBUG_UART_PHYS
ldr \rp, =CONFIG_DEBUG_UART_PHYS
ldr \rv, =CONFIG_DEBUG_UART_VIRT
-#endif
.endm
.macro senduart, rd, rx
ARM_BE8(rev \rd, \rd )
-#ifdef CONFIG_DEBUG_QCOM_UARTDM
@ Write the 1 character to UARTDM_TF
str \rd, [\rx, #0x70]
-#else
- str \rd, [\rx, #0x0C]
-#endif
.endm
.macro waituart, rd, rx
-#ifdef CONFIG_DEBUG_QCOM_UARTDM
@ check for TX_EMT in UARTDM_SR
ldr \rd, [\rx, #0x08]
ARM_BE8(rev \rd, \rd )
@@ -55,13 +48,6 @@ ARM_BE8(rev \rd, \rd )
str \rd, [\rx, #0x40]
@ UARTDM reg. Read to induce delay
ldr \rd, [\rx, #0x08]
-#else
- @ wait for TX_READY
-1001: ldr \rd, [\rx, #0x08]
-ARM_BE8(rev \rd, \rd )
- tst \rd, #0x04
- beq 1001b
-#endif
.endm
.macro busyuart, rd, rx
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 70a1c9da30ca..a1c05f93d920 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += auxvec.h
header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/auxvec.h b/arch/arm/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000000..cb02a767a500
--- /dev/null
+++ b/arch/arm/include/uapi/asm/auxvec.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_AUXVEC_H
+#define __ASM_AUXVEC_H
+
+/* VDSO location */
+#define AT_SYSINFO_EHDR 33
+
+#endif
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 2499867dd0d8..df3f60cb1168 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -195,8 +195,14 @@ struct kvm_arch_memory_slot {
#define KVM_ARM_IRQ_CPU_IRQ 0
#define KVM_ARM_IRQ_CPU_FIQ 1
-/* Highest supported SPI, from VGIC_NR_IRQS */
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
#define KVM_ARM_IRQ_GIC_MAX 127
+#endif
/* One single KVM irqchip, ie. the VGIC */
#define KVM_NR_IRQCHIPS 1
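
With KVM_ARM_IRQ_GIC_MAX hidden from new userland, the VGIC is sized through the device attribute named in the comment above. A hedged sketch using the standard KVM device-attribute ioctl (vgic_fd is assumed to come from KVM_CREATE_DEVICE; error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vgic_nr_irqs(int vgic_fd, uint32_t nr_irqs)
{
	/* nr_irqs counts SGIs + PPIs + SPIs and must be a multiple of 32. */
	struct kvm_device_attr attr = {
		.group	= KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
		.attr	= 0,
		.addr	= (uint64_t)(unsigned long)&nr_irqs,
	};

	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}
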
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 902397dd1000..752725dcbf42 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o return_address.o \
+ process.o ptrace.o reboot.o return_address.o \
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
@@ -34,7 +34,6 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
-obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
@@ -75,6 +74,7 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
+obj-$(CONFIG_VDSO) += vdso.o
ifneq ($(CONFIG_ARCH_EBSA110),y)
obj-y += io.o
@@ -86,7 +86,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y += psci.o
+obj-y += psci.o psci-call.o
obj-$(CONFIG_SMP) += psci_smp.o
endif
diff --git a/arch/arm/kernel/arthur.c b/arch/arm/kernel/arthur.c
deleted file mode 100644
index 321c5291d05f..000000000000
--- a/arch/arm/kernel/arthur.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * linux/arch/arm/kernel/arthur.c
- *
- * Copyright (C) 1998, 1999, 2000, 2001 Philip Blundell
- *
- * Arthur personality
- */
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/personality.h>
-#include <linux/stddef.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include <asm/ptrace.h>
-
-/* Arthur doesn't have many signals, and a lot of those that it does
- have don't map easily to any Linux equivalent. Never mind. */
-
-#define ARTHUR_SIGABRT 1
-#define ARTHUR_SIGFPE 2
-#define ARTHUR_SIGILL 3
-#define ARTHUR_SIGINT 4
-#define ARTHUR_SIGSEGV 5
-#define ARTHUR_SIGTERM 6
-#define ARTHUR_SIGSTAK 7
-#define ARTHUR_SIGUSR1 8
-#define ARTHUR_SIGUSR2 9
-#define ARTHUR_SIGOSERROR 10
-
-static unsigned long arthur_to_linux_signals[32] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31
-};
-
-static unsigned long linux_to_arthur_signals[32] = {
- 0, -1, ARTHUR_SIGINT, -1,
- ARTHUR_SIGILL, 5, ARTHUR_SIGABRT, 7,
- ARTHUR_SIGFPE, 9, ARTHUR_SIGUSR1, ARTHUR_SIGSEGV,
- ARTHUR_SIGUSR2, 13, 14, ARTHUR_SIGTERM,
- 16, 17, 18, 19,
- 20, 21, 22, 23,
- 24, 25, 26, 27,
- 28, 29, 30, 31
-};
-
-static void arthur_lcall7(int nr, struct pt_regs *regs)
-{
- struct siginfo info;
- info.si_signo = SIGSWI;
- info.si_errno = nr;
- /* Bounce it to the emulator */
- send_sig_info(SIGSWI, &info, current);
-}
-
-static struct exec_domain arthur_exec_domain = {
- .name = "Arthur",
- .handler = arthur_lcall7,
- .pers_low = PER_RISCOS,
- .pers_high = PER_RISCOS,
- .signal_map = arthur_to_linux_signals,
- .signal_invmap = linux_to_arthur_signals,
- .module = THIS_MODULE,
-};
-
-/*
- * We could do with some locking to stop Arthur being removed while
- * processes are using it.
- */
-
-static int __init arthur_init(void)
-{
- return register_exec_domain(&arthur_exec_domain);
-}
-
-static void __exit arthur_exit(void)
-{
- unregister_exec_domain(&arthur_exec_domain);
-}
-
-module_init(arthur_init);
-module_exit(arthur_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 488eaac56028..871b8267d211 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -25,6 +25,7 @@
#include <asm/memory.h>
#include <asm/procinfo.h>
#include <asm/suspend.h>
+#include <asm/vdso_datapage.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
@@ -66,7 +67,6 @@ int main(void)
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
@@ -206,5 +206,9 @@ int main(void)
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
#endif
+ BLANK();
+#ifdef CONFIG_VDSO
+ DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
+#endif
return 0;
}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index ab19b7c03423..fcbbbb1b9e95 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -618,21 +618,15 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
- struct pci_sys_data *root = dev->sysdata;
- unsigned long phys;
-
- if (mmap_state == pci_mmap_io) {
+ if (mmap_state == pci_mmap_io)
return -EINVAL;
- } else {
- phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
- }
/*
* Mark this as IO
*/
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (remap_pfn_range(vma, vma->vm_start, phys,
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 89545f6c8403..318da33465f4 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -10,8 +10,28 @@
*/
#include <linux/cpuidle.h>
-#include <asm/proc-fns.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/cpuidle.h>
+extern struct of_cpuidle_method __cpuidle_method_of_table[];
+
+static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
+ __used __section(__cpuidle_method_of_table_end);
+
+static struct cpuidle_ops cpuidle_ops[NR_CPUS];
+
+/**
+ * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
+ * @dev: not used
+ * @drv: not used
+ * @index: not used
+ *
+ * A trivial wrapper to allow the cpu_do_idle function to be assigned as a
+ * cpuidle callback by matching the function signature.
+ *
+ * Returns the index passed as parameter
+ */
int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
@@ -19,3 +39,114 @@ int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
return index;
}
+
+/**
+ * arm_cpuidle_suspend() - function to enter low power idle states
+ * @index: an integer used as an identifier for the low level PM callbacks
+ *
+ * This function calls the underlying arch specific low level PM code as
+ * registered at init time.
+ *
+ * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
+ * callback otherwise.
+ */
+int arm_cpuidle_suspend(int index)
+{
+ int ret = -EOPNOTSUPP;
+ int cpu = smp_processor_id();
+
+ if (cpuidle_ops[cpu].suspend)
+ ret = cpuidle_ops[cpu].suspend(cpu, index);
+
+ return ret;
+}
+
+/**
+ * arm_cpuidle_get_ops() - find a registered cpuidle_ops by name
+ * @method: the method name
+ *
+ * Search the __cpuidle_method_of_table array for the cpuidle ops matching
+ * the given method name.
+ *
+ * Returns a struct cpuidle_ops pointer, NULL if not found.
+ */
+static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+{
+ struct of_cpuidle_method *m = __cpuidle_method_of_table;
+
+ for (; m->method; m++)
+ if (!strcmp(m->method, method))
+ return m->ops;
+
+ return NULL;
+}
+
+/**
+ * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree
+ * @dn: a pointer to a struct device node corresponding to a cpu node
+ * @cpu: the cpu identifier
+ *
+ * Get the method name defined in the 'enable-method' property, retrieve the
+ * associated cpuidle_ops and do a struct copy. This copy is needed because all
+ * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * process.
+ *
+ * Returns 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
+ * no cpuidle_ops is registered for the 'enable-method'.
+ */
+static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
+{
+ const char *enable_method;
+ struct cpuidle_ops *ops;
+
+ enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method)
+ return -ENOENT;
+
+ ops = arm_cpuidle_get_ops(enable_method);
+ if (!ops) {
+ pr_warn("%s: unsupported enable-method property: %s\n",
+ dn->full_name, enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ cpuidle_ops[cpu] = *ops; /* structure copy */
+
+ pr_notice("cpuidle: enable-method property '%s'"
+ " found operations\n", enable_method);
+
+ return 0;
+}
+
+/**
+ * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu
+ * @cpu: the cpu to be initialized
+ *
+ * Initialize the cpuidle ops with the device tree node for the cpu and then call
+ * the cpu's idle initialization callback. This may fail if the underlying HW
+ * is not operational.
+ *
+ * Returns:
+ * 0 on success,
+ * -ENODEV if it fails to find the cpu node in the device tree,
+ * -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ * -ENOENT if it fails to find an 'enable-method' property,
+ * -ENXIO if the HW reports a failure or a misconfiguration,
+ * -ENOMEM if the HW reports a memory allocation failure
+ */
+int __init arm_cpuidle_init(int cpu)
+{
+ struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+ int ret;
+
+ if (!cpu_node)
+ return -ENODEV;
+
+ ret = arm_cpuidle_read_ops(cpu_node, cpu);
+ if (!ret && cpuidle_ops[cpu].init)
+ ret = cpuidle_ops[cpu].init(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+
+ return ret;
+}
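
A sketch of the provider side that arm_cpuidle_read_ops() looks up (not part of the patch: the "vendor,my-method" string and my_* callbacks are invented, CPUIDLE_METHOD_OF_DECLARE() is assumed from <asm/cpuidle.h> in the same series, and the callback signatures are inferred from the calls visible in this file):

#include <linux/init.h>
#include <linux/of.h>
#include <asm/cpuidle.h>

static int my_cpuidle_init(struct device_node *cpu_node, int cpu)
{
	/* Probe the power controller described by cpu_node for this cpu. */
	return 0;
}

static int my_cpuidle_suspend(int cpu, unsigned long index)
{
	/* Enter the low power state selected by 'index'. */
	return 0;
}

static struct cpuidle_ops my_cpuidle_ops __initdata = {
	.init		= my_cpuidle_init,
	.suspend	= my_cpuidle_suspend,
};

/* Matched against the cpu node's "enable-method" property. */
CPUIDLE_METHOD_OF_DECLARE(my_method, "vendor,my-method", &my_cpuidle_ops);
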
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 672b21942fff..570306c49406 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -545,7 +545,7 @@ ENDPROC(__und_usr)
/*
* The out of line fixup for the ldrt instructions above.
*/
- .pushsection .fixup, "ax"
+ .pushsection .text.fixup, "ax"
.align 2
4: str r4, [sp, #S_PC] @ retry current instruction
ret r9
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index cc176b67c134..aebfbf79a1a3 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -80,9 +80,9 @@ ENTRY(stext)
ldr r13, =__mmap_switched @ address to jump to after
@ initialising sctlr
adr lr, BSYM(1f) @ return (PIC) address
- ARM( add pc, r10, #PROCINFO_INITFUNC )
- THUMB( add r12, r10, #PROCINFO_INITFUNC )
- THUMB( ret r12 )
+ ldr r12, [r10, #PROCINFO_INITFUNC]
+ add r12, r12, r10
+ ret r12
1: b __after_proc_init
ENDPROC(stext)
@@ -117,9 +117,9 @@ ENTRY(secondary_startup)
adr lr, BSYM(__after_proc_init) @ return address
mov r13, r12 @ __secondary_switched address
- ARM( add pc, r10, #PROCINFO_INITFUNC )
- THUMB( add r12, r10, #PROCINFO_INITFUNC )
- THUMB( ret r12 )
+ ldr r12, [r10, #PROCINFO_INITFUNC]
+ add r12, r12, r10
+ ret r12
ENDPROC(secondary_startup)
ENTRY(__secondary_switched)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 01963273c07a..3637973a9708 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -138,9 +138,9 @@ ENTRY(stext)
@ mmu has been enabled
adr lr, BSYM(1f) @ return (PIC) address
mov r8, r4 @ set TTBR1 to swapper_pg_dir
- ARM( add pc, r10, #PROCINFO_INITFUNC )
- THUMB( add r12, r10, #PROCINFO_INITFUNC )
- THUMB( ret r12 )
+ ldr r12, [r10, #PROCINFO_INITFUNC]
+ add r12, r12, r10
+ ret r12
1: b __enable_mmu
ENDPROC(stext)
.ltorg
@@ -386,10 +386,10 @@ ENTRY(secondary_startup)
ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir
adr lr, BSYM(__enable_mmu) @ return address
mov r13, r12 @ __secondary_switched address
- ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
- @ (return control reg)
- THUMB( add r12, r10, #PROCINFO_INITFUNC )
- THUMB( ret r12 )
+ ldr r12, [r10, #PROCINFO_INITFUNC]
+ add r12, r12, r10 @ initialise processor
+ @ (return control reg)
+ ret r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index c4cc50e58c13..a71501ff6f18 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -22,6 +22,7 @@
#include <asm/suspend.h>
#include <asm/memory.h>
#include <asm/sections.h>
+#include "reboot.h"
int pfn_is_nosave(unsigned long pfn)
{
@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
ret = swsusp_save();
if (ret == 0)
- soft_restart(virt_to_phys(cpu_resume));
+ _soft_restart(virt_to_phys(cpu_resume), false);
return ret;
}
@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
for (pbe = restore_pblist; pbe; pbe = pbe->next)
copy_page(pbe->orig_address, pbe->address);
- soft_restart(virt_to_phys(cpu_resume));
+ _soft_restart(virt_to_phys(cpu_resume), false);
}
static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
@@ -99,7 +100,6 @@ static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
*/
int swsusp_arch_resume(void)
{
- extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
call_with_stack(arch_restore_image, 0,
resume_stack + ARRAY_SIZE(resume_stack));
return 0;
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fc70ae21185..dc7d0a95bd36 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Per-cpu breakpoints are not supported by our stepping
* mechanism.
*/
- if (!bp->hw.bp_target)
+ if (!bp->hw.target)
return -EINVAL;
/*
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index de2b085ad753..8bf3b7c09888 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -46,7 +46,8 @@ int machine_kexec_prepare(struct kimage *image)
* and implements CPU hotplug for the current HW. If not, we won't be
* able to kexec reliably, so fail the prepare operation.
*/
- if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+ if (num_possible_cpus() > 1 && platform_can_secondary_boot() &&
+ !platform_can_cpu_hotplug())
return -EINVAL;
/*
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2e11961f65ae..af791f4a6205 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -98,14 +98,19 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
+ if (sym->st_value & 3) {
+ pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (ARM -> Thumb)\n",
+ module->name, relindex, i, symname);
+ return -ENOEXEC;
+ }
+
offset = __mem_to_opcode_arm(*(u32 *)loc);
offset = (offset & 0x00ffffff) << 2;
if (offset & 0x02000000)
offset -= 0x04000000;
offset += sym->st_value - loc;
- if (offset & 3 ||
- offset <= (s32)0xfe000000 ||
+ if (offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000) {
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
module->name, relindex, i, symname,
@@ -155,6 +160,22 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
+ /*
+ * For function symbols, only Thumb addresses are
+ * allowed (no interworking).
+ *
+ * For non-function symbols, the destination
+ * has no specific ARM/Thumb disposition, so
+ * the branch is resolved under the assumption
+ * that interworking is not required.
+ */
+ if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+ !(sym->st_value & 1)) {
+ pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (Thumb -> ARM)\n",
+ module->name, relindex, i, symname);
+ return -ENOEXEC;
+ }
+
upper = __mem_to_opcode_thumb16(*(u16 *)loc);
lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
@@ -182,18 +203,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset -= 0x02000000;
offset += sym->st_value - loc;
- /*
- * For function symbols, only Thumb addresses are
- * allowed (no interworking).
- *
- * For non-function symbols, the destination
- * has no specific ARM/Thumb disposition, so
- * the branch is resolved under the assumption
- * that interworking is not required.
- */
- if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
- !(offset & 1)) ||
- offset <= (s32)0xff000000 ||
+ if (offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000) {
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
module->name, relindex, i, symname,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 557e128e4df0..4a86a0133ac3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -259,20 +259,29 @@ out:
}
static int
-validate_event(struct pmu_hw_events *hw_events,
- struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+ struct perf_event *event)
{
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct arm_pmu *armpmu;
if (is_software_event(event))
return 1;
+ /*
+ * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+ * core perf code won't check that the pmu->ctx == leader->ctx
+ * until after pmu->event_init(event).
+ */
+ if (event->pmu != pmu)
+ return 0;
+
if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
+ armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, event) >= 0;
}
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
*/
memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
- if (!validate_event(&fake_pmu, leader))
+ if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
- if (!validate_event(&fake_pmu, sibling))
+ if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
- if (!validate_event(&fake_pmu, event))
+ if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 61b53c46edfa..213919ba326f 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
free_percpu_irq(irq, &hw_events->percpu_pmu);
} else {
for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+ int cpu = i;
+
+ if (cpu_pmu->irq_affinity)
+ cpu = cpu_pmu->irq_affinity[i];
+
+ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
- free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
+ free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
}
}
@@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
} else {
for (i = 0; i < irqs; ++i) {
+ int cpu = i;
+
err = 0;
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
+ if (cpu_pmu->irq_affinity)
+ cpu = cpu_pmu->irq_affinity[i];
+
/*
* If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
+ irq, cpu);
continue;
}
err = request_irq(irq, handler,
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
- per_cpu_ptr(&hw_events->percpu_pmu, i));
+ per_cpu_ptr(&hw_events->percpu_pmu, cpu));
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
return err;
}
- cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
}
}
@@ -243,6 +253,8 @@ static const struct of_device_id cpu_pmu_of_device_ids[] = {
{.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
{.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
{.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
+ {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init},
+ {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init},
{},
};
@@ -289,6 +301,53 @@ static int probe_current_pmu(struct arm_pmu *pmu)
return ret;
}
+static int of_pmu_irq_cfg(struct platform_device *pdev)
+{
+ int i, irq;
+ int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+
+ if (!irqs)
+ return -ENOMEM;
+
+ /* Don't bother with PPIs; they're already affine */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 && irq_is_percpu(irq))
+ return 0;
+
+ for (i = 0; i < pdev->num_resources; ++i) {
+ struct device_node *dn;
+ int cpu;
+
+ dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+ i);
+ if (!dn) {
+ pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+ of_node_full_name(pdev->dev.of_node), i);
+ break;
+ }
+
+ for_each_possible_cpu(cpu)
+ if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+ break;
+
+ of_node_put(dn);
+ if (cpu >= nr_cpu_ids) {
+ pr_warn("Failed to find logical CPU for %s\n",
+ dn->name);
+ break;
+ }
+
+ irqs[i] = cpu;
+ }
+
+ if (i == pdev->num_resources)
+ cpu_pmu->irq_affinity = irqs;
+ else
+ kfree(irqs);
+
+ return 0;
+}
+
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
@@ -313,7 +372,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
init_fn = of_id->data;
- ret = init_fn(pmu);
+
+ ret = of_pmu_irq_cfg(pdev);
+ if (!ret)
+ ret = init_fn(pmu);
} else {
ret = probe_current_pmu(pmu);
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8993770c47de..f4207a4dcb01 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -140,6 +140,23 @@ enum krait_perf_types {
KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
};
+/* ARMv7 Scorpion specific event types */
+enum scorpion_perf_types {
+ SCORPION_LPM0_GROUP0 = 0x4c,
+ SCORPION_LPM1_GROUP0 = 0x50,
+ SCORPION_LPM2_GROUP0 = 0x54,
+ SCORPION_L2LPM_GROUP0 = 0x58,
+ SCORPION_VLPM_GROUP0 = 0x5c,
+
+ SCORPION_ICACHE_ACCESS = 0x10053,
+ SCORPION_ICACHE_MISS = 0x10052,
+
+ SCORPION_DTLB_ACCESS = 0x12013,
+ SCORPION_DTLB_MISS = 0x12012,
+
+ SCORPION_ITLB_MISS = 0x12021,
+};
+
/*
* Cortex-A8 HW events mapping
*
@@ -482,6 +499,49 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
};
/*
+ * Scorpion HW events mapping
+ */
+static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+ /*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+ /*
+ * Only ITLB misses and DTLB refills are supported. If users want the
+ * DTLB refill misses, a raw counter must be used.
+ */
+ [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
* Perf Events' indices
*/
#define ARMV7_IDX_CYCLE_COUNTER 0
@@ -976,6 +1036,12 @@ static int krait_map_event_no_branch(struct perf_event *event)
&krait_perf_cache_map, 0xFFFFF);
}
+static int scorpion_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &scorpion_perf_map,
+ &scorpion_perf_cache_map, 0xFFFFF);
+}
+
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->handle_irq = armv7pmu_handle_irq;
@@ -1103,6 +1169,12 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN BIT(31)
+#define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
+#define EVENT_GROUP(event) ((event) & 0xf) /* G */
+#define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
+#define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
+#define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
+
static u32 krait_read_pmresrn(int n)
{
u32 val;
@@ -1141,19 +1213,19 @@ static void krait_write_pmresrn(int n, u32 val)
}
}
-static u32 krait_read_vpmresr0(void)
+static u32 venum_read_pmresr(void)
{
u32 val;
asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
return val;
}
-static void krait_write_vpmresr0(u32 val)
+static void venum_write_pmresr(u32 val)
{
asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}
-static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
+static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
u32 venum_new_val;
u32 fp_new_val;
@@ -1170,7 +1242,7 @@ static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
fmxr(FPEXC, fp_new_val);
}
-static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
+static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
BUG_ON(preemptible());
/* Restore FPEXC */
@@ -1193,16 +1265,11 @@ static void krait_evt_setup(int idx, u32 config_base)
u32 val;
u32 mask;
u32 vval, fval;
- unsigned int region;
- unsigned int group;
- unsigned int code;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ unsigned int code = EVENT_CODE(config_base);
unsigned int group_shift;
- bool venum_event;
-
- venum_event = !!(config_base & VENUM_EVENT);
- region = (config_base >> 12) & 0xf;
- code = (config_base >> 4) & 0xff;
- group = (config_base >> 0) & 0xf;
+ bool venum_event = EVENT_VENUM(config_base);
group_shift = group * 8;
mask = 0xff << group_shift;
@@ -1217,16 +1284,14 @@ static void krait_evt_setup(int idx, u32 config_base)
val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
armv7_pmnc_write_evtsel(idx, val);
- asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
-
if (venum_event) {
- krait_pre_vpmresr0(&vval, &fval);
- val = krait_read_vpmresr0();
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
val &= ~mask;
val |= code << group_shift;
val |= PMRESRn_EN;
- krait_write_vpmresr0(val);
- krait_post_vpmresr0(vval, fval);
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
} else {
val = krait_read_pmresrn(region);
val &= ~mask;
@@ -1236,7 +1301,7 @@ static void krait_evt_setup(int idx, u32 config_base)
}
}
-static u32 krait_clear_pmresrn_group(u32 val, int group)
+static u32 clear_pmresrn_group(u32 val, int group)
{
u32 mask;
int group_shift;
@@ -1256,23 +1321,19 @@ static void krait_clearpmu(u32 config_base)
{
u32 val;
u32 vval, fval;
- unsigned int region;
- unsigned int group;
- bool venum_event;
-
- venum_event = !!(config_base & VENUM_EVENT);
- region = (config_base >> 12) & 0xf;
- group = (config_base >> 0) & 0xf;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ bool venum_event = EVENT_VENUM(config_base);
if (venum_event) {
- krait_pre_vpmresr0(&vval, &fval);
- val = krait_read_vpmresr0();
- val = krait_clear_pmresrn_group(val, group);
- krait_write_vpmresr0(val);
- krait_post_vpmresr0(vval, fval);
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
+ val = clear_pmresrn_group(val, group);
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
} else {
val = krait_read_pmresrn(region);
- val = krait_clear_pmresrn_group(val, group);
+ val = clear_pmresrn_group(val, group);
krait_write_pmresrn(region, val);
}
}
@@ -1342,6 +1403,8 @@ static void krait_pmu_enable_event(struct perf_event *event)
static void krait_pmu_reset(void *info)
{
u32 vval, fval;
+ struct arm_pmu *cpu_pmu = info;
+ u32 idx, nb_cnt = cpu_pmu->num_events;
armv7pmu_reset(info);
@@ -1350,9 +1413,16 @@ static void krait_pmu_reset(void *info)
krait_write_pmresrn(1, 0);
krait_write_pmresrn(2, 0);
- krait_pre_vpmresr0(&vval, &fval);
- krait_write_vpmresr0(0);
- krait_post_vpmresr0(vval, fval);
+ venum_pre_pmresr(&vval, &fval);
+ venum_write_pmresr(0);
+ venum_post_pmresr(vval, fval);
+
+ /* Reset PMxEVNCTCR to sane default */
+ for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+ armv7_pmnc_select_counter(idx);
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+ }
+
}
static int krait_event_to_bit(struct perf_event *event, unsigned int region,
@@ -1386,26 +1456,18 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
{
int idx;
int bit = -1;
- unsigned int prefix;
- unsigned int region;
- unsigned int code;
- unsigned int group;
- bool krait_event;
struct hw_perf_event *hwc = &event->hw;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int code = EVENT_CODE(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool krait_event = EVENT_CPU(hwc->config_base);
- region = (hwc->config_base >> 12) & 0xf;
- code = (hwc->config_base >> 4) & 0xff;
- group = (hwc->config_base >> 0) & 0xf;
- krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
- if (krait_event) {
+ if (venum_event || krait_event) {
/* Ignore invalid events */
if (group > 3 || region > 2)
return -EINVAL;
- prefix = hwc->config_base & KRAIT_EVENT_MASK;
- if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
- return -EINVAL;
- if (prefix == VENUM_EVENT && (code & 0xe0))
+ if (venum_event && (code & 0xe0))
return -EINVAL;
bit = krait_event_to_bit(event, region, group);
@@ -1425,15 +1487,12 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
{
int bit;
struct hw_perf_event *hwc = &event->hw;
- unsigned int region;
- unsigned int group;
- bool krait_event;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool krait_event = EVENT_CPU(hwc->config_base);
- region = (hwc->config_base >> 12) & 0xf;
- group = (hwc->config_base >> 0) & 0xf;
- krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
- if (krait_event) {
+ if (venum_event || krait_event) {
bit = krait_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
}
@@ -1458,6 +1517,344 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
return 0;
}
+
+/*
+ * Scorpion Local Performance Monitor Register (LPMn)
+ *
+ * 31 30 24 16 8 0
+ * +--------------------------------+
+ * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0
+ * +--------------------------------+
+ * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1
+ * +--------------------------------+
+ * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2
+ * +--------------------------------+
+ * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3
+ * +--------------------------------+
+ * VLPM | EN | CC | CC | CC | CC | N = 2, R = ?
+ * +--------------------------------+
+ * EN | G=3 | G=2 | G=1 | G=0
+ *
+ *
+ * Event Encoding:
+ *
+ * hwc->config_base = 0xNRCCG
+ *
+ * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
+ * R = region register
+ * CC = class of events the group G is choosing from
+ * G = group or particular event
+ *
+ * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
+ *
+ * A region (R) corresponds to a piece of the CPU (execution unit, instruction
+ * unit, etc.) while the event code (CC) corresponds to a particular class of
+ * events (interrupts for example). An event code is broken down into
+ * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
+ * example).
+ */
+
+static u32 scorpion_read_pmresrn(int n)
+{
+ u32 val;
+
+ switch (n) {
+ case 0:
+ asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+ break;
+ case 1:
+ asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+ break;
+ case 2:
+ asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+ break;
+ case 3:
+ asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+ break;
+ default:
+ BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+ }
+
+ return val;
+}
+
+static void scorpion_write_pmresrn(int n, u32 val)
+{
+ switch (n) {
+ case 0:
+ asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+ break;
+ case 1:
+ asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+ break;
+ case 2:
+ asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+ break;
+ case 3:
+ asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+ break;
+ default:
+ BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+ }
+}
+
+static u32 scorpion_get_pmresrn_event(unsigned int region)
+{
+ static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
+ SCORPION_LPM1_GROUP0,
+ SCORPION_LPM2_GROUP0,
+ SCORPION_L2LPM_GROUP0 };
+ return pmresrn_table[region];
+}
+
+static void scorpion_evt_setup(int idx, u32 config_base)
+{
+ u32 val;
+ u32 mask;
+ u32 vval, fval;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ unsigned int code = EVENT_CODE(config_base);
+ unsigned int group_shift;
+ bool venum_event = EVENT_VENUM(config_base);
+
+ group_shift = group * 8;
+ mask = 0xff << group_shift;
+
+ /* Configure evtsel for the region and group */
+ if (venum_event)
+ val = SCORPION_VLPM_GROUP0;
+ else
+ val = scorpion_get_pmresrn_event(region);
+ val += group;
+ /* Mix in mode-exclusion bits */
+ val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
+ armv7_pmnc_write_evtsel(idx, val);
+
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+
+ if (venum_event) {
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
+ val &= ~mask;
+ val |= code << group_shift;
+ val |= PMRESRn_EN;
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
+ } else {
+ val = scorpion_read_pmresrn(region);
+ val &= ~mask;
+ val |= code << group_shift;
+ val |= PMRESRn_EN;
+ scorpion_write_pmresrn(region, val);
+ }
+}
+
+static void scorpion_clearpmu(u32 config_base)
+{
+ u32 val;
+ u32 vval, fval;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ bool venum_event = EVENT_VENUM(config_base);
+
+ if (venum_event) {
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
+ val = clear_pmresrn_group(val, group);
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
+ } else {
+ val = scorpion_read_pmresrn(region);
+ val = clear_pmresrn_group(val, group);
+ scorpion_write_pmresrn(region, val);
+ }
+}
+
+static void scorpion_pmu_disable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ /* Disable counter and interrupt */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Clear pmresr code (if destined for PMNx counters)
+ */
+ if (hwc->config_base & KRAIT_EVENT_MASK)
+ scorpion_clearpmu(hwc->config_base);
+
+ /* Disable interrupt for this counter */
+ armv7_pmnc_disable_intens(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_enable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ /*
+ * Enable counter and interrupt, and set the counter to count
+ * the event that we're interested in.
+ */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Set event (if destined for PMNx counters)
+ * We don't set the event for the cycle counter because we
+ * don't have the ability to perform event filtering.
+ */
+ if (hwc->config_base & KRAIT_EVENT_MASK)
+ scorpion_evt_setup(idx, hwc->config_base);
+ else if (idx != ARMV7_IDX_CYCLE_COUNTER)
+ armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+ /* Enable interrupt for this counter */
+ armv7_pmnc_enable_intens(idx);
+
+ /* Enable counter */
+ armv7_pmnc_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_reset(void *info)
+{
+ u32 vval, fval;
+ struct arm_pmu *cpu_pmu = info;
+ u32 idx, nb_cnt = cpu_pmu->num_events;
+
+ armv7pmu_reset(info);
+
+ /* Clear all pmresrs */
+ scorpion_write_pmresrn(0, 0);
+ scorpion_write_pmresrn(1, 0);
+ scorpion_write_pmresrn(2, 0);
+ scorpion_write_pmresrn(3, 0);
+
+ venum_pre_pmresr(&vval, &fval);
+ venum_write_pmresr(0);
+ venum_post_pmresr(vval, fval);
+
+ /* Reset PMxEVNCTCR to sane default */
+ for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+ armv7_pmnc_select_counter(idx);
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+ }
+}
+
+static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
+ unsigned int group)
+{
+ int bit;
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+ if (hwc->config_base & VENUM_EVENT)
+ bit = SCORPION_VLPM_GROUP0;
+ else
+ bit = scorpion_get_pmresrn_event(region);
+ bit -= scorpion_get_pmresrn_event(0);
+ bit += group;
+ /*
+ * Lower bits are reserved for use by the counters (see
+ * armv7pmu_get_event_idx() for more info)
+ */
+ bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+
+ return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events can't use the same group within a pmresr register.
+ */
+static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx;
+ int bit = -1;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+ if (venum_event || scorpion_event) {
+ /* Ignore invalid events */
+ if (group > 3 || region > 3)
+ return -EINVAL;
+
+ bit = scorpion_event_to_bit(event, region, group);
+ if (test_and_set_bit(bit, cpuc->used_mask))
+ return -EAGAIN;
+ }
+
+ idx = armv7pmu_get_event_idx(cpuc, event);
+ if (idx < 0 && bit >= 0)
+ clear_bit(bit, cpuc->used_mask);
+
+ return idx;
+}
+
+static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int bit;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+ if (venum_event || scorpion_event) {
+ bit = scorpion_event_to_bit(event, region, group);
+ clear_bit(bit, cpuc->used_mask);
+ }
+}
+
+static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_scorpion";
+ cpu_pmu->map_event = scorpion_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->reset = scorpion_pmu_reset;
+ cpu_pmu->enable = scorpion_pmu_enable_event;
+ cpu_pmu->disable = scorpion_pmu_disable_event;
+ cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+ return 0;
+}
+
+static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_scorpion_mp";
+ cpu_pmu->map_event = scorpion_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->reset = scorpion_pmu_reset;
+ cpu_pmu->enable = scorpion_pmu_enable_event;
+ cpu_pmu->disable = scorpion_pmu_disable_event;
+ cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+ return 0;
+}
#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
@@ -1498,4 +1895,14 @@ static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
+
+static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return -ENODEV;
+}
+
+static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_CPU_V7 */
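
A standalone illustration of the 0xNRCCG encoding documented in the Scorpion LPM comment, using the EVENT_* helpers added above. The macro bodies are copied from the patch; the KRAIT_EVENT/VENUM_EVENT values are taken to be (1 << 16) and (2 << 16) as elsewhere in this file, and the program is only a worked example, not kernel code:

#include <stdio.h>

#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))	/* N=1 */

int main(void)
{
	unsigned int config = 0x12021;	/* SCORPION_ITLB_MISS */

	/* Prints: region 2 (LPM2), code 0x02, group 1, cpu 1, venum 0,
	 * matching the "0x12021 is a Scorpion CPU event in LPM2's group 1
	 * with code 2" example in the comment above. */
	printf("region %u, code 0x%02x, group %u, cpu %d, venum %d\n",
	       EVENT_REGION(config), EVENT_CODE(config), EVENT_GROUP(config),
	       EVENT_CPU(config), EVENT_VENUM(config));
	return 0;
}
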
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fdfa3a78ec8c..f192a2a41719 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -17,12 +17,9 @@
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
-#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
@@ -31,16 +28,14 @@
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/leds.h>
-#include <linux/reboot.h>
-#include <asm/cacheflush.h>
-#include <asm/idmap.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
+#include <asm/vdso.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
@@ -59,69 +54,6 @@ static const char *isa_modes[] __maybe_unused = {
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
-extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
-typedef void (*phys_reset_t)(unsigned long);
-
-/*
- * A temporary stack to use for CPU reset. This is static so that we
- * don't clobber it with the identity mapping. When running with this
- * stack, any references to the current task *will not work* so you
- * should really do as little as possible before jumping to your reset
- * code.
- */
-static u64 soft_restart_stack[16];
-
-static void __soft_restart(void *addr)
-{
- phys_reset_t phys_reset;
-
- /* Take out a flat memory mapping. */
- setup_mm_for_reboot();
-
- /* Clean and invalidate caches */
- flush_cache_all();
-
- /* Turn off caching */
- cpu_proc_fin();
-
- /* Push out any further dirty data, and ensure cache is empty */
- flush_cache_all();
-
- /* Switch to the identity mapping. */
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
- phys_reset((unsigned long)addr);
-
- /* Should never get here. */
- BUG();
-}
-
-void soft_restart(unsigned long addr)
-{
- u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
-
- /* Disable interrupts first */
- raw_local_irq_disable();
- local_fiq_disable();
-
- /* Disable the L2 if we're the last man standing. */
- if (num_online_cpus() == 1)
- outer_disable();
-
- /* Change to the new stack and continue with the reset. */
- call_with_stack(__soft_restart, (void *)addr, (void *)stack);
-
- /* Should never get here. */
- BUG();
-}
-
-/*
- * Function pointers to optional machine specific functions
- */
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
-
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-
/*
* This is our default idle handler.
*/
@@ -166,79 +98,6 @@ void arch_cpu_idle_dead(void)
}
#endif
-/*
- * Called by kexec, immediately prior to machine_kexec().
- *
- * This must completely disable all secondary CPUs; simply causing those CPUs
- * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
- * kexec'd kernel to use any and all RAM as it sees fit, without having to
- * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
- * functionality embodied in disable_nonboot_cpus() to achieve this.
- */
-void machine_shutdown(void)
-{
- disable_nonboot_cpus();
-}
-
-/*
- * Halting simply requires that the secondary CPUs stop performing any
- * activity (executing tasks, handling interrupts). smp_send_stop()
- * achieves this.
- */
-void machine_halt(void)
-{
- local_irq_disable();
- smp_send_stop();
-
- local_irq_disable();
- while (1);
-}
-
-/*
- * Power-off simply requires that the secondary CPUs stop performing any
- * activity (executing tasks, handling interrupts). smp_send_stop()
- * achieves this. When the system power is turned off, it will take all CPUs
- * with it.
- */
-void machine_power_off(void)
-{
- local_irq_disable();
- smp_send_stop();
-
- if (pm_power_off)
- pm_power_off();
-}
-
-/*
- * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
- * provide a HW restart implementation, to ensure that all CPUs reset at once.
- * This is required so that any code running after reset on the primary CPU
- * doesn't have to co-ordinate with other CPUs to ensure they aren't still
- * executing pre-reset code, and using RAM that the primary CPU's code wishes
- * to use. Implementing such co-ordination would be essentially impossible.
- */
-void machine_restart(char *cmd)
-{
- local_irq_disable();
- smp_send_stop();
-
- if (arm_pm_restart)
- arm_pm_restart(reboot_mode, cmd);
- else
- do_kernel_restart(cmd);
-
- /* Give a grace period for failure to restart of 1s */
- mdelay(1000);
-
- /* Whoops - the platform was unable to reboot. Tell the user! */
- printk("Reboot failed -- System halted\n");
- local_irq_disable();
- while (1);
-}
-
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@@ -475,7 +334,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
}
/* If possible, provide a placement hint at a random offset from the
- * stack for the signal page.
+ * stack for the sigpage and vdso pages.
*/
static unsigned long sigpage_addr(const struct mm_struct *mm,
unsigned int npages)
@@ -519,6 +378,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ unsigned long npages;
unsigned long addr;
unsigned long hint;
int ret = 0;
@@ -528,9 +388,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!signal_page)
return -ENOMEM;
+ npages = 1; /* for sigpage */
+ npages += vdso_total_pages;
+
down_write(&mm->mmap_sem);
- hint = sigpage_addr(mm, 1);
- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
+ hint = sigpage_addr(mm, npages);
+ addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
@@ -547,6 +410,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm->context.sigpage = addr;
+ /* Unlike the sigpage, failure to install the vdso is unlikely
+ * to be fatal to the process, so no error check needed
+ * here.
+ */
+ arm_install_vdso(mm, addr + PAGE_SIZE);
+
up_fail:
up_write(&mm->mmap_sem);
return ret;
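
The hunk above reserves one contiguous region for the sigpage plus the vdso pages and then installs the vdso one page above the sigpage. A minimal standalone sketch of that layout arithmetic, using a made-up base address in place of the get_unmapped_area() result and assuming 4K pages and two vdso pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long vdso_total_pages = 2;          /* assumed: one vvar + one text page */
	unsigned long npages = 1 + vdso_total_pages; /* sigpage + vdso pages, as above */
	unsigned long base = 0xb6f00000UL;           /* stand-in for get_unmapped_area() */

	printf("reserve %lu bytes at %#lx\n", npages << PAGE_SHIFT, base);
	printf("sigpage at %#lx\n", base);                 /* mm->context.sigpage */
	printf("vvar    at %#lx\n", base + PAGE_SIZE);     /* arm_install_vdso() target */
	printf("vdso    at %#lx\n", base + 2 * PAGE_SIZE); /* text follows the vvar page */
	return 0;
}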
diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
new file mode 100644
index 000000000000..a78e9e1e206d
--- /dev/null
+++ b/arch/arm/kernel/psci-call.S
@@ -0,0 +1,31 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Mark Rutland <mark.rutland@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
+ENTRY(__invoke_psci_fn_hvc)
+ __HVC(0)
+ bx lr
+ENDPROC(__invoke_psci_fn_hvc)
+
+/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
+ENTRY(__invoke_psci_fn_smc)
+ __SMC(0)
+ bx lr
+ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index f73891b6b730..f90fdf4ce7c7 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -23,8 +23,6 @@
#include <asm/compiler.h>
#include <asm/errno.h>
-#include <asm/opcodes-sec.h>
-#include <asm/opcodes-virt.h>
#include <asm/psci.h>
#include <asm/system_misc.h>
@@ -33,6 +31,9 @@ struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u32, u32, u32, u32);
typedef int (*psci_initcall_t)(const struct device_node *);
+asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
+asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
+
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
@@ -71,40 +72,6 @@ static u32 psci_power_state_pack(struct psci_power_state state)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
}
-/*
- * The following two functions are invoked via the invoke_psci_fn pointer
- * and will not be inlined, allowing us to piggyback on the AAPCS.
- */
-static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
- u32 arg2)
-{
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r1")
- __asmeq("%2", "r2")
- __asmeq("%3", "r3")
- __HVC(0)
- : "+r" (function_id)
- : "r" (arg0), "r" (arg1), "r" (arg2));
-
- return function_id;
-}
-
-static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
- u32 arg2)
-{
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r1")
- __asmeq("%2", "r2")
- __asmeq("%3", "r3")
- __SMC(0)
- : "+r" (function_id)
- : "r" (arg0), "r" (arg1), "r" (arg2));
-
- return function_id;
-}
-
static int psci_get_version(void)
{
int err;
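
The removed inline-asm helpers relied on the AAPCS placing the first four arguments in r0-r3 and the return value in r0, which is exactly what the new psci-call.S entry points assume. A userspace-only sketch of the dispatch pattern, with stub conduits standing in for the privileged HVC/SMC instructions (the PSCI function ID below is illustrative, not authoritative):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef int (*psci_fn)(u32 function_id, u32 arg0, u32 arg1, u32 arg2);

static int fake_invoke_hvc(u32 fn, u32 a0, u32 a1, u32 a2)
{
	printf("HVC fn=%#x args=%u,%u,%u\n", fn, a0, a1, a2);  /* real code traps to EL2 */
	return 0;
}

static int fake_invoke_smc(u32 fn, u32 a0, u32 a1, u32 a2)
{
	printf("SMC fn=%#x args=%u,%u,%u\n", fn, a0, a1, a2);  /* real code traps to secure FW */
	return 0;
}

static psci_fn invoke_psci_fn;

int main(void)
{
	int use_hvc = 1;	/* would come from the DT "method" property */

	invoke_psci_fn = use_hvc ? fake_invoke_hvc : fake_invoke_smc;
	/* 0x84000003 is used here as an illustrative CPU_ON function ID */
	return invoke_psci_fn(0x84000003, 1, 0x80008000, 0);
}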
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
new file mode 100644
index 000000000000..1a4d232796be
--- /dev/null
+++ b/arch/arm/kernel/reboot.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 1996-2000 Russell King - Converted to ARM.
+ * Original Copyright (C) 1995 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+
+#include <asm/cacheflush.h>
+#include <asm/idmap.h>
+
+#include "reboot.h"
+
+typedef void (*phys_reset_t)(unsigned long);
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
+
+static void __soft_restart(void *addr)
+{
+ phys_reset_t phys_reset;
+
+ /* Take out a flat memory mapping. */
+ setup_mm_for_reboot();
+
+ /* Clean and invalidate caches */
+ flush_cache_all();
+
+ /* Turn off caching */
+ cpu_proc_fin();
+
+ /* Push out any further dirty data, and ensure cache is empty */
+ flush_cache_all();
+
+ /* Switch to the identity mapping. */
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset((unsigned long)addr);
+
+ /* Should never get here. */
+ BUG();
+}
+
+void _soft_restart(unsigned long addr, bool disable_l2)
+{
+ u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+ /* Disable interrupts first */
+ raw_local_irq_disable();
+ local_fiq_disable();
+
+ /* Disable the L2 if we're the last man standing. */
+ if (disable_l2)
+ outer_disable();
+
+ /* Change to the new stack and continue with the reset. */
+ call_with_stack(__soft_restart, (void *)addr, (void *)stack);
+
+ /* Should never get here. */
+ BUG();
+}
+
+void soft_restart(unsigned long addr)
+{
+ _soft_restart(addr, num_online_cpus() == 1);
+}
+
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
+void machine_shutdown(void)
+{
+ disable_nonboot_cpus();
+}
+
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
+void machine_halt(void)
+{
+ local_irq_disable();
+ smp_send_stop();
+
+ local_irq_disable();
+ while (1);
+}
+
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
+void machine_power_off(void)
+{
+ local_irq_disable();
+ smp_send_stop();
+
+ if (pm_power_off)
+ pm_power_off();
+}
+
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
+void machine_restart(char *cmd)
+{
+ local_irq_disable();
+ smp_send_stop();
+
+ if (arm_pm_restart)
+ arm_pm_restart(reboot_mode, cmd);
+ else
+ do_kernel_restart(cmd);
+
+ /* Give a grace period for failure to restart of 1s */
+ mdelay(1000);
+
+ /* Whoops - the platform was unable to reboot. Tell the user! */
+ printk("Reboot failed -- System halted\n");
+ local_irq_disable();
+ while (1);
+}
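
In _soft_restart() above, the temporary stack pointer is set to one element past the end of the static array, because ARM stacks grow downwards. A trivial sketch of that stack-top computation:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned long long soft_restart_stack[16];

int main(void)
{
	/* initial SP = one past the last element; pushes then move downwards */
	unsigned long long *sp = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

	printf("stack base %p, initial sp %p (%zu bytes available)\n",
	       (void *)soft_restart_stack, (void *)sp, sizeof(soft_restart_stack));
	return 0;
}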
diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
new file mode 100644
index 000000000000..bf7a0b1f076e
--- /dev/null
+++ b/arch/arm/kernel/reboot.h
@@ -0,0 +1,7 @@
+#ifndef REBOOT_H
+#define REBOOT_H
+
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+extern void _soft_restart(unsigned long addr, bool disable_l2);
+
+#endif
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 24b4a04846eb..36ed35073289 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -56,8 +56,6 @@ void *return_address(unsigned int level)
return NULL;
}
-#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-
-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
+#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1d60bebea4b8..6c777e908a24 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -372,30 +372,48 @@ void __init early_print(const char *str, ...)
static void __init cpuid_init_hwcaps(void)
{
- unsigned int divide_instrs, vmsa;
+ int block;
+ u32 isar5;
if (cpu_architecture() < CPU_ARCH_ARMv7)
return;
- divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
-
- switch (divide_instrs) {
- case 2:
+ block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
+ if (block >= 2)
elf_hwcap |= HWCAP_IDIVA;
- case 1:
+ if (block >= 1)
elf_hwcap |= HWCAP_IDIVT;
- }
/* LPAE implies atomic ldrd/strd instructions */
- vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
- if (vmsa >= 5)
+ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+ if (block >= 5)
elf_hwcap |= HWCAP_LPAE;
+
+ /* check for supported v8 Crypto instructions */
+ isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
+
+ block = cpuid_feature_extract_field(isar5, 4);
+ if (block >= 2)
+ elf_hwcap2 |= HWCAP2_PMULL;
+ if (block >= 1)
+ elf_hwcap2 |= HWCAP2_AES;
+
+ block = cpuid_feature_extract_field(isar5, 8);
+ if (block >= 1)
+ elf_hwcap2 |= HWCAP2_SHA1;
+
+ block = cpuid_feature_extract_field(isar5, 12);
+ if (block >= 1)
+ elf_hwcap2 |= HWCAP2_SHA2;
+
+ block = cpuid_feature_extract_field(isar5, 16);
+ if (block >= 1)
+ elf_hwcap2 |= HWCAP2_CRC32;
}
static void __init elf_hwcap_fixup(void)
{
unsigned id = read_cpuid_id();
- unsigned sync_prim;
/*
* HWCAP_TLS is available only on 1136 r1p0 and later,
@@ -416,9 +434,9 @@ static void __init elf_hwcap_fixup(void)
* avoid advertising SWP; it may not be atomic with
* multiprocessing cores.
*/
- sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) |
- ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f);
- if (sync_prim >= 0x13)
+ if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
+ (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
+ cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
elf_hwcap &= ~HWCAP_SWP;
}
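
The rewritten cpuid_init_hwcaps() above compares 4-bit ID-register fields against thresholds with >=; this works because (as assumed here) the extract helpers sign-extend each field, so an all-ones "not implemented" encoding compares as -1. A small sketch with a hypothetical ID_ISAR5 value:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the field-extract helper used above. */
static int feature_extract_field(uint32_t reg, int shift)
{
	return (int32_t)(reg << (28 - shift)) >> 28;	/* sign-extend the 4-bit field */
}

int main(void)
{
	uint32_t isar5 = 0x00011121;	/* hypothetical ID_ISAR5 value */

	/* Bits [7:4] >= 2 would advertise PMULL, >= 1 AES, mirroring the hunk. */
	printf("AES/PMULL field = %d\n", feature_extract_field(isar5, 4));	/* 2 */
	printf("SHA1 field      = %d\n", feature_extract_field(isar5, 8));	/* 1 */
	printf("CRC32 field     = %d\n", feature_extract_field(isar5, 16));	/* 1 */
	return 0;
}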
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 023ac905e4c3..423663e23791 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -318,17 +318,6 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
return frame;
}
-/*
- * translate the signal
- */
-static inline int map_sig(int sig)
-{
- struct thread_info *thread = current_thread_info();
- if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
- sig = thread->exec_domain->signal_invmap[sig];
- return sig;
-}
-
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
unsigned long __user *rc, void __user *frame)
@@ -412,7 +401,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
}
}
- regs->ARM_r0 = map_sig(ksig->sig);
+ regs->ARM_r0 = ksig->sig;
regs->ARM_sp = (unsigned long)frame;
regs->ARM_lr = retcode;
regs->ARM_pc = handler;
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index e1e60e5a7a27..7d37bfc50830 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -116,14 +116,7 @@ cpu_resume_after_mmu:
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
-/*
- * Note: Yes, part of the following code is located into the .data section.
- * This is to allow sleep_save_sp to be accessed with a relative load
- * while we can't rely on any MMU translation. We could have put
- * sleep_save_sp in the .text section as well, but some setups might
- * insist on it to be truly read-only.
- */
- .data
+ .text
.align
ENTRY(cpu_resume)
ARM_BE8(setend be) @ ensure we are in BE mode
@@ -145,6 +138,8 @@ ARM_BE8(setend be) @ ensure we are in BE mode
compute_mpidr_hash r1, r4, r5, r6, r0, r3
1:
adr r0, _sleep_save_sp
+ ldr r2, [r0]
+ add r0, r0, r2
ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
ldr r0, [r0, r1, lsl #2]
@@ -156,10 +151,12 @@ THUMB( bx r3 )
ENDPROC(cpu_resume)
.align 2
+_sleep_save_sp:
+ .long sleep_save_sp - .
mpidr_hash_ptr:
.long mpidr_hash - . @ mpidr_hash struct offset
+ .data
.type sleep_save_sp, #object
ENTRY(sleep_save_sp)
-_sleep_save_sp:
.space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
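
The new _sleep_save_sp literal stores the link-time offset of sleep_save_sp from the literal itself, and the added ldr/add pair rebuilds the address at run time, so the lookup still works while executing from physical addresses with the MMU off. A rough C analogue of that offset-and-add scheme (purely illustrative; the real code does this in assembly before any C environment exists):

#include <stdint.h>
#include <stdio.h>

static long sleep_save_sp[2];		/* stand-in for the real struct */
static long _sleep_save_sp_offset;	/* plays the role of ".long sleep_save_sp - ." */

int main(void)
{
	/* what the assembler/linker bakes into the literal */
	_sleep_save_sp_offset = (intptr_t)sleep_save_sp - (intptr_t)&_sleep_save_sp_offset;

	/* what "adr r0, _sleep_save_sp; ldr r2, [r0]; add r0, r0, r2" reconstructs */
	long *resolved = (long *)((intptr_t)&_sleep_save_sp_offset + _sleep_save_sp_offset);

	printf("resolved %p, expected %p\n", (void *)resolved, (void *)sleep_save_sp);
	return resolved == sleep_save_sp ? 0 : 1;
}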
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 86ef244c5a24..cca5b8758185 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,11 @@ void __init smp_init_cpus(void)
smp_ops.smp_init_cpus();
}
+int platform_can_secondary_boot(void)
+{
+ return !!smp_ops.smp_boot_secondary;
+}
+
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index afdd51e30bec..1361756782c7 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -42,7 +42,7 @@
" cmp %0, #0\n" \
" movne %0, %4\n" \
"2:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .section .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %5\n" \
" b 2b\n" \
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 788e23fe64d8..3dce1a342030 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -505,12 +505,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
static int bad_syscall(int n, struct pt_regs *regs)
{
- struct thread_info *thread = current_thread_info();
siginfo_t info;
- if ((current->personality & PER_MASK) != PER_LINUX &&
- thread->exec_domain->handler) {
- thread->exec_domain->handler(n, regs);
+ if ((current->personality & PER_MASK) != PER_LINUX) {
+ send_sig(SIGSEGV, current, 1);
return regs->ARM_r0;
}
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
new file mode 100644
index 000000000000..efe17dd9b921
--- /dev/null
+++ b/arch/arm/kernel/vdso.c
@@ -0,0 +1,337 @@
+/*
+ * Adapted from arm64 version.
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2015 Mentor Graphics Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/vmalloc.h>
+#include <asm/arch_timer.h>
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define MAX_SYMNAME 64
+
+static struct page **vdso_text_pagelist;
+
+/* Total number of pages needed for the data and text portions of the VDSO. */
+unsigned int vdso_total_pages __read_mostly;
+
+/*
+ * The VDSO data page.
+ */
+static union vdso_data_store vdso_data_store __page_aligned_data;
+static struct vdso_data *vdso_data = &vdso_data_store.data;
+
+static struct page *vdso_data_page;
+static struct vm_special_mapping vdso_data_mapping = {
+ .name = "[vvar]",
+ .pages = &vdso_data_page,
+};
+
+static struct vm_special_mapping vdso_text_mapping = {
+ .name = "[vdso]",
+};
+
+struct elfinfo {
+ Elf32_Ehdr *hdr; /* ptr to ELF */
+ Elf32_Sym *dynsym; /* ptr to .dynsym section */
+ unsigned long dynsymsize; /* size of .dynsym section */
+ char *dynstr; /* ptr to .dynstr section */
+};
+
+/* Cached result of boot-time check for whether the arch timer exists,
+ * and if so, whether the virtual counter is usable.
+ */
+static bool cntvct_ok __read_mostly;
+
+static bool __init cntvct_functional(void)
+{
+ struct device_node *np;
+ bool ret = false;
+
+ if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+ goto out;
+
+ /* The arm_arch_timer core should export
+ * arch_timer_use_virtual or similar so we don't have to do
+ * this.
+ */
+ np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+ if (!np)
+ goto out_put;
+
+ if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
+ goto out_put;
+
+ ret = true;
+
+out_put:
+ of_node_put(np);
+out:
+ return ret;
+}
+
+static void * __init find_section(Elf32_Ehdr *ehdr, const char *name,
+ unsigned long *size)
+{
+ Elf32_Shdr *sechdrs;
+ unsigned int i;
+ char *secnames;
+
+ /* Grab section headers and strings so we can tell who is who */
+ sechdrs = (void *)ehdr + ehdr->e_shoff;
+ secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
+
+ /* Find the section they want */
+ for (i = 1; i < ehdr->e_shnum; i++) {
+ if (strcmp(secnames + sechdrs[i].sh_name, name) == 0) {
+ if (size)
+ *size = sechdrs[i].sh_size;
+ return (void *)ehdr + sechdrs[i].sh_offset;
+ }
+ }
+
+ if (size)
+ *size = 0;
+ return NULL;
+}
+
+static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname)
+{
+ unsigned int i;
+
+ for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
+ char name[MAX_SYMNAME], *c;
+
+ if (lib->dynsym[i].st_name == 0)
+ continue;
+ strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
+ MAX_SYMNAME);
+ c = strchr(name, '@');
+ if (c)
+ *c = 0;
+ if (strcmp(symname, name) == 0)
+ return &lib->dynsym[i];
+ }
+ return NULL;
+}
+
+static void __init vdso_nullpatch_one(struct elfinfo *lib, const char *symname)
+{
+ Elf32_Sym *sym;
+
+ sym = find_symbol(lib, symname);
+ if (!sym)
+ return;
+
+ sym->st_name = 0;
+}
+
+static void __init patch_vdso(void *ehdr)
+{
+ struct elfinfo einfo;
+
+ einfo = (struct elfinfo) {
+ .hdr = ehdr,
+ };
+
+ einfo.dynsym = find_section(einfo.hdr, ".dynsym", &einfo.dynsymsize);
+ einfo.dynstr = find_section(einfo.hdr, ".dynstr", NULL);
+
+ /* If the virtual counter is absent or non-functional we don't
+ * want programs to incur the slight additional overhead of
+ * dispatching through the VDSO only to fall back to syscalls.
+ */
+ if (!cntvct_ok) {
+ vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
+ vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+ }
+}
+
+static int __init vdso_init(void)
+{
+ unsigned int text_pages;
+ int i;
+
+ if (memcmp(&vdso_start, "\177ELF", 4)) {
+ pr_err("VDSO is not a valid ELF object!\n");
+ return -ENOEXEC;
+ }
+
+ text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+ pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+
+ /* Allocate the VDSO text pagelist */
+ vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (vdso_text_pagelist == NULL)
+ return -ENOMEM;
+
+ /* Grab the VDSO data page. */
+ vdso_data_page = virt_to_page(vdso_data);
+
+ /* Grab the VDSO text pages. */
+ for (i = 0; i < text_pages; i++) {
+ struct page *page;
+
+ page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+ vdso_text_pagelist[i] = page;
+ }
+
+ vdso_text_mapping.pages = vdso_text_pagelist;
+
+ vdso_total_pages = 1; /* for the data/vvar page */
+ vdso_total_pages += text_pages;
+
+ cntvct_ok = cntvct_functional();
+
+ patch_vdso(&vdso_start);
+
+ return 0;
+}
+arch_initcall(vdso_init);
+
+static int install_vvar(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma;
+
+ vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_MAYREAD,
+ &vdso_data_mapping);
+
+ return IS_ERR(vma) ? PTR_ERR(vma) : 0;
+}
+
+/* assumes mmap_sem is write-locked */
+void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma;
+ unsigned long len;
+
+ mm->context.vdso = 0;
+
+ if (vdso_text_pagelist == NULL)
+ return;
+
+ if (install_vvar(mm, addr))
+ return;
+
+ /* Account for vvar page. */
+ addr += PAGE_SIZE;
+ len = (vdso_total_pages - 1) << PAGE_SHIFT;
+
+ vma = _install_special_mapping(mm, addr, len,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &vdso_text_mapping);
+
+ if (!IS_ERR(vma))
+ mm->context.vdso = addr;
+}
+
+static void vdso_write_begin(struct vdso_data *vdata)
+{
+ ++vdso_data->seq_count;
+ smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
+}
+
+static void vdso_write_end(struct vdso_data *vdata)
+{
+ smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
+ ++vdso_data->seq_count;
+}
+
+static bool tk_is_cntvct(const struct timekeeper *tk)
+{
+ if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+ return false;
+
+ if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * update_vsyscall - update the vdso data page
+ *
+ * Increment the sequence counter, making it odd, indicating to
+ * userspace that an update is in progress. Update the fields used
+ * for coarse clocks and, if the architected system timer is in use,
+ * the fields used for high precision clocks. Increment the sequence
+ * counter again, making it even, indicating to userspace that the
+ * update is finished.
+ *
+ * Userspace is expected to sample seq_count before reading any other
+ * fields from the data page. If seq_count is odd, userspace is
+ * expected to wait until it becomes even. After copying data from
+ * the page, userspace must sample seq_count again; if it has changed
+ * from its previous value, userspace must retry the whole sequence.
+ *
+ * Calls to update_vsyscall are serialized by the timekeeping core.
+ */
+void update_vsyscall(struct timekeeper *tk)
+{
+ struct timespec xtime_coarse;
+ struct timespec64 *wtm = &tk->wall_to_monotonic;
+
+ if (!cntvct_ok) {
+ /* The entry points have been zeroed, so there is no
+ * point in updating the data page.
+ */
+ return;
+ }
+
+ vdso_write_begin(vdso_data);
+
+ xtime_coarse = __current_kernel_time();
+ vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
+ vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
+ vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->wtm_clock_sec = wtm->tv_sec;
+ vdso_data->wtm_clock_nsec = wtm->tv_nsec;
+
+ if (vdso_data->tk_is_cntvct) {
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+ vdso_data->xtime_clock_sec = tk->xtime_sec;
+ vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
+ vdso_data->cs_mult = tk->tkr_mono.mult;
+ vdso_data->cs_shift = tk->tkr_mono.shift;
+ vdso_data->cs_mask = tk->tkr_mono.mask;
+ }
+
+ vdso_write_end(vdso_data);
+
+ flush_dcache_page(virt_to_page(vdso_data));
+}
+
+void update_vsyscall_tz(void)
+{
+ vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+ vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+ flush_dcache_page(virt_to_page(vdso_data));
+}
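
The update_vsyscall() comment above describes the writer side of a sequence-count protocol; the reader in the vdso is expected to spin while the count is odd and to retry if it changes across the copy. A compact userspace sketch of that reader loop (the field names and barriers are illustrative, not the real struct vdso_data layout):

#include <stdint.h>

struct fake_vdso_data {
	volatile uint32_t seq_count;
	uint64_t xtime_coarse_sec;
	uint64_t xtime_coarse_nsec;
};

static uint64_t read_coarse_sec(const struct fake_vdso_data *vd)
{
	uint32_t seq;
	uint64_t sec;

	do {
		while ((seq = vd->seq_count) & 1)
			;					/* writer in progress: wait */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* pairs with the writer's barrier */
		sec = vd->xtime_coarse_sec;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (vd->seq_count != seq);				/* writer updated mid-copy: retry */

	return sec;
}

int main(void)
{
	static struct fake_vdso_data vd = { .seq_count = 0, .xtime_coarse_sec = 1234 };

	return read_coarse_sec(&vd) == 1234 ? 0 : 1;
}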
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b31aa73e8076..8b60fde5ce48 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -11,7 +11,7 @@
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif
-
+
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
@@ -23,7 +23,7 @@
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
- . = ALIGN(32); \
+ . = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
@@ -74,7 +74,7 @@ SECTIONS
ARM_EXIT_DISCARD(EXIT_DATA)
EXIT_CALL
#ifndef CONFIG_MMU
- *(.fixup)
+ *(.text.fixup)
*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
@@ -100,6 +100,7 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
+ IDMAP_TEXT
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -108,10 +109,6 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
- IDMAP_TEXT
-#ifdef CONFIG_MMU
- *(.fixup)
-#endif
*(.gnu.warning)
*(.glue_7)
*(.glue_7t)
@@ -346,8 +343,11 @@ SECTIONS
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
/*
- * The HYP init code can't be more than a page long.
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
* The above comment applies as well.
*/
-ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
+ "HYP init code too big or misaligned")
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 6f536451ab78..d9631ecddd56 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -671,8 +671,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (!irqchip_in_kernel(kvm))
return -ENXIO;
- if (irq_num < VGIC_NR_PRIVATE_IRQS ||
- irq_num > KVM_ARM_IRQ_GIC_MAX)
+ if (irq_num < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 15b050d46fc9..1d5accbd3dcf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -35,9 +35,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
+static pgd_t *merged_hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
-static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
@@ -405,9 +405,6 @@ void free_boot_hyp_pgd(void)
if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- free_page((unsigned long)init_bounce_page);
- init_bounce_page = NULL;
-
mutex_unlock(&kvm_hyp_pgd_mutex);
}
@@ -438,6 +435,11 @@ void free_hyp_pgds(void)
free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL;
}
+ if (merged_hyp_pgd) {
+ clear_page(merged_hyp_pgd);
+ free_page((unsigned long)merged_hyp_pgd);
+ merged_hyp_pgd = NULL;
+ }
mutex_unlock(&kvm_hyp_pgd_mutex);
}
@@ -1622,12 +1624,18 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
phys_addr_t kvm_mmu_get_httbr(void)
{
- return virt_to_phys(hyp_pgd);
+ if (__kvm_cpu_uses_extended_idmap())
+ return virt_to_phys(merged_hyp_pgd);
+ else
+ return virt_to_phys(hyp_pgd);
}
phys_addr_t kvm_mmu_get_boot_httbr(void)
{
- return virt_to_phys(boot_hyp_pgd);
+ if (__kvm_cpu_uses_extended_idmap())
+ return virt_to_phys(merged_hyp_pgd);
+ else
+ return virt_to_phys(boot_hyp_pgd);
}
phys_addr_t kvm_get_idmap_vector(void)
@@ -1643,39 +1651,11 @@ int kvm_mmu_init(void)
hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
- if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
- /*
- * Our init code is crossing a page boundary. Allocate
- * a bounce page, copy the code over and use that.
- */
- size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
- phys_addr_t phys_base;
-
- init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
- if (!init_bounce_page) {
- kvm_err("Couldn't allocate HYP init bounce page\n");
- err = -ENOMEM;
- goto out;
- }
-
- memcpy(init_bounce_page, __hyp_idmap_text_start, len);
- /*
- * Warning: the code we just copied to the bounce page
- * must be flushed to the point of coherency.
- * Otherwise, the data may be sitting in L2, and HYP
- * mode won't be able to observe it as it runs with
- * caches off at that point.
- */
- kvm_flush_dcache_to_poc(init_bounce_page, len);
-
- phys_base = kvm_virt_to_phys(init_bounce_page);
- hyp_idmap_vector += phys_base - hyp_idmap_start;
- hyp_idmap_start = phys_base;
- hyp_idmap_end = phys_base + len;
-
- kvm_info("Using HYP init bounce page @%lx\n",
- (unsigned long)phys_base);
- }
+ /*
+ * We rely on the linker script to ensure at build time that the HYP
+ * init code does not cross a page boundary.
+ */
+ BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
@@ -1698,6 +1678,17 @@ int kvm_mmu_init(void)
goto out;
}
+ if (__kvm_cpu_uses_extended_idmap()) {
+ merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (!merged_hyp_pgd) {
+ kvm_err("Failed to allocate extra HYP pgd\n");
+ goto out;
+ }
+ __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
+ hyp_idmap_start);
+ return 0;
+ }
+
/* Map the very same page at the trampoline VA */
err = __create_hyp_mappings(boot_hyp_pgd,
TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
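
The BUG_ON() above and the tightened linker-script assertion encode the same property: the first and last byte of the HYP init code must share a page. A worked example of the XOR-and-mask form, with made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* start and (end - 1) land in the same page iff they agree in every PAGE_MASK bit */
static int crosses_page(unsigned long start, unsigned long end)
{
	return ((start ^ (end - 1)) & PAGE_MASK) != 0;
}

int main(void)
{
	/* 0x300 bytes of init code starting mid-page: stays inside one page */
	printf("%d\n", crosses_page(0xc0100800UL, 0xc0100b00UL));	/* 0 */
	/* the same code starting near the end of a page: spills into the next */
	printf("%d\n", crosses_page(0xc0100e00UL, 0xc0101100UL));	/* 1 */
	return 0;
}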
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 14a0d988c82c..1710fd7db2d5 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -47,7 +47,7 @@ USER( strnebt r2, [r0])
ENDPROC(__clear_user)
ENDPROC(__clear_user_std)
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
.align 0
9001: ldmfd sp!, {r0, pc}
.popsection
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index a9d3db16ecb5..9648b0675a3e 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -100,7 +100,7 @@ WEAK(__copy_to_user)
ENDPROC(__copy_to_user)
ENDPROC(__copy_to_user_std)
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
.align 0
copy_abort_preamble
ldmfd sp!, {r1, r2, r3}
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 7d08b43d2c0e..1d0957e61f89 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -68,7 +68,7 @@
* so properly, we would have to add in whatever registers were loaded before
* the fault, which, with the current asm above is not predictable.
*/
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
.align 4
9001: mov r4, #-EFAULT
ldr r5, [sp, #8*4] @ *err_ptr
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 312d43eb686a..8044591dca72 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -83,6 +83,12 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
NSEC_PER_SEC, 3600);
res = cyc_to_ns(1ULL, new_mult, new_shift);
+ if (res > 1000) {
+ pr_err("Ignoring delay timer %ps, which has insufficient resolution of %lluns\n",
+ timer, res);
+ return;
+ }
+
if (!delay_calibrated && (!delay_res || (res < delay_res))) {
pr_info("Switching to timer-based delay loop, resolution %lluns\n", res);
delay_timer = timer;
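
The new check above rejects any registered delay timer whose single-cycle resolution exceeds 1000ns. To first order that resolution is NSEC_PER_SEC divided by the timer rate, so anything slower than 1 MHz is ignored; a quick illustration with assumed rates:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long rates[] = { 32768, 24000000, 1000000 };	/* Hz, illustrative */

	for (int i = 0; i < 3; i++) {
		unsigned long long res = NSEC_PER_SEC / rates[i];	/* ~ns per cycle */
		printf("%llu Hz -> ~%lluns/cycle: %s\n", rates[i], res,
		       res > 1000 ? "ignored" : "usable");
	}
	return 0;
}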
diff --git a/arch/arm/mach-alpine/Kconfig b/arch/arm/mach-alpine/Kconfig
new file mode 100644
index 000000000000..2c44b930505a
--- /dev/null
+++ b/arch/arm/mach-alpine/Kconfig
@@ -0,0 +1,12 @@
+config ARCH_ALPINE
+ bool "Annapurna Labs Alpine platform" if ARCH_MULTI_V7
+ select ARM_AMBA
+ select ARM_GIC
+ select GENERIC_IRQ_CHIP
+ select HAVE_ARM_ARCH_TIMER
+ select HAVE_SMP
+ select MFD_SYSCON
+ select PCI
+ select PCI_HOST_GENERIC
+ help
+ This enables support for the Annapurna Labs Alpine V1 boards.
diff --git a/arch/arm/mach-alpine/Makefile b/arch/arm/mach-alpine/Makefile
new file mode 100644
index 000000000000..b6674890be71
--- /dev/null
+++ b/arch/arm/mach-alpine/Makefile
@@ -0,0 +1,2 @@
+obj-y += alpine_machine.o
+obj-$(CONFIG_SMP) += platsmp.o alpine_cpu_pm.o
diff --git a/arch/arm/mach-alpine/alpine_cpu_pm.c b/arch/arm/mach-alpine/alpine_cpu_pm.c
new file mode 100644
index 000000000000..121c77c4b53c
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_cpu_pm.c
@@ -0,0 +1,70 @@
+/*
+ * Low-level power-management support for Alpine platform.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "alpine_cpu_pm.h"
+#include "alpine_cpu_resume.h"
+
+/* NB registers */
+#define AL_SYSFAB_POWER_CONTROL(cpu) (0x2000 + (cpu)*0x100 + 0x20)
+
+static struct regmap *al_sysfabric;
+static struct al_cpu_resume_regs __iomem *al_cpu_resume_regs;
+static int wakeup_supported;
+
+int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr)
+{
+ if (!wakeup_supported)
+ return -ENOSYS;
+
+ /*
+ * Set CPU resume address -
+ * secure firmware running on boot will jump to this address
+ * after setting proper CPU mode, and initializing e.g. secure
+ * regs (the same mode all CPUs are booted to - usually HYP)
+ */
+ writel(phys_resume_addr,
+ &al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr);
+
+ /* Power-up the CPU */
+ regmap_write(al_sysfabric, AL_SYSFAB_POWER_CONTROL(phys_cpu), 0);
+
+ return 0;
+}
+
+void __init alpine_cpu_pm_init(void)
+{
+ struct device_node *np;
+ uint32_t watermark;
+
+ al_sysfabric = syscon_regmap_lookup_by_compatible("al,alpine-sysfabric-service");
+
+ np = of_find_compatible_node(NULL, NULL, "al,alpine-cpu-resume");
+ al_cpu_resume_regs = of_iomap(np, 0);
+
+ wakeup_supported = !IS_ERR(al_sysfabric) && al_cpu_resume_regs;
+
+ if (wakeup_supported) {
+ watermark = readl(&al_cpu_resume_regs->watermark);
+ wakeup_supported = (watermark & AL_CPU_RESUME_MAGIC_NUM_MASK)
+ == AL_CPU_RESUME_MAGIC_NUM;
+ }
+}
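
alpine_cpu_pm_init() above only enables wake-up support when the resume block's watermark carries the expected magic in its top 24 bits (the low byte is masked off). A minimal sketch of that validation, reusing the constants from alpine_cpu_resume.h further down:

#include <stdint.h>
#include <stdio.h>

#define AL_CPU_RESUME_MAGIC_NUM       0xf0e1d200
#define AL_CPU_RESUME_MAGIC_NUM_MASK  0xffffff00

static int wakeup_supported(uint32_t watermark)
{
	return (watermark & AL_CPU_RESUME_MAGIC_NUM_MASK) == AL_CPU_RESUME_MAGIC_NUM;
}

int main(void)
{
	printf("%d\n", wakeup_supported(0xf0e1d203));	/* 1: low byte is ignored */
	printf("%d\n", wakeup_supported(0xdeadbeef));	/* 0: not a resume block */
	return 0;
}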
diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-alpine/alpine_cpu_pm.h
index 2d9ca0455745..5179e697c492 100644
--- a/arch/arm/mach-at91/include/mach/io.h
+++ b/arch/arm/mach-alpine/alpine_cpu_pm.h
@@ -1,7 +1,7 @@
/*
- * arch/arm/mach-at91/include/mach/io.h
+ * Low-level power-management support for Alpine platform.
*
- * Copyright (C) 2003 SAN People
+ * Copyright (C) 2015 Annapurna Labs Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,16 +12,15 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __ASM_ARCH_IO_H
-#define __ASM_ARCH_IO_H
+#ifndef __ALPINE_CPU_PM_H__
+#define __ALPINE_CPU_PM_H__
+
+/* Alpine CPU Power Management Services Initialization */
+void alpine_cpu_pm_init(void);
-#define IO_SPACE_LIMIT 0xFFFFFFFF
-#define __io(a) __typesafe_io(a)
+/* Wake-up a CPU */
+int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr);
-#endif
+#endif /* __ALPINE_CPU_PM_H__ */
diff --git a/arch/arm/mach-alpine/alpine_cpu_resume.h b/arch/arm/mach-alpine/alpine_cpu_resume.h
new file mode 100644
index 000000000000..c80150c0d2d8
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_cpu_resume.h
@@ -0,0 +1,38 @@
+/*
+ * Annapurna Labs cpu-resume register structure.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ALPINE_CPU_RESUME_H_
+#define ALPINE_CPU_RESUME_H_
+
+/* Per-cpu regs */
+struct al_cpu_resume_regs_per_cpu {
+ uint32_t flags;
+ uint32_t resume_addr;
+};
+
+/* general regs */
+struct al_cpu_resume_regs {
+ /* Watermark for validating the CPU resume struct */
+ uint32_t watermark;
+ uint32_t flags;
+ struct al_cpu_resume_regs_per_cpu per_cpu[];
+};
+
+/* The expected magic number for validating the resume addresses */
+#define AL_CPU_RESUME_MAGIC_NUM 0xf0e1d200
+#define AL_CPU_RESUME_MAGIC_NUM_MASK 0xffffff00
+
+#endif /* ALPINE_CPU_RESUME_H_ */
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/arm/mach-alpine/alpine_machine.c
index a7e52f91a078..b8e2145e962b 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ b/arch/arm/mach-alpine/alpine_machine.c
@@ -1,7 +1,7 @@
/*
- * Celleb/Beat Interrupt controller
+ * Machine declaration for Alpine platforms.
*
- * (C) Copyright 2006 TOSHIBA CORPORATION
+ * Copyright (C) 2015 Annapurna Labs Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,19 +12,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#ifndef ASM_BEAT_PIC_H
-#define ASM_BEAT_PIC_H
-#ifdef __KERNEL__
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
-extern void beatic_init_IRQ(void);
-extern unsigned int beatic_get_irq(void);
-extern void beatic_deinit_IRQ(void);
+static const char * const al_match[] __initconst = {
+ "al,alpine",
+ NULL,
+};
-#endif
-#endif /* ASM_BEAT_PIC_H */
+DT_MACHINE_START(AL_DT, "Annapurna Labs Alpine")
+ .dt_compat = al_match,
+MACHINE_END
diff --git a/arch/arm/mach-alpine/platsmp.c b/arch/arm/mach-alpine/platsmp.c
new file mode 100644
index 000000000000..f78429f48bd6
--- /dev/null
+++ b/arch/arm/mach-alpine/platsmp.c
@@ -0,0 +1,49 @@
+/*
+ * SMP operations for Alpine platform.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <asm/smp_plat.h>
+
+#include "alpine_cpu_pm.h"
+
+static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ phys_addr_t addr;
+
+ addr = virt_to_phys(secondary_startup);
+
+ if (addr > (phys_addr_t)(uint32_t)(-1)) {
+ pr_err("FAIL: resume address over 32bit (%pa)", &addr);
+ return -EINVAL;
+ }
+
+ return alpine_cpu_wakeup(cpu_logical_map(cpu), (uint32_t)addr);
+}
+
+static void __init alpine_smp_prepare_cpus(unsigned int max_cpus)
+{
+ alpine_cpu_pm_init();
+}
+
+static struct smp_operations alpine_smp_ops __initdata = {
+ .smp_prepare_cpus = alpine_smp_prepare_cpus,
+ .smp_boot_secondary = alpine_boot_secondary,
+};
+CPU_METHOD_OF_DECLARE(alpine_smp, "al,alpine-smp", &alpine_smp_ops);
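
alpine_boot_secondary() above refuses to hand the firmware a secondary-startup address that does not fit in the 32-bit resume register, which can happen on LPAE systems with physical memory above 4GB. A tiny sketch of that range check with illustrative addresses:

#include <stdint.h>
#include <stdio.h>

static int wakeup_addr_ok(uint64_t phys)
{
	return phys <= UINT32_MAX;	/* must be representable in the 32-bit register */
}

int main(void)
{
	printf("%d\n", wakeup_addr_ok(0x80008000ULL));	/* 1: fits in 32 bits */
	printf("%d\n", wakeup_addr_ok(0x880008000ULL));	/* 0: LPAE address above 4GB */
	return 0;
}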
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index c74a44324e5b..fd95f34945f4 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,55 +1,15 @@
-if ARCH_AT91
-
-config HAVE_AT91_UTMI
- bool
-
-config HAVE_AT91_USB_CLK
- bool
-
-config COMMON_CLK_AT91
- bool
- select COMMON_CLK
-
-config HAVE_AT91_SMD
- bool
-
-config HAVE_AT91_H32MX
- bool
-
-config SOC_SAMA5
- bool
- select ATMEL_AIC5_IRQ
+menuconfig ARCH_AT91
+ bool "Atmel SoCs"
+ depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+ select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK_AT91
- select CPU_V7
- select GENERIC_CLOCKEVENTS
- select MEMORY
- select ATMEL_SDRAMC
- select PHYLIB if NETDEVICES
-
-menu "Atmel AT91 System-on-Chip"
-
-choice
-
- prompt "Core type"
-
-config SOC_SAM_V4_V5
- bool "ARM9 AT91SAM9/AT91RM9200"
- help
- Select this if you are using one of Atmel's AT91SAM9 or
- AT91RM9200 SoC.
-
-config SOC_SAM_V7
- bool "Cortex A5"
- help
- Select this if you are using one of Atmel's SAMA5D3 SoC.
-
-endchoice
+ select PINCTRL
+ select PINCTRL_AT91
+ select SOC_BUS
-comment "Atmel AT91 Processor"
-
-if SOC_SAM_V7
+if ARCH_AT91
config SOC_SAMA5D3
- bool "SAMA5D3 family"
+ bool "SAMA5D3 family" if ARCH_MULTI_V7
select SOC_SAMA5
select HAVE_FB_ATMEL
select HAVE_AT91_UTMI
@@ -60,9 +20,8 @@ config SOC_SAMA5D3
This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
config SOC_SAMA5D4
- bool "SAMA5D4 family"
+ bool "SAMA5D4 family" if ARCH_MULTI_V7
select SOC_SAMA5
- select CLKSRC_MMIO
select CACHE_L2X0
select HAVE_FB_ATMEL
select HAVE_AT91_UTMI
@@ -71,29 +30,31 @@ config SOC_SAMA5D4
select HAVE_AT91_H32MX
help
Select this if you are using one of Atmel's SAMA5D4 family SoC.
-endif
-if SOC_SAM_V4_V5
config SOC_AT91RM9200
- bool "AT91RM9200"
+ bool "AT91RM9200" if ARCH_MULTI_V4T
select ATMEL_AIC_IRQ
- select COMMON_CLK_AT91
+ select ATMEL_ST
select CPU_ARM920T
- select GENERIC_CLOCKEVENTS
select HAVE_AT91_USB_CLK
+ select MIGHT_HAVE_PCI
+ select SOC_SAM_V4_V5
+ select SRAM if PM
+ help
+ Select this if you are using Atmel's AT91RM9200 SoC.
config SOC_AT91SAM9
- bool "AT91SAM9"
+ bool "AT91SAM9" if ARCH_MULTI_V5
select ATMEL_AIC_IRQ
select ATMEL_SDRAMC
- select COMMON_CLK_AT91
select CPU_ARM926T
- select GENERIC_CLOCKEVENTS
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
select HAVE_AT91_UTMI
select HAVE_FB_ATMEL
select MEMORY
+ select SOC_SAM_V4_V5
+ select SRAM if PM
help
Select this if you are using one of those Atmel SoC:
AT91SAM9260
@@ -112,40 +73,35 @@ config SOC_AT91SAM9
AT91SAM9X25
AT91SAM9X35
AT91SAM9XE
-endif # SOC_SAM_V4_V5
-comment "AT91 Feature Selections"
+config HAVE_AT91_UTMI
+ bool
-config AT91_SLOW_CLOCK
- bool "Suspend-to-RAM disables main oscillator"
- select SRAM
- depends on SUSPEND
- help
- Select this if you want Suspend-to-RAM to save the most power
- possible (without powering off the CPU) by disabling the PLLs
- and main oscillator so that only the 32 KiHz clock is available.
+config HAVE_AT91_USB_CLK
+ bool
- When only that slow-clock is available, some peripherals lose
- functionality. Many can't issue wakeup events unless faster
- clocks are available. Some lose their operating state and
- need to be completely re-initialized.
+config COMMON_CLK_AT91
+ bool
+ select COMMON_CLK
-config AT91_TIMER_HZ
- int "Kernel HZ (jiffies per second)"
- range 32 1024
- depends on ARCH_AT91
- default "128" if SOC_AT91RM9200
- default "100"
- help
- On AT91rm9200 chips where you're using a system clock derived
- from the 32768 Hz hardware clock, this tick rate should divide
- it exactly: use a power-of-two value, such as 128 or 256, to
- reduce timing errors caused by rounding.
+config HAVE_AT91_SMD
+ bool
+
+config HAVE_AT91_H32MX
+ bool
- On AT91sam926x chips, or otherwise when using a higher precision
- system clock (of at least several MHz), rounding is less of a
- problem so it can be safer to use a decimal values like 100.
+config SOC_SAM_V4_V5
+ bool
-endmenu
+config SOC_SAM_V7
+ bool
+
+config SOC_SAMA5
+ bool
+ select ATMEL_AIC5_IRQ
+ select ATMEL_SDRAMC
+ select MEMORY
+ select SOC_SAM_V7
+ select SRAM if PM
endif
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 827fdbcce1c7..4fa8b4541e64 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -1,20 +1,25 @@
#
# Makefile for the linux kernel.
#
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include
+asflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include
-obj-y := setup.o
+obj-y := soc.o
obj-$(CONFIG_SOC_AT91SAM9) += sam9_smc.o
# CPU-specific support
-obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o at91rm9200_time.o
+obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9.o
obj-$(CONFIG_SOC_SAMA5) += sama5.o
# Power Management
obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_AT91_SLOW_CLOCK) += pm_slowclock.o
+obj-$(CONFIG_PM) += pm_suspend.o
+ifeq ($(CONFIG_CPU_V7),y)
+AFLAGS_pm_suspend.o := -march=armv7-a
+endif
ifeq ($(CONFIG_PM_DEBUG),y)
CFLAGS_pm.o += -DDEBUG
endif
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 8fcfb70f7124..eaf58f88ef5d 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -8,60 +8,42 @@
* Licensed under GPLv2 or later.
*/
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/clk-provider.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/clk-provider.h>
-#include <asm/setup.h>
-#include <asm/irq.h>
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
#include <asm/system_misc.h>
-#include <mach/at91_st.h>
-
#include "generic.h"
+#include "soc.h"
-static void at91rm9200_restart(enum reboot_mode reboot_mode, const char *cmd)
-{
- /*
- * Perform a hardware reset with the use of the Watchdog timer.
- */
- at91_st_write(AT91_ST_WDMR, AT91_ST_RSTEN | AT91_ST_EXTEN | 1);
- at91_st_write(AT91_ST_CR, AT91_ST_WDRST);
-}
-
-static void __init at91rm9200_dt_timer_init(void)
-{
- of_clk_init(NULL);
- at91rm9200_timer_init();
-}
+static const struct at91_soc rm9200_socs[] = {
+ AT91_SOC(AT91RM9200_CIDR_MATCH, 0, "at91rm9200 BGA", "at91rm9200"),
+ { /* sentinel */ },
+};
static void __init at91rm9200_dt_device_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ struct soc_device *soc;
+ struct device *soc_dev = NULL;
+
+ soc = at91_soc_init(rm9200_socs);
+ if (soc != NULL)
+ soc_dev = soc_device_to_device(soc);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
arm_pm_idle = at91rm9200_idle;
- arm_pm_restart = at91rm9200_restart;
at91rm9200_pm_init();
}
-
-
static const char *at91rm9200_dt_board_compat[] __initconst = {
"atmel,at91rm9200",
NULL
};
DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200")
- .init_time = at91rm9200_dt_timer_init,
- .map_io = at91_map_io,
.init_machine = at91rm9200_dt_device_init,
.dt_compat = at91rm9200_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
index 56e3ba73ec40..e47a2093a0e7 100644
--- a/arch/arm/mach-at91/at91sam9.c
+++ b/arch/arm/mach-at91/at91sam9.c
@@ -7,29 +7,68 @@
* Licensed under GPLv2 or later.
*/
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gpio.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/clk-provider.h>
-#include <asm/system_misc.h>
-#include <asm/setup.h>
-#include <asm/irq.h>
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
+#include <asm/system_misc.h>
#include "generic.h"
+#include "soc.h"
-static void __init at91sam9_dt_device_init(void)
+static const struct at91_soc at91sam9_socs[] = {
+ AT91_SOC(AT91SAM9260_CIDR_MATCH, 0, "at91sam9260", NULL),
+ AT91_SOC(AT91SAM9261_CIDR_MATCH, 0, "at91sam9261", NULL),
+ AT91_SOC(AT91SAM9263_CIDR_MATCH, 0, "at91sam9263", NULL),
+ AT91_SOC(AT91SAM9G20_CIDR_MATCH, 0, "at91sam9g20", NULL),
+ AT91_SOC(AT91SAM9RL64_CIDR_MATCH, 0, "at91sam9rl64", NULL),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M11_EXID_MATCH,
+ "at91sam9m11", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M10_EXID_MATCH,
+ "at91sam9m10", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G46_EXID_MATCH,
+ "at91sam9g46", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G45_EXID_MATCH,
+ "at91sam9g45", "at91sam9g45"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G15_EXID_MATCH,
+ "at91sam9g15", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G35_EXID_MATCH,
+ "at91sam9g35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X35_EXID_MATCH,
+ "at91sam9x35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G25_EXID_MATCH,
+ "at91sam9g25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X25_EXID_MATCH,
+ "at91sam9x25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN12_EXID_MATCH,
+ "at91sam9cn12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9N12_EXID_MATCH,
+ "at91sam9n12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN11_EXID_MATCH,
+ "at91sam9cn11", "at91sam9n12"),
+ AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
+ AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
+ AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
+ { /* sentinel */ },
+};
+
+static void __init at91sam9_common_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ struct soc_device *soc;
+ struct device *soc_dev = NULL;
+
+ soc = at91_soc_init(at91sam9_socs);
+ if (soc != NULL)
+ soc_dev = soc_device_to_device(soc);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
arm_pm_idle = at91sam9_idle;
+}
+
+static void __init at91sam9_dt_device_init(void)
+{
+ at91sam9_common_init();
at91sam9260_pm_init();
}
@@ -40,16 +79,13 @@ static const char *at91_dt_board_compat[] __initconst = {
DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM9")
/* Maintainer: Atmel */
- .map_io = at91_map_io,
.init_machine = at91sam9_dt_device_init,
.dt_compat = at91_dt_board_compat,
MACHINE_END
static void __init at91sam9g45_dt_device_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
- arm_pm_idle = at91sam9_idle;
+ at91sam9_common_init();
at91sam9g45_pm_init();
}
@@ -60,16 +96,13 @@ static const char *at91sam9g45_board_compat[] __initconst = {
DT_MACHINE_START(at91sam9g45_dt, "Atmel AT91SAM9G45")
/* Maintainer: Atmel */
- .map_io = at91_map_io,
.init_machine = at91sam9g45_dt_device_init,
.dt_compat = at91sam9g45_board_compat,
MACHINE_END
static void __init at91sam9x5_dt_device_init(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
- arm_pm_idle = at91sam9_idle;
+ at91sam9_common_init();
at91sam9x5_pm_init();
}
@@ -81,7 +114,6 @@ static const char *at91sam9x5_board_compat[] __initconst = {
DT_MACHINE_START(at91sam9x5_dt, "Atmel AT91SAM9")
/* Maintainer: Atmel */
- .map_io = at91_map_io,
.init_machine = at91sam9x5_dt_device_init,
.dt_compat = at91sam9x5_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 583369ffc284..b0fa7dc7286d 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -18,17 +18,10 @@
extern void __init at91_map_io(void);
extern void __init at91_alt_map_io(void);
- /* Timer */
-extern void at91rm9200_timer_init(void);
-
/* idle */
extern void at91rm9200_idle(void);
extern void at91sam9_idle(void);
-/* Matrix */
-extern void at91_ioremap_matrix(u32 base_addr);
-
-
#ifdef CONFIG_PM
extern void __init at91rm9200_pm_init(void);
extern void __init at91sam9260_pm_init(void);
diff --git a/arch/arm/mach-at91/include/mach/at91_dbgu.h b/arch/arm/mach-at91/include/mach/at91_dbgu.h
deleted file mode 100644
index 42925e8f78e4..000000000000
--- a/arch/arm/mach-at91/include/mach/at91_dbgu.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_dbgu.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Debug Unit (DBGU) - System peripherals registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_DBGU_H
-#define AT91_DBGU_H
-
-#define AT91_DBGU_CR (0x00) /* Control Register */
-#define AT91_DBGU_MR (0x04) /* Mode Register */
-#define AT91_DBGU_IER (0x08) /* Interrupt Enable Register */
-#define AT91_DBGU_TXRDY (1 << 1) /* Transmitter Ready */
-#define AT91_DBGU_TXEMPTY (1 << 9) /* Transmitter Empty */
-#define AT91_DBGU_IDR (0x0c) /* Interrupt Disable Register */
-#define AT91_DBGU_IMR (0x10) /* Interrupt Mask Register */
-#define AT91_DBGU_SR (0x14) /* Status Register */
-#define AT91_DBGU_RHR (0x18) /* Receiver Holding Register */
-#define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */
-#define AT91_DBGU_BRGR (0x20) /* Baud Rate Generator Register */
-
-#define AT91_DBGU_CIDR (0x40) /* Chip ID Register */
-#define AT91_DBGU_EXID (0x44) /* Chip ID Extension Register */
-#define AT91_DBGU_FNR (0x48) /* Force NTRST Register [SAM9 only] */
-#define AT91_DBGU_FNTRST (1 << 0) /* Force NTRST */
-
-/*
- * Some AT91 parts that don't have full DEBUG units still support the ID
- * and extensions register.
- */
-#define AT91_CIDR_VERSION (0x1f << 0) /* Version of the Device */
-#define AT91_CIDR_EPROC (7 << 5) /* Embedded Processor */
-#define AT91_CIDR_NVPSIZ (0xf << 8) /* Nonvolatile Program Memory Size */
-#define AT91_CIDR_NVPSIZ2 (0xf << 12) /* Second Nonvolatile Program Memory Size */
-#define AT91_CIDR_SRAMSIZ (0xf << 16) /* Internal SRAM Size */
-#define AT91_CIDR_SRAMSIZ_1K (1 << 16)
-#define AT91_CIDR_SRAMSIZ_2K (2 << 16)
-#define AT91_CIDR_SRAMSIZ_112K (4 << 16)
-#define AT91_CIDR_SRAMSIZ_4K (5 << 16)
-#define AT91_CIDR_SRAMSIZ_80K (6 << 16)
-#define AT91_CIDR_SRAMSIZ_160K (7 << 16)
-#define AT91_CIDR_SRAMSIZ_8K (8 << 16)
-#define AT91_CIDR_SRAMSIZ_16K (9 << 16)
-#define AT91_CIDR_SRAMSIZ_32K (10 << 16)
-#define AT91_CIDR_SRAMSIZ_64K (11 << 16)
-#define AT91_CIDR_SRAMSIZ_128K (12 << 16)
-#define AT91_CIDR_SRAMSIZ_256K (13 << 16)
-#define AT91_CIDR_SRAMSIZ_96K (14 << 16)
-#define AT91_CIDR_SRAMSIZ_512K (15 << 16)
-#define AT91_CIDR_ARCH (0xff << 20) /* Architecture Identifier */
-#define AT91_CIDR_NVPTYP (7 << 28) /* Nonvolatile Program Memory Type */
-#define AT91_CIDR_EXT (1 << 31) /* Extension Flag */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91_matrix.h b/arch/arm/mach-at91/include/mach/at91_matrix.h
deleted file mode 100644
index f8996c954131..000000000000
--- a/arch/arm/mach-at91/include/mach/at91_matrix.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * Under GPLv2
- */
-
-#ifndef __MACH_AT91_MATRIX_H__
-#define __MACH_AT91_MATRIX_H__
-
-#ifndef __ASSEMBLY__
-extern void __iomem *at91_matrix_base;
-
-#define at91_matrix_read(field) \
- __raw_readl(at91_matrix_base + field)
-
-#define at91_matrix_write(field, value) \
- __raw_writel(value, at91_matrix_base + field)
-
-#else
-.extern at91_matrix_base
-#endif
-
-#endif /* __MACH_AT91_MATRIX_H__ */
diff --git a/arch/arm/mach-at91/include/mach/at91_ramc.h b/arch/arm/mach-at91/include/mach/at91_ramc.h
index e4492b151fee..493bc486e858 100644
--- a/arch/arm/mach-at91/include/mach/at91_ramc.h
+++ b/arch/arm/mach-at91/include/mach/at91_ramc.h
@@ -21,10 +21,6 @@ extern void __iomem *at91_ramc_base[];
.extern at91_ramc_base
#endif
-#define AT91_MEMCTRL_MC 0
-#define AT91_MEMCTRL_SDRAMC 1
-#define AT91_MEMCTRL_DDRSDR 2
-
#include <soc/at91/at91rm9200_sdramc.h>
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
diff --git a/arch/arm/mach-at91/include/mach/at91_st.h b/arch/arm/mach-at91/include/mach/at91_st.h
deleted file mode 100644
index 67fdbd13c3ed..000000000000
--- a/arch/arm/mach-at91/include/mach/at91_st.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_st.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * System Timer (ST) - System peripherals registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_ST_H
-#define AT91_ST_H
-
-#ifndef __ASSEMBLY__
-extern void __iomem *at91_st_base;
-
-#define at91_st_read(field) \
- __raw_readl(at91_st_base + field)
-
-#define at91_st_write(field, value) \
- __raw_writel(value, at91_st_base + field)
-#else
-.extern at91_st_base
-#endif
-
-#define AT91_ST_CR 0x00 /* Control Register */
-#define AT91_ST_WDRST (1 << 0) /* Watchdog Timer Restart */
-
-#define AT91_ST_PIMR 0x04 /* Period Interval Mode Register */
-#define AT91_ST_PIV (0xffff << 0) /* Period Interval Value */
-
-#define AT91_ST_WDMR 0x08 /* Watchdog Mode Register */
-#define AT91_ST_WDV (0xffff << 0) /* Watchdog Counter Value */
-#define AT91_ST_RSTEN (1 << 16) /* Reset Enable */
-#define AT91_ST_EXTEN (1 << 17) /* External Signal Assertion Enable */
-
-#define AT91_ST_RTMR 0x0c /* Real-time Mode Register */
-#define AT91_ST_RTPRES (0xffff << 0) /* Real-time Prescaler Value */
-
-#define AT91_ST_SR 0x10 /* Status Register */
-#define AT91_ST_PITS (1 << 0) /* Period Interval Timer Status */
-#define AT91_ST_WDOVF (1 << 1) /* Watchdog Overflow */
-#define AT91_ST_RTTINC (1 << 2) /* Real-time Timer Increment */
-#define AT91_ST_ALMS (1 << 3) /* Alarm Status */
-
-#define AT91_ST_IER 0x14 /* Interrupt Enable Register */
-#define AT91_ST_IDR 0x18 /* Interrupt Disable Register */
-#define AT91_ST_IMR 0x1c /* Interrupt Mask Register */
-
-#define AT91_ST_RTAR 0x20 /* Real-time Alarm Register */
-#define AT91_ST_ALMV (0xfffff << 0) /* Alarm Value */
-
-#define AT91_ST_CRTR 0x24 /* Current Real-time Register */
-#define AT91_ST_CRTV (0xfffff << 0) /* Current Real-Time Value */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91rm9200.h b/arch/arm/mach-at91/include/mach/at91rm9200.h
deleted file mode 100644
index e67317c67761..000000000000
--- a/arch/arm/mach-at91/include/mach/at91rm9200.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91rm9200.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Common definitions.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91RM9200_H
-#define AT91RM9200_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91RM9200_ID_PIOA 2 /* Parallel IO Controller A */
-#define AT91RM9200_ID_PIOB 3 /* Parallel IO Controller B */
-#define AT91RM9200_ID_PIOC 4 /* Parallel IO Controller C */
-#define AT91RM9200_ID_PIOD 5 /* Parallel IO Controller D */
-#define AT91RM9200_ID_US0 6 /* USART 0 */
-#define AT91RM9200_ID_US1 7 /* USART 1 */
-#define AT91RM9200_ID_US2 8 /* USART 2 */
-#define AT91RM9200_ID_US3 9 /* USART 3 */
-#define AT91RM9200_ID_MCI 10 /* Multimedia Card Interface */
-#define AT91RM9200_ID_UDP 11 /* USB Device Port */
-#define AT91RM9200_ID_TWI 12 /* Two-Wire Interface */
-#define AT91RM9200_ID_SPI 13 /* Serial Peripheral Interface */
-#define AT91RM9200_ID_SSC0 14 /* Serial Synchronous Controller 0 */
-#define AT91RM9200_ID_SSC1 15 /* Serial Synchronous Controller 1 */
-#define AT91RM9200_ID_SSC2 16 /* Serial Synchronous Controller 2 */
-#define AT91RM9200_ID_TC0 17 /* Timer Counter 0 */
-#define AT91RM9200_ID_TC1 18 /* Timer Counter 1 */
-#define AT91RM9200_ID_TC2 19 /* Timer Counter 2 */
-#define AT91RM9200_ID_TC3 20 /* Timer Counter 3 */
-#define AT91RM9200_ID_TC4 21 /* Timer Counter 4 */
-#define AT91RM9200_ID_TC5 22 /* Timer Counter 5 */
-#define AT91RM9200_ID_UHP 23 /* USB Host port */
-#define AT91RM9200_ID_EMAC 24 /* Ethernet MAC */
-#define AT91RM9200_ID_IRQ0 25 /* Advanced Interrupt Controller (IRQ0) */
-#define AT91RM9200_ID_IRQ1 26 /* Advanced Interrupt Controller (IRQ1) */
-#define AT91RM9200_ID_IRQ2 27 /* Advanced Interrupt Controller (IRQ2) */
-#define AT91RM9200_ID_IRQ3 28 /* Advanced Interrupt Controller (IRQ3) */
-#define AT91RM9200_ID_IRQ4 29 /* Advanced Interrupt Controller (IRQ4) */
-#define AT91RM9200_ID_IRQ5 30 /* Advanced Interrupt Controller (IRQ5) */
-#define AT91RM9200_ID_IRQ6 31 /* Advanced Interrupt Controller (IRQ6) */
-
-
-/*
- * Peripheral physical base addresses.
- */
-#define AT91RM9200_BASE_TCB0 0xfffa0000
-#define AT91RM9200_BASE_TC0 0xfffa0000
-#define AT91RM9200_BASE_TC1 0xfffa0040
-#define AT91RM9200_BASE_TC2 0xfffa0080
-#define AT91RM9200_BASE_TCB1 0xfffa4000
-#define AT91RM9200_BASE_TC3 0xfffa4000
-#define AT91RM9200_BASE_TC4 0xfffa4040
-#define AT91RM9200_BASE_TC5 0xfffa4080
-#define AT91RM9200_BASE_UDP 0xfffb0000
-#define AT91RM9200_BASE_MCI 0xfffb4000
-#define AT91RM9200_BASE_TWI 0xfffb8000
-#define AT91RM9200_BASE_EMAC 0xfffbc000
-#define AT91RM9200_BASE_US0 0xfffc0000
-#define AT91RM9200_BASE_US1 0xfffc4000
-#define AT91RM9200_BASE_US2 0xfffc8000
-#define AT91RM9200_BASE_US3 0xfffcc000
-#define AT91RM9200_BASE_SSC0 0xfffd0000
-#define AT91RM9200_BASE_SSC1 0xfffd4000
-#define AT91RM9200_BASE_SSC2 0xfffd8000
-#define AT91RM9200_BASE_SPI 0xfffe0000
-
-
-/*
- * System Peripherals
- */
-#define AT91RM9200_BASE_DBGU AT91_BASE_DBGU0 /* Debug Unit */
-#define AT91RM9200_BASE_PIOA 0xfffff400 /* PIO Controller A */
-#define AT91RM9200_BASE_PIOB 0xfffff600 /* PIO Controller B */
-#define AT91RM9200_BASE_PIOC 0xfffff800 /* PIO Controller C */
-#define AT91RM9200_BASE_PIOD 0xfffffa00 /* PIO Controller D */
-#define AT91RM9200_BASE_ST 0xfffffd00 /* System Timer */
-#define AT91RM9200_BASE_RTC 0xfffffe00 /* Real-Time Clock */
-#define AT91RM9200_BASE_MC 0xffffff00 /* Memory Controllers */
-
-/*
- * Internal Memory.
- */
-#define AT91RM9200_ROM_BASE 0x00100000 /* Internal ROM base address */
-#define AT91RM9200_ROM_SIZE SZ_128K /* Internal ROM size (128Kb) */
-
-#define AT91RM9200_SRAM_BASE 0x00200000 /* Internal SRAM base address */
-#define AT91RM9200_SRAM_SIZE SZ_16K /* Internal SRAM size (16Kb) */
-
-#define AT91RM9200_UHP_BASE 0x00300000 /* USB Host controller */
-
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9260.h b/arch/arm/mach-at91/include/mach/at91sam9260.h
deleted file mode 100644
index 416c7b6c56d3..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9260.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9260.h
- *
- * (C) 2006 Andrew Victor
- *
- * Common definitions.
- * Based on AT91SAM9260 datasheet revision A (Preliminary).
- *
- * Includes also definitions for AT91SAM9XE and AT91SAM9G families
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9260_H
-#define AT91SAM9260_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91SAM9260_ID_PIOA 2 /* Parallel IO Controller A */
-#define AT91SAM9260_ID_PIOB 3 /* Parallel IO Controller B */
-#define AT91SAM9260_ID_PIOC 4 /* Parallel IO Controller C */
-#define AT91SAM9260_ID_ADC 5 /* Analog-to-Digital Converter */
-#define AT91SAM9260_ID_US0 6 /* USART 0 */
-#define AT91SAM9260_ID_US1 7 /* USART 1 */
-#define AT91SAM9260_ID_US2 8 /* USART 2 */
-#define AT91SAM9260_ID_MCI 9 /* Multimedia Card Interface */
-#define AT91SAM9260_ID_UDP 10 /* USB Device Port */
-#define AT91SAM9260_ID_TWI 11 /* Two-Wire Interface */
-#define AT91SAM9260_ID_SPI0 12 /* Serial Peripheral Interface 0 */
-#define AT91SAM9260_ID_SPI1 13 /* Serial Peripheral Interface 1 */
-#define AT91SAM9260_ID_SSC 14 /* Serial Synchronous Controller */
-#define AT91SAM9260_ID_TC0 17 /* Timer Counter 0 */
-#define AT91SAM9260_ID_TC1 18 /* Timer Counter 1 */
-#define AT91SAM9260_ID_TC2 19 /* Timer Counter 2 */
-#define AT91SAM9260_ID_UHP 20 /* USB Host port */
-#define AT91SAM9260_ID_EMAC 21 /* Ethernet */
-#define AT91SAM9260_ID_ISI 22 /* Image Sensor Interface */
-#define AT91SAM9260_ID_US3 23 /* USART 3 */
-#define AT91SAM9260_ID_US4 24 /* USART 4 */
-#define AT91SAM9260_ID_US5 25 /* USART 5 */
-#define AT91SAM9260_ID_TC3 26 /* Timer Counter 3 */
-#define AT91SAM9260_ID_TC4 27 /* Timer Counter 4 */
-#define AT91SAM9260_ID_TC5 28 /* Timer Counter 5 */
-#define AT91SAM9260_ID_IRQ0 29 /* Advanced Interrupt Controller (IRQ0) */
-#define AT91SAM9260_ID_IRQ1 30 /* Advanced Interrupt Controller (IRQ1) */
-#define AT91SAM9260_ID_IRQ2 31 /* Advanced Interrupt Controller (IRQ2) */
-
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT91SAM9260_BASE_TCB0 0xfffa0000
-#define AT91SAM9260_BASE_TC0 0xfffa0000
-#define AT91SAM9260_BASE_TC1 0xfffa0040
-#define AT91SAM9260_BASE_TC2 0xfffa0080
-#define AT91SAM9260_BASE_UDP 0xfffa4000
-#define AT91SAM9260_BASE_MCI 0xfffa8000
-#define AT91SAM9260_BASE_TWI 0xfffac000
-#define AT91SAM9260_BASE_US0 0xfffb0000
-#define AT91SAM9260_BASE_US1 0xfffb4000
-#define AT91SAM9260_BASE_US2 0xfffb8000
-#define AT91SAM9260_BASE_SSC 0xfffbc000
-#define AT91SAM9260_BASE_ISI 0xfffc0000
-#define AT91SAM9260_BASE_EMAC 0xfffc4000
-#define AT91SAM9260_BASE_SPI0 0xfffc8000
-#define AT91SAM9260_BASE_SPI1 0xfffcc000
-#define AT91SAM9260_BASE_US3 0xfffd0000
-#define AT91SAM9260_BASE_US4 0xfffd4000
-#define AT91SAM9260_BASE_US5 0xfffd8000
-#define AT91SAM9260_BASE_TCB1 0xfffdc000
-#define AT91SAM9260_BASE_TC3 0xfffdc000
-#define AT91SAM9260_BASE_TC4 0xfffdc040
-#define AT91SAM9260_BASE_TC5 0xfffdc080
-#define AT91SAM9260_BASE_ADC 0xfffe0000
-
-/*
- * System Peripherals
- */
-#define AT91SAM9260_BASE_ECC 0xffffe800
-#define AT91SAM9260_BASE_SDRAMC 0xffffea00
-#define AT91SAM9260_BASE_SMC 0xffffec00
-#define AT91SAM9260_BASE_MATRIX 0xffffee00
-#define AT91SAM9260_BASE_DBGU AT91_BASE_DBGU0
-#define AT91SAM9260_BASE_PIOA 0xfffff400
-#define AT91SAM9260_BASE_PIOB 0xfffff600
-#define AT91SAM9260_BASE_PIOC 0xfffff800
-#define AT91SAM9260_BASE_RSTC 0xfffffd00
-#define AT91SAM9260_BASE_SHDWC 0xfffffd10
-#define AT91SAM9260_BASE_RTT 0xfffffd20
-#define AT91SAM9260_BASE_PIT 0xfffffd30
-#define AT91SAM9260_BASE_WDT 0xfffffd40
-#define AT91SAM9260_BASE_GPBR 0xfffffd50
-
-
-/*
- * Internal Memory.
- */
-#define AT91SAM9260_ROM_BASE 0x00100000 /* Internal ROM base address */
-#define AT91SAM9260_ROM_SIZE SZ_32K /* Internal ROM size (32Kb) */
-
-#define AT91SAM9260_SRAM0_BASE 0x00200000 /* Internal SRAM 0 base address */
-#define AT91SAM9260_SRAM0_SIZE SZ_4K /* Internal SRAM 0 size (4Kb) */
-#define AT91SAM9260_SRAM1_BASE 0x00300000 /* Internal SRAM 1 base address */
-#define AT91SAM9260_SRAM1_SIZE SZ_4K /* Internal SRAM 1 size (4Kb) */
-#define AT91SAM9260_SRAM_BASE 0x002FF000 /* Internal SRAM base address */
-#define AT91SAM9260_SRAM_SIZE SZ_8K /* Internal SRAM size (8Kb) */
-
-#define AT91SAM9260_UHP_BASE 0x00500000 /* USB Host controller */
-
-#define AT91SAM9XE_FLASH_BASE 0x00200000 /* Internal FLASH base address */
-#define AT91SAM9XE_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-
-#define AT91SAM9G20_ROM_BASE 0x00100000 /* Internal ROM base address */
-#define AT91SAM9G20_ROM_SIZE SZ_32K /* Internal ROM size (32Kb) */
-
-#define AT91SAM9G20_SRAM0_BASE 0x00200000 /* Internal SRAM 0 base address */
-#define AT91SAM9G20_SRAM0_SIZE SZ_16K /* Internal SRAM 0 size (16Kb) */
-#define AT91SAM9G20_SRAM1_BASE 0x00300000 /* Internal SRAM 1 base address */
-#define AT91SAM9G20_SRAM1_SIZE SZ_16K /* Internal SRAM 1 size (16Kb) */
-#define AT91SAM9G20_SRAM_BASE 0x002FC000 /* Internal SRAM base address */
-#define AT91SAM9G20_SRAM_SIZE SZ_32K /* Internal SRAM size (32Kb) */
-
-#define AT91SAM9G20_UHP_BASE 0x00500000 /* USB Host controller */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9260_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9260_matrix.h
deleted file mode 100644
index f459df420629..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9260_matrix.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9260_matrix.h
- *
- * Copyright (C) 2007 Atmel Corporation.
- *
- * Memory Controllers (MATRIX, EBI) - System peripherals registers.
- * Based on AT91SAM9260 datasheet revision B.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9260_MATRIX_H
-#define AT91SAM9260_MATRIX_H
-
-#define AT91_MATRIX_MCFG0 0x00 /* Master Configuration Register 0 */
-#define AT91_MATRIX_MCFG1 0x04 /* Master Configuration Register 1 */
-#define AT91_MATRIX_MCFG2 0x08 /* Master Configuration Register 2 */
-#define AT91_MATRIX_MCFG3 0x0C /* Master Configuration Register 3 */
-#define AT91_MATRIX_MCFG4 0x10 /* Master Configuration Register 4 */
-#define AT91_MATRIX_MCFG5 0x14 /* Master Configuration Register 5 */
-#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
-#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
-#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
-#define AT91_MATRIX_ULBT_FOUR (2 << 0)
-#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
-#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
-
-#define AT91_MATRIX_SCFG0 0x40 /* Slave Configuration Register 0 */
-#define AT91_MATRIX_SCFG1 0x44 /* Slave Configuration Register 1 */
-#define AT91_MATRIX_SCFG2 0x48 /* Slave Configuration Register 2 */
-#define AT91_MATRIX_SCFG3 0x4C /* Slave Configuration Register 3 */
-#define AT91_MATRIX_SCFG4 0x50 /* Slave Configuration Register 4 */
-#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */
-#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
-#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
-#define AT91_MATRIX_FIXED_DEFMSTR (7 << 18) /* Fixed Index of Default Master */
-#define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */
-#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
-#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
-
-#define AT91_MATRIX_PRAS0 0x80 /* Priority Register A for Slave 0 */
-#define AT91_MATRIX_PRAS1 0x88 /* Priority Register A for Slave 1 */
-#define AT91_MATRIX_PRAS2 0x90 /* Priority Register A for Slave 2 */
-#define AT91_MATRIX_PRAS3 0x98 /* Priority Register A for Slave 3 */
-#define AT91_MATRIX_PRAS4 0xA0 /* Priority Register A for Slave 4 */
-#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
-#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
-#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
-#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
-#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
-#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
-
-#define AT91_MATRIX_MRCR 0x100 /* Master Remap Control Register */
-#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
-#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
-
-#define AT91_MATRIX_EBICSA 0x11C /* EBI Chip Select Assignment Register */
-#define AT91_MATRIX_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_CS3A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_CS4A (1 << 4) /* Chip Select 4 Assignment */
-#define AT91_MATRIX_CS4A_SMC (0 << 4)
-#define AT91_MATRIX_CS4A_SMC_CF1 (1 << 4)
-#define AT91_MATRIX_CS5A (1 << 5) /* Chip Select 5 Assignment */
-#define AT91_MATRIX_CS5A_SMC (0 << 5)
-#define AT91_MATRIX_CS5A_SMC_CF2 (1 << 5)
-#define AT91_MATRIX_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-#define AT91_MATRIX_VDDIOMSEL (1 << 16) /* Memory voltage selection */
-#define AT91_MATRIX_VDDIOMSEL_1_8V (0 << 16)
-#define AT91_MATRIX_VDDIOMSEL_3_3V (1 << 16)
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9261.h b/arch/arm/mach-at91/include/mach/at91sam9261.h
deleted file mode 100644
index a041406d06ee..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9261.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9261.h
- *
- * Copyright (C) SAN People
- *
- * Common definitions.
- * Based on AT91SAM9261 datasheet revision E. (Preliminary)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9261_H
-#define AT91SAM9261_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91SAM9261_ID_PIOA 2 /* Parallel IO Controller A */
-#define AT91SAM9261_ID_PIOB 3 /* Parallel IO Controller B */
-#define AT91SAM9261_ID_PIOC 4 /* Parallel IO Controller C */
-#define AT91SAM9261_ID_US0 6 /* USART 0 */
-#define AT91SAM9261_ID_US1 7 /* USART 1 */
-#define AT91SAM9261_ID_US2 8 /* USART 2 */
-#define AT91SAM9261_ID_MCI 9 /* Multimedia Card Interface */
-#define AT91SAM9261_ID_UDP 10 /* USB Device Port */
-#define AT91SAM9261_ID_TWI 11 /* Two-Wire Interface */
-#define AT91SAM9261_ID_SPI0 12 /* Serial Peripheral Interface 0 */
-#define AT91SAM9261_ID_SPI1 13 /* Serial Peripheral Interface 1 */
-#define AT91SAM9261_ID_SSC0 14 /* Serial Synchronous Controller 0 */
-#define AT91SAM9261_ID_SSC1 15 /* Serial Synchronous Controller 1 */
-#define AT91SAM9261_ID_SSC2 16 /* Serial Synchronous Controller 2 */
-#define AT91SAM9261_ID_TC0 17 /* Timer Counter 0 */
-#define AT91SAM9261_ID_TC1 18 /* Timer Counter 1 */
-#define AT91SAM9261_ID_TC2 19 /* Timer Counter 2 */
-#define AT91SAM9261_ID_UHP 20 /* USB Host port */
-#define AT91SAM9261_ID_LCDC 21 /* LCD Controller */
-#define AT91SAM9261_ID_IRQ0 29 /* Advanced Interrupt Controller (IRQ0) */
-#define AT91SAM9261_ID_IRQ1 30 /* Advanced Interrupt Controller (IRQ1) */
-#define AT91SAM9261_ID_IRQ2 31 /* Advanced Interrupt Controller (IRQ2) */
-
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT91SAM9261_BASE_TCB0 0xfffa0000
-#define AT91SAM9261_BASE_TC0 0xfffa0000
-#define AT91SAM9261_BASE_TC1 0xfffa0040
-#define AT91SAM9261_BASE_TC2 0xfffa0080
-#define AT91SAM9261_BASE_UDP 0xfffa4000
-#define AT91SAM9261_BASE_MCI 0xfffa8000
-#define AT91SAM9261_BASE_TWI 0xfffac000
-#define AT91SAM9261_BASE_US0 0xfffb0000
-#define AT91SAM9261_BASE_US1 0xfffb4000
-#define AT91SAM9261_BASE_US2 0xfffb8000
-#define AT91SAM9261_BASE_SSC0 0xfffbc000
-#define AT91SAM9261_BASE_SSC1 0xfffc0000
-#define AT91SAM9261_BASE_SSC2 0xfffc4000
-#define AT91SAM9261_BASE_SPI0 0xfffc8000
-#define AT91SAM9261_BASE_SPI1 0xfffcc000
-
-
-/*
- * System Peripherals
- */
-#define AT91SAM9261_BASE_SMC 0xffffec00
-#define AT91SAM9261_BASE_MATRIX 0xffffee00
-#define AT91SAM9261_BASE_SDRAMC 0xffffea00
-#define AT91SAM9261_BASE_DBGU AT91_BASE_DBGU0
-#define AT91SAM9261_BASE_PIOA 0xfffff400
-#define AT91SAM9261_BASE_PIOB 0xfffff600
-#define AT91SAM9261_BASE_PIOC 0xfffff800
-#define AT91SAM9261_BASE_RSTC 0xfffffd00
-#define AT91SAM9261_BASE_SHDWC 0xfffffd10
-#define AT91SAM9261_BASE_RTT 0xfffffd20
-#define AT91SAM9261_BASE_PIT 0xfffffd30
-#define AT91SAM9261_BASE_WDT 0xfffffd40
-#define AT91SAM9261_BASE_GPBR 0xfffffd50
-
-
-/*
- * Internal Memory.
- */
-#define AT91SAM9261_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define AT91SAM9261_SRAM_SIZE 0x00028000 /* Internal SRAM size (160Kb) */
-
-#define AT91SAM9G10_SRAM_BASE AT91SAM9261_SRAM_BASE /* Internal SRAM base address */
-#define AT91SAM9G10_SRAM_SIZE 0x00004000 /* Internal SRAM size (16Kb) */
-
-#define AT91SAM9261_ROM_BASE 0x00400000 /* Internal ROM base address */
-#define AT91SAM9261_ROM_SIZE SZ_32K /* Internal ROM size (32Kb) */
-
-#define AT91SAM9261_UHP_BASE 0x00500000 /* USB Host controller */
-#define AT91SAM9261_LCDC_BASE 0x00600000 /* LCD controller */
-
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9261_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9261_matrix.h
deleted file mode 100644
index a50cdf8b8ca4..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9261_matrix.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9261_matrix.h
- *
- * Copyright (C) 2007 Atmel Corporation.
- *
- * Memory Controllers (MATRIX, EBI) - System peripherals registers.
- * Based on AT91SAM9261 datasheet revision D.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9261_MATRIX_H
-#define AT91SAM9261_MATRIX_H
-
-#define AT91_MATRIX_MCFG 0x00 /* Master Configuration Register */
-#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
-#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
-
-#define AT91_MATRIX_SCFG0 0x04 /* Slave Configuration Register 0 */
-#define AT91_MATRIX_SCFG1 0x08 /* Slave Configuration Register 1 */
-#define AT91_MATRIX_SCFG2 0x0C /* Slave Configuration Register 2 */
-#define AT91_MATRIX_SCFG3 0x10 /* Slave Configuration Register 3 */
-#define AT91_MATRIX_SCFG4 0x14 /* Slave Configuration Register 4 */
-#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */
-#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
-#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
-#define AT91_MATRIX_FIXED_DEFMSTR (7 << 18) /* Fixed Index of Default Master */
-
-#define AT91_MATRIX_TCR 0x24 /* TCM Configuration Register */
-#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
-#define AT91_MATRIX_ITCM_0 (0 << 0)
-#define AT91_MATRIX_ITCM_16 (5 << 0)
-#define AT91_MATRIX_ITCM_32 (6 << 0)
-#define AT91_MATRIX_ITCM_64 (7 << 0)
-#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
-#define AT91_MATRIX_DTCM_0 (0 << 4)
-#define AT91_MATRIX_DTCM_16 (5 << 4)
-#define AT91_MATRIX_DTCM_32 (6 << 4)
-#define AT91_MATRIX_DTCM_64 (7 << 4)
-
-#define AT91_MATRIX_EBICSA 0x30 /* EBI Chip Select Assignment Register */
-#define AT91_MATRIX_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_CS3A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_CS4A (1 << 4) /* Chip Select 4 Assignment */
-#define AT91_MATRIX_CS4A_SMC (0 << 4)
-#define AT91_MATRIX_CS4A_SMC_CF1 (1 << 4)
-#define AT91_MATRIX_CS5A (1 << 5) /* Chip Select 5 Assignment */
-#define AT91_MATRIX_CS5A_SMC (0 << 5)
-#define AT91_MATRIX_CS5A_SMC_CF2 (1 << 5)
-#define AT91_MATRIX_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-
-#define AT91_MATRIX_USBPUCR 0x34 /* USB Pad Pull-Up Control Register */
-#define AT91_MATRIX_USBPUCR_PUON (1 << 30) /* USB Device PAD Pull-up Enable */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9263.h b/arch/arm/mach-at91/include/mach/at91sam9263.h
deleted file mode 100644
index d201029d60b3..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9263.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9263.h
- *
- * (C) 2007 Atmel Corporation.
- *
- * Common definitions.
- * Based on AT91SAM9263 datasheet revision B (Preliminary).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9263_H
-#define AT91SAM9263_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91SAM9263_ID_PIOA 2 /* Parallel IO Controller A */
-#define AT91SAM9263_ID_PIOB 3 /* Parallel IO Controller B */
-#define AT91SAM9263_ID_PIOCDE 4 /* Parallel IO Controller C, D and E */
-#define AT91SAM9263_ID_US0 7 /* USART 0 */
-#define AT91SAM9263_ID_US1 8 /* USART 1 */
-#define AT91SAM9263_ID_US2 9 /* USART 2 */
-#define AT91SAM9263_ID_MCI0 10 /* Multimedia Card Interface 0 */
-#define AT91SAM9263_ID_MCI1 11 /* Multimedia Card Interface 1 */
-#define AT91SAM9263_ID_CAN 12 /* CAN */
-#define AT91SAM9263_ID_TWI 13 /* Two-Wire Interface */
-#define AT91SAM9263_ID_SPI0 14 /* Serial Peripheral Interface 0 */
-#define AT91SAM9263_ID_SPI1 15 /* Serial Peripheral Interface 1 */
-#define AT91SAM9263_ID_SSC0 16 /* Serial Synchronous Controller 0 */
-#define AT91SAM9263_ID_SSC1 17 /* Serial Synchronous Controller 1 */
-#define AT91SAM9263_ID_AC97C 18 /* AC97 Controller */
-#define AT91SAM9263_ID_TCB 19 /* Timer Counter 0, 1 and 2 */
-#define AT91SAM9263_ID_PWMC 20 /* Pulse Width Modulation Controller */
-#define AT91SAM9263_ID_EMAC 21 /* Ethernet */
-#define AT91SAM9263_ID_2DGE 23 /* 2D Graphic Engine */
-#define AT91SAM9263_ID_UDP 24 /* USB Device Port */
-#define AT91SAM9263_ID_ISI 25 /* Image Sensor Interface */
-#define AT91SAM9263_ID_LCDC 26 /* LCD Controller */
-#define AT91SAM9263_ID_DMA 27 /* DMA Controller */
-#define AT91SAM9263_ID_UHP 29 /* USB Host port */
-#define AT91SAM9263_ID_IRQ0 30 /* Advanced Interrupt Controller (IRQ0) */
-#define AT91SAM9263_ID_IRQ1 31 /* Advanced Interrupt Controller (IRQ1) */
-
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT91SAM9263_BASE_UDP 0xfff78000
-#define AT91SAM9263_BASE_TCB0 0xfff7c000
-#define AT91SAM9263_BASE_TC0 0xfff7c000
-#define AT91SAM9263_BASE_TC1 0xfff7c040
-#define AT91SAM9263_BASE_TC2 0xfff7c080
-#define AT91SAM9263_BASE_MCI0 0xfff80000
-#define AT91SAM9263_BASE_MCI1 0xfff84000
-#define AT91SAM9263_BASE_TWI 0xfff88000
-#define AT91SAM9263_BASE_US0 0xfff8c000
-#define AT91SAM9263_BASE_US1 0xfff90000
-#define AT91SAM9263_BASE_US2 0xfff94000
-#define AT91SAM9263_BASE_SSC0 0xfff98000
-#define AT91SAM9263_BASE_SSC1 0xfff9c000
-#define AT91SAM9263_BASE_AC97C 0xfffa0000
-#define AT91SAM9263_BASE_SPI0 0xfffa4000
-#define AT91SAM9263_BASE_SPI1 0xfffa8000
-#define AT91SAM9263_BASE_CAN 0xfffac000
-#define AT91SAM9263_BASE_PWMC 0xfffb8000
-#define AT91SAM9263_BASE_EMAC 0xfffbc000
-#define AT91SAM9263_BASE_ISI 0xfffc4000
-#define AT91SAM9263_BASE_2DGE 0xfffc8000
-
-/*
- * System Peripherals
- */
-#define AT91SAM9263_BASE_ECC0 0xffffe000
-#define AT91SAM9263_BASE_SDRAMC0 0xffffe200
-#define AT91SAM9263_BASE_SMC0 0xffffe400
-#define AT91SAM9263_BASE_ECC1 0xffffe600
-#define AT91SAM9263_BASE_SDRAMC1 0xffffe800
-#define AT91SAM9263_BASE_SMC1 0xffffea00
-#define AT91SAM9263_BASE_MATRIX 0xffffec00
-#define AT91SAM9263_BASE_DBGU AT91_BASE_DBGU1
-#define AT91SAM9263_BASE_PIOA 0xfffff200
-#define AT91SAM9263_BASE_PIOB 0xfffff400
-#define AT91SAM9263_BASE_PIOC 0xfffff600
-#define AT91SAM9263_BASE_PIOD 0xfffff800
-#define AT91SAM9263_BASE_PIOE 0xfffffa00
-#define AT91SAM9263_BASE_RSTC 0xfffffd00
-#define AT91SAM9263_BASE_SHDWC 0xfffffd10
-#define AT91SAM9263_BASE_RTT0 0xfffffd20
-#define AT91SAM9263_BASE_PIT 0xfffffd30
-#define AT91SAM9263_BASE_WDT 0xfffffd40
-#define AT91SAM9263_BASE_RTT1 0xfffffd50
-#define AT91SAM9263_BASE_GPBR 0xfffffd60
-
-#define AT91_SMC AT91_SMC0
-
-/*
- * Internal Memory.
- */
-#define AT91SAM9263_SRAM0_BASE 0x00300000 /* Internal SRAM 0 base address */
-#define AT91SAM9263_SRAM0_SIZE (80 * SZ_1K) /* Internal SRAM 0 size (80Kb) */
-
-#define AT91SAM9263_ROM_BASE 0x00400000 /* Internal ROM base address */
-#define AT91SAM9263_ROM_SIZE SZ_128K /* Internal ROM size (128Kb) */
-
-#define AT91SAM9263_SRAM1_BASE 0x00500000 /* Internal SRAM 1 base address */
-#define AT91SAM9263_SRAM1_SIZE SZ_16K /* Internal SRAM 1 size (16Kb) */
-
-#define AT91SAM9263_LCDC_BASE 0x00700000 /* LCD Controller */
-#define AT91SAM9263_DMAC_BASE 0x00800000 /* DMA Controller */
-#define AT91SAM9263_UHP_BASE 0x00a00000 /* USB Host controller */
-
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9263_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9263_matrix.h
deleted file mode 100644
index ebb5fdb565e0..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9263_matrix.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9263_matrix.h
- *
- * Copyright (C) 2006 Atmel Corporation.
- *
- * Memory Controllers (MATRIX, EBI) - System peripherals registers.
- * Based on AT91SAM9263 datasheet revision B (Preliminary).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9263_MATRIX_H
-#define AT91SAM9263_MATRIX_H
-
-#define AT91_MATRIX_MCFG0 0x00 /* Master Configuration Register 0 */
-#define AT91_MATRIX_MCFG1 0x04 /* Master Configuration Register 1 */
-#define AT91_MATRIX_MCFG2 0x08 /* Master Configuration Register 2 */
-#define AT91_MATRIX_MCFG3 0x0C /* Master Configuration Register 3 */
-#define AT91_MATRIX_MCFG4 0x10 /* Master Configuration Register 4 */
-#define AT91_MATRIX_MCFG5 0x14 /* Master Configuration Register 5 */
-#define AT91_MATRIX_MCFG6 0x18 /* Master Configuration Register 6 */
-#define AT91_MATRIX_MCFG7 0x1C /* Master Configuration Register 7 */
-#define AT91_MATRIX_MCFG8 0x20 /* Master Configuration Register 8 */
-#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
-#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
-#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
-#define AT91_MATRIX_ULBT_FOUR (2 << 0)
-#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
-#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
-
-#define AT91_MATRIX_SCFG0 0x40 /* Slave Configuration Register 0 */
-#define AT91_MATRIX_SCFG1 0x44 /* Slave Configuration Register 1 */
-#define AT91_MATRIX_SCFG2 0x48 /* Slave Configuration Register 2 */
-#define AT91_MATRIX_SCFG3 0x4C /* Slave Configuration Register 3 */
-#define AT91_MATRIX_SCFG4 0x50 /* Slave Configuration Register 4 */
-#define AT91_MATRIX_SCFG5 0x54 /* Slave Configuration Register 5 */
-#define AT91_MATRIX_SCFG6 0x58 /* Slave Configuration Register 6 */
-#define AT91_MATRIX_SCFG7 0x5C /* Slave Configuration Register 7 */
-#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */
-#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
-#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
-#define AT91_MATRIX_FIXED_DEFMSTR (0xf << 18) /* Fixed Index of Default Master */
-#define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */
-#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
-#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
-
-#define AT91_MATRIX_PRAS0 0x80 /* Priority Register A for Slave 0 */
-#define AT91_MATRIX_PRBS0 0x84 /* Priority Register B for Slave 0 */
-#define AT91_MATRIX_PRAS1 0x88 /* Priority Register A for Slave 1 */
-#define AT91_MATRIX_PRBS1 0x8C /* Priority Register B for Slave 1 */
-#define AT91_MATRIX_PRAS2 0x90 /* Priority Register A for Slave 2 */
-#define AT91_MATRIX_PRBS2 0x94 /* Priority Register B for Slave 2 */
-#define AT91_MATRIX_PRAS3 0x98 /* Priority Register A for Slave 3 */
-#define AT91_MATRIX_PRBS3 0x9C /* Priority Register B for Slave 3 */
-#define AT91_MATRIX_PRAS4 0xA0 /* Priority Register A for Slave 4 */
-#define AT91_MATRIX_PRBS4 0xA4 /* Priority Register B for Slave 4 */
-#define AT91_MATRIX_PRAS5 0xA8 /* Priority Register A for Slave 5 */
-#define AT91_MATRIX_PRBS5 0xAC /* Priority Register B for Slave 5 */
-#define AT91_MATRIX_PRAS6 0xB0 /* Priority Register A for Slave 6 */
-#define AT91_MATRIX_PRBS6 0xB4 /* Priority Register B for Slave 6 */
-#define AT91_MATRIX_PRAS7 0xB8 /* Priority Register A for Slave 7 */
-#define AT91_MATRIX_PRBS7 0xBC /* Priority Register B for Slave 7 */
-#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
-#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
-#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
-#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
-#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
-#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
-#define AT91_MATRIX_M6PR (3 << 24) /* Master 6 Priority */
-#define AT91_MATRIX_M7PR (3 << 28) /* Master 7 Priority */
-#define AT91_MATRIX_M8PR (3 << 0) /* Master 8 Priority (in Register B) */
-
-#define AT91_MATRIX_MRCR 0x100 /* Master Remap Control Register */
-#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
-#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
-#define AT91_MATRIX_RCB2 (1 << 2)
-#define AT91_MATRIX_RCB3 (1 << 3)
-#define AT91_MATRIX_RCB4 (1 << 4)
-#define AT91_MATRIX_RCB5 (1 << 5)
-#define AT91_MATRIX_RCB6 (1 << 6)
-#define AT91_MATRIX_RCB7 (1 << 7)
-#define AT91_MATRIX_RCB8 (1 << 8)
-
-#define AT91_MATRIX_TCMR 0x114 /* TCM Configuration Register */
-#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
-#define AT91_MATRIX_ITCM_0 (0 << 0)
-#define AT91_MATRIX_ITCM_16 (5 << 0)
-#define AT91_MATRIX_ITCM_32 (6 << 0)
-#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
-#define AT91_MATRIX_DTCM_0 (0 << 4)
-#define AT91_MATRIX_DTCM_16 (5 << 4)
-#define AT91_MATRIX_DTCM_32 (6 << 4)
-
-#define AT91_MATRIX_EBI0CSA 0x120 /* EBI0 Chip Select Assignment Register */
-#define AT91_MATRIX_EBI0_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_EBI0_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_EBI0_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_EBI0_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_EBI0_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_EBI0_CS3A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_EBI0_CS4A (1 << 4) /* Chip Select 4 Assignment */
-#define AT91_MATRIX_EBI0_CS4A_SMC (0 << 4)
-#define AT91_MATRIX_EBI0_CS4A_SMC_CF1 (1 << 4)
-#define AT91_MATRIX_EBI0_CS5A (1 << 5) /* Chip Select 5 Assignment */
-#define AT91_MATRIX_EBI0_CS5A_SMC (0 << 5)
-#define AT91_MATRIX_EBI0_CS5A_SMC_CF2 (1 << 5)
-#define AT91_MATRIX_EBI0_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-#define AT91_MATRIX_EBI0_VDDIOMSEL (1 << 16) /* Memory voltage selection */
-#define AT91_MATRIX_EBI0_VDDIOMSEL_1_8V (0 << 16)
-#define AT91_MATRIX_EBI0_VDDIOMSEL_3_3V (1 << 16)
-
-#define AT91_MATRIX_EBI1CSA 0x124 /* EBI1 Chip Select Assignment Register */
-#define AT91_MATRIX_EBI1_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_EBI1_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_EBI1_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_EBI1_CS2A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_EBI1_CS2A_SMC (0 << 3)
-#define AT91_MATRIX_EBI1_CS2A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_EBI1_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-#define AT91_MATRIX_EBI1_VDDIOMSEL (1 << 16) /* Memory voltage selection */
-#define AT91_MATRIX_EBI1_VDDIOMSEL_1_8V (0 << 16)
-#define AT91_MATRIX_EBI1_VDDIOMSEL_3_3V (1 << 16)
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9_smc.h b/arch/arm/mach-at91/include/mach/at91sam9_smc.h
index 175e1fdd9fe8..ff54a0ce90e3 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9_smc.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9_smc.h
@@ -16,8 +16,6 @@
#ifndef AT91SAM9_SMC_H
#define AT91SAM9_SMC_H
-#include <mach/cpu.h>
-
#ifndef __ASSEMBLY__
struct sam9_smc_config {
/* Setup register */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h
deleted file mode 100644
index 8eba1021f533..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9g45.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Chip-specific header file for the AT91SAM9G45 family
- *
- * Copyright (C) 2008-2009 Atmel Corporation.
- *
- * Common definitions.
- * Based on AT91SAM9G45 preliminary datasheet.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9G45_H
-#define AT91SAM9G45_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91SAM9G45_ID_PIOA 2 /* Parallel I/O Controller A */
-#define AT91SAM9G45_ID_PIOB 3 /* Parallel I/O Controller B */
-#define AT91SAM9G45_ID_PIOC 4 /* Parallel I/O Controller C */
-#define AT91SAM9G45_ID_PIODE 5 /* Parallel I/O Controller D and E */
-#define AT91SAM9G45_ID_TRNG 6 /* True Random Number Generator */
-#define AT91SAM9G45_ID_US0 7 /* USART 0 */
-#define AT91SAM9G45_ID_US1 8 /* USART 1 */
-#define AT91SAM9G45_ID_US2 9 /* USART 2 */
-#define AT91SAM9G45_ID_US3 10 /* USART 3 */
-#define AT91SAM9G45_ID_MCI0 11 /* High Speed Multimedia Card Interface 0 */
-#define AT91SAM9G45_ID_TWI0 12 /* Two-Wire Interface 0 */
-#define AT91SAM9G45_ID_TWI1 13 /* Two-Wire Interface 1 */
-#define AT91SAM9G45_ID_SPI0 14 /* Serial Peripheral Interface 0 */
-#define AT91SAM9G45_ID_SPI1 15 /* Serial Peripheral Interface 1 */
-#define AT91SAM9G45_ID_SSC0 16 /* Synchronous Serial Controller 0 */
-#define AT91SAM9G45_ID_SSC1 17 /* Synchronous Serial Controller 1 */
-#define AT91SAM9G45_ID_TCB 18 /* Timer Counter 0, 1, 2, 3, 4 and 5 */
-#define AT91SAM9G45_ID_PWMC 19 /* Pulse Width Modulation Controller */
-#define AT91SAM9G45_ID_TSC 20 /* Touch Screen ADC Controller */
-#define AT91SAM9G45_ID_DMA 21 /* DMA Controller */
-#define AT91SAM9G45_ID_UHPHS 22 /* USB Host High Speed */
-#define AT91SAM9G45_ID_LCDC 23 /* LCD Controller */
-#define AT91SAM9G45_ID_AC97C 24 /* AC97 Controller */
-#define AT91SAM9G45_ID_EMAC 25 /* Ethernet MAC */
-#define AT91SAM9G45_ID_ISI 26 /* Image Sensor Interface */
-#define AT91SAM9G45_ID_UDPHS 27 /* USB Device High Speed */
-#define AT91SAM9G45_ID_AESTDESSHA 28 /* AES + T-DES + SHA */
-#define AT91SAM9G45_ID_MCI1 29 /* High Speed Multimedia Card Interface 1 */
-#define AT91SAM9G45_ID_VDEC 30 /* Video Decoder */
-#define AT91SAM9G45_ID_IRQ0 31 /* Advanced Interrupt Controller */
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT91SAM9G45_BASE_UDPHS 0xfff78000
-#define AT91SAM9G45_BASE_TCB0 0xfff7c000
-#define AT91SAM9G45_BASE_TC0 0xfff7c000
-#define AT91SAM9G45_BASE_TC1 0xfff7c040
-#define AT91SAM9G45_BASE_TC2 0xfff7c080
-#define AT91SAM9G45_BASE_MCI0 0xfff80000
-#define AT91SAM9G45_BASE_TWI0 0xfff84000
-#define AT91SAM9G45_BASE_TWI1 0xfff88000
-#define AT91SAM9G45_BASE_US0 0xfff8c000
-#define AT91SAM9G45_BASE_US1 0xfff90000
-#define AT91SAM9G45_BASE_US2 0xfff94000
-#define AT91SAM9G45_BASE_US3 0xfff98000
-#define AT91SAM9G45_BASE_SSC0 0xfff9c000
-#define AT91SAM9G45_BASE_SSC1 0xfffa0000
-#define AT91SAM9G45_BASE_SPI0 0xfffa4000
-#define AT91SAM9G45_BASE_SPI1 0xfffa8000
-#define AT91SAM9G45_BASE_AC97C 0xfffac000
-#define AT91SAM9G45_BASE_TSC 0xfffb0000
-#define AT91SAM9G45_BASE_ISI 0xfffb4000
-#define AT91SAM9G45_BASE_PWMC 0xfffb8000
-#define AT91SAM9G45_BASE_EMAC 0xfffbc000
-#define AT91SAM9G45_BASE_AES 0xfffc0000
-#define AT91SAM9G45_BASE_TDES 0xfffc4000
-#define AT91SAM9G45_BASE_SHA 0xfffc8000
-#define AT91SAM9G45_BASE_TRNG 0xfffcc000
-#define AT91SAM9G45_BASE_MCI1 0xfffd0000
-#define AT91SAM9G45_BASE_TCB1 0xfffd4000
-#define AT91SAM9G45_BASE_TC3 0xfffd4000
-#define AT91SAM9G45_BASE_TC4 0xfffd4040
-#define AT91SAM9G45_BASE_TC5 0xfffd4080
-
-/*
- * System Peripherals
- */
-#define AT91SAM9G45_BASE_ECC 0xffffe200
-#define AT91SAM9G45_BASE_DDRSDRC1 0xffffe400
-#define AT91SAM9G45_BASE_DDRSDRC0 0xffffe600
-#define AT91SAM9G45_BASE_DMA 0xffffec00
-#define AT91SAM9G45_BASE_SMC 0xffffe800
-#define AT91SAM9G45_BASE_MATRIX 0xffffea00
-#define AT91SAM9G45_BASE_DBGU AT91_BASE_DBGU1
-#define AT91SAM9G45_BASE_PIOA 0xfffff200
-#define AT91SAM9G45_BASE_PIOB 0xfffff400
-#define AT91SAM9G45_BASE_PIOC 0xfffff600
-#define AT91SAM9G45_BASE_PIOD 0xfffff800
-#define AT91SAM9G45_BASE_PIOE 0xfffffa00
-#define AT91SAM9G45_BASE_RSTC 0xfffffd00
-#define AT91SAM9G45_BASE_SHDWC 0xfffffd10
-#define AT91SAM9G45_BASE_RTT 0xfffffd20
-#define AT91SAM9G45_BASE_PIT 0xfffffd30
-#define AT91SAM9G45_BASE_WDT 0xfffffd40
-#define AT91SAM9G45_BASE_RTC 0xfffffdb0
-#define AT91SAM9G45_BASE_GPBR 0xfffffd60
-
-/*
- * Internal Memory.
- */
-#define AT91SAM9G45_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define AT91SAM9G45_SRAM_SIZE SZ_64K /* Internal SRAM size (64Kb) */
-
-#define AT91SAM9G45_ROM_BASE 0x00400000 /* Internal ROM base address */
-#define AT91SAM9G45_ROM_SIZE SZ_64K /* Internal ROM size (64Kb) */
-
-#define AT91SAM9G45_LCDC_BASE 0x00500000 /* LCD Controller */
-#define AT91SAM9G45_UDPHS_FIFO 0x00600000 /* USB Device HS controller */
-#define AT91SAM9G45_OHCI_BASE 0x00700000 /* USB Host controller (OHCI) */
-#define AT91SAM9G45_EHCI_BASE 0x00800000 /* USB Host controller (EHCI) */
-#define AT91SAM9G45_VDEC_BASE 0x00900000 /* Video Decoder Controller */
-
-/*
- * DMA peripheral identifiers
- * for hardware handshaking interface
- */
-#define AT_DMA_ID_MCI0 0
-#define AT_DMA_ID_SPI0_TX 1
-#define AT_DMA_ID_SPI0_RX 2
-#define AT_DMA_ID_SPI1_TX 3
-#define AT_DMA_ID_SPI1_RX 4
-#define AT_DMA_ID_SSC0_TX 5
-#define AT_DMA_ID_SSC0_RX 6
-#define AT_DMA_ID_SSC1_TX 7
-#define AT_DMA_ID_SSC1_RX 8
-#define AT_DMA_ID_AC97_TX 9
-#define AT_DMA_ID_AC97_RX 10
-#define AT_DMA_ID_AES_TX 11
-#define AT_DMA_ID_AES_RX 12
-#define AT_DMA_ID_MCI1 13
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h
deleted file mode 100644
index b76e2ed2fbc2..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Matrix-centric header file for the AT91SAM9G45 family
- *
- * Copyright (C) 2008-2009 Atmel Corporation.
- *
- * Memory Controllers (MATRIX, EBI) - System peripherals registers.
- * Based on AT91SAM9G45 preliminary datasheet.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91SAM9G45_MATRIX_H
-#define AT91SAM9G45_MATRIX_H
-
-#define AT91_MATRIX_MCFG0 0x00 /* Master Configuration Register 0 */
-#define AT91_MATRIX_MCFG1 0x04 /* Master Configuration Register 1 */
-#define AT91_MATRIX_MCFG2 0x08 /* Master Configuration Register 2 */
-#define AT91_MATRIX_MCFG3 0x0C /* Master Configuration Register 3 */
-#define AT91_MATRIX_MCFG4 0x10 /* Master Configuration Register 4 */
-#define AT91_MATRIX_MCFG5 0x14 /* Master Configuration Register 5 */
-#define AT91_MATRIX_MCFG6 0x18 /* Master Configuration Register 6 */
-#define AT91_MATRIX_MCFG7 0x1C /* Master Configuration Register 7 */
-#define AT91_MATRIX_MCFG8 0x20 /* Master Configuration Register 8 */
-#define AT91_MATRIX_MCFG9 0x24 /* Master Configuration Register 9 */
-#define AT91_MATRIX_MCFG10 0x28 /* Master Configuration Register 10 */
-#define AT91_MATRIX_MCFG11 0x2C /* Master Configuration Register 11 */
-#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
-#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
-#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
-#define AT91_MATRIX_ULBT_FOUR (2 << 0)
-#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
-#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
-#define AT91_MATRIX_ULBT_THIRTYTWO (5 << 0)
-#define AT91_MATRIX_ULBT_SIXTYFOUR (6 << 0)
-#define AT91_MATRIX_ULBT_128 (7 << 0)
-
-#define AT91_MATRIX_SCFG0 0x40 /* Slave Configuration Register 0 */
-#define AT91_MATRIX_SCFG1 0x44 /* Slave Configuration Register 1 */
-#define AT91_MATRIX_SCFG2 0x48 /* Slave Configuration Register 2 */
-#define AT91_MATRIX_SCFG3 0x4C /* Slave Configuration Register 3 */
-#define AT91_MATRIX_SCFG4 0x50 /* Slave Configuration Register 4 */
-#define AT91_MATRIX_SCFG5 0x54 /* Slave Configuration Register 5 */
-#define AT91_MATRIX_SCFG6 0x58 /* Slave Configuration Register 6 */
-#define AT91_MATRIX_SCFG7 0x5C /* Slave Configuration Register 7 */
-#define AT91_MATRIX_SLOT_CYCLE (0x1ff << 0) /* Maximum Number of Allowed Cycles for a Burst */
-#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
-#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
-#define AT91_MATRIX_FIXED_DEFMSTR (0xf << 18) /* Fixed Index of Default Master */
-
-#define AT91_MATRIX_PRAS0 0x80 /* Priority Register A for Slave 0 */
-#define AT91_MATRIX_PRBS0 0x84 /* Priority Register B for Slave 0 */
-#define AT91_MATRIX_PRAS1 0x88 /* Priority Register A for Slave 1 */
-#define AT91_MATRIX_PRBS1 0x8C /* Priority Register B for Slave 1 */
-#define AT91_MATRIX_PRAS2 0x90 /* Priority Register A for Slave 2 */
-#define AT91_MATRIX_PRBS2 0x94 /* Priority Register B for Slave 2 */
-#define AT91_MATRIX_PRAS3 0x98 /* Priority Register A for Slave 3 */
-#define AT91_MATRIX_PRBS3 0x9C /* Priority Register B for Slave 3 */
-#define AT91_MATRIX_PRAS4 0xA0 /* Priority Register A for Slave 4 */
-#define AT91_MATRIX_PRBS4 0xA4 /* Priority Register B for Slave 4 */
-#define AT91_MATRIX_PRAS5 0xA8 /* Priority Register A for Slave 5 */
-#define AT91_MATRIX_PRBS5 0xAC /* Priority Register B for Slave 5 */
-#define AT91_MATRIX_PRAS6 0xB0 /* Priority Register A for Slave 6 */
-#define AT91_MATRIX_PRBS6 0xB4 /* Priority Register B for Slave 6 */
-#define AT91_MATRIX_PRAS7 0xB8 /* Priority Register A for Slave 7 */
-#define AT91_MATRIX_PRBS7 0xBC /* Priority Register B for Slave 7 */
-#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
-#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
-#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
-#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
-#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
-#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
-#define AT91_MATRIX_M6PR (3 << 24) /* Master 6 Priority */
-#define AT91_MATRIX_M7PR (3 << 28) /* Master 7 Priority */
-#define AT91_MATRIX_M8PR (3 << 0) /* Master 8 Priority (in Register B) */
-#define AT91_MATRIX_M9PR (3 << 4) /* Master 9 Priority (in Register B) */
-#define AT91_MATRIX_M10PR (3 << 8) /* Master 10 Priority (in Register B) */
-#define AT91_MATRIX_M11PR (3 << 12) /* Master 11 Priority (in Register B) */
-
-#define AT91_MATRIX_MRCR 0x100 /* Master Remap Control Register */
-#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
-#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
-#define AT91_MATRIX_RCB2 (1 << 2)
-#define AT91_MATRIX_RCB3 (1 << 3)
-#define AT91_MATRIX_RCB4 (1 << 4)
-#define AT91_MATRIX_RCB5 (1 << 5)
-#define AT91_MATRIX_RCB6 (1 << 6)
-#define AT91_MATRIX_RCB7 (1 << 7)
-#define AT91_MATRIX_RCB8 (1 << 8)
-#define AT91_MATRIX_RCB9 (1 << 9)
-#define AT91_MATRIX_RCB10 (1 << 10)
-#define AT91_MATRIX_RCB11 (1 << 11)
-
-#define AT91_MATRIX_TCMR 0x110 /* TCM Configuration Register */
-#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
-#define AT91_MATRIX_ITCM_0 (0 << 0)
-#define AT91_MATRIX_ITCM_32 (6 << 0)
-#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
-#define AT91_MATRIX_DTCM_0 (0 << 4)
-#define AT91_MATRIX_DTCM_32 (6 << 4)
-#define AT91_MATRIX_DTCM_64 (7 << 4)
-#define AT91_MATRIX_TCM_NWS (0x1 << 11) /* Wait state TCM register */
-#define AT91_MATRIX_TCM_NO_WS (0x0 << 11)
-#define AT91_MATRIX_TCM_ONE_WS (0x1 << 11)
-
-#define AT91_MATRIX_VIDEO 0x118 /* Video Mode Configuration Register */
-#define AT91C_VDEC_SEL (0x1 << 0) /* Video Mode Selection */
-#define AT91C_VDEC_SEL_OFF (0 << 0)
-#define AT91C_VDEC_SEL_ON (1 << 0)
-
-#define AT91_MATRIX_EBICSA 0x128 /* EBI Chip Select Assignment Register */
-#define AT91_MATRIX_EBI_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_EBI_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_EBI_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_EBI_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_EBI_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_EBI_CS4A (1 << 4) /* Chip Select 4 Assignment */
-#define AT91_MATRIX_EBI_CS4A_SMC (0 << 4)
-#define AT91_MATRIX_EBI_CS4A_SMC_CF0 (1 << 4)
-#define AT91_MATRIX_EBI_CS5A (1 << 5) /* Chip Select 5 Assignment */
-#define AT91_MATRIX_EBI_CS5A_SMC (0 << 5)
-#define AT91_MATRIX_EBI_CS5A_SMC_CF1 (1 << 5)
-#define AT91_MATRIX_EBI_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-#define AT91_MATRIX_EBI_DBPU_ON (0 << 8)
-#define AT91_MATRIX_EBI_DBPU_OFF (1 << 8)
-#define AT91_MATRIX_EBI_VDDIOMSEL (1 << 16) /* Memory voltage selection */
-#define AT91_MATRIX_EBI_VDDIOMSEL_1_8V (0 << 16)
-#define AT91_MATRIX_EBI_VDDIOMSEL_3_3V (1 << 16)
-#define AT91_MATRIX_EBI_EBI_IOSR (1 << 17) /* EBI I/O slew rate selection */
-#define AT91_MATRIX_EBI_EBI_IOSR_REDUCED (0 << 17)
-#define AT91_MATRIX_EBI_EBI_IOSR_NORMAL (1 << 17)
-#define AT91_MATRIX_EBI_DDR_IOSR (1 << 18) /* DDR2 dedicated port I/O slew rate selection */
-#define AT91_MATRIX_EBI_DDR_IOSR_REDUCED (0 << 18)
-#define AT91_MATRIX_EBI_DDR_IOSR_NORMAL (1 << 18)
-
-#define AT91_MATRIX_WPMR 0x1E4 /* Write Protect Mode Register */
-#define AT91_MATRIX_WPMR_WPEN (1 << 0) /* Write Protect ENable */
-#define AT91_MATRIX_WPMR_WP_WPDIS (0 << 0)
-#define AT91_MATRIX_WPMR_WP_WPEN (1 << 0)
-#define AT91_MATRIX_WPMR_WPKEY (0xFFFFFF << 8) /* Write Protect KEY */
-
-#define AT91_MATRIX_WPSR 0x1E8 /* Write Protect Status Register */
-#define AT91_MATRIX_WPSR_WPVS (1 << 0) /* Write Protect Violation Status */
-#define AT91_MATRIX_WPSR_NO_WPV (0 << 0)
-#define AT91_MATRIX_WPSR_WPV (1 << 0)
-#define AT91_MATRIX_WPSR_WPVSRC (0xFFFF << 8) /* Write Protect Violation Source */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9n12.h b/arch/arm/mach-at91/include/mach/at91sam9n12.h
deleted file mode 100644
index 0151bcf6163c..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9n12.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * SoC specific header file for the AT91SAM9N12
- *
- * Copyright (C) 2012 Atmel Corporation
- *
- * Common definitions, based on AT91SAM9N12 SoC datasheet
- *
- * Licensed under GPLv2 or later
- */
-
-#ifndef _AT91SAM9N12_H_
-#define _AT91SAM9N12_H_
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91SAM9N12_ID_PIOAB 2 /* Parallel I/O Controller A and B */
-#define AT91SAM9N12_ID_PIOCD 3 /* Parallel I/O Controller C and D */
-#define AT91SAM9N12_ID_FUSE 4 /* FUSE Controller */
-#define AT91SAM9N12_ID_USART0 5 /* USART 0 */
-#define AT91SAM9N12_ID_USART1 6 /* USART 1 */
-#define AT91SAM9N12_ID_USART2 7 /* USART 2 */
-#define AT91SAM9N12_ID_USART3 8 /* USART 3 */
-#define AT91SAM9N12_ID_TWI0 9 /* Two-Wire Interface 0 */
-#define AT91SAM9N12_ID_TWI1 10 /* Two-Wire Interface 1 */
-#define AT91SAM9N12_ID_MCI 12 /* High Speed Multimedia Card Interface */
-#define AT91SAM9N12_ID_SPI0 13 /* Serial Peripheral Interface 0 */
-#define AT91SAM9N12_ID_SPI1 14 /* Serial Peripheral Interface 1 */
-#define AT91SAM9N12_ID_UART0 15 /* UART 0 */
-#define AT91SAM9N12_ID_UART1 16 /* UART 1 */
-#define AT91SAM9N12_ID_TCB 17 /* Timer Counter 0, 1, 2, 3, 4 and 5 */
-#define AT91SAM9N12_ID_PWM 18 /* Pulse Width Modulation Controller */
-#define AT91SAM9N12_ID_ADC 19 /* ADC Controller */
-#define AT91SAM9N12_ID_DMA 20 /* DMA Controller */
-#define AT91SAM9N12_ID_UHP 22 /* USB Host High Speed */
-#define AT91SAM9N12_ID_UDP 23 /* USB Device High Speed */
-#define AT91SAM9N12_ID_LCDC 25 /* LCD Controller */
-#define AT91SAM9N12_ID_ISI 25 /* Image Sensor Interface */
-#define AT91SAM9N12_ID_SSC 28 /* Synchronous Serial Controller */
-#define AT91SAM9N12_ID_TRNG 30 /* TRNG */
-#define AT91SAM9N12_ID_IRQ0 31 /* Advanced Interrupt Controller */
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT91SAM9N12_BASE_USART0 0xf801c000
-#define AT91SAM9N12_BASE_USART1 0xf8020000
-#define AT91SAM9N12_BASE_USART2 0xf8024000
-#define AT91SAM9N12_BASE_USART3 0xf8028000
-
-/*
- * System Peripherals
- */
-#define AT91SAM9N12_BASE_RTC 0xfffffeb0
-
-/*
- * Internal Memory.
- */
-#define AT91SAM9N12_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define AT91SAM9N12_SRAM_SIZE SZ_32K /* Internal SRAM size (32Kb) */
-
-#define AT91SAM9N12_ROM_BASE 0x00100000 /* Internal ROM base address */
-#define AT91SAM9N12_ROM_SIZE SZ_128K /* Internal ROM size (128Kb) */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9n12_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9n12_matrix.h
deleted file mode 100644
index 40060cd62fa9..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9n12_matrix.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Matrix-centric header file for the AT91SAM9N12
- *
- * Copyright (C) 2012 Atmel Corporation.
- *
- * Only EBI related registers.
- * Write Protect register definitions may be useful.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef _AT91SAM9N12_MATRIX_H_
-#define _AT91SAM9N12_MATRIX_H_
-
-#define AT91_MATRIX_EBICSA (AT91_MATRIX + 0x118) /* EBI Chip Select Assignment Register */
-#define AT91_MATRIX_EBI_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_EBI_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_EBI_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_EBI_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_EBI_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_EBI_CS3A_SMC_NANDFLASH (1 << 3)
-#define AT91_MATRIX_EBI_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-#define AT91_MATRIX_EBI_DBPU_ON (0 << 8)
-#define AT91_MATRIX_EBI_DBPU_OFF (1 << 8)
-#define AT91_MATRIX_EBI_VDDIOMSEL (1 << 16) /* Memory voltage selection */
-#define AT91_MATRIX_EBI_VDDIOMSEL_1_8V (0 << 16)
-#define AT91_MATRIX_EBI_VDDIOMSEL_3_3V (1 << 16)
-#define AT91_MATRIX_EBI_EBI_IOSR (1 << 17) /* EBI I/O slew rate selection */
-#define AT91_MATRIX_EBI_EBI_IOSR_REDUCED (0 << 17)
-#define AT91_MATRIX_EBI_EBI_IOSR_NORMAL (1 << 17)
-#define AT91_MATRIX_EBI_DDR_IOSR (1 << 18) /* DDR2 dedicated port I/O slew rate selection */
-#define AT91_MATRIX_EBI_DDR_IOSR_REDUCED (0 << 18)
-#define AT91_MATRIX_EBI_DDR_IOSR_NORMAL (1 << 18)
-#define AT91_MATRIX_NFD0_SELECT (1 << 24) /* NAND Flash Data Bus Selection */
-#define AT91_MATRIX_NFD0_ON_D0 (0 << 24)
-#define AT91_MATRIX_NFD0_ON_D16 (1 << 24)
-#define AT91_MATRIX_DDR_MP_EN (1 << 25) /* DDR Multi-port Enable */
-#define AT91_MATRIX_MP_OFF (0 << 25)
-#define AT91_MATRIX_MP_ON (1 << 25)
-
-#define AT91_MATRIX_WPMR (AT91_MATRIX + 0x1E4) /* Write Protect Mode Register */
-#define AT91_MATRIX_WPMR_WPEN (1 << 0) /* Write Protect ENable */
-#define AT91_MATRIX_WPMR_WP_WPDIS (0 << 0)
-#define AT91_MATRIX_WPMR_WP_WPEN (1 << 0)
-#define AT91_MATRIX_WPMR_WPKEY (0xFFFFFF << 8) /* Write Protect KEY */
-
-#define AT91_MATRIX_WPSR (AT91_MATRIX + 0x1E8) /* Write Protect Status Register */
-#define AT91_MATRIX_WPSR_WPVS (1 << 0) /* Write Protect Violation Status */
-#define AT91_MATRIX_WPSR_NO_WPV (0 << 0)
-#define AT91_MATRIX_WPSR_WPV (1 << 0)
-#define AT91_MATRIX_WPSR_WPVSRC (0xFFFF << 8) /* Write Protect Violation Source */
-
-#endif
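The EBI chip-select assignment bits above are ordinary read-modify-write register fields. As a rough sketch of how such definitions are typically consumed (hypothetical caller, not code from this patch: ebi_cs3_to_nand() and the ioremap()ed 'matrix' pointer are assumptions, and 0x118 is simply the EBICSA offset given in the header), routing chip select 3 to the NAND controller with data on D16 could look like:

	#include <linux/io.h>

	/* Hypothetical sketch: route EBI CS3 to the SMC/NAND controller and
	 * put NAND data on D16..D31, using the bit definitions from the
	 * header above.  'matrix' is an ioremap()ed pointer to the
	 * bus-matrix register block.
	 */
	static void ebi_cs3_to_nand(void __iomem *matrix)
	{
		u32 csa = readl(matrix + 0x118);	/* EBICSA offset, per the header */

		csa |= AT91_MATRIX_EBI_CS3A_SMC_NANDFLASH;	/* CS3 -> SMC (NAND) */
		csa |= AT91_MATRIX_NFD0_ON_D16;			/* NAND data bus on D16 */
		writel(csa, matrix + 0x118);
	}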
diff --git a/arch/arm/mach-at91/include/mach/at91sam9rl.h b/arch/arm/mach-at91/include/mach/at91sam9rl.h
deleted file mode 100644
index a15db56d33fa..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9rl.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9260.h
- *
- * Copyright (C) 2007 Atmel Corporation
- *
- * Common definitions.
- * Based on AT91SAM9RL datasheet revision A. (Preliminary)
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#ifndef AT91SAM9RL_H
-#define AT91SAM9RL_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91SAM9RL_ID_PIOA 2 /* Parallel IO Controller A */
-#define AT91SAM9RL_ID_PIOB 3 /* Parallel IO Controller B */
-#define AT91SAM9RL_ID_PIOC 4 /* Parallel IO Controller C */
-#define AT91SAM9RL_ID_PIOD 5 /* Parallel IO Controller D */
-#define AT91SAM9RL_ID_US0 6 /* USART 0 */
-#define AT91SAM9RL_ID_US1 7 /* USART 1 */
-#define AT91SAM9RL_ID_US2 8 /* USART 2 */
-#define AT91SAM9RL_ID_US3 9 /* USART 3 */
-#define AT91SAM9RL_ID_MCI 10 /* Multimedia Card Interface */
-#define AT91SAM9RL_ID_TWI0 11 /* TWI 0 */
-#define AT91SAM9RL_ID_TWI1 12 /* TWI 1 */
-#define AT91SAM9RL_ID_SPI 13 /* Serial Peripheral Interface */
-#define AT91SAM9RL_ID_SSC0 14 /* Serial Synchronous Controller 0 */
-#define AT91SAM9RL_ID_SSC1 15 /* Serial Synchronous Controller 1 */
-#define AT91SAM9RL_ID_TC0 16 /* Timer Counter 0 */
-#define AT91SAM9RL_ID_TC1 17 /* Timer Counter 1 */
-#define AT91SAM9RL_ID_TC2 18 /* Timer Counter 2 */
-#define AT91SAM9RL_ID_PWMC 19 /* Pulse Width Modulation Controller */
-#define AT91SAM9RL_ID_TSC 20 /* Touch Screen Controller */
-#define AT91SAM9RL_ID_DMA 21 /* DMA Controller */
-#define AT91SAM9RL_ID_UDPHS 22 /* USB Device HS */
-#define AT91SAM9RL_ID_LCDC 23 /* LCD Controller */
-#define AT91SAM9RL_ID_AC97C 24 /* AC97 Controller */
-#define AT91SAM9RL_ID_IRQ0 31 /* Advanced Interrupt Controller (IRQ0) */
-
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT91SAM9RL_BASE_TCB0 0xfffa0000
-#define AT91SAM9RL_BASE_TC0 0xfffa0000
-#define AT91SAM9RL_BASE_TC1 0xfffa0040
-#define AT91SAM9RL_BASE_TC2 0xfffa0080
-#define AT91SAM9RL_BASE_MCI 0xfffa4000
-#define AT91SAM9RL_BASE_TWI0 0xfffa8000
-#define AT91SAM9RL_BASE_TWI1 0xfffac000
-#define AT91SAM9RL_BASE_US0 0xfffb0000
-#define AT91SAM9RL_BASE_US1 0xfffb4000
-#define AT91SAM9RL_BASE_US2 0xfffb8000
-#define AT91SAM9RL_BASE_US3 0xfffbc000
-#define AT91SAM9RL_BASE_SSC0 0xfffc0000
-#define AT91SAM9RL_BASE_SSC1 0xfffc4000
-#define AT91SAM9RL_BASE_PWMC 0xfffc8000
-#define AT91SAM9RL_BASE_SPI 0xfffcc000
-#define AT91SAM9RL_BASE_TSC 0xfffd0000
-#define AT91SAM9RL_BASE_UDPHS 0xfffd4000
-#define AT91SAM9RL_BASE_AC97C 0xfffd8000
-
-
-/*
- * System Peripherals (offset from AT91_BASE_SYS)
- */
-#define AT91_SCKCR (0xfffffd50 - AT91_BASE_SYS)
-
-#define AT91SAM9RL_BASE_DMA 0xffffe600
-#define AT91SAM9RL_BASE_ECC 0xffffe800
-#define AT91SAM9RL_BASE_SDRAMC 0xffffea00
-#define AT91SAM9RL_BASE_SMC 0xffffec00
-#define AT91SAM9RL_BASE_MATRIX 0xffffee00
-#define AT91SAM9RL_BASE_DBGU AT91_BASE_DBGU0
-#define AT91SAM9RL_BASE_PIOA 0xfffff400
-#define AT91SAM9RL_BASE_PIOB 0xfffff600
-#define AT91SAM9RL_BASE_PIOC 0xfffff800
-#define AT91SAM9RL_BASE_PIOD 0xfffffa00
-#define AT91SAM9RL_BASE_RSTC 0xfffffd00
-#define AT91SAM9RL_BASE_SHDWC 0xfffffd10
-#define AT91SAM9RL_BASE_RTT 0xfffffd20
-#define AT91SAM9RL_BASE_PIT 0xfffffd30
-#define AT91SAM9RL_BASE_WDT 0xfffffd40
-#define AT91SAM9RL_BASE_GPBR 0xfffffd60
-#define AT91SAM9RL_BASE_RTC 0xfffffe00
-
-
-/*
- * Internal Memory.
- */
-#define AT91SAM9RL_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define AT91SAM9RL_SRAM_SIZE SZ_16K /* Internal SRAM size (16Kb) */
-
-#define AT91SAM9RL_ROM_BASE 0x00400000 /* Internal ROM base address */
-#define AT91SAM9RL_ROM_SIZE (2 * SZ_16K) /* Internal ROM size (32Kb) */
-
-#define AT91SAM9RL_LCDC_BASE 0x00500000 /* LCD Controller */
-#define AT91SAM9RL_UDPHS_FIFO 0x00600000 /* USB Device HS controller */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9rl_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9rl_matrix.h
deleted file mode 100644
index 6d160adadafc..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9rl_matrix.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91sam9rl_matrix.h
- *
- * Copyright (C) 2007 Atmel Corporation
- *
- * Memory Controllers (MATRIX, EBI) - System peripherals registers.
- * Based on AT91SAM9RL datasheet revision A. (Preliminary)
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#ifndef AT91SAM9RL_MATRIX_H
-#define AT91SAM9RL_MATRIX_H
-
-#define AT91_MATRIX_MCFG0 0x00 /* Master Configuration Register 0 */
-#define AT91_MATRIX_MCFG1 0x04 /* Master Configuration Register 1 */
-#define AT91_MATRIX_MCFG2 0x08 /* Master Configuration Register 2 */
-#define AT91_MATRIX_MCFG3 0x0C /* Master Configuration Register 3 */
-#define AT91_MATRIX_MCFG4 0x10 /* Master Configuration Register 4 */
-#define AT91_MATRIX_MCFG5 0x14 /* Master Configuration Register 5 */
-#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
-#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
-#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
-#define AT91_MATRIX_ULBT_FOUR (2 << 0)
-#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
-#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
-
-#define AT91_MATRIX_SCFG0 0x40 /* Slave Configuration Register 0 */
-#define AT91_MATRIX_SCFG1 0x44 /* Slave Configuration Register 1 */
-#define AT91_MATRIX_SCFG2 0x48 /* Slave Configuration Register 2 */
-#define AT91_MATRIX_SCFG3 0x4C /* Slave Configuration Register 3 */
-#define AT91_MATRIX_SCFG4 0x50 /* Slave Configuration Register 4 */
-#define AT91_MATRIX_SCFG5 0x54 /* Slave Configuration Register 5 */
-#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */
-#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
-#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
-#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
-#define AT91_MATRIX_FIXED_DEFMSTR (0xf << 18) /* Fixed Index of Default Master */
-#define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */
-#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
-#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
-
-#define AT91_MATRIX_PRAS0 0x80 /* Priority Register A for Slave 0 */
-#define AT91_MATRIX_PRAS1 0x88 /* Priority Register A for Slave 1 */
-#define AT91_MATRIX_PRAS2 0x90 /* Priority Register A for Slave 2 */
-#define AT91_MATRIX_PRAS3 0x98 /* Priority Register A for Slave 3 */
-#define AT91_MATRIX_PRAS4 0xA0 /* Priority Register A for Slave 4 */
-#define AT91_MATRIX_PRAS5 0xA8 /* Priority Register A for Slave 5 */
-#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
-#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
-#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
-#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
-#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
-#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
-
-#define AT91_MATRIX_MRCR 0x100 /* Master Remap Control Register */
-#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
-#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
-#define AT91_MATRIX_RCB2 (1 << 2)
-#define AT91_MATRIX_RCB3 (1 << 3)
-#define AT91_MATRIX_RCB4 (1 << 4)
-#define AT91_MATRIX_RCB5 (1 << 5)
-
-#define AT91_MATRIX_TCMR 0x114 /* TCM Configuration Register */
-#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
-#define AT91_MATRIX_ITCM_0 (0 << 0)
-#define AT91_MATRIX_ITCM_16 (5 << 0)
-#define AT91_MATRIX_ITCM_32 (6 << 0)
-#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
-#define AT91_MATRIX_DTCM_0 (0 << 4)
-#define AT91_MATRIX_DTCM_16 (5 << 4)
-#define AT91_MATRIX_DTCM_32 (6 << 4)
-
-#define AT91_MATRIX_EBICSA 0x120 /* EBI0 Chip Select Assignment Register */
-#define AT91_MATRIX_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_CS3A_SMC_SMARTMEDIA (1 << 3)
-#define AT91_MATRIX_CS4A (1 << 4) /* Chip Select 4 Assignment */
-#define AT91_MATRIX_CS4A_SMC (0 << 4)
-#define AT91_MATRIX_CS4A_SMC_CF1 (1 << 4)
-#define AT91_MATRIX_CS5A (1 << 5) /* Chip Select 5 Assignment */
-#define AT91_MATRIX_CS5A_SMC (0 << 5)
-#define AT91_MATRIX_CS5A_SMC_CF2 (1 << 5)
-#define AT91_MATRIX_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-#define AT91_MATRIX_VDDIOMSEL (1 << 16) /* Memory voltage selection */
-#define AT91_MATRIX_VDDIOMSEL_1_8V (0 << 16)
-#define AT91_MATRIX_VDDIOMSEL_3_3V (1 << 16)
-
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9x5.h b/arch/arm/mach-at91/include/mach/at91sam9x5.h
deleted file mode 100644
index 2fc76c49e97c..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9x5.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Chip-specific header file for the AT91SAM9x5 family
- *
- * Copyright (C) 2009-2012 Atmel Corporation.
- *
- * Common definitions.
- * Based on AT91SAM9x5 datasheet.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef AT91SAM9X5_H
-#define AT91SAM9X5_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91SAM9X5_ID_PIOAB 2 /* Parallel I/O Controller A and B */
-#define AT91SAM9X5_ID_PIOCD 3 /* Parallel I/O Controller C and D */
-#define AT91SAM9X5_ID_SMD 4 /* SMD Soft Modem (SMD) */
-#define AT91SAM9X5_ID_USART0 5 /* USART 0 */
-#define AT91SAM9X5_ID_USART1 6 /* USART 1 */
-#define AT91SAM9X5_ID_USART2 7 /* USART 2 */
-#define AT91SAM9X5_ID_USART3 8 /* USART 3 */
-#define AT91SAM9X5_ID_TWI0 9 /* Two-Wire Interface 0 */
-#define AT91SAM9X5_ID_TWI1 10 /* Two-Wire Interface 1 */
-#define AT91SAM9X5_ID_TWI2 11 /* Two-Wire Interface 2 */
-#define AT91SAM9X5_ID_MCI0 12 /* High Speed Multimedia Card Interface 0 */
-#define AT91SAM9X5_ID_SPI0 13 /* Serial Peripheral Interface 0 */
-#define AT91SAM9X5_ID_SPI1 14 /* Serial Peripheral Interface 1 */
-#define AT91SAM9X5_ID_UART0 15 /* UART 0 */
-#define AT91SAM9X5_ID_UART1 16 /* UART 1 */
-#define AT91SAM9X5_ID_TCB 17 /* Timer Counter 0, 1, 2, 3, 4 and 5 */
-#define AT91SAM9X5_ID_PWM 18 /* Pulse Width Modulation Controller */
-#define AT91SAM9X5_ID_ADC 19 /* ADC Controller */
-#define AT91SAM9X5_ID_DMA0 20 /* DMA Controller 0 */
-#define AT91SAM9X5_ID_DMA1 21 /* DMA Controller 1 */
-#define AT91SAM9X5_ID_UHPHS 22 /* USB Host High Speed */
-#define AT91SAM9X5_ID_UDPHS 23 /* USB Device High Speed */
-#define AT91SAM9X5_ID_EMAC0 24 /* Ethernet MAC0 */
-#define AT91SAM9X5_ID_LCDC 25 /* LCD Controller */
-#define AT91SAM9X5_ID_ISI 25 /* Image Sensor Interface */
-#define AT91SAM9X5_ID_MCI1 26 /* High Speed Multimedia Card Interface 1 */
-#define AT91SAM9X5_ID_EMAC1 27 /* Ethernet MAC1 */
-#define AT91SAM9X5_ID_SSC 28 /* Synchronous Serial Controller */
-#define AT91SAM9X5_ID_CAN0 29 /* CAN Controller 0 */
-#define AT91SAM9X5_ID_CAN1 30 /* CAN Controller 1 */
-#define AT91SAM9X5_ID_IRQ0 31 /* Advanced Interrupt Controller */
-
-/*
- * User Peripheral physical base addresses.
- */
-#define AT91SAM9X5_BASE_USART0 0xf801c000
-#define AT91SAM9X5_BASE_USART1 0xf8020000
-#define AT91SAM9X5_BASE_USART2 0xf8024000
-
-/*
- * System Peripherals
- */
-#define AT91SAM9X5_BASE_RTC 0xfffffeb0
-
-/*
- * Internal Memory.
- */
-#define AT91SAM9X5_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define AT91SAM9X5_SRAM_SIZE SZ_32K /* Internal SRAM size (32Kb) */
-
-#define AT91SAM9X5_ROM_BASE 0x00400000 /* Internal ROM base address */
-#define AT91SAM9X5_ROM_SIZE SZ_64K /* Internal ROM size (64Kb) */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9x5_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9x5_matrix.h
deleted file mode 100644
index a606d3966470..000000000000
--- a/arch/arm/mach-at91/include/mach/at91sam9x5_matrix.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Matrix-centric header file for the AT91SAM9x5 family
- *
- * Copyright (C) 2009-2012 Atmel Corporation.
- *
- * Only EBI related registers.
- * Write Protect register definitions may be useful.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef AT91SAM9X5_MATRIX_H
-#define AT91SAM9X5_MATRIX_H
-
-#define AT91_MATRIX_EBICSA (AT91_MATRIX + 0x120) /* EBI Chip Select Assignment Register */
-#define AT91_MATRIX_EBI_CS1A (1 << 1) /* Chip Select 1 Assignment */
-#define AT91_MATRIX_EBI_CS1A_SMC (0 << 1)
-#define AT91_MATRIX_EBI_CS1A_SDRAMC (1 << 1)
-#define AT91_MATRIX_EBI_CS3A (1 << 3) /* Chip Select 3 Assignment */
-#define AT91_MATRIX_EBI_CS3A_SMC (0 << 3)
-#define AT91_MATRIX_EBI_CS3A_SMC_NANDFLASH (1 << 3)
-#define AT91_MATRIX_EBI_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
-#define AT91_MATRIX_EBI_DBPU_ON (0 << 8)
-#define AT91_MATRIX_EBI_DBPU_OFF (1 << 8)
-#define AT91_MATRIX_EBI_VDDIOMSEL (1 << 16) /* Memory voltage selection */
-#define AT91_MATRIX_EBI_VDDIOMSEL_1_8V (0 << 16)
-#define AT91_MATRIX_EBI_VDDIOMSEL_3_3V (1 << 16)
-#define AT91_MATRIX_EBI_EBI_IOSR (1 << 17) /* EBI I/O slew rate selection */
-#define AT91_MATRIX_EBI_EBI_IOSR_REDUCED (0 << 17)
-#define AT91_MATRIX_EBI_EBI_IOSR_NORMAL (1 << 17)
-#define AT91_MATRIX_EBI_DDR_IOSR (1 << 18) /* DDR2 dedicated port I/O slew rate selection */
-#define AT91_MATRIX_EBI_DDR_IOSR_REDUCED (0 << 18)
-#define AT91_MATRIX_EBI_DDR_IOSR_NORMAL (1 << 18)
-#define AT91_MATRIX_NFD0_SELECT (1 << 24) /* NAND Flash Data Bus Selection */
-#define AT91_MATRIX_NFD0_ON_D0 (0 << 24)
-#define AT91_MATRIX_NFD0_ON_D16 (1 << 24)
-#define AT91_MATRIX_DDR_MP_EN (1 << 25) /* DDR Multi-port Enable */
-#define AT91_MATRIX_MP_OFF (0 << 25)
-#define AT91_MATRIX_MP_ON (1 << 25)
-
-#define AT91_MATRIX_WPMR (AT91_MATRIX + 0x1E4) /* Write Protect Mode Register */
-#define AT91_MATRIX_WPMR_WPEN (1 << 0) /* Write Protect ENable */
-#define AT91_MATRIX_WPMR_WP_WPDIS (0 << 0)
-#define AT91_MATRIX_WPMR_WP_WPEN (1 << 0)
-#define AT91_MATRIX_WPMR_WPKEY (0xFFFFFF << 8) /* Write Protect KEY */
-
-#define AT91_MATRIX_WPSR (AT91_MATRIX + 0x1E8) /* Write Protect Status Register */
-#define AT91_MATRIX_WPSR_WPVS (1 << 0) /* Write Protect Violation Status */
-#define AT91_MATRIX_WPSR_NO_WPV (0 << 0)
-#define AT91_MATRIX_WPSR_WPV (1 << 0)
-#define AT91_MATRIX_WPSR_WPVSRC (0xFFFF << 8) /* Write Protect Violation Source */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/cpu.h b/arch/arm/mach-at91/include/mach/cpu.h
deleted file mode 100644
index ce7c80a44983..000000000000
--- a/arch/arm/mach-at91/include/mach/cpu.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/cpu.h
- *
- * Copyright (C) 2006 SAN People
- * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __MACH_CPU_H__
-#define __MACH_CPU_H__
-
-#define ARCH_ID_AT91RM9200 0x09290780
-#define ARCH_ID_AT91SAM9260 0x019803a0
-#define ARCH_ID_AT91SAM9261 0x019703a0
-#define ARCH_ID_AT91SAM9263 0x019607a0
-#define ARCH_ID_AT91SAM9G10 0x019903a0
-#define ARCH_ID_AT91SAM9G20 0x019905a0
-#define ARCH_ID_AT91SAM9RL64 0x019b03a0
-#define ARCH_ID_AT91SAM9G45 0x819b05a0
-#define ARCH_ID_AT91SAM9G45MRL 0x819b05a2 /* aka 9G45-ES2 & non ES lots */
-#define ARCH_ID_AT91SAM9G45ES 0x819b05a1 /* 9G45-ES (Engineering Sample) */
-#define ARCH_ID_AT91SAM9X5 0x819a05a0
-#define ARCH_ID_AT91SAM9N12 0x819a07a0
-
-#define ARCH_ID_AT91SAM9XE128 0x329973a0
-#define ARCH_ID_AT91SAM9XE256 0x329a93a0
-#define ARCH_ID_AT91SAM9XE512 0x329aa3a0
-
-#define ARCH_ID_AT91M40800 0x14080044
-#define ARCH_ID_AT91R40807 0x44080746
-#define ARCH_ID_AT91M40807 0x14080745
-#define ARCH_ID_AT91R40008 0x44000840
-
-#define ARCH_ID_SAMA5 0x8A5C07C0
-
-#define ARCH_EXID_AT91SAM9M11 0x00000001
-#define ARCH_EXID_AT91SAM9M10 0x00000002
-#define ARCH_EXID_AT91SAM9G46 0x00000003
-#define ARCH_EXID_AT91SAM9G45 0x00000004
-
-#define ARCH_EXID_AT91SAM9G15 0x00000000
-#define ARCH_EXID_AT91SAM9G35 0x00000001
-#define ARCH_EXID_AT91SAM9X35 0x00000002
-#define ARCH_EXID_AT91SAM9G25 0x00000003
-#define ARCH_EXID_AT91SAM9X25 0x00000004
-
-#define ARCH_EXID_SAMA5D3 0x00004300
-#define ARCH_EXID_SAMA5D31 0x00444300
-#define ARCH_EXID_SAMA5D33 0x00414300
-#define ARCH_EXID_SAMA5D34 0x00414301
-#define ARCH_EXID_SAMA5D35 0x00584300
-#define ARCH_EXID_SAMA5D36 0x00004301
-
-#define ARCH_EXID_SAMA5D4 0x00000007
-#define ARCH_EXID_SAMA5D41 0x00000001
-#define ARCH_EXID_SAMA5D42 0x00000002
-#define ARCH_EXID_SAMA5D43 0x00000003
-#define ARCH_EXID_SAMA5D44 0x00000004
-
-#define ARCH_FAMILY_AT91SAM9 0x01900000
-#define ARCH_FAMILY_AT91SAM9XE 0x02900000
-
-/* RM9200 type */
-#define ARCH_REVISON_9200_BGA (0 << 0)
-#define ARCH_REVISON_9200_PQFP (1 << 0)
-
-#ifndef __ASSEMBLY__
-enum at91_soc_type {
- /* 920T */
- AT91_SOC_RM9200,
-
- /* SAM92xx */
- AT91_SOC_SAM9260, AT91_SOC_SAM9261, AT91_SOC_SAM9263,
-
- /* SAM9Gxx */
- AT91_SOC_SAM9G10, AT91_SOC_SAM9G20, AT91_SOC_SAM9G45,
-
- /* SAM9RL */
- AT91_SOC_SAM9RL,
-
- /* SAM9X5 */
- AT91_SOC_SAM9X5,
-
- /* SAM9N12 */
- AT91_SOC_SAM9N12,
-
- /* SAMA5D3 */
- AT91_SOC_SAMA5D3,
-
- /* SAMA5D4 */
- AT91_SOC_SAMA5D4,
-
- /* Unknown type */
- AT91_SOC_UNKNOWN,
-};
-
-enum at91_soc_subtype {
- /* RM9200 */
- AT91_SOC_RM9200_BGA, AT91_SOC_RM9200_PQFP,
-
- /* SAM9260 */
- AT91_SOC_SAM9XE,
-
- /* SAM9G45 */
- AT91_SOC_SAM9G45ES, AT91_SOC_SAM9M10, AT91_SOC_SAM9G46, AT91_SOC_SAM9M11,
-
- /* SAM9X5 */
- AT91_SOC_SAM9G15, AT91_SOC_SAM9G35, AT91_SOC_SAM9X35,
- AT91_SOC_SAM9G25, AT91_SOC_SAM9X25,
-
- /* SAMA5D3 */
- AT91_SOC_SAMA5D31, AT91_SOC_SAMA5D33, AT91_SOC_SAMA5D34,
- AT91_SOC_SAMA5D35, AT91_SOC_SAMA5D36,
-
- /* SAMA5D4 */
- AT91_SOC_SAMA5D41, AT91_SOC_SAMA5D42, AT91_SOC_SAMA5D43,
- AT91_SOC_SAMA5D44,
-
- /* No subtype for this SoC */
- AT91_SOC_SUBTYPE_NONE,
-
- /* Unknown subtype */
- AT91_SOC_SUBTYPE_UNKNOWN,
-};
-
-struct at91_socinfo {
- unsigned int type, subtype;
- unsigned int cidr, exid;
-};
-
-extern struct at91_socinfo at91_soc_initdata;
-const char *at91_get_soc_type(struct at91_socinfo *c);
-const char *at91_get_soc_subtype(struct at91_socinfo *c);
-
-static inline int at91_soc_is_detected(void)
-{
- return at91_soc_initdata.type != AT91_SOC_UNKNOWN;
-}
-
-#ifdef CONFIG_SOC_AT91RM9200
-#define cpu_is_at91rm9200() (at91_soc_initdata.type == AT91_SOC_RM9200)
-#define cpu_is_at91rm9200_bga() (at91_soc_initdata.subtype == AT91_SOC_RM9200_BGA)
-#define cpu_is_at91rm9200_pqfp() (at91_soc_initdata.subtype == AT91_SOC_RM9200_PQFP)
-#else
-#define cpu_is_at91rm9200() (0)
-#define cpu_is_at91rm9200_bga() (0)
-#define cpu_is_at91rm9200_pqfp() (0)
-#endif
-
-#ifdef CONFIG_SOC_AT91SAM9
-#define cpu_is_at91sam9xe() (at91_soc_initdata.subtype == AT91_SOC_SAM9XE)
-#define cpu_is_at91sam9260() (at91_soc_initdata.type == AT91_SOC_SAM9260)
-#define cpu_is_at91sam9g20() (at91_soc_initdata.type == AT91_SOC_SAM9G20)
-#define cpu_is_at91sam9261() (at91_soc_initdata.type == AT91_SOC_SAM9261)
-#define cpu_is_at91sam9g10() (at91_soc_initdata.type == AT91_SOC_SAM9G10)
-#define cpu_is_at91sam9263() (at91_soc_initdata.type == AT91_SOC_SAM9263)
-#define cpu_is_at91sam9rl() (at91_soc_initdata.type == AT91_SOC_SAM9RL)
-#define cpu_is_at91sam9g45() (at91_soc_initdata.type == AT91_SOC_SAM9G45)
-#define cpu_is_at91sam9g45es() (at91_soc_initdata.subtype == AT91_SOC_SAM9G45ES)
-#define cpu_is_at91sam9m10() (at91_soc_initdata.subtype == AT91_SOC_SAM9M10)
-#define cpu_is_at91sam9g46() (at91_soc_initdata.subtype == AT91_SOC_SAM9G46)
-#define cpu_is_at91sam9m11() (at91_soc_initdata.subtype == AT91_SOC_SAM9M11)
-#define cpu_is_at91sam9x5() (at91_soc_initdata.type == AT91_SOC_SAM9X5)
-#define cpu_is_at91sam9g15() (at91_soc_initdata.subtype == AT91_SOC_SAM9G15)
-#define cpu_is_at91sam9g35() (at91_soc_initdata.subtype == AT91_SOC_SAM9G35)
-#define cpu_is_at91sam9x35() (at91_soc_initdata.subtype == AT91_SOC_SAM9X35)
-#define cpu_is_at91sam9g25() (at91_soc_initdata.subtype == AT91_SOC_SAM9G25)
-#define cpu_is_at91sam9x25() (at91_soc_initdata.subtype == AT91_SOC_SAM9X25)
-#define cpu_is_at91sam9n12() (at91_soc_initdata.type == AT91_SOC_SAM9N12)
-#else
-#define cpu_is_at91sam9xe() (0)
-#define cpu_is_at91sam9260() (0)
-#define cpu_is_at91sam9g20() (0)
-#define cpu_is_at91sam9261() (0)
-#define cpu_is_at91sam9g10() (0)
-#define cpu_is_at91sam9263() (0)
-#define cpu_is_at91sam9rl() (0)
-#define cpu_is_at91sam9g45() (0)
-#define cpu_is_at91sam9g45es() (0)
-#define cpu_is_at91sam9m10() (0)
-#define cpu_is_at91sam9g46() (0)
-#define cpu_is_at91sam9m11() (0)
-#define cpu_is_at91sam9x5() (0)
-#define cpu_is_at91sam9g15() (0)
-#define cpu_is_at91sam9g35() (0)
-#define cpu_is_at91sam9x35() (0)
-#define cpu_is_at91sam9g25() (0)
-#define cpu_is_at91sam9x25() (0)
-#define cpu_is_at91sam9n12() (0)
-#endif
-
-#ifdef CONFIG_SOC_SAMA5D3
-#define cpu_is_sama5d3() (at91_soc_initdata.type == AT91_SOC_SAMA5D3)
-#else
-#define cpu_is_sama5d3() (0)
-#endif
-
-#ifdef CONFIG_SOC_SAMA5D4
-#define cpu_is_sama5d4() (at91_soc_initdata.type == AT91_SOC_SAMA5D4)
-#else
-#define cpu_is_sama5d4() (0)
-#endif
-
-/*
- * Since this is ARM, we will never run on any AVR32 CPU. But these
- * definitions may reduce clutter in common drivers.
- */
-#define cpu_is_at32ap7000() (0)
-#endif /* __ASSEMBLY__ */
-
-#endif /* __MACH_CPU_H__ */
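The removed cpu.h was the old runtime SoC-identification interface: at91_soc_initdata is filled in from the CIDR/EXID registers during early init, and each cpu_is_*() macro compiles to a constant 0 when the matching CONFIG_SOC_* option is off, so dead branches fold away at build time. A minimal usage sketch (hypothetical function, assuming the header above were still included):

	#include <linux/printk.h>

	/* Hypothetical sketch of how the removed interface was consumed. */
	static void report_soc(void)
	{
		if (!at91_soc_is_detected()) {
			pr_warn("AT91: SoC not identified yet\n");
			return;
		}

		pr_info("AT91: %s (%s) detected\n",
			at91_get_soc_type(&at91_soc_initdata),
			at91_get_soc_subtype(&at91_soc_initdata));

		/* Constant 0 unless CONFIG_SOC_AT91SAM9 is enabled. */
		if (cpu_is_at91sam9g45())
			pr_info("AT91: applying a sam9g45-only workaround\n");
	}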
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
deleted file mode 100644
index cacbaa52418f..000000000000
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/hardware.h
- *
- * Copyright (C) 2003 SAN People
- * Copyright (C) 2003 ATMEL
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include <asm/sizes.h>
-
-/* DBGU base */
-/* rm9200, 9260/9g20, 9261/9g10, 9rl */
-#define AT91_BASE_DBGU0 0xfffff200
-/* 9263, 9g45, sama5d3 */
-#define AT91_BASE_DBGU1 0xffffee00
-/* sama5d4 */
-#define AT91_BASE_DBGU2 0xfc069000
-
-#include <mach/at91rm9200.h>
-#include <mach/at91sam9260.h>
-#include <mach/at91sam9261.h>
-#include <mach/at91sam9263.h>
-#include <mach/at91sam9rl.h>
-#include <mach/at91sam9g45.h>
-#include <mach/at91sam9x5.h>
-#include <mach/at91sam9n12.h>
-#include <mach/sama5d3.h>
-#include <mach/sama5d4.h>
-
-/*
- * On all at91 except rm9200 and x40 have the System Controller starts
- * at address 0xffffc000 and has a size of 16KiB.
- *
- * On rm9200 it's start at 0xfffe4000 of 111KiB with non reserved data starting
- * at 0xfffff000
- *
- * Removes the individual definitions of AT91_BASE_SYS and
- * replaces them with a common version at base 0xfffffc000 and size 16KiB
- * and map the same memory space
- */
-#define AT91_BASE_SYS 0xffffc000
-
-/*
- * On sama5d4 there is no system controller, we map some needed peripherals
- */
-#define AT91_ALT_BASE_SYS 0xfc069000
-
-/*
- * On all at91 have the Advanced Interrupt Controller starts at address
- * 0xfffff000 and the Power Management Controller starts at 0xfffffc00
- */
-#define AT91_AIC 0xfffff000
-#define AT91_PMC 0xfffffc00
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
-
-#ifdef CONFIG_MMU
-/*
- * Remap the peripherals from address 0xFFF78000 .. 0xFFFFFFFF
- * to 0xFEF78000 .. 0xFF000000. (544Kb)
- */
-#define AT91_IO_PHYS_BASE 0xFFF78000
-#define AT91_IO_VIRT_BASE IOMEM(0xFF000000 - AT91_IO_SIZE)
-
-/*
- * On sama5d4, remap the peripherals from address 0xFC069000 .. 0xFC06F000
- * to 0xFB069000 .. 0xFB06F000. (24Kb)
- */
-#define AT91_ALT_IO_PHYS_BASE AT91_ALT_BASE_SYS
-#define AT91_ALT_IO_VIRT_BASE IOMEM(0xFB069000)
-#else
-/*
- * Identity mapping for the non MMU case.
- */
-#define AT91_IO_PHYS_BASE AT91_BASE_SYS
-#define AT91_IO_VIRT_BASE IOMEM(AT91_IO_PHYS_BASE)
-
-#define AT91_ALT_IO_PHYS_BASE AT91_ALT_BASE_SYS
-#define AT91_ALT_IO_VIRT_BASE IOMEM(AT91_ALT_BASE_SYS)
-#endif
-
-#define AT91_IO_SIZE (0xFFFFFFFF - AT91_IO_PHYS_BASE + 1)
-
- /* Convert a physical IO address to virtual IO address */
-#define AT91_IO_P2V(x) ((x) - AT91_IO_PHYS_BASE + AT91_IO_VIRT_BASE)
-#define AT91_ALT_IO_P2V(x) ((x) - AT91_ALT_IO_PHYS_BASE + AT91_ALT_IO_VIRT_BASE)
-
-/*
- * Virtual to Physical Address mapping for IO devices.
- */
-#define AT91_VA_BASE_SYS AT91_IO_P2V(AT91_BASE_SYS)
-#define AT91_ALT_VA_BASE_SYS AT91_ALT_IO_P2V(AT91_ALT_BASE_SYS)
-
- /* Internal SRAM is mapped below the IO devices */
-#define AT91_SRAM_MAX SZ_1M
-#define AT91_VIRT_BASE (AT91_IO_VIRT_BASE - AT91_SRAM_MAX)
-
-/* External Memory Map */
-#define AT91_CHIPSELECT_0 0x10000000
-#define AT91_CHIPSELECT_1 0x20000000
-#define AT91_CHIPSELECT_2 0x30000000
-#define AT91_CHIPSELECT_3 0x40000000
-#define AT91_CHIPSELECT_4 0x50000000
-#define AT91_CHIPSELECT_5 0x60000000
-#define AT91_CHIPSELECT_6 0x70000000
-#define AT91_CHIPSELECT_7 0x80000000
-
-/* Clocks */
-#define AT91_SLOW_CLOCK 32768 /* slow clock */
-
-/*
- * FIXME: this is needed to communicate between the pinctrl driver and
- * the PM implementation in the machine. Possibly part of the PM
- * implementation should be moved down into the pinctrl driver and get
- * called as part of the generic suspend/resume path.
- */
-#ifndef __ASSEMBLY__
-extern void at91_pinctrl_gpio_suspend(void);
-extern void at91_pinctrl_gpio_resume(void);
-#endif
-
-#endif
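The AT91_IO_P2V() macro above is a fixed linear remap of the last 544KiB of physical address space to just below 0xFF000000. Working the numbers from the definitions being removed (MMU case): AT91_IO_SIZE = 0xFFFFFFFF - 0xFFF78000 + 1 = 0x88000, so AT91_IO_VIRT_BASE = 0xFF000000 - 0x88000 = 0xFEF78000, and the PMC at physical 0xFFFFFC00 ends up at 0xFFFFFC00 - 0xFFF78000 + 0xFEF78000 = 0xFEFFFC00. The same translation written out as plain C (illustrative only; the EX_* names are made up):

	#define EX_IO_PHYS_BASE	0xFFF78000UL
	#define EX_IO_VIRT_BASE	0xFEF78000UL	/* 0xFF000000 - 0x88000 */
	#define EX_IO_P2V(x)	((x) - EX_IO_PHYS_BASE + EX_IO_VIRT_BASE)

	/* EX_IO_P2V(0xFFFFFC00UL) == 0xFEFFFC00UL, the mapped address of the PMC. */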
diff --git a/arch/arm/mach-at91/include/mach/sama5d3.h b/arch/arm/mach-at91/include/mach/sama5d3.h
deleted file mode 100644
index 25613d8c6dcd..000000000000
--- a/arch/arm/mach-at91/include/mach/sama5d3.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Chip-specific header file for the SAMA5D3 family
- *
- * Copyright (C) 2013 Atmel,
- * 2013 Ludovic Desroches <ludovic.desroches@atmel.com>
- *
- * Common definitions.
- * Based on SAMA5D3 datasheet.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef SAMA5D3_H
-#define SAMA5D3_H
-
-/*
- * Peripheral identifiers/interrupts.
- */
-#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
-#define AT91_ID_SYS 1 /* System Peripherals */
-#define SAMA5D3_ID_DBGU 2 /* debug Unit (usually no special interrupt line) */
-#define AT91_ID_PIT 3 /* PIT */
-#define SAMA5D3_ID_WDT 4 /* Watchdog Timer Interrupt */
-#define SAMA5D3_ID_HSMC 5 /* Static Memory Controller */
-#define SAMA5D3_ID_PIOA 6 /* PIOA */
-#define SAMA5D3_ID_PIOB 7 /* PIOB */
-#define SAMA5D3_ID_PIOC 8 /* PIOC */
-#define SAMA5D3_ID_PIOD 9 /* PIOD */
-#define SAMA5D3_ID_PIOE 10 /* PIOE */
-#define SAMA5D3_ID_SMD 11 /* SMD Soft Modem */
-#define SAMA5D3_ID_USART0 12 /* USART0 */
-#define SAMA5D3_ID_USART1 13 /* USART1 */
-#define SAMA5D3_ID_USART2 14 /* USART2 */
-#define SAMA5D3_ID_USART3 15 /* USART3 */
-#define SAMA5D3_ID_UART0 16 /* UART 0 */
-#define SAMA5D3_ID_UART1 17 /* UART 1 */
-#define SAMA5D3_ID_TWI0 18 /* Two-Wire Interface 0 */
-#define SAMA5D3_ID_TWI1 19 /* Two-Wire Interface 1 */
-#define SAMA5D3_ID_TWI2 20 /* Two-Wire Interface 2 */
-#define SAMA5D3_ID_HSMCI0 21 /* MCI */
-#define SAMA5D3_ID_HSMCI1 22 /* MCI */
-#define SAMA5D3_ID_HSMCI2 23 /* MCI */
-#define SAMA5D3_ID_SPI0 24 /* Serial Peripheral Interface 0 */
-#define SAMA5D3_ID_SPI1 25 /* Serial Peripheral Interface 1 */
-#define SAMA5D3_ID_TC0 26 /* Timer Counter 0 */
-#define SAMA5D3_ID_TC1 27 /* Timer Counter 2 */
-#define SAMA5D3_ID_PWM 28 /* Pulse Width Modulation Controller */
-#define SAMA5D3_ID_ADC 29 /* Touch Screen ADC Controller */
-#define SAMA5D3_ID_DMA0 30 /* DMA Controller 0 */
-#define SAMA5D3_ID_DMA1 31 /* DMA Controller 1 */
-#define SAMA5D3_ID_UHPHS 32 /* USB Host High Speed */
-#define SAMA5D3_ID_UDPHS 33 /* USB Device High Speed */
-#define SAMA5D3_ID_GMAC 34 /* Gigabit Ethernet MAC */
-#define SAMA5D3_ID_EMAC 35 /* Ethernet MAC */
-#define SAMA5D3_ID_LCDC 36 /* LCD Controller */
-#define SAMA5D3_ID_ISI 37 /* Image Sensor Interface */
-#define SAMA5D3_ID_SSC0 38 /* Synchronous Serial Controller 0 */
-#define SAMA5D3_ID_SSC1 39 /* Synchronous Serial Controller 1 */
-#define SAMA5D3_ID_CAN0 40 /* CAN Controller 0 */
-#define SAMA5D3_ID_CAN1 41 /* CAN Controller 1 */
-#define SAMA5D3_ID_SHA 42 /* Secure Hash Algorithm */
-#define SAMA5D3_ID_AES 43 /* Advanced Encryption Standard */
-#define SAMA5D3_ID_TDES 44 /* Triple Data Encryption Standard */
-#define SAMA5D3_ID_TRNG 45 /* True Random Generator Number */
-#define SAMA5D3_ID_IRQ0 47 /* Advanced Interrupt Controller (IRQ0) */
-
-/*
- * User Peripheral physical base addresses.
- */
-#define SAMA5D3_BASE_USART0 0xf001c000
-#define SAMA5D3_BASE_USART1 0xf0020000
-#define SAMA5D3_BASE_USART2 0xf8020000
-#define SAMA5D3_BASE_USART3 0xf8024000
-
-/*
- * System Peripherals
- */
-#define SAMA5D3_BASE_RTC 0xfffffeb0
-
-/*
- * Internal Memory
- */
-#define SAMA5D3_SRAM_BASE 0x00300000 /* Internal SRAM base address */
-#define SAMA5D3_SRAM_SIZE (128 * SZ_1K) /* Internal SRAM size (128Kb) */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/sama5d4.h b/arch/arm/mach-at91/include/mach/sama5d4.h
deleted file mode 100644
index f256a45d9854..000000000000
--- a/arch/arm/mach-at91/include/mach/sama5d4.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Chip-specific header file for the SAMA5D4 family
- *
- * Copyright (C) 2013 Atmel Corporation,
- * Nicolas Ferre <nicolas.ferre@atmel.com>
- *
- * Common definitions.
- * Based on SAMA5D4 datasheet.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef SAMA5D4_H
-#define SAMA5D4_H
-
-/*
- * User Peripheral physical base addresses.
- */
-#define SAMA5D4_BASE_USART3 0xfc00c000 /* (USART3 non-secure) Base Address */
-#define SAMA5D4_BASE_PMC 0xf0018000 /* (PMC) Base Address */
-#define SAMA5D4_BASE_MPDDRC 0xf0010000 /* (MPDDRC) Base Address */
-#define SAMA5D4_BASE_PIOD 0xfc068000 /* (PIOD) Base Address */
-
-/* Some other peripherals */
-#define SAMA5D4_BASE_SYS2 SAMA5D4_BASE_PIOD
-
-/*
- * Internal Memory.
- */
-#define SAMA5D4_NS_SRAM_BASE 0x00210000 /* Internal SRAM base address Non-Secure */
-#define SAMA5D4_NS_SRAM_SIZE (64 * SZ_1K) /* Internal SRAM size Non-Secure part (64Kb) */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/uncompress.h b/arch/arm/mach-at91/include/mach/uncompress.h
deleted file mode 100644
index 4ebb609369e3..000000000000
--- a/arch/arm/mach-at91/include/mach/uncompress.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/uncompress.h
- *
- * Copyright (C) 2003 SAN People
- * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARCH_UNCOMPRESS_H
-#define __ASM_ARCH_UNCOMPRESS_H
-
-#include <linux/io.h>
-#include <linux/atmel_serial.h>
-#include <mach/hardware.h>
-
-#include <mach/at91_dbgu.h>
-#include <mach/cpu.h>
-
-void __iomem *at91_uart;
-
-static const u32 uarts_rm9200[] = {
- AT91_BASE_DBGU0,
- AT91RM9200_BASE_US0,
- AT91RM9200_BASE_US1,
- AT91RM9200_BASE_US2,
- AT91RM9200_BASE_US3,
- 0,
-};
-
-static const u32 uarts_sam9260[] = {
- AT91_BASE_DBGU0,
- AT91SAM9260_BASE_US0,
- AT91SAM9260_BASE_US1,
- AT91SAM9260_BASE_US2,
- AT91SAM9260_BASE_US3,
- AT91SAM9260_BASE_US4,
- AT91SAM9260_BASE_US5,
- 0,
-};
-
-static const u32 uarts_sam9261[] = {
- AT91_BASE_DBGU0,
- AT91SAM9261_BASE_US0,
- AT91SAM9261_BASE_US1,
- AT91SAM9261_BASE_US2,
- 0,
-};
-
-static const u32 uarts_sam9263[] = {
- AT91_BASE_DBGU1,
- AT91SAM9263_BASE_US0,
- AT91SAM9263_BASE_US1,
- AT91SAM9263_BASE_US2,
- 0,
-};
-
-static const u32 uarts_sam9g45[] = {
- AT91_BASE_DBGU1,
- AT91SAM9G45_BASE_US0,
- AT91SAM9G45_BASE_US1,
- AT91SAM9G45_BASE_US2,
- AT91SAM9G45_BASE_US3,
- 0,
-};
-
-static const u32 uarts_sam9rl[] = {
- AT91_BASE_DBGU0,
- AT91SAM9RL_BASE_US0,
- AT91SAM9RL_BASE_US1,
- AT91SAM9RL_BASE_US2,
- AT91SAM9RL_BASE_US3,
- 0,
-};
-
-static const u32 uarts_sam9x5[] = {
- AT91_BASE_DBGU0,
- AT91SAM9X5_BASE_USART0,
- AT91SAM9X5_BASE_USART1,
- AT91SAM9X5_BASE_USART2,
- 0,
-};
-
-static const u32 uarts_sama5d3[] = {
- AT91_BASE_DBGU1,
- SAMA5D3_BASE_USART0,
- SAMA5D3_BASE_USART1,
- SAMA5D3_BASE_USART2,
- SAMA5D3_BASE_USART3,
- 0,
-};
-
-static const u32 uarts_sama5d4[] = {
- AT91_BASE_DBGU2,
- SAMA5D4_BASE_USART3,
- 0,
-};
-
-static inline const u32* decomp_soc_detect(void __iomem *dbgu_base)
-{
- u32 cidr, socid;
-
- cidr = __raw_readl(dbgu_base + AT91_DBGU_CIDR);
- socid = cidr & ~AT91_CIDR_VERSION;
-
- switch (socid) {
- case ARCH_ID_AT91RM9200:
- return uarts_rm9200;
-
- case ARCH_ID_AT91SAM9G20:
- case ARCH_ID_AT91SAM9260:
- return uarts_sam9260;
-
- case ARCH_ID_AT91SAM9261:
- return uarts_sam9261;
-
- case ARCH_ID_AT91SAM9263:
- return uarts_sam9263;
-
- case ARCH_ID_AT91SAM9G45:
- return uarts_sam9g45;
-
- case ARCH_ID_AT91SAM9RL64:
- return uarts_sam9rl;
-
- case ARCH_ID_AT91SAM9N12:
- case ARCH_ID_AT91SAM9X5:
- return uarts_sam9x5;
-
- case ARCH_ID_SAMA5:
- cidr = __raw_readl(dbgu_base + AT91_DBGU_EXID);
- if (cidr & ARCH_EXID_SAMA5D3)
- return uarts_sama5d3;
- else if (cidr & ARCH_EXID_SAMA5D4)
- return uarts_sama5d4;
-
- break;
- }
-
- /* at91sam9g10 */
- if ((cidr & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
- return uarts_sam9261;
- }
- /* at91sam9xe */
- else if ((cidr & AT91_CIDR_ARCH) == ARCH_FAMILY_AT91SAM9XE) {
- return uarts_sam9260;
- }
-
- return NULL;
-}
-
-static inline void arch_decomp_setup(void)
-{
- int i = 0;
- const u32* usarts;
-
- usarts = decomp_soc_detect((void __iomem *)AT91_BASE_DBGU0);
- if (!usarts)
- usarts = decomp_soc_detect((void __iomem *)AT91_BASE_DBGU1);
- if (!usarts)
- usarts = decomp_soc_detect((void __iomem *)AT91_BASE_DBGU2);
- if (!usarts) {
- at91_uart = NULL;
- return;
- }
-
- do {
- /* physical address */
- at91_uart = (void __iomem *)usarts[i];
-
- if (__raw_readl(at91_uart + ATMEL_US_BRGR))
- return;
- i++;
- } while (usarts[i]);
-
- at91_uart = NULL;
-}
-
-/*
- * The following code assumes the serial port has already been
- * initialized by the bootloader. If you didn't setup a port in
- * your bootloader then nothing will appear (which might be desired).
- *
- * This does not append a newline
- */
-static void putc(int c)
-{
- if (!at91_uart)
- return;
-
- while (!(__raw_readl(at91_uart + ATMEL_US_CSR) & ATMEL_US_TXRDY))
- barrier();
- __raw_writel(c, at91_uart + ATMEL_US_THR);
-}
-
-static inline void flush(void)
-{
- if (!at91_uart)
- return;
-
- /* wait for transmission to complete */
- while (!(__raw_readl(at91_uart + ATMEL_US_CSR) & ATMEL_US_TXEMPTY))
- barrier();
-}
-
-#endif
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index aa4116e9452f..5062699cbb12 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -29,19 +29,26 @@
#include <linux/atomic.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
-
-#include <mach/cpu.h>
-#include <mach/hardware.h>
+#include <asm/fncpy.h>
+#include <asm/cacheflush.h>
#include "generic.h"
#include "pm.h"
+/*
+ * FIXME: this is needed to communicate between the pinctrl driver and
+ * the PM implementation in the machine. Possibly part of the PM
+ * implementation should be moved down into the pinctrl driver and get
+ * called as part of the generic suspend/resume path.
+ */
+extern void at91_pinctrl_gpio_suspend(void);
+extern void at91_pinctrl_gpio_resume(void);
+
static struct {
unsigned long uhp_udp_mask;
int memctrl;
} at91_pm_data;
-static void (*at91_pm_standby)(void);
void __iomem *at91_ramc_base[2];
static int at91_pm_valid_state(suspend_state_t state)
@@ -119,76 +126,67 @@ int at91_suspend_entering_slow_clock(void)
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
-
-static void (*slow_clock)(void __iomem *pmc, void __iomem *ramc0,
+static void (*at91_suspend_sram_fn)(void __iomem *pmc, void __iomem *ramc0,
void __iomem *ramc1, int memctrl);
-#ifdef CONFIG_AT91_SLOW_CLOCK
-extern void at91_slow_clock(void __iomem *pmc, void __iomem *ramc0,
+extern void at91_pm_suspend_in_sram(void __iomem *pmc, void __iomem *ramc0,
void __iomem *ramc1, int memctrl);
-extern u32 at91_slow_clock_sz;
-#endif
+extern u32 at91_pm_suspend_in_sram_sz;
+
+static void at91_pm_suspend(suspend_state_t state)
+{
+ unsigned int pm_data = at91_pm_data.memctrl;
+
+ pm_data |= (state == PM_SUSPEND_MEM) ?
+ AT91_PM_MODE(AT91_PM_SLOW_CLOCK) : 0;
+
+ flush_cache_all();
+ outer_disable();
+
+ at91_suspend_sram_fn(at91_pmc_base, at91_ramc_base[0],
+ at91_ramc_base[1], pm_data);
+
+ outer_resume();
+}
static int at91_pm_enter(suspend_state_t state)
{
at91_pinctrl_gpio_suspend();
switch (state) {
+	/*
+	 * Suspend-to-RAM is like STANDBY plus slow clock mode: drivers
+	 * must suspend more deeply, the master clock is switched to the
+	 * slow clock (clk32k) and the main oscillator is turned off.
+	 */
+ case PM_SUSPEND_MEM:
/*
- * Suspend-to-RAM is like STANDBY plus slow clock mode, so
- * drivers must suspend more deeply: only the master clock
- * controller may be using the main oscillator.
+ * Ensure that clocks are in a valid state.
*/
- case PM_SUSPEND_MEM:
- /*
- * Ensure that clocks are in a valid state.
- */
- if (!at91_pm_verify_clocks())
- goto error;
-
- /*
- * Enter slow clock mode by switching over to clk32k and
- * turning off the main oscillator; reverse on wakeup.
- */
- if (slow_clock) {
-#ifdef CONFIG_AT91_SLOW_CLOCK
- /* copy slow_clock handler to SRAM, and call it */
- memcpy(slow_clock, at91_slow_clock, at91_slow_clock_sz);
-#endif
- slow_clock(at91_pmc_base, at91_ramc_base[0],
- at91_ramc_base[1],
- at91_pm_data.memctrl);
- break;
- } else {
- pr_info("AT91: PM - no slow clock mode enabled ...\n");
- /* FALLTHROUGH leaving master clock alone */
- }
+ if (!at91_pm_verify_clocks())
+ goto error;
- /*
- * STANDBY mode has *all* drivers suspended; ignores irqs not
- * marked as 'wakeup' event sources; and reduces DRAM power.
- * But otherwise it's identical to PM_SUSPEND_ON: cpu idle, and
- * nothing fancy done with main or cpu clocks.
- */
- case PM_SUSPEND_STANDBY:
- /*
- * NOTE: the Wait-for-Interrupt instruction needs to be
- * in icache so no SDRAM accesses are needed until the
- * wakeup IRQ occurs and self-refresh is terminated.
- * For ARM 926 based chips, this requirement is weaker
- * as at91sam9 can access a RAM in self-refresh mode.
- */
- if (at91_pm_standby)
- at91_pm_standby();
- break;
+ at91_pm_suspend(state);
- case PM_SUSPEND_ON:
- cpu_do_idle();
- break;
+ break;
- default:
- pr_debug("AT91: PM - bogus suspend state %d\n", state);
- goto error;
+ /*
+ * STANDBY mode has *all* drivers suspended; ignores irqs not
+ * marked as 'wakeup' event sources; and reduces DRAM power.
+ * But otherwise it's identical to PM_SUSPEND_ON: cpu idle, and
+ * nothing fancy done with main or cpu clocks.
+ */
+ case PM_SUSPEND_STANDBY:
+ at91_pm_suspend(state);
+ break;
+
+ case PM_SUSPEND_ON:
+ cpu_do_idle();
+ break;
+
+ default:
+ pr_debug("AT91: PM - bogus suspend state %d\n", state);
+ goto error;
}
error:
@@ -218,12 +216,99 @@ static struct platform_device at91_cpuidle_device = {
.name = "cpuidle-at91",
};
-void at91_pm_set_standby(void (*at91_standby)(void))
+static void at91_pm_set_standby(void (*at91_standby)(void))
{
- if (at91_standby) {
+ if (at91_standby)
at91_cpuidle_device.dev.platform_data = at91_standby;
- at91_pm_standby = at91_standby;
+}
+
+/*
+ * The AT91RM9200 goes into self-refresh mode with this command, and will
+ * terminate self-refresh automatically on the next SDRAM access.
+ *
+ * Self-refresh mode is exited as soon as a memory access is made, but we don't
+ * know for sure when that happens. However, we need to restore the low-power
+ * mode if it was enabled before going idle. Restoring low-power mode while
+ * still in self-refresh is "not recommended", but seems to work.
+ */
+static void at91rm9200_standby(void)
+{
+ u32 lpr = at91_ramc_read(0, AT91RM9200_SDRAMC_LPR);
+
+ asm volatile(
+ "b 1f\n\t"
+ ".align 5\n\t"
+ "1: mcr p15, 0, %0, c7, c10, 4\n\t"
+ " str %0, [%1, %2]\n\t"
+ " str %3, [%1, %4]\n\t"
+ " mcr p15, 0, %0, c7, c0, 4\n\t"
+ " str %5, [%1, %2]"
+ :
+ : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
+ "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
+ "r" (lpr));
+}
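For readers who do not want to decode the inline assembly, the standby routine above boils down to the following C-level sketch (illustrative only: drain_write_buffer() and wait_for_interrupt() are stand-ins for the two CP15 mcr operations, and the real code deliberately lives in one asm block so that no SDRAM access happens between the self-refresh request and the wait for interrupt):

	/* Not a drop-in replacement for at91rm9200_standby() -- just its logic. */
	static void at91rm9200_standby_sketch(void)
	{
		u32 lpr = at91_ramc_read(0, AT91RM9200_SDRAMC_LPR);

		drain_write_buffer();				/* mcr p15, 0, r, c7, c10, 4 */
		at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, 0);	/* leave low-power mode */
		at91_ramc_write(0, AT91RM9200_SDRAMC_SRR, 1);	/* request self-refresh */
		wait_for_interrupt();				/* mcr p15, 0, r, c7, c0, 4 */
		at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, lpr);	/* restore low-power mode */
	}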
+
+/* We manage both DDRAM/SDRAM controllers, so we need more than one value
+ * to remember.
+ */
+static void at91_ddr_standby(void)
+{
+ /* Those two values allow us to delay self-refresh activation
+ * to the maximum. */
+ u32 lpr0, lpr1 = 0;
+ u32 saved_lpr0, saved_lpr1 = 0;
+
+ if (at91_ramc_base[1]) {
+ saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
+ lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
+ lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+ }
+
+ saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+ lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+ lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+
+ /* self-refresh mode now */
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
+
+ cpu_do_idle();
+
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+}
+
+/* We manage both DDRAM/SDRAM controllers, so we need more than one value
+ * to remember.
+ */
+static void at91sam9_sdram_standby(void)
+{
+ u32 lpr0, lpr1 = 0;
+ u32 saved_lpr0, saved_lpr1 = 0;
+
+ if (at91_ramc_base[1]) {
+ saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
+ lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
+ lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
}
+
+ saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
+ lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
+ lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+
+ /* self-refresh mode now */
+ at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
+
+ cpu_do_idle();
+
+ at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
static const struct of_device_id ramc_ids[] __initconst = {
@@ -263,7 +348,6 @@ static __init void at91_dt_ramc(void)
at91_pm_set_standby(standby);
}
-#ifdef CONFIG_AT91_SLOW_CLOCK
static void __init at91_pm_sram_init(void)
{
struct gen_pool *sram_pool;
@@ -291,30 +375,36 @@ static void __init at91_pm_sram_init(void)
return;
}
- sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz);
+ sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
if (!sram_base) {
- pr_warn("%s: unable to alloc ocram!\n", __func__);
+ pr_warn("%s: unable to alloc sram!\n", __func__);
return;
}
sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
- slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false);
-}
-#endif
+ at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
+ at91_pm_suspend_in_sram_sz, false);
+ if (!at91_suspend_sram_fn) {
+ pr_warn("SRAM: Could not map\n");
+ return;
+ }
+ /* Copy the pm suspend handler to SRAM */
+ at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
+ &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
+}
static void __init at91_pm_init(void)
{
-#ifdef CONFIG_AT91_SLOW_CLOCK
at91_pm_sram_init();
-#endif
-
- pr_info("AT91: Power Management%s\n", (slow_clock ? " (with slow clock mode)" : ""));
if (at91_cpuidle_device.dev.platform_data)
platform_device_register(&at91_cpuidle_device);
- suspend_set_ops(&at91_pm_ops);
+ if (at91_suspend_sram_fn)
+ suspend_set_ops(&at91_pm_ops);
+ else
+ pr_info("AT91: PM not supported, due to no SRAM allocated\n");
}
void __init at91rm9200_pm_init(void)
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 86c0aa819d25..ecd875a91d52 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -15,100 +15,16 @@
#include <mach/at91_ramc.h>
-#ifdef CONFIG_PM
-extern void at91_pm_set_standby(void (*at91_standby)(void));
-#else
-static inline void at91_pm_set_standby(void (*at91_standby)(void)) { }
-#endif
-
-/*
- * The AT91RM9200 goes into self-refresh mode with this command, and will
- * terminate self-refresh automatically on the next SDRAM access.
- *
- * Self-refresh mode is exited as soon as a memory access is made, but we don't
- * know for sure when that happens. However, we need to restore the low-power
- * mode if it was enabled before going idle. Restoring low-power mode while
- * still in self-refresh is "not recommended", but seems to work.
- */
-
-static inline void at91rm9200_standby(void)
-{
- u32 lpr = at91_ramc_read(0, AT91RM9200_SDRAMC_LPR);
-
- asm volatile(
- "b 1f\n\t"
- ".align 5\n\t"
- "1: mcr p15, 0, %0, c7, c10, 4\n\t"
- " str %0, [%1, %2]\n\t"
- " str %3, [%1, %4]\n\t"
- " mcr p15, 0, %0, c7, c0, 4\n\t"
- " str %5, [%1, %2]"
- :
- : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
- "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
- "r" (lpr));
-}
-
-/* We manage both DDRAM/SDRAM controllers, we need more than one value to
- * remember.
- */
-static inline void at91_ddr_standby(void)
-{
- /* Those two values allow us to delay self-refresh activation
- * to the maximum. */
- u32 lpr0, lpr1 = 0;
- u32 saved_lpr0, saved_lpr1 = 0;
-
- if (at91_ramc_base[1]) {
- saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
- lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
- lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
- }
-
- saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
- lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
- lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
-
- /* self-refresh mode now */
- at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
- if (at91_ramc_base[1])
- at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
-
- cpu_do_idle();
-
- at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
- if (at91_ramc_base[1])
- at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
-}
-
-/* We manage both DDRAM/SDRAM controllers, we need more than one value to
- * remember.
- */
-static inline void at91sam9_sdram_standby(void)
-{
- u32 lpr0, lpr1 = 0;
- u32 saved_lpr0, saved_lpr1 = 0;
-
- if (at91_ramc_base[1]) {
- saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
- lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
- lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
- }
-
- saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
- lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
- lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+#define AT91_MEMCTRL_MC 0
+#define AT91_MEMCTRL_SDRAMC 1
+#define AT91_MEMCTRL_DDRSDR 2
- /* self-refresh mode now */
- at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
- if (at91_ramc_base[1])
- at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
+#define AT91_PM_MEMTYPE_MASK 0x0f
- cpu_do_idle();
+#define AT91_PM_MODE_OFFSET 4
+#define AT91_PM_MODE_MASK 0x01
+#define AT91_PM_MODE(x) (((x) & AT91_PM_MODE_MASK) << AT91_PM_MODE_OFFSET)
- at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
- if (at91_ramc_base[1])
- at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
-}
+#define AT91_PM_SLOW_CLOCK 0x01
#endif
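The three AT91_MEMCTRL_* values and the AT91_PM_MODE()/AT91_PM_MEMTYPE_MASK macros added above define the small pm_data word that at91_pm_suspend() passes to the SRAM routine in r3: memory-controller type in the low nibble, slow-clock flag in bit 4. A hedged sketch of the packing and the matching unpacking (the two helper functions and the 'deep' flag are illustrative names, not part of the patch):

	/* Illustrative only: how the pm_data word introduced by this patch is
	 * packed in C and decoded again by the SRAM assembly.  'deep' stands
	 * in for the (state == PM_SUSPEND_MEM) test in at91_pm_suspend().
	 */
	static unsigned int at91_pm_pack(int memctrl, int deep)
	{
		unsigned int pm_data = memctrl & AT91_PM_MEMTYPE_MASK;

		if (deep)
			pm_data |= AT91_PM_MODE(AT91_PM_SLOW_CLOCK);	/* bit 4 */
		return pm_data;
	}

	static void at91_pm_unpack(unsigned int pm_data,
				   unsigned int *memtype, unsigned int *mode)
	{
		*memtype = pm_data & AT91_PM_MEMTYPE_MASK;		/* low nibble */
		*mode = (pm_data >> AT91_PM_MODE_OFFSET) & AT91_PM_MODE_MASK;
	}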
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
deleted file mode 100644
index 931f0e302c03..000000000000
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * arch/arm/mach-at91/pm_slow_clock.S
- *
- * Copyright (C) 2006 Savin Zlobec
- *
- * AT91SAM9 support:
- * Copyright (C) 2007 Anti Sullin <anti.sullin@artecdesign.ee
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/linkage.h>
-#include <linux/clk/at91_pmc.h>
-#include <mach/hardware.h>
-#include <mach/at91_ramc.h>
-
-/*
- * When SLOWDOWN_MASTER_CLOCK is defined we will also slow down the Master
- * clock during suspend by adjusting its prescalar and divisor.
- * NOTE: This hasn't been shown to be stable on SAM9s; and on the RM9200 there
- * are errata regarding adjusting the prescalar and divisor.
- */
-#undef SLOWDOWN_MASTER_CLOCK
-
-pmc .req r0
-sdramc .req r1
-ramc1 .req r2
-memctrl .req r3
-tmp1 .req r4
-tmp2 .req r5
-
-/*
- * Wait until master clock is ready (after switching master clock source)
- */
- .macro wait_mckrdy
-1: ldr tmp1, [pmc, #AT91_PMC_SR]
- tst tmp1, #AT91_PMC_MCKRDY
- beq 1b
- .endm
-
-/*
- * Wait until master oscillator has stabilized.
- */
- .macro wait_moscrdy
-1: ldr tmp1, [pmc, #AT91_PMC_SR]
- tst tmp1, #AT91_PMC_MOSCS
- beq 1b
- .endm
-
-/*
- * Wait until PLLA has locked.
- */
- .macro wait_pllalock
-1: ldr tmp1, [pmc, #AT91_PMC_SR]
- tst tmp1, #AT91_PMC_LOCKA
- beq 1b
- .endm
-
-/*
- * Wait until PLLB has locked.
- */
- .macro wait_pllblock
-1: ldr tmp1, [pmc, #AT91_PMC_SR]
- tst tmp1, #AT91_PMC_LOCKB
- beq 1b
- .endm
-
- .text
-
- .arm
-
-/* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc,
- * void __iomem *ramc1, int memctrl)
- */
-ENTRY(at91_slow_clock)
- /* Save registers on stack */
- stmfd sp!, {r4 - r12, lr}
-
- /*
- * Register usage:
- * R0 = Base address of AT91_PMC
- * R1 = Base address of RAM Controller (SDRAM, DDRSDR, or AT91_SYS)
- * R2 = Base address of second RAM Controller or 0 if not present
- * R3 = Memory controller
- * R4 = temporary register
- * R5 = temporary register
- */
-
- /* Drain write buffer */
- mov tmp1, #0
- mcr p15, 0, tmp1, c7, c10, 4
-
- cmp memctrl, #AT91_MEMCTRL_MC
- bne ddr_sr_enable
-
- /*
- * at91rm9200 Memory controller
- */
- /* Put SDRAM in self-refresh mode */
- mov tmp1, #1
- str tmp1, [sdramc, #AT91RM9200_SDRAMC_SRR]
- b sdr_sr_done
-
- /*
- * DDRSDR Memory controller
- */
-ddr_sr_enable:
- cmp memctrl, #AT91_MEMCTRL_DDRSDR
- bne sdr_sr_enable
-
- /* LPDDR1 --> force DDR2 mode during self-refresh */
- ldr tmp1, [sdramc, #AT91_DDRSDRC_MDR]
- str tmp1, .saved_sam9_mdr
- bic tmp1, tmp1, #~AT91_DDRSDRC_MD
- cmp tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR
- ldreq tmp1, [sdramc, #AT91_DDRSDRC_MDR]
- biceq tmp1, tmp1, #AT91_DDRSDRC_MD
- orreq tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2
- streq tmp1, [sdramc, #AT91_DDRSDRC_MDR]
-
- /* prepare for DDRAM self-refresh mode */
- ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR]
- str tmp1, .saved_sam9_lpr
- bic tmp1, #AT91_DDRSDRC_LPCB
- orr tmp1, #AT91_DDRSDRC_LPCB_SELF_REFRESH
-
- /* figure out if we use the second ram controller */
- cmp ramc1, #0
- beq ddr_no_2nd_ctrl
-
- ldr tmp2, [ramc1, #AT91_DDRSDRC_MDR]
- str tmp2, .saved_sam9_mdr1
- bic tmp2, tmp2, #~AT91_DDRSDRC_MD
- cmp tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR
- ldreq tmp2, [ramc1, #AT91_DDRSDRC_MDR]
- biceq tmp2, tmp2, #AT91_DDRSDRC_MD
- orreq tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2
- streq tmp2, [ramc1, #AT91_DDRSDRC_MDR]
-
- ldr tmp2, [ramc1, #AT91_DDRSDRC_LPR]
- str tmp2, .saved_sam9_lpr1
- bic tmp2, #AT91_DDRSDRC_LPCB
- orr tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH
-
- /* Enable DDRAM self-refresh mode */
- str tmp2, [ramc1, #AT91_DDRSDRC_LPR]
-ddr_no_2nd_ctrl:
- str tmp1, [sdramc, #AT91_DDRSDRC_LPR]
-
- b sdr_sr_done
-
- /*
- * SDRAMC Memory controller
- */
-sdr_sr_enable:
- /* Enable SDRAM self-refresh mode */
- ldr tmp1, [sdramc, #AT91_SDRAMC_LPR]
- str tmp1, .saved_sam9_lpr
-
- bic tmp1, #AT91_SDRAMC_LPCB
- orr tmp1, #AT91_SDRAMC_LPCB_SELF_REFRESH
- str tmp1, [sdramc, #AT91_SDRAMC_LPR]
-
-sdr_sr_done:
- /* Save Master clock setting */
- ldr tmp1, [pmc, #AT91_PMC_MCKR]
- str tmp1, .saved_mckr
-
- /*
- * Set the Master clock source to slow clock
- */
- bic tmp1, tmp1, #AT91_PMC_CSS
- str tmp1, [pmc, #AT91_PMC_MCKR]
-
- wait_mckrdy
-
-#ifdef SLOWDOWN_MASTER_CLOCK
- /*
- * Set the Master Clock PRES and MDIV fields.
- *
- * See AT91RM9200 errata #27 and #28 for details.
- */
- mov tmp1, #0
- str tmp1, [pmc, #AT91_PMC_MCKR]
-
- wait_mckrdy
-#endif
-
- /* Save PLLA setting and disable it */
- ldr tmp1, [pmc, #AT91_CKGR_PLLAR]
- str tmp1, .saved_pllar
-
- mov tmp1, #AT91_PMC_PLLCOUNT
- orr tmp1, tmp1, #(1 << 29) /* bit 29 always set */
- str tmp1, [pmc, #AT91_CKGR_PLLAR]
-
- /* Save PLLB setting and disable it */
- ldr tmp1, [pmc, #AT91_CKGR_PLLBR]
- str tmp1, .saved_pllbr
-
- mov tmp1, #AT91_PMC_PLLCOUNT
- str tmp1, [pmc, #AT91_CKGR_PLLBR]
-
- /* Turn off the main oscillator */
- ldr tmp1, [pmc, #AT91_CKGR_MOR]
- bic tmp1, tmp1, #AT91_PMC_MOSCEN
- orr tmp1, tmp1, #AT91_PMC_KEY
- str tmp1, [pmc, #AT91_CKGR_MOR]
-
- /* Wait for interrupt */
- mcr p15, 0, tmp1, c7, c0, 4
-
- /* Turn on the main oscillator */
- ldr tmp1, [pmc, #AT91_CKGR_MOR]
- orr tmp1, tmp1, #AT91_PMC_MOSCEN
- orr tmp1, tmp1, #AT91_PMC_KEY
- str tmp1, [pmc, #AT91_CKGR_MOR]
-
- wait_moscrdy
-
- /* Restore PLLB setting */
- ldr tmp1, .saved_pllbr
- str tmp1, [pmc, #AT91_CKGR_PLLBR]
-
- tst tmp1, #(AT91_PMC_MUL & 0xff0000)
- bne 1f
- tst tmp1, #(AT91_PMC_MUL & ~0xff0000)
- beq 2f
-1:
- wait_pllblock
-2:
-
- /* Restore PLLA setting */
- ldr tmp1, .saved_pllar
- str tmp1, [pmc, #AT91_CKGR_PLLAR]
-
- tst tmp1, #(AT91_PMC_MUL & 0xff0000)
- bne 3f
- tst tmp1, #(AT91_PMC_MUL & ~0xff0000)
- beq 4f
-3:
- wait_pllalock
-4:
-
-#ifdef SLOWDOWN_MASTER_CLOCK
- /*
- * First set PRES if it was not 0,
- * than set CSS and MDIV fields.
- *
- * See AT91RM9200 errata #27 and #28 for details.
- */
- ldr tmp1, .saved_mckr
- tst tmp1, #AT91_PMC_PRES
- beq 2f
- and tmp1, tmp1, #AT91_PMC_PRES
- str tmp1, [pmc, #AT91_PMC_MCKR]
-
- wait_mckrdy
-#endif
-
- /*
- * Restore master clock setting
- */
-2: ldr tmp1, .saved_mckr
- str tmp1, [pmc, #AT91_PMC_MCKR]
-
- wait_mckrdy
-
- /*
- * at91rm9200 Memory controller
- * Do nothing - self-refresh is automatically disabled.
- */
- cmp memctrl, #AT91_MEMCTRL_MC
- beq ram_restored
-
- /*
- * DDRSDR Memory controller
- */
- cmp memctrl, #AT91_MEMCTRL_DDRSDR
- bne sdr_en_restore
- /* Restore MDR in case of LPDDR1 */
- ldr tmp1, .saved_sam9_mdr
- str tmp1, [sdramc, #AT91_DDRSDRC_MDR]
- /* Restore LPR on AT91 with DDRAM */
- ldr tmp1, .saved_sam9_lpr
- str tmp1, [sdramc, #AT91_DDRSDRC_LPR]
-
- /* if we use the second ram controller */
- cmp ramc1, #0
- ldrne tmp2, .saved_sam9_mdr1
- strne tmp2, [ramc1, #AT91_DDRSDRC_MDR]
- ldrne tmp2, .saved_sam9_lpr1
- strne tmp2, [ramc1, #AT91_DDRSDRC_LPR]
-
- b ram_restored
-
- /*
- * SDRAMC Memory controller
- */
-sdr_en_restore:
- /* Restore LPR on AT91 with SDRAM */
- ldr tmp1, .saved_sam9_lpr
- str tmp1, [sdramc, #AT91_SDRAMC_LPR]
-
-ram_restored:
- /* Restore registers, and return */
- ldmfd sp!, {r4 - r12, pc}
-
-
-.saved_mckr:
- .word 0
-
-.saved_pllar:
- .word 0
-
-.saved_pllbr:
- .word 0
-
-.saved_sam9_lpr:
- .word 0
-
-.saved_sam9_lpr1:
- .word 0
-
-.saved_sam9_mdr:
- .word 0
-
-.saved_sam9_mdr1:
- .word 0
-
-ENTRY(at91_slow_clock_sz)
- .word .-at91_slow_clock
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
new file mode 100644
index 000000000000..bd22b2c8a051
--- /dev/null
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -0,0 +1,337 @@
+/*
+ * arch/arm/mach-at91/pm_suspend.S
+ *
+ * Copyright (C) 2006 Savin Zlobec
+ *
+ * AT91SAM9 support:
+ * Copyright (C) 2007 Anti Sullin <anti.sullin@artecdesign.ee>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/clk/at91_pmc.h>
+#include <mach/at91_ramc.h>
+#include "pm.h"
+
+#define SRAMC_SELF_FRESH_ACTIVE 0x01
+#define SRAMC_SELF_FRESH_EXIT 0x00
+
+pmc .req r0
+tmp1 .req r4
+tmp2 .req r5
+
+/*
+ * Wait until master clock is ready (after switching master clock source)
+ */
+ .macro wait_mckrdy
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_MCKRDY
+ beq 1b
+ .endm
+
+/*
+ * Wait until master oscillator has stabilized.
+ */
+ .macro wait_moscrdy
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_MOSCS
+ beq 1b
+ .endm
+
+/*
+ * Wait until PLLA has locked.
+ */
+ .macro wait_pllalock
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_LOCKA
+ beq 1b
+ .endm
+
+/*
+ * Put the processor into the idle state
+ */
+ .macro at91_cpu_idle
+
+#if defined(CONFIG_CPU_V7)
+ mov tmp1, #AT91_PMC_PCK
+ str tmp1, [pmc, #AT91_PMC_SCDR]
+
+ dsb
+
+ wfi @ Wait For Interrupt
+#else
+ mcr p15, 0, tmp1, c7, c0, 4
+#endif
+
+ .endm
+
+ .text
+
+ .arm
+
+/*
+ * void at91_pm_suspend_in_sram(void __iomem *pmc, void __iomem *sdramc,
+ * void __iomem *ramc1, int memctrl)
+ * @input param:
+ * @r0: base address of AT91_PMC
+ * @r1: base address of SDRAM Controller (SDRAM, DDRSDR, or AT91_SYS)
+ * @r2: base address of second SDRAM Controller or 0 if not present
+ * @r3: pm information
+ */
+ENTRY(at91_pm_suspend_in_sram)
+ /* Save registers on stack */
+ stmfd sp!, {r4 - r12, lr}
+
+ /* Drain write buffer */
+ mov tmp1, #0
+ mcr p15, 0, tmp1, c7, c10, 4
+
+ str r0, .pmc_base
+ str r1, .sramc_base
+ str r2, .sramc1_base
+
+ and r0, r3, #AT91_PM_MEMTYPE_MASK
+ str r0, .memtype
+
+ lsr r0, r3, #AT91_PM_MODE_OFFSET
+ and r0, r0, #AT91_PM_MODE_MASK
+ str r0, .pm_mode
+
+ /* Activate the self-refresh mode */
+ mov r0, #SRAMC_SELF_FRESH_ACTIVE
+ bl at91_sramc_self_refresh
+
+ ldr r0, .pm_mode
+ tst r0, #AT91_PM_SLOW_CLOCK
+ beq skip_disable_main_clock
+
+ ldr pmc, .pmc_base
+
+ /* Save Master clock setting */
+ ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ str tmp1, .saved_mckr
+
+ /*
+ * Set the Master clock source to slow clock
+ */
+ bic tmp1, tmp1, #AT91_PMC_CSS
+ str tmp1, [pmc, #AT91_PMC_MCKR]
+
+ wait_mckrdy
+
+ /* Save PLLA setting and disable it */
+ ldr tmp1, [pmc, #AT91_CKGR_PLLAR]
+ str tmp1, .saved_pllar
+
+ mov tmp1, #AT91_PMC_PLLCOUNT
+ orr tmp1, tmp1, #(1 << 29) /* bit 29 always set */
+ str tmp1, [pmc, #AT91_CKGR_PLLAR]
+
+ /* Turn off the main oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ bic tmp1, tmp1, #AT91_PMC_MOSCEN
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+skip_disable_main_clock:
+ ldr pmc, .pmc_base
+
+ /* Wait for interrupt */
+ at91_cpu_idle
+
+ ldr r0, .pm_mode
+ tst r0, #AT91_PM_SLOW_CLOCK
+ beq skip_enable_main_clock
+
+ ldr pmc, .pmc_base
+
+ /* Turn on the main oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ orr tmp1, tmp1, #AT91_PMC_MOSCEN
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ wait_moscrdy
+
+ /* Restore PLLA setting */
+ ldr tmp1, .saved_pllar
+ str tmp1, [pmc, #AT91_CKGR_PLLAR]
+
+ tst tmp1, #(AT91_PMC_MUL & 0xff0000)
+ bne 3f
+ tst tmp1, #(AT91_PMC_MUL & ~0xff0000)
+ beq 4f
+3:
+ wait_pllalock
+4:
+
+ /*
+ * Restore master clock setting
+ */
+ ldr tmp1, .saved_mckr
+ str tmp1, [pmc, #AT91_PMC_MCKR]
+
+ wait_mckrdy
+
+skip_enable_main_clock:
+ /* Exit the self-refresh mode */
+ mov r0, #SRAMC_SELF_FRESH_EXIT
+ bl at91_sramc_self_refresh
+
+ /* Restore registers, and return */
+ ldmfd sp!, {r4 - r12, pc}
+ENDPROC(at91_pm_suspend_in_sram)
+
+/*
+ * void at91_sramc_self_refresh(unsigned int is_active)
+ *
+ * @input param:
+ * @r0: 1 - activate self-refresh mode
+ * 0 - exit self-refresh mode
+ * register usage:
+ * @r1: memory type
+ * @r2: base address of the sram controller
+ */
+
+ENTRY(at91_sramc_self_refresh)
+ ldr r1, .memtype
+ ldr r2, .sramc_base
+
+ cmp r1, #AT91_MEMCTRL_MC
+ bne ddrc_sf
+
+ /*
+ * at91rm9200 Memory controller
+ */
+
+ /*
+ * When exiting self-refresh mode there is nothing to do:
+ * the controller leaves self-refresh automatically.
+ */
+ tst r0, #SRAMC_SELF_FRESH_ACTIVE
+ beq exit_sramc_sf
+
+ /* Activate SDRAM self-refresh mode */
+ mov r3, #1
+ str r3, [r2, #AT91RM9200_SDRAMC_SRR]
+ b exit_sramc_sf
+
+ddrc_sf:
+ cmp r1, #AT91_MEMCTRL_DDRSDR
+ bne sdramc_sf
+
+ /*
+ * DDR Memory controller
+ */
+ tst r0, #SRAMC_SELF_FRESH_ACTIVE
+ beq ddrc_exit_sf
+
+ /* LPDDR1 --> force DDR2 mode during self-refresh */
+ ldr r3, [r2, #AT91_DDRSDRC_MDR]
+ str r3, .saved_sam9_mdr
+ bic r3, r3, #~AT91_DDRSDRC_MD
+ cmp r3, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+ ldreq r3, [r2, #AT91_DDRSDRC_MDR]
+ biceq r3, r3, #AT91_DDRSDRC_MD
+ orreq r3, r3, #AT91_DDRSDRC_MD_DDR2
+ streq r3, [r2, #AT91_DDRSDRC_MDR]
+
+ /* Activate DDRC self-refresh mode */
+ ldr r3, [r2, #AT91_DDRSDRC_LPR]
+ str r3, .saved_sam9_lpr
+ bic r3, r3, #AT91_DDRSDRC_LPCB
+ orr r3, r3, #AT91_DDRSDRC_LPCB_SELF_REFRESH
+ str r3, [r2, #AT91_DDRSDRC_LPR]
+
+ /* If using the 2nd ddr controller */
+ ldr r2, .sramc1_base
+ cmp r2, #0
+ beq no_2nd_ddrc
+
+ ldr r3, [r2, #AT91_DDRSDRC_MDR]
+ str r3, .saved_sam9_mdr1
+ bic r3, r3, #~AT91_DDRSDRC_MD
+ cmp r3, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+ ldreq r3, [r2, #AT91_DDRSDRC_MDR]
+ biceq r3, r3, #AT91_DDRSDRC_MD
+ orreq r3, r3, #AT91_DDRSDRC_MD_DDR2
+ streq r3, [r2, #AT91_DDRSDRC_MDR]
+
+ /* Activate DDRC self-refresh mode */
+ ldr r3, [r2, #AT91_DDRSDRC_LPR]
+ str r3, .saved_sam9_lpr1
+ bic r3, r3, #AT91_DDRSDRC_LPCB
+ orr r3, r3, #AT91_DDRSDRC_LPCB_SELF_REFRESH
+ str r3, [r2, #AT91_DDRSDRC_LPR]
+
+no_2nd_ddrc:
+ b exit_sramc_sf
+
+ddrc_exit_sf:
+ /* Restore MDR in case of LPDDR1 */
+ ldr r3, .saved_sam9_mdr
+ str r3, [r2, #AT91_DDRSDRC_MDR]
+ /* Restore LPR on AT91 with DDRAM */
+ ldr r3, .saved_sam9_lpr
+ str r3, [r2, #AT91_DDRSDRC_LPR]
+
+ /* If using the 2nd ddr controller */
+ ldr r2, .sramc1_base
+ cmp r2, #0
+ ldrne r3, .saved_sam9_mdr1
+ strne r3, [r2, #AT91_DDRSDRC_MDR]
+ ldrne r3, .saved_sam9_lpr1
+ strne r3, [r2, #AT91_DDRSDRC_LPR]
+
+ b exit_sramc_sf
+
+ /*
+ * SDRAMC Memory controller
+ */
+sdramc_sf:
+ tst r0, #SRAMC_SELF_FRESH_ACTIVE
+ beq sdramc_exit_sf
+
+ /* Activate SDRAMC self-refresh mode */
+ ldr r3, [r2, #AT91_SDRAMC_LPR]
+ str r3, .saved_sam9_lpr
+ bic r3, r3, #AT91_SDRAMC_LPCB
+ orr r3, r3, #AT91_SDRAMC_LPCB_SELF_REFRESH
+ str r3, [r2, #AT91_SDRAMC_LPR]
+
+sdramc_exit_sf:
+ ldr r3, .saved_sam9_lpr
+ str r3, [r2, #AT91_SDRAMC_LPR]
+
+exit_sramc_sf:
+ mov pc, lr
+ENDPROC(at91_sramc_self_refresh)
+
+.pmc_base:
+ .word 0
+.sramc_base:
+ .word 0
+.sramc1_base:
+ .word 0
+.memtype:
+ .word 0
+.pm_mode:
+ .word 0
+.saved_mckr:
+ .word 0
+.saved_pllar:
+ .word 0
+.saved_sam9_lpr:
+ .word 0
+.saved_sam9_lpr1:
+ .word 0
+.saved_sam9_mdr:
+ .word 0
+.saved_sam9_mdr1:
+ .word 0
+
+ENTRY(at91_pm_suspend_in_sram_sz)
+ .word .-at91_pm_suspend_in_sram
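
For context, the suspend entry point added above is intended to be copied into on-chip SRAM and executed from there, which is why its size is exported as at91_pm_suspend_in_sram_sz. The following is only a sketch of the C-side caller (not part of this patch); sram_base and pm_data are assumed to have been set up elsewhere, with pm_data packing the AT91_PM_MEMTYPE/AT91_PM_MODE fields expected in r3.

  /* Sketch only: relocate the routine above into SRAM with fncpy() and
   * run it there while the external RAM controller self-refreshes. */
  #include <linux/io.h>
  #include <asm/fncpy.h>

  extern void at91_pm_suspend_in_sram(void __iomem *pmc, void __iomem *ramc0,
                                      void __iomem *ramc1, int memctrl);
  extern u32 at91_pm_suspend_in_sram_sz;

  static void (*at91_suspend_sram_fn)(void __iomem *, void __iomem *,
                                      void __iomem *, int);

  static void at91_pm_enter_sketch(void *sram_base, void __iomem *pmc,
                                   void __iomem *ramc0, void __iomem *ramc1,
                                   int pm_data)
  {
          /* Copy the position-independent suspend code into SRAM */
          at91_suspend_sram_fn = fncpy(sram_base, &at91_pm_suspend_in_sram,
                                       at91_pm_suspend_in_sram_sz);
          /* Execute it from SRAM so DRAM can stay in self-refresh */
          at91_suspend_sram_fn(pmc, ramc0, ramc1, pm_data);
  }
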
diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
index 03dcb441f3d2..41d829d8e7d5 100644
--- a/arch/arm/mach-at91/sama5.c
+++ b/arch/arm/mach-at91/sama5.c
@@ -7,48 +7,48 @@
* Licensed under GPLv2 or later.
*/
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/micrel_phy.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/phy.h>
-#include <linux/clk-provider.h>
-#include <linux/phy.h>
-#include <mach/hardware.h>
-
-#include <asm/setup.h>
-#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
+#include <asm/system_misc.h>
#include "generic.h"
+#include "soc.h"
-static int ksz8081_phy_fixup(struct phy_device *phy)
-{
- int value;
-
- value = phy_read(phy, 0x16);
- value &= ~0x20;
- phy_write(phy, 0x16, value);
-
- return 0;
-}
+static const struct at91_soc sama5_socs[] = {
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
+ "sama5d31", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
+ "sama5d33", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D34_EXID_MATCH,
+ "sama5d34", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D35_EXID_MATCH,
+ "sama5d35", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D36_EXID_MATCH,
+ "sama5d36", "sama5d3"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D41_EXID_MATCH,
+ "sama5d41", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D42_EXID_MATCH,
+ "sama5d42", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D43_EXID_MATCH,
+ "sama5d43", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D44_EXID_MATCH,
+ "sama5d44", "sama5d4"),
+ { /* sentinel */ },
+};
static void __init sama5_dt_device_init(void)
{
- if (of_machine_is_compatible("atmel,sama5d4ek") &&
- IS_ENABLED(CONFIG_PHYLIB)) {
- phy_register_fixup_for_id("fc028000.etherne:00",
- ksz8081_phy_fixup);
- }
+ struct soc_device *soc;
+ struct device *soc_dev = NULL;
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ soc = at91_soc_init(sama5_socs);
+ if (soc != NULL)
+ soc_dev = soc_device_to_device(soc);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
at91sam9x5_pm_init();
}
@@ -59,44 +59,10 @@ static const char *sama5_dt_board_compat[] __initconst = {
DT_MACHINE_START(sama5_dt, "Atmel SAMA5")
/* Maintainer: Atmel */
- .map_io = at91_map_io,
.init_machine = sama5_dt_device_init,
.dt_compat = sama5_dt_board_compat,
MACHINE_END
-static struct map_desc at91_io_desc[] __initdata = {
- {
- .virtual = (unsigned long)AT91_ALT_IO_P2V(SAMA5D4_BASE_MPDDRC),
- .pfn = __phys_to_pfn(SAMA5D4_BASE_MPDDRC),
- .length = SZ_512,
- .type = MT_DEVICE,
- },
- {
- .virtual = (unsigned long)AT91_ALT_IO_P2V(SAMA5D4_BASE_PMC),
- .pfn = __phys_to_pfn(SAMA5D4_BASE_PMC),
- .length = SZ_512,
- .type = MT_DEVICE,
- },
- { /* On sama5d4, we use USART3 as serial console */
- .virtual = (unsigned long)AT91_ALT_IO_P2V(SAMA5D4_BASE_USART3),
- .pfn = __phys_to_pfn(SAMA5D4_BASE_USART3),
- .length = SZ_256,
- .type = MT_DEVICE,
- },
- { /* A bunch of peripheral with fine grained IO space */
- .virtual = (unsigned long)AT91_ALT_IO_P2V(SAMA5D4_BASE_SYS2),
- .pfn = __phys_to_pfn(SAMA5D4_BASE_SYS2),
- .length = SZ_2K,
- .type = MT_DEVICE,
- },
-};
-
-static void __init sama5_alt_map_io(void)
-{
- at91_alt_map_io();
- iotable_init(at91_io_desc, ARRAY_SIZE(at91_io_desc));
-}
-
static const char *sama5_alt_dt_board_compat[] __initconst = {
"atmel,sama5d4",
NULL
@@ -104,7 +70,6 @@ static const char *sama5_alt_dt_board_compat[] __initconst = {
DT_MACHINE_START(sama5_alt_dt, "Atmel SAMA5")
/* Maintainer: Atmel */
- .map_io = sama5_alt_map_io,
.init_machine = sama5_dt_device_init,
.dt_compat = sama5_alt_dt_board_compat,
.l2c_aux_mask = ~0UL,
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
deleted file mode 100644
index 4e58bc90ed21..000000000000
--- a/arch/arm/mach-at91/setup.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Copyright (C) 2007 Atmel Corporation.
- * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * Under GPLv2
- */
-
-#define pr_fmt(fmt) "AT91: " fmt
-
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/pm.h>
-#include <linux/of_address.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/clk/at91_pmc.h>
-
-#include <asm/system_misc.h>
-#include <asm/mach/map.h>
-
-#include <mach/hardware.h>
-#include <mach/cpu.h>
-#include <mach/at91_dbgu.h>
-
-#include "generic.h"
-#include "pm.h"
-
-struct at91_socinfo at91_soc_initdata;
-EXPORT_SYMBOL(at91_soc_initdata);
-
-static struct map_desc at91_io_desc __initdata __maybe_unused = {
- .virtual = (unsigned long)AT91_VA_BASE_SYS,
- .pfn = __phys_to_pfn(AT91_BASE_SYS),
- .length = SZ_16K,
- .type = MT_DEVICE,
-};
-
-static struct map_desc at91_alt_io_desc __initdata __maybe_unused = {
- .virtual = (unsigned long)AT91_ALT_VA_BASE_SYS,
- .pfn = __phys_to_pfn(AT91_ALT_BASE_SYS),
- .length = 24 * SZ_1K,
- .type = MT_DEVICE,
-};
-
-static void __init soc_detect(u32 dbgu_base)
-{
- u32 cidr, socid;
-
- cidr = __raw_readl(AT91_IO_P2V(dbgu_base) + AT91_DBGU_CIDR);
- socid = cidr & ~AT91_CIDR_VERSION;
-
- switch (socid) {
- case ARCH_ID_AT91RM9200:
- at91_soc_initdata.type = AT91_SOC_RM9200;
- if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_UNKNOWN)
- at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
- break;
-
- case ARCH_ID_AT91SAM9260:
- at91_soc_initdata.type = AT91_SOC_SAM9260;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_NONE;
- break;
-
- case ARCH_ID_AT91SAM9261:
- at91_soc_initdata.type = AT91_SOC_SAM9261;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_NONE;
- break;
-
- case ARCH_ID_AT91SAM9263:
- at91_soc_initdata.type = AT91_SOC_SAM9263;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_NONE;
- break;
-
- case ARCH_ID_AT91SAM9G20:
- at91_soc_initdata.type = AT91_SOC_SAM9G20;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_NONE;
- break;
-
- case ARCH_ID_AT91SAM9G45:
- at91_soc_initdata.type = AT91_SOC_SAM9G45;
- if (cidr == ARCH_ID_AT91SAM9G45ES)
- at91_soc_initdata.subtype = AT91_SOC_SAM9G45ES;
- break;
-
- case ARCH_ID_AT91SAM9RL64:
- at91_soc_initdata.type = AT91_SOC_SAM9RL;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_NONE;
- break;
-
- case ARCH_ID_AT91SAM9X5:
- at91_soc_initdata.type = AT91_SOC_SAM9X5;
- break;
-
- case ARCH_ID_AT91SAM9N12:
- at91_soc_initdata.type = AT91_SOC_SAM9N12;
- break;
-
- case ARCH_ID_SAMA5:
- at91_soc_initdata.exid = __raw_readl(AT91_IO_P2V(dbgu_base) + AT91_DBGU_EXID);
- if (at91_soc_initdata.exid & ARCH_EXID_SAMA5D3) {
- at91_soc_initdata.type = AT91_SOC_SAMA5D3;
- }
- break;
- }
-
- /* at91sam9g10 */
- if ((socid & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
- at91_soc_initdata.type = AT91_SOC_SAM9G10;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_NONE;
- }
- /* at91sam9xe */
- else if ((cidr & AT91_CIDR_ARCH) == ARCH_FAMILY_AT91SAM9XE) {
- at91_soc_initdata.type = AT91_SOC_SAM9260;
- at91_soc_initdata.subtype = AT91_SOC_SAM9XE;
- }
-
- if (!at91_soc_is_detected())
- return;
-
- at91_soc_initdata.cidr = cidr;
-
- /* sub version of soc */
- if (!at91_soc_initdata.exid)
- at91_soc_initdata.exid = __raw_readl(AT91_IO_P2V(dbgu_base) + AT91_DBGU_EXID);
-
- if (at91_soc_initdata.type == AT91_SOC_SAM9G45) {
- switch (at91_soc_initdata.exid) {
- case ARCH_EXID_AT91SAM9M10:
- at91_soc_initdata.subtype = AT91_SOC_SAM9M10;
- break;
- case ARCH_EXID_AT91SAM9G46:
- at91_soc_initdata.subtype = AT91_SOC_SAM9G46;
- break;
- case ARCH_EXID_AT91SAM9M11:
- at91_soc_initdata.subtype = AT91_SOC_SAM9M11;
- break;
- }
- }
-
- if (at91_soc_initdata.type == AT91_SOC_SAM9X5) {
- switch (at91_soc_initdata.exid) {
- case ARCH_EXID_AT91SAM9G15:
- at91_soc_initdata.subtype = AT91_SOC_SAM9G15;
- break;
- case ARCH_EXID_AT91SAM9G35:
- at91_soc_initdata.subtype = AT91_SOC_SAM9G35;
- break;
- case ARCH_EXID_AT91SAM9X35:
- at91_soc_initdata.subtype = AT91_SOC_SAM9X35;
- break;
- case ARCH_EXID_AT91SAM9G25:
- at91_soc_initdata.subtype = AT91_SOC_SAM9G25;
- break;
- case ARCH_EXID_AT91SAM9X25:
- at91_soc_initdata.subtype = AT91_SOC_SAM9X25;
- break;
- }
- }
-
- if (at91_soc_initdata.type == AT91_SOC_SAMA5D3) {
- switch (at91_soc_initdata.exid) {
- case ARCH_EXID_SAMA5D31:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D31;
- break;
- case ARCH_EXID_SAMA5D33:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D33;
- break;
- case ARCH_EXID_SAMA5D34:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D34;
- break;
- case ARCH_EXID_SAMA5D35:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D35;
- break;
- case ARCH_EXID_SAMA5D36:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D36;
- break;
- }
- }
-}
-
-static void __init alt_soc_detect(u32 dbgu_base)
-{
- u32 cidr, socid;
-
- /* SoC ID */
- cidr = __raw_readl(AT91_ALT_IO_P2V(dbgu_base) + AT91_DBGU_CIDR);
- socid = cidr & ~AT91_CIDR_VERSION;
-
- switch (socid) {
- case ARCH_ID_SAMA5:
- at91_soc_initdata.exid = __raw_readl(AT91_ALT_IO_P2V(dbgu_base) + AT91_DBGU_EXID);
- if (at91_soc_initdata.exid & ARCH_EXID_SAMA5D3) {
- at91_soc_initdata.type = AT91_SOC_SAMA5D3;
- } else if (at91_soc_initdata.exid & ARCH_EXID_SAMA5D4) {
- at91_soc_initdata.type = AT91_SOC_SAMA5D4;
- }
- break;
- }
-
- if (!at91_soc_is_detected())
- return;
-
- at91_soc_initdata.cidr = cidr;
-
- /* sub version of soc */
- if (!at91_soc_initdata.exid)
- at91_soc_initdata.exid = __raw_readl(AT91_ALT_IO_P2V(dbgu_base) + AT91_DBGU_EXID);
-
- if (at91_soc_initdata.type == AT91_SOC_SAMA5D4) {
- switch (at91_soc_initdata.exid) {
- case ARCH_EXID_SAMA5D41:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D41;
- break;
- case ARCH_EXID_SAMA5D42:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D42;
- break;
- case ARCH_EXID_SAMA5D43:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D43;
- break;
- case ARCH_EXID_SAMA5D44:
- at91_soc_initdata.subtype = AT91_SOC_SAMA5D44;
- break;
- }
- }
-}
-
-static const char *soc_name[] = {
- [AT91_SOC_RM9200] = "at91rm9200",
- [AT91_SOC_SAM9260] = "at91sam9260",
- [AT91_SOC_SAM9261] = "at91sam9261",
- [AT91_SOC_SAM9263] = "at91sam9263",
- [AT91_SOC_SAM9G10] = "at91sam9g10",
- [AT91_SOC_SAM9G20] = "at91sam9g20",
- [AT91_SOC_SAM9G45] = "at91sam9g45",
- [AT91_SOC_SAM9RL] = "at91sam9rl",
- [AT91_SOC_SAM9X5] = "at91sam9x5",
- [AT91_SOC_SAM9N12] = "at91sam9n12",
- [AT91_SOC_SAMA5D3] = "sama5d3",
- [AT91_SOC_SAMA5D4] = "sama5d4",
- [AT91_SOC_UNKNOWN] = "Unknown",
-};
-
-const char *at91_get_soc_type(struct at91_socinfo *c)
-{
- return soc_name[c->type];
-}
-EXPORT_SYMBOL(at91_get_soc_type);
-
-static const char *soc_subtype_name[] = {
- [AT91_SOC_RM9200_BGA] = "at91rm9200 BGA",
- [AT91_SOC_RM9200_PQFP] = "at91rm9200 PQFP",
- [AT91_SOC_SAM9XE] = "at91sam9xe",
- [AT91_SOC_SAM9G45ES] = "at91sam9g45es",
- [AT91_SOC_SAM9M10] = "at91sam9m10",
- [AT91_SOC_SAM9G46] = "at91sam9g46",
- [AT91_SOC_SAM9M11] = "at91sam9m11",
- [AT91_SOC_SAM9G15] = "at91sam9g15",
- [AT91_SOC_SAM9G35] = "at91sam9g35",
- [AT91_SOC_SAM9X35] = "at91sam9x35",
- [AT91_SOC_SAM9G25] = "at91sam9g25",
- [AT91_SOC_SAM9X25] = "at91sam9x25",
- [AT91_SOC_SAMA5D31] = "sama5d31",
- [AT91_SOC_SAMA5D33] = "sama5d33",
- [AT91_SOC_SAMA5D34] = "sama5d34",
- [AT91_SOC_SAMA5D35] = "sama5d35",
- [AT91_SOC_SAMA5D36] = "sama5d36",
- [AT91_SOC_SAMA5D41] = "sama5d41",
- [AT91_SOC_SAMA5D42] = "sama5d42",
- [AT91_SOC_SAMA5D43] = "sama5d43",
- [AT91_SOC_SAMA5D44] = "sama5d44",
- [AT91_SOC_SUBTYPE_NONE] = "None",
- [AT91_SOC_SUBTYPE_UNKNOWN] = "Unknown",
-};
-
-const char *at91_get_soc_subtype(struct at91_socinfo *c)
-{
- return soc_subtype_name[c->subtype];
-}
-EXPORT_SYMBOL(at91_get_soc_subtype);
-
-void __init at91_map_io(void)
-{
- /* Map peripherals */
- iotable_init(&at91_io_desc, 1);
-
- at91_soc_initdata.type = AT91_SOC_UNKNOWN;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_UNKNOWN;
-
- soc_detect(AT91_BASE_DBGU0);
- if (!at91_soc_is_detected())
- soc_detect(AT91_BASE_DBGU1);
-
- if (!at91_soc_is_detected())
- panic(pr_fmt("Impossible to detect the SOC type"));
-
- pr_info("Detected soc type: %s\n",
- at91_get_soc_type(&at91_soc_initdata));
- if (at91_soc_initdata.subtype != AT91_SOC_SUBTYPE_NONE)
- pr_info("Detected soc subtype: %s\n",
- at91_get_soc_subtype(&at91_soc_initdata));
-}
-
-void __init at91_alt_map_io(void)
-{
- /* Map peripherals */
- iotable_init(&at91_alt_io_desc, 1);
-
- at91_soc_initdata.type = AT91_SOC_UNKNOWN;
- at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_UNKNOWN;
-
- alt_soc_detect(AT91_BASE_DBGU2);
- if (!at91_soc_is_detected())
- panic("AT91: Impossible to detect the SOC type");
-
- pr_info("AT91: Detected soc type: %s\n",
- at91_get_soc_type(&at91_soc_initdata));
- if (at91_soc_initdata.subtype != AT91_SOC_SUBTYPE_NONE)
- pr_info("AT91: Detected soc subtype: %s\n",
- at91_get_soc_subtype(&at91_soc_initdata));
-}
-
-void __iomem *at91_matrix_base;
-EXPORT_SYMBOL_GPL(at91_matrix_base);
-
-void __init at91_ioremap_matrix(u32 base_addr)
-{
- at91_matrix_base = ioremap(base_addr, 512);
- if (!at91_matrix_base)
- panic(pr_fmt("Impossible to ioremap at91_matrix_base\n"));
-}
diff --git a/arch/arm/mach-at91/soc.c b/arch/arm/mach-at91/soc.c
new file mode 100644
index 000000000000..54343ffa3e53
--- /dev/null
+++ b/arch/arm/mach-at91/soc.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015 Atmel
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#define pr_fmt(fmt) "AT91: " fmt
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include "soc.h"
+
+#define AT91_DBGU_CIDR 0x40
+#define AT91_DBGU_CIDR_VERSION(x) ((x) & 0x1f)
+#define AT91_DBGU_CIDR_EXT BIT(31)
+#define AT91_DBGU_CIDR_MATCH_MASK 0x7fffffe0
+#define AT91_DBGU_EXID 0x44
+
+struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct at91_soc *soc;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ void __iomem *regs;
+ u32 cidr, exid;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-dbgu");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9260-dbgu");
+
+ if (!np) {
+ pr_warn("Could not find DBGU node");
+ return NULL;
+ }
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs) {
+ pr_warn("Could not map DBGU iomem range");
+ return NULL;
+ }
+
+ cidr = readl(regs + AT91_DBGU_CIDR);
+ exid = readl(regs + AT91_DBGU_EXID);
+
+ iounmap(regs);
+
+ for (soc = socs; soc->name; soc++) {
+ if (soc->cidr_match != (cidr & AT91_DBGU_CIDR_MATCH_MASK))
+ continue;
+
+ if (!(cidr & AT91_DBGU_CIDR_EXT) || soc->exid_match == exid)
+ break;
+ }
+
+ if (!soc->name) {
+ pr_warn("Could not find matching SoC description\n");
+ return NULL;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return NULL;
+
+ soc_dev_attr->family = soc->family;
+ soc_dev_attr->soc_id = soc->name;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X",
+ AT91_DBGU_CIDR_VERSION(cidr));
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ pr_warn("Could not register SoC device\n");
+ return NULL;
+ }
+
+ if (soc->family)
+ pr_info("Detected SoC family: %s\n", soc->family);
+ pr_info("Detected SoC: %s, revision %X\n", soc->name,
+ AT91_DBGU_CIDR_VERSION(cidr));
+
+ return soc_dev;
+}
diff --git a/arch/arm/mach-at91/soc.h b/arch/arm/mach-at91/soc.h
new file mode 100644
index 000000000000..be23c400596b
--- /dev/null
+++ b/arch/arm/mach-at91/soc.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015 Atmel
+ *
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#ifndef __AT91_SOC_H
+#define __AT91_SOC_H
+
+#include <linux/sys_soc.h>
+
+struct at91_soc {
+ u32 cidr_match;
+ u32 exid_match;
+ const char *name;
+ const char *family;
+};
+
+#define AT91_SOC(__cidr, __exid, __name, __family) \
+ { \
+ .cidr_match = (__cidr), \
+ .exid_match = (__exid), \
+ .name = (__name), \
+ .family = (__family), \
+ }
+
+struct soc_device * __init
+at91_soc_init(const struct at91_soc *socs);
+
+#define AT91RM9200_CIDR_MATCH 0x09290780
+
+#define AT91SAM9260_CIDR_MATCH 0x019803a0
+#define AT91SAM9261_CIDR_MATCH 0x019703a0
+#define AT91SAM9263_CIDR_MATCH 0x019607a0
+#define AT91SAM9G20_CIDR_MATCH 0x019905a0
+#define AT91SAM9RL64_CIDR_MATCH 0x019b03a0
+#define AT91SAM9G45_CIDR_MATCH 0x019b05a0
+#define AT91SAM9X5_CIDR_MATCH 0x019a05a0
+#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
+
+#define AT91SAM9M11_EXID_MATCH 0x00000001
+#define AT91SAM9M10_EXID_MATCH 0x00000002
+#define AT91SAM9G46_EXID_MATCH 0x00000003
+#define AT91SAM9G45_EXID_MATCH 0x00000004
+
+#define AT91SAM9G15_EXID_MATCH 0x00000000
+#define AT91SAM9G35_EXID_MATCH 0x00000001
+#define AT91SAM9X35_EXID_MATCH 0x00000002
+#define AT91SAM9G25_EXID_MATCH 0x00000003
+#define AT91SAM9X25_EXID_MATCH 0x00000004
+
+#define AT91SAM9CN12_EXID_MATCH 0x00000005
+#define AT91SAM9N12_EXID_MATCH 0x00000006
+#define AT91SAM9CN11_EXID_MATCH 0x00000009
+
+#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
+#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
+#define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0
+
+#define SAMA5D3_CIDR_MATCH 0x0a5c07c0
+#define SAMA5D31_EXID_MATCH 0x00444300
+#define SAMA5D33_EXID_MATCH 0x00414300
+#define SAMA5D34_EXID_MATCH 0x00414301
+#define SAMA5D35_EXID_MATCH 0x00584300
+#define SAMA5D36_EXID_MATCH 0x00004301
+
+#define SAMA5D4_CIDR_MATCH 0x0a5c07c0
+#define SAMA5D41_EXID_MATCH 0x00000001
+#define SAMA5D42_EXID_MATCH 0x00000002
+#define SAMA5D43_EXID_MATCH 0x00000003
+#define SAMA5D44_EXID_MATCH 0x00000004
+
+#endif /* __AT91_SOC_H */
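
This header is consumed by the per-family machine files: each declares a sentinel-terminated at91_soc table with the AT91_SOC() initializer and hands it to at91_soc_init(), exactly as the sama5.c change earlier in this series does. Below is a hedged sketch for a hypothetical at91sam9x5 machine file (table shortened, function name illustrative), using only the *_CIDR_MATCH/*_EXID_MATCH constants defined above.

  /* Sketch only: how another family would describe its SoCs. */
  #include <linux/init.h>
  #include "soc.h"

  static const struct at91_soc at91sam9x5_socs[] = {
          AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G15_EXID_MATCH,
                   "at91sam9g15", "at91sam9x5"),
          AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G25_EXID_MATCH,
                   "at91sam9g25", "at91sam9x5"),
          { /* sentinel: terminates the lookup loop in soc.c */ },
  };

  static void __init at91sam9x5_dt_init(void)
  {
          /* Registers the matching soc_device (or returns NULL on failure) */
          at91_soc_init(at91sam9x5_socs);
  }
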
diff --git a/arch/arm/mach-bcm/bcm_cygnus.c b/arch/arm/mach-bcm/bcm_cygnus.c
index 30dc58be51b8..7ae894c7849b 100644
--- a/arch/arm/mach-bcm/bcm_cygnus.c
+++ b/arch/arm/mach-bcm/bcm_cygnus.c
@@ -13,7 +13,7 @@
#include <asm/mach/arch.h>
-static const char const *bcm_cygnus_dt_compat[] = {
+static const char * const bcm_cygnus_dt_compat[] __initconst = {
"brcm,cygnus",
NULL,
};
diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c
index fb38c726e987..f46b78dd6136 100644
--- a/arch/arm/mach-cns3xxx/pm.c
+++ b/arch/arm/mach-cns3xxx/pm.c
@@ -73,7 +73,6 @@ static void cns3xxx_pwr_soft_rst_force(unsigned int block)
__raw_writel(reg, PM_SOFT_RST_REG);
}
-EXPORT_SYMBOL(cns3xxx_pwr_soft_rst_force);
void cns3xxx_pwr_soft_rst(unsigned int block)
{
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index cd30f6f5f2ff..dd8f5312b2c0 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -200,17 +200,6 @@ config DA850_UI_SD_VIDEO_PORT
endchoice
-config DA850_WL12XX
- bool "AM18x wl1271 daughter board"
- depends on MACH_DAVINCI_DA850_EVM
- help
- The wl1271 daughter card for AM18x EVMs is a combo wireless
- connectivity add-on card, based on the LS Research TiWi module with
- Texas Instruments' wl1271 solution.
- Say Y if you want to use a wl1271 expansion card connected to the
- AM18x EVM.
-
-
config MACH_MITYOMAPL138
bool "Critical Link MityDSP-L138/MityARM-1808 SoM"
depends on ARCH_DAVINCI_DA850
diff --git a/arch/arm/mach-davinci/asp.h b/arch/arm/mach-davinci/asp.h
index d9b2acd12393..1128e1d8e4b4 100644
--- a/arch/arm/mach-davinci/asp.h
+++ b/arch/arm/mach-davinci/asp.h
@@ -21,6 +21,9 @@
/* Bases of da830 McASP1 register banks */
#define DAVINCI_DA830_MCASP1_REG_BASE 0x01D04000
+/* Bases of da830 McASP2 register banks */
+#define DAVINCI_DA830_MCASP2_REG_BASE 0x01D08000
+
/* EDMA channels of dm644x and dm355 */
#define DAVINCI_DMA_ASP0_TX 2
#define DAVINCI_DMA_ASP0_RX 3
@@ -40,6 +43,10 @@
#define DAVINCI_DA830_DMA_MCASP1_AREVT 2
#define DAVINCI_DA830_DMA_MCASP1_AXEVT 3
+/* EDMA channels of da830 McASP2 */
+#define DAVINCI_DA830_DMA_MCASP2_AREVT 4
+#define DAVINCI_DA830_DMA_MCASP2_AXEVT 5
+
/* Interrupts */
#define DAVINCI_ASP0_RX_INT IRQ_MBRINT
#define DAVINCI_ASP0_TX_INT IRQ_MBXINT
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6b5a97da9fe3..1ed545cc2b83 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -38,7 +38,6 @@
#include <linux/regulator/fixed.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
-#include <linux/wl12xx.h>
#include <mach/common.h>
#include <mach/cp_intc.h>
@@ -60,9 +59,6 @@
#define DA850_MMCSD_CD_PIN GPIO_TO_PIN(4, 0)
#define DA850_MMCSD_WP_PIN GPIO_TO_PIN(4, 1)
-#define DA850_WLAN_EN GPIO_TO_PIN(6, 9)
-#define DA850_WLAN_IRQ GPIO_TO_PIN(6, 10)
-
#define DA850_MII_MDIO_CLKEN_PIN GPIO_TO_PIN(2, 6)
static struct mtd_partition da850evm_spiflash_part[] = {
@@ -1343,109 +1339,6 @@ static __init void da850_vpif_init(void)
static __init void da850_vpif_init(void) {}
#endif
-#ifdef CONFIG_DA850_WL12XX
-
-static void wl12xx_set_power(int index, bool power_on)
-{
- static bool power_state;
-
- pr_debug("Powering %s wl12xx", power_on ? "on" : "off");
-
- if (power_on == power_state)
- return;
- power_state = power_on;
-
- if (power_on) {
- /* Power up sequence required for wl127x devices */
- gpio_set_value(DA850_WLAN_EN, 1);
- usleep_range(15000, 15000);
- gpio_set_value(DA850_WLAN_EN, 0);
- usleep_range(1000, 1000);
- gpio_set_value(DA850_WLAN_EN, 1);
- msleep(70);
- } else {
- gpio_set_value(DA850_WLAN_EN, 0);
- }
-}
-
-static struct davinci_mmc_config da850_wl12xx_mmc_config = {
- .set_power = wl12xx_set_power,
- .wires = 4,
- .max_freq = 25000000,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NONREMOVABLE |
- MMC_CAP_POWER_OFF_CARD,
-};
-
-static const short da850_wl12xx_pins[] __initconst = {
- DA850_MMCSD1_DAT_0, DA850_MMCSD1_DAT_1, DA850_MMCSD1_DAT_2,
- DA850_MMCSD1_DAT_3, DA850_MMCSD1_CLK, DA850_MMCSD1_CMD,
- DA850_GPIO6_9, DA850_GPIO6_10,
- -1
-};
-
-static struct wl12xx_platform_data da850_wl12xx_wlan_data __initdata = {
- .irq = -1,
- .board_ref_clock = WL12XX_REFCLOCK_38,
- .platform_quirks = WL12XX_PLATFORM_QUIRK_EDGE_IRQ,
-};
-
-static __init int da850_wl12xx_init(void)
-{
- int ret;
-
- ret = davinci_cfg_reg_list(da850_wl12xx_pins);
- if (ret) {
- pr_err("wl12xx/mmc mux setup failed: %d\n", ret);
- goto exit;
- }
-
- ret = da850_register_mmcsd1(&da850_wl12xx_mmc_config);
- if (ret) {
- pr_err("wl12xx/mmc registration failed: %d\n", ret);
- goto exit;
- }
-
- ret = gpio_request_one(DA850_WLAN_EN, GPIOF_OUT_INIT_LOW, "wl12xx_en");
- if (ret) {
- pr_err("Could not request wl12xx enable gpio: %d\n", ret);
- goto exit;
- }
-
- ret = gpio_request_one(DA850_WLAN_IRQ, GPIOF_IN, "wl12xx_irq");
- if (ret) {
- pr_err("Could not request wl12xx irq gpio: %d\n", ret);
- goto free_wlan_en;
- }
-
- da850_wl12xx_wlan_data.irq = gpio_to_irq(DA850_WLAN_IRQ);
-
- ret = wl12xx_set_platform_data(&da850_wl12xx_wlan_data);
- if (ret) {
- pr_err("Could not set wl12xx data: %d\n", ret);
- goto free_wlan_irq;
- }
-
- return 0;
-
-free_wlan_irq:
- gpio_free(DA850_WLAN_IRQ);
-
-free_wlan_en:
- gpio_free(DA850_WLAN_EN);
-
-exit:
- return ret;
-}
-
-#else /* CONFIG_DA850_WL12XX */
-
-static __init int da850_wl12xx_init(void)
-{
- return 0;
-}
-
-#endif /* CONFIG_DA850_WL12XX */
-
#define DA850EVM_SATA_REFCLKPN_RATE (100 * 1000 * 1000)
static __init void da850_evm_init(void)
@@ -1502,11 +1395,6 @@ static __init void da850_evm_init(void)
if (ret)
pr_warn("%s: MMCSD0 registration failed: %d\n",
__func__, ret);
-
- ret = da850_wl12xx_init();
- if (ret)
- pr_warn("%s: WL12xx initialization failed: %d\n",
- __func__, ret);
}
davinci_serial_init(da8xx_serial_device);
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index e365c1bb1265..306ebc51599a 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -17,7 +17,6 @@
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <linux/export.h>
-#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
#include <mach/cpuidle.h>
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index b85b781b05fd..ddfdd820e6f2 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -463,16 +463,23 @@ static struct resource da830_mcasp1_resources[] = {
},
/* TX event */
{
+ .name = "tx",
.start = DAVINCI_DA830_DMA_MCASP1_AXEVT,
.end = DAVINCI_DA830_DMA_MCASP1_AXEVT,
.flags = IORESOURCE_DMA,
},
/* RX event */
{
+ .name = "rx",
.start = DAVINCI_DA830_DMA_MCASP1_AREVT,
.end = DAVINCI_DA830_DMA_MCASP1_AREVT,
.flags = IORESOURCE_DMA,
},
+ {
+ .name = "common",
+ .start = IRQ_DA8XX_MCASPINT,
+ .flags = IORESOURCE_IRQ,
+ },
};
static struct platform_device da830_mcasp1_device = {
@@ -482,6 +489,41 @@ static struct platform_device da830_mcasp1_device = {
.resource = da830_mcasp1_resources,
};
+static struct resource da830_mcasp2_resources[] = {
+ {
+ .name = "mpu",
+ .start = DAVINCI_DA830_MCASP2_REG_BASE,
+ .end = DAVINCI_DA830_MCASP2_REG_BASE + (SZ_1K * 12) - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /* TX event */
+ {
+ .name = "tx",
+ .start = DAVINCI_DA830_DMA_MCASP2_AXEVT,
+ .end = DAVINCI_DA830_DMA_MCASP2_AXEVT,
+ .flags = IORESOURCE_DMA,
+ },
+ /* RX event */
+ {
+ .name = "rx",
+ .start = DAVINCI_DA830_DMA_MCASP2_AREVT,
+ .end = DAVINCI_DA830_DMA_MCASP2_AREVT,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "common",
+ .start = IRQ_DA8XX_MCASPINT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device da830_mcasp2_device = {
+ .name = "davinci-mcasp",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(da830_mcasp2_resources),
+ .resource = da830_mcasp2_resources,
+};
+
static struct resource da850_mcasp_resources[] = {
{
.name = "mpu",
@@ -491,16 +533,23 @@ static struct resource da850_mcasp_resources[] = {
},
/* TX event */
{
+ .name = "tx",
.start = DAVINCI_DA8XX_DMA_MCASP0_AXEVT,
.end = DAVINCI_DA8XX_DMA_MCASP0_AXEVT,
.flags = IORESOURCE_DMA,
},
/* RX event */
{
+ .name = "rx",
.start = DAVINCI_DA8XX_DMA_MCASP0_AREVT,
.end = DAVINCI_DA8XX_DMA_MCASP0_AREVT,
.flags = IORESOURCE_DMA,
},
+ {
+ .name = "common",
+ .start = IRQ_DA8XX_MCASPINT,
+ .flags = IORESOURCE_IRQ,
+ },
};
static struct platform_device da850_mcasp_device = {
@@ -512,14 +561,31 @@ static struct platform_device da850_mcasp_device = {
void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
{
- /* DA830/OMAP-L137 has 3 instances of McASP */
- if (cpu_is_davinci_da830() && id == 1) {
- da830_mcasp1_device.dev.platform_data = pdata;
- platform_device_register(&da830_mcasp1_device);
- } else if (cpu_is_davinci_da850()) {
- da850_mcasp_device.dev.platform_data = pdata;
- platform_device_register(&da850_mcasp_device);
+ struct platform_device *pdev;
+
+ switch (id) {
+ case 0:
+ /* Valid for DA830/OMAP-L137 or DA850/OMAP-L138 */
+ pdev = &da850_mcasp_device;
+ break;
+ case 1:
+ /* Valid for DA830/OMAP-L137 only */
+ if (!cpu_is_davinci_da830())
+ return;
+ pdev = &da830_mcasp1_device;
+ break;
+ case 2:
+ /* Valid for DA830/OMAP-L137 only */
+ if (!cpu_is_davinci_da830())
+ return;
+ pdev = &da830_mcasp2_device;
+ break;
+ default:
+ return;
}
+
+ pdev->dev.platform_data = pdata;
+ platform_device_register(pdev);
}
static struct resource da8xx_pruss_resources[] = {
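
With the registration above now keyed purely on the id argument, board code requests any of the three DA830 McASP instances (or the single DA850 one) the same way; ids that are invalid for the running SoC are silently ignored. A brief sketch follows; the header paths, the function name and the empty platform data are assumptions for illustration, not part of this patch.

  /* Sketch only: register McASP2 from DA830/OMAP-L137 board code. */
  #include <linux/init.h>
  #include <linux/platform_data/davinci_asp.h>
  #include <mach/da8xx.h>

  /* Board-specific serializer/pin setup omitted here */
  static struct snd_platform_data da830_evm_snd_data;

  static void __init da830_evm_init_mcasp(void)
  {
          /* id 2 is only honoured on DA830; other SoCs return early */
          da8xx_register_mcasp(2, &da830_evm_snd_data);
  }
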
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 6c3bbea7d77d..3f842bb266d6 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -493,7 +493,6 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
[IRQ_DM646X_EMACMISCINT] = 7,
[IRQ_DM646X_MCASP0TXINT] = 7,
[IRQ_DM646X_MCASP0RXINT] = 7,
- [IRQ_AEMIFINT] = 7,
[IRQ_DM646X_RESERVED_3] = 7,
[IRQ_DM646X_MCASP1TXINT] = 7, /* clockevent */
[IRQ_TINT0_TINT34] = 7, /* clocksource */
@@ -610,19 +609,31 @@ static struct resource dm646x_mcasp0_resources[] = {
.end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1,
.flags = IORESOURCE_MEM,
},
- /* first TX, then RX */
{
+ .name = "tx",
.start = DAVINCI_DM646X_DMA_MCASP0_AXEVT0,
.end = DAVINCI_DM646X_DMA_MCASP0_AXEVT0,
.flags = IORESOURCE_DMA,
},
{
+ .name = "rx",
.start = DAVINCI_DM646X_DMA_MCASP0_AREVT0,
.end = DAVINCI_DM646X_DMA_MCASP0_AREVT0,
.flags = IORESOURCE_DMA,
},
+ {
+ .name = "tx",
+ .start = IRQ_DM646X_MCASP0TXINT,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "rx",
+ .start = IRQ_DM646X_MCASP0RXINT,
+ .flags = IORESOURCE_IRQ,
+ },
};
+/* DIT mode only, rx is not supported */
static struct resource dm646x_mcasp1_resources[] = {
{
.name = "mpu",
@@ -630,17 +641,16 @@ static struct resource dm646x_mcasp1_resources[] = {
.end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1,
.flags = IORESOURCE_MEM,
},
- /* DIT mode, only TX event */
{
+ .name = "tx",
.start = DAVINCI_DM646X_DMA_MCASP1_AXEVT1,
.end = DAVINCI_DM646X_DMA_MCASP1_AXEVT1,
.flags = IORESOURCE_DMA,
},
- /* DIT mode, dummy entry */
{
- .start = -1,
- .end = -1,
- .flags = IORESOURCE_DMA,
+ .name = "tx",
+ .start = IRQ_DM646X_MCASP1TXINT,
+ .flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-davinci/include/mach/irqs.h b/arch/arm/mach-davinci/include/mach/irqs.h
index 354af71798dc..edb2ca62321a 100644
--- a/arch/arm/mach-davinci/include/mach/irqs.h
+++ b/arch/arm/mach-davinci/include/mach/irqs.h
@@ -129,8 +129,8 @@
#define IRQ_DM646X_EMACMISCINT 27
#define IRQ_DM646X_MCASP0TXINT 28
#define IRQ_DM646X_MCASP0RXINT 29
+#define IRQ_DM646X_MCASP1TXINT 30
#define IRQ_DM646X_RESERVED_3 31
-#define IRQ_DM646X_MCASP1TXINT 32
#define IRQ_DM646X_VLQINT 38
#define IRQ_DM646X_UARTINT2 42
#define IRQ_DM646X_SPINT0 43
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 603820e5aba7..81064cd61a0a 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -123,7 +123,7 @@ config SOC_EXYNOS5800
config EXYNOS5420_MCPM
bool "Exynos5420 Multi-Cluster PM support"
depends on MCPM && SOC_EXYNOS5420
- select ARM_CCI
+ select ARM_CCI400_PORT_CTRL
select ARM_CPU_SUSPEND
help
This is needed to provide CPU and cluster power management
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index f70eca7ee705..5f5cd562c593 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -126,6 +126,12 @@ enum {
void exynos_firmware_init(void);
+/* CPU BOOT mode flag for Exynos3250 SoC bootloader */
+#define C2_STATE (1 << 3)
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
+
extern u32 exynos_get_eint_wake_mask(void);
#ifdef CONFIG_PM_SLEEP
@@ -153,6 +159,8 @@ extern void exynos_enter_aftr(void);
extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data;
+extern void exynos_set_delayed_reset_assertion(bool enable);
+
extern void s5p_init_cpu(void __iomem *cpuid_addr);
extern unsigned int samsung_rev(void);
extern void __iomem *cpu_boot_reg_base(void);
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index f44c2e05c82e..5917a30eee33 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -167,6 +167,33 @@ static void __init exynos_init_io(void)
}
/*
+ * Set or clear the USE_DELAYED_RESET_ASSERTION option. Used by the SMP
+ * code and by suspend.
+ *
+ * This is necessary only on Exynos4 SoCs. While the system is running,
+ * USE_DELAYED_RESET_ASSERTION should be set so that the ARM CLK clock-down
+ * feature can properly detect the global idle state when a secondary CPU
+ * is powered down.
+ *
+ * However, it should not be set when the system is going into suspend.
+ */
+void exynos_set_delayed_reset_assertion(bool enable)
+{
+ if (of_machine_is_compatible("samsung,exynos4")) {
+ unsigned int tmp, core_id;
+
+ for (core_id = 0; core_id < num_possible_cpus(); core_id++) {
+ tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
+ if (enable)
+ tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
+ else
+ tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
+ pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
+ }
+ }
+}
+
+/*
* Apparently, these SoCs are not able to wake-up from suspend using
* the PMU. Too bad. Should they suddenly become capable of such a
* feat, the matches below should be moved to suspend.c.
@@ -206,7 +233,7 @@ static void __init exynos_dt_machine_init(void)
if (!IS_ENABLED(CONFIG_SMP))
exynos_sysram_init();
-#ifdef CONFIG_ARM_EXYNOS_CPUIDLE
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_EXYNOS_CPUIDLE)
if (of_machine_is_compatible("samsung,exynos4210"))
exynos_cpuidle.dev.platform_data = &cpuidle_coupled_exynos_data;
#endif
@@ -214,6 +241,7 @@ static void __init exynos_dt_machine_init(void)
of_machine_is_compatible("samsung,exynos4212") ||
(of_machine_is_compatible("samsung,exynos4412") &&
of_machine_is_compatible("samsung,trats2")) ||
+ of_machine_is_compatible("samsung,exynos3250") ||
of_machine_is_compatible("samsung,exynos5250"))
platform_device_register(&exynos_cpuidle);
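
The helper is exported via common.h so that both users named in its comment can reach it: the SMP bring-up path enables the option while the system runs (the platsmp.c hunk later in this diff does this), and the suspend path clears it before entering suspend (that caller lives in suspend.c, outside this diff). A rough sketch of the intended call pattern, with illustrative function names:

  /* Sketch only: expected call sites of the new helper. */
  #include "common.h"

  static void example_smp_prepare(void)
  {
          /* while running, let the ARM CLK clock-down logic see idle CPUs */
          exynos_set_delayed_reset_assertion(true);
  }

  static int example_suspend_prepare(void)
  {
          /* must not remain set across suspend */
          exynos_set_delayed_reset_assertion(false);
          return 0;
  }
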
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 4791a3cc00f9..1bd35763f12e 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -48,7 +48,13 @@ static int exynos_do_idle(unsigned long mode)
__raw_writel(virt_to_phys(exynos_cpu_resume_ns),
sysram_ns_base_addr + 0x24);
__raw_writel(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
- exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
+ if (soc_is_exynos3250()) {
+ exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE,
+ SMC_POWERSTATE_IDLE, 0);
+ exynos_smc(SMC_CMD_SHUTDOWN, OP_TYPE_CLUSTER,
+ SMC_POWERSTATE_IDLE, 0);
+ } else
+ exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
break;
case FW_DO_IDLE_SLEEP:
exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
@@ -206,3 +212,28 @@ void __init exynos_firmware_init(void)
outer_cache.configure = exynos_l2_configure;
}
}
+
+#define REG_CPU_STATE_ADDR (sysram_ns_base_addr + 0x28)
+#define BOOT_MODE_MASK 0x1f
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+
+ if (mode & BOOT_MODE_MASK)
+ tmp &= ~BOOT_MODE_MASK;
+
+ tmp |= mode;
+ __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
+
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+ tmp &= ~mode;
+ __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index b0d3c2e876fb..9bdf54795f05 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -61,25 +61,7 @@ static void __iomem *ns_sram_base_addr;
: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
"r9", "r10", "lr", "memory")
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() after its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-static int
-cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
-
-#define exynos_cluster_usecnt(cluster) \
- (cpu_use_count[0][cluster] + \
- cpu_use_count[1][cluster] + \
- cpu_use_count[2][cluster] + \
- cpu_use_count[3][cluster])
-
-#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
-
-static int exynos_power_up(unsigned int cpu, unsigned int cluster)
+static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
@@ -88,127 +70,65 @@ static int exynos_power_up(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
- /*
- * Since this is called with IRQs enabled, and no arch_spin_lock_irq
- * variant exists, we need to disable IRQs manually here.
- */
- local_irq_disable();
- arch_spin_lock(&exynos_mcpm_lock);
-
- cpu_use_count[cpu][cluster]++;
- if (cpu_use_count[cpu][cluster] == 1) {
- bool was_cluster_down =
- (exynos_cluster_usecnt(cluster) == 1);
-
- /*
- * Turn on the cluster (L2/COMMON) and then power on the
- * cores.
- */
- if (was_cluster_down)
- exynos_cluster_power_up(cluster);
-
- exynos_cpu_power_up(cpunr);
- } else if (cpu_use_count[cpu][cluster] != 2) {
- /*
- * The only possible values are:
- * 0 = CPU down
- * 1 = CPU (still) up
- * 2 = CPU requested to be up before it had a chance
- * to actually make itself down.
- * Any other value is a bug.
- */
- BUG();
- }
+ exynos_cpu_power_up(cpunr);
+ return 0;
+}
- arch_spin_unlock(&exynos_mcpm_lock);
- local_irq_enable();
+static int exynos_cluster_powerup(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ if (cluster >= EXYNOS5420_NR_CLUSTERS)
+ return -EINVAL;
+ exynos_cluster_power_up(cluster);
return 0;
}
-/*
- * NOTE: This function requires the stack data to be visible through power down
- * and can only be executed on processors like A15 and A7 that hit the cache
- * with the C bit clear in the SCTLR register.
- */
-static void exynos_power_down(void)
+static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpu, cluster;
- bool last_man = false, skip_wfi = false;
- unsigned int cpunr;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+ unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
cluster >= EXYNOS5420_NR_CLUSTERS);
+ exynos_cpu_power_down(cpunr);
+}
- __mcpm_cpu_going_down(cpu, cluster);
-
- arch_spin_lock(&exynos_mcpm_lock);
- BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
- cpu_use_count[cpu][cluster]--;
- if (cpu_use_count[cpu][cluster] == 0) {
- exynos_cpu_power_down(cpunr);
-
- if (exynos_cluster_unused(cluster)) {
- exynos_cluster_power_down(cluster);
- last_man = true;
- }
- } else if (cpu_use_count[cpu][cluster] == 1) {
- /*
- * A power_up request went ahead of us.
- * Even if we do not want to shut this CPU down,
- * the caller expects a certain state as if the WFI
- * was aborted. So let's continue with cache cleaning.
- */
- skip_wfi = true;
- } else {
- BUG();
- }
-
- if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
- arch_spin_unlock(&exynos_mcpm_lock);
-
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /*
- * On the Cortex-A15 we need to disable
- * L2 prefetching before flushing the cache.
- */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3\n\t"
- "isb\n\t"
- "dsb"
- : : "r" (0x400));
- }
+static void exynos_cluster_powerdown_prepare(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS);
+ exynos_cluster_power_down(cluster);
+}
- /* Flush all cache levels for this cluster. */
- exynos_v7_exit_coherency_flush(all);
+static void exynos_cpu_cache_disable(void)
+{
+ /* Disable and flush the local CPU cache. */
+ exynos_v7_exit_coherency_flush(louis);
+}
+static void exynos_cluster_cache_disable(void)
+{
+ if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
/*
- * Disable cluster-level coherency by masking
- * incoming snoops and DVM messages:
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
*/
- cci_disable_port_by_cpu(mpidr);
-
- __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
- } else {
- arch_spin_unlock(&exynos_mcpm_lock);
-
- /* Disable and flush the local CPU cache. */
- exynos_v7_exit_coherency_flush(louis);
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3\n\t"
+ "isb\n\t"
+ "dsb"
+ : : "r" (0x400));
}
- __mcpm_cpu_down(cpu, cluster);
-
- /* Now we are prepared for power-down, do it: */
- if (!skip_wfi)
- wfi();
+ /* Flush all cache levels for this cluster. */
+ exynos_v7_exit_coherency_flush(all);
- /* Not dead at this point? Let our caller cope. */
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ cci_disable_port_by_cpu(read_cpuid_mpidr());
}
static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
@@ -222,10 +142,8 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
/* Wait for the core state to be OFF */
while (tries--) {
- if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
- if ((exynos_cpu_power_state(cpunr) == 0))
- return 0; /* success: the CPU is halted */
- }
+ if ((exynos_cpu_power_state(cpunr) == 0))
+ return 0; /* success: the CPU is halted */
/* Otherwise, wait and retry: */
msleep(1);
@@ -234,63 +152,23 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
return -ETIMEDOUT; /* timeout */
}
-static void exynos_powered_up(void)
-{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- arch_spin_lock(&exynos_mcpm_lock);
- if (cpu_use_count[cpu][cluster] == 0)
- cpu_use_count[cpu][cluster] = 1;
- arch_spin_unlock(&exynos_mcpm_lock);
-}
-
-static void exynos_suspend(u64 residency)
+static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpunr;
-
- exynos_power_down();
-
- /*
- * Execution reaches here only if cpu did not power down.
- * Hence roll back the changes done in exynos_power_down function.
- *
- * CAUTION: "This function requires the stack data to be visible through
- * power down and can only be executed on processors like A15 and A7
- * that hit the cache with the C bit clear in the SCTLR register."
- */
- mpidr = read_cpuid_mpidr();
- cpunr = exynos_pmu_cpunr(mpidr);
-
- exynos_cpu_power_up(cpunr);
+ /* especially when resuming: make sure power control is set */
+ exynos_cpu_powerup(cpu, cluster);
}
static const struct mcpm_platform_ops exynos_power_ops = {
- .power_up = exynos_power_up,
- .power_down = exynos_power_down,
+ .cpu_powerup = exynos_cpu_powerup,
+ .cluster_powerup = exynos_cluster_powerup,
+ .cpu_powerdown_prepare = exynos_cpu_powerdown_prepare,
+ .cluster_powerdown_prepare = exynos_cluster_powerdown_prepare,
+ .cpu_cache_disable = exynos_cpu_cache_disable,
+ .cluster_cache_disable = exynos_cluster_cache_disable,
.wait_for_powerdown = exynos_wait_for_powerdown,
- .suspend = exynos_suspend,
- .powered_up = exynos_powered_up,
+ .cpu_is_up = exynos_cpu_is_up,
};
-static void __init exynos_mcpm_usage_count_init(void)
-{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
- cluster >= EXYNOS5420_NR_CLUSTERS);
-
- cpu_use_count[cpu][cluster] = 1;
-}
-
/*
* Enable cluster-level coherency, in preparation for turning on the MMU.
*/
@@ -302,19 +180,6 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
"b cci_enable_port_for_self");
}
-static void __init exynos_cache_off(void)
-{
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /* disable L2 prefetching on the Cortex-A15 */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3\n\t"
- "isb\n\t"
- "dsb"
- : : "r" (0x400));
- }
- exynos_v7_exit_coherency_flush(all);
-}
-
static const struct of_device_id exynos_dt_mcpm_match[] = {
{ .compatible = "samsung,exynos5420" },
{ .compatible = "samsung,exynos5800" },
@@ -370,13 +235,11 @@ static int __init exynos_mcpm_init(void)
*/
pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
- exynos_mcpm_usage_count_init();
-
ret = mcpm_platform_register(&exynos_power_ops);
if (!ret)
ret = mcpm_sync_init(exynos_pm_power_up_setup);
if (!ret)
- ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
+ ret = mcpm_loopback(exynos_cluster_cache_disable); /* turn on the CCI */
if (ret) {
iounmap(ns_sram_base_addr);
return ret;
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index d2e9f12d12f1..a825bca2a2b6 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -34,30 +34,6 @@
extern void exynos4_secondary_startup(void);
-/*
- * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
- * during hot-(un)plugging CPUx.
- *
- * The feature can be cleared safely during first boot of secondary CPU.
- *
- * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
- * down a CPU so the CPU idle clock down feature could properly detect global
- * idle state when CPUx is off.
- */
-static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
-{
- if (soc_is_exynos4()) {
- unsigned int tmp;
-
- tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
- if (enable)
- tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
- else
- tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
- pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
- }
-}
-
#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
@@ -73,8 +49,6 @@ static inline void cpu_leave_lowpower(u32 core_id)
: "=&r" (v)
: "Ir" (CR_C), "Ir" (0x40)
: "cc");
-
- exynos_set_delayed_reset_assertion(core_id, false);
}
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
@@ -87,14 +61,6 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
/* Turn the CPU off on next WFI instruction. */
exynos_cpu_power_down(core_id);
- /*
- * Exynos4 SoCs require setting
- * USE_DELAYED_RESET_ASSERTION so the CPU idle
- * clock down feature could properly detect
- * global idle state when CPUx is off.
- */
- exynos_set_delayed_reset_assertion(core_id, true);
-
wfi();
if (pen_release == core_id) {
@@ -126,6 +92,8 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
*/
void exynos_cpu_power_down(int cpu)
{
+ u32 core_conf;
+
if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
/*
* Bypass power down for CPU0 during suspend. Check for
@@ -137,7 +105,10 @@ void exynos_cpu_power_down(int cpu)
if (!(val & S5P_CORE_LOCAL_PWR_EN))
return;
}
- pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+
+ core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+ core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
+ pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
/**
@@ -148,7 +119,12 @@ void exynos_cpu_power_down(int cpu)
*/
void exynos_cpu_power_up(int cpu)
{
- pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+ u32 core_conf = S5P_CORE_LOCAL_PWR_EN;
+
+ if (soc_is_exynos3250())
+ core_conf |= S5P_CORE_AUTOWAKEUP_EN;
+
+ pmu_raw_writel(core_conf,
EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
@@ -226,6 +202,10 @@ static void exynos_core_restart(u32 core_id)
if (!of_machine_is_compatible("samsung,exynos3250"))
return;
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+ udelay(10);
+
val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));
@@ -346,7 +326,10 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
call_firmware_op(cpu_boot, core_id);
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ if (soc_is_exynos3250())
+ dsb_sev();
+ else
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
if (pen_release == -1)
break;
@@ -354,9 +337,6 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
udelay(10);
}
- /* No harm if this is called during first boot of secondary CPU */
- exynos_set_delayed_reset_assertion(core_id, false);
-
/*
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
@@ -403,6 +383,8 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
exynos_sysram_init();
+ exynos_set_delayed_reset_assertion(true);
+
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
scu_enable(scu_base_addr());
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index e6209dadc00d..cc75ab448be3 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -127,6 +127,8 @@ int exynos_pm_central_resume(void)
static void exynos_set_wakeupmask(long mask)
{
pmu_raw_writel(mask, S5P_WAKEUP_MASK);
+ if (soc_is_exynos3250())
+ pmu_raw_writel(0x0, S5P_WAKEUP_MASK2);
}
static void exynos_cpu_set_boot_vector(long flags)
@@ -140,7 +142,7 @@ static int exynos_aftr_finisher(unsigned long flags)
{
int ret;
- exynos_set_wakeupmask(0x0000ff3e);
+ exynos_set_wakeupmask(soc_is_exynos3250() ? 0x40003ffe : 0x0000ff3e);
/* Set value of power down register for aftr mode */
exynos_sys_powerdown_conf(SYS_AFTR);
@@ -157,8 +159,13 @@ static int exynos_aftr_finisher(unsigned long flags)
void exynos_enter_aftr(void)
{
+ unsigned int cpuid = smp_processor_id();
+
cpu_pm_enter();
+ if (soc_is_exynos3250())
+ exynos_set_boot_flag(cpuid, C2_STATE);
+
exynos_pm_central_suspend();
if (of_machine_is_compatible("samsung,exynos4212") ||
@@ -178,9 +185,13 @@ void exynos_enter_aftr(void)
exynos_pm_central_resume();
+ if (soc_is_exynos3250())
+ exynos_clear_boot_flag(cpuid, C2_STATE);
+
cpu_pm_exit();
}
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_EXYNOS_CPUIDLE)
static atomic_t cpu1_wakeup = ATOMIC_INIT(0);
static int exynos_cpu0_enter_aftr(void)
@@ -302,3 +313,4 @@ struct cpuidle_exynos_data cpuidle_coupled_exynos_data = {
.pre_enter_aftr = exynos_pre_enter_aftr,
.post_enter_aftr = exynos_post_enter_aftr,
};
+#endif /* CONFIG_SMP && CONFIG_ARM_EXYNOS_CPUIDLE */
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 37266a826437..a9686535f9ed 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -37,6 +37,7 @@ struct exynos_pm_domain {
struct clk *oscclk;
struct clk *clk[MAX_CLK_PER_DOMAIN];
struct clk *pclk[MAX_CLK_PER_DOMAIN];
+ struct clk *asb_clk[MAX_CLK_PER_DOMAIN];
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -45,14 +46,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
void __iomem *base;
u32 timeout, pwr;
char *op;
+ int i;
pd = container_of(domain, struct exynos_pm_domain, pd);
base = pd->base;
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_prepare_enable(pd->asb_clk[i]);
+ }
+
/* Set oscclk before powering off a domain */
if (!power_on) {
- int i;
-
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
if (IS_ERR(pd->clk[i]))
break;
@@ -81,8 +87,6 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
/* Restore clocks after powering on a domain */
if (power_on) {
- int i;
-
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
if (IS_ERR(pd->clk[i]))
break;
@@ -92,6 +96,12 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
}
}
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_disable_unprepare(pd->asb_clk[i]);
+ }
+
return 0;
}
@@ -125,12 +135,21 @@ static __init int exynos4_pm_init_power_domain(void)
return -ENOMEM;
}
- pd->pd.name = kstrdup(np->name, GFP_KERNEL);
+ pd->pd.name = kstrdup(dev_name(dev), GFP_KERNEL);
pd->name = pd->pd.name;
pd->base = of_iomap(np, 0);
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ char clk_name[8];
+
+ snprintf(clk_name, sizeof(clk_name), "asb%d", i);
+ pd->asb_clk[i] = clk_get(dev, clk_name);
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ }
+
pd->oscclk = clk_get(dev, "oscclk");
if (IS_ERR(pd->oscclk))
goto no_clk;
@@ -169,7 +188,7 @@ no_clk:
args.np = np;
args.args_count = 0;
child_domain = of_genpd_get_from_provider(&args);
- if (!child_domain)
+ if (IS_ERR(child_domain))
continue;
if (of_parse_phandle_with_args(np, "power-domains",
@@ -177,7 +196,7 @@ no_clk:
continue;
parent_domain = of_genpd_get_from_provider(&args);
- if (!parent_domain)
+ if (IS_ERR(parent_domain))
continue;
if (pm_genpd_add_subdomain(parent_domain, child_domain))
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index eb461e1c325a..b7614333d296 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -43,12 +43,14 @@
#define S5P_WAKEUP_STAT 0x0600
#define S5P_EINT_WAKEUP_MASK 0x0604
#define S5P_WAKEUP_MASK 0x0608
+#define S5P_WAKEUP_MASK2 0x0614
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
#define S5P_INFORM5 0x0814
#define S5P_INFORM6 0x0818
#define S5P_INFORM7 0x081C
+#define S5P_PMU_SPARE2 0x0908
#define S5P_PMU_SPARE3 0x090C
#define EXYNOS_IROM_DATA2 0x0988
@@ -182,6 +184,7 @@
#define S5P_CORE_LOCAL_PWR_EN 0x3
#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
+#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
/* Only for EXYNOS4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
index 31d25834b9c4..cf950790fbdc 100644
--- a/arch/arm/mach-exynos/sleep.S
+++ b/arch/arm/mach-exynos/sleep.S
@@ -23,14 +23,7 @@
#define CPU_MASK 0xff0ffff0
#define CPU_CORTEX_A9 0x410fc090
- /*
- * The following code is located into the .data section. This is to
- * allow l2x0_regs_phys to be accessed with a relative load while we
- * can't rely on any MMU translation. We could have put l2x0_regs_phys
- * in the .text section as well, but some setups might insist on it to
- * be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
- */
- .data
+ .text
.align
/*
@@ -69,10 +62,12 @@ ENTRY(exynos_cpu_resume_ns)
cmp r0, r1
bne skip_cp15
- adr r0, cp15_save_power
+ adr r0, _cp15_save_power
ldr r1, [r0]
- adr r0, cp15_save_diag
+ ldr r1, [r0, r1]
+ adr r0, _cp15_save_diag
ldr r2, [r0]
+ ldr r2, [r0, r2]
mov r0, #SMC_CMD_C15RESUME
dsb
smc #0
@@ -118,14 +113,20 @@ skip_l2x0:
skip_cp15:
b cpu_resume
ENDPROC(exynos_cpu_resume_ns)
+
+ .align
+_cp15_save_power:
+ .long cp15_save_power - .
+_cp15_save_diag:
+ .long cp15_save_diag - .
+#ifdef CONFIG_CACHE_L2X0
+1: .long l2x0_saved_regs - .
+#endif /* CONFIG_CACHE_L2X0 */
+
+ .data
.globl cp15_save_diag
cp15_save_diag:
.long 0 @ cp15 diagnostic
.globl cp15_save_power
cp15_save_power:
.long 0 @ cp15 power control
-
-#ifdef CONFIG_CACHE_L2X0
- .align
-1: .long l2x0_saved_regs - .
-#endif /* CONFIG_CACHE_L2X0 */
diff --git a/arch/arm/mach-exynos/smc.h b/arch/arm/mach-exynos/smc.h
index f7b82f9c1e21..c2845717bc8f 100644
--- a/arch/arm/mach-exynos/smc.h
+++ b/arch/arm/mach-exynos/smc.h
@@ -17,6 +17,8 @@
#define SMC_CMD_SLEEP (-3)
#define SMC_CMD_CPU1BOOT (-4)
#define SMC_CMD_CPU0AFTR (-5)
+#define SMC_CMD_SAVE (-6)
+#define SMC_CMD_SHUTDOWN (-7)
/* For CP15 Access */
#define SMC_CMD_C15RESUME (-11)
/* For L2 Cache Access */
@@ -32,4 +34,11 @@ extern void exynos_smc(u32 cmd, u32 arg1, u32 arg2, u32 arg3);
#endif /* __ASSEMBLY__ */
+/* op type for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define OP_TYPE_CORE 0x0
+#define OP_TYPE_CLUSTER 0x1
+
+/* Power State required for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define SMC_POWERSTATE_IDLE 0x1
+
#endif
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 2146d918aedd..c0b6dccbf7bd 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -65,8 +65,6 @@ static struct sleep_save exynos_core_save[] = {
struct exynos_pm_data {
const struct exynos_wkup_irq *wkup_irq;
- struct sleep_save *extra_save;
- int num_extra_save;
unsigned int wake_disable_mask;
unsigned int *release_ret_regs;
@@ -77,7 +75,7 @@ struct exynos_pm_data {
int (*cpu_suspend)(unsigned long);
};
-struct exynos_pm_data *pm_data;
+static const struct exynos_pm_data *pm_data;
static int exynos5420_cpu_state;
static unsigned int exynos_pmu_spare3;
@@ -106,7 +104,7 @@ static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
{ /* sentinel */ },
};
-unsigned int exynos_release_ret_regs[] = {
+static unsigned int exynos_release_ret_regs[] = {
S5P_PAD_RET_MAUDIO_OPTION,
S5P_PAD_RET_GPIO_OPTION,
S5P_PAD_RET_UART_OPTION,
@@ -117,7 +115,7 @@ unsigned int exynos_release_ret_regs[] = {
REG_TABLE_END,
};
-unsigned int exynos3250_release_ret_regs[] = {
+static unsigned int exynos3250_release_ret_regs[] = {
S5P_PAD_RET_MAUDIO_OPTION,
S5P_PAD_RET_GPIO_OPTION,
S5P_PAD_RET_UART_OPTION,
@@ -130,7 +128,7 @@ unsigned int exynos3250_release_ret_regs[] = {
REG_TABLE_END,
};
-unsigned int exynos5420_release_ret_regs[] = {
+static unsigned int exynos5420_release_ret_regs[] = {
EXYNOS_PAD_RET_DRAM_OPTION,
EXYNOS_PAD_RET_MAUDIO_OPTION,
EXYNOS_PAD_RET_JTAG_OPTION,
@@ -344,15 +342,13 @@ static void exynos_pm_enter_sleep_mode(void)
static void exynos_pm_prepare(void)
{
+ exynos_set_delayed_reset_assertion(false);
+
/* Set wake-up mask registers */
exynos_pm_set_wakeup_mask();
s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
- if (pm_data->extra_save)
- s3c_pm_do_save(pm_data->extra_save,
- pm_data->num_extra_save);
-
exynos_pm_enter_sleep_mode();
/* ensure at least INFORM0 has the resume address */
@@ -475,10 +471,6 @@ static void exynos_pm_resume(void)
/* For release retention */
exynos_pm_release_retention();
- if (pm_data->extra_save)
- s3c_pm_do_restore_core(pm_data->extra_save,
- pm_data->num_extra_save);
-
s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
if (cpuid == ARM_CPU_PART_CORTEX_A9)
@@ -492,6 +484,7 @@ early_wakeup:
/* Clear SLEEP mode set in INFORM1 */
pmu_raw_writel(0x0, S5P_INFORM1);
+ exynos_set_delayed_reset_assertion(true);
}
static void exynos3250_pm_resume(void)
@@ -685,7 +678,7 @@ static const struct exynos_pm_data exynos5250_pm_data = {
.cpu_suspend = exynos_cpu_suspend,
};
-static struct exynos_pm_data exynos5420_pm_data = {
+static const struct exynos_pm_data exynos5420_pm_data = {
.wkup_irq = exynos5250_wkup_irq,
.wake_disable_mask = (0x7F << 7) | (0x1F << 1),
.release_ret_regs = exynos5420_release_ret_regs,
@@ -733,10 +726,12 @@ void __init exynos_pm_init(void)
return;
}
- if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
+ if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+ return;
+ }
- pm_data = (struct exynos_pm_data *) match->data;
+ pm_data = (const struct exynos_pm_data *) match->data;
/* All wakeup disable */
tmp = pmu_raw_readl(S5P_WAKEUP_MASK);
diff --git a/arch/arm/mach-gemini/common.h b/arch/arm/mach-gemini/common.h
index 38a45260a7c8..dd883698ff7e 100644
--- a/arch/arm/mach-gemini/common.h
+++ b/arch/arm/mach-gemini/common.h
@@ -12,6 +12,8 @@
#ifndef __GEMINI_COMMON_H__
#define __GEMINI_COMMON_H__
+#include <linux/reboot.h>
+
struct mtd_partition;
extern void gemini_map_io(void);
@@ -26,6 +28,6 @@ extern int platform_register_pflash(unsigned int size,
struct mtd_partition *parts,
unsigned int nr_parts);
-extern void gemini_restart(char mode, const char *cmd);
+extern void gemini_restart(enum reboot_mode mode, const char *cmd);
#endif /* __GEMINI_COMMON_H__ */
diff --git a/arch/arm/mach-gemini/reset.c b/arch/arm/mach-gemini/reset.c
index b26659759e27..21a6d6d4f9c4 100644
--- a/arch/arm/mach-gemini/reset.c
+++ b/arch/arm/mach-gemini/reset.c
@@ -14,7 +14,9 @@
#include <mach/hardware.h>
#include <mach/global_reg.h>
-void gemini_restart(char mode, const char *cmd)
+#include "common.h"
+
+void gemini_restart(enum reboot_mode mode, const char *cmd)
{
__raw_writel(RESET_GLOBAL | RESET_CPU1,
IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_RESET);
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index c8dffcee9736..3a3d3e9d7bfd 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -21,6 +21,7 @@ config MXC_AVIC
config MXC_DEBUG_BOARD
bool "Enable MXC debug board(for 3-stack)"
+ depends on MACH_MX27_3DS || MACH_MX31_3DS || MACH_MX35_3DS
help
The debug board is an integral part of the MXC 3-stack(PDK)
platforms, it can be attached or removed from the peripheral
@@ -50,6 +51,7 @@ config HAVE_IMX_ANATOP
config HAVE_IMX_GPC
bool
+ select PM_GENERIC_DOMAINS if PM
config HAVE_IMX_MMDC
bool
@@ -77,13 +79,6 @@ config SOC_IMX21
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
-config SOC_IMX25
- bool
- select ARCH_MXC_IOMUX_V3
- select CPU_ARM926T
- select MXC_AVIC
- select PINCTRL_IMX25
-
config SOC_IMX27
bool
select CPU_ARM926T
@@ -149,62 +144,6 @@ config MACH_MX21ADS
Include support for MX21ADS platform. This includes specific
configurations for the board and its peripherals.
-comment "MX25 platforms:"
-
-config MACH_MX25_3DS
- bool "Support MX25PDK (3DS) Platform"
- select IMX_HAVE_PLATFORM_FLEXCAN
- select IMX_HAVE_PLATFORM_FSL_USB2_UDC
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMXDI_RTC
- select IMX_HAVE_PLATFORM_IMX_FB
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_KEYPAD
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_MXC_EHCI
- select IMX_HAVE_PLATFORM_MXC_NAND
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select SOC_IMX25
-
-config MACH_EUKREA_CPUIMX25SD
- bool "Support Eukrea CPUIMX25 Platform"
- select IMX_HAVE_PLATFORM_FLEXCAN
- select IMX_HAVE_PLATFORM_FSL_USB2_UDC
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMXDI_RTC
- select IMX_HAVE_PLATFORM_IMX_FB
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_MXC_EHCI
- select IMX_HAVE_PLATFORM_MXC_NAND
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select USB_ULPI_VIEWPORT if USB_ULPI
- select SOC_IMX25
-
-choice
- prompt "Baseboard"
- depends on MACH_EUKREA_CPUIMX25SD
- default MACH_EUKREA_MBIMXSD25_BASEBOARD
-
-config MACH_EUKREA_MBIMXSD25_BASEBOARD
- bool "Eukrea MBIMXSD development board"
- select IMX_HAVE_PLATFORM_GPIO_KEYS
- select IMX_HAVE_PLATFORM_IMX_SSI
- select IMX_HAVE_PLATFORM_SPI_IMX
- select LEDS_GPIO_REGISTER
- help
- This adds board specific devices that can be found on Eukrea's
- MBIMXSD evaluation board.
-
-endchoice
-
-config MACH_IMX25_DT
- bool "Support i.MX25 platforms from device tree"
- select SOC_IMX25
- help
- Include support for Freescale i.MX25 based platforms
- using the device tree for discovery
-
comment "MX27 platforms:"
config MACH_MX27ADS
@@ -557,6 +496,20 @@ config MACH_VPR200
endif
+if ARCH_MULTI_V5
+
+comment "Device tree only"
+
+config SOC_IMX25
+ bool "i.MX25 support"
+ select ARCH_MXC_IOMUX_V3
+ select CPU_ARM926T
+ select MXC_AVIC
+ select PINCTRL_IMX25
+ help
+ This enables support for Freescale i.MX25 processor.
+endif
+
if ARCH_MULTI_V7
comment "Device tree only"
@@ -635,9 +588,10 @@ config SOC_VF610
select ARM_GIC
select PINCTRL_VF610
select PL310_ERRATA_769419 if CACHE_L2X0
+ select SMP_ON_UP if SMP
help
- This enable support for Freescale Vybrid VF610 processor.
+ This enables support for Freescale Vybrid VF610 processor.
choice
prompt "Clocksource for scheduler clock"
@@ -667,7 +621,7 @@ config SOC_LS1021A
select ZONE_DMA if ARM_LPAE
help
- This enable support for Freescale LS1021A processor.
+ This enables support for Freescale LS1021A processor.
endif
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 8d1b10180908..3244cf1d2773 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -3,7 +3,7 @@ obj-y := time.o cpu.o system.o irq-common.o
obj-$(CONFIG_SOC_IMX1) += clk-imx1.o mm-imx1.o
obj-$(CONFIG_SOC_IMX21) += clk-imx21.o mm-imx21.o
-obj-$(CONFIG_SOC_IMX25) += clk-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
+obj-$(CONFIG_SOC_IMX25) += clk-imx25.o cpu-imx25.o mach-imx25.o
obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o
obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
@@ -48,12 +48,6 @@ obj-$(CONFIG_MACH_IMX1_DT) += imx1-dt.o
# i.MX21 based machines
obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
-# i.MX25 based machines
-obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
-obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
-obj-$(CONFIG_MACH_IMX25_DT) += imx25-dt.o
-
# i.MX27 based machines
obj-$(CONFIG_MACH_MX27ADS) += mach-mx27ads.o
obj-$(CONFIG_MACH_MX27_3DS) += mach-mx27_3ds.o
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index 59c0c8558c6b..9c2633a9de9f 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -30,7 +30,6 @@
#include "clk.h"
#include "common.h"
#include "hardware.h"
-#include "mx25.h"
#define CCM_MPCTL 0x00
#define CCM_UPCTL 0x04
@@ -239,80 +238,6 @@ static int __init __mx25_clocks_init(unsigned long osc_rate,
return 0;
}
-int __init mx25_clocks_init(void)
-{
- void __iomem *ccm;
-
- ccm = ioremap(MX25_CRM_BASE_ADDR, SZ_16K);
-
- __mx25_clocks_init(24000000, ccm);
-
- clk_register_clkdev(clk[gpt1_ipg], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
- /* i.mx25 has the i.mx21 type uart */
- clk_register_clkdev(clk[uart1_ipg], "ipg", "imx21-uart.0");
- clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.0");
- clk_register_clkdev(clk[uart2_ipg], "ipg", "imx21-uart.1");
- clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.1");
- clk_register_clkdev(clk[uart3_ipg], "ipg", "imx21-uart.2");
- clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.2");
- clk_register_clkdev(clk[uart4_ipg], "ipg", "imx21-uart.3");
- clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.3");
- clk_register_clkdev(clk[uart5_ipg], "ipg", "imx21-uart.4");
- clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.4");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
- clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.0");
- clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
- clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.1");
- clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
- clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
- clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
- clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
- clk_register_clkdev(clk[usbotg_ahb], "ahb", "imx-udc-mx27");
- clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
- clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0");
- /* i.mx25 has the i.mx35 type cspi */
- clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
- clk_register_clkdev(clk[cspi2_ipg], NULL, "imx35-cspi.1");
- clk_register_clkdev(clk[cspi3_ipg], NULL, "imx35-cspi.2");
- clk_register_clkdev(clk[kpp_ipg], NULL, "imx-keypad");
- clk_register_clkdev(clk[tsc_ipg], NULL, "mx25-adc");
- clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx21-i2c.0");
- clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx21-i2c.1");
- clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx21-i2c.2");
- clk_register_clkdev(clk[fec_ipg], "ipg", "imx25-fec.0");
- clk_register_clkdev(clk[fec_ahb], "ahb", "imx25-fec.0");
- clk_register_clkdev(clk[dryice_ipg], NULL, "imxdi_rtc.0");
- clk_register_clkdev(clk[lcdc_ipg_per], "per", "imx21-fb.0");
- clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx21-fb.0");
- clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx21-fb.0");
- clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
- clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
- clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
- clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
- clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
- clk_register_clkdev(clk[esdhc2_ipg_per], "per", "sdhci-esdhc-imx25.1");
- clk_register_clkdev(clk[esdhc2_ipg], "ipg", "sdhci-esdhc-imx25.1");
- clk_register_clkdev(clk[esdhc2_ahb], "ahb", "sdhci-esdhc-imx25.1");
- clk_register_clkdev(clk[csi_ipg_per], "per", "imx25-camera.0");
- clk_register_clkdev(clk[csi_ipg], "ipg", "imx25-camera.0");
- clk_register_clkdev(clk[csi_ahb], "ahb", "imx25-camera.0");
- clk_register_clkdev(clk[dummy], "audmux", NULL);
- clk_register_clkdev(clk[can1_ipg], NULL, "flexcan.0");
- clk_register_clkdev(clk[can2_ipg], NULL, "flexcan.1");
- /* i.mx25 has the i.mx35 type sdma */
- clk_register_clkdev(clk[sdma_ipg], "ipg", "imx35-sdma");
- clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
- clk_register_clkdev(clk[iim_ipg], "iim", NULL);
-
- mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1);
-
- return 0;
-}
-
static void __init mx25_clocks_init_dt(struct device_node *np)
{
struct device_node *refnp;
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index d04a430607b8..469a150bf98f 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -119,6 +119,7 @@ static unsigned int share_count_asrc;
static unsigned int share_count_ssi1;
static unsigned int share_count_ssi2;
static unsigned int share_count_ssi3;
+static unsigned int share_count_mipi_core_cfg;
static void __init imx6q_clocks_init(struct device_node *ccm_node)
{
@@ -246,6 +247,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2);
clk[IMX6QDL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
+ clk[IMX6QDL_CLK_VIDEO_27M] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20);
if (cpu_is_imx6dl()) {
clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1);
clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1);
@@ -400,7 +402,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
- clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "pll3_pfd1_540m", base + 0x70, 4);
+ clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
@@ -415,7 +417,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
clk[IMX6QDL_CLK_IPU2_DI1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
- clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16);
+ clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2_shared("hsi_tx", "hsi_tx_podf", base + 0x74, 16, &share_count_mipi_core_cfg);
+ clk[IMX6QDL_CLK_MIPI_CORE_CFG] = imx_clk_gate2_shared("mipi_core_cfg", "video_27m", base + 0x74, 16, &share_count_mipi_core_cfg);
+ clk[IMX6QDL_CLK_MIPI_IPG] = imx_clk_gate2_shared("mipi_ipg", "ipg", base + 0x74, 16, &share_count_mipi_core_cfg);
if (cpu_is_imx6dl())
/*
* The multiplexer and divider of the imx6q clock gpu2d get
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 1028b6c505c4..0f04e30b726d 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -23,13 +23,11 @@ struct of_device_id;
void mx1_map_io(void);
void mx21_map_io(void);
-void mx25_map_io(void);
void mx27_map_io(void);
void mx31_map_io(void);
void mx35_map_io(void);
void imx1_init_early(void);
void imx21_init_early(void);
-void imx25_init_early(void);
void imx27_init_early(void);
void imx31_init_early(void);
void imx35_init_early(void);
@@ -37,13 +35,11 @@ void mxc_init_irq(void __iomem *);
void tzic_init_irq(void);
void mx1_init_irq(void);
void mx21_init_irq(void);
-void mx25_init_irq(void);
void mx27_init_irq(void);
void mx31_init_irq(void);
void mx35_init_irq(void);
void imx1_soc_init(void);
void imx21_soc_init(void);
-void imx25_soc_init(void);
void imx27_soc_init(void);
void imx31_soc_init(void);
void imx35_soc_init(void);
@@ -51,7 +47,6 @@ void epit_timer_init(void __iomem *base, int irq);
void mxc_timer_init(void __iomem *, int);
int mx1_clocks_init(unsigned long fref);
int mx21_clocks_init(unsigned long lref, unsigned long fref);
-int mx25_clocks_init(void);
int mx27_clocks_init(unsigned long fref);
int mx31_clocks_init(unsigned long fref);
int mx35_clocks_init(void);
@@ -71,6 +66,7 @@ unsigned int imx_get_soc_revision(void);
void imx_init_revision_from_anatop(void);
struct device *imx_soc_device_init(void);
void imx6_enable_rbc(bool enable);
+void imx_gpc_check_dt(void);
void imx_gpc_set_arm_power_in_lpm(bool power_off);
void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw);
void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw);
@@ -106,7 +102,6 @@ static inline void imx_scu_map_io(void) {}
static inline void imx_smp_prepare(void) {}
#endif
void imx_src_init(void);
-void imx_gpc_init(void);
void imx_gpc_pre_suspend(bool arm_power_off);
void imx_gpc_post_resume(void);
void imx_gpc_mask_all(void);
diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c
index 96ec64b5ff7d..d0ad67e802d3 100644
--- a/arch/arm/mach-imx/cpu-imx25.c
+++ b/arch/arm/mach-imx/cpu-imx25.c
@@ -11,6 +11,8 @@
*/
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "iim.h"
#include "hardware.h"
@@ -20,8 +22,15 @@ static int mx25_cpu_rev = -1;
static int mx25_read_cpu_rev(void)
{
u32 rev;
+ void __iomem *iim_base;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
+ iim_base = of_iomap(np, 0);
+ BUG_ON(!iim_base);
+ rev = readl(iim_base + MXC_IIMSREV);
+ iounmap(iim_base);
- rev = __raw_readl(MX25_IO_ADDRESS(MX25_IIM_BASE_ADDR + MXC_IIMSREV));
switch (rev) {
case 0x00:
return IMX_CHIP_REVISION_1_0;
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index d76d08623f9f..8e21ccc1eda2 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -9,7 +9,6 @@
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include "common.h"
#include "cpuidle.h"
diff --git a/arch/arm/mach-imx/cpuidle-imx6sl.c b/arch/arm/mach-imx/cpuidle-imx6sl.c
index 7d92e6584551..5742a9fd1ef2 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sl.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sl.c
@@ -9,7 +9,6 @@
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include "common.h"
#include "cpuidle.h"
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 5a36722b089d..2c9f1a8bf245 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -10,7 +10,6 @@
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include "common.h"
diff --git a/arch/arm/mach-imx/devices-imx25.h b/arch/arm/mach-imx/devices-imx25.h
deleted file mode 100644
index 61a114cddc39..000000000000
--- a/arch/arm/mach-imx/devices-imx25.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2010 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include "devices/devices-common.h"
-
-extern const struct imx_fec_data imx25_fec_data;
-#define imx25_add_fec(pdata) \
- imx_add_fec(&imx25_fec_data, pdata)
-
-extern const struct imx_flexcan_data imx25_flexcan_data[];
-#define imx25_add_flexcan(id) \
- imx_add_flexcan(&imx25_flexcan_data[id])
-#define imx25_add_flexcan0() imx25_add_flexcan(0)
-#define imx25_add_flexcan1() imx25_add_flexcan(1)
-
-extern const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data;
-#define imx25_add_fsl_usb2_udc(pdata) \
- imx_add_fsl_usb2_udc(&imx25_fsl_usb2_udc_data, pdata)
-
-extern struct imx_imxdi_rtc_data imx25_imxdi_rtc_data;
-#define imx25_add_imxdi_rtc() \
- imx_add_imxdi_rtc(&imx25_imxdi_rtc_data)
-
-extern const struct imx_imx2_wdt_data imx25_imx2_wdt_data;
-#define imx25_add_imx2_wdt() \
- imx_add_imx2_wdt(&imx25_imx2_wdt_data)
-
-extern const struct imx_imx_fb_data imx25_imx_fb_data;
-#define imx25_add_imx_fb(pdata) \
- imx_add_imx_fb(&imx25_imx_fb_data, pdata)
-
-extern const struct imx_imx_i2c_data imx25_imx_i2c_data[];
-#define imx25_add_imx_i2c(id, pdata) \
- imx_add_imx_i2c(&imx25_imx_i2c_data[id], pdata)
-#define imx25_add_imx_i2c0(pdata) imx25_add_imx_i2c(0, pdata)
-#define imx25_add_imx_i2c1(pdata) imx25_add_imx_i2c(1, pdata)
-#define imx25_add_imx_i2c2(pdata) imx25_add_imx_i2c(2, pdata)
-
-extern const struct imx_imx_keypad_data imx25_imx_keypad_data;
-#define imx25_add_imx_keypad(pdata) \
- imx_add_imx_keypad(&imx25_imx_keypad_data, pdata)
-
-extern const struct imx_imx_ssi_data imx25_imx_ssi_data[];
-#define imx25_add_imx_ssi(id, pdata) \
- imx_add_imx_ssi(&imx25_imx_ssi_data[id], pdata)
-
-extern const struct imx_imx_uart_1irq_data imx25_imx_uart_data[];
-#define imx25_add_imx_uart(id, pdata) \
- imx_add_imx_uart_1irq(&imx25_imx_uart_data[id], pdata)
-#define imx25_add_imx_uart0(pdata) imx25_add_imx_uart(0, pdata)
-#define imx25_add_imx_uart1(pdata) imx25_add_imx_uart(1, pdata)
-#define imx25_add_imx_uart2(pdata) imx25_add_imx_uart(2, pdata)
-#define imx25_add_imx_uart3(pdata) imx25_add_imx_uart(3, pdata)
-#define imx25_add_imx_uart4(pdata) imx25_add_imx_uart(4, pdata)
-
-extern const struct imx_mx2_camera_data imx25_mx2_camera_data;
-#define imx25_add_mx2_camera(pdata) \
- imx_add_mx2_camera(&imx25_mx2_camera_data, pdata)
-
-extern const struct imx_mxc_ehci_data imx25_mxc_ehci_otg_data;
-#define imx25_add_mxc_ehci_otg(pdata) \
- imx_add_mxc_ehci(&imx25_mxc_ehci_otg_data, pdata)
-extern const struct imx_mxc_ehci_data imx25_mxc_ehci_hs_data;
-#define imx25_add_mxc_ehci_hs(pdata) \
- imx_add_mxc_ehci(&imx25_mxc_ehci_hs_data, pdata)
-
-extern const struct imx_mxc_nand_data imx25_mxc_nand_data;
-#define imx25_add_mxc_nand(pdata) \
- imx_add_mxc_nand(&imx25_mxc_nand_data, pdata)
-
-extern const struct imx_sdhci_esdhc_imx_data imx25_sdhci_esdhc_imx_data[];
-#define imx25_add_sdhci_esdhc_imx(id, pdata) \
- imx_add_sdhci_esdhc_imx(&imx25_sdhci_esdhc_imx_data[id], pdata)
-
-extern const struct imx_spi_imx_data imx25_cspi_data[];
-#define imx25_add_spi_imx(id, pdata) \
- imx_add_spi_imx(&imx25_cspi_data[id], pdata)
-#define imx25_add_spi_imx0(pdata) imx25_add_spi_imx(0, pdata)
-#define imx25_add_spi_imx1(pdata) imx25_add_spi_imx(1, pdata)
-#define imx25_add_spi_imx2(pdata) imx25_add_spi_imx(2, pdata)
diff --git a/arch/arm/mach-imx/devices/Kconfig b/arch/arm/mach-imx/devices/Kconfig
index 1d2cc1805f3e..3a552989248e 100644
--- a/arch/arm/mach-imx/devices/Kconfig
+++ b/arch/arm/mach-imx/devices/Kconfig
@@ -21,9 +21,6 @@ config IMX_HAVE_PLATFORM_IMX27_CODA
config IMX_HAVE_PLATFORM_IMX2_WDT
bool
-config IMX_HAVE_PLATFORM_IMXDI_RTC
- bool
-
config IMX_HAVE_PLATFORM_IMX_FB
bool
diff --git a/arch/arm/mach-imx/devices/Makefile b/arch/arm/mach-imx/devices/Makefile
index 8fdb12b4ca7e..e5cf587bc1a0 100644
--- a/arch/arm/mach-imx/devices/Makefile
+++ b/arch/arm/mach-imx/devices/Makefile
@@ -8,7 +8,6 @@ obj-y += platform-gpio-mxc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX21_HCD) += platform-imx21-hcd.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX27_CODA) += platform-imx27-coda.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX2_WDT) += platform-imx2-wdt.o
-obj-$(CONFIG_IMX_HAVE_PLATFORM_IMXDI_RTC) += platform-imxdi_rtc.o
obj-y += platform-imx-dma.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX_FB) += platform-imx-fb.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX_I2C) += platform-imx-i2c.o
diff --git a/arch/arm/mach-imx/devices/platform-fec.c b/arch/arm/mach-imx/devices/platform-fec.c
index d86f9250b4ee..b403a4fe2892 100644
--- a/arch/arm/mach-imx/devices/platform-fec.c
+++ b/arch/arm/mach-imx/devices/platform-fec.c
@@ -19,11 +19,6 @@
.irq = soc ## _INT_FEC, \
}
-#ifdef CONFIG_SOC_IMX25
-const struct imx_fec_data imx25_fec_data __initconst =
- imx_fec_data_entry_single(MX25, "imx25-fec");
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_fec_data imx27_fec_data __initconst =
imx_fec_data_entry_single(MX27, "imx27-fec");
diff --git a/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c b/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
index 23b0061347cb..25e1de6f3a47 100644
--- a/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
+++ b/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
@@ -18,11 +18,6 @@
.irq = soc ## _INT_USB_OTG, \
}
-#ifdef CONFIG_SOC_IMX25
-const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst =
- imx_fsl_usb2_udc_data_entry_single(MX25, "imx-udc-mx27");
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst =
imx_fsl_usb2_udc_data_entry_single(MX27, "imx-udc-mx27");
diff --git a/arch/arm/mach-imx/devices/platform-imx-fb.c b/arch/arm/mach-imx/devices/platform-imx-fb.c
index 25a47c616b2d..7df6328306f9 100644
--- a/arch/arm/mach-imx/devices/platform-imx-fb.c
+++ b/arch/arm/mach-imx/devices/platform-imx-fb.c
@@ -29,11 +29,6 @@ const struct imx_imx_fb_data imx21_imx_fb_data __initconst =
imx_imx_fb_data_entry_single(MX21, "imx21-fb", SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */
-#ifdef CONFIG_SOC_IMX25
-const struct imx_imx_fb_data imx25_imx_fb_data __initconst =
- imx_imx_fb_data_entry_single(MX25, "imx21-fb", SZ_16K);
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_imx_fb_data imx27_imx_fb_data __initconst =
imx_imx_fb_data_entry_single(MX27, "imx21-fb", SZ_4K);
diff --git a/arch/arm/mach-imx/devices/platform-imx-i2c.c b/arch/arm/mach-imx/devices/platform-imx-i2c.c
index 644ac2689882..ae9791522fc8 100644
--- a/arch/arm/mach-imx/devices/platform-imx-i2c.c
+++ b/arch/arm/mach-imx/devices/platform-imx-i2c.c
@@ -31,16 +31,6 @@ const struct imx_imx_i2c_data imx21_imx_i2c_data __initconst =
imx_imx_i2c_data_entry_single(MX21, "imx21-i2c", 0, , SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */
-#ifdef CONFIG_SOC_IMX25
-const struct imx_imx_i2c_data imx25_imx_i2c_data[] __initconst = {
-#define imx25_imx_i2c_data_entry(_id, _hwid) \
- imx_imx_i2c_data_entry(MX25, "imx21-i2c", _id, _hwid, SZ_16K)
- imx25_imx_i2c_data_entry(0, 1),
- imx25_imx_i2c_data_entry(1, 2),
- imx25_imx_i2c_data_entry(2, 3),
-};
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_imx_i2c_data imx27_imx_i2c_data[] __initconst = {
#define imx27_imx_i2c_data_entry(_id, _hwid) \
diff --git a/arch/arm/mach-imx/devices/platform-imx-keypad.c b/arch/arm/mach-imx/devices/platform-imx-keypad.c
index f42200b7aca9..479e4d70dbf9 100644
--- a/arch/arm/mach-imx/devices/platform-imx-keypad.c
+++ b/arch/arm/mach-imx/devices/platform-imx-keypad.c
@@ -21,11 +21,6 @@ const struct imx_imx_keypad_data imx21_imx_keypad_data __initconst =
imx_imx_keypad_data_entry_single(MX21, SZ_16);
#endif /* ifdef CONFIG_SOC_IMX21 */
-#ifdef CONFIG_SOC_IMX25
-const struct imx_imx_keypad_data imx25_imx_keypad_data __initconst =
- imx_imx_keypad_data_entry_single(MX25, SZ_16K);
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_imx_keypad_data imx27_imx_keypad_data __initconst =
imx_imx_keypad_data_entry_single(MX27, SZ_16);
diff --git a/arch/arm/mach-imx/devices/platform-imx-ssi.c b/arch/arm/mach-imx/devices/platform-imx-ssi.c
index 1c7c721ebff1..6f0e94eb29ee 100644
--- a/arch/arm/mach-imx/devices/platform-imx-ssi.c
+++ b/arch/arm/mach-imx/devices/platform-imx-ssi.c
@@ -30,15 +30,6 @@ const struct imx_imx_ssi_data imx21_imx_ssi_data[] __initconst = {
};
#endif /* ifdef CONFIG_SOC_IMX21 */
-#ifdef CONFIG_SOC_IMX25
-const struct imx_imx_ssi_data imx25_imx_ssi_data[] __initconst = {
-#define imx25_imx_ssi_data_entry(_id, _hwid) \
- imx_imx_ssi_data_entry(MX25, _id, _hwid, SZ_4K)
- imx25_imx_ssi_data_entry(0, 1),
- imx25_imx_ssi_data_entry(1, 2),
-};
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_imx_ssi_data imx27_imx_ssi_data[] __initconst = {
#define imx27_imx_ssi_data_entry(_id, _hwid) \
diff --git a/arch/arm/mach-imx/devices/platform-imx-uart.c b/arch/arm/mach-imx/devices/platform-imx-uart.c
index 8c01836bc1d4..6962cff4a950 100644
--- a/arch/arm/mach-imx/devices/platform-imx-uart.c
+++ b/arch/arm/mach-imx/devices/platform-imx-uart.c
@@ -47,18 +47,6 @@ const struct imx_imx_uart_1irq_data imx21_imx_uart_data[] __initconst = {
};
#endif
-#ifdef CONFIG_SOC_IMX25
-const struct imx_imx_uart_1irq_data imx25_imx_uart_data[] __initconst = {
-#define imx25_imx_uart_data_entry(_id, _hwid) \
- imx_imx_uart_1irq_data_entry(MX25, _id, _hwid, SZ_16K)
- imx25_imx_uart_data_entry(0, 1),
- imx25_imx_uart_data_entry(1, 2),
- imx25_imx_uart_data_entry(2, 3),
- imx25_imx_uart_data_entry(3, 4),
- imx25_imx_uart_data_entry(4, 5),
-};
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_imx_uart_1irq_data imx27_imx_uart_data[] __initconst = {
#define imx27_imx_uart_data_entry(_id, _hwid) \
diff --git a/arch/arm/mach-imx/devices/platform-imx2-wdt.c b/arch/arm/mach-imx/devices/platform-imx2-wdt.c
index 54f63bc25ca4..8c134c8d7500 100644
--- a/arch/arm/mach-imx/devices/platform-imx2-wdt.c
+++ b/arch/arm/mach-imx/devices/platform-imx2-wdt.c
@@ -25,11 +25,6 @@ const struct imx_imx2_wdt_data imx21_imx2_wdt_data __initconst =
imx_imx2_wdt_data_entry_single(MX21, 0, , SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */
-#ifdef CONFIG_SOC_IMX25
-const struct imx_imx2_wdt_data imx25_imx2_wdt_data __initconst =
- imx_imx2_wdt_data_entry_single(MX25, 0, , SZ_16K);
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_imx2_wdt_data imx27_imx2_wdt_data __initconst =
imx_imx2_wdt_data_entry_single(MX27, 0, , SZ_4K);
diff --git a/arch/arm/mach-imx/devices/platform-imxdi_rtc.c b/arch/arm/mach-imx/devices/platform-imxdi_rtc.c
deleted file mode 100644
index 5bb490d556ea..000000000000
--- a/arch/arm/mach-imx/devices/platform-imxdi_rtc.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2010 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include <asm/sizes.h>
-
-#include "../hardware.h"
-#include "devices-common.h"
-
-#define imx_imxdi_rtc_data_entry_single(soc) \
- { \
- .iobase = soc ## _DRYICE_BASE_ADDR, \
- .irq = soc ## _INT_DRYICE, \
- }
-
-#ifdef CONFIG_SOC_IMX25
-const struct imx_imxdi_rtc_data imx25_imxdi_rtc_data __initconst =
- imx_imxdi_rtc_data_entry_single(MX25);
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
-struct platform_device *__init imx_add_imxdi_rtc(
- const struct imx_imxdi_rtc_data *data)
-{
- struct resource res[] = {
- {
- .start = data->iobase,
- .end = data->iobase + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = data->irq,
- .end = data->irq,
- .flags = IORESOURCE_IRQ,
- },
- };
-
- return imx_add_platform_device("imxdi_rtc", 0,
- res, ARRAY_SIZE(res), NULL, 0);
-}
diff --git a/arch/arm/mach-imx/devices/platform-mx2-camera.c b/arch/arm/mach-imx/devices/platform-mx2-camera.c
index b53e1f348f51..4c377c33242c 100644
--- a/arch/arm/mach-imx/devices/platform-mx2-camera.c
+++ b/arch/arm/mach-imx/devices/platform-mx2-camera.c
@@ -27,11 +27,6 @@
.irqemmaprp = soc ## _INT_EMMAPRP, \
}
-#ifdef CONFIG_SOC_IMX25
-const struct imx_mx2_camera_data imx25_mx2_camera_data __initconst =
- imx_mx2_camera_data_entry_single(MX25, "imx25-camera");
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_mx2_camera_data imx27_mx2_camera_data __initconst =
imx_mx2_camera_data_entry_single_emma(MX27, "imx27-camera");
diff --git a/arch/arm/mach-imx/devices/platform-mxc-ehci.c b/arch/arm/mach-imx/devices/platform-mxc-ehci.c
index 296353662ff0..4537abd2a8f2 100644
--- a/arch/arm/mach-imx/devices/platform-mxc-ehci.c
+++ b/arch/arm/mach-imx/devices/platform-mxc-ehci.c
@@ -18,13 +18,6 @@
.irq = soc ## _INT_USB_ ## hs, \
}
-#ifdef CONFIG_SOC_IMX25
-const struct imx_mxc_ehci_data imx25_mxc_ehci_otg_data __initconst =
- imx_mxc_ehci_data_entry_single(MX25, 0, OTG);
-const struct imx_mxc_ehci_data imx25_mxc_ehci_hs_data __initconst =
- imx_mxc_ehci_data_entry_single(MX25, 1, HS);
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_mxc_ehci_data imx27_mxc_ehci_otg_data __initconst =
imx_mxc_ehci_data_entry_single(MX27, 0, OTG);
diff --git a/arch/arm/mach-imx/devices/platform-mxc_nand.c b/arch/arm/mach-imx/devices/platform-mxc_nand.c
index fa618a34f462..676df4920c7b 100644
--- a/arch/arm/mach-imx/devices/platform-mxc_nand.c
+++ b/arch/arm/mach-imx/devices/platform-mxc_nand.c
@@ -34,11 +34,6 @@ const struct imx_mxc_nand_data imx21_mxc_nand_data __initconst =
imx_mxc_nand_data_entry_single(MX21, "imx21-nand", SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */
-#ifdef CONFIG_SOC_IMX25
-const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst =
- imx_mxc_nand_data_entry_single(MX25, "imx25-nand", SZ_8K);
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_mxc_nand_data imx27_mxc_nand_data __initconst =
imx_mxc_nand_data_entry_single(MX27, "imx27-nand", SZ_4K);
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index fb8d4a2ad48c..a5edd7d60266 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Pengutronix, Wolfram Sang <w.sang@pengutronix.de>
+ * Copyright (C) 2010 Pengutronix, Wolfram Sang <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
diff --git a/arch/arm/mach-imx/devices/platform-spi_imx.c b/arch/arm/mach-imx/devices/platform-spi_imx.c
index aca825d74c48..5e9707b47f92 100644
--- a/arch/arm/mach-imx/devices/platform-spi_imx.c
+++ b/arch/arm/mach-imx/devices/platform-spi_imx.c
@@ -39,17 +39,6 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
};
#endif
-#ifdef CONFIG_SOC_IMX25
-/* i.mx25 has the i.mx35 type cspi */
-const struct imx_spi_imx_data imx25_cspi_data[] __initconst = {
-#define imx25_cspi_data_entry(_id, _hwid) \
- imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K)
- imx25_cspi_data_entry(0, 1),
- imx25_cspi_data_entry(1, 2),
- imx25_cspi_data_entry(2, 3),
-};
-#endif /* ifdef CONFIG_SOC_IMX25 */
-
#ifdef CONFIG_SOC_IMX27
const struct imx_spi_imx_data imx27_cspi_data[] __initconst = {
#define imx27_cspi_data_entry(_id, _hwid) \
diff --git a/arch/arm/mach-imx/ehci-imx25.c b/arch/arm/mach-imx/ehci-imx25.c
deleted file mode 100644
index 42a5a3d14c5f..000000000000
--- a/arch/arm/mach-imx/ehci-imx25.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- * Copyright (C) 2010 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/platform_data/usb-ehci-mxc.h>
-
-#include "ehci.h"
-#include "hardware.h"
-
-#define USBCTRL_OTGBASE_OFFSET 0x600
-
-#define MX25_OTG_SIC_SHIFT 29
-#define MX25_OTG_SIC_MASK (0x3 << MX25_OTG_SIC_SHIFT)
-#define MX25_OTG_PM_BIT (1 << 24)
-#define MX25_OTG_PP_BIT (1 << 11)
-#define MX25_OTG_OCPOL_BIT (1 << 3)
-
-#define MX25_H1_SIC_SHIFT 21
-#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
-#define MX25_H1_PP_BIT (1 << 18)
-#define MX25_H1_PM_BIT (1 << 16)
-#define MX25_H1_IPPUE_UP_BIT (1 << 7)
-#define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
-#define MX25_H1_TLL_BIT (1 << 5)
-#define MX25_H1_USBTE_BIT (1 << 4)
-#define MX25_H1_OCPOL_BIT (1 << 2)
-
-int mx25_initialize_usb_hw(int port, unsigned int flags)
-{
- unsigned int v;
-
- v = readl(MX25_IO_ADDRESS(MX25_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
-
- switch (port) {
- case 0: /* OTG port */
- v &= ~(MX25_OTG_SIC_MASK | MX25_OTG_PM_BIT | MX25_OTG_PP_BIT |
- MX25_OTG_OCPOL_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX25_OTG_SIC_SHIFT;
-
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX25_OTG_PM_BIT;
-
- if (flags & MXC_EHCI_PWR_PIN_ACTIVE_HIGH)
- v |= MX25_OTG_PP_BIT;
-
- if (!(flags & MXC_EHCI_OC_PIN_ACTIVE_LOW))
- v |= MX25_OTG_OCPOL_BIT;
-
- break;
- case 1: /* H1 port */
- v &= ~(MX25_H1_SIC_MASK | MX25_H1_PM_BIT | MX25_H1_PP_BIT |
- MX25_H1_OCPOL_BIT | MX25_H1_TLL_BIT | MX25_H1_USBTE_BIT |
- MX25_H1_IPPUE_DOWN_BIT | MX25_H1_IPPUE_UP_BIT);
- v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX25_H1_SIC_SHIFT;
-
- if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
- v |= MX25_H1_PM_BIT;
-
- if (flags & MXC_EHCI_PWR_PIN_ACTIVE_HIGH)
- v |= MX25_H1_PP_BIT;
-
- if (!(flags & MXC_EHCI_OC_PIN_ACTIVE_LOW))
- v |= MX25_H1_OCPOL_BIT;
-
- if (!(flags & MXC_EHCI_TTL_ENABLED))
- v |= MX25_H1_TLL_BIT;
-
- if (flags & MXC_EHCI_INTERNAL_PHY)
- v |= MX25_H1_USBTE_BIT;
-
- if (flags & MXC_EHCI_IPPUE_DOWN)
- v |= MX25_H1_IPPUE_DOWN_BIT;
-
- if (flags & MXC_EHCI_IPPUE_UP)
- v |= MX25_H1_IPPUE_UP_BIT;
-
- break;
- default:
- return -EINVAL;
- }
-
- writel(v, MX25_IO_ADDRESS(MX25_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));
-
- return 0;
-}
-
diff --git a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
deleted file mode 100644
index e77cc3af6db2..000000000000
--- a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Copyright (C) 2010 Eric Benard - eric@eukrea.com
- *
- * Based on pcm970-baseboard.c which is :
- * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/gpio.h>
-#include <linux/leds.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/spi/spi.h>
-#include <video/platform_lcd.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "devices-imx25.h"
-#include "hardware.h"
-#include "iomux-mx25.h"
-#include "mx25.h"
-
-static iomux_v3_cfg_t eukrea_mbimxsd_pads[] = {
- /* LCD */
- MX25_PAD_LD0__LD0,
- MX25_PAD_LD1__LD1,
- MX25_PAD_LD2__LD2,
- MX25_PAD_LD3__LD3,
- MX25_PAD_LD4__LD4,
- MX25_PAD_LD5__LD5,
- MX25_PAD_LD6__LD6,
- MX25_PAD_LD7__LD7,
- MX25_PAD_LD8__LD8,
- MX25_PAD_LD9__LD9,
- MX25_PAD_LD10__LD10,
- MX25_PAD_LD11__LD11,
- MX25_PAD_LD12__LD12,
- MX25_PAD_LD13__LD13,
- MX25_PAD_LD14__LD14,
- MX25_PAD_LD15__LD15,
- MX25_PAD_GPIO_E__LD16,
- MX25_PAD_GPIO_F__LD17,
- MX25_PAD_HSYNC__HSYNC,
- MX25_PAD_VSYNC__VSYNC,
- MX25_PAD_LSCLK__LSCLK,
- MX25_PAD_OE_ACD__OE_ACD,
- MX25_PAD_CONTRAST__CONTRAST,
- /* LCD_PWR */
- MX25_PAD_PWM__GPIO_1_26,
- /* LED */
- MX25_PAD_POWER_FAIL__GPIO_3_19,
- /* SWITCH */
- MX25_PAD_VSTBY_ACK__GPIO_3_18,
- /* UART2 */
- MX25_PAD_UART2_RTS__UART2_RTS,
- MX25_PAD_UART2_CTS__UART2_CTS,
- MX25_PAD_UART2_TXD__UART2_TXD,
- MX25_PAD_UART2_RXD__UART2_RXD,
- /* SD1 */
- MX25_PAD_SD1_CMD__SD1_CMD,
- MX25_PAD_SD1_CLK__SD1_CLK,
- MX25_PAD_SD1_DATA0__SD1_DATA0,
- MX25_PAD_SD1_DATA1__SD1_DATA1,
- MX25_PAD_SD1_DATA2__SD1_DATA2,
- MX25_PAD_SD1_DATA3__SD1_DATA3,
- /* SD1 CD */
- MX25_PAD_DE_B__GPIO_2_20,
- /* I2S */
- MX25_PAD_KPP_COL3__AUD5_TXFS,
- MX25_PAD_KPP_COL2__AUD5_TXC,
- MX25_PAD_KPP_COL1__AUD5_RXD,
- MX25_PAD_KPP_COL0__AUD5_TXD,
- /* CAN */
- MX25_PAD_GPIO_D__CAN2_RX,
- MX25_PAD_GPIO_C__CAN2_TX,
- /* SPI1 */
- MX25_PAD_CSPI1_MOSI__CSPI1_MOSI,
- MX25_PAD_CSPI1_MISO__CSPI1_MISO,
- MX25_PAD_CSPI1_SS0__GPIO_1_16,
- MX25_PAD_CSPI1_SS1__GPIO_1_17,
- MX25_PAD_CSPI1_SCLK__CSPI1_SCLK,
- MX25_PAD_CSPI1_RDY__GPIO_2_22,
-};
-
-#define GPIO_LED1 IMX_GPIO_NR(3, 19)
-#define GPIO_SWITCH1 IMX_GPIO_NR(3, 18)
-#define GPIO_SD1CD IMX_GPIO_NR(2, 20)
-#define GPIO_LCDPWR IMX_GPIO_NR(1, 26)
-#define GPIO_SPI1_SS0 IMX_GPIO_NR(1, 16)
-#define GPIO_SPI1_SS1 IMX_GPIO_NR(1, 17)
-#define GPIO_SPI1_IRQ IMX_GPIO_NR(2, 22)
-
-static struct imx_fb_videomode eukrea_mximxsd_modes[] = {
- {
- .mode = {
- .name = "CMO-QVGA",
- .refresh = 60,
- .xres = 320,
- .yres = 240,
- .pixclock = KHZ2PICOS(6500),
- .left_margin = 30,
- .right_margin = 38,
- .upper_margin = 20,
- .lower_margin = 3,
- .hsync_len = 15,
- .vsync_len = 4,
- },
- .bpp = 16,
- .pcr = 0xCAD08B80,
- }, {
- .mode = {
- .name = "DVI-VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 32000,
- .hsync_len = 7,
- .left_margin = 100,
- .right_margin = 100,
- .vsync_len = 7,
- .upper_margin = 7,
- .lower_margin = 100,
- },
- .pcr = 0xFA208B80,
- .bpp = 16,
- }, {
- .mode = {
- .name = "DVI-SVGA",
- .refresh = 60,
- .xres = 800,
- .yres = 600,
- .pixclock = 25000,
- .hsync_len = 7,
- .left_margin = 75,
- .right_margin = 75,
- .vsync_len = 7,
- .upper_margin = 7,
- .lower_margin = 75,
- },
- .pcr = 0xFA208B80,
- .bpp = 16,
- },
-};
-
-static const struct imx_fb_platform_data eukrea_mximxsd_fb_pdata __initconst = {
- .mode = eukrea_mximxsd_modes,
- .num_modes = ARRAY_SIZE(eukrea_mximxsd_modes),
- .pwmr = 0x00A903FF,
- .lscr1 = 0x00120300,
- .dmacr = 0x00040060,
-};
-
-static void eukrea_mbimxsd_lcd_power_set(struct plat_lcd_data *pd,
- unsigned int power)
-{
- if (power)
- gpio_direction_output(GPIO_LCDPWR, 1);
- else
- gpio_direction_output(GPIO_LCDPWR, 0);
-}
-
-static struct plat_lcd_data eukrea_mbimxsd_lcd_power_data = {
- .set_power = eukrea_mbimxsd_lcd_power_set,
-};
-
-static struct platform_device eukrea_mbimxsd_lcd_powerdev = {
- .name = "platform-lcd",
- .dev.platform_data = &eukrea_mbimxsd_lcd_power_data,
-};
-
-static const struct gpio_led eukrea_mbimxsd_leds[] __initconst = {
- {
- .name = "led1",
- .default_trigger = "heartbeat",
- .active_low = 1,
- .gpio = GPIO_LED1,
- },
-};
-
-static const struct gpio_led_platform_data
- eukrea_mbimxsd_led_info __initconst = {
- .leds = eukrea_mbimxsd_leds,
- .num_leds = ARRAY_SIZE(eukrea_mbimxsd_leds),
-};
-
-static struct gpio_keys_button eukrea_mbimxsd_gpio_buttons[] = {
- {
- .gpio = GPIO_SWITCH1,
- .code = BTN_0,
- .desc = "BP1",
- .active_low = 1,
- .wakeup = 1,
- },
-};
-
-static const struct gpio_keys_platform_data
- eukrea_mbimxsd_button_data __initconst = {
- .buttons = eukrea_mbimxsd_gpio_buttons,
- .nbuttons = ARRAY_SIZE(eukrea_mbimxsd_gpio_buttons),
-};
-
-static struct platform_device *platform_devices[] __initdata = {
- &eukrea_mbimxsd_lcd_powerdev,
-};
-
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
-static struct i2c_board_info eukrea_mbimxsd_i2c_devices[] = {
- {
- I2C_BOARD_INFO("tlv320aic23", 0x1a),
- },
-};
-
-static const
-struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata __initconst = {
- .flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE,
-};
-
-static struct esdhc_platform_data sd1_pdata = {
- .cd_gpio = GPIO_SD1CD,
- .cd_type = ESDHC_CD_GPIO,
- .wp_type = ESDHC_WP_NONE,
-};
-
-static struct spi_board_info eukrea_mbimxsd25_spi_board_info[] __initdata = {
- {
- .modalias = "spidev",
- .max_speed_hz = 20000000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- },
- {
- .modalias = "spidev",
- .max_speed_hz = 20000000,
- .bus_num = 0,
- .chip_select = 1,
- .mode = SPI_MODE_0,
- },
-};
-
-static int eukrea_mbimxsd25_spi_cs[] = {GPIO_SPI1_SS0, GPIO_SPI1_SS1};
-
-static const struct spi_imx_master eukrea_mbimxsd25_spi0_data __initconst = {
- .chipselect = eukrea_mbimxsd25_spi_cs,
- .num_chipselect = ARRAY_SIZE(eukrea_mbimxsd25_spi_cs),
-};
-
-/*
- * system init for baseboard usage. Will be called by cpuimx25 init.
- *
- * Add platform devices present on this baseboard and init
- * them from CPU side as far as required to use them later on
- */
-void __init eukrea_mbimxsd25_baseboard_init(void)
-{
- if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
- ARRAY_SIZE(eukrea_mbimxsd_pads)))
- printk(KERN_ERR "error setting mbimxsd pads !\n");
-
- imx25_add_imx_uart1(&uart_pdata);
- imx25_add_imx_fb(&eukrea_mximxsd_fb_pdata);
- imx25_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata);
-
- imx25_add_flexcan1();
- imx25_add_sdhci_esdhc_imx(0, &sd1_pdata);
-
- gpio_request(GPIO_LED1, "LED1");
- gpio_direction_output(GPIO_LED1, 1);
- gpio_free(GPIO_LED1);
-
- gpio_request(GPIO_SWITCH1, "SWITCH1");
- gpio_direction_input(GPIO_SWITCH1);
- gpio_free(GPIO_SWITCH1);
-
- gpio_request(GPIO_LCDPWR, "LCDPWR");
- gpio_direction_output(GPIO_LCDPWR, 1);
-
- i2c_register_board_info(0, eukrea_mbimxsd_i2c_devices,
- ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
-
- gpio_request(GPIO_SPI1_IRQ, "SPI1_IRQ");
- gpio_direction_input(GPIO_SPI1_IRQ);
- gpio_free(GPIO_SPI1_IRQ);
- imx25_add_spi_imx0(&eukrea_mbimxsd25_spi0_data);
- spi_register_board_info(eukrea_mbimxsd25_spi_board_info,
- ARRAY_SIZE(eukrea_mbimxsd25_spi_board_info));
-
- platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
- gpio_led_register_device(-1, &eukrea_mbimxsd_led_info);
- imx_add_gpio_keys(&eukrea_mbimxsd_button_data);
- imx_add_platform_device("eukrea_tlv320", 0, NULL, 0, NULL, 0);
-}
diff --git a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
index 14d6c8249b76..6edc940e0865 100644
--- a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
@@ -100,7 +100,7 @@ static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.num_modes = ARRAY_SIZE(fb_modedb),
};
-static iomux_v3_cfg_t eukrea_mbimxsd_pads[] = {
+static const iomux_v3_cfg_t eukrea_mbimxsd_pads[] __initconst = {
/* LCD */
MX35_PAD_LD0__IPU_DISPB_DAT_0,
MX35_PAD_LD1__IPU_DISPB_DAT_1,
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 745caa18ab2c..4d60005e9277 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -10,15 +10,25 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regulator/consumer.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"
+#include "hardware.h"
+#define GPC_CNTR 0x000
#define GPC_IMR1 0x008
+#define GPC_PGC_GPU_PDN 0x260
+#define GPC_PGC_GPU_PUPSCR 0x264
+#define GPC_PGC_GPU_PDNSCR 0x268
#define GPC_PGC_CPU_PDN 0x2a0
#define GPC_PGC_CPU_PUPSCR 0x2a4
#define GPC_PGC_CPU_PDNSCR 0x2a8
@@ -26,6 +36,19 @@
#define GPC_PGC_SW_SHIFT 0x0
#define IMR_NUM 4
+#define GPC_MAX_IRQS (IMR_NUM * 32)
+
+#define GPU_VPU_PUP_REQ BIT(1)
+#define GPU_VPU_PDN_REQ BIT(0)
+
+#define GPC_CLK_MAX 6
+
+struct pu_domain {
+ struct generic_pm_domain base;
+ struct regulator *reg;
+ struct clk *clk[GPC_CLK_MAX];
+ int num_clks;
+};
static void __iomem *gpc_base;
static u32 gpc_wake_irqs[IMR_NUM];
@@ -77,17 +100,17 @@ void imx_gpc_post_resume(void)
static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
{
- unsigned int idx = d->hwirq / 32 - 1;
+ unsigned int idx = d->hwirq / 32;
u32 mask;
- /* Sanity check for SPI irq */
- if (d->hwirq < 32)
- return -EINVAL;
-
mask = 1 << d->hwirq % 32;
gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
gpc_wake_irqs[idx] & ~mask;
+ /*
+ * Do *not* call into the parent, as the GIC doesn't have any
+ * wake-up facility...
+ */
return 0;
}
@@ -117,7 +140,7 @@ void imx_gpc_hwirq_unmask(unsigned int hwirq)
void __iomem *reg;
u32 val;
- reg = gpc_base + GPC_IMR1 + (hwirq / 32 - 1) * 4;
+ reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
val = readl_relaxed(reg);
val &= ~(1 << hwirq % 32);
writel_relaxed(val, reg);
@@ -128,7 +151,7 @@ void imx_gpc_hwirq_mask(unsigned int hwirq)
void __iomem *reg;
u32 val;
- reg = gpc_base + GPC_IMR1 + (hwirq / 32 - 1) * 4;
+ reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
val = readl_relaxed(reg);
val |= 1 << (hwirq % 32);
writel_relaxed(val, reg);
@@ -136,37 +159,319 @@ void imx_gpc_hwirq_mask(unsigned int hwirq)
static void imx_gpc_irq_unmask(struct irq_data *d)
{
- /* Sanity check for SPI irq */
- if (d->hwirq < 32)
- return;
-
imx_gpc_hwirq_unmask(d->hwirq);
+ irq_chip_unmask_parent(d);
}
static void imx_gpc_irq_mask(struct irq_data *d)
{
- /* Sanity check for SPI irq */
- if (d->hwirq < 32)
- return;
-
imx_gpc_hwirq_mask(d->hwirq);
+ irq_chip_mask_parent(d);
}
-void __init imx_gpc_init(void)
+static struct irq_chip imx_gpc_chip = {
+ .name = "GPC",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = imx_gpc_irq_mask,
+ .irq_unmask = imx_gpc_irq_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_wake = imx_gpc_irq_set_wake,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int imx_gpc_domain_xlate(struct irq_domain *domain,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
{
- struct device_node *np;
+ if (domain->of_node != controller)
+ return -EINVAL; /* Shouldn't happen, really... */
+ if (intsize != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (intspec[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2];
+ return 0;
+}
+
+static int imx_gpc_domain_alloc(struct irq_domain *domain,
+ unsigned int irq,
+ unsigned int nr_irqs, void *data)
+{
+ struct of_phandle_args *args = data;
+ struct of_phandle_args parent_args;
+ irq_hw_number_t hwirq;
int i;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
- gpc_base = of_iomap(np, 0);
- WARN_ON(!gpc_base);
+ if (args->args_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (args->args[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = args->args[1];
+ if (hwirq >= GPC_MAX_IRQS)
+ return -EINVAL; /* Can't deal with this */
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
+ &imx_gpc_chip, NULL);
+
+ parent_args = *args;
+ parent_args.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args);
+}
+
+static struct irq_domain_ops imx_gpc_domain_ops = {
+ .xlate = imx_gpc_domain_xlate,
+ .alloc = imx_gpc_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init imx_gpc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ int i;
+
+ if (!parent) {
+ pr_err("%s: no parent, giving up\n", node->full_name);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%s: unable to obtain parent domain\n", node->full_name);
+ return -ENXIO;
+ }
+
+ gpc_base = of_iomap(node, 0);
+ if (WARN_ON(!gpc_base))
+ return -ENOMEM;
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+ node, &imx_gpc_domain_ops,
+ NULL);
+ if (!domain) {
+ iounmap(gpc_base);
+ return -ENOMEM;
+ }
/* Initially mask all interrupts */
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
- /* Register GPC as the secondary interrupt controller behind GIC */
- gic_arch_extn.irq_mask = imx_gpc_irq_mask;
- gic_arch_extn.irq_unmask = imx_gpc_irq_unmask;
- gic_arch_extn.irq_set_wake = imx_gpc_irq_set_wake;
+ return 0;
+}
+
+/*
+ * We cannot use the IRQCHIP_DECLARE macro that lives in
+ * drivers/irqchip, so we're forced to roll our own. Not very nice.
+ */
+OF_DECLARE_2(irqchip, imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
+
+void __init imx_gpc_check_dt(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
+ if (WARN_ON(!np ||
+ !of_find_property(np, "interrupt-controller", NULL)))
+ pr_warn("Outdated DT detected, system is about to crash!!!\n");
+}
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+
+static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
+{
+ int iso, iso2sw;
+ u32 val;
+
+ /* Read ISO and ISO2SW power down delays */
+ val = readl_relaxed(gpc_base + GPC_PGC_GPU_PDNSCR);
+ iso = val & 0x3f;
+ iso2sw = (val >> 8) & 0x3f;
+
+ /* Gate off PU domain when GPU/VPU is powered down */
+ writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
+
+ /* Request GPC to power down GPU/VPU */
+ val = readl_relaxed(gpc_base + GPC_CNTR);
+ val |= GPU_VPU_PDN_REQ;
+ writel_relaxed(val, gpc_base + GPC_CNTR);
+
+ /* Wait ISO + ISO2SW IPG clock cycles */
+ ndelay((iso + iso2sw) * 1000 / 66);
+}
+
+static int imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
+{
+ struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
+
+ _imx6q_pm_pu_power_off(genpd);
+
+ if (pu->reg)
+ regulator_disable(pu->reg);
+
+ return 0;
+}
+
+static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
+{
+ struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
+ int i, ret, sw, sw2iso;
+ u32 val;
+
+ if (pu->reg)
+ ret = regulator_enable(pu->reg);
+ if (pu->reg && ret) {
+ pr_err("%s: failed to enable regulator: %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* Enable reset clocks for all devices in the PU domain */
+ for (i = 0; i < pu->num_clks; i++)
+ clk_prepare_enable(pu->clk[i]);
+
+ /* Gate off PU domain when GPU/VPU is powered down */
+ writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
+
+ /* Read SW and SW2ISO power up delays */
+ val = readl_relaxed(gpc_base + GPC_PGC_GPU_PUPSCR);
+ sw = val & 0x3f;
+ sw2iso = (val >> 8) & 0x3f;
+
+ /* Request GPC to power up GPU/VPU */
+ val = readl_relaxed(gpc_base + GPC_CNTR);
+ val |= GPU_VPU_PUP_REQ;
+ writel_relaxed(val, gpc_base + GPC_CNTR);
+
+ /* Wait SW + SW2ISO IPG clock cycles */
+ ndelay((sw + sw2iso) * 1000 / 66);
+
+ /* Disable reset clocks for all devices in the PU domain */
+ for (i = 0; i < pu->num_clks; i++)
+ clk_disable_unprepare(pu->clk[i]);
+
+ return 0;
+}
+
+static struct generic_pm_domain imx6q_arm_domain = {
+ .name = "ARM",
+};
+
+static struct pu_domain imx6q_pu_domain = {
+ .base = {
+ .name = "PU",
+ .power_off = imx6q_pm_pu_power_off,
+ .power_on = imx6q_pm_pu_power_on,
+ .power_off_latency_ns = 25000,
+ .power_on_latency_ns = 2000000,
+ },
+};
+
+static struct generic_pm_domain imx6sl_display_domain = {
+ .name = "DISPLAY",
+};
+
+static struct generic_pm_domain *imx_gpc_domains[] = {
+ &imx6q_arm_domain,
+ &imx6q_pu_domain.base,
+ &imx6sl_display_domain,
+};
+
+static struct genpd_onecell_data imx_gpc_onecell_data = {
+ .domains = imx_gpc_domains,
+ .num_domains = ARRAY_SIZE(imx_gpc_domains),
+};
+
+static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
+{
+ struct clk *clk;
+ bool is_off;
+ int i;
+
+ imx6q_pu_domain.reg = pu_reg;
+
+ for (i = 0; ; i++) {
+ clk = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+ if (i >= GPC_CLK_MAX) {
+ dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
+ goto clk_err;
+ }
+ imx6q_pu_domain.clk[i] = clk;
+ }
+ imx6q_pu_domain.num_clks = i;
+
+ is_off = IS_ENABLED(CONFIG_PM);
+ if (is_off) {
+ _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
+ } else {
+ /*
+ * Enable power if compiled without CONFIG_PM in case the
+ * bootloader disabled it.
+ */
+ imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+ }
+
+ pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
+ return of_genpd_add_provider_onecell(dev->of_node,
+ &imx_gpc_onecell_data);
+
+clk_err:
+ while (i--)
+ clk_put(imx6q_pu_domain.clk[i]);
+ return -EINVAL;
+}
+
+#else
+static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
+{
+ return 0;
+}
+#endif /* CONFIG_PM_GENERIC_DOMAINS */
+
+static int imx_gpc_probe(struct platform_device *pdev)
+{
+ struct regulator *pu_reg;
+ int ret;
+
+ pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
+ if (PTR_ERR(pu_reg) == -ENODEV)
+ pu_reg = NULL;
+ if (IS_ERR(pu_reg)) {
+ ret = PTR_ERR(pu_reg);
+ dev_err(&pdev->dev, "failed to get pu regulator: %d\n", ret);
+ return ret;
+ }
+
+ return imx_gpc_genpd_init(&pdev->dev, pu_reg);
+}
+
+static const struct of_device_id imx_gpc_dt_ids[] = {
+ { .compatible = "fsl,imx6q-gpc" },
+ { .compatible = "fsl,imx6sl-gpc" },
+ { }
+};
+
+static struct platform_driver imx_gpc_driver = {
+ .driver = {
+ .name = "imx-gpc",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_gpc_dt_ids,
+ },
+ .probe = imx_gpc_probe,
+};
+
+static int __init imx_pgc_init(void)
+{
+ return platform_driver_register(&imx_gpc_driver);
}
+subsys_initcall(imx_pgc_init);
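
With the rework above, the GPC becomes both a stacked irqchip between the SoC
peripherals and the GIC and a genpd provider: imx_gpc_onecell_data exposes
imx_gpc_domains[], so a device sitting in the "PU" domain (GPU/VPU) is expected
to reference it from DT as power-domains = <&gpc 1> and then drive the domain
purely through runtime PM. A minimal consumer sketch follows; the example_gpu_*
names are hypothetical and only illustrate the call flow, they are not part of
this patch.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_gpu_probe(struct platform_device *pdev)
{
	/* genpd has already attached the device to the "PU" domain */
	pm_runtime_enable(&pdev->dev);

	/* the first active reference ends up in imx6q_pm_pu_power_on() */
	pm_runtime_get_sync(&pdev->dev);

	/* ... hardware initialisation would go here ... */

	/* dropping the last reference lets imx6q_pm_pu_power_off() run */
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

Whether the domain starts powered off is decided in imx_gpc_genpd_init(): with
CONFIG_PM enabled it is shut down right away via _imx6q_pm_pu_power_off(),
otherwise it is forced on in case the bootloader left it disabled.
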
diff --git a/arch/arm/mach-imx/hardware.h b/arch/arm/mach-imx/hardware.h
index 66b2b564c463..76af2c03c241 100644
--- a/arch/arm/mach-imx/hardware.h
+++ b/arch/arm/mach-imx/hardware.h
@@ -112,7 +112,6 @@
#include "mx21.h"
#include "mx27.h"
#include "mx1.h"
-#include "mx25.h"
#define imx_map_entry(soc, name, _type) { \
.virtual = soc ## _IO_P2V(soc ## _ ## name ## _BASE_ADDR), \
diff --git a/arch/arm/mach-imx/iomux-mx25.h b/arch/arm/mach-imx/iomux-mx25.h
deleted file mode 100644
index be51e838375c..000000000000
--- a/arch/arm/mach-imx/iomux-mx25.h
+++ /dev/null
@@ -1,524 +0,0 @@
-/*
- * arch/arm/plat-mxc/include/mach/iomux-mx25.h
- *
- * Copyright (C) 2009 by Lothar Wassmann <LW@KARO-electronics.de>
- *
- * based on arch/arm/mach-mx25/mx25_pins.h
- * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
- * and
- * arch/arm/plat-mxc/include/mach/iomux-mx35.h
- * Copyright (C) 2009 by Jan Weitzel Phytec Messtechnik GmbH <armlinux@phytec.de>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#ifndef __MACH_IOMUX_MX25_H__
-#define __MACH_IOMUX_MX25_H__
-
-#include "iomux-v3.h"
-
-/*
- * IOMUX/PAD Bit field definitions
- */
-
-#define MX25_PAD_A10__A10 IOMUX_PAD(0x000, 0x008, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A10__GPIO_4_0 IOMUX_PAD(0x000, 0x008, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A13__A13 IOMUX_PAD(0x22C, 0x00c, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A13__GPIO_4_1 IOMUX_PAD(0x22C, 0x00c, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A14__A14 IOMUX_PAD(0x230, 0x010, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A14__GPIO_2_0 IOMUX_PAD(0x230, 0x010, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A15__A15 IOMUX_PAD(0x234, 0x014, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A15__GPIO_2_1 IOMUX_PAD(0x234, 0x014, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A16__A16 IOMUX_PAD(0x000, 0x018, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A16__GPIO_2_2 IOMUX_PAD(0x000, 0x018, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A17__A17 IOMUX_PAD(0x238, 0x01c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A17__GPIO_2_3 IOMUX_PAD(0x238, 0x01c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A18__A18 IOMUX_PAD(0x23c, 0x020, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A18__GPIO_2_4 IOMUX_PAD(0x23c, 0x020, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A18__FEC_COL IOMUX_PAD(0x23c, 0x020, 0x17, 0x504, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A19__A19 IOMUX_PAD(0x240, 0x024, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A19__FEC_RX_ER IOMUX_PAD(0x240, 0x024, 0x17, 0x518, 0, NO_PAD_CTRL)
-#define MX25_PAD_A19__GPIO_2_5 IOMUX_PAD(0x240, 0x024, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A20__A20 IOMUX_PAD(0x244, 0x028, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A20__GPIO_2_6 IOMUX_PAD(0x244, 0x028, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A20__FEC_RDATA2 IOMUX_PAD(0x244, 0x028, 0x17, 0x50c, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A21__A21 IOMUX_PAD(0x248, 0x02c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A21__GPIO_2_7 IOMUX_PAD(0x248, 0x02c, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A21__FEC_RDATA3 IOMUX_PAD(0x248, 0x02c, 0x17, 0x510, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A22__A22 IOMUX_PAD(0x000, 0x030, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A22__GPIO_2_8 IOMUX_PAD(0x000, 0x030, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A23__A23 IOMUX_PAD(0x24c, 0x034, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A23__GPIO_2_9 IOMUX_PAD(0x24c, 0x034, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A24__A24 IOMUX_PAD(0x250, 0x038, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A24__GPIO_2_10 IOMUX_PAD(0x250, 0x038, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A24__FEC_RX_CLK IOMUX_PAD(0x250, 0x038, 0x17, 0x514, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_A25__A25 IOMUX_PAD(0x254, 0x03c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A25__GPIO_2_11 IOMUX_PAD(0x254, 0x03c, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A25__FEC_CRS IOMUX_PAD(0x254, 0x03c, 0x17, 0x508, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_EB0__EB0 IOMUX_PAD(0x258, 0x040, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_EB0__AUD4_TXD IOMUX_PAD(0x258, 0x040, 0x14, 0x464, 0, NO_PAD_CTRL)
-#define MX25_PAD_EB0__GPIO_2_12 IOMUX_PAD(0x258, 0x040, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_EB1__EB1 IOMUX_PAD(0x25c, 0x044, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_EB1__AUD4_RXD IOMUX_PAD(0x25c, 0x044, 0x14, 0x460, 0, NO_PAD_CTRL)
-#define MX25_PAD_EB1__GPIO_2_13 IOMUX_PAD(0x25c, 0x044, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_OE__OE IOMUX_PAD(0x260, 0x048, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_OE__AUD4_TXC IOMUX_PAD(0x260, 0x048, 0x14, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_OE__GPIO_2_14 IOMUX_PAD(0x260, 0x048, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CS0__CS0 IOMUX_PAD(0x000, 0x04c, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS0__GPIO_4_2 IOMUX_PAD(0x000, 0x04c, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CS1__CS1 IOMUX_PAD(0x000, 0x050, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS1__NF_CE3 IOMUX_PAD(0x000, 0x050, 0x01, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS1__GPIO_4_3 IOMUX_PAD(0x000, 0x050, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CS4__CS4 IOMUX_PAD(0x264, 0x054, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS4__NF_CE1 IOMUX_PAD(0x264, 0x054, 0x01, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS4__UART5_CTS IOMUX_PAD(0x264, 0x054, 0x13, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS4__GPIO_3_20 IOMUX_PAD(0x264, 0x054, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CS5__CS5 IOMUX_PAD(0x268, 0x058, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS5__NF_CE2 IOMUX_PAD(0x268, 0x058, 0x01, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS5__UART5_RTS IOMUX_PAD(0x268, 0x058, 0x13, 0x574, 0, NO_PAD_CTRL)
-#define MX25_PAD_CS5__GPIO_3_21 IOMUX_PAD(0x268, 0x058, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_NF_CE0__NF_CE0 IOMUX_PAD(0x26c, 0x05c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_NF_CE0__GPIO_3_22 IOMUX_PAD(0x26c, 0x05c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_ECB__ECB IOMUX_PAD(0x270, 0x060, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_ECB__UART5_TXD_MUX IOMUX_PAD(0x270, 0x060, 0x13, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_ECB__GPIO_3_23 IOMUX_PAD(0x270, 0x060, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LBA__LBA IOMUX_PAD(0x274, 0x064, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LBA__UART5_RXD_MUX IOMUX_PAD(0x274, 0x064, 0x13, 0x578, 0, NO_PAD_CTRL)
-#define MX25_PAD_LBA__GPIO_3_24 IOMUX_PAD(0x274, 0x064, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_BCLK__BCLK IOMUX_PAD(0x000, 0x068, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_BCLK__GPIO_4_4 IOMUX_PAD(0x000, 0x068, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_RW__RW IOMUX_PAD(0x278, 0x06c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_RW__AUD4_TXFS IOMUX_PAD(0x278, 0x06c, 0x14, 0x474, 0, NO_PAD_CTRL)
-#define MX25_PAD_RW__GPIO_3_25 IOMUX_PAD(0x278, 0x06c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_NFWE_B__NFWE_B IOMUX_PAD(0x000, 0x070, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_NFWE_B__GPIO_3_26 IOMUX_PAD(0x000, 0x070, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_NFRE_B__NFRE_B IOMUX_PAD(0x000, 0x074, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_NFRE_B__GPIO_3_27 IOMUX_PAD(0x000, 0x074, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_NFALE__NFALE IOMUX_PAD(0x000, 0x078, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_NFALE__GPIO_3_28 IOMUX_PAD(0x000, 0x078, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_NFCLE__NFCLE IOMUX_PAD(0x000, 0x07c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_NFCLE__GPIO_3_29 IOMUX_PAD(0x000, 0x07c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_NFWP_B__NFWP_B IOMUX_PAD(0x000, 0x080, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_NFWP_B__GPIO_3_30 IOMUX_PAD(0x000, 0x080, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_NFRB__NFRB IOMUX_PAD(0x27c, 0x084, 0x10, 0, 0, PAD_CTL_PKE)
-#define MX25_PAD_NFRB__GPIO_3_31 IOMUX_PAD(0x27c, 0x084, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D15__D15 IOMUX_PAD(0x280, 0x088, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D15__LD16 IOMUX_PAD(0x280, 0x088, 0x01, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_D15__GPIO_4_5 IOMUX_PAD(0x280, 0x088, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D14__D14 IOMUX_PAD(0x284, 0x08c, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D14__LD17 IOMUX_PAD(0x284, 0x08c, 0x01, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_D14__GPIO_4_6 IOMUX_PAD(0x284, 0x08c, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D13__D13 IOMUX_PAD(0x288, 0x090, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D13__LD18 IOMUX_PAD(0x288, 0x090, 0x01, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_D13__GPIO_4_7 IOMUX_PAD(0x288, 0x090, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D12__D12 IOMUX_PAD(0x28c, 0x094, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D12__GPIO_4_8 IOMUX_PAD(0x28c, 0x094, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D11__D11 IOMUX_PAD(0x290, 0x098, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D11__GPIO_4_9 IOMUX_PAD(0x290, 0x098, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D10__D10 IOMUX_PAD(0x294, 0x09c, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D10__GPIO_4_10 IOMUX_PAD(0x294, 0x09c, 0x05, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D10__USBOTG_OC IOMUX_PAD(0x294, 0x09c, 0x06, 0x57c, 0, PAD_CTL_PUS_100K_UP)
-
-#define MX25_PAD_D9__D9 IOMUX_PAD(0x298, 0x0a0, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D9__GPIO_4_11 IOMUX_PAD(0x298, 0x0a0, 0x05, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D9__USBH2_PWR IOMUX_PAD(0x298, 0x0a0, 0x06, 0, 0, PAD_CTL_PKE)
-
-#define MX25_PAD_D8__D8 IOMUX_PAD(0x29c, 0x0a4, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D8__GPIO_4_12 IOMUX_PAD(0x29c, 0x0a4, 0x05, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D8__USBH2_OC IOMUX_PAD(0x29c, 0x0a4, 0x06, 0x580, 0, PAD_CTL_PUS_100K_UP)
-
-#define MX25_PAD_D7__D7 IOMUX_PAD(0x2a0, 0x0a8, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D7__GPIO_4_13 IOMUX_PAD(0x2a0, 0x0a8, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D6__D6 IOMUX_PAD(0x2a4, 0x0ac, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D6__GPIO_4_14 IOMUX_PAD(0x2a4, 0x0ac, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D5__D5 IOMUX_PAD(0x2a8, 0x0b0, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D5__GPIO_4_15 IOMUX_PAD(0x2a8, 0x0b0, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D4__D4 IOMUX_PAD(0x2ac, 0x0b4, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D4__GPIO_4_16 IOMUX_PAD(0x2ac, 0x0b4, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D3__D3 IOMUX_PAD(0x2b0, 0x0b8, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D3__GPIO_4_17 IOMUX_PAD(0x2b0, 0x0b8, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D2__D2 IOMUX_PAD(0x2b4, 0x0bc, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D2__GPIO_4_18 IOMUX_PAD(0x2b4, 0x0bc, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D1__D1 IOMUX_PAD(0x2b8, 0x0c0, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D1__GPIO_4_19 IOMUX_PAD(0x2b8, 0x0c0, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_D0__D0 IOMUX_PAD(0x2bc, 0x0c4, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_D0__GPIO_4_20 IOMUX_PAD(0x2bc, 0x0c4, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD0__LD0 IOMUX_PAD(0x2c0, 0x0c8, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD0__CSI_D0 IOMUX_PAD(0x2c0, 0x0c8, 0x12, 0x488, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD0__GPIO_2_15 IOMUX_PAD(0x2c0, 0x0c8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD1__LD1 IOMUX_PAD(0x2c4, 0x0cc, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD1__CSI_D1 IOMUX_PAD(0x2c4, 0x0cc, 0x12, 0x48c, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD1__GPIO_2_16 IOMUX_PAD(0x2c4, 0x0cc, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD2__LD2 IOMUX_PAD(0x2c8, 0x0d0, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD2__GPIO_2_17 IOMUX_PAD(0x2c8, 0x0d0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD3__LD3 IOMUX_PAD(0x2cc, 0x0d4, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD3__GPIO_2_18 IOMUX_PAD(0x2cc, 0x0d4, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD4__LD4 IOMUX_PAD(0x2d0, 0x0d8, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD4__GPIO_2_19 IOMUX_PAD(0x2d0, 0x0d8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD5__LD5 IOMUX_PAD(0x2d4, 0x0dc, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD5__GPIO_1_19 IOMUX_PAD(0x2d4, 0x0dc, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD6__LD6 IOMUX_PAD(0x2d8, 0x0e0, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD6__GPIO_1_20 IOMUX_PAD(0x2d8, 0x0e0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD7__LD7 IOMUX_PAD(0x2dc, 0x0e4, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD7__GPIO_1_21 IOMUX_PAD(0x2dc, 0x0e4, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD8__LD8 IOMUX_PAD(0x2e0, 0x0e8, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD8__FEC_TX_ERR IOMUX_PAD(0x2e0, 0x0e8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD9__LD9 IOMUX_PAD(0x2e4, 0x0ec, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD9__FEC_COL IOMUX_PAD(0x2e4, 0x0ec, 0x15, 0x504, 1, NO_PAD_CTRL)
-
-#define MX25_PAD_LD10__LD10 IOMUX_PAD(0x2e8, 0x0f0, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD10__FEC_RX_ER IOMUX_PAD(0x2e8, 0x0f0, 0x15, 0x518, 1, NO_PAD_CTRL)
-
-#define MX25_PAD_LD11__LD11 IOMUX_PAD(0x2ec, 0x0f4, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD11__FEC_RDATA2 IOMUX_PAD(0x2ec, 0x0f4, 0x15, 0x50c, 1, NO_PAD_CTRL)
-
-#define MX25_PAD_LD12__LD12 IOMUX_PAD(0x2f0, 0x0f8, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD12__FEC_RDATA3 IOMUX_PAD(0x2f0, 0x0f8, 0x15, 0x510, 1, NO_PAD_CTRL)
-
-#define MX25_PAD_LD13__LD13 IOMUX_PAD(0x2f4, 0x0fc, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD13__FEC_TDATA2 IOMUX_PAD(0x2f4, 0x0fc, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD14__LD14 IOMUX_PAD(0x2f8, 0x100, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD14__FEC_TDATA3 IOMUX_PAD(0x2f8, 0x100, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LD15__LD15 IOMUX_PAD(0x2fc, 0x104, 0x10, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_LD15__FEC_RX_CLK IOMUX_PAD(0x2fc, 0x104, 0x15, 0x514, 1, NO_PAD_CTRL)
-
-#define MX25_PAD_HSYNC__HSYNC IOMUX_PAD(0x300, 0x108, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_HSYNC__GPIO_1_22 IOMUX_PAD(0x300, 0x108, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_VSYNC__VSYNC IOMUX_PAD(0x304, 0x10c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_VSYNC__GPIO_1_23 IOMUX_PAD(0x304, 0x10c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_LSCLK__LSCLK IOMUX_PAD(0x308, 0x110, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LSCLK__GPIO_1_24 IOMUX_PAD(0x308, 0x110, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_OE_ACD__OE_ACD IOMUX_PAD(0x30c, 0x114, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_OE_ACD__GPIO_1_25 IOMUX_PAD(0x30c, 0x114, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CONTRAST__CONTRAST IOMUX_PAD(0x310, 0x118, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CONTRAST__PWM4_PWMO IOMUX_PAD(0x310, 0x118, 0x14, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CONTRAST__FEC_CRS IOMUX_PAD(0x310, 0x118, 0x15, 0x508, 1, NO_PAD_CTRL)
-
-#define MX25_PAD_PWM__PWM IOMUX_PAD(0x314, 0x11c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_PWM__GPIO_1_26 IOMUX_PAD(0x314, 0x11c, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_PWM__USBH2_OC IOMUX_PAD(0x314, 0x11c, 0x16, 0x580, 1, PAD_CTL_PUS_100K_UP)
-
-#define MX25_PAD_CSI_D2__CSI_D2 IOMUX_PAD(0x318, 0x120, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D2__UART5_RXD_MUX IOMUX_PAD(0x318, 0x120, 0x11, 0x578, 1, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D2__GPIO_1_27 IOMUX_PAD(0x318, 0x120, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D2__CSPI3_MOSI IOMUX_PAD(0x318, 0x120, 0x17, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_D3__CSI_D3 IOMUX_PAD(0x31c, 0x124, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D3__GPIO_1_28 IOMUX_PAD(0x31c, 0x124, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D3__CSPI3_MISO IOMUX_PAD(0x31c, 0x124, 0x17, 0x4b4, 1, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_D4__CSI_D4 IOMUX_PAD(0x320, 0x128, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D4__UART5_RTS IOMUX_PAD(0x320, 0x128, 0x11, 0x574, 1, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D4__GPIO_1_29 IOMUX_PAD(0x320, 0x128, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D4__CSPI3_SCLK IOMUX_PAD(0x320, 0x128, 0x17, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_D5__CSI_D5 IOMUX_PAD(0x324, 0x12c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D5__GPIO_1_30 IOMUX_PAD(0x324, 0x12c, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D5__CSPI3_RDY IOMUX_PAD(0x324, 0x12c, 0x17, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_D6__CSI_D6 IOMUX_PAD(0x328, 0x130, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D6__GPIO_1_31 IOMUX_PAD(0x328, 0x130, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_D7__CSI_D7 IOMUX_PAD(0x32c, 0x134, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D7__GPIO_1_6 IOMUX_PAD(0x32c, 0x134, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_D8__CSI_D8 IOMUX_PAD(0x330, 0x138, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D8__GPIO_1_7 IOMUX_PAD(0x330, 0x138, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_D9__CSI_D9 IOMUX_PAD(0x334, 0x13c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_D9__GPIO_4_21 IOMUX_PAD(0x334, 0x13c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_MCLK__CSI_MCLK IOMUX_PAD(0x338, 0x140, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_MCLK__GPIO_1_8 IOMUX_PAD(0x338, 0x140, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_VSYNC__CSI_VSYNC IOMUX_PAD(0x33c, 0x144, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_VSYNC__GPIO_1_9 IOMUX_PAD(0x33c, 0x144, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_HSYNC__CSI_HSYNC IOMUX_PAD(0x340, 0x148, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_HSYNC__GPIO_1_10 IOMUX_PAD(0x340, 0x148, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSI_PIXCLK__CSI_PIXCLK IOMUX_PAD(0x344, 0x14c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSI_PIXCLK__GPIO_1_11 IOMUX_PAD(0x344, 0x14c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_I2C1_CLK__I2C1_CLK IOMUX_PAD(0x348, 0x150, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_I2C1_CLK__GPIO_1_12 IOMUX_PAD(0x348, 0x150, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_I2C1_DAT__I2C1_DAT IOMUX_PAD(0x34c, 0x154, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_I2C1_DAT__GPIO_1_13 IOMUX_PAD(0x34c, 0x154, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSPI1_MOSI__CSPI1_MOSI IOMUX_PAD(0x350, 0x158, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSPI1_MOSI__GPIO_1_14 IOMUX_PAD(0x350, 0x158, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSPI1_MISO__CSPI1_MISO IOMUX_PAD(0x354, 0x15c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSPI1_MISO__GPIO_1_15 IOMUX_PAD(0x354, 0x15c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSPI1_SS0__CSPI1_SS0 IOMUX_PAD(0x358, 0x160, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSPI1_SS0__GPIO_1_16 IOMUX_PAD(0x358, 0x160, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSPI1_SS1__CSPI1_SS1 IOMUX_PAD(0x35c, 0x164, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSPI1_SS1__GPIO_1_17 IOMUX_PAD(0x35c, 0x164, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSPI1_SCLK__CSPI1_SCLK IOMUX_PAD(0x360, 0x168, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CSPI1_SCLK__GPIO_1_18 IOMUX_PAD(0x360, 0x168, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CSPI1_RDY__CSPI1_RDY IOMUX_PAD(0x364, 0x16c, 0x10, 0, 0, PAD_CTL_PKE)
-#define MX25_PAD_CSPI1_RDY__GPIO_2_22 IOMUX_PAD(0x364, 0x16c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART1_RXD__UART1_RXD IOMUX_PAD(0x368, 0x170, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN)
-#define MX25_PAD_UART1_RXD__GPIO_4_22 IOMUX_PAD(0x368, 0x170, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART1_TXD__UART1_TXD IOMUX_PAD(0x36c, 0x174, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UART1_TXD__GPIO_4_23 IOMUX_PAD(0x36c, 0x174, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART1_RTS__UART1_RTS IOMUX_PAD(0x370, 0x178, 0x10, 0, 0, PAD_CTL_PUS_100K_UP)
-#define MX25_PAD_UART1_RTS__CSI_D0 IOMUX_PAD(0x370, 0x178, 0x11, 0x488, 1, NO_PAD_CTRL)
-#define MX25_PAD_UART1_RTS__GPIO_4_24 IOMUX_PAD(0x370, 0x178, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART1_CTS__UART1_CTS IOMUX_PAD(0x374, 0x17c, 0x10, 0, 0, PAD_CTL_PUS_100K_UP)
-#define MX25_PAD_UART1_CTS__CSI_D1 IOMUX_PAD(0x374, 0x17c, 0x11, 0x48c, 1, NO_PAD_CTRL)
-#define MX25_PAD_UART1_CTS__GPIO_4_25 IOMUX_PAD(0x374, 0x17c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART2_RXD__UART2_RXD IOMUX_PAD(0x378, 0x180, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UART2_RXD__GPIO_4_26 IOMUX_PAD(0x378, 0x180, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART2_TXD__UART2_TXD IOMUX_PAD(0x37c, 0x184, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UART2_TXD__GPIO_4_27 IOMUX_PAD(0x37c, 0x184, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART2_RTS__UART2_RTS IOMUX_PAD(0x380, 0x188, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UART2_RTS__FEC_COL IOMUX_PAD(0x380, 0x188, 0x12, 0x504, 2, NO_PAD_CTRL)
-#define MX25_PAD_UART2_RTS__GPIO_4_28 IOMUX_PAD(0x380, 0x188, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UART2_CTS__FEC_RX_ER IOMUX_PAD(0x384, 0x18c, 0x12, 0x518, 2, NO_PAD_CTRL)
-#define MX25_PAD_UART2_CTS__UART2_CTS IOMUX_PAD(0x384, 0x18c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UART2_CTS__GPIO_4_29 IOMUX_PAD(0x384, 0x18c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_SD1_CMD__SD1_CMD IOMUX_PAD(0x388, 0x190, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_CMD__FEC_RDATA2 IOMUX_PAD(0x388, 0x190, 0x12, 0x50c, 2, NO_PAD_CTRL)
-#define MX25_PAD_SD1_CMD__GPIO_2_23 IOMUX_PAD(0x388, 0x190, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_SD1_CLK__SD1_CLK IOMUX_PAD(0x38c, 0x194, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_CLK__FEC_RDATA3 IOMUX_PAD(0x38c, 0x194, 0x12, 0x510, 2, NO_PAD_CTRL)
-#define MX25_PAD_SD1_CLK__GPIO_2_24 IOMUX_PAD(0x38c, 0x194, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_SD1_DATA0__SD1_DATA0 IOMUX_PAD(0x390, 0x198, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_DATA0__GPIO_2_25 IOMUX_PAD(0x390, 0x198, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_SD1_DATA1__SD1_DATA1 IOMUX_PAD(0x394, 0x19c, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_DATA1__AUD7_RXD IOMUX_PAD(0x394, 0x19c, 0x13, 0x478, 0, NO_PAD_CTRL)
-#define MX25_PAD_SD1_DATA1__GPIO_2_26 IOMUX_PAD(0x394, 0x19c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_SD1_DATA2__SD1_DATA2 IOMUX_PAD(0x398, 0x1a0, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_DATA2__FEC_RX_CLK IOMUX_PAD(0x398, 0x1a0, 0x15, 0x514, 2, NO_PAD_CTRL)
-#define MX25_PAD_SD1_DATA2__GPIO_2_27 IOMUX_PAD(0x398, 0x1a0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_SD1_DATA3__SD1_DATA3 IOMUX_PAD(0x39c, 0x1a4, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_DATA3__FEC_CRS IOMUX_PAD(0x39c, 0x1a4, 0x10, 0x508, 2, NO_PAD_CTRL)
-#define MX25_PAD_SD1_DATA3__GPIO_2_28 IOMUX_PAD(0x39c, 0x1a4, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define KPP_CTL_ROW (PAD_CTL_PKE | PAD_CTL_PUE | PAD_CTL_PUS_100K_UP)
-#define KPP_CTL_COL (PAD_CTL_PKE | PAD_CTL_PUE | PAD_CTL_PUS_100K_UP | PAD_CTL_ODE)
-
-#define MX25_PAD_KPP_ROW0__KPP_ROW0 IOMUX_PAD(0x3a0, 0x1a8, 0x10, 0, 0, KPP_CTL_ROW)
-#define MX25_PAD_KPP_ROW0__GPIO_2_29 IOMUX_PAD(0x3a0, 0x1a8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_KPP_ROW1__KPP_ROW1 IOMUX_PAD(0x3a4, 0x1ac, 0x10, 0, 0, KPP_CTL_ROW)
-#define MX25_PAD_KPP_ROW1__GPIO_2_30 IOMUX_PAD(0x3a4, 0x1ac, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_KPP_ROW2__KPP_ROW2 IOMUX_PAD(0x3a8, 0x1b0, 0x10, 0, 0, KPP_CTL_ROW)
-#define MX25_PAD_KPP_ROW2__CSI_D0 IOMUX_PAD(0x3a8, 0x1b0, 0x13, 0x488, 2, NO_PAD_CTRL)
-#define MX25_PAD_KPP_ROW2__GPIO_2_31 IOMUX_PAD(0x3a8, 0x1b0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_KPP_ROW3__KPP_ROW3 IOMUX_PAD(0x3ac, 0x1b4, 0x10, 0, 0, KPP_CTL_ROW)
-#define MX25_PAD_KPP_ROW3__CSI_LD1 IOMUX_PAD(0x3ac, 0x1b4, 0x13, 0x48c, 2, NO_PAD_CTRL)
-#define MX25_PAD_KPP_ROW3__GPIO_3_0 IOMUX_PAD(0x3ac, 0x1b4, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_KPP_COL0__KPP_COL0 IOMUX_PAD(0x3b0, 0x1b8, 0x10, 0, 0, KPP_CTL_COL)
-#define MX25_PAD_KPP_COL0__UART4_RXD_MUX IOMUX_PAD(0x3b0, 0x1b8, 0x11, 0x570, 1, NO_PAD_CTRL)
-#define MX25_PAD_KPP_COL0__AUD5_TXD IOMUX_PAD(0x3b0, 0x1b8, 0x12, 0, 0, PAD_CTL_PKE | PAD_CTL_PUS_100K_UP)
-#define MX25_PAD_KPP_COL0__GPIO_3_1 IOMUX_PAD(0x3b0, 0x1b8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_KPP_COL1__KPP_COL1 IOMUX_PAD(0x3b4, 0x1bc, 0x10, 0, 0, KPP_CTL_COL)
-#define MX25_PAD_KPP_COL1__UART4_TXD_MUX IOMUX_PAD(0x3b4, 0x1bc, 0x11, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_KPP_COL1__AUD5_RXD IOMUX_PAD(0x3b4, 0x1bc, 0x12, 0, 0, PAD_CTL_PKE | PAD_CTL_PUS_100K_UP)
-#define MX25_PAD_KPP_COL1__GPIO_3_2 IOMUX_PAD(0x3b4, 0x1bc, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_KPP_COL2__KPP_COL2 IOMUX_PAD(0x3b8, 0x1c0, 0x10, 0, 0, KPP_CTL_COL)
-#define MX25_PAD_KPP_COL2__UART4_RTS IOMUX_PAD(0x3b8, 0x1c0, 0x11, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_KPP_COL2__AUD5_TXC IOMUX_PAD(0x3b8, 0x1c0, 0x12, 0, 0, PAD_CTL_PKE | PAD_CTL_PUS_100K_UP)
-#define MX25_PAD_KPP_COL2__GPIO_3_3 IOMUX_PAD(0x3b8, 0x1c0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_KPP_COL3__KPP_COL3 IOMUX_PAD(0x3bc, 0x1c4, 0x10, 0, 0, KPP_CTL_COL)
-#define MX25_PAD_KPP_COL3__UART4_CTS IOMUX_PAD(0x3bc, 0x1c4, 0x11, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_KPP_COL3__AUD5_TXFS IOMUX_PAD(0x3bc, 0x1c4, 0x12, 0, 0, PAD_CTL_PKE | PAD_CTL_PUS_100K_UP)
-#define MX25_PAD_KPP_COL3__GPIO_3_4 IOMUX_PAD(0x3bc, 0x1c4, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_MDC__FEC_MDC IOMUX_PAD(0x3c0, 0x1c8, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_MDC__AUD4_TXD IOMUX_PAD(0x3c0, 0x1c8, 0x12, 0x464, 1, NO_PAD_CTRL)
-#define MX25_PAD_FEC_MDC__GPIO_3_5 IOMUX_PAD(0x3c0, 0x1c8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_MDIO__FEC_MDIO IOMUX_PAD(0x3c4, 0x1cc, 0x10, 0, 0, PAD_CTL_HYS | PAD_CTL_PUS_22K_UP)
-#define MX25_PAD_FEC_MDIO__AUD4_RXD IOMUX_PAD(0x3c4, 0x1cc, 0x12, 0x460, 1, NO_PAD_CTRL)
-#define MX25_PAD_FEC_MDIO__GPIO_3_6 IOMUX_PAD(0x3c4, 0x1cc, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_TDATA0__FEC_TDATA0 IOMUX_PAD(0x3c8, 0x1d0, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_TDATA0__GPIO_3_7 IOMUX_PAD(0x3c8, 0x1d0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_TDATA1__FEC_TDATA1 IOMUX_PAD(0x3cc, 0x1d4, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_TDATA1__AUD4_TXFS IOMUX_PAD(0x3cc, 0x1d4, 0x12, 0x474, 1, NO_PAD_CTRL)
-#define MX25_PAD_FEC_TDATA1__GPIO_3_8 IOMUX_PAD(0x3cc, 0x1d4, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_TX_EN__FEC_TX_EN IOMUX_PAD(0x3d0, 0x1d8, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_TX_EN__GPIO_3_9 IOMUX_PAD(0x3d0, 0x1d8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 IOMUX_PAD(0x3d4, 0x1dc, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
-#define MX25_PAD_FEC_RDATA0__GPIO_3_10 IOMUX_PAD(0x3d4, 0x1dc, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 IOMUX_PAD(0x3d8, 0x1e0, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
-#define MX25_PAD_FEC_RDATA1__GPIO_3_11 IOMUX_PAD(0x3d8, 0x1e0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_RX_DV__FEC_RX_DV IOMUX_PAD(0x3dc, 0x1e4, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
-#define MX25_PAD_FEC_RX_DV__CAN2_RX IOMUX_PAD(0x3dc, 0x1e4, 0x14, 0x484, 0, PAD_CTL_PUS_22K_UP)
-#define MX25_PAD_FEC_RX_DV__GPIO_3_12 IOMUX_PAD(0x3dc, 0x1e4, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_FEC_TX_CLK__FEC_TX_CLK IOMUX_PAD(0x3e0, 0x1e8, 0x10, 0, 0, PAD_CTL_HYS | PAD_CTL_PUS_100K_DOWN)
-#define MX25_PAD_FEC_TX_CLK__GPIO_3_13 IOMUX_PAD(0x3e0, 0x1e8, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_RTCK__RTCK IOMUX_PAD(0x3e4, 0x1ec, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_RTCK__OWIRE IOMUX_PAD(0x3e4, 0x1ec, 0x11, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_RTCK__GPIO_3_14 IOMUX_PAD(0x3e4, 0x1ec, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_DE_B__DE_B IOMUX_PAD(0x3ec, 0x1f0, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_DE_B__GPIO_2_20 IOMUX_PAD(0x3ec, 0x1f0, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_TDO__TDO IOMUX_PAD(0x3e8, 0x000, 0x00, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_GPIO_A__GPIO_A IOMUX_PAD(0x3f0, 0x1f4, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_GPIO_A__CAN1_TX IOMUX_PAD(0x3f0, 0x1f4, 0x16, 0, 0, PAD_CTL_PUS_22K_UP)
-#define MX25_PAD_GPIO_A__USBOTG_PWR IOMUX_PAD(0x3f0, 0x1f4, 0x12, 0, 0, PAD_CTL_PKE)
-
-#define MX25_PAD_GPIO_B__GPIO_B IOMUX_PAD(0x3f4, 0x1f8, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_GPIO_B__CAN1_RX IOMUX_PAD(0x3f4, 0x1f8, 0x16, 0x480, 1, PAD_CTL_PUS_22K_UP)
-#define MX25_PAD_GPIO_B__USBOTG_OC IOMUX_PAD(0x3f4, 0x1f8, 0x12, 0x57c, 1, PAD_CTL_PUS_100K_UP)
-
-#define MX25_PAD_GPIO_C__GPIO_C IOMUX_PAD(0x3f8, 0x1fc, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_GPIO_C__CAN2_TX IOMUX_PAD(0x3f8, 0x1fc, 0x16, 0, 0, PAD_CTL_PUS_22K_UP)
-
-#define MX25_PAD_GPIO_D__GPIO_D IOMUX_PAD(0x3fc, 0x200, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_GPIO_E__LD16 IOMUX_PAD(0x400, 0x204, 0x02, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_GPIO_D__CAN2_RX IOMUX_PAD(0x3fc, 0x200, 0x16, 0x484, 1, PAD_CTL_PUS_22K_UP)
-
-#define MX25_PAD_GPIO_E__GPIO_E IOMUX_PAD(0x400, 0x204, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_GPIO_F__LD17 IOMUX_PAD(0x404, 0x208, 0x02, 0, 0, PAD_CTL_SRE_FAST)
-#define MX25_PAD_GPIO_E__AUD7_TXD IOMUX_PAD(0x400, 0x204, 0x14, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_GPIO_F__GPIO_F IOMUX_PAD(0x404, 0x208, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_GPIO_F__AUD7_TXC IOMUX_PAD(0x404, 0x208, 0x14, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_EXT_ARMCLK__EXT_ARMCLK IOMUX_PAD(0x000, 0x20c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_EXT_ARMCLK__GPIO_3_15 IOMUX_PAD(0x000, 0x20c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_UPLL_BYPCLK__UPLL_BYPCLK IOMUX_PAD(0x000, 0x210, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UPLL_BYPCLK__GPIO_3_16 IOMUX_PAD(0x000, 0x210, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_VSTBY_REQ__VSTBY_REQ IOMUX_PAD(0x408, 0x214, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_VSTBY_REQ__AUD7_TXFS IOMUX_PAD(0x408, 0x214, 0x14, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_VSTBY_REQ__GPIO_3_17 IOMUX_PAD(0x408, 0x214, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_VSTBY_ACK__VSTBY_ACK IOMUX_PAD(0x40c, 0x218, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_VSTBY_ACK__GPIO_3_18 IOMUX_PAD(0x40c, 0x218, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_POWER_FAIL__POWER_FAIL IOMUX_PAD(0x410, 0x21c, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_POWER_FAIL__AUD7_RXD IOMUX_PAD(0x410, 0x21c, 0x14, 0x478, 1, NO_PAD_CTRL)
-#define MX25_PAD_POWER_FAIL__GPIO_3_19 IOMUX_PAD(0x410, 0x21c, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CLKO__CLKO IOMUX_PAD(0x414, 0x220, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CLKO__GPIO_2_21 IOMUX_PAD(0x414, 0x220, 0x15, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_BOOT_MODE0__BOOT_MODE0 IOMUX_PAD(0x000, 0x224, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_BOOT_MODE0__GPIO_4_30 IOMUX_PAD(0x000, 0x224, 0x05, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_BOOT_MODE1__BOOT_MODE1 IOMUX_PAD(0x000, 0x228, 0x00, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_BOOT_MODE1__GPIO_4_31 IOMUX_PAD(0x000, 0x228, 0x05, 0, 0, NO_PAD_CTRL)
-
-#define MX25_PAD_CTL_GRP_DVS_MISC IOMUX_PAD(0x418, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_FEC IOMUX_PAD(0x41c, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DVS_JTAG IOMUX_PAD(0x420, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_NFC IOMUX_PAD(0x424, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_CSI IOMUX_PAD(0x428, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_WEIM IOMUX_PAD(0x42c, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_DDR IOMUX_PAD(0x430, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DVS_CRM IOMUX_PAD(0x434, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_KPP IOMUX_PAD(0x438, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_SDHC1 IOMUX_PAD(0x43c, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_LCD IOMUX_PAD(0x440, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_UART IOMUX_PAD(0x444, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DVS_NFC IOMUX_PAD(0x448, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DVS_CSI IOMUX_PAD(0x44c, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DSE_CSPI1 IOMUX_PAD(0x450, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DDRTYPE IOMUX_PAD(0x454, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DVS_SDHC1 IOMUX_PAD(0x458, 0x000, 0, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CTL_GRP_DVS_LCD IOMUX_PAD(0x45c, 0x000, 0, 0, 0, NO_PAD_CTRL)
-
-#endif /* __MACH_IOMUX_MX25_H__ */
diff --git a/arch/arm/mach-imx/iomux-mx3.h b/arch/arm/mach-imx/iomux-mx3.h
index 0a5adba61e0b..2e4a0ddca76c 100644
--- a/arch/arm/mach-imx/iomux-mx3.h
+++ b/arch/arm/mach-imx/iomux-mx3.h
@@ -114,7 +114,7 @@ enum iomux_gp_func {
*/
int mxc_iomux_alloc_pin(unsigned int pin, const char *label);
/*
- * setups mutliple pins
+ * setups multiple pins
* convenient way to call the above function with tables
*/
int mxc_iomux_setup_multiple_pins(const unsigned int *pin_list, unsigned count,
diff --git a/arch/arm/mach-imx/iomux-v3.c b/arch/arm/mach-imx/iomux-v3.c
index d61f9606fc56..a53b2e64f98d 100644
--- a/arch/arm/mach-imx/iomux-v3.c
+++ b/arch/arm/mach-imx/iomux-v3.c
@@ -56,9 +56,10 @@ int mxc_iomux_v3_setup_pad(iomux_v3_cfg_t pad)
return 0;
}
-int mxc_iomux_v3_setup_multiple_pads(iomux_v3_cfg_t *pad_list, unsigned count)
+int mxc_iomux_v3_setup_multiple_pads(const iomux_v3_cfg_t *pad_list,
+ unsigned count)
{
- iomux_v3_cfg_t *p = pad_list;
+ const iomux_v3_cfg_t *p = pad_list;
int i;
int ret;
diff --git a/arch/arm/mach-imx/iomux-v3.h b/arch/arm/mach-imx/iomux-v3.h
index 2fa3b5430102..f79e165a3b3c 100644
--- a/arch/arm/mach-imx/iomux-v3.h
+++ b/arch/arm/mach-imx/iomux-v3.h
@@ -128,10 +128,11 @@ typedef u64 iomux_v3_cfg_t;
int mxc_iomux_v3_setup_pad(iomux_v3_cfg_t pad);
/*
- * setups mutliple pads
+ * setups multiple pads
* convenient way to call the above function with tables
*/
-int mxc_iomux_v3_setup_multiple_pads(iomux_v3_cfg_t *pad_list, unsigned count);
+int mxc_iomux_v3_setup_multiple_pads(const iomux_v3_cfg_t *pad_list,
+ unsigned count);
/*
* Initialise the iomux controller
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 62a6e02f4763..922ffd6ca039 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -75,7 +75,7 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
},
};
-static iomux_v3_cfg_t eukrea_cpuimx35_pads[] = {
+static const iomux_v3_cfg_t eukrea_cpuimx35_pads[] __initconst = {
/* UART1 */
MX35_PAD_CTS1__UART1_CTS,
MX35_PAD_RTS1__UART1_RTS,
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
deleted file mode 100644
index b2ee6e009fe4..000000000000
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2009 Sascha Hauer, <kernel@pengutronix.de>
- * Copyright 2010 Eric Bénard - Eukréa Electromatique, <eric@eukrea.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/ulpi.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-#include <asm/memory.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "devices-imx25.h"
-#include "ehci.h"
-#include "eukrea-baseboards.h"
-#include "hardware.h"
-#include "iomux-mx25.h"
-#include "mx25.h"
-
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
-static iomux_v3_cfg_t eukrea_cpuimx25_pads[] = {
- /* FEC - RMII */
- MX25_PAD_FEC_MDC__FEC_MDC,
- MX25_PAD_FEC_MDIO__FEC_MDIO,
- MX25_PAD_FEC_TDATA0__FEC_TDATA0,
- MX25_PAD_FEC_TDATA1__FEC_TDATA1,
- MX25_PAD_FEC_TX_EN__FEC_TX_EN,
- MX25_PAD_FEC_RDATA0__FEC_RDATA0,
- MX25_PAD_FEC_RDATA1__FEC_RDATA1,
- MX25_PAD_FEC_RX_DV__FEC_RX_DV,
- MX25_PAD_FEC_TX_CLK__FEC_TX_CLK,
- /* I2C1 */
- MX25_PAD_I2C1_CLK__I2C1_CLK,
- MX25_PAD_I2C1_DAT__I2C1_DAT,
-};
-
-static const struct fec_platform_data mx25_fec_pdata __initconst = {
- .phy = PHY_INTERFACE_MODE_RMII,
-};
-
-static const struct mxc_nand_platform_data
-eukrea_cpuimx25_nand_board_info __initconst = {
- .width = 1,
- .hw_ecc = 1,
- .flash_bbt = 1,
-};
-
-static const struct imxi2c_platform_data
-eukrea_cpuimx25_i2c0_data __initconst = {
- .bitrate = 100000,
-};
-
-static struct i2c_board_info eukrea_cpuimx25_i2c_devices[] = {
- {
- I2C_BOARD_INFO("pcf8563", 0x51),
- },
-};
-
-static int eukrea_cpuimx25_otg_init(struct platform_device *pdev)
-{
- return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
-}
-
-static const struct mxc_usbh_platform_data otg_pdata __initconst = {
- .init = eukrea_cpuimx25_otg_init,
- .portsc = MXC_EHCI_MODE_UTMI,
-};
-
-static int eukrea_cpuimx25_usbh2_init(struct platform_device *pdev)
-{
- return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI |
- MXC_EHCI_INTERNAL_PHY | MXC_EHCI_IPPUE_DOWN);
-}
-
-static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
- .init = eukrea_cpuimx25_usbh2_init,
- .portsc = MXC_EHCI_MODE_SERIAL,
-};
-
-static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
- .operating_mode = FSL_USB2_DR_DEVICE,
- .phy_mode = FSL_USB2_PHY_UTMI,
- .workaround = FLS_USB2_WORKAROUND_ENGCM09152,
-};
-
-static bool otg_mode_host __initdata;
-
-static int __init eukrea_cpuimx25_otg_mode(char *options)
-{
- if (!strcmp(options, "host"))
- otg_mode_host = true;
- else if (!strcmp(options, "device"))
- otg_mode_host = false;
- else
- pr_info("otg_mode neither \"host\" nor \"device\". "
- "Defaulting to device\n");
- return 1;
-}
-__setup("otg_mode=", eukrea_cpuimx25_otg_mode);
-
-static void __init eukrea_cpuimx25_init(void)
-{
- imx25_soc_init();
-
- if (mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx25_pads,
- ARRAY_SIZE(eukrea_cpuimx25_pads)))
- printk(KERN_ERR "error setting cpuimx25 pads !\n");
-
- imx25_add_imx_uart0(&uart_pdata);
- imx25_add_mxc_nand(&eukrea_cpuimx25_nand_board_info);
- imx25_add_imxdi_rtc();
- imx25_add_fec(&mx25_fec_pdata);
- imx25_add_imx2_wdt();
-
- i2c_register_board_info(0, eukrea_cpuimx25_i2c_devices,
- ARRAY_SIZE(eukrea_cpuimx25_i2c_devices));
- imx25_add_imx_i2c0(&eukrea_cpuimx25_i2c0_data);
-
- if (otg_mode_host)
- imx25_add_mxc_ehci_otg(&otg_pdata);
- else
- imx25_add_fsl_usb2_udc(&otg_device_pdata);
-
- imx25_add_mxc_ehci_hs(&usbh2_pdata);
-
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
- eukrea_mbimxsd25_baseboard_init();
-#endif
-}
-
-static void __init eukrea_cpuimx25_timer_init(void)
-{
- mx25_clocks_init();
-}
-
-MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
- /* Maintainer: Eukrea Electromatique */
- .atag_offset = 0x100,
- .map_io = mx25_map_io,
- .init_early = imx25_init_early,
- .init_irq = mx25_init_irq,
- .init_time = eukrea_cpuimx25_timer_init,
- .init_machine = eukrea_cpuimx25_init,
- .restart = mxc_restart,
-MACHINE_END
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/mach-imx25.c
index 25defbdb06c4..9379fd0a7b4d 100644
--- a/arch/arm/mach-imx/imx25-dt.c
+++ b/arch/arm/mach-imx/mach-imx25.c
@@ -10,12 +10,29 @@
*/
#include <linux/irq.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include "common.h"
-#include "mx25.h"
+#include "hardware.h"
+
+static void __init imx25_init_early(void)
+{
+ mxc_set_cpu_type(MXC_CPU_MX25);
+}
+
+static void __init mx25_init_irq(void)
+{
+ struct device_node *np;
+ void __iomem *avic_base;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,avic");
+ avic_base = of_iomap(np, 0);
+ BUG_ON(!avic_base);
+ mxc_init_irq(avic_base);
+}
static const char * const imx25_dt_board_compat[] __initconst = {
"fsl,imx25",
@@ -23,7 +40,6 @@ static const char * const imx25_dt_board_compat[] __initconst = {
};
DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
- .map_io = mx25_map_io,
.init_early = imx25_init_early,
.init_irq = mx25_init_irq,
.dt_compat = imx25_dt_board_compat,
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 9de3412af406..3ab61549ce0f 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -388,10 +388,10 @@ static void __init imx6q_map_io(void)
static void __init imx6q_init_irq(void)
{
+ imx_gpc_check_dt();
imx_init_revision_from_anatop();
imx_init_l2cache();
imx_src_init();
- imx_gpc_init();
irqchip_init();
}
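
Because the GPC is now registered via OF_DECLARE_2(irqchip, ...) above,
irqchip_init() probes it directly from the device tree, so the explicit
imx_gpc_init() call drops out of the machine code and imx_gpc_check_dt() merely
warns when an outdated DT lacks the interrupt-controller property. Wake-up
sources are then configured through the regular IRQ API, which the stacked
imx_gpc_chip routes into imx_gpc_irq_set_wake(). A hedged sketch of a
wakeup-capable consumer, with purely illustrative foo_* names:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_wake_handler(int irq, void *data)
{
	/* acknowledge the wake event in the device here */
	return IRQ_HANDLED;
}

static int foo_enable_wakeup(struct device *dev, unsigned int irq, void *priv)
{
	int ret;

	ret = devm_request_irq(dev, irq, foo_wake_handler, 0, "foo-wake", priv);
	if (ret)
		return ret;

	/*
	 * With the GPC stacked between the SPIs and the GIC, this call lands
	 * in imx_gpc_irq_set_wake(), which records the bit in gpc_wake_irqs[]
	 * for use across suspend/resume.
	 */
	return enable_irq_wake(irq);
}

The same imx_gpc_check_dt() + irqchip_init() pairing is repeated for i.MX6SL and
i.MX6SX in the two hunks below.
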
diff --git a/arch/arm/mach-imx/mach-imx6sl.c b/arch/arm/mach-imx/mach-imx6sl.c
index 24bfaaf944c8..12a1b098fc6a 100644
--- a/arch/arm/mach-imx/mach-imx6sl.c
+++ b/arch/arm/mach-imx/mach-imx6sl.c
@@ -61,10 +61,10 @@ static void __init imx6sl_init_machine(void)
static void __init imx6sl_init_irq(void)
{
+ imx_gpc_check_dt();
imx_init_revision_from_anatop();
imx_init_l2cache();
imx_src_init();
- imx_gpc_init();
irqchip_init();
}
diff --git a/arch/arm/mach-imx/mach-imx6sx.c b/arch/arm/mach-imx/mach-imx6sx.c
index 66988eb6a3a4..f17b7004c24b 100644
--- a/arch/arm/mach-imx/mach-imx6sx.c
+++ b/arch/arm/mach-imx/mach-imx6sx.c
@@ -81,10 +81,10 @@ static void __init imx6sx_init_machine(void)
static void __init imx6sx_init_irq(void)
{
+ imx_gpc_check_dt();
imx_init_revision_from_anatop();
imx_init_l2cache();
imx_src_init();
- imx_gpc_init();
irqchip_init();
}
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
deleted file mode 100644
index 0d01e367b062..000000000000
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright 2009 Sascha Hauer, <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- */
-
-/*
- * This machine is known as:
- * - i.MX25 3-Stack Development System
- * - i.MX25 Platform Development Kit (i.MX25 PDK)
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/usb/otg.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-#include <asm/memory.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "devices-imx25.h"
-#include "ehci.h"
-#include "hardware.h"
-#include "iomux-mx25.h"
-#include "mx25.h"
-
-#define MX25PDK_CAN_PWDN IMX_GPIO_NR(4, 6)
-
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
-static iomux_v3_cfg_t mx25pdk_pads[] = {
- MX25_PAD_FEC_MDC__FEC_MDC,
- MX25_PAD_FEC_MDIO__FEC_MDIO,
- MX25_PAD_FEC_TDATA0__FEC_TDATA0,
- MX25_PAD_FEC_TDATA1__FEC_TDATA1,
- MX25_PAD_FEC_TX_EN__FEC_TX_EN,
- MX25_PAD_FEC_RDATA0__FEC_RDATA0,
- MX25_PAD_FEC_RDATA1__FEC_RDATA1,
- MX25_PAD_FEC_RX_DV__FEC_RX_DV,
- MX25_PAD_FEC_TX_CLK__FEC_TX_CLK,
- MX25_PAD_A17__GPIO_2_3, /* FEC_EN, GPIO 35 */
- MX25_PAD_D12__GPIO_4_8, /* FEC_RESET_B, GPIO 104 */
-
- /* LCD */
- MX25_PAD_LD0__LD0,
- MX25_PAD_LD1__LD1,
- MX25_PAD_LD2__LD2,
- MX25_PAD_LD3__LD3,
- MX25_PAD_LD4__LD4,
- MX25_PAD_LD5__LD5,
- MX25_PAD_LD6__LD6,
- MX25_PAD_LD7__LD7,
- MX25_PAD_LD8__LD8,
- MX25_PAD_LD9__LD9,
- MX25_PAD_LD10__LD10,
- MX25_PAD_LD11__LD11,
- MX25_PAD_LD12__LD12,
- MX25_PAD_LD13__LD13,
- MX25_PAD_LD14__LD14,
- MX25_PAD_LD15__LD15,
- MX25_PAD_GPIO_E__LD16,
- MX25_PAD_GPIO_F__LD17,
- MX25_PAD_HSYNC__HSYNC,
- MX25_PAD_VSYNC__VSYNC,
- MX25_PAD_LSCLK__LSCLK,
- MX25_PAD_OE_ACD__OE_ACD,
- MX25_PAD_CONTRAST__CONTRAST,
-
- /* Keypad */
- MX25_PAD_KPP_ROW0__KPP_ROW0,
- MX25_PAD_KPP_ROW1__KPP_ROW1,
- MX25_PAD_KPP_ROW2__KPP_ROW2,
- MX25_PAD_KPP_ROW3__KPP_ROW3,
- MX25_PAD_KPP_COL0__KPP_COL0,
- MX25_PAD_KPP_COL1__KPP_COL1,
- MX25_PAD_KPP_COL2__KPP_COL2,
- MX25_PAD_KPP_COL3__KPP_COL3,
-
- /* SD1 */
- MX25_PAD_SD1_CMD__SD1_CMD,
- MX25_PAD_SD1_CLK__SD1_CLK,
- MX25_PAD_SD1_DATA0__SD1_DATA0,
- MX25_PAD_SD1_DATA1__SD1_DATA1,
- MX25_PAD_SD1_DATA2__SD1_DATA2,
- MX25_PAD_SD1_DATA3__SD1_DATA3,
- MX25_PAD_A14__GPIO_2_0, /* WriteProtect */
- MX25_PAD_A15__GPIO_2_1, /* CardDetect */
-
- /* I2C1 */
- MX25_PAD_I2C1_CLK__I2C1_CLK,
- MX25_PAD_I2C1_DAT__I2C1_DAT,
-
- /* CAN1 */
- MX25_PAD_GPIO_A__CAN1_TX,
- MX25_PAD_GPIO_B__CAN1_RX,
- MX25_PAD_D14__GPIO_4_6, /* CAN_PWDN */
-};
-
-static const struct fec_platform_data mx25_fec_pdata __initconst = {
- .phy = PHY_INTERFACE_MODE_RMII,
-};
-
-#define FEC_ENABLE_GPIO IMX_GPIO_NR(2, 3)
-#define FEC_RESET_B_GPIO IMX_GPIO_NR(4, 8)
-
-static void __init mx25pdk_fec_reset(void)
-{
- gpio_request(FEC_ENABLE_GPIO, "FEC PHY enable");
- gpio_request(FEC_RESET_B_GPIO, "FEC PHY reset");
-
- gpio_direction_output(FEC_ENABLE_GPIO, 0); /* drop PHY power */
- gpio_direction_output(FEC_RESET_B_GPIO, 0); /* assert reset */
- udelay(2);
-
- /* turn on PHY power and lift reset */
- gpio_set_value(FEC_ENABLE_GPIO, 1);
- gpio_set_value(FEC_RESET_B_GPIO, 1);
-}
-
-static const struct mxc_nand_platform_data
-mx25pdk_nand_board_info __initconst = {
- .width = 1,
- .hw_ecc = 1,
- .flash_bbt = 1,
-};
-
-static struct imx_fb_videomode mx25pdk_modes[] = {
- {
- .mode = {
- .name = "CRT-VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39683,
- .left_margin = 45,
- .right_margin = 114,
- .upper_margin = 33,
- .lower_margin = 11,
- .hsync_len = 1,
- .vsync_len = 1,
- },
- .bpp = 16,
- .pcr = 0xFA208B80,
- },
-};
-
-static const struct imx_fb_platform_data mx25pdk_fb_pdata __initconst = {
- .mode = mx25pdk_modes,
- .num_modes = ARRAY_SIZE(mx25pdk_modes),
- .pwmr = 0x00A903FF,
- .lscr1 = 0x00120300,
- .dmacr = 0x00020010,
-};
-
-static const uint32_t mx25pdk_keymap[] = {
- KEY(0, 0, KEY_UP),
- KEY(0, 1, KEY_DOWN),
- KEY(0, 2, KEY_VOLUMEDOWN),
- KEY(0, 3, KEY_HOME),
- KEY(1, 0, KEY_RIGHT),
- KEY(1, 1, KEY_LEFT),
- KEY(1, 2, KEY_ENTER),
- KEY(1, 3, KEY_VOLUMEUP),
- KEY(2, 0, KEY_F6),
- KEY(2, 1, KEY_F8),
- KEY(2, 2, KEY_F9),
- KEY(2, 3, KEY_F10),
- KEY(3, 0, KEY_F1),
- KEY(3, 1, KEY_F2),
- KEY(3, 2, KEY_F3),
- KEY(3, 3, KEY_POWER),
-};
-
-static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
- .keymap = mx25pdk_keymap,
- .keymap_size = ARRAY_SIZE(mx25pdk_keymap),
-};
-
-static int mx25pdk_usbh2_init(struct platform_device *pdev)
-{
- return mx25_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY);
-}
-
-static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
- .init = mx25pdk_usbh2_init,
- .portsc = MXC_EHCI_MODE_SERIAL,
-};
-
-static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
- .operating_mode = FSL_USB2_DR_DEVICE,
- .phy_mode = FSL_USB2_PHY_UTMI,
-};
-
-static const struct imxi2c_platform_data mx25_3ds_i2c0_data __initconst = {
- .bitrate = 100000,
-};
-
-#define SD1_GPIO_WP IMX_GPIO_NR(2, 0)
-#define SD1_GPIO_CD IMX_GPIO_NR(2, 1)
-
-static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = {
- .wp_gpio = SD1_GPIO_WP,
- .cd_gpio = SD1_GPIO_CD,
- .wp_type = ESDHC_WP_GPIO,
- .cd_type = ESDHC_CD_GPIO,
-};
-
-static void __init mx25pdk_init(void)
-{
- imx25_soc_init();
-
- mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
- ARRAY_SIZE(mx25pdk_pads));
-
- imx25_add_imx_uart0(&uart_pdata);
- imx25_add_fsl_usb2_udc(&otg_device_pdata);
- imx25_add_mxc_ehci_hs(&usbh2_pdata);
- imx25_add_mxc_nand(&mx25pdk_nand_board_info);
- imx25_add_imxdi_rtc();
- imx25_add_imx_fb(&mx25pdk_fb_pdata);
- imx25_add_imx2_wdt();
-
- mx25pdk_fec_reset();
- imx25_add_fec(&mx25_fec_pdata);
- imx25_add_imx_keypad(&mx25pdk_keymap_data);
-
- imx25_add_sdhci_esdhc_imx(0, &mx25pdk_esdhc_pdata);
- imx25_add_imx_i2c0(&mx25_3ds_i2c0_data);
-
- gpio_request_one(MX25PDK_CAN_PWDN, GPIOF_OUT_INIT_LOW, "can-pwdn");
- imx25_add_flexcan0();
-}
-
-static void __init mx25pdk_timer_init(void)
-{
- mx25_clocks_init();
-}
-
-MACHINE_START(MX25_3DS, "Freescale MX25PDK (3DS)")
- /* Maintainer: Freescale Semiconductor, Inc. */
- .atag_offset = 0x100,
- .map_io = mx25_map_io,
- .init_early = imx25_init_early,
- .init_irq = mx25_init_irq,
- .init_time = mx25pdk_timer_init,
- .init_machine = mx25pdk_init,
- .restart = mxc_restart,
-MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 72cd77d21f63..7e315f00648d 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -166,7 +166,7 @@ static struct platform_device *devices[] __initdata = {
&mx35pdk_flash,
};
-static iomux_v3_cfg_t mx35pdk_pads[] = {
+static const iomux_v3_cfg_t mx35pdk_pads[] __initconst = {
/* UART1 */
MX35_PAD_CTS1__UART1_CTS,
MX35_PAD_RTS1__UART1_RTS,
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index b623bcaca76c..e447e59c0604 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -129,7 +129,7 @@ static struct platform_device *devices[] __initdata = {
&pcm043_flash,
};
-static iomux_v3_cfg_t pcm043_pads[] = {
+static const iomux_v3_cfg_t pcm043_pads[] __initconst = {
/* UART1 */
MX35_PAD_CTS1__UART1_CTS,
MX35_PAD_RTS1__UART1_RTS,
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index 97836e94451c..27a8f7e3ec08 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -161,7 +161,7 @@ static struct i2c_board_info vpr200_i2c_devices[] = {
}
};
-static iomux_v3_cfg_t vpr200_pads[] = {
+static const iomux_v3_cfg_t vpr200_pads[] __initconst = {
/* UART1 */
MX35_PAD_TXD1__UART1_TXD_MUX,
MX35_PAD_RXD1__UART1_RXD_MUX,
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
deleted file mode 100644
index 5211f62c624e..000000000000
--- a/arch/arm/mach-imx/mm-imx25.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 1999,2000 Arm Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- * Copyright (C) 2002 Shane Nay (shane@minirl.com)
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * - add MX31 specific definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/pinctrl/machine.h>
-
-#include <asm/pgtable.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "devices/devices-common.h"
-#include "hardware.h"
-#include "iomux-v3.h"
-#include "mx25.h"
-
-/*
- * This table defines static virtual address mappings for I/O regions.
- * These are the mappings common across all MX25 boards.
- */
-static struct map_desc mx25_io_desc[] __initdata = {
- imx_map_entry(MX25, AVIC, MT_DEVICE_NONSHARED),
- imx_map_entry(MX25, AIPS1, MT_DEVICE_NONSHARED),
- imx_map_entry(MX25, AIPS2, MT_DEVICE_NONSHARED),
-};
-
-/*
- * This function initializes the memory map. It is called during the
- * system startup to create static physical to virtual memory mappings
- * for the IO modules.
- */
-void __init mx25_map_io(void)
-{
- iotable_init(mx25_io_desc, ARRAY_SIZE(mx25_io_desc));
-}
-
-void __init imx25_init_early(void)
-{
- mxc_set_cpu_type(MXC_CPU_MX25);
- mxc_iomux_v3_init(MX25_IO_ADDRESS(MX25_IOMUXC_BASE_ADDR));
-}
-
-void __init mx25_init_irq(void)
-{
- mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR));
-}
-
-static struct sdma_platform_data imx25_sdma_pdata __initdata = {
- .fw_name = "sdma-imx25.bin",
-};
-
-static const struct resource imx25_audmux_res[] __initconst = {
- DEFINE_RES_MEM(MX25_AUDMUX_BASE_ADDR, SZ_16K),
-};
-
-void __init imx25_soc_init(void)
-{
- mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR));
- mxc_device_init();
-
- /* i.mx25 has the i.mx35 type gpio */
- mxc_register_gpio("imx35-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0);
- mxc_register_gpio("imx35-gpio", 1, MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0);
- mxc_register_gpio("imx35-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0);
- mxc_register_gpio("imx35-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0);
-
- pinctrl_provide_dummies();
- /* i.mx25 has the i.mx35 type sdma */
- imx_add_imx_sdma("imx35-sdma", MX25_SDMA_BASE_ADDR, MX25_INT_SDMA, &imx25_sdma_pdata);
- /* i.mx25 has the i.mx31 type audmux */
- platform_device_register_simple("imx31-audmux", 0, imx25_audmux_res,
- ARRAY_SIZE(imx25_audmux_res));
-}
diff --git a/arch/arm/mach-imx/mx25.h b/arch/arm/mach-imx/mx25.h
deleted file mode 100644
index ec466400a200..000000000000
--- a/arch/arm/mach-imx/mx25.h
+++ /dev/null
@@ -1,117 +0,0 @@
-#ifndef __MACH_MX25_H__
-#define __MACH_MX25_H__
-
-#define MX25_AIPS1_BASE_ADDR 0x43f00000
-#define MX25_AIPS1_SIZE SZ_1M
-#define MX25_AIPS2_BASE_ADDR 0x53f00000
-#define MX25_AIPS2_SIZE SZ_1M
-#define MX25_AVIC_BASE_ADDR 0x68000000
-#define MX25_AVIC_SIZE SZ_1M
-
-#define MX25_I2C1_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x80000)
-#define MX25_I2C3_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x84000)
-#define MX25_CAN1_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x88000)
-#define MX25_CAN2_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x8c000)
-#define MX25_I2C2_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x98000)
-#define MX25_CSPI1_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0xa4000)
-#define MX25_IOMUXC_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0xac000)
-
-#define MX25_CRM_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0x80000)
-#define MX25_GPT1_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0x90000)
-#define MX25_GPIO4_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0x9c000)
-#define MX25_PWM2_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xa0000)
-#define MX25_GPIO3_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xa4000)
-#define MX25_PWM3_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xa8000)
-#define MX25_PWM4_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xc8000)
-#define MX25_GPIO1_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xcc000)
-#define MX25_GPIO2_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xd0000)
-#define MX25_WDOG_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xdc000)
-#define MX25_PWM1_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0xe0000)
-
-#define MX25_UART1_BASE_ADDR 0x43f90000
-#define MX25_UART2_BASE_ADDR 0x43f94000
-#define MX25_AUDMUX_BASE_ADDR 0x43fb0000
-#define MX25_UART3_BASE_ADDR 0x5000c000
-#define MX25_UART4_BASE_ADDR 0x50008000
-#define MX25_UART5_BASE_ADDR 0x5002c000
-
-#define MX25_CSPI3_BASE_ADDR 0x50004000
-#define MX25_CSPI2_BASE_ADDR 0x50010000
-#define MX25_FEC_BASE_ADDR 0x50038000
-#define MX25_SSI2_BASE_ADDR 0x50014000
-#define MX25_SSI1_BASE_ADDR 0x50034000
-#define MX25_NFC_BASE_ADDR 0xbb000000
-#define MX25_IIM_BASE_ADDR 0x53ff0000
-#define MX25_DRYICE_BASE_ADDR 0x53ffc000
-#define MX25_ESDHC1_BASE_ADDR 0x53fb4000
-#define MX25_ESDHC2_BASE_ADDR 0x53fb8000
-#define MX25_LCDC_BASE_ADDR 0x53fbc000
-#define MX25_KPP_BASE_ADDR 0x43fa8000
-#define MX25_SDMA_BASE_ADDR 0x53fd4000
-#define MX25_USB_BASE_ADDR 0x53ff4000
-#define MX25_USB_OTG_BASE_ADDR (MX25_USB_BASE_ADDR + 0x0000)
-/*
- * The reference manual (IMX25RM, Rev. 1, 06/2009) specifies an offset of 0x200
- * for the host controller. Early documentation drafts specified 0x400 and
- * Freescale internal sources confirm only the latter value to work.
- */
-#define MX25_USB_HS_BASE_ADDR (MX25_USB_BASE_ADDR + 0x0400)
-#define MX25_CSI_BASE_ADDR 0x53ff8000
-
-#define MX25_IO_P2V(x) IMX_IO_P2V(x)
-#define MX25_IO_ADDRESS(x) IOMEM(MX25_IO_P2V(x))
-
-/*
- * Interrupt numbers
- */
-#include <asm/irq.h>
-#define MX25_INT_CSPI3 (NR_IRQS_LEGACY + 0)
-#define MX25_INT_I2C1 (NR_IRQS_LEGACY + 3)
-#define MX25_INT_I2C2 (NR_IRQS_LEGACY + 4)
-#define MX25_INT_UART4 (NR_IRQS_LEGACY + 5)
-#define MX25_INT_ESDHC2 (NR_IRQS_LEGACY + 8)
-#define MX25_INT_ESDHC1 (NR_IRQS_LEGACY + 9)
-#define MX25_INT_I2C3 (NR_IRQS_LEGACY + 10)
-#define MX25_INT_SSI2 (NR_IRQS_LEGACY + 11)
-#define MX25_INT_SSI1 (NR_IRQS_LEGACY + 12)
-#define MX25_INT_CSPI2 (NR_IRQS_LEGACY + 13)
-#define MX25_INT_CSPI1 (NR_IRQS_LEGACY + 14)
-#define MX25_INT_GPIO3 (NR_IRQS_LEGACY + 16)
-#define MX25_INT_CSI (NR_IRQS_LEGACY + 17)
-#define MX25_INT_UART3 (NR_IRQS_LEGACY + 18)
-#define MX25_INT_GPIO4 (NR_IRQS_LEGACY + 23)
-#define MX25_INT_KPP (NR_IRQS_LEGACY + 24)
-#define MX25_INT_DRYICE (NR_IRQS_LEGACY + 25)
-#define MX25_INT_PWM1 (NR_IRQS_LEGACY + 26)
-#define MX25_INT_UART2 (NR_IRQS_LEGACY + 32)
-#define MX25_INT_NFC (NR_IRQS_LEGACY + 33)
-#define MX25_INT_SDMA (NR_IRQS_LEGACY + 34)
-#define MX25_INT_USB_HS (NR_IRQS_LEGACY + 35)
-#define MX25_INT_PWM2 (NR_IRQS_LEGACY + 36)
-#define MX25_INT_USB_OTG (NR_IRQS_LEGACY + 37)
-#define MX25_INT_LCDC (NR_IRQS_LEGACY + 39)
-#define MX25_INT_UART5 (NR_IRQS_LEGACY + 40)
-#define MX25_INT_PWM3 (NR_IRQS_LEGACY + 41)
-#define MX25_INT_PWM4 (NR_IRQS_LEGACY + 42)
-#define MX25_INT_CAN1 (NR_IRQS_LEGACY + 43)
-#define MX25_INT_CAN2 (NR_IRQS_LEGACY + 44)
-#define MX25_INT_UART1 (NR_IRQS_LEGACY + 45)
-#define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51)
-#define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52)
-#define MX25_INT_GPT1 (NR_IRQS_LEGACY + 54)
-#define MX25_INT_FEC (NR_IRQS_LEGACY + 57)
-
-#define MX25_DMA_REQ_SSI2_RX1 22
-#define MX25_DMA_REQ_SSI2_TX1 23
-#define MX25_DMA_REQ_SSI2_RX0 24
-#define MX25_DMA_REQ_SSI2_TX0 25
-#define MX25_DMA_REQ_SSI1_RX1 26
-#define MX25_DMA_REQ_SSI1_TX1 27
-#define MX25_DMA_REQ_SSI1_RX0 28
-#define MX25_DMA_REQ_SSI1_TX0 29
-
-#ifndef __ASSEMBLY__
-extern int mx25_revision(void);
-#endif
-
-#endif /* ifndef __MACH_MX25_H__ */
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 46fd695203c7..6a7c6fc780cc 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -310,10 +310,12 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
* Low-Power mode.
* 3) Software should mask IRQ #32 right after CCM Low-Power mode
* is set (set bits 0-1 of CCM_CLPCR).
+ *
+ * Note that IRQ #32 is GIC SPI #0.
*/
- imx_gpc_hwirq_unmask(32);
+ imx_gpc_hwirq_unmask(0);
writel_relaxed(val, ccm_base + CLPCR);
- imx_gpc_hwirq_mask(32);
+ imx_gpc_hwirq_mask(0);
return 0;
}
diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig
index f7e463ca0287..9f59e58da3a4 100644
--- a/arch/arm/mach-mediatek/Kconfig
+++ b/arch/arm/mach-mediatek/Kconfig
@@ -1,6 +1,7 @@
menuconfig ARCH_MEDIATEK
bool "Mediatek MT65xx & MT81xx SoC" if ARCH_MULTI_V7
select ARM_GIC
+ select PINCTRL
select MTK_TIMER
help
Support for Mediatek MT65xx & MT81xx SoCs
diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig
index 18301dc9d2e7..0743e2059645 100644
--- a/arch/arm/mach-meson/Kconfig
+++ b/arch/arm/mach-meson/Kconfig
@@ -1,8 +1,11 @@
menuconfig ARCH_MESON
bool "Amlogic Meson SoCs" if ARCH_MULTI_V7
+ select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
select ARM_GIC
select CACHE_L2X0
+ select PINCTRL
+ select PINCTRL_MESON
if ARCH_MESON
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
deleted file mode 100644
index a6b50e62a495..000000000000
--- a/arch/arm/mach-msm/Kconfig
+++ /dev/null
@@ -1,109 +0,0 @@
-if ARCH_MSM
-
-choice
- prompt "Qualcomm MSM SoC Type"
- default ARCH_MSM7X00A
- depends on ARCH_MSM
-
-config ARCH_MSM7X00A
- bool "MSM7x00A / MSM7x01A"
- select ARCH_MSM_ARM11
- select CPU_V6
- select GPIO_MSM_V1
- select MACH_TROUT if !MACH_HALIBUT
- select MSM_PROC_COMM
- select MSM_SMD
- select CLKSRC_QCOM
- select MSM_SMD_PKG3
-
-config ARCH_MSM7X30
- bool "MSM7x30"
- select ARCH_MSM_SCORPION
- select CPU_V7
- select GPIO_MSM_V1
- select MACH_MSM7X30_SURF # if !
- select MSM_GPIOMUX
- select MSM_PROC_COMM
- select MSM_SMD
- select CLKSRC_QCOM
- select MSM_VIC
-
-config ARCH_QSD8X50
- bool "QSD8X50"
- select ARCH_MSM_SCORPION
- select CPU_V7
- select GPIO_MSM_V1
- select MACH_QSD8X50_SURF if !MACH_QSD8X50A_ST1_5
- select MSM_GPIOMUX
- select MSM_PROC_COMM
- select MSM_SMD
- select CLKSRC_QCOM
- select MSM_VIC
-
-endchoice
-
-config MSM_SOC_REV_A
- bool
-
-config ARCH_MSM_ARM11
- bool
-
-config ARCH_MSM_SCORPION
- bool
-
-config MSM_VIC
- bool
-
-menu "Qualcomm MSM Board Type"
- depends on ARCH_MSM
-
-config MACH_HALIBUT
- depends on ARCH_MSM
- depends on ARCH_MSM7X00A
- bool "Halibut Board (QCT SURF7201A)"
- help
- Support for the Qualcomm SURF7201A eval board.
-
-config MACH_TROUT
- depends on ARCH_MSM
- depends on ARCH_MSM7X00A
- bool "HTC Dream (aka trout)"
- help
- Support for the HTC Dream, T-Mobile G1, Android ADP1 devices.
-
-config MACH_MSM7X30_SURF
- depends on ARCH_MSM7X30
- bool "MSM7x30 SURF"
- help
- Support for the Qualcomm MSM7x30 SURF eval board.
-
-config MACH_QSD8X50_SURF
- depends on ARCH_QSD8X50
- bool "QSD8x50 SURF"
- help
- Support for the Qualcomm QSD8x50 SURF eval board.
-
-config MACH_QSD8X50A_ST1_5
- depends on ARCH_QSD8X50
- bool "QSD8x50A ST1.5"
- select MSM_SOC_REV_A
- help
- Support for the Qualcomm ST1.5.
-
-endmenu
-
-config MSM_SMD_PKG3
- bool
-
-config MSM_PROC_COMM
- bool
-
-config MSM_SMD
- bool
-
-config MSM_GPIOMUX
- bool
- help
- Support for MSM V1 TLMM GPIOMUX architecture.
-
-endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
deleted file mode 100644
index 27c078a568df..000000000000
--- a/arch/arm/mach-msm/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-obj-$(CONFIG_MSM_PROC_COMM) += clock.o
-
-obj-$(CONFIG_MSM_VIC) += irq-vic.o
-
-obj-$(CONFIG_ARCH_MSM7X00A) += irq.o
-obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
-
-obj-$(CONFIG_MSM_PROC_COMM) += proc_comm.o clock-pcom.o vreg.o
-
-obj-$(CONFIG_ARCH_MSM7X00A) += dma.o io.o
-obj-$(CONFIG_ARCH_MSM7X30) += dma.o io.o
-obj-$(CONFIG_ARCH_QSD8X50) += dma.o io.o
-
-obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
-obj-$(CONFIG_MSM_SMD) += last_radio_log.o
-
-obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o devices-msm7x00.o
-obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o board-trout-panel.o devices-msm7x00.o
-obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o devices-msm7x00.o
-obj-$(CONFIG_ARCH_MSM7X30) += board-msm7x30.o devices-msm7x30.o
-obj-$(CONFIG_ARCH_QSD8X50) += board-qsd8x50.o devices-qsd8x50.o
-obj-$(CONFIG_MSM_GPIOMUX) += gpiomux.o
-obj-$(CONFIG_ARCH_QSD8X50) += gpiomux-8x50.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
deleted file mode 100644
index 9b803a578b4d..000000000000
--- a/arch/arm/mach-msm/Makefile.boot
+++ /dev/null
@@ -1,3 +0,0 @@
- zreladdr-y += 0x10008000
-params_phys-y := 0x10000100
-initrd_phys-y := 0x10800000
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
deleted file mode 100644
index fc832040c6e9..000000000000
--- a/arch/arm/mach-msm/board-halibut.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/* linux/arch/arm/mach-msm/board-halibut.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/smc91x.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
-#include <asm/setup.h>
-
-#include <mach/irqs.h>
-#include <mach/msm_iomap.h>
-
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-
-#include "devices.h"
-#include "common.h"
-
-static struct resource smc91x_resources[] = {
- [0] = {
- .start = 0x9C004300,
- .end = 0x9C004400,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = MSM_GPIO_TO_INT(49),
- .end = MSM_GPIO_TO_INT(49),
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
- },
-};
-
-static struct smc91x_platdata smc91x_platdata = {
- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
-};
-
-static struct platform_device smc91x_device = {
- .name = "smc91x",
- .id = 0,
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
- .dev.platform_data = &smc91x_platdata,
-};
-
-static struct platform_device *devices[] __initdata = {
- &msm_clock_7x01a,
- &msm_device_gpio_7201,
- &msm_device_uart3,
- &msm_device_smd,
- &msm_device_nand,
- &msm_device_hsusb,
- &msm_device_i2c,
- &smc91x_device,
-};
-
-static void __init halibut_init_early(void)
-{
- arch_ioremap_caller = __msm_ioremap_caller;
-}
-
-static void __init halibut_init_irq(void)
-{
- msm_init_irq();
-}
-
-static void __init halibut_init(void)
-{
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-static void __init halibut_map_io(void)
-{
- msm_map_common_io();
-}
-
-static void __init halibut_init_late(void)
-{
- smd_debugfs_init();
-}
-
-MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
- .atag_offset = 0x100,
- .map_io = halibut_map_io,
- .init_early = halibut_init_early,
- .init_irq = halibut_init_irq,
- .init_machine = halibut_init,
- .init_late = halibut_init_late,
- .init_time = msm7x01_timer_init,
-MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
deleted file mode 100644
index 8f5ecdc4f3ce..000000000000
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/smsc911x.h>
-#include <linux/usb/msm_hsusb.h>
-#include <linux/clkdev.h>
-#include <linux/memblock.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/memory.h>
-#include <asm/setup.h>
-
-#include <mach/clk.h>
-#include <mach/msm_iomap.h>
-#include <mach/dma.h>
-
-#include <mach/vreg.h>
-#include "devices.h"
-#include "gpiomux.h"
-#include "proc_comm.h"
-#include "common.h"
-
-static void __init msm7x30_fixup(struct tag *tag, char **cmdline)
-{
- for (; tag->hdr.size; tag = tag_next(tag))
- if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
- tag->u.mem.start = 0;
- tag->u.mem.size += SZ_2M;
- }
-}
-
-static void __init msm7x30_reserve(void)
-{
- memblock_remove(0x0, SZ_2M);
-}
-
-static int hsusb_phy_init_seq[] = {
- 0x30, 0x32, /* Enable and set Pre-Emphasis Depth to 20% */
- 0x02, 0x36, /* Disable CDR Auto Reset feature */
- -1
-};
-
-static int hsusb_link_clk_reset(struct clk *link_clk, bool assert)
-{
- int ret;
-
- if (assert) {
- ret = clk_reset(link_clk, CLK_RESET_ASSERT);
- if (ret)
- pr_err("usb hs_clk assert failed\n");
- } else {
- ret = clk_reset(link_clk, CLK_RESET_DEASSERT);
- if (ret)
- pr_err("usb hs_clk deassert failed\n");
- }
- return ret;
-}
-
-static int hsusb_phy_clk_reset(struct clk *phy_clk)
-{
- int ret;
-
- ret = clk_reset(phy_clk, CLK_RESET_ASSERT);
- if (ret) {
- pr_err("usb phy clk assert failed\n");
- return ret;
- }
- usleep_range(10000, 12000);
- ret = clk_reset(phy_clk, CLK_RESET_DEASSERT);
- if (ret)
- pr_err("usb phy clk deassert failed\n");
- return ret;
-}
-
-static struct msm_otg_platform_data msm_otg_pdata = {
- .phy_init_seq = hsusb_phy_init_seq,
- .mode = USB_DR_MODE_PERIPHERAL,
- .otg_control = OTG_PHY_CONTROL,
- .link_clk_reset = hsusb_link_clk_reset,
- .phy_clk_reset = hsusb_phy_clk_reset,
-};
-
-struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
-#ifdef CONFIG_SERIAL_MSM_CONSOLE
- [49] = { /* UART2 RFR */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
- [50] = { /* UART2 CTS */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
- [51] = { /* UART2 RX */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
- [52] = { /* UART2 TX */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_2 | GPIOMUX_VALID,
- },
-#endif
-};
-
-static struct platform_device *devices[] __initdata = {
- &msm_clock_7x30,
- &msm_device_gpio_7x30,
-#if defined(CONFIG_SERIAL_MSM)
- &msm_device_uart2,
-#endif
- &msm_device_smd,
- &msm_device_otg,
- &msm_device_hsusb,
- &msm_device_hsusb_host,
-};
-
-static void __init msm7x30_init_irq(void)
-{
- msm_init_irq();
-}
-
-static void __init msm7x30_init(void)
-{
- msm_device_otg.dev.platform_data = &msm_otg_pdata;
- msm_device_hsusb.dev.parent = &msm_device_otg.dev;
- msm_device_hsusb_host.dev.parent = &msm_device_otg.dev;
-
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-static void __init msm7x30_map_io(void)
-{
- msm_map_msm7x30_io();
-}
-
-static void __init msm7x30_init_late(void)
-{
- smd_debugfs_init();
-}
-
-MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
- .atag_offset = 0x100,
- .fixup = msm7x30_fixup,
- .reserve = msm7x30_reserve,
- .map_io = msm7x30_map_io,
- .init_irq = msm7x30_init_irq,
- .init_machine = msm7x30_init,
- .init_late = msm7x30_init_late,
- .init_time = msm7x30_timer_init,
-MACHINE_END
-
-MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
- .atag_offset = 0x100,
- .fixup = msm7x30_fixup,
- .reserve = msm7x30_reserve,
- .map_io = msm7x30_map_io,
- .init_irq = msm7x30_init_irq,
- .init_machine = msm7x30_init,
- .init_late = msm7x30_init_late,
- .init_time = msm7x30_timer_init,
-MACHINE_END
-
-MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
- .atag_offset = 0x100,
- .fixup = msm7x30_fixup,
- .reserve = msm7x30_reserve,
- .map_io = msm7x30_map_io,
- .init_irq = msm7x30_init_irq,
- .init_machine = msm7x30_init,
- .init_late = msm7x30_init_late,
- .init_time = msm7x30_timer_init,
-MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
deleted file mode 100644
index 10016a3bc698..000000000000
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/usb/msm_hsusb.h>
-#include <linux/err.h>
-#include <linux/clkdev.h>
-#include <linux/smc91x.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-
-#include <mach/irqs.h>
-#include <mach/sirc.h>
-#include <mach/vreg.h>
-#include <mach/clk.h>
-#include <linux/platform_data/mmc-msm_sdcc.h>
-
-#include "devices.h"
-#include "common.h"
-
-static const resource_size_t qsd8x50_surf_smc91x_base __initconst = 0x70000300;
-static const unsigned qsd8x50_surf_smc91x_gpio __initconst = 156;
-
-/* Leave smc91x resources empty here, as we'll fill them in
- * at run-time: they vary from board to board, and the true
- * configuration won't be known until boot.
- */
-static struct resource smc91x_resources[] = {
- [0] = {
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
- },
-};
-
-static struct smc91x_platdata smc91x_platdata = {
- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
-};
-
-static struct platform_device smc91x_device = {
- .name = "smc91x",
- .id = 0,
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
- .dev.platform_data = &smc91x_platdata,
-};
-
-static int __init msm_init_smc91x(void)
-{
- if (machine_is_qsd8x50_surf()) {
- smc91x_resources[0].start = qsd8x50_surf_smc91x_base;
- smc91x_resources[0].end = qsd8x50_surf_smc91x_base + 0xff;
- smc91x_resources[1].start =
- gpio_to_irq(qsd8x50_surf_smc91x_gpio);
- smc91x_resources[1].end =
- gpio_to_irq(qsd8x50_surf_smc91x_gpio);
- platform_device_register(&smc91x_device);
- }
-
- return 0;
-}
-module_init(msm_init_smc91x);
-
-static int hsusb_phy_init_seq[] = {
- 0x08, 0x31, /* Increase HS Driver Amplitude */
- 0x20, 0x32, /* Enable and set Pre-Emphasis Depth to 10% */
- -1
-};
-
-static int hsusb_link_clk_reset(struct clk *link_clk, bool assert)
-{
- int ret;
-
- if (assert) {
- ret = clk_reset(link_clk, CLK_RESET_ASSERT);
- if (ret)
- pr_err("usb hs_clk assert failed\n");
- } else {
- ret = clk_reset(link_clk, CLK_RESET_DEASSERT);
- if (ret)
- pr_err("usb hs_clk deassert failed\n");
- }
- return ret;
-}
-
-static int hsusb_phy_clk_reset(struct clk *phy_clk)
-{
- int ret;
-
- ret = clk_reset(phy_clk, CLK_RESET_ASSERT);
- if (ret) {
- pr_err("usb phy clk assert failed\n");
- return ret;
- }
- usleep_range(10000, 12000);
- ret = clk_reset(phy_clk, CLK_RESET_DEASSERT);
- if (ret)
- pr_err("usb phy clk deassert failed\n");
- return ret;
-}
-
-static struct msm_otg_platform_data msm_otg_pdata = {
- .phy_init_seq = hsusb_phy_init_seq,
- .mode = USB_DR_MODE_PERIPHERAL,
- .otg_control = OTG_PHY_CONTROL,
- .link_clk_reset = hsusb_link_clk_reset,
- .phy_clk_reset = hsusb_phy_clk_reset,
-};
-
-static struct platform_device *devices[] __initdata = {
- &msm_clock_8x50,
- &msm_device_gpio_8x50,
- &msm_device_uart3,
- &msm_device_smd,
- &msm_device_otg,
- &msm_device_hsusb,
- &msm_device_hsusb_host,
-};
-
-static struct msm_mmc_gpio sdc1_gpio_cfg[] = {
- {51, "sdc1_dat_3"},
- {52, "sdc1_dat_2"},
- {53, "sdc1_dat_1"},
- {54, "sdc1_dat_0"},
- {55, "sdc1_cmd"},
- {56, "sdc1_clk"}
-};
-
-static struct vreg *vreg_mmc;
-static unsigned long vreg_sts;
-
-static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
-{
- int rc = 0;
- struct platform_device *pdev;
-
- pdev = container_of(dv, struct platform_device, dev);
-
- if (vdd == 0) {
- if (!vreg_sts)
- return 0;
-
- clear_bit(pdev->id, &vreg_sts);
-
- if (!vreg_sts) {
- rc = vreg_disable(vreg_mmc);
- if (rc)
- pr_err("vreg_mmc disable failed for slot "
- "%d: %d\n", pdev->id, rc);
- }
- return 0;
- }
-
- if (!vreg_sts) {
- rc = vreg_set_level(vreg_mmc, 2900);
- if (rc)
- pr_err("vreg_mmc set level failed for slot %d: %d\n",
- pdev->id, rc);
- rc = vreg_enable(vreg_mmc);
- if (rc)
- pr_err("vreg_mmc enable failed for slot %d: %d\n",
- pdev->id, rc);
- }
- set_bit(pdev->id, &vreg_sts);
- return 0;
-}
-
-static struct msm_mmc_gpio_data sdc1_gpio = {
- .gpio = sdc1_gpio_cfg,
- .size = ARRAY_SIZE(sdc1_gpio_cfg),
-};
-
-static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
- .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
- .translate_vdd = msm_sdcc_setup_power,
- .gpio_data = &sdc1_gpio,
-};
-
-static void __init qsd8x50_init_mmc(void)
-{
- vreg_mmc = vreg_get(NULL, "gp5");
-
- if (IS_ERR(vreg_mmc)) {
- pr_err("vreg get for vreg_mmc failed (%ld)\n",
- PTR_ERR(vreg_mmc));
- return;
- }
-
- msm_add_sdcc(1, &qsd8x50_sdc1_data, 0, 0);
-}
-
-static void __init qsd8x50_map_io(void)
-{
- msm_map_qsd8x50_io();
-}
-
-static void __init qsd8x50_init_irq(void)
-{
- msm_init_irq();
- msm_init_sirc();
-}
-
-static void __init qsd8x50_init(void)
-{
- msm_device_otg.dev.platform_data = &msm_otg_pdata;
- msm_device_hsusb.dev.parent = &msm_device_otg.dev;
- msm_device_hsusb_host.dev.parent = &msm_device_otg.dev;
- platform_add_devices(devices, ARRAY_SIZE(devices));
- qsd8x50_init_mmc();
-}
-
-static void __init qsd8x50_init_late(void)
-{
- smd_debugfs_init();
-}
-
-MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
- .atag_offset = 0x100,
- .map_io = qsd8x50_map_io,
- .init_irq = qsd8x50_init_irq,
- .init_machine = qsd8x50_init,
- .init_late = qsd8x50_init_late,
- .init_time = qsd8x50_timer_init,
-MACHINE_END
-
-MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5")
- .atag_offset = 0x100,
- .map_io = qsd8x50_map_io,
- .init_irq = qsd8x50_init_irq,
- .init_machine = qsd8x50_init,
- .init_late = qsd8x50_init_late,
- .init_time = qsd8x50_timer_init,
-MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
deleted file mode 100644
index e50967926dcd..000000000000
--- a/arch/arm/mach-msm/board-sapphire.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/* linux/arch/arm/mach-msm/board-sapphire.c
- * Copyright (C) 2007-2009 HTC Corporation.
- * Author: Thomas Tsai <thomas_tsai@htc.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
-*/
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/device.h>
-
-#include <linux/delay.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
-#include <mach/vreg.h>
-
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/setup.h>
-
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/memblock.h>
-
-#include "gpio_chip.h"
-#include "board-sapphire.h"
-#include "proc_comm.h"
-#include "devices.h"
-#include "common.h"
-
-void msm_init_irq(void);
-void msm_init_gpio(void);
-
-static struct platform_device *devices[] __initdata = {
- &msm_device_smd,
- &msm_device_dmov,
- &msm_device_nand,
- &msm_device_uart1,
- &msm_device_uart3,
-};
-
-void msm_timer_init(void);
-
-static void __init sapphire_init_irq(void)
-{
- msm_init_irq();
-}
-
-static void __init sapphire_init(void)
-{
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-static struct map_desc sapphire_io_desc[] __initdata = {
- {
- .virtual = SAPPHIRE_CPLD_BASE,
- .pfn = __phys_to_pfn(SAPPHIRE_CPLD_START),
- .length = SAPPHIRE_CPLD_SIZE,
- .type = MT_DEVICE_NONSHARED
- }
-};
-
-static void __init sapphire_fixup(struct tag *tags, char **cmdline)
-{
- int smi_sz = parse_tag_smi((const struct tag *)tags);
-
- if (smi_sz == 32) {
- memblock_add(PHYS_OFFSET, 84*SZ_1M);
- } else if (smi_sz == 64) {
- memblock_add(PHYS_OFFSET, 101*SZ_1M);
- } else {
- memblock_add(PHYS_OFFSET, 101*SZ_1M);
- /* Give a default value when not get smi size */
- smi_sz = 64;
- }
-}
-
-static void __init sapphire_map_io(void)
-{
- msm_map_common_io();
- iotable_init(sapphire_io_desc, ARRAY_SIZE(sapphire_io_desc));
- msm_clock_init();
-}
-
-static void __init sapphire_init_late(void)
-{
- smd_debugfs_init();
-}
-
-MACHINE_START(SAPPHIRE, "sapphire")
-/* Maintainer: Brian Swetland <swetland@google.com> */
- .atag_offset = 0x100,
- .fixup = sapphire_fixup,
- .map_io = sapphire_map_io,
- .init_irq = sapphire_init_irq,
- .init_machine = sapphire_init,
- .init_late = sapphire_init_late,
- .init_time = msm_timer_init,
-MACHINE_END
diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c
deleted file mode 100644
index 722ad63b7edc..000000000000
--- a/arch/arm/mach-msm/board-trout-gpio.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * linux/arch/arm/mach-msm/gpio.c
- *
- * Copyright (C) 2005 HP Labs
- * Copyright (C) 2008 Google, Inc.
- * Copyright (C) 2009 Pavel Machek <pavel@ucw.cz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-
-#include "board-trout.h"
-
-static uint8_t trout_int_mask[2] = {
- [0] = 0xff, /* mask all interrupts */
- [1] = 0xff,
-};
-static uint8_t trout_sleep_int_mask[] = {
- [0] = 0xff,
- [1] = 0xff,
-};
-
-struct msm_gpio_chip {
- struct gpio_chip chip;
- void __iomem *reg; /* Base of register bank */
- u8 shadow;
-};
-
-#define to_msm_gpio_chip(c) container_of(c, struct msm_gpio_chip, chip)
-
-static int msm_gpiolib_get(struct gpio_chip *chip, unsigned offset)
-{
- struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip);
- unsigned mask = 1 << offset;
-
- return !!(readb(msm_gpio->reg) & mask);
-}
-
-static void msm_gpiolib_set(struct gpio_chip *chip, unsigned offset, int val)
-{
- struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip);
- unsigned mask = 1 << offset;
-
- if (val)
- msm_gpio->shadow |= mask;
- else
- msm_gpio->shadow &= ~mask;
-
- writeb(msm_gpio->shadow, msm_gpio->reg);
-}
-
-static int msm_gpiolib_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- msm_gpiolib_set(chip, offset, 0);
- return 0;
-}
-
-static int msm_gpiolib_direction_output(struct gpio_chip *chip,
- unsigned offset, int val)
-{
- msm_gpiolib_set(chip, offset, val);
- return 0;
-}
-
-static int trout_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- return TROUT_GPIO_TO_INT(offset + chip->base);
-}
-
-#define TROUT_GPIO_BANK(name, reg_num, base_gpio, shadow_val) \
- { \
- .chip = { \
- .label = name, \
- .direction_input = msm_gpiolib_direction_input,\
- .direction_output = msm_gpiolib_direction_output, \
- .get = msm_gpiolib_get, \
- .set = msm_gpiolib_set, \
- .to_irq = trout_gpio_to_irq, \
- .base = base_gpio, \
- .ngpio = 8, \
- }, \
- .reg = reg_num + TROUT_CPLD_BASE, \
- .shadow = shadow_val, \
- }
-
-static struct msm_gpio_chip msm_gpio_banks[] = {
-#if defined(CONFIG_DEBUG_MSM_UART) && (CONFIG_DEBUG_UART_PHYS == 0xa9a00000)
- /* H2W pins <-> UART1 */
- TROUT_GPIO_BANK("MISC2", 0x00, TROUT_GPIO_MISC2_BASE, 0x40),
-#else
- /* H2W pins <-> UART3, Bluetooth <-> UART1 */
- TROUT_GPIO_BANK("MISC2", 0x00, TROUT_GPIO_MISC2_BASE, 0x80),
-#endif
- /* I2C pull */
- TROUT_GPIO_BANK("MISC3", 0x02, TROUT_GPIO_MISC3_BASE, 0x04),
- TROUT_GPIO_BANK("MISC4", 0x04, TROUT_GPIO_MISC4_BASE, 0),
- /* mmdi 32k en */
- TROUT_GPIO_BANK("MISC5", 0x06, TROUT_GPIO_MISC5_BASE, 0x04),
- TROUT_GPIO_BANK("INT2", 0x08, TROUT_GPIO_INT2_BASE, 0),
- TROUT_GPIO_BANK("MISC1", 0x0a, TROUT_GPIO_MISC1_BASE, 0),
- TROUT_GPIO_BANK("VIRTUAL", 0x12, TROUT_GPIO_VIRTUAL_BASE, 0),
-};
-
-static void trout_gpio_irq_ack(struct irq_data *d)
-{
- int bank = TROUT_INT_TO_BANK(d->irq);
- uint8_t mask = TROUT_INT_TO_MASK(d->irq);
- int reg = TROUT_BANK_TO_STAT_REG(bank);
- /*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", d->irq);*/
- writeb(mask, TROUT_CPLD_BASE + reg);
-}
-
-static void trout_gpio_irq_mask(struct irq_data *d)
-{
- unsigned long flags;
- uint8_t reg_val;
- int bank = TROUT_INT_TO_BANK(d->irq);
- uint8_t mask = TROUT_INT_TO_MASK(d->irq);
- int reg = TROUT_BANK_TO_MASK_REG(bank);
-
- local_irq_save(flags);
- reg_val = trout_int_mask[bank] |= mask;
- /*printk(KERN_INFO "trout_gpio_irq_mask irq %d => %d:%02x\n",
- d->irq, bank, reg_val);*/
- writeb(reg_val, TROUT_CPLD_BASE + reg);
- local_irq_restore(flags);
-}
-
-static void trout_gpio_irq_unmask(struct irq_data *d)
-{
- unsigned long flags;
- uint8_t reg_val;
- int bank = TROUT_INT_TO_BANK(d->irq);
- uint8_t mask = TROUT_INT_TO_MASK(d->irq);
- int reg = TROUT_BANK_TO_MASK_REG(bank);
-
- local_irq_save(flags);
- reg_val = trout_int_mask[bank] &= ~mask;
- /*printk(KERN_INFO "trout_gpio_irq_unmask irq %d => %d:%02x\n",
- d->irq, bank, reg_val);*/
- writeb(reg_val, TROUT_CPLD_BASE + reg);
- local_irq_restore(flags);
-}
-
-int trout_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- unsigned long flags;
- int bank = TROUT_INT_TO_BANK(d->irq);
- uint8_t mask = TROUT_INT_TO_MASK(d->irq);
-
- local_irq_save(flags);
- if(on)
- trout_sleep_int_mask[bank] &= ~mask;
- else
- trout_sleep_int_mask[bank] |= mask;
- local_irq_restore(flags);
- return 0;
-}
-
-static void trout_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- int j, m;
- unsigned v;
- int bank;
- int stat_reg;
- int int_base = TROUT_INT_START;
- uint8_t int_mask;
-
- for (bank = 0; bank < 2; bank++) {
- stat_reg = TROUT_BANK_TO_STAT_REG(bank);
- v = readb(TROUT_CPLD_BASE + stat_reg);
- int_mask = trout_int_mask[bank];
- if (v & int_mask) {
- writeb(v & int_mask, TROUT_CPLD_BASE + stat_reg);
- printk(KERN_ERR "trout_gpio_irq_handler: got masked "
- "interrupt: %d:%02x\n", bank, v & int_mask);
- }
- v &= ~int_mask;
- while (v) {
- m = v & -v;
- j = fls(m) - 1;
- /*printk(KERN_INFO "msm_gpio_irq_handler %d:%02x %02x b"
- "it %d irq %d\n", bank, v, m, j, int_base + j);*/
- v &= ~m;
- generic_handle_irq(int_base + j);
- }
- int_base += TROUT_INT_BANK0_COUNT;
- }
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-}
-
-static struct irq_chip trout_gpio_irq_chip = {
- .name = "troutgpio",
- .irq_ack = trout_gpio_irq_ack,
- .irq_mask = trout_gpio_irq_mask,
- .irq_unmask = trout_gpio_irq_unmask,
- .irq_set_wake = trout_gpio_irq_set_wake,
-};
-
-/*
- * Called from the processor-specific init to enable GPIO pin support.
- */
-int __init trout_init_gpio(void)
-{
- int i;
- for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) {
- irq_set_chip_and_handler(i, &trout_gpio_irq_chip,
- handle_edge_irq);
- set_irq_flags(i, IRQF_VALID);
- }
-
- for (i = 0; i < ARRAY_SIZE(msm_gpio_banks); i++)
- gpiochip_add(&msm_gpio_banks[i].chip);
-
- irq_set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH);
- irq_set_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler);
- irq_set_irq_wake(MSM_GPIO_TO_INT(17), 1);
-
- return 0;
-}
-
-postcore_initcall(trout_init_gpio);
-
diff --git a/arch/arm/mach-msm/board-trout-mmc.c b/arch/arm/mach-msm/board-trout-mmc.c
deleted file mode 100644
index 3723e55819d6..000000000000
--- a/arch/arm/mach-msm/board-trout-mmc.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/* linux/arch/arm/mach-msm/board-trout-mmc.c
-** Author: Brian Swetland <swetland@google.com>
-*/
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/err.h>
-#include <linux/debugfs.h>
-
-#include <asm/io.h>
-
-#include <mach/vreg.h>
-
-#include <linux/platform_data/mmc-msm_sdcc.h>
-
-#include "devices.h"
-
-#include "board-trout.h"
-
-#include "proc_comm.h"
-
-#define DEBUG_SDSLOT_VDD 1
-
-/* ---- COMMON ---- */
-static void config_gpio_table(uint32_t *table, int len)
-{
- int n;
- unsigned id;
- for(n = 0; n < len; n++) {
- id = table[n];
- msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0);
- }
-}
-
-/* ---- SDCARD ---- */
-
-static uint32_t sdcard_on_gpio_table[] = {
- PCOM_GPIO_CFG(62, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */
- PCOM_GPIO_CFG(63, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */
- PCOM_GPIO_CFG(64, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT3 */
- PCOM_GPIO_CFG(65, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT2 */
- PCOM_GPIO_CFG(66, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */
- PCOM_GPIO_CFG(67, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */
-};
-
-static uint32_t sdcard_off_gpio_table[] = {
- PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */
- PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */
- PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */
- PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */
- PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */
- PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */
-};
-
-static uint opt_disable_sdcard;
-
-static int __init trout_disablesdcard_setup(char *str)
-{
- int cal = simple_strtol(str, NULL, 0);
-
- opt_disable_sdcard = cal;
- return 1;
-}
-
-__setup("board_trout.disable_sdcard=", trout_disablesdcard_setup);
-
-static struct vreg *vreg_sdslot; /* SD slot power */
-
-struct mmc_vdd_xlat {
- int mask;
- int level;
-};
-
-static struct mmc_vdd_xlat mmc_vdd_table[] = {
- { MMC_VDD_165_195, 1800 },
- { MMC_VDD_20_21, 2050 },
- { MMC_VDD_21_22, 2150 },
- { MMC_VDD_22_23, 2250 },
- { MMC_VDD_23_24, 2350 },
- { MMC_VDD_24_25, 2450 },
- { MMC_VDD_25_26, 2550 },
- { MMC_VDD_26_27, 2650 },
- { MMC_VDD_27_28, 2750 },
- { MMC_VDD_28_29, 2850 },
- { MMC_VDD_29_30, 2950 },
-};
-
-static unsigned int sdslot_vdd = 0xffffffff;
-static unsigned int sdslot_vreg_enabled;
-
-static uint32_t trout_sdslot_switchvdd(struct device *dev, unsigned int vdd)
-{
- int i, rc;
-
- BUG_ON(!vreg_sdslot);
-
- if (vdd == sdslot_vdd)
- return 0;
-
- sdslot_vdd = vdd;
-
- if (vdd == 0) {
-#if DEBUG_SDSLOT_VDD
- printk("%s: Disabling SD slot power\n", __func__);
-#endif
- config_gpio_table(sdcard_off_gpio_table,
- ARRAY_SIZE(sdcard_off_gpio_table));
- vreg_disable(vreg_sdslot);
- sdslot_vreg_enabled = 0;
- return 0;
- }
-
- if (!sdslot_vreg_enabled) {
- rc = vreg_enable(vreg_sdslot);
- if (rc) {
- printk(KERN_ERR "%s: Error enabling vreg (%d)\n",
- __func__, rc);
- }
- config_gpio_table(sdcard_on_gpio_table,
- ARRAY_SIZE(sdcard_on_gpio_table));
- sdslot_vreg_enabled = 1;
- }
-
- for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) {
- if (mmc_vdd_table[i].mask == (1 << vdd)) {
-#if DEBUG_SDSLOT_VDD
- printk("%s: Setting level to %u\n",
- __func__, mmc_vdd_table[i].level);
-#endif
- rc = vreg_set_level(vreg_sdslot,
- mmc_vdd_table[i].level);
- if (rc) {
- printk(KERN_ERR
- "%s: Error setting vreg level (%d)\n",
- __func__, rc);
- }
- return 0;
- }
- }
-
- printk(KERN_ERR "%s: Invalid VDD %d specified\n", __func__, vdd);
- return 0;
-}
-
-static unsigned int trout_sdslot_status(struct device *dev)
-{
- unsigned int status;
-
- status = (unsigned int) gpio_get_value(TROUT_GPIO_SDMC_CD_N);
- return (!status);
-}
-
-#define TROUT_MMC_VDD MMC_VDD_165_195 | MMC_VDD_20_21 | MMC_VDD_21_22 \
- | MMC_VDD_22_23 | MMC_VDD_23_24 | MMC_VDD_24_25 \
- | MMC_VDD_25_26 | MMC_VDD_26_27 | MMC_VDD_27_28 \
- | MMC_VDD_28_29 | MMC_VDD_29_30
-
-static struct msm_mmc_platform_data trout_sdslot_data = {
- .ocr_mask = TROUT_MMC_VDD,
- .status = trout_sdslot_status,
- .translate_vdd = trout_sdslot_switchvdd,
-};
-
-int __init trout_init_mmc(unsigned int sys_rev)
-{
- sdslot_vreg_enabled = 0;
-
- vreg_sdslot = vreg_get(0, "gp6");
- if (IS_ERR(vreg_sdslot))
- return PTR_ERR(vreg_sdslot);
-
- irq_set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1);
-
- if (!opt_disable_sdcard)
- msm_add_sdcc(2, &trout_sdslot_data,
- TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 0);
- else
- printk(KERN_INFO "trout: SD-Card interface disabled\n");
- return 0;
-}
-
diff --git a/arch/arm/mach-msm/board-trout-panel.c b/arch/arm/mach-msm/board-trout-panel.c
deleted file mode 100644
index 77b0a26f897f..000000000000
--- a/arch/arm/mach-msm/board-trout-panel.c
+++ /dev/null
@@ -1,292 +0,0 @@
-/* linux/arch/arm/mach-msm/board-trout-mddi.c
-** Author: Brian Swetland <swetland@google.com>
-*/
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/leds.h>
-#include <linux/err.h>
-
-#include <asm/io.h>
-#include <asm/mach-types.h>
-#include <asm/system_info.h>
-
-#include <linux/platform_data/video-msm_fb.h>
-#include <mach/vreg.h>
-
-#include "board-trout.h"
-#include "proc_comm.h"
-#include "clock-pcom.h"
-#include "devices.h"
-
-#define TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS 255
-
-#define MDDI_CLIENT_CORE_BASE 0x108000
-#define LCD_CONTROL_BLOCK_BASE 0x110000
-#define SPI_BLOCK_BASE 0x120000
-#define I2C_BLOCK_BASE 0x130000
-#define PWM_BLOCK_BASE 0x140000
-#define GPIO_BLOCK_BASE 0x150000
-#define SYSTEM_BLOCK1_BASE 0x160000
-#define SYSTEM_BLOCK2_BASE 0x170000
-
-
-#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
-#define SYSCLKENA (MDDI_CLIENT_CORE_BASE|0x2C)
-#define PWM0OFF (PWM_BLOCK_BASE|0x1C)
-
-#define V_VDDE2E_VDD2_GPIO 0
-#define MDDI_RST_N 82
-
-#define MDDICAP0 (MDDI_CLIENT_CORE_BASE|0x00)
-#define MDDICAP1 (MDDI_CLIENT_CORE_BASE|0x04)
-#define MDDICAP2 (MDDI_CLIENT_CORE_BASE|0x08)
-#define MDDICAP3 (MDDI_CLIENT_CORE_BASE|0x0C)
-#define MDCAPCHG (MDDI_CLIENT_CORE_BASE|0x10)
-#define MDCRCERC (MDDI_CLIENT_CORE_BASE|0x14)
-#define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18)
-#define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C)
-#define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20)
-#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
-#define DPRUN (MDDI_CLIENT_CORE_BASE|0x28)
-#define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C)
-#define TESTMODE (MDDI_CLIENT_CORE_BASE|0x30)
-#define FIFOMONI (MDDI_CLIENT_CORE_BASE|0x34)
-#define INTMONI (MDDI_CLIENT_CORE_BASE|0x38)
-#define MDIOBIST (MDDI_CLIENT_CORE_BASE|0x3C)
-#define MDIOPSET (MDDI_CLIENT_CORE_BASE|0x40)
-#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44)
-#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48)
-#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C)
-#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50)
-#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54)
-
-#define SRST (LCD_CONTROL_BLOCK_BASE|0x00)
-#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04)
-#define START (LCD_CONTROL_BLOCK_BASE|0x08)
-#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C)
-#define CMN (LCD_CONTROL_BLOCK_BASE|0x10)
-#define GAMMA (LCD_CONTROL_BLOCK_BASE|0x14)
-#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18)
-#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C)
-#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20)
-#define HDE_LEFT (LCD_CONTROL_BLOCK_BASE|0x24)
-#define VDE_TOP (LCD_CONTROL_BLOCK_BASE|0x28)
-#define PXL (LCD_CONTROL_BLOCK_BASE|0x30)
-#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34)
-#define HSW (LCD_CONTROL_BLOCK_BASE|0x38)
-#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C)
-#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40)
-#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44)
-#define VSW (LCD_CONTROL_BLOCK_BASE|0x48)
-#define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C)
-#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50)
-#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54)
-#define WSYN_DLY (LCD_CONTROL_BLOCK_BASE|0x58)
-#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C)
-#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60)
-#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64)
-#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68)
-#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C)
-#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70)
-#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74)
-#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78)
-#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C)
-#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80)
-#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84)
-#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88)
-#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C)
-
-#define SSICTL (SPI_BLOCK_BASE|0x00)
-#define SSITIME (SPI_BLOCK_BASE|0x04)
-#define SSITX (SPI_BLOCK_BASE|0x08)
-#define SSIRX (SPI_BLOCK_BASE|0x0C)
-#define SSIINTC (SPI_BLOCK_BASE|0x10)
-#define SSIINTS (SPI_BLOCK_BASE|0x14)
-#define SSIDBG1 (SPI_BLOCK_BASE|0x18)
-#define SSIDBG2 (SPI_BLOCK_BASE|0x1C)
-#define SSIID (SPI_BLOCK_BASE|0x20)
-
-#define WKREQ (SYSTEM_BLOCK1_BASE|0x00)
-#define CLKENB (SYSTEM_BLOCK1_BASE|0x04)
-#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08)
-#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C)
-#define GPIOSEL (SYSTEM_BLOCK2_BASE|0x00)
-
-#define GPIODATA (GPIO_BLOCK_BASE|0x00)
-#define GPIODIR (GPIO_BLOCK_BASE|0x04)
-#define GPIOIS (GPIO_BLOCK_BASE|0x08)
-#define GPIOIBE (GPIO_BLOCK_BASE|0x0C)
-#define GPIOIEV (GPIO_BLOCK_BASE|0x10)
-#define GPIOIE (GPIO_BLOCK_BASE|0x14)
-#define GPIORIS (GPIO_BLOCK_BASE|0x18)
-#define GPIOMIS (GPIO_BLOCK_BASE|0x1C)
-#define GPIOIC (GPIO_BLOCK_BASE|0x20)
-#define GPIOOMS (GPIO_BLOCK_BASE|0x24)
-#define GPIOPC (GPIO_BLOCK_BASE|0x28)
-#define GPIOID (GPIO_BLOCK_BASE|0x30)
-
-#define SPI_WRITE(reg, val) \
- { SSITX, 0x00010000 | (((reg) & 0xff) << 8) | ((val) & 0xff) }, \
- { 0, 5 },
-
-#define SPI_WRITE1(reg) \
- { SSITX, (reg) & 0xff }, \
- { 0, 5 },
-
-struct mddi_table {
- uint32_t reg;
- uint32_t value;
-};
-static struct mddi_table mddi_toshiba_init_table[] = {
- { DPSET0, 0x09e90046 },
- { DPSET1, 0x00000118 },
- { DPSUS, 0x00000000 },
- { DPRUN, 0x00000001 },
- { 1, 14 }, /* msleep 14 */
- { SYSCKENA, 0x00000001 },
- { CLKENB, 0x0000A1EF }, /* # SYS.CLKENB # Enable clocks for each module (without DCLK , i2cCLK) */
-
- { GPIODATA, 0x02000200 }, /* # GPI .GPIODATA # GPIO2(RESET_LCD_N) set to 0 , GPIO3(eDRAM_Power) set to 0 */
- { GPIODIR, 0x000030D }, /* 24D # GPI .GPIODIR # Select direction of GPIO port (0,2,3,6,9 output) */
- { GPIOSEL, 0/*0x00000173*/}, /* # SYS.GPIOSEL # GPIO port multiplexing control */
- { GPIOPC, 0x03C300C0 }, /* # GPI .GPIOPC # GPIO2,3 PD cut */
- { WKREQ, 0x00000000 }, /* # SYS.WKREQ # Wake-up request event is VSYNC alignment */
-
- { GPIOIBE, 0x000003FF },
- { GPIOIS, 0x00000000 },
- { GPIOIC, 0x000003FF },
- { GPIOIE, 0x00000000 },
-
- { GPIODATA, 0x00040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
- { 1, 1 }, /* msleep 1 */
- { GPIODATA, 0x02040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
- { DRAMPWR, 0x00000001 }, /* eDRAM power */
-};
-
-#define GPIOSEL_VWAKEINT (1U << 0)
-#define INTMASK_VWAKEOUT (1U << 0)
-
-
-static int trout_new_backlight = 1;
-static struct vreg *vreg_mddi_1v5;
-static struct vreg *vreg_lcm_2v85;
-
-static void trout_process_mddi_table(struct msm_mddi_client_data *client_data,
- struct mddi_table *table, size_t count)
-{
- int i;
- for (i = 0; i < count; i++) {
- uint32_t reg = table[i].reg;
- uint32_t value = table[i].value;
-
- if (reg == 0)
- udelay(value);
- else if (reg == 1)
- msleep(value);
- else
- client_data->remote_write(client_data, value, reg);
- }
-}
-
-static int trout_mddi_toshiba_client_init(
- struct msm_mddi_bridge_platform_data *bridge_data,
- struct msm_mddi_client_data *client_data)
-{
- int panel_id;
-
- client_data->auto_hibernate(client_data, 0);
- trout_process_mddi_table(client_data, mddi_toshiba_init_table,
- ARRAY_SIZE(mddi_toshiba_init_table));
- client_data->auto_hibernate(client_data, 1);
- panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
- if (panel_id > 1) {
- printk(KERN_WARNING "unknown panel id at mddi_enable\n");
- return -1;
- }
- return 0;
-}
-
-static int trout_mddi_toshiba_client_uninit(
- struct msm_mddi_bridge_platform_data *bridge_data,
- struct msm_mddi_client_data *client_data)
-{
- return 0;
-}
-
-static struct resource resources_msm_fb[] = {
- {
- .start = MSM_FB_BASE,
- .end = MSM_FB_BASE + MSM_FB_SIZE,
- .flags = IORESOURCE_MEM,
- },
-};
-
-struct msm_mddi_bridge_platform_data toshiba_client_data = {
- .init = trout_mddi_toshiba_client_init,
- .uninit = trout_mddi_toshiba_client_uninit,
- .fb_data = {
- .xres = 320,
- .yres = 480,
- .width = 45,
- .height = 67,
- .output_format = 0,
- },
-};
-
-static struct msm_mddi_platform_data mddi_pdata = {
- .clk_rate = 122880000,
- .fb_resource = resources_msm_fb,
- .num_clients = 1,
- .client_platform_data = {
- {
- .product_id = (0xd263 << 16 | 0),
- .name = "mddi_c_d263_0000",
- .id = 0,
- .client_data = &toshiba_client_data,
- .clk_rate = 0,
- },
- },
-};
-
-int __init trout_init_panel(void)
-{
- int rc;
-
- if (!machine_is_trout())
- return 0;
- vreg_mddi_1v5 = vreg_get(0, "gp2");
- if (IS_ERR(vreg_mddi_1v5))
- return PTR_ERR(vreg_mddi_1v5);
- vreg_lcm_2v85 = vreg_get(0, "gp4");
- if (IS_ERR(vreg_lcm_2v85))
- return PTR_ERR(vreg_lcm_2v85);
-
- trout_new_backlight = system_rev >= 5;
- if (trout_new_backlight) {
- uint32_t config = PCOM_GPIO_CFG(27, 0, GPIO_OUTPUT,
- GPIO_NO_PULL, GPIO_8MA);
- msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
- } else {
- uint32_t config = PCOM_GPIO_CFG(27, 1, GPIO_OUTPUT,
- GPIO_NO_PULL, GPIO_8MA);
- uint32_t id = P_GP_CLK;
- uint32_t rate = 19200000;
-
- msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
-
- msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate);
- if (id < 0)
- pr_err("trout_init_panel: set clock rate failed\n");
- }
-
- rc = platform_device_register(&msm_device_mdp);
- if (rc)
- return rc;
- msm_device_mddi0.dev.platform_data = &mddi_pdata;
- return platform_device_register(&msm_device_mddi0);
-}
-
-device_initcall(trout_init_panel);
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
deleted file mode 100644
index ba3edd3a46cb..000000000000
--- a/arch/arm/mach-msm/board-trout.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* linux/arch/arm/mach-msm/board-trout.c
- *
- * Copyright (C) 2009 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/clkdev.h>
-#include <linux/memblock.h>
-
-#include <asm/system_info.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/setup.h>
-
-#include <mach/hardware.h>
-#include <mach/msm_iomap.h>
-
-#include "devices.h"
-#include "board-trout.h"
-#include "common.h"
-
-extern int trout_init_mmc(unsigned int);
-
-static struct platform_device *devices[] __initdata = {
- &msm_clock_7x01a,
- &msm_device_gpio_7201,
- &msm_device_uart3,
- &msm_device_smd,
- &msm_device_nand,
- &msm_device_hsusb,
- &msm_device_i2c,
-};
-
-static void __init trout_init_early(void)
-{
- arch_ioremap_caller = __msm_ioremap_caller;
-}
-
-static void __init trout_init_irq(void)
-{
- msm_init_irq();
-}
-
-static void __init trout_fixup(struct tag *tags, char **cmdline)
-{
- memblock_add(PHYS_OFFSET, 101*SZ_1M);
-}
-
-static void __init trout_init(void)
-{
- int rc;
-
- platform_add_devices(devices, ARRAY_SIZE(devices));
-
- if (IS_ENABLED(CONFIG_MMC)) {
- rc = trout_init_mmc(system_rev);
- if (rc)
- pr_crit("MMC init failure (%d)\n", rc);
- }
-}
-
-static struct map_desc trout_io_desc[] __initdata = {
- {
- .virtual = (unsigned long)TROUT_CPLD_BASE,
- .pfn = __phys_to_pfn(TROUT_CPLD_START),
- .length = TROUT_CPLD_SIZE,
- .type = MT_DEVICE_NONSHARED
- }
-};
-
-static void __init trout_map_io(void)
-{
- msm_map_common_io();
- iotable_init(trout_io_desc, ARRAY_SIZE(trout_io_desc));
-
-#if defined(CONFIG_DEBUG_MSM_UART) && (CONFIG_DEBUG_UART_PHYS == 0xa9c00000)
- /* route UART3 to the "H2W" extended usb connector */
- writeb(0x80, TROUT_CPLD_BASE + 0x00);
-#endif
-}
-
-static void __init trout_init_late(void)
-{
- smd_debugfs_init();
-}
-
-MACHINE_START(TROUT, "HTC Dream")
- .atag_offset = 0x100,
- .fixup = trout_fixup,
- .map_io = trout_map_io,
- .init_early = trout_init_early,
- .init_irq = trout_init_irq,
- .init_machine = trout_init,
- .init_late = trout_init_late,
- .init_time = msm7x01_timer_init,
-MACHINE_END
diff --git a/arch/arm/mach-msm/board-trout.h b/arch/arm/mach-msm/board-trout.h
deleted file mode 100644
index adb757abbb92..000000000000
--- a/arch/arm/mach-msm/board-trout.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* linux/arch/arm/mach-msm/board-trout.h
-** Author: Brian Swetland <swetland@google.com>
-*/
-#ifndef __ARCH_ARM_MACH_MSM_BOARD_TROUT_H
-#define __ARCH_ARM_MACH_MSM_BOARD_TROUT_H
-
-#include "common.h"
-
-#define MSM_SMI_BASE 0x00000000
-#define MSM_SMI_SIZE 0x00800000
-
-#define MSM_EBI_BASE 0x10000000
-#define MSM_EBI_SIZE 0x06e00000
-
-#define MSM_PMEM_GPU0_BASE 0x00000000
-#define MSM_PMEM_GPU0_SIZE 0x00700000
-
-#define MSM_PMEM_MDP_BASE 0x02000000
-#define MSM_PMEM_MDP_SIZE 0x00800000
-
-#define MSM_PMEM_ADSP_BASE 0x02800000
-#define MSM_PMEM_ADSP_SIZE 0x00800000
-
-#define MSM_PMEM_CAMERA_BASE 0x03000000
-#define MSM_PMEM_CAMERA_SIZE 0x00800000
-
-#define MSM_FB_BASE 0x03800000
-#define MSM_FB_SIZE 0x00100000
-
-#define MSM_LINUX_BASE MSM_EBI_BASE
-#define MSM_LINUX_SIZE 0x06500000
-
-#define MSM_PMEM_GPU1_SIZE 0x800000
-#define MSM_PMEM_GPU1_BASE (MSM_RAM_CONSOLE_BASE - MSM_PMEM_GPU1_SIZE)
-
-#define MSM_RAM_CONSOLE_BASE (MSM_EBI_BASE + 0x6d00000)
-#define MSM_RAM_CONSOLE_SIZE (128 * SZ_1K)
-
-#if (MSM_FB_BASE + MSM_FB_SIZE) >= (MSM_PMEM_GPU1_BASE)
-#error invalid memory map
-#endif
-
-#define DECLARE_MSM_IOMAP
-#include <mach/msm_iomap.h>
-
-#define TROUT_4_BALL_UP_0 1
-#define TROUT_4_BALL_LEFT_0 18
-#define TROUT_4_BALL_DOWN_0 57
-#define TROUT_4_BALL_RIGHT_0 91
-
-#define TROUT_5_BALL_UP_0 94
-#define TROUT_5_BALL_LEFT_0 18
-#define TROUT_5_BALL_DOWN_0 90
-#define TROUT_5_BALL_RIGHT_0 19
-
-#define TROUT_POWER_KEY 20
-
-#define TROUT_4_TP_LS_EN 19
-#define TROUT_5_TP_LS_EN 1
-
-#define TROUT_CPLD_BASE IOMEM(0xE8100000)
-#define TROUT_CPLD_START 0x98000000
-#define TROUT_CPLD_SIZE SZ_4K
-
-#define TROUT_GPIO_CABLE_IN1 (83)
-#define TROUT_GPIO_CABLE_IN2 (49)
-
-#define TROUT_GPIO_START (128)
-
-#define TROUT_GPIO_INT_MASK0_REG (0x0c)
-#define TROUT_GPIO_INT_STAT0_REG (0x0e)
-#define TROUT_GPIO_INT_MASK1_REG (0x14)
-#define TROUT_GPIO_INT_STAT1_REG (0x10)
-
-#define TROUT_GPIO_HAPTIC_PWM (28)
-#define TROUT_GPIO_PS_HOLD (25)
-
-#define TROUT_GPIO_MISC2_BASE (TROUT_GPIO_START + 0x00)
-#define TROUT_GPIO_MISC3_BASE (TROUT_GPIO_START + 0x08)
-#define TROUT_GPIO_MISC4_BASE (TROUT_GPIO_START + 0x10)
-#define TROUT_GPIO_MISC5_BASE (TROUT_GPIO_START + 0x18)
-#define TROUT_GPIO_INT2_BASE (TROUT_GPIO_START + 0x20)
-#define TROUT_GPIO_MISC1_BASE (TROUT_GPIO_START + 0x28)
-#define TROUT_GPIO_VIRTUAL_BASE (TROUT_GPIO_START + 0x30)
-#define TROUT_GPIO_INT5_BASE (TROUT_GPIO_START + 0x48)
-
-#define TROUT_GPIO_CHARGER_EN (TROUT_GPIO_MISC2_BASE + 0)
-#define TROUT_GPIO_ISET (TROUT_GPIO_MISC2_BASE + 1)
-#define TROUT_GPIO_H2W_DAT_DIR (TROUT_GPIO_MISC2_BASE + 2)
-#define TROUT_GPIO_H2W_CLK_DIR (TROUT_GPIO_MISC2_BASE + 3)
-#define TROUT_GPIO_H2W_DAT_GPO (TROUT_GPIO_MISC2_BASE + 4)
-#define TROUT_GPIO_H2W_CLK_GPO (TROUT_GPIO_MISC2_BASE + 5)
-#define TROUT_GPIO_H2W_SEL0 (TROUT_GPIO_MISC2_BASE + 6)
-#define TROUT_GPIO_H2W_SEL1 (TROUT_GPIO_MISC2_BASE + 7)
-
-#define TROUT_GPIO_SPOTLIGHT_EN (TROUT_GPIO_MISC3_BASE + 0)
-#define TROUT_GPIO_FLASH_EN (TROUT_GPIO_MISC3_BASE + 1)
-#define TROUT_GPIO_I2C_PULL (TROUT_GPIO_MISC3_BASE + 2)
-#define TROUT_GPIO_TP_I2C_PULL (TROUT_GPIO_MISC3_BASE + 3)
-#define TROUT_GPIO_TP_EN (TROUT_GPIO_MISC3_BASE + 4)
-#define TROUT_GPIO_JOG_EN (TROUT_GPIO_MISC3_BASE + 5)
-#define TROUT_GPIO_UI_LED_EN (TROUT_GPIO_MISC3_BASE + 6)
-#define TROUT_GPIO_QTKEY_LED_EN (TROUT_GPIO_MISC3_BASE + 7)
-
-#define TROUT_GPIO_VCM_PWDN (TROUT_GPIO_MISC4_BASE + 0)
-#define TROUT_GPIO_USB_H2W_SW (TROUT_GPIO_MISC4_BASE + 1)
-#define TROUT_GPIO_COMPASS_RST_N (TROUT_GPIO_MISC4_BASE + 2)
-#define TROUT_GPIO_HAPTIC_EN_UP (TROUT_GPIO_MISC4_BASE + 3)
-#define TROUT_GPIO_HAPTIC_EN_MAIN (TROUT_GPIO_MISC4_BASE + 4)
-#define TROUT_GPIO_USB_PHY_RST_N (TROUT_GPIO_MISC4_BASE + 5)
-#define TROUT_GPIO_WIFI_PA_RESETX (TROUT_GPIO_MISC4_BASE + 6)
-#define TROUT_GPIO_WIFI_EN (TROUT_GPIO_MISC4_BASE + 7)
-
-#define TROUT_GPIO_BT_32K_EN (TROUT_GPIO_MISC5_BASE + 0)
-#define TROUT_GPIO_MAC_32K_EN (TROUT_GPIO_MISC5_BASE + 1)
-#define TROUT_GPIO_MDDI_32K_EN (TROUT_GPIO_MISC5_BASE + 2)
-#define TROUT_GPIO_COMPASS_32K_EN (TROUT_GPIO_MISC5_BASE + 3)
-
-#define TROUT_GPIO_NAVI_ACT_N (TROUT_GPIO_INT2_BASE + 0)
-#define TROUT_GPIO_COMPASS_IRQ (TROUT_GPIO_INT2_BASE + 1)
-#define TROUT_GPIO_SLIDING_DET (TROUT_GPIO_INT2_BASE + 2)
-#define TROUT_GPIO_AUD_HSMIC_DET_N (TROUT_GPIO_INT2_BASE + 3)
-#define TROUT_GPIO_SD_DOOR_N (TROUT_GPIO_INT2_BASE + 4)
-#define TROUT_GPIO_CAM_BTN_STEP1_N (TROUT_GPIO_INT2_BASE + 5)
-#define TROUT_GPIO_CAM_BTN_STEP2_N (TROUT_GPIO_INT2_BASE + 6)
-#define TROUT_GPIO_TP_ATT_N (TROUT_GPIO_INT2_BASE + 7)
-#define TROUT_GPIO_BANK0_FIRST_INT_SOURCE (TROUT_GPIO_NAVI_ACT_N)
-#define TROUT_GPIO_BANK0_LAST_INT_SOURCE (TROUT_GPIO_TP_ATT_N)
-
-#define TROUT_GPIO_H2W_DAT_GPI (TROUT_GPIO_MISC1_BASE + 0)
-#define TROUT_GPIO_H2W_CLK_GPI (TROUT_GPIO_MISC1_BASE + 1)
-#define TROUT_GPIO_CPLD128_VER_0 (TROUT_GPIO_MISC1_BASE + 4)
-#define TROUT_GPIO_CPLD128_VER_1 (TROUT_GPIO_MISC1_BASE + 5)
-#define TROUT_GPIO_CPLD128_VER_2 (TROUT_GPIO_MISC1_BASE + 6)
-#define TROUT_GPIO_CPLD128_VER_3 (TROUT_GPIO_MISC1_BASE + 7)
-
-#define TROUT_GPIO_SDMC_CD_N (TROUT_GPIO_VIRTUAL_BASE + 0)
-#define TROUT_GPIO_END (TROUT_GPIO_SDMC_CD_N)
-#define TROUT_GPIO_BANK1_FIRST_INT_SOURCE (TROUT_GPIO_SDMC_CD_N)
-#define TROUT_GPIO_BANK1_LAST_INT_SOURCE (TROUT_GPIO_SDMC_CD_N)
-
-#define TROUT_GPIO_VIRTUAL_TO_REAL_OFFSET \
- (TROUT_GPIO_INT5_BASE - TROUT_GPIO_VIRTUAL_BASE)
-
-#define TROUT_INT_START (NR_MSM_IRQS + NR_GPIO_IRQS)
-#define TROUT_INT_BANK0_COUNT (8)
-#define TROUT_INT_BANK1_START (TROUT_INT_START + TROUT_INT_BANK0_COUNT)
-#define TROUT_INT_BANK1_COUNT (1)
-#define TROUT_INT_END (TROUT_INT_START + TROUT_INT_BANK0_COUNT + \
- TROUT_INT_BANK1_COUNT - 1)
-#define TROUT_GPIO_TO_INT(n) (((n) <= TROUT_GPIO_BANK0_LAST_INT_SOURCE) ? \
- (TROUT_INT_START - TROUT_GPIO_BANK0_FIRST_INT_SOURCE + (n)) : \
- (TROUT_INT_BANK1_START - TROUT_GPIO_BANK1_FIRST_INT_SOURCE + (n)))
-
-#define TROUT_INT_TO_BANK(n) ((n - TROUT_INT_START) / TROUT_INT_BANK0_COUNT)
-#define TROUT_INT_TO_MASK(n) (1U << ((n - TROUT_INT_START) & 7))
-#define TROUT_BANK_TO_MASK_REG(bank) \
- (bank ? TROUT_GPIO_INT_MASK1_REG : TROUT_GPIO_INT_MASK0_REG)
-#define TROUT_BANK_TO_STAT_REG(bank) \
- (bank ? TROUT_GPIO_INT_STAT1_REG : TROUT_GPIO_INT_STAT0_REG)
-
-#endif /* GUARD */
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
deleted file mode 100644
index f5b69d736ee5..000000000000
--- a/arch/arm/mach-msm/clock-pcom.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-
-#include <mach/clk.h>
-
-#include "proc_comm.h"
-#include "clock.h"
-#include "clock-pcom.h"
-
-struct clk_pcom {
- unsigned id;
- unsigned long flags;
- struct msm_clk msm_clk;
-};
-
-static inline struct clk_pcom *to_clk_pcom(struct clk_hw *hw)
-{
- return container_of(to_msm_clk(hw), struct clk_pcom, msm_clk);
-}
-
-static int pc_clk_enable(struct clk_hw *hw)
-{
- unsigned id = to_clk_pcom(hw)->id;
- int rc = msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL);
- if (rc < 0)
- return rc;
- else
- return (int)id < 0 ? -EINVAL : 0;
-}
-
-static void pc_clk_disable(struct clk_hw *hw)
-{
- unsigned id = to_clk_pcom(hw)->id;
- msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL);
-}
-
-static int pc_clk_reset(struct clk_hw *hw, enum clk_reset_action action)
-{
- int rc;
- unsigned id = to_clk_pcom(hw)->id;
-
- if (action == CLK_RESET_ASSERT)
- rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_ASSERT, &id, NULL);
- else
- rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_DEASSERT, &id, NULL);
-
- if (rc < 0)
- return rc;
- else
- return (int)id < 0 ? -EINVAL : 0;
-}
-
-static int pc_clk_set_rate(struct clk_hw *hw, unsigned long new_rate,
- unsigned long p_rate)
-{
- struct clk_pcom *p = to_clk_pcom(hw);
- unsigned id = p->id, rate = new_rate;
- int rc;
-
- /*
- * The rate _might_ be rounded off to the nearest KHz value by the
- * remote function. So a return value of 0 doesn't necessarily mean
- * that the exact rate was set successfully.
- */
- if (p->flags & CLKFLAG_MIN)
- rc = msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &rate);
- else
- rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate);
- if (rc < 0)
- return rc;
- else
- return (int)id < 0 ? -EINVAL : 0;
-}
-
-static unsigned long pc_clk_recalc_rate(struct clk_hw *hw, unsigned long p_rate)
-{
- unsigned id = to_clk_pcom(hw)->id;
- if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL))
- return 0;
- else
- return id;
-}
-
-static int pc_clk_is_enabled(struct clk_hw *hw)
-{
- unsigned id = to_clk_pcom(hw)->id;
- if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL))
- return 0;
- else
- return id;
-}
-
-static long pc_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate)
-{
- /* Not really supported; pc_clk_set_rate() does rounding on its own. */
- return rate;
-}
-
-static struct clk_ops clk_ops_pcom = {
- .enable = pc_clk_enable,
- .disable = pc_clk_disable,
- .set_rate = pc_clk_set_rate,
- .recalc_rate = pc_clk_recalc_rate,
- .is_enabled = pc_clk_is_enabled,
- .round_rate = pc_clk_round_rate,
-};
-
-static int msm_clock_pcom_probe(struct platform_device *pdev)
-{
- const struct pcom_clk_pdata *pdata = pdev->dev.platform_data;
- int i, ret;
-
- for (i = 0; i < pdata->num_lookups; i++) {
- const struct clk_pcom_desc *desc = &pdata->lookup[i];
- struct clk *c;
- struct clk_pcom *p;
- struct clk_hw *hw;
- struct clk_init_data init;
-
- p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->id = desc->id;
- p->flags = desc->flags;
- p->msm_clk.reset = pc_clk_reset;
-
- hw = &p->msm_clk.hw;
- hw->init = &init;
-
- init.name = desc->name;
- init.ops = &clk_ops_pcom;
- init.num_parents = 0;
- init.flags = CLK_IS_ROOT;
-
- if (!(p->flags & CLKFLAG_AUTO_OFF))
- init.flags |= CLK_IGNORE_UNUSED;
-
- c = devm_clk_register(&pdev->dev, hw);
- ret = clk_register_clkdev(c, desc->con, desc->dev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct platform_driver msm_clock_pcom_driver = {
- .probe = msm_clock_pcom_probe,
- .driver = {
- .name = "msm-clock-pcom",
- },
-};
-module_platform_driver(msm_clock_pcom_driver);
-
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/clock-pcom.h b/arch/arm/mach-msm/clock-pcom.h
deleted file mode 100644
index 5bb164fd46a8..000000000000
--- a/arch/arm/mach-msm/clock-pcom.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PCOM_H
-#define __ARCH_ARM_MACH_MSM_CLOCK_PCOM_H
-
-/* clock IDs used by the modem processor */
-
-#define P_ACPU_CLK 0 /* Applications processor clock */
-#define P_ADM_CLK 1 /* Applications data mover clock */
-#define P_ADSP_CLK 2 /* ADSP clock */
-#define P_EBI1_CLK 3 /* External bus interface 1 clock */
-#define P_EBI2_CLK 4 /* External bus interface 2 clock */
-#define P_ECODEC_CLK 5 /* External CODEC clock */
-#define P_EMDH_CLK 6 /* External MDDI host clock */
-#define P_GP_CLK 7 /* General purpose clock */
-#define P_GRP_3D_CLK 8 /* Graphics clock */
-#define P_I2C_CLK 9 /* I2C clock */
-#define P_ICODEC_RX_CLK 10 /* Internal CODEC RX clock */
-#define P_ICODEC_TX_CLK 11 /* Internal CODEC TX clock */
-#define P_IMEM_CLK 12 /* Internal graphics memory clock */
-#define P_MDC_CLK 13 /* MDDI client clock */
-#define P_MDP_CLK 14 /* Mobile display processor clock */
-#define P_PBUS_CLK 15 /* Peripheral bus clock */
-#define P_PCM_CLK 16 /* PCM clock */
-#define P_PMDH_CLK 17 /* Primary MDDI host clock */
-#define P_SDAC_CLK 18 /* Stereo DAC clock */
-#define P_SDC1_CLK 19 /* Secure Digital Card clocks */
-#define P_SDC1_P_CLK 20
-#define P_SDC2_CLK 21
-#define P_SDC2_P_CLK 22
-#define P_SDC3_CLK 23
-#define P_SDC3_P_CLK 24
-#define P_SDC4_CLK 25
-#define P_SDC4_P_CLK 26
-#define P_TSIF_CLK 27 /* Transport Stream Interface clocks */
-#define P_TSIF_REF_CLK 28
-#define P_TV_DAC_CLK 29 /* TV clocks */
-#define P_TV_ENC_CLK 30
-#define P_UART1_CLK 31 /* UART clocks */
-#define P_UART2_CLK 32
-#define P_UART3_CLK 33
-#define P_UART1DM_CLK 34
-#define P_UART2DM_CLK 35
-#define P_USB_HS_CLK 36 /* High speed USB core clock */
-#define P_USB_HS_P_CLK 37 /* High speed USB pbus clock */
-#define P_USB_OTG_CLK 38 /* Full speed USB clock */
-#define P_VDC_CLK 39 /* Video controller clock */
-#define P_VFE_MDC_CLK 40 /* Camera / Video Front End clock */
-#define P_VFE_CLK 41 /* VFE MDDI client clock */
-#define P_MDP_LCDC_PCLK_CLK 42
-#define P_MDP_LCDC_PAD_PCLK_CLK 43
-#define P_MDP_VSYNC_CLK 44
-#define P_SPI_CLK 45
-#define P_VFE_AXI_CLK 46
-#define P_USB_HS2_CLK 47 /* High speed USB 2 core clock */
-#define P_USB_HS2_P_CLK 48 /* High speed USB 2 pbus clock */
-#define P_USB_HS3_CLK 49 /* High speed USB 3 core clock */
-#define P_USB_HS3_P_CLK 50 /* High speed USB 3 pbus clock */
-#define P_GRP_3D_P_CLK 51 /* Graphics pbus clock */
-#define P_USB_PHY_CLK 52 /* USB PHY clock */
-#define P_USB_HS_CORE_CLK 53 /* High speed USB 1 core clock */
-#define P_USB_HS2_CORE_CLK 54 /* High speed USB 2 core clock */
-#define P_USB_HS3_CORE_CLK 55 /* High speed USB 3 core clock */
-#define P_CAM_M_CLK 56
-#define P_CAMIF_PAD_P_CLK 57
-#define P_GRP_2D_CLK 58
-#define P_GRP_2D_P_CLK 59
-#define P_I2S_CLK 60
-#define P_JPEG_CLK 61
-#define P_JPEG_P_CLK 62
-#define P_LPA_CODEC_CLK 63
-#define P_LPA_CORE_CLK 64
-#define P_LPA_P_CLK 65
-#define P_MDC_IO_CLK 66
-#define P_MDC_P_CLK 67
-#define P_MFC_CLK 68
-#define P_MFC_DIV2_CLK 69
-#define P_MFC_P_CLK 70
-#define P_QUP_I2C_CLK 71
-#define P_ROTATOR_IMEM_CLK 72
-#define P_ROTATOR_P_CLK 73
-#define P_VFE_CAMIF_CLK 74
-#define P_VFE_P_CLK 75
-#define P_VPE_CLK 76
-#define P_I2C_2_CLK 77
-#define P_MI2S_CODEC_RX_S_CLK 78
-#define P_MI2S_CODEC_RX_M_CLK 79
-#define P_MI2S_CODEC_TX_S_CLK 80
-#define P_MI2S_CODEC_TX_M_CLK 81
-#define P_PMDH_P_CLK 82
-#define P_EMDH_P_CLK 83
-#define P_SPI_P_CLK 84
-#define P_TSIF_P_CLK 85
-#define P_MDP_P_CLK 86
-#define P_SDAC_M_CLK 87
-#define P_MI2S_S_CLK 88
-#define P_MI2S_M_CLK 89
-#define P_AXI_ROTATOR_CLK 90
-#define P_HDMI_CLK 91
-#define P_CSI0_CLK 92
-#define P_CSI0_VFE_CLK 93
-#define P_CSI0_P_CLK 94
-#define P_CSI1_CLK 95
-#define P_CSI1_VFE_CLK 96
-#define P_CSI1_P_CLK 97
-#define P_GSBI_CLK 98
-#define P_GSBI_P_CLK 99
-#define P_CE_CLK 100 /* Crypto engine */
-#define P_CODEC_SSBI_CLK 101
-
-#define P_NR_CLKS 102
-
-struct clk_pcom_desc {
- unsigned id;
- const char *name;
- const char *con;
- const char *dev;
- unsigned long flags;
-};
-
-struct pcom_clk_pdata {
- struct clk_pcom_desc *lookup;
- u32 num_lookups;
-};
-
-#define CLK_PCOM(clk_name, clk_id, clk_dev, clk_flags) { \
- .id = P_##clk_id, \
- .name = #clk_id, \
- .con = clk_name, \
- .dev = clk_dev, \
- .flags = clk_flags, \
- }
-
-#endif
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
deleted file mode 100644
index 35ea02b52483..000000000000
--- a/arch/arm/mach-msm/clock.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* arch/arm/mach-msm/clock.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/clk-provider.h>
-#include <linux/module.h>
-
-#include "clock.h"
-
-int clk_reset(struct clk *clk, enum clk_reset_action action)
-{
- struct clk_hw *hw = __clk_get_hw(clk);
- struct msm_clk *m = to_msm_clk(hw);
- return m->reset(hw, action);
-}
-EXPORT_SYMBOL(clk_reset);
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
deleted file mode 100644
index 42d29dd7aafc..000000000000
--- a/arch/arm/mach-msm/clock.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* arch/arm/mach-msm/clock.h
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_CLOCK_H
-#define __ARCH_ARM_MACH_MSM_CLOCK_H
-
-#include <linux/clk-provider.h>
-#include <mach/clk.h>
-
-#define CLK_FIRST_AVAILABLE_FLAG 0x00000100
-#define CLKFLAG_AUTO_OFF 0x00000200
-#define CLKFLAG_MIN 0x00000400
-#define CLKFLAG_MAX 0x00000800
-
-#define OFF CLKFLAG_AUTO_OFF
-#define CLK_MIN CLKFLAG_MIN
-#define CLK_MAX CLKFLAG_MAX
-#define CLK_MINMAX (CLK_MIN | CLK_MAX)
-
-struct msm_clk {
- int (*reset)(struct clk_hw *hw, enum clk_reset_action action);
- struct clk_hw hw;
-};
-
-static inline struct msm_clk *to_msm_clk(struct clk_hw *hw)
-{
- return container_of(hw, struct msm_clk, hw);
-}
-
-#endif
diff --git a/arch/arm/mach-msm/common.h b/arch/arm/mach-msm/common.h
deleted file mode 100644
index 572479a3c7be..000000000000
--- a/arch/arm/mach-msm/common.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __MACH_COMMON_H
-#define __MACH_COMMON_H
-
-extern void msm7x01_timer_init(void);
-extern void msm7x30_timer_init(void);
-extern void qsd8x50_timer_init(void);
-
-extern void msm_map_common_io(void);
-extern void msm_map_msm7x30_io(void);
-extern void msm_map_qsd8x50_io(void);
-
-extern void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller);
-
-struct msm_mmc_platform_data;
-
-extern void msm_add_devices(void);
-extern void msm_init_irq(void);
-extern void msm_init_gpio(void);
-extern int msm_add_sdcc(unsigned int controller,
- struct msm_mmc_platform_data *plat,
- unsigned int stat_irq, unsigned long stat_irq_flags);
-
-#if defined(CONFIG_MSM_SMD) && defined(CONFIG_DEBUG_FS)
-extern int smd_debugfs_init(void);
-#else
-static inline int smd_debugfs_init(void) { return 0; }
-#endif
-
-#endif
diff --git a/arch/arm/mach-msm/devices-msm7x00.c b/arch/arm/mach-msm/devices-msm7x00.c
deleted file mode 100644
index d83404d4b328..000000000000
--- a/arch/arm/mach-msm/devices-msm7x00.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/* linux/arch/arm/mach-msm/devices.c
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/clkdev.h>
-
-#include <mach/irqs.h>
-#include <mach/msm_iomap.h>
-#include "devices.h"
-
-#include <asm/mach/flash.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-
-#include "clock.h"
-#include "clock-pcom.h"
-#include <linux/platform_data/mmc-msm_sdcc.h>
-
-static struct resource msm_gpio_resources[] = {
- {
- .start = 32 + 0,
- .end = 32 + 0,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = 32 + 1,
- .end = 32 + 1,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = 0xa9200800,
- .end = 0xa9200800 + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- .name = "gpio1"
- },
- {
- .start = 0xa9300C00,
- .end = 0xa9300C00 + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- .name = "gpio2"
- },
-};
-
-struct platform_device msm_device_gpio_7201 = {
- .name = "gpio-msm-7201",
- .num_resources = ARRAY_SIZE(msm_gpio_resources),
- .resource = msm_gpio_resources,
-};
-
-static struct resource resources_uart1[] = {
- {
- .start = INT_UART1,
- .end = INT_UART1,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = MSM_UART1_PHYS,
- .end = MSM_UART1_PHYS + MSM_UART1_SIZE - 1,
- .flags = IORESOURCE_MEM,
- .name = "uart_resource"
- },
-};
-
-static struct resource resources_uart2[] = {
- {
- .start = INT_UART2,
- .end = INT_UART2,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = MSM_UART2_PHYS,
- .end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1,
- .flags = IORESOURCE_MEM,
- .name = "uart_resource"
- },
-};
-
-static struct resource resources_uart3[] = {
- {
- .start = INT_UART3,
- .end = INT_UART3,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = MSM_UART3_PHYS,
- .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
- .flags = IORESOURCE_MEM,
- .name = "uart_resource"
- },
-};
-
-struct platform_device msm_device_uart1 = {
- .name = "msm_serial",
- .id = 0,
- .num_resources = ARRAY_SIZE(resources_uart1),
- .resource = resources_uart1,
-};
-
-struct platform_device msm_device_uart2 = {
- .name = "msm_serial",
- .id = 1,
- .num_resources = ARRAY_SIZE(resources_uart2),
- .resource = resources_uart2,
-};
-
-struct platform_device msm_device_uart3 = {
- .name = "msm_serial",
- .id = 2,
- .num_resources = ARRAY_SIZE(resources_uart3),
- .resource = resources_uart3,
-};
-
-static struct resource resources_i2c[] = {
- {
- .start = MSM_I2C_PHYS,
- .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_PWB_I2C,
- .end = INT_PWB_I2C,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_i2c = {
- .name = "msm_i2c",
- .id = 0,
- .num_resources = ARRAY_SIZE(resources_i2c),
- .resource = resources_i2c,
-};
-
-static struct resource resources_hsusb[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_hsusb = {
- .name = "msm_hsusb",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_hsusb),
- .resource = resources_hsusb,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct flash_platform_data msm_nand_data = {
- .parts = NULL,
- .nr_parts = 0,
-};
-
-static struct resource resources_nand[] = {
- [0] = {
- .start = 7,
- .end = 7,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device msm_device_nand = {
- .name = "msm_nand",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_nand),
- .resource = resources_nand,
- .dev = {
- .platform_data = &msm_nand_data,
- },
-};
-
-struct platform_device msm_device_smd = {
- .name = "msm_smd",
- .id = -1,
-};
-
-static struct resource resources_sdc1[] = {
- {
- .start = MSM_SDC1_PHYS,
- .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC1_0,
- .end = INT_SDC1_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct resource resources_sdc2[] = {
- {
- .start = MSM_SDC2_PHYS,
- .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC2_0,
- .end = INT_SDC2_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct resource resources_sdc3[] = {
- {
- .start = MSM_SDC3_PHYS,
- .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC3_0,
- .end = INT_SDC3_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct resource resources_sdc4[] = {
- {
- .start = MSM_SDC4_PHYS,
- .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC4_0,
- .end = INT_SDC4_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device msm_device_sdc1 = {
- .name = "msm_sdcc",
- .id = 1,
- .num_resources = ARRAY_SIZE(resources_sdc1),
- .resource = resources_sdc1,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct platform_device msm_device_sdc2 = {
- .name = "msm_sdcc",
- .id = 2,
- .num_resources = ARRAY_SIZE(resources_sdc2),
- .resource = resources_sdc2,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct platform_device msm_device_sdc3 = {
- .name = "msm_sdcc",
- .id = 3,
- .num_resources = ARRAY_SIZE(resources_sdc3),
- .resource = resources_sdc3,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct platform_device msm_device_sdc4 = {
- .name = "msm_sdcc",
- .id = 4,
- .num_resources = ARRAY_SIZE(resources_sdc4),
- .resource = resources_sdc4,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static struct platform_device *msm_sdcc_devices[] __initdata = {
- &msm_device_sdc1,
- &msm_device_sdc2,
- &msm_device_sdc3,
- &msm_device_sdc4,
-};
-
-int __init msm_add_sdcc(unsigned int controller,
- struct msm_mmc_platform_data *plat,
- unsigned int stat_irq, unsigned long stat_irq_flags)
-{
- struct platform_device *pdev;
- struct resource *res;
-
- if (controller < 1 || controller > 4)
- return -EINVAL;
-
- pdev = msm_sdcc_devices[controller-1];
- pdev->dev.platform_data = plat;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq");
- if (!res)
- return -EINVAL;
- else if (stat_irq) {
- res->start = res->end = stat_irq;
- res->flags &= ~IORESOURCE_DISABLED;
- res->flags |= stat_irq_flags;
- }
-
- return platform_device_register(pdev);
-}
-
-static struct resource resources_mddi0[] = {
- {
- .start = MSM_PMDH_PHYS,
- .end = MSM_PMDH_PHYS + MSM_PMDH_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_MDDI_PRI,
- .end = INT_MDDI_PRI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource resources_mddi1[] = {
- {
- .start = MSM_EMDH_PHYS,
- .end = MSM_EMDH_PHYS + MSM_EMDH_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_MDDI_EXT,
- .end = INT_MDDI_EXT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_mddi0 = {
- .name = "msm_mddi",
- .id = 0,
- .num_resources = ARRAY_SIZE(resources_mddi0),
- .resource = resources_mddi0,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct platform_device msm_device_mddi1 = {
- .name = "msm_mddi",
- .id = 1,
- .num_resources = ARRAY_SIZE(resources_mddi1),
- .resource = resources_mddi1,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static struct resource resources_mdp[] = {
- {
- .start = MSM_MDP_PHYS,
- .end = MSM_MDP_PHYS + MSM_MDP_SIZE - 1,
- .name = "mdp",
- .flags = IORESOURCE_MEM
- },
- {
- .start = INT_MDP,
- .end = INT_MDP,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_mdp = {
- .name = "msm_mdp",
- .id = 0,
- .num_resources = ARRAY_SIZE(resources_mdp),
- .resource = resources_mdp,
-};
-
-static struct clk_pcom_desc msm_clocks_7x01a[] = {
- CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
- CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
- CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, 0),
- CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
- CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
- CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF),
- CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
- CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, OFF),
- CLK_PCOM("i2c_clk", I2C_CLK, "msm_i2c.0", 0),
- CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
- CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
- CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
- CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
- CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
- CLK_PCOM("pbus_clk", PBUS_CLK, NULL, 0),
- CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
- CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
- CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
- CLK_PCOM("sdc_clk", SDC1_CLK, "msm_sdcc.1", OFF),
- CLK_PCOM("sdc_pclk", SDC1_P_CLK, "msm_sdcc.1", OFF),
- CLK_PCOM("sdc_clk", SDC2_CLK, "msm_sdcc.2", OFF),
- CLK_PCOM("sdc_pclk", SDC2_P_CLK, "msm_sdcc.2", OFF),
- CLK_PCOM("sdc_clk", SDC3_CLK, "msm_sdcc.3", OFF),
- CLK_PCOM("sdc_pclk", SDC3_P_CLK, "msm_sdcc.3", OFF),
- CLK_PCOM("sdc_clk", SDC4_CLK, "msm_sdcc.4", OFF),
- CLK_PCOM("sdc_pclk", SDC4_P_CLK, "msm_sdcc.4", OFF),
- CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
- CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
- CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
- CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("core", UART1_CLK, "msm_serial.0", OFF),
- CLK_PCOM("core", UART2_CLK, "msm_serial.1", 0),
- CLK_PCOM("core", UART3_CLK, "msm_serial.2", OFF),
- CLK_PCOM("uart1dm_clk", UART1DM_CLK, NULL, OFF),
- CLK_PCOM("uart2dm_clk", UART2DM_CLK, NULL, 0),
- CLK_PCOM("usb_hs_clk", USB_HS_CLK, "msm_hsusb", OFF),
- CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, "msm_hsusb", OFF),
- CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
- CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF ),
- CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
- CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
-};
-
-static struct pcom_clk_pdata msm_clock_7x01a_pdata = {
- .lookup = msm_clocks_7x01a,
- .num_lookups = ARRAY_SIZE(msm_clocks_7x01a),
-};
-
-struct platform_device msm_clock_7x01a = {
- .name = "msm-clock-pcom",
- .dev.platform_data = &msm_clock_7x01a_pdata,
-};
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
deleted file mode 100644
index c15ea8ab20a7..000000000000
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/clkdev.h>
-#include <mach/irqs.h>
-#include <mach/msm_iomap.h>
-#include <mach/dma.h>
-
-#include "devices.h"
-#include "smd_private.h"
-#include "common.h"
-
-#include <asm/mach/flash.h>
-
-#include "clock.h"
-#include "clock-pcom.h"
-
-#include <linux/platform_data/mmc-msm_sdcc.h>
-
-static struct resource msm_gpio_resources[] = {
- {
- .start = 32 + 18,
- .end = 32 + 18,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = 32 + 19,
- .end = 32 + 19,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = 0xac001000,
- .end = 0xac001000 + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- .name = "gpio1"
- },
- {
- .start = 0xac101400,
- .end = 0xac101400 + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- .name = "gpio2"
- },
-};
-
-struct platform_device msm_device_gpio_7x30 = {
- .name = "gpio-msm-7x30",
- .num_resources = ARRAY_SIZE(msm_gpio_resources),
- .resource = msm_gpio_resources,
-};
-
-static struct resource resources_uart2[] = {
- {
- .start = INT_UART2,
- .end = INT_UART2,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = MSM_UART2_PHYS,
- .end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1,
- .flags = IORESOURCE_MEM,
- .name = "uart_resource"
- },
-};
-
-struct platform_device msm_device_uart2 = {
- .name = "msm_serial",
- .id = 1,
- .num_resources = ARRAY_SIZE(resources_uart2),
- .resource = resources_uart2,
-};
-
-struct platform_device msm_device_smd = {
- .name = "msm_smd",
- .id = -1,
-};
-
-static struct resource resources_otg[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_otg = {
- .name = "msm_otg",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_otg),
- .resource = resources_otg,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static struct resource resources_hsusb[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_hsusb = {
- .name = "msm_hsusb",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_hsusb),
- .resource = resources_hsusb,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static u64 dma_mask = 0xffffffffULL;
-static struct resource resources_hsusb_host[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_hsusb_host = {
- .name = "msm_hsusb_host",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_hsusb_host),
- .resource = resources_hsusb_host,
- .dev = {
- .dma_mask = &dma_mask,
- .coherent_dma_mask = 0xffffffffULL,
- },
-};
-
-static struct clk_pcom_desc msm_clocks_7x30[] = {
- CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
- CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
- CLK_PCOM("cam_m_clk", CAM_M_CLK, NULL, 0),
- CLK_PCOM("camif_pad_pclk", CAMIF_PAD_P_CLK, NULL, OFF),
- CLK_PCOM("ce_clk", CE_CLK, NULL, 0),
- CLK_PCOM("codec_ssbi_clk", CODEC_SSBI_CLK, NULL, 0),
- CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
- CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
- CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
- CLK_PCOM("emdh_pclk", EMDH_P_CLK, NULL, OFF),
- CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
- CLK_PCOM("grp_2d_clk", GRP_2D_CLK, NULL, 0),
- CLK_PCOM("grp_2d_pclk", GRP_2D_P_CLK, NULL, 0),
- CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0),
- CLK_PCOM("grp_pclk", GRP_3D_P_CLK, NULL, 0),
- CLK_PCOM("hdmi_clk", HDMI_CLK, NULL, 0),
- CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
- CLK_PCOM("jpeg_clk", JPEG_CLK, NULL, OFF),
- CLK_PCOM("jpeg_pclk", JPEG_P_CLK, NULL, OFF),
- CLK_PCOM("lpa_codec_clk", LPA_CODEC_CLK, NULL, 0),
- CLK_PCOM("lpa_core_clk", LPA_CORE_CLK, NULL, 0),
- CLK_PCOM("lpa_pclk", LPA_P_CLK, NULL, 0),
- CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
- CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
- CLK_PCOM("mddi_pclk", PMDH_P_CLK, NULL, 0),
- CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
- CLK_PCOM("mdp_pclk", MDP_P_CLK, NULL, 0),
- CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
- CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
- CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
- CLK_PCOM("mfc_clk", MFC_CLK, NULL, 0),
- CLK_PCOM("mfc_div2_clk", MFC_DIV2_CLK, NULL, 0),
- CLK_PCOM("mfc_pclk", MFC_P_CLK, NULL, 0),
- CLK_PCOM("mi2s_m_clk", MI2S_M_CLK, NULL, 0),
- CLK_PCOM("mi2s_s_clk", MI2S_S_CLK, NULL, 0),
- CLK_PCOM("mi2s_codec_rx_m_clk", MI2S_CODEC_RX_M_CLK, NULL, 0),
- CLK_PCOM("mi2s_codec_rx_s_clk", MI2S_CODEC_RX_S_CLK, NULL, 0),
- CLK_PCOM("mi2s_codec_tx_m_clk", MI2S_CODEC_TX_M_CLK, NULL, 0),
- CLK_PCOM("mi2s_codec_tx_s_clk", MI2S_CODEC_TX_S_CLK, NULL, 0),
- CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
- CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
- CLK_PCOM("rotator_clk", AXI_ROTATOR_CLK, NULL, 0),
- CLK_PCOM("rotator_imem_clk", ROTATOR_IMEM_CLK, NULL, OFF),
- CLK_PCOM("rotator_pclk", ROTATOR_P_CLK, NULL, OFF),
- CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
- CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
- CLK_PCOM("spi_pclk", SPI_P_CLK, NULL, 0),
- CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
- CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("core", UART2_CLK, "msm_serial.1", 0),
- CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
- CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
- CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
- CLK_PCOM("usb_hs_core_clk", USB_HS_CORE_CLK, NULL, OFF),
- CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
- CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF),
- CLK_PCOM("usb_hs2_core_clk", USB_HS2_CORE_CLK, NULL, OFF),
- CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
- CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF),
- CLK_PCOM("usb_hs3_core_clk", USB_HS3_CORE_CLK, NULL, OFF),
- CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
- CLK_PCOM("vfe_camif_clk", VFE_CAMIF_CLK, NULL, 0),
- CLK_PCOM("vfe_clk", VFE_CLK, NULL, 0),
- CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, 0),
- CLK_PCOM("vfe_pclk", VFE_P_CLK, NULL, OFF),
- CLK_PCOM("vpe_clk", VPE_CLK, NULL, 0),
-
- /* 7x30 v2 hardware only. */
- CLK_PCOM("csi_clk", CSI0_CLK, NULL, 0),
- CLK_PCOM("csi_pclk", CSI0_P_CLK, NULL, 0),
- CLK_PCOM("csi_vfe_clk", CSI0_VFE_CLK, NULL, 0),
-};
-
-static struct pcom_clk_pdata msm_clock_7x30_pdata = {
- .lookup = msm_clocks_7x30,
- .num_lookups = ARRAY_SIZE(msm_clocks_7x30),
-};
-
-struct platform_device msm_clock_7x30 = {
- .name = "msm-clock-pcom",
- .dev.platform_data = &msm_clock_7x30_pdata,
-};
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
deleted file mode 100644
index 9e1e9ce07b1a..000000000000
--- a/arch/arm/mach-msm/devices-qsd8x50.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/clkdev.h>
-#include <linux/dma-mapping.h>
-
-#include <mach/irqs.h>
-#include <mach/msm_iomap.h>
-#include <mach/dma.h>
-
-#include "devices.h"
-#include "common.h"
-
-#include <asm/mach/flash.h>
-
-#include <linux/platform_data/mmc-msm_sdcc.h>
-#include "clock.h"
-#include "clock-pcom.h"
-
-static struct resource msm_gpio_resources[] = {
- {
- .start = 64 + 165 + 9,
- .end = 64 + 165 + 9,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = 64 + 165 + 10,
- .end = 64 + 165 + 10,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = 0xa9000800,
- .end = 0xa9000800 + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- .name = "gpio1"
- },
- {
- .start = 0xa9100C00,
- .end = 0xa9100C00 + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- .name = "gpio2"
- },
-};
-
-struct platform_device msm_device_gpio_8x50 = {
- .name = "gpio-msm-8x50",
- .num_resources = ARRAY_SIZE(msm_gpio_resources),
- .resource = msm_gpio_resources,
-};
-
-static struct resource resources_uart3[] = {
- {
- .start = INT_UART3,
- .end = INT_UART3,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = MSM_UART3_PHYS,
- .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
- .flags = IORESOURCE_MEM,
- .name = "uart_resource"
- },
-};
-
-struct platform_device msm_device_uart3 = {
- .name = "msm_serial",
- .id = 2,
- .num_resources = ARRAY_SIZE(resources_uart3),
- .resource = resources_uart3,
-};
-
-struct platform_device msm_device_smd = {
- .name = "msm_smd",
- .id = -1,
-};
-
-static struct resource resources_otg[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_otg = {
- .name = "msm_otg",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_otg),
- .resource = resources_otg,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static struct resource resources_hsusb[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_hsusb = {
- .name = "msm_hsusb",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_hsusb),
- .resource = resources_hsusb,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static u64 dma_mask = 0xffffffffULL;
-static struct resource resources_hsusb_host[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_hsusb_host = {
- .name = "msm_hsusb_host",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_hsusb_host),
- .resource = resources_hsusb_host,
- .dev = {
- .dma_mask = &dma_mask,
- .coherent_dma_mask = 0xffffffffULL,
- },
-};
-
-static struct resource resources_sdc1[] = {
- {
- .start = MSM_SDC1_PHYS,
- .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC1_0,
- .end = INT_SDC1_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct resource resources_sdc2[] = {
- {
- .start = MSM_SDC2_PHYS,
- .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC2_0,
- .end = INT_SDC2_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct resource resources_sdc3[] = {
- {
- .start = MSM_SDC3_PHYS,
- .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC3_0,
- .end = INT_SDC3_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct resource resources_sdc4[] = {
- {
- .start = MSM_SDC4_PHYS,
- .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_SDC4_0,
- .end = INT_SDC4_0,
- .flags = IORESOURCE_IRQ,
- .name = "cmd_irq",
- },
- {
- .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
- .name = "status_irq"
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device msm_device_sdc1 = {
- .name = "msm_sdcc",
- .id = 1,
- .num_resources = ARRAY_SIZE(resources_sdc1),
- .resource = resources_sdc1,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct platform_device msm_device_sdc2 = {
- .name = "msm_sdcc",
- .id = 2,
- .num_resources = ARRAY_SIZE(resources_sdc2),
- .resource = resources_sdc2,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct platform_device msm_device_sdc3 = {
- .name = "msm_sdcc",
- .id = 3,
- .num_resources = ARRAY_SIZE(resources_sdc3),
- .resource = resources_sdc3,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-struct platform_device msm_device_sdc4 = {
- .name = "msm_sdcc",
- .id = 4,
- .num_resources = ARRAY_SIZE(resources_sdc4),
- .resource = resources_sdc4,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static struct platform_device *msm_sdcc_devices[] __initdata = {
- &msm_device_sdc1,
- &msm_device_sdc2,
- &msm_device_sdc3,
- &msm_device_sdc4,
-};
-
-int __init msm_add_sdcc(unsigned int controller,
- struct msm_mmc_platform_data *plat,
- unsigned int stat_irq, unsigned long stat_irq_flags)
-{
- struct platform_device *pdev;
- struct resource *res;
-
- if (controller < 1 || controller > 4)
- return -EINVAL;
-
- pdev = msm_sdcc_devices[controller-1];
- pdev->dev.platform_data = plat;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq");
- if (!res)
- return -EINVAL;
- else if (stat_irq) {
- res->start = res->end = stat_irq;
- res->flags &= ~IORESOURCE_DISABLED;
- res->flags |= stat_irq_flags;
- }
-
- return platform_device_register(pdev);
-}
-
-static struct clk_pcom_desc msm_clocks_8x50[] = {
- CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
- CLK_PCOM("ce_clk", CE_CLK, NULL, 0),
- CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
- CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
- CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
- CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
- CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
- CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0),
- CLK_PCOM("i2c_clk", I2C_CLK, NULL, 0),
- CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
- CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
- CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
- CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
- CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
- CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
- CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
- CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
- CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
- CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
- CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
- CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
- CLK_PCOM("sdc_clk", SDC1_CLK, "msm_sdcc.1", OFF),
- CLK_PCOM("sdc_pclk", SDC1_P_CLK, "msm_sdcc.1", OFF),
- CLK_PCOM("sdc_clk", SDC2_CLK, "msm_sdcc.2", OFF),
- CLK_PCOM("sdc_pclk", SDC2_P_CLK, "msm_sdcc.2", OFF),
- CLK_PCOM("sdc_clk", SDC3_CLK, "msm_sdcc.3", OFF),
- CLK_PCOM("sdc_pclk", SDC3_P_CLK, "msm_sdcc.3", OFF),
- CLK_PCOM("sdc_clk", SDC4_CLK, "msm_sdcc.4", OFF),
- CLK_PCOM("sdc_pclk", SDC4_P_CLK, "msm_sdcc.4", OFF),
- CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
- CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
- CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
- CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
- CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("core", UART1_CLK, NULL, OFF),
- CLK_PCOM("core", UART2_CLK, NULL, 0),
- CLK_PCOM("core", UART3_CLK, "msm_serial.2", OFF),
- CLK_PCOM("uartdm_clk", UART1DM_CLK, NULL, OFF),
- CLK_PCOM("uartdm_clk", UART2DM_CLK, NULL, 0),
- CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
- CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
- CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
- CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
- CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
- CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
- CLK_PCOM("vfe_axi_clk", VFE_AXI_CLK, NULL, OFF),
- CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
- CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF),
- CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
- CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF),
- CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
-};
-
-static struct pcom_clk_pdata msm_clock_8x50_pdata = {
- .lookup = msm_clocks_8x50,
- .num_lookups = ARRAY_SIZE(msm_clocks_8x50),
-};
-
-struct platform_device msm_clock_8x50 = {
- .name = "msm-clock-pcom",
- .dev.platform_data = &msm_clock_8x50_pdata,
-};
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
deleted file mode 100644
index dccefad9f9b9..000000000000
--- a/arch/arm/mach-msm/devices.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* linux/arch/arm/mach-msm/devices.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_DEVICES_H
-#define __ARCH_ARM_MACH_MSM_DEVICES_H
-
-extern struct platform_device msm_device_gpio_7201;
-extern struct platform_device msm_device_gpio_7x30;
-extern struct platform_device msm_device_gpio_8x50;
-
-extern struct platform_device msm_device_uart1;
-extern struct platform_device msm_device_uart2;
-extern struct platform_device msm_device_uart3;
-
-extern struct platform_device msm8960_device_uart_gsbi2;
-extern struct platform_device msm8960_device_uart_gsbi5;
-
-extern struct platform_device msm_device_sdc1;
-extern struct platform_device msm_device_sdc2;
-extern struct platform_device msm_device_sdc3;
-extern struct platform_device msm_device_sdc4;
-
-extern struct platform_device msm_device_hsusb;
-extern struct platform_device msm_device_otg;
-extern struct platform_device msm_device_hsusb_host;
-
-extern struct platform_device msm_device_i2c;
-
-extern struct platform_device msm_device_smd;
-
-extern struct platform_device msm_device_nand;
-
-extern struct platform_device msm_device_mddi0;
-extern struct platform_device msm_device_mddi1;
-extern struct platform_device msm_device_mdp;
-
-extern struct platform_device msm_clock_7x01a;
-extern struct platform_device msm_clock_7x30;
-extern struct platform_device msm_clock_8x50;
-
-#endif
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
deleted file mode 100644
index fb9762464718..000000000000
--- a/arch/arm/mach-msm/dma.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/* linux/arch/arm/mach-msm/dma.c
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <mach/dma.h>
-#include <mach/msm_iomap.h>
-
-#define MSM_DMOV_CHANNEL_COUNT 16
-
-#define DMOV_SD0(off, ch) (MSM_DMOV_BASE + 0x0000 + (off) + ((ch) << 2))
-#define DMOV_SD1(off, ch) (MSM_DMOV_BASE + 0x0400 + (off) + ((ch) << 2))
-#define DMOV_SD2(off, ch) (MSM_DMOV_BASE + 0x0800 + (off) + ((ch) << 2))
-#define DMOV_SD3(off, ch) (MSM_DMOV_BASE + 0x0C00 + (off) + ((ch) << 2))
-
-#if defined(CONFIG_ARCH_MSM7X30)
-#define DMOV_SD_AARM DMOV_SD2
-#else
-#define DMOV_SD_AARM DMOV_SD3
-#endif
-
-#define DMOV_CMD_PTR(ch) DMOV_SD_AARM(0x000, ch)
-#define DMOV_RSLT(ch) DMOV_SD_AARM(0x040, ch)
-#define DMOV_FLUSH0(ch) DMOV_SD_AARM(0x080, ch)
-#define DMOV_FLUSH1(ch) DMOV_SD_AARM(0x0C0, ch)
-#define DMOV_FLUSH2(ch) DMOV_SD_AARM(0x100, ch)
-#define DMOV_FLUSH3(ch) DMOV_SD_AARM(0x140, ch)
-#define DMOV_FLUSH4(ch) DMOV_SD_AARM(0x180, ch)
-#define DMOV_FLUSH5(ch) DMOV_SD_AARM(0x1C0, ch)
-
-#define DMOV_STATUS(ch) DMOV_SD_AARM(0x200, ch)
-#define DMOV_ISR DMOV_SD_AARM(0x380, 0)
-
-#define DMOV_CONFIG(ch) DMOV_SD_AARM(0x300, ch)
-
-enum {
- MSM_DMOV_PRINT_ERRORS = 1,
- MSM_DMOV_PRINT_IO = 2,
- MSM_DMOV_PRINT_FLOW = 4
-};
-
-static DEFINE_SPINLOCK(msm_dmov_lock);
-static struct clk *msm_dmov_clk;
-static unsigned int channel_active;
-static struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
-static struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
-unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;
-
-#define MSM_DMOV_DPRINTF(mask, format, args...) \
- do { \
- if ((mask) & msm_dmov_print_mask) \
- printk(KERN_ERR format, args); \
- } while (0)
-#define PRINT_ERROR(format, args...) \
- MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
-#define PRINT_IO(format, args...) \
- MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
-#define PRINT_FLOW(format, args...) \
- MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);
-
-void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
-{
- writel((graceful << 31), DMOV_FLUSH0(id));
-}
-EXPORT_SYMBOL_GPL(msm_dmov_stop_cmd);
-
-void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
-{
- unsigned long irq_flags;
- unsigned int status;
-
- spin_lock_irqsave(&msm_dmov_lock, irq_flags);
- if (!channel_active)
- clk_enable(msm_dmov_clk);
- dsb();
- status = readl(DMOV_STATUS(id));
- if (list_empty(&ready_commands[id]) &&
- (status & DMOV_STATUS_CMD_PTR_RDY)) {
-#if 0
- if (list_empty(&active_commands[id])) {
- PRINT_FLOW("msm_dmov_enqueue_cmd(%d), enable interrupt\n", id);
- writel(DMOV_CONFIG_IRQ_EN, DMOV_CONFIG(id));
- }
-#endif
- if (cmd->execute_func)
- cmd->execute_func(cmd);
- PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n", id, status);
- list_add_tail(&cmd->list, &active_commands[id]);
- if (!channel_active)
- enable_irq(INT_ADM_AARM);
- channel_active |= 1U << id;
- writel(cmd->cmdptr, DMOV_CMD_PTR(id));
- } else {
- if (!channel_active)
- clk_disable(msm_dmov_clk);
- if (list_empty(&active_commands[id]))
- PRINT_ERROR("msm_dmov_enqueue_cmd(%d), error datamover stalled, status %x\n", id, status);
-
- PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status %x\n", id, status);
- list_add_tail(&cmd->list, &ready_commands[id]);
- }
- spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
-}
-EXPORT_SYMBOL_GPL(msm_dmov_enqueue_cmd);
-
-struct msm_dmov_exec_cmdptr_cmd {
- struct msm_dmov_cmd dmov_cmd;
- struct completion complete;
- unsigned id;
- unsigned int result;
- struct msm_dmov_errdata err;
-};
-
-static void
-dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
- unsigned int result,
- struct msm_dmov_errdata *err)
-{
- struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
- cmd->result = result;
- if (result != 0x80000002 && err)
- memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));
-
- complete(&cmd->complete);
-}
-
-int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
-{
- struct msm_dmov_exec_cmdptr_cmd cmd;
-
- PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);
-
- cmd.dmov_cmd.cmdptr = cmdptr;
- cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
- cmd.dmov_cmd.execute_func = NULL;
- cmd.id = id;
- init_completion(&cmd.complete);
-
- msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
- wait_for_completion(&cmd.complete);
-
- if (cmd.result != 0x80000002) {
- PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
- PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
- id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
- return -EIO;
- }
- PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
- return 0;
-}
-
-
-static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
-{
- unsigned int int_status, mask, id;
- unsigned long irq_flags;
- unsigned int ch_status;
- unsigned int ch_result;
- struct msm_dmov_cmd *cmd;
-
- spin_lock_irqsave(&msm_dmov_lock, irq_flags);
-
- int_status = readl(DMOV_ISR); /* read and clear interrupt */
- PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);
-
- while (int_status) {
- mask = int_status & -int_status;
- id = fls(mask) - 1;
- PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
- int_status &= ~mask;
- ch_status = readl(DMOV_STATUS(id));
- if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
- PRINT_FLOW("msm_datamover_irq_handler id %d, result not valid %x\n", id, ch_status);
- continue;
- }
- do {
- ch_result = readl(DMOV_RSLT(id));
- if (list_empty(&active_commands[id])) {
- PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
- "with no active command, status %x, result %x\n",
- id, ch_status, ch_result);
- cmd = NULL;
- } else
- cmd = list_entry(active_commands[id].next, typeof(*cmd), list);
- PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
- if (ch_result & DMOV_RSLT_DONE) {
- PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
- id, ch_status);
- PRINT_IO("msm_datamover_irq_handler id %d, got result "
- "for %p, result %x\n", id, cmd, ch_result);
- if (cmd) {
- list_del(&cmd->list);
- dsb();
- cmd->complete_func(cmd, ch_result, NULL);
- }
- }
- if (ch_result & DMOV_RSLT_FLUSH) {
- struct msm_dmov_errdata errdata;
-
- errdata.flush[0] = readl(DMOV_FLUSH0(id));
- errdata.flush[1] = readl(DMOV_FLUSH1(id));
- errdata.flush[2] = readl(DMOV_FLUSH2(id));
- errdata.flush[3] = readl(DMOV_FLUSH3(id));
- errdata.flush[4] = readl(DMOV_FLUSH4(id));
- errdata.flush[5] = readl(DMOV_FLUSH5(id));
- PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
- PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
- if (cmd) {
- list_del(&cmd->list);
- dsb();
- cmd->complete_func(cmd, ch_result, &errdata);
- }
- }
- if (ch_result & DMOV_RSLT_ERROR) {
- struct msm_dmov_errdata errdata;
-
- errdata.flush[0] = readl(DMOV_FLUSH0(id));
- errdata.flush[1] = readl(DMOV_FLUSH1(id));
- errdata.flush[2] = readl(DMOV_FLUSH2(id));
- errdata.flush[3] = readl(DMOV_FLUSH3(id));
- errdata.flush[4] = readl(DMOV_FLUSH4(id));
- errdata.flush[5] = readl(DMOV_FLUSH5(id));
-
- PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
- PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
- if (cmd) {
- list_del(&cmd->list);
- dsb();
- cmd->complete_func(cmd, ch_result, &errdata);
- }
- /* this does not seem to work, once we get an error */
- /* the datamover will no longer accept commands */
- writel(0, DMOV_FLUSH0(id));
- }
- ch_status = readl(DMOV_STATUS(id));
- PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
- if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) && !list_empty(&ready_commands[id])) {
- cmd = list_entry(ready_commands[id].next, typeof(*cmd), list);
- list_move_tail(&cmd->list, &active_commands[id]);
- if (cmd->execute_func)
- cmd->execute_func(cmd);
- PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
- writel(cmd->cmdptr, DMOV_CMD_PTR(id));
- }
- } while (ch_status & DMOV_STATUS_RSLT_VALID);
- if (list_empty(&active_commands[id]) && list_empty(&ready_commands[id]))
- channel_active &= ~(1U << id);
- PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
- }
-
- if (!channel_active) {
- disable_irq_nosync(INT_ADM_AARM);
- clk_disable(msm_dmov_clk);
- }
-
- spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
- return IRQ_HANDLED;
-}
-
-static int __init msm_init_datamover(void)
-{
- int i;
- int ret;
- struct clk *clk;
-
- for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
- INIT_LIST_HEAD(&ready_commands[i]);
- INIT_LIST_HEAD(&active_commands[i]);
- writel(DMOV_CONFIG_IRQ_EN | DMOV_CONFIG_FORCE_TOP_PTR_RSLT | DMOV_CONFIG_FORCE_FLUSH_RSLT, DMOV_CONFIG(i));
- }
- clk = clk_get(NULL, "adm_clk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
- clk_prepare(clk);
- msm_dmov_clk = clk;
- ret = request_irq(INT_ADM_AARM, msm_datamover_irq_handler, 0, "msmdatamover", NULL);
- if (ret)
- return ret;
- disable_irq(INT_ADM_AARM);
- return 0;
-}
-module_init(msm_init_datamover);
diff --git a/arch/arm/mach-msm/gpiomux-8x50.c b/arch/arm/mach-msm/gpiomux-8x50.c
deleted file mode 100644
index f7a4ea593c95..000000000000
--- a/arch/arm/mach-msm/gpiomux-8x50.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include "gpiomux.h"
-
-#if defined(CONFIG_MMC_MSM) || defined(CONFIG_MMC_MSM_MODULE)
- #define SDCC_DAT_0_3_CMD_ACTV_CFG (GPIOMUX_VALID | GPIOMUX_PULL_UP\
- | GPIOMUX_FUNC_1 | GPIOMUX_DRV_8MA)
- #define SDCC_CLK_ACTV_CFG (GPIOMUX_VALID | GPIOMUX_PULL_NONE\
- | GPIOMUX_FUNC_1 | GPIOMUX_DRV_8MA)
-#else
- #define SDCC_DAT_0_3_CMD_ACTV_CFG 0
- #define SDCC_CLK_ACTV_CFG 0
-#endif
-
-#define SDC1_SUSPEND_CONFIG (GPIOMUX_VALID | GPIOMUX_PULL_DOWN\
- | GPIOMUX_FUNC_GPIO | GPIOMUX_DRV_2MA)
-
-struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
- [86] = { /* UART3 RX */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_1 | GPIOMUX_VALID,
- },
- [87] = { /* UART3 TX */
- .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
- GPIOMUX_FUNC_1 | GPIOMUX_VALID,
- },
- /* SDC1 data[3:0] & CMD */
- [51 ... 55] = {
- .active = SDCC_DAT_0_3_CMD_ACTV_CFG,
- .suspended = SDC1_SUSPEND_CONFIG
- },
- /* SDC1 CLK */
- [56] = {
- .active = SDCC_CLK_ACTV_CFG,
- .suspended = SDC1_SUSPEND_CONFIG
- },
-};
diff --git a/arch/arm/mach-msm/gpiomux-v1.h b/arch/arm/mach-msm/gpiomux-v1.h
deleted file mode 100644
index 71d86feba450..000000000000
--- a/arch/arm/mach-msm/gpiomux-v1.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#ifndef __ARCH_ARM_MACH_MSM_GPIOMUX_V1_H
-#define __ARCH_ARM_MACH_MSM_GPIOMUX_V1_H
-
-#if defined(CONFIG_ARCH_MSM7X30)
-#define GPIOMUX_NGPIOS 182
-#elif defined(CONFIG_ARCH_QSD8X50)
-#define GPIOMUX_NGPIOS 165
-#else
-#define GPIOMUX_NGPIOS 133
-#endif
-
-typedef u32 gpiomux_config_t;
-
-enum {
- GPIOMUX_DRV_2MA = 0UL << 17,
- GPIOMUX_DRV_4MA = 1UL << 17,
- GPIOMUX_DRV_6MA = 2UL << 17,
- GPIOMUX_DRV_8MA = 3UL << 17,
- GPIOMUX_DRV_10MA = 4UL << 17,
- GPIOMUX_DRV_12MA = 5UL << 17,
- GPIOMUX_DRV_14MA = 6UL << 17,
- GPIOMUX_DRV_16MA = 7UL << 17,
-};
-
-enum {
- GPIOMUX_FUNC_GPIO = 0UL,
- GPIOMUX_FUNC_1 = 1UL,
- GPIOMUX_FUNC_2 = 2UL,
- GPIOMUX_FUNC_3 = 3UL,
- GPIOMUX_FUNC_4 = 4UL,
- GPIOMUX_FUNC_5 = 5UL,
- GPIOMUX_FUNC_6 = 6UL,
- GPIOMUX_FUNC_7 = 7UL,
- GPIOMUX_FUNC_8 = 8UL,
- GPIOMUX_FUNC_9 = 9UL,
- GPIOMUX_FUNC_A = 10UL,
- GPIOMUX_FUNC_B = 11UL,
- GPIOMUX_FUNC_C = 12UL,
- GPIOMUX_FUNC_D = 13UL,
- GPIOMUX_FUNC_E = 14UL,
- GPIOMUX_FUNC_F = 15UL,
-};
-
-enum {
- GPIOMUX_PULL_NONE = 0UL << 15,
- GPIOMUX_PULL_DOWN = 1UL << 15,
- GPIOMUX_PULL_KEEPER = 2UL << 15,
- GPIOMUX_PULL_UP = 3UL << 15,
-};
-
-#endif
diff --git a/arch/arm/mach-msm/gpiomux.c b/arch/arm/mach-msm/gpiomux.c
deleted file mode 100644
index 2b8e2d217082..000000000000
--- a/arch/arm/mach-msm/gpiomux.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-static DEFINE_SPINLOCK(gpiomux_lock);
-
-static void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
- unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
- ((gpio & 0x3ff) << 4);
- unsigned tlmm_disable = 0;
- int rc;
-
- rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
- &tlmm_config, &tlmm_disable);
- if (rc)
- pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
- __func__, rc, tlmm_config, tlmm_disable);
-}
-
-int msm_gpiomux_write(unsigned gpio,
- gpiomux_config_t active,
- gpiomux_config_t suspended)
-{
- struct msm_gpiomux_config *cfg = msm_gpiomux_configs + gpio;
- unsigned long irq_flags;
- gpiomux_config_t setting;
-
- if (gpio >= GPIOMUX_NGPIOS)
- return -EINVAL;
-
- spin_lock_irqsave(&gpiomux_lock, irq_flags);
-
- if (active & GPIOMUX_VALID)
- cfg->active = active;
-
- if (suspended & GPIOMUX_VALID)
- cfg->suspended = suspended;
-
- setting = cfg->ref ? active : suspended;
- if (setting & GPIOMUX_VALID)
- __msm_gpiomux_write(gpio, setting);
-
- spin_unlock_irqrestore(&gpiomux_lock, irq_flags);
- return 0;
-}
-EXPORT_SYMBOL(msm_gpiomux_write);
-
-int msm_gpiomux_get(unsigned gpio)
-{
- struct msm_gpiomux_config *cfg = msm_gpiomux_configs + gpio;
- unsigned long irq_flags;
-
- if (gpio >= GPIOMUX_NGPIOS)
- return -EINVAL;
-
- spin_lock_irqsave(&gpiomux_lock, irq_flags);
- if (cfg->ref++ == 0 && cfg->active & GPIOMUX_VALID)
- __msm_gpiomux_write(gpio, cfg->active);
- spin_unlock_irqrestore(&gpiomux_lock, irq_flags);
- return 0;
-}
-EXPORT_SYMBOL(msm_gpiomux_get);
-
-int msm_gpiomux_put(unsigned gpio)
-{
- struct msm_gpiomux_config *cfg = msm_gpiomux_configs + gpio;
- unsigned long irq_flags;
-
- if (gpio >= GPIOMUX_NGPIOS)
- return -EINVAL;
-
- spin_lock_irqsave(&gpiomux_lock, irq_flags);
- BUG_ON(cfg->ref == 0);
- if (--cfg->ref == 0 && cfg->suspended & GPIOMUX_VALID)
- __msm_gpiomux_write(gpio, cfg->suspended);
- spin_unlock_irqrestore(&gpiomux_lock, irq_flags);
- return 0;
-}
-EXPORT_SYMBOL(msm_gpiomux_put);
-
-static int __init gpiomux_init(void)
-{
- unsigned n;
-
- for (n = 0; n < GPIOMUX_NGPIOS; ++n) {
- msm_gpiomux_configs[n].ref = 0;
- if (!(msm_gpiomux_configs[n].suspended & GPIOMUX_VALID))
- continue;
- __msm_gpiomux_write(n, msm_gpiomux_configs[n].suspended);
- }
- return 0;
-}
-postcore_initcall(gpiomux_init);
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
deleted file mode 100644
index 4410d7766f93..000000000000
--- a/arch/arm/mach-msm/gpiomux.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#ifndef __ARCH_ARM_MACH_MSM_GPIOMUX_H
-#define __ARCH_ARM_MACH_MSM_GPIOMUX_H
-
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <mach/msm_gpiomux.h>
-#include "gpiomux-v1.h"
-
-/**
- * struct msm_gpiomux_config: gpiomux settings for one gpio line.
- *
- * A complete gpiomux config is the bitwise-or of a drive-strength,
- * function, and pull. For functions other than GPIO, the OE
- * is hard-wired according to the function. For GPIO mode,
- * OE is controlled by gpiolib.
- *
- * Available settings differ by target; see the gpiomux header
- * specific to your target arch for available configurations.
- *
- * @active: The configuration to be installed when the line is
- * active, or its reference count is > 0.
- * @suspended: The configuration to be installed when the line
- * is suspended, or its reference count is 0.
- * @ref: The reference count of the line. For internal use of
- * the gpiomux framework only.
- */
-struct msm_gpiomux_config {
- gpiomux_config_t active;
- gpiomux_config_t suspended;
- unsigned ref;
-};
-
-/**
- * @GPIOMUX_VALID: If set, the config field contains 'good data'.
- * The absence of this bit will prevent the gpiomux
- * system from applying the configuration under all
- * circumstances.
- */
-enum {
- GPIOMUX_VALID = BIT(sizeof(gpiomux_config_t) * BITS_PER_BYTE - 1),
- GPIOMUX_CTL_MASK = GPIOMUX_VALID,
-};
-
-#ifdef CONFIG_MSM_GPIOMUX
-
-/* Each architecture must provide its own instance of this table.
- * To avoid having gpiomux manage any given gpio, one or both of
- * the entries can avoid setting GPIOMUX_VALID - the absence
- * of that flag will prevent the configuration from being applied
- * during state transitions.
- */
-extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
-
-/* Install a new configuration to the gpio line. To avoid overwriting
- * a configuration, leave the VALID bit out.
- */
-int msm_gpiomux_write(unsigned gpio,
- gpiomux_config_t active,
- gpiomux_config_t suspended);
-#else
-static inline int msm_gpiomux_write(unsigned gpio,
- gpiomux_config_t active,
- gpiomux_config_t suspended)
-{
- return -ENOSYS;
-}
-#endif
-#endif
diff --git a/arch/arm/mach-msm/include/mach/clk.h b/arch/arm/mach-msm/include/mach/clk.h
deleted file mode 100644
index fd4f4a7a83b3..000000000000
--- a/arch/arm/mach-msm/include/mach/clk.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __MACH_CLK_H
-#define __MACH_CLK_H
-
-/* Magic rate value for use with PM QOS to request the board's maximum
- * supported AXI rate. PM QOS will only pass positive s32 rate values
- * through to the clock driver, so INT_MAX is used.
- */
-#define MSM_AXI_MAX_FREQ LONG_MAX
-
-enum clk_reset_action {
- CLK_RESET_DEASSERT = 0,
- CLK_RESET_ASSERT = 1
-};
-
-struct clk;
-
-/* Assert/Deassert reset to a hardware block associated with a clock */
-int clk_reset(struct clk *clk, enum clk_reset_action action);
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
deleted file mode 100644
index a72d48d42342..000000000000
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* linux/include/asm-arm/arch-msm/dma.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_DMA_H
-
-#include <linux/list.h>
-
-struct msm_dmov_errdata {
- uint32_t flush[6];
-};
-
-struct msm_dmov_cmd {
- struct list_head list;
- unsigned int cmdptr;
- void (*complete_func)(struct msm_dmov_cmd *cmd,
- unsigned int result,
- struct msm_dmov_errdata *err);
- void (*execute_func)(struct msm_dmov_cmd *cmd);
- void *data;
-};
-
-#ifndef CONFIG_ARCH_MSM8X60
-void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
-void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful);
-int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr);
-#else
-static inline
-void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd) { }
-static inline
-void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful) { }
-static inline
-int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr) { return -EIO; }
-#endif
-
-#define DMOV_CMD_LIST (0 << 29) /* does not work */
-#define DMOV_CMD_PTR_LIST (1 << 29) /* works */
-#define DMOV_CMD_INPUT_CFG (2 << 29) /* untested */
-#define DMOV_CMD_OUTPUT_CFG (3 << 29) /* untested */
-#define DMOV_CMD_ADDR(addr) ((addr) >> 3)
-
-#define DMOV_RSLT_VALID (1 << 31) /* 0 == host has empties result fifo */
-#define DMOV_RSLT_ERROR (1 << 3)
-#define DMOV_RSLT_FLUSH (1 << 2)
-#define DMOV_RSLT_DONE (1 << 1) /* top pointer done */
-#define DMOV_RSLT_USER (1 << 0) /* command with FR force result */
-
-#define DMOV_STATUS_RSLT_COUNT(n) (((n) >> 29))
-#define DMOV_STATUS_CMD_COUNT(n) (((n) >> 27) & 3)
-#define DMOV_STATUS_RSLT_VALID (1 << 1)
-#define DMOV_STATUS_CMD_PTR_RDY (1 << 0)
-
-#define DMOV_CONFIG_FORCE_TOP_PTR_RSLT (1 << 2)
-#define DMOV_CONFIG_FORCE_FLUSH_RSLT (1 << 1)
-#define DMOV_CONFIG_IRQ_EN (1 << 0)
-
-/* channel assignments */
-
-#define DMOV_NAND_CHAN 7
-#define DMOV_NAND_CRCI_CMD 5
-#define DMOV_NAND_CRCI_DATA 4
-
-#define DMOV_SDC1_CHAN 8
-#define DMOV_SDC1_CRCI 6
-
-#define DMOV_SDC2_CHAN 8
-#define DMOV_SDC2_CRCI 7
-
-#define DMOV_TSIF_CHAN 10
-#define DMOV_TSIF_CRCI 10
-
-#define DMOV_USB_CHAN 11
-
-/* no client rate control ifc (eg, ram) */
-#define DMOV_NONE_CRCI 0
-
-
-/* If the CMD_PTR register has CMD_PTR_LIST selected, the data mover
- * is going to walk a list of 32bit pointers as described below. Each
- * pointer points to a *array* of dmov_s, etc structs. The last pointer
- * in the list is marked with CMD_PTR_LP. The last struct in each array
- * is marked with CMD_LC (see below).
- */
-#define CMD_PTR_ADDR(addr) ((addr) >> 3)
-#define CMD_PTR_LP (1 << 31) /* last pointer */
-#define CMD_PTR_PT (3 << 29) /* ? */
-
-/* Single Item Mode */
-typedef struct {
- unsigned cmd;
- unsigned src;
- unsigned dst;
- unsigned len;
-} dmov_s;
-
-/* Scatter/Gather Mode */
-typedef struct {
- unsigned cmd;
- unsigned src_dscr;
- unsigned dst_dscr;
- unsigned _reserved;
-} dmov_sg;
-
-/* Box mode */
-typedef struct {
- uint32_t cmd;
- uint32_t src_row_addr;
- uint32_t dst_row_addr;
- uint32_t src_dst_len;
- uint32_t num_rows;
- uint32_t row_offset;
-} dmov_box;
-
-/* bits for the cmd field of the above structures */
-
-#define CMD_LC (1 << 31) /* last command */
-#define CMD_FR (1 << 22) /* force result -- does not work? */
-#define CMD_OCU (1 << 21) /* other channel unblock */
-#define CMD_OCB (1 << 20) /* other channel block */
-#define CMD_TCB (1 << 19) /* ? */
-#define CMD_DAH (1 << 18) /* destination address hold -- does not work?*/
-#define CMD_SAH (1 << 17) /* source address hold -- does not work? */
-
-#define CMD_MODE_SINGLE (0 << 0) /* dmov_s structure used */
-#define CMD_MODE_SG (1 << 0) /* untested */
-#define CMD_MODE_IND_SG (2 << 0) /* untested */
-#define CMD_MODE_BOX (3 << 0) /* untested */
-
-#define CMD_DST_SWAP_BYTES (1 << 14) /* exchange each byte n with byte n+1 */
-#define CMD_DST_SWAP_SHORTS (1 << 15) /* exchange each short n with short n+1 */
-#define CMD_DST_SWAP_WORDS (1 << 16) /* exchange each word n with word n+1 */
-
-#define CMD_SRC_SWAP_BYTES (1 << 11) /* exchange each byte n with byte n+1 */
-#define CMD_SRC_SWAP_SHORTS (1 << 12) /* exchange each short n with short n+1 */
-#define CMD_SRC_SWAP_WORDS (1 << 13) /* exchange each word n with word n+1 */
-
-#define CMD_DST_CRCI(n) (((n) & 15) << 7)
-#define CMD_SRC_CRCI(n) (((n) & 15) << 3)
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/entry-macro.S b/arch/arm/mach-msm/include/mach/entry-macro.S
deleted file mode 100644
index f2ae9087f654..000000000000
--- a/arch/arm/mach-msm/include/mach/entry-macro.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#if !defined(CONFIG_ARM_GIC)
-#include <mach/msm_iomap.h>
-
- .macro get_irqnr_preamble, base, tmp
- @ enable imprecise aborts
- cpsie a
- mov \base, #MSM_VIC_BASE
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- @ 0xD0 has irq# or old irq# if the irq has been handled
- @ 0xD4 has irq# or -1 if none pending *but* if you just
- @ read 0xD4 you never get the first irq for some reason
- ldr \irqnr, [\base, #0xD0]
- ldr \irqnr, [\base, #0xD4]
- cmp \irqnr, #0xffffffff
- .endm
-#endif
diff --git a/arch/arm/mach-msm/include/mach/hardware.h b/arch/arm/mach-msm/include/mach/hardware.h
deleted file mode 100644
index 2d126091ae41..000000000000
--- a/arch/arm/mach-msm/include/mach/hardware.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* arch/arm/mach-msm/include/mach/hardware.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_HARDWARE_H
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-7x00.h b/arch/arm/mach-msm/include/mach/irqs-7x00.h
deleted file mode 100644
index f1fe70612fe9..000000000000
--- a/arch/arm/mach-msm/include/mach/irqs-7x00.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_7X00_H
-#define __ASM_ARCH_MSM_IRQS_7X00_H
-
-/* MSM ARM11 Interrupt Numbers */
-/* See 80-VE113-1 A, pp219-221 */
-
-#define INT_A9_M2A_0 0
-#define INT_A9_M2A_1 1
-#define INT_A9_M2A_2 2
-#define INT_A9_M2A_3 3
-#define INT_A9_M2A_4 4
-#define INT_A9_M2A_5 5
-#define INT_A9_M2A_6 6
-#define INT_GP_TIMER_EXP 7
-#define INT_DEBUG_TIMER_EXP 8
-#define INT_UART1 9
-#define INT_UART2 10
-#define INT_UART3 11
-#define INT_UART1_RX 12
-#define INT_UART2_RX 13
-#define INT_UART3_RX 14
-#define INT_USB_OTG 15
-#define INT_MDDI_PRI 16
-#define INT_MDDI_EXT 17
-#define INT_MDDI_CLIENT 18
-#define INT_MDP 19
-#define INT_GRAPHICS 20
-#define INT_ADM_AARM 21
-#define INT_ADSP_A11 22
-#define INT_ADSP_A9_A11 23
-#define INT_SDC1_0 24
-#define INT_SDC1_1 25
-#define INT_SDC2_0 26
-#define INT_SDC2_1 27
-#define INT_KEYSENSE 28
-#define INT_TCHSCRN_SSBI 29
-#define INT_TCHSCRN1 30
-#define INT_TCHSCRN2 31
-
-#define INT_GPIO_GROUP1 (32 + 0)
-#define INT_GPIO_GROUP2 (32 + 1)
-#define INT_PWB_I2C (32 + 2)
-#define INT_SOFTRESET (32 + 3)
-#define INT_NAND_WR_ER_DONE (32 + 4)
-#define INT_NAND_OP_DONE (32 + 5)
-#define INT_PBUS_ARM11 (32 + 6)
-#define INT_AXI_MPU_SMI (32 + 7)
-#define INT_AXI_MPU_EBI1 (32 + 8)
-#define INT_AD_HSSD (32 + 9)
-#define INT_ARM11_PMU (32 + 10)
-#define INT_ARM11_DMA (32 + 11)
-#define INT_TSIF_IRQ (32 + 12)
-#define INT_UART1DM_IRQ (32 + 13)
-#define INT_UART1DM_RX (32 + 14)
-#define INT_USB_HS (32 + 15)
-#define INT_SDC3_0 (32 + 16)
-#define INT_SDC3_1 (32 + 17)
-#define INT_SDC4_0 (32 + 18)
-#define INT_SDC4_1 (32 + 19)
-#define INT_UART2DM_RX (32 + 20)
-#define INT_UART2DM_IRQ (32 + 21)
-
-/* 22-31 are reserved */
-
-#define NR_MSM_IRQS 64
-#define NR_GPIO_IRQS 122
-#define NR_BOARD_IRQS 64
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-7x30.h b/arch/arm/mach-msm/include/mach/irqs-7x30.h
deleted file mode 100644
index 1f15902655fd..000000000000
--- a/arch/arm/mach-msm/include/mach/irqs-7x30.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_7X30_H
-#define __ASM_ARCH_MSM_IRQS_7X30_H
-
-/* MSM ACPU Interrupt Numbers */
-
-#define INT_DEBUG_TIMER_EXP 0
-#define INT_GPT0_TIMER_EXP 1
-#define INT_GPT1_TIMER_EXP 2
-#define INT_WDT0_ACCSCSSBARK 3
-#define INT_WDT1_ACCSCSSBARK 4
-#define INT_AVS_SVIC 5
-#define INT_AVS_SVIC_SW_DONE 6
-#define INT_SC_DBG_RX_FULL 7
-#define INT_SC_DBG_TX_EMPTY 8
-#define INT_ARM11_PM 9
-#define INT_AVS_REQ_DOWN 10
-#define INT_AVS_REQ_UP 11
-#define INT_SC_ACG 12
-/* SCSS_VICFIQSTS0[13:15] are RESERVED */
-#define INT_L2_SVICCPUIRPTREQ 16
-#define INT_L2_SVICDMANSIRPTREQ 17
-#define INT_L2_SVICDMASIRPTREQ 18
-#define INT_L2_SVICSLVIRPTREQ 19
-#define INT_AD5A_MPROC_APPS_0 20
-#define INT_AD5A_MPROC_APPS_1 21
-#define INT_A9_M2A_0 22
-#define INT_A9_M2A_1 23
-#define INT_A9_M2A_2 24
-#define INT_A9_M2A_3 25
-#define INT_A9_M2A_4 26
-#define INT_A9_M2A_5 27
-#define INT_A9_M2A_6 28
-#define INT_A9_M2A_7 29
-#define INT_A9_M2A_8 30
-#define INT_A9_M2A_9 31
-
-#define INT_AXI_EBI1_SC (32 + 0)
-#define INT_IMEM_ERR (32 + 1)
-#define INT_AXI_EBI0_SC (32 + 2)
-#define INT_PBUS_SC_IRQC (32 + 3)
-#define INT_PERPH_BUS_BPM (32 + 4)
-#define INT_CC_TEMP_SENSE (32 + 5)
-#define INT_UXMC_EBI0 (32 + 6)
-#define INT_UXMC_EBI1 (32 + 7)
-#define INT_EBI2_OP_DONE (32 + 8)
-#define INT_EBI2_WR_ER_DONE (32 + 9)
-#define INT_TCSR_SPSS_CE (32 + 10)
-#define INT_EMDH (32 + 11)
-#define INT_PMDH (32 + 12)
-#define INT_MDC (32 + 13)
-#define INT_MIDI_TO_SUPSS (32 + 14)
-#define INT_LPA_2 (32 + 15)
-#define INT_GPIO_GROUP1_SECURE (32 + 16)
-#define INT_GPIO_GROUP2_SECURE (32 + 17)
-#define INT_GPIO_GROUP1 (32 + 18)
-#define INT_GPIO_GROUP2 (32 + 19)
-#define INT_MPRPH_SOFTRESET (32 + 20)
-#define INT_PWB_I2C (32 + 21)
-#define INT_PWB_I2C_2 (32 + 22)
-#define INT_TSSC_SAMPLE (32 + 23)
-#define INT_TSSC_PENUP (32 + 24)
-#define INT_TCHSCRN_SSBI (32 + 25)
-#define INT_FM_RDS (32 + 26)
-#define INT_KEYSENSE (32 + 27)
-#define INT_USB_OTG_HS (32 + 28)
-#define INT_USB_OTG_HS2 (32 + 29)
-#define INT_USB_OTG_HS3 (32 + 30)
-#define INT_CSI (32 + 31)
-
-#define INT_SPI_OUTPUT (64 + 0)
-#define INT_SPI_INPUT (64 + 1)
-#define INT_SPI_ERROR (64 + 2)
-#define INT_UART1 (64 + 3)
-#define INT_UART1_RX (64 + 4)
-#define INT_UART2 (64 + 5)
-#define INT_UART2_RX (64 + 6)
-#define INT_UART3 (64 + 7)
-#define INT_UART3_RX (64 + 8)
-#define INT_UART1DM_IRQ (64 + 9)
-#define INT_UART1DM_RX (64 + 10)
-#define INT_UART2DM_IRQ (64 + 11)
-#define INT_UART2DM_RX (64 + 12)
-#define INT_TSIF (64 + 13)
-#define INT_ADM_SC1 (64 + 14)
-#define INT_ADM_SC2 (64 + 15)
-#define INT_MDP (64 + 16)
-#define INT_VPE (64 + 17)
-#define INT_GRP_2D (64 + 18)
-#define INT_GRP_3D (64 + 19)
-#define INT_ROTATOR (64 + 20)
-#define INT_MFC720 (64 + 21)
-#define INT_JPEG (64 + 22)
-#define INT_VFE (64 + 23)
-#define INT_TV_ENC (64 + 24)
-#define INT_PMIC_SSBI (64 + 25)
-#define INT_MPM_1 (64 + 26)
-#define INT_TCSR_SPSS_SAMPLE (64 + 27)
-#define INT_TCSR_SPSS_PENUP (64 + 28)
-#define INT_MPM_2 (64 + 29)
-#define INT_SDC1_0 (64 + 30)
-#define INT_SDC1_1 (64 + 31)
-
-#define INT_SDC3_0 (96 + 0)
-#define INT_SDC3_1 (96 + 1)
-#define INT_SDC2_0 (96 + 2)
-#define INT_SDC2_1 (96 + 3)
-#define INT_SDC4_0 (96 + 4)
-#define INT_SDC4_1 (96 + 5)
-#define INT_PWB_QUP_IN (96 + 6)
-#define INT_PWB_QUP_OUT (96 + 7)
-#define INT_PWB_QUP_ERR (96 + 8)
-#define INT_SCSS_WDT0_BITE (96 + 9)
-/* SCSS_VICFIQSTS3[10:31] are RESERVED */
-
-/* Retrofit universal macro names */
-#define INT_ADM_AARM INT_ADM_SC2
-#define INT_USB_HS INT_USB_OTG_HS
-#define INT_USB_OTG INT_USB_OTG_HS
-#define INT_TCHSCRN1 INT_TSSC_SAMPLE
-#define INT_TCHSCRN2 INT_TSSC_PENUP
-#define INT_GP_TIMER_EXP INT_GPT0_TIMER_EXP
-#define INT_ADSP_A11 INT_AD5A_MPROC_APPS_0
-#define INT_ADSP_A9_A11 INT_AD5A_MPROC_APPS_1
-#define INT_MDDI_EXT INT_EMDH
-#define INT_MDDI_PRI INT_PMDH
-#define INT_MDDI_CLIENT INT_MDC
-#define INT_NAND_WR_ER_DONE INT_EBI2_WR_ER_DONE
-#define INT_NAND_OP_DONE INT_EBI2_OP_DONE
-
-#define NR_MSM_IRQS 128
-#define NR_GPIO_IRQS 182
-#define PMIC8058_IRQ_BASE (NR_MSM_IRQS + NR_GPIO_IRQS)
-#define NR_PMIC8058_GPIO_IRQS 40
-#define NR_PMIC8058_MPP_IRQS 12
-#define NR_PMIC8058_MISC_IRQS 8
-#define NR_PMIC8058_IRQS (NR_PMIC8058_GPIO_IRQS +\
- NR_PMIC8058_MPP_IRQS +\
- NR_PMIC8058_MISC_IRQS)
-#define NR_BOARD_IRQS NR_PMIC8058_IRQS
-
-#endif /* __ASM_ARCH_MSM_IRQS_7X30_H */
diff --git a/arch/arm/mach-msm/include/mach/irqs-8x50.h b/arch/arm/mach-msm/include/mach/irqs-8x50.h
deleted file mode 100644
index 26adbe0e9406..000000000000
--- a/arch/arm/mach-msm/include/mach/irqs-8x50.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_8XXX_H
-#define __ASM_ARCH_MSM_IRQS_8XXX_H
-
-/* MSM ACPU Interrupt Numbers */
-
-#define INT_A9_M2A_0 0
-#define INT_A9_M2A_1 1
-#define INT_A9_M2A_2 2
-#define INT_A9_M2A_3 3
-#define INT_A9_M2A_4 4
-#define INT_A9_M2A_5 5
-#define INT_A9_M2A_6 6
-#define INT_GP_TIMER_EXP 7
-#define INT_DEBUG_TIMER_EXP 8
-#define INT_SIRC_0 9
-#define INT_SDC3_0 10
-#define INT_SDC3_1 11
-#define INT_SDC4_0 12
-#define INT_SDC4_1 13
-#define INT_AD6_EXT_VFR 14
-#define INT_USB_OTG 15
-#define INT_MDDI_PRI 16
-#define INT_MDDI_EXT 17
-#define INT_MDDI_CLIENT 18
-#define INT_MDP 19
-#define INT_GRAPHICS 20
-#define INT_ADM_AARM 21
-#define INT_ADSP_A11 22
-#define INT_ADSP_A9_A11 23
-#define INT_SDC1_0 24
-#define INT_SDC1_1 25
-#define INT_SDC2_0 26
-#define INT_SDC2_1 27
-#define INT_KEYSENSE 28
-#define INT_TCHSCRN_SSBI 29
-#define INT_TCHSCRN1 30
-#define INT_TCHSCRN2 31
-
-#define INT_TCSR_MPRPH_SC1 (32 + 0)
-#define INT_USB_FS2 (32 + 1)
-#define INT_PWB_I2C (32 + 2)
-#define INT_SOFTRESET (32 + 3)
-#define INT_NAND_WR_ER_DONE (32 + 4)
-#define INT_NAND_OP_DONE (32 + 5)
-#define INT_TCSR_MPRPH_SC2 (32 + 6)
-#define INT_OP_PEN (32 + 7)
-#define INT_AD_HSSD (32 + 8)
-#define INT_ARM11_PM (32 + 9)
-#define INT_SDMA_NON_SECURE (32 + 10)
-#define INT_TSIF_IRQ (32 + 11)
-#define INT_UART1DM_IRQ (32 + 12)
-#define INT_UART1DM_RX (32 + 13)
-#define INT_SDMA_SECURE (32 + 14)
-#define INT_SI2S_SLAVE (32 + 15)
-#define INT_SC_I2CPU (32 + 16)
-#define INT_SC_DBG_RDTRFULL (32 + 17)
-#define INT_SC_DBG_WDTRFULL (32 + 18)
-#define INT_SCPLL_CTL_DONE (32 + 19)
-#define INT_UART2DM_IRQ (32 + 20)
-#define INT_UART2DM_RX (32 + 21)
-#define INT_VDC_MEC (32 + 22)
-#define INT_VDC_DB (32 + 23)
-#define INT_VDC_AXI (32 + 24)
-#define INT_VFE (32 + 25)
-#define INT_USB_HS (32 + 26)
-#define INT_AUDIO_OUT0 (32 + 27)
-#define INT_AUDIO_OUT1 (32 + 28)
-#define INT_CRYPTO (32 + 29)
-#define INT_AD6M_IDLE (32 + 30)
-#define INT_SIRC_1 (32 + 31)
-
-#define NR_GPIO_IRQS 165
-#define NR_MSM_IRQS 64
-#define NR_BOARD_IRQS 64
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
deleted file mode 100644
index 164d355c96ea..000000000000
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_H
-#define __ASM_ARCH_MSM_IRQS_H
-
-#define MSM_IRQ_BIT(irq) (1 << ((irq) & 31))
-
-#if defined(CONFIG_ARCH_MSM7X30)
-#include "irqs-7x30.h"
-#elif defined(CONFIG_ARCH_QSD8X50)
-#include "irqs-8x50.h"
-#include "sirc.h"
-#elif defined(CONFIG_ARCH_MSM_ARM11)
-#include "irqs-7x00.h"
-#else
-#error "Unknown architecture specification"
-#endif
-
-#define NR_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS)
-#define MSM_GPIO_TO_INT(n) (NR_MSM_IRQS + (n))
-#define MSM_INT_TO_REG(base, irq) (base + irq / 32)
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_gpiomux.h b/arch/arm/mach-msm/include/mach/msm_gpiomux.h
deleted file mode 100644
index 0c7d3936e02f..000000000000
--- a/arch/arm/mach-msm/include/mach/msm_gpiomux.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _LINUX_MSM_GPIOMUX_H
-#define _LINUX_MSM_GPIOMUX_H
-
-#ifdef CONFIG_MSM_GPIOMUX
-
-/* Increment a gpio's reference count, possibly activating the line. */
-int __must_check msm_gpiomux_get(unsigned gpio);
-
-/* Decrement a gpio's reference count, possibly suspending the line. */
-int msm_gpiomux_put(unsigned gpio);
-
-#else
-
-static inline int __must_check msm_gpiomux_get(unsigned gpio)
-{
- return -ENOSYS;
-}
-
-static inline int msm_gpiomux_put(unsigned gpio)
-{
- return -ENOSYS;
-}
-
-#endif
-
-#endif /* _LINUX_MSM_GPIOMUX_H */
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
deleted file mode 100644
index 67dc0e98b958..000000000000
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* arch/arm/mach-msm/include/mach/msm_iomap.h
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- * The MSM peripherals are spread all over across 768MB of physical
- * space, which makes just having a simple IO_ADDRESS macro to slide
- * them into the right virtual location rough. Instead, we will
- * provide a master phys->virt mapping for peripherals here.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_IOMAP_7X00_H
-#define __ASM_ARCH_MSM_IOMAP_7X00_H
-
-#include <asm/sizes.h>
-
-/* Physical base address and size of peripherals.
- * Ordered by the virtual base addresses they will be mapped at.
- *
- * MSM_VIC_BASE must be an value that can be loaded via a "mov"
- * instruction, otherwise entry-macro.S will not compile.
- *
- * If you add or remove entries here, you'll want to edit the
- * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
- * changes.
- *
- */
-
-#define MSM_VIC_BASE IOMEM(0xE0000000)
-#define MSM_VIC_PHYS 0xC0000000
-#define MSM_VIC_SIZE SZ_4K
-
-#define MSM7X00_CSR_PHYS 0xC0100000
-#define MSM7X00_CSR_SIZE SZ_4K
-
-#define MSM_DMOV_BASE IOMEM(0xE0002000)
-#define MSM_DMOV_PHYS 0xA9700000
-#define MSM_DMOV_SIZE SZ_4K
-
-#define MSM7X00_GPIO1_PHYS 0xA9200000
-#define MSM7X00_GPIO1_SIZE SZ_4K
-
-#define MSM7X00_GPIO2_PHYS 0xA9300000
-#define MSM7X00_GPIO2_SIZE SZ_4K
-
-#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
-#define MSM_CLK_CTL_PHYS 0xA8600000
-#define MSM_CLK_CTL_SIZE SZ_4K
-
-#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
-#define MSM_SHARED_RAM_PHYS 0x01F00000
-#define MSM_SHARED_RAM_SIZE SZ_1M
-
-#define MSM_UART1_PHYS 0xA9A00000
-#define MSM_UART1_SIZE SZ_4K
-
-#define MSM_UART2_PHYS 0xA9B00000
-#define MSM_UART2_SIZE SZ_4K
-
-#define MSM_UART3_PHYS 0xA9C00000
-#define MSM_UART3_SIZE SZ_4K
-
-#define MSM_SDC1_PHYS 0xA0400000
-#define MSM_SDC1_SIZE SZ_4K
-
-#define MSM_SDC2_PHYS 0xA0500000
-#define MSM_SDC2_SIZE SZ_4K
-
-#define MSM_SDC3_PHYS 0xA0600000
-#define MSM_SDC3_SIZE SZ_4K
-
-#define MSM_SDC4_PHYS 0xA0700000
-#define MSM_SDC4_SIZE SZ_4K
-
-#define MSM_I2C_PHYS 0xA9900000
-#define MSM_I2C_SIZE SZ_4K
-
-#define MSM_HSUSB_PHYS 0xA0800000
-#define MSM_HSUSB_SIZE SZ_4K
-
-#define MSM_PMDH_PHYS 0xAA600000
-#define MSM_PMDH_SIZE SZ_4K
-
-#define MSM_EMDH_PHYS 0xAA700000
-#define MSM_EMDH_SIZE SZ_4K
-
-#define MSM_MDP_PHYS 0xAA200000
-#define MSM_MDP_SIZE 0x000F0000
-
-#define MSM_MDC_PHYS 0xAA500000
-#define MSM_MDC_SIZE SZ_1M
-
-#define MSM_AD5_PHYS 0xAC000000
-#define MSM_AD5_SIZE (SZ_1M*13)
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
deleted file mode 100644
index 198202c267c8..000000000000
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2011 Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- * The MSM peripherals are spread all over across 768MB of physical
- * space, which makes just having a simple IO_ADDRESS macro to slide
- * them into the right virtual location rough. Instead, we will
- * provide a master phys->virt mapping for peripherals here.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_IOMAP_7X30_H
-#define __ASM_ARCH_MSM_IOMAP_7X30_H
-
-/* Physical base address and size of peripherals.
- * Ordered by the virtual base addresses they will be mapped at.
- *
- * MSM_VIC_BASE must be an value that can be loaded via a "mov"
- * instruction, otherwise entry-macro.S will not compile.
- *
- * If you add or remove entries here, you'll want to edit the
- * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
- * changes.
- *
- */
-
-#define MSM_VIC_BASE IOMEM(0xE0000000)
-#define MSM_VIC_PHYS 0xC0080000
-#define MSM_VIC_SIZE SZ_4K
-
-#define MSM7X30_CSR_PHYS 0xC0100000
-#define MSM7X30_CSR_SIZE SZ_4K
-
-#define MSM_DMOV_BASE IOMEM(0xE0002000)
-#define MSM_DMOV_PHYS 0xAC400000
-#define MSM_DMOV_SIZE SZ_4K
-
-#define MSM7X30_GPIO1_PHYS 0xAC001000
-#define MSM7X30_GPIO1_SIZE SZ_4K
-
-#define MSM7X30_GPIO2_PHYS 0xAC101000
-#define MSM7X30_GPIO2_SIZE SZ_4K
-
-#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
-#define MSM_CLK_CTL_PHYS 0xAB800000
-#define MSM_CLK_CTL_SIZE SZ_4K
-
-#define MSM_CLK_CTL_SH2_BASE IOMEM(0xE0006000)
-#define MSM_CLK_CTL_SH2_PHYS 0xABA01000
-#define MSM_CLK_CTL_SH2_SIZE SZ_4K
-
-#define MSM_ACC_BASE IOMEM(0xE0007000)
-#define MSM_ACC_PHYS 0xC0101000
-#define MSM_ACC_SIZE SZ_4K
-
-#define MSM_SAW_BASE IOMEM(0xE0008000)
-#define MSM_SAW_PHYS 0xC0102000
-#define MSM_SAW_SIZE SZ_4K
-
-#define MSM_GCC_BASE IOMEM(0xE0009000)
-#define MSM_GCC_PHYS 0xC0182000
-#define MSM_GCC_SIZE SZ_4K
-
-#define MSM_TCSR_BASE IOMEM(0xE000A000)
-#define MSM_TCSR_PHYS 0xAB600000
-#define MSM_TCSR_SIZE SZ_4K
-
-#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
-#define MSM_SHARED_RAM_PHYS 0x00100000
-#define MSM_SHARED_RAM_SIZE SZ_1M
-
-#define MSM_UART1_PHYS 0xACA00000
-#define MSM_UART1_SIZE SZ_4K
-
-#define MSM_UART2_PHYS 0xACB00000
-#define MSM_UART2_SIZE SZ_4K
-
-#define MSM_UART3_PHYS 0xACC00000
-#define MSM_UART3_SIZE SZ_4K
-
-#define MSM_MDC_BASE IOMEM(0xE0200000)
-#define MSM_MDC_PHYS 0xAA500000
-#define MSM_MDC_SIZE SZ_1M
-
-#define MSM_AD5_BASE IOMEM(0xE0300000)
-#define MSM_AD5_PHYS 0xA7000000
-#define MSM_AD5_SIZE (SZ_1M*13)
-
-#define MSM_HSUSB_PHYS 0xA3600000
-#define MSM_HSUSB_SIZE SZ_1K
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
deleted file mode 100644
index 0faa894729b7..000000000000
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2011 Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- * The MSM peripherals are spread all over across 768MB of physical
- * space, which makes just having a simple IO_ADDRESS macro to slide
- * them into the right virtual location rough. Instead, we will
- * provide a master phys->virt mapping for peripherals here.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_IOMAP_8X50_H
-#define __ASM_ARCH_MSM_IOMAP_8X50_H
-
-/* Physical base address and size of peripherals.
- * Ordered by the virtual base addresses they will be mapped at.
- *
- * MSM_VIC_BASE must be an value that can be loaded via a "mov"
- * instruction, otherwise entry-macro.S will not compile.
- *
- * If you add or remove entries here, you'll want to edit the
- * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
- * changes.
- *
- */
-
-#define MSM_VIC_BASE IOMEM(0xE0000000)
-#define MSM_VIC_PHYS 0xAC000000
-#define MSM_VIC_SIZE SZ_4K
-
-#define QSD8X50_CSR_PHYS 0xAC100000
-#define QSD8X50_CSR_SIZE SZ_4K
-
-#define MSM_DMOV_BASE IOMEM(0xE0002000)
-#define MSM_DMOV_PHYS 0xA9700000
-#define MSM_DMOV_SIZE SZ_4K
-
-#define QSD8X50_GPIO1_PHYS 0xA9000000
-#define QSD8X50_GPIO1_SIZE SZ_4K
-
-#define QSD8X50_GPIO2_PHYS 0xA9100000
-#define QSD8X50_GPIO2_SIZE SZ_4K
-
-#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
-#define MSM_CLK_CTL_PHYS 0xA8600000
-#define MSM_CLK_CTL_SIZE SZ_4K
-
-#define MSM_SIRC_BASE IOMEM(0xE1006000)
-#define MSM_SIRC_PHYS 0xAC200000
-#define MSM_SIRC_SIZE SZ_4K
-
-#define MSM_SCPLL_BASE IOMEM(0xE1007000)
-#define MSM_SCPLL_PHYS 0xA8800000
-#define MSM_SCPLL_SIZE SZ_4K
-
-#ifdef CONFIG_MSM_SOC_REV_A
-#define MSM_SMI_BASE 0xE0000000
-#else
-#define MSM_SMI_BASE 0x00000000
-#endif
-
-#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
-#define MSM_SHARED_RAM_PHYS (MSM_SMI_BASE + 0x00100000)
-#define MSM_SHARED_RAM_SIZE SZ_1M
-
-#define MSM_UART1_PHYS 0xA9A00000
-#define MSM_UART1_SIZE SZ_4K
-
-#define MSM_UART2_PHYS 0xA9B00000
-#define MSM_UART2_SIZE SZ_4K
-
-#define MSM_UART3_PHYS 0xA9C00000
-#define MSM_UART3_SIZE SZ_4K
-
-#define MSM_MDC_BASE IOMEM(0xE0200000)
-#define MSM_MDC_PHYS 0xAA500000
-#define MSM_MDC_SIZE SZ_1M
-
-#define MSM_AD5_BASE IOMEM(0xE0300000)
-#define MSM_AD5_PHYS 0xAC000000
-#define MSM_AD5_SIZE (SZ_1M*13)
-
-
-#define MSM_I2C_SIZE SZ_4K
-#define MSM_I2C_PHYS 0xA9900000
-
-#define MSM_HSUSB_PHYS 0xA0800000
-#define MSM_HSUSB_SIZE SZ_1K
-
-#define MSM_NAND_PHYS 0xA0A00000
-
-
-#define MSM_TSIF_PHYS (0xa0100000)
-#define MSM_TSIF_SIZE (0x200)
-
-#define MSM_TSSC_PHYS 0xAA300000
-
-#define MSM_UART1DM_PHYS 0xA0200000
-#define MSM_UART2DM_PHYS 0xA0900000
-
-
-#define MSM_SDC1_PHYS 0xA0300000
-#define MSM_SDC1_SIZE SZ_4K
-
-#define MSM_SDC2_PHYS 0xA0400000
-#define MSM_SDC2_SIZE SZ_4K
-
-#define MSM_SDC3_PHYS 0xA0500000
-#define MSM_SDC3_SIZE SZ_4K
-
-#define MSM_SDC4_PHYS 0xA0600000
-#define MSM_SDC4_SIZE SZ_4K
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
deleted file mode 100644
index 0e4f49157684..000000000000
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- * The MSM peripherals are spread all over across 768MB of physical
- * space, which makes just having a simple IO_ADDRESS macro to slide
- * them into the right virtual location rough. Instead, we will
- * provide a master phys->virt mapping for peripherals here.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_IOMAP_H
-#define __ASM_ARCH_MSM_IOMAP_H
-
-#include <asm/sizes.h>
-
-/* Physical base address and size of peripherals.
- * Ordered by the virtual base addresses they will be mapped at.
- *
- * MSM_VIC_BASE must be an value that can be loaded via a "mov"
- * instruction, otherwise entry-macro.S will not compile.
- *
- * If you add or remove entries here, you'll want to edit the
- * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
- * changes.
- *
- */
-
-#if defined(CONFIG_ARCH_MSM7X30)
-#include "msm_iomap-7x30.h"
-#elif defined(CONFIG_ARCH_QSD8X50)
-#include "msm_iomap-8x50.h"
-#else
-#include "msm_iomap-7x00.h"
-#endif
-
-/* Virtual addresses shared across all MSM targets. */
-#define MSM_CSR_BASE IOMEM(0xE0001000)
-#define MSM_GPIO1_BASE IOMEM(0xE0003000)
-#define MSM_GPIO2_BASE IOMEM(0xE0004000)
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
deleted file mode 100644
index 029463ec8756..000000000000
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* linux/include/asm-arm/arch-msm/msm_smd.h
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_SMD_H
-#define __ASM_ARCH_MSM_SMD_H
-
-typedef struct smd_channel smd_channel_t;
-
-extern int (*msm_check_for_modem_crash)(void);
-
-/* warning: notify() may be called before open returns */
-int smd_open(const char *name, smd_channel_t **ch, void *priv,
- void (*notify)(void *priv, unsigned event));
-
-#define SMD_EVENT_DATA 1
-#define SMD_EVENT_OPEN 2
-#define SMD_EVENT_CLOSE 3
-
-int smd_close(smd_channel_t *ch);
-
-/* passing a null pointer for data reads and discards */
-int smd_read(smd_channel_t *ch, void *data, int len);
-
-/* Write to stream channels may do a partial write and return
-** the length actually written.
-** Write to packet channels will never do a partial write --
-** it will return the requested length written or an error.
-*/
-int smd_write(smd_channel_t *ch, const void *data, int len);
-int smd_write_atomic(smd_channel_t *ch, const void *data, int len);
-
-int smd_write_avail(smd_channel_t *ch);
-int smd_read_avail(smd_channel_t *ch);
-
-/* Returns the total size of the current packet being read.
-** Returns 0 if no packets available or a stream channel.
-*/
-int smd_cur_packet_size(smd_channel_t *ch);
-
-/* used for tty unthrottling and the like -- causes the notify()
-** callback to be called from the same lock context as is used
-** when it is called from channel updates
-*/
-void smd_kick(smd_channel_t *ch);
-
-
-#if 0
-/* these are interruptable waits which will block you until the specified
-** number of bytes are readable or writable.
-*/
-int smd_wait_until_readable(smd_channel_t *ch, int bytes);
-int smd_wait_until_writable(smd_channel_t *ch, int bytes);
-#endif
-
-typedef enum {
- SMD_PORT_DS = 0,
- SMD_PORT_DIAG,
- SMD_PORT_RPC_CALL,
- SMD_PORT_RPC_REPLY,
- SMD_PORT_BT,
- SMD_PORT_CONTROL,
- SMD_PORT_MEMCPY_SPARE1,
- SMD_PORT_DATA1,
- SMD_PORT_DATA2,
- SMD_PORT_DATA3,
- SMD_PORT_DATA4,
- SMD_PORT_DATA5,
- SMD_PORT_DATA6,
- SMD_PORT_DATA7,
- SMD_PORT_DATA8,
- SMD_PORT_DATA9,
- SMD_PORT_DATA10,
- SMD_PORT_DATA11,
- SMD_PORT_DATA12,
- SMD_PORT_DATA13,
- SMD_PORT_DATA14,
- SMD_PORT_DATA15,
- SMD_PORT_DATA16,
- SMD_PORT_DATA17,
- SMD_PORT_DATA18,
- SMD_PORT_DATA19,
- SMD_PORT_DATA20,
- SMD_PORT_GPS_NMEA,
- SMD_PORT_BRIDGE_1,
- SMD_PORT_BRIDGE_2,
- SMD_PORT_BRIDGE_3,
- SMD_PORT_BRIDGE_4,
- SMD_PORT_BRIDGE_5,
- SMD_PORT_LOOPBACK,
- SMD_PORT_CS_APPS_MODEM,
- SMD_PORT_CS_APPS_DSP,
- SMD_PORT_CS_MODEM_DSP,
- SMD_NUM_PORTS,
-} smd_port_id_type;
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/sirc.h b/arch/arm/mach-msm/include/mach/sirc.h
deleted file mode 100644
index ef55868a5b8a..000000000000
--- a/arch/arm/mach-msm/include/mach/sirc.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_SIRC_H
-#define __ASM_ARCH_MSM_SIRC_H
-
-struct sirc_regs_t {
- void *int_enable;
- void *int_enable_clear;
- void *int_enable_set;
- void *int_type;
- void *int_polarity;
- void *int_clear;
-};
-
-struct sirc_cascade_regs {
- void *int_status;
- unsigned int cascade_irq;
-};
-
-void msm_init_sirc(void);
-void msm_sirc_enter_sleep(void);
-void msm_sirc_exit_sleep(void);
-
-#if defined(CONFIG_ARCH_MSM_SCORPION)
-
-#include <mach/msm_iomap.h>
-
-/*
- * Secondary interrupt controller interrupts
- */
-
-#define FIRST_SIRC_IRQ (NR_MSM_IRQS + NR_GPIO_IRQS)
-
-#define INT_UART1 (FIRST_SIRC_IRQ + 0)
-#define INT_UART2 (FIRST_SIRC_IRQ + 1)
-#define INT_UART3 (FIRST_SIRC_IRQ + 2)
-#define INT_UART1_RX (FIRST_SIRC_IRQ + 3)
-#define INT_UART2_RX (FIRST_SIRC_IRQ + 4)
-#define INT_UART3_RX (FIRST_SIRC_IRQ + 5)
-#define INT_SPI_INPUT (FIRST_SIRC_IRQ + 6)
-#define INT_SPI_OUTPUT (FIRST_SIRC_IRQ + 7)
-#define INT_SPI_ERROR (FIRST_SIRC_IRQ + 8)
-#define INT_GPIO_GROUP1 (FIRST_SIRC_IRQ + 9)
-#define INT_GPIO_GROUP2 (FIRST_SIRC_IRQ + 10)
-#define INT_GPIO_GROUP1_SECURE (FIRST_SIRC_IRQ + 11)
-#define INT_GPIO_GROUP2_SECURE (FIRST_SIRC_IRQ + 12)
-#define INT_AVS_SVIC (FIRST_SIRC_IRQ + 13)
-#define INT_AVS_REQ_UP (FIRST_SIRC_IRQ + 14)
-#define INT_AVS_REQ_DOWN (FIRST_SIRC_IRQ + 15)
-#define INT_PBUS_ERR (FIRST_SIRC_IRQ + 16)
-#define INT_AXI_ERR (FIRST_SIRC_IRQ + 17)
-#define INT_SMI_ERR (FIRST_SIRC_IRQ + 18)
-#define INT_EBI1_ERR (FIRST_SIRC_IRQ + 19)
-#define INT_IMEM_ERR (FIRST_SIRC_IRQ + 20)
-#define INT_TEMP_SENSOR (FIRST_SIRC_IRQ + 21)
-#define INT_TV_ENC (FIRST_SIRC_IRQ + 22)
-#define INT_GRP2D (FIRST_SIRC_IRQ + 23)
-#define INT_GSBI_QUP (FIRST_SIRC_IRQ + 24)
-#define INT_SC_ACG (FIRST_SIRC_IRQ + 25)
-#define INT_WDT0 (FIRST_SIRC_IRQ + 26)
-#define INT_WDT1 (FIRST_SIRC_IRQ + 27)
-
-#if defined(CONFIG_MSM_SOC_REV_A)
-#define NR_SIRC_IRQS 28
-#define SIRC_MASK 0x0FFFFFFF
-#else
-#define NR_SIRC_IRQS 23
-#define SIRC_MASK 0x007FFFFF
-#endif
-
-#define LAST_SIRC_IRQ (FIRST_SIRC_IRQ + NR_SIRC_IRQS - 1)
-
-#define SPSS_SIRC_INT_SELECT (MSM_SIRC_BASE + 0x00)
-#define SPSS_SIRC_INT_ENABLE (MSM_SIRC_BASE + 0x04)
-#define SPSS_SIRC_INT_ENABLE_CLEAR (MSM_SIRC_BASE + 0x08)
-#define SPSS_SIRC_INT_ENABLE_SET (MSM_SIRC_BASE + 0x0C)
-#define SPSS_SIRC_INT_TYPE (MSM_SIRC_BASE + 0x10)
-#define SPSS_SIRC_INT_POLARITY (MSM_SIRC_BASE + 0x14)
-#define SPSS_SIRC_SECURITY (MSM_SIRC_BASE + 0x18)
-#define SPSS_SIRC_IRQ_STATUS (MSM_SIRC_BASE + 0x1C)
-#define SPSS_SIRC_IRQ1_STATUS (MSM_SIRC_BASE + 0x20)
-#define SPSS_SIRC_RAW_STATUS (MSM_SIRC_BASE + 0x24)
-#define SPSS_SIRC_INT_CLEAR (MSM_SIRC_BASE + 0x28)
-#define SPSS_SIRC_SOFT_INT (MSM_SIRC_BASE + 0x2C)
-
-#endif
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/vreg.h b/arch/arm/mach-msm/include/mach/vreg.h
deleted file mode 100644
index 6626e7864e28..000000000000
--- a/arch/arm/mach-msm/include/mach/vreg.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* linux/include/asm-arm/arch-msm/vreg.h
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_VREG_H
-#define __ARCH_ARM_MACH_MSM_VREG_H
-
-struct vreg;
-
-struct vreg *vreg_get(struct device *dev, const char *id);
-void vreg_put(struct vreg *vreg);
-
-int vreg_enable(struct vreg *vreg);
-int vreg_disable(struct vreg *vreg);
-int vreg_set_level(struct vreg *vreg, unsigned mv);
-
-#endif
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
deleted file mode 100644
index b042dca1f633..000000000000
--- a/arch/arm/mach-msm/io.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* arch/arm/mach-msm/io.c
- *
- * MSM7K, QSD io support
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/export.h>
-
-#include <mach/hardware.h>
-#include <asm/page.h>
-#include <mach/msm_iomap.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-
-#define MSM_CHIP_DEVICE_TYPE(name, chip, mem_type) { \
- .virtual = (unsigned long) MSM_##name##_BASE, \
- .pfn = __phys_to_pfn(chip##_##name##_PHYS), \
- .length = chip##_##name##_SIZE, \
- .type = mem_type, \
- }
-
-#define MSM_DEVICE_TYPE(name, mem_type) \
- MSM_CHIP_DEVICE_TYPE(name, MSM, mem_type)
-#define MSM_CHIP_DEVICE(name, chip) \
- MSM_CHIP_DEVICE_TYPE(name, chip, MT_DEVICE)
-#define MSM_DEVICE(name) MSM_CHIP_DEVICE(name, MSM)
-
-#if defined(CONFIG_ARCH_MSM7X00A)
-static struct map_desc msm_io_desc[] __initdata = {
- MSM_DEVICE_TYPE(VIC, MT_DEVICE_NONSHARED),
- MSM_CHIP_DEVICE_TYPE(CSR, MSM7X00, MT_DEVICE_NONSHARED),
- MSM_DEVICE_TYPE(DMOV, MT_DEVICE_NONSHARED),
- MSM_CHIP_DEVICE_TYPE(GPIO1, MSM7X00, MT_DEVICE_NONSHARED),
- MSM_CHIP_DEVICE_TYPE(GPIO2, MSM7X00, MT_DEVICE_NONSHARED),
- MSM_DEVICE_TYPE(CLK_CTL, MT_DEVICE_NONSHARED),
- {
- .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
- .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
- .length = MSM_SHARED_RAM_SIZE,
- .type = MT_DEVICE,
- },
-#if defined(CONFIG_DEBUG_MSM_UART)
- {
- /* Must be last: virtual and pfn filled in by debug_ll_addr() */
- .length = SZ_4K,
- .type = MT_DEVICE_NONSHARED,
- }
-#endif
-};
-
-void __init msm_map_common_io(void)
-{
- size_t size = ARRAY_SIZE(msm_io_desc);
-
- /* Make sure the peripheral register window is closed, since
- * we will use PTE flags (TEX[1]=1,B=0,C=1) to determine which
- * pages are peripheral interface or not.
- */
- asm("mcr p15, 0, %0, c15, c2, 4" : : "r" (0));
-#if defined(CONFIG_DEBUG_MSM_UART)
-#ifdef CONFIG_MMU
- debug_ll_addr(&msm_io_desc[size - 1].pfn,
- &msm_io_desc[size - 1].virtual);
-#endif
- msm_io_desc[size - 1].pfn = __phys_to_pfn(msm_io_desc[size - 1].pfn);
-#endif
- iotable_init(msm_io_desc, size);
-}
-#endif
-
-#ifdef CONFIG_ARCH_QSD8X50
-static struct map_desc qsd8x50_io_desc[] __initdata = {
- MSM_DEVICE(VIC),
- MSM_CHIP_DEVICE(CSR, QSD8X50),
- MSM_DEVICE(DMOV),
- MSM_CHIP_DEVICE(GPIO1, QSD8X50),
- MSM_CHIP_DEVICE(GPIO2, QSD8X50),
- MSM_DEVICE(CLK_CTL),
- MSM_DEVICE(SIRC),
- MSM_DEVICE(SCPLL),
- MSM_DEVICE(AD5),
- MSM_DEVICE(MDC),
- {
- .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
- .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
- .length = MSM_SHARED_RAM_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-void __init msm_map_qsd8x50_io(void)
-{
- debug_ll_io_init();
- iotable_init(qsd8x50_io_desc, ARRAY_SIZE(qsd8x50_io_desc));
-}
-#endif /* CONFIG_ARCH_QSD8X50 */
-
-#ifdef CONFIG_ARCH_MSM7X30
-static struct map_desc msm7x30_io_desc[] __initdata = {
- MSM_DEVICE(VIC),
- MSM_CHIP_DEVICE(CSR, MSM7X30),
- MSM_DEVICE(DMOV),
- MSM_CHIP_DEVICE(GPIO1, MSM7X30),
- MSM_CHIP_DEVICE(GPIO2, MSM7X30),
- MSM_DEVICE(CLK_CTL),
- MSM_DEVICE(CLK_CTL_SH2),
- MSM_DEVICE(AD5),
- MSM_DEVICE(MDC),
- MSM_DEVICE(ACC),
- MSM_DEVICE(SAW),
- MSM_DEVICE(GCC),
- MSM_DEVICE(TCSR),
- {
- .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
- .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
- .length = MSM_SHARED_RAM_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-void __init msm_map_msm7x30_io(void)
-{
- debug_ll_io_init();
- iotable_init(msm7x30_io_desc, ARRAY_SIZE(msm7x30_io_desc));
-}
-#endif /* CONFIG_ARCH_MSM7X30 */
-
-#ifdef CONFIG_ARCH_MSM7X00A
-void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller)
-{
- if (mtype == MT_DEVICE) {
- /* The peripherals in the 88000000 - D0000000 range
- * are only accessible by type MT_DEVICE_NONSHARED.
- * Adjust mtype as necessary to make this "just work."
- */
- if ((phys_addr >= 0x88000000) && (phys_addr < 0xD0000000))
- mtype = MT_DEVICE_NONSHARED;
- }
-
- return __arm_ioremap_caller(phys_addr, size, mtype, caller);
-}
-#endif
diff --git a/arch/arm/mach-msm/irq-vic.c b/arch/arm/mach-msm/irq-vic.c
deleted file mode 100644
index 1b54f807c2d0..000000000000
--- a/arch/arm/mach-msm/irq-vic.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/timer.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/cacheflush.h>
-
-#include <mach/hardware.h>
-
-#include <mach/msm_iomap.h>
-
-#include "smd_private.h"
-
-enum {
- IRQ_DEBUG_SLEEP_INT_TRIGGER = 1U << 0,
- IRQ_DEBUG_SLEEP_INT = 1U << 1,
- IRQ_DEBUG_SLEEP_ABORT = 1U << 2,
- IRQ_DEBUG_SLEEP = 1U << 3,
- IRQ_DEBUG_SLEEP_REQUEST = 1U << 4,
-};
-static int msm_irq_debug_mask;
-module_param_named(debug_mask, msm_irq_debug_mask, int,
- S_IRUGO | S_IWUSR | S_IWGRP);
-
-#define VIC_REG(off) (MSM_VIC_BASE + (off))
-#define VIC_INT_TO_REG_ADDR(base, irq) (base + (irq / 32) * 4)
-#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 3)
-
-#define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */
-#define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */
-#define VIC_INT_SELECT2 VIC_REG(0x0008) /* 1: FIQ, 0: IRQ */
-#define VIC_INT_SELECT3 VIC_REG(0x000C) /* 1: FIQ, 0: IRQ */
-#define VIC_INT_EN0 VIC_REG(0x0010)
-#define VIC_INT_EN1 VIC_REG(0x0014)
-#define VIC_INT_EN2 VIC_REG(0x0018)
-#define VIC_INT_EN3 VIC_REG(0x001C)
-#define VIC_INT_ENCLEAR0 VIC_REG(0x0020)
-#define VIC_INT_ENCLEAR1 VIC_REG(0x0024)
-#define VIC_INT_ENCLEAR2 VIC_REG(0x0028)
-#define VIC_INT_ENCLEAR3 VIC_REG(0x002C)
-#define VIC_INT_ENSET0 VIC_REG(0x0030)
-#define VIC_INT_ENSET1 VIC_REG(0x0034)
-#define VIC_INT_ENSET2 VIC_REG(0x0038)
-#define VIC_INT_ENSET3 VIC_REG(0x003C)
-#define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */
-#define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */
-#define VIC_INT_TYPE2 VIC_REG(0x0048) /* 1: EDGE, 0: LEVEL */
-#define VIC_INT_TYPE3 VIC_REG(0x004C) /* 1: EDGE, 0: LEVEL */
-#define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */
-#define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */
-#define VIC_INT_POLARITY2 VIC_REG(0x0058) /* 1: NEG, 0: POS */
-#define VIC_INT_POLARITY3 VIC_REG(0x005C) /* 1: NEG, 0: POS */
-#define VIC_NO_PEND_VAL VIC_REG(0x0060)
-
-#if defined(CONFIG_ARCH_MSM_SCORPION)
-#define VIC_NO_PEND_VAL_FIQ VIC_REG(0x0064)
-#define VIC_INT_MASTEREN VIC_REG(0x0068) /* 1: IRQ, 2: FIQ */
-#define VIC_CONFIG VIC_REG(0x006C) /* 1: USE SC VIC */
-#else
-#define VIC_INT_MASTEREN VIC_REG(0x0064) /* 1: IRQ, 2: FIQ */
-#define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */
-#define VIC_CONFIG VIC_REG(0x0068) /* 1: USE ARM1136 VIC */
-#endif
-
-#define VIC_IRQ_STATUS0 VIC_REG(0x0080)
-#define VIC_IRQ_STATUS1 VIC_REG(0x0084)
-#define VIC_IRQ_STATUS2 VIC_REG(0x0088)
-#define VIC_IRQ_STATUS3 VIC_REG(0x008C)
-#define VIC_FIQ_STATUS0 VIC_REG(0x0090)
-#define VIC_FIQ_STATUS1 VIC_REG(0x0094)
-#define VIC_FIQ_STATUS2 VIC_REG(0x0098)
-#define VIC_FIQ_STATUS3 VIC_REG(0x009C)
-#define VIC_RAW_STATUS0 VIC_REG(0x00A0)
-#define VIC_RAW_STATUS1 VIC_REG(0x00A4)
-#define VIC_RAW_STATUS2 VIC_REG(0x00A8)
-#define VIC_RAW_STATUS3 VIC_REG(0x00AC)
-#define VIC_INT_CLEAR0 VIC_REG(0x00B0)
-#define VIC_INT_CLEAR1 VIC_REG(0x00B4)
-#define VIC_INT_CLEAR2 VIC_REG(0x00B8)
-#define VIC_INT_CLEAR3 VIC_REG(0x00BC)
-#define VIC_SOFTINT0 VIC_REG(0x00C0)
-#define VIC_SOFTINT1 VIC_REG(0x00C4)
-#define VIC_SOFTINT2 VIC_REG(0x00C8)
-#define VIC_SOFTINT3 VIC_REG(0x00CC)
-#define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */
-#define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */
-#define VIC_IRQ_VEC_WR VIC_REG(0x00D8)
-
-#if defined(CONFIG_ARCH_MSM_SCORPION)
-#define VIC_FIQ_VEC_RD VIC_REG(0x00DC)
-#define VIC_FIQ_VEC_PEND_RD VIC_REG(0x00E0)
-#define VIC_FIQ_VEC_WR VIC_REG(0x00E4)
-#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E8)
-#define VIC_IRQ_IN_STACK VIC_REG(0x00EC)
-#define VIC_FIQ_IN_SERVICE VIC_REG(0x00F0)
-#define VIC_FIQ_IN_STACK VIC_REG(0x00F4)
-#define VIC_TEST_BUS_SEL VIC_REG(0x00F8)
-#define VIC_IRQ_CTRL_CONFIG VIC_REG(0x00FC)
-#else
-#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E0)
-#define VIC_IRQ_IN_STACK VIC_REG(0x00E4)
-#define VIC_TEST_BUS_SEL VIC_REG(0x00E8)
-#endif
-
-#define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4))
-#define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4))
-
-#if defined(CONFIG_ARCH_MSM7X30)
-#define VIC_NUM_REGS 4
-#else
-#define VIC_NUM_REGS 2
-#endif
-
-#if VIC_NUM_REGS == 2
-#define DPRINT_REGS(base_reg, format, ...) \
- printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
- readl(base_reg ## 0), readl(base_reg ## 1))
-#define DPRINT_ARRAY(array, format, ...) \
- printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
- array[0], array[1])
-#elif VIC_NUM_REGS == 4
-#define DPRINT_REGS(base_reg, format, ...) \
- printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
- readl(base_reg ## 0), readl(base_reg ## 1), \
- readl(base_reg ## 2), readl(base_reg ## 3))
-#define DPRINT_ARRAY(array, format, ...) \
- printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
- array[0], array[1], \
- array[2], array[3])
-#else
-#error "VIC_NUM_REGS set to illegal value"
-#endif
-
-static uint32_t msm_irq_smsm_wake_enable[2];
-static struct {
- uint32_t int_en[2];
- uint32_t int_type;
- uint32_t int_polarity;
- uint32_t int_select;
-} msm_irq_shadow_reg[VIC_NUM_REGS];
-static uint32_t msm_irq_idle_disable[VIC_NUM_REGS];
-
-#define SMSM_FAKE_IRQ (0xff)
-static uint8_t msm_irq_to_smsm[NR_IRQS] = {
- [INT_MDDI_EXT] = 1,
- [INT_MDDI_PRI] = 2,
- [INT_MDDI_CLIENT] = 3,
- [INT_USB_OTG] = 4,
-
- [INT_PWB_I2C] = 5,
- [INT_SDC1_0] = 6,
- [INT_SDC1_1] = 7,
- [INT_SDC2_0] = 8,
-
- [INT_SDC2_1] = 9,
- [INT_ADSP_A9_A11] = 10,
- [INT_UART1] = 11,
- [INT_UART2] = 12,
-
- [INT_UART3] = 13,
- [INT_UART1_RX] = 14,
- [INT_UART2_RX] = 15,
- [INT_UART3_RX] = 16,
-
- [INT_UART1DM_IRQ] = 17,
- [INT_UART1DM_RX] = 18,
- [INT_KEYSENSE] = 19,
-#if !defined(CONFIG_ARCH_MSM7X30)
- [INT_AD_HSSD] = 20,
-#endif
-
- [INT_NAND_WR_ER_DONE] = 21,
- [INT_NAND_OP_DONE] = 22,
- [INT_TCHSCRN1] = 23,
- [INT_TCHSCRN2] = 24,
-
- [INT_TCHSCRN_SSBI] = 25,
- [INT_USB_HS] = 26,
- [INT_UART2DM_RX] = 27,
- [INT_UART2DM_IRQ] = 28,
-
- [INT_SDC4_1] = 29,
- [INT_SDC4_0] = 30,
- [INT_SDC3_1] = 31,
- [INT_SDC3_0] = 32,
-
- /* fake wakeup interrupts */
- [INT_GPIO_GROUP1] = SMSM_FAKE_IRQ,
- [INT_GPIO_GROUP2] = SMSM_FAKE_IRQ,
- [INT_A9_M2A_0] = SMSM_FAKE_IRQ,
- [INT_A9_M2A_1] = SMSM_FAKE_IRQ,
- [INT_A9_M2A_5] = SMSM_FAKE_IRQ,
- [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ,
- [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ,
- [INT_ADSP_A11] = SMSM_FAKE_IRQ,
-#ifdef CONFIG_ARCH_QSD8X50
- [INT_SIRC_0] = SMSM_FAKE_IRQ,
- [INT_SIRC_1] = SMSM_FAKE_IRQ,
-#endif
-};
-
-static inline void msm_irq_write_all_regs(void __iomem *base, unsigned int val)
-{
- int i;
-
- for (i = 0; i < VIC_NUM_REGS; i++)
- writel(val, base + (i * 4));
-}
-
-static void msm_irq_ack(struct irq_data *d)
-{
- void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, d->irq);
- writel(1 << (d->irq & 31), reg);
-}
-
-static void msm_irq_mask(struct irq_data *d)
-{
- void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, d->irq);
- unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
- uint32_t mask = 1UL << (d->irq & 31);
- int smsm_irq = msm_irq_to_smsm[d->irq];
-
- msm_irq_shadow_reg[index].int_en[0] &= ~mask;
- writel(mask, reg);
- if (smsm_irq == 0)
- msm_irq_idle_disable[index] &= ~mask;
- else {
- mask = 1UL << (smsm_irq - 1);
- msm_irq_smsm_wake_enable[0] &= ~mask;
- }
-}
-
-static void msm_irq_unmask(struct irq_data *d)
-{
- void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, d->irq);
- unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
- uint32_t mask = 1UL << (d->irq & 31);
- int smsm_irq = msm_irq_to_smsm[d->irq];
-
- msm_irq_shadow_reg[index].int_en[0] |= mask;
- writel(mask, reg);
-
- if (smsm_irq == 0)
- msm_irq_idle_disable[index] |= mask;
- else {
- mask = 1UL << (smsm_irq - 1);
- msm_irq_smsm_wake_enable[0] |= mask;
- }
-}
-
-static int msm_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
- uint32_t mask = 1UL << (d->irq & 31);
- int smsm_irq = msm_irq_to_smsm[d->irq];
-
- if (smsm_irq == 0) {
- printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", d->irq);
- return -EINVAL;
- }
- if (on)
- msm_irq_shadow_reg[index].int_en[1] |= mask;
- else
- msm_irq_shadow_reg[index].int_en[1] &= ~mask;
-
- if (smsm_irq == SMSM_FAKE_IRQ)
- return 0;
-
- mask = 1UL << (smsm_irq - 1);
- if (on)
- msm_irq_smsm_wake_enable[1] |= mask;
- else
- msm_irq_smsm_wake_enable[1] &= ~mask;
- return 0;
-}
-
-static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, d->irq);
- void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, d->irq);
- unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
- int b = 1 << (d->irq & 31);
- uint32_t polarity;
- uint32_t type;
-
- polarity = msm_irq_shadow_reg[index].int_polarity;
- if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
- polarity |= b;
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
- polarity &= ~b;
- writel(polarity, preg);
- msm_irq_shadow_reg[index].int_polarity = polarity;
-
- type = msm_irq_shadow_reg[index].int_type;
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- type |= b;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
- }
- if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
- type &= ~b;
- __irq_set_handler_locked(d->irq, handle_level_irq);
- }
- writel(type, treg);
- msm_irq_shadow_reg[index].int_type = type;
- return 0;
-}
-
-static struct irq_chip msm_irq_chip = {
- .name = "msm",
- .irq_disable = msm_irq_mask,
- .irq_ack = msm_irq_ack,
- .irq_mask = msm_irq_mask,
- .irq_unmask = msm_irq_unmask,
- .irq_set_wake = msm_irq_set_wake,
- .irq_set_type = msm_irq_set_type,
-};
-
-void __init msm_init_irq(void)
-{
- unsigned n;
-
- /* select level interrupts */
- msm_irq_write_all_regs(VIC_INT_TYPE0, 0);
-
- /* select highlevel interrupts */
- msm_irq_write_all_regs(VIC_INT_POLARITY0, 0);
-
- /* select IRQ for all INTs */
- msm_irq_write_all_regs(VIC_INT_SELECT0, 0);
-
- /* disable all INTs */
- msm_irq_write_all_regs(VIC_INT_EN0, 0);
-
- /* don't use vic */
- writel(0, VIC_CONFIG);
-
- /* enable interrupt controller */
- writel(3, VIC_INT_MASTEREN);
-
- for (n = 0; n < NR_MSM_IRQS; n++) {
- irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq);
- set_irq_flags(n, IRQF_VALID);
- }
-}
diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c
deleted file mode 100644
index ea514be390c6..000000000000
--- a/arch/arm/mach-msm/irq.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/* linux/arch/arm/mach-msm/irq.c
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/timer.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-
-#include <mach/msm_iomap.h>
-
-#define VIC_REG(off) (MSM_VIC_BASE + (off))
-
-#define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */
-#define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */
-#define VIC_INT_EN0 VIC_REG(0x0010)
-#define VIC_INT_EN1 VIC_REG(0x0014)
-#define VIC_INT_ENCLEAR0 VIC_REG(0x0020)
-#define VIC_INT_ENCLEAR1 VIC_REG(0x0024)
-#define VIC_INT_ENSET0 VIC_REG(0x0030)
-#define VIC_INT_ENSET1 VIC_REG(0x0034)
-#define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */
-#define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */
-#define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */
-#define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */
-#define VIC_NO_PEND_VAL VIC_REG(0x0060)
-#define VIC_INT_MASTEREN VIC_REG(0x0064) /* 1: IRQ, 2: FIQ */
-#define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */
-#define VIC_CONFIG VIC_REG(0x0068) /* 1: USE ARM1136 VIC */
-#define VIC_IRQ_STATUS0 VIC_REG(0x0080)
-#define VIC_IRQ_STATUS1 VIC_REG(0x0084)
-#define VIC_FIQ_STATUS0 VIC_REG(0x0090)
-#define VIC_FIQ_STATUS1 VIC_REG(0x0094)
-#define VIC_RAW_STATUS0 VIC_REG(0x00A0)
-#define VIC_RAW_STATUS1 VIC_REG(0x00A4)
-#define VIC_INT_CLEAR0 VIC_REG(0x00B0)
-#define VIC_INT_CLEAR1 VIC_REG(0x00B4)
-#define VIC_SOFTINT0 VIC_REG(0x00C0)
-#define VIC_SOFTINT1 VIC_REG(0x00C4)
-#define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */
-#define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */
-#define VIC_IRQ_VEC_WR VIC_REG(0x00D8)
-#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E0)
-#define VIC_IRQ_IN_STACK VIC_REG(0x00E4)
-#define VIC_TEST_BUS_SEL VIC_REG(0x00E8)
-
-#define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4))
-#define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4))
-
-static void msm_irq_ack(struct irq_data *d)
-{
- void __iomem *reg = VIC_INT_CLEAR0 + ((d->irq & 32) ? 4 : 0);
- writel(1 << (d->irq & 31), reg);
-}
-
-static void msm_irq_mask(struct irq_data *d)
-{
- void __iomem *reg = VIC_INT_ENCLEAR0 + ((d->irq & 32) ? 4 : 0);
- writel(1 << (d->irq & 31), reg);
-}
-
-static void msm_irq_unmask(struct irq_data *d)
-{
- void __iomem *reg = VIC_INT_ENSET0 + ((d->irq & 32) ? 4 : 0);
- writel(1 << (d->irq & 31), reg);
-}
-
-static int msm_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- return -EINVAL;
-}
-
-static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- void __iomem *treg = VIC_INT_TYPE0 + ((d->irq & 32) ? 4 : 0);
- void __iomem *preg = VIC_INT_POLARITY0 + ((d->irq & 32) ? 4 : 0);
- int b = 1 << (d->irq & 31);
-
- if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
- writel(readl(preg) | b, preg);
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
- writel(readl(preg) & (~b), preg);
-
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- writel(readl(treg) | b, treg);
- __irq_set_handler_locked(d->irq, handle_edge_irq);
- }
- if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
- writel(readl(treg) & (~b), treg);
- __irq_set_handler_locked(d->irq, handle_level_irq);
- }
- return 0;
-}
-
-static struct irq_chip msm_irq_chip = {
- .name = "msm",
- .irq_ack = msm_irq_ack,
- .irq_mask = msm_irq_mask,
- .irq_unmask = msm_irq_unmask,
- .irq_set_wake = msm_irq_set_wake,
- .irq_set_type = msm_irq_set_type,
-};
-
-void __init msm_init_irq(void)
-{
- unsigned n;
-
- /* select level interrupts */
- writel(0, VIC_INT_TYPE0);
- writel(0, VIC_INT_TYPE1);
-
- /* select highlevel interrupts */
- writel(0, VIC_INT_POLARITY0);
- writel(0, VIC_INT_POLARITY1);
-
- /* select IRQ for all INTs */
- writel(0, VIC_INT_SELECT0);
- writel(0, VIC_INT_SELECT1);
-
- /* disable all INTs */
- writel(0, VIC_INT_EN0);
- writel(0, VIC_INT_EN1);
-
- /* don't use 1136 vic */
- writel(0, VIC_CONFIG);
-
- /* enable interrupt controller */
- writel(1, VIC_INT_MASTEREN);
-
- for (n = 0; n < NR_MSM_IRQS; n++) {
- irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq);
- set_irq_flags(n, IRQF_VALID);
- }
-}
diff --git a/arch/arm/mach-msm/last_radio_log.c b/arch/arm/mach-msm/last_radio_log.c
deleted file mode 100644
index 9c392a29fc7e..000000000000
--- a/arch/arm/mach-msm/last_radio_log.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* arch/arm/mach-msm/last_radio_log.c
- *
- * Extract the log from a modem crash through SMEM
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/uaccess.h>
-
-#include "smd_private.h"
-
-static void *radio_log_base;
-static size_t radio_log_size;
-
-extern void *smem_item(unsigned id, unsigned *size);
-
-static ssize_t last_radio_log_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
-{
- return simple_read_from_buffer(buf, len, offset,
- radio_log_base, radio_log_size);
-}
-
-static struct file_operations last_radio_log_fops = {
- .read = last_radio_log_read,
- .llseek = default_llseek,
-};
-
-void msm_init_last_radio_log(struct module *owner)
-{
- struct proc_dir_entry *entry;
-
- if (last_radio_log_fops.owner) {
- pr_err("%s: already claimed\n", __func__);
- return;
- }
-
- radio_log_base = smem_item(SMEM_CLKREGIM_BSP, &radio_log_size);
- if (!radio_log_base) {
- pr_err("%s: could not retrieve SMEM_CLKREGIM_BSP\n", __func__);
- return;
- }
-
- entry = proc_create("last_radio_log", S_IRUGO, NULL,
- &last_radio_log_fops);
- if (!entry) {
- pr_err("%s: could not create proc entry for radio log\n",
- __func__);
- return;
- }
-
- pr_err("%s: last radio log is %d bytes long\n", __func__,
- radio_log_size);
- last_radio_log_fops.owner = owner;
- proc_set_size(entry, radio_log_size);
-}
-EXPORT_SYMBOL(msm_init_last_radio_log);
diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c
deleted file mode 100644
index 507f5ca80697..000000000000
--- a/arch/arm/mach-msm/proc_comm.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/* arch/arm/mach-msm/proc_comm.c
- *
- * Copyright (C) 2007-2008 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <mach/msm_iomap.h>
-
-#include "proc_comm.h"
-
-static inline void msm_a2m_int(uint32_t irq)
-{
-#if defined(CONFIG_ARCH_MSM7X30)
- writel(1 << irq, MSM_GCC_BASE + 0x8);
-#else
- writel(1, MSM_CSR_BASE + 0x400 + (irq * 4));
-#endif
-}
-
-static inline void notify_other_proc_comm(void)
-{
- msm_a2m_int(6);
-}
-
-#define APP_COMMAND 0x00
-#define APP_STATUS 0x04
-#define APP_DATA1 0x08
-#define APP_DATA2 0x0C
-
-#define MDM_COMMAND 0x10
-#define MDM_STATUS 0x14
-#define MDM_DATA1 0x18
-#define MDM_DATA2 0x1C
-
-static DEFINE_SPINLOCK(proc_comm_lock);
-
-/* The higher level SMD support will install this to
- * provide a way to check for and handle modem restart.
- */
-int (*msm_check_for_modem_crash)(void);
-
-/* Poll for a state change, checking for possible
- * modem crashes along the way (so we don't wait
- * forever while the ARM9 is blowing up).
- *
- * Return an error in the event of a modem crash and
- * restart so the msm_proc_comm() routine can restart
- * the operation from the beginning.
- */
-static int proc_comm_wait_for(void __iomem *addr, unsigned value)
-{
- for (;;) {
- if (readl(addr) == value)
- return 0;
-
- if (msm_check_for_modem_crash)
- if (msm_check_for_modem_crash())
- return -EAGAIN;
- }
-}
-
-int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2)
-{
- void __iomem *base = MSM_SHARED_RAM_BASE;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&proc_comm_lock, flags);
-
- for (;;) {
- if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY))
- continue;
-
- writel(cmd, base + APP_COMMAND);
- writel(data1 ? *data1 : 0, base + APP_DATA1);
- writel(data2 ? *data2 : 0, base + APP_DATA2);
-
- notify_other_proc_comm();
-
- if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE))
- continue;
-
- if (readl(base + APP_STATUS) != PCOM_CMD_FAIL) {
- if (data1)
- *data1 = readl(base + APP_DATA1);
- if (data2)
- *data2 = readl(base + APP_DATA2);
- ret = 0;
- } else {
- ret = -EIO;
- }
- break;
- }
-
- writel(PCOM_CMD_IDLE, base + APP_COMMAND);
-
- spin_unlock_irqrestore(&proc_comm_lock, flags);
-
- return ret;
-}
-
-/*
- * We need to wait for the ARM9 to at least partially boot
- * up before we can continue. Since the ARM9 does resource
- * allocation, if we don't wait we could end up crashing or in
- * an unknown state. This function should be called early to
- * wait on the ARM9.
- */
-void proc_comm_boot_wait(void)
-{
- void __iomem *base = MSM_SHARED_RAM_BASE;
-
- proc_comm_wait_for(base + MDM_STATUS, PCOM_READY);
-
-}
diff --git a/arch/arm/mach-msm/proc_comm.h b/arch/arm/mach-msm/proc_comm.h
deleted file mode 100644
index e8d043a0e990..000000000000
--- a/arch/arm/mach-msm/proc_comm.h
+++ /dev/null
@@ -1,258 +0,0 @@
-/* arch/arm/mach-msm/proc_comm.h
- *
- * Copyright (c) 2007 QUALCOMM Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ARCH_ARM_MACH_MSM_PROC_COMM_H_
-#define _ARCH_ARM_MACH_MSM_PROC_COMM_H_
-
-#include <linux/init.h>
-
-enum {
- PCOM_CMD_IDLE = 0x0,
- PCOM_CMD_DONE,
- PCOM_RESET_APPS,
- PCOM_RESET_CHIP,
- PCOM_CONFIG_NAND_MPU,
- PCOM_CONFIG_USB_CLKS,
- PCOM_GET_POWER_ON_STATUS,
- PCOM_GET_WAKE_UP_STATUS,
- PCOM_GET_BATT_LEVEL,
- PCOM_CHG_IS_CHARGING,
- PCOM_POWER_DOWN,
- PCOM_USB_PIN_CONFIG,
- PCOM_USB_PIN_SEL,
- PCOM_SET_RTC_ALARM,
- PCOM_NV_READ,
- PCOM_NV_WRITE,
- PCOM_GET_UUID_HIGH,
- PCOM_GET_UUID_LOW,
- PCOM_GET_HW_ENTROPY,
- PCOM_RPC_GPIO_TLMM_CONFIG_REMOTE,
- PCOM_CLKCTL_RPC_ENABLE,
- PCOM_CLKCTL_RPC_DISABLE,
- PCOM_CLKCTL_RPC_RESET,
- PCOM_CLKCTL_RPC_SET_FLAGS,
- PCOM_CLKCTL_RPC_SET_RATE,
- PCOM_CLKCTL_RPC_MIN_RATE,
- PCOM_CLKCTL_RPC_MAX_RATE,
- PCOM_CLKCTL_RPC_RATE,
- PCOM_CLKCTL_RPC_PLL_REQUEST,
- PCOM_CLKCTL_RPC_ENABLED,
- PCOM_VREG_SWITCH,
- PCOM_VREG_SET_LEVEL,
- PCOM_GPIO_TLMM_CONFIG_GROUP,
- PCOM_GPIO_TLMM_UNCONFIG_GROUP,
- PCOM_NV_WRITE_BYTES_4_7,
- PCOM_CONFIG_DISP,
- PCOM_GET_FTM_BOOT_COUNT,
- PCOM_RPC_GPIO_TLMM_CONFIG_EX,
- PCOM_PM_MPP_CONFIG,
- PCOM_GPIO_IN,
- PCOM_GPIO_OUT,
- PCOM_RESET_MODEM,
- PCOM_RESET_CHIP_IMM,
- PCOM_PM_VID_EN,
- PCOM_VREG_PULLDOWN,
- PCOM_GET_MODEM_VERSION,
- PCOM_CLK_REGIME_SEC_RESET,
- PCOM_CLK_REGIME_SEC_RESET_ASSERT,
- PCOM_CLK_REGIME_SEC_RESET_DEASSERT,
- PCOM_CLK_REGIME_SEC_PLL_REQUEST_WRP,
- PCOM_CLK_REGIME_SEC_ENABLE,
- PCOM_CLK_REGIME_SEC_DISABLE,
- PCOM_CLK_REGIME_SEC_IS_ON,
- PCOM_CLK_REGIME_SEC_SEL_CLK_INV,
- PCOM_CLK_REGIME_SEC_SEL_CLK_SRC,
- PCOM_CLK_REGIME_SEC_SEL_CLK_DIV,
- PCOM_CLK_REGIME_SEC_ICODEC_CLK_ENABLE,
- PCOM_CLK_REGIME_SEC_ICODEC_CLK_DISABLE,
- PCOM_CLK_REGIME_SEC_SEL_SPEED,
- PCOM_CLK_REGIME_SEC_CONFIG_GP_CLK_WRP,
- PCOM_CLK_REGIME_SEC_CONFIG_MDH_CLK_WRP,
- PCOM_CLK_REGIME_SEC_USB_XTAL_ON,
- PCOM_CLK_REGIME_SEC_USB_XTAL_OFF,
- PCOM_CLK_REGIME_SEC_SET_QDSP_DME_MODE,
- PCOM_CLK_REGIME_SEC_SWITCH_ADSP_CLK,
- PCOM_CLK_REGIME_SEC_GET_MAX_ADSP_CLK_KHZ,
- PCOM_CLK_REGIME_SEC_GET_I2C_CLK_KHZ,
- PCOM_CLK_REGIME_SEC_MSM_GET_CLK_FREQ_KHZ,
- PCOM_CLK_REGIME_SEC_SEL_VFE_SRC,
- PCOM_CLK_REGIME_SEC_MSM_SEL_CAMCLK,
- PCOM_CLK_REGIME_SEC_MSM_SEL_LCDCLK,
- PCOM_CLK_REGIME_SEC_VFE_RAIL_OFF,
- PCOM_CLK_REGIME_SEC_VFE_RAIL_ON,
- PCOM_CLK_REGIME_SEC_GRP_RAIL_OFF,
- PCOM_CLK_REGIME_SEC_GRP_RAIL_ON,
- PCOM_CLK_REGIME_SEC_VDC_RAIL_OFF,
- PCOM_CLK_REGIME_SEC_VDC_RAIL_ON,
- PCOM_CLK_REGIME_SEC_LCD_CTRL,
- PCOM_CLK_REGIME_SEC_REGISTER_FOR_CPU_RESOURCE,
- PCOM_CLK_REGIME_SEC_DEREGISTER_FOR_CPU_RESOURCE,
- PCOM_CLK_REGIME_SEC_RESOURCE_REQUEST_WRP,
- PCOM_CLK_REGIME_MSM_SEC_SEL_CLK_OWNER,
- PCOM_CLK_REGIME_SEC_DEVMAN_REQUEST_WRP,
- PCOM_GPIO_CONFIG,
- PCOM_GPIO_CONFIGURE_GROUP,
- PCOM_GPIO_TLMM_SET_PORT,
- PCOM_GPIO_TLMM_CONFIG_EX,
- PCOM_SET_FTM_BOOT_COUNT,
- PCOM_RESERVED0,
- PCOM_RESERVED1,
- PCOM_CUSTOMER_CMD1,
- PCOM_CUSTOMER_CMD2,
- PCOM_CUSTOMER_CMD3,
- PCOM_CLK_REGIME_ENTER_APPSBL_CHG_MODE,
- PCOM_CLK_REGIME_EXIT_APPSBL_CHG_MODE,
- PCOM_CLK_REGIME_SEC_RAIL_DISABLE,
- PCOM_CLK_REGIME_SEC_RAIL_ENABLE,
- PCOM_CLK_REGIME_SEC_RAIL_CONTROL,
- PCOM_SET_SW_WATCHDOG_STATE,
- PCOM_PM_MPP_CONFIG_DIGITAL_INPUT,
- PCOM_PM_MPP_CONFIG_I_SINK,
- PCOM_RESERVED_101,
- PCOM_MSM_HSUSB_PHY_RESET,
- PCOM_GET_BATT_MV_LEVEL,
- PCOM_CHG_USB_IS_PC_CONNECTED,
- PCOM_CHG_USB_IS_CHARGER_CONNECTED,
- PCOM_CHG_USB_IS_DISCONNECTED,
- PCOM_CHG_USB_IS_AVAILABLE,
- PCOM_CLK_REGIME_SEC_MSM_SEL_FREQ,
- PCOM_CLK_REGIME_SEC_SET_PCLK_AXI_POLICY,
- PCOM_CLKCTL_RPC_RESET_ASSERT,
- PCOM_CLKCTL_RPC_RESET_DEASSERT,
- PCOM_CLKCTL_RPC_RAIL_ON,
- PCOM_CLKCTL_RPC_RAIL_OFF,
- PCOM_CLKCTL_RPC_RAIL_ENABLE,
- PCOM_CLKCTL_RPC_RAIL_DISABLE,
- PCOM_CLKCTL_RPC_RAIL_CONTROL,
- PCOM_CLKCTL_RPC_MIN_MSMC1,
- PCOM_NUM_CMDS,
-};
-
-enum {
- PCOM_INVALID_STATUS = 0x0,
- PCOM_READY,
- PCOM_CMD_RUNNING,
- PCOM_CMD_SUCCESS,
- PCOM_CMD_FAIL,
- PCOM_CMD_FAIL_FALSE_RETURNED,
- PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_SERVER,
- PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_CLIENT,
- PCOM_CMD_FAIL_CMD_UNREGISTERED,
- PCOM_CMD_FAIL_CMD_LOCKED,
- PCOM_CMD_FAIL_SERVER_NOT_YET_READY,
- PCOM_CMD_FAIL_BAD_DESTINATION,
- PCOM_CMD_FAIL_SERVER_RESET,
- PCOM_CMD_FAIL_SMSM_NOT_INIT,
- PCOM_CMD_FAIL_PROC_COMM_BUSY,
- PCOM_CMD_FAIL_PROC_COMM_NOT_INIT,
-
-};
-
-/* List of VREGs that support the Pull Down Resistor setting. */
-enum vreg_pdown_id {
- PM_VREG_PDOWN_MSMA_ID,
- PM_VREG_PDOWN_MSMP_ID,
- PM_VREG_PDOWN_MSME1_ID, /* Not supported in Panoramix */
- PM_VREG_PDOWN_MSMC1_ID, /* Not supported in PM6620 */
- PM_VREG_PDOWN_MSMC2_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP3_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_MSME2_ID, /* Supported in PM7500 and Panoramix only */
- PM_VREG_PDOWN_GP4_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP1_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_TCXO_ID,
- PM_VREG_PDOWN_PA_ID,
- PM_VREG_PDOWN_RFTX_ID,
- PM_VREG_PDOWN_RFRX1_ID,
- PM_VREG_PDOWN_RFRX2_ID,
- PM_VREG_PDOWN_SYNT_ID,
- PM_VREG_PDOWN_WLAN_ID,
- PM_VREG_PDOWN_USB_ID,
- PM_VREG_PDOWN_MMC_ID,
- PM_VREG_PDOWN_RUIM_ID,
- PM_VREG_PDOWN_MSMC0_ID, /* Supported in PM6610 only */
- PM_VREG_PDOWN_GP2_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP5_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP6_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_RF_ID,
- PM_VREG_PDOWN_RF_VCO_ID,
- PM_VREG_PDOWN_MPLL_ID,
- PM_VREG_PDOWN_S2_ID,
- PM_VREG_PDOWN_S3_ID,
- PM_VREG_PDOWN_RFUBM_ID,
-
- /* new for HAN */
- PM_VREG_PDOWN_RF1_ID,
- PM_VREG_PDOWN_RF2_ID,
- PM_VREG_PDOWN_RFA_ID,
- PM_VREG_PDOWN_CDC2_ID,
- PM_VREG_PDOWN_RFTX2_ID,
- PM_VREG_PDOWN_USIM_ID,
- PM_VREG_PDOWN_USB2P6_ID,
- PM_VREG_PDOWN_USB3P3_ID,
- PM_VREG_PDOWN_INVALID_ID,
-
- /* backward compatible enums only */
- PM_VREG_PDOWN_CAM_ID = PM_VREG_PDOWN_GP1_ID,
- PM_VREG_PDOWN_MDDI_ID = PM_VREG_PDOWN_GP2_ID,
- PM_VREG_PDOWN_RUIM2_ID = PM_VREG_PDOWN_GP3_ID,
- PM_VREG_PDOWN_AUX_ID = PM_VREG_PDOWN_GP4_ID,
- PM_VREG_PDOWN_AUX2_ID = PM_VREG_PDOWN_GP5_ID,
- PM_VREG_PDOWN_BT_ID = PM_VREG_PDOWN_GP6_ID,
-
- PM_VREG_PDOWN_MSME_ID = PM_VREG_PDOWN_MSME1_ID,
- PM_VREG_PDOWN_MSMC_ID = PM_VREG_PDOWN_MSMC1_ID,
- PM_VREG_PDOWN_RFA1_ID = PM_VREG_PDOWN_RFRX2_ID,
- PM_VREG_PDOWN_RFA2_ID = PM_VREG_PDOWN_RFTX2_ID,
- PM_VREG_PDOWN_XO_ID = PM_VREG_PDOWN_TCXO_ID
-};
-
-enum {
- PCOM_CLKRGM_APPS_RESET_USB_PHY = 34,
- PCOM_CLKRGM_APPS_RESET_USBH = 37,
-};
-
-/* gpio info for PCOM_RPC_GPIO_TLMM_CONFIG_EX */
-
-#define GPIO_ENABLE 0
-#define GPIO_DISABLE 1
-
-#define GPIO_INPUT 0
-#define GPIO_OUTPUT 1
-
-#define GPIO_NO_PULL 0
-#define GPIO_PULL_DOWN 1
-#define GPIO_KEEPER 2
-#define GPIO_PULL_UP 3
-
-#define GPIO_2MA 0
-#define GPIO_4MA 1
-#define GPIO_6MA 2
-#define GPIO_8MA 3
-#define GPIO_10MA 4
-#define GPIO_12MA 5
-#define GPIO_14MA 6
-#define GPIO_16MA 7
-
-#define PCOM_GPIO_CFG(gpio, func, dir, pull, drvstr) \
- ((((gpio) & 0x3FF) << 4) | \
- ((func) & 0xf) | \
- (((dir) & 0x1) << 14) | \
- (((pull) & 0x3) << 15) | \
- (((drvstr) & 0xF) << 17))
-
-int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2);
-void proc_comm_boot_wait(void);
-
-#endif
diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c
deleted file mode 100644
index 689e78c95f38..000000000000
--- a/arch/arm/mach-msm/sirc.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <asm/irq.h>
-
-static unsigned int int_enable;
-static unsigned int wake_enable;
-
-static struct sirc_regs_t sirc_regs = {
- .int_enable = SPSS_SIRC_INT_ENABLE,
- .int_enable_clear = SPSS_SIRC_INT_ENABLE_CLEAR,
- .int_enable_set = SPSS_SIRC_INT_ENABLE_SET,
- .int_type = SPSS_SIRC_INT_TYPE,
- .int_polarity = SPSS_SIRC_INT_POLARITY,
- .int_clear = SPSS_SIRC_INT_CLEAR,
-};
-
-static struct sirc_cascade_regs sirc_reg_table[] = {
- {
- .int_status = SPSS_SIRC_IRQ_STATUS,
- .cascade_irq = INT_SIRC_0,
- }
-};
-
-/* Mask off the given interrupt. Keep the int_enable mask in sync with
- the enable reg, so it can be restored after power collapse. */
-static void sirc_irq_mask(struct irq_data *d)
-{
- unsigned int mask;
-
- mask = 1 << (d->irq - FIRST_SIRC_IRQ);
- writel(mask, sirc_regs.int_enable_clear);
- int_enable &= ~mask;
- return;
-}
-
-/* Unmask the given interrupt. Keep the int_enable mask in sync with
- the enable reg, so it can be restored after power collapse. */
-static void sirc_irq_unmask(struct irq_data *d)
-{
- unsigned int mask;
-
- mask = 1 << (d->irq - FIRST_SIRC_IRQ);
- writel(mask, sirc_regs.int_enable_set);
- int_enable |= mask;
- return;
-}
-
-static void sirc_irq_ack(struct irq_data *d)
-{
- unsigned int mask;
-
- mask = 1 << (d->irq - FIRST_SIRC_IRQ);
- writel(mask, sirc_regs.int_clear);
- return;
-}
-
-static int sirc_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- unsigned int mask;
-
- /* Used to set the interrupt enable mask during power collapse. */
- mask = 1 << (d->irq - FIRST_SIRC_IRQ);
- if (on)
- wake_enable |= mask;
- else
- wake_enable &= ~mask;
-
- return 0;
-}
-
-static int sirc_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- unsigned int mask;
- unsigned int val;
-
- mask = 1 << (d->irq - FIRST_SIRC_IRQ);
- val = readl(sirc_regs.int_polarity);
-
- if (flow_type & (IRQF_TRIGGER_LOW | IRQF_TRIGGER_FALLING))
- val |= mask;
- else
- val &= ~mask;
-
- writel(val, sirc_regs.int_polarity);
-
- val = readl(sirc_regs.int_type);
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- val |= mask;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
- } else {
- val &= ~mask;
- __irq_set_handler_locked(d->irq, handle_level_irq);
- }
-
- writel(val, sirc_regs.int_type);
-
- return 0;
-}
-
-/* Finds the pending interrupt on the passed cascade irq and redrives it */
-static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- unsigned int reg = 0;
- unsigned int sirq;
- unsigned int status;
-
- while ((reg < ARRAY_SIZE(sirc_reg_table)) &&
- (sirc_reg_table[reg].cascade_irq != irq))
- reg++;
-
- status = readl(sirc_reg_table[reg].int_status);
- status &= SIRC_MASK;
- if (status == 0)
- return;
-
- for (sirq = 0;
- (sirq < NR_SIRC_IRQS) && ((status & (1U << sirq)) == 0);
- sirq++)
- ;
- generic_handle_irq(sirq+FIRST_SIRC_IRQ);
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-}
-
-static struct irq_chip sirc_irq_chip = {
- .name = "sirc",
- .irq_ack = sirc_irq_ack,
- .irq_mask = sirc_irq_mask,
- .irq_unmask = sirc_irq_unmask,
- .irq_set_wake = sirc_irq_set_wake,
- .irq_set_type = sirc_irq_set_type,
-};
-
-void __init msm_init_sirc(void)
-{
- int i;
-
- int_enable = 0;
- wake_enable = 0;
-
- for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) {
- irq_set_chip_and_handler(i, &sirc_irq_chip, handle_edge_irq);
- set_irq_flags(i, IRQF_VALID);
- }
-
- for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) {
- irq_set_chained_handler(sirc_reg_table[i].cascade_irq,
- sirc_irq_handler);
- irq_set_irq_wake(sirc_reg_table[i].cascade_irq, 1);
- }
- return;
-}
-
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
deleted file mode 100644
index 7550f5a08956..000000000000
--- a/arch/arm/mach-msm/smd.c
+++ /dev/null
@@ -1,1034 +0,0 @@
-/* arch/arm/mach-msm/smd.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/wait.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-
-#include <mach/msm_smd.h>
-
-#include "smd_private.h"
-#include "proc_comm.h"
-
-#if defined(CONFIG_ARCH_QSD8X50)
-#define CONFIG_QDSP6 1
-#endif
-
-#define MODULE_NAME "msm_smd"
-
-enum {
- MSM_SMD_DEBUG = 1U << 0,
- MSM_SMSM_DEBUG = 1U << 0,
-};
-
-static int msm_smd_debug_mask;
-
-struct shared_info {
- int ready;
- void __iomem *state;
-};
-
-static unsigned dummy_state[SMSM_STATE_COUNT];
-
-static struct shared_info smd_info = {
- /* FIXME: not a real __iomem pointer */
- .state = &dummy_state,
-};
-
-module_param_named(debug_mask, msm_smd_debug_mask,
- int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-static unsigned last_heap_free = 0xffffffff;
-
-static inline void notify_other_smsm(void)
-{
- msm_a2m_int(5);
-#ifdef CONFIG_QDSP6
- msm_a2m_int(8);
-#endif
-}
-
-static inline void notify_modem_smd(void)
-{
- msm_a2m_int(0);
-}
-
-static inline void notify_dsp_smd(void)
-{
- msm_a2m_int(8);
-}
-
-static void smd_diag(void)
-{
- char *x;
-
- x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
- if (x != 0) {
- x[SZ_DIAG_ERR_MSG - 1] = 0;
- pr_debug("DIAG '%s'\n", x);
- }
-}
-
-/* call when SMSM_RESET flag is set in the A9's smsm_state */
-static void handle_modem_crash(void)
-{
- pr_err("ARM9 has CRASHED\n");
- smd_diag();
-
- /* in this case the modem or watchdog should reboot us */
- for (;;)
- ;
-}
-
-uint32_t raw_smsm_get_state(enum smsm_state_item item)
-{
- return readl(smd_info.state + item * 4);
-}
-
-static int check_for_modem_crash(void)
-{
- if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET) {
- handle_modem_crash();
- return -1;
- }
- return 0;
-}
-
-/* the spinlock is used to synchronize between the
- * irq handler and code that mutates the channel
- * list or fiddles with channel state
- */
-DEFINE_SPINLOCK(smd_lock);
-DEFINE_SPINLOCK(smem_lock);
-
-/* the mutex is used during open() and close()
- * operations to avoid races while creating or
- * destroying smd_channel structures
- */
-static DEFINE_MUTEX(smd_creation_mutex);
-
-static int smd_initialized;
-
-LIST_HEAD(smd_ch_closed_list);
-LIST_HEAD(smd_ch_list_modem);
-LIST_HEAD(smd_ch_list_dsp);
-
-static unsigned char smd_ch_allocated[64];
-static struct work_struct probe_work;
-
-/* how many bytes are available for reading */
-static int smd_stream_read_avail(struct smd_channel *ch)
-{
- return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
-}
-
-/* how many bytes we are free to write */
-static int smd_stream_write_avail(struct smd_channel *ch)
-{
- return ch->fifo_mask -
- ((ch->send->head - ch->send->tail) & ch->fifo_mask);
-}
-
-static int smd_packet_read_avail(struct smd_channel *ch)
-{
- if (ch->current_packet) {
- int n = smd_stream_read_avail(ch);
- if (n > ch->current_packet)
- n = ch->current_packet;
- return n;
- } else {
- return 0;
- }
-}
-
-static int smd_packet_write_avail(struct smd_channel *ch)
-{
- int n = smd_stream_write_avail(ch);
- return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
-}
-
-static int ch_is_open(struct smd_channel *ch)
-{
- return (ch->recv->state == SMD_SS_OPENED) &&
- (ch->send->state == SMD_SS_OPENED);
-}
-
-/* provide a pointer and length to readable data in the fifo */
-static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
-{
- unsigned head = ch->recv->head;
- unsigned tail = ch->recv->tail;
- *ptr = (void *) (ch->recv_data + tail);
-
- if (tail <= head)
- return head - tail;
- else
- return ch->fifo_size - tail;
-}
-
-/* advance the fifo read pointer after data from ch_read_buffer is consumed */
-static void ch_read_done(struct smd_channel *ch, unsigned count)
-{
- BUG_ON(count > smd_stream_read_avail(ch));
- ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
- ch->send->fTAIL = 1;
-}
-
-/* basic read interface to ch_read_{buffer,done}, used
- * by smd_*_read() and update_packet_state();
- * will read-and-discard if the _data pointer is null
- */
-static int ch_read(struct smd_channel *ch, void *_data, int len)
-{
- void *ptr;
- unsigned n;
- unsigned char *data = _data;
- int orig_len = len;
-
- while (len > 0) {
- n = ch_read_buffer(ch, &ptr);
- if (n == 0)
- break;
-
- if (n > len)
- n = len;
- if (_data)
- memcpy(data, ptr, n);
-
- data += n;
- len -= n;
- ch_read_done(ch, n);
- }
-
- return orig_len - len;
-}
-
-static void update_stream_state(struct smd_channel *ch)
-{
- /* streams have no special state requiring updating */
-}
-
-static void update_packet_state(struct smd_channel *ch)
-{
- unsigned hdr[5];
- int r;
-
- /* can't do anything if we're in the middle of a packet */
- if (ch->current_packet != 0)
- return;
-
- /* don't bother unless we can get the full header */
- if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
- return;
-
- r = ch_read(ch, hdr, SMD_HEADER_SIZE);
- BUG_ON(r != SMD_HEADER_SIZE);
-
- ch->current_packet = hdr[0];
-}
-
-/* provide a pointer and length to next free space in the fifo */
-static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
-{
- unsigned head = ch->send->head;
- unsigned tail = ch->send->tail;
- *ptr = (void *) (ch->send_data + head);
-
- if (head < tail) {
- return tail - head - 1;
- } else {
- if (tail == 0)
- return ch->fifo_size - head - 1;
- else
- return ch->fifo_size - head;
- }
-}
-
-/* advance the fifo write pointer after free space
- * from ch_write_buffer is filled
- */
-static void ch_write_done(struct smd_channel *ch, unsigned count)
-{
- BUG_ON(count > smd_stream_write_avail(ch));
- ch->send->head = (ch->send->head + count) & ch->fifo_mask;
- ch->send->fHEAD = 1;
-}
-
-static void ch_set_state(struct smd_channel *ch, unsigned n)
-{
- if (n == SMD_SS_OPENED) {
- ch->send->fDSR = 1;
- ch->send->fCTS = 1;
- ch->send->fCD = 1;
- } else {
- ch->send->fDSR = 0;
- ch->send->fCTS = 0;
- ch->send->fCD = 0;
- }
- ch->send->state = n;
- ch->send->fSTATE = 1;
- ch->notify_other_cpu();
-}
-
-static void do_smd_probe(void)
-{
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- if (shared->heap_info.free_offset != last_heap_free) {
- last_heap_free = shared->heap_info.free_offset;
- schedule_work(&probe_work);
- }
-}
-
-static void smd_state_change(struct smd_channel *ch,
- unsigned last, unsigned next)
-{
- ch->last_state = next;
-
- pr_debug("ch %d %d -> %d\n", ch->n, last, next);
-
- switch (next) {
- case SMD_SS_OPENING:
- ch->recv->tail = 0;
- case SMD_SS_OPENED:
- if (ch->send->state != SMD_SS_OPENED)
- ch_set_state(ch, SMD_SS_OPENED);
- ch->notify(ch->priv, SMD_EVENT_OPEN);
- break;
- case SMD_SS_FLUSHING:
- case SMD_SS_RESET:
- /* should we force them to close? */
- default:
- ch->notify(ch->priv, SMD_EVENT_CLOSE);
- }
-}
-
-static void handle_smd_irq(struct list_head *list, void (*notify)(void))
-{
- unsigned long flags;
- struct smd_channel *ch;
- int do_notify = 0;
- unsigned ch_flags;
- unsigned tmp;
-
- spin_lock_irqsave(&smd_lock, flags);
- list_for_each_entry(ch, list, ch_list) {
- ch_flags = 0;
- if (ch_is_open(ch)) {
- if (ch->recv->fHEAD) {
- ch->recv->fHEAD = 0;
- ch_flags |= 1;
- do_notify |= 1;
- }
- if (ch->recv->fTAIL) {
- ch->recv->fTAIL = 0;
- ch_flags |= 2;
- do_notify |= 1;
- }
- if (ch->recv->fSTATE) {
- ch->recv->fSTATE = 0;
- ch_flags |= 4;
- do_notify |= 1;
- }
- }
- tmp = ch->recv->state;
- if (tmp != ch->last_state)
- smd_state_change(ch, ch->last_state, tmp);
- if (ch_flags) {
- ch->update_state(ch);
- ch->notify(ch->priv, SMD_EVENT_DATA);
- }
- }
- if (do_notify)
- notify();
- spin_unlock_irqrestore(&smd_lock, flags);
- do_smd_probe();
-}
-
-static irqreturn_t smd_modem_irq_handler(int irq, void *data)
-{
- handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
- return IRQ_HANDLED;
-}
-
-#if defined(CONFIG_QDSP6)
-static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
-{
- handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
- return IRQ_HANDLED;
-}
-#endif
-
-static void smd_fake_irq_handler(unsigned long arg)
-{
- handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
- handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
-}
-
-static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
-
-static inline int smd_need_int(struct smd_channel *ch)
-{
- if (ch_is_open(ch)) {
- if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
- return 1;
- if (ch->recv->state != ch->last_state)
- return 1;
- }
- return 0;
-}
-
-void smd_sleep_exit(void)
-{
- unsigned long flags;
- struct smd_channel *ch;
- int need_int = 0;
-
- spin_lock_irqsave(&smd_lock, flags);
- list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
- if (smd_need_int(ch)) {
- need_int = 1;
- break;
- }
- }
- list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
- if (smd_need_int(ch)) {
- need_int = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&smd_lock, flags);
- do_smd_probe();
-
- if (need_int) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit need interrupt\n");
- tasklet_schedule(&smd_fake_irq_tasklet);
- }
-}
-
-
-void smd_kick(smd_channel_t *ch)
-{
- unsigned long flags;
- unsigned tmp;
-
- spin_lock_irqsave(&smd_lock, flags);
- ch->update_state(ch);
- tmp = ch->recv->state;
- if (tmp != ch->last_state) {
- ch->last_state = tmp;
- if (tmp == SMD_SS_OPENED)
- ch->notify(ch->priv, SMD_EVENT_OPEN);
- else
- ch->notify(ch->priv, SMD_EVENT_CLOSE);
- }
- ch->notify(ch->priv, SMD_EVENT_DATA);
- ch->notify_other_cpu();
- spin_unlock_irqrestore(&smd_lock, flags);
-}
-
-static int smd_is_packet(int chn, unsigned type)
-{
- type &= SMD_KIND_MASK;
- if (type == SMD_KIND_PACKET)
- return 1;
- if (type == SMD_KIND_STREAM)
- return 0;
-
- /* older AMSS reports SMD_KIND_UNKNOWN always */
- if ((chn > 4) || (chn == 1))
- return 1;
- else
- return 0;
-}
-
-static int smd_stream_write(smd_channel_t *ch, const void *_data, int len)
-{
- void *ptr;
- const unsigned char *buf = _data;
- unsigned xfer;
- int orig_len = len;
-
- if (len < 0)
- return -EINVAL;
-
- while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
- if (!ch_is_open(ch))
- break;
- if (xfer > len)
- xfer = len;
- memcpy(ptr, buf, xfer);
- ch_write_done(ch, xfer);
- len -= xfer;
- buf += xfer;
- if (len == 0)
- break;
- }
-
- ch->notify_other_cpu();
-
- return orig_len - len;
-}
-
-static int smd_packet_write(smd_channel_t *ch, const void *_data, int len)
-{
- unsigned hdr[5];
-
- if (len < 0)
- return -EINVAL;
-
- if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
- return -ENOMEM;
-
- hdr[0] = len;
- hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
-
- smd_stream_write(ch, hdr, sizeof(hdr));
- smd_stream_write(ch, _data, len);
-
- return len;
-}
-
-static int smd_stream_read(smd_channel_t *ch, void *data, int len)
-{
- int r;
-
- if (len < 0)
- return -EINVAL;
-
- r = ch_read(ch, data, len);
- if (r > 0)
- ch->notify_other_cpu();
-
- return r;
-}
-
-static int smd_packet_read(smd_channel_t *ch, void *data, int len)
-{
- unsigned long flags;
- int r;
-
- if (len < 0)
- return -EINVAL;
-
- if (len > ch->current_packet)
- len = ch->current_packet;
-
- r = ch_read(ch, data, len);
- if (r > 0)
- ch->notify_other_cpu();
-
- spin_lock_irqsave(&smd_lock, flags);
- ch->current_packet -= r;
- update_packet_state(ch);
- spin_unlock_irqrestore(&smd_lock, flags);
-
- return r;
-}
-
-static int smd_alloc_channel(const char *name, uint32_t cid, uint32_t type)
-{
- struct smd_channel *ch;
-
- ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
- if (ch == 0) {
- pr_err("smd_alloc_channel() out of memory\n");
- return -1;
- }
- ch->n = cid;
-
- if (_smd_alloc_channel(ch)) {
- kfree(ch);
- return -1;
- }
-
- ch->fifo_mask = ch->fifo_size - 1;
- ch->type = type;
-
- if ((type & SMD_TYPE_MASK) == SMD_TYPE_APPS_MODEM)
- ch->notify_other_cpu = notify_modem_smd;
- else
- ch->notify_other_cpu = notify_dsp_smd;
-
- if (smd_is_packet(cid, type)) {
- ch->read = smd_packet_read;
- ch->write = smd_packet_write;
- ch->read_avail = smd_packet_read_avail;
- ch->write_avail = smd_packet_write_avail;
- ch->update_state = update_packet_state;
- } else {
- ch->read = smd_stream_read;
- ch->write = smd_stream_write;
- ch->read_avail = smd_stream_read_avail;
- ch->write_avail = smd_stream_write_avail;
- ch->update_state = update_stream_state;
- }
-
- if ((type & 0xff) == 0)
- memcpy(ch->name, "SMD_", 4);
- else
- memcpy(ch->name, "DSP_", 4);
- memcpy(ch->name + 4, name, 20);
- ch->name[23] = 0;
- ch->pdev.name = ch->name;
- ch->pdev.id = -1;
-
- pr_debug("smd_alloc_channel() cid=%02d size=%05d '%s'\n",
- ch->n, ch->fifo_size, ch->name);
-
- mutex_lock(&smd_creation_mutex);
- list_add(&ch->ch_list, &smd_ch_closed_list);
- mutex_unlock(&smd_creation_mutex);
-
- platform_device_register(&ch->pdev);
- return 0;
-}
-
-static void smd_channel_probe_worker(struct work_struct *work)
-{
- struct smd_alloc_elm *shared;
- unsigned ctype;
- unsigned type;
- unsigned n;
-
- shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
- if (!shared) {
- pr_err("cannot find allocation table\n");
- return;
- }
- for (n = 0; n < 64; n++) {
- if (smd_ch_allocated[n])
- continue;
- if (!shared[n].ref_count)
- continue;
- if (!shared[n].name[0])
- continue;
- ctype = shared[n].ctype;
- type = ctype & SMD_TYPE_MASK;
-
- /* DAL channels are streams, but neither the modem
- * nor the DSP correctly indicates this. Fix up manually.
- */
- if (!memcmp(shared[n].name, "DAL", 3))
- ctype = (ctype & (~SMD_KIND_MASK)) | SMD_KIND_STREAM;
-
- type = shared[n].ctype & SMD_TYPE_MASK;
- if ((type == SMD_TYPE_APPS_MODEM) ||
- (type == SMD_TYPE_APPS_DSP))
- if (!smd_alloc_channel(shared[n].name, shared[n].cid, ctype))
- smd_ch_allocated[n] = 1;
- }
-}
-
-static void do_nothing_notify(void *priv, unsigned flags)
-{
-}
-
-struct smd_channel *smd_get_channel(const char *name)
-{
- struct smd_channel *ch;
-
- mutex_lock(&smd_creation_mutex);
- list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
- if (!strcmp(name, ch->name)) {
- list_del(&ch->ch_list);
- mutex_unlock(&smd_creation_mutex);
- return ch;
- }
- }
- mutex_unlock(&smd_creation_mutex);
-
- return NULL;
-}
-
-int smd_open(const char *name, smd_channel_t **_ch,
- void *priv, void (*notify)(void *, unsigned))
-{
- struct smd_channel *ch;
- unsigned long flags;
-
- if (smd_initialized == 0) {
- pr_info("smd_open() before smd_init()\n");
- return -ENODEV;
- }
-
- ch = smd_get_channel(name);
- if (!ch)
- return -ENODEV;
-
- if (notify == 0)
- notify = do_nothing_notify;
-
- ch->notify = notify;
- ch->current_packet = 0;
- ch->last_state = SMD_SS_CLOSED;
- ch->priv = priv;
-
- *_ch = ch;
-
- spin_lock_irqsave(&smd_lock, flags);
-
- if ((ch->type & SMD_TYPE_MASK) == SMD_TYPE_APPS_MODEM)
- list_add(&ch->ch_list, &smd_ch_list_modem);
- else
- list_add(&ch->ch_list, &smd_ch_list_dsp);
-
- /* If the remote side is CLOSING, we need to get it to
- * move to OPENING (which we'll do by moving from CLOSED to
- * OPENING) and then get it to move from OPENING to
- * OPENED (by doing the same state change ourselves).
- *
- * Otherwise, it should be OPENING and we can move directly
- * to OPENED so that it will follow.
- */
- if (ch->recv->state == SMD_SS_CLOSING) {
- ch->send->head = 0;
- ch_set_state(ch, SMD_SS_OPENING);
- } else {
- ch_set_state(ch, SMD_SS_OPENED);
- }
- spin_unlock_irqrestore(&smd_lock, flags);
- smd_kick(ch);
-
- return 0;
-}
-
-int smd_close(smd_channel_t *ch)
-{
- unsigned long flags;
-
- if (ch == 0)
- return -1;
-
- spin_lock_irqsave(&smd_lock, flags);
- ch->notify = do_nothing_notify;
- list_del(&ch->ch_list);
- ch_set_state(ch, SMD_SS_CLOSED);
- spin_unlock_irqrestore(&smd_lock, flags);
-
- mutex_lock(&smd_creation_mutex);
- list_add(&ch->ch_list, &smd_ch_closed_list);
- mutex_unlock(&smd_creation_mutex);
-
- return 0;
-}
-
-int smd_read(smd_channel_t *ch, void *data, int len)
-{
- return ch->read(ch, data, len);
-}
-
-int smd_write(smd_channel_t *ch, const void *data, int len)
-{
- return ch->write(ch, data, len);
-}
-
-int smd_write_atomic(smd_channel_t *ch, const void *data, int len)
-{
- unsigned long flags;
- int res;
- spin_lock_irqsave(&smd_lock, flags);
- res = ch->write(ch, data, len);
- spin_unlock_irqrestore(&smd_lock, flags);
- return res;
-}
-
-int smd_read_avail(smd_channel_t *ch)
-{
- return ch->read_avail(ch);
-}
-
-int smd_write_avail(smd_channel_t *ch)
-{
- return ch->write_avail(ch);
-}
-
-int smd_wait_until_readable(smd_channel_t *ch, int bytes)
-{
- return -1;
-}
-
-int smd_wait_until_writable(smd_channel_t *ch, int bytes)
-{
- return -1;
-}
-
-int smd_cur_packet_size(smd_channel_t *ch)
-{
- return ch->current_packet;
-}
-
-
-/* ------------------------------------------------------------------------- */
-
-void *smem_alloc(unsigned id, unsigned size)
-{
- return smem_find(id, size);
-}
-
-void __iomem *smem_item(unsigned id, unsigned *size)
-{
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- struct smem_heap_entry *toc = shared->heap_toc;
-
- if (id >= SMEM_NUM_ITEMS)
- return NULL;
-
- if (toc[id].allocated) {
- *size = toc[id].size;
- return (MSM_SHARED_RAM_BASE + toc[id].offset);
- } else {
- *size = 0;
- }
-
- return NULL;
-}
-
-void *smem_find(unsigned id, unsigned size_in)
-{
- unsigned size;
- void *ptr;
-
- ptr = smem_item(id, &size);
- if (!ptr)
- return 0;
-
- size_in = ALIGN(size_in, 8);
- if (size_in != size) {
- pr_err("smem_find(%d, %d): wrong size %d\n",
- id, size_in, size);
- return 0;
- }
-
- return ptr;
-}
-
-static irqreturn_t smsm_irq_handler(int irq, void *data)
-{
- unsigned long flags;
- unsigned apps, modm;
-
- spin_lock_irqsave(&smem_lock, flags);
-
- apps = raw_smsm_get_state(SMSM_STATE_APPS);
- modm = raw_smsm_get_state(SMSM_STATE_MODEM);
-
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("<SM %08x %08x>\n", apps, modm);
- if (modm & SMSM_RESET)
- handle_modem_crash();
-
- do_smd_probe();
-
- spin_unlock_irqrestore(&smem_lock, flags);
- return IRQ_HANDLED;
-}
-
-int smsm_change_state(enum smsm_state_item item,
- uint32_t clear_mask, uint32_t set_mask)
-{
- void __iomem *addr = smd_info.state + item * 4;
- unsigned long flags;
- unsigned state;
-
- if (!smd_info.ready)
- return -EIO;
-
- spin_lock_irqsave(&smem_lock, flags);
-
- if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET)
- handle_modem_crash();
-
- state = (readl(addr) & ~clear_mask) | set_mask;
- writel(state, addr);
-
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_change_state %d %x\n", item, state);
- notify_other_smsm();
-
- spin_unlock_irqrestore(&smem_lock, flags);
-
- return 0;
-}
-
-uint32_t smsm_get_state(enum smsm_state_item item)
-{
- unsigned long flags;
- uint32_t rv;
-
- spin_lock_irqsave(&smem_lock, flags);
-
- rv = readl(smd_info.state + item * 4);
-
- if (item == SMSM_STATE_MODEM && (rv & SMSM_RESET))
- handle_modem_crash();
-
- spin_unlock_irqrestore(&smem_lock, flags);
-
- return rv;
-}
-
-#ifdef CONFIG_ARCH_MSM_SCORPION
-
-int smsm_set_sleep_duration(uint32_t delay)
-{
- struct msm_dem_slave_data *ptr;
-
- ptr = smem_find(SMEM_APPS_DEM_SLAVE_DATA, sizeof(*ptr));
- if (ptr == NULL) {
- pr_err("smsm_set_sleep_duration <SM NO APPS_DEM_SLAVE_DATA>\n");
- return -EIO;
- }
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_set_sleep_duration %d -> %d\n",
- ptr->sleep_time, delay);
- ptr->sleep_time = delay;
- return 0;
-}
-
-#else
-
-int smsm_set_sleep_duration(uint32_t delay)
-{
- uint32_t *ptr;
-
- ptr = smem_find(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
- if (ptr == NULL) {
- pr_err("smsm_set_sleep_duration <SM NO SLEEP_DELAY>\n");
- return -EIO;
- }
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_set_sleep_duration %d -> %d\n",
- *ptr, delay);
- *ptr = delay;
- return 0;
-}
-
-#endif
-
-int smd_core_init(void)
-{
- int r;
-
- /* wait for essential items to be initialized */
- for (;;) {
- unsigned size;
- void __iomem *state;
- state = smem_item(SMEM_SMSM_SHARED_STATE, &size);
- if (size == SMSM_V1_SIZE || size == SMSM_V2_SIZE) {
- smd_info.state = state;
- break;
- }
- }
-
- smd_info.ready = 1;
-
- r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
- IRQF_TRIGGER_RISING, "smd_dev", 0);
- if (r < 0)
- return r;
- r = enable_irq_wake(INT_A9_M2A_0);
- if (r < 0)
- pr_err("smd_core_init: enable_irq_wake failed for A9_M2A_0\n");
-
- r = request_irq(INT_A9_M2A_5, smsm_irq_handler,
- IRQF_TRIGGER_RISING, "smsm_dev", 0);
- if (r < 0) {
- free_irq(INT_A9_M2A_0, 0);
- return r;
- }
- r = enable_irq_wake(INT_A9_M2A_5);
- if (r < 0)
- pr_err("smd_core_init: enable_irq_wake failed for A9_M2A_5\n");
-
-#if defined(CONFIG_QDSP6)
- r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
- IRQF_TRIGGER_RISING, "smd_dsp", 0);
- if (r < 0) {
- free_irq(INT_A9_M2A_0, 0);
- free_irq(INT_A9_M2A_5, 0);
- return r;
- }
-#endif
-
- /* check for any SMD channels that may already exist */
- do_smd_probe();
-
- /* indicate that we're up and running */
- smsm_change_state(SMSM_STATE_APPS,
- ~0, SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT | SMSM_RUN);
-#ifdef CONFIG_ARCH_MSM_SCORPION
- smsm_change_state(SMSM_STATE_APPS_DEM, ~0, 0);
-#endif
-
- return 0;
-}
-
-static int msm_smd_probe(struct platform_device *pdev)
-{
- /*
- * If we haven't waited for the ARM9 to boot up till now,
- * then we need to wait here. Otherwise this should just
- * return immediately.
- */
- proc_comm_boot_wait();
-
- INIT_WORK(&probe_work, smd_channel_probe_worker);
-
- if (smd_core_init()) {
- pr_err("smd_core_init() failed\n");
- return -1;
- }
-
- do_smd_probe();
-
- msm_check_for_modem_crash = check_for_modem_crash;
-
- msm_init_last_radio_log(THIS_MODULE);
-
- smd_initialized = 1;
-
- return 0;
-}
-
-static struct platform_driver msm_smd_driver = {
- .probe = msm_smd_probe,
- .driver = {
- .name = MODULE_NAME,
- },
-};
-
-static int __init msm_smd_init(void)
-{
- return platform_driver_register(&msm_smd_driver);
-}
-
-module_init(msm_smd_init);
-
-MODULE_DESCRIPTION("MSM Shared Memory Core");
-MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
deleted file mode 100644
index 8056b3e5590f..000000000000
--- a/arch/arm/mach-msm/smd_debug.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/* arch/arm/mach-msm/smd_debug.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-#include <mach/msm_iomap.h>
-
-#include "smd_private.h"
-
-#if defined(CONFIG_DEBUG_FS)
-
-static char *chstate(unsigned n)
-{
- switch (n) {
- case SMD_SS_CLOSED:
- return "CLOSED";
- case SMD_SS_OPENING:
- return "OPENING";
- case SMD_SS_OPENED:
- return "OPENED";
- case SMD_SS_FLUSHING:
- return "FLUSHING";
- case SMD_SS_CLOSING:
- return "CLOSING";
- case SMD_SS_RESET:
- return "RESET";
- case SMD_SS_RESET_OPENING:
- return "ROPENING";
- default:
- return "UNKNOWN";
- }
-}
-
-
-static int dump_ch(char *buf, int max, struct smd_channel *ch)
-{
- volatile struct smd_half_channel *s = ch->send;
- volatile struct smd_half_channel *r = ch->recv;
-
- return scnprintf(
- buf, max,
- "ch%02d:"
- " %8s(%05d/%05d) %c%c%c%c%c%c%c <->"
- " %8s(%05d/%05d) %c%c%c%c%c%c%c '%s'\n", ch->n,
- chstate(s->state), s->tail, s->head,
- s->fDSR ? 'D' : 'd',
- s->fCTS ? 'C' : 'c',
- s->fCD ? 'C' : 'c',
- s->fRI ? 'I' : 'i',
- s->fHEAD ? 'W' : 'w',
- s->fTAIL ? 'R' : 'r',
- s->fSTATE ? 'S' : 's',
- chstate(r->state), r->tail, r->head,
- r->fDSR ? 'D' : 'd',
- r->fCTS ? 'R' : 'r',
- r->fCD ? 'C' : 'c',
- r->fRI ? 'I' : 'i',
- r->fHEAD ? 'W' : 'w',
- r->fTAIL ? 'R' : 'r',
- r->fSTATE ? 'S' : 's',
- ch->name
- );
-}
-
-static int debug_read_stat(char *buf, int max)
-{
- char *msg;
- int i = 0;
-
- msg = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
-
- if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET)
- i += scnprintf(buf + i, max - i,
- "smsm: ARM9 HAS CRASHED\n");
-
- i += scnprintf(buf + i, max - i, "smsm: a9: %08x a11: %08x\n",
- raw_smsm_get_state(SMSM_STATE_MODEM),
- raw_smsm_get_state(SMSM_STATE_APPS));
-#ifdef CONFIG_ARCH_MSM_SCORPION
- i += scnprintf(buf + i, max - i, "smsm dem: apps: %08x modem: %08x "
- "qdsp6: %08x power: %08x time: %08x\n",
- raw_smsm_get_state(SMSM_STATE_APPS_DEM),
- raw_smsm_get_state(SMSM_STATE_MODEM_DEM),
- raw_smsm_get_state(SMSM_STATE_QDSP6_DEM),
- raw_smsm_get_state(SMSM_STATE_POWER_MASTER_DEM),
- raw_smsm_get_state(SMSM_STATE_TIME_MASTER_DEM));
-#endif
- if (msg) {
- msg[SZ_DIAG_ERR_MSG - 1] = 0;
- i += scnprintf(buf + i, max - i, "diag: '%s'\n", msg);
- }
- return i;
-}
-
-static int debug_read_mem(char *buf, int max)
-{
- unsigned n;
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- struct smem_heap_entry *toc = shared->heap_toc;
- int i = 0;
-
- i += scnprintf(buf + i, max - i,
- "heap: init=%d free=%d remain=%d\n",
- shared->heap_info.initialized,
- shared->heap_info.free_offset,
- shared->heap_info.heap_remaining);
-
- for (n = 0; n < SMEM_NUM_ITEMS; n++) {
- if (toc[n].allocated == 0)
- continue;
- i += scnprintf(buf + i, max - i,
- "%04d: offset %08x size %08x\n",
- n, toc[n].offset, toc[n].size);
- }
- return i;
-}
-
-static int debug_read_ch(char *buf, int max)
-{
- struct smd_channel *ch;
- unsigned long flags;
- int i = 0;
-
- spin_lock_irqsave(&smd_lock, flags);
- list_for_each_entry(ch, &smd_ch_list_dsp, ch_list)
- i += dump_ch(buf + i, max - i, ch);
- list_for_each_entry(ch, &smd_ch_list_modem, ch_list)
- i += dump_ch(buf + i, max - i, ch);
- list_for_each_entry(ch, &smd_ch_closed_list, ch_list)
- i += dump_ch(buf + i, max - i, ch);
- spin_unlock_irqrestore(&smd_lock, flags);
-
- return i;
-}
-
-static int debug_read_version(char *buf, int max)
-{
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- unsigned version = shared->version[VERSION_MODEM];
- return sprintf(buf, "%d.%d\n", version >> 16, version & 0xffff);
-}
-
-static int debug_read_build_id(char *buf, int max)
-{
- unsigned size;
- void *data;
-
- data = smem_item(SMEM_HW_SW_BUILD_ID, &size);
- if (!data)
- return 0;
-
- if (size >= max)
- size = max;
- memcpy(buf, data, size);
-
- return size;
-}
-
-static int debug_read_alloc_tbl(char *buf, int max)
-{
- struct smd_alloc_elm *shared;
- int n, i = 0;
-
- shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
-
- for (n = 0; n < 64; n++) {
- if (shared[n].ref_count == 0)
- continue;
- i += scnprintf(buf + i, max - i,
- "%03d: %-20s cid=%02d type=%03d "
- "kind=%02d ref_count=%d\n",
- n, shared[n].name, shared[n].cid,
- shared[n].ctype & 0xff,
- (shared[n].ctype >> 8) & 0xf,
- shared[n].ref_count);
- }
-
- return i;
-}
-
-#define DEBUG_BUFMAX 4096
-static char debug_buffer[DEBUG_BUFMAX];
-
-static ssize_t debug_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- int (*fill)(char *buf, int max) = file->private_data;
- int bsize = fill(debug_buffer, DEBUG_BUFMAX);
- return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
-}
-
-static const struct file_operations debug_ops = {
- .read = debug_read,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static void debug_create(const char *name, umode_t mode,
- struct dentry *dent,
- int (*fill)(char *buf, int max))
-{
- debugfs_create_file(name, mode, dent, fill, &debug_ops);
-}
-
-int __init smd_debugfs_init(void)
-{
- struct dentry *dent;
-
- dent = debugfs_create_dir("smd", 0);
- if (IS_ERR(dent))
- return 1;
-
- debug_create("ch", 0444, dent, debug_read_ch);
- debug_create("stat", 0444, dent, debug_read_stat);
- debug_create("mem", 0444, dent, debug_read_mem);
- debug_create("version", 0444, dent, debug_read_version);
- debug_create("tbl", 0444, dent, debug_read_alloc_tbl);
- debug_create("build", 0444, dent, debug_read_build_id);
-
- return 0;
-}
-
-#endif
-
-
-#define MAX_NUM_SLEEP_CLIENTS 64
-#define MAX_SLEEP_NAME_LEN 8
-
-#define NUM_GPIO_INT_REGISTERS 6
-#define GPIO_SMEM_NUM_GROUPS 2
-#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
-
-struct tramp_gpio_save {
- unsigned int enable;
- unsigned int detect;
- unsigned int polarity;
-};
-
-struct tramp_gpio_smem {
- uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
- uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
- uint32_t enabled[NUM_GPIO_INT_REGISTERS];
- uint32_t detection[NUM_GPIO_INT_REGISTERS];
- uint32_t polarity[NUM_GPIO_INT_REGISTERS];
-};
-
-
-void smsm_print_sleep_info(void)
-{
- unsigned long flags;
- uint32_t *ptr;
-#ifndef CONFIG_ARCH_MSM_SCORPION
- struct tramp_gpio_smem *gpio;
- struct smsm_interrupt_info *int_info;
-#endif
-
-
- spin_lock_irqsave(&smem_lock, flags);
-
- ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SMSM_SLEEP_DELAY: %x\n", *ptr);
-
- ptr = smem_alloc(SMEM_SMSM_LIMIT_SLEEP, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SMSM_LIMIT_SLEEP: %x\n", *ptr);
-
- ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
-
-#ifndef CONFIG_ARCH_MSM_SCORPION
- int_info = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*int_info));
- if (int_info)
- pr_info("SMEM_SMSM_INT_INFO %x %x %x\n",
- int_info->interrupt_mask,
- int_info->pending_interrupts,
- int_info->wakeup_reason);
-
- gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
- if (gpio) {
- int i;
- for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++)
- pr_info("SMEM_GPIO_INT: %d: e %x d %x p %x\n",
- i, gpio->enabled[i], gpio->detection[i],
- gpio->polarity[i]);
-
- for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
- pr_info("SMEM_GPIO_INT: %d: f %d: %d %d...\n",
- i, gpio->num_fired[i], gpio->fired[i][0],
- gpio->fired[i][1]);
- }
-#else
-#endif
- spin_unlock_irqrestore(&smem_lock, flags);
-}
-
diff --git a/arch/arm/mach-msm/smd_private.h b/arch/arm/mach-msm/smd_private.h
deleted file mode 100644
index 727bfe68aa9b..000000000000
--- a/arch/arm/mach-msm/smd_private.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/* arch/arm/mach-msm/smd_private.h
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
-#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
-
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/io.h>
-
-#include <mach/msm_iomap.h>
-
-struct smem_heap_info {
- unsigned initialized;
- unsigned free_offset;
- unsigned heap_remaining;
- unsigned reserved;
-};
-
-struct smem_heap_entry {
- unsigned allocated;
- unsigned offset;
- unsigned size;
- unsigned reserved;
-};
-
-struct smem_proc_comm {
- unsigned command;
- unsigned status;
- unsigned data1;
- unsigned data2;
-};
-
-#define PC_APPS 0
-#define PC_MODEM 1
-
-#define VERSION_SMD 0
-#define VERSION_QDSP6 4
-#define VERSION_APPS_SBL 6
-#define VERSION_MODEM_SBL 7
-#define VERSION_APPS 8
-#define VERSION_MODEM 9
-
-struct smem_shared {
- struct smem_proc_comm proc_comm[4];
- unsigned version[32];
- struct smem_heap_info heap_info;
- struct smem_heap_entry heap_toc[512];
-};
-
-#define SMSM_V1_SIZE (sizeof(unsigned) * 8)
-#define SMSM_V2_SIZE (sizeof(unsigned) * 4)
-
-#ifdef CONFIG_MSM_SMD_PKG3
-struct smsm_interrupt_info {
- uint32_t interrupt_mask;
- uint32_t pending_interrupts;
- uint32_t wakeup_reason;
-};
-#else
-#define DEM_MAX_PORT_NAME_LEN (20)
-struct msm_dem_slave_data {
- uint32_t sleep_time;
- uint32_t interrupt_mask;
- uint32_t resources_used;
- uint32_t reserved1;
-
- uint32_t wakeup_reason;
- uint32_t pending_interrupts;
- uint32_t rpc_prog;
- uint32_t rpc_proc;
- char smd_port_name[DEM_MAX_PORT_NAME_LEN];
- uint32_t reserved2;
-};
-#endif
-
-#define SZ_DIAG_ERR_MSG 0xC8
-#define ID_DIAG_ERR_MSG SMEM_DIAG_ERR_MESSAGE
-#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
-#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
-#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
-
-#define SMSM_INIT 0x00000001
-#define SMSM_SMDINIT 0x00000008
-#define SMSM_RPCINIT 0x00000020
-#define SMSM_RESET 0x00000040
-#define SMSM_RSA 0x00000080
-#define SMSM_RUN 0x00000100
-#define SMSM_PWRC 0x00000200
-#define SMSM_TIMEWAIT 0x00000400
-#define SMSM_TIMEINIT 0x00000800
-#define SMSM_PWRC_EARLY_EXIT 0x00001000
-#define SMSM_WFPI 0x00002000
-#define SMSM_SLEEP 0x00004000
-#define SMSM_SLEEPEXIT 0x00008000
-#define SMSM_APPS_REBOOT 0x00020000
-#define SMSM_SYSTEM_POWER_DOWN 0x00040000
-#define SMSM_SYSTEM_REBOOT 0x00080000
-#define SMSM_SYSTEM_DOWNLOAD 0x00100000
-#define SMSM_PWRC_SUSPEND 0x00200000
-#define SMSM_APPS_SHUTDOWN 0x00400000
-#define SMSM_SMD_LOOPBACK 0x00800000
-#define SMSM_RUN_QUIET 0x01000000
-#define SMSM_MODEM_WAIT 0x02000000
-#define SMSM_MODEM_BREAK 0x04000000
-#define SMSM_MODEM_CONTINUE 0x08000000
-#define SMSM_UNKNOWN 0x80000000
-
-#define SMSM_WKUP_REASON_RPC 0x00000001
-#define SMSM_WKUP_REASON_INT 0x00000002
-#define SMSM_WKUP_REASON_GPIO 0x00000004
-#define SMSM_WKUP_REASON_TIMER 0x00000008
-#define SMSM_WKUP_REASON_ALARM 0x00000010
-#define SMSM_WKUP_REASON_RESET 0x00000020
-
-#ifdef CONFIG_ARCH_MSM7X00A
-enum smsm_state_item {
- SMSM_STATE_APPS = 1,
- SMSM_STATE_MODEM = 3,
- SMSM_STATE_COUNT,
-};
-#else
-enum smsm_state_item {
- SMSM_STATE_APPS,
- SMSM_STATE_MODEM,
- SMSM_STATE_HEXAGON,
- SMSM_STATE_APPS_DEM,
- SMSM_STATE_MODEM_DEM,
- SMSM_STATE_QDSP6_DEM,
- SMSM_STATE_POWER_MASTER_DEM,
- SMSM_STATE_TIME_MASTER_DEM,
- SMSM_STATE_COUNT,
-};
-#endif
-
-void *smem_alloc(unsigned id, unsigned size);
-int smsm_change_state(enum smsm_state_item item, uint32_t clear_mask, uint32_t set_mask);
-uint32_t smsm_get_state(enum smsm_state_item item);
-int smsm_set_sleep_duration(uint32_t delay);
-void smsm_print_sleep_info(void);
-
-#define SMEM_NUM_SMD_CHANNELS 64
-
-typedef enum {
- /* fixed items */
- SMEM_PROC_COMM = 0,
- SMEM_HEAP_INFO,
- SMEM_ALLOCATION_TABLE,
- SMEM_VERSION_INFO,
- SMEM_HW_RESET_DETECT,
- SMEM_AARM_WARM_BOOT,
- SMEM_DIAG_ERR_MESSAGE,
- SMEM_SPINLOCK_ARRAY,
- SMEM_MEMORY_BARRIER_LOCATION,
-
- /* dynamic items */
- SMEM_AARM_PARTITION_TABLE,
- SMEM_AARM_BAD_BLOCK_TABLE,
- SMEM_RESERVE_BAD_BLOCKS,
- SMEM_WM_UUID,
- SMEM_CHANNEL_ALLOC_TBL,
- SMEM_SMD_BASE_ID,
- SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_CHANNELS,
- SMEM_SMEM_LOG_EVENTS,
- SMEM_SMEM_STATIC_LOG_IDX,
- SMEM_SMEM_STATIC_LOG_EVENTS,
- SMEM_SMEM_SLOW_CLOCK_SYNC,
- SMEM_SMEM_SLOW_CLOCK_VALUE,
- SMEM_BIO_LED_BUF,
- SMEM_SMSM_SHARED_STATE,
- SMEM_SMSM_INT_INFO,
- SMEM_SMSM_SLEEP_DELAY,
- SMEM_SMSM_LIMIT_SLEEP,
- SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
- SMEM_KEYPAD_KEYS_PRESSED,
- SMEM_KEYPAD_STATE_UPDATED,
- SMEM_KEYPAD_STATE_IDX,
- SMEM_GPIO_INT,
- SMEM_MDDI_LCD_IDX,
- SMEM_MDDI_HOST_DRIVER_STATE,
- SMEM_MDDI_LCD_DISP_STATE,
- SMEM_LCD_CUR_PANEL,
- SMEM_MARM_BOOT_SEGMENT_INFO,
- SMEM_AARM_BOOT_SEGMENT_INFO,
- SMEM_SLEEP_STATIC,
- SMEM_SCORPION_FREQUENCY,
- SMEM_SMD_PROFILES,
- SMEM_TSSC_BUSY,
- SMEM_HS_SUSPEND_FILTER_INFO,
- SMEM_BATT_INFO,
- SMEM_APPS_BOOT_MODE,
- SMEM_VERSION_FIRST,
- SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
- SMEM_OSS_RRCASN1_BUF1,
- SMEM_OSS_RRCASN1_BUF2,
- SMEM_ID_VENDOR0,
- SMEM_ID_VENDOR1,
- SMEM_ID_VENDOR2,
- SMEM_HW_SW_BUILD_ID,
- SMEM_SMD_BLOCK_PORT_BASE_ID,
- SMEM_SMD_BLOCK_PORT_PROC0_HEAP = SMEM_SMD_BLOCK_PORT_BASE_ID + SMEM_NUM_SMD_CHANNELS,
- SMEM_SMD_BLOCK_PORT_PROC1_HEAP = SMEM_SMD_BLOCK_PORT_PROC0_HEAP + SMEM_NUM_SMD_CHANNELS,
- SMEM_I2C_MUTEX = SMEM_SMD_BLOCK_PORT_PROC1_HEAP + SMEM_NUM_SMD_CHANNELS,
- SMEM_SCLK_CONVERSION,
- SMEM_SMD_SMSM_INTR_MUX,
- SMEM_SMSM_CPU_INTR_MASK,
- SMEM_APPS_DEM_SLAVE_DATA,
- SMEM_QDSP6_DEM_SLAVE_DATA,
- SMEM_CLKREGIM_BSP,
- SMEM_CLKREGIM_SOURCES,
- SMEM_SMD_FIFO_BASE_ID,
- SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID + SMEM_NUM_SMD_CHANNELS,
- SMEM_POWER_ON_STATUS_INFO,
- SMEM_DAL_AREA,
- SMEM_SMEM_LOG_POWER_IDX,
- SMEM_SMEM_LOG_POWER_WRAP,
- SMEM_SMEM_LOG_POWER_EVENTS,
- SMEM_ERR_CRASH_LOG,
- SMEM_ERR_F3_TRACE_LOG,
- SMEM_NUM_ITEMS,
-} smem_mem_type;
-
-
-#define SMD_SS_CLOSED 0x00000000
-#define SMD_SS_OPENING 0x00000001
-#define SMD_SS_OPENED 0x00000002
-#define SMD_SS_FLUSHING 0x00000003
-#define SMD_SS_CLOSING 0x00000004
-#define SMD_SS_RESET 0x00000005
-#define SMD_SS_RESET_OPENING 0x00000006
-
-#define SMD_BUF_SIZE 8192
-#define SMD_CHANNELS 64
-
-#define SMD_HEADER_SIZE 20
-
-struct smd_alloc_elm {
- char name[20];
- uint32_t cid;
- uint32_t ctype;
- uint32_t ref_count;
-};
-
-struct smd_half_channel {
- unsigned state;
- unsigned char fDSR;
- unsigned char fCTS;
- unsigned char fCD;
- unsigned char fRI;
- unsigned char fHEAD;
- unsigned char fTAIL;
- unsigned char fSTATE;
- unsigned char fUNUSED;
- unsigned tail;
- unsigned head;
-} __attribute__(( aligned(4), packed ));
-
-/* Only used on SMD package v3 on msm7201a */
-struct smd_shared_v1 {
- struct smd_half_channel ch0;
- unsigned char data0[SMD_BUF_SIZE];
- struct smd_half_channel ch1;
- unsigned char data1[SMD_BUF_SIZE];
-};
-
-/* Used on SMD package v4 */
-struct smd_shared_v2 {
- struct smd_half_channel ch0;
- struct smd_half_channel ch1;
-};
-
-struct smd_channel {
- volatile struct smd_half_channel *send;
- volatile struct smd_half_channel *recv;
- unsigned char *send_data;
- unsigned char *recv_data;
-
- unsigned fifo_mask;
- unsigned fifo_size;
- unsigned current_packet;
- unsigned n;
-
- struct list_head ch_list;
-
- void *priv;
- void (*notify)(void *priv, unsigned flags);
-
- int (*read)(struct smd_channel *ch, void *data, int len);
- int (*write)(struct smd_channel *ch, const void *data, int len);
- int (*read_avail)(struct smd_channel *ch);
- int (*write_avail)(struct smd_channel *ch);
-
- void (*update_state)(struct smd_channel *ch);
- unsigned last_state;
- void (*notify_other_cpu)(void);
- unsigned type;
-
- char name[32];
- struct platform_device pdev;
-};
-
-#define SMD_TYPE_MASK 0x0FF
-#define SMD_TYPE_APPS_MODEM 0x000
-#define SMD_TYPE_APPS_DSP 0x001
-#define SMD_TYPE_MODEM_DSP 0x002
-
-#define SMD_KIND_MASK 0xF00
-#define SMD_KIND_UNKNOWN 0x000
-#define SMD_KIND_STREAM 0x100
-#define SMD_KIND_PACKET 0x200
-
-extern struct list_head smd_ch_closed_list;
-extern struct list_head smd_ch_list_modem;
-extern struct list_head smd_ch_list_dsp;
-
-extern spinlock_t smd_lock;
-extern spinlock_t smem_lock;
-
-void *smem_find(unsigned id, unsigned size);
-void *smem_item(unsigned id, unsigned *size);
-uint32_t raw_smsm_get_state(enum smsm_state_item item);
-
-extern void msm_init_last_radio_log(struct module *);
-
-#ifdef CONFIG_MSM_SMD_PKG3
-/*
- * This allocator assumes an SMD Package v3 which only exists on
- * MSM7x00 SoC's.
- */
-static inline int _smd_alloc_channel(struct smd_channel *ch)
-{
- struct smd_shared_v1 *shared1;
-
- shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
- if (!shared1) {
- pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
- return -1;
- }
- ch->send = &shared1->ch0;
- ch->recv = &shared1->ch1;
- ch->send_data = shared1->data0;
- ch->recv_data = shared1->data1;
- ch->fifo_size = SMD_BUF_SIZE;
- return 0;
-}
-#else
-/*
- * This allocator assumes an SMD Package v4, the most common
- * and the default.
- */
-static inline int _smd_alloc_channel(struct smd_channel *ch)
-{
- struct smd_shared_v2 *shared2;
- void *buffer;
- unsigned buffer_sz;
-
- shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
- buffer = smem_item(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
-
- if (!buffer)
- return -1;
-
- /* buffer must be a power-of-two size */
- if (buffer_sz & (buffer_sz - 1))
- return -1;
-
- buffer_sz /= 2;
- ch->send = &shared2->ch0;
- ch->recv = &shared2->ch1;
- ch->send_data = buffer;
- ch->recv_data = buffer + buffer_sz;
- ch->fifo_size = buffer_sz;
- return 0;
-}
-#endif /* CONFIG_MSM_SMD_PKG3 */
-
-#if defined(CONFIG_ARCH_MSM7X30)
-static inline void msm_a2m_int(uint32_t irq)
-{
- writel(1 << irq, MSM_GCC_BASE + 0x8);
-}
-#else
-static inline void msm_a2m_int(uint32_t irq)
-{
- writel(1, MSM_CSR_BASE + 0x400 + (irq * 4));
-}
-#endif /* CONFIG_ARCH_MSM7X30 */
-
-
-#endif
diff --git a/arch/arm/mach-msm/vreg.c b/arch/arm/mach-msm/vreg.c
deleted file mode 100644
index bd66ed04d6dc..000000000000
--- a/arch/arm/mach-msm/vreg.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/* arch/arm/mach-msm/vreg.c
- *
- * Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <mach/vreg.h>
-
-#include "proc_comm.h"
-
-struct vreg {
- const char *name;
- unsigned id;
- int status;
- unsigned refcnt;
-};
-
-#define VREG(_name, _id, _status, _refcnt) \
- { .name = _name, .id = _id, .status = _status, .refcnt = _refcnt }
-
-static struct vreg vregs[] = {
- VREG("msma", 0, 0, 0),
- VREG("msmp", 1, 0, 0),
- VREG("msme1", 2, 0, 0),
- VREG("msmc1", 3, 0, 0),
- VREG("msmc2", 4, 0, 0),
- VREG("gp3", 5, 0, 0),
- VREG("msme2", 6, 0, 0),
- VREG("gp4", 7, 0, 0),
- VREG("gp1", 8, 0, 0),
- VREG("tcxo", 9, 0, 0),
- VREG("pa", 10, 0, 0),
- VREG("rftx", 11, 0, 0),
- VREG("rfrx1", 12, 0, 0),
- VREG("rfrx2", 13, 0, 0),
- VREG("synt", 14, 0, 0),
- VREG("wlan", 15, 0, 0),
- VREG("usb", 16, 0, 0),
- VREG("boost", 17, 0, 0),
- VREG("mmc", 18, 0, 0),
- VREG("ruim", 19, 0, 0),
- VREG("msmc0", 20, 0, 0),
- VREG("gp2", 21, 0, 0),
- VREG("gp5", 22, 0, 0),
- VREG("gp6", 23, 0, 0),
- VREG("rf", 24, 0, 0),
- VREG("rf_vco", 26, 0, 0),
- VREG("mpll", 27, 0, 0),
- VREG("s2", 28, 0, 0),
- VREG("s3", 29, 0, 0),
- VREG("rfubm", 30, 0, 0),
- VREG("ncp", 31, 0, 0),
- VREG("gp7", 32, 0, 0),
- VREG("gp8", 33, 0, 0),
- VREG("gp9", 34, 0, 0),
- VREG("gp10", 35, 0, 0),
- VREG("gp11", 36, 0, 0),
- VREG("gp12", 37, 0, 0),
- VREG("gp13", 38, 0, 0),
- VREG("gp14", 39, 0, 0),
- VREG("gp15", 40, 0, 0),
- VREG("gp16", 41, 0, 0),
- VREG("gp17", 42, 0, 0),
- VREG("s4", 43, 0, 0),
- VREG("usb2", 44, 0, 0),
- VREG("wlan2", 45, 0, 0),
- VREG("xo_out", 46, 0, 0),
- VREG("lvsw0", 47, 0, 0),
- VREG("lvsw1", 48, 0, 0),
-};
-
-struct vreg *vreg_get(struct device *dev, const char *id)
-{
- int n;
- for (n = 0; n < ARRAY_SIZE(vregs); n++) {
- if (!strcmp(vregs[n].name, id))
- return vregs + n;
- }
- return ERR_PTR(-ENOENT);
-}
-
-void vreg_put(struct vreg *vreg)
-{
-}
-
-int vreg_enable(struct vreg *vreg)
-{
- unsigned id = vreg->id;
- unsigned enable = 1;
-
- if (vreg->refcnt == 0)
- vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
-
- if ((vreg->refcnt < UINT_MAX) && (!vreg->status))
- vreg->refcnt++;
-
- return vreg->status;
-}
-
-int vreg_disable(struct vreg *vreg)
-{
- unsigned id = vreg->id;
- unsigned enable = 0;
-
- if (!vreg->refcnt)
- return 0;
-
- if (vreg->refcnt == 1)
- vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
-
- if (!vreg->status)
- vreg->refcnt--;
-
- return vreg->status;
-}
-
-int vreg_set_level(struct vreg *vreg, unsigned mv)
-{
- unsigned id = vreg->id;
-
- vreg->status = msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
- return vreg->status;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static int vreg_debug_set(void *data, u64 val)
-{
- struct vreg *vreg = data;
- switch (val) {
- case 0:
- vreg_disable(vreg);
- break;
- case 1:
- vreg_enable(vreg);
- break;
- default:
- vreg_set_level(vreg, val);
- break;
- }
- return 0;
-}
-
-static int vreg_debug_get(void *data, u64 *val)
-{
- struct vreg *vreg = data;
-
- if (!vreg->status)
- *val = 0;
- else
- *val = 1;
-
- return 0;
-}
-
-static int vreg_debug_count_set(void *data, u64 val)
-{
- struct vreg *vreg = data;
- if (val > UINT_MAX)
- val = UINT_MAX;
- vreg->refcnt = val;
- return 0;
-}
-
-static int vreg_debug_count_get(void *data, u64 *val)
-{
- struct vreg *vreg = data;
-
- *val = vreg->refcnt;
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(vreg_fops, vreg_debug_get, vreg_debug_set, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(vreg_count_fops, vreg_debug_count_get,
- vreg_debug_count_set, "%llu\n");
-
-static int __init vreg_debug_init(void)
-{
- struct dentry *dent;
- int n;
- char name[32];
- const char *refcnt_name = "_refcnt";
-
- dent = debugfs_create_dir("vreg", 0);
- if (IS_ERR(dent))
- return 0;
-
- for (n = 0; n < ARRAY_SIZE(vregs); n++) {
- (void) debugfs_create_file(vregs[n].name, 0644,
- dent, vregs + n, &vreg_fops);
-
- strlcpy(name, vregs[n].name, sizeof(name));
- strlcat(name, refcnt_name, sizeof(name));
- (void) debugfs_create_file(name, 0644,
- dent, vregs + n, &vreg_count_fops);
- }
-
- return 0;
-}
-
-device_initcall(vreg_debug_init);
-#endif
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index c1e4567a5ab3..97473168d6b6 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -64,6 +64,20 @@ config MACH_ARMADA_38X
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 380/385 SoC with device tree.
+config MACH_ARMADA_39X
+ bool "Marvell Armada 39x boards" if ARCH_MULTI_V7
+ select ARM_GIC
+ select ARMADA_39X_CLK
+ select CACHE_L2X0
+ select HAVE_ARM_SCU
+ select HAVE_ARM_TWD if SMP
+ select HAVE_SMP
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_39X
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 39x SoC with device tree.
+
config MACH_ARMADA_XP
bool "Marvell Armada XP boards" if ARCH_MULTI_V7
select ARMADA_XP_CLK
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 89a139ed7d5b..afee9083ad92 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -184,7 +184,7 @@ static void __init mvebu_dt_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-static const char * const armada_370_xp_dt_compat[] = {
+static const char * const armada_370_xp_dt_compat[] __initconst = {
"marvell,armada-370-xp",
NULL,
};
@@ -205,7 +205,7 @@ DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
.dt_compat = armada_370_xp_dt_compat,
MACHINE_END
-static const char * const armada_375_dt_compat[] = {
+static const char * const armada_375_dt_compat[] __initconst = {
"marvell,armada375",
NULL,
};
@@ -219,7 +219,7 @@ DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
.dt_compat = armada_375_dt_compat,
MACHINE_END
-static const char * const armada_38x_dt_compat[] = {
+static const char * const armada_38x_dt_compat[] __initconst = {
"marvell,armada380",
"marvell,armada385",
NULL,
@@ -232,3 +232,17 @@ DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
.restart = mvebu_restart,
.dt_compat = armada_38x_dt_compat,
MACHINE_END
+
+static const char * const armada_39x_dt_compat[] __initconst = {
+ "marvell,armada390",
+ "marvell,armada398",
+ NULL,
+};
+
+DT_MACHINE_START(ARMADA_39X_DT, "Marvell Armada 39x (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
+ .init_irq = mvebu_init_irq,
+ .restart = mvebu_restart,
+ .dt_compat = armada_39x_dt_compat,
+MACHINE_END
diff --git a/arch/arm/mach-mvebu/dove.c b/arch/arm/mach-mvebu/dove.c
index b50464ec1130..5a1741500a30 100644
--- a/arch/arm/mach-mvebu/dove.c
+++ b/arch/arm/mach-mvebu/dove.c
@@ -27,7 +27,7 @@ static void __init dove_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-static const char * const dove_dt_compat[] = {
+static const char * const dove_dt_compat[] __initconst = {
"marvell,dove",
NULL
};
diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c
index 6b5310828eb2..925f75f54268 100644
--- a/arch/arm/mach-mvebu/kirkwood.c
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@ -186,7 +186,7 @@ static void __init kirkwood_dt_init(void)
of_platform_populate(NULL, of_default_bus_match_table, auxdata, NULL);
}
-static const char * const kirkwood_dt_board_compat[] = {
+static const char * const kirkwood_dt_board_compat[] __initconst = {
"marvell,kirkwood",
NULL
};
diff --git a/arch/arm/mach-mvebu/platsmp-a9.c b/arch/arm/mach-mvebu/platsmp-a9.c
index 2ec1a42b4321..df0a9cc5da59 100644
--- a/arch/arm/mach-mvebu/platsmp-a9.c
+++ b/arch/arm/mach-mvebu/platsmp-a9.c
@@ -110,3 +110,5 @@ CPU_METHOD_OF_DECLARE(mvebu_armada_375_smp, "marvell,armada-375-smp",
&mvebu_cortex_a9_smp_ops);
CPU_METHOD_OF_DECLARE(mvebu_armada_380_smp, "marvell,armada-380-smp",
&armada_38x_smp_ops);
+CPU_METHOD_OF_DECLARE(mvebu_armada_390_smp, "marvell,armada-390-smp",
+ &armada_38x_smp_ops);
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 8b9f5e202ccf..4f4e22206ae5 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
void __iomem *mpsoc_base;
u32 reg;
+ pr_warn("CPU idle is currently broken on Armada 38x: disabling");
+ return 0;
+
np = of_find_compatible_node(NULL, NULL,
"marvell,armada-380-coherency-fabric");
if (!np)
@@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
return 0;
of_node_put(np);
+ /*
+ * Currently the CPU idle support for Armada 38x is broken, as
+ * the CPU hotplug uses some of the CPU idle functions it is
+ * broken too, so let's disable it
+ */
+ if (of_machine_is_compatible("marvell,armada380")) {
+ cpu_hotplug_disable();
+ pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
+ }
+
if (of_machine_is_compatible("marvell,armadaxp"))
ret = armada_xp_cpuidle_init();
else if (of_machine_is_compatible("marvell,armada370"))
@@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
return ret;
mvebu_v7_pmsu_enable_l2_powerdown_onidle();
- platform_device_register(&mvebu_v7_cpuidle_device);
+ if (mvebu_v7_cpuidle_device.name)
+ platform_device_register(&mvebu_v7_cpuidle_device);
cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
return 0;
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 34b4c0044961..dd94567c3628 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -71,13 +71,7 @@ static unsigned int mpui7xx_sleep_save[MPUI7XX_SLEEP_SAVE_SIZE];
static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE];
static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
-#ifndef CONFIG_OMAP_32K_TIMER
-
-static unsigned short enable_dyn_sleep = 0;
-
-#else
-
-static unsigned short enable_dyn_sleep = 1;
+static unsigned short enable_dyn_sleep;
static ssize_t idle_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
@@ -90,8 +84,9 @@ static ssize_t idle_store(struct kobject *kobj, struct kobj_attribute *attr,
{
unsigned short value;
if (sscanf(buf, "%hu", &value) != 1 ||
- (value != 0 && value != 1)) {
- printk(KERN_ERR "idle_sleep_store: Invalid value\n");
+ (value != 0 && value != 1) ||
+ (value != 0 && !IS_ENABLED(CONFIG_OMAP_32K_TIMER))) {
+ pr_err("idle_sleep_store: Invalid value\n");
return -EINVAL;
}
enable_dyn_sleep = value;
@@ -101,7 +96,6 @@ static ssize_t idle_store(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sleep_while_idle_attr =
__ATTR(sleep_while_idle, 0644, idle_show, idle_store);
-#endif
static void (*omap_sram_suspend)(unsigned long r0, unsigned long r1) = NULL;
@@ -115,16 +109,11 @@ void omap1_pm_idle(void)
{
extern __u32 arm_idlect1_mask;
__u32 use_idlect1 = arm_idlect1_mask;
- int do_sleep = 0;
local_fiq_disable();
#if defined(CONFIG_OMAP_MPU_TIMER) && !defined(CONFIG_OMAP_DM_TIMER)
-#warning Enable 32kHz OS timer in order to allow sleep states in idle
use_idlect1 = use_idlect1 & ~(1 << 9);
-#else
- if (enable_dyn_sleep)
- do_sleep = 1;
#endif
#ifdef CONFIG_OMAP_DM_TIMER
@@ -134,10 +123,12 @@ void omap1_pm_idle(void)
if (omap_dma_running())
use_idlect1 &= ~(1 << 6);
- /* We should be able to remove the do_sleep variable and multiple
+ /*
+ * We should be able to remove the do_sleep variable and multiple
* tests above as soon as drivers, timer and DMA code have been fixed.
- * Even the sleep block count should become obsolete. */
- if ((use_idlect1 != ~0) || !do_sleep) {
+ * Even the sleep block count should become obsolete.
+ */
+ if ((use_idlect1 != ~0) || !enable_dyn_sleep) {
__u32 saved_idlect1 = omap_readl(ARM_IDLECT1);
if (cpu_is_omap15xx())
@@ -635,15 +626,25 @@ static const struct platform_suspend_ops omap_pm_ops = {
static int __init omap_pm_init(void)
{
-
-#ifdef CONFIG_OMAP_32K_TIMER
- int error;
-#endif
+ int error = 0;
if (!cpu_class_is_omap1())
return -ENODEV;
- printk("Power Management for TI OMAP.\n");
+ pr_info("Power Management for TI OMAP.\n");
+
+ if (!IS_ENABLED(CONFIG_OMAP_32K_TIMER))
+ pr_info("OMAP1 PM: sleep states in idle disabled due to no 32KiHz timer\n");
+
+ if (!IS_ENABLED(CONFIG_OMAP_DM_TIMER))
+ pr_info("OMAP1 PM: sleep states in idle disabled due to no DMTIMER support\n");
+
+ if (IS_ENABLED(CONFIG_OMAP_32K_TIMER) &&
+ IS_ENABLED(CONFIG_OMAP_DM_TIMER)) {
+ /* OMAP16xx only */
+ pr_info("OMAP1 PM: sleep states in idle enabled\n");
+ enable_dyn_sleep = 1;
+ }
/*
* We copy the assembler sleep/wakeup routines to SRAM.
@@ -693,17 +694,15 @@ static int __init omap_pm_init(void)
omap_pm_init_debugfs();
#endif
-#ifdef CONFIG_OMAP_32K_TIMER
error = sysfs_create_file(power_kobj, &sleep_while_idle_attr.attr);
if (error)
printk(KERN_ERR "sysfs_create_file failed: %d\n", error);
-#endif
if (cpu_is_omap16xx()) {
/* configure LOW_PWR pin */
omap_cfg_reg(T20_1610_LOW_PWR);
}
- return 0;
+ return error;
}
__initcall(omap_pm_init);
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 2b8e47788062..6468f15f060c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -69,6 +69,7 @@ config SOC_DRA7XX
select ARM_GIC
select HAVE_ARM_ARCH_TIMER
select IRQ_CROSSBAR
+ select ARM_ERRATA_798181 if SMP
config ARCH_OMAP2PLUS
bool
@@ -80,6 +81,7 @@ config ARCH_OMAP2PLUS
select GENERIC_IRQ_CHIP
select MACH_OMAP_GENERIC
select MEMORY
+ select MFD_SYSCON
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
@@ -175,12 +177,6 @@ config MACH_OMAP3_BEAGLE
default y
select OMAP_PACKAGE_CBB
-config MACH_DEVKIT8000
- bool "DEVKIT8000 board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CUS
-
config MACH_OMAP_LDP
bool "OMAP3 LDP board"
depends on ARCH_OMAP3
@@ -225,12 +221,6 @@ config MACH_OMAP3_PANDORA
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
-config MACH_TOUCHBOOK
- bool "OMAP3 Touch Book"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBB
-
config MACH_NOKIA_N810
bool
@@ -260,12 +250,6 @@ config MACH_CM_T35
config MACH_CM_T3730
bool
-config MACH_SBC3530
- bool "OMAP3 SBC STALKER board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CUS
-
config OMAP3_SDRC_AC_TIMING
bool "Enable SDRC AC timing register changes"
depends on ARCH_OMAP3
@@ -278,27 +262,6 @@ config OMAP3_SDRC_AC_TIMING
wish to say no. Selecting yes without understanding what is
going on could result in system crashes;
-config OMAP4_ERRATA_I688
- bool "OMAP4 errata: Async Bridge Corruption"
- depends on (ARCH_OMAP4 || SOC_OMAP5) && !ARCH_MULTIPLATFORM
- select ARCH_HAS_BARRIERS
- help
- If a data is stalled inside asynchronous bridge because of back
- pressure, it may be accepted multiple times, creating pointer
- misalignment that will corrupt next transfers on that data path
- until next reset of the system (No recovery procedure once the
- issue is hit, the path remains consistently broken). Async bridge
- can be found on path between MPU to EMIF and MPU to L3 interconnect.
- This situation can happen only when the idle is initiated by a
- Master Request Disconnection (which is trigged by software when
- executing WFI on CPU).
- The work-around for this errata needs all the initiators connected
- through async bridge must ensure that data path is properly drained
- before issuing WFI. This condition will be met if one Strongly ordered
- access is performed to the target right before executing the WFI.
- In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained.
- IO barrier ensure that there is no synchronisation loss on initiators
- operating on both interconnect port simultaneously.
endmenu
endif
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index b83f18fcec9b..ec002bd4af77 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -243,7 +243,6 @@ obj-$(CONFIG_SOC_OMAP2420) += msdi.o
# Specific board support
obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o pdata-quirks.o
obj-$(CONFIG_MACH_OMAP3_BEAGLE) += board-omap3beagle.o
-obj-$(CONFIG_MACH_DEVKIT8000) += board-devkit8000.o
obj-$(CONFIG_MACH_OMAP_LDP) += board-ldp.o
obj-$(CONFIG_MACH_OMAP3530_LV_SOM) += board-omap3logic.o
obj-$(CONFIG_MACH_OMAP3_TORPEDO) += board-omap3logic.o
@@ -254,9 +253,6 @@ obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-video.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
-obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
-
-obj-$(CONFIG_MACH_SBC3530) += board-omap3stalker.o
# Platform specific device init code
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 91738a14ecbe..b5dfbc1b1fc6 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -492,51 +492,36 @@ static struct twl4030_platform_data cm_t35_twldata = {
#include <media/omap3isp.h>
#include "devices.h"
-static struct i2c_board_info cm_t35_isp_i2c_boardinfo[] = {
+static struct isp_platform_subdev cm_t35_isp_subdevs[] = {
{
- I2C_BOARD_INFO("mt9t001", 0x5d),
- },
- {
- I2C_BOARD_INFO("tvp5150", 0x5c),
- },
-};
-
-static struct isp_subdev_i2c_board_info cm_t35_isp_primary_subdevs[] = {
- {
- .board_info = &cm_t35_isp_i2c_boardinfo[0],
- .i2c_adapter_id = 3,
- },
- { NULL, 0, },
-};
-
-static struct isp_subdev_i2c_board_info cm_t35_isp_secondary_subdevs[] = {
- {
- .board_info = &cm_t35_isp_i2c_boardinfo[1],
+ .board_info = &(struct i2c_board_info){
+ I2C_BOARD_INFO("mt9t001", 0x5d)
+ },
.i2c_adapter_id = 3,
- },
- { NULL, 0, },
-};
-
-static struct isp_v4l2_subdevs_group cm_t35_isp_subdevs[] = {
- {
- .subdevs = cm_t35_isp_primary_subdevs,
- .interface = ISP_INTERFACE_PARALLEL,
- .bus = {
- .parallel = {
- .clk_pol = 1,
+ .bus = &(struct isp_bus_cfg){
+ .interface = ISP_INTERFACE_PARALLEL,
+ .bus = {
+ .parallel = {
+ .clk_pol = 1,
+ },
},
},
},
{
- .subdevs = cm_t35_isp_secondary_subdevs,
- .interface = ISP_INTERFACE_PARALLEL,
- .bus = {
- .parallel = {
- .clk_pol = 0,
+ .board_info = &(struct i2c_board_info){
+ I2C_BOARD_INFO("tvp5150", 0x5c),
+ },
+ .i2c_adapter_id = 3,
+ .bus = &(struct isp_bus_cfg){
+ .interface = ISP_INTERFACE_PARALLEL,
+ .bus = {
+ .parallel = {
+ .clk_pol = 0,
+ },
},
},
},
- { NULL, 0, },
+ { 0 },
};
static struct isp_platform_data cm_t35_isp_pdata = {
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
deleted file mode 100644
index d8e4f346936a..000000000000
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/*
- * board-devkit8000.c - TimLL Devkit8000
- *
- * Copyright (C) 2009 Kim Botherway
- * Copyright (C) 2010 Thomas Weber
- *
- * Modified from mach-omap2/board-omap3beagle.c
- *
- * Initial code: Syed Mohammed Khasim
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/leds.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand.h>
-#include <linux/mmc/host.h>
-#include <linux/usb/phy.h>
-
-#include <linux/regulator/machine.h>
-#include <linux/i2c/twl.h>
-#include "id.h"
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
-
-#include "common.h"
-#include "gpmc.h"
-#include <linux/platform_data/mtd-nand-omap2.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include <linux/platform_data/spi-omap2-mcspi.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/spi/spi.h>
-#include <linux/dm9000.h>
-#include <linux/interrupt.h>
-
-#include "sdram-micron-mt46h32m32lf-6.h"
-#include "mux.h"
-#include "hsmmc.h"
-#include "board-flash.h"
-#include "common-board-devices.h"
-
-#define NAND_CS 0
-
-#define OMAP_DM9000_GPIO_IRQ 25
-#define OMAP3_DEVKIT_TS_GPIO 27
-
-static struct mtd_partition devkit8000_nand_partitions[] = {
- /* All the partition sizes are listed in terms of NAND block size */
- {
- .name = "X-Loader",
- .offset = 0,
- .size = 4 * NAND_BLOCK_SIZE,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "U-Boot",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
- .size = 15 * NAND_BLOCK_SIZE,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "U-Boot Env",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
- .size = 1 * NAND_BLOCK_SIZE,
- },
- {
- .name = "Kernel",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
- .size = 32 * NAND_BLOCK_SIZE,
- },
- {
- .name = "File System",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct omap2_hsmmc_info mmc[] = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
- .gpio_wp = 29,
- .deferred = true,
- },
- {} /* Terminator */
-};
-
-static struct regulator_consumer_supply devkit8000_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-/* ads7846 on SPI */
-static struct regulator_consumer_supply devkit8000_vio_supply[] = {
- REGULATOR_SUPPLY("vcc", "spi2.0"),
-};
-
-static const struct display_timing devkit8000_lcd_videomode = {
- .pixelclock = { 0, 40000000, 0 },
-
- .hactive = { 0, 800, 0 },
- .hfront_porch = { 0, 1, 0 },
- .hback_porch = { 0, 1, 0 },
- .hsync_len = { 0, 48, 0 },
-
- .vactive = { 0, 480, 0 },
- .vfront_porch = { 0, 12, 0 },
- .vback_porch = { 0, 25, 0 },
- .vsync_len = { 0, 3, 0 },
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
-};
-
-static struct panel_dpi_platform_data devkit8000_lcd_pdata = {
- .name = "lcd",
- .source = "dpi.0",
-
- .data_lines = 24,
-
- .display_timing = &devkit8000_lcd_videomode,
-
- .enable_gpio = -1, /* filled in code */
- .backlight_gpio = -1,
-};
-
-static struct platform_device devkit8000_lcd_device = {
- .name = "panel-dpi",
- .id = 0,
- .dev.platform_data = &devkit8000_lcd_pdata,
-};
-
-static struct connector_dvi_platform_data devkit8000_dvi_connector_pdata = {
- .name = "dvi",
- .source = "tfp410.0",
- .i2c_bus_num = 1,
-};
-
-static struct platform_device devkit8000_dvi_connector_device = {
- .name = "connector-dvi",
- .id = 0,
- .dev.platform_data = &devkit8000_dvi_connector_pdata,
-};
-
-static struct encoder_tfp410_platform_data devkit8000_tfp410_pdata = {
- .name = "tfp410.0",
- .source = "dpi.0",
- .data_lines = 24,
- .power_down_gpio = -1, /* filled in code */
-};
-
-static struct platform_device devkit8000_tfp410_device = {
- .name = "tfp410",
- .id = 0,
- .dev.platform_data = &devkit8000_tfp410_pdata,
-};
-
-static struct connector_atv_platform_data devkit8000_tv_pdata = {
- .name = "tv",
- .source = "venc.0",
- .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
- .invert_polarity = false,
-};
-
-static struct platform_device devkit8000_tv_connector_device = {
- .name = "connector-analog-tv",
- .id = 0,
- .dev.platform_data = &devkit8000_tv_pdata,
-};
-
-static struct omap_dss_board_info devkit8000_dss_data = {
- .default_display_name = "lcd",
-};
-
-static uint32_t board_keymap[] = {
- KEY(0, 0, KEY_1),
- KEY(1, 0, KEY_2),
- KEY(2, 0, KEY_3),
- KEY(0, 1, KEY_4),
- KEY(1, 1, KEY_5),
- KEY(2, 1, KEY_6),
- KEY(3, 1, KEY_F5),
- KEY(0, 2, KEY_7),
- KEY(1, 2, KEY_8),
- KEY(2, 2, KEY_9),
- KEY(3, 2, KEY_F6),
- KEY(0, 3, KEY_F7),
- KEY(1, 3, KEY_0),
- KEY(2, 3, KEY_F8),
- PERSISTENT_KEY(4, 5),
- KEY(4, 4, KEY_VOLUMEUP),
- KEY(5, 5, KEY_VOLUMEDOWN),
- 0
-};
-
-static struct matrix_keymap_data board_map_data = {
- .keymap = board_keymap,
- .keymap_size = ARRAY_SIZE(board_keymap),
-};
-
-static struct twl4030_keypad_data devkit8000_kp_data = {
- .keymap_data = &board_map_data,
- .rows = 6,
- .cols = 6,
- .rep = 1,
-};
-
-static struct gpio_led gpio_leds[];
-
-static int devkit8000_twl_gpio_setup(struct device *dev,
- unsigned gpio, unsigned ngpio)
-{
- /* gpio + 0 is "mmc0_cd" (input/IRQ) */
- mmc[0].gpio_cd = gpio + 0;
- omap_hsmmc_late_init(mmc);
-
- /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
- gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
-
- /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
- devkit8000_lcd_pdata.enable_gpio = gpio + TWL4030_GPIO_MAX + 0;
-
- /* gpio + 7 is "DVI_PD" (out, active low) */
- devkit8000_tfp410_pdata.power_down_gpio = gpio + 7;
-
- return 0;
-}
-
-static struct twl4030_gpio_platform_data devkit8000_gpio_data = {
- .use_leds = true,
- .pulldowns = BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13)
- | BIT(15) | BIT(16) | BIT(17),
- .setup = devkit8000_twl_gpio_setup,
-};
-
-static struct regulator_consumer_supply devkit8000_vpll1_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dpi.0"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"),
-};
-
-/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
-static struct regulator_init_data devkit8000_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(devkit8000_vmmc1_supply),
- .consumer_supplies = devkit8000_vmmc1_supply,
-};
-
-/* VPLL1 for digital video outputs */
-static struct regulator_init_data devkit8000_vpll1 = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(devkit8000_vpll1_supplies),
- .consumer_supplies = devkit8000_vpll1_supplies,
-};
-
-/* VAUX4 for ads7846 and nubs */
-static struct regulator_init_data devkit8000_vio = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(devkit8000_vio_supply),
- .consumer_supplies = devkit8000_vio_supply,
-};
-
-static struct twl4030_platform_data devkit8000_twldata = {
- /* platform_data for children goes here */
- .gpio = &devkit8000_gpio_data,
- .vmmc1 = &devkit8000_vmmc1,
- .vpll1 = &devkit8000_vpll1,
- .vio = &devkit8000_vio,
- .keypad = &devkit8000_kp_data,
-};
-
-static int __init devkit8000_i2c_init(void)
-{
- omap3_pmic_get_config(&devkit8000_twldata,
- TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
- TWL_COMMON_REGULATOR_VDAC);
- omap3_pmic_init("tps65930", &devkit8000_twldata);
- /* Bus 3 is attached to the DVI port where devices like the pico DLP
- * projector don't work reliably with 400kHz */
- omap_register_i2c_bus(3, 400, NULL, 0);
- return 0;
-}
-
-static struct gpio_led gpio_leds[] = {
- {
- .name = "led1",
- .default_trigger = "heartbeat",
- .gpio = 186,
- .active_low = true,
- },
- {
- .name = "led2",
- .default_trigger = "mmc0",
- .gpio = 163,
- .active_low = true,
- },
- {
- .name = "ledB",
- .default_trigger = "none",
- .gpio = 153,
- .active_low = true,
- },
- {
- .name = "led3",
- .default_trigger = "none",
- .gpio = 164,
- .active_low = true,
- },
-};
-
-static struct gpio_led_platform_data gpio_led_info = {
- .leds = gpio_leds,
- .num_leds = ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_led_info,
- },
-};
-
-static struct gpio_keys_button gpio_buttons[] = {
- {
- .code = BTN_EXTRA,
- .gpio = 26,
- .desc = "user",
- .wakeup = 1,
- },
-};
-
-static struct gpio_keys_platform_data gpio_key_info = {
- .buttons = gpio_buttons,
- .nbuttons = ARRAY_SIZE(gpio_buttons),
-};
-
-static struct platform_device keys_gpio = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &gpio_key_info,
- },
-};
-
-#define OMAP_DM9000_BASE 0x2c000000
-
-static struct resource omap_dm9000_resources[] = {
- [0] = {
- .start = OMAP_DM9000_BASE,
- .end = (OMAP_DM9000_BASE + 0x4 - 1),
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = (OMAP_DM9000_BASE + 0x400),
- .end = (OMAP_DM9000_BASE + 0x400 + 0x4 - 1),
- .flags = IORESOURCE_MEM,
- },
- [2] = {
- .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
- },
-};
-
-static struct dm9000_plat_data omap_dm9000_platdata = {
- .flags = DM9000_PLATF_16BITONLY,
-};
-
-static struct platform_device omap_dm9000_dev = {
- .name = "dm9000",
- .id = -1,
- .num_resources = ARRAY_SIZE(omap_dm9000_resources),
- .resource = omap_dm9000_resources,
- .dev = {
- .platform_data = &omap_dm9000_platdata,
- },
-};
-
-static void __init omap_dm9000_init(void)
-{
- unsigned char *eth_addr = omap_dm9000_platdata.dev_addr;
- struct omap_die_id odi;
- int ret;
-
- ret = gpio_request_one(OMAP_DM9000_GPIO_IRQ, GPIOF_IN, "dm9000 irq");
- if (ret < 0) {
- printk(KERN_ERR "Failed to request GPIO%d for dm9000 IRQ\n",
- OMAP_DM9000_GPIO_IRQ);
- return;
- }
-
- /* init the mac address using DIE id */
- omap_get_die_id(&odi);
-
- eth_addr[0] = 0x02; /* locally administered */
- eth_addr[1] = odi.id_1 & 0xff;
- eth_addr[2] = (odi.id_0 & 0xff000000) >> 24;
- eth_addr[3] = (odi.id_0 & 0x00ff0000) >> 16;
- eth_addr[4] = (odi.id_0 & 0x0000ff00) >> 8;
- eth_addr[5] = (odi.id_0 & 0x000000ff);
-}
-
-static struct platform_device *devkit8000_devices[] __initdata = {
- &leds_gpio,
- &keys_gpio,
- &omap_dm9000_dev,
- &devkit8000_lcd_device,
- &devkit8000_tfp410_device,
- &devkit8000_dvi_connector_device,
- &devkit8000_tv_connector_device,
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- /* nCS and IRQ for Devkit8000 ethernet */
- OMAP3_MUX(GPMC_NCS6, OMAP_MUX_MODE0),
- OMAP3_MUX(ETK_D11, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
-
- /* McSPI 2*/
- OMAP3_MUX(MCSPI2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCSPI2_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(MCSPI2_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCSPI2_CS0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(MCSPI2_CS1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
-
- /* PENDOWN GPIO */
- OMAP3_MUX(ETK_D13, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
-
- /* mUSB */
- OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* USB 1 */
- OMAP3_MUX(ETK_CTL, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(ETK_D8, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D9, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D0, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D2, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D4, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D5, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D6, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
- OMAP3_MUX(ETK_D7, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
-
- /* MMC 1 */
- OMAP3_MUX(SDMMC1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(SDMMC1_DAT7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* McBSP 2 */
- OMAP3_MUX(MCBSP2_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCBSP2_CLKX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCBSP2_DR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCBSP2_DX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
-
- /* I2C 1 */
- OMAP3_MUX(I2C1_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(I2C1_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* I2C 2 */
- OMAP3_MUX(I2C2_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* I2C 3 */
- OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* I2C 4 */
- OMAP3_MUX(I2C4_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(I2C4_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* serial ports */
- OMAP3_MUX(MCBSP3_CLKX, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(MCBSP3_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
- OMAP3_MUX(UART1_TX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(UART1_RX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* DSS */
- OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
-
- /* expansion port */
- /* McSPI 1 */
- OMAP3_MUX(MCSPI1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCSPI1_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCSPI1_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCSPI1_CS0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
- OMAP3_MUX(MCSPI1_CS3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
-
- /* HDQ */
- OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- /* McSPI4 */
- OMAP3_MUX(MCBSP1_CLKR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCBSP1_DX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCBSP1_DR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCBSP1_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT_PULLUP),
-
- /* MMC 2 */
- OMAP3_MUX(SDMMC2_DAT4, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(SDMMC2_DAT5, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(SDMMC2_DAT6, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(SDMMC2_DAT7, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
-
- /* I2C3 */
- OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
- OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
- OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(MCBSP1_FSR, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-
- OMAP3_MUX(GPMC_NCS7, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- OMAP3_MUX(GPMC_NCS3, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-
- /* TPS IRQ */
- OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_WAKEUP_EN | \
- OMAP_PIN_INPUT_PULLUP),
-
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static void __init devkit8000_init(void)
-{
- omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
- omap_serial_init();
- omap_sdrc_init(mt46h32m32lf6_sdrc_params,
- mt46h32m32lf6_sdrc_params);
-
- omap_dm9000_init();
-
- omap_hsmmc_init(mmc);
- devkit8000_i2c_init();
- omap_dm9000_resources[2].start = gpio_to_irq(OMAP_DM9000_GPIO_IRQ);
- platform_add_devices(devkit8000_devices,
- ARRAY_SIZE(devkit8000_devices));
-
- omap_display_init(&devkit8000_dss_data);
-
- omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL);
-
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(NULL);
- usbhs_init(&usbhs_bdata);
- board_nand_init(devkit8000_nand_partitions,
- ARRAY_SIZE(devkit8000_nand_partitions), NAND_CS,
- NAND_BUSWIDTH_16, NULL);
- omap_twl4030_audio_init("omap3beagle", NULL);
-
- /* Ensure SDRC pins are mux'd for self-refresh */
- omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
-}
-
-MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap35xx_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = devkit8000_init,
- .init_late = omap35xx_init_late,
- .init_time = omap3_secure_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
deleted file mode 100644
index 6311f4b1ee44..000000000000
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-omap3evm.c
- *
- * Copyright (C) 2008 Guangzhou EMA-Tech
- *
- * Modified from mach-omap2/board-omap3evm.c
- *
- * Initial code: Syed Mohammed Khasim
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/leds.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/i2c/twl.h>
-#include <linux/mmc/host.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/spi/spi.h>
-#include <linux/interrupt.h>
-#include <linux/smsc911x.h>
-#include <linux/platform_data/at24.h>
-#include <linux/usb/phy.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
-
-#include "common.h"
-#include "gpmc.h"
-#include <linux/platform_data/mtd-nand-omap2.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include "sdram-micron-mt46h32m32lf-6.h"
-#include "mux.h"
-#include "hsmmc.h"
-#include "common-board-devices.h"
-
-#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
-#include "gpmc-smsc911x.h"
-
-#define OMAP3STALKER_ETHR_START 0x2c000000
-#define OMAP3STALKER_ETHR_SIZE 1024
-#define OMAP3STALKER_ETHR_GPIO_IRQ 19
-#define OMAP3STALKER_SMC911X_CS 5
-
-static struct omap_smsc911x_platform_data smsc911x_cfg = {
- .cs = OMAP3STALKER_SMC911X_CS,
- .gpio_irq = OMAP3STALKER_ETHR_GPIO_IRQ,
- .gpio_reset = -EINVAL,
- .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS),
-};
-
-static inline void __init omap3stalker_init_eth(void)
-{
- omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP);
- gpmc_smsc911x_init(&smsc911x_cfg);
-}
-
-#else
-static inline void __init omap3stalker_init_eth(void)
-{
- return;
-}
-#endif
-
-/*
- * OMAP3 DSS control signals
- */
-
-#define DSS_ENABLE_GPIO 199
-#define LCD_PANEL_BKLIGHT_GPIO 210
-#define ENABLE_VPLL2_DEV_GRP 0xE0
-
-static void __init omap3_stalker_display_init(void)
-{
- return;
-}
-static struct connector_dvi_platform_data omap3stalker_dvi_connector_pdata = {
- .name = "dvi",
- .source = "tfp410.0",
- .i2c_bus_num = -1,
-};
-
-static struct platform_device omap3stalker_dvi_connector_device = {
- .name = "connector-dvi",
- .id = 0,
- .dev.platform_data = &omap3stalker_dvi_connector_pdata,
-};
-
-static struct encoder_tfp410_platform_data omap3stalker_tfp410_pdata = {
- .name = "tfp410.0",
- .source = "dpi.0",
- .data_lines = 24,
- .power_down_gpio = DSS_ENABLE_GPIO,
-};
-
-static struct platform_device omap3stalker_tfp410_device = {
- .name = "tfp410",
- .id = 0,
- .dev.platform_data = &omap3stalker_tfp410_pdata,
-};
-
-static struct connector_atv_platform_data omap3stalker_tv_pdata = {
- .name = "tv",
- .source = "venc.0",
- .connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
- .invert_polarity = false,
-};
-
-static struct platform_device omap3stalker_tv_connector_device = {
- .name = "connector-analog-tv",
- .id = 0,
- .dev.platform_data = &omap3stalker_tv_pdata,
-};
-
-static struct omap_dss_board_info omap3_stalker_dss_data = {
- .default_display_name = "dvi",
-};
-
-static struct regulator_consumer_supply omap3stalker_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-static struct regulator_consumer_supply omap3stalker_vsim_supply[] = {
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
-};
-
-/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
-static struct regulator_init_data omap3stalker_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3stalker_vmmc1_supply),
- .consumer_supplies = omap3stalker_vmmc1_supply,
-};
-
-/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
-static struct regulator_init_data omap3stalker_vsim = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 3000000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3stalker_vsim_supply),
- .consumer_supplies = omap3stalker_vsim_supply,
-};
-
-static struct omap2_hsmmc_info mmc[] = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = -EINVAL,
- .gpio_wp = 23,
- .deferred = true,
- },
- {} /* Terminator */
-};
-
-static struct gpio_keys_button gpio_buttons[] = {
- {
- .code = BTN_EXTRA,
- .gpio = 18,
- .desc = "user",
- .wakeup = 1,
- },
-};
-
-static struct gpio_keys_platform_data gpio_key_info = {
- .buttons = gpio_buttons,
- .nbuttons = ARRAY_SIZE(gpio_buttons),
-};
-
-static struct platform_device keys_gpio = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &gpio_key_info,
- },
-};
-
-static struct gpio_led gpio_leds[] = {
- {
- .name = "stalker:D8:usr0",
- .default_trigger = "default-on",
- .gpio = 126,
- },
- {
- .name = "stalker:D9:usr1",
- .default_trigger = "default-on",
- .gpio = 127,
- },
- {
- .name = "stalker:D3:mmc0",
- .gpio = -EINVAL, /* gets replaced */
- .active_low = true,
- .default_trigger = "mmc0",
- },
- {
- .name = "stalker:D4:heartbeat",
- .gpio = -EINVAL, /* gets replaced */
- .active_low = true,
- .default_trigger = "heartbeat",
- },
-};
-
-static struct gpio_led_platform_data gpio_led_info = {
- .leds = gpio_leds,
- .num_leds = ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_led_info,
- },
-};
-
-static int
-omap3stalker_twl_gpio_setup(struct device *dev,
- unsigned gpio, unsigned ngpio)
-{
- /* gpio + 0 is "mmc0_cd" (input/IRQ) */
- mmc[0].gpio_cd = gpio + 0;
- omap_hsmmc_late_init(mmc);
-
- /*
- * Most GPIOs are for USB OTG. Some are mostly sent to
- * the P2 connector; notably LEDA for the LCD backlight.
- */
-
- /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
- gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW,
- "EN_LCD_BKL");
-
- /* gpio + 7 == DVI Enable */
- gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI");
-
- /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */
- gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
- /* GPIO + 13 == ledsync (out, heartbeat) */
- gpio_leds[3].gpio = gpio + 13;
-
- platform_device_register(&leds_gpio);
- return 0;
-}
-
-static struct twl4030_gpio_platform_data omap3stalker_gpio_data = {
- .use_leds = true,
- .setup = omap3stalker_twl_gpio_setup,
-};
-
-static uint32_t board_keymap[] = {
- KEY(0, 0, KEY_LEFT),
- KEY(0, 1, KEY_DOWN),
- KEY(0, 2, KEY_ENTER),
- KEY(0, 3, KEY_M),
-
- KEY(1, 0, KEY_RIGHT),
- KEY(1, 1, KEY_UP),
- KEY(1, 2, KEY_I),
- KEY(1, 3, KEY_N),
-
- KEY(2, 0, KEY_A),
- KEY(2, 1, KEY_E),
- KEY(2, 2, KEY_J),
- KEY(2, 3, KEY_O),
-
- KEY(3, 0, KEY_B),
- KEY(3, 1, KEY_F),
- KEY(3, 2, KEY_K),
- KEY(3, 3, KEY_P)
-};
-
-static struct matrix_keymap_data board_map_data = {
- .keymap = board_keymap,
- .keymap_size = ARRAY_SIZE(board_keymap),
-};
-
-static struct twl4030_keypad_data omap3stalker_kp_data = {
- .keymap_data = &board_map_data,
- .rows = 4,
- .cols = 4,
- .rep = 1,
-};
-
-static struct twl4030_platform_data omap3stalker_twldata = {
- /* platform_data for children goes here */
- .keypad = &omap3stalker_kp_data,
- .gpio = &omap3stalker_gpio_data,
- .vmmc1 = &omap3stalker_vmmc1,
- .vsim = &omap3stalker_vsim,
-};
-
-static struct at24_platform_data fram_info = {
- .byte_len = (64 * 1024) / 8,
- .page_size = 8192,
- .flags = AT24_FLAG_ADDR16 | AT24_FLAG_IRUGO,
-};
-
-static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = {
- {
- I2C_BOARD_INFO("24c64", 0x50),
- .flags = I2C_CLIENT_WAKE,
- .platform_data = &fram_info,
- },
-};
-
-static int __init omap3_stalker_i2c_init(void)
-{
- omap3_pmic_get_config(&omap3stalker_twldata,
- TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
- TWL_COMMON_PDATA_AUDIO,
- TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
-
- omap3stalker_twldata.vdac->constraints.apply_uV = true;
- omap3stalker_twldata.vpll2->constraints.apply_uV = true;
- omap3stalker_twldata.vpll2->constraints.name = "VDVI";
-
- omap3_pmic_init("twl4030", &omap3stalker_twldata);
- omap_register_i2c_bus(2, 400, NULL, 0);
- omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3,
- ARRAY_SIZE(omap3stalker_i2c_boardinfo3));
- return 0;
-}
-
-#define OMAP3_STALKER_TS_GPIO 175
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 2,
- .reset_gpio = 21,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct platform_device *omap3_stalker_devices[] __initdata = {
- &keys_gpio,
- &omap3stalker_tfp410_device,
- &omap3stalker_dvi_connector_device,
- &omap3stalker_tv_connector_device,
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
- OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
- {.reg_offset = OMAP_MUX_TERMINATOR},
-};
-#endif
-
-static struct regulator_consumer_supply dummy_supplies[] = {
- REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
- REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-static void __init omap3_stalker_init(void)
-{
- regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
- omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
-
- omap_mux_init_gpio(23, OMAP_PIN_INPUT);
- omap_hsmmc_init(mmc);
-
- omap3_stalker_i2c_init();
-
- platform_add_devices(omap3_stalker_devices,
- ARRAY_SIZE(omap3_stalker_devices));
-
- omap_display_init(&omap3_stalker_dss_data);
-
- omap_serial_init();
- omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(NULL);
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&usbhs_bdata);
- omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL);
-
- omap_mux_init_gpio(21, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP);
-
- omap3stalker_init_eth();
- omap3_stalker_display_init();
-/* Ensure SDRC pins are mux'd for self-refresh */
- omap_mux_init_signal("sdr_cke0", OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sdr_cke1", OMAP_PIN_OUTPUT);
-}
-
-MACHINE_START(SBC3530, "OMAP3 STALKER")
- /* Maintainer: Jason Lam -lzg@ema-tech.com */
- .atag_offset = 0x100,
- .map_io = omap3_map_io,
- .init_early = omap35xx_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = omap3_stalker_init,
- .init_late = omap35xx_init_late,
- .init_time = omap3_secure_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
deleted file mode 100644
index a01993e5500f..000000000000
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-omap3touchbook.c
- *
- * Copyright (C) 2009 Always Innovating
- *
- * Modified from mach-omap2/board-omap3beagleboard.c
- *
- * Initial code: Grégoire Gentil, Tim Yamin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/leds.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand.h>
-#include <linux/mmc/host.h>
-#include <linux/usb/phy.h>
-
-#include <linux/platform_data/spi-omap2-mcspi.h>
-#include <linux/spi/spi.h>
-
-#include <linux/spi/ads7846.h>
-
-#include <linux/regulator/machine.h>
-#include <linux/i2c/twl.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
-#include <asm/system_info.h>
-
-#include "common.h"
-#include "gpmc.h"
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include "mux.h"
-#include "hsmmc.h"
-#include "board-flash.h"
-#include "common-board-devices.h"
-
-#include <asm/setup.h>
-
-#define OMAP3_AC_GPIO 136
-#define OMAP3_TS_GPIO 162
-#define TB_BL_PWM_TIMER 9
-#define TB_KILL_POWER_GPIO 168
-
-#define NAND_CS 0
-
-static unsigned long touchbook_revision;
-
-static struct mtd_partition omap3touchbook_nand_partitions[] = {
- /* All the partition sizes are listed in terms of NAND block size */
- {
- .name = "X-Loader",
- .offset = 0,
- .size = 4 * NAND_BLOCK_SIZE,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "U-Boot",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
- .size = 15 * NAND_BLOCK_SIZE,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "U-Boot Env",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
- .size = 1 * NAND_BLOCK_SIZE,
- },
- {
- .name = "Kernel",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
- .size = 32 * NAND_BLOCK_SIZE,
- },
- {
- .name = "File System",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-#include "sdram-micron-mt46h32m32lf-6.h"
-
-static struct omap2_hsmmc_info mmc[] = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
- .gpio_wp = 29,
- .deferred = true,
- },
- {} /* Terminator */
-};
-
-static struct regulator_consumer_supply touchbook_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-static struct regulator_consumer_supply touchbook_vsim_supply[] = {
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
-};
-
-static struct gpio_led gpio_leds[];
-
-static int touchbook_twl_gpio_setup(struct device *dev,
- unsigned gpio, unsigned ngpio)
-{
- /* gpio + 0 is "mmc0_cd" (input/IRQ) */
- mmc[0].gpio_cd = gpio + 0;
- omap_hsmmc_late_init(mmc);
-
- /* REVISIT: need ehci-omap hooks for external VBUS
- * power switch and overcurrent detect
- */
- gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC");
-
- /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
- gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW,
- "nEN_USB_PWR");
-
- /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
- gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
-
- return 0;
-}
-
-static struct twl4030_gpio_platform_data touchbook_gpio_data = {
- .use_leds = true,
- .pullups = BIT(1),
- .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
- | BIT(15) | BIT(16) | BIT(17),
- .setup = touchbook_twl_gpio_setup,
-};
-
-static struct regulator_consumer_supply touchbook_vdac_supply[] = {
-{
- .supply = "vdac",
-},
-};
-
-static struct regulator_consumer_supply touchbook_vdvi_supply[] = {
-{
- .supply = "vdvi",
-},
-};
-
-/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
-static struct regulator_init_data touchbook_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(touchbook_vmmc1_supply),
- .consumer_supplies = touchbook_vmmc1_supply,
-};
-
-/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
-static struct regulator_init_data touchbook_vsim = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 3000000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(touchbook_vsim_supply),
- .consumer_supplies = touchbook_vsim_supply,
-};
-
-static struct twl4030_platform_data touchbook_twldata = {
- /* platform_data for children goes here */
- .gpio = &touchbook_gpio_data,
- .vmmc1 = &touchbook_vmmc1,
- .vsim = &touchbook_vsim,
-};
-
-static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
- {
- I2C_BOARD_INFO("bq27200", 0x55),
- },
-};
-
-static int __init omap3_touchbook_i2c_init(void)
-{
- /* Standard TouchBook bus */
- omap3_pmic_get_config(&touchbook_twldata,
- TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
- TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
-
- touchbook_twldata.vdac->num_consumer_supplies =
- ARRAY_SIZE(touchbook_vdac_supply);
- touchbook_twldata.vdac->consumer_supplies = touchbook_vdac_supply;
-
- touchbook_twldata.vpll2->constraints.name = "VDVI";
- touchbook_twldata.vpll2->num_consumer_supplies =
- ARRAY_SIZE(touchbook_vdvi_supply);
- touchbook_twldata.vpll2->consumer_supplies = touchbook_vdvi_supply;
-
- omap3_pmic_init("twl4030", &touchbook_twldata);
- /* Additional TouchBook bus */
- omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo,
- ARRAY_SIZE(touchBook_i2c_boardinfo));
-
- return 0;
-}
-
-static struct ads7846_platform_data ads7846_pdata = {
- .x_min = 100,
- .y_min = 265,
- .x_max = 3950,
- .y_max = 3750,
- .x_plate_ohms = 40,
- .pressure_max = 255,
- .debounce_max = 10,
- .debounce_tol = 5,
- .debounce_rep = 1,
- .gpio_pendown = OMAP3_TS_GPIO,
- .keep_vref_on = 1,
-};
-
-static struct gpio_led gpio_leds[] = {
- {
- .name = "touchbook::usr0",
- .default_trigger = "heartbeat",
- .gpio = 150,
- },
- {
- .name = "touchbook::usr1",
- .default_trigger = "mmc0",
- .gpio = 149,
- },
- {
- .name = "touchbook::pmu_stat",
- .gpio = -EINVAL, /* gets replaced */
- .active_low = true,
- },
-};
-
-static struct gpio_led_platform_data gpio_led_info = {
- .leds = gpio_leds,
- .num_leds = ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_led_info,
- },
-};
-
-static struct gpio_keys_button gpio_buttons[] = {
- {
- .code = BTN_EXTRA,
- .gpio = 7,
- .desc = "user",
- .wakeup = 1,
- },
- {
- .code = KEY_POWER,
- .gpio = 183,
- .desc = "power",
- .wakeup = 1,
- },
-};
-
-static struct gpio_keys_platform_data gpio_key_info = {
- .buttons = gpio_buttons,
- .nbuttons = ARRAY_SIZE(gpio_buttons),
-};
-
-static struct platform_device keys_gpio = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &gpio_key_info,
- },
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static struct usbhs_phy_data phy_data[] __initdata = {
- {
- .port = 2,
- .reset_gpio = 147,
- .vcc_gpio = -EINVAL,
- },
-};
-
-static struct platform_device *omap3_touchbook_devices[] __initdata = {
- &leds_gpio,
- &keys_gpio,
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
- .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-static void omap3_touchbook_poweroff(void)
-{
- int pwr_off = TB_KILL_POWER_GPIO;
-
- if (gpio_request_one(pwr_off, GPIOF_OUT_INIT_LOW, "DVI reset") < 0)
- printk(KERN_ERR "Unable to get kill power GPIO\n");
-}
-
-static int __init early_touchbook_revision(char *p)
-{
- if (!p)
- return 0;
-
- return kstrtoul(p, 10, &touchbook_revision);
-}
-early_param("tbr", early_touchbook_revision);
-
-static void __init omap3_touchbook_init(void)
-{
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-
- pm_power_off = omap3_touchbook_poweroff;
-
- if (system_rev >= 0x20 && system_rev <= 0x34301000) {
- omap_mux_init_gpio(23, OMAP_PIN_INPUT);
- mmc[0].gpio_wp = 23;
- } else {
- omap_mux_init_gpio(29, OMAP_PIN_INPUT);
- }
- omap_hsmmc_init(mmc);
-
- omap3_touchbook_i2c_init();
- platform_add_devices(omap3_touchbook_devices,
- ARRAY_SIZE(omap3_touchbook_devices));
- omap_serial_init();
- omap_sdrc_init(mt46h32m32lf6_sdrc_params,
- mt46h32m32lf6_sdrc_params);
-
- omap_mux_init_gpio(170, OMAP_PIN_INPUT);
- /* REVISIT leave DVI powered down until it's needed ... */
- gpio_request_one(176, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
-
- /* Touchscreen and accelerometer */
- omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata);
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(NULL);
-
- usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
- usbhs_init(&usbhs_bdata);
- board_nand_init(omap3touchbook_nand_partitions,
- ARRAY_SIZE(omap3touchbook_nand_partitions), NAND_CS,
- NAND_BUSWIDTH_16, NULL);
-
- /* Ensure SDRC pins are mux'd for self-refresh */
- omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
-}
-
-MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
- /* Maintainer: Gregoire Gentil - http://www.alwaysinnovating.com */
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3430_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = omap3_touchbook_init,
- .init_late = omap3430_init_late,
- .init_time = omap3_secure_sync32k_timer_init,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 6124db5c37ae..a699d7169307 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -23,6 +23,9 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/of_address.h>
+#include <linux/bootmem.h>
#include <asm/cpu.h>
#include <trace/events/power.h>
@@ -72,30 +75,110 @@ struct ti_clk_features ti_clk_features;
static bool clkdm_control = true;
static LIST_HEAD(clk_hw_omap_clocks);
-void __iomem *clk_memmaps[CLK_MAX_MEMMAPS];
+
+struct clk_iomap {
+ struct regmap *regmap;
+ void __iomem *mem;
+};
+
+static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS];
+
+static void clk_memmap_writel(u32 val, void __iomem *reg)
+{
+ struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+ struct clk_iomap *io = clk_memmaps[r->index];
+
+ if (io->regmap)
+ regmap_write(io->regmap, r->offset, val);
+ else
+ writel_relaxed(val, io->mem + r->offset);
+}
+
+static u32 clk_memmap_readl(void __iomem *reg)
+{
+ u32 val;
+ struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+ struct clk_iomap *io = clk_memmaps[r->index];
+
+ if (io->regmap)
+ regmap_read(io->regmap, r->offset, &val);
+ else
+ val = readl_relaxed(io->mem + r->offset);
+
+ return val;
+}
void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg)
{
- if (clk->flags & MEMMAP_ADDRESSING) {
- struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
- writel_relaxed(val, clk_memmaps[r->index] + r->offset);
- } else {
+ if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING)))
writel_relaxed(val, reg);
- }
+ else
+ clk_memmap_writel(val, reg);
}
u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg)
{
- u32 val;
+ if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING)))
+ return readl_relaxed(reg);
+ else
+ return clk_memmap_readl(reg);
+}
- if (clk->flags & MEMMAP_ADDRESSING) {
- struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
- val = readl_relaxed(clk_memmaps[r->index] + r->offset);
- } else {
- val = readl_relaxed(reg);
- }
+static struct ti_clk_ll_ops omap_clk_ll_ops = {
+ .clk_readl = clk_memmap_readl,
+ .clk_writel = clk_memmap_writel,
+};
- return val;
+/**
+ * omap2_clk_provider_init - initialize a clock provider
+ * @np: device node pointer for this clock provider
+ * @index: index for the clock provider
+ * @syscon: syscon regmap pointer
+ * @mem: iomem pointer for the clock provider memory area, only used if
+ * syscon is not provided
+ *
+ * Initializes a clock provider module (CM/PRM etc.), registering
+ * the memory mapping at the specified index and initializing the
+ * low-level driver infrastructure. Returns 0 on success.
+ */
+int __init omap2_clk_provider_init(struct device_node *np, int index,
+ struct regmap *syscon, void __iomem *mem)
+{
+ struct clk_iomap *io;
+
+ ti_clk_ll_ops = &omap_clk_ll_ops;
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+
+ io->regmap = syscon;
+ io->mem = mem;
+
+ clk_memmaps[index] = io;
+
+ ti_dt_clk_init_provider(np, index);
+
+ return 0;
+}
+
+/**
+ * omap2_clk_legacy_provider_init - initialize a legacy clock provider
+ * @index: index for the clock provider
+ * @mem: iomem pointer for the clock provider memory area
+ *
+ * Initializes a legacy clock provider memory mapping.
+ */
+void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
+{
+ struct clk_iomap *io;
+
+ ti_clk_ll_ops = &omap_clk_ll_ops;
+
+ io = memblock_virt_alloc(sizeof(*io), 0);
+
+ io->mem = mem;
+
+ clk_memmaps[index] = io;
}
/*
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index a56742f96000..652ed0ab86ec 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -271,10 +271,14 @@ extern const struct clksel_rate div_1_3_rates[];
extern const struct clksel_rate div_1_4_rates[];
extern const struct clksel_rate div31_1to31_rates[];
-extern void __iomem *clk_memmaps[];
-
extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
+struct regmap;
+
+int __init omap2_clk_provider_init(struct device_node *np, int index,
+ struct regmap *syscon, void __iomem *mem);
+void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem);
+
void __init ti_clk_init_features(void);
#endif
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index 6222e87a79b6..1fe3e6b833d2 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -70,6 +70,8 @@ int omap_cm_module_enable(u8 mode, u8 part, u16 inst, u16 clkctrl_offs);
int omap_cm_module_disable(u8 part, u16 inst, u16 clkctrl_offs);
extern int cm_register(struct cm_ll_data *cld);
extern int cm_unregister(struct cm_ll_data *cld);
+int omap_cm_init(void);
+int omap2_cm_base_init(void);
# endif
diff --git a/arch/arm/mach-omap2/cm2xxx.c b/arch/arm/mach-omap2/cm2xxx.c
index ef62ac9dcd05..3e5fd3587eb1 100644
--- a/arch/arm/mach-omap2/cm2xxx.c
+++ b/arch/arm/mach-omap2/cm2xxx.c
@@ -393,7 +393,7 @@ static struct cm_ll_data omap2xxx_cm_ll_data = {
.wait_module_ready = &omap2xxx_cm_wait_module_ready,
};
-int __init omap2xxx_cm_init(void)
+int __init omap2xxx_cm_init(const struct omap_prcm_init_data *data)
{
return cm_register(&omap2xxx_cm_ll_data);
}
diff --git a/arch/arm/mach-omap2/cm2xxx.h b/arch/arm/mach-omap2/cm2xxx.h
index 83b6c597b0e1..7b8c79c0ce27 100644
--- a/arch/arm/mach-omap2/cm2xxx.h
+++ b/arch/arm/mach-omap2/cm2xxx.h
@@ -63,7 +63,7 @@ extern u32 omap2xxx_cm_get_core_pll_config(void);
extern void omap2xxx_cm_set_mod_dividers(u32 mpu, u32 dsp, u32 gfx, u32 core,
u32 mdm);
-extern int __init omap2xxx_cm_init(void);
+int __init omap2xxx_cm_init(const struct omap_prcm_init_data *data);
#endif
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index cc5aac784278..7b181f929525 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -352,7 +352,7 @@ static struct cm_ll_data am33xx_cm_ll_data = {
.module_disable = &am33xx_cm_module_disable,
};
-int __init am33xx_cm_init(void)
+int __init am33xx_cm_init(const struct omap_prcm_init_data *data)
{
return cm_register(&am33xx_cm_ll_data);
}
diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h
index 046b4b2bc9d9..a91f7d282455 100644
--- a/arch/arm/mach-omap2/cm33xx.h
+++ b/arch/arm/mach-omap2/cm33xx.h
@@ -19,6 +19,7 @@
#include "cm.h"
#include "cm-regbits-33xx.h"
+#include "prcm-common.h"
/* CM base address */
#define AM33XX_CM_BASE 0x44e00000
@@ -374,6 +375,6 @@
#ifndef __ASSEMBLER__
-int am33xx_cm_init(void);
+int am33xx_cm_init(const struct omap_prcm_init_data *data);
#endif /* ASSEMBLER */
#endif
diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c
index ebead8f035f9..187fa4386718 100644
--- a/arch/arm/mach-omap2/cm3xxx.c
+++ b/arch/arm/mach-omap2/cm3xxx.c
@@ -671,8 +671,9 @@ static struct cm_ll_data omap3xxx_cm_ll_data = {
.wait_module_ready = &omap3xxx_cm_wait_module_ready,
};
-int __init omap3xxx_cm_init(void)
+int __init omap3xxx_cm_init(const struct omap_prcm_init_data *data)
{
+ omap2_clk_legacy_provider_init(TI_CLKM_CM, cm_base + OMAP3430_IVA2_MOD);
return cm_register(&omap3xxx_cm_ll_data);
}
diff --git a/arch/arm/mach-omap2/cm3xxx.h b/arch/arm/mach-omap2/cm3xxx.h
index 734a8581c0c4..bc444e2080a1 100644
--- a/arch/arm/mach-omap2/cm3xxx.h
+++ b/arch/arm/mach-omap2/cm3xxx.h
@@ -72,7 +72,7 @@ extern void omap3_cm_save_context(void);
extern void omap3_cm_restore_context(void);
extern void omap3_cm_save_scratchpad_contents(u32 *ptr);
-extern int __init omap3xxx_cm_init(void);
+int __init omap3xxx_cm_init(const struct omap_prcm_init_data *data);
#endif
diff --git a/arch/arm/mach-omap2/cm44xx.h b/arch/arm/mach-omap2/cm44xx.h
index 728d06a4af19..309a4c913448 100644
--- a/arch/arm/mach-omap2/cm44xx.h
+++ b/arch/arm/mach-omap2/cm44xx.h
@@ -23,7 +23,6 @@
#define OMAP4_CM_CLKSTCTRL 0x0000
#define OMAP4_CM_STATICDEP 0x0004
-void omap_cm_base_init(void);
-int omap4_cm_init(void);
+int omap4_cm_init(const struct omap_prcm_init_data *data);
#endif
diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
index 8fe02fcedc48..23e8bcec34e3 100644
--- a/arch/arm/mach-omap2/cm_common.c
+++ b/arch/arm/mach-omap2/cm_common.c
@@ -15,10 +15,14 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/bug.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "cm2xxx.h"
#include "cm3xxx.h"
+#include "cm33xx.h"
#include "cm44xx.h"
+#include "clock.h"
/*
* cm_ll_data: function pointers to SoC-specific implementations of
@@ -33,6 +37,9 @@ void __iomem *cm_base;
/* cm2_base: base virtual address of the CM2 IP block (OMAP44xx only) */
void __iomem *cm2_base;
+#define CM_NO_CLOCKS 0x1
+#define CM_SINGLE_INSTANCE 0x2
+
/**
* omap2_set_globals_cm - set the CM/CM2 base addresses (for early use)
* @cm: CM base virtual address
@@ -212,3 +219,152 @@ int cm_unregister(struct cm_ll_data *cld)
return 0;
}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static struct omap_prcm_init_data cm_data __initdata = {
+ .index = TI_CLKM_CM,
+ .init = omap4_cm_init,
+};
+
+static struct omap_prcm_init_data cm2_data __initdata = {
+ .index = TI_CLKM_CM2,
+ .init = omap4_cm_init,
+};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2
+static struct omap_prcm_init_data omap2_prcm_data __initdata = {
+ .index = TI_CLKM_CM,
+ .init = omap2xxx_cm_init,
+ .flags = CM_NO_CLOCKS | CM_SINGLE_INSTANCE,
+};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static struct omap_prcm_init_data omap3_cm_data __initdata = {
+ .index = TI_CLKM_CM,
+ .init = omap3xxx_cm_init,
+ .flags = CM_SINGLE_INSTANCE,
+
+ /*
+ * The IVA2 offset is a negative value; cm_base must be offset
+ * by this amount to land on the positive side of the iomap.
+ */
+ .offset = -OMAP3430_IVA2_MOD,
+};
+#endif
+
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_TI81XX)
+static struct omap_prcm_init_data am3_prcm_data __initdata = {
+ .index = TI_CLKM_CM,
+ .flags = CM_NO_CLOCKS | CM_SINGLE_INSTANCE,
+ .init = am33xx_cm_init,
+};
+#endif
+
+#ifdef CONFIG_SOC_AM43XX
+static struct omap_prcm_init_data am4_prcm_data __initdata = {
+ .index = TI_CLKM_CM,
+ .flags = CM_NO_CLOCKS | CM_SINGLE_INSTANCE,
+ .init = omap4_cm_init,
+};
+#endif
+
+static const struct of_device_id omap_cm_dt_match_table[] __initconst = {
+#ifdef CONFIG_ARCH_OMAP2
+ { .compatible = "ti,omap2-prcm", .data = &omap2_prcm_data },
+#endif
+#ifdef CONFIG_ARCH_OMAP3
+ { .compatible = "ti,omap3-cm", .data = &omap3_cm_data },
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ { .compatible = "ti,omap4-cm1", .data = &cm_data },
+ { .compatible = "ti,omap4-cm2", .data = &cm2_data },
+#endif
+#ifdef CONFIG_SOC_OMAP5
+ { .compatible = "ti,omap5-cm-core-aon", .data = &cm_data },
+ { .compatible = "ti,omap5-cm-core", .data = &cm2_data },
+#endif
+#ifdef CONFIG_SOC_DRA7XX
+ { .compatible = "ti,dra7-cm-core-aon", .data = &cm_data },
+ { .compatible = "ti,dra7-cm-core", .data = &cm2_data },
+#endif
+#ifdef CONFIG_SOC_AM33XX
+ { .compatible = "ti,am3-prcm", .data = &am3_prcm_data },
+#endif
+#ifdef CONFIG_SOC_AM43XX
+ { .compatible = "ti,am4-prcm", .data = &am4_prcm_data },
+#endif
+#ifdef CONFIG_SOC_TI81XX
+ { .compatible = "ti,dm814-prcm", .data = &am3_prcm_data },
+ { .compatible = "ti,dm816-prcm", .data = &am3_prcm_data },
+#endif
+ { }
+};
+
+/**
+ * omap2_cm_base_init - initialize iomappings for the CM drivers
+ *
+ * Detects and initializes the iomappings for the CM driver, based
+ * on the DT data. Returns 0 on success, a negative error value
+ * otherwise.
+ */
+int __init omap2_cm_base_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ struct omap_prcm_init_data *data;
+ void __iomem *mem;
+
+ for_each_matching_node_and_match(np, omap_cm_dt_match_table, &match) {
+ data = (struct omap_prcm_init_data *)match->data;
+
+ mem = of_iomap(np, 0);
+ if (!mem)
+ return -ENOMEM;
+
+ if (data->index == TI_CLKM_CM)
+ cm_base = mem + data->offset;
+
+ if (data->index == TI_CLKM_CM2)
+ cm2_base = mem + data->offset;
+
+ data->mem = mem;
+
+ data->np = np;
+
+ if (data->init && (data->flags & CM_SINGLE_INSTANCE ||
+ (cm_base && cm2_base)))
+ data->init(data);
+ }
+
+ return 0;
+}
+
+/**
+ * omap_cm_init - low level init for the CM drivers
+ *
+ * Initializes the low-level clock infrastructure for the CM drivers.
+ * Returns 0 on success, a negative error value on failure.
+ */
+int __init omap_cm_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ const struct omap_prcm_init_data *data;
+ int ret;
+
+ for_each_matching_node_and_match(np, omap_cm_dt_match_table, &match) {
+ data = match->data;
+
+ if (data->flags & CM_NO_CLOCKS)
+ continue;
+
+ ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 95a8cff66aff..2c0e07ed6b99 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -63,7 +63,7 @@ static void __iomem *_cm_bases[OMAP4_MAX_PRCM_PARTITIONS];
* Populates the base addresses of the _cm_bases
* array used for read/write of cm module registers.
*/
-void omap_cm_base_init(void)
+static void omap_cm_base_init(void)
{
_cm_bases[OMAP4430_PRM_PARTITION] = prm_base;
_cm_bases[OMAP4430_CM1_PARTITION] = cm_base;
@@ -514,8 +514,10 @@ static struct cm_ll_data omap4xxx_cm_ll_data = {
.module_disable = &omap4_cminst_module_disable,
};
-int __init omap4_cm_init(void)
+int __init omap4_cm_init(const struct omap_prcm_init_data *data)
{
+ omap_cm_base_init();
+
return cm_register(&omap4xxx_cm_ll_data);
}
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index 484cdadfb187..eae6a0e87c90 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,5 +30,4 @@ int __weak omap_secure_ram_reserve_memblock(void)
void __init omap_reserve(void)
{
omap_secure_ram_reserve_memblock();
- omap_barrier_reserve_memblock();
}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 46e24581d624..cf3cf22ecd42 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -200,9 +200,6 @@ void __init omap4_map_io(void);
void __init omap5_map_io(void);
void __init ti81xx_map_io(void);
-/* omap_barriers_init() is OMAP4 only */
-void omap_barriers_init(void);
-
/**
* omap_test_timeout - busy-loop, testing a condition
* @cond: condition to test until it evaluates to true
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index da041b4ab29c..af95a624fe71 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -14,6 +14,9 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include "soc.h"
#include "iomap.h"
@@ -25,13 +28,15 @@
#include "sdrc.h"
#include "pm.h"
#include "control.h"
+#include "clock.h"
/* Used by omap3_ctrl_save_padconf() */
#define START_PADCONF_SAVE 0x2
#define PADCONF_SAVE_DONE 0x1
static void __iomem *omap2_ctrl_base;
-static void __iomem *omap4_ctrl_pad_base;
+static s16 omap2_ctrl_offset;
+static struct regmap *omap2_ctrl_syscon;
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
struct omap3_scratchpad {
@@ -133,66 +138,79 @@ struct omap3_control_regs {
static struct omap3_control_regs control_context;
#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */
-#define OMAP_CTRL_REGADDR(reg) (omap2_ctrl_base + (reg))
-#define OMAP4_CTRL_PAD_REGADDR(reg) (omap4_ctrl_pad_base + (reg))
-
-void __init omap2_set_globals_control(void __iomem *ctrl,
- void __iomem *ctrl_pad)
+void __init omap2_set_globals_control(void __iomem *ctrl)
{
omap2_ctrl_base = ctrl;
- omap4_ctrl_pad_base = ctrl_pad;
-}
-
-void __iomem *omap_ctrl_base_get(void)
-{
- return omap2_ctrl_base;
}
u8 omap_ctrl_readb(u16 offset)
{
- return readb_relaxed(OMAP_CTRL_REGADDR(offset));
+ u32 val;
+ u8 byte_offset = offset & 0x3;
+
+ val = omap_ctrl_readl(offset);
+
+ return (val >> (byte_offset * 8)) & 0xff;
}
u16 omap_ctrl_readw(u16 offset)
{
- return readw_relaxed(OMAP_CTRL_REGADDR(offset));
+ u32 val;
+ u16 byte_offset = offset & 0x2;
+
+ val = omap_ctrl_readl(offset);
+
+ return (val >> (byte_offset * 8)) & 0xffff;
}
u32 omap_ctrl_readl(u16 offset)
{
- return readl_relaxed(OMAP_CTRL_REGADDR(offset));
+ u32 val;
+
+ offset &= 0xfffc;
+ if (!omap2_ctrl_syscon)
+ val = readl_relaxed(omap2_ctrl_base + offset);
+ else
+ regmap_read(omap2_ctrl_syscon, omap2_ctrl_offset + offset,
+ &val);
+
+ return val;
}
void omap_ctrl_writeb(u8 val, u16 offset)
{
- writeb_relaxed(val, OMAP_CTRL_REGADDR(offset));
+ u32 tmp;
+ u8 byte_offset = offset & 0x3;
+
+ tmp = omap_ctrl_readl(offset);
+
+ tmp &= 0xffffffff ^ (0xff << (byte_offset * 8));
+ tmp |= val << (byte_offset * 8);
+
+ omap_ctrl_writel(tmp, offset);
}
void omap_ctrl_writew(u16 val, u16 offset)
{
- writew_relaxed(val, OMAP_CTRL_REGADDR(offset));
-}
+ u32 tmp;
+ u8 byte_offset = offset & 0x2;
-void omap_ctrl_writel(u32 val, u16 offset)
-{
- writel_relaxed(val, OMAP_CTRL_REGADDR(offset));
-}
+ tmp = omap_ctrl_readl(offset);
-/*
- * On OMAP4 control pad are not addressable from control
- * core base. So the common omap_ctrl_read/write APIs breaks
- * Hence export separate APIs to manage the omap4 pad control
- * registers. This APIs will work only for OMAP4
- */
+ tmp &= 0xffffffff ^ (0xffff << (byte_offset * 8));
+ tmp |= val << (byte_offset * 8);
-u32 omap4_ctrl_pad_readl(u16 offset)
-{
- return readl_relaxed(OMAP4_CTRL_PAD_REGADDR(offset));
+ omap_ctrl_writel(tmp, offset);
}
-void omap4_ctrl_pad_writel(u32 val, u16 offset)
+void omap_ctrl_writel(u32 val, u16 offset)
{
- writel_relaxed(val, OMAP4_CTRL_PAD_REGADDR(offset));
+ offset &= 0xfffc;
+ if (!omap2_ctrl_syscon)
+ writel_relaxed(val, omap2_ctrl_base + offset);
+ else
+ regmap_write(omap2_ctrl_syscon, omap2_ctrl_offset + offset,
+ val);
}
#ifdef CONFIG_ARCH_OMAP3
@@ -611,3 +629,120 @@ void __init omap3_ctrl_init(void)
omap3_ctrl_setup_d2d_padconf();
}
#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */
+
+struct control_init_data {
+ int index;
+ s16 offset;
+};
+
+static struct control_init_data ctrl_data = {
+ .index = TI_CLKM_CTRL,
+};
+
+static const struct control_init_data omap2_ctrl_data = {
+ .index = TI_CLKM_CTRL,
+ .offset = -OMAP2_CONTROL_GENERAL,
+};
+
+static const struct of_device_id omap_scrm_dt_match_table[] = {
+ { .compatible = "ti,am3-scm", .data = &ctrl_data },
+ { .compatible = "ti,am4-scm", .data = &ctrl_data },
+ { .compatible = "ti,omap2-scm", .data = &omap2_ctrl_data },
+ { .compatible = "ti,omap3-scm", .data = &omap2_ctrl_data },
+ { .compatible = "ti,dm816-scrm", .data = &ctrl_data },
+ { .compatible = "ti,omap4-scm-core", .data = &ctrl_data },
+ { .compatible = "ti,omap5-scm-core", .data = &ctrl_data },
+ { .compatible = "ti,dra7-scm-core", .data = &ctrl_data },
+ { }
+};
+
+/**
+ * omap2_control_base_init - initialize iomappings for the control driver
+ *
+ * Detects and initializes the iomappings for the control driver, based
+ * on the DT data. Returns 0 on success, a negative error value
+ * otherwise.
+ */
+int __init omap2_control_base_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ struct control_init_data *data;
+
+ for_each_matching_node_and_match(np, omap_scrm_dt_match_table, &match) {
+ data = (struct control_init_data *)match->data;
+
+ omap2_ctrl_base = of_iomap(np, 0);
+ if (!omap2_ctrl_base)
+ return -ENOMEM;
+
+ omap2_ctrl_offset = data->offset;
+ }
+
+ return 0;
+}
+
+/**
+ * omap_control_init - low level init for the control driver
+ *
+ * Initializes the low-level clock infrastructure for the control driver.
+ * Returns 0 on success, a negative error value on failure.
+ */
+int __init omap_control_init(void)
+{
+ struct device_node *np, *scm_conf;
+ const struct of_device_id *match;
+ const struct omap_prcm_init_data *data;
+ int ret;
+ struct regmap *syscon;
+
+ for_each_matching_node_and_match(np, omap_scrm_dt_match_table, &match) {
+ data = match->data;
+
+ /*
+ * Check if we have an scm_conf node; if so, use it to
+ * access the clock registers.
+ */
+ scm_conf = of_get_child_by_name(np, "scm_conf");
+
+ if (scm_conf) {
+ syscon = syscon_node_to_regmap(scm_conf);
+
+ if (IS_ERR(syscon))
+ return PTR_ERR(syscon);
+
+ omap2_ctrl_syscon = syscon;
+
+ if (of_get_child_by_name(scm_conf, "clocks")) {
+ ret = omap2_clk_provider_init(scm_conf,
+ data->index,
+ syscon, NULL);
+ if (ret)
+ return ret;
+ }
+
+ iounmap(omap2_ctrl_base);
+ omap2_ctrl_base = NULL;
+ } else {
+ /* No scm_conf found, direct access */
+ ret = omap2_clk_provider_init(np, data->index, NULL,
+ omap2_ctrl_base);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * omap3_control_legacy_iomap_init - legacy iomap init for clock providers
+ *
+ * Legacy iomap init for the clock provider. Needed only in legacy boot mode,
+ * where the base addresses are not parsed from DT but are still required
+ * by the clock driver to be set up properly.
+ */
+void __init omap3_control_legacy_iomap_init(void)
+{
+ omap2_clk_legacy_provider_init(TI_CLKM_SCRM, omap2_ctrl_base);
+}
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index b8a487181210..80d2b7d8e36e 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -440,15 +440,12 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_ARCH_OMAP2PLUS
-extern void __iomem *omap_ctrl_base_get(void);
extern u8 omap_ctrl_readb(u16 offset);
extern u16 omap_ctrl_readw(u16 offset);
extern u32 omap_ctrl_readl(u16 offset);
-extern u32 omap4_ctrl_pad_readl(u16 offset);
extern void omap_ctrl_writeb(u8 val, u16 offset);
extern void omap_ctrl_writew(u16 val, u16 offset);
extern void omap_ctrl_writel(u32 val, u16 offset);
-extern void omap4_ctrl_pad_writel(u32 val, u16 offset);
extern void omap3_save_scratchpad_contents(void);
extern void omap3_clear_scratchpad_contents(void);
@@ -464,10 +461,11 @@ extern void omap_ctrl_write_dsp_boot_mode(u8 bootmode);
extern void omap3630_ctrl_disable_rta(void);
extern int omap3_ctrl_save_padconf(void);
void omap3_ctrl_init(void);
-extern void omap2_set_globals_control(void __iomem *ctrl,
- void __iomem *ctrl_pad);
+int omap2_control_base_init(void);
+int omap_control_init(void);
+void omap2_set_globals_control(void __iomem *ctrl);
+void __init omap3_control_legacy_iomap_init(void);
#else
-#define omap_ctrl_base_get() 0
#define omap_ctrl_readb(x) 0
#define omap_ctrl_readw(x) 0
#define omap_ctrl_readl(x) 0
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 57d429830e09..4b8e9f4d59ea 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -17,7 +17,6 @@
#include <linux/tick.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include "common.h"
#include "pm.h"
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 1afb50d6d636..990338fbaa59 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -74,82 +74,12 @@ omap_postcore_initcall(omap3_l3_init);
static struct resource omap3isp_resources[] = {
{
.start = OMAP3430_ISP_BASE,
- .end = OMAP3430_ISP_END,
+ .end = OMAP3430_ISP_BASE + 0x12fc,
.flags = IORESOURCE_MEM,
},
{
- .start = OMAP3430_ISP_CCP2_BASE,
- .end = OMAP3430_ISP_CCP2_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_CCDC_BASE,
- .end = OMAP3430_ISP_CCDC_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_HIST_BASE,
- .end = OMAP3430_ISP_HIST_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_H3A_BASE,
- .end = OMAP3430_ISP_H3A_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_PREV_BASE,
- .end = OMAP3430_ISP_PREV_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_RESZ_BASE,
- .end = OMAP3430_ISP_RESZ_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_SBL_BASE,
- .end = OMAP3430_ISP_SBL_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_CSI2A_REGS1_BASE,
- .end = OMAP3430_ISP_CSI2A_REGS1_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3430_ISP_CSIPHY2_BASE,
- .end = OMAP3430_ISP_CSIPHY2_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3630_ISP_CSI2A_REGS2_BASE,
- .end = OMAP3630_ISP_CSI2A_REGS2_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3630_ISP_CSI2C_REGS1_BASE,
- .end = OMAP3630_ISP_CSI2C_REGS1_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3630_ISP_CSIPHY1_BASE,
- .end = OMAP3630_ISP_CSIPHY1_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP3630_ISP_CSI2C_REGS2_BASE,
- .end = OMAP3630_ISP_CSI2C_REGS2_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE,
- .end = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE + 3,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL,
- .end = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL + 3,
+ .start = OMAP3430_ISP_BASE2,
+ .end = OMAP3430_ISP_BASE2 + 0x0600,
.flags = IORESOURCE_MEM,
},
{
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 7a050f9c37ff..f492ae147c6a 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -26,6 +26,8 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <video/omapdss.h>
#include "omap_hwmod.h"
@@ -104,6 +106,10 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
{ "dss_hdmi", "omapdss_hdmi", -1 },
};
+#define OMAP4_DSIPHY_SYSCON_OFFSET 0x78
+
+static struct regmap *omap4_dsi_mux_syscon;
+
static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
{
u32 enable_mask, enable_shift;
@@ -124,7 +130,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
return -ENODEV;
}
- reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+ regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
reg &= ~enable_mask;
reg &= ~pipd_mask;
@@ -132,7 +138,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
reg |= (lanes << enable_shift) & enable_mask;
reg |= (lanes << pipd_shift) & pipd_mask;
- omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+ regmap_write(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, reg);
return 0;
}
@@ -665,5 +671,10 @@ int __init omapdss_init_of(void)
return r;
}
+ /* add DSI info for omap4 */
+ node = of_find_node_by_name(NULL, "omap4_padconf_global");
+ if (node)
+ omap4_dsi_mux_syscon = syscon_node_to_regmap(node);
+
return 0;
}
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index d5951b17b736..72918c4973ea 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -96,14 +96,6 @@ int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
- if (gpmc_t) {
- err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t);
- if (err < 0) {
- pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n", err);
- return err;
- }
- }
-
memset(&s, 0, sizeof(struct gpmc_settings));
if (gpmc_nand_data->of_node)
gpmc_read_settings_dt(gpmc_nand_data->of_node, &s);
@@ -111,6 +103,16 @@ int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
gpmc_set_legacy(gpmc_nand_data, &s);
s.device_nand = true;
+
+ if (gpmc_t) {
+ err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
+ if (err < 0) {
+ pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
+ err);
+ return err;
+ }
+ }
+
err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
if (err < 0)
goto out_free_cs;
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 53d197e0c1f3..f899e77ff5e6 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -293,7 +293,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
if (ret < 0)
return ret;
- ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t);
+ ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_async);
if (ret < 0)
return ret;
@@ -331,7 +331,7 @@ static int omap2_onenand_setup_sync(void __iomem *onenand_base, int *freq_ptr)
if (ret < 0)
return ret;
- ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t);
+ ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_sync);
if (ret < 0)
return ret;
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 25f1beea453e..e3f713ffb06b 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -52,7 +52,10 @@ EXPORT_SYMBOL(omap_rev);
int omap_type(void)
{
- u32 val = 0;
+ static u32 val = OMAP2_DEVICETYPE_MASK;
+
+ if (val < OMAP2_DEVICETYPE_MASK)
+ return val;
if (cpu_is_omap24xx()) {
val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index c4871c55bd8b..820dde8b5b04 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -306,7 +306,6 @@ void __init am33xx_map_io(void)
void __init omap4_map_io(void)
{
iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
- omap_barriers_init();
}
#endif
@@ -314,7 +313,6 @@ void __init omap4_map_io(void)
void __init omap5_map_io(void)
{
iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
- omap_barriers_init();
}
#endif
/*
@@ -384,13 +382,9 @@ void __init omap2420_init_early(void)
omap2_set_globals_tap(OMAP242X_CLASS, OMAP2_L4_IO_ADDRESS(0x48014000));
omap2_set_globals_sdrc(OMAP2_L3_IO_ADDRESS(OMAP2420_SDRC_BASE),
OMAP2_L3_IO_ADDRESS(OMAP2420_SMS_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(OMAP242X_CTRL_BASE),
- NULL);
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP2420_PRM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP2420_CM_BASE), NULL);
+ omap2_control_base_init();
omap2xxx_check_revision();
- omap2xxx_prm_init();
- omap2xxx_cm_init();
+ omap2_prcm_base_init();
omap2xxx_voltagedomains_init();
omap242x_powerdomains_init();
omap242x_clockdomains_init();
@@ -414,13 +408,9 @@ void __init omap2430_init_early(void)
omap2_set_globals_tap(OMAP243X_CLASS, OMAP2_L4_IO_ADDRESS(0x4900a000));
omap2_set_globals_sdrc(OMAP2_L3_IO_ADDRESS(OMAP243X_SDRC_BASE),
OMAP2_L3_IO_ADDRESS(OMAP243X_SMS_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(OMAP243X_CTRL_BASE),
- NULL);
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP2430_PRM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP2430_CM_BASE), NULL);
+ omap2_control_base_init();
omap2xxx_check_revision();
- omap2xxx_prm_init();
- omap2xxx_cm_init();
+ omap2_prcm_base_init();
omap2xxx_voltagedomains_init();
omap243x_powerdomains_init();
omap243x_clockdomains_init();
@@ -448,21 +438,30 @@ void __init omap3_init_early(void)
omap2_set_globals_tap(OMAP343X_CLASS, OMAP2_L4_IO_ADDRESS(0x4830A000));
omap2_set_globals_sdrc(OMAP2_L3_IO_ADDRESS(OMAP343X_SDRC_BASE),
OMAP2_L3_IO_ADDRESS(OMAP343X_SMS_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE),
- NULL);
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP3430_CM_BASE), NULL);
+ /* XXX: remove these once OMAP3 is DT only */
+ if (!of_have_populated_dt()) {
+ omap2_set_globals_control(
+ OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE));
+ omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE));
+ omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP3430_CM_BASE),
+ NULL);
+ }
+ omap2_control_base_init();
omap3xxx_check_revision();
omap3xxx_check_features();
- omap3xxx_prm_init();
- omap3xxx_cm_init();
+ omap2_prcm_base_init();
+ /* XXX: remove these once OMAP3 is DT only */
+ if (!of_have_populated_dt()) {
+ omap3xxx_prm_init(NULL);
+ omap3xxx_cm_init(NULL);
+ }
omap3xxx_voltagedomains_init();
omap3xxx_powerdomains_init();
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
omap_hwmod_init_postsetup();
if (!of_have_populated_dt()) {
- omap3_prcm_legacy_iomaps_init();
+ omap3_control_legacy_iomap_init();
if (soc_is_am35xx())
omap_clk_soc_init = am35xx_clk_legacy_init;
else if (cpu_is_omap3630())
@@ -549,14 +548,10 @@ void __init ti814x_init_early(void)
{
omap2_set_globals_tap(TI814X_CLASS,
OMAP2_L4_IO_ADDRESS(TI81XX_TAP_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(TI81XX_CTRL_BASE),
- NULL);
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE), NULL);
+ omap2_control_base_init();
omap3xxx_check_revision();
ti81xx_check_features();
- am33xx_prm_init();
- am33xx_cm_init();
+ omap2_prcm_base_init();
omap3xxx_voltagedomains_init();
omap3xxx_powerdomains_init();
ti81xx_clockdomains_init();
@@ -570,14 +565,10 @@ void __init ti816x_init_early(void)
{
omap2_set_globals_tap(TI816X_CLASS,
OMAP2_L4_IO_ADDRESS(TI81XX_TAP_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(TI81XX_CTRL_BASE),
- NULL);
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE), NULL);
+ omap2_control_base_init();
omap3xxx_check_revision();
ti81xx_check_features();
- am33xx_prm_init();
- am33xx_cm_init();
+ omap2_prcm_base_init();
omap3xxx_voltagedomains_init();
omap3xxx_powerdomains_init();
ti81xx_clockdomains_init();
@@ -593,14 +584,10 @@ void __init am33xx_init_early(void)
{
omap2_set_globals_tap(AM335X_CLASS,
AM33XX_L4_WK_IO_ADDRESS(AM33XX_TAP_BASE));
- omap2_set_globals_control(AM33XX_L4_WK_IO_ADDRESS(AM33XX_CTRL_BASE),
- NULL);
- omap2_set_globals_prm(AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRCM_BASE));
- omap2_set_globals_cm(AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRCM_BASE), NULL);
+ omap2_control_base_init();
omap3xxx_check_revision();
am33xx_check_features();
- am33xx_prm_init();
- am33xx_cm_init();
+ omap2_prcm_base_init();
am33xx_powerdomains_init();
am33xx_clockdomains_init();
am33xx_hwmod_init();
@@ -619,16 +606,10 @@ void __init am43xx_init_early(void)
{
omap2_set_globals_tap(AM335X_CLASS,
AM33XX_L4_WK_IO_ADDRESS(AM33XX_TAP_BASE));
- omap2_set_globals_control(AM33XX_L4_WK_IO_ADDRESS(AM33XX_CTRL_BASE),
- NULL);
- omap2_set_globals_prm(AM33XX_L4_WK_IO_ADDRESS(AM43XX_PRCM_BASE));
- omap2_set_globals_cm(AM33XX_L4_WK_IO_ADDRESS(AM43XX_PRCM_BASE), NULL);
- omap_prm_base_init();
- omap_cm_base_init();
+ omap2_control_base_init();
omap3xxx_check_revision();
am33xx_check_features();
- omap44xx_prm_init();
- omap4_cm_init();
+ omap2_prcm_base_init();
am43xx_powerdomains_init();
am43xx_clockdomains_init();
am43xx_hwmod_init();
@@ -648,19 +629,12 @@ void __init omap4430_init_early(void)
{
omap2_set_globals_tap(OMAP443X_CLASS,
OMAP2_L4_IO_ADDRESS(OMAP443X_SCM_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(OMAP443X_SCM_BASE),
- OMAP2_L4_IO_ADDRESS(OMAP443X_CTRL_BASE));
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP4430_CM_BASE),
- OMAP2_L4_IO_ADDRESS(OMAP4430_CM2_BASE));
omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE));
- omap_prm_base_init();
- omap_cm_base_init();
+ omap2_control_base_init();
omap4xxx_check_revision();
omap4xxx_check_features();
- omap4_cm_init();
+ omap2_prcm_base_init();
omap4_pm_init_early();
- omap44xx_prm_init();
omap44xx_voltagedomains_init();
omap44xx_powerdomains_init();
omap44xx_clockdomains_init();
@@ -683,18 +657,11 @@ void __init omap5_init_early(void)
{
omap2_set_globals_tap(OMAP54XX_CLASS,
OMAP2_L4_IO_ADDRESS(OMAP54XX_SCM_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(OMAP54XX_SCM_BASE),
- OMAP2_L4_IO_ADDRESS(OMAP54XX_CTRL_BASE));
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP54XX_CM_CORE_AON_BASE),
- OMAP2_L4_IO_ADDRESS(OMAP54XX_CM_CORE_BASE));
omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
+ omap2_control_base_init();
omap4_pm_init_early();
- omap_prm_base_init();
- omap_cm_base_init();
- omap44xx_prm_init();
+ omap2_prcm_base_init();
omap5xxx_check_revision();
- omap4_cm_init();
omap54xx_voltagedomains_init();
omap54xx_powerdomains_init();
omap54xx_clockdomains_init();
@@ -715,18 +682,11 @@ void __init omap5_init_late(void)
void __init dra7xx_init_early(void)
{
omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
- omap2_set_globals_control(OMAP2_L4_IO_ADDRESS(OMAP54XX_SCM_BASE),
- OMAP2_L4_IO_ADDRESS(DRA7XX_CTRL_BASE));
- omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRM_BASE));
- omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(DRA7XX_CM_CORE_AON_BASE),
- OMAP2_L4_IO_ADDRESS(OMAP54XX_CM_CORE_BASE));
omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
+ omap2_control_base_init();
omap4_pm_init_early();
- omap_prm_base_init();
- omap_cm_base_init();
- omap44xx_prm_init();
+ omap2_prcm_base_init();
dra7xxx_check_revision();
- omap4_cm_init();
dra7xx_powerdomains_init();
dra7xx_clockdomains_init();
dra7xx_hwmod_init();
@@ -764,7 +724,11 @@ int __init omap_clk_init(void)
ti_clk_init_features();
if (of_have_populated_dt()) {
- ret = of_prcm_init();
+ ret = omap_control_init();
+ if (ret)
+ return ret;
+
+ ret = omap_prcm_init();
if (ret)
return ret;
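
Note on the io.c hunks above: the per-SoC init_early() paths stop hard-coding control/PRM/CM base addresses via omap2_set_globals_control()/_prm()/_cm() and instead call the generic omap2_control_base_init() and omap2_prcm_base_init(), which resolve the bases from device tree data. Below is a minimal, standalone sketch of that table-driven lookup pattern; the struct, table and function names are illustrative only (the real match data lives in omap_prcm_dt_match_table later in this series), and the two base addresses are taken from the PRM headers changed further down.

	#include <string.h>

	/* Illustrative only: maps a DT compatible string to a physical base,
	 * standing in for the omap2_set_globals_*() hard-coding it replaces. */
	struct prcm_base {
		const char *compatible;
		unsigned long phys_base;
	};

	static const struct prcm_base prcm_bases[] = {
		{ "ti,omap4-prm", 0x4a306000 },	/* OMAP4430_PRM_BASE */
		{ "ti,omap5-prm", 0x4ae06000 },	/* OMAP54XX_PRM_BASE */
		{ NULL, 0 },
	};

	static unsigned long prcm_lookup_base(const char *compatible)
	{
		const struct prcm_base *p;

		for (p = prcm_bases; p->compatible; p++)
			if (!strcmp(p->compatible, compatible))
				return p->phys_base;
		return 0;
	}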
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 78064b0d4db5..176eef6ef338 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -1053,7 +1053,7 @@ static void __init omap_mux_init_list(struct omap_mux_partition *partition,
struct omap_mux *entry;
#ifdef CONFIG_OMAP_MUX
- if (!superset->muxnames || !superset->muxnames[0]) {
+ if (!superset->muxnames[0]) {
superset++;
continue;
}
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index dec2b05d184b..af2851fbcdf0 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -70,13 +70,6 @@ extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
extern u32 rx51_secure_update_aux_cr(u32 set_bits, u32 clear_bits);
extern u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag);
-#ifdef CONFIG_OMAP4_ERRATA_I688
-extern int omap_barrier_reserve_memblock(void);
-#else
-static inline void omap_barrier_reserve_memblock(void)
-{ }
-#endif
-
#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
void set_cntfreq(void);
#else
diff --git a/arch/arm/mach-omap2/omap34xx.h b/arch/arm/mach-omap2/omap34xx.h
index c0d1b4b1653f..ed0024dda133 100644
--- a/arch/arm/mach-omap2/omap34xx.h
+++ b/arch/arm/mach-omap2/omap34xx.h
@@ -46,39 +46,9 @@
#define OMAP34XX_IC_BASE 0x48200000
-#define OMAP3430_ISP_BASE (L4_34XX_BASE + 0xBC000)
-#define OMAP3430_ISP_CBUFF_BASE (OMAP3430_ISP_BASE + 0x0100)
-#define OMAP3430_ISP_CCP2_BASE (OMAP3430_ISP_BASE + 0x0400)
-#define OMAP3430_ISP_CCDC_BASE (OMAP3430_ISP_BASE + 0x0600)
-#define OMAP3430_ISP_HIST_BASE (OMAP3430_ISP_BASE + 0x0A00)
-#define OMAP3430_ISP_H3A_BASE (OMAP3430_ISP_BASE + 0x0C00)
-#define OMAP3430_ISP_PREV_BASE (OMAP3430_ISP_BASE + 0x0E00)
-#define OMAP3430_ISP_RESZ_BASE (OMAP3430_ISP_BASE + 0x1000)
-#define OMAP3430_ISP_SBL_BASE (OMAP3430_ISP_BASE + 0x1200)
-#define OMAP3430_ISP_MMU_BASE (OMAP3430_ISP_BASE + 0x1400)
-#define OMAP3430_ISP_CSI2A_REGS1_BASE (OMAP3430_ISP_BASE + 0x1800)
-#define OMAP3430_ISP_CSIPHY2_BASE (OMAP3430_ISP_BASE + 0x1970)
-#define OMAP3630_ISP_CSI2A_REGS2_BASE (OMAP3430_ISP_BASE + 0x19C0)
-#define OMAP3630_ISP_CSI2C_REGS1_BASE (OMAP3430_ISP_BASE + 0x1C00)
-#define OMAP3630_ISP_CSIPHY1_BASE (OMAP3430_ISP_BASE + 0x1D70)
-#define OMAP3630_ISP_CSI2C_REGS2_BASE (OMAP3430_ISP_BASE + 0x1DC0)
-
-#define OMAP3430_ISP_END (OMAP3430_ISP_BASE + 0x06F)
-#define OMAP3430_ISP_CBUFF_END (OMAP3430_ISP_CBUFF_BASE + 0x077)
-#define OMAP3430_ISP_CCP2_END (OMAP3430_ISP_CCP2_BASE + 0x1EF)
-#define OMAP3430_ISP_CCDC_END (OMAP3430_ISP_CCDC_BASE + 0x0A7)
-#define OMAP3430_ISP_HIST_END (OMAP3430_ISP_HIST_BASE + 0x047)
-#define OMAP3430_ISP_H3A_END (OMAP3430_ISP_H3A_BASE + 0x05F)
-#define OMAP3430_ISP_PREV_END (OMAP3430_ISP_PREV_BASE + 0x09F)
-#define OMAP3430_ISP_RESZ_END (OMAP3430_ISP_RESZ_BASE + 0x0AB)
-#define OMAP3430_ISP_SBL_END (OMAP3430_ISP_SBL_BASE + 0x0FB)
-#define OMAP3430_ISP_MMU_END (OMAP3430_ISP_MMU_BASE + 0x06F)
-#define OMAP3430_ISP_CSI2A_REGS1_END (OMAP3430_ISP_CSI2A_REGS1_BASE + 0x16F)
-#define OMAP3430_ISP_CSIPHY2_END (OMAP3430_ISP_CSIPHY2_BASE + 0x00B)
-#define OMAP3630_ISP_CSI2A_REGS2_END (OMAP3630_ISP_CSI2A_REGS2_BASE + 0x3F)
-#define OMAP3630_ISP_CSI2C_REGS1_END (OMAP3630_ISP_CSI2C_REGS1_BASE + 0x16F)
-#define OMAP3630_ISP_CSIPHY1_END (OMAP3630_ISP_CSIPHY1_BASE + 0x00B)
-#define OMAP3630_ISP_CSI2C_REGS2_END (OMAP3630_ISP_CSI2C_REGS2_BASE + 0x3F)
+#define OMAP3430_ISP_BASE (L4_34XX_BASE + 0xBC000)
+#define OMAP3430_ISP_MMU_BASE (OMAP3430_ISP_BASE + 0x1400)
+#define OMAP3430_ISP_BASE2 (OMAP3430_ISP_BASE + 0x1800)
#define OMAP34XX_HSUSB_OTG_BASE (L4_34XX_BASE + 0xAB000)
#define OMAP34XX_USBTLL_BASE (L4_34XX_BASE + 0x62000)
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 7bb116a6f86f..16350eefa66c 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,75 +51,6 @@ static void __iomem *twd_base;
#define IRQ_LOCALTIMER 29
-#ifdef CONFIG_OMAP4_ERRATA_I688
-/* Used to implement memory barrier on DRAM path */
-#define OMAP4_DRAM_BARRIER_VA 0xfe600000
-
-void __iomem *dram_sync, *sram_sync;
-
-static phys_addr_t paddr;
-static u32 size;
-
-void omap_bus_sync(void)
-{
- if (dram_sync && sram_sync) {
- writel_relaxed(readl_relaxed(dram_sync), dram_sync);
- writel_relaxed(readl_relaxed(sram_sync), sram_sync);
- isb();
- }
-}
-EXPORT_SYMBOL(omap_bus_sync);
-
-static int __init omap4_sram_init(void)
-{
- struct device_node *np;
- struct gen_pool *sram_pool;
-
- np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
- if (!np)
- pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
- __func__);
- sram_pool = of_get_named_gen_pool(np, "sram", 0);
- if (!sram_pool)
- pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
- __func__);
- else
- sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
-
- return 0;
-}
-omap_arch_initcall(omap4_sram_init);
-
-/* Steal one page physical memory for barrier implementation */
-int __init omap_barrier_reserve_memblock(void)
-{
-
- size = ALIGN(PAGE_SIZE, SZ_1M);
- paddr = arm_memblock_steal(size, SZ_1M);
-
- return 0;
-}
-
-void __init omap_barriers_init(void)
-{
- struct map_desc dram_io_desc[1];
-
- dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
- dram_io_desc[0].pfn = __phys_to_pfn(paddr);
- dram_io_desc[0].length = size;
- dram_io_desc[0].type = MT_MEMORY_RW_SO;
- iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
- dram_sync = (void __iomem *) dram_io_desc[0].virtual;
-
- pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
- (long long) paddr, dram_io_desc[0].virtual);
-
-}
-#else
-void __init omap_barriers_init(void)
-{}
-#endif
-
void gic_dist_disable(void)
{
if (gic_dist_base_addr)
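
Note on the omap4-common.c hunk above: the deleted CONFIG_OMAP4_ERRATA_I688 block implemented a memory barrier workaround by reserving one strongly-ordered page on the DRAM path and one SRAM page, and omap_bus_sync() drained the interconnect by doing a read-back write to each before an isb(). The sketch below is a standalone model of that sequence, not the kernel code; the inline asm is only a compiler barrier standing in for the real isb().

	/* Standalone model of the removed omap_bus_sync(): touch a strongly
	 * ordered sync location on each memory path, then barrier. */
	static inline void bus_sync_model(volatile unsigned int *dram_sync,
					  volatile unsigned int *sram_sync)
	{
		if (dram_sync && sram_sync) {
			*dram_sync = *dram_sync;	/* writel_relaxed(readl_relaxed()) */
			*sram_sync = *sram_sync;
			__asm__ volatile("" ::: "memory");	/* stand-in for isb() */
		}
	}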
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index be9541e18650..166b18f515a2 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -690,6 +690,9 @@ struct dev_pm_domain omap_device_pm_domain = {
USE_PLATFORM_PM_SLEEP_OPS
.suspend_noirq = _od_suspend_noirq,
.resume_noirq = _od_resume_noirq,
+ .freeze_noirq = _od_suspend_noirq,
+ .thaw_noirq = _od_resume_noirq,
+ .restore_noirq = _od_resume_noirq,
}
};
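
Note on the omap_device.c hunk above: hibernation support is added simply by wiring the freeze/thaw/restore _noirq hooks to the existing suspend/resume _noirq handlers, so the same idle/enable path runs for both suspend-to-RAM and suspend-to-disk. A minimal standalone model of that reuse pattern (struct and function names here are illustrative, not the kernel's dev_pm_ops):

	struct pm_ops_model {
		int (*suspend_noirq)(void *dev);
		int (*resume_noirq)(void *dev);
		int (*freeze_noirq)(void *dev);
		int (*thaw_noirq)(void *dev);
		int (*restore_noirq)(void *dev);
	};

	static int od_suspend_noirq_model(void *dev) { return 0; }
	static int od_resume_noirq_model(void *dev)  { return 0; }

	static const struct pm_ops_model ops_model = {
		.suspend_noirq = od_suspend_noirq_model,
		.resume_noirq  = od_resume_noirq_model,
		.freeze_noirq  = od_suspend_noirq_model,	/* same handler as suspend */
		.thaw_noirq    = od_resume_noirq_model,	/* same handler as resume */
		.restore_noirq = od_resume_noirq_model,
	};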
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 355b08936871..752969ff9de0 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -171,6 +171,12 @@
*/
#define LINKS_PER_OCP_IF 2
+/*
+ * Address offset (in bytes) between the reset control and the reset
+ * status registers: 4 bytes on OMAP4
+ */
+#define OMAP4_RST_CTRL_ST_OFFSET 4
+
/**
* struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
* @enable_module: function to enable a module (via MODULEMODE)
@@ -3016,10 +3022,12 @@ static int _omap4_deassert_hardreset(struct omap_hwmod *oh,
if (ohri->st_shift)
pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
oh->name, ohri->name);
- return omap_prm_deassert_hardreset(ohri->rst_shift, 0,
+ return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->rst_shift,
oh->clkdm->pwrdm.ptr->prcm_partition,
oh->clkdm->pwrdm.ptr->prcm_offs,
- oh->prcm.omap4.rstctrl_offs, 0);
+ oh->prcm.omap4.rstctrl_offs,
+ oh->prcm.omap4.rstctrl_offs +
+ OMAP4_RST_CTRL_ST_OFFSET);
}
/**
@@ -3048,27 +3056,6 @@ static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh,
}
/**
- * _am33xx_assert_hardreset - call AM33XX PRM hardreset fn with hwmod args
- * @oh: struct omap_hwmod * to assert hardreset
- * @ohri: hardreset line data
- *
- * Call am33xx_prminst_assert_hardreset() with parameters extracted
- * from the hwmod @oh and the hardreset line data @ohri. Only
- * intended for use as an soc_ops function pointer. Passes along the
- * return value from am33xx_prminst_assert_hardreset(). XXX This
- * function is scheduled for removal when the PRM code is moved into
- * drivers/.
- */
-static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
- struct omap_hwmod_rst_info *ohri)
-
-{
- return omap_prm_assert_hardreset(ohri->rst_shift, 0,
- oh->clkdm->pwrdm.ptr->prcm_offs,
- oh->prcm.omap4.rstctrl_offs);
-}
-
-/**
* _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args
* @oh: struct omap_hwmod * to deassert hardreset
* @ohri: hardreset line data
@@ -3083,32 +3070,13 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,
struct omap_hwmod_rst_info *ohri)
{
- return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, 0,
+ return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift,
+ oh->clkdm->pwrdm.ptr->prcm_partition,
oh->clkdm->pwrdm.ptr->prcm_offs,
oh->prcm.omap4.rstctrl_offs,
oh->prcm.omap4.rstst_offs);
}
-/**
- * _am33xx_is_hardreset_asserted - call AM33XX PRM hardreset fn with hwmod args
- * @oh: struct omap_hwmod * to test hardreset
- * @ohri: hardreset line data
- *
- * Call am33xx_prminst_is_hardreset_asserted() with parameters
- * extracted from the hwmod @oh and the hardreset line data @ohri.
- * Only intended for use as an soc_ops function pointer. Passes along
- * the return value from am33xx_prminst_is_hardreset_asserted(). XXX
- * This function is scheduled for removal when the PRM code is moved
- * into drivers/.
- */
-static int _am33xx_is_hardreset_asserted(struct omap_hwmod *oh,
- struct omap_hwmod_rst_info *ohri)
-{
- return omap_prm_is_hardreset_asserted(ohri->rst_shift, 0,
- oh->clkdm->pwrdm.ptr->prcm_offs,
- oh->prcm.omap4.rstctrl_offs);
-}
-
/* Public functions */
u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs)
@@ -3908,21 +3876,13 @@ void __init omap_hwmod_init(void)
soc_ops.init_clkdm = _init_clkdm;
soc_ops.update_context_lost = _omap4_update_context_lost;
soc_ops.get_context_lost = _omap4_get_context_lost;
- } else if (soc_is_am43xx()) {
+ } else if (cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) {
soc_ops.enable_module = _omap4_enable_module;
soc_ops.disable_module = _omap4_disable_module;
soc_ops.wait_target_ready = _omap4_wait_target_ready;
soc_ops.assert_hardreset = _omap4_assert_hardreset;
- soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
- soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
- soc_ops.init_clkdm = _init_clkdm;
- } else if (cpu_is_ti816x() || soc_is_am33xx()) {
- soc_ops.enable_module = _omap4_enable_module;
- soc_ops.disable_module = _omap4_disable_module;
- soc_ops.wait_target_ready = _omap4_wait_target_ready;
- soc_ops.assert_hardreset = _am33xx_assert_hardreset;
soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
- soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
+ soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
soc_ops.init_clkdm = _init_clkdm;
} else {
WARN(1, "omap_hwmod: unknown SoC type\n");
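
Note on the omap_hwmod.c hunks above: with the PRM partition now passed through to the generic PRM layer, TI816x/AM33xx/AM43xx can share the OMAP4 assert/is_hardreset_asserted soc_ops, and the AM33xx-specific variants are dropped. For deassert, the reset-status register offset is derived from the reset-control offset using the 4-byte spacing documented in the new OMAP4_RST_CTRL_ST_OFFSET define. A trivial standalone sketch of that derivation (helper name is illustrative):

	#define RST_CTRL_ST_OFFSET 4	/* status register follows control by 4 bytes */

	static unsigned int rstst_offset_model(unsigned int rstctrl_offs)
	{
		return rstctrl_offs + RST_CTRL_ST_OFFSET;
	}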
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index 8eb85925e444..17e8004fc20f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -20,6 +20,7 @@
#include "omap_hwmod_33xx_43xx_common_data.h"
#include "prcm43xx.h"
#include "omap_hwmod_common_data.h"
+#include "hdq1w.h"
/* IP blocks */
@@ -516,6 +517,71 @@ static struct omap_hwmod am43xx_dss_rfbi_hwmod = {
.parent_hwmod = &am43xx_dss_core_hwmod,
};
+/* HDQ1W */
+static struct omap_hwmod_class_sysconfig am43xx_hdq1w_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0014,
+ .syss_offs = 0x0018,
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am43xx_hdq1w_hwmod_class = {
+ .name = "hdq1w",
+ .sysc = &am43xx_hdq1w_sysc,
+ .reset = &omap_hdq1w_reset,
+};
+
+static struct omap_hwmod am43xx_hdq1w_hwmod = {
+ .name = "hdq1w",
+ .class = &am43xx_hdq1w_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+static struct omap_hwmod_class_sysconfig am43xx_vpfe_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x104,
+ .sysc_flags = SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_SMART | MSTANDBY_NO),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class am43xx_vpfe_hwmod_class = {
+ .name = "vpfe",
+ .sysc = &am43xx_vpfe_sysc,
+};
+
+static struct omap_hwmod am43xx_vpfe0_hwmod = {
+ .name = "vpfe0",
+ .class = &am43xx_vpfe_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ .clkctrl_offs = AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET,
+ },
+ },
+};
+
+static struct omap_hwmod am43xx_vpfe1_hwmod = {
+ .name = "vpfe1",
+ .class = &am43xx_vpfe_hwmod_class,
+ .clkdm_name = "l3s_clkdm",
+ .prcm = {
+ .omap4 = {
+ .modulemode = MODULEMODE_SWCTRL,
+ .clkctrl_offs = AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET,
+ },
+ },
+};
+
/* Interfaces */
static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = {
.master = &am33xx_l3_main_hwmod,
@@ -790,6 +856,41 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__dss_rfbi = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+static struct omap_hwmod_ocp_if am43xx_l4_ls__hdq1w = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_hdq1w_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l3__vpfe0 = {
+ .master = &am43xx_vpfe0_hwmod,
+ .slave = &am33xx_l3_main_hwmod,
+ .clk = "l3_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l3__vpfe1 = {
+ .master = &am43xx_vpfe1_hwmod,
+ .slave = &am33xx_l3_main_hwmod,
+ .clk = "l3_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe0 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_vpfe0_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am43xx_vpfe1_hwmod,
+ .clk = "l4ls_gclk",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_wkup__synctimer,
&am43xx_l4_ls__timer8,
@@ -889,6 +990,11 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am43xx_l4_ls__dss,
&am43xx_l4_ls__dss_dispc,
&am43xx_l4_ls__dss_rfbi,
+ &am43xx_l4_ls__hdq1w,
+ &am43xx_l3__vpfe0,
+ &am43xx_l3__vpfe1,
+ &am43xx_l4_ls__vpfe0,
+ &am43xx_l4_ls__vpfe1,
NULL,
};
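
Note on the am43xx hwmod data above: each new IP block (hdq1w, vpfe0, vpfe1) is described by a hwmod entry carrying its class, clockdomain and PRCM CLKCTRL offset, and by omap_hwmod_ocp_if links naming the interconnect master and slave; appending those links to the NULL-terminated am43xx_hwmod_ocp_ifs[] list is what makes the blocks visible to the hwmod core. A minimal standalone model of that edge-list idea (types and names here are illustrative, not the kernel structs):

	struct hwmod_model { const char *name; };

	struct ocp_if_model {
		const struct hwmod_model *master;
		const struct hwmod_model *slave;
		const char *clk;
	};

	static const struct hwmod_model l4_ls_model = { "l4_ls" };
	static const struct hwmod_model hdq1w_model = { "hdq1w" };

	static const struct ocp_if_model l4_ls__hdq1w_model = {
		.master = &l4_ls_model,
		.slave  = &hdq1w_model,
		.clk    = "l4ls_gclk",
	};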
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 16fe7a1b7a35..0e64c2fac0b5 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1726,21 +1726,6 @@ static struct omap_hwmod_class dra7xx_timer_1ms_hwmod_class = {
.sysc = &dra7xx_timer_1ms_sysc,
};
-static struct omap_hwmod_class_sysconfig dra7xx_timer_secure_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class dra7xx_timer_secure_hwmod_class = {
- .name = "timer",
- .sysc = &dra7xx_timer_secure_sysc,
-};
-
static struct omap_hwmod_class_sysconfig dra7xx_timer_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
@@ -1804,7 +1789,7 @@ static struct omap_hwmod dra7xx_timer3_hwmod = {
/* timer4 */
static struct omap_hwmod dra7xx_timer4_hwmod = {
.name = "timer4",
- .class = &dra7xx_timer_secure_hwmod_class,
+ .class = &dra7xx_timer_hwmod_class,
.clkdm_name = "l4per_clkdm",
.main_clk = "timer4_gfclk_mux",
.prcm = {
@@ -1921,6 +1906,66 @@ static struct omap_hwmod dra7xx_timer11_hwmod = {
},
};
+/* timer13 */
+static struct omap_hwmod dra7xx_timer13_hwmod = {
+ .name = "timer13",
+ .class = &dra7xx_timer_hwmod_class,
+ .clkdm_name = "l4per3_clkdm",
+ .main_clk = "timer13_gfclk_mux",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER13_CLKCTRL_OFFSET,
+ .context_offs = DRA7XX_RM_L4PER3_TIMER13_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* timer14 */
+static struct omap_hwmod dra7xx_timer14_hwmod = {
+ .name = "timer14",
+ .class = &dra7xx_timer_hwmod_class,
+ .clkdm_name = "l4per3_clkdm",
+ .main_clk = "timer14_gfclk_mux",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER14_CLKCTRL_OFFSET,
+ .context_offs = DRA7XX_RM_L4PER3_TIMER14_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* timer15 */
+static struct omap_hwmod dra7xx_timer15_hwmod = {
+ .name = "timer15",
+ .class = &dra7xx_timer_hwmod_class,
+ .clkdm_name = "l4per3_clkdm",
+ .main_clk = "timer15_gfclk_mux",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER15_CLKCTRL_OFFSET,
+ .context_offs = DRA7XX_RM_L4PER3_TIMER15_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
+/* timer16 */
+static struct omap_hwmod dra7xx_timer16_hwmod = {
+ .name = "timer16",
+ .class = &dra7xx_timer_hwmod_class,
+ .clkdm_name = "l4per3_clkdm",
+ .main_clk = "timer16_gfclk_mux",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER16_CLKCTRL_OFFSET,
+ .context_offs = DRA7XX_RM_L4PER3_TIMER16_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
/*
* 'uart' class
*
@@ -3059,6 +3104,38 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer11 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* l4_per3 -> timer13 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer13 = {
+ .master = &dra7xx_l4_per3_hwmod,
+ .slave = &dra7xx_timer13_hwmod,
+ .clk = "l3_iclk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> timer14 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer14 = {
+ .master = &dra7xx_l4_per3_hwmod,
+ .slave = &dra7xx_timer14_hwmod,
+ .clk = "l3_iclk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> timer15 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer15 = {
+ .master = &dra7xx_l4_per3_hwmod,
+ .slave = &dra7xx_timer15_hwmod,
+ .clk = "l3_iclk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per3 -> timer16 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer16 = {
+ .master = &dra7xx_l4_per3_hwmod,
+ .slave = &dra7xx_timer16_hwmod,
+ .clk = "l3_iclk_div",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* l4_per1 -> uart1 */
static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart1 = {
.master = &dra7xx_l4_per1_hwmod,
@@ -3295,6 +3372,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l4_per1__timer9,
&dra7xx_l4_per1__timer10,
&dra7xx_l4_per1__timer11,
+ &dra7xx_l4_per3__timer13,
+ &dra7xx_l4_per3__timer14,
+ &dra7xx_l4_per3__timer15,
+ &dra7xx_l4_per3__timer16,
&dra7xx_l4_per1__uart1,
&dra7xx_l4_per1__uart2,
&dra7xx_l4_per1__uart3,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index e642b079e9f3..af11511dda50 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/ti_wilink_st.h>
-#include <linux/wl12xx.h>
#include <linux/platform_data/pinctrl-single.h>
#include <linux/platform_data/iommu-omap.h>
@@ -35,34 +34,6 @@ struct pdata_init {
struct of_dev_auxdata omap_auxdata_lookup[];
static struct twl4030_gpio_platform_data twl_gpio_auxdata;
-#if IS_ENABLED(CONFIG_WL12XX)
-
-static struct wl12xx_platform_data wl12xx __initdata;
-
-static void __init __used legacy_init_wl12xx(unsigned ref_clock,
- unsigned tcxo_clock,
- int gpio)
-{
- int res;
-
- wl12xx.board_ref_clock = ref_clock;
- wl12xx.board_tcxo_clock = tcxo_clock;
- wl12xx.irq = gpio_to_irq(gpio);
-
- res = wl12xx_set_platform_data(&wl12xx);
- if (res) {
- pr_err("error setting wl12xx data: %d\n", res);
- return;
- }
-}
-#else
-static inline void legacy_init_wl12xx(unsigned ref_clock,
- unsigned tcxo_clock,
- int gpio)
-{
-}
-#endif
-
#ifdef CONFIG_MACH_NOKIA_N8X0
static void __init omap2420_n8x0_legacy_init(void)
{
@@ -129,7 +100,6 @@ static void __init omap3_sbc_t3730_twl_init(void)
static void __init omap3_sbc_t3730_legacy_init(void)
{
omap3_sbc_t3x_usb_hub_init(167, "sb-t35 usb hub");
- legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 136);
}
static void __init omap3_sbc_t3530_legacy_init(void)
@@ -159,14 +129,12 @@ static struct platform_device btwilink_device = {
static void __init omap3_igep0020_rev_f_legacy_init(void)
{
- legacy_init_wl12xx(0, 0, 177);
platform_device_register(&wl18xx_device);
platform_device_register(&btwilink_device);
}
static void __init omap3_igep0030_rev_g_legacy_init(void)
{
- legacy_init_wl12xx(0, 0, 136);
platform_device_register(&wl18xx_device);
platform_device_register(&btwilink_device);
}
@@ -174,12 +142,6 @@ static void __init omap3_igep0030_rev_g_legacy_init(void)
static void __init omap3_evm_legacy_init(void)
{
hsmmc2_internal_input_clk();
- legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149);
-}
-
-static void __init omap3_zoom_legacy_init(void)
-{
- legacy_init_wl12xx(WL12XX_REFCLOCK_26, 0, 162);
}
static void am35xx_enable_emac_int(void)
@@ -246,7 +208,6 @@ static void __init omap3_sbc_t3517_legacy_init(void)
am35xx_emac_reset();
hsmmc2_internal_input_clk();
omap3_sbc_t3517_wifi_init();
- legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 145);
}
static void __init am3517_evm_legacy_init(void)
@@ -288,24 +249,6 @@ static void __init omap3_tao3530_legacy_init(void)
}
#endif /* CONFIG_ARCH_OMAP3 */
-#ifdef CONFIG_ARCH_OMAP4
-static void __init omap4_sdp_legacy_init(void)
-{
- legacy_init_wl12xx(WL12XX_REFCLOCK_26,
- WL12XX_TCXOCLOCK_26, 53);
-}
-
-static void __init omap4_panda_legacy_init(void)
-{
- legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 53);
-}
-
-static void __init var_som_om44_legacy_init(void)
-{
- legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 41);
-}
-#endif
-
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
static struct iommu_platform_data omap4_iommu_pdata = {
.reset_name = "mmu_cache",
@@ -314,13 +257,6 @@ static struct iommu_platform_data omap4_iommu_pdata = {
};
#endif
-#ifdef CONFIG_SOC_AM33XX
-static void __init am335x_evmsk_legacy_init(void)
-{
- legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 31);
-}
-#endif
-
#ifdef CONFIG_SOC_OMAP5
static void __init omap5_uevm_legacy_init(void)
{
@@ -421,19 +357,9 @@ static struct pdata_init pdata_quirks[] __initdata = {
{ "isee,omap3-igep0020-rev-f", omap3_igep0020_rev_f_legacy_init, },
{ "isee,omap3-igep0030-rev-g", omap3_igep0030_rev_g_legacy_init, },
{ "ti,omap3-evm-37xx", omap3_evm_legacy_init, },
- { "ti,omap3-zoom3", omap3_zoom_legacy_init, },
{ "ti,am3517-evm", am3517_evm_legacy_init, },
{ "technexion,omap3-tao3530", omap3_tao3530_legacy_init, },
#endif
-#ifdef CONFIG_ARCH_OMAP4
- { "ti,omap4-sdp", omap4_sdp_legacy_init, },
- { "ti,omap4-panda", omap4_panda_legacy_init, },
- { "variscite,var-dvk-om44", var_som_om44_legacy_init, },
- { "variscite,var-stk-om44", var_som_om44_legacy_init, },
-#endif
-#ifdef CONFIG_SOC_AM33XX
- { "ti,am335x-evmsk", am335x_evmsk_legacy_init, },
-#endif
#ifdef CONFIG_SOC_OMAP5
{ "ti,omap5-uevm", omap5_uevm_legacy_init, },
#endif
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index fe01c5a03aa2..b1aad7e1426c 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -75,9 +75,9 @@ static int omap2_enter_full_retention(void)
/* Clear old wake-up events */
/* REVISIT: These write to reserved bits? */
- omap2xxx_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
- omap2xxx_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
- omap2xxx_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, ~0);
+ omap_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
+ omap_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
+ omap_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, ~0);
pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
@@ -104,18 +104,16 @@ no_sleep:
clk_enable(osc_ck);
/* clear CORE wake-up events */
- omap2xxx_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
- omap2xxx_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
+ omap_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
+ omap_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
- omap2xxx_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, 0x4 | 0x1);
+ omap_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, 0x4 | 0x1);
/* MPU domain wake events */
- omap2xxx_prm_clear_mod_irqs(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET,
- 0x1);
+ omap_prm_clear_mod_irqs(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET, 0x1);
- omap2xxx_prm_clear_mod_irqs(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET,
- 0x20);
+ omap_prm_clear_mod_irqs(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET, 0x20);
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_ON);
@@ -143,9 +141,9 @@ static void omap2_enter_mpu_retention(void)
* it is in retention mode. */
if (omap2_allow_mpu_retention()) {
/* REVISIT: These write to reserved bits? */
- omap2xxx_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
- omap2xxx_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
- omap2xxx_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, ~0);
+ omap_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
+ omap_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
+ omap_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, ~0);
/* Try to enter MPU retention */
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 88721df6001d..87b98bf92366 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -137,9 +137,8 @@ static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
int c;
- c = omap3xxx_prm_clear_mod_irqs(WKUP_MOD, 1,
- ~(OMAP3430_ST_IO_MASK |
- OMAP3430_ST_IO_CHAIN_MASK));
+ c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
+ OMAP3430_ST_IO_CHAIN_MASK);
return c ? IRQ_HANDLED : IRQ_NONE;
}
@@ -153,14 +152,13 @@ static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
* these are handled in a separate handler to avoid acking
* IO events before parsing in mux code
*/
- c = omap3xxx_prm_clear_mod_irqs(WKUP_MOD, 1,
- OMAP3430_ST_IO_MASK |
- OMAP3430_ST_IO_CHAIN_MASK);
- c += omap3xxx_prm_clear_mod_irqs(CORE_MOD, 1, 0);
- c += omap3xxx_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0);
+ c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
+ OMAP3430_ST_IO_CHAIN_MASK));
+ c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
+ c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
if (omap_rev() > OMAP3430_REV_ES1_0) {
- c += omap3xxx_prm_clear_mod_irqs(CORE_MOD, 3, 0);
- c += omap3xxx_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0);
+ c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
+ c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
}
return c ? IRQ_HANDLED : IRQ_NONE;
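
Note on the pm24xx.c/pm34xx.c hunks above: the old omap3xxx_prm_clear_mod_irqs() took a mask of bits to *ignore*, while the generic omap_prm_clear_mod_irqs() takes a mask of bits to *clear*, which is why every caller's mask is inverted (an old ~(IO | IO_CHAIN) becomes IO | IO_CHAIN, and an old 0 becomes ~0). A trivial standalone helper showing the conversion (name is illustrative):

	/* Illustrative only: convert an old-style "ignore" mask into the
	 * "clear" mask expected by omap_prm_clear_mod_irqs(). */
	static unsigned int ignore_to_clear_mask(unsigned int ignore_bits)
	{
		return ~ignore_bits;
	}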
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 6163d66102a3..6ae0b3a1781e 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -518,6 +518,26 @@ struct omap_prcm_irq_setup {
.priority = _priority \
}
+/**
+ * struct omap_prcm_init_data - PRCM driver init data
+ * @index: clock memory mapping index to be used
+ * @mem: IO mem pointer for this module
+ * @offset: module base address offset from the IO base
+ * @flags: PRCM module init flags
+ * @device_inst_offset: device instance offset within the module address space
+ * @init: low level PRCM init function for this module
+ * @np: device node for this PRCM module
+ */
+struct omap_prcm_init_data {
+ int index;
+ void __iomem *mem;
+ s16 offset;
+ u16 flags;
+ s32 device_inst_offset;
+ int (*init)(const struct omap_prcm_init_data *data);
+ struct device_node *np;
+};
+
extern void omap_prcm_irq_cleanup(void);
extern int omap_prcm_register_chain_handler(
struct omap_prcm_irq_setup *irq_setup);
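
Note on the prcm-common.h hunk above: struct omap_prcm_init_data bundles everything a PRCM module instance needs at init time (clock map index, iomapped base, offset, flags, device-instance offset, DT node) plus a low-level init callback. Below is a hedged sketch of a callback matching that signature, assuming kernel context where the struct and -EINVAL are available; example_prcm_init is a hypothetical name, the real instances are the omapNxxx/am33xx/omap44xx _prm_init() functions reworked later in this diff.

	/* Illustrative sketch only, not part of the patch. */
	static int example_prcm_init(const struct omap_prcm_init_data *data)
	{
		if (!data->mem)
			return -EINVAL;
		/* a real implementation would register its prm_ll_data here */
		return 0;
	}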
diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
index ad7b3e9977f8..d0261996db6d 100644
--- a/arch/arm/mach-omap2/prcm43xx.h
+++ b/arch/arm/mach-omap2/prcm43xx.h
@@ -143,5 +143,7 @@
#define AM43XX_CM_PER_USB_OTG_SS1_CLKCTRL_OFFSET 0x0268
#define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET 0x05C0
#define AM43XX_CM_PER_DSS_CLKCTRL_OFFSET 0x0a20
-
+#define AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET 0x04a0
+#define AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET 0x0068
+#define AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET 0x0070
#endif
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index cbefbd7cfdb5..661d753df584 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -112,6 +112,7 @@
#define OMAP3430_VC_CMD_ONLP_SHIFT 16
#define OMAP3430_VC_CMD_RET_SHIFT 8
#define OMAP3430_VC_CMD_OFF_SHIFT 0
+#define OMAP3430_SREN_MASK (1 << 4)
#define OMAP3430_HSEN_MASK (1 << 3)
#define OMAP3430_MCODE_MASK (0x7 << 0)
#define OMAP3430_VALID_MASK (1 << 24)
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
index b1c7a33e00e7..e794828dee55 100644
--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -35,6 +35,7 @@
#define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1
#define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
#define OMAP4430_HSMCODE_MASK (0x7 << 0)
+#define OMAP4430_SRMODEEN_MASK (1 << 4)
#define OMAP4430_HSMODEEN_MASK (1 << 3)
#define OMAP4430_HSSCLL_SHIFT 24
#define OMAP4430_ICEPICK_RST_SHIFT 9
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index b9061a6a2db8..233bc84fbc0e 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -19,8 +19,9 @@
extern void __iomem *prm_base;
extern u16 prm_features;
extern void omap2_set_globals_prm(void __iomem *prm);
-int of_prcm_init(void);
-void omap3_prcm_legacy_iomaps_init(void);
+int omap_prcm_init(void);
+int omap2_prm_base_init(void);
+int omap2_prcm_base_init(void);
# endif
/*
@@ -28,9 +29,11 @@ void omap3_prcm_legacy_iomaps_init(void);
*
* PRM_HAS_IO_WAKEUP: has IO wakeup capability
* PRM_HAS_VOLTAGE: has voltage domains
+ * PRM_IRQ_DEFAULT: use default irq number for PRM irq
*/
-#define PRM_HAS_IO_WAKEUP (1 << 0)
-#define PRM_HAS_VOLTAGE (1 << 1)
+#define PRM_HAS_IO_WAKEUP BIT(0)
+#define PRM_HAS_VOLTAGE BIT(1)
+#define PRM_IRQ_DEFAULT BIT(2)
/*
* MAX_MODULE_SOFTRESET_WAIT: Maximum microseconds to wait for OMAP
@@ -146,6 +149,9 @@ struct prm_ll_data {
int (*is_hardreset_asserted)(u8 shift, u8 part, s16 prm_mod,
u16 offset);
void (*reset_system)(void);
+ int (*clear_mod_irqs)(s16 module, u8 regs, u32 wkst_mask);
+ u32 (*vp_check_txdone)(u8 vp_id);
+ void (*vp_clear_txdone)(u8 vp_id);
};
extern int prm_register(struct prm_ll_data *pld);
@@ -161,6 +167,19 @@ extern void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx);
void omap_prm_reset_system(void);
void omap_prm_reconfigure_io_chain(void);
+int omap_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask);
+
+/*
+ * Voltage Processor (VP) identifiers
+ */
+#define OMAP3_VP_VDD_MPU_ID 0
+#define OMAP3_VP_VDD_CORE_ID 1
+#define OMAP4_VP_VDD_CORE_ID 0
+#define OMAP4_VP_VDD_IVA_ID 1
+#define OMAP4_VP_VDD_MPU_ID 2
+
+u32 omap_prm_vp_check_txdone(u8 vp_id);
+void omap_prm_vp_clear_txdone(u8 vp_id);
#endif
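
Note on the prm.h hunk above: the voltage processor (VP) TX-done check/clear operations and clear_mod_irqs become function pointers in prm_ll_data, dispatched through new generic omap_prm_* wrappers, and the VP identifiers move here so callers no longer need SoC-specific headers. Below is a hedged usage sketch, assuming kernel context (<linux/delay.h>, <linux/errno.h> and this header); wait_vp_txdone_sketch is a hypothetical caller, not code from the patch.

	static int wait_vp_txdone_sketch(void)
	{
		int timeout = 1000;

		while (!omap_prm_vp_check_txdone(OMAP3_VP_VDD_MPU_ID) && --timeout)
			udelay(1);
		if (!timeout)
			return -ETIMEDOUT;
		omap_prm_vp_clear_txdone(OMAP3_VP_VDD_MPU_ID);
		return 0;
	}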
diff --git a/arch/arm/mach-omap2/prm2xxx.c b/arch/arm/mach-omap2/prm2xxx.c
index af0f15278fc2..752018ce129c 100644
--- a/arch/arm/mach-omap2/prm2xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx.c
@@ -123,13 +123,14 @@ static void omap2xxx_prm_dpll_reset(void)
* Clears wakeup status bits for a given module, so that the device can
* re-enter idle.
*/
-void omap2xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
+static int omap2xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
{
u32 wkst;
wkst = omap2_prm_read_mod_reg(module, regs);
wkst &= wkst_mask;
omap2_prm_write_mod_reg(wkst, module, regs);
+ return 0;
}
int omap2xxx_clkdm_sleep(struct clockdomain *clkdm)
@@ -216,9 +217,10 @@ static struct prm_ll_data omap2xxx_prm_ll_data = {
.deassert_hardreset = &omap2_prm_deassert_hardreset,
.is_hardreset_asserted = &omap2_prm_is_hardreset_asserted,
.reset_system = &omap2xxx_prm_dpll_reset,
+ .clear_mod_irqs = &omap2xxx_prm_clear_mod_irqs,
};
-int __init omap2xxx_prm_init(void)
+int __init omap2xxx_prm_init(const struct omap_prcm_init_data *data)
{
return prm_register(&omap2xxx_prm_ll_data);
}
diff --git a/arch/arm/mach-omap2/prm2xxx.h b/arch/arm/mach-omap2/prm2xxx.h
index 1d51643062f7..9008a9e55a1a 100644
--- a/arch/arm/mach-omap2/prm2xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx.h
@@ -124,9 +124,7 @@
extern int omap2xxx_clkdm_sleep(struct clockdomain *clkdm);
extern int omap2xxx_clkdm_wakeup(struct clockdomain *clkdm);
-void omap2xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask);
-
-extern int __init omap2xxx_prm_init(void);
+int __init omap2xxx_prm_init(const struct omap_prcm_init_data *data);
#endif
diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
index 02f628601b09..dcb5001d77da 100644
--- a/arch/arm/mach-omap2/prm33xx.c
+++ b/arch/arm/mach-omap2/prm33xx.c
@@ -378,7 +378,7 @@ static struct prm_ll_data am33xx_prm_ll_data = {
.reset_system = am33xx_prm_global_warm_sw_reset,
};
-int __init am33xx_prm_init(void)
+int __init am33xx_prm_init(const struct omap_prcm_init_data *data)
{
return prm_register(&am33xx_prm_ll_data);
}
diff --git a/arch/arm/mach-omap2/prm33xx.h b/arch/arm/mach-omap2/prm33xx.h
index 98ac41f271da..2bc4ec52ba78 100644
--- a/arch/arm/mach-omap2/prm33xx.h
+++ b/arch/arm/mach-omap2/prm33xx.h
@@ -118,7 +118,7 @@
#define AM33XX_PM_CEFUSE_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0004)
#ifndef __ASSEMBLER__
-int am33xx_prm_init(void);
+int am33xx_prm_init(const struct omap_prcm_init_data *data);
#endif /* ASSEMBLER */
#endif
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 5713bbdf83bc..62680aad2126 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -29,6 +29,7 @@
#include "prm-regbits-34xx.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
+#include "clock.h"
static void omap3xxx_prm_read_pending_irqs(unsigned long *events);
static void omap3xxx_prm_ocp_barrier(void);
@@ -96,7 +97,7 @@ static struct omap3_vp omap3_vp[] = {
#define MAX_VP_ID ARRAY_SIZE(omap3_vp);
-u32 omap3_prm_vp_check_txdone(u8 vp_id)
+static u32 omap3_prm_vp_check_txdone(u8 vp_id)
{
struct omap3_vp *vp = &omap3_vp[vp_id];
u32 irqstatus;
@@ -106,7 +107,7 @@ u32 omap3_prm_vp_check_txdone(u8 vp_id)
return irqstatus & vp->tranxdone_status;
}
-void omap3_prm_vp_clear_txdone(u8 vp_id)
+static void omap3_prm_vp_clear_txdone(u8 vp_id)
{
struct omap3_vp *vp = &omap3_vp[vp_id];
@@ -217,7 +218,7 @@ static void omap3xxx_prm_restore_irqen(u32 *saved_mask)
* omap3xxx_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt
* @module: PRM module to clear wakeups from
* @regs: register set to clear, 1 or 3
- * @ignore_bits: wakeup status bits to ignore
+ * @wkst_mask: wkst bits to clear
*
* The purpose of this function is to clear any wake-up events latched
* in the PRCM PM_WKST_x registers. It is possible that a wake-up event
@@ -226,7 +227,7 @@ static void omap3xxx_prm_restore_irqen(u32 *saved_mask)
* that any peripheral wake-up events occurring while attempting to
* clear the PM_WKST_x are detected and cleared.
*/
-int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
+static int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
{
u32 wkst, fclk, iclk, clken;
u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
@@ -238,7 +239,7 @@ int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
wkst = omap2_prm_read_mod_reg(module, wkst_off);
wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
- wkst &= ~ignore_bits;
+ wkst &= wkst_mask;
if (wkst) {
iclk = omap2_cm_read_mod_reg(module, iclk_off);
fclk = omap2_cm_read_mod_reg(module, fclk_off);
@@ -254,7 +255,7 @@ int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
omap2_prm_write_mod_reg(wkst, module, wkst_off);
wkst = omap2_prm_read_mod_reg(module, wkst_off);
- wkst &= ~ignore_bits;
+ wkst &= wkst_mask;
c++;
}
omap2_cm_write_mod_reg(iclk, module, iclk_off);
@@ -664,10 +665,15 @@ static struct prm_ll_data omap3xxx_prm_ll_data = {
.deassert_hardreset = &omap2_prm_deassert_hardreset,
.is_hardreset_asserted = &omap2_prm_is_hardreset_asserted,
.reset_system = &omap3xxx_prm_dpll3_reset,
+ .clear_mod_irqs = &omap3xxx_prm_clear_mod_irqs,
+ .vp_check_txdone = &omap3_prm_vp_check_txdone,
+ .vp_clear_txdone = &omap3_prm_vp_clear_txdone,
};
-int __init omap3xxx_prm_init(void)
+int __init omap3xxx_prm_init(const struct omap_prcm_init_data *data)
{
+ omap2_clk_legacy_provider_init(TI_CLKM_PRM,
+ prm_base + OMAP3430_IVA2_MOD);
if (omap3_has_io_wakeup())
prm_features |= PRM_HAS_IO_WAKEUP;
diff --git a/arch/arm/mach-omap2/prm3xxx.h b/arch/arm/mach-omap2/prm3xxx.h
index ed8a3d8b739a..5f095eec339c 100644
--- a/arch/arm/mach-omap2/prm3xxx.h
+++ b/arch/arm/mach-omap2/prm3xxx.h
@@ -132,10 +132,6 @@
#ifndef __ASSEMBLER__
-/* OMAP3-specific VP functions */
-u32 omap3_prm_vp_check_txdone(u8 vp_id);
-void omap3_prm_vp_clear_txdone(u8 vp_id);
-
/*
* OMAP3 access functions for voltage controller (VC) and
 * voltage processor (VP) in the PRM.
@@ -144,8 +140,7 @@ extern u32 omap3_prm_vcvp_read(u8 offset);
extern void omap3_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
-extern int __init omap3xxx_prm_init(void);
-int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits);
+int __init omap3xxx_prm_init(const struct omap_prcm_init_data *data);
void omap3xxx_prm_iva_idle(void);
void omap3_prm_reset_modem(void);
int omap3xxx_prm_clear_global_cold_reset(void);
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index d6d6bc39e05c..4541700f743a 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -138,7 +138,7 @@ static struct omap4_vp omap4_vp[] = {
},
};
-u32 omap4_prm_vp_check_txdone(u8 vp_id)
+static u32 omap4_prm_vp_check_txdone(u8 vp_id)
{
struct omap4_vp *vp = &omap4_vp[vp_id];
u32 irqstatus;
@@ -149,7 +149,7 @@ u32 omap4_prm_vp_check_txdone(u8 vp_id)
return irqstatus & vp->tranxdone_status;
}
-void omap4_prm_vp_clear_txdone(u8 vp_id)
+static void omap4_prm_vp_clear_txdone(u8 vp_id)
{
struct omap4_vp *vp = &omap4_vp[vp_id];
@@ -699,29 +699,31 @@ static struct prm_ll_data omap44xx_prm_ll_data = {
.deassert_hardreset = omap4_prminst_deassert_hardreset,
.is_hardreset_asserted = omap4_prminst_is_hardreset_asserted,
.reset_system = omap4_prminst_global_warm_sw_reset,
+ .vp_check_txdone = omap4_prm_vp_check_txdone,
+ .vp_clear_txdone = omap4_prm_vp_clear_txdone,
};
-int __init omap44xx_prm_init(void)
+static const struct omap_prcm_init_data *prm_init_data;
+
+int __init omap44xx_prm_init(const struct omap_prcm_init_data *data)
{
- if (cpu_is_omap44xx() || soc_is_omap54xx() || soc_is_dra7xx())
+ omap_prm_base_init();
+
+ prm_init_data = data;
+
+ if (data->flags & PRM_HAS_IO_WAKEUP)
prm_features |= PRM_HAS_IO_WAKEUP;
- if (!soc_is_dra7xx())
+ if (data->flags & PRM_HAS_VOLTAGE)
prm_features |= PRM_HAS_VOLTAGE;
+ omap4_prminst_set_prm_dev_inst(data->device_inst_offset);
+
return prm_register(&omap44xx_prm_ll_data);
}
-static const struct of_device_id omap_prm_dt_match_table[] = {
- { .compatible = "ti,omap4-prm" },
- { .compatible = "ti,omap5-prm" },
- { .compatible = "ti,dra7-prm" },
- { }
-};
-
static int omap44xx_prm_late_init(void)
{
- struct device_node *np;
int irq_num;
if (!(prm_features & PRM_HAS_IO_WAKEUP))
@@ -731,31 +733,23 @@ static int omap44xx_prm_late_init(void)
if (!of_have_populated_dt())
return 0;
- np = of_find_matching_node(NULL, omap_prm_dt_match_table);
-
- if (!np) {
- /* Default loaded up with OMAP4 values */
- if (!cpu_is_omap44xx())
- return 0;
- } else {
- irq_num = of_irq_get(np, 0);
- /*
- * Already have OMAP4 IRQ num. For all other platforms, we need
- * IRQ numbers from DT
- */
- if (irq_num < 0 && !cpu_is_omap44xx()) {
- if (irq_num == -EPROBE_DEFER)
- return irq_num;
-
- /* Have nothing to do */
- return 0;
- }
-
- /* Once OMAP4 DT is filled as well */
- if (irq_num >= 0) {
- omap4_prcm_irq_setup.irq = irq_num;
- omap4_prcm_irq_setup.xlate_irq = NULL;
- }
+ irq_num = of_irq_get(prm_init_data->np, 0);
+ /*
+ * Already have OMAP4 IRQ num. For all other platforms, we need
+ * IRQ numbers from DT
+ */
+ if (irq_num < 0 && !(prm_init_data->flags & PRM_IRQ_DEFAULT)) {
+ if (irq_num == -EPROBE_DEFER)
+ return irq_num;
+
+ /* Have nothing to do */
+ return 0;
+ }
+
+ /* Once OMAP4 DT is filled as well */
+ if (irq_num >= 0) {
+ omap4_prcm_irq_setup.irq = irq_num;
+ omap4_prcm_irq_setup.xlate_irq = NULL;
}
omap44xx_prm_enable_io_wakeup();
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 7db2422faa16..efd6035d0871 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -26,7 +26,6 @@
#define __ARCH_ARM_MACH_OMAP2_PRM44XX_H
#include "prm44xx_54xx.h"
-#include "prcm-common.h"
#include "prm.h"
#define OMAP4430_PRM_BASE 0x4a306000
diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h
index 714329565b90..3f139ebc8398 100644
--- a/arch/arm/mach-omap2/prm44xx_54xx.h
+++ b/arch/arm/mach-omap2/prm44xx_54xx.h
@@ -23,13 +23,11 @@
#ifndef __ARCH_ARM_MACH_OMAP2_PRM44XX_54XX_H
#define __ARCH_ARM_MACH_OMAP2_PRM44XX_54XX_H
+#include "prcm-common.h"
+
/* Function prototypes */
#ifndef __ASSEMBLER__
-/* OMAP4/OMAP5-specific VP functions */
-u32 omap4_prm_vp_check_txdone(u8 vp_id);
-void omap4_prm_vp_clear_txdone(u8 vp_id);
-
/*
* OMAP4/OMAP5 access functions for voltage controller (VC) and
 * voltage processor (VP) in the PRM.
@@ -38,7 +36,7 @@ extern u32 omap4_prm_vcvp_read(u8 offset);
extern void omap4_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
-extern int __init omap44xx_prm_init(void);
+int __init omap44xx_prm_init(const struct omap_prcm_init_data *data);
#endif
diff --git a/arch/arm/mach-omap2/prm54xx.h b/arch/arm/mach-omap2/prm54xx.h
index e4411010309c..1eb22ff087dc 100644
--- a/arch/arm/mach-omap2/prm54xx.h
+++ b/arch/arm/mach-omap2/prm54xx.h
@@ -22,7 +22,6 @@
#define __ARCH_ARM_MACH_OMAP2_PRM54XX_H
#include "prm44xx_54xx.h"
-#include "prcm-common.h"
#include "prm.h"
#define OMAP54XX_PRM_BASE 0x4ae06000
diff --git a/arch/arm/mach-omap2/prm7xx.h b/arch/arm/mach-omap2/prm7xx.h
index 4bb50fbf29be..cc1e6a2b97f6 100644
--- a/arch/arm/mach-omap2/prm7xx.h
+++ b/arch/arm/mach-omap2/prm7xx.h
@@ -22,8 +22,8 @@
#ifndef __ARCH_ARM_MACH_OMAP2_PRM7XX_H
#define __ARCH_ARM_MACH_OMAP2_PRM7XX_H
-#include "prm44xx_54xx.h"
#include "prcm-common.h"
+#include "prm44xx_54xx.h"
#include "prm.h"
#define DRA7XX_PRM_BASE 0x4ae06000
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index bfaa7ba595cc..7add7994dbfc 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -32,7 +32,11 @@
#include "prm2xxx_3xxx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
+#include "prm33xx.h"
#include "prm44xx.h"
+#include "prm54xx.h"
+#include "prm7xx.h"
+#include "prcm43xx.h"
#include "common.h"
#include "clock.h"
#include "cm.h"
@@ -534,6 +538,61 @@ void omap_prm_reset_system(void)
}
/**
+ * omap_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt
+ * @module: PRM module to clear wakeups from
+ * @regs: register to clear
+ * @wkst_mask: wkst bits to clear
+ *
+ * Clears any wakeup events for the module and register set defined.
+ * Uses SoC specific implementation to do the actual wakeup status
+ * clearing.
+ */
+int omap_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
+{
+ if (!prm_ll_data->clear_mod_irqs) {
+ WARN_ONCE(1, "prm: %s: no mapping function defined\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return prm_ll_data->clear_mod_irqs(module, regs, wkst_mask);
+}
+
+/**
+ * omap_prm_vp_check_txdone - check voltage processor TX done status
+ *
+ * Checks if voltage processor transmission has been completed.
+ * Returns non-zero if a transmission has completed, 0 otherwise.
+ */
+u32 omap_prm_vp_check_txdone(u8 vp_id)
+{
+ if (!prm_ll_data->vp_check_txdone) {
+ WARN_ONCE(1, "prm: %s: no mapping function defined\n",
+ __func__);
+ return 0;
+ }
+
+ return prm_ll_data->vp_check_txdone(vp_id);
+}
+
+/**
+ * omap_prm_vp_clear_txdone - clears voltage processor TX done status
+ *
+ * Clears the status bit for completed voltage processor transmission
+ * returned by prm_vp_check_txdone.
+ */
+void omap_prm_vp_clear_txdone(u8 vp_id)
+{
+ if (!prm_ll_data->vp_clear_txdone) {
+ WARN_ONCE(1, "prm: %s: no mapping function defined\n",
+ __func__);
+ return;
+ }
+
+ prm_ll_data->vp_clear_txdone(vp_id);
+}
+
+/**
* prm_register - register per-SoC low-level data with the PRM
* @pld: low-level per-SoC OMAP PRM data & function pointers to register
*
@@ -578,78 +637,175 @@ int prm_unregister(struct prm_ll_data *pld)
return 0;
}
-static const struct of_device_id omap_prcm_dt_match_table[] = {
- { .compatible = "ti,am3-prcm" },
- { .compatible = "ti,am3-scrm" },
- { .compatible = "ti,am4-prcm" },
- { .compatible = "ti,am4-scrm" },
- { .compatible = "ti,dm814-prcm" },
- { .compatible = "ti,dm814-scrm" },
- { .compatible = "ti,dm816-prcm" },
- { .compatible = "ti,dm816-scrm" },
- { .compatible = "ti,omap2-prcm" },
- { .compatible = "ti,omap2-scrm" },
- { .compatible = "ti,omap3-prm" },
- { .compatible = "ti,omap3-cm" },
- { .compatible = "ti,omap3-scrm" },
- { .compatible = "ti,omap4-cm1" },
- { .compatible = "ti,omap4-prm" },
- { .compatible = "ti,omap4-cm2" },
- { .compatible = "ti,omap4-scrm" },
- { .compatible = "ti,omap5-prm" },
- { .compatible = "ti,omap5-cm-core-aon" },
- { .compatible = "ti,omap5-scrm" },
- { .compatible = "ti,omap5-cm-core" },
- { .compatible = "ti,dra7-prm" },
- { .compatible = "ti,dra7-cm-core-aon" },
- { .compatible = "ti,dra7-cm-core" },
- { }
+#ifdef CONFIG_ARCH_OMAP2
+static struct omap_prcm_init_data omap2_prm_data __initdata = {
+ .index = TI_CLKM_PRM,
+ .init = omap2xxx_prm_init,
};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static struct omap_prcm_init_data omap3_prm_data __initdata = {
+ .index = TI_CLKM_PRM,
+ .init = omap3xxx_prm_init,
-static struct clk_hw_omap memmap_dummy_ck = {
- .flags = MEMMAP_ADDRESSING,
+ /*
+ * IVA2 offset is a negative value, must offset the prm_base
+ * address by this to get it to positive
+ */
+ .offset = -OMAP3430_IVA2_MOD,
};
+#endif
-static u32 prm_clk_readl(void __iomem *reg)
-{
- return omap2_clk_readl(&memmap_dummy_ck, reg);
-}
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_TI81XX)
+static struct omap_prcm_init_data am3_prm_data __initdata = {
+ .index = TI_CLKM_PRM,
+ .init = am33xx_prm_init,
+};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+static struct omap_prcm_init_data omap4_prm_data __initdata = {
+ .index = TI_CLKM_PRM,
+ .init = omap44xx_prm_init,
+ .device_inst_offset = OMAP4430_PRM_DEVICE_INST,
+ .flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE | PRM_IRQ_DEFAULT,
+};
+#endif
+
+#ifdef CONFIG_SOC_OMAP5
+static struct omap_prcm_init_data omap5_prm_data __initdata = {
+ .index = TI_CLKM_PRM,
+ .init = omap44xx_prm_init,
+ .device_inst_offset = OMAP54XX_PRM_DEVICE_INST,
+ .flags = PRM_HAS_IO_WAKEUP | PRM_HAS_VOLTAGE,
+};
+#endif
+
+#ifdef CONFIG_SOC_DRA7XX
+static struct omap_prcm_init_data dra7_prm_data __initdata = {
+ .index = TI_CLKM_PRM,
+ .init = omap44xx_prm_init,
+ .device_inst_offset = DRA7XX_PRM_DEVICE_INST,
+ .flags = PRM_HAS_IO_WAKEUP,
+};
+#endif
-static void prm_clk_writel(u32 val, void __iomem *reg)
-{
- omap2_clk_writel(val, &memmap_dummy_ck, reg);
-}
+#ifdef CONFIG_SOC_AM43XX
+static struct omap_prcm_init_data am4_prm_data __initdata = {
+ .index = TI_CLKM_PRM,
+ .init = omap44xx_prm_init,
+ .device_inst_offset = AM43XX_PRM_DEVICE_INST,
+};
+#endif
-static struct ti_clk_ll_ops omap_clk_ll_ops = {
- .clk_readl = prm_clk_readl,
- .clk_writel = prm_clk_writel,
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
+static struct omap_prcm_init_data scrm_data __initdata = {
+ .index = TI_CLKM_SCRM,
+};
+#endif
+
+static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
+#ifdef CONFIG_SOC_AM33XX
+ { .compatible = "ti,am3-prcm", .data = &am3_prm_data },
+#endif
+#ifdef CONFIG_SOC_AM43XX
+ { .compatible = "ti,am4-prcm", .data = &am4_prm_data },
+#endif
+#ifdef CONFIG_SOC_TI81XX
+ { .compatible = "ti,dm814-prcm", .data = &am3_prm_data },
+ { .compatible = "ti,dm816-prcm", .data = &am3_prm_data },
+#endif
+#ifdef CONFIG_ARCH_OMAP2
+ { .compatible = "ti,omap2-prcm", .data = &omap2_prm_data },
+#endif
+#ifdef CONFIG_ARCH_OMAP3
+ { .compatible = "ti,omap3-prm", .data = &omap3_prm_data },
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ { .compatible = "ti,omap4-prm", .data = &omap4_prm_data },
+ { .compatible = "ti,omap4-scrm", .data = &scrm_data },
+#endif
+#ifdef CONFIG_SOC_OMAP5
+ { .compatible = "ti,omap5-prm", .data = &omap5_prm_data },
+ { .compatible = "ti,omap5-scrm", .data = &scrm_data },
+#endif
+#ifdef CONFIG_SOC_DRA7XX
+ { .compatible = "ti,dra7-prm", .data = &dra7_prm_data },
+#endif
+ { }
};
-int __init of_prcm_init(void)
+/**
+ * omap2_prm_base_init - initialize iomappings for the PRM driver
+ *
+ * Detects and initializes the iomappings for the PRM driver, based
+ * on the DT data. Returns 0 in success, negative error value
+ * otherwise.
+ */
+int __init omap2_prm_base_init(void)
{
struct device_node *np;
+ const struct of_device_id *match;
+ struct omap_prcm_init_data *data;
void __iomem *mem;
- int memmap_index = 0;
- ti_clk_ll_ops = &omap_clk_ll_ops;
+ for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) {
+ data = (struct omap_prcm_init_data *)match->data;
- for_each_matching_node(np, omap_prcm_dt_match_table) {
mem = of_iomap(np, 0);
- clk_memmaps[memmap_index] = mem;
- ti_dt_clk_init_provider(np, memmap_index);
- memmap_index++;
+ if (!mem)
+ return -ENOMEM;
+
+ if (data->index == TI_CLKM_PRM)
+ prm_base = mem + data->offset;
+
+ data->mem = mem;
+
+ data->np = np;
+
+ if (data->init)
+ data->init(data);
}
return 0;
}
-void __init omap3_prcm_legacy_iomaps_init(void)
+int __init omap2_prcm_base_init(void)
{
- ti_clk_ll_ops = &omap_clk_ll_ops;
+ int ret;
- clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD;
- clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD;
- clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get();
+ ret = omap2_prm_base_init();
+ if (ret)
+ return ret;
+
+ return omap2_cm_base_init();
+}
+
+/**
+ * omap_prcm_init - low level init for the PRCM drivers
+ *
+ * Initializes the low level clock infrastructure for PRCM drivers.
+ * Returns 0 in success, negative error value in failure.
+ */
+int __init omap_prcm_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ const struct omap_prcm_init_data *data;
+ int ret;
+
+ for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) {
+ data = match->data;
+
+ ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
+ if (ret)
+ return ret;
+ }
+
+ omap_cm_init();
+
+ return 0;
}
static int __init prm_late_init(void)
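
Note on the prm_common.c hunks above: the new generic entry points (omap_prm_clear_mod_irqs, omap_prm_vp_check_txdone, omap_prm_vp_clear_txdone) all follow the same pattern of forwarding to a per-SoC prm_ll_data function pointer and warning once if none was registered, while omap2_prm_base_init()/omap_prcm_init() walk the DT match table and hand each node's iomap to the SoC init callback. Below is a standalone model of the dispatch-with-fallback pattern only; the struct and function names are illustrative, not the kernel's.

	#include <stdio.h>

	struct ll_ops_model {
		int (*clear_mod_irqs)(short module, unsigned char regs,
				      unsigned int wkst_mask);
	};

	static struct ll_ops_model *ll_model;

	static int generic_clear_mod_irqs_model(short module, unsigned char regs,
						unsigned int wkst_mask)
	{
		if (!ll_model || !ll_model->clear_mod_irqs) {
			fprintf(stderr, "no SoC implementation registered\n");
			return -1;	/* the kernel code returns -EINVAL and WARNs once */
		}
		return ll_model->clear_mod_irqs(module, regs, wkst_mask);
	}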
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 8adf7b1a1dce..d0b15dbafa2e 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -47,22 +47,14 @@ void omap_prm_base_init(void)
s32 omap4_prmst_get_prm_dev_inst(void)
{
- if (prm_dev_inst != PRM_INSTANCE_UNKNOWN)
- return prm_dev_inst;
-
- /* This cannot be done way early at boot.. as things are not setup */
- if (cpu_is_omap44xx())
- prm_dev_inst = OMAP4430_PRM_DEVICE_INST;
- else if (soc_is_omap54xx())
- prm_dev_inst = OMAP54XX_PRM_DEVICE_INST;
- else if (soc_is_dra7xx())
- prm_dev_inst = DRA7XX_PRM_DEVICE_INST;
- else if (soc_is_am43xx())
- prm_dev_inst = AM43XX_PRM_DEVICE_INST;
-
return prm_dev_inst;
}
+void omap4_prminst_set_prm_dev_inst(s32 dev_inst)
+{
+ prm_dev_inst = dev_inst;
+}
+
/* Read a register in a PRM instance */
u32 omap4_prminst_read_inst_reg(u8 part, s16 inst, u16 idx)
{
@@ -95,12 +87,6 @@ u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
return v;
}
-/*
- * Address offset (in bytes) between the reset control and the reset
- * status registers: 4 bytes on OMAP4
- */
-#define OMAP4_RST_CTRL_ST_OFFSET 4
-
/**
* omap4_prminst_is_hardreset_asserted - read the HW reset line state of
* submodules contained in the hwmod module
@@ -149,11 +135,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
* omap4_prminst_deassert_hardreset - deassert a submodule hardreset line and
* wait
* @shift: register bit shift corresponding to the reset line to deassert
- * @st_shift: status bit offset, not used for OMAP4+
+ * @st_shift: status bit offset corresponding to the reset line
* @part: PRM partition
* @inst: PRM instance offset
* @rstctrl_offs: reset register offset
- * @st_offs: reset status register offset, not used for OMAP4+
+ * @rstst_offs: reset status register offset
*
* Some IPs like dsp, ipu or iva contain processors that require an HW
* reset line to be asserted / deasserted in order to fully enable the
@@ -165,11 +151,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
* of reset, or -EBUSY if the submodule did not exit reset promptly.
*/
int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
- u16 rstctrl_offs, u16 st_offs)
+ u16 rstctrl_offs, u16 rstst_offs)
{
int c;
u32 mask = 1 << shift;
- u16 rstst_offs = rstctrl_offs + OMAP4_RST_CTRL_ST_OFFSET;
+ u32 st_mask = 1 << st_shift;
/* Check the current status to avoid de-asserting the line twice */
if (omap4_prminst_is_hardreset_asserted(shift, part, inst,
@@ -177,13 +163,13 @@ int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
return -EEXIST;
/* Clear the reset status by writing 1 to the status bit */
- omap4_prminst_rmw_inst_reg_bits(0xffffffff, mask, part, inst,
+ omap4_prminst_rmw_inst_reg_bits(0xffffffff, st_mask, part, inst,
rstst_offs);
/* de-assert the reset control line */
omap4_prminst_rmw_inst_reg_bits(mask, 0, part, inst, rstctrl_offs);
/* wait the status to be set */
- omap_test_timeout(omap4_prminst_is_hardreset_asserted(shift, part, inst,
- rstst_offs),
+ omap_test_timeout(omap4_prminst_is_hardreset_asserted(st_shift, part,
+ inst, rstst_offs),
MAX_MODULE_HARDRESET_WAIT, c);
return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
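With the fixed OMAP4_RST_CTRL_ST_OFFSET gone, callers now pass the status bit and the status register offset explicitly. A hedged sketch of the resulting call shape; the shift, offset and error handling are illustrative only:

	int ret;

	/* de-assert reset line 0; on this hypothetical IP the status bit is 1 */
	ret = omap4_prminst_deassert_hardreset(0, 1, part, inst,
					       rstctrl_offs,
					       rstctrl_offs + 4 /* RSTST */);
	if (ret == -EBUSY)
		pr_err("submodule did not leave reset\n");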
diff --git a/arch/arm/mach-omap2/prminst44xx.h b/arch/arm/mach-omap2/prminst44xx.h
index fb1c9d7a2f9d..0c03d0731d7f 100644
--- a/arch/arm/mach-omap2/prminst44xx.h
+++ b/arch/arm/mach-omap2/prminst44xx.h
@@ -14,6 +14,7 @@
#define PRM_INSTANCE_UNKNOWN -1
extern s32 omap4_prmst_get_prm_dev_inst(void);
+void omap4_prminst_set_prm_dev_inst(s32 dev_inst);
/*
* In an ideal world, we would not export these low-level functions,
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index b84a0122d823..ad1bb9431e94 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,11 +333,9 @@ ENDPROC(omap4_cpu_resume)
#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
-#ifndef CONFIG_OMAP4_ERRATA_I688
ENTRY(omap_bus_sync)
ret lr
ENDPROC(omap_bus_sync)
-#endif
ENTRY(omap_do_wfi)
stmfd sp!, {lr}
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index cef67af9e9b8..cac46d852da1 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -298,14 +298,11 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
if (IS_ERR(src))
return PTR_ERR(src);
- if (clk_get_parent(timer->fclk) != src) {
- r = clk_set_parent(timer->fclk, src);
- if (r < 0) {
- pr_warn("%s: %s cannot set source\n", __func__,
- oh->name);
- clk_put(src);
- return r;
- }
+ r = clk_set_parent(timer->fclk, src);
+ if (r < 0) {
+ pr_warn("%s: %s cannot set source\n", __func__, oh->name);
+ clk_put(src);
+ return r;
}
clk_put(src);
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 8333400898fb..e554d9e66a1c 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -71,7 +71,7 @@ static int tusb_set_async_mode(unsigned sysclk_ps)
gpmc_calc_timings(&t, &tusb_async, &dev_t);
- return gpmc_cs_set_timings(async_cs, &t);
+ return gpmc_cs_set_timings(async_cs, &t, &tusb_async);
}
static int tusb_set_sync_mode(unsigned sysclk_ps)
@@ -98,7 +98,7 @@ static int tusb_set_sync_mode(unsigned sysclk_ps)
gpmc_calc_timings(&t, &tusb_sync, &dev_t);
- return gpmc_cs_set_timings(sync_cs, &t);
+ return gpmc_cs_set_timings(sync_cs, &t, &tusb_sync);
}
/* tusb driver calls this when it changes the chip's clocking */
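The extra argument on both calls above is the struct gpmc_settings the timings were derived from. Sketched prototype, assumed from these call sites rather than quoted from the GPMC header:

	/* assumed shape of the updated API */
	int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
				const struct gpmc_settings *s);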
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index be9ef834fa81..076fd20d7e5a 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
* idle. And we can also scale voltages to zero for off-idle.
* Note that no actual voltage scaling during off-idle will
* happen unless the board specific twl4030 PMIC scripts are
- * loaded.
+ * loaded. See also omap_vc_i2c_init for comments regarding
+ * erratum i531.
*/
val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) {
@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
return;
}
+ /*
+ * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around
+ * erratum i531 "Extra Power Consumed When Repeated Start Operation
+ * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)".
+ * Otherwise I2C4 eventually leads to about 23 mW of extra power being
+ * consumed even during off idle using VMODE.
+ */
i2c_high_speed = voltdm->pmic->i2c_high_speed;
if (i2c_high_speed)
- voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
+ voltdm->rmw(vc->common->i2c_cfg_clear_mask,
vc->common->i2c_cfg_hsen_mask,
vc->common->i2c_cfg_reg);
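The key detail above is the rmw() callback's semantics: the first argument is the mask of bits to clear, the second the bits to set afterwards. Using the OMAP3 masks added in vc3xxx_data.c below, the call is equivalent to the following open-coded form (illustrative only):

	/* clear SREN and HSEN, then set HSEN again: net effect is SREN = 0 */
	voltdm->rmw(OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
		    OMAP3430_HSEN_MASK,
		    OMAP3_PRM_VC_I2C_CFG_OFFSET);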
diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
index cdbdd78e755e..89b83b7ff3ec 100644
--- a/arch/arm/mach-omap2/vc.h
+++ b/arch/arm/mach-omap2/vc.h
@@ -34,6 +34,7 @@ struct voltagedomain;
* @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
* @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
* @i2c_cfg_reg: I2C configuration register offset
+ * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register
* @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
* @i2c_mcode_mask: MCODE field mask for I2C config register
*
@@ -52,6 +53,7 @@ struct omap_vc_common {
u8 cmd_ret_shift;
u8 cmd_off_shift;
u8 i2c_cfg_reg;
+ u8 i2c_cfg_clear_mask;
u8 i2c_cfg_hsen_mask;
u8 i2c_mcode_mask;
};
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index 75bc4aa22b3a..71d74c9172c1 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = {
.cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT,
.cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT,
.cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT,
+ .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
.i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
.i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET,
.i2c_mcode_mask = OMAP3430_MCODE_MASK,
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
index 085e5d6a04fd..2abd5fa8a697 100644
--- a/arch/arm/mach-omap2/vc44xx_data.c
+++ b/arch/arm/mach-omap2/vc44xx_data.c
@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = {
.cmd_ret_shift = OMAP4430_RET_SHIFT,
.cmd_off_shift = OMAP4430_OFF_SHIFT,
.i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
+ .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
.i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
.i2c_mcode_mask = OMAP4430_HSMCODE_MASK,
};
diff --git a/arch/arm/mach-omap2/vp.h b/arch/arm/mach-omap2/vp.h
index 0fdf7080e4a6..7e0829682bd0 100644
--- a/arch/arm/mach-omap2/vp.h
+++ b/arch/arm/mach-omap2/vp.h
@@ -21,15 +21,6 @@
struct voltagedomain;
-/*
- * Voltage Processor (VP) identifiers
- */
-#define OMAP3_VP_VDD_MPU_ID 0
-#define OMAP3_VP_VDD_CORE_ID 1
-#define OMAP4_VP_VDD_CORE_ID 0
-#define OMAP4_VP_VDD_IVA_ID 1
-#define OMAP4_VP_VDD_MPU_ID 2
-
/* XXX document */
#define VP_IDLE_TIMEOUT 200
#define VP_TRANXDONE_TIMEOUT 300
diff --git a/arch/arm/mach-omap2/vp3xxx_data.c b/arch/arm/mach-omap2/vp3xxx_data.c
index 1914e026245e..b0590fe6ab01 100644
--- a/arch/arm/mach-omap2/vp3xxx_data.c
+++ b/arch/arm/mach-omap2/vp3xxx_data.c
@@ -28,8 +28,8 @@
#include "prm2xxx_3xxx.h"
static const struct omap_vp_ops omap3_vp_ops = {
- .check_txdone = omap3_prm_vp_check_txdone,
- .clear_txdone = omap3_prm_vp_clear_txdone,
+ .check_txdone = omap_prm_vp_check_txdone,
+ .clear_txdone = omap_prm_vp_clear_txdone,
};
/*
diff --git a/arch/arm/mach-omap2/vp44xx_data.c b/arch/arm/mach-omap2/vp44xx_data.c
index e62f6b018beb..2448bb9a8716 100644
--- a/arch/arm/mach-omap2/vp44xx_data.c
+++ b/arch/arm/mach-omap2/vp44xx_data.c
@@ -28,8 +28,8 @@
#include "vp.h"
static const struct omap_vp_ops omap4_vp_ops = {
- .check_txdone = omap4_prm_vp_check_txdone,
- .clear_txdone = omap4_prm_vp_clear_txdone,
+ .check_txdone = omap_prm_vp_check_txdone,
+ .clear_txdone = omap_prm_vp_clear_txdone,
};
/*
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 8896e71586f5..f09683687963 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -691,4 +691,13 @@ config SHARPSL_PM_MAX1111
config PXA310_ULPI
bool
+config PXA_SYSTEMS_CPLDS
+ tristate "Motherboard cplds"
+ default ARCH_LUBBOCK || MACH_MAINSTONE
+ help
+ This driver supports the Lubbock and Mainstone multifunction chip
+ found on the pxa25x development platform system (Lubbock) and pxa27x
+ development platform system (Mainstone). This IO board supports the
+ interrupts handling, ethernet controller, flash chips, etc ...
+
endif
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index eb0bf7678a99..4087d334ecdf 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -90,4 +90,5 @@ obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o
obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o
obj-$(CONFIG_MACH_ZIPIT2) += z2.o
+obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o
obj-$(CONFIG_TOSA_BT) += tosa-bt.o
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 958cd6af9384..1eecf794acd2 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -37,7 +37,9 @@
#define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100)
/* Board specific IRQs */
-#define LUBBOCK_IRQ(x) (IRQ_BOARD_START + (x))
+#define LUBBOCK_NR_IRQS IRQ_BOARD_START
+
+#define LUBBOCK_IRQ(x) (LUBBOCK_NR_IRQS + (x))
#define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0)
#define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1)
#define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */
@@ -47,8 +49,7 @@
#define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */
#define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6)
-#define LUBBOCK_SA1111_IRQ_BASE (IRQ_BOARD_START + 16)
-#define LUBBOCK_NR_IRQS (IRQ_BOARD_START + 16 + 55)
+#define LUBBOCK_SA1111_IRQ_BASE (LUBBOCK_NR_IRQS + 32)
#ifndef __ASSEMBLY__
extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set);
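For orientation, the renumbering above gives the following layout; this is a sketch assuming IRQ_BOARD_START from mach/irqs.h, and only the relationships matter:

	/* LUBBOCK_NR_IRQS         == IRQ_BOARD_START: .nr_irqs now covers only
	 *                            the PXA on-chip interrupts
	 * LUBBOCK_IRQ(0..6)       == IRQ_BOARD_START + 0..6, created at runtime
	 *                            by the pxa_cplds_irqs irq domain
	 * LUBBOCK_SA1111_IRQ_BASE == IRQ_BOARD_START + 32, leaving room for the
	 *                            32 CPLD lines ahead of the SA1111 block
	 */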
diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h
index 1bfc4e822a41..e82a7d31104e 100644
--- a/arch/arm/mach-pxa/include/mach/mainstone.h
+++ b/arch/arm/mach-pxa/include/mach/mainstone.h
@@ -120,7 +120,9 @@
#define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */
/* board specific IRQs */
-#define MAINSTONE_IRQ(x) (IRQ_BOARD_START + (x))
+#define MAINSTONE_NR_IRQS IRQ_BOARD_START
+
+#define MAINSTONE_IRQ(x) (MAINSTONE_NR_IRQS + (x))
#define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0)
#define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1)
#define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2)
@@ -136,6 +138,4 @@
#define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14)
#define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15)
-#define MAINSTONE_NR_IRQS (IRQ_BOARD_START + 16)
-
#endif
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index d8a1be619f21..4ac9ab80d24b 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -123,84 +124,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set)
}
EXPORT_SYMBOL(lubbock_set_misc_wr);
-static unsigned long lubbock_irq_enabled;
-
-static void lubbock_mask_irq(struct irq_data *d)
-{
- int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
- LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
-}
-
-static void lubbock_unmask_irq(struct irq_data *d)
-{
- int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
- /* the irq can be acknowledged only if deasserted, so it's done here */
- LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
- LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
-}
-
-static struct irq_chip lubbock_irq_chip = {
- .name = "FPGA",
- .irq_ack = lubbock_mask_irq,
- .irq_mask = lubbock_mask_irq,
- .irq_unmask = lubbock_unmask_irq,
-};
-
-static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
- do {
- /* clear our parent irq */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- if (likely(pending)) {
- irq = LUBBOCK_IRQ(0) + __ffs(pending);
- generic_handle_irq(irq);
- }
- pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
- } while (pending);
-}
-
-static void __init lubbock_init_irq(void)
-{
- int irq;
-
- pxa25x_init_irq();
-
- /* setup extra lubbock irqs */
- for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
- irq_set_chip_and_handler(irq, &lubbock_irq_chip,
- handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
-
- irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler);
- irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
-}
-
-#ifdef CONFIG_PM
-
-static void lubbock_irq_resume(void)
-{
- LUB_IRQ_MASK_EN = lubbock_irq_enabled;
-}
-
-static struct syscore_ops lubbock_irq_syscore_ops = {
- .resume = lubbock_irq_resume,
-};
-
-static int __init lubbock_irq_device_init(void)
-{
- if (machine_is_lubbock()) {
- register_syscore_ops(&lubbock_irq_syscore_ops);
- return 0;
- }
- return -ENODEV;
-}
-
-device_initcall(lubbock_irq_device_init);
-
-#endif
-
static int lubbock_udc_is_connected(void)
{
return (LUB_MISC_RD & (1 << 9)) == 0;
@@ -383,11 +306,38 @@ static struct platform_device lubbock_flash_device[2] = {
},
};
+static struct resource lubbock_cplds_resources[] = {
+ [0] = {
+ .start = LUBBOCK_FPGA_PHYS + 0xc0,
+ .end = LUBBOCK_FPGA_PHYS + 0xe0 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PXA_GPIO_TO_IRQ(0),
+ .end = PXA_GPIO_TO_IRQ(0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+ [2] = {
+ .start = LUBBOCK_IRQ(0),
+ .end = LUBBOCK_IRQ(6),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device lubbock_cplds_device = {
+ .name = "pxa_cplds_irqs",
+ .id = -1,
+ .resource = &lubbock_cplds_resources[0],
+ .num_resources = 3,
+};
+
+
static struct platform_device *devices[] __initdata = {
&sa1111_device,
&smc91x_device,
&lubbock_flash_device[0],
&lubbock_flash_device[1],
+ &lubbock_cplds_device,
};
static struct pxafb_mode_info sharp_lm8v31_mode = {
@@ -648,7 +598,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
/* Maintainer: MontaVista Software Inc. */
.map_io = lubbock_map_io,
.nr_irqs = LUBBOCK_NR_IRQS,
- .init_irq = lubbock_init_irq,
+ .init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_time = pxa_timer_init,
.init_machine = lubbock_init,
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 78b84c0dfc79..2c0658cf6be2 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -13,6 +13,7 @@
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
@@ -122,92 +123,6 @@ static unsigned long mainstone_pin_config[] = {
GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
};
-static unsigned long mainstone_irq_enabled;
-
-static void mainstone_mask_irq(struct irq_data *d)
-{
- int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
- MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
-}
-
-static void mainstone_unmask_irq(struct irq_data *d)
-{
- int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
- /* the irq can be acknowledged only if deasserted, so it's done here */
- MST_INTSETCLR &= ~(1 << mainstone_irq);
- MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
-}
-
-static struct irq_chip mainstone_irq_chip = {
- .name = "FPGA",
- .irq_ack = mainstone_mask_irq,
- .irq_mask = mainstone_mask_irq,
- .irq_unmask = mainstone_unmask_irq,
-};
-
-static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
- do {
- /* clear useless edge notification */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- if (likely(pending)) {
- irq = MAINSTONE_IRQ(0) + __ffs(pending);
- generic_handle_irq(irq);
- }
- pending = MST_INTSETCLR & mainstone_irq_enabled;
- } while (pending);
-}
-
-static void __init mainstone_init_irq(void)
-{
- int irq;
-
- pxa27x_init_irq();
-
- /* setup extra Mainstone irqs */
- for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
- irq_set_chip_and_handler(irq, &mainstone_irq_chip,
- handle_level_irq);
- if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
- else
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- set_irq_flags(MAINSTONE_IRQ(8), 0);
- set_irq_flags(MAINSTONE_IRQ(12), 0);
-
- MST_INTMSKENA = 0;
- MST_INTSETCLR = 0;
-
- irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler);
- irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
-}
-
-#ifdef CONFIG_PM
-
-static void mainstone_irq_resume(void)
-{
- MST_INTMSKENA = mainstone_irq_enabled;
-}
-
-static struct syscore_ops mainstone_irq_syscore_ops = {
- .resume = mainstone_irq_resume,
-};
-
-static int __init mainstone_irq_device_init(void)
-{
- if (machine_is_mainstone())
- register_syscore_ops(&mainstone_irq_syscore_ops);
-
- return 0;
-}
-
-device_initcall(mainstone_irq_device_init);
-
-#endif
-
-
static struct resource smc91x_resources[] = {
[0] = {
.start = (MST_ETH_PHYS + 0x300),
@@ -487,11 +402,37 @@ static struct platform_device mst_gpio_keys_device = {
},
};
+static struct resource mst_cplds_resources[] = {
+ [0] = {
+ .start = MST_FPGA_PHYS + 0xc0,
+ .end = MST_FPGA_PHYS + 0xe0 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PXA_GPIO_TO_IRQ(0),
+ .end = PXA_GPIO_TO_IRQ(0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+ [2] = {
+ .start = MAINSTONE_IRQ(0),
+ .end = MAINSTONE_IRQ(15),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mst_cplds_device = {
+ .name = "pxa_cplds_irqs",
+ .id = -1,
+ .resource = &mst_cplds_resources[0],
+ .num_resources = 3,
+};
+
static struct platform_device *platform_devices[] __initdata = {
&smc91x_device,
&mst_flash_device[0],
&mst_flash_device[1],
&mst_gpio_keys_device,
+ &mst_cplds_device,
};
static struct pxaohci_platform_data mainstone_ohci_platform_data = {
@@ -718,7 +659,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
.atag_offset = 0x100, /* BLOB boot parameter setting */
.map_io = mainstone_map_io,
.nr_irqs = MAINSTONE_NR_IRQS,
- .init_irq = mainstone_init_irq,
+ .init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_time = pxa_timer_init,
.init_machine = mainstone_init,
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
new file mode 100644
index 000000000000..f1aeb54fabe3
--- /dev/null
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -0,0 +1,200 @@
+/*
+ * Intel Reference Systems cplds
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * CPLD motherboard driver, supporting the Lubbock and Mainstone SoC boards.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#define FPGA_IRQ_MASK_EN 0x0
+#define FPGA_IRQ_SET_CLR 0x10
+
+#define CPLDS_NB_IRQ 32
+
+struct cplds {
+ void __iomem *base;
+ int irq;
+ unsigned int irq_mask;
+ struct gpio_desc *gpio0;
+ struct irq_domain *irqdomain;
+};
+
+static irqreturn_t cplds_irq_handler(int in_irq, void *d)
+{
+ struct cplds *fpga = d;
+ unsigned long pending;
+ unsigned int bit;
+
+ pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
+ for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
+ generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
+
+ return IRQ_HANDLED;
+}
+
+static void cplds_irq_mask_ack(struct irq_data *d)
+{
+ struct cplds *fpga = irq_data_get_irq_chip_data(d);
+ unsigned int cplds_irq = irqd_to_hwirq(d);
+ unsigned int set, bit = BIT(cplds_irq);
+
+ fpga->irq_mask &= ~bit;
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+ set = readl(fpga->base + FPGA_IRQ_SET_CLR);
+ writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
+}
+
+static void cplds_irq_unmask(struct irq_data *d)
+{
+ struct cplds *fpga = irq_data_get_irq_chip_data(d);
+ unsigned int cplds_irq = irqd_to_hwirq(d);
+ unsigned int bit = BIT(cplds_irq);
+
+ fpga->irq_mask |= bit;
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+}
+
+static struct irq_chip cplds_irq_chip = {
+ .name = "pxa_cplds",
+ .irq_mask_ack = cplds_irq_mask_ack,
+ .irq_unmask = cplds_irq_unmask,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct cplds *fpga = d->host_data;
+
+ irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, fpga);
+
+ return 0;
+}
+
+static const struct irq_domain_ops cplds_irq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = cplds_irq_domain_map,
+};
+
+static int cplds_resume(struct platform_device *pdev)
+{
+ struct cplds *fpga = platform_get_drvdata(pdev);
+
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+
+ return 0;
+}
+
+static int cplds_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct cplds *fpga;
+ int ret;
+ int base_irq = 0;
+ unsigned long irqflags = 0;
+
+ fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
+ if (!fpga)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res) {
+ fpga->irq = (unsigned int)res->start;
+ irqflags = res->flags;
+ }
+ if (!fpga->irq)
+ return -ENODEV;
+
+ base_irq = platform_get_irq(pdev, 1);
+ if (base_irq < 0)
+ base_irq = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fpga->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fpga->base))
+ return PTR_ERR(fpga->base);
+
+ platform_set_drvdata(pdev, fpga);
+
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+ writel(0, fpga->base + FPGA_IRQ_SET_CLR);
+
+ ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler,
+ irqflags, dev_name(&pdev->dev), fpga);
+ if (ret == -ENOSYS)
+ return -EPROBE_DEFER;
+
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request main irq%d: %d\n",
+ fpga->irq, ret);
+ return ret;
+ }
+
+ irq_set_irq_wake(fpga->irq, 1);
+ fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+ CPLDS_NB_IRQ,
+ &cplds_irq_domain_ops, fpga);
+ if (!fpga->irqdomain)
+ return -ENODEV;
+
+ if (base_irq) {
+ ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
+ CPLDS_NB_IRQ);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
+ base_irq, base_irq + CPLDS_NB_IRQ);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cplds_remove(struct platform_device *pdev)
+{
+ struct cplds *fpga = platform_get_drvdata(pdev);
+
+ irq_set_chip_and_handler(fpga->irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cplds_id_table[] = {
+ { .compatible = "intel,lubbock-cplds-irqs", },
+ { .compatible = "intel,mainstone-cplds-irqs", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cplds_id_table);
+
+static struct platform_driver cplds_driver = {
+ .driver = {
+ .name = "pxa_cplds_irqs",
+ .of_match_table = of_match_ptr(cplds_id_table),
+ },
+ .probe = cplds_probe,
+ .remove = cplds_remove,
+ .resume = cplds_resume,
+};
+
+module_platform_driver(cplds_driver);
+
+MODULE_DESCRIPTION("PXA Cplds interrupts driver");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_LICENSE("GPL");
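A hedged sketch of how a board-side consumer uses the legacy numbers that cplds_probe() wires up via irq_create_strict_mappings(); the handler and device pointer are hypothetical:

#include <linux/interrupt.h>
#include <mach/lubbock.h>	/* LUBBOCK_SD_IRQ */

static irqreturn_t sd_detect_isr(int irq, void *data)
{
	/* the CPLD line is handled as a level interrupt by the driver above */
	return IRQ_HANDLED;
}

static int example_request_cpld_irq(void *host)
{
	/* LUBBOCK_SD_IRQ resolves to one of the strict mappings created above */
	return request_irq(LUBBOCK_SD_IRQ, sd_detect_isr, 0, "sd-detect", host);
}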
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 48003ea652b9..2256cd1e25d1 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -22,7 +22,4 @@ config ARCH_MSM8974
bool "Enable support for MSM8974"
select HAVE_ARM_ARCH_TIMER
-config QCOM_SCM
- bool
-
endif
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index 8f756ae1ae31..e324375fa919 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,5 +1,2 @@
obj-y := board.o
obj-$(CONFIG_SMP) += platsmp.o
-obj-$(CONFIG_QCOM_SCM) += scm.o scm-boot.o
-
-CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index 09cffed4c0a4..5cde63a64b34 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -17,10 +17,10 @@
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <linux/qcom_scm.h>
#include <asm/smp_plat.h>
-#include "scm-boot.h"
#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x35a0
#define SCSS_CPU1CORE_RESET 0x2d80
@@ -319,25 +319,10 @@ static int kpssv2_boot_secondary(unsigned int cpu, struct task_struct *idle)
static void __init qcom_smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, map;
- unsigned int flags = 0;
- static const int cold_boot_flags[] = {
- 0,
- SCM_FLAG_COLDBOOT_CPU1,
- SCM_FLAG_COLDBOOT_CPU2,
- SCM_FLAG_COLDBOOT_CPU3,
- };
-
- for_each_present_cpu(cpu) {
- map = cpu_logical_map(cpu);
- if (WARN_ON(map >= ARRAY_SIZE(cold_boot_flags))) {
- set_cpu_present(cpu, false);
- continue;
- }
- flags |= cold_boot_flags[map];
- }
+ int cpu;
- if (scm_set_boot_addr(virt_to_phys(secondary_startup_arm), flags)) {
+ if (qcom_scm_set_cold_boot_addr(secondary_startup_arm,
+ cpu_present_mask)) {
for_each_present_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
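The replacement API hides the per-CPU cold-boot SCM flags behind a cpumask. A minimal usage sketch; the warning message is illustrative:

	int err;

	err = qcom_scm_set_cold_boot_addr(secondary_startup_arm,
					  cpu_present_mask);
	if (err)
		pr_warn("qcom_scm: failed to set cold boot address (%d)\n", err);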
diff --git a/arch/arm/mach-qcom/scm-boot.c b/arch/arm/mach-qcom/scm-boot.c
deleted file mode 100644
index e8ff7beb6218..000000000000
--- a/arch/arm/mach-qcom/scm-boot.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "scm.h"
-#include "scm-boot.h"
-
-/*
- * Set the cold/warm boot address for one of the CPU cores.
- */
-int scm_set_boot_addr(u32 addr, int flags)
-{
- struct {
- __le32 flags;
- __le32 addr;
- } cmd;
-
- cmd.addr = cpu_to_le32(addr);
- cmd.flags = cpu_to_le32(flags);
- return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
- &cmd, sizeof(cmd), NULL, 0);
-}
-EXPORT_SYMBOL(scm_set_boot_addr);
diff --git a/arch/arm/mach-qcom/scm.c b/arch/arm/mach-qcom/scm.c
deleted file mode 100644
index 1d9cf18c7091..000000000000
--- a/arch/arm/mach-qcom/scm.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-
-#include <asm/outercache.h>
-#include <asm/cacheflush.h>
-
-#include "scm.h"
-
-#define SCM_ENOMEM -5
-#define SCM_EOPNOTSUPP -4
-#define SCM_EINVAL_ADDR -3
-#define SCM_EINVAL_ARG -2
-#define SCM_ERROR -1
-#define SCM_INTERRUPTED 1
-
-static DEFINE_MUTEX(scm_lock);
-
-/**
- * struct scm_command - one SCM command buffer
- * @len: total available memory for command and response
- * @buf_offset: start of command buffer
- * @resp_hdr_offset: start of response buffer
- * @id: command to be executed
- * @buf: buffer returned from scm_get_command_buffer()
- *
- * An SCM command is laid out in memory as follows:
- *
- * ------------------- <--- struct scm_command
- * | command header |
- * ------------------- <--- scm_get_command_buffer()
- * | command buffer |
- * ------------------- <--- struct scm_response and
- * | response header | scm_command_to_response()
- * ------------------- <--- scm_get_response_buffer()
- * | response buffer |
- * -------------------
- *
- * There can be arbitrary padding between the headers and buffers so
- * you should always use the appropriate scm_get_*_buffer() routines
- * to access the buffers in a safe manner.
- */
-struct scm_command {
- __le32 len;
- __le32 buf_offset;
- __le32 resp_hdr_offset;
- __le32 id;
- __le32 buf[0];
-};
-
-/**
- * struct scm_response - one SCM response buffer
- * @len: total available memory for response
- * @buf_offset: start of response data relative to start of scm_response
- * @is_complete: indicates if the command has finished processing
- */
-struct scm_response {
- __le32 len;
- __le32 buf_offset;
- __le32 is_complete;
-};
-
-/**
- * alloc_scm_command() - Allocate an SCM command
- * @cmd_size: size of the command buffer
- * @resp_size: size of the response buffer
- *
- * Allocate an SCM command, including enough room for the command
- * and response headers as well as the command and response buffers.
- *
- * Returns a valid &scm_command on success or %NULL if the allocation fails.
- */
-static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
-{
- struct scm_command *cmd;
- size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
- resp_size;
- u32 offset;
-
- cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
- if (cmd) {
- cmd->len = cpu_to_le32(len);
- offset = offsetof(struct scm_command, buf);
- cmd->buf_offset = cpu_to_le32(offset);
- cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
- }
- return cmd;
-}
-
-/**
- * free_scm_command() - Free an SCM command
- * @cmd: command to free
- *
- * Free an SCM command.
- */
-static inline void free_scm_command(struct scm_command *cmd)
-{
- kfree(cmd);
-}
-
-/**
- * scm_command_to_response() - Get a pointer to a scm_response
- * @cmd: command
- *
- * Returns a pointer to a response for a command.
- */
-static inline struct scm_response *scm_command_to_response(
- const struct scm_command *cmd)
-{
- return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
-}
-
-/**
- * scm_get_command_buffer() - Get a pointer to a command buffer
- * @cmd: command
- *
- * Returns a pointer to the command buffer of a command.
- */
-static inline void *scm_get_command_buffer(const struct scm_command *cmd)
-{
- return (void *)cmd->buf;
-}
-
-/**
- * scm_get_response_buffer() - Get a pointer to a response buffer
- * @rsp: response
- *
- * Returns a pointer to a response buffer of a response.
- */
-static inline void *scm_get_response_buffer(const struct scm_response *rsp)
-{
- return (void *)rsp + le32_to_cpu(rsp->buf_offset);
-}
-
-static int scm_remap_error(int err)
-{
- pr_err("scm_call failed with error code %d\n", err);
- switch (err) {
- case SCM_ERROR:
- return -EIO;
- case SCM_EINVAL_ADDR:
- case SCM_EINVAL_ARG:
- return -EINVAL;
- case SCM_EOPNOTSUPP:
- return -EOPNOTSUPP;
- case SCM_ENOMEM:
- return -ENOMEM;
- }
- return -EINVAL;
-}
-
-static u32 smc(u32 cmd_addr)
-{
- int context_id;
- register u32 r0 asm("r0") = 1;
- register u32 r1 asm("r1") = (u32)&context_id;
- register u32 r2 asm("r2") = cmd_addr;
- do {
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r0")
- __asmeq("%2", "r1")
- __asmeq("%3", "r2")
-#ifdef REQUIRES_SEC
- ".arch_extension sec\n"
-#endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0)
- : "r" (r0), "r" (r1), "r" (r2)
- : "r3");
- } while (r0 == SCM_INTERRUPTED);
-
- return r0;
-}
-
-static int __scm_call(const struct scm_command *cmd)
-{
- int ret;
- u32 cmd_addr = virt_to_phys(cmd);
-
- /*
- * Flush the command buffer so that the secure world sees
- * the correct data.
- */
- __cpuc_flush_dcache_area((void *)cmd, cmd->len);
- outer_flush_range(cmd_addr, cmd_addr + cmd->len);
-
- ret = smc(cmd_addr);
- if (ret < 0)
- ret = scm_remap_error(ret);
-
- return ret;
-}
-
-static void scm_inv_range(unsigned long start, unsigned long end)
-{
- u32 cacheline_size, ctr;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
- start = round_down(start, cacheline_size);
- end = round_up(end, cacheline_size);
- outer_inv_range(start, end);
- while (start < end) {
- asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
- : "memory");
- start += cacheline_size;
- }
- dsb();
- isb();
-}
-
-/**
- * scm_call() - Send an SCM command
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @cmd_buf: command buffer
- * @cmd_len: length of the command buffer
- * @resp_buf: response buffer
- * @resp_len: length of the response buffer
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- *
- * A note on cache maintenance:
- * Note that any buffers that are expected to be accessed by the secure world
- * must be flushed before invoking scm_call and invalidated in the cache
- * immediately after scm_call returns. Cache maintenance on the command and
- * response buffers is taken care of by scm_call; however, callers are
- * responsible for any other cached buffers passed over to the secure world.
- */
-int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
- void *resp_buf, size_t resp_len)
-{
- int ret;
- struct scm_command *cmd;
- struct scm_response *rsp;
- unsigned long start, end;
-
- cmd = alloc_scm_command(cmd_len, resp_len);
- if (!cmd)
- return -ENOMEM;
-
- cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
- if (cmd_buf)
- memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);
-
- mutex_lock(&scm_lock);
- ret = __scm_call(cmd);
- mutex_unlock(&scm_lock);
- if (ret)
- goto out;
-
- rsp = scm_command_to_response(cmd);
- start = (unsigned long)rsp;
-
- do {
- scm_inv_range(start, start + sizeof(*rsp));
- } while (!rsp->is_complete);
-
- end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
- scm_inv_range(start, end);
-
- if (resp_buf)
- memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
-out:
- free_scm_command(cmd);
- return ret;
-}
-EXPORT_SYMBOL(scm_call);
-
-u32 scm_get_version(void)
-{
- int context_id;
- static u32 version = -1;
- register u32 r0 asm("r0");
- register u32 r1 asm("r1");
-
- if (version != -1)
- return version;
-
- mutex_lock(&scm_lock);
-
- r0 = 0x1 << 8;
- r1 = (u32)&context_id;
- do {
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r1")
- __asmeq("%2", "r0")
- __asmeq("%3", "r1")
-#ifdef REQUIRES_SEC
- ".arch_extension sec\n"
-#endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0), "=r" (r1)
- : "r" (r0), "r" (r1)
- : "r2", "r3");
- } while (r0 == SCM_INTERRUPTED);
-
- version = r1;
- mutex_unlock(&scm_lock);
-
- return version;
-}
-EXPORT_SYMBOL(scm_get_version);
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index f26fcdca2445..5b4ca3c3c879 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -55,7 +55,7 @@ static int pmu_power_domain_is_on(int pd)
return !(val & BIT(pd));
}
-struct reset_control *rockchip_get_core_reset(int cpu)
+static struct reset_control *rockchip_get_core_reset(int cpu)
{
struct device *dev = get_cpu_device(cpu);
struct device_node *np;
@@ -201,7 +201,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
return 0;
}
-static struct regmap_config rockchip_pmu_regmap_config = {
+static const struct regmap_config rockchip_pmu_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c
index 50cb781aaa36..b0dcbe28f78c 100644
--- a/arch/arm/mach-rockchip/pm.c
+++ b/arch/arm/mach-rockchip/pm.c
@@ -75,9 +75,20 @@ static void rk3288_slp_mode_set(int level)
regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON,
&rk3288_pmu_pwr_mode_con);
- /* set bit 8 so that system will resume to FAST_BOOT_ADDR */
+ /*
+ * SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR
+ * PCLK_WDT_GATE - disable WDT during suspend.
+ */
regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
- SGRF_FAST_BOOT_EN | SGRF_FAST_BOOT_EN_WRITE);
+ SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN
+ | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE);
+
+ /*
+ * The dapswjdp cannot auto-reset before resume, which may cause it to
+ * access an illegal address during resume. Disable it before suspend;
+ * the MASKROM will enable it again on resume.
+ */
+ regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0, SGRF_DAPDEVICEEN_WRITE);
/* booting address of resuming system is from this register value */
regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR,
@@ -122,7 +133,8 @@ static void rk3288_slp_mode_set_resume(void)
rk3288_pmu_pwr_mode_con);
regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
- rk3288_sgrf_soc_con0 | SGRF_FAST_BOOT_EN_WRITE);
+ rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE
+ | SGRF_FAST_BOOT_EN_WRITE);
}
static int rockchip_lpmode_enter(unsigned long arg)
@@ -209,6 +221,9 @@ static int rk3288_suspend_init(struct device_node *np)
memcpy(rk3288_bootram_base, rockchip_slp_cpu_resume,
rk3288_bootram_sz);
+ regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT, OSC_STABL_CNT_THRESH);
+ regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, PMU_STABL_CNT_THRESH);
+
return 0;
}
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h
index 7c889c04604b..3e8d39c0c3d5 100644
--- a/arch/arm/mach-rockchip/pm.h
+++ b/arch/arm/mach-rockchip/pm.h
@@ -50,9 +50,15 @@ static inline void rockchip_suspend_init(void)
#define RK3288_SGRF_SOC_CON0 (0x0000)
#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120)
+#define SGRF_PCLK_WDT_GATE BIT(6)
+#define SGRF_PCLK_WDT_GATE_WRITE BIT(22)
#define SGRF_FAST_BOOT_EN BIT(8)
#define SGRF_FAST_BOOT_EN_WRITE BIT(24)
+#define RK3288_SGRF_CPU_CON0 (0x40)
+#define SGRF_DAPDEVICEEN BIT(0)
+#define SGRF_DAPDEVICEEN_WRITE BIT(16)
+
#define RK3288_CRU_MODE_CON 0x50
#define RK3288_CRU_SEL0_CON 0x60
#define RK3288_CRU_SEL1_CON 0x64
@@ -63,6 +69,10 @@ static inline void rockchip_suspend_init(void)
/* PMU_WAKEUP_CFG1 bits */
#define PMU_ARMINT_WAKEUP_EN BIT(0)
+/* wait 30 ms for the OSC to stabilize and 30 ms for the PMIC to stabilize */
+#define OSC_STABL_CNT_THRESH (32 * 30)
+#define PMU_STABL_CNT_THRESH (32 * 30)
+
enum rk3288_pwr_mode_con {
PMU_PWR_MODE_EN = 0,
PMU_CLK_CORE_SRC_GATE_EN,
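The *_WRITE companions above follow the usual Rockchip GRF/SGRF convention: bits 31:16 are per-bit write enables for bits 15:0, so a single regmap_write() can update one field without a read-modify-write cycle. Sketch:

	/* sets FAST_BOOT_EN only; other SOC_CON0 bits are untouched because
	 * their write-enable bits stay zero */
	regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
		     SGRF_FAST_BOOT_EN | SGRF_FAST_BOOT_EN_WRITE);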
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index d360ec044b66..b6cf3b449428 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -30,11 +30,30 @@
#include "pm.h"
#define RK3288_GRF_SOC_CON0 0x244
+#define RK3288_TIMER6_7_PHYS 0xff810000
static void __init rockchip_timer_init(void)
{
if (of_machine_is_compatible("rockchip,rk3288")) {
struct regmap *grf;
+ void __iomem *reg_base;
+
+ /*
+ * Most, if not all, U-Boot versions for rk3288 don't enable timer7,
+ * which is needed for the architected timer to work. So make sure
+ * it is running during early boot.
+ */
+ reg_base = ioremap(RK3288_TIMER6_7_PHYS, SZ_16K);
+ if (reg_base) {
+ writel(0, reg_base + 0x30);
+ writel(0xffffffff, reg_base + 0x20);
+ writel(0xffffffff, reg_base + 0x24);
+ writel(1, reg_base + 0x30);
+ dsb();
+ iounmap(reg_base);
+ } else {
+ pr_err("rockchip: could not map timer7 registers\n");
+ }
/*
* Disable auto jtag/sdmmc switching that causes issues
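For reference, the raw offsets written in rockchip_timer_init() above address timer7 inside the TIMER6/TIMER7 block; the names below are assumptions added for readability, not definitions from this patch:

	#define EXAMPLE_TIMER7_LOAD_COUNT0	0x20	/* written 0xffffffff */
	#define EXAMPLE_TIMER7_LOAD_COUNT1	0x24	/* written 0xffffffff */
	#define EXAMPLE_TIMER7_CONTROL		0x30	/* 0 = stop, 1 = enable */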
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 79c49ff77f6e..23bec3a85b22 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -39,14 +39,14 @@ config CPU_S3C2412
bool "SAMSUNG S3C2412"
select CPU_ARM926T
select S3C2412_COMMON_CLK
- select S3C2412_PM if PM
+ select S3C2412_PM if PM_SLEEP
help
Support for the S3C2412 and S3C2413 SoCs from the S3C24XX line
config CPU_S3C2416
bool "SAMSUNG S3C2416/S3C2450"
select CPU_ARM926T
- select S3C2416_PM if PM
+ select S3C2416_PM if PM_SLEEP
select S3C2443_COMMON_CLK
help
Support for the S3C2416 SoC from the S3C24XX line
@@ -55,7 +55,7 @@ config CPU_S3C2440
bool "SAMSUNG S3C2440"
select CPU_ARM920T
select S3C2410_COMMON_CLK
- select S3C2410_PM if PM
+ select S3C2410_PM if PM_SLEEP
help
Support for S3C2440 Samsung Mobile CPU based systems.
@@ -63,7 +63,7 @@ config CPU_S3C2442
bool "SAMSUNG S3C2442"
select CPU_ARM920T
select S3C2410_COMMON_CLK
- select S3C2410_PM if PM
+ select S3C2410_PM if PM_SLEEP
help
Support for S3C2442 Samsung Mobile CPU based systems.
@@ -228,11 +228,6 @@ config H1940BT
This is a simple driver that is able to control
the state of built in bluetooth chip on h1940.
-config PM_H1940
- bool
- help
- Internal node for H1940 and related PM
-
config MACH_N30
bool "Acer N30 family"
select S3C_DEV_NAND
@@ -362,6 +357,7 @@ if CPU_S3C2416
config S3C2416_PM
bool
select S3C2412_PM_SLEEP
+ select SAMSUNG_WAKEMASK
help
Internal config node to apply S3C2416 power management
@@ -584,6 +580,11 @@ config MACH_SMDK2443
endif # CPU_S3C2443
+config PM_H1940
+ bool
+ help
+ Internal node for H1940 and related PM
+
endmenu # SAMSUNG S3C24XX SoCs Support
endif # ARCH_S3C24XX
diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile
index b40a22fe082a..05920c8a5764 100644
--- a/arch/arm/mach-s3c24xx/Makefile
+++ b/arch/arm/mach-s3c24xx/Makefile
@@ -32,7 +32,8 @@ obj-$(CONFIG_CPU_S3C2443) += s3c2443.o
# PM
-obj-$(CONFIG_PM) += pm.o irq-pm.o sleep.o
+obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_PM_SLEEP) += irq-pm.o sleep.o
# common code
diff --git a/arch/arm/mach-s3c24xx/include/mach/pm-core.h b/arch/arm/mach-s3c24xx/include/mach/pm-core.h
index 2eef7e6f7675..69459dbbdcad 100644
--- a/arch/arm/mach-s3c24xx/include/mach/pm-core.h
+++ b/arch/arm/mach-s3c24xx/include/mach/pm-core.h
@@ -10,6 +10,11 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "regs-clock.h"
+#include "regs-irq.h"
static inline void s3c_pm_debug_init_uart(void)
{
@@ -42,8 +47,23 @@ static inline void s3c_pm_arch_stop_clocks(void)
__raw_writel(0x00, S3C2410_CLKCON); /* turn off clocks over sleep */
}
-static void s3c_pm_show_resume_irqs(int start, unsigned long which,
- unsigned long mask);
+/* s3c_pm_show_resume_irqs
+ *
+ * print any IRQs asserted at resume time (i.e. the ones we woke up from)
+ */
+static inline void s3c_pm_show_resume_irqs(int start, unsigned long which,
+ unsigned long mask)
+{
+ int i;
+
+ which &= ~mask;
+
+ for (i = 0; i <= 31; i++) {
+ if (which & (1L<<i)) {
+ S3C_PMDBG("IRQ %d asserted at resume\n", start+i);
+ }
+ }
+}
static inline void s3c_pm_arch_show_resume_irqs(void)
{
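The helper takes a pending-source value and a wakeup mask; a hedged sketch of a typical caller, with register and mask names taken from the existing s3c24xx PM code but treated as assumptions here:

	s3c_pm_show_resume_irqs(IRQ_EINT0,
				__raw_readl(S3C2410_SRCPND),
				s3c_irqwake_intmask);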
diff --git a/arch/arm/mach-s3c24xx/pm-s3c2416.c b/arch/arm/mach-s3c24xx/pm-s3c2416.c
index 44923895f558..c0e328e37bd6 100644
--- a/arch/arm/mach-s3c24xx/pm-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/pm-s3c2416.c
@@ -23,6 +23,7 @@
#include "s3c2412-power.h"
+#ifdef CONFIG_PM_SLEEP
extern void s3c2412_sleep_enter(void);
static int s3c2416_cpu_suspend(unsigned long arg)
@@ -70,7 +71,7 @@ static __init int s3c2416_pm_init(void)
}
arch_initcall(s3c2416_pm_init);
-
+#endif
static void s3c2416_pm_resume(void)
{
diff --git a/arch/arm/mach-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c
index b19256ec8d40..5d510bca0844 100644
--- a/arch/arm/mach-s3c24xx/pm.c
+++ b/arch/arm/mach-s3c24xx/pm.c
@@ -50,6 +50,7 @@
#define PFX "s3c24xx-pm: "
+#ifdef CONFIG_PM_SLEEP
static struct sleep_save core_save[] = {
/* we restore the timings here, with the proviso that the board
* brings the system up in an slower, or equal frequency setting
@@ -67,6 +68,7 @@ static struct sleep_save core_save[] = {
SAVE_ITEM(S3C2410_BANKCON4),
SAVE_ITEM(S3C2410_BANKCON5),
};
+#endif
/* s3c_pm_check_resume_pin
*
@@ -121,7 +123,7 @@ void s3c_pm_configure_extint(void)
}
}
-
+#ifdef CONFIG_PM_SLEEP
void s3c_pm_restore_core(void)
{
s3c_pm_do_restore_core(core_save, ARRAY_SIZE(core_save));
@@ -131,4 +133,4 @@ void s3c_pm_save_core(void)
{
s3c_pm_do_save(core_save, ARRAY_SIZE(core_save));
}
-
+#endif
diff --git a/arch/arm/mach-s3c24xx/s3c2410.c b/arch/arm/mach-s3c24xx/s3c2410.c
index 2a6985a4a0ff..5061d66ca10c 100644
--- a/arch/arm/mach-s3c24xx/s3c2410.c
+++ b/arch/arm/mach-s3c24xx/s3c2410.c
@@ -121,7 +121,7 @@ int __init s3c2410_init(void)
{
printk("S3C2410: Initialising architecture\n");
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&s3c2410_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
#endif
diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c
index ecf2c77ab88b..64a13605cfc3 100644
--- a/arch/arm/mach-s3c24xx/s3c2412.c
+++ b/arch/arm/mach-s3c24xx/s3c2412.c
@@ -172,7 +172,7 @@ int __init s3c2412_init(void)
{
printk("S3C2412: Initialising architecture\n");
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&s3c2412_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
#endif
diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c
index bfd4da86deb8..3f8ca2a3ef17 100644
--- a/arch/arm/mach-s3c24xx/s3c2416.c
+++ b/arch/arm/mach-s3c24xx/s3c2416.c
@@ -98,7 +98,7 @@ int __init s3c2416_init(void)
s3c_adc_setname("s3c2416-adc");
s3c_rtc_setname("s3c2416-rtc");
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&s3c2416_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
register_syscore_ops(&s3c2416_irq_syscore_ops);
diff --git a/arch/arm/mach-s3c24xx/s3c2440.c b/arch/arm/mach-s3c24xx/s3c2440.c
index 03d379f1fc52..eb733555fab5 100644
--- a/arch/arm/mach-s3c24xx/s3c2440.c
+++ b/arch/arm/mach-s3c24xx/s3c2440.c
@@ -57,11 +57,11 @@ int __init s3c2440_init(void)
/* register suspend/resume handlers */
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&s3c2410_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
-#endif
register_syscore_ops(&s3c244x_pm_syscore_ops);
+#endif
/* register our system device for everything else */
diff --git a/arch/arm/mach-s3c24xx/s3c2442.c b/arch/arm/mach-s3c24xx/s3c2442.c
index 7b043349f1c8..893998ede022 100644
--- a/arch/arm/mach-s3c24xx/s3c2442.c
+++ b/arch/arm/mach-s3c24xx/s3c2442.c
@@ -60,11 +60,11 @@ int __init s3c2442_init(void)
{
printk("S3C2442: Initialising architecture\n");
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&s3c2410_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
-#endif
register_syscore_ops(&s3c244x_pm_syscore_ops);
+#endif
return device_register(&s3c2442_dev);
}
diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c
index 177f97802745..b14119585dc7 100644
--- a/arch/arm/mach-s3c24xx/s3c244x.c
+++ b/arch/arm/mach-s3c24xx/s3c244x.c
@@ -108,7 +108,7 @@ static int __init s3c2442_core_init(void)
core_initcall(s3c2442_core_init);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static struct sleep_save s3c244x_sleep[] = {
SAVE_ITEM(S3C2440_DSC0),
SAVE_ITEM(S3C2440_DSC1),
@@ -127,12 +127,9 @@ static void s3c244x_resume(void)
{
s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep));
}
-#else
-#define s3c244x_suspend NULL
-#define s3c244x_resume NULL
-#endif
struct syscore_ops s3c244x_pm_syscore_ops = {
.suspend = s3c244x_suspend,
.resume = s3c244x_resume,
};
+#endif
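The PM -> PM_SLEEP conversion repeats across the s3c24xx and s3c64xx files in this series: both the syscore ops and their registration end up under CONFIG_PM_SLEEP. A condensed sketch of the pattern with placeholder names:

#ifdef CONFIG_PM_SLEEP
static int example_suspend(void)
{
	/* save state that does not survive the sleep transition */
	return 0;
}

static void example_resume(void)
{
	/* restore the saved state */
}

struct syscore_ops example_pm_syscore_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};
#endif

and the registration in the SoC init path stays guarded the same way:

#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&example_pm_syscore_ops);
#endif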
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 26ca2427e53d..eff95e950d81 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -189,6 +189,7 @@ endchoice
config SMDK6410_WM1190_EV1
bool "Support Wolfson Microelectronics 1190-EV1 PMIC card"
depends on MACH_SMDK6410
+ depends on I2C=y
select MFD_WM8350_I2C
select REGULATOR
select REGULATOR_WM8350
@@ -203,6 +204,7 @@ config SMDK6410_WM1190_EV1
config SMDK6410_WM1192_EV1
bool "Support Wolfson Microelectronics 1192-EV1 PMIC card"
depends on MACH_SMDK6410
+ depends on I2C=y
select MFD_WM831X
select MFD_WM831X_I2C
select REGULATOR
@@ -269,8 +271,8 @@ config MACH_SMARTQ7
config MACH_WLF_CRAGG_6410
bool "Wolfson Cragganmore 6410"
+ depends on I2C=y
select CPU_S3C6410
- select I2C
select LEDS_GPIO_REGISTER
select S3C64XX_DEV_SPI0
select S3C64XX_SETUP_FB_24BPP
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index 12f67b61ca5f..17f4b07ec763 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -16,7 +16,8 @@ obj-$(CONFIG_CPU_S3C6410) += s3c6410.o
# PM
-obj-$(CONFIG_PM) += pm.o irq-pm.o sleep.o
+obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_PM_SLEEP) += irq-pm.o sleep.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
# DMA support
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index 2eb072440dfa..93aa8cb70195 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -16,7 +16,7 @@
#include <linux/export.h>
#include <linux/time.h>
-#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
#include <mach/map.h>
diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
index 7bc66682687e..dcbe17f5e5f8 100644
--- a/arch/arm/mach-s3c64xx/crag6410.h
+++ b/arch/arm/mach-s3c64xx/crag6410.h
@@ -14,6 +14,7 @@
#include <mach/gpio-samsung.h>
#define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
+#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
#define PCA935X_GPIO_BASE GPIO_BOARD_START
#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index 10b913baab28..65c426bc45f7 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
static struct wm831x_pdata crag_pmic_pdata = {
.wm831x_num = 1,
+ .irq_base = BANFF_PMIC_IRQ_BASE,
.gpio_base = BANFF_PMIC_GPIO_BASE,
.soft_shutdown = true,
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 661eb662d051..b7447a92276e 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -209,7 +209,7 @@ static struct platform_device smdk6410_smsc911x = {
};
#ifdef CONFIG_REGULATOR
-static struct regulator_consumer_supply smdk6410_b_pwr_5v_consumers[] __initdata = {
+static struct regulator_consumer_supply smdk6410_b_pwr_5v_consumers[] = {
REGULATOR_SUPPLY("PVDD", "0-001b"),
REGULATOR_SUPPLY("AVDD", "0-001b"),
};
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index aaf7bea4032f..75b14e756383 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -194,6 +194,7 @@ void s3c_pm_debug_smdkled(u32 set, u32 clear)
}
#endif
+#ifdef CONFIG_PM_SLEEP
static struct sleep_save core_save[] = {
SAVE_ITEM(S3C64XX_MEM0DRVCON),
SAVE_ITEM(S3C64XX_MEM1DRVCON),
@@ -238,6 +239,7 @@ void s3c_pm_save_core(void)
s3c_pm_do_save(misc_save, ARRAY_SIZE(misc_save));
s3c_pm_do_save(core_save, ARRAY_SIZE(core_save));
}
+#endif
/* since both s3c6400 and s3c6410 share the same sleep pm calls, we
* put the per-cpu code in here until any new cpu comes along and changes
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S
index 7c43ddd33ba8..dfbfc0f7f8b8 100644
--- a/arch/arm/mach-s5pv210/sleep.S
+++ b/arch/arm/mach-s5pv210/sleep.S
@@ -14,7 +14,7 @@
#include <linux/linkage.h>
- .data
+ .text
.align
/*
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 2f36c85eec4b..0fb484221c90 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -62,6 +62,10 @@ config ARCH_R8A7740
select ARCH_RMOBILE
select RENESAS_INTC_IRQPIN
+config ARCH_R8A7778
+ bool "R-Car M1A (R8A77781)"
+ select ARCH_RCAR_GEN1
+
config ARCH_R8A7779
bool "R-Car H1 (R8A77790)"
select ARCH_RCAR_GEN1
@@ -69,15 +73,22 @@ config ARCH_R8A7779
config ARCH_R8A7790
bool "R-Car H2 (R8A77900)"
select ARCH_RCAR_GEN2
+ select I2C
config ARCH_R8A7791
bool "R-Car M2-W (R8A77910)"
select ARCH_RCAR_GEN2
+ select I2C
config ARCH_R8A7794
bool "R-Car E2 (R8A77940)"
select ARCH_RCAR_GEN2
+config ARCH_SH73A0
+ bool "SH-Mobile AG5 (R8A73A00)"
+ select ARCH_RMOBILE
+ select RENESAS_INTC_IRQPIN
+
comment "Renesas ARM SoCs Board Type"
config MACH_MARZEN
@@ -92,13 +103,6 @@ if ARCH_SHMOBILE_LEGACY
comment "Renesas ARM SoCs System Type"
-config ARCH_SH7372
- bool "SH-Mobile AP4 (SH7372)"
- select ARCH_RMOBILE
- select ARCH_WANT_OPTIONAL_GPIOLIB
- select ARM_CPU_SUSPEND if PM || CPU_IDLE
- select SH_INTC
-
config ARCH_SH73A0
bool "SH-Mobile AG5 (R8A73A00)"
select ARCH_RMOBILE
@@ -108,13 +112,6 @@ config ARCH_SH73A0
select SH_INTC
select RENESAS_INTC_IRQPIN
-config ARCH_R8A73A4
- bool "R-Mobile APE6 (R8A73A40)"
- select ARCH_RMOBILE
- select ARCH_WANT_OPTIONAL_GPIOLIB
- select ARM_GIC
- select RENESAS_IRQC
-
config ARCH_R8A7740
bool "R-Mobile A1 (R8A77400)"
select ARCH_RMOBILE
@@ -136,33 +133,6 @@ config ARCH_R8A7779
comment "Renesas ARM SoCs Board Type"
-config MACH_APE6EVM
- bool "APE6EVM board"
- depends on ARCH_R8A73A4
- select SMSC_PHY if SMSC911X
- select USE_OF
-
-config MACH_APE6EVM_REFERENCE
- bool "APE6EVM board - Reference Device Tree Implementation"
- depends on ARCH_R8A73A4
- select SMSC_PHY if SMSC911X
- select USE_OF
- ---help---
- Use reference implementation of APE6EVM board support
- which makes a greater use of device tree at the expense
- of not supporting a number of devices.
-
- This is intended to aid developers
-
-config MACH_MACKEREL
- bool "mackerel board"
- depends on ARCH_SH7372
- select ARCH_REQUIRE_GPIOLIB
- select REGULATOR_FIXED_VOLTAGE if REGULATOR
- select SMSC_PHY if SMSC911X
- select SND_SOC_AK4642 if SND_SIMPLE_CARD
- select USE_OF
-
config MACH_ARMADILLO800EVA
bool "Armadillo-800 EVA board"
depends on ARCH_R8A7740
@@ -209,20 +179,6 @@ config MACH_KZM9G
select SND_SOC_AK4642 if SND_SIMPLE_CARD
select USE_OF
-config MACH_KZM9G_REFERENCE
- bool "KZM-A9-GT board - Reference Device Tree Implementation"
- depends on ARCH_SH73A0
- select ARCH_REQUIRE_GPIOLIB
- select REGULATOR_FIXED_VOLTAGE if REGULATOR
- select SND_SOC_AK4642 if SND_SIMPLE_CARD
- select USE_OF
- ---help---
- Use reference implementation of KZM-A9-GT board support
- which makes as greater use of device tree at the expense
- of not supporting a number of devices.
-
- This is intended to aid developers
-
comment "Renesas ARM SoCs System Configuration"
config CPU_HAS_INTEVT
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index d53996e6da97..89e463de4479 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -6,14 +6,13 @@
obj-y := timer.o console.o
# CPU objects
-obj-$(CONFIG_ARCH_SH7372) += setup-sh7372.o intc-sh7372.o pm-sh7372.o
-obj-$(CONFIG_ARCH_SH73A0) += setup-sh73a0.o intc-sh73a0.o pm-sh73a0.o
+obj-$(CONFIG_ARCH_SH73A0) += setup-sh73a0.o pm-sh73a0.o
obj-$(CONFIG_ARCH_R8A73A4) += setup-r8a73a4.o
obj-$(CONFIG_ARCH_R8A7740) += setup-r8a7740.o pm-r8a7740.o
obj-$(CONFIG_ARCH_R8A7778) += setup-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += setup-r8a7779.o pm-r8a7779.o
-obj-$(CONFIG_ARCH_R8A7790) += setup-r8a7790.o pm-r8a7790.o
-obj-$(CONFIG_ARCH_R8A7791) += setup-r8a7791.o pm-r8a7791.o
+obj-$(CONFIG_ARCH_R8A7790) += setup-r8a7790.o
+obj-$(CONFIG_ARCH_R8A7791) += setup-r8a7791.o
obj-$(CONFIG_ARCH_R8A7794) += setup-r8a7794.o
obj-$(CONFIG_ARCH_EMEV2) += setup-emev2.o
obj-$(CONFIG_ARCH_R7S72100) += setup-r7s72100.o
@@ -21,9 +20,7 @@ obj-$(CONFIG_ARCH_R7S72100) += setup-r7s72100.o
# Clock objects
ifndef CONFIG_COMMON_CLK
obj-y += clock.o
-obj-$(CONFIG_ARCH_SH7372) += clock-sh7372.o
obj-$(CONFIG_ARCH_SH73A0) += clock-sh73a0.o
-obj-$(CONFIG_ARCH_R8A73A4) += clock-r8a73a4.o
obj-$(CONFIG_ARCH_R8A7740) += clock-r8a7740.o
obj-$(CONFIG_ARCH_R8A7778) += clock-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += clock-r8a7779.o
@@ -35,6 +32,8 @@ cpu-y := platsmp.o headsmp.o
# Shared SoC family objects
obj-$(CONFIG_ARCH_RCAR_GEN2) += setup-rcar-gen2.o platsmp-apmu.o $(cpu-y)
CFLAGS_setup-rcar-gen2.o += -march=armv7-a
+obj-$(CONFIG_ARCH_R8A7790) += regulator-quirk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7791) += regulator-quirk-rcar-gen2.o
# SMP objects
smp-y := $(cpu-y)
@@ -46,27 +45,20 @@ smp-$(CONFIG_ARCH_EMEV2) += smp-emev2.o headsmp-scu.o platsmp-scu.o
# PM objects
obj-$(CONFIG_SUSPEND) += suspend.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_PM_RCAR) += pm-rcar.o
obj-$(CONFIG_PM_RMOBILE) += pm-rmobile.o
-
-# special sh7372 handling for IRQ objects and low level sleep code
-obj-$(CONFIG_ARCH_SH7372) += entry-intc.o sleep-sh7372.o
+obj-$(CONFIG_ARCH_RCAR_GEN2) += pm-rcar-gen2.o
# Board objects
ifdef CONFIG_ARCH_SHMOBILE_MULTI
obj-$(CONFIG_MACH_MARZEN) += board-marzen-reference.o
else
-obj-$(CONFIG_MACH_APE6EVM) += board-ape6evm.o
-obj-$(CONFIG_MACH_APE6EVM_REFERENCE) += board-ape6evm-reference.o
-obj-$(CONFIG_MACH_MACKEREL) += board-mackerel.o
obj-$(CONFIG_MACH_BOCKW) += board-bockw.o
obj-$(CONFIG_MACH_BOCKW_REFERENCE) += board-bockw-reference.o
obj-$(CONFIG_MACH_MARZEN) += board-marzen.o
obj-$(CONFIG_MACH_ARMADILLO800EVA) += board-armadillo800eva.o
-obj-$(CONFIG_MACH_KZM9G) += board-kzm9g.o
-obj-$(CONFIG_MACH_KZM9G_REFERENCE) += board-kzm9g-reference.o
+obj-$(CONFIG_MACH_KZM9G) += board-kzm9g.o intc-sh73a0.o
endif
# Framework support
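
The Makefile now builds regulator-quirk-rcar-gen2.o for R8A7790 and R8A7791, and the Kconfig hunk above selects I2C for the same SoCs, which suggests the quirk watches the I2C bus for the boards' PMICs. The quirk source is not part of this excerpt; the following is only a hypothetical, generic I2C bus-notifier skeleton illustrating the mechanism, with all names invented for the sketch:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical callback -- the real quirk's logic is not shown in this diff. */
static int example_i2c_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		dev_info(dev, "i2c device added\n");

	return NOTIFY_OK;
}

static struct notifier_block example_i2c_nb = {
	.notifier_call = example_i2c_notify,
};

static int __init example_quirk_init(void)
{
	return bus_register_notifier(&i2c_bus_type, &example_i2c_nb);
}
arch_initcall(example_quirk_init);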
diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot
index 02532bea5300..e1ef19cef89c 100644
--- a/arch/arm/mach-shmobile/Makefile.boot
+++ b/arch/arm/mach-shmobile/Makefile.boot
@@ -1,13 +1,9 @@
# per-board load address for uImage
loadaddr-y :=
-loadaddr-$(CONFIG_MACH_APE6EVM) += 0x40008000
-loadaddr-$(CONFIG_MACH_APE6EVM_REFERENCE) += 0x40008000
loadaddr-$(CONFIG_MACH_ARMADILLO800EVA) += 0x40008000
loadaddr-$(CONFIG_MACH_BOCKW) += 0x60008000
loadaddr-$(CONFIG_MACH_BOCKW_REFERENCE) += 0x60008000
loadaddr-$(CONFIG_MACH_KZM9G) += 0x41008000
-loadaddr-$(CONFIG_MACH_KZM9G_REFERENCE) += 0x41008000
-loadaddr-$(CONFIG_MACH_MACKEREL) += 0x40008000
loadaddr-$(CONFIG_MACH_MARZEN) += 0x60008000
__ZRELADDR := $(sort $(loadaddr-y))
diff --git a/arch/arm/mach-shmobile/board-ape6evm-reference.c b/arch/arm/mach-shmobile/board-ape6evm-reference.c
deleted file mode 100644
index 3b68370b03a0..000000000000
--- a/arch/arm/mach-shmobile/board-ape6evm-reference.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * APE6EVM board support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/of_platform.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/platform_device.h>
-#include <linux/sh_clk.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "r8a73a4.h"
-
-static void __init ape6evm_add_standard_devices(void)
-{
-
- struct clk *parent;
- struct clk *mp;
-
- r8a73a4_clock_init();
-
- /* MP clock parent = extal2 */
- parent = clk_get(NULL, "extal2");
- mp = clk_get(NULL, "mp");
- BUG_ON(IS_ERR(parent) || IS_ERR(mp));
-
- clk_set_parent(mp, parent);
- clk_put(parent);
- clk_put(mp);
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
-static const char *ape6evm_boards_compat_dt[] __initdata = {
- "renesas,ape6evm-reference",
- NULL,
-};
-
-DT_MACHINE_START(APE6EVM_DT, "ape6evm")
- .init_early = shmobile_init_delay,
- .init_machine = ape6evm_add_standard_devices,
- .init_late = shmobile_init_late,
- .dt_compat = ape6evm_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
deleted file mode 100644
index 444f22d370f0..000000000000
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * APE6EVM board support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/kernel.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/sh_clk.h>
-#include <linux/smsc911x.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "irqs.h"
-#include "r8a73a4.h"
-
-/* LEDS */
-static struct gpio_led ape6evm_leds[] = {
- {
- .name = "gnss-en",
- .gpio = 28,
- .default_state = LEDS_GPIO_DEFSTATE_OFF,
- }, {
- .name = "nfc-nrst",
- .gpio = 126,
- .default_state = LEDS_GPIO_DEFSTATE_OFF,
- }, {
- .name = "gnss-nrst",
- .gpio = 132,
- .default_state = LEDS_GPIO_DEFSTATE_OFF,
- }, {
- .name = "bt-wakeup",
- .gpio = 232,
- .default_state = LEDS_GPIO_DEFSTATE_OFF,
- }, {
- .name = "strobe",
- .gpio = 250,
- .default_state = LEDS_GPIO_DEFSTATE_OFF,
- }, {
- .name = "bbresetout",
- .gpio = 288,
- .default_state = LEDS_GPIO_DEFSTATE_OFF,
- },
-};
-
-static __initdata struct gpio_led_platform_data ape6evm_leds_pdata = {
- .leds = ape6evm_leds,
- .num_leds = ARRAY_SIZE(ape6evm_leds),
-};
-
-/* GPIO KEY */
-#define GPIO_KEY(c, g, d, ...) \
- { .code = c, .gpio = g, .desc = d, .active_low = 1 }
-
-static struct gpio_keys_button gpio_buttons[] = {
- GPIO_KEY(KEY_0, 324, "S16"),
- GPIO_KEY(KEY_MENU, 325, "S17"),
- GPIO_KEY(KEY_HOME, 326, "S18"),
- GPIO_KEY(KEY_BACK, 327, "S19"),
- GPIO_KEY(KEY_VOLUMEUP, 328, "S20"),
- GPIO_KEY(KEY_VOLUMEDOWN, 329, "S21"),
-};
-
-static struct gpio_keys_platform_data ape6evm_keys_pdata __initdata = {
- .buttons = gpio_buttons,
- .nbuttons = ARRAY_SIZE(gpio_buttons),
-};
-
-/* Dummy supplies, where voltage doesn't matter */
-static struct regulator_consumer_supply dummy_supplies[] = {
- REGULATOR_SUPPLY("vddvario", "smsc911x"),
- REGULATOR_SUPPLY("vdd33a", "smsc911x"),
-};
-
-/* SMSC LAN9220 */
-static const struct resource lan9220_res[] __initconst = {
- DEFINE_RES_MEM(0x08000000, 0x1000),
- {
- .start = irq_pin(40), /* IRQ40 */
- .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH,
- },
-};
-
-static const struct smsc911x_platform_config lan9220_data __initconst = {
- .flags = SMSC911X_USE_32BIT,
- .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
- .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
-};
-
-/*
- * MMC0 power supplies:
- * Both Vcc and VccQ to eMMC on APE6EVM are supplied by a tps80032 voltage
- * regulator. Until support for it is added to this file we simulate the
- * Vcc supply by a fixed always-on regulator
- */
-static struct regulator_consumer_supply vcc_mmc0_consumers[] =
-{
- REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
-};
-
-/*
- * SDHI0 power supplies:
- * Vcc to SDHI0 on APE6EVM is supplied by a GPIO-switchable regulator. VccQ is
- * provided by the same tps80032 regulator as both MMC0 voltages - see comment
- * above
- */
-static struct regulator_consumer_supply vcc_sdhi0_consumers[] =
-{
- REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
-};
-
-static struct regulator_init_data vcc_sdhi0_init_data = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi0_consumers),
- .consumer_supplies = vcc_sdhi0_consumers,
-};
-
-static const struct fixed_voltage_config vcc_sdhi0_info __initconst = {
- .supply_name = "SDHI0 Vcc",
- .microvolts = 3300000,
- .gpio = 76,
- .enable_high = 1,
- .init_data = &vcc_sdhi0_init_data,
-};
-
-/*
- * SDHI1 power supplies:
- * Vcc and VccQ to SDHI1 on APE6EVM are both fixed at 3.3V
- */
-static struct regulator_consumer_supply vcc_sdhi1_consumers[] =
-{
- REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
-};
-
-/* MMCIF */
-static const struct sh_mmcif_plat_data mmcif0_pdata __initconst = {
- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
- .slave_id_tx = SHDMA_SLAVE_MMCIF0_TX,
- .slave_id_rx = SHDMA_SLAVE_MMCIF0_RX,
- .ccs_unsupported = true,
-};
-
-static const struct resource mmcif0_resources[] __initconst = {
- DEFINE_RES_MEM(0xee200000, 0x100),
- DEFINE_RES_IRQ(gic_spi(169)),
-};
-
-/* SDHI0 */
-static const struct sh_mobile_sdhi_info sdhi0_pdata __initconst = {
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
-};
-
-static const struct resource sdhi0_resources[] __initconst = {
- DEFINE_RES_MEM(0xee100000, 0x100),
- DEFINE_RES_IRQ(gic_spi(165)),
-};
-
-/* SDHI1 */
-static const struct sh_mobile_sdhi_info sdhi1_pdata __initconst = {
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
- MMC_CAP_NEEDS_POLL,
-};
-
-static const struct resource sdhi1_resources[] __initconst = {
- DEFINE_RES_MEM(0xee120000, 0x100),
- DEFINE_RES_IRQ(gic_spi(166)),
-};
-
-static const struct pinctrl_map ape6evm_pinctrl_map[] __initconst = {
- /* SCIFA0 console */
- PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.0", "pfc-r8a73a4",
- "scifa0_data", "scifa0"),
- /* SMSC */
- PIN_MAP_MUX_GROUP_DEFAULT("smsc911x", "pfc-r8a73a4",
- "irqc_irq40", "irqc"),
- /* MMCIF0 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a73a4",
- "mmc0_data8", "mmc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a73a4",
- "mmc0_ctrl", "mmc0"),
- /* SDHI0: uSD: no WP */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a73a4",
- "sdhi0_data4", "sdhi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a73a4",
- "sdhi0_ctrl", "sdhi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a73a4",
- "sdhi0_cd", "sdhi0"),
- /* SDHI1 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a73a4",
- "sdhi1_data4", "sdhi1"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a73a4",
- "sdhi1_ctrl", "sdhi1"),
-};
-
-static void __init ape6evm_add_standard_devices(void)
-{
-
- struct clk *parent;
- struct clk *mp;
-
- r8a73a4_clock_init();
-
- /* MP clock parent = extal2 */
- parent = clk_get(NULL, "extal2");
- mp = clk_get(NULL, "mp");
- BUG_ON(IS_ERR(parent) || IS_ERR(mp));
-
- clk_set_parent(mp, parent);
- clk_put(parent);
- clk_put(mp);
-
- pinctrl_register_mappings(ape6evm_pinctrl_map,
- ARRAY_SIZE(ape6evm_pinctrl_map));
- r8a73a4_pinmux_init();
- r8a73a4_add_standard_devices();
-
- /* LAN9220 ethernet */
- gpio_request_one(270, GPIOF_OUT_INIT_HIGH, NULL); /* smsc9220 RESET */
-
- regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
- platform_device_register_resndata(NULL, "smsc911x", -1,
- lan9220_res, ARRAY_SIZE(lan9220_res),
- &lan9220_data, sizeof(lan9220_data));
-
- regulator_register_always_on(1, "MMC0 Vcc", vcc_mmc0_consumers,
- ARRAY_SIZE(vcc_mmc0_consumers), 2800000);
- platform_device_register_resndata(NULL, "sh_mmcif", 0,
- mmcif0_resources, ARRAY_SIZE(mmcif0_resources),
- &mmcif0_pdata, sizeof(mmcif0_pdata));
- platform_device_register_data(NULL, "reg-fixed-voltage", 2,
- &vcc_sdhi0_info, sizeof(vcc_sdhi0_info));
- platform_device_register_resndata(NULL, "sh_mobile_sdhi", 0,
- sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
- &sdhi0_pdata, sizeof(sdhi0_pdata));
- regulator_register_always_on(3, "SDHI1 Vcc", vcc_sdhi1_consumers,
- ARRAY_SIZE(vcc_sdhi1_consumers), 3300000);
- platform_device_register_resndata(NULL, "sh_mobile_sdhi", 1,
- sdhi1_resources, ARRAY_SIZE(sdhi1_resources),
- &sdhi1_pdata, sizeof(sdhi1_pdata));
- platform_device_register_data(NULL, "gpio-keys", -1,
- &ape6evm_keys_pdata,
- sizeof(ape6evm_keys_pdata));
- platform_device_register_data(NULL, "leds-gpio", -1,
- &ape6evm_leds_pdata,
- sizeof(ape6evm_leds_pdata));
-}
-
-static void __init ape6evm_legacy_init_time(void)
-{
- /* Do not invoke DT-based timers via clocksource_of_init() */
-}
-
-static void __init ape6evm_legacy_init_irq(void)
-{
- void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
- void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
-
- gic_init(0, 29, gic_dist_base, gic_cpu_base);
-
- /* Do not invoke DT-based interrupt code via irqchip_init() */
-}
-
-
-static const char *ape6evm_boards_compat_dt[] __initdata = {
- "renesas,ape6evm",
- NULL,
-};
-
-DT_MACHINE_START(APE6EVM_DT, "ape6evm")
- .init_early = shmobile_init_delay,
- .init_irq = ape6evm_legacy_init_irq,
- .init_machine = ape6evm_add_standard_devices,
- .init_late = shmobile_init_late,
- .dt_compat = ape6evm_boards_compat_dt,
- .init_time = ape6evm_legacy_init_time,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 6d949f1c850b..bf37e3c532f6 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -754,12 +754,12 @@ static struct platform_device vcc_sdhi1 = {
};
/* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi0_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+static struct tmio_mmc_data sdhi0_info = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
+ .flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
.cd_gpio = 167,
};
@@ -796,12 +796,12 @@ static struct platform_device sdhi0_device = {
};
/* SDHI1 */
-static struct sh_mobile_sdhi_info sdhi1_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+static struct tmio_mmc_data sdhi1_info = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI1_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI1_RX,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
+ .flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
/* Port72 cannot generate IRQs, will be used in polling mode. */
.cd_gpio = 72,
};
@@ -1015,7 +1015,6 @@ static struct asoc_simple_card_info fsi_wm8978_info = {
.platform = "sh_fsi2",
.daifmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM,
.cpu_dai = {
- .fmt = SND_SOC_DAIFMT_IB_NF,
.name = "fsia-dai",
},
.codec_dai = {
@@ -1040,9 +1039,9 @@ static struct asoc_simple_card_info fsi2_hdmi_info = {
.card = "FSI2B-HDMI",
.codec = "sh-mobile-hdmi",
.platform = "sh_fsi2",
+ .daifmt = SND_SOC_DAIFMT_CBS_CFS,
.cpu_dai = {
.name = "fsib-dai",
- .fmt = SND_SOC_DAIFMT_CBS_CFS,
},
.codec_dai = {
.name = "sh_mobile_hdmi-hifi",
diff --git a/arch/arm/mach-shmobile/board-bockw-reference.c b/arch/arm/mach-shmobile/board-bockw-reference.c
index d649ade4a202..9a74efda3d18 100644
--- a/arch/arm/mach-shmobile/board-bockw-reference.c
+++ b/arch/arm/mach-shmobile/board-bockw-reference.c
@@ -36,7 +36,9 @@ static void __init bockw_init(void)
void __iomem *fpga;
void __iomem *pfc;
+#ifndef CONFIG_COMMON_CLK
r8a7778_clock_init();
+#endif
r8a7778_init_irq_extpin_dt(1);
r8a7778_add_dt_devices();
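
The #ifndef added above keeps the legacy clock setup out of kernels built with the common clock framework. A condensed sketch of the guarded init path (illustrative only; the real bockw_init() goes on to map the FPGA and PFC, and the r8a7778_* helpers come from the mach-shmobile headers the file already includes):

static void __init example_bockw_init(void)
{
#ifndef CONFIG_COMMON_CLK
	r8a7778_clock_init();		/* legacy sh_clk clock tree */
#endif
	/* with CONFIG_COMMON_CLK the clock tree is instantiated from DT */
	r8a7778_init_irq_extpin_dt(1);
	r8a7778_add_dt_devices();
}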
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index f27b5a833bf0..25558d1f417f 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -201,12 +201,12 @@ static struct rcar_phy_platform_data usb_phy_platform_data __initdata =
/* SDHI */
-static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
- .dma_slave_tx = HPBDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = HPBDMA_SLAVE_SDHI0_RX,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED,
- .tmio_ocr_mask = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
+static struct tmio_mmc_data sdhi0_info __initdata = {
+ .chan_priv_tx = (void *)HPBDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)HPBDMA_SLAVE_SDHI0_RX,
+ .capabilities = MMC_CAP_SD_HIGHSPEED,
+ .ocr_mask = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
+ .flags = TMIO_MMC_HAS_IDLE_WAIT,
};
static struct resource sdhi0_resources[] __initdata = {
@@ -683,7 +683,7 @@ static void __init bockw_init(void)
platform_device_register_resndata(
NULL, "sh_mobile_sdhi", 0,
sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
- &sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
+ &sdhi0_info, sizeof(struct tmio_mmc_data));
}
/* for Audio */
diff --git a/arch/arm/mach-shmobile/board-kzm9g-reference.c b/arch/arm/mach-shmobile/board-kzm9g-reference.c
deleted file mode 100644
index 2e82e44ab852..000000000000
--- a/arch/arm/mach-shmobile/board-kzm9g-reference.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * KZM-A9-GT board support - Reference Device Tree Implementation
- *
- * Copyright (C) 2012 Horms Solutions Ltd.
- *
- * Based on board-kzm9g.c
- * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/input.h>
-#include <linux/of_platform.h>
-
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "sh73a0.h"
-
-static void __init kzm_init(void)
-{
- sh73a0_add_standard_devices_dt();
-
-#ifdef CONFIG_CACHE_L2X0
- /* Shared attribute override enable, 64K*8way */
- l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
-#endif
-}
-
-#define RESCNT2 IOMEM(0xe6188020)
-static void kzm9g_restart(enum reboot_mode mode, const char *cmd)
-{
- /* Do soft power on reset */
- writel((1 << 31), RESCNT2);
-}
-
-static const char *kzm9g_boards_compat_dt[] __initdata = {
- "renesas,kzm9g-reference",
- NULL,
-};
-
-DT_MACHINE_START(KZM9G_DT, "kzm9g-reference")
- .smp = smp_ops(sh73a0_smp_ops),
- .map_io = sh73a0_map_io,
- .init_early = shmobile_init_delay,
- .init_machine = kzm_init,
- .init_late = shmobile_init_late,
- .restart = kzm9g_restart,
- .dt_compat = kzm9g_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 7c9b63bdde9f..260d8319fd82 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -442,11 +442,11 @@ static struct platform_device vcc_sdhi2 = {
};
/* SDHI */
-static struct sh_mobile_sdhi_info sdhi0_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+static struct tmio_mmc_data sdhi0_info = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX,
+ .flags = TMIO_MMC_HAS_IDLE_WAIT,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
};
@@ -484,13 +484,13 @@ static struct platform_device sdhi0_device = {
};
/* Micro SD */
-static struct sh_mobile_sdhi_info sdhi2_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI2_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI2_RX,
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
+static struct tmio_mmc_data sdhi2_info = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI2_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI2_RX,
+ .flags = TMIO_MMC_HAS_IDLE_WAIT |
TMIO_MMC_USE_GPIO_CD |
TMIO_MMC_WRPROTECT_DISABLE,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_POWER_OFF_CARD,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_POWER_OFF_CARD,
.cd_gpio = 13,
};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
deleted file mode 100644
index a1c1dfb6a67a..000000000000
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ /dev/null
@@ -1,1522 +0,0 @@
-/*
- * mackerel board support
- *
- * Copyright (C) 2010 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * based on ap4evb
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2008 Yoshihiro Shimoda
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/i2c.h>
-#include <linux/leds.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/sh_flctl.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/platform_data/gpio_backlight.h>
-#include <linux/pm_clock.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/smsc911x.h>
-#include <linux/sh_clk.h>
-#include <linux/tca6416_keypad.h>
-#include <linux/usb/renesas_usbhs.h>
-#include <linux/dma-mapping.h>
-
-#include <video/sh_mobile_hdmi.h>
-#include <video/sh_mobile_lcdc.h>
-#include <media/sh_mobile_ceu.h>
-#include <media/soc_camera.h>
-#include <media/soc_camera_platform.h>
-#include <sound/sh_fsi.h>
-#include <sound/simple_card.h>
-#include <asm/mach/arch.h>
-#include <asm/mach-types.h>
-
-#include "common.h"
-#include "intc.h"
-#include "irqs.h"
-#include "pm-rmobile.h"
-#include "sh-gpio.h"
-#include "sh7372.h"
-
-/*
- * Address Interface BusWidth note
- * ------------------------------------------------------------------
- * 0x0000_0000 NOR Flash ROM (MCP) 16bit SW7 : bit1 = ON
- * 0x0800_0000 user area -
- * 0x1000_0000 NOR Flash ROM (MCP) 16bit SW7 : bit1 = OFF
- * 0x1400_0000 Ether (LAN9220) 16bit
- * 0x1600_0000 user area - cannot use with NAND
- * 0x1800_0000 user area -
- * 0x1A00_0000 -
- * 0x4000_0000 LPDDR2-SDRAM (POP) 32bit
- */
-
-/*
- * CPU mode
- *
- * SW4 | Boot Area| Master | Remarks
- * 1 | 2 | 3 | 4 | 5 | 6 | 8 | | Processor|
- * ----+-----+-----+-----+-----+-----+-----+----------+----------+--------------
- * ON | ON | OFF | ON | ON | OFF | OFF | External | System | External ROM
- * ON | ON | ON | ON | ON | OFF | OFF | External | System | ROM Debug
- * ON | ON | X | ON | OFF | OFF | OFF | Built-in | System | ROM Debug
- * X | OFF | X | X | X | X | OFF | Built-in | System | MaskROM
- * OFF | X | X | X | X | X | OFF | Built-in | System | MaskROM
- * X | X | X | OFF | X | X | OFF | Built-in | System | MaskROM
- * OFF | ON | OFF | X | X | OFF | ON | External | System | Standalone
- * ON | OFF | OFF | X | X | OFF | ON | External | Realtime | Standalone
-*/
-
-/*
- * NOR Flash ROM
- *
- * SW1 | SW2 | SW7 | NOR Flash ROM
- * bit1 | bit1 bit2 | bit1 | Memory allocation
- * ------+------------+------+------------------
- * OFF | ON OFF | ON | Area 0
- * OFF | ON OFF | OFF | Area 4
- */
-
-/*
- * SMSC 9220
- *
- * SW1 SMSC 9220
- * -----------------------
- * ON access disable
- * OFF access enable
- */
-
-/*
- * NAND Flash ROM
- *
- * SW1 | SW2 | SW7 | NAND Flash ROM
- * bit1 | bit1 bit2 | bit2 | Memory allocation
- * ------+------------+------+------------------
- * OFF | ON OFF | ON | FCE 0
- * OFF | ON OFF | OFF | FCE 1
- */
-
-/*
- * External interrupt pin settings
- *
- * IRQX | pin setting | device | level
- * ------+--------------------+--------------------+-------
- * IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low
- * IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | High
- * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Touch Panel | Low
- * IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low
- * IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low
- * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High
- * IRQ22 | ICR4A.IRQ22SA=0011 | Sensor(AK8975) | High
- */
-
-/*
- * USB
- *
- * USB0 : CN22 : Function
- * USB1 : CN31 : Function/Host *1
- *
- * J30 (for CN31) *1
- * ----------+---------------+-------------
- * 1-2 short | VBUS 5V | Host
- * open | external VBUS | Function
- *
- * CAUTION
- *
- * The renesas_usbhs driver can use external interrupt mode
- * (driven by the USB-PHY) or autonomy mode (using its own interrupt)
- * to detect connection/disconnection when acting as a Function.
- * In external interrupt mode USB power is off while disconnected;
- * in autonomy mode it is always powered on.
- *
- * mackerel can not use external interrupt (IRQ7-PORT167) mode on "USB0",
- * because Touchscreen is using IRQ7-PORT40.
- * It is impossible to use IRQ7 demux on this board.
- */
-
-/*
- * SDHI0 (CN12)
- *
- * SW56 : OFF
- *
- */
-
-/* MMC /SDHI1 (CN7)
- *
- * I/O voltage : 1.8v
- *
- * Power voltage : 1.8v or 3.3v
- * J22 : select power voltage *1
- * 1-2 pin : 1.8v
- * 2-3 pin : 3.3v
- *
- * *1
- * Please change J22 depending on the card to be used.
- * The MMC's OCR field is set to support either voltage for the inserted card.
- *
- * SW1 | SW33
- * | bit1 | bit2 | bit3 | bit4
- * -------------+------+------+------+-------
- * MMC0 OFF | OFF | X | ON | X (Use MMCIF)
- * SDHI1 OFF | ON | X | OFF | X (Use MFD_SH_MOBILE_SDHI)
- *
- */
-
-/*
- * SDHI2 (CN23)
- *
- * microSD card slot
- *
- */
-
-/*
- * FSI - AK4642
- *
- * it needs amixer settings for playing
- *
- * amixer set "Headphone Enable" on
- */
-
-/* Fixed 3.3V and 1.8V regulators to be used by multiple devices */
-static struct regulator_consumer_supply fixed1v8_power_consumers[] =
-{
- /*
- * J22 on mackerel switches mmcif.0 and sdhi.1 between 1.8V and 3.3V
- * Since we cannot support both voltages, we support the default 1.8V
- */
- REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
- REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
- REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
- REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
-};
-
-static struct regulator_consumer_supply fixed3v3_power_consumers[] =
-{
- REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
- REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
- REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"),
- REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.2"),
-};
-
-/* Dummy supplies, where voltage doesn't matter */
-static struct regulator_consumer_supply dummy_supplies[] = {
- REGULATOR_SUPPLY("vddvario", "smsc911x"),
- REGULATOR_SUPPLY("vdd33a", "smsc911x"),
-};
-
-/* MTD */
-static struct mtd_partition nor_flash_partitions[] = {
- {
- .name = "loader",
- .offset = 0x00000000,
- .size = 512 * 1024,
- .mask_flags = MTD_WRITEABLE,
- },
- {
- .name = "bootenv",
- .offset = MTDPART_OFS_APPEND,
- .size = 512 * 1024,
- .mask_flags = MTD_WRITEABLE,
- },
- {
- .name = "kernel_ro",
- .offset = MTDPART_OFS_APPEND,
- .size = 8 * 1024 * 1024,
- .mask_flags = MTD_WRITEABLE,
- },
- {
- .name = "kernel",
- .offset = MTDPART_OFS_APPEND,
- .size = 8 * 1024 * 1024,
- },
- {
- .name = "data",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct physmap_flash_data nor_flash_data = {
- .width = 2,
- .parts = nor_flash_partitions,
- .nr_parts = ARRAY_SIZE(nor_flash_partitions),
-};
-
-static struct resource nor_flash_resources[] = {
- [0] = {
- .start = 0x20000000, /* CS0 shadow instead of regular CS0 */
- .end = 0x28000000 - 1, /* needed by USB MASK ROM boot */
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device nor_flash_device = {
- .name = "physmap-flash",
- .dev = {
- .platform_data = &nor_flash_data,
- },
- .num_resources = ARRAY_SIZE(nor_flash_resources),
- .resource = nor_flash_resources,
-};
-
-/* SMSC */
-static struct resource smc911x_resources[] = {
- {
- .start = 0x14000000,
- .end = 0x16000000 - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = evt2irq(0x02c0) /* IRQ6A */,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
- },
-};
-
-static struct smsc911x_platform_config smsc911x_info = {
- .flags = SMSC911X_USE_16BIT | SMSC911X_SAVE_MAC_ADDRESS,
- .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
- .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
-};
-
-static struct platform_device smc911x_device = {
- .name = "smsc911x",
- .id = -1,
- .num_resources = ARRAY_SIZE(smc911x_resources),
- .resource = smc911x_resources,
- .dev = {
- .platform_data = &smsc911x_info,
- },
-};
-
-/* MERAM */
-static struct sh_mobile_meram_info mackerel_meram_info = {
- .addr_mode = SH_MOBILE_MERAM_MODE1,
-};
-
-static struct resource meram_resources[] = {
- [0] = {
- .name = "regs",
- .start = 0xe8000000,
- .end = 0xe807ffff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .name = "meram",
- .start = 0xe8080000,
- .end = 0xe81fffff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device meram_device = {
- .name = "sh_mobile_meram",
- .id = 0,
- .num_resources = ARRAY_SIZE(meram_resources),
- .resource = meram_resources,
- .dev = {
- .platform_data = &mackerel_meram_info,
- },
-};
-
-/* LCDC and backlight */
-static struct fb_videomode mackerel_lcdc_modes[] = {
- {
- .name = "WVGA Panel",
- .xres = 800,
- .yres = 480,
- .left_margin = 220,
- .right_margin = 110,
- .hsync_len = 70,
- .upper_margin = 20,
- .lower_margin = 5,
- .vsync_len = 5,
- .sync = 0,
- },
-};
-
-static const struct sh_mobile_meram_cfg lcd_meram_cfg = {
- .icb[0] = {
- .meram_size = 0x40,
- },
- .icb[1] = {
- .meram_size = 0x40,
- },
-};
-
-static struct sh_mobile_lcdc_info lcdc_info = {
- .meram_dev = &mackerel_meram_info,
- .clock_source = LCDC_CLK_BUS,
- .ch[0] = {
- .chan = LCDC_CHAN_MAINLCD,
- .fourcc = V4L2_PIX_FMT_RGB565,
- .lcd_modes = mackerel_lcdc_modes,
- .num_modes = ARRAY_SIZE(mackerel_lcdc_modes),
- .interface_type = RGB24,
- .clock_divider = 3,
- .flags = 0,
- .panel_cfg = {
- .width = 152,
- .height = 91,
- },
- .meram_cfg = &lcd_meram_cfg,
- }
-};
-
-static struct resource lcdc_resources[] = {
- [0] = {
- .name = "LCDC",
- .start = 0xfe940000,
- .end = 0xfe943fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = intcs_evt2irq(0x580),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device lcdc_device = {
- .name = "sh_mobile_lcdc_fb",
- .num_resources = ARRAY_SIZE(lcdc_resources),
- .resource = lcdc_resources,
- .dev = {
- .platform_data = &lcdc_info,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static struct gpio_backlight_platform_data gpio_backlight_data = {
- .fbdev = &lcdc_device.dev,
- .gpio = 31,
- .def_value = 1,
- .name = "backlight",
-};
-
-static struct platform_device gpio_backlight_device = {
- .name = "gpio-backlight",
- .dev = {
- .platform_data = &gpio_backlight_data,
- },
-};
-
-/* HDMI */
-static struct sh_mobile_hdmi_info hdmi_info = {
- .flags = HDMI_SND_SRC_SPDIF,
-};
-
-static struct resource hdmi_resources[] = {
- [0] = {
- .name = "HDMI",
- .start = 0xe6be0000,
- .end = 0xe6be00ff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- /* There's also an HDMI interrupt on INTCS @ 0x18e0 */
- .start = evt2irq(0x17e0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device hdmi_device = {
- .name = "sh-mobile-hdmi",
- .num_resources = ARRAY_SIZE(hdmi_resources),
- .resource = hdmi_resources,
- .id = -1,
- .dev = {
- .platform_data = &hdmi_info,
- },
-};
-
-static const struct sh_mobile_meram_cfg hdmi_meram_cfg = {
- .icb[0] = {
- .meram_size = 0x100,
- },
- .icb[1] = {
- .meram_size = 0x100,
- },
-};
-
-static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
- .meram_dev = &mackerel_meram_info,
- .clock_source = LCDC_CLK_EXTERNAL,
- .ch[0] = {
- .chan = LCDC_CHAN_MAINLCD,
- .fourcc = V4L2_PIX_FMT_RGB565,
- .interface_type = RGB24,
- .clock_divider = 1,
- .flags = LCDC_FLAGS_DWPOL,
- .meram_cfg = &hdmi_meram_cfg,
- .tx_dev = &hdmi_device,
- }
-};
-
-static struct resource hdmi_lcdc_resources[] = {
- [0] = {
- .name = "LCDC1",
- .start = 0xfe944000,
- .end = 0xfe947fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = intcs_evt2irq(0x1780),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device hdmi_lcdc_device = {
- .name = "sh_mobile_lcdc_fb",
- .num_resources = ARRAY_SIZE(hdmi_lcdc_resources),
- .resource = hdmi_lcdc_resources,
- .id = 1,
- .dev = {
- .platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static struct asoc_simple_card_info fsi2_hdmi_info = {
- .name = "HDMI",
- .card = "FSI2B-HDMI",
- .codec = "sh-mobile-hdmi",
- .platform = "sh_fsi2",
- .daifmt = SND_SOC_DAIFMT_CBS_CFS,
- .cpu_dai = {
- .name = "fsib-dai",
- },
- .codec_dai = {
- .name = "sh_mobile_hdmi-hifi",
- },
-};
-
-static struct platform_device fsi_hdmi_device = {
- .name = "asoc-simple-card",
- .id = 1,
- .dev = {
- .platform_data = &fsi2_hdmi_info,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &fsi_hdmi_device.dev.coherent_dma_mask,
- },
-};
-
-static void __init hdmi_init_pm_clock(void)
-{
- struct clk *hdmi_ick = clk_get(&hdmi_device.dev, "ick");
- int ret;
- long rate;
-
- if (IS_ERR(hdmi_ick)) {
- ret = PTR_ERR(hdmi_ick);
- pr_err("Cannot get HDMI ICK: %d\n", ret);
- goto out;
- }
-
- ret = clk_set_parent(&sh7372_pllc2_clk, &sh7372_dv_clki_div2_clk);
- if (ret < 0) {
- pr_err("Cannot set PLLC2 parent: %d, %d users\n",
- ret, sh7372_pllc2_clk.usecount);
- goto out;
- }
-
- pr_debug("PLLC2 initial frequency %lu\n",
- clk_get_rate(&sh7372_pllc2_clk));
-
- rate = clk_round_rate(&sh7372_pllc2_clk, 594000000);
- if (rate <= 0) {
- pr_err("Cannot get suitable rate: %ld\n", rate);
- ret = -EINVAL;
- goto out;
- }
-
- ret = clk_set_rate(&sh7372_pllc2_clk, rate);
- if (ret < 0) {
- pr_err("Cannot set rate %ld: %d\n", rate, ret);
- goto out;
- }
-
- pr_debug("PLLC2 set frequency %lu\n", rate);
-
- ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
- if (ret < 0)
- pr_err("Cannot set HDMI parent: %d\n", ret);
-
-out:
- if (!IS_ERR(hdmi_ick))
- clk_put(hdmi_ick);
-}
-
-/* USBHS0 is connected to CN22 which takes a USB Mini-B plug
- *
- * The sh7372 SoC has IRQ7 set aside for USBHS0 hotplug,
- * but on this particular board IRQ7 is already used by
- * the touch screen. This leaves us with software polling.
- */
-#define USBHS0_POLL_INTERVAL (HZ * 5)
-
-struct usbhs_private {
- void __iomem *usbphyaddr;
- void __iomem *usbcrcaddr;
- struct renesas_usbhs_platform_info info;
- struct delayed_work work;
- struct platform_device *pdev;
-};
-
-#define usbhs_get_priv(pdev) \
- container_of(renesas_usbhs_get_info(pdev), \
- struct usbhs_private, info)
-
-#define usbhs_is_connected(priv) \
- (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
-
-static int usbhs_get_vbus(struct platform_device *pdev)
-{
- return usbhs_is_connected(usbhs_get_priv(pdev));
-}
-
-static int usbhs_phy_reset(struct platform_device *pdev)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- /* init phy */
- __raw_writew(0x8a0a, priv->usbcrcaddr);
-
- return 0;
-}
-
-static int usbhs0_get_id(struct platform_device *pdev)
-{
- return USBHS_GADGET;
-}
-
-static void usbhs0_work_function(struct work_struct *work)
-{
- struct usbhs_private *priv = container_of(work, struct usbhs_private,
- work.work);
-
- renesas_usbhs_call_notify_hotplug(priv->pdev);
- schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
-}
-
-static int usbhs0_hardware_init(struct platform_device *pdev)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- priv->pdev = pdev;
- INIT_DELAYED_WORK(&priv->work, usbhs0_work_function);
- schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
- return 0;
-}
-
-static int usbhs0_hardware_exit(struct platform_device *pdev)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- cancel_delayed_work_sync(&priv->work);
-
- return 0;
-}
-
-static struct usbhs_private usbhs0_private = {
- .usbcrcaddr = IOMEM(0xe605810c), /* USBCR2 */
- .info = {
- .platform_callback = {
- .hardware_init = usbhs0_hardware_init,
- .hardware_exit = usbhs0_hardware_exit,
- .phy_reset = usbhs_phy_reset,
- .get_id = usbhs0_get_id,
- .get_vbus = usbhs_get_vbus,
- },
- .driver_param = {
- .buswait_bwait = 4,
- .d0_tx_id = SHDMA_SLAVE_USB0_TX,
- .d1_rx_id = SHDMA_SLAVE_USB0_RX,
- },
- },
-};
-
-static struct resource usbhs0_resources[] = {
- [0] = {
- .name = "USBHS0",
- .start = 0xe6890000,
- .end = 0xe68900e6 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = evt2irq(0x1ca0) /* USB0_USB0I0 */,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device usbhs0_device = {
- .name = "renesas_usbhs",
- .id = 0,
- .dev = {
- .platform_data = &usbhs0_private.info,
- },
- .num_resources = ARRAY_SIZE(usbhs0_resources),
- .resource = usbhs0_resources,
-};
-
-/* USBHS1 is connected to CN31 which takes a USB Mini-AB plug
- *
- * Use J30 to select between Host and Function. This setting
- * can however not be detected by software. Hotplug of USBHS1
- * is provided via IRQ8.
- *
- * Current USB1 works as "USB Host".
- * - set J30 "short"
- *
- * If you want to use it as "USB gadget",
- * - J30 "open"
- * - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
- * - add .get_vbus = usbhs_get_vbus in usbhs1_private
- * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
- */
-#define IRQ8 evt2irq(0x0300)
-#define USB_PHY_MODE (1 << 4)
-#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
-#define USB_PHY_ON (1 << 1)
-#define USB_PHY_OFF (1 << 0)
-#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
-
-static irqreturn_t usbhs1_interrupt(int irq, void *data)
-{
- struct platform_device *pdev = data;
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- dev_dbg(&pdev->dev, "%s\n", __func__);
-
- renesas_usbhs_call_notify_hotplug(pdev);
-
- /* clear status */
- __raw_writew(__raw_readw(priv->usbphyaddr) | USB_PHY_INT_CLR,
- priv->usbphyaddr);
-
- return IRQ_HANDLED;
-}
-
-static int usbhs1_hardware_init(struct platform_device *pdev)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
- int ret;
-
- /* clear interrupt status */
- __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
-
- ret = request_irq(IRQ8, usbhs1_interrupt, IRQF_TRIGGER_HIGH,
- dev_name(&pdev->dev), pdev);
- if (ret) {
- dev_err(&pdev->dev, "request_irq err\n");
- return ret;
- }
-
- /* enable USB phy interrupt */
- __raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->usbphyaddr);
-
- return 0;
-}
-
-static int usbhs1_hardware_exit(struct platform_device *pdev)
-{
- struct usbhs_private *priv = usbhs_get_priv(pdev);
-
- /* clear interrupt status */
- __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
-
- free_irq(IRQ8, pdev);
-
- return 0;
-}
-
-static int usbhs1_get_id(struct platform_device *pdev)
-{
- return USBHS_HOST;
-}
-
-static u32 usbhs1_pipe_cfg[] = {
- USB_ENDPOINT_XFER_CONTROL,
- USB_ENDPOINT_XFER_ISOC,
- USB_ENDPOINT_XFER_ISOC,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_INT,
- USB_ENDPOINT_XFER_INT,
- USB_ENDPOINT_XFER_INT,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
- USB_ENDPOINT_XFER_BULK,
-};
-
-static struct usbhs_private usbhs1_private = {
- .usbphyaddr = IOMEM(0xe60581e2), /* USBPHY1INTAP */
- .usbcrcaddr = IOMEM(0xe6058130), /* USBCR4 */
- .info = {
- .platform_callback = {
- .hardware_init = usbhs1_hardware_init,
- .hardware_exit = usbhs1_hardware_exit,
- .get_id = usbhs1_get_id,
- .phy_reset = usbhs_phy_reset,
- },
- .driver_param = {
- .buswait_bwait = 4,
- .has_otg = 1,
- .pipe_type = usbhs1_pipe_cfg,
- .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
- .d0_tx_id = SHDMA_SLAVE_USB1_TX,
- .d1_rx_id = SHDMA_SLAVE_USB1_RX,
- },
- },
-};
-
-static struct resource usbhs1_resources[] = {
- [0] = {
- .name = "USBHS1",
- .start = 0xe68b0000,
- .end = 0xe68b00e6 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = evt2irq(0x1ce0) /* USB1_USB1I0 */,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device usbhs1_device = {
- .name = "renesas_usbhs",
- .id = 1,
- .dev = {
- .platform_data = &usbhs1_private.info,
- .dma_mask = &usbhs1_device.dev.coherent_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .num_resources = ARRAY_SIZE(usbhs1_resources),
- .resource = usbhs1_resources,
-};
-
-/* LED */
-static struct gpio_led mackerel_leds[] = {
- {
- .name = "led0",
- .gpio = 0,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
- {
- .name = "led1",
- .gpio = 1,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
- {
- .name = "led2",
- .gpio = 2,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
- {
- .name = "led3",
- .gpio = 159,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- }
-};
-
-static struct gpio_led_platform_data mackerel_leds_pdata = {
- .leds = mackerel_leds,
- .num_leds = ARRAY_SIZE(mackerel_leds),
-};
-
-static struct platform_device leds_device = {
- .name = "leds-gpio",
- .id = 0,
- .dev = {
- .platform_data = &mackerel_leds_pdata,
- },
-};
-
-/* FSI */
-#define IRQ_FSI evt2irq(0x1840)
-static struct sh_fsi_platform_info fsi_info = {
- .port_a = {
- .tx_id = SHDMA_SLAVE_FSIA_TX,
- .rx_id = SHDMA_SLAVE_FSIA_RX,
- },
- .port_b = {
- .flags = SH_FSI_CLK_CPG |
- SH_FSI_FMT_SPDIF,
- }
-};
-
-static struct resource fsi_resources[] = {
- [0] = {
- /* we need 0xFE1F0000 to access DMA
- * instead of 0xFE3C0000 */
- .name = "FSI",
- .start = 0xFE1F0000,
- .end = 0xFE1F0400 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_FSI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device fsi_device = {
- .name = "sh_fsi2",
- .id = -1,
- .num_resources = ARRAY_SIZE(fsi_resources),
- .resource = fsi_resources,
- .dev = {
- .platform_data = &fsi_info,
- },
-};
-
-static struct asoc_simple_card_info fsi2_ak4643_info = {
- .name = "AK4643",
- .card = "FSI2A-AK4643",
- .codec = "ak4642-codec.0-0013",
- .platform = "sh_fsi2",
- .daifmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM,
- .cpu_dai = {
- .name = "fsia-dai",
- },
- .codec_dai = {
- .name = "ak4642-hifi",
- .sysclk = 11289600,
- },
-};
-
-static struct platform_device fsi_ak4643_device = {
- .name = "asoc-simple-card",
- .dev = {
- .platform_data = &fsi2_ak4643_info,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &fsi_ak4643_device.dev.coherent_dma_mask,
- },
-};
-
-/* FLCTL */
-static struct mtd_partition nand_partition_info[] = {
- {
- .name = "system",
- .offset = 0,
- .size = 128 * 1024 * 1024,
- },
- {
- .name = "userdata",
- .offset = MTDPART_OFS_APPEND,
- .size = 256 * 1024 * 1024,
- },
- {
- .name = "cache",
- .offset = MTDPART_OFS_APPEND,
- .size = 128 * 1024 * 1024,
- },
-};
-
-static struct resource nand_flash_resources[] = {
- [0] = {
- .start = 0xe6a30000,
- .end = 0xe6a3009b,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = evt2irq(0x0d80), /* flstei: status error irq */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct sh_flctl_platform_data nand_flash_data = {
- .parts = nand_partition_info,
- .nr_parts = ARRAY_SIZE(nand_partition_info),
- .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET
- | SHBUSSEL | SEL_16BIT | SNAND_E,
- .use_holden = 1,
-};
-
-static struct platform_device nand_flash_device = {
- .name = "sh_flctl",
- .resource = nand_flash_resources,
- .num_resources = ARRAY_SIZE(nand_flash_resources),
- .dev = {
- .platform_data = &nand_flash_data,
- },
-};
-
-/* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi0_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .tmio_flags = TMIO_MMC_USE_GPIO_CD,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
- .cd_gpio = 172,
-};
-
-static struct resource sdhi0_resources[] = {
- {
- .name = "SDHI0",
- .start = 0xe6850000,
- .end = 0xe68500ff,
- .flags = IORESOURCE_MEM,
- }, {
- .name = SH_MOBILE_SDHI_IRQ_SDCARD,
- .start = evt2irq(0x0e20) /* SDHI0_SDHI0I1 */,
- .flags = IORESOURCE_IRQ,
- }, {
- .name = SH_MOBILE_SDHI_IRQ_SDIO,
- .start = evt2irq(0x0e40) /* SDHI0_SDHI0I2 */,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device sdhi0_device = {
- .name = "sh_mobile_sdhi",
- .num_resources = ARRAY_SIZE(sdhi0_resources),
- .resource = sdhi0_resources,
- .id = 0,
- .dev = {
- .platform_data = &sdhi0_info,
- },
-};
-
-#if !IS_ENABLED(CONFIG_MMC_SH_MMCIF)
-/* SDHI1 */
-
-/* GPIO 41 can trigger IRQ8, but it is used by USBHS1, we have to poll */
-static struct sh_mobile_sdhi_info sdhi1_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
- .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_USE_GPIO_CD,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
- MMC_CAP_NEEDS_POLL,
- .cd_gpio = 41,
-};
-
-static struct resource sdhi1_resources[] = {
- {
- .name = "SDHI1",
- .start = 0xe6860000,
- .end = 0xe68600ff,
- .flags = IORESOURCE_MEM,
- }, {
- .name = SH_MOBILE_SDHI_IRQ_SDCARD,
- .start = evt2irq(0x0ea0), /* SDHI1_SDHI1I1 */
- .flags = IORESOURCE_IRQ,
- }, {
- .name = SH_MOBILE_SDHI_IRQ_SDIO,
- .start = evt2irq(0x0ec0), /* SDHI1_SDHI1I2 */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device sdhi1_device = {
- .name = "sh_mobile_sdhi",
- .num_resources = ARRAY_SIZE(sdhi1_resources),
- .resource = sdhi1_resources,
- .id = 1,
- .dev = {
- .platform_data = &sdhi1_info,
- },
-};
-#endif
-
-/* SDHI2 */
-
-/*
- * The card detect pin of the top SD/MMC slot (CN23) is active low and is
- * connected to GPIO SCIFB_SCK of SH7372 (GPIO 162).
- */
-static struct sh_mobile_sdhi_info sdhi2_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI2_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI2_RX,
- .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_USE_GPIO_CD,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
- MMC_CAP_NEEDS_POLL,
- .cd_gpio = 162,
-};
-
-static struct resource sdhi2_resources[] = {
- {
- .name = "SDHI2",
- .start = 0xe6870000,
- .end = 0xe68700ff,
- .flags = IORESOURCE_MEM,
- }, {
- .name = SH_MOBILE_SDHI_IRQ_SDCARD,
- .start = evt2irq(0x1220), /* SDHI2_SDHI2I1 */
- .flags = IORESOURCE_IRQ,
- }, {
- .name = SH_MOBILE_SDHI_IRQ_SDIO,
- .start = evt2irq(0x1240), /* SDHI2_SDHI2I2 */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device sdhi2_device = {
- .name = "sh_mobile_sdhi",
- .num_resources = ARRAY_SIZE(sdhi2_resources),
- .resource = sdhi2_resources,
- .id = 2,
- .dev = {
- .platform_data = &sdhi2_info,
- },
-};
-
-/* SH_MMCIF */
-#if IS_ENABLED(CONFIG_MMC_SH_MMCIF)
-static struct resource sh_mmcif_resources[] = {
- [0] = {
- .name = "MMCIF",
- .start = 0xE6BD0000,
- .end = 0xE6BD00FF,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- /* MMC ERR */
- .start = evt2irq(0x1ac0),
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* MMC NOR */
- .start = evt2irq(0x1ae0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct sh_mmcif_plat_data sh_mmcif_plat = {
- .sup_pclk = 0,
- .caps = MMC_CAP_4_BIT_DATA |
- MMC_CAP_8_BIT_DATA |
- MMC_CAP_NEEDS_POLL,
- .use_cd_gpio = true,
- /* card detect pin for SD/MMC slot (CN7) */
- .cd_gpio = 41,
- .slave_id_tx = SHDMA_SLAVE_MMCIF_TX,
- .slave_id_rx = SHDMA_SLAVE_MMCIF_RX,
-};
-
-static struct platform_device sh_mmcif_device = {
- .name = "sh_mmcif",
- .id = 0,
- .dev = {
- .dma_mask = NULL,
- .coherent_dma_mask = 0xffffffff,
- .platform_data = &sh_mmcif_plat,
- },
- .num_resources = ARRAY_SIZE(sh_mmcif_resources),
- .resource = sh_mmcif_resources,
-};
-#endif
-
-static int mackerel_camera_add(struct soc_camera_device *icd);
-static void mackerel_camera_del(struct soc_camera_device *icd);
-
-static int camera_set_capture(struct soc_camera_platform_info *info,
- int enable)
-{
- return 0; /* camera sensor always enabled */
-}
-
-static struct soc_camera_platform_info camera_info = {
- .format_name = "UYVY",
- .format_depth = 16,
- .format = {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- .field = V4L2_FIELD_NONE,
- .width = 640,
- .height = 480,
- },
- .mbus_param = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH,
- .mbus_type = V4L2_MBUS_PARALLEL,
- .set_capture = camera_set_capture,
-};
-
-static struct soc_camera_link camera_link = {
- .bus_id = 0,
- .add_device = mackerel_camera_add,
- .del_device = mackerel_camera_del,
- .module_name = "soc_camera_platform",
- .priv = &camera_info,
-};
-
-static struct platform_device *camera_device;
-
-static void mackerel_camera_release(struct device *dev)
-{
- soc_camera_platform_release(&camera_device);
-}
-
-static int mackerel_camera_add(struct soc_camera_device *icd)
-{
- return soc_camera_platform_add(icd, &camera_device, &camera_link,
- mackerel_camera_release, 0);
-}
-
-static void mackerel_camera_del(struct soc_camera_device *icd)
-{
- soc_camera_platform_del(icd, camera_device, &camera_link);
-}
-
-static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
- .max_width = 8188,
- .max_height = 8188,
-};
-
-static struct resource ceu_resources[] = {
- [0] = {
- .name = "CEU",
- .start = 0xfe910000,
- .end = 0xfe91009f,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = intcs_evt2irq(0x880),
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* place holder for contiguous memory */
- },
-};
-
-static struct platform_device ceu_device = {
- .name = "sh_mobile_ceu",
- .id = 0, /* "ceu0" clock */
- .num_resources = ARRAY_SIZE(ceu_resources),
- .resource = ceu_resources,
- .dev = {
- .platform_data = &sh_mobile_ceu_info,
- .coherent_dma_mask = 0xffffffff,
- },
-};
-
-static struct platform_device mackerel_camera = {
- .name = "soc-camera-pdrv",
- .id = 0,
- .dev = {
- .platform_data = &camera_link,
- },
-};
-
-static struct platform_device *mackerel_devices[] __initdata = {
- &nor_flash_device,
- &smc911x_device,
- &lcdc_device,
- &gpio_backlight_device,
- &usbhs0_device,
- &usbhs1_device,
- &leds_device,
- &fsi_device,
- &fsi_ak4643_device,
- &fsi_hdmi_device,
- &nand_flash_device,
- &sdhi0_device,
-#if !IS_ENABLED(CONFIG_MMC_SH_MMCIF)
- &sdhi1_device,
-#else
- &sh_mmcif_device,
-#endif
- &sdhi2_device,
- &ceu_device,
- &mackerel_camera,
- &hdmi_device,
- &hdmi_lcdc_device,
- &meram_device,
-};
-
-/* Keypad Initialization */
-#define KEYPAD_BUTTON(ev_type, ev_code, act_low) \
-{ \
- .type = ev_type, \
- .code = ev_code, \
- .active_low = act_low, \
-}
-
-#define KEYPAD_BUTTON_LOW(event_code) KEYPAD_BUTTON(EV_KEY, event_code, 1)
-
-static struct tca6416_button mackerel_gpio_keys[] = {
- KEYPAD_BUTTON_LOW(KEY_HOME),
- KEYPAD_BUTTON_LOW(KEY_MENU),
- KEYPAD_BUTTON_LOW(KEY_BACK),
- KEYPAD_BUTTON_LOW(KEY_POWER),
-};
-
-static struct tca6416_keys_platform_data mackerel_tca6416_keys_info = {
- .buttons = mackerel_gpio_keys,
- .nbuttons = ARRAY_SIZE(mackerel_gpio_keys),
- .rep = 1,
- .use_polling = 0,
- .pinmask = 0x000F,
-};
-
-/* I2C */
-#define IRQ7 evt2irq(0x02e0)
-#define IRQ9 evt2irq(0x0320)
-
-static struct i2c_board_info i2c0_devices[] = {
- {
- I2C_BOARD_INFO("ak4643", 0x13),
- },
- /* Keypad */
- {
- I2C_BOARD_INFO("tca6408-keys", 0x20),
- .platform_data = &mackerel_tca6416_keys_info,
- .irq = IRQ9,
- },
- /* Touchscreen */
- {
- I2C_BOARD_INFO("st1232-ts", 0x55),
- .irq = IRQ7,
- },
-};
-
-#define IRQ21 evt2irq(0x32a0)
-
-static struct i2c_board_info i2c1_devices[] = {
- /* Accelerometer */
- {
- I2C_BOARD_INFO("adxl34x", 0x53),
- .irq = IRQ21,
- },
-};
-
-static unsigned long pin_pulldown_conf[] = {
- PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_DOWN, 0),
-};
-
-static const struct pinctrl_map mackerel_pinctrl_map[] = {
- /* ADXL34X */
- PIN_MAP_MUX_GROUP_DEFAULT("1-0053", "pfc-sh7372",
- "intc_irq21", "intc"),
- /* CEU */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-sh7372",
- "ceu_data_0_7", "ceu"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-sh7372",
- "ceu_clk_0", "ceu"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-sh7372",
- "ceu_sync", "ceu"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-sh7372",
- "ceu_field", "ceu"),
- /* FLCTL */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_flctl.0", "pfc-sh7372",
- "flctl_data", "flctl"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_flctl.0", "pfc-sh7372",
- "flctl_ce0", "flctl"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_flctl.0", "pfc-sh7372",
- "flctl_ctrl", "flctl"),
- /* FSIA (AK4643) */
- PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-sh7372",
- "fsia_sclk_in", "fsia"),
- PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-sh7372",
- "fsia_data_in", "fsia"),
- PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-sh7372",
- "fsia_data_out", "fsia"),
- /* FSIB (HDMI) */
- PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-sh7372",
- "fsib_mclk_in", "fsib"),
- /* HDMI */
- PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-sh7372",
- "hdmi", "hdmi"),
- /* LCDC */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh7372",
- "lcd_data24", "lcd"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh7372",
- "lcd_sync", "lcd"),
- /* SCIFA0 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.0", "pfc-sh7372",
- "scifa0_data", "scifa0"),
- /* SCIFA2 (GT-720F GPS module) */
- PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-sh7372",
- "scifa2_data", "scifa2"),
- /* SDHI0 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh7372",
- "sdhi0_data4", "sdhi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh7372",
- "sdhi0_ctrl", "sdhi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh7372",
- "sdhi0_wp", "sdhi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh7372",
- "intc_irq26_1", "intc"),
- /* SDHI1 */
-#if !IS_ENABLED(CONFIG_MMC_SH_MMCIF)
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh7372",
- "sdhi1_data4", "sdhi1"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-sh7372",
- "sdhi1_ctrl", "sdhi1"),
-#else
- /* MMCIF */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh7372",
- "mmc0_data8_0", "mmc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh7372",
- "mmc0_ctrl_0", "mmc0"),
-#endif
- /* SDHI2 */
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh7372",
- "sdhi2_data4", "sdhi2"),
- PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh7372",
- "sdhi2_ctrl", "sdhi2"),
- /* SMSC911X */
- PIN_MAP_MUX_GROUP_DEFAULT("smsc911x", "pfc-sh7372",
- "bsc_cs5a", "bsc"),
- PIN_MAP_MUX_GROUP_DEFAULT("smsc911x", "pfc-sh7372",
- "intc_irq6_0", "intc"),
- /* ST1232 */
- PIN_MAP_MUX_GROUP_DEFAULT("0-0055", "pfc-sh7372",
- "intc_irq7_0", "intc"),
- /* TCA6416 */
- PIN_MAP_MUX_GROUP_DEFAULT("0-0020", "pfc-sh7372",
- "intc_irq9_0", "intc"),
- /* USBHS0 */
- PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs.0", "pfc-sh7372",
- "usb0_vbus", "usb0"),
- PIN_MAP_CONFIGS_GROUP_DEFAULT("renesas_usbhs.0", "pfc-sh7372",
- "usb0_vbus", pin_pulldown_conf),
- /* USBHS1 */
- PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs.1", "pfc-sh7372",
- "usb1_vbus", "usb1"),
- PIN_MAP_CONFIGS_GROUP_DEFAULT("renesas_usbhs.1", "pfc-sh7372",
- "usb1_vbus", pin_pulldown_conf),
- PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs.1", "pfc-sh7372",
- "usb1_otg_id_0", "usb1"),
-};
-
-#define GPIO_PORT9CR IOMEM(0xE6051009)
-#define GPIO_PORT10CR IOMEM(0xE605100A)
-#define SRCR4 IOMEM(0xe61580bc)
-#define USCCR1 IOMEM(0xE6058144)
-static void __init mackerel_init(void)
-{
- static struct pm_domain_device domain_devices[] __initdata = {
- { "A4LC", &lcdc_device, },
- { "A4LC", &hdmi_lcdc_device, },
- { "A4LC", &meram_device, },
- { "A4MP", &fsi_device, },
- { "A3SP", &usbhs0_device, },
- { "A3SP", &usbhs1_device, },
- { "A3SP", &nand_flash_device, },
- { "A3SP", &sdhi0_device, },
-#if !IS_ENABLED(CONFIG_MMC_SH_MMCIF)
- { "A3SP", &sdhi1_device, },
-#else
- { "A3SP", &sh_mmcif_device, },
-#endif
- { "A3SP", &sdhi2_device, },
- { "A4R", &ceu_device, },
- };
- u32 srcr4;
- struct clk *clk;
-
- regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
- ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
- regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers,
- ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
- regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
- /* External clock source */
- clk_set_rate(&sh7372_dv_clki_clk, 27000000);
-
- pinctrl_register_mappings(mackerel_pinctrl_map,
- ARRAY_SIZE(mackerel_pinctrl_map));
- sh7372_pinmux_init();
-
- gpio_request_one(151, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
-
- /* FSI2 port A (ak4643) */
- gpio_request_one(161, GPIOF_OUT_INIT_LOW, NULL); /* slave */
-
- gpio_request(9, NULL);
- gpio_request(10, NULL);
- gpio_direction_none(GPIO_PORT9CR); /* FSIAOBT needs no direction */
- gpio_direction_none(GPIO_PORT10CR); /* FSIAOLR needs no direction */
-
- intc_set_priority(IRQ_FSI, 3); /* irq priority FSI(3) > SMSC911X(2) */
-
- /* FSI2 port B (HDMI) */
- __raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
-
- /* set SPU2 clock to 119.6 MHz */
- clk = clk_get(NULL, "spu_clk");
- if (!IS_ERR(clk)) {
- clk_set_rate(clk, clk_round_rate(clk, 119600000));
- clk_put(clk);
- }
-
- /* Keypad */
- irq_set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH);
-
- /* Touchscreen */
- irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);
-
- /* Accelerometer */
- irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);
-
- /* Reset HDMI, must be held at least one EXTALR (32768Hz) period */
- srcr4 = __raw_readl(SRCR4);
- __raw_writel(srcr4 | (1 << 13), SRCR4);
- udelay(50);
- __raw_writel(srcr4 & ~(1 << 13), SRCR4);
-
- i2c_register_board_info(0, i2c0_devices,
- ARRAY_SIZE(i2c0_devices));
- i2c_register_board_info(1, i2c1_devices,
- ARRAY_SIZE(i2c1_devices));
-
- sh7372_add_standard_devices();
-
- platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
-
- rmobile_add_devices_to_domains(domain_devices,
- ARRAY_SIZE(domain_devices));
-
- hdmi_init_pm_clock();
- sh7372_pm_init();
- pm_clk_add(&fsi_device.dev, "spu2");
- pm_clk_add(&hdmi_lcdc_device.dev, "hdmi");
-}
-
-static const char *mackerel_boards_compat_dt[] __initdata = {
- "renesas,mackerel",
- NULL,
-};
-
-DT_MACHINE_START(MACKEREL_DT, "mackerel")
- .map_io = sh7372_map_io,
- .init_early = sh7372_add_early_devices,
- .init_irq = sh7372_init_irq,
- .handle_irq = shmobile_handle_irq_intc,
- .init_machine = mackerel_init,
- .init_late = sh7372_pm_init_late,
- .init_time = sh7372_earlytimer_init,
- .dt_compat = mackerel_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 598f704f76ae..51db288f192a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -122,11 +122,11 @@ static struct resource sdhi0_resources[] = {
},
};
-static struct sh_mobile_sdhi_info sdhi0_platform_data = {
- .dma_slave_tx = HPBDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = HPBDMA_SLAVE_SDHI0_RX,
- .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED,
+static struct tmio_mmc_data sdhi0_platform_data = {
+ .chan_priv_tx = (void *)HPBDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)HPBDMA_SLAVE_SDHI0_RX,
+ .flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
+ .capabilities = MMC_CAP_SD_HIGHSPEED,
};
static struct platform_device sdhi0_device = {
diff --git a/arch/arm/mach-shmobile/clock-r8a73a4.c b/arch/arm/mach-shmobile/clock-r8a73a4.c
deleted file mode 100644
index 1cf44dc6d718..000000000000
--- a/arch/arm/mach-shmobile/clock-r8a73a4.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * r8a73a4 clock framework support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/sh_clk.h>
-#include <linux/clkdev.h>
-#include "common.h"
-#include "clock.h"
-
-#define CPG_BASE 0xe6150000
-#define CPG_LEN 0x270
-
-#define SMSTPCR2 0xe6150138
-#define SMSTPCR3 0xe615013c
-#define SMSTPCR4 0xe6150140
-#define SMSTPCR5 0xe6150144
-
-#define FRQCRA 0xE6150000
-#define FRQCRB 0xE6150004
-#define FRQCRC 0xE61500E0
-#define VCLKCR1 0xE6150008
-#define VCLKCR2 0xE615000C
-#define VCLKCR3 0xE615001C
-#define VCLKCR4 0xE6150014
-#define VCLKCR5 0xE6150034
-#define ZBCKCR 0xE6150010
-#define SD0CKCR 0xE6150074
-#define SD1CKCR 0xE6150078
-#define SD2CKCR 0xE615007C
-#define MMC0CKCR 0xE6150240
-#define MMC1CKCR 0xE6150244
-#define FSIACKCR 0xE6150018
-#define FSIBCKCR 0xE6150090
-#define MPCKCR 0xe6150080
-#define SPUVCKCR 0xE6150094
-#define HSICKCR 0xE615026C
-#define M4CKCR 0xE6150098
-#define PLLECR 0xE61500D0
-#define PLL0CR 0xE61500D8
-#define PLL1CR 0xE6150028
-#define PLL2CR 0xE615002C
-#define PLL2SCR 0xE61501F4
-#define PLL2HCR 0xE61501E4
-#define CKSCR 0xE61500C0
-
-#define CPG_MAP(o) ((o - CPG_BASE) + cpg_mapping.base)
-
-static struct clk_mapping cpg_mapping = {
- .phys = CPG_BASE,
- .len = CPG_LEN,
-};
-
-static struct clk extalr_clk = {
- .rate = 32768,
- .mapping = &cpg_mapping,
-};
-
-static struct clk extal1_clk = {
- .rate = 26000000,
- .mapping = &cpg_mapping,
-};
-
-static struct clk extal2_clk = {
- .rate = 48000000,
- .mapping = &cpg_mapping,
-};
-
-static struct sh_clk_ops followparent_clk_ops = {
- .recalc = followparent_recalc,
-};
-
-static struct clk main_clk = {
- /* .parent will be set in r8a73a4_clock_init */
- .ops = &followparent_clk_ops,
-};
-
-SH_CLK_RATIO(div2, 1, 2);
-SH_CLK_RATIO(div4, 1, 4);
-
-SH_FIXED_RATIO_CLK(main_div2_clk, main_clk, div2);
-SH_FIXED_RATIO_CLK(extal1_div2_clk, extal1_clk, div2);
-SH_FIXED_RATIO_CLK(extal2_div2_clk, extal2_clk, div2);
-SH_FIXED_RATIO_CLK(extal2_div4_clk, extal2_clk, div4);
-
-/* External FSIACK/FSIBCK clock */
-static struct clk fsiack_clk = {
-};
-
-static struct clk fsibck_clk = {
-};
-
-/*
- * PLL clocks
- */
-static struct clk *pll_parent_main[] = {
- [0] = &main_clk,
- [1] = &main_div2_clk
-};
-
-static struct clk *pll_parent_main_extal[8] = {
- [0] = &main_div2_clk,
- [1] = &extal2_div2_clk,
- [3] = &extal2_div4_clk,
- [4] = &main_clk,
- [5] = &extal2_clk,
-};
-
-static unsigned long pll_recalc(struct clk *clk)
-{
- unsigned long mult = 1;
-
- if (ioread32(CPG_MAP(PLLECR)) & (1 << clk->enable_bit))
- mult = (((ioread32(clk->mapped_reg) >> 24) & 0x7f) + 1);
-
- return clk->parent->rate * mult;
-}
-
-static int pll_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 val;
- int i, ret;
-
- if (!clk->parent_table || !clk->parent_num)
- return -EINVAL;
-
- /* Search the parent */
- for (i = 0; i < clk->parent_num; i++)
- if (clk->parent_table[i] == parent)
- break;
-
- if (i == clk->parent_num)
- return -ENODEV;
-
- ret = clk_reparent(clk, parent);
- if (ret < 0)
- return ret;
-
- val = ioread32(clk->mapped_reg) &
- ~(((1 << clk->src_width) - 1) << clk->src_shift);
-
- iowrite32(val | i << clk->src_shift, clk->mapped_reg);
-
- return 0;
-}
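The register update at the end of pll_set_parent() clears the src_width-bit parent-select field at src_shift and writes the index of the chosen parent into it. A minimal user-space sketch of that bit manipulation, using pll2_clk's field (3 bits at shift 5, per the PLL_CLOCK() lines below) and parent index 3 (extal2_div4_clk); the starting register value is made up for the example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t reg = 0x00000020;			/* example: field currently 1 */
		unsigned int src_shift = 5, src_width = 3;	/* as for pll2_clk */
		unsigned int idx = 3;				/* pll_parent_main_extal[3] */

		reg &= ~(((1u << src_width) - 1) << src_shift);	/* clear the select field */
		reg |= idx << src_shift;			/* write new parent index */
		printf("mux field updated: 0x%08x\n", reg);	/* prints 0x00000060 */
		return 0;
	}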
-
-static struct sh_clk_ops pll_clk_ops = {
- .recalc = pll_recalc,
- .set_parent = pll_set_parent,
-};
-
-#define PLL_CLOCK(name, p, pt, w, s, reg, e) \
- static struct clk name = { \
- .ops = &pll_clk_ops, \
- .flags = CLK_ENABLE_ON_INIT, \
- .parent = p, \
- .parent_table = pt, \
- .parent_num = ARRAY_SIZE(pt), \
- .src_width = w, \
- .src_shift = s, \
- .enable_reg = (void __iomem *)reg, \
- .enable_bit = e, \
- .mapping = &cpg_mapping, \
- }
-
-PLL_CLOCK(pll0_clk, &main_clk, pll_parent_main, 1, 20, PLL0CR, 0);
-PLL_CLOCK(pll1_clk, &main_clk, pll_parent_main, 1, 7, PLL1CR, 1);
-PLL_CLOCK(pll2_clk, &main_div2_clk, pll_parent_main_extal, 3, 5, PLL2CR, 2);
-PLL_CLOCK(pll2s_clk, &main_div2_clk, pll_parent_main_extal, 3, 5, PLL2SCR, 4);
-PLL_CLOCK(pll2h_clk, &main_div2_clk, pll_parent_main_extal, 3, 5, PLL2HCR, 5);
-
-SH_FIXED_RATIO_CLK(pll1_div2_clk, pll1_clk, div2);
-
-static atomic_t frqcr_lock;
-
-/* Several clocks need to access FRQCRB, so access has to be locked */
-static bool frqcr_kick_check(struct clk *clk)
-{
- return !(ioread32(CPG_MAP(FRQCRB)) & BIT(31));
-}
-
-static int frqcr_kick_do(struct clk *clk)
-{
- int i;
-
- /* set KICK bit in FRQCRB to update hardware setting, check success */
- iowrite32(ioread32(CPG_MAP(FRQCRB)) | BIT(31), CPG_MAP(FRQCRB));
- for (i = 1000; i; i--)
- if (ioread32(CPG_MAP(FRQCRB)) & BIT(31))
- cpu_relax();
- else
- return 0;
-
- return -ETIMEDOUT;
-}
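The callers of frqcr_kick_do() (div4_kick() and zclk_set_rate() below) serialize FRQCRB access through frqcr_lock, which r8a73a4_clock_init() sets to -1: atomic_inc_and_test() then behaves as a try-lock, since only the caller that takes the counter from -1 to 0 proceeds, and every caller decrements it again when finished. A user-space sketch of the same idiom with C11 atomics (not the kernel atomic API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int frqcr_lock = -1;

	static bool frqcr_trylock(void)
	{
		/* mirrors atomic_inc_and_test(): increment, succeed if now zero */
		return atomic_fetch_add(&frqcr_lock, 1) + 1 == 0;
	}

	static void frqcr_unlock(void)
	{
		atomic_fetch_sub(&frqcr_lock, 1);	/* mirrors atomic_dec() */
	}

	int main(void)
	{
		printf("first caller wins:  %d\n", frqcr_trylock());	/* 1 */
		printf("second caller busy: %d\n", frqcr_trylock());	/* 0 */
		frqcr_unlock();
		frqcr_unlock();		/* both callers drop their increment */
		return 0;
	}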
-
-static int zclk_set_rate(struct clk *clk, unsigned long rate)
-{
- void __iomem *frqcrc;
- int ret;
- unsigned long step, p_rate;
- u32 val;
-
- if (!clk->parent || !__clk_get(clk->parent))
- return -ENODEV;
-
- if (!atomic_inc_and_test(&frqcr_lock) || !frqcr_kick_check(clk)) {
- ret = -EBUSY;
- goto done;
- }
-
- /*
- * Users are supposed to call clk_set_rate() only with rates returned
- * by clk_round_rate(). So we don't fix up wrong rates here, but we do
- * guard against them anyway.
- */
-
- p_rate = clk_get_rate(clk->parent);
- if (rate == p_rate) {
- val = 0;
- } else {
- step = DIV_ROUND_CLOSEST(p_rate, 32);
-
- if (rate > p_rate || rate < step) {
- ret = -EINVAL;
- goto done;
- }
-
- val = 32 - rate / step;
- }
-
- frqcrc = clk->mapped_reg + (FRQCRC - (u32)clk->enable_reg);
-
- iowrite32((ioread32(frqcrc) & ~(clk->div_mask << clk->enable_bit)) |
- (val << clk->enable_bit), frqcrc);
-
- ret = frqcr_kick_do(clk);
-
-done:
- atomic_dec(&frqcr_lock);
- __clk_put(clk->parent);
- return ret;
-}
-
-static long zclk_round_rate(struct clk *clk, unsigned long rate)
-{
- /*
- * theoretical rate = parent rate * multiplier / 32,
- * where 1 <= multiplier <= 32. Therefore we should do
- * multiplier = rate * 32 / parent rate
- * rounded rate = parent rate * multiplier / 32.
- * However, multiplication before division won't fit in 32 bits, so
- * we sacrifice some precision by first dividing and then multiplying.
- * To find the nearest divisor we calculate both and pick the better
- * one. This avoids 64-bit arithmetic.
- */
- unsigned long step, mul_min, mul_max, rate_min, rate_max;
-
- rate_max = clk_get_rate(clk->parent);
-
- /* output freq <= parent */
- if (rate >= rate_max)
- return rate_max;
-
- step = DIV_ROUND_CLOSEST(rate_max, 32);
- /* output freq >= parent / 32 */
- if (step >= rate)
- return step;
-
- mul_min = rate / step;
- mul_max = DIV_ROUND_UP(rate, step);
- rate_min = step * mul_min;
- if (mul_max == mul_min)
- return rate_min;
-
- rate_max = step * mul_max;
-
- if (rate_max - rate < rate - rate_min)
- return rate_max;
-
- return rate_min;
-}
-
-static unsigned long zclk_recalc(struct clk *clk)
-{
- void __iomem *frqcrc = FRQCRC - (u32)clk->enable_reg + clk->mapped_reg;
- unsigned int max = clk->div_mask + 1;
- unsigned long val = ((ioread32(frqcrc) >> clk->enable_bit) &
- clk->div_mask);
-
- return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent), max) *
- (max - val);
-}
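zclk_round_rate() picks the multiplier m (1..32) whose rate, parent/32 * m, is nearest to the request; zclk_set_rate() then stores 32 - m in the 5-bit FRQCRC field, and zclk_recalc() converts it back. A minimal user-space sketch of the rounding, assuming a 1.2 GHz parent purely as an example (the real parent is PLL0):

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
	#define DIV_ROUND_UP(x, d)	(((x) + (d) - 1) / (d))

	static unsigned long zclk_round(unsigned long parent, unsigned long rate)
	{
		unsigned long step, mul_min, mul_max, rate_min, rate_max;

		if (rate >= parent)			/* output <= parent */
			return parent;
		step = DIV_ROUND_CLOSEST(parent, 32);	/* parent / 32 */
		if (step >= rate)			/* output >= parent / 32 */
			return step;
		mul_min = rate / step;
		mul_max = DIV_ROUND_UP(rate, step);
		rate_min = step * mul_min;
		if (mul_max == mul_min)
			return rate_min;
		rate_max = step * mul_max;
		return (rate_max - rate < rate - rate_min) ? rate_max : rate_min;
	}

	int main(void)
	{
		/* step = 37.5 MHz; 1 GHz lies between 26 and 27 steps and is
		 * closer to 27 * step, so 1012500000 is printed. */
		printf("%lu\n", zclk_round(1200000000UL, 1000000000UL));
		return 0;
	}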
-
-static struct sh_clk_ops zclk_ops = {
- .recalc = zclk_recalc,
- .set_rate = zclk_set_rate,
- .round_rate = zclk_round_rate,
-};
-
-static struct clk z_clk = {
- .parent = &pll0_clk,
- .div_mask = 0x1f,
- .enable_bit = 8,
- /* We'll need to access FRQCRB and FRQCRC */
- .enable_reg = (void __iomem *)FRQCRB,
- .ops = &zclk_ops,
-};
-
-/*
- * It seems only 1/2 divider is usable in manual mode. 1/2 / 2/3
- * switching is only available in auto-DVFS mode
- */
-SH_FIXED_RATIO_CLK(pll0_div2_clk, pll0_clk, div2);
-
-static struct clk z2_clk = {
- .parent = &pll0_div2_clk,
- .div_mask = 0x1f,
- .enable_bit = 0,
- /* We'll need to access FRQCRB and FRQCRC */
- .enable_reg = (void __iomem *)FRQCRB,
- .ops = &zclk_ops,
-};
-
-static struct clk *main_clks[] = {
- &extalr_clk,
- &extal1_clk,
- &extal1_div2_clk,
- &extal2_clk,
- &extal2_div2_clk,
- &extal2_div4_clk,
- &main_clk,
- &main_div2_clk,
- &fsiack_clk,
- &fsibck_clk,
- &pll0_clk,
- &pll1_clk,
- &pll1_div2_clk,
- &pll2_clk,
- &pll2s_clk,
- &pll2h_clk,
- &z_clk,
- &pll0_div2_clk,
- &z2_clk,
-};
-
-/* DIV4 */
-static void div4_kick(struct clk *clk)
-{
- if (!WARN(!atomic_inc_and_test(&frqcr_lock), "FRQCR* lock broken!\n"))
- frqcr_kick_do(clk);
- atomic_dec(&frqcr_lock);
-}
-
-static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18, 24, 0, 36, 48, 10};
-
-static struct clk_div_mult_table div4_div_mult_table = {
- .divisors = divisors,
- .nr_divisors = ARRAY_SIZE(divisors),
-};
-
-static struct clk_div4_table div4_table = {
- .div_mult_table = &div4_div_mult_table,
- .kick = div4_kick,
-};
-
-enum {
- DIV4_I, DIV4_M3, DIV4_B, DIV4_M1, DIV4_M2,
- DIV4_ZX, DIV4_ZS, DIV4_HP,
- DIV4_NR };
-
-static struct clk div4_clks[DIV4_NR] = {
- [DIV4_I] = SH_CLK_DIV4(&pll1_clk, FRQCRA, 20, 0x0dff, CLK_ENABLE_ON_INIT),
- [DIV4_M3] = SH_CLK_DIV4(&pll1_clk, FRQCRA, 12, 0x1dff, CLK_ENABLE_ON_INIT),
- [DIV4_B] = SH_CLK_DIV4(&pll1_clk, FRQCRA, 8, 0x0dff, CLK_ENABLE_ON_INIT),
- [DIV4_M1] = SH_CLK_DIV4(&pll1_clk, FRQCRA, 4, 0x1dff, 0),
- [DIV4_M2] = SH_CLK_DIV4(&pll1_clk, FRQCRA, 0, 0x1dff, 0),
- [DIV4_ZX] = SH_CLK_DIV4(&pll1_clk, FRQCRB, 12, 0x0dff, 0),
- [DIV4_ZS] = SH_CLK_DIV4(&pll1_clk, FRQCRB, 8, 0x0dff, 0),
- [DIV4_HP] = SH_CLK_DIV4(&pll1_clk, FRQCRB, 4, 0x0dff, 0),
-};
-
-enum {
- DIV6_ZB,
- DIV6_SDHI0, DIV6_SDHI1, DIV6_SDHI2,
- DIV6_MMC0, DIV6_MMC1,
- DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_VCK4, DIV6_VCK5,
- DIV6_FSIA, DIV6_FSIB,
- DIV6_MP, DIV6_M4, DIV6_HSI, DIV6_SPUV,
- DIV6_NR };
-
-static struct clk *div6_parents[8] = {
- [0] = &pll1_div2_clk,
- [1] = &pll2s_clk,
- [3] = &extal2_clk,
- [4] = &main_div2_clk,
- [6] = &extalr_clk,
-};
-
-static struct clk *fsia_parents[4] = {
- [0] = &pll1_div2_clk,
- [1] = &pll2s_clk,
- [2] = &fsiack_clk,
-};
-
-static struct clk *fsib_parents[4] = {
- [0] = &pll1_div2_clk,
- [1] = &pll2s_clk,
- [2] = &fsibck_clk,
-};
-
-static struct clk *mp_parents[4] = {
- [0] = &pll1_div2_clk,
- [1] = &pll2s_clk,
- [2] = &extal2_clk,
- [3] = &extal2_clk,
-};
-
-static struct clk *m4_parents[2] = {
- [0] = &pll2s_clk,
-};
-
-static struct clk *hsi_parents[4] = {
- [0] = &pll2h_clk,
- [1] = &pll1_div2_clk,
- [3] = &pll2s_clk,
-};
-
-/*** FIXME ***
- * The SH_CLK_DIV6_EXT() macro does not set .mapping, but .mapping is
- * necessary on R-Car (ioremap()-based CPG access).
- * The only difference between SH_CLK_DIV6_EXT() and
- * SH_CLK_MAP_DIV6_EXT() is .mapping.
- */
-#define SH_CLK_MAP_DIV6_EXT(_reg, _flags, _parents, \
- _num_parents, _src_shift, _src_width) \
-{ \
- .enable_reg = (void __iomem *)_reg, \
- .enable_bit = 0, /* unused */ \
- .flags = _flags | CLK_MASK_DIV_ON_DISABLE, \
- .div_mask = SH_CLK_DIV6_MSK, \
- .parent_table = _parents, \
- .parent_num = _num_parents, \
- .src_shift = _src_shift, \
- .src_width = _src_width, \
- .mapping = &cpg_mapping, \
-}
-
-static struct clk div6_clks[DIV6_NR] = {
- [DIV6_ZB] = SH_CLK_MAP_DIV6_EXT(ZBCKCR, CLK_ENABLE_ON_INIT,
- div6_parents, 2, 7, 1),
- [DIV6_SDHI0] = SH_CLK_MAP_DIV6_EXT(SD0CKCR, 0,
- div6_parents, 2, 6, 2),
- [DIV6_SDHI1] = SH_CLK_MAP_DIV6_EXT(SD1CKCR, 0,
- div6_parents, 2, 6, 2),
- [DIV6_SDHI2] = SH_CLK_MAP_DIV6_EXT(SD2CKCR, 0,
- div6_parents, 2, 6, 2),
- [DIV6_MMC0] = SH_CLK_MAP_DIV6_EXT(MMC0CKCR, 0,
- div6_parents, 2, 6, 2),
- [DIV6_MMC1] = SH_CLK_MAP_DIV6_EXT(MMC1CKCR, 0,
- div6_parents, 2, 6, 2),
- [DIV6_VCK1] = SH_CLK_MAP_DIV6_EXT(VCLKCR1, 0, /* didn't care bit[6-7] */
- div6_parents, ARRAY_SIZE(div6_parents), 12, 3),
- [DIV6_VCK2] = SH_CLK_MAP_DIV6_EXT(VCLKCR2, 0, /* didn't care bit[6-7] */
- div6_parents, ARRAY_SIZE(div6_parents), 12, 3),
- [DIV6_VCK3] = SH_CLK_MAP_DIV6_EXT(VCLKCR3, 0, /* didn't care bit[6-7] */
- div6_parents, ARRAY_SIZE(div6_parents), 12, 3),
- [DIV6_VCK4] = SH_CLK_MAP_DIV6_EXT(VCLKCR4, 0, /* didn't care bit[6-7] */
- div6_parents, ARRAY_SIZE(div6_parents), 12, 3),
- [DIV6_VCK5] = SH_CLK_MAP_DIV6_EXT(VCLKCR5, 0, /* didn't care bit[6-7] */
- div6_parents, ARRAY_SIZE(div6_parents), 12, 3),
- [DIV6_FSIA] = SH_CLK_MAP_DIV6_EXT(FSIACKCR, 0,
- fsia_parents, ARRAY_SIZE(fsia_parents), 6, 2),
- [DIV6_FSIB] = SH_CLK_MAP_DIV6_EXT(FSIBCKCR, 0,
- fsib_parents, ARRAY_SIZE(fsib_parents), 6, 2),
- [DIV6_MP] = SH_CLK_MAP_DIV6_EXT(MPCKCR, 0, /* it needs bit[9-11] control */
- mp_parents, ARRAY_SIZE(mp_parents), 6, 2),
- /* pll2s will always be selected for M4 */
- [DIV6_M4] = SH_CLK_MAP_DIV6_EXT(M4CKCR, 0, /* it needs bit[9] control */
- m4_parents, ARRAY_SIZE(m4_parents), 6, 1),
- [DIV6_HSI] = SH_CLK_MAP_DIV6_EXT(HSICKCR, 0, /* it needs bit[9] control */
- hsi_parents, ARRAY_SIZE(hsi_parents), 6, 2),
- [DIV6_SPUV] = SH_CLK_MAP_DIV6_EXT(SPUVCKCR, 0,
- mp_parents, ARRAY_SIZE(mp_parents), 6, 2),
-};
-
-/* MSTP */
-enum {
- MSTP218, MSTP217, MSTP216, MSTP207, MSTP206, MSTP204, MSTP203,
- MSTP329, MSTP323, MSTP318, MSTP317, MSTP316,
- MSTP315, MSTP314, MSTP313, MSTP312, MSTP305, MSTP300,
- MSTP411, MSTP410, MSTP409,
- MSTP522, MSTP515,
- MSTP_NR
-};
-
-static struct clk mstp_clks[MSTP_NR] = {
- [MSTP204] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 4, 0), /* SCIFA0 */
- [MSTP203] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 3, 0), /* SCIFA1 */
- [MSTP206] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 6, 0), /* SCIFB0 */
- [MSTP207] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 7, 0), /* SCIFB1 */
- [MSTP216] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 16, 0), /* SCIFB2 */
- [MSTP217] = SH_CLK_MSTP32(&div6_clks[DIV6_MP], SMSTPCR2, 17, 0), /* SCIFB3 */
- [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC */
- [MSTP300] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 0, 0), /* IIC2 */
- [MSTP305] = SH_CLK_MSTP32(&div6_clks[DIV6_MMC1],SMSTPCR3, 5, 0), /* MMCIF1 */
- [MSTP312] = SH_CLK_MSTP32(&div6_clks[DIV6_SDHI2],SMSTPCR3, 12, 0), /* SDHI2 */
- [MSTP313] = SH_CLK_MSTP32(&div6_clks[DIV6_SDHI1],SMSTPCR3, 13, 0), /* SDHI1 */
- [MSTP314] = SH_CLK_MSTP32(&div6_clks[DIV6_SDHI0],SMSTPCR3, 14, 0), /* SDHI0 */
- [MSTP315] = SH_CLK_MSTP32(&div6_clks[DIV6_MMC0],SMSTPCR3, 15, 0), /* MMCIF0 */
- [MSTP316] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 16, 0), /* IIC6 */
- [MSTP317] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 17, 0), /* IIC7 */
- [MSTP318] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* IIC0 */
- [MSTP323] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
- [MSTP329] = SH_CLK_MSTP32(&extalr_clk, SMSTPCR3, 29, 0), /* CMT10 */
- [MSTP409] = SH_CLK_MSTP32(&main_div2_clk, SMSTPCR4, 9, 0), /* IIC5 */
- [MSTP410] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
- [MSTP411] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
- [MSTP522] = SH_CLK_MSTP32(&extal2_clk, SMSTPCR5, 22, 0), /* Thermal */
- [MSTP515] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR5, 15, 0), /* IIC8 */
-};
-
-static struct clk_lookup lookups[] = {
- /* main clock */
- CLKDEV_CON_ID("extal1", &extal1_clk),
- CLKDEV_CON_ID("extal1_div2", &extal1_div2_clk),
- CLKDEV_CON_ID("extal2", &extal2_clk),
- CLKDEV_CON_ID("extal2_div2", &extal2_div2_clk),
- CLKDEV_CON_ID("extal2_div4", &extal2_div4_clk),
- CLKDEV_CON_ID("fsiack", &fsiack_clk),
- CLKDEV_CON_ID("fsibck", &fsibck_clk),
-
- /* pll clock */
- CLKDEV_CON_ID("pll1", &pll1_clk),
- CLKDEV_CON_ID("pll1_div2", &pll1_div2_clk),
- CLKDEV_CON_ID("pll2", &pll2_clk),
- CLKDEV_CON_ID("pll2s", &pll2s_clk),
- CLKDEV_CON_ID("pll2h", &pll2h_clk),
-
- /* CPU clock */
- CLKDEV_DEV_ID("cpu0", &z_clk),
-
- /* DIV6 */
- CLKDEV_CON_ID("zb", &div6_clks[DIV6_ZB]),
- CLKDEV_CON_ID("vck1", &div6_clks[DIV6_VCK1]),
- CLKDEV_CON_ID("vck2", &div6_clks[DIV6_VCK2]),
- CLKDEV_CON_ID("vck3", &div6_clks[DIV6_VCK3]),
- CLKDEV_CON_ID("vck4", &div6_clks[DIV6_VCK4]),
- CLKDEV_CON_ID("vck5", &div6_clks[DIV6_VCK5]),
- CLKDEV_CON_ID("fsia", &div6_clks[DIV6_FSIA]),
- CLKDEV_CON_ID("fsib", &div6_clks[DIV6_FSIB]),
- CLKDEV_CON_ID("mp", &div6_clks[DIV6_MP]),
- CLKDEV_CON_ID("m4", &div6_clks[DIV6_M4]),
- CLKDEV_CON_ID("hsi", &div6_clks[DIV6_HSI]),
- CLKDEV_CON_ID("spuv", &div6_clks[DIV6_SPUV]),
-
- /* MSTP */
- CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]),
- CLKDEV_DEV_ID("e6c40000.serial", &mstp_clks[MSTP204]),
- CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]),
- CLKDEV_DEV_ID("e6c50000.serial", &mstp_clks[MSTP203]),
- CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP206]),
- CLKDEV_DEV_ID("e6c20000.serial", &mstp_clks[MSTP206]),
- CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP207]),
- CLKDEV_DEV_ID("e6c30000.serial", &mstp_clks[MSTP207]),
- CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP216]),
- CLKDEV_DEV_ID("e6ce0000.serial", &mstp_clks[MSTP216]),
- CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP217]),
- CLKDEV_DEV_ID("e6cf0000.serial", &mstp_clks[MSTP217]),
- CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]),
- CLKDEV_DEV_ID("e6700020.dma-controller", &mstp_clks[MSTP218]),
- CLKDEV_DEV_ID("rcar_thermal", &mstp_clks[MSTP522]),
- CLKDEV_DEV_ID("e6520000.i2c", &mstp_clks[MSTP300]),
- CLKDEV_DEV_ID("sh_mmcif.1", &mstp_clks[MSTP305]),
- CLKDEV_DEV_ID("ee220000.mmc", &mstp_clks[MSTP305]),
- CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP312]),
- CLKDEV_DEV_ID("ee140000.sd", &mstp_clks[MSTP312]),
- CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]),
- CLKDEV_DEV_ID("ee120000.sd", &mstp_clks[MSTP313]),
- CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]),
- CLKDEV_DEV_ID("ee100000.sd", &mstp_clks[MSTP314]),
- CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP315]),
- CLKDEV_DEV_ID("ee200000.mmc", &mstp_clks[MSTP315]),
- CLKDEV_DEV_ID("e6550000.i2c", &mstp_clks[MSTP316]),
- CLKDEV_DEV_ID("e6560000.i2c", &mstp_clks[MSTP317]),
- CLKDEV_DEV_ID("e6500000.i2c", &mstp_clks[MSTP318]),
- CLKDEV_DEV_ID("e6510000.i2c", &mstp_clks[MSTP323]),
- CLKDEV_ICK_ID("fck", "sh-cmt-48-gen2.1", &mstp_clks[MSTP329]),
- CLKDEV_ICK_ID("fck", "e6130000.timer", &mstp_clks[MSTP329]),
- CLKDEV_DEV_ID("e60b0000.i2c", &mstp_clks[MSTP409]),
- CLKDEV_DEV_ID("e6540000.i2c", &mstp_clks[MSTP410]),
- CLKDEV_DEV_ID("e6530000.i2c", &mstp_clks[MSTP411]),
- CLKDEV_DEV_ID("e6570000.i2c", &mstp_clks[MSTP515]),
-
- /* for DT */
- CLKDEV_DEV_ID("e61f0000.thermal", &mstp_clks[MSTP522]),
-};
-
-void __init r8a73a4_clock_init(void)
-{
- void __iomem *reg;
- int k, ret = 0;
- u32 ckscr;
-
- atomic_set(&frqcr_lock, -1);
-
- reg = ioremap_nocache(CKSCR, PAGE_SIZE);
- BUG_ON(!reg);
- ckscr = ioread32(reg);
- iounmap(reg);
-
- switch ((ckscr >> 28) & 0x3) {
- case 0:
- main_clk.parent = &extal1_clk;
- break;
- case 1:
- main_clk.parent = &extal1_div2_clk;
- break;
- case 2:
- main_clk.parent = &extal2_clk;
- break;
- case 3:
- main_clk.parent = &extal2_div2_clk;
- break;
- }
-
- for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
- ret = clk_register(main_clks[k]);
-
- if (!ret)
- ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
-
- if (!ret)
- ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
-
- if (!ret)
- ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- if (!ret)
- shmobile_clk_init();
- else
- panic("failed to setup r8a73a4 clocks\n");
-}
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
deleted file mode 100644
index 3bc92f46060e..000000000000
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * SH7372 clock framework support
- *
- * Copyright (C) 2010 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/sh_clk.h>
-#include <linux/clkdev.h>
-#include "clock.h"
-#include "common.h"
-
-/* SH7372 registers */
-#define FRQCRA IOMEM(0xe6150000)
-#define FRQCRB IOMEM(0xe6150004)
-#define FRQCRC IOMEM(0xe61500e0)
-#define FRQCRD IOMEM(0xe61500e4)
-#define VCLKCR1 IOMEM(0xe6150008)
-#define VCLKCR2 IOMEM(0xe615000c)
-#define VCLKCR3 IOMEM(0xe615001c)
-#define FMSICKCR IOMEM(0xe6150010)
-#define FMSOCKCR IOMEM(0xe6150014)
-#define FSIACKCR IOMEM(0xe6150018)
-#define FSIBCKCR IOMEM(0xe6150090)
-#define SUBCKCR IOMEM(0xe6150080)
-#define SPUCKCR IOMEM(0xe6150084)
-#define VOUCKCR IOMEM(0xe6150088)
-#define HDMICKCR IOMEM(0xe6150094)
-#define DSITCKCR IOMEM(0xe6150060)
-#define DSI0PCKCR IOMEM(0xe6150064)
-#define DSI1PCKCR IOMEM(0xe6150098)
-#define PLLC01CR IOMEM(0xe6150028)
-#define PLLC2CR IOMEM(0xe615002c)
-#define RMSTPCR0 IOMEM(0xe6150110)
-#define RMSTPCR1 IOMEM(0xe6150114)
-#define RMSTPCR2 IOMEM(0xe6150118)
-#define RMSTPCR3 IOMEM(0xe615011c)
-#define RMSTPCR4 IOMEM(0xe6150120)
-#define SMSTPCR0 IOMEM(0xe6150130)
-#define SMSTPCR1 IOMEM(0xe6150134)
-#define SMSTPCR2 IOMEM(0xe6150138)
-#define SMSTPCR3 IOMEM(0xe615013c)
-#define SMSTPCR4 IOMEM(0xe6150140)
-
-#define FSIDIVA 0xFE1F8000
-#define FSIDIVB 0xFE1F8008
-
-/* Platforms must set frequency on their DV_CLKI pin */
-struct clk sh7372_dv_clki_clk = {
-};
-
-/* Fixed 32 KHz root clock from EXTALR pin */
-static struct clk r_clk = {
- .rate = 32768,
-};
-
-/*
- * 26MHz default rate for the EXTAL1 root input clock.
- * If needed, reset this with clk_set_rate() from the platform code.
- */
-struct clk sh7372_extal1_clk = {
- .rate = 26000000,
-};
-
-/*
- * 48MHz default rate for the EXTAL2 root input clock.
- * If needed, reset this with clk_set_rate() from the platform code.
- */
-struct clk sh7372_extal2_clk = {
- .rate = 48000000,
-};
-
-SH_CLK_RATIO(div2, 1, 2);
-
-SH_FIXED_RATIO_CLKg(sh7372_dv_clki_div2_clk, sh7372_dv_clki_clk, div2);
-SH_FIXED_RATIO_CLK(extal1_div2_clk, sh7372_extal1_clk, div2);
-SH_FIXED_RATIO_CLK(extal2_div2_clk, sh7372_extal2_clk, div2);
-SH_FIXED_RATIO_CLK(extal2_div4_clk, extal2_div2_clk, div2);
-
-/* PLLC0 and PLLC1 */
-static unsigned long pllc01_recalc(struct clk *clk)
-{
- unsigned long mult = 1;
-
- if (__raw_readl(PLLC01CR) & (1 << 14))
- mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1) * 2;
-
- return clk->parent->rate * mult;
-}
-
-static struct sh_clk_ops pllc01_clk_ops = {
- .recalc = pllc01_recalc,
-};
-
-static struct clk pllc0_clk = {
- .ops = &pllc01_clk_ops,
- .flags = CLK_ENABLE_ON_INIT,
- .parent = &extal1_div2_clk,
- .enable_reg = (void __iomem *)FRQCRC,
-};
-
-static struct clk pllc1_clk = {
- .ops = &pllc01_clk_ops,
- .flags = CLK_ENABLE_ON_INIT,
- .parent = &extal1_div2_clk,
- .enable_reg = (void __iomem *)FRQCRA,
-};
-
-/* Divide PLLC1 by two */
-SH_FIXED_RATIO_CLK(pllc1_div2_clk, pllc1_clk, div2);
-
-/* PLLC2 */
-
-/* Indices are important - they are the actual src selecting values */
-static struct clk *pllc2_parent[] = {
- [0] = &extal1_div2_clk,
- [1] = &extal2_div2_clk,
- [2] = &sh7372_dv_clki_div2_clk,
-};
-
-/* Only multipliers 20 * 2 to 46 * 2 are valid, last entry for CPUFREQ_TABLE_END */
-static struct cpufreq_frequency_table pllc2_freq_table[29];
-
-static void pllc2_table_rebuild(struct clk *clk)
-{
- int i;
-
- /* Initialise PLLC2 frequency table */
- for (i = 0; i < ARRAY_SIZE(pllc2_freq_table) - 2; i++) {
- pllc2_freq_table[i].frequency = clk->parent->rate * (i + 20) * 2;
- pllc2_freq_table[i].driver_data = i;
- }
-
- /* This is a special entry - switching PLL off makes it a repeater */
- pllc2_freq_table[i].frequency = clk->parent->rate;
- pllc2_freq_table[i].driver_data = i;
-
- pllc2_freq_table[++i].frequency = CPUFREQ_TABLE_END;
- pllc2_freq_table[i].driver_data = i;
-}
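With the default 26 MHz EXTAL1, pllc2's default parent (extal1_div2_clk) runs at 13 MHz, so the 27 PLL entries built above span 13 MHz * 40 = 520 MHz up to 13 MHz * 92 = 1196 MHz in 26 MHz steps, followed by one entry at the parent rate (PLL bypassed) and the CPUFREQ_TABLE_END terminator. A user-space sketch of the same arithmetic, assuming that default parent:

	#include <stdio.h>

	int main(void)
	{
		unsigned long parent = 13000000;	/* Hz, extal1 / 2 by default */
		int i;

		for (i = 0; i < 27; i++)		/* ARRAY_SIZE(table) - 2 entries */
			printf("entry %2d: %lu Hz\n", i, parent * (i + 20) * 2);
		printf("entry 27: %lu Hz (PLL off, repeater)\n", parent);
		/* entry 28 holds CPUFREQ_TABLE_END */
		return 0;
	}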
-
-static unsigned long pllc2_recalc(struct clk *clk)
-{
- unsigned long mult = 1;
-
- pllc2_table_rebuild(clk);
-
- /*
- * If the PLL is off, mult == 1, clk->rate will be updated in
- * pllc2_enable().
- */
- if (__raw_readl(PLLC2CR) & (1 << 31))
- mult = (((__raw_readl(PLLC2CR) >> 24) & 0x3f) + 1) * 2;
-
- return clk->parent->rate * mult;
-}
-
-static long pllc2_round_rate(struct clk *clk, unsigned long rate)
-{
- return clk_rate_table_round(clk, clk->freq_table, rate);
-}
-
-static int pllc2_enable(struct clk *clk)
-{
- int i;
-
- __raw_writel(__raw_readl(PLLC2CR) | 0x80000000, PLLC2CR);
-
- for (i = 0; i < 100; i++)
- if (__raw_readl(PLLC2CR) & 0x80000000) {
- clk->rate = pllc2_recalc(clk);
- return 0;
- }
-
- pr_err("%s(): timeout!\n", __func__);
-
- return -ETIMEDOUT;
-}
-
-static void pllc2_disable(struct clk *clk)
-{
- __raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR);
-}
-
-static int pllc2_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long value;
- int idx;
-
- idx = clk_rate_table_find(clk, clk->freq_table, rate);
- if (idx < 0)
- return idx;
-
- if (rate == clk->parent->rate)
- return -EINVAL;
-
- value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
-
- __raw_writel(value | ((idx + 19) << 24), PLLC2CR);
-
- clk->rate = clk->freq_table[idx].frequency;
-
- return 0;
-}
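Note that pllc2_set_rate() writes idx + 19 into PLLC2CR bits [29:24], while pllc2_recalc() reads the field back as a multiplier of (field + 1) * 2 = (idx + 20) * 2, which is exactly how pllc2_table_rebuild() computed freq_table[idx]. A quick worked check for a hypothetical 988 MHz target, keeping the 13 MHz parent assumed above:

	#include <stdio.h>

	int main(void)
	{
		unsigned long parent = 13000000, target = 988000000;
		int idx = target / (parent * 2) - 20;		/* exact table entry */
		unsigned int field = idx + 19;			/* value for PLLC2CR[29:24] */
		unsigned long back = parent * (field + 1) * 2;	/* pllc2_recalc()'s view */

		/* prints: idx=18 field=0x25 recalc=988000000 Hz */
		printf("idx=%d field=0x%02x recalc=%lu Hz\n", idx, field, back);
		return 0;
	}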
-
-static int pllc2_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 value;
- int ret, i;
-
- if (!clk->parent_table || !clk->parent_num)
- return -EINVAL;
-
- /* Search the parent */
- for (i = 0; i < clk->parent_num; i++)
- if (clk->parent_table[i] == parent)
- break;
-
- if (i == clk->parent_num)
- return -ENODEV;
-
- ret = clk_reparent(clk, parent);
- if (ret < 0)
- return ret;
-
- value = __raw_readl(PLLC2CR) & ~(3 << 6);
-
- __raw_writel(value | (i << 6), PLLC2CR);
-
- /* Rebuild the frequency table */
- pllc2_table_rebuild(clk);
-
- return 0;
-}
-
-static struct sh_clk_ops pllc2_clk_ops = {
- .recalc = pllc2_recalc,
- .round_rate = pllc2_round_rate,
- .set_rate = pllc2_set_rate,
- .enable = pllc2_enable,
- .disable = pllc2_disable,
- .set_parent = pllc2_set_parent,
-};
-
-struct clk sh7372_pllc2_clk = {
- .ops = &pllc2_clk_ops,
- .parent = &extal1_div2_clk,
- .freq_table = pllc2_freq_table,
- .nr_freqs = ARRAY_SIZE(pllc2_freq_table) - 1,
- .parent_table = pllc2_parent,
- .parent_num = ARRAY_SIZE(pllc2_parent),
-};
-
-/* External input clock (pin name: FSIACK/FSIBCK) */
-static struct clk fsiack_clk = {
-};
-
-static struct clk fsibck_clk = {
-};
-
-static struct clk *main_clks[] = {
- &sh7372_dv_clki_clk,
- &r_clk,
- &sh7372_extal1_clk,
- &sh7372_extal2_clk,
- &sh7372_dv_clki_div2_clk,
- &extal1_div2_clk,
- &extal2_div2_clk,
- &extal2_div4_clk,
- &pllc0_clk,
- &pllc1_clk,
- &pllc1_div2_clk,
- &sh7372_pllc2_clk,
- &fsiack_clk,
- &fsibck_clk,
-};
-
-static void div4_kick(struct clk *clk)
-{
- unsigned long value;
-
- /* set KICK bit in FRQCRB to update hardware setting */
- value = __raw_readl(FRQCRB);
- value |= (1 << 31);
- __raw_writel(value, FRQCRB);
-}
-
-static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18,
- 24, 32, 36, 48, 0, 72, 96, 0 };
-
-static struct clk_div_mult_table div4_div_mult_table = {
- .divisors = divisors,
- .nr_divisors = ARRAY_SIZE(divisors),
-};
-
-static struct clk_div4_table div4_table = {
- .div_mult_table = &div4_div_mult_table,
- .kick = div4_kick,
-};
-
-enum { DIV4_I, DIV4_ZG, DIV4_B, DIV4_M1, DIV4_CSIR,
- DIV4_ZX, DIV4_HP,
- DIV4_ISPB, DIV4_S, DIV4_ZB, DIV4_ZB3, DIV4_CP,
- DIV4_DDRP, DIV4_NR };
-
-#define DIV4(_reg, _bit, _mask, _flags) \
- SH_CLK_DIV4(&pllc1_clk, _reg, _bit, _mask, _flags)
-
-static struct clk div4_clks[DIV4_NR] = {
- [DIV4_I] = DIV4(FRQCRA, 20, 0x6fff, CLK_ENABLE_ON_INIT),
- [DIV4_ZG] = DIV4(FRQCRA, 16, 0x6fff, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4(FRQCRA, 8, 0x6fff, CLK_ENABLE_ON_INIT),
- [DIV4_M1] = DIV4(FRQCRA, 4, 0x6fff, CLK_ENABLE_ON_INIT),
- [DIV4_CSIR] = DIV4(FRQCRA, 0, 0x6fff, 0),
- [DIV4_ZX] = DIV4(FRQCRB, 12, 0x6fff, 0),
- [DIV4_HP] = DIV4(FRQCRB, 4, 0x6fff, 0),
- [DIV4_ISPB] = DIV4(FRQCRC, 20, 0x6fff, 0),
- [DIV4_S] = DIV4(FRQCRC, 12, 0x6fff, 0),
- [DIV4_ZB] = DIV4(FRQCRC, 8, 0x6fff, 0),
- [DIV4_ZB3] = DIV4(FRQCRC, 4, 0x6fff, 0),
- [DIV4_CP] = DIV4(FRQCRC, 0, 0x6fff, 0),
- [DIV4_DDRP] = DIV4(FRQCRD, 0, 0x677c, 0),
-};
-
-enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_FMSI, DIV6_FMSO,
- DIV6_SUB, DIV6_SPU,
- DIV6_VOU, DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P,
- DIV6_NR };
-
-static struct clk div6_clks[DIV6_NR] = {
- [DIV6_VCK1] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR1, 0),
- [DIV6_VCK2] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR2, 0),
- [DIV6_VCK3] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR3, 0),
- [DIV6_FMSI] = SH_CLK_DIV6(&pllc1_div2_clk, FMSICKCR, 0),
- [DIV6_FMSO] = SH_CLK_DIV6(&pllc1_div2_clk, FMSOCKCR, 0),
- [DIV6_SUB] = SH_CLK_DIV6(&sh7372_extal2_clk, SUBCKCR, 0),
- [DIV6_SPU] = SH_CLK_DIV6(&pllc1_div2_clk, SPUCKCR, 0),
- [DIV6_VOU] = SH_CLK_DIV6(&pllc1_div2_clk, VOUCKCR, 0),
- [DIV6_DSIT] = SH_CLK_DIV6(&pllc1_div2_clk, DSITCKCR, 0),
- [DIV6_DSI0P] = SH_CLK_DIV6(&pllc1_div2_clk, DSI0PCKCR, 0),
- [DIV6_DSI1P] = SH_CLK_DIV6(&pllc1_div2_clk, DSI1PCKCR, 0),
-};
-
-enum { DIV6_HDMI, DIV6_FSIA, DIV6_FSIB, DIV6_REPARENT_NR };
-
-/* Indices are important - they are the actual src selecting values */
-static struct clk *hdmi_parent[] = {
- [0] = &pllc1_div2_clk,
- [1] = &sh7372_pllc2_clk,
- [2] = &sh7372_dv_clki_clk,
- [3] = NULL, /* pllc2_div4 not implemented yet */
-};
-
-static struct clk *fsiackcr_parent[] = {
- [0] = &pllc1_div2_clk,
- [1] = &sh7372_pllc2_clk,
- [2] = &fsiack_clk, /* external input for FSI A */
- [3] = NULL, /* setting prohibited */
-};
-
-static struct clk *fsibckcr_parent[] = {
- [0] = &pllc1_div2_clk,
- [1] = &sh7372_pllc2_clk,
- [2] = &fsibck_clk, /* external input for FSI B */
- [3] = NULL, /* setting prohibited */
-};
-
-static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
- [DIV6_HDMI] = SH_CLK_DIV6_EXT(HDMICKCR, 0,
- hdmi_parent, ARRAY_SIZE(hdmi_parent), 6, 2),
- [DIV6_FSIA] = SH_CLK_DIV6_EXT(FSIACKCR, 0,
- fsiackcr_parent, ARRAY_SIZE(fsiackcr_parent), 6, 2),
- [DIV6_FSIB] = SH_CLK_DIV6_EXT(FSIBCKCR, 0,
- fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2),
-};
-
-/* FSI DIV */
-enum { FSIDIV_A, FSIDIV_B, FSIDIV_REPARENT_NR };
-
-static struct clk fsidivs[] = {
- [FSIDIV_A] = SH_CLK_FSIDIV(FSIDIVA, &div6_reparent_clks[DIV6_FSIA]),
- [FSIDIV_B] = SH_CLK_FSIDIV(FSIDIVB, &div6_reparent_clks[DIV6_FSIB]),
-};
-
-enum { MSTP001, MSTP000,
- MSTP131, MSTP130,
- MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
- MSTP118, MSTP117, MSTP116, MSTP113,
- MSTP106, MSTP101, MSTP100,
- MSTP223,
- MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
- MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
- MSTP328, MSTP323, MSTP322, MSTP315, MSTP314, MSTP313, MSTP312,
- MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
- MSTP405, MSTP404, MSTP403, MSTP400,
- MSTP_NR };
-
-#define MSTP(_parent, _reg, _bit, _flags) \
- SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
-
-static struct clk mstp_clks[MSTP_NR] = {
- [MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */
- [MSTP000] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 0, 0), /* MSIOF0 */
- [MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */
- [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
- [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
- [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
- [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
- [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
- [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
- [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
- [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
- [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
- [MSTP113] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 13, 0), /* MERAM */
- [MSTP106] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 6, 0), /* JPU */
- [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
- [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
- [MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */
- [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
- [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
- [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
- [MSTP214] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */
- [MSTP208] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 8, 0), /* MSIOF1 */
- [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
- [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
- [MSTP205] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 5, 0), /* MSIOF2 */
- [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
- [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
- [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
- [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
- [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
- [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
- [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
- [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
- [MSTP315] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 15, 0), /* FLCTL*/
- [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
- [MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */
- [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */
- [MSTP423] = MSTP(&div4_clks[DIV4_B], SMSTPCR4, 23, 0), /* DSITX1 */
- [MSTP415] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 15, 0), /* SDHI2 */
- [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */
- [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */
- [MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */
- [MSTP407] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 7, 0), /* USB-DMAC1 */
- [MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */
- [MSTP405] = MSTP(&r_clk, SMSTPCR4, 5, 0), /* CMT4 */
- [MSTP404] = MSTP(&r_clk, SMSTPCR4, 4, 0), /* CMT3 */
- [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
- [MSTP400] = MSTP(&r_clk, SMSTPCR4, 0, 0), /* CMT2 */
-};
-
-static struct clk_lookup lookups[] = {
- /* main clocks */
- CLKDEV_CON_ID("dv_clki_div2_clk", &sh7372_dv_clki_div2_clk),
- CLKDEV_CON_ID("r_clk", &r_clk),
- CLKDEV_CON_ID("extal1", &sh7372_extal1_clk),
- CLKDEV_CON_ID("extal2", &sh7372_extal2_clk),
- CLKDEV_CON_ID("extal1_div2_clk", &extal1_div2_clk),
- CLKDEV_CON_ID("extal2_div2_clk", &extal2_div2_clk),
- CLKDEV_CON_ID("extal2_div4_clk", &extal2_div4_clk),
- CLKDEV_CON_ID("pllc0_clk", &pllc0_clk),
- CLKDEV_CON_ID("pllc1_clk", &pllc1_clk),
- CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk),
- CLKDEV_CON_ID("pllc2_clk", &sh7372_pllc2_clk),
- CLKDEV_CON_ID("fsiack", &fsiack_clk),
- CLKDEV_CON_ID("fsibck", &fsibck_clk),
-
- /* DIV4 clocks */
- CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]),
- CLKDEV_CON_ID("zg_clk", &div4_clks[DIV4_ZG]),
- CLKDEV_CON_ID("b_clk", &div4_clks[DIV4_B]),
- CLKDEV_CON_ID("m1_clk", &div4_clks[DIV4_M1]),
- CLKDEV_CON_ID("csir_clk", &div4_clks[DIV4_CSIR]),
- CLKDEV_CON_ID("zx_clk", &div4_clks[DIV4_ZX]),
- CLKDEV_CON_ID("hp_clk", &div4_clks[DIV4_HP]),
- CLKDEV_CON_ID("ispb_clk", &div4_clks[DIV4_ISPB]),
- CLKDEV_CON_ID("s_clk", &div4_clks[DIV4_S]),
- CLKDEV_CON_ID("zb_clk", &div4_clks[DIV4_ZB]),
- CLKDEV_CON_ID("zb3_clk", &div4_clks[DIV4_ZB3]),
- CLKDEV_CON_ID("cp_clk", &div4_clks[DIV4_CP]),
- CLKDEV_CON_ID("ddrp_clk", &div4_clks[DIV4_DDRP]),
-
- /* DIV6 clocks */
- CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
- CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
- CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
- CLKDEV_CON_ID("fmsi_clk", &div6_clks[DIV6_FMSI]),
- CLKDEV_CON_ID("fmso_clk", &div6_clks[DIV6_FMSO]),
- CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
- CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
- CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
- CLKDEV_CON_ID("hdmi_clk", &div6_reparent_clks[DIV6_HDMI]),
-
- /* MSTP32 clocks */
- CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */
- CLKDEV_DEV_ID("fff30000.i2c", &mstp_clks[MSTP001]), /* IIC2 */
- CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[MSTP000]), /* MSIOF0 */
- CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */
- CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
- CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
- CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
- CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
- CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
- CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX0 */
- CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
- CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
- CLKDEV_DEV_ID("fff20000.i2c", &mstp_clks[MSTP116]), /* IIC0 */
- CLKDEV_DEV_ID("sh_mobile_meram.0", &mstp_clks[MSTP113]), /* MERAM */
- CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[MSTP106]), /* JPU */
- CLKDEV_DEV_ID("uio_pdrv_genirq.0", &mstp_clks[MSTP101]), /* VPU */
- CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
- CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */
- CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */
- CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
- CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
- CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
- CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), /* USB-DMAC0 */
- CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[MSTP208]), /* MSIOF1 */
- CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
- CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
- CLKDEV_DEV_ID("spi_sh_msiof.2", &mstp_clks[MSTP205]), /* MSIOF2 */
- CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
- CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
- CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
- CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
- CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
- CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
- CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
- CLKDEV_DEV_ID("e6c20000.i2c", &mstp_clks[MSTP323]), /* IIC1 */
- CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
- CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
- CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */
- CLKDEV_DEV_ID("sh_flctl.0", &mstp_clks[MSTP315]), /* FLCTL */
- CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
- CLKDEV_DEV_ID("e6850000.sdhi", &mstp_clks[MSTP314]), /* SDHI0 */
- CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
- CLKDEV_DEV_ID("e6860000.sdhi", &mstp_clks[MSTP313]), /* SDHI1 */
- CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
- CLKDEV_DEV_ID("e6bd0000.mmcif", &mstp_clks[MSTP312]), /* MMC */
- CLKDEV_DEV_ID("sh-mipi-dsi.1", &mstp_clks[MSTP423]), /* DSITX1 */
- CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP415]), /* SDHI2 */
- CLKDEV_DEV_ID("e6870000.sdhi", &mstp_clks[MSTP415]), /* SDHI2 */
- CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */
- CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */
- CLKDEV_DEV_ID("e6d20000.i2c", &mstp_clks[MSTP411]), /* IIC3 */
- CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */
- CLKDEV_DEV_ID("e6d30000.i2c", &mstp_clks[MSTP410]), /* IIC4 */
- CLKDEV_DEV_ID("sh-dma-engine.4", &mstp_clks[MSTP407]), /* USB-DMAC1 */
- CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
- CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
- CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */
- CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
-
- /* ICK */
- CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
- CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
- CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
- CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
- CLKDEV_ICK_ID("hdmi", "sh_mobile_lcdc_fb.1",
- &div6_reparent_clks[DIV6_HDMI]),
- CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
- CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
- CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
- CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP125]), /* TMU0 */
- CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]),
- CLKDEV_ICK_ID("fck", "sh-cmt-32-fast.4", &mstp_clks[MSTP405]), /* CMT4 */
- CLKDEV_ICK_ID("fck", "sh-cmt-32-fast.3", &mstp_clks[MSTP404]), /* CMT3 */
- CLKDEV_ICK_ID("fck", "sh-cmt-32-fast.2", &mstp_clks[MSTP400]), /* CMT2 */
- CLKDEV_ICK_ID("diva", "sh_fsi2", &fsidivs[FSIDIV_A]),
- CLKDEV_ICK_ID("divb", "sh_fsi2", &fsidivs[FSIDIV_B]),
- CLKDEV_ICK_ID("xcka", "sh_fsi2", &fsiack_clk),
- CLKDEV_ICK_ID("xckb", "sh_fsi2", &fsibck_clk),
-};
-
-void __init sh7372_clock_init(void)
-{
- int k, ret = 0;
-
- /* make sure MSTP bits on the RT/SH4AL-DSP side are off */
- __raw_writel(0xe4ef8087, RMSTPCR0);
- __raw_writel(0xffffffff, RMSTPCR1);
- __raw_writel(0x37c7f7ff, RMSTPCR2);
- __raw_writel(0xffffffff, RMSTPCR3);
- __raw_writel(0xffe0fffd, RMSTPCR4);
-
- for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
- ret = clk_register(main_clks[k]);
-
- if (!ret)
- ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
-
- if (!ret)
- ret = sh_clk_div6_register(div6_clks, DIV6_NR);
-
- if (!ret)
- ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR);
-
- if (!ret)
- ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
-
- if (!ret)
- ret = sh_clk_fsidiv_register(fsidivs, FSIDIV_REPARENT_NR);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- if (!ret)
- shmobile_clk_init();
- else
- panic("failed to setup sh7372 clocks\n");
-}
diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c
index 34f056fc3756..68c2d06d0eaa 100644
--- a/arch/arm/mach-shmobile/clock.c
+++ b/arch/arm/mach-shmobile/clock.c
@@ -45,14 +45,3 @@ int __init shmobile_clk_init(void)
return 0;
}
-
-int __clk_get(struct clk *clk)
-{
- return 1;
-}
-EXPORT_SYMBOL(__clk_get);
-
-void __clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(__clk_put);
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 309025efd4cf..afc60bad6fd6 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -21,10 +21,7 @@ extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
struct clk;
extern int shmobile_clk_init(void);
-extern void shmobile_handle_irq_intc(struct pt_regs *);
extern struct platform_suspend_ops shmobile_suspend_ops;
-struct cpuidle_driver;
-extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);
#ifdef CONFIG_SUSPEND
int shmobile_suspend_init(void);
@@ -34,12 +31,6 @@ static inline int shmobile_suspend_init(void) { return 0; }
static inline void shmobile_smp_apmu_suspend_init(void) { }
#endif
-#ifdef CONFIG_CPU_IDLE
-int shmobile_cpuidle_init(void);
-#else
-static inline int shmobile_cpuidle_init(void) { return 0; }
-#endif
-
#ifdef CONFIG_CPU_FREQ
int shmobile_cpufreq_init(void);
#else
@@ -51,7 +42,6 @@ extern void __iomem *shmobile_scu_base;
static inline void __init shmobile_init_late(void)
{
shmobile_suspend_init();
- shmobile_cpuidle_init();
shmobile_cpufreq_init();
}
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
deleted file mode 100644
index 0afeb5c7061c..000000000000
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * CPUIdle support code for SH-Mobile ARM
- *
- * Copyright (C) 2011 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pm.h>
-#include <linux/cpuidle.h>
-#include <linux/suspend.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <asm/cpuidle.h>
-#include <asm/io.h>
-
-static struct cpuidle_driver shmobile_cpuidle_default_driver = {
- .name = "shmobile_cpuidle",
- .owner = THIS_MODULE,
- .states[0] = ARM_CPUIDLE_WFI_STATE,
- .safe_state_index = 0, /* C1 */
- .state_count = 1,
-};
-
-static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
-
-void __init shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
-{
- cpuidle_drv = drv;
-}
-
-int __init shmobile_cpuidle_init(void)
-{
- return cpuidle_register(cpuidle_drv, NULL);
-}
diff --git a/arch/arm/mach-shmobile/entry-intc.S b/arch/arm/mach-shmobile/entry-intc.S
deleted file mode 100644
index 1a1c00ca39a2..000000000000
--- a/arch/arm/mach-shmobile/entry-intc.S
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * ARM Interrupt demux handler using INTC
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2008 Renesas Solutions Corp.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/entry-macro-multi.S>
-
-#define INTCA_BASE 0xe6980000
-#define INTFLGA_OFFS 0x00000018 /* accept pending interrupt */
-#define INTEVTA_OFFS 0x00000020 /* vector number of accepted interrupt */
-#define INTLVLA_OFFS 0x00000030 /* priority level of accepted interrupt */
-#define INTLVLB_OFFS 0x00000034 /* previous priority level */
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =INTCA_BASE
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- /* The single INTFLGA read access below results in the following:
- *
- * 1. INTLVLB is updated with old priority value from INTLVLA
- * 2. Highest priority interrupt is accepted
- * 3. INTLVLA is updated to contain priority of accepted interrupt
- * 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA
- */
- ldr \irqnr, [\base, #INTFLGA_OFFS]
-
- /* Restore INTLVLA with the value saved in INTLVLB.
- * This is required to support interrupt priorities properly.
- */
- ldrb \tmp, [\base, #INTLVLB_OFFS]
- strb \tmp, [\base, #INTLVLA_OFFS]
-
- /* Handle invalid vector number case */
- cmp \irqnr, #0
- beq 1000f
-
- /* Convert vector to irq number, same as the evt2irq() macro */
- lsr \irqnr, \irqnr, #0x5
- subs \irqnr, \irqnr, #16
-
-1000:
- .endm
-
- .macro test_for_ipi, irqnr, irqstat, base, tmp
- .endm
-
- arch_irq_handler shmobile_handle_irq_intc
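The two-instruction conversion above (logical shift right by 5, then subtract 16) is the same mapping the C side performs with the evt2irq() macro, as the comment notes. A quick user-space check with a couple of example vector numbers (SH INTC vectors are multiples of 0x20):

	#include <stdio.h>

	/* same conversion as the "lsr #5; subs #16" sequence above */
	#define evt2irq(evt)	(((evt) >> 5) - 16)

	int main(void)
	{
		printf("vector 0x0200 -> irq %d\n", evt2irq(0x0200));	/* irq 0 */
		printf("vector 0x0580 -> irq %d\n", evt2irq(0x0580));	/* irq 28 */
		return 0;
	}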
diff --git a/arch/arm/mach-shmobile/include/mach/clkdev.h b/arch/arm/mach-shmobile/include/mach/clkdev.h
deleted file mode 100644
index 36d0163a857a..000000000000
--- a/arch/arm/mach-shmobile/include/mach/clkdev.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_MACH_CLKDEV_H
-#define __ASM_MACH_CLKDEV_H
-
-int __clk_get(struct clk *clk);
-void __clk_put(struct clk *clk);
-
-#endif /* __ASM_MACH_CLKDEV_H */
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
deleted file mode 100644
index 9f134dfeffdc..000000000000
--- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-LIST "partner-jet-setup.txt"
-LIST "(C) Copyright 2010 Renesas Solutions Corp"
-LIST "Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"
-
-LIST "RWT Setting"
-EW 0xE6020004, 0xA500
-EW 0xE6030004, 0xA500
-
-LIST "GPIO Setting"
-EB 0xE6051013, 0xA2
-
-LIST "CPG"
-ED 0xE61500C0, 0x00000002
-
-WAIT 1, 0xFE40009C
-
-LIST "FRQCR"
-ED 0xE6150000, 0x2D1305C3
-ED 0xE61500E0, 0x9E40358E
-ED 0xE6150004, 0x80331050
-
-WAIT 1, 0xFE40009C
-
-ED 0xE61500E4, 0x00002000
-
-WAIT 1, 0xFE40009C
-
-LIST "PLL"
-ED 0xE6150028, 0x00004000
-
-WAIT 1, 0xFE40009C
-
-ED 0xE615002C, 0x93000040
-
-WAIT 1, 0xFE40009C
-
-LIST "SUB/USBClk"
-ED 0xE6150080, 0x00000180
-
-LIST "BSC"
-ED 0xFEC10000, 0x00E0001B
-
-LIST "SBSC1"
-ED 0xFE400354, 0x01AD8000
-ED 0xFE400354, 0x01AD8001
-
-WAIT 5, 0xFE40009C
-
-ED 0xFE400008, 0xBCC90151
-ED 0xFE400040, 0x41774113
-ED 0xFE400044, 0x2712E229
-ED 0xFE400048, 0x20C18505
-ED 0xFE40004C, 0x00110209
-ED 0xFE400010, 0x00000087
-
-WAIT 30, 0xFE40009C
-
-ED 0xFE400084, 0x0000003F
-EB 0xFE500000, 0x00
-
-WAIT 5, 0xFE40009C
-
-ED 0xFE400084, 0x0000FF0A
-EB 0xFE500000, 0x00
-
-WAIT 1, 0xFE40009C
-
-ED 0xFE400084, 0x00002201
-EB 0xFE500000, 0x00
-ED 0xFE400084, 0x00000302
-EB 0xFE500000, 0x00
-EB 0xFE5C0000, 0x00
-ED 0xFE400008, 0xBCC90159
-ED 0xFE40008C, 0x88800004
-ED 0xFE400094, 0x00000004
-ED 0xFE400028, 0xA55A0032
-ED 0xFE40002C, 0xA55A000C
-ED 0xFE400020, 0xA55A2048
-ED 0xFE400008, 0xBCC90959
-
-LIST "Change CPGA setting"
-ED 0xE61500E0, 0x9E40352E
-ED 0xE6150004, 0x80331050
-
-WAIT 1, 0xFE40009C
-
-ED 0xFE400354, 0x01AD8002
-
-LIST "SCIF0 - Serial port for earlyprintk"
-EB 0xE6053098, 0xe1
-EW 0xE6C40000, 0x0000
-EB 0xE6C40004, 0x19
-EW 0xE6C40008, 0x0030
diff --git a/arch/arm/mach-shmobile/include/mach/mmc-mackerel.h b/arch/arm/mach-shmobile/include/mach/mmc-mackerel.h
deleted file mode 100644
index 15d3a9efdec2..000000000000
--- a/arch/arm/mach-shmobile/include/mach/mmc-mackerel.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef MMC_MACKEREL_H
-#define MMC_MACKEREL_H
-
-#define PORT0CR (void __iomem *)0xe6051000
-#define PORT1CR (void __iomem *)0xe6051001
-#define PORT2CR (void __iomem *)0xe6051002
-#define PORT159CR (void __iomem *)0xe605009f
-
-#define PORTR031_000DR (void __iomem *)0xe6055000
-#define PORTL159_128DR (void __iomem *)0xe6054010
-
-static inline void mmc_init_progress(void)
-{
- /* Initialise LEDs 0-3
- * registers: PORT0CR-PORT2CR, PORT159CR (LED0-LED3 Control)
- * value: 0x10 - enable output
- */
- __raw_writeb(0x10, PORT0CR);
- __raw_writeb(0x10, PORT1CR);
- __raw_writeb(0x10, PORT2CR);
- __raw_writeb(0x10, PORT159CR);
-}
-
-static inline void mmc_update_progress(int n)
-{
- unsigned a = 0, b = 0;
-
- if (n < 3)
- a = 1 << n;
- else
- b = 1 << 31;
-
- __raw_writel((__raw_readl(PORTR031_000DR) & ~0x7) | a,
- PORTR031_000DR);
- __raw_writel((__raw_readl(PORTL159_128DR) & ~(1 << 31)) | b,
- PORTL159_128DR);
-}
-#endif /* MMC_MACKEREL_H */
diff --git a/arch/arm/mach-shmobile/include/mach/mmc.h b/arch/arm/mach-shmobile/include/mach/mmc.h
deleted file mode 100644
index e979b8fc1da2..000000000000
--- a/arch/arm/mach-shmobile/include/mach/mmc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef MMC_H
-#define MMC_H
-
-/**************************************************
- *
- * board specific settings
- *
- **************************************************/
-
-#ifdef CONFIG_MACH_MACKEREL
-#include "mach/mmc-mackerel.h"
-#else
-#error "unsupported board."
-#endif
-
-#endif /* MMC_H */
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h
deleted file mode 100644
index 4a81b01f1e8f..000000000000
--- a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef SDHI_SH7372_H
-#define SDHI_SH7372_H
-
-#define SDGENCNTA 0xfe40009c
-
-/* The countdown of SDGENCNTA is controlled by
- * ZB3D2CLK which runs at 149.5 MHz.
- * That is 149.5 ticks/us. Approximate this as 150 ticks/us.
- */
-static void udelay(int us)
-{
- __raw_writel(us * 150, SDGENCNTA);
- while(__raw_readl(SDGENCNTA)) ;
-}
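For instance, udelay(5) writes 5 * 150 = 750 to SDGENCNTA; since ZB3D2CLK really ticks at about 149.5 MHz, the busy-wait lasts roughly 5.02 us, so the 150 ticks/us approximation only errs on the long (safe) side. A trivial user-space sketch of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int us = 5;
		unsigned int ticks = us * 150;		/* value written to SDGENCNTA */
		double actual_us = ticks / 149.5;	/* counter clocked at 149.5 MHz */

		/* prints: requested 5 us -> 750 ticks -> 5.02 us actual */
		printf("requested %u us -> %u ticks -> %.2f us actual\n",
		       us, ticks, actual_us);
		return 0;
	}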
-
-static void msleep(int ms)
-{
- udelay(ms * 1000);
-}
-
-#endif
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h
deleted file mode 100644
index 0ec9e69f2c3b..000000000000
--- a/arch/arm/mach-shmobile/include/mach/sdhi.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef SDHI_H
-#define SDHI_H
-
-/**************************************************
- *
- * CPU specific settings
- *
- **************************************************/
-
-#ifdef CONFIG_ARCH_SH7372
-#include "mach/sdhi-sh7372.h"
-#else
-#error "unsupported CPU."
-#endif
-
-#endif /* SDHI_H */
diff --git a/arch/arm/mach-shmobile/include/mach/system.h b/arch/arm/mach-shmobile/include/mach/system.h
deleted file mode 100644
index 540eaff08f34..000000000000
--- a/arch/arm/mach-shmobile/include/mach/system.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __ASM_ARCH_SYSTEM_H
-#define __ASM_ARCH_SYSTEM_H
-
-#include <asm/system_misc.h>
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- soft_restart(0);
-}
-
-#endif
diff --git a/arch/arm/mach-shmobile/include/mach/uncompress.h b/arch/arm/mach-shmobile/include/mach/uncompress.h
deleted file mode 100644
index f1aee56781e7..000000000000
--- a/arch/arm/mach-shmobile/include/mach/uncompress.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __ASM_MACH_UNCOMPRESS_H
-#define __ASM_MACH_UNCOMPRESS_H
-
-/*
- * This does not append a newline
- */
-static void putc(int c)
-{
-}
-
-static inline void flush(void)
-{
-}
-
-static void arch_decomp_setup(void)
-{
-}
-
-#endif /* __ASM_MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-shmobile/include/mach/zboot.h b/arch/arm/mach-shmobile/include/mach/zboot.h
index 727cc78ac8ec..175ee05465da 100644
--- a/arch/arm/mach-shmobile/include/mach/zboot.h
+++ b/arch/arm/mach-shmobile/include/mach/zboot.h
@@ -9,10 +9,7 @@
*
**************************************************/
-#ifdef CONFIG_MACH_MACKEREL
-#define MEMORY_START 0x40000000
-#include "mach/head-mackerel.txt"
-#elif defined(CONFIG_MACH_KZM9G) || defined(CONFIG_MACH_KZM9G_REFERENCE)
+#ifdef CONFIG_MACH_KZM9G
#define MEMORY_START 0x43000000
#include "mach/head-kzm9g.txt"
#else
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
deleted file mode 100644
index 1ccf49cb485f..000000000000
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ /dev/null
@@ -1,672 +0,0 @@
-/*
- * sh7372 processor support - INTC hardware block
- *
- * Copyright (C) 2010 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include "intc.h"
-#include "irqs.h"
-
-enum {
- UNUSED_INTCA = 0,
-
- /* interrupt sources INTCA */
- DIRC,
- CRYPT_STD,
- IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1,
- AP_ARM_IRQPMU, AP_ARM_COMMTX, AP_ARM_COMMRX,
- MFI_MFIM, MFI_MFIS,
- BBIF1, BBIF2,
- USBHSDMAC0_USHDMI,
- _3DG_SGX540,
- CMT1_CMT10, CMT1_CMT11, CMT1_CMT12, CMT1_CMT13, CMT2, CMT3,
- KEYSC_KEY,
- SCIFA0, SCIFA1, SCIFA2, SCIFA3,
- MSIOF2, MSIOF1,
- SCIFA4, SCIFA5, SCIFB,
- FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
- SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3,
- SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2,
- IRREM,
- IRDA,
- TPU0,
- TTI20,
- DDM,
- SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3,
- RWDT0,
- DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3,
- DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR,
- DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3,
- DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR,
- DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3,
- DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR,
- SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM,
- HDMI,
- SPU2_SPU0, SPU2_SPU1,
- FSI, FMSI,
- MIPI_HSI,
- IPMMU_IPMMUD,
- CEC_1, CEC_2,
- AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ, AP_ARM_DMAIRQ, AP_ARM_DMASIRQ,
- MFIS2,
- CPORTR2S,
- CMT14, CMT15,
- MMC_MMC_ERR, MMC_MMC_NOR,
- IIC4_ALI4, IIC4_TACKI4, IIC4_WAITI4, IIC4_DTEI4,
- IIC3_ALI3, IIC3_TACKI3, IIC3_WAITI3, IIC3_DTEI3,
- USB0_USB0I1, USB0_USB0I0,
- USB1_USB1I1, USB1_USB1I0,
- USBHSDMAC1_USHDMI,
-
- /* interrupt groups INTCA */
- DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT,
- AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2
-};
-
-static struct intc_vect intca_vectors[] __initdata = {
- INTC_VECT(DIRC, 0x0560),
- INTC_VECT(CRYPT_STD, 0x0700),
- INTC_VECT(IIC1_ALI1, 0x0780), INTC_VECT(IIC1_TACKI1, 0x07a0),
- INTC_VECT(IIC1_WAITI1, 0x07c0), INTC_VECT(IIC1_DTEI1, 0x07e0),
- INTC_VECT(AP_ARM_IRQPMU, 0x0800), INTC_VECT(AP_ARM_COMMTX, 0x0840),
- INTC_VECT(AP_ARM_COMMRX, 0x0860),
- INTC_VECT(MFI_MFIM, 0x0900), INTC_VECT(MFI_MFIS, 0x0920),
- INTC_VECT(BBIF1, 0x0940), INTC_VECT(BBIF2, 0x0960),
- INTC_VECT(USBHSDMAC0_USHDMI, 0x0a00),
- INTC_VECT(_3DG_SGX540, 0x0a60),
- INTC_VECT(CMT1_CMT10, 0x0b00), INTC_VECT(CMT1_CMT11, 0x0b20),
- INTC_VECT(CMT1_CMT12, 0x0b40), INTC_VECT(CMT1_CMT13, 0x0b60),
- INTC_VECT(CMT2, 0x0b80), INTC_VECT(CMT3, 0x0ba0),
- INTC_VECT(KEYSC_KEY, 0x0be0),
- INTC_VECT(SCIFA0, 0x0c00), INTC_VECT(SCIFA1, 0x0c20),
- INTC_VECT(SCIFA2, 0x0c40), INTC_VECT(SCIFA3, 0x0c60),
- INTC_VECT(MSIOF2, 0x0c80), INTC_VECT(MSIOF1, 0x0d00),
- INTC_VECT(SCIFA4, 0x0d20), INTC_VECT(SCIFA5, 0x0d40),
- INTC_VECT(SCIFB, 0x0d60),
- INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
- INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
- INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20),
- INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60),
- INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0),
- INTC_VECT(SDHI1_SDHI1I2, 0x0ec0),
- INTC_VECT(IRREM, 0x0f60),
- INTC_VECT(IRDA, 0x0480),
- INTC_VECT(TPU0, 0x04a0),
- INTC_VECT(TTI20, 0x1100),
- INTC_VECT(DDM, 0x1140),
- INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220),
- INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260),
- INTC_VECT(RWDT0, 0x1280),
- INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020),
- INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060),
- INTC_VECT(DMAC1_2_DEI4, 0x2080), INTC_VECT(DMAC1_2_DEI5, 0x20a0),
- INTC_VECT(DMAC1_2_DADERR, 0x20c0),
- INTC_VECT(DMAC2_1_DEI0, 0x2100), INTC_VECT(DMAC2_1_DEI1, 0x2120),
- INTC_VECT(DMAC2_1_DEI2, 0x2140), INTC_VECT(DMAC2_1_DEI3, 0x2160),
- INTC_VECT(DMAC2_2_DEI4, 0x2180), INTC_VECT(DMAC2_2_DEI5, 0x21a0),
- INTC_VECT(DMAC2_2_DADERR, 0x21c0),
- INTC_VECT(DMAC3_1_DEI0, 0x2200), INTC_VECT(DMAC3_1_DEI1, 0x2220),
- INTC_VECT(DMAC3_1_DEI2, 0x2240), INTC_VECT(DMAC3_1_DEI3, 0x2260),
- INTC_VECT(DMAC3_2_DEI4, 0x2280), INTC_VECT(DMAC3_2_DEI5, 0x22a0),
- INTC_VECT(DMAC3_2_DADERR, 0x22c0),
- INTC_VECT(SHWYSTAT_RT, 0x1300), INTC_VECT(SHWYSTAT_HS, 0x1320),
- INTC_VECT(SHWYSTAT_COM, 0x1340),
- INTC_VECT(HDMI, 0x17e0),
- INTC_VECT(SPU2_SPU0, 0x1800), INTC_VECT(SPU2_SPU1, 0x1820),
- INTC_VECT(FSI, 0x1840),
- INTC_VECT(FMSI, 0x1860),
- INTC_VECT(MIPI_HSI, 0x18e0),
- INTC_VECT(IPMMU_IPMMUD, 0x1920),
- INTC_VECT(CEC_1, 0x1940), INTC_VECT(CEC_2, 0x1960),
- INTC_VECT(AP_ARM_CTIIRQ, 0x1980),
- INTC_VECT(AP_ARM_DMAEXTERRIRQ, 0x19a0),
- INTC_VECT(AP_ARM_DMAIRQ, 0x19c0),
- INTC_VECT(AP_ARM_DMASIRQ, 0x19e0),
- INTC_VECT(MFIS2, 0x1a00),
- INTC_VECT(CPORTR2S, 0x1a20),
- INTC_VECT(CMT14, 0x1a40), INTC_VECT(CMT15, 0x1a60),
- INTC_VECT(MMC_MMC_ERR, 0x1ac0), INTC_VECT(MMC_MMC_NOR, 0x1ae0),
- INTC_VECT(IIC4_ALI4, 0x1b00), INTC_VECT(IIC4_TACKI4, 0x1b20),
- INTC_VECT(IIC4_WAITI4, 0x1b40), INTC_VECT(IIC4_DTEI4, 0x1b60),
- INTC_VECT(IIC3_ALI3, 0x1b80), INTC_VECT(IIC3_TACKI3, 0x1ba0),
- INTC_VECT(IIC3_WAITI3, 0x1bc0), INTC_VECT(IIC3_DTEI3, 0x1be0),
- INTC_VECT(USB0_USB0I1, 0x1c80), INTC_VECT(USB0_USB0I0, 0x1ca0),
- INTC_VECT(USB1_USB1I1, 0x1cc0), INTC_VECT(USB1_USB1I0, 0x1ce0),
- INTC_VECT(USBHSDMAC1_USHDMI, 0x1d00),
-};
-
-static struct intc_group intca_groups[] __initdata = {
- INTC_GROUP(DMAC1_1, DMAC1_1_DEI0,
- DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3),
- INTC_GROUP(DMAC1_2, DMAC1_2_DEI4,
- DMAC1_2_DEI5, DMAC1_2_DADERR),
- INTC_GROUP(DMAC2_1, DMAC2_1_DEI0,
- DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3),
- INTC_GROUP(DMAC2_2, DMAC2_2_DEI4,
- DMAC2_2_DEI5, DMAC2_2_DADERR),
- INTC_GROUP(DMAC3_1, DMAC3_1_DEI0,
- DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3),
- INTC_GROUP(DMAC3_2, DMAC3_2_DEI4,
- DMAC3_2_DEI5, DMAC3_2_DADERR),
- INTC_GROUP(AP_ARM1, AP_ARM_IRQPMU, AP_ARM_COMMTX, AP_ARM_COMMRX),
- INTC_GROUP(AP_ARM2, AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ,
- AP_ARM_DMAIRQ, AP_ARM_DMASIRQ),
- INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1),
- INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
- FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
- INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
- INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
- SDHI0_SDHI0I2, SDHI0_SDHI0I3),
- INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
- SDHI1_SDHI1I2),
- INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1,
- SDHI2_SDHI2I2, SDHI2_SDHI2I3),
- INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
-};
-
-static struct intc_mask_reg intca_mask_registers[] __initdata = {
- { 0xe6940080, 0xe69400c0, 8, /* IMR0A / IMCR0A */
- { DMAC2_1_DEI3, DMAC2_1_DEI2, DMAC2_1_DEI1, DMAC2_1_DEI0,
- AP_ARM_IRQPMU, 0, AP_ARM_COMMTX, AP_ARM_COMMRX } },
- { 0xe6940084, 0xe69400c4, 8, /* IMR1A / IMCR1A */
- { 0, CRYPT_STD, DIRC, 0,
- DMAC1_1_DEI3, DMAC1_1_DEI2, DMAC1_1_DEI1, DMAC1_1_DEI0 } },
- { 0xe6940088, 0xe69400c8, 8, /* IMR2A / IMCR2A */
- { 0, 0, 0, 0,
- BBIF1, BBIF2, MFI_MFIS, MFI_MFIM } },
- { 0xe694008c, 0xe69400cc, 8, /* IMR3A / IMCR3A */
- { DMAC3_1_DEI3, DMAC3_1_DEI2, DMAC3_1_DEI1, DMAC3_1_DEI0,
- DMAC3_2_DADERR, DMAC3_2_DEI5, DMAC3_2_DEI4, IRDA } },
- { 0xe6940090, 0xe69400d0, 8, /* IMR4A / IMCR4A */
- { DDM, 0, 0, 0,
- 0, 0, 0, 0 } },
- { 0xe6940094, 0xe69400d4, 8, /* IMR5A / IMCR5A */
- { KEYSC_KEY, DMAC1_2_DADERR, DMAC1_2_DEI5, DMAC1_2_DEI4,
- SCIFA3, SCIFA2, SCIFA1, SCIFA0 } },
- { 0xe6940098, 0xe69400d8, 8, /* IMR6A / IMCR6A */
- { SCIFB, SCIFA5, SCIFA4, MSIOF1,
- 0, 0, MSIOF2, 0 } },
- { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
- { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0,
- FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
- { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
- { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0,
- TTI20, USBHSDMAC0_USHDMI, 0, 0 } },
- { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
- { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
- CMT2, 0, 0, _3DG_SGX540 } },
- { 0xe69400a8, 0xe69400e8, 8, /* IMR10A / IMCR10A */
- { 0, DMAC2_2_DADERR, DMAC2_2_DEI5, DMAC2_2_DEI4,
- 0, 0, 0, 0 } },
- { 0xe69400ac, 0xe69400ec, 8, /* IMR11A / IMCR11A */
- { IIC1_DTEI1, IIC1_WAITI1, IIC1_TACKI1, IIC1_ALI1,
- 0, 0, IRREM, 0 } },
- { 0xe69400b0, 0xe69400f0, 8, /* IMR12A / IMCR12A */
- { 0, 0, TPU0, 0,
- 0, 0, 0, 0 } },
- { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
- { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0,
- 0, CMT3, 0, RWDT0 } },
- { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */
- { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0,
- 0, 0, 0, 0 } },
- { 0xe6950090, 0xe69500d0, 8, /* IMR4A3 / IMCR4A3 */
- { 0, 0, 0, 0,
- 0, 0, 0, HDMI } },
- { 0xe6950094, 0xe69500d4, 8, /* IMR5A3 / IMCR5A3 */
- { SPU2_SPU0, SPU2_SPU1, FSI, FMSI,
- 0, 0, 0, MIPI_HSI } },
- { 0xe6950098, 0xe69500d8, 8, /* IMR6A3 / IMCR6A3 */
- { 0, IPMMU_IPMMUD, CEC_1, CEC_2,
- AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ,
- AP_ARM_DMAIRQ, AP_ARM_DMASIRQ } },
- { 0xe695009c, 0xe69500dc, 8, /* IMR7A3 / IMCR7A3 */
- { MFIS2, CPORTR2S, CMT14, CMT15,
- 0, 0, MMC_MMC_ERR, MMC_MMC_NOR } },
- { 0xe69500a0, 0xe69500e0, 8, /* IMR8A3 / IMCR8A3 */
- { IIC4_ALI4, IIC4_TACKI4, IIC4_WAITI4, IIC4_DTEI4,
- IIC3_ALI3, IIC3_TACKI3, IIC3_WAITI3, IIC3_DTEI3 } },
- { 0xe69500a4, 0xe69500e4, 8, /* IMR9A3 / IMCR9A3 */
- { 0, 0, 0, 0,
- USB0_USB0I1, USB0_USB0I0, USB1_USB1I1, USB1_USB1I0 } },
- { 0xe69500a8, 0xe69500e8, 8, /* IMR10A3 / IMCR10A3 */
- { USBHSDMAC1_USHDMI, 0, 0, 0,
- 0, 0, 0, 0 } },
-};
-
-static struct intc_prio_reg intca_prio_registers[] __initdata = {
- { 0xe6940000, 0, 16, 4, /* IPRAA */ { DMAC3_1, DMAC3_2, CMT2, 0 } },
- { 0xe6940004, 0, 16, 4, /* IPRBA */ { IRDA, 0, BBIF1, BBIF2 } },
- { 0xe6940008, 0, 16, 4, /* IPRCA */ { 0, CRYPT_STD,
- CMT1_CMT11, AP_ARM1 } },
- { 0xe694000c, 0, 16, 4, /* IPRDA */ { 0, 0,
- CMT1_CMT12, 0 } },
- { 0xe6940010, 0, 16, 4, /* IPREA */ { DMAC1_1, MFI_MFIS,
- MFI_MFIM, 0 } },
- { 0xe6940014, 0, 16, 4, /* IPRFA */ { KEYSC_KEY, DMAC1_2,
- _3DG_SGX540, CMT1_CMT10 } },
- { 0xe6940018, 0, 16, 4, /* IPRGA */ { SCIFA0, SCIFA1,
- SCIFA2, SCIFA3 } },
- { 0xe694001c, 0, 16, 4, /* IPRGH */ { MSIOF2, USBHSDMAC0_USHDMI,
- FLCTL, SDHI0 } },
- { 0xe6940020, 0, 16, 4, /* IPRIA */ { MSIOF1, SCIFA4,
- 0/* MSU */, IIC1 } },
- { 0xe6940024, 0, 16, 4, /* IPRJA */ { DMAC2_1, DMAC2_2,
- 0/* MSUG */, TTI20 } },
- { 0xe6940028, 0, 16, 4, /* IPRKA */ { 0, CMT1_CMT13, IRREM, SDHI1 } },
- { 0xe694002c, 0, 16, 4, /* IPRLA */ { TPU0, 0, 0, 0 } },
- { 0xe6940030, 0, 16, 4, /* IPRMA */ { 0, CMT3, 0, RWDT0 } },
- { 0xe6940034, 0, 16, 4, /* IPRNA */ { SCIFB, SCIFA5, 0, DDM } },
- { 0xe6940038, 0, 16, 4, /* IPROA */ { 0, 0, DIRC, SDHI2 } },
- { 0xe6950000, 0, 16, 4, /* IPRAA3 */ { SHWYSTAT, 0, 0, 0 } },
- { 0xe6950024, 0, 16, 4, /* IPRJA3 */ { 0, 0, 0, HDMI } },
- { 0xe6950028, 0, 16, 4, /* IPRKA3 */ { SPU2, 0, FSI, FMSI } },
- { 0xe695002c, 0, 16, 4, /* IPRLA3 */ { 0, 0, 0, MIPI_HSI } },
- { 0xe6950030, 0, 16, 4, /* IPRMA3 */ { IPMMU_IPMMUD, 0,
- CEC_1, CEC_2 } },
- { 0xe6950034, 0, 16, 4, /* IPRNA3 */ { AP_ARM2, 0, 0, 0 } },
- { 0xe6950038, 0, 16, 4, /* IPROA3 */ { MFIS2, CPORTR2S,
- CMT14, CMT15 } },
- { 0xe695003c, 0, 16, 4, /* IPRPA3 */ { 0, 0,
- MMC_MMC_ERR, MMC_MMC_NOR } },
- { 0xe6950040, 0, 16, 4, /* IPRQA3 */ { IIC4_ALI4, IIC4_TACKI4,
- IIC4_WAITI4, IIC4_DTEI4 } },
- { 0xe6950044, 0, 16, 4, /* IPRRA3 */ { IIC3_ALI3, IIC3_TACKI3,
- IIC3_WAITI3, IIC3_DTEI3 } },
- { 0xe6950048, 0, 16, 4, /* IPRSA3 */ { 0/*ERI*/, 0/*RXI*/,
- 0/*TXI*/, 0/*TEI*/} },
- { 0xe695004c, 0, 16, 4, /* IPRTA3 */ { USB0_USB0I1, USB0_USB0I0,
- USB1_USB1I1, USB1_USB1I0 } },
- { 0xe6950050, 0, 16, 4, /* IPRUA3 */ { USBHSDMAC1_USHDMI, 0, 0, 0 } },
-};
-
-static DECLARE_INTC_DESC(intca_desc, "sh7372-intca",
- intca_vectors, intca_groups,
- intca_mask_registers, intca_prio_registers,
- NULL);
-
-INTC_IRQ_PINS_16(intca_irq_pins_lo, 0xe6900000,
- INTC_VECT, "sh7372-intca-irq-lo");
-
-INTC_IRQ_PINS_16H(intca_irq_pins_hi, 0xe6900000,
- INTC_VECT, "sh7372-intca-irq-hi");
-
-enum {
- UNUSED_INTCS = 0,
- ENABLED_INTCS,
-
- /* interrupt sources INTCS */
-
- /* IRQ0S - IRQ31S */
- VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3,
- RTDMAC_1_DEI0, RTDMAC_1_DEI1, RTDMAC_1_DEI2, RTDMAC_1_DEI3,
- CEU, BEU_BEU0, BEU_BEU1, BEU_BEU2,
- /* MFI */
- /* BBIF2 */
- VPU,
- TSIF1,
- /* 3DG */
- _2DDMAC,
- IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
- IPMMU_IPMMUR, IPMMU_IPMMUR2,
- RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR,
- /* KEYSC */
- /* TTI20 */
- MSIOF,
- IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0,
- TMU_TUNI0, TMU_TUNI1, TMU_TUNI2,
- CMT0,
- TSIF0,
- /* CMT2 */
- LMB,
- CTI,
- /* RWDT0 */
- ICB,
- JPU_JPEG,
- LCDC,
- LCRC,
- RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3,
- RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR,
- ISP,
- LCDC1,
- CSIRX,
- DSITX_DSITX0,
- DSITX_DSITX1,
- /* SPU2 */
- /* FSI */
- /* FMSI */
- /* HDMI */
- TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2,
- CMT4,
- DSITX1_DSITX1_0,
- DSITX1_DSITX1_1,
- MFIS2_INTCS, /* Priority always enabled using ENABLED_INTCS */
- CPORTS2R,
- /* CEC */
- JPU6E,
-
- /* interrupt groups INTCS */
- RTDMAC_1, RTDMAC_2, VEU, BEU, IIC0, IPMMU, IIC2,
- RTDMAC2_1, RTDMAC2_2, TMU1, DSITX,
-};
-
-static struct intc_vect intcs_vectors[] = {
- /* IRQ0S - IRQ31S */
- INTCS_VECT(VEU_VEU0, 0x700), INTCS_VECT(VEU_VEU1, 0x720),
- INTCS_VECT(VEU_VEU2, 0x740), INTCS_VECT(VEU_VEU3, 0x760),
- INTCS_VECT(RTDMAC_1_DEI0, 0x800), INTCS_VECT(RTDMAC_1_DEI1, 0x820),
- INTCS_VECT(RTDMAC_1_DEI2, 0x840), INTCS_VECT(RTDMAC_1_DEI3, 0x860),
- INTCS_VECT(CEU, 0x880), INTCS_VECT(BEU_BEU0, 0x8a0),
- INTCS_VECT(BEU_BEU1, 0x8c0), INTCS_VECT(BEU_BEU2, 0x8e0),
- /* MFI */
- /* BBIF2 */
- INTCS_VECT(VPU, 0x980),
- INTCS_VECT(TSIF1, 0x9a0),
- /* 3DG */
- INTCS_VECT(_2DDMAC, 0xa00),
- INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0),
- INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0),
- INTCS_VECT(IPMMU_IPMMUR, 0xb00), INTCS_VECT(IPMMU_IPMMUR2, 0xb20),
- INTCS_VECT(RTDMAC_2_DEI4, 0xb80), INTCS_VECT(RTDMAC_2_DEI5, 0xba0),
- INTCS_VECT(RTDMAC_2_DADERR, 0xbc0),
- /* KEYSC */
- /* TTI20 */
- INTCS_VECT(MSIOF, 0x0d20),
- INTCS_VECT(IIC0_ALI0, 0xe00), INTCS_VECT(IIC0_TACKI0, 0xe20),
- INTCS_VECT(IIC0_WAITI0, 0xe40), INTCS_VECT(IIC0_DTEI0, 0xe60),
- INTCS_VECT(TMU_TUNI0, 0xe80), INTCS_VECT(TMU_TUNI1, 0xea0),
- INTCS_VECT(TMU_TUNI2, 0xec0),
- INTCS_VECT(CMT0, 0xf00),
- INTCS_VECT(TSIF0, 0xf20),
- /* CMT2 */
- INTCS_VECT(LMB, 0xf60),
- INTCS_VECT(CTI, 0x400),
- /* RWDT0 */
- INTCS_VECT(ICB, 0x480),
- INTCS_VECT(JPU_JPEG, 0x560),
- INTCS_VECT(LCDC, 0x580),
- INTCS_VECT(LCRC, 0x5a0),
- INTCS_VECT(RTDMAC2_1_DEI0, 0x1300), INTCS_VECT(RTDMAC2_1_DEI1, 0x1320),
- INTCS_VECT(RTDMAC2_1_DEI2, 0x1340), INTCS_VECT(RTDMAC2_1_DEI3, 0x1360),
- INTCS_VECT(RTDMAC2_2_DEI4, 0x1380), INTCS_VECT(RTDMAC2_2_DEI5, 0x13a0),
- INTCS_VECT(RTDMAC2_2_DADERR, 0x13c0),
- INTCS_VECT(ISP, 0x1720),
- INTCS_VECT(LCDC1, 0x1780),
- INTCS_VECT(CSIRX, 0x17a0),
- INTCS_VECT(DSITX_DSITX0, 0x17c0),
- INTCS_VECT(DSITX_DSITX1, 0x17e0),
- /* SPU2 */
- /* FSI */
- /* FMSI */
- /* HDMI */
- INTCS_VECT(TMU1_TUNI0, 0x1900), INTCS_VECT(TMU1_TUNI1, 0x1920),
- INTCS_VECT(TMU1_TUNI2, 0x1940),
- INTCS_VECT(CMT4, 0x1980),
- INTCS_VECT(DSITX1_DSITX1_0, 0x19a0),
- INTCS_VECT(DSITX1_DSITX1_1, 0x19c0),
- INTCS_VECT(MFIS2_INTCS, 0x1a00),
- INTCS_VECT(CPORTS2R, 0x1a20),
- /* CEC */
- INTCS_VECT(JPU6E, 0x1a80),
-};
-
-static struct intc_group intcs_groups[] __initdata = {
- INTC_GROUP(RTDMAC_1, RTDMAC_1_DEI0, RTDMAC_1_DEI1,
- RTDMAC_1_DEI2, RTDMAC_1_DEI3),
- INTC_GROUP(RTDMAC_2, RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR),
- INTC_GROUP(VEU, VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3),
- INTC_GROUP(BEU, BEU_BEU0, BEU_BEU1, BEU_BEU2),
- INTC_GROUP(IIC0, IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0),
- INTC_GROUP(IPMMU, IPMMU_IPMMUR, IPMMU_IPMMUR2),
- INTC_GROUP(IIC2, IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2),
- INTC_GROUP(RTDMAC2_1, RTDMAC2_1_DEI0, RTDMAC2_1_DEI1,
- RTDMAC2_1_DEI2, RTDMAC2_1_DEI3),
- INTC_GROUP(RTDMAC2_2, RTDMAC2_2_DEI4,
- RTDMAC2_2_DEI5, RTDMAC2_2_DADERR),
- INTC_GROUP(TMU1, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0),
- INTC_GROUP(DSITX, DSITX_DSITX0, DSITX_DSITX1),
-};
-
-static struct intc_mask_reg intcs_mask_registers[] = {
- { 0xffd20184, 0xffd201c4, 8, /* IMR1SA / IMCR1SA */
- { BEU_BEU2, BEU_BEU1, BEU_BEU0, CEU,
- VEU_VEU3, VEU_VEU2, VEU_VEU1, VEU_VEU0 } },
- { 0xffd20188, 0xffd201c8, 8, /* IMR2SA / IMCR2SA */
- { 0, 0, 0, VPU,
- 0, 0, 0, 0 } },
- { 0xffd2018c, 0xffd201cc, 8, /* IMR3SA / IMCR3SA */
- { 0, 0, 0, _2DDMAC,
- 0, 0, 0, ICB } },
- { 0xffd20190, 0xffd201d0, 8, /* IMR4SA / IMCR4SA */
- { 0, 0, 0, CTI,
- JPU_JPEG, 0, LCRC, LCDC } },
- { 0xffd20194, 0xffd201d4, 8, /* IMR5SA / IMCR5SA */
- { 0, RTDMAC_2_DADERR, RTDMAC_2_DEI5, RTDMAC_2_DEI4,
- RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } },
- { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
- { 0, 0, MSIOF, 0,
- 0, 0, 0, 0 } },
- { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
- { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0,
- 0, 0, 0, 0 } },
- { 0xffd201a4, 0xffd201e4, 8, /* IMR9SA / IMCR9SA */
- { 0, 0, 0, CMT0,
- IIC2_DTEI2, IIC2_WAITI2, IIC2_TACKI2, IIC2_ALI2 } },
- { 0xffd201a8, 0xffd201e8, 8, /* IMR10SA / IMCR10SA */
- { 0, 0, IPMMU_IPMMUR2, IPMMU_IPMMUR,
- 0, 0, 0, 0 } },
- { 0xffd201ac, 0xffd201ec, 8, /* IMR11SA / IMCR11SA */
- { IIC0_DTEI0, IIC0_WAITI0, IIC0_TACKI0, IIC0_ALI0,
- 0, TSIF1, LMB, TSIF0 } },
- { 0xffd50180, 0xffd501c0, 8, /* IMR0SA3 / IMCR0SA3 */
- { 0, RTDMAC2_2_DADERR, RTDMAC2_2_DEI5, RTDMAC2_2_DEI4,
- RTDMAC2_1_DEI3, RTDMAC2_1_DEI2, RTDMAC2_1_DEI1, RTDMAC2_1_DEI0 } },
- { 0xffd50190, 0xffd501d0, 8, /* IMR4SA3 / IMCR4SA3 */
- { 0, ISP, 0, 0,
- LCDC1, CSIRX, DSITX_DSITX0, DSITX_DSITX1 } },
- { 0xffd50198, 0xffd501d8, 8, /* IMR6SA3 / IMCR6SA3 */
- { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
- CMT4, DSITX1_DSITX1_0, DSITX1_DSITX1_1, 0 } },
- { 0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */
- { MFIS2_INTCS, CPORTS2R, 0, 0,
- JPU6E, 0, 0, 0 } },
-};
-
-/* Priority is needed for INTCA to receive the INTCS interrupt */
-static struct intc_prio_reg intcs_prio_registers[] = {
- { 0xffd20000, 0, 16, 4, /* IPRAS */ { CTI, 0, _2DDMAC, ICB } },
- { 0xffd20004, 0, 16, 4, /* IPRBS */ { JPU_JPEG, LCDC, 0, LCRC } },
- { 0xffd20010, 0, 16, 4, /* IPRES */ { RTDMAC_1, CEU, 0, VPU } },
- { 0xffd20014, 0, 16, 4, /* IPRFS */ { 0, RTDMAC_2, 0, CMT0 } },
- { 0xffd20018, 0, 16, 4, /* IPRGS */ { TMU_TUNI0, TMU_TUNI1,
- TMU_TUNI2, TSIF1 } },
- { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } },
- { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } },
- { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } },
- { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } },
- { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } },
- { 0xffd50000, 0, 16, 4, /* IPRAS3 */ { RTDMAC2_1, 0, 0, 0 } },
- { 0xffd50004, 0, 16, 4, /* IPRBS3 */ { RTDMAC2_2, 0, 0, 0 } },
- { 0xffd50020, 0, 16, 4, /* IPRIS3 */ { 0, ISP, 0, 0 } },
- { 0xffd50024, 0, 16, 4, /* IPRJS3 */ { LCDC1, CSIRX, DSITX, 0 } },
- { 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, 0 } },
- { 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, DSITX1_DSITX1_0,
- DSITX1_DSITX1_1, 0 } },
- { 0xffd50038, 0, 16, 4, /* IPROS3 */ { ENABLED_INTCS, CPORTS2R,
- 0, 0 } },
- { 0xffd5003c, 0, 16, 4, /* IPRPS3 */ { JPU6E, 0, 0, 0 } },
-};
-
-static struct resource intcs_resources[] __initdata = {
- [0] = {
- .start = 0xffd20000,
- .end = 0xffd201ff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 0xffd50000,
- .end = 0xffd501ff,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct intc_desc intcs_desc __initdata = {
- .name = "sh7372-intcs",
- .force_enable = ENABLED_INTCS,
- .skip_syscore_suspend = true,
- .resource = intcs_resources,
- .num_resources = ARRAY_SIZE(intcs_resources),
- .hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
- intcs_prio_registers, NULL, NULL),
-};
-
-static void intcs_demux(unsigned int irq, struct irq_desc *desc)
-{
- void __iomem *reg = (void *)irq_get_handler_data(irq);
- unsigned int evtcodeas = ioread32(reg);
-
- generic_handle_irq(intcs_evt2irq(evtcodeas));
-}
-
-static void __iomem *intcs_ffd2;
-static void __iomem *intcs_ffd5;
-
-void __init sh7372_init_irq(void)
-{
- void __iomem *intevtsa;
- int n;
-
- intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE);
- intevtsa = intcs_ffd2 + 0x100;
- intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE);
-
- register_intc_controller(&intca_desc);
- register_intc_controller(&intca_irq_pins_lo_desc);
- register_intc_controller(&intca_irq_pins_hi_desc);
- register_intc_controller(&intcs_desc);
-
- /* setup dummy cascade chip for INTCS */
- n = evt2irq(0xf80);
- irq_alloc_desc_at(n, numa_node_id());
- irq_set_chip_and_handler_name(n, &dummy_irq_chip,
- handle_level_irq, "level");
- set_irq_flags(n, IRQF_VALID); /* yuck */
-
- /* demux using INTEVTSA */
- irq_set_handler_data(n, (void *)intevtsa);
- irq_set_chained_handler(n, intcs_demux);
-
- /* unmask INTCS in INTAMASK */
- iowrite16(0, intcs_ffd2 + 0x104);
-}
-
-static unsigned short ffd2[0x200];
-static unsigned short ffd5[0x100];
-
-void sh7372_intcs_suspend(void)
-{
- int k;
-
- for (k = 0x00; k <= 0x30; k += 4)
- ffd2[k] = __raw_readw(intcs_ffd2 + k);
-
- for (k = 0x80; k <= 0xb0; k += 4)
- ffd2[k] = __raw_readb(intcs_ffd2 + k);
-
- for (k = 0x180; k <= 0x188; k += 4)
- ffd2[k] = __raw_readb(intcs_ffd2 + k);
-
- for (k = 0x00; k <= 0x3c; k += 4)
- ffd5[k] = __raw_readw(intcs_ffd5 + k);
-
- for (k = 0x80; k <= 0x9c; k += 4)
- ffd5[k] = __raw_readb(intcs_ffd5 + k);
-}
-
-void sh7372_intcs_resume(void)
-{
- int k;
-
- for (k = 0x00; k <= 0x30; k += 4)
- __raw_writew(ffd2[k], intcs_ffd2 + k);
-
- for (k = 0x80; k <= 0xb0; k += 4)
- __raw_writeb(ffd2[k], intcs_ffd2 + k);
-
- for (k = 0x180; k <= 0x188; k += 4)
- __raw_writeb(ffd2[k], intcs_ffd2 + k);
-
- for (k = 0x00; k <= 0x3c; k += 4)
- __raw_writew(ffd5[k], intcs_ffd5 + k);
-
- for (k = 0x80; k <= 0x9c; k += 4)
- __raw_writeb(ffd5[k], intcs_ffd5 + k);
-}
-
-#define E694_BASE IOMEM(0xe6940000)
-#define E695_BASE IOMEM(0xe6950000)
-
-static unsigned short e694[0x200];
-static unsigned short e695[0x200];
-
-void sh7372_intca_suspend(void)
-{
- int k;
-
- for (k = 0x00; k <= 0x38; k += 4)
- e694[k] = __raw_readw(E694_BASE + k);
-
- for (k = 0x80; k <= 0xb4; k += 4)
- e694[k] = __raw_readb(E694_BASE + k);
-
- for (k = 0x180; k <= 0x1b4; k += 4)
- e694[k] = __raw_readb(E694_BASE + k);
-
- for (k = 0x00; k <= 0x50; k += 4)
- e695[k] = __raw_readw(E695_BASE + k);
-
- for (k = 0x80; k <= 0xa8; k += 4)
- e695[k] = __raw_readb(E695_BASE + k);
-
- for (k = 0x180; k <= 0x1a8; k += 4)
- e695[k] = __raw_readb(E695_BASE + k);
-}
-
-void sh7372_intca_resume(void)
-{
- int k;
-
- for (k = 0x00; k <= 0x38; k += 4)
- __raw_writew(e694[k], E694_BASE + k);
-
- for (k = 0x80; k <= 0xb4; k += 4)
- __raw_writeb(e694[k], E694_BASE + k);
-
- for (k = 0x180; k <= 0x1b4; k += 4)
- __raw_writeb(e694[k], E694_BASE + k);
-
- for (k = 0x00; k <= 0x50; k += 4)
- __raw_writew(e695[k], E695_BASE + k);
-
- for (k = 0x80; k <= 0xa8; k += 4)
- __raw_writeb(e695[k], E695_BASE + k);
-
- for (k = 0x180; k <= 0x1a8; k += 4)
- __raw_writeb(e695[k], E695_BASE + k);
-}
diff --git a/arch/arm/mach-shmobile/pm-r8a7790.c b/arch/arm/mach-shmobile/pm-r8a7790.c
deleted file mode 100644
index 80e8d95e54d3..000000000000
--- a/arch/arm/mach-shmobile/pm-r8a7790.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * r8a7790 Power management support
- *
- * Copyright (C) 2013 Renesas Electronics Corporation
- * Copyright (C) 2011 Renesas Solutions Corp.
- * Copyright (C) 2011 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <asm/io.h>
-#include "common.h"
-#include "pm-rcar.h"
-#include "r8a7790.h"
-
-/* RST */
-#define RST 0xe6160000
-#define CA15BAR 0x0020
-#define CA7BAR 0x0030
-#define CA15RESCNT 0x0040
-#define CA7RESCNT 0x0044
-
-/* On-chip RAM */
-#define MERAM 0xe8080000
-
-/* SYSC */
-#define SYSCIER 0x0c
-#define SYSCIMR 0x10
-
-#if defined(CONFIG_SMP)
-
-static void __init r8a7790_sysc_init(void)
-{
- void __iomem *base = rcar_sysc_init(0xe6180000);
-
- /* enable all interrupt sources, but do not use interrupt handler */
- iowrite32(0x0131000e, base + SYSCIER);
- iowrite32(0, base + SYSCIMR);
-}
-
-#else /* CONFIG_SMP */
-
-static inline void r8a7790_sysc_init(void) {}
-
-#endif /* CONFIG_SMP */
-
-void __init r8a7790_pm_init(void)
-{
- void __iomem *p;
- u32 bar;
- static int once;
-
- if (once++)
- return;
-
- /* MERAM for jump stub, because BAR requires 256KB aligned address */
- p = ioremap_nocache(MERAM, shmobile_boot_size);
- memcpy_toio(p, shmobile_boot_vector, shmobile_boot_size);
- iounmap(p);
-
- /* setup reset vectors */
- p = ioremap_nocache(RST, 0x63);
- bar = (MERAM >> 8) & 0xfffffc00;
- writel_relaxed(bar, p + CA15BAR);
- writel_relaxed(bar, p + CA7BAR);
- writel_relaxed(bar | 0x10, p + CA15BAR);
- writel_relaxed(bar | 0x10, p + CA7BAR);
-
- /* de-assert reset for all CPUs */
- writel_relaxed((readl_relaxed(p + CA15RESCNT) & ~0x0f) | 0xa5a50000,
- p + CA15RESCNT);
- writel_relaxed((readl_relaxed(p + CA7RESCNT) & ~0x0f) | 0x5a5a0000,
- p + CA7RESCNT);
- iounmap(p);
-
- r8a7790_sysc_init();
- shmobile_smp_apmu_suspend_init();
-}
diff --git a/arch/arm/mach-shmobile/pm-r8a7791.c b/arch/arm/mach-shmobile/pm-r8a7791.c
deleted file mode 100644
index 25f107bb3657..000000000000
--- a/arch/arm/mach-shmobile/pm-r8a7791.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * r8a7791 Power management support
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright (C) 2011 Renesas Solutions Corp.
- * Copyright (C) 2011 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <asm/io.h>
-#include "common.h"
-#include "pm-rcar.h"
-#include "r8a7791.h"
-
-#define RST 0xe6160000
-#define CA15BAR 0x0020
-#define CA15RESCNT 0x0040
-#define RAM 0xe6300000
-
-/* SYSC */
-#define SYSCIER 0x0c
-#define SYSCIMR 0x10
-
-#if defined(CONFIG_SMP)
-
-static void __init r8a7791_sysc_init(void)
-{
- void __iomem *base = rcar_sysc_init(0xe6180000);
-
- /* enable all interrupt sources, but do not use interrupt handler */
- iowrite32(0x0131000e, base + SYSCIER);
- iowrite32(0, base + SYSCIMR);
-}
-
-#else /* CONFIG_SMP */
-
-static inline void r8a7791_sysc_init(void) {}
-
-#endif /* CONFIG_SMP */
-
-void __init r8a7791_pm_init(void)
-{
- void __iomem *p;
- u32 bar;
- static int once;
-
- if (once++)
- return;
-
- /* RAM for jump stub, because BAR requires 256KB aligned address */
- p = ioremap_nocache(RAM, shmobile_boot_size);
- memcpy_toio(p, shmobile_boot_vector, shmobile_boot_size);
- iounmap(p);
-
- /* setup reset vectors */
- p = ioremap_nocache(RST, 0x63);
- bar = (RAM >> 8) & 0xfffffc00;
- writel_relaxed(bar, p + CA15BAR);
- writel_relaxed(bar | 0x10, p + CA15BAR);
-
- /* de-assert reset for all CPUs */
- writel_relaxed((readl_relaxed(p + CA15RESCNT) & ~0x0f) | 0xa5a50000,
- p + CA15RESCNT);
- iounmap(p);
-
- r8a7791_sysc_init();
- shmobile_smp_apmu_suspend_init();
-}
diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c
new file mode 100644
index 000000000000..6815781ad116
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c
@@ -0,0 +1,115 @@
+/*
+ * R-Car Generation 2 Power management support
+ *
+ * Copyright (C) 2013 - 2015 Renesas Electronics Corporation
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2011 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <asm/io.h>
+#include "common.h"
+#include "pm-rcar.h"
+#include "rcar-gen2.h"
+
+/* RST */
+#define RST 0xe6160000
+#define CA15BAR 0x0020
+#define CA7BAR 0x0030
+#define CA15RESCNT 0x0040
+#define CA7RESCNT 0x0044
+
+/* On-chip RAM */
+#define MERAM 0xe8080000
+#define RAM 0xe6300000
+
+/* SYSC */
+#define SYSCIER 0x0c
+#define SYSCIMR 0x10
+
+#if defined(CONFIG_SMP)
+
+static void __init rcar_gen2_sysc_init(u32 syscier)
+{
+ void __iomem *base = rcar_sysc_init(0xe6180000);
+
+ /* enable all interrupt sources, but do not use an interrupt handler */
+ iowrite32(syscier, base + SYSCIER);
+ iowrite32(0, base + SYSCIMR);
+}
+
+#else /* CONFIG_SMP */
+
+static inline void rcar_gen2_sysc_init(u32 syscier) {}
+
+#endif /* CONFIG_SMP */
+
+void __init rcar_gen2_pm_init(void)
+{
+ void __iomem *p;
+ u32 bar;
+ static int once;
+ struct device_node *np, *cpus;
+ bool has_a7 = false;
+ bool has_a15 = false;
+ phys_addr_t boot_vector_addr = 0;
+ u32 syscier = 0;
+
+ if (once++)
+ return;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (!cpus)
+ return;
+
+ for_each_child_of_node(cpus, np) {
+ if (of_device_is_compatible(np, "arm,cortex-a15"))
+ has_a15 = true;
+ else if (of_device_is_compatible(np, "arm,cortex-a7"))
+ has_a7 = true;
+ }
+
+ if (of_machine_is_compatible("renesas,r8a7790")) {
+ boot_vector_addr = MERAM;
+ syscier = 0x013111ef;
+
+ } else if (of_machine_is_compatible("renesas,r8a7791")) {
+ boot_vector_addr = RAM;
+ syscier = 0x00111003;
+ }
+
+ /* RAM for the jump stub, because the BAR requires a 256KB-aligned address */
+ p = ioremap_nocache(boot_vector_addr, shmobile_boot_size);
+ memcpy_toio(p, shmobile_boot_vector, shmobile_boot_size);
+ iounmap(p);
+
+ /* setup reset vectors */
+ p = ioremap_nocache(RST, 0x63);
+ bar = (boot_vector_addr >> 8) & 0xfffffc00;
+ if (has_a15) {
+ writel_relaxed(bar, p + CA15BAR);
+ writel_relaxed(bar | 0x10, p + CA15BAR);
+
+ /* de-assert reset for CA15 CPUs */
+ writel_relaxed((readl_relaxed(p + CA15RESCNT) & ~0x0f) |
+ 0xa5a50000, p + CA15RESCNT);
+ }
+ if (has_a7) {
+ writel_relaxed(bar, p + CA7BAR);
+ writel_relaxed(bar | 0x10, p + CA7BAR);
+
+ /* de-assert reset for CA7 CPUs */
+ writel_relaxed((readl_relaxed(p + CA7RESCNT) & ~0x0f) |
+ 0x5a5a0000, p + CA7RESCNT);
+ }
+ iounmap(p);
+
+ rcar_gen2_sysc_init(syscier);
+ shmobile_smp_apmu_suspend_init();
+}
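
(Illustrative sketch, not part of the patch.) rcar_gen2_pm_init() above derives
the CA15BAR/CA7BAR value from the boot-vector address as (addr >> 8) & 0xfffffc00,
which effectively requires the vector to be 256KB (2^18) aligned, and then writes
the value a second time with bit 0x10 set; treating that bit as the enable bit is
an inference from the paired writes, not something the patch states. A minimal
standalone sketch of the computation, using the MERAM/RAM addresses from the hunk:

/* Not kernel code: only the address constants and the expression come from the patch. */
#include <stdint.h>
#include <stdio.h>

#define MERAM 0xe8080000u	/* r8a7790 on-chip RAM, from the patch */
#define RAM   0xe6300000u	/* r8a7791 on-chip RAM, from the patch */

static uint32_t rcar_gen2_bar(uint32_t boot_vector_addr)
{
	/* same expression as in rcar_gen2_pm_init() */
	return (boot_vector_addr >> 8) & 0xfffffc00u;
}

int main(void)
{
	uint32_t addrs[] = { MERAM, RAM };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		uint32_t bar = rcar_gen2_bar(addrs[i]);

		printf("boot vector 0x%08x -> BAR 0x%08x, then 0x%08x\n",
		       addrs[i], bar, bar | 0x10u);
	}
	return 0;
}
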
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
deleted file mode 100644
index c0293ae4b013..000000000000
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * sh7372 Power management support
- *
- * Copyright (C) 2011 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pm.h>
-#include <linux/suspend.h>
-#include <linux/cpuidle.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/pm_clock.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/bitrev.h>
-#include <linux/console.h>
-
-#include <asm/cpuidle.h>
-#include <asm/io.h>
-#include <asm/tlbflush.h>
-#include <asm/suspend.h>
-
-#include "common.h"
-#include "pm-rmobile.h"
-#include "sh7372.h"
-
-/* DBG */
-#define DBGREG1 IOMEM(0xe6100020)
-#define DBGREG9 IOMEM(0xe6100040)
-
-/* CPGA */
-#define SYSTBCR IOMEM(0xe6150024)
-#define MSTPSR0 IOMEM(0xe6150030)
-#define MSTPSR1 IOMEM(0xe6150038)
-#define MSTPSR2 IOMEM(0xe6150040)
-#define MSTPSR3 IOMEM(0xe6150048)
-#define MSTPSR4 IOMEM(0xe615004c)
-#define PLLC01STPCR IOMEM(0xe61500c8)
-
-/* SYSC */
-#define SYSC_BASE IOMEM(0xe6180000)
-
-#define SBAR IOMEM(0xe6180020)
-#define WUPRMSK IOMEM(0xe6180028)
-#define WUPSMSK IOMEM(0xe618002c)
-#define WUPSMSK2 IOMEM(0xe6180048)
-#define WUPSFAC IOMEM(0xe6180098)
-#define IRQCR IOMEM(0xe618022c)
-#define IRQCR2 IOMEM(0xe6180238)
-#define IRQCR3 IOMEM(0xe6180244)
-#define IRQCR4 IOMEM(0xe6180248)
-#define PDNSEL IOMEM(0xe6180254)
-
-/* INTC */
-#define ICR1A IOMEM(0xe6900000)
-#define ICR2A IOMEM(0xe6900004)
-#define ICR3A IOMEM(0xe6900008)
-#define ICR4A IOMEM(0xe690000c)
-#define INTMSK00A IOMEM(0xe6900040)
-#define INTMSK10A IOMEM(0xe6900044)
-#define INTMSK20A IOMEM(0xe6900048)
-#define INTMSK30A IOMEM(0xe690004c)
-
-/* MFIS */
-/* FIXME: pointing where? */
-#define SMFRAM 0xe6a70000
-
-/* AP-System Core */
-#define APARMBAREA IOMEM(0xe6f10020)
-
-#ifdef CONFIG_PM
-
-#define PM_DOMAIN_ON_OFF_LATENCY_NS 250000
-
-static int sh7372_a4r_pd_suspend(void)
-{
- sh7372_intcs_suspend();
- __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
- return 0;
-}
-
-static bool a4s_suspend_ready;
-
-static int sh7372_a4s_pd_suspend(void)
-{
- /*
- * The A4S domain contains the CPU core and therefore it should
- * only be turned off if the CPU is not in use. This may happen
- * during system suspend, when SYSC is going to be used for generating
- * resume signals and a4s_suspend_ready is set to let
- * sh7372_enter_suspend() know that it can turn A4S off.
- */
- a4s_suspend_ready = true;
- return -EBUSY;
-}
-
-static void sh7372_a4s_pd_resume(void)
-{
- a4s_suspend_ready = false;
-}
-
-static int sh7372_a3sp_pd_suspend(void)
-{
- /*
- * Serial consoles make use of SCIF hardware located in A3SP,
- * keep such power domain on if "no_console_suspend" is set.
- */
- return console_suspend_enabled ? 0 : -EBUSY;
-}
-
-static struct rmobile_pm_domain sh7372_pm_domains[] = {
- {
- .genpd.name = "A4LC",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 1,
- },
- {
- .genpd.name = "A4MP",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 2,
- },
- {
- .genpd.name = "D4",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 3,
- },
- {
- .genpd.name = "A4R",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 5,
- .suspend = sh7372_a4r_pd_suspend,
- .resume = sh7372_intcs_resume,
- },
- {
- .genpd.name = "A3RV",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 6,
- },
- {
- .genpd.name = "A3RI",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 8,
- },
- {
- .genpd.name = "A4S",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 10,
- .gov = &pm_domain_always_on_gov,
- .no_debug = true,
- .suspend = sh7372_a4s_pd_suspend,
- .resume = sh7372_a4s_pd_resume,
- },
- {
- .genpd.name = "A3SP",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 11,
- .gov = &pm_domain_always_on_gov,
- .no_debug = true,
- .suspend = sh7372_a3sp_pd_suspend,
- },
- {
- .genpd.name = "A3SG",
- .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
- .base = SYSC_BASE,
- .bit_shift = 13,
- },
-};
-
-void __init sh7372_init_pm_domains(void)
-{
- rmobile_init_domains(sh7372_pm_domains, ARRAY_SIZE(sh7372_pm_domains));
- pm_genpd_add_subdomain_names("A4LC", "A3RV");
- pm_genpd_add_subdomain_names("A4R", "A4LC");
- pm_genpd_add_subdomain_names("A4S", "A3SG");
- pm_genpd_add_subdomain_names("A4S", "A3SP");
-}
-
-#endif /* CONFIG_PM */
-
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
-static void sh7372_set_reset_vector(unsigned long address)
-{
- /* set reset vector, translate 4k */
- __raw_writel(address, SBAR);
- __raw_writel(0, APARMBAREA);
-}
-
-static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
-{
- if (pllc0_on)
- __raw_writel(0, PLLC01STPCR);
- else
- __raw_writel(1 << 28, PLLC01STPCR);
-
- __raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
- cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
- __raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
-
- /* disable reset vector translation */
- __raw_writel(0, SBAR);
-}
-
-static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
-{
- unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
- unsigned long msk, msk2;
-
- /* check active clocks to determine potential wakeup sources */
-
- mstpsr0 = __raw_readl(MSTPSR0);
- if ((mstpsr0 & 0x00000003) != 0x00000003) {
- pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
- return 0;
- }
-
- mstpsr1 = __raw_readl(MSTPSR1);
- if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
- pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
- return 0;
- }
-
- mstpsr2 = __raw_readl(MSTPSR2);
- if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
- pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
- return 0;
- }
-
- mstpsr3 = __raw_readl(MSTPSR3);
- if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
- pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
- return 0;
- }
-
- mstpsr4 = __raw_readl(MSTPSR4);
- if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
- pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
- return 0;
- }
-
- msk = 0;
- msk2 = 0;
-
- /* make bitmaps of limited number of wakeup sources */
-
- if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
- msk |= 1 << 31;
-
- if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
- msk |= 1 << 21;
-
- if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
- msk |= 1 << 2;
-
- if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
- msk |= 1 << 1;
-
- if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
- msk |= 1 << 1;
-
- if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
- msk |= 1 << 1;
-
- if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
- msk2 |= 1 << 17;
-
- *mskp = msk;
- *msk2p = msk2;
-
- return 1;
-}
-
-static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
-{
- u16 tmp, irqcr1, irqcr2;
- int k;
-
- irqcr1 = 0;
- irqcr2 = 0;
-
- /* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
- for (k = 0; k <= 7; k++) {
- tmp = (icr >> ((7 - k) * 4)) & 0xf;
- irqcr1 |= (tmp & 0x03) << (k * 2);
- irqcr2 |= (tmp >> 2) << (k * 2);
- }
-
- *irqcr1p = irqcr1;
- *irqcr2p = irqcr2;
-}
-
-static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
-{
- u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
- unsigned long tmp;
-
- /* read IRQ0A -> IRQ15A mask */
- tmp = bitrev8(__raw_readb(INTMSK00A));
- tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;
-
- /* setup WUPSMSK from clocks and external IRQ mask */
- msk = (~msk & 0xc030000f) | (tmp << 4);
- __raw_writel(msk, WUPSMSK);
-
- /* propagate level/edge trigger for external IRQ 0->15 */
- sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
- sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
- __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
- __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);
-
- /* read IRQ16A -> IRQ31A mask */
- tmp = bitrev8(__raw_readb(INTMSK20A));
- tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;
-
- /* setup WUPSMSK2 from clocks and external IRQ mask */
- msk2 = (~msk2 & 0x00030000) | tmp;
- __raw_writel(msk2, WUPSMSK2);
-
- /* propagate level/edge trigger for external IRQ 16->31 */
- sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
- sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
- __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
- __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
-}
-
-static void sh7372_enter_a3sm_common(int pllc0_on)
-{
- /* use INTCA together with SYSC for wakeup */
- sh7372_setup_sysc(1 << 0, 0);
- sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
- sh7372_enter_sysc(pllc0_on, 1 << 12);
-}
-
-static void sh7372_enter_a4s_common(int pllc0_on)
-{
- sh7372_intca_suspend();
- sh7372_set_reset_vector(SMFRAM);
- sh7372_enter_sysc(pllc0_on, 1 << 10);
- sh7372_intca_resume();
-}
-
-static void sh7372_pm_setup_smfram(void)
-{
- /* pass physical address of cpu_resume() to assembly resume code */
- sh7372_cpu_resume = virt_to_phys(cpu_resume);
-
- memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
-}
-#else
-static inline void sh7372_pm_setup_smfram(void) {}
-#endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */
-
-#ifdef CONFIG_CPU_IDLE
-static int sh7372_do_idle_core_standby(unsigned long unused)
-{
- cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
- return 0;
-}
-
-static int sh7372_enter_core_standby(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
-
- /* enter sleep mode with SYSTBCR to 0x10 */
- __raw_writel(0x10, SYSTBCR);
- cpu_suspend(0, sh7372_do_idle_core_standby);
- __raw_writel(0, SYSTBCR);
-
- /* disable reset vector translation */
- __raw_writel(0, SBAR);
-
- return 1;
-}
-
-static int sh7372_enter_a3sm_pll_on(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- sh7372_enter_a3sm_common(1);
- return 2;
-}
-
-static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- sh7372_enter_a3sm_common(0);
- return 3;
-}
-
-static int sh7372_enter_a4s(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- unsigned long msk, msk2;
-
- if (!sh7372_sysc_valid(&msk, &msk2))
- return sh7372_enter_a3sm_pll_off(dev, drv, index);
-
- sh7372_setup_sysc(msk, msk2);
- sh7372_enter_a4s_common(0);
- return 4;
-}
-
-static struct cpuidle_driver sh7372_cpuidle_driver = {
- .name = "sh7372_cpuidle",
- .owner = THIS_MODULE,
- .state_count = 5,
- .safe_state_index = 0, /* C1 */
- .states[0] = ARM_CPUIDLE_WFI_STATE,
- .states[1] = {
- .name = "C2",
- .desc = "Core Standby Mode",
- .exit_latency = 10,
- .target_residency = 20 + 10,
- .enter = sh7372_enter_core_standby,
- },
- .states[2] = {
- .name = "C3",
- .desc = "A3SM PLL ON",
- .exit_latency = 20,
- .target_residency = 30 + 20,
- .enter = sh7372_enter_a3sm_pll_on,
- },
- .states[3] = {
- .name = "C4",
- .desc = "A3SM PLL OFF",
- .exit_latency = 120,
- .target_residency = 30 + 120,
- .enter = sh7372_enter_a3sm_pll_off,
- },
- .states[4] = {
- .name = "C5",
- .desc = "A4S PLL OFF",
- .exit_latency = 240,
- .target_residency = 30 + 240,
- .enter = sh7372_enter_a4s,
- .disabled = true,
- },
-};
-
-static void __init sh7372_cpuidle_init(void)
-{
- shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
-}
-#else
-static void __init sh7372_cpuidle_init(void) {}
-#endif
-
-#ifdef CONFIG_SUSPEND
-static int sh7372_enter_suspend(suspend_state_t suspend_state)
-{
- unsigned long msk, msk2;
-
- /* check active clocks to determine potential wakeup sources */
- if (sh7372_sysc_valid(&msk, &msk2) && a4s_suspend_ready) {
- /* convert INTC mask/sense to SYSC mask/sense */
- sh7372_setup_sysc(msk, msk2);
-
- /* enter A4S sleep with PLLC0 off */
- pr_debug("entering A4S\n");
- sh7372_enter_a4s_common(0);
- return 0;
- }
-
- /* default to enter A3SM sleep with PLLC0 off */
- pr_debug("entering A3SM\n");
- sh7372_enter_a3sm_common(0);
- return 0;
-}
-
-/**
- * sh7372_pm_notifier_fn - SH7372 PM notifier routine.
- * @notifier: Unused.
- * @pm_event: Event being handled.
- * @unused: Unused.
- */
-static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
- unsigned long pm_event, void *unused)
-{
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- /*
- * This is necessary, because the A4R domain has to be "on"
- * when suspend_device_irqs() and resume_device_irqs() are
- * executed during system suspend and resume, respectively, so
- * that those functions don't crash while accessing the INTCS.
- */
- pm_genpd_name_poweron("A4R");
- break;
- case PM_POST_SUSPEND:
- pm_genpd_poweroff_unused();
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static void sh7372_suspend_init(void)
-{
- shmobile_suspend_ops.enter = sh7372_enter_suspend;
- pm_notifier(sh7372_pm_notifier_fn, 0);
-}
-#else
-static void sh7372_suspend_init(void) {}
-#endif
-
-void __init sh7372_pm_init(void)
-{
- /* enable DBG hardware block to kick SYSC */
- __raw_writel(0x0000a500, DBGREG9);
- __raw_writel(0x0000a501, DBGREG9);
- __raw_writel(0x00000000, DBGREG1);
-
- /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
- __raw_writel(0, PDNSEL);
-
- sh7372_pm_setup_smfram();
-
- sh7372_suspend_init();
- sh7372_cpuidle_init();
-}
-
-void __init sh7372_pm_init_late(void)
-{
- shmobile_init_late();
- pm_genpd_name_attach_cpuidle("A4S", 4);
-}
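
(Illustrative sketch, not part of the patch.) The removed sh7372_icr_to_irqcr()
above repacks the INTCA ICR sense settings, four bits per external IRQ with IRQ0
in the highest nibble, into the SYSC IRQCR/IRQCR2 layout, two bits per IRQ with
IRQ0 in the lowest pair: the low two bits of each field go to IRQCR and the high
two bits to IRQCR2. The same loop as a standalone program, fed a hypothetical
ICR value:

/* Not kernel code: the loop body mirrors the removed helper verbatim. */
#include <stdint.h>
#include <stdio.h>

static void icr_to_irqcr(uint32_t icr, uint16_t *irqcr1p, uint16_t *irqcr2p)
{
	uint16_t tmp, irqcr1 = 0, irqcr2 = 0;
	int k;

	for (k = 0; k <= 7; k++) {
		tmp = (icr >> ((7 - k) * 4)) & 0xf;
		irqcr1 |= (tmp & 0x03) << (k * 2);	/* low two bits of each field */
		irqcr2 |= (tmp >> 2) << (k * 2);	/* high two bits of each field */
	}

	*irqcr1p = irqcr1;
	*irqcr2p = irqcr2;
}

int main(void)
{
	uint32_t icr = 0x14000000;	/* hypothetical: IRQ0 = 0x1, IRQ1 = 0x4 */
	uint16_t irqcr1, irqcr2;

	icr_to_irqcr(icr, &irqcr1, &irqcr2);
	printf("ICR 0x%08x -> IRQCR 0x%04x, IRQCR2 0x%04x\n", icr, irqcr1, irqcr2);
	return 0;
}
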
diff --git a/arch/arm/mach-shmobile/r8a73a4.h b/arch/arm/mach-shmobile/r8a73a4.h
deleted file mode 100644
index 70dcd847a86e..000000000000
--- a/arch/arm/mach-shmobile/r8a73a4.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __ASM_R8A73A4_H__
-#define __ASM_R8A73A4_H__
-
-/* DMA slave IDs */
-enum {
- SHDMA_SLAVE_INVALID,
- SHDMA_SLAVE_MMCIF0_TX,
- SHDMA_SLAVE_MMCIF0_RX,
- SHDMA_SLAVE_MMCIF1_TX,
- SHDMA_SLAVE_MMCIF1_RX,
-};
-
-void r8a73a4_add_standard_devices(void);
-void r8a73a4_clock_init(void);
-void r8a73a4_pinmux_init(void);
-
-#endif /* __ASM_R8A73A4_H__ */
diff --git a/arch/arm/mach-shmobile/r8a7790.h b/arch/arm/mach-shmobile/r8a7790.h
index bf73a850aaed..1a46d026052c 100644
--- a/arch/arm/mach-shmobile/r8a7790.h
+++ b/arch/arm/mach-shmobile/r8a7790.h
@@ -1,7 +1,6 @@
#ifndef __ASM_R8A7790_H__
#define __ASM_R8A7790_H__
-void r8a7790_pm_init(void);
extern struct smp_operations r8a7790_smp_ops;
#endif /* __ASM_R8A7790_H__ */
diff --git a/arch/arm/mach-shmobile/r8a7791.h b/arch/arm/mach-shmobile/r8a7791.h
index 6cf11eb69d10..7ca0b7d0f59b 100644
--- a/arch/arm/mach-shmobile/r8a7791.h
+++ b/arch/arm/mach-shmobile/r8a7791.h
@@ -1,7 +1,6 @@
#ifndef __ASM_R8A7791_H__
#define __ASM_R8A7791_H__
-void r8a7791_pm_init(void);
extern struct smp_operations r8a7791_smp_ops;
#endif /* __ASM_R8A7791_H__ */
diff --git a/arch/arm/mach-shmobile/rcar-gen2.h b/arch/arm/mach-shmobile/rcar-gen2.h
index ce53cb5f53a1..8a66b4aae035 100644
--- a/arch/arm/mach-shmobile/rcar-gen2.h
+++ b/arch/arm/mach-shmobile/rcar-gen2.h
@@ -5,5 +5,6 @@ void rcar_gen2_timer_init(void);
#define MD(nr) BIT(nr)
u32 rcar_gen2_read_mode_pins(void);
void rcar_gen2_reserve(void);
+void rcar_gen2_pm_init(void);
#endif /* __ASM_RCAR_GEN2_H__ */
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
new file mode 100644
index 000000000000..384e6e934b87
--- /dev/null
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -0,0 +1,147 @@
+/*
+ * R-Car Generation 2 da9063/da9210 regulator quirk
+ *
+ * The r8a7790/lager and r8a7791/koelsch development boards have da9063 and
+ * da9210 regulators. Both regulators have their interrupt request lines tied
+ * to the same interrupt pin (IRQ2) on the SoC.
+ *
+ * After cold boot or da9063-induced restart, both the da9063 and da9210 seem
+ * to assert their interrupt request lines. Hence as soon as one driver
+ * requests this irq, it gets stuck in an interrupt storm, as it only manages
+ * to deassert its own interrupt request line, and the other driver hasn't
+ * installed an interrupt handler yet.
+ *
+ * To handle this, install a quirk that masks the interrupts in both the
+ * da9063 and da9210. This quirk has to run after the i2c master driver has
+ * been initialized, but before the i2c slave drivers are initialized.
+ *
+ * Copyright (C) 2015 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/mfd/da9063/registers.h>
+
+
+#define IRQC_BASE 0xe61c0000
+#define IRQC_MONITOR 0x104 /* IRQn Signal Level Monitor Register */
+
+#define REGULATOR_IRQ_MASK BIT(2) /* IRQ2, active low */
+
+static void __iomem *irqc;
+
+static const u8 da9063_mask_regs[] = {
+ DA9063_REG_IRQ_MASK_A,
+ DA9063_REG_IRQ_MASK_B,
+ DA9063_REG_IRQ_MASK_C,
+ DA9063_REG_IRQ_MASK_D,
+};
+
+/* DA9210 System Control and Event Registers */
+#define DA9210_REG_MASK_A 0x54
+#define DA9210_REG_MASK_B 0x55
+
+static const u8 da9210_mask_regs[] = {
+ DA9210_REG_MASK_A,
+ DA9210_REG_MASK_B,
+};
+
+static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
+ unsigned int nregs)
+{
+ unsigned int i;
+
+ dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
+
+ for (i = 0; i < nregs; i++) {
+ int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
+ if (error) {
+ dev_err(&client->dev, "i2c error %d\n", error);
+ return;
+ }
+ }
+}
+
+static int regulator_quirk_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct i2c_client *client;
+ u32 mon;
+
+ mon = ioread32(irqc + IRQC_MONITOR);
+ dev_dbg(dev, "%s: %ld, IRQC_MONITOR = 0x%x\n", __func__, action, mon);
+ if (mon & REGULATOR_IRQ_MASK)
+ goto remove;
+
+ if (action != BUS_NOTIFY_ADD_DEVICE || dev->type == &i2c_adapter_type)
+ return 0;
+
+ client = to_i2c_client(dev);
+ dev_dbg(dev, "Detected %s\n", client->name);
+
+ if (client->addr == 0x58 && !strcmp(client->name, "da9063"))
+ da9xxx_mask_irqs(client, da9063_mask_regs,
+ ARRAY_SIZE(da9063_mask_regs));
+ else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
+ da9xxx_mask_irqs(client, da9210_mask_regs,
+ ARRAY_SIZE(da9210_mask_regs));
+
+ mon = ioread32(irqc + IRQC_MONITOR);
+ if (mon & REGULATOR_IRQ_MASK)
+ goto remove;
+
+ return 0;
+
+remove:
+ dev_info(dev, "IRQ2 is not asserted, removing quirk\n");
+
+ bus_unregister_notifier(&i2c_bus_type, nb);
+ iounmap(irqc);
+ return 0;
+}
+
+static struct notifier_block regulator_quirk_nb = {
+ .notifier_call = regulator_quirk_notify
+};
+
+static int __init rcar_gen2_regulator_quirk(void)
+{
+ u32 mon;
+
+ if (!of_machine_is_compatible("renesas,koelsch") &&
+ !of_machine_is_compatible("renesas,lager"))
+ return -ENODEV;
+
+ irqc = ioremap(IRQC_BASE, PAGE_SIZE);
+ if (!irqc)
+ return -ENOMEM;
+
+ mon = ioread32(irqc + IRQC_MONITOR);
+ if (mon & REGULATOR_IRQ_MASK) {
+ pr_debug("%s: IRQ2 is not asserted, not installing quirk\n",
+ __func__);
+ iounmap(irqc);
+ return 0;
+ }
+
+ pr_info("IRQ2 is asserted, installing da9063/da9210 regulator quirk\n");
+
+ bus_register_notifier(&i2c_bus_type, &regulator_quirk_nb);
+ return 0;
+}
+
+arch_initcall(rcar_gen2_regulator_quirk);
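
(Illustrative sketch, not part of the patch.) The quirk above reduces to a small
decision per bus notification: drop the quirk once IRQC_MONITOR shows IRQ2
de-asserted, otherwise mask the da9063 (address 0x58) or da9210 (address 0x68)
as each client is added. A simplified standalone version of that decision,
omitting the i2c_adapter_type filter and the second monitor read after masking:

/* Not kernel code: addresses, names and the IRQ2 bit come from the file above. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define REGULATOR_IRQ_MASK (1u << 2)	/* IRQ2, active low */

enum quirk_action {
	QUIRK_NONE,
	QUIRK_MASK_DA9063,
	QUIRK_MASK_DA9210,
	QUIRK_REMOVE,
};

static enum quirk_action quirk_decide(unsigned int irqc_monitor, bool add_device,
				      unsigned short addr, const char *name)
{
	/* IRQ2 already de-asserted: the storm cannot happen, remove the quirk */
	if (irqc_monitor & REGULATOR_IRQ_MASK)
		return QUIRK_REMOVE;

	if (!add_device)
		return QUIRK_NONE;

	if (addr == 0x58 && !strcmp(name, "da9063"))
		return QUIRK_MASK_DA9063;
	if (addr == 0x68 && !strcmp(name, "da9210"))
		return QUIRK_MASK_DA9210;

	return QUIRK_NONE;
}

int main(void)
{
	/* IRQ2 still asserted (monitor bit low) and the da9063 just appeared */
	printf("action = %d\n", quirk_decide(0x0, true, 0x58, "da9063"));
	return 0;
}
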
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index c27682291cbf..446cee611902 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -13,280 +13,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/of_platform.h>
-#include <linux/platform_data/irq-renesas-irqc.h>
-#include <linux/serial_sci.h>
-#include <linux/sh_dma.h>
-#include <linux/sh_timer.h>
+
+#include <linux/init.h>
#include <asm/mach/arch.h>
#include "common.h"
-#include "dma-register.h"
-#include "irqs.h"
-#include "r8a73a4.h"
-
-static const struct resource pfc_resources[] = {
- DEFINE_RES_MEM(0xe6050000, 0x9000),
-};
-
-void __init r8a73a4_pinmux_init(void)
-{
- platform_device_register_simple("pfc-r8a73a4", -1, pfc_resources,
- ARRAY_SIZE(pfc_resources));
-}
-
-#define R8A73A4_SCIF(scif_type, _scscr, index, baseaddr, irq) \
-static struct plat_sci_port scif##index##_platform_data = { \
- .type = scif_type, \
- .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, \
- .scscr = _scscr, \
-}; \
- \
-static struct resource scif##index##_resources[] = { \
- DEFINE_RES_MEM(baseaddr, 0x100), \
- DEFINE_RES_IRQ(irq), \
-}
-
-#define R8A73A4_SCIFA(index, baseaddr, irq) \
- R8A73A4_SCIF(PORT_SCIFA, SCSCR_RE | SCSCR_TE | SCSCR_CKE0, \
- index, baseaddr, irq)
-
-#define R8A73A4_SCIFB(index, baseaddr, irq) \
- R8A73A4_SCIF(PORT_SCIFB, SCSCR_RE | SCSCR_TE, \
- index, baseaddr, irq)
-
-R8A73A4_SCIFA(0, 0xe6c40000, gic_spi(144)); /* SCIFA0 */
-R8A73A4_SCIFA(1, 0xe6c50000, gic_spi(145)); /* SCIFA1 */
-R8A73A4_SCIFB(2, 0xe6c20000, gic_spi(148)); /* SCIFB0 */
-R8A73A4_SCIFB(3, 0xe6c30000, gic_spi(149)); /* SCIFB1 */
-R8A73A4_SCIFB(4, 0xe6ce0000, gic_spi(150)); /* SCIFB2 */
-R8A73A4_SCIFB(5, 0xe6cf0000, gic_spi(151)); /* SCIFB3 */
-
-#define r8a73a4_register_scif(index) \
- platform_device_register_resndata(NULL, "sh-sci", index, \
- scif##index##_resources, \
- ARRAY_SIZE(scif##index##_resources), \
- &scif##index##_platform_data, \
- sizeof(scif##index##_platform_data))
-
-static const struct renesas_irqc_config irqc0_data = {
- .irq_base = irq_pin(0), /* IRQ0 -> IRQ31 */
-};
-
-static const struct resource irqc0_resources[] = {
- DEFINE_RES_MEM(0xe61c0000, 0x200), /* IRQC Event Detector Block_0 */
- DEFINE_RES_IRQ(gic_spi(0)), /* IRQ0 */
- DEFINE_RES_IRQ(gic_spi(1)), /* IRQ1 */
- DEFINE_RES_IRQ(gic_spi(2)), /* IRQ2 */
- DEFINE_RES_IRQ(gic_spi(3)), /* IRQ3 */
- DEFINE_RES_IRQ(gic_spi(4)), /* IRQ4 */
- DEFINE_RES_IRQ(gic_spi(5)), /* IRQ5 */
- DEFINE_RES_IRQ(gic_spi(6)), /* IRQ6 */
- DEFINE_RES_IRQ(gic_spi(7)), /* IRQ7 */
- DEFINE_RES_IRQ(gic_spi(8)), /* IRQ8 */
- DEFINE_RES_IRQ(gic_spi(9)), /* IRQ9 */
- DEFINE_RES_IRQ(gic_spi(10)), /* IRQ10 */
- DEFINE_RES_IRQ(gic_spi(11)), /* IRQ11 */
- DEFINE_RES_IRQ(gic_spi(12)), /* IRQ12 */
- DEFINE_RES_IRQ(gic_spi(13)), /* IRQ13 */
- DEFINE_RES_IRQ(gic_spi(14)), /* IRQ14 */
- DEFINE_RES_IRQ(gic_spi(15)), /* IRQ15 */
- DEFINE_RES_IRQ(gic_spi(16)), /* IRQ16 */
- DEFINE_RES_IRQ(gic_spi(17)), /* IRQ17 */
- DEFINE_RES_IRQ(gic_spi(18)), /* IRQ18 */
- DEFINE_RES_IRQ(gic_spi(19)), /* IRQ19 */
- DEFINE_RES_IRQ(gic_spi(20)), /* IRQ20 */
- DEFINE_RES_IRQ(gic_spi(21)), /* IRQ21 */
- DEFINE_RES_IRQ(gic_spi(22)), /* IRQ22 */
- DEFINE_RES_IRQ(gic_spi(23)), /* IRQ23 */
- DEFINE_RES_IRQ(gic_spi(24)), /* IRQ24 */
- DEFINE_RES_IRQ(gic_spi(25)), /* IRQ25 */
- DEFINE_RES_IRQ(gic_spi(26)), /* IRQ26 */
- DEFINE_RES_IRQ(gic_spi(27)), /* IRQ27 */
- DEFINE_RES_IRQ(gic_spi(28)), /* IRQ28 */
- DEFINE_RES_IRQ(gic_spi(29)), /* IRQ29 */
- DEFINE_RES_IRQ(gic_spi(30)), /* IRQ30 */
- DEFINE_RES_IRQ(gic_spi(31)), /* IRQ31 */
-};
-
-static const struct renesas_irqc_config irqc1_data = {
- .irq_base = irq_pin(32), /* IRQ32 -> IRQ57 */
-};
-
-static const struct resource irqc1_resources[] = {
- DEFINE_RES_MEM(0xe61c0200, 0x200), /* IRQC Event Detector Block_1 */
- DEFINE_RES_IRQ(gic_spi(32)), /* IRQ32 */
- DEFINE_RES_IRQ(gic_spi(33)), /* IRQ33 */
- DEFINE_RES_IRQ(gic_spi(34)), /* IRQ34 */
- DEFINE_RES_IRQ(gic_spi(35)), /* IRQ35 */
- DEFINE_RES_IRQ(gic_spi(36)), /* IRQ36 */
- DEFINE_RES_IRQ(gic_spi(37)), /* IRQ37 */
- DEFINE_RES_IRQ(gic_spi(38)), /* IRQ38 */
- DEFINE_RES_IRQ(gic_spi(39)), /* IRQ39 */
- DEFINE_RES_IRQ(gic_spi(40)), /* IRQ40 */
- DEFINE_RES_IRQ(gic_spi(41)), /* IRQ41 */
- DEFINE_RES_IRQ(gic_spi(42)), /* IRQ42 */
- DEFINE_RES_IRQ(gic_spi(43)), /* IRQ43 */
- DEFINE_RES_IRQ(gic_spi(44)), /* IRQ44 */
- DEFINE_RES_IRQ(gic_spi(45)), /* IRQ45 */
- DEFINE_RES_IRQ(gic_spi(46)), /* IRQ46 */
- DEFINE_RES_IRQ(gic_spi(47)), /* IRQ47 */
- DEFINE_RES_IRQ(gic_spi(48)), /* IRQ48 */
- DEFINE_RES_IRQ(gic_spi(49)), /* IRQ49 */
- DEFINE_RES_IRQ(gic_spi(50)), /* IRQ50 */
- DEFINE_RES_IRQ(gic_spi(51)), /* IRQ51 */
- DEFINE_RES_IRQ(gic_spi(52)), /* IRQ52 */
- DEFINE_RES_IRQ(gic_spi(53)), /* IRQ53 */
- DEFINE_RES_IRQ(gic_spi(54)), /* IRQ54 */
- DEFINE_RES_IRQ(gic_spi(55)), /* IRQ55 */
- DEFINE_RES_IRQ(gic_spi(56)), /* IRQ56 */
- DEFINE_RES_IRQ(gic_spi(57)), /* IRQ57 */
-};
-
-#define r8a73a4_register_irqc(idx) \
- platform_device_register_resndata(NULL, "renesas_irqc", \
- idx, irqc##idx##_resources, \
- ARRAY_SIZE(irqc##idx##_resources), \
- &irqc##idx##_data, \
- sizeof(struct renesas_irqc_config))
-
-/* Thermal0 -> Thermal2 */
-static const struct resource thermal0_resources[] = {
- DEFINE_RES_MEM(0xe61f0000, 0x14),
- DEFINE_RES_MEM(0xe61f0100, 0x38),
- DEFINE_RES_MEM(0xe61f0200, 0x38),
- DEFINE_RES_MEM(0xe61f0300, 0x38),
- DEFINE_RES_IRQ(gic_spi(69)),
-};
-
-#define r8a73a4_register_thermal() \
- platform_device_register_simple("rcar_thermal", -1, \
- thermal0_resources, \
- ARRAY_SIZE(thermal0_resources))
-
-static struct sh_timer_config cmt1_platform_data = {
- .channels_mask = 0xff,
-};
-
-static struct resource cmt1_resources[] = {
- DEFINE_RES_MEM(0xe6130000, 0x1004),
- DEFINE_RES_IRQ(gic_spi(120)),
-};
-
-#define r8a73a4_register_cmt(idx) \
- platform_device_register_resndata(NULL, "sh-cmt-48-gen2", \
- idx, cmt##idx##_resources, \
- ARRAY_SIZE(cmt##idx##_resources), \
- &cmt##idx##_platform_data, \
- sizeof(struct sh_timer_config))
-
-/* DMA */
-static const struct sh_dmae_slave_config dma_slaves[] = {
- {
- .slave_id = SHDMA_SLAVE_MMCIF0_TX,
- .addr = 0xee200034,
- .chcr = CHCR_TX(XMIT_SZ_32BIT),
- .mid_rid = 0xd1,
- }, {
- .slave_id = SHDMA_SLAVE_MMCIF0_RX,
- .addr = 0xee200034,
- .chcr = CHCR_RX(XMIT_SZ_32BIT),
- .mid_rid = 0xd2,
- }, {
- .slave_id = SHDMA_SLAVE_MMCIF1_TX,
- .addr = 0xee220034,
- .chcr = CHCR_TX(XMIT_SZ_32BIT),
- .mid_rid = 0xe1,
- }, {
- .slave_id = SHDMA_SLAVE_MMCIF1_RX,
- .addr = 0xee220034,
- .chcr = CHCR_RX(XMIT_SZ_32BIT),
- .mid_rid = 0xe2,
- },
-};
-
-#define DMAE_CHANNEL(a, b) \
- { \
- .offset = (a) - 0x20, \
- .dmars = (a) - 0x20 + 0x40, \
- .chclr_bit = (b), \
- .chclr_offset = 0x80 - 0x20, \
- }
-
-static const struct sh_dmae_channel dma_channels[] = {
- DMAE_CHANNEL(0x8000, 0),
- DMAE_CHANNEL(0x8080, 1),
- DMAE_CHANNEL(0x8100, 2),
- DMAE_CHANNEL(0x8180, 3),
- DMAE_CHANNEL(0x8200, 4),
- DMAE_CHANNEL(0x8280, 5),
- DMAE_CHANNEL(0x8300, 6),
- DMAE_CHANNEL(0x8380, 7),
- DMAE_CHANNEL(0x8400, 8),
- DMAE_CHANNEL(0x8480, 9),
- DMAE_CHANNEL(0x8500, 10),
- DMAE_CHANNEL(0x8580, 11),
- DMAE_CHANNEL(0x8600, 12),
- DMAE_CHANNEL(0x8680, 13),
- DMAE_CHANNEL(0x8700, 14),
- DMAE_CHANNEL(0x8780, 15),
- DMAE_CHANNEL(0x8800, 16),
- DMAE_CHANNEL(0x8880, 17),
- DMAE_CHANNEL(0x8900, 18),
- DMAE_CHANNEL(0x8980, 19),
-};
-
-static const struct sh_dmae_pdata dma_pdata = {
- .slave = dma_slaves,
- .slave_num = ARRAY_SIZE(dma_slaves),
- .channel = dma_channels,
- .channel_num = ARRAY_SIZE(dma_channels),
- .ts_low_shift = TS_LOW_SHIFT,
- .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
- .ts_high_shift = TS_HI_SHIFT,
- .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
- .ts_shift = dma_ts_shift,
- .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
- .dmaor_init = DMAOR_DME,
- .chclr_present = 1,
- .chclr_bitwise = 1,
-};
-
-static struct resource dma_resources[] = {
- DEFINE_RES_MEM(0xe6700020, 0x89e0),
- DEFINE_RES_IRQ(gic_spi(220)),
- {
- /* IRQ for channels 0-19 */
- .start = gic_spi(200),
- .end = gic_spi(219),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-#define r8a73a4_register_dmac() \
- platform_device_register_resndata(NULL, "sh-dma-engine", 0, \
- dma_resources, ARRAY_SIZE(dma_resources), \
- &dma_pdata, sizeof(dma_pdata))
-
-void __init r8a73a4_add_standard_devices(void)
-{
- r8a73a4_register_cmt(1);
- r8a73a4_register_scif(0);
- r8a73a4_register_scif(1);
- r8a73a4_register_scif(2);
- r8a73a4_register_scif(3);
- r8a73a4_register_scif(4);
- r8a73a4_register_scif(5);
- r8a73a4_register_irqc(0);
- r8a73a4_register_irqc(1);
- r8a73a4_register_thermal();
- r8a73a4_register_dmac();
-}
-
-#ifdef CONFIG_USE_OF
static const char *r8a73a4_boards_compat_dt[] __initdata = {
"renesas,r8a73a4",
@@ -298,4 +30,3 @@ DT_MACHINE_START(R8A73A4_DT, "Generic R8A73A4 (Flattened Device Tree)")
.init_late = shmobile_init_late,
.dt_compat = r8a73a4_boards_compat_dt,
MACHINE_END
-#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index dd64caf79216..9832e48396a4 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -842,13 +842,6 @@ static void __init r8a7740_generic_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-#define RESCNT2 IOMEM(0xe6188020)
-static void r8a7740_restart(enum reboot_mode mode, const char *cmd)
-{
- /* Do soft power on reset */
- writel(1 << 31, RESCNT2);
-}
-
static const char *r8a7740_boards_compat_dt[] __initdata = {
"renesas,r8a7740",
NULL,
@@ -861,7 +854,6 @@ DT_MACHINE_START(R8A7740_DT, "Generic R8A7740 (Flattened Device Tree)")
.init_machine = r8a7740_generic_init,
.init_late = shmobile_init_late,
.dt_compat = r8a7740_boards_compat_dt,
- .restart = r8a7740_restart,
MACHINE_END
#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index cef8895a9b82..c49aa094fe17 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk/shmobile.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
@@ -41,6 +42,21 @@
#include "irqs.h"
#include "r8a7778.h"
+#define MODEMR 0xffcc0020
+
+#ifdef CONFIG_COMMON_CLK
+static void __init r8a7778_timer_init(void)
+{
+ u32 mode;
+ void __iomem *modemr = ioremap_nocache(MODEMR, 4);
+
+ BUG_ON(!modemr);
+ mode = ioread32(modemr);
+ iounmap(modemr);
+ r8a7778_clocks_init(mode);
+}
+#endif
+
/* SCIF */
#define R8A7778_SCIF(index, baseaddr, irq) \
static struct plat_sci_port scif##index##_platform_data = { \
@@ -608,6 +624,9 @@ DT_MACHINE_START(R8A7778_DT, "Generic R8A7778 (Flattened Device Tree)")
.init_early = shmobile_init_delay,
.init_irq = r8a7778_init_irq_dt,
.init_late = shmobile_init_late,
+#ifdef CONFIG_COMMON_CLK
+ .init_time = r8a7778_timer_init,
+#endif
.dt_compat = r8a7778_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index d1fa625e61f5..5d13595aa027 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -21,6 +21,7 @@
#include <linux/dma-contiguous.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/mach/arch.h>
@@ -50,9 +51,7 @@ u32 rcar_gen2_read_mode_pins(void)
void __init rcar_gen2_timer_init(void)
{
-#if defined(CONFIG_ARM_ARCH_TIMER) || defined(CONFIG_COMMON_CLK)
u32 mode = rcar_gen2_read_mode_pins();
-#endif
#ifdef CONFIG_ARM_ARCH_TIMER
void __iomem *base;
int extal_mhz = 0;
@@ -128,9 +127,7 @@ void __init rcar_gen2_timer_init(void)
iounmap(base);
#endif /* CONFIG_ARM_ARCH_TIMER */
-#ifdef CONFIG_COMMON_CLK
rcar_gen2_clocks_init(mode);
-#endif
#ifdef CONFIG_ARCH_SHMOBILE_MULTI
clocksource_of_init();
#endif
@@ -199,7 +196,7 @@ void __init rcar_gen2_reserve(void)
of_scan_flat_dt(rcar_gen2_scan_mem, &mrc);
#ifdef CONFIG_DMA_CMA
- if (mrc.size)
+ if (mrc.size && memblock_is_region_memory(mrc.base, mrc.size))
dma_contiguous_reserve_area(mrc.size, mrc.base, 0,
&rcar_gen2_dma_contiguous, true);
#endif
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
deleted file mode 100644
index 458a2cfad417..000000000000
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ /dev/null
@@ -1,1016 +0,0 @@
-/*
- * sh7372 processor support
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2008 Yoshihiro Shimoda
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/of_platform.h>
-#include <linux/uio_driver.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/serial_sci.h>
-#include <linux/sh_dma.h>
-#include <linux/sh_timer.h>
-#include <linux/pm_domain.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_data/sh_ipmmu.h>
-
-#include <asm/mach/map.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-#include "common.h"
-#include "dma-register.h"
-#include "intc.h"
-#include "irqs.h"
-#include "pm-rmobile.h"
-#include "sh7372.h"
-
-static struct map_desc sh7372_io_desc[] __initdata = {
- /* create a 1:1 identity mapping for 0xe6xxxxxx
- * used by CPGA, INTC and PFC.
- */
- {
- .virtual = 0xe6000000,
- .pfn = __phys_to_pfn(0xe6000000),
- .length = 256 << 20,
- .type = MT_DEVICE_NONSHARED
- },
-};
-
-void __init sh7372_map_io(void)
-{
- debug_ll_io_init();
- iotable_init(sh7372_io_desc, ARRAY_SIZE(sh7372_io_desc));
-}
-
-/* PFC */
-static struct resource sh7372_pfc_resources[] = {
- [0] = {
- .start = 0xe6050000,
- .end = 0xe6057fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 0xe605800c,
- .end = 0xe6058027,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device sh7372_pfc_device = {
- .name = "pfc-sh7372",
- .id = -1,
- .resource = sh7372_pfc_resources,
- .num_resources = ARRAY_SIZE(sh7372_pfc_resources),
-};
-
-void __init sh7372_pinmux_init(void)
-{
- platform_device_register(&sh7372_pfc_device);
-}
-
-/* SCIF */
-#define SH7372_SCIF(scif_type, index, baseaddr, irq) \
-static struct plat_sci_port scif##index##_platform_data = { \
- .type = scif_type, \
- .flags = UPF_BOOT_AUTOCONF, \
- .scscr = SCSCR_RE | SCSCR_TE, \
-}; \
- \
-static struct resource scif##index##_resources[] = { \
- DEFINE_RES_MEM(baseaddr, 0x100), \
- DEFINE_RES_IRQ(irq), \
-}; \
- \
-static struct platform_device scif##index##_device = { \
- .name = "sh-sci", \
- .id = index, \
- .resource = scif##index##_resources, \
- .num_resources = ARRAY_SIZE(scif##index##_resources), \
- .dev = { \
- .platform_data = &scif##index##_platform_data, \
- }, \
-}
-
-SH7372_SCIF(PORT_SCIFA, 0, 0xe6c40000, evt2irq(0x0c00));
-SH7372_SCIF(PORT_SCIFA, 1, 0xe6c50000, evt2irq(0x0c20));
-SH7372_SCIF(PORT_SCIFA, 2, 0xe6c60000, evt2irq(0x0c40));
-SH7372_SCIF(PORT_SCIFA, 3, 0xe6c70000, evt2irq(0x0c60));
-SH7372_SCIF(PORT_SCIFA, 4, 0xe6c80000, evt2irq(0x0d20));
-SH7372_SCIF(PORT_SCIFA, 5, 0xe6cb0000, evt2irq(0x0d40));
-SH7372_SCIF(PORT_SCIFB, 6, 0xe6c30000, evt2irq(0x0d60));
-
-/* CMT */
-static struct sh_timer_config cmt2_platform_data = {
- .channels_mask = 0x20,
-};
-
-static struct resource cmt2_resources[] = {
- DEFINE_RES_MEM(0xe6130000, 0x50),
- DEFINE_RES_IRQ(evt2irq(0x0b80)),
-};
-
-static struct platform_device cmt2_device = {
- .name = "sh-cmt-32-fast",
- .id = 2,
- .dev = {
- .platform_data = &cmt2_platform_data,
- },
- .resource = cmt2_resources,
- .num_resources = ARRAY_SIZE(cmt2_resources),
-};
-
-/* TMU */
-static struct sh_timer_config tmu0_platform_data = {
- .channels_mask = 7,
-};
-
-static struct resource tmu0_resources[] = {
- DEFINE_RES_MEM(0xfff60000, 0x2c),
- DEFINE_RES_IRQ(intcs_evt2irq(0xe80)),
- DEFINE_RES_IRQ(intcs_evt2irq(0xea0)),
- DEFINE_RES_IRQ(intcs_evt2irq(0xec0)),
-};
-
-static struct platform_device tmu0_device = {
- .name = "sh-tmu",
- .id = 0,
- .dev = {
- .platform_data = &tmu0_platform_data,
- },
- .resource = tmu0_resources,
- .num_resources = ARRAY_SIZE(tmu0_resources),
-};
-
-/* I2C */
-static struct resource iic0_resources[] = {
- [0] = {
- .name = "IIC0",
- .start = 0xFFF20000,
- .end = 0xFFF20425 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = intcs_evt2irq(0xe00), /* IIC0_ALI0 */
- .end = intcs_evt2irq(0xe60), /* IIC0_DTEI0 */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device iic0_device = {
- .name = "i2c-sh_mobile",
- .id = 0, /* "i2c0" clock */
- .num_resources = ARRAY_SIZE(iic0_resources),
- .resource = iic0_resources,
-};
-
-static struct resource iic1_resources[] = {
- [0] = {
- .name = "IIC1",
- .start = 0xE6C20000,
- .end = 0xE6C20425 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = evt2irq(0x780), /* IIC1_ALI1 */
- .end = evt2irq(0x7e0), /* IIC1_DTEI1 */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device iic1_device = {
- .name = "i2c-sh_mobile",
- .id = 1, /* "i2c1" clock */
- .num_resources = ARRAY_SIZE(iic1_resources),
- .resource = iic1_resources,
-};
-
-/* DMA */
-static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
- {
- .slave_id = SHDMA_SLAVE_SCIF0_TX,
- .addr = 0xe6c40020,
- .chcr = CHCR_TX(XMIT_SZ_8BIT),
- .mid_rid = 0x21,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF0_RX,
- .addr = 0xe6c40024,
- .chcr = CHCR_RX(XMIT_SZ_8BIT),
- .mid_rid = 0x22,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF1_TX,
- .addr = 0xe6c50020,
- .chcr = CHCR_TX(XMIT_SZ_8BIT),
- .mid_rid = 0x25,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF1_RX,
- .addr = 0xe6c50024,
- .chcr = CHCR_RX(XMIT_SZ_8BIT),
- .mid_rid = 0x26,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF2_TX,
- .addr = 0xe6c60020,
- .chcr = CHCR_TX(XMIT_SZ_8BIT),
- .mid_rid = 0x29,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF2_RX,
- .addr = 0xe6c60024,
- .chcr = CHCR_RX(XMIT_SZ_8BIT),
- .mid_rid = 0x2a,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF3_TX,
- .addr = 0xe6c70020,
- .chcr = CHCR_TX(XMIT_SZ_8BIT),
- .mid_rid = 0x2d,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF3_RX,
- .addr = 0xe6c70024,
- .chcr = CHCR_RX(XMIT_SZ_8BIT),
- .mid_rid = 0x2e,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF4_TX,
- .addr = 0xe6c80020,
- .chcr = CHCR_TX(XMIT_SZ_8BIT),
- .mid_rid = 0x39,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF4_RX,
- .addr = 0xe6c80024,
- .chcr = CHCR_RX(XMIT_SZ_8BIT),
- .mid_rid = 0x3a,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF5_TX,
- .addr = 0xe6cb0020,
- .chcr = CHCR_TX(XMIT_SZ_8BIT),
- .mid_rid = 0x35,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF5_RX,
- .addr = 0xe6cb0024,
- .chcr = CHCR_RX(XMIT_SZ_8BIT),
- .mid_rid = 0x36,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF6_TX,
- .addr = 0xe6c30040,
- .chcr = CHCR_TX(XMIT_SZ_8BIT),
- .mid_rid = 0x3d,
- }, {
- .slave_id = SHDMA_SLAVE_SCIF6_RX,
- .addr = 0xe6c30060,
- .chcr = CHCR_RX(XMIT_SZ_8BIT),
- .mid_rid = 0x3e,
- }, {
- .slave_id = SHDMA_SLAVE_FLCTL0_TX,
- .addr = 0xe6a30050,
- .chcr = CHCR_TX(XMIT_SZ_32BIT),
- .mid_rid = 0x83,
- }, {
- .slave_id = SHDMA_SLAVE_FLCTL0_RX,
- .addr = 0xe6a30050,
- .chcr = CHCR_RX(XMIT_SZ_32BIT),
- .mid_rid = 0x83,
- }, {
- .slave_id = SHDMA_SLAVE_FLCTL1_TX,
- .addr = 0xe6a30060,
- .chcr = CHCR_TX(XMIT_SZ_32BIT),
- .mid_rid = 0x87,
- }, {
- .slave_id = SHDMA_SLAVE_FLCTL1_RX,
- .addr = 0xe6a30060,
- .chcr = CHCR_RX(XMIT_SZ_32BIT),
- .mid_rid = 0x87,
- }, {
- .slave_id = SHDMA_SLAVE_SDHI0_TX,
- .addr = 0xe6850030,
- .chcr = CHCR_TX(XMIT_SZ_16BIT),
- .mid_rid = 0xc1,
- }, {
- .slave_id = SHDMA_SLAVE_SDHI0_RX,
- .addr = 0xe6850030,
- .chcr = CHCR_RX(XMIT_SZ_16BIT),
- .mid_rid = 0xc2,
- }, {
- .slave_id = SHDMA_SLAVE_SDHI1_TX,
- .addr = 0xe6860030,
- .chcr = CHCR_TX(XMIT_SZ_16BIT),
- .mid_rid = 0xc9,
- }, {
- .slave_id = SHDMA_SLAVE_SDHI1_RX,
- .addr = 0xe6860030,
- .chcr = CHCR_RX(XMIT_SZ_16BIT),
- .mid_rid = 0xca,
- }, {
- .slave_id = SHDMA_SLAVE_SDHI2_TX,
- .addr = 0xe6870030,
- .chcr = CHCR_TX(XMIT_SZ_16BIT),
- .mid_rid = 0xcd,
- }, {
- .slave_id = SHDMA_SLAVE_SDHI2_RX,
- .addr = 0xe6870030,
- .chcr = CHCR_RX(XMIT_SZ_16BIT),
- .mid_rid = 0xce,
- }, {
- .slave_id = SHDMA_SLAVE_FSIA_TX,
- .addr = 0xfe1f0024,
- .chcr = CHCR_TX(XMIT_SZ_32BIT),
- .mid_rid = 0xb1,
- }, {
- .slave_id = SHDMA_SLAVE_FSIA_RX,
- .addr = 0xfe1f0020,
- .chcr = CHCR_RX(XMIT_SZ_32BIT),
- .mid_rid = 0xb2,
- }, {
- .slave_id = SHDMA_SLAVE_MMCIF_TX,
- .addr = 0xe6bd0034,
- .chcr = CHCR_TX(XMIT_SZ_32BIT),
- .mid_rid = 0xd1,
- }, {
- .slave_id = SHDMA_SLAVE_MMCIF_RX,
- .addr = 0xe6bd0034,
- .chcr = CHCR_RX(XMIT_SZ_32BIT),
- .mid_rid = 0xd2,
- },
-};
-
-#define SH7372_CHCLR (0x220 - 0x20)
-
-static const struct sh_dmae_channel sh7372_dmae_channels[] = {
- {
- .offset = 0,
- .dmars = 0,
- .dmars_bit = 0,
- .chclr_offset = SH7372_CHCLR + 0,
- }, {
- .offset = 0x10,
- .dmars = 0,
- .dmars_bit = 8,
- .chclr_offset = SH7372_CHCLR + 0x10,
- }, {
- .offset = 0x20,
- .dmars = 4,
- .dmars_bit = 0,
- .chclr_offset = SH7372_CHCLR + 0x20,
- }, {
- .offset = 0x30,
- .dmars = 4,
- .dmars_bit = 8,
- .chclr_offset = SH7372_CHCLR + 0x30,
- }, {
- .offset = 0x50,
- .dmars = 8,
- .dmars_bit = 0,
- .chclr_offset = SH7372_CHCLR + 0x50,
- }, {
- .offset = 0x60,
- .dmars = 8,
- .dmars_bit = 8,
- .chclr_offset = SH7372_CHCLR + 0x60,
- }
-};
-
-static struct sh_dmae_pdata dma_platform_data = {
- .slave = sh7372_dmae_slaves,
- .slave_num = ARRAY_SIZE(sh7372_dmae_slaves),
- .channel = sh7372_dmae_channels,
- .channel_num = ARRAY_SIZE(sh7372_dmae_channels),
- .ts_low_shift = TS_LOW_SHIFT,
- .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
- .ts_high_shift = TS_HI_SHIFT,
- .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
- .ts_shift = dma_ts_shift,
- .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
- .dmaor_init = DMAOR_DME,
- .chclr_present = 1,
-};
-
-/* Resource order important! */
-static struct resource sh7372_dmae0_resources[] = {
- {
- /* Channel registers and DMAOR */
- .start = 0xfe008020,
- .end = 0xfe00828f,
- .flags = IORESOURCE_MEM,
- },
- {
- /* DMARSx */
- .start = 0xfe009000,
- .end = 0xfe00900b,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "error_irq",
- .start = evt2irq(0x20c0),
- .end = evt2irq(0x20c0),
- .flags = IORESOURCE_IRQ,
- },
- {
- /* IRQ for channels 0-5 */
- .start = evt2irq(0x2000),
- .end = evt2irq(0x20a0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-/* Resource order important! */
-static struct resource sh7372_dmae1_resources[] = {
- {
- /* Channel registers and DMAOR */
- .start = 0xfe018020,
- .end = 0xfe01828f,
- .flags = IORESOURCE_MEM,
- },
- {
- /* DMARSx */
- .start = 0xfe019000,
- .end = 0xfe01900b,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "error_irq",
- .start = evt2irq(0x21c0),
- .end = evt2irq(0x21c0),
- .flags = IORESOURCE_IRQ,
- },
- {
- /* IRQ for channels 0-5 */
- .start = evt2irq(0x2100),
- .end = evt2irq(0x21a0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-/* Resource order important! */
-static struct resource sh7372_dmae2_resources[] = {
- {
- /* Channel registers and DMAOR */
- .start = 0xfe028020,
- .end = 0xfe02828f,
- .flags = IORESOURCE_MEM,
- },
- {
- /* DMARSx */
- .start = 0xfe029000,
- .end = 0xfe02900b,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "error_irq",
- .start = evt2irq(0x22c0),
- .end = evt2irq(0x22c0),
- .flags = IORESOURCE_IRQ,
- },
- {
- /* IRQ for channels 0-5 */
- .start = evt2irq(0x2200),
- .end = evt2irq(0x22a0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device dma0_device = {
- .name = "sh-dma-engine",
- .id = 0,
- .resource = sh7372_dmae0_resources,
- .num_resources = ARRAY_SIZE(sh7372_dmae0_resources),
- .dev = {
- .platform_data = &dma_platform_data,
- },
-};
-
-static struct platform_device dma1_device = {
- .name = "sh-dma-engine",
- .id = 1,
- .resource = sh7372_dmae1_resources,
- .num_resources = ARRAY_SIZE(sh7372_dmae1_resources),
- .dev = {
- .platform_data = &dma_platform_data,
- },
-};
-
-static struct platform_device dma2_device = {
- .name = "sh-dma-engine",
- .id = 2,
- .resource = sh7372_dmae2_resources,
- .num_resources = ARRAY_SIZE(sh7372_dmae2_resources),
- .dev = {
- .platform_data = &dma_platform_data,
- },
-};
-
-/*
- * USB-DMAC
- */
-static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = {
- {
- .offset = 0,
- }, {
- .offset = 0x20,
- },
-};
-
-/* USB DMAC0 */
-static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = {
- {
- .slave_id = SHDMA_SLAVE_USB0_TX,
- .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
- }, {
- .slave_id = SHDMA_SLAVE_USB0_RX,
- .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
- },
-};
-
-static struct sh_dmae_pdata usb_dma0_platform_data = {
- .slave = sh7372_usb_dmae0_slaves,
- .slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves),
- .channel = sh7372_usb_dmae_channels,
- .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
- .ts_low_shift = USBTS_LOW_SHIFT,
- .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT,
- .ts_high_shift = USBTS_HI_SHIFT,
- .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT,
- .ts_shift = dma_usbts_shift,
- .ts_shift_num = ARRAY_SIZE(dma_usbts_shift),
- .dmaor_init = DMAOR_DME,
- .chcr_offset = 0x14,
- .chcr_ie_bit = 1 << 5,
- .dmaor_is_32bit = 1,
- .needs_tend_set = 1,
- .no_dmars = 1,
- .slave_only = 1,
-};
-
-static struct resource sh7372_usb_dmae0_resources[] = {
- {
- /* Channel registers and DMAOR */
- .start = 0xe68a0020,
- .end = 0xe68a0064 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- /* VCR/SWR/DMICR */
- .start = 0xe68a0000,
- .end = 0xe68a0014 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- /* IRQ for channels */
- .start = evt2irq(0x0a00),
- .end = evt2irq(0x0a00),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device usb_dma0_device = {
- .name = "sh-dma-engine",
- .id = 3,
- .resource = sh7372_usb_dmae0_resources,
- .num_resources = ARRAY_SIZE(sh7372_usb_dmae0_resources),
- .dev = {
- .platform_data = &usb_dma0_platform_data,
- },
-};
-
-/* USB DMAC1 */
-static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = {
- {
- .slave_id = SHDMA_SLAVE_USB1_TX,
- .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
- }, {
- .slave_id = SHDMA_SLAVE_USB1_RX,
- .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
- },
-};
-
-static struct sh_dmae_pdata usb_dma1_platform_data = {
- .slave = sh7372_usb_dmae1_slaves,
- .slave_num = ARRAY_SIZE(sh7372_usb_dmae1_slaves),
- .channel = sh7372_usb_dmae_channels,
- .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
- .ts_low_shift = USBTS_LOW_SHIFT,
- .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT,
- .ts_high_shift = USBTS_HI_SHIFT,
- .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT,
- .ts_shift = dma_usbts_shift,
- .ts_shift_num = ARRAY_SIZE(dma_usbts_shift),
- .dmaor_init = DMAOR_DME,
- .chcr_offset = 0x14,
- .chcr_ie_bit = 1 << 5,
- .dmaor_is_32bit = 1,
- .needs_tend_set = 1,
- .no_dmars = 1,
- .slave_only = 1,
-};
-
-static struct resource sh7372_usb_dmae1_resources[] = {
- {
- /* Channel registers and DMAOR */
- .start = 0xe68c0020,
- .end = 0xe68c0064 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- /* VCR/SWR/DMICR */
- .start = 0xe68c0000,
- .end = 0xe68c0014 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- /* IRQ for channels */
- .start = evt2irq(0x1d00),
- .end = evt2irq(0x1d00),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device usb_dma1_device = {
- .name = "sh-dma-engine",
- .id = 4,
- .resource = sh7372_usb_dmae1_resources,
- .num_resources = ARRAY_SIZE(sh7372_usb_dmae1_resources),
- .dev = {
- .platform_data = &usb_dma1_platform_data,
- },
-};
-
-/* VPU */
-static struct uio_info vpu_platform_data = {
- .name = "VPU5HG",
- .version = "0",
- .irq = intcs_evt2irq(0x980),
-};
-
-static struct resource vpu_resources[] = {
- [0] = {
- .name = "VPU",
- .start = 0xfe900000,
- .end = 0xfe900157,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device vpu_device = {
- .name = "uio_pdrv_genirq",
- .id = 0,
- .dev = {
- .platform_data = &vpu_platform_data,
- },
- .resource = vpu_resources,
- .num_resources = ARRAY_SIZE(vpu_resources),
-};
-
-/* VEU0 */
-static struct uio_info veu0_platform_data = {
- .name = "VEU0",
- .version = "0",
- .irq = intcs_evt2irq(0x700),
-};
-
-static struct resource veu0_resources[] = {
- [0] = {
- .name = "VEU0",
- .start = 0xfe920000,
- .end = 0xfe9200cb,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device veu0_device = {
- .name = "uio_pdrv_genirq",
- .id = 1,
- .dev = {
- .platform_data = &veu0_platform_data,
- },
- .resource = veu0_resources,
- .num_resources = ARRAY_SIZE(veu0_resources),
-};
-
-/* VEU1 */
-static struct uio_info veu1_platform_data = {
- .name = "VEU1",
- .version = "0",
- .irq = intcs_evt2irq(0x720),
-};
-
-static struct resource veu1_resources[] = {
- [0] = {
- .name = "VEU1",
- .start = 0xfe924000,
- .end = 0xfe9240cb,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device veu1_device = {
- .name = "uio_pdrv_genirq",
- .id = 2,
- .dev = {
- .platform_data = &veu1_platform_data,
- },
- .resource = veu1_resources,
- .num_resources = ARRAY_SIZE(veu1_resources),
-};
-
-/* VEU2 */
-static struct uio_info veu2_platform_data = {
- .name = "VEU2",
- .version = "0",
- .irq = intcs_evt2irq(0x740),
-};
-
-static struct resource veu2_resources[] = {
- [0] = {
- .name = "VEU2",
- .start = 0xfe928000,
- .end = 0xfe928307,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device veu2_device = {
- .name = "uio_pdrv_genirq",
- .id = 3,
- .dev = {
- .platform_data = &veu2_platform_data,
- },
- .resource = veu2_resources,
- .num_resources = ARRAY_SIZE(veu2_resources),
-};
-
-/* VEU3 */
-static struct uio_info veu3_platform_data = {
- .name = "VEU3",
- .version = "0",
- .irq = intcs_evt2irq(0x760),
-};
-
-static struct resource veu3_resources[] = {
- [0] = {
- .name = "VEU3",
- .start = 0xfe92c000,
- .end = 0xfe92c307,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device veu3_device = {
- .name = "uio_pdrv_genirq",
- .id = 4,
- .dev = {
- .platform_data = &veu3_platform_data,
- },
- .resource = veu3_resources,
- .num_resources = ARRAY_SIZE(veu3_resources),
-};
-
-/* JPU */
-static struct uio_info jpu_platform_data = {
- .name = "JPU",
- .version = "0",
- .irq = intcs_evt2irq(0x560),
-};
-
-static struct resource jpu_resources[] = {
- [0] = {
- .name = "JPU",
- .start = 0xfe980000,
- .end = 0xfe9902d3,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device jpu_device = {
- .name = "uio_pdrv_genirq",
- .id = 5,
- .dev = {
- .platform_data = &jpu_platform_data,
- },
- .resource = jpu_resources,
- .num_resources = ARRAY_SIZE(jpu_resources),
-};
-
-/* SPU2DSP0 */
-static struct uio_info spu0_platform_data = {
- .name = "SPU2DSP0",
- .version = "0",
- .irq = evt2irq(0x1800),
-};
-
-static struct resource spu0_resources[] = {
- [0] = {
- .name = "SPU2DSP0",
- .start = 0xfe200000,
- .end = 0xfe2fffff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device spu0_device = {
- .name = "uio_pdrv_genirq",
- .id = 6,
- .dev = {
- .platform_data = &spu0_platform_data,
- },
- .resource = spu0_resources,
- .num_resources = ARRAY_SIZE(spu0_resources),
-};
-
-/* SPU2DSP1 */
-static struct uio_info spu1_platform_data = {
- .name = "SPU2DSP1",
- .version = "0",
- .irq = evt2irq(0x1820),
-};
-
-static struct resource spu1_resources[] = {
- [0] = {
- .name = "SPU2DSP1",
- .start = 0xfe300000,
- .end = 0xfe3fffff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device spu1_device = {
- .name = "uio_pdrv_genirq",
- .id = 7,
- .dev = {
- .platform_data = &spu1_platform_data,
- },
- .resource = spu1_resources,
- .num_resources = ARRAY_SIZE(spu1_resources),
-};
-
-/* IPMMUI (an IPMMU module for ICB/LMB) */
-static struct resource ipmmu_resources[] = {
- [0] = {
- .name = "IPMMUI",
- .start = 0xfe951000,
- .end = 0xfe9510ff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static const char * const ipmmu_dev_names[] = {
- "sh_mobile_lcdc_fb.0",
- "sh_mobile_lcdc_fb.1",
- "sh_mobile_ceu.0",
- "uio_pdrv_genirq.0",
- "uio_pdrv_genirq.1",
- "uio_pdrv_genirq.2",
- "uio_pdrv_genirq.3",
- "uio_pdrv_genirq.4",
- "uio_pdrv_genirq.5",
-};
-
-static struct shmobile_ipmmu_platform_data ipmmu_platform_data = {
- .dev_names = ipmmu_dev_names,
- .num_dev_names = ARRAY_SIZE(ipmmu_dev_names),
-};
-
-static struct platform_device ipmmu_device = {
- .name = "ipmmu",
- .id = -1,
- .dev = {
- .platform_data = &ipmmu_platform_data,
- },
- .resource = ipmmu_resources,
- .num_resources = ARRAY_SIZE(ipmmu_resources),
-};
-
-static struct platform_device *sh7372_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
- &scif6_device,
- &cmt2_device,
- &tmu0_device,
- &ipmmu_device,
-};
-
-static struct platform_device *sh7372_late_devices[] __initdata = {
- &iic0_device,
- &iic1_device,
- &dma0_device,
- &dma1_device,
- &dma2_device,
- &usb_dma0_device,
- &usb_dma1_device,
- &vpu_device,
- &veu0_device,
- &veu1_device,
- &veu2_device,
- &veu3_device,
- &jpu_device,
- &spu0_device,
- &spu1_device,
-};
-
-void __init sh7372_add_standard_devices(void)
-{
- static struct pm_domain_device domain_devices[] __initdata = {
- { "A3RV", &vpu_device, },
- { "A4MP", &spu0_device, },
- { "A4MP", &spu1_device, },
- { "A3SP", &scif0_device, },
- { "A3SP", &scif1_device, },
- { "A3SP", &scif2_device, },
- { "A3SP", &scif3_device, },
- { "A3SP", &scif4_device, },
- { "A3SP", &scif5_device, },
- { "A3SP", &scif6_device, },
- { "A3SP", &iic1_device, },
- { "A3SP", &dma0_device, },
- { "A3SP", &dma1_device, },
- { "A3SP", &dma2_device, },
- { "A3SP", &usb_dma0_device, },
- { "A3SP", &usb_dma1_device, },
- { "A4R", &iic0_device, },
- { "A4R", &veu0_device, },
- { "A4R", &veu1_device, },
- { "A4R", &veu2_device, },
- { "A4R", &veu3_device, },
- { "A4R", &jpu_device, },
- { "A4R", &tmu0_device, },
- };
-
- sh7372_init_pm_domains();
-
- platform_add_devices(sh7372_early_devices,
- ARRAY_SIZE(sh7372_early_devices));
-
- platform_add_devices(sh7372_late_devices,
- ARRAY_SIZE(sh7372_late_devices));
-
- rmobile_add_devices_to_domains(domain_devices,
- ARRAY_SIZE(domain_devices));
-}
-
-void __init sh7372_earlytimer_init(void)
-{
- sh7372_clock_init();
- shmobile_earlytimer_init();
-}
-
-void __init sh7372_add_early_devices(void)
-{
- early_platform_add_devices(sh7372_early_devices,
- ARRAY_SIZE(sh7372_early_devices));
-
- /* setup early console here as well */
- shmobile_setup_console();
-}
-
-#ifdef CONFIG_USE_OF
-
-void __init sh7372_add_early_devices_dt(void)
-{
- shmobile_init_delay();
-
- sh7372_add_early_devices();
-}
-
-void __init sh7372_add_standard_devices_dt(void)
-{
- /* clocks are setup late during boot in the case of DT */
- sh7372_clock_init();
-
- platform_add_devices(sh7372_early_devices,
- ARRAY_SIZE(sh7372_early_devices));
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
-static const char *sh7372_boards_compat_dt[] __initdata = {
- "renesas,sh7372",
- NULL,
-};
-
-DT_MACHINE_START(SH7372_DT, "Generic SH7372 (Flattened Device Tree)")
- .map_io = sh7372_map_io,
- .init_early = sh7372_add_early_devices_dt,
- .init_irq = sh7372_init_irq,
- .handle_irq = shmobile_handle_irq_intc,
- .init_machine = sh7372_add_standard_devices_dt,
- .init_late = shmobile_init_late,
- .dt_compat = sh7372_boards_compat_dt,
-MACHINE_END
-
-#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index faea74a2151b..fb2ab7590af8 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -30,6 +30,7 @@
#include <linux/platform_data/sh_ipmmu.h>
#include <linux/platform_data/irq-renesas-intc-irqpin.h>
+#include <asm/hardware/cache-l2x0.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
@@ -784,22 +785,15 @@ void __init sh73a0_add_early_devices(void)
#ifdef CONFIG_USE_OF
-void __init sh73a0_add_standard_devices_dt(void)
+static void __init sh73a0_generic_init(void)
{
- /* clocks are setup late during boot in the case of DT */
-#ifndef CONFIG_COMMON_CLK
- sh73a0_clock_init();
+#ifdef CONFIG_CACHE_L2X0
+ /* Shared attribute override enable, 64K*8way */
+ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
#endif
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-#define RESCNT2 IOMEM(0xe6188020)
-static void sh73a0_restart(enum reboot_mode mode, const char *cmd)
-{
- /* Do soft power on reset */
- writel((1 << 31), RESCNT2);
-}
-
static const char *sh73a0_boards_compat_dt[] __initdata = {
"renesas,sh73a0",
NULL,
@@ -809,9 +803,8 @@ DT_MACHINE_START(SH73A0_DT, "Generic SH73A0 (Flattened Device Tree)")
.smp = smp_ops(sh73a0_smp_ops),
.map_io = sh73a0_map_io,
.init_early = shmobile_init_delay,
- .init_machine = sh73a0_add_standard_devices_dt,
+ .init_machine = sh73a0_generic_init,
.init_late = shmobile_init_late,
- .restart = sh73a0_restart,
.dt_compat = sh73a0_boards_compat_dt,
MACHINE_END
#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/sh7372.h b/arch/arm/mach-shmobile/sh7372.h
deleted file mode 100644
index 4ad960d5075b..000000000000
--- a/arch/arm/mach-shmobile/sh7372.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2010 Renesas Solutions Corp.
- *
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ASM_SH7372_H__
-#define __ASM_SH7372_H__
-
-/* DMA slave IDs */
-enum {
- SHDMA_SLAVE_INVALID,
- SHDMA_SLAVE_SCIF0_TX,
- SHDMA_SLAVE_SCIF0_RX,
- SHDMA_SLAVE_SCIF1_TX,
- SHDMA_SLAVE_SCIF1_RX,
- SHDMA_SLAVE_SCIF2_TX,
- SHDMA_SLAVE_SCIF2_RX,
- SHDMA_SLAVE_SCIF3_TX,
- SHDMA_SLAVE_SCIF3_RX,
- SHDMA_SLAVE_SCIF4_TX,
- SHDMA_SLAVE_SCIF4_RX,
- SHDMA_SLAVE_SCIF5_TX,
- SHDMA_SLAVE_SCIF5_RX,
- SHDMA_SLAVE_SCIF6_TX,
- SHDMA_SLAVE_SCIF6_RX,
- SHDMA_SLAVE_FLCTL0_TX,
- SHDMA_SLAVE_FLCTL0_RX,
- SHDMA_SLAVE_FLCTL1_TX,
- SHDMA_SLAVE_FLCTL1_RX,
- SHDMA_SLAVE_SDHI0_RX,
- SHDMA_SLAVE_SDHI0_TX,
- SHDMA_SLAVE_SDHI1_RX,
- SHDMA_SLAVE_SDHI1_TX,
- SHDMA_SLAVE_SDHI2_RX,
- SHDMA_SLAVE_SDHI2_TX,
- SHDMA_SLAVE_FSIA_RX,
- SHDMA_SLAVE_FSIA_TX,
- SHDMA_SLAVE_MMCIF_RX,
- SHDMA_SLAVE_MMCIF_TX,
- SHDMA_SLAVE_USB0_TX,
- SHDMA_SLAVE_USB0_RX,
- SHDMA_SLAVE_USB1_TX,
- SHDMA_SLAVE_USB1_RX,
-};
-
-extern struct clk sh7372_extal1_clk;
-extern struct clk sh7372_extal2_clk;
-extern struct clk sh7372_dv_clki_clk;
-extern struct clk sh7372_dv_clki_div2_clk;
-extern struct clk sh7372_pllc2_clk;
-
-extern void sh7372_init_irq(void);
-extern void sh7372_map_io(void);
-extern void sh7372_earlytimer_init(void);
-extern void sh7372_add_early_devices(void);
-extern void sh7372_add_standard_devices(void);
-extern void sh7372_add_early_devices_dt(void);
-extern void sh7372_add_standard_devices_dt(void);
-extern void sh7372_clock_init(void);
-extern void sh7372_pinmux_init(void);
-extern void sh7372_pm_init(void);
-extern void sh7372_resume_core_standby_sysc(void);
-extern int sh7372_do_idle_sysc(unsigned long sleep_mode);
-extern void sh7372_intcs_suspend(void);
-extern void sh7372_intcs_resume(void);
-extern void sh7372_intca_suspend(void);
-extern void sh7372_intca_resume(void);
-
-extern unsigned long sh7372_cpu_resume;
-
-#ifdef CONFIG_PM
-extern void __init sh7372_init_pm_domains(void);
-#else
-static inline void sh7372_init_pm_domains(void) {}
-#endif
-
-extern void __init sh7372_pm_init_late(void);
-
-#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/sh73a0.h b/arch/arm/mach-shmobile/sh73a0.h
index f037c64b14fc..5a80f18b4fa0 100644
--- a/arch/arm/mach-shmobile/sh73a0.h
+++ b/arch/arm/mach-shmobile/sh73a0.h
@@ -77,7 +77,6 @@ extern void sh73a0_map_io(void);
extern void sh73a0_earlytimer_init(void);
extern void sh73a0_add_early_devices(void);
extern void sh73a0_add_standard_devices(void);
-extern void sh73a0_add_standard_devices_dt(void);
extern void sh73a0_clock_init(void);
extern void sh73a0_pinmux_init(void);
extern void sh73a0_pm_init(void);
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
deleted file mode 100644
index 146b8de16432..000000000000
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * sh7372 lowlevel sleep code for "Core Standby Mode"
- *
- * Copyright (C) 2011 Magnus Damm
- *
- * In "Core Standby Mode" the ARM core is off, but L2 cache is still on
- *
- * Based on mach-omap2/sleep34xx.S
- *
- * (C) Copyright 2007 Texas Instruments
- * Karthik Dasu <karthik-dp@ti.com>
- *
- * (C) Copyright 2004 Texas Instruments, <www.ti.com>
- * Richard Woodruff <r-woodruff2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/memory.h>
-#include <asm/assembler.h>
-
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
- .align 12
- .text
- .global sh7372_resume_core_standby_sysc
-sh7372_resume_core_standby_sysc:
- ldr pc, 1f
-
- .align 2
- .globl sh7372_cpu_resume
-sh7372_cpu_resume:
-1: .space 4
-
-#define SPDCR 0xe6180008
-
- /* A3SM & A4S power down */
- .global sh7372_do_idle_sysc
-sh7372_do_idle_sysc:
- mov r8, r0 /* sleep mode passed in r0 */
-
- /*
- * Clear the SCTLR.C bit to prevent further data cache
- * allocation. Clearing SCTLR.C would make all the data accesses
- * strongly ordered and would not hit the cache.
- */
- mrc p15, 0, r0, c1, c0, 0
- bic r0, r0, #(1 << 2) @ Disable the C bit
- mcr p15, 0, r0, c1, c0, 0
- isb
-
- /*
- * Clean and invalidate data cache again.
- */
- ldr r1, kernel_flush
- blx r1
-
- /* disable L2 cache in the aux control register */
- mrc p15, 0, r10, c1, c0, 1
- bic r10, r10, #2
- mcr p15, 0, r10, c1, c0, 1
- isb
-
- /*
- * The kernel doesn't interwork: v7_flush_dcache_all in particular will
- * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
- * This sequence switches back to ARM. Note that .align may insert a
- * nop: bx pc needs to be word-aligned in order to work.
- */
- THUMB( .thumb )
- THUMB( .align )
- THUMB( bx pc )
- THUMB( nop )
- .arm
-
- /* Data memory barrier and Data sync barrier */
- dsb
- dmb
-
- /* SYSC power down */
- ldr r0, =SPDCR
- str r8, [r0]
-1:
- b 1b
-
- .align 2
-kernel_flush:
- .word v7_flush_dcache_all
-#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 9fc280e24ef4..01f792fcb220 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -124,19 +124,12 @@ static int r8a7779_cpu_kill(unsigned int cpu)
return 0;
}
-
-static int r8a7779_cpu_disable(unsigned int cpu)
-{
- /* only CPU1->3 have power domains, do not allow hotplug of CPU0 */
- return cpu == 0 ? -EPERM : 0;
-}
#endif /* CONFIG_HOTPLUG_CPU */
struct smp_operations r8a7779_smp_ops __initdata = {
.smp_prepare_cpus = r8a7779_smp_prepare_cpus,
.smp_boot_secondary = r8a7779_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = r8a7779_cpu_disable,
.cpu_die = shmobile_smp_scu_cpu_die,
.cpu_kill = r8a7779_cpu_kill,
#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 9c3da1345b8b..930f45cbc08a 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -23,6 +23,7 @@
#include "common.h"
#include "platsmp-apmu.h"
#include "pm-rcar.h"
+#include "rcar-gen2.h"
#include "r8a7790.h"
static struct rcar_sysc_ch r8a7790_ca15_scu = {
@@ -37,11 +38,11 @@ static struct rcar_sysc_ch r8a7790_ca7_scu = {
static struct rcar_apmu_config r8a7790_apmu_config[] = {
{
- .iomem = DEFINE_RES_MEM(0xe6152000, 0x88),
+ .iomem = DEFINE_RES_MEM(0xe6152000, 0x188),
.cpus = { 0, 1, 2, 3 },
},
{
- .iomem = DEFINE_RES_MEM(0xe6151000, 0x88),
+ .iomem = DEFINE_RES_MEM(0xe6151000, 0x188),
.cpus = { 0x100, 0x0101, 0x102, 0x103 },
}
};
@@ -54,7 +55,7 @@ static void __init r8a7790_smp_prepare_cpus(unsigned int max_cpus)
ARRAY_SIZE(r8a7790_apmu_config));
/* turn on power to SCU */
- r8a7790_pm_init();
+ rcar_gen2_pm_init();
rcar_sysc_power_up(&r8a7790_ca15_scu);
rcar_sysc_power_up(&r8a7790_ca7_scu);
}
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 7e49e0a52e32..5e2d1db79afa 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -27,7 +27,7 @@
static struct rcar_apmu_config r8a7791_apmu_config[] = {
{
- .iomem = DEFINE_RES_MEM(0xe6152000, 0x88),
+ .iomem = DEFINE_RES_MEM(0xe6152000, 0x188),
.cpus = { 0, 1 },
}
};
@@ -39,7 +39,7 @@ static void __init r8a7791_smp_prepare_cpus(unsigned int max_cpus)
r8a7791_apmu_config,
ARRAY_SIZE(r8a7791_apmu_config));
- r8a7791_pm_init();
+ rcar_gen2_pm_init();
}
static int r8a7791_smp_boot_secondary(unsigned int cpu,
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index c16dbfe9836c..2106d6b76a06 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -33,7 +33,7 @@
#define SH73A0_SCU_BASE 0xf0000000
-#ifdef CONFIG_HAVE_ARM_TWD
+#if defined(CONFIG_HAVE_ARM_TWD) && !defined(CONFIG_ARCH_MULTIPLATFORM)
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, SH73A0_SCU_BASE + 0x600, 29);
void __init sh73a0_register_twd(void)
{
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 48844ae6c3a1..88de2dce2e87 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
index 84d809a3cba3..4dbe1dae937c 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra30.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 3c2509b4b694..10f9389572da 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -42,6 +42,7 @@ if ARCH_VEXPRESS
config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
bool "Enable A5 and A9 only errata work-arounds"
default y
+ select ARM_ERRATA_643719 if SMP
select ARM_ERRATA_720789
select PL310_ERRATA_753970 if CACHE_L2X0
help
@@ -53,7 +54,7 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
config ARCH_VEXPRESS_DCSCB
bool "Dual Cluster System Control Block (DCSCB) support"
depends on MCPM
- select ARM_CCI
+ select ARM_CCI400_PORT_CTRL
help
Support for the Dual Cluster System Configuration Block (DCSCB).
This is needed to provide CPU and cluster power management
@@ -71,7 +72,7 @@ config ARCH_VEXPRESS_SPC
config ARCH_VEXPRESS_TC2_PM
bool "Versatile Express TC2 power management"
depends on MCPM
- select ARM_CCI
+ select ARM_CCI400_PORT_CTRL
select ARCH_VEXPRESS_SPC
select ARM_CPU_SUSPEND
help
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 30b993399ed7..5cedcf572104 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
-#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
@@ -36,163 +35,102 @@
#define KFC_CFG_W 0x2c
#define DCS_CFG_R 0x30
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() while its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
static void __iomem *dcscb_base;
-static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];
-static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
+static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
unsigned int rst_hold, cpumask = (1 << cpu);
- unsigned int all_mask;
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- if (cpu >= 4 || cluster >= 2)
+ if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
return -EINVAL;
- all_mask = dcscb_allcpus_mask[cluster];
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold &= ~(cpumask | (cpumask << 4));
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ return 0;
+}
- /*
- * Since this is called with IRQs enabled, and no arch_spin_lock_irq
- * variant exists, we need to disable IRQs manually here.
- */
- local_irq_disable();
- arch_spin_lock(&dcscb_lock);
-
- dcscb_use_count[cpu][cluster]++;
- if (dcscb_use_count[cpu][cluster] == 1) {
- rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
- if (rst_hold & (1 << 8)) {
- /* remove cluster reset and add individual CPU's reset */
- rst_hold &= ~(1 << 8);
- rst_hold |= all_mask;
- }
- rst_hold &= ~(cpumask | (cpumask << 4));
- writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
- } else if (dcscb_use_count[cpu][cluster] != 2) {
- /*
- * The only possible values are:
- * 0 = CPU down
- * 1 = CPU (still) up
- * 2 = CPU requested to be up before it had a chance
- * to actually make itself down.
- * Any other value is a bug.
- */
- BUG();
- }
+static int dcscb_cluster_powerup(unsigned int cluster)
+{
+ unsigned int rst_hold;
- arch_spin_unlock(&dcscb_lock);
- local_irq_enable();
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ if (cluster >= 2)
+ return -EINVAL;
+ /* remove cluster reset and add individual CPU's reset */
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold &= ~(1 << 8);
+ rst_hold |= dcscb_allcpus_mask[cluster];
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
return 0;
}
-static void dcscb_power_down(void)
+static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
- bool last_man = false, skip_wfi = false;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpumask = (1 << cpu);
+ unsigned int rst_hold;
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cpu >= 4 || cluster >= 2);
-
- all_mask = dcscb_allcpus_mask[cluster];
-
- __mcpm_cpu_going_down(cpu, cluster);
-
- arch_spin_lock(&dcscb_lock);
- BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
- dcscb_use_count[cpu][cluster]--;
- if (dcscb_use_count[cpu][cluster] == 0) {
- rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
- rst_hold |= cpumask;
- if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
- rst_hold |= (1 << 8);
- last_man = true;
- }
- writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
- } else if (dcscb_use_count[cpu][cluster] == 1) {
- /*
- * A power_up request went ahead of us.
- * Even if we do not want to shut this CPU down,
- * the caller expects a certain state as if the WFI
- * was aborted. So let's continue with cache cleaning.
- */
- skip_wfi = true;
- } else
- BUG();
-
- if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
- arch_spin_unlock(&dcscb_lock);
-
- /* Flush all cache levels for this cluster. */
- v7_exit_coherency_flush(all);
-
- /*
- * A full outer cache flush could be needed at this point
- * on platforms with such a cache, depending on where the
- * outer cache sits. In some cases the notion of a "last
- * cluster standing" would need to be implemented if the
- * outer cache is shared across clusters. In any case, when
- * the outer cache needs flushing, there is no concurrent
- * access to the cache controller to worry about and no
- * special locking besides what is already provided by the
- * MCPM state machinery is needed.
- */
-
- /*
- * Disable cluster-level coherency by masking
- * incoming snoops and DVM messages:
- */
- cci_disable_port_by_cpu(mpidr);
-
- __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
- } else {
- arch_spin_unlock(&dcscb_lock);
-
- /* Disable and flush the local CPU cache. */
- v7_exit_coherency_flush(louis);
- }
+ BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
- __mcpm_cpu_down(cpu, cluster);
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold |= (1 << cpu);
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+}
- /* Now we are prepared for power-down, do it: */
- dsb();
- if (!skip_wfi)
- wfi();
+static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
+{
+ unsigned int rst_hold;
- /* Not dead at this point? Let our caller cope. */
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= 2);
+
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold |= (1 << 8);
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
}
-static const struct mcpm_platform_ops dcscb_power_ops = {
- .power_up = dcscb_power_up,
- .power_down = dcscb_power_down,
-};
+static void dcscb_cpu_cache_disable(void)
+{
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
+}
-static void __init dcscb_usage_count_init(void)
+static void dcscb_cluster_cache_disable(void)
{
- unsigned int mpidr, cpu, cluster;
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ /*
+ * A full outer cache flush could be needed at this point
+ * on platforms with such a cache, depending on where the
+ * outer cache sits. In some cases the notion of a "last
+ * cluster standing" would need to be implemented if the
+ * outer cache is shared across clusters. In any case, when
+ * the outer cache needs flushing, there is no concurrent
+ * access to the cache controller to worry about and no
+ * special locking besides what is already provided by the
+ * MCPM state machinery is needed.
+ */
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cpu >= 4 || cluster >= 2);
- dcscb_use_count[cpu][cluster] = 1;
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ cci_disable_port_by_cpu(read_cpuid_mpidr());
}
+static const struct mcpm_platform_ops dcscb_power_ops = {
+ .cpu_powerup = dcscb_cpu_powerup,
+ .cluster_powerup = dcscb_cluster_powerup,
+ .cpu_powerdown_prepare = dcscb_cpu_powerdown_prepare,
+ .cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
+ .cpu_cache_disable = dcscb_cpu_cache_disable,
+ .cluster_cache_disable = dcscb_cluster_cache_disable,
+};
+
extern void dcscb_power_up_setup(unsigned int affinity_level);
static int __init dcscb_init(void)
@@ -213,7 +151,6 @@ static int __init dcscb_init(void)
cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
- dcscb_usage_count_init();
ret = mcpm_platform_register(&dcscb_power_ops);
if (!ret)
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 2fb78b4648cb..b3328cd46c33 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>
@@ -44,101 +43,36 @@
static void __iomem *scc;
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() after its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
#define TC2_CLUSTERS 2
#define TC2_MAX_CPUS_PER_CLUSTER 3
static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
-/* Keep per-cpu usage count to cope with unordered up/down requests */
-static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
-
-#define tc2_cluster_unused(cluster) \
- (!tc2_pm_use_count[0][cluster] && \
- !tc2_pm_use_count[1][cluster] && \
- !tc2_pm_use_count[2][cluster])
-
-static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
+static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
return -EINVAL;
-
- /*
- * Since this is called with IRQs enabled, and no arch_spin_lock_irq
- * variant exists, we need to disable IRQs manually here.
- */
- local_irq_disable();
- arch_spin_lock(&tc2_pm_lock);
-
- if (tc2_cluster_unused(cluster))
- ve_spc_powerdown(cluster, false);
-
- tc2_pm_use_count[cpu][cluster]++;
- if (tc2_pm_use_count[cpu][cluster] == 1) {
- ve_spc_set_resume_addr(cluster, cpu,
- virt_to_phys(mcpm_entry_point));
- ve_spc_cpu_wakeup_irq(cluster, cpu, true);
- } else if (tc2_pm_use_count[cpu][cluster] != 2) {
- /*
- * The only possible values are:
- * 0 = CPU down
- * 1 = CPU (still) up
- * 2 = CPU requested to be up before it had a chance
- * to actually make itself down.
- * Any other value is a bug.
- */
- BUG();
- }
-
- arch_spin_unlock(&tc2_pm_lock);
- local_irq_enable();
-
+ ve_spc_set_resume_addr(cluster, cpu,
+ virt_to_phys(mcpm_entry_point));
+ ve_spc_cpu_wakeup_irq(cluster, cpu, true);
return 0;
}
-static void tc2_pm_down(u64 residency)
+static int tc2_pm_cluster_powerup(unsigned int cluster)
{
- unsigned int mpidr, cpu, cluster;
- bool last_man = false, skip_wfi = false;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ if (cluster >= TC2_CLUSTERS)
+ return -EINVAL;
+ ve_spc_powerdown(cluster, false);
+ return 0;
+}
+static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
+{
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
-
- __mcpm_cpu_going_down(cpu, cluster);
-
- arch_spin_lock(&tc2_pm_lock);
- BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
- tc2_pm_use_count[cpu][cluster]--;
- if (tc2_pm_use_count[cpu][cluster] == 0) {
- ve_spc_cpu_wakeup_irq(cluster, cpu, true);
- if (tc2_cluster_unused(cluster)) {
- ve_spc_powerdown(cluster, true);
- ve_spc_global_wakeup_irq(true);
- last_man = true;
- }
- } else if (tc2_pm_use_count[cpu][cluster] == 1) {
- /*
- * A power_up request went ahead of us.
- * Even if we do not want to shut this CPU down,
- * the caller expects a certain state as if the WFI
- * was aborted. So let's continue with cache cleaning.
- */
- skip_wfi = true;
- } else
- BUG();
-
+ ve_spc_cpu_wakeup_irq(cluster, cpu, true);
/*
* If the CPU is committed to power down, make sure
* the power controller will be in charge of waking it
@@ -146,55 +80,38 @@ static void tc2_pm_down(u64 residency)
* to the CPU by disabling the GIC CPU IF to prevent wfi
* from completing execution behind power controller back
*/
- if (!skip_wfi)
- gic_cpu_if_down();
-
- if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
- arch_spin_unlock(&tc2_pm_lock);
-
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /*
- * On the Cortex-A15 we need to disable
- * L2 prefetching before flushing the cache.
- */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3 \n\t"
- "isb \n\t"
- "dsb "
- : : "r" (0x400) );
- }
-
- v7_exit_coherency_flush(all);
-
- cci_disable_port_by_cpu(mpidr);
-
- __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
- } else {
- /*
- * If last man then undo any setup done previously.
- */
- if (last_man) {
- ve_spc_powerdown(cluster, false);
- ve_spc_global_wakeup_irq(false);
- }
-
- arch_spin_unlock(&tc2_pm_lock);
-
- v7_exit_coherency_flush(louis);
- }
-
- __mcpm_cpu_down(cpu, cluster);
+ gic_cpu_if_down();
+}
- /* Now we are prepared for power-down, do it: */
- if (!skip_wfi)
- wfi();
+static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS);
+ ve_spc_powerdown(cluster, true);
+ ve_spc_global_wakeup_irq(true);
+}
- /* Not dead at this point? Let our caller cope. */
+static void tc2_pm_cpu_cache_disable(void)
+{
+ v7_exit_coherency_flush(louis);
}
-static void tc2_pm_power_down(void)
+static void tc2_pm_cluster_cache_disable(void)
{
- tc2_pm_down(0);
+ if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
+ /*
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
+ */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+ "isb \n\t"
+ "dsb "
+ : : "r" (0x400) );
+ }
+
+ v7_exit_coherency_flush(all);
+ cci_disable_port_by_cpu(read_cpuid_mpidr());
}
static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
@@ -217,27 +134,21 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
+ pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
+ __func__, cpu, cluster,
+ readl_relaxed(scc + RESET_CTRL));
+
/*
- * Only examine the hardware state if the target CPU has
- * caught up at least as far as tc2_pm_down():
+ * We need the CPU to reach WFI, but the power
+ * controller may put the cluster in reset and
+ * power it off as soon as that happens, before
+ * we have a chance to see STANDBYWFI.
+ *
+ * So we need to check for both conditions:
*/
- if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
- pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
- __func__, cpu, cluster,
- readl_relaxed(scc + RESET_CTRL));
-
- /*
- * We need the CPU to reach WFI, but the power
- * controller may put the cluster in reset and
- * power it off as soon as that happens, before
- * we have a chance to see STANDBYWFI.
- *
- * So we need to check for both conditions:
- */
- if (tc2_core_in_reset(cpu, cluster) ||
- ve_spc_cpu_in_wfi(cpu, cluster))
- return 0; /* success: the CPU is halted */
- }
+ if (tc2_core_in_reset(cpu, cluster) ||
+ ve_spc_cpu_in_wfi(cpu, cluster))
+ return 0; /* success: the CPU is halted */
/* Otherwise, wait and retry: */
msleep(POLL_MSEC);
@@ -246,72 +157,40 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
return -ETIMEDOUT; /* timeout */
}
-static void tc2_pm_suspend(u64 residency)
+static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
- tc2_pm_down(residency);
}
-static void tc2_pm_powered_up(void)
+static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpu, cluster;
- unsigned long flags;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
-
- local_irq_save(flags);
- arch_spin_lock(&tc2_pm_lock);
-
- if (tc2_cluster_unused(cluster)) {
- ve_spc_powerdown(cluster, false);
- ve_spc_global_wakeup_irq(false);
- }
-
- if (!tc2_pm_use_count[cpu][cluster])
- tc2_pm_use_count[cpu][cluster] = 1;
-
ve_spc_cpu_wakeup_irq(cluster, cpu, false);
ve_spc_set_resume_addr(cluster, cpu, 0);
+}
- arch_spin_unlock(&tc2_pm_lock);
- local_irq_restore(flags);
+static void tc2_pm_cluster_is_up(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS);
+ ve_spc_powerdown(cluster, false);
+ ve_spc_global_wakeup_irq(false);
}
static const struct mcpm_platform_ops tc2_pm_power_ops = {
- .power_up = tc2_pm_power_up,
- .power_down = tc2_pm_power_down,
+ .cpu_powerup = tc2_pm_cpu_powerup,
+ .cluster_powerup = tc2_pm_cluster_powerup,
+ .cpu_suspend_prepare = tc2_pm_cpu_suspend_prepare,
+ .cpu_powerdown_prepare = tc2_pm_cpu_powerdown_prepare,
+ .cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
+ .cpu_cache_disable = tc2_pm_cpu_cache_disable,
+ .cluster_cache_disable = tc2_pm_cluster_cache_disable,
.wait_for_powerdown = tc2_pm_wait_for_powerdown,
- .suspend = tc2_pm_suspend,
- .powered_up = tc2_pm_powered_up,
+ .cpu_is_up = tc2_pm_cpu_is_up,
+ .cluster_is_up = tc2_pm_cluster_is_up,
};
-static bool __init tc2_pm_usage_count_init(void)
-{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
- pr_err("%s: boot CPU is out of bound!\n", __func__);
- return false;
- }
- tc2_pm_use_count[cpu][cluster] = 1;
- return true;
-}
-
/*
* Enable cluster-level coherency, in preparation for turning on the MMU.
*/
@@ -323,23 +202,9 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
" b cci_enable_port_for_self ");
}
-static void __init tc2_cache_off(void)
-{
- pr_info("TC2: disabling cache during MCPM loopback test\n");
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /* disable L2 prefetching on the Cortex-A15 */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3 \n\t"
- "isb \n\t"
- "dsb "
- : : "r" (0x400) );
- }
- v7_exit_coherency_flush(all);
- cci_disable_port_by_cpu(read_cpuid_mpidr());
-}
-
static int __init tc2_pm_init(void)
{
+ unsigned int mpidr, cpu, cluster;
int ret, irq;
u32 a15_cluster_id, a7_cluster_id, sys_info;
struct device_node *np;
@@ -379,14 +244,20 @@ static int __init tc2_pm_init(void)
if (!cci_probed())
return -ENODEV;
- if (!tc2_pm_usage_count_init())
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
+ pr_err("%s: boot CPU is out of bound!\n", __func__);
return -EINVAL;
+ }
ret = mcpm_platform_register(&tc2_pm_power_ops);
if (!ret) {
mcpm_sync_init(tc2_pm_power_up_setup);
/* test if we can (re)enable the CCI on our own */
- BUG_ON(mcpm_loopback(tc2_cache_off) != 0);
+ BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
pr_info("TC2 power management initialized\n");
}
return ret;
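
The hunk above strips the use counting, locking and "last man" logic out of the TC2 backend and leaves only the hardware accesses, on the assumption that the MCPM core now owns that bookkeeping and drives the fine-grained callbacks itself. A minimal sketch of how such a core-side caller could sequence the hooks registered above (the lock and counters here are illustrative placeholders, not the core's actual symbols):

static arch_spinlock_t mcpm_lock_sketch = __ARCH_SPIN_LOCK_UNLOCKED;
static int cpu_use_count_sketch[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
static int cluster_use_count_sketch[TC2_CLUSTERS];

static int mcpm_cpu_power_up_sketch(unsigned int cpu, unsigned int cluster,
                                    const struct mcpm_platform_ops *ops)
{
        unsigned long flags;
        int ret = 0;

        local_irq_save(flags);
        arch_spin_lock(&mcpm_lock_sketch);

        /* First user of the cluster powers it up before any CPU in it. */
        if (cluster_use_count_sketch[cluster]++ == 0)
                ret = ops->cluster_powerup(cluster);

        /* Then the individual CPU, unless it is already accounted as up. */
        if (!ret && cpu_use_count_sketch[cpu][cluster]++ == 0)
                ret = ops->cpu_powerup(cpu, cluster);

        arch_spin_unlock(&mcpm_lock_sketch);
        local_irq_restore(flags);
        return ret;
}
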
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9b4f29e595a4..b4f92b9a13ac 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -738,7 +738,7 @@ config CPU_ICACHE_DISABLE
config CPU_DCACHE_DISABLE
bool "Disable D-Cache (C-bit)"
- depends on CPU_CP15
+ depends on CPU_CP15 && !SMP
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
@@ -825,6 +825,20 @@ config KUSER_HELPERS
Say N here only if you are absolutely certain that you do not
need these helpers; otherwise, the safe option is to say Y.
+config VDSO
+ bool "Enable VDSO for acceleration of some system calls"
+ depends on AEABI && MMU && CPU_V7
+ default y if ARM_ARCH_TIMER
+ select GENERIC_TIME_VSYSCALL
+ help
+ Place in the process address space an ELF shared object
+ providing fast implementations of gettimeofday and
+ clock_gettime. Systems that implement the ARM architected
+ timer will receive maximum benefit.
+
+ You must have glibc 2.22 or later for programs to seamlessly
+ take advantage of this.
+
config DMA_CACHE_RWFO
bool "Enable read/write for ownership DMA cache maintenance"
depends on CPU_V6K && SMP
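
The new VDSO option advertises syscall-free gettimeofday()/clock_gettime(). Nothing changes on the application side; an ordinary program keeps using the standard API, and with CONFIG_VDSO, an architected timer and a glibc new enough to use the vDSO, the calls below should not trap into the kernel (plain userspace C, offered only as a usage illustration):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec a, b;
        volatile int i;

        clock_gettime(CLOCK_MONOTONIC, &a);     /* resolved in the vDSO */
        for (i = 0; i < 1000000; i++)
                ;
        clock_gettime(CLOCK_MONOTONIC, &b);
        printf("elapsed: %ld ns\n",
               (b.tv_sec - a.tv_sec) * 1000000000L + (b.tv_nsec - a.tv_nsec));
        return 0;
}
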
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 2c0c541c60ca..9769f1eefe3b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -201,7 +201,7 @@ union offset_union {
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
@@ -261,7 +261,7 @@ union offset_union {
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, #1\n" \
" b 3b\n" \
@@ -301,7 +301,7 @@ union offset_union {
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"6: mov %0, #1\n" \
" b 5b\n" \
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8f15f70622a6..e309c8f35af5 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1647,6 +1647,7 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
struct device_node *np;
struct resource res;
u32 cache_id, old_aux;
+ u32 cache_level = 2;
np = of_find_matching_node(NULL, l2x0_ids);
if (!np)
@@ -1679,6 +1680,12 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
if (!of_property_read_bool(np, "cache-unified"))
pr_err("L2C: device tree omits to specify unified cache\n");
+ if (of_property_read_u32(np, "cache-level", &cache_level))
+ pr_err("L2C: device tree omits to specify cache-level\n");
+
+ if (cache_level != 2)
+ pr_err("L2C: device tree specifies invalid cache level\n");
+
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index b966656d2c2d..a134d8a13d00 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -36,10 +36,10 @@ ENTRY(v7_invalidate_l1)
mcr p15, 2, r0, c0, c0, 0
mrc p15, 1, r0, c0, c0, 0
- ldr r1, =0x7fff
+ movw r1, #0x7fff
and r2, r1, r0, lsr #13
- ldr r1, =0x3ff
+ movw r1, #0x3ff
and r3, r1, r0, lsr #3 @ NumWays - 1
add r2, r2, #1 @ NumSets
@@ -90,21 +90,20 @@ ENDPROC(v7_flush_icache_all)
ENTRY(v7_flush_dcache_louis)
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
- ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr
- ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
+ALT_SMP(mov r3, r0, lsr #20) @ move LoUIS into position
+ALT_UP( mov r3, r0, lsr #26) @ move LoUU into position
+ ands r3, r3, #7 << 1 @ extract LoU*2 field from clidr
+ bne start_flush_levels @ LoU != 0, start flushing
#ifdef CONFIG_ARM_ERRATA_643719
- ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
- ALT_UP(reteq lr) @ LoUU is zero, so nothing to do
- ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
- biceq r2, r2, #0x0000000f @ clear minor revision number
- teqeq r2, r1 @ test for errata affected core and if so...
- orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne')
+ALT_SMP(mrc p15, 0, r2, c0, c0, 0) @ read main ID register
+ALT_UP( ret lr) @ LoUU is zero, so nothing to do
+ movw r1, #:lower16:(0x410fc090 >> 4) @ ID of ARM Cortex A9 r0p?
+ movt r1, #:upper16:(0x410fc090 >> 4)
+ teq r1, r2, lsr #4 @ test for errata affected core and if so...
+ moveq r3, #1 << 1 @ fix LoUIS value
+ beq start_flush_levels @ start flushing cache levels
#endif
- ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
- ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
- reteq lr @ return if level == 0
- mov r10, #0 @ r10 (starting level) = 0
- b flush_levels @ start flushing cache levels
+ ret lr
ENDPROC(v7_flush_dcache_louis)
/*
@@ -119,9 +118,10 @@ ENDPROC(v7_flush_dcache_louis)
ENTRY(v7_flush_dcache_all)
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr
- ands r3, r0, #0x7000000 @ extract loc from clidr
- mov r3, r3, lsr #23 @ left align loc bit field
+ mov r3, r0, lsr #23 @ move LoC into position
+ ands r3, r3, #7 << 1 @ extract LoC*2 from clidr
beq finished @ if loc is 0, then no need to clean
+start_flush_levels:
mov r10, #0 @ start clean at cache level 0
flush_levels:
add r2, r10, r10, lsr #1 @ work out 3x current cache level
@@ -140,10 +140,10 @@ flush_levels:
#endif
and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset)
- ldr r4, =0x3ff
+ movw r4, #0x3ff
ands r4, r4, r1, lsr #3 @ find maximum number on the way size
clz r5, r4 @ find bit position of way size increment
- ldr r7, =0x7fff
+ movw r7, #0x7fff
ands r7, r7, r1, lsr #13 @ extract max number of the index size
loop1:
mov r9, r7 @ create working copy of max index
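
The rewritten v7_flush_dcache_louis/v7_flush_dcache_all prologues are pure CLIDR bit-field arithmetic: the level-of-unification/coherency fields are shifted so that r3 ends up holding "level * 2", which is what flush_levels consumes, and movw replaces the literal-pool loads of 0x7fff/0x3ff since both fit a 16-bit immediate. The same field extraction in C, as a model of the values only (not kernel code):

#include <stdint.h>

/* CLIDR: LoUIS = bits [23:21], LoC = bits [26:24], LoUU = bits [29:27]. */
static unsigned clidr_louis_x2(uint32_t clidr) { return (clidr >> 20) & (7 << 1); }
static unsigned clidr_loc_x2(uint32_t clidr)   { return (clidr >> 23) & (7 << 1); }
static unsigned clidr_louu_x2(uint32_t clidr)  { return (clidr >> 26) & (7 << 1); }
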
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e315dfe3af1b..7e7583ddd607 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller);
+ const void *caller, bool want_vaddr);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
- const void *caller);
+ const void *caller, bool want_vaddr);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
@@ -357,10 +357,10 @@ static int __init atomic_pool_init(void)
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
- &page, atomic_pool_init);
+ &page, atomic_pool_init, true);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
- &page, atomic_pool_init);
+ &page, atomic_pool_init, true);
if (ptr) {
int ret;
@@ -467,13 +467,15 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
- const void *caller)
+ const void *caller, bool want_vaddr)
{
struct page *page;
- void *ptr;
+ void *ptr = NULL;
page = __dma_alloc_buffer(dev, size, gfp);
if (!page)
return NULL;
+ if (!want_vaddr)
+ goto out;
ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
if (!ptr) {
@@ -481,6 +483,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
return NULL;
}
+ out:
*ret_page = page;
return ptr;
}
@@ -523,12 +526,12 @@ static int __free_from_pool(void *start, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller)
+ const void *caller, bool want_vaddr)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
- void *ptr;
+ void *ptr = NULL;
page = dma_alloc_from_contiguous(dev, count, order);
if (!page)
@@ -536,6 +539,9 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
__dma_clear_buffer(page, size);
+ if (!want_vaddr)
+ goto out;
+
if (PageHighMem(page)) {
ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
if (!ptr) {
@@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
__dma_remap(page, size, prot);
ptr = page_address(page);
}
+
+ out:
*ret_page = page;
return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
- void *cpu_addr, size_t size)
+ void *cpu_addr, size_t size, bool want_vaddr)
{
- if (PageHighMem(page))
- __dma_free_remap(cpu_addr, size);
- else
- __dma_remap(page, size, PAGE_KERNEL);
+ if (want_vaddr) {
+ if (PageHighMem(page))
+ __dma_free_remap(cpu_addr, size);
+ else
+ __dma_remap(page, size, PAGE_KERNEL);
+ }
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
@@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define nommu() 1
-#define __get_dma_pgprot(attrs, prot) __pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __get_dma_pgprot(attrs, prot) __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
#define __free_from_pool(cpu_addr, size) 0
-#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
@@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+ gfp_t gfp, pgprot_t prot, bool is_coherent,
+ struct dma_attrs *attrs, const void *caller)
{
u64 mask = get_coherent_dma_mask(dev);
struct page *page = NULL;
void *addr;
+ bool want_vaddr;
#ifdef CONFIG_DMA_API_DEBUG
u64 limit = (mask + 1) & ~mask;
@@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
+ want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
if (is_coherent || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
else if (!dev_get_cma_area(dev))
- addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
else
- addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+ addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
- if (addr)
+ if (page)
*handle = pfn_to_dma(dev, page_to_pfn(page));
- return addr;
+ return want_vaddr ? addr : page;
}
/*
@@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, false,
- __builtin_return_address(0));
+ attrs, __builtin_return_address(0));
}
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, true,
- __builtin_return_address(0));
+ attrs, __builtin_return_address(0));
}
/*
@@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
bool is_coherent)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+ bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
@@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
} else if (__free_from_pool(cpu_addr, size)) {
return;
} else if (!dev_get_cma_area(dev)) {
- __dma_free_remap(cpu_addr, size);
+ if (want_vaddr)
+ __dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
/*
* Non-atomic allocations cannot be freed with IRQs disabled
*/
WARN_ON(irqs_disabled());
- __free_from_contiguous(dev, page, cpu_addr, size);
+ __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
}
}
@@ -1135,13 +1150,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
while (count) {
- int j, order = __fls(count);
+ int j, order;
+
+ for (order = __fls(count); order > 0; --order) {
+ /*
+ * We do not want OOM killer to be invoked as long
+ * as we can fall back to single pages, so we force
+ * __GFP_NORETRY for orders higher than zero.
+ */
+ pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
+ if (pages[i])
+ break;
+ }
- pages[i] = alloc_pages(gfp, order);
- while (!pages[i] && order)
- pages[i] = alloc_pages(gfp, --order);
- if (!pages[i])
- goto error;
+ if (!pages[i]) {
+ /*
+ * Fall back to single page allocation.
+ * Might invoke OOM killer as last resort.
+ */
+ pages[i] = alloc_pages(gfp, 0);
+ if (!pages[i])
+ goto error;
+ }
if (order) {
split_page(pages[i], order);
@@ -1206,7 +1236,7 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t dma_addr, iova;
int i, ret = DMA_ERROR_CODE;
@@ -1242,7 +1272,7 @@ fail:
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
/*
* add optional in-page offset from iova to size and align
@@ -1457,7 +1487,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
enum dma_data_direction dir, struct dma_attrs *attrs,
bool is_coherent)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova, iova_base;
int ret = 0;
unsigned int count;
@@ -1678,7 +1708,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t dma_addr;
int ret, prot, len = PAGE_ALIGN(size + offset);
@@ -1731,7 +1761,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
int offset = handle & ~PAGE_MASK;
int len = PAGE_ALIGN(size + offset);
@@ -1756,7 +1786,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
int offset = handle & ~PAGE_MASK;
@@ -1775,7 +1805,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
static void arm_iommu_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
unsigned int offset = handle & ~PAGE_MASK;
@@ -1789,7 +1819,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
static void arm_iommu_sync_single_for_device(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
unsigned int offset = handle & ~PAGE_MASK;
@@ -1848,7 +1878,7 @@ struct dma_map_ops iommu_coherent_ops = {
* arm_iommu_attach_device function.
*/
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
unsigned int bits = size >> PAGE_SHIFT;
unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1856,6 +1886,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
int extensions = 1;
int err = -ENOMEM;
+ /* currently only 32-bit DMA address space is supported */
+ if (size > DMA_BIT_MASK(32) + 1)
+ return ERR_PTR(-ERANGE);
+
if (!bitmap_size)
return ERR_PTR(-EINVAL);
@@ -1950,7 +1984,7 @@ static int __arm_iommu_attach_device(struct device *dev,
return err;
kref_get(&mapping->kref);
- dev->archdata.mapping = mapping;
+ to_dma_iommu_mapping(dev) = mapping;
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
@@ -1995,7 +2029,7 @@ static void __arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
- dev->archdata.mapping = NULL;
+ to_dma_iommu_mapping(dev) = NULL;
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
@@ -2027,13 +2061,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (!iommu)
return false;
- /*
- * currently arm_iommu_create_mapping() takes a max of size_t
- * for size param. So check this limit for now.
- */
- if (size > SIZE_MAX)
- return false;
-
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
@@ -2053,7 +2080,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
static void arm_teardown_iommu_dma_ops(struct device *dev)
{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
if (!mapping)
return;
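
All of the want_vaddr plumbing above exists so that DMA_ATTR_NO_KERNEL_MAPPING allocations can skip creating (and later tearing down) a kernel virtual mapping. On the driver side the attribute is requested through the dma_attrs API of this kernel generation; a hedged sketch (the device pointer and size are placeholders, and the returned cookie must not be dereferenced by the CPU):

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static void *alloc_device_only_buffer(struct device *dev, size_t size,
                                      dma_addr_t *dma)
{
        DEFINE_DMA_ATTRS(attrs);

        /* Buffer is only ever touched by the device: no kernel mapping. */
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
        return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, &attrs);
}

The matching dma_free_attrs() call must pass the same attrs so __arm_dma_free() knows there is no remapped virtual address to release.
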
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1609b022a72f..be92fa0f2f35 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -86,55 +86,6 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
-/*
- * This keeps memory configuration data used by a couple memory
- * initialization functions, as well as show_mem() for the skipping
- * of holes in the memory map. It is populated by arm_add_memory().
- */
-void show_mem(unsigned int filter)
-{
- int free = 0, total = 0, reserved = 0;
- int shared = 0, cached = 0, slab = 0;
- struct memblock_region *reg;
-
- printk("Mem-info:\n");
- show_free_areas(filter);
-
- for_each_memblock (memory, reg) {
- unsigned int pfn1, pfn2;
- struct page *page, *end;
-
- pfn1 = memblock_region_memory_base_pfn(reg);
- pfn2 = memblock_region_memory_end_pfn(reg);
-
- page = pfn_to_page(pfn1);
- end = pfn_to_page(pfn2 - 1) + 1;
-
- do {
- total++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (PageSlab(page))
- slab++;
- else if (!page_count(page))
- free++;
- else
- shared += page_count(page) - 1;
- pfn1++;
- page = pfn_to_page(pfn1);
- } while (pfn1 < pfn2);
- }
-
- printk("%d pages of RAM\n", total);
- printk("%d free pages\n", free);
- printk("%d reserved pages\n", reserved);
- printk("%d slab pages\n", slab);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
-}
-
static void __init find_limits(unsigned long *min, unsigned long *max_low,
unsigned long *max_high)
{
@@ -335,6 +286,9 @@ void __init bootmem_init(void)
find_limits(&min, &max_low, &max_high);
+ early_memtest((phys_addr_t)min << PAGE_SHIFT,
+ (phys_addr_t)max_low << PAGE_SHIFT);
+
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5e85ed371364..407dc786583a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -169,14 +169,22 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
+unsigned long arch_mmap_rnd(void)
+{
+ unsigned long rnd;
+
+ /* 8 bits of randomness in 20 address space bits */
+ rnd = (unsigned long)get_random_int() % (1 << 8);
+
+ return rnd << PAGE_SHIFT;
+}
+
void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
- /* 8 bits of randomness in 20 address space bits */
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE))
- random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
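
arch_mmap_rnd() keeps the existing policy of 8 random bits shifted left by PAGE_SHIFT; it only moves the computation behind the generic hook. With 4 KiB pages that gives 256 possible mmap base offsets spanning just under 1 MiB; a quick check of the arithmetic (plain C, 4 KiB page size assumed):

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 12;                  /* 4 KiB pages */
        unsigned long slots = 1UL << 8;                 /* 8 random bits */
        unsigned long max_off = (slots - 1) << page_shift;

        /* 255 * 4096 = 1044480 bytes, i.e. 1 MiB minus one page */
        printf("%lu offsets, max %lu bytes\n", slots, max_off);
        return 0;
}
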
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 86ee5d47ce3c..774ef1323554 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -22,8 +22,6 @@
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020.
- *
- * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/init.h>
@@ -507,7 +505,7 @@ cpu_arm1020_name:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm1020_proc_info,#object
__arm1020_proc_info:
@@ -519,7 +517,7 @@ __arm1020_proc_info:
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm1020_setup
+ initfn __arm1020_setup, __arm1020_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index a6331d78601f..ae3c27b71594 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -22,8 +22,6 @@
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020e.
- *
- * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/init.h>
@@ -465,7 +463,7 @@ arm1020e_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm1020e_proc_info,#object
__arm1020e_proc_info:
@@ -479,7 +477,7 @@ __arm1020e_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm1020e_setup
+ initfn __arm1020e_setup, __arm1020e_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index a126b7a59928..dbb2413fe04d 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -448,7 +448,7 @@ arm1022_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm1022_proc_info,#object
__arm1022_proc_info:
@@ -462,7 +462,7 @@ __arm1022_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm1022_setup
+ initfn __arm1022_setup, __arm1022_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index fc294067e977..0b37b2cef9d3 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -442,7 +442,7 @@ arm1026_crval:
string cpu_arm1026_name, "ARM1026EJ-S"
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm1026_proc_info,#object
__arm1026_proc_info:
@@ -456,7 +456,7 @@ __arm1026_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm1026_setup
+ initfn __arm1026_setup, __arm1026_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 2baa66b3ac9b..3651cd70e418 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -186,7 +186,7 @@ arm720_crval:
* See <asm/procinfo.h> for a definition of this structure.
*/
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
.type __\name\()_proc_info,#object
@@ -203,7 +203,7 @@ __\name\()_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b \cpu_flush @ cpu_flush
+ initfn \cpu_flush, __\name\()_proc_info @ cpu_flush
.long cpu_arch_name @ arch_name
.long cpu_elf_name @ elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index ac1ea6b3bce4..024fb7732407 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -132,14 +132,14 @@ __arm740_setup:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm740_proc_info,#object
__arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
.long 0
- b __arm740_setup
+ initfn __arm740_setup, __arm740_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index bf6ba4bc30ff..25472d94426d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -76,7 +76,7 @@ __arm7tdmi_setup:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
extra_hwcaps=0
@@ -86,7 +86,7 @@ __\name\()_proc_info:
.long \cpu_mask
.long 0
.long 0
- b __arm7tdmi_setup
+ initfn __arm7tdmi_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps )
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 22bf8dde4f84..7a14bd4414c9 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -448,7 +448,7 @@ arm920_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm920_proc_info,#object
__arm920_proc_info:
@@ -464,7 +464,7 @@ __arm920_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm920_setup
+ initfn __arm920_setup, __arm920_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 0c6d5ac5a6d4..edccfcdcd551 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -426,7 +426,7 @@ arm922_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm922_proc_info,#object
__arm922_proc_info:
@@ -442,7 +442,7 @@ __arm922_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm922_setup
+ initfn __arm922_setup, __arm922_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index c32d073282ea..32a47cc19076 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext)
.type __arm925_setup, #function
__arm925_setup:
mov r0, #0
-#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
- orr r0,r0,#1 << 7
-#endif
/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
orr r0,r0,#1 << 1 @ transparent mode on
@@ -494,7 +491,7 @@ arm925_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
.type __\name\()_proc_info,#object
@@ -510,7 +507,7 @@ __\name\()_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm925_setup
+ initfn __arm925_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 252b2503038d..fb827c633693 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -474,7 +474,7 @@ arm926_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm926_proc_info,#object
__arm926_proc_info:
@@ -490,7 +490,7 @@ __arm926_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm926_setup
+ initfn __arm926_setup, __arm926_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index e5212d489377..ee5b66f847c4 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -297,26 +297,16 @@ __arm940_setup:
mcr p15, 0, r0, c6, c0, 1
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
- bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
- orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c1, 0 @ set area 1, RAM
- mcr p15, 0, r0, c6, c1, 1
+ ldr r7, =CONFIG_DRAM_SIZE @ size of RAM (must be >= 4KB)
+ pr_val r3, r0, r7, #1
+ mcr p15, 0, r3, c6, c1, 0 @ set area 1, RAM
+ mcr p15, 0, r3, c6, c1, 1
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
- bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
- orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c2, 0 @ set area 2, ROM/FLASH
- mcr p15, 0, r0, c6, c2, 1
+ ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB)
+ pr_val r3, r0, r7, #1
+ mcr p15, 0, r3, c6, c2, 0 @ set area 2, ROM/FLASH
+ mcr p15, 0, r3, c6, c2, 1
mov r0, #0x06
mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable
@@ -354,14 +344,14 @@ __arm940_setup:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm940_proc_info,#object
__arm940_proc_info:
.long 0x41009400
.long 0xff00fff0
.long 0
- b __arm940_setup
+ initfn __arm940_setup, __arm940_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index b3dd9b2d0b8e..7361837edc31 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -343,24 +343,14 @@ __arm946_setup:
mcr p15, 0, r0, c6, c0, 0 @ set region 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
- bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the region register value
- orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c1, 0 @ set region 1, RAM
+ ldr r7, =CONFIG_DRAM_SIZE @ size of RAM (must be >= 4KB)
+ pr_val r3, r0, r7, #1
+ mcr p15, 0, r3, c6, c1, 0
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
- bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the region register value
- orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c2, 0 @ set region 2, ROM/FLASH
+ ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB)
+ pr_val r3, r0, r7, #1
+ mcr p15, 0, r3, c6, c2, 0
mov r0, #0x06
mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable
@@ -409,14 +399,14 @@ __arm946_setup:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __arm946_proc_info,#object
__arm946_proc_info:
.long 0x41009460
.long 0xff00fff0
.long 0
.long 0
- b __arm946_setup
+ initfn __arm946_setup, __arm946_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 8227322bbb8f..7fac8c612134 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -70,7 +70,7 @@ __arm9tdmi_setup:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
.type __\name\()_proc_info, #object
@@ -79,7 +79,7 @@ __\name\()_proc_info:
.long \cpu_mask
.long 0
.long 0
- b __arm9tdmi_setup
+ initfn __arm9tdmi_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index c494886892ba..4001b73af4ee 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -190,7 +190,7 @@ fa526_cr1_set:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __fa526_proc_info,#object
__fa526_proc_info:
@@ -206,7 +206,7 @@ __fa526_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __fa526_setup
+ initfn __fa526_setup, __fa526_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 03a1b75f2e16..92e08bf37aad 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -584,7 +584,7 @@ feroceon_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
.type __\name\()_proc_info,#object
@@ -601,7 +601,7 @@ __\name\()_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __feroceon_setup
+ initfn __feroceon_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 082b9f2f7e90..c671f345266a 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -331,3 +331,31 @@ ENTRY(\name\()_tlb_fns)
.globl \x
.equ \x, \y
.endm
+
+.macro initfn, func, base
+ .long \func - \base
+.endm
+
+ /*
+ * Macro to calculate the log2 size for the protection region
+ * registers. This calculates rd = log2(size) - 1. tmp must
+ * not be the same register as rd.
+ */
+.macro pr_sz, rd, size, tmp
+ mov \tmp, \size, lsr #12
+ mov \rd, #11
+1: movs \tmp, \tmp, lsr #1
+ addne \rd, \rd, #1
+ bne 1b
+.endm
+
+ /*
+ * Macro to generate a protection region register value
+ * given a pre-masked address, size, and enable bit.
+ * Corrupts size.
+ */
+.macro pr_val, dest, addr, size, enable
+ pr_sz \dest, \size, \size @ calculate log2(size) - 1
+ orr \dest, \addr, \dest, lsl #1 @ mask in the region size
+ orr \dest, \dest, \enable
+.endm
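
The new pr_sz/pr_val macros encode an ARM940/946 protection-region register: bits [5:1] carry log2(size) - 1 (11 being the 4 KiB minimum), OR'ed with the pre-masked base address and the enable bit, while initfn stores \func - \base so a proc_info entry carries a position-independent offset instead of a branch. The region encoding as a worked check in C (a model of the macro, not kernel code):

#include <assert.h>

static unsigned long pr_val(unsigned long base, unsigned long size, unsigned en)
{
        unsigned field = 11;                    /* 4 KiB == 2^(11 + 1) */

        for (size >>= 12; size >>= 1; )         /* count doublings above 4 KiB */
                field++;
        return base | (field << 1) | en;
}

int main(void)
{
        assert(pr_val(0, 4096, 1) == ((11UL << 1) | 1));        /*  4 KiB */
        assert(pr_val(0, 64UL << 20, 1) == ((25UL << 1) | 1));  /* 64 MiB */
        return 0;
}
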
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 53d393455f13..d65edf717bf7 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -427,7 +427,7 @@ mohawk_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __88sv331x_proc_info,#object
__88sv331x_proc_info:
@@ -443,7 +443,7 @@ __88sv331x_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __mohawk_setup
+ initfn __mohawk_setup, __88sv331x_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 8008a0461cf5..ee2ce496239f 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -199,7 +199,7 @@ sa110_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.type __sa110_proc_info,#object
__sa110_proc_info:
@@ -213,7 +213,7 @@ __sa110_proc_info:
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __sa110_setup
+ initfn __sa110_setup, __sa110_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 89f97ac648a9..222d5836f666 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -242,7 +242,7 @@ sa1100_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
.type __\name\()_proc_info,#object
@@ -257,7 +257,7 @@ __\name\()_proc_info:
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __sa1100_setup
+ initfn __sa1100_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d0390f4b3f18..06d890a2342b 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -264,7 +264,7 @@ v6_crval:
string cpu_elf_name, "v6"
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
/*
* Match any ARMv6 processor core.
@@ -287,7 +287,7 @@ __v6_proc_info:
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __v6_setup
+ initfn __v6_setup, __v6_proc_info
.long cpu_arch_name
.long cpu_elf_name
/* See also feat_v6_fixup() for HWCAP_TLS */
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index ed448d8a596b..10405b8d31af 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -37,15 +37,18 @@
* It is assumed that:
* - we are not using split page tables
*/
-ENTRY(cpu_v7_switch_mm)
+ENTRY(cpu_ca8_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
- mmid r1, r1 @ get mm->context.id
- ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
- ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
+#endif
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+ mmid r1, r1 @ get mm->context.id
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_PID_IN_CONTEXTIDR
mrc p15, 0, r2, c13, c0, 1 @ read current context ID
lsr r2, r2, #8 @ extract the PID
@@ -61,6 +64,7 @@ ENTRY(cpu_v7_switch_mm)
#endif
bx lr
ENDPROC(cpu_v7_switch_mm)
+ENDPROC(cpu_ca8_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 8b4ee5e81c14..3d1054f11a8a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -153,6 +153,21 @@ ENDPROC(cpu_v7_do_resume)
#endif
/*
+ * Cortex-A8
+ */
+ globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
+ globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
+ globl_equ cpu_ca8_reset, cpu_v7_reset
+ globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
+ globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+ globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
+ globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
+ globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
+#endif
+
+/*
* Cortex-A9 processor functions
*/
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
@@ -451,7 +466,10 @@ __v7_setup_stack:
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#ifndef CONFIG_ARM_LPAE
+ define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#endif
#ifdef CONFIG_CPU_PJ4B
define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
#endif
@@ -462,19 +480,19 @@ __v7_setup_stack:
string cpu_elf_name, "v7"
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
/*
* Standard v7 proc info content
*/
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
+.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
- W(b) \initfunc
+ initfn \initfunc, \name
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
@@ -494,7 +512,7 @@ __v7_setup_stack:
__v7_ca5mp_proc_info:
.long 0x410fc050
.long 0xff0ffff0
- __v7_proc __v7_ca5mp_setup
+ __v7_proc __v7_ca5mp_proc_info, __v7_ca5mp_setup
.size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
/*
@@ -504,9 +522,19 @@ __v7_ca5mp_proc_info:
__v7_ca9mp_proc_info:
.long 0x410fc090
.long 0xff0ffff0
- __v7_proc __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
+ __v7_proc __v7_ca9mp_proc_info, __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+ /*
+ * ARM Ltd. Cortex A8 processor.
+ */
+ .type __v7_ca8_proc_info, #object
+__v7_ca8_proc_info:
+ .long 0x410fc080
+ .long 0xff0ffff0
+ __v7_proc __v7_ca8_proc_info, __v7_setup, proc_fns = ca8_processor_functions
+ .size __v7_ca8_proc_info, . - __v7_ca8_proc_info
+
#endif /* CONFIG_ARM_LPAE */
/*
@@ -517,7 +545,7 @@ __v7_ca9mp_proc_info:
__v7_pj4b_proc_info:
.long 0x560f5800
.long 0xff0fff00
- __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
+ __v7_proc __v7_pj4b_proc_info, __v7_pj4b_setup, proc_fns = pj4b_processor_functions
.size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
#endif
@@ -528,7 +556,7 @@ __v7_pj4b_proc_info:
__v7_cr7mp_proc_info:
.long 0x410fc170
.long 0xff0ffff0
- __v7_proc __v7_cr7mp_setup
+ __v7_proc __v7_cr7mp_proc_info, __v7_cr7mp_setup
.size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info
/*
@@ -538,7 +566,7 @@ __v7_cr7mp_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
- __v7_proc __v7_ca7mp_setup
+ __v7_proc __v7_ca7mp_proc_info, __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
@@ -548,7 +576,7 @@ __v7_ca7mp_proc_info:
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
- __v7_proc __v7_ca12mp_setup
+ __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/*
@@ -558,7 +586,7 @@ __v7_ca12mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_setup
+ __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
@@ -568,7 +596,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
- __v7_proc __v7_b15mp_setup
+ __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*
@@ -578,7 +606,7 @@ __v7_b15mp_proc_info:
__v7_ca17mp_proc_info:
.long 0x410fc0e0
.long 0xff0ffff0
- __v7_proc __v7_ca17mp_setup
+ __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
.size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
/*
@@ -594,7 +622,7 @@ __krait_proc_info:
* do support them. They also don't indicate support for fused multiply
* instructions even though they actually do support them.
*/
- __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
+ __v7_proc __krait_proc_info, __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
.size __krait_proc_info, . - __krait_proc_info
/*
@@ -604,5 +632,5 @@ __krait_proc_info:
__v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
- __v7_proc __v7_setup
+ __v7_proc __v7_proc_info, __v7_setup
.size __v7_proc_info, . - __v7_proc_info
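
With __v7_proc now taking the proc_info label, initfn can emit the setup routine as an offset relative to the entry itself, which is what allows .proc.info.init to drop the #execinstr attribute. Recovering the absolute address at boot is then a single addition; an illustrative C model (the field name below is made up, the real struct proc_info_list layout differs):

struct proc_info_sketch {
        long cpu_flush_offset;          /* written by initfn: func - entry */
};

static unsigned long proc_init_addr(const struct proc_info_sketch *entry)
{
        /* Works on the physical copy too, since only the delta is stored. */
        return (unsigned long)entry + entry->cpu_flush_offset;
}
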
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index d1e68b553d3b..e08e1f2bab76 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -135,7 +135,7 @@ __v7m_setup_stack_top:
string cpu_elf_name "v7m"
string cpu_v7m_name "ARMv7-M"
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
/*
* Match any ARMv7-M processor core.
@@ -146,7 +146,7 @@ __v7m_proc_info:
.long 0x000f0000 @ Mask for ID
.long 0 @ proc_info_list.__cpu_mm_mmu_flags
.long 0 @ proc_info_list.__cpu_io_mmu_flags
- b __v7m_setup @ proc_info_list.__cpu_flush
+ initfn __v7m_setup, __v7m_proc_info @ proc_info_list.__cpu_flush
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index f8acdfece036..293dcc2c441f 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -499,7 +499,7 @@ xsc3_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
.type __\name\()_proc_info,#object
@@ -514,7 +514,7 @@ __\name\()_proc_info:
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __xsc3_setup
+ initfn __xsc3_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index afa2b3c4df4a..b6bbfdb6dfdc 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -612,7 +612,7 @@ xscale_crval:
.align
- .section ".proc.info.init", #alloc, #execinstr
+ .section ".proc.info.init", #alloc
.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
.type __\name\()_proc_info,#object
@@ -627,7 +627,7 @@ __\name\()_proc_info:
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __xscale_setup
+ initfn __xscale_setup, __\name\()_proc_info
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index e1268f905026..e0e23582c8b4 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -54,6 +54,7 @@
#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
#define FLAG_NEED_X_RESET (1 << 0)
+#define FLAG_IMM_OVERFLOW (1 << 1)
struct jit_ctx {
const struct bpf_prog *skf;
@@ -293,6 +294,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
/* PC in ARM mode == address of the instruction + 8 */
imm = offset - (8 + ctx->idx * 4);
+ if (imm & ~0xfff) {
+ /*
+ * literal pool is too far, signal it into flags. we
+ * can only detect it on the second pass unfortunately.
+ */
+ ctx->flags |= FLAG_IMM_OVERFLOW;
+ return 0;
+ }
+
return imm;
}
@@ -449,10 +459,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
return;
}
#endif
- if (rm != ARM_R0)
- emit(ARM_MOV_R(ARM_R0, rm), ctx);
+
+ /*
+ * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
+ * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
+ * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
+ * before using it as a source for ARM_R1.
+ *
+ * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
+ * ARM_R5 (r_X) so there are no particular register overlap
+ * issues.
+ */
if (rn != ARM_R1)
emit(ARM_MOV_R(ARM_R1, rn), ctx);
+ if (rm != ARM_R0)
+ emit(ARM_MOV_R(ARM_R0, rm), ctx);
ctx->seen |= SEEN_CALL;
emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
@@ -855,6 +876,14 @@ b_epilogue:
default:
return -1;
}
+
+ if (ctx->flags & FLAG_IMM_OVERFLOW)
+ /*
+ * this instruction generated an overflow when
+ * trying to access the literal pool, so
+ * delegate this filter to the kernel interpreter.
+ */
+ return -1;
}
/* compute offsets only during the first pass */
@@ -917,7 +946,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
ctx.idx = 0;
build_prologue(&ctx);
- build_body(&ctx);
+ if (build_body(&ctx) < 0) {
+#if __LINUX_ARM_ARCH__ < 7
+ if (ctx.imm_count)
+ kfree(ctx.imms);
+#endif
+ bpf_jit_binary_free(header);
+ goto out;
+ }
build_epilogue(&ctx);
flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
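
Two independent fixes land in the BPF JIT here: a 12-bit overflow of the literal-pool offset now sets FLAG_IMM_OVERFLOW so build_body() fails and the filter is left to the interpreter instead of emitting a bogus immediate, and the argument moves ahead of the jit_udiv() call are reordered so r0 is not clobbered while it still holds the value that must end up in r1. The second point is the usual parallel-move hazard; a small C model of the ordering (illustrative only, array indices stand in for ARM_R0..ARM_R4):

#include <assert.h>

static void shuffle(unsigned *regs, int rm, int rn)
{
        /* Copy rn into r1 before touching r0: for BPF_DIV|BPF_K, rn IS r0. */
        if (rn != 1)
                regs[1] = regs[rn];
        if (rm != 0)
                regs[0] = regs[rm];
}

int main(void)
{
        unsigned regs[5] = { 7, 0, 0, 0, 100 }; /* r0 = divisor, r4 = A */

        shuffle(regs, 4 /* rm */, 0 /* rn */);
        assert(regs[0] == 100 && regs[1] == 7); /* reversed moves would lose r0 */
        return 0;
}
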
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index 5d65be1f1e8a..71df43547659 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -113,7 +113,7 @@ next:
@ to fault. Emit the appropriate exception gunk to fix things up.
@ ??? For some reason, faults can happen at .Lx2 even with a
@ plain LDR instruction. Weird, but it seems harmless.
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
.align 2
.Lfix: ret r9 @ let the user eat segfaults
.popsection
diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c
index 054fc5a1a11c..d92f07f6ecfb 100644
--- a/arch/arm/plat-pxa/dma.c
+++ b/arch/arm/plat-pxa/dma.c
@@ -51,19 +51,19 @@ static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
- int pos = 0;
int chan = (int)s->private;
int i;
u32 drcmr;
- pos += seq_printf(s, "DMA channel %d requesters list :\n", chan);
+ seq_printf(s, "DMA channel %d requesters list :\n", chan);
for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
drcmr = DRCMR(i);
if ((drcmr & DRCMR_CHLNUM) == chan)
- pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
- !!(drcmr & DRCMR_MAPVLD));
+ seq_printf(s, "\tRequester %d (MAPVLD=%d)\n",
+ i, !!(drcmr & DRCMR_MAPVLD));
}
- return pos;
+
+ return 0;
}
static inline int dbg_burst_from_dcmd(u32 dcmd)
@@ -83,7 +83,6 @@ static int is_phys_valid(unsigned long addr)
static int dbg_show_descriptors(struct seq_file *s, void *p)
{
- int pos = 0;
int chan = (int)s->private;
int i, max_show = 20, burst, width;
u32 dcmd;
@@ -94,44 +93,43 @@ static int dbg_show_descriptors(struct seq_file *s, void *p)
spin_lock_irqsave(&dma_channels[chan].lock, flags);
phys_desc = DDADR(chan);
- pos += seq_printf(s, "DMA channel %d descriptors :\n", chan);
- pos += seq_printf(s, "[%03d] First descriptor unknown\n", 0);
+ seq_printf(s, "DMA channel %d descriptors :\n", chan);
+ seq_printf(s, "[%03d] First descriptor unknown\n", 0);
for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
desc = phys_to_virt(phys_desc);
dcmd = desc->dcmd;
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
- pos += seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
- i, phys_desc, desc);
- pos += seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
- pos += seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
- pos += seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
- pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d"
- " width=%d len=%d)\n",
- dcmd,
- DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
- DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
- DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
- DCMD_STR(ENDIAN), burst, width,
- dcmd & DCMD_LENGTH);
+ seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
+ i, phys_desc, desc);
+ seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
+ seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
+ seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
+ seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
+ dcmd,
+ DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
+ DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
+ DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
+ DCMD_STR(ENDIAN), burst, width,
+ dcmd & DCMD_LENGTH);
phys_desc = desc->ddadr;
}
if (i == max_show)
- pos += seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
- i, phys_desc);
+ seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
+ i, phys_desc);
else
- pos += seq_printf(s, "[%03d] Desc at %08lx is %s\n",
- i, phys_desc, phys_desc == DDADR_STOP ?
- "DDADR_STOP" : "invalid");
+ seq_printf(s, "[%03d] Desc at %08lx is %s\n",
+ i, phys_desc, phys_desc == DDADR_STOP ?
+ "DDADR_STOP" : "invalid");
spin_unlock_irqrestore(&dma_channels[chan].lock, flags);
- return pos;
+
+ return 0;
}
static int dbg_show_chan_state(struct seq_file *s, void *p)
{
- int pos = 0;
int chan = (int)s->private;
u32 dcsr, dcmd;
int burst, width;
@@ -142,42 +140,39 @@ static int dbg_show_chan_state(struct seq_file *s, void *p)
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
- pos += seq_printf(s, "DMA channel %d\n", chan);
- pos += seq_printf(s, "\tPriority : %s\n",
- str_prio[dma_channels[chan].prio]);
- pos += seq_printf(s, "\tUnaligned transfer bit: %s\n",
- DALGN & (1 << chan) ? "yes" : "no");
- pos += seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
- dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
- DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
- DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
- DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
- DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
- DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
- DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
-
- pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d"
- " len=%d)\n",
- dcmd,
- DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
- DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
- DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
- DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
- pos += seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
- pos += seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
- pos += seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
- return pos;
+ seq_printf(s, "DMA channel %d\n", chan);
+ seq_printf(s, "\tPriority : %s\n", str_prio[dma_channels[chan].prio]);
+ seq_printf(s, "\tUnaligned transfer bit: %s\n",
+ DALGN & (1 << chan) ? "yes" : "no");
+ seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
+ dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
+ DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
+ DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
+ DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
+ DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
+ DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
+ DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
+
+ seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
+ dcmd,
+ DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
+ DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
+ DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
+ DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
+ seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
+ seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
+ seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
+
+ return 0;
}
static int dbg_show_state(struct seq_file *s, void *p)
{
- int pos = 0;
-
/* basic device status */
- pos += seq_printf(s, "DMA engine status\n");
- pos += seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
+ seq_puts(s, "DMA engine status\n");
+ seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
- return pos;
+ return 0;
}
#define DBGFS_FUNC_DECL(name) \
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index e17d871b934c..7f415ce74591 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -43,7 +43,11 @@ extern unsigned long s3c_irqwake_eintmask;
/* IRQ masks for IRQs allowed to go to sleep (see irq.c) */
extern unsigned long s3c_irqwake_intallow;
+#ifdef CONFIG_PM_SLEEP
extern unsigned long s3c_irqwake_eintallow;
+#else
+#define s3c_irqwake_eintallow 0
+#endif
/* per-cpu sleep functions */
@@ -58,16 +62,20 @@ extern unsigned long s3c_pm_flags;
extern int s3c2410_cpu_suspend(unsigned long);
-#ifdef CONFIG_SAMSUNG_PM
+#ifdef CONFIG_PM_SLEEP
extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
-extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
extern void s3c_cpu_resume(void);
#else
#define s3c_irq_wake NULL
-#define s3c_irqext_wake NULL
#define s3c_cpu_resume NULL
#endif
+#ifdef CONFIG_SAMSUNG_PM
+extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
+#else
+#define s3c_irqext_wake NULL
+#endif
+
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
/**
* s3c_pm_debug_smdkled() - Debug PM suspend/resume via SMDK Board LEDs
diff --git a/arch/arm/plat-samsung/pm-debug.c b/arch/arm/plat-samsung/pm-debug.c
index 39609601f407..64e15da33b42 100644
--- a/arch/arm/plat-samsung/pm-debug.c
+++ b/arch/arm/plat-samsung/pm-debug.c
@@ -23,6 +23,7 @@
#include <plat/pm-common.h>
#ifdef CONFIG_SAMSUNG_ATAGS
+#include <plat/pm.h>
#include <mach/pm-core.h>
#else
static inline void s3c_pm_debug_init_uart(void) {}
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index f8c0f9797dcf..82777c649774 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -65,26 +65,6 @@ int s3c_irqext_wake(struct irq_data *data, unsigned int state)
return 0;
}
-/* s3c2410_pm_show_resume_irqs
- *
- * print any IRQs asserted at resume time (ie, we woke from)
-*/
-static void __maybe_unused s3c_pm_show_resume_irqs(int start,
- unsigned long which,
- unsigned long mask)
-{
- int i;
-
- which &= ~mask;
-
- for (i = 0; i <= 31; i++) {
- if (which & (1L<<i)) {
- S3C_PMDBG("IRQ %d asserted at resume\n", start+i);
- }
- }
-}
-
-
void (*pm_cpu_prep)(void);
int (*pm_cpu_sleep)(unsigned long);
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index a10297da122b..2ed1b8a922ed 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -526,7 +526,6 @@ ag5evm MACH_AG5EVM AG5EVM 3189
ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
trimslice MACH_TRIMSLICE TRIMSLICE 3209
-mackerel MACH_MACKEREL MACKEREL 3211
kaen MACH_KAEN KAEN 3217
nokia_rm680 MACH_NOKIA_RM680 NOKIA_RM680 3220
msm8960_sim MACH_MSM8960_SIM MSM8960_SIM 3230
diff --git a/arch/arm/vdso/.gitignore b/arch/arm/vdso/.gitignore
new file mode 100644
index 000000000000..6b47f6e0b032
--- /dev/null
+++ b/arch/arm/vdso/.gitignore
@@ -0,0 +1,3 @@
+vdso.lds
+vdso.so.raw
+vdsomunge
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
new file mode 100644
index 000000000000..8aa791051029
--- /dev/null
+++ b/arch/arm/vdso/Makefile
@@ -0,0 +1,74 @@
+hostprogs-y := vdsomunge
+
+obj-vdso := vgettimeofday.o datapage.o
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+ccflags-y := -shared -fPIC -fno-common -fno-builtin -fno-stack-protector
+ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 -DDISABLE_BRANCH_PROFILING
+ccflags-y += -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+
+obj-$(CONFIG_VDSO) += vdso.o
+extra-$(CONFIG_VDSO) += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+CFLAGS_REMOVE_vdso.o = -pg
+
+# Force -O2 to avoid libgcc dependencies
+CFLAGS_REMOVE_vgettimeofday.o = -pg -Os
+CFLAGS_vgettimeofday.o = -O2
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
+# Force dependency
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# Link rule for the .so file
+$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+
+$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE
+ $(call if_changed,vdsomunge)
+
+# Strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# Actual build commands
+quiet_cmd_vdsold = VDSO $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) \
+ $(call cc-ldoption, -Wl$(comma)--build-id) \
+ -Wl,-Bsymbolic -Wl,-z,max-page-size=4096 \
+ -Wl,-z,common-page-size=4096 -o $@
+
+quiet_cmd_vdsomunge = MUNGE $@
+ cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
+
+#
+# Install the unstripped copy of vdso.so.dbg. If our toolchain
+# supports build-id, install .build-id links as well.
+#
+# Cribbed from arch/x86/vdso/Makefile.
+#
+quiet_cmd_vdso_install = INSTALL $<
+define cmd_vdso_install
+ cp $< "$(MODLIB)/vdso/vdso.so"; \
+ if readelf -n $< | grep -q 'Build ID'; then \
+ buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+ first=`echo $$buildid | cut -b-2`; \
+ last=`echo $$buildid | cut -b3-`; \
+ mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+ ln -sf "../../vdso.so" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+ fi
+endef
+
+$(MODLIB)/vdso: FORCE
+ @mkdir -p $(MODLIB)/vdso
+
+PHONY += vdso_install
+vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso FORCE
+ $(call cmd,vdso_install)
diff --git a/arch/arm/vdso/datapage.S b/arch/arm/vdso/datapage.S
new file mode 100644
index 000000000000..a2e60367931b
--- /dev/null
+++ b/arch/arm/vdso/datapage.S
@@ -0,0 +1,15 @@
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+ .align 2
+.L_vdso_data_ptr:
+ .long _start - . - VDSO_DATA_SIZE
+
+ENTRY(__get_datapage)
+ .fnstart
+ adr r0, .L_vdso_data_ptr
+ ldr r1, [r0]
+ add r0, r0, r1
+ bx lr
+ .fnend
+ENDPROC(__get_datapage)
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm/vdso/vdso.S
index fd3993cb060f..b2b97e3e7bab 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm/vdso/vdso.S
@@ -1,9 +1,9 @@
/*
- * arch/arm64/kernel/cputable.c
+ * Adapted from arm64 version.
*
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2012 ARM Limited
*
- * This program is free software: you can redistribute it and/or modify
+ * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
@@ -14,20 +14,22 @@
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
*/
#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
-#include <asm/cputable.h>
+ __PAGE_ALIGNED_DATA
-extern unsigned long __cpu_setup(void);
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/arm/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
-struct cpu_info cpu_table[] = {
- {
- .cpu_id_val = 0x000f0000,
- .cpu_id_mask = 0x000f0000,
- .cpu_name = "AArch64 Processor",
- .cpu_setup = __cpu_setup,
- },
- { /* Empty */ },
-};
+ .previous
diff --git a/arch/arm/vdso/vdso.lds.S b/arch/arm/vdso/vdso.lds.S
new file mode 100644
index 000000000000..89ca89f12d23
--- /dev/null
+++ b/arch/arm/vdso/vdso.lds.S
@@ -0,0 +1,87 @@
+/*
+ * Adapted from arm64 version.
+ *
+ * GNU linker script for the VDSO library.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ * Heavily based on the vDSO linker scripts for other archs.
+ */
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
+OUTPUT_ARCH(arm)
+
+SECTIONS
+{
+ PROVIDE(_start = .);
+
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ .text : { *(.text*) } :text =0xe7f001f2
+
+ .got : { *(.got) }
+ .rel.plt : { *(.rel.plt) }
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+VERSION
+{
+ LINUX_2.6 {
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ local: *;
+ };
+}
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
new file mode 100644
index 000000000000..9005b07296c8
--- /dev/null
+++ b/arch/arm/vdso/vdsomunge.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2015 Mentor Graphics Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * vdsomunge - Host program which produces a shared object
+ * architecturally specified to be usable by both soft- and hard-float
+ * programs.
+ *
+ * The Procedure Call Standard for the ARM Architecture (ARM IHI
+ * 0042E) says:
+ *
+ * 6.4.1 VFP and Base Standard Compatibility
+ *
+ * Code compiled for the VFP calling standard is compatible with
+ * the base standard (and vice-versa) if no floating-point or
+ * containerized vector arguments or results are used.
+ *
+ * And ELF for the ARM Architecture (ARM IHI 0044E) (Table 4-2) says:
+ *
+ * If both EF_ARM_ABI_FLOAT_XXXX bits are clear, conformance to the
+ * base procedure-call standard is implied.
+ *
+ * The VDSO is built with -msoft-float, as with the rest of the ARM
+ * kernel, and uses no floating point arguments or results. The build
+ * process will produce a shared object that may or may not have the
+ * EF_ARM_ABI_FLOAT_SOFT flag set (it seems to depend on the binutils
+ * version; binutils starting with 2.24 appears to set it). The
+ * EF_ARM_ABI_FLOAT_HARD flag should definitely not be set, and this
+ * program will error out if it is.
+ *
+ * If the soft-float flag is set, this program clears it. That's all
+ * it does.
+ */
+
+#define _GNU_SOURCE
+
+#include <byteswap.h>
+#include <elf.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define HOST_ORDER ELFDATA2LSB
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define HOST_ORDER ELFDATA2MSB
+#endif
+
+/* Some of the ELF constants we'd like to use were added to <elf.h>
+ * relatively recently.
+ */
+#ifndef EF_ARM_EABI_VER5
+#define EF_ARM_EABI_VER5 0x05000000
+#endif
+
+#ifndef EF_ARM_ABI_FLOAT_SOFT
+#define EF_ARM_ABI_FLOAT_SOFT 0x200
+#endif
+
+#ifndef EF_ARM_ABI_FLOAT_HARD
+#define EF_ARM_ABI_FLOAT_HARD 0x400
+#endif
+
+static const char *outfile;
+
+static void cleanup(void)
+{
+ if (error_message_count > 0 && outfile != NULL)
+ unlink(outfile);
+}
+
+static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
+{
+ return swap ? bswap_32(word) : word;
+}
+
+static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
+{
+ return swap ? bswap_16(half) : half;
+}
+
+static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
+{
+ *dst = swap ? bswap_32(val) : val;
+}
+
+int main(int argc, char **argv)
+{
+ const Elf32_Ehdr *inhdr;
+ bool clear_soft_float;
+ const char *infile;
+ Elf32_Word e_flags;
+ const void *inbuf;
+ struct stat stat;
+ void *outbuf;
+ bool swap;
+ int outfd;
+ int infd;
+
+ atexit(cleanup);
+
+ if (argc != 3)
+ error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+
+ infile = argv[1];
+ outfile = argv[2];
+
+ infd = open(infile, O_RDONLY);
+ if (infd < 0)
+ error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+
+ if (fstat(infd, &stat) != 0)
+ error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+
+ inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
+ if (inbuf == MAP_FAILED)
+ error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+
+ close(infd);
+
+ inhdr = inbuf;
+
+ if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
+ error(EXIT_FAILURE, 0, "Not an ELF file");
+
+ if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
+ error(EXIT_FAILURE, 0, "Unsupported ELF class");
+
+ swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
+
+ if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
+ error(EXIT_FAILURE, 0, "Not a shared object");
+
+ if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
+ error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
+ inhdr->e_machine);
+ }
+
+ e_flags = read_elf_word(inhdr->e_flags, swap);
+
+ if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
+ error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
+ EF_ARM_EABI_VERSION(e_flags));
+ }
+
+ if (e_flags & EF_ARM_ABI_FLOAT_HARD)
+ error(EXIT_FAILURE, 0,
+ "Unexpected hard-float flag set in e_flags");
+
+ clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
+
+ outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+ if (outfd < 0)
+ error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+
+ if (ftruncate(outfd, stat.st_size) != 0)
+ error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+
+ outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ outfd, 0);
+ if (outbuf == MAP_FAILED)
+ error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+
+ close(outfd);
+
+ memcpy(outbuf, inbuf, stat.st_size);
+
+ if (clear_soft_float) {
+ Elf32_Ehdr *outhdr;
+
+ outhdr = outbuf;
+ e_flags &= ~EF_ARM_ABI_FLOAT_SOFT;
+ write_elf_word(e_flags, &outhdr->e_flags, swap);
+ }
+
+ if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
+ error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+
+ return EXIT_SUCCESS;
+}
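The header comment above explains which EF_ARM_ABI_FLOAT_* bits vdsomunge expects and clears. A minimal host-side checker for eyeballing the result, assuming a little-endian ELF32 input and host (no byte swapping, unlike vdsomunge itself):

/* Print the ARM ABI float flags of an ELF32 file's e_flags word. */
#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef EF_ARM_ABI_FLOAT_SOFT
#define EF_ARM_ABI_FLOAT_SOFT 0x200
#endif
#ifndef EF_ARM_ABI_FLOAT_HARD
#define EF_ARM_ABI_FLOAT_HARD 0x400
#endif

int main(int argc, char **argv)
{
	Elf32_Ehdr ehdr;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "Usage: %s <elf32-file>\n", argv[0]);
		return EXIT_FAILURE;
	}

	f = fopen(argv[1], "rb");
	if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
		perror(argv[1]);
		return EXIT_FAILURE;
	}
	fclose(f);

	printf("e_flags = %#x (soft=%d, hard=%d)\n",
	       (unsigned)ehdr.e_flags,
	       !!(ehdr.e_flags & EF_ARM_ABI_FLOAT_SOFT),
	       !!(ehdr.e_flags & EF_ARM_ABI_FLOAT_HARD));
	return EXIT_SUCCESS;
}

Running it on vdso.so.raw and then on the munged output should show the soft-float bit dropping from e_flags whenever the toolchain had set it.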
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..79214d5ff097
--- /dev/null
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2015 Mentor Graphics Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/hrtimer.h>
+#include <linux/time.h>
+#include <asm/arch_timer.h>
+#include <asm/barrier.h>
+#include <asm/bug.h>
+#include <asm/page.h>
+#include <asm/unistd.h>
+#include <asm/vdso_datapage.h>
+
+#ifndef CONFIG_AEABI
+#error This code depends on AEABI system call conventions
+#endif
+
+extern struct vdso_data *__get_datapage(void);
+
+static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
+{
+ u32 seq;
+repeat:
+ seq = ACCESS_ONCE(vdata->seq_count);
+ if (seq & 1) {
+ cpu_relax();
+ goto repeat;
+ }
+ return seq;
+}
+
+static notrace u32 vdso_read_begin(const struct vdso_data *vdata)
+{
+ u32 seq;
+
+ seq = __vdso_read_begin(vdata);
+
+ smp_rmb(); /* Pairs with smp_wmb in vdso_write_end */
+ return seq;
+}
+
+static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
+{
+ smp_rmb(); /* Pairs with smp_wmb in vdso_write_begin */
+ return vdata->seq_count != start;
+}
+
+static notrace long clock_gettime_fallback(clockid_t _clkid,
+ struct timespec *_ts)
+{
+ register struct timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static notrace int do_realtime_coarse(struct timespec *ts,
+ struct vdso_data *vdata)
+{
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vdata);
+
+ ts->tv_sec = vdata->xtime_coarse_sec;
+ ts->tv_nsec = vdata->xtime_coarse_nsec;
+
+ } while (vdso_read_retry(vdata, seq));
+
+ return 0;
+}
+
+static notrace int do_monotonic_coarse(struct timespec *ts,
+ struct vdso_data *vdata)
+{
+ struct timespec tomono;
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vdata);
+
+ ts->tv_sec = vdata->xtime_coarse_sec;
+ ts->tv_nsec = vdata->xtime_coarse_nsec;
+
+ tomono.tv_sec = vdata->wtm_clock_sec;
+ tomono.tv_nsec = vdata->wtm_clock_nsec;
+
+ } while (vdso_read_retry(vdata, seq));
+
+ ts->tv_sec += tomono.tv_sec;
+ timespec_add_ns(ts, tomono.tv_nsec);
+
+ return 0;
+}
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+
+static notrace u64 get_ns(struct vdso_data *vdata)
+{
+ u64 cycle_delta;
+ u64 cycle_now;
+ u64 nsec;
+
+ cycle_now = arch_counter_get_cntvct();
+
+ cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
+
+ nsec = (cycle_delta * vdata->cs_mult) + vdata->xtime_clock_snsec;
+ nsec >>= vdata->cs_shift;
+
+ return nsec;
+}
+
+static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
+{
+ u64 nsecs;
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vdata);
+
+ if (!vdata->tk_is_cntvct)
+ return -1;
+
+ ts->tv_sec = vdata->xtime_clock_sec;
+ nsecs = get_ns(vdata);
+
+ } while (vdso_read_retry(vdata, seq));
+
+ ts->tv_nsec = 0;
+ timespec_add_ns(ts, nsecs);
+
+ return 0;
+}
+
+static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
+{
+ struct timespec tomono;
+ u64 nsecs;
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vdata);
+
+ if (!vdata->tk_is_cntvct)
+ return -1;
+
+ ts->tv_sec = vdata->xtime_clock_sec;
+ nsecs = get_ns(vdata);
+
+ tomono.tv_sec = vdata->wtm_clock_sec;
+ tomono.tv_nsec = vdata->wtm_clock_nsec;
+
+ } while (vdso_read_retry(vdata, seq));
+
+ ts->tv_sec += tomono.tv_sec;
+ ts->tv_nsec = 0;
+ timespec_add_ns(ts, nsecs + tomono.tv_nsec);
+
+ return 0;
+}
+
+#else /* CONFIG_ARM_ARCH_TIMER */
+
+static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
+{
+ return -1;
+}
+
+static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
+{
+ return -1;
+}
+
+#endif /* CONFIG_ARM_ARCH_TIMER */
+
+notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
+{
+ struct vdso_data *vdata;
+ int ret = -1;
+
+ vdata = __get_datapage();
+
+ switch (clkid) {
+ case CLOCK_REALTIME_COARSE:
+ ret = do_realtime_coarse(ts, vdata);
+ break;
+ case CLOCK_MONOTONIC_COARSE:
+ ret = do_monotonic_coarse(ts, vdata);
+ break;
+ case CLOCK_REALTIME:
+ ret = do_realtime(ts, vdata);
+ break;
+ case CLOCK_MONOTONIC:
+ ret = do_monotonic(ts, vdata);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ ret = clock_gettime_fallback(clkid, ts);
+
+ return ret;
+}
+
+static notrace long gettimeofday_fallback(struct timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("r1") = _tz;
+ register struct timeval *tv asm("r0") = _tv;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_gettimeofday;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ struct timespec ts;
+ struct vdso_data *vdata;
+ int ret;
+
+ vdata = __get_datapage();
+
+ ret = do_realtime(&ts, vdata);
+ if (ret)
+ return gettimeofday_fallback(tv, tz);
+
+ if (tv) {
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / 1000;
+ }
+ if (tz) {
+ tz->tz_minuteswest = vdata->tz_minuteswest;
+ tz->tz_dsttime = vdata->tz_dsttime;
+ }
+
+ return ret;
+}
+
+/* Avoid unresolved references emitted by GCC */
+
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
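The __vdso_read_begin/vdso_read_retry pair above is the reader side of a sequence counter: wait for an even count, read the data page, and retry if the count changed in the meantime. A rough userspace sketch of the same protocol using C11 atomics (illustrative only; the kernel writer uses its own smp_wmb/smp_rmb pairing and updates the real vdso_data page):

/* Sequence-counter sketch: writer makes the count odd while updating,
 * readers retry until they see the same even count before and after. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned seq;
static unsigned long data_sec, data_nsec;

static void write_begin(void)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed); /* odd */
	atomic_thread_fence(memory_order_release);
}

static void write_end(void)
{
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed); /* even */
}

static void read_snapshot(unsigned long *sec, unsigned long *nsec)
{
	unsigned start;

	do {
		do {
			start = atomic_load_explicit(&seq, memory_order_relaxed);
		} while (start & 1);	/* writer in progress, spin */
		atomic_thread_fence(memory_order_acquire);

		*sec = data_sec;
		*nsec = data_nsec;

		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&seq, memory_order_relaxed) != start);
}

int main(void)
{
	unsigned long sec, nsec;

	write_begin();
	data_sec = 1;
	data_nsec = 500000000;
	write_end();

	read_snapshot(&sec, &nsec);
	printf("%lu.%09lu\n", sec, nsec);
	return 0;
}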
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 263a2044c65b..224081ccc92f 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -53,105 +53,33 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
static __read_mostly int xen_events_irq = -1;
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
- unsigned int domid)
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages)
{
- int rc;
- struct xen_add_to_physmap_range xatp = {
- .domid = DOMID_SELF,
- .foreign_domid = domid,
- .size = 1,
- .space = XENMAPSPACE_gmfn_foreign,
- };
- xen_ulong_t idx = fgmfn;
- xen_pfn_t gpfn = lpfn;
- int err = 0;
-
- set_xen_guest_handle(xatp.idxs, &idx);
- set_xen_guest_handle(xatp.gpfns, &gpfn);
- set_xen_guest_handle(xatp.errs, &err);
-
- rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
- if (rc || err) {
- pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
- rc, err, lpfn, fgmfn);
- return 1;
- }
- return 0;
-}
-
-struct remap_data {
- xen_pfn_t fgmfn; /* foreign domain's gmfn */
- pgprot_t prot;
- domid_t domid;
- struct vm_area_struct *vma;
- int index;
- struct page **pages;
- struct xen_remap_mfn_info *info;
-};
-
-static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
-{
- struct remap_data *info = data;
- struct page *page = info->pages[info->index++];
- unsigned long pfn = page_to_pfn(page);
- pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
-
- if (map_foreign_page(pfn, info->fgmfn, info->domid))
- return -EFAULT;
- set_pte_at(info->vma->vm_mm, addr, ptep, pte);
-
- return 0;
+ return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+ prot, domid, pages);
}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+/* Not used by XENFEAT_auto_translated guests. */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t mfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages)
+ unsigned long addr,
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
{
- int err;
- struct remap_data data;
-
- /* TBD: Batching, current sole caller only does page at a time */
- if (nr > 1)
- return -EINVAL;
-
- data.fgmfn = mfn;
- data.prot = prot;
- data.domid = domid;
- data.vma = vma;
- data.index = 0;
- data.pages = pages;
- err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
- remap_pte_fn, &data);
- return err;
+ return -ENOSYS;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
{
- int i;
-
- for (i = 0; i < nr; i++) {
- struct xen_remove_from_physmap xrp;
- unsigned long rc, pfn;
-
- pfn = page_to_pfn(pages[i]);
-
- xrp.domid = DOMID_SELF;
- xrp.gpfn = pfn;
- rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
- if (rc) {
- pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
- pfn, rc);
- return rc;
- }
- }
- return 0;
+ return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 793551d15f1d..498325074a06 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -4,6 +4,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
+#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -21,6 +22,20 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
+unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+ struct memblock_region *reg;
+ gfp_t flags = __GFP_NOWARN;
+
+ for_each_memblock(memory, reg) {
+ if (reg->base < (phys_addr_t)0xffffffff) {
+ flags |= __GFP_DMA;
+ break;
+ }
+ }
+ return __get_free_pages(flags, order);
+}
+
enum dma_cache_op {
DMA_UNMAP,
DMA_MAP,
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b8e97331ffb..7796af4b1d6f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,7 +1,9 @@
config ARM64
def_bool y
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ACPI_GENERIC_GSI if ACPI
+ select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -29,6 +31,7 @@ config ARM64
select GENERIC_EARLY_IOREMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
@@ -143,6 +146,13 @@ config KERNEL_MODE_NEON
config FIX_EARLYCON_MEM
def_bool y
+config PGTABLE_LEVELS
+ int
+ default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
+ default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
+ default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
+ default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -174,9 +184,16 @@ config ARCH_FSL_LS2085A
config ARCH_MEDIATEK
bool "Mediatek MT65xx & MT81xx ARMv8 SoC"
select ARM_GIC
+ select PINCTRL
help
Support for Mediatek MT65xx & MT81xx ARMv8 SoCs
+config ARCH_QCOM
+ bool "Qualcomm Platforms"
+ select PINCTRL
+ help
+ This enables support for the ARMv8 based Qualcomm chipsets.
+
config ARCH_SEATTLE
bool "AMD Seattle SoC Family"
help
@@ -208,6 +225,11 @@ config ARCH_TEGRA_132_SOC
but contains an NVIDIA Denver CPU complex in place of
Tegra124's "4+1" Cortex-A15 CPU complex.
+config ARCH_SPRD
+ bool "Spreadtrum SoC platform"
+ help
+ Support for Spreadtrum ARM based SoCs
+
config ARCH_THUNDER
bool "Cavium Inc. Thunder SoC Family"
help
@@ -228,6 +250,11 @@ config ARCH_XGENE
help
This enables support for AppliedMicro X-Gene SOC Family
+config ARCH_ZYNQMP
+ bool "Xilinx ZynqMP Family"
+ help
+ This enables support for Xilinx ZynqMP Family
+
endmenu
menu "Bus support"
@@ -361,6 +388,27 @@ config ARM64_ERRATUM_832075
If unsure, say Y.
+config ARM64_ERRATUM_845719
+ bool "Cortex-A53: 845719: a load might read incorrect data"
+ depends on COMPAT
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 845719 on Cortex-A53 parts up to r0p4.
+
+ When running a compat (AArch32) userspace on an affected Cortex-A53
+ part, a load at EL0 from a virtual address that matches the bottom 32
+ bits of the virtual address used by a recent load at (AArch64) EL1
+ might return incorrect data.
+
+ The workaround is to write the contextidr_el1 register on exception
+ return to a 32-bit task.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
endmenu
@@ -413,13 +461,6 @@ config ARM64_VA_BITS
default 42 if ARM64_VA_BITS_42
default 48 if ARM64_VA_BITS_48
-config ARM64_PGTABLE_LEVELS
- int
- default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
- default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
- default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
- default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
-
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
help
@@ -455,8 +496,8 @@ config SCHED_SMT
places. If unsure say N here.
config NR_CPUS
- int "Maximum number of CPUs (2-64)"
- range 2 64
+ int "Maximum number of CPUs (2-4096)"
+ range 2 4096
depends on SMP
# These have to remain sorted largest to smallest
default "64"
@@ -470,6 +511,10 @@ config HOTPLUG_CPU
source kernel/Kconfig.preempt
+config UP_LATE_INIT
+ def_bool y
+ depends on !SMP
+
config HZ
int
default 100
@@ -670,7 +715,7 @@ source "fs/Kconfig.binfmt"
config COMPAT
bool "Kernel support for 32-bit EL0"
- depends on !ARM64_64K_PAGES
+ depends on !ARM64_64K_PAGES || EXPERT
select COMPAT_BINFMT_ELF
select HAVE_UID16
select OLD_SIGSUSPEND3
@@ -681,6 +726,10 @@ config COMPAT
the user helper functions, VFP support and the ptrace interface are
handled appropriately by the kernel.
+ If you also enabled CONFIG_ARM64_64K_PAGES, please be aware that you
+ will only be able to execute AArch32 binaries that were compiled with
+ 64k aligned segments.
+
If you want to execute 32-bit userspace applications, say Y.
config SYSVIPC_COMPAT
@@ -712,6 +761,8 @@ source "drivers/Kconfig"
source "drivers/firmware/Kconfig"
+source "drivers/acpi/Kconfig"
+
source "fs/Kconfig"
source "arch/arm64/kvm/Kconfig"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 4a8741073c90..d6285ef9b5f9 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -89,4 +89,6 @@ config DEBUG_ALIGN_RODATA
If in doubt, say N
+source "drivers/hwtracing/coresight/Kconfig"
+
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 69ceedc982a5..4d2a925998f9 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
-libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
+core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
KBUILD_IMAGE := Image.gz
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index e0350caf049e..ad26a752b976 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -5,5 +5,8 @@ dts-dirs += cavium
dts-dirs += exynos
dts-dirs += freescale
dts-dirs += mediatek
+dts-dirs += qcom
+dts-dirs += sprd
+dts-dirs += xilinx
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 2e25de0800b9..83578e766b94 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -45,6 +45,10 @@
status = "ok";
};
+&sgenet1 {
+ status = "ok";
+};
+
&xgenet {
status = "ok";
};
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index a857794432d6..c8d3e0e86678 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -102,6 +102,7 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ dma-ranges = <0x0 0x0 0x0 0x0 0x400 0x0>;
clocks {
#address-cells = <2>;
@@ -186,6 +187,16 @@
clock-output-names = "sge0clk";
};
+ sge1clk: sge1clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ csr-mask = <0xc>;
+ clock-output-names = "sge1clk";
+ };
+
xge0clk: xge0clk@1f61c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
@@ -352,6 +363,15 @@
reg-names = "csr-reg";
clock-output-names = "pcie4clk";
};
+
+ dmaclk: dmaclk@1f27c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f27c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "dmaclk";
+ };
};
pcie0: pcie@1f2b0000 {
@@ -628,13 +648,30 @@
<0x0 0x1f200000 0x0 0Xc300>,
<0x0 0x1B000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
- interrupts = <0x0 0xA0 0x4>;
+ interrupts = <0x0 0xA0 0x4>,
+ <0x0 0xA1 0x4>;
dma-coherent;
clocks = <&sge0clk 0>;
local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "sgmii";
};
+ sgenet1: ethernet@1f210030 {
+ compatible = "apm,xgene1-sgenet";
+ status = "disabled";
+ reg = <0x0 0x1f210030 0x0 0xd100>,
+ <0x0 0x1f200000 0x0 0Xc300>,
+ <0x0 0x1B000000 0x0 0X8000>;
+ reg-names = "enet_csr", "ring_csr", "ring_cmd";
+ interrupts = <0x0 0xAC 0x4>,
+ <0x0 0xAD 0x4>;
+ port-id = <1>;
+ dma-coherent;
+ clocks = <&sge1clk 0>;
+ local-mac-address = [00 00 00 00 00 00];
+ phy-connection-type = "sgmii";
+ };
+
xgenet: ethernet@1f610000 {
compatible = "apm,xgene1-xgenet";
status = "disabled";
@@ -642,7 +679,8 @@
<0x0 0x1f600000 0x0 0Xc300>,
<0x0 0x18000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
- interrupts = <0x0 0x60 0x4>;
+ interrupts = <0x0 0x60 0x4>,
+ <0x0 0x61 0x4>;
dma-coherent;
clocks = <&xge0clk 0>;
/* mac address will be overwritten by the bootloader */
@@ -656,5 +694,21 @@
interrupts = <0x0 0x41 0x4>;
clocks = <&rngpkaclk 0>;
};
+
+ dma: dma@1f270000 {
+ compatible = "apm,xgene-storm-dma";
+ device_type = "dma";
+ reg = <0x0 0x1f270000 0x0 0x10000>,
+ <0x0 0x1f200000 0x0 0x10000>,
+ <0x0 0x1b008000 0x0 0x2000>,
+ <0x0 0x1054a000 0x0 0x100>;
+ interrupts = <0x0 0x82 0x4>,
+ <0x0 0xb8 0x4>,
+ <0x0 0xb9 0x4>,
+ <0x0 0xba 0x4>,
+ <0x0 0xbb 0x4>;
+ dma-coherent;
+ clocks = <&dmaclk 0>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index c138b95a8356..351c95bda89e 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -21,6 +21,20 @@
clock-output-names = "juno_mb:clk25mhz";
};
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "juno_mb:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "juno_mb:refclk32khz";
+ };
+
motherboard {
compatible = "arm,vexpress,v2p-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
@@ -66,6 +80,15 @@
#size-cells = <1>;
ranges = <0 3 0 0x200000>;
+ v2m_sysctl: sysctl@020000 {
+ compatible = "arm,sp810", "arm,primecell";
+ reg = <0x020000 0x1000>;
+ clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&mb_clk24mhz>;
+ clock-names = "refclk", "timclk", "apb_pclk";
+ #clock-cells = <1>;
+ clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+ };
+
mmci@050000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
@@ -106,16 +129,16 @@
compatible = "arm,sp804", "arm,primecell";
reg = <0x110000 0x10000>;
interrupts = <9>;
- clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
- clock-names = "timclken1", "apb_pclk";
+ clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&mb_clk24mhz>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
};
v2m_timer23: timer@120000 {
compatible = "arm,sp804", "arm,primecell";
reg = <0x120000 0x10000>;
interrupts = <9>;
- clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
- clock-names = "timclken1", "apb_pclk";
+ clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&mb_clk24mhz>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
};
rtc@170000 {
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index 133ee59de2d7..5e9110a3353d 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -120,12 +120,18 @@
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&A57_0>,
+ <&A57_1>,
+ <&A53_0>,
+ <&A53_1>,
+ <&A53_2>,
+ <&A53_3>;
};
/include/ "juno-clocks.dtsi"
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt8173-pinfunc.h
new file mode 100644
index 000000000000..d2f3809af70e
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-pinfunc.h
@@ -0,0 +1,682 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DTS_MT8173_PINFUNC_H
+#define __DTS_MT8173_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT8173_PIN_0_EINT0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8173_PIN_0_EINT0__FUNC_IRDA_PDN (MTK_PIN_NO(0) | 1)
+#define MT8173_PIN_0_EINT0__FUNC_I2S1_WS (MTK_PIN_NO(0) | 2)
+#define MT8173_PIN_0_EINT0__FUNC_AUD_SPDIF (MTK_PIN_NO(0) | 3)
+#define MT8173_PIN_0_EINT0__FUNC_UTXD0 (MTK_PIN_NO(0) | 4)
+#define MT8173_PIN_0_EINT0__FUNC_DBG_MON_A_20_ (MTK_PIN_NO(0) | 7)
+
+#define MT8173_PIN_1_EINT1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8173_PIN_1_EINT1__FUNC_IRDA_RXD (MTK_PIN_NO(1) | 1)
+#define MT8173_PIN_1_EINT1__FUNC_I2S1_BCK (MTK_PIN_NO(1) | 2)
+#define MT8173_PIN_1_EINT1__FUNC_SDA5 (MTK_PIN_NO(1) | 3)
+#define MT8173_PIN_1_EINT1__FUNC_URXD0 (MTK_PIN_NO(1) | 4)
+#define MT8173_PIN_1_EINT1__FUNC_DBG_MON_A_21_ (MTK_PIN_NO(1) | 7)
+
+#define MT8173_PIN_2_EINT2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8173_PIN_2_EINT2__FUNC_IRDA_TXD (MTK_PIN_NO(2) | 1)
+#define MT8173_PIN_2_EINT2__FUNC_I2S1_MCK (MTK_PIN_NO(2) | 2)
+#define MT8173_PIN_2_EINT2__FUNC_SCL5 (MTK_PIN_NO(2) | 3)
+#define MT8173_PIN_2_EINT2__FUNC_UTXD3 (MTK_PIN_NO(2) | 4)
+#define MT8173_PIN_2_EINT2__FUNC_DBG_MON_A_22_ (MTK_PIN_NO(2) | 7)
+
+#define MT8173_PIN_3_EINT3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8173_PIN_3_EINT3__FUNC_DSI1_TE (MTK_PIN_NO(3) | 1)
+#define MT8173_PIN_3_EINT3__FUNC_I2S1_DO_1 (MTK_PIN_NO(3) | 2)
+#define MT8173_PIN_3_EINT3__FUNC_SDA3 (MTK_PIN_NO(3) | 3)
+#define MT8173_PIN_3_EINT3__FUNC_URXD3 (MTK_PIN_NO(3) | 4)
+#define MT8173_PIN_3_EINT3__FUNC_DBG_MON_A_23_ (MTK_PIN_NO(3) | 7)
+
+#define MT8173_PIN_4_EINT4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8173_PIN_4_EINT4__FUNC_DISP_PWM1 (MTK_PIN_NO(4) | 1)
+#define MT8173_PIN_4_EINT4__FUNC_I2S1_DO_2 (MTK_PIN_NO(4) | 2)
+#define MT8173_PIN_4_EINT4__FUNC_SCL3 (MTK_PIN_NO(4) | 3)
+#define MT8173_PIN_4_EINT4__FUNC_UCTS3 (MTK_PIN_NO(4) | 4)
+#define MT8173_PIN_4_EINT4__FUNC_SFWP_B (MTK_PIN_NO(4) | 6)
+
+#define MT8173_PIN_5_EINT5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8173_PIN_5_EINT5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 1)
+#define MT8173_PIN_5_EINT5__FUNC_I2S2_WS (MTK_PIN_NO(5) | 2)
+#define MT8173_PIN_5_EINT5__FUNC_SPI_CK_3_ (MTK_PIN_NO(5) | 3)
+#define MT8173_PIN_5_EINT5__FUNC_URTS3 (MTK_PIN_NO(5) | 4)
+#define MT8173_PIN_5_EINT5__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(5) | 5)
+#define MT8173_PIN_5_EINT5__FUNC_SFOUT (MTK_PIN_NO(5) | 6)
+
+#define MT8173_PIN_6_EINT6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8173_PIN_6_EINT6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 1)
+#define MT8173_PIN_6_EINT6__FUNC_I2S2_BCK (MTK_PIN_NO(6) | 2)
+#define MT8173_PIN_6_EINT6__FUNC_SPI_MI_3_ (MTK_PIN_NO(6) | 3)
+#define MT8173_PIN_6_EINT6__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(6) | 5)
+#define MT8173_PIN_6_EINT6__FUNC_SFCS0 (MTK_PIN_NO(6) | 6)
+
+#define MT8173_PIN_7_EINT7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8173_PIN_7_EINT7__FUNC_PCM1_DI (MTK_PIN_NO(7) | 1)
+#define MT8173_PIN_7_EINT7__FUNC_I2S2_DI_1 (MTK_PIN_NO(7) | 2)
+#define MT8173_PIN_7_EINT7__FUNC_SPI_MO_3_ (MTK_PIN_NO(7) | 3)
+#define MT8173_PIN_7_EINT7__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(7) | 5)
+#define MT8173_PIN_7_EINT7__FUNC_SFHOLD (MTK_PIN_NO(7) | 6)
+
+#define MT8173_PIN_8_EINT8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8173_PIN_8_EINT8__FUNC_PCM1_DO (MTK_PIN_NO(8) | 1)
+#define MT8173_PIN_8_EINT8__FUNC_I2S2_DI_2 (MTK_PIN_NO(8) | 2)
+#define MT8173_PIN_8_EINT8__FUNC_SPI_CS_3_ (MTK_PIN_NO(8) | 3)
+#define MT8173_PIN_8_EINT8__FUNC_AUD_SPDIF (MTK_PIN_NO(8) | 4)
+#define MT8173_PIN_8_EINT8__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(8) | 5)
+#define MT8173_PIN_8_EINT8__FUNC_SFIN (MTK_PIN_NO(8) | 6)
+
+#define MT8173_PIN_9_EINT9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8173_PIN_9_EINT9__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(9) | 1)
+#define MT8173_PIN_9_EINT9__FUNC_I2S2_MCK (MTK_PIN_NO(9) | 2)
+#define MT8173_PIN_9_EINT9__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(9) | 4)
+#define MT8173_PIN_9_EINT9__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(9) | 5)
+#define MT8173_PIN_9_EINT9__FUNC_SFCK (MTK_PIN_NO(9) | 6)
+
+#define MT8173_PIN_10_EINT10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8173_PIN_10_EINT10__FUNC_CLKM0 (MTK_PIN_NO(10) | 1)
+#define MT8173_PIN_10_EINT10__FUNC_DSI1_TE (MTK_PIN_NO(10) | 2)
+#define MT8173_PIN_10_EINT10__FUNC_DISP_PWM1 (MTK_PIN_NO(10) | 3)
+#define MT8173_PIN_10_EINT10__FUNC_PWM4 (MTK_PIN_NO(10) | 4)
+#define MT8173_PIN_10_EINT10__FUNC_IRDA_RXD (MTK_PIN_NO(10) | 5)
+
+#define MT8173_PIN_11_EINT11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8173_PIN_11_EINT11__FUNC_CLKM1 (MTK_PIN_NO(11) | 1)
+#define MT8173_PIN_11_EINT11__FUNC_I2S3_WS (MTK_PIN_NO(11) | 2)
+#define MT8173_PIN_11_EINT11__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(11) | 3)
+#define MT8173_PIN_11_EINT11__FUNC_PWM5 (MTK_PIN_NO(11) | 4)
+#define MT8173_PIN_11_EINT11__FUNC_IRDA_TXD (MTK_PIN_NO(11) | 5)
+#define MT8173_PIN_11_EINT11__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(11) | 6)
+#define MT8173_PIN_11_EINT11__FUNC_DBG_MON_B_30_ (MTK_PIN_NO(11) | 7)
+
+#define MT8173_PIN_12_EINT12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8173_PIN_12_EINT12__FUNC_CLKM2 (MTK_PIN_NO(12) | 1)
+#define MT8173_PIN_12_EINT12__FUNC_I2S3_BCK (MTK_PIN_NO(12) | 2)
+#define MT8173_PIN_12_EINT12__FUNC_SRCLKENA0 (MTK_PIN_NO(12) | 3)
+#define MT8173_PIN_12_EINT12__FUNC_I2S2_WS (MTK_PIN_NO(12) | 5)
+#define MT8173_PIN_12_EINT12__FUNC_DBG_MON_B_32_ (MTK_PIN_NO(12) | 7)
+
+#define MT8173_PIN_13_EINT13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8173_PIN_13_EINT13__FUNC_CLKM3 (MTK_PIN_NO(13) | 1)
+#define MT8173_PIN_13_EINT13__FUNC_I2S3_MCK (MTK_PIN_NO(13) | 2)
+#define MT8173_PIN_13_EINT13__FUNC_SRCLKENA0 (MTK_PIN_NO(13) | 3)
+#define MT8173_PIN_13_EINT13__FUNC_I2S2_BCK (MTK_PIN_NO(13) | 5)
+#define MT8173_PIN_13_EINT13__FUNC_DBG_MON_A_32_ (MTK_PIN_NO(13) | 7)
+
+#define MT8173_PIN_14_EINT14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8173_PIN_14_EINT14__FUNC_CMDAT0 (MTK_PIN_NO(14) | 1)
+#define MT8173_PIN_14_EINT14__FUNC_CMCSD0 (MTK_PIN_NO(14) | 2)
+#define MT8173_PIN_14_EINT14__FUNC_CLKM2 (MTK_PIN_NO(14) | 4)
+#define MT8173_PIN_14_EINT14__FUNC_DBG_MON_B_6_ (MTK_PIN_NO(14) | 7)
+
+#define MT8173_PIN_15_EINT15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8173_PIN_15_EINT15__FUNC_CMDAT1 (MTK_PIN_NO(15) | 1)
+#define MT8173_PIN_15_EINT15__FUNC_CMCSD1 (MTK_PIN_NO(15) | 2)
+#define MT8173_PIN_15_EINT15__FUNC_CMFLASH (MTK_PIN_NO(15) | 3)
+#define MT8173_PIN_15_EINT15__FUNC_CLKM3 (MTK_PIN_NO(15) | 4)
+#define MT8173_PIN_15_EINT15__FUNC_DBG_MON_B_29_ (MTK_PIN_NO(15) | 7)
+
+#define MT8173_PIN_16_IDDIG__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8173_PIN_16_IDDIG__FUNC_IDDIG (MTK_PIN_NO(16) | 1)
+#define MT8173_PIN_16_IDDIG__FUNC_CMFLASH (MTK_PIN_NO(16) | 2)
+#define MT8173_PIN_16_IDDIG__FUNC_PWM5 (MTK_PIN_NO(16) | 4)
+
+#define MT8173_PIN_17_WATCHDOG__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8173_PIN_17_WATCHDOG__FUNC_WATCHDOG_AO (MTK_PIN_NO(17) | 1)
+
+#define MT8173_PIN_18_CEC__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8173_PIN_18_CEC__FUNC_CEC (MTK_PIN_NO(18) | 1)
+
+#define MT8173_PIN_19_HDMISCK__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8173_PIN_19_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(19) | 1)
+#define MT8173_PIN_19_HDMISCK__FUNC_HDCP_SCL (MTK_PIN_NO(19) | 2)
+
+#define MT8173_PIN_20_HDMISD__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8173_PIN_20_HDMISD__FUNC_HDMISD (MTK_PIN_NO(20) | 1)
+#define MT8173_PIN_20_HDMISD__FUNC_HDCP_SDA (MTK_PIN_NO(20) | 2)
+
+#define MT8173_PIN_21_HTPLG__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8173_PIN_21_HTPLG__FUNC_HTPLG (MTK_PIN_NO(21) | 1)
+
+#define MT8173_PIN_22_MSDC3_DAT0__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8173_PIN_22_MSDC3_DAT0__FUNC_MSDC3_DAT0 (MTK_PIN_NO(22) | 1)
+
+#define MT8173_PIN_23_MSDC3_DAT1__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8173_PIN_23_MSDC3_DAT1__FUNC_MSDC3_DAT1 (MTK_PIN_NO(23) | 1)
+
+#define MT8173_PIN_24_MSDC3_DAT2__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8173_PIN_24_MSDC3_DAT2__FUNC_MSDC3_DAT2 (MTK_PIN_NO(24) | 1)
+
+#define MT8173_PIN_25_MSDC3_DAT3__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8173_PIN_25_MSDC3_DAT3__FUNC_MSDC3_DAT3 (MTK_PIN_NO(25) | 1)
+
+#define MT8173_PIN_26_MSDC3_CLK__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8173_PIN_26_MSDC3_CLK__FUNC_MSDC3_CLK (MTK_PIN_NO(26) | 1)
+
+#define MT8173_PIN_27_MSDC3_CMD__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8173_PIN_27_MSDC3_CMD__FUNC_MSDC3_CMD (MTK_PIN_NO(27) | 1)
+
+#define MT8173_PIN_28_MSDC3_DSL__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8173_PIN_28_MSDC3_DSL__FUNC_MSDC3_DSL (MTK_PIN_NO(28) | 1)
+
+#define MT8173_PIN_29_UCTS2__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8173_PIN_29_UCTS2__FUNC_UCTS2 (MTK_PIN_NO(29) | 1)
+
+#define MT8173_PIN_30_URTS2__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8173_PIN_30_URTS2__FUNC_URTS2 (MTK_PIN_NO(30) | 1)
+
+#define MT8173_PIN_31_URXD2__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8173_PIN_31_URXD2__FUNC_URXD2 (MTK_PIN_NO(31) | 1)
+#define MT8173_PIN_31_URXD2__FUNC_UTXD2 (MTK_PIN_NO(31) | 2)
+
+#define MT8173_PIN_32_UTXD2__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8173_PIN_32_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(32) | 1)
+#define MT8173_PIN_32_UTXD2__FUNC_URXD2 (MTK_PIN_NO(32) | 2)
+
+#define MT8173_PIN_33_DAICLK__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8173_PIN_33_DAICLK__FUNC_MRG_CLK (MTK_PIN_NO(33) | 1)
+#define MT8173_PIN_33_DAICLK__FUNC_PCM0_CLK (MTK_PIN_NO(33) | 2)
+
+#define MT8173_PIN_34_DAIPCMIN__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8173_PIN_34_DAIPCMIN__FUNC_MRG_DI (MTK_PIN_NO(34) | 1)
+#define MT8173_PIN_34_DAIPCMIN__FUNC_PCM0_DI (MTK_PIN_NO(34) | 2)
+
+#define MT8173_PIN_35_DAIPCMOUT__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8173_PIN_35_DAIPCMOUT__FUNC_MRG_DO (MTK_PIN_NO(35) | 1)
+#define MT8173_PIN_35_DAIPCMOUT__FUNC_PCM0_DO (MTK_PIN_NO(35) | 2)
+
+#define MT8173_PIN_36_DAISYNC__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8173_PIN_36_DAISYNC__FUNC_MRG_SYNC (MTK_PIN_NO(36) | 1)
+#define MT8173_PIN_36_DAISYNC__FUNC_PCM0_SYNC (MTK_PIN_NO(36) | 2)
+
+#define MT8173_PIN_37_EINT16__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8173_PIN_37_EINT16__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(37) | 1)
+#define MT8173_PIN_37_EINT16__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(37) | 2)
+#define MT8173_PIN_37_EINT16__FUNC_PWM0 (MTK_PIN_NO(37) | 3)
+#define MT8173_PIN_37_EINT16__FUNC_PWM1 (MTK_PIN_NO(37) | 4)
+#define MT8173_PIN_37_EINT16__FUNC_PWM2 (MTK_PIN_NO(37) | 5)
+#define MT8173_PIN_37_EINT16__FUNC_CLKM0 (MTK_PIN_NO(37) | 6)
+
+#define MT8173_PIN_38_CONN_RST__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8173_PIN_38_CONN_RST__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(38) | 1)
+#define MT8173_PIN_38_CONN_RST__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(38) | 2)
+#define MT8173_PIN_38_CONN_RST__FUNC_CLKM1 (MTK_PIN_NO(38) | 6)
+
+#define MT8173_PIN_39_CM2MCLK__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8173_PIN_39_CM2MCLK__FUNC_CM2MCLK (MTK_PIN_NO(39) | 1)
+#define MT8173_PIN_39_CM2MCLK__FUNC_CMCSD0 (MTK_PIN_NO(39) | 2)
+#define MT8173_PIN_39_CM2MCLK__FUNC_DBG_MON_A_17_ (MTK_PIN_NO(39) | 7)
+
+#define MT8173_PIN_40_CMPCLK__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8173_PIN_40_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(40) | 1)
+#define MT8173_PIN_40_CMPCLK__FUNC_CMCSK (MTK_PIN_NO(40) | 2)
+#define MT8173_PIN_40_CMPCLK__FUNC_CMCSD2 (MTK_PIN_NO(40) | 3)
+#define MT8173_PIN_40_CMPCLK__FUNC_DBG_MON_A_18_ (MTK_PIN_NO(40) | 7)
+
+#define MT8173_PIN_41_CMMCLK__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8173_PIN_41_CMMCLK__FUNC_CMMCLK (MTK_PIN_NO(41) | 1)
+#define MT8173_PIN_41_CMMCLK__FUNC_DBG_MON_A_19_ (MTK_PIN_NO(41) | 7)
+
+#define MT8173_PIN_42_DSI_TE__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8173_PIN_42_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(42) | 1)
+
+#define MT8173_PIN_43_SDA2__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8173_PIN_43_SDA2__FUNC_SDA2 (MTK_PIN_NO(43) | 1)
+
+#define MT8173_PIN_44_SCL2__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8173_PIN_44_SCL2__FUNC_SCL2 (MTK_PIN_NO(44) | 1)
+
+#define MT8173_PIN_45_SDA0__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8173_PIN_45_SDA0__FUNC_SDA0 (MTK_PIN_NO(45) | 1)
+
+#define MT8173_PIN_46_SCL0__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8173_PIN_46_SCL0__FUNC_SCL0 (MTK_PIN_NO(46) | 1)
+
+#define MT8173_PIN_47_RDN0_A__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8173_PIN_47_RDN0_A__FUNC_CMDAT2 (MTK_PIN_NO(47) | 1)
+
+#define MT8173_PIN_48_RDP0_A__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8173_PIN_48_RDP0_A__FUNC_CMDAT3 (MTK_PIN_NO(48) | 1)
+
+#define MT8173_PIN_49_RDN1_A__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8173_PIN_49_RDN1_A__FUNC_CMDAT4 (MTK_PIN_NO(49) | 1)
+
+#define MT8173_PIN_50_RDP1_A__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8173_PIN_50_RDP1_A__FUNC_CMDAT5 (MTK_PIN_NO(50) | 1)
+
+#define MT8173_PIN_51_RCN_A__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8173_PIN_51_RCN_A__FUNC_CMDAT6 (MTK_PIN_NO(51) | 1)
+
+#define MT8173_PIN_52_RCP_A__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8173_PIN_52_RCP_A__FUNC_CMDAT7 (MTK_PIN_NO(52) | 1)
+
+#define MT8173_PIN_53_RDN2_A__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8173_PIN_53_RDN2_A__FUNC_CMDAT8 (MTK_PIN_NO(53) | 1)
+#define MT8173_PIN_53_RDN2_A__FUNC_CMCSD3 (MTK_PIN_NO(53) | 2)
+
+#define MT8173_PIN_54_RDP2_A__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8173_PIN_54_RDP2_A__FUNC_CMDAT9 (MTK_PIN_NO(54) | 1)
+#define MT8173_PIN_54_RDP2_A__FUNC_CMCSD2 (MTK_PIN_NO(54) | 2)
+
+#define MT8173_PIN_55_RDN3_A__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8173_PIN_55_RDN3_A__FUNC_CMHSYNC (MTK_PIN_NO(55) | 1)
+#define MT8173_PIN_55_RDN3_A__FUNC_CMCSD1 (MTK_PIN_NO(55) | 2)
+
+#define MT8173_PIN_56_RDP3_A__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8173_PIN_56_RDP3_A__FUNC_CMVSYNC (MTK_PIN_NO(56) | 1)
+#define MT8173_PIN_56_RDP3_A__FUNC_CMCSD0 (MTK_PIN_NO(56) | 2)
+
+#define MT8173_PIN_57_MSDC0_DAT0__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(57) | 1)
+#define MT8173_PIN_57_MSDC0_DAT0__FUNC_I2S1_WS (MTK_PIN_NO(57) | 2)
+#define MT8173_PIN_57_MSDC0_DAT0__FUNC_DBG_MON_B_7_ (MTK_PIN_NO(57) | 7)
+
+#define MT8173_PIN_58_MSDC0_DAT1__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(58) | 1)
+#define MT8173_PIN_58_MSDC0_DAT1__FUNC_I2S1_BCK (MTK_PIN_NO(58) | 2)
+#define MT8173_PIN_58_MSDC0_DAT1__FUNC_DBG_MON_B_8_ (MTK_PIN_NO(58) | 7)
+
+#define MT8173_PIN_59_MSDC0_DAT2__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(59) | 1)
+#define MT8173_PIN_59_MSDC0_DAT2__FUNC_I2S1_MCK (MTK_PIN_NO(59) | 2)
+#define MT8173_PIN_59_MSDC0_DAT2__FUNC_DBG_MON_B_9_ (MTK_PIN_NO(59) | 7)
+
+#define MT8173_PIN_60_MSDC0_DAT3__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(60) | 1)
+#define MT8173_PIN_60_MSDC0_DAT3__FUNC_I2S1_DO_1 (MTK_PIN_NO(60) | 2)
+#define MT8173_PIN_60_MSDC0_DAT3__FUNC_DBG_MON_B_10_ (MTK_PIN_NO(60) | 7)
+
+#define MT8173_PIN_61_MSDC0_DAT4__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(61) | 1)
+#define MT8173_PIN_61_MSDC0_DAT4__FUNC_I2S1_DO_2 (MTK_PIN_NO(61) | 2)
+#define MT8173_PIN_61_MSDC0_DAT4__FUNC_DBG_MON_B_11_ (MTK_PIN_NO(61) | 7)
+
+#define MT8173_PIN_62_MSDC0_DAT5__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(62) | 1)
+#define MT8173_PIN_62_MSDC0_DAT5__FUNC_I2S2_WS (MTK_PIN_NO(62) | 2)
+#define MT8173_PIN_62_MSDC0_DAT5__FUNC_DBG_MON_B_12_ (MTK_PIN_NO(62) | 7)
+
+#define MT8173_PIN_63_MSDC0_DAT6__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(63) | 1)
+#define MT8173_PIN_63_MSDC0_DAT6__FUNC_I2S2_BCK (MTK_PIN_NO(63) | 2)
+#define MT8173_PIN_63_MSDC0_DAT6__FUNC_DBG_MON_B_13_ (MTK_PIN_NO(63) | 7)
+
+#define MT8173_PIN_64_MSDC0_DAT7__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(64) | 1)
+#define MT8173_PIN_64_MSDC0_DAT7__FUNC_I2S2_DI_1 (MTK_PIN_NO(64) | 2)
+#define MT8173_PIN_64_MSDC0_DAT7__FUNC_DBG_MON_B_14_ (MTK_PIN_NO(64) | 7)
+
+#define MT8173_PIN_65_MSDC0_CLK__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(65) | 1)
+#define MT8173_PIN_65_MSDC0_CLK__FUNC_DBG_MON_B_16_ (MTK_PIN_NO(65) | 7)
+
+#define MT8173_PIN_66_MSDC0_CMD__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(66) | 1)
+#define MT8173_PIN_66_MSDC0_CMD__FUNC_I2S2_DI_2 (MTK_PIN_NO(66) | 2)
+#define MT8173_PIN_66_MSDC0_CMD__FUNC_DBG_MON_B_15_ (MTK_PIN_NO(66) | 7)
+
+#define MT8173_PIN_67_MSDC0_DSL__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT8173_PIN_67_MSDC0_DSL__FUNC_MSDC0_DSL (MTK_PIN_NO(67) | 1)
+#define MT8173_PIN_67_MSDC0_DSL__FUNC_DBG_MON_B_17_ (MTK_PIN_NO(67) | 7)
+
+#define MT8173_PIN_68_MSDC0_RST___FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB (MTK_PIN_NO(68) | 1)
+#define MT8173_PIN_68_MSDC0_RST___FUNC_I2S2_MCK (MTK_PIN_NO(68) | 2)
+#define MT8173_PIN_68_MSDC0_RST___FUNC_DBG_MON_B_18_ (MTK_PIN_NO(68) | 7)
+
+#define MT8173_PIN_69_SPI_CK__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8173_PIN_69_SPI_CK__FUNC_SPI_CK_0_ (MTK_PIN_NO(69) | 1)
+#define MT8173_PIN_69_SPI_CK__FUNC_I2S3_DO_1 (MTK_PIN_NO(69) | 2)
+#define MT8173_PIN_69_SPI_CK__FUNC_PWM0 (MTK_PIN_NO(69) | 3)
+#define MT8173_PIN_69_SPI_CK__FUNC_PWM5 (MTK_PIN_NO(69) | 4)
+#define MT8173_PIN_69_SPI_CK__FUNC_I2S2_MCK (MTK_PIN_NO(69) | 5)
+#define MT8173_PIN_69_SPI_CK__FUNC_DBG_MON_B_19_ (MTK_PIN_NO(69) | 7)
+
+#define MT8173_PIN_70_SPI_MI__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8173_PIN_70_SPI_MI__FUNC_SPI_MI_0_ (MTK_PIN_NO(70) | 1)
+#define MT8173_PIN_70_SPI_MI__FUNC_I2S3_DO_2 (MTK_PIN_NO(70) | 2)
+#define MT8173_PIN_70_SPI_MI__FUNC_PWM1 (MTK_PIN_NO(70) | 3)
+#define MT8173_PIN_70_SPI_MI__FUNC_SPI_MO_0_ (MTK_PIN_NO(70) | 4)
+#define MT8173_PIN_70_SPI_MI__FUNC_I2S2_DI_1 (MTK_PIN_NO(70) | 5)
+#define MT8173_PIN_70_SPI_MI__FUNC_DSI1_TE (MTK_PIN_NO(70) | 6)
+#define MT8173_PIN_70_SPI_MI__FUNC_DBG_MON_B_20_ (MTK_PIN_NO(70) | 7)
+
+#define MT8173_PIN_71_SPI_MO__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8173_PIN_71_SPI_MO__FUNC_SPI_MO_0_ (MTK_PIN_NO(71) | 1)
+#define MT8173_PIN_71_SPI_MO__FUNC_I2S3_DO_3 (MTK_PIN_NO(71) | 2)
+#define MT8173_PIN_71_SPI_MO__FUNC_PWM2 (MTK_PIN_NO(71) | 3)
+#define MT8173_PIN_71_SPI_MO__FUNC_SPI_MI_0_ (MTK_PIN_NO(71) | 4)
+#define MT8173_PIN_71_SPI_MO__FUNC_I2S2_DI_2 (MTK_PIN_NO(71) | 5)
+#define MT8173_PIN_71_SPI_MO__FUNC_DBG_MON_B_21_ (MTK_PIN_NO(71) | 7)
+
+#define MT8173_PIN_72_SPI_CS__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8173_PIN_72_SPI_CS__FUNC_SPI_CS_0_ (MTK_PIN_NO(72) | 1)
+#define MT8173_PIN_72_SPI_CS__FUNC_I2S3_DO_4 (MTK_PIN_NO(72) | 2)
+#define MT8173_PIN_72_SPI_CS__FUNC_PWM3 (MTK_PIN_NO(72) | 3)
+#define MT8173_PIN_72_SPI_CS__FUNC_PWM6 (MTK_PIN_NO(72) | 4)
+#define MT8173_PIN_72_SPI_CS__FUNC_DISP_PWM1 (MTK_PIN_NO(72) | 5)
+#define MT8173_PIN_72_SPI_CS__FUNC_DBG_MON_B_22_ (MTK_PIN_NO(72) | 7)
+
+#define MT8173_PIN_73_MSDC1_DAT0__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(73) | 1)
+#define MT8173_PIN_73_MSDC1_DAT0__FUNC_DBG_MON_B_24_ (MTK_PIN_NO(73) | 7)
+
+#define MT8173_PIN_74_MSDC1_DAT1__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(74) | 1)
+#define MT8173_PIN_74_MSDC1_DAT1__FUNC_DBG_MON_B_25_ (MTK_PIN_NO(74) | 7)
+
+#define MT8173_PIN_75_MSDC1_DAT2__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(75) | 1)
+#define MT8173_PIN_75_MSDC1_DAT2__FUNC_DBG_MON_B_26_ (MTK_PIN_NO(75) | 7)
+
+#define MT8173_PIN_76_MSDC1_DAT3__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(76) | 1)
+#define MT8173_PIN_76_MSDC1_DAT3__FUNC_DBG_MON_B_27_ (MTK_PIN_NO(76) | 7)
+
+#define MT8173_PIN_77_MSDC1_CLK__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(77) | 1)
+#define MT8173_PIN_77_MSDC1_CLK__FUNC_DBG_MON_B_28_ (MTK_PIN_NO(77) | 7)
+
+#define MT8173_PIN_78_MSDC1_CMD__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(78) | 1)
+#define MT8173_PIN_78_MSDC1_CMD__FUNC_DBG_MON_B_23_ (MTK_PIN_NO(78) | 7)
+
+#define MT8173_PIN_79_PWRAP_SPI0_MI__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT8173_PIN_79_PWRAP_SPI0_MI__FUNC_PWRAP_SPIMI (MTK_PIN_NO(79) | 1)
+#define MT8173_PIN_79_PWRAP_SPI0_MI__FUNC_PWRAP_SPIMO (MTK_PIN_NO(79) | 2)
+
+#define MT8173_PIN_80_PWRAP_SPI0_MO__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT8173_PIN_80_PWRAP_SPI0_MO__FUNC_PWRAP_SPIMO (MTK_PIN_NO(80) | 1)
+#define MT8173_PIN_80_PWRAP_SPI0_MO__FUNC_PWRAP_SPIMI (MTK_PIN_NO(80) | 2)
+
+#define MT8173_PIN_81_PWRAP_SPI0_CK__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT8173_PIN_81_PWRAP_SPI0_CK__FUNC_PWRAP_SPICK (MTK_PIN_NO(81) | 1)
+
+#define MT8173_PIN_82_PWRAP_SPI0_CSN__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT8173_PIN_82_PWRAP_SPI0_CSN__FUNC_PWRAP_SPICS (MTK_PIN_NO(82) | 1)
+
+#define MT8173_PIN_83_AUD_CLK_MOSI__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT8173_PIN_83_AUD_CLK_MOSI__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(83) | 1)
+
+#define MT8173_PIN_84_AUD_DAT_MISO__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT8173_PIN_84_AUD_DAT_MISO__FUNC_AUD_DAT_MISO (MTK_PIN_NO(84) | 1)
+#define MT8173_PIN_84_AUD_DAT_MISO__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(84) | 2)
+
+#define MT8173_PIN_85_AUD_DAT_MOSI__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define MT8173_PIN_85_AUD_DAT_MOSI__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(85) | 1)
+#define MT8173_PIN_85_AUD_DAT_MOSI__FUNC_AUD_DAT_MISO (MTK_PIN_NO(85) | 2)
+
+#define MT8173_PIN_86_RTC32K_CK__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define MT8173_PIN_86_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(86) | 1)
+
+#define MT8173_PIN_87_DISP_PWM0__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define MT8173_PIN_87_DISP_PWM0__FUNC_DISP_PWM0 (MTK_PIN_NO(87) | 1)
+#define MT8173_PIN_87_DISP_PWM0__FUNC_DISP_PWM1 (MTK_PIN_NO(87) | 2)
+#define MT8173_PIN_87_DISP_PWM0__FUNC_DBG_MON_B_31_ (MTK_PIN_NO(87) | 7)
+
+#define MT8173_PIN_88_SRCLKENAI__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define MT8173_PIN_88_SRCLKENAI__FUNC_SRCLKENAI (MTK_PIN_NO(88) | 1)
+
+#define MT8173_PIN_89_SRCLKENAI2__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define MT8173_PIN_89_SRCLKENAI2__FUNC_SRCLKENAI2 (MTK_PIN_NO(89) | 1)
+
+#define MT8173_PIN_90_SRCLKENA0__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define MT8173_PIN_90_SRCLKENA0__FUNC_SRCLKENA0 (MTK_PIN_NO(90) | 1)
+
+#define MT8173_PIN_91_SRCLKENA1__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT8173_PIN_91_SRCLKENA1__FUNC_SRCLKENA1 (MTK_PIN_NO(91) | 1)
+
+#define MT8173_PIN_92_PCM_CLK__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT8173_PIN_92_PCM_CLK__FUNC_PCM1_CLK (MTK_PIN_NO(92) | 1)
+#define MT8173_PIN_92_PCM_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(92) | 2)
+#define MT8173_PIN_92_PCM_CLK__FUNC_DBG_MON_A_24_ (MTK_PIN_NO(92) | 7)
+
+#define MT8173_PIN_93_PCM_SYNC__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT8173_PIN_93_PCM_SYNC__FUNC_PCM1_SYNC (MTK_PIN_NO(93) | 1)
+#define MT8173_PIN_93_PCM_SYNC__FUNC_I2S0_WS (MTK_PIN_NO(93) | 2)
+#define MT8173_PIN_93_PCM_SYNC__FUNC_DBG_MON_A_25_ (MTK_PIN_NO(93) | 7)
+
+#define MT8173_PIN_94_PCM_RX__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT8173_PIN_94_PCM_RX__FUNC_PCM1_DI (MTK_PIN_NO(94) | 1)
+#define MT8173_PIN_94_PCM_RX__FUNC_I2S0_DI (MTK_PIN_NO(94) | 2)
+#define MT8173_PIN_94_PCM_RX__FUNC_DBG_MON_A_26_ (MTK_PIN_NO(94) | 7)
+
+#define MT8173_PIN_95_PCM_TX__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT8173_PIN_95_PCM_TX__FUNC_PCM1_DO (MTK_PIN_NO(95) | 1)
+#define MT8173_PIN_95_PCM_TX__FUNC_I2S0_DO (MTK_PIN_NO(95) | 2)
+#define MT8173_PIN_95_PCM_TX__FUNC_DBG_MON_A_27_ (MTK_PIN_NO(95) | 7)
+
+#define MT8173_PIN_96_URXD1__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT8173_PIN_96_URXD1__FUNC_URXD1 (MTK_PIN_NO(96) | 1)
+#define MT8173_PIN_96_URXD1__FUNC_UTXD1 (MTK_PIN_NO(96) | 2)
+#define MT8173_PIN_96_URXD1__FUNC_DBG_MON_A_28_ (MTK_PIN_NO(96) | 7)
+
+#define MT8173_PIN_97_UTXD1__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT8173_PIN_97_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(97) | 1)
+#define MT8173_PIN_97_UTXD1__FUNC_URXD1 (MTK_PIN_NO(97) | 2)
+#define MT8173_PIN_97_UTXD1__FUNC_DBG_MON_A_29_ (MTK_PIN_NO(97) | 7)
+
+#define MT8173_PIN_98_URTS1__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT8173_PIN_98_URTS1__FUNC_URTS1 (MTK_PIN_NO(98) | 1)
+#define MT8173_PIN_98_URTS1__FUNC_UCTS1 (MTK_PIN_NO(98) | 2)
+#define MT8173_PIN_98_URTS1__FUNC_DBG_MON_A_30_ (MTK_PIN_NO(98) | 7)
+
+#define MT8173_PIN_99_UCTS1__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT8173_PIN_99_UCTS1__FUNC_UCTS1 (MTK_PIN_NO(99) | 1)
+#define MT8173_PIN_99_UCTS1__FUNC_URTS1 (MTK_PIN_NO(99) | 2)
+#define MT8173_PIN_99_UCTS1__FUNC_DBG_MON_A_31_ (MTK_PIN_NO(99) | 7)
+
+#define MT8173_PIN_100_MSDC2_DAT0__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8173_PIN_100_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(100) | 1)
+#define MT8173_PIN_100_MSDC2_DAT0__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(100) | 3)
+#define MT8173_PIN_100_MSDC2_DAT0__FUNC_SDA5 (MTK_PIN_NO(100) | 4)
+#define MT8173_PIN_100_MSDC2_DAT0__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(100) | 5)
+#define MT8173_PIN_100_MSDC2_DAT0__FUNC_DBG_MON_B_0_ (MTK_PIN_NO(100) | 7)
+
+#define MT8173_PIN_101_MSDC2_DAT1__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8173_PIN_101_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(101) | 1)
+#define MT8173_PIN_101_MSDC2_DAT1__FUNC_AUD_SPDIF (MTK_PIN_NO(101) | 3)
+#define MT8173_PIN_101_MSDC2_DAT1__FUNC_SCL5 (MTK_PIN_NO(101) | 4)
+#define MT8173_PIN_101_MSDC2_DAT1__FUNC_DBG_MON_B_1_ (MTK_PIN_NO(101) | 7)
+
+#define MT8173_PIN_102_MSDC2_DAT2__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8173_PIN_102_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(102) | 1)
+#define MT8173_PIN_102_MSDC2_DAT2__FUNC_UTXD0 (MTK_PIN_NO(102) | 3)
+#define MT8173_PIN_102_MSDC2_DAT2__FUNC_PWM0 (MTK_PIN_NO(102) | 5)
+#define MT8173_PIN_102_MSDC2_DAT2__FUNC_SPI_CK_1_ (MTK_PIN_NO(102) | 6)
+#define MT8173_PIN_102_MSDC2_DAT2__FUNC_DBG_MON_B_2_ (MTK_PIN_NO(102) | 7)
+
+#define MT8173_PIN_103_MSDC2_DAT3__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8173_PIN_103_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(103) | 1)
+#define MT8173_PIN_103_MSDC2_DAT3__FUNC_URXD0 (MTK_PIN_NO(103) | 3)
+#define MT8173_PIN_103_MSDC2_DAT3__FUNC_PWM1 (MTK_PIN_NO(103) | 5)
+#define MT8173_PIN_103_MSDC2_DAT3__FUNC_SPI_MI_1_ (MTK_PIN_NO(103) | 6)
+#define MT8173_PIN_103_MSDC2_DAT3__FUNC_DBG_MON_B_3_ (MTK_PIN_NO(103) | 7)
+
+#define MT8173_PIN_104_MSDC2_CLK__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8173_PIN_104_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(104) | 1)
+#define MT8173_PIN_104_MSDC2_CLK__FUNC_UTXD3 (MTK_PIN_NO(104) | 3)
+#define MT8173_PIN_104_MSDC2_CLK__FUNC_SDA3 (MTK_PIN_NO(104) | 4)
+#define MT8173_PIN_104_MSDC2_CLK__FUNC_PWM2 (MTK_PIN_NO(104) | 5)
+#define MT8173_PIN_104_MSDC2_CLK__FUNC_SPI_MO_1_ (MTK_PIN_NO(104) | 6)
+#define MT8173_PIN_104_MSDC2_CLK__FUNC_DBG_MON_B_4_ (MTK_PIN_NO(104) | 7)
+
+#define MT8173_PIN_105_MSDC2_CMD__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8173_PIN_105_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(105) | 1)
+#define MT8173_PIN_105_MSDC2_CMD__FUNC_URXD3 (MTK_PIN_NO(105) | 3)
+#define MT8173_PIN_105_MSDC2_CMD__FUNC_SCL3 (MTK_PIN_NO(105) | 4)
+#define MT8173_PIN_105_MSDC2_CMD__FUNC_PWM3 (MTK_PIN_NO(105) | 5)
+#define MT8173_PIN_105_MSDC2_CMD__FUNC_SPI_CS_1_ (MTK_PIN_NO(105) | 6)
+#define MT8173_PIN_105_MSDC2_CMD__FUNC_DBG_MON_B_5_ (MTK_PIN_NO(105) | 7)
+
+#define MT8173_PIN_106_SDA3__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8173_PIN_106_SDA3__FUNC_SDA3 (MTK_PIN_NO(106) | 1)
+
+#define MT8173_PIN_107_SCL3__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8173_PIN_107_SCL3__FUNC_SCL3 (MTK_PIN_NO(107) | 1)
+
+#define MT8173_PIN_108_JTMS__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8173_PIN_108_JTMS__FUNC_JTMS (MTK_PIN_NO(108) | 1)
+#define MT8173_PIN_108_JTMS__FUNC_MFG_JTAG_TMS (MTK_PIN_NO(108) | 2)
+#define MT8173_PIN_108_JTMS__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(108) | 5)
+#define MT8173_PIN_108_JTMS__FUNC_DFD_TMS (MTK_PIN_NO(108) | 6)
+
+#define MT8173_PIN_109_JTCK__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8173_PIN_109_JTCK__FUNC_JTCK (MTK_PIN_NO(109) | 1)
+#define MT8173_PIN_109_JTCK__FUNC_MFG_JTAG_TCK (MTK_PIN_NO(109) | 2)
+#define MT8173_PIN_109_JTCK__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(109) | 5)
+#define MT8173_PIN_109_JTCK__FUNC_DFD_TCK (MTK_PIN_NO(109) | 6)
+
+#define MT8173_PIN_110_JTDI__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8173_PIN_110_JTDI__FUNC_JTDI (MTK_PIN_NO(110) | 1)
+#define MT8173_PIN_110_JTDI__FUNC_MFG_JTAG_TDI (MTK_PIN_NO(110) | 2)
+#define MT8173_PIN_110_JTDI__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(110) | 5)
+#define MT8173_PIN_110_JTDI__FUNC_DFD_TDI (MTK_PIN_NO(110) | 6)
+
+#define MT8173_PIN_111_JTDO__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8173_PIN_111_JTDO__FUNC_JTDO (MTK_PIN_NO(111) | 1)
+#define MT8173_PIN_111_JTDO__FUNC_MFG_JTAG_TDO (MTK_PIN_NO(111) | 2)
+#define MT8173_PIN_111_JTDO__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(111) | 5)
+#define MT8173_PIN_111_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(111) | 6)
+
+#define MT8173_PIN_112_JTRST_B__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8173_PIN_112_JTRST_B__FUNC_JTRST_B (MTK_PIN_NO(112) | 1)
+#define MT8173_PIN_112_JTRST_B__FUNC_MFG_JTAG_TRSTN (MTK_PIN_NO(112) | 2)
+#define MT8173_PIN_112_JTRST_B__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(112) | 5)
+#define MT8173_PIN_112_JTRST_B__FUNC_DFD_NTRST (MTK_PIN_NO(112) | 6)
+
+#define MT8173_PIN_113_URXD0__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8173_PIN_113_URXD0__FUNC_URXD0 (MTK_PIN_NO(113) | 1)
+#define MT8173_PIN_113_URXD0__FUNC_UTXD0 (MTK_PIN_NO(113) | 2)
+#define MT8173_PIN_113_URXD0__FUNC_I2S2_WS (MTK_PIN_NO(113) | 6)
+#define MT8173_PIN_113_URXD0__FUNC_DBG_MON_A_0_ (MTK_PIN_NO(113) | 7)
+
+#define MT8173_PIN_114_UTXD0__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8173_PIN_114_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(114) | 1)
+#define MT8173_PIN_114_UTXD0__FUNC_URXD0 (MTK_PIN_NO(114) | 2)
+#define MT8173_PIN_114_UTXD0__FUNC_I2S2_BCK (MTK_PIN_NO(114) | 6)
+#define MT8173_PIN_114_UTXD0__FUNC_DBG_MON_A_1_ (MTK_PIN_NO(114) | 7)
+
+#define MT8173_PIN_115_URTS0__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8173_PIN_115_URTS0__FUNC_URTS0 (MTK_PIN_NO(115) | 1)
+#define MT8173_PIN_115_URTS0__FUNC_UCTS0 (MTK_PIN_NO(115) | 2)
+#define MT8173_PIN_115_URTS0__FUNC_I2S2_MCK (MTK_PIN_NO(115) | 6)
+#define MT8173_PIN_115_URTS0__FUNC_DBG_MON_A_2_ (MTK_PIN_NO(115) | 7)
+
+#define MT8173_PIN_116_UCTS0__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8173_PIN_116_UCTS0__FUNC_UCTS0 (MTK_PIN_NO(116) | 1)
+#define MT8173_PIN_116_UCTS0__FUNC_URTS0 (MTK_PIN_NO(116) | 2)
+#define MT8173_PIN_116_UCTS0__FUNC_I2S2_DI_1 (MTK_PIN_NO(116) | 6)
+#define MT8173_PIN_116_UCTS0__FUNC_DBG_MON_A_3_ (MTK_PIN_NO(116) | 7)
+
+#define MT8173_PIN_117_URXD3__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8173_PIN_117_URXD3__FUNC_URXD3 (MTK_PIN_NO(117) | 1)
+#define MT8173_PIN_117_URXD3__FUNC_UTXD3 (MTK_PIN_NO(117) | 2)
+#define MT8173_PIN_117_URXD3__FUNC_DBG_MON_A_9_ (MTK_PIN_NO(117) | 7)
+
+#define MT8173_PIN_118_UTXD3__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8173_PIN_118_UTXD3__FUNC_UTXD3 (MTK_PIN_NO(118) | 1)
+#define MT8173_PIN_118_UTXD3__FUNC_URXD3 (MTK_PIN_NO(118) | 2)
+#define MT8173_PIN_118_UTXD3__FUNC_DBG_MON_A_10_ (MTK_PIN_NO(118) | 7)
+
+#define MT8173_PIN_119_KPROW0__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8173_PIN_119_KPROW0__FUNC_KROW0 (MTK_PIN_NO(119) | 1)
+#define MT8173_PIN_119_KPROW0__FUNC_DBG_MON_A_11_ (MTK_PIN_NO(119) | 7)
+
+#define MT8173_PIN_120_KPROW1__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8173_PIN_120_KPROW1__FUNC_KROW1 (MTK_PIN_NO(120) | 1)
+#define MT8173_PIN_120_KPROW1__FUNC_PWM6 (MTK_PIN_NO(120) | 3)
+#define MT8173_PIN_120_KPROW1__FUNC_DBG_MON_A_12_ (MTK_PIN_NO(120) | 7)
+
+#define MT8173_PIN_121_KPROW2__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT8173_PIN_121_KPROW2__FUNC_KROW2 (MTK_PIN_NO(121) | 1)
+#define MT8173_PIN_121_KPROW2__FUNC_IRDA_PDN (MTK_PIN_NO(121) | 2)
+#define MT8173_PIN_121_KPROW2__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(121) | 3)
+#define MT8173_PIN_121_KPROW2__FUNC_PWM4 (MTK_PIN_NO(121) | 4)
+#define MT8173_PIN_121_KPROW2__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(121) | 5)
+#define MT8173_PIN_121_KPROW2__FUNC_DBG_MON_A_13_ (MTK_PIN_NO(121) | 7)
+
+#define MT8173_PIN_122_KPCOL0__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT8173_PIN_122_KPCOL0__FUNC_KCOL0 (MTK_PIN_NO(122) | 1)
+#define MT8173_PIN_122_KPCOL0__FUNC_DBG_MON_A_14_ (MTK_PIN_NO(122) | 7)
+
+#define MT8173_PIN_123_KPCOL1__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT8173_PIN_123_KPCOL1__FUNC_KCOL1 (MTK_PIN_NO(123) | 1)
+#define MT8173_PIN_123_KPCOL1__FUNC_IRDA_RXD (MTK_PIN_NO(123) | 2)
+#define MT8173_PIN_123_KPCOL1__FUNC_PWM5 (MTK_PIN_NO(123) | 3)
+#define MT8173_PIN_123_KPCOL1__FUNC_DBG_MON_A_15_ (MTK_PIN_NO(123) | 7)
+
+#define MT8173_PIN_124_KPCOL2__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT8173_PIN_124_KPCOL2__FUNC_KCOL2 (MTK_PIN_NO(124) | 1)
+#define MT8173_PIN_124_KPCOL2__FUNC_IRDA_TXD (MTK_PIN_NO(124) | 2)
+#define MT8173_PIN_124_KPCOL2__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(124) | 3)
+#define MT8173_PIN_124_KPCOL2__FUNC_PWM3 (MTK_PIN_NO(124) | 4)
+#define MT8173_PIN_124_KPCOL2__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(124) | 5)
+#define MT8173_PIN_124_KPCOL2__FUNC_DBG_MON_A_16_ (MTK_PIN_NO(124) | 7)
+
+#define MT8173_PIN_125_SDA1__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT8173_PIN_125_SDA1__FUNC_SDA1 (MTK_PIN_NO(125) | 1)
+
+#define MT8173_PIN_126_SCL1__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT8173_PIN_126_SCL1__FUNC_SCL1 (MTK_PIN_NO(126) | 1)
+
+#define MT8173_PIN_127_LCM_RST__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define MT8173_PIN_127_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(127) | 1)
+
+#define MT8173_PIN_128_I2S0_LRCK__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define MT8173_PIN_128_I2S0_LRCK__FUNC_I2S0_WS (MTK_PIN_NO(128) | 1)
+#define MT8173_PIN_128_I2S0_LRCK__FUNC_I2S1_WS (MTK_PIN_NO(128) | 2)
+#define MT8173_PIN_128_I2S0_LRCK__FUNC_I2S2_WS (MTK_PIN_NO(128) | 3)
+#define MT8173_PIN_128_I2S0_LRCK__FUNC_SPI_CK_2_ (MTK_PIN_NO(128) | 5)
+#define MT8173_PIN_128_I2S0_LRCK__FUNC_DBG_MON_A_4_ (MTK_PIN_NO(128) | 7)
+
+#define MT8173_PIN_129_I2S0_BCK__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define MT8173_PIN_129_I2S0_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(129) | 1)
+#define MT8173_PIN_129_I2S0_BCK__FUNC_I2S1_BCK (MTK_PIN_NO(129) | 2)
+#define MT8173_PIN_129_I2S0_BCK__FUNC_I2S2_BCK (MTK_PIN_NO(129) | 3)
+#define MT8173_PIN_129_I2S0_BCK__FUNC_SPI_MI_2_ (MTK_PIN_NO(129) | 5)
+#define MT8173_PIN_129_I2S0_BCK__FUNC_DBG_MON_A_5_ (MTK_PIN_NO(129) | 7)
+
+#define MT8173_PIN_130_I2S0_MCK__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define MT8173_PIN_130_I2S0_MCK__FUNC_I2S0_MCK (MTK_PIN_NO(130) | 1)
+#define MT8173_PIN_130_I2S0_MCK__FUNC_I2S1_MCK (MTK_PIN_NO(130) | 2)
+#define MT8173_PIN_130_I2S0_MCK__FUNC_I2S2_MCK (MTK_PIN_NO(130) | 3)
+#define MT8173_PIN_130_I2S0_MCK__FUNC_SPI_MO_2_ (MTK_PIN_NO(130) | 5)
+#define MT8173_PIN_130_I2S0_MCK__FUNC_DBG_MON_A_6_ (MTK_PIN_NO(130) | 7)
+
+#define MT8173_PIN_131_I2S0_DATA0__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define MT8173_PIN_131_I2S0_DATA0__FUNC_I2S0_DO (MTK_PIN_NO(131) | 1)
+#define MT8173_PIN_131_I2S0_DATA0__FUNC_I2S1_DO_1 (MTK_PIN_NO(131) | 2)
+#define MT8173_PIN_131_I2S0_DATA0__FUNC_I2S2_DI_1 (MTK_PIN_NO(131) | 3)
+#define MT8173_PIN_131_I2S0_DATA0__FUNC_SPI_CS_2_ (MTK_PIN_NO(131) | 5)
+#define MT8173_PIN_131_I2S0_DATA0__FUNC_DBG_MON_A_7_ (MTK_PIN_NO(131) | 7)
+
+#define MT8173_PIN_132_I2S0_DATA1__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define MT8173_PIN_132_I2S0_DATA1__FUNC_I2S0_DI (MTK_PIN_NO(132) | 1)
+#define MT8173_PIN_132_I2S0_DATA1__FUNC_I2S1_DO_2 (MTK_PIN_NO(132) | 2)
+#define MT8173_PIN_132_I2S0_DATA1__FUNC_I2S2_DI_2 (MTK_PIN_NO(132) | 3)
+#define MT8173_PIN_132_I2S0_DATA1__FUNC_DBG_MON_A_8_ (MTK_PIN_NO(132) | 7)
+
+#define MT8173_PIN_133_SDA4__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define MT8173_PIN_133_SDA4__FUNC_SDA4 (MTK_PIN_NO(133) | 1)
+
+#define MT8173_PIN_134_SCL4__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define MT8173_PIN_134_SCL4__FUNC_SCL4 (MTK_PIN_NO(134) | 1)
+
+#endif /* __DTS_MT8173_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 8554ec31dd9e..924fdb6673ff 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -13,6 +13,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "mt8173-pinfunc.h"
/ {
compatible = "mediatek,mt8173";
@@ -105,6 +106,25 @@
compatible = "simple-bus";
ranges;
+ syscfg_pctl_a: syscfg_pctl_a@10005000 {
+ compatible = "mediatek,mt8173-pctl-a-syscfg", "syscon";
+ reg = <0 0x10005000 0 0x1000>;
+ };
+
+ pio: pinctrl@0x10005000 {
+ compatible = "mediatek,mt8173-pinctrl";
+ reg = <0 0x1000B000 0 0x1000>;
+ mediatek,pctl-regmap = <&syscfg_pctl_a>;
+ pins-are-numbered;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
sysirq: intpol-controller@10200620 {
compatible = "mediatek,mt8173-sysirq",
"mediatek,mt6577-sysirq";
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
new file mode 100644
index 000000000000..8e94af64ee94
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb msm8916-mtp.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm/mach-qcom/scm-boot.h b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
index 3e210fb818bb..825f489a2af7 100644
--- a/arch/arm/mach-qcom/scm-boot.h
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,18 +10,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#ifndef __MACH_SCM_BOOT_H
-#define __MACH_SCM_BOOT_H
-#define SCM_BOOT_ADDR 0x1
-#define SCM_FLAG_COLDBOOT_CPU1 0x01
-#define SCM_FLAG_COLDBOOT_CPU2 0x08
-#define SCM_FLAG_COLDBOOT_CPU3 0x20
-#define SCM_FLAG_WARMBOOT_CPU0 0x04
-#define SCM_FLAG_WARMBOOT_CPU1 0x02
-#define SCM_FLAG_WARMBOOT_CPU2 0x10
-#define SCM_FLAG_WARMBOOT_CPU3 0x40
+/dts-v1/;
-int scm_set_boot_addr(u32 addr, int flags);
+#include "apq8016-sbc.dtsi"
-#endif
+/ {
+ model = "Qualcomm Technologies, Inc. APQ 8016 SBC";
+ compatible = "qcom,apq8016-sbc", "qcom,apq8016", "qcom,sbc";
+};
diff --git a/arch/arm/mach-qcom/scm.h b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 00b31ea58f29..703a4f16e711 100644
--- a/arch/arm/mach-qcom/scm.h
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,17 +10,24 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#ifndef __MACH_SCM_H
-#define __MACH_SCM_H
-#define SCM_SVC_BOOT 0x1
-#define SCM_SVC_PIL 0x2
+#include "msm8916.dtsi"
-extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
- void *resp_buf, size_t resp_len);
+/ {
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
-#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+ chosen {
+ stdout-path = "serial0";
+ };
-extern u32 scm_get_version(void);
-
-#endif
+ soc {
+ serial@78b0000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart2_default>;
+ pinctrl-1 = <&blsp1_uart2_sleep>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
new file mode 100644
index 000000000000..fced77f0fd3a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8916-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8916 MTP";
+ compatible = "qcom,msm8916-mtp", "qcom,msm8916-mtp-smb1360",
+ "qcom,msm8916", "qcom,mtp";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
new file mode 100644
index 000000000000..bea871b0df13
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8916.dtsi"
+
+/ {
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ soc {
+ serial@78b0000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart2_default>;
+ pinctrl-1 = <&blsp1_uart2_sleep>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
new file mode 100644
index 000000000000..f212b8303d04
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8916.h>
+#include <dt-bindings/reset/qcom,gcc-msm8916.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8916";
+ compatible = "qcom,msm8916";
+
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases { };
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0>;
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x1>;
+ };
+
+ CPU2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x2>;
+ };
+
+ CPU3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x3>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ pinctrl@1000000 {
+ compatible = "qcom,msm8916-pinctrl";
+ reg = <0x1000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ blsp1_uart2_default: blsp1_uart2_default {
+ pinmux {
+ function = "blsp_uart2";
+ pins = "gpio4", "gpio5";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart2_sleep: blsp1_uart2_sleep {
+ pinmux {
+ function = "blsp_uart2";
+ pins = "gpio4", "gpio5";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ gcc: qcom,gcc@1800000 {
+ compatible = "qcom,gcc-msm8916";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ reg = <0x1800000 0x80000>;
+ };
+
+ blsp1_uart2: serial@78b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x78b0000 0x200>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ intc: interrupt-controller@b000000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>;
+ };
+
+ timer@b020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xb020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@b021000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb021000 0x1000>,
+ <0xb022000 0x1000>;
+ };
+
+ frame@b023000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b024000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b025000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b026000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b027000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b028000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb028000 0x1000>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/sprd/Makefile b/arch/arm64/boot/dts/sprd/Makefile
new file mode 100644
index 000000000000..b658c5e09b15
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_SPRD) += sc9836-openphone.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/sprd/sc9836-openphone.dts b/arch/arm64/boot/dts/sprd/sc9836-openphone.dts
new file mode 100644
index 000000000000..e5657c35cd10
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sc9836-openphone.dts
@@ -0,0 +1,49 @@
+/*
+ * Spreadtrum SC9836 openphone board DTS file
+ *
+ * Copyright (C) 2014, Spreadtrum Communications Inc.
+ *
+ * This file is licensed under a dual GPLv2 or X11 license.
+ */
+
+/dts-v1/;
+
+#include "sc9836.dtsi"
+
+/ {
+ model = "Spreadtrum SC9836 Openphone Board";
+
+ compatible = "sprd,sc9836-openphone", "sprd,sc9836";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x20000000>;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/sprd/sc9836.dtsi b/arch/arm64/boot/dts/sprd/sc9836.dtsi
new file mode 100644
index 000000000000..ee34e1a36e03
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sc9836.dtsi
@@ -0,0 +1,129 @@
+/*
+ * Spreadtrum SC9836 SoC DTS file
+ *
+ * Copyright (C) 2014, Spreadtrum Communications Inc.
+ *
+ * This file is licensed under a dual GPLv2 or X11 license.
+ */
+
+#include "sharkl64.dtsi"
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "sprd,sc9836";
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ };
+ };
+
+ etf@10003000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x10003000 0 0x1000>;
+ clocks = <&clk26mhz>;
+ clock-names = "apb_pclk";
+ port {
+ etf_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel_out_port0>;
+ };
+ };
+ };
+
+ funnel@10001000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x10001000 0 0x1000>;
+ clocks = <&clk26mhz>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel_out_port0: endpoint {
+ remote-endpoint = <&etf_in>;
+ };
+ };
+
+ /* funnel input port 0~3 is reserved for ETMs */
+ port@1 {
+ reg = <4>;
+ funnel_in_port4: endpoint {
+ slave-mode;
+ remote-endpoint = <&stm_out>;
+ };
+ };
+ };
+ };
+
+ stm@10006000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0 0x10006000 0 0x1000>,
+ <0 0x01000000 0 0x180000>;
+ reg-names = "stm-base", "stm-stimulus-base";
+ clocks = <&clk26mhz>;
+ clock-names = "apb_pclk";
+ port {
+ stm_out: endpoint {
+ remote-endpoint = <&funnel_in_port4>;
+ };
+ };
+ };
+
+ gic: interrupt-controller@12001000 {
+ compatible = "arm,gic-400";
+ reg = <0 0x12001000 0 0x1000>,
+ <0 0x12002000 0 0x2000>,
+ <0 0x12004000 0 0x2000>,
+ <0 0x12006000 0 0x2000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_on = <0xc4000003>;
+ cpu_off = <0x84000002>;
+ cpu_suspend = <0xc4000001>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/sprd/sharkl64.dtsi b/arch/arm64/boot/dts/sprd/sharkl64.dtsi
new file mode 100644
index 000000000000..69f64e7fce7c
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sharkl64.dtsi
@@ -0,0 +1,65 @@
+/*
+ * Spreadtrum Sharkl64 platform DTS file
+ *
+ * Copyright (C) 2014, Spreadtrum Communications Inc.
+ *
+ * This file is licensed under a dual GPLv2 or X11 license.
+ */
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ap-apb {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ uart0: serial@70000000 {
+ compatible = "sprd,sc9836-uart";
+ reg = <0 0x70000000 0 0x100>;
+ interrupts = <0 2 0xf04>;
+ clocks = <&clk26mhz>;
+ status = "disabled";
+ };
+
+ uart1: serial@70100000 {
+ compatible = "sprd,sc9836-uart";
+ reg = <0 0x70100000 0 0x100>;
+ interrupts = <0 3 0xf04>;
+ clocks = <&clk26mhz>;
+ status = "disabled";
+ };
+
+ uart2: serial@70200000 {
+ compatible = "sprd,sc9836-uart";
+ reg = <0 0x70200000 0 0x100>;
+ interrupts = <0 4 0xf04>;
+ clocks = <&clk26mhz>;
+ status = "disabled";
+ };
+
+ uart3: serial@70300000 {
+ compatible = "sprd,sc9836-uart";
+ reg = <0 0x70300000 0 0x100>;
+ interrupts = <0 5 0xf04>;
+ clocks = <&clk26mhz>;
+ status = "disabled";
+ };
+ };
+ };
+
+ clk26mhz: clk26mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
new file mode 100644
index 000000000000..ae16427f6a4a
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-ep108.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
new file mode 100644
index 000000000000..0a3f40ecd06d
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -0,0 +1,47 @@
+/*
+ * dts file for Xilinx ZynqMP ep108 development board
+ *
+ * (C) Copyright 2014 - 2015, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+/include/ "zynqmp.dtsi"
+
+/ {
+ model = "ZynqMP EP108";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0 0x40000000>;
+ };
+};
+
+&gem0 {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ phy0: phy@0{
+ reg = <0>;
+ max-speed = <100>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
new file mode 100644
index 000000000000..11e0b00045cf
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -0,0 +1,305 @@
+/*
+ * dts file for Xilinx ZynqMP
+ *
+ * (C) Copyright 2014 - 2015, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/ {
+ compatible = "xlnx,zynqmp";
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x0>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x1>;
+ };
+
+ cpu@2 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x2>;
+ };
+
+ cpu@3 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x3>;
+ };
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <0 143 4>,
+ <0 144 4>,
+ <0 145 4>,
+ <0 146 4>;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <1 13 0xf01>,
+ <1 14 0xf01>,
+ <1 11 0xf01>,
+ <1 10 0xf01>;
+ };
+
+ amba_apu {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ gic: interrupt-controller@f9010000 {
+ compatible = "arm,gic-400", "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ reg = <0x0 0xf9010000 0x10000>,
+ <0x0 0xf902f000 0x2000>,
+ <0x0 0xf9040000 0x20000>,
+ <0x0 0xf906f000 0x2000>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <1 9 0xf04>;
+ };
+ };
+
+ amba {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ misc_clk: misc_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ ttc0: timer@ff110000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
+ reg = <0x0 0xff110000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ ttc1: timer@ff120000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 39 4>, <0 40 4>, <0 41 4>;
+ reg = <0x0 0xff120000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ ttc2: timer@ff130000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 42 4>, <0 43 4>, <0 44 4>;
+ reg = <0x0 0xff130000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ ttc3: timer@ff140000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 45 4>, <0 46 4>, <0 47 4>;
+ reg = <0x0 0xff140000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ uart0: serial@ff000000 {
+ compatible = "cdns,uart-r1p8";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 21 4>;
+ reg = <0x0 0xff000000 0x1000>;
+ clock-names = "uart_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ };
+
+ uart1: serial@ff010000 {
+ compatible = "cdns,uart-r1p8";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 22 4>;
+ reg = <0x0 0xff010000 0x1000>;
+ clock-names = "uart_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ };
+
+ gpio: gpio@ff0a0000 {
+ compatible = "xlnx,zynq-gpio-1.0";
+ status = "disabled";
+ #gpio-cells = <0x2>;
+ clocks = <&misc_clk>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 16 4>;
+ reg = <0x0 0xff0a0000 0x1000>;
+ };
+
+ gem0: ethernet@ff0b0000 {
+ compatible = "cdns,gem";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 57 4>, <0 57 4>;
+ reg = <0x0 0xff0b0000 0x1000>;
+ clock-names = "pclk", "hclk", "tx_clk";
+ clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ gem1: ethernet@ff0c0000 {
+ compatible = "cdns,gem";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 59 4>, <0 59 4>;
+ reg = <0x0 0xff0c0000 0x1000>;
+ clock-names = "pclk", "hclk", "tx_clk";
+ clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ gem2: ethernet@ff0d0000 {
+ compatible = "cdns,gem";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 61 4>, <0 61 4>;
+ reg = <0x0 0xff0d0000 0x1000>;
+ clock-names = "pclk", "hclk", "tx_clk";
+ clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ gem3: ethernet@ff0e0000 {
+ compatible = "cdns,gem";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 63 4>, <0 63 4>;
+ reg = <0x0 0xff0e0000 0x1000>;
+ clock-names = "pclk", "hclk", "tx_clk";
+ clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi0: spi@ff040000 {
+ compatible = "cdns,spi-r1p6";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 19 4>;
+ reg = <0x0 0xff040000 0x1000>;
+ clock-names = "ref_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi1: spi@ff050000 {
+ compatible = "cdns,spi-r1p6";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 20 4>;
+ reg = <0x0 0xff050000 0x1000>;
+ clock-names = "ref_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c_clk: i2c_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0x0>;
+ clock-frequency = <111111111>;
+ };
+
+ i2c0: i2c@ff020000 {
+ compatible = "cdns,i2c-r1p10";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 17 4>;
+ reg = <0x0 0xff020000 0x1000>;
+ clocks = <&i2c_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c1: i2c@ff030000 {
+ compatible = "cdns,i2c-r1p10";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 18 4>;
+ reg = <0x0 0xff030000 0x1000>;
+ clocks = <&i2c_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ sdhci0: sdhci@ff160000 {
+ compatible = "arasan,sdhci-8.9a";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 48 4>;
+ reg = <0x0 0xff160000 0x1000>;
+ clock-names = "clk_xin", "clk_ahb";
+ clocks = <&misc_clk>, <&misc_clk>;
+ };
+
+ sdhci1: sdhci@ff170000 {
+ compatible = "arasan,sdhci-8.9a";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 49 4>;
+ reg = <0x0 0xff170000 0x1000>;
+ clock-names = "clk_xin", "clk_ahb";
+ clocks = <&misc_clk>, <&misc_clk>;
+ };
+
+ watchdog0: watchdog@fd4d0000 {
+ compatible = "cdns,wdt-r1p2";
+ status = "disabled";
+ clocks= <&misc_clk>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 52 1>;
+ reg = <0x0 0xfd4d0000 0x1000>;
+ timeout-sec = <10>;
+ };
+ };
+};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index be1f12a5a5f0..2ed7449d9273 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -31,11 +31,18 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_EXYNOS7=y
CONFIG_ARCH_FSL_LS2085A=y
CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SPRD=y
CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZYNQMP=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_PCI_XGENE=y
@@ -48,7 +55,7 @@ CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
-CONFIG_ARM64_CPUIDLE=y
+CONFIG_ARM_CPUIDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -62,6 +69,7 @@ CONFIG_BPF_JIT=y
# CONFIG_WIRELESS is not set
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
+# CONFIG_TEGRA_AHB is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -81,6 +89,7 @@ CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
CONFIG_NET_XGENE=y
+CONFIG_SKY2=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
@@ -93,13 +102,20 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_MT6577=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
CONFIG_SPI_PL022=y
+CONFIG_PINCTRL_MSM8916=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_XGENE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -112,10 +128,10 @@ CONFIG_LOGO=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_ISP1760=y
CONFIG_USB_ULPI=y
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
@@ -125,8 +141,11 @@ CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_XGENE=y
+CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_MSM_GCC_8916=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PHY_XGENE=y
CONFIG_EXT2_FS=y
@@ -143,8 +162,10 @@ CONFIG_CUSE=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
+CONFIG_EFIVAR_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
@@ -159,7 +180,6 @@ CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
-CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 432e4841cd81..a2a7fbcacc14 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -101,19 +101,19 @@ ENTRY(ce_aes_ccm_final)
0: mov v4.16b, v3.16b
1: ld1 {v5.2d}, [x2], #16 /* load next round key */
aese v0.16b, v4.16b
- aese v1.16b, v4.16b
aesmc v0.16b, v0.16b
+ aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
2: ld1 {v3.2d}, [x2], #16 /* load next round key */
aese v0.16b, v5.16b
- aese v1.16b, v5.16b
aesmc v0.16b, v0.16b
+ aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
3: ld1 {v4.2d}, [x2], #16 /* load next round key */
subs w3, w3, #3
aese v0.16b, v3.16b
- aese v1.16b, v3.16b
aesmc v0.16b, v0.16b
+ aese v1.16b, v3.16b
aesmc v1.16b, v1.16b
bpl 1b
aese v0.16b, v4.16b
@@ -146,19 +146,19 @@ ENDPROC(ce_aes_ccm_final)
ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
2: /* inner loop: 3 rounds, 2x interleaved */
aese v0.16b, v4.16b
- aese v1.16b, v4.16b
aesmc v0.16b, v0.16b
+ aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
3: ld1 {v3.2d}, [x10], #16 /* load next round key */
aese v0.16b, v5.16b
- aese v1.16b, v5.16b
aesmc v0.16b, v0.16b
+ aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
4: ld1 {v4.2d}, [x10], #16 /* load next round key */
subs w7, w7, #3
aese v0.16b, v3.16b
- aese v1.16b, v3.16b
aesmc v0.16b, v0.16b
+ aese v1.16b, v3.16b
aesmc v1.16b, v1.16b
ld1 {v5.2d}, [x10], #16 /* load next round key */
bpl 2b
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 685a18f731eb..78f3cfe92c08 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -45,18 +45,14 @@
.macro do_enc_Nx, de, mc, k, i0, i1, i2, i3
aes\de \i0\().16b, \k\().16b
- .ifnb \i1
- aes\de \i1\().16b, \k\().16b
- .ifnb \i3
- aes\de \i2\().16b, \k\().16b
- aes\de \i3\().16b, \k\().16b
- .endif
- .endif
aes\mc \i0\().16b, \i0\().16b
.ifnb \i1
+ aes\de \i1\().16b, \k\().16b
aes\mc \i1\().16b, \i1\().16b
.ifnb \i3
+ aes\de \i2\().16b, \k\().16b
aes\mc \i2\().16b, \i2\().16b
+ aes\de \i3\().16b, \k\().16b
aes\mc \i3\().16b, \i3\().16b
.endif
.endif
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index b1b5b893eb20..05d9e16c0dfd 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -284,7 +284,8 @@ static struct crypto_alg aes_algs[] = { {
.cra_name = "__ecb-aes-" MODE,
.cra_driver_name = "__driver-ecb-aes-" MODE,
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_alignmask = 7,
@@ -302,7 +303,8 @@ static struct crypto_alg aes_algs[] = { {
.cra_name = "__cbc-aes-" MODE,
.cra_driver_name = "__driver-cbc-aes-" MODE,
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_alignmask = 7,
@@ -320,7 +322,8 @@ static struct crypto_alg aes_algs[] = { {
.cra_name = "__ctr-aes-" MODE,
.cra_driver_name = "__driver-ctr-aes-" MODE,
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_alignmask = 7,
@@ -338,7 +341,8 @@ static struct crypto_alg aes_algs[] = { {
.cra_name = "__xts-aes-" MODE,
.cra_driver_name = "__driver-xts-aes-" MODE,
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
.cra_alignmask = 7,
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
index 9499199924ae..6a37c3c6b11d 100644
--- a/arch/arm64/crypto/crc32-arm64.c
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+ put_unaligned_le32(ctx->crc, out);
+ return 0;
+}
+
+static int chksumc_final(struct shash_desc *desc, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
put_unaligned_le32(~ctx->crc, out);
return 0;
}
static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+ put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out);
return 0;
}
@@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+ mctx->key = 0;
+ return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
mctx->key = ~0;
return 0;
}
@@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = {
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksumc_update,
- .final = chksum_final,
+ .final = chksumc_final,
.finup = chksumc_finup,
.digest = chksumc_digest,
.descsize = sizeof(struct chksum_desc_ctx),
@@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = {
.cra_alignmask = 0,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
- .cra_init = crc32_cra_init,
+ .cra_init = crc32c_cra_init,
}
};
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 09d57d98609c..033aae6d732a 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -66,8 +66,8 @@
.word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
/*
- * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
- * u8 *head, long bytes)
+ * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ * int blocks)
*/
ENTRY(sha1_ce_transform)
/* load round constants */
@@ -78,25 +78,22 @@ ENTRY(sha1_ce_transform)
ld1r {k3.4s}, [x6]
/* load state */
- ldr dga, [x2]
- ldr dgb, [x2, #16]
+ ldr dga, [x0]
+ ldr dgb, [x0, #16]
- /* load partial state (if supplied) */
- cbz x3, 0f
- ld1 {v8.4s-v11.4s}, [x3]
- b 1f
+ /* load sha1_ce_state::finalize */
+ ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
/* load input */
0: ld1 {v8.4s-v11.4s}, [x1], #64
- sub w0, w0, #1
+ sub w2, w2, #1
-1:
CPU_LE( rev32 v8.16b, v8.16b )
CPU_LE( rev32 v9.16b, v9.16b )
CPU_LE( rev32 v10.16b, v10.16b )
CPU_LE( rev32 v11.16b, v11.16b )
-2: add t0.4s, v8.4s, k0.4s
+1: add t0.4s, v8.4s, k0.4s
mov dg0v.16b, dgav.16b
add_update c, ev, k0, 8, 9, 10, 11, dgb
@@ -127,15 +124,15 @@ CPU_LE( rev32 v11.16b, v11.16b )
add dgbv.2s, dgbv.2s, dg1v.2s
add dgav.4s, dgav.4s, dg0v.4s
- cbnz w0, 0b
+ cbnz w2, 0b
/*
* Final block: add padding and total bit count.
- * Skip if we have no total byte count in x4. In that case, the input
- * size was not a round multiple of the block size, and the padding is
- * handled by the C code.
+ * Skip if the input size was not a round multiple of the block size,
+ * the padding is handled by the C code in that case.
*/
cbz x4, 3f
+ ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
movi v9.2d, #0
mov x8, #0x80000000
movi v10.2d, #0
@@ -144,10 +141,10 @@ CPU_LE( rev32 v11.16b, v11.16b )
mov x4, #0
mov v11.d[0], xzr
mov v11.d[1], x7
- b 2b
+ b 1b
/* store new state */
-3: str dga, [x2]
- str dgb, [x2, #16]
+3: str dga, [x0]
+ str dgb, [x0, #16]
ret
ENDPROC(sha1_ce_transform)
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 6fe83f37a750..aefda9868627 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -12,144 +12,84 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#define ASM_EXPORT(sym, val) \
+ asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
+
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
-asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
- u8 *head, long bytes);
+struct sha1_ce_state {
+ struct sha1_state sst;
+ u32 finalize;
+};
-static int sha1_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
+asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ int blocks);
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
- return 0;
-}
-
-static int sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->count += len;
-
- if ((partial + len) >= SHA1_BLOCK_SIZE) {
- int blocks;
-
- if (partial) {
- int p = SHA1_BLOCK_SIZE - partial;
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
- }
-
- blocks = len / SHA1_BLOCK_SIZE;
- len %= SHA1_BLOCK_SIZE;
-
- kernel_neon_begin_partial(16);
- sha1_ce_transform(blocks, data, sctx->state,
- partial ? sctx->buffer : NULL, 0);
- kernel_neon_end();
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(16);
+ sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_ce_transform);
+ kernel_neon_end();
- data += blocks * SHA1_BLOCK_SIZE;
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
return 0;
}
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
-
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be64 bits = cpu_to_be64(sctx->count << 3);
- __be32 *dst = (__be32 *)out;
- int i;
-
- u32 padlen = SHA1_BLOCK_SIZE
- - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
-
- sha1_update(desc, padding, padlen);
- sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+ bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], dst++);
-
- *sctx = (struct sha1_state){};
- return 0;
-}
-
-static int sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- int blocks;
- int i;
-
- if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
- sha1_update(desc, data, len);
- return sha1_final(desc, out);
- }
+ ASM_EXPORT(sha1_ce_offsetof_count,
+ offsetof(struct sha1_ce_state, sst.count));
+ ASM_EXPORT(sha1_ce_offsetof_finalize,
+ offsetof(struct sha1_ce_state, finalize));
/*
- * Use a fast path if the input is a multiple of 64 bytes. In
- * this case, there is no need to copy data around, and we can
- * perform the entire digest calculation in a single invocation
- * of sha1_ce_transform()
+ * Allow the asm code to perform the finalization if there is no
+ * partial data and the input is a round multiple of the block size.
*/
- blocks = len / SHA1_BLOCK_SIZE;
+ sctx->finalize = finalize;
kernel_neon_begin_partial(16);
- sha1_ce_transform(blocks, data, sctx->state, NULL, len);
+ sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_ce_transform);
+ if (!finalize)
+ sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
kernel_neon_end();
-
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], dst++);
-
- *sctx = (struct sha1_state){};
- return 0;
+ return sha1_base_finish(desc, out);
}
-static int sha1_export(struct shash_desc *desc, void *out)
+static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- struct sha1_state *dst = out;
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- *dst = *sctx;
- return 0;
-}
-
-static int sha1_import(struct shash_desc *desc, const void *in)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- struct sha1_state const *src = in;
-
- *sctx = *src;
- return 0;
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(16);
+ sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ kernel_neon_end();
+ return sha1_base_finish(desc, out);
}
static struct shash_alg alg = {
- .init = sha1_init,
- .update = sha1_update,
- .final = sha1_final,
- .finup = sha1_finup,
- .export = sha1_export,
- .import = sha1_import,
- .descsize = sizeof(struct sha1_state),
+ .init = sha1_base_init,
+ .update = sha1_ce_update,
+ .final = sha1_ce_final,
+ .finup = sha1_ce_finup,
+ .descsize = sizeof(struct sha1_ce_state),
.digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
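
The finup fast path above reduces to a single predicate: the assembly may append the final padding only when sha1_ce_finup() sees the whole message in one call (sst.count is still zero) and that message is an exact number of blocks. A minimal standalone sketch of that check (names are illustrative, not from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    #define EXAMPLE_SHA1_BLOCK_SIZE 64

    /*
     * Mirrors the "finalize" decision in sha1_ce_finup(): count == 0 means
     * nothing has been hashed yet, so this single call covers the whole
     * message and the asm code can emit the padding itself.
     */
    static bool example_can_finalize_in_asm(uint64_t count, unsigned int len)
    {
            return count == 0 && (len % EXAMPLE_SHA1_BLOCK_SIZE) == 0;
    }
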
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 7f29fc031ea8..5df9d9d470ad 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -73,8 +73,8 @@
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
- * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
- * u8 *head, long bytes)
+ * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ * int blocks)
*/
ENTRY(sha2_ce_transform)
/* load round constants */
@@ -85,24 +85,21 @@ ENTRY(sha2_ce_transform)
ld1 {v12.4s-v15.4s}, [x8]
/* load state */
- ldp dga, dgb, [x2]
+ ldp dga, dgb, [x0]
- /* load partial input (if supplied) */
- cbz x3, 0f
- ld1 {v16.4s-v19.4s}, [x3]
- b 1f
+ /* load sha256_ce_state::finalize */
+ ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
/* load input */
0: ld1 {v16.4s-v19.4s}, [x1], #64
- sub w0, w0, #1
+ sub w2, w2, #1
-1:
CPU_LE( rev32 v16.16b, v16.16b )
CPU_LE( rev32 v17.16b, v17.16b )
CPU_LE( rev32 v18.16b, v18.16b )
CPU_LE( rev32 v19.16b, v19.16b )
-2: add t0.4s, v16.4s, v0.4s
+1: add t0.4s, v16.4s, v0.4s
mov dg0v.16b, dgav.16b
mov dg1v.16b, dgbv.16b
@@ -131,15 +128,15 @@ CPU_LE( rev32 v19.16b, v19.16b )
add dgbv.4s, dgbv.4s, dg1v.4s
/* handled all input blocks? */
- cbnz w0, 0b
+ cbnz w2, 0b
/*
* Final block: add padding and total bit count.
- * Skip if we have no total byte count in x4. In that case, the input
- * size was not a round multiple of the block size, and the padding is
- * handled by the C code.
+ * Skip if the input size was not a round multiple of the block size;
+ * the padding is handled by the C code in that case.
*/
cbz x4, 3f
+ ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
movi v17.2d, #0
mov x8, #0x80000000
movi v18.2d, #0
@@ -148,9 +145,9 @@ CPU_LE( rev32 v19.16b, v19.16b )
mov x4, #0
mov v19.d[0], xzr
mov v19.d[1], x7
- b 2b
+ b 1b
/* store new state */
-3: stp dga, dgb, [x2]
+3: stp dga, dgb, [x0]
ret
ENDPROC(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index ae67e88c28b9..7cd587564a41 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -12,206 +12,85 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
+#define ASM_EXPORT(sym, val) \
+ asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
+
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
-asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
- u8 *head, long bytes);
-
-static int sha224_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha256_state){
- .state = {
- SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
- }
- };
- return 0;
-}
-
-static int sha256_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha256_state){
- .state = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
- }
- };
- return 0;
-}
-
-static int sha2_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->count += len;
-
- if ((partial + len) >= SHA256_BLOCK_SIZE) {
- int blocks;
-
- if (partial) {
- int p = SHA256_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
- }
-
- blocks = len / SHA256_BLOCK_SIZE;
- len %= SHA256_BLOCK_SIZE;
-
- kernel_neon_begin_partial(28);
- sha2_ce_transform(blocks, data, sctx->state,
- partial ? sctx->buf : NULL, 0);
- kernel_neon_end();
-
- data += blocks * SHA256_BLOCK_SIZE;
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
- return 0;
-}
-
-static void sha2_final(struct shash_desc *desc)
-{
- static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
-
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be64 bits = cpu_to_be64(sctx->count << 3);
- u32 padlen = SHA256_BLOCK_SIZE
- - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
-
- sha2_update(desc, padding, padlen);
- sha2_update(desc, (const u8 *)&bits, sizeof(bits));
-}
-
-static int sha224_final(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- int i;
-
- sha2_final(desc);
+struct sha256_ce_state {
+ struct sha256_state sst;
+ u32 finalize;
+};
- for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], dst++);
+asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ int blocks);
- *sctx = (struct sha256_state){};
- return 0;
-}
-
-static int sha256_final(struct shash_desc *desc, u8 *out)
+static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- int i;
-
- sha2_final(desc);
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], dst++);
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(28);
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha2_ce_transform);
+ kernel_neon_end();
- *sctx = (struct sha256_state){};
return 0;
}
-static void sha2_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- int blocks;
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+ bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
- if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
- sha2_update(desc, data, len);
- sha2_final(desc);
- return;
- }
+ ASM_EXPORT(sha256_ce_offsetof_count,
+ offsetof(struct sha256_ce_state, sst.count));
+ ASM_EXPORT(sha256_ce_offsetof_finalize,
+ offsetof(struct sha256_ce_state, finalize));
/*
- * Use a fast path if the input is a multiple of 64 bytes. In
- * this case, there is no need to copy data around, and we can
- * perform the entire digest calculation in a single invocation
- * of sha2_ce_transform()
+ * Allow the asm code to perform the finalization if there is no
+ * partial data and the input is a round multiple of the block size.
*/
- blocks = len / SHA256_BLOCK_SIZE;
+ sctx->finalize = finalize;
kernel_neon_begin_partial(28);
- sha2_ce_transform(blocks, data, sctx->state, NULL, len);
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha2_ce_transform);
+ if (!finalize)
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha2_ce_transform);
kernel_neon_end();
+ return sha256_base_finish(desc, out);
}
-static int sha224_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int sha256_ce_final(struct shash_desc *desc, u8 *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- int i;
-
- sha2_finup(desc, data, len);
-
- for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], dst++);
-
- *sctx = (struct sha256_state){};
- return 0;
-}
-
-static int sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- int i;
-
- sha2_finup(desc, data, len);
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], dst++);
-
- *sctx = (struct sha256_state){};
- return 0;
-}
-
-static int sha2_export(struct shash_desc *desc, void *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- struct sha256_state *dst = out;
-
- *dst = *sctx;
- return 0;
-}
-
-static int sha2_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- struct sha256_state const *src = in;
-
- *sctx = *src;
- return 0;
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(28);
+ sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+ kernel_neon_end();
+ return sha256_base_finish(desc, out);
}
static struct shash_alg algs[] = { {
- .init = sha224_init,
- .update = sha2_update,
- .final = sha224_final,
- .finup = sha224_finup,
- .export = sha2_export,
- .import = sha2_import,
- .descsize = sizeof(struct sha256_state),
+ .init = sha224_base_init,
+ .update = sha256_ce_update,
+ .final = sha256_ce_final,
+ .finup = sha256_ce_finup,
+ .descsize = sizeof(struct sha256_ce_state),
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
@@ -221,15 +100,12 @@ static struct shash_alg algs[] = { {
.cra_module = THIS_MODULE,
}
}, {
- .init = sha256_init,
- .update = sha2_update,
- .final = sha256_final,
- .finup = sha256_finup,
- .export = sha2_export,
- .import = sha2_import,
- .descsize = sizeof(struct sha256_state),
+ .init = sha256_base_init,
+ .update = sha256_ce_update,
+ .final = sha256_ce_final,
+ .finup = sha256_ce_finup,
+ .descsize = sizeof(struct sha256_ce_state),
.digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
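
Both glue files rely on the ASM_EXPORT() trick above: the "I" constraint forces offsetof() down to a compile-time immediate, and the inline asm publishes it as a global absolute symbol that the .S code can reference (sha1_ce_offsetof_finalize, sha256_ce_offsetof_count, and friends). A rough GCC-only sketch of the same idea, with invented names:

    #include <stddef.h>

    struct example_state {
            unsigned long long count;
            unsigned int finalize;
    };

    /* Publish a C constant as a global absolute assembler symbol. */
    #define EXAMPLE_ASM_EXPORT(sym, val) \
            asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));

    /*
     * The asm statement is only emitted if the enclosing function is, so
     * the exports live inside code that is guaranteed to be compiled in
     * (the real patch places them in the finup routines).
     */
    void example_export_offsets(void)
    {
            EXAMPLE_ASM_EXPORT(example_offsetof_count,
                               offsetof(struct example_state, count));
            EXAMPLE_ASM_EXPORT(example_offsetof_finalize,
                               offsetof(struct example_state, finalize));
    }
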
diff --git a/arch/arm64/include/asm/acenv.h b/arch/arm64/include/asm/acenv.h
new file mode 100644
index 000000000000..b49166fde7ea
--- /dev/null
+++ b/arch/arm64/include/asm/acenv.h
@@ -0,0 +1,18 @@
+/*
+ * ARM64 specific ACPICA environments and implementation
+ *
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ACENV_H
+#define _ASM_ACENV_H
+
+/* This header is required unconditionally by the ACPI core; update it when needed. */
+
+#endif /* _ASM_ACENV_H */
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
new file mode 100644
index 000000000000..59c05d8ea4a0
--- /dev/null
+++ b/arch/arm64/include/asm/acpi.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#include <linux/mm.h>
+#include <linux/irqchip/arm-gic-acpi.h>
+
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+/* Basic configuration for ACPI */
+#ifdef CONFIG_ACPI
+/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
+static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
+ acpi_size size)
+{
+ if (!page_is_ram(phys >> PAGE_SHIFT))
+ return ioremap(phys, size);
+
+ return ioremap_cache(phys, size);
+}
+#define acpi_os_ioremap acpi_os_ioremap
+
+typedef u64 phys_cpuid_t;
+#define PHYS_CPUID_INVALID INVALID_HWID
+
+#define acpi_strict 1 /* No out-of-spec workarounds on ARM64 */
+extern int acpi_disabled;
+extern int acpi_noirq;
+extern int acpi_pci_disabled;
+
+/* 1 to indicate PSCI 0.2+ is implemented */
+static inline bool acpi_psci_present(void)
+{
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
+}
+
+/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
+static inline bool acpi_psci_use_hvc(void)
+{
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
+}
+
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+static inline void enable_acpi(void)
+{
+ acpi_disabled = 0;
+ acpi_pci_disabled = 0;
+ acpi_noirq = 0;
+}
+
+/*
+ * The ACPI processor driver in the ACPI core code needs this macro
+ * to find out whether this CPU has already been mapped (from CPU
+ * hardware ID to CPU logical ID) or not.
+ */
+#define cpu_physical_id(cpu) cpu_logical_map(cpu)
+
+/*
+ * This is used by the ACPI core in kdump to boot a UP system with an
+ * SMP kernel; with this check the ACPI core will neither override the
+ * CPU index obtained from the GICC with 0 nor print an error message.
+ * Since the MADT must provide at least one GICC structure for GIC
+ * initialization, a CPU will always be available in the MADT on ARM64.
+ */
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return true;
+}
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+void __init acpi_init_cpus(void);
+
+#else
+static inline bool acpi_psci_present(void) { return false; }
+static inline bool acpi_psci_use_hvc(void) { return false; }
+static inline void acpi_init_cpus(void) { }
+#endif /* CONFIG_ACPI */
+
+#endif /*_ASM_ACPI_H*/
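
Note how the !CONFIG_ACPI stubs at the end of the header let callers test for an ACPI-described PSCI firmware without any #ifdef; a hypothetical caller (function name and messages invented for illustration) might read:

    static void example_pick_boot_protocol(void)
    {
            /* acpi_psci_present() is stubbed to return false on !CONFIG_ACPI. */
            if (acpi_psci_present())
                    pr_info("PSCI 0.2+ advertised by the ACPI FADT\n");
            else
                    pr_info("no ACPI PSCI description, using the device tree\n");
    }
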
diff --git a/arch/arm64/include/asm/cputable.h b/arch/arm64/include/asm/arm-cci.h
index e3bd983d3661..f0b63712e10e 100644
--- a/arch/arm64/include/asm/cputable.h
+++ b/arch/arm64/include/asm/arm-cci.h
@@ -1,9 +1,9 @@
/*
- * arch/arm64/include/asm/cputable.h
+ * arch/arm64/include/asm/arm-cci.h
*
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 ARM Ltd.
*
- * This program is free software: you can redistribute it and/or modify
+ * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
@@ -15,16 +15,13 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ASM_CPUTABLE_H
-#define __ASM_CPUTABLE_H
-struct cpu_info {
- unsigned int cpu_id_val;
- unsigned int cpu_id_mask;
- const char *cpu_name;
- unsigned long (*cpu_setup)(void);
-};
+#ifndef __ASM_ARM_CCI_H
+#define __ASM_ARM_CCI_H
-extern struct cpu_info *lookup_processor_type(unsigned int);
+static inline bool platform_has_secure_cci_access(void)
+{
+ return false;
+}
#endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 750bac4e637e..144b64ad96c3 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -159,4 +159,52 @@ lr .req x30 // link register
orr \rd, \lbits, \hbits, lsl #32
.endm
+/*
+ * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
+ * <symbol> is within the range +/- 4 GB of the PC.
+ */
+ /*
+ * @dst: destination register (64 bit wide)
+ * @sym: name of the symbol
+ * @tmp: optional scratch register to be used if <dst> == sp, which
+ * is not allowed in an adrp instruction
+ */
+ .macro adr_l, dst, sym, tmp=
+ .ifb \tmp
+ adrp \dst, \sym
+ add \dst, \dst, :lo12:\sym
+ .else
+ adrp \tmp, \sym
+ add \dst, \tmp, :lo12:\sym
+ .endif
+ .endm
+
+ /*
+ * @dst: destination register (32 or 64 bit wide)
+ * @sym: name of the symbol
+ * @tmp: optional 64-bit scratch register to be used if <dst> is a
+ * 32-bit wide register, in which case it cannot be used to hold
+ * the address
+ */
+ .macro ldr_l, dst, sym, tmp=
+ .ifb \tmp
+ adrp \dst, \sym
+ ldr \dst, [\dst, :lo12:\sym]
+ .else
+ adrp \tmp, \sym
+ ldr \dst, [\tmp, :lo12:\sym]
+ .endif
+ .endm
+
+ /*
+ * @src: source register (32 or 64 bit wide)
+ * @sym: name of the symbol
+ * @tmp: mandatory 64-bit scratch register to calculate the address
+ * while <src> needs to be preserved.
+ */
+ .macro str_l, src, sym, tmp
+ adrp \tmp, \sym
+ str \src, [\tmp, :lo12:\sym]
+ .endm
+
#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index a5abb0062d6e..71f19c4dc0de 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -65,6 +65,14 @@ do { \
do { \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
case 4: \
asm volatile ("stlr %w1, %0" \
: "=Q" (*p) : "r" (v) : "memory"); \
@@ -81,6 +89,14 @@ do { \
typeof(*p) ___p1; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("ldarb %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("ldarh %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
case 4: \
asm volatile ("ldar %w0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index da301ee7395c..5a31d6716914 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -66,5 +66,6 @@ struct cpu_operations {
extern const struct cpu_operations *cpu_ops[NR_CPUS];
int __init cpu_read_ops(struct device_node *dn, int cpu);
void __init cpu_read_bootcpu_ops(void);
+const struct cpu_operations *cpu_get_ops(const char *name);
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b6c16d5f622f..82cb9f98ba1a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -23,11 +23,24 @@
#define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+#define ARM64_WORKAROUND_845719 2
-#define ARM64_NCAPS 2
+#define ARM64_NCAPS 3
#ifndef __ASSEMBLY__
+struct arm64_cpu_capabilities {
+ const char *desc;
+ u16 capability;
+ bool (*matches)(const struct arm64_cpu_capabilities *);
+ union {
+ struct { /* To be used for erratum handling only */
+ u32 midr_model;
+ u32 midr_range_min, midr_range_max;
+ };
+ };
+};
+
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
static inline bool cpu_have_feature(unsigned int num)
@@ -51,7 +64,10 @@ static inline void cpus_set_cap(unsigned int num)
__set_bit(num, cpu_hwcaps);
}
+void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+ const char *info);
void check_local_cpu_errata(void);
+void check_local_cpu_features(void);
bool cpu_supports_mixed_endian_el0(void);
bool system_supports_mixed_endian_el0(void);
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index c60643f14cda..141b2fcabaa6 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -4,10 +4,10 @@
#include <asm/proc-fns.h>
#ifdef CONFIG_CPU_IDLE
-extern int cpu_init_idle(unsigned int cpu);
+extern int arm_cpuidle_init(unsigned int cpu);
extern int cpu_suspend(unsigned long arg);
#else
-static inline int cpu_init_idle(unsigned int cpu)
+static inline int arm_cpuidle_init(unsigned int cpu)
{
return -EOPNOTSUPP;
}
@@ -17,5 +17,8 @@ static inline int cpu_suspend(unsigned long arg)
return -EOPNOTSUPP;
}
#endif
-
+static inline int arm_cpuidle_suspend(int index)
+{
+ return cpu_suspend(index);
+}
#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 6932bb57dba0..9437e3dc5833 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -97,7 +97,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
- return 0;
+ return false;
return addr + size - 1 <= *dev->dma_mask;
}
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 1f65be393139..faad6df49e5b 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -125,7 +125,6 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-extern unsigned long randomize_et_dyn(unsigned long base);
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
/*
@@ -157,10 +156,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
#ifdef CONFIG_COMPAT
#ifdef __AARCH64EB__
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index defa0ff98250..95e6b6dcbe37 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -33,6 +33,7 @@
enum fixed_addresses {
FIX_HOLE,
FIX_EARLYCON_MEM_BASE,
+ FIX_TEXT_POKE0,
__end_of_permanent_fixed_addresses,
/*
@@ -49,7 +50,6 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
- FIX_TEXT_POKE0,
__end_of_fixed_addresses
};
@@ -62,6 +62,9 @@ void __init early_fixmap_init(void);
#define __early_set_fixmap __set_fixmap
+#define __late_set_fixmap __set_fixmap
+#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR)
+
extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
#include <asm-generic/fixmap.h>
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index d2f49423c5dc..f81b328d9cf4 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -285,6 +285,7 @@ bool aarch64_insn_is_nop(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 94c53674a31d..bbb251b14746 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,6 +1,8 @@
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
+#include <linux/irqchip/arm-gic-acpi.h>
+
#include <asm-generic/irq.h>
struct pt_regs;
@@ -8,4 +10,15 @@ struct pt_regs;
extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+static inline void acpi_irq_init(void)
+{
+ /*
+ * Hardcode ACPI IRQ chip initialization to GICv2 for now.
+ * Proper irqchip infrastructure will be implemented along with
+ * incoming GICv2m|GICv3|ITS bits.
+ */
+ acpi_gic_init();
+}
+#define acpi_irq_init acpi_irq_init
+
#endif
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index bbfb600fa822..61505676d085 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -68,6 +68,8 @@
#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
@@ -163,12 +165,12 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
/*
* If we are concatenating first level stage-2 page tables, we would have less
* than or equal to 16 pointers in the fake PGD, because that's what the
- * architecture allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS)
+ * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
* represents the first level for the host, and we add 1 to go to the next
* level (which uses contatenation) for the stage-2 tables.
*/
#if PTRS_PER_S2_PGD <= 16
-#define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#define KVM_PREALLOC_LEVEL (4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL (0)
#endif
@@ -269,5 +271,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+ return __cpu_uses_extended_idmap();
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+ pgd_t *hyp_pgd,
+ pgd_t *merged_hyp_pgd,
+ unsigned long hyp_idmap_start)
+{
+ int idmap_idx;
+
+ /*
+ * Use the first entry to access the HYP mappings. It is
+ * guaranteed to be free; otherwise we wouldn't use an
+ * extended idmap.
+ */
+ VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
+ merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+
+ /*
+ * Create another extended level entry that points to the boot HYP map,
+ * which contains an ID mapping of the HYP init code. We essentially
+ * merge the boot and runtime HYP maps by doing so, but they don't
+ * overlap anyway, so this is fine.
+ */
+ idmap_idx = hyp_idmap_start >> VA_BITS;
+ VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
+ merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 101a42bde728..8ec41e5f56f0 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void)
: "r" (ttbr));
}
+/*
+ * TCR.T0SZ value to use when the ID map is active. Usually equals
+ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
+ * physical memory, in which case it will be smaller.
+ */
+extern u64 idmap_t0sz;
+
+static inline bool __cpu_uses_extended_idmap(void)
+{
+ return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
+ unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
+}
+
+static inline void __cpu_set_tcr_t0sz(u64 t0sz)
+{
+ unsigned long tcr;
+
+ if (__cpu_uses_extended_idmap())
+ asm volatile (
+ " mrs %0, tcr_el1 ;"
+ " bfi %0, %1, %2, %3 ;"
+ " msr tcr_el1, %0 ;"
+ " isb"
+ : "=&r" (tcr)
+ : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+/*
+ * Set TCR.T0SZ to the value appropriate for activating the identity map.
+ */
+static inline void cpu_set_idmap_tcr_t0sz(void)
+{
+ __cpu_set_tcr_t0sz(idmap_t0sz);
+}
+
+/*
+ * Set TCR.T0SZ to its default value (based on VA_BITS)
+ */
+static inline void cpu_set_default_tcr_t0sz(void)
+{
+ __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
+}
+
static inline void switch_new_context(struct mm_struct *mm)
{
unsigned long flags;
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 22b16232bd60..7d9c7e4a424b 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -33,16 +33,18 @@
* image. Both require pgd, pud (4 levels only) and pmd tables to (section)
* map the kernel. With the 64K page configuration, swapper and idmap need to
* map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information).
+ * for more information). Note that the number of ID map translation levels
+ * could be increased on the fly if system RAM is out of reach for the default
+ * VA range, so 3 pages are reserved in all cases.
*/
#ifdef CONFIG_ARM64_64K_PAGES
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS)
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
#else
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS - 1)
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
#endif
#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
-#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE)
+#define IDMAP_DIR_SIZE (3 * PAGE_SIZE)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 872ba939fcb2..b008a72f8bc0 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -27,6 +27,12 @@
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ /* no legacy IRQ on arm64 */
+ return -ENODEV;
+}
+
static inline int pci_proc_domain(struct pci_bus *bus)
{
return 1;
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index e20df38a8ff3..76420568d66a 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -28,7 +28,7 @@
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
@@ -46,9 +46,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
@@ -66,7 +66,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
}
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5f930cc9ea83..59bfae75dc98 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -21,7 +21,7 @@
/*
* PMD_SHIFT determines the size a level 2 page table entry can map.
*/
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -31,7 +31,7 @@
/*
* PUD_SHIFT determines the size a level 1 page table entry can map.
*/
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3)
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
@@ -42,7 +42,7 @@
* PGDIR_SHIFT determines the size a top-level page table entry can map
* (depending on the configuration, this level can be 0, 1 or 2).
*/
-#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3)
+#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_PGTABLE_LEVELS + 3)
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
@@ -143,7 +143,12 @@
/*
* TCR flags.
*/
-#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
+#define TCR_T0SZ_OFFSET 0
+#define TCR_T1SZ_OFFSET 16
+#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET)
+#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET)
+#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
+#define TCR_TxSZ_WIDTH 6
#define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24))
#define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24))
#define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24))
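
The reworked definitions split TCR_TxSZ() into separate T0SZ and T1SZ fields (offsets 0 and 16, each TCR_TxSZ_WIDTH = 6 bits wide). A worked example for a 48-bit VA configuration, using stand-in macros that simply mirror the ones above:

    /* Mirror TCR_T0SZ()/TCR_T1SZ() for illustration only. */
    #define EXAMPLE_T0SZ(va_bits)   ((64UL - (va_bits)) << 0)
    #define EXAMPLE_T1SZ(va_bits)   ((64UL - (va_bits)) << 16)

    /* With 48-bit VAs both size fields hold 64 - 48 = 16. */
    _Static_assert(EXAMPLE_T0SZ(48) == 16, "T0SZ sits in bits [5:0]");
    _Static_assert(EXAMPLE_T1SZ(48) == 16UL << 16, "T1SZ sits in bits [21:16]");
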
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index ca9df80af896..2b1bd7e52c3b 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -38,13 +38,13 @@ typedef struct { pteval_t pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) } )
@@ -64,13 +64,13 @@ typedef pteval_t pte_t;
#define pte_val(x) (x)
#define __pte(x) (x)
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
typedef pmdval_t pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
typedef pudval_t pud_t;
#define pud_val(x) (x)
#define __pud(x) (x)
@@ -86,9 +86,9 @@ typedef pteval_t pgprot_t;
#endif /* STRICT_MM_TYPECHECKS */
-#if CONFIG_ARM64_PGTABLE_LEVELS == 2
+#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
-#elif CONFIG_ARM64_PGTABLE_LEVELS == 3
+#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 800ec0e87ed9..56283f8a675c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -374,7 +374,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
*/
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
@@ -409,9 +409,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))
@@ -445,7 +445,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
index e6f087806aaf..b7710a59672c 100644
--- a/arch/arm64/include/asm/pmu.h
+++ b/arch/arm64/include/asm/pmu.h
@@ -44,6 +44,7 @@ struct pmu_hw_events {
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
+ int *irq_affinity;
const char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 941c375616e2..220633b791b8 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -45,15 +45,6 @@ do { \
cpu_do_switch_mm(virt_to_phys(pgd),mm); \
} while (0)
-#define cpu_get_pgd() \
-({ \
- unsigned long pg; \
- asm("mrs %0, ttbr0_el1\n" \
- : "=r" (pg)); \
- pg &= ~0xffff000000003ffful; \
- (pgd_t *)phys_to_virt(pg); \
-})
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 20e9591a60cf..d2c37a1df0eb 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -127,7 +127,11 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
-#define cpu_relax() barrier()
+static inline void cpu_relax(void)
+{
+ asm volatile("yield" ::: "memory");
+}
+
#define cpu_relax_lowlatency() cpu_relax()
/* Thread switching */
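
Turning cpu_relax() into a yield hint matters mostly in busy-wait loops, where it tells the hardware (or a hypervisor) that the CPU is merely spinning; the typical pattern looks like:

    /* Poll a flag, yielding on every iteration of the spin loop. */
    static void example_wait_for(int *flag)
    {
            while (!READ_ONCE(*flag))
                    cpu_relax();    /* now emits a yield, not just a compiler barrier */
    }
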
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index e5312ea0ec1a..2454bc59c916 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,6 +14,7 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-int psci_init(void);
+int psci_dt_init(void);
+int psci_acpi_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 780f82c827b6..bf22650b1a78 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -39,9 +39,10 @@ extern void show_ipi_list(struct seq_file *p, int prec);
extern void handle_IPI(int ipinr, struct pt_regs *regs);
/*
- * Setup the set of possible CPUs (via set_cpu_possible)
+ * Discover the set of possible CPUs and determine their
+ * SMP operations.
*/
-extern void smp_init_cpus(void);
+extern void of_smp_init_cpus(void);
/*
* Provide a function to raise an IPI cross call on CPUs in callmap.
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 59e282311b58..8dcd61e32176 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
extern u64 __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+void __init do_post_cpus_up_work(void);
+
#endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 702e1e6a0d80..dcd06d18a42a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -33,7 +33,6 @@
#ifndef __ASSEMBLY__
struct task_struct;
-struct exec_domain;
#include <asm/types.h>
@@ -47,7 +46,6 @@ struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};
@@ -55,7 +53,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 53d9c354219f..3a0242c7eb8d 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -53,7 +53,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb_remove_entry(tlb, pte);
}
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
@@ -62,7 +62,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
}
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 27224426e0bf..cef934a90f17 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -406,7 +406,7 @@ __SYSCALL(__NR_vfork, sys_vfork)
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
#define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, sys_mmap_pgoff)
+__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
#define __NR_truncate64 193
__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
#define __NR_ftruncate64 194
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index c154c0b7eb60..d26832022127 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -188,8 +188,14 @@ struct kvm_arch_memory_slot {
#define KVM_ARM_IRQ_CPU_IRQ 0
#define KVM_ARM_IRQ_CPU_FIQ 1
-/* Highest supported SPI, from VGIC_NR_IRQS */
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
#define KVM_ARM_IRQ_GIC_MAX 127
+#endif
/* One single KVM irqchip, ie. the VGIC */
#define KVM_NR_IRQCHIPS 1
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ee07eee80c2..426d0763c81b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -12,12 +12,12 @@ CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
-arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
+arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
- alternative.o cacheinfo.o
+ cpufeature.o alternative.o cacheinfo.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o entry32.o \
@@ -35,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
+arm64-obj-$(CONFIG_ACPI) += acpi.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
new file mode 100644
index 000000000000..8b839558838e
--- /dev/null
+++ b/arch/arm64/kernel/acpi.c
@@ -0,0 +1,345 @@
+/*
+ * ARM64 Specific Low-Level ACPI Boot Support
+ *
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ * Author: Naresh Bhat <naresh.bhat@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/smp.h>
+
+#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+
+int acpi_noirq = 1; /* skip ACPI IRQ initialization */
+int acpi_disabled = 1;
+EXPORT_SYMBOL(acpi_disabled);
+
+int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
+EXPORT_SYMBOL(acpi_pci_disabled);
+
+/* Processors with enabled flag and sane MPIDR */
+static int enabled_cpus;
+
+/* Whether the boot CPU has a valid entry in the MADT */
+static bool bootcpu_valid __initdata;
+
+static bool param_acpi_off __initdata;
+static bool param_acpi_force __initdata;
+
+static int __init parse_acpi(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /* "acpi=off" disables both ACPI table parsing and interpreter */
+ if (strcmp(arg, "off") == 0)
+ param_acpi_off = true;
+ else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
+ param_acpi_force = true;
+ else
+ return -EINVAL; /* Core will print when we return error */
+
+ return 0;
+}
+early_param("acpi", parse_acpi);
+
+static int __init dt_scan_depth1_nodes(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ /*
+ * Return 1 as soon as we encounter a node at depth 1 that is
+ * not the /chosen node.
+ */
+ if (depth == 1 && (strcmp(uname, "chosen") != 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * __acpi_map_table() will be called before paging_init(), so early_ioremap()
+ * or early_memremap() should be called here for ACPI table mapping.
+ */
+char *__init __acpi_map_table(unsigned long phys, unsigned long size)
+{
+ if (!size)
+ return NULL;
+
+ return early_memremap(phys, size);
+}
+
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+ if (!map || !size)
+ return;
+
+ early_memunmap(map, size);
+}
+
+/**
+ * acpi_map_gic_cpu_interface - generates a logical cpu number
+ * and maps it to the MPIDR represented by the GICC structure
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+ int i;
+ u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
+ bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);
+
+ if (mpidr == INVALID_HWID) {
+ pr_info("Skip MADT cpu entry with invalid MPIDR\n");
+ return;
+ }
+
+ total_cpus++;
+ if (!enabled)
+ return;
+
+ if (enabled_cpus >= NR_CPUS) {
+ pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
+ NR_CPUS, total_cpus, mpidr);
+ return;
+ }
+
+ /* Check if GICC structure of boot CPU is available in the MADT */
+ if (cpu_logical_map(0) == mpidr) {
+ if (bootcpu_valid) {
+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
+ mpidr);
+ return;
+ }
+
+ bootcpu_valid = true;
+ }
+
+ /*
+ * Duplicate MPIDRs are a recipe for disaster. Scan
+ * all initialized entries and check for
+ * duplicates. If any is found just ignore the CPU.
+ */
+ for (i = 1; i < enabled_cpus; i++) {
+ if (cpu_logical_map(i) == mpidr) {
+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
+ mpidr);
+ return;
+ }
+ }
+
+ if (!acpi_psci_present())
+ return;
+
+ cpu_ops[enabled_cpus] = cpu_get_ops("psci");
+ /* CPU 0 was already initialized */
+ if (enabled_cpus) {
+ if (!cpu_ops[enabled_cpus])
+ return;
+
+ if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
+ return;
+
+ /* map the logical cpu id to cpu MPIDR */
+ cpu_logical_map(enabled_cpus) = mpidr;
+ }
+
+ enabled_cpus++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+ acpi_map_gic_cpu_interface(processor);
+ return 0;
+}
+
+/* Parse GIC cpu interface entries in MADT for SMP init */
+void __init acpi_init_cpus(void)
+{
+ int count, i;
+
+ /*
+ * do a partial walk of MADT to determine how many CPUs
+ * we have including disabled CPUs, and get information
+ * we need for SMP init
+ */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ if (!count) {
+ pr_err("No GIC CPU interface entries present\n");
+ return;
+ } else if (count < 0) {
+ pr_err("Error parsing GIC CPU interface entry\n");
+ return;
+ }
+
+ if (!bootcpu_valid) {
+ pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
+ return;
+ }
+
+ for (i = 0; i < enabled_cpus; i++)
+ set_cpu_possible(i, true);
+
+ /* Make boot-up look pretty */
+ pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
+}
+
+/*
+ * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
+ * checks on it
+ *
+ * Return 0 on success, <0 on failure
+ */
+static int __init acpi_fadt_sanity_check(void)
+{
+ struct acpi_table_header *table;
+ struct acpi_table_fadt *fadt;
+ acpi_status status;
+ acpi_size tbl_size;
+ int ret = 0;
+
+ /*
+ * FADT is required on arm64; retrieve it to check its presence
+ * and carry out revision and ACPI HW reduced compliancy tests
+ */
+ status = acpi_get_table_with_size(ACPI_SIG_FADT, 0, &table, &tbl_size);
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get FADT table, %s\n", msg);
+ return -ENODEV;
+ }
+
+ fadt = (struct acpi_table_fadt *)table;
+
+ /*
+ * The revision in the table header is the FADT major revision, and
+ * there is a minor revision of the FADT which was introduced by
+ * ACPI 5.1. We only deal with ACPI 5.1 or newer revisions to get the
+ * GIC and SMP boot protocol configuration data.
+ */
+ if (table->revision < 5 ||
+ (table->revision == 5 && fadt->minor_revision < 1)) {
+ pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+ table->revision, fadt->minor_revision);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
+ pr_err("FADT not ACPI hardware reduced compliant\n");
+ ret = -EINVAL;
+ }
+
+out:
+ /*
+ * acpi_get_table_with_size() creates an FADT table mapping that
+ * should be released after parsing and before resuming boot.
+ */
+ early_acpi_os_unmap_memory(table, tbl_size);
+ return ret;
+}
+
+/*
+ * acpi_boot_table_init() is called from setup_arch(), always.
+ * 1. find RSDP and get its address, and then find XSDT
+ * 2. extract all tables and checksum them all
+ * 3. check ACPI FADT revision
+ * 4. check ACPI FADT HW reduced flag
+ *
+ * We can parse ACPI boot-time tables such as MADT after
+ * this function is called.
+ *
+ * On return ACPI is enabled if either:
+ *
+ * - ACPI tables are initialized and sanity checks passed
+ * - acpi=force was passed in the command line and ACPI was not disabled
+ * explicitly through acpi=off command line parameter
+ *
+ * ACPI is disabled on function return otherwise
+ */
+void __init acpi_boot_table_init(void)
+{
+ /*
+ * Enable ACPI instead of device tree unless
+ * - ACPI has been disabled explicitly (acpi=off), or
+ * - the device tree is not empty (it has more than just a /chosen node)
+ * and ACPI has not been force enabled (acpi=force)
+ */
+ if (param_acpi_off ||
+ (!param_acpi_force && of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
+ return;
+
+ /*
+ * ACPI is disabled at this point. Enable it in order to parse
+ * the ACPI tables and carry out sanity checks
+ */
+ enable_acpi();
+
+ /*
+ * If ACPI tables are initialized and FADT sanity checks passed,
+ * leave ACPI enabled and carry on booting; otherwise disable ACPI
+ * on initialization error.
+ * If acpi=force was passed on the command line it forces ACPI
+ * to be enabled even if its initialization failed.
+ */
+ if (acpi_table_init() || acpi_fadt_sanity_check()) {
+ pr_err("Failed to init ACPI tables\n");
+ if (!param_acpi_force)
+ disable_acpi();
+ }
+}
+
+void __init acpi_gic_init(void)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ acpi_size tbl_size;
+ int err;
+
+ if (acpi_disabled)
+ return;
+
+ status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get MADT table, %s\n", msg);
+ return;
+ }
+
+ err = gic_v2_acpi_init(table);
+ if (err)
+ pr_err("Failed to initialize GIC IRQ controller");
+
+ early_acpi_os_unmap_memory((char *)table, tbl_size);
+}
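
The command-line versus device-tree interplay in acpi_boot_table_init() can be restated as one small predicate; this is an illustrative rewrite, not code from the patch:

    /*
     * ACPI table parsing is attempted only when it has not been disabled
     * with acpi=off, and either acpi=force was given or the device tree
     * carries nothing beyond the /chosen node.
     */
    static bool example_should_try_acpi(bool acpi_off, bool acpi_force,
                                        bool dt_has_real_content)
    {
            if (acpi_off)
                    return false;
            return acpi_force || !dt_has_real_content;
    }
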
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index ad7821d64a1d..28f8365edc4c 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -43,7 +43,7 @@ static int __apply_alternatives(void *alt_region)
if (!cpus_have_cap(alt->cpufeature))
continue;
- BUG_ON(alt->alt_len > alt->orig_len);
+ BUG_ON(alt->alt_len != alt->orig_len);
pr_info_once("patching kernel code\n");
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index f7fa65d4c352..da675cc5dfae 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,7 +24,6 @@
#include <linux/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
-#include <asm/cputable.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
@@ -38,7 +37,6 @@ int main(void)
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
@@ -71,9 +69,6 @@ int main(void)
BLANK();
DEFINE(PAGE_SZ, PAGE_SIZE);
BLANK();
- DEFINE(CPU_INFO_SZ, sizeof(struct cpu_info));
- DEFINE(CPU_INFO_SETUP, offsetof(struct cpu_info, cpu_setup));
- BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index fa62637e63a8..6ffd91438560 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -16,8 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#define pr_fmt(fmt) "alternatives: " fmt
-
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -26,27 +24,11 @@
#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-/*
- * Add a struct or another datatype to the union below if you need
- * different means to detect an affected CPU.
- */
-struct arm64_cpu_capabilities {
- const char *desc;
- u16 capability;
- bool (*is_affected)(struct arm64_cpu_capabilities *);
- union {
- struct {
- u32 midr_model;
- u32 midr_range_min, midr_range_max;
- };
- };
-};
-
#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK)
static bool __maybe_unused
-is_affected_midr_range(struct arm64_cpu_capabilities *entry)
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{
u32 midr = read_cpuid_id();
@@ -59,12 +41,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry)
}
#define MIDR_RANGE(model, min, max) \
- .is_affected = is_affected_midr_range, \
+ .matches = is_affected_midr_range, \
.midr_model = model, \
.midr_range_min = min, \
.midr_range_max = max
-struct arm64_cpu_capabilities arm64_errata[] = {
+const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
defined(CONFIG_ARM64_ERRATUM_824069)
@@ -88,7 +70,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 832075",
.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ {
+ /* Cortex-A53 r0p[01234] */
+ .desc = "ARM erratum 845719",
+ .capability = ARM64_WORKAROUND_845719,
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
},
#endif
{
@@ -97,15 +88,5 @@ struct arm64_cpu_capabilities arm64_errata[] = {
void check_local_cpu_errata(void)
{
- struct arm64_cpu_capabilities *cpus = arm64_errata;
- int i;
-
- for (i = 0; cpus[i].desc; i++) {
- if (!cpus[i].is_affected(&cpus[i]))
- continue;
-
- if (!cpus_have_cap(cpus[i].capability))
- pr_info("enabling workaround for %s\n", cpus[i].desc);
- cpus_set_cap(cpus[i].capability);
- }
+ check_cpu_capabilities(arm64_errata, "enabling workaround for");
}
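
A minimal sketch of the variant/revision comparison that MIDR_RANGE() describes, assuming the usual MIDR_EL1 layout (variant in bits [23:20], revision in bits [3:0]) and the kernel's u32/bool types; under that layout Cortex-A57 r1p2 is (1 << MIDR_VARIANT_SHIFT) | 2, which is why the erratum 832075 upper bound above is no longer written as the literal 0x12:

static bool midr_in_range(u32 midr, u32 model, u32 model_mask,
			  u32 min, u32 max)
{
	/* variant [23:20] and revision [3:0] form the comparable value */
	u32 var_rev = midr & ((0xf << MIDR_VARIANT_SHIFT) | 0xf);

	if ((midr & model_mask) != model)	/* different implementer/part */
		return false;

	return var_rev >= min && var_rev <= max;
}
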
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index cce952440c64..fb8ff9ba467a 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -35,7 +35,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
NULL,
};
-static const struct cpu_operations * __init cpu_get_ops(const char *name)
+const struct cpu_operations * __init cpu_get_ops(const char *name)
{
const struct cpu_operations **ops = supported_cpu_ops;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
new file mode 100644
index 000000000000..3d9967e43d89
--- /dev/null
+++ b/arch/arm64/kernel/cpufeature.c
@@ -0,0 +1,47 @@
+/*
+ * Contains CPU feature definitions
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/types.h>
+#include <asm/cpu.h>
+#include <asm/cpufeature.h>
+
+static const struct arm64_cpu_capabilities arm64_features[] = {
+ {},
+};
+
+void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+ const char *info)
+{
+ int i;
+
+ for (i = 0; caps[i].desc; i++) {
+ if (!caps[i].matches(&caps[i]))
+ continue;
+
+ if (!cpus_have_cap(caps[i].capability))
+ pr_info("%s %s\n", info, caps[i].desc);
+ cpus_set_cap(caps[i].capability);
+ }
+}
+
+void check_local_cpu_features(void)
+{
+ check_cpu_capabilities(arm64_features, "detected feature");
+}
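
check_cpu_capabilities() is shared between the errata table above and the (for now empty) arm64_features[] list. A hypothetical entry would look like the sketch below; the .desc/.capability/.matches fields follow the structure used in this series, while ARM64_HAS_FOO and has_foo() are invented purely for illustration:

static bool has_foo(const struct arm64_cpu_capabilities *entry)
{
	return false;		/* a real entry would probe an ID register here */
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "imaginary FOO extension",
		.capability = ARM64_HAS_FOO,	/* invented constant */
		.matches = has_foo,
	},
	{},	/* empty .desc terminates the walk in check_cpu_capabilities() */
};
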
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 5c0896647fd1..a78143a5c99f 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -15,7 +15,7 @@
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
-int cpu_init_idle(unsigned int cpu)
+int arm_cpuidle_init(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
struct device_node *cpu_node = of_cpu_device_node_get(cpu);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 929855691dae..75d5a867e7fb 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -236,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();
+ check_local_cpu_features();
update_cpu_features(info);
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cf21bb3bf752..959fe8733560 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -21,8 +21,10 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
@@ -120,6 +122,24 @@
ct_user_enter
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ alternative_insn \
+ "nop", \
+ "tbz x22, #4, 1f", \
+ ARM64_WORKAROUND_845719
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ alternative_insn \
+ "nop; nop", \
+ "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
+ ARM64_WORKAROUND_845719
+#else
+ alternative_insn \
+ "nop", \
+ "msr contextidr_el1, xzr; 1:", \
+ ARM64_WORKAROUND_845719
+#endif
+#endif
.endif
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
index 9a8f6ae2530e..bd9bfaa9269b 100644
--- a/arch/arm64/kernel/entry32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -19,9 +19,12 @@
*/
#include <linux/linkage.h>
+#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/page.h>
/*
* System call wrappers for the AArch32 compatibility layer.
@@ -54,6 +57,21 @@ ENTRY(compat_sys_fstatfs64_wrapper)
ENDPROC(compat_sys_fstatfs64_wrapper)
/*
+ * Note: off_4k (w5) is always in units of 4K. If we can't do the
+ * requested offset because it is not page-aligned, we return -EINVAL.
+ */
+ENTRY(compat_sys_mmap2_wrapper)
+#if PAGE_SHIFT > 12
+ tst w5, #~PAGE_MASK >> 12
+ b.ne 1f
+ lsr w5, w5, #PAGE_SHIFT - 12
+#endif
+ b sys_mmap_pgoff
+1: mov x0, #-EINVAL
+ ret
+ENDPROC(compat_sys_mmap2_wrapper)
+
+/*
* Wrappers for AArch32 syscalls that either take 64-bit parameters
* in registers or that take 32-bit parameters which require sign
* extension.
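
The offset handling in compat_sys_mmap2_wrapper is easier to follow in C: the AArch32 mmap2 offset is passed in 4 KB units, so with PAGE_SHIFT > 12 it must be a multiple of PAGE_SIZE/4K before it can be rescaled to page units. A rough C equivalent of the assembly above, offered only as a sketch (the real entry point stays in assembly):

static long compat_mmap2_pgoff(unsigned long off_4k)
{
#if PAGE_SHIFT > 12
	/* reject offsets that are not page aligned */
	if (off_4k & ((1UL << (PAGE_SHIFT - 12)) - 1))
		return -EINVAL;
	off_4k >>= PAGE_SHIFT - 12;	/* convert 4K units to page units */
#endif
	return off_4k;			/* passed on as pgoff to sys_mmap_pgoff */
}
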
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 07f930540f4a..19f915e8f6e0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -36,7 +36,7 @@
#include <asm/page.h>
#include <asm/virt.h>
-#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
+#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
@@ -46,13 +46,6 @@
#error TEXT_OFFSET must be less than 2MB
#endif
- .macro pgtbl, ttb0, ttb1, virt_to_phys
- ldr \ttb1, =swapper_pg_dir
- ldr \ttb0, =idmap_pg_dir
- add \ttb1, \ttb1, \virt_to_phys
- add \ttb0, \ttb0, \virt_to_phys
- .endm
-
#ifdef CONFIG_ARM64_64K_PAGES
#define BLOCK_SHIFT PAGE_SHIFT
#define BLOCK_SIZE PAGE_SIZE
@@ -63,7 +56,7 @@
#define TABLE_SHIFT PUD_SHIFT
#endif
-#define KERNEL_START KERNEL_RAM_VADDR
+#define KERNEL_START _text
#define KERNEL_END _end
/*
@@ -240,40 +233,43 @@ section_table:
#endif
ENTRY(stext)
- mov x21, x0 // x21=FDT
+ bl preserve_boot_args
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag
- mrs x22, midr_el1 // x22=cpuid
- mov x0, x22
- bl lookup_processor_type
- mov x23, x0 // x23=current cpu_table
- /*
- * __error_p may end up out of range for cbz if text areas are
- * aligned up to section sizes.
- */
- cbnz x23, 1f // invalid processor (x23=0)?
- b __error_p
-1:
+
bl __vet_fdt
bl __create_page_tables // x25=TTBR0, x26=TTBR1
/*
- * The following calls CPU specific code in a position independent
- * manner. See arch/arm64/mm/proc.S for details. x23 = base of
- * cpu_info structure selected by lookup_processor_type above.
+ * The following calls the CPU setup code; see arch/arm64/mm/proc.S
+ * for details.
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
- ldr x27, __switch_data // address to jump to after
+ ldr x27, =__mmap_switched // address to jump to after
// MMU has been enabled
- adrp lr, __enable_mmu // return (PIC) address
- add lr, lr, #:lo12:__enable_mmu
- ldr x12, [x23, #CPU_INFO_SETUP]
- add x12, x12, x28 // __virt_to_phys
- br x12 // initialise processor
+ adr_l lr, __enable_mmu // return (PIC) address
+ b __cpu_setup // initialise processor
ENDPROC(stext)
/*
+ * Preserve the arguments passed by the bootloader in x0 .. x3
+ */
+preserve_boot_args:
+ mov x21, x0 // x21=FDT
+
+ adr_l x0, boot_args // record the contents of
+ stp x21, x1, [x0] // x0 .. x3 at kernel entry
+ stp x2, x3, [x0, #16]
+
+ dmb sy // needed before dc ivac with
+ // MMU off
+
+ add x1, x0, #0x20 // 4 x 8 bytes
+ b __inval_cache_range // tail call
+ENDPROC(preserve_boot_args)
+
+/*
* Determine validity of the x21 FDT pointer.
* The dtb must be 8-byte aligned and live in the first 512M of memory.
*/
@@ -356,7 +352,8 @@ ENDPROC(__vet_fdt)
* - pgd entry for fixed mappings (TTBR1)
*/
__create_page_tables:
- pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
+ adrp x25, idmap_pg_dir
+ adrp x26, swapper_pg_dir
mov x27, lr
/*
@@ -385,12 +382,50 @@ __create_page_tables:
* Create the identity mapping.
*/
mov x0, x25 // idmap_pg_dir
- ldr x3, =KERNEL_START
- add x3, x3, x28 // __pa(KERNEL_START)
+ adrp x3, KERNEL_START // __pa(KERNEL_START)
+
+#ifndef CONFIG_ARM64_VA_BITS_48
+#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT))
+
+ /*
+ * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
+ * created that covers system RAM if that is located sufficiently high
+ * in the physical address space. So for the ID map, use an extended
+ * virtual range in that case, by configuring an additional translation
+ * level.
+ * First, we have to verify our assumption that the current value of
+ * VA_BITS was chosen such that all translation levels are fully
+ * utilised, and that lowering T0SZ will always result in an additional
+ * translation level to be configured.
+ */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
+#endif
+
+ /*
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of KERNEL_END.
+ */
+ adrp x5, KERNEL_END
+ clz x5, x5
+ cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
+ b.ge 1f // .. then skip additional level
+
+ adr_l x6, idmap_t0sz
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
+ create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
+1:
+#endif
+
create_pgd_entry x0, x3, x5, x6
- ldr x6, =KERNEL_END
mov x5, x3 // __pa(KERNEL_START)
- add x6, x6, x28 // __pa(KERNEL_END)
+ adr_l x6, KERNEL_END // __pa(KERNEL_END)
create_block_map x0, x7, x3, x5, x6
/*
@@ -399,7 +434,7 @@ __create_page_tables:
mov x0, x26 // swapper_pg_dir
mov x5, #PAGE_OFFSET
create_pgd_entry x0, x5, x3, x6
- ldr x6, =KERNEL_END
+ ldr x6, =KERNEL_END // __va(KERNEL_END)
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -426,6 +461,7 @@ __create_page_tables:
*/
mov x0, x25
add x1, x26, #SWAPPER_DIR_SIZE
+ dmb sy
bl __inval_cache_range
mov lr, x27
@@ -433,37 +469,22 @@ __create_page_tables:
ENDPROC(__create_page_tables)
.ltorg
- .align 3
- .type __switch_data, %object
-__switch_data:
- .quad __mmap_switched
- .quad __bss_start // x6
- .quad __bss_stop // x7
- .quad processor_id // x4
- .quad __fdt_pointer // x5
- .quad memstart_addr // x6
- .quad init_thread_union + THREAD_START_SP // sp
-
/*
- * The following fragment of code is executed with the MMU on in MMU mode, and
- * uses absolute addresses; this is not position independent.
+ * The following fragment of code is executed with the MMU enabled.
*/
+ .set initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
- adr x3, __switch_data + 8
+ adr_l x6, __bss_start
+ adr_l x7, __bss_stop
- ldp x6, x7, [x3], #16
1: cmp x6, x7
b.hs 2f
str xzr, [x6], #8 // Clear BSS
b 1b
2:
- ldp x4, x5, [x3], #16
- ldr x6, [x3], #8
- ldr x16, [x3]
- mov sp, x16
- str x22, [x4] // Save processor ID
- str x21, [x5] // Save FDT pointer
- str x24, [x6] // Save PHYS_OFFSET
+ adr_l sp, initial_sp, x4
+ str_l x21, __fdt_pointer, x5 // Save FDT pointer
+ str_l x24, memstart_addr, x6 // Save PHYS_OFFSET
mov x29, #0
b start_kernel
ENDPROC(__mmap_switched)
@@ -566,8 +587,7 @@ ENDPROC(el2_setup)
* in x20. See arch/arm64/include/asm/virt.h for more info.
*/
ENTRY(set_cpu_boot_mode_flag)
- ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x1, x1, x28
+ adr_l x1, __boot_cpu_mode
cmp w20, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
@@ -588,29 +608,21 @@ ENDPROC(set_cpu_boot_mode_flag)
.align L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2
- .long 0
+ .long BOOT_CPU_MODE_EL1
.popsection
#ifdef CONFIG_SMP
- .align 3
-1: .quad .
- .quad secondary_holding_pen_release
-
/*
* This provides a "holding pen" for platforms where all secondary
* cores are held until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
- adr x1, 1b
- ldp x2, x3, [x1]
- sub x1, x1, x2
- add x3, x3, x1
+ adr_l x3, secondary_holding_pen_release
pen: ldr x4, [x3]
cmp x4, x0
b.eq secondary_startup
@@ -624,7 +636,6 @@ ENDPROC(secondary_holding_pen)
*/
ENTRY(secondary_entry)
bl el2_setup // Drop to EL1
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
bl set_cpu_boot_mode_flag
b secondary_startup
ENDPROC(secondary_entry)
@@ -633,16 +644,9 @@ ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
*/
- mrs x22, midr_el1 // x22=cpuid
- mov x0, x22
- bl lookup_processor_type
- mov x23, x0 // x23=current cpu_table
- cbz x23, __error_p // invalid processor (x23=0)?
-
- pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
- ldr x12, [x23, #CPU_INFO_SETUP]
- add x12, x12, x28 // __virt_to_phys
- blr x12 // initialise processor
+ adrp x25, idmap_pg_dir
+ adrp x26, swapper_pg_dir
+ bl __cpu_setup // initialise processor
ldr x21, =secondary_data
ldr x27, =__secondary_switched // address to jump to after enabling the MMU
@@ -658,11 +662,12 @@ ENDPROC(__secondary_switched)
#endif /* CONFIG_SMP */
/*
- * Setup common bits before finally enabling the MMU. Essentially this is just
- * loading the page table pointer and vector base registers.
+ * Enable the MMU.
*
- * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
- * the MMU.
+ * x0 = SCTLR_EL1 value for turning on the MMU.
+ * x27 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
*/
__enable_mmu:
ldr x5, =vectors
@@ -670,89 +675,7 @@ __enable_mmu:
msr ttbr0_el1, x25 // load TTBR0
msr ttbr1_el1, x26 // load TTBR1
isb
- b __turn_mmu_on
-ENDPROC(__enable_mmu)
-
-/*
- * Enable the MMU. This completely changes the structure of the visible memory
- * space. You will not be able to trace execution through this.
- *
- * x0 = system control register
- * x27 = *virtual* address to jump to upon completion
- *
- * other registers depend on the function called upon completion
- *
- * We align the entire function to the smallest power of two larger than it to
- * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
- * close to the end of a 512MB or 1GB block we might require an additional
- * table to map the entire function.
- */
- .align 4
-__turn_mmu_on:
msr sctlr_el1, x0
isb
br x27
-ENDPROC(__turn_mmu_on)
-
-/*
- * Calculate the start of physical memory.
- */
-__calc_phys_offset:
- adr x0, 1f
- ldp x1, x2, [x0]
- sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET
- add x24, x2, x28 // x24 = PHYS_OFFSET
- ret
-ENDPROC(__calc_phys_offset)
-
- .align 3
-1: .quad .
- .quad PAGE_OFFSET
-
-/*
- * Exception handling. Something went wrong and we can't proceed. We ought to
- * tell the user, but since we don't have any guarantee that we're even
- * running on the right architecture, we do virtually nothing.
- */
-__error_p:
-ENDPROC(__error_p)
-
-__error:
-1: nop
- b 1b
-ENDPROC(__error)
-
-/*
- * This function gets the processor ID in w0 and searches the cpu_table[] for
- * a match. It returns a pointer to the struct cpu_info it found. The
- * cpu_table[] must end with an empty (all zeros) structure.
- *
- * This routine can be called via C code and it needs to work with the MMU
- * both disabled and enabled (the offset is calculated automatically).
- */
-ENTRY(lookup_processor_type)
- adr x1, __lookup_processor_type_data
- ldp x2, x3, [x1]
- sub x1, x1, x2 // get offset between VA and PA
- add x3, x3, x1 // convert VA to PA
-1:
- ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask
- cbz w5, 2f // end of list?
- and w6, w6, w0
- cmp w5, w6
- b.eq 3f
- add x3, x3, #CPU_INFO_SZ
- b 1b
-2:
- mov x3, #0 // unknown processor
-3:
- mov x0, x3
- ret
-ENDPROC(lookup_processor_type)
-
- .align 3
- .type __lookup_processor_type_data, %object
-__lookup_processor_type_data:
- .quad .
- .quad cpu_table
- .size __lookup_processor_type_data, . - __lookup_processor_type_data
+ENDPROC(__enable_mmu)
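
The T0SZ decision made in __create_page_tables above follows from T0SZ being 64 minus the number of input address bits in use, which is exactly the count of leading zeroes in the physical address of KERNEL_END. A hedged C rendering of that choice, with clz64() standing in for the CLZ instruction and TCR_T0SZ() taken from the patch:

static u64 pick_idmap_t0sz(u64 pa_kernel_end)
{
	u64 t0sz = clz64(pa_kernel_end);	/* 64 - #bits needed for the PA */

	if (t0sz >= TCR_T0SZ(VA_BITS))
		return TCR_T0SZ(VA_BITS);	/* default input range suffices */

	/* wider input range: __create_page_tables adds the extra table level */
	return t0sz;
}
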
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 98bbe06e469c..e7d934d3afe0 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
*/
- if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
+ if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
return -EINVAL;
return 0;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index c8eca88f12e6..924902083e47 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -265,23 +265,13 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
-u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
- u32 insn, u64 imm)
+static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
+ u32 *maskp, int *shiftp)
{
- u32 immlo, immhi, lomask, himask, mask;
+ u32 mask;
int shift;
switch (type) {
- case AARCH64_INSN_IMM_ADR:
- lomask = 0x3;
- himask = 0x7ffff;
- immlo = imm & lomask;
- imm >>= 2;
- immhi = imm & himask;
- imm = (immlo << 24) | (immhi);
- mask = (lomask << 24) | (himask);
- shift = 5;
- break;
case AARCH64_INSN_IMM_26:
mask = BIT(26) - 1;
shift = 0;
@@ -320,9 +310,68 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
shift = 16;
break;
default:
- pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
+ return -EINVAL;
+ }
+
+ *maskp = mask;
+ *shiftp = shift;
+
+ return 0;
+}
+
+#define ADR_IMM_HILOSPLIT 2
+#define ADR_IMM_SIZE SZ_2M
+#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_LOSHIFT 29
+#define ADR_IMM_HISHIFT 5
+
+u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
+{
+ u32 immlo, immhi, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
+ immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
+ insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
+ mask = ADR_IMM_SIZE - 1;
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
+ pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+ }
+
+ return (insn >> shift) & mask;
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
+ imm >>= ADR_IMM_HILOSPLIT;
+ immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
+ imm = immlo | immhi;
+ mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
+ (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
}
/* Update the immediate field. */
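
A worked example of the ADR split introduced above: a 21-bit immediate is stored with its two low bits (immlo) in instruction bits [30:29] and the remaining 19 bits (immhi) in bits [23:5]. The sketch below reuses the ADR_IMM_* macros from the patch and exists only to show the bit placement:

static u32 adr_imm_fields(u64 imm)
{
	u32 lo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;	  /* -> bits [30:29] */
	u32 hi = ((imm >> ADR_IMM_HILOSPLIT) & ADR_IMM_HIMASK)
			<< ADR_IMM_HISHIFT;			  /* -> bits [23:5]  */

	/*
	 * For imm = 0x12345: the low field is 1, the high field is 0x48d1,
	 * and aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, lo | hi)
	 * recovers 0x12345.
	 */
	return lo | hi;
}
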
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 6f93c24ca801..4095379dc069 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -46,3 +47,27 @@ int pcibios_add_device(struct pci_dev *dev)
return 0;
}
+
+/*
+ * raw_pci_read/write - Platform-specific PCI config space access.
+ */
+int raw_pci_read(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *val)
+{
+ return -ENXIO;
+}
+
+int raw_pci_write(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 val)
+{
+ return -ENXIO;
+}
+
+#ifdef CONFIG_ACPI
+/* Root bridge scanning */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ /* TODO: Should be revisited when implementing PCI on ACPI */
+ return NULL;
+}
+#endif
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 25a5308744b1..cce18c85d2e8 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,8 +25,10 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -322,22 +324,31 @@ out:
}
static int
-validate_event(struct pmu_hw_events *hw_events,
- struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+ struct perf_event *event)
{
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct arm_pmu *armpmu;
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
+ /*
+ * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+ * core perf code won't check that the pmu->ctx == leader->ctx
+ * until after pmu->event_init(event).
+ */
+ if (event->pmu != pmu)
+ return 0;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
+ armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
@@ -355,15 +366,15 @@ validate_group(struct perf_event *event)
memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask;
- if (!validate_event(&fake_pmu, leader))
+ if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
- if (!validate_event(&fake_pmu, sibling))
+ if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
- if (!validate_event(&fake_pmu, event))
+ if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
@@ -396,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
free_percpu_irq(irq, &cpu_hw_events);
} else {
for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ int cpu = i;
+
+ if (armpmu->irq_affinity)
+ cpu = armpmu->irq_affinity[i];
+
+ if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq > 0)
@@ -450,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
} else {
for (i = 0; i < irqs; ++i) {
+ int cpu = i;
+
err = 0;
irq = platform_get_irq(pmu_device, i);
if (irq <= 0)
continue;
+ if (armpmu->irq_affinity)
+ cpu = armpmu->irq_affinity[i];
+
/*
* If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
+ irq, cpu);
continue;
}
@@ -476,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return err;
}
- cpumask_set_cpu(i, &armpmu->active_irqs);
+ cpumask_set_cpu(cpu, &armpmu->active_irqs);
}
}
@@ -1289,9 +1310,51 @@ static const struct of_device_id armpmu_of_device_ids[] = {
static int armpmu_device_probe(struct platform_device *pdev)
{
+ int i, irq, *irqs;
+
if (!cpu_pmu)
return -ENODEV;
+ /* Don't bother with PPIs; they're already affine */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 && irq_is_percpu(irq))
+ return 0;
+
+ irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < pdev->num_resources; ++i) {
+ struct device_node *dn;
+ int cpu;
+
+ dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+ i);
+ if (!dn) {
+ pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+ of_node_full_name(pdev->dev.of_node), i);
+ break;
+ }
+
+ for_each_possible_cpu(cpu)
+ if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+ break;
+
+ of_node_put(dn);
+ if (cpu >= nr_cpu_ids) {
+ pr_warn("Failed to find logical CPU for %s\n",
+ dn->name);
+ break;
+ }
+
+ irqs[i] = cpu;
+ }
+
+ if (i == pdev->num_resources)
+ cpu_pmu->irq_affinity = irqs;
+ else
+ kfree(irqs);
+
cpu_pmu->plat_device = pdev;
return 0;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 9b8a70ae64a1..ea18cb53921e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "psci: " fmt
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
@@ -24,6 +25,7 @@
#include <linux/slab.h>
#include <uapi/linux/psci.h>
+#include <asm/acpi.h>
#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
@@ -273,39 +275,8 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
-/*
- * PSCI Function IDs for v0.2+ are well defined so use
- * standard values.
- */
-static int __init psci_0_2_init(struct device_node *np)
+static void __init psci_0_2_set_functions(void)
{
- int err, ver;
-
- err = get_set_conduit_method(np);
-
- if (err)
- goto out_put_node;
-
- ver = psci_get_version();
-
- if (ver == PSCI_RET_NOT_SUPPORTED) {
- /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
- pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
- err = -EOPNOTSUPP;
- goto out_put_node;
- } else {
- pr_info("PSCIv%d.%d detected in firmware.\n",
- PSCI_VERSION_MAJOR(ver),
- PSCI_VERSION_MINOR(ver));
-
- if (PSCI_VERSION_MAJOR(ver) == 0 &&
- PSCI_VERSION_MINOR(ver) < 2) {
- err = -EINVAL;
- pr_err("Conflicting PSCI version detected.\n");
- goto out_put_node;
- }
- }
-
pr_info("Using standard PSCI v0.2 function IDs\n");
psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
psci_ops.cpu_suspend = psci_cpu_suspend;
@@ -329,6 +300,60 @@ static int __init psci_0_2_init(struct device_node *np)
arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+ int ver = psci_get_version();
+
+ if (ver == PSCI_RET_NOT_SUPPORTED) {
+ /*
+		 * PSCI versions >= 0.2 mandate implementation of
+ * PSCI_VERSION.
+ */
+ pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
+ return -EOPNOTSUPP;
+ } else {
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 &&
+ PSCI_VERSION_MINOR(ver) < 2) {
+ pr_err("Conflicting PSCI version detected.\n");
+ return -EINVAL;
+ }
+ }
+
+ psci_0_2_set_functions();
+
+ return 0;
+}
+
+/*
+ * PSCI init function for PSCI versions >=0.2
+ *
+ * Probe based on the PSCI_VERSION function
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+ int err;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+ /*
+ * Starting with v0.2, the PSCI specification introduced a call
+ * (PSCI_VERSION) that allows probing the firmware version, so
+	 * that PSCI function IDs and version-specific initialization
+	 * can be carried out according to the specific version reported
+	 * by firmware.
+ */
+ err = psci_probe();
out_put_node:
of_node_put(np);
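
Both the DT path and the new ACPI path now funnel through psci_probe(), which keys everything off the PSCI_VERSION reply (major version in bits [31:16], minor in [15:0]). A compact sketch of how that reply is classified, using the macros already referenced above; 0x00000002 is only an example reply meaning v0.2:

static int classify_psci_version(int ver)
{
	if (ver == PSCI_RET_NOT_SUPPORTED)
		return -EOPNOTSUPP;		/* firmware predates v0.2 */

	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2)
		return -EINVAL;			/* conflicting version report */

	return 0;				/* e.g. ver == 0x00000002 -> v0.2 */
}
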
@@ -381,7 +406,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
-int __init psci_init(void)
+int __init psci_dt_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
@@ -396,6 +421,27 @@ int __init psci_init(void)
return init_fn(np);
}
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64, as is
+ * explicitly stated in the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+ if (!acpi_psci_present()) {
+ pr_info("is not implemented in ACPI.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("probing for conduit method from ACPI.\n");
+
+ if (acpi_psci_use_hvc())
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ else
+ invoke_psci_fn = __invoke_psci_fn_smc;
+
+ return psci_probe();
+}
+
#ifdef CONFIG_SMP
static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index e8420f635bd4..74753132c3ac 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
@@ -46,11 +47,11 @@
#include <linux/efi.h>
#include <linux/personality.h>
+#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
-#include <asm/cputable.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
@@ -62,9 +63,7 @@
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>
-
-unsigned int processor_id;
-EXPORT_SYMBOL(processor_id);
+#include <asm/virt.h>
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -83,7 +82,6 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-static const char *cpu_name;
phys_addr_t __fdt_pointer __initdata;
/*
@@ -119,6 +117,11 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+/*
+ * The recorded values of x0 .. x3 upon kernel entry.
+ */
+u64 __cacheline_aligned boot_args[4];
+
void __init smp_setup_processor_id(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
@@ -207,24 +210,38 @@ static void __init smp_build_mpidr_hash(void)
}
#endif
+static void __init hyp_mode_check(void)
+{
+ if (is_hyp_mode_available())
+ pr_info("CPU: All CPU(s) started at EL2\n");
+ else if (is_hyp_mode_mismatched())
+ WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+ "CPU: CPUs started in inconsistent modes");
+ else
+ pr_info("CPU: All CPU(s) started at EL1\n");
+}
+
+void __init do_post_cpus_up_work(void)
+{
+ hyp_mode_check();
+ apply_alternatives_all();
+}
+
+#ifdef CONFIG_UP_LATE_INIT
+void __init up_late_init(void)
+{
+ do_post_cpus_up_work();
+}
+#endif /* CONFIG_UP_LATE_INIT */
+
static void __init setup_processor(void)
{
- struct cpu_info *cpu_info;
u64 features, block;
u32 cwg;
int cls;
- cpu_info = lookup_processor_type(read_cpuid_id());
- if (!cpu_info) {
- printk("CPU configuration botched (ID %08x), unable to continue.\n",
- read_cpuid_id());
- while (1);
- }
-
- cpu_name = cpu_info->cpu_name;
-
- printk("CPU: %s [%08x] revision %d\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
+ printk("CPU: AArch64 Processor [%08x] revision %d\n",
+ read_cpuid_id(), read_cpuid_id() & 15);
sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
@@ -380,18 +397,27 @@ void __init setup_arch(char **cmdline_p)
efi_init();
arm64_memblock_init();
+ /* Parse the ACPI tables for possible boot-time configuration */
+ acpi_boot_table_init();
+
paging_init();
request_standard_resources();
early_ioremap_reset();
- unflatten_device_tree();
-
- psci_init();
+ if (acpi_disabled) {
+ unflatten_device_tree();
+ psci_dt_init();
+ cpu_read_bootcpu_ops();
+#ifdef CONFIG_SMP
+ of_smp_init_cpus();
+#endif
+ } else {
+ psci_acpi_init();
+ acpi_init_cpus();
+ }
- cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
- smp_init_cpus();
smp_build_mpidr_hash();
#endif
@@ -402,6 +428,12 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
+ if (boot_args[1] || boot_args[2] || boot_args[3]) {
+ pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
+ "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
+ "This indicates a broken bootloader or old kernel\n",
+ boot_args[1], boot_args[2], boot_args[3]);
+ }
}
static int __init arm64_device_init(void)
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 660ccf9f7524..e18c48cb6db1 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -287,19 +287,12 @@ static void setup_restart_syscall(struct pt_regs *regs)
*/
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
- struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
sigset_t *oldset = sigmask_to_save();
int usig = ksig->sig;
int ret;
/*
- * translate the signal
- */
- if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
- usig = thread->exec_domain->signal_invmap[usig];
-
- /*
* Set up the stack frame
*/
if (is_compat_task()) {
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 328b8ce4b007..2cb008177252 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void)
*/
cpu_set_reserved_ttbr0();
flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
preempt_disable();
trace_hardirqs_off();
@@ -309,7 +310,7 @@ void cpu_die(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- apply_alternatives_all();
+ do_post_cpus_up_work();
}
void __init smp_prepare_boot_cpu(void)
@@ -322,7 +323,7 @@ void __init smp_prepare_boot_cpu(void)
* cpu logical map array containing MPIDR values related to logical
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
-void __init smp_init_cpus(void)
+void __init of_smp_init_cpus(void)
{
struct device_node *dn = NULL;
unsigned int i, cpu = 1;
@@ -635,7 +636,7 @@ void smp_send_stop(void)
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
- cpu_clear(smp_processor_id(), mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
smp_cross_call(&mask, IPI_CPU_STOP);
}
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 2d5ab3c90b82..a40b1343b819 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -37,6 +37,7 @@ asmlinkage long compat_sys_readahead_wrapper(void);
asmlinkage long compat_sys_fadvise64_64_wrapper(void);
asmlinkage long compat_sys_sync_file_range2_wrapper(void);
asmlinkage long compat_sys_fallocate_wrapper(void);
+asmlinkage long compat_sys_mmap2_wrapper(void);
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 1a7125c3099b..42f9195cf2f8 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clk-provider.h>
+#include <linux/acpi.h>
#include <clocksource/arm_arch_timer.h>
@@ -72,6 +73,12 @@ void __init time_init(void)
tick_setup_hrtimer_broadcast();
+ /*
+	 * Since only one of ACPI or FDT will be available in the system,
+	 * we can safely use acpi_generic_timer_init() here
+ */
+ acpi_generic_timer_init();
+
arch_timer_rate = arch_timer_get_rate();
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5d9d2dca530d..a2c29865c3fe 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -23,10 +23,14 @@ jiffies = jiffies_64;
#define HYPERVISOR_TEXT \
/* \
- * Force the alignment to be compatible with \
- * the vectors requirements \
+ * Align to 4 KB so that \
+ * a) the HYP vector table is at its minimum \
+ * alignment of 2048 bytes \
+ * b) the HYP init code will not cross a page \
+ * boundary if its size does not exceed \
+ * 4 KB (see related ASSERT() below) \
*/ \
- . = ALIGN(2048); \
+ . = ALIGN(SZ_4K); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
@@ -163,10 +167,11 @@ SECTIONS
}
/*
- * The HYP init code can't be more than a page long.
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
*/
-ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
- "HYP init code too big")
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+ "HYP init code too big or misaligned")
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
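
Expressed as plain C, the reworked ASSERT() above requires the HYP init text to fit entirely within the 4 KB page that contains its start address, which covers both the size limit and the page-crossing case. A sketch only, assuming the linker symbols are available as plain addresses:

static bool hyp_idmap_text_fits(u64 start, u64 end)
{
	u64 page_start = start & ~(u64)(SZ_4K - 1);	/* page containing start */

	return end - page_start <= SZ_4K;		/* no 4 KB boundary crossed */
}
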
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index c3191168a994..178ba2248a98 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -20,6 +20,7 @@
#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
+#include <asm/pgtable-hwdef.h>
.text
.pushsection .hyp.idmap.text, "ax"
@@ -65,6 +66,25 @@ __do_hyp_init:
and x4, x4, x5
ldr x5, =TCR_EL2_FLAGS
orr x4, x4, x5
+
+#ifndef CONFIG_ARM64_VA_BITS_48
+ /*
+ * If we are running with VA_BITS < 48, we may be running with an extra
+ * level of translation in the ID map. This is only the case if system
+ * RAM is out of range for the currently configured page size and number
+ * of translation levels, in which case we will also need the extra
+ * level for the HYP ID map, or we won't be able to enable the EL2 MMU.
+ *
+ * However, at EL2, there is only one TTBR register, and we can't switch
+ * between translation tables *and* update TCR_EL2.T0SZ at the same
+ * time. Bottom line: we need the extra level in *both* our translation
+ * tables.
+ *
+ * So use the same T0SZ value we use for the ID map.
+ */
+ ldr_l x5, idmap_t0sz
+ bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+#endif
msr tcr_el2, x4
ldr x4, =VTCR_EL2_FLAGS
@@ -91,6 +111,10 @@ __do_hyp_init:
msr sctlr_el2, x4
isb
+ /* Skip the trampoline dance if we merged the boot and runtime PGDs */
+ cmp x0, x1
+ b.eq merged
+
/* MMU is now enabled. Get ready for the trampoline dance */
ldr x4, =TRAMPOLINE_VA
adr x5, target
@@ -105,6 +129,7 @@ target: /* We're now in the trampoline code, switch page tables */
tlbi alle2
dsb sy
+merged:
/* Set the stack and new vectors */
kern_hyp_va x2
mov sp, x2
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index ef7d112f5ce0..b0bd4e5fd5cf 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
*ret_page = phys_to_page(phys);
ptr = (void *)val;
- if (flags & __GFP_ZERO)
- memset(ptr, 0, size);
+ memset(ptr, 0, size);
}
return ptr;
@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
struct page *page;
void *addr;
- size = PAGE_ALIGN(size);
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
if (!page)
@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
*dma_handle = phys_to_dma(dev, page_to_phys(page));
addr = page_address(page);
- if (flags & __GFP_ZERO)
- memset(addr, 0, size);
+ memset(addr, 0, size);
return addr;
} else {
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size,
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+ size = PAGE_ALIGN(size);
+
if (!is_device_dma_coherent(dev)) {
if (__free_from_pool(vaddr, size))
return;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 74c256744b25..f3d6221cd5bd 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -328,10 +328,12 @@ static int ptdump_init(void)
for (j = 0; j < pg_level[i].num; j++)
pg_level[i].mask |= pg_level[i].bits[j].mask;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
address_markers[VMEMMAP_START_NR].start_address =
(unsigned long)virt_to_page(PAGE_OFFSET);
address_markers[VMEMMAP_END_NR].start_address =
(unsigned long)virt_to_page(high_memory);
+#endif
pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
&ptdump_fops);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ae85da6307bb..597831bdddf3 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -190,6 +190,8 @@ void __init bootmem_init(void)
min = PFN_UP(memblock_start_of_DRAM());
max = PFN_DOWN(memblock_end_of_DRAM());
+ early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
+
/*
* Sparsemem tries to allocate bootmem in memory_present(), so must be
* done after the fixed reservations.
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 54922d1275b8..ed177475dd8c 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -47,17 +47,16 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;
- if (current->flags & PF_RANDOMIZE)
- rnd = (long)get_random_int() & STACK_RND_MASK;
+ rnd = (unsigned long)get_random_int() & STACK_RND_MASK;
return rnd << PAGE_SHIFT;
}
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -66,7 +65,7 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
+ return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
/*
@@ -75,15 +74,20 @@ static unsigned long mmap_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
/*
* Fall back to the standard layout if the personality bit is set, or
* if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c6daaf6c6f97..5b8b664422d3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,6 +40,8 @@
#include "mm.h"
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+
/*
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
@@ -454,6 +456,7 @@ void __init paging_init(void)
*/
cpu_set_reserved_ttbr0();
flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
}
/*
@@ -461,8 +464,10 @@ void __init paging_init(void)
*/
void setup_mm_for_reboot(void)
{
- cpu_switch_mm(idmap_pg_dir, &init_mm);
+ cpu_set_reserved_ttbr0();
flush_tlb_all();
+ cpu_set_idmap_tcr_t0sz();
+ cpu_switch_mm(idmap_pg_dir, &init_mm);
}
/*
@@ -550,10 +555,10 @@ void vmemmap_free(unsigned long start, unsigned long end)
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
@@ -627,10 +632,7 @@ void __set_fixmap(enum fixed_addresses idx,
unsigned long addr = __fix_to_virt(idx);
pte_t *pte;
- if (idx >= __end_of_fixed_addresses) {
- BUG();
- return;
- }
+ BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
pte = fixmap_pte(addr);
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 1d3ec3ddd84b..e47ed1c5dce1 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -73,7 +73,6 @@ int set_memory_ro(unsigned long addr, int numpages)
__pgprot(PTE_RDONLY),
__pgprot(PTE_WRITE));
}
-EXPORT_SYMBOL_GPL(set_memory_ro);
int set_memory_rw(unsigned long addr, int numpages)
{
@@ -81,7 +80,6 @@ int set_memory_rw(unsigned long addr, int numpages)
__pgprot(PTE_WRITE),
__pgprot(PTE_RDONLY));
}
-EXPORT_SYMBOL_GPL(set_memory_rw);
int set_memory_nx(unsigned long addr, int numpages)
{
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 005d29e2977d..4c4d93c4bf65 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -52,3 +52,13 @@
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
+
+/*
+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ */
+ .macro tcr_set_idmap_t0sz, valreg, tmpreg
+#ifndef CONFIG_ARM64_VA_BITS_48
+ ldr_l \tmpreg, idmap_t0sz
+ bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+#endif
+ .endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 28eebfb6af76..cdd754e19b9b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume)
msr cpacr_el1, x6
msr ttbr0_el1, x1
msr ttbr1_el1, x7
+ tcr_set_idmap_t0sz x8, x7
msr tcr_el1, x8
msr vbar_el1, x9
msr mdscr_el1, x10
@@ -233,6 +234,8 @@ ENTRY(__cpu_setup)
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+ tcr_set_idmap_t0sz x10, x9
+
/*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
* TCR_EL1.
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index edba042b2325..dc6a4842683a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -487,7 +487,7 @@ emit_cond_jmp:
return -EINVAL;
}
- imm64 = (u64)insn1.imm << 32 | imm;
+ imm64 = (u64)insn1.imm << 32 | (u32)imm;
emit_a64_mov_i64(dst, imm64, ctx);
return 1;
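
The added (u32) cast matters because the lower half is a signed 32-bit immediate: without the cast, a negative value sign-extends to 64 bits and clobbers the high word contributed by insn1.imm. A minimal self-contained illustration:

#include <stdint.h>

static uint64_t combine_imm64(int32_t hi, int32_t lo)
{
	uint64_t wrong = (uint64_t)hi << 32 | lo;	     /* lo sign-extends     */
	uint64_t right = (uint64_t)hi << 32 | (uint32_t)lo;  /* high half preserved */

	/* with hi = 1, lo = -1: wrong == ~0ULL, right == 0x1ffffffff */
	(void)wrong;
	return right;
}
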
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index d56afa99a514..d4d3079541ea 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -17,11 +17,9 @@
#include <asm/types.h>
struct task_struct;
-struct exec_domain;
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu;
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -36,7 +34,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c
index e41c84516e5d..2c9764fe3532 100644
--- a/arch/avr32/kernel/asm-offsets.c
+++ b/arch/avr32/kernel/asm-offsets.c
@@ -12,7 +12,6 @@
void foo(void)
{
OFFSET(TI_task, thread_info, task);
- OFFSET(TI_exec_domain, thread_info, exec_domain);
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_cpu, thread_info, cpu);
OFFSET(TI_preempt_count, thread_info, preempt_count);
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index 383007877b2b..99c00d835f47 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -48,7 +48,6 @@ CONFIG_IP_PNP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_RAM=y
diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
index cd0636bb24a0..cdeb51856f26 100644
--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
+++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
@@ -67,7 +67,6 @@ CONFIG_BFIN_SIR0=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 16273a922056..ed7d2c096739 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -50,7 +50,6 @@ CONFIG_IRTTY_SIR=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 0df2f921f7e5..0c241f4d28d7 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -50,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=m
CONFIG_MTD_CFI_AMDSTD=m
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 91d3eda42742..e5360b30e39a 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -55,13 +55,14 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=m
CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=m
CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
CONFIG_NET_BFIN=y
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index be03be6ba543..60f6fb86125c 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=m
CONFIG_MTD_CFI_AMDSTD=m
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index 802f9c421621..78f6bc79f910 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -50,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_PLATRAM=y
CONFIG_MTD_PHRAM=y
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
index e2a2fa5935ce..fac8bb578249 100644
--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -52,7 +52,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 680730eeaf23..2a2e4d0cebc1 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index fcec5ce71392..ba4267f658af 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -105,6 +105,7 @@ CONFIG_SPI=y
CONFIG_SPI_ADI_V3=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_MCP23S08=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_BFIN_WDT=y
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index 05108b85ab12..1902bb05d086 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -55,7 +55,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index 5e0db82b679e..9a5716d57ebc 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -37,7 +37,6 @@ CONFIG_UNIX=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index 2e47df77490f..684592884349 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -52,7 +52,6 @@ CONFIG_IP_PNP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 6da629ffc2f1..d9915e984787 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -48,7 +48,6 @@ CONFIG_INET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 9ff79df6825c..92d8130cdb51 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index d6dd98e67146..fa8d91132a57 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -52,7 +52,6 @@ CONFIG_INET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
index 2b58cb221283..88600593c731 100644
--- a/arch/blackfin/configs/DNP5370_defconfig
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -36,7 +36,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_DEBUG=y
CONFIG_MTD_DEBUG_VERBOSE=1
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_NFTL=y
CONFIG_NFTL_RW=y
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index 5adf0da58499..9e3ae4b36d20 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -43,7 +43,6 @@ CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_MANGLE=y
# CONFIG_WIRELESS is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index a6a7298962ed..c7926812971c 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -46,7 +46,6 @@ CONFIG_IP_PNP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index bc216646fe18..23fdc57d657a 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -38,7 +38,6 @@ CONFIG_IRTTY_SIR=m
# CONFIG_WIRELESS is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=y
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_RAM=y
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
index ea88158ab432..e28959479fe0 100644
--- a/arch/blackfin/configs/TCM-BF518_defconfig
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -55,7 +55,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index c1f45f15295c..39e85cce95d7 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -44,7 +44,6 @@ CONFIG_INET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index dccae26805b0..4e8ad0523118 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -11,27 +11,12 @@
#include <linux/types.h>
#include <asm/byteorder.h>
-#define DECLARE_BFIN_RAW_READX(size, type, asm, asm_sign) \
-static inline type __raw_read##size(const volatile void __iomem *addr) \
-{ \
- unsigned int val; \
- int tmp; \
- __asm__ __volatile__ ( \
- "cli %1;" \
- "NOP; NOP; SSYNC;" \
- "%0 = "#asm" [%2] "#asm_sign";" \
- "sti %1;" \
- : "=d"(val), "=d"(tmp) \
- : "a"(addr) \
- ); \
- return (type) val; \
-}
-DECLARE_BFIN_RAW_READX(b, u8, b, (z))
-#define __raw_readb __raw_readb
-DECLARE_BFIN_RAW_READX(w, u16, w, (z))
-#define __raw_readw __raw_readw
-DECLARE_BFIN_RAW_READX(l, u32, , )
-#define __raw_readl __raw_readl
+#define __raw_readb bfin_read8
+#define __raw_readw bfin_read16
+#define __raw_readl bfin_read32
+#define __raw_writeb(val, addr) bfin_write8(addr, val)
+#define __raw_writew(val, addr) bfin_write16(addr, val)
+#define __raw_writel(val, addr) bfin_write32(addr, val)
extern void outsb(unsigned long port, const void *addr, unsigned long count);
extern void outsw(unsigned long port, const void *addr, unsigned long count);
@@ -50,14 +35,6 @@ extern void insl_16(unsigned long port, void *addr, unsigned long count);
#define insw insw
#define insl insl
-extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
-extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
-extern void dma_outsl(unsigned long port, const void *addr, unsigned short count);
-
-extern void dma_insb(unsigned long port, void *addr, unsigned short count);
-extern void dma_insw(unsigned long port, void *addr, unsigned short count);
-extern void dma_insl(unsigned long port, void *addr, unsigned short count);
-
/**
* I/O write barrier
*
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 57c3a8bd583d..2966b93850a1 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -37,7 +37,6 @@ typedef unsigned long mm_segment_t;
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -53,7 +52,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
@@ -76,15 +74,6 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __ASSEMBLY__ */
/*
- * Offsets in thread_info structure, used in assembly code
- */
-#define TI_TASK 0
-#define TI_EXECDOMAIN 4
-#define TI_FLAGS 8
-#define TI_CPU 12
-#define TI_PREEMPT 16
-
-/*
* thread information flag bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
diff --git a/arch/blackfin/include/uapi/asm/unistd.h b/arch/blackfin/include/uapi/asm/unistd.h
index a4511649a864..0cb9078ef482 100644
--- a/arch/blackfin/include/uapi/asm/unistd.h
+++ b/arch/blackfin/include/uapi/asm/unistd.h
@@ -401,8 +401,18 @@
#define __NR_sendmmsg 380
#define __NR_process_vm_readv 381
#define __NR_process_vm_writev 382
+#define __NR_kcmp 383
+#define __NR_finit_module 384
+#define __NR_sched_setattr 385
+#define __NR_sched_getattr 386
+#define __NR_renameat2 387
+#define __NR_seccomp 388
+#define __NR_getrandom 389
+#define __NR_memfd_create 390
+#define __NR_bpf 391
+#define __NR_execveat 392
-#define __NR_syscall 383
+#define __NR_syscall 393 /* For internal use, not implemented */
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index 37fcae950216..486560aea050 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -42,6 +42,12 @@ int main(void)
DEFINE(THREAD_PC, offsetof(struct thread_struct, pc));
DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
+ /* offsets in thread_info struct */
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PREEMPT, thread_info, preempt_count);
+
/* offsets into the pt_regs */
DEFINE(PT_ORIG_R0, offsetof(struct pt_regs, orig_r0));
DEFINE(PT_ORIG_P0, offsetof(struct pt_regs, orig_p0));
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index 947ad0832338..86b1cd3a0309 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -1620,7 +1620,6 @@ static int __init bfin_debug_mmrs_init(void)
D16(USB_APHY_CNTRL);
D16(USB_APHY_CALIB);
D16(USB_APHY_CNTRL2);
- D16(USB_PHY_TEST);
D16(USB_PLLOSC_CTRL);
D16(USB_SRP_CLKDIV);
D16(USB_EP_NI0_TXMAXP);
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index fa53faeeb0e9..cf773f0f1f30 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -330,9 +330,6 @@ static void bfin_disable_hw_debug(struct pt_regs *regs)
}
#ifdef CONFIG_SMP
-extern void generic_exec_single(int cpu, struct call_single_data *data, int wait);
-static struct call_single_data kgdb_smp_ipi_data[NR_CPUS];
-
void kgdb_passive_cpu_callback(void *info)
{
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
@@ -343,15 +340,14 @@ void kgdb_roundup_cpus(unsigned long flags)
unsigned int cpu;
for (cpu = cpumask_first(cpu_online_mask); cpu < nr_cpu_ids;
- cpu = cpumask_next(cpu, cpu_online_mask)) {
- kgdb_smp_ipi_data[cpu].func = kgdb_passive_cpu_callback;
- generic_exec_single(cpu, &kgdb_smp_ipi_data[cpu], 0);
- }
+ cpu = cpumask_next(cpu, cpu_online_mask))
+ smp_call_function_single(cpu, kgdb_passive_cpu_callback,
+ NULL, 0);
}
void kgdb_roundup_cpu(int cpu, unsigned long flags)
{
- generic_exec_single(cpu, &kgdb_smp_ipi_data[cpu], 0);
+ smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
}
#endif
@@ -359,19 +355,6 @@ void kgdb_roundup_cpu(int cpu, unsigned long flags)
static unsigned long kgdb_arch_imask;
#endif
-void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
- if (kgdb_single_step)
- preempt_enable();
-
-#ifdef CONFIG_IPIPE
- if (kgdb_arch_imask) {
- cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask;
- kgdb_arch_imask = 0;
- }
-#endif
-}
-
int kgdb_arch_handle_exception(int vector, int signo,
int err_code, char *remcom_in_buffer,
char *remcom_out_buffer,
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 4f424ae3b36d..ad82468bd94d 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -1464,5 +1464,5 @@ void __init cmdline_init(const char *r0)
{
early_shadow_stamp();
if (r0)
- strncpy(command_line, r0, COMMAND_LINE_SIZE);
+ strlcpy(command_line, r0, COMMAND_LINE_SIZE);
}
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index f2a8b5493bd3..ea570db598e5 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -151,11 +151,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
frame = get_sigframe(ksig, sizeof(*frame));
- err |= __put_user((current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && ksig->sig < 32
- ? current_thread_info()->exec_domain->
- signal_invmap[ksig->sig] : ksig->sig), &frame->sig);
+ err |= __put_user(ksig->sig, &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index de5c2c3ebd9b..1ed85ddadc0d 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -18,6 +18,7 @@
#include <asm/fixed_code.h>
#include <asm/pseudo_instructions.h>
#include <asm/pda.h>
+#include <asm/asm-offsets.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
index d90a85b6b6b9..bd045318a250 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
@@ -122,11 +122,6 @@
#define bfin_read_USB_APHY_CNTRL2() bfin_read16(USB_APHY_CNTRL2)
#define bfin_write_USB_APHY_CNTRL2(val) bfin_write16(USB_APHY_CNTRL2, val)
-/* (PHY_TEST is for ADI usage only) */
-
-#define bfin_read_USB_PHY_TEST() bfin_read16(USB_PHY_TEST)
-#define bfin_write_USB_PHY_TEST(val) bfin_write16(USB_PHY_TEST, val)
-
#define bfin_read_USB_PLLOSC_CTRL() bfin_read16(USB_PLLOSC_CTRL)
#define bfin_write_USB_PLLOSC_CTRL(val) bfin_write16(USB_PLLOSC_CTRL, val)
#define bfin_read_USB_SRP_CLKDIV() bfin_read16(USB_SRP_CLKDIV)
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index 71578d964d00..591e00ff620a 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -77,10 +77,6 @@
#define USB_APHY_CNTRL2 0xffc039e8 /* Register used to prevent re-enumeration once Moab goes into hibernate mode */
-/* (PHY_TEST is for ADI usage only) */
-
-#define USB_PHY_TEST 0xffc039ec /* Used for reducing simulation time and simplifies FIFO testability */
-
#define USB_PLLOSC_CTRL 0xffc039f0 /* Used to program different parameters for USB PLL and Oscillator */
#define USB_SRP_CLKDIV 0xffc039f4 /* Used to program clock divide value for the clock fed to the SRP detection logic */
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
index d09c19cd1b7b..916347901d5a 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
@@ -241,10 +241,6 @@
#define bfin_read_USB_APHY_CNTRL2() bfin_read16(USB_APHY_CNTRL2)
#define bfin_write_USB_APHY_CNTRL2(val) bfin_write16(USB_APHY_CNTRL2, val)
-/* (PHY_TEST is for ADI usage only) */
-
-#define bfin_read_USB_PHY_TEST() bfin_read16(USB_PHY_TEST)
-#define bfin_write_USB_PHY_TEST(val) bfin_write16(USB_PHY_TEST, val)
#define bfin_read_USB_PLLOSC_CTRL() bfin_read16(USB_PLLOSC_CTRL)
#define bfin_write_USB_PLLOSC_CTRL(val) bfin_write16(USB_PLLOSC_CTRL, val)
#define bfin_read_USB_SRP_CLKDIV() bfin_read16(USB_SRP_CLKDIV)
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
index bcb9726dea54..be83f645bba8 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
@@ -408,10 +408,6 @@
#define bfin_read_USB_APHY_CNTRL2() bfin_read16(USB_APHY_CNTRL2)
#define bfin_write_USB_APHY_CNTRL2(val) bfin_write16(USB_APHY_CNTRL2, val)
-/* (PHY_TEST is for ADI usage only) */
-
-#define bfin_read_USB_PHY_TEST() bfin_read16(USB_PHY_TEST)
-#define bfin_write_USB_PHY_TEST(val) bfin_write16(USB_PHY_TEST, val)
#define bfin_read_USB_PLLOSC_CTRL() bfin_read16(USB_PLLOSC_CTRL)
#define bfin_write_USB_PLLOSC_CTRL(val) bfin_write16(USB_PLLOSC_CTRL, val)
#define bfin_read_USB_SRP_CLKDIV() bfin_read16(USB_SRP_CLKDIV)
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF542.h b/arch/blackfin/mach-bf548/include/mach/defBF542.h
index 51161575a163..ae4b889e3606 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF542.h
@@ -140,9 +140,6 @@
#define USB_APHY_CALIB 0xffc03de4 /* Register used to set some calibration values */
#define USB_APHY_CNTRL2 0xffc03de8 /* Register used to prevent re-enumeration once Moab goes into hibernate mode */
-/* (PHY_TEST is for ADI usage only) */
-
-#define USB_PHY_TEST 0xffc03dec /* Used for reducing simulation time and simplifies FIFO testability */
#define USB_PLLOSC_CTRL 0xffc03df0 /* Used to program different parameters for USB PLL and Oscillator */
#define USB_SRP_CLKDIV 0xffc03df4 /* Used to program clock divide value for the clock fed to the SRP detection logic */
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index d55dcc0f5324..7cc7928a3c73 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -254,9 +254,6 @@
#define USB_APHY_CALIB 0xffc03de4 /* Register used to set some calibration values */
#define USB_APHY_CNTRL2 0xffc03de8 /* Register used to prevent re-enumeration once Moab goes into hibernate mode */
-/* (PHY_TEST is for ADI usage only) */
-
-#define USB_PHY_TEST 0xffc03dec /* Used for reducing simulation time and simplifies FIFO testability */
#define USB_PLLOSC_CTRL 0xffc03df0 /* Used to program different parameters for USB PLL and Oscillator */
#define USB_SRP_CLKDIV 0xffc03df4 /* Used to program clock divide value for the clock fed to the SRP detection logic */
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 11789beca75a..8c0c80fd1a45 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -124,7 +124,7 @@ void platform_send_ipi(cpumask_t callmap, int irq)
unsigned int cpu;
int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
- for_each_cpu_mask(cpu, callmap) {
+ for_each_cpu(cpu, &callmap) {
BUG_ON(cpu >= 2);
SSYNC();
bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 7f9fc272ec30..2c61fc0c98f9 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -780,8 +780,8 @@ static struct adi_spi3_chip spidev_chip_info = {
};
#endif
-#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
-static struct platform_device bfin_i2s_pcm = {
+#if IS_ENABLED(CONFIG_SND_BF6XX_PCM)
+static struct platform_device bfin_pcm = {
.name = "bfin-i2s-pcm-audio",
.id = -1,
};
@@ -1034,7 +1034,6 @@ static struct adv7842_platform_data adv7842_data = {
.i2c_infoframe = 0x48,
.i2c_cec = 0x49,
.i2c_avlink = 0x4a,
- .i2c_ex = 0x26,
};
static struct bfin_capture_config bfin_capture_data = {
@@ -1104,7 +1103,6 @@ static struct disp_route adv7511_routes[] = {
static struct adv7511_platform_data adv7511_data = {
.edid_addr = 0x7e,
- .i2c_ex = 0x25,
};
static struct bfin_display_config bfin_display_data = {
@@ -1209,6 +1207,35 @@ static struct platform_device bfin_display_device = {
};
#endif
+#if defined(CONFIG_FB_BF609_NL8048) \
+ || defined(CONFIG_FB_BF609_NL8048_MODULE)
+static struct resource nl8048_resources[] = {
+ {
+ .start = EPPI2_STAT,
+ .end = EPPI2_STAT,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = CH_EPPI2_CH0,
+ .end = CH_EPPI2_CH0,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .start = IRQ_EPPI2_STAT,
+ .end = IRQ_EPPI2_STAT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+static struct platform_device bfin_fb_device = {
+ .name = "bf609_nl8048",
+ .num_resources = ARRAY_SIZE(nl8048_resources),
+ .resource = nl8048_resources,
+ .dev = {
+ .platform_data = (void *)GPIO_PC15,
+ },
+};
+#endif
+
#if defined(CONFIG_BFIN_CRC)
#define BFIN_CRC_NAME "bfin-crc"
@@ -1862,6 +1889,29 @@ static struct platform_device i2c_bfin_twi1_device = {
};
#endif
+#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
+#include <linux/spi/mcp23s08.h>
+static const struct mcp23s08_platform_data bfin_mcp23s08_soft_switch0 = {
+ .base = 120,
+};
+static const struct mcp23s08_platform_data bfin_mcp23s08_soft_switch1 = {
+ .base = 130,
+};
+static const struct mcp23s08_platform_data bfin_mcp23s08_soft_switch2 = {
+ .base = 140,
+};
+# if IS_ENABLED(CONFIG_VIDEO_ADV7842)
+static const struct mcp23s08_platform_data bfin_adv7842_soft_switch = {
+ .base = 150,
+};
+# endif
+# if IS_ENABLED(CONFIG_VIDEO_ADV7511) || IS_ENABLED(CONFIG_VIDEO_ADV7343)
+static const struct mcp23s08_platform_data bfin_adv7511_soft_switch = {
+ .base = 160,
+};
+# endif
+#endif
+
static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
#if IS_ENABLED(CONFIG_INPUT_ADXL34X_I2C)
{
@@ -1881,6 +1931,32 @@ static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
I2C_BOARD_INFO("ssm2602", 0x1b),
},
#endif
+#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
+ {
+ I2C_BOARD_INFO("mcp23017", 0x21),
+ .platform_data = (void *)&bfin_mcp23s08_soft_switch0
+ },
+ {
+ I2C_BOARD_INFO("mcp23017", 0x22),
+ .platform_data = (void *)&bfin_mcp23s08_soft_switch1
+ },
+ {
+ I2C_BOARD_INFO("mcp23017", 0x23),
+ .platform_data = (void *)&bfin_mcp23s08_soft_switch2
+ },
+# if IS_ENABLED(CONFIG_VIDEO_ADV7842)
+ {
+ I2C_BOARD_INFO("mcp23017", 0x26),
+ .platform_data = (void *)&bfin_adv7842_soft_switch
+ },
+# endif
+# if IS_ENABLED(CONFIG_VIDEO_ADV7511) || IS_ENABLED(CONFIG_VIDEO_ADV7343)
+ {
+ I2C_BOARD_INFO("mcp23017", 0x25),
+ .platform_data = (void *)&bfin_adv7511_soft_switch
+ },
+# endif
+#endif
};
static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
@@ -2023,8 +2099,8 @@ static struct platform_device *ezkit_devices[] __initdata = {
#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
&ezkit_flash_device,
#endif
-#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
- &bfin_i2s_pcm,
+#if IS_ENABLED(CONFIG_SND_BF6XX_PCM)
+ &bfin_pcm,
#endif
#if IS_ENABLED(CONFIG_SND_BF6XX_SOC_I2S)
&bfin_i2s,
@@ -2060,7 +2136,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
diff --git a/arch/blackfin/mach-bf609/clock.c b/arch/blackfin/mach-bf609/clock.c
index 244fa4ab4c56..378305844b2c 100644
--- a/arch/blackfin/mach-bf609/clock.c
+++ b/arch/blackfin/mach-bf609/clock.c
@@ -363,6 +363,12 @@ static struct clk ethclk = {
.ops = &dummy_clk_ops,
};
+static struct clk ethpclk = {
+ .name = "pclk",
+ .parent = &sclk0,
+ .ops = &dummy_clk_ops,
+};
+
static struct clk spiclk = {
.name = "spi",
.parent = &sclk1,
@@ -381,6 +387,7 @@ static struct clk_lookup bf609_clks[] = {
CLK(dclk, NULL, "DCLK"),
CLK(oclk, NULL, "OCLK"),
CLK(ethclk, NULL, "stmmaceth"),
+ CLK(ethpclk, NULL, "pclk"),
CLK(spiclk, NULL, "spi"),
};
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 86b5a095c5a1..8d9431e22e8c 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1694,6 +1694,16 @@ ENTRY(_sys_call_table)
.long _sys_sendmmsg /* 380 */
.long _sys_process_vm_readv
.long _sys_process_vm_writev
+ .long _sys_kcmp
+ .long _sys_finit_module
+ .long _sys_sched_setattr /* 385 */
+ .long _sys_sched_getattr
+ .long _sys_renameat2
+ .long _sys_seccomp
+ .long _sys_getrandom
+ .long _sys_memfd_create /* 390 */
+ .long _sys_bpf
+ .long _sys_execveat
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 1387a94bcfd5..a66d979ec651 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/delay.h>
#include <asm/cplb.h>
#include <asm/gpio.h>
@@ -180,6 +181,7 @@ int bfin_pm_suspend_mem_enter(void)
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
+ udelay(1);
#endif
_disable_dcplb();
_disable_icplb();
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 8ad3e90cc8fc..1c7259597395 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -413,16 +413,14 @@ int __cpu_disable(void)
return 0;
}
-static DECLARE_COMPLETION(cpu_killed);
-
int __cpu_die(unsigned int cpu)
{
- return wait_for_completion_timeout(&cpu_killed, 5000);
+ return cpu_wait_death(cpu, 5);
}
void cpu_die(void)
{
- complete(&cpu_killed);
+ (void)cpu_report_death();
atomic_dec(&init_mm.mm_users);
atomic_dec(&init_mm.mm_count);
diff --git a/arch/c6x/Makefile b/arch/c6x/Makefile
index e72eb3417239..6b0be670ddfa 100644
--- a/arch/c6x/Makefile
+++ b/arch/c6x/Makefile
@@ -8,7 +8,7 @@
KBUILD_DEFCONFIG := dsk6455_defconfig
-cflags-y += -mno-dsbt -msdata=none
+cflags-y += -mno-dsbt -msdata=none -D__linux__
cflags-$(CONFIG_C6X_BIG_KERNEL) += -mlong-calls
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 2de73391b81e..ae0a51f5376c 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -41,6 +41,7 @@ generic-y += resource.h
generic-y += scatterlist.h
generic-y += segment.h
generic-y += sembuf.h
+generic-y += serial.h
generic-y += shmbuf.h
generic-y += shmparam.h
generic-y += siginfo.h
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 88bd0d899bdb..bbd7774e4d4e 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -17,6 +17,14 @@
#define dma_supported(d, m) 1
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
diff --git a/arch/c6x/include/asm/flat.h b/arch/c6x/include/asm/flat.h
new file mode 100644
index 000000000000..a1858bd5f6c8
--- /dev/null
+++ b/arch/c6x/include/asm/flat.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_C6X_FLAT_H
+#define __ASM_C6X_FLAT_H
+
+#define flat_argvp_envp_on_stack() 0
+#define flat_old_ram_flag(flags) (flags)
+#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
+#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
+#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val, rp)
+#define flat_get_relocate_addr(rel) (rel)
+#define flat_set_persistent(relval, p) 0
+
+#endif /* __ASM_C6X_FLAT_H */
diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h
index 696804475f55..852afb209afb 100644
--- a/arch/c6x/include/asm/setup.h
+++ b/arch/c6x/include/asm/setup.h
@@ -12,6 +12,7 @@
#define _ASM_C6X_SETUP_H
#include <uapi/asm/setup.h>
+#include <linux/types.h>
#ifndef __ASSEMBLY__
extern int c6x_add_memory(phys_addr_t start, unsigned long size);
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
index 584e253f3217..acc70c135ab8 100644
--- a/arch/c6x/include/asm/thread_info.h
+++ b/arch/c6x/include/asm/thread_info.h
@@ -40,7 +40,6 @@ typedef struct {
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* 0 = preemptable, <0 = BUG */
@@ -55,7 +54,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
index 57d2ea8d1977..3ae9f5a166a0 100644
--- a/arch/c6x/kernel/process.c
+++ b/arch/c6x/kernel/process.c
@@ -101,7 +101,6 @@ void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
*/
usp -= 8;
- set_fs(USER_DS);
regs->pc = pc;
regs->sp = usp;
regs->tsr |= 0x40; /* set user mode */
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index 757128868d43..72e17f7ebd6f 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -26,7 +26,8 @@
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/of.h>
-
+#include <linux/console.h>
+#include <linux/screen_info.h>
#include <asm/sections.h>
#include <asm/div64.h>
@@ -38,6 +39,8 @@
static const char *c6x_soc_name;
+struct screen_info screen_info;
+
int c6x_num_cores;
EXPORT_SYMBOL_GPL(c6x_num_cores);
@@ -60,6 +63,7 @@ unsigned char c6x_fuse_mac[6];
unsigned long memory_start;
unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
unsigned long ram_start;
unsigned long ram_end;
@@ -265,8 +269,8 @@ int __init c6x_add_memory(phys_addr_t start, unsigned long size)
*/
notrace void __init machine_init(unsigned long dt_ptr)
{
- const void *dtb = __va(dt_ptr);
- const void *fdt = _fdt_start;
+ void *dtb = __va(dt_ptr);
+ void *fdt = _fdt_start;
/* interrupts must be masked */
set_creg(IER, 2);
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
index 356ee84cad95..04845aaf5985 100644
--- a/arch/c6x/kernel/time.c
+++ b/arch/c6x/kernel/time.c
@@ -49,7 +49,7 @@ u64 sched_clock(void)
return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
}
-void time_init(void)
+void __init time_init(void)
{
u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
diff --git a/arch/c6x/platforms/cache.c b/arch/c6x/platforms/cache.c
index 86318a16a252..46fd2d530271 100644
--- a/arch/c6x/platforms/cache.c
+++ b/arch/c6x/platforms/cache.c
@@ -350,6 +350,7 @@ void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
(unsigned int *) end,
IMCR_L1PIBAR, IMCR_L1PIWC);
}
+EXPORT_SYMBOL(L1P_cache_block_invalidate);
void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
{
@@ -371,6 +372,7 @@ void L1D_cache_block_writeback(unsigned int start, unsigned int end)
(unsigned int *) end,
IMCR_L1DWBAR, IMCR_L1DWWC);
}
+EXPORT_SYMBOL(L1D_cache_block_writeback);
/*
* L2 block operations
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 4a03911053ab..0314e325a669 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -46,12 +46,18 @@ config CRIS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
- select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
select GENERIC_CMOS_UPDATE
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS2
select OLD_SIGSUSPEND
select OLD_SIGACTION
+ select ARCH_REQUIRE_GPIOLIB
+ select IRQ_DOMAIN if ETRAX_ARCH_V32
+ select OF if ETRAX_ARCH_V32
+ select OF_EARLY_FLATTREE if ETRAX_ARCH_V32
+ select CLKSRC_MMIO if ETRAX_ARCH_V32
+ select GENERIC_CLOCKEVENTS if ETRAX_ARCH_V32
+ select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
config HZ
int
@@ -61,6 +67,10 @@ config NR_CPUS
int
default "1"
+config BUILTIN_DTB
+ string "DTB to build into the kernel image"
+ depends on OF
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/cris/Makefile b/arch/cris/Makefile
index 39dc7d00083e..4a5404b3d0e4 100644
--- a/arch/cris/Makefile
+++ b/arch/cris/Makefile
@@ -40,6 +40,10 @@ else
MACH :=
endif
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+core-$(CONFIG_OF) += arch/cris/boot/dts/
+endif
+
LD = $(CROSS_COMPILE)ld -mcrislinux
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c
index 48a59afbeeb1..e9298739d72e 100644
--- a/arch/cris/arch-v10/kernel/fasttimer.c
+++ b/arch/cris/arch-v10/kernel/fasttimer.c
@@ -527,7 +527,8 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
i = debug_log_cnt;
while (i != end_i || debug_log_cnt_wrapped) {
- if (seq_printf(m, debug_log_string[i], debug_log_value[i]) < 0)
+ seq_printf(m, debug_log_string[i], debug_log_value[i]);
+ if (seq_has_overflowed(m))
return 0;
i = (i+1) % DEBUG_LOG_MAX;
}
@@ -542,24 +543,22 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS;
#if 1 //ndef FAST_TIMER_LOG
- seq_printf(m, "div: %i freq: %i delay: %i"
- "\n",
+ seq_printf(m, "div: %i freq: %i delay: %i\n",
timer_div_settings[cur],
timer_freq_settings[cur],
timer_delay_settings[cur]);
#endif
#ifdef FAST_TIMER_LOG
t = &timer_started_log[cur];
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
#endif
}
@@ -571,16 +570,15 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
seq_printf(m, "Timers added: %i\n", fast_timers_added);
for (i = 0; i < num_to_show; i++) {
t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS];
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
}
seq_putc(m, '\n');
@@ -590,16 +588,15 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
seq_printf(m, "Timers expired: %i\n", fast_timers_expired);
for (i = 0; i < num_to_show; i++) {
t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS];
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
}
seq_putc(m, '\n');
@@ -611,19 +608,15 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
while (t) {
nextt = t->next;
local_irq_restore(flags);
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
-/* " func: 0x%08lX" */
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data
-/* , t->function */
- ) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
local_irq_save(flags);
if (t->next != nextt)
diff --git a/arch/cris/arch-v10/kernel/setup.c b/arch/cris/arch-v10/kernel/setup.c
index 4f96d71b5154..7ab31f1c7540 100644
--- a/arch/cris/arch-v10/kernel/setup.c
+++ b/arch/cris/arch-v10/kernel/setup.c
@@ -63,35 +63,37 @@ int show_cpuinfo(struct seq_file *m, void *v)
else
info = &cpu_info[revision];
- return seq_printf(m,
- "processor\t: 0\n"
- "cpu\t\t: CRIS\n"
- "cpu revision\t: %lu\n"
- "cpu model\t: %s\n"
- "cache size\t: %d kB\n"
- "fpu\t\t: %s\n"
- "mmu\t\t: %s\n"
- "mmu DMA bug\t: %s\n"
- "ethernet\t: %s Mbps\n"
- "token ring\t: %s\n"
- "scsi\t\t: %s\n"
- "ata\t\t: %s\n"
- "usb\t\t: %s\n"
- "bogomips\t: %lu.%02lu\n",
+ seq_printf(m,
+ "processor\t: 0\n"
+ "cpu\t\t: CRIS\n"
+ "cpu revision\t: %lu\n"
+ "cpu model\t: %s\n"
+ "cache size\t: %d kB\n"
+ "fpu\t\t: %s\n"
+ "mmu\t\t: %s\n"
+ "mmu DMA bug\t: %s\n"
+ "ethernet\t: %s Mbps\n"
+ "token ring\t: %s\n"
+ "scsi\t\t: %s\n"
+ "ata\t\t: %s\n"
+ "usb\t\t: %s\n"
+ "bogomips\t: %lu.%02lu\n",
- revision,
- info->model,
- info->cache,
- info->flags & HAS_FPU ? "yes" : "no",
- info->flags & HAS_MMU ? "yes" : "no",
- info->flags & HAS_MMU_BUG ? "yes" : "no",
- info->flags & HAS_ETHERNET100 ? "10/100" : "10",
- info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
- info->flags & HAS_SCSI ? "yes" : "no",
- info->flags & HAS_ATA ? "yes" : "no",
- info->flags & HAS_USB ? "yes" : "no",
- (loops_per_jiffy * HZ + 500) / 500000,
- ((loops_per_jiffy * HZ + 500) / 5000) % 100);
+ revision,
+ info->model,
+ info->cache,
+ info->flags & HAS_FPU ? "yes" : "no",
+ info->flags & HAS_MMU ? "yes" : "no",
+ info->flags & HAS_MMU_BUG ? "yes" : "no",
+ info->flags & HAS_ETHERNET100 ? "10/100" : "10",
+ info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
+ info->flags & HAS_SCSI ? "yes" : "no",
+ info->flags & HAS_ATA ? "yes" : "no",
+ info->flags & HAS_USB ? "yes" : "no",
+ (loops_per_jiffy * HZ + 500) / 500000,
+ ((loops_per_jiffy * HZ + 500) / 5000) % 100);
+
+ return 0;
}
#endif /* CONFIG_PROC_FS */
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index 74d7ba35120d..7122d9773b13 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -321,8 +321,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (err)
return -EFAULT;
- /* TODO what is the current->exec_domain stuff and invmap ? */
-
/* Set up registers for signal handler */
/* What we enter NOW */
diff --git a/arch/cris/arch-v32/kernel/Makefile b/arch/cris/arch-v32/kernel/Makefile
index 40358355d0cb..d9fc617ea253 100644
--- a/arch/cris/arch-v32/kernel/Makefile
+++ b/arch/cris/arch-v32/kernel/Makefile
@@ -9,7 +9,6 @@ obj-y := entry.o traps.o irq.o debugport.o \
process.o ptrace.o setup.o signal.o traps.o time.o \
cache.o cacheflush.o
-obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_ETRAX_KGDB) += kgdb.o kgdb_asm.o
obj-$(CONFIG_ETRAX_FAST_TIMER) += fasttimer.o
obj-$(CONFIG_MODULES) += crisksyms.o
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 2f19ac6217aa..026a0b21b8f0 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -99,6 +99,8 @@ ret_from_kernel_thread:
.type ret_from_intr,@function
ret_from_intr:
+ moveq 0, $r9 ; not a syscall
+
;; Check for resched if preemptive kernel, or if we're going back to
;; user-mode. This test matches the user_regs(regs) macro. Don't simply
;; test CCS since that doesn't necessarily reflect what mode we'll
@@ -145,7 +147,7 @@ system_call:
;; Stack-frame similar to the irq heads, which is reversed in
;; ret_from_sys_call.
- sub.d 92, $sp ; Skip EXS and EDA.
+ sub.d 92, $sp ; Skip EDA.
movem $r13, [$sp]
move.d $sp, $r8
addq 14*4, $r8
@@ -156,8 +158,9 @@ system_call:
move $ccs, $r4
move $srp, $r5
move $erp, $r6
+ move.d $r9, $r7 ; Store syscall number in EXS
subq 4, $sp
- movem $r6, [$r8]
+ movem $r7, [$r8]
ei ; Enable interrupts while processing syscalls.
move.d $r10, [$sp]
@@ -278,43 +281,14 @@ _syscall_exit_work:
.type _work_pending,@function
_work_pending:
addoq +TI_flags, $r0, $acr
- move.d [$acr], $r10
- btstq TIF_NEED_RESCHED, $r10 ; Need resched?
- bpl _work_notifysig ; No, must be signal/notify.
- nop
- .size _work_pending, . - _work_pending
-
- .type _work_resched,@function
-_work_resched:
- move.d $r9, $r1 ; Preserve R9.
- jsr schedule
- nop
- move.d $r1, $r9
- di
-
- addoq +TI_flags, $r0, $acr
- move.d [$acr], $r1
- and.d _TIF_WORK_MASK, $r1 ; Ignore sycall trace counter.
- beq _Rexit
- nop
- btstq TIF_NEED_RESCHED, $r1
- bmi _work_resched ; current->work.need_resched.
- nop
- .size _work_resched, . - _work_resched
-
- .type _work_notifysig,@function
-_work_notifysig:
- ;; Deal with pending signals and notify-resume requests.
-
- addoq +TI_flags, $r0, $acr
move.d [$acr], $r12 ; The thread_info_flags parameter.
move.d $sp, $r11 ; The regs param.
- jsr do_notify_resume
- move.d $r9, $r10 ; do_notify_resume syscall/irq param.
+ jsr do_work_pending
+ move.d $r9, $r10 ; The syscall/irq param.
ba _Rexit
nop
- .size _work_notifysig, . - _work_notifysig
+ .size _work_pending, . - _work_pending
;; We get here as a sidetrack when we've entered a syscall with the
;; trace-bit set. We need to call do_syscall_trace and then continue
diff --git a/arch/cris/arch-v32/kernel/fasttimer.c b/arch/cris/arch-v32/kernel/fasttimer.c
index b130c2c5fdd8..5c84dbb99f30 100644
--- a/arch/cris/arch-v32/kernel/fasttimer.c
+++ b/arch/cris/arch-v32/kernel/fasttimer.c
@@ -501,7 +501,8 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
i = debug_log_cnt;
while ((i != end_i || debug_log_cnt_wrapped)) {
- if (seq_printf(m, debug_log_string[i], debug_log_value[i]) < 0)
+ seq_printf(m, debug_log_string[i], debug_log_value[i]);
+ if (seq_has_overflowed(m))
return 0;
i = (i+1) % DEBUG_LOG_MAX;
}
@@ -516,23 +517,21 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS;
#if 1 //ndef FAST_TIMER_LOG
- seq_printf(m, "div: %i delay: %i"
- "\n",
+ seq_printf(m, "div: %i delay: %i\n",
timer_div_settings[cur],
timer_delay_settings[cur]);
#endif
#ifdef FAST_TIMER_LOG
t = &timer_started_log[cur];
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
#endif
}
@@ -544,16 +543,15 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
seq_printf(m, "Timers added: %i\n", fast_timers_added);
for (i = 0; i < num_to_show; i++) {
t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS];
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
}
seq_putc(m, '\n');
@@ -563,16 +561,15 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
seq_printf(m, "Timers expired: %i\n", fast_timers_expired);
for (i = 0; i < num_to_show; i++){
t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS];
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
}
seq_putc(m, '\n');
@@ -584,19 +581,15 @@ static int proc_fasttimer_show(struct seq_file *m, void *v)
while (t != NULL){
nextt = t->next;
local_irq_restore(flags);
- if (seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
- "d: %6li us data: 0x%08lX"
-/* " func: 0x%08lX" */
- "\n",
- t->name,
- (unsigned long)t->tv_set.tv_jiff,
- (unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_jiff,
- (unsigned long)t->tv_expires.tv_usec,
- t->delay_us,
- t->data
-/* , t->function */
- ) < 0)
+ seq_printf(m, "%-14s s: %6lu.%06lu e: %6lu.%06lu d: %6li us data: 0x%08lX\n",
+ t->name,
+ (unsigned long)t->tv_set.tv_jiff,
+ (unsigned long)t->tv_set.tv_usec,
+ (unsigned long)t->tv_expires.tv_jiff,
+ (unsigned long)t->tv_expires.tv_usec,
+ t->delay_us,
+ t->data);
+ if (seq_has_overflowed(m))
return 0;
local_irq_save(flags);
if (t->next != nextt)
diff --git a/arch/cris/arch-v32/kernel/head.S b/arch/cris/arch-v32/kernel/head.S
index 51e34165ece7..74a66e0e3777 100644
--- a/arch/cris/arch-v32/kernel/head.S
+++ b/arch/cris/arch-v32/kernel/head.S
@@ -52,11 +52,6 @@ tstart:
GIO_INIT
-#ifdef CONFIG_SMP
-secondary_cpu_entry: /* Entry point for secondary CPUs */
- di
-#endif
-
;; Setup and enable the MMU. Use same configuration for both the data
;; and the instruction MMU.
;;
@@ -164,33 +159,6 @@ secondary_cpu_entry: /* Entry point for secondary CPUs */
nop
nop
-#ifdef CONFIG_SMP
- ;; Read CPU ID
- move 0, $srs
- nop
- nop
- nop
- move $s12, $r0
- cmpq 0, $r0
- beq master_cpu
- nop
-slave_cpu:
- ; Time to boot-up. Get stack location provided by master CPU.
- move.d smp_init_current_idle_thread, $r1
- move.d [$r1], $sp
- add.d 8192, $sp
- move.d ebp_start, $r0 ; Defined in linker-script.
- move $r0, $ebp
- jsr smp_callin
- nop
-master_cpu:
- /* Set up entry point for secondary CPUs. The boot ROM has set up
- * EBP at start of internal memory. The CPU will get there
- * later when we issue an IPI to them... */
- move.d MEM_INTMEM_START + IPI_INTR_VECT * 4, $r0
- move.d secondary_cpu_entry, $r1
- move.d $r1, [$r0]
-#endif
; Check if starting from DRAM (network->RAM boot or unpacked
; compressed kernel), or directly from flash.
lapcq ., $r0
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 25437ae28128..6a881e0e92b4 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -10,6 +10,8 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/threads.h>
@@ -56,9 +58,6 @@ struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
static unsigned long irq_regs[NR_CPUS] =
{
regi_irq,
-#ifdef CONFIG_SMP
- regi_irq2,
-#endif
};
#if NR_REAL_IRQS > 32
@@ -431,6 +430,19 @@ crisv32_do_multiple(struct pt_regs* regs)
irq_exit();
}
+static int crisv32_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &crisv32_irq_type, handle_simple_irq);
+
+ return 0;
+}
+
+static struct irq_domain_ops crisv32_irq_ops = {
+ .map = crisv32_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
/*
* This is called by start_kernel. It fixes the IRQ masks and setup the
* interrupt vector table to point to bad_interrupt pointers.
@@ -441,6 +453,8 @@ init_IRQ(void)
int i;
int j;
reg_intr_vect_rw_mask vect_mask = {0};
+ struct device_node *np;
+ struct irq_domain *domain;
/* Clear all interrupts masks. */
for (i = 0; i < NBR_REGS; i++)
@@ -449,10 +463,15 @@ init_IRQ(void)
for (i = 0; i < 256; i++)
etrax_irv->v[i] = weird_irq;
- /* Point all IRQ's to bad handlers. */
+ np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
+ domain = irq_domain_add_legacy(np, NR_IRQS - FIRST_IRQ,
+ FIRST_IRQ, FIRST_IRQ,
+ &crisv32_irq_ops, NULL);
+ BUG_ON(!domain);
+ irq_set_default_host(domain);
+ of_node_put(np);
+
for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
- irq_set_chip_and_handler(j, &crisv32_irq_type,
- handle_simple_irq);
set_exception_vector(i, interrupt[j]);
}
diff --git a/arch/cris/arch-v32/kernel/setup.c b/arch/cris/arch-v32/kernel/setup.c
index 61e10ae65296..cd1865d68b2e 100644
--- a/arch/cris/arch-v32/kernel/setup.c
+++ b/arch/cris/arch-v32/kernel/setup.c
@@ -63,11 +63,6 @@ int show_cpuinfo(struct seq_file *m, void *v)
info = &cpinfo[ARRAY_SIZE(cpinfo) - 1];
-#ifdef CONFIG_SMP
- if (!cpu_online(cpu))
- return 0;
-#endif
-
revision = rdvr();
for (i = 0; i < ARRAY_SIZE(cpinfo); i++) {
@@ -77,36 +72,38 @@ int show_cpuinfo(struct seq_file *m, void *v)
}
}
- return seq_printf(m,
- "processor\t: %d\n"
- "cpu\t\t: CRIS\n"
- "cpu revision\t: %lu\n"
- "cpu model\t: %s\n"
- "cache size\t: %d KB\n"
- "fpu\t\t: %s\n"
- "mmu\t\t: %s\n"
- "mmu DMA bug\t: %s\n"
- "ethernet\t: %s Mbps\n"
- "token ring\t: %s\n"
- "scsi\t\t: %s\n"
- "ata\t\t: %s\n"
- "usb\t\t: %s\n"
- "bogomips\t: %lu.%02lu\n\n",
-
- cpu,
- revision,
- info->cpu_model,
- info->cache_size,
- info->flags & HAS_FPU ? "yes" : "no",
- info->flags & HAS_MMU ? "yes" : "no",
- info->flags & HAS_MMU_BUG ? "yes" : "no",
- info->flags & HAS_ETHERNET100 ? "10/100" : "10",
- info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
- info->flags & HAS_SCSI ? "yes" : "no",
- info->flags & HAS_ATA ? "yes" : "no",
- info->flags & HAS_USB ? "yes" : "no",
- (loops_per_jiffy * HZ + 500) / 500000,
- ((loops_per_jiffy * HZ + 500) / 5000) % 100);
+ seq_printf(m,
+ "processor\t: %d\n"
+ "cpu\t\t: CRIS\n"
+ "cpu revision\t: %lu\n"
+ "cpu model\t: %s\n"
+ "cache size\t: %d KB\n"
+ "fpu\t\t: %s\n"
+ "mmu\t\t: %s\n"
+ "mmu DMA bug\t: %s\n"
+ "ethernet\t: %s Mbps\n"
+ "token ring\t: %s\n"
+ "scsi\t\t: %s\n"
+ "ata\t\t: %s\n"
+ "usb\t\t: %s\n"
+ "bogomips\t: %lu.%02lu\n\n",
+
+ cpu,
+ revision,
+ info->cpu_model,
+ info->cache_size,
+ info->flags & HAS_FPU ? "yes" : "no",
+ info->flags & HAS_MMU ? "yes" : "no",
+ info->flags & HAS_MMU_BUG ? "yes" : "no",
+ info->flags & HAS_ETHERNET100 ? "10/100" : "10",
+ info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
+ info->flags & HAS_SCSI ? "yes" : "no",
+ info->flags & HAS_ATA ? "yes" : "no",
+ info->flags & HAS_USB ? "yes" : "no",
+ (loops_per_jiffy * HZ + 500) / 500000,
+ ((loops_per_jiffy * HZ + 500) / 5000) % 100);
+
+ return 0;
}
#endif /* CONFIG_PROC_FS */
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 870e3e069318..3a36ae6b79d5 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -72,6 +72,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
/* Make that the user-mode flag is set. */
regs->ccs |= (1 << (U_CCS_BITNR + CCS_SHIFT));
+ /* Don't perform syscall restarting */
+ regs->exs = -1;
+
/* Restore the old USP. */
err |= __get_user(old_usp, &sc->usp);
wrusp(old_usp);
@@ -287,8 +290,6 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- /* TODO: what is the current->exec_domain stuff and invmap ? */
-
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
@@ -427,6 +428,8 @@ do_signal(int canrestart, struct pt_regs *regs)
{
struct ksignal ksig;
+ canrestart = canrestart && ((int)regs->exs >= 0);
+
/*
* The common case should go fast, which is why this point is
* reached from kernel-mode. If that's the case, just return
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
deleted file mode 100644
index 0698582467ca..000000000000
--- a/arch/cris/arch-v32/kernel/smp.c
+++ /dev/null
@@ -1,358 +0,0 @@
-#include <linux/types.h>
-#include <asm/delay.h>
-#include <irq.h>
-#include <hwregs/intr_vect.h>
-#include <hwregs/intr_vect_defs.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <hwregs/asm/mmu_defs_asm.h>
-#include <hwregs/supp_reg.h>
-#include <linux/atomic.h>
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
-#define IPI_SCHEDULE 1
-#define IPI_CALL 2
-#define IPI_FLUSH_TLB 4
-#define IPI_BOOT 8
-
-#define FLUSH_ALL (void*)0xffffffff
-
-/* Vector of locks used for various atomic operations */
-spinlock_t cris_atomic_locks[] = {
- [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
-};
-
-/* CPU masks */
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(phys_cpu_present_map);
-
-/* Variables used during SMP boot */
-volatile int cpu_now_booting = 0;
-volatile struct thread_info *smp_init_current_idle_thread;
-
-/* Variables used during IPI */
-static DEFINE_SPINLOCK(call_lock);
-static DEFINE_SPINLOCK(tlbstate_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- int wait;
-};
-
-static struct call_data_struct * call_data;
-
-static struct mm_struct* flush_mm;
-static struct vm_area_struct* flush_vma;
-static unsigned long flush_addr;
-
-/* Mode registers */
-static unsigned long irq_regs[NR_CPUS] = {
- regi_irq,
- regi_irq2
-};
-
-static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
-static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
-static struct irqaction irq_ipi = {
- .handler = crisv32_ipi_interrupt,
- .flags = 0,
- .name = "ipi",
-};
-
-extern void cris_mmu_init(void);
-extern void cris_timer_init(void);
-
-/* SMP initialization */
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
- int i;
-
- /* From now on we can expect IPIs so set them up */
- setup_irq(IPI_INTR_VECT, &irq_ipi);
-
- /* Mark all possible CPUs as present */
- for (i = 0; i < max_cpus; i++)
- cpumask_set_cpu(i, &phys_cpu_present_map);
-}
-
-void smp_prepare_boot_cpu(void)
-{
- /* PGD pointer has moved after per_cpu initialization so
- * update the MMU.
- */
- pgd_t **pgd;
- pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
-
- SUPP_BANK_SEL(1);
- SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
- SUPP_BANK_SEL(2);
- SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
-
- set_cpu_online(0, true);
- cpumask_set_cpu(0, &phys_cpu_present_map);
- set_cpu_possible(0, true);
-}
-
-void __init smp_cpus_done(unsigned int max_cpus)
-{
-}
-
-/* Bring one cpu online.*/
-static int __init
-smp_boot_one_cpu(int cpuid, struct task_struct idle)
-{
- unsigned timeout;
- cpumask_t cpu_mask;
-
- cpumask_clear(&cpu_mask);
- task_thread_info(idle)->cpu = cpuid;
-
- /* Information to the CPU that is about to boot */
- smp_init_current_idle_thread = task_thread_info(idle);
- cpu_now_booting = cpuid;
-
- /* Kick it */
- set_cpu_online(cpuid, true);
- cpumask_set_cpu(cpuid, &cpu_mask);
- send_ipi(IPI_BOOT, 0, cpu_mask);
- set_cpu_online(cpuid, false);
-
- /* Wait for CPU to come online */
- for (timeout = 0; timeout < 10000; timeout++) {
- if(cpu_online(cpuid)) {
- cpu_now_booting = 0;
- smp_init_current_idle_thread = NULL;
- return 0; /* CPU online */
- }
- udelay(100);
- barrier();
- }
-
- printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
- return -1;
-}
-
-/* Secondary CPUs starts using C here. Here we need to setup CPU
- * specific stuff such as the local timer and the MMU. */
-void __init smp_callin(void)
-{
- int cpu = cpu_now_booting;
- reg_intr_vect_rw_mask vect_mask = {0};
-
- /* Initialise the idle task for this CPU */
- atomic_inc(&init_mm.mm_count);
- current->active_mm = &init_mm;
-
- /* Set up MMU */
- cris_mmu_init();
- __flush_tlb_all();
-
- /* Setup local timer. */
- cris_timer_init();
-
- /* Enable IRQ and idle */
- REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
- crisv32_unmask_irq(IPI_INTR_VECT);
- crisv32_unmask_irq(TIMER0_INTR_VECT);
- preempt_disable();
- notify_cpu_starting(cpu);
- local_irq_enable();
-
- set_cpu_online(cpu, true);
- cpu_startup_entry(CPUHP_ONLINE);
-}
-
-/* Stop execution on this CPU.*/
-void stop_this_cpu(void* dummy)
-{
- local_irq_disable();
- asm volatile("halt");
-}
-
-/* Other calls */
-void smp_send_stop(void)
-{
- smp_call_function(stop_this_cpu, NULL, 0);
-}
-
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
-
-/* cache_decay_ticks is used by the scheduler to decide if a process
- * is "hot" on one CPU. A higher value means a higher penalty to move
- * a process to another CPU. Our cache is rather small so we report
- * 1 tick.
- */
-unsigned long cache_decay_ticks = 1;
-
-int __cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
- smp_boot_one_cpu(cpu, tidle);
- return cpu_online(cpu) ? 0 : -ENOSYS;
-}
-
-void smp_send_reschedule(int cpu)
-{
- cpumask_t cpu_mask;
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(cpu, &cpu_mask);
- send_ipi(IPI_SCHEDULE, 0, cpu_mask);
-}
-
-/* TLB flushing
- *
- * Flush needs to be done on the local CPU and on any other CPU that
- * may have the same mapping. The mm->cpu_vm_mask is used to keep track
- * of which CPUs that a specific process has been executed on.
- */
-void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr)
-{
- unsigned long flags;
- cpumask_t cpu_mask;
-
- spin_lock_irqsave(&tlbstate_lock, flags);
- cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- flush_mm = mm;
- flush_vma = vma;
- flush_addr = addr;
- send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
- spin_unlock_irqrestore(&tlbstate_lock, flags);
-}
-
-void flush_tlb_all(void)
-{
- __flush_tlb_all();
- flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
- __flush_tlb_mm(mm);
- flush_tlb_common(mm, FLUSH_ALL, 0);
- /* No more mappings in other CPUs */
- cpumask_clear(mm_cpumask(mm));
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-}
-
-void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- __flush_tlb_page(vma, addr);
- flush_tlb_common(vma->vm_mm, vma, addr);
-}
-
-/* Inter processor interrupts
- *
- * The IPIs are used for:
- * * Force a schedule on a CPU
- * * FLush TLB on other CPUs
- * * Call a function on other CPUs
- */
-
-int send_ipi(int vector, int wait, cpumask_t cpu_mask)
-{
- int i = 0;
- reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
- int ret = 0;
-
- /* Calculate CPUs to send to. */
- cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);
-
- /* Send the IPI. */
- for_each_cpu(i, &cpu_mask)
- {
- ipi.vector |= vector;
- REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
- }
-
- /* Wait for IPI to finish on other CPUS */
- if (wait) {
- for_each_cpu(i, &cpu_mask) {
- int j;
- for (j = 0 ; j < 1000; j++) {
- ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
- if (!ipi.vector)
- break;
- udelay(100);
- }
-
- /* Timeout? */
- if (ipi.vector) {
- printk("SMP call timeout from %d to %d\n", smp_processor_id(), i);
- ret = -ETIMEDOUT;
- dump_stack();
- }
- }
- }
- return ret;
-}
-
-/*
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func)(void *info), void *info, int wait)
-{
- cpumask_t cpu_mask;
- struct call_data_struct data;
- int ret;
-
- cpumask_setall(&cpu_mask);
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- data.wait = wait;
-
- spin_lock(&call_lock);
- call_data = &data;
- ret = send_ipi(IPI_CALL, wait, cpu_mask);
- spin_unlock(&call_lock);
-
- return ret;
-}
-
-irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
-{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- reg_intr_vect_rw_ipi ipi;
-
- ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
-
- if (ipi.vector & IPI_SCHEDULE) {
- scheduler_ipi();
- }
- if (ipi.vector & IPI_CALL) {
- func(info);
- }
- if (ipi.vector & IPI_FLUSH_TLB) {
- if (flush_mm == FLUSH_ALL)
- __flush_tlb_all();
- else if (flush_vma == FLUSH_ALL)
- __flush_tlb_mm(flush_mm);
- else
- __flush_tlb_page(flush_vma, flush_addr);
- }
-
- ipi.vector = 0;
- REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);
-
- return IRQ_HANDLED;
-}
-
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index c17b01abdc3b..4fce9f1f7cc0 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -8,12 +8,14 @@
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/cpufreq.h>
+#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <asm/types.h>
#include <asm/signal.h>
@@ -36,33 +38,11 @@
/* Number of 763 counts before watchdog bites */
#define ETRAX_WD_CNT ((2*ETRAX_WD_HZ)/HZ + 1)
-/* Register the continuos readonly timer available in FS and ARTPEC-3. */
-static cycle_t read_cont_rotime(struct clocksource *cs)
-{
- return (u32)REG_RD(timer, regi_timer0, r_time);
-}
-
-static struct clocksource cont_rotime = {
- .name = "crisv32_rotime",
- .rating = 300,
- .read = read_cont_rotime,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init etrax_init_cont_rotime(void)
-{
- clocksource_register_khz(&cont_rotime, 100000);
- return 0;
-}
-arch_initcall(etrax_init_cont_rotime);
+#define CRISV32_TIMER_FREQ (100000000lu)
unsigned long timer_regs[NR_CPUS] =
{
regi_timer0,
-#ifdef CONFIG_SMP
- regi_timer2
-#endif
};
extern int set_rtc_mmss(unsigned long nowtime);
@@ -189,81 +169,104 @@ void handle_watchdog_bite(struct pt_regs *regs)
#endif
}
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick.
- */
-extern void cris_do_profile(struct pt_regs *regs);
+extern void cris_profile_sample(struct pt_regs *regs);
+static void __iomem *timer_base;
-static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
+static void crisv32_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
{
- struct pt_regs *regs = get_irq_regs();
- int cpu = smp_processor_id();
- reg_timer_r_masked_intr masked_intr;
- reg_timer_rw_ack_intr ack_intr = { 0 };
-
- /* Check if the timer interrupt is for us (a tmr0 int) */
- masked_intr = REG_RD(timer, timer_regs[cpu], r_masked_intr);
- if (!masked_intr.tmr0)
- return IRQ_NONE;
+ reg_timer_rw_tmr0_ctrl ctrl = {
+ .op = regk_timer_hold,
+ .freq = regk_timer_f100,
+ };
- /* Acknowledge the timer irq. */
- ack_intr.tmr0 = 1;
- REG_WR(timer, timer_regs[cpu], rw_ack_intr, ack_intr);
+ REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
+}
- /* Reset watchdog otherwise it resets us! */
- reset_watchdog();
+static int crisv32_clkevt_next_event(unsigned long evt,
+ struct clock_event_device *dev)
+{
+ reg_timer_rw_tmr0_ctrl ctrl = {
+ .op = regk_timer_ld,
+ .freq = regk_timer_f100,
+ };
+
+ REG_WR(timer, timer_base, rw_tmr0_div, evt);
+ REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
+
+ ctrl.op = regk_timer_run;
+ REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
+
+ return 0;
+}
+
+static irqreturn_t crisv32_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ reg_timer_rw_tmr0_ctrl ctrl = {
+ .op = regk_timer_hold,
+ .freq = regk_timer_f100,
+ };
+ reg_timer_rw_ack_intr ack = { .tmr0 = 1 };
+ reg_timer_r_masked_intr intr;
+
+ intr = REG_RD(timer, timer_base, r_masked_intr);
+ if (!intr.tmr0)
+ return IRQ_NONE;
- /* Update statistics. */
- update_process_times(user_mode(regs));
+ REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
+ REG_WR(timer, timer_base, rw_ack_intr, ack);
- cris_do_profile(regs); /* Save profiling information */
+ reset_watchdog();
+#ifdef CONFIG_SYSTEM_PROFILER
+ cris_profile_sample(get_irq_regs());
+#endif
- /* The master CPU is responsible for the time keeping. */
- if (cpu != 0)
- return IRQ_HANDLED;
+ evt->event_handler(evt);
- /* Call the real timer interrupt handler */
- xtime_update(1);
return IRQ_HANDLED;
}
+static struct clock_event_device crisv32_clockevent = {
+ .name = "crisv32-timer",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = crisv32_clkevt_mode,
+ .set_next_event = crisv32_clkevt_next_event,
+};
+
/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
static struct irqaction irq_timer = {
- .handler = timer_interrupt,
- .flags = IRQF_SHARED,
- .name = "timer"
+ .handler = crisv32_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_SHARED,
+ .name = "crisv32-timer",
+ .dev_id = &crisv32_clockevent,
};
-void __init cris_timer_init(void)
+static u64 notrace crisv32_timer_sched_clock(void)
{
- int cpu = smp_processor_id();
- reg_timer_rw_tmr0_ctrl tmr0_ctrl = { 0 };
- reg_timer_rw_tmr0_div tmr0_div = TIMER0_DIV;
- reg_timer_rw_intr_mask timer_intr_mask;
+ return REG_RD(timer, timer_base, r_time);
+}
- /* Setup the etrax timers.
- * Base frequency is 100MHz, divider 1000000 -> 100 HZ
- * We use timer0, so timer1 is free.
- * The trig timer is used by the fasttimer API if enabled.
- */
+static void __init crisv32_timer_init(void)
+{
+ reg_timer_rw_intr_mask timer_intr_mask;
+ reg_timer_rw_tmr0_ctrl ctrl = {
+ .op = regk_timer_hold,
+ .freq = regk_timer_f100,
+ };
- tmr0_ctrl.op = regk_timer_ld;
- tmr0_ctrl.freq = regk_timer_f100;
- REG_WR(timer, timer_regs[cpu], rw_tmr0_div, tmr0_div);
- REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Load */
- tmr0_ctrl.op = regk_timer_run;
- REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Start */
+ REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
- /* Enable the timer irq. */
- timer_intr_mask = REG_RD(timer, timer_regs[cpu], rw_intr_mask);
+ timer_intr_mask = REG_RD(timer, timer_base, rw_intr_mask);
timer_intr_mask.tmr0 = 1;
- REG_WR(timer, timer_regs[cpu], rw_intr_mask, timer_intr_mask);
+ REG_WR(timer, timer_base, rw_intr_mask, timer_intr_mask);
}
void __init time_init(void)
{
- reg_intr_vect_rw_mask intr_mask;
+ int irq;
+ int ret;
/* Probe for the RTC and read it if it exists.
* Before the RTC can be probed the loops_per_usec variable needs
@@ -273,17 +276,28 @@ void __init time_init(void)
*/
loops_per_usec = 50;
- /* Start CPU local timer. */
- cris_timer_init();
+ irq = TIMER0_INTR_VECT;
+ timer_base = (void __iomem *) regi_timer0;
+
+ crisv32_timer_init();
+
+ sched_clock_register(crisv32_timer_sched_clock, 32,
+ CRISV32_TIMER_FREQ);
+
+ clocksource_mmio_init(timer_base + REG_RD_ADDR_timer_r_time,
+ "crisv32-timer", CRISV32_TIMER_FREQ,
+ 300, 32, clocksource_mmio_readl_up);
+
+ crisv32_clockevent.cpumask = cpu_possible_mask;
+ crisv32_clockevent.irq = irq;
- /* Enable the timer irq in global config. */
- intr_mask = REG_RD_VECT(intr_vect, regi_irq, rw_mask, 1);
- intr_mask.timer0 = 1;
- REG_WR_VECT(intr_vect, regi_irq, rw_mask, 1, intr_mask);
+ ret = setup_irq(irq, &irq_timer);
+ if (ret)
+ pr_warn("failed to setup irq %d\n", irq);
- /* Now actually register the timer irq handler that calls
- * timer_interrupt(). */
- setup_irq(TIMER0_INTR_VECT, &irq_timer);
+ clockevents_config_and_register(&crisv32_clockevent,
+ CRISV32_TIMER_FREQ,
+ 2, 0xffffffff);
/* Enable watchdog if we should use one. */
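The replacement clockevent is registered above with clockevents_config_and_register(&crisv32_clockevent, CRISV32_TIMER_FREQ, 2, 0xffffffff), so the shortest and longest deltas the core may ever program into crisv32_clkevt_next_event() are 2 and 0xffffffff timer cycles at 100 MHz. The arithmetic below spells out what those bounds mean in wall-clock terms; it is plain userspace C and does not touch any kernel interface.

/* What the min/max deltas given to clockevents_config_and_register()
 * amount to at a 100 MHz timer clock.  Pure arithmetic only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t freq = 100000000ull;		/* CRISV32_TIMER_FREQ */
	const uint64_t min_delta = 2;			/* cycles */
	const uint64_t max_delta = 0xffffffffull;	/* cycles */

	printf("one timer cycle          : %.1f ns\n", 1e9 / freq);
	printf("shortest programmable    : %.1f ns\n", min_delta * 1e9 / freq);
	printf("longest programmable     : %.2f s\n", (double)max_delta / freq);
	printf("cycles per 10 ms (HZ=100): %llu\n",
	       (unsigned long long)(freq / 100));
	return 0;
}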
diff --git a/arch/cris/arch-v32/lib/Makefile b/arch/cris/arch-v32/lib/Makefile
index dd296b9db034..e91cf02f625d 100644
--- a/arch/cris/arch-v32/lib/Makefile
+++ b/arch/cris/arch-v32/lib/Makefile
@@ -3,5 +3,5 @@
#
lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o \
- csumcpfruser.o spinlock.o delay.o strcmp.o
+ csumcpfruser.o delay.o strcmp.o
diff --git a/arch/cris/arch-v32/lib/spinlock.S b/arch/cris/arch-v32/lib/spinlock.S
deleted file mode 100644
index fe610b9d775f..000000000000
--- a/arch/cris/arch-v32/lib/spinlock.S
+++ /dev/null
@@ -1,40 +0,0 @@
-;; Core of the spinlock implementation
-;;
-;; Copyright (C) 2004 Axis Communications AB.
-;;
-;; Author: Mikael Starvik
-
-
- .global cris_spin_lock
- .type cris_spin_lock,@function
- .global cris_spin_trylock
- .type cris_spin_trylock,@function
-
- .text
-
-cris_spin_lock:
- clearf p
-1: test.b [$r10]
- beq 1b
- clearf p
- ax
- clear.b [$r10]
- bcs 1b
- clearf p
- ret
- nop
-
- .size cris_spin_lock, . - cris_spin_lock
-
-cris_spin_trylock:
- clearf p
-1: move.b [$r10], $r11
- ax
- clear.b [$r10]
- bcs 1b
- clearf p
- ret
- movu.b $r11,$r10
-
- .size cris_spin_trylock, . - cris_spin_trylock
-
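The deleted spinlock.S is a byte-wide test-and-set lock: cris_spin_lock() spins until the lock byte reads non-zero (1 means free), takes it by atomically clearing the byte with the ax/clear.b sequence, and the matching arch_spin_unlock() in arch-v32's spinlock.h (removed further down) releases it by storing 1 back. A rough C11 rendering of the same technique, written with the more usual sense (flag set means held), could look like the sketch below; it is for comparison only, not a replacement for the generic locking the port now relies on.

/* Minimal C11 test-and-set spinlock, equivalent in spirit to the
 * removed cris_spin_lock()/cris_spin_trylock()/arch_spin_unlock()
 * trio, only with the stored values inverted. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_flag f; } my_spinlock_t;
#define MY_SPINLOCK_INIT { ATOMIC_FLAG_INIT }

static inline void my_spin_lock(my_spinlock_t *l)
{
	/* test_and_set returns the previous value: spin while it was set */
	while (atomic_flag_test_and_set_explicit(&l->f, memory_order_acquire))
		;
}

static inline bool my_spin_trylock(my_spinlock_t *l)
{
	return !atomic_flag_test_and_set_explicit(&l->f, memory_order_acquire);
}

static inline void my_spin_unlock(my_spinlock_t *l)
{
	atomic_flag_clear_explicit(&l->f, memory_order_release);
}

Usage mirrors the removed helpers: my_spin_lock()/my_spin_unlock() bracket the critical section, and my_spin_trylock() returns true only when the lock was free.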
diff --git a/arch/cris/arch-v32/mm/init.c b/arch/cris/arch-v32/mm/init.c
index 3deca5253d91..f5438ca8122d 100644
--- a/arch/cris/arch-v32/mm/init.c
+++ b/arch/cris/arch-v32/mm/init.c
@@ -40,17 +40,6 @@ void __init cris_mmu_init(void)
*/
per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
-#ifdef CONFIG_SMP
- {
- pgd_t **pgd;
- pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
- SUPP_BANK_SEL(1);
- SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
- SUPP_BANK_SEL(2);
- SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
- }
-#endif
-
/* Initialise the TLB. Function found in tlb.c. */
tlb_init();
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
index 72727c1d8e60..c0981044eccb 100644
--- a/arch/cris/arch-v32/mm/mmu.S
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -115,11 +115,7 @@
move.d $r0, [$r1] ; last_refill_cause = rw_mm_cause
3: ; Probably not in a loop, continue normal processing
-#ifdef CONFIG_SMP
- move $s7, $acr ; PGD
-#else
move.d current_pgd, $acr ; PGD
-#endif
; Look up PMD in PGD
lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
move.d [$acr], $acr ; PGD for the current process
diff --git a/arch/cris/boot/dts/Makefile b/arch/cris/boot/dts/Makefile
new file mode 100644
index 000000000000..faf69fb9919f
--- /dev/null
+++ b/arch/cris/boot/dts/Makefile
@@ -0,0 +1,6 @@
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+endif
+
+clean-files := *.dtb.S
diff --git a/arch/cris/boot/dts/dev88.dts b/arch/cris/boot/dts/dev88.dts
new file mode 100644
index 000000000000..4fa5a3f9d0ec
--- /dev/null
+++ b/arch/cris/boot/dts/dev88.dts
@@ -0,0 +1,18 @@
+/dts-v1/;
+
+/include/ "etraxfs.dtsi"
+
+/ {
+ model = "Axis 88 Developer Board";
+ compatible = "axis,dev88";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ soc {
+ uart0: serial@b0026000 {
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/cris/boot/dts/etraxfs.dtsi b/arch/cris/boot/dts/etraxfs.dtsi
new file mode 100644
index 000000000000..909bcedc3565
--- /dev/null
+++ b/arch/cris/boot/dts/etraxfs.dtsi
@@ -0,0 +1,38 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "axis,crisv32";
+ reg = <0>;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ model = "etraxfs";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ intc: interrupt-controller {
+ compatible = "axis,crisv32-intc";
+ reg = <0xb001c000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ serial@b0026000 {
+ compatible = "axis,etraxfs-uart";
+ reg = <0xb0026000 0x1000>;
+ interrupts = <68>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/cris/include/arch-v10/arch/atomic.h b/arch/cris/include/arch-v10/arch/atomic.h
deleted file mode 100644
index 6ef5e7d09024..000000000000
--- a/arch/cris/include/arch-v10/arch/atomic.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_CRIS_ARCH_ATOMIC__
-#define __ASM_CRIS_ARCH_ATOMIC__
-
-#define cris_atomic_save(addr, flags) local_irq_save(flags);
-#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
-
-#endif
diff --git a/arch/cris/include/arch-v10/arch/system.h b/arch/cris/include/arch-v10/arch/system.h
index 935fde34aa15..9b5580f58b96 100644
--- a/arch/cris/include/arch-v10/arch/system.h
+++ b/arch/cris/include/arch-v10/arch/system.h
@@ -36,12 +36,4 @@ static inline unsigned long _get_base(char * addr)
return 0;
}
-#define nop() __asm__ __volatile__ ("nop");
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
#endif
diff --git a/arch/cris/include/arch-v32/arch/atomic.h b/arch/cris/include/arch-v32/arch/atomic.h
deleted file mode 100644
index 852ceff8013f..000000000000
--- a/arch/cris/include/arch-v32/arch/atomic.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __ASM_CRIS_ARCH_ATOMIC__
-#define __ASM_CRIS_ARCH_ATOMIC__
-
-#include <linux/spinlock_types.h>
-
-extern void cris_spin_unlock(void *l, int val);
-extern void cris_spin_lock(void *l);
-extern int cris_spin_trylock(void* l);
-
-#ifndef CONFIG_SMP
-#define cris_atomic_save(addr, flags) local_irq_save(flags);
-#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
-#else
-
-extern spinlock_t cris_atomic_locks[];
-#define LOCK_COUNT 128
-#define HASH_ADDR(a) (((int)a) & 127)
-
-#define cris_atomic_save(addr, flags) \
- local_irq_save(flags); \
- cris_spin_lock((void *)&cris_atomic_locks[HASH_ADDR(addr)].raw_lock.slock);
-
-#define cris_atomic_restore(addr, flags) \
- { \
- spinlock_t *lock = (void*)&cris_atomic_locks[HASH_ADDR(addr)]; \
- __asm__ volatile ("move.d %1,%0" \
- : "=m" (lock->raw_lock.slock) \
- : "r" (1) \
- : "memory"); \
- local_irq_restore(flags); \
- }
-
-#endif
-
-#endif
-
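On SMP builds, the cris_atomic_save()/cris_atomic_restore() pair removed above emulated atomic read-modify-write by hashing the target address into a fixed array of 128 spinlocks (HASH_ADDR() keeps the low address bits), so two operations on the same word always contend for the same lock while unrelated addresses usually do not. The sketch below reproduces that hashed-lock-array technique in portable userspace C; hashed_locks, hashed_locks_init() and emulated_atomic_add_return() are invented names, and pthread mutexes stand in for the CRIS byte locks.

/* Hashed lock array emulating atomic read-modify-write, after the
 * pattern of the removed cris_atomic_save()/cris_atomic_restore(). */
#include <pthread.h>

#define LOCK_COUNT 128
#define HASH_ADDR(a) (((unsigned long)(a)) & (LOCK_COUNT - 1))

static pthread_mutex_t hashed_locks[LOCK_COUNT];

static void hashed_locks_init(void)
{
	int i;

	for (i = 0; i < LOCK_COUNT; i++)
		pthread_mutex_init(&hashed_locks[i], NULL);
}

/* Two callers touching the same address hash to the same lock and
 * serialize; different addresses usually take different locks. */
static int emulated_atomic_add_return(int i, int *counter)
{
	pthread_mutex_t *lock = &hashed_locks[HASH_ADDR(counter)];
	int ret;

	pthread_mutex_lock(lock);
	ret = (*counter += i);
	pthread_mutex_unlock(lock);
	return ret;
}

int main(void)
{
	int counter = 0;

	hashed_locks_init();
	return emulated_atomic_add_return(5, &counter) == 5 ? 0 : 1;
}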
diff --git a/arch/cris/include/arch-v32/arch/processor.h b/arch/cris/include/arch-v32/arch/processor.h
index a024b7d32fed..568759271ab5 100644
--- a/arch/cris/include/arch-v32/arch/processor.h
+++ b/arch/cris/include/arch-v32/arch/processor.h
@@ -25,8 +25,7 @@ struct thread_struct {
*/
#define TASK_SIZE (0xB0000000UL)
-/* CCS I=1, enable interrupts. */
-#define INIT_THREAD { 0, 0, (1 << I_CCS_BITNR) }
+#define INIT_THREAD { }
#define KSTK_EIP(tsk) \
({ \
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
deleted file mode 100644
index f13275522f4d..000000000000
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ /dev/null
@@ -1,131 +0,0 @@
-#ifndef __ASM_ARCH_SPINLOCK_H
-#define __ASM_ARCH_SPINLOCK_H
-
-#include <linux/spinlock_types.h>
-
-#define RW_LOCK_BIAS 0x01000000
-
-extern void cris_spin_unlock(void *l, int val);
-extern void cris_spin_lock(void *l);
-extern int cris_spin_trylock(void *l);
-
-static inline int arch_spin_is_locked(arch_spinlock_t *x)
-{
- return *(volatile signed char *)(&(x)->slock) <= 0;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- __asm__ volatile ("move.d %1,%0" \
- : "=m" (lock->slock) \
- : "r" (1) \
- : "memory");
-}
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- while (arch_spin_is_locked(lock))
- cpu_relax();
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- return cris_spin_trylock((void *)&lock->slock);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- cris_spin_lock((void *)&lock->slock);
-}
-
-static inline void
-arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- arch_spin_lock(lock);
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- */
-
-static inline int arch_read_can_lock(arch_rwlock_t *x)
-{
- return (int)(x)->lock > 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *x)
-{
- return (x)->lock == RW_LOCK_BIAS;
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- arch_spin_lock(&rw->slock);
- while (rw->lock == 0);
- rw->lock--;
- arch_spin_unlock(&rw->slock);
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- arch_spin_lock(&rw->slock);
- while (rw->lock != RW_LOCK_BIAS);
- rw->lock = 0;
- arch_spin_unlock(&rw->slock);
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- arch_spin_lock(&rw->slock);
- rw->lock++;
- arch_spin_unlock(&rw->slock);
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- arch_spin_lock(&rw->slock);
- while (rw->lock != RW_LOCK_BIAS);
- rw->lock = RW_LOCK_BIAS;
- arch_spin_unlock(&rw->slock);
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- int ret = 0;
- arch_spin_lock(&rw->slock);
- if (rw->lock != 0) {
- rw->lock--;
- ret = 1;
- }
- arch_spin_unlock(&rw->slock);
- return ret;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- int ret = 0;
- arch_spin_lock(&rw->slock);
- if (rw->lock == RW_LOCK_BIAS) {
- rw->lock = 0;
- ret = 1;
- }
- arch_spin_unlock(&rw->slock);
- return ret;
-}
-
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
-#endif /* __ASM_ARCH_SPINLOCK_H */
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 889f2de050a3..057e51859b0a 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,16 +1,29 @@
-
+generic-y += atomic.h
generic-y += barrier.h
generic-y += clkdev.h
+generic-y += cmpxchg.h
generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
generic-y += exec.h
+generic-y += emergency-restart.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += irq_regs.h
generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += module.h
+generic-y += percpu.h
generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
+generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
deleted file mode 100644
index 279766a70664..000000000000
--- a/arch/cris/include/asm/atomic.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* $Id: atomic.h,v 1.3 2001/07/25 16:15:19 bjornw Exp $ */
-
-#ifndef __ASM_CRIS_ATOMIC__
-#define __ASM_CRIS_ATOMIC__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-#include <arch/atomic.h>
-#include <arch/system.h>
-#include <asm/barrier.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-/* These should be written in asm but we do it in C for now. */
-
-#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, volatile atomic_t *v) \
-{ \
- unsigned long flags; \
- cris_atomic_save(v, flags); \
- v->counter c_op i; \
- cris_atomic_restore(v, flags); \
-} \
-
-#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, volatile atomic_t *v) \
-{ \
- unsigned long flags; \
- int retval; \
- cris_atomic_save(v, flags); \
- retval = (v->counter c_op i); \
- cris_atomic_restore(v, flags); \
- return retval; \
-}
-
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
-
-ATOMIC_OPS(add, +=)
-ATOMIC_OPS(sub, -=)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static inline int atomic_sub_and_test(int i, volatile atomic_t *v)
-{
- int retval;
- unsigned long flags;
- cris_atomic_save(v, flags);
- retval = (v->counter -= i) == 0;
- cris_atomic_restore(v, flags);
- return retval;
-}
-
-static inline void atomic_inc(volatile atomic_t *v)
-{
- unsigned long flags;
- cris_atomic_save(v, flags);
- (v->counter)++;
- cris_atomic_restore(v, flags);
-}
-
-static inline void atomic_dec(volatile atomic_t *v)
-{
- unsigned long flags;
- cris_atomic_save(v, flags);
- (v->counter)--;
- cris_atomic_restore(v, flags);
-}
-
-static inline int atomic_inc_return(volatile atomic_t *v)
-{
- unsigned long flags;
- int retval;
- cris_atomic_save(v, flags);
- retval = ++(v->counter);
- cris_atomic_restore(v, flags);
- return retval;
-}
-
-static inline int atomic_dec_return(volatile atomic_t *v)
-{
- unsigned long flags;
- int retval;
- cris_atomic_save(v, flags);
- retval = --(v->counter);
- cris_atomic_restore(v, flags);
- return retval;
-}
-static inline int atomic_dec_and_test(volatile atomic_t *v)
-{
- int retval;
- unsigned long flags;
- cris_atomic_save(v, flags);
- retval = --(v->counter) == 0;
- cris_atomic_restore(v, flags);
- return retval;
-}
-
-static inline int atomic_inc_and_test(volatile atomic_t *v)
-{
- int retval;
- unsigned long flags;
- cris_atomic_save(v, flags);
- retval = ++(v->counter) == 0;
- cris_atomic_restore(v, flags);
- return retval;
-}
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- unsigned long flags;
-
- cris_atomic_save(v, flags);
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- cris_atomic_restore(v, flags);
- return ret;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int ret;
- unsigned long flags;
-
- cris_atomic_save(v, flags);
- ret = v->counter;
- if (ret != u)
- v->counter += a;
- cris_atomic_restore(v, flags);
- return ret;
-}
-
-#endif
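Rather than writing each operation out by hand, the removed asm/atomic.h generated its add/sub family from the ATOMIC_OP/ATOMIC_OP_RETURN/ATOMIC_OPS macro triple, much the same shape as the asm-generic implementation the port now pulls in via Kbuild. A compact illustration of that generator pattern, expanded over C11 atomics instead of the irq-save critical sections above; all my_atomic_* names are invented for the example.

/* The ATOMIC_OP/ATOMIC_OP_RETURN generator pattern over C11 atomics. */
#include <stdatomic.h>

typedef struct { atomic_int counter; } my_atomic_t;

#define MY_ATOMIC_OP(op, fetch)						\
static inline void my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	atomic_fetch_##fetch(&v->counter, i);				\
}

#define MY_ATOMIC_OP_RETURN(op, c_op, fetch)				\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{									\
	/* fetch_* returns the old value; apply c_op to get the new one */ \
	return atomic_fetch_##fetch(&v->counter, i) c_op i;		\
}

#define MY_ATOMIC_OPS(op, c_op, fetch)					\
	MY_ATOMIC_OP(op, fetch)						\
	MY_ATOMIC_OP_RETURN(op, c_op, fetch)

MY_ATOMIC_OPS(add, +, add)
MY_ATOMIC_OPS(sub, -, sub)

#undef MY_ATOMIC_OPS
#undef MY_ATOMIC_OP_RETURN
#undef MY_ATOMIC_OP

Expanding the two MY_ATOMIC_OPS() lines yields my_atomic_add(), my_atomic_add_return(), my_atomic_sub() and my_atomic_sub_return(), the same four entry points the removed header built around irq masking.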
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index bd49a546f4f5..8062cb52d343 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -19,119 +19,10 @@
#endif
#include <arch/bitops.h>
-#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
-/*
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-
-#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)
-
-/*
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-
-#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)
-
-/*
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-
-#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int mask, retval;
- unsigned long flags;
- unsigned int *adr = (unsigned int *)addr;
-
- adr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- cris_atomic_save(addr, flags);
- retval = (mask & *adr) != 0;
- *adr |= mask;
- cris_atomic_restore(addr, flags);
- return retval;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int mask, retval;
- unsigned long flags;
- unsigned int *adr = (unsigned int *)addr;
-
- adr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- cris_atomic_save(addr, flags);
- retval = (mask & *adr) != 0;
- *adr &= ~mask;
- cris_atomic_restore(addr, flags);
- return retval;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int mask, retval;
- unsigned long flags;
- unsigned int *adr = (unsigned int *)addr;
- adr += nr >> 5;
- mask = 1 << (nr & 0x1f);
- cris_atomic_save(addr, flags);
- retval = (mask & *adr) != 0;
- *adr ^= mask;
- cris_atomic_restore(addr, flags);
- return retval;
-}
-
+#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
/*
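The hand-rolled test_and_set_bit(), test_and_clear_bit() and test_and_change_bit() above all share the same address arithmetic: the bit number is split into a word index (nr >> 5) and a mask (1 << (nr & 0x1f)), and the old word value decides the return code. The asm-generic replacements do the same thing around an atomic primitive. Below is a minimal standalone sketch of the set variant using C11 atomic_fetch_or(); my_test_and_set_bit() is an invented name, not the asm-generic code itself.

/* Word/mask arithmetic shared by the removed bit helpers, over a C11
 * atomic word array. */
#include <stdatomic.h>
#include <stdbool.h>

#define BITS_PER_WORD 32u

static atomic_uint bitmap[4];				/* room for 128 bits */

static bool my_test_and_set_bit(unsigned int nr, atomic_uint *addr)
{
	atomic_uint *word = addr + nr / BITS_PER_WORD;	/* nr >> 5 */
	unsigned int mask = 1u << (nr % BITS_PER_WORD);	/* 1 << (nr & 0x1f) */

	/* fetch_or returns the old word, so the old bit is (old & mask) */
	return (atomic_fetch_or(word, mask) & mask) != 0;
}

int main(void)
{
	/* The first call finds bit 37 clear, the second finds it set. */
	return my_test_and_set_bit(37, bitmap) == false &&
	       my_test_and_set_bit(37, bitmap) == true ? 0 : 1;
}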
diff --git a/arch/cris/include/asm/cmpxchg.h b/arch/cris/include/asm/cmpxchg.h
deleted file mode 100644
index b756dac8aa3f..000000000000
--- a/arch/cris/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __ASM_CRIS_CMPXCHG__
-#define __ASM_CRIS_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- /* since Etrax doesn't have any atomic xchg instructions, we need to disable
- irq's (if enabled) and do it with move.d's */
- unsigned long flags,temp;
- local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */
- switch (size) {
- case 1:
- *((unsigned char *)&temp) = x;
- x = *(unsigned char *)ptr;
- *(unsigned char *)ptr = *((unsigned char *)&temp);
- break;
- case 2:
- *((unsigned short *)&temp) = x;
- x = *(unsigned short *)ptr;
- *(unsigned short *)ptr = *((unsigned short *)&temp);
- break;
- case 4:
- temp = x;
- x = *(unsigned long *)ptr;
- *(unsigned long *)ptr = temp;
- break;
- }
- local_irq_restore(flags); /* restore irq enable bit */
- return x;
-}
-
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#endif /* __ASM_CRIS_CMPXCHG__ */
diff --git a/arch/cris/include/asm/device.h b/arch/cris/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/cris/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/cris/include/asm/div64.h b/arch/cris/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/cris/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h
index 30ded8fbf592..c2a394ff55ff 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/asm/elf.h
@@ -71,7 +71,7 @@ typedef unsigned long elf_fpregset_t;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
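The elf.h change is not just stylistic: with the arch-v32 TASK_SIZE of 0xB0000000UL shown in the processor.h hunk above and a 32-bit unsigned long, 2 * TASK_SIZE wraps before the division, while TASK_SIZE / 3 * 2 stays in range and yields the intended two thirds of the address space. A small demonstration of the difference; uint32_t models the port's 32-bit unsigned long, everything else is ordinary unsigned arithmetic.

/* Why (TASK_SIZE / 3 * 2) differs from (2 * TASK_SIZE / 3) in 32-bit
 * unsigned arithmetic when TASK_SIZE is 0xB0000000. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t task_size = 0xB0000000u;		/* arch-v32 TASK_SIZE */
	uint32_t wrapped = (uint32_t)(2u * task_size);	/* 0x160000000 truncated */

	printf("2 * TASK_SIZE (wrapped) = %#x\n", (unsigned)wrapped);
	printf("2 * TASK_SIZE / 3       = %#x\n", (unsigned)(wrapped / 3));
	printf("TASK_SIZE / 3 * 2       = %#x\n", (unsigned)(task_size / 3 * 2));
	return 0;
}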
diff --git a/arch/cris/include/asm/emergency-restart.h b/arch/cris/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/cris/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/cris/include/asm/futex.h b/arch/cris/include/asm/futex.h
deleted file mode 100644
index 6a332a9f099c..000000000000
--- a/arch/cris/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/cris/include/asm/hardirq.h b/arch/cris/include/asm/hardirq.h
deleted file mode 100644
index 04126f7bfab2..000000000000
--- a/arch/cris/include/asm/hardirq.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_HARDIRQ_H
-#define __ASM_HARDIRQ_H
-
-#include <asm/irq.h>
-#include <asm-generic/hardirq.h>
-
-#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/cris/include/asm/irq_regs.h b/arch/cris/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/cris/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/cris/include/asm/kdebug.h b/arch/cris/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/cris/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/cris/include/asm/kmap_types.h b/arch/cris/include/asm/kmap_types.h
deleted file mode 100644
index d2d643c4ea59..000000000000
--- a/arch/cris/include/asm/kmap_types.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-/* Dummy header just to define km_type. None of this
- * is actually used on cris.
- */
-
-#include <asm-generic/kmap_types.h>
-
-#endif
diff --git a/arch/cris/include/asm/local.h b/arch/cris/include/asm/local.h
deleted file mode 100644
index c11c530f74d0..000000000000
--- a/arch/cris/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/cris/include/asm/local64.h b/arch/cris/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/cris/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/cris/include/asm/percpu.h b/arch/cris/include/asm/percpu.h
deleted file mode 100644
index 6db9b43cf80a..000000000000
--- a/arch/cris/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _CRIS_PERCPU_H
-#define _CRIS_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* _CRIS_PERCPU_H */
diff --git a/arch/cris/include/asm/smp.h b/arch/cris/include/asm/smp.h
deleted file mode 100644
index c615a06dd757..000000000000
--- a/arch/cris/include/asm/smp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-#include <linux/cpumask.h>
-
-extern cpumask_t phys_cpu_present_map;
-
-#define raw_smp_processor_id() (current_thread_info()->cpu)
-
-#endif
diff --git a/arch/cris/include/asm/spinlock.h b/arch/cris/include/asm/spinlock.h
deleted file mode 100644
index ed816b57face..000000000000
--- a/arch/cris/include/asm/spinlock.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <arch/spinlock.h>
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 7286db5ed90e..4ead1b40d2d7 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -28,7 +28,6 @@
#ifndef __ASSEMBLY__
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -50,7 +49,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/cris/include/asm/tlbflush.h b/arch/cris/include/asm/tlbflush.h
index 20697e7ef4f2..b424f43a9fd6 100644
--- a/arch/cris/include/asm/tlbflush.h
+++ b/arch/cris/include/asm/tlbflush.h
@@ -22,16 +22,9 @@ extern void __flush_tlb_mm(struct mm_struct *mm);
extern void __flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr);
-#ifdef CONFIG_SMP
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr);
-#else
#define flush_tlb_all __flush_tlb_all
#define flush_tlb_mm __flush_tlb_mm
#define flush_tlb_page __flush_tlb_page
-#endif
static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
{
diff --git a/arch/cris/include/asm/topology.h b/arch/cris/include/asm/topology.h
deleted file mode 100644
index 2ac613d32a89..000000000000
--- a/arch/cris/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_CRIS_TOPOLOGY_H
-#define _ASM_CRIS_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_CRIS_TOPOLOGY_H */
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index b45640b3e600..edef71f12bb8 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -7,6 +7,7 @@ CPPFLAGS_vmlinux.lds := -DDRAM_VIRTUAL_BASE=0x$(CONFIG_ETRAX_DRAM_VIRTUAL_BASE)
extra-y := vmlinux.lds
obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
+obj-y += devicetree.o
obj-$(CONFIG_MODULES) += crisksyms.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/cris/kernel/devicetree.c b/arch/cris/kernel/devicetree.c
new file mode 100644
index 000000000000..53ff8d73e7e1
--- /dev/null
+++ b/arch/cris/kernel/devicetree.c
@@ -0,0 +1,14 @@
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/printk.h>
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ pr_err("%s(%llx, %llx)\n",
+ __func__, base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return alloc_bootmem_align(size, align);
+}
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index 58d44ee1a71f..fd3427e563c5 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -42,3 +42,26 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
tracehook_notify_resume(regs);
}
}
+
+void do_work_pending(int syscall, struct pt_regs *regs,
+ unsigned int thread_flags)
+{
+ do {
+ if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+ return;
+ local_irq_enable();
+ if (thread_flags & _TIF_SIGPENDING) {
+ do_signal(syscall, regs);
+ syscall = 0;
+ } else {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+ }
+ local_irq_disable();
+ thread_flags = current_thread_info()->flags;
+ } while (thread_flags & _TIF_WORK_MASK);
+}
diff --git a/arch/cris/kernel/setup.c b/arch/cris/kernel/setup.c
index 905b70ea9939..bb12aa93201d 100644
--- a/arch/cris/kernel/setup.c
+++ b/arch/cris/kernel/setup.c
@@ -19,6 +19,9 @@
#include <linux/utsname.h>
#include <linux/pfn.h>
#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#include <asm/setup.h>
#include <arch/system.h>
@@ -64,6 +67,10 @@ void __init setup_arch(char **cmdline_p)
unsigned long start_pfn, max_pfn;
unsigned long memory_start;
+#ifdef CONFIG_OF
+ early_init_dt_scan(__dtb_start);
+#endif
+
/* register an initial console printing routine for printk's */
init_etrax_debug();
@@ -141,6 +148,8 @@ void __init setup_arch(char **cmdline_p)
reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);
+ unflatten_and_copy_device_tree();
+
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();
@@ -204,3 +213,9 @@ static int __init topology_init(void)
subsys_initcall(topology_init);
+static int __init cris_of_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ return 0;
+}
+core_initcall(cris_of_init);
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index fe6acdabbc8d..7780d379522f 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -79,11 +79,13 @@ cris_do_profile(struct pt_regs* regs)
#endif
}
+#ifndef CONFIG_GENERIC_SCHED_CLOCK
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ) +
get_ns_in_jiffie();
}
+#endif
static int
__init init_udelay(void)
diff --git a/arch/frv/include/asm/io.h b/arch/frv/include/asm/io.h
index 99bb7efaf9b7..0b78bc89e840 100644
--- a/arch/frv/include/asm/io.h
+++ b/arch/frv/include/asm/io.h
@@ -342,6 +342,11 @@ static inline void iowrite32(u32 val, void __iomem *p)
__flush_PCI_writes();
}
+#define ioread16be(addr) be16_to_cpu(ioread16(addr))
+#define ioread32be(addr) be32_to_cpu(ioread32(addr))
+#define iowrite16be(v, addr) iowrite16(cpu_to_be16(v), (addr))
+#define iowrite32be(v, addr) iowrite32(cpu_to_be32(v), (addr))
+
static inline void ioread8_rep(void __iomem *p, void *dst, unsigned long count)
{
io_insb((unsigned long) p, dst, count);
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index 6b917f1c2955..ccba3b6ce918 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -31,7 +31,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 cpu; /* current CPU */
@@ -59,7 +58,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c
index 446e89d500cc..8414293f213a 100644
--- a/arch/frv/kernel/asm-offsets.c
+++ b/arch/frv/kernel/asm-offsets.c
@@ -34,7 +34,6 @@ void foo(void)
{
/* offsets into the thread_info structure */
OFFSET(TI_TASK, thread_info, task);
- OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_STATUS, thread_info, status);
OFFSET(TI_CPU, thread_info, cpu);
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 336713ab4745..82d5e914dc15 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -174,22 +174,14 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
static int setup_frame(struct ksignal *ksig, sigset_t *set)
{
struct sigframe __user *frame;
- int rsig, sig = ksig->sig;
-
- set_fs(USER_DS);
+ int sig = ksig->sig;
frame = get_sigframe(ksig, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- rsig = sig;
- if (sig < 32 &&
- __current_thread_info->exec_domain &&
- __current_thread_info->exec_domain->signal_invmap)
- rsig = __current_thread_info->exec_domain->signal_invmap[sig];
-
- if (__put_user(rsig, &frame->sig) < 0)
+ if (__put_user(sig, &frame->sig) < 0)
return -EFAULT;
if (setup_sigcontext(&frame->sc, set->sig[0]))
@@ -255,22 +247,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set)
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set)
{
struct rt_sigframe __user *frame;
- int rsig, sig = ksig->sig;
-
- set_fs(USER_DS);
+ int sig = ksig->sig;
frame = get_sigframe(ksig, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- rsig = sig;
- if (sig < 32 &&
- __current_thread_info->exec_domain &&
- __current_thread_info->exec_domain->signal_invmap)
- rsig = __current_thread_info->exec_domain->signal_invmap[sig];
-
- if (__put_user(rsig, &frame->sig) ||
+ if (__put_user(sig, &frame->sig) ||
__put_user(&frame->info, &frame->pinfo) ||
__put_user(&frame->uc, &frame->puc))
return -EFAULT;
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
index bacd3d6030c5..b80fe1db7b64 100644
--- a/arch/hexagon/include/asm/thread_info.h
+++ b/arch/hexagon/include/asm/thread_info.h
@@ -47,7 +47,6 @@ typedef struct {
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu; /* current cpu */
int preempt_count; /* 0=>preemptible,<0=>BUG */
@@ -77,7 +76,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1, \
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 0a0dd5c05b46..a9ebd471823a 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -37,8 +37,6 @@
*/
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
- /* Set to run with user-mode data segmentation */
- set_fs(USER_DS);
/* We want to zero all data-containing registers. Is this overkill? */
memset(regs, 0, sizeof(*regs));
/* We might want to also zero all Processor registers here */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 074e52bf815c..76d25b2cfbbe 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -1,3 +1,8 @@
+config PGTABLE_LEVELS
+ int "Page Table Levels" if !IA64_PAGE_SIZE_64KB
+ range 3 4 if !IA64_PAGE_SIZE_64KB
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -10,6 +15,7 @@ config IA64
select ARCH_MIGHT_HAVE_PC_SERIO
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
@@ -286,19 +292,6 @@ config IA64_PAGE_SIZE_64KB
endchoice
-choice
- prompt "Page Table Levels"
- default PGTABLE_3
-
-config PGTABLE_3
- bool "3 Levels"
-
-config PGTABLE_4
- depends on !IA64_PAGE_SIZE_64KB
- bool "4 Levels"
-
-endchoice
-
if IA64_HP_SIM
config HZ
default 32
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index a1d91ab4c5ef..aa0fdf125aba 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -117,7 +117,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
- for_each_cpu_mask((cpu), early_cpu_possible_map)
+ for_each_cpu((cpu), &early_cpu_possible_map)
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
@@ -125,13 +125,13 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
int cpu;
int next_nid = 0;
- low_cpu = cpus_weight(early_cpu_possible_map);
+ low_cpu = cpumask_weight(&early_cpu_possible_map);
high_cpu = max(low_cpu, min_cpus);
high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
for (cpu = low_cpu; cpu < high_cpu; cpu++) {
- cpu_set(cpu, early_cpu_possible_map);
+ cpumask_set_cpu(cpu, &early_cpu_possible_map);
if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
node_cpuid[cpu].nid = next_nid;
next_nid++;
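The ia64 hunks here and below are part of a tree-wide move away from the old cpumask operators that manipulate cpumask_t values directly (cpu_set(), cpus_weight(), for_each_cpu_mask(), cpumask_of_cpu()) to the pointer-based cpumask_*() API (cpumask_set_cpu(), cpumask_weight(), for_each_cpu(), cpumask_of()). Working through pointers lets the masks live off-stack and avoids copying what can be a several-hundred-byte bitmap per call once NR_CPUS grows large. Below is a standalone sketch of the by-value versus by-pointer cost with a toy bitmap type; mask_t, the NR_CPUS value used here and the two weight helpers are invented for the example, and __builtin_popcountl() assumes GCC or Clang.

/* By-value versus by-pointer bitmap operations, the core of the
 * cpumask API conversion in the hunks around this point. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} mask_t;

/* Old style: the whole bitmap is copied for every call. */
static int weight_by_value(mask_t m)
{
	size_t i;
	int w = 0;

	for (i = 0; i < sizeof(m.bits) / sizeof(m.bits[0]); i++)
		w += __builtin_popcountl(m.bits[i]);
	return w;
}

/* New style: only a pointer travels, as with cpumask_weight(). */
static int weight_by_pointer(const mask_t *m)
{
	size_t i;
	int w = 0;

	for (i = 0; i < sizeof(m->bits) / sizeof(m->bits[0]); i++)
		w += __builtin_popcountl(m->bits[i]);
	return w;
}

int main(void)
{
	mask_t m;

	memset(&m, 0, sizeof(m));
	m.bits[0] = 0xffUL;	/* pretend CPUs 0-7 are set */
	printf("weights: %d %d, mask_t is %zu bytes per copy\n",
	       weight_by_value(m), weight_by_pointer(&m), sizeof(m));
	return 0;
}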
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 1f1bf144fe62..ec48bb9f95e1 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -173,7 +173,7 @@ get_order (unsigned long size)
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
typedef struct { unsigned long pud; } pud_t;
#endif
typedef struct { unsigned long pgd; } pgd_t;
@@ -182,7 +182,7 @@ get_order (unsigned long size)
# define pte_val(x) ((x).pte)
# define pmd_val(x) ((x).pmd)
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
# define pud_val(x) ((x).pud)
#endif
# define pgd_val(x) ((x).pgd)
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 5767cdfc08db..f5e70e961948 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -32,7 +32,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
quicklist_free(0, NULL, pgd);
}
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
@@ -49,7 +49,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
quicklist_free(0, NULL, pud);
}
#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
-#endif /* CONFIG_PGTABLE_4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7b6f8801df57..9f3ed9ee8f13 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -99,7 +99,7 @@
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT))
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
/*
* Definitions for second level:
*
@@ -117,7 +117,7 @@
*
* PGDIR_SHIFT determines what a first-level page table entry can map.
*/
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
#define PGDIR_SHIFT (PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
@@ -180,7 +180,7 @@
#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
@@ -281,7 +281,7 @@ extern unsigned long VMALLOC_END;
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
#define pgd_none(pgd) (!pgd_val(pgd))
#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
@@ -384,7 +384,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
here. */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
/* Find an entry in the second-level page table.. */
#define pud_offset(dir,addr) \
((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
@@ -586,7 +586,7 @@ extern struct page *zero_page_memmap_ptr;
#define __HAVE_ARCH_PGD_OFFSET_GATE
-#ifndef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable.h>
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index c16f21a068ff..aa995b67c3f5 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -20,7 +20,6 @@
*/
struct thread_info {
struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */
- struct exec_domain *exec_domain;/* execution domain */
__u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
__u32 last_cpu; /* Last CPU thread ran on */
@@ -40,7 +39,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.addr_limit = KERNEL_DS, \
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 20678a9ed11a..d68b5cf81e31 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -51,7 +51,7 @@ obj-$(CONFIG_BINFMT_ELF) += elfcore.o
CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
# The gate DSO image is built using a special linker script.
-include $(srctree)/arch/ia64/kernel/Makefile.gate
+include $(src)/Makefile.gate
# tell compiled for native
CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 2c4498919d3c..b1698bc042c8 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -483,7 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
(pa->apic_id << 8) | (pa->local_sapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pxm;
- cpu_set(srat_num_cpus, early_cpu_possible_map);
+ cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
srat_num_cpus++;
}
@@ -887,7 +887,7 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index cd44a57c73be..bc9501e36e77 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -690,7 +690,7 @@ skip_numa_setup:
do {
if (++cpu >= nr_cpu_ids)
cpu = 0;
- } while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
+ } while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));
return cpu_physical_id(cpu);
#else /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 698d8fefde6c..eaa3199f98c8 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -109,13 +109,13 @@ static inline int find_unassigned_vector(cpumask_t domain)
int pos, vector;
cpumask_and(&mask, &domain, cpu_online_mask);
- if (cpus_empty(mask))
+ if (cpumask_empty(&mask))
return -EINVAL;
for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
vector = IA64_FIRST_DEVICE_VECTOR + pos;
- cpus_and(mask, domain, vector_table[vector]);
- if (!cpus_empty(mask))
+ cpumask_and(&mask, &domain, &vector_table[vector]);
+ if (!cpumask_empty(&mask))
continue;
return vector;
}
@@ -132,18 +132,18 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
cpumask_and(&mask, &domain, cpu_online_mask);
- if (cpus_empty(mask))
+ if (cpumask_empty(&mask))
return -EINVAL;
- if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+ if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
return 0;
if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
return -EBUSY;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, &mask)
per_cpu(vector_irq, cpu)[vector] = irq;
cfg->vector = vector;
cfg->domain = domain;
irq_status[irq] = IRQ_USED;
- cpus_or(vector_table[vector], vector_table[vector], domain);
+ cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
return 0;
}
@@ -161,7 +161,6 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
static void __clear_irq_vector(int irq)
{
int vector, cpu;
- cpumask_t mask;
cpumask_t domain;
struct irq_cfg *cfg = &irq_cfg[irq];
@@ -169,13 +168,12 @@ static void __clear_irq_vector(int irq)
BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
vector = cfg->vector;
domain = cfg->domain;
- cpumask_and(&mask, &cfg->domain, cpu_online_mask);
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = IRQ_VECTOR_UNASSIGNED;
cfg->domain = CPU_MASK_NONE;
irq_status[irq] = IRQ_UNUSED;
- cpus_andnot(vector_table[vector], vector_table[vector], domain);
+ cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}
static void clear_irq_vector(int irq)
@@ -244,7 +242,7 @@ void __setup_vector_irq(int cpu)
per_cpu(vector_irq, cpu)[vector] = -1;
/* Mark the inuse vectors */
for (irq = 0; irq < NR_IRQS; ++irq) {
- if (!cpu_isset(cpu, irq_cfg[irq].domain))
+ if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
continue;
vector = irq_to_vector(irq);
per_cpu(vector_irq, cpu)[vector] = irq;
@@ -261,7 +259,7 @@ static enum vector_domain_type {
static cpumask_t vector_allocation_domain(int cpu)
{
if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
- return cpumask_of_cpu(cpu);
+ return *cpumask_of(cpu);
return CPU_MASK_ALL;
}
@@ -275,7 +273,7 @@ static int __irq_prepare_move(int irq, int cpu)
return -EBUSY;
if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
return -EINVAL;
- if (cpu_isset(cpu, cfg->domain))
+ if (cpumask_test_cpu(cpu, &cfg->domain))
return 0;
domain = vector_allocation_domain(cpu);
vector = find_unassigned_vector(domain);
@@ -309,12 +307,12 @@ void irq_complete_move(unsigned irq)
if (likely(!cfg->move_in_progress))
return;
- if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+ if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
return;
cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
- cfg->move_cleanup_count = cpus_weight(cleanup_mask);
- for_each_cpu_mask(i, cleanup_mask)
+ cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
+ for_each_cpu(i, &cleanup_mask)
platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
cfg->move_in_progress = 0;
}
@@ -340,12 +338,12 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
if (!cfg->move_cleanup_count)
goto unlock;
- if (!cpu_isset(me, cfg->old_domain))
+ if (!cpumask_test_cpu(me, &cfg->old_domain))
goto unlock;
spin_lock_irqsave(&vector_lock, flags);
__this_cpu_write(vector_irq[vector], -1);
- cpu_clear(me, vector_table[vector]);
+ cpumask_clear_cpu(me, &vector_table[vector]);
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
unlock:
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 18e794a57248..e42bf7a913f3 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -146,7 +146,7 @@ ENTRY(vhpt_miss)
(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
shr.u r28=r22,PUD_SHIFT // shift pud index into position
#else
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
@@ -155,7 +155,7 @@ ENTRY(vhpt_miss)
ld8 r17=[r17] // get *pgd (may be 0)
;;
(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr)
;;
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
@@ -222,13 +222,13 @@ ENTRY(vhpt_miss)
*/
ld8 r25=[r21] // read *pte again
ld8 r26=[r17] // read *pmd again
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
ld8 r19=[r28] // read *pud again
#endif
cmp.ne p6,p7=r0,r0
;;
cmp.ne.or.andcm p6,p7=r26,r20 // did *pmd change
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
cmp.ne.or.andcm p6,p7=r19,r29 // did *pud change
#endif
mov r27=PAGE_SHIFT<<2
@@ -476,7 +476,7 @@ ENTRY(nested_dtlb_miss)
(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
shr.u r18=r22,PUD_SHIFT // shift pud index into position
#else
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
@@ -487,7 +487,7 @@ ENTRY(nested_dtlb_miss)
(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=p[u|m]d_offset(pgd,addr)
;;
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
(p7) ld8 r17=[r17] // get *pud (may be 0)
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
;;
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 5151a649c96b..b72cd7a07222 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -156,9 +156,9 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_OFFSET(node_memblk_s, start_paddr);
VMCOREINFO_OFFSET(node_memblk_s, size);
#endif
-#ifdef CONFIG_PGTABLE_3
+#if CONFIG_PGTABLE_LEVELS == 3
VMCOREINFO_CONFIG(PGTABLE_3);
-#elif defined(CONFIG_PGTABLE_4)
+#elif CONFIG_PGTABLE_LEVELS == 4
VMCOREINFO_CONFIG(PGTABLE_4);
#endif
}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8bfd36af46f8..dd5801eb4c69 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1293,7 +1293,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
monarch_cpu = cpu;
sos->monarch = 1;
} else {
- cpu_set(cpu, mca_cpu);
+ cpumask_set_cpu(cpu, &mca_cpu);
sos->monarch = 0;
}
mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
@@ -1316,7 +1316,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
*/
ia64_mca_wakeup_all();
} else {
- while (cpu_isset(cpu, mca_cpu))
+ while (cpumask_test_cpu(cpu, &mca_cpu))
cpu_relax(); /* spin until monarch wakes us */
}
@@ -1355,9 +1355,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
* and put this cpu in the rendez loop.
*/
for_each_online_cpu(i) {
- if (cpu_isset(i, mca_cpu)) {
+ if (cpumask_test_cpu(i, &mca_cpu)) {
monarch_cpu = i;
- cpu_clear(i, mca_cpu); /* wake next cpu */
+ cpumask_clear_cpu(i, &mca_cpu); /* wake next cpu */
while (monarch_cpu != -1)
cpu_relax(); /* spin until last cpu leaves */
set_curr_task(cpu, previous_current);
@@ -1822,7 +1822,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
ti->cpu = cpu;
p->stack = ti;
p->state = TASK_UNINTERRUPTIBLE;
- cpu_set(cpu, p->cpus_allowed);
+ cpumask_set_cpu(cpu, &p->cpus_allowed);
INIT_LIST_HEAD(&p->tasks);
p->parent = p->real_parent = p->group_leader = p;
INIT_LIST_HEAD(&p->children);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 8ae36ea177d3..9dd7464f8c17 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -47,15 +47,14 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
struct msi_msg msg;
unsigned long dest_phys_id;
int irq, vector;
- cpumask_t mask;
irq = create_irq();
if (irq < 0)
return irq;
irq_set_msi_desc(irq, desc);
- cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
- dest_phys_id = cpu_physical_id(first_cpu(mask));
+ dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
+ cpu_online_mask));
vector = irq_to_vector(irq);
msg.address_hi = 0;
@@ -171,10 +170,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
struct irq_cfg *cfg = irq_cfg + irq;
unsigned dest;
- cpumask_t mask;
- cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
- dest = cpu_physical_id(first_cpu(mask));
+ dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
+ cpu_online_mask));
msg->address_hi = 0;
msg->address_lo =
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index d288cde93606..92c376279c6d 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -39,7 +39,7 @@ void map_cpu_to_node(int cpu, int nid)
}
/* sanity check first */
oldnid = cpu_to_node_map[cpu];
- if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
+ if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
return; /* nothing to do */
}
/* we don't have cpu-driven node hot add yet...
@@ -47,16 +47,16 @@ void map_cpu_to_node(int cpu, int nid)
if (!node_online(nid))
nid = first_online_node;
cpu_to_node_map[cpu] = nid;
- cpu_set(cpu, node_to_cpu_mask[nid]);
+ cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
return;
}
void unmap_cpu_from_node(int cpu, int nid)
{
- WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
+ WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
WARN_ON(cpu_to_node_map[cpu] != nid);
cpu_to_node_map[cpu] = 0;
- cpu_clear(cpu, node_to_cpu_mask[nid]);
+ cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
}
@@ -71,7 +71,7 @@ void __init build_cpu_to_node_map(void)
int cpu, i, node;
for(node=0; node < MAX_NUMNODES; node++)
- cpus_clear(node_to_cpu_mask[node]);
+ cpumask_clear(&node_to_cpu_mask[node]);
for_each_possible_early_cpu(cpu) {
node = -1;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5f4243f0acfa..60e02f7747ff 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2159,7 +2159,7 @@ static const struct file_operations pfm_file_ops = {
static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
- dentry->d_inode->i_ino);
+ d_inode(dentry)->i_ino);
}
static const struct dentry_operations pfmfs_dentry_operations = {
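
[Editor's sketch] The perfmon.c change uses the d_inode() helper rather than dereferencing dentry->d_inode by hand; the helper is the accessor the VFS prefers for a positive dentry. A minimal sketch with an illustrative function name:

    /* Sketch: reading the inode number behind a dentry. */
    #include <linux/dcache.h>
    #include <linux/fs.h>

    static unsigned long demo_ino(struct dentry *dentry)
    {
    	return d_inode(dentry)->i_ino;      /* was dentry->d_inode->i_ino */
    }
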
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index ee9719eebb1e..1eeffb7fbb16 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -256,7 +256,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
data_saved->buffer = buffer;
}
}
- cpu_set(smp_processor_id(), data->cpu_event);
+ cpumask_set_cpu(smp_processor_id(), &data->cpu_event);
if (irqsafe) {
salinfo_work_to_do(data);
spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -274,7 +274,7 @@ salinfo_timeout_check(struct salinfo_data *data)
unsigned long flags;
if (!data->open)
return;
- if (!cpus_empty(data->cpu_event)) {
+ if (!cpumask_empty(&data->cpu_event)) {
spin_lock_irqsave(&data_saved_lock, flags);
salinfo_work_to_do(data);
spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -308,7 +308,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
int i, n, cpu = -1;
retry:
- if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
+ if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (down_interruptible(&data->mutex))
@@ -317,9 +317,9 @@ retry:
n = data->cpu_check;
for (i = 0; i < nr_cpu_ids; i++) {
- if (cpu_isset(n, data->cpu_event)) {
+ if (cpumask_test_cpu(n, &data->cpu_event)) {
if (!cpu_online(n)) {
- cpu_clear(n, data->cpu_event);
+ cpumask_clear_cpu(n, &data->cpu_event);
continue;
}
cpu = n;
@@ -451,7 +451,7 @@ retry:
call_on_cpu(cpu, salinfo_log_read_cpu, data);
if (!data->log_size) {
data->state = STATE_NO_DATA;
- cpu_clear(cpu, data->cpu_event);
+ cpumask_clear_cpu(cpu, &data->cpu_event);
} else {
data->state = STATE_LOG_RECORD;
}
@@ -491,11 +491,11 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
unsigned long flags;
spin_lock_irqsave(&data_saved_lock, flags);
data->state = STATE_NO_DATA;
- if (!cpu_isset(cpu, data->cpu_event)) {
+ if (!cpumask_test_cpu(cpu, &data->cpu_event)) {
spin_unlock_irqrestore(&data_saved_lock, flags);
return 0;
}
- cpu_clear(cpu, data->cpu_event);
+ cpumask_clear_cpu(cpu, &data->cpu_event);
if (data->saved_num) {
shift1_data_saved(data, data->saved_num - 1);
data->saved_num = 0;
@@ -509,7 +509,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
salinfo_log_new_read(cpu, data);
if (data->state == STATE_LOG_RECORD) {
spin_lock_irqsave(&data_saved_lock, flags);
- cpu_set(cpu, data->cpu_event);
+ cpumask_set_cpu(cpu, &data->cpu_event);
salinfo_work_to_do(data);
spin_unlock_irqrestore(&data_saved_lock, flags);
}
@@ -581,7 +581,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
for (i = 0, data = salinfo_data;
i < ARRAY_SIZE(salinfo_data);
++i, ++data) {
- cpu_set(cpu, data->cpu_event);
+ cpumask_set_cpu(cpu, &data->cpu_event);
salinfo_work_to_do(data);
}
spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -601,7 +601,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
shift1_data_saved(data, j);
}
}
- cpu_clear(cpu, data->cpu_event);
+ cpumask_clear_cpu(cpu, &data->cpu_event);
}
spin_unlock_irqrestore(&data_saved_lock, flags);
break;
@@ -659,7 +659,7 @@ salinfo_init(void)
/* we missed any events before now */
for_each_online_cpu(j)
- cpu_set(j, data->cpu_event);
+ cpumask_set_cpu(j, &data->cpu_event);
*sdir++ = dir;
}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d86669bcdfb2..b9761389cb8d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -562,8 +562,8 @@ setup_arch (char **cmdline_p)
# ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
# endif
- per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
- 32 : cpus_weight(early_cpu_possible_map)),
+ per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+ 32 : cpumask_weight(&early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
# endif
#endif /* CONFIG_APCI_BOOT */
@@ -702,7 +702,8 @@ show_cpuinfo (struct seq_file *m, void *v)
c->itc_freq / 1000000, c->itc_freq % 1000000,
lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
- seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum]));
+ seq_printf(m, "siblings : %u\n",
+ cpumask_weight(&cpu_core_map[cpunum]));
if (c->socket_id != -1)
seq_printf(m, "physical id: %u\n", c->socket_id);
if (c->threads_per_core > 1 || c->cores_per_socket > 1)
@@ -933,8 +934,8 @@ cpu_init (void)
* (must be done after per_cpu area is setup)
*/
if (smp_processor_id() == 0) {
- cpu_set(0, per_cpu(cpu_sibling_map, 0));
- cpu_set(0, cpu_core_map[0]);
+ cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
+ cpumask_set_cpu(0, &cpu_core_map[0]);
} else {
/*
* Set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9fcd4e63048f..7f706d4f84f7 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -262,11 +262,11 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
preempt_disable();
mycpu = smp_processor_id();
- for_each_cpu_mask(cpu, cpumask)
+ for_each_cpu(cpu, &cpumask)
counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
mb();
- for_each_cpu_mask(cpu, cpumask) {
+ for_each_cpu(cpu, &cpumask) {
if (cpu == mycpu)
flush_mycpu = 1;
else
@@ -276,7 +276,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
if (flush_mycpu)
smp_local_flush_tlb();
- for_each_cpu_mask(cpu, cpumask)
+ for_each_cpu(cpu, &cpumask)
while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
udelay(FLUSH_DELAY);
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 547a48d78bd7..15051e9c2c6f 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -434,7 +434,7 @@ smp_callin (void)
/*
* Allow the master to continue.
*/
- cpu_set(cpuid, cpu_callin_map);
+ cpumask_set_cpu(cpuid, &cpu_callin_map);
Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
@@ -475,13 +475,13 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
*/
Dprintk("Waiting on callin_map ...");
for (timeout = 0; timeout < 100000; timeout++) {
- if (cpu_isset(cpu, cpu_callin_map))
+ if (cpumask_test_cpu(cpu, &cpu_callin_map))
break; /* It has booted */
udelay(100);
}
Dprintk("\n");
- if (!cpu_isset(cpu, cpu_callin_map)) {
+ if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
ia64_cpu_to_sapicid[cpu] = -1;
set_cpu_online(cpu, false); /* was set in smp_callin() */
@@ -541,7 +541,7 @@ smp_prepare_cpus (unsigned int max_cpus)
smp_setup_percpu_timer();
- cpu_set(0, cpu_callin_map);
+ cpumask_set_cpu(0, &cpu_callin_map);
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -565,7 +565,7 @@ smp_prepare_cpus (unsigned int max_cpus)
void smp_prepare_boot_cpu(void)
{
set_cpu_online(smp_processor_id(), true);
- cpu_set(smp_processor_id(), cpu_callin_map);
+ cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
set_numa_node(cpu_to_node_map[smp_processor_id()]);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
paravirt_post_smp_prepare_boot_cpu();
@@ -577,10 +577,10 @@ clear_cpu_sibling_map(int cpu)
{
int i;
- for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
- cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
- for_each_cpu_mask(i, cpu_core_map[cpu])
- cpu_clear(cpu, cpu_core_map[i]);
+ for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+ cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+ for_each_cpu(i, &cpu_core_map[cpu])
+ cpumask_clear_cpu(cpu, &cpu_core_map[i]);
per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}
@@ -592,12 +592,12 @@ remove_siblinginfo(int cpu)
if (cpu_data(cpu)->threads_per_core == 1 &&
cpu_data(cpu)->cores_per_socket == 1) {
- cpu_clear(cpu, cpu_core_map[cpu]);
- cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
+ cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
+ cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
return;
}
- last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+ last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
/* remove it from all sibling map's */
clear_cpu_sibling_map(cpu);
@@ -673,7 +673,7 @@ int __cpu_disable(void)
remove_siblinginfo(cpu);
fixup_irqs();
local_flush_tlb_all();
- cpu_clear(cpu, cpu_callin_map);
+ cpumask_clear_cpu(cpu, &cpu_callin_map);
return 0;
}
@@ -718,11 +718,13 @@ static inline void set_cpu_sibling_map(int cpu)
for_each_online_cpu(i) {
if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
- cpu_set(i, cpu_core_map[cpu]);
- cpu_set(cpu, cpu_core_map[i]);
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
- cpu_set(i, per_cpu(cpu_sibling_map, cpu));
- cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+ cpumask_set_cpu(i,
+ &per_cpu(cpu_sibling_map, cpu));
+ cpumask_set_cpu(cpu,
+ &per_cpu(cpu_sibling_map, i));
}
}
}
@@ -742,7 +744,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
* Already booted cpu? not valid anymore since we dont
* do idle loop tightspin anymore.
*/
- if (cpu_isset(cpu, cpu_callin_map))
+ if (cpumask_test_cpu(cpu, &cpu_callin_map))
return -EINVAL;
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
@@ -753,8 +755,8 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
if (cpu_data(cpu)->threads_per_core == 1 &&
cpu_data(cpu)->cores_per_socket == 1) {
- cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
- cpu_set(cpu, cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
+ cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
return 0;
}
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 965ab42fabb0..c01fe8991244 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -148,7 +148,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
if (cpu_data(cpu)->threads_per_core <= 1 &&
cpu_data(cpu)->cores_per_socket <= 1) {
- cpu_set(cpu, this_leaf->shared_cpu_map);
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
return;
}
@@ -164,7 +164,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
&& cpu_data(j)->core_id == csi.log1_cid
&& cpu_data(j)->thread_id == csi.log1_tid)
- cpu_set(j, this_leaf->shared_cpu_map);
+ cpumask_set_cpu(j, &this_leaf->shared_cpu_map);
i++;
} while (i < num_shared &&
@@ -177,7 +177,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
static void cache_shared_cpu_map_setup(unsigned int cpu,
struct cache_info * this_leaf)
{
- cpu_set(cpu, this_leaf->shared_cpu_map);
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
return;
}
#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 6b3345758d3e..a9b65cf7b34a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -692,31 +692,6 @@ int arch_remove_memory(u64 start, u64 size)
#endif
#endif
-/*
- * Even when CONFIG_IA32_SUPPORT is not enabled it is
- * useful to have the Linux/x86 domain registered to
- * avoid an attempted module load when emulators call
- * personality(PER_LINUX32). This saves several milliseconds
- * on each such call.
- */
-static struct exec_domain ia32_exec_domain;
-
-static int __init
-per_linux32_init(void)
-{
- ia32_exec_domain.name = "Linux/x86";
- ia32_exec_domain.handler = NULL;
- ia32_exec_domain.pers_low = PER_LINUX32;
- ia32_exec_domain.pers_high = PER_LINUX32;
- ia32_exec_domain.signal_map = default_exec_domain.signal_map;
- ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
- register_exec_domain(&ia32_exec_domain);
-
- return 0;
-}
-
-__initcall(per_linux32_init);
-
/**
* show_mem - give short summary of memory stats
*
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 48cc65705db4..d4e162d35b34 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -240,15 +240,12 @@ static acpi_status resource_to_window(struct acpi_resource *resource,
* We're only interested in _CRS descriptors that are
* - address space descriptors for memory or I/O space
* - non-zero size
- * - producers, i.e., the address space is routed downstream,
- * not consumed by the bridge itself
*/
status = acpi_resource_to_address64(resource, addr);
if (ACPI_SUCCESS(status) &&
(addr->resource_type == ACPI_MEMORY_RANGE ||
addr->resource_type == ACPI_IO_RANGE) &&
- addr->address.address_length &&
- addr->producer_consumer == ACPI_PRODUCER)
+ addr->address.address_length)
return AE_OK;
return AE_ERROR;
diff --git a/arch/m32r/include/asm/asm-offsets.h b/arch/m32r/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/m32r/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 6e7787f3dac7..9cc00dbd59ce 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -67,6 +67,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
extern void iounmap(volatile void __iomem *addr);
#define ioremap_nocache(off,size) ioremap(off,size)
+#define ioremap_wc ioremap_nocache
/*
* IO bus memory addresses are also 1:1 with the physical address
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 32422d0211c3..f630d9c30b28 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -24,7 +24,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 cpu; /* current CPU */
@@ -38,18 +37,7 @@ struct thread_info {
__u8 supervisor_stack[0];
};
-#else /* !__ASSEMBLY__ */
-
-/* offsets into the thread_info struct for assembly code access */
-#define TI_TASK 0x00000000
-#define TI_EXEC_DOMAIN 0x00000004
-#define TI_FLAGS 0x00000008
-#define TI_STATUS 0x0000000C
-#define TI_CPU 0x00000010
-#define TI_PRE_COUNT 0x00000014
-#define TI_ADDR_LIMIT 0x00000018
-
-#endif
+#endif /* !__ASSEMBLY__ */
#define THREAD_SIZE (PAGE_SIZE << 1)
#define THREAD_SIZE_ORDER 1
@@ -61,7 +49,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/m32r/kernel/asm-offsets.c b/arch/m32r/kernel/asm-offsets.c
index 9e263112a6e2..cd3d2fc9c8df 100644
--- a/arch/m32r/kernel/asm-offsets.c
+++ b/arch/m32r/kernel/asm-offsets.c
@@ -1 +1,14 @@
-/* Dummy asm-offsets.c file. Required by kbuild and ready to be used - hint! */
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+
+int foo(void)
+{
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_STATUS, thread_info, status);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+
+ return 0;
+}
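
[Editor's sketch] The new m32r asm-offsets.c replaces the hand-maintained TI_* constants with kbuild's OFFSET() macro, which emits each structure offset as a marker that the build turns into a #define in <generated/asm-offsets.h>; entry.S (next hunk) then includes <asm/asm-offsets.h> instead of hard-coded numbers. A simplified sketch of the mechanism, not verbatim from include/linux/kbuild.h:

    /* Sketch: how OFFSET()/DEFINE() produce assembler-visible constants. */
    #include <linux/stddef.h>        /* offsetof() */

    #define DEFINE(sym, val) \
    	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem) \
    	DEFINE(sym, offsetof(struct str, mem))

    /* OFFSET(TI_FLAGS, thread_info, flags) therefore ends up as
     * "#define TI_FLAGS <byte offset>" in generated/asm-offsets.h.
     */
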
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 7c3db9940ce1..c639bfa32232 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -65,6 +65,7 @@
#include <asm/page.h>
#include <asm/m32r.h>
#include <asm/mmu_context.h>
+#include <asm/asm-offsets.h>
#if !defined(CONFIG_MMU)
#define sys_madvise sys_ni_syscall
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 7736c6660a15..1c81e24fd006 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -172,20 +172,14 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe __user *frame;
int err = 0;
- int signal, sig = ksig->sig;
+ int sig = ksig->sig;
frame = get_sigframe(ksig, regs->spu, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
- err |= __put_user(signal, &frame->sig);
+ err |= __put_user(sig, &frame->sig);
if (err)
return -EFAULT;
@@ -209,13 +203,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Set up registers for signal handler */
regs->spu = (unsigned long)frame;
- regs->r0 = signal; /* Arg for signal handler */
+ regs->r0 = sig; /* Arg for signal handler */
regs->r1 = (unsigned long)&frame->info;
regs->r2 = (unsigned long)&frame->uc;
regs->bpc = (unsigned long)ksig->ka.sa.sa_handler;
- set_fs(USER_DS);
-
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
current->comm, current->pid, frame, regs->pc);
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index ce7aea34fdf4..c18ddc74ef9a 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -45,7 +45,7 @@ static volatile unsigned long flushcache_cpumask = 0;
/*
* For flush_tlb_others()
*/
-static volatile cpumask_t flush_cpumask;
+static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
@@ -415,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
*/
send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
- while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
+ while (!cpumask_empty(&flush_cpumask)) {
/* nothing. lockup detection does not belong here */
mb();
}
@@ -468,7 +468,7 @@ void smp_invalidate_interrupt(void)
__flush_tlb_page(va);
}
}
- cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
+ cpumask_clear_cpu(cpu_id, &flush_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index bb21f4f63170..a468467542f4 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -376,7 +376,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
BUG();
- for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
+ for_each_online_cpu(cpu_id)
show_cpu_info(cpu_id);
/*
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 87b7c7581b1d..2dd8f63bfbbb 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -67,6 +67,10 @@ config HZ
default 1000 if CLEOPATRA
default 100
+config PGTABLE_LEVELS
+ default 2 if SUN3 || COLDFIRE
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/m68k/coldfire/m527x.c b/arch/m68k/coldfire/m527x.c
index 2ba470735bed..c0b3e28f91df 100644
--- a/arch/m68k/coldfire/m527x.c
+++ b/arch/m68k/coldfire/m527x.c
@@ -92,7 +92,6 @@ static void __init m527x_uarts_init(void)
static void __init m527x_fec_init(void)
{
- u16 par;
u8 v;
/* Set multi-function pins to ethernet mode for fec0 */
@@ -100,6 +99,8 @@ static void __init m527x_fec_init(void)
v = readb(MCFGPIO_PAR_FECI2C);
writeb(v | 0xf0, MCFGPIO_PAR_FECI2C);
#else
+ u16 par;
+
par = readw(MCFGPIO_PAR_FECI2C);
writew(par | 0xf00, MCFGPIO_PAR_FECI2C);
v = readb(MCFGPIO_PAR_FEC0HL);
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 1bebbe78055a..2c648a043f24 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -103,8 +103,10 @@
*/
#define MCFFEC_BASE0 (MCF_IPSBAR + 0x1000)
#define MCFFEC_SIZE0 0x800
+#ifdef CONFIG_M5275
#define MCFFEC_BASE1 (MCF_IPSBAR + 0x1800)
#define MCFFEC_SIZE1 0x800
+#endif
/*
* QSPI module.
diff --git a/arch/m68k/include/asm/m68360_pram.h b/arch/m68k/include/asm/m68360_pram.h
index e6088bbce93d..c0cbd96f09bc 100644
--- a/arch/m68k/include/asm/m68360_pram.h
+++ b/arch/m68k/include/asm/m68360_pram.h
@@ -170,7 +170,7 @@ struct uart_pram {
unsigned short frmer; /* Rx framing error counter */
unsigned short nosec; /* Rx noise counter */
unsigned short brkec; /* Rx break character counter */
- unsigned short brkln; /* Reaceive break length */
+ unsigned short brkln; /* Receive break length */
unsigned short uaddr1; /* address character 1 */
unsigned short uaddr2; /* address character 2 */
@@ -338,7 +338,7 @@ struct ethernet_pram {
unsigned long c_pres; /* preset CRC */
unsigned long c_mask; /* constant mask for CRC */
unsigned long crcec; /* CRC error counter */
- unsigned long alec; /* alighnment error counter */
+ unsigned long alec; /* alignment error counter */
unsigned long disfc; /* discard frame counter */
unsigned short pads; /* short frame PAD characters */
unsigned short ret_lim; /* retry limit threshold */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index c54256e69e64..cee13c2e5161 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -26,7 +26,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags;
- struct exec_domain *exec_domain; /* execution domain */
mm_segment_t addr_limit; /* thread address space */
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
@@ -37,7 +36,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index d7179281e74a..af1c4f330aef 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -863,12 +863,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
if (fsize)
err |= copy_to_user (frame + 1, regs + 1, fsize);
- err |= __put_user((current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig),
- &frame->sig);
+ err |= __put_user(sig, &frame->sig);
err |= __put_user(regs->vector, &frame->code);
err |= __put_user(&frame->sc, &frame->psc);
@@ -948,12 +943,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (fsize)
err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
- err |= __put_user((current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig),
- &frame->sig);
+ err |= __put_user(sig, &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index 13272fd5a5ba..0838ca699764 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -111,7 +111,6 @@ struct thread_struct {
*/
#define start_thread(regs, pc, usp) do { \
unsigned int *argc = (unsigned int *) bprm->exec; \
- set_fs(USER_DS); \
current->thread.int_depth = 1; \
/* Force this process down to user land */ \
regs->ctx.SaveMask = TBICTX_PRIV_BIT; \
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index afb3ca4776d1..32677cc278aa 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -28,7 +28,6 @@
/* This must be 8 byte aligned so we can ensure stack alignment. */
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
u32 cpu; /* current CPU */
@@ -68,7 +67,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index 5385dd1216b7..4f8f1f87ef11 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -132,7 +132,6 @@ void irq_ctx_init(int cpu)
irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
irqctx->tinfo.task = NULL;
- irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
@@ -141,7 +140,6 @@ void irq_ctx_init(int cpu)
irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
irqctx->tinfo.task = NULL;
- irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = 0;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
index 483dff986a23..7f546183a0f0 100644
--- a/arch/metag/kernel/process.c
+++ b/arch/metag/kernel/process.c
@@ -174,8 +174,11 @@ void show_regs(struct pt_regs *regs)
show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}
+/*
+ * Copy architecture-specific thread state
+ */
int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *tsk)
+ unsigned long kthread_arg, struct task_struct *tsk)
{
struct pt_regs *childregs = task_pt_regs(tsk);
void *kernel_context = ((void *) childregs +
@@ -202,12 +205,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
global_base = __core_reg_get(A1GbP);
childregs->ctx.AX[0].U1 = (unsigned long) global_base;
childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
- /* Set D1Ar1=arg and D1RtP=usp (fn) */
+ /* Set D1Ar1=kthread_arg and D1RtP=usp (fn) */
childregs->ctx.DX[4].U1 = usp;
- childregs->ctx.DX[3].U1 = arg;
+ childregs->ctx.DX[3].U1 = kthread_arg;
tsk->thread.int_depth = 2;
return 0;
}
+
/*
* Get a pointer to where the new child's register block should have
* been pushed.
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index f006d2276f40..ac3a199e33e7 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -261,7 +261,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
}
#ifdef CONFIG_HOTPLUG_CPU
-static DECLARE_COMPLETION(cpu_killed);
/*
* __cpu_disable runs on the processor to be shutdown.
@@ -299,7 +298,7 @@ int __cpu_disable(void)
*/
void __cpu_die(unsigned int cpu)
{
- if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
+ if (!cpu_wait_death(cpu, 1))
pr_err("CPU%u: unable to kill\n", cpu);
}
@@ -314,7 +313,7 @@ void cpu_die(void)
local_irq_disable();
idle_task_exit();
- complete(&cpu_killed);
+ (void)cpu_report_death();
asm ("XOR TXENABLE, D0Re0,D0Re0\n");
}
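
[Editor's sketch] The metag hotplug hunk swaps the driver-private completion for the generic CPU-death handshake: the CPU doing the teardown waits in cpu_wait_death(cpu, seconds) and gets false on timeout, while the dying CPU announces itself with cpu_report_death(). A hedged sketch of the shape; the timeout value and the halt step are illustrative:

    /* Sketch: generic hotplug teardown handshake. */
    #include <linux/cpu.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    void __cpu_die(unsigned int cpu)
    {
    	if (!cpu_wait_death(cpu, 5))        /* wait up to 5 seconds */
    		pr_err("CPU%u: failed to die\n", cpu);
    }

    void cpu_die(void)
    {
    	idle_task_exit();
    	(void)cpu_report_death();           /* tell the waiter we are gone */
    	/* architecture-specific halt/low-power sequence follows */
    }
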
diff --git a/arch/microblaze/include/asm/seccomp.h b/arch/microblaze/include/asm/seccomp.h
index 0d912758a0d7..204618a2ce84 100644
--- a/arch/microblaze/include/asm/seccomp.h
+++ b/arch/microblaze/include/asm/seccomp.h
@@ -3,14 +3,8 @@
#include <linux/unistd.h>
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
#define __NR_seccomp_sigreturn __NR_sigreturn
-#define __NR_seccomp_read_32 __NR_read
-#define __NR_seccomp_write_32 __NR_write
-#define __NR_seccomp_exit_32 __NR_exit
-#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+#include <asm-generic/seccomp.h>
#endif /* _ASM_MICROBLAZE_SECCOMP_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b699fbd7de4a..383f387b4eee 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -65,7 +65,6 @@ typedef struct {
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 cpu; /* current CPU */
@@ -81,7 +80,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 7b5dca7ed39d..9581d194d9e4 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -27,7 +27,6 @@
static int show_cpuinfo(struct seq_file *m, void *v)
{
- int count = 0;
char *fpga_family = "Unknown";
char *cpu_ver = "Unknown";
int i;
@@ -48,91 +47,89 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
}
- count = seq_printf(m,
- "CPU-Family: MicroBlaze\n"
- "FPGA-Arch: %s\n"
- "CPU-Ver: %s, %s endian\n"
- "CPU-MHz: %d.%02d\n"
- "BogoMips: %lu.%02lu\n",
- fpga_family,
- cpu_ver,
- cpuinfo.endian ? "little" : "big",
- cpuinfo.cpu_clock_freq /
- 1000000,
- cpuinfo.cpu_clock_freq %
- 1000000,
- loops_per_jiffy / (500000 / HZ),
- (loops_per_jiffy / (5000 / HZ)) % 100);
-
- count += seq_printf(m,
- "HW:\n Shift:\t\t%s\n"
- " MSR:\t\t%s\n"
- " PCMP:\t\t%s\n"
- " DIV:\t\t%s\n",
- (cpuinfo.use_instr & PVR0_USE_BARREL_MASK) ? "yes" : "no",
- (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) ? "yes" : "no",
- (cpuinfo.use_instr & PVR2_USE_PCMP_INSTR) ? "yes" : "no",
- (cpuinfo.use_instr & PVR0_USE_DIV_MASK) ? "yes" : "no");
-
- count += seq_printf(m,
- " MMU:\t\t%x\n",
- cpuinfo.mmu);
-
- count += seq_printf(m,
- " MUL:\t\t%s\n"
- " FPU:\t\t%s\n",
- (cpuinfo.use_mult & PVR2_USE_MUL64_MASK) ? "v2" :
- (cpuinfo.use_mult & PVR0_USE_HW_MUL_MASK) ? "v1" : "no",
- (cpuinfo.use_fpu & PVR2_USE_FPU2_MASK) ? "v2" :
- (cpuinfo.use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no");
-
- count += seq_printf(m,
- " Exc:\t\t%s%s%s%s%s%s%s%s\n",
- (cpuinfo.use_exc & PVR2_OPCODE_0x0_ILL_MASK) ? "op0x0 " : "",
- (cpuinfo.use_exc & PVR2_UNALIGNED_EXC_MASK) ? "unal " : "",
- (cpuinfo.use_exc & PVR2_ILL_OPCODE_EXC_MASK) ? "ill " : "",
- (cpuinfo.use_exc & PVR2_IOPB_BUS_EXC_MASK) ? "iopb " : "",
- (cpuinfo.use_exc & PVR2_DOPB_BUS_EXC_MASK) ? "dopb " : "",
- (cpuinfo.use_exc & PVR2_DIV_ZERO_EXC_MASK) ? "zero " : "",
- (cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "",
- (cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : "");
-
- count += seq_printf(m,
- "Stream-insns:\t%sprivileged\n",
- cpuinfo.mmu_privins ? "un" : "");
+ seq_printf(m,
+ "CPU-Family: MicroBlaze\n"
+ "FPGA-Arch: %s\n"
+ "CPU-Ver: %s, %s endian\n"
+ "CPU-MHz: %d.%02d\n"
+ "BogoMips: %lu.%02lu\n",
+ fpga_family,
+ cpu_ver,
+ cpuinfo.endian ? "little" : "big",
+ cpuinfo.cpu_clock_freq / 1000000,
+ cpuinfo.cpu_clock_freq % 1000000,
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m,
+ "HW:\n Shift:\t\t%s\n"
+ " MSR:\t\t%s\n"
+ " PCMP:\t\t%s\n"
+ " DIV:\t\t%s\n",
+ (cpuinfo.use_instr & PVR0_USE_BARREL_MASK) ? "yes" : "no",
+ (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) ? "yes" : "no",
+ (cpuinfo.use_instr & PVR2_USE_PCMP_INSTR) ? "yes" : "no",
+ (cpuinfo.use_instr & PVR0_USE_DIV_MASK) ? "yes" : "no");
+
+ seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu);
+
+ seq_printf(m,
+ " MUL:\t\t%s\n"
+ " FPU:\t\t%s\n",
+ (cpuinfo.use_mult & PVR2_USE_MUL64_MASK) ? "v2" :
+ (cpuinfo.use_mult & PVR0_USE_HW_MUL_MASK) ? "v1" : "no",
+ (cpuinfo.use_fpu & PVR2_USE_FPU2_MASK) ? "v2" :
+ (cpuinfo.use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no");
+
+ seq_printf(m,
+ " Exc:\t\t%s%s%s%s%s%s%s%s\n",
+ (cpuinfo.use_exc & PVR2_OPCODE_0x0_ILL_MASK) ? "op0x0 " : "",
+ (cpuinfo.use_exc & PVR2_UNALIGNED_EXC_MASK) ? "unal " : "",
+ (cpuinfo.use_exc & PVR2_ILL_OPCODE_EXC_MASK) ? "ill " : "",
+ (cpuinfo.use_exc & PVR2_IOPB_BUS_EXC_MASK) ? "iopb " : "",
+ (cpuinfo.use_exc & PVR2_DOPB_BUS_EXC_MASK) ? "dopb " : "",
+ (cpuinfo.use_exc & PVR2_DIV_ZERO_EXC_MASK) ? "zero " : "",
+ (cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "",
+ (cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : "");
+
+ seq_printf(m,
+ "Stream-insns:\t%sprivileged\n",
+ cpuinfo.mmu_privins ? "un" : "");
if (cpuinfo.use_icache)
- count += seq_printf(m,
- "Icache:\t\t%ukB\tline length:\t%dB\n",
- cpuinfo.icache_size >> 10,
- cpuinfo.icache_line_length);
+ seq_printf(m,
+ "Icache:\t\t%ukB\tline length:\t%dB\n",
+ cpuinfo.icache_size >> 10,
+ cpuinfo.icache_line_length);
else
- count += seq_printf(m, "Icache:\t\tno\n");
+ seq_puts(m, "Icache:\t\tno\n");
if (cpuinfo.use_dcache) {
- count += seq_printf(m,
- "Dcache:\t\t%ukB\tline length:\t%dB\n",
- cpuinfo.dcache_size >> 10,
- cpuinfo.dcache_line_length);
- seq_printf(m, "Dcache-Policy:\t");
+ seq_printf(m,
+ "Dcache:\t\t%ukB\tline length:\t%dB\n",
+ cpuinfo.dcache_size >> 10,
+ cpuinfo.dcache_line_length);
+ seq_puts(m, "Dcache-Policy:\t");
if (cpuinfo.dcache_wb)
- count += seq_printf(m, "write-back\n");
+ seq_puts(m, "write-back\n");
else
- count += seq_printf(m, "write-through\n");
- } else
- count += seq_printf(m, "Dcache:\t\tno\n");
+ seq_puts(m, "write-through\n");
+ } else {
+ seq_puts(m, "Dcache:\t\tno\n");
+ }
+
+ seq_printf(m,
+ "HW-Debug:\t%s\n",
+ cpuinfo.hw_debug ? "yes" : "no");
- count += seq_printf(m,
- "HW-Debug:\t%s\n",
- cpuinfo.hw_debug ? "yes" : "no");
+ seq_printf(m,
+ "PVR-USR1:\t%02x\n"
+ "PVR-USR2:\t%08x\n",
+ cpuinfo.pvr_user1,
+ cpuinfo.pvr_user2);
- count += seq_printf(m,
- "PVR-USR1:\t%02x\n"
- "PVR-USR2:\t%08x\n",
- cpuinfo.pvr_user1,
- cpuinfo.pvr_user2);
+ seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
- count += seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
return 0;
}
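
[Editor's sketch] The show_cpuinfo() rewrite above drops the count accumulator because seq_printf() and seq_puts() no longer return a byte count in this series; a show() routine simply emits its output and returns 0, and the seq_file core handles overflow itself (seq_has_overflowed() exists for the rare caller that needs to know). A minimal sketch with a hypothetical show routine and made-up values:

    /* Sketch: seq_file output without using a return value. */
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
    	seq_printf(m, "CPU-MHz:\t%d.%02d\n", 400, 0);   /* values illustrative */
    	seq_puts(m, "Icache:\t\tno\n");
    	return 0;     /* the core retries with a bigger buffer on overflow */
    }
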
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index a1cbaf90e2ea..97001524ca2d 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -158,7 +158,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
- unsigned long signal;
unsigned long address = 0;
#ifdef CONFIG_MMU
pmd_t *pmdp;
@@ -170,12 +169,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : (unsigned long)sig;
-
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
@@ -230,14 +223,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->r1 = (unsigned long) frame;
/* Signal handler args: */
- regs->r5 = signal; /* arg 0: signum */
+ regs->r5 = sig; /* arg 0: signum */
regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */
/* Offset to handle microblaze rtid r14, 0 */
regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
- set_fs(USER_DS);
-
#ifdef DEBUG_SIG
pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
current->comm, current->pid, frame, regs->pc);
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index e5fc463b36d0..39cf40da5f14 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -4,9 +4,9 @@ platforms += alchemy
platforms += ar7
platforms += ath25
platforms += ath79
-platforms += bcm3384
platforms += bcm47xx
platforms += bcm63xx
+platforms += bmips
platforms += cavium-octeon
platforms += cobalt
platforms += dec
@@ -21,6 +21,7 @@ platforms += mti-malta
platforms += mti-sead3
platforms += netlogic
platforms += paravirt
+platforms += pistachio
platforms += pmcs-msp71xx
platforms += pnx833x
platforms += ralink
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c7a16904cd03..f5016656494f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -23,7 +23,7 @@ config MIPS
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
select HAVE_SYSCALL_TRACEPOINTS
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
@@ -43,6 +43,7 @@ config MIPS
select GENERIC_SMP_IDLE_THREAD
select BUILDTIME_EXTABLE_SORT
select GENERIC_CLOCKEVENTS
+ select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_CMOS_UPDATE
select HAVE_MOD_ARCH_SPECIFIC
select VIRT_TO_BUS
@@ -55,6 +56,8 @@ config MIPS
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_BINFMT_ELF_STATE
select SYSCTL_EXCEPTION_TRACE
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
+ select HAVE_IRQ_TIME_ACCOUNTING
menu "Machine selection"
@@ -131,8 +134,8 @@ config ATH79
help
Support for the Atheros AR71XX/AR724X/AR913X SoCs.
-config BCM3384
- bool "Broadcom BCM3384 based boards"
+config BMIPS_GENERIC
+ bool "Broadcom Generic BMIPS kernel"
select BOOT_RAW
select NO_EXCEPT_FILL
select USE_OF
@@ -140,22 +143,30 @@ config BCM3384
select CSRC_R4K
select SYNC_R4K
select COMMON_CLK
- select DMA_NONCOHERENT
+ select BCM7038_L1_IRQ
+ select BCM7120_L2_IRQ
+ select BRCMSTB_L2_IRQ
select IRQ_CPU
+ select RAW_IRQ_ACCESSORS
+ select DMA_NONCOHERENT
select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_CPU_BMIPS32_3300
+ select SYS_HAS_CPU_BMIPS4350
+ select SYS_HAS_CPU_BMIPS4380
select SYS_HAS_CPU_BMIPS5000
select SWAP_IO_SPACE
- select USB_EHCI_BIG_ENDIAN_DESC
- select USB_EHCI_BIG_ENDIAN_MMIO
- select USB_OHCI_BIG_ENDIAN_DESC
- select USB_OHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
+ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
help
- Support for BCM3384 based boards. BCM3384/BCM33843 is a cable modem
- chipset with a Linux application processor that is often used to
- provide Samba services, a CUPS print server, and/or advanced routing
- features.
+ Build a generic DT-based kernel image that boots on select
+ BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
+ box chips. Note that CONFIG_CPU_BIG_ENDIAN/CONFIG_CPU_LITTLE_ENDIAN
+ must be set appropriately for your board.
config BCM47XX
bool "Broadcom BCM47XX based boards"
@@ -352,6 +363,33 @@ config MACH_LOONGSON1
the ICT (Institute of Computing Technology) and the Chinese Academy
of Sciences.
+config MACH_PISTACHIO
+ bool "IMG Pistachio SoC based boards"
+ select ARCH_REQUIRE_GPIOLIB
+ select BOOT_ELF32
+ select BOOT_RAW
+ select CEVT_R4K
+ select CLKSRC_MIPS_GIC
+ select COMMON_CLK
+ select CSRC_R4K
+ select DMA_MAYBE_COHERENT
+ select IRQ_CPU
+ select LIBFDT
+ select MFD_SYSCON
+ select MIPS_CPU_SCACHE
+ select MIPS_GIC
+ select PINCTRL
+ select REGULATOR
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MIPS_CPS
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_ZBOOT
+ select USE_OF
+ help
+ This enables support for the IMG Pistachio SoC platform.
+
config MIPS_MALTA
bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
@@ -377,6 +415,7 @@ config MIPS_MALTA
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_CPU_MIPS32_R3_5
+ select SYS_HAS_CPU_MIPS32_R5
select SYS_HAS_CPU_MIPS32_R6
select SYS_HAS_CPU_MIPS64_R1
select SYS_HAS_CPU_MIPS64_R2
@@ -386,6 +425,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MICROMIPS
select SYS_SUPPORTS_MIPS_CMP
@@ -779,7 +819,8 @@ config CAVIUM_OCTEON_SOC
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select EDAC_SUPPORT
- select SYS_SUPPORTS_HOTPLUG_CPU
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN
select SYS_HAS_EARLY_PRINTK
select SYS_HAS_CPU_CAVIUM_OCTEON
select SWAP_IO_SPACE
@@ -793,6 +834,7 @@ config CAVIUM_OCTEON_SOC
select SYS_SUPPORTS_SMP
select NR_CPUS_DEFAULT_16
select BUILTIN_DTB
+ select MTD_COMPLEX_MAPPINGS
help
This option supports all of the Octeon reference boards from Cavium
Networks. It builds a kernel that dynamically determines the Octeon
@@ -887,6 +929,7 @@ source "arch/mips/ath25/Kconfig"
source "arch/mips/ath79/Kconfig"
source "arch/mips/bcm47xx/Kconfig"
source "arch/mips/bcm63xx/Kconfig"
+source "arch/mips/bmips/Kconfig"
source "arch/mips/jazz/Kconfig"
source "arch/mips/jz4740/Kconfig"
source "arch/mips/lantiq/Kconfig"
@@ -1202,10 +1245,10 @@ config MIPS_L1_CACHE_SHIFT_7
config MIPS_L1_CACHE_SHIFT
int
- default "4" if MIPS_L1_CACHE_SHIFT_4
- default "5" if MIPS_L1_CACHE_SHIFT_5
- default "6" if MIPS_L1_CACHE_SHIFT_6
default "7" if MIPS_L1_CACHE_SHIFT_7
+ default "6" if MIPS_L1_CACHE_SHIFT_6
+ default "5" if MIPS_L1_CACHE_SHIFT_5
+ default "4" if MIPS_L1_CACHE_SHIFT_4
default "5"
config HAVE_STD_PC_SERIAL_PORT
@@ -1245,6 +1288,7 @@ config CPU_LOONGSON3
select CPU_SUPPORTS_HUGEPAGES
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
+ select ARCH_REQUIRE_GPIOLIB
help
The Loongson 3 processor implements the MIPS64R2 instruction
set with many extensions.
@@ -1572,6 +1616,7 @@ config CPU_XLP
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
select CPU_MIPSR2
+ select CPU_SUPPORTS_HUGEPAGES
help
Netlogic Microsystems XLP processors.
endchoice
@@ -1596,6 +1641,33 @@ config CPU_MIPS32_3_5_EVA
One of its primary benefits is an increase in the maximum size
of lowmem (up to 3GB). If unsure, say 'N' here.
+config CPU_MIPS32_R5_FEATURES
+ bool "MIPS32 Release 5 Features"
+ depends on SYS_HAS_CPU_MIPS32_R5
+ depends on CPU_MIPS32_R2
+ help
+ Choose this option to build a kernel for release 2 or later of the
+ MIPS32 architecture including features from release 5 such as
+ support for Extended Physical Addressing (XPA).
+
+config CPU_MIPS32_R5_XPA
+ bool "Extended Physical Addressing (XPA)"
+ depends on CPU_MIPS32_R5_FEATURES
+ depends on !EVA
+ depends on !PAGE_SIZE_4KB
+ depends on SYS_SUPPORTS_HIGHMEM
+ select XPA
+ select HIGHMEM
+ select ARCH_PHYS_ADDR_T_64BIT
+ default n
+ help
+ Choose this option if you want to enable the Extended Physical
+ Addressing (XPA) on your MIPS32 core (such as P5600 series). The
+ benefit is to increase physical addressing equal to or greater
+ than 40 bits. Note that this has the side effect of turning on
+ 64-bit addressing which in turn makes the PTEs 64-bit in size.
+ If unsure, say 'N' here.
+
if CPU_LOONGSON2F
config CPU_NOP_WORKAROUNDS
bool
@@ -1699,6 +1771,9 @@ config SYS_HAS_CPU_MIPS32_R2
config SYS_HAS_CPU_MIPS32_R3_5
bool
+config SYS_HAS_CPU_MIPS32_R5
+ bool
+
config SYS_HAS_CPU_MIPS32_R6
bool
@@ -1836,6 +1911,9 @@ config CPU_MIPSR6
config EVA
bool
+config XPA
+ bool
+
config SYS_SUPPORTS_32BIT_KERNEL
bool
config SYS_SUPPORTS_64BIT_KERNEL
@@ -2072,7 +2150,7 @@ config MIPSR2_TO_R6_EMULATOR
help
Choose this option if you want to run non-R6 MIPS userland code.
Even if you say 'Y' here, the emulator will still be disabled by
- default. You can enable it using the 'mipsr2emul' kernel option.
+ default. You can enable it using the 'mipsr2emu' kernel option.
The only reason this is a build-time option is to save ~14K from the
final kernel image.
comment "MIPS R2-to-R6 emulator is only available for UP kernels"
@@ -2142,7 +2220,7 @@ config MIPS_CMP
config MIPS_CPS
bool "MIPS Coherent Processing System support"
- depends on SYS_SUPPORTS_MIPS_CPS
+ depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
@@ -2348,7 +2426,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
+ depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON3)
default y
help
Enable hardware performance counter support for perf events. If
@@ -2500,6 +2578,9 @@ config HZ
default 1000 if HZ_1000
default 1024 if HZ_1024
+config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
+
source "kernel/Kconfig.preempt"
config KEXEC
@@ -2600,6 +2681,11 @@ config STACKTRACE_SUPPORT
bool
default y
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT && !PAGE_SIZE_64KB
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 8f57fc72d62c..ae2dd59050f7 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -197,11 +197,17 @@ endif
# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
# been fixed properly.
-mips-cflags := "$(cflags-y)"
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
+mips-cflags := $(cflags-y)
+ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
+smartmips-ase := $(call cc-option-yn,$(mips-cflags) -msmartmips)
+cflags-$(smartmips-ase) += -msmartmips -Wa,--no-warn
+endif
+ifeq ($(CONFIG_CPU_MICROMIPS),y)
+micromips-ase := $(call cc-option-yn,$(mips-cflags) -mmicromips)
+cflags-$(micromips-ase) += -mmicromips
+endif
ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
endif
@@ -225,7 +231,7 @@ endif
#
# Board-dependent options and extra files
#
-include $(srctree)/arch/mips/Kbuild.platforms
+include arch/mips/Kbuild.platforms
ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
@@ -271,7 +277,7 @@ LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
- sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
ifdef CONFIG_64BIT
CHECKFLAGS += -m64
endif
@@ -365,7 +371,11 @@ core-$(CONFIG_BUILTIN_DTB) += arch/mips/boot/dts/
PHONY += dtbs
dtbs: scripts
- $(Q)$(MAKE) $(build)=arch/mips/boot/dts dtbs
+ $(Q)$(MAKE) $(build)=arch/mips/boot/dts
+
+PHONY += dtbs_install
+dtbs_install:
+ $(Q)$(MAKE) $(dtbinst)=arch/mips/boot/dts
archprepare:
ifdef CONFIG_MIPS32_N32
@@ -407,6 +417,7 @@ define archhelp
echo ' uImage.lzma - U-Boot image (lzma)'
echo ' uImage.lzo - U-Boot image (lzo)'
echo ' dtbs - Device-tree blobs for enabled boards'
+ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'
echo
echo ' These will be default as appropriate for a configured platform.'
endef
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index af2441dbfc12..be9ff1673ded 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -307,10 +307,7 @@ static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
}
if (mac) {
- if (sscanf(mac, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
- &dev_addr[0], &dev_addr[1],
- &dev_addr[2], &dev_addr[3],
- &dev_addr[4], &dev_addr[5]) != 6) {
+ if (!mac_pton(mac, dev_addr)) {
pr_warn("cannot parse mac address, using random address\n");
eth_random_addr(dev_addr);
}
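
[Editor's sketch] The ar7 change replaces the open-coded sscanf with mac_pton(), the shared helper that parses a colon-separated MAC string into a 6-byte array and returns zero when the string is malformed. A minimal sketch; the wrapper name and the include locations assumed for this kernel version are illustrative:

    /* Sketch: parsing a MAC address string with the shared helper. */
    #include <linux/etherdevice.h>    /* eth_random_addr(), ETH_ALEN */
    #include <linux/kernel.h>         /* mac_pton() declaration in this era */

    static void demo_set_mac(const char *mac_str, u8 dev_addr[ETH_ALEN])
    {
    	if (!mac_pton(mac_str, dev_addr))
    		eth_random_addr(dev_addr);    /* fall back to a random address */
    }
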
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
index a3120714f0b7..c39de61f9b36 100644
--- a/arch/mips/ath79/common.h
+++ b/arch/mips/ath79/common.h
@@ -17,7 +17,7 @@
#include <linux/types.h>
#define ATH79_MEM_SIZE_MIN (2 * 1024 * 1024)
-#define ATH79_MEM_SIZE_MAX (128 * 1024 * 1024)
+#define ATH79_MEM_SIZE_MAX (256 * 1024 * 1024)
void ath79_clocks_init(void);
unsigned long ath79_get_sys_clk_rate(const char *id);
diff --git a/arch/mips/bcm3384/Platform b/arch/mips/bcm3384/Platform
deleted file mode 100644
index 8e1ca0819e1b..000000000000
--- a/arch/mips/bcm3384/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Broadcom BCM3384 boards
-#
-platform-$(CONFIG_BCM3384) += bcm3384/
-cflags-$(CONFIG_BCM3384) += \
- -I$(srctree)/arch/mips/include/asm/mach-bcm3384/
-load-$(CONFIG_BCM3384) := 0xffffffff80010000
diff --git a/arch/mips/bcm3384/dma.c b/arch/mips/bcm3384/dma.c
deleted file mode 100644
index ea42012fd4f5..000000000000
--- a/arch/mips/bcm3384/dma.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
- */
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <dma-coherence.h>
-
-/*
- * BCM3384 has configurable address translation windows which allow the
- * peripherals' DMA addresses to be different from the Zephyr-visible
- * physical addresses. e.g. usb_dma_addr = zephyr_pa ^ 0x08000000
- *
- * If our DT "memory" node has a "dma-xor-mask" property we will enable this
- * translation using the provided offset.
- */
-static u32 bcm3384_dma_xor_mask;
-static u32 bcm3384_dma_xor_limit = 0xffffffff;
-
-/*
- * PCI collapses the memory hole at 0x10000000 - 0x1fffffff.
- * On systems with a dma-xor-mask, this range is guaranteed to live above
- * the dma-xor-limit.
- */
-#define BCM3384_MEM_HOLE_PA 0x10000000
-#define BCM3384_MEM_HOLE_SIZE 0x10000000
-
-static dma_addr_t bcm3384_phys_to_dma(struct device *dev, phys_addr_t pa)
-{
- if (dev && dev_is_pci(dev) &&
- pa >= (BCM3384_MEM_HOLE_PA + BCM3384_MEM_HOLE_SIZE))
- return pa - BCM3384_MEM_HOLE_SIZE;
- if (pa <= bcm3384_dma_xor_limit)
- return pa ^ bcm3384_dma_xor_mask;
- return pa;
-}
-
-dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return bcm3384_phys_to_dma(dev, virt_to_phys(addr));
-}
-
-dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
- return bcm3384_phys_to_dma(dev, page_to_phys(page));
-}
-
-unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
-{
- if (dev && dev_is_pci(dev) &&
- dma_addr >= BCM3384_MEM_HOLE_PA)
- return dma_addr + BCM3384_MEM_HOLE_SIZE;
- if ((dma_addr ^ bcm3384_dma_xor_mask) <= bcm3384_dma_xor_limit)
- return dma_addr ^ bcm3384_dma_xor_mask;
- return dma_addr;
-}
-
-static int __init bcm3384_init_dma_xor(void)
-{
- struct device_node *np = of_find_node_by_type(NULL, "memory");
-
- if (!np)
- return 0;
-
- of_property_read_u32(np, "dma-xor-mask", &bcm3384_dma_xor_mask);
- of_property_read_u32(np, "dma-xor-limit", &bcm3384_dma_xor_limit);
-
- of_node_put(np);
- return 0;
-}
-arch_initcall(bcm3384_init_dma_xor);
diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c
deleted file mode 100644
index fd94fe849af6..000000000000
--- a/arch/mips/bcm3384/irq.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Partially based on arch/mips/ralink/irq.c
- *
- * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
- */
-
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include <asm/bmips.h>
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-/* INTC register offsets */
-#define INTC_REG_ENABLE 0x00
-#define INTC_REG_STATUS 0x04
-
-#define MAX_WORDS 2
-#define IRQS_PER_WORD 32
-
-struct bcm3384_intc {
- int n_words;
- void __iomem *reg[MAX_WORDS];
- u32 enable[MAX_WORDS];
- spinlock_t lock;
-};
-
-static void bcm3384_intc_irq_unmask(struct irq_data *d)
-{
- struct bcm3384_intc *priv = d->domain->host_data;
- unsigned long flags;
- int idx = d->hwirq / IRQS_PER_WORD;
- int bit = d->hwirq % IRQS_PER_WORD;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->enable[idx] |= BIT(bit);
- __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void bcm3384_intc_irq_mask(struct irq_data *d)
-{
- struct bcm3384_intc *priv = d->domain->host_data;
- unsigned long flags;
- int idx = d->hwirq / IRQS_PER_WORD;
- int bit = d->hwirq % IRQS_PER_WORD;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->enable[idx] &= ~BIT(bit);
- __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static struct irq_chip bcm3384_intc_irq_chip = {
- .name = "INTC",
- .irq_unmask = bcm3384_intc_irq_unmask,
- .irq_mask = bcm3384_intc_irq_mask,
- .irq_mask_ack = bcm3384_intc_irq_mask,
-};
-
-unsigned int get_c0_compare_int(void)
-{
- return CP0_LEGACY_COMPARE_IRQ;
-}
-
-static void bcm3384_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_domain *domain = irq_get_handler_data(irq);
- struct bcm3384_intc *priv = domain->host_data;
- unsigned long flags;
- unsigned int idx;
-
- for (idx = 0; idx < priv->n_words; idx++) {
- unsigned long pending;
- int hwirq;
-
- spin_lock_irqsave(&priv->lock, flags);
- pending = __raw_readl(priv->reg[idx] + INTC_REG_STATUS) &
- priv->enable[idx];
- spin_unlock_irqrestore(&priv->lock, flags);
-
- for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
- generic_handle_irq(irq_find_mapping(domain,
- hwirq + idx * IRQS_PER_WORD));
- }
- }
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned long pending =
- (read_c0_status() & read_c0_cause() & ST0_IM) >> STATUSB_IP0;
- int bit;
-
- for_each_set_bit(bit, &pending, 8)
- do_IRQ(MIPS_CPU_IRQ_BASE + bit);
-}
-
-static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
-{
- irq_set_chip_and_handler(irq, &bcm3384_intc_irq_chip, handle_level_irq);
- return 0;
-}
-
-static const struct irq_domain_ops irq_domain_ops = {
- .xlate = irq_domain_xlate_onecell,
- .map = intc_map,
-};
-
-static int __init ioremap_one_pair(struct bcm3384_intc *priv,
- struct device_node *node,
- int idx)
-{
- struct resource res;
-
- if (of_address_to_resource(node, idx, &res))
- return 0;
-
- if (request_mem_region(res.start, resource_size(&res),
- res.name) < 0)
- pr_err("Failed to request INTC register region\n");
-
- priv->reg[idx] = ioremap_nocache(res.start, resource_size(&res));
- if (!priv->reg[idx])
- panic("Failed to ioremap INTC register range");
-
- /* start up with everything masked before we hook the parent IRQ */
- __raw_writel(0, priv->reg[idx] + INTC_REG_ENABLE);
- priv->enable[idx] = 0;
-
- return IRQS_PER_WORD;
-}
-
-static int __init intc_of_init(struct device_node *node,
- struct device_node *parent)
-{
- struct irq_domain *domain;
- unsigned int parent_irq, n_irqs = 0;
- struct bcm3384_intc *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- panic("Failed to allocate bcm3384_intc struct");
-
- spin_lock_init(&priv->lock);
-
- parent_irq = irq_of_parse_and_map(node, 0);
- if (!parent_irq)
- panic("Failed to get INTC IRQ");
-
- n_irqs += ioremap_one_pair(priv, node, 0);
- n_irqs += ioremap_one_pair(priv, node, 1);
-
- if (!n_irqs)
- panic("Failed to map INTC registers");
-
- priv->n_words = n_irqs / IRQS_PER_WORD;
- domain = irq_domain_add_linear(node, n_irqs, &irq_domain_ops, priv);
- if (!domain)
- panic("Failed to add irqdomain");
-
- irq_set_chained_handler(parent_irq, bcm3384_intc_irq_handler);
- irq_set_handler_data(parent_irq, domain);
-
- return 0;
-}
-
-static struct of_device_id of_irq_ids[] __initdata = {
- { .compatible = "mti,cpu-interrupt-controller",
- .data = mips_cpu_irq_of_init },
- { .compatible = "brcm,bcm3384-intc",
- .data = intc_of_init },
- {},
-};
-
-void __init arch_init_irq(void)
-{
- bmips_tp1_irqs = 0;
- of_irq_init(of_irq_ids);
-}
diff --git a/arch/mips/bcm3384/setup.c b/arch/mips/bcm3384/setup.c
deleted file mode 100644
index d84b8400b874..000000000000
--- a/arch/mips/bcm3384/setup.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
- * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
- */
-
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/clk-provider.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
-#include <linux/smp.h>
-#include <asm/addrspace.h>
-#include <asm/bmips.h>
-#include <asm/bootinfo.h>
-#include <asm/prom.h>
-#include <asm/smp-ops.h>
-#include <asm/time.h>
-
-void __init prom_init(void)
-{
- register_bmips_smp_ops();
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
-
-const char *get_system_type(void)
-{
- return "BCM3384";
-}
-
-void __init plat_time_init(void)
-{
- struct device_node *np;
- u32 freq;
-
- np = of_find_node_by_name(NULL, "cpus");
- if (!np)
- panic("missing 'cpus' DT node");
- if (of_property_read_u32(np, "mips-hpt-frequency", &freq) < 0)
- panic("missing 'mips-hpt-frequency' property");
- of_node_put(np);
-
- mips_hpt_frequency = freq;
-}
-
-void __init plat_mem_setup(void)
-{
- void *dtb = __dtb_start;
-
- set_io_port_base(0);
- ioport_resource.start = 0;
- ioport_resource.end = ~0;
-
- /* intended to somewhat resemble ARM; see Documentation/arm/Booting */
- if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
- dtb = phys_to_virt(fw_arg2);
-
- __dt_setup_arch(dtb);
-
- strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-}
-
-void __init device_tree_init(void)
-{
- struct device_node *np;
-
- unflatten_and_copy_device_tree();
-
- /* Disable SMP boot unless both CPUs are listed in DT and !disabled */
- np = of_find_node_by_name(NULL, "cpus");
- if (np && of_get_available_child_count(np) <= 1)
- bmips_smp_enabled = 0;
- of_node_put(np);
-}
-
-int __init plat_of_setup(void)
-{
- return __dt_register_buses("brcm,bcm3384", "simple-bus");
-}
-
-arch_initcall(plat_of_setup);
-
-static int __init plat_dev_init(void)
-{
- of_clk_init(NULL);
- return 0;
-}
-
-device_initcall(plat_dev_init);
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
index ea909a56a3ee..41796befa9df 100644
--- a/arch/mips/bcm47xx/bcm47xx_private.h
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -1,6 +1,10 @@
#ifndef LINUX_BCM47XX_PRIVATE_H_
#define LINUX_BCM47XX_PRIVATE_H_
+#ifndef pr_fmt
+#define pr_fmt(fmt) "bcm47xx: " fmt
+#endif
+
#include <linux/kernel.h>
/* prom.c */
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index b3ae068ca4fa..bd56415f2f3b 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -1,8 +1,8 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
+#include <bcm47xx.h>
#include <bcm47xx_board.h>
-#include <bcm47xx_nvram.h>
struct bcm47xx_board_type {
const enum bcm47xx_board board;
@@ -40,20 +40,6 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_model_name[] __initconst = {
{ {0}, NULL},
};
-/* model_no */
-static const
-struct bcm47xx_board_type_list1 bcm47xx_board_list_model_no[] __initconst = {
- {{BCM47XX_BOARD_ASUS_WL700GE, "Asus WL700"}, "WL700"},
- { {0}, NULL},
-};
-
-/* machine_name */
-static const
-struct bcm47xx_board_type_list1 bcm47xx_board_list_machine_name[] __initconst = {
- {{BCM47XX_BOARD_LINKSYS_WRTSL54GS, "Linksys WRTSL54GS"}, "WRTSL54GS"},
- { {0}, NULL},
-};
-
/* hardware_version */
static const
struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initconst = {
@@ -165,9 +151,11 @@ static const
struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = {
{{BCM47XX_BOARD_NETGEAR_WGR614V8, "Netgear WGR614 V8"}, "U12H072T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WGR614V9, "Netgear WGR614 V9"}, "U12H094T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WGR614_V10, "Netgear WGR614 V10"}, "U12H139T01_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3300, "Netgear WNDR3300"}, "U12H093T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3400V1, "Netgear WNDR3400 V1"}, "U12H155T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3400V2, "Netgear WNDR3400 V2"}, "U12H187T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400_V3, "Netgear WNDR3400 V3"}, "U12H208T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3400VCNA, "Netgear WNDR3400 Vcna"}, "U12H155T01_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3700V3, "Netgear WNDR3700 V3"}, "U12H194T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR4000, "Netgear WNDR4000"}, "U12H181T00_NETGEAR"},
@@ -202,6 +190,20 @@ struct bcm47xx_board_type_list2 bcm47xx_board_list_board_type_rev[] __initconst
{ {0}, NULL},
};
+/*
+ * Some devices don't use any common NVRAM entry for identification and have
+ * only one model-specific variable.
+ * They don't deserve their own arrays, so let's group them here using a
+ * key-value array.
+ */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_key_value[] __initconst = {
+ {{BCM47XX_BOARD_ASUS_WL700GE, "Asus WL700"}, "model_no", "WL700"},
+ {{BCM47XX_BOARD_LINKSYS_WRT300N_V1, "Linksys WRT300N V1"}, "router_name", "WRT300N"},
+ {{BCM47XX_BOARD_LINKSYS_WRT600N_V11, "Linksys WRT600N V1.1"}, "Model_Name", "WRT600N"},
+ {{BCM47XX_BOARD_LINKSYS_WRTSL54GS, "Linksys WRTSL54GS"}, "machine_name", "WRTSL54GS"},
+ { {0}, NULL},
+};
+
static const
struct bcm47xx_board_type bcm47xx_board_unknown[] __initconst = {
{BCM47XX_BOARD_UNKNOWN, "Unknown Board"},
@@ -225,20 +227,6 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
}
}
- if (bcm47xx_nvram_getenv("model_no", buf1, sizeof(buf1)) >= 0) {
- for (e1 = bcm47xx_board_list_model_no; e1->value1; e1++) {
- if (strstarts(buf1, e1->value1))
- return &e1->board;
- }
- }
-
- if (bcm47xx_nvram_getenv("machine_name", buf1, sizeof(buf1)) >= 0) {
- for (e1 = bcm47xx_board_list_machine_name; e1->value1; e1++) {
- if (strstarts(buf1, e1->value1))
- return &e1->board;
- }
- }
-
if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0) {
for (e1 = bcm47xx_board_list_hardware_version; e1->value1; e1++) {
if (strstarts(buf1, e1->value1))
@@ -247,8 +235,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
}
if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
- bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
- for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
+ bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
+ for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
if (!strstarts(buf1, e2->value1) &&
!strcmp(buf2, e2->value2))
return &e2->board;
@@ -314,6 +302,14 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
return &e2->board;
}
}
+
+ for (e2 = bcm47xx_board_list_key_value; e2->value1; e2++) {
+ if (bcm47xx_nvram_getenv(e2->value1, buf1, sizeof(buf1)) >= 0) {
+ if (!strcmp(buf1, e2->value2))
+ return &e2->board;
+ }
+ }
+
return bcm47xx_board_unknown;
}
@@ -330,9 +326,8 @@ void __init bcm47xx_board_detect(void)
err = bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf));
/* init of nvram failed, probably too early now */
- if (err == -ENXIO) {
+ if (err == -ENXIO)
return;
- }
board_detected = bcm47xx_board_get_nvram();
bcm47xx_board.board = board_detected->board;
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 913182bcafb8..276276a8c6d7 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -252,6 +252,12 @@ bcm47xx_buttons_linksys_wrt160nv3[] __initconst = {
};
static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt300n_v1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
bcm47xx_buttons_linksys_wrt300nv11[] __initconst = {
BCM47XX_GPIO_KEY(4, KEY_UNKNOWN),
BCM47XX_GPIO_KEY(6, KEY_RESTART),
@@ -327,6 +333,12 @@ bcm47xx_buttons_netgear_wndr3400v1[] __initconst = {
};
static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr3400_v3[] __initconst = {
+ BCM47XX_GPIO_KEY(12, KEY_RESTART),
+ BCM47XX_GPIO_KEY(23, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
bcm47xx_buttons_netgear_wndr3700v3[] __initconst = {
BCM47XX_GPIO_KEY(2, KEY_RFKILL),
BCM47XX_GPIO_KEY(3, KEY_RESTART),
@@ -516,6 +528,9 @@ int __init bcm47xx_buttons_register(void)
case BCM47XX_BOARD_LINKSYS_WRT160NV3:
err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt160nv3);
break;
+ case BCM47XX_BOARD_LINKSYS_WRT300N_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt300n_v1);
+ break;
case BCM47XX_BOARD_LINKSYS_WRT300NV11:
err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt300nv11);
break;
@@ -557,6 +572,9 @@ int __init bcm47xx_buttons_register(void)
case BCM47XX_BOARD_NETGEAR_WNDR3400V1:
err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3400v1);
break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3400_v3);
+ break;
case BCM47XX_BOARD_NETGEAR_WNDR3700V3:
err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3700v3);
break;
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index 903a656d4119..0e4ade342333 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -292,6 +292,13 @@ bcm47xx_leds_linksys_wrt160nv3[] __initconst = {
};
static const struct gpio_led
+bcm47xx_leds_linksys_wrt300n_v1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
bcm47xx_leds_linksys_wrt300nv11[] __initconst = {
BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
@@ -585,6 +592,9 @@ void __init bcm47xx_leds_register(void)
case BCM47XX_BOARD_LINKSYS_WRT160NV3:
bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt160nv3);
break;
+ case BCM47XX_BOARD_LINKSYS_WRT300N_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt300n_v1);
+ break;
case BCM47XX_BOARD_LINKSYS_WRT300NV11:
bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt300nv11);
break;
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index c5c381c43f17..ba632ff08a13 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -11,15 +11,18 @@
* option) any later version.
*/
+#include <linux/io.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
-#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
-#define NVRAM_SPACE 0x8000
+#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
+#define NVRAM_SPACE 0x10000
+#define NVRAM_MAX_GPIO_ENTRIES 32
+#define NVRAM_MAX_GPIO_VALUE_LEN 30
#define FLASH_MIN 0x00020000 /* Minimum flash size */
@@ -91,20 +94,18 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
return -ENXIO;
found:
-
if (header->len > size)
pr_err("The nvram size accoridng to the header seems to be bigger than the partition on flash\n");
if (header->len > NVRAM_SPACE)
pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
header->len, NVRAM_SPACE);
- src = (u32 *) header;
- dst = (u32 *) nvram_buf;
+ src = (u32 *)header;
+ dst = (u32 *)nvram_buf;
for (i = 0; i < sizeof(struct nvram_header); i += 4)
- *dst++ = *src++;
+ *dst++ = __raw_readl(src++);
for (; i < header->len && i < NVRAM_SPACE && i < size; i += 4)
- *dst++ = le32_to_cpu(*src++);
- memset(dst, 0x0, NVRAM_SPACE - i);
+ *dst++ = readl(src++);
return 0;
}
@@ -138,37 +139,28 @@ static int nvram_init(void)
struct mtd_info *mtd;
struct nvram_header header;
size_t bytes_read;
- int err, i;
+ int err;
mtd = get_mtd_device_nm("nvram");
if (IS_ERR(mtd))
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
- loff_t from = mtd->size - nvram_sizes[i];
+ err = mtd_read(mtd, 0, sizeof(header), &bytes_read, (uint8_t *)&header);
+ if (!err && header.magic == NVRAM_MAGIC) {
+ u8 *dst = (uint8_t *)nvram_buf;
+ size_t len = header.len;
- if (from < 0)
- continue;
-
- err = mtd_read(mtd, from, sizeof(header), &bytes_read,
- (uint8_t *)&header);
- if (!err && header.magic == NVRAM_MAGIC) {
- u8 *dst = (uint8_t *)nvram_buf;
- size_t len = header.len;
-
- if (header.len > NVRAM_SPACE) {
- pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
- header.len, NVRAM_SPACE);
- len = NVRAM_SPACE;
- }
+ if (header.len > NVRAM_SPACE) {
+ pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+ header.len, NVRAM_SPACE);
+ len = NVRAM_SPACE;
+ }
- err = mtd_read(mtd, from, len, &bytes_read, dst);
- if (err)
- return err;
- memset(dst + bytes_read, 0x0, NVRAM_SPACE - bytes_read);
+ err = mtd_read(mtd, 0, len, &bytes_read, dst);
+ if (err)
+ return err;
- return 0;
- }
+ return 0;
}
#endif
@@ -178,7 +170,7 @@ static int nvram_init(void)
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
{
char *var, *value, *end, *eq;
- int err;
+ int data_left, err;
if (!name)
return -EINVAL;
@@ -192,16 +184,18 @@ int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
/* Look for name=value and return value */
var = &nvram_buf[sizeof(struct nvram_header)];
end = nvram_buf + sizeof(nvram_buf) - 2;
- end[0] = end[1] = '\0';
+ end[0] = '\0';
+ end[1] = '\0';
for (; *var; var = value + strlen(value) + 1) {
- eq = strchr(var, '=');
+ data_left = end - var;
+
+ eq = strnchr(var, data_left, '=');
if (!eq)
break;
value = eq + 1;
- if ((eq - var) == strlen(name) &&
- strncmp(var, name, (eq - var)) == 0) {
+ if (eq - var == strlen(name) &&
+ strncmp(var, name, eq - var) == 0)
return snprintf(val, val_len, "%s", value);
- }
}
return -ENOENT;
}
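
A note on the parsing loop above: past struct nvram_header, the NVRAM blob is a
packed list of NUL-terminated name=value strings ended by an empty string, which
is what the "for (; *var; ...)" walk relies on. A minimal illustration (the
variable names and values here are made up):

	static const char example_vars[] =
		"boardtype=0x0467\0"
		"boardnum=42\0";
	/* the literal's implicit trailing NUL plays the role of the final
	 * empty string that terminates the walk */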
@@ -210,10 +204,11 @@ EXPORT_SYMBOL(bcm47xx_nvram_getenv);
int bcm47xx_nvram_gpio_pin(const char *name)
{
int i, err;
- char nvram_var[10];
- char buf[30];
+ char nvram_var[] = "gpioXX";
+ char buf[NVRAM_MAX_GPIO_VALUE_LEN];
- for (i = 0; i < 32; i++) {
+	/* TODO: Optimize this to avoid calling getenv so many times */
+ for (i = 0; i < NVRAM_MAX_GPIO_ENTRIES; i++) {
err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
if (err <= 0)
continue;
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 1b170bf5f7f0..ab698bad6d62 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -35,7 +35,6 @@
#include <bcm47xx.h>
#include <bcm47xx_board.h>
-
static char bcm47xx_system_type[20] = "Broadcom BCM47XX";
const char *get_system_type(void)
@@ -83,7 +82,7 @@ static __init void prom_init_mem(void)
/* Loop condition may be not enough, off may be over 1 MiB */
if (off + mem >= max) {
mem = max;
- printk(KERN_DEBUG "assume 128MB RAM\n");
+ pr_debug("Assume 128MB RAM\n");
break;
}
if (!memcmp(prom_init, prom_init + mem, 32))
diff --git a/arch/mips/bcm47xx/serial.c b/arch/mips/bcm47xx/serial.c
index 2f5bbd68e9a0..df761d38f7fc 100644
--- a/arch/mips/bcm47xx/serial.c
+++ b/arch/mips/bcm47xx/serial.c
@@ -36,8 +36,8 @@ static int __init uart8250_init_ssb(void)
struct plat_serial8250_port *p = &(uart8250_data[i]);
struct ssb_serial_port *ssb_port = &(mcore->serial_ports[i]);
- p->mapbase = (unsigned int) ssb_port->regs;
- p->membase = (void *) ssb_port->regs;
+ p->mapbase = (unsigned int)ssb_port->regs;
+ p->membase = (void *)ssb_port->regs;
p->irq = ssb_port->irq + 2;
p->uartclk = ssb_port->baud_base;
p->regshift = ssb_port->reg_shift;
@@ -62,8 +62,8 @@ static int __init uart8250_init_bcma(void)
struct bcma_serial_port *bcma_port;
bcma_port = &(cc->serial_ports[i]);
- p->mapbase = (unsigned int) bcma_port->regs;
- p->membase = (void *) bcma_port->regs;
+ p->mapbase = (unsigned int)bcma_port->regs;
+ p->membase = (void *)bcma_port->regs;
p->irq = bcma_port->irq;
p->uartclk = bcma_port->baud_base;
p->regshift = bcma_port->reg_shift;
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index e43b5046cb30..82ff9fd2ab6e 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -42,7 +42,6 @@
#include <asm/reboot.h>
#include <asm/time.h>
#include <bcm47xx.h>
-#include <bcm47xx_nvram.h>
#include <bcm47xx_board.h>
union bcm47xx_bus bcm47xx_bus;
@@ -53,7 +52,7 @@ EXPORT_SYMBOL(bcm47xx_bus_type);
static void bcm47xx_machine_restart(char *command)
{
- printk(KERN_ALERT "Please stand by while rebooting the system...\n");
+ pr_alert("Please stand by while rebooting the system...\n");
local_irq_disable();
/* Set the watchdog timer to reset immediately */
switch (bcm47xx_bus_type) {
@@ -108,7 +107,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
char buf[20];
/* Fill boardinfo structure */
- memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo));
+ memset(&iv->boardinfo, 0 , sizeof(struct ssb_boardinfo));
bcm47xx_fill_ssb_boardinfo(&iv->boardinfo, NULL);
@@ -127,7 +126,7 @@ static void __init bcm47xx_register_ssb(void)
char buf[100];
struct ssb_mipscore *mcore;
- err = ssb_bus_ssbbus_register(&(bcm47xx_bus.ssb), SSB_ENUM_BASE,
+ err = ssb_bus_ssbbus_register(&bcm47xx_bus.ssb, SSB_ENUM_BASE,
bcm47xx_get_invariants);
if (err)
panic("Failed to initialize SSB bus (err %d)", err);
@@ -137,7 +136,7 @@ static void __init bcm47xx_register_ssb(void)
if (strstr(buf, "console=ttyS1")) {
struct ssb_serial_port port;
- printk(KERN_DEBUG "Swapping serial ports!\n");
+ pr_debug("Swapping serial ports!\n");
/* swap serial ports */
memcpy(&port, &mcore->serial_ports[0], sizeof(port));
memcpy(&mcore->serial_ports[0], &mcore->serial_ports[1],
@@ -169,7 +168,7 @@ void __init plat_mem_setup(void)
struct cpuinfo_mips *c = &current_cpu_data;
if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
- printk(KERN_INFO "bcm47xx: using bcma bus\n");
+ pr_info("Using bcma bus\n");
#ifdef CONFIG_BCM47XX_BCMA
bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
bcm47xx_sprom_register_fallbacks();
@@ -180,7 +179,7 @@ void __init plat_mem_setup(void)
#endif
#endif
} else {
- printk(KERN_INFO "bcm47xx: using ssb bus\n");
+ pr_info("Using ssb bus\n");
#ifdef CONFIG_BCM47XX_SSB
bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB;
bcm47xx_sprom_register_fallbacks();
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 2eff7fe99c6b..68ebf2322f8b 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -27,7 +27,6 @@
*/
#include <bcm47xx.h>
-#include <bcm47xx_nvram.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
@@ -181,94 +180,245 @@ static void nvram_read_alpha2(const char *prefix, const char *name,
memcpy(val, buf, 2);
}
+/* This is a one-function-only macro; it uses the local "sprom" variable! */
+#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
+ if (_revmask & BIT(sprom->revision)) \
+ nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
+ _allset, _fallback)
+/*
+ * A special version of the filling function that can be safely called for any
+ * SPROM revision. Every NVRAM-to-SPROM mapping carries a bitmask of the
+ * revisions for which that mapping is valid.
+ * It obviously requires some hexadecimal/bitmask knowledge, but it allows
+ * writing cleaner code (revisions are easy to handle).
+ * Note that while SPROM revision 0 was never used, we still keep BIT(0)
+ * reserved for it, just to keep numbering sane.
+ */
+static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ const char *pre = prefix;
+ bool fb = fallback;
+
+ ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
+ ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
+ ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
+ ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
+ ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
+ ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
+
+ ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
+
+ ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
+
+ ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
+ ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
+
+ ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
+ ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
+ ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
+ ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
+ ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
+
+ ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
+
+ ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
+ ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
+ ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
+ ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
+
+ ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
+ ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
+}
+#undef ENTRY /* It's specific, uses a local variable, don't use it (again). */
+
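
To decode the revision masks above: each ENTRY() line is simply a guarded
nvram_read_<type>() call, with the first argument selecting which SPROM
revisions the mapping applies to. As a sketch, a line such as
ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb) expands to roughly:

	/* 0x0000070e covers BIT(1)..BIT(3) and BIT(8)..BIT(10), i.e. revs 1-3 and 8-10 */
	if (0x0000070e & BIT(sprom->revision))
		nvram_read_u16(pre, NULL, "pa0b0", &sprom->pa0b0, 0, fb);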
static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
nvram_read_u16(prefix, NULL, "devid", &sprom->dev_id, 0, fallback);
- nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback);
- nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback);
- nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback);
- nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff, fallback);
- nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0,
- fallback);
- nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0,
- fallback);
- nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0,
- fallback);
nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
}
-static void bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom,
- const char *prefix, bool fallback)
-{
- nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0, fallback);
- nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0, fallback);
- nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0,
- fallback);
- nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0, fallback);
- nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0, fallback);
- nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0, fallback);
-}
-
-static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0, fallback);
-}
-
-static void bcm47xx_fill_sprom_r2389(struct ssb_sprom *sprom,
- const char *prefix, bool fallback)
-{
- nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0, fallback);
- nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0, fallback);
- nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0,
- fallback);
-}
-
-static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0, fallback);
- nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0, fallback);
- nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0, fallback);
- nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0, fallback);
- nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0, fallback);
- nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0, fallback);
- nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0, fallback);
- nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0, fallback);
-}
-
static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
- nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback);
nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
&sprom->leddc_off_time, fallback);
}
@@ -276,309 +426,10 @@ static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix,
static void bcm47xx_fill_sprom_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
- nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback);
- nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0,
- fallback);
- nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf, fallback);
- nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf, fallback);
- nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff,
- fallback);
nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
&sprom->leddc_off_time, fallback);
}
-static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0,
- fallback);
- nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0, fallback);
- nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0, fallback);
- nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0, fallback);
- nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0, fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0,
- fallback);
-}
-
-static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0,
- fallback);
-}
-
-static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "extpagain2g",
- &sprom->fem.ghz2.extpa_gain, 0, fallback);
- nvram_read_u8(prefix, NULL, "pdetrange2g",
- &sprom->fem.ghz2.pdet_range, 0, fallback);
- nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "extpagain5g",
- &sprom->fem.ghz5.extpa_gain, 0, fallback);
- nvram_read_u8(prefix, NULL, "pdetrange5g",
- &sprom->fem.ghz5.pdet_range, 0, fallback);
- nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0,
- fallback);
- nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "tempsense_slope",
- &sprom->tempsense_slope, 0, fallback);
- nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "tempsense_option",
- &sprom->tempsense_option, 0, fallback);
- nvram_read_u8(prefix, NULL, "freqoffset_corr",
- &sprom->freqoffset_corr, 0, fallback);
- nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0, fallback);
- nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0, fallback);
- nvram_read_u8(prefix, NULL, "phycal_tempdelta",
- &sprom->phycal_tempdelta, 0, fallback);
- nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "temps_hysteresis",
- &sprom->temps_hysteresis, 0, fallback);
- nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr2ga0",
- &sprom->rxgainerr2ga[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr2ga1",
- &sprom->rxgainerr2ga[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr2ga2",
- &sprom->rxgainerr2ga[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gla0",
- &sprom->rxgainerr5gla[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gla1",
- &sprom->rxgainerr5gla[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gla2",
- &sprom->rxgainerr5gla[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gma0",
- &sprom->rxgainerr5gma[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gma1",
- &sprom->rxgainerr5gma[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gma2",
- &sprom->rxgainerr5gma[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gha0",
- &sprom->rxgainerr5gha[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gha1",
- &sprom->rxgainerr5gha[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gha2",
- &sprom->rxgainerr5gha[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gua0",
- &sprom->rxgainerr5gua[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gua1",
- &sprom->rxgainerr5gua[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "rxgainerr5gua2",
- &sprom->rxgainerr5gua[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0,
- fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gla0",
- &sprom->noiselvl5gla[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gla1",
- &sprom->noiselvl5gla[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gla2",
- &sprom->noiselvl5gla[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gma0",
- &sprom->noiselvl5gma[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gma1",
- &sprom->noiselvl5gma[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gma2",
- &sprom->noiselvl5gma[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gha0",
- &sprom->noiselvl5gha[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gha1",
- &sprom->noiselvl5gha[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gha2",
- &sprom->noiselvl5gha[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gua0",
- &sprom->noiselvl5gua[0], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gua1",
- &sprom->noiselvl5gua[1], 0, fallback);
- nvram_read_u8(prefix, NULL, "noiselvl5gua2",
- &sprom->noiselvl5gua[2], 0, fallback);
- nvram_read_u8(prefix, NULL, "pcieingress_war",
- &sprom->pcieingress_war, 0, fallback);
-}
-
-static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0,
- fallback);
- nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw202gpo",
- &sprom->legofdmbw202gpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw20ul2gpo",
- &sprom->legofdmbw20ul2gpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw205glpo",
- &sprom->legofdmbw205glpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw20ul5glpo",
- &sprom->legofdmbw20ul5glpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw205gmpo",
- &sprom->legofdmbw205gmpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw20ul5gmpo",
- &sprom->legofdmbw20ul5gmpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw205ghpo",
- &sprom->legofdmbw205ghpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "legofdmbw20ul5ghpo",
- &sprom->legofdmbw20ul5ghpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw20ul5glpo",
- &sprom->mcsbw20ul5glpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw20ul5gmpo",
- &sprom->mcsbw20ul5gmpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0,
- fallback);
- nvram_read_u32(prefix, NULL, "mcsbw20ul5ghpo",
- &sprom->mcsbw20ul5ghpo, 0, fallback);
- nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0,
- fallback);
- nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0, fallback);
- nvram_read_u16(prefix, NULL, "legofdm40duppo",
- &sprom->legofdm40duppo, 0, fallback);
- nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0, fallback);
- nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0, fallback);
-}
-
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
@@ -715,10 +566,6 @@ static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
- nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0, true);
- nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0,
- fallback);
- nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, true);
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi, fallback);
nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
@@ -736,58 +583,39 @@ void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
switch (sprom->revision) {
case 1:
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r1(sprom, prefix, fallback);
break;
case 2:
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
break;
case 3:
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r3(sprom, prefix, fallback);
break;
case 4:
case 5:
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r458(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r45(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
break;
case 8:
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r458(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r89(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
break;
case 9:
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r89(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r9(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
break;
default:
- pr_warn("Unsupported SPROM revision %d detected. Will extract"
- " v1\n", sprom->revision);
+ pr_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
+ sprom->revision);
sprom->revision = 1;
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
- bcm47xx_fill_sprom_r1(sprom, prefix, fallback);
}
+
+ bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
}
#ifdef CONFIG_BCM47XX_SSB
@@ -829,13 +657,45 @@ static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
bcm47xx_fill_sprom(out, prefix, false);
return 0;
} else {
- pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
+ pr_warn("Unable to fill SPROM for given bustype.\n");
return -EINVAL;
}
}
#endif
#if defined(CONFIG_BCM47XX_BCMA)
+/*
+ * Having many NVRAM entries for PCI devices led to repeating prefixes like
+ * pci/1/1/ all the time and wasting flash space. So at some point Broadcom
+ * decided to introduce prefixes like 0: 1: 2: etc.
+ * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
+ * instead of pci/2/1/.
+ */
+static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
+{
+ size_t prefix_len = strlen(prefix);
+ size_t short_len = prefix_len - 1;
+ char nvram_var[10];
+ char buf[20];
+ int i;
+
+ /* Passed prefix has to end with a slash */
+ if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
+ return;
+
+ for (i = 0; i < 3; i++) {
+ if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
+ continue;
+ if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
+ continue;
+ if (!strcmp(buf, prefix) ||
+ (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
+ snprintf(prefix, prefix_size, "%d:", i);
+ return;
+ }
+ }
+}
+
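
To make the helper's effect concrete, a minimal sketch of the intended call
sequence (the bus and slot numbers here are hypothetical):

	char prefix[10];

	snprintf(prefix, sizeof(prefix), "pci/%u/%u/", 2, 1);	/* "pci/2/1/" */
	bcm47xx_sprom_apply_prefix_alias(prefix, sizeof(prefix));
	/* prefix is now "0:" if NVRAM contains devpath0=pci/2/1 (or pci/2/1/) */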
static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
{
char prefix[10];
@@ -847,6 +707,7 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
+ bcm47xx_sprom_apply_prefix_alias(prefix, sizeof(prefix));
bcm47xx_fill_sprom(out, prefix, false);
return 0;
case BCMA_HOSTTYPE_SOC:
@@ -861,7 +722,7 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
}
return 0;
default:
- pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
+ pr_warn("Unable to fill SPROM for given bustype.\n");
return -EINVAL;
}
}
diff --git a/arch/mips/bcm47xx/time.c b/arch/mips/bcm47xx/time.c
index 2c85d9254b5e..74224cf2e84d 100644
--- a/arch/mips/bcm47xx/time.c
+++ b/arch/mips/bcm47xx/time.c
@@ -22,12 +22,10 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
#include <linux/init.h>
#include <linux/ssb/ssb.h>
#include <asm/time.h>
#include <bcm47xx.h>
-#include <bcm47xx_nvram.h>
#include <bcm47xx_board.h>
void __init plat_time_init(void)
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index b94bf44d8d8e..e3e808a6c542 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -58,9 +58,9 @@ static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
#ifdef CONFIG_SMP
if (m)
- enable &= cpu_isset(cpu, *m);
+ enable &= cpumask_test_cpu(cpu, m);
else if (irqd_affinity_was_set(d))
- enable &= cpu_isset(cpu, *d->affinity);
+ enable &= cpumask_test_cpu(cpu, d->affinity);
#endif
return enable;
}
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index e1f27d653f60..7019e2967009 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -17,7 +17,6 @@
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
-#include <bcm63xx_gpio.h>
void __init prom_init(void)
{
@@ -53,9 +52,6 @@ void __init prom_init(void)
reg &= ~mask;
bcm_perf_writel(reg, PERF_CKCTL_REG);
- /* register gpiochip */
- bcm63xx_gpio_init();
-
/* do low level board init */
board_prom_init();
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 6660c7ddf87b..240fb4ffa55c 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -20,6 +20,7 @@
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
+#include <bcm63xx_gpio.h>
void bcm63xx_machine_halt(void)
{
@@ -160,6 +161,9 @@ void __init plat_mem_setup(void)
int __init bcm63xx_register_devices(void)
{
+ /* register gpiochip */
+ bcm63xx_gpio_init();
+
return board_register_devices();
}
diff --git a/arch/mips/bmips/Kconfig b/arch/mips/bmips/Kconfig
new file mode 100644
index 000000000000..f35c84c019df
--- /dev/null
+++ b/arch/mips/bmips/Kconfig
@@ -0,0 +1,62 @@
+if BMIPS_GENERIC
+
+choice
+ prompt "Built-in device tree"
+ help
+ Legacy bootloaders do not pass a DTB pointer to the kernel, so
+ if a "wrapper" is not being used, the kernel will need to include
+ a device tree that matches the target board.
+
+ The builtin DTB will only be used if the firmware does not supply
+ a valid DTB.
+
+config DT_NONE
+ bool "None"
+
+config DT_BCM93384WVG
+ bool "BCM93384WVG Zephyr CPU"
+ select BUILTIN_DTB
+
+config DT_BCM93384WVG_VIPER
+ bool "BCM93384WVG Viper CPU (EXPERIMENTAL)"
+ select BUILTIN_DTB
+
+config DT_BCM96368MVWG
+ bool "BCM96368MVWG"
+ select BUILTIN_DTB
+
+config DT_BCM9EJTAGPRB
+ bool "BCM9EJTAGPRB"
+ select BUILTIN_DTB
+
+config DT_BCM97125CBMB
+ bool "BCM97125CBMB"
+ select BUILTIN_DTB
+
+config DT_BCM97346DBSMB
+ bool "BCM97346DBSMB"
+ select BUILTIN_DTB
+
+config DT_BCM97358SVMB
+ bool "BCM97358SVMB"
+ select BUILTIN_DTB
+
+config DT_BCM97360SVMB
+ bool "BCM97360SVMB"
+ select BUILTIN_DTB
+
+config DT_BCM97362SVMB
+ bool "BCM97362SVMB"
+ select BUILTIN_DTB
+
+config DT_BCM97420C
+ bool "BCM97420C"
+ select BUILTIN_DTB
+
+config DT_BCM97425SVMB
+ bool "BCM97425SVMB"
+ select BUILTIN_DTB
+
+endchoice
+
+endif
diff --git a/arch/mips/bcm3384/Makefile b/arch/mips/bmips/Makefile
index a393955cba08..a393955cba08 100644
--- a/arch/mips/bcm3384/Makefile
+++ b/arch/mips/bmips/Makefile
diff --git a/arch/mips/bmips/Platform b/arch/mips/bmips/Platform
new file mode 100644
index 000000000000..5f127fd7f4b5
--- /dev/null
+++ b/arch/mips/bmips/Platform
@@ -0,0 +1,7 @@
+#
+# Broadcom Generic BMIPS kernel
+#
+platform-$(CONFIG_BMIPS_GENERIC) += bmips/
+cflags-$(CONFIG_BMIPS_GENERIC) += \
+ -I$(srctree)/arch/mips/include/asm/mach-bmips/
+load-$(CONFIG_BMIPS_GENERIC) := 0xffffffff80010000
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
new file mode 100644
index 000000000000..04790f4e1805
--- /dev/null
+++ b/arch/mips/bmips/dma.c
@@ -0,0 +1,117 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#define pr_fmt(fmt) "bmips-dma: " fmt
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <dma-coherence.h>
+
+/*
+ * BCM338x has configurable address translation windows which allow the
+ * peripherals' DMA addresses to be different from the Zephyr-visible
+ * physical addresses. e.g. usb_dma_addr = zephyr_pa ^ 0x08000000
+ *
+ * If the "brcm,ubus" node has a "dma-ranges" property we will enable this
+ * translation globally using the provided information. This implements a
+ * very limited subset of "dma-ranges" support and it will probably be
+ * replaced by a more generic version later.
+ */
+
+struct bmips_dma_range {
+ u32 child_addr;
+ u32 parent_addr;
+ u32 size;
+};
+
+static struct bmips_dma_range *bmips_dma_ranges;
+
+#define FLUSH_RAC 0x100
+
+static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
+{
+ struct bmips_dma_range *r;
+
+ for (r = bmips_dma_ranges; r && r->size; r++) {
+ if (pa >= r->child_addr &&
+ pa < (r->child_addr + r->size))
+ return pa - r->child_addr + r->parent_addr;
+ }
+ return pa;
+}
+
+dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+{
+ return bmips_phys_to_dma(dev, virt_to_phys(addr));
+}
+
+dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+{
+ return bmips_phys_to_dma(dev, page_to_phys(page));
+}
+
+unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ struct bmips_dma_range *r;
+
+ for (r = bmips_dma_ranges; r && r->size; r++) {
+ if (dma_addr >= r->parent_addr &&
+ dma_addr < (r->parent_addr + r->size))
+ return dma_addr - r->parent_addr + r->child_addr;
+ }
+ return dma_addr;
+}
+
+static int __init bmips_init_dma_ranges(void)
+{
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "brcm,ubus");
+ const __be32 *data;
+ struct bmips_dma_range *r;
+ int len;
+
+ if (!np)
+ return 0;
+
+ data = of_get_property(np, "dma-ranges", &len);
+ if (!data)
+ goto out_good;
+
+ len /= sizeof(*data) * 3;
+ if (!len)
+ goto out_bad;
+
+ /* add a dummy (zero) entry at the end as a sentinel */
+ bmips_dma_ranges = kzalloc(sizeof(struct bmips_dma_range) * (len + 1),
+ GFP_KERNEL);
+ if (!bmips_dma_ranges)
+ goto out_bad;
+
+ for (r = bmips_dma_ranges; len; len--, r++) {
+ r->child_addr = be32_to_cpup(data++);
+ r->parent_addr = be32_to_cpup(data++);
+ r->size = be32_to_cpup(data++);
+ }
+
+out_good:
+ of_node_put(np);
+ return 0;
+
+out_bad:
+ pr_err("error parsing dma-ranges property\n");
+ of_node_put(np);
+ return -EINVAL;
+}
+arch_initcall(bmips_init_dma_ranges);
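
The address translation set up above can be made concrete with a minimal sketch. The snippet below is not part of the patch; it is a stand-alone user-space C illustration of the range walk that bmips_phys_to_dma() and plat_dma_addr_to_phys() perform, reusing the example "dma-ranges" cells from bcm3384_zephyr.dtsi later in this series (<0x00000000 0x08000000 0x08000000>, <0x08000000 0x00000000 0x08000000>), which realize the usb_dma_addr = zephyr_pa ^ 0x08000000 mapping described in the comment. The names ranges and phys_to_dma are illustrative only, not kernel symbols.

/* Minimal sketch, assuming the two example ranges from bcm3384_zephyr.dtsi. */
#include <stdint.h>
#include <stdio.h>

struct range { uint32_t child, parent, size; };

static const struct range ranges[] = {
	{ 0x00000000, 0x08000000, 0x08000000 },
	{ 0x08000000, 0x00000000, 0x08000000 },
	{ 0, 0, 0 },	/* zero-size sentinel, as in bmips_init_dma_ranges() */
};

/* Same walk as bmips_phys_to_dma(): first matching child range wins. */
static uint32_t phys_to_dma(uint32_t pa)
{
	const struct range *r;

	for (r = ranges; r->size; r++)
		if (pa >= r->child && pa < r->child + r->size)
			return pa - r->child + r->parent;
	return pa;	/* no match: identity mapping */
}

int main(void)
{
	/*
	 * 0x06000000 falls in the first range, so it maps to
	 * 0x06000000 - 0x00000000 + 0x08000000 = 0x0e000000,
	 * i.e. zephyr_pa ^ 0x08000000.
	 */
	printf("0x%08x\n", phys_to_dma(0x06000000));
	return 0;
}

Compiling and running this prints 0x0e000000 for a physical address of 0x06000000, matching the XOR-with-0x08000000 behaviour the driver comment describes; the reverse lookup in plat_dma_addr_to_phys() is the same walk with child and parent swapped.
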
diff --git a/arch/mips/bmips/irq.c b/arch/mips/bmips/irq.c
new file mode 100644
index 000000000000..14552e58ff7e
--- /dev/null
+++ b/arch/mips/bmips/irq.c
@@ -0,0 +1,38 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Author: Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#include <linux/of.h>
+#include <linux/irqchip.h>
+
+#include <asm/bmips.h>
+#include <asm/irq.h>
+#include <asm/irq_cpu.h>
+#include <asm/time.h>
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+void __init arch_init_irq(void)
+{
+ struct device_node *dn;
+
+ /* Only the STB (bcm7038) controller supports SMP IRQ affinity */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,bcm7038-l1-intc");
+ if (dn)
+ of_node_put(dn);
+ else
+ bmips_tp1_irqs = 0;
+
+ irqchip_init();
+}
+
+OF_DECLARE_2(irqchip, mips_cpu_intc, "mti,cpu-interrupt-controller",
+ mips_cpu_irq_of_init);
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
new file mode 100644
index 000000000000..fae800e8b1e1
--- /dev/null
+++ b/arch/mips/bmips/setup.c
@@ -0,0 +1,194 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/bootmem.h>
+#include <linux/clk-provider.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <asm/addrspace.h>
+#include <asm/bmips.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu-type.h>
+#include <asm/mipsregs.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/time.h>
+#include <asm/traps.h>
+
+#define RELO_NORMAL_VEC BIT(18)
+
+#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
+#define BCM6328_TP1_DISABLED BIT(9)
+
+static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
+
+struct bmips_quirk {
+ const char *compatible;
+ void (*quirk_fn)(void);
+};
+
+static void kbase_setup(void)
+{
+ __raw_writel(kbase | RELO_NORMAL_VEC,
+ BMIPS_GET_CBR() + BMIPS_RELO_VECTOR_CONTROL_1);
+ ebase = kbase;
+}
+
+static void bcm3384_viper_quirks(void)
+{
+ /*
+ * Some experimental CM boxes are set up to let CM own the Viper TP0
+ * and let Linux own TP1. This requires moving the kernel
+ * load address to a non-conflicting region (e.g. via
+ * CONFIG_PHYSICAL_START) and supplying an alternate DTB.
+ * If we detect this condition, we need to move the MIPS exception
+ * vectors up to an area that we own.
+ *
+ * This is distinct from the OTHER special case mentioned in
+ * smp-bmips.c (boot on TP1, but enable SMP, then TP0 becomes our
+ * logical CPU#1). For the Viper TP1 case, SMP is off limits.
+ *
+ * Also note that many BMIPS435x CPUs do not have a
+ * BMIPS_RELO_VECTOR_CONTROL_1 register, so it isn't safe to just
+ * write VMLINUX_LOAD_ADDRESS into that register on every SoC.
+ */
+ board_ebase_setup = &kbase_setup;
+ bmips_smp_enabled = 0;
+}
+
+static void bcm63xx_fixup_cpu1(void)
+{
+ /*
+ * The bootloader has set up the CPU1 reset vector at
+ * 0xa000_0200.
+ * This conflicts with the special interrupt vector (IV).
+ * The bootloader has also set up CPU1 to respond to the wrong
+ * IPI interrupt.
+ * Here we will start up CPU1 in the background and ask it to
+ * reconfigure itself then go back to sleep.
+ */
+ memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
+ __sync();
+ set_c0_cause(C_SW0);
+ cpumask_set_cpu(1, &bmips_booted_mask);
+}
+
+static void bcm6328_quirks(void)
+{
+ /* Check CPU1 status in OTP (it is usually disabled) */
+ if (__raw_readl(REG_BCM6328_OTP) & BCM6328_TP1_DISABLED)
+ bmips_smp_enabled = 0;
+ else
+ bcm63xx_fixup_cpu1();
+}
+
+static void bcm6368_quirks(void)
+{
+ bcm63xx_fixup_cpu1();
+}
+
+static const struct bmips_quirk bmips_quirk_list[] = {
+ { "brcm,bcm3384-viper", &bcm3384_viper_quirks },
+ { "brcm,bcm33843-viper", &bcm3384_viper_quirks },
+ { "brcm,bcm6328", &bcm6328_quirks },
+ { "brcm,bcm6368", &bcm6368_quirks },
+ { },
+};
+
+void __init prom_init(void)
+{
+ register_bmips_smp_ops();
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+const char *get_system_type(void)
+{
+ return "Generic BMIPS kernel";
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ u32 freq;
+
+ np = of_find_node_by_name(NULL, "cpus");
+ if (!np)
+ panic("missing 'cpus' DT node");
+ if (of_property_read_u32(np, "mips-hpt-frequency", &freq) < 0)
+ panic("missing 'mips-hpt-frequency' property");
+ of_node_put(np);
+
+ mips_hpt_frequency = freq;
+}
+
+void __init plat_mem_setup(void)
+{
+ void *dtb;
+ const struct bmips_quirk *q;
+
+ set_io_port_base(0);
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ /* intended to somewhat resemble ARM; see Documentation/arm/Booting */
+ if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
+ dtb = phys_to_virt(fw_arg2);
+ else if (__dtb_start != __dtb_end)
+ dtb = (void *)__dtb_start;
+ else
+ panic("no dtb found");
+
+ __dt_setup_arch(dtb);
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+
+ for (q = bmips_quirk_list; q->quirk_fn; q++) {
+ if (of_flat_dt_is_compatible(of_get_flat_dt_root(),
+ q->compatible)) {
+ q->quirk_fn();
+ }
+ }
+}
+
+void __init device_tree_init(void)
+{
+ struct device_node *np;
+
+ unflatten_and_copy_device_tree();
+
+ /* Disable SMP boot unless both CPUs are listed in DT and !disabled */
+ np = of_find_node_by_name(NULL, "cpus");
+ if (np && of_get_available_child_count(np) <= 1)
+ bmips_smp_enabled = 0;
+ of_node_put(np);
+}
+
+int __init plat_of_setup(void)
+{
+ return __dt_register_buses("simple-bus", NULL);
+}
+
+arch_initcall(plat_of_setup);
+
+static int __init plat_dev_init(void)
+{
+ of_clk_init(NULL);
+ return 0;
+}
+
+device_initcall(plat_dev_init);
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 61af6b6ab13d..dc91bde10d62 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -12,6 +12,8 @@
# Author: Wu Zhangjin <wuzhangjin@gmail.com>
#
+include $(srctree)/arch/mips/Kbuild.platforms
+
# set the default size of the mallocing area for decompressing
BOOT_HEAP_SIZE := 0x400000
@@ -30,9 +32,10 @@ KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
targets := head.o decompress.o string.o dbg.o uart-16550.o uart-alchemy.o
# decompressor objects (linked with vmlinuz)
-vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/dbg.o
+vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
ifdef CONFIG_DEBUG_ZBOOT
+vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT) += $(obj)/dbg.o
vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
endif
@@ -66,8 +69,8 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
# Calculate the load address of the compressed kernel image
hostprogs-y := calc_vmlinuz_load_addr
-ifeq ($(CONFIG_MACH_JZ4740),y)
-VMLINUZ_LOAD_ADDRESS := 0x80600000
+ifneq ($(zload-y),)
+VMLINUZ_LOAD_ADDRESS := $(zload-y)
else
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 31903cf9709d..54831069a206 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -28,8 +28,13 @@ unsigned long free_mem_end_ptr;
extern unsigned char __image_begin, __image_end;
/* debug interfaces */
+#ifdef CONFIG_DEBUG_ZBOOT
extern void puts(const char *s);
extern void puthex(unsigned long long val);
+#else
+#define puts(s) do {} while (0)
+#define puthex(val) do {} while (0)
+#endif
void error(char *x)
{
diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile
index 4f49fa477f14..5d95e4bd709a 100644
--- a/arch/mips/boot/dts/Makefile
+++ b/arch/mips/boot/dts/Makefile
@@ -1,21 +1,12 @@
-dtb-$(CONFIG_BCM3384) += bcm93384wvg.dtb
-dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
-dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
-dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb
-dtb-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb
-dtb-$(CONFIG_DT_XLP_FVP) += xlp_fvp.dtb
-dtb-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb
-dtb-$(CONFIG_DTB_RT2880_EVAL) += rt2880_eval.dtb
-dtb-$(CONFIG_DTB_RT305X_EVAL) += rt3052_eval.dtb
-dtb-$(CONFIG_DTB_RT3883_EVAL) += rt3883_eval.dtb
-dtb-$(CONFIG_DTB_MT7620A_EVAL) += mt7620a_eval.dtb
-dtb-$(CONFIG_MIPS_SEAD3) += sead3.dtb
-
-obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-targets += dtbs
-targets += $(dtb-y)
-
-dtbs: $(addprefix $(obj)/, $(dtb-y))
-
-clean-files += *.dtb *.dtb.S
+dts-dirs += brcm
+dts-dirs += cavium-octeon
+dts-dirs += lantiq
+dts-dirs += mti
+dts-dirs += netlogic
+dts-dirs += ralink
+
+obj-y := $(addsuffix /, $(dts-dirs))
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/bcm3384.dtsi b/arch/mips/boot/dts/bcm3384.dtsi
deleted file mode 100644
index 21b074a99c94..000000000000
--- a/arch/mips/boot/dts/bcm3384.dtsi
+++ /dev/null
@@ -1,109 +0,0 @@
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "brcm,bcm3384", "brcm,bcm33843";
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* On BMIPS5000 this is 1/8th of the CPU core clock */
- mips-hpt-frequency = <100000000>;
-
- cpu@0 {
- compatible = "brcm,bmips5000";
- device_type = "cpu";
- reg = <0>;
- };
-
- cpu@1 {
- compatible = "brcm,bmips5000";
- device_type = "cpu";
- reg = <1>;
- };
- };
-
- clocks {
- #address-cells = <1>;
- #size-cells = <0>;
-
- periph_clk: periph_clk@0 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <54000000>;
- };
- };
-
- aliases {
- uart0 = &uart0;
- };
-
- cpu_intc: cpu_intc@0 {
- #address-cells = <0>;
- compatible = "mti,cpu-interrupt-controller";
-
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
- periph_intc: periph_intc@14e00038 {
- compatible = "brcm,bcm3384-intc";
- reg = <0x14e00038 0x8 0x14e00340 0x8>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&cpu_intc>;
- interrupts = <4>;
- };
-
- zmips_intc: zmips_intc@104b0060 {
- compatible = "brcm,bcm3384-intc";
- reg = <0x104b0060 0x8>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&periph_intc>;
- interrupts = <29>;
- };
-
- iop_intc: iop_intc@14e00058 {
- compatible = "brcm,bcm3384-intc";
- reg = <0x14e00058 0x8>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&cpu_intc>;
- interrupts = <6>;
- };
-
- uart0: serial@14e00520 {
- compatible = "brcm,bcm6345-uart";
- reg = <0x14e00520 0x18>;
- interrupt-parent = <&periph_intc>;
- interrupts = <2>;
- clocks = <&periph_clk>;
- status = "disabled";
- };
-
- ehci0: usb@15400300 {
- compatible = "brcm,bcm3384-ehci", "generic-ehci";
- reg = <0x15400300 0x100>;
- big-endian;
- interrupt-parent = <&periph_intc>;
- interrupts = <41>;
- status = "disabled";
- };
-
- ohci0: usb@15400400 {
- compatible = "brcm,bcm3384-ohci", "generic-ohci";
- reg = <0x15400400 0x100>;
- big-endian;
- no-big-frame-no;
- interrupt-parent = <&periph_intc>;
- interrupts = <40>;
- status = "disabled";
- };
-};
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
new file mode 100644
index 000000000000..1c8353bfe003
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -0,0 +1,19 @@
+dtb-$(CONFIG_DT_BCM93384WVG) += bcm93384wvg.dtb
+dtb-$(CONFIG_DT_BCM93384WVG_VIPER) += bcm93384wvg_viper.dtb
+dtb-$(CONFIG_DT_BCM96368MVWG) += bcm96368mvwg.dtb
+dtb-$(CONFIG_DT_BCM9EJTAGPRB) += bcm9ejtagprb.dtb
+dtb-$(CONFIG_DT_BCM97125CBMB) += bcm97125cbmb.dtb
+dtb-$(CONFIG_DT_BCM97346DBSMB) += bcm97346dbsmb.dtb
+dtb-$(CONFIG_DT_BCM97358SVMB) += bcm97358svmb.dtb
+dtb-$(CONFIG_DT_BCM97360SVMB) += bcm97360svmb.dtb
+dtb-$(CONFIG_DT_BCM97362SVMB) += bcm97362svmb.dtb
+dtb-$(CONFIG_DT_BCM97420C) += bcm97420c.dtb
+dtb-$(CONFIG_DT_BCM97425SVMB) += bcm97425svmb.dtb
+
+obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj- += dummy.o
+
+always := $(dtb-y)
+clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/brcm/bcm3384_viper.dtsi b/arch/mips/boot/dts/brcm/bcm3384_viper.dtsi
new file mode 100644
index 000000000000..aa406b43c65f
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3384_viper.dtsi
@@ -0,0 +1,108 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm3384-viper", "brcm,bcm33843-viper";
+
+ memory@0 {
+ device_type = "memory";
+
+ /* Typical ranges. The bootloader should fill these in. */
+ reg = <0x06000000 0x02000000>,
+ <0x0e000000 0x02000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 1/2 of the CPU core clock (standard MIPS behavior) */
+ mips-hpt-frequency = <300000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ periph_clk: periph_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <54000000>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "brcm,ubus", "simple-bus";
+ ranges;
+ /* No dma-ranges on Viper. */
+
+ periph_intc: periph_intc@14e00048 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x14e00048 0x4 0x14e0004c 0x4>,
+ <0x14e00350 0x4 0x14e00354 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <4>;
+ };
+
+ cmips_intc: cmips_intc@151f8048 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x151f8048 0x4 0x151f804c 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ brcm,int-map-mask = <0xffffffff>;
+ };
+
+ uart0: serial@14e00520 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x14e00520 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ ehci0: usb@15400300 {
+ compatible = "brcm,bcm3384-ehci", "generic-ehci";
+ reg = <0x15400300 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+
+ ohci0: usb@15400400 {
+ compatible = "brcm,bcm3384-ohci", "generic-ohci";
+ reg = <0x15400400 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <40>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi b/arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi
new file mode 100644
index 000000000000..a7bd8564e9f6
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi
@@ -0,0 +1,126 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm3384", "brcm,bcm33843";
+
+ memory@0 {
+ device_type = "memory";
+
+ /* Typical range. The bootloader should fill this in. */
+ reg = <0x0 0x08000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* On BMIPS5000 this is 1/8th of the CPU core clock */
+ mips-hpt-frequency = <100000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ periph_clk: periph_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <54000000>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "brcm,ubus", "simple-bus";
+ ranges;
+ dma-ranges = <0x00000000 0x08000000 0x08000000>,
+ <0x08000000 0x00000000 0x08000000>;
+
+ periph_intc: periph_intc@14e00038 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x14e00038 0x4 0x14e0003c 0x4>,
+ <0x14e00340 0x4 0x14e00344 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <4>;
+ };
+
+ zmips_intc: zmips_intc@104b0060 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x104b0060 0x4 0x104b0064 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <29>;
+ brcm,int-map-mask = <0xffffffff>;
+ };
+
+ iop_intc: iop_intc@14e00058 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x14e00058 0x4 0x14e0005c 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <6>;
+ brcm,int-map-mask = <0xffffffff>;
+ };
+
+ uart0: serial@14e00520 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x14e00520 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ ehci0: usb@15400300 {
+ compatible = "brcm,bcm3384-ehci", "generic-ehci";
+ reg = <0x15400300 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+
+ ohci0: usb@15400400 {
+ compatible = "brcm,bcm3384-ohci", "generic-ohci";
+ reg = <0x15400400 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <40>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
new file mode 100644
index 000000000000..41891c1e58bd
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -0,0 +1,86 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6328";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <160000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ periph_intc: periph_intc@10000020 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x10000024 0x4 0x1000002c 0x4>,
+ <0x10000020 0x4 0x10000028 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ uart0: serial@10000100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000100 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <28>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ timer: timer@10000040 {
+ compatible = "syscon";
+ reg = <0x10000040 0x2c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "syscon-reboot";
+ regmap = <&timer>;
+ offset = <0x28>;
+ mask = <0x1>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
new file mode 100644
index 000000000000..45152bc22117
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
@@ -0,0 +1,93 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6368";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <200000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+
+ };
+
+ clocks {
+ periph_clk: periph_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ periph_intc: periph_intc@10000020 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x10000024 0x4 0x1000002c 0x4>,
+ <0x10000020 0x4 0x10000028 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ uart0: serial@10000100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000100 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ ehci0: usb@10001500 {
+ compatible = "brcm,bcm6368-ehci", "generic-ehci";
+ reg = <0x10001500 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <7>;
+ status = "disabled";
+ };
+
+ ohci0: usb@10001600 {
+ compatible = "brcm,bcm6368-ohci", "generic-ohci";
+ reg = <0x10001600 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <5>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
new file mode 100644
index 000000000000..1a7efa883c5e
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -0,0 +1,139 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7125";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <202500000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: periph_intc@441400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x441400 0x30>, <0x441600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: sun_l2_intc@401800 {
+ compatible = "brcm,l2-intc";
+ reg = <0x401800 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <23>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f7>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "pci_0",
+ "bsp_0", "rdc_0", "rptd_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: upg_irq0_intc@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <18>;
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x60c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "brcm,bcm7038-reboot";
+ syscon = <&sun_top_ctrl 0x8 0x14>;
+ };
+
+ uart0: serial@406b00 {
+ compatible = "ns16550a";
+ reg = <0x406b00 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <21>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ ehci0: usb@488300 {
+ compatible = "brcm,bcm7125-ehci", "generic-ehci";
+ reg = <0x488300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <60>;
+ status = "disabled";
+ };
+
+ ohci0: usb@488400 {
+ compatible = "brcm,bcm7125-ohci", "generic-ohci";
+ reg = <0x488400 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
new file mode 100644
index 000000000000..1f30728a3177
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -0,0 +1,224 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7346";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <163125000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: periph_intc@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>, <0x411600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: sun_l2_intc@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <51>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x673>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "jtag_0", "svd_0";
+ };
+
+ upg_irq0_intc: upg_irq0_intc@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <59>;
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406900 {
+ compatible = "ns16550a";
+ reg = <0x406900 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <64>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <68>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <70>;
+ status = "disabled";
+ };
+
+ ehci1: usb@480500 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x480500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <69>;
+ status = "disabled";
+ };
+
+ ohci1: usb@480600 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x480600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <71>;
+ status = "disabled";
+ };
+
+ ehci2: usb@490300 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x490300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <73>;
+ status = "disabled";
+ };
+
+ ohci2: usb@490400 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x490400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <75>;
+ status = "disabled";
+ };
+
+ ehci3: usb@490500 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x490500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <74>;
+ status = "disabled";
+ };
+
+ ohci3: usb@490600 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x490600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <76>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
new file mode 100644
index 000000000000..2c2aa9368f76
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -0,0 +1,161 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7358";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <375000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips3300";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: periph_intc@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ sun_l2_intc: sun_l2_intc@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f3>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: upg_irq0_intc@406600 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406600 0x8>;
+
+ brcm,int-map-mask = <0x44>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>;
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406800 {
+ compatible = "ns16550a";
+ reg = <0x406800 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7358-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7358-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
new file mode 100644
index 000000000000..f23b0aed276f
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -0,0 +1,161 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7360";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <375000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips3300";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: periph_intc@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ sun_l2_intc: sun_l2_intc@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f3>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: upg_irq0_intc@406600 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406600 0x8>;
+
+ brcm,int-map-mask = <0x44>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>;
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406800 {
+ compatible = "ns16550a";
+ reg = <0x406800 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7360-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7360-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
new file mode 100644
index 000000000000..da99db665bbc
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -0,0 +1,167 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7362";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <375000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: periph_intc@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>, <0x411600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: sun_l2_intc@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f3>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: upg_irq0_intc@406600 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406600 0x8>;
+
+ brcm,int-map-mask = <0x44>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>;
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406800 {
+ compatible = "ns16550a";
+ reg = <0x406800 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7362-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7362-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
new file mode 100644
index 000000000000..5f55d0a50a28
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -0,0 +1,184 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7420";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <93750000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: periph_intc@441400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x441400 0x30>, <0x441600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: sun_l2_intc@401800 {
+ compatible = "brcm,l2-intc";
+ reg = <0x401800 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <23>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x3ff>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "pci_0",
+ "pcie_0", "bsp_0", "rdc_0",
+ "rptd_0", "avd_0", "avd_1",
+ "jtag_0";
+ };
+
+ upg_irq0_intc: upg_irq0_intc@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <18>;
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x60c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "brcm,bcm7038-reboot";
+ syscon = <&sun_top_ctrl 0x8 0x14>;
+ };
+
+ uart0: serial@406b00 {
+ compatible = "ns16550a";
+ reg = <0x406b00 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <21>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ enet0: ethernet@468000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v1";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x468000 0x3c8c>;
+ interrupts = <69>, <79>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v1";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,65nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@488300 {
+ compatible = "brcm,bcm7420-ehci", "generic-ehci";
+ reg = <0x488300 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <60>;
+ status = "disabled";
+ };
+
+ ohci0: usb@488400 {
+ compatible = "brcm,bcm7420-ohci", "generic-ohci";
+ reg = <0x488400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ status = "disabled";
+ };
+
+ ehci1: usb@488500 {
+ compatible = "brcm,bcm7420-ehci", "generic-ehci";
+ reg = <0x488500 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <55>;
+ status = "disabled";
+ };
+
+ ohci1: usb@488600 {
+ compatible = "brcm,bcm7420-ohci", "generic-ohci";
+ reg = <0x488600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <62>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
new file mode 100644
index 000000000000..5b660b617ead
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -0,0 +1,225 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7425";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <163125000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: periph_intc@41a400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x41a400 0x30>, <0x41a600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: sun_l2_intc@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <47>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x177b>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "pcie_0",
+ "bsp_0", "rdc_0",
+ "raaga_0", "avd_1",
+ "jtag_0", "svd_0",
+ "vice_0";
+ };
+
+ upg_irq0_intc: upg_irq0_intc@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <55>;
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ little-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406b00 {
+ compatible = "ns16550a";
+ reg = <0x406b00 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ enet0: ethernet@b80000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v3";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xb80000 0x11c88>;
+ interrupts = <17>, <18>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v3";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <67>;
+ status = "disabled";
+ };
+
+ ehci1: usb@480500 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x480500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+
+ ohci1: usb@480600 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x480600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <68>;
+ status = "disabled";
+ };
+
+ ehci2: usb@490300 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x490300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <70>;
+ status = "disabled";
+ };
+
+ ohci2: usb@490400 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x490400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <72>;
+ status = "disabled";
+ };
+
+ ehci3: usb@490500 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x490500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <71>;
+ status = "disabled";
+ };
+
+ ohci3: usb@490600 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x490600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <73>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/bcm93384wvg.dts b/arch/mips/boot/dts/brcm/bcm93384wvg.dts
index 831741179212..d1e44a17d41a 100644
--- a/arch/mips/boot/dts/bcm93384wvg.dts
+++ b/arch/mips/boot/dts/brcm/bcm93384wvg.dts
@@ -1,6 +1,6 @@
/dts-v1/;
-/include/ "bcm3384.dtsi"
+/include/ "bcm3384_zephyr.dtsi"
/ {
compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
@@ -10,13 +10,6 @@
bootargs = "console=ttyS0,115200";
stdout-path = &uart0;
};
-
- memory@0 {
- device_type = "memory";
- reg = <0x0 0x04000000>;
- dma-xor-mask = <0x08000000>;
- dma-xor-limit = <0x0fffffff>;
- };
};
&uart0 {
diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
new file mode 100644
index 000000000000..1ecb2696aca8
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
@@ -0,0 +1,25 @@
+/dts-v1/;
+
+/include/ "bcm3384_viper.dtsi"
+
+/ {
+ compatible = "brcm,bcm93384wvg-viper", "brcm,bcm3384-viper";
+ model = "Broadcom BCM93384WVG-viper";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
new file mode 100644
index 000000000000..0e890c28fe5c
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
@@ -0,0 +1,31 @@
+/dts-v1/;
+
+/include/ "bcm6368.dtsi"
+
+/ {
+ compatible = "brcm,bcm96368mvwg", "brcm,bcm6368";
+ model = "Broadcom BCM96368MVWG";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x04000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+/* FIXME: need to set up USB_CTRL registers first */
+&ehci0 {
+ status = "disabled";
+};
+
+&ohci0 {
+ status = "disabled";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
new file mode 100644
index 000000000000..e046b1109eab
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
@@ -0,0 +1,31 @@
+/dts-v1/;
+
+/include/ "bcm7125.dtsi"
+
+/ {
+ compatible = "brcm,bcm97125cbmb", "brcm,bcm7125";
+ model = "Broadcom BCM97125CBMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+/* FIXME: USB is wonky; disable it for now */
+&ehci0 {
+ status = "disabled";
+};
+
+&ohci0 {
+ status = "disabled";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
new file mode 100644
index 000000000000..70f196d89d26
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
@@ -0,0 +1,58 @@
+/dts-v1/;
+
+/include/ "bcm7346.dtsi"
+
+/ {
+ compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346";
+ model = "Broadcom BCM97346DBSMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>, <0x20000000 0x30000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
new file mode 100644
index 000000000000..d18e6d947739
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
@@ -0,0 +1,34 @@
+/dts-v1/;
+
+/include/ "bcm7358.dtsi"
+
+/ {
+ compatible = "brcm,bcm97358svmb", "brcm,bcm7358";
+ model = "Broadcom BCM97358SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
new file mode 100644
index 000000000000..4fe515500102
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -0,0 +1,34 @@
+/dts-v1/;
+
+/include/ "bcm7360.dtsi"
+
+/ {
+ compatible = "brcm,bcm97360svmb", "brcm,bcm7360";
+ model = "Broadcom BCM97360SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97362svmb.dts b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
new file mode 100644
index 000000000000..b7b88e5dc9e7
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
@@ -0,0 +1,34 @@
+/dts-v1/;
+
+/include/ "bcm7362.dtsi"
+
+/ {
+ compatible = "brcm,bcm97362svmb", "brcm,bcm7362";
+ model = "Broadcom BCM97362SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>, <0x20000000 0x30000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts
new file mode 100644
index 000000000000..67fe1f3a3891
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97420c.dts
@@ -0,0 +1,45 @@
+/dts-v1/;
+
+/include/ "bcm7420.dtsi"
+
+/ {
+ compatible = "brcm,bcm97420c", "brcm,bcm7420";
+ model = "Broadcom BCM97420C";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>,
+ <0x20000000 0x30000000>,
+ <0x60000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+/* FIXME: MAC driver comes up but cannot attach to PHY */
+&enet0 {
+ status = "disabled";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
new file mode 100644
index 000000000000..689c68a4f9c8
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -0,0 +1,60 @@
+/dts-v1/;
+
+/include/ "bcm7425.dtsi"
+
+/ {
+ compatible = "brcm,bcm97425svmb", "brcm,bcm7425";
+ model = "Broadcom BCM97425SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>,
+ <0x20000000 0x30000000>,
+ <0x90000000 0x40000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
new file mode 100644
index 000000000000..1da4608680aa
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/include/ "bcm6328.dtsi"
+
+/ {
+ compatible = "brcm,bcm9ejtagprb", "brcm,bcm6328";
+ model = "Broadcom BCM9EJTAGPRB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/Makefile b/arch/mips/boot/dts/cavium-octeon/Makefile
new file mode 100644
index 000000000000..5b99c40a058f
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/Makefile
@@ -0,0 +1,9 @@
+dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
+
+obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj- += dummy.o
+
+always := $(dtb-y)
+clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
index fa33115bde33..9c48e0586ba7 100644
--- a/arch/mips/boot/dts/octeon_3xxx.dts
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
@@ -587,4 +587,16 @@
usbn = &usbn;
led0 = &led0;
};
+
+ dsr1000n-leds {
+ compatible = "gpio-leds";
+ usb1 {
+ label = "usb1";
+ gpios = <&gpio 9 1>; /* Active low */
+ };
+ usb2 {
+ label = "usb2";
+ gpios = <&gpio 10 1>; /* Active low */
+ };
+ };
};
diff --git a/arch/mips/boot/dts/octeon_68xx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_68xx.dts
index 79b46fcb0a11..79b46fcb0a11 100644
--- a/arch/mips/boot/dts/octeon_68xx.dts
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_68xx.dts
diff --git a/arch/mips/boot/dts/lantiq/Makefile b/arch/mips/boot/dts/lantiq/Makefile
new file mode 100644
index 000000000000..0906c62141b9
--- /dev/null
+++ b/arch/mips/boot/dts/lantiq/Makefile
@@ -0,0 +1,9 @@
+dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
+
+obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj- += dummy.o
+
+always := $(dtb-y)
+clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/danube.dtsi b/arch/mips/boot/dts/lantiq/danube.dtsi
index d4c59e003708..d4c59e003708 100644
--- a/arch/mips/boot/dts/danube.dtsi
+++ b/arch/mips/boot/dts/lantiq/danube.dtsi
diff --git a/arch/mips/boot/dts/easy50712.dts b/arch/mips/boot/dts/lantiq/easy50712.dts
index 143b8a37b5e4..143b8a37b5e4 100644
--- a/arch/mips/boot/dts/easy50712.dts
+++ b/arch/mips/boot/dts/lantiq/easy50712.dts
diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile
new file mode 100644
index 000000000000..ef1f3dbed033
--- /dev/null
+++ b/arch/mips/boot/dts/mti/Makefile
@@ -0,0 +1,9 @@
+dtb-$(CONFIG_MIPS_SEAD3) += sead3.dtb
+
+obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj- += dummy.o
+
+always := $(dtb-y)
+clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/sead3.dts b/arch/mips/boot/dts/mti/sead3.dts
index e4b317d414f1..e4b317d414f1 100644
--- a/arch/mips/boot/dts/sead3.dts
+++ b/arch/mips/boot/dts/mti/sead3.dts
diff --git a/arch/mips/boot/dts/netlogic/Makefile b/arch/mips/boot/dts/netlogic/Makefile
new file mode 100644
index 000000000000..9868057140b5
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/Makefile
@@ -0,0 +1,13 @@
+dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb
+dtb-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb
+dtb-$(CONFIG_DT_XLP_FVP) += xlp_fvp.dtb
+dtb-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb
+dtb-$(CONFIG_DT_XLP_RVP) += xlp_rvp.dtb
+
+obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj- += dummy.o
+
+always := $(dtb-y)
+clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/xlp_evp.dts b/arch/mips/boot/dts/netlogic/xlp_evp.dts
index 89ad04808c02..89ad04808c02 100644
--- a/arch/mips/boot/dts/xlp_evp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_evp.dts
diff --git a/arch/mips/boot/dts/xlp_fvp.dts b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
index 63e62b7bd758..63e62b7bd758 100644
--- a/arch/mips/boot/dts/xlp_fvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
diff --git a/arch/mips/boot/dts/xlp_gvp.dts b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
index bb4ecd1d47fc..bb4ecd1d47fc 100644
--- a/arch/mips/boot/dts/xlp_gvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
diff --git a/arch/mips/boot/dts/netlogic/xlp_rvp.dts b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
new file mode 100644
index 000000000000..7188aed2ea2e
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
@@ -0,0 +1,77 @@
+/*
+ * XLP5XX Device Tree Source for RVP boards
+ */
+
+/dts-v1/;
+/ {
+ model = "netlogic,XLP-RVP";
+ compatible = "netlogic,xlp";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ 1 0 0 0x16000000 0x02000000>; // GBU chipselects
+
+ serial0: serial@30000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x112100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <125000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+ pic: pic@110000 {
+ compatible = "netlogic,xlp-pic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0 0x110000 0x200>;
+ interrupt-controller;
+ };
+
+ nor_flash@1,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ reg = <1 0 0x1000000>;
+
+ partition@0 {
+ label = "x-loader";
+ reg = <0x0 0x100000>; /* 1M */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>; /* 1M */
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>; /* 5M */
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x700000 0x800000>; /* 8M */
+ };
+
+ partition@f00000 {
+ label = "env";
+ reg = <0xf00000 0x100000>; /* 1M */
+ read-only;
+ };
+ };
+
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+ };
+};
diff --git a/arch/mips/boot/dts/xlp_svp.dts b/arch/mips/boot/dts/netlogic/xlp_svp.dts
index 1ebd00edaacc..1ebd00edaacc 100644
--- a/arch/mips/boot/dts/xlp_svp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_svp.dts
diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile
new file mode 100644
index 000000000000..2a7225954bf6
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/Makefile
@@ -0,0 +1,12 @@
+dtb-$(CONFIG_DTB_RT2880_EVAL) += rt2880_eval.dtb
+dtb-$(CONFIG_DTB_RT305X_EVAL) += rt3052_eval.dtb
+dtb-$(CONFIG_DTB_RT3883_EVAL) += rt3883_eval.dtb
+dtb-$(CONFIG_DTB_MT7620A_EVAL) += mt7620a_eval.dtb
+
+obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj- += dummy.o
+
+always := $(dtb-y)
+clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
index 08bf24fefe9f..08bf24fefe9f 100644
--- a/arch/mips/boot/dts/mt7620a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
diff --git a/arch/mips/boot/dts/mt7620a_eval.dts b/arch/mips/boot/dts/ralink/mt7620a_eval.dts
index 709f58132f5c..709f58132f5c 100644
--- a/arch/mips/boot/dts/mt7620a_eval.dts
+++ b/arch/mips/boot/dts/ralink/mt7620a_eval.dts
diff --git a/arch/mips/boot/dts/rt2880.dtsi b/arch/mips/boot/dts/ralink/rt2880.dtsi
index 182afde2f2e1..182afde2f2e1 100644
--- a/arch/mips/boot/dts/rt2880.dtsi
+++ b/arch/mips/boot/dts/ralink/rt2880.dtsi
diff --git a/arch/mips/boot/dts/rt2880_eval.dts b/arch/mips/boot/dts/ralink/rt2880_eval.dts
index 0a685db093d4..0a685db093d4 100644
--- a/arch/mips/boot/dts/rt2880_eval.dts
+++ b/arch/mips/boot/dts/ralink/rt2880_eval.dts
diff --git a/arch/mips/boot/dts/rt3050.dtsi b/arch/mips/boot/dts/ralink/rt3050.dtsi
index e3203d414fee..e3203d414fee 100644
--- a/arch/mips/boot/dts/rt3050.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3050.dtsi
diff --git a/arch/mips/boot/dts/rt3052_eval.dts b/arch/mips/boot/dts/ralink/rt3052_eval.dts
index ec9e9a035541..ec9e9a035541 100644
--- a/arch/mips/boot/dts/rt3052_eval.dts
+++ b/arch/mips/boot/dts/ralink/rt3052_eval.dts
diff --git a/arch/mips/boot/dts/rt3883.dtsi b/arch/mips/boot/dts/ralink/rt3883.dtsi
index 3b131dd0d5ac..3b131dd0d5ac 100644
--- a/arch/mips/boot/dts/rt3883.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3883.dtsi
diff --git a/arch/mips/boot/dts/rt3883_eval.dts b/arch/mips/boot/dts/ralink/rt3883_eval.dts
index e8df21a5d10d..e8df21a5d10d 100644
--- a/arch/mips/boot/dts/rt3883_eval.dts
+++ b/arch/mips/boot/dts/ralink/rt3883_eval.dts
diff --git a/arch/mips/cavium-octeon/crypto/Makefile b/arch/mips/cavium-octeon/crypto/Makefile
index a74f76d85a2f..f7aa9d5d3b87 100644
--- a/arch/mips/cavium-octeon/crypto/Makefile
+++ b/arch/mips/cavium-octeon/crypto/Makefile
@@ -4,4 +4,7 @@
obj-y += octeon-crypto.o
-obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o
+obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o
+obj-$(CONFIG_CRYPTO_SHA1_OCTEON) += octeon-sha1.o
+obj-$(CONFIG_CRYPTO_SHA256_OCTEON) += octeon-sha256.o
+obj-$(CONFIG_CRYPTO_SHA512_OCTEON) += octeon-sha512.o
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
index 7c82ff463b65..f66bd1adc7ff 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -17,7 +17,7 @@
* crypto operations in calls to octeon_crypto_enable/disable in order to make
* sure the state of COP2 isn't corrupted if userspace is also performing
* hardware crypto operations. Allocate the state parameter on the stack.
- * Preemption must be disabled to prevent context switches.
+ * Returns with preemption disabled.
*
* @state: Pointer to state structure to store current COP2 state in.
*
@@ -28,6 +28,7 @@ unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
int status;
unsigned long flags;
+ preempt_disable();
local_irq_save(flags);
status = read_c0_status();
write_c0_status(status | ST0_CU2);
@@ -62,5 +63,6 @@ void octeon_crypto_disable(struct octeon_cop2_state *state,
else
write_c0_status(read_c0_status() & ~ST0_CU2);
local_irq_restore(flags);
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(octeon_crypto_disable);
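
A minimal sketch of the caller contract after the two hunks above, using only the signatures visible in this diff (illustrative, not part of the patch): octeon_crypto_enable() now disables preemption itself and returns the saved IRQ flags, so callers stop wrapping it in preempt_disable()/local_bh_disable(), as the octeon-md5.c hunks below show.

/* Illustrative caller of the COP2 save/restore pair after this change. */
static void example_cop2_section(void)
{
	struct octeon_cop2_state state;		/* on-stack save area */
	unsigned long flags;

	flags = octeon_crypto_enable(&state);	/* save COP2 state, disable preemption */

	/* ... issue dmtc2/dmfc2 hash operations here ... */

	octeon_crypto_disable(&state, flags);	/* restore COP2 state, re-enable preemption */
}
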
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.h b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
index e2a4aece9c24..7315cc307397 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-crypto.h
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
@@ -5,7 +5,8 @@
*
* Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved.
*
- * MD5 instruction definitions added by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ * MD5/SHA1/SHA256/SHA512 instruction definitions added by
+ * Aaro Koskinen <aaro.koskinen@iki.fi>.
*
*/
#ifndef __LINUX_OCTEON_CRYPTO_H
@@ -21,22 +22,22 @@ extern void octeon_crypto_disable(struct octeon_cop2_state *state,
unsigned long flags);
/*
- * Macros needed to implement MD5:
+ * Macros needed to implement MD5/SHA1/SHA256:
*/
/*
- * The index can be 0-1.
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
*/
#define write_octeon_64bit_hash_dword(value, index) \
do { \
__asm__ __volatile__ ( \
"dmtc2 %[rt],0x0048+" STR(index) \
: \
- : [rt] "d" (value)); \
+ : [rt] "d" (cpu_to_be64(value))); \
} while (0)
/*
- * The index can be 0-1.
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
*/
#define read_octeon_64bit_hash_dword(index) \
({ \
@@ -47,7 +48,7 @@ do { \
: [rt] "=d" (__value) \
: ); \
\
- __value; \
+ be64_to_cpu(__value); \
})
/*
@@ -58,7 +59,7 @@ do { \
__asm__ __volatile__ ( \
"dmtc2 %[rt],0x0040+" STR(index) \
: \
- : [rt] "d" (value)); \
+ : [rt] "d" (cpu_to_be64(value))); \
} while (0)
/*
@@ -69,6 +70,154 @@ do { \
__asm__ __volatile__ ( \
"dmtc2 %[rt],0x4047" \
: \
+ : [rt] "d" (cpu_to_be64(value))); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha1_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x4057" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha256_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x404f" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * Macros needed to implement SHA512:
+ */
+
+/*
+ * The index can be 0-7.
+ */
+#define write_octeon_64bit_hash_sha512(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0250+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The index can be 0-7.
+ */
+#define read_octeon_64bit_hash_sha512(index) \
+({ \
+ u64 __value; \
+ \
+ __asm__ __volatile__ ( \
+ "dmfc2 %[rt],0x0250+" STR(index) \
+ : [rt] "=d" (__value) \
+ : ); \
+ \
+ __value; \
+})
+
+/*
+ * The index can be 0-14.
+ */
+#define write_octeon_64bit_block_sha512(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0240+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block word (64-bit).
+ */
+#define octeon_sha512_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x424f" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha1_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x4057" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha256_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x404f" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * Macros needed to implement SHA512:
+ */
+
+/*
+ * The index can be 0-7.
+ */
+#define write_octeon_64bit_hash_sha512(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0250+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The index can be 0-7.
+ */
+#define read_octeon_64bit_hash_sha512(index) \
+({ \
+ u64 __value; \
+ \
+ __asm__ __volatile__ ( \
+ "dmfc2 %[rt],0x0250+" STR(index) \
+ : [rt] "=d" (__value) \
+ : ); \
+ \
+ __value; \
+})
+
+/*
+ * The index can be 0-14.
+ */
+#define write_octeon_64bit_block_sha512(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0240+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block word (64-bit).
+ */
+#define octeon_sha512_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x424f" \
+ : \
: [rt] "d" (value)); \
} while (0)
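
Assumed usage of the macros above for one 64-byte SHA-1 block, stitched together from the store/transform/read helpers that octeon-sha1.c adds below (a sketch, not part of the patch); the index argument is pasted into the opcode string, so it must be a literal, which is why every store is unrolled.

static void example_sha1_block(u64 *hash, const u64 *block)
{
	/* load the 160-bit digest, packed into three dwords */
	write_octeon_64bit_hash_dword(hash[0], 0);
	write_octeon_64bit_hash_dword(hash[1], 1);
	write_octeon_64bit_hash_dword(hash[2], 2);

	/* feed the first seven block dwords; the eighth starts the engine */
	write_octeon_64bit_block_dword(block[0], 0);
	write_octeon_64bit_block_dword(block[1], 1);
	write_octeon_64bit_block_dword(block[2], 2);
	write_octeon_64bit_block_dword(block[3], 3);
	write_octeon_64bit_block_dword(block[4], 4);
	write_octeon_64bit_block_dword(block[5], 5);
	write_octeon_64bit_block_dword(block[6], 6);
	octeon_sha1_start(block[7]);

	/* read the updated digest back */
	hash[0] = read_octeon_64bit_hash_dword(0);
	hash[1] = read_octeon_64bit_hash_dword(1);
	hash[2] = read_octeon_64bit_hash_dword(2);
}
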
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index b909881ba6c1..12dccdb38286 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -97,8 +97,6 @@ static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
avail);
- local_bh_disable();
- preempt_disable();
flags = octeon_crypto_enable(&state);
octeon_md5_store_hash(mctx);
@@ -114,8 +112,6 @@ static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
- preempt_enable();
- local_bh_enable();
memcpy(mctx->block, data, len);
@@ -133,8 +129,6 @@ static int octeon_md5_final(struct shash_desc *desc, u8 *out)
*p++ = 0x80;
- local_bh_disable();
- preempt_disable();
flags = octeon_crypto_enable(&state);
octeon_md5_store_hash(mctx);
@@ -152,8 +146,6 @@ static int octeon_md5_final(struct shash_desc *desc, u8 *out)
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
- preempt_enable();
- local_bh_enable();
memcpy(out, mctx->hash, sizeof(mctx->hash));
memset(mctx, 0, sizeof(*mctx));
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
new file mode 100644
index 000000000000..2b74b5b67cae
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -0,0 +1,241 @@
+/*
+ * Cryptographic API.
+ *
+ * SHA1 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha1_generic.c, which is:
+ *
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha1_store_hash(struct sha1_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+ union {
+ u32 word[2];
+ u64 dword;
+ } hash_tail = { { sctx->state[4], } };
+
+ write_octeon_64bit_hash_dword(hash[0], 0);
+ write_octeon_64bit_hash_dword(hash[1], 1);
+ write_octeon_64bit_hash_dword(hash_tail.dword, 2);
+ memzero_explicit(&hash_tail.word[0], sizeof(hash_tail.word[0]));
+}
+
+static void octeon_sha1_read_hash(struct sha1_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+ union {
+ u32 word[2];
+ u64 dword;
+ } hash_tail;
+
+ hash[0] = read_octeon_64bit_hash_dword(0);
+ hash[1] = read_octeon_64bit_hash_dword(1);
+ hash_tail.dword = read_octeon_64bit_hash_dword(2);
+ sctx->state[4] = hash_tail.word[0];
+ memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword));
+}
+
+static void octeon_sha1_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_sha1_start(block[7]);
+}
+
+static int octeon_sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data,
+ unsigned int len)
+{
+ unsigned int partial;
+ unsigned int done;
+ const u8 *src;
+
+ partial = sctx->count % SHA1_BLOCK_SIZE;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) >= SHA1_BLOCK_SIZE) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buffer + partial, data,
+ done + SHA1_BLOCK_SIZE);
+ src = sctx->buffer;
+ }
+
+ do {
+ octeon_sha1_transform(src);
+ done += SHA1_BLOCK_SIZE;
+ src = data + done;
+ } while (done + SHA1_BLOCK_SIZE <= len);
+
+ partial = 0;
+ }
+ memcpy(sctx->buffer + partial, src, len - done);
+}
+
+static int octeon_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ /*
+ * Small updates never reach the crypto engine, so the generic sha1 is
+ * faster because of the heavyweight octeon_crypto_enable() /
+ * octeon_crypto_disable().
+ */
+ if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
+ return crypto_sha1_update(desc, data, len);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha1_store_hash(sctx);
+
+ __octeon_sha1_update(sctx, data, len);
+
+ octeon_sha1_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ return 0;
+}
+
+static int octeon_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ static const u8 padding[64] = { 0x80, };
+ struct octeon_cop2_state state;
+ __be32 *dst = (__be32 *)out;
+ unsigned int pad_len;
+ unsigned long flags;
+ unsigned int index;
+ __be64 bits;
+ int i;
+
+ /* Save number of bits. */
+ bits = cpu_to_be64(sctx->count << 3);
+
+ /* Pad out to 56 mod 64. */
+ index = sctx->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha1_store_hash(sctx);
+
+ __octeon_sha1_update(sctx, padding, pad_len);
+
+ /* Append length (before padding). */
+ __octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+ octeon_sha1_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ /* Store state in digest */
+ for (i = 0; i < 5; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(*sctx));
+
+ return 0;
+}
+
+static int octeon_sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int octeon_sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg octeon_sha1_alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = octeon_sha1_init,
+ .update = octeon_sha1_update,
+ .final = octeon_sha1_final,
+ .export = octeon_sha1_export,
+ .import = octeon_sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name= "octeon-sha1",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init octeon_sha1_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shash(&octeon_sha1_alg);
+}
+
+static void __exit octeon_sha1_mod_fini(void)
+{
+ crypto_unregister_shash(&octeon_sha1_alg);
+}
+
+module_init(octeon_sha1_mod_init);
+module_exit(octeon_sha1_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
new file mode 100644
index 000000000000..97e96fead08a
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -0,0 +1,280 @@
+/*
+ * Cryptographic API.
+ *
+ * SHA-224 and SHA-256 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha256_generic.c, which is:
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha256_store_hash(struct sha256_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+
+ write_octeon_64bit_hash_dword(hash[0], 0);
+ write_octeon_64bit_hash_dword(hash[1], 1);
+ write_octeon_64bit_hash_dword(hash[2], 2);
+ write_octeon_64bit_hash_dword(hash[3], 3);
+}
+
+static void octeon_sha256_read_hash(struct sha256_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+
+ hash[0] = read_octeon_64bit_hash_dword(0);
+ hash[1] = read_octeon_64bit_hash_dword(1);
+ hash[2] = read_octeon_64bit_hash_dword(2);
+ hash[3] = read_octeon_64bit_hash_dword(3);
+}
+
+static void octeon_sha256_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_sha256_start(block[7]);
+}
+
+static int octeon_sha224_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static int octeon_sha256_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data,
+ unsigned int len)
+{
+ unsigned int partial;
+ unsigned int done;
+ const u8 *src;
+
+ partial = sctx->count % SHA256_BLOCK_SIZE;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) >= SHA256_BLOCK_SIZE) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data,
+ done + SHA256_BLOCK_SIZE);
+ src = sctx->buf;
+ }
+
+ do {
+ octeon_sha256_transform(src);
+ done += SHA256_BLOCK_SIZE;
+ src = data + done;
+ } while (done + SHA256_BLOCK_SIZE <= len);
+
+ partial = 0;
+ }
+ memcpy(sctx->buf + partial, src, len - done);
+}
+
+static int octeon_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ /*
+ * Small updates never reach the crypto engine, so the generic sha256 is
+ * faster because of the heavyweight octeon_crypto_enable() /
+ * octeon_crypto_disable().
+ */
+ if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
+ return crypto_sha256_update(desc, data, len);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha256_store_hash(sctx);
+
+ __octeon_sha256_update(sctx, data, len);
+
+ octeon_sha256_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ return 0;
+}
+
+static int octeon_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ static const u8 padding[64] = { 0x80, };
+ struct octeon_cop2_state state;
+ __be32 *dst = (__be32 *)out;
+ unsigned int pad_len;
+ unsigned long flags;
+ unsigned int index;
+ __be64 bits;
+ int i;
+
+ /* Save number of bits. */
+ bits = cpu_to_be64(sctx->count << 3);
+
+ /* Pad out to 56 mod 64. */
+ index = sctx->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha256_store_hash(sctx);
+
+ __octeon_sha256_update(sctx, padding, pad_len);
+
+ /* Append length (before padding). */
+ __octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+ octeon_sha256_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ /* Store state in digest */
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(*sctx));
+
+ return 0;
+}
+
+static int octeon_sha224_final(struct shash_desc *desc, u8 *hash)
+{
+ u8 D[SHA256_DIGEST_SIZE];
+
+ octeon_sha256_final(desc, D);
+
+ memcpy(hash, D, SHA224_DIGEST_SIZE);
+ memzero_explicit(D, SHA256_DIGEST_SIZE);
+
+ return 0;
+}
+
+static int octeon_sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int octeon_sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg octeon_sha256_algs[2] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = octeon_sha256_init,
+ .update = octeon_sha256_update,
+ .final = octeon_sha256_final,
+ .export = octeon_sha256_export,
+ .import = octeon_sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name= "octeon-sha256",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = octeon_sha224_init,
+ .update = octeon_sha256_update,
+ .final = octeon_sha224_final,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name= "octeon-sha224",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init octeon_sha256_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shashes(octeon_sha256_algs,
+ ARRAY_SIZE(octeon_sha256_algs));
+}
+
+static void __exit octeon_sha256_mod_fini(void)
+{
+ crypto_unregister_shashes(octeon_sha256_algs,
+ ARRAY_SIZE(octeon_sha256_algs));
+}
+
+module_init(octeon_sha256_mod_init);
+module_exit(octeon_sha256_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
new file mode 100644
index 000000000000..d5fb3c6f22ae
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -0,0 +1,277 @@
+/*
+ * Cryptographic API.
+ *
+ * SHA-512 and SHA-384 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha512_generic.c, which is:
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha512_store_hash(struct sha512_state *sctx)
+{
+ write_octeon_64bit_hash_sha512(sctx->state[0], 0);
+ write_octeon_64bit_hash_sha512(sctx->state[1], 1);
+ write_octeon_64bit_hash_sha512(sctx->state[2], 2);
+ write_octeon_64bit_hash_sha512(sctx->state[3], 3);
+ write_octeon_64bit_hash_sha512(sctx->state[4], 4);
+ write_octeon_64bit_hash_sha512(sctx->state[5], 5);
+ write_octeon_64bit_hash_sha512(sctx->state[6], 6);
+ write_octeon_64bit_hash_sha512(sctx->state[7], 7);
+}
+
+static void octeon_sha512_read_hash(struct sha512_state *sctx)
+{
+ sctx->state[0] = read_octeon_64bit_hash_sha512(0);
+ sctx->state[1] = read_octeon_64bit_hash_sha512(1);
+ sctx->state[2] = read_octeon_64bit_hash_sha512(2);
+ sctx->state[3] = read_octeon_64bit_hash_sha512(3);
+ sctx->state[4] = read_octeon_64bit_hash_sha512(4);
+ sctx->state[5] = read_octeon_64bit_hash_sha512(5);
+ sctx->state[6] = read_octeon_64bit_hash_sha512(6);
+ sctx->state[7] = read_octeon_64bit_hash_sha512(7);
+}
+
+static void octeon_sha512_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_sha512(block[0], 0);
+ write_octeon_64bit_block_sha512(block[1], 1);
+ write_octeon_64bit_block_sha512(block[2], 2);
+ write_octeon_64bit_block_sha512(block[3], 3);
+ write_octeon_64bit_block_sha512(block[4], 4);
+ write_octeon_64bit_block_sha512(block[5], 5);
+ write_octeon_64bit_block_sha512(block[6], 6);
+ write_octeon_64bit_block_sha512(block[7], 7);
+ write_octeon_64bit_block_sha512(block[8], 8);
+ write_octeon_64bit_block_sha512(block[9], 9);
+ write_octeon_64bit_block_sha512(block[10], 10);
+ write_octeon_64bit_block_sha512(block[11], 11);
+ write_octeon_64bit_block_sha512(block[12], 12);
+ write_octeon_64bit_block_sha512(block[13], 13);
+ write_octeon_64bit_block_sha512(block[14], 14);
+ octeon_sha512_start(block[15]);
+}
+
+static int octeon_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA512_H0;
+ sctx->state[1] = SHA512_H1;
+ sctx->state[2] = SHA512_H2;
+ sctx->state[3] = SHA512_H3;
+ sctx->state[4] = SHA512_H4;
+ sctx->state[5] = SHA512_H5;
+ sctx->state[6] = SHA512_H6;
+ sctx->state[7] = SHA512_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static int octeon_sha384_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA384_H0;
+ sctx->state[1] = SHA384_H1;
+ sctx->state[2] = SHA384_H2;
+ sctx->state[3] = SHA384_H3;
+ sctx->state[4] = SHA384_H4;
+ sctx->state[5] = SHA384_H5;
+ sctx->state[6] = SHA384_H6;
+ sctx->state[7] = SHA384_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data,
+ unsigned int len)
+{
+ unsigned int part_len;
+ unsigned int index;
+ unsigned int i;
+
+ /* Compute number of bytes mod 128. */
+ index = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ /* Update number of bytes. */
+ if ((sctx->count[0] += len) < len)
+ sctx->count[1]++;
+
+ part_len = SHA512_BLOCK_SIZE - index;
+
+ /* Transform as many times as possible. */
+ if (len >= part_len) {
+ memcpy(&sctx->buf[index], data, part_len);
+ octeon_sha512_transform(sctx->buf);
+
+ for (i = part_len; i + SHA512_BLOCK_SIZE <= len;
+ i += SHA512_BLOCK_SIZE)
+ octeon_sha512_transform(&data[i]);
+
+ index = 0;
+ } else {
+ i = 0;
+ }
+
+ /* Buffer remaining input. */
+ memcpy(&sctx->buf[index], &data[i], len - i);
+}
+
+static int octeon_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ /*
+ * Small updates never reach the crypto engine, so the generic sha512 is
+ * faster because of the heavyweight octeon_crypto_enable() /
+ * octeon_crypto_disable().
+ */
+ if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
+ return crypto_sha512_update(desc, data, len);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha512_store_hash(sctx);
+
+ __octeon_sha512_update(sctx, data, len);
+
+ octeon_sha512_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ return 0;
+}
+
+static int octeon_sha512_final(struct shash_desc *desc, u8 *hash)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ static u8 padding[128] = { 0x80, };
+ struct octeon_cop2_state state;
+ __be64 *dst = (__be64 *)hash;
+ unsigned int pad_len;
+ unsigned long flags;
+ unsigned int index;
+ __be64 bits[2];
+ int i;
+
+ /* Save number of bits. */
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+
+ /* Pad out to 112 mod 128. */
+ index = sctx->count[0] & 0x7f;
+ pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha512_store_hash(sctx);
+
+ __octeon_sha512_update(sctx, padding, pad_len);
+
+ /* Append length (before padding). */
+ __octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits));
+
+ octeon_sha512_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ /* Store state in digest. */
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be64(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(struct sha512_state));
+
+ return 0;
+}
+
+static int octeon_sha384_final(struct shash_desc *desc, u8 *hash)
+{
+ u8 D[64];
+
+ octeon_sha512_final(desc, D);
+
+ memcpy(hash, D, 48);
+ memzero_explicit(D, 64);
+
+ return 0;
+}
+
+static struct shash_alg octeon_sha512_algs[2] = { {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .init = octeon_sha512_init,
+ .update = octeon_sha512_update,
+ .final = octeon_sha512_final,
+ .descsize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name= "octeon-sha512",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .init = octeon_sha384_init,
+ .update = octeon_sha512_update,
+ .final = octeon_sha384_final,
+ .descsize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name= "octeon-sha384",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init octeon_sha512_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shashes(octeon_sha512_algs,
+ ARRAY_SIZE(octeon_sha512_algs));
+}
+
+static void __exit octeon_sha512_mod_fini(void)
+{
+ crypto_unregister_shashes(octeon_sha512_algs,
+ ARRAY_SIZE(octeon_sha512_algs));
+}
+
+module_init(octeon_sha512_mod_init);
+module_exit(octeon_sha512_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 7d8987818ccf..d8960d46417b 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void)
swiotlbsize = 64 * (1<<20);
}
#endif
-#ifdef CONFIG_USB_OCTEON_OHCI
+#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
/* OCTEON II ohci is only 32-bit. */
if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
swiotlbsize = 64 * (1<<20);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 42e38c30b540..89b5273299ab 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -519,44 +519,89 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:40;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:20; /* Phys mem addr (33..14) */
+#else
+ uint64_t addr:20; /* Phys mem addr (33..14) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:40;
+#endif
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:41;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:19; /* Phys mem addr (33..15) */
+#else
+ uint64_t addr:19; /* Phys mem addr (33..15) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:41;
+#endif
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:42;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:18; /* Phys mem addr (33..16) */
+#else
+ uint64_t addr:18; /* Phys mem addr (33..16) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:42;
+#endif
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:43;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:17; /* Phys mem addr (33..17) */
+#else
+ uint64_t addr:17; /* Phys mem addr (33..17) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:43;
+#endif
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:44;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:16; /* Phys mem addr (33..18) */
+#else
+ uint64_t addr:16; /* Phys mem addr (33..18) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:44;
+#endif
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
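
The pattern repeated in every tag struct above is the usual kernel idiom for overlaying bit-fields on a hardware word: on the MIPS ABIs gcc allocates bit-fields from the most significant bit on big-endian targets and from the least significant bit on little-endian ones, so the member order is mirrored to keep the union with the raw 64-bit value decoding the same layout. A cut-down, user-space illustration (assumes __BIG_ENDIAN_BITFIELD would come from the kernel's asm/byteorder.h; here it is simply left undefined on a little-endian host):

#include <stdio.h>
#include <stdint.h>

/* simplified version of the cn50xx tag above */
union example_tag {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved:40;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:20;	/* Phys mem addr (33..14) */
#else
		uint64_t addr:20;
		uint64_t U:1;
		uint64_t L:1;
		uint64_t D:1;
		uint64_t V:1;
		uint64_t reserved:40;
#endif
	} s;
};

int main(void)
{
	union example_tag t = { .u64 = 0 };

	t.s.V = 1;
	t.s.addr = 0x12345;
	/* with the matching branch selected, V and addr land on the same
	 * bits of u64 on either endianness */
	printf("raw tag = 0x%016llx\n", (unsigned long long)t.u64);
	return 0;
}
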
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 237e5b1a72d8..a5e8f4a784af 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -8,9 +8,11 @@
* Copyright (C) 2007, 2008 Cavium Networks
*/
#include <linux/kernel.h>
-#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/semaphore.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
+#include <linux/of_platform.h>
#include <linux/mtd/partitions.h>
#include <asm/octeon/octeon.h>
@@ -25,19 +27,62 @@ static const char *part_probe_types[] = {
NULL
};
+static map_word octeon_flash_map_read(struct map_info *map, unsigned long ofs)
+{
+ map_word r;
+
+ down(&octeon_bootbus_sem);
+ r = inline_map_read(map, ofs);
+ up(&octeon_bootbus_sem);
+
+ return r;
+}
+
+static void octeon_flash_map_write(struct map_info *map, const map_word datum,
+ unsigned long ofs)
+{
+ down(&octeon_bootbus_sem);
+ inline_map_write(map, datum, ofs);
+ up(&octeon_bootbus_sem);
+}
+
+static void octeon_flash_map_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ down(&octeon_bootbus_sem);
+ inline_map_copy_from(map, to, from, len);
+ up(&octeon_bootbus_sem);
+}
+
+static void octeon_flash_map_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
+{
+ down(&octeon_bootbus_sem);
+ inline_map_copy_to(map, to, from, len);
+ up(&octeon_bootbus_sem);
+}
+
/**
* Module/ driver initialization.
*
* Returns Zero on success
*/
-static int __init flash_init(void)
+static int octeon_flash_probe(struct platform_device *pdev)
{
+ union cvmx_mio_boot_reg_cfgx region_cfg;
+ u32 cs;
+ int r;
+ struct device_node *np = pdev->dev.of_node;
+
+ r = of_property_read_u32(np, "reg", &cs);
+ if (r)
+ return r;
+
/*
* Read the bootbus region 0 setup to determine the base
* address of the flash.
*/
- union cvmx_mio_boot_reg_cfgx region_cfg;
- region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
+ region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
if (region_cfg.s.en) {
/*
* The bootloader always takes the flash and sets its
@@ -56,7 +101,11 @@ static int __init flash_init(void)
flash_map.virt = ioremap(flash_map.phys, flash_map.size);
pr_notice("Bootbus flash: Setting flash for %luMB flash at "
"0x%08llx\n", flash_map.size >> 20, flash_map.phys);
- simple_map_init(&flash_map);
+ WARN_ON(!map_bankwidth_supported(flash_map.bankwidth));
+ flash_map.read = octeon_flash_map_read;
+ flash_map.write = octeon_flash_map_write;
+ flash_map.copy_from = octeon_flash_map_copy_from;
+ flash_map.copy_to = octeon_flash_map_copy_to;
mymtd = do_map_probe("cfi_probe", &flash_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
@@ -69,4 +118,26 @@ static int __init flash_init(void)
return 0;
}
-late_initcall(flash_init);
+static const struct of_device_id of_flash_match[] = {
+ {
+ .compatible = "cfi-flash",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_flash_match);
+
+static struct platform_driver of_flash_driver = {
+ .driver = {
+ .name = "octeon-of-flash",
+ .of_match_table = of_flash_match,
+ },
+ .probe = octeon_flash_probe,
+};
+
+static int octeon_flash_init(void)
+{
+ return platform_driver_register(&of_flash_driver);
+}
+late_initcall(octeon_flash_init);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 12410a2788d8..d113c8ded6e2 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -325,8 +325,14 @@ static void __init octeon_ehci_hw_start(struct device *dev)
/* Use 64-bit addressing. */
ehci_ctl.s.ehci_64b_addr_en = 1;
ehci_ctl.s.l2c_addr_msb = 0;
+#ifdef __BIG_ENDIAN
ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+#else
+ ehci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
+ ehci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
+ ehci_ctl.s.inv_reg_a2 = 1;
+#endif
cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
octeon2_usb_clocks_stop();
@@ -381,8 +387,14 @@ static void __init octeon_ohci_hw_start(struct device *dev)
ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
ohci_ctl.s.l2c_addr_msb = 0;
+#ifdef __BIG_ENDIAN
ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+#else
+ ohci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
+ ohci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
+ ohci_ctl.s.inv_reg_a2 = 1;
+#endif
cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);
octeon2_usb_clocks_stop();
@@ -958,6 +970,13 @@ end_led:
}
}
+ if (octeon_bootinfo->board_type != CVMX_BOARD_TYPE_CUST_DSR1000N) {
+ int dsr1000n_leds = fdt_path_offset(initial_boot_params,
+ "/dsr1000n-leds");
+ if (dsr1000n_leds >= 0)
+ fdt_nop_node(initial_boot_params, dsr1000n_leds);
+ }
+
return 0;
}
diff --git a/arch/mips/cavium-octeon/octeon_boot.h b/arch/mips/cavium-octeon/octeon_boot.h
index 7b066bbca86d..a6ce7c43e0ae 100644
--- a/arch/mips/cavium-octeon/octeon_boot.h
+++ b/arch/mips/cavium-octeon/octeon_boot.h
@@ -37,11 +37,13 @@ struct boot_init_vector {
/* similar to bootloader's linux_app_boot_info but without global data */
struct linux_app_boot_info {
+#ifdef __BIG_ENDIAN_BITFIELD
uint32_t labi_signature;
uint32_t start_core0_addr;
uint32_t avail_coremask;
uint32_t pci_console_active;
uint32_t icache_prefetch_disable;
+ uint32_t padding;
uint64_t InitTLBStart_addr;
uint32_t start_app_addr;
uint32_t cur_exception_base;
@@ -49,6 +51,27 @@ struct linux_app_boot_info {
uint32_t compact_flash_common_base_addr;
uint32_t compact_flash_attribute_base_addr;
uint32_t led_display_base_addr;
+#else
+ uint32_t start_core0_addr;
+ uint32_t labi_signature;
+
+ uint32_t pci_console_active;
+ uint32_t avail_coremask;
+
+ uint32_t padding;
+ uint32_t icache_prefetch_disable;
+
+ uint64_t InitTLBStart_addr;
+
+ uint32_t cur_exception_base;
+ uint32_t start_app_addr;
+
+ uint32_t compact_flash_common_base_addr;
+ uint32_t no_mark_private_data;
+
+ uint32_t led_display_base_addr;
+ uint32_t compact_flash_attribute_base_addr;
+#endif
};
/* If not to copy a lot of bootloader's structures
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index a42110e7edbc..89a628455bc2 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -51,6 +51,9 @@ extern void pci_console_init(const char *arg);
static unsigned long long MAX_MEMORY = 512ull << 20;
+DEFINE_SEMAPHORE(octeon_bootbus_sem);
+EXPORT_SYMBOL(octeon_bootbus_sem);
+
struct octeon_boot_descriptor *octeon_boot_desc_ptr;
struct cvmx_bootinfo *octeon_bootinfo;
@@ -413,7 +416,10 @@ static void octeon_restart(char *command)
mb();
while (1)
- cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
+ if (OCTEON_IS_OCTEON3())
+ cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}
@@ -1043,7 +1049,7 @@ int prom_putchar(char c)
}
EXPORT_SYMBOL(prom_putchar);
-void prom_free_prom_memory(void)
+void __init prom_free_prom_memory(void)
{
if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
/* Check for presence of Core-14449 fix. */
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 8b1eeffa12ed..56f5d080ef9d 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -72,7 +72,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
{
unsigned int i;
- for_each_cpu_mask(i, *mask)
+ for_each_cpu(i, mask)
octeon_send_ipi_single(i, action);
}
@@ -239,7 +239,7 @@ static int octeon_cpu_disable(void)
return -ENOTSUPP;
set_cpu_online(cpu, false);
- cpu_clear(cpu, cpu_callin_map);
+ cpumask_clear_cpu(cpu, &cpu_callin_map);
octeon_fixup_irqs();
flush_cache_all();
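
The smp.c hunks are a straight conversion to the pointer-based cpumask API; a sketch of the replacement forms using only names that appear in this diff (illustrative only):

/*
 *	for_each_cpu_mask(i, *mask)	becomes	for_each_cpu(i, mask)
 *	cpu_clear(cpu, cpu_callin_map)	becomes	cpumask_clear_cpu(cpu, &cpu_callin_map)
 */
static void example_ipi_broadcast(const struct cpumask *mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		octeon_send_ipi_single(cpu, action);
}
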
diff --git a/arch/mips/configs/bcm3384_defconfig b/arch/mips/configs/bmips_be_defconfig
index 88711c28ff32..f5585c8f35ad 100644
--- a/arch/mips/configs/bcm3384_defconfig
+++ b/arch/mips/configs/bmips_be_defconfig
@@ -1,4 +1,4 @@
-CONFIG_BCM3384=y
+CONFIG_BMIPS_GENERIC=y
CONFIG_HIGHMEM=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
@@ -33,6 +33,7 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BRCMSTB_GISB_ARB=y
CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -43,15 +44,19 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
+CONFIG_BCMGENET=y
CONFIG_USB_USBNET=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_EARLYCON_FORCE=y
CONFIG_SERIAL_BCM63XX=y
CONFIG_SERIAL_BCM63XX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_BRCMSTB=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
@@ -75,4 +80,6 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon"
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig
new file mode 100644
index 000000000000..400a47ec1ef1
--- /dev/null
+++ b/arch/mips/configs/bmips_stb_defconfig
@@ -0,0 +1,88 @@
+CONFIG_BMIPS_GENERIC=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_EXPERT=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MAC80211=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BRCMSTB_GISB_ARB=y
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_BLK_DEV is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_BCMGENET=y
+CONFIG_USB_USBNET=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_BRCMSTB=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_CIFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon"
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 70ffe9b55829..fe48220157a9 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -105,7 +105,8 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
# CONFIG_RTC_INTF_SYSFS is not set
# CONFIG_RTC_INTF_PROC is not set
-CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1685_FAMILY=y
+CONFIG_RTC_DRV_DS1685=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index e51aad9a94b1..0cbc9863c7c8 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -171,6 +171,7 @@ CONFIG_SERIAL_8250_FOURPORT=y
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_HW_RANDOM=y
CONFIG_RTC=y
+CONFIG_GPIO_LOONGSON=y
CONFIG_THERMAL=y
CONFIG_MEDIA_SUPPORT=m
CONFIG_VIDEO_DEV=m
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 7eabcd2031ea..c8442997477b 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -243,6 +243,7 @@ CONFIG_HW_RANDOM=y
CONFIG_RAW_DRIVER=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
+CONFIG_GPIO_LOONGSON=y
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83627HF=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
new file mode 100644
index 000000000000..c388bff09148
--- /dev/null
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -0,0 +1,439 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_R5_FEATURES=y
+CONFIG_CPU_MIPS32_R5_XPA=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
new file mode 100644
index 000000000000..f22e92ee7709
--- /dev/null
+++ b/arch/mips/configs/pistachio_defconfig
@@ -0,0 +1,336 @@
+CONFIG_MACH_PISTACHIO=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_MIPS_CPS=y
+# CONFIG_COMPACTION is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_ZSMALLOC=y
+CONFIG_NR_CPUS=4
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="localhost"
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=m
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_CPU_IDLE=y
+# CONFIG_MIPS_CPS_CPUIDLE is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_IPV6_SIT=m
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_MAC80211_VERBOSE_DEBUG=y
+CONFIG_RFKILL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_ZRAM=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=m
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_MCS7830=m
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_MAC80211_HWSIM=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_TCG_TPM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_IMG=y
+CONFIG_I2C_STUB=m
+CONFIG_SPI=y
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_IMG_SPFI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_IMGPDC_WDT=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_RC_SUPPORT=y
+# CONFIG_RC_DECODERS is not set
+CONFIG_RC_DEVICES=y
+CONFIG_IR_IMG=y
+CONFIG_IR_IMG_NEC=y
+CONFIG_IR_IMG_JVC=y
+CONFIG_IR_IMG_SONY=y
+CONFIG_IR_IMG_SHARP=y
+CONFIG_IR_IMG_SANYO=y
+CONFIG_IR_IMG_RC5=y
+CONFIG_IR_IMG_RC6=y
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB_AUDIO=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEFAULT_PERSIST is not set
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_TEST=m
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_IMG_MDC_DMA=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+# CONFIG_ANDROID_TIMED_OUTPUT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_CC10001_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMG=y
+CONFIG_ANDROID=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_HFSPLUS_FS=m
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=y
+CONFIG_TEST_UDELAY=m
+CONFIG_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_SECURITY_YAMA_STACKED=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+# CONFIG_XZ_DEC_X86 is not set
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 41a2fa1fa12e..8c6f508e59de 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -267,8 +267,13 @@ handle_it:
#ifdef CONFIG_32BIT
fpu:
+ lw t0,fpu_kstat_irq
+ nop
+ lw t1,(t0)
+ nop
+ addu t1,1
j handle_fpe_int
- nop
+ sw t1,(t0)
#endif
spurious:
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index 41bbffd9cc0e..a0b8943c8f11 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -12,13 +12,15 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqnr.h>
#include <linux/module.h>
#include <linux/param.h>
+#include <linux/percpu-defs.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pm.h>
-#include <linux/irq.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
@@ -98,6 +100,7 @@ int_ptr asic_mask_nr_tbl[DEC_MAX_ASIC_INTS][2] = {
{ { .i = ~0 }, { .p = asic_intr_unimplemented } },
};
int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
+int *fpu_kstat_irq;
static struct irqaction ioirq = {
.handler = no_action,
@@ -755,8 +758,15 @@ void __init arch_init_irq(void)
dec_interrupt[DEC_IRQ_HALT] = -1;
/* Register board interrupts: FPU and cascade. */
- if (dec_interrupt[DEC_IRQ_FPU] >= 0)
- setup_irq(dec_interrupt[DEC_IRQ_FPU], &fpuirq);
+ if (dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) {
+ struct irq_desc *desc_fpu;
+ int irq_fpu;
+
+ irq_fpu = dec_interrupt[DEC_IRQ_FPU];
+ setup_irq(irq_fpu, &fpuirq);
+ desc_fpu = irq_to_desc(irq_fpu);
+ fpu_kstat_irq = this_cpu_ptr(desc_fpu->kstat_irqs);
+ }
if (dec_interrupt[DEC_IRQ_CASCADE] >= 0)
setup_irq(dec_interrupt[DEC_IRQ_CASCADE], &ioirq);
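Taken together with the int-handler.S hunk above, this change caches a pointer to the FPU interrupt's per-CPU kstat counter at setup time so the low-level dispatch code can account the interrupt without a C call. The load/add/store sequence added before the jump to handle_fpe_int is roughly the following C (illustrative only; fpu_kstat_irq is the pointer initialised in arch_init_irq() above):

	/* Equivalent of the added lw/lw/addu/sw sequence in int-handler.S:
	 * bump the cached per-CPU interrupt count for the FPU IRQ. */
	(*fpu_kstat_irq)++;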
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
index e41c56e375b1..1e38f0e1ea3e 100644
--- a/arch/mips/include/asm/asm-eva.h
+++ b/arch/mips/include/asm/asm-eva.h
@@ -11,6 +11,36 @@
#define __ASM_ASM_EVA_H
#ifndef __ASSEMBLY__
+
+/* Kernel variants */
+
+#define kernel_cache(op, base) "cache " op ", " base "\n"
+#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
+#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
+#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
+#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
+#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
+#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
+#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
+#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
+#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
+#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
+#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
+#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
+#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
+#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
#ifdef CONFIG_EVA
#define __BUILD_EVA_INSN(insn, reg, addr) \
@@ -41,37 +71,60 @@
#else
-#define user_cache(op, base) "cache " op ", " base "\n"
-#define user_ll(reg, addr) "ll " reg ", " addr "\n"
-#define user_sc(reg, addr) "sc " reg ", " addr "\n"
-#define user_lw(reg, addr) "lw " reg ", " addr "\n"
-#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
-#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
-#define user_lh(reg, addr) "lh " reg ", " addr "\n"
-#define user_lb(reg, addr) "lb " reg ", " addr "\n"
-#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
-#define user_sw(reg, addr) "sw " reg ", " addr "\n"
-#define user_swl(reg, addr) "swl " reg ", " addr "\n"
-#define user_swr(reg, addr) "swr " reg ", " addr "\n"
-#define user_sh(reg, addr) "sh " reg ", " addr "\n"
-#define user_sb(reg, addr) "sb " reg ", " addr "\n"
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)
#ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr) user_sw(reg, addr)
-#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
-#define user_sd(reg, addr) "sd " reg", " addr "\n"
-#define user_ld(reg, addr) "ld " reg", " addr "\n"
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr) kernel_ld(reg, addr)
#endif /* CONFIG_32BIT */
#endif /* CONFIG_EVA */
#else /* __ASSEMBLY__ */
+#define kernel_cache(op, base) cache op, base
+#define kernel_ll(reg, addr) ll reg, addr
+#define kernel_sc(reg, addr) sc reg, addr
+#define kernel_lw(reg, addr) lw reg, addr
+#define kernel_lwl(reg, addr) lwl reg, addr
+#define kernel_lwr(reg, addr) lwr reg, addr
+#define kernel_lh(reg, addr) lh reg, addr
+#define kernel_lb(reg, addr) lb reg, addr
+#define kernel_lbu(reg, addr) lbu reg, addr
+#define kernel_sw(reg, addr) sw reg, addr
+#define kernel_swl(reg, addr) swl reg, addr
+#define kernel_swr(reg, addr) swr reg, addr
+#define kernel_sh(reg, addr) sh reg, addr
+#define kernel_sb(reg, addr) sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) sd reg, addr
+#define kernel_ld(reg, addr) ld reg, addr
+#endif /* CONFIG_32BIT */
+
#ifdef CONFIG_EVA
#define __BUILD_EVA_INSN(insn, reg, addr) \
@@ -101,31 +154,27 @@
#define user_sd(reg, addr) user_sw(reg, addr)
#else
-#define user_cache(op, base) cache op, base
-#define user_ll(reg, addr) ll reg, addr
-#define user_sc(reg, addr) sc reg, addr
-#define user_lw(reg, addr) lw reg, addr
-#define user_lwl(reg, addr) lwl reg, addr
-#define user_lwr(reg, addr) lwr reg, addr
-#define user_lh(reg, addr) lh reg, addr
-#define user_lb(reg, addr) lb reg, addr
-#define user_lbu(reg, addr) lbu reg, addr
-#define user_sw(reg, addr) sw reg, addr
-#define user_swl(reg, addr) swl reg, addr
-#define user_swr(reg, addr) swr reg, addr
-#define user_sh(reg, addr) sh reg, addr
-#define user_sb(reg, addr) sb reg, addr
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)
#ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr) user_sw(reg, addr)
-#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
-#define user_sd(reg, addr) sd reg, addr
-#define user_ld(reg, addr) ld reg, addr
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr)		kernel_ld(reg, addr)
#endif /* CONFIG_32BIT */
#endif /* CONFIG_EVA */
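The kernel_*/user_* helpers above are string-pasting macros meant to be expanded inside inline assembly: with CONFIG_EVA the user_* variants emit the EVA user-mode instructions (lwe, swe, ...), while without EVA they now simply alias the kernel_* forms. A minimal, hypothetical sketch of a caller (real uaccess code also installs exception-table fixups, omitted here):

	/* Hypothetical example, not part of the patch: expanding user_lw()
	 * in inline assembly.  Under CONFIG_EVA this emits "lwe"; without
	 * EVA it degenerates to kernel_lw(), i.e. a plain "lw". */
	static inline unsigned int __example_load_user_word(const unsigned int __user *addr)
	{
		unsigned int val;

		__asm__ __volatile__(
			user_lw("%0", "0(%1)")
			: "=r" (val)
			: "r" (addr));

		return val;
	}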
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index 80386470d3a4..0ef39ad0f2d4 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -16,38 +16,22 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
- swc1 $f0, THREAD_FPR0(\thread)
- swc1 $f1, THREAD_FPR1(\thread)
- swc1 $f2, THREAD_FPR2(\thread)
- swc1 $f3, THREAD_FPR3(\thread)
- swc1 $f4, THREAD_FPR4(\thread)
- swc1 $f5, THREAD_FPR5(\thread)
- swc1 $f6, THREAD_FPR6(\thread)
- swc1 $f7, THREAD_FPR7(\thread)
- swc1 $f8, THREAD_FPR8(\thread)
- swc1 $f9, THREAD_FPR9(\thread)
- swc1 $f10, THREAD_FPR10(\thread)
- swc1 $f11, THREAD_FPR11(\thread)
- swc1 $f12, THREAD_FPR12(\thread)
- swc1 $f13, THREAD_FPR13(\thread)
- swc1 $f14, THREAD_FPR14(\thread)
- swc1 $f15, THREAD_FPR15(\thread)
- swc1 $f16, THREAD_FPR16(\thread)
- swc1 $f17, THREAD_FPR17(\thread)
- swc1 $f18, THREAD_FPR18(\thread)
- swc1 $f19, THREAD_FPR19(\thread)
- swc1 $f20, THREAD_FPR20(\thread)
- swc1 $f21, THREAD_FPR21(\thread)
- swc1 $f22, THREAD_FPR22(\thread)
- swc1 $f23, THREAD_FPR23(\thread)
- swc1 $f24, THREAD_FPR24(\thread)
- swc1 $f25, THREAD_FPR25(\thread)
- swc1 $f26, THREAD_FPR26(\thread)
- swc1 $f27, THREAD_FPR27(\thread)
- swc1 $f28, THREAD_FPR28(\thread)
- swc1 $f29, THREAD_FPR29(\thread)
- swc1 $f30, THREAD_FPR30(\thread)
- swc1 $f31, THREAD_FPR31(\thread)
+ s.d $f0, THREAD_FPR0(\thread)
+ s.d $f2, THREAD_FPR2(\thread)
+ s.d $f4, THREAD_FPR4(\thread)
+ s.d $f6, THREAD_FPR6(\thread)
+ s.d $f8, THREAD_FPR8(\thread)
+ s.d $f10, THREAD_FPR10(\thread)
+ s.d $f12, THREAD_FPR12(\thread)
+ s.d $f14, THREAD_FPR14(\thread)
+ s.d $f16, THREAD_FPR16(\thread)
+ s.d $f18, THREAD_FPR18(\thread)
+ s.d $f20, THREAD_FPR20(\thread)
+ s.d $f22, THREAD_FPR22(\thread)
+ s.d $f24, THREAD_FPR24(\thread)
+ s.d $f26, THREAD_FPR26(\thread)
+ s.d $f28, THREAD_FPR28(\thread)
+ s.d $f30, THREAD_FPR30(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -56,38 +40,22 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- lwc1 $f0, THREAD_FPR0(\thread)
- lwc1 $f1, THREAD_FPR1(\thread)
- lwc1 $f2, THREAD_FPR2(\thread)
- lwc1 $f3, THREAD_FPR3(\thread)
- lwc1 $f4, THREAD_FPR4(\thread)
- lwc1 $f5, THREAD_FPR5(\thread)
- lwc1 $f6, THREAD_FPR6(\thread)
- lwc1 $f7, THREAD_FPR7(\thread)
- lwc1 $f8, THREAD_FPR8(\thread)
- lwc1 $f9, THREAD_FPR9(\thread)
- lwc1 $f10, THREAD_FPR10(\thread)
- lwc1 $f11, THREAD_FPR11(\thread)
- lwc1 $f12, THREAD_FPR12(\thread)
- lwc1 $f13, THREAD_FPR13(\thread)
- lwc1 $f14, THREAD_FPR14(\thread)
- lwc1 $f15, THREAD_FPR15(\thread)
- lwc1 $f16, THREAD_FPR16(\thread)
- lwc1 $f17, THREAD_FPR17(\thread)
- lwc1 $f18, THREAD_FPR18(\thread)
- lwc1 $f19, THREAD_FPR19(\thread)
- lwc1 $f20, THREAD_FPR20(\thread)
- lwc1 $f21, THREAD_FPR21(\thread)
- lwc1 $f22, THREAD_FPR22(\thread)
- lwc1 $f23, THREAD_FPR23(\thread)
- lwc1 $f24, THREAD_FPR24(\thread)
- lwc1 $f25, THREAD_FPR25(\thread)
- lwc1 $f26, THREAD_FPR26(\thread)
- lwc1 $f27, THREAD_FPR27(\thread)
- lwc1 $f28, THREAD_FPR28(\thread)
- lwc1 $f29, THREAD_FPR29(\thread)
- lwc1 $f30, THREAD_FPR30(\thread)
- lwc1 $f31, THREAD_FPR31(\thread)
+ l.d $f0, THREAD_FPR0(\thread)
+ l.d $f2, THREAD_FPR2(\thread)
+ l.d $f4, THREAD_FPR4(\thread)
+ l.d $f6, THREAD_FPR6(\thread)
+ l.d $f8, THREAD_FPR8(\thread)
+ l.d $f10, THREAD_FPR10(\thread)
+ l.d $f12, THREAD_FPR12(\thread)
+ l.d $f14, THREAD_FPR14(\thread)
+ l.d $f16, THREAD_FPR16(\thread)
+ l.d $f18, THREAD_FPR18(\thread)
+ l.d $f20, THREAD_FPR20(\thread)
+ l.d $f22, THREAD_FPR22(\thread)
+ l.d $f24, THREAD_FPR24(\thread)
+ l.d $f26, THREAD_FPR26(\thread)
+ l.d $f28, THREAD_FPR28(\thread)
+ l.d $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 9f935f6aa996..0cf29bd5dc5c 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -481,7 +481,7 @@ static inline unsigned long __fls(unsigned long word)
{
int num;
- if (BITS_PER_LONG == 32 &&
+ if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
@@ -494,7 +494,7 @@ static inline unsigned long __fls(unsigned long word)
return 31 - num;
}
- if (BITS_PER_LONG == 64 &&
+ if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
__builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
__asm__(
" .set push \n"
@@ -559,7 +559,8 @@ static inline int fls(int x)
{
int r;
- if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
+ if (!__builtin_constant_p(x) &&
+ __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index 30939b02e3ff..6d25ad33ec78 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -122,6 +122,22 @@ static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
barrier();
}
+static inline void bmips_post_dma_flush(struct device *dev)
+{
+ void __iomem *cbr = BMIPS_GET_CBR();
+ u32 cfg;
+
+ if (boot_cpu_type() != CPU_BMIPS3300 &&
+ boot_cpu_type() != CPU_BMIPS4350 &&
+ boot_cpu_type() != CPU_BMIPS4380)
+ return;
+
+ /* Flush stale data out of the readahead cache */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+}
+
#endif /* !defined(__ASSEMBLY__) */
#endif /* _ASM_BMIPS_H */
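bmips_post_dma_flush() forces the BMIPS read-ahead cache (RAC) to drop stale prefetched lines by setting bit 8 of BMIPS_RAC_CONFIG and reading the register back, which is needed after a device has DMA'd new data into memory on the affected cores. A hedged sketch of how a platform might hook it into the generic DMA path (the plat_post_dma_flush() name follows the dma-coherence.h convention used elsewhere in this series; the wiring below is an assumption, not part of this patch):

	/* Hypothetical mach-specific dma-coherence.h glue: flush the RAC
	 * once a device-to-memory transfer has completed. */
	#include <asm/bmips.h>

	static inline void plat_post_dma_flush(struct device *dev)
	{
		bmips_post_dma_flush(dev);
	}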
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index e08381a37f8b..723229f4cf27 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -29,6 +29,20 @@
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
*/
+
+ /*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty PG_arch_1
+
+#define Page_dcache_dirty(page) \
+ test_bit(PG_dcache_dirty, &(page)->flags)
+#define SetPageDcacheDirty(page) \
+ set_bit(PG_dcache_dirty, &(page)->flags)
+#define ClearPageDcacheDirty(page) \
+ clear_bit(PG_dcache_dirty, &(page)->flags)
+
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
@@ -37,13 +51,15 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);
+extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+ if (cpu_has_dc_aliases)
__flush_dcache_page(page);
-
+ else if (!cpu_has_ic_fills_f_dc)
+ SetPageDcacheDirty(page);
}
#define flush_dcache_mmap_lock(mapping) do { } while (0)
@@ -61,6 +77,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
+ if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
+ Page_dcache_dirty(page)) {
+ __flush_icache_page(vma, page);
+ ClearPageDcacheDirty(page);
+ }
}
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
@@ -95,19 +116,6 @@ extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
-/*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
- */
-#define PG_dcache_dirty PG_arch_1
-
-#define Page_dcache_dirty(page) \
- test_bit(PG_dcache_dirty, &(page)->flags)
-#define SetPageDcacheDirty(page) \
- set_bit(PG_dcache_dirty, &(page)->flags)
-#define ClearPageDcacheDirty(page) \
- clear_bit(PG_dcache_dirty, &(page)->flags)
-
/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
diff --git a/arch/mips/include/asm/cdmm.h b/arch/mips/include/asm/cdmm.h
new file mode 100644
index 000000000000..16e22ce9719f
--- /dev/null
+++ b/arch/mips/include/asm/cdmm.h
@@ -0,0 +1,98 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#ifndef __ASM_CDMM_H
+#define __ASM_CDMM_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * struct mips_cdmm_device - Represents a single device on a CDMM bus.
+ * @dev: Driver model device object.
+ * @cpu: CPU which can access this device.
+ * @res: MMIO resource.
+ * @type: Device type identifier.
+ * @rev: Device revision number.
+ */
+struct mips_cdmm_device {
+ struct device dev;
+ unsigned int cpu;
+ struct resource res;
+ unsigned int type;
+ unsigned int rev;
+};
+
+/**
+ * struct mips_cdmm_driver - Represents a driver for a CDMM device.
+ * @drv: Driver model driver object.
+ * @probe: Callback for probing newly discovered devices.
+ * @remove: Callback to remove the device.
+ * @shutdown: Callback on system shutdown.
+ * @cpu_down: Callback when the parent CPU is going down.
+ * Any CPU pinned threads/timers should be disabled.
+ * @cpu_up: Callback when the parent CPU is coming back up again.
+ * CPU pinned threads/timers can be restarted.
+ * @id_table: Table for CDMM IDs to match against.
+ */
+struct mips_cdmm_driver {
+ struct device_driver drv;
+ int (*probe)(struct mips_cdmm_device *);
+ int (*remove)(struct mips_cdmm_device *);
+ void (*shutdown)(struct mips_cdmm_device *);
+ int (*cpu_down)(struct mips_cdmm_device *);
+ int (*cpu_up)(struct mips_cdmm_device *);
+ const struct mips_cdmm_device_id *id_table;
+};
+
+/**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be defined by platform code to
+ * pick a suitable value if none is configured by the bootloader.
+ *
+ * This address must be 32kB aligned, and the region occupies a maximum of 32kB
+ * of physical address space which must not be used for anything else.
+ *
+ * Returns: Physical base address for CDMM region, or 0 on failure.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void);
+
+extern struct bus_type mips_cdmm_bustype;
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type);
+
+#define to_mips_cdmm_device(d) container_of(d, struct mips_cdmm_device, dev)
+
+#define mips_cdmm_get_drvdata(d) dev_get_drvdata(&d->dev)
+#define mips_cdmm_set_drvdata(d, p) dev_set_drvdata(&d->dev, p)
+
+int mips_cdmm_driver_register(struct mips_cdmm_driver *);
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *);
+
+/*
+ * module_mips_cdmm_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_mips_cdmm_driver(__mips_cdmm_driver) \
+ module_driver(__mips_cdmm_driver, mips_cdmm_driver_register, \
+ mips_cdmm_driver_unregister)
+
+/* drivers/tty/mips_ejtag_fdc.c */
+
+#ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON
+int setup_early_fdc_console(void);
+#else
+static inline int setup_early_fdc_console(void)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ASM_CDMM_H */
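The header above is the whole driver-side CDMM API: a bus device (struct mips_cdmm_device) carrying the MMIO window, owning CPU, type and revision, a driver structure with probe/remove/shutdown and CPU hotplug callbacks, and the module_mips_cdmm_driver() boilerplate helper. A minimal, hypothetical driver skeleton using it (the device type and all names below are invented for illustration, and struct mips_cdmm_device_id is assumed to carry a type field as declared in mod_devicetable.h):

	#include <linux/module.h>
	#include <asm/cdmm.h>

	/* Hypothetical CDMM device type to match against. */
	static const struct mips_cdmm_device_id example_cdmm_ids[] = {
		{ .type = 0xfd },
		{ }
	};

	static int example_cdmm_probe(struct mips_cdmm_device *cdev)
	{
		/* cdev->res is the device's MMIO window, cdev->cpu the CPU
		 * that can access it. */
		dev_info(&cdev->dev, "CDMM device type %#x rev %u on CPU %u\n",
			 cdev->type, cdev->rev, cdev->cpu);
		return 0;
	}

	static int example_cdmm_remove(struct mips_cdmm_device *cdev)
	{
		return 0;
	}

	static struct mips_cdmm_driver example_cdmm_driver = {
		.drv		= { .name = "example_cdmm" },
		.probe		= example_cdmm_probe,
		.remove		= example_cdmm_remove,
		.id_table	= example_cdmm_ids,
	};
	module_mips_cdmm_driver(example_cdmm_driver);

	MODULE_LICENSE("GPL");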
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
index 65f9bdd02f1f..f0edf6fcd002 100644
--- a/arch/mips/include/asm/cevt-r4k.h
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -27,23 +27,4 @@ irqreturn_t c0_compare_interrupt(int, void *);
extern struct irqaction c0_compare_irqaction;
extern int cp0_timer_irq_installed;
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-
-static inline int handle_perf_irq(int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
-
#endif /* __ASM_CEVT_R4K_H */
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 5c585c5c1c3e..3ceacde5eb6e 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -218,6 +218,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
__u32 len, unsigned short proto,
__wsum sum)
{
+ __wsum tmp;
+
__asm__(
" .set push # csum_ipv6_magic\n"
" .set noreorder \n"
@@ -270,9 +272,9 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" addu %0, $1 # Add final carry\n"
" .set pop"
- : "=r" (sum), "=r" (proto)
+ : "=&r" (sum), "=&r" (tmp)
: "r" (saddr), "r" (daddr),
- "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
+ "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
return csum_fold(sum);
}
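The functional part of this change is the constraint list: the outputs become earlyclobber ("=&r") and the scratch value moves from the proto operand into a dedicated tmp, so the compiler can no longer assign an input operand to a register that the assembly overwrites before it has finished reading all of its inputs. A generic illustration of the earlyclobber hazard (hypothetical, unrelated to checksumming):

	/* Without "&" on %0 the compiler may give %0 the same register as
	 * %3; the first addu would then clobber c before it is read. */
	static inline unsigned int example_add3(unsigned int a, unsigned int b,
						unsigned int c)
	{
		unsigned int r;

		__asm__("addu	%0, %1, %2\n\t"	/* r = a + b (writes %0 early) */
			"addu	%0, %0, %3"	/* r = r + c */
			: "=&r" (r)
			: "r" (a), "r" (b), "r" (c));

		return r;
	}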
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index d0a2a68ca600..412f945f1f5e 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -229,21 +229,22 @@ extern void __cmpxchg_called_with_bad_pointer(void);
#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, , )
-#define cmpxchg64(ptr, o, n) \
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ cmpxchg_local((ptr), (o), (n)); \
})
-#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ cmpxchg((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif
#endif /* __ASM_CMPXCHG_H */
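After the reordering, 64-bit kernels keep the LL/SC-backed cmpxchg64() and cmpxchg64_local(), while 32-bit kernels gain a cmpxchg64() defined in terms of __cmpxchg64_local_generic(), which only disables interrupts on the local CPU and is therefore not SMP-atomic. A hedged usage sketch (all names invented; on 32-bit the data must not be updated concurrently from other CPUs without extra locking):

	#include <linux/types.h>
	#include <asm/cmpxchg.h>

	static u64 example_counter;

	/* Retry loop built on cmpxchg64(); on 32-bit MIPS this resolves to
	 * the local generic helper, so it is only safe for per-CPU or
	 * otherwise unshared data. */
	static void example_add64(u64 delta)
	{
		u64 old, new;

		do {
			old = example_counter;
			new = old + delta;
		} while (cmpxchg64(&example_counter, old, new) != old);
	}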
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 0d8208de9a3f..5aeaf19c26b0 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -68,6 +68,7 @@
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
#endif
+/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
#ifndef cpu_has_fpu
#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
#define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
@@ -139,6 +140,9 @@
# endif
#endif
+#ifndef cpu_has_xpa
+#define cpu_has_xpa (cpu_data[0].options & MIPS_CPU_XPA)
+#endif
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
#endif
@@ -220,8 +224,11 @@
#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r)
#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r)
-#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \
- cpu_has_mips_r6)
+#define cpu_has_mips_3_4_5_64_r2_r6 \
+ (cpu_has_mips_3 | cpu_has_mips_4_5_64_r2_r6)
+#define cpu_has_mips_4_5_64_r2_r6 \
+ (cpu_has_mips_4_5 | cpu_has_mips64r1 | \
+ cpu_has_mips_r2 | cpu_has_mips_r6)
#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
@@ -235,8 +242,39 @@
/* MIPSR2 and MIPSR6 have a lot of similarities */
#define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
+/*
+ * cpu_has_mips_r2_exec_hazard - return whether an IHB is required on the current processor
+ *
+ * Returns non-zero value if the current processor implementation requires
+ * an IHB instruction to deal with an instruction hazard as per MIPS R2
+ * architecture specification, zero otherwise.
+ */
#ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
+#define cpu_has_mips_r2_exec_hazard \
+({ \
+ int __res; \
+ \
+ switch (current_cpu_type()) { \
+ case CPU_M14KC: \
+ case CPU_74K: \
+ case CPU_1074K: \
+ case CPU_PROAPTIV: \
+ case CPU_P5600: \
+ case CPU_M5150: \
+ case CPU_QEMU_GENERIC: \
+ case CPU_CAVIUM_OCTEON: \
+ case CPU_CAVIUM_OCTEON_PLUS: \
+ case CPU_CAVIUM_OCTEON2: \
+ case CPU_CAVIUM_OCTEON3: \
+ __res = 0; \
+ break; \
+ \
+ default: \
+ __res = 1; \
+ } \
+ \
+ __res; \
+})
#endif
/*
@@ -366,4 +404,8 @@
# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
#endif
+#ifndef cpu_has_cdmm
+# define cpu_has_cdmm (cpu_data[0].options & MIPS_CPU_CDMM)
+#endif
+
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index c3f4f2d2e108..e7dc785a91ca 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -49,6 +49,8 @@ struct cpuinfo_mips {
unsigned int udelay_val;
unsigned int processor_id;
unsigned int fpu_id;
+ unsigned int fpu_csr31;
+ unsigned int fpu_msk31;
unsigned int msa_id;
unsigned int cputype;
int isa_level;
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 8245875f8b33..33f3cab9e689 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -157,6 +157,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
#endif
#ifdef CONFIG_SYS_HAS_CPU_RM7000
case CPU_RM7000:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 15687234d70a..e3adca1d0b99 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -67,7 +67,7 @@
#define PRID_IMP_R4300 0x0b00
#define PRID_IMP_VR41XX 0x0c00
#define PRID_IMP_R12000 0x0e00
-#define PRID_IMP_R14000 0x0f00
+#define PRID_IMP_R14000 0x0f00 /* R14K && R16K */
#define PRID_IMP_R8000 0x1000
#define PRID_IMP_PR4450 0x1200
#define PRID_IMP_R4600 0x2000
@@ -284,8 +284,8 @@ enum cpu_type_enum {
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
- CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,
- CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
+ CPU_R12000, CPU_R14000, CPU_R16000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
+ CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
CPU_SR71000, CPU_TX49XX,
/*
@@ -377,6 +377,8 @@ enum cpu_type_enum {
#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
#define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */
+#define MIPS_CPU_XPA 0x2000000000ull /* CPU supports Extended Physical Addressing */
+#define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */
/*
* CPU ASE encodings
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 06412aa9e3fb..fd1b4a150759 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -23,7 +23,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
- return 0;
+ return false;
return addr + size <= *dev->dma_mask;
}
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 535f196ffe02..f19e890b99d2 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -11,6 +11,9 @@
#include <linux/fs.h>
#include <uapi/linux/elf.h>
+#include <asm/cpu-info.h>
+#include <asm/current.h>
+
/* ELF header e_flags defines. */
/* MIPS architecture level. */
#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
@@ -294,9 +297,14 @@ do { \
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
\
+ clear_thread_flag(TIF_HYBRID_FPREGS); \
+ set_thread_flag(TIF_32BIT_FPREGS); \
+ \
mips_set_personality_fp(state); \
\
current->thread.abi = &mips_abi; \
+ \
+ current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \
} while (0)
#endif /* CONFIG_32BIT */
@@ -319,6 +327,8 @@ do { \
do { \
set_thread_flag(TIF_32BIT_REGS); \
set_thread_flag(TIF_32BIT_ADDR); \
+ clear_thread_flag(TIF_HYBRID_FPREGS); \
+ set_thread_flag(TIF_32BIT_FPREGS); \
\
mips_set_personality_fp(state); \
\
@@ -356,6 +366,8 @@ do { \
else \
current->thread.abi = &mips_abi; \
\
+ current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \
+ \
p = personality(current->personality); \
if (p != PER_LINUX32 && p != PER_LINUX) \
set_personality(PER_LINUX); \
@@ -410,10 +422,6 @@ struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
struct arch_elf_state {
int fp_abi;
int interp_fp_abi;
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index b104ad9d655f..084780b355aa 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -30,7 +30,7 @@
struct sigcontext;
struct sigcontext32;
-extern void _init_fpu(void);
+extern void _init_fpu(unsigned int);
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);
@@ -188,6 +188,7 @@ static inline void lose_fpu(int save)
static inline int init_fpu(void)
{
+ unsigned int fcr31 = current->thread.fpu.fcr31;
int ret = 0;
if (cpu_has_fpu) {
@@ -198,7 +199,7 @@ static inline int init_fpu(void)
return ret;
if (!cpu_has_fre) {
- _init_fpu();
+ _init_fpu(fcr31);
return 0;
}
@@ -212,7 +213,7 @@ static inline int init_fpu(void)
config5 = clear_c0_config5(MIPS_CONF5_FRE);
enable_fpu_hazard();
- _init_fpu();
+ _init_fpu(fcr31);
/* Restore FRE */
write_c0_config5(config5);
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3ee347713307..2f021cdfba4f 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -44,6 +44,7 @@ struct mips_fpu_emulator_stats {
unsigned long ieee754_overflow;
unsigned long ieee754_zerodiv;
unsigned long ieee754_invalidop;
+ unsigned long ds_emul;
};
DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
@@ -65,7 +66,8 @@ extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
-int process_fpemu_return(int sig, void __user *fault_addr);
+int process_fpemu_return(int sig, void __user *fault_addr,
+ unsigned long fcr31);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
@@ -86,8 +88,6 @@ static inline void fpu_emulator_init_fpu(void)
struct task_struct *t = current;
int i;
- t->thread.fpu.fcr31 = 0;
-
for (i = 0; i < 32; i++)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 5a4e1bb8fb1b..f0db99f8defe 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -47,6 +47,9 @@ extern void free_irqno(unsigned int irq);
extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;
+extern int cp0_fdc_irq;
+
+extern int __weak get_c0_fdc_int(void);
void arch_trigger_all_cpu_backtrace(bool);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
diff --git a/arch/mips/include/asm/mach-ar7/war.h b/arch/mips/include/asm/mach-ar7/war.h
deleted file mode 100644
index 99071e50faab..000000000000
--- a/arch/mips/include/asm/mach-ar7/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_AR7_WAR_H
-#define __ASM_MIPS_MACH_AR7_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_AR7_WAR_H */
diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h
index d8009c93a465..d5defdde32db 100644
--- a/arch/mips/include/asm/mach-ath25/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ath25/dma-coherence.h
@@ -59,16 +59,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
- return 0;
-}
-
static inline int plat_device_is_coherent(struct device *dev)
{
#ifdef CONFIG_DMA_COHERENT
@@ -79,4 +69,8 @@ static inline int plat_device_is_coherent(struct device *dev)
#endif
}
+static inline void plat_post_dma_flush(struct device *dev)
+{
+}
+
#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ath25/war.h b/arch/mips/include/asm/mach-ath25/war.h
deleted file mode 100644
index e3a5250ebd67..000000000000
--- a/arch/mips/include/asm/mach-ath25/war.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
- */
-#ifndef __ASM_MACH_ATH25_WAR_H
-#define __ASM_MACH_ATH25_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MACH_ATH25_WAR_H */
diff --git a/arch/mips/include/asm/mach-ath79/war.h b/arch/mips/include/asm/mach-ath79/war.h
deleted file mode 100644
index 0bb30905fd5b..000000000000
--- a/arch/mips/include/asm/mach-ath79/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MACH_ATH79_WAR_H
-#define __ASM_MACH_ATH79_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MACH_ATH79_WAR_H */
diff --git a/arch/mips/include/asm/mach-au1x00/war.h b/arch/mips/include/asm/mach-au1x00/war.h
deleted file mode 100644
index 72e260d24e59..000000000000
--- a/arch/mips/include/asm/mach-au1x00/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_AU1X00_WAR_H
-#define __ASM_MIPS_MACH_AU1X00_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_AU1X00_WAR_H */
diff --git a/arch/mips/include/asm/mach-bcm3384/war.h b/arch/mips/include/asm/mach-bcm3384/war.h
deleted file mode 100644
index 59d7599059b0..000000000000
--- a/arch/mips/include/asm/mach-bcm3384/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_BCM3384_WAR_H
-#define __ASM_MIPS_MACH_BCM3384_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_BCM3384_WAR_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
index 7527c1d33d02..8ed77f618940 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -22,6 +22,7 @@
#include <linux/ssb/ssb.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_soc.h>
+#include <linux/bcm47xx_nvram.h>
enum bcm47xx_bus_type {
#ifdef CONFIG_BCM47XX_SSB
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index 1f5643b89a91..c41d1dce1062 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -67,6 +67,7 @@ enum bcm47xx_board {
BCM47XX_BOARD_LINKSYS_WRT150NV11,
BCM47XX_BOARD_LINKSYS_WRT160NV1,
BCM47XX_BOARD_LINKSYS_WRT160NV3,
+ BCM47XX_BOARD_LINKSYS_WRT300N_V1,
BCM47XX_BOARD_LINKSYS_WRT300NV11,
BCM47XX_BOARD_LINKSYS_WRT310NV1,
BCM47XX_BOARD_LINKSYS_WRT310NV2,
@@ -74,6 +75,7 @@ enum bcm47xx_board {
BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101,
BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467,
BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708,
+ BCM47XX_BOARD_LINKSYS_WRT600N_V11,
BCM47XX_BOARD_LINKSYS_WRT610NV1,
BCM47XX_BOARD_LINKSYS_WRT610NV2,
BCM47XX_BOARD_LINKSYS_WRTSL54GS,
@@ -86,9 +88,11 @@ enum bcm47xx_board {
BCM47XX_BOARD_NETGEAR_WGR614V8,
BCM47XX_BOARD_NETGEAR_WGR614V9,
+ BCM47XX_BOARD_NETGEAR_WGR614_V10,
BCM47XX_BOARD_NETGEAR_WNDR3300,
BCM47XX_BOARD_NETGEAR_WNDR3400V1,
BCM47XX_BOARD_NETGEAR_WNDR3400V2,
+ BCM47XX_BOARD_NETGEAR_WNDR3400_V3,
BCM47XX_BOARD_NETGEAR_WNDR3400VCNA,
BCM47XX_BOARD_NETGEAR_WNDR3700V3,
BCM47XX_BOARD_NETGEAR_WNDR4000,
diff --git a/arch/mips/include/asm/mach-bcm47xx/war.h b/arch/mips/include/asm/mach-bcm47xx/war.h
deleted file mode 100644
index a3d2f448b10e..000000000000
--- a/arch/mips/include/asm/mach-bcm47xx/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_BCM47XX_WAR_H
-#define __ASM_MIPS_MACH_BCM47XX_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_BCM47XX_WAR_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 4794067cb5a7..5035f09c5427 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -1259,20 +1259,6 @@
#define M2M_DSTID_REG(x) ((x) * 0x40 + 0x18)
/*************************************************************************
- * _REG relative to RSET_RNG
- *************************************************************************/
-
-#define RNG_CTRL 0x00
-#define RNG_EN (1 << 0)
-
-#define RNG_STAT 0x04
-#define RNG_AVAIL_MASK (0xff000000)
-
-#define RNG_DATA 0x08
-#define RNG_THRES 0x0c
-#define RNG_MASK 0x10
-
-/*************************************************************************
* _REG relative to RSET_SPI
*************************************************************************/
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
new file mode 100644
index 000000000000..11d3b572b1b3
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
+#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
+
+#include <asm/bmips.h>
+
+#define plat_post_dma_flush bmips_post_dma_flush
+
+#include <asm/mach-generic/dma-coherence.h>
+
+#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/war.h b/arch/mips/include/asm/mach-bcm63xx/war.h
deleted file mode 100644
index 05ee8671bef1..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_BCM63XX_WAR_H
-#define __ASM_MIPS_MACH_BCM63XX_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_BCM63XX_WAR_H */
diff --git a/arch/mips/include/asm/mach-bcm3384/dma-coherence.h b/arch/mips/include/asm/mach-bmips/dma-coherence.h
index a3be8e50e1f0..d29781f02285 100644
--- a/arch/mips/include/asm/mach-bcm3384/dma-coherence.h
+++ b/arch/mips/include/asm/mach-bmips/dma-coherence.h
@@ -12,8 +12,12 @@
* GNU General Public License for more details.
*/
-#ifndef __ASM_MACH_BCM3384_DMA_COHERENCE_H
-#define __ASM_MACH_BCM3384_DMA_COHERENCE_H
+#ifndef __ASM_MACH_BMIPS_DMA_COHERENCE_H
+#define __ASM_MACH_BMIPS_DMA_COHERENCE_H
+
+#include <asm/bmips.h>
+#include <asm/cpu-type.h>
+#include <asm/cpu.h>
struct device;
@@ -45,4 +49,6 @@ static inline int plat_device_is_coherent(struct device *dev)
return 0;
}
-#endif /* __ASM_MACH_BCM3384_DMA_COHERENCE_H */
+#define plat_post_dma_flush bmips_post_dma_flush
+
+#endif /* __ASM_MACH_BMIPS_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-bmips/spaces.h b/arch/mips/include/asm/mach-bmips/spaces.h
new file mode 100644
index 000000000000..1b05bddc8ec5
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/spaces.h
@@ -0,0 +1,18 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_BMIPS_SPACES_H
+#define _ASM_BMIPS_SPACES_H
+
+/* Avoid collisions with system base register (SBR) region on BMIPS3300 */
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_BMIPS_SPACES_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index fa1f3cfbae8d..d68e685cde60 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -50,7 +50,6 @@
#define cpu_has_mips32r2 0
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 1
-#define cpu_has_mips_r2_exec_hazard 0
#define cpu_has_dsp 0
#define cpu_has_dsp2 0
#define cpu_has_mipsmt 0
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index f9f448650505..460042ee5d6f 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -57,6 +57,10 @@ static inline int plat_device_is_coherent(struct device *dev)
return 1;
}
+static inline void plat_post_dma_flush(struct device *dev)
+{
+}
+
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
new file mode 100644
index 000000000000..374eefafb320
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -0,0 +1,74 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H
+#define __ASM_MACH_GENERIC_MANGLE_PORT_H
+
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+# define __swizzle_addr_b(port) (port)
+# define __swizzle_addr_w(port) (port)
+# define __swizzle_addr_l(port) (port)
+# define __swizzle_addr_q(port) (port)
+
+#else /* __LITTLE_ENDIAN */
+
+static inline bool __should_swizzle_addr(unsigned long p)
+{
+ /* boot bus? */
+ return ((p >> 40) & 0xff) == 0;
+}
+
+# define __swizzle_addr_b(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 7 : (port))
+# define __swizzle_addr_w(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 6 : (port))
+# define __swizzle_addr_l(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 4 : (port))
+# define __swizzle_addr_q(port) (port)
+
+#endif /* __BIG_ENDIAN */
+
+/*
+ * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
+ * less sane hardware forces software to fiddle with this...
+ *
+ * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
+ * you can't have the numerical value of data and byte addresses within
+ * multibyte quantities both preserved at the same time. Hence two
+ * variations of functions: non-prefixed ones that preserve the value
+ * and prefixed ones that preserve byte addresses. The latters are
+ * typically used for moving raw data between a peripheral and memory (cf.
+ * string I/O functions), hence the "__mem_" prefix.
+ */
+#if defined(CONFIG_SWAP_IO_SPACE)
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) le16_to_cpu(x)
+# define __mem_ioswabw(a, x) (x)
+# define ioswabl(a, x) le32_to_cpu(x)
+# define __mem_ioswabl(a, x) (x)
+# define ioswabq(a, x) le64_to_cpu(x)
+# define __mem_ioswabq(a, x) (x)
+
+#else
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) (x)
+# define __mem_ioswabw(a, x) cpu_to_le16(x)
+# define ioswabl(a, x) (x)
+# define __mem_ioswabl(a, x) cpu_to_le32(x)
+# define ioswabq(a, x) (x)
+# define __mem_ioswabq(a, x) cpu_to_le32(x)
+
+#endif
+
+#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
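The comment block in this new header describes why addresses get swizzled: on a little-endian core, a byte/halfword/word offset has to be XOR-remapped onto the byte lane it would occupy in a 64-bit big-endian boot-bus register. A minimal user-space sketch of that remapping, assuming a 64-bit address and reusing the bits-47:40 boot-bus test from the hunk above (the base offset is made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors __should_swizzle_addr(): boot-bus addresses have 0 in bits 47:40. */
    static bool should_swizzle_addr(unsigned long long p)
    {
            return ((p >> 40) & 0xff) == 0;
    }

    int main(void)
    {
            unsigned long long base = 0x1000;   /* illustrative boot-bus offset */

            for (int off = 0; off < 8; off++) {
                    unsigned long long p = base + off;

                    printf("byte %d -> lane %llu, halfword lane %llu, word lane %llu\n",
                           off,
                           (should_swizzle_addr(p) ? p ^ 7 : p) & 7,
                           (should_swizzle_addr(p) ? p ^ 6 : p) & 7,
                           (should_swizzle_addr(p) ? p ^ 4 : p) & 7);
            }
            return 0;
    }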
diff --git a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
index 71d4bface1dc..30c5cd9fd973 100644
--- a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
@@ -14,7 +14,6 @@
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
#define cpu_has_tx39_cache 0
-#define cpu_has_fpu 1
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_watch 0
diff --git a/arch/mips/include/asm/mach-cobalt/war.h b/arch/mips/include/asm/mach-cobalt/war.h
deleted file mode 100644
index 34ae4046541e..000000000000
--- a/arch/mips/include/asm/mach-cobalt/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_COBALT_WAR_H
-#define __ASM_MIPS_MACH_COBALT_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_COBALT_WAR_H */
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
index acce27fd2bb8..bdf045fb00c8 100644
--- a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
@@ -15,7 +15,6 @@
/* Generic ones first. */
#define cpu_has_tlb 1
#define cpu_has_tx39_cache 0
-#define cpu_has_fpu 1
#define cpu_has_divec 0
#define cpu_has_prefetch 0
#define cpu_has_mcheck 0
diff --git a/arch/mips/include/asm/mach-dec/war.h b/arch/mips/include/asm/mach-dec/war.h
deleted file mode 100644
index d29996feb3e7..000000000000
--- a/arch/mips/include/asm/mach-dec/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_DEC_WAR_H
-#define __ASM_MIPS_MACH_DEC_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_DEC_WAR_H */
diff --git a/arch/mips/include/asm/mach-emma2rh/war.h b/arch/mips/include/asm/mach-emma2rh/war.h
deleted file mode 100644
index 79ae82da3ec7..000000000000
--- a/arch/mips/include/asm/mach-emma2rh/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_EMMA2RH_WAR_H
-#define __ASM_MIPS_MACH_EMMA2RH_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_EMMA2RH_WAR_H */
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 7629c35986f7..0f8a354fd468 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -52,6 +52,12 @@ static inline int plat_device_is_coherent(struct device *dev)
return coherentio;
}
+#ifndef plat_post_dma_flush
+static inline void plat_post_dma_flush(struct device *dev)
+{
+}
+#endif
+
#ifdef CONFIG_SWIOTLB
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
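The #ifndef guard added here turns plat_post_dma_flush() into an overridable hook: a platform header (like the new mach-bcm63xx/dma-coherence.h earlier in this patch) #defines the name to its own flush routine before including the generic header, and every platform that does not gets the empty inline stub. A stand-alone sketch of that pattern, with both "headers" collapsed into one file and the flush body reduced to a printout (the body is a stand-in, not the real bmips flush):

    #include <stdio.h>

    struct device;                          /* opaque, as in the kernel headers */

    /* --- platform header: override before the generic header is seen --- */
    #define plat_post_dma_flush bmips_post_dma_flush

    static void bmips_post_dma_flush(struct device *dev)
    {
            (void)dev;
            puts("platform-specific post-DMA cache flush");
    }

    /* --- generic header: provide the no-op only if nothing overrode it --- */
    #ifndef plat_post_dma_flush
    static inline void plat_post_dma_flush(struct device *dev) { }
    #endif

    int main(void)
    {
            plat_post_dma_flush(NULL);      /* expands to the platform hook */
            return 0;
    }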
diff --git a/arch/mips/include/asm/mach-ralink/war.h b/arch/mips/include/asm/mach-generic/war.h
index c074b5dc1f82..a1bc2e71f983 100644
--- a/arch/mips/include/asm/mach-ralink/war.h
+++ b/arch/mips/include/asm/mach-generic/war.h
@@ -5,8 +5,8 @@
*
* Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
*/
-#ifndef __ASM_MACH_RALINK_WAR_H
-#define __ASM_MACH_RALINK_WAR_H
+#ifndef __ASM_MACH_GENERIC_WAR_H
+#define __ASM_MACH_GENERIC_WAR_H
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
@@ -21,4 +21,4 @@
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
-#endif /* __ASM_MACH_RALINK_WAR_H */
+#endif /* __ASM_MACH_GENERIC_WAR_H */
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
index 1dfe47453ea4..9b19b72dba56 100644
--- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -16,7 +16,6 @@
#define cpu_has_tlb 1
#define cpu_has_4kex 1
#define cpu_has_4k_cache 1
-#define cpu_has_fpu 1
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_mips16 0
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index 4ffddfdb5062..1daa64412569 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -58,6 +58,10 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
+static inline void plat_post_dma_flush(struct device *dev)
+{
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 1; /* IP27 non-coherent mode is unsupported */
diff --git a/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
index 2e1ec6cfedd5..241409b78ff1 100644
--- a/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
@@ -26,7 +26,6 @@
/* Settings which are common for all ip32 CPUs */
#define cpu_has_tlb 1
#define cpu_has_4kex 1
-#define cpu_has_fpu 1
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_mips16 0
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 104cfbc3ed63..0a0b0e2ced60 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -80,6 +80,10 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
+static inline void plat_post_dma_flush(struct device *dev)
+{
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 0; /* IP32 is non-coherent */
diff --git a/arch/mips/include/asm/mach-ip32/mc146818rtc.h b/arch/mips/include/asm/mach-ip32/mc146818rtc.h
deleted file mode 100644
index 6b6bab43d5c1..000000000000
--- a/arch/mips/include/asm/mach-ip32/mc146818rtc.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 2001, 03 by Ralf Baechle
- * Copyright (C) 2000 Harald Koerfgen
- *
- * RTC routines for IP32 style attached Dallas chip.
- */
-#ifndef __ASM_MACH_IP32_MC146818RTC_H
-#define __ASM_MACH_IP32_MC146818RTC_H
-
-#include <asm/ip32/mace.h>
-
-#define RTC_PORT(x) (0x70 + (x))
-
-static unsigned char CMOS_READ(unsigned long addr)
-{
- return mace->isa.rtc[addr << 8];
-}
-
-static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
-{
- mace->isa.rtc[addr << 8] = data;
-}
-
-/*
- * FIXME: Do it right. For now just assume that no one lives in 20th century
- * and no O2 user in 22th century ;-)
- */
-#define mc146818_decode_year(year) ((year) + 2000)
-
-#define RTC_ALWAYS_BCD 0
-
-#endif /* __ASM_MACH_IP32_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index 949003ef97b3..dc347c25c343 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -48,6 +48,10 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
return 1;
}
+static inline void plat_post_dma_flush(struct device *dev)
+{
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
diff --git a/arch/mips/include/asm/mach-jazz/war.h b/arch/mips/include/asm/mach-jazz/war.h
deleted file mode 100644
index 5b18b9a3d0ec..000000000000
--- a/arch/mips/include/asm/mach-jazz/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_JAZZ_WAR_H
-#define __ASM_MIPS_MACH_JAZZ_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_JAZZ_WAR_H */
diff --git a/arch/mips/include/asm/mach-jz4740/war.h b/arch/mips/include/asm/mach-jz4740/war.h
deleted file mode 100644
index 9b511d323838..000000000000
--- a/arch/mips/include/asm/mach-jz4740/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_JZ4740_WAR_H
-#define __ASM_MIPS_MACH_JZ4740_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_JZ4740_WAR_H */
diff --git a/arch/mips/include/asm/mach-lantiq/war.h b/arch/mips/include/asm/mach-lantiq/war.h
deleted file mode 100644
index 358ca979c1bd..000000000000
--- a/arch/mips/include/asm/mach-lantiq/war.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-#ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H
-#define __ASM_MIPS_MACH_LANTIQ_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif
diff --git a/arch/mips/include/asm/mach-lasat/war.h b/arch/mips/include/asm/mach-lasat/war.h
deleted file mode 100644
index 741ae724adc6..000000000000
--- a/arch/mips/include/asm/mach-lasat/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_LASAT_WAR_H
-#define __ASM_MIPS_MACH_LASAT_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_LASAT_WAR_H */
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 6d69332f21ec..acc376897e46 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -34,7 +34,6 @@
#define cpu_has_dsp 0
#define cpu_has_dsp2 0
#define cpu_has_ejtag 0
-#define cpu_has_fpu 1
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index a90534161bd2..4bf4e19f72e8 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -78,4 +78,8 @@ static inline int plat_device_is_coherent(struct device *dev)
#endif /* CONFIG_DMA_NONCOHERENT */
}
+static inline void plat_post_dma_flush(struct device *dev)
+{
+}
+
#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson/gpio.h b/arch/mips/include/asm/mach-loongson/gpio.h
index 211a7b7138fe..b3b216904a9a 100644
--- a/arch/mips/include/asm/mach-loongson/gpio.h
+++ b/arch/mips/include/asm/mach-loongson/gpio.h
@@ -1,8 +1,9 @@
/*
- * STLS2F GPIO Support
+ * Loongson GPIO Support
*
* Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
* Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
+ * Copyright (c) 2014 Huacai Chen <chenhc@lemote.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,14 +11,14 @@
* (at your option) any later version.
*/
-#ifndef __STLS2F_GPIO_H
-#define __STLS2F_GPIO_H
+#ifndef __LOONGSON_GPIO_H
+#define __LOONGSON_GPIO_H
#include <asm-generic/gpio.h>
-extern void gpio_set_value(unsigned gpio, int value);
-extern int gpio_get_value(unsigned gpio);
-extern int gpio_cansleep(unsigned gpio);
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
/* The chip can do interrupt
* but it has not been tested and doc not clear
@@ -32,4 +33,4 @@ static inline int irq_to_gpio(int gpio)
return -EINVAL;
}
-#endif /* __STLS2F_GPIO_H */
+#endif /* __LOONGSON_GPIO_H */
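Switching gpio_get_value()/gpio_set_value()/gpio_cansleep() to the double-underscore gpiolib helpers means Loongson GPIO consumers now go through the generic integer-GPIO API instead of board-private accessors. A hedged sketch of what a consumer of that API looks like (the GPIO number and label are invented; this illustrates the legacy gpiolib calls, not code from this patch):

    #include <linux/gpio.h>         /* legacy integer-GPIO consumer API */

    /* Illustrative only: GPIO 5 and the label are made up. */
    static int example_gpio_use(void)
    {
            int err, val;

            err = gpio_request(5, "example-led");
            if (err)
                    return err;

            gpio_direction_output(5, 0);    /* start driven low */
            gpio_set_value(5, 1);           /* now routed via __gpio_set_value() */

            gpio_direction_input(5);
            val = gpio_get_value(5);        /* now routed via __gpio_get_value() */

            gpio_free(5);
            return val;
    }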
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index 5459ac09679f..9783103fd6f6 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -255,6 +255,10 @@ static inline void do_perfcnt_IRQ(void)
extern u64 loongson_chipcfg[MAX_PACKAGES];
#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
+/* Chip Temperature register of each physical cpu package, PRid >= Loongson-3A */
+extern u64 loongson_chiptemp[MAX_PACKAGES];
+#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
+
/* Freq Control register of each physical cpu package, PRid >= Loongson-3B */
extern u64 loongson_freqctrl[MAX_PACKAGES];
#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
diff --git a/arch/mips/include/asm/mach-loongson/war.h b/arch/mips/include/asm/mach-loongson/war.h
deleted file mode 100644
index f2570df66bb5..000000000000
--- a/arch/mips/include/asm/mach-loongson/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MACH_LOONGSON_WAR_H
-#define __ASM_MACH_LOONGSON_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MACH_LEMOTE_WAR_H */
diff --git a/arch/mips/include/asm/mach-loongson1/war.h b/arch/mips/include/asm/mach-loongson1/war.h
deleted file mode 100644
index 8fb50d008131..000000000000
--- a/arch/mips/include/asm/mach-loongson1/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MACH_LOONGSON1_WAR_H
-#define __ASM_MACH_LOONGSON1_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MACH_LOONGSON1_WAR_H */
diff --git a/arch/mips/include/asm/mach-netlogic/multi-node.h b/arch/mips/include/asm/mach-netlogic/multi-node.h
index 9ed8dacdc37c..8bdf47e29145 100644
--- a/arch/mips/include/asm/mach-netlogic/multi-node.h
+++ b/arch/mips/include/asm/mach-netlogic/multi-node.h
@@ -48,15 +48,6 @@
#endif
#define NLM_THREADS_PER_CORE 4
-#ifdef CONFIG_CPU_XLR
-#define nlm_cores_per_node() 8
-#else
-extern unsigned int xlp_cores_per_node;
-#define nlm_cores_per_node() xlp_cores_per_node
-#endif
-
-#define nlm_threads_per_node() (nlm_cores_per_node() * NLM_THREADS_PER_CORE)
-#define nlm_cpuid_to_node(c) ((c) / nlm_threads_per_node())
struct nlm_soc_info {
unsigned long coremask; /* cores enabled on the soc */
diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h
deleted file mode 100644
index 0eb43c832b25..000000000000
--- a/arch/mips/include/asm/mach-netlogic/topology.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2013 Broadcom Corporation
- */
-#ifndef _ASM_MACH_NETLOGIC_TOPOLOGY_H
-#define _ASM_MACH_NETLOGIC_TOPOLOGY_H
-
-#include <asm/mach-netlogic/multi-node.h>
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/mach-netlogic/war.h b/arch/mips/include/asm/mach-netlogic/war.h
deleted file mode 100644
index 2c7216840e18..000000000000
--- a/arch/mips/include/asm/mach-netlogic/war.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2011 Netlogic Microsystems.
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_NLM_WAR_H
-#define __ASM_MIPS_MACH_NLM_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_NLM_WAR_H */
diff --git a/arch/mips/include/asm/mach-paravirt/war.h b/arch/mips/include/asm/mach-paravirt/war.h
deleted file mode 100644
index 36d3afb98451..000000000000
--- a/arch/mips/include/asm/mach-paravirt/war.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2013 Cavium Networks <support@caviumnetworks.com>
- */
-#ifndef __ASM_MIPS_MACH_PARAVIRT_WAR_H
-#define __ASM_MIPS_MACH_PARAVIRT_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_PARAVIRT_WAR_H */
diff --git a/arch/mips/include/asm/mach-pistachio/gpio.h b/arch/mips/include/asm/mach-pistachio/gpio.h
new file mode 100644
index 000000000000..6c1649c27b8d
--- /dev/null
+++ b/arch/mips/include/asm/mach-pistachio/gpio.h
@@ -0,0 +1,21 @@
+/*
+ * Pistachio IRQ setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_PISTACHIO_GPIO_H
+#define __ASM_MACH_PISTACHIO_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+#endif /* __ASM_MACH_PISTACHIO_GPIO_H */
diff --git a/arch/mips/include/asm/mach-pistachio/irq.h b/arch/mips/include/asm/mach-pistachio/irq.h
new file mode 100644
index 000000000000..b94a09a54221
--- /dev/null
+++ b/arch/mips/include/asm/mach-pistachio/irq.h
@@ -0,0 +1,18 @@
+/*
+ * Pistachio IRQ setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_PISTACHIO_IRQ_H
+#define __ASM_MACH_PISTACHIO_IRQ_H
+
+#define NR_IRQS 256
+
+#include_next <irq.h>
+
+#endif /* __ASM_MACH_PISTACHIO_IRQ_H */
diff --git a/arch/mips/include/asm/mach-pnx833x/war.h b/arch/mips/include/asm/mach-pnx833x/war.h
deleted file mode 100644
index e410df4e1b3a..000000000000
--- a/arch/mips/include/asm/mach-pnx833x/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_PNX833X_WAR_H
-#define __ASM_MIPS_MACH_PNX833X_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
diff --git a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
index f095c529c48c..98cf40417c5d 100644
--- a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
@@ -15,7 +15,6 @@
#define cpu_has_tlb 1
#define cpu_has_4kex 1
#define cpu_has_4k_cache 1
-#define cpu_has_fpu 1
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_watch 0
diff --git a/arch/mips/include/asm/mach-tx39xx/war.h b/arch/mips/include/asm/mach-tx39xx/war.h
deleted file mode 100644
index 6a52e6534776..000000000000
--- a/arch/mips/include/asm/mach-tx39xx/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_TX39XX_WAR_H
-#define __ASM_MIPS_MACH_TX39XX_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_TX39XX_WAR_H */
diff --git a/arch/mips/include/asm/mach-vr41xx/war.h b/arch/mips/include/asm/mach-vr41xx/war.h
deleted file mode 100644
index ffe31e736009..000000000000
--- a/arch/mips/include/asm/mach-vr41xx/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_VR41XX_WAR_H
-#define __ASM_MIPS_MACH_VR41XX_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_VR41XX_WAR_H */
diff --git a/arch/mips/include/asm/mips-boards/sead3-addr.h b/arch/mips/include/asm/mips-boards/sead3-addr.h
new file mode 100644
index 000000000000..c0db57802f7c
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/sead3-addr.h
@@ -0,0 +1,83 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2015 Imagination Technologies, Inc.
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_BOARDS_SEAD3_ADDR_H
+#define __ASM_MIPS_BOARDS_SEAD3_ADDR_H
+
+/*
+ * Target #0 Register Decode
+ */
+#define SEAD3_SD_SPDCNF 0xbb000040
+#define SEAD3_SD_SPADDR 0xbb000048
+#define SEAD3_SD_DATA 0xbb000050
+
+/*
+ * Target #1 Register Decode
+ */
+#define SEAD3_CFG 0xbb100110
+#define SEAD3_GIC_BASE_ADDRESS 0xbb1c0000
+#define SEAD3_SHARED_SECTION 0xbb1c0000
+#define SEAD3_VPE_LOCAL_SECTION 0xbb1c8000
+#define SEAD3_VPE_OTHER_SECTION 0xbb1cc000
+#define SEAD3_USER_MODE_VISIBLE_SECTION 0xbb1d0000
+
+/*
+ * Target #3 Register Decode
+ */
+#define SEAD3_USB_HS_BASE 0xbb200000
+#define SEAD3_USB_HS_IDENTIFICATION_REGS 0xbb200000
+#define SEAD3_USB_HS_CAPABILITY_REGS 0xbb200100
+#define SEAD3_USB_HS_OPERATIONAL_REGS 0xbb200140
+#define SEAD3_RESERVED 0xbe800000
+
+/*
+ * Target #3 Register Decode
+ */
+#define SEAD3_SRAM 0xbe000000
+#define SEAD3_OPTIONAL_SRAM 0xbe400000
+#define SEAD3_FPGA 0xbf000000
+
+#define SEAD3_PI_PIC32_USB_STATUS 0xbf000060
+#define SEAD3_PI_PIC32_USB_STATUS_IO_RDY (1 << 0)
+#define SEAD3_PI_PIC32_USB_STATUS_SPL_INT (1 << 1)
+#define SEAD3_PI_PIC32_USB_STATUS_GPIOA_INT (1 << 2)
+#define SEAD3_PI_PIC32_USB_STATUS_GPIOB_INT (1 << 3)
+
+#define SEAD3_PI_SOFT_ENDIAN 0xbf000070
+
+#define SEAD3_CPLD_P_SWITCH 0xbf000200
+#define SEAD3_CPLD_F_SWITCH 0xbf000208
+#define SEAD3_CPLD_P_LED 0xbf000210
+#define SEAD3_CPLD_F_LED 0xbf000218
+#define SEAD3_NEWSC_LIVE 0xbf000220
+#define SEAD3_NEWSC_REG 0xbf000228
+#define SEAD3_NEWSC_CTRL 0xbf000230
+
+#define SEAD3_LCD_CONTROL 0xbf000400
+#define SEAD3_LCD_DATA 0xbf000408
+#define SEAD3_CPLD_LCD_STATUS 0xbf000410
+#define SEAD3_CPLD_LCD_DATA 0xbf000418
+
+#define SEAD3_CPLD_PI_DEVRST 0xbf000480
+#define SEAD3_CPLD_PI_DEVRST_IC32_RST (1 << 0)
+#define SEAD3_RESERVED_0 0xbf000500
+
+#define SEAD3_PIC32_REGISTERS 0xbf000600
+#define SEAD3_RESERVED_1 0xbf000700
+#define SEAD3_UART_CH_0 0xbf000800
+#define SEAD3_UART_CH_1 0xbf000900
+#define SEAD3_RESERVED_2 0xbf000a00
+#define SEAD3_ETHERNET 0xbf010000
+#define SEAD3_RESERVED_3 0xbf020000
+#define SEAD3_USER_EXPANSION 0xbf400000
+#define SEAD3_RESERVED_4 0xbf800000
+#define SEAD3_BOOT_FLASH_EXTENSION 0xbfa00000
+#define SEAD3_BOOT_FLASH 0xbfc00000
+#define SEAD3_REVISION_REGISTER 0xbfc00010
+
+#endif /* __ASM_MIPS_BOARDS_SEAD3_ADDR_H */
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
index 60570f2c3ba2..4b89f28047f7 100644
--- a/arch/mips/include/asm/mips-r2-to-r6-emul.h
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -84,11 +84,16 @@ extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
static int mipsr2_emulation;
-static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; };
+static inline int mipsr2_decoder(struct pt_regs *regs, u32 inst,
+ unsigned long *fcr31)
+{
+ return 0;
+};
#else
/* MIPS R2 Emulator ON/OFF */
extern int mipsr2_emulation;
-extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst,
+ unsigned long *fcr31);
#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation)
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index fef004434096..764e2756b54d 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -111,70 +111,6 @@
*/
#define CP0_TX39_CACHE $7
-/*
- * Coprocessor 1 (FPU) register names
- */
-#define CP1_REVISION $0
-#define CP1_STATUS $31
-
-/*
- * FPU Status Register Values
- */
-/*
- * Status Register Values
- */
-
-#define FPU_CSR_FLUSH 0x01000000 /* flush denormalised results to 0 */
-#define FPU_CSR_COND 0x00800000 /* $fcc0 */
-#define FPU_CSR_COND0 0x00800000 /* $fcc0 */
-#define FPU_CSR_COND1 0x02000000 /* $fcc1 */
-#define FPU_CSR_COND2 0x04000000 /* $fcc2 */
-#define FPU_CSR_COND3 0x08000000 /* $fcc3 */
-#define FPU_CSR_COND4 0x10000000 /* $fcc4 */
-#define FPU_CSR_COND5 0x20000000 /* $fcc5 */
-#define FPU_CSR_COND6 0x40000000 /* $fcc6 */
-#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
-
-/*
- * Bits 18 - 20 of the FPU Status Register will be read as 0,
- * and should be written as zero.
- */
-#define FPU_CSR_RSVD 0x001c0000
-
-/*
- * X the exception cause indicator
- * E the exception enable
- * S the sticky/flag bit
-*/
-#define FPU_CSR_ALL_X 0x0003f000
-#define FPU_CSR_UNI_X 0x00020000
-#define FPU_CSR_INV_X 0x00010000
-#define FPU_CSR_DIV_X 0x00008000
-#define FPU_CSR_OVF_X 0x00004000
-#define FPU_CSR_UDF_X 0x00002000
-#define FPU_CSR_INE_X 0x00001000
-
-#define FPU_CSR_ALL_E 0x00000f80
-#define FPU_CSR_INV_E 0x00000800
-#define FPU_CSR_DIV_E 0x00000400
-#define FPU_CSR_OVF_E 0x00000200
-#define FPU_CSR_UDF_E 0x00000100
-#define FPU_CSR_INE_E 0x00000080
-
-#define FPU_CSR_ALL_S 0x0000007c
-#define FPU_CSR_INV_S 0x00000040
-#define FPU_CSR_DIV_S 0x00000020
-#define FPU_CSR_OVF_S 0x00000010
-#define FPU_CSR_UDF_S 0x00000008
-#define FPU_CSR_INE_S 0x00000004
-
-/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
-#define FPU_CSR_RM 0x00000003
-#define FPU_CSR_RN 0x0 /* nearest */
-#define FPU_CSR_RZ 0x1 /* towards zero */
-#define FPU_CSR_RU 0x2 /* towards +Infinity */
-#define FPU_CSR_RD 0x3 /* towards -Infinity */
-
/*
* Values for PageMask register
@@ -341,39 +277,6 @@
#define ST0_MX 0x01000000
/*
- * Bitfields in the TX39 family CP0 Configuration Register 3
- */
-#define TX39_CONF_ICS_SHIFT 19
-#define TX39_CONF_ICS_MASK 0x00380000
-#define TX39_CONF_ICS_1KB 0x00000000
-#define TX39_CONF_ICS_2KB 0x00080000
-#define TX39_CONF_ICS_4KB 0x00100000
-#define TX39_CONF_ICS_8KB 0x00180000
-#define TX39_CONF_ICS_16KB 0x00200000
-
-#define TX39_CONF_DCS_SHIFT 16
-#define TX39_CONF_DCS_MASK 0x00070000
-#define TX39_CONF_DCS_1KB 0x00000000
-#define TX39_CONF_DCS_2KB 0x00010000
-#define TX39_CONF_DCS_4KB 0x00020000
-#define TX39_CONF_DCS_8KB 0x00030000
-#define TX39_CONF_DCS_16KB 0x00040000
-
-#define TX39_CONF_CWFON 0x00004000
-#define TX39_CONF_WBON 0x00002000
-#define TX39_CONF_RF_SHIFT 10
-#define TX39_CONF_RF_MASK 0x00000c00
-#define TX39_CONF_DOZE 0x00000200
-#define TX39_CONF_HALT 0x00000100
-#define TX39_CONF_LOCK 0x00000080
-#define TX39_CONF_ICE 0x00000020
-#define TX39_CONF_DCE 0x00000010
-#define TX39_CONF_IRSIZE_SHIFT 2
-#define TX39_CONF_IRSIZE_MASK 0x0000000c
-#define TX39_CONF_DRSIZE_SHIFT 0
-#define TX39_CONF_DRSIZE_MASK 0x00000003
-
-/*
* Status register bits available in all MIPS CPUs.
*/
#define ST0_IM 0x0000ff00
@@ -425,9 +328,9 @@
/*
* Bitfields and bit numbers in the coprocessor 0 IntCtl register. (MIPSR2)
- *
- * Refer to your MIPS R4xx0 manual, chapter 5 for explanation.
*/
+#define INTCTLB_IPFDC 23
+#define INTCTLF_IPFDC (_ULCAST_(7) << INTCTLB_IPFDC)
#define INTCTLB_IPPCI 26
#define INTCTLF_IPPCI (_ULCAST_(7) << INTCTLB_IPPCI)
#define INTCTLB_IPTI 29
@@ -438,10 +341,10 @@
*
* Refer to your MIPS R4xx0 manual, chapter 5 for explanation.
*/
-#define CAUSEB_EXCCODE 2
-#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
-#define CAUSEB_IP 8
-#define CAUSEF_IP (_ULCAST_(255) << 8)
+#define CAUSEB_EXCCODE 2
+#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
+#define CAUSEB_IP 8
+#define CAUSEF_IP (_ULCAST_(255) << 8)
#define CAUSEB_IP0 8
#define CAUSEF_IP0 (_ULCAST_(1) << 8)
#define CAUSEB_IP1 9
@@ -458,16 +361,18 @@
#define CAUSEF_IP6 (_ULCAST_(1) << 14)
#define CAUSEB_IP7 15
#define CAUSEF_IP7 (_ULCAST_(1) << 15)
-#define CAUSEB_IV 23
-#define CAUSEF_IV (_ULCAST_(1) << 23)
-#define CAUSEB_PCI 26
-#define CAUSEF_PCI (_ULCAST_(1) << 26)
-#define CAUSEB_CE 28
-#define CAUSEF_CE (_ULCAST_(3) << 28)
-#define CAUSEB_TI 30
-#define CAUSEF_TI (_ULCAST_(1) << 30)
-#define CAUSEB_BD 31
-#define CAUSEF_BD (_ULCAST_(1) << 31)
+#define CAUSEB_FDCI 21
+#define CAUSEF_FDCI (_ULCAST_(1) << 21)
+#define CAUSEB_IV 23
+#define CAUSEF_IV (_ULCAST_(1) << 23)
+#define CAUSEB_PCI 26
+#define CAUSEF_PCI (_ULCAST_(1) << 26)
+#define CAUSEB_CE 28
+#define CAUSEF_CE (_ULCAST_(3) << 28)
+#define CAUSEB_TI 30
+#define CAUSEF_TI (_ULCAST_(1) << 30)
+#define CAUSEB_BD 31
+#define CAUSEF_BD (_ULCAST_(1) << 31)
/*
* Bits in the coprocessor 0 config register.
@@ -689,18 +594,6 @@
#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
/*
- * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
- */
-#define MIPS_FPIR_S (_ULCAST_(1) << 16)
-#define MIPS_FPIR_D (_ULCAST_(1) << 17)
-#define MIPS_FPIR_PS (_ULCAST_(1) << 18)
-#define MIPS_FPIR_3D (_ULCAST_(1) << 19)
-#define MIPS_FPIR_W (_ULCAST_(1) << 20)
-#define MIPS_FPIR_L (_ULCAST_(1) << 21)
-#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
-#define MIPS_FPIR_FREP (_ULCAST_(1) << 29)
-
-/*
* Bits in the MIPS32 Memory Segmentation registers.
*/
#define MIPS_SEGCFG_PA_SHIFT 9
@@ -751,6 +644,172 @@
#define MIPS_PWCTL_PSN_SHIFT 0
#define MIPS_PWCTL_PSN_MASK 0x0000003f
+/* CDMMBase register bit definitions */
+#define MIPS_CDMMBASE_SIZE_SHIFT 0
+#define MIPS_CDMMBASE_SIZE (_ULCAST_(511) << MIPS_CDMMBASE_SIZE_SHIFT)
+#define MIPS_CDMMBASE_CI (_ULCAST_(1) << 9)
+#define MIPS_CDMMBASE_EN (_ULCAST_(1) << 10)
+#define MIPS_CDMMBASE_ADDR_SHIFT 11
+#define MIPS_CDMMBASE_ADDR_START 15
+
+/*
+ * Bitfields in the TX39 family CP0 Configuration Register 3
+ */
+#define TX39_CONF_ICS_SHIFT 19
+#define TX39_CONF_ICS_MASK 0x00380000
+#define TX39_CONF_ICS_1KB 0x00000000
+#define TX39_CONF_ICS_2KB 0x00080000
+#define TX39_CONF_ICS_4KB 0x00100000
+#define TX39_CONF_ICS_8KB 0x00180000
+#define TX39_CONF_ICS_16KB 0x00200000
+
+#define TX39_CONF_DCS_SHIFT 16
+#define TX39_CONF_DCS_MASK 0x00070000
+#define TX39_CONF_DCS_1KB 0x00000000
+#define TX39_CONF_DCS_2KB 0x00010000
+#define TX39_CONF_DCS_4KB 0x00020000
+#define TX39_CONF_DCS_8KB 0x00030000
+#define TX39_CONF_DCS_16KB 0x00040000
+
+#define TX39_CONF_CWFON 0x00004000
+#define TX39_CONF_WBON 0x00002000
+#define TX39_CONF_RF_SHIFT 10
+#define TX39_CONF_RF_MASK 0x00000c00
+#define TX39_CONF_DOZE 0x00000200
+#define TX39_CONF_HALT 0x00000100
+#define TX39_CONF_LOCK 0x00000080
+#define TX39_CONF_ICE 0x00000020
+#define TX39_CONF_DCE 0x00000010
+#define TX39_CONF_IRSIZE_SHIFT 2
+#define TX39_CONF_IRSIZE_MASK 0x0000000c
+#define TX39_CONF_DRSIZE_SHIFT 0
+#define TX39_CONF_DRSIZE_MASK 0x00000003
+
+
+/*
+ * Coprocessor 1 (FPU) register names
+ */
+#define CP1_REVISION $0
+#define CP1_UFR $1
+#define CP1_UNFR $4
+#define CP1_FCCR $25
+#define CP1_FEXR $26
+#define CP1_FENR $28
+#define CP1_STATUS $31
+
+
+/*
+ * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
+ */
+#define MIPS_FPIR_S (_ULCAST_(1) << 16)
+#define MIPS_FPIR_D (_ULCAST_(1) << 17)
+#define MIPS_FPIR_PS (_ULCAST_(1) << 18)
+#define MIPS_FPIR_3D (_ULCAST_(1) << 19)
+#define MIPS_FPIR_W (_ULCAST_(1) << 20)
+#define MIPS_FPIR_L (_ULCAST_(1) << 21)
+#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
+#define MIPS_FPIR_HAS2008 (_ULCAST_(1) << 23)
+#define MIPS_FPIR_UFRP (_ULCAST_(1) << 28)
+#define MIPS_FPIR_FREP (_ULCAST_(1) << 29)
+
+/*
+ * Bits in the MIPS32/64 coprocessor 1 (FPU) condition codes register.
+ */
+#define MIPS_FCCR_CONDX_S 0
+#define MIPS_FCCR_CONDX (_ULCAST_(255) << MIPS_FCCR_CONDX_S)
+#define MIPS_FCCR_COND0_S 0
+#define MIPS_FCCR_COND0 (_ULCAST_(1) << MIPS_FCCR_COND0_S)
+#define MIPS_FCCR_COND1_S 1
+#define MIPS_FCCR_COND1 (_ULCAST_(1) << MIPS_FCCR_COND1_S)
+#define MIPS_FCCR_COND2_S 2
+#define MIPS_FCCR_COND2 (_ULCAST_(1) << MIPS_FCCR_COND2_S)
+#define MIPS_FCCR_COND3_S 3
+#define MIPS_FCCR_COND3 (_ULCAST_(1) << MIPS_FCCR_COND3_S)
+#define MIPS_FCCR_COND4_S 4
+#define MIPS_FCCR_COND4 (_ULCAST_(1) << MIPS_FCCR_COND4_S)
+#define MIPS_FCCR_COND5_S 5
+#define MIPS_FCCR_COND5 (_ULCAST_(1) << MIPS_FCCR_COND5_S)
+#define MIPS_FCCR_COND6_S 6
+#define MIPS_FCCR_COND6 (_ULCAST_(1) << MIPS_FCCR_COND6_S)
+#define MIPS_FCCR_COND7_S 7
+#define MIPS_FCCR_COND7 (_ULCAST_(1) << MIPS_FCCR_COND7_S)
+
+/*
+ * Bits in the MIPS32/64 coprocessor 1 (FPU) enables register.
+ */
+#define MIPS_FENR_FS_S 2
+#define MIPS_FENR_FS (_ULCAST_(1) << MIPS_FENR_FS_S)
+
+/*
+ * FPU Status Register Values
+ */
+#define FPU_CSR_COND_S 23 /* $fcc0 */
+#define FPU_CSR_COND (_ULCAST_(1) << FPU_CSR_COND_S)
+
+#define FPU_CSR_FS_S 24 /* flush denormalised results to 0 */
+#define FPU_CSR_FS (_ULCAST_(1) << FPU_CSR_FS_S)
+
+#define FPU_CSR_CONDX_S 25 /* $fcc[7:1] */
+#define FPU_CSR_CONDX (_ULCAST_(127) << FPU_CSR_CONDX_S)
+#define FPU_CSR_COND1_S 25 /* $fcc1 */
+#define FPU_CSR_COND1 (_ULCAST_(1) << FPU_CSR_COND1_S)
+#define FPU_CSR_COND2_S 26 /* $fcc2 */
+#define FPU_CSR_COND2 (_ULCAST_(1) << FPU_CSR_COND2_S)
+#define FPU_CSR_COND3_S 27 /* $fcc3 */
+#define FPU_CSR_COND3 (_ULCAST_(1) << FPU_CSR_COND3_S)
+#define FPU_CSR_COND4_S 28 /* $fcc4 */
+#define FPU_CSR_COND4 (_ULCAST_(1) << FPU_CSR_COND4_S)
+#define FPU_CSR_COND5_S 29 /* $fcc5 */
+#define FPU_CSR_COND5 (_ULCAST_(1) << FPU_CSR_COND5_S)
+#define FPU_CSR_COND6_S 30 /* $fcc6 */
+#define FPU_CSR_COND6 (_ULCAST_(1) << FPU_CSR_COND6_S)
+#define FPU_CSR_COND7_S 31 /* $fcc7 */
+#define FPU_CSR_COND7 (_ULCAST_(1) << FPU_CSR_COND7_S)
+
+/*
+ * Bits 22:20 of the FPU Status Register will be read as 0,
+ * and should be written as zero.
+ */
+#define FPU_CSR_RSVD (_ULCAST_(7) << 20)
+
+#define FPU_CSR_ABS2008 (_ULCAST_(1) << 19)
+#define FPU_CSR_NAN2008 (_ULCAST_(1) << 18)
+
+/*
+ * X the exception cause indicator
+ * E the exception enable
+ * S the sticky/flag bit
+*/
+#define FPU_CSR_ALL_X 0x0003f000
+#define FPU_CSR_UNI_X 0x00020000
+#define FPU_CSR_INV_X 0x00010000
+#define FPU_CSR_DIV_X 0x00008000
+#define FPU_CSR_OVF_X 0x00004000
+#define FPU_CSR_UDF_X 0x00002000
+#define FPU_CSR_INE_X 0x00001000
+
+#define FPU_CSR_ALL_E 0x00000f80
+#define FPU_CSR_INV_E 0x00000800
+#define FPU_CSR_DIV_E 0x00000400
+#define FPU_CSR_OVF_E 0x00000200
+#define FPU_CSR_UDF_E 0x00000100
+#define FPU_CSR_INE_E 0x00000080
+
+#define FPU_CSR_ALL_S 0x0000007c
+#define FPU_CSR_INV_S 0x00000040
+#define FPU_CSR_DIV_S 0x00000020
+#define FPU_CSR_OVF_S 0x00000010
+#define FPU_CSR_UDF_S 0x00000008
+#define FPU_CSR_INE_S 0x00000004
+
+/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM 0x00000003
+#define FPU_CSR_RN 0x0 /* nearest */
+#define FPU_CSR_RZ 0x1 /* towards zero */
+#define FPU_CSR_RU 0x2 /* towards +Infinity */
+#define FPU_CSR_RD 0x3 /* towards -Infinity */
+
+
#ifndef __ASSEMBLY__
/*
@@ -1282,6 +1341,9 @@ do { \
#define read_c0_ebase() __read_32bit_c0_register($15, 1)
#define write_c0_ebase(val) __write_32bit_c0_register($15, 1, val)
+#define read_c0_cdmmbase() __read_ulong_c0_register($15, 2)
+#define write_c0_cdmmbase(val) __write_ulong_c0_register($15, 2, val)
+
/* MIPSR3 */
#define read_c0_segctl0() __read_32bit_c0_register($5, 2)
#define write_c0_segctl0(val) __write_32bit_c0_register($5, 2, val)
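The relocated FPU_CSR_* block keeps the FCSR rounding-mode field in bits 1:0, with RN=0, RZ=1, RU=2 and RD=3. A small example of the usual mask-and-or update on a saved FCSR word, using constants copied from the hunk above (the starting FCSR value itself is made up):

    #include <stdio.h>

    #define FPU_CSR_RM      0x00000003      /* rounding-mode field, bits 1:0 */
    #define FPU_CSR_RN      0x0             /* nearest */
    #define FPU_CSR_RZ      0x1             /* towards zero */
    #define FPU_CSR_RU      0x2             /* towards +Infinity */
    #define FPU_CSR_RD      0x3             /* towards -Infinity */

    static unsigned int fpu_set_rounding(unsigned int fcsr, unsigned int mode)
    {
            return (fcsr & ~FPU_CSR_RM) | (mode & FPU_CSR_RM);
    }

    int main(void)
    {
            unsigned int fcsr = 0x01000004; /* made-up snapshot: FS + inexact sticky */

            printf("before: %#010x  after RZ: %#010x\n",
                   fcsr, fpu_set_rounding(fcsr, FPU_CSR_RZ));
            return 0;
    }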
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index c281f03eb312..2a4c128277e4 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -111,6 +111,25 @@ static inline int nlm_irq_to_xirq(int node, int irq)
return node * NR_IRQS / NLM_NR_NODES + irq;
}
-extern int nlm_cpu_ready[];
+#ifdef CONFIG_CPU_XLR
+#define nlm_cores_per_node() 8
+#else
+static inline int nlm_cores_per_node(void)
+{
+ return ((read_c0_prid() & PRID_IMP_MASK)
+ == PRID_IMP_NETLOGIC_XLP9XX) ? 32 : 8;
+}
#endif
+static inline int nlm_threads_per_node(void)
+{
+ return nlm_cores_per_node() * NLM_THREADS_PER_CORE;
+}
+
+static inline int nlm_hwtid_to_node(int hwtid)
+{
+ return hwtid / nlm_threads_per_node();
+}
+
+extern int nlm_cpu_ready[];
+#endif /* __ASSEMBLY__ */
#endif /* _NETLOGIC_COMMON_H_ */
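With these helpers made inline, mapping a hardware thread id to a node is plain integer arithmetic: threads-per-node is cores-per-node times NLM_THREADS_PER_CORE (4), and the node is the thread id divided by that. A worked example under the XLP9xx case from the hunk above (32 cores per node, hence 128 threads per node; the thread ids are arbitrary):

    #include <stdio.h>

    #define NLM_THREADS_PER_CORE    4
    #define CORES_PER_NODE          32      /* XLP9xx branch of nlm_cores_per_node() */

    static int threads_per_node(void)
    {
            return CORES_PER_NODE * NLM_THREADS_PER_CORE;
    }

    static int hwtid_to_node(int hwtid)
    {
            return hwtid / threads_per_node();
    }

    int main(void)
    {
            int ids[] = { 0, 127, 128, 300 };

            for (int i = 0; i < 4; i++)
                    printf("hwtid %3d -> node %d\n", ids[i], hwtid_to_node(ids[i]));
            /* 0 and 127 land on node 0, 128 on node 1, 300 on node 2 */
            return 0;
    }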
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 06f1f75bfa9b..788baf399e69 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -157,7 +157,13 @@ static inline int nlm_nodeid(void)
static inline unsigned int nlm_core_id(void)
{
- return (read_c0_ebase() & 0x1c) >> 2;
+ uint32_t prid = read_c0_prid() & PRID_IMP_MASK;
+
+ if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
+ (prid == PRID_IMP_NETLOGIC_XLP5XX))
+ return (read_c0_ebase() & 0x7c) >> 2;
+ else
+ return (read_c0_ebase() & 0x1c) >> 2;
}
static inline unsigned int nlm_thread_id(void)
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
index 6d2e58a9a542..a06b59292153 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
@@ -46,6 +46,8 @@
#define CPU_BLOCKID_FPU 9
#define CPU_BLOCKID_MAP 10
+#define IFU_BRUB_RESERVE 0x007
+
#define ICU_DEFEATURE 0x100
#define LSU_DEFEATURE 0x304
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
index bc7bddf25be9..6bcf3952e556 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -177,6 +177,9 @@
#define SYS_9XX_CLK_DEV_DIV 0x18d
#define SYS_9XX_CLK_DEV_CHG 0x18f
+#define SYS_9XX_CLK_DEV_SEL_REG 0x1a4
+#define SYS_9XX_CLK_DEV_DIV_REG 0x1a6
+
/* Registers changed on 9XX */
#define SYS_9XX_POWER_ON_RESET_CFG 0x00
#define SYS_9XX_CHIP_RESET 0x01
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
index a862b93223cc..feb6ed807ec6 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
@@ -52,6 +52,7 @@
#define PIC_2XX_XHCI_2_IRQ 25
#define PIC_9XX_XHCI_0_IRQ 23
#define PIC_9XX_XHCI_1_IRQ 24
+#define PIC_9XX_XHCI_2_IRQ 25
#define PIC_MMC_IRQ 29
#define PIC_I2C_0_IRQ 30
@@ -89,7 +90,7 @@ void xlp_wakeup_secondary_cpus(void);
void xlp_mmu_init(void);
void nlm_hal_init(void);
-int xlp_get_dram_map(int n, uint64_t *dram_map);
+int nlm_get_dram_map(int node, uint64_t *dram_map, int nentries);
struct pci_dev;
int xlp_socdev_to_node(const struct pci_dev *dev);
diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h
index e2d874e681f6..e4444f8c4a61 100644
--- a/arch/mips/include/asm/octeon/cvmx-address.h
+++ b/arch/mips/include/asm/octeon/cvmx-address.h
@@ -104,6 +104,7 @@ typedef enum {
typedef union {
uint64_t u64;
+#ifdef __BIG_ENDIAN_BITFIELD
/* mapped or unmapped virtual address */
struct {
uint64_t R:2;
@@ -202,6 +203,72 @@ typedef union {
uint64_t didspace:24;
uint64_t unused:40;
} sfilldidspace;
+#else
+ struct {
+ uint64_t offset:62;
+ uint64_t R:2;
+ } sva;
+
+ struct {
+ uint64_t offset:31;
+ uint64_t zeroes:33;
+ } suseg;
+
+ struct {
+ uint64_t offset:29;
+ uint64_t sp:2;
+ uint64_t ones:33;
+ } sxkseg;
+
+ struct {
+ uint64_t pa:49;
+ uint64_t mbz:10;
+ uint64_t cca:3;
+ uint64_t R:2;
+ } sxkphys;
+
+ struct {
+ uint64_t offset:36;
+ uint64_t unaddr:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t mbz:15;
+ } sphys;
+
+ struct {
+ uint64_t offset:36;
+ uint64_t unaddr:4;
+ uint64_t zeroes:24;
+ } smem;
+
+ struct {
+ uint64_t offset:36;
+ uint64_t unaddr:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t mbz:13;
+ uint64_t mem_region:2;
+ } sio;
+
+ struct {
+ uint64_t addr:13;
+ cvmx_add_win_dec_t csrdec:2;
+ uint64_t ones:49;
+ } sscr;
+
+ struct {
+ uint64_t addr:7;
+ uint64_t type:3;
+ uint64_t unused2:3;
+ uint64_t csrdec:2;
+ uint64_t ones:49;
+ } sdma;
+
+ struct {
+ uint64_t unused:40;
+ uint64_t didspace:24;
+ } sfilldidspace;
+#endif
} cvmx_addr_t;
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index 2298199a287e..c373d95b5e2c 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -53,6 +53,7 @@
* to 0.
*/
struct cvmx_bootinfo {
+#ifdef __BIG_ENDIAN_BITFIELD
uint32_t major_version;
uint32_t minor_version;
@@ -123,6 +124,60 @@ struct cvmx_bootinfo {
*/
uint64_t fdt_addr;
#endif
+#else /* __BIG_ENDIAN */
+ /*
+ * Little-Endian: When the CPU mode is switched to
+ * little-endian, the view of the structure has some of the
+ * fields swapped.
+ */
+ uint32_t minor_version;
+ uint32_t major_version;
+
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ uint64_t desc_vaddr;
+
+ uint32_t stack_size;
+ uint32_t exception_base_addr;
+
+ uint32_t core_mask;
+ uint32_t flags;
+
+ uint32_t phy_mem_desc_addr;
+ uint32_t dram_size;
+
+ uint32_t eclock_hz;
+ uint32_t debugger_flags_base_addr;
+
+ uint32_t reserved0;
+ uint32_t dclock_hz;
+
+ uint8_t reserved3;
+ uint8_t reserved2;
+ uint16_t reserved1;
+ uint8_t board_rev_minor;
+ uint8_t board_rev_major;
+ uint16_t board_type;
+
+ char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ uint8_t pad[5];
+
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+ uint32_t config_flags;
+ uint32_t dfa_ref_clock_hz;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 3)
+ uint64_t fdt_addr;
+#endif
+#endif
};
#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
index 352f1dc2508b..374562507d0b 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootmem.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -95,6 +95,7 @@ struct cvmx_bootmem_named_block_desc {
* positions for backwards compatibility.
*/
struct cvmx_bootmem_desc {
+#if defined(__BIG_ENDIAN_BITFIELD) || defined(CVMX_BUILD_FOR_LINUX_HOST)
/* spinlock to control access to list */
uint32_t lock;
/* flags for indicating various conditions */
@@ -120,7 +121,20 @@ struct cvmx_bootmem_desc {
uint32_t named_block_name_len;
/* address of named memory block descriptors */
uint64_t named_block_array_addr;
+#else /* __LITTLE_ENDIAN */
+ uint32_t flags;
+ uint32_t lock;
+ uint64_t head_addr;
+ uint32_t minor_version;
+ uint32_t major_version;
+ uint64_t app_data_addr;
+ uint64_t app_data_size;
+
+ uint32_t named_block_name_len;
+ uint32_t named_block_num_blocks;
+ uint64_t named_block_array_addr;
+#endif
};
/**
diff --git a/arch/mips/include/asm/octeon/cvmx-fau.h b/arch/mips/include/asm/octeon/cvmx-fau.h
index ef98f7fc102f..dafeae300c97 100644
--- a/arch/mips/include/asm/octeon/cvmx-fau.h
+++ b/arch/mips/include/asm/octeon/cvmx-fau.h
@@ -105,6 +105,16 @@ typedef union {
} s;
} cvmx_fau_async_tagwait_result_t;
+#ifdef __BIG_ENDIAN_BITFIELD
+#define SWIZZLE_8 0
+#define SWIZZLE_16 0
+#define SWIZZLE_32 0
+#else
+#define SWIZZLE_8 0x7
+#define SWIZZLE_16 0x6
+#define SWIZZLE_32 0x4
+#endif
+
/**
* Builds a store I/O address for writing to the FAU
*
@@ -175,6 +185,7 @@ static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
int32_t value)
{
+ reg ^= SWIZZLE_32;
return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
}
@@ -189,6 +200,7 @@ static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
int16_t value)
{
+ reg ^= SWIZZLE_16;
return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
}
@@ -201,6 +213,7 @@ static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
*/
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
+ reg ^= SWIZZLE_8;
return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
}
@@ -247,6 +260,7 @@ cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
uint64_t i32;
cvmx_fau_tagwait32_t t;
} result;
+ reg ^= SWIZZLE_32;
result.i32 =
cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
return result.t;
@@ -270,6 +284,7 @@ cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
uint64_t i16;
cvmx_fau_tagwait16_t t;
} result;
+ reg ^= SWIZZLE_16;
result.i16 =
cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
return result.t;
@@ -292,6 +307,7 @@ cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
uint64_t i8;
cvmx_fau_tagwait8_t t;
} result;
+ reg ^= SWIZZLE_8;
result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
return result.t;
}
@@ -521,6 +537,7 @@ static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
*/
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
+ reg ^= SWIZZLE_32;
cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
}
@@ -533,6 +550,7 @@ static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
*/
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
+ reg ^= SWIZZLE_16;
cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
}
@@ -544,6 +562,7 @@ static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
*/
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
+ reg ^= SWIZZLE_8;
cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
}
@@ -568,6 +587,7 @@ static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
*/
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
{
+ reg ^= SWIZZLE_32;
cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
}
@@ -580,6 +600,7 @@ static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
*/
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
{
+ reg ^= SWIZZLE_16;
cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
}
@@ -591,6 +612,7 @@ static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
*/
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
{
+ reg ^= SWIZZLE_8;
cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
}
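
The SWIZZLE_8/16/32 constants introduced in this file compensate for sub-word addressing when the core runs little-endian: XOR-ing the low address bits with 7, 6 or 4 maps an 8-, 16- or 32-bit slot onto its mirror position inside the 64-bit word. A minimal host-side sketch of the same arithmetic (plain C on an in-memory word, not Octeon FAU I/O):

/* Minimal sketch of the XOR swizzle on an ordinary 64-bit word; the
 * offsets 7/6/4 mirror SWIZZLE_8/16/32 from the hunk above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t word = 0x0011223344556677ULL;
	unsigned char b[8];
	unsigned off = 0;		/* "logical" big-endian byte offset 0 */

	memcpy(b, &word, sizeof(b));

	if (b[0] == 0x77)		/* host is little-endian */
		off ^= 0x7;		/* SWIZZLE_8: mirror the byte offset */

	printf("logical byte 0 = 0x%02x\n", b[off]);	/* 0x00 on either host */
	return 0;
}
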
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa.h b/arch/mips/include/asm/octeon/cvmx-fpa.h
index aa26a2ce5a0e..c00501d0f7ae 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa.h
@@ -49,6 +49,7 @@
typedef union {
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/*
* the (64-bit word) location in scratchpad to write
* to (if len != 0)
@@ -63,6 +64,12 @@ typedef union {
* the NCB bus.
*/
uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t did:8;
+ uint64_t len:8;
+ uint64_t scraddr:8;
+#endif
} s;
} cvmx_fpa_iobdma_data_t;
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 11c0a8fa8eb5..ddb429210a0e 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -53,12 +53,21 @@
union cvmx_l2c_tag {
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:28;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:32; /* Phys mem (not all bits valid) */
+#else
+ uint64_t addr:32; /* Phys mem (not all bits valid) */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t L:1; /* Line locked */
+ uint64_t D:1; /* Line dirty */
+ uint64_t V:1; /* Line valid */
+ uint64_t reserved:28;
+#endif
} s;
};
diff --git a/arch/mips/include/asm/octeon/cvmx-packet.h b/arch/mips/include/asm/octeon/cvmx-packet.h
index 38aefa1bab9d..895e93d682c2 100644
--- a/arch/mips/include/asm/octeon/cvmx-packet.h
+++ b/arch/mips/include/asm/octeon/cvmx-packet.h
@@ -39,6 +39,7 @@ union cvmx_buf_ptr {
void *ptr;
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* if set, invert the "free" pick of the overall
* packet. HW always sets this bit to 0 on inbound
* packet */
@@ -55,6 +56,13 @@ union cvmx_buf_ptr {
uint64_t size:16;
/* Pointer to the first byte of the data, NOT buffer */
uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
} s;
};
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index f7d2a6718849..3da59bb8ce24 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -127,6 +127,7 @@ typedef struct {
typedef union {
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Must CVMX_IO_SEG */
uint64_t mem_space:2;
/* Must be zero */
@@ -151,6 +152,17 @@ typedef union {
uint64_t queue:9;
/* Must be zero */
uint64_t reserved4:3;
+#else
+ uint64_t reserved4:3;
+ uint64_t queue:9;
+ uint64_t port:9;
+ uint64_t reserved3:15;
+ uint64_t reserved2:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved:13;
+ uint64_t mem_space:2;
+#endif
} s;
} cvmx_pko_doorbell_address_t;
@@ -160,6 +172,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/*
* The size of the reg1 operation - could be 8, 16,
* 32, or 64 bits.
@@ -229,6 +242,24 @@ typedef union {
uint64_t segs:6;
/* Including L2, but no trailing CRC */
uint64_t total_bytes:16;
+#else
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
+#endif
} s;
} cvmx_pko_command_word0_t;
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 2188e65afb86..d5565d758ddd 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -178,6 +178,7 @@ typedef enum {
typedef union {
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/*
* Don't reschedule this entry. no_sched is used for
* CVMX_POW_TAG_OP_SWTAG_DESCH and
@@ -217,6 +218,17 @@ typedef union {
* CVMX_POW_TAG_OP_*_NSCHED
*/
uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t type:3;
+ uint64_t grp:4;
+ uint64_t qos:3;
+ uint64_t unused2:2;
+ cvmx_pow_tag_op_t op:4;
+ uint64_t index:13;
+ uint64_t unused:2;
+ uint64_t no_sched:1;
+#endif
} s;
} cvmx_pow_tag_req_t;
@@ -230,6 +242,7 @@ typedef union {
* Address for new work request loads (did<2:0> == 0)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Mips64 address region. Should be CVMX_IO_SEG */
uint64_t mem_region:2;
/* Must be zero */
@@ -247,12 +260,22 @@ typedef union {
uint64_t wait:1;
/* Must be zero */
uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t wait:1;
+ uint64_t reserved_4_39:36;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
} swork;
/**
* Address for loads to get POW internal status
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Mips64 address region. Should be CVMX_IO_SEG */
uint64_t mem_region:2;
/* Must be zero */
@@ -282,12 +305,25 @@ typedef union {
uint64_t get_wqp:1;
/* Must be zero */
uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t get_wqp:1;
+ uint64_t get_cur:1;
+ uint64_t get_rev:1;
+ uint64_t coreid:4;
+ uint64_t reserved_10_39:30;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
} sstatus;
/**
* Address for memory loads to get POW internal state
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Mips64 address region. Should be CVMX_IO_SEG */
uint64_t mem_region:2;
/* Must be zero */
@@ -314,12 +350,24 @@ typedef union {
uint64_t get_wqp:1;
/* Must be zero */
uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t get_wqp:1;
+ uint64_t get_des:1;
+ uint64_t index:11;
+ uint64_t reserved_16_39:24;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
} smemload;
/**
* Address for index/pointer loads
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Mips64 address region. Should be CVMX_IO_SEG */
uint64_t mem_region:2;
/* Must be zero */
@@ -366,6 +414,17 @@ typedef union {
uint64_t get_rmt:1;
/* Must be zero */
uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t get_rmt:1;
+ uint64_t get_des_get_tail:1;
+ uint64_t qosgrp:4;
+ uint64_t reserved_9_39:31;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
} sindexload;
/**
@@ -377,6 +436,7 @@ typedef union {
* available.)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Mips64 address region. Should be CVMX_IO_SEG */
uint64_t mem_region:2;
/* Must be zero */
@@ -387,6 +447,13 @@ typedef union {
uint64_t did:8;
/* Must be zero */
uint64_t reserved_0_39:40;
+#else
+ uint64_t reserved_0_39:40;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
} snull_rd;
} cvmx_pow_load_addr_t;
@@ -401,6 +468,7 @@ typedef union {
* Response to new work request loads
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/*
* Set when no new work queue entry was returned. *
* If there was de-scheduled work, the HW will
@@ -419,12 +487,18 @@ typedef union {
uint64_t reserved_40_62:23;
/* 36 in O1 -- the work queue pointer */
uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t reserved_40_62:23;
+ uint64_t no_work:1;
+#endif
} s_work;
/**
* Result for a POW Status Load (when get_cur==0 and get_wqp==0)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_62_63:2;
/* Set when there is a pending non-NULL SWTAG or
* SWTAG_FULL, and the POW entry has not left the list
@@ -476,12 +550,32 @@ typedef union {
* AND pend_desched_switch) are set.
*/
uint64_t pend_tag:32;
+#else
+ uint64_t pend_tag:32;
+ uint64_t pend_type:2;
+ uint64_t reserved_34_35:2;
+ uint64_t pend_grp:4;
+ uint64_t pend_index:11;
+ uint64_t reserved_51:1;
+ uint64_t pend_nosched_clr:1;
+ uint64_t pend_null_rd:1;
+ uint64_t pend_new_work_wait:1;
+ uint64_t pend_new_work:1;
+ uint64_t pend_nosched:1;
+ uint64_t pend_desched_switch:1;
+ uint64_t pend_desched:1;
+ uint64_t pend_switch_null:1;
+ uint64_t pend_switch_full:1;
+ uint64_t pend_switch:1;
+ uint64_t reserved_62_63:2;
+#endif
} s_sstatus0;
/**
* Result for a POW Status Load (when get_cur==0 and get_wqp==1)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_62_63:2;
/*
* Set when there is a pending non-NULL SWTAG or
@@ -529,6 +623,23 @@ typedef union {
uint64_t pend_grp:4;
/* This is the wqp when pend_nosched_clr is set. */
uint64_t pend_wqp:36;
+#else
+ uint64_t pend_wqp:36;
+ uint64_t pend_grp:4;
+ uint64_t pend_index:11;
+ uint64_t reserved_51:1;
+ uint64_t pend_nosched_clr:1;
+ uint64_t pend_null_rd:1;
+ uint64_t pend_new_work_wait:1;
+ uint64_t pend_new_work:1;
+ uint64_t pend_nosched:1;
+ uint64_t pend_desched_switch:1;
+ uint64_t pend_desched:1;
+ uint64_t pend_switch_null:1;
+ uint64_t pend_switch_full:1;
+ uint64_t pend_switch:1;
+ uint64_t reserved_62_63:2;
+#endif
} s_sstatus1;
/**
@@ -536,6 +647,7 @@ typedef union {
* get_rev==0)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_62_63:2;
/*
* Points to the next POW entry in the tag list when
@@ -573,12 +685,23 @@ typedef union {
* SWTAG_DESCHED).
*/
uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t tail:1;
+ uint64_t head:1;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t link_index:11;
+ uint64_t reserved_62_63:2;
+#endif
} s_sstatus2;
/**
* Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_62_63:2;
/*
* Points to the prior POW entry in the tag list when
@@ -617,6 +740,16 @@ typedef union {
* SWTAG_DESCHED).
*/
uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t tail:1;
+ uint64_t head:1;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t revlink_index:11;
+ uint64_t reserved_62_63:2;
+#endif
} s_sstatus3;
/**
@@ -624,6 +757,7 @@ typedef union {
* get_rev==0)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_62_63:2;
/*
* Points to the next POW entry in the tag list when
@@ -642,6 +776,13 @@ typedef union {
* list entered on SWTAG_FULL).
*/
uint64_t wqp:36;
+#else
+ uint64_t wqp:36;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t link_index:11;
+ uint64_t reserved_62_63:2;
+#endif
} s_sstatus4;
/**
@@ -649,6 +790,7 @@ typedef union {
* get_rev==1)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_62_63:2;
/*
* Points to the prior POW entry in the tag list when
@@ -669,12 +811,20 @@ typedef union {
* list entered on SWTAG_FULL).
*/
uint64_t wqp:36;
+#else
+ uint64_t wqp:36;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t revlink_index:11;
+ uint64_t reserved_62_63:2;
+#endif
} s_sstatus5;
/**
* Result For POW Memory Load (get_des == 0 and get_wqp == 0)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_51_63:13;
/*
* The next entry in the input, free, descheduled_head
@@ -695,12 +845,22 @@ typedef union {
uint64_t tag_type:2;
/* The tag of the POW entry. */
uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t tail:1;
+ uint64_t reserved_35:1;
+ uint64_t grp:4;
+ uint64_t next_index:11;
+ uint64_t reserved_51_63:13;
+#endif
} s_smemload0;
/**
* Result For POW Memory Load (get_des == 0 and get_wqp == 1)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_51_63:13;
/*
* The next entry in the input, free, descheduled_head
@@ -712,12 +872,19 @@ typedef union {
uint64_t grp:4;
/* The WQP held in the POW entry. */
uint64_t wqp:36;
+#else
+ uint64_t wqp:36;
+ uint64_t grp:4;
+ uint64_t next_index:11;
+ uint64_t reserved_51_63:13;
+#endif
} s_smemload1;
/**
* Result For POW Memory Load (get_des == 1)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_51_63:13;
/*
* The next entry in the tag list connected to the
@@ -740,12 +907,22 @@ typedef union {
* is set.
*/
uint64_t pend_tag:32;
+#else
+ uint64_t pend_tag:32;
+ uint64_t pend_type:2;
+ uint64_t pend_switch:1;
+ uint64_t nosched:1;
+ uint64_t grp:4;
+ uint64_t fwd_index:11;
+ uint64_t reserved_51_63:13;
+#endif
} s_smemload2;
/**
* Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_52_63:12;
/*
* set when there is one or more POW entries on the
@@ -791,12 +968,28 @@ typedef union {
* the input Q list selected by qosgrp.
*/
uint64_t loc_tail:11;
+#else
+ uint64_t loc_tail:11;
+ uint64_t reserved_11:1;
+ uint64_t loc_head:11;
+ uint64_t reserved_23:1;
+ uint64_t loc_one:1;
+ uint64_t loc_val:1;
+ uint64_t free_tail:11;
+ uint64_t reserved_37:1;
+ uint64_t free_head:11;
+ uint64_t reserved_49:1;
+ uint64_t free_one:1;
+ uint64_t free_val:1;
+ uint64_t reserved_52_63:12;
+#endif
} sindexload0;
/**
* Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_52_63:12;
/*
* set when there is one or more POW entries on the
@@ -843,12 +1036,28 @@ typedef union {
* head on the descheduled list selected by qosgrp.
*/
uint64_t des_tail:11;
+#else
+ uint64_t des_tail:11;
+ uint64_t reserved_11:1;
+ uint64_t des_head:11;
+ uint64_t reserved_23:1;
+ uint64_t des_one:1;
+ uint64_t des_val:1;
+ uint64_t nosched_tail:11;
+ uint64_t reserved_37:1;
+ uint64_t nosched_head:11;
+ uint64_t reserved_49:1;
+ uint64_t nosched_one:1;
+ uint64_t nosched_val:1;
+ uint64_t reserved_52_63:12;
+#endif
} sindexload1;
/**
* Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_39_63:25;
/*
* Set when this DRAM list is the current head
@@ -877,6 +1086,13 @@ typedef union {
* qosgrp.
*/
uint64_t rmt_head:36;
+#else
+ uint64_t rmt_head:36;
+ uint64_t rmt_one:1;
+ uint64_t rmt_val:1;
+ uint64_t rmt_is_head:1;
+ uint64_t reserved_39_63:25;
+#endif
} sindexload2;
/**
@@ -884,6 +1100,7 @@ typedef union {
* 1/get_des_get_tail == 1)
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_39_63:25;
/*
* set when this DRAM list is the current head
@@ -912,12 +1129,20 @@ typedef union {
* qosgrp.
*/
uint64_t rmt_tail:36;
+#else
+ uint64_t rmt_tail:36;
+ uint64_t rmt_one:1;
+ uint64_t rmt_val:1;
+ uint64_t rmt_is_head:1;
+ uint64_t reserved_39_63:25;
+#endif
} sindexload3;
/**
* Response to NULL_RD request loads
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t unused:62;
/* of type cvmx_pow_tag_type_t. state is one of the
* following:
@@ -928,6 +1153,10 @@ typedef union {
* - CVMX_POW_TAG_TYPE_NULL_NULL
*/
uint64_t state:2;
+#else
+ uint64_t state:2;
+ uint64_t unused:62;
+#endif
} s_null_rd;
} cvmx_pow_tag_load_resp_t;
@@ -962,6 +1191,7 @@ typedef union {
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Memory region. Should be CVMX_IO_SEG in most cases */
uint64_t mem_reg:2;
uint64_t reserved_49_61:13; /* Must be zero */
@@ -971,6 +1201,14 @@ typedef union {
uint64_t reserved_36_39:4; /* Must be zero */
/* Address field. addr<2:0> must be zero */
uint64_t addr:36;
+#else
+ uint64_t addr:36;
+ uint64_t reserved_36_39:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_reg:2;
+#endif
} stag;
} cvmx_pow_tag_store_addr_t;
@@ -981,6 +1219,7 @@ typedef union {
uint64_t u64;
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/*
* the (64-bit word) location in scratchpad to write
* to (if len != 0)
@@ -994,6 +1233,14 @@ typedef union {
/* if set, don't return load response until work is available */
uint64_t wait:1;
uint64_t unused2:3;
+#else
+ uint64_t unused2:3;
+ uint64_t wait:1;
+ uint64_t unused:36;
+ uint64_t did:8;
+ uint64_t len:8;
+ uint64_t scraddr:8;
+#endif
} s;
} cvmx_pow_iobdma_store_t;
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
index aa0d3d0de75c..2d6d0c7127a7 100644
--- a/arch/mips/include/asm/octeon/cvmx-wqe.h
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -57,6 +57,7 @@ typedef union {
/* Use this struct if the hardware determines that the packet is IP */
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/* HW sets this to the number of buffers used by this packet */
uint64_t bufs:8;
/* HW sets to the number of L2 bytes prior to the IP */
@@ -166,13 +167,45 @@ typedef union {
* the slow path */
/* type is cvmx_pip_err_t */
uint64_t err_code:8;
+#else
+ uint64_t err_code:8;
+ uint64_t rcv_error:1;
+ uint64_t not_IP:1;
+ uint64_t is_mcast:1;
+ uint64_t is_bcast:1;
+ uint64_t IP_exc:1;
+ uint64_t is_frag:1;
+ uint64_t L4_error:1;
+ uint64_t software:1;
+ uint64_t is_v6:1;
+ uint64_t dec_ipsec:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipcomp:1;
+ uint64_t unassigned2:4;
+ uint64_t unassigned2a:4;
+ uint64_t pr:4;
+ uint64_t vlan_id:12;
+ uint64_t vlan_cfi:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_stacked:1;
+ uint64_t vlan_valid:1;
+ uint64_t ip_offset:8;
+ uint64_t bufs:8;
+#endif
} s;
/* use this to get at the 16 vlan bits */
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
uint64_t unused1:16;
uint64_t vlan:16;
uint64_t unused2:32;
+#else
+ uint64_t unused2:32;
+ uint64_t vlan:16;
+ uint64_t unused1:16;
+
+#endif
} svlan;
/*
@@ -180,6 +213,7 @@ typedef union {
* the packet is ip.
*/
struct {
+#ifdef __BIG_ENDIAN_BITFIELD
/*
* HW sets this to the number of buffers used by this
* packet.
@@ -296,6 +330,27 @@ typedef union {
*/
/* type is cvmx_pip_err_t (union, so can't use directly */
uint64_t err_code:8;
+#else
+ uint64_t err_code:8;
+ uint64_t rcv_error:1;
+ uint64_t not_IP:1;
+ uint64_t is_mcast:1;
+ uint64_t is_bcast:1;
+ uint64_t is_arp:1;
+ uint64_t is_rarp:1;
+ uint64_t unassigned3:1;
+ uint64_t software:1;
+ uint64_t unassigned2:4;
+ uint64_t unassigned2a:8;
+ uint64_t pr:4;
+ uint64_t vlan_id:12;
+ uint64_t vlan_cfi:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_stacked:1;
+ uint64_t vlan_valid:1;
+ uint64_t unused:8;
+ uint64_t bufs:8;
+#endif
} snoip;
} cvmx_pip_wqe_word2;
@@ -312,6 +367,7 @@ typedef struct {
* HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
+#ifdef __BIG_ENDIAN_BITFIELD
/**
* raw chksum result generated by the HW
*/
@@ -327,12 +383,18 @@ typedef struct {
* (Only 36 bits used in Octeon 1)
*/
uint64_t next_ptr:40;
+#else
+ uint64_t next_ptr:40;
+ uint8_t unused;
+ uint16_t hw_chksum;
+#endif
/*****************************************************************
* WORD 1
* HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
+#ifdef __BIG_ENDIAN_BITFIELD
/**
* HW sets to the total number of bytes in the packet
*/
@@ -359,6 +421,15 @@ typedef struct {
* the synchronization/ordering tag
*/
uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:1;
+ uint64_t grp:4;
+ uint64_t qos:3;
+ uint64_t ipprt:6;
+ uint64_t len:16;
+#endif
/**
* WORD 2 HW WRITE: the following 64-bits are filled in by
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 33db1c806b01..774bb45834cb 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -436,14 +436,6 @@ static inline uint64_t cvmx_get_cycle_global(void)
/***************************************************************************/
-static inline void cvmx_reset_octeon(void)
-{
- union cvmx_ciu_soft_rst ciu_soft_rst;
- ciu_soft_rst.u64 = 0;
- ciu_soft_rst.s.soft_rst = 1;
- cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
-}
-
/* Return the number of cores available in the chip */
static inline uint32_t cvmx_octeon_num_cores(void)
{
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 041596570856..de9f74ee5dd0 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -335,4 +335,6 @@ void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t);
extern void octeon_fixup_irqs(void);
+extern struct semaphore octeon_bootbus_sem;
+
#endif /* __ASM_OCTEON_OCTEON_H */
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index 64ba56a02843..1884609741a8 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -11,9 +11,6 @@
#include <linux/pci.h>
-/* Some PCI cards require delays when accessing config space. */
-#define PCI_CONFIG_SPACE_DELAY 10000
-
/*
* The physical memory base mapped by BAR1. 256MB at the end of the
* first 4GB.
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 154b70a10483..89dd7fed1a57 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -105,8 +105,6 @@ static inline void clear_user_page(void *addr, unsigned long vaddr,
flush_data_cache_page((unsigned long)addr);
}
-extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *to);
struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 193b4c6b7541..d9692993fc83 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -35,6 +35,8 @@ struct pci_controller {
struct resource *io_resource;
unsigned long io_offset;
unsigned long io_map_base;
+ struct resource *busn_resource;
+ unsigned long busn_offset;
unsigned int index;
/* For compatibility with current (as of July 2003) pciutils
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index af2c8a351ca7..8d7a63b52ac7 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -835,6 +835,7 @@ struct bridge_controller {
struct pci_controller pc;
struct resource mem;
struct resource io;
+ struct resource busn;
bridge_t *base;
nasid_t nasid;
unsigned int widget_id;
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index a6be006b6f75..7d56686c0e62 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -105,13 +105,16 @@ static inline void pmd_clear(pmd_t *pmdp)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
+#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
pte_t pte;
- pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
- pte.pte_low = pgprot_val(prot);
+
+ pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
+ (pgprot_val(prot) & ~_PFNX_MASK);
+ pte.pte_high = (pfn << _PFN_SHIFT) |
+ (pgprot_val(prot) & ~_PFN_MASK);
return pte;
}
@@ -166,9 +169,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
/* Swap entries must have VALID and GLOBAL bits cleared. */
-#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((x).val >> 7)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __swp_type(x) (((x).val >> 4) & 0x1f)
+#define __swp_offset(x) ((x).val >> 9)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1659bb91ae21..cf661a2fb141 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -279,14 +279,14 @@ extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);
/*
- * Non-present pages: high 24 bits are offset, next 8 bits type,
- * low 32 bits zero.
+ * Non-present pages: high 40 bits are offset, next 8 bits type,
+ * low 16 bits zero.
*/
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
-{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
+{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }
-#define __swp_type(x) (((x).val >> 32) & 0xff)
-#define __swp_offset(x) ((x).val >> 40)
+#define __swp_type(x) (((x).val >> 16) & 0xff)
+#define __swp_offset(x) ((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 91747c282bb3..18ae5ddef118 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -37,7 +37,11 @@
/*
* The following bits are implemented by the TLB hardware
*/
-#define _PAGE_GLOBAL_SHIFT 0
+#define _PAGE_NO_EXEC_SHIFT 0
+#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
@@ -49,7 +53,7 @@
/*
* The following bits are implemented in software
*/
-#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3)
+#define _PAGE_PRESENT_SHIFT (24)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
@@ -62,6 +66,11 @@
#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+/*
+ * Bits for extended EntryLo0/EntryLo1 registers
+ */
+#define _PFNX_MASK 0xffffff
+
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
/*
@@ -95,11 +104,7 @@
#else
/*
- * When using the RI/XI bit support, we have 13 bits of flags below
- * the physical address. The RI/XI bits are placed such that a SRL 5
- * can strip off the software bits, then a ROTR 2 can move the RI/XI
- * into bits [63:62]. This also limits physical address to 56 bits,
- * which is more than we need right now.
+ * Below are the "Normal" R4K cases
*/
/*
@@ -107,38 +112,59 @@
*/
#define _PAGE_PRESENT_SHIFT 0
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
+/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
+#ifdef CONFIG_CPU_MIPSR2
+#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#else
+#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#endif
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
+#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+/* Huge TLB page */
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
+
+/* Only R2 or newer cores have the XI bit */
+#ifdef CONFIG_CPU_MIPSR2
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
#else
-#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
-#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
-#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
-#endif
+#define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#endif /* CONFIG_CPU_MIPSR2 */
-/* Page cannot be executed */
-#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
-#define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })
+#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
-/* Page cannot be read */
-#define _PAGE_NO_READ_SHIFT (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
-#define _PAGE_NO_READ ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })
+#ifdef CONFIG_CPU_MIPSR2
+/* XI - page cannot be executed */
+#ifndef _PAGE_NO_EXEC_SHIFT
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#endif
+#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+
+/* RI - page cannot be read */
+#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
+#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT
+#define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+
+#else /* !CONFIG_CPU_MIPSR2 */
+#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#endif /* CONFIG_CPU_MIPSR2 */
+
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
@@ -150,18 +176,26 @@
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
+#ifndef _PAGE_NO_EXEC
+#define _PAGE_NO_EXEC 0
+#endif
+#ifndef _PAGE_NO_READ
+#define _PAGE_NO_READ 0
+#endif
+
#define _PAGE_SILENT_READ _PAGE_VALID
#define _PAGE_SILENT_WRITE _PAGE_DIRTY
#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
-#ifndef _PAGE_NO_READ
-#define _PAGE_NO_READ ({BUG(); 0; })
-#define _PAGE_NO_READ_SHIFT ({BUG(); 0; })
-#endif
-#ifndef _PAGE_NO_EXEC
-#define _PAGE_NO_EXEC ({BUG(); 0; })
-#endif
+/*
+ * The final layouts of the PTE bits are:
+ *
+ * 64-bit, R1 or earlier: CCC D V G [S H] M A W R P
+ * 32-bit, R1 or earlier: CCC D V G M A W R P
+ * 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P
+ * 32-bit, R2 or later: CCC D V G RI/R XI M A W P
+ */
#ifndef __ASSEMBLY__
@@ -171,6 +205,7 @@
*/
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
+#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_rixi) {
int sa;
#ifdef CONFIG_32BIT
@@ -186,6 +221,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
return (pte_val >> _PAGE_GLOBAL_SHIFT) |
((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
}
+#endif
return pte_val >> _PAGE_GLOBAL_SHIFT;
}
@@ -245,7 +281,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#endif
-#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
+#define __READABLE (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index bef782c4a44b..819af9d057a8 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -24,17 +24,17 @@ struct mm_struct;
struct vm_area_struct;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
_page_cachable_default)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
- (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
+ _page_cachable_default)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_page_cachable_default)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
+#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
@@ -127,13 +127,9 @@ do { \
} \
} while(0)
-
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
- pte_t pteval);
-
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -142,18 +138,17 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
smp_wmb();
ptep->pte_low = pte.pte_low;
- if (pte.pte_low & _PAGE_GLOBAL) {
+ if (pte.pte_high & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
/*
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
- if (pte_none(*buddy)) {
- buddy->pte_low |= _PAGE_GLOBAL;
+ if (pte_none(*buddy))
buddy->pte_high |= _PAGE_GLOBAL;
- }
}
}
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
@@ -161,8 +156,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
htw_stop();
/* Preserve global status for the pair */
- if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
- null.pte_low = null.pte_high = _PAGE_GLOBAL;
+ if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+ null.pte_high = _PAGE_GLOBAL;
set_pte_at(mm, addr, ptep, null);
htw_start();
@@ -192,6 +187,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
}
#endif
}
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
@@ -242,21 +238,21 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
- pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+ pte.pte_low &= ~_PAGE_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
- pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+ pte.pte_low &= ~_PAGE_MODIFIED;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
- pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
+ pte.pte_low &= ~_PAGE_ACCESSED;
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
}
@@ -264,30 +260,24 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
pte.pte_low |= _PAGE_WRITE;
- if (pte.pte_low & _PAGE_MODIFIED) {
- pte.pte_low |= _PAGE_SILENT_WRITE;
+ if (pte.pte_low & _PAGE_MODIFIED)
pte.pte_high |= _PAGE_SILENT_WRITE;
- }
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte.pte_low |= _PAGE_MODIFIED;
- if (pte.pte_low & _PAGE_WRITE) {
- pte.pte_low |= _PAGE_SILENT_WRITE;
+ if (pte.pte_low & _PAGE_WRITE)
pte.pte_high |= _PAGE_SILENT_WRITE;
- }
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte.pte_low |= _PAGE_ACCESSED;
- if (pte.pte_low & _PAGE_READ) {
- pte.pte_low |= _PAGE_SILENT_READ;
+ if (pte.pte_low & _PAGE_READ)
pte.pte_high |= _PAGE_SILENT_READ;
- }
return pte;
}
#else
@@ -332,13 +322,13 @@ static inline pte_t pte_mkdirty(pte_t pte)
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
- if (cpu_has_rixi) {
- if (!(pte_val(pte) & _PAGE_NO_READ))
- pte_val(pte) |= _PAGE_SILENT_READ;
- } else {
- if (pte_val(pte) & _PAGE_READ)
- pte_val(pte) |= _PAGE_SILENT_READ;
- }
+#ifdef CONFIG_CPU_MIPSR2
+ if (!(pte_val(pte) & _PAGE_NO_READ))
+ pte_val(pte) |= _PAGE_SILENT_READ;
+ else
+#endif
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) |= _PAGE_SILENT_READ;
return pte;
}
@@ -391,10 +381,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
- pte.pte_low |= pgprot_val(newprot);
- pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+ pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
+ pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
return pte;
}
#else
@@ -407,12 +397,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
pte_t pte);
+extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
__update_tlb(vma, address, pte);
+ __update_cache(vma, address, pte);
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
@@ -534,13 +527,13 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_ACCESSED;
- if (cpu_has_rixi) {
- if (!(pmd_val(pmd) & _PAGE_NO_READ))
- pmd_val(pmd) |= _PAGE_SILENT_READ;
- } else {
- if (pmd_val(pmd) & _PAGE_READ)
- pmd_val(pmd) |= _PAGE_SILENT_READ;
- }
+#ifdef CONFIG_CPU_MIPSR2
+ if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+ else
+#endif
+ if (pmd_val(pmd) & _PAGE_READ)
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
return pmd;
}
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 1b22d2da88a1..38902bf97adc 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -12,6 +12,8 @@
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H
+#include <linux/stringify.h>
+
#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
@@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
" cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
" cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
" cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
- " addiu $1, $0, 0x100 \n" \
+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x010($1)\n" \
" cache %1, 0x020($1); cache %1, 0x030($1)\n" \
" cache %1, 0x040($1); cache %1, 0x050($1)\n" \
@@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr)
" cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
" cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
" cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x020($1)\n" \
" cache %1, 0x040($1); cache %1, 0x060($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
" cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " addiu $1, $1, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x020($1)\n" \
" cache %1, 0x040($1); cache %1, 0x060($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
" cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " addiu $1, $1, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
" cache %1, 0x000($1); cache %1, 0x020($1)\n" \
" cache %1, 0x040($1); cache %1, 0x060($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
@@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr)
" .set noat\n" \
" cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
" cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
" cache %1, 0x000($1); cache %1, 0x040($1)\n" \
" cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
" .set pop\n" \
@@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr)
" .set mips64r6\n" \
" .set noat\n" \
" cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " addiu $1, %0, 0x100\n" \
+ " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+ " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
" .set pop\n" \
: \
: "r" (base), \
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index f29c75cf83c6..1d8a2e2c75c1 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -2,11 +2,6 @@
#include <linux/unistd.h>
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
/*
* Kludge alert:
*
@@ -29,4 +24,6 @@
#endif /* CONFIG_MIPS32_O32 */
+#include <asm-generic/seccomp.h>
+
#endif /* __ASM_SECCOMP_H */
diff --git a/arch/mips/include/asm/sgi/sgi.h b/arch/mips/include/asm/sgi/sgi.h
index 645cea7c0f8e..b61557151e3f 100644
--- a/arch/mips/include/asm/sgi/sgi.h
+++ b/arch/mips/include/asm/sgi/sgi.h
@@ -22,14 +22,15 @@ enum sgi_mach {
ip17, /* R4K UP */
ip19, /* R4K MP */
ip20, /* R4K UP, Indigo */
- ip21, /* TFP MP */
- ip22, /* R4x00 UP, Indigo2 */
+ ip21, /* R8k/TFP MP */
+ ip22, /* R4x00 UP, Indy, Indigo2 */
ip25, /* R10k MP */
- ip26, /* TFP UP, Indigo2 */
- ip27, /* R10k MP, R12k MP, Origin */
- ip28, /* R10k UP, Indigo2 */
- ip30, /* Octane */
- ip32, /* O2 */
+ ip26, /* R8k/TFP UP, Indigo2 */
+ ip27, /* R10k MP, R12k MP, R14k MP, Origin 200/2k, Onyx2 */
+ ip28, /* R10k UP, Indigo2 Impact R10k */
+ ip30, /* R10k MP, R12k MP, R14k MP, Octane */
+ ip32, /* R5k UP, RM5200 UP, RM7k UP, R10k UP, R12k UP, O2 */
+ ip35, /* R14k MP, R16k MP, Origin 300/3k, Onyx3, Fuel, Tezro */
};
extern enum sgi_mach sgimach;
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index eacf865d21c2..2b25d1ba1ea0 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -45,7 +45,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_DUMP 0x8
#define SMP_ASK_C0COUNT 0x10
-extern volatile cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callin_map;
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;
@@ -88,7 +88,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */
- mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
+ mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index b4548690ade9..1fca2e0793dc 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
- " addiu %1, 1 \n"
+ " addiu %1, -1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 55ed6602204c..9c0014e87c17 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -23,7 +23,6 @@
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
@@ -44,7 +43,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = _TIF_FIXADE, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
@@ -55,10 +53,10 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
/* How to get the thread information struct from C. */
+register struct thread_info *__current_thread_info __asm__("$28");
+
static inline struct thread_info *current_thread_info(void)
{
- register struct thread_info *__current_thread_info __asm__("$28");
-
return __current_thread_info;
}
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index ba91be9c21ef..c41d30080098 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,3 +1,4 @@
platform-$(CONFIG_MACH_JZ4740) += jz4740/
cflags-$(CONFIG_MACH_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
load-$(CONFIG_MACH_JZ4740) += 0xffffffff80010000
+zload-$(CONFIG_MACH_JZ4740) += 0xffffffff80600000
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 5e430ce9ac7e..72b0cecbc17c 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -18,6 +18,7 @@
#include <linux/time.h>
#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
#include <asm/mach-jz4740/irq.h>
#include <asm/mach-jz4740/timer.h>
@@ -43,6 +44,11 @@ static struct clocksource jz4740_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u64 notrace jz4740_read_sched_clock(void)
+{
+ return jz4740_timer_get_count(TIMER_CLOCKSOURCE);
+}
+
static irqreturn_t jz4740_clockevent_irq(int irq, void *devid)
{
struct clock_event_device *cd = devid;
@@ -126,6 +132,8 @@ void __init plat_time_init(void)
if (ret)
printk(KERN_ERR "Failed to register clocksource: %d\n", ret);
+ sched_clock_register(jz4740_read_sched_clock, 16, clk_rate);
+
setup_irq(JZ4740_IRQ_TCU0, &timer_irqaction);
ctrl = JZ_TIMER_CTRL_PRESCALE_16 | JZ_TIMER_CTRL_SRC_EXT;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index e59fd7cfac9e..beabe19ff8e5 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -92,7 +92,6 @@ void output_thread_info_defines(void)
{
COMMENT("MIPS thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
- OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index c2e0f45ddf6c..c0c5e5972256 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -36,8 +36,10 @@ int __isa_exception_epc(struct pt_regs *regs)
return epc;
}
if (cpu_has_mips16) {
- if (((union mips16e_instruction)inst).ri.opcode
- == MIPS16e_jal_op)
+ union mips16e_instruction inst_mips16e;
+
+ inst_mips16e.full = inst;
+ if (inst_mips16e.ri.opcode == MIPS16e_jal_op)
epc += 4;
else
epc += 2;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 82bd2b278a24..d70c4d893219 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -37,6 +37,24 @@ void mips_set_clock_mode(enum clock_event_mode mode,
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2_r6;
@@ -50,27 +68,32 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
* the performance counter interrupt handler anyway.
*/
if (handle_perf_irq(r2))
- goto out;
+ return IRQ_HANDLED;
/*
* The same applies to performance counter interrupts. But with the
* above we now know that the reason we got here must be a timer
* interrupt. Being the paranoiacs we are we check anyway.
*/
- if (!r2 || (read_c0_cause() & (1 << 30))) {
+ if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
+
+ return IRQ_HANDLED;
}
-out:
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
- .flags = IRQF_PERCPU | IRQF_TIMER,
+ /*
+ * IRQF_SHARED: The timer interrupt may be shared with other interrupts
+ * such as perf counter and FDC interrupts.
+ */
+ .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
.name = "timer",
};
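For readers following the IRQF_SHARED change above: a handler registered on a shared line must return IRQ_NONE whenever its own device raised no interrupt, so the core can offer the line to the other handlers (here the perf-counter and FDC handlers). A minimal sketch of that convention, not part of the patch (the device structure, status read and all names are placeholders):

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device state, only here to keep the sketch self-contained. */
struct demo_timer {
	void __iomem *status_reg;
};

static irqreturn_t demo_timer_irq(int irq, void *dev_id)
{
	struct demo_timer *t = dev_id;

	/* Placeholder check: did *our* device actually assert the line? */
	if (!readl(t->status_reg))
		return IRQ_NONE;	/* let the other shared handlers run */

	/* ... acknowledge the interrupt and run the event handler ... */
	return IRQ_HANDLED;
}

/* Registration would then pass IRQF_SHARED, e.g.:
 *	request_irq(irq, demo_timer_irq, IRQF_TIMER | IRQF_SHARED,
 *		    "demo-timer", t);
 */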
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 2ae08462e46e..723932441ecc 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/sched_clock.h>
#include <asm/time.h>
#include <asm/txx9tmr.h>
@@ -46,6 +47,11 @@ static struct txx9_clocksource txx9_clocksource = {
},
};
+static u64 notrace txx9_read_sched_clock(void)
+{
+ return __raw_readl(&txx9_clocksource.tmrptr->trr);
+}
+
void __init txx9_clocksource_init(unsigned long baseaddr,
unsigned int imbusclk)
{
@@ -61,6 +67,9 @@ void __init txx9_clocksource_init(unsigned long baseaddr,
__raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
txx9_clocksource.tmrptr = tmrptr;
+
+ sched_clock_register(txx9_read_sched_clock, TXX9_CLOCKSOURCE_BITS,
+ TIMER_CLK(imbusclk));
}
struct txx9_clock_event_device {
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 48dfb9de853d..e36515dcd3b2 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -20,6 +20,7 @@
#include <asm/bugs.h>
#include <asm/cpu.h>
+#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
@@ -31,11 +32,127 @@
#include <asm/spram.h>
#include <asm/uaccess.h>
+/*
+ * Get the FPU Implementation/Revision.
+ */
+static inline unsigned long cpu_get_fpu_id(void)
+{
+ unsigned long tmp, fpu_id;
+
+ tmp = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+ fpu_id = read_32bit_cp1_register(CP1_REVISION);
+ write_c0_status(tmp);
+ return fpu_id;
+}
+
+/*
+ * Check if the CPU has an external FPU.
+ */
+static inline int __cpu_has_fpu(void)
+{
+ return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
+}
+
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, msa_id;
+
+ status = read_c0_status();
+ __enable_fpu(FPU_64BIT);
+ enable_msa();
+ msa_id = read_msa_ir();
+ disable_msa();
+ write_c0_status(status);
+ return msa_id;
+}
+
+/*
+ * Determine the FCSR mask for FPU hardware.
+ */
+static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
+{
+ unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+
+ mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
+
+ sr = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+
+ fcsr = read_32bit_cp1_register(CP1_STATUS);
+
+ fcsr0 = fcsr & mask;
+ write_32bit_cp1_register(CP1_STATUS, fcsr0);
+ fcsr0 = read_32bit_cp1_register(CP1_STATUS);
+
+ fcsr1 = fcsr | ~mask;
+ write_32bit_cp1_register(CP1_STATUS, fcsr1);
+ fcsr1 = read_32bit_cp1_register(CP1_STATUS);
+
+ write_32bit_cp1_register(CP1_STATUS, fcsr);
+
+ write_c0_status(sr);
+
+ c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask;
+}
+
+/*
+ * Set the FIR feature flags for the FPU emulator.
+ */
+static void cpu_set_nofpu_id(struct cpuinfo_mips *c)
+{
+ u32 value;
+
+ value = 0;
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
+ value |= MIPS_FPIR_D | MIPS_FPIR_S;
+ if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
+ value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W;
+ c->fpu_id = value;
+}
+
+/* Determined FPU emulator mask to use for the boot CPU with "nofpu". */
+static unsigned int mips_nofpu_msk31;
+
+/*
+ * Set options for FPU hardware.
+ */
+static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
+{
+ c->fpu_id = cpu_get_fpu_id();
+ mips_nofpu_msk31 = c->fpu_msk31;
+
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+ if (c->fpu_id & MIPS_FPIR_3D)
+ c->ases |= MIPS_ASE_MIPS3D;
+ if (c->fpu_id & MIPS_FPIR_FREP)
+ c->options |= MIPS_CPU_FRE;
+ }
+
+ cpu_set_fpu_fcsr_mask(c);
+}
+
+/*
+ * Set options for the FPU emulator.
+ */
+static void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
+{
+ c->options &= ~MIPS_CPU_FPU;
+ c->fpu_msk31 = mips_nofpu_msk31;
+
+ cpu_set_nofpu_id(c);
+}
+
static int mips_fpu_disabled;
static int __init fpu_disable(char *s)
{
- cpu_data[0].options &= ~MIPS_CPU_FPU;
+ cpu_set_nofpu_opts(&boot_cpu_data);
mips_fpu_disabled = 1;
return 1;
@@ -178,41 +295,6 @@ static inline void set_elf_platform(int cpu, const char *plat)
__elf_platform = plat;
}
-/*
- * Get the FPU Implementation/Revision.
- */
-static inline unsigned long cpu_get_fpu_id(void)
-{
- unsigned long tmp, fpu_id;
-
- tmp = read_c0_status();
- __enable_fpu(FPU_AS_IS);
- fpu_id = read_32bit_cp1_register(CP1_REVISION);
- write_c0_status(tmp);
- return fpu_id;
-}
-
-/*
- * Check the CPU has an FPU the official way.
- */
-static inline int __cpu_has_fpu(void)
-{
- return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
-}
-
-static inline unsigned long cpu_get_msa_id(void)
-{
- unsigned long status, msa_id;
-
- status = read_c0_status();
- __enable_fpu(FPU_64BIT);
- enable_msa();
- msa_id = read_msa_ir();
- disable_msa();
- write_c0_status(status);
- return msa_id;
-}
-
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
#ifdef __NEED_VMBITS_PROBE
@@ -441,6 +523,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->htw_seq = 0;
c->options |= MIPS_CPU_HTW;
}
+ if (config3 & MIPS_CONF3_CDMM)
+ c->options |= MIPS_CPU_CDMM;
return config3 & MIPS_CONF_M;
}
@@ -516,6 +600,10 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_MAAR;
if (config5 & MIPS_CONF5_LLB)
c->options |= MIPS_CPU_RW_LLB;
+#ifdef CONFIG_XPA
+ if (config5 & MIPS_CONF5_MVH)
+ c->options |= MIPS_CPU_XPA;
+#endif
return config5 & MIPS_CONF_M;
}
@@ -575,6 +663,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -594,6 +683,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R3000;
__cpu_name[cpu] = "R3000";
}
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -642,6 +732,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
}
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_VCE |
MIPS_CPU_LLSC;
@@ -649,6 +740,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
case PRID_IMP_VR41XX:
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS;
c->tlbsize = 32;
switch (c->processor_id & 0xf0) {
@@ -690,6 +782,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R4300;
__cpu_name[cpu] = "R4300";
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -698,6 +791,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R4600;
__cpu_name[cpu] = "R4600";
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -713,11 +807,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R4650;
__cpu_name[cpu] = "R4650";
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
@@ -743,6 +839,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R4700;
__cpu_name[cpu] = "R4700";
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -751,6 +848,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_TX49XX;
__cpu_name[cpu] = "R49XX";
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_LLSC;
if (!(c->processor_id & 0x08))
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
@@ -792,6 +890,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R6000;
__cpu_name[cpu] = "R6000";
set_isa(c, MIPS_CPU_ISA_II);
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -800,6 +899,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R6000A;
__cpu_name[cpu] = "R6000A";
set_isa(c, MIPS_CPU_ISA_II);
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -850,8 +950,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
- c->cputype = CPU_R14000;
- __cpu_name[cpu] = "R14000";
+ if (((c->processor_id >> 4) & 0x0f) > 2) {
+ c->cputype = CPU_R16000;
+ __cpu_name[cpu] = "R16000";
+ } else {
+ c->cputype = CPU_R14000;
+ __cpu_name[cpu] = "R14000";
+ }
set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
@@ -866,12 +971,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2e");
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
break;
case PRID_REV_LOONGSON2F:
c->cputype = CPU_LOONGSON2;
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2f");
set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
break;
case PRID_REV_LOONGSON3A:
c->cputype = CPU_LOONGSON3;
@@ -1308,6 +1415,9 @@ void cpu_probe(void)
c->cputype = CPU_UNKNOWN;
c->writecombine = _CACHE_UNCACHED;
+ c->fpu_csr31 = FPU_CSR_RN;
+ c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+
c->processor_id = read_c0_prid();
switch (c->processor_id & PRID_COMP_MASK) {
case PRID_COMP_LEGACY:
@@ -1364,16 +1474,10 @@ void cpu_probe(void)
~(1 << MIPS_PWCTL_PWEN_SHIFT));
}
- if (c->options & MIPS_CPU_FPU) {
- c->fpu_id = cpu_get_fpu_id();
-
- if (c->isa_level & cpu_has_mips_r) {
- if (c->fpu_id & MIPS_FPIR_3D)
- c->ases |= MIPS_ASE_MIPS3D;
- if (c->fpu_id & MIPS_FPIR_FREP)
- c->options |= MIPS_CPU_FRE;
- }
- }
+ if (c->options & MIPS_CPU_FPU)
+ cpu_set_fpu_opts(c);
+ else
+ cpu_set_nofpu_opts(c);
if (cpu_has_mips_r2_r6) {
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d21264681e97..d434d5d5ae6e 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -25,9 +25,9 @@ static void crash_shutdown_secondary(void *ignore)
return;
local_irq_disable();
- if (!cpu_isset(cpu, cpus_in_crash))
+ if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
- cpu_set(cpu, cpus_in_crash);
+ cpumask_set_cpu(cpu, &cpus_in_crash);
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
@@ -50,7 +50,7 @@ static void crash_kexec_prepare_cpus(void)
*/
pr_emerg("Sending IPI to other cpus...\n");
msecs = 10000;
- while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
cpu_relax();
mdelay(1);
}
@@ -66,5 +66,5 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus();
- cpu_set(crashing_cpu, cpus_in_crash);
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
}
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
index 468f3eba4132..7f65b53d1b24 100644
--- a/arch/mips/kernel/csrc-bcm1480.c
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -10,12 +10,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
#include <asm/addrspace.h>
#include <asm/io.h>
@@ -41,6 +38,11 @@ struct clocksource bcm1480_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u64 notrace sb1480_read_sched_clock(void)
+{
+ return __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+}
+
void __init sb1480_clocksource_init(void)
{
struct clocksource *cs = &bcm1480_clocksource;
@@ -50,4 +52,6 @@ void __init sb1480_clocksource_init(void)
plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
clocksource_register_hz(cs, zbbus);
+
+ sched_clock_register(sb1480_read_sched_clock, 64, zbbus);
}
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 6cbbf6e106b9..722f5589cd1d 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -12,12 +12,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
#include <linux/init.h>
#include <asm/ds1287.h>
@@ -37,6 +34,11 @@ static struct clocksource clocksource_dec = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u64 notrace dec_ioasic_read_sched_clock(void)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
int __init dec_ioasic_clocksource_init(void)
{
unsigned int freq;
@@ -65,5 +67,8 @@ int __init dec_ioasic_clocksource_init(void)
clocksource_dec.rating = 200 + freq / 10000000;
clocksource_register_hz(&clocksource_dec, freq);
+
+ sched_clock_register(dec_ioasic_read_sched_clock, 32, freq);
+
return 0;
}
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index decd1fa38d55..e5ed7ada1433 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -7,6 +7,7 @@
*/
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/sched_clock.h>
#include <asm/time.h>
@@ -22,6 +23,11 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u64 notrace r4k_read_sched_clock(void)
+{
+ return read_c0_count();
+}
+
int __init init_r4k_clocksource(void)
{
if (!cpu_has_counter || !mips_hpt_frequency)
@@ -32,5 +38,7 @@ int __init init_r4k_clocksource(void)
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+ sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+
return 0;
}
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
index 6ecb77d82063..d915652b4d56 100644
--- a/arch/mips/kernel/csrc-sb1250.c
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -10,12 +10,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
#include <asm/addrspace.h>
#include <asm/io.h>
@@ -33,15 +30,22 @@
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again.
*/
-static cycle_t sb1250_hpt_read(struct clocksource *cs)
+static inline cycle_t sb1250_hpt_get_cycles(void)
{
unsigned int count;
+ void __iomem *addr;
- count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT))));
+ addr = IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT));
+ count = G_SCD_TIMER_CNT(__raw_readq(addr));
return SB1250_HPT_VALUE - count;
}
+static cycle_t sb1250_hpt_read(struct clocksource *cs)
+{
+ return sb1250_hpt_get_cycles();
+}
+
struct clocksource bcm1250_clocksource = {
.name = "bcm1250-counter-3",
.rating = 200,
@@ -50,6 +54,11 @@ struct clocksource bcm1250_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u64 notrace sb1250_read_sched_clock(void)
+{
+ return sb1250_hpt_get_cycles();
+}
+
void __init sb1250_clocksource_init(void)
{
struct clocksource *cs = &bcm1250_clocksource;
@@ -66,4 +75,6 @@ void __init sb1250_clocksource_init(void)
R_SCD_TIMER_CFG)));
clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
+
+ sched_clock_register(sb1250_read_sched_clock, 23, V_SCD_TIMER_FREQ);
}
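The clocksource changes above (jz4740, txx9, bcm1480, ioasic, r4k, sb1250) all follow the same pattern: the driver exposes its free-running counter through a small notrace read callback and registers it with sched_clock_register(), passing the counter width in bits and its rate in Hz. A hedged, self-contained sketch of that wiring for a hypothetical memory-mapped 32-bit counter (the register pointer and names are illustrative only, not taken from the patch):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

/* Hypothetical MMIO counter register, mapped elsewhere during early init. */
static void __iomem *demo_counter;

static u64 notrace demo_read_sched_clock(void)
{
	/* Must be cheap and non-traced; it backs sched_clock() itself. */
	return readl_relaxed(demo_counter);
}

static void __init demo_sched_clock_init(unsigned long rate_hz)
{
	/* 32 = counter width in bits, rate_hz = tick frequency in Hz. */
	sched_clock_register(demo_read_sched_clock, 32, rate_hz);
}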
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index d2c09f6475c5..4a4d9e067c89 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
/* Lets see if this is an O32 ELF */
if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
- /* FR = 1 for N32 */
- if (ehdr32->e_flags & EF_MIPS_ABI2)
- state->overall_fp_mode = FP_FR1;
- else
- /* Set a good default FPU mode for O32 */
- state->overall_fp_mode = cpu_has_mips_r6 ?
- FP_FRE : FP_FR0;
-
if (ehdr32->e_flags & EF_MIPS_FP64) {
/*
* Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
@@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
(char *)&abiflags,
sizeof(abiflags));
} else {
- /* FR=1 is really the only option for 64-bit */
- state->overall_fp_mode = FP_FR1;
-
if (phdr64->p_type != PT_MIPS_ABIFLAGS)
return 0;
if (phdr64->p_filesz < sizeof(abiflags))
@@ -131,30 +120,21 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
return 0;
}
-static inline unsigned get_fp_abi(int in_abi)
-{
- /* If the ABI requirement is provided, simply return that */
- if (in_abi != MIPS_ABI_FP_UNKNOWN)
- return in_abi;
-
- /* Unknown ABI */
- return MIPS_ABI_FP_UNKNOWN;
-}
-
int arch_check_elf(void *_ehdr, bool has_interpreter,
struct arch_elf_state *state)
{
struct elf32_hdr *ehdr = _ehdr;
struct mode_req prog_req, interp_req;
int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
+ bool is_mips64;
if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
- fp_abi = get_fp_abi(state->fp_abi);
+ fp_abi = state->fp_abi;
if (has_interpreter) {
- interp_fp_abi = get_fp_abi(state->interp_fp_abi);
+ interp_fp_abi = state->interp_fp_abi;
abi0 = min(fp_abi, interp_fp_abi);
abi1 = max(fp_abi, interp_fp_abi);
@@ -162,10 +142,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
abi0 = abi1 = fp_abi;
}
- /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
- max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
- (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
- MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
+ is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
+ (ehdr->e_flags & EF_MIPS_ABI2);
+
+ if (is_mips64) {
+ /* MIPS64 code always uses FR=1, thus the default is easy */
+ state->overall_fp_mode = FP_FR1;
+
+ /* Disallow access to the various FPXX & FP64 ABIs */
+ max_abi = MIPS_ABI_FP_SOFT;
+ } else {
+ /* Default to a mode capable of running code expecting FR=0 */
+ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
+
+ /* Allow all ABIs we know about */
+ max_abi = MIPS_ABI_FP_64A;
+ }
if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
(abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index af41ba6db960..7791840cf22c 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -10,6 +10,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
+#include <asm/compiler.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -185,7 +186,7 @@ syscall_exit_work:
* For C code use the inline version named instruction_hazard().
*/
LEAF(mips_ihb)
- .set mips32r2
+ .set MIPS_ISA_LEVEL_RAW
jr.hb ra
nop
END(mips_ihb)
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 368c88b7eb6c..e4f62b7875d2 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -176,6 +176,17 @@ void __init check_wait(void)
cpu_wait = rm7k_wait_irqoff;
break;
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ /*
+ * Incoming Fast Debug Channel (FDC) data during a wait
+ * instruction causes the wait never to resume, even if an
+ * interrupt is received. Avoid using wait at all if FDC data is
+ * likely to be received.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
+ break;
+ /* fall through */
case CPU_M14KC:
case CPU_M14KEC:
case CPU_24K:
@@ -183,8 +194,6 @@ void __init check_wait(void)
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
- case CPU_PROAPTIV:
- case CPU_P5600:
case CPU_M5150:
case CPU_QEMU_GENERIC:
cpu_wait = r4k_wait;
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 362bb3707e62..3e4491aa6d6b 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -114,8 +114,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
/* Compute new global allowed CPU set if necessary */
ti = task_thread_info(p);
if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
- cpus_intersects(*new_mask, mt_fpu_cpumask)) {
- cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+ cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
+ cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
retval = set_cpus_allowed_ptr(p, effective_mask);
} else {
cpumask_copy(effective_mask, new_mask);
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 64d17e41093b..f2977f00911b 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -187,7 +187,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
}
/**
- * movt_func - Emulate a MOVT instruction
+ * movf_func - Emulate a MOVF instruction
* @regs: Process register set
* @ir: Instruction
*
@@ -200,9 +200,12 @@ static int movf_func(struct pt_regs *regs, u32 ir)
csr = current->thread.fpu.fcr31;
cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
if (((csr & cond) == 0) && MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
MIPS_R2_STATS(movs);
+
return 0;
}
@@ -895,8 +898,9 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
+ * @fcr31: Floating Point Control and Status Register returned
*/
-int mipsr2_decoder(struct pt_regs *regs, u32 inst)
+int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
int err = 0;
unsigned long vaddr;
@@ -1165,6 +1169,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
+ *fcr31 = current->thread.fpu.fcr31;
+
+ /*
+ * We can't allow the emulated instruction to leave any of
+ * the cause bits set in $fcr31.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 9466184d0039..cc1b6fadf089 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -558,8 +558,10 @@ static int mipspmu_get_irq(void)
if (mipspmu.irq >= 0) {
/* Request my own irq handler. */
err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
- IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
- "mips_perf_pmu", NULL);
+ IRQF_PERCPU | IRQF_NOBALANCING |
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND |
+ IRQF_SHARED,
+ "mips_perf_pmu", &mipspmu);
if (err) {
pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
mipspmu.irq);
@@ -582,7 +584,7 @@ static int mipspmu_get_irq(void)
static void mipspmu_free_irq(void)
{
if (mipspmu.irq >= 0)
- free_irq(mipspmu.irq, NULL);
+ free_irq(mipspmu.irq, &mipspmu);
else if (cp0_perfcount_irq < 0)
perf_irq = save_perf_irq;
}
@@ -775,6 +777,7 @@ static int n_counters(void)
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
counters = 4;
break;
@@ -822,6 +825,13 @@ static const struct mips_perf_event mipsxxcore_event_map2
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};
+static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
+};
+
static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
@@ -1005,6 +1015,61 @@ static const struct mips_perf_event mipsxxcore_cache_map2
},
};
+static const struct mips_perf_event loongson3_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD },
+ },
+},
+};
+
/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
[PERF_COUNT_HW_CACHE_MAX]
@@ -1539,6 +1604,10 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ break;
+ case CPU_LOONGSON3:
+ raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ break;
}
raw_event.event_id = base_id;
@@ -1615,8 +1684,7 @@ init_hw_perf_events(void)
if (get_c0_perfcount_int)
irq = get_c0_perfcount_int();
- else if ((cp0_perfcount_irq >= 0) &&
- (cp0_compare_irq != cp0_perfcount_irq))
+ else if (cp0_perfcount_irq >= 0)
irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
else
irq = -1;
@@ -1669,6 +1737,11 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
+ case CPU_LOONGSON3:
+ mipspmu.name = "mips/loongson3";
+ mipspmu.general_event_map = &loongson3_event_map;
+ mipspmu.cache_event_map = &loongson3_cache_map;
+ break;
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 130af7d26a9c..298b2b773d12 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -120,6 +120,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_msa) seq_printf(m, "%s", " msa");
if (cpu_has_eva) seq_printf(m, "%s", " eva");
if (cpu_has_htw) seq_printf(m, "%s", " htw");
+ if (cpu_has_xpa) seq_printf(m, "%s", " xpa");
seq_printf(m, "\n");
if (cpu_has_mmips) {
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index bf85cc180d91..f2975d4d1e44 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -49,7 +49,7 @@
void arch_cpu_idle_dead(void)
{
/* What the heck is this check doing ? */
- if (!cpu_isset(smp_processor_id(), cpu_callin_map))
+ if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
play_dead();
}
#endif
@@ -107,8 +107,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
+/*
+ * Copy architecture-specific thread state
+ */
int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+ unsigned long kthread_arg, struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
@@ -123,11 +126,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childksp = (unsigned long) childregs;
p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
if (unlikely(p->flags & PF_KTHREAD)) {
+ /* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
ti->addr_limit = KERNEL_DS;
p->thread.reg16 = usp; /* fn */
- p->thread.reg17 = arg;
+ p->thread.reg17 = kthread_arg;
p->thread.reg29 = childksp;
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -139,6 +143,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs->cp0_status = status;
return 0;
}
+
+ /* user thread */
*childregs = *regs;
childregs->regs[7] = 0; /* Clear error flag */
childregs->regs[2] = 0; /* Child gets zero as return value */
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 452d4350ce42..e303cb1ef2f4 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -64,7 +64,10 @@ int __init __dt_register_buses(const char *bus0, const char *bus1)
panic("device tree not present");
strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
- strlcpy(of_ids[1].compatible, bus1, sizeof(of_ids[1].compatible));
+ if (bus1) {
+ strlcpy(of_ids[1].compatible, bus1,
+ sizeof(of_ids[1].compatible));
+ }
if (of_platform_populate(NULL, of_ids, NULL, NULL))
panic("failed to populate DT");
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 7da6e324dd35..e933a309f2ea 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -32,6 +32,7 @@
#include <asm/byteorder.h>
#include <asm/cpu.h>
+#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
@@ -157,6 +158,9 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
union fpureg *fregs;
u64 fpr_val;
+ u32 fcr31;
+ u32 value;
+ u32 mask;
int i;
if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -170,8 +174,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
set_fpr64(&fregs[i], 0, fpr_val);
}
- __get_user(child->thread.fpu.fcr31, data + 64);
- child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ __get_user(value, data + 64);
+ fcr31 = child->thread.fpu.fcr31;
+ mask = boot_cpu_data.fpu_msk31;
+ child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
/* FIR may not be written. */
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 435ea652f5fa..5087a4b72e6b 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -115,11 +115,9 @@ LEAF(_restore_fp)
* the property that no matter whether considered as single or as double
* precision represents signaling NANS.
*
- * We initialize fcr31 to rounding to nearest, no exceptions.
+ * The value to initialize fcr31 to comes in $a0.
*/
-#define FPU_DEFAULT 0x00000000
-
.set push
SET_HARDFLOAT
@@ -129,8 +127,7 @@ LEAF(_init_fpu)
or t0, t1
mtc0 t0, CP0_STATUS
- li t1, FPU_DEFAULT
- ctc1 t1, fcr31
+ ctc1 a0, fcr31
li t0, -1
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 3b1a36f13a7d..04cbbde3521b 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -165,11 +165,9 @@ LEAF(_init_msa_upper)
* the property that no matter whether considered as single or as double
* precision represents signaling NANS.
*
- * We initialize fcr31 to rounding to nearest, no exceptions.
+ * The value to initialize fcr31 to comes in $a0.
*/
-#define FPU_DEFAULT 0x00000000
-
.set push
SET_HARDFLOAT
@@ -180,8 +178,7 @@ LEAF(_init_fpu)
mtc0 t0, CP0_STATUS
enable_fpu_hazard
- li t1, FPU_DEFAULT
- ctc1 t1, fcr31
+ ctc1 a0, fcr31
li t1, -1 # SNaN
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
index 07fc5244aed4..7c746d3458e7 100644
--- a/arch/mips/kernel/reset.c
+++ b/arch/mips/kernel/reset.c
@@ -11,6 +11,7 @@
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/reboot.h>
+#include <linux/delay.h>
#include <asm/reboot.h>
@@ -29,16 +30,40 @@ void machine_restart(char *command)
{
if (_machine_restart)
_machine_restart(command);
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_restart(command);
+ mdelay(1000);
+ pr_emerg("Reboot failed -- System halted\n");
+ local_irq_disable();
+ while (1);
}
void machine_halt(void)
{
if (_machine_halt)
_machine_halt();
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ local_irq_disable();
+ while (1);
}
void machine_power_off(void)
{
if (pm_power_off)
pm_power_off();
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ local_irq_disable();
+ while (1);
}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 058929041368..be73c491182b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -31,6 +31,7 @@
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
+#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -763,6 +764,7 @@ void __init setup_arch(char **cmdline_p)
cpu_probe();
prom_init();
+ setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
setup_early_printk();
#endif
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index b8bd9340c9c7..fd528d7ea278 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -362,7 +362,7 @@ static int bmips_cpu_disable(void)
pr_info("SMP: CPU%d is offline\n", cpu);
set_cpu_online(cpu, false);
- cpu_clear(cpu, cpu_callin_map);
+ cpumask_clear_cpu(cpu, &cpu_callin_map);
clear_c0_status(IE_IRQ5);
local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index e36a859af666..d5e0f949dc48 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -66,7 +66,7 @@ static void cmp_smp_finish(void)
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
- cpu_set(smp_processor_id(), mt_fpu_cpumask);
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
@@ -110,7 +110,7 @@ void __init cmp_smp_setup(void)
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
- cpu_set(0, mt_fpu_cpumask);
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
for (i = 1; i < NR_CPUS; i++) {
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index bed7590e475f..4251d390b5b6 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
}
static void __init cps_prepare_cpus(unsigned int max_cpus)
@@ -284,7 +290,7 @@ static void cps_smp_finish(void)
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
- cpu_set(smp_processor_id(), mt_fpu_cpumask);
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
@@ -307,7 +313,7 @@ static int cps_cpu_disable(void)
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
- cpu_clear(cpu, cpu_callin_map);
+ cpumask_clear_cpu(cpu, &cpu_callin_map);
return 0;
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 17ea705f6c40..86311a164ef1 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -178,7 +178,7 @@ static void vsmp_smp_finish(void)
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
- cpu_set(smp_processor_id(), mt_fpu_cpumask);
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
@@ -239,7 +239,7 @@ static void __init vsmp_smp_setup(void)
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
- cpu_set(0, mt_fpu_cpumask);
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (!cpu_has_mipsmt)
return;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 1c0d8c50b7e1..faa46ebd9dda 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -43,7 +43,7 @@
#include <asm/time.h>
#include <asm/setup.h>
-volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
+cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);
@@ -75,30 +75,30 @@ static inline void set_cpu_sibling_map(int cpu)
{
int i;
- cpu_set(cpu, cpu_sibling_setup_map);
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
- for_each_cpu_mask(i, cpu_sibling_setup_map) {
+ for_each_cpu(i, &cpu_sibling_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package &&
cpu_data[cpu].core == cpu_data[i].core) {
- cpu_set(i, cpu_sibling_map[cpu]);
- cpu_set(cpu, cpu_sibling_map[i]);
+ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
}
}
} else
- cpu_set(cpu, cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}
static inline void set_cpu_core_map(int cpu)
{
int i;
- cpu_set(cpu, cpu_core_setup_map);
+ cpumask_set_cpu(cpu, &cpu_core_setup_map);
- for_each_cpu_mask(i, cpu_core_setup_map) {
+ for_each_cpu(i, &cpu_core_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package) {
- cpu_set(i, cpu_core_map[cpu]);
- cpu_set(cpu, cpu_core_map[i]);
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
}
}
}
@@ -138,7 +138,7 @@ asmlinkage void start_secondary(void)
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
- cpu_set(cpu, cpu_coherent_mask);
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
@@ -146,7 +146,7 @@ asmlinkage void start_secondary(void)
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
- cpu_set(cpu, cpu_callin_map);
+ cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
@@ -176,10 +176,8 @@ static void stop_this_cpu(void *dummy)
* Remove this CPU:
*/
set_cpu_online(smp_processor_id(), false);
- for (;;) {
- if (cpu_wait)
- (*cpu_wait)(); /* Wait if available. */
- }
+ local_irq_disable();
+ while (1);
}
void smp_send_stop(void)
@@ -210,7 +208,7 @@ void smp_prepare_boot_cpu(void)
{
set_cpu_possible(0, true);
set_cpu_online(0, true);
- cpu_set(0, cpu_callin_map);
+ cpumask_set_cpu(0, &cpu_callin_map);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
@@ -220,8 +218,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
/*
* Trust is futile. We should really have timeouts ...
*/
- while (!cpu_isset(cpu, cpu_callin_map))
+ while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
udelay(100);
+ schedule();
+ }
synchronise_count_master(cpu);
return 0;
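The cpumask conversions running through smp.c, smp-bmips.c, smp-cmp.c, smp-cps.c, smp-mt.c, crash.c and mips-mt-fpaff.c all replace the old by-value operations (cpu_set(), cpu_isset(), cpus_and(), for_each_cpu_mask() and so on) with the struct cpumask pointer API. A short illustrative sketch of the pointer-based helpers (the mask, CPU number and messages are arbitrary examples, not taken from the patch):

#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_cpumask_usage(void)
{
	cpumask_t demo_mask;		/* fine on the stack for a small demo */
	int cpu;

	cpumask_clear(&demo_mask);
	cpumask_set_cpu(0, &demo_mask);		/* was: cpu_set(0, mask) */

	if (cpumask_test_cpu(0, &demo_mask))	/* was: cpu_isset(0, mask) */
		pr_info("CPU0 is in the demo mask\n");

	for_each_cpu(cpu, &demo_mask)		/* was: for_each_cpu_mask() */
		pr_info("cpu %d set, mask weight %u\n",
			cpu, cpumask_weight(&demo_mask));
}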
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5b4d711f878d..d2d1c1933bc9 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -12,6 +12,7 @@
* Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2014, Imagination Technologies Ltd.
*/
+#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
@@ -268,7 +269,6 @@ static void __show_regs(const struct pt_regs *regs)
*/
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
(void *) regs->cp0_epc);
- printk(" %s\n", print_tainted());
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);
@@ -699,36 +699,60 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
-int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
- /*
- * We can't allow the emulated instruction to leave any of the cause
- * bits set in FCSR. If they were then the kernel would take an FP
- * exception when restoring FP context.
- */
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ struct siginfo si = { 0 };
+
+ switch (sig) {
+ case 0:
+ return 0;
- if (sig == SIGSEGV || sig == SIGBUS) {
- struct siginfo si = {0};
+ case SIGFPE:
si.si_addr = fault_addr;
si.si_signo = sig;
- if (sig == SIGSEGV) {
- down_read(&current->mm->mmap_sem);
- if (find_vma(current->mm, (unsigned long)fault_addr))
- si.si_code = SEGV_ACCERR;
- else
- si.si_code = SEGV_MAPERR;
- up_read(&current->mm->mmap_sem);
- } else {
- si.si_code = BUS_ADRERR;
- }
+ /*
+ * Inexact can happen together with Overflow or Underflow.
+ * Respect the mask to deliver the correct exception.
+ */
+ fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
+ if (fcr31 & FPU_CSR_INV_X)
+ si.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si.si_code = FPE_FLTRES;
+ else
+ si.si_code = __SI_FAULT;
+ force_sig_info(sig, &si, current);
+ return 1;
+
+ case SIGBUS:
+ si.si_addr = fault_addr;
+ si.si_signo = sig;
+ si.si_code = BUS_ADRERR;
+ force_sig_info(sig, &si, current);
+ return 1;
+
+ case SIGSEGV:
+ si.si_addr = fault_addr;
+ si.si_signo = sig;
+ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, (unsigned long)fault_addr))
+ si.si_code = SEGV_ACCERR;
+ else
+ si.si_code = SEGV_MAPERR;
+ up_read(&current->mm->mmap_sem);
force_sig_info(sig, &si, current);
return 1;
- } else if (sig) {
+
+ default:
force_sig(sig, current);
return 1;
- } else {
- return 0;
}
}
@@ -736,7 +760,8 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
unsigned long old_epc, unsigned long old_ra)
{
union mips_instruction inst = { .word = opcode };
- void __user *fault_addr = NULL;
+ void __user *fault_addr;
+ unsigned long fcr31;
int sig;
/* If it's obviously not an FP instruction, skip it */
@@ -766,13 +791,20 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
+ fcr31 = current->thread.fpu.fcr31;
- /* If something went wrong, signal */
- process_fpemu_return(sig, fault_addr);
+ /*
+ * We can't allow the emulated instruction to leave any of
+ * the cause bits set in $fcr31.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
own_fpu(1);
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcr31);
+
return 0;
}
@@ -782,7 +814,8 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
enum ctx_state prev_state;
- siginfo_t info = {0};
+ void __user *fault_addr;
+ int sig;
prev_state = exception_enter();
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
@@ -796,9 +829,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
- int sig;
- void __user *fault_addr = NULL;
-
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
@@ -815,30 +845,23 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
+ fcr31 = current->thread.fpu.fcr31;
- /* If something went wrong, signal */
- process_fpemu_return(sig, fault_addr);
+ /*
+ * We can't allow the emulated instruction to leave any of
+ * the cause bits set in $fcr31.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
+ } else {
+ sig = SIGFPE;
+ fault_addr = (void __user *) regs->cp0_epc;
+ }
- goto out;
- } else if (fcr31 & FPU_CSR_INV_X)
- info.si_code = FPE_FLTINV;
- else if (fcr31 & FPU_CSR_DIV_X)
- info.si_code = FPE_FLTDIV;
- else if (fcr31 & FPU_CSR_OVF_X)
- info.si_code = FPE_FLTOVF;
- else if (fcr31 & FPU_CSR_UDF_X)
- info.si_code = FPE_FLTUND;
- else if (fcr31 & FPU_CSR_INE_X)
- info.si_code = FPE_FLTRES;
- else
- info.si_code = __SI_FAULT;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *) regs->cp0_epc;
- force_sig_info(SIGFPE, &info, current);
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcr31);
out:
exception_exit(prev_state);
@@ -885,9 +908,9 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
break;
case BRK_MEMU:
/*
- * Address errors may be deliberately induced by the FPU
- * emulator to retake control of the CPU after executing the
- * instruction in the delay slot of an emulated branch.
+ * This breakpoint code is used by the FPU emulator to retake
+ * control of the CPU after executing the instruction from the
+ * delay slot of an emulated branch.
*
* Terminate if exception was recognized as a delay slot return
* otherwise handle as normal.
@@ -907,10 +930,9 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
asmlinkage void do_bp(struct pt_regs *regs)
{
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
unsigned int opcode, bcode;
enum ctx_state prev_state;
- unsigned long epc;
- u16 instr[2];
mm_segment_t seg;
seg = get_fs();
@@ -919,26 +941,28 @@ asmlinkage void do_bp(struct pt_regs *regs)
prev_state = exception_enter();
if (get_isa16_mode(regs->cp0_epc)) {
- /* Calculate EPC. */
- epc = exception_epc(regs);
- if (cpu_has_mmips) {
- if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
- (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
- goto out_sigsegv;
- opcode = (instr[0] << 16) | instr[1];
- } else {
+ u16 instr[2];
+
+ if (__get_user(instr[0], (u16 __user *)epc))
+ goto out_sigsegv;
+
+ if (!cpu_has_mmips) {
/* MIPS16e mode */
- if (__get_user(instr[0],
- (u16 __user *)msk_isa16_mode(epc)))
+ bcode = (instr[0] >> 5) & 0x3f;
+ } else if (mm_insn_16bit(instr[0])) {
+ /* 16-bit microMIPS BREAK */
+ bcode = instr[0] & 0xf;
+ } else {
+ /* 32-bit microMIPS BREAK */
+ if (__get_user(instr[1], (u16 __user *)(epc + 2)))
goto out_sigsegv;
- bcode = (instr[0] >> 6) & 0x3f;
- do_trap_or_bp(regs, bcode, "Break");
- goto out;
+ opcode = (instr[0] << 16) | instr[1];
+ bcode = (opcode >> 6) & ((1 << 20) - 1);
}
} else {
- if (__get_user(opcode,
- (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *)epc))
goto out_sigsegv;
+ bcode = (opcode >> 6) & ((1 << 20) - 1);
}
/*
@@ -947,9 +971,8 @@ asmlinkage void do_bp(struct pt_regs *regs)
* Gas is bug-compatible, but not always, grrr...
* We handle both cases with a simple heuristics. --macro
*/
- bcode = ((opcode >> 6) & ((1 << 20) - 1));
if (bcode >= (1 << 10))
- bcode >>= 10;
+ bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
/*
* notify the kprobe handlers, if instruction is likely to
@@ -1039,22 +1062,24 @@ asmlinkage void do_ri(struct pt_regs *regs)
* as quickly as possible.
*/
if (mipsr2_emulation && cpu_has_mips_r6 &&
- likely(user_mode(regs))) {
- if (likely(get_user(opcode, epc) >= 0)) {
- status = mipsr2_decoder(regs, opcode);
- switch (status) {
- case 0:
- case SIGEMT:
- task_thread_info(current)->r2_emul_return = 1;
- return;
- case SIGILL:
- goto no_r2_instr;
- default:
- process_fpemu_return(status,
- &current->thread.cp0_baduaddr);
- task_thread_info(current)->r2_emul_return = 1;
- return;
- }
+ likely(user_mode(regs)) &&
+ likely(get_user(opcode, epc) >= 0)) {
+ unsigned long fcr31 = 0;
+
+ status = mipsr2_decoder(regs, opcode, &fcr31);
+ switch (status) {
+ case 0:
+ case SIGEMT:
+ task_thread_info(current)->r2_emul_return = 1;
+ return;
+ case SIGILL:
+ goto no_r2_instr;
+ default:
+ process_fpemu_return(status,
+ &current->thread.cp0_baduaddr,
+ fcr31);
+ task_thread_info(current)->r2_emul_return = 1;
+ return;
}
}
@@ -1127,13 +1152,13 @@ static void mt_ase_fp_affinity(void)
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
- if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+ if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
cpumask_t tmask;
current->thread.user_cpus_allowed
= current->cpus_allowed;
- cpus_and(tmask, current->cpus_allowed,
- mt_fpu_cpumask);
+ cpumask_and(&tmask, &current->cpus_allowed,
+ &mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
@@ -1299,10 +1324,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
enum ctx_state prev_state;
unsigned int __user *epc;
unsigned long old_epc, old31;
+ void __user *fault_addr;
unsigned int opcode;
+ unsigned long fcr31;
unsigned int cpid;
int status, err;
unsigned long __maybe_unused flags;
+ int sig;
prev_state = exception_enter();
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
@@ -1319,7 +1347,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
- goto out;
+ break;
if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
@@ -1352,49 +1380,54 @@ asmlinkage void do_cpu(struct pt_regs *regs)
force_sig(status, current);
}
- goto out;
+ break;
case 3:
/*
- * Old (MIPS I and MIPS II) processors will set this code
- * for COP1X opcode instructions that replaced the original
- * COP3 space. We don't limit COP1 space instructions in
- * the emulator according to the CPU ISA, so we want to
- * treat COP1X instructions consistently regardless of which
- * code the CPU chose. Therefore we redirect this trap to
- * the FP emulator too.
- *
- * Then some newer FPU-less processors use this code
- * erroneously too, so they are covered by this choice
- * as well.
+ * The COP3 opcode space and consequently the CP0.Status.CU3
+ * bit and the CP0.Cause.CE=3 encoding have been removed as
+ * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
+ * up the space has been reused for COP1X instructions, that
+ * are enabled by the CP0.Status.CU1 bit and consequently
+ * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
+ * exceptions. Some FPU-less processors that implement one
+ * of these ISAs however use this code erroneously for COP1X
+ * instructions. Therefore we redirect this trap to the FP
+ * emulator too.
*/
- if (raw_cpu_has_fpu)
+ if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
+ force_sig(SIGILL, current);
break;
+ }
/* Fall through. */
case 1:
err = enable_restore_fp_context(0);
- if (!raw_cpu_has_fpu || err) {
- int sig;
- void __user *fault_addr = NULL;
- sig = fpu_emulator_cop1Handler(regs,
- &current->thread.fpu,
- 0, &fault_addr);
- if (!process_fpemu_return(sig, fault_addr) && !err)
- mt_ase_fp_affinity();
- }
+ if (raw_cpu_has_fpu && !err)
+ break;
- goto out;
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+ &fault_addr);
+ fcr31 = current->thread.fpu.fcr31;
+
+ /*
+ * We can't allow the emulated instruction to leave
+ * any of the cause bits set in $fcr31.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
+ /* Send a signal if required. */
+ if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
+ mt_ase_fp_affinity();
+
+ break;
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
- goto out;
+ break;
}
- force_sig(SIGILL, current);
-
-out:
exception_exit(prev_state);
}
@@ -1984,6 +2017,12 @@ int cp0_compare_irq_shift;
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
+/*
+ * Fast debug channel IRQ or -1 if not present
+ */
+int cp0_fdc_irq;
+EXPORT_SYMBOL_GPL(cp0_fdc_irq);
+
static int noulri;
static int __init ulri_disable(char *s)
@@ -2065,17 +2104,21 @@ void per_cpu_trap_init(bool is_boot_cpu)
*
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
+ * o read IntCtl.IPFDC to determine the fast debug channel interrupt
*/
if (cpu_has_mips_r2_r6) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
- if (cp0_perfcount_irq == cp0_compare_irq)
- cp0_perfcount_irq = -1;
+ cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
+ if (!cp0_fdc_irq)
+ cp0_fdc_irq = -1;
+
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
cp0_perfcount_irq = -1;
+ cp0_fdc_irq = -1;
}
if (!cpu_data[cpu].asid_cache)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index bbb69695a0a1..af84bef0c90d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -89,8 +89,6 @@
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
-#include <asm/fpu.h>
-#include <asm/fpu_emulator.h>
#define STR(x) __STR(x)
#define __STR(x) #x
@@ -109,10 +107,11 @@ static u32 unaligned_action;
extern void show_registers(struct pt_regs *regs);
#ifdef __BIG_ENDIAN
-#define LoadHW(addr, value, res) \
+#define _LoadHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ (".set\tnoat\n" \
- "1:\t"user_lb("%0", "0(%2)")"\n" \
- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:\t"type##_lb("%0", "0(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -127,13 +126,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
#ifndef CONFIG_CPU_MIPSR6
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "(%2)")"\n" \
- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
"li\t%1, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -146,21 +147,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no lwl instruction */
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n" \
".set\tnoat\n\t" \
- "1:"user_lb("%0", "0(%2)")"\n\t" \
- "2:"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:"type##_lb("%0", "0(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "2(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "3(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -178,14 +182,17 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */
-#define LoadHWU(addr, value, res) \
+#define _LoadHWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_lbu("%0", "0(%2)")"\n" \
- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:\t"type##_lbu("%0", "0(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -201,13 +208,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
#ifndef CONFIG_CPU_MIPSR6
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "(%2)")"\n" \
- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
"dsll\t%0, %0, 32\n\t" \
"dsrl\t%0, %0, 32\n\t" \
"li\t%1, 0\n" \
@@ -222,9 +231,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tldl\t%0, (%2)\n" \
"2:\tldr\t%0, 7(%2)\n\t" \
@@ -240,21 +251,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no lwl and ldl instructions */
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_lbu("%0", "0(%2)")"\n\t" \
- "2:"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:"type##_lbu("%0", "0(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "2(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "3(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -272,9 +286,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -319,16 +335,19 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */
-#define StoreHW(addr, value, res) \
+#define _StoreHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_sb("%1", "1(%2)")"\n" \
+ "1:\t"type##_sb("%1", "1(%2)")"\n" \
"srl\t$1, %1, 0x8\n" \
- "2:\t"user_sb("$1", "0(%2)")"\n" \
+ "2:\t"type##_sb("$1", "0(%2)")"\n" \
".set\tat\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
@@ -342,13 +361,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while(0)
#ifndef CONFIG_CPU_MIPSR6
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_swl("%1", "(%2)")"\n" \
- "2:\t"user_swr("%1", "3(%2)")"\n\t" \
+ "1:\t"type##_swl("%1", "(%2)")"\n" \
+ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
"li\t%0, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -361,9 +382,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tsdl\t%1,(%2)\n" \
"2:\tsdr\t%1, 7(%2)\n\t" \
@@ -379,20 +402,23 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no swl and sdl instructions */
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_sb("%1", "3(%2)")"\n\t" \
+ "1:"type##_sb("%1", "3(%2)")"\n\t" \
"srl\t$1, %1, 0x8\n\t" \
- "2:"user_sb("$1", "2(%2)")"\n\t" \
+ "2:"type##_sb("$1", "2(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "3:"user_sb("$1", "1(%2)")"\n\t" \
+ "3:"type##_sb("$1", "1(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "4:"user_sb("$1", "0(%2)")"\n\t" \
+ "4:"type##_sb("$1", "0(%2)")"\n\t" \
".set\tpop\n\t" \
"li\t%0, 0\n" \
"10:\n\t" \
@@ -409,9 +435,11 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)
#define StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -451,15 +479,18 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */
#else /* __BIG_ENDIAN */
-#define LoadHW(addr, value, res) \
+#define _LoadHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ (".set\tnoat\n" \
- "1:\t"user_lb("%0", "1(%2)")"\n" \
- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+ "1:\t"type##_lb("%0", "1(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -474,13 +505,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
#ifndef CONFIG_CPU_MIPSR6
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "3(%2)")"\n" \
- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
"li\t%1, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -493,21 +526,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no lwl instruction */
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n" \
".set\tnoat\n\t" \
- "1:"user_lb("%0", "3(%2)")"\n\t" \
- "2:"user_lbu("$1", "2(%2)")"\n\t" \
+ "1:"type##_lb("%0", "3(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "1(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "0(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -525,15 +561,18 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */
-#define LoadHWU(addr, value, res) \
+#define _LoadHWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_lbu("%0", "1(%2)")"\n" \
- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+ "1:\t"type##_lbu("%0", "1(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -549,13 +588,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
#ifndef CONFIG_CPU_MIPSR6
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "3(%2)")"\n" \
- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
"dsll\t%0, %0, 32\n\t" \
"dsrl\t%0, %0, 32\n\t" \
"li\t%1, 0\n" \
@@ -570,9 +611,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tldl\t%0, 7(%2)\n" \
"2:\tldr\t%0, (%2)\n\t" \
@@ -588,21 +631,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no lwl and ldl instructions */
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_lbu("%0", "3(%2)")"\n\t" \
- "2:"user_lbu("$1", "2(%2)")"\n\t" \
+ "1:"type##_lbu("%0", "3(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "1(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "0(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -620,9 +666,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -667,15 +715,17 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
-#define StoreHW(addr, value, res) \
+#define _StoreHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_sb("%1", "0(%2)")"\n" \
+ "1:\t"type##_sb("%1", "0(%2)")"\n" \
"srl\t$1,%1, 0x8\n" \
- "2:\t"user_sb("$1", "1(%2)")"\n" \
+ "2:\t"type##_sb("$1", "1(%2)")"\n" \
".set\tat\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
@@ -689,12 +739,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while(0)
+
#ifndef CONFIG_CPU_MIPSR6
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_swl("%1", "3(%2)")"\n" \
- "2:\t"user_swr("%1", "(%2)")"\n\t" \
+ "1:\t"type##_swl("%1", "3(%2)")"\n" \
+ "2:\t"type##_swr("%1", "(%2)")"\n\t"\
"li\t%0, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -707,9 +760,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tsdl\t%1, 7(%2)\n" \
"2:\tsdr\t%1, (%2)\n\t" \
@@ -725,20 +780,23 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no swl and sdl instructions */
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_sb("%1", "0(%2)")"\n\t" \
+ "1:"type##_sb("%1", "0(%2)")"\n\t" \
"srl\t$1, %1, 0x8\n\t" \
- "2:"user_sb("$1", "1(%2)")"\n\t" \
+ "2:"type##_sb("$1", "1(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "3:"user_sb("$1", "2(%2)")"\n\t" \
+ "3:"type##_sb("$1", "2(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "4:"user_sb("$1", "3(%2)")"\n\t" \
+ "4:"type##_sb("$1", "3(%2)")"\n\t" \
".set\tpop\n\t" \
"li\t%0, 0\n" \
"10:\n\t" \
@@ -755,9 +813,11 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -797,10 +857,28 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */
#endif
+#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
+#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
+#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
+#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
+#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
+#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
+#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
+#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
+#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
+
+#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
+#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
+#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
+#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
+#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
+
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
@@ -872,7 +950,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
set_fs(seg);
goto sigbus;
}
- LoadHW(addr, value, res);
+ LoadHWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -885,7 +963,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
set_fs(seg);
goto sigbus;
}
- LoadW(addr, value, res);
+ LoadWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -898,7 +976,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
set_fs(seg);
goto sigbus;
}
- LoadHWU(addr, value, res);
+ LoadHWUE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -913,7 +991,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
}
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
- StoreHW(addr, value, res);
+ StoreHWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -926,7 +1004,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
}
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
- StoreW(addr, value, res);
+ StoreWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -943,7 +1021,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- LoadHW(addr, value, res);
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ LoadHW(addr, value, res);
+ else
+ LoadHWE(addr, value, res);
+ } else {
+ LoadHW(addr, value, res);
+ }
+
if (res)
goto fault;
compute_return_epc(regs);
@@ -954,7 +1040,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- LoadW(addr, value, res);
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ LoadW(addr, value, res);
+ else
+ LoadWE(addr, value, res);
+ } else {
+ LoadW(addr, value, res);
+ }
+
if (res)
goto fault;
compute_return_epc(regs);
@@ -965,7 +1059,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- LoadHWU(addr, value, res);
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ LoadHWU(addr, value, res);
+ else
+ LoadHWUE(addr, value, res);
+ } else {
+ LoadHWU(addr, value, res);
+ }
+
if (res)
goto fault;
compute_return_epc(regs);
@@ -1024,7 +1126,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- StoreHW(addr, value, res);
+
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ StoreHW(addr, value, res);
+ else
+ StoreHWE(addr, value, res);
+ } else {
+ StoreHW(addr, value, res);
+ }
+
if (res)
goto fault;
break;
@@ -1035,7 +1146,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- StoreW(addr, value, res);
+
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ StoreW(addr, value, res);
+ else
+ StoreWE(addr, value, res);
+ } else {
+ StoreW(addr, value, res);
+ }
+
if (res)
goto fault;
break;
@@ -1076,7 +1196,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
own_fpu(1); /* Restore FPU state. */
/* Signal if something went wrong. */
- process_fpemu_return(res, fault_addr);
+ process_fpemu_return(res, fault_addr, 0);
if (res == 0)
break;
@@ -1511,7 +1631,7 @@ fpu_emul:
own_fpu(1); /* restore FPU state */
/* If something went wrong, signal */
- process_fpemu_return(res, fault_addr);
+ process_fpemu_return(res, fault_addr, 0);
if (res == 0)
goto success;
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 6230f376a44e..4b50c5787e25 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -2389,7 +2389,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
{
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
- unsigned long curr_pc;
if (run->mmio.len > sizeof(*gpr)) {
kvm_err("Bad MMIO length: %d", run->mmio.len);
@@ -2397,11 +2396,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
goto done;
}
- /*
- * Update PC and hold onto current PC in case there is
- * an error and we want to rollback the PC
- */
- curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, vcpu->arch.pending_load_cause);
if (er == EMULATE_FAIL)
return er;
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 39ab3e786e59..0db099ecc016 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -41,7 +41,7 @@ int ltq_soc_type(void)
return soc_info.type;
}
-void prom_free_prom_memory(void)
+void __init prom_free_prom_memory(void)
{
}
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 696cd57f6f13..d001bc38908a 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -61,7 +61,6 @@ static struct platform_driver vmmc_driver = {
.probe = vmmc_probe,
.driver = {
.name = "lantiq,vmmc",
- .owner = THIS_MODULE,
.of_match_table = vmmc_match,
},
};
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index cf9b4633257e..a57959e648a6 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -53,21 +53,6 @@ int proc_dolasatstring(struct ctl_table *table, int write,
return 0;
}
-/* proc function to write EEPROM after changing int entry */
-int proc_dolasatint(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- int r;
-
- r = proc_dointvec(table, write, buffer, lenp, ppos);
- if ((!write) || r)
- return r;
-
- lasat_write_eeprom_info();
-
- return 0;
-}
-
#ifdef CONFIG_DS1603
static int rtctmp;
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 4c721e247ac9..ed88647b57e2 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -76,10 +76,10 @@
LOAD _t1, (offset + UNIT(1))(src); \
LOAD _t2, (offset + UNIT(2))(src); \
LOAD _t3, (offset + UNIT(3))(src); \
+ ADDC(_t0, _t1); \
+ ADDC(_t2, _t3); \
ADDC(sum, _t0); \
- ADDC(sum, _t1); \
- ADDC(sum, _t2); \
- ADDC(sum, _t3)
+ ADDC(sum, _t2)
#ifdef USE_DOUBLE
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
@@ -504,21 +504,21 @@ LEAF(csum_partial)
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
- ADDC(sum, t0)
+ ADDC(t0, t1)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
- ADDC(sum, t1)
+ ADDC(sum, t0)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
- ADDC(sum, t2)
+ ADDC(t2, t3)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
- ADDC(sum, t3)
+ ADDC(sum, t2)
STORE(t4, UNIT(4)(dst), .Ls_exc\@)
- ADDC(sum, t4)
+ ADDC(t4, t5)
STORE(t5, UNIT(5)(dst), .Ls_exc\@)
- ADDC(sum, t5)
+ ADDC(sum, t4)
STORE(t6, UNIT(6)(dst), .Ls_exc\@)
- ADDC(sum, t6)
+ ADDC(t6, t7)
STORE(t7, UNIT(7)(dst), .Ls_exc\@)
- ADDC(sum, t7)
+ ADDC(sum, t6)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
bgez len, 1b
@@ -544,13 +544,13 @@ LEAF(csum_partial)
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
- ADDC(sum, t0)
+ ADDC(t0, t1)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
- ADDC(sum, t1)
+ ADDC(sum, t0)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
- ADDC(sum, t2)
+ ADDC(t2, t3)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
- ADDC(sum, t3)
+ ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
beqz len, .Ldone\@
@@ -649,13 +649,13 @@ LEAF(csum_partial)
nop # improves slotting
#endif
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
- ADDC(sum, t0)
+ ADDC(t0, t1)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
- ADDC(sum, t1)
+ ADDC(sum, t0)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
- ADDC(sum, t2)
+ ADDC(t2, t3)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
- ADDC(sum, t3)
+ ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
bne len, rem, 1b
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index d87e03330b29..e70c33fdb881 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -4,7 +4,6 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
bonito-irq.o mem.o machtype.o platform.o
-obj-$(CONFIG_GPIOLIB) += gpio.o
obj-$(CONFIG_PCI) += pci.o
#
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index 045ea3d47c87..22f04ca2ff3e 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -29,6 +29,7 @@ struct efi_memory_map_loongson *loongson_memmap;
struct loongson_system_configuration loongson_sysconf;
u64 loongson_chipcfg[MAX_PACKAGES] = {0xffffffffbfc00180};
+u64 loongson_chiptemp[MAX_PACKAGES];
u64 loongson_freqctrl[MAX_PACKAGES];
unsigned long long smp_group[4];
@@ -97,6 +98,10 @@ void __init prom_init_env(void)
loongson_chipcfg[1] = 0x900010001fe00180;
loongson_chipcfg[2] = 0x900020001fe00180;
loongson_chipcfg[3] = 0x900030001fe00180;
+ loongson_chiptemp[0] = 0x900000001fe0019c;
+ loongson_chiptemp[1] = 0x900010001fe0019c;
+ loongson_chiptemp[2] = 0x900020001fe0019c;
+ loongson_chiptemp[3] = 0x900030001fe0019c;
loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
} else if (ecpu->cputype == Loongson_3B) {
@@ -110,6 +115,10 @@ void __init prom_init_env(void)
loongson_chipcfg[1] = 0x900020001fe00180;
loongson_chipcfg[2] = 0x900040001fe00180;
loongson_chipcfg[3] = 0x900060001fe00180;
+ loongson_chiptemp[0] = 0x900000001fe0019c;
+ loongson_chiptemp[1] = 0x900020001fe0019c;
+ loongson_chiptemp[2] = 0x900040001fe0019c;
+ loongson_chiptemp[3] = 0x900060001fe0019c;
loongson_freqctrl[0] = 0x900000001fe001d0;
loongson_freqctrl[1] = 0x900020001fe001d0;
loongson_freqctrl[2] = 0x900040001fe001d0;
diff --git a/arch/mips/loongson/common/pci.c b/arch/mips/loongson/common/pci.c
index 003ab4e618b3..4e2575643781 100644
--- a/arch/mips/loongson/common/pci.c
+++ b/arch/mips/loongson/common/pci.c
@@ -78,6 +78,8 @@ static void __init setup_pcimap(void)
#endif
}
+extern int sbx00_acpi_init(void);
+
static int __init pcibios_init(void)
{
setup_pcimap();
@@ -89,6 +91,10 @@ static int __init pcibios_init(void)
#endif
register_pci_controller(&loongson_pci_controller);
+#ifdef CONFIG_CPU_LOONGSON3
+ sbx00_acpi_init();
+#endif
+
return 0;
}
diff --git a/arch/mips/loongson/loongson-3/cop2-ex.c b/arch/mips/loongson/loongson-3/cop2-ex.c
index b03e37d2071a..ea13764d0a03 100644
--- a/arch/mips/loongson/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson/loongson-3/cop2-ex.c
@@ -43,7 +43,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (!fpu_owned) {
set_thread_flag(TIF_USEDFPU);
if (!used_math()) {
- _init_fpu();
+ _init_fpu(current->thread.fpu.fcr31);
set_used_math();
} else
_restore_fp(current);
diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
index 21221edda7a9..0f75b6b3d218 100644
--- a/arch/mips/loongson/loongson-3/irq.c
+++ b/arch/mips/loongson/loongson-3/irq.c
@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
static struct irqaction cascade_irqaction = {
.handler = no_action,
+ .flags = IRQF_NO_SUSPEND,
.name = "cascade",
};
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 6cae0e75de27..12d14ed48778 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -233,7 +233,7 @@ static __init void prom_meminit(void)
if (node_online(node)) {
szmem(node);
node_mem_init(node);
- cpus_clear(__node_data[(node)]->cpumask);
+ cpumask_clear(&__node_data[(node)]->cpumask);
}
}
for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
@@ -244,7 +244,7 @@ static __init void prom_meminit(void)
if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
continue;
- cpu_set(active_cpu, __node_data[(node)]->cpumask);
+ cpumask_set_cpu(active_cpu, &__node_data[(node)]->cpumask);
pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
active_cpu++;
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index e2eb688b5434..e3c68b5da18d 100644
--- a/arch/mips/loongson/loongson-3/smp.c
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -408,7 +408,7 @@ static int loongson3_cpu_disable(void)
return -EBUSY;
set_cpu_online(cpu, false);
- cpu_clear(cpu, cpu_callin_map);
+ cpumask_clear_cpu(cpu, &cpu_callin_map);
local_irq_save(flags);
fixup_irqs();
local_irq_restore(flags);
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 619cfc1a2442..2e5f96275c38 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -2,12 +2,15 @@
# Makefile for the Linux/MIPS kernel FPU emulation.
#
-obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o dp_div.o dp_mul.o \
- dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o dp_tint.o \
- dp_fint.o dp_tlong.o dp_flong.o sp_div.o sp_mul.o sp_sub.o \
- sp_add.o sp_fdp.o sp_cmp.o sp_simple.o sp_tint.o sp_fint.o \
- sp_tlong.o sp_flong.o dsemul.o
+obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \
+ dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \
+ dp_tint.o dp_fint.o \
+ sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \
+ sp_tint.o sp_fint.o \
+ dsemul.o
-lib-y += ieee754d.o dp_sqrt.o sp_sqrt.o
+lib-y += ieee754d.o \
+ dp_tlong.o dp_flong.o dp_sqrt.o \
+ sp_tlong.o sp_flong.o sp_sqrt.o
obj-$(CONFIG_DEBUG_FS) += me-debugfs.o
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index b30bf65c7d7d..22b9b2cb9219 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -45,6 +45,7 @@
#include <asm/signal.h>
#include <asm/uaccess.h>
+#include <asm/cpu-info.h>
#include <asm/processor.h>
#include <asm/fpu_emulator.h>
#include <asm/fpu.h>
@@ -63,14 +64,14 @@ static int fpux_emu(struct pt_regs *,
/* Control registers */
#define FPCREG_RID 0 /* $0 = revision id */
+#define FPCREG_FCCR 25 /* $25 = fccr */
+#define FPCREG_FEXR 26 /* $26 = fexr */
+#define FPCREG_FENR 28 /* $28 = fenr */
#define FPCREG_CSR 31 /* $31 = csr */
-/* Determine rounding mode from the RM bits of the FCSR */
-#define modeindex(v) ((v) & FPU_CSR_RM)
-
/* convert condition code register number to csr bit */
const unsigned int fpucondbit[8] = {
- FPU_CSR_COND0,
+ FPU_CSR_COND,
FPU_CSR_COND1,
FPU_CSR_COND2,
FPU_CSR_COND3,
@@ -843,6 +844,127 @@ do { \
#define DPTOREG(dp, x) DITOREG((dp).bits, x)
/*
+ * Emulate a CFC1 instruction.
+ */
+static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ mips_instruction ir)
+{
+ u32 fcr31 = ctx->fcr31;
+ u32 value = 0;
+
+ switch (MIPSInst_RD(ir)) {
+ case FPCREG_CSR:
+ value = fcr31;
+ pr_debug("%p gpr[%d]<-csr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_FENR:
+ if (!cpu_has_mips_r)
+ break;
+ value = (fcr31 >> (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
+ MIPS_FENR_FS;
+ value |= fcr31 & (FPU_CSR_ALL_E | FPU_CSR_RM);
+ pr_debug("%p gpr[%d]<-enr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_FEXR:
+ if (!cpu_has_mips_r)
+ break;
+ value = fcr31 & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
+ pr_debug("%p gpr[%d]<-exr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_FCCR:
+ if (!cpu_has_mips_r)
+ break;
+ value = (fcr31 >> (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
+ MIPS_FCCR_COND0;
+ value |= (fcr31 >> (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
+ (MIPS_FCCR_CONDX & ~MIPS_FCCR_COND0);
+ pr_debug("%p gpr[%d]<-ccr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_RID:
+ value = boot_cpu_data.fpu_id;
+ break;
+
+ default:
+ break;
+ }
+
+ if (MIPSInst_RT(ir))
+ xcp->regs[MIPSInst_RT(ir)] = value;
+}
+
+/*
+ * Emulate a CTC1 instruction.
+ */
+static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ mips_instruction ir)
+{
+ u32 fcr31 = ctx->fcr31;
+ u32 value;
+ u32 mask;
+
+ if (MIPSInst_RT(ir) == 0)
+ value = 0;
+ else
+ value = xcp->regs[MIPSInst_RT(ir)];
+
+ switch (MIPSInst_RD(ir)) {
+ case FPCREG_CSR:
+ pr_debug("%p gpr[%d]->csr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+
+ /* Preserve read-only bits. */
+ mask = boot_cpu_data.fpu_msk31;
+ fcr31 = (value & ~mask) | (fcr31 & mask);
+ break;
+
+ case FPCREG_FENR:
+ if (!cpu_has_mips_r)
+ break;
+ pr_debug("%p gpr[%d]->enr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ fcr31 &= ~(FPU_CSR_FS | FPU_CSR_ALL_E | FPU_CSR_RM);
+ fcr31 |= (value << (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
+ FPU_CSR_FS;
+ fcr31 |= value & (FPU_CSR_ALL_E | FPU_CSR_RM);
+ break;
+
+ case FPCREG_FEXR:
+ if (!cpu_has_mips_r)
+ break;
+ pr_debug("%p gpr[%d]->exr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ fcr31 &= ~(FPU_CSR_ALL_X | FPU_CSR_ALL_S);
+ fcr31 |= value & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
+ break;
+
+ case FPCREG_FCCR:
+ if (!cpu_has_mips_r)
+ break;
+ pr_debug("%p gpr[%d]->ccr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ fcr31 &= ~(FPU_CSR_CONDX | FPU_CSR_COND);
+ fcr31 |= (value << (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
+ FPU_CSR_COND;
+ fcr31 |= (value << (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
+ FPU_CSR_CONDX;
+ break;
+
+ default:
+ break;
+ }
+
+ ctx->fcr31 = fcr31;
+}
+
+/*
* Emulate the single floating point instruction pointed at by EPC.
* Two instructions if the instruction is in a branch delay slot.
*/
@@ -856,7 +978,6 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int likely, pc_inc;
u32 __user *wva;
u64 __user *dva;
- u32 value;
u32 wval;
u64 dval;
int sig;
@@ -1049,42 +1170,12 @@ emul:
case cfc_op:
/* cop control register rd -> gpr[rt] */
- if (MIPSInst_RD(ir) == FPCREG_CSR) {
- value = ctx->fcr31;
- value = (value & ~FPU_CSR_RM) | modeindex(value);
- pr_debug("%p gpr[%d]<-csr=%08x\n",
- (void *) (xcp->cp0_epc),
- MIPSInst_RT(ir), value);
- }
- else if (MIPSInst_RD(ir) == FPCREG_RID)
- value = 0;
- else
- value = 0;
- if (MIPSInst_RT(ir))
- xcp->regs[MIPSInst_RT(ir)] = value;
+ cop1_cfc(xcp, ctx, ir);
break;
case ctc_op:
/* copregister rd <- rt */
- if (MIPSInst_RT(ir) == 0)
- value = 0;
- else
- value = xcp->regs[MIPSInst_RT(ir)];
-
- /* we only have one writable control reg
- */
- if (MIPSInst_RD(ir) == FPCREG_CSR) {
- pr_debug("%p gpr[%d]->csr=%08x\n",
- (void *) (xcp->cp0_epc),
- MIPSInst_RT(ir), value);
-
- /*
- * Don't write reserved bits,
- * and convert to ieee library modes
- */
- ctx->fcr31 = (value & ~(FPU_CSR_RSVD | FPU_CSR_RM)) |
- modeindex(value);
- }
+ cop1_ctc(xcp, ctx, ir);
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
return SIGFPE;
}
@@ -1103,17 +1194,18 @@ emul:
likely = 0;
switch (MIPSInst_RT(ir) & 3) {
case bcfl_op:
- likely = 1;
+ if (cpu_has_mips_2_3_4_5_r)
+ likely = 1;
+ /* Fall through */
case bcf_op:
cond = !cond;
break;
case bctl_op:
- likely = 1;
+ if (cpu_has_mips_2_3_4_5_r)
+ likely = 1;
+ /* Fall through */
case bct_op:
break;
- default:
- /* thats an illegal instruction */
- return SIGILL;
}
set_delay_slot(xcp);
@@ -1121,6 +1213,14 @@ emul:
/*
* Branch taken: emulate dslot instruction
*/
+ unsigned long bcpc;
+
+ /*
+ * Remember EPC at the branch to point back
+ * at so that any delay-slot instruction
+ * signal is not silently ignored.
+ */
+ bcpc = xcp->cp0_epc;
xcp->cp0_epc += dec_insn.pc_inc;
contpc = MIPSInst_SIMM(ir);
@@ -1146,63 +1246,77 @@ emul:
* Single step the non-CP1
* instruction in the dslot.
*/
- return mips_dsemul(xcp, ir, contpc);
+ sig = mips_dsemul(xcp, ir,
+ contpc);
+ if (sig)
+ xcp->cp0_epc = bcpc;
+ /*
+ * SIGILL forces out of
+ * the emulation loop.
+ */
+ return sig ? sig : SIGILL;
}
} else
contpc = (xcp->cp0_epc + (contpc << 2));
switch (MIPSInst_OPCODE(ir)) {
case lwc1_op:
- goto emul;
-
case swc1_op:
goto emul;
case ldc1_op:
case sdc1_op:
- if (cpu_has_mips_2_3_4_5 ||
- cpu_has_mips64)
+ if (cpu_has_mips_2_3_4_5_r)
goto emul;
- return SIGILL;
- goto emul;
+ goto bc_sigill;
case cop1_op:
goto emul;
case cop1x_op:
- if (cpu_has_mips_4_5 || cpu_has_mips64 || cpu_has_mips32r2)
+ if (cpu_has_mips_4_5_64_r2_r6)
/* its one of ours */
goto emul;
- return SIGILL;
+ goto bc_sigill;
case spec_op:
- if (!cpu_has_mips_4_5_r)
- return SIGILL;
+ switch (MIPSInst_FUNC(ir)) {
+ case movc_op:
+ if (cpu_has_mips_4_5_r)
+ goto emul;
- if (MIPSInst_FUNC(ir) == movc_op)
- goto emul;
+ goto bc_sigill;
+ }
break;
+
+ bc_sigill:
+ xcp->cp0_epc = bcpc;
+ return SIGILL;
}
/*
* Single step the non-cp1
* instruction in the dslot
*/
- return mips_dsemul(xcp, ir, contpc);
+ sig = mips_dsemul(xcp, ir, contpc);
+ if (sig)
+ xcp->cp0_epc = bcpc;
+ /* SIGILL forces out of the emulation loop. */
+ return sig ? sig : SIGILL;
} else if (likely) { /* branch not taken */
- /*
- * branch likely nullifies
- * dslot if not taken
- */
- xcp->cp0_epc += dec_insn.pc_inc;
- contpc += dec_insn.pc_inc;
- /*
- * else continue & execute
- * dslot as normal insn
- */
- }
+ /*
+ * branch likely nullifies
+ * dslot if not taken
+ */
+ xcp->cp0_epc += dec_insn.pc_inc;
+ contpc += dec_insn.pc_inc;
+ /*
+ * else continue & execute
+ * dslot as normal insn
+ */
+ }
break;
default:
@@ -1216,7 +1330,7 @@ emul:
break;
case cop1x_op:
- if (!cpu_has_mips_4_5 && !cpu_has_mips64 && !cpu_has_mips32r2)
+ if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
sig = fpux_emu(xcp, ctx, ir, fault_addr);
@@ -1549,7 +1663,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
/* unary ops */
case fsqrt_op:
- if (!cpu_has_mips_4_5_r)
+ if (!cpu_has_mips_2_3_4_5_r)
return SIGILL;
handler.u = ieee754sp_sqrt;
@@ -1561,14 +1675,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
* achieve full IEEE-754 accuracy - however this emulator does.
*/
case frsqrt_op:
- if (!cpu_has_mips_4_5_r2_r6)
+ if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
handler.u = fpemu_sp_rsqrt;
goto scopuop;
case frecip_op:
- if (!cpu_has_mips_4_5_r2_r6)
+ if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
handler.u = fpemu_sp_recip;
@@ -1670,19 +1784,19 @@ copcsr:
case ftrunc_op:
case fceil_op:
case ffloor_op:
- if (!cpu_has_mips_2_3_4_5 && !cpu_has_mips64)
+ if (!cpu_has_mips_2_3_4_5_r)
return SIGILL;
oldrm = ieee754_csr.rm;
SPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.w = ieee754sp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
case fcvtl_op:
- if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
SPFROMREG(fs, MIPSInst_FS(ir));
@@ -1694,12 +1808,12 @@ copcsr:
case ftruncl_op:
case fceill_op:
case ffloorl_op:
- if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
oldrm = ieee754_csr.rm;
SPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.l = ieee754sp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
@@ -1763,13 +1877,13 @@ copcsr:
* achieve full IEEE-754 accuracy - however this emulator does.
*/
case frsqrt_op:
- if (!cpu_has_mips_4_5_r2_r6)
+ if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
handler.u = fpemu_dp_rsqrt;
goto dcopuop;
case frecip_op:
- if (!cpu_has_mips_4_5_r2_r6)
+ if (!cpu_has_mips_4_5_64_r2_r6)
return SIGILL;
handler.u = fpemu_dp_recip;
@@ -1852,14 +1966,14 @@ dcopuop:
oldrm = ieee754_csr.rm;
DPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.w = ieee754dp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
case fcvtl_op:
- if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
DPFROMREG(fs, MIPSInst_FS(ir));
@@ -1871,12 +1985,12 @@ dcopuop:
case ftruncl_op:
case fceill_op:
case ffloorl_op:
- if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
oldrm = ieee754_csr.rm;
DPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
rv.l = ieee754dp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
@@ -1930,7 +2044,7 @@ dcopuop:
case l_fmt:
- if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
DIFROMREG(bits, MIPSInst_FS(ir));
@@ -1994,7 +2108,7 @@ dcopuop:
SITOREG(rv.w, MIPSInst_FD(ir));
break;
case l_fmt:
- if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
DITOREG(rv.l, MIPSInst_FD(ir));
@@ -2081,10 +2195,8 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
xcp->cp0_epc += dec_insn.pc_inc; /* Skip NOPs */
else {
/*
- * The 'ieee754_csr' is an alias of
- * ctx->fcr31. No need to copy ctx->fcr31 to
- * ieee754_csr. But ieee754_csr.rm is ieee
- * library modes. (not mips rounding mode)
+ * The 'ieee754_csr' is an alias of ctx->fcr31.
+ * No need to copy ctx->fcr31 to ieee754_csr.
*/
sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
}
diff --git a/arch/mips/math-emu/dp_add.c b/arch/mips/math-emu/dp_add.c
index 7f64577df984..8954ef031f84 100644
--- a/arch/mips/math-emu/dp_add.c
+++ b/arch/mips/math-emu/dp_add.c
@@ -37,19 +37,20 @@ union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y)
FLUSHYDP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ return ieee754dp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -150,8 +151,6 @@ union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y)
* leaving result in xm, xs and xe.
*/
xm = xm + ym;
- xe = xe;
- xs = xs;
if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */
xm = XDPSRS1(xm);
@@ -160,11 +159,8 @@ union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y)
} else {
if (xm >= ym) {
xm = xm - ym;
- xe = xe;
- xs = xs;
} else {
xm = ym - xm;
- xe = xe;
xs = ys;
}
if (xm == 0)
diff --git a/arch/mips/math-emu/dp_cmp.c b/arch/mips/math-emu/dp_cmp.c
index 30f95f6e9ac4..a29880e29ae4 100644
--- a/arch/mips/math-emu/dp_cmp.c
+++ b/arch/mips/math-emu/dp_cmp.c
@@ -35,16 +35,11 @@ int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cmp, int sig)
FLUSHYDP;
ieee754_clearcx(); /* Even clear inexact flag here */
- if (ieee754dp_isnan(x) || ieee754dp_isnan(y)) {
- if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
+ if (ieee754_class_nan(xc) || ieee754_class_nan(yc)) {
+ if (sig ||
+ xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
ieee754_setcx(IEEE754_INVALID_OPERATION);
- if (cmp & IEEE754_CUN)
- return 1;
- if (cmp & (IEEE754_CLT | IEEE754_CGT)) {
- if (sig && ieee754_setandtestcx(IEEE754_INVALID_OPERATION))
- return 0;
- }
- return 0;
+ return (cmp & IEEE754_CUN) != 0;
} else {
vx = x.bits;
vy = y.bits;
diff --git a/arch/mips/math-emu/dp_div.c b/arch/mips/math-emu/dp_div.c
index bef0e55e5938..f4746f7c5f63 100644
--- a/arch/mips/math-emu/dp_div.c
+++ b/arch/mips/math-emu/dp_div.c
@@ -39,19 +39,20 @@ union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y)
FLUSHYDP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ return ieee754dp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
diff --git a/arch/mips/math-emu/dp_fsp.c b/arch/mips/math-emu/dp_fsp.c
index ffb69c5830b0..57d09ca5403a 100644
--- a/arch/mips/math-emu/dp_fsp.c
+++ b/arch/mips/math-emu/dp_fsp.c
@@ -22,6 +22,12 @@
#include "ieee754sp.h"
#include "ieee754dp.h"
+static inline union ieee754dp ieee754dp_nan_fsp(int xs, u64 xm)
+{
+ return builddp(xs, DP_EMAX + 1 + DP_EBIAS,
+ xm << (DP_FBITS - SP_FBITS));
+}
+
union ieee754dp ieee754dp_fsp(union ieee754sp x)
{
COMPXSP;
@@ -34,15 +40,11 @@ union ieee754dp ieee754dp_fsp(union ieee754sp x)
switch (xc) {
case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ return ieee754dp_nanxcpt(ieee754dp_nan_fsp(xs, xm));
case IEEE754_CLASS_QNAN:
- return ieee754dp_nanxcpt(builddp(xs,
- DP_EMAX + 1 + DP_EBIAS,
- ((u64) xm
- << (DP_FBITS -
- SP_FBITS))));
+ return ieee754dp_nan_fsp(xs, xm);
+
case IEEE754_CLASS_INF:
return ieee754dp_inf(xs);
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
index d3acdedb5b9d..d0901f03fa19 100644
--- a/arch/mips/math-emu/dp_mul.c
+++ b/arch/mips/math-emu/dp_mul.c
@@ -47,19 +47,20 @@ union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y)
FLUSHYDP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ return ieee754dp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
diff --git a/arch/mips/math-emu/dp_simple.c b/arch/mips/math-emu/dp_simple.c
index bccbe90efceb..926d56bf37f2 100644
--- a/arch/mips/math-emu/dp_simple.c
+++ b/arch/mips/math-emu/dp_simple.c
@@ -23,44 +23,27 @@
union ieee754dp ieee754dp_neg(union ieee754dp x)
{
- COMPXDP;
-
- EXPLODEXDP;
- ieee754_clearcx();
- FLUSHXDP;
-
- /*
- * Invert the sign ALWAYS to prevent an endless recursion on
- * pow() in libc.
- */
- /* quick fix up */
- DPSIGN(x) ^= 1;
-
- if (xc == IEEE754_CLASS_SNAN) {
- union ieee754dp y = ieee754dp_indef();
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- DPSIGN(y) = DPSIGN(x);
- return ieee754dp_nanxcpt(y);
- }
-
- return x;
+ unsigned int oldrm;
+ union ieee754dp y;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ y = ieee754dp_sub(ieee754dp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ return y;
}
union ieee754dp ieee754dp_abs(union ieee754dp x)
{
- COMPXDP;
-
- EXPLODEXDP;
- ieee754_clearcx();
- FLUSHXDP;
-
- /* Clear sign ALWAYS, irrespective of NaN */
- DPSIGN(x) = 0;
-
- if (xc == IEEE754_CLASS_SNAN) {
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
- }
-
- return x;
+ unsigned int oldrm;
+ union ieee754dp y;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ if (DPSIGN(x))
+ y = ieee754dp_sub(ieee754dp_zero(0), x);
+ else
+ y = ieee754dp_add(ieee754dp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ return y;
}
diff --git a/arch/mips/math-emu/dp_sqrt.c b/arch/mips/math-emu/dp_sqrt.c
index 041bbb6124bb..cd5bc083001e 100644
--- a/arch/mips/math-emu/dp_sqrt.c
+++ b/arch/mips/math-emu/dp_sqrt.c
@@ -42,13 +42,12 @@ union ieee754dp ieee754dp_sqrt(union ieee754dp x)
/* x == INF or NAN? */
switch (xc) {
- case IEEE754_CLASS_QNAN:
- /* sqrt(Nan) = Nan */
+ case IEEE754_CLASS_SNAN:
return ieee754dp_nanxcpt(x);
- case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ case IEEE754_CLASS_QNAN:
+ /* sqrt(Nan) = Nan */
+ return x;
case IEEE754_CLASS_ZERO:
/* sqrt(0) = 0 */
@@ -58,7 +57,7 @@ union ieee754dp ieee754dp_sqrt(union ieee754dp x)
if (xs) {
/* sqrt(-Inf) = Nan */
ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ return ieee754dp_indef();
}
/* sqrt(+Inf) = Inf */
return x;
@@ -71,7 +70,7 @@ union ieee754dp ieee754dp_sqrt(union ieee754dp x)
if (xs) {
/* sqrt(-x) = Nan */
ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ return ieee754dp_indef();
}
break;
}
diff --git a/arch/mips/math-emu/dp_sub.c b/arch/mips/math-emu/dp_sub.c
index 7a174029043a..fc17a781b9ae 100644
--- a/arch/mips/math-emu/dp_sub.c
+++ b/arch/mips/math-emu/dp_sub.c
@@ -37,19 +37,20 @@ union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y)
FLUSHYDP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(ieee754dp_indef());
+ return ieee754dp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -153,8 +154,6 @@ union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y)
/* generate 28 bit result of adding two 27 bit numbers
*/
xm = xm + ym;
- xe = xe;
- xs = xs;
if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */
xm = XDPSRS1(xm); /* shift preserving sticky */
@@ -163,11 +162,8 @@ union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y)
} else {
if (xm >= ym) {
xm = xm - ym;
- xe = xe;
- xs = xs;
} else {
xm = ym - xm;
- xe = xe;
xs = ys;
}
if (xm == 0) {
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 4f514f3724cb..e0b5cc27d78b 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -94,9 +94,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
regs->cp0_epc = ((unsigned long) &fr->emul) |
get_isa16_mode(regs->cp0_epc);
- flush_cache_sigtramp((unsigned long)&fr->badinst);
+ flush_cache_sigtramp((unsigned long)&fr->emul);
- return SIGILL; /* force out of emulation loop */
+ return 0;
}
int do_dsemulret(struct pt_regs *xcp)
@@ -158,6 +158,6 @@ int do_dsemulret(struct pt_regs *xcp)
/* Set EPC to return to post-branch instruction */
xcp->cp0_epc = epc;
-
+ MIPS_FPU_EMU_INC_STATS(ds_emul);
return 1;
}
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index 43c4fb522ac2..a5ca108ce467 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -126,84 +126,21 @@ enum {
#define IEEE754_CGT 0x04
#define IEEE754_CUN 0x08
-/* "normal" comparisons
-*/
-static inline int ieee754sp_eq(union ieee754sp x, union ieee754sp y)
-{
- return ieee754sp_cmp(x, y, IEEE754_CEQ, 0);
-}
-
-static inline int ieee754sp_ne(union ieee754sp x, union ieee754sp y)
-{
- return ieee754sp_cmp(x, y,
- IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0);
-}
-
-static inline int ieee754sp_lt(union ieee754sp x, union ieee754sp y)
-{
- return ieee754sp_cmp(x, y, IEEE754_CLT, 0);
-}
-
-static inline int ieee754sp_le(union ieee754sp x, union ieee754sp y)
-{
- return ieee754sp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0);
-}
-
-static inline int ieee754sp_gt(union ieee754sp x, union ieee754sp y)
-{
- return ieee754sp_cmp(x, y, IEEE754_CGT, 0);
-}
-
-
-static inline int ieee754sp_ge(union ieee754sp x, union ieee754sp y)
-{
- return ieee754sp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0);
-}
-
-static inline int ieee754dp_eq(union ieee754dp x, union ieee754dp y)
-{
- return ieee754dp_cmp(x, y, IEEE754_CEQ, 0);
-}
-
-static inline int ieee754dp_ne(union ieee754dp x, union ieee754dp y)
-{
- return ieee754dp_cmp(x, y,
- IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0);
-}
-
-static inline int ieee754dp_lt(union ieee754dp x, union ieee754dp y)
-{
- return ieee754dp_cmp(x, y, IEEE754_CLT, 0);
-}
-
-static inline int ieee754dp_le(union ieee754dp x, union ieee754dp y)
-{
- return ieee754dp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0);
-}
-
-static inline int ieee754dp_gt(union ieee754dp x, union ieee754dp y)
-{
- return ieee754dp_cmp(x, y, IEEE754_CGT, 0);
-}
-
-static inline int ieee754dp_ge(union ieee754dp x, union ieee754dp y)
-{
- return ieee754dp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0);
-}
-
/*
* The control status register
*/
struct _ieee754_csr {
- __BITFIELD_FIELD(unsigned pad0:7,
- __BITFIELD_FIELD(unsigned nod:1, /* set 1 for no denormalised numbers */
- __BITFIELD_FIELD(unsigned c:1, /* condition */
- __BITFIELD_FIELD(unsigned pad1:5,
+ __BITFIELD_FIELD(unsigned fcc:7, /* condition[7:1] */
+ __BITFIELD_FIELD(unsigned nod:1, /* set 1 for no denormals */
+ __BITFIELD_FIELD(unsigned c:1, /* condition[0] */
+ __BITFIELD_FIELD(unsigned pad0:3,
+ __BITFIELD_FIELD(unsigned abs2008:1, /* IEEE 754-2008 ABS/NEG.fmt */
+ __BITFIELD_FIELD(unsigned nan2008:1, /* IEEE 754-2008 NaN mode */
__BITFIELD_FIELD(unsigned cx:6, /* exceptions this operation */
__BITFIELD_FIELD(unsigned mx:5, /* exception enable mask */
__BITFIELD_FIELD(unsigned sx:5, /* exceptions total */
__BITFIELD_FIELD(unsigned rm:2, /* current rounding mode */
- ;))))))))
+ ;))))))))))
};
#define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31))
@@ -257,23 +194,23 @@ static inline int ieee754_sxtest(unsigned n)
union ieee754sp ieee754sp_dump(char *s, union ieee754sp x);
union ieee754dp ieee754dp_dump(char *s, union ieee754dp x);
-#define IEEE754_SPCVAL_PZERO 0
-#define IEEE754_SPCVAL_NZERO 1
-#define IEEE754_SPCVAL_PONE 2
-#define IEEE754_SPCVAL_NONE 3
-#define IEEE754_SPCVAL_PTEN 4
-#define IEEE754_SPCVAL_NTEN 5
-#define IEEE754_SPCVAL_PINFINITY 6
-#define IEEE754_SPCVAL_NINFINITY 7
-#define IEEE754_SPCVAL_INDEF 8
-#define IEEE754_SPCVAL_PMAX 9 /* +max norm */
-#define IEEE754_SPCVAL_NMAX 10 /* -max norm */
-#define IEEE754_SPCVAL_PMIN 11 /* +min norm */
-#define IEEE754_SPCVAL_NMIN 12 /* +min norm */
-#define IEEE754_SPCVAL_PMIND 13 /* +min denorm */
-#define IEEE754_SPCVAL_NMIND 14 /* +min denorm */
-#define IEEE754_SPCVAL_P1E31 15 /* + 1.0e31 */
-#define IEEE754_SPCVAL_P1E63 16 /* + 1.0e63 */
+#define IEEE754_SPCVAL_PZERO 0 /* +0.0 */
+#define IEEE754_SPCVAL_NZERO 1 /* -0.0 */
+#define IEEE754_SPCVAL_PONE 2 /* +1.0 */
+#define IEEE754_SPCVAL_NONE 3 /* -1.0 */
+#define IEEE754_SPCVAL_PTEN 4 /* +10.0 */
+#define IEEE754_SPCVAL_NTEN 5 /* -10.0 */
+#define IEEE754_SPCVAL_PINFINITY 6 /* +inf */
+#define IEEE754_SPCVAL_NINFINITY 7 /* -inf */
+#define IEEE754_SPCVAL_INDEF 8 /* quiet NaN */
+#define IEEE754_SPCVAL_PMAX 9 /* +max norm */
+#define IEEE754_SPCVAL_NMAX 10 /* -max norm */
+#define IEEE754_SPCVAL_PMIN 11 /* +min norm */
+#define IEEE754_SPCVAL_NMIN 12 /* -min norm */
+#define IEEE754_SPCVAL_PMIND 13 /* +min denorm */
+#define IEEE754_SPCVAL_NMIND 14 /* -min denorm */
+#define IEEE754_SPCVAL_P1E31 15 /* + 1.0e31 */
+#define IEEE754_SPCVAL_P1E63 16 /* + 1.0e63 */
extern const union ieee754dp __ieee754dp_spcvals[];
extern const union ieee754sp __ieee754sp_spcvals[];
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index 068f45a415fc..522d843f2ffd 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -30,9 +30,9 @@ int ieee754dp_class(union ieee754dp x)
return xc;
}
-int ieee754dp_isnan(union ieee754dp x)
+static inline int ieee754dp_isnan(union ieee754dp x)
{
- return ieee754dp_class(x) >= IEEE754_CLASS_SNAN;
+ return ieee754_class_nan(ieee754dp_class(x));
}
static inline int ieee754dp_issnan(union ieee754dp x)
@@ -42,23 +42,16 @@ static inline int ieee754dp_issnan(union ieee754dp x)
}
+/*
+ * Raise the Invalid Operation IEEE 754 exception
+ * and convert the signaling NaN supplied to a quiet NaN.
+ */
union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp r)
{
- assert(ieee754dp_isnan(r));
-
- if (!ieee754dp_issnan(r)) /* QNAN does not cause invalid op !! */
- return r;
-
- if (!ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) {
- /* not enabled convert to a quiet NaN */
- DPMANT(r) &= (~DP_MBIT(DP_FBITS-1));
- if (ieee754dp_isnan(r))
- return r;
- else
- return ieee754dp_indef();
- }
+ assert(ieee754dp_issnan(r));
- return r;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
}
static u64 ieee754dp_get_rounding(int sn, u64 xm)
diff --git a/arch/mips/math-emu/ieee754dp.h b/arch/mips/math-emu/ieee754dp.h
index 61fd6fd31350..e2babd98fee3 100644
--- a/arch/mips/math-emu/ieee754dp.h
+++ b/arch/mips/math-emu/ieee754dp.h
@@ -77,6 +77,5 @@ static inline union ieee754dp builddp(int s, int bx, u64 m)
return r;
}
-extern int ieee754dp_isnan(union ieee754dp);
extern union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp);
extern union ieee754dp ieee754dp_format(int, int, u64);
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index f0365bb86747..05389d5e3a93 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -44,6 +44,11 @@ static inline int ieee754_setandtestcx(const unsigned int x)
return ieee754_csr.mx & x;
}
+static inline int ieee754_class_nan(int xc)
+{
+ return xc >= IEEE754_CLASS_SNAN;
+}
+
#define COMPXSP \
unsigned xm; int xe; int xs __maybe_unused; int xc
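ieee754_class_nan() relies on the two NaN classes having the largest class values, so a single comparison covers both signaling and quiet NaNs. A usage sketch (the numeric values are an assumption; only the relative ordering matters):

	/* Assumed ordering in ieee754.h: NORM < ZERO < DNORM < INF < SNAN < QNAN */
	if (ieee754_class_nan(xc)) {
		/* both sNaN and qNaN operands are handled here */
	}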
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index ba88301579c2..ca8e35e33bf7 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -30,9 +30,9 @@ int ieee754sp_class(union ieee754sp x)
return xc;
}
-int ieee754sp_isnan(union ieee754sp x)
+static inline int ieee754sp_isnan(union ieee754sp x)
{
- return ieee754sp_class(x) >= IEEE754_CLASS_SNAN;
+ return ieee754_class_nan(ieee754sp_class(x));
}
static inline int ieee754sp_issnan(union ieee754sp x)
@@ -42,23 +42,16 @@ static inline int ieee754sp_issnan(union ieee754sp x)
}
+/*
+ * Raise the Invalid Operation IEEE 754 exception
+ * and convert the signaling NaN supplied to a quiet NaN.
+ */
union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp r)
{
- assert(ieee754sp_isnan(r));
-
- if (!ieee754sp_issnan(r)) /* QNAN does not cause invalid op !! */
- return r;
-
- if (!ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) {
- /* not enabled convert to a quiet NaN */
- SPMANT(r) &= (~SP_MBIT(SP_FBITS-1));
- if (ieee754sp_isnan(r))
- return r;
- else
- return ieee754sp_indef();
- }
+ assert(ieee754sp_issnan(r));
- return r;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
}
static unsigned ieee754sp_get_rounding(int sn, unsigned xm)
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index ad268e332318..374a3f00a589 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -82,6 +82,5 @@ static inline union ieee754sp buildsp(int s, int bx, unsigned m)
return r;
}
-extern int ieee754sp_isnan(union ieee754sp);
extern union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp);
extern union ieee754sp ieee754sp_format(int, int, unsigned);
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index becdd63e14a9..f308e0f05fc5 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -61,6 +61,7 @@ do { \
FPU_STAT_CREATE(ieee754_overflow);
FPU_STAT_CREATE(ieee754_zerodiv);
FPU_STAT_CREATE(ieee754_invalidop);
+ FPU_STAT_CREATE(ds_emul);
return 0;
}
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
index 2d84d460cb67..f1c87b07d3b4 100644
--- a/arch/mips/math-emu/sp_add.c
+++ b/arch/mips/math-emu/sp_add.c
@@ -37,19 +37,20 @@ union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y)
FLUSHYSP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ return ieee754sp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -148,8 +149,6 @@ union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y)
* leaving result in xm, xs and xe.
*/
xm = xm + ym;
- xe = xe;
- xs = xs;
if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */
SPXSRSX1();
@@ -157,11 +156,8 @@ union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y)
} else {
if (xm >= ym) {
xm = xm - ym;
- xe = xe;
- xs = xs;
} else {
xm = ym - xm;
- xe = xe;
xs = ys;
}
if (xm == 0)
diff --git a/arch/mips/math-emu/sp_cmp.c b/arch/mips/math-emu/sp_cmp.c
index addbccb2f556..67b82f1e2c4a 100644
--- a/arch/mips/math-emu/sp_cmp.c
+++ b/arch/mips/math-emu/sp_cmp.c
@@ -35,16 +35,11 @@ int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cmp, int sig)
FLUSHYSP;
ieee754_clearcx(); /* Even clear inexact flag here */
- if (ieee754sp_isnan(x) || ieee754sp_isnan(y)) {
- if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
+ if (ieee754_class_nan(xc) || ieee754_class_nan(yc)) {
+ if (sig ||
+ xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
ieee754_setcx(IEEE754_INVALID_OPERATION);
- if (cmp & IEEE754_CUN)
- return 1;
- if (cmp & (IEEE754_CLT | IEEE754_CGT)) {
- if (sig && ieee754_setandtestcx(IEEE754_INVALID_OPERATION))
- return 0;
- }
- return 0;
+ return (cmp & IEEE754_CUN) != 0;
} else {
vx = x.bits;
vy = y.bits;
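With a NaN operand the only IEEE condition that can hold is "unordered", so the result collapses to a single bit test. A short reading of the new return statement (condition bits as used in the removed lines above):

	/* cmp is a mask of requested conditions; given a NaN operand:
	 *   IEEE754_CUN requested          -> comparison is true (unordered)
	 *   only equal/less/greater bits   -> comparison is false
	 */
	return (cmp & IEEE754_CUN) != 0;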
diff --git a/arch/mips/math-emu/sp_div.c b/arch/mips/math-emu/sp_div.c
index 721f317aa877..27f6db3a0a4c 100644
--- a/arch/mips/math-emu/sp_div.c
+++ b/arch/mips/math-emu/sp_div.c
@@ -39,19 +39,20 @@ union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y)
FLUSHYSP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ return ieee754sp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
diff --git a/arch/mips/math-emu/sp_fdp.c b/arch/mips/math-emu/sp_fdp.c
index 1b266fb16973..3797148893ad 100644
--- a/arch/mips/math-emu/sp_fdp.c
+++ b/arch/mips/math-emu/sp_fdp.c
@@ -22,12 +22,19 @@
#include "ieee754sp.h"
#include "ieee754dp.h"
+static inline union ieee754sp ieee754sp_nan_fdp(int xs, u64 xm)
+{
+ return buildsp(xs, SP_EMAX + 1 + SP_EBIAS,
+ xm >> (DP_FBITS - SP_FBITS));
+}
+
union ieee754sp ieee754sp_fdp(union ieee754dp x)
{
+ union ieee754sp y;
u32 rm;
COMPXDP;
- union ieee754sp nan;
+ COMPYSP;
EXPLODEXDP;
@@ -37,15 +44,14 @@ union ieee754sp ieee754sp_fdp(union ieee754dp x)
switch (xc) {
case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ return ieee754sp_nanxcpt(ieee754sp_nan_fdp(xs, xm));
case IEEE754_CLASS_QNAN:
- nan = buildsp(xs, SP_EMAX + 1 + SP_EBIAS, (u32)
- (xm >> (DP_FBITS - SP_FBITS)));
- if (!ieee754sp_isnan(nan))
- nan = ieee754sp_indef();
- return ieee754sp_nanxcpt(nan);
+ y = ieee754sp_nan_fdp(xs, xm);
+ EXPLODEYSP;
+ if (!ieee754_class_nan(yc))
+ y = ieee754sp_indef();
+ return y;
case IEEE754_CLASS_INF:
return ieee754sp_inf(xs);
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
index 890c13a2965e..d910c43a6f30 100644
--- a/arch/mips/math-emu/sp_mul.c
+++ b/arch/mips/math-emu/sp_mul.c
@@ -47,19 +47,20 @@ union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y)
FLUSHYSP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ return ieee754sp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
diff --git a/arch/mips/math-emu/sp_simple.c b/arch/mips/math-emu/sp_simple.c
index f1ffaa9a17e0..c50e9451f2d2 100644
--- a/arch/mips/math-emu/sp_simple.c
+++ b/arch/mips/math-emu/sp_simple.c
@@ -23,44 +23,27 @@
union ieee754sp ieee754sp_neg(union ieee754sp x)
{
- COMPXSP;
-
- EXPLODEXSP;
- ieee754_clearcx();
- FLUSHXSP;
-
- /*
- * Invert the sign ALWAYS to prevent an endless recursion on
- * pow() in libc.
- */
- /* quick fix up */
- SPSIGN(x) ^= 1;
-
- if (xc == IEEE754_CLASS_SNAN) {
- union ieee754sp y = ieee754sp_indef();
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- SPSIGN(y) = SPSIGN(x);
- return ieee754sp_nanxcpt(y);
- }
-
- return x;
+ unsigned int oldrm;
+ union ieee754sp y;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ y = ieee754sp_sub(ieee754sp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ return y;
}
union ieee754sp ieee754sp_abs(union ieee754sp x)
{
- COMPXSP;
-
- EXPLODEXSP;
- ieee754_clearcx();
- FLUSHXSP;
-
- /* Clear sign ALWAYS, irrespective of NaN */
- SPSIGN(x) = 0;
-
- if (xc == IEEE754_CLASS_SNAN) {
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
- }
-
- return x;
+ unsigned int oldrm;
+ union ieee754sp y;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ if (SPSIGN(x))
+ y = ieee754sp_sub(ieee754sp_zero(0), x);
+ else
+ y = ieee754sp_add(ieee754sp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ return y;
}
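Forcing round-toward-minus-infinity while the subtraction runs is what keeps the sign of zero correct. A worked look at the zero cases (standard IEEE 754 exact-zero sign rules assumed):

	/*
	 * neg(+0.0): 0.0 - (+0.0) = -0.0 under FPU_CSR_RD (round down),
	 *            but +0.0 under round-to-nearest, which would lose the sign.
	 * neg(-0.0): 0.0 - (-0.0) = +0.0 under any rounding mode.
	 * abs(-0.0): takes the subtract path and likewise yields +0.0.
	 */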
diff --git a/arch/mips/math-emu/sp_sqrt.c b/arch/mips/math-emu/sp_sqrt.c
index b7c098a86f95..67059c33a250 100644
--- a/arch/mips/math-emu/sp_sqrt.c
+++ b/arch/mips/math-emu/sp_sqrt.c
@@ -35,13 +35,12 @@ union ieee754sp ieee754sp_sqrt(union ieee754sp x)
/* x == INF or NAN? */
switch (xc) {
- case IEEE754_CLASS_QNAN:
- /* sqrt(Nan) = Nan */
+ case IEEE754_CLASS_SNAN:
return ieee754sp_nanxcpt(x);
- case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ case IEEE754_CLASS_QNAN:
+ /* sqrt(NaN) = NaN */
+ return x;
case IEEE754_CLASS_ZERO:
/* sqrt(0) = 0 */
@@ -51,7 +50,7 @@ union ieee754sp ieee754sp_sqrt(union ieee754sp x)
if (xs) {
/* sqrt(-Inf) = Nan */
ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ return ieee754sp_indef();
}
/* sqrt(+Inf) = Inf */
return x;
@@ -61,7 +60,7 @@ union ieee754sp ieee754sp_sqrt(union ieee754sp x)
if (xs) {
/* sqrt(-x) = Nan */
ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ return ieee754sp_indef();
}
break;
}
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
index 8592e49032b8..ec5f937a8b3e 100644
--- a/arch/mips/math-emu/sp_sub.c
+++ b/arch/mips/math-emu/sp_sub.c
@@ -37,19 +37,20 @@ union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y)
FLUSHYSP;
switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(ieee754sp_indef());
+ return ieee754sp_nanxcpt(x);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -148,8 +149,6 @@ union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y)
/* generate 28 bit result of adding two 27 bit numbers
*/
xm = xm + ym;
- xe = xe;
- xs = xs;
if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */
SPXSRSX1(); /* shift preserving sticky */
@@ -157,11 +156,8 @@ union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y)
} else {
if (xm >= ym) {
xm = xm - ym;
- xe = xe;
- xs = xs;
} else {
xm = ym - xm;
- xe = xe;
xs = ys;
}
if (xm == 0) {
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 3f8059602765..0dbb65a51ce5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -430,6 +430,7 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
/*
* These caches are inclusive caches, that is, if something
* is not cached in the S-cache, we know it also won't be
@@ -506,7 +507,7 @@ static inline void local_r4k_flush_cache_mm(void * args)
/*
* Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
- * only flush the primary caches but R10000 and R12000 behave sane ...
+ * only flush the primary caches but R1x000 CPUs behave sanely ...
* R4000SC and R4400SC indexed S-cache ops also invalidate primary
* caches, so we can bail out early.
*/
@@ -888,33 +889,39 @@ static inline void rm7k_erratum31(void)
}
}
-static inline void alias_74k_erratum(struct cpuinfo_mips *c)
+static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
unsigned int imp = c->processor_id & PRID_IMP_MASK;
unsigned int rev = c->processor_id & PRID_REV_MASK;
+ int present = 0;
/*
* Early versions of the 74K do not update the cache tags on a
* vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
- * aliases. In this case it is better to treat the cache as always
- * having aliases.
+ * aliases. In this case it is better to treat the cache as always
+ * having aliases. Also disable the synonym tag update feature
+ * where available, so that no opportunistic tag update happens
+ * when a load causes a virtual address miss but a physical
+ * address hit during a D-cache look-up.
*/
switch (imp) {
case PRID_IMP_74K:
if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
- c->dcache.flags |= MIPS_CACHE_VTAG;
+ present = 1;
if (rev == PRID_REV_ENCODE_332(2, 4, 0))
write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
break;
case PRID_IMP_1074K:
if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
- c->dcache.flags |= MIPS_CACHE_VTAG;
+ present = 1;
write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
}
break;
default:
BUG();
}
+
+ return present;
}
static void b5k_instruction_hazard(void)
@@ -938,6 +945,7 @@ static void probe_pcache(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
unsigned int prid = read_c0_prid();
+ int has_74k_erratum = 0;
unsigned long config1;
unsigned int lsize;
@@ -1012,6 +1020,7 @@ static void probe_pcache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
c->icache.linesz = 64;
c->icache.ways = 2;
@@ -1223,8 +1232,8 @@ static void probe_pcache(void)
dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
/*
- * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
- * 2-way virtually indexed so normally would suffer from aliases. So
+ * R1x000 P-caches are odd in a positive way. They're 32kB 2-way
+ * virtually indexed, so
* normally they'd suffer from aliases but magic in the hardware deals
* with that for us so we don't need to take care ourselves.
*/
@@ -1240,11 +1249,12 @@ static void probe_pcache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
break;
case CPU_74K:
case CPU_1074K:
- alias_74k_erratum(c);
+ has_74k_erratum = alias_74k_erratum(c);
/* Fall through. */
case CPU_M14KC:
case CPU_M14KEC:
@@ -1259,7 +1269,7 @@ static void probe_pcache(void)
if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
(c->icache.waysize > PAGE_SIZE))
c->icache.flags |= MIPS_CACHE_ALIASES;
- if (read_c0_config7() & MIPS_CONF7_AR) {
+ if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
/*
* Effectively physically indexed dcache,
* thus no virtual aliases.
@@ -1268,7 +1278,7 @@ static void probe_pcache(void)
break;
}
default:
- if (c->dcache.waysize > PAGE_SIZE)
+ if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
c->dcache.flags |= MIPS_CACHE_ALIASES;
}
@@ -1438,6 +1448,7 @@ static void setup_scache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
c->scache.linesz = 64 << ((config >> 13) & 1);
c->scache.ways = 2;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7e3ea7766822..77d96db8253c 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -119,36 +119,37 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
EXPORT_SYMBOL(__flush_anon_page);
-static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
+void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long addr;
+
+ if (PageHighMem(page))
+ return;
+
+ addr = (unsigned long) page_address(page);
+ flush_data_cache_page(addr);
+}
+EXPORT_SYMBOL_GPL(__flush_icache_page);
+
+void __update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte)
{
struct page *page;
- unsigned long pfn = pte_pfn(pteval);
+ unsigned long pfn, addr;
+ int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+ pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
-
page = pfn_to_page(pfn);
if (page_mapping(page) && Page_dcache_dirty(page)) {
- unsigned long page_addr = (unsigned long) page_address(page);
-
- if (!cpu_has_ic_fills_f_dc ||
- pages_do_alias(page_addr, address & PAGE_MASK))
- flush_data_cache_page(page_addr);
+ addr = (unsigned long) page_address(page);
+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ flush_data_cache_page(addr);
ClearPageDcacheDirty(page);
}
}
-void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
- if (pte_present(pteval))
- mips_flush_dcache_from_pte(pteval, addr);
- }
-
- set_pte(ptep, pteval);
-}
-
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
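With the set_pte_at() override removed, the generic PTE store is used and cache maintenance is expected to move to the update_mmu_cache() path. A sketch of the assumed call flow (the generic-MM side is not part of this patch):

	/*
	 * fault handling (generic mm)
	 *   -> set_pte_at()         plain PTE store, no MIPS-specific flush
	 *   -> update_mmu_cache()   arch hook
	 *        -> __update_cache()  flushes the dirty D-cache page when the
	 *                             mapping is executable or aliases
	 */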
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index af5f046e627e..609d1241b0c4 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -258,7 +258,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_addr),
dma_addr & ~PAGE_MASK, size, direction);
-
+ plat_post_dma_flush(dev);
plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
@@ -312,6 +312,7 @@ static void mips_dma_sync_single_for_cpu(struct device *dev,
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_handle),
dma_handle & ~PAGE_MASK, size, direction);
+ plat_post_dma_flush(dev);
}
static void mips_dma_sync_single_for_device(struct device *dev,
@@ -331,6 +332,7 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
for (i = 0; i < nelems; i++, sg++)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
+ plat_post_dma_flush(dev);
}
static void mips_dma_sync_sg_for_device(struct device *dev,
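The new plat_post_dma_flush() hook gives a platform a place to add a barrier or extra flush once the per-page sync is done; a platform with nothing to do would supply an empty stub, roughly as below (a sketch; the real definitions live in the per-platform dma-coherence headers):

	/* Assumed no-op default for platforms without post-DMA work. */
	static inline void plat_post_dma_flush(struct device *dev)
	{
	}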
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 448cde372af0..faa5c9822ecc 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, prot);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
- entrylo = pte.pte_high;
+ entrylo = pte_to_entrylo(pte.pte_high);
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
@@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
+#ifdef CONFIG_XPA
+ entrylo = (pte.pte_low & _PFNX_MASK);
+ writex_c0_entrylo0(entrylo);
+ writex_c0_entrylo1(entrylo);
+#endif
tlbidx = read_c0_wired();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index f1baadd56e82..5c81fdd032c3 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -142,18 +142,26 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
addr0, len, pgoff, flags, DOWN);
}
+unsigned long arch_mmap_rnd(void)
+{
+ unsigned long rnd;
+
+ rnd = (unsigned long)get_random_int();
+ rnd <<= PAGE_SHIFT;
+ if (TASK_IS_32BIT_ADDR)
+ rnd &= 0xfffffful;
+ else
+ rnd &= 0xffffffful;
+
+ return rnd;
+}
+
void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
- if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
- random_factor = random_factor << PAGE_SHIFT;
- if (TASK_IS_32BIT_ADDR)
- random_factor &= 0xfffffful;
- else
- random_factor &= 0xffffffful;
- }
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
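With 4 KB pages (PAGE_SHIFT = 12, assumed here for the arithmetic) the masks leave page-aligned offsets carrying roughly 12 bits of randomness for 32-bit tasks and 16 bits for 64-bit tasks:

	/* rnd <<= 12; rnd &= 0x00fffffful -> random bits 12..23, span about 16 MB
	 * rnd <<= 12; rnd &= 0x0ffffffful -> random bits 12..27, span about 256 MB
	 */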
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 3f85f921801b..885d73ffd6fb 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -157,6 +157,7 @@ static void set_prefetch_parameters(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
/*
* Those values have been experimentally tuned for an
* Origin 200.
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index b2afa49beab0..08318ecb803a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
ptep = pte_offset_map(pmdp, address);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+ write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+ writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+ ptep++;
+ write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+ writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
write_c0_entrylo0(ptep->pte_high);
ptep++;
write_c0_entrylo1(ptep->pte_high);
+#endif
#else
write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
@@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
+#ifdef CONFIG_XPA
+ panic("Broken for XPA kernels");
+#else
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
@@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
+#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -477,12 +489,13 @@ static void r4k_tlb_configure(void)
write_c0_wired(0);
if (current_cpu_type() == CPU_R10000 ||
current_cpu_type() == CPU_R12000 ||
- current_cpu_type() == CPU_R14000)
+ current_cpu_type() == CPU_R14000 ||
+ current_cpu_type() == CPU_R16000)
write_c0_framemask(0);
if (cpu_has_rixi) {
/*
- * Enable the no read, no exec bits, and enable large virtual
+ * Enable the no read, no exec bits, and enable large physical
* address.
*/
#ifdef CONFIG_64BIT
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index d75ff73a2012..97c87027c17f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -35,6 +35,17 @@
#include <asm/uasm.h>
#include <asm/setup.h>
+static int __cpuinitdata mips_xpa_disabled;
+
+static int __init xpa_disable(char *s)
+{
+ mips_xpa_disabled = 1;
+
+ return 1;
+}
+
+__setup("noxpa", xpa_disable);
+
/*
* TLB load/store/modify handlers.
*
@@ -231,14 +242,14 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
#endif
+#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_rixi) {
#ifdef _PAGE_NO_EXEC_SHIFT
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
-#endif
-#ifdef _PAGE_NO_READ_SHIFT
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
#endif
}
+#endif
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
@@ -501,26 +512,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case tlb_indexed: tlbw = uasm_i_tlbwi; break;
}
- if (cpu_has_mips_r2_exec_hazard) {
- /*
- * The architecture spec says an ehb is required here,
- * but a number of cores do not have the hazard and
- * using an ehb causes an expensive pipeline stall.
- */
- switch (current_cpu_type()) {
- case CPU_M14KC:
- case CPU_74K:
- case CPU_1074K:
- case CPU_PROAPTIV:
- case CPU_P5600:
- case CPU_M5150:
- case CPU_QEMU_GENERIC:
- break;
-
- default:
+ if (cpu_has_mips_r2_r6) {
+ if (cpu_has_mips_r2_exec_hazard)
uasm_i_ehb(p);
- break;
- }
tlbw(p);
return;
}
@@ -569,6 +563,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
@@ -1027,12 +1022,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
} else {
int pte_off_even = sizeof(pte_t) / 2;
int pte_off_odd = pte_off_even + sizeof(pte_t);
+#ifdef CONFIG_XPA
+ const int scratch = 1; /* Our extra working register */
- /* The pte entries are pre-shifted */
- uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
+ uasm_i_addu(p, scratch, 0, ptep);
+#endif
+ uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+ uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+ UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
+#ifdef CONFIG_XPA
+ uasm_i_lw(p, tmp, 0, scratch);
+ uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
+ uasm_i_lui(p, scratch, 0xff);
+ uasm_i_ori(p, scratch, scratch, 0xffff);
+ uasm_i_and(p, tmp, scratch, tmp);
+ uasm_i_and(p, ptep, scratch, ptep);
+ uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+ uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
+#endif
}
#else
UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
@@ -1533,8 +1543,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-#endif
+ if (!cpu_has_64bits) {
+ const int scratch = 1; /* Our extra working register */
+
+ uasm_i_lui(p, scratch, (mode >> 16));
+ uasm_i_or(p, pte, pte, scratch);
+ } else
+#endif
uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
@@ -1598,15 +1614,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
} else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
} else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 3);
+ uasm_i_xori(p, t, t, 3);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
@@ -1635,8 +1653,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
{
int t = scratch >= 0 ? scratch : pte;
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 5);
+ uasm_i_xori(p, t, t, 5);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
@@ -1672,7 +1691,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
uasm_i_nop(p);
} else {
int t = scratch >= 0 ? scratch : pte;
- uasm_i_andi(p, t, pte, _PAGE_WRITE);
+ uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
+ uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
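The new immediates assume that, once the PTE is shifted down by _PAGE_PRESENT_SHIFT, the tested software bits sit in adjacent positions (bit 0 present, bit 1 read, bit 2 write); that layout is an assumption about the matching pgtable-bits changes, which are not shown here. A short reading of the masks:

	/* after uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT):
	 *   andi t, t, 1       -> test PRESENT
	 *   andi/xori with 3   -> require PRESENT and READ to both be set
	 *   andi/xori with 5   -> require PRESENT and WRITE to both be set
	 */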
@@ -2285,6 +2305,11 @@ static void config_htw_params(void)
pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+
+ /* If XPA has been enabled, PTEs are 64-bit in size. */
+ if (read_c0_pagegrain() & PG_ELPA)
+ pwsize |= 1;
+
write_c0_pwsize(pwsize);
/* Make sure everything is set before we enable the HTW */
@@ -2298,6 +2323,28 @@ static void config_htw_params(void)
print_htw_config();
}
+static void config_xpa_params(void)
+{
+#ifdef CONFIG_XPA
+ unsigned int pagegrain;
+
+ if (mips_xpa_disabled) {
+ pr_info("Extended Physical Addressing (XPA) disabled\n");
+ return;
+ }
+
+ pagegrain = read_c0_pagegrain();
+ write_c0_pagegrain(pagegrain | PG_ELPA);
+ back_to_back_c0_hazard();
+ pagegrain = read_c0_pagegrain();
+
+ if (pagegrain & PG_ELPA)
+ pr_info("Extended Physical Addressing (XPA) enabled\n");
+ else
+ panic("Extended Physical Addressing (XPA) disabled");
+#endif
+}
+
void build_tlb_refill_handler(void)
{
/*
@@ -2362,8 +2409,9 @@ void build_tlb_refill_handler(void)
}
if (cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
+ if (cpu_has_xpa)
+ config_xpa_params();
if (cpu_has_htw)
config_htw_params();
-
}
}
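Taken together, these tlbex.c hunks wire up XPA: the early "noxpa" parameter can veto it, and config_xpa_params() otherwise sets PG_ELPA and checks that the bit stuck. A usage sketch (everything on the boot line other than the noxpa token is illustrative):

	/* Kernel command line example (illustrative):
	 *   console=ttyS0,115200 noxpa
	 * boots with Extended Physical Addressing left disabled.
	 */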
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 6849f533154f..cec3e187c48f 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
-#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
#include <asm/cacheflush.h>
#include <asm/smp-ops.h>
@@ -75,7 +75,7 @@ static void __init console_config(void)
if ((strstr(fw_getcmdline(), "earlycon=")) == NULL) {
sprintf(console_string, "uart8250,io,0x3f8,%d%c%c", baud,
parity, bits);
- setup_early_serial8250_console(console_string);
+ setup_earlycon(console_string);
}
if ((strstr(fw_getcmdline(), "console=")) == NULL) {
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 8fddd2cdbff7..b769657be4d4 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <asm/bootinfo.h>
+#include <asm/cdmm.h>
#include <asm/maar.h>
#include <asm/sections.h>
#include <asm/fw/fw.h>
@@ -53,6 +54,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
physical_memsize = 0x02000000;
} else {
+ if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
+ pr_warn("Unsupported memsize value (0x%lx) detected! "
+ "Using 0x10000000 (256M) instead\n",
+ memsize);
+ memsize = 256 << 20;
+ }
/* If ememsize is set, then set physical_memsize to that */
physical_memsize = ememsize ? : memsize;
}
@@ -196,3 +203,9 @@ unsigned platform_maar_init(unsigned num_pairs)
return maar_config(cfg, num_cfg, num_pairs);
}
+
+phys_addr_t mips_cdmm_phys_base(void)
+{
+ /* This address is "typically unused" */
+ return 0x1fc10000;
+}
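For the memsize clamp above, 256 MB is 256 << 20, i.e. 0x10000000 bytes, which matches the value quoted in the warning message:

	/* 256 << 20 == 256 * 1048576 == 268435456 == 0x10000000 */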
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index ce02dbdedc62..185e68261f45 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -87,8 +87,10 @@ static void __init estimate_frequencies(void)
/* Initialize counters. */
start = read_c0_count();
- if (gic_present)
+ if (gic_present) {
+ gic_start_count();
gicstart = gic_read_count();
+ }
/* Read counter exactly on falling edge of update flag. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
@@ -115,6 +117,22 @@ void read_persistent_clock(struct timespec *ts)
ts->tv_nsec = 0;
}
+int get_c0_fdc_int(void)
+{
+ int mips_cpu_fdc_irq;
+
+ if (cpu_has_veic)
+ mips_cpu_fdc_irq = -1;
+ else if (gic_present)
+ mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+ else if (cp0_fdc_irq >= 0)
+ mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ else
+ mips_cpu_fdc_irq = -1;
+
+ return mips_cpu_fdc_irq;
+}
+
int get_c0_perfcount_int(void)
{
if (cpu_has_veic) {
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 2ae49e99eb67..ecd71db6258b 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -9,14 +9,11 @@
# Steven J. Hill <sjhill@mips.com>
#
obj-y := sead3-lcd.o sead3-display.o sead3-init.o \
- sead3-int.o sead3-mtd.o sead3-net.o \
- sead3-platform.o sead3-reset.o \
+ sead3-int.o sead3-platform.o sead3-reset.o \
sead3-setup.o sead3-time.o
-obj-y += sead3-i2c-dev.o sead3-i2c.o \
- leds-sead3.o sead3-leds.o
+obj-y += leds-sead3.o
obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o
-obj-$(CONFIG_USB_EHCI_HCD) += sead3-ehci.o
CFLAGS_sead3-setup.o = -I$(src)/../../../scripts/dtc/libfdt
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index 3abe47b316aa..c938ceeb8848 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -4,6 +4,7 @@
* for more details.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2015 Imagination Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -13,22 +14,18 @@
#include <linux/err.h>
#include <linux/io.h>
-#define DRVNAME "sead3-led"
-
-static struct platform_device *pdev;
+#include <asm/mips-boards/sead3-addr.h>
static void sead3_pled_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
- pr_debug("sead3_pled_set\n");
- writel(value, (void __iomem *)0xBF000210); /* FIXME */
+ writel(value, (void __iomem *)SEAD3_CPLD_P_LED);
}
static void sead3_fled_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
- pr_debug("sead3_fled_set\n");
- writel(value, (void __iomem *)0xBF000218); /* FIXME */
+ writel(value, (void __iomem *)SEAD3_CPLD_F_LED);
}
static struct led_classdev sead3_pled = {
@@ -69,37 +66,11 @@ static struct platform_driver sead3_led_driver = {
.probe = sead3_led_probe,
.remove = sead3_led_remove,
.driver = {
- .name = DRVNAME,
+ .name = "sead3-led",
},
};
-static int __init sead3_led_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&sead3_led_driver);
- if (ret < 0)
- goto out;
-
- pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
- platform_driver_unregister(&sead3_led_driver);
- goto out;
- }
-
-out:
- return ret;
-}
-
-static void __exit sead3_led_exit(void)
-{
- platform_device_unregister(pdev);
- platform_driver_unregister(&sead3_led_driver);
-}
-
-module_init(sead3_led_init);
-module_exit(sead3_led_exit);
+module_platform_driver(sead3_led_driver);
MODULE_AUTHOR("Kristian Kielhofner <kris@krisk.org>");
MODULE_DESCRIPTION("SEAD3 LED driver");
diff --git a/arch/mips/mti-sead3/sead3-ehci.c b/arch/mips/mti-sead3/sead3-ehci.c
deleted file mode 100644
index 014dd7ba4d68..000000000000
--- a/arch/mips/mti-sead3/sead3-ehci.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/irqchip/mips-gic.h>
-
-#include <asm/mips-boards/sead3int.h>
-
-struct resource ehci_resources[] = {
- {
- .start = 0x1b200000,
- .end = 0x1b200fff,
- .flags = IORESOURCE_MEM
- },
- {
- .flags = IORESOURCE_IRQ
- }
-};
-
-u64 sead3_usbdev_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ehci_device = {
- .name = "sead3-ehci",
- .id = 0,
- .dev = {
- .dma_mask = &sead3_usbdev_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32)
- },
- .num_resources = ARRAY_SIZE(ehci_resources),
- .resource = ehci_resources
-};
-
-static int __init ehci_init(void)
-{
- if (gic_present)
- ehci_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_EHCI;
- else
- ehci_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_EHCI;
- return platform_device_register(&ehci_device);
-}
-
-module_init(ehci_init);
-
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("EHCI probe driver for SEAD3");
diff --git a/arch/mips/mti-sead3/sead3-i2c-dev.c b/arch/mips/mti-sead3/sead3-i2c-dev.c
deleted file mode 100644
index eca0b53a71dd..000000000000
--- a/arch/mips/mti-sead3/sead3-i2c-dev.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/i2c.h>
-
-static struct i2c_board_info __initdata sead3_i2c_devices[] = {
- {
- I2C_BOARD_INFO("adt7476", 0x2c),
- .irq = 0,
- },
- {
- I2C_BOARD_INFO("m41t80", 0x68),
- .irq = 0,
- },
-};
-
-static int __init sead3_i2c_init(void)
-{
- int err;
-
- err = i2c_register_board_info(0, sead3_i2c_devices,
- ARRAY_SIZE(sead3_i2c_devices));
- if (err < 0)
- pr_err("sead3-i2c-dev: cannot register board I2C devices\n");
- return err;
-}
-
-arch_initcall(sead3_i2c_init);
diff --git a/arch/mips/mti-sead3/sead3-i2c-drv.c b/arch/mips/mti-sead3/sead3-i2c-drv.c
deleted file mode 100644
index 2bebf0974e39..000000000000
--- a/arch/mips/mti-sead3/sead3-i2c-drv.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/platform_device.h>
-
-#define PIC32_I2CxCON 0x0000
-#define PIC32_I2CCON_ON (1<<15)
-#define PIC32_I2CCON_ACKDT (1<<5)
-#define PIC32_I2CCON_ACKEN (1<<4)
-#define PIC32_I2CCON_RCEN (1<<3)
-#define PIC32_I2CCON_PEN (1<<2)
-#define PIC32_I2CCON_RSEN (1<<1)
-#define PIC32_I2CCON_SEN (1<<0)
-#define PIC32_I2CxCONCLR 0x0004
-#define PIC32_I2CxCONSET 0x0008
-#define PIC32_I2CxSTAT 0x0010
-#define PIC32_I2CxSTATCLR 0x0014
-#define PIC32_I2CSTAT_ACKSTAT (1<<15)
-#define PIC32_I2CSTAT_TRSTAT (1<<14)
-#define PIC32_I2CSTAT_BCL (1<<10)
-#define PIC32_I2CSTAT_IWCOL (1<<7)
-#define PIC32_I2CSTAT_I2COV (1<<6)
-#define PIC32_I2CxBRG 0x0040
-#define PIC32_I2CxTRN 0x0050
-#define PIC32_I2CxRCV 0x0060
-
-static DEFINE_SPINLOCK(pic32_bus_lock);
-
-static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
-static void __iomem *bus_status = (void __iomem *)0xbf000060;
-
-#define DELAY() udelay(100)
-
-static inline unsigned int ioready(void)
-{
- return readl(bus_status) & 1;
-}
-
-static inline void wait_ioready(void)
-{
- do { } while (!ioready());
-}
-
-static inline void wait_ioclear(void)
-{
- do { } while (ioready());
-}
-
-static inline void check_ioclear(void)
-{
- if (ioready()) {
- do {
- (void) readl(bus_xfer);
- DELAY();
- } while (ioready());
- }
-}
-
-static u32 pic32_bus_readl(u32 reg)
-{
- unsigned long flags;
- u32 status, val;
-
- spin_lock_irqsave(&pic32_bus_lock, flags);
-
- check_ioclear();
- writel((0x01 << 24) | (reg & 0x00ffffff), bus_xfer);
- DELAY();
- wait_ioready();
- status = readl(bus_xfer);
- DELAY();
- val = readl(bus_xfer);
- wait_ioclear();
-
- spin_unlock_irqrestore(&pic32_bus_lock, flags);
-
- return val;
-}
-
-static void pic32_bus_writel(u32 val, u32 reg)
-{
- unsigned long flags;
- u32 status;
-
- spin_lock_irqsave(&pic32_bus_lock, flags);
-
- check_ioclear();
- writel((0x10 << 24) | (reg & 0x00ffffff), bus_xfer);
- DELAY();
- writel(val, bus_xfer);
- DELAY();
- wait_ioready();
- status = readl(bus_xfer);
- wait_ioclear();
-
- spin_unlock_irqrestore(&pic32_bus_lock, flags);
-}
-
-struct pic32_i2c_platform_data {
- u32 base;
- struct i2c_adapter adap;
- u32 xfer_timeout;
- u32 ack_timeout;
- u32 ctl_timeout;
-};
-
-static inline void pic32_i2c_start(struct pic32_i2c_platform_data *adap)
-{
- pic32_bus_writel(PIC32_I2CCON_SEN, adap->base + PIC32_I2CxCONSET);
-}
-
-static inline void pic32_i2c_stop(struct pic32_i2c_platform_data *adap)
-{
- pic32_bus_writel(PIC32_I2CCON_PEN, adap->base + PIC32_I2CxCONSET);
-}
-
-static inline void pic32_i2c_ack(struct pic32_i2c_platform_data *adap)
-{
- pic32_bus_writel(PIC32_I2CCON_ACKDT, adap->base + PIC32_I2CxCONCLR);
- pic32_bus_writel(PIC32_I2CCON_ACKEN, adap->base + PIC32_I2CxCONSET);
-}
-
-static inline void pic32_i2c_nack(struct pic32_i2c_platform_data *adap)
-{
- pic32_bus_writel(PIC32_I2CCON_ACKDT, adap->base + PIC32_I2CxCONSET);
- pic32_bus_writel(PIC32_I2CCON_ACKEN, adap->base + PIC32_I2CxCONSET);
-}
-
-static inline int pic32_i2c_idle(struct pic32_i2c_platform_data *adap)
-{
- int i;
-
- for (i = 0; i < adap->ctl_timeout; i++) {
- if (((pic32_bus_readl(adap->base + PIC32_I2CxCON) &
- (PIC32_I2CCON_ACKEN | PIC32_I2CCON_RCEN |
- PIC32_I2CCON_PEN | PIC32_I2CCON_RSEN |
- PIC32_I2CCON_SEN)) == 0) &&
- ((pic32_bus_readl(adap->base + PIC32_I2CxSTAT) &
- (PIC32_I2CSTAT_TRSTAT)) == 0))
- return 0;
- udelay(1);
- }
- return -ETIMEDOUT;
-}
-
-static inline u32 pic32_i2c_master_write(struct pic32_i2c_platform_data *adap,
- u32 byte)
-{
- pic32_bus_writel(byte, adap->base + PIC32_I2CxTRN);
- return pic32_bus_readl(adap->base + PIC32_I2CxSTAT) &
- PIC32_I2CSTAT_IWCOL;
-}
-
-static inline u32 pic32_i2c_master_read(struct pic32_i2c_platform_data *adap)
-{
- pic32_bus_writel(PIC32_I2CCON_RCEN, adap->base + PIC32_I2CxCONSET);
- while (pic32_bus_readl(adap->base + PIC32_I2CxCON) & PIC32_I2CCON_RCEN)
- ;
- pic32_bus_writel(PIC32_I2CSTAT_I2COV, adap->base + PIC32_I2CxSTATCLR);
- return pic32_bus_readl(adap->base + PIC32_I2CxRCV);
-}
-
-static int pic32_i2c_address(struct pic32_i2c_platform_data *adap,
- unsigned int addr, int rd)
-{
- pic32_i2c_idle(adap);
- pic32_i2c_start(adap);
- pic32_i2c_idle(adap);
-
- addr <<= 1;
- if (rd)
- addr |= 1;
-
- if (pic32_i2c_master_write(adap, addr))
- return -EIO;
- pic32_i2c_idle(adap);
- if (pic32_bus_readl(adap->base + PIC32_I2CxSTAT) &
- PIC32_I2CSTAT_ACKSTAT)
- return -EIO;
- return 0;
-}
-
-static int sead3_i2c_read(struct pic32_i2c_platform_data *adap,
- unsigned char *buf, unsigned int len)
-{
- u32 data;
- int i;
-
- i = 0;
- while (i < len) {
- data = pic32_i2c_master_read(adap);
- buf[i++] = data;
- if (i < len)
- pic32_i2c_ack(adap);
- else
- pic32_i2c_nack(adap);
- }
-
- pic32_i2c_stop(adap);
- pic32_i2c_idle(adap);
- return 0;
-}
-
-static int sead3_i2c_write(struct pic32_i2c_platform_data *adap,
- unsigned char *buf, unsigned int len)
-{
- int i;
- u32 data;
-
- i = 0;
- while (i < len) {
- data = buf[i];
- if (pic32_i2c_master_write(adap, data))
- return -EIO;
- pic32_i2c_idle(adap);
- if (pic32_bus_readl(adap->base + PIC32_I2CxSTAT) &
- PIC32_I2CSTAT_ACKSTAT)
- return -EIO;
- i++;
- }
-
- pic32_i2c_stop(adap);
- pic32_i2c_idle(adap);
- return 0;
-}
-
-static int sead3_pic32_platform_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs, int num)
-{
- struct pic32_i2c_platform_data *adap = i2c_adap->algo_data;
- struct i2c_msg *p;
- int i, err = 0;
-
- for (i = 0; i < num; i++) {
-#define __BUFSIZE 80
- int ii;
- static char buf[__BUFSIZE];
- char *b = buf;
-
- p = &msgs[i];
- b += sprintf(buf, " [%d bytes]", p->len);
- if ((p->flags & I2C_M_RD) == 0) {
- for (ii = 0; ii < p->len; ii++) {
- if (b < &buf[__BUFSIZE-4]) {
- b += sprintf(b, " %02x", p->buf[ii]);
- } else {
- strcat(b, "...");
- break;
- }
- }
- }
- }
-
- for (i = 0; !err && i < num; i++) {
- p = &msgs[i];
- err = pic32_i2c_address(adap, p->addr, p->flags & I2C_M_RD);
- if (err || !p->len)
- continue;
- if (p->flags & I2C_M_RD)
- err = sead3_i2c_read(adap, p->buf, p->len);
- else
- err = sead3_i2c_write(adap, p->buf, p->len);
- }
-
- /* Return the number of messages processed, or the error code. */
- if (err == 0)
- err = num;
-
- return err;
-}
-
-static u32 sead3_pic32_platform_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm sead3_platform_algo = {
- .master_xfer = sead3_pic32_platform_xfer,
- .functionality = sead3_pic32_platform_func,
-};
-
-static void sead3_i2c_platform_setup(struct pic32_i2c_platform_data *priv)
-{
- pic32_bus_writel(500, priv->base + PIC32_I2CxBRG);
- pic32_bus_writel(PIC32_I2CCON_ON, priv->base + PIC32_I2CxCONCLR);
- pic32_bus_writel(PIC32_I2CCON_ON, priv->base + PIC32_I2CxCONSET);
- pic32_bus_writel(PIC32_I2CSTAT_BCL | PIC32_I2CSTAT_IWCOL,
- priv->base + PIC32_I2CxSTATCLR);
-}
-
-static int sead3_i2c_platform_probe(struct platform_device *pdev)
-{
- struct pic32_i2c_platform_data *priv;
- struct resource *r;
- int ret;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- ret = -ENODEV;
- goto out;
- }
-
- priv = kzalloc(sizeof(struct pic32_i2c_platform_data), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto out;
- }
-
- priv->base = r->start;
- if (!priv->base) {
- ret = -EBUSY;
- goto out_mem;
- }
-
- priv->xfer_timeout = 200;
- priv->ack_timeout = 200;
- priv->ctl_timeout = 200;
-
- priv->adap.nr = pdev->id;
- priv->adap.algo = &sead3_platform_algo;
- priv->adap.algo_data = priv;
- priv->adap.dev.parent = &pdev->dev;
- strlcpy(priv->adap.name, "SEAD3 PIC32", sizeof(priv->adap.name));
-
- sead3_i2c_platform_setup(priv);
-
- ret = i2c_add_numbered_adapter(&priv->adap);
- if (ret == 0) {
- platform_set_drvdata(pdev, priv);
- return 0;
- }
-
-out_mem:
- kfree(priv);
-out:
- return ret;
-}
-
-static int sead3_i2c_platform_remove(struct platform_device *pdev)
-{
- struct pic32_i2c_platform_data *priv = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
- i2c_del_adapter(&priv->adap);
- kfree(priv);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int sead3_i2c_platform_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- dev_dbg(&pdev->dev, "i2c_platform_disable\n");
- return 0;
-}
-
-static int sead3_i2c_platform_resume(struct platform_device *pdev)
-{
- struct pic32_i2c_platform_data *priv = platform_get_drvdata(pdev);
-
- dev_dbg(&pdev->dev, "sead3_i2c_platform_setup\n");
- sead3_i2c_platform_setup(priv);
-
- return 0;
-}
-#else
-#define sead3_i2c_platform_suspend NULL
-#define sead3_i2c_platform_resume NULL
-#endif
-
-static struct platform_driver sead3_i2c_platform_driver = {
- .driver = {
- .name = "sead3-i2c",
- },
- .probe = sead3_i2c_platform_probe,
- .remove = sead3_i2c_platform_remove,
- .suspend = sead3_i2c_platform_suspend,
- .resume = sead3_i2c_platform_resume,
-};
-
-static int __init sead3_i2c_platform_init(void)
-{
- return platform_driver_register(&sead3_i2c_platform_driver);
-}
-module_init(sead3_i2c_platform_init);
-
-static void __exit sead3_i2c_platform_exit(void)
-{
- platform_driver_unregister(&sead3_i2c_platform_driver);
-}
-module_exit(sead3_i2c_platform_exit);
-
-MODULE_AUTHOR("Chris Dearman, MIPS Technologies INC.");
-MODULE_DESCRIPTION("SEAD3 PIC32 I2C driver");
-MODULE_LICENSE("GPL");
diff --git a/arch/mips/mti-sead3/sead3-i2c.c b/arch/mips/mti-sead3/sead3-i2c.c
deleted file mode 100644
index 795ae83894e0..000000000000
--- a/arch/mips/mti-sead3/sead3-i2c.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/platform_device.h>
-
-struct resource sead3_i2c_resources[] = {
- {
- .start = 0x805200,
- .end = 0x8053ff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device sead3_i2c_device = {
- .name = "sead3-i2c",
- .id = 0,
- .num_resources = ARRAY_SIZE(sead3_i2c_resources),
- .resource = sead3_i2c_resources,
-};
-
-static int __init sead3_i2c_init(void)
-{
- return platform_device_register(&sead3_i2c_device);
-}
-
-device_initcall(sead3_i2c_init);
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
index bfbd17b120a2..3572ea30173e 100644
--- a/arch/mips/mti-sead3/sead3-init.c
+++ b/arch/mips/mti-sead3/sead3-init.c
@@ -147,6 +147,6 @@ void __init prom_init(void)
#endif
}
-void prom_free_prom_memory(void)
+void __init prom_free_prom_memory(void)
{
}
diff --git a/arch/mips/mti-sead3/sead3-leds.c b/arch/mips/mti-sead3/sead3-leds.c
deleted file mode 100644
index c427c5778186..000000000000
--- a/arch/mips/mti-sead3/sead3-leds.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/leds.h>
-#include <linux/platform_device.h>
-
-#define LEDFLAGS(bits, shift) \
- ((bits << 8) | (shift << 8))
-
-#define LEDBITS(id, shift, bits) \
- .name = id #shift, \
- .flags = LEDFLAGS(bits, shift)
-
-struct led_info led_data_info[] = {
- { LEDBITS("bit", 0, 1) },
- { LEDBITS("bit", 1, 1) },
- { LEDBITS("bit", 2, 1) },
- { LEDBITS("bit", 3, 1) },
- { LEDBITS("bit", 4, 1) },
- { LEDBITS("bit", 5, 1) },
- { LEDBITS("bit", 6, 1) },
- { LEDBITS("bit", 7, 1) },
- { LEDBITS("all", 0, 8) },
-};
-
-static struct led_platform_data led_data = {
- .num_leds = ARRAY_SIZE(led_data_info),
- .leds = led_data_info
-};
-
-static struct resource pled_resources[] = {
- {
- .start = 0x1f000210,
- .end = 0x1f000217,
- .flags = IORESOURCE_MEM
- }
-};
-
-static struct platform_device pled_device = {
- .name = "sead3::pled",
- .id = 0,
- .dev = {
- .platform_data = &led_data,
- },
- .num_resources = ARRAY_SIZE(pled_resources),
- .resource = pled_resources
-};
-
-
-static struct resource fled_resources[] = {
- {
- .start = 0x1f000218,
- .end = 0x1f00021f,
- .flags = IORESOURCE_MEM
- }
-};
-
-static struct platform_device fled_device = {
- .name = "sead3::fled",
- .id = 0,
- .dev = {
- .platform_data = &led_data,
- },
- .num_resources = ARRAY_SIZE(fled_resources),
- .resource = fled_resources
-};
-
-static int __init led_init(void)
-{
- platform_device_register(&pled_device);
- return platform_device_register(&fled_device);
-}
-
-device_initcall(led_init);
diff --git a/arch/mips/mti-sead3/sead3-mtd.c b/arch/mips/mti-sead3/sead3-mtd.c
deleted file mode 100644
index f9c890d72677..000000000000
--- a/arch/mips/mti-sead3/sead3-mtd.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-
-static struct mtd_partition sead3_mtd_partitions[] = {
- {
- .name = "User FS",
- .offset = 0x00000000,
- .size = 0x01fc0000,
- }, {
- .name = "Board Config",
- .offset = 0x01fc0000,
- .size = 0x00040000,
- .mask_flags = MTD_WRITEABLE
- },
-};
-
-static struct physmap_flash_data sead3_flash_data = {
- .width = 4,
- .nr_parts = ARRAY_SIZE(sead3_mtd_partitions),
- .parts = sead3_mtd_partitions
-};
-
-static struct resource sead3_flash_resource = {
- .start = 0x1c000000,
- .end = 0x1dffffff,
- .flags = IORESOURCE_MEM
-};
-
-static struct platform_device sead3_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &sead3_flash_data,
- },
- .num_resources = 1,
- .resource = &sead3_flash_resource,
-};
-
-static int __init sead3_mtd_init(void)
-{
- platform_device_register(&sead3_flash);
-
- return 0;
-}
-device_initcall(sead3_mtd_init);
diff --git a/arch/mips/mti-sead3/sead3-net.c b/arch/mips/mti-sead3/sead3-net.c
deleted file mode 100644
index 46176b804576..000000000000
--- a/arch/mips/mti-sead3/sead3-net.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
-#include <linux/platform_device.h>
-#include <linux/smsc911x.h>
-
-#include <asm/mips-boards/sead3int.h>
-
-static struct smsc911x_platform_config sead3_smsc911x_data = {
- .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
- .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
- .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
- .phy_interface = PHY_INTERFACE_MODE_MII,
-};
-
-struct resource sead3_net_resources[] = {
- {
- .start = 0x1f010000,
- .end = 0x1f01ffff,
- .flags = IORESOURCE_MEM
- },
- {
- .flags = IORESOURCE_IRQ
- }
-};
-
-static struct platform_device sead3_net_device = {
- .name = "smsc911x",
- .id = 0,
- .dev = {
- .platform_data = &sead3_smsc911x_data,
- },
- .num_resources = ARRAY_SIZE(sead3_net_resources),
- .resource = sead3_net_resources
-};
-
-static int __init sead3_net_init(void)
-{
- if (gic_present)
- sead3_net_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_NET;
- else
- sead3_net_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_NET;
- return platform_device_register(&sead3_net_device);
-}
-
-module_init(sead3_net_init);
-
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Network probe driver for SEAD-3");
diff --git a/arch/mips/mti-sead3/sead3-platform.c b/arch/mips/mti-sead3/sead3-platform.c
index 53ee6f1f018d..73b73efbfb05 100644
--- a/arch/mips/mti-sead3/sead3-platform.c
+++ b/arch/mips/mti-sead3/sead3-platform.c
@@ -5,10 +5,15 @@
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
-#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/irqchip/mips-gic.h>
+#include <linux/leds.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <linux/smsc911x.h>
#include <asm/mips-boards/sead3int.h>
@@ -36,20 +41,183 @@ static struct platform_device uart8250_device = {
},
};
-static int __init uart8250_init(void)
+static struct smsc911x_platform_config sead3_smsc911x_data = {
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+ .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
+static struct resource sead3_net_resources[] = {
+ {
+ .start = 0x1f010000,
+ .end = 0x1f01ffff,
+ .flags = IORESOURCE_MEM
+ }, {
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device sead3_net_device = {
+ .name = "smsc911x",
+ .id = 0,
+ .dev = {
+ .platform_data = &sead3_smsc911x_data,
+ },
+ .num_resources = ARRAY_SIZE(sead3_net_resources),
+ .resource = sead3_net_resources
+};
+
+static struct mtd_partition sead3_mtd_partitions[] = {
+ {
+ .name = "User FS",
+ .offset = 0x00000000,
+ .size = 0x01fc0000,
+ }, {
+ .name = "Board Config",
+ .offset = 0x01fc0000,
+ .size = 0x00040000,
+ .mask_flags = MTD_WRITEABLE
+ },
+};
+
+static struct physmap_flash_data sead3_flash_data = {
+ .width = 4,
+ .nr_parts = ARRAY_SIZE(sead3_mtd_partitions),
+ .parts = sead3_mtd_partitions
+};
+
+static struct resource sead3_flash_resource = {
+ .start = 0x1c000000,
+ .end = 0x1dffffff,
+ .flags = IORESOURCE_MEM
+};
+
+static struct platform_device sead3_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &sead3_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &sead3_flash_resource,
+};
+
+#define LEDFLAGS(bits, shift) \
+ ((bits << 8) | (shift << 8))
+
+#define LEDBITS(id, shift, bits) \
+ .name = id #shift, \
+ .flags = LEDFLAGS(bits, shift)
+
+static struct led_info led_data_info[] = {
+ { LEDBITS("bit", 0, 1) },
+ { LEDBITS("bit", 1, 1) },
+ { LEDBITS("bit", 2, 1) },
+ { LEDBITS("bit", 3, 1) },
+ { LEDBITS("bit", 4, 1) },
+ { LEDBITS("bit", 5, 1) },
+ { LEDBITS("bit", 6, 1) },
+ { LEDBITS("bit", 7, 1) },
+ { LEDBITS("all", 0, 8) },
+};
+
+static struct led_platform_data led_data = {
+ .num_leds = ARRAY_SIZE(led_data_info),
+ .leds = led_data_info
+};
+
+static struct resource pled_resources[] = {
+ {
+ .start = 0x1f000210,
+ .end = 0x1f000217,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct platform_device pled_device = {
+ .name = "sead3::pled",
+ .id = 0,
+ .dev = {
+ .platform_data = &led_data,
+ },
+ .num_resources = ARRAY_SIZE(pled_resources),
+ .resource = pled_resources
+};
+
+
+static struct resource fled_resources[] = {
+ {
+ .start = 0x1f000218,
+ .end = 0x1f00021f,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct platform_device fled_device = {
+ .name = "sead3::fled",
+ .id = 0,
+ .dev = {
+ .platform_data = &led_data,
+ },
+ .num_resources = ARRAY_SIZE(fled_resources),
+ .resource = fled_resources
+};
+
+static struct platform_device sead3_led_device = {
+ .name = "sead3-led",
+ .id = -1,
+};
+
+static struct resource ehci_resources[] = {
+ {
+ .start = 0x1b200000,
+ .end = 0x1b200fff,
+ .flags = IORESOURCE_MEM
+ }, {
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static u64 sead3_usbdev_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device ehci_device = {
+ .name = "sead3-ehci",
+ .id = 0,
+ .dev = {
+ .dma_mask = &sead3_usbdev_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32)
+ },
+ .num_resources = ARRAY_SIZE(ehci_resources),
+ .resource = ehci_resources
+};
+
+static struct platform_device *sead3_platform_devices[] __initdata = {
+ &uart8250_device,
+ &sead3_flash,
+ &pled_device,
+ &fled_device,
+ &sead3_led_device,
+ &ehci_device,
+ &sead3_net_device,
+};
+
+static int __init sead3_platforms_device_init(void)
{
if (gic_present) {
uart8250_data[0].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART0;
uart8250_data[1].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART1;
+ ehci_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_EHCI;
+ sead3_net_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_NET;
} else {
uart8250_data[0].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART0;
uart8250_data[1].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART1;
+ ehci_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_EHCI;
+ sead3_net_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_NET;
}
- return platform_device_register(&uart8250_device);
-}
-module_init(uart8250_init);
+ return platform_add_devices(sead3_platform_devices,
+ ARRAY_SIZE(sead3_platform_devices));
+}
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("8250 UART probe driver for SEAD3");
+device_initcall(sead3_platforms_device_init);
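
The sead3-platform.c consolidation above follows a common board-file idiom: leave runtime-dependent IRQ resources blank in the static tables, fill them in once the interrupt-controller layout is known, then register everything in one call to platform_add_devices(). Purely as an illustration of that idiom (not part of the patch), a minimal sketch with a hypothetical "my-board-uart" device and a made-up IRQ number:

    #include <linux/init.h>
    #include <linux/ioport.h>
    #include <linux/kernel.h>
    #include <linux/platform_device.h>

    static struct resource my_uart_resources[] = {
        {
            .start = 0x1f000900,            /* hypothetical register window */
            .end   = 0x1f00093f,
            .flags = IORESOURCE_MEM,
        }, {
            .flags = IORESOURCE_IRQ,        /* IRQ filled in at init time */
        },
    };

    static struct platform_device my_uart_device = {
        .name          = "my-board-uart",   /* hypothetical driver name */
        .id            = 0,
        .num_resources = ARRAY_SIZE(my_uart_resources),
        .resource      = my_uart_resources,
    };

    static struct platform_device *my_board_devices[] __initdata = {
        &my_uart_device,
    };

    static int __init my_board_device_init(void)
    {
        /* Pick the IRQ once the interrupt routing is known. */
        my_uart_resources[1].start = 7;     /* hypothetical IRQ number */

        return platform_add_devices(my_board_devices,
                                    ARRAY_SIZE(my_board_devices));
    }
    device_initcall(my_board_device_init);
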
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 0823321c10e0..fb00606e352d 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -41,6 +41,15 @@ config DT_XLP_GVP
pointer to the kernel. The corresponding DTS file is at
arch/mips/netlogic/dts/xlp_gvp.dts
+config DT_XLP_RVP
+ bool "Built-in device tree for XLP RVP boards"
+ default y
+ help
+ Add an FDT blob for XLP RVP board into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_rvp.dts
+
config NLM_MULTINODE
bool "Support for multi-chip boards"
depends on NLM_XLP_BOARD
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index c100b9afa0ab..5f5d18b0e94d 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -230,16 +230,16 @@ static void nlm_init_node_irqs(int node)
}
}
-void nlm_smp_irq_init(int hwcpuid)
+void nlm_smp_irq_init(int hwtid)
{
- int node, cpu;
+ int cpu, node;
- node = nlm_cpuid_to_node(hwcpuid);
- cpu = hwcpuid % nlm_threads_per_node();
+ cpu = hwtid % nlm_threads_per_node();
+ node = hwtid / nlm_threads_per_node();
if (cpu == 0 && node != 0)
nlm_init_node_irqs(node);
- write_c0_eimr(nlm_current_node()->irqmask);
+ write_c0_eimr(nlm_get_node(node)->irqmask);
}
asmlinkage void plat_irq_dispatch(void)
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
index 701c4bcb9e47..edbab9b8691f 100644
--- a/arch/mips/netlogic/common/reset.S
+++ b/arch/mips/netlogic/common/reset.S
@@ -60,7 +60,7 @@
li t0, LSU_DEFEATURE
mfcr t1, t0
- lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
+ lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
or t1, t1, t2
mtcr t1, t0
@@ -235,6 +235,26 @@ EXPORT(nlm_boot_siblings)
mfc0 v0, CP0_EBASE, 1
andi v0, 0x3ff /* v0 <- node/core */
+ /*
+ * Errata: to avoid potential live lock, setup IFU_BRUB_RESERVE
+ * when running 4 threads per core
+ */
+ andi v1, v0, 0x3 /* v1 <- thread id */
+ bnez v1, 2f
+ nop
+
+ /* thread 0 of each core. */
+ li t0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
+ subu t1, 0x3 /* 4-thread per core mode? */
+ bnez t1, 2f
+ nop
+
+ li t0, IFU_BRUB_RESERVE
+ li t1, 0x55
+ mtcr t1, t0
+ _ehb
+2:
beqz v0, 4f /* boot cpu (cpuid == 0)? */
nop
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index e743bdd6e20c..dc3e327fbbac 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,17 +59,17 @@
void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
- int cpu, node;
+ unsigned int hwtid;
uint64_t picbase;
- cpu = cpu_logical_map(logical_cpu);
- node = nlm_cpuid_to_node(cpu);
- picbase = nlm_get_node(node)->picbase;
+ /* node id is part of hwtid, and needed for send_ipi */
+ hwtid = cpu_logical_map(logical_cpu);
+ picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
if (action & SMP_CALL_FUNCTION)
- nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+ nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_FUNCTION, 0);
if (action & SMP_RESCHEDULE_YOURSELF)
- nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+ nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_RESCHEDULE, 0);
}
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -120,6 +120,7 @@ static void nlm_init_secondary(void)
hwtid = hard_smp_processor_id();
current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
+ current_cpu_data.package = nlm_nodeid();
nlm_percpu_init(hwtid);
nlm_smp_irq_init(hwtid);
}
@@ -145,16 +146,18 @@ static cpumask_t phys_cpu_present_mask;
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
- int cpu, node;
+ uint64_t picbase;
+ int hwtid;
+
+ hwtid = cpu_logical_map(logical_cpu);
+ picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
- cpu = cpu_logical_map(logical_cpu);
- node = nlm_cpuid_to_node(logical_cpu);
nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
nlm_next_gp = (unsigned long)task_thread_info(idle);
/* barrier for sp/gp store above */
__sync();
- nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */
+ nlm_pic_send_ipi(picbase, hwtid, 1, 1); /* NMI */
}
void __init nlm_smp_setup(void)
@@ -182,7 +185,7 @@ void __init nlm_smp_setup(void)
__cpu_number_map[i] = num_cpus;
__cpu_logical_map[num_cpus] = i;
set_cpu_possible(num_cpus, true);
- node = nlm_cpuid_to_node(i);
+ node = nlm_hwtid_to_node(i);
cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
++num_cpus;
}
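
A note on the hwtid arithmetic in the irq.c and smp.c hunks above: the hardware thread id encodes both the node number and the per-node CPU index, so both can be recovered with a divide and a modulo instead of the old nlm_cpuid_to_node() lookup. A tiny illustrative sketch, where THREADS_PER_NODE is a hypothetical stand-in for nlm_threads_per_node():

    #define THREADS_PER_NODE 32             /* hypothetical; depends on the SoC */

    static inline int hwtid_to_node(int hwtid)
    {
        return hwtid / THREADS_PER_NODE;    /* which chip/node */
    }

    static inline int hwtid_to_local_cpu(int hwtid)
    {
        return hwtid % THREADS_PER_NODE;    /* thread index within the node */
    }
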
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index 0c0a1a606f73..5873c83e65be 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -40,7 +40,6 @@
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/haldefs.h>
-#include <asm/netlogic/common.h>
#if defined(CONFIG_CPU_XLP)
#include <asm/netlogic/xlp-hal/iomap.h>
diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
index c83dbf3689e2..7b066a44e679 100644
--- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
@@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
static void config_sata_phy(u64 regbase)
{
u32 port, i, reg;
+ u8 val;
for (port = 0; port < 2; port++) {
for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
@@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase)
for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
+
+ /* Fix for PHY link up failures at lower temperatures */
+ write_phy_reg(regbase, 0x800F, port, 0x1f);
+
+ val = read_phy_reg(regbase, 0x0029, port);
+ write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
+
+ val = read_phy_reg(regbase, 0x0056, port);
+ write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
+
+ val = read_phy_reg(regbase, 0x0018, port);
+ write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
}
}
diff --git a/arch/mips/netlogic/xlp/ahci-init.c b/arch/mips/netlogic/xlp/ahci-init.c
index a9d0fae02103..92be1a3258b1 100644
--- a/arch/mips/netlogic/xlp/ahci-init.c
+++ b/arch/mips/netlogic/xlp/ahci-init.c
@@ -151,7 +151,7 @@ static void nlm_sata_firmware_init(int node)
static int __init nlm_ahci_init(void)
{
int node = 0;
- int chip = read_c0_prid() & PRID_REV_MASK;
+ int chip = read_c0_prid() & PRID_IMP_MASK;
if (chip == PRID_IMP_NETLOGIC_XLP3XX)
nlm_sata_firmware_init(node);
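
The one-line ahci-init.c fix above matters because the MIPS PRId register packs several fields: PRID_REV_MASK selects only the revision, while PRID_IMP_MASK selects the implementation number that PRID_IMP_NETLOGIC_XLP3XX actually encodes. A small sketch of splitting the fields, for illustration only:

    #include <linux/printk.h>
    #include <asm/cpu.h>
    #include <asm/mipsregs.h>

    static void my_show_prid(void)
    {
        unsigned int prid = read_c0_prid();

        pr_info("PRId %08x: implementation %04x, revision %02x\n",
                prid, prid & PRID_IMP_MASK, prid & PRID_REV_MASK);
    }
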
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index 7cc46032b28e..a625bdb6d6aa 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -41,17 +41,21 @@
#include <asm/prom.h>
-extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[],
- __dtb_xlp_fvp_begin[], __dtb_xlp_gvp_begin[];
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_xlp_fvp_begin[],
+ __dtb_xlp_gvp_begin[], __dtb_xlp_rvp_begin[];
static void *xlp_fdt_blob;
void __init *xlp_dt_init(void *fdtp)
{
if (!fdtp) {
switch (current_cpu_data.processor_id & PRID_IMP_MASK) {
+#ifdef CONFIG_DT_XLP_RVP
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ fdtp = __dtb_xlp_rvp_begin;
+ break;
+#endif
#ifdef CONFIG_DT_XLP_GVP
case PRID_IMP_NETLOGIC_XLP9XX:
- case PRID_IMP_NETLOGIC_XLP5XX:
fdtp = __dtb_xlp_gvp_begin;
break;
#endif
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index bc24beb3a426..a8f4144a0297 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -71,10 +71,20 @@ static int xlp9xx_irq_to_irt(int irq)
switch (irq) {
case PIC_GPIO_IRQ:
return 12;
+ case PIC_I2C_0_IRQ:
+ return 125;
+ case PIC_I2C_1_IRQ:
+ return 126;
+ case PIC_I2C_2_IRQ:
+ return 127;
+ case PIC_I2C_3_IRQ:
+ return 128;
case PIC_9XX_XHCI_0_IRQ:
return 114;
case PIC_9XX_XHCI_1_IRQ:
return 115;
+ case PIC_9XX_XHCI_2_IRQ:
+ return 116;
case PIC_UART_0_IRQ:
return 133;
case PIC_UART_1_IRQ:
@@ -170,16 +180,23 @@ static int xlp_irq_to_irt(int irq)
}
if (devoff != 0) {
+ uint32_t val;
+
pcibase = nlm_pcicfg_base(devoff);
- irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
- /* HW weirdness, I2C IRT entry has to be fixed up */
- switch (irq) {
- case PIC_I2C_1_IRQ:
- irt = irt + 1; break;
- case PIC_I2C_2_IRQ:
- irt = irt + 2; break;
- case PIC_I2C_3_IRQ:
- irt = irt + 3; break;
+ val = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG);
+ if (val == 0xffffffff) {
+ irt = -1;
+ } else {
+ irt = val & 0xffff;
+ /* HW weirdness, I2C IRT entry has to be fixed up */
+ switch (irq) {
+ case PIC_I2C_1_IRQ:
+ irt = irt + 1; break;
+ case PIC_I2C_2_IRQ:
+ irt = irt + 2; break;
+ case PIC_I2C_3_IRQ:
+ irt = irt + 3; break;
+ }
}
} else if (irq >= PIC_PCIE_LINK_LEGACY_IRQ(0) &&
irq <= PIC_PCIE_LINK_LEGACY_IRQ(3)) {
@@ -325,7 +342,7 @@ static unsigned int nlm_xlp2_get_pic_frequency(int node)
/* Find the clock source PLL device for PIC */
if (cpu_xlp9xx) {
reg_select = nlm_read_sys_reg(clockbase,
- SYS_9XX_CLK_DEV_SEL) & 0x3;
+ SYS_9XX_CLK_DEV_SEL_REG) & 0x3;
switch (reg_select) {
case 0:
ctrl_val0 = nlm_read_sys_reg(clockbase,
@@ -354,7 +371,7 @@ static unsigned int nlm_xlp2_get_pic_frequency(int node)
}
} else {
reg_select = (nlm_read_sys_reg(sysbase,
- SYS_CLK_DEV_SEL) >> 22) & 0x3;
+ SYS_CLK_DEV_SEL_REG) >> 22) & 0x3;
switch (reg_select) {
case 0:
ctrl_val0 = nlm_read_sys_reg(sysbase,
@@ -410,7 +427,7 @@ static unsigned int nlm_xlp2_get_pic_frequency(int node)
fdiv = fdiv/(1 << 13);
pll_out_freq_num = ((ref_clk >> 1) * (6 + mdiv)) + fdiv;
- pll_out_freq_den = (1 << vco_post_div) * pll_post_div * 3;
+ pll_out_freq_den = (1 << vco_post_div) * pll_post_div * ref_div;
if (pll_out_freq_den > 0)
do_div(pll_out_freq_num, pll_out_freq_den);
@@ -418,10 +435,10 @@ static unsigned int nlm_xlp2_get_pic_frequency(int node)
/* PIC post divider, which happens after PLL */
if (cpu_xlp9xx)
pic_div = nlm_read_sys_reg(clockbase,
- SYS_9XX_CLK_DEV_DIV) & 0x3;
+ SYS_9XX_CLK_DEV_DIV_REG) & 0x3;
else
pic_div = (nlm_read_sys_reg(sysbase,
- SYS_CLK_DEV_DIV) >> 22) & 0x3;
+ SYS_CLK_DEV_DIV_REG) >> 22) & 0x3;
do_div(pll_out_freq_num, 1 << pic_div);
return pll_out_freq_num;
@@ -442,19 +459,21 @@ unsigned int nlm_get_cpu_frequency(void)
/*
* Fills up to 8 pairs of entries containing the DRAM map of a node
- * if n < 0, get dram map for all nodes
+ * if node < 0, get dram map for all nodes
*/
-int xlp_get_dram_map(int n, uint64_t *dram_map)
+int nlm_get_dram_map(int node, uint64_t *dram_map, int nentries)
{
uint64_t bridgebase, base, lim;
uint32_t val;
unsigned int barreg, limreg, xlatreg;
- int i, node, rv;
+ int i, n, rv;
/* Look only at mapping on Node 0, we don't handle crazy configs */
bridgebase = nlm_get_bridge_regbase(0);
rv = 0;
for (i = 0; i < 8; i++) {
+ if (rv + 1 >= nentries)
+ break;
if (cpu_is_xlp9xx()) {
barreg = BRIDGE_9XX_DRAM_BAR(i);
limreg = BRIDGE_9XX_DRAM_LIMIT(i);
@@ -464,10 +483,10 @@ int xlp_get_dram_map(int n, uint64_t *dram_map)
limreg = BRIDGE_DRAM_LIMIT(i);
xlatreg = BRIDGE_DRAM_NODE_TRANSLN(i);
}
- if (n >= 0) {
+ if (node >= 0) {
/* node specified, get node mapping of BAR */
val = nlm_read_bridge_reg(bridgebase, xlatreg);
- node = (val >> 1) & 0x3;
+ n = (val >> 1) & 0x3;
if (n != node)
continue;
}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 4fdd9fd29d1d..f743fd9da323 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -51,7 +51,6 @@ uint64_t nlm_io_base;
struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
cpumask_t nlm_cpumask = CPU_MASK_CPU0;
unsigned int nlm_threads_per_core;
-unsigned int xlp_cores_per_node;
static void nlm_linux_exit(void)
{
@@ -82,7 +81,7 @@ static void __init xlp_init_mem_from_bars(void)
uint64_t map[16];
int i, n;
- n = xlp_get_dram_map(-1, map); /* -1: info for all nodes */
+ n = nlm_get_dram_map(-1, map, ARRAY_SIZE(map)); /* -1 : all nodes */
for (i = 0; i < n; i += 2) {
/* exclude 0x1000_0000-0x2000_0000, u-boot device */
if (map[i] <= 0x10000000 && map[i+1] > 0x10000000)
@@ -163,10 +162,6 @@ void __init prom_init(void)
void *reset_vec;
nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
- if (cpu_is_xlp9xx())
- xlp_cores_per_node = 32;
- else
- xlp_cores_per_node = 8;
nlm_init_boot_cpu();
xlp_mmu_init();
nlm_node_init(0);
diff --git a/arch/mips/netlogic/xlp/usb-init-xlp2.c b/arch/mips/netlogic/xlp/usb-init-xlp2.c
index 17ade1ce5dfd..2524939a5e3a 100644
--- a/arch/mips/netlogic/xlp/usb-init-xlp2.c
+++ b/arch/mips/netlogic/xlp/usb-init-xlp2.c
@@ -128,6 +128,9 @@ static void xlp9xx_usb_ack(struct irq_data *data)
case PIC_9XX_XHCI_1_IRQ:
port_addr = nlm_xlpii_get_usb_regbase(node, 2);
break;
+ case PIC_9XX_XHCI_2_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(node, 3);
+ break;
default:
pr_err("No matching USB irq %d node %d!\n", irq, node);
return;
@@ -222,14 +225,16 @@ static int __init nlm_platform_xlpii_usb_init(void)
}
/* XLP 9XX, multi-node */
- pr_info("Initializing 9XX USB Interface\n");
+ pr_info("Initializing 9XX/5XX USB Interface\n");
for (node = 0; node < NLM_NR_NODES; node++) {
if (!nlm_node_present(node))
continue;
nlm_xlpii_usb_hw_reset(node, 1);
nlm_xlpii_usb_hw_reset(node, 2);
+ nlm_xlpii_usb_hw_reset(node, 3);
nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_0_IRQ, xlp9xx_usb_ack);
nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_1_IRQ, xlp9xx_usb_ack);
+ nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_2_IRQ, xlp9xx_usb_ack);
}
return 0;
}
@@ -253,6 +258,9 @@ static void nlm_xlp9xx_usb_fixup_final(struct pci_dev *dev)
case 0x22:
dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_1_IRQ);
break;
+ case 0x23:
+ dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_2_IRQ);
+ break;
}
}
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index e5f44d2605a8..87d7846af2d0 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -99,7 +99,7 @@ static int wait_for_cpus(int cpu, int bootcpu)
do {
notready = nlm_threads_per_core;
for (i = 0; i < nlm_threads_per_core; i++)
- if (cpu_ready[cpu + i] || cpu == bootcpu)
+ if (cpu_ready[cpu + i] || (cpu + i) == bootcpu)
--notready;
} while (notready != 0 && --count > 0);
@@ -111,7 +111,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
struct nlm_soc_info *nodep;
uint64_t syspcibase, fusebase;
uint32_t syscoremask, mask, fusemask;
- int core, n, cpu;
+ int core, n, cpu, ncores;
for (n = 0; n < NLM_NR_NODES; n++) {
if (n != 0) {
@@ -168,7 +168,8 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
- for (core = 0; core < nlm_cores_per_node(); core++) {
+ ncores = nlm_cores_per_node();
+ for (core = 0; core < ncores; core++) {
/* we will be on node 0 core 0 */
if (n == 0 && core == 0)
continue;
@@ -178,8 +179,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
continue;
/* see if at least the first hw thread is enabled */
- cpu = (n * nlm_cores_per_node() + core)
- * NLM_THREADS_PER_CORE;
+ cpu = (n * ncores + core) * NLM_THREADS_PER_CORE;
if (!cpumask_test_cpu(cpu, wakeup_mask))
continue;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index a26cbe372e06..81f58958cf08 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -98,6 +98,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
case CPU_XLR:
lmodel = &op_model_mipsxx_ops;
break;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 01f721a85c5b..6a6e2cc55b89 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -246,7 +246,7 @@ static int mipsxx_perfcount_handler(void)
unsigned int counter;
int handled = IRQ_NONE;
- if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+ if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
return handled;
switch (counters) {
@@ -296,6 +296,7 @@ static inline int n_counters(void)
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
counters = 4;
break;
@@ -411,6 +412,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/r12000";
break;
+ case CPU_R16000:
+ op_model_mipsxx_ops.cpu_type = "mips/r16000";
+ break;
+
case CPU_SB1:
case CPU_SB1A:
op_model_mipsxx_ops.cpu_type = "mips/sb1";
@@ -435,15 +440,17 @@ static int __init mipsxx_init(void)
if (get_c0_perfcount_int)
perfcount_irq = get_c0_perfcount_int();
- else if ((cp0_perfcount_irq >= 0) &&
- (cp0_compare_irq != cp0_perfcount_irq))
+ else if (cp0_perfcount_irq >= 0)
perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
else
perfcount_irq = -1;
if (perfcount_irq >= 0)
return request_irq(perfcount_irq, mipsxx_perfcount_int,
- 0, "Perfcounter", save_perf_irq);
+ IRQF_PERCPU | IRQF_NOBALANCING |
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND |
+ IRQF_SHARED,
+ "Perfcounter", save_perf_irq);
return 0;
}
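
On the request_irq() flags added above: the performance-counter interrupt is inherently per-CPU and may share a line with the CP0 timer, which is why the old cp0_compare_irq != cp0_perfcount_irq guard can be dropped once IRQF_SHARED is passed. A sketch of such a request, with a hypothetical handler and caller-supplied IRQ number, not a definitive implementation:

    #include <linux/interrupt.h>

    static irqreturn_t my_perfcount_handler(int irq, void *dev_id)
    {
        /* A real handler would read the counters and ack the overflow. */
        return IRQ_HANDLED;
    }

    static int my_perfcount_request(int irq, void *dev_id)
    {
        /*
         * Per-CPU, unthreaded, never balanced or suspended; shareable
         * so it can coexist with the timer on the same line.
         */
        return request_irq(irq, my_perfcount_handler,
                           IRQF_PERCPU | IRQF_NOBALANCING |
                           IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED,
                           "my-perfcounter", dev_id);
    }
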
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 0164b0c48352..42181c7105df 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -75,7 +75,7 @@ static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int acti
{
unsigned int cpu;
- for_each_cpu_mask(cpu, *mask)
+ for_each_cpu(cpu, mask)
paravirt_send_ipi_single(cpu, action);
}
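
The paravirt-smp.c hunk is part of a wider cpumask API cleanup (the sgi-ip27 hunks further down do the same with cpumask_set_cpu()/cpumask_test_cpu()): the old by-value for_each_cpu_mask() is replaced by for_each_cpu(), which takes a pointer to the mask. A minimal sketch of iterating a mask with the current API, using a hypothetical caller:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    static void my_show_mask(const struct cpumask *mask)
    {
        unsigned int cpu;

        for_each_cpu(cpu, mask)
            pr_info("cpu %u is set\n", cpu);
    }
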
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 300591c6278d..2eda01e6e08f 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
-obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o
+obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index 6a40f24c91b4..3407495fcbe2 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -178,13 +178,6 @@ static void xlp_msi_mask_ack(struct irq_data *d)
else
nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
- /* Ack at eirr and PIC */
- ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
- if (cpu_is_xlp9xx())
- nlm_pic_ack(md->node->picbase,
- PIC_9XX_IRT_PCIE_LINK_INDEX(link));
- else
- nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
}
static struct irq_chip xlp_msi_chip = {
@@ -230,8 +223,6 @@ static void xlp_msix_mask_ack(struct irq_data *d)
}
nlm_write_reg(md->lnkbase, status_reg, 1u << bit);
- /* Ack at eirr and PIC */
- ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
if (!cpu_is_xlp9xx())
nlm_pic_ack(md->node->picbase,
PIC_IRT_PCIE_MSIX_INDEX(msixvec));
@@ -541,6 +532,14 @@ void nlm_dispatch_msi(int node, int lirq)
do_IRQ(irqbase + i);
status &= status - 1;
}
+
+ /* Ack at eirr and PIC */
+ ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
+ if (cpu_is_xlp9xx())
+ nlm_pic_ack(md->node->picbase,
+ PIC_9XX_IRT_PCIE_LINK_INDEX(link));
+ else
+ nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
}
void nlm_dispatch_msix(int node, int lirq)
@@ -567,4 +566,6 @@ void nlm_dispatch_msix(int node, int lirq)
do_IRQ(irqbase + i);
status &= status - 1;
}
+ /* Ack at eirr and PIC */
+ ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
}
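
The msi-xlp.c change above moves the EIRR/PIC acknowledgement out of the per-vector mask_ack path and to the end of the dispatch loop, so the link-level interrupt is re-armed only after every vector that was pending has been serviced; a vector arriving during the loop then raises a fresh link interrupt instead of being lost. A schematic sketch of that ordering, with the acknowledgement passed in as a hypothetical callback:

    #include <linux/bitops.h>
    #include <asm/irq.h>

    static void dispatch_then_ack(int irqbase, u32 status, void (*ack_link)(void))
    {
        /* Service every vector recorded in the status word first. */
        while (status) {
            int i = __ffs(status);

            do_IRQ(irqbase + i);
            status &= status - 1;           /* clear the lowest set bit */
        }

        /* Only now acknowledge the link interrupt at the EIRR/PIC level. */
        ack_link();
    }
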
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index bd2b3b60da83..07a18228e63a 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -488,7 +488,6 @@ static struct platform_driver ar2315_pci_driver = {
.probe = ar2315_pci_probe,
.driver = {
.name = "ar2315-pci",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index a04af55d89f1..c258cd406fbb 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void)
return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
case CVMX_BOARD_TYPE_BBGW_REF:
return "AABCD";
+ case CVMX_BOARD_TYPE_CUST_DSR1000N:
+ return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
case CVMX_BOARD_TYPE_THUNDER:
case CVMX_BOARD_TYPE_EBH3000:
default:
@@ -271,9 +273,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
pci_addr.s.func = devfn & 0x7;
pci_addr.s.reg = reg;
-#if PCI_CONFIG_SPACE_DELAY
- udelay(PCI_CONFIG_SPACE_DELAY);
-#endif
switch (size) {
case 4:
*val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
@@ -308,9 +307,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
pci_addr.s.func = devfn & 0x7;
pci_addr.s.reg = reg;
-#if PCI_CONFIG_SPACE_DELAY
- udelay(PCI_CONFIG_SPACE_DELAY);
-#endif
switch (size) {
case 4:
cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index a4574947e698..8a978022630b 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -267,7 +267,6 @@ static struct platform_driver rt288x_pci_driver = {
.probe = rt288x_pci_probe,
.driver = {
.name = "rt288x-pci",
- .owner = THIS_MODULE,
.of_match_table = rt288x_pci_match,
},
};
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 8bb13a4af68a..b8a0bf5766f2 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -91,7 +91,10 @@ static void pcibios_scanbus(struct pci_controller *hose)
pci_add_resource_offset(&resources,
hose->mem_resource, hose->mem_offset);
- pci_add_resource_offset(&resources, hose->io_resource, hose->io_offset);
+ pci_add_resource_offset(&resources,
+ hose->io_resource, hose->io_offset);
+ pci_add_resource_offset(&resources,
+ hose->busn_resource, hose->busn_offset);
bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
&resources);
hose->bus = bus;
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 1bb0b2bf8d6e..99f3db4f0a9b 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
default:
return PCIBIOS_FUNC_NOT_SUPPORTED;
}
-#if PCI_CONFIG_SPACE_DELAY
- /*
- * Delay on writes so that devices have time to come up. Some
- * bridges need this to allow time for the secondary busses to
- * work
- */
- udelay(PCI_CONFIG_SPACE_DELAY);
-#endif
return PCIBIOS_SUCCESSFUL;
}
diff --git a/arch/mips/pistachio/Makefile b/arch/mips/pistachio/Makefile
new file mode 100644
index 000000000000..32189c6ebea5
--- /dev/null
+++ b/arch/mips/pistachio/Makefile
@@ -0,0 +1 @@
+obj-y += init.o irq.o time.o
diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform
new file mode 100644
index 000000000000..d80cd612df1f
--- /dev/null
+++ b/arch/mips/pistachio/Platform
@@ -0,0 +1,8 @@
+#
+# IMG Pistachio SoC
+#
+platform-$(CONFIG_MACH_PISTACHIO) += pistachio/
+cflags-$(CONFIG_MACH_PISTACHIO) += \
+ -I$(srctree)/arch/mips/include/asm/mach-pistachio
+load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
+zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
new file mode 100644
index 000000000000..d2dc836523a3
--- /dev/null
+++ b/arch/mips/pistachio/init.c
@@ -0,0 +1,131 @@
+/*
+ * Pistachio platform setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma-coherence.h>
+#include <asm/fw/fw.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/traps.h>
+
+const char *get_system_type(void)
+{
+ return "IMG Pistachio SoC";
+}
+
+static void __init plat_setup_iocoherency(void)
+{
+ /*
+ * Kernel has been configured with software coherency
+ * but we might choose to turn it off and use hardware
+ * coherency instead.
+ */
+ if (mips_cm_numiocu() != 0) {
+ /* Nothing special needs to be done to enable coherency */
+ pr_info("CMP IOCU detected\n");
+ hw_coherentio = 1;
+ if (coherentio == 0)
+ pr_info("Hardware DMA cache coherency disabled\n");
+ else
+ pr_info("Hardware DMA cache coherency enabled\n");
+ } else {
+ if (coherentio == 1)
+ pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
+ else
+ pr_info("Software DMA cache coherency enabled\n");
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+ if (fw_arg0 != -2)
+ panic("Device-tree not present");
+
+ __dt_setup_arch((void *)fw_arg1);
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+
+ plat_setup_iocoherency();
+}
+
+#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+
+phys_addr_t mips_cpc_default_phys_base(void)
+{
+ return DEFAULT_CPC_BASE_ADDR;
+}
+
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+ extern char except_vec_nmi;
+
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa80) :
+ (void *)(CAC_BASE + 0x380);
+ memcpy(base, &except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base,
+ (unsigned long)base + 0x80);
+}
+
+static void __init mips_ejtag_setup(void)
+{
+ void *base;
+ extern char except_vec_ejtag_debug;
+
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa00) :
+ (void *)(CAC_BASE + 0x300);
+ memcpy(base, &except_vec_ejtag_debug, 0x80);
+ flush_icache_range((unsigned long)base,
+ (unsigned long)base + 0x80);
+}
+
+void __init prom_init(void)
+{
+ board_nmi_handler_setup = mips_nmi_setup;
+ board_ejtag_handler_setup = mips_ejtag_setup;
+
+ mips_cm_probe();
+ mips_cpc_probe();
+ register_cps_smp_ops();
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+void __init device_tree_init(void)
+{
+ if (!initial_boot_params)
+ return;
+
+ unflatten_and_copy_device_tree();
+}
+
+static int __init plat_of_setup(void)
+{
+ if (!of_have_populated_dt())
+ panic("Device tree not present");
+
+ if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
+ panic("Failed to populate DT");
+
+ return 0;
+}
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/pistachio/irq.c b/arch/mips/pistachio/irq.c
new file mode 100644
index 000000000000..0a6b24c24652
--- /dev/null
+++ b/arch/mips/pistachio/irq.c
@@ -0,0 +1,28 @@
+/*
+ * Pistachio IRQ setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/mips-gic.h>
+#include <linux/kernel.h>
+
+#include <asm/cpu-features.h>
+#include <asm/irq_cpu.h>
+
+void __init arch_init_irq(void)
+{
+ pr_info("EIC is %s\n", cpu_has_veic ? "on" : "off");
+ pr_info("VINT is %s\n", cpu_has_vint ? "on" : "off");
+
+ if (!cpu_has_veic)
+ mips_cpu_irq_init();
+
+ irqchip_init();
+}
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
new file mode 100644
index 000000000000..67889fcea8aa
--- /dev/null
+++ b/arch/mips/pistachio/time.c
@@ -0,0 +1,52 @@
+/*
+ * Pistachio clocksource/timer setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/irqchip/mips-gic.h>
+#include <linux/of.h>
+
+#include <asm/time.h>
+
+unsigned int get_c0_compare_int(void)
+{
+ return gic_get_c0_compare_int();
+}
+
+int get_c0_perfcount_int(void)
+{
+ return gic_get_c0_perfcount_int();
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+
+ of_clk_init(NULL);
+ clocksource_of_init();
+
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+ pr_err("Failed to get CPU node\n");
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+ return;
+ }
+
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+ clk_put(clk);
+}
diff --git a/arch/mips/power/Makefile b/arch/mips/power/Makefile
index 73d56b87cb9b..70bd7883bc1b 100644
--- a/arch/mips/power/Makefile
+++ b/arch/mips/power/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_HIBERNATION) += cpu.o hibernate.o
+obj-$(CONFIG_HIBERNATION) += cpu.o hibernate.o hibernate_asm.o
diff --git a/arch/mips/power/hibernate.c b/arch/mips/power/hibernate.c
new file mode 100644
index 000000000000..19a9af68bcdb
--- /dev/null
+++ b/arch/mips/power/hibernate.c
@@ -0,0 +1,10 @@
+#include <asm/tlbflush.h>
+
+extern int restore_image(void);
+
+int swsusp_arch_resume(void)
+{
+ /* Avoid TLB mismatch during and after kernel resume */
+ local_flush_tlb_all();
+ return restore_image();
+}
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate_asm.S
index 32a7c828f073..b1fab951100f 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate_asm.S
@@ -29,7 +29,7 @@ LEAF(swsusp_arch_suspend)
j swsusp_save
END(swsusp_arch_suspend)
-LEAF(swsusp_arch_resume)
+LEAF(restore_image)
PTR_L t0, restore_pblist
0:
PTR_L t1, PBE_ADDRESS(t0) /* source */
@@ -43,7 +43,6 @@ LEAF(swsusp_arch_resume)
bne t1, t3, 1b
PTR_L t0, PBE_NEXT(t0)
bnez t0, 0b
- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
PTR_LA t0, saved_regs
PTR_L ra, PT_R31(t0)
PTR_L sp, PT_R29(t0)
@@ -59,4 +58,4 @@ LEAF(swsusp_arch_resume)
PTR_L s7, PT_R23(t0)
PTR_LI v0, 0x0
jr ra
-END(swsusp_arch_resume)
+END(restore_image)
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index b1c52ca580f9..e9bc8c96174e 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -7,6 +7,11 @@ config CLKEVT_RT3352
select CLKSRC_OF
select CLKSRC_MMIO
+config RALINK_ILL_ACC
+ bool
+ depends on SOC_RT305X
+ default y
+
choice
prompt "Ralink SoC selection"
default SOC_RT305X
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index ee736bd103f8..570098bfdf87 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -60,7 +60,7 @@ static void per_hub_init(cnodeid_t cnode)
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
int i;
- cpu_set(smp_processor_id(), hub->h_cpus);
+ cpumask_set_cpu(smp_processor_id(), &hub->h_cpus);
if (test_and_set_bit(cnode, hub_init_mask))
return;
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index ecbb62f339c5..bda90cf87e8c 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -29,8 +29,8 @@ static cpumask_t ktext_repmask;
void __init setup_replication_mask(void)
{
/* Set only the master cnode's bit. The master cnode is always 0. */
- cpus_clear(ktext_repmask);
- cpu_set(0, ktext_repmask);
+ cpumask_clear(&ktext_repmask);
+ cpumask_set_cpu(0, &ktext_repmask);
#ifdef CONFIG_REPLICATE_KTEXT
#ifndef CONFIG_MAPPED_KERNEL
@@ -43,7 +43,7 @@ void __init setup_replication_mask(void)
if (cnode == 0)
continue;
/* Advertise that we have a copy of the kernel */
- cpu_set(cnode, ktext_repmask);
+ cpumask_set_cpu(cnode, &ktext_repmask);
}
}
#endif
@@ -99,7 +99,7 @@ void __init replicate_kernel_text()
client_nasid = COMPACT_TO_NASID_NODEID(cnode);
/* Check if this node should get a copy of the kernel */
- if (cpu_isset(cnode, ktext_repmask)) {
+ if (cpumask_test_cpu(cnode, &ktext_repmask)) {
server_nasid = client_nasid;
copy_kernel(server_nasid);
}
@@ -124,7 +124,7 @@ unsigned long node_getfirstfree(cnodeid_t cnode)
loadbase += 16777216;
#endif
offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
- if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask)))
+ if ((cnode == 0) || (cpumask_test_cpu(cnode, &ktext_repmask)))
return TO_NODE(nasid, offset) >> PAGE_SHIFT;
else
return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 0b68469e063f..8d0eb2643248 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -404,7 +404,7 @@ static void __init node_mem_init(cnodeid_t node)
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
- cpus_clear(hub_data(node)->h_cpus);
+ cpumask_clear(&hub_data(node)->h_cpus);
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
sizeof(struct hub_data));
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 1d97eaba0c5f..a6d10f607f34 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/sched_clock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/param.h>
@@ -159,11 +160,18 @@ struct clocksource hub_rt_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u64 notrace hub_rt_read_sched_clock(void)
+{
+ return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
+}
+
static void __init hub_rt_clocksource_init(void)
{
struct clocksource *cs = &hub_rt_clocksource;
clocksource_register_hz(cs, CYCLES_PER_SEC);
+
+ sched_clock_register(hub_rt_read_sched_clock, 52, CYCLES_PER_SEC);
}
void __init plat_time_init(void)
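
For context on the ip27-timer.c hunk: sched_clock_register() takes the read callback, the number of valid counter bits and the rate, so the scheduler clock above runs off the same free-running HUB RT counter as the clocksource. A minimal sketch of the same idiom for a hypothetical 32-bit memory-mapped counter:

    #include <linux/io.h>
    #include <linux/sched_clock.h>

    static void __iomem *my_counter;        /* hypothetical free-running counter */

    static u64 notrace my_read_sched_clock(void)
    {
        return readl(my_counter);
    }

    static void __init my_sched_clock_init(unsigned long rate_hz)
    {
        /* 32 valid bits, counting at rate_hz */
        sched_clock_register(my_read_sched_clock, 32, rate_hz);
    }
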
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
index 511e9ff2acfd..5a2a82148d8d 100644
--- a/arch/mips/sgi-ip32/ip32-platform.c
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -5,14 +5,16 @@
*
* Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <linux/rtc/ds1685.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
+extern void ip32_prepare_poweroff(void);
+
#define MACEISA_SERIAL1_OFFS offsetof(struct sgi_mace, isa.serial1)
#define MACEISA_SERIAL2_OFFS offsetof(struct sgi_mace, isa.serial2)
@@ -90,22 +92,47 @@ static __init int sgio2btns_devinit(void)
device_initcall(sgio2btns_devinit);
-static struct resource sgio2_cmos_rsrc[] = {
+#define MACE_RTC_RES_START (MACE_BASE + offsetof(struct sgi_mace, isa.rtc))
+#define MACE_RTC_RES_END (MACE_RTC_RES_START + 32767)
+
+static struct resource ip32_rtc_resources[] = {
{
- .start = 0x70,
- .end = 0x71,
- .flags = IORESOURCE_IO
+ .start = MACEISA_RTC_IRQ,
+ .end = MACEISA_RTC_IRQ,
+ .flags = IORESOURCE_IRQ
+ }, {
+ .start = MACE_RTC_RES_START,
+ .end = MACE_RTC_RES_END,
+ .flags = IORESOURCE_MEM,
}
};
-static __init int sgio2_cmos_devinit(void)
+/* RTC registers on IP32 are each padded by 256 bytes (0x100). */
+static struct ds1685_rtc_platform_data
+ip32_rtc_platform_data[] = {
+ {
+ .regstep = 0x100,
+ .bcd_mode = true,
+ .no_irq = false,
+ .uie_unsupported = false,
+ .alloc_io_resources = true,
+ .plat_prepare_poweroff = ip32_prepare_poweroff,
+ },
+};
+
+struct platform_device ip32_rtc_device = {
+ .name = "rtc-ds1685",
+ .id = -1,
+ .dev = {
+ .platform_data = ip32_rtc_platform_data,
+ },
+ .num_resources = ARRAY_SIZE(ip32_rtc_resources),
+ .resource = ip32_rtc_resources,
+};
+
+static __init int sgio2_rtc_devinit(void)
{
- return IS_ERR(platform_device_register_simple("rtc_cmos", -1,
- sgio2_cmos_rsrc, 1));
+ return platform_device_register(&ip32_rtc_device);
}
-device_initcall(sgio2_cmos_devinit);
-
-MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("8250 UART probe driver for SGI IP32 aka O2");
+device_initcall(sgio2_rtc_devinit);
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 44b3470a0bbb..8bd415c8729f 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -11,10 +11,11 @@
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/delay.h>
-#include <linux/ds17287rtc.h>
+#include <linux/rtc/ds1685.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
@@ -33,53 +34,40 @@
#define POWERDOWN_FREQ (HZ / 4)
#define PANIC_FREQ (HZ / 8)
-static struct timer_list power_timer, blink_timer, debounce_timer;
-static int has_panicked, shuting_down;
+extern struct platform_device ip32_rtc_device;
-static void ip32_machine_restart(char *command) __noreturn;
-static void ip32_machine_halt(void) __noreturn;
-static void ip32_machine_power_off(void) __noreturn;
+static struct timer_list power_timer, blink_timer;
+static int has_panicked, shutting_down;
-static void ip32_machine_restart(char *cmd)
+static __noreturn void ip32_poweroff(void *data)
{
- crime->control = CRIME_CONTROL_HARD_RESET;
- while (1);
-}
+ void (*poweroff_func)(struct platform_device *) =
+ symbol_get(ds1685_rtc_poweroff);
+
+#ifdef CONFIG_MODULES
+ /* If the first __symbol_get failed, our module wasn't loaded. */
+ if (!poweroff_func) {
+ request_module("rtc-ds1685");
+ poweroff_func = symbol_get(ds1685_rtc_poweroff);
+ }
+#endif
-static inline void ip32_machine_halt(void)
-{
- ip32_machine_power_off();
-}
+ if (!poweroff_func)
+ pr_emerg("RTC not available for power-off. Spinning forever ...\n");
+ else {
+ (*poweroff_func)((struct platform_device *)data);
+ symbol_put(ds1685_rtc_poweroff);
+ }
-static void ip32_machine_power_off(void)
-{
- unsigned char reg_a, xctrl_a, xctrl_b;
-
- disable_irq(MACEISA_RTC_IRQ);
- reg_a = CMOS_READ(RTC_REG_A);
-
- /* setup for kickstart & wake-up (DS12287 Ref. Man. p. 19) */
- reg_a &= ~DS_REGA_DV2;
- reg_a |= DS_REGA_DV1;
-
- CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
- wbflush();
- xctrl_b = CMOS_READ(DS_B1_XCTRL4B)
- | DS_XCTRL4B_ABE | DS_XCTRL4B_KFE;
- CMOS_WRITE(xctrl_b, DS_B1_XCTRL4B);
- xctrl_a = CMOS_READ(DS_B1_XCTRL4A) & ~DS_XCTRL4A_IFS;
- CMOS_WRITE(xctrl_a, DS_B1_XCTRL4A);
- wbflush();
- /* adios amigos... */
- CMOS_WRITE(xctrl_a | DS_XCTRL4A_PAB, DS_B1_XCTRL4A);
- CMOS_WRITE(reg_a, RTC_REG_A);
- wbflush();
- while (1);
+ unreachable();
}
-static void power_timeout(unsigned long data)
+static void ip32_machine_restart(char *cmd) __noreturn;
+static void ip32_machine_restart(char *cmd)
{
- ip32_machine_power_off();
+ msleep(20);
+ crime->control = CRIME_CONTROL_HARD_RESET;
+ unreachable();
}
static void blink_timeout(unsigned long data)
@@ -89,44 +77,27 @@ static void blink_timeout(unsigned long data)
mod_timer(&blink_timer, jiffies + data);
}
-static void debounce(unsigned long data)
+static void ip32_machine_halt(void)
{
- unsigned char reg_a, reg_c, xctrl_a;
-
- reg_c = CMOS_READ(RTC_INTR_FLAGS);
- reg_a = CMOS_READ(RTC_REG_A);
- CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
- wbflush();
- xctrl_a = CMOS_READ(DS_B1_XCTRL4A);
- if ((xctrl_a & DS_XCTRL4A_IFS) || (reg_c & RTC_IRQF )) {
- /* Interrupt still being sent. */
- debounce_timer.expires = jiffies + 50;
- add_timer(&debounce_timer);
-
- /* clear interrupt source */
- CMOS_WRITE(xctrl_a & ~DS_XCTRL4A_IFS, DS_B1_XCTRL4A);
- CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
- return;
- }
- CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
-
- if (has_panicked)
- ip32_machine_restart(NULL);
+ ip32_poweroff(&ip32_rtc_device);
+}
- enable_irq(MACEISA_RTC_IRQ);
+static void power_timeout(unsigned long data)
+{
+ ip32_poweroff(&ip32_rtc_device);
}
-static inline void ip32_power_button(void)
+void ip32_prepare_poweroff(void)
{
if (has_panicked)
return;
- if (shuting_down || kill_cad_pid(SIGINT, 1)) {
+ if (shutting_down || kill_cad_pid(SIGINT, 1)) {
/* No init process or button pressed twice. */
- ip32_machine_power_off();
+ ip32_poweroff(&ip32_rtc_device);
}
- shuting_down = 1;
+ shutting_down = 1;
blink_timer.data = POWERDOWN_FREQ;
blink_timeout(POWERDOWN_FREQ);
@@ -136,27 +107,6 @@ static inline void ip32_power_button(void)
add_timer(&power_timer);
}
-static irqreturn_t ip32_rtc_int(int irq, void *dev_id)
-{
- unsigned char reg_c;
-
- reg_c = CMOS_READ(RTC_INTR_FLAGS);
- if (!(reg_c & RTC_IRQF)) {
- printk(KERN_WARNING
- "%s: RTC IRQ without RTC_IRQF\n", __func__);
- }
- /* Wait until interrupt goes away */
- disable_irq_nosync(MACEISA_RTC_IRQ);
- init_timer(&debounce_timer);
- debounce_timer.function = debounce;
- debounce_timer.expires = jiffies + 50;
- add_timer(&debounce_timer);
-
- printk(KERN_DEBUG "Power button pressed\n");
- ip32_power_button();
- return IRQ_HANDLED;
-}
-
static int panic_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -190,15 +140,12 @@ static __init int ip32_reboot_setup(void)
_machine_restart = ip32_machine_restart;
_machine_halt = ip32_machine_halt;
- pm_power_off = ip32_machine_power_off;
+ pm_power_off = ip32_machine_halt;
init_timer(&blink_timer);
blink_timer.function = blink_timeout;
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
- if (request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL))
- panic("Can't allocate MACEISA RTC IRQ");
-
return 0;
}
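
The reworked ip32-reset.c calls into the rtc-ds1685 driver only if it is actually present, using symbol_get() and falling back to request_module() when the driver is built as a module. A sketch of that optional-symbol pattern, built around a hypothetical optional_helper() exported by a hypothetical "my-module":

    #include <linux/kmod.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    extern void optional_helper(void);      /* hypothetical export from "my-module" */

    static void call_optional_helper(void)
    {
        void (*fn)(void) = symbol_get(optional_helper);

    #ifdef CONFIG_MODULES
        if (!fn) {
            request_module("my-module");
            fn = symbol_get(optional_helper);
        }
    #endif
        if (!fn) {
            pr_warn("optional_helper not available\n");
            return;
        }

        fn();
        symbol_put(optional_helper);
    }
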
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index 897ba3c12b32..cc4a2ba9e228 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -197,6 +197,11 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#define iowrite16(v, addr) writew((v), (addr))
#define iowrite32(v, addr) writel((v), (addr))
+#define ioread16be(addr) be16_to_cpu(readw(addr))
+#define ioread32be(addr) be32_to_cpu(readl(addr))
+#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
+#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
+
#define ioread8_rep(p, dst, count) \
insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index c1c374f0ec12..4861a78c7160 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -40,7 +40,6 @@ typedef struct {
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
struct pt_regs *frame; /* current exception frame */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
@@ -74,7 +73,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index d780670cbaf3..e5a61c659b5a 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -22,7 +22,6 @@ void foo(void)
BLANK();
OFFSET(TI_task, thread_info, task);
- OFFSET(TI_exec_domain, thread_info, exec_domain);
OFFSET(TI_frame, thread_info, frame);
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_cpu, thread_info, cpu);
@@ -85,7 +84,6 @@ void foo(void)
DEFINE(SIGCHLD_asm, SIGCHLD);
BLANK();
- OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 8609845f12c5..dfd0301cf200 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -202,20 +202,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct sigframe __user *frame;
- int rsig, sig = ksig->sig;
+ int sig = ksig->sig;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- rsig = sig;
- if (sig < 32 &&
- current_thread_info()->exec_domain &&
- current_thread_info()->exec_domain->signal_invmap)
- rsig = current_thread_info()->exec_domain->signal_invmap[sig];
-
- if (__put_user(rsig, &frame->sig) < 0 ||
+ if (__put_user(sig, &frame->sig) < 0 ||
__put_user(&frame->sc, &frame->psc) < 0)
return -EFAULT;
@@ -270,20 +264,14 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
- int rsig, sig = ksig->sig;
+ int sig = ksig->sig;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- rsig = sig;
- if (sig < 32 &&
- current_thread_info()->exec_domain &&
- current_thread_info()->exec_domain->signal_invmap)
- rsig = current_thread_info()->exec_domain->signal_invmap[sig];
-
- if (__put_user(rsig, &frame->sig) ||
+ if (__put_user(sig, &frame->sig) ||
__put_user(&frame->info, &frame->pinfo) ||
__put_user(&frame->uc, &frame->puc) ||
copy_siginfo_to_user(&frame->info, &ksig->info))
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 01c75f36e8b3..24b3d8999ac7 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -46,7 +46,6 @@ generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
-generic-y += shmparam.h
generic-y += siginfo.h
generic-y += signal.h
generic-y += socket.h
diff --git a/arch/nios2/include/asm/shmparam.h b/arch/nios2/include/asm/shmparam.h
new file mode 100644
index 000000000000..60784294e407
--- /dev/null
+++ b/arch/nios2/include/asm/shmparam.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright Altera Corporation (C) <2015>. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_NIOS2_SHMPARAM_H
+#define _ASM_NIOS2_SHMPARAM_H
+
+#define SHMLBA CONFIG_NIOS2_DCACHE_SIZE
+
+#endif /* _ASM_NIOS2_SHMPARAM_H */
diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h
index a16e55cbd8ad..d69c338bd19c 100644
--- a/arch/nios2/include/asm/thread_info.h
+++ b/arch/nios2/include/asm/thread_info.h
@@ -39,7 +39,6 @@ typedef struct {
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable,<0 => BUG */
@@ -58,7 +57,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
index eff00e67c0a2..1d35de90a977 100644
--- a/arch/nios2/include/uapi/asm/ptrace.h
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -14,6 +14,8 @@
#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
/*
* Register numbers used by 'ptrace' system call interface.
*/
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index a223691dff4f..1d96de0bd4aa 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -126,47 +126,46 @@ void __init setup_cpuinfo(void)
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
- int count = 0;
const u32 clockfreq = cpuinfo.cpu_clock_freq;
- count = seq_printf(m,
- "CPU:\t\tNios II/%s\n"
- "MMU:\t\t%s\n"
- "FPU:\t\tnone\n"
- "Clocking:\t%u.%02u MHz\n"
- "BogoMips:\t%lu.%02lu\n"
- "Calibration:\t%lu loops\n",
- cpuinfo.cpu_impl,
- cpuinfo.mmu ? "present" : "none",
- clockfreq / 1000000, (clockfreq / 100000) % 10,
- (loops_per_jiffy * HZ) / 500000,
- ((loops_per_jiffy * HZ) / 5000) % 100,
- (loops_per_jiffy * HZ));
-
- count += seq_printf(m,
- "HW:\n"
- " MUL:\t\t%s\n"
- " MULX:\t\t%s\n"
- " DIV:\t\t%s\n",
- cpuinfo.has_mul ? "yes" : "no",
- cpuinfo.has_mulx ? "yes" : "no",
- cpuinfo.has_div ? "yes" : "no");
-
- count += seq_printf(m,
- "Icache:\t\t%ukB, line length: %u\n",
- cpuinfo.icache_size >> 10,
- cpuinfo.icache_line_size);
-
- count += seq_printf(m,
- "Dcache:\t\t%ukB, line length: %u\n",
- cpuinfo.dcache_size >> 10,
- cpuinfo.dcache_line_size);
-
- count += seq_printf(m,
- "TLB:\t\t%u ways, %u entries, %u PID bits\n",
- cpuinfo.tlb_num_ways,
- cpuinfo.tlb_num_entries,
- cpuinfo.tlb_pid_num_bits);
+ seq_printf(m,
+ "CPU:\t\tNios II/%s\n"
+ "MMU:\t\t%s\n"
+ "FPU:\t\tnone\n"
+ "Clocking:\t%u.%02u MHz\n"
+ "BogoMips:\t%lu.%02lu\n"
+ "Calibration:\t%lu loops\n",
+ cpuinfo.cpu_impl,
+ cpuinfo.mmu ? "present" : "none",
+ clockfreq / 1000000, (clockfreq / 100000) % 10,
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100,
+ (loops_per_jiffy * HZ));
+
+ seq_printf(m,
+ "HW:\n"
+ " MUL:\t\t%s\n"
+ " MULX:\t\t%s\n"
+ " DIV:\t\t%s\n",
+ cpuinfo.has_mul ? "yes" : "no",
+ cpuinfo.has_mulx ? "yes" : "no",
+ cpuinfo.has_div ? "yes" : "no");
+
+ seq_printf(m,
+ "Icache:\t\t%ukB, line length: %u\n",
+ cpuinfo.icache_size >> 10,
+ cpuinfo.icache_line_size);
+
+ seq_printf(m,
+ "Dcache:\t\t%ukB, line length: %u\n",
+ cpuinfo.dcache_size >> 10,
+ cpuinfo.dcache_line_size);
+
+ seq_printf(m,
+ "TLB:\t\t%u ways, %u entries, %u PID bits\n",
+ cpuinfo.tlb_num_ways,
+ cpuinfo.tlb_num_entries,
+ cpuinfo.tlb_pid_num_bits);
return 0;
}
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 27b006c52e12..1e515ccd698e 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -92,35 +92,35 @@ exception_table:
trap_table:
.word handle_system_call /* 0 */
- .word instruction_trap /* 1 */
- .word instruction_trap /* 2 */
- .word instruction_trap /* 3 */
- .word instruction_trap /* 4 */
- .word instruction_trap /* 5 */
- .word instruction_trap /* 6 */
- .word instruction_trap /* 7 */
- .word instruction_trap /* 8 */
- .word instruction_trap /* 9 */
- .word instruction_trap /* 10 */
- .word instruction_trap /* 11 */
- .word instruction_trap /* 12 */
- .word instruction_trap /* 13 */
- .word instruction_trap /* 14 */
- .word instruction_trap /* 15 */
- .word instruction_trap /* 16 */
- .word instruction_trap /* 17 */
- .word instruction_trap /* 18 */
- .word instruction_trap /* 19 */
- .word instruction_trap /* 20 */
- .word instruction_trap /* 21 */
- .word instruction_trap /* 22 */
- .word instruction_trap /* 23 */
- .word instruction_trap /* 24 */
- .word instruction_trap /* 25 */
- .word instruction_trap /* 26 */
- .word instruction_trap /* 27 */
- .word instruction_trap /* 28 */
- .word instruction_trap /* 29 */
+ .word handle_trap_1 /* 1 */
+ .word handle_trap_2 /* 2 */
+ .word handle_trap_3 /* 3 */
+ .word handle_trap_reserved /* 4 */
+ .word handle_trap_reserved /* 5 */
+ .word handle_trap_reserved /* 6 */
+ .word handle_trap_reserved /* 7 */
+ .word handle_trap_reserved /* 8 */
+ .word handle_trap_reserved /* 9 */
+ .word handle_trap_reserved /* 10 */
+ .word handle_trap_reserved /* 11 */
+ .word handle_trap_reserved /* 12 */
+ .word handle_trap_reserved /* 13 */
+ .word handle_trap_reserved /* 14 */
+ .word handle_trap_reserved /* 15 */
+ .word handle_trap_reserved /* 16 */
+ .word handle_trap_reserved /* 17 */
+ .word handle_trap_reserved /* 18 */
+ .word handle_trap_reserved /* 19 */
+ .word handle_trap_reserved /* 20 */
+ .word handle_trap_reserved /* 21 */
+ .word handle_trap_reserved /* 22 */
+ .word handle_trap_reserved /* 23 */
+ .word handle_trap_reserved /* 24 */
+ .word handle_trap_reserved /* 25 */
+ .word handle_trap_reserved /* 26 */
+ .word handle_trap_reserved /* 27 */
+ .word handle_trap_reserved /* 28 */
+ .word handle_trap_reserved /* 29 */
#ifdef CONFIG_KGDB
.word handle_kgdb_breakpoint /* 30 KGDB breakpoint */
#else
@@ -455,6 +455,19 @@ handle_kgdb_breakpoint:
br ret_from_exception
#endif
+handle_trap_1:
+ call handle_trap_1_c
+ br ret_from_exception
+
+handle_trap_2:
+ call handle_trap_2_c
+ br ret_from_exception
+
+handle_trap_3:
+handle_trap_reserved:
+ call handle_trap_3_c
+ br ret_from_exception
+
/*
* Beware - when entering resume, prev (the current task) is
* in r4, next (the new task) is in r5, don't change these
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index 0e075b5ad2a5..2f8c74f93e70 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -94,7 +94,6 @@ void show_regs(struct pt_regs *regs)
void flush_thread(void)
{
- set_fs(USER_DS);
}
int copy_thread(unsigned long clone_flags,
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
index b7b97641a9a6..81f7da7b1d55 100644
--- a/arch/nios2/kernel/traps.c
+++ b/arch/nios2/kernel/traps.c
@@ -23,6 +23,17 @@
static DEFINE_SPINLOCK(die_lock);
+static void _send_sig(int signo, int code, unsigned long addr)
+{
+ siginfo_t info;
+
+ info.si_signo = signo;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = (void __user *) addr;
+ force_sig_info(signo, &info, current);
+}
+
void die(const char *str, struct pt_regs *regs, long err)
{
console_verbose();
@@ -39,16 +50,10 @@ void die(const char *str, struct pt_regs *regs, long err)
void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
{
- siginfo_t info;
-
if (!user_mode(regs))
die("Exception in kernel mode", regs, signo);
- info.si_signo = signo;
- info.si_errno = 0;
- info.si_code = code;
- info.si_addr = (void __user *) addr;
- force_sig_info(signo, &info, current);
+ _send_sig(signo, code, addr);
}
/*
@@ -183,3 +188,18 @@ asmlinkage void unhandled_exception(struct pt_regs *regs, int cause)
pr_emerg("opcode: 0x%08lx\n", *(unsigned long *)(regs->ea));
}
+
+asmlinkage void handle_trap_1_c(struct pt_regs *fp)
+{
+ _send_sig(SIGUSR1, 0, fp->ea);
+}
+
+asmlinkage void handle_trap_2_c(struct pt_regs *fp)
+{
+ _send_sig(SIGUSR2, 0, fp->ea);
+}
+
+asmlinkage void handle_trap_3_c(struct pt_regs *fp)
+{
+ _send_sig(SIGILL, ILL_ILLTRP, fp->ea);
+}
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 796642932e2e..223cdcc8203f 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -58,9 +58,6 @@ static void __invalidate_dcache(unsigned long start, unsigned long end)
end += (cpuinfo.dcache_line_size - 1);
end &= ~(cpuinfo.dcache_line_size - 1);
- if (end > start + cpuinfo.dcache_size)
- end = start + cpuinfo.dcache_size;
-
for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
__asm__ __volatile__ (" initda 0(%0)\n"
: /* Outputs */
@@ -131,12 +128,14 @@ void flush_cache_dup_mm(struct mm_struct *mm)
void flush_icache_range(unsigned long start, unsigned long end)
{
+ __flush_dcache(start, end);
__flush_icache(start, end);
}
void flush_dcache_range(unsigned long start, unsigned long end)
{
__flush_dcache(start, end);
+ __flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);
@@ -159,6 +158,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
unsigned long start = (unsigned long) page_address(page);
unsigned long end = start + PAGE_SIZE;
+ __flush_dcache(start, end);
__flush_icache(start, end);
}
@@ -173,6 +173,18 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
__flush_icache(start, end);
}
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
+{
+ /*
+	 * Write back any data associated with the kernel mapping of this
+	 * page. This ensures that data in the physical page is mutually
+	 * coherent with the kernel's mapping.
+ */
+ unsigned long start = (unsigned long)page_address(page);
+
+ __flush_dcache_all(start, start + PAGE_SIZE);
+}
+
void flush_dcache_page(struct page *page)
{
struct address_space *mapping;
@@ -190,11 +202,12 @@ void flush_dcache_page(struct page *page)
if (mapping && !mapping_mapped(mapping)) {
clear_bit(PG_dcache_clean, &page->flags);
} else {
- unsigned long start = (unsigned long)page_address(page);
-
- __flush_dcache_all(start, start + PAGE_SIZE);
- if (mapping)
+ __flush_dcache_page(mapping, page);
+ if (mapping) {
+ unsigned long start = (unsigned long)page_address(page);
flush_aliases(mapping, page);
+ flush_icache_range(start, start + PAGE_SIZE);
+ }
set_bit(PG_dcache_clean, &page->flags);
}
}
@@ -205,6 +218,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
{
unsigned long pfn = pte_pfn(*pte);
struct page *page;
+ struct address_space *mapping;
if (!pfn_valid(pfn))
return;
@@ -217,16 +231,15 @@ void update_mmu_cache(struct vm_area_struct *vma,
if (page == ZERO_PAGE(0))
return;
- if (!PageReserved(page) &&
- !test_and_set_bit(PG_dcache_clean, &page->flags)) {
- unsigned long start = page_to_virt(page);
- struct address_space *mapping;
-
- __flush_dcache(start, start + PAGE_SIZE);
-
- mapping = page_mapping(page);
- if (mapping)
- flush_aliases(mapping, page);
+ mapping = page_mapping(page);
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ __flush_dcache_page(mapping, page);
+
+	if (mapping)
+ {
+ flush_aliases(mapping, page);
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_page(vma, page);
}
}
@@ -234,15 +247,19 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *to)
{
__flush_dcache(vaddr, vaddr + PAGE_SIZE);
+ __flush_icache(vaddr, vaddr + PAGE_SIZE);
copy_page(vto, vfrom);
__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
+ __flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
__flush_dcache(vaddr, vaddr + PAGE_SIZE);
+ __flush_icache(vaddr, vaddr + PAGE_SIZE);
clear_page(addr);
__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
+ __flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
@@ -251,7 +268,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
{
flush_cache_page(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len);
- __flush_dcache((unsigned long)src, (unsigned long)src + len);
+ __flush_dcache_all((unsigned long)src, (unsigned long)src + len);
if (vma->vm_flags & VM_EXEC)
__flush_icache((unsigned long)src, (unsigned long)src + len);
}
@@ -262,7 +279,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
{
flush_cache_page(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len);
- __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
+ __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len);
if (vma->vm_flags & VM_EXEC)
__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index 875f0845a707..6e619a79a401 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -48,7 +48,6 @@ typedef unsigned long mm_segment_t;
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -73,7 +72,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1, \
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 386af258591d..7095dfe7666b 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -197,7 +197,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
unsigned long sr = mfspr(SPR_SR) & ~SPR_SR_SM;
- set_fs(USER_DS);
memset(regs, 0, sizeof(struct pt_regs));
regs->pc = pc;
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 4fc7ccc0a2cf..b4ed8b36e078 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -329,30 +329,32 @@ static int show_cpuinfo(struct seq_file *m, void *v)
version = (vr & SPR_VR_VER) >> 24;
revision = vr & SPR_VR_REV;
- return seq_printf(m,
- "cpu\t\t: OpenRISC-%x\n"
- "revision\t: %d\n"
- "frequency\t: %ld\n"
- "dcache size\t: %d bytes\n"
- "dcache block size\t: %d bytes\n"
- "icache size\t: %d bytes\n"
- "icache block size\t: %d bytes\n"
- "immu\t\t: %d entries, %lu ways\n"
- "dmmu\t\t: %d entries, %lu ways\n"
- "bogomips\t: %lu.%02lu\n",
- version,
- revision,
- loops_per_jiffy * HZ,
- cpuinfo.dcache_size,
- cpuinfo.dcache_block_size,
- cpuinfo.icache_size,
- cpuinfo.icache_block_size,
- 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
- 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
- 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
- 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
- (loops_per_jiffy * HZ) / 500000,
- ((loops_per_jiffy * HZ) / 5000) % 100);
+ seq_printf(m,
+ "cpu\t\t: OpenRISC-%x\n"
+ "revision\t: %d\n"
+ "frequency\t: %ld\n"
+ "dcache size\t: %d bytes\n"
+ "dcache block size\t: %d bytes\n"
+ "icache size\t: %d bytes\n"
+ "icache block size\t: %d bytes\n"
+ "immu\t\t: %d entries, %lu ways\n"
+ "dmmu\t\t: %d entries, %lu ways\n"
+ "bogomips\t: %lu.%02lu\n",
+ version,
+ revision,
+ loops_per_jiffy * HZ,
+ cpuinfo.dcache_size,
+ cpuinfo.dcache_block_size,
+ cpuinfo.icache_size,
+ cpuinfo.icache_block_size,
+ 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
+ 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100);
+
+ return 0;
}
static void *c_start(struct seq_file *m, loff_t * pos)
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 4112175bf803..c82be69b43c6 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -193,8 +193,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (err)
return -EFAULT;
- /* TODO what is the current->exec_domain stuff and invmap ? */
-
/* Set up registers for signal handler */
regs->pc = (unsigned long)ksig->ka.sa.sa_handler; /* what we enter NOW */
regs->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 8014727a2743..c36546959e86 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -103,6 +103,11 @@ config ARCH_MAY_HAVE_PC_FDC
depends on BROKEN
default y
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 8686237a3c3c..7a4bcc36303d 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -20,6 +20,8 @@ generic-y += param.h
generic-y += percpu.h
generic-y += poll.h
generic-y += preempt.h
+generic-y += scatterlist.h
+generic-y += seccomp.h
generic-y += segment.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 3391d061eccc..78c9fd32c554 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */
#define ELF_HWCAP 0
+#define STACK_RND_MASK (is_32bit_task() ? \
+ 0x7ff >> (PAGE_SHIFT - 12) : \
+ 0x3ffff >> (PAGE_SHIFT - 12))
+
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *);
#define arch_randomize_brk arch_randomize_brk
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index d17437238a2c..3a08eae3318f 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,13 +45,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* Three Level Page Table Support for pmd's */
@@ -102,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 15207b9362bf..0a183756d6ec 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -68,13 +68,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
-#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-#define PT_NLEVELS 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
#else
-#define PT_NLEVELS 2
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER PGD_ORDER
#endif
@@ -93,7 +91,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define __PAGETABLE_PMD_FOLDED
@@ -277,7 +275,7 @@ extern unsigned long *empty_zero_page;
#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
* the gateway marker */
#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
@@ -287,7 +285,7 @@ extern unsigned long *empty_zero_page;
#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the entry pointing to the permanent pmd
* attached to the pgd; cannot clear it */
@@ -299,7 +297,7 @@ static inline void pmd_clear(pmd_t *pmd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd))
@@ -309,7 +307,7 @@ static inline void pmd_clear(pmd_t *pmd) {
#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd; cannot
* free it */
@@ -393,7 +391,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* Find an entry in the second-level page table.. */
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_offset(dir,address) \
((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
#else
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
deleted file mode 100644
index 8bf1f0dd1f15..000000000000
--- a/arch/parisc/include/asm/scatterlist.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_PARISC_SCATTERLIST_H
-#define _ASM_PARISC_SCATTERLIST_H
-
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm-generic/scatterlist.h>
-
-#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
-
-#endif /* _ASM_PARISC_SCATTERLIST_H */
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h
deleted file mode 100644
index 015f7887aa29..000000000000
--- a/arch/parisc/include/asm/seccomp.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_PARISC_SECCOMP_H
-#define _ASM_PARISC_SECCOMP_H
-
-#include <linux/unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_read
-#define __NR_seccomp_write_32 __NR_write
-#define __NR_seccomp_exit_32 __NR_exit
-#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
-
-#endif /* _ASM_PARISC_SECCOMP_H */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index fb13e3865563..e96e693fd58c 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -9,7 +9,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain;/* execution domain */
unsigned long flags; /* thread_info flags (see TIF_*) */
mm_segment_t addr_limit; /* user-level address space limit */
__u32 cpu; /* current CPU */
@@ -19,7 +18,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.addr_limit = KERNEL_DS, \
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index dcd55103a4bb..59001cea13f9 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -242,7 +242,6 @@ int main(void)
DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
BLANK();
DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 2ab16bb160a8..75819617f93b 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -398,7 +398,7 @@
* can address up to 1TB
*/
.macro L2_ptep pmd,pte,index,va,fault
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
@@ -436,7 +436,7 @@
* all ILP32 processes and all the kernel for machines with
* under 4GB of memory) */
.macro L3_ptep pgd,pte,index,va,fault
-#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
copy %r0,\pte
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index d4dc588c0dc1..e7d64527aff9 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -74,7 +74,7 @@ $bss_loop:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
@@ -97,7 +97,7 @@ $bss_loop:
stw %r3,0(%r4)
ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index cfe056fe7f5c..f3191db6e2e9 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -525,8 +525,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
desc = irq_to_desc(irq);
cpumask_copy(&dest, desc->irq_data.affinity);
if (irqd_is_per_cpu(&desc->irq_data) &&
- !cpu_isset(smp_processor_id(), dest)) {
- int cpu = first_cpu(dest);
+ !cpumask_test_cpu(smp_processor_id(), &dest)) {
+ int cpu = cpumask_first(&dest);
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index d87d1c476d85..ff834fd67478 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -482,7 +482,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sglist++ ) {
- unsigned long vaddr = sg_virt_addr(sglist);
+ unsigned long vaddr = (unsigned long)sg_virt(sglist);
sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sglist) = sglist->length;
flush_kernel_dcache_range(vaddr, sglist->length);
@@ -502,7 +502,7 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
- flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+ flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
return;
}
@@ -527,7 +527,7 @@ static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
- flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+ flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
}
static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
@@ -537,7 +537,7 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
- flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+ flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
}
struct hppa_dma_ops pcxl_dma_ops = {
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 8a488c22a99f..809905a811ed 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -181,9 +181,12 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
return 1;
}
+/*
+ * Copy architecture-specific thread state
+ */
int
copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+ unsigned long kthread_arg, struct task_struct *p)
{
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
@@ -195,11 +198,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
extern void * const child_return;
if (unlikely(p->flags & PF_KTHREAD)) {
+ /* kernel thread */
memset(cregs, 0, sizeof(struct pt_regs));
if (!usp) /* idle thread */
return 0;
-
- /* kernel thread */
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
@@ -215,7 +217,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
#else
cregs->gr[26] = usp;
#endif
- cregs->gr[25] = arg;
+ cregs->gr[25] = kthread_arg;
} else {
/* user thread */
/* usp must be word aligned. This also prevents users from
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index e1ffea2f9a0b..5aba01ac457f 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void)
if (stack_base > STACK_SIZE_MAX)
stack_base = STACK_SIZE_MAX;
+ /* Add space for stack randomization. */
+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+
return PAGE_ALIGN(STACK_TOP - stack_base);
}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 15dbe81cf5f3..c229427fa546 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -34,7 +34,7 @@
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 22b0940494bb..190cc48abc0c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -88,7 +88,7 @@ config PPC
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
@@ -126,7 +126,7 @@ config PPC
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_BPF_JIT if PPC64
+ select HAVE_BPF_JIT
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -152,6 +152,7 @@ config PPC
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select NO_BOOTMEM
select HAVE_GENERIC_RCU_GUP
+ select HAVE_PERF_EVENTS_NMI if PPC64
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -189,9 +190,6 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default PCI
-config PPC_OF
- def_bool y
-
config PPC_UDBG_16550
bool
default n
@@ -297,6 +295,12 @@ config ZONE_DMA32
bool
default y if PPC64
+config PGTABLE_LEVELS
+ int
+ default 2 if !PPC64
+ default 3 if PPC_64K_PAGES
+ default 4
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index ec2e40f2cc11..0efa8f90a8f1 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -117,7 +117,7 @@ config BDI_SWITCH
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
- depends on PPC_OF && PPC_BOOK3S
+ depends on PPC_BOOK3S
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
@@ -193,13 +193,6 @@ config PPC_EARLY_DEBUG_PAS_REALMODE
Select this to enable early debugging for PA Semi.
Output will be on UART0.
-config PPC_EARLY_DEBUG_BEAT
- bool "Beat HV Console"
- depends on PPC_CELLEB
- select PPC_UDBG_BEAT
- help
- Select this to enable early debugging for Celleb with Beat.
-
config PPC_EARLY_DEBUG_44x
bool "Early serial debugging for IBM/AMCC 44x CPUs"
depends on 44x
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index fc502e042438..07a480861f78 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -248,10 +248,10 @@ boot := arch/$(ARCH)/boot
ifeq ($(CONFIG_RELOCATABLE),y)
quiet_cmd_relocs_check = CALL $<
- cmd_relocs_check = perl $< "$(OBJDUMP)" "$(obj)/vmlinux"
+ cmd_relocs_check = $(CONFIG_SHELL) $< "$(OBJDUMP)" "$(obj)/vmlinux"
PHONY += relocs_check
-relocs_check: arch/powerpc/relocs_check.pl vmlinux
+relocs_check: arch/powerpc/relocs_check.sh vmlinux
$(call cmd,relocs_check)
zImage: relocs_check
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8a5bc1cfc6aa..73eddda53b8e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -110,7 +110,6 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
-src-plat-$(CONFIG_PPC_CELLEB) += pseries-head.S
src-plat-$(CONFIG_PPC_CELL_QPACE) += pseries-head.S
src-wlib := $(sort $(src-wlib-y))
@@ -215,7 +214,6 @@ image-$(CONFIG_PPC_POWERNV) += zImage.pseries
image-$(CONFIG_PPC_MAPLE) += zImage.maple
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
image-$(CONFIG_PPC_PS3) += dtbImage.ps3
-image-$(CONFIG_PPC_CELLEB) += zImage.pseries
image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries
image-$(CONFIG_PPC_CHRP) += zImage.chrp
image-$(CONFIG_PPC_EFIKA) += zImage.chrp
@@ -317,7 +315,7 @@ endif
# Allow extra targets to be added to the defconfig
image-y += $(subst ",,$(CONFIG_EXTRA_TARGETS))
-initrd- := $(patsubst zImage%, zImage.initrd%, $(image-n) $(image-))
+initrd- := $(patsubst zImage%, zImage.initrd%, $(image-))
initrd-y := $(patsubst zImage%, zImage.initrd%, \
$(patsubst dtbImage%, dtbImage.initrd%, \
$(patsubst simpleImage%, simpleImage.initrd%, \
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 14de4f8778a7..12866ccb5694 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -155,29 +155,29 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
ld r9,(p_rela-p_base)(r10)
add r9,r9,r10
- li r7,0
+ li r13,0
li r8,0
-9: ld r6,0(r11) /* get tag */
- cmpdi r6,0
+9: ld r12,0(r11) /* get tag */
+ cmpdi r12,0
beq 12f /* end of list */
- cmpdi r6,RELA
+ cmpdi r12,RELA
bne 10f
- ld r7,8(r11) /* get RELA pointer in r7 */
+ ld r13,8(r11) /* get RELA pointer in r13 */
b 11f
-10: addis r6,r6,(-RELACOUNT)@ha
- cmpdi r6,RELACOUNT@l
+10: addis r12,r12,(-RELACOUNT)@ha
+ cmpdi r12,RELACOUNT@l
bne 11f
ld r8,8(r11) /* get RELACOUNT value in r8 */
11: addi r11,r11,16
b 9b
12:
- cmpdi r7,0 /* check we have both RELA and RELACOUNT */
+ cmpdi r13,0 /* check we have both RELA and RELACOUNT */
cmpdi cr1,r8,0
beq 3f
beq cr1,3f
	/* Calculate the runtime offset. */
- subf r7,r7,r9
+ subf r13,r13,r9
/* Run through the list of relocations and process the
* R_PPC64_RELATIVE ones. */
@@ -185,10 +185,10 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
13: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
cmpdi r0,22 /* R_PPC64_RELATIVE */
bne 3f
- ld r6,0(r9) /* reloc->r_offset */
+ ld r12,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
- add r0,r0,r7
- stdx r0,r7,r6
+ add r0,r0,r13
+ stdx r0,r13,r12
addi r9,r9,24
bdnz 13b
@@ -218,7 +218,7 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
beq 6f
ld r1,0(r8)
li r0,0
- stdu r0,-16(r1) /* establish a stack frame */
+ stdu r0,-112(r1) /* establish a stack frame */
6:
#endif /* __powerpc64__ */
/* Call platform_init() */
diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts
deleted file mode 100644
index 2aa5cd318ce8..000000000000
--- a/arch/powerpc/boot/dts/b4860emu.dts
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * B4860 emulator Device Tree Source
- *
- * Copyright 2013 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * This software is provided by Freescale Semiconductor "as is" and any
- * express or implied warranties, including, but not limited to, the implied
- * warranties of merchantability and fitness for a particular purpose are
- * disclaimed. In no event shall Freescale Semiconductor be liable for any
- * direct, indirect, incidental, special, exemplary, or consequential damages
- * (including, but not limited to, procurement of substitute goods or services;
- * loss of use, data, or profits; or business interruption) however caused and
- * on any theory of liability, whether in contract, strict liability, or tort
- * (including negligence or otherwise) arising in any way out of the use of
- * this software, even if advised of the possibility of such damage.
- */
-
-/dts-v1/;
-
-/include/ "fsl/e6500_power_isa.dtsi"
-
-/ {
- compatible = "fsl,B4860";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- dma0 = &dma0;
- dma1 = &dma1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e6500@0 {
- device_type = "cpu";
- reg = <0 1>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu1: PowerPC,e6500@2 {
- device_type = "cpu";
- reg = <2 3>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu2: PowerPC,e6500@4 {
- device_type = "cpu";
- reg = <4 5>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu3: PowerPC,e6500@6 {
- device_type = "cpu";
- reg = <6 7>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- };
-};
-
-/ {
- model = "fsl,B4860QDS";
- compatible = "fsl,B4860EMU", "fsl,B4860QDS";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- ifc: localbus@ffe124000 {
- reg = <0xf 0xfe124000 0 0x2000>;
- ranges = <0 0 0xf 0xe8000000 0x08000000
- 2 0 0xf 0xff800000 0x00010000
- 3 0 0xf 0xffdf0000 0x00008000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
- };
- };
-
- memory {
- device_type = "memory";
- };
-
- soc: soc@ffe000000 {
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
- };
-};
-
-&ifc {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
- interrupts = <25 2 0 0>;
-};
-
-&soc {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 2>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr1: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 8>;
- };
-
- ddr2: memory-controller@9000 {
- compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller";
- reg = <0x9000 0x1000>;
- interrupts = <16 2 1 9>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,b4-l3-cache-controller", "cache";
- reg = <0x10000 0x1000
- 0x11000 0x1000>;
- interrupts = <16 2 1 4>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet2-cf", "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 0>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x4000>;
- fsl,portid-mapping = <0x8000>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupts = <
- 24 2 0 0
- 16 2 1 1>;
- pamu0: pamu@0 {
- reg = <0 0x1000>;
- fsl,primary-cache-geometry = <8 1>;
- fsl,secondary-cache-geometry = <32 2>;
- };
- };
-
-/include/ "fsl/qoriq-mpic.dtsi"
-
- guts: global-utilities@e0000 {
- compatible = "fsl,b4-device-config";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- fsl,liodn-bits = <12>;
- };
-
-/include/ "fsl/qoriq-clockgen2.dtsi"
- global-utilities@e1000 {
- compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
- };
-
-/include/ "fsl/qoriq-dma-0.dtsi"
- dma@100300 {
- fsl,iommu-parent = <&pamu0>;
- fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
- };
-
-/include/ "fsl/qoriq-dma-1.dtsi"
- dma@101300 {
- fsl,iommu-parent = <&pamu0>;
- fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
- };
-
-/include/ "fsl/qoriq-i2c-0.dtsi"
-/include/ "fsl/qoriq-i2c-1.dtsi"
-/include/ "fsl/qoriq-duart-0.dtsi"
-/include/ "fsl/qoriq-duart-1.dtsi"
-
- L2: l2-cache-controller@c20000 {
- compatible = "fsl,b4-l2-cache-controller";
- reg = <0xc20000 0x1000>;
- next-level-cache = <&cpc>;
- };
-};
diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi
index e5bde0b85135..24ed80dc2120 100644
--- a/arch/powerpc/boot/dts/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/b4qds.dtsi
@@ -1,7 +1,7 @@
/*
* B4420DS Device Tree Source
*
- * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -97,10 +97,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01052000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index 65100b9636b7..f35e9e0a5445 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -1,7 +1,7 @@
/*
* B4860 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -109,6 +109,64 @@
};
};
+&bportals {
+ bman-portal@38000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x38000 0x4000>, <0x100e000 0x1000>;
+ interrupts = <133 2 0 0>;
+ };
+ bman-portal@3c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
+ interrupts = <135 2 0 0>;
+ };
+ bman-portal@40000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x40000 0x4000>, <0x1010000 0x1000>;
+ interrupts = <137 2 0 0>;
+ };
+ bman-portal@44000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x44000 0x4000>, <0x1011000 0x1000>;
+ interrupts = <139 2 0 0>;
+ };
+ bman-portal@48000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x48000 0x4000>, <0x1012000 0x1000>;
+ interrupts = <141 2 0 0>;
+ };
+ bman-portal@4c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4c000 0x4000>, <0x1013000 0x1000>;
+ interrupts = <143 2 0 0>;
+ };
+ bman-portal@50000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x50000 0x4000>, <0x1014000 0x1000>;
+ interrupts = <145 2 0 0>;
+ };
+ bman-portal@54000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x54000 0x4000>, <0x1015000 0x1000>;
+ interrupts = <147 2 0 0>;
+ };
+ bman-portal@58000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x58000 0x4000>, <0x1016000 0x1000>;
+ interrupts = <149 2 0 0>;
+ };
+ bman-portal@5c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x5c000 0x4000>, <0x1017000 0x1000>;
+ interrupts = <151 2 0 0>;
+ };
+ bman-portal@60000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x60000 0x4000>, <0x1018000 0x1000>;
+ interrupts = <153 2 0 0>;
+ };
+};
+
&soc {
ddr2: memory-controller@9000 {
compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 1a54ba71f685..73136c0029d2 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -1,7 +1,7 @@
/*
* B4420 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* this software, even if advised of the possibility of such damage.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -128,6 +133,83 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+ bman-portal@28000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x28000 0x4000>, <0x100a000 0x1000>;
+ interrupts = <125 2 0 0>;
+ };
+ bman-portal@2c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
+ interrupts = <127 2 0 0>;
+ };
+ bman-portal@30000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x30000 0x4000>, <0x100c000 0x1000>;
+ interrupts = <129 2 0 0>;
+ };
+ bman-portal@34000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x34000 0x4000>, <0x100d000 0x1000>;
+ interrupts = <131 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -261,6 +343,11 @@
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-sec5.3-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
+ bman: bman@31a000 {
+ interrupts = <16 2 1 29>;
+ };
+
L2: l2-cache-controller@c20000 {
compatible = "fsl,b4-l2-cache-controller";
reg = <0xc20000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index 81437fdf1db4..7780f21430cb 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P1023/P1017 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
#address-cells = <2>;
#size-cells = <1>;
@@ -97,6 +102,28 @@
};
};
+&bportals {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x100000 0x1000>;
+ interrupts = <30 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x101000 0x1000>;
+ interrupts = <32 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x102000 0x1000>;
+ interrupts = <34 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -221,6 +248,14 @@
/include/ "pq3-mpic.dtsi"
/include/ "pq3-mpic-timer-B.dtsi"
+ bman: bman@8a000 {
+ compatible = "fsl,bman";
+ reg = <0x8a000 0x1000>;
+ interrupts = <16 2 0 0>;
+ fsl,bman-portals = <&bportals>;
+ memory-region = <&bman_fbpr>;
+ };
+
global-utilities@e0000 {
compatible = "fsl,p1023-guts";
reg = <0xe0000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index efd74db4f9b0..f2feacfd9a25 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P2041/P2040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -216,6 +221,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -407,4 +414,6 @@
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index d7425ef1ae41..d6fea37395ad 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P3041 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -243,6 +248,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -434,4 +441,6 @@
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 7005a4a4cef0..89482c9b2301 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P4080/P4040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -243,6 +248,8 @@
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -490,4 +497,6 @@
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 55834211bd28..6e04851e2fc9 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P5020/5010 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&lbc {
compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -240,6 +245,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -421,6 +428,8 @@
fsl,iommu-parent = <&pamu1>;
};
+/include/ "qoriq-bman1.dtsi"
+
/include/ "qoriq-raid1.0-0.dtsi"
raideng@320000 {
fsl,iommu-parent = <&pamu1>;
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index 6e4cd6ce363c..5e44dfa1e1a5 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P5040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* software, even if advised of the possibility of such damage.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&lbc {
compatible = "fsl,p5040-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -195,6 +200,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -399,4 +406,6 @@
crypto@300000 {
fsl,iommu-parent = <&pamu4>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 15ae462e758f..5cc01be5b152 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -1,7 +1,7 @@
/*
* T1040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -218,6 +223,63 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -401,4 +463,5 @@
fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */
};
/include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index 1ce91e3485a9..86bdaf6cbd14 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -1,7 +1,7 @@
/*
* T2081 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -224,6 +229,103 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+ bman-portal@28000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x28000 0x4000>, <0x100a000 0x1000>;
+ interrupts = <125 2 0 0>;
+ };
+ bman-portal@2c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
+ interrupts = <127 2 0 0>;
+ };
+ bman-portal@30000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x30000 0x4000>, <0x100c000 0x1000>;
+ interrupts = <129 2 0 0>;
+ };
+ bman-portal@34000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x34000 0x4000>, <0x100d000 0x1000>;
+ interrupts = <131 2 0 0>;
+ };
+ bman-portal@38000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x38000 0x4000>, <0x100e000 0x1000>;
+ interrupts = <133 2 0 0>;
+ };
+ bman-portal@3c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
+ interrupts = <135 2 0 0>;
+ };
+ bman-portal@40000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x40000 0x4000>, <0x1010000 0x1000>;
+ interrupts = <137 2 0 0>;
+ };
+ bman-portal@44000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x44000 0x4000>, <0x1011000 0x1000>;
+ interrupts = <139 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -400,6 +502,7 @@
phy_type = "utmi";
};
/include/ "qoriq-sec5.2-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
L2_1: l2-cache-controller@c20000 {
/* Cluster 0 L2 cache */
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 0e96fcabe812..4d4f25895d8c 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -1,7 +1,7 @@
/*
* T4240 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -294,6 +299,263 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+ bman-portal@28000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x28000 0x4000>, <0x100a000 0x1000>;
+ interrupts = <125 2 0 0>;
+ };
+ bman-portal@2c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
+ interrupts = <127 2 0 0>;
+ };
+ bman-portal@30000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x30000 0x4000>, <0x100c000 0x1000>;
+ interrupts = <129 2 0 0>;
+ };
+ bman-portal@34000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x34000 0x4000>, <0x100d000 0x1000>;
+ interrupts = <131 2 0 0>;
+ };
+ bman-portal@38000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x38000 0x4000>, <0x100e000 0x1000>;
+ interrupts = <133 2 0 0>;
+ };
+ bman-portal@3c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
+ interrupts = <135 2 0 0>;
+ };
+ bman-portal@40000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x40000 0x4000>, <0x1010000 0x1000>;
+ interrupts = <137 2 0 0>;
+ };
+ bman-portal@44000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x44000 0x4000>, <0x1011000 0x1000>;
+ interrupts = <139 2 0 0>;
+ };
+ bman-portal@48000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x48000 0x4000>, <0x1012000 0x1000>;
+ interrupts = <141 2 0 0>;
+ };
+ bman-portal@4c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4c000 0x4000>, <0x1013000 0x1000>;
+ interrupts = <143 2 0 0>;
+ };
+ bman-portal@50000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x50000 0x4000>, <0x1014000 0x1000>;
+ interrupts = <145 2 0 0>;
+ };
+ bman-portal@54000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x54000 0x4000>, <0x1015000 0x1000>;
+ interrupts = <147 2 0 0>;
+ };
+ bman-portal@58000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x58000 0x4000>, <0x1016000 0x1000>;
+ interrupts = <149 2 0 0>;
+ };
+ bman-portal@5c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x5c000 0x4000>, <0x1017000 0x1000>;
+ interrupts = <151 2 0 0>;
+ };
+ bman-portal@60000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x60000 0x4000>, <0x1018000 0x1000>;
+ interrupts = <153 2 0 0>;
+ };
+ bman-portal@64000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x64000 0x4000>, <0x1019000 0x1000>;
+ interrupts = <155 2 0 0>;
+ };
+ bman-portal@68000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x68000 0x4000>, <0x101a000 0x1000>;
+ interrupts = <157 2 0 0>;
+ };
+ bman-portal@6c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x6c000 0x4000>, <0x101b000 0x1000>;
+ interrupts = <159 2 0 0>;
+ };
+ bman-portal@70000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x70000 0x4000>, <0x101c000 0x1000>;
+ interrupts = <161 2 0 0>;
+ };
+ bman-portal@74000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x74000 0x4000>, <0x101d000 0x1000>;
+ interrupts = <163 2 0 0>;
+ };
+ bman-portal@78000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x78000 0x4000>, <0x101e000 0x1000>;
+ interrupts = <165 2 0 0>;
+ };
+ bman-portal@7c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x7c000 0x4000>, <0x101f000 0x1000>;
+ interrupts = <167 2 0 0>;
+ };
+ bman-portal@80000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x80000 0x4000>, <0x1020000 0x1000>;
+ interrupts = <169 2 0 0>;
+ };
+ bman-portal@84000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x84000 0x4000>, <0x1021000 0x1000>;
+ interrupts = <171 2 0 0>;
+ };
+ bman-portal@88000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x88000 0x4000>, <0x1022000 0x1000>;
+ interrupts = <173 2 0 0>;
+ };
+ bman-portal@8c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8c000 0x4000>, <0x1023000 0x1000>;
+ interrupts = <175 2 0 0>;
+ };
+ bman-portal@90000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x90000 0x4000>, <0x1024000 0x1000>;
+ interrupts = <385 2 0 0>;
+ };
+ bman-portal@94000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x94000 0x4000>, <0x1025000 0x1000>;
+ interrupts = <387 2 0 0>;
+ };
+ bman-portal@98000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x98000 0x4000>, <0x1026000 0x1000>;
+ interrupts = <389 2 0 0>;
+ };
+ bman-portal@9c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x9c000 0x4000>, <0x1027000 0x1000>;
+ interrupts = <391 2 0 0>;
+ };
+ bman-portal@a0000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xa0000 0x4000>, <0x1028000 0x1000>;
+ interrupts = <393 2 0 0>;
+ };
+ bman-portal@a4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xa4000 0x4000>, <0x1029000 0x1000>;
+ interrupts = <395 2 0 0>;
+ };
+ bman-portal@a8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xa8000 0x4000>, <0x102a000 0x1000>;
+ interrupts = <397 2 0 0>;
+ };
+ bman-portal@ac000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xac000 0x4000>, <0x102b000 0x1000>;
+ interrupts = <399 2 0 0>;
+ };
+ bman-portal@b0000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xb0000 0x4000>, <0x102c000 0x1000>;
+ interrupts = <401 2 0 0>;
+ };
+ bman-portal@b4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xb4000 0x4000>, <0x102d000 0x1000>;
+ interrupts = <403 2 0 0>;
+ };
+ bman-portal@b8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xb8000 0x4000>, <0x102e000 0x1000>;
+ interrupts = <405 2 0 0>;
+ };
+ bman-portal@bc000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xbc000 0x4000>, <0x102f000 0x1000>;
+ interrupts = <407 2 0 0>;
+ };
+ bman-portal@c0000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc0000 0x4000>, <0x1030000 0x1000>;
+ interrupts = <409 2 0 0>;
+ };
+ bman-portal@c4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc4000 0x4000>, <0x1031000 0x1000>;
+ interrupts = <411 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -486,6 +748,7 @@
/include/ "qoriq-sata2-0.dtsi"
/include/ "qoriq-sata2-1.dtsi"
/include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
L2_1: l2-cache-controller@c20000 {
compatible = "fsl,t4240-l2-cache-controller";
diff --git a/arch/powerpc/boot/dts/kmcoge4.dts b/arch/powerpc/boot/dts/kmcoge4.dts
index 89b4119f3b19..97e6d11d1e6d 100644
--- a/arch/powerpc/boot/dts/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/kmcoge4.dts
@@ -25,10 +25,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/oca4080.dts b/arch/powerpc/boot/dts/oca4080.dts
index 3d4c751d1608..eb76caae11d9 100644
--- a/arch/powerpc/boot/dts/oca4080.dts
+++ b/arch/powerpc/boot/dts/oca4080.dts
@@ -49,10 +49,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p1023rdb.dts b/arch/powerpc/boot/dts/p1023rdb.dts
index 0a06a88ddbd5..9236e3742a23 100644
--- a/arch/powerpc/boot/dts/p1023rdb.dts
+++ b/arch/powerpc/boot/dts/p1023rdb.dts
@@ -1,7 +1,7 @@
/*
* P1023 RDB Device Tree Source
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Author: Chunhe Lan <Chunhe.Lan@freescale.com>
*
@@ -47,6 +47,21 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
+ bportals: bman-portals@ff200000 {
+ ranges = <0x0 0xf 0xff200000 0x200000>;
+ };
+
soc: soc@ff600000 {
ranges = <0x0 0x0 0xff600000 0x200000>;
@@ -228,7 +243,6 @@
0x0 0x100000>;
};
};
-
};
/include/ "fsl/p1023si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index d97ad74c7279..c1e69dc7188e 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -1,7 +1,7 @@
/*
* P2041RDB Device Tree Source
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 394ea9c943c9..2192fe94866d 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -1,7 +1,7 @@
/*
* P3041DS Device Tree Source
*
- * Copyright 2010-2011 Freescale Semiconductor Inc.
+ * Copyright 2010 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 1cf6148b8b05..fad441654642 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -1,7 +1,7 @@
/*
* P4080DS Device Tree Source
*
- * Copyright 2009-2011 Freescale Semiconductor Inc.
+ * Copyright 2009 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index b7f3057cd894..7382636dc560 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -1,7 +1,7 @@
/*
* P5020DS Device Tree Source
*
- * Copyright 2010-2011 Freescale Semiconductor Inc.
+ * Copyright 2010 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p5040ds.dts b/arch/powerpc/boot/dts/p5040ds.dts
index 7e04bf487c04..35dabf5b6098 100644
--- a/arch/powerpc/boot/dts/p5040ds.dts
+++ b/arch/powerpc/boot/dts/p5040ds.dts
@@ -1,7 +1,7 @@
/*
* P5040DS Device Tree Source
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t104xqds.dtsi b/arch/powerpc/boot/dts/t104xqds.dtsi
index 234f4b596c5b..f7e9bfbeefc7 100644
--- a/arch/powerpc/boot/dts/t104xqds.dtsi
+++ b/arch/powerpc/boot/dts/t104xqds.dtsi
@@ -1,7 +1,7 @@
/*
* T104xQDS Device Tree Source
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -38,6 +38,17 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -77,6 +88,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi
index 187add885cae..76e07a3f2ca8 100644
--- a/arch/powerpc/boot/dts/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t104xrdb.dtsi
@@ -33,6 +33,16 @@
*/
/ {
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
@@ -69,6 +79,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t208xqds.dtsi b/arch/powerpc/boot/dts/t208xqds.dtsi
index 59061834d54e..c42e07f4f648 100644
--- a/arch/powerpc/boot/dts/t208xqds.dtsi
+++ b/arch/powerpc/boot/dts/t208xqds.dtsi
@@ -1,7 +1,7 @@
/*
* T2080/T2081 QDS Device Tree Source
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -39,6 +39,17 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -78,6 +89,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
@@ -137,7 +152,7 @@
rtc@68 {
compatible = "dallas,ds3232";
reg = <0x68>;
- interrupts = <0x1 0x1 0 0>;
+ interrupts = <0xb 0x1 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/t208xrdb.dtsi b/arch/powerpc/boot/dts/t208xrdb.dtsi
index 1481e192e783..e1463b165d0e 100644
--- a/arch/powerpc/boot/dts/t208xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t208xrdb.dtsi
@@ -39,6 +39,17 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -79,6 +90,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t4240qds.dts b/arch/powerpc/boot/dts/t4240qds.dts
index 97683f6a2936..6df77766410b 100644
--- a/arch/powerpc/boot/dts/t4240qds.dts
+++ b/arch/powerpc/boot/dts/t4240qds.dts
@@ -1,7 +1,7 @@
/*
* T4240QDS Device Tree Source
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -100,10 +100,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t4240rdb.dts b/arch/powerpc/boot/dts/t4240rdb.dts
index 53761d4e8c51..46049cf37f02 100644
--- a/arch/powerpc/boot/dts/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/t4240rdb.dts
@@ -69,10 +69,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index bb8b9b3505ee..535e8fd8900d 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -44,12 +44,12 @@
#define offset_devp(off) \
({ \
- int _offset = (off); \
+ unsigned long _offset = (off); \
check_err(_offset) ? NULL : (void *)(_offset+1); \
})
-#define devp_offset_find(devp) (((int)(devp))-1)
-#define devp_offset(devp) (devp ? ((int)(devp))-1 : 0)
+#define devp_offset_find(devp) (((unsigned long)(devp))-1)
+#define devp_offset(devp) (devp ? ((unsigned long)(devp))-1 : 0)
static void *fdt;
static void *buf; /* = NULL */
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index c89fdb1b80e1..8dcd744e5728 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -4,15 +4,17 @@
#include <types.h>
#include <string.h>
+#include "of.h"
+
typedef u32 uint32_t;
typedef u64 uint64_t;
typedef unsigned long uintptr_t;
-#define fdt16_to_cpu(x) (x)
-#define cpu_to_fdt16(x) (x)
-#define fdt32_to_cpu(x) (x)
-#define cpu_to_fdt32(x) (x)
-#define fdt64_to_cpu(x) (x)
-#define cpu_to_fdt64(x) (x)
+#define fdt16_to_cpu(x) be16_to_cpu(x)
+#define cpu_to_fdt16(x) cpu_to_be16(x)
+#define fdt32_to_cpu(x) be32_to_cpu(x)
+#define cpu_to_fdt32(x) cpu_to_be32(x)
+#define fdt64_to_cpu(x) be64_to_cpu(x)
+#define cpu_to_fdt64(x) cpu_to_be64(x)
#endif /* _ARCH_POWERPC_BOOT_LIBFDT_ENV_H */
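
Editor's note: the hunk above replaces the identity fdt*_to_cpu()/cpu_to_fdt*() macros with real big-endian conversions, which matters because flattened device tree data is always stored big-endian regardless of CPU endianness. A small userspace C sketch (not part of this patch) of the conversion a little-endian boot wrapper now has to perform:

/*
 * Editor's sketch (not part of the patch): FDT cells are big-endian in
 * memory, so a little-endian host must byte-swap them while a big-endian
 * host can use them as-is.  This mirrors what the new be32_to_cpu()-based
 * fdt32_to_cpu() does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

int main(void)
{
	/* the cell <2> as it appears in FDT memory: 00 00 00 02 */
	const uint8_t raw[4] = { 0x00, 0x00, 0x00, 0x02 };
	uint32_t cell, probe = 1;

	memcpy(&cell, raw, sizeof(cell));
	/* on a little-endian host the raw load reads as 0x02000000 */
	printf("fdt32_to_cpu -> %u\n",
	       *(const uint8_t *)&probe ? swab32(cell) : cell);
	return 0;
}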
diff --git a/arch/powerpc/boot/of.h b/arch/powerpc/boot/of.h
index c8c1750aba0c..5603320dce07 100644
--- a/arch/powerpc/boot/of.h
+++ b/arch/powerpc/boot/of.h
@@ -24,11 +24,19 @@ void of_console_init(void);
typedef u32 __be32;
#ifdef __LITTLE_ENDIAN__
+#define cpu_to_be16(x) swab16(x)
+#define be16_to_cpu(x) swab16(x)
#define cpu_to_be32(x) swab32(x)
#define be32_to_cpu(x) swab32(x)
+#define cpu_to_be64(x) swab64(x)
+#define be64_to_cpu(x) swab64(x)
#else
+#define cpu_to_be16(x) (x)
+#define be16_to_cpu(x) (x)
#define cpu_to_be32(x) (x)
#define be32_to_cpu(x) (x)
+#define cpu_to_be64(x) (x)
+#define be64_to_cpu(x) (x)
#endif
#define PROM_ERROR (-1u)
diff --git a/arch/powerpc/boot/planetcore.c b/arch/powerpc/boot/planetcore.c
index 0d8558a475bb..75117e63e6db 100644
--- a/arch/powerpc/boot/planetcore.c
+++ b/arch/powerpc/boot/planetcore.c
@@ -131,36 +131,3 @@ void planetcore_set_stdout_path(const char *table)
setprop_str(chosen, "linux,stdout-path", path);
}
-
-void planetcore_set_serial_speed(const char *table)
-{
- void *chosen, *stdout;
- u64 baud;
- u32 baud32;
- int len;
-
- chosen = finddevice("/chosen");
- if (!chosen)
- return;
-
- len = getprop(chosen, "linux,stdout-path", prop_buf, MAX_PROP_LEN);
- if (len <= 0)
- return;
-
- stdout = finddevice(prop_buf);
- if (!stdout) {
- printf("planetcore_set_serial_speed: "
- "Bad /chosen/linux,stdout-path.\r\n");
-
- return;
- }
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_SERIAL_BAUD,
- &baud)) {
- printf("planetcore_set_serial_speed: No SB tag.\r\n");
- return;
- }
-
- baud32 = baud;
- setprop(stdout, "current-speed", &baud32, 4);
-}
diff --git a/arch/powerpc/boot/planetcore.h b/arch/powerpc/boot/planetcore.h
index 0d4094f1771c..d53c733cc463 100644
--- a/arch/powerpc/boot/planetcore.h
+++ b/arch/powerpc/boot/planetcore.h
@@ -43,7 +43,4 @@ void planetcore_set_mac_addrs(const char *table);
*/
void planetcore_set_stdout_path(const char *table);
-/* Sets the current-speed property in the serial node. */
-void planetcore_set_serial_speed(const char *table);
-
#endif
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ae0f88ec4a32..3f50c27ed8f8 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -277,7 +277,7 @@ treeboot-iss4xx-mpic)
platformo="$object/treeboot-iss4xx.o"
;;
epapr)
- platformo="$object/epapr.o $object/epapr-wrapper.o"
+ platformo="$object/pseries-head.o $object/epapr.o $object/epapr-wrapper.o"
link_address='0x20000000'
pie=-pie
;;
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 9788b3c2d563..9227b517560a 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -28,7 +28,6 @@ CONFIG_PS3_ROM=m
CONFIG_PS3_FLASH=m
CONFIG_PS3_LPM=m
CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_CELLEB=y
CONFIG_RTAS_FLASH=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -113,7 +112,6 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_AEC62XX=y
CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_BLK_DEV_CELLEB=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
@@ -156,7 +154,6 @@ CONFIG_SERIAL_TXX9_NR_UARTS=2
CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_HVC_RTAS=y
-CONFIG_HVC_BEAT=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
deleted file mode 100644
index ff454dcd2dd3..000000000000
--- a/arch/powerpc/configs/celleb_defconfig
+++ /dev/null
@@ -1,152 +0,0 @@
-CONFIG_PPC64=y
-CONFIG_TUNE_CELL=y
-CONFIG_ALTIVEC=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_PPC_POWERNV is not set
-# CONFIG_PPC_PSERIES is not set
-# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_CELLEB=y
-CONFIG_SPU_FS=y
-# CONFIG_CBE_THERM is not set
-CONFIG_UDBG_RTAS_CONSOLE=y
-# CONFIG_RTAS_PROC is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_KEXEC=y
-CONFIG_NUMA=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_SYN_COOKIES=y
-CONFIG_IPV6=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETFILTER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CELLEB=y
-CONFIG_SCSI=m
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_NETDEVICES=y
-CONFIG_SPIDER_NET=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_SERIAL_TXX9_NR_UARTS=3
-CONFIG_SERIAL_TXX9_CONSOLE=y
-CONFIG_HVC_RTAS=y
-CONFIG_HVC_BEAT=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
-CONFIG_I2C=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_HCD_PPC_OF is not set
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_STORAGE=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_LIBCRC32C=m
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_XMON=y
-CONFIG_XMON_DEFAULT=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index ca7957b09a3c..37659937bd12 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -99,6 +99,8 @@ CONFIG_E1000E=y
CONFIG_AT803X_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
+CONFIG_MDIO_BUS_MUX_GPIO=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -114,11 +116,14 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_INA2XX=y
CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_MON=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 04737aaa8b6b..33cd1df818ad 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -12,6 +12,10 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
@@ -75,6 +79,10 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_EEPROM_LEGACY=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
CONFIG_ATA=y
CONFIG_SATA_FSL=y
CONFIG_SATA_SIL24=y
@@ -85,6 +93,8 @@ CONFIG_FSL_XGMAC_MDIO=y
CONFIG_E1000E=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
+CONFIG_MDIO_BUS_MUX_GPIO=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -99,11 +109,14 @@ CONFIG_SERIAL_8250_RSA=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_INA2XX=y
CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_MON=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 8535c343dd57..6ecf7bdbc2f9 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -150,8 +150,7 @@ CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
-CONFIG_HWMON=m
-CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM90=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index c45ad2e01b0c..b6c7111ea913 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -143,7 +143,7 @@ CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM90=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 3315c9f0828a..aad501ae3834 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -36,7 +36,6 @@ CONFIG_PS3_ROM=m
CONFIG_PS3_FLASH=m
CONFIG_PS3_LPM=m
CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_CELLEB=y
CONFIG_PPC_CELL_QPACE=y
CONFIG_RTAS_FLASH=m
CONFIG_IBMEBUS=y
@@ -89,7 +88,6 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_AMD74XX=y
-CONFIG_BLK_DEV_CELLEB=y
CONFIG_BLK_DEV_IDE_PMAC=y
CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_BLK_DEV_SD=y
@@ -196,7 +194,6 @@ CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_JSM=m
CONFIG_HVC_CONSOLE=y
CONFIG_HVC_RTAS=y
-CONFIG_HVC_BEAT=y
CONFIG_HVCS=m
CONFIG_VIRTIO_CONSOLE=m
CONFIG_IBM_BSR=m
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 2926fb9c570a..9c221b69c181 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -4,6 +4,14 @@
# Arch-specific CryptoAPI modules.
#
+obj-$(CONFIG_CRYPTO_AES_PPC_SPE) += aes-ppc-spe.o
+obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
+obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
+obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
+aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
+md5-ppc-y := md5-asm.o md5-glue.o
sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
+sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
+sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
diff --git a/arch/powerpc/crypto/aes-spe-core.S b/arch/powerpc/crypto/aes-spe-core.S
new file mode 100644
index 000000000000..5dc6bce90a77
--- /dev/null
+++ b/arch/powerpc/crypto/aes-spe-core.S
@@ -0,0 +1,351 @@
+/*
+ * Fast AES implementation for SPE instruction set (PPC)
+ *
+ * This code makes use of the SPE SIMD instruction set as defined in
+ * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf
+ * Implementation is based on optimization guide notes from
+ * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/ppc_asm.h>
+#include "aes-spe-regs.h"
+
+#define EAD(in, bpos) \
+ rlwimi rT0,in,28-((bpos+3)%4)*8,20,27;
+
+#define DAD(in, bpos) \
+ rlwimi rT1,in,24-((bpos+3)%4)*8,24,31;
+
+#define LWH(out, off) \
+ evlwwsplat out,off(rT0); /* load word high */
+
+#define LWL(out, off) \
+ lwz out,off(rT0); /* load word low */
+
+#define LBZ(out, tab, off) \
+ lbz out,off(tab); /* load byte */
+
+#define LAH(out, in, bpos, off) \
+ EAD(in, bpos) /* calc addr + load word high */ \
+ LWH(out, off)
+
+#define LAL(out, in, bpos, off) \
+ EAD(in, bpos) /* calc addr + load word low */ \
+ LWL(out, off)
+
+#define LAE(out, in, bpos) \
+ EAD(in, bpos) /* calc addr + load enc byte */ \
+ LBZ(out, rT0, 8)
+
+#define LBE(out) \
+ LBZ(out, rT0, 8) /* load enc byte */
+
+#define LAD(out, in, bpos) \
+ DAD(in, bpos) /* calc addr + load dec byte */ \
+ LBZ(out, rT1, 0)
+
+#define LBD(out) \
+ LBZ(out, rT1, 0)
+
+/*
+ * ppc_encrypt_block: The central encryption function for a single 16 byte
+ * block. It does no stack handling or register saving to support fast calls
+ * via bl/blr. It expects that the caller has pre-xored the input data with
+ * the first 4 words of the encryption key into rD0-rD3. Pointer/counter
+ * registers must also have been set up beforehand (rT0, rKP, CTR). Output is
+ * stored in rD0-rD3 and rW0-rW3, and the caller must execute a final xor on
+ * the output registers.
+ * All working registers rD0-rD3 & rW0-rW7 are overwritten during processing.
+ *
+ */
+_GLOBAL(ppc_encrypt_block)
+ LAH(rW4, rD1, 2, 4)
+ LAH(rW6, rD0, 3, 0)
+ LAH(rW3, rD0, 1, 8)
+ppc_encrypt_block_loop:
+ LAH(rW0, rD3, 0, 12)
+ LAL(rW0, rD0, 0, 12)
+ LAH(rW1, rD1, 0, 12)
+ LAH(rW2, rD2, 1, 8)
+ LAL(rW2, rD3, 1, 8)
+ LAL(rW3, rD1, 1, 8)
+ LAL(rW4, rD2, 2, 4)
+ LAL(rW6, rD1, 3, 0)
+ LAH(rW5, rD3, 2, 4)
+ LAL(rW5, rD0, 2, 4)
+ LAH(rW7, rD2, 3, 0)
+ evldw rD1,16(rKP)
+ EAD(rD3, 3)
+ evxor rW2,rW2,rW4
+ LWL(rW7, 0)
+ evxor rW2,rW2,rW6
+ EAD(rD2, 0)
+ evxor rD1,rD1,rW2
+ LWL(rW1, 12)
+ evxor rD1,rD1,rW0
+ evldw rD3,24(rKP)
+ evmergehi rD0,rD0,rD1
+ EAD(rD1, 2)
+ evxor rW3,rW3,rW5
+ LWH(rW4, 4)
+ evxor rW3,rW3,rW7
+ EAD(rD0, 3)
+ evxor rD3,rD3,rW3
+ LWH(rW6, 0)
+ evxor rD3,rD3,rW1
+ EAD(rD0, 1)
+ evmergehi rD2,rD2,rD3
+ LWH(rW3, 8)
+ LAH(rW0, rD3, 0, 12)
+ LAL(rW0, rD0, 0, 12)
+ LAH(rW1, rD1, 0, 12)
+ LAH(rW2, rD2, 1, 8)
+ LAL(rW2, rD3, 1, 8)
+ LAL(rW3, rD1, 1, 8)
+ LAL(rW4, rD2, 2, 4)
+ LAL(rW6, rD1, 3, 0)
+ LAH(rW5, rD3, 2, 4)
+ LAL(rW5, rD0, 2, 4)
+ LAH(rW7, rD2, 3, 0)
+ evldw rD1,32(rKP)
+ EAD(rD3, 3)
+ evxor rW2,rW2,rW4
+ LWL(rW7, 0)
+ evxor rW2,rW2,rW6
+ EAD(rD2, 0)
+ evxor rD1,rD1,rW2
+ LWL(rW1, 12)
+ evxor rD1,rD1,rW0
+ evldw rD3,40(rKP)
+ evmergehi rD0,rD0,rD1
+ EAD(rD1, 2)
+ evxor rW3,rW3,rW5
+ LWH(rW4, 4)
+ evxor rW3,rW3,rW7
+ EAD(rD0, 3)
+ evxor rD3,rD3,rW3
+ LWH(rW6, 0)
+ evxor rD3,rD3,rW1
+ EAD(rD0, 1)
+ evmergehi rD2,rD2,rD3
+ LWH(rW3, 8)
+ addi rKP,rKP,32
+ bdnz ppc_encrypt_block_loop
+ LAH(rW0, rD3, 0, 12)
+ LAL(rW0, rD0, 0, 12)
+ LAH(rW1, rD1, 0, 12)
+ LAH(rW2, rD2, 1, 8)
+ LAL(rW2, rD3, 1, 8)
+ LAL(rW3, rD1, 1, 8)
+ LAL(rW4, rD2, 2, 4)
+ LAH(rW5, rD3, 2, 4)
+ LAL(rW6, rD1, 3, 0)
+ LAL(rW5, rD0, 2, 4)
+ LAH(rW7, rD2, 3, 0)
+ evldw rD1,16(rKP)
+ EAD(rD3, 3)
+ evxor rW2,rW2,rW4
+ LWL(rW7, 0)
+ evxor rW2,rW2,rW6
+ EAD(rD2, 0)
+ evxor rD1,rD1,rW2
+ LWL(rW1, 12)
+ evxor rD1,rD1,rW0
+ evldw rD3,24(rKP)
+ evmergehi rD0,rD0,rD1
+ EAD(rD1, 0)
+ evxor rW3,rW3,rW5
+ LBE(rW2)
+ evxor rW3,rW3,rW7
+ EAD(rD0, 1)
+ evxor rD3,rD3,rW3
+ LBE(rW6)
+ evxor rD3,rD3,rW1
+ EAD(rD0, 0)
+ evmergehi rD2,rD2,rD3
+ LBE(rW1)
+ LAE(rW0, rD3, 0)
+ LAE(rW1, rD0, 0)
+ LAE(rW4, rD2, 1)
+ LAE(rW5, rD3, 1)
+ LAE(rW3, rD2, 0)
+ LAE(rW7, rD1, 1)
+ rlwimi rW0,rW4,8,16,23
+ rlwimi rW1,rW5,8,16,23
+ LAE(rW4, rD1, 2)
+ LAE(rW5, rD2, 2)
+ rlwimi rW2,rW6,8,16,23
+ rlwimi rW3,rW7,8,16,23
+ LAE(rW6, rD3, 2)
+ LAE(rW7, rD0, 2)
+ rlwimi rW0,rW4,16,8,15
+ rlwimi rW1,rW5,16,8,15
+ LAE(rW4, rD0, 3)
+ LAE(rW5, rD1, 3)
+ rlwimi rW2,rW6,16,8,15
+ lwz rD0,32(rKP)
+ rlwimi rW3,rW7,16,8,15
+ lwz rD1,36(rKP)
+ LAE(rW6, rD2, 3)
+ LAE(rW7, rD3, 3)
+ rlwimi rW0,rW4,24,0,7
+ lwz rD2,40(rKP)
+ rlwimi rW1,rW5,24,0,7
+ lwz rD3,44(rKP)
+ rlwimi rW2,rW6,24,0,7
+ rlwimi rW3,rW7,24,0,7
+ blr
+
+/*
+ * ppc_decrypt_block: The central decryption function for a single 16 byte
+ * block. It does no stack handling or register saving to support fast calls
+ * via bl/blr. It expects that the caller has pre-xored the input data with
+ * the first 4 words of the encryption key into rD0-rD3. Pointer/counter
+ * registers must also have been set up beforehand (rT0, rKP, CTR). Output is
+ * stored in rD0-rD3 and rW0-rW3, and the caller must execute a final xor on
+ * the output registers.
+ * All working registers rD0-rD3 & rW0-rW7 are overwritten during processing.
+ *
+ */
+_GLOBAL(ppc_decrypt_block)
+ LAH(rW0, rD1, 0, 12)
+ LAH(rW6, rD0, 3, 0)
+ LAH(rW3, rD0, 1, 8)
+ppc_decrypt_block_loop:
+ LAH(rW1, rD3, 0, 12)
+ LAL(rW0, rD2, 0, 12)
+ LAH(rW2, rD2, 1, 8)
+ LAL(rW2, rD3, 1, 8)
+ LAH(rW4, rD3, 2, 4)
+ LAL(rW4, rD0, 2, 4)
+ LAL(rW6, rD1, 3, 0)
+ LAH(rW5, rD1, 2, 4)
+ LAH(rW7, rD2, 3, 0)
+ LAL(rW7, rD3, 3, 0)
+ LAL(rW3, rD1, 1, 8)
+ evldw rD1,16(rKP)
+ EAD(rD0, 0)
+ evxor rW4,rW4,rW6
+ LWL(rW1, 12)
+ evxor rW0,rW0,rW4
+ EAD(rD2, 2)
+ evxor rW0,rW0,rW2
+ LWL(rW5, 4)
+ evxor rD1,rD1,rW0
+ evldw rD3,24(rKP)
+ evmergehi rD0,rD0,rD1
+ EAD(rD1, 0)
+ evxor rW3,rW3,rW7
+ LWH(rW0, 12)
+ evxor rW3,rW3,rW1
+ EAD(rD0, 3)
+ evxor rD3,rD3,rW3
+ LWH(rW6, 0)
+ evxor rD3,rD3,rW5
+ EAD(rD0, 1)
+ evmergehi rD2,rD2,rD3
+ LWH(rW3, 8)
+ LAH(rW1, rD3, 0, 12)
+ LAL(rW0, rD2, 0, 12)
+ LAH(rW2, rD2, 1, 8)
+ LAL(rW2, rD3, 1, 8)
+ LAH(rW4, rD3, 2, 4)
+ LAL(rW4, rD0, 2, 4)
+ LAL(rW6, rD1, 3, 0)
+ LAH(rW5, rD1, 2, 4)
+ LAH(rW7, rD2, 3, 0)
+ LAL(rW7, rD3, 3, 0)
+ LAL(rW3, rD1, 1, 8)
+ evldw rD1,32(rKP)
+ EAD(rD0, 0)
+ evxor rW4,rW4,rW6
+ LWL(rW1, 12)
+ evxor rW0,rW0,rW4
+ EAD(rD2, 2)
+ evxor rW0,rW0,rW2
+ LWL(rW5, 4)
+ evxor rD1,rD1,rW0
+ evldw rD3,40(rKP)
+ evmergehi rD0,rD0,rD1
+ EAD(rD1, 0)
+ evxor rW3,rW3,rW7
+ LWH(rW0, 12)
+ evxor rW3,rW3,rW1
+ EAD(rD0, 3)
+ evxor rD3,rD3,rW3
+ LWH(rW6, 0)
+ evxor rD3,rD3,rW5
+ EAD(rD0, 1)
+ evmergehi rD2,rD2,rD3
+ LWH(rW3, 8)
+ addi rKP,rKP,32
+ bdnz ppc_decrypt_block_loop
+ LAH(rW1, rD3, 0, 12)
+ LAL(rW0, rD2, 0, 12)
+ LAH(rW2, rD2, 1, 8)
+ LAL(rW2, rD3, 1, 8)
+ LAH(rW4, rD3, 2, 4)
+ LAL(rW4, rD0, 2, 4)
+ LAL(rW6, rD1, 3, 0)
+ LAH(rW5, rD1, 2, 4)
+ LAH(rW7, rD2, 3, 0)
+ LAL(rW7, rD3, 3, 0)
+ LAL(rW3, rD1, 1, 8)
+ evldw rD1,16(rKP)
+ EAD(rD0, 0)
+ evxor rW4,rW4,rW6
+ LWL(rW1, 12)
+ evxor rW0,rW0,rW4
+ EAD(rD2, 2)
+ evxor rW0,rW0,rW2
+ LWL(rW5, 4)
+ evxor rD1,rD1,rW0
+ evldw rD3,24(rKP)
+ evmergehi rD0,rD0,rD1
+ DAD(rD1, 0)
+ evxor rW3,rW3,rW7
+ LBD(rW0)
+ evxor rW3,rW3,rW1
+ DAD(rD0, 1)
+ evxor rD3,rD3,rW3
+ LBD(rW6)
+ evxor rD3,rD3,rW5
+ DAD(rD0, 0)
+ evmergehi rD2,rD2,rD3
+ LBD(rW3)
+ LAD(rW2, rD3, 0)
+ LAD(rW1, rD2, 0)
+ LAD(rW4, rD2, 1)
+ LAD(rW5, rD3, 1)
+ LAD(rW7, rD1, 1)
+ rlwimi rW0,rW4,8,16,23
+ rlwimi rW1,rW5,8,16,23
+ LAD(rW4, rD3, 2)
+ LAD(rW5, rD0, 2)
+ rlwimi rW2,rW6,8,16,23
+ rlwimi rW3,rW7,8,16,23
+ LAD(rW6, rD1, 2)
+ LAD(rW7, rD2, 2)
+ rlwimi rW0,rW4,16,8,15
+ rlwimi rW1,rW5,16,8,15
+ LAD(rW4, rD0, 3)
+ LAD(rW5, rD1, 3)
+ rlwimi rW2,rW6,16,8,15
+ lwz rD0,32(rKP)
+ rlwimi rW3,rW7,16,8,15
+ lwz rD1,36(rKP)
+ LAD(rW6, rD2, 3)
+ LAD(rW7, rD3, 3)
+ rlwimi rW0,rW4,24,0,7
+ lwz rD2,40(rKP)
+ rlwimi rW1,rW5,24,0,7
+ lwz rD3,44(rKP)
+ rlwimi rW2,rW6,24,0,7
+ rlwimi rW3,rW7,24,0,7
+ blr
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
new file mode 100644
index 000000000000..bd5e63f72ad4
--- /dev/null
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -0,0 +1,512 @@
+/*
+ * Glue code for AES implementation for SPE instructions (PPC)
+ *
+ * Based on the generic implementation. The assembler module takes care
+ * of the SPE registers so it can run from interrupt context.
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/aes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <asm/byteorder.h>
+#include <asm/switch_to.h>
+#include <crypto/algapi.h>
+
+/*
+ * MAX_BYTES defines the number of bytes that are allowed to be processed
+ * between preempt_disable() and preempt_enable(). e500 cores can issue two
+ * instructions per clock cycle using one 32/64 bit unit (SU1) and one 32
+ * bit unit (SU2). One of these can be a memory access that is executed via
+ * a single load and store unit (LSU). XTS-AES-256 takes ~780 operations per
+ * 16 byte block, i.e. roughly 25 cycles per byte at two instructions per
+ * cycle. Thus 768 bytes of input data will need an estimated maximum of
+ * 20,000 cycles, headroom for cache misses included. Even with the low end
+ * model clocked at 667 MHz this equals a critical time window of less than
+ * 30us. The value has been chosen to process a 512 byte disk block in one
+ * run or a large 1400 byte IPsec network packet in two runs.
+ *
+ */
+#define MAX_BYTES 768
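
Editor's note: as a sanity check of the figures quoted in the comment above, a back-of-envelope sketch in userspace C (not part of the patch; the 25 cycles/byte and 667 MHz numbers come from that comment):

#include <stdio.h>

int main(void)
{
	const unsigned int max_bytes = 768;      /* bytes per preempt-off window */
	const unsigned int cycles_per_byte = 25; /* worst case: XTS-AES-256 */
	const unsigned int clock_mhz = 667;      /* low end e500 clock */

	unsigned int cycles = max_bytes * cycles_per_byte; /* 19200, ~20k with headroom */
	double window_us = (double)cycles / clock_mhz;     /* ~28.8 us */

	printf("%u cycles, roughly %.1f us with preemption disabled\n",
	       cycles, window_us);
	return 0;
}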
+
+struct ppc_aes_ctx {
+ u32 key_enc[AES_MAX_KEYLENGTH_U32];
+ u32 key_dec[AES_MAX_KEYLENGTH_U32];
+ u32 rounds;
+};
+
+struct ppc_xts_ctx {
+ u32 key_enc[AES_MAX_KEYLENGTH_U32];
+ u32 key_dec[AES_MAX_KEYLENGTH_U32];
+ u32 key_twk[AES_MAX_KEYLENGTH_U32];
+ u32 rounds;
+};
+
+extern void ppc_encrypt_aes(u8 *out, const u8 *in, u32 *key_enc, u32 rounds);
+extern void ppc_decrypt_aes(u8 *out, const u8 *in, u32 *key_dec, u32 rounds);
+extern void ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
+ u32 bytes);
+extern void ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
+ u32 bytes);
+extern void ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
+ u32 bytes, u8 *iv);
+extern void ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
+ u32 bytes, u8 *iv);
+extern void ppc_crypt_ctr (u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
+ u32 bytes, u8 *iv);
+extern void ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
+ u32 bytes, u8 *iv, u32 *key_twk);
+extern void ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
+ u32 bytes, u8 *iv, u32 *key_twk);
+
+extern void ppc_expand_key_128(u32 *key_enc, const u8 *key);
+extern void ppc_expand_key_192(u32 *key_enc, const u8 *key);
+extern void ppc_expand_key_256(u32 *key_enc, const u8 *key);
+
+extern void ppc_generate_decrypt_key(u32 *key_dec,u32 *key_enc,
+ unsigned int key_len);
+
+static void spe_begin(void)
+{
+	/* disable preemption and save the user's SPE registers if required */
+ preempt_disable();
+ enable_kernel_spe();
+}
+
+static void spe_end(void)
+{
+ /* reenable preemption */
+ preempt_enable();
+}
+
+static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (key_len != AES_KEYSIZE_128 &&
+ key_len != AES_KEYSIZE_192 &&
+ key_len != AES_KEYSIZE_256) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ ctx->rounds = 4;
+ ppc_expand_key_128(ctx->key_enc, in_key);
+ break;
+ case AES_KEYSIZE_192:
+ ctx->rounds = 5;
+ ppc_expand_key_192(ctx->key_enc, in_key);
+ break;
+ case AES_KEYSIZE_256:
+ ctx->rounds = 6;
+ ppc_expand_key_256(ctx->key_enc, in_key);
+ break;
+ }
+
+ ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
+
+ return 0;
+}
+
+static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ key_len >>= 1;
+
+ if (key_len != AES_KEYSIZE_128 &&
+ key_len != AES_KEYSIZE_192 &&
+ key_len != AES_KEYSIZE_256) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ ctx->rounds = 4;
+ ppc_expand_key_128(ctx->key_enc, in_key);
+ ppc_expand_key_128(ctx->key_twk, in_key + AES_KEYSIZE_128);
+ break;
+ case AES_KEYSIZE_192:
+ ctx->rounds = 5;
+ ppc_expand_key_192(ctx->key_enc, in_key);
+ ppc_expand_key_192(ctx->key_twk, in_key + AES_KEYSIZE_192);
+ break;
+ case AES_KEYSIZE_256:
+ ctx->rounds = 6;
+ ppc_expand_key_256(ctx->key_enc, in_key);
+ ppc_expand_key_256(ctx->key_twk, in_key + AES_KEYSIZE_256);
+ break;
+ }
+
+ ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
+
+ return 0;
+}
+
+static void ppc_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ spe_begin();
+ ppc_encrypt_aes(out, in, ctx->key_enc, ctx->rounds);
+ spe_end();
+}
+
+static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ spe_begin();
+ ppc_decrypt_aes(out, in, ctx->key_dec, ctx->rounds);
+ spe_end();
+}
+
+static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int ubytes;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ ubytes = nbytes > MAX_BYTES ?
+ nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
+ nbytes -= ubytes;
+
+ spe_begin();
+ ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, ctx->rounds, nbytes);
+ spe_end();
+
+ err = blkcipher_walk_done(desc, &walk, ubytes);
+ }
+
+ return err;
+}
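
Editor's note: the ubytes computation in the walk loop above caps how much data is processed per spe_begin()/spe_end() window; anything beyond MAX_BYTES, or a trailing partial block, is handed back to blkcipher_walk_done() for the next iteration. A small sketch of that split (userspace C, not part of the patch):

/*
 * Editor's sketch: 'nbytes' is what the walk currently offers, 'ubytes'
 * is what is returned to blkcipher_walk_done() as unprocessed.
 */
#include <stdio.h>

#define MAX_BYTES      768
#define AES_BLOCK_SIZE 16

static unsigned int unprocessed(unsigned int nbytes)
{
	return nbytes > MAX_BYTES ? nbytes - MAX_BYTES
				  : nbytes & (AES_BLOCK_SIZE - 1);
}

int main(void)
{
	unsigned int samples[] = { 4096, 768, 500, 48 };

	for (unsigned int i = 0; i < 4; i++)
		printf("offered %4u -> process %4u, leave %4u\n",
		       samples[i], samples[i] - unprocessed(samples[i]),
		       unprocessed(samples[i]));
	return 0;
}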
+
+static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int ubytes;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ ubytes = nbytes > MAX_BYTES ?
+ nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
+ nbytes -= ubytes;
+
+ spe_begin();
+ ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, ctx->rounds, nbytes);
+ spe_end();
+
+ err = blkcipher_walk_done(desc, &walk, ubytes);
+ }
+
+ return err;
+}
+
+static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int ubytes;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ ubytes = nbytes > MAX_BYTES ?
+ nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
+ nbytes -= ubytes;
+
+ spe_begin();
+ ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, ctx->rounds, nbytes, walk.iv);
+ spe_end();
+
+ err = blkcipher_walk_done(desc, &walk, ubytes);
+ }
+
+ return err;
+}
+
+static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int ubytes;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ ubytes = nbytes > MAX_BYTES ?
+ nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
+ nbytes -= ubytes;
+
+ spe_begin();
+ ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, ctx->rounds, nbytes, walk.iv);
+ spe_end();
+
+ err = blkcipher_walk_done(desc, &walk, ubytes);
+ }
+
+ return err;
+}
+
+static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int pbytes, ubytes;
+ int err;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+
+ while ((pbytes = walk.nbytes)) {
+ pbytes = pbytes > MAX_BYTES ? MAX_BYTES : pbytes;
+ pbytes = pbytes == nbytes ?
+ nbytes : pbytes & ~(AES_BLOCK_SIZE - 1);
+ ubytes = walk.nbytes - pbytes;
+
+ spe_begin();
+ ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, ctx->rounds, pbytes , walk.iv);
+ spe_end();
+
+ nbytes -= pbytes;
+ err = blkcipher_walk_done(desc, &walk, ubytes);
+ }
+
+ return err;
+}
+
+static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int ubytes;
+ int err;
+ u32 *twk;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+ twk = ctx->key_twk;
+
+ while ((nbytes = walk.nbytes)) {
+ ubytes = nbytes > MAX_BYTES ?
+ nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
+ nbytes -= ubytes;
+
+ spe_begin();
+ ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk);
+ spe_end();
+
+ twk = NULL;
+ err = blkcipher_walk_done(desc, &walk, ubytes);
+ }
+
+ return err;
+}
+
+static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ unsigned int ubytes;
+ int err;
+ u32 *twk;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+ twk = ctx->key_twk;
+
+ while ((nbytes = walk.nbytes)) {
+ ubytes = nbytes > MAX_BYTES ?
+ nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
+ nbytes -= ubytes;
+
+ spe_begin();
+ ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk);
+ spe_end();
+
+ twk = NULL;
+ err = blkcipher_walk_done(desc, &walk, ubytes);
+ }
+
+ return err;
+}
+
+/*
+ * Algorithm definitions. Disabling alignment (cra_alignmask=0) was chosen
+ * because the e500 platform can handle unaligned reads/writes very
+ * efficiently. This improves IPsec throughput by another few percent.
+ * Additionally we assume that the AES context is always aligned to at least
+ * 8 bytes because it is created with kmalloc() in the crypto infrastructure.
+ *
+ */
+static struct crypto_alg aes_algs[] = { {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ppc_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = ppc_aes_setkey,
+ .cia_encrypt = ppc_aes_encrypt,
+ .cia_decrypt = ppc_aes_decrypt
+ }
+ }
+}, {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ppc_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ppc_aes_setkey,
+ .encrypt = ppc_ecb_encrypt,
+ .decrypt = ppc_ecb_decrypt,
+ }
+ }
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ppc_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ppc_aes_setkey,
+ .encrypt = ppc_cbc_encrypt,
+ .decrypt = ppc_cbc_decrypt,
+ }
+ }
+}, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct ppc_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ppc_aes_setkey,
+ .encrypt = ppc_ctr_crypt,
+ .decrypt = ppc_ctr_crypt,
+ }
+ }
+}, {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ppc_xts_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ppc_xts_setkey,
+ .encrypt = ppc_xts_encrypt,
+ .decrypt = ppc_xts_decrypt,
+ }
+ }
+} };
+
+static int __init ppc_aes_mod_init(void)
+{
+ return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+static void __exit ppc_aes_mod_fini(void)
+{
+ crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+module_init(ppc_aes_mod_init);
+module_exit(ppc_aes_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS, SPE optimized");
+
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("ecb(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("ctr(aes)");
+MODULE_ALIAS_CRYPTO("xts(aes)");
+MODULE_ALIAS_CRYPTO("aes-ppc-spe");
diff --git a/arch/powerpc/crypto/aes-spe-keys.S b/arch/powerpc/crypto/aes-spe-keys.S
new file mode 100644
index 000000000000..be8090f3d700
--- /dev/null
+++ b/arch/powerpc/crypto/aes-spe-keys.S
@@ -0,0 +1,283 @@
+/*
+ * Key handling functions for PPC AES implementation
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/ppc_asm.h>
+
+#ifdef __BIG_ENDIAN__
+#define LOAD_KEY(d, s, off) \
+ lwz d,off(s);
+#else
+#define LOAD_KEY(d, s, off) \
+ li r0,off; \
+ lwbrx d,s,r0;
+#endif
+
+#define INITIALIZE_KEY \
+ stwu r1,-32(r1); /* create stack frame */ \
+ stw r14,8(r1); /* save registers */ \
+ stw r15,12(r1); \
+ stw r16,16(r1);
+
+#define FINALIZE_KEY \
+ lwz r14,8(r1); /* restore registers */ \
+ lwz r15,12(r1); \
+ lwz r16,16(r1); \
+ xor r5,r5,r5; /* clear sensitive data */ \
+ xor r6,r6,r6; \
+ xor r7,r7,r7; \
+ xor r8,r8,r8; \
+ xor r9,r9,r9; \
+ xor r10,r10,r10; \
+ xor r11,r11,r11; \
+ xor r12,r12,r12; \
+ addi r1,r1,32; /* cleanup stack */
+
+#define LS_BOX(r, t1, t2) \
+ lis t2,PPC_AES_4K_ENCTAB@h; \
+ ori t2,t2,PPC_AES_4K_ENCTAB@l; \
+ rlwimi t2,r,4,20,27; \
+ lbz t1,8(t2); \
+ rlwimi r,t1,0,24,31; \
+ rlwimi t2,r,28,20,27; \
+ lbz t1,8(t2); \
+ rlwimi r,t1,8,16,23; \
+ rlwimi t2,r,20,20,27; \
+ lbz t1,8(t2); \
+ rlwimi r,t1,16,8,15; \
+ rlwimi t2,r,12,20,27; \
+ lbz t1,8(t2); \
+ rlwimi r,t1,24,0,7;
+
+#define GF8_MUL(out, in, t1, t2) \
+ lis t1,0x8080; /* multiplication in GF8 */ \
+ ori t1,t1,0x8080; \
+ and t1,t1,in; \
+ srwi t1,t1,7; \
+ mulli t1,t1,0x1b; \
+ lis t2,0x7f7f; \
+ ori t2,t2,0x7f7f; \
+ and t2,t2,in; \
+ slwi t2,t2,1; \
+ xor out,t1,t2;
+
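
Editor's note: GF8_MUL above is the AES "xtime" operation, multiplication by 2 in GF(2^8) reduced by the polynomial x^8 + x^4 + x^3 + x + 1 (0x11b), applied to four bytes packed into one word. A byte-wise sketch (userspace C, not part of the patch), using the test values from FIPS-197:

#include <stdint.h>
#include <stdio.h>

static uint8_t xtime(uint8_t b)
{
	/* shift left; reduce with 0x1b when the top bit falls out */
	return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00));
}

int main(void)
{
	printf("xtime(0x57) = 0x%02x\n", xtime(0x57)); /* 0xae */
	printf("xtime(0xae) = 0x%02x\n", xtime(0xae)); /* 0x47 */
	return 0;
}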
+/*
+ * ppc_expand_key_128(u32 *key_enc, const u8 *key)
+ *
+ * Expand a 128 bit key into a 176 byte encryption key schedule, i.e. the
+ * key itself plus 10 further round keys of 16 bytes each (11 * 16 = 176).
+ *
+ */
+_GLOBAL(ppc_expand_key_128)
+ INITIALIZE_KEY
+ LOAD_KEY(r5,r4,0)
+ LOAD_KEY(r6,r4,4)
+ LOAD_KEY(r7,r4,8)
+ LOAD_KEY(r8,r4,12)
+ stw r5,0(r3) /* key[0..3] = input data */
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ li r16,10 /* 10 expansion rounds */
+ lis r0,0x0100 /* RCO(1) */
+ppc_expand_128_loop:
+ addi r3,r3,16
+ mr r14,r8 /* apply LS_BOX to 4th temp */
+ rotlwi r14,r14,8
+ LS_BOX(r14, r15, r4)
+ xor r14,r14,r0
+ xor r5,r5,r14 /* xor next 4 keys */
+ xor r6,r6,r5
+ xor r7,r7,r6
+ xor r8,r8,r7
+ stw r5,0(r3) /* store next 4 keys */
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ GF8_MUL(r0, r0, r4, r14) /* multiply RCO by 2 in GF */
+ subi r16,r16,1
+ cmpwi r16,0
+ bt eq,ppc_expand_128_end
+ b ppc_expand_128_loop
+ppc_expand_128_end:
+ FINALIZE_KEY
+ blr
+
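
Editor's note: the three schedule sizes produced by these expansion routines follow directly from 16 bytes per round key and rounds+1 round keys; a quick check (userspace C, not part of the patch):

#include <stdio.h>

int main(void)
{
	const struct { int keybits, rounds; } v[] = {
		{ 128, 10 }, { 192, 12 }, { 256, 14 },
	};

	for (int i = 0; i < 3; i++)
		printf("AES-%d: %d bytes\n", v[i].keybits,
		       16 * (v[i].rounds + 1));	/* 176 / 208 / 240 */
	return 0;
}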
+/*
+ * ppc_expand_key_192(u32 *key_enc, const u8 *key)
+ *
+ * Expand a 192 bit key into a 208 byte encryption key schedule, i.e. 13
+ * round keys of 16 bytes each (13 * 16 = 208).
+ *
+ */
+_GLOBAL(ppc_expand_key_192)
+ INITIALIZE_KEY
+ LOAD_KEY(r5,r4,0)
+ LOAD_KEY(r6,r4,4)
+ LOAD_KEY(r7,r4,8)
+ LOAD_KEY(r8,r4,12)
+ LOAD_KEY(r9,r4,16)
+ LOAD_KEY(r10,r4,20)
+ stw r5,0(r3)
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ stw r9,16(r3)
+ stw r10,20(r3)
+ li r16,8 /* 8 expansion rounds */
+ lis r0,0x0100 /* RCO(1) */
+ppc_expand_192_loop:
+ addi r3,r3,24
+ mr r14,r10 /* apply LS_BOX to 6th temp */
+ rotlwi r14,r14,8
+ LS_BOX(r14, r15, r4)
+ xor r14,r14,r0
+ xor r5,r5,r14 /* xor next 6 keys */
+ xor r6,r6,r5
+ xor r7,r7,r6
+ xor r8,r8,r7
+ xor r9,r9,r8
+ xor r10,r10,r9
+ stw r5,0(r3)
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ subi r16,r16,1
+ cmpwi r16,0 /* last round early kick out */
+ bt eq,ppc_expand_192_end
+ stw r9,16(r3)
+ stw r10,20(r3)
+ GF8_MUL(r0, r0, r4, r14) /* multiply RCO GF8 */
+ b ppc_expand_192_loop
+ppc_expand_192_end:
+ FINALIZE_KEY
+ blr
+
+/*
+ * ppc_expand_key_256(u32 *key_enc, const u8 *key)
+ *
+ * Expand a 256 bit key into a 240 byte encryption key schedule, i.e. 15
+ * round keys of 16 bytes each (15 * 16 = 240).
+ *
+ */
+_GLOBAL(ppc_expand_key_256)
+ INITIALIZE_KEY
+ LOAD_KEY(r5,r4,0)
+ LOAD_KEY(r6,r4,4)
+ LOAD_KEY(r7,r4,8)
+ LOAD_KEY(r8,r4,12)
+ LOAD_KEY(r9,r4,16)
+ LOAD_KEY(r10,r4,20)
+ LOAD_KEY(r11,r4,24)
+ LOAD_KEY(r12,r4,28)
+ stw r5,0(r3)
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ stw r9,16(r3)
+ stw r10,20(r3)
+ stw r11,24(r3)
+ stw r12,28(r3)
+ li r16,7 /* 7 expansion rounds */
+ lis r0,0x0100 /* RCO(1) */
+ppc_expand_256_loop:
+ addi r3,r3,32
+ mr r14,r12 /* apply LS_BOX to 8th temp */
+ rotlwi r14,r14,8
+ LS_BOX(r14, r15, r4)
+ xor r14,r14,r0
+ xor r5,r5,r14 /* xor 4 keys */
+ xor r6,r6,r5
+ xor r7,r7,r6
+ xor r8,r8,r7
+ mr r14,r8
+ LS_BOX(r14, r15, r4) /* apply LS_BOX to 4th temp */
+ xor r9,r9,r14 /* xor 4 keys */
+ xor r10,r10,r9
+ xor r11,r11,r10
+ xor r12,r12,r11
+ stw r5,0(r3)
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ subi r16,r16,1
+ cmpwi r16,0 /* last round early kick out */
+ bt eq,ppc_expand_256_end
+ stw r9,16(r3)
+ stw r10,20(r3)
+ stw r11,24(r3)
+ stw r12,28(r3)
+ GF8_MUL(r0, r0, r4, r14)
+ b ppc_expand_256_loop
+ppc_expand_256_end:
+ FINALIZE_KEY
+ blr
+
+/*
+ * ppc_generate_decrypt_key: derive the decryption key from the encryption
+ * key; the number of bytes to handle is calculated from the key length
+ * (16/24/32)
+ *
+ */
+_GLOBAL(ppc_generate_decrypt_key)
+ addi r6,r5,24
+ slwi r6,r6,2
+ lwzx r7,r4,r6 /* first/last 4 words are same */
+ stw r7,0(r3)
+ lwz r7,0(r4)
+ stwx r7,r3,r6
+ addi r6,r6,4
+ lwzx r7,r4,r6
+ stw r7,4(r3)
+ lwz r7,4(r4)
+ stwx r7,r3,r6
+ addi r6,r6,4
+ lwzx r7,r4,r6
+ stw r7,8(r3)
+ lwz r7,8(r4)
+ stwx r7,r3,r6
+ addi r6,r6,4
+ lwzx r7,r4,r6
+ stw r7,12(r3)
+ lwz r7,12(r4)
+ stwx r7,r3,r6
+ addi r3,r3,16
+ add r4,r4,r6
+ subi r4,r4,28
+ addi r5,r5,20
+ srwi r5,r5,2
+ppc_generate_decrypt_block:
+ li r6,4
+ mtctr r6
+ppc_generate_decrypt_word:
+ lwz r6,0(r4)
+ GF8_MUL(r7, r6, r0, r7)
+ GF8_MUL(r8, r7, r0, r8)
+ GF8_MUL(r9, r8, r0, r9)
+ xor r10,r9,r6
+ xor r11,r7,r8
+ xor r11,r11,r9
+ xor r12,r7,r10
+ rotrwi r12,r12,24
+ xor r11,r11,r12
+ xor r12,r8,r10
+ rotrwi r12,r12,16
+ xor r11,r11,r12
+ rotrwi r12,r10,8
+ xor r11,r11,r12
+ stw r11,0(r3)
+ addi r3,r3,4
+ addi r4,r4,4
+ bdnz ppc_generate_decrypt_word
+ subi r4,r4,32
+ subi r5,r5,1
+ cmpwi r5,0
+ bt gt,ppc_generate_decrypt_block
+ blr
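+
+/*
+ * Every round key except the first and the last one gets InvMixColumns
+ * applied by the loop above (the outer two are only exchanged). For one
+ * 32 bit word this corresponds roughly to the following C sketch (not
+ * part of the build; gf8_mul2()/ror32() stand in for GF8_MUL and a
+ * 32 bit right rotate):
+ *
+ *	static u32 inv_mix_word(u32 w)
+ *	{
+ *		u32 x2 = gf8_mul2(w);		// 2*w per byte
+ *		u32 x4 = gf8_mul2(x2);		// 4*w
+ *		u32 x8 = gf8_mul2(x4);		// 8*w
+ *		u32 x9 = x8 ^ w;		// 9*w
+ *		u32 r  = x2 ^ x4 ^ x8;		// 0x0e * w
+ *
+ *		r ^= ror32(x2 ^ x9, 24);	// 0x0b * w, rotated
+ *		r ^= ror32(x4 ^ x9, 16);	// 0x0d * w, rotated
+ *		r ^= ror32(x9, 8);		// 0x09 * w, rotated
+ *		return r;
+ *	}
+ */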
diff --git a/arch/powerpc/crypto/aes-spe-modes.S b/arch/powerpc/crypto/aes-spe-modes.S
new file mode 100644
index 000000000000..ad48032ca8e0
--- /dev/null
+++ b/arch/powerpc/crypto/aes-spe-modes.S
@@ -0,0 +1,630 @@
+/*
+ * AES modes (ECB/CBC/CTR/XTS) for PPC AES implementation
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/ppc_asm.h>
+#include "aes-spe-regs.h"
+
+#ifdef __BIG_ENDIAN__ /* Macros for big endian builds */
+
+#define LOAD_DATA(reg, off) \
+ lwz reg,off(rSP); /* load with offset */
+#define SAVE_DATA(reg, off) \
+ stw reg,off(rDP); /* save with offset */
+#define NEXT_BLOCK \
+ addi rSP,rSP,16; /* increment pointers per block */ \
+ addi rDP,rDP,16;
+#define LOAD_IV(reg, off) \
+ lwz reg,off(rIP); /* IV loading with offset */
+#define SAVE_IV(reg, off) \
+ stw reg,off(rIP); /* IV saving with offset */
+#define START_IV /* nothing to reset */
+#define CBC_DEC 16 /* CBC decrement per block */
+#define CTR_DEC 1 /* CTR decrement one byte */
+
+#else /* Macros for little endian */
+
+#define LOAD_DATA(reg, off) \
+ lwbrx reg,0,rSP; /* load reversed */ \
+ addi rSP,rSP,4; /* and increment pointer */
+#define SAVE_DATA(reg, off) \
+ stwbrx reg,0,rDP; /* save reversed */ \
+ addi rDP,rDP,4; /* and increment pointer */
+#define NEXT_BLOCK /* nothing to do */
+#define LOAD_IV(reg, off) \
+ lwbrx reg,0,rIP; /* load reversed */ \
+ addi rIP,rIP,4; /* and increment pointer */
+#define SAVE_IV(reg, off) \
+ stwbrx reg,0,rIP; /* save reversed */ \
+ addi rIP,rIP,4; /* and increment pointer */
+#define START_IV \
+ subi rIP,rIP,16; /* must reset pointer */
+#define CBC_DEC 32 /* 2 blocks because of incs */
+#define CTR_DEC 17 /* 1 block because of incs */
+
+#endif
+
+#define SAVE_0_REGS
+#define LOAD_0_REGS
+
+#define SAVE_4_REGS \
+ stw rI0,96(r1); /* save 32 bit registers */ \
+ stw rI1,100(r1); \
+ stw rI2,104(r1); \
+ stw rI3,108(r1);
+
+#define LOAD_4_REGS \
+ lwz rI0,96(r1); /* restore 32 bit registers */ \
+ lwz rI1,100(r1); \
+ lwz rI2,104(r1); \
+ lwz rI3,108(r1);
+
+#define SAVE_8_REGS \
+ SAVE_4_REGS \
+ stw rG0,112(r1); /* save 32 bit registers */ \
+ stw rG1,116(r1); \
+ stw rG2,120(r1); \
+ stw rG3,124(r1);
+
+#define LOAD_8_REGS \
+ LOAD_4_REGS \
+ lwz rG0,112(r1); /* restore 32 bit registers */ \
+ lwz rG1,116(r1); \
+ lwz rG2,120(r1); \
+ lwz rG3,124(r1);
+
+#define INITIALIZE_CRYPT(tab,nr32bitregs) \
+ mflr r0; \
+ stwu r1,-160(r1); /* create stack frame */ \
+ lis rT0,tab@h; /* en-/decryption table pointer */ \
+ stw r0,8(r1); /* save link register */ \
+ ori rT0,rT0,tab@l; \
+ evstdw r14,16(r1); \
+ mr rKS,rKP; \
+ evstdw r15,24(r1); /* We must save non volatile */ \
+ evstdw r16,32(r1); /* registers. Take the chance */ \
+ evstdw r17,40(r1); /* and save the SPE part too */ \
+ evstdw r18,48(r1); \
+ evstdw r19,56(r1); \
+ evstdw r20,64(r1); \
+ evstdw r21,72(r1); \
+ evstdw r22,80(r1); \
+ evstdw r23,88(r1); \
+ SAVE_##nr32bitregs##_REGS
+
+#define FINALIZE_CRYPT(nr32bitregs) \
+ lwz r0,8(r1); \
+ evldw r14,16(r1); /* restore SPE registers */ \
+ evldw r15,24(r1); \
+ evldw r16,32(r1); \
+ evldw r17,40(r1); \
+ evldw r18,48(r1); \
+ evldw r19,56(r1); \
+ evldw r20,64(r1); \
+ evldw r21,72(r1); \
+ evldw r22,80(r1); \
+ evldw r23,88(r1); \
+ LOAD_##nr32bitregs##_REGS \
+ mtlr r0; /* restore link register */ \
+ xor r0,r0,r0; \
+ stw r0,16(r1); /* delete sensitive data */ \
+ stw r0,24(r1); /* that we might have pushed */ \
+ stw r0,32(r1); /* from other context that runs */ \
+ stw r0,40(r1); /* the same code */ \
+ stw r0,48(r1); \
+ stw r0,56(r1); \
+ stw r0,64(r1); \
+ stw r0,72(r1); \
+ stw r0,80(r1); \
+ stw r0,88(r1); \
+ addi r1,r1,160; /* cleanup stack frame */
+
+#define ENDIAN_SWAP(t0, t1, s0, s1) \
+ rotrwi t0,s0,8; /* swap endianness for 2 GPRs */ \
+ rotrwi t1,s1,8; \
+ rlwimi t0,s0,8,8,15; \
+ rlwimi t1,s1,8,8,15; \
+ rlwimi t0,s0,8,24,31; \
+ rlwimi t1,s1,8,24,31;
+
+#define GF128_MUL(d0, d1, d2, d3, t0) \
+ li t0,0x87; /* multiplication in GF128 */ \
+ cmpwi d3,-1; \
+ iselgt t0,0,t0; \
+ rlwimi d3,d2,0,0,0; /* propagate "carry" bits */ \
+ rotlwi d3,d3,1; \
+ rlwimi d2,d1,0,0,0; \
+ rotlwi d2,d2,1; \
+ rlwimi d1,d0,0,0,0; \
+ slwi d0,d0,1; /* shift left 128 bit */ \
+ rotlwi d1,d1,1; \
+ xor d0,d0,t0;
+
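+/*
+ * Sketch of the operation above (not part of the build): with the tweak
+ * kept as four byte swapped words t[0]..t[3], t[0] being the least
+ * significant one, GF128_MUL performs the usual XTS multiplication by x:
+ *
+ *	static void gf128_mul_x(u32 t[4])
+ *	{
+ *		u32 carry = (t[3] & 0x80000000) ? 0x87 : 0;
+ *
+ *		t[3] = (t[3] << 1) | (t[2] >> 31);
+ *		t[2] = (t[2] << 1) | (t[1] >> 31);
+ *		t[1] = (t[1] << 1) | (t[0] >> 31);
+ *		t[0] = (t[0] << 1) ^ carry;
+ *	}
+ */
+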
+#define START_KEY(d0, d1, d2, d3) \
+ lwz rW0,0(rKP); \
+ mtctr rRR; \
+ lwz rW1,4(rKP); \
+ lwz rW2,8(rKP); \
+ lwz rW3,12(rKP); \
+ xor rD0,d0,rW0; \
+ xor rD1,d1,rW1; \
+ xor rD2,d2,rW2; \
+ xor rD3,d3,rW3;
+
+/*
+ * ppc_encrypt_aes(u8 *out, const u8 *in, u32 *key_enc,
+ * u32 rounds)
+ *
+ * called from glue layer to encrypt a single 16 byte block
+ * round values are AES128 = 4, AES192 = 5, AES256 = 6
+ *
+ */
+_GLOBAL(ppc_encrypt_aes)
+ INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 0)
+ LOAD_DATA(rD0, 0)
+ LOAD_DATA(rD1, 4)
+ LOAD_DATA(rD2, 8)
+ LOAD_DATA(rD3, 12)
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_encrypt_block
+ xor rD0,rD0,rW0
+ SAVE_DATA(rD0, 0)
+ xor rD1,rD1,rW1
+ SAVE_DATA(rD1, 4)
+ xor rD2,rD2,rW2
+ SAVE_DATA(rD2, 8)
+ xor rD3,rD3,rW3
+ SAVE_DATA(rD3, 12)
+ FINALIZE_CRYPT(0)
+ blr
+
+/*
+ * ppc_decrypt_aes(u8 *out, const u8 *in, u32 *key_dec,
+ * u32 rounds)
+ *
+ * called from glue layer to decrypt a single 16 byte block
+ * round values are AES128 = 4, AES192 = 5, AES256 = 6
+ *
+ */
+_GLOBAL(ppc_decrypt_aes)
+ INITIALIZE_CRYPT(PPC_AES_4K_DECTAB,0)
+ LOAD_DATA(rD0, 0)
+ addi rT1,rT0,4096
+ LOAD_DATA(rD1, 4)
+ LOAD_DATA(rD2, 8)
+ LOAD_DATA(rD3, 12)
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_decrypt_block
+ xor rD0,rD0,rW0
+ SAVE_DATA(rD0, 0)
+ xor rD1,rD1,rW1
+ SAVE_DATA(rD1, 4)
+ xor rD2,rD2,rW2
+ SAVE_DATA(rD2, 8)
+ xor rD3,rD3,rW3
+ SAVE_DATA(rD3, 12)
+ FINALIZE_CRYPT(0)
+ blr
+
+/*
+ * ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc,
+ * u32 rounds, u32 bytes);
+ *
+ * called from glue layer to encrypt multiple blocks via ECB
+ * Bytes must be larger than or equal to 16 and only whole blocks are
+ * processed. Round values are AES128 = 4, AES192 = 5 and
+ * AES256 = 6
+ *
+ */
+_GLOBAL(ppc_encrypt_ecb)
+ INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 0)
+ppc_encrypt_ecb_loop:
+ LOAD_DATA(rD0, 0)
+ mr rKP,rKS
+ LOAD_DATA(rD1, 4)
+ subi rLN,rLN,16
+ LOAD_DATA(rD2, 8)
+ cmpwi rLN,15
+ LOAD_DATA(rD3, 12)
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_encrypt_block
+ xor rD0,rD0,rW0
+ SAVE_DATA(rD0, 0)
+ xor rD1,rD1,rW1
+ SAVE_DATA(rD1, 4)
+ xor rD2,rD2,rW2
+ SAVE_DATA(rD2, 8)
+ xor rD3,rD3,rW3
+ SAVE_DATA(rD3, 12)
+ NEXT_BLOCK
+ bt gt,ppc_encrypt_ecb_loop
+ FINALIZE_CRYPT(0)
+ blr
+
+/*
+ * ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec,
+ * u32 rounds, u32 bytes);
+ *
+ * called from glue layer to decrypt multiple blocks via ECB
+ * Bytes must be larger than or equal to 16 and only whole blocks are
+ * processed. Round values are AES128 = 4, AES192 = 5 and
+ * AES256 = 6
+ *
+ */
+_GLOBAL(ppc_decrypt_ecb)
+ INITIALIZE_CRYPT(PPC_AES_4K_DECTAB, 0)
+ addi rT1,rT0,4096
+ppc_decrypt_ecb_loop:
+ LOAD_DATA(rD0, 0)
+ mr rKP,rKS
+ LOAD_DATA(rD1, 4)
+ subi rLN,rLN,16
+ LOAD_DATA(rD2, 8)
+ cmpwi rLN,15
+ LOAD_DATA(rD3, 12)
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_decrypt_block
+ xor rD0,rD0,rW0
+ SAVE_DATA(rD0, 0)
+ xor rD1,rD1,rW1
+ SAVE_DATA(rD1, 4)
+ xor rD2,rD2,rW2
+ SAVE_DATA(rD2, 8)
+ xor rD3,rD3,rW3
+ SAVE_DATA(rD3, 12)
+ NEXT_BLOCK
+ bt gt,ppc_decrypt_ecb_loop
+ FINALIZE_CRYPT(0)
+ blr
+
+/*
+ * ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc,
+ * u32 rounds, u32 bytes, u8 *iv);
+ *
+ * called from glue layer to encrypt multiple blocks via CBC
+ * Bytes must be larger than or equal to 16 and only whole blocks are
+ * processed. Round values are AES128 = 4, AES192 = 5 and
+ * AES256 = 6
+ *
+ */
+_GLOBAL(ppc_encrypt_cbc)
+ INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 4)
+ LOAD_IV(rI0, 0)
+ LOAD_IV(rI1, 4)
+ LOAD_IV(rI2, 8)
+ LOAD_IV(rI3, 12)
+ppc_encrypt_cbc_loop:
+ LOAD_DATA(rD0, 0)
+ mr rKP,rKS
+ LOAD_DATA(rD1, 4)
+ subi rLN,rLN,16
+ LOAD_DATA(rD2, 8)
+ cmpwi rLN,15
+ LOAD_DATA(rD3, 12)
+ xor rD0,rD0,rI0
+ xor rD1,rD1,rI1
+ xor rD2,rD2,rI2
+ xor rD3,rD3,rI3
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_encrypt_block
+ xor rI0,rD0,rW0
+ SAVE_DATA(rI0, 0)
+ xor rI1,rD1,rW1
+ SAVE_DATA(rI1, 4)
+ xor rI2,rD2,rW2
+ SAVE_DATA(rI2, 8)
+ xor rI3,rD3,rW3
+ SAVE_DATA(rI3, 12)
+ NEXT_BLOCK
+ bt gt,ppc_encrypt_cbc_loop
+ START_IV
+ SAVE_IV(rI0, 0)
+ SAVE_IV(rI1, 4)
+ SAVE_IV(rI2, 8)
+ SAVE_IV(rI3, 12)
+ FINALIZE_CRYPT(4)
+ blr
+
+/*
+ * ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec,
+ * u32 rounds, u32 bytes, u8 *iv);
+ *
+ * called from glue layer to decrypt multiple blocks via CBC
+ * round values are AES128 = 4, AES192 = 5, AES256 = 6
+ *
+ */
+_GLOBAL(ppc_decrypt_cbc)
+ INITIALIZE_CRYPT(PPC_AES_4K_DECTAB, 4)
+ li rT1,15
+ LOAD_IV(rI0, 0)
+ andc rLN,rLN,rT1
+ LOAD_IV(rI1, 4)
+ subi rLN,rLN,16
+ LOAD_IV(rI2, 8)
+ add rSP,rSP,rLN /* reverse processing */
+ LOAD_IV(rI3, 12)
+ add rDP,rDP,rLN
+ LOAD_DATA(rD0, 0)
+ addi rT1,rT0,4096
+ LOAD_DATA(rD1, 4)
+ LOAD_DATA(rD2, 8)
+ LOAD_DATA(rD3, 12)
+ START_IV
+ SAVE_IV(rD0, 0)
+ SAVE_IV(rD1, 4)
+ SAVE_IV(rD2, 8)
+ cmpwi rLN,16
+ SAVE_IV(rD3, 12)
+ bt lt,ppc_decrypt_cbc_end
+ppc_decrypt_cbc_loop:
+ mr rKP,rKS
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_decrypt_block
+ subi rLN,rLN,16
+ subi rSP,rSP,CBC_DEC
+ xor rW0,rD0,rW0
+ LOAD_DATA(rD0, 0)
+ xor rW1,rD1,rW1
+ LOAD_DATA(rD1, 4)
+ xor rW2,rD2,rW2
+ LOAD_DATA(rD2, 8)
+ xor rW3,rD3,rW3
+ LOAD_DATA(rD3, 12)
+ xor rW0,rW0,rD0
+ SAVE_DATA(rW0, 0)
+ xor rW1,rW1,rD1
+ SAVE_DATA(rW1, 4)
+ xor rW2,rW2,rD2
+ SAVE_DATA(rW2, 8)
+ xor rW3,rW3,rD3
+ SAVE_DATA(rW3, 12)
+ cmpwi rLN,15
+ subi rDP,rDP,CBC_DEC
+ bt gt,ppc_decrypt_cbc_loop
+ppc_decrypt_cbc_end:
+ mr rKP,rKS
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_decrypt_block
+ xor rW0,rW0,rD0
+ xor rW1,rW1,rD1
+ xor rW2,rW2,rD2
+ xor rW3,rW3,rD3
+ xor rW0,rW0,rI0 /* decrypt with initial IV */
+ SAVE_DATA(rW0, 0)
+ xor rW1,rW1,rI1
+ SAVE_DATA(rW1, 4)
+ xor rW2,rW2,rI2
+ SAVE_DATA(rW2, 8)
+ xor rW3,rW3,rI3
+ SAVE_DATA(rW3, 12)
+ FINALIZE_CRYPT(4)
+ blr
+
+/*
+ * ppc_crypt_ctr(u8 *out, const u8 *in, u32 *key_enc,
+ * u32 rounds, u32 bytes, u8 *iv);
+ *
+ * called from glue layer to encrypt/decrypt multiple blocks
+ * via CTR. Number of bytes does not need to be a multiple of
+ * 16. Round values are AES128 = 4, AES192 = 5, AES256 = 6
+ *
+ */
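+/*
+ * Sketch of the mode (for orientation only): every block, including a
+ * trailing partial one, is handled as
+ *
+ *	keystream = E_K(counter);
+ *	out[i]    = in[i] xor keystream[i];
+ *	counter   = counter + 1;	(128 bit big endian increment)
+ *
+ * The addic/addze sequence below implements the 128 bit increment.
+ */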
+_GLOBAL(ppc_crypt_ctr)
+ INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 4)
+ LOAD_IV(rI0, 0)
+ LOAD_IV(rI1, 4)
+ LOAD_IV(rI2, 8)
+ cmpwi rLN,16
+ LOAD_IV(rI3, 12)
+ START_IV
+ bt lt,ppc_crypt_ctr_partial
+ppc_crypt_ctr_loop:
+ mr rKP,rKS
+ START_KEY(rI0, rI1, rI2, rI3)
+ bl ppc_encrypt_block
+ xor rW0,rD0,rW0
+ xor rW1,rD1,rW1
+ xor rW2,rD2,rW2
+ xor rW3,rD3,rW3
+ LOAD_DATA(rD0, 0)
+ subi rLN,rLN,16
+ LOAD_DATA(rD1, 4)
+ LOAD_DATA(rD2, 8)
+ LOAD_DATA(rD3, 12)
+ xor rD0,rD0,rW0
+ SAVE_DATA(rD0, 0)
+ xor rD1,rD1,rW1
+ SAVE_DATA(rD1, 4)
+ xor rD2,rD2,rW2
+ SAVE_DATA(rD2, 8)
+ xor rD3,rD3,rW3
+ SAVE_DATA(rD3, 12)
+ addic rI3,rI3,1 /* increase counter */
+ addze rI2,rI2
+ addze rI1,rI1
+ addze rI0,rI0
+ NEXT_BLOCK
+ cmpwi rLN,15
+ bt gt,ppc_crypt_ctr_loop
+ppc_crypt_ctr_partial:
+ cmpwi rLN,0
+ bt eq,ppc_crypt_ctr_end
+ mr rKP,rKS
+ START_KEY(rI0, rI1, rI2, rI3)
+ bl ppc_encrypt_block
+ xor rW0,rD0,rW0
+ SAVE_IV(rW0, 0)
+ xor rW1,rD1,rW1
+ SAVE_IV(rW1, 4)
+ xor rW2,rD2,rW2
+ SAVE_IV(rW2, 8)
+ xor rW3,rD3,rW3
+ SAVE_IV(rW3, 12)
+ mtctr rLN
+ subi rIP,rIP,CTR_DEC
+ subi rSP,rSP,1
+ subi rDP,rDP,1
+ppc_crypt_ctr_xorbyte:
+ lbzu rW4,1(rIP) /* bytewise xor for partial block */
+ lbzu rW5,1(rSP)
+ xor rW4,rW4,rW5
+ stbu rW4,1(rDP)
+ bdnz ppc_crypt_ctr_xorbyte
+ subf rIP,rLN,rIP
+ addi rIP,rIP,1
+ addic rI3,rI3,1
+ addze rI2,rI2
+ addze rI1,rI1
+ addze rI0,rI0
+ppc_crypt_ctr_end:
+ SAVE_IV(rI0, 0)
+ SAVE_IV(rI1, 4)
+ SAVE_IV(rI2, 8)
+ SAVE_IV(rI3, 12)
+ FINALIZE_CRYPT(4)
+ blr
+
+/*
+ * ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc,
+ * u32 rounds, u32 bytes, u8 *iv, u32 *key_twk);
+ *
+ * called from glue layer to encrypt multiple blocks via XTS
+ * If key_twk is given, the initial encryption of the IV with the
+ * tweak key is performed as well. Round values are AES128 = 4,
+ * AES192 = 5, AES256 = 6
+ *
+ */
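+/*
+ * Sketch of the mode (for orientation only): with T the tweak obtained
+ * by encrypting the IV with key_twk, every block is handled as
+ *
+ *	C = E_K(P xor T) xor T;
+ *	T = T * x in GF(2^128);		(see GF128_MUL above)
+ */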
+_GLOBAL(ppc_encrypt_xts)
+ INITIALIZE_CRYPT(PPC_AES_4K_ENCTAB, 8)
+ LOAD_IV(rI0, 0)
+ LOAD_IV(rI1, 4)
+ LOAD_IV(rI2, 8)
+ cmpwi rKT,0
+ LOAD_IV(rI3, 12)
+ bt eq,ppc_encrypt_xts_notweak
+ mr rKP,rKT
+ START_KEY(rI0, rI1, rI2, rI3)
+ bl ppc_encrypt_block
+ xor rI0,rD0,rW0
+ xor rI1,rD1,rW1
+ xor rI2,rD2,rW2
+ xor rI3,rD3,rW3
+ppc_encrypt_xts_notweak:
+ ENDIAN_SWAP(rG0, rG1, rI0, rI1)
+ ENDIAN_SWAP(rG2, rG3, rI2, rI3)
+ppc_encrypt_xts_loop:
+ LOAD_DATA(rD0, 0)
+ mr rKP,rKS
+ LOAD_DATA(rD1, 4)
+ subi rLN,rLN,16
+ LOAD_DATA(rD2, 8)
+ LOAD_DATA(rD3, 12)
+ xor rD0,rD0,rI0
+ xor rD1,rD1,rI1
+ xor rD2,rD2,rI2
+ xor rD3,rD3,rI3
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_encrypt_block
+ xor rD0,rD0,rW0
+ xor rD1,rD1,rW1
+ xor rD2,rD2,rW2
+ xor rD3,rD3,rW3
+ xor rD0,rD0,rI0
+ SAVE_DATA(rD0, 0)
+ xor rD1,rD1,rI1
+ SAVE_DATA(rD1, 4)
+ xor rD2,rD2,rI2
+ SAVE_DATA(rD2, 8)
+ xor rD3,rD3,rI3
+ SAVE_DATA(rD3, 12)
+ GF128_MUL(rG0, rG1, rG2, rG3, rW0)
+ ENDIAN_SWAP(rI0, rI1, rG0, rG1)
+ ENDIAN_SWAP(rI2, rI3, rG2, rG3)
+ cmpwi rLN,0
+ NEXT_BLOCK
+ bt gt,ppc_encrypt_xts_loop
+ START_IV
+ SAVE_IV(rI0, 0)
+ SAVE_IV(rI1, 4)
+ SAVE_IV(rI2, 8)
+ SAVE_IV(rI3, 12)
+ FINALIZE_CRYPT(8)
+ blr
+
+/*
+ * ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec,
+ * u32 rounds, u32 bytes, u8 *iv, u32 *key_twk);
+ *
+ * called from glue layer to decrypt multiple blocks via XTS
+ * If key_twk is given, the initial encryption of the IV with the
+ * tweak key is performed as well. Round values are AES128 = 4,
+ * AES192 = 5, AES256 = 6
+ *
+ */
+_GLOBAL(ppc_decrypt_xts)
+ INITIALIZE_CRYPT(PPC_AES_4K_DECTAB, 8)
+ LOAD_IV(rI0, 0)
+ addi rT1,rT0,4096
+ LOAD_IV(rI1, 4)
+ LOAD_IV(rI2, 8)
+ cmpwi rKT,0
+ LOAD_IV(rI3, 12)
+ bt eq,ppc_decrypt_xts_notweak
+ subi rT0,rT0,4096
+ mr rKP,rKT
+ START_KEY(rI0, rI1, rI2, rI3)
+ bl ppc_encrypt_block
+ xor rI0,rD0,rW0
+ xor rI1,rD1,rW1
+ xor rI2,rD2,rW2
+ xor rI3,rD3,rW3
+ addi rT0,rT0,4096
+ppc_decrypt_xts_notweak:
+ ENDIAN_SWAP(rG0, rG1, rI0, rI1)
+ ENDIAN_SWAP(rG2, rG3, rI2, rI3)
+ppc_decrypt_xts_loop:
+ LOAD_DATA(rD0, 0)
+ mr rKP,rKS
+ LOAD_DATA(rD1, 4)
+ subi rLN,rLN,16
+ LOAD_DATA(rD2, 8)
+ LOAD_DATA(rD3, 12)
+ xor rD0,rD0,rI0
+ xor rD1,rD1,rI1
+ xor rD2,rD2,rI2
+ xor rD3,rD3,rI3
+ START_KEY(rD0, rD1, rD2, rD3)
+ bl ppc_decrypt_block
+ xor rD0,rD0,rW0
+ xor rD1,rD1,rW1
+ xor rD2,rD2,rW2
+ xor rD3,rD3,rW3
+ xor rD0,rD0,rI0
+ SAVE_DATA(rD0, 0)
+ xor rD1,rD1,rI1
+ SAVE_DATA(rD1, 4)
+ xor rD2,rD2,rI2
+ SAVE_DATA(rD2, 8)
+ xor rD3,rD3,rI3
+ SAVE_DATA(rD3, 12)
+ GF128_MUL(rG0, rG1, rG2, rG3, rW0)
+ ENDIAN_SWAP(rI0, rI1, rG0, rG1)
+ ENDIAN_SWAP(rI2, rI3, rG2, rG3)
+ cmpwi rLN,0
+ NEXT_BLOCK
+ bt gt,ppc_decrypt_xts_loop
+ START_IV
+ SAVE_IV(rI0, 0)
+ SAVE_IV(rI1, 4)
+ SAVE_IV(rI2, 8)
+ SAVE_IV(rI3, 12)
+ FINALIZE_CRYPT(8)
+ blr
diff --git a/arch/powerpc/crypto/aes-spe-regs.h b/arch/powerpc/crypto/aes-spe-regs.h
new file mode 100644
index 000000000000..30d217b399c3
--- /dev/null
+++ b/arch/powerpc/crypto/aes-spe-regs.h
@@ -0,0 +1,42 @@
+/*
+ * Common registers for PPC AES implementation
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#define rKS r0 /* copy of en-/decryption key pointer */
+#define rDP r3 /* destination pointer */
+#define rSP r4 /* source pointer */
+#define rKP r5 /* pointer to en-/decryption key */
+#define rRR r6 /* en-/decryption rounds */
+#define rLN r7 /* length of data to be processed */
+#define rIP r8 /* pointer to IV (CBC/CTR/XTS modes) */
+#define rKT r9 /* pointer to tweak key (XTS mode) */
+#define rT0 r11 /* pointers to en-/decryption tables */
+#define rT1 r10
+#define rD0 r9 /* data */
+#define rD1 r14
+#define rD2 r12
+#define rD3 r15
+#define rW0 r16 /* working registers */
+#define rW1 r17
+#define rW2 r18
+#define rW3 r19
+#define rW4 r20
+#define rW5 r21
+#define rW6 r22
+#define rW7 r23
+#define rI0 r24 /* IV */
+#define rI1 r25
+#define rI2 r26
+#define rI3 r27
+#define rG0 r28 /* endian reversed tweak (XTS mode) */
+#define rG1 r29
+#define rG2 r30
+#define rG3 r31
diff --git a/arch/powerpc/crypto/aes-tab-4k.S b/arch/powerpc/crypto/aes-tab-4k.S
new file mode 100644
index 000000000000..701e60240dc3
--- /dev/null
+++ b/arch/powerpc/crypto/aes-tab-4k.S
@@ -0,0 +1,331 @@
+/*
+ * 4K AES tables for PPC AES implementation
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+/*
+ * These big endian AES encryption/decryption tables have been taken from
+ * crypto/aes_generic.c and are laid out so that they can be accessed with a
+ * simple combination of rlwimi/lwz instructions using a minimum of table
+ * registers (usually only one is required). For that reason they are aligned
+ * to 4K. The locality of the rotated values follows from the small offsets
+ * available in the SPE load instructions, e.g. evldw, evlwwsplat, ...
+ *
+ * For the safety-conscious it has to be noted that they might be vulnerable
+ * to cache timing attacks because of their size. Nevertheless in contrast to
+ * the generic tables they have been reduced from 16KB to 8KB + 256 bytes.
+ * This is quite a good tradeoff for low power devices (e.g. routers) without
+ * dedicated encryption hardware where we usually have no multiuser
+ * environment.
+ *
+ */
+
+#define R(a, b, c, d) \
+ 0x##a##b##c##d, 0x##d##a##b##c, 0x##c##d##a##b, 0x##b##c##d##a
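+
+/*
+ * For example, R(c6, 63, 63, a5) expands to
+ *
+ *	0xc66363a5, 0xa5c66363, 0x63a5c663, 0x6363a5c6
+ *
+ * i.e. the four byte rotations of each table value are stored back to
+ * back, so every rotated variant can be reached from one table pointer
+ * with a small offset.
+ */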
+
+.data
+.align 12
+.globl PPC_AES_4K_ENCTAB
+PPC_AES_4K_ENCTAB:
+/* encryption table, same as crypto_ft_tab in crypto/aes_generic.c */
+ .long R(c6, 63, 63, a5), R(f8, 7c, 7c, 84)
+ .long R(ee, 77, 77, 99), R(f6, 7b, 7b, 8d)
+ .long R(ff, f2, f2, 0d), R(d6, 6b, 6b, bd)
+ .long R(de, 6f, 6f, b1), R(91, c5, c5, 54)
+ .long R(60, 30, 30, 50), R(02, 01, 01, 03)
+ .long R(ce, 67, 67, a9), R(56, 2b, 2b, 7d)
+ .long R(e7, fe, fe, 19), R(b5, d7, d7, 62)
+ .long R(4d, ab, ab, e6), R(ec, 76, 76, 9a)
+ .long R(8f, ca, ca, 45), R(1f, 82, 82, 9d)
+ .long R(89, c9, c9, 40), R(fa, 7d, 7d, 87)
+ .long R(ef, fa, fa, 15), R(b2, 59, 59, eb)
+ .long R(8e, 47, 47, c9), R(fb, f0, f0, 0b)
+ .long R(41, ad, ad, ec), R(b3, d4, d4, 67)
+ .long R(5f, a2, a2, fd), R(45, af, af, ea)
+ .long R(23, 9c, 9c, bf), R(53, a4, a4, f7)
+ .long R(e4, 72, 72, 96), R(9b, c0, c0, 5b)
+ .long R(75, b7, b7, c2), R(e1, fd, fd, 1c)
+ .long R(3d, 93, 93, ae), R(4c, 26, 26, 6a)
+ .long R(6c, 36, 36, 5a), R(7e, 3f, 3f, 41)
+ .long R(f5, f7, f7, 02), R(83, cc, cc, 4f)
+ .long R(68, 34, 34, 5c), R(51, a5, a5, f4)
+ .long R(d1, e5, e5, 34), R(f9, f1, f1, 08)
+ .long R(e2, 71, 71, 93), R(ab, d8, d8, 73)
+ .long R(62, 31, 31, 53), R(2a, 15, 15, 3f)
+ .long R(08, 04, 04, 0c), R(95, c7, c7, 52)
+ .long R(46, 23, 23, 65), R(9d, c3, c3, 5e)
+ .long R(30, 18, 18, 28), R(37, 96, 96, a1)
+ .long R(0a, 05, 05, 0f), R(2f, 9a, 9a, b5)
+ .long R(0e, 07, 07, 09), R(24, 12, 12, 36)
+ .long R(1b, 80, 80, 9b), R(df, e2, e2, 3d)
+ .long R(cd, eb, eb, 26), R(4e, 27, 27, 69)
+ .long R(7f, b2, b2, cd), R(ea, 75, 75, 9f)
+ .long R(12, 09, 09, 1b), R(1d, 83, 83, 9e)
+ .long R(58, 2c, 2c, 74), R(34, 1a, 1a, 2e)
+ .long R(36, 1b, 1b, 2d), R(dc, 6e, 6e, b2)
+ .long R(b4, 5a, 5a, ee), R(5b, a0, a0, fb)
+ .long R(a4, 52, 52, f6), R(76, 3b, 3b, 4d)
+ .long R(b7, d6, d6, 61), R(7d, b3, b3, ce)
+ .long R(52, 29, 29, 7b), R(dd, e3, e3, 3e)
+ .long R(5e, 2f, 2f, 71), R(13, 84, 84, 97)
+ .long R(a6, 53, 53, f5), R(b9, d1, d1, 68)
+ .long R(00, 00, 00, 00), R(c1, ed, ed, 2c)
+ .long R(40, 20, 20, 60), R(e3, fc, fc, 1f)
+ .long R(79, b1, b1, c8), R(b6, 5b, 5b, ed)
+ .long R(d4, 6a, 6a, be), R(8d, cb, cb, 46)
+ .long R(67, be, be, d9), R(72, 39, 39, 4b)
+ .long R(94, 4a, 4a, de), R(98, 4c, 4c, d4)
+ .long R(b0, 58, 58, e8), R(85, cf, cf, 4a)
+ .long R(bb, d0, d0, 6b), R(c5, ef, ef, 2a)
+ .long R(4f, aa, aa, e5), R(ed, fb, fb, 16)
+ .long R(86, 43, 43, c5), R(9a, 4d, 4d, d7)
+ .long R(66, 33, 33, 55), R(11, 85, 85, 94)
+ .long R(8a, 45, 45, cf), R(e9, f9, f9, 10)
+ .long R(04, 02, 02, 06), R(fe, 7f, 7f, 81)
+ .long R(a0, 50, 50, f0), R(78, 3c, 3c, 44)
+ .long R(25, 9f, 9f, ba), R(4b, a8, a8, e3)
+ .long R(a2, 51, 51, f3), R(5d, a3, a3, fe)
+ .long R(80, 40, 40, c0), R(05, 8f, 8f, 8a)
+ .long R(3f, 92, 92, ad), R(21, 9d, 9d, bc)
+ .long R(70, 38, 38, 48), R(f1, f5, f5, 04)
+ .long R(63, bc, bc, df), R(77, b6, b6, c1)
+ .long R(af, da, da, 75), R(42, 21, 21, 63)
+ .long R(20, 10, 10, 30), R(e5, ff, ff, 1a)
+ .long R(fd, f3, f3, 0e), R(bf, d2, d2, 6d)
+ .long R(81, cd, cd, 4c), R(18, 0c, 0c, 14)
+ .long R(26, 13, 13, 35), R(c3, ec, ec, 2f)
+ .long R(be, 5f, 5f, e1), R(35, 97, 97, a2)
+ .long R(88, 44, 44, cc), R(2e, 17, 17, 39)
+ .long R(93, c4, c4, 57), R(55, a7, a7, f2)
+ .long R(fc, 7e, 7e, 82), R(7a, 3d, 3d, 47)
+ .long R(c8, 64, 64, ac), R(ba, 5d, 5d, e7)
+ .long R(32, 19, 19, 2b), R(e6, 73, 73, 95)
+ .long R(c0, 60, 60, a0), R(19, 81, 81, 98)
+ .long R(9e, 4f, 4f, d1), R(a3, dc, dc, 7f)
+ .long R(44, 22, 22, 66), R(54, 2a, 2a, 7e)
+ .long R(3b, 90, 90, ab), R(0b, 88, 88, 83)
+ .long R(8c, 46, 46, ca), R(c7, ee, ee, 29)
+ .long R(6b, b8, b8, d3), R(28, 14, 14, 3c)
+ .long R(a7, de, de, 79), R(bc, 5e, 5e, e2)
+ .long R(16, 0b, 0b, 1d), R(ad, db, db, 76)
+ .long R(db, e0, e0, 3b), R(64, 32, 32, 56)
+ .long R(74, 3a, 3a, 4e), R(14, 0a, 0a, 1e)
+ .long R(92, 49, 49, db), R(0c, 06, 06, 0a)
+ .long R(48, 24, 24, 6c), R(b8, 5c, 5c, e4)
+ .long R(9f, c2, c2, 5d), R(bd, d3, d3, 6e)
+ .long R(43, ac, ac, ef), R(c4, 62, 62, a6)
+ .long R(39, 91, 91, a8), R(31, 95, 95, a4)
+ .long R(d3, e4, e4, 37), R(f2, 79, 79, 8b)
+ .long R(d5, e7, e7, 32), R(8b, c8, c8, 43)
+ .long R(6e, 37, 37, 59), R(da, 6d, 6d, b7)
+ .long R(01, 8d, 8d, 8c), R(b1, d5, d5, 64)
+ .long R(9c, 4e, 4e, d2), R(49, a9, a9, e0)
+ .long R(d8, 6c, 6c, b4), R(ac, 56, 56, fa)
+ .long R(f3, f4, f4, 07), R(cf, ea, ea, 25)
+ .long R(ca, 65, 65, af), R(f4, 7a, 7a, 8e)
+ .long R(47, ae, ae, e9), R(10, 08, 08, 18)
+ .long R(6f, ba, ba, d5), R(f0, 78, 78, 88)
+ .long R(4a, 25, 25, 6f), R(5c, 2e, 2e, 72)
+ .long R(38, 1c, 1c, 24), R(57, a6, a6, f1)
+ .long R(73, b4, b4, c7), R(97, c6, c6, 51)
+ .long R(cb, e8, e8, 23), R(a1, dd, dd, 7c)
+ .long R(e8, 74, 74, 9c), R(3e, 1f, 1f, 21)
+ .long R(96, 4b, 4b, dd), R(61, bd, bd, dc)
+ .long R(0d, 8b, 8b, 86), R(0f, 8a, 8a, 85)
+ .long R(e0, 70, 70, 90), R(7c, 3e, 3e, 42)
+ .long R(71, b5, b5, c4), R(cc, 66, 66, aa)
+ .long R(90, 48, 48, d8), R(06, 03, 03, 05)
+ .long R(f7, f6, f6, 01), R(1c, 0e, 0e, 12)
+ .long R(c2, 61, 61, a3), R(6a, 35, 35, 5f)
+ .long R(ae, 57, 57, f9), R(69, b9, b9, d0)
+ .long R(17, 86, 86, 91), R(99, c1, c1, 58)
+ .long R(3a, 1d, 1d, 27), R(27, 9e, 9e, b9)
+ .long R(d9, e1, e1, 38), R(eb, f8, f8, 13)
+ .long R(2b, 98, 98, b3), R(22, 11, 11, 33)
+ .long R(d2, 69, 69, bb), R(a9, d9, d9, 70)
+ .long R(07, 8e, 8e, 89), R(33, 94, 94, a7)
+ .long R(2d, 9b, 9b, b6), R(3c, 1e, 1e, 22)
+ .long R(15, 87, 87, 92), R(c9, e9, e9, 20)
+ .long R(87, ce, ce, 49), R(aa, 55, 55, ff)
+ .long R(50, 28, 28, 78), R(a5, df, df, 7a)
+ .long R(03, 8c, 8c, 8f), R(59, a1, a1, f8)
+ .long R(09, 89, 89, 80), R(1a, 0d, 0d, 17)
+ .long R(65, bf, bf, da), R(d7, e6, e6, 31)
+ .long R(84, 42, 42, c6), R(d0, 68, 68, b8)
+ .long R(82, 41, 41, c3), R(29, 99, 99, b0)
+ .long R(5a, 2d, 2d, 77), R(1e, 0f, 0f, 11)
+ .long R(7b, b0, b0, cb), R(a8, 54, 54, fc)
+ .long R(6d, bb, bb, d6), R(2c, 16, 16, 3a)
+.globl PPC_AES_4K_DECTAB
+PPC_AES_4K_DECTAB:
+/* decryption table, same as crypto_it_tab in crypto/aes_generic.c */
+ .long R(51, f4, a7, 50), R(7e, 41, 65, 53)
+ .long R(1a, 17, a4, c3), R(3a, 27, 5e, 96)
+ .long R(3b, ab, 6b, cb), R(1f, 9d, 45, f1)
+ .long R(ac, fa, 58, ab), R(4b, e3, 03, 93)
+ .long R(20, 30, fa, 55), R(ad, 76, 6d, f6)
+ .long R(88, cc, 76, 91), R(f5, 02, 4c, 25)
+ .long R(4f, e5, d7, fc), R(c5, 2a, cb, d7)
+ .long R(26, 35, 44, 80), R(b5, 62, a3, 8f)
+ .long R(de, b1, 5a, 49), R(25, ba, 1b, 67)
+ .long R(45, ea, 0e, 98), R(5d, fe, c0, e1)
+ .long R(c3, 2f, 75, 02), R(81, 4c, f0, 12)
+ .long R(8d, 46, 97, a3), R(6b, d3, f9, c6)
+ .long R(03, 8f, 5f, e7), R(15, 92, 9c, 95)
+ .long R(bf, 6d, 7a, eb), R(95, 52, 59, da)
+ .long R(d4, be, 83, 2d), R(58, 74, 21, d3)
+ .long R(49, e0, 69, 29), R(8e, c9, c8, 44)
+ .long R(75, c2, 89, 6a), R(f4, 8e, 79, 78)
+ .long R(99, 58, 3e, 6b), R(27, b9, 71, dd)
+ .long R(be, e1, 4f, b6), R(f0, 88, ad, 17)
+ .long R(c9, 20, ac, 66), R(7d, ce, 3a, b4)
+ .long R(63, df, 4a, 18), R(e5, 1a, 31, 82)
+ .long R(97, 51, 33, 60), R(62, 53, 7f, 45)
+ .long R(b1, 64, 77, e0), R(bb, 6b, ae, 84)
+ .long R(fe, 81, a0, 1c), R(f9, 08, 2b, 94)
+ .long R(70, 48, 68, 58), R(8f, 45, fd, 19)
+ .long R(94, de, 6c, 87), R(52, 7b, f8, b7)
+ .long R(ab, 73, d3, 23), R(72, 4b, 02, e2)
+ .long R(e3, 1f, 8f, 57), R(66, 55, ab, 2a)
+ .long R(b2, eb, 28, 07), R(2f, b5, c2, 03)
+ .long R(86, c5, 7b, 9a), R(d3, 37, 08, a5)
+ .long R(30, 28, 87, f2), R(23, bf, a5, b2)
+ .long R(02, 03, 6a, ba), R(ed, 16, 82, 5c)
+ .long R(8a, cf, 1c, 2b), R(a7, 79, b4, 92)
+ .long R(f3, 07, f2, f0), R(4e, 69, e2, a1)
+ .long R(65, da, f4, cd), R(06, 05, be, d5)
+ .long R(d1, 34, 62, 1f), R(c4, a6, fe, 8a)
+ .long R(34, 2e, 53, 9d), R(a2, f3, 55, a0)
+ .long R(05, 8a, e1, 32), R(a4, f6, eb, 75)
+ .long R(0b, 83, ec, 39), R(40, 60, ef, aa)
+ .long R(5e, 71, 9f, 06), R(bd, 6e, 10, 51)
+ .long R(3e, 21, 8a, f9), R(96, dd, 06, 3d)
+ .long R(dd, 3e, 05, ae), R(4d, e6, bd, 46)
+ .long R(91, 54, 8d, b5), R(71, c4, 5d, 05)
+ .long R(04, 06, d4, 6f), R(60, 50, 15, ff)
+ .long R(19, 98, fb, 24), R(d6, bd, e9, 97)
+ .long R(89, 40, 43, cc), R(67, d9, 9e, 77)
+ .long R(b0, e8, 42, bd), R(07, 89, 8b, 88)
+ .long R(e7, 19, 5b, 38), R(79, c8, ee, db)
+ .long R(a1, 7c, 0a, 47), R(7c, 42, 0f, e9)
+ .long R(f8, 84, 1e, c9), R(00, 00, 00, 00)
+ .long R(09, 80, 86, 83), R(32, 2b, ed, 48)
+ .long R(1e, 11, 70, ac), R(6c, 5a, 72, 4e)
+ .long R(fd, 0e, ff, fb), R(0f, 85, 38, 56)
+ .long R(3d, ae, d5, 1e), R(36, 2d, 39, 27)
+ .long R(0a, 0f, d9, 64), R(68, 5c, a6, 21)
+ .long R(9b, 5b, 54, d1), R(24, 36, 2e, 3a)
+ .long R(0c, 0a, 67, b1), R(93, 57, e7, 0f)
+ .long R(b4, ee, 96, d2), R(1b, 9b, 91, 9e)
+ .long R(80, c0, c5, 4f), R(61, dc, 20, a2)
+ .long R(5a, 77, 4b, 69), R(1c, 12, 1a, 16)
+ .long R(e2, 93, ba, 0a), R(c0, a0, 2a, e5)
+ .long R(3c, 22, e0, 43), R(12, 1b, 17, 1d)
+ .long R(0e, 09, 0d, 0b), R(f2, 8b, c7, ad)
+ .long R(2d, b6, a8, b9), R(14, 1e, a9, c8)
+ .long R(57, f1, 19, 85), R(af, 75, 07, 4c)
+ .long R(ee, 99, dd, bb), R(a3, 7f, 60, fd)
+ .long R(f7, 01, 26, 9f), R(5c, 72, f5, bc)
+ .long R(44, 66, 3b, c5), R(5b, fb, 7e, 34)
+ .long R(8b, 43, 29, 76), R(cb, 23, c6, dc)
+ .long R(b6, ed, fc, 68), R(b8, e4, f1, 63)
+ .long R(d7, 31, dc, ca), R(42, 63, 85, 10)
+ .long R(13, 97, 22, 40), R(84, c6, 11, 20)
+ .long R(85, 4a, 24, 7d), R(d2, bb, 3d, f8)
+ .long R(ae, f9, 32, 11), R(c7, 29, a1, 6d)
+ .long R(1d, 9e, 2f, 4b), R(dc, b2, 30, f3)
+ .long R(0d, 86, 52, ec), R(77, c1, e3, d0)
+ .long R(2b, b3, 16, 6c), R(a9, 70, b9, 99)
+ .long R(11, 94, 48, fa), R(47, e9, 64, 22)
+ .long R(a8, fc, 8c, c4), R(a0, f0, 3f, 1a)
+ .long R(56, 7d, 2c, d8), R(22, 33, 90, ef)
+ .long R(87, 49, 4e, c7), R(d9, 38, d1, c1)
+ .long R(8c, ca, a2, fe), R(98, d4, 0b, 36)
+ .long R(a6, f5, 81, cf), R(a5, 7a, de, 28)
+ .long R(da, b7, 8e, 26), R(3f, ad, bf, a4)
+ .long R(2c, 3a, 9d, e4), R(50, 78, 92, 0d)
+ .long R(6a, 5f, cc, 9b), R(54, 7e, 46, 62)
+ .long R(f6, 8d, 13, c2), R(90, d8, b8, e8)
+ .long R(2e, 39, f7, 5e), R(82, c3, af, f5)
+ .long R(9f, 5d, 80, be), R(69, d0, 93, 7c)
+ .long R(6f, d5, 2d, a9), R(cf, 25, 12, b3)
+ .long R(c8, ac, 99, 3b), R(10, 18, 7d, a7)
+ .long R(e8, 9c, 63, 6e), R(db, 3b, bb, 7b)
+ .long R(cd, 26, 78, 09), R(6e, 59, 18, f4)
+ .long R(ec, 9a, b7, 01), R(83, 4f, 9a, a8)
+ .long R(e6, 95, 6e, 65), R(aa, ff, e6, 7e)
+ .long R(21, bc, cf, 08), R(ef, 15, e8, e6)
+ .long R(ba, e7, 9b, d9), R(4a, 6f, 36, ce)
+ .long R(ea, 9f, 09, d4), R(29, b0, 7c, d6)
+ .long R(31, a4, b2, af), R(2a, 3f, 23, 31)
+ .long R(c6, a5, 94, 30), R(35, a2, 66, c0)
+ .long R(74, 4e, bc, 37), R(fc, 82, ca, a6)
+ .long R(e0, 90, d0, b0), R(33, a7, d8, 15)
+ .long R(f1, 04, 98, 4a), R(41, ec, da, f7)
+ .long R(7f, cd, 50, 0e), R(17, 91, f6, 2f)
+ .long R(76, 4d, d6, 8d), R(43, ef, b0, 4d)
+ .long R(cc, aa, 4d, 54), R(e4, 96, 04, df)
+ .long R(9e, d1, b5, e3), R(4c, 6a, 88, 1b)
+ .long R(c1, 2c, 1f, b8), R(46, 65, 51, 7f)
+ .long R(9d, 5e, ea, 04), R(01, 8c, 35, 5d)
+ .long R(fa, 87, 74, 73), R(fb, 0b, 41, 2e)
+ .long R(b3, 67, 1d, 5a), R(92, db, d2, 52)
+ .long R(e9, 10, 56, 33), R(6d, d6, 47, 13)
+ .long R(9a, d7, 61, 8c), R(37, a1, 0c, 7a)
+ .long R(59, f8, 14, 8e), R(eb, 13, 3c, 89)
+ .long R(ce, a9, 27, ee), R(b7, 61, c9, 35)
+ .long R(e1, 1c, e5, ed), R(7a, 47, b1, 3c)
+ .long R(9c, d2, df, 59), R(55, f2, 73, 3f)
+ .long R(18, 14, ce, 79), R(73, c7, 37, bf)
+ .long R(53, f7, cd, ea), R(5f, fd, aa, 5b)
+ .long R(df, 3d, 6f, 14), R(78, 44, db, 86)
+ .long R(ca, af, f3, 81), R(b9, 68, c4, 3e)
+ .long R(38, 24, 34, 2c), R(c2, a3, 40, 5f)
+ .long R(16, 1d, c3, 72), R(bc, e2, 25, 0c)
+ .long R(28, 3c, 49, 8b), R(ff, 0d, 95, 41)
+ .long R(39, a8, 01, 71), R(08, 0c, b3, de)
+ .long R(d8, b4, e4, 9c), R(64, 56, c1, 90)
+ .long R(7b, cb, 84, 61), R(d5, 32, b6, 70)
+ .long R(48, 6c, 5c, 74), R(d0, b8, 57, 42)
+.globl PPC_AES_4K_DECTAB2
+PPC_AES_4K_DECTAB2:
+/* decryption table, same as crypto_il_tab in crypto/aes_generic.c */
+ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+ .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+ .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+ .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+ .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+ .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+ .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+ .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+ .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+ .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+ .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+ .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+ .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+ .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+ .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+ .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+ .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+ .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+ .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+ .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+ .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+ .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+ .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+ .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+ .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+ .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+ .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+ .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+ .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+ .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+ .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+ .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
diff --git a/arch/powerpc/crypto/md5-asm.S b/arch/powerpc/crypto/md5-asm.S
new file mode 100644
index 000000000000..10cdf5bceebb
--- /dev/null
+++ b/arch/powerpc/crypto/md5-asm.S
@@ -0,0 +1,243 @@
+/*
+ * Fast MD5 implementation for PPC
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#define rHP r3
+#define rWP r4
+
+#define rH0 r0
+#define rH1 r6
+#define rH2 r7
+#define rH3 r5
+
+#define rW00 r8
+#define rW01 r9
+#define rW02 r10
+#define rW03 r11
+#define rW04 r12
+#define rW05 r14
+#define rW06 r15
+#define rW07 r16
+#define rW08 r17
+#define rW09 r18
+#define rW10 r19
+#define rW11 r20
+#define rW12 r21
+#define rW13 r22
+#define rW14 r23
+#define rW15 r24
+
+#define rT0 r25
+#define rT1 r26
+
+#define INITIALIZE \
+ PPC_STLU r1,-INT_FRAME_SIZE(r1); \
+ SAVE_8GPRS(14, r1); /* push registers onto stack */ \
+ SAVE_4GPRS(22, r1); \
+ SAVE_GPR(26, r1)
+
+#define FINALIZE \
+ REST_8GPRS(14, r1); /* pop registers from stack */ \
+ REST_4GPRS(22, r1); \
+ REST_GPR(26, r1); \
+ addi r1,r1,INT_FRAME_SIZE;
+
+#ifdef __BIG_ENDIAN__
+#define LOAD_DATA(reg, off) \
+ lwbrx reg,0,rWP; /* load data */
+#define INC_PTR \
+ addi rWP,rWP,4; /* increment per word */
+#define NEXT_BLOCK /* nothing to do */
+#else
+#define LOAD_DATA(reg, off) \
+ lwz reg,off(rWP); /* load data */
+#define INC_PTR /* nothing to do */
+#define NEXT_BLOCK \
+ addi rWP,rWP,64; /* increment per block */
+#endif
+
+#define R_00_15(a, b, c, d, w0, w1, p, q, off, k0h, k0l, k1h, k1l) \
+ LOAD_DATA(w0, off) /* W */ \
+ and rT0,b,c; /* 1: f = b and c */ \
+ INC_PTR /* ptr++ */ \
+ andc rT1,d,b; /* 1: f' = ~b and d */ \
+ LOAD_DATA(w1, off+4) /* W */ \
+ or rT0,rT0,rT1; /* 1: f = f or f' */ \
+ addi w0,w0,k0l; /* 1: wk = w + k */ \
+ add a,a,rT0; /* 1: a = a + f */ \
+ addis w0,w0,k0h; /* 1: wk = w + k' */ \
+ addis w1,w1,k1h; /* 2: wk = w + k */ \
+ add a,a,w0; /* 1: a = a + wk */ \
+ addi w1,w1,k1l; /* 2: wk = w + k' */ \
+ rotrwi a,a,p; /* 1: a = a rotl x */ \
+ add d,d,w1; /* 2: a = a + wk */ \
+ add a,a,b; /* 1: a = a + b */ \
+ and rT0,a,b; /* 2: f = b and c */ \
+ andc rT1,c,a; /* 2: f' = ~b and d */ \
+ or rT0,rT0,rT1; /* 2: f = f or f' */ \
+ add d,d,rT0; /* 2: a = a + f */ \
+ INC_PTR /* ptr++ */ \
+ rotrwi d,d,q; /* 2: a = a rotl x */ \
+ add d,d,a; /* 2: a = a + b */
+
+#define R_16_31(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \
+ andc rT0,c,d; /* 1: f = c and ~d */ \
+ and rT1,b,d; /* 1: f' = b and d */ \
+ addi w0,w0,k0l; /* 1: wk = w + k */ \
+ or rT0,rT0,rT1; /* 1: f = f or f' */ \
+ addis w0,w0,k0h; /* 1: wk = w + k' */ \
+ add a,a,rT0; /* 1: a = a + f */ \
+ addi w1,w1,k1l; /* 2: wk = w + k */ \
+ add a,a,w0; /* 1: a = a + wk */ \
+ addis w1,w1,k1h; /* 2: wk = w + k' */ \
+ andc rT0,b,c; /* 2: f = c and ~d */ \
+ rotrwi a,a,p; /* 1: a = a rotl x */ \
+ add a,a,b; /* 1: a = a + b */ \
+ add d,d,w1; /* 2: a = a + wk */ \
+ and rT1,a,c; /* 2: f' = b and d */ \
+ or rT0,rT0,rT1; /* 2: f = f or f' */ \
+ add d,d,rT0; /* 2: a = a + f */ \
+ rotrwi d,d,q; /* 2: a = a rotl x */ \
+ add d,d,a; /* 2: a = a + b */
+
+#define R_32_47(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \
+ xor rT0,b,c; /* 1: f' = b xor c */ \
+ addi w0,w0,k0l; /* 1: wk = w + k */ \
+ xor rT1,rT0,d; /* 1: f = f xor f' */ \
+ addis w0,w0,k0h; /* 1: wk = w + k' */ \
+ add a,a,rT1; /* 1: a = a + f */ \
+ addi w1,w1,k1l; /* 2: wk = w + k */ \
+ add a,a,w0; /* 1: a = a + wk */ \
+ addis w1,w1,k1h; /* 2: wk = w + k' */ \
+ rotrwi a,a,p; /* 1: a = a rotl x */ \
+ add d,d,w1; /* 2: a = a + wk */ \
+ add a,a,b; /* 1: a = a + b */ \
+ xor rT1,rT0,a; /* 2: f = b xor f' */ \
+ add d,d,rT1; /* 2: a = a + f */ \
+ rotrwi d,d,q; /* 2: a = a rotl x */ \
+ add d,d,a; /* 2: a = a + b */
+
+#define R_48_63(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \
+ addi w0,w0,k0l; /* 1: w = w + k */ \
+ orc rT0,b,d; /* 1: f = b or ~d */ \
+ addis w0,w0,k0h; /* 1: w = w + k' */ \
+ xor rT0,rT0,c; /* 1: f = f xor c */ \
+ add a,a,w0; /* 1: a = a + wk */ \
+ addi w1,w1,k1l; /* 2: w = w + k */ \
+ add a,a,rT0; /* 1: a = a + f */ \
+ addis w1,w1,k1h; /* 2: w = w + k' */ \
+ rotrwi a,a,p; /* 1: a = a rotl x */ \
+ add a,a,b; /* 1: a = a + b */ \
+ orc rT0,a,c; /* 2: f = b or ~d */ \
+ add d,d,w1; /* 2: a = a + wk */ \
+ xor rT0,rT0,b; /* 2: f = f xor c */ \
+ add d,d,rT0; /* 2: a = a + f */ \
+ rotrwi d,d,q; /* 2: a = a rotl x */ \
+ add d,d,a; /* 2: a = a + b */
+
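+/*
+ * A note on the constants below: every 32 bit MD5 constant is split into
+ * an addis/addi immediate pair. Whenever the low half has its sign bit
+ * set it is written as a negative addi value and the addis half is
+ * incremented by one to compensate, e.g. the first constant 0xd76aa478
+ * is encoded as 0xd76b and -23432 because 0xd76b0000 - 23432 = 0xd76aa478.
+ */
+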
+_GLOBAL(ppc_md5_transform)
+ INITIALIZE
+
+ mtctr r5
+ lwz rH0,0(rHP)
+ lwz rH1,4(rHP)
+ lwz rH2,8(rHP)
+ lwz rH3,12(rHP)
+
+ppc_md5_main:
+ R_00_15(rH0, rH1, rH2, rH3, rW00, rW01, 25, 20, 0,
+ 0xd76b, -23432, 0xe8c8, -18602)
+ R_00_15(rH2, rH3, rH0, rH1, rW02, rW03, 15, 10, 8,
+ 0x2420, 0x70db, 0xc1be, -12562)
+ R_00_15(rH0, rH1, rH2, rH3, rW04, rW05, 25, 20, 16,
+ 0xf57c, 0x0faf, 0x4788, -14806)
+ R_00_15(rH2, rH3, rH0, rH1, rW06, rW07, 15, 10, 24,
+ 0xa830, 0x4613, 0xfd47, -27391)
+ R_00_15(rH0, rH1, rH2, rH3, rW08, rW09, 25, 20, 32,
+ 0x6981, -26408, 0x8b45, -2129)
+ R_00_15(rH2, rH3, rH0, rH1, rW10, rW11, 15, 10, 40,
+ 0xffff, 0x5bb1, 0x895d, -10306)
+ R_00_15(rH0, rH1, rH2, rH3, rW12, rW13, 25, 20, 48,
+ 0x6b90, 0x1122, 0xfd98, 0x7193)
+ R_00_15(rH2, rH3, rH0, rH1, rW14, rW15, 15, 10, 56,
+ 0xa679, 0x438e, 0x49b4, 0x0821)
+
+ R_16_31(rH0, rH1, rH2, rH3, rW01, rW06, 27, 23,
+ 0x0d56, 0x6e0c, 0x1810, 0x6d2d)
+ R_16_31(rH2, rH3, rH0, rH1, rW11, rW00, 18, 12,
+ 0x9d02, -32109, 0x124c, 0x2332)
+ R_16_31(rH0, rH1, rH2, rH3, rW05, rW10, 27, 23,
+ 0x8ea7, 0x4a33, 0x0245, -18270)
+ R_16_31(rH2, rH3, rH0, rH1, rW15, rW04, 18, 12,
+ 0x8eee, -8608, 0xf258, -5095)
+ R_16_31(rH0, rH1, rH2, rH3, rW09, rW14, 27, 23,
+ 0x969d, -10697, 0x1cbe, -15288)
+ R_16_31(rH2, rH3, rH0, rH1, rW03, rW08, 18, 12,
+ 0x3317, 0x3e99, 0xdbd9, 0x7c15)
+ R_16_31(rH0, rH1, rH2, rH3, rW13, rW02, 27, 23,
+ 0xac4b, 0x7772, 0xd8cf, 0x331d)
+ R_16_31(rH2, rH3, rH0, rH1, rW07, rW12, 18, 12,
+ 0x6a28, 0x6dd8, 0x219a, 0x3b68)
+
+ R_32_47(rH0, rH1, rH2, rH3, rW05, rW08, 28, 21,
+ 0x29cb, 0x28e5, 0x4218, -7788)
+ R_32_47(rH2, rH3, rH0, rH1, rW11, rW14, 16, 9,
+ 0x473f, 0x06d1, 0x3aae, 0x3036)
+ R_32_47(rH0, rH1, rH2, rH3, rW01, rW04, 28, 21,
+ 0xaea1, -15134, 0x640b, -11295)
+ R_32_47(rH2, rH3, rH0, rH1, rW07, rW10, 16, 9,
+ 0x8f4c, 0x4887, 0xbc7c, -22499)
+ R_32_47(rH0, rH1, rH2, rH3, rW13, rW00, 28, 21,
+ 0x7eb8, -27199, 0x00ea, 0x6050)
+ R_32_47(rH2, rH3, rH0, rH1, rW03, rW06, 16, 9,
+ 0xe01a, 0x22fe, 0x4447, 0x69c5)
+ R_32_47(rH0, rH1, rH2, rH3, rW09, rW12, 28, 21,
+ 0xb7f3, 0x0253, 0x59b1, 0x4d5b)
+ R_32_47(rH2, rH3, rH0, rH1, rW15, rW02, 16, 9,
+ 0x4701, -27017, 0xc7bd, -19859)
+
+ R_48_63(rH0, rH1, rH2, rH3, rW00, rW07, 26, 22,
+ 0x0988, -1462, 0x4c70, -19401)
+ R_48_63(rH2, rH3, rH0, rH1, rW14, rW05, 17, 11,
+ 0xadaf, -5221, 0xfc99, 0x66f7)
+ R_48_63(rH0, rH1, rH2, rH3, rW12, rW03, 26, 22,
+ 0x7e80, -16418, 0xba1e, -25587)
+ R_48_63(rH2, rH3, rH0, rH1, rW10, rW01, 17, 11,
+ 0x4130, 0x380d, 0xe0c5, 0x738d)
+ lwz rW00,0(rHP)
+ R_48_63(rH0, rH1, rH2, rH3, rW08, rW15, 26, 22,
+ 0xe837, -30770, 0xde8a, 0x69e8)
+ lwz rW14,4(rHP)
+ R_48_63(rH2, rH3, rH0, rH1, rW06, rW13, 17, 11,
+ 0x9e79, 0x260f, 0x256d, -27941)
+ lwz rW12,8(rHP)
+ R_48_63(rH0, rH1, rH2, rH3, rW04, rW11, 26, 22,
+ 0xab75, -20775, 0x4f9e, -28397)
+ lwz rW10,12(rHP)
+ R_48_63(rH2, rH3, rH0, rH1, rW02, rW09, 17, 11,
+ 0x662b, 0x7c56, 0x11b2, 0x0358)
+
+ add rH0,rH0,rW00
+ stw rH0,0(rHP)
+ add rH1,rH1,rW14
+ stw rH1,4(rHP)
+ add rH2,rH2,rW12
+ stw rH2,8(rHP)
+ add rH3,rH3,rW10
+ stw rH3,12(rHP)
+ NEXT_BLOCK
+
+ bdnz ppc_md5_main
+
+ FINALIZE
+ blr
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
new file mode 100644
index 000000000000..452fb4dc575f
--- /dev/null
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -0,0 +1,165 @@
+/*
+ * Glue code for MD5 implementation for PPC assembler
+ *
+ * Based on generic implementation.
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/md5.h>
+#include <asm/byteorder.h>
+
+extern void ppc_md5_transform(u32 *state, const u8 *src, u32 blocks);
+
+static inline void ppc_md5_clear_context(struct md5_state *sctx)
+{
+ int count = sizeof(struct md5_state) >> 2;
+ u32 *ptr = (u32 *)sctx;
+
+ /* make sure we can clear the fast way */
+ BUILD_BUG_ON(sizeof(struct md5_state) % 4);
+ do { *ptr++ = 0; } while (--count);
+}
+
+static int ppc_md5_init(struct shash_desc *desc)
+{
+ struct md5_state *sctx = shash_desc_ctx(desc);
+
+ sctx->hash[0] = 0x67452301;
+ sctx->hash[1] = 0xefcdab89;
+ sctx->hash[2] = 0x98badcfe;
+ sctx->hash[3] = 0x10325476;
+ sctx->byte_count = 0;
+
+ return 0;
+}
+
+static int ppc_md5_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct md5_state *sctx = shash_desc_ctx(desc);
+ const unsigned int offset = sctx->byte_count & 0x3f;
+ unsigned int avail = 64 - offset;
+ const u8 *src = data;
+
+ sctx->byte_count += len;
+
+ if (avail > len) {
+ memcpy((char *)sctx->block + offset, src, len);
+ return 0;
+ }
+
+ if (offset) {
+ memcpy((char *)sctx->block + offset, src, avail);
+ ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1);
+ len -= avail;
+ src += avail;
+ }
+
+ if (len > 63) {
+ ppc_md5_transform(sctx->hash, src, len >> 6);
+ src += len & ~0x3f;
+ len &= 0x3f;
+ }
+
+ memcpy((char *)sctx->block, src, len);
+ return 0;
+}
+
+static int ppc_md5_final(struct shash_desc *desc, u8 *out)
+{
+ struct md5_state *sctx = shash_desc_ctx(desc);
+ const unsigned int offset = sctx->byte_count & 0x3f;
+ const u8 *src = (const u8 *)sctx->block;
+ u8 *p = (u8 *)src + offset;
+ int padlen = 55 - offset;
+ __le64 *pbits = (__le64 *)((char *)sctx->block + 56);
+ __le32 *dst = (__le32 *)out;
+
+ *p++ = 0x80;
+
+ if (padlen < 0) {
+ memset(p, 0x00, padlen + sizeof (u64));
+ ppc_md5_transform(sctx->hash, src, 1);
+ p = (u8 *)sctx->block;
+ padlen = 56;
+ }
+
+ memset(p, 0, padlen);
+ *pbits = cpu_to_le64(sctx->byte_count << 3);
+ ppc_md5_transform(sctx->hash, src, 1);
+
+ dst[0] = cpu_to_le32(sctx->hash[0]);
+ dst[1] = cpu_to_le32(sctx->hash[1]);
+ dst[2] = cpu_to_le32(sctx->hash[2]);
+ dst[3] = cpu_to_le32(sctx->hash[3]);
+
+ ppc_md5_clear_context(sctx);
+ return 0;
+}
+
+static int ppc_md5_export(struct shash_desc *desc, void *out)
+{
+ struct md5_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int ppc_md5_import(struct shash_desc *desc, const void *in)
+{
+ struct md5_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .init = ppc_md5_init,
+ .update = ppc_md5_update,
+ .final = ppc_md5_final,
+ .export = ppc_md5_export,
+ .import = ppc_md5_import,
+ .descsize = sizeof(struct md5_state),
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name= "md5-ppc",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init ppc_md5_mod_init(void)
+{
+ return crypto_register_shash(&alg);
+}
+
+static void __exit ppc_md5_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(ppc_md5_mod_init);
+module_exit(ppc_md5_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, PPC assembler");
+
+MODULE_ALIAS_CRYPTO("md5");
+MODULE_ALIAS_CRYPTO("md5-ppc");
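+
+/*
+ * Usage sketch (illustrative only, not part of this driver): once the
+ * module is registered the transform is reachable through the normal
+ * synchronous hash API, roughly along the lines of
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("md5", 0, 0);
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *	u8 digest[MD5_DIGEST_SIZE];
+ *
+ *	desc->tfm = tfm;
+ *	crypto_shash_digest(desc, data, len, digest);
+ *	crypto_free_shash(tfm);
+ *
+ * with error handling omitted for brevity.
+ */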
diff --git a/arch/powerpc/crypto/sha1-spe-asm.S b/arch/powerpc/crypto/sha1-spe-asm.S
new file mode 100644
index 000000000000..fcb6cf002889
--- /dev/null
+++ b/arch/powerpc/crypto/sha1-spe-asm.S
@@ -0,0 +1,299 @@
+/*
+ * Fast SHA-1 implementation for SPE instruction set (PPC)
+ *
+ * This code makes use of the SPE SIMD instruction set as defined in
+ * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf
+ * Implementation is based on optimization guide notes from
+ * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#define rHP r3 /* pointer to hash value */
+#define rWP r4 /* pointer to input */
+#define rKP r5 /* pointer to constants */
+
+#define rW0 r14 /* 64 bit round words */
+#define rW1 r15
+#define rW2 r16
+#define rW3 r17
+#define rW4 r18
+#define rW5 r19
+#define rW6 r20
+#define rW7 r21
+
+#define rH0 r6 /* 32 bit hash values */
+#define rH1 r7
+#define rH2 r8
+#define rH3 r9
+#define rH4 r10
+
+#define rT0 r22 /* 64 bit temporary */
+#define rT1 r0 /* 32 bit temporaries */
+#define rT2 r11
+#define rT3 r12
+
+#define rK r23 /* 64 bit constant in volatile register */
+
+#define LOAD_K01
+
+#define LOAD_K11 \
+ evlwwsplat rK,0(rKP);
+
+#define LOAD_K21 \
+ evlwwsplat rK,4(rKP);
+
+#define LOAD_K31 \
+ evlwwsplat rK,8(rKP);
+
+#define LOAD_K41 \
+ evlwwsplat rK,12(rKP);
+
+#define INITIALIZE \
+ stwu r1,-128(r1); /* create stack frame */ \
+ evstdw r14,8(r1); /* We must save non volatile */ \
+ evstdw r15,16(r1); /* registers. Take the chance */ \
+ evstdw r16,24(r1); /* and save the SPE part too */ \
+ evstdw r17,32(r1); \
+ evstdw r18,40(r1); \
+ evstdw r19,48(r1); \
+ evstdw r20,56(r1); \
+ evstdw r21,64(r1); \
+ evstdw r22,72(r1); \
+ evstdw r23,80(r1);
+
+
+#define FINALIZE \
+ evldw r14,8(r1); /* restore SPE registers */ \
+ evldw r15,16(r1); \
+ evldw r16,24(r1); \
+ evldw r17,32(r1); \
+ evldw r18,40(r1); \
+ evldw r19,48(r1); \
+ evldw r20,56(r1); \
+ evldw r21,64(r1); \
+ evldw r22,72(r1); \
+ evldw r23,80(r1); \
+ xor r0,r0,r0; \
+ stw r0,8(r1); /* Delete sensitive data */ \
+ stw r0,16(r1); /* that we might have pushed */ \
+ stw r0,24(r1); /* from other context that runs */ \
+ stw r0,32(r1); /* the same code. Assume that */ \
+ stw r0,40(r1); /* the lower part of the GPRs */ \
+ stw r0,48(r1); /* were already overwritten on */ \
+ stw r0,56(r1); /* the way down to here */ \
+ stw r0,64(r1); \
+ stw r0,72(r1); \
+ stw r0,80(r1); \
+ addi r1,r1,128; /* cleanup stack frame */
+
+#ifdef __BIG_ENDIAN__
+#define LOAD_DATA(reg, off) \
+ lwz reg,off(rWP); /* load data */
+#define NEXT_BLOCK \
+ addi rWP,rWP,64; /* increment per block */
+#else
+#define LOAD_DATA(reg, off) \
+ lwbrx reg,0,rWP; /* load data */ \
+ addi rWP,rWP,4; /* increment per word */
+#define NEXT_BLOCK /* nothing to do */
+#endif
+
+#define R_00_15(a, b, c, d, e, w0, w1, k, off) \
+ LOAD_DATA(w0, off) /* 1: W */ \
+ and rT2,b,c; /* 1: F' = B and C */ \
+ LOAD_K##k##1 \
+ andc rT1,d,b; /* 1: F" = ~B and D */ \
+ rotrwi rT0,a,27; /* 1: A' = A rotl 5 */ \
+ or rT2,rT2,rT1; /* 1: F = F' or F" */ \
+ add e,e,rT0; /* 1: E = E + A' */ \
+ rotrwi b,b,2; /* 1: B = B rotl 30 */ \
+ add e,e,w0; /* 1: E = E + W */ \
+ LOAD_DATA(w1, off+4) /* 2: W */ \
+ add e,e,rT2; /* 1: E = E + F */ \
+ and rT1,a,b; /* 2: F' = B and C */ \
+ add e,e,rK; /* 1: E = E + K */ \
+ andc rT2,c,a; /* 2: F" = ~B and D */ \
+ add d,d,rK; /* 2: E = E + K */ \
+ or rT2,rT2,rT1; /* 2: F = F' or F" */ \
+ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
+ add d,d,w1; /* 2: E = E + W */ \
+ rotrwi a,a,2; /* 2: B = B rotl 30 */ \
+ add d,d,rT0; /* 2: E = E + A' */ \
+ evmergelo w1,w1,w0; /* mix W[0]/W[1] */ \
+ add d,d,rT2 /* 2: E = E + F */
+
+#define R_16_19(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
+ and rT2,b,c; /* 1: F' = B and C */ \
+ evmergelohi rT0,w7,w6; /* W[-3] */ \
+ andc rT1,d,b; /* 1: F" = ~B and D */ \
+ evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \
+ or rT1,rT1,rT2; /* 1: F = F' or F" */ \
+ evxor w0,w0,w4; /* W = W xor W[-8] */ \
+ add e,e,rT1; /* 1: E = E + F */ \
+ evxor w0,w0,w1; /* W = W xor W[-14] */ \
+ rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \
+ evrlwi w0,w0,1; /* W = W rotl 1 */ \
+ add e,e,rT2; /* 1: E = E + A' */ \
+ evaddw rT0,w0,rK; /* WK = W + K */ \
+ rotrwi b,b,2; /* 1: B = B rotl 30 */ \
+ LOAD_K##k##1 \
+ evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \
+ add e,e,rT0; /* 1: E = E + WK */ \
+ add d,d,rT1; /* 2: E = E + WK */ \
+ and rT2,a,b; /* 2: F' = B and C */ \
+ andc rT1,c,a; /* 2: F" = ~B and D */ \
+ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
+ or rT1,rT1,rT2; /* 2: F = F' or F" */ \
+ add d,d,rT0; /* 2: E = E + A' */ \
+ rotrwi a,a,2; /* 2: B = B rotl 30 */ \
+ add d,d,rT1 /* 2: E = E + F */
+
+#define R_20_39(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
+ evmergelohi rT0,w7,w6; /* W[-3] */ \
+ xor rT2,b,c; /* 1: F' = B xor C */ \
+ evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \
+ xor rT2,rT2,d; /* 1: F = F' xor D */ \
+ evxor w0,w0,w4; /* W = W xor W[-8] */ \
+ add e,e,rT2; /* 1: E = E + F */ \
+ evxor w0,w0,w1; /* W = W xor W[-14] */ \
+ rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \
+ evrlwi w0,w0,1; /* W = W rotl 1 */ \
+ add e,e,rT2; /* 1: E = E + A' */ \
+ evaddw rT0,w0,rK; /* WK = W + K */ \
+ rotrwi b,b,2; /* 1: B = B rotl 30 */ \
+ LOAD_K##k##1 \
+ evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \
+ add e,e,rT0; /* 1: E = E + WK */ \
+ xor rT2,a,b; /* 2: F' = B xor C */ \
+ add d,d,rT1; /* 2: E = E + WK */ \
+ xor rT2,rT2,c; /* 2: F = F' xor D */ \
+ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
+ add d,d,rT2; /* 2: E = E + F */ \
+ rotrwi a,a,2; /* 2: B = B rotl 30 */ \
+ add d,d,rT0 /* 2: E = E + A' */
+
+#define R_40_59(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
+ and rT2,b,c; /* 1: F' = B and C */ \
+ evmergelohi rT0,w7,w6; /* W[-3] */ \
+ or rT1,b,c; /* 1: F" = B or C */ \
+ evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \
+ and rT1,d,rT1; /* 1: F" = F" and D */ \
+ evxor w0,w0,w4; /* W = W xor W[-8] */ \
+ or rT2,rT2,rT1; /* 1: F = F' or F" */ \
+ evxor w0,w0,w1; /* W = W xor W[-14] */ \
+ add e,e,rT2; /* 1: E = E + F */ \
+ evrlwi w0,w0,1; /* W = W rotl 1 */ \
+ rotrwi rT2,a,27; /* 1: A' = A rotl 5 */ \
+ evaddw rT0,w0,rK; /* WK = W + K */ \
+ add e,e,rT2; /* 1: E = E + A' */ \
+ LOAD_K##k##1 \
+ evmergehi rT1,rT1,rT0; /* WK1/WK2 */ \
+ rotrwi b,b,2; /* 1: B = B rotl 30 */ \
+ add e,e,rT0; /* 1: E = E + WK */ \
+ and rT2,a,b; /* 2: F' = B and C */ \
+ or rT0,a,b; /* 2: F" = B or C */ \
+ add d,d,rT1; /* 2: E = E + WK */ \
+ and rT0,c,rT0; /* 2: F" = F" and D */ \
+ rotrwi a,a,2; /* 2: B = B rotl 30 */ \
+ or rT2,rT2,rT0; /* 2: F = F' or F" */ \
+ rotrwi rT0,e,27; /* 2: A' = A rotl 5 */ \
+ add d,d,rT2; /* 2: E = E + F */ \
+ add d,d,rT0 /* 2: E = E + A' */
+
+#define R_60_79(a, b, c, d, e, w0, w1, w4, w6, w7, k) \
+ R_20_39(a, b, c, d, e, w0, w1, w4, w6, w7, k)
+
+_GLOBAL(ppc_spe_sha1_transform)
+ INITIALIZE
+
+ lwz rH0,0(rHP)
+ lwz rH1,4(rHP)
+ mtctr r5
+ lwz rH2,8(rHP)
+ lis rKP,PPC_SPE_SHA1_K@h
+ lwz rH3,12(rHP)
+ ori rKP,rKP,PPC_SPE_SHA1_K@l
+ lwz rH4,16(rHP)
+
+ppc_spe_sha1_main:
+ R_00_15(rH0, rH1, rH2, rH3, rH4, rW1, rW0, 1, 0)
+ R_00_15(rH3, rH4, rH0, rH1, rH2, rW2, rW1, 0, 8)
+ R_00_15(rH1, rH2, rH3, rH4, rH0, rW3, rW2, 0, 16)
+ R_00_15(rH4, rH0, rH1, rH2, rH3, rW4, rW3, 0, 24)
+ R_00_15(rH2, rH3, rH4, rH0, rH1, rW5, rW4, 0, 32)
+ R_00_15(rH0, rH1, rH2, rH3, rH4, rW6, rW5, 0, 40)
+ R_00_15(rH3, rH4, rH0, rH1, rH2, rT3, rW6, 0, 48)
+ R_00_15(rH1, rH2, rH3, rH4, rH0, rT3, rW7, 0, 56)
+
+ R_16_19(rH4, rH0, rH1, rH2, rH3, rW0, rW1, rW4, rW6, rW7, 0)
+ R_16_19(rH2, rH3, rH4, rH0, rH1, rW1, rW2, rW5, rW7, rW0, 2)
+
+ R_20_39(rH0, rH1, rH2, rH3, rH4, rW2, rW3, rW6, rW0, rW1, 0)
+ R_20_39(rH3, rH4, rH0, rH1, rH2, rW3, rW4, rW7, rW1, rW2, 0)
+ R_20_39(rH1, rH2, rH3, rH4, rH0, rW4, rW5, rW0, rW2, rW3, 0)
+ R_20_39(rH4, rH0, rH1, rH2, rH3, rW5, rW6, rW1, rW3, rW4, 0)
+ R_20_39(rH2, rH3, rH4, rH0, rH1, rW6, rW7, rW2, rW4, rW5, 0)
+ R_20_39(rH0, rH1, rH2, rH3, rH4, rW7, rW0, rW3, rW5, rW6, 0)
+ R_20_39(rH3, rH4, rH0, rH1, rH2, rW0, rW1, rW4, rW6, rW7, 0)
+ R_20_39(rH1, rH2, rH3, rH4, rH0, rW1, rW2, rW5, rW7, rW0, 0)
+ R_20_39(rH4, rH0, rH1, rH2, rH3, rW2, rW3, rW6, rW0, rW1, 0)
+ R_20_39(rH2, rH3, rH4, rH0, rH1, rW3, rW4, rW7, rW1, rW2, 3)
+
+ R_40_59(rH0, rH1, rH2, rH3, rH4, rW4, rW5, rW0, rW2, rW3, 0)
+ R_40_59(rH3, rH4, rH0, rH1, rH2, rW5, rW6, rW1, rW3, rW4, 0)
+ R_40_59(rH1, rH2, rH3, rH4, rH0, rW6, rW7, rW2, rW4, rW5, 0)
+ R_40_59(rH4, rH0, rH1, rH2, rH3, rW7, rW0, rW3, rW5, rW6, 0)
+ R_40_59(rH2, rH3, rH4, rH0, rH1, rW0, rW1, rW4, rW6, rW7, 0)
+ R_40_59(rH0, rH1, rH2, rH3, rH4, rW1, rW2, rW5, rW7, rW0, 0)
+ R_40_59(rH3, rH4, rH0, rH1, rH2, rW2, rW3, rW6, rW0, rW1, 0)
+ R_40_59(rH1, rH2, rH3, rH4, rH0, rW3, rW4, rW7, rW1, rW2, 0)
+ R_40_59(rH4, rH0, rH1, rH2, rH3, rW4, rW5, rW0, rW2, rW3, 0)
+ R_40_59(rH2, rH3, rH4, rH0, rH1, rW5, rW6, rW1, rW3, rW4, 4)
+
+ R_60_79(rH0, rH1, rH2, rH3, rH4, rW6, rW7, rW2, rW4, rW5, 0)
+ R_60_79(rH3, rH4, rH0, rH1, rH2, rW7, rW0, rW3, rW5, rW6, 0)
+ R_60_79(rH1, rH2, rH3, rH4, rH0, rW0, rW1, rW4, rW6, rW7, 0)
+ R_60_79(rH4, rH0, rH1, rH2, rH3, rW1, rW2, rW5, rW7, rW0, 0)
+ R_60_79(rH2, rH3, rH4, rH0, rH1, rW2, rW3, rW6, rW0, rW1, 0)
+ R_60_79(rH0, rH1, rH2, rH3, rH4, rW3, rW4, rW7, rW1, rW2, 0)
+ R_60_79(rH3, rH4, rH0, rH1, rH2, rW4, rW5, rW0, rW2, rW3, 0)
+ lwz rT3,0(rHP)
+ R_60_79(rH1, rH2, rH3, rH4, rH0, rW5, rW6, rW1, rW3, rW4, 0)
+ lwz rW1,4(rHP)
+ R_60_79(rH4, rH0, rH1, rH2, rH3, rW6, rW7, rW2, rW4, rW5, 0)
+ lwz rW2,8(rHP)
+ R_60_79(rH2, rH3, rH4, rH0, rH1, rW7, rW0, rW3, rW5, rW6, 0)
+ lwz rW3,12(rHP)
+ NEXT_BLOCK
+ lwz rW4,16(rHP)
+
+ add rH0,rH0,rT3
+ stw rH0,0(rHP)
+ add rH1,rH1,rW1
+ stw rH1,4(rHP)
+ add rH2,rH2,rW2
+ stw rH2,8(rHP)
+ add rH3,rH3,rW3
+ stw rH3,12(rHP)
+ add rH4,rH4,rW4
+ stw rH4,16(rHP)
+
+ bdnz ppc_spe_sha1_main
+
+ FINALIZE
+ blr
+
+.data
+.align 4
+PPC_SPE_SHA1_K:
+ .long 0x5A827999,0x6ED9EBA1,0x8F1BBCDC,0xCA62C1D6
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
new file mode 100644
index 000000000000..3e1d22212521
--- /dev/null
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -0,0 +1,210 @@
+/*
+ * Glue code for SHA-1 implementation for SPE instructions (PPC)
+ *
+ * Based on generic implementation.
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/switch_to.h>
+#include <linux/hardirq.h>
+
+/*
+ * MAX_BYTES defines the number of bytes that are allowed to be processed
+ * between preempt_disable() and preempt_enable(). SHA1 takes ~1000
+ * operations per 64 bytes. e500 cores can issue two arithmetic instructions
+ * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
+ * Thus 2KB of input data will need an estimated maximum of 18,000 cycles.
+ * Headroom for cache misses included. Even with the low-end model clocked
+ * at 667 MHz this amounts to a critical time window of less than 27us.
+ *
+ */
+#define MAX_BYTES 2048
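The 2048-byte limit follows from the estimate in the comment above; a small stand-alone calculation makes the arithmetic explicit (the 1/8 cache-miss headroom factor is an illustrative assumption, not a figure taken from the patch):

    #include <stdio.h>

    int main(void)
    {
    	const unsigned int max_bytes = 2048;		/* MAX_BYTES above */
    	const unsigned int ops_per_block = 1000;	/* ~SHA-1 ops per 64-byte block */
    	const unsigned int issue_width = 2;		/* e500: SU1 + SU2 */
    	const unsigned int mhz = 667;			/* low-end core clock */

    	unsigned int cycles = (max_bytes / 64) * ops_per_block / issue_width;
    	/* assumed ~12% headroom for cache misses -> ~18,000 cycles */
    	unsigned int with_headroom = cycles + cycles / 8;

    	printf("~%u cycles, ~%u us preempt-off window\n",
    	       with_headroom, with_headroom / mhz);
    	return 0;
    }

This prints roughly 18,000 cycles and 26 us, matching the "less than 27us" figure in the comment.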
+
+extern void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks);
+
+static void spe_begin(void)
+{
+ /* We just start SPE operations and will save SPE registers later. */
+ preempt_disable();
+ enable_kernel_spe();
+}
+
+static void spe_end(void)
+{
+ /* reenable preemption */
+ preempt_enable();
+}
+
+static inline void ppc_sha1_clear_context(struct sha1_state *sctx)
+{
+ int count = sizeof(struct sha1_state) >> 2;
+ u32 *ptr = (u32 *)sctx;
+
+ /* make sure we can clear the fast way */
+ BUILD_BUG_ON(sizeof(struct sha1_state) % 4);
+ do { *ptr++ = 0; } while (--count);
+}
+
+static int ppc_spe_sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ const unsigned int offset = sctx->count & 0x3f;
+ const unsigned int avail = 64 - offset;
+ unsigned int bytes;
+ const u8 *src = data;
+
+ if (avail > len) {
+ sctx->count += len;
+ memcpy((char *)sctx->buffer + offset, src, len);
+ return 0;
+ }
+
+ sctx->count += len;
+
+ if (offset) {
+ memcpy((char *)sctx->buffer + offset, src, avail);
+
+ spe_begin();
+ ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1);
+ spe_end();
+
+ len -= avail;
+ src += avail;
+ }
+
+ while (len > 63) {
+ bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
+ bytes = bytes & ~0x3f;
+
+ spe_begin();
+ ppc_spe_sha1_transform(sctx->state, src, bytes >> 6);
+ spe_end();
+
+ src += bytes;
+ len -= bytes;
+ }
+
+ memcpy((char *)sctx->buffer, src, len);
+ return 0;
+}
+
+static int ppc_spe_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ const unsigned int offset = sctx->count & 0x3f;
+ char *p = (char *)sctx->buffer + offset;
+ int padlen;
+ __be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56);
+ __be32 *dst = (__be32 *)out;
+
+ padlen = 55 - offset;
+ *p++ = 0x80;
+
+ spe_begin();
+
+ if (padlen < 0) {
+ memset(p, 0x00, padlen + sizeof (u64));
+ ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
+ p = (char *)sctx->buffer;
+ padlen = 56;
+ }
+
+ memset(p, 0, padlen);
+ *pbits = cpu_to_be64(sctx->count << 3);
+ ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
+
+ spe_end();
+
+ dst[0] = cpu_to_be32(sctx->state[0]);
+ dst[1] = cpu_to_be32(sctx->state[1]);
+ dst[2] = cpu_to_be32(sctx->state[2]);
+ dst[3] = cpu_to_be32(sctx->state[3]);
+ dst[4] = cpu_to_be32(sctx->state[4]);
+
+ ppc_sha1_clear_context(sctx);
+ return 0;
+}
+
+static int ppc_spe_sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = ppc_spe_sha1_init,
+ .update = ppc_spe_sha1_update,
+ .final = ppc_spe_sha1_final,
+ .export = ppc_spe_sha1_export,
+ .import = ppc_spe_sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name= "sha1-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init ppc_spe_sha1_mod_init(void)
+{
+ return crypto_register_shash(&alg);
+}
+
+static void __exit ppc_spe_sha1_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(ppc_spe_sha1_mod_init);
+module_exit(ppc_spe_sha1_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, SPE optimized");
+
+MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-ppc-spe");
diff --git a/arch/powerpc/crypto/sha256-spe-asm.S b/arch/powerpc/crypto/sha256-spe-asm.S
new file mode 100644
index 000000000000..2d10e4c08f03
--- /dev/null
+++ b/arch/powerpc/crypto/sha256-spe-asm.S
@@ -0,0 +1,323 @@
+/*
+ * Fast SHA-256 implementation for SPE instruction set (PPC)
+ *
+ * This code makes use of the SPE SIMD instruction set as defined in
+ * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf
+ * Implementation is based on optimization guide notes from
+ * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#define rHP r3 /* pointer to hash values in memory */
+#define rKP r24 /* pointer to round constants */
+#define rWP r4 /* pointer to input data */
+
+#define rH0 r5 /* 8 32 bit hash values in 8 registers */
+#define rH1 r6
+#define rH2 r7
+#define rH3 r8
+#define rH4 r9
+#define rH5 r10
+#define rH6 r11
+#define rH7 r12
+
+#define rW0 r14 /* 64 bit registers. 16 words in 8 registers */
+#define rW1 r15
+#define rW2 r16
+#define rW3 r17
+#define rW4 r18
+#define rW5 r19
+#define rW6 r20
+#define rW7 r21
+
+#define rT0 r22 /* 64 bit temporaries */
+#define rT1 r23
+#define rT2 r0 /* 32 bit temporaries */
+#define rT3 r25
+
+#define CMP_KN_LOOP
+#define CMP_KC_LOOP \
+ cmpwi rT1,0;
+
+#define INITIALIZE \
+ stwu r1,-128(r1); /* create stack frame */ \
+ evstdw r14,8(r1); /* We must save non volatile */ \
+ evstdw r15,16(r1); /* registers. Take the chance */ \
+ evstdw r16,24(r1); /* and save the SPE part too */ \
+ evstdw r17,32(r1); \
+ evstdw r18,40(r1); \
+ evstdw r19,48(r1); \
+ evstdw r20,56(r1); \
+ evstdw r21,64(r1); \
+ evstdw r22,72(r1); \
+ evstdw r23,80(r1); \
+ stw r24,88(r1); /* save normal registers */ \
+ stw r25,92(r1);
+
+
+#define FINALIZE \
+ evldw r14,8(r1); /* restore SPE registers */ \
+ evldw r15,16(r1); \
+ evldw r16,24(r1); \
+ evldw r17,32(r1); \
+ evldw r18,40(r1); \
+ evldw r19,48(r1); \
+ evldw r20,56(r1); \
+ evldw r21,64(r1); \
+ evldw r22,72(r1); \
+ evldw r23,80(r1); \
+ lwz r24,88(r1); /* restore normal registers */ \
+ lwz r25,92(r1); \
+ xor r0,r0,r0; \
+ stw r0,8(r1); /* Delete sensitive data */ \
+ stw r0,16(r1); /* that we might have pushed */ \
+ stw r0,24(r1); /* from another context that runs */ \
+ stw r0,32(r1); /* the same code. Assume that */ \
+ stw r0,40(r1); /* the lower part of the GPRs */ \
+ stw r0,48(r1); /* was already overwritten on */ \
+ stw r0,56(r1); /* the way down to here */ \
+ stw r0,64(r1); \
+ stw r0,72(r1); \
+ stw r0,80(r1); \
+ addi r1,r1,128; /* cleanup stack frame */
+
+#ifdef __BIG_ENDIAN__
+#define LOAD_DATA(reg, off) \
+ lwz reg,off(rWP); /* load data */
+#define NEXT_BLOCK \
+ addi rWP,rWP,64; /* increment per block */
+#else
+#define LOAD_DATA(reg, off) \
+ lwbrx reg,0,rWP; /* load data */ \
+ addi rWP,rWP,4; /* increment per word */
+#define NEXT_BLOCK /* nothing to do */
+#endif
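Both LOAD_DATA variants yield the same value: SHA-256 consumes its message as big-endian 32-bit words, so the big-endian build can use a plain lwz and bump rWP once per block, while the little-endian build uses the byte-reversing lwbrx and advances rWP one word at a time. In C terms each load is equivalent to (illustrative only):

    #include <stdint.h>

    /* What LOAD_DATA computes on either endianness: the next big-endian
     * message word, as the SHA specification defines the input stream. */
    static inline uint32_t load_be32(const uint8_t *p)
    {
    	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
    	       (uint32_t)p[2] << 8  | (uint32_t)p[3];
    }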
+
+#define R_LOAD_W(a, b, c, d, e, f, g, h, w, off) \
+ LOAD_DATA(w, off) /* 1: W */ \
+ rotrwi rT0,e,6; /* 1: S1 = e rotr 6 */ \
+ rotrwi rT1,e,11; /* 1: S1' = e rotr 11 */ \
+ rotrwi rT2,e,25; /* 1: S1" = e rotr 25 */ \
+ xor rT0,rT0,rT1; /* 1: S1 = S1 xor S1' */ \
+ and rT3,e,f; /* 1: ch = e and f */ \
+ xor rT0,rT0,rT2; /* 1: S1 = S1 xor S1" */ \
+ andc rT1,g,e; /* 1: ch' = ~e and g */ \
+ lwz rT2,off(rKP); /* 1: K */ \
+ xor rT3,rT3,rT1; /* 1: ch = ch xor ch' */ \
+ add h,h,rT0; /* 1: temp1 = h + S1 */ \
+ add rT3,rT3,w; /* 1: temp1' = ch + w */ \
+ rotrwi rT0,a,2; /* 1: S0 = a rotr 2 */ \
+ add h,h,rT3; /* 1: temp1 = temp1 + temp1' */ \
+ rotrwi rT1,a,13; /* 1: S0' = a rotr 13 */ \
+ add h,h,rT2; /* 1: temp1 = temp1 + K */ \
+ rotrwi rT3,a,22; /* 1: S0" = a rotr 22 */ \
+ xor rT0,rT0,rT1; /* 1: S0 = S0 xor S0' */ \
+ add d,d,h; /* 1: d = d + temp1 */ \
+ xor rT3,rT0,rT3; /* 1: S0 = S0 xor S0" */ \
+ evmergelo w,w,w; /* shift W */ \
+ or rT2,a,b; /* 1: maj = a or b */ \
+ and rT1,a,b; /* 1: maj' = a and b */ \
+ and rT2,rT2,c; /* 1: maj = maj and c */ \
+ LOAD_DATA(w, off+4) /* 2: W */ \
+ or rT2,rT1,rT2; /* 1: maj = maj or maj' */ \
+ rotrwi rT0,d,6; /* 2: S1 = e rotr 6 */ \
+ add rT3,rT3,rT2; /* 1: temp2 = S0 + maj */ \
+ rotrwi rT1,d,11; /* 2: S1' = e rotr 11 */ \
+ add h,h,rT3; /* 1: h = temp1 + temp2 */ \
+ rotrwi rT2,d,25; /* 2: S1" = e rotr 25 */ \
+ xor rT0,rT0,rT1; /* 2: S1 = S1 xor S1' */ \
+ and rT3,d,e; /* 2: ch = e and f */ \
+ xor rT0,rT0,rT2; /* 2: S1 = S1 xor S1" */ \
+ andc rT1,f,d; /* 2: ch' = ~e and g */ \
+ lwz rT2,off+4(rKP); /* 2: K */ \
+ xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \
+ add g,g,rT0; /* 2: temp1 = h + S1 */ \
+ add rT3,rT3,w; /* 2: temp1' = ch + w */ \
+ rotrwi rT0,h,2; /* 2: S0 = a rotr 2 */ \
+ add g,g,rT3; /* 2: temp1 = temp1 + temp1' */ \
+ rotrwi rT1,h,13; /* 2: S0' = a rotr 13 */ \
+ add g,g,rT2; /* 2: temp1 = temp1 + K */ \
+ rotrwi rT3,h,22; /* 2: S0" = a rotr 22 */ \
+ xor rT0,rT0,rT1; /* 2: S0 = S0 xor S0' */ \
+ or rT2,h,a; /* 2: maj = a or b */ \
+ xor rT3,rT0,rT3; /* 2: S0 = S0 xor S0" */ \
+ and rT1,h,a; /* 2: maj' = a and b */ \
+ and rT2,rT2,b; /* 2: maj = maj and c */ \
+ add c,c,g; /* 2: d = d + temp1 */ \
+ or rT2,rT1,rT2; /* 2: maj = maj or maj' */ \
+ add rT3,rT3,rT2; /* 2: temp2 = S0 + maj */ \
+ add g,g,rT3 /* 2: h = temp1 + temp2 */
+
+#define R_CALC_W(a, b, c, d, e, f, g, h, w0, w1, w4, w5, w7, k, off) \
+ rotrwi rT2,e,6; /* 1: S1 = e rotr 6 */ \
+ evmergelohi rT0,w0,w1; /* w[-15] */ \
+ rotrwi rT3,e,11; /* 1: S1' = e rotr 11 */ \
+ evsrwiu rT1,rT0,3; /* s0 = w[-15] >> 3 */ \
+ xor rT2,rT2,rT3; /* 1: S1 = S1 xor S1' */ \
+ evrlwi rT0,rT0,25; /* s0' = w[-15] rotr 7 */ \
+ rotrwi rT3,e,25; /* 1: S1' = e rotr 25 */ \
+ evxor rT1,rT1,rT0; /* s0 = s0 xor s0' */ \
+ xor rT2,rT2,rT3; /* 1: S1 = S1 xor S1' */ \
+ evrlwi rT0,rT0,21; /* s0' = w[-15] rotr 18 */ \
+ add h,h,rT2; /* 1: temp1 = h + S1 */ \
+ evxor rT0,rT0,rT1; /* s0 = s0 xor s0' */ \
+ and rT2,e,f; /* 1: ch = e and f */ \
+ evaddw w0,w0,rT0; /* w = w[-16] + s0 */ \
+ andc rT3,g,e; /* 1: ch' = ~e and g */ \
+ evsrwiu rT0,w7,10; /* s1 = w[-2] >> 10 */ \
+ xor rT2,rT2,rT3; /* 1: ch = ch xor ch' */ \
+ evrlwi rT1,w7,15; /* s1' = w[-2] rotr 17 */ \
+ add h,h,rT2; /* 1: temp1 = temp1 + ch */ \
+ evxor rT0,rT0,rT1; /* s1 = s1 xor s1' */ \
+ rotrwi rT2,a,2; /* 1: S0 = a rotr 2 */ \
+ evrlwi rT1,w7,13; /* s1' = w[-2] rotr 19 */ \
+ rotrwi rT3,a,13; /* 1: S0' = a rotr 13 */ \
+ evxor rT0,rT0,rT1; /* s1 = s1 xor s1' */ \
+ xor rT2,rT2,rT3; /* 1: S0 = S0 xor S0' */ \
+ evldw rT1,off(rKP); /* k */ \
+ rotrwi rT3,a,22; /* 1: S0' = a rotr 22 */ \
+ evaddw w0,w0,rT0; /* w = w + s1 */ \
+ xor rT2,rT2,rT3; /* 1: S0 = S0 xor S0' */ \
+ evmergelohi rT0,w4,w5; /* w[-7] */ \
+ and rT3,a,b; /* 1: maj = a and b */ \
+ evaddw w0,w0,rT0; /* w = w + w[-7] */ \
+ CMP_K##k##_LOOP \
+ add rT2,rT2,rT3; /* 1: temp2 = S0 + maj */ \
+ evaddw rT1,rT1,w0; /* wk = w + k */ \
+ xor rT3,a,b; /* 1: maj = a xor b */ \
+ evmergehi rT0,rT1,rT1; /* wk1/wk2 */ \
+ and rT3,rT3,c; /* 1: maj = maj and c */ \
+ add h,h,rT0; /* 1: temp1 = temp1 + wk */ \
+ add rT2,rT2,rT3; /* 1: temp2 = temp2 + maj */ \
+ add g,g,rT1; /* 2: temp1 = temp1 + wk */ \
+ add d,d,h; /* 1: d = d + temp1 */ \
+ rotrwi rT0,d,6; /* 2: S1 = e rotr 6 */ \
+ add h,h,rT2; /* 1: h = temp1 + temp2 */ \
+ rotrwi rT1,d,11; /* 2: S1' = e rotr 11 */ \
+ rotrwi rT2,d,25; /* 2: S1" = e rotr 25 */ \
+ xor rT0,rT0,rT1; /* 2: S1 = S1 xor S1' */ \
+ and rT3,d,e; /* 2: ch = e and f */ \
+ xor rT0,rT0,rT2; /* 2: S1 = S1 xor S1" */ \
+ andc rT1,f,d; /* 2: ch' = ~e and g */ \
+ add g,g,rT0; /* 2: temp1 = h + S1 */ \
+ xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \
+ rotrwi rT0,h,2; /* 2: S0 = a rotr 2 */ \
+ add g,g,rT3; /* 2: temp1 = temp1 + ch */ \
+ rotrwi rT1,h,13; /* 2: S0' = a rotr 13 */ \
+ rotrwi rT3,h,22; /* 2: S0" = a rotr 22 */ \
+ xor rT0,rT0,rT1; /* 2: S0 = S0 xor S0' */ \
+ or rT2,h,a; /* 2: maj = a or b */ \
+ and rT1,h,a; /* 2: maj' = a and b */ \
+ and rT2,rT2,b; /* 2: maj = maj and c */ \
+ xor rT3,rT0,rT3; /* 2: S0 = S0 xor S0" */ \
+ or rT2,rT1,rT2; /* 2: maj = maj or maj' */ \
+ add c,c,g; /* 2: d = d + temp1 */ \
+ add rT3,rT3,rT2; /* 2: temp2 = S0 + maj */ \
+ add g,g,rT3 /* 2: h = temp1 + temp2 */
+
+_GLOBAL(ppc_spe_sha256_transform)
+ INITIALIZE
+
+ mtctr r5
+ lwz rH0,0(rHP)
+ lwz rH1,4(rHP)
+ lwz rH2,8(rHP)
+ lwz rH3,12(rHP)
+ lwz rH4,16(rHP)
+ lwz rH5,20(rHP)
+ lwz rH6,24(rHP)
+ lwz rH7,28(rHP)
+
+ppc_spe_sha256_main:
+ lis rKP,PPC_SPE_SHA256_K@ha
+ addi rKP,rKP,PPC_SPE_SHA256_K@l
+
+ R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW0, 0)
+ R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW1, 8)
+ R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW2, 16)
+ R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW3, 24)
+ R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW4, 32)
+ R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW5, 40)
+ R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW6, 48)
+ R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW7, 56)
+ppc_spe_sha256_16_rounds:
+ addi rKP,rKP,64
+ R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7,
+ rW0, rW1, rW4, rW5, rW7, N, 0)
+ R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5,
+ rW1, rW2, rW5, rW6, rW0, N, 8)
+ R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3,
+ rW2, rW3, rW6, rW7, rW1, N, 16)
+ R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1,
+ rW3, rW4, rW7, rW0, rW2, N, 24)
+ R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7,
+ rW4, rW5, rW0, rW1, rW3, N, 32)
+ R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5,
+ rW5, rW6, rW1, rW2, rW4, N, 40)
+ R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3,
+ rW6, rW7, rW2, rW3, rW5, N, 48)
+ R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1,
+ rW7, rW0, rW3, rW4, rW6, C, 56)
+ bt gt,ppc_spe_sha256_16_rounds
+
+ lwz rW0,0(rHP)
+ NEXT_BLOCK
+ lwz rW1,4(rHP)
+ lwz rW2,8(rHP)
+ lwz rW3,12(rHP)
+ lwz rW4,16(rHP)
+ lwz rW5,20(rHP)
+ lwz rW6,24(rHP)
+ lwz rW7,28(rHP)
+
+ add rH0,rH0,rW0
+ stw rH0,0(rHP)
+ add rH1,rH1,rW1
+ stw rH1,4(rHP)
+ add rH2,rH2,rW2
+ stw rH2,8(rHP)
+ add rH3,rH3,rW3
+ stw rH3,12(rHP)
+ add rH4,rH4,rW4
+ stw rH4,16(rHP)
+ add rH5,rH5,rW5
+ stw rH5,20(rHP)
+ add rH6,rH6,rW6
+ stw rH6,24(rHP)
+ add rH7,rH7,rW7
+ stw rH7,28(rHP)
+
+ bdnz ppc_spe_sha256_main
+
+ FINALIZE
+ blr
+
+.data
+.align 5
+PPC_SPE_SHA256_K:
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
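As with the SHA-1 file, a short illustrative C rendering of the per-round arithmetic that R_LOAD_W and R_CALC_W interleave two rounds at a time may help; the helper names are descriptive and not taken from the tree.

    #include <stdint.h>

    static inline uint32_t rotr32(uint32_t x, unsigned n)
    {
    	return (x >> n) | (x << (32 - n));
    }

    /* Message-schedule step performed by R_CALC_W with the ev* instructions:
     * w[i] = w[i-16] + s0(w[i-15]) + w[i-7] + s1(w[i-2]) */
    static inline uint32_t sha256_schedule(uint32_t w16, uint32_t w15,
    				       uint32_t w7, uint32_t w2)
    {
    	uint32_t s0 = rotr32(w15, 7) ^ rotr32(w15, 18) ^ (w15 >> 3);
    	uint32_t s1 = rotr32(w2, 17) ^ rotr32(w2, 19) ^ (w2 >> 10);

    	return w16 + s0 + w7 + s1;
    }

    /* One SHA-256 round; the macros rotate the roles of a..h by permuting
     * their argument lists instead of moving data between registers. */
    static inline void sha256_round(uint32_t *a, uint32_t *b, uint32_t *c,
    				uint32_t *d, uint32_t *e, uint32_t *f,
    				uint32_t *g, uint32_t *h,
    				uint32_t k, uint32_t w)
    {
    	uint32_t S1 = rotr32(*e, 6) ^ rotr32(*e, 11) ^ rotr32(*e, 25);
    	uint32_t ch = (*e & *f) ^ (~*e & *g);
    	uint32_t temp1 = *h + S1 + ch + k + w;
    	uint32_t S0 = rotr32(*a, 2) ^ rotr32(*a, 13) ^ rotr32(*a, 22);
    	uint32_t maj = (*a & *b) ^ (*a & *c) ^ (*b & *c);

    	*d += temp1;
    	*h = temp1 + S0 + maj;
    }

R_CALC_W additionally folds the round-constant load (evldw from rKP) and the loop-control compare (CMP_KC_LOOP on the last invocation) into the same instruction stream.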
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
new file mode 100644
index 000000000000..f4a616fe1a82
--- /dev/null
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -0,0 +1,275 @@
+/*
+ * Glue code for SHA-256 implementation for SPE instructions (PPC)
+ *
+ * Based on the generic implementation. The assembler module takes care
+ * of the SPE registers so it can run from interrupt context.
+ *
+ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/switch_to.h>
+#include <linux/hardirq.h>
+
+/*
+ * MAX_BYTES defines the number of bytes that are allowed to be processed
+ * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000
+ * operations per 64 bytes. e500 cores can issue two arithmetic instructions
+ * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
+ * Thus 1KB of input data will need an estimated maximum of 18,000 cycles.
+ * Headroom for cache misses included. Even with the low-end model clocked
+ * at 667 MHz this amounts to a critical time window of less than 27us.
+ *
+ */
+#define MAX_BYTES 1024
+
+extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks);
+
+static void spe_begin(void)
+{
+ /* We just start SPE operations and will save SPE registers later. */
+ preempt_disable();
+ enable_kernel_spe();
+}
+
+static void spe_end(void)
+{
+ /* reenable preemption */
+ preempt_enable();
+}
+
+static inline void ppc_sha256_clear_context(struct sha256_state *sctx)
+{
+ int count = sizeof(struct sha256_state) >> 2;
+ u32 *ptr = (u32 *)sctx;
+
+ /* make sure we can clear the fast way */
+ BUILD_BUG_ON(sizeof(struct sha256_state) % 4);
+ do { *ptr++ = 0; } while (--count);
+}
+
+static int ppc_spe_sha256_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static int ppc_spe_sha224_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ const unsigned int offset = sctx->count & 0x3f;
+ const unsigned int avail = 64 - offset;
+ unsigned int bytes;
+ const u8 *src = data;
+
+ if (avail > len) {
+ sctx->count += len;
+ memcpy((char *)sctx->buf + offset, src, len);
+ return 0;
+ }
+
+ sctx->count += len;
+
+ if (offset) {
+ memcpy((char *)sctx->buf + offset, src, avail);
+
+ spe_begin();
+ ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1);
+ spe_end();
+
+ len -= avail;
+ src += avail;
+ }
+
+ while (len > 63) {
+ /* cut input data into smaller blocks */
+ bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
+ bytes = bytes & ~0x3f;
+
+ spe_begin();
+ ppc_spe_sha256_transform(sctx->state, src, bytes >> 6);
+ spe_end();
+
+ src += bytes;
+ len -= bytes;
+ }
+
+ memcpy((char *)sctx->buf, src, len);
+ return 0;
+}
+
+static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ const unsigned int offset = sctx->count & 0x3f;
+ char *p = (char *)sctx->buf + offset;
+ int padlen;
+ __be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56);
+ __be32 *dst = (__be32 *)out;
+
+ padlen = 55 - offset;
+ *p++ = 0x80;
+
+ spe_begin();
+
+ if (padlen < 0) {
+ memset(p, 0x00, padlen + sizeof (u64));
+ ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
+ p = (char *)sctx->buf;
+ padlen = 56;
+ }
+
+ memset(p, 0, padlen);
+ *pbits = cpu_to_be64(sctx->count << 3);
+ ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
+
+ spe_end();
+
+ dst[0] = cpu_to_be32(sctx->state[0]);
+ dst[1] = cpu_to_be32(sctx->state[1]);
+ dst[2] = cpu_to_be32(sctx->state[2]);
+ dst[3] = cpu_to_be32(sctx->state[3]);
+ dst[4] = cpu_to_be32(sctx->state[4]);
+ dst[5] = cpu_to_be32(sctx->state[5]);
+ dst[6] = cpu_to_be32(sctx->state[6]);
+ dst[7] = cpu_to_be32(sctx->state[7]);
+
+ ppc_sha256_clear_context(sctx);
+ return 0;
+}
+
+static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
+{
+ u32 D[SHA256_DIGEST_SIZE >> 2];
+ __be32 *dst = (__be32 *)out;
+
+ ppc_spe_sha256_final(desc, (u8 *)D);
+
+ /* avoid bytewise memcpy */
+ dst[0] = D[0];
+ dst[1] = D[1];
+ dst[2] = D[2];
+ dst[3] = D[3];
+ dst[4] = D[4];
+ dst[5] = D[5];
+ dst[6] = D[6];
+
+ /* clear sensitive data */
+ memzero_explicit(D, SHA256_DIGEST_SIZE);
+ return 0;
+}
+
+static int ppc_spe_sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg algs[2] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = ppc_spe_sha256_init,
+ .update = ppc_spe_sha256_update,
+ .final = ppc_spe_sha256_final,
+ .export = ppc_spe_sha256_export,
+ .import = ppc_spe_sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name= "sha256-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = ppc_spe_sha224_init,
+ .update = ppc_spe_sha256_update,
+ .final = ppc_spe_sha224_final,
+ .export = ppc_spe_sha256_export,
+ .import = ppc_spe_sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name= "sha224-ppc-spe",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init ppc_spe_sha256_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit ppc_spe_sha256_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(ppc_spe_sha256_mod_init);
+module_exit(ppc_spe_sha256_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, SPE optimized");
+
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-ppc-spe");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-ppc-spe");
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 382b28e364dc..4b87205c230c 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,6 +1,8 @@
-
generic-y += clkdev.h
+generic-y += div64.h
+generic-y += irq_regs.h
generic-y += irq_work.h
+generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index bde531103638..0cc6eedc4780 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -30,8 +30,6 @@ static inline int arch_has_random(void)
return !!ppc_md.get_random_long;
}
-int powernv_get_random_long(unsigned long *v);
-
static inline int arch_get_random_seed_long(unsigned long *v)
{
return 0;
@@ -47,4 +45,13 @@ static inline int arch_has_random_seed(void)
#endif /* CONFIG_ARCH_RANDOM */
+#ifdef CONFIG_PPC_POWERNV
+int powernv_hwrng_present(void);
+int powernv_get_random_long(unsigned long *v);
+int powernv_get_random_real_mode(unsigned long *v);
+#else
+static inline int powernv_hwrng_present(void) { return 0; }
+static inline int powernv_get_random_real_mode(unsigned long *v) { return 0; }
+#endif
+
#endif /* _ASM_POWERPC_ARCHRANDOM_H */
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 21be8ae8f809..dc85dcb891cf 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -23,6 +23,8 @@
#define PPC_STL stringify_in_c(std)
#define PPC_STLU stringify_in_c(stdu)
#define PPC_LCMPI stringify_in_c(cmpdi)
+#define PPC_LCMPLI stringify_in_c(cmpldi)
+#define PPC_LCMP stringify_in_c(cmpd)
#define PPC_LONG stringify_in_c(.llong)
#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
#define PPC_TLNEI stringify_in_c(tdnei)
@@ -52,6 +54,8 @@
#define PPC_STL stringify_in_c(stw)
#define PPC_STLU stringify_in_c(stwu)
#define PPC_LCMPI stringify_in_c(cmpwi)
+#define PPC_LCMPLI stringify_in_c(cmplwi)
+#define PPC_LCMP stringify_in_c(cmpw)
#define PPC_LONG stringify_in_c(.long)
#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
#define PPC_TLNEI stringify_in_c(twnei)
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 34a05a1a990b..0dc42c5082b7 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -76,9 +76,6 @@ extern void _set_L3CR(unsigned long);
#define _set_L3CR(val) do { } while(0)
#endif
-extern void cacheable_memzero(void *p, unsigned int nb);
-extern void *cacheable_memcpy(void *, const void *, unsigned int);
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 5cf5a6d10685..6367b8347dad 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -100,7 +100,7 @@ struct cpu_spec {
/*
* Processor specific routine to flush tlbs.
*/
- void (*flush_tlb)(unsigned long inval_selector);
+ void (*flush_tlb)(unsigned int action);
};
@@ -114,6 +114,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
extern const char *powerpc_base_platform;
+/* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
+enum {
+ TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */
+ TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */
+};
+
#endif /* __ASSEMBLY__ */
/* CPU kernel features */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 4c8ad592ae33..5be6c4753667 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -25,7 +25,7 @@ extern cpumask_t threads_core_mask;
#define threads_per_core 1
#define threads_per_subcore 1
#define threads_shift 0
-#define threads_core_mask (CPU_MASK_CPU0)
+#define threads_core_mask (*get_cpu_mask(0))
#endif
/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
diff --git a/arch/powerpc/include/asm/dbdma.h b/arch/powerpc/include/asm/dbdma.h
index e23f07e73cb3..6c69836b4ec2 100644
--- a/arch/powerpc/include/asm/dbdma.h
+++ b/arch/powerpc/include/asm/dbdma.h
@@ -42,12 +42,12 @@ struct dbdma_regs {
* DBDMA command structure. These fields are all little-endian!
*/
struct dbdma_cmd {
- unsigned short req_count; /* requested byte transfer count */
- unsigned short command; /* command word (has bit-fields) */
- unsigned int phy_addr; /* physical data address */
- unsigned int cmd_dep; /* command-dependent field */
- unsigned short res_count; /* residual count after completion */
- unsigned short xfer_status; /* transfer status */
+ __le16 req_count; /* requested byte transfer count */
+ __le16 command; /* command word (has bit-fields) */
+ __le32 phy_addr; /* physical data address */
+ __le32 cmd_dep; /* command-dependent field */
+ __le16 res_count; /* residual count after completion */
+ __le16 xfer_status; /* transfer status */
};
/* DBDMA command values in command field */
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 7d2e6235726d..4efc11dacb98 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -31,7 +31,7 @@ typedef struct {
static inline bool dcr_map_ok_native(dcr_host_native_t host)
{
- return 1;
+ return true;
}
#define dcr_map_native(dev, dcr_n, dcr_c) \
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 38faeded7d59..9f1371bab5fc 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -8,6 +8,9 @@
struct dma_map_ops;
struct device_node;
+#ifdef CONFIG_PPC64
+struct pci_dn;
+#endif
/*
* Arch extensions to struct device.
@@ -34,6 +37,9 @@ struct dev_archdata {
#ifdef CONFIG_SWIOTLB
dma_addr_t max_direct_dma_addr;
#endif
+#ifdef CONFIG_PPC64
+ struct pci_dn *pci_data;
+#endif
#ifdef CONFIG_EEH
struct eeh_dev *edev;
#endif
diff --git a/arch/powerpc/include/asm/div64.h b/arch/powerpc/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/powerpc/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 894d538f3567..9103687b0436 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -191,11 +191,11 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
struct dev_archdata *sd = &dev->archdata;
if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
- return 0;
+ return false;
#endif
if (!dev->dma_mask)
- return 0;
+ return false;
return addr + size - 1 <= *dev->dma_mask;
}
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 55abfd09e47f..a52db28ecc1e 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -29,7 +29,7 @@
struct pci_dev;
struct pci_bus;
-struct device_node;
+struct pci_dn;
#ifdef CONFIG_EEH
@@ -136,14 +136,14 @@ struct eeh_dev {
struct eeh_pe *pe; /* Associated PE */
struct list_head list; /* Form link list in the PE */
struct pci_controller *phb; /* Associated PHB */
- struct device_node *dn; /* Associated device node */
+ struct pci_dn *pdn; /* Associated PCI device node */
struct pci_dev *pdev; /* Associated PCI device */
struct pci_bus *bus; /* PCI bus for partial hotplug */
};
-static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
+static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev)
{
- return edev ? edev->dn : NULL;
+ return edev ? edev->pdn : NULL;
}
static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
@@ -200,8 +200,7 @@ struct eeh_ops {
char *name;
int (*init)(void);
int (*post_init)(void);
- void* (*of_probe)(struct device_node *dn, void *flag);
- int (*dev_probe)(struct pci_dev *dev, void *flag);
+ void* (*probe)(struct pci_dn *pdn, void *data);
int (*set_option)(struct eeh_pe *pe, int option);
int (*get_pe_addr)(struct eeh_pe *pe);
int (*get_state)(struct eeh_pe *pe, int *state);
@@ -211,10 +210,10 @@ struct eeh_ops {
int (*configure_bridge)(struct eeh_pe *pe);
int (*err_inject)(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask);
- int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
- int (*write_config)(struct device_node *dn, int where, int size, u32 val);
+ int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val);
+ int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val);
int (*next_error)(struct eeh_pe **pe);
- int (*restore_config)(struct device_node *dn);
+ int (*restore_config)(struct pci_dn *pdn);
};
extern int eeh_subsystem_flags;
@@ -272,7 +271,7 @@ void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
-void *eeh_dev_init(struct device_node *dn, void *data);
+void *eeh_dev_init(struct pci_dn *pdn, void *data);
void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
int eeh_init(void);
int __init eeh_ops_register(struct eeh_ops *ops);
@@ -280,8 +279,8 @@ int __exit eeh_ops_unregister(const char *name);
int eeh_check_failure(const volatile void __iomem *token);
int eeh_dev_check_failure(struct eeh_dev *edev);
void eeh_addr_cache_build(void);
-void eeh_add_device_early(struct device_node *);
-void eeh_add_device_tree_early(struct device_node *);
+void eeh_add_device_early(struct pci_dn *);
+void eeh_add_device_tree_early(struct pci_dn *);
void eeh_add_device_late(struct pci_dev *);
void eeh_add_device_tree_late(struct pci_bus *);
void eeh_add_sysfs_files(struct pci_bus *);
@@ -323,7 +322,7 @@ static inline int eeh_init(void)
return 0;
}
-static inline void *eeh_dev_init(struct device_node *dn, void *data)
+static inline void *eeh_dev_init(struct pci_dn *pdn, void *data)
{
return NULL;
}
@@ -339,9 +338,9 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
static inline void eeh_addr_cache_build(void) { }
-static inline void eeh_add_device_early(struct device_node *dn) { }
+static inline void eeh_add_device_early(struct pci_dn *pdn) { }
-static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
static inline void eeh_add_device_late(struct pci_dev *dev) { }
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 57d289acb803..ee46ffef608e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -128,10 +128,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
(0x7ff >> (PAGE_SHIFT - 12)) : \
(0x3ffff >> (PAGE_SHIFT - 12)))
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
-
#ifdef CONFIG_SPU_BASE
/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
#define NT_SPU 1
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 681bc0314b6b..e05808a328db 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -42,7 +42,7 @@
#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
-#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
+/* Free ASM_CONST(0x0000000001000000) */
#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
@@ -75,8 +75,6 @@ enum {
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
- FW_FEATURE_CELLEB_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_BEAT,
- FW_FEATURE_CELLEB_ALWAYS = 0,
FW_FEATURE_NATIVE_POSSIBLE = 0,
FW_FEATURE_NATIVE_ALWAYS = 0,
FW_FEATURE_POSSIBLE =
@@ -89,9 +87,6 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
#endif
-#ifdef CONFIG_PPC_CELLEB
- FW_FEATURE_CELLEB_POSSIBLE |
-#endif
#ifdef CONFIG_PPC_NATIVE
FW_FEATURE_NATIVE_ALWAYS |
#endif
@@ -106,9 +101,6 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
#endif
-#ifdef CONFIG_PPC_CELLEB
- FW_FEATURE_CELLEB_ALWAYS &
-#endif
#ifdef CONFIG_PPC_NATIVE
FW_FEATURE_NATIVE_ALWAYS &
#endif
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f1ea5972f6ec..1e27d6338565 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -29,6 +29,7 @@
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
+#include <asm/pci-bridge.h>
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
@@ -78,6 +79,9 @@ struct iommu_table {
struct iommu_group *it_group;
#endif
void (*set_bypass)(struct iommu_table *tbl, bool enable);
+#ifdef CONFIG_PPC_POWERNV
+ void *data;
+#endif
};
/* Pure 2^n version of get_order */
@@ -169,7 +173,7 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
struct dma_attrs *attrs);
extern void iommu_init_early_pSeries(void);
-extern void iommu_init_early_dart(void);
+extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
extern void alloc_dart_table(void);
diff --git a/arch/powerpc/include/asm/irq_regs.h b/arch/powerpc/include/asm/irq_regs.h
deleted file mode 100644
index ba94b51a0a70..000000000000
--- a/arch/powerpc/include/asm/irq_regs.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-generic/irq_regs.h>
-
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 942c7b1678e3..b91e74a817d8 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -106,10 +106,6 @@ struct kvmppc_vcpu_book3s {
spinlock_t mmu_lock;
};
-#define CONTEXT_HOST 0
-#define CONTEXT_GUEST 1
-#define CONTEXT_GUEST_END 2
-
#define VSID_REAL 0x07ffffffffc00000ULL
#define VSID_BAT 0x07ffffffffb00000ULL
#define VSID_64K 0x0800000000000000ULL
@@ -292,6 +288,9 @@ static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
return !is_kvmppc_hv_enabled(vcpu->kvm);
}
+extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
+extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
+
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2d81e202bdcc..3536d12eb798 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -85,6 +85,20 @@ static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
return old == 0;
}
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+ hpte_v &= ~HPTE_V_HVLOCK;
+ asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+ hpte[0] = cpu_to_be64(hpte_v);
+}
+
+/* Without barrier */
+static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+ hpte_v &= ~HPTE_V_HVLOCK;
+ hpte[0] = cpu_to_be64(hpte_v);
+}
+
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
int i, shift;
@@ -281,40 +295,37 @@ static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
/*
* If it's present and writable, atomically set dirty and referenced bits and
- * return the PTE, otherwise return 0. If we find a transparent hugepage
- * and if it is marked splitting we return 0;
+ * return the PTE, otherwise return 0.
*/
-static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
- unsigned int hugepage)
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
pte_t old_pte, new_pte = __pte(0);
while (1) {
- old_pte = pte_val(*ptep);
+ /*
+ * Make sure we don't reload from ptep
+ */
+ old_pte = READ_ONCE(*ptep);
/*
* wait until _PAGE_BUSY is clear then set it atomically
*/
- if (unlikely(old_pte & _PAGE_BUSY)) {
+ if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
cpu_relax();
continue;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- /* If hugepage and is trans splitting return None */
- if (unlikely(hugepage &&
- pmd_trans_splitting(pte_pmd(old_pte))))
- return __pte(0);
-#endif
/* If pte is not present return None */
- if (unlikely(!(old_pte & _PAGE_PRESENT)))
+ if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
return __pte(0);
new_pte = pte_mkyoung(old_pte);
if (writing && pte_write(old_pte))
new_pte = pte_mkdirty(new_pte);
- if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
- new_pte))
+ if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
+ pte_val(old_pte),
+ pte_val(new_pte))) {
break;
+ }
}
return new_pte;
}
@@ -335,7 +346,7 @@ static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
if (key)
return PP_RWRX <= pp && pp <= PP_RXRX;
- return 1;
+ return true;
}
static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
@@ -373,7 +384,7 @@ static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
if (pagesize <= PAGE_SIZE)
- return 1;
+ return true;
return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
@@ -422,6 +433,10 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
return rcu_dereference_raw_notrace(kvm->memslots);
}
+extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+
+extern void kvmhv_rm_send_ipi(int cpu);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8ef05121d3cd..a193a13cf08b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -227,10 +227,8 @@ struct kvm_arch {
unsigned long host_sdr1;
int tlbie_lock;
unsigned long lpcr;
- unsigned long rmor;
- struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
- int rma_setup_done;
+ int hpte_setup_done;
u32 hpt_order;
atomic_t vcpus_running;
u32 online_vcores;
@@ -239,6 +237,8 @@ struct kvm_arch {
atomic_t hpte_mod_interest;
cpumask_t need_tlb_flush;
int hpt_cma_alloc;
+ struct dentry *debugfs_dir;
+ struct dentry *htab_dentry;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
struct mutex hpt_mutex;
@@ -263,18 +263,15 @@ struct kvm_arch {
/*
* Struct for a virtual core.
- * Note: entry_exit_count combines an entry count in the bottom 8 bits
- * and an exit count in the next 8 bits. This is so that we can
- * atomically increment the entry count iff the exit count is 0
- * without taking the lock.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
*/
struct kvmppc_vcore {
int n_runnable;
- int n_busy;
int num_threads;
- int entry_exit_count;
- int n_woken;
- int nap_count;
+ int entry_exit_map;
int napping_threads;
int first_vcpuid;
u16 pcpu;
@@ -299,13 +296,14 @@ struct kvmppc_vcore {
ulong conferring_threads;
};
-#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
-#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
+#define VCORE_ENTRY_MAP(vc) ((vc)->entry_exit_map & 0xff)
+#define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
+#define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)
/* Values for vcore_state */
#define VCORE_INACTIVE 0
#define VCORE_SLEEPING 1
-#define VCORE_STARTING 2
+#define VCORE_PREEMPT 2
#define VCORE_RUNNING 3
#define VCORE_EXITING 4
@@ -368,6 +366,14 @@ struct kvmppc_slb {
u8 base_page_size; /* MMU_PAGE_xxx */
};
+/* Struct used to accumulate timing information in HV real mode code */
+struct kvmhv_tb_accumulator {
+ u64 seqcount; /* used to synchronize access, also count * 2 */
+ u64 tb_total; /* total time in timebase ticks */
+ u64 tb_min; /* min time */
+ u64 tb_max; /* max time */
+};
+
# ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
@@ -585,7 +591,7 @@ struct kvm_vcpu_arch {
pgd_t *pgdir;
u8 io_gpr; /* GPR used as IO source/target */
- u8 mmio_is_bigendian;
+ u8 mmio_host_swabbed;
u8 mmio_sign_extend;
u8 osi_needed;
u8 osi_enabled;
@@ -656,6 +662,19 @@ struct kvm_vcpu_arch {
u32 emul_inst;
#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ struct kvmhv_tb_accumulator *cur_activity; /* What we're timing */
+ u64 cur_tb_start; /* when it started */
+ struct kvmhv_tb_accumulator rm_entry; /* real-mode entry code */
+ struct kvmhv_tb_accumulator rm_intr; /* real-mode intr handling */
+ struct kvmhv_tb_accumulator rm_exit; /* real-mode exit code */
+ struct kvmhv_tb_accumulator guest_time; /* guest execution */
+ struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_timings;
+#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};
#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 46bf652c9169..b8475daad884 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -302,6 +302,8 @@ static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
return kvm->arch.kvm_ops == kvmppc_hv_ops;
}
+extern int kvmppc_hwrng_present(void);
+
/*
* Cuts out inst bits with ordering according to spec.
* That means the leftmost bit is zero. All given bits are included.
diff --git a/arch/powerpc/include/asm/local64.h b/arch/powerpc/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/powerpc/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index c8175a3fe560..ef8899432ae7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -103,9 +103,6 @@ struct machdep_calls {
#endif
#endif /* CONFIG_PPC64 */
- void (*pci_dma_dev_setup)(struct pci_dev *dev);
- void (*pci_dma_bus_setup)(struct pci_bus *bus);
-
/* Platform set_dma_mask and dma_get_required_mask overrides */
int (*dma_set_mask)(struct device *dev, u64 dma_mask);
u64 (*dma_get_required_mask)(struct device *dev);
@@ -125,9 +122,8 @@ struct machdep_calls {
unsigned int (*get_irq)(void);
/* PCI stuff */
- /* Called after scanning the bus, before allocating resources */
+ /* Called after allocating resources */
void (*pcibios_fixup)(void);
- int (*pci_probe_mode)(struct pci_bus *);
void (*pci_irq_fixup)(struct pci_dev *dev);
int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
*bridge);
@@ -237,18 +233,13 @@ struct machdep_calls {
/* Called for each PCI bus in the system when it's probed */
void (*pcibios_fixup_bus)(struct pci_bus *);
- /* Called when pci_enable_device() is called. Returns 0 to
- * allow assignment/enabling of the device. */
- int (*pcibios_enable_device_hook)(struct pci_dev *);
-
/* Called after scan and before resource survey */
void (*pcibios_fixup_phb)(struct pci_controller *hose);
- /* Called during PCI resource reassignment */
- resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type);
-
- /* Reset the secondary bus of bridge */
- void (*pcibios_reset_secondary_bus)(struct pci_dev *dev);
+#ifdef CONFIG_PCI_IOV
+ void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
+ resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
+#endif /* CONFIG_PCI_IOV */
/* Called to shutdown machine specific hardware not already controlled
* by other drivers.
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 4f13c3ed7acf..1da6a81ce541 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -112,6 +112,7 @@
#define TLBIEL_INVAL_SET_SHIFT 12
#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
+#define POWER8_TLB_SETS 512 /* # sets in POWER8 TLB */
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
index 3bef74a9914b..213f3a81593d 100644
--- a/arch/powerpc/include/asm/mpc85xx.h
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -61,6 +61,7 @@
#define SVR_T4240 0x824000
#define SVR_T4120 0x824001
#define SVR_T4160 0x824100
+#define SVR_T4080 0x824102
#define SVR_C291 0x850000
#define SVR_C292 0x850020
#define SVR_C293 0x850030
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 754f93d208fa..98697611e7b3 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -34,10 +34,6 @@
#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
#define MPIC_GREG_GCONF_MCK 0x08000000
#define MPIC_GREG_GLOBAL_CONF_1 0x00030
-#define MPIC_GREG_GLOBAL_CONF_1_SIE 0x08000000
-#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK 0x70000000
-#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(r) \
- (((r) << 28) & MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK)
#define MPIC_GREG_VENDOR_0 0x00040
#define MPIC_GREG_VENDOR_1 0x00050
#define MPIC_GREG_VENDOR_2 0x00060
@@ -396,14 +392,7 @@ extern struct bus_type mpic_subsys;
#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
/* Get the version of primary MPIC */
-#ifdef CONFIG_MPIC
extern u32 fsl_mpic_primary_get_version(void);
-#else
-static inline u32 fsl_mpic_primary_get_version(void)
-{
- return 0;
-}
-#endif
/* Allocate the controller structure and setup the linux irq descs
* for the range if interrupts passed in. No HW initialization is
@@ -496,11 +485,5 @@ extern unsigned int mpic_get_coreint_irq(void);
/* Fetch Machine Check interrupt from primary mpic */
extern unsigned int mpic_get_mcirq(void);
-/* Set the EPIC clock ratio */
-void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
-
-/* Enable/Disable EPIC serial interrupt mode */
-void mpic_set_serial_int(struct mpic *mpic, int enable);
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
new file mode 100644
index 000000000000..ff1ccb375e60
--- /dev/null
+++ b/arch/powerpc/include/asm/nmi.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_NMI_H
+#define _ASM_NMI_H
+
+#endif /* _ASM_NMI_H */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index b0fe0fe4e626..09a518bb7c03 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -9,12 +9,43 @@
#ifndef _ASM_POWERPC_NVRAM_H
#define _ASM_POWERPC_NVRAM_H
-
+#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <uapi/asm/nvram.h>
+/*
+ * Set the oops header version to distinguish between old and new format
+ * headers. The lnx,oops-log partition size is at most 4000, so a header
+ * version greater than 4000 identifies the new format.
+ */
+#define OOPS_HDR_VERSION 5000
+
+struct err_log_info {
+ __be32 error_type;
+ __be32 seq_num;
+};
+
+struct nvram_os_partition {
+ const char *name;
+ int req_size; /* desired size, in bytes */
+ int min_size; /* minimum acceptable size (0 means req_size) */
+ long size; /* size of data portion (excluding err_log_info) */
+ long index; /* offset of data portion of partition */
+ bool os_partition; /* partition initialized by OS, not FW */
+};
+
+struct oops_log_info {
+ __be16 version;
+ __be16 report_length;
+ __be64 timestamp;
+} __attribute__((packed));
+
+extern struct nvram_os_partition oops_log_partition;
+
#ifdef CONFIG_PPC_PSERIES
+extern struct nvram_os_partition rtas_log_partition;
+
extern int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int err_seq);
extern int nvram_read_error_log(char * buff, int length,
@@ -50,6 +81,23 @@ extern void pmac_xpram_write(int xpaddr, u8 data);
/* Synchronize NVRAM */
extern void nvram_sync(void);
+/* Initialize NVRAM OS partition */
+extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
+
+/* Initialize NVRAM oops partition */
+extern void __init nvram_init_oops_partition(int rtas_partition_exists);
+
+/* Read a NVRAM partition */
+extern int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt);
+
+/* Write to NVRAM OS partition */
+extern int nvram_write_os_partition(struct nvram_os_partition *part,
+ char *buff, int length,
+ unsigned int err_type,
+ unsigned int error_log_cnt);
+
/* Determine NVRAM size */
extern ssize_t nvram_get_size(void);
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
new file mode 100644
index 000000000000..0321a909e663
--- /dev/null
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -0,0 +1,735 @@
+/*
+ * OPAL API definitions.
+ *
+ * Copyright 2011-2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __OPAL_API_H
+#define __OPAL_API_H
+
+/****** OPAL APIs ******/
+
+/* Return codes */
+#define OPAL_SUCCESS 0
+#define OPAL_PARAMETER -1
+#define OPAL_BUSY -2
+#define OPAL_PARTIAL -3
+#define OPAL_CONSTRAINED -4
+#define OPAL_CLOSED -5
+#define OPAL_HARDWARE -6
+#define OPAL_UNSUPPORTED -7
+#define OPAL_PERMISSION -8
+#define OPAL_NO_MEM -9
+#define OPAL_RESOURCE -10
+#define OPAL_INTERNAL_ERROR -11
+#define OPAL_BUSY_EVENT -12
+#define OPAL_HARDWARE_FROZEN -13
+#define OPAL_WRONG_STATE -14
+#define OPAL_ASYNC_COMPLETION -15
+#define OPAL_EMPTY -16
+#define OPAL_I2C_TIMEOUT -17
+#define OPAL_I2C_INVALID_CMD -18
+#define OPAL_I2C_LBUS_PARITY -19
+#define OPAL_I2C_BKEND_OVERRUN -20
+#define OPAL_I2C_BKEND_ACCESS -21
+#define OPAL_I2C_ARBT_LOST -22
+#define OPAL_I2C_NACK_RCVD -23
+#define OPAL_I2C_STOP_ERR -24
+
+/* API Tokens (in r0) */
+#define OPAL_INVALID_CALL -1
+#define OPAL_TEST 0
+#define OPAL_CONSOLE_WRITE 1
+#define OPAL_CONSOLE_READ 2
+#define OPAL_RTC_READ 3
+#define OPAL_RTC_WRITE 4
+#define OPAL_CEC_POWER_DOWN 5
+#define OPAL_CEC_REBOOT 6
+#define OPAL_READ_NVRAM 7
+#define OPAL_WRITE_NVRAM 8
+#define OPAL_HANDLE_INTERRUPT 9
+#define OPAL_POLL_EVENTS 10
+#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
+#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
+#define OPAL_PCI_CONFIG_READ_BYTE 13
+#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
+#define OPAL_PCI_CONFIG_READ_WORD 15
+#define OPAL_PCI_CONFIG_WRITE_BYTE 16
+#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
+#define OPAL_PCI_CONFIG_WRITE_WORD 18
+#define OPAL_SET_XIVE 19
+#define OPAL_GET_XIVE 20
+#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
+#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
+#define OPAL_PCI_EEH_FREEZE_STATUS 23
+#define OPAL_PCI_SHPC 24
+#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
+#define OPAL_PCI_EEH_FREEZE_CLEAR 26
+#define OPAL_PCI_PHB_MMIO_ENABLE 27
+#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
+#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
+#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
+#define OPAL_PCI_SET_PE 31
+#define OPAL_PCI_SET_PELTV 32
+#define OPAL_PCI_SET_MVE 33
+#define OPAL_PCI_SET_MVE_ENABLE 34
+#define OPAL_PCI_GET_XIVE_REISSUE 35
+#define OPAL_PCI_SET_XIVE_REISSUE 36
+#define OPAL_PCI_SET_XIVE_PE 37
+#define OPAL_GET_XIVE_SOURCE 38
+#define OPAL_GET_MSI_32 39
+#define OPAL_GET_MSI_64 40
+#define OPAL_START_CPU 41
+#define OPAL_QUERY_CPU_STATUS 42
+#define OPAL_WRITE_OPPANEL 43 /* unimplemented */
+#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
+#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
+#define OPAL_PCI_RESET 49
+#define OPAL_PCI_GET_HUB_DIAG_DATA 50
+#define OPAL_PCI_GET_PHB_DIAG_DATA 51
+#define OPAL_PCI_FENCE_PHB 52
+#define OPAL_PCI_REINIT 53
+#define OPAL_PCI_MASK_PE_ERROR 54
+#define OPAL_SET_SLOT_LED_STATUS 55
+#define OPAL_GET_EPOW_STATUS 56
+#define OPAL_SET_SYSTEM_ATTENTION_LED 57
+#define OPAL_RESERVED1 58
+#define OPAL_RESERVED2 59
+#define OPAL_PCI_NEXT_ERROR 60
+#define OPAL_PCI_EEH_FREEZE_STATUS2 61
+#define OPAL_PCI_POLL 62
+#define OPAL_PCI_MSI_EOI 63
+#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
+#define OPAL_XSCOM_READ 65
+#define OPAL_XSCOM_WRITE 66
+#define OPAL_LPC_READ 67
+#define OPAL_LPC_WRITE 68
+#define OPAL_RETURN_CPU 69
+#define OPAL_REINIT_CPUS 70
+#define OPAL_ELOG_READ 71
+#define OPAL_ELOG_WRITE 72
+#define OPAL_ELOG_ACK 73
+#define OPAL_ELOG_RESEND 74
+#define OPAL_ELOG_SIZE 75
+#define OPAL_FLASH_VALIDATE 76
+#define OPAL_FLASH_MANAGE 77
+#define OPAL_FLASH_UPDATE 78
+#define OPAL_RESYNC_TIMEBASE 79
+#define OPAL_CHECK_TOKEN 80
+#define OPAL_DUMP_INIT 81
+#define OPAL_DUMP_INFO 82
+#define OPAL_DUMP_READ 83
+#define OPAL_DUMP_ACK 84
+#define OPAL_GET_MSG 85
+#define OPAL_CHECK_ASYNC_COMPLETION 86
+#define OPAL_SYNC_HOST_REBOOT 87
+#define OPAL_SENSOR_READ 88
+#define OPAL_GET_PARAM 89
+#define OPAL_SET_PARAM 90
+#define OPAL_DUMP_RESEND 91
+#define OPAL_ELOG_SEND 92 /* Deprecated */
+#define OPAL_PCI_SET_PHB_CAPI_MODE 93
+#define OPAL_DUMP_INFO2 94
+#define OPAL_WRITE_OPPANEL_ASYNC 95
+#define OPAL_PCI_ERR_INJECT 96
+#define OPAL_PCI_EEH_FREEZE_SET 97
+#define OPAL_HANDLE_HMI 98
+#define OPAL_CONFIG_CPU_IDLE_STATE 99
+#define OPAL_SLW_SET_REG 100
+#define OPAL_REGISTER_DUMP_REGION 101
+#define OPAL_UNREGISTER_DUMP_REGION 102
+#define OPAL_WRITE_TPO 103
+#define OPAL_READ_TPO 104
+#define OPAL_GET_DPO_STATUS 105
+#define OPAL_OLD_I2C_REQUEST 106 /* Deprecated */
+#define OPAL_IPMI_SEND 107
+#define OPAL_IPMI_RECV 108
+#define OPAL_I2C_REQUEST 109
+#define OPAL_FLASH_READ 110
+#define OPAL_FLASH_WRITE 111
+#define OPAL_FLASH_ERASE 112
+#define OPAL_LAST 112
+
+/* Device tree flags */
+
+/* Flags set in power-mgmt nodes in device tree if
+ * respective idle states are supported in the platform.
+ */
+#define OPAL_PM_NAP_ENABLED 0x00010000
+#define OPAL_PM_SLEEP_ENABLED 0x00020000
+#define OPAL_PM_WINKLE_ENABLED 0x00040000
+#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
+
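The OPAL_PM_* bits above are advertised by firmware in the device tree; as a minimal sketch (assuming the flag word has already been read out of the relevant power-mgmt property, for example with of_property_read_u32(), and that the usual kernel type headers are included):

/*
 * Illustration only: @flags is assumed to come from the firmware-provided
 * power-mgmt device-tree property; only names from this header are used.
 */
static bool opal_nap_supported(u32 flags)
{
        return flags & OPAL_PM_NAP_ENABLED;
}

static bool opal_sleep_supported(u32 flags)
{
        /* the ER1 variant means sleep works but needs a workaround */
        return flags & (OPAL_PM_SLEEP_ENABLED | OPAL_PM_SLEEP_ENABLED_ER1);
}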
+#ifndef __ASSEMBLY__
+
+/* Other enums */
+enum OpalFreezeState {
+ OPAL_EEH_STOPPED_NOT_FROZEN = 0,
+ OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
+ OPAL_EEH_STOPPED_DMA_FREEZE = 2,
+ OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
+ OPAL_EEH_STOPPED_RESET = 4,
+ OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
+ OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
+};
+
+enum OpalEehFreezeActionToken {
+ OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
+
+ OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
+};
+
+enum OpalPciStatusToken {
+ OPAL_EEH_NO_ERROR = 0,
+ OPAL_EEH_IOC_ERROR = 1,
+ OPAL_EEH_PHB_ERROR = 2,
+ OPAL_EEH_PE_ERROR = 3,
+ OPAL_EEH_PE_MMIO_ERROR = 4,
+ OPAL_EEH_PE_DMA_ERROR = 5
+};
+
+enum OpalPciErrorSeverity {
+ OPAL_EEH_SEV_NO_ERROR = 0,
+ OPAL_EEH_SEV_IOC_DEAD = 1,
+ OPAL_EEH_SEV_PHB_DEAD = 2,
+ OPAL_EEH_SEV_PHB_FENCED = 3,
+ OPAL_EEH_SEV_PE_ER = 4,
+ OPAL_EEH_SEV_INF = 5
+};
+
+enum OpalErrinjectType {
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
+};
+
+enum OpalErrinjectFunc {
+ /* IOA bus specific errors */
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
+};
+
+enum OpalMmioWindowType {
+ OPAL_M32_WINDOW_TYPE = 1,
+ OPAL_M64_WINDOW_TYPE = 2,
+ OPAL_IO_WINDOW_TYPE = 3
+};
+
+enum OpalExceptionHandler {
+ OPAL_MACHINE_CHECK_HANDLER = 1,
+ OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
+ OPAL_SOFTPATCH_HANDLER = 3
+};
+
+enum OpalPendingState {
+ OPAL_EVENT_OPAL_INTERNAL = 0x1,
+ OPAL_EVENT_NVRAM = 0x2,
+ OPAL_EVENT_RTC = 0x4,
+ OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
+ OPAL_EVENT_CONSOLE_INPUT = 0x10,
+ OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+ OPAL_EVENT_ERROR_LOG = 0x40,
+ OPAL_EVENT_EPOW = 0x80,
+ OPAL_EVENT_LED_STATUS = 0x100,
+ OPAL_EVENT_PCI_ERROR = 0x200,
+ OPAL_EVENT_DUMP_AVAIL = 0x400,
+ OPAL_EVENT_MSG_PENDING = 0x800,
+};
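These bits arrive as a 64-bit event mask; a hedged sketch of inspecting them, assuming the mask was obtained from firmware (for example via OPAL_POLL_EVENTS) and already converted to CPU endianness:

/*
 * Sketch only: @events is assumed to be the CPU-endian event mask
 * returned by firmware; the be64_to_cpu() conversion has already
 * happened elsewhere.
 */
static void handle_pending_events(u64 events)
{
        if (events & OPAL_EVENT_ERROR_LOG_AVAIL)
                pr_info("OPAL: error log available\n");
        if (events & OPAL_EVENT_MSG_PENDING)
                pr_info("OPAL: message pending, fetch with OPAL_GET_MSG\n");
}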
+
+enum OpalThreadStatus {
+ OPAL_THREAD_INACTIVE = 0x0,
+ OPAL_THREAD_STARTED = 0x1,
+ OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
+};
+
+enum OpalPciBusCompare {
+ OpalPciBusAny = 0, /* Any bus number match */
+ OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
+ OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
+ OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
+ OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
+ OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
+ OpalPciBusAll = 7, /* Match bus number exactly */
+};
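One way to read this enum: 0 matches any bus, modes 2..6 compare the top 3..7 bits, and 7 compares the full 8-bit bus number. A hedged helper that derives the compare mask from that reading (the helper itself is not part of the OPAL API, just an illustration):

/*
 * Illustration only: for modes 2..7 the top (mode + 1) bits of the
 * bus number are compared, per the comments above.
 */
static inline u8 opal_pci_bus_mask(enum OpalPciBusCompare mode)
{
        if (mode == OpalPciBusAny)
                return 0x00;                    /* any bus matches */
        return (u8)(0xff << (8 - (mode + 1)));
}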
+
+enum OpalDeviceCompare {
+ OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
+ OPAL_COMPARE_RID_DEVICE_NUMBER = 1
+};
+
+enum OpalFuncCompare {
+ OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
+ OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
+};
+
+enum OpalPeAction {
+ OPAL_UNMAP_PE = 0,
+ OPAL_MAP_PE = 1
+};
+
+enum OpalPeltvAction {
+ OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+ OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+ OPAL_DISABLE_MVE = 0,
+ OPAL_ENABLE_MVE = 1
+};
+
+enum OpalM64Action {
+ OPAL_DISABLE_M64 = 0,
+ OPAL_ENABLE_M64_SPLIT = 1,
+ OPAL_ENABLE_M64_NON_SPLIT = 2
+};
+
+enum OpalPciResetScope {
+ OPAL_RESET_PHB_COMPLETE = 1,
+ OPAL_RESET_PCI_LINK = 2,
+ OPAL_RESET_PHB_ERROR = 3,
+ OPAL_RESET_PCI_HOT = 4,
+ OPAL_RESET_PCI_FUNDAMENTAL = 5,
+ OPAL_RESET_PCI_IODA_TABLE = 6
+};
+
+enum OpalPciReinitScope {
+ /*
+ * Note: we chose values that do not overlap
+ * OpalPciResetScope as OPAL v2 used the same
+ * enum for both
+ */
+ OPAL_REINIT_PCI_DEV = 1000
+};
+
+enum OpalPciResetState {
+ OPAL_DEASSERT_RESET = 0,
+ OPAL_ASSERT_RESET = 1
+};
+
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+ OPAL_LPC_MEM = 0,
+ OPAL_LPC_IO = 1,
+ OPAL_LPC_FW = 2,
+};
+
+enum opal_msg_type {
+ OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
+ * additional params function-specific
+ */
+ OPAL_MSG_MEM_ERR,
+ OPAL_MSG_EPOW,
+ OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
+ OPAL_MSG_HMI_EVT,
+ OPAL_MSG_DPO,
+ OPAL_MSG_TYPE_MAX,
+};
+
+struct opal_msg {
+ __be32 msg_type;
+ __be32 reserved;
+ __be64 params[8];
+};
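All fields of struct opal_msg are big-endian; a minimal sketch of decoding an async-completion message, using only the layout documented above (the surrounding notifier plumbing is assumed, not shown):

/* Sketch: decode an OPAL_MSG_ASYNC_COMP message. */
static void handle_async_comp(const struct opal_msg *msg)
{
        u64 token;
        s64 rc;

        if (be32_to_cpu(msg->msg_type) != OPAL_MSG_ASYNC_COMP)
                return;
        token = be64_to_cpu(msg->params[0]);    /* async token */
        rc    = be64_to_cpu(msg->params[1]);    /* OPAL return code */
        pr_debug("async token %llu completed, rc=%lld\n", token, rc);
}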
+
+/* System parameter permission */
+enum OpalSysparamPerm {
+ OPAL_SYSPARAM_READ = 0x1,
+ OPAL_SYSPARAM_WRITE = 0x2,
+ OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
+};
+
+enum {
+ OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
+};
+
+struct opal_ipmi_msg {
+ uint8_t version;
+ uint8_t netfn;
+ uint8_t cmd;
+ uint8_t data[];
+};
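Since data[] is a flexible array member, callers allocate the header and payload in one buffer; a hedged sketch (allocation flags and the payload handling are illustrative assumptions, and <linux/slab.h> is assumed to be included):

/* Sketch only: build an IPMI request in a single allocation. */
static struct opal_ipmi_msg *build_ipmi_msg(u8 netfn, u8 cmd,
                                            const u8 *data, size_t len)
{
        struct opal_ipmi_msg *msg = kmalloc(sizeof(*msg) + len, GFP_KERNEL);

        if (!msg)
                return NULL;
        msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
        msg->netfn = netfn;
        msg->cmd = cmd;
        memcpy(msg->data, data, len);
        return msg;
}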
+
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+ OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+ OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
+ OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+ OPAL_MEM_RESILIENCE_CE = 0,
+ OPAL_MEM_RESILIENCE_UE,
+ OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+ OPAL_MEM_DYNAMIC_DEALLOC = 0,
+};
+
+struct OpalMemoryErrorData {
+ enum OpalMemErr_Version version:8; /* 0x00 */
+ enum OpalMemErrType type:8; /* 0x01 */
+ __be16 flags; /* 0x02 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ union {
+ /* Memory Resilience corrected/uncorrected error info */
+ struct {
+ enum OpalMemErr_ResilErrType resil_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } resilience;
+ /* Dynamic memory deallocation error info */
+ struct {
+ enum OpalMemErr_DynErrType dyn_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } dyn_dealloc;
+ } u;
+};
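A minimal sketch of consuming this record: switch on the type byte and pick the matching union member, byte-swapping the address range (the reporting is illustrative):

/* Sketch only: decode the memory error record defined above. */
static void report_mem_error(const struct OpalMemoryErrorData *e)
{
        u64 start, end;

        if (e->type == OPAL_MEM_ERR_TYPE_RESILIENCE) {
                start = be64_to_cpu(e->u.resilience.physical_address_start);
                end   = be64_to_cpu(e->u.resilience.physical_address_end);
        } else {        /* OPAL_MEM_ERR_TYPE_DYN_DALLOC */
                start = be64_to_cpu(e->u.dyn_dealloc.physical_address_start);
                end   = be64_to_cpu(e->u.dyn_dealloc.physical_address_end);
        }
        pr_err("OPAL memory error: %#llx-%#llx\n", start, end);
}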
+
+/* HMI interrupt event */
+enum OpalHMI_Version {
+ OpalHMIEvt_V1 = 1,
+};
+
+enum OpalHMI_Severity {
+ OpalHMI_SEV_NO_ERROR = 0,
+ OpalHMI_SEV_WARNING = 1,
+ OpalHMI_SEV_ERROR_SYNC = 2,
+ OpalHMI_SEV_FATAL = 3,
+};
+
+enum OpalHMI_Disposition {
+ OpalHMI_DISPOSITION_RECOVERED = 0,
+ OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum OpalHMI_ErrType {
+ OpalHMI_ERROR_MALFUNC_ALERT = 0,
+ OpalHMI_ERROR_PROC_RECOV_DONE,
+ OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
+ OpalHMI_ERROR_PROC_RECOV_MASKED,
+ OpalHMI_ERROR_TFAC,
+ OpalHMI_ERROR_TFMR_PARITY,
+ OpalHMI_ERROR_HA_OVERFLOW_WARN,
+ OpalHMI_ERROR_XSCOM_FAIL,
+ OpalHMI_ERROR_XSCOM_DONE,
+ OpalHMI_ERROR_SCOM_FIR,
+ OpalHMI_ERROR_DEBUG_TRIG_FIR,
+ OpalHMI_ERROR_HYP_RESOURCE,
+ OpalHMI_ERROR_CAPP_RECOVERY,
+};
+
+struct OpalHMIEvent {
+ uint8_t version; /* 0x00 */
+ uint8_t severity; /* 0x01 */
+ uint8_t type; /* 0x02 */
+ uint8_t disposition; /* 0x03 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ __be64 hmer;
+ /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
+ __be64 tfmr;
+};
+
+enum {
+ OPAL_P7IOC_DIAG_TYPE_NONE = 0,
+ OPAL_P7IOC_DIAG_TYPE_RGC = 1,
+ OPAL_P7IOC_DIAG_TYPE_BI = 2,
+ OPAL_P7IOC_DIAG_TYPE_CI = 3,
+ OPAL_P7IOC_DIAG_TYPE_MISC = 4,
+ OPAL_P7IOC_DIAG_TYPE_I2C = 5,
+ OPAL_P7IOC_DIAG_TYPE_LAST = 6
+};
+
+struct OpalIoP7IOCErrorData {
+ __be16 type;
+
+ /* GEM */
+ __be64 gemXfir;
+ __be64 gemRfir;
+ __be64 gemRirqfir;
+ __be64 gemMask;
+ __be64 gemRwof;
+
+ /* LEM */
+ __be64 lemFir;
+ __be64 lemErrMask;
+ __be64 lemAction0;
+ __be64 lemAction1;
+ __be64 lemWof;
+
+ union {
+ struct OpalIoP7IOCRgcErrorData {
+ __be64 rgcStatus; /* 3E1C10 */
+ __be64 rgcLdcp; /* 3E1C18 */
+ }rgc;
+ struct OpalIoP7IOCBiErrorData {
+ __be64 biLdcp0; /* 3C0100, 3C0118 */
+ __be64 biLdcp1; /* 3C0108, 3C0120 */
+ __be64 biLdcp2; /* 3C0110, 3C0128 */
+ __be64 biFenceStatus; /* 3C0130, 3C0130 */
+
+ uint8_t biDownbound; /* BI Downbound or Upbound */
+ }bi;
+ struct OpalIoP7IOCCiErrorData {
+ __be64 ciPortStatus; /* 3Dn008 */
+ __be64 ciPortLdcp; /* 3Dn010 */
+
+ uint8_t ciPort; /* Index of CI port: 0/1 */
+ }ci;
+ };
+};
+
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+ OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
+};
+
+enum {
+ OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
+};
+
+enum {
+ OPAL_P7IOC_NUM_PEST_REGS = 128,
+ OPAL_PHB3_NUM_PEST_REGS = 256
+};
+
+struct OpalIoPhbErrorCommon {
+ __be32 version;
+ __be32 ioType;
+ __be32 len;
+};
+
+struct OpalIoP7IOCPhbErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ // P7IOC utl regs
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
+
+ // P7IOC cfg regs
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ // cfg AER regs
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ __be32 rsv3;
+
+ // Record data about the call to allocate a buffer.
+ __be64 errorClass;
+ __be64 correlator;
+
+ //P7IOC MMIO Error Regs
+ __be64 p7iocPlssr; // n120
+ __be64 p7iocCsr; // n110
+ __be64 lemFir; // nC00
+ __be64 lemErrorMask; // nC18
+ __be64 lemWOF; // nC40
+ __be64 phbErrorStatus; // nC80
+ __be64 phbFirstErrorStatus; // nC88
+ __be64 phbErrorLog0; // nCC0
+ __be64 phbErrorLog1; // nCC8
+ __be64 mmioErrorStatus; // nD00
+ __be64 mmioFirstErrorStatus; // nD08
+ __be64 mmioErrorLog0; // nD40
+ __be64 mmioErrorLog1; // nD48
+ __be64 dma0ErrorStatus; // nD80
+ __be64 dma0FirstErrorStatus; // nD88
+ __be64 dma0ErrorLog0; // nDC0
+ __be64 dma0ErrorLog1; // nDC8
+ __be64 dma1ErrorStatus; // nE00
+ __be64 dma1FirstErrorStatus; // nE08
+ __be64 dma1ErrorLog0; // nE40
+ __be64 dma1ErrorLog1; // nE48
+ __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
+ __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb3ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ /* PHB3 UTL regs */
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
+
+ /* PHB3 cfg regs */
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ /* cfg AER regs */
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ __be32 rsv3;
+
+ /* Record data about the call to allocate a buffer */
+ __be64 errorClass;
+ __be64 correlator;
+
+ /* PHB3 MMIO Error Regs */
+ __be64 nFir; /* 000 */
+ __be64 nFirMask; /* 003 */
+ __be64 nFirWOF; /* 008 */
+ __be64 phbPlssr; /* 120 */
+ __be64 phbCsr; /* 110 */
+ __be64 lemFir; /* C00 */
+ __be64 lemErrorMask; /* C18 */
+ __be64 lemWOF; /* C40 */
+ __be64 phbErrorStatus; /* C80 */
+ __be64 phbFirstErrorStatus; /* C88 */
+ __be64 phbErrorLog0; /* CC0 */
+ __be64 phbErrorLog1; /* CC8 */
+ __be64 mmioErrorStatus; /* D00 */
+ __be64 mmioFirstErrorStatus; /* D08 */
+ __be64 mmioErrorLog0; /* D40 */
+ __be64 mmioErrorLog1; /* D48 */
+ __be64 dma0ErrorStatus; /* D80 */
+ __be64 dma0FirstErrorStatus; /* D88 */
+ __be64 dma0ErrorLog0; /* DC0 */
+ __be64 dma0ErrorLog1; /* DC8 */
+ __be64 dma1ErrorStatus; /* E00 */
+ __be64 dma1FirstErrorStatus; /* E08 */
+ __be64 dma1ErrorLog0; /* E40 */
+ __be64 dma1ErrorLog1; /* E48 */
+ __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
+ __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
+enum {
+ OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
+ OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
+};
+
+typedef struct oppanel_line {
+ __be64 line;
+ __be64 line_len;
+} oppanel_line_t;
+
+/*
+ * SG entries
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned *and* each block
+ * size except the last one in the list to be as well.
+ */
+struct opal_sg_entry {
+ __be64 data;
+ __be64 length;
+};
+
+/*
+ * Candidate image SG list.
+ *
+ * length = VER | length
+ */
+struct opal_sg_list {
+ __be64 length;
+ __be64 next;
+ struct opal_sg_entry entry[];
+};
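A hedged sketch of walking such a list. It assumes 'length' holds the node's byte length (the VER packing noted above is ignored here) and that 'next' is directly dereferenceable by the walker; both are assumptions made purely for illustration.

/* Illustration only, see the assumptions stated above. */
static unsigned int count_sg_entries(const struct opal_sg_list *sg)
{
        unsigned int total = 0;

        while (sg) {
                u64 len = be64_to_cpu(sg->length);

                /* 16 bytes of header (length + next) precede the entries */
                total += (len - 16) / sizeof(struct opal_sg_entry);
                sg = (struct opal_sg_list *)(unsigned long)be64_to_cpu(sg->next);
        }
        return total;
}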
+
+/*
+ * Dump region ID range usable by the OS
+ */
+#define OPAL_DUMP_REGION_HOST_START 0x80
+#define OPAL_DUMP_REGION_LOG_BUF 0x80
+#define OPAL_DUMP_REGION_HOST_END 0xFF
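A one-line validity check follows directly from these bounds (the helper name is illustrative, not part of the API):

/* Illustrative helper: is @id a dump region the OS may register? */
static inline bool opal_dump_region_is_host(u32 id)
{
        return id >= OPAL_DUMP_REGION_HOST_START &&
               id <= OPAL_DUMP_REGION_HOST_END;
}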
+
+/* CAPI modes for PHB */
+enum {
+ OPAL_PHB_CAPI_MODE_PCIE = 0,
+ OPAL_PHB_CAPI_MODE_CAPI = 1,
+ OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
+ OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
+};
+
+/* OPAL I2C request */
+struct opal_i2c_request {
+ uint8_t type;
+#define OPAL_I2C_RAW_READ 0
+#define OPAL_I2C_RAW_WRITE 1
+#define OPAL_I2C_SM_READ 2
+#define OPAL_I2C_SM_WRITE 3
+ uint8_t flags;
+#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
+ uint8_t subaddr_sz; /* Max 4 */
+ uint8_t reserved;
+ __be16 addr; /* 7 or 10 bit address */
+ __be16 reserved2;
+ __be32 subaddr; /* Sub-address if any */
+ __be32 size; /* Data size */
+ __be64 buffer_ra; /* Buffer real address */
+};
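A hedged sketch of filling the request for a one-byte "smart" register read; how the DMA-able buffer and its real address are obtained is assumed, and only fields defined above are touched:

/*
 * Sketch only: prepare a 1-byte OPAL_I2C_SM_READ of register @reg on
 * 7-bit device @addr.  @buf_ra must be the real address of a buffer
 * firmware can write to.
 */
static void fill_i2c_read(struct opal_i2c_request *req, u16 addr,
                          u8 reg, u64 buf_ra)
{
        memset(req, 0, sizeof(*req));
        req->type = OPAL_I2C_SM_READ;
        req->addr = cpu_to_be16(addr);          /* 7-bit address */
        req->subaddr_sz = 1;                    /* one sub-address byte */
        req->subaddr = cpu_to_be32(reg);
        req->size = cpu_to_be32(1);             /* read a single byte */
        req->buffer_ra = cpu_to_be64(buf_ra);
}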
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9ee0a30a02ce..042af1abfc4d 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -9,755 +9,17 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef __OPAL_H
-#define __OPAL_H
+#ifndef _ASM_POWERPC_OPAL_H
+#define _ASM_POWERPC_OPAL_H
-#ifndef __ASSEMBLY__
-/*
- * SG entry
- *
- * WARNING: The current implementation requires each entry
- * to represent a block that is 4k aligned *and* each block
- * size except the last one in the list to be as well.
- */
-struct opal_sg_entry {
- __be64 data;
- __be64 length;
-};
-
-/* SG list */
-struct opal_sg_list {
- __be64 length;
- __be64 next;
- struct opal_sg_entry entry[];
-};
-
-/* We calculate number of sg entries based on PAGE_SIZE */
-#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
-
-#endif /* __ASSEMBLY__ */
-
-/****** OPAL APIs ******/
-
-/* Return codes */
-#define OPAL_SUCCESS 0
-#define OPAL_PARAMETER -1
-#define OPAL_BUSY -2
-#define OPAL_PARTIAL -3
-#define OPAL_CONSTRAINED -4
-#define OPAL_CLOSED -5
-#define OPAL_HARDWARE -6
-#define OPAL_UNSUPPORTED -7
-#define OPAL_PERMISSION -8
-#define OPAL_NO_MEM -9
-#define OPAL_RESOURCE -10
-#define OPAL_INTERNAL_ERROR -11
-#define OPAL_BUSY_EVENT -12
-#define OPAL_HARDWARE_FROZEN -13
-#define OPAL_WRONG_STATE -14
-#define OPAL_ASYNC_COMPLETION -15
-#define OPAL_I2C_TIMEOUT -17
-#define OPAL_I2C_INVALID_CMD -18
-#define OPAL_I2C_LBUS_PARITY -19
-#define OPAL_I2C_BKEND_OVERRUN -20
-#define OPAL_I2C_BKEND_ACCESS -21
-#define OPAL_I2C_ARBT_LOST -22
-#define OPAL_I2C_NACK_RCVD -23
-#define OPAL_I2C_STOP_ERR -24
-
-/* API Tokens (in r0) */
-#define OPAL_INVALID_CALL -1
-#define OPAL_CONSOLE_WRITE 1
-#define OPAL_CONSOLE_READ 2
-#define OPAL_RTC_READ 3
-#define OPAL_RTC_WRITE 4
-#define OPAL_CEC_POWER_DOWN 5
-#define OPAL_CEC_REBOOT 6
-#define OPAL_READ_NVRAM 7
-#define OPAL_WRITE_NVRAM 8
-#define OPAL_HANDLE_INTERRUPT 9
-#define OPAL_POLL_EVENTS 10
-#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
-#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
-#define OPAL_PCI_CONFIG_READ_BYTE 13
-#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
-#define OPAL_PCI_CONFIG_READ_WORD 15
-#define OPAL_PCI_CONFIG_WRITE_BYTE 16
-#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
-#define OPAL_PCI_CONFIG_WRITE_WORD 18
-#define OPAL_SET_XIVE 19
-#define OPAL_GET_XIVE 20
-#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
-#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
-#define OPAL_PCI_EEH_FREEZE_STATUS 23
-#define OPAL_PCI_SHPC 24
-#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
-#define OPAL_PCI_EEH_FREEZE_CLEAR 26
-#define OPAL_PCI_PHB_MMIO_ENABLE 27
-#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
-#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
-#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
-#define OPAL_PCI_SET_PE 31
-#define OPAL_PCI_SET_PELTV 32
-#define OPAL_PCI_SET_MVE 33
-#define OPAL_PCI_SET_MVE_ENABLE 34
-#define OPAL_PCI_GET_XIVE_REISSUE 35
-#define OPAL_PCI_SET_XIVE_REISSUE 36
-#define OPAL_PCI_SET_XIVE_PE 37
-#define OPAL_GET_XIVE_SOURCE 38
-#define OPAL_GET_MSI_32 39
-#define OPAL_GET_MSI_64 40
-#define OPAL_START_CPU 41
-#define OPAL_QUERY_CPU_STATUS 42
-#define OPAL_WRITE_OPPANEL 43
-#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
-#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
-#define OPAL_PCI_RESET 49
-#define OPAL_PCI_GET_HUB_DIAG_DATA 50
-#define OPAL_PCI_GET_PHB_DIAG_DATA 51
-#define OPAL_PCI_FENCE_PHB 52
-#define OPAL_PCI_REINIT 53
-#define OPAL_PCI_MASK_PE_ERROR 54
-#define OPAL_SET_SLOT_LED_STATUS 55
-#define OPAL_GET_EPOW_STATUS 56
-#define OPAL_SET_SYSTEM_ATTENTION_LED 57
-#define OPAL_RESERVED1 58
-#define OPAL_RESERVED2 59
-#define OPAL_PCI_NEXT_ERROR 60
-#define OPAL_PCI_EEH_FREEZE_STATUS2 61
-#define OPAL_PCI_POLL 62
-#define OPAL_PCI_MSI_EOI 63
-#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
-#define OPAL_XSCOM_READ 65
-#define OPAL_XSCOM_WRITE 66
-#define OPAL_LPC_READ 67
-#define OPAL_LPC_WRITE 68
-#define OPAL_RETURN_CPU 69
-#define OPAL_REINIT_CPUS 70
-#define OPAL_ELOG_READ 71
-#define OPAL_ELOG_WRITE 72
-#define OPAL_ELOG_ACK 73
-#define OPAL_ELOG_RESEND 74
-#define OPAL_ELOG_SIZE 75
-#define OPAL_FLASH_VALIDATE 76
-#define OPAL_FLASH_MANAGE 77
-#define OPAL_FLASH_UPDATE 78
-#define OPAL_RESYNC_TIMEBASE 79
-#define OPAL_CHECK_TOKEN 80
-#define OPAL_DUMP_INIT 81
-#define OPAL_DUMP_INFO 82
-#define OPAL_DUMP_READ 83
-#define OPAL_DUMP_ACK 84
-#define OPAL_GET_MSG 85
-#define OPAL_CHECK_ASYNC_COMPLETION 86
-#define OPAL_SYNC_HOST_REBOOT 87
-#define OPAL_SENSOR_READ 88
-#define OPAL_GET_PARAM 89
-#define OPAL_SET_PARAM 90
-#define OPAL_DUMP_RESEND 91
-#define OPAL_PCI_SET_PHB_CXL_MODE 93
-#define OPAL_DUMP_INFO2 94
-#define OPAL_PCI_ERR_INJECT 96
-#define OPAL_PCI_EEH_FREEZE_SET 97
-#define OPAL_HANDLE_HMI 98
-#define OPAL_CONFIG_CPU_IDLE_STATE 99
-#define OPAL_SLW_SET_REG 100
-#define OPAL_REGISTER_DUMP_REGION 101
-#define OPAL_UNREGISTER_DUMP_REGION 102
-#define OPAL_WRITE_TPO 103
-#define OPAL_READ_TPO 104
-#define OPAL_IPMI_SEND 107
-#define OPAL_IPMI_RECV 108
-#define OPAL_I2C_REQUEST 109
-
-/* Device tree flags */
-
-/* Flags set in power-mgmt nodes in device tree if
- * respective idle states are supported in the platform.
- */
-#define OPAL_PM_NAP_ENABLED 0x00010000
-#define OPAL_PM_SLEEP_ENABLED 0x00020000
-#define OPAL_PM_WINKLE_ENABLED 0x00040000
-#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000
+#include <asm/opal-api.h>
#ifndef __ASSEMBLY__
#include <linux/notifier.h>
-/* Other enums */
-enum OpalVendorApiTokens {
- OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
-};
-
-enum OpalFreezeState {
- OPAL_EEH_STOPPED_NOT_FROZEN = 0,
- OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
- OPAL_EEH_STOPPED_DMA_FREEZE = 2,
- OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
- OPAL_EEH_STOPPED_RESET = 4,
- OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
- OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
-};
-
-enum OpalEehFreezeActionToken {
- OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
- OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
-
- OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
- OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
- OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
-};
-
-enum OpalPciStatusToken {
- OPAL_EEH_NO_ERROR = 0,
- OPAL_EEH_IOC_ERROR = 1,
- OPAL_EEH_PHB_ERROR = 2,
- OPAL_EEH_PE_ERROR = 3,
- OPAL_EEH_PE_MMIO_ERROR = 4,
- OPAL_EEH_PE_DMA_ERROR = 5
-};
-
-enum OpalPciErrorSeverity {
- OPAL_EEH_SEV_NO_ERROR = 0,
- OPAL_EEH_SEV_IOC_DEAD = 1,
- OPAL_EEH_SEV_PHB_DEAD = 2,
- OPAL_EEH_SEV_PHB_FENCED = 3,
- OPAL_EEH_SEV_PE_ER = 4,
- OPAL_EEH_SEV_INF = 5
-};
-
-enum OpalErrinjectType {
- OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
- OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
-};
-
-enum OpalErrinjectFunc {
- /* IOA bus specific errors */
- OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
- OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
- OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
- OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
- OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
- OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
- OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
- OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
- OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
- OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
- OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
- OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
-};
-
-enum OpalShpcAction {
- OPAL_SHPC_GET_LINK_STATE = 0,
- OPAL_SHPC_GET_SLOT_STATE = 1
-};
-
-enum OpalShpcLinkState {
- OPAL_SHPC_LINK_DOWN = 0,
- OPAL_SHPC_LINK_UP = 1
-};
-
-enum OpalMmioWindowType {
- OPAL_M32_WINDOW_TYPE = 1,
- OPAL_M64_WINDOW_TYPE = 2,
- OPAL_IO_WINDOW_TYPE = 3
-};
-
-enum OpalShpcSlotState {
- OPAL_SHPC_DEV_NOT_PRESENT = 0,
- OPAL_SHPC_DEV_PRESENT = 1
-};
-
-enum OpalExceptionHandler {
- OPAL_MACHINE_CHECK_HANDLER = 1,
- OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
- OPAL_SOFTPATCH_HANDLER = 3
-};
-
-enum OpalPendingState {
- OPAL_EVENT_OPAL_INTERNAL = 0x1,
- OPAL_EVENT_NVRAM = 0x2,
- OPAL_EVENT_RTC = 0x4,
- OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
- OPAL_EVENT_CONSOLE_INPUT = 0x10,
- OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
- OPAL_EVENT_ERROR_LOG = 0x40,
- OPAL_EVENT_EPOW = 0x80,
- OPAL_EVENT_LED_STATUS = 0x100,
- OPAL_EVENT_PCI_ERROR = 0x200,
- OPAL_EVENT_DUMP_AVAIL = 0x400,
- OPAL_EVENT_MSG_PENDING = 0x800,
-};
-
-enum OpalMessageType {
- OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
- * additional params function-specific
- */
- OPAL_MSG_MEM_ERR,
- OPAL_MSG_EPOW,
- OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
- OPAL_MSG_HMI_EVT,
- OPAL_MSG_TYPE_MAX,
-};
-
-enum OpalThreadStatus {
- OPAL_THREAD_INACTIVE = 0x0,
- OPAL_THREAD_STARTED = 0x1,
- OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
-};
-
-enum OpalPciBusCompare {
- OpalPciBusAny = 0, /* Any bus number match */
- OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
- OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
- OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
- OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
- OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
- OpalPciBusAll = 7, /* Match bus number exactly */
-};
-
-enum OpalDeviceCompare {
- OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
- OPAL_COMPARE_RID_DEVICE_NUMBER = 1
-};
-
-enum OpalFuncCompare {
- OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
- OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
-};
-
-enum OpalPeAction {
- OPAL_UNMAP_PE = 0,
- OPAL_MAP_PE = 1
-};
-
-enum OpalPeltvAction {
- OPAL_REMOVE_PE_FROM_DOMAIN = 0,
- OPAL_ADD_PE_TO_DOMAIN = 1
-};
-
-enum OpalMveEnableAction {
- OPAL_DISABLE_MVE = 0,
- OPAL_ENABLE_MVE = 1
-};
-
-enum OpalM64EnableAction {
- OPAL_DISABLE_M64 = 0,
- OPAL_ENABLE_M64_SPLIT = 1,
- OPAL_ENABLE_M64_NON_SPLIT = 2
-};
-
-enum OpalPciResetScope {
- OPAL_RESET_PHB_COMPLETE = 1,
- OPAL_RESET_PCI_LINK = 2,
- OPAL_RESET_PHB_ERROR = 3,
- OPAL_RESET_PCI_HOT = 4,
- OPAL_RESET_PCI_FUNDAMENTAL = 5,
- OPAL_RESET_PCI_IODA_TABLE = 6
-};
-
-enum OpalPciReinitScope {
- OPAL_REINIT_PCI_DEV = 1000
-};
-
-enum OpalPciResetState {
- OPAL_DEASSERT_RESET = 0,
- OPAL_ASSERT_RESET = 1
-};
-
-enum OpalPciMaskAction {
- OPAL_UNMASK_ERROR_TYPE = 0,
- OPAL_MASK_ERROR_TYPE = 1
-};
-
-enum OpalSlotLedType {
- OPAL_SLOT_LED_ID_TYPE = 0,
- OPAL_SLOT_LED_FAULT_TYPE = 1
-};
-
-enum OpalLedAction {
- OPAL_TURN_OFF_LED = 0,
- OPAL_TURN_ON_LED = 1,
- OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
-};
-
-enum OpalEpowStatus {
- OPAL_EPOW_NONE = 0,
- OPAL_EPOW_UPS = 1,
- OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
- OPAL_EPOW_OVER_INTERNAL_TEMP = 3
-};
-
-/*
- * Address cycle types for LPC accesses. These also correspond
- * to the content of the first cell of the "reg" property for
- * device nodes on the LPC bus
- */
-enum OpalLPCAddressType {
- OPAL_LPC_MEM = 0,
- OPAL_LPC_IO = 1,
- OPAL_LPC_FW = 2,
-};
-
-/* System parameter permission */
-enum OpalSysparamPerm {
- OPAL_SYSPARAM_READ = 0x1,
- OPAL_SYSPARAM_WRITE = 0x2,
- OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
-};
-
-struct opal_msg {
- __be32 msg_type;
- __be32 reserved;
- __be64 params[8];
-};
-
-enum {
- OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
-};
-
-struct opal_ipmi_msg {
- uint8_t version;
- uint8_t netfn;
- uint8_t cmd;
- uint8_t data[];
-};
-
-/* FSP memory errors handling */
-enum OpalMemErr_Version {
- OpalMemErr_V1 = 1,
-};
-
-enum OpalMemErrType {
- OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
- OPAL_MEM_ERR_TYPE_DYN_DALLOC,
- OPAL_MEM_ERR_TYPE_SCRUB,
-};
-
-/* Memory Reilience error type */
-enum OpalMemErr_ResilErrType {
- OPAL_MEM_RESILIENCE_CE = 0,
- OPAL_MEM_RESILIENCE_UE,
- OPAL_MEM_RESILIENCE_UE_SCRUB,
-};
-
-/* Dynamic Memory Deallocation type */
-enum OpalMemErr_DynErrType {
- OPAL_MEM_DYNAMIC_DEALLOC = 0,
-};
-
-/* OpalMemoryErrorData->flags */
-#define OPAL_MEM_CORRECTED_ERROR 0x0001
-#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
-#define OPAL_MEM_ACK_REQUIRED 0x8000
-
-struct OpalMemoryErrorData {
- enum OpalMemErr_Version version:8; /* 0x00 */
- enum OpalMemErrType type:8; /* 0x01 */
- __be16 flags; /* 0x02 */
- uint8_t reserved_1[4]; /* 0x04 */
-
- union {
- /* Memory Resilience corrected/uncorrected error info */
- struct {
- enum OpalMemErr_ResilErrType resil_err_type:8;
- uint8_t reserved_1[7];
- __be64 physical_address_start;
- __be64 physical_address_end;
- } resilience;
- /* Dynamic memory deallocation error info */
- struct {
- enum OpalMemErr_DynErrType dyn_err_type:8;
- uint8_t reserved_1[7];
- __be64 physical_address_start;
- __be64 physical_address_end;
- } dyn_dealloc;
- } u;
-};
-
-/* HMI interrupt event */
-enum OpalHMI_Version {
- OpalHMIEvt_V1 = 1,
-};
-
-enum OpalHMI_Severity {
- OpalHMI_SEV_NO_ERROR = 0,
- OpalHMI_SEV_WARNING = 1,
- OpalHMI_SEV_ERROR_SYNC = 2,
- OpalHMI_SEV_FATAL = 3,
-};
-
-enum OpalHMI_Disposition {
- OpalHMI_DISPOSITION_RECOVERED = 0,
- OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
-};
-
-enum OpalHMI_ErrType {
- OpalHMI_ERROR_MALFUNC_ALERT = 0,
- OpalHMI_ERROR_PROC_RECOV_DONE,
- OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
- OpalHMI_ERROR_PROC_RECOV_MASKED,
- OpalHMI_ERROR_TFAC,
- OpalHMI_ERROR_TFMR_PARITY,
- OpalHMI_ERROR_HA_OVERFLOW_WARN,
- OpalHMI_ERROR_XSCOM_FAIL,
- OpalHMI_ERROR_XSCOM_DONE,
- OpalHMI_ERROR_SCOM_FIR,
- OpalHMI_ERROR_DEBUG_TRIG_FIR,
- OpalHMI_ERROR_HYP_RESOURCE,
-};
-
-struct OpalHMIEvent {
- uint8_t version; /* 0x00 */
- uint8_t severity; /* 0x01 */
- uint8_t type; /* 0x02 */
- uint8_t disposition; /* 0x03 */
- uint8_t reserved_1[4]; /* 0x04 */
-
- __be64 hmer;
- /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
- __be64 tfmr;
-};
-
-enum {
- OPAL_P7IOC_DIAG_TYPE_NONE = 0,
- OPAL_P7IOC_DIAG_TYPE_RGC = 1,
- OPAL_P7IOC_DIAG_TYPE_BI = 2,
- OPAL_P7IOC_DIAG_TYPE_CI = 3,
- OPAL_P7IOC_DIAG_TYPE_MISC = 4,
- OPAL_P7IOC_DIAG_TYPE_I2C = 5,
- OPAL_P7IOC_DIAG_TYPE_LAST = 6
-};
-
-struct OpalIoP7IOCErrorData {
- __be16 type;
-
- /* GEM */
- __be64 gemXfir;
- __be64 gemRfir;
- __be64 gemRirqfir;
- __be64 gemMask;
- __be64 gemRwof;
-
- /* LEM */
- __be64 lemFir;
- __be64 lemErrMask;
- __be64 lemAction0;
- __be64 lemAction1;
- __be64 lemWof;
-
- union {
- struct OpalIoP7IOCRgcErrorData {
- __be64 rgcStatus; /* 3E1C10 */
- __be64 rgcLdcp; /* 3E1C18 */
- }rgc;
- struct OpalIoP7IOCBiErrorData {
- __be64 biLdcp0; /* 3C0100, 3C0118 */
- __be64 biLdcp1; /* 3C0108, 3C0120 */
- __be64 biLdcp2; /* 3C0110, 3C0128 */
- __be64 biFenceStatus; /* 3C0130, 3C0130 */
-
- u8 biDownbound; /* BI Downbound or Upbound */
- }bi;
- struct OpalIoP7IOCCiErrorData {
- __be64 ciPortStatus; /* 3Dn008 */
- __be64 ciPortLdcp; /* 3Dn010 */
-
- u8 ciPort; /* Index of CI port: 0/1 */
- }ci;
- };
-};
-
-/**
- * This structure defines the overlay which will be used to store PHB error
- * data upon request.
- */
-enum {
- OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
-};
-
-enum {
- OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
- OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
-};
-
-enum {
- OPAL_P7IOC_NUM_PEST_REGS = 128,
- OPAL_PHB3_NUM_PEST_REGS = 256
-};
-
-/* CAPI modes for PHB */
-enum {
- OPAL_PHB_CAPI_MODE_PCIE = 0,
- OPAL_PHB_CAPI_MODE_CAPI = 1,
- OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
- OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
-};
-
-struct OpalIoPhbErrorCommon {
- __be32 version;
- __be32 ioType;
- __be32 len;
-};
-
-struct OpalIoP7IOCPhbErrorData {
- struct OpalIoPhbErrorCommon common;
-
- __be32 brdgCtl;
-
- // P7IOC utl regs
- __be32 portStatusReg;
- __be32 rootCmplxStatus;
- __be32 busAgentStatus;
-
- // P7IOC cfg regs
- __be32 deviceStatus;
- __be32 slotStatus;
- __be32 linkStatus;
- __be32 devCmdStatus;
- __be32 devSecStatus;
-
- // cfg AER regs
- __be32 rootErrorStatus;
- __be32 uncorrErrorStatus;
- __be32 corrErrorStatus;
- __be32 tlpHdr1;
- __be32 tlpHdr2;
- __be32 tlpHdr3;
- __be32 tlpHdr4;
- __be32 sourceId;
-
- __be32 rsv3;
-
- // Record data about the call to allocate a buffer.
- __be64 errorClass;
- __be64 correlator;
-
- //P7IOC MMIO Error Regs
- __be64 p7iocPlssr; // n120
- __be64 p7iocCsr; // n110
- __be64 lemFir; // nC00
- __be64 lemErrorMask; // nC18
- __be64 lemWOF; // nC40
- __be64 phbErrorStatus; // nC80
- __be64 phbFirstErrorStatus; // nC88
- __be64 phbErrorLog0; // nCC0
- __be64 phbErrorLog1; // nCC8
- __be64 mmioErrorStatus; // nD00
- __be64 mmioFirstErrorStatus; // nD08
- __be64 mmioErrorLog0; // nD40
- __be64 mmioErrorLog1; // nD48
- __be64 dma0ErrorStatus; // nD80
- __be64 dma0FirstErrorStatus; // nD88
- __be64 dma0ErrorLog0; // nDC0
- __be64 dma0ErrorLog1; // nDC8
- __be64 dma1ErrorStatus; // nE00
- __be64 dma1FirstErrorStatus; // nE08
- __be64 dma1ErrorLog0; // nE40
- __be64 dma1ErrorLog1; // nE48
- __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
- __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
-};
-
-struct OpalIoPhb3ErrorData {
- struct OpalIoPhbErrorCommon common;
-
- __be32 brdgCtl;
-
- /* PHB3 UTL regs */
- __be32 portStatusReg;
- __be32 rootCmplxStatus;
- __be32 busAgentStatus;
-
- /* PHB3 cfg regs */
- __be32 deviceStatus;
- __be32 slotStatus;
- __be32 linkStatus;
- __be32 devCmdStatus;
- __be32 devSecStatus;
-
- /* cfg AER regs */
- __be32 rootErrorStatus;
- __be32 uncorrErrorStatus;
- __be32 corrErrorStatus;
- __be32 tlpHdr1;
- __be32 tlpHdr2;
- __be32 tlpHdr3;
- __be32 tlpHdr4;
- __be32 sourceId;
-
- __be32 rsv3;
-
- /* Record data about the call to allocate a buffer */
- __be64 errorClass;
- __be64 correlator;
-
- __be64 nFir; /* 000 */
- __be64 nFirMask; /* 003 */
- __be64 nFirWOF; /* 008 */
-
- /* PHB3 MMIO Error Regs */
- __be64 phbPlssr; /* 120 */
- __be64 phbCsr; /* 110 */
- __be64 lemFir; /* C00 */
- __be64 lemErrorMask; /* C18 */
- __be64 lemWOF; /* C40 */
- __be64 phbErrorStatus; /* C80 */
- __be64 phbFirstErrorStatus; /* C88 */
- __be64 phbErrorLog0; /* CC0 */
- __be64 phbErrorLog1; /* CC8 */
- __be64 mmioErrorStatus; /* D00 */
- __be64 mmioFirstErrorStatus; /* D08 */
- __be64 mmioErrorLog0; /* D40 */
- __be64 mmioErrorLog1; /* D48 */
- __be64 dma0ErrorStatus; /* D80 */
- __be64 dma0FirstErrorStatus; /* D88 */
- __be64 dma0ErrorLog0; /* DC0 */
- __be64 dma0ErrorLog1; /* DC8 */
- __be64 dma1ErrorStatus; /* E00 */
- __be64 dma1FirstErrorStatus; /* E08 */
- __be64 dma1ErrorLog0; /* E40 */
- __be64 dma1ErrorLog1; /* E48 */
- __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
- __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
-};
-
-enum {
- OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
- OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
-};
-
-typedef struct oppanel_line {
- const char * line;
- uint64_t line_len;
-} oppanel_line_t;
-
-/* OPAL I2C request */
-struct opal_i2c_request {
- uint8_t type;
-#define OPAL_I2C_RAW_READ 0
-#define OPAL_I2C_RAW_WRITE 1
-#define OPAL_I2C_SM_READ 2
-#define OPAL_I2C_SM_WRITE 3
- uint8_t flags;
-#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
- uint8_t subaddr_sz; /* Max 4 */
- uint8_t reserved;
- __be16 addr; /* 7 or 10 bit address */
- __be16 reserved2;
- __be32 subaddr; /* Sub-address if any */
- __be32 size; /* Data size */
- __be64 buffer_ra; /* Buffer real address */
-};
+/* We calculate number of sg entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
@@ -932,6 +194,13 @@ int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
struct opal_i2c_request *oreq);
+int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
+ uint64_t size, uint64_t token);
+int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf,
+ uint64_t size, uint64_t token);
+int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size,
+ uint64_t token);
+
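These are asynchronous calls; a hedged usage sketch (the async-token machinery and the wait for the OPAL_MSG_ASYNC_COMP completion are assumed to live elsewhere):

/*
 * Sketch only: read @size bytes at @offset from flash @id into a
 * kernel buffer, passing its real address to firmware.
 */
static int64_t flash_read_sketch(uint64_t id, uint64_t offset,
                                 void *buf, uint64_t size, uint64_t token)
{
        return opal_flash_read(id, offset, __pa(buf), size, token);
}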
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
@@ -946,8 +215,10 @@ extern void hvc_opal_init_early(void);
extern int opal_notifier_register(struct notifier_block *nb);
extern int opal_notifier_unregister(struct notifier_block *nb);
-extern int opal_message_notifier_register(enum OpalMessageType msg_type,
+extern int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb);
+extern int opal_message_notifier_unregister(enum opal_msg_type msg_type,
+ struct notifier_block *nb);
extern void opal_notifier_enable(void);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
@@ -962,7 +233,7 @@ extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
struct rtc_time;
extern unsigned long opal_get_boot_time(void);
extern void opal_nvram_init(void);
-extern void opal_flash_init(void);
+extern void opal_flash_update_init(void);
extern void opal_flash_term_callback(void);
extern int opal_elog_init(void);
extern void opal_platform_dump_init(void);
@@ -983,13 +254,8 @@ struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
unsigned long vmalloc_size);
void opal_free_sg_list(struct opal_sg_list *sg);
-/*
- * Dump region ID range usable by the OS
- */
-#define OPAL_DUMP_REGION_HOST_START 0x80
-#define OPAL_DUMP_REGION_LOG_BUF 0x80
-#define OPAL_DUMP_REGION_HOST_END 0xFF
+extern int opal_error_code(int rc);
#endif /* __ASSEMBLY__ */
-#endif /* __OPAL_H */
+#endif /* _ASM_POWERPC_OPAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e5f22c6c4bf9..70bd4381f8e6 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -106,9 +106,9 @@ struct paca_struct {
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_BOOK3E
- u64 exgen[8] __attribute__((aligned(0x80)));
+ u64 exgen[8] __aligned(0x40);
/* Keep pgd in the same cacheline as the start of extlb */
- pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
+ pgd_t *pgd __aligned(0x40); /* Current PGD */
pgd_t *kernel_pgd; /* Kernel PGD */
/* Shared by all threads of a core -- points to tcd of first thread */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 546d036fe925..1811c44bf34b 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -15,6 +15,24 @@
struct device_node;
/*
+ * PCI controller operations
+ */
+struct pci_controller_ops {
+ void (*dma_dev_setup)(struct pci_dev *dev);
+ void (*dma_bus_setup)(struct pci_bus *bus);
+
+ int (*probe_mode)(struct pci_bus *);
+
+ /* Called when pci_enable_device() is called. Returns true to
+ * allow assignment/enabling of the device. */
+ bool (*enable_device_hook)(struct pci_dev *);
+
+ /* Called during PCI resource reassignment */
+ resource_size_t (*window_alignment)(struct pci_bus *, unsigned long type);
+ void (*reset_secondary_bus)(struct pci_dev *dev);
+};
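A hedged sketch of how a platform might populate this table; the stub functions below are hypothetical placeholders, not existing kernel symbols:

/* Illustration only: minimal stubs a platform might wire up. */
static int example_probe_mode(struct pci_bus *bus)
{
        return PCI_PROBE_NORMAL;        /* plain config-space probing */
}

static bool example_enable_device_hook(struct pci_dev *dev)
{
        return true;                    /* allow every device */
}

static struct pci_controller_ops example_pci_ops = {
        .probe_mode             = example_probe_mode,
        .enable_device_hook     = example_enable_device_hook,
};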
+
+/*
* Structure of a PCI controller (host bridge)
*/
struct pci_controller {
@@ -46,6 +64,7 @@ struct pci_controller {
resource_size_t isa_mem_phys;
resource_size_t isa_mem_size;
+ struct pci_controller_ops controller_ops;
struct pci_ops *ops;
unsigned int __iomem *cfg_addr;
void __iomem *cfg_data;
@@ -89,6 +108,7 @@ struct pci_controller {
#ifdef CONFIG_PPC64
unsigned long buid;
+ struct pci_dn *pci_data;
#endif /* CONFIG_PPC64 */
void *private_data;
@@ -154,31 +174,51 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
struct iommu_table;
struct pci_dn {
+ int flags;
+#define PCI_DN_FLAG_IOV_VF 0x01
+
int busno; /* pci bus number */
int devfn; /* pci device and function number */
+ int vendor_id; /* Vendor ID */
+ int device_id; /* Device ID */
+ int class_code; /* Device class code */
+ struct pci_dn *parent;
struct pci_controller *phb; /* for pci devices */
struct iommu_table *iommu_table; /* for phb's or bridges */
struct device_node *node; /* back-pointer to the device_node */
int pci_ext_config_space; /* for pci devices */
- struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
struct eeh_dev *edev; /* eeh device */
#endif
#define IODA_INVALID_PE (-1)
#ifdef CONFIG_PPC_POWERNV
int pe_number;
+#ifdef CONFIG_PCI_IOV
+ u16 vfs_expanded; /* number of VFs IOV BAR expanded */
+ u16 num_vfs; /* number of VFs enabled*/
+ int offset; /* PE# for the first VF PE */
+#define M64_PER_IOV 4
+ int m64_per_iov;
+#define IODA_INVALID_M64 (-1)
+ int m64_wins[PCI_SRIOV_NUM_BARS][M64_PER_IOV];
+#endif /* CONFIG_PCI_IOV */
#endif
+ struct list_head child_list;
+ struct list_head list;
};
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
+extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
+ int devfn);
extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
-
-extern void * update_dn_pci_info(struct device_node *dn, void *data);
+extern struct pci_dn *add_dev_pci_data(struct pci_dev *pdev);
+extern void remove_dev_pci_data(struct pci_dev *pdev);
+extern void *update_dn_pci_info(struct device_node *dn, void *data);
static inline int pci_device_from_OF_node(struct device_node *np,
u8 *bus, u8 *devfn)
@@ -191,20 +231,12 @@ static inline int pci_device_from_OF_node(struct device_node *np,
}
#if defined(CONFIG_EEH)
-static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
+static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
{
- /*
- * For those OF nodes whose parent isn't PCI bridge, they
- * don't have PCI_DN actually. So we have to skip them for
- * any EEH operations.
- */
- if (!dn || !PCI_DN(dn))
- return NULL;
-
- return PCI_DN(dn)->edev;
+ return pdn ? pdn->edev : NULL;
}
#else
-#define of_node_to_eeh_dev(x) (NULL)
+#define pdn_to_eeh_dev(x) (NULL)
#endif
/** Find the bus corresponding to the indicated device node */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 1b0739bc14b5..4aef8d660999 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -22,7 +22,7 @@
#include <asm-generic/pci-dma-compat.h>
-/* Return values for ppc_md.pci_probe_mode function */
+/* Return values for pci_controller_ops.probe_mode function */
#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9835ac4173b7..11a38635dd65 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -247,28 +247,16 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#define pmd_large(pmd) 0
#define has_transparent_hugepage() 0
#endif
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
unsigned *shift);
-
-static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
- unsigned long *pte_sizep)
+static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+ unsigned *shift)
{
- pte_t *ptep;
- unsigned long ps = *pte_sizep;
- unsigned int shift;
-
- ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
- if (!ptep)
- return NULL;
- if (shift)
- *pte_sizep = 1ul << shift;
- else
- *pte_sizep = PAGE_SIZE;
-
- if (ps > *pte_sizep)
- return NULL;
-
- return ptep;
+ if (!arch_irqs_disabled()) {
+ pr_info("%s called with irq enabled\n", __func__);
+ dump_stack();
+ }
+ return __find_linux_pte_or_hugepte(pgdir, ea, shift);
}
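The wrapper above only warns; callers remain responsible for keeping interrupts disabled around the lookup and for as long as the returned PTE pointer is used. A sketch of the expected calling pattern, with the surrounding context assumed:

/*
 * Sketch of the expected calling pattern (context assumed):
 *
 *      unsigned long flags;
 *      unsigned shift;
 *      pte_t *ptep;
 *
 *      local_irq_save(flags);
 *      ptep = find_linux_pte_or_hugepte(pgdir, ea, &shift);
 *      ... use ptep while irqs stay disabled ...
 *      local_irq_restore(flags);
 */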
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4cbe23af400a..5c93f691b495 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -213,6 +213,8 @@
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDU 0xf8000001
+#define PPC_INST_STW 0x90000000
+#define PPC_INST_STWU 0x94000000
#define PPC_INST_MFLR 0x7c0802a6
#define PPC_INST_MTLR 0x7c0803a6
#define PPC_INST_CMPWI 0x2c000000
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index db1e2b8eff3c..4122a86d6858 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -23,8 +23,6 @@ extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
extern struct list_head hose_list;
-extern void find_and_init_phbs(void);
-
extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
@@ -33,9 +31,14 @@ extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
/* PCI device_node operations */
struct device_node;
+struct pci_dn;
+
typedef void *(*traverse_func)(struct device_node *me, void *data);
void *traverse_pci_devices(struct device_node *start, traverse_func pre,
void *data);
+void *traverse_pci_dn(struct pci_dn *root,
+ void *(*fn)(struct pci_dn *, void *),
+ void *data);
extern void pci_devs_phb_init(void);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
@@ -76,7 +79,6 @@ static inline const char *eeh_driver_name(struct pci_dev *pdev)
#endif /* CONFIG_EEH */
#else /* CONFIG_PCI */
-static inline void find_and_init_phbs(void) { }
static inline void init_pci_config_tokens(void) { }
#endif /* !CONFIG_PCI */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 7e4612528546..dd0fc18d8103 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -637,105 +637,105 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
/* AltiVec Registers (VPRs) */
-#define vr0 0
-#define vr1 1
-#define vr2 2
-#define vr3 3
-#define vr4 4
-#define vr5 5
-#define vr6 6
-#define vr7 7
-#define vr8 8
-#define vr9 9
-#define vr10 10
-#define vr11 11
-#define vr12 12
-#define vr13 13
-#define vr14 14
-#define vr15 15
-#define vr16 16
-#define vr17 17
-#define vr18 18
-#define vr19 19
-#define vr20 20
-#define vr21 21
-#define vr22 22
-#define vr23 23
-#define vr24 24
-#define vr25 25
-#define vr26 26
-#define vr27 27
-#define vr28 28
-#define vr29 29
-#define vr30 30
-#define vr31 31
+#define v0 0
+#define v1 1
+#define v2 2
+#define v3 3
+#define v4 4
+#define v5 5
+#define v6 6
+#define v7 7
+#define v8 8
+#define v9 9
+#define v10 10
+#define v11 11
+#define v12 12
+#define v13 13
+#define v14 14
+#define v15 15
+#define v16 16
+#define v17 17
+#define v18 18
+#define v19 19
+#define v20 20
+#define v21 21
+#define v22 22
+#define v23 23
+#define v24 24
+#define v25 25
+#define v26 26
+#define v27 27
+#define v28 28
+#define v29 29
+#define v30 30
+#define v31 31
/* VSX Registers (VSRs) */
-#define vsr0 0
-#define vsr1 1
-#define vsr2 2
-#define vsr3 3
-#define vsr4 4
-#define vsr5 5
-#define vsr6 6
-#define vsr7 7
-#define vsr8 8
-#define vsr9 9
-#define vsr10 10
-#define vsr11 11
-#define vsr12 12
-#define vsr13 13
-#define vsr14 14
-#define vsr15 15
-#define vsr16 16
-#define vsr17 17
-#define vsr18 18
-#define vsr19 19
-#define vsr20 20
-#define vsr21 21
-#define vsr22 22
-#define vsr23 23
-#define vsr24 24
-#define vsr25 25
-#define vsr26 26
-#define vsr27 27
-#define vsr28 28
-#define vsr29 29
-#define vsr30 30
-#define vsr31 31
-#define vsr32 32
-#define vsr33 33
-#define vsr34 34
-#define vsr35 35
-#define vsr36 36
-#define vsr37 37
-#define vsr38 38
-#define vsr39 39
-#define vsr40 40
-#define vsr41 41
-#define vsr42 42
-#define vsr43 43
-#define vsr44 44
-#define vsr45 45
-#define vsr46 46
-#define vsr47 47
-#define vsr48 48
-#define vsr49 49
-#define vsr50 50
-#define vsr51 51
-#define vsr52 52
-#define vsr53 53
-#define vsr54 54
-#define vsr55 55
-#define vsr56 56
-#define vsr57 57
-#define vsr58 58
-#define vsr59 59
-#define vsr60 60
-#define vsr61 61
-#define vsr62 62
-#define vsr63 63
+#define vs0 0
+#define vs1 1
+#define vs2 2
+#define vs3 3
+#define vs4 4
+#define vs5 5
+#define vs6 6
+#define vs7 7
+#define vs8 8
+#define vs9 9
+#define vs10 10
+#define vs11 11
+#define vs12 12
+#define vs13 13
+#define vs14 14
+#define vs15 15
+#define vs16 16
+#define vs17 17
+#define vs18 18
+#define vs19 19
+#define vs20 20
+#define vs21 21
+#define vs22 22
+#define vs23 23
+#define vs24 24
+#define vs25 25
+#define vs26 26
+#define vs27 27
+#define vs28 28
+#define vs29 29
+#define vs30 30
+#define vs31 31
+#define vs32 32
+#define vs33 33
+#define vs34 34
+#define vs35 35
+#define vs36 36
+#define vs37 37
+#define vs38 38
+#define vs39 39
+#define vs40 40
+#define vs41 41
+#define vs42 42
+#define vs43 43
+#define vs44 44
+#define vs45 45
+#define vs46 46
+#define vs47 47
+#define vs48 48
+#define vs49 49
+#define vs50 50
+#define vs51 51
+#define vs52 52
+#define vs53 53
+#define vs54 54
+#define vs55 55
+#define vs56 56
+#define vs57 57
+#define vs58 58
+#define vs59 59
+#define vs60 60
+#define vs61 61
+#define vs62 62
+#define vs63 63
/* SPE Registers (EVPRs) */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 2e23e92a4372..7a4ede16b283 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <asm/page.h>
+#include <linux/time.h>
/*
* Definitions for talking to the RTAS on CHRP machines.
@@ -273,6 +274,7 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
+#define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P')
/* Vendor specific Platform Event Log Format, Version 6, section header */
struct pseries_errorlog {
@@ -296,6 +298,31 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
return be16_to_cpu(sect->length);
}
+/* RTAS pseries hotplug errorlog section */
+struct pseries_hp_errorlog {
+ u8 resource;
+ u8 action;
+ u8 id_type;
+ u8 reserved;
+ union {
+ __be32 drc_index;
+ __be32 drc_count;
+ char drc_name[1];
+ } _drc_u;
+};
+
+#define PSERIES_HP_ELOG_RESOURCE_CPU 1
+#define PSERIES_HP_ELOG_RESOURCE_MEM 2
+#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
+#define PSERIES_HP_ELOG_RESOURCE_PHB 4
+
+#define PSERIES_HP_ELOG_ACTION_ADD 1
+#define PSERIES_HP_ELOG_ACTION_REMOVE 2
+
+#define PSERIES_HP_ELOG_ID_DRC_NAME 1
+#define PSERIES_HP_ELOG_ID_DRC_INDEX 2
+#define PSERIES_HP_ELOG_ID_DRC_COUNT 3
+
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id);
@@ -327,7 +354,7 @@ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
extern int rtas_online_cpus_mask(cpumask_var_t cpus);
extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
-extern int rtas_ibm_suspend_me(u64 handle, int *vasi_return);
+extern int rtas_ibm_suspend_me(u64 handle);
struct rtc_time;
extern unsigned long rtas_get_boot_time(void);
@@ -343,8 +370,12 @@ extern int early_init_dt_scan_rtas(unsigned long node,
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
#ifdef CONFIG_PPC_PSERIES
+extern time64_t last_rtas_event;
+extern int clobbering_unread_rtas_event(void);
extern int pseries_devicetree_update(s32 scope);
extern void post_mobility_fixup(void);
+#else
+static inline int clobbering_unread_rtas_event(void) { return 0; }
#endif
#ifdef CONFIG_PPC_RTAS_DAEMON
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
new file mode 100644
index 000000000000..c1818e35cf02
--- /dev/null
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_SECCOMP_H
+#define _ASM_POWERPC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index fbdf18cf954c..e9d384cbd021 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -7,7 +7,6 @@
extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
-extern int mem_init_done; /* set on boot once kmalloc can be called */
extern unsigned long long memory_limit;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d607df5081a7..825663c30945 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -42,7 +42,7 @@ struct smp_ops_t {
#ifdef CONFIG_PPC_SMP_MUXED_IPI
void (*cause_ipi)(int cpu, unsigned long data);
#endif
- int (*probe)(void);
+ void (*probe)(void);
int (*kick_cpu)(int nr);
void (*setup_cpu)(int nr);
void (*bringup_done)(void);
@@ -125,7 +125,6 @@ extern irqreturn_t smp_ipi_demux(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
-void smp_init_celleb(void);
void smp_setup_cpu_maps(void);
extern int __cpu_disable(void);
@@ -175,7 +174,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
extern int smt_enabled_at_boot;
-extern int smp_mpic_probe(void);
+extern void smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern int smp_generic_kick_cpu(int nr);
extern int smp_generic_cpu_bootable(unsigned int nr);
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index 6e909f3e6a46..37d2da6feabf 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -478,7 +478,7 @@ extern unsigned long smu_cmdbuf_abs;
/*
- * Kenrel asynchronous i2c interface
+ * Kernel asynchronous i2c interface
*/
#define SMU_I2C_READ_MAX 0x1d
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
index 96f59de61855..487e09077a3e 100644
--- a/arch/powerpc/include/asm/swab.h
+++ b/arch/powerpc/include/asm/swab.h
@@ -9,30 +9,4 @@
#include <uapi/asm/swab.h>
-static __inline__ __u16 ld_le16(const volatile __u16 *addr)
-{
- __u16 val;
-
- __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
- return val;
-}
-
-static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
-{
- __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __u32 ld_le32(const volatile __u32 *addr)
-{
- __u32 val;
-
- __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
- return val;
-}
-
-static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
-{
- __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
#endif /* _ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 91062eef582f..f1863a138b4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -367,3 +367,4 @@ SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)
+PPC64ONLY(switch_endian)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 72489799cf02..7efee4a3240b 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -39,7 +39,6 @@
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
@@ -55,7 +54,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.flags = 0, \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 03cbada59d3a..10fc784a2ad4 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -211,5 +211,8 @@ extern void secondary_cpu_time_init(void);
DECLARE_PER_CPU(u64, decrementers_next_tb);
+/* Convert timebase ticks to nanoseconds */
+unsigned long long tb_to_ns(unsigned long long tb_ticks);
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
index c44131e68e11..233ef5fe5fde 100644
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ b/arch/powerpc/include/asm/ucc_slow.h
@@ -251,19 +251,6 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
*/
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode);
-/* ucc_slow_poll_transmitter_now
- * Immediately forces a poll of the transmitter for data to be sent.
- * Typically, the hardware performs a periodic poll for data that the
- * transmit routine has set up to be transmitted. In cases where
- * this polling cycle is not soon enough, this optional routine can
- * be invoked to force a poll right away, instead. Proper use for
- * each transmission for which this functionality is desired is to
- * call the transmit routine and then this routine right after.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs);
-
/* ucc_slow_graceful_stop_tx
* Smoothly stops transmission on a specified slow UCC.
*
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 36b79c31eedd..f4f8b667d75b 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 363
+#define __NR_syscalls 364
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
index e5f8dd366212..ab3acd2f2786 100644
--- a/arch/powerpc/include/asm/vga.h
+++ b/arch/powerpc/include/asm/vga.h
@@ -25,12 +25,12 @@
static inline void scr_writew(u16 val, volatile u16 *addr)
{
- st_le16(addr, val);
+ *addr = cpu_to_le16(val);
}
static inline u16 scr_readw(volatile const u16 *addr)
{
- return ld_le16(addr);
+ return le16_to_cpu(*addr);
}
#define VT_BUF_HAVE_MEMCPYW
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 6997f4a271df..0e25bdb190bb 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -146,7 +146,7 @@ extern void xics_update_irq_servers(void);
extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
extern void xics_mask_unknown_vec(unsigned int vec);
extern irqreturn_t xics_ipi_dispatch(int cpu);
-extern int xics_smp_probe(void);
+extern void xics_smp_probe(void);
extern void xics_register_ics(struct ics *ics);
extern void xics_teardown_cpu(void);
extern void xics_kexec_teardown_cpu(int secondary);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 7a3f795ac218..79c4068be278 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -25,7 +25,6 @@ header-y += posix_types.h
header-y += ps3fb.h
header-y += ptrace.h
header-y += resource.h
-header-y += seccomp.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
index 77d2ed35b111..8036b385417d 100644
--- a/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -136,7 +136,7 @@ struct pt_regs {
#endif /* __powerpc64__ */
/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * Get/set all the altivec registers v0..v31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
* corresponding vector registers. Quadword 32 contains the vscr as the
* last word (offset 12) within that quadword. Quadword 33 contains the
diff --git a/arch/powerpc/include/uapi/asm/seccomp.h b/arch/powerpc/include/uapi/asm/seccomp.h
deleted file mode 100644
index 00c1d9133cfe..000000000000
--- a/arch/powerpc/include/uapi/asm/seccomp.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_POWERPC_SECCOMP_H
-#define _ASM_POWERPC_SECCOMP_H
-
-#include <linux/unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_read
-#define __NR_seccomp_write_32 __NR_write
-#define __NR_seccomp_exit_32 __NR_exit
-#define __NR_seccomp_sigreturn_32 __NR_sigreturn
-
-#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index ef5b5b1f3123..e4aa173dae62 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -385,5 +385,6 @@
#define __NR_memfd_create 360
#define __NR_bpf 361
#define __NR_execveat 362
+#define __NR_switch_endian 363
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 502cf69b6c89..c1ebbdaac28f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -33,7 +33,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \
- misc_$(CONFIG_WORD_SIZE).o vdso32/
+ misc_$(CONFIG_WORD_SIZE).o vdso32/ \
+ of_platform.o prom_parse.o
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
@@ -47,7 +48,6 @@ obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
-obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4717859fdd04..0034b6b3556a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -37,6 +37,7 @@
#include <asm/thread_info.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
+#include <asm/dbell.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
@@ -459,6 +460,19 @@ int main(void)
DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ DEFINE(VCPU_TB_RMENTRY, offsetof(struct kvm_vcpu, arch.rm_entry));
+ DEFINE(VCPU_TB_RMINTR, offsetof(struct kvm_vcpu, arch.rm_intr));
+ DEFINE(VCPU_TB_RMEXIT, offsetof(struct kvm_vcpu, arch.rm_exit));
+ DEFINE(VCPU_TB_GUEST, offsetof(struct kvm_vcpu, arch.guest_time));
+ DEFINE(VCPU_TB_CEDE, offsetof(struct kvm_vcpu, arch.cede_time));
+ DEFINE(VCPU_CUR_ACTIVITY, offsetof(struct kvm_vcpu, arch.cur_activity));
+ DEFINE(VCPU_ACTIVITY_START, offsetof(struct kvm_vcpu, arch.cur_tb_start));
+ DEFINE(TAS_SEQCOUNT, offsetof(struct kvmhv_tb_accumulator, seqcount));
+ DEFINE(TAS_TOTAL, offsetof(struct kvmhv_tb_accumulator, tb_total));
+ DEFINE(TAS_MIN, offsetof(struct kvmhv_tb_accumulator, tb_min));
+ DEFINE(TAS_MAX, offsetof(struct kvmhv_tb_accumulator, tb_max));
+#endif
DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3));
DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
@@ -492,7 +506,6 @@ int main(void)
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
- DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
@@ -550,8 +563,7 @@ int main(void)
DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
- DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
- DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
+ DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
@@ -748,5 +760,7 @@ int main(void)
offsetof(struct paca_struct, subcore_sibling_mask));
#endif
+ DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+
return 0;
}
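For context, not from the patch: each DEFINE() above becomes an assembler-visible constant via the asm-offsets machinery; a rough sketch of the generic kbuild helper it relies on:

	/* include/linux/kbuild.h (approximate) */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* The build scrapes the "->SYM value" markers out of the compiled
	 * asm-offsets.s into asm-offsets.h, so assembly can then use e.g.
	 * "ld r6, VCPU_TB_RMENTRY(r4)" with the generated offset. */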
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index ae77b7e59889..c641983bbdd6 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -61,12 +61,22 @@ struct cache_type_info {
};
/* These are used to index the cache_type_info array. */
-#define CACHE_TYPE_UNIFIED 0
-#define CACHE_TYPE_INSTRUCTION 1
-#define CACHE_TYPE_DATA 2
+#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
+#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
+#define CACHE_TYPE_INSTRUCTION 2
+#define CACHE_TYPE_DATA 3
static const struct cache_type_info cache_type_info[] = {
{
+ /* Embedded systems that use cache-size, cache-block-size,
+ * etc. for the Unified (typically L2) cache. */
+ .name = "Unified",
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ },
+ {
/* PowerPC Processor binding says the [di]-cache-*
* must be equal on unified caches, so just use
* d-cache properties. */
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
{
struct cache *iter;
- if (cache->type == CACHE_TYPE_UNIFIED)
+ if (cache->type == CACHE_TYPE_UNIFIED ||
+ cache->type == CACHE_TYPE_UNIFIED_D)
return cache;
list_for_each_entry(iter, &cache_list, list)
@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
return of_get_property(np, "cache-unified", NULL);
}
-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
- int level)
+/*
+ * Unified caches can have two different sets of tags. Most embedded
+ * systems use cache-size, etc. for the unified cache size, but open
+ * firmware systems use d-cache-size, etc. Check on initialization for
+ * which type we have, and return the appropriate structure type. Assume
+ * it's embedded if it isn't open firmware. If there is yet a third type,
+ * then there will be missing entries
+ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
+ * to be extended further.
+ */
+static int cache_is_unified_d(const struct device_node *np)
{
- struct cache *cache;
+ return of_get_property(np,
+ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
+}
+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
+{
pr_debug("creating L%d ucache for %s\n", level, node->full_name);
- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
-
- return cache;
+ return new_cache(cache_is_unified_d(node), level, node);
}
static struct cache *cache_do_one_devnode_split(struct device_node *node,
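An illustrative sketch, not from the patch, of how the per-type property names in cache_type_info[] drive the device-tree lookups (the helper name is hypothetical; the real accessors in cacheinfo.c follow the same pattern):

	/* e.g. "cache-size" for UNIFIED, "d-cache-size" for UNIFIED_D */
	static u32 example_cache_size(const struct device_node *np, int type)
	{
		const __be32 *prop;

		prop = of_get_property(np, cache_type_info[type].size_prop, NULL);
		return prop ? be32_to_cpup(prop) : 0;
	}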
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 46733535cc0b..9c9b7411b28b 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -137,15 +137,11 @@ __init_HFSCR:
/*
* Clear the TLB using the specified IS form of tlbiel instruction
 * (invalidate by congruence class). P7 has 128 CCs, P8 has 512.
- *
- * r3 = IS field
*/
__init_tlb_power7:
- li r3,0xc00 /* IS field = 0b11 */
-_GLOBAL(__flush_tlb_power7)
li r6,128
mtctr r6
- mr r7,r3 /* IS field */
+ li r7,0xc00 /* IS field = 0b11 */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
@@ -154,11 +150,9 @@ _GLOBAL(__flush_tlb_power7)
1: blr
__init_tlb_power8:
- li r3,0xc00 /* IS field = 0b11 */
-_GLOBAL(__flush_tlb_power8)
li r6,512
mtctr r6
- mr r7,r3 /* IS field */
+ li r7,0xc00 /* IS field = 0b11 */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f83046878336..60262fdf35ba 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -71,8 +71,8 @@ extern void __restore_cpu_power7(void);
extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __restore_cpu_a2(void);
-extern void __flush_tlb_power7(unsigned long inval_selector);
-extern void __flush_tlb_power8(unsigned long inval_selector);
+extern void __flush_tlb_power7(unsigned int action);
+extern void __flush_tlb_power8(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 735979764cd4..6e8d764ce47b 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -116,16 +116,13 @@ void __init swiotlb_detect_4g(void)
}
}
-static int __init swiotlb_late_init(void)
+static int __init check_swiotlb_enabled(void)
{
- if (ppc_swiotlb_enable) {
+ if (ppc_swiotlb_enable)
swiotlb_print_info();
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- } else {
+ else
swiotlb_free();
- }
return 0;
}
-subsys_initcall(swiotlb_late_init);
+subsys_initcall(check_swiotlb_enabled);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 3b2252e7731b..9ee61d15653d 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -164,30 +164,34 @@ __setup("eeh=", eeh_setup);
*/
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
- struct device_node *dn = eeh_dev_to_of_node(edev);
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
u32 cfg;
int cap, i;
int n = 0, l = 0;
char buffer[128];
- n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
- pr_warn("EEH: of node=%s\n", dn->full_name);
+ n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
+ edev->phb->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
+ pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
+ edev->phb->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
- eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
+ eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
pr_warn("EEH: PCI device/vendor: %08x\n", cfg);
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
+ eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg);
n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);
/* Gather bridge-specific registers */
if (edev->mode & EEH_DEV_BRIDGE) {
- eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
+ eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
pr_warn("EEH: Bridge secondary status: %04x\n", cfg);
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
pr_warn("EEH: Bridge control: %04x\n", cfg);
}
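Not from the patch: the new log lines spell the PCI address out of the pci_dn instead of printing a device-tree path; a hedged sketch of that formatting (helper name hypothetical, fields taken from the hunk above plus pci_dn's phb back-pointer):

	static void example_format_pci_addr(struct pci_dn *pdn, char *buf, size_t len)
	{
		snprintf(buf, len, "%04x:%02x:%02x.%01x",
			 pdn->phb->global_number, pdn->busno,
			 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
	}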
@@ -195,11 +199,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
/* Dump out the PCI-X command and status regs */
cap = edev->pcix_cap;
if (cap) {
- eeh_ops->read_config(dn, cap, 4, &cfg);
+ eeh_ops->read_config(pdn, cap, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
pr_warn("EEH: PCI-X cmd: %08x\n", cfg);
- eeh_ops->read_config(dn, cap+4, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
pr_warn("EEH: PCI-X status: %08x\n", cfg);
}
@@ -211,7 +215,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E capabilities and status follow:\n");
for (i=0; i<=8; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -238,7 +242,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E AER capability register set follows:\n");
for (i=0; i<=13; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -330,9 +334,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
int hugepage_shift;
/*
- * We won't find hugepages here, iomem
+ * We won't find hugepages here (this is iomem), so we are not
+ * worried about _PAGE_SPLITTING/collapse. We also won't hit a
+ * page table free, because this is init_mm.
*/
- ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
+ ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
if (!ptep)
return token;
WARN_ON(hugepage_shift);
@@ -414,11 +420,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
int ret;
int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
unsigned long flags;
- struct device_node *dn;
+ struct pci_dn *pdn;
struct pci_dev *dev;
struct eeh_pe *pe, *parent_pe, *phb_pe;
int rc = 0;
- const char *location;
+ const char *location = NULL;
eeh_stats.total_mmio_ffs++;
@@ -429,15 +435,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.no_dn++;
return 0;
}
- dn = eeh_dev_to_of_node(edev);
dev = eeh_dev_to_pci_dev(edev);
pe = eeh_dev_to_pe(edev);
/* Access to IO BARs might get this far and still not want checking. */
if (!pe) {
eeh_stats.ignored_check++;
- pr_debug("EEH: Ignored check for %s %s\n",
- eeh_pci_name(dev), dn->full_name);
+ pr_debug("EEH: Ignored check for %s\n",
+ eeh_pci_name(dev));
return 0;
}
@@ -473,10 +478,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
if (pe->state & EEH_PE_ISOLATED) {
pe->check_count++;
if (pe->check_count % EEH_MAX_FAILS == 0) {
- location = of_get_property(dn, "ibm,loc-code", NULL);
+ pdn = eeh_dev_to_pdn(edev);
+ if (pdn->node)
+ location = of_get_property(pdn->node, "ibm,loc-code", NULL);
printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
- pe->check_count, location,
+ pe->check_count,
+ location ? location : "unknown",
eeh_driver_name(dev), eeh_pci_name(dev));
printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
eeh_driver_name(dev));
@@ -667,6 +675,55 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
return rc;
}
+static void *eeh_disable_and_save_dev_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
+ struct pci_dev *dev = userdata;
+
+ /*
+ * The caller should have disabled and saved the
+ * state for the specified device
+ */
+ if (!pdev || pdev == dev)
+ return NULL;
+
+ /* Ensure we have D0 power state */
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* Save device state */
+ pci_save_state(pdev);
+
+ /*
+ * Disable device to avoid any DMA traffic and
+ * interrupt from the device
+ */
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ return NULL;
+}
+
+static void *eeh_restore_dev_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+ struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
+ struct pci_dev *dev = userdata;
+
+ if (!pdev)
+ return NULL;
+
+ /* Apply customization from firmware */
+ if (pdn && eeh_ops->restore_config)
+ eeh_ops->restore_config(pdn);
+
+ /* The caller should restore state for the specified device */
+ if (pdev != dev)
+ pci_save_state(pdev);
+
+ return NULL;
+}
+
/**
* pcibios_set_pcie_slot_reset - Set PCI-E reset state
* @dev: pci device struct
@@ -689,18 +746,27 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
switch (state) {
case pcie_deassert_reset:
eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
+ eeh_unfreeze_pe(pe, false);
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+ eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
+ eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
break;
case pcie_hot_reset:
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
+ eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
+ eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
break;
default:
- eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+ eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED);
return -EINVAL;
};
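Not from the patch: eeh_pe_dev_traverse() visits every eeh_dev in the PE with callbacks shaped like the two helpers above; a minimal sketch of that shape (the stop-on-non-NULL return is an assumption inferred from these handlers):

	static void *example_count_devs(void *data, void *userdata)
	{
		struct eeh_dev *edev = data;
		int *count = userdata;

		if (eeh_dev_to_pci_dev(edev))
			(*count)++;
		return NULL;		/* NULL keeps the traversal going */
	}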
@@ -815,15 +881,15 @@ out:
*/
void eeh_save_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn;
int i;
- struct device_node *dn;
- if (!edev)
+ pdn = eeh_dev_to_pdn(edev);
+ if (!pdn)
return;
- dn = eeh_dev_to_of_node(edev);
for (i = 0; i < 16; i++)
- eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
+ eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]);
/*
* For PCI bridges including root port, we need enable bus
@@ -914,7 +980,7 @@ static struct notifier_block eeh_reboot_nb = {
int eeh_init(void)
{
struct pci_controller *hose, *tmp;
- struct device_node *phb;
+ struct pci_dn *pdn;
static int cnt = 0;
int ret = 0;
@@ -949,20 +1015,9 @@ int eeh_init(void)
return ret;
/* Enable EEH for all adapters */
- if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb = hose->dn;
- traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
- }
- } else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node)
- pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL);
- } else {
- pr_warn("%s: Invalid probe mode %x",
- __func__, eeh_subsystem_flags);
- return -EINVAL;
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ pdn = hose->pci_data;
+ traverse_pci_dn(pdn, eeh_ops->probe, NULL);
}
/*
@@ -987,8 +1042,8 @@ int eeh_init(void)
core_initcall_sync(eeh_init);
/**
- * eeh_add_device_early - Enable EEH for the indicated device_node
- * @dn: device node for which to set up EEH
+ * eeh_add_device_early - Enable EEH for the indicated device node
+ * @pdn: PCI device node for which to set up EEH
*
* This routine must be used to perform EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
@@ -998,44 +1053,44 @@ core_initcall_sync(eeh_init);
 * on the CEC architecture, the type of the device, earlier boot
 * command-line arguments, etc.
*/
-void eeh_add_device_early(struct device_node *dn)
+void eeh_add_device_early(struct pci_dn *pdn)
{
struct pci_controller *phb;
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
- /*
- * If we're doing EEH probe based on PCI device, we
- * would delay the probe until late stage because
- * the PCI device isn't available this moment.
- */
- if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
+ if (!edev || !eeh_enabled())
return;
- if (!of_node_to_eeh_dev(dn))
+ if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
return;
- phb = of_node_to_eeh_dev(dn)->phb;
/* USB Bus children of PCI devices will not have BUID's */
- if (NULL == phb || 0 == phb->buid)
+ phb = edev->phb;
+ if (NULL == phb ||
+ (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
return;
- eeh_ops->of_probe(dn, NULL);
+ eeh_ops->probe(pdn, NULL);
}
/**
* eeh_add_device_tree_early - Enable EEH for the indicated device
- * @dn: device node
+ * @pdn: PCI device node
*
* This routine must be used to perform EEH initialization for the
* indicated PCI device that was added after system boot (e.g.
* hotplug, dlpar).
*/
-void eeh_add_device_tree_early(struct device_node *dn)
+void eeh_add_device_tree_early(struct pci_dn *pdn)
{
- struct device_node *sib;
+ struct pci_dn *n;
- for_each_child_of_node(dn, sib)
- eeh_add_device_tree_early(sib);
- eeh_add_device_early(dn);
+ if (!pdn)
+ return;
+
+ list_for_each_entry(n, &pdn->child_list, list)
+ eeh_add_device_tree_early(n);
+ eeh_add_device_early(pdn);
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
@@ -1048,7 +1103,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
*/
void eeh_add_device_late(struct pci_dev *dev)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
if (!dev || !eeh_enabled())
@@ -1056,13 +1111,16 @@ void eeh_add_device_late(struct pci_dev *dev)
pr_debug("EEH: Adding device %s\n", pci_name(dev));
- dn = pci_device_to_OF_node(dev);
- edev = of_node_to_eeh_dev(dn);
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ edev = pdn_to_eeh_dev(pdn);
if (edev->pdev == dev) {
pr_debug("EEH: Already referenced !\n");
return;
}
+ if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+ eeh_ops->probe(pdn, NULL);
+
/*
* The EEH cache might not be removed correctly because of
* unbalanced kref to the device during unplug time, which
@@ -1089,13 +1147,6 @@ void eeh_add_device_late(struct pci_dev *dev)
edev->pdev = dev;
dev->dev.archdata.edev = edev;
- /*
- * We have to do the EEH probe here because the PCI device
- * hasn't been created yet in the early stage.
- */
- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
- eeh_ops->dev_probe(dev, NULL);
-
eeh_addr_cache_insert_dev(dev);
}
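Not from the patch: the lookup chain this series keeps repeating is pci_dev -> pci_dn -> eeh_dev; a minimal sketch using only the helpers visible in the hunks above (the wrapper name is hypothetical):

	static struct eeh_dev *example_pci_dev_to_eeh_dev(struct pci_dev *dev)
	{
		struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);

		return pdn ? pdn_to_eeh_dev(pdn) : NULL;
	}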
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 07d8a2423a61..eeabeabea49c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -171,30 +171,27 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
int i;
- dn = pci_device_to_OF_node(dev);
- if (!dn) {
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ if (!pdn) {
pr_warn("PCI: no pci dn found for dev=%s\n",
pci_name(dev));
return;
}
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(pdn);
if (!edev) {
- pr_warn("PCI: no EEH dev found for dn=%s\n",
- dn->full_name);
+ pr_warn("PCI: no EEH dev found for %s\n",
+ pci_name(dev));
return;
}
/* Skip any devices for which EEH is not enabled. */
if (!edev->pe) {
-#ifdef DEBUG
- pr_info("PCI: skip building address cache for=%s - %s\n",
- pci_name(dev), dn->full_name);
-#endif
+ dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
return;
}
@@ -282,18 +279,18 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
*/
void eeh_addr_cache_build(void)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
struct pci_dev *dev = NULL;
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
for_each_pci_dev(dev) {
- dn = pci_device_to_OF_node(dev);
- if (!dn)
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ if (!pdn)
continue;
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(pdn);
if (!edev)
continue;
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index e5274ee9a75f..aabba94ff9cb 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -43,13 +43,13 @@
/**
* eeh_dev_init - Create EEH device according to OF node
- * @dn: device node
+ * @pdn: PCI device node
* @data: PHB
*
* It will create EEH device according to the given OF node. The function
 * might be called by PCI emulation, DR, or PHB hotplug.
*/
-void *eeh_dev_init(struct device_node *dn, void *data)
+void *eeh_dev_init(struct pci_dn *pdn, void *data)
{
struct pci_controller *phb = data;
struct eeh_dev *edev;
@@ -63,8 +63,8 @@ void *eeh_dev_init(struct device_node *dn, void *data)
}
/* Associate EEH device with OF node */
- PCI_DN(dn)->edev = edev;
- edev->dn = dn;
+ pdn->edev = edev;
+ edev->pdn = pdn;
edev->phb = phb;
INIT_LIST_HEAD(&edev->list);
@@ -80,16 +80,16 @@ void *eeh_dev_init(struct device_node *dn, void *data)
*/
void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
{
- struct device_node *dn = phb->dn;
+ struct pci_dn *root = phb->pci_data;
/* EEH PE for PHB */
eeh_phb_pe_create(phb);
/* EEH device for PHB */
- eeh_dev_init(dn, phb);
+ eeh_dev_init(root, phb);
/* EEH devices for children OF nodes */
- traverse_pci_devices(dn, eeh_dev_init, phb);
+ traverse_pci_dn(root, eeh_dev_init, phb);
}
/**
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d099540c0f56..24768ff3cb73 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -83,28 +83,6 @@ static inline void eeh_pcid_put(struct pci_dev *pdev)
module_put(pdev->driver->driver.owner);
}
-#if 0
-static void print_device_node_tree(struct pci_dn *pdn, int dent)
-{
- int i;
- struct device_node *pc;
-
- if (!pdn)
- return;
- for (i = 0; i < dent; i++)
- printk(" ");
- printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
- pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
- pdn->eeh_pe_config_addr, pdn->node->full_name);
- dent += 3;
- pc = pdn->node->child;
- while (pc) {
- print_device_node_tree(PCI_DN(pc), dent);
- pc = pc->sibling;
- }
-}
-#endif
-
/**
* eeh_disable_irq - Disable interrupt for the recovering device
* @dev: PCI device
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 1e4946c36f9e..35f0b62259bb 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -291,27 +291,25 @@ struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
*/
static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
{
- struct device_node *dn;
struct eeh_dev *parent;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
/*
* It might have the case for the indirect parent
* EEH device already having associated PE, but
* the direct parent EEH device doesn't have yet.
*/
- dn = edev->dn->parent;
- while (dn) {
+ pdn = pdn ? pdn->parent : NULL;
+ while (pdn) {
/* We're poking out of PCI territory */
- if (!PCI_DN(dn)) return NULL;
-
- parent = of_node_to_eeh_dev(dn);
- /* We're poking out of PCI territory */
- if (!parent) return NULL;
+ parent = pdn_to_eeh_dev(pdn);
+ if (!parent)
+ return NULL;
if (parent->pe)
return parent->pe;
- dn = dn->parent;
+ pdn = pdn->parent;
}
return NULL;
@@ -330,6 +328,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent;
+ /* Check if the PE number is valid */
+ if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
+ pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n",
+ __func__, edev->config_addr, edev->phb->global_number);
+ return -EINVAL;
+ }
+
/*
 * Search whether the PE already exists according
 * to the PE address. If it already exists, the
@@ -338,21 +343,18 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
*/
pe = eeh_pe_get(edev);
if (pe && !(pe->type & EEH_PE_INVALID)) {
- if (!edev->pe_config_addr) {
- pr_err("%s: PE with addr 0x%x already exists\n",
- __func__, edev->config_addr);
- return -EEXIST;
- }
-
/* Mark the PE as type of PCI bus */
pe->type = EEH_PE_BUS;
edev->pe = pe;
/* Put the edev to PE */
list_add_tail(&edev->list, &pe->edevs);
- pr_debug("EEH: Add %s to Bus PE#%x\n",
- edev->dn->full_name, pe->addr);
-
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr);
return 0;
} else if (pe && (pe->type & EEH_PE_INVALID)) {
list_add_tail(&edev->list, &pe->edevs);
@@ -368,9 +370,14 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
parent = parent->parent;
}
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device "
+ "PE#%x, Parent PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr, pe->parent->addr);
return 0;
}
@@ -409,8 +416,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
list_add_tail(&pe->child, &parent->child_list);
list_add_tail(&edev->list, &pe->edevs);
edev->pe = pe;
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to "
+ "Device PE#%x, Parent PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr, pe->parent->addr);
return 0;
}
@@ -430,8 +442,11 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
int cnt;
if (!edev->pe) {
- pr_debug("%s: No PE found for EEH device %s\n",
- __func__, edev->dn->full_name);
+ pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
+ __func__, edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF));
return -EEXIST;
}
@@ -653,9 +668,9 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
* blocked on normal path during the stage. So we need utilize
* eeh operations, which is always permitted.
*/
-static void eeh_bridge_check_link(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_bridge_check_link(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int cap;
uint32_t val;
int timeout = 0;
@@ -675,32 +690,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
/* Check slot status */
cap = edev->pcie_cap;
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
if (!(val & PCI_EXP_SLTSTA_PDS)) {
pr_debug(" No card in the slot (0x%04x) !\n", val);
return;
}
/* Check power status if we have the capability */
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
if (val & PCI_EXP_SLTCAP_PCP) {
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
if (val & PCI_EXP_SLTCTL_PCC) {
pr_debug(" In power-off state, power it on ...\n");
val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
- eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val);
+ eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
msleep(2 * 1000);
}
}
/* Enable link */
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
val &= ~PCI_EXP_LNKCTL_LD;
- eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val);
+ eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);
/* Check link */
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
pr_debug(" No link reporting capability (0x%08x) \n", val);
msleep(1000);
@@ -713,7 +728,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
msleep(20);
timeout += 20;
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
if (val & PCI_EXP_LNKSTA_DLLLA)
break;
}
@@ -728,9 +743,9 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
-static void eeh_restore_bridge_bars(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
/*
@@ -738,49 +753,49 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev,
* Bus numbers and windows: 0x18 - 0x30
*/
for (i = 4; i < 13; i++)
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
/* Rom: 0x38 */
- eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]);
+ eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);
/* Cache line & Latency timer: 0xC 0xD */
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* Max latency, min grant, interrupt ping and line: 0x3C */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
- eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
/* Check the PCIe link is ready */
- eeh_bridge_check_link(edev, dn);
+ eeh_bridge_check_link(edev);
}
-static void eeh_restore_device_bars(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_restore_device_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
u32 cmd;
for (i = 4; i < 10; i++)
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
/* 12 == Expansion ROM Address */
- eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
+ eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* max latency, min grant, interrupt pin and line */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/*
* Restore PERR & SERR bits, some devices require it,
* don't touch the other command bits
*/
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
+ eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
if (edev->config_space[1] & PCI_COMMAND_PARITY)
cmd |= PCI_COMMAND_PARITY;
else
@@ -789,7 +804,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
cmd |= PCI_COMMAND_SERR;
else
cmd &= ~PCI_COMMAND_SERR;
- eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
}
/**
@@ -804,16 +819,16 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
static void *eeh_restore_one_device_bars(void *data, void *flag)
{
struct eeh_dev *edev = (struct eeh_dev *)data;
- struct device_node *dn = eeh_dev_to_of_node(edev);
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
/* Do special restore for bridges */
if (edev->mode & EEH_DEV_BRIDGE)
- eeh_restore_bridge_bars(edev, dn);
+ eeh_restore_bridge_bars(edev);
else
- eeh_restore_device_bars(edev, dn);
+ eeh_restore_device_bars(edev);
- if (eeh_ops->restore_config)
- eeh_ops->restore_config(dn);
+ if (eeh_ops->restore_config && pdn)
+ eeh_ops->restore_config(pdn);
return NULL;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d180caf2d6de..afbc20019c2e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -356,6 +356,11 @@ _GLOBAL(ppc64_swapcontext)
bl sys_swapcontext
b .Lsyscall_exit
+_GLOBAL(ppc_switch_endian)
+ bl save_nvgprs
+ bl sys_switch_endian
+ b .Lsyscall_exit
+
_GLOBAL(ret_from_fork)
bl schedule_tail
REST_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 05adc8bbdef8..ccde8f084ce4 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -94,6 +94,7 @@ _GLOBAL(power7_powersave_common)
beq 1f
addi r1,r1,INT_FRAME_SIZE
ld r0,16(r1)
+ li r3,0 /* Return 0 (no nap) */
mtlr r0
blr
@@ -500,9 +501,11 @@ BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
ld r1,PACAR1(r13)
+ ld r6,_CCR(r1)
ld r4,_MSR(r1)
ld r5,_NIP(r1)
addi r1,r1,INT_FRAME_SIZE
+ mtcr r6
mtspr SPRN_SRR1,r4
mtspr SPRN_SRR0,r5
rfid
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 24b968f8e4d8..63d9cc4d7366 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -71,15 +71,15 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
return NULL;
-
- ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
+ /*
+ * We won't find huge pages here (iomem). Also can't hit
+ * a page table free due to init_mm
+ */
+ ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
&hugepage_shift);
if (ptep == NULL)
paddr = 0;
else {
- /*
- * we don't have hugepages backing iomem
- */
WARN_ON(hugepage_shift);
paddr = pte_pfn(*ptep) << PAGE_SHIFT;
}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 15c99b649b04..b2eb4686bd8f 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
uint64_t nip, uint64_t addr)
{
uint64_t srr1;
- int index = __this_cpu_inc_return(mce_nest_count);
+ int index = __this_cpu_inc_return(mce_nest_count) - 1;
struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
/*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
- index = __this_cpu_inc_return(mce_queue_count);
+ index = __this_cpu_inc_return(mce_queue_count) - 1;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
__this_cpu_dec(mce_queue_count);
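Not from the patch: the "- 1" added in both hunks is needed because __this_cpu_inc_return() yields the post-increment value, so the first event must land in slot 0 of the per-cpu array:

	/* nest/queue level 1 maps to array index 0 */
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);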
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index b6f123ab90ed..2c647b1e62e4 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -28,6 +28,55 @@
#include <asm/mce.h>
#include <asm/machdep.h>
+static void flush_tlb_206(unsigned int num_sets, unsigned int action)
+{
+ unsigned long rb;
+ unsigned int i;
+
+ switch (action) {
+ case TLB_INVAL_SCOPE_GLOBAL:
+ rb = TLBIEL_INVAL_SET;
+ break;
+ case TLB_INVAL_SCOPE_LPID:
+ rb = TLBIEL_INVAL_SET_LPID;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < num_sets; i++) {
+ asm volatile("tlbiel %0" : : "r" (rb));
+ rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+ }
+ asm volatile("ptesync" : : : "memory");
+}
+
+/*
+ * Generic routine to flush the TLB on power7. This routine is used as
+ * the flush_tlb hook in cpu_spec for the Power7 processor.
+ *
+ * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
+ * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
+ */
+void __flush_tlb_power7(unsigned int action)
+{
+ flush_tlb_206(POWER7_TLB_SETS, action);
+}
+
+/*
+ * Generic routine to flush the TLB on power8. This routine is used as
+ * the flush_tlb hook in cpu_spec for the power8 processor.
+ *
+ * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
+ * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
+ */
+void __flush_tlb_power8(unsigned int action)
+{
+ flush_tlb_206(POWER8_TLB_SETS, action);
+}
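Not from the patch: callers are expected to reach these routines through the per-CPU cpu_spec hook rather than calling them directly, as the MCE handlers below already do; a minimal usage sketch mirroring those call sites:

	if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
		cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);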
+
/* flush SLBs and reload */
static void flush_and_reload_slb(void)
{
@@ -79,7 +128,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
}
if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
/* reset error bits */
dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
}
@@ -110,7 +159,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
break;
case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
handled = 1;
}
break;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 34f7c9b7cd96..1e703f8ebad4 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -26,6 +26,9 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/kmsg_dump.h>
+#include <linux/pstore.h>
+#include <linux/zlib.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -54,6 +57,680 @@ struct nvram_partition {
static LIST_HEAD(nvram_partitions);
+#ifdef CONFIG_PPC_PSERIES
+struct nvram_os_partition rtas_log_partition = {
+ .name = "ibm,rtas-log",
+ .req_size = 2079,
+ .min_size = 1055,
+ .index = -1,
+ .os_partition = true
+};
+#endif
+
+struct nvram_os_partition oops_log_partition = {
+ .name = "lnx,oops-log",
+ .req_size = 4000,
+ .min_size = 2000,
+ .index = -1,
+ .os_partition = true
+};
+
+static const char *nvram_os_partitions[] = {
+#ifdef CONFIG_PPC_PSERIES
+ "ibm,rtas-log",
+#endif
+ "lnx,oops-log",
+ NULL
+};
+
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason);
+
+static struct kmsg_dumper nvram_kmsg_dumper = {
+ .dump = oops_to_nvram
+};
+
+/*
+ * For capturing and compressing an oops or panic report...
+
+ * big_oops_buf[] holds the uncompressed text we're capturing.
+ *
+ * oops_buf[] holds the compressed text, preceded by an oops header.
+ * The oops header has a u16 holding the header version (to differentiate
+ * between the old and new format headers), followed by a u16 holding the
+ * length of the compressed text (or uncompressed, if compression fails)
+ * and a u64 holding the timestamp. oops_buf[] gets written to NVRAM.
+ *
+ * oops_log_info points to the header. oops_data points to the compressed text.
+ *
+ * +- oops_buf
+ * | +- oops_data
+ * v v
+ * +-----------+-----------+-----------+------------------------+
+ * | version | length | timestamp | text |
+ * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
+ * +-----------+-----------+-----------+------------------------+
+ * ^
+ * +- oops_log_info
+ *
+ * We preallocate these buffers during init to avoid kmalloc during oops/panic.
+ */
+static size_t big_oops_buf_sz;
+static char *big_oops_buf, *oops_buf;
+static char *oops_data;
+static size_t oops_data_sz;
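Not from the patch: the header drawn in the diagram corresponds to struct oops_log_info (declared elsewhere in this series); a sketch of its assumed layout, matching the fields the code below fills in:

	struct example_oops_log_info {		/* assumed: 2 + 2 + 8 bytes */
		__be16 version;			/* OOPS_HDR_VERSION */
		__be16 report_length;		/* compressed (or raw) text length */
		__be64 timestamp;		/* ktime_get_real_seconds() */
	};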
+
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+static struct z_stream_s stream;
+
+#ifdef CONFIG_PSTORE
+#ifdef CONFIG_PPC_POWERNV
+static struct nvram_os_partition skiboot_partition = {
+ .name = "ibm,skiboot",
+ .index = -1,
+ .os_partition = false
+};
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+static struct nvram_os_partition of_config_partition = {
+ .name = "of-config",
+ .index = -1,
+ .os_partition = false
+};
+#endif
+
+static struct nvram_os_partition common_partition = {
+ .name = "common",
+ .index = -1,
+ .os_partition = false
+};
+
+static enum pstore_type_id nvram_type_ids[] = {
+ PSTORE_TYPE_DMESG,
+ PSTORE_TYPE_PPC_COMMON,
+ -1,
+ -1,
+ -1
+};
+static int read_type;
+#endif
+
+/* nvram_write_os_partition
+ *
+ * We need to buffer the error logs into nvram to ensure that we have
+ * the failure information to decode. If we have a severe error there
+ * is no way to guarantee that the OS or the machine is in a state to
+ * get back to user land and write the error to disk. For example if
+ * the SCSI device driver causes a Machine Check by writing to a bad
+ * IO address, there is no way of guaranteeing that the device driver
+ * is in any state that would also be able to write the error data
+ * captured to disk, thus we buffer it in NVRAM for analysis on the
+ * next boot.
+ *
+ * In NVRAM the partition containing the error log buffer will look like:
+ * Header (in bytes):
+ * +-----------+----------+--------+------------+------------------+
+ * | signature | checksum | length | name | data |
+ * |0 |1 |2 3|4 15|16 length-1|
+ * +-----------+----------+--------+------------+------------------+
+ *
+ * The 'data' section would look like (in bytes):
+ * +--------------+------------+-----------------------------------+
+ * | event_logged | sequence # | error log |
+ * |0 3|4 7|8 error_log_size-1|
+ * +--------------+------------+-----------------------------------+
+ *
+ * event_logged: 0 if event has not been logged to syslog, 1 if it has
+ * sequence #: The unique sequence # for each event. (until it wraps)
+ * error log: The error log from event_scan
+ */
+int nvram_write_os_partition(struct nvram_os_partition *part,
+ char *buff, int length,
+ unsigned int err_type,
+ unsigned int error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (part->index == -1)
+ return -ESPIPE;
+
+ if (length > part->size)
+ length = part->size;
+
+ info.error_type = cpu_to_be32(err_type);
+ info.seq_num = cpu_to_be32(error_log_cnt);
+
+ tmp_index = part->index;
+
+ rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info),
+ &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ rc = ppc_md.nvram_write(buff, length, &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
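Not from the patch: the two cpu_to_be32() stores above fill the small per-record prefix (struct err_log_info, declared elsewhere); its assumed shape:

	struct example_err_log_info {
		__be32 error_type;		/* ERR_TYPE_* of this record */
		__be32 seq_num;			/* wrapping sequence number */
	};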
+
+/* nvram_read_partition
+ *
+ * Reads at most 'length' bytes from an nvram partition.
+ */
+int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (part->index == -1)
+ return -1;
+
+ if (length > part->size)
+ length = part->size;
+
+ tmp_index = part->index;
+
+ if (part->os_partition) {
+ rc = ppc_md.nvram_read((char *)&info,
+ sizeof(struct err_log_info),
+ &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
+ return rc;
+ }
+ }
+
+ rc = ppc_md.nvram_read(buff, length, &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ if (part->os_partition) {
+ *error_log_cnt = be32_to_cpu(info.seq_num);
+ *err_type = be32_to_cpu(info.error_type);
+ }
+
+ return 0;
+}
+
+/* nvram_init_os_partition
+ *
+ * This sets up a partition with an "OS" signature.
+ *
+ * The general strategy is the following:
+ * 1.) If a partition with the indicated name already exists...
+ * - If it's large enough, use it.
+ * - Otherwise, recycle it and keep going.
+ * 2.) Search for a free partition that is large enough.
+ * 3.) If there's not a free partition large enough, recycle any obsolete
+ * OS partitions and try again.
+ * 4.) First try to get a chunk that will satisfy the requested size.
+ * 5.) If a chunk of the requested size cannot be allocated, then try finding
+ * a chunk that will satisfy the minimum needed.
+ *
+ * Returns 0 on success, else -1.
+ */
+int __init nvram_init_os_partition(struct nvram_os_partition *part)
+{
+ loff_t p;
+ int size;
+
+ /* Look for ours */
+ p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
+
+ /* Found one but too small, remove it */
+ if (p && size < part->min_size) {
+ pr_info("nvram: Found too small %s partition,"
+ " removing it...\n", part->name);
+ nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
+ p = 0;
+ }
+
+ /* Create one if we didn't find */
+ if (!p) {
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
+ if (p == -ENOSPC) {
+ pr_info("nvram: No room to create %s partition, "
+ "deleting any obsolete OS partitions...\n",
+ part->name);
+ nvram_remove_partition(NULL, NVRAM_SIG_OS,
+ nvram_os_partitions);
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
+ }
+ }
+
+ if (p <= 0) {
+ pr_err("nvram: Failed to find or create %s"
+ " partition, err %d\n", part->name, (int)p);
+ return -1;
+ }
+
+ part->index = p;
+ part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
+
+ return 0;
+}
+
+/* Derived from logfs_compress() */
+static int nvram_compress(const void *in, void *out, size_t inlen,
+ size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+/* Compress the text from big_oops_buf into oops_buf. */
+static int zip_oops(size_t text_len)
+{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
+ oops_data_sz);
+ if (zipped_len < 0) {
+ pr_err("nvram: compression failed; returned %d\n", zipped_len);
+ pr_err("nvram: logging uncompressed oops/panic report\n");
+ return -1;
+ }
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(zipped_len);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+ return 0;
+}
+
+#ifdef CONFIG_PSTORE
+static int nvram_pstore_open(struct pstore_info *psi)
+{
+ /* Reset the iterator to start reading partitions again */
+ read_type = -1;
+ return 0;
+}
+
+/**
+ * nvram_pstore_write - pstore write callback for nvram
+ * @type: Type of message logged
+ * @reason: reason behind dump (oops/panic)
+ * @id: identifier to indicate the write performed
+ * @part: pstore writes data to the registered buffer in parts;
+ * this is the part number of the current write
+ * @count: Indicates oops count
+ * @compressed: Flag to indicate the log is compressed
+ * @size: number of bytes written to the registered buffer
+ * @psi: registered pstore_info structure
+ *
+ * Called by pstore_dump() when an oops or panic report is logged in the
+ * printk buffer.
+ * Returns 0 on successful write.
+ */
+static int nvram_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part, int count,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ int rc;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
+ struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
+
+ /* part 1 has the recent messages from printk buffer */
+ if (part > 1 || (type != PSTORE_TYPE_DMESG))
+ return -1;
+
+ if (clobbering_unread_rtas_event())
+ return -1;
+
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(size);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+
+ if (compressed)
+ err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+
+ rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) (sizeof(*oops_hdr) + size), err_type, count);
+
+ if (rc != 0)
+ return rc;
+
+ *id = part;
+ return 0;
+}
+
+/*
+ * Reads the oops/panic report, rtas, of-config and common partition.
+ * Returns the length of the data we read from each partition.
+ * Returns 0 if we've been called before.
+ */
+static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *time, char **buf,
+ bool *compressed, struct pstore_info *psi)
+{
+ struct oops_log_info *oops_hdr;
+ unsigned int err_type, id_no, size = 0;
+ struct nvram_os_partition *part = NULL;
+ char *buff = NULL;
+ int sig = 0;
+ loff_t p;
+
+ read_type++;
+
+ switch (nvram_type_ids[read_type]) {
+ case PSTORE_TYPE_DMESG:
+ part = &oops_log_partition;
+ *type = PSTORE_TYPE_DMESG;
+ break;
+ case PSTORE_TYPE_PPC_COMMON:
+ sig = NVRAM_SIG_SYS;
+ part = &common_partition;
+ *type = PSTORE_TYPE_PPC_COMMON;
+ *id = PSTORE_TYPE_PPC_COMMON;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#ifdef CONFIG_PPC_PSERIES
+ case PSTORE_TYPE_PPC_RTAS:
+ part = &rtas_log_partition;
+ *type = PSTORE_TYPE_PPC_RTAS;
+ time->tv_sec = last_rtas_event;
+ time->tv_nsec = 0;
+ break;
+ case PSTORE_TYPE_PPC_OF:
+ sig = NVRAM_SIG_OF;
+ part = &of_config_partition;
+ *type = PSTORE_TYPE_PPC_OF;
+ *id = PSTORE_TYPE_PPC_OF;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#endif
+#ifdef CONFIG_PPC_POWERNV
+ case PSTORE_TYPE_PPC_OPAL:
+ sig = NVRAM_SIG_FW;
+ part = &skiboot_partition;
+ *type = PSTORE_TYPE_PPC_OPAL;
+ *id = PSTORE_TYPE_PPC_OPAL;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#endif
+ default:
+ return 0;
+ }
+
+ if (!part->os_partition) {
+ p = nvram_find_partition(part->name, sig, &size);
+ if (p <= 0) {
+ pr_err("nvram: Failed to find partition %s, "
+ "err %d\n", part->name, (int)p);
+ return 0;
+ }
+ part->index = p;
+ part->size = size;
+ }
+
+ buff = kmalloc(part->size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
+ kfree(buff);
+ return 0;
+ }
+
+ *count = 0;
+
+ if (part->os_partition)
+ *id = id_no;
+
+ if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+ size_t length, hdr_size;
+
+ oops_hdr = (struct oops_log_info *)buff;
+ if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
+ /* Old format oops header had 2-byte record size */
+ hdr_size = sizeof(u16);
+ length = be16_to_cpu(oops_hdr->version);
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ } else {
+ hdr_size = sizeof(*oops_hdr);
+ length = be16_to_cpu(oops_hdr->report_length);
+ time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
+ time->tv_nsec = 0;
+ }
+ *buf = kmalloc(length, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, buff + hdr_size, length);
+ kfree(buff);
+
+ if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
+ *compressed = true;
+ else
+ *compressed = false;
+ return length;
+ }
+
+ *buf = buff;
+ return part->size;
+}
+
+static struct pstore_info nvram_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "nvram",
+ .open = nvram_pstore_open,
+ .read = nvram_pstore_read,
+ .write = nvram_pstore_write,
+};
+
+static int nvram_pstore_init(void)
+{
+ int rc = 0;
+
+ if (machine_is(pseries)) {
+ nvram_type_ids[2] = PSTORE_TYPE_PPC_RTAS;
+ nvram_type_ids[3] = PSTORE_TYPE_PPC_OF;
+ } else
+ nvram_type_ids[2] = PSTORE_TYPE_PPC_OPAL;
+
+ nvram_pstore_info.buf = oops_data;
+ nvram_pstore_info.bufsize = oops_data_sz;
+
+ spin_lock_init(&nvram_pstore_info.buf_lock);
+
+ rc = pstore_register(&nvram_pstore_info);
+ if (rc != 0)
+ pr_err("nvram: pstore_register() failed, defaults to "
+ "kmsg_dump; returned %d\n", rc);
+
+ return rc;
+}
+#else
+static int nvram_pstore_init(void)
+{
+ return -1;
+}
+#endif
+
+void __init nvram_init_oops_partition(int rtas_partition_exists)
+{
+ int rc;
+
+ rc = nvram_init_os_partition(&oops_log_partition);
+ if (rc != 0) {
+#ifdef CONFIG_PPC_PSERIES
+ if (!rtas_partition_exists) {
+ pr_err("nvram: Failed to initialize oops partition!");
+ return;
+ }
+ pr_notice("nvram: Using %s partition to log both"
+ " RTAS errors and oops/panic reports\n",
+ rtas_log_partition.name);
+ memcpy(&oops_log_partition, &rtas_log_partition,
+ sizeof(rtas_log_partition));
+#else
+ pr_err("nvram: Failed to initialize oops partition!");
+ return;
+#endif
+ }
+ oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
+ if (!oops_buf) {
+ pr_err("nvram: No memory for %s partition\n",
+ oops_log_partition.name);
+ return;
+ }
+ oops_data = oops_buf + sizeof(struct oops_log_info);
+ oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
+
+ rc = nvram_pstore_init();
+
+ if (!rc)
+ return;
+
+ /*
+ * Figure compression (preceded by elimination of each line's <n>
+ * severity prefix) will reduce the oops/panic report to at most
+ * 45% of its original size.
+ */
+ big_oops_buf_sz = (oops_data_sz * 100) / 45;
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(
+ WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ if (!stream.workspace) {
+ pr_err("nvram: No memory for compression workspace; "
+ "skipping compression of %s partition data\n",
+ oops_log_partition.name);
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed %s data; "
+ "skipping compression\n", oops_log_partition.name);
+ stream.workspace = NULL;
+ }
+
+ rc = kmsg_dump_register(&nvram_kmsg_dumper);
+ if (rc != 0) {
+ pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
+ kfree(oops_buf);
+ kfree(big_oops_buf);
+ kfree(stream.workspace);
+ }
+}
+
+/*
+ * This is our kmsg_dump callback, called after an oops or panic report
+ * has been written to the printk buffer. We want to capture as much
+ * of the printk buffer as possible. First, capture as much as we can
+ * that we think will compress sufficiently to fit in the lnx,oops-log
+ * partition. If that's too much, go back and capture uncompressed text.
+ */
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ static unsigned int oops_count = 0;
+ static bool panicking = false;
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ size_t text_len;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+ int rc = -1;
+
+ switch (reason) {
+ case KMSG_DUMP_RESTART:
+ case KMSG_DUMP_HALT:
+ case KMSG_DUMP_POWEROFF:
+ /* These are almost always orderly shutdowns. */
+ return;
+ case KMSG_DUMP_OOPS:
+ break;
+ case KMSG_DUMP_PANIC:
+ panicking = true;
+ break;
+ case KMSG_DUMP_EMERG:
+ if (panicking)
+ /* Panic report already captured. */
+ return;
+ break;
+ default:
+ pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
+ __func__, (int) reason);
+ return;
+ }
+
+ if (clobbering_unread_rtas_event())
+ return;
+
+ if (!spin_trylock_irqsave(&lock, flags))
+ return;
+
+ if (big_oops_buf) {
+ kmsg_dump_get_buffer(dumper, false,
+ big_oops_buf, big_oops_buf_sz, &text_len);
+ rc = zip_oops(text_len);
+ }
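+ /* Compression failed or was not attempted; capture plain text instead. */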
+ if (rc != 0) {
+ kmsg_dump_rewind(dumper);
+ kmsg_dump_get_buffer(dumper, false,
+ oops_data, oops_data_sz, &text_len);
+ err_type = ERR_TYPE_KERNEL_PANIC;
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(text_len);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+ }
+
+ (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) (sizeof(*oops_hdr) + text_len), err_type,
+ ++oops_count);
+
+ spin_unlock_irqrestore(&lock, flags);
+}
+
static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
{
int size;
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 2f35a72642c6..b60a67d92ebd 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -72,7 +72,7 @@ static int of_pci_phb_probe(struct platform_device *dev)
/* Register devices with EEH */
if (dev->dev.of_node->child)
- eeh_add_device_tree_early(dev->dev.of_node);
+ eeh_add_device_tree_early(PCI_DN(dev->dev.of_node));
/* Scan the bus */
pcibios_scan_phb(phb);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2a525c938158..0d054068a21d 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -76,7 +76,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
phb->dn = dev;
- phb->is_dynamic = mem_init_done;
+ phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
if (dev) {
int nid = of_node_to_nid(dev);
@@ -109,8 +109,10 @@ void pcibios_free_controller(struct pci_controller *phb)
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type)
{
- if (ppc_md.pcibios_window_alignment)
- return ppc_md.pcibios_window_alignment(bus, type);
+ struct pci_controller *phb = pci_bus_to_host(bus);
+
+ if (phb->controller_ops.window_alignment)
+ return phb->controller_ops.window_alignment(bus, type);
/*
* PCI core will figure out the default
@@ -122,14 +124,26 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
- if (ppc_md.pcibios_reset_secondary_bus) {
- ppc_md.pcibios_reset_secondary_bus(dev);
+ struct pci_controller *phb = pci_bus_to_host(dev->bus);
+
+ if (phb->controller_ops.reset_secondary_bus) {
+ phb->controller_ops.reset_secondary_bus(dev);
return;
}
pci_reset_secondary_bus(dev);
}
+#ifdef CONFIG_PCI_IOV
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
+{
+ if (ppc_md.pcibios_iov_resource_alignment)
+ return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
+
+ return pci_iov_resource_size(pdev, resno);
+}
+#endif /* CONFIG_PCI_IOV */
+
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
@@ -788,6 +802,10 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
pci_name(dev));
return;
}
+
+ if (dev->is_virtfn)
+ return;
+
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
struct pci_bus_region reg;
@@ -942,6 +960,8 @@ static void pcibios_fixup_bridge(struct pci_bus *bus)
void pcibios_setup_bus_self(struct pci_bus *bus)
{
+ struct pci_controller *phb;
+
/* Fix up the bus resources for P2P bridges */
if (bus->self != NULL)
pcibios_fixup_bridge(bus);
@@ -953,12 +973,14 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pcibios_fixup_bus(bus);
/* Setup bus DMA mappings */
- if (ppc_md.pci_dma_bus_setup)
- ppc_md.pci_dma_bus_setup(bus);
+ phb = pci_bus_to_host(bus);
+ if (phb->controller_ops.dma_bus_setup)
+ phb->controller_ops.dma_bus_setup(bus);
}
static void pcibios_setup_device(struct pci_dev *dev)
{
+ struct pci_controller *phb;
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
*/
@@ -969,8 +991,9 @@ static void pcibios_setup_device(struct pci_dev *dev)
set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
/* Additional platform DMA/iommu setup */
- if (ppc_md.pci_dma_dev_setup)
- ppc_md.pci_dma_dev_setup(dev);
+ phb = pci_bus_to_host(dev->bus);
+ if (phb->controller_ops.dma_dev_setup)
+ phb->controller_ops.dma_dev_setup(dev);
/* Read default IRQs and fixup if necessary */
pci_read_irq_line(dev);
@@ -986,6 +1009,12 @@ int pcibios_add_device(struct pci_dev *dev)
*/
if (dev->bus->is_added)
pcibios_setup_device(dev);
+
+#ifdef CONFIG_PCI_IOV
+ if (ppc_md.pcibios_fixup_sriov)
+ ppc_md.pcibios_fixup_sriov(dev);
+#endif /* CONFIG_PCI_IOV */
+
return 0;
}
@@ -1450,8 +1479,10 @@ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- if (ppc_md.pcibios_enable_device_hook)
- if (ppc_md.pcibios_enable_device_hook(dev))
+ struct pci_controller *phb = pci_bus_to_host(dev->bus);
+
+ if (phb->controller_ops.enable_device_hook)
+ if (!phb->controller_ops.enable_device_hook(dev))
return -EINVAL;
return pci_enable_resources(dev, mask);
@@ -1624,8 +1655,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
/* Get probe mode and perform scan */
mode = PCI_PROBE_NORMAL;
- if (node && ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (node && hose->controller_ops.probe_mode)
+ mode = hose->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 5b789177aa29..7ed85a69a9c2 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -73,13 +73,16 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
{
int slotno, mode, pass, max;
struct pci_dev *dev;
+ struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);
- eeh_add_device_tree_early(dn);
+ eeh_add_device_tree_early(PCI_DN(dn));
+
+ phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
- if (ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (phb->controller_ops.probe_mode)
+ mode = phb->controller_ops.probe_mode(bus);
if (mode == PCI_PROBE_DEVTREE) {
/* use ofdt-based probe */
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 83df3075d3df..b3b4df91b792 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -32,12 +32,237 @@
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
+/*
+ * This function is used when looking up the firmware data of a
+ * specific PCI device attached to the indicated PCI bus. For VFs,
+ * the firmware data is linked to that of the PF's bridge. For
+ * other devices, it is linked to that of their own bridge.
+ */
+static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus)
+{
+ struct pci_bus *pbus;
+ struct device_node *dn;
+ struct pci_dn *pdn;
+
+ /*
+ * We might be dealing with a virtual bus, which has
+ * no associated bridge.
+ */
+ pbus = bus;
+ while (pbus) {
+ if (pci_is_root_bus(pbus) || pbus->self)
+ break;
+
+ pbus = pbus->parent;
+ }
+
+ /*
+ * Except for virtual buses, all PCI buses should
+ * have device nodes.
+ */
+ dn = pci_bus_to_OF_node(pbus);
+ pdn = dn ? PCI_DN(dn) : NULL;
+
+ return pdn;
+}
+
+struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
+ int devfn)
+{
+ struct device_node *dn = NULL;
+ struct pci_dn *parent, *pdn;
+ struct pci_dev *pdev = NULL;
+
+ /* Fast path: fetch from PCI device */
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (pdev->devfn == devfn) {
+ if (pdev->dev.archdata.pci_data)
+ return pdev->dev.archdata.pci_data;
+
+ dn = pci_device_to_OF_node(pdev);
+ break;
+ }
+ }
+
+ /* Fast path: fetch from device node */
+ pdn = dn ? PCI_DN(dn) : NULL;
+ if (pdn)
+ return pdn;
+
+ /* Slow path: fetch from firmware data hierarchy */
+ parent = pci_bus_to_pdn(bus);
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(pdn, &parent->child_list, list) {
+ if (pdn->busno == bus->number &&
+ pdn->devfn == devfn)
+ return pdn;
+ }
+
+ return NULL;
+}
+
struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
{
- struct device_node *dn = pci_device_to_OF_node(pdev);
- if (!dn)
+ struct device_node *dn;
+ struct pci_dn *parent, *pdn;
+
+ /* Search device directly */
+ if (pdev->dev.archdata.pci_data)
+ return pdev->dev.archdata.pci_data;
+
+ /* Check device node */
+ dn = pci_device_to_OF_node(pdev);
+ pdn = dn ? PCI_DN(dn) : NULL;
+ if (pdn)
+ return pdn;
+
+ /*
+ * VFs don't have device nodes. We hook their
+ * firmware data to the PF's bridge.
+ */
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(pdn, &parent->child_list, list) {
+ if (pdn->busno == pdev->bus->number &&
+ pdn->devfn == pdev->devfn)
+ return pdn;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_PCI_IOV
+static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
+ struct pci_dev *pdev,
+ int busno, int devfn)
+{
+ struct pci_dn *pdn;
+
+ /* Except for the PHB, we always have a parent */
+ if (!parent)
+ return NULL;
+
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+ if (!pdn) {
+ dev_warn(&pdev->dev, "%s: Out of memory!\n", __func__);
return NULL;
- return PCI_DN(dn);
+ }
+
+ pdn->phb = parent->phb;
+ pdn->parent = parent;
+ pdn->busno = busno;
+ pdn->devfn = devfn;
+#ifdef CONFIG_PPC_POWERNV
+ pdn->pe_number = IODA_INVALID_PE;
+#endif
+ INIT_LIST_HEAD(&pdn->child_list);
+ INIT_LIST_HEAD(&pdn->list);
+ list_add_tail(&pdn->list, &parent->child_list);
+
+ /*
+ * If we already have the PCI device instance, bind
+ * it to the new firmware data now.
+ */
+ if (pdev)
+ pdev->dev.archdata.pci_data = pdn;
+
+ return pdn;
+}
+#endif
+
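+/*
+ * For an SR-IOV capable PF, create firmware data (pci_dn) entries
+ * for every possible VF, since VFs have no device-tree nodes of
+ * their own. For anything else this just returns the existing pci_dn.
+ */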
+struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct pci_dn *parent, *pdn;
+ int i;
+
+ /* Only support IOV for now */
+ if (!pdev->is_physfn)
+ return pci_get_pdn(pdev);
+
+ /* Check if VFs have been populated */
+ pdn = pci_get_pdn(pdev);
+ if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF))
+ return NULL;
+
+ pdn->flags |= PCI_DN_FLAG_IOV_VF;
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return NULL;
+
+ for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ pdn = add_one_dev_pci_data(parent, NULL,
+ pci_iov_virtfn_bus(pdev, i),
+ pci_iov_virtfn_devfn(pdev, i));
+ if (!pdn) {
+ dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n",
+ __func__, i);
+ return NULL;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ return pci_get_pdn(pdev);
+}
+
+void remove_dev_pci_data(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct pci_dn *parent;
+ struct pci_dn *pdn, *tmp;
+ int i;
+
+ /*
+ * VF and VF PE are created/released dynamically, so we need to
+ * bind/unbind them. Otherwise the VF and VF PE would be mismatched
+ * when re-enabling SR-IOV.
+ */
+ if (pdev->is_virtfn) {
+ pdn = pci_get_pdn(pdev);
+#ifdef CONFIG_PPC_POWERNV
+ pdn->pe_number = IODA_INVALID_PE;
+#endif
+ return;
+ }
+
+ /* Only support IOV PF for now */
+ if (!pdev->is_physfn)
+ return;
+
+ /* Check if VFs have been populated */
+ pdn = pci_get_pdn(pdev);
+ if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF))
+ return;
+
+ pdn->flags &= ~PCI_DN_FLAG_IOV_VF;
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return;
+
+ /*
+ * We might introduce a flag to pci_dn in the future
+ * so that we can release the VFs' firmware data in
+ * batch mode.
+ */
+ for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ list_for_each_entry_safe(pdn, tmp,
+ &parent->child_list, list) {
+ if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
+ pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
+ continue;
+
+ if (!list_empty(&pdn->list))
+ list_del(&pdn->list);
+
+ kfree(pdn);
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
}
/*
@@ -49,6 +274,7 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
struct pci_controller *phb = data;
const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
const __be32 *regs;
+ struct device_node *parent;
struct pci_dn *pdn;
pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
@@ -69,7 +295,25 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
pdn->devfn = (addr >> 8) & 0xff;
}
+ /* vendor/device IDs and class code */
+ regs = of_get_property(dn, "vendor-id", NULL);
+ pdn->vendor_id = regs ? of_read_number(regs, 1) : 0;
+ regs = of_get_property(dn, "device-id", NULL);
+ pdn->device_id = regs ? of_read_number(regs, 1) : 0;
+ regs = of_get_property(dn, "class-code", NULL);
+ pdn->class_code = regs ? of_read_number(regs, 1) : 0;
+
+ /* Extended config space */
pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
+
+ /* Attach to parent node */
+ INIT_LIST_HEAD(&pdn->child_list);
+ INIT_LIST_HEAD(&pdn->list);
+ parent = of_get_parent(dn);
+ pdn->parent = parent ? PCI_DN(parent) : NULL;
+ if (pdn->parent)
+ list_add_tail(&pdn->list, &pdn->parent->child_list);
+
return NULL;
}
@@ -131,6 +375,46 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
return NULL;
}
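+/*
+ * Return the next pci_dn in a pre-order walk of the tree rooted at
+ * @root: descend to the first child if there is one, otherwise climb
+ * up until a node with a following sibling is found.
+ */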
+static struct pci_dn *pci_dn_next_one(struct pci_dn *root,
+ struct pci_dn *pdn)
+{
+ struct list_head *next = pdn->child_list.next;
+
+ if (next != &pdn->child_list)
+ return list_entry(next, struct pci_dn, list);
+
+ while (1) {
+ if (pdn == root)
+ return NULL;
+
+ next = pdn->list.next;
+ if (next != &pdn->parent->child_list)
+ break;
+
+ pdn = pdn->parent;
+ }
+
+ return list_entry(next, struct pci_dn, list);
+}
+
+void *traverse_pci_dn(struct pci_dn *root,
+ void *(*fn)(struct pci_dn *, void *),
+ void *data)
+{
+ struct pci_dn *pdn = root;
+ void *ret;
+
+ /* Only scan the child nodes */
+ for (pdn = pci_dn_next_one(root, pdn); pdn;
+ pdn = pci_dn_next_one(root, pdn)) {
+ ret = fn(pdn, data);
+ if (ret)
+ return ret;
+ }
+
+ return NULL;
+}
+
/**
* pci_devs_phb_init_dynamic - setup pci devices under this PHB
* phb: pci-to-host bridge (top-level bridge connecting to cpu)
@@ -147,8 +431,12 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
/* PHB nodes themselves must not match */
update_dn_pci_info(dn, phb);
pdn = dn->data;
- if (pdn)
+ if (pdn) {
pdn->devfn = pdn->busno = -1;
+ pdn->vendor_id = pdn->device_id = pdn->class_code = 0;
+ pdn->phb = phb;
+ phb->pci_data = pdn;
+ }
/* Update dn->phb ptrs for new phb and children devices */
traverse_pci_devices(dn, update_dn_pci_info, phb);
@@ -171,3 +459,16 @@ void __init pci_devs_phb_init(void)
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
pci_devs_phb_init_dynamic(phb);
}
+
+static void pci_dev_pdn_setup(struct pci_dev *pdev)
+{
+ struct pci_dn *pdn;
+
+ if (pdev->dev.archdata.pci_data)
+ return;
+
+ /* Setup the fast path */
+ pdn = pci_get_pdn(pdev);
+ pdev->dev.archdata.pci_data = pdn;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index e6245e9c7d8d..42e02a2d570b 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -207,6 +207,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
{
struct device_node *node = dev->dev.of_node;
struct pci_bus *bus;
+ struct pci_controller *phb;
const __be32 *busrange, *ranges;
int len, i, mode;
struct pci_bus_region region;
@@ -286,9 +287,11 @@ void of_scan_pci_bridge(struct pci_dev *dev)
bus->number);
pr_debug(" bus name: %s\n", bus->name);
+ phb = pci_bus_to_host(bus);
+
mode = PCI_PROBE_NORMAL;
- if (ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (phb->controller_ops.probe_mode)
+ mode = phb->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
@@ -305,7 +308,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
const __be32 *reg;
int reglen, devfn;
#ifdef CONFIG_EEH
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn));
#endif
pr_debug(" * %s\n", dn->full_name);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b4cc7bef6b16..febb50dd5328 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1114,8 +1114,11 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
*/
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
+/*
+ * Copy architecture-specific thread state
+ */
int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+ unsigned long kthread_arg, struct task_struct *p)
{
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
@@ -1127,6 +1130,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
if (unlikely(p->flags & PF_KTHREAD)) {
+ /* kernel thread */
struct thread_info *ti = (void *)task_stack_page(p);
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
@@ -1137,11 +1141,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
clear_tsk_thread_flag(p, TIF_32BIT);
childregs->softe = 1;
#endif
- childregs->gpr[15] = arg;
+ childregs->gpr[15] = kthread_arg;
p->thread.regs = NULL; /* no user register state */
ti->flags |= _TIF_RESTOREALL;
f = ret_from_kernel_thread;
} else {
+ /* user thread */
struct pt_regs *regs = current_pt_regs();
CHECK_FULL_REGS(regs);
*childregs = *regs;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b8e15c678960..308c5e15676b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -721,7 +721,7 @@ void __init early_init_devtree(void *params)
*/
of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
if (boot_cpuid < 0) {
- printk("Failed to indentify boot CPU !\n");
+ printk("Failed to identify boot CPU !\n");
BUG();
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1a85d8f96739..fd1fe4c37599 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2898,7 +2898,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* Call OF "quiesce" method to shut down pending DMA's from
* devices etc...
*/
- prom_printf("Calling quiesce...\n");
+ prom_printf("Quiescing Open Firmware ...\n");
call_prom("quiesce", 0, 0);
/*
@@ -2910,7 +2910,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/* Don't print anything after quiesce under OPAL, it crashes OFW */
if (of_platform != PLATFORM_OPAL) {
- prom_printf("returning from prom_init\n");
+ prom_printf("Booting Linux via __start() ...\n");
prom_debug("->dt_header_start=0x%x\n", hdr);
}
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 21c45a2d0706..7a488c108410 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -401,7 +401,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
buf = altbuf;
} else {
buf = rtas_err_buf;
- if (mem_init_done)
+ if (slab_is_available())
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
}
if (buf)
@@ -461,7 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
- if (mem_init_done)
+ if (slab_is_available())
kfree(buff_copy);
}
return ret;
@@ -897,7 +897,7 @@ int rtas_offline_cpus_mask(cpumask_var_t cpus)
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);
-int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
+int rtas_ibm_suspend_me(u64 handle)
{
long state;
long rc;
@@ -919,13 +919,11 @@ int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
return rc;
} else if (state == H_VASI_ENABLED) {
- *vasi_return = RTAS_NOT_SUSPENDABLE;
- return 0;
+ return -EAGAIN;
} else if (state != H_VASI_SUSPENDING) {
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
state);
- *vasi_return = -1;
- return 0;
+ return -EIO;
}
if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
@@ -972,7 +970,7 @@ out:
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
-int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
+int rtas_ibm_suspend_me(u64 handle)
{
return -ENOSYS;
}
@@ -1022,7 +1020,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
unsigned long flags;
char *buff_copy, *errbuf = NULL;
int nargs, nret, token;
- int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1054,15 +1051,18 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
if (token == ibm_suspend_me_token) {
/*
- * rtas_ibm_suspend_me assumes args are in cpu endian, or at least the
- * hcall within it requires it.
+ * rtas_ibm_suspend_me assumes the streamid handle is in cpu
+ * endian, or at least the hcall within it requires it.
*/
- int vasi_rc = 0;
+ int rc = 0;
u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
| be32_to_cpu(args.args[1]);
- rc = rtas_ibm_suspend_me(handle, &vasi_rc);
- args.rets[0] = cpu_to_be32(vasi_rc);
- if (rc)
+ rc = rtas_ibm_suspend_me(handle);
+ if (rc == -EAGAIN)
+ args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
+ else if (rc == -EIO)
+ args.rets[0] = cpu_to_be32(-1);
+ else if (rc)
return rc;
goto copy_return;
}
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index ce230da2c015..73f1934582c2 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -113,7 +113,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
ret = rtas_read_config(pdn, where, size, val);
if (*val == EEH_IO_ERROR_VALUE(size) &&
- eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
+ eeh_dev_check_failure(pdn_to_eeh_dev(pdn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return ret;
@@ -277,50 +277,3 @@ int rtas_setup_phb(struct pci_controller *phb)
return 0;
}
-
-void __init find_and_init_phbs(void)
-{
- struct device_node *node;
- struct pci_controller *phb;
- struct device_node *root = of_find_node_by_path("/");
-
- for_each_child_of_node(root, node) {
- if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
- strcmp(node->type, "pciex") != 0))
- continue;
-
- phb = pcibios_alloc_controller(node);
- if (!phb)
- continue;
- rtas_setup_phb(phb);
- pci_process_bridge_OF_ranges(phb, node, 0);
- isa_bridge_find_early(phb);
- }
-
- of_node_put(root);
- pci_devs_phb_init();
-
- /*
- * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
- * in chosen.
- */
- if (of_chosen) {
- const int *prop;
-
- prop = of_get_property(of_chosen,
- "linux,pci-probe-only", NULL);
- if (prop) {
- if (*prop)
- pci_add_flags(PCI_PROBE_ONLY);
- else
- pci_clear_flags(PCI_PROBE_ONLY);
- }
-
-#ifdef CONFIG_PPC32 /* Will be made generic soon */
- prop = of_get_property(of_chosen,
- "linux,pci-assign-all-buses", NULL);
- if (prop && *prop)
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-#endif /* CONFIG_PPC32 */
- }
-}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 49f553bbb360..c69671c03c3b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -37,6 +37,7 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/memory.h>
+#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/kdump.h>
@@ -779,3 +780,22 @@ unsigned long memory_block_size_bytes(void)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
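+/*
+ * Sample period for the PMU-based hardlockup detector:
+ * watchdog_thresh seconds expressed in processor cycles.
+ */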
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+ return ppc_proc_freq * watchdog_thresh;
+}
+
+/*
+ * The hardlockup detector breaks PMU event based branches and is likely
+ * to get false positives in KVM guests, so disable it by default.
+ */
+static int __init disable_hardlockup_detector(void)
+{
+ hardlockup_detector_disable();
+
+ return 0;
+}
+early_initcall(disable_hardlockup_detector);
+#endif
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index b2702e87db0d..5fa92706444b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
(u64)len_high << 32 | len_low, advice);
}
+
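+/*
+ * Flip the MSR little-endian bit for the calling task so that the
+ * syscall returns to userspace with the opposite endianness.
+ */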
+long sys_switch_endian(void)
+{
+ struct thread_info *ti;
+
+ current->thread.regs->msr ^= MSR_LE;
+
+ /*
+ * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
+ * userspace. That also has the effect of restoring the non-volatile
+ * GPRs, so we saved them on the way in here.
+ */
+ ti = current_thread_info();
+ ti->flags |= _TIF_RESTOREALL;
+
+ return 0;
+}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 7ab5d434e2ee..4d6b1d3a747f 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -22,6 +22,7 @@
#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264)
#else
#define SYSCALL(func) .long sys_##func
@@ -29,6 +30,7 @@
#define PPC_SYS(func) .long ppc_##func
#define OLDSYS(func) .long sys_##func
#define SYS32ONLY(func) .long sys_##func
+#define PPC64ONLY(func) .long sys_ni_syscall
#define SYSX(f, f3264, f32) .long f32
#endif
#define SYSCALL_SPU(func) SYSCALL(func)
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 238aa63ced8f..2384129f5893 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -21,9 +21,11 @@
#ifdef CONFIG_PPC64
#define OLDSYS(func) -1
#define SYS32ONLY(func) -1
+#define PPC64ONLY(func) __NR_##func
#else
#define OLDSYS(func) __NR_old##func
#define SYS32ONLY(func) __NR_##func
+#define PPC64ONLY(func) -1
#endif
#define SYSX(f, f3264, f32) -1
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2d7b33fab953..56f44848b044 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -608,6 +608,12 @@ void arch_suspend_enable_irqs(void)
}
#endif
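+/* Convert timebase ticks to nanoseconds using the precomputed scale and shift */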
+unsigned long long tb_to_ns(unsigned long long ticks)
+{
+ return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
+}
+EXPORT_SYMBOL_GPL(tb_to_ns);
+
/*
* Scheduler clock - returns current time in nanosec units.
*
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2a324f4cb1b9..5754b226da7e 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -152,9 +152,9 @@ _GLOBAL(tm_reclaim)
addi r7, r3, THREAD_TRANSACT_VRSTATE
SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
- mfvscr vr0
+ mfvscr v0
li r6, VRSTATE_VSCR
- stvx vr0, r7, r6
+ stvx v0, r7, r6
dont_backup_vec:
mfspr r0, SPRN_VRSAVE
std r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -359,8 +359,8 @@ _GLOBAL(__tm_recheckpoint)
addi r8, r3, THREAD_VRSTATE
li r5, VRSTATE_VSCR
- lvx vr0, r8, r5
- mtvscr vr0
+ lvx v0, r8, r5
+ mtvscr v0
REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
dont_restore_vec:
ld r5, THREAD_VRSAVE(r3)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index b7aa07279a63..7cc38b5b58bc 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -46,8 +46,6 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
/* Maple real mode debug */
udbg_init_maple_realmode();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT)
- udbg_init_debug_beat();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
udbg_init_pas_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 74f8050518d6..f5c80d567d8d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -24,8 +24,8 @@ _GLOBAL(do_load_up_transact_altivec)
stw r4,THREAD_USED_VR(r3)
li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
- lvx vr0,r10,r3
- mtvscr vr0
+ lvx v0,r10,r3
+ mtvscr v0
addi r10,r3,THREAD_TRANSACT_VRSTATE
REST_32VRS(0,r4,r10)
@@ -52,8 +52,8 @@ _GLOBAL(vec_enable)
*/
_GLOBAL(load_vr_state)
li r4,VRSTATE_VSCR
- lvx vr0,r4,r3
- mtvscr vr0
+ lvx v0,r4,r3
+ mtvscr v0
REST_32VRS(0,r4,r3)
blr
@@ -63,9 +63,9 @@ _GLOBAL(load_vr_state)
*/
_GLOBAL(store_vr_state)
SAVE_32VRS(0, r4, r3)
- mfvscr vr0
+ mfvscr v0
li r4, VRSTATE_VSCR
- stvx vr0, r4, r3
+ stvx v0, r4, r3
blr
/*
@@ -104,9 +104,9 @@ _GLOBAL(load_up_altivec)
addi r4,r4,THREAD
addi r6,r4,THREAD_VRSTATE
SAVE_32VRS(0,r5,r6)
- mfvscr vr0
+ mfvscr v0
li r10,VRSTATE_VSCR
- stvx vr0,r10,r6
+ stvx v0,r10,r6
/* Disable VMX for last_task_used_altivec */
PPC_LL r5,PT_REGS(r4)
toreal(r5)
@@ -142,8 +142,8 @@ _GLOBAL(load_up_altivec)
li r4,1
li r10,VRSTATE_VSCR
stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r6
- mtvscr vr0
+ lvx v0,r10,r6
+ mtvscr v0
REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
/* Update last_task_used_altivec to 'current' */
@@ -186,9 +186,9 @@ _GLOBAL(giveup_altivec)
addi r7,r3,THREAD_VRSTATE
2: PPC_LCMPI 0,r5,0
SAVE_32VRS(0,r4,r7)
- mfvscr vr0
+ mfvscr v0
li r4,VRSTATE_VSCR
- stvx vr0,r4,r7
+ stvx v0,r4,r7
beq 1f
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index f096e72262f4..1db685104ffc 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -213,6 +213,7 @@ SECTIONS
*(.opd)
}
+ . = ALIGN(256);
.got : AT(ADDR(.got) - LOAD_OFFSET) {
__toc_start = .;
#ifndef CONFIG_RELOCATABLE
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 11850f310fb4..3caec2c42105 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -75,7 +75,7 @@ config KVM_BOOK3S_64
config KVM_BOOK3S_64_HV
tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
- depends on KVM_BOOK3S_64
+ depends on KVM_BOOK3S_64 && PPC_POWERNV
select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER
select CMA
@@ -110,6 +110,20 @@ config KVM_BOOK3S_64_PR
processor, including emulating 32-bit processors on a 64-bit
host.
+config KVM_BOOK3S_HV_EXIT_TIMING
+ bool "Detailed timing for hypervisor real-mode code"
+ depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS
+ ---help---
+ Calculate time taken for each vcpu in the real-mode guest entry,
+ exit, and interrupt handling code, plus time spent in the guest
+ and in nap mode due to idle (cede) while other threads are still
+ in the guest. The total, minimum and maximum times in nanoseconds
+ together with the number of executions are reported in debugfs in
+ kvm/vm#/vcpu#/timings. The overhead is of the order of 30 - 40
+ ns per exit on POWER8.
+
+ If unsure, say N.
+
config KVM_BOOKE_HV
bool
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index cfbcdc654201..453a8a47a467 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -821,6 +821,82 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
#endif
}
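+/*
+ * Emulate the H_LOGICAL_CI_LOAD hcall: read the value at the guest
+ * logical (cache-inhibited) address in r5, of the size given in r4,
+ * from the KVM MMIO bus and return it to the guest in r4. Unsupported
+ * sizes, or addresses no in-kernel device claims, are returned as
+ * H_TOO_HARD so the exit can be handled elsewhere.
+ * kvmppc_h_logical_ci_store() below is the corresponding write path.
+ */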
+int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
+{
+ unsigned long size = kvmppc_get_gpr(vcpu, 4);
+ unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+ u64 buf;
+ int ret;
+
+ if (!is_power_of_2(size) || (size > sizeof(buf)))
+ return H_TOO_HARD;
+
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ if (ret != 0)
+ return H_TOO_HARD;
+
+ switch (size) {
+ case 1:
+ kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
+ break;
+
+ case 2:
+ kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
+ break;
+
+ case 4:
+ kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
+ break;
+
+ case 8:
+ kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
+ break;
+
+ default:
+ BUG();
+ }
+
+ return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
+
+int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
+{
+ unsigned long size = kvmppc_get_gpr(vcpu, 4);
+ unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+ unsigned long val = kvmppc_get_gpr(vcpu, 6);
+ u64 buf;
+ int ret;
+
+ switch (size) {
+ case 1:
+ *(u8 *)&buf = val;
+ break;
+
+ case 2:
+ *(__be16 *)&buf = cpu_to_be16(val);
+ break;
+
+ case 4:
+ *(__be32 *)&buf = cpu_to_be32(val);
+ break;
+
+ case 8:
+ *(__be64 *)&buf = cpu_to_be64(val);
+ break;
+
+ default:
+ return H_TOO_HARD;
+ }
+
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ if (ret != 0)
+ return H_TOO_HARD;
+
+ return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
+
int kvmppc_core_check_processor_compat(void)
{
/*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 534acb3c6c3d..1a4acf8bf4f4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -27,6 +27,7 @@
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
+#include <linux/debugfs.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -116,12 +117,12 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
long order;
mutex_lock(&kvm->lock);
- if (kvm->arch.rma_setup_done) {
- kvm->arch.rma_setup_done = 0;
- /* order rma_setup_done vs. vcpus_running */
+ if (kvm->arch.hpte_setup_done) {
+ kvm->arch.hpte_setup_done = 0;
+ /* order hpte_setup_done vs. vcpus_running */
smp_mb();
if (atomic_read(&kvm->arch.vcpus_running)) {
- kvm->arch.rma_setup_done = 1;
+ kvm->arch.hpte_setup_done = 1;
goto out;
}
}
@@ -338,9 +339,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
- /* Unlock the HPTE */
- asm volatile("lwsync" : : : "memory");
- hptep[0] = cpu_to_be64(v);
+ unlock_hpte(hptep, v);
preempt_enable();
gpte->eaddr = eaddr;
@@ -469,8 +468,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
hpte[1] = be64_to_cpu(hptep[1]);
hpte[2] = r = rev->guest_rpte;
- asm volatile("lwsync" : : : "memory");
- hptep[0] = cpu_to_be64(hpte[0]);
+ unlock_hpte(hptep, hpte[0]);
preempt_enable();
if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -537,23 +535,21 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
/* if the guest wants write access, see if that is OK */
if (!writing && hpte_is_writable(r)) {
- unsigned int hugepage_shift;
pte_t *ptep, pte;
-
+ unsigned long flags;
/*
* We need to protect against page table destruction
- * while looking up and updating the pte.
+ * hugepage split and collapse.
*/
- rcu_read_lock_sched();
+ local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(current->mm->pgd,
- hva, &hugepage_shift);
+ hva, NULL);
if (ptep) {
- pte = kvmppc_read_update_linux_pte(ptep, 1,
- hugepage_shift);
+ pte = kvmppc_read_update_linux_pte(ptep, 1);
if (pte_write(pte))
write_ok = 1;
}
- rcu_read_unlock_sched();
+ local_irq_restore(flags);
}
}
@@ -621,7 +617,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hptep[1] = cpu_to_be64(r);
eieio();
- hptep[0] = cpu_to_be64(hpte[0]);
+ __unlock_hpte(hptep, hpte[0]);
asm volatile("ptesync" : : : "memory");
preempt_enable();
if (page && hpte_is_writable(r))
@@ -642,7 +638,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return ret;
out_unlock:
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
preempt_enable();
goto out_put;
}
@@ -771,7 +767,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
}
}
unlock_rmap(rmapp);
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
}
return 0;
}
@@ -857,7 +853,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
}
ret = 1;
}
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
} while ((i = j) != head);
unlock_rmap(rmapp);
@@ -974,8 +970,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
/* Now check and modify the HPTE */
if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
- /* unlock and continue */
- hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
continue;
}
@@ -996,9 +991,9 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
npages_dirty = n;
eieio();
}
- v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
+ v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
- hptep[0] = cpu_to_be64(v);
+ __unlock_hpte(hptep, v);
} while ((i = j) != head);
unlock_rmap(rmapp);
@@ -1218,8 +1213,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
r &= ~HPTE_GR_MODIFIED;
revp->guest_rpte = r;
}
- asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
- hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ unlock_hpte(hptp, be64_to_cpu(hptp[0]));
preempt_enable();
if (!(valid == want_valid && (first_pass || dirty)))
ok = 0;
@@ -1339,20 +1333,20 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
unsigned long tmp[2];
ssize_t nb;
long int err, ret;
- int rma_setup;
+ int hpte_setup;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
/* lock out vcpus from running while we're doing this */
mutex_lock(&kvm->lock);
- rma_setup = kvm->arch.rma_setup_done;
- if (rma_setup) {
- kvm->arch.rma_setup_done = 0; /* temporarily */
- /* order rma_setup_done vs. vcpus_running */
+ hpte_setup = kvm->arch.hpte_setup_done;
+ if (hpte_setup) {
+ kvm->arch.hpte_setup_done = 0; /* temporarily */
+ /* order hpte_setup_done vs. vcpus_running */
smp_mb();
if (atomic_read(&kvm->arch.vcpus_running)) {
- kvm->arch.rma_setup_done = 1;
+ kvm->arch.hpte_setup_done = 1;
mutex_unlock(&kvm->lock);
return -EBUSY;
}
@@ -1405,7 +1399,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
"r=%lx\n", ret, i, v, r);
goto out;
}
- if (!rma_setup && is_vrma_hpte(v)) {
+ if (!hpte_setup && is_vrma_hpte(v)) {
unsigned long psize = hpte_base_page_size(v, r);
unsigned long senc = slb_pgsize_encoding(psize);
unsigned long lpcr;
@@ -1414,7 +1408,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
(VRMA_VSID << SLB_VSID_SHIFT_1T);
lpcr = senc << (LPCR_VRMASD_SH - 4);
kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
- rma_setup = 1;
+ hpte_setup = 1;
}
++i;
hptp += 2;
@@ -1430,9 +1424,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
}
out:
- /* Order HPTE updates vs. rma_setup_done */
+ /* Order HPTE updates vs. hpte_setup_done */
smp_wmb();
- kvm->arch.rma_setup_done = rma_setup;
+ kvm->arch.hpte_setup_done = hpte_setup;
mutex_unlock(&kvm->lock);
if (err)
@@ -1495,6 +1489,141 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
return ret;
}
+struct debugfs_htab_state {
+ struct kvm *kvm;
+ struct mutex mutex;
+ unsigned long hpt_index;
+ int chars_left;
+ int buf_index;
+ char buf[64];
+};
+
+static int debugfs_htab_open(struct inode *inode, struct file *file)
+{
+ struct kvm *kvm = inode->i_private;
+ struct debugfs_htab_state *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ kvm_get_kvm(kvm);
+ p->kvm = kvm;
+ mutex_init(&p->mutex);
+ file->private_data = p;
+
+ return nonseekable_open(inode, file);
+}
+
+static int debugfs_htab_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_htab_state *p = file->private_data;
+
+ kvm_put_kvm(p->kvm);
+ kfree(p);
+ return 0;
+}
+
+static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct debugfs_htab_state *p = file->private_data;
+ ssize_t ret, r;
+ unsigned long i, n;
+ unsigned long v, hr, gr;
+ struct kvm *kvm;
+ __be64 *hptp;
+
+ ret = mutex_lock_interruptible(&p->mutex);
+ if (ret)
+ return ret;
+
+ if (p->chars_left) {
+ n = p->chars_left;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf + p->buf_index, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index += n;
+ buf += n;
+ len -= n;
+ ret = n;
+ if (r) {
+ if (!n)
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ kvm = p->kvm;
+ i = p->hpt_index;
+ hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) {
+ if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ continue;
+
+ /* lock the HPTE so it's stable and read it */
+ preempt_disable();
+ while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
+ cpu_relax();
+ v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
+ hr = be64_to_cpu(hptp[1]);
+ gr = kvm->arch.revmap[i].guest_rpte;
+ unlock_hpte(hptp, v);
+ preempt_enable();
+
+ if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ continue;
+
+ n = scnprintf(p->buf, sizeof(p->buf),
+ "%6lx %.16lx %.16lx %.16lx\n",
+ i, v, hr, gr);
+ p->chars_left = n;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index = n;
+ buf += n;
+ len -= n;
+ ret += n;
+ if (r) {
+ if (!ret)
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ p->hpt_index = i;
+
+ out:
+ mutex_unlock(&p->mutex);
+ return ret;
+}
+
+static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -EACCES;
+}
+
+static const struct file_operations debugfs_htab_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_htab_open,
+ .release = debugfs_htab_release,
+ .read = debugfs_htab_read,
+ .write = debugfs_htab_write,
+ .llseek = generic_file_llseek,
+};
+
+void kvmppc_mmu_debugfs_init(struct kvm *kvm)
+{
+ kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
+ kvm->arch.debugfs_dir, kvm,
+ &debugfs_htab_fops);
+}
+
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de747563d29d..48d3c5d2ecc9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -32,6 +32,7 @@
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -50,6 +51,7 @@
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
+#include <asm/dbell.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
@@ -83,9 +85,35 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+static bool kvmppc_ipi_thread(int cpu)
+{
+ /* On POWER8 for IPIs to threads in the same core, use msgsnd */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ preempt_disable();
+ if (cpu_first_thread_sibling(cpu) ==
+ cpu_first_thread_sibling(smp_processor_id())) {
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ msg |= cpu_thread_in_core(cpu);
+ smp_mb();
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ preempt_enable();
+ return true;
+ }
+ preempt_enable();
+ }
+
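+ /* Otherwise wake the target thread through its XICS IPI, if we can */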
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+ if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
+ xics_wake_cpu(cpu);
+ return true;
+ }
+#endif
+
+ return false;
+}
+
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
- int me;
int cpu = vcpu->cpu;
wait_queue_head_t *wqp;
@@ -95,20 +123,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
++vcpu->stat.halt_wakeup;
}
- me = get_cpu();
+ if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
+ return;
/* CPU points to the first thread of the core */
- if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
-#ifdef CONFIG_PPC_ICP_NATIVE
- int real_cpu = cpu + vcpu->arch.ptid;
- if (paca[real_cpu].kvm_hstate.xics_phys)
- xics_wake_cpu(real_cpu);
- else
-#endif
- if (cpu_online(cpu))
- smp_send_reschedule(cpu);
- }
- put_cpu();
+ if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
+ smp_send_reschedule(cpu);
}
/*
@@ -706,6 +726,16 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
/* Send the error out to userspace via KVM_RUN */
return rc;
+ case H_LOGICAL_CI_LOAD:
+ ret = kvmppc_h_logical_ci_load(vcpu);
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_LOGICAL_CI_STORE:
+ ret = kvmppc_h_logical_ci_store(vcpu);
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
case H_SET_MODE:
ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
@@ -740,6 +770,8 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_CONFER:
case H_REGISTER_VPA:
case H_SET_MODE:
+ case H_LOGICAL_CI_LOAD:
+ case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
case H_XIRR:
case H_CPPR:
@@ -1410,6 +1442,154 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
return vcore;
}
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+static struct debugfs_timings_element {
+ const char *name;
+ size_t offset;
+} timings[] = {
+ {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
+ {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
+ {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
+ {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
+ {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
+};
+
+#define N_TIMINGS (sizeof(timings) / sizeof(timings[0]))
+
+struct debugfs_timings_state {
+ struct kvm_vcpu *vcpu;
+ unsigned int buflen;
+ char buf[N_TIMINGS * 100];
+};
+
+static int debugfs_timings_open(struct inode *inode, struct file *file)
+{
+ struct kvm_vcpu *vcpu = inode->i_private;
+ struct debugfs_timings_state *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ kvm_get_kvm(vcpu->kvm);
+ p->vcpu = vcpu;
+ file->private_data = p;
+
+ return nonseekable_open(inode, file);
+}
+
+static int debugfs_timings_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_timings_state *p = file->private_data;
+
+ kvm_put_kvm(p->vcpu->kvm);
+ kfree(p);
+ return 0;
+}
+
+static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct debugfs_timings_state *p = file->private_data;
+ struct kvm_vcpu *vcpu = p->vcpu;
+ char *s, *buf_end;
+ struct kvmhv_tb_accumulator tb;
+ u64 count;
+ loff_t pos;
+ ssize_t n;
+ int i, loops;
+ bool ok;
+
+ if (!p->buflen) {
+ s = p->buf;
+ buf_end = s + sizeof(p->buf);
+ for (i = 0; i < N_TIMINGS; ++i) {
+ struct kvmhv_tb_accumulator *acc;
+
+ acc = (struct kvmhv_tb_accumulator *)
+ ((unsigned long)vcpu + timings[i].offset);
+ ok = false;
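+ /*
+ * Snapshot the accumulator locklessly: retry until the
+ * sequence count is even (no update in flight) and
+ * unchanged across the copy.
+ */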
+ for (loops = 0; loops < 1000; ++loops) {
+ count = acc->seqcount;
+ if (!(count & 1)) {
+ smp_rmb();
+ tb = *acc;
+ smp_rmb();
+ if (count == acc->seqcount) {
+ ok = true;
+ break;
+ }
+ }
+ udelay(1);
+ }
+ if (!ok)
+ snprintf(s, buf_end - s, "%s: stuck\n",
+ timings[i].name);
+ else
+ snprintf(s, buf_end - s,
+ "%s: %llu %llu %llu %llu\n",
+ timings[i].name, count / 2,
+ tb_to_ns(tb.tb_total),
+ tb_to_ns(tb.tb_min),
+ tb_to_ns(tb.tb_max));
+ s += strlen(s);
+ }
+ p->buflen = s - p->buf;
+ }
+
+ pos = *ppos;
+ if (pos >= p->buflen)
+ return 0;
+ if (len > p->buflen - pos)
+ len = p->buflen - pos;
+ n = copy_to_user(buf, p->buf + pos, len);
+ if (n) {
+ if (n == len)
+ return -EFAULT;
+ len -= n;
+ }
+ *ppos = pos + len;
+ return len;
+}
+
+static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -EACCES;
+}
+
+static const struct file_operations debugfs_timings_ops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_timings_open,
+ .release = debugfs_timings_release,
+ .read = debugfs_timings_read,
+ .write = debugfs_timings_write,
+ .llseek = generic_file_llseek,
+};
+
+/* Create a debugfs directory for the vcpu */
+static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
+{
+ char buf[16];
+ struct kvm *kvm = vcpu->kvm;
+
+ snprintf(buf, sizeof(buf), "vcpu%u", id);
+ if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
+ return;
+ vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
+ if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
+ return;
+ vcpu->arch.debugfs_timings =
+ debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
+ vcpu, &debugfs_timings_ops);
+}
+
+#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
+static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
+{
+}
+#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
+
static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
unsigned int id)
{
@@ -1479,6 +1659,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
+ debugfs_vcpu_init(vcpu, id);
+
return vcpu;
free_vcpu:
@@ -1566,8 +1748,10 @@ static int kvmppc_grab_hwthread(int cpu)
tpaca = &paca[cpu];
/* Ensure the thread won't go into the kernel if it wakes */
- tpaca->kvm_hstate.hwthread_req = 1;
tpaca->kvm_hstate.kvm_vcpu = NULL;
+ tpaca->kvm_hstate.napping = 0;
+ smp_wmb();
+ tpaca->kvm_hstate.hwthread_req = 1;
/*
* If the thread is already executing in the kernel (e.g. handling
@@ -1610,35 +1794,41 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
}
cpu = vc->pcpu + vcpu->arch.ptid;
tpaca = &paca[cpu];
- tpaca->kvm_hstate.kvm_vcpu = vcpu;
tpaca->kvm_hstate.kvm_vcore = vc;
tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
vcpu->cpu = vc->pcpu;
+ /* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
smp_wmb();
-#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
- if (cpu != smp_processor_id()) {
- xics_wake_cpu(cpu);
- if (vcpu->arch.ptid)
- ++vc->n_woken;
- }
-#endif
+ tpaca->kvm_hstate.kvm_vcpu = vcpu;
+ if (cpu != smp_processor_id())
+ kvmppc_ipi_thread(cpu);
}
-static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
+static void kvmppc_wait_for_nap(void)
{
- int i;
+ int cpu = smp_processor_id();
+ int i, loops;
- HMT_low();
- i = 0;
- while (vc->nap_count < vc->n_woken) {
- if (++i >= 1000000) {
- pr_err("kvmppc_wait_for_nap timeout %d %d\n",
- vc->nap_count, vc->n_woken);
- break;
+ for (loops = 0; loops < 1000000; ++loops) {
+ /*
+ * Check if all threads are finished.
+ * We set the vcpu pointer when starting a thread
+ * and the thread clears it when finished, so we look
+ * for any threads that still have a non-NULL vcpu ptr.
+ */
+ for (i = 1; i < threads_per_subcore; ++i)
+ if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+ break;
+ if (i == threads_per_subcore) {
+ HMT_medium();
+ return;
}
- cpu_relax();
+ HMT_low();
}
HMT_medium();
+ for (i = 1; i < threads_per_subcore; ++i)
+ if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+ pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
}
/*
@@ -1700,63 +1890,103 @@ static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}
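+/*
+ * Drop from the runnable list any vcpu whose task has a signal
+ * pending or which has a pending VPA, SLB-shadow or DTL update,
+ * and wake the corresponding task to deal with it.
+ */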
+static void prepare_threads(struct kvmppc_vcore *vc)
+{
+ struct kvm_vcpu *vcpu, *vnext;
+
+ list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+ arch.run_list) {
+ if (signal_pending(vcpu->arch.run_task))
+ vcpu->arch.ret = -EINTR;
+ else if (vcpu->arch.vpa.update_pending ||
+ vcpu->arch.slb_shadow.update_pending ||
+ vcpu->arch.dtl.update_pending)
+ vcpu->arch.ret = RESUME_GUEST;
+ else
+ continue;
+ kvmppc_remove_runnable(vc, vcpu);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+}
+
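+/*
+ * Clean up after the guest has run: cancel any now-stale decrementer
+ * exceptions, run the exit handler for each vcpu that trapped, and
+ * remove and wake any vcpus that are no longer runnable.
+ */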
+static void post_guest_process(struct kvmppc_vcore *vc)
+{
+ u64 now;
+ long ret;
+ struct kvm_vcpu *vcpu, *vnext;
+
+ now = get_tb();
+ list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+ arch.run_list) {
+ /* cancel pending dec exception if dec is positive */
+ if (now < vcpu->arch.dec_expires &&
+ kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_dequeue_dec(vcpu);
+
+ trace_kvm_guest_exit(vcpu);
+
+ ret = RESUME_GUEST;
+ if (vcpu->arch.trap)
+ ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+ vcpu->arch.run_task);
+
+ vcpu->arch.ret = ret;
+ vcpu->arch.trap = 0;
+
+ if (vcpu->arch.ceded) {
+ if (!is_kvmppc_resume_guest(ret))
+ kvmppc_end_cede(vcpu);
+ else
+ kvmppc_set_timer(vcpu);
+ }
+ if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
+ kvmppc_remove_runnable(vc, vcpu);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ }
+}
+
/*
* Run a set of guest threads on a physical core.
* Called with vc->lock held.
*/
-static void kvmppc_run_core(struct kvmppc_vcore *vc)
+static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
{
- struct kvm_vcpu *vcpu, *vnext;
- long ret;
- u64 now;
- int i, need_vpa_update;
+ struct kvm_vcpu *vcpu;
+ int i;
int srcu_idx;
- struct kvm_vcpu *vcpus_to_update[threads_per_core];
- /* don't start if any threads have a signal pending */
- need_vpa_update = 0;
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
- if (signal_pending(vcpu->arch.run_task))
- return;
- if (vcpu->arch.vpa.update_pending ||
- vcpu->arch.slb_shadow.update_pending ||
- vcpu->arch.dtl.update_pending)
- vcpus_to_update[need_vpa_update++] = vcpu;
- }
+ /*
+ * Remove from the list any threads that have a signal pending
+ * or need a VPA update done
+ */
+ prepare_threads(vc);
+
+ /* if the runner is no longer runnable, let the caller pick a new one */
+ if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
+ return;
/*
- * Initialize *vc, in particular vc->vcore_state, so we can
- * drop the vcore lock if necessary.
+ * Initialize *vc.
*/
- vc->n_woken = 0;
- vc->nap_count = 0;
- vc->entry_exit_count = 0;
+ vc->entry_exit_map = 0;
vc->preempt_tb = TB_NIL;
- vc->vcore_state = VCORE_STARTING;
vc->in_guest = 0;
vc->napping_threads = 0;
vc->conferring_threads = 0;
/*
- * Updating any of the vpas requires calling kvmppc_pin_guest_page,
- * which can't be called with any spinlocks held.
- */
- if (need_vpa_update) {
- spin_unlock(&vc->lock);
- for (i = 0; i < need_vpa_update; ++i)
- kvmppc_update_vpas(vcpus_to_update[i]);
- spin_lock(&vc->lock);
- }
-
- /*
* Make sure we are running on primary threads, and that secondary
* threads are offline. Also check if the number of threads in this
* guest are greater than the current system threads per guest.
*/
if ((threads_per_core > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
vcpu->arch.ret = -EBUSY;
+ kvmppc_remove_runnable(vc, vcpu);
+ wake_up(&vcpu->arch.cpu_run);
+ }
goto out;
}
@@ -1797,8 +2027,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
vcpu->cpu = -1;
/* wait for secondary threads to finish writing their state to memory */
- if (vc->nap_count < vc->n_woken)
- kvmppc_wait_for_nap(vc);
+ kvmppc_wait_for_nap();
for (i = 0; i < threads_per_subcore; ++i)
kvmppc_release_hwthread(vc->pcpu + i);
/* prevent other vcpu threads from doing kvmppc_start_thread() now */
@@ -1812,44 +2041,12 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
kvm_guest_exit();
preempt_enable();
- cond_resched();
spin_lock(&vc->lock);
- now = get_tb();
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
- /* cancel pending dec exception if dec is positive */
- if (now < vcpu->arch.dec_expires &&
- kvmppc_core_pending_dec(vcpu))
- kvmppc_core_dequeue_dec(vcpu);
-
- trace_kvm_guest_exit(vcpu);
-
- ret = RESUME_GUEST;
- if (vcpu->arch.trap)
- ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
- vcpu->arch.run_task);
-
- vcpu->arch.ret = ret;
- vcpu->arch.trap = 0;
-
- if (vcpu->arch.ceded) {
- if (!is_kvmppc_resume_guest(ret))
- kvmppc_end_cede(vcpu);
- else
- kvmppc_set_timer(vcpu);
- }
- }
+ post_guest_process(vc);
out:
vc->vcore_state = VCORE_INACTIVE;
- list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
- arch.run_list) {
- if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
- kvmppc_remove_runnable(vc, vcpu);
- wake_up(&vcpu->arch.cpu_run);
- }
- }
-
trace_kvmppc_run_core(vc, 1);
}
@@ -1939,8 +2136,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* this thread straight away and have it join in.
*/
if (!signal_pending(current)) {
- if (vc->vcore_state == VCORE_RUNNING &&
- VCORE_EXIT_COUNT(vc) == 0) {
+ if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
trace_kvm_guest_enter(vcpu);
@@ -1971,7 +2167,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
break;
- vc->runner = vcpu;
n_ceded = 0;
list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
if (!v->arch.pending_exceptions)
@@ -1979,10 +2174,17 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
else
v->arch.ceded = 0;
}
- if (n_ceded == vc->n_runnable)
+ vc->runner = vcpu;
+ if (n_ceded == vc->n_runnable) {
kvmppc_vcore_blocked(vc);
- else
+ } else if (should_resched()) {
+ vc->vcore_state = VCORE_PREEMPT;
+ /* Let something else run */
+ cond_resched_lock(&vc->lock);
+ vc->vcore_state = VCORE_INACTIVE;
+ } else {
kvmppc_run_core(vc);
+ }
vc->runner = NULL;
}
@@ -2032,11 +2234,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
atomic_inc(&vcpu->kvm->arch.vcpus_running);
- /* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
+ /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
smp_mb();
/* On the first time here, set up HTAB and VRMA */
- if (!vcpu->kvm->arch.rma_setup_done) {
+ if (!vcpu->kvm->arch.hpte_setup_done) {
r = kvmppc_hv_setup_htab_rma(vcpu);
if (r)
goto out;
@@ -2238,7 +2440,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
int srcu_idx;
mutex_lock(&kvm->lock);
- if (kvm->arch.rma_setup_done)
+ if (kvm->arch.hpte_setup_done)
goto out; /* another vcpu beat us to it */
/* Allocate hashed page table (if not done already) and reset it */
@@ -2289,9 +2491,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
- /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
+ /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
smp_wmb();
- kvm->arch.rma_setup_done = 1;
+ kvm->arch.hpte_setup_done = 1;
err = 0;
out_srcu:
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -2307,6 +2509,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
unsigned long lpcr, lpid;
+ char buf[32];
/* Allocate the guest's logical partition ID */
@@ -2347,6 +2550,14 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
*/
kvm_hv_vm_activated();
+ /*
+ * Create a debugfs directory for the VM
+ */
+ snprintf(buf, sizeof(buf), "vm%d", current->pid);
+ kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
+ if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
+ kvmppc_mmu_debugfs_init(kvm);
+
return 0;
}
@@ -2367,6 +2578,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
+ debugfs_remove_recursive(kvm->arch.debugfs_dir);
+
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
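
The book3s_hv.c hunks above replace the separate per-vcore counters (n_woken, nap_count, entry_exit_count) with a single entry_exit_map word. A standalone C sketch of how such a combined bitmap could be read, assuming (as the 0x100 tests and the "0xff00 bits" comment further down suggest) that bits 0-7 track threads that have entered the guest and bits 8-15 track threads that have begun exiting; the helper names are illustrative, not the kernel's actual macros:

#include <stdbool.h>
#include <stdint.h>

/* low byte: threads in the guest; high byte: threads exiting */
static inline uint32_t entry_map(uint32_t entry_exit_map)
{
	return entry_exit_map & 0xff;
}

static inline bool vcore_is_exiting(uint32_t entry_exit_map)
{
	return (entry_exit_map >> 8) != 0;
}
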
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 1f083ff8a61a..ed2589d4593f 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -21,6 +21,10 @@
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
+#include <asm/archrandom.h>
+#include <asm/xics.h>
+#include <asm/dbell.h>
+#include <asm/cputhreads.h>
#define KVM_CMA_CHUNK_ORDER 18
@@ -114,11 +118,11 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
int rv = H_SUCCESS; /* => don't yield */
set_bit(vcpu->arch.ptid, &vc->conferring_threads);
- while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
- threads_running = VCORE_ENTRY_COUNT(vc);
- threads_ceded = hweight32(vc->napping_threads);
- threads_conferring = hweight32(vc->conferring_threads);
- if (threads_ceded + threads_conferring >= threads_running) {
+ while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
+ threads_running = VCORE_ENTRY_MAP(vc);
+ threads_ceded = vc->napping_threads;
+ threads_conferring = vc->conferring_threads;
+ if ((threads_ceded | threads_conferring) == threads_running) {
rv = H_TOO_HARD; /* => do yield */
break;
}
@@ -169,3 +173,89 @@ int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
+
+int kvmppc_hwrng_present(void)
+{
+ return powernv_hwrng_present();
+}
+EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
+
+long kvmppc_h_random(struct kvm_vcpu *vcpu)
+{
+ if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
+ return H_SUCCESS;
+
+ return H_HARDWARE;
+}
+
+static inline void rm_writeb(unsigned long paddr, u8 val)
+{
+ __asm__ __volatile__("stbcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+/*
+ * Send an interrupt or message to another CPU.
+ * This can only be called in real mode.
+ * The caller needs to include any barrier needed to order writes
+ * to memory vs. the IPI/message.
+ */
+void kvmhv_rm_send_ipi(int cpu)
+{
+ unsigned long xics_phys;
+
+ /* On POWER8 for IPIs to threads in the same core, use msgsnd */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ cpu_first_thread_sibling(cpu) ==
+ cpu_first_thread_sibling(raw_smp_processor_id())) {
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ msg |= cpu_thread_in_core(cpu);
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ return;
+ }
+
+ /* Else poke the target with an IPI */
+ xics_phys = paca[cpu].kvm_hstate.xics_phys;
+ rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+}
+
+/*
+ * The following functions are called from the assembly code
+ * in book3s_hv_rmhandlers.S.
+ */
+static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
+{
+ int cpu = vc->pcpu;
+
+ /* Order setting of exit map vs. msgsnd/IPI */
+ smp_mb();
+ for (; active; active >>= 1, ++cpu)
+ if (active & 1)
+ kvmhv_rm_send_ipi(cpu);
+}
+
+void kvmhv_commence_exit(int trap)
+{
+ struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
+ int ptid = local_paca->kvm_hstate.ptid;
+ int me, ee;
+
+ /*
+ * Set our bit in the threads-exiting-guest map in the 0xff00
+ * bits of vcore->entry_exit_map.
+ */
+ me = 0x100 << ptid;
+ do {
+ ee = vc->entry_exit_map;
+ } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);
+
+ /* Are we the first here? */
+ if ((ee >> 8) != 0)
+ return;
+
+ /*
+ * Trigger the other threads in this vcore to exit the guest.
+ * If this is a hypervisor decrementer interrupt then they
+ * will already be on their way out of the guest.
+ */
+ if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
+ kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
+}
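
kvmhv_commence_exit() above uses a cmpxchg loop so that only the first thread to set an exit bit goes on to interrupt its siblings. A minimal userspace restatement with C11 atomics (illustrative only; the kernel uses its own cmpxchg() on vc->entry_exit_map):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true only for the thread that set the first exit bit. */
static bool commence_exit(atomic_uint *entry_exit_map, int ptid)
{
	unsigned int me = 0x100u << ptid;	/* our bit in the exit byte */
	unsigned int ee = atomic_load(entry_exit_map);

	while (!atomic_compare_exchange_weak(entry_exit_map, &ee, ee | me))
		;	/* ee now holds the latest value; retry */

	return (ee >> 8) == 0;	/* were we the first to start exiting? */
}
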
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 60081bd75847..93b5f5c9b445 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -84,7 +84,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
}
if (dsisr & DSISR_MC_TLB_MULTI) {
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
dsisr &= ~DSISR_MC_TLB_MULTI;
}
/* Any other errors we don't understand? */
@@ -102,7 +102,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
break;
case SRR1_MC_IFETCH_TLBMULTI:
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
break;
default:
handled = 0;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 625407e4d3b0..b027a89737b6 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -26,11 +26,14 @@ static void *real_vmalloc_addr(void *x)
{
unsigned long addr = (unsigned long) x;
pte_t *p;
-
- p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
+ /*
+ * Assume we don't have huge pages in vmalloc space, so don't
+ * worry about THP collapse/split. Called only in real mode,
+ * hence no need for irq_save/restore.
+ */
+ p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
if (!p || !pte_present(*p))
return NULL;
- /* assume we don't have huge pages in vmalloc space... */
addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
return __va(addr);
}
@@ -131,31 +134,6 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
unlock_rmap(rmap);
}
-static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
- int writing, unsigned long *pte_sizep)
-{
- pte_t *ptep;
- unsigned long ps = *pte_sizep;
- unsigned int hugepage_shift;
-
- ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
- if (!ptep)
- return __pte(0);
- if (hugepage_shift)
- *pte_sizep = 1ul << hugepage_shift;
- else
- *pte_sizep = PAGE_SIZE;
- if (ps > *pte_sizep)
- return __pte(0);
- return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
-}
-
-static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
-{
- asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
- hpte[0] = cpu_to_be64(hpte_v);
-}
-
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel,
pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
@@ -166,13 +144,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
struct revmap_entry *rev;
unsigned long g_ptel;
struct kvm_memory_slot *memslot;
- unsigned long pte_size;
+ unsigned hpage_shift;
unsigned long is_io;
unsigned long *rmap;
- pte_t pte;
+ pte_t *ptep;
unsigned int writing;
unsigned long mmu_seq;
- unsigned long rcbits;
+ unsigned long rcbits, irq_flags = 0;
psize = hpte_page_size(pteh, ptel);
if (!psize)
@@ -208,22 +186,46 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
-
- /* Look up the Linux PTE for the backing page */
- pte_size = psize;
- pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
- if (pte_present(pte) && !pte_protnone(pte)) {
- if (writing && !pte_write(pte))
- /* make the actual HPTE be read-only */
- ptel = hpte_make_readonly(ptel);
- is_io = hpte_cache_bits(pte_val(pte));
- pa = pte_pfn(pte) << PAGE_SHIFT;
- pa |= hva & (pte_size - 1);
- pa |= gpa & ~PAGE_MASK;
+ /*
+ * If we had a page table change after lookup, we would
+ * retry via mmu_notifier_retry.
+ */
+ if (realmode)
+ ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+ else {
+ local_irq_save(irq_flags);
+ ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
}
+ if (ptep) {
+ pte_t pte;
+ unsigned int host_pte_size;
- if (pte_size < psize)
- return H_PARAMETER;
+ if (hpage_shift)
+ host_pte_size = 1ul << hpage_shift;
+ else
+ host_pte_size = PAGE_SIZE;
+ /*
+ * The guest page size should always be <= the host
+ * page size, even when the host is using hugepages.
+ */
+ if (host_pte_size < psize) {
+ if (!realmode)
+ local_irq_restore(irq_flags);
+ return H_PARAMETER;
+ }
+ pte = kvmppc_read_update_linux_pte(ptep, writing);
+ if (pte_present(pte) && !pte_protnone(pte)) {
+ if (writing && !pte_write(pte))
+ /* make the actual HPTE be read-only */
+ ptel = hpte_make_readonly(ptel);
+ is_io = hpte_cache_bits(pte_val(pte));
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (host_pte_size - 1);
+ pa |= gpa & ~PAGE_MASK;
+ }
+ }
+ if (!realmode)
+ local_irq_restore(irq_flags);
ptel &= ~(HPTE_R_PP0 - psize);
ptel |= pa;
@@ -271,10 +273,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
u64 pte;
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- pte = be64_to_cpu(*hpte);
+ pte = be64_to_cpu(hpte[0]);
if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
break;
- *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hpte, pte);
hpte += 2;
}
if (i == 8)
@@ -290,9 +292,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- pte = be64_to_cpu(*hpte);
+ pte = be64_to_cpu(hpte[0]);
if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
- *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hpte, pte);
return H_PTEG_FULL;
}
}
@@ -331,7 +333,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Write the first HPTE dword, unlocking the HPTE and making it valid */
eieio();
- hpte[0] = cpu_to_be64(pteh);
+ __unlock_hpte(hpte, pteh);
asm volatile("ptesync" : : : "memory");
*pte_idx_ret = pte_index;
@@ -412,7 +414,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
((flags & H_ANDCOND) && (pte & avpn) != 0)) {
- hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hpte, pte);
return H_NOT_FOUND;
}
@@ -548,7 +550,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
- hp[0] = 0;
+ __unlock_hpte(hp, 0);
}
}
@@ -574,7 +576,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
pte = be64_to_cpu(hpte[0]);
if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
- hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ __unlock_hpte(hpte, pte);
return H_NOT_FOUND;
}
@@ -755,8 +757,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
/* Return with the HPTE still locked */
return (hash << 3) + (i >> 1);
- /* Unlock and move on */
- hpte[i] = cpu_to_be64(v);
+ __unlock_hpte(&hpte[i], v);
}
if (val & HPTE_V_SECONDARY)
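
The kvmppc_do_h_enter() changes above derive the host mapping size from the hugepage shift and reject the H_ENTER with H_PARAMETER if the guest HPTE page size is larger. A standalone restatement of that check (the PAGE_SIZE value is assumed for the sketch only):

#include <stdbool.h>

#define PAGE_SIZE 4096ul	/* assumption: 4K base pages, sketch only */

static bool host_page_covers(unsigned int hpage_shift, unsigned long psize)
{
	unsigned long host_pte_size = hpage_shift ? 1ul << hpage_shift
						  : PAGE_SIZE;
	return host_pte_size >= psize;	/* guest page must fit in the host page */
}
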
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 7c22997de906..00e45b6d4f24 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -23,17 +23,37 @@
#define DEBUG_PASSUP
-static inline void rm_writeb(unsigned long paddr, u8 val)
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u32 new_irq);
+
+/* -- ICS routines -- */
+static void ics_rm_check_resend(struct kvmppc_xics *xics,
+ struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
- __asm__ __volatile__("sync; stbcix %0,0,%1"
- : : "r" (val), "r" (paddr) : "memory");
+ int i;
+
+ arch_spin_lock(&ics->lock);
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct ics_irq_state *state = &ics->irq_state[i];
+
+ if (!state->resend)
+ continue;
+
+ arch_spin_unlock(&ics->lock);
+ icp_rm_deliver_irq(xics, icp, state->number);
+ arch_spin_lock(&ics->lock);
+ }
+
+ arch_spin_unlock(&ics->lock);
}
+/* -- ICP routines -- */
+
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
struct kvm_vcpu *this_vcpu)
{
struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
- unsigned long xics_phys;
int cpu;
/* Mark the target VCPU as having an interrupt pending */
@@ -56,9 +76,8 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
/* In SMT cpu will always point to thread 0, we adjust it */
cpu += vcpu->arch.ptid;
- /* Not too hard, then poke the target */
- xics_phys = paca[cpu].kvm_hstate.xics_phys;
- rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ smp_mb();
+ kvmhv_rm_send_ipi(cpu);
}
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
@@ -116,6 +135,180 @@ static inline int check_too_hard(struct kvmppc_xics *xics,
return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}
+static void icp_rm_check_resend(struct kvmppc_xics *xics,
+ struct kvmppc_icp *icp)
+{
+ u32 icsid;
+
+ /* Order this load with the test for need_resend in the caller */
+ smp_rmb();
+ for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
+ struct kvmppc_ics *ics = xics->ics[icsid];
+
+ if (!test_and_clear_bit(icsid, icp->resend_map))
+ continue;
+ if (!ics)
+ continue;
+ ics_rm_check_resend(xics, ics, icp);
+ }
+}
+
+static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
+ u32 *reject)
+{
+ union kvmppc_icp_state old_state, new_state;
+ bool success;
+
+ do {
+ old_state = new_state = READ_ONCE(icp->state);
+
+ *reject = 0;
+
+ /* See if we can deliver */
+ success = new_state.cppr > priority &&
+ new_state.mfrr > priority &&
+ new_state.pending_pri > priority;
+
+ /*
+ * If we can, check for a rejection and perform the
+ * delivery
+ */
+ if (success) {
+ *reject = new_state.xisr;
+ new_state.xisr = irq;
+ new_state.pending_pri = priority;
+ } else {
+ /*
+ * If we failed to deliver we set need_resend
+ * so a subsequent CPPR state change causes us
+ * to try a new delivery.
+ */
+ new_state.need_resend = true;
+ }
+
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ return success;
+}
+
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u32 new_irq)
+{
+ struct ics_irq_state *state;
+ struct kvmppc_ics *ics;
+ u32 reject;
+ u16 src;
+
+ /*
+ * This is used both for initial delivery of an interrupt and
+ * for subsequent rejection.
+ *
+ * Rejection can be racy vs. resends. We have evaluated the
+ * rejection in an atomic ICP transaction which is now complete,
+ * so potentially the ICP can already accept the interrupt again.
+ *
+ * So we need to retry the delivery. Essentially the reject path
+ * boils down to a failed delivery. Always.
+ *
+ * Now the interrupt could also have moved to a different target,
+ * thus we may need to re-do the ICP lookup as well
+ */
+
+ again:
+ /* Get the ICS state and lock it */
+ ics = kvmppc_xics_find_ics(xics, new_irq, &src);
+ if (!ics) {
+ /* Unsafe increment, but this does not need to be accurate */
+ xics->err_noics++;
+ return;
+ }
+ state = &ics->irq_state[src];
+
+ /* Get a lock on the ICS */
+ arch_spin_lock(&ics->lock);
+
+ /* Get our server */
+ if (!icp || state->server != icp->server_num) {
+ icp = kvmppc_xics_find_server(xics->kvm, state->server);
+ if (!icp) {
+ /* Unsafe increment again */
+ xics->err_noicp++;
+ goto out;
+ }
+ }
+
+ /* Clear the resend bit of that interrupt */
+ state->resend = 0;
+
+ /*
+ * If masked, bail out
+ *
+ * Note: PAPR doesn't mention anything about masked pending
+ * when doing a resend, only when doing a delivery.
+ *
+ * However that would have the effect of losing a masked
+ * interrupt that was rejected and isn't consistent with
+ * the whole masked_pending business which is about not
+ * losing interrupts that occur while masked.
+ *
+ * I don't differentiate normal deliveries and resends, so this
+ * implementation will differ from PAPR and not lose such
+ * interrupts.
+ */
+ if (state->priority == MASKED) {
+ state->masked_pending = 1;
+ goto out;
+ }
+
+ /*
+ * Try the delivery, this will set the need_resend flag
+ * in the ICP as part of the atomic transaction if the
+ * delivery is not possible.
+ *
+ * Note that if successful, the new delivery might have itself
+ * rejected an interrupt that was "delivered" before we took the
+ * ics spin lock.
+ *
+ * In this case we do the whole sequence all over again for the
+ * new guy. We cannot assume that the rejected interrupt is less
+ * favored than the new one, and thus doesn't need to be delivered,
+ * because by the time we exit icp_rm_try_to_deliver() the target
+ * processor may well have already consumed & completed it, and thus
+ * the rejected interrupt might actually be already acceptable.
+ */
+ if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
+ /*
+ * Delivery was successful, did we reject somebody else ?
+ */
+ if (reject && reject != XICS_IPI) {
+ arch_spin_unlock(&ics->lock);
+ new_irq = reject;
+ goto again;
+ }
+ } else {
+ /*
+ * We failed to deliver the interrupt we need to set the
+ * resend map bit and mark the ICS state as needing a resend
+ */
+ set_bit(ics->icsid, icp->resend_map);
+ state->resend = 1;
+
+ /*
+ * If the need_resend flag got cleared in the ICP some time
+ * between the icp_rm_try_to_deliver() atomic update and now,
+ * then we know it might have missed the resend_map bit. So we
+ * retry.
+ */
+ smp_mb();
+ if (!icp->state.need_resend) {
+ arch_spin_unlock(&ics->lock);
+ goto again;
+ }
+ }
+ out:
+ arch_spin_unlock(&ics->lock);
+}
+
static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
u8 new_cppr)
{
@@ -184,8 +377,8 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* separately here as well.
*/
if (resend) {
- icp->rm_action |= XICS_RM_CHECK_RESEND;
- icp->rm_resend_icp = icp;
+ icp->n_check_resend++;
+ icp_rm_check_resend(xics, icp);
}
}
@@ -300,16 +493,16 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
}
} while (!icp_rm_try_update(icp, old_state, new_state));
- /* Pass rejects to virtual mode */
+ /* Handle reject in real mode */
if (reject && reject != XICS_IPI) {
- this_icp->rm_action |= XICS_RM_REJECT;
- this_icp->rm_reject = reject;
+ this_icp->n_reject++;
+ icp_rm_deliver_irq(xics, icp, reject);
}
- /* Pass resends to virtual mode */
+ /* Handle resends in real mode */
if (resend) {
- this_icp->rm_action |= XICS_RM_CHECK_RESEND;
- this_icp->rm_resend_icp = icp;
+ this_icp->n_check_resend++;
+ icp_rm_check_resend(xics, icp);
}
return check_too_hard(xics, this_icp);
@@ -365,10 +558,13 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
} while (!icp_rm_try_update(icp, old_state, new_state));
- /* Pass rejects to virtual mode */
+ /*
+ * Check for rejects. They are handled by doing a new delivery
+ * attempt (see comments in icp_rm_deliver_irq).
+ */
if (reject && reject != XICS_IPI) {
- icp->rm_action |= XICS_RM_REJECT;
- icp->rm_reject = reject;
+ icp->n_reject++;
+ icp_rm_deliver_irq(xics, icp, reject);
}
bail:
return check_too_hard(xics, icp);
@@ -416,10 +612,10 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
goto bail;
state = &ics->irq_state[src];
- /* Still asserted, resend it, we make it look like a reject */
+ /* Still asserted, resend it */
if (state->asserted) {
- icp->rm_action |= XICS_RM_REJECT;
- icp->rm_reject = irq;
+ icp->n_reject++;
+ icp_rm_deliver_irq(xics, icp, irq);
}
if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
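
icp_rm_try_to_deliver() above only presents an interrupt when its priority is more favoured (numerically lower) than the ICP's CPPR, MFRR and any already-pending priority, retrying the compare-and-swap until the new state sticks. The priority test in isolation, as a hedged userspace sketch (the struct only mirrors the fields used in the hunk):

#include <stdbool.h>
#include <stdint.h>

struct icp_state_sketch {
	uint8_t cppr;		/* current processor priority */
	uint8_t mfrr;		/* most-favoured request (IPI) priority */
	uint8_t pending_pri;	/* priority of any pending interrupt */
};

static bool can_deliver(const struct icp_state_sketch *s, uint8_t priority)
{
	/* lower numeric value == more favoured priority */
	return s->cppr > priority &&
	       s->mfrr > priority &&
	       s->pending_pri > priority;
}
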
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 6cbf1630cb70..4d70df26c402 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -172,6 +172,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
+ /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+ mfspr r3, SPRN_HDEC
+ mtspr SPRN_DEC, r3
+ /*
+ * Make sure the primary has finished the MMU switch.
+ * We should never get here on a secondary thread, but
+ * check it for robustness' sake.
+ */
+ ld r5, HSTATE_KVM_VCORE(r13)
+65: lbz r0, VCORE_IN_GUEST(r5)
+ cmpwi r0, 0
+ beq 65b
+ /* Set LPCR. */
+ ld r8,VCORE_LPCR(r5)
+ mtspr SPRN_LPCR,r8
+ isync
/* set our bit in napping_threads */
ld r5, HSTATE_KVM_VCORE(r13)
lbz r7, HSTATE_PTID(r13)
@@ -182,7 +198,7 @@ kvmppc_primary_no_guest:
or r3, r3, r0
stwcx. r3, 0, r6
bne 1b
- /* order napping_threads update vs testing entry_exit_count */
+ /* order napping_threads update vs testing entry_exit_map */
isync
li r12, 0
lwz r7, VCORE_ENTRY_EXIT(r5)
@@ -191,6 +207,7 @@ kvmppc_primary_no_guest:
li r3, NAPPING_NOVCPU
stb r3, HSTATE_NAPPING(r13)
+ li r3, 0 /* Don't wake on privileged (OS) doorbell */
b kvm_do_nap
kvm_novcpu_wakeup:
@@ -202,7 +219,7 @@ kvm_novcpu_wakeup:
/* check the wake reason */
bl kvmppc_check_wake_reason
-
+
/* see if any other thread is already exiting */
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
@@ -222,13 +239,37 @@ kvm_novcpu_wakeup:
cmpdi r3, 0
bge kvm_novcpu_exit
+ /* See if our timeslice has expired (HDEC is negative) */
+ mfspr r0, SPRN_HDEC
+ li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
+ cmpwi r0, 0
+ blt kvm_novcpu_exit
+
/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
ld r4, HSTATE_KVM_VCPU(r13)
cmpdi r4, 0
- bne kvmppc_got_guest
+ beq kvmppc_primary_no_guest
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMENTRY
+ bl kvmhv_start_timing
+#endif
+ b kvmppc_got_guest
kvm_novcpu_exit:
- b hdec_soon
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ ld r4, HSTATE_KVM_VCPU(r13)
+ cmpdi r4, 0
+ beq 13f
+ addi r3, r4, VCPU_TB_RMEXIT
+ bl kvmhv_accumulate_time
+#endif
+13: mr r3, r12
+ stw r12, 112-4(r1)
+ bl kvmhv_commence_exit
+ nop
+ lwz r12, 112-4(r1)
+ b kvmhv_switch_to_host
/*
* We come in here when wakened from nap mode.
@@ -239,9 +280,9 @@ kvm_novcpu_exit:
kvm_start_guest:
/* Set runlatch bit the minute you wake up from nap */
- mfspr r1, SPRN_CTRLF
- ori r1, r1, 1
- mtspr SPRN_CTRLT, r1
+ mfspr r0, SPRN_CTRLF
+ ori r0, r0, 1
+ mtspr SPRN_CTRLT, r0
ld r2,PACATOC(r13)
@@ -286,26 +327,21 @@ kvm_secondary_got_guest:
ld r6, PACA_DSCR(r13)
std r6, HSTATE_DSCR(r13)
+ /* Order load of vcore, ptid etc. after load of vcpu */
+ lwsync
bl kvmppc_hv_entry
/* Back from the guest, go back to nap */
/* Clear our vcpu pointer so we don't come back in early */
li r0, 0
- std r0, HSTATE_KVM_VCPU(r13)
/*
- * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
- * the nap_count, because once the increment to nap_count is
- * visible we could be given another vcpu.
+ * Once we clear HSTATE_KVM_VCPU(r13), the code in
+ * kvmppc_run_core() is going to assume that all our vcpu
+ * state is visible in memory. This lwsync makes sure
+ * that this is the case.
*/
lwsync
-
- /* increment the nap count and then go to nap mode */
- ld r4, HSTATE_KVM_VCORE(r13)
- addi r4, r4, VCORE_NAP_COUNT
-51: lwarx r3, 0, r4
- addi r3, r3, 1
- stwcx. r3, 0, r4
- bne 51b
+ std r0, HSTATE_KVM_VCPU(r13)
/*
* At this point we have finished executing in the guest.
@@ -376,6 +412,14 @@ kvmppc_hv_entry:
li r6, KVM_GUEST_MODE_HOST_HV
stb r6, HSTATE_IN_GUEST(r13)
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ /* Store initial timestamp */
+ cmpdi r4, 0
+ beq 1f
+ addi r3, r4, VCPU_TB_RMENTRY
+ bl kvmhv_start_timing
+1:
+#endif
/* Clear out SLB */
li r6,0
slbmte r6,r6
@@ -387,21 +431,23 @@ kvmppc_hv_entry:
* We don't have to lock against concurrent tlbies,
* but we do have to coordinate across hardware threads.
*/
- /* Increment entry count iff exit count is zero. */
- ld r5,HSTATE_KVM_VCORE(r13)
- addi r9,r5,VCORE_ENTRY_EXIT
-21: lwarx r3,0,r9
- cmpwi r3,0x100 /* any threads starting to exit? */
+ /* Set bit in entry map iff exit map is zero. */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ li r7, 1
+ lbz r6, HSTATE_PTID(r13)
+ sld r7, r7, r6
+ addi r9, r5, VCORE_ENTRY_EXIT
+21: lwarx r3, 0, r9
+ cmpwi r3, 0x100 /* any threads starting to exit? */
bge secondary_too_late /* if so we're too late to the party */
- addi r3,r3,1
- stwcx. r3,0,r9
+ or r3, r3, r7
+ stwcx. r3, 0, r9
bne 21b
/* Primary thread switches to guest partition. */
ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
- lbz r6,HSTATE_PTID(r13)
cmpwi r6,0
- bne 20f
+ bne 10f
ld r6,KVM_SDR1(r9)
lwz r7,KVM_LPID(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
@@ -472,28 +518,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
- b 10f
-
- /* Secondary threads wait for primary to have done partition switch */
-20: lbz r0,VCORE_IN_GUEST(r5)
- cmpwi r0,0
- beq 20b
-
- /* Set LPCR and RMOR. */
-10: ld r8,VCORE_LPCR(r5)
- mtspr SPRN_LPCR,r8
- ld r8,KVM_RMOR(r9)
- mtspr SPRN_RMOR,r8
- isync
-
- /* Check if HDEC expires soon */
- mfspr r3,SPRN_HDEC
- cmpwi r3,512 /* 1 microsecond */
- li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- blt hdec_soon
/* Do we have a guest vcpu to run? */
- cmpdi r4, 0
+10: cmpdi r4, 0
beq kvmppc_primary_no_guest
kvmppc_got_guest:
@@ -818,6 +845,30 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
clrrdi r6,r6,1
mtspr SPRN_CTRLT,r6
4:
+ /* Secondary threads wait for primary to have done partition switch */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lbz r6, HSTATE_PTID(r13)
+ cmpwi r6, 0
+ beq 21f
+ lbz r0, VCORE_IN_GUEST(r5)
+ cmpwi r0, 0
+ bne 21f
+ HMT_LOW
+20: lbz r0, VCORE_IN_GUEST(r5)
+ cmpwi r0, 0
+ beq 20b
+ HMT_MEDIUM
+21:
+ /* Set LPCR. */
+ ld r8,VCORE_LPCR(r5)
+ mtspr SPRN_LPCR,r8
+ isync
+
+ /* Check if HDEC expires soon */
+ mfspr r3, SPRN_HDEC
+ cmpwi r3, 512 /* 1 microsecond */
+ blt hdec_soon
+
ld r6, VCPU_CTR(r4)
lwz r7, VCPU_XER(r4)
@@ -880,6 +931,12 @@ fast_guest_return:
li r9, KVM_GUEST_MODE_GUEST_HV
stb r9, HSTATE_IN_GUEST(r13)
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ /* Accumulate timing */
+ addi r3, r4, VCPU_TB_GUEST
+ bl kvmhv_accumulate_time
+#endif
+
/* Enter guest */
BEGIN_FTR_SECTION
@@ -917,6 +974,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
hrfid
b .
+secondary_too_late:
+ li r12, 0
+ cmpdi r4, 0
+ beq 11f
+ stw r12, VCPU_TRAP(r4)
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMEXIT
+ bl kvmhv_accumulate_time
+#endif
+11: b kvmhv_switch_to_host
+
+hdec_soon:
+ li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
+ stw r12, VCPU_TRAP(r4)
+ mr r9, r4
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMEXIT
+ bl kvmhv_accumulate_time
+#endif
+ b guest_exit_cont
+
/******************************************************************************
* *
* Exit code *
@@ -1002,6 +1080,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
stw r12,VCPU_TRAP(r9)
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r9, VCPU_TB_RMINTR
+ mr r4, r9
+ bl kvmhv_accumulate_time
+ ld r5, VCPU_GPR(R5)(r9)
+ ld r6, VCPU_GPR(R6)(r9)
+ ld r7, VCPU_GPR(R7)(r9)
+ ld r8, VCPU_GPR(R8)(r9)
+#endif
+
/* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
@@ -1028,34 +1116,37 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
bne 2f
mfspr r3,SPRN_HDEC
cmpwi r3,0
- bge ignore_hdec
+ mr r4,r9
+ bge fast_guest_return
2:
/* See if this is an hcall we can handle in real mode */
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
+ /* Hypervisor doorbell - exit only if host IPI flag set */
+ cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
+ bne 3f
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ beq 4f
+ b guest_exit_cont
+3:
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- bne+ ext_interrupt_to_host
+ bne+ guest_exit_cont
/* External interrupt, first check for host_ipi. If this is
* set, we know the host wants us out so let's do it now
*/
bl kvmppc_read_intr
cmpdi r3, 0
- bgt ext_interrupt_to_host
+ bgt guest_exit_cont
/* Check if any CPU is heading out to the host, if so head out too */
- ld r5, HSTATE_KVM_VCORE(r13)
+4: ld r5, HSTATE_KVM_VCORE(r13)
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
- bge ext_interrupt_to_host
-
- /* Return to guest after delivering any pending interrupt */
mr r4, r9
- b deliver_guest_interrupt
-
-ext_interrupt_to_host:
+ blt deliver_guest_interrupt
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save more register state */
@@ -1065,7 +1156,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r7, VCPU_DSISR(r9)
/* don't overwrite fault_dar/fault_dsisr if HDSI */
cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
- beq 6f
+ beq mc_cont
std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)
@@ -1073,9 +1164,20 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq machine_check_realmode
mc_cont:
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r9, VCPU_TB_RMEXIT
+ mr r4, r9
+ bl kvmhv_accumulate_time
+#endif
+
+ /* Increment exit count, poke other threads to exit */
+ bl kvmhv_commence_exit
+ nop
+ ld r9, HSTATE_KVM_VCPU(r13)
+ lwz r12, VCPU_TRAP(r9)
/* Save guest CTRL register, set runlatch to 1 */
-6: mfspr r6,SPRN_CTRLF
+ mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
andi. r0,r6,1
bne 4f
@@ -1417,68 +1519,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
slbia
ptesync
-hdec_soon: /* r12 = trap, r13 = paca */
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
*/
- /* Increment the threads-exiting-guest count in the 0xff00
- bits of vcore->entry_exit_count */
- ld r5,HSTATE_KVM_VCORE(r13)
- addi r6,r5,VCORE_ENTRY_EXIT
-41: lwarx r3,0,r6
- addi r0,r3,0x100
- stwcx. r0,0,r6
- bne 41b
- isync /* order stwcx. vs. reading napping_threads */
-
- /*
- * At this point we have an interrupt that we have to pass
- * up to the kernel or qemu; we can't handle it in real mode.
- * Thus we have to do a partition switch, so we have to
- * collect the other threads, if we are the first thread
- * to take an interrupt. To do this, we set the HDEC to 0,
- * which causes an HDEC interrupt in all threads within 2ns
- * because the HDEC register is shared between all 4 threads.
- * However, we don't need to bother if this is an HDEC
- * interrupt, since the other threads will already be on their
- * way here in that case.
- */
- cmpwi r3,0x100 /* Are we the first here? */
- bge 43f
- cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- beq 40f
- li r0,0
- mtspr SPRN_HDEC,r0
-40:
- /*
- * Send an IPI to any napping threads, since an HDEC interrupt
- * doesn't wake CPUs up from nap.
- */
- lwz r3,VCORE_NAPPING_THREADS(r5)
- lbz r4,HSTATE_PTID(r13)
- li r0,1
- sld r0,r0,r4
- andc. r3,r3,r0 /* no sense IPI'ing ourselves */
- beq 43f
- /* Order entry/exit update vs. IPIs */
- sync
- mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
- subf r6,r4,r13
-42: andi. r0,r3,1
- beq 44f
- ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
- li r0,IPI_PRIORITY
- li r7,XICS_MFRR
- stbcix r0,r7,r8 /* trigger the IPI */
-44: srdi. r3,r3,1
- addi r6,r6,PACA_SIZE
- bne 42b
-
-secondary_too_late:
+kvmhv_switch_to_host:
/* Secondary threads wait for primary to do partition switch */
-43: ld r5,HSTATE_KVM_VCORE(r13)
+ ld r5,HSTATE_KVM_VCORE(r13)
ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
lbz r3,HSTATE_PTID(r13)
cmpwi r3,0
@@ -1562,6 +1610,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1: addi r8,r8,16
.endr
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ /* Finish timing, if we have a vcpu */
+ ld r4, HSTATE_KVM_VCPU(r13)
+ cmpdi r4, 0
+ li r3, 0
+ beq 2f
+ bl kvmhv_accumulate_time
+2:
+#endif
/* Unset guest mode */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
@@ -1696,8 +1753,10 @@ kvmppc_hisi:
* Returns to the guest if we handle it, or continues on up to
* the kernel if we can't (i.e. if we don't have a handler for
* it, or if the handler returns H_TOO_HARD).
+ *
+ * r5 - r8 contain hcall args,
+ * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
*/
- .globl hcall_try_real_mode
hcall_try_real_mode:
ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
@@ -1839,13 +1898,124 @@ hcall_real_table:
.long 0 /* 0x12c */
.long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
+ .long 0 /* 0x138 */
+ .long 0 /* 0x13c */
+ .long 0 /* 0x140 */
+ .long 0 /* 0x144 */
+ .long 0 /* 0x148 */
+ .long 0 /* 0x14c */
+ .long 0 /* 0x150 */
+ .long 0 /* 0x154 */
+ .long 0 /* 0x158 */
+ .long 0 /* 0x15c */
+ .long 0 /* 0x160 */
+ .long 0 /* 0x164 */
+ .long 0 /* 0x168 */
+ .long 0 /* 0x16c */
+ .long 0 /* 0x170 */
+ .long 0 /* 0x174 */
+ .long 0 /* 0x178 */
+ .long 0 /* 0x17c */
+ .long 0 /* 0x180 */
+ .long 0 /* 0x184 */
+ .long 0 /* 0x188 */
+ .long 0 /* 0x18c */
+ .long 0 /* 0x190 */
+ .long 0 /* 0x194 */
+ .long 0 /* 0x198 */
+ .long 0 /* 0x19c */
+ .long 0 /* 0x1a0 */
+ .long 0 /* 0x1a4 */
+ .long 0 /* 0x1a8 */
+ .long 0 /* 0x1ac */
+ .long 0 /* 0x1b0 */
+ .long 0 /* 0x1b4 */
+ .long 0 /* 0x1b8 */
+ .long 0 /* 0x1bc */
+ .long 0 /* 0x1c0 */
+ .long 0 /* 0x1c4 */
+ .long 0 /* 0x1c8 */
+ .long 0 /* 0x1cc */
+ .long 0 /* 0x1d0 */
+ .long 0 /* 0x1d4 */
+ .long 0 /* 0x1d8 */
+ .long 0 /* 0x1dc */
+ .long 0 /* 0x1e0 */
+ .long 0 /* 0x1e4 */
+ .long 0 /* 0x1e8 */
+ .long 0 /* 0x1ec */
+ .long 0 /* 0x1f0 */
+ .long 0 /* 0x1f4 */
+ .long 0 /* 0x1f8 */
+ .long 0 /* 0x1fc */
+ .long 0 /* 0x200 */
+ .long 0 /* 0x204 */
+ .long 0 /* 0x208 */
+ .long 0 /* 0x20c */
+ .long 0 /* 0x210 */
+ .long 0 /* 0x214 */
+ .long 0 /* 0x218 */
+ .long 0 /* 0x21c */
+ .long 0 /* 0x220 */
+ .long 0 /* 0x224 */
+ .long 0 /* 0x228 */
+ .long 0 /* 0x22c */
+ .long 0 /* 0x230 */
+ .long 0 /* 0x234 */
+ .long 0 /* 0x238 */
+ .long 0 /* 0x23c */
+ .long 0 /* 0x240 */
+ .long 0 /* 0x244 */
+ .long 0 /* 0x248 */
+ .long 0 /* 0x24c */
+ .long 0 /* 0x250 */
+ .long 0 /* 0x254 */
+ .long 0 /* 0x258 */
+ .long 0 /* 0x25c */
+ .long 0 /* 0x260 */
+ .long 0 /* 0x264 */
+ .long 0 /* 0x268 */
+ .long 0 /* 0x26c */
+ .long 0 /* 0x270 */
+ .long 0 /* 0x274 */
+ .long 0 /* 0x278 */
+ .long 0 /* 0x27c */
+ .long 0 /* 0x280 */
+ .long 0 /* 0x284 */
+ .long 0 /* 0x288 */
+ .long 0 /* 0x28c */
+ .long 0 /* 0x290 */
+ .long 0 /* 0x294 */
+ .long 0 /* 0x298 */
+ .long 0 /* 0x29c */
+ .long 0 /* 0x2a0 */
+ .long 0 /* 0x2a4 */
+ .long 0 /* 0x2a8 */
+ .long 0 /* 0x2ac */
+ .long 0 /* 0x2b0 */
+ .long 0 /* 0x2b4 */
+ .long 0 /* 0x2b8 */
+ .long 0 /* 0x2bc */
+ .long 0 /* 0x2c0 */
+ .long 0 /* 0x2c4 */
+ .long 0 /* 0x2c8 */
+ .long 0 /* 0x2cc */
+ .long 0 /* 0x2d0 */
+ .long 0 /* 0x2d4 */
+ .long 0 /* 0x2d8 */
+ .long 0 /* 0x2dc */
+ .long 0 /* 0x2e0 */
+ .long 0 /* 0x2e4 */
+ .long 0 /* 0x2e8 */
+ .long 0 /* 0x2ec */
+ .long 0 /* 0x2f0 */
+ .long 0 /* 0x2f4 */
+ .long 0 /* 0x2f8 */
+ .long 0 /* 0x2fc */
+ .long DOTSYM(kvmppc_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
-ignore_hdec:
- mr r4,r9
- b fast_guest_return
-
_GLOBAL(kvmppc_h_set_xdabr)
andi. r0, r5, DABRX_USER | DABRX_KERNEL
beq 6f
@@ -1884,7 +2054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r3, 0
blr
-_GLOBAL(kvmppc_h_cede)
+_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
ori r11,r11,MSR_EE
std r11,VCPU_MSR(r3)
li r0,1
@@ -1893,8 +2063,8 @@ _GLOBAL(kvmppc_h_cede)
lbz r5,VCPU_PRODDED(r3)
cmpwi r5,0
bne kvm_cede_prodded
- li r0,0 /* set trap to 0 to say hcall is handled */
- stw r0,VCPU_TRAP(r3)
+ li r12,0 /* set trap to 0 to say hcall is handled */
+ stw r12,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)
@@ -1912,12 +2082,11 @@ _GLOBAL(kvmppc_h_cede)
addi r6,r5,VCORE_NAPPING_THREADS
31: lwarx r4,0,r6
or r4,r4,r0
- PPC_POPCNTW(R7,R4)
- cmpw r7,r8
- bge kvm_cede_exit
+ cmpw r4,r8
+ beq kvm_cede_exit
stwcx. r4,0,r6
bne 31b
- /* order napping_threads update vs testing entry_exit_count */
+ /* order napping_threads update vs testing entry_exit_map */
isync
li r0,NAPPING_CEDE
stb r0,HSTATE_NAPPING(r13)
@@ -1955,21 +2124,52 @@ _GLOBAL(kvmppc_h_cede)
bl kvmppc_save_fp
/*
+ * Set DEC to the smaller of DEC and HDEC, so that we wake
+ * no later than the end of our timeslice (HDEC interrupts
+ * don't wake us from nap).
+ */
+ mfspr r3, SPRN_DEC
+ mfspr r4, SPRN_HDEC
+ mftb r5
+ cmpw r3, r4
+ ble 67f
+ mtspr SPRN_DEC, r4
+67:
+ /* save expiry time of guest decrementer */
+ extsw r3, r3
+ add r3, r3, r5
+ ld r4, HSTATE_KVM_VCPU(r13)
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r6, VCORE_TB_OFFSET(r5)
+ subf r3, r6, r3 /* convert to host TB value */
+ std r3, VCPU_DEC_EXPIRES(r4)
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ ld r4, HSTATE_KVM_VCPU(r13)
+ addi r3, r4, VCPU_TB_CEDE
+ bl kvmhv_accumulate_time
+#endif
+
+ lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
+
+ /*
* Take a nap until a decrementer or external or doorbell interrupt
- * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
- * runlatch bit before napping.
+ * occurs, with PECE1 and PECE0 set in LPCR.
+ * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
+ * Also clear the runlatch bit before napping.
*/
kvm_do_nap:
- mfspr r2, SPRN_CTRLF
- clrrdi r2, r2, 1
- mtspr SPRN_CTRLT, r2
+ mfspr r0, SPRN_CTRLF
+ clrrdi r0, r0, 1
+ mtspr SPRN_CTRLT, r0
li r0,1
stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
- oris r5,r5,LPCR_PECEDP@h
+ ori r5, r5, LPCR_PECEDH
+ rlwimi r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_LPCR,r5
isync
@@ -1994,9 +2194,23 @@ kvm_end_cede:
/* Woken by external or decrementer interrupt */
ld r1, HSTATE_HOST_R1(r13)
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMINTR
+ bl kvmhv_accumulate_time
+#endif
+
/* load up FP state */
bl kvmppc_load_fp
+ /* Restore guest decrementer */
+ ld r3, VCPU_DEC_EXPIRES(r4)
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r6, VCORE_TB_OFFSET(r5)
+ add r3, r3, r6 /* convert host TB to guest TB value */
+ mftb r7
+ subf r3, r7, r3
+ mtspr SPRN_DEC, r3
+
/* Load NV GPRS */
ld r14, VCPU_GPR(R14)(r4)
ld r15, VCPU_GPR(R15)(r4)
@@ -2057,7 +2271,8 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
- b hcall_real_fallback
+ ld r9, HSTATE_KVM_VCPU(r13)
+ b guest_exit_cont
/* Try to handle a machine check in real mode */
machine_check_realmode:
@@ -2089,13 +2304,14 @@ machine_check_realmode:
/*
* Check the reason we woke from nap, and take appropriate action.
- * Returns:
+ * Returns (in r3):
* 0 if nothing needs to be done
* 1 if something happened that needs to be handled by the host
- * -1 if there was a guest wakeup (IPI)
+ * -1 if there was a guest wakeup (IPI or msgsnd)
*
* Also sets r12 to the interrupt vector for any interrupt that needs
* to be handled now by the host (0x500 for external interrupt), or zero.
+ * Modifies r0, r6, r7, r8.
*/
kvmppc_check_wake_reason:
mfspr r6, SPRN_SRR1
@@ -2122,7 +2338,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* hypervisor doorbell */
3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
+ /* see if it's a host IPI */
li r3, 1
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bnelr
+ /* if not, clear it and return -1 */
+ lis r6, (PPC_DBELL_SERVER << (63-36))@h
+ PPC_MSGCLR(6)
+ li r3, -1
blr
/*
@@ -2131,6 +2355,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* 0 if no interrupt is pending
* 1 if an interrupt is pending that needs to be handled by the host
* -1 if there was a guest wakeup IPI (which has now been cleared)
+ * Modifies r0, r6, r7, r8, returns value in r3.
*/
kvmppc_read_intr:
/* see if a host IPI is pending */
@@ -2185,6 +2410,7 @@ kvmppc_read_intr:
bne- 43f
/* OK, it's an IPI for us */
+ li r12, 0
li r3, -1
1: blr
@@ -2314,3 +2540,62 @@ kvmppc_fix_pmao:
mtspr SPRN_PMC6, r3
isync
blr
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+/*
+ * Start timing an activity
+ * r3 = pointer to time accumulation struct, r4 = vcpu
+ */
+kvmhv_start_timing:
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lbz r6, VCORE_IN_GUEST(r5)
+ cmpwi r6, 0
+ beq 5f /* if in guest, need to */
+ ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
+5: mftb r5
+ subf r5, r6, r5
+ std r3, VCPU_CUR_ACTIVITY(r4)
+ std r5, VCPU_ACTIVITY_START(r4)
+ blr
+
+/*
+ * Accumulate time to one activity and start another.
+ * r3 = pointer to new time accumulation struct, r4 = vcpu
+ */
+kvmhv_accumulate_time:
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lbz r8, VCORE_IN_GUEST(r5)
+ cmpwi r8, 0
+ beq 4f /* if in guest, need to */
+ ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
+4: ld r5, VCPU_CUR_ACTIVITY(r4)
+ ld r6, VCPU_ACTIVITY_START(r4)
+ std r3, VCPU_CUR_ACTIVITY(r4)
+ mftb r7
+ subf r7, r8, r7
+ std r7, VCPU_ACTIVITY_START(r4)
+ cmpdi r5, 0
+ beqlr
+ subf r3, r6, r7
+ ld r8, TAS_SEQCOUNT(r5)
+ cmpdi r8, 0
+ addi r8, r8, 1
+ std r8, TAS_SEQCOUNT(r5)
+ lwsync
+ ld r7, TAS_TOTAL(r5)
+ add r7, r7, r3
+ std r7, TAS_TOTAL(r5)
+ ld r6, TAS_MIN(r5)
+ ld r7, TAS_MAX(r5)
+ beq 3f
+ cmpd r3, r6
+ bge 1f
+3: std r3, TAS_MIN(r5)
+1: cmpd r3, r7
+ ble 2f
+ std r3, TAS_MAX(r5)
+2: lwsync
+ addi r8, r8, 1
+ std r8, TAS_SEQCOUNT(r5)
+ blr
+#endif
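
kvmhv_accumulate_time above publishes each accumulator under a seqcount: TAS_SEQCOUNT is bumped to an odd value before total/min/max are written and back to even afterwards, with lwsync ordering the stores. A consumer written in C might read it roughly like this (the struct layout is assumed from the TAS_* offsets used in the assembly; illustrative only):

#include <stdint.h>

struct time_accum_sketch {
	volatile uint64_t seqcount;
	volatile uint64_t total, min, max;
};

static void read_accum(const struct time_accum_sketch *t,
		       uint64_t *total, uint64_t *min, uint64_t *max)
{
	uint64_t seq;

	do {
		while ((seq = t->seqcount) & 1)
			;	/* odd: writer in progress, spin */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		*total = t->total;
		*min   = t->min;
		*max   = t->max;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (t->seqcount != seq);	/* retry if a write raced us */
}
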
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index ce3c893d509b..f2c75a1e0536 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -258,6 +258,28 @@ static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
+{
+ long rc;
+
+ rc = kvmppc_h_logical_ci_load(vcpu);
+ if (rc == H_TOO_HARD)
+ return EMULATE_FAIL;
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
+static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
+{
+ long rc;
+
+ rc = kvmppc_h_logical_ci_store(vcpu);
+ if (rc == H_TOO_HARD)
+ return EMULATE_FAIL;
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
long rc = kvmppc_xics_hcall(vcpu, cmd);
@@ -290,6 +312,10 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
return EMULATE_DONE;
+ case H_LOGICAL_CI_LOAD:
+ return kvmppc_h_pr_logical_ci_load(vcpu);
+ case H_LOGICAL_CI_STORE:
+ return kvmppc_h_pr_logical_ci_store(vcpu);
case H_XIRR:
case H_CPPR:
case H_EOI:
@@ -323,6 +349,8 @@ int kvmppc_hcall_impl_pr(unsigned long cmd)
case H_BULK_REMOVE:
case H_PUT_TCE:
case H_CEDE:
+ case H_LOGICAL_CI_LOAD:
+ case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
case H_XIRR:
case H_CPPR:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index a4a8d9f0dcb7..c6ca7db64673 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
@@ -39,7 +40,7 @@
* LOCKING
* =======
*
- * Each ICS has a mutex protecting the information about the IRQ
+ * Each ICS has a spin lock protecting the information about the IRQ
* sources and avoiding simultaneous deliveries of the same interrupt.
*
* ICP operations are done via a single compare & swap transaction
@@ -109,7 +110,10 @@ static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
{
int i;
- mutex_lock(&ics->lock);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
struct ics_irq_state *state = &ics->irq_state[i];
@@ -120,12 +124,15 @@ static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
XICS_DBG("resend %#x prio %#x\n", state->number,
state->priority);
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
icp_deliver_irq(xics, icp, state->number);
- mutex_lock(&ics->lock);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
}
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
}
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
@@ -133,8 +140,10 @@ static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
u32 server, u32 priority, u32 saved_priority)
{
bool deliver;
+ unsigned long flags;
- mutex_lock(&ics->lock);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
state->server = server;
state->priority = priority;
@@ -145,7 +154,8 @@ static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
deliver = true;
}
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
return deliver;
}
@@ -186,6 +196,7 @@ int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
struct kvmppc_ics *ics;
struct ics_irq_state *state;
u16 src;
+ unsigned long flags;
if (!xics)
return -ENODEV;
@@ -195,10 +206,12 @@ int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
return -EINVAL;
state = &ics->irq_state[src];
- mutex_lock(&ics->lock);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
*server = state->server;
*priority = state->priority;
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
return 0;
}
@@ -365,6 +378,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
struct kvmppc_ics *ics;
u32 reject;
u16 src;
+ unsigned long flags;
/*
* This is used both for initial delivery of an interrupt and
@@ -391,7 +405,8 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
state = &ics->irq_state[src];
/* Get a lock on the ICS */
- mutex_lock(&ics->lock);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
/* Get our server */
if (!icp || state->server != icp->server_num) {
@@ -434,7 +449,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
*
* Note that if successful, the new delivery might have itself
* rejected an interrupt that was "delivered" before we took the
- * icp mutex.
+ * ics spin lock.
*
* In this case we do the whole sequence all over again for the
* new guy. We cannot assume that the rejected interrupt is less
@@ -448,7 +463,8 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* Delivery was successful, did we reject somebody else ?
*/
if (reject && reject != XICS_IPI) {
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
new_irq = reject;
goto again;
}
@@ -468,12 +484,14 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
*/
smp_mb();
if (!icp->state.need_resend) {
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
goto again;
}
}
out:
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
}
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
@@ -802,14 +820,22 @@ static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
- if (icp->rm_action & XICS_RM_KICK_VCPU)
+ if (icp->rm_action & XICS_RM_KICK_VCPU) {
+ icp->n_rm_kick_vcpu++;
kvmppc_fast_vcpu_kick(icp->rm_kick_target);
- if (icp->rm_action & XICS_RM_CHECK_RESEND)
+ }
+ if (icp->rm_action & XICS_RM_CHECK_RESEND) {
+ icp->n_rm_check_resend++;
icp_check_resend(xics, icp->rm_resend_icp);
- if (icp->rm_action & XICS_RM_REJECT)
+ }
+ if (icp->rm_action & XICS_RM_REJECT) {
+ icp->n_rm_reject++;
icp_deliver_irq(xics, icp, icp->rm_reject);
- if (icp->rm_action & XICS_RM_NOTIFY_EOI)
+ }
+ if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
+ icp->n_rm_notify_eoi++;
kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
+ }
icp->rm_action = 0;
@@ -872,10 +898,21 @@ static int xics_debug_show(struct seq_file *m, void *private)
struct kvm *kvm = xics->kvm;
struct kvm_vcpu *vcpu;
int icsid, i;
+ unsigned long flags;
+ unsigned long t_rm_kick_vcpu, t_rm_check_resend;
+ unsigned long t_rm_reject, t_rm_notify_eoi;
+ unsigned long t_reject, t_check_resend;
if (!kvm)
return 0;
+ t_rm_kick_vcpu = 0;
+ t_rm_notify_eoi = 0;
+ t_rm_check_resend = 0;
+ t_rm_reject = 0;
+ t_check_resend = 0;
+ t_reject = 0;
+
seq_printf(m, "=========\nICP state\n=========\n");
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -890,8 +927,19 @@ static int xics_debug_show(struct seq_file *m, void *private)
icp->server_num, state.xisr,
state.pending_pri, state.cppr, state.mfrr,
state.out_ee, state.need_resend);
+ t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
+ t_rm_notify_eoi += icp->n_rm_notify_eoi;
+ t_rm_check_resend += icp->n_rm_check_resend;
+ t_rm_reject += icp->n_rm_reject;
+ t_check_resend += icp->n_check_resend;
+ t_reject += icp->n_reject;
}
+ seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu reject=%lu notify_eoi=%lu\n",
+ t_rm_kick_vcpu, t_rm_check_resend,
+ t_rm_reject, t_rm_notify_eoi);
+ seq_printf(m, "ICP Real Mode totals: check_resend=%lu reject=%lu\n",
+ t_check_resend, t_reject);
for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
struct kvmppc_ics *ics = xics->ics[icsid];
@@ -901,7 +949,8 @@ static int xics_debug_show(struct seq_file *m, void *private)
seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
icsid);
- mutex_lock(&ics->lock);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
struct ics_irq_state *irq = &ics->irq_state[i];
@@ -912,7 +961,8 @@ static int xics_debug_show(struct seq_file *m, void *private)
irq->resend, irq->masked_pending);
}
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
}
return 0;
}
@@ -965,7 +1015,6 @@ static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
if (!ics)
goto out;
- mutex_init(&ics->lock);
ics->icsid = icsid;
for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
@@ -1107,13 +1156,15 @@ static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
u64 __user *ubufp = (u64 __user *) addr;
u16 idx;
u64 val, prio;
+ unsigned long flags;
ics = kvmppc_xics_find_ics(xics, irq, &idx);
if (!ics)
return -ENOENT;
irqp = &ics->irq_state[idx];
- mutex_lock(&ics->lock);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
ret = -ENOENT;
if (irqp->exists) {
val = irqp->server;
@@ -1129,7 +1180,8 @@ static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
val |= KVM_XICS_PENDING;
ret = 0;
}
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
if (!ret && put_user(val, ubufp))
ret = -EFAULT;
@@ -1146,6 +1198,7 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
u64 val;
u8 prio;
u32 server;
+ unsigned long flags;
if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
return -ENOENT;
@@ -1166,7 +1219,8 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
kvmppc_xics_find_server(xics->kvm, server) == NULL)
return -EINVAL;
- mutex_lock(&ics->lock);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
irqp->server = server;
irqp->saved_priority = prio;
if (val & KVM_XICS_MASKED)
@@ -1178,7 +1232,8 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
irqp->asserted = 1;
irqp->exists = 1;
- mutex_unlock(&ics->lock);
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
if (val & KVM_XICS_PENDING)
icp_deliver_irq(xics, NULL, irqp->number);
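
Throughout book3s_xics.c the patch turns every mutex_lock(&ics->lock) site into the local_irq_save() + arch_spin_lock() pair seen above, so the same lock can also be taken from real mode. The recurring pattern, written once as a pair of hypothetical helpers (not something the patch itself adds):

static inline void ics_lock(struct kvmppc_ics *ics, unsigned long *flags)
{
	local_irq_save(*flags);		/* spinning with irqs on risks deadlock */
	arch_spin_lock(&ics->lock);
}

static inline void ics_unlock(struct kvmppc_ics *ics, unsigned long flags)
{
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
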
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 73f0f2723c07..56ea44f9867f 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -78,13 +78,22 @@ struct kvmppc_icp {
u32 rm_reject;
u32 rm_eoied_irq;
+ /* Counters for each reason we exited real mode */
+ unsigned long n_rm_kick_vcpu;
+ unsigned long n_rm_check_resend;
+ unsigned long n_rm_reject;
+ unsigned long n_rm_notify_eoi;
+ /* Counters for handling ICP processing in real mode */
+ unsigned long n_check_resend;
+ unsigned long n_reject;
+
/* Debug stuff for real mode */
union kvmppc_icp_state rm_dbgstate;
struct kvm_vcpu *rm_dbgtgt;
};
struct kvmppc_ics {
- struct mutex lock;
+ arch_spinlock_t lock;
u16 icsid;
struct ics_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};
@@ -96,6 +105,8 @@ struct kvmppc_xics {
u32 max_icsid;
bool real_mode;
bool real_mode_dbg;
+ u32 err_noics;
+ u32 err_noicp;
struct kvmppc_ics *ics[KVMPPC_XICS_MAX_ICS_ID + 1];
};
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index cc536d4a75ef..4d33e199edcc 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -338,6 +338,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
pte_t *ptep;
unsigned int wimg = 0;
pgd_t *pgdir;
+ unsigned long flags;
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
@@ -468,15 +469,28 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
pgdir = vcpu_e500->vcpu.arch.pgdir;
- ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
- if (pte_present(*ptep))
- wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
- else {
- if (printk_ratelimit())
- pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
- __func__, (long)gfn, pfn);
- ret = -EINVAL;
- goto out;
+ /*
+ * We are just looking at the wimg bits, so we don't
+ * care much about the THP splitting bit.
+ * We are holding kvm->mmu_lock so a notifier invalidate
+ * can't run, hence the pfn won't change.
+ */
+ local_irq_save(flags);
+ ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
+ if (ptep) {
+ pte_t pte = READ_ONCE(*ptep);
+
+ if (pte_present(pte)) {
+ wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
+ MAS2_WIMGE_MASK;
+ local_irq_restore(flags);
+ } else {
+ local_irq_restore(flags);
+ pr_err_ratelimited("%s: pte not present: gfn %lx, pfn %lx\n",
+ __func__, (long)gfn, pfn);
+ ret = -EINVAL;
+ goto out;
+ }
}
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
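
The e500 hunk above snapshots the PTE once with READ_ONCE() while interrupts are off, and only then tests pte_present() and derives the WIMG attributes, so a racing invalidation cannot change the entry between the test and the decode. The snapshot-then-decode idea in a standalone form (shift and mask are parameters here instead of the kernel's PTE_WIMGE_SHIFT/MAS2_WIMGE_MASK):

#include <stdint.h>

/* volatile read stands in for the kernel's READ_ONCE() */
#define READ_ONCE_U64(x)	(*(volatile const uint64_t *)&(x))

static unsigned int pte_wimg(const uint64_t *ptep, unsigned int shift,
			     uint64_t mask)
{
	uint64_t pte = READ_ONCE_U64(*ptep);	/* decode only this snapshot */

	return (unsigned int)((pte >> shift) & mask);
}
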
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 24bfe401373e..ac3ddf115f3d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -529,6 +529,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PPC_RMA:
r = 0;
break;
+ case KVM_CAP_PPC_HWRNG:
+ r = kvmppc_hwrng_present();
+ break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -720,7 +723,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
return;
}
- if (vcpu->arch.mmio_is_bigendian) {
+ if (!vcpu->arch.mmio_host_swabbed) {
switch (run->mmio.len) {
case 8: gpr = *(u64 *)run->mmio.data; break;
case 4: gpr = *(u32 *)run->mmio.data; break;
@@ -728,10 +731,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
case 1: gpr = *(u8 *)run->mmio.data; break;
}
} else {
- /* Convert BE data from userland back to LE. */
switch (run->mmio.len) {
- case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
- case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
+ case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
+ case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
case 1: gpr = *(u8 *)run->mmio.data; break;
}
}
@@ -780,14 +783,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
int is_default_endian)
{
int idx, ret;
- int is_bigendian;
+ bool host_swabbed;
+ /* Pity C doesn't have a logical XOR operator */
if (kvmppc_need_byteswap(vcpu)) {
- /* Default endianness is "little endian". */
- is_bigendian = !is_default_endian;
+ host_swabbed = is_default_endian;
} else {
- /* Default endianness is "big endian". */
- is_bigendian = is_default_endian;
+ host_swabbed = !is_default_endian;
}
if (bytes > sizeof(run->mmio.data)) {
@@ -800,7 +802,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->mmio.is_write = 0;
vcpu->arch.io_gpr = rt;
- vcpu->arch.mmio_is_bigendian = is_bigendian;
+ vcpu->arch.mmio_host_swabbed = host_swabbed;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
vcpu->arch.mmio_sign_extend = 0;
@@ -840,14 +842,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
void *data = run->mmio.data;
int idx, ret;
- int is_bigendian;
+ bool host_swabbed;
+ /* Pity C doesn't have a logical XOR operator */
if (kvmppc_need_byteswap(vcpu)) {
- /* Default endianness is "little endian". */
- is_bigendian = !is_default_endian;
+ host_swabbed = is_default_endian;
} else {
- /* Default endianness is "big endian". */
- is_bigendian = is_default_endian;
+ host_swabbed = !is_default_endian;
}
if (bytes > sizeof(run->mmio.data)) {
@@ -862,7 +863,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->mmio_is_write = 1;
/* Store the value at the lowest bytes in 'data'. */
- if (is_bigendian) {
+ if (!host_swabbed) {
switch (bytes) {
case 8: *(u64 *)data = val; break;
case 4: *(u32 *)data = val; break;
@@ -870,11 +871,11 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 1: *(u8 *)data = val; break;
}
} else {
- /* Store LE value into 'data'. */
switch (bytes) {
- case 4: st_le32(data, val); break;
- case 2: st_le16(data, val); break;
- case 1: *(u8 *)data = val; break;
+ case 8: *(u64 *)data = swab64(val); break;
+ case 4: *(u32 *)data = swab32(val); break;
+ case 2: *(u16 *)data = swab16(val); break;
+ case 1: *(u8 *)data = val; break;
}
}
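The rework above folds the old is_bigendian bookkeeping into a single host_swabbed flag: the MMIO data needs swapping on the host exactly when kvmppc_need_byteswap() and is_default_endian have the same value, which is what the two if/else blocks compute. A small stand-alone truth-table sketch of that flag; the function name here is invented for the sketch, while kvmppc_need_byteswap() and is_default_endian are the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Equivalent of the if/else blocks in kvmppc_handle_load/store above. */
    static bool mmio_host_swabbed(bool need_byteswap, bool is_default_endian)
    {
        return need_byteswap == is_default_endian;
    }

    int main(void)
    {
        for (int n = 0; n <= 1; n++)
            for (int d = 0; d <= 1; d++)
                printf("need_byteswap=%d is_default_endian=%d -> host_swabbed=%d\n",
                       n, d, mmio_host_swabbed(n, d));
        return 0;
    }

When host_swabbed is set, the load and store paths above apply swab16/32/64 to the MMIO buffer; otherwise the data is used as stored.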
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 4a6c2cf890d9..60b0b3fc8fc1 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -10,7 +10,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
- if (mem_init_done)
+ if (slab_is_available())
p = kzalloc(size, mask);
else {
p = memblock_virt_alloc(size, 0);
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 55f19f9fd708..6813f80d1eec 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -69,54 +69,6 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
-/*
- * Use dcbz on the complete cache lines in the destination
- * to set them to zero. This requires that the destination
- * area is cacheable. -- paulus
- */
-_GLOBAL(cacheable_memzero)
- mr r5,r4
- li r4,0
- addi r6,r3,-4
- cmplwi 0,r5,4
- blt 7f
- stwu r4,4(r6)
- beqlr
- andi. r0,r6,3
- add r5,r0,r5
- subf r6,r0,r6
- clrlwi r7,r6,32-LG_CACHELINE_BYTES
- add r8,r7,r5
- srwi r9,r8,LG_CACHELINE_BYTES
- addic. r9,r9,-1 /* total number of complete cachelines */
- ble 2f
- xori r0,r7,CACHELINE_MASK & ~3
- srwi. r0,r0,2
- beq 3f
- mtctr r0
-4: stwu r4,4(r6)
- bdnz 4b
-3: mtctr r9
- li r7,4
-10: dcbz r7,r6
- addi r6,r6,CACHELINE_BYTES
- bdnz 10b
- clrlwi r5,r8,32-LG_CACHELINE_BYTES
- addi r5,r5,4
-2: srwi r0,r5,2
- mtctr r0
- bdz 6f
-1: stwu r4,4(r6)
- bdnz 1b
-6: andi. r5,r5,3
-7: cmpwi 0,r5,0
- beqlr
- mtctr r5
- addi r6,r6,3
-8: stbu r4,1(r6)
- bdnz 8b
- blr
-
_GLOBAL(memset)
rlwimi r4,r4,8,16,23
rlwimi r4,r4,16,0,15
@@ -142,85 +94,6 @@ _GLOBAL(memset)
bdnz 8b
blr
-/*
- * This version uses dcbz on the complete cache lines in the
- * destination area to reduce memory traffic. This requires that
- * the destination area is cacheable.
- * We only use this version if the source and dest don't overlap.
- * -- paulus.
- */
-_GLOBAL(cacheable_memcpy)
- add r7,r3,r5 /* test if the src & dst overlap */
- add r8,r4,r5
- cmplw 0,r4,r7
- cmplw 1,r3,r8
- crand 0,0,4 /* cr0.lt &= cr1.lt */
- blt memcpy /* if regions overlap */
-
- addi r4,r4,-4
- addi r6,r3,-4
- neg r0,r3
- andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
- beq 58f
-
- cmplw 0,r5,r0 /* is this more than total to do? */
- blt 63f /* if not much to do */
- andi. r8,r0,3 /* get it word-aligned first */
- subf r5,r0,r5
- mtctr r8
- beq+ 61f
-70: lbz r9,4(r4) /* do some bytes */
- stb r9,4(r6)
- addi r4,r4,1
- addi r6,r6,1
- bdnz 70b
-61: srwi. r0,r0,2
- mtctr r0
- beq 58f
-72: lwzu r9,4(r4) /* do some words */
- stwu r9,4(r6)
- bdnz 72b
-
-58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
- clrlwi r5,r5,32-LG_CACHELINE_BYTES
- li r11,4
- mtctr r0
- beq 63f
-53:
- dcbz r11,r6
- COPY_16_BYTES
-#if L1_CACHE_BYTES >= 32
- COPY_16_BYTES
-#if L1_CACHE_BYTES >= 64
- COPY_16_BYTES
- COPY_16_BYTES
-#if L1_CACHE_BYTES >= 128
- COPY_16_BYTES
- COPY_16_BYTES
- COPY_16_BYTES
- COPY_16_BYTES
-#endif
-#endif
-#endif
- bdnz 53b
-
-63: srwi. r0,r5,2
- mtctr r0
- beq 64f
-30: lwzu r0,4(r4)
- stwu r0,4(r6)
- bdnz 30b
-
-64: andi. r0,r5,3
- mtctr r0
- beq+ 65f
-40: lbz r0,4(r4)
- stb r0,4(r6)
- addi r4,r4,1
- addi r6,r6,1
- bdnz 40b
-65: blr
-
_GLOBAL(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index d7dafb3777ac..a84d333ecb09 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -83,23 +83,23 @@ _GLOBAL(copypage_power7)
li r12,112
.align 5
-1: lvx vr7,r0,r4
- lvx vr6,r4,r6
- lvx vr5,r4,r7
- lvx vr4,r4,r8
- lvx vr3,r4,r9
- lvx vr2,r4,r10
- lvx vr1,r4,r11
- lvx vr0,r4,r12
+1: lvx v7,r0,r4
+ lvx v6,r4,r6
+ lvx v5,r4,r7
+ lvx v4,r4,r8
+ lvx v3,r4,r9
+ lvx v2,r4,r10
+ lvx v1,r4,r11
+ lvx v0,r4,r12
addi r4,r4,128
- stvx vr7,r0,r3
- stvx vr6,r3,r6
- stvx vr5,r3,r7
- stvx vr4,r3,r8
- stvx vr3,r3,r9
- stvx vr2,r3,r10
- stvx vr1,r3,r11
- stvx vr0,r3,r12
+ stvx v7,r0,r3
+ stvx v6,r3,r6
+ stvx v5,r3,r7
+ stvx v4,r3,r8
+ stvx v3,r3,r9
+ stvx v2,r3,r10
+ stvx v1,r3,r11
+ stvx v0,r3,r12
addi r3,r3,128
bdnz 1b
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 92ee840529bc..da0c568d18c4 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -388,29 +388,29 @@ err3; std r0,0(r3)
li r11,48
bf cr7*4+3,5f
-err3; lvx vr1,r0,r4
+err3; lvx v1,r0,r4
addi r4,r4,16
-err3; stvx vr1,r0,r3
+err3; stvx v1,r0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
-err3; lvx vr1,r0,r4
-err3; lvx vr0,r4,r9
+err3; lvx v1,r0,r4
+err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx vr1,r0,r3
-err3; stvx vr0,r3,r9
+err3; stvx v1,r0,r3
+err3; stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx vr3,r0,r4
-err3; lvx vr2,r4,r9
-err3; lvx vr1,r4,r10
-err3; lvx vr0,r4,r11
+err3; lvx v3,r0,r4
+err3; lvx v2,r4,r9
+err3; lvx v1,r4,r10
+err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx vr3,r0,r3
-err3; stvx vr2,r3,r9
-err3; stvx vr1,r3,r10
-err3; stvx vr0,r3,r11
+err3; stvx v3,r0,r3
+err3; stvx v2,r3,r9
+err3; stvx v1,r3,r10
+err3; stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -433,23 +433,23 @@ err3; stvx vr0,r3,r11
*/
.align 5
8:
-err4; lvx vr7,r0,r4
-err4; lvx vr6,r4,r9
-err4; lvx vr5,r4,r10
-err4; lvx vr4,r4,r11
-err4; lvx vr3,r4,r12
-err4; lvx vr2,r4,r14
-err4; lvx vr1,r4,r15
-err4; lvx vr0,r4,r16
+err4; lvx v7,r0,r4
+err4; lvx v6,r4,r9
+err4; lvx v5,r4,r10
+err4; lvx v4,r4,r11
+err4; lvx v3,r4,r12
+err4; lvx v2,r4,r14
+err4; lvx v1,r4,r15
+err4; lvx v0,r4,r16
addi r4,r4,128
-err4; stvx vr7,r0,r3
-err4; stvx vr6,r3,r9
-err4; stvx vr5,r3,r10
-err4; stvx vr4,r3,r11
-err4; stvx vr3,r3,r12
-err4; stvx vr2,r3,r14
-err4; stvx vr1,r3,r15
-err4; stvx vr0,r3,r16
+err4; stvx v7,r0,r3
+err4; stvx v6,r3,r9
+err4; stvx v5,r3,r10
+err4; stvx v4,r3,r11
+err4; stvx v3,r3,r12
+err4; stvx v2,r3,r14
+err4; stvx v1,r3,r15
+err4; stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
@@ -463,29 +463,29 @@ err4; stvx vr0,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx vr3,r0,r4
-err3; lvx vr2,r4,r9
-err3; lvx vr1,r4,r10
-err3; lvx vr0,r4,r11
+err3; lvx v3,r0,r4
+err3; lvx v2,r4,r9
+err3; lvx v1,r4,r10
+err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx vr3,r0,r3
-err3; stvx vr2,r3,r9
-err3; stvx vr1,r3,r10
-err3; stvx vr0,r3,r11
+err3; stvx v3,r0,r3
+err3; stvx v2,r3,r9
+err3; stvx v1,r3,r10
+err3; stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx vr1,r0,r4
-err3; lvx vr0,r4,r9
+err3; lvx v1,r0,r4
+err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx vr1,r0,r3
-err3; stvx vr0,r3,r9
+err3; stvx v1,r0,r3
+err3; stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx vr1,r0,r4
+err3; lvx v1,r0,r4
addi r4,r4,16
-err3; stvx vr1,r0,r3
+err3; stvx v1,r0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -560,42 +560,42 @@ err3; stw r7,4(r3)
li r10,32
li r11,48
- LVS(vr16,0,r4) /* Setup permute control vector */
-err3; lvx vr0,0,r4
+ LVS(v16,0,r4) /* Setup permute control vector */
+err3; lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx vr8,r0,r3
+err3; stvx v8,r0,r3
addi r3,r3,16
- vor vr0,vr1,vr1
+ vor v0,v1,v1
5: bf cr7*4+2,6f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
-err3; lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+err3; lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
-err3; lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
-err3; lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
-err3; lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+err3; lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+err3; lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+err3; lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+err3; lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
-err3; stvx vr10,r3,r10
-err3; stvx vr11,r3,r11
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
+err3; stvx v10,r3,r10
+err3; stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -618,31 +618,31 @@ err3; stvx vr11,r3,r11
*/
.align 5
8:
-err4; lvx vr7,r0,r4
- VPERM(vr8,vr0,vr7,vr16)
-err4; lvx vr6,r4,r9
- VPERM(vr9,vr7,vr6,vr16)
-err4; lvx vr5,r4,r10
- VPERM(vr10,vr6,vr5,vr16)
-err4; lvx vr4,r4,r11
- VPERM(vr11,vr5,vr4,vr16)
-err4; lvx vr3,r4,r12
- VPERM(vr12,vr4,vr3,vr16)
-err4; lvx vr2,r4,r14
- VPERM(vr13,vr3,vr2,vr16)
-err4; lvx vr1,r4,r15
- VPERM(vr14,vr2,vr1,vr16)
-err4; lvx vr0,r4,r16
- VPERM(vr15,vr1,vr0,vr16)
+err4; lvx v7,r0,r4
+ VPERM(v8,v0,v7,v16)
+err4; lvx v6,r4,r9
+ VPERM(v9,v7,v6,v16)
+err4; lvx v5,r4,r10
+ VPERM(v10,v6,v5,v16)
+err4; lvx v4,r4,r11
+ VPERM(v11,v5,v4,v16)
+err4; lvx v3,r4,r12
+ VPERM(v12,v4,v3,v16)
+err4; lvx v2,r4,r14
+ VPERM(v13,v3,v2,v16)
+err4; lvx v1,r4,r15
+ VPERM(v14,v2,v1,v16)
+err4; lvx v0,r4,r16
+ VPERM(v15,v1,v0,v16)
addi r4,r4,128
-err4; stvx vr8,r0,r3
-err4; stvx vr9,r3,r9
-err4; stvx vr10,r3,r10
-err4; stvx vr11,r3,r11
-err4; stvx vr12,r3,r12
-err4; stvx vr13,r3,r14
-err4; stvx vr14,r3,r15
-err4; stvx vr15,r3,r16
+err4; stvx v8,r0,r3
+err4; stvx v9,r3,r9
+err4; stvx v10,r3,r10
+err4; stvx v11,r3,r11
+err4; stvx v12,r3,r12
+err4; stvx v13,r3,r14
+err4; stvx v14,r3,r15
+err4; stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
@@ -656,36 +656,36 @@ err4; stvx vr15,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
-err3; lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
-err3; lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
-err3; lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+err3; lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+err3; lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+err3; lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+err3; lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
-err3; stvx vr10,r3,r10
-err3; stvx vr11,r3,r11
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
+err3; stvx v10,r3,r10
+err3; stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
-err3; lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+err3; lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx vr8,r0,r3
+err3; stvx v8,r0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index a5b30c71a8d3..18af0b3d3eb2 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -236,78 +236,78 @@ _GLOBAL(_rest32gpr_31_x)
_GLOBAL(_savevr_20)
li r11,-192
- stvx vr20,r11,r0
+ stvx v20,r11,r0
_GLOBAL(_savevr_21)
li r11,-176
- stvx vr21,r11,r0
+ stvx v21,r11,r0
_GLOBAL(_savevr_22)
li r11,-160
- stvx vr22,r11,r0
+ stvx v22,r11,r0
_GLOBAL(_savevr_23)
li r11,-144
- stvx vr23,r11,r0
+ stvx v23,r11,r0
_GLOBAL(_savevr_24)
li r11,-128
- stvx vr24,r11,r0
+ stvx v24,r11,r0
_GLOBAL(_savevr_25)
li r11,-112
- stvx vr25,r11,r0
+ stvx v25,r11,r0
_GLOBAL(_savevr_26)
li r11,-96
- stvx vr26,r11,r0
+ stvx v26,r11,r0
_GLOBAL(_savevr_27)
li r11,-80
- stvx vr27,r11,r0
+ stvx v27,r11,r0
_GLOBAL(_savevr_28)
li r11,-64
- stvx vr28,r11,r0
+ stvx v28,r11,r0
_GLOBAL(_savevr_29)
li r11,-48
- stvx vr29,r11,r0
+ stvx v29,r11,r0
_GLOBAL(_savevr_30)
li r11,-32
- stvx vr30,r11,r0
+ stvx v30,r11,r0
_GLOBAL(_savevr_31)
li r11,-16
- stvx vr31,r11,r0
+ stvx v31,r11,r0
blr
_GLOBAL(_restvr_20)
li r11,-192
- lvx vr20,r11,r0
+ lvx v20,r11,r0
_GLOBAL(_restvr_21)
li r11,-176
- lvx vr21,r11,r0
+ lvx v21,r11,r0
_GLOBAL(_restvr_22)
li r11,-160
- lvx vr22,r11,r0
+ lvx v22,r11,r0
_GLOBAL(_restvr_23)
li r11,-144
- lvx vr23,r11,r0
+ lvx v23,r11,r0
_GLOBAL(_restvr_24)
li r11,-128
- lvx vr24,r11,r0
+ lvx v24,r11,r0
_GLOBAL(_restvr_25)
li r11,-112
- lvx vr25,r11,r0
+ lvx v25,r11,r0
_GLOBAL(_restvr_26)
li r11,-96
- lvx vr26,r11,r0
+ lvx v26,r11,r0
_GLOBAL(_restvr_27)
li r11,-80
- lvx vr27,r11,r0
+ lvx v27,r11,r0
_GLOBAL(_restvr_28)
li r11,-64
- lvx vr28,r11,r0
+ lvx v28,r11,r0
_GLOBAL(_restvr_29)
li r11,-48
- lvx vr29,r11,r0
+ lvx v29,r11,r0
_GLOBAL(_restvr_30)
li r11,-32
- lvx vr30,r11,r0
+ lvx v30,r11,r0
_GLOBAL(_restvr_31)
li r11,-16
- lvx vr31,r11,r0
+ lvx v31,r11,r0
blr
#endif /* CONFIG_ALTIVEC */
@@ -443,101 +443,101 @@ _restgpr0_31:
.globl _savevr_20
_savevr_20:
li r12,-192
- stvx vr20,r12,r0
+ stvx v20,r12,r0
.globl _savevr_21
_savevr_21:
li r12,-176
- stvx vr21,r12,r0
+ stvx v21,r12,r0
.globl _savevr_22
_savevr_22:
li r12,-160
- stvx vr22,r12,r0
+ stvx v22,r12,r0
.globl _savevr_23
_savevr_23:
li r12,-144
- stvx vr23,r12,r0
+ stvx v23,r12,r0
.globl _savevr_24
_savevr_24:
li r12,-128
- stvx vr24,r12,r0
+ stvx v24,r12,r0
.globl _savevr_25
_savevr_25:
li r12,-112
- stvx vr25,r12,r0
+ stvx v25,r12,r0
.globl _savevr_26
_savevr_26:
li r12,-96
- stvx vr26,r12,r0
+ stvx v26,r12,r0
.globl _savevr_27
_savevr_27:
li r12,-80
- stvx vr27,r12,r0
+ stvx v27,r12,r0
.globl _savevr_28
_savevr_28:
li r12,-64
- stvx vr28,r12,r0
+ stvx v28,r12,r0
.globl _savevr_29
_savevr_29:
li r12,-48
- stvx vr29,r12,r0
+ stvx v29,r12,r0
.globl _savevr_30
_savevr_30:
li r12,-32
- stvx vr30,r12,r0
+ stvx v30,r12,r0
.globl _savevr_31
_savevr_31:
li r12,-16
- stvx vr31,r12,r0
+ stvx v31,r12,r0
blr
.globl _restvr_20
_restvr_20:
li r12,-192
- lvx vr20,r12,r0
+ lvx v20,r12,r0
.globl _restvr_21
_restvr_21:
li r12,-176
- lvx vr21,r12,r0
+ lvx v21,r12,r0
.globl _restvr_22
_restvr_22:
li r12,-160
- lvx vr22,r12,r0
+ lvx v22,r12,r0
.globl _restvr_23
_restvr_23:
li r12,-144
- lvx vr23,r12,r0
+ lvx v23,r12,r0
.globl _restvr_24
_restvr_24:
li r12,-128
- lvx vr24,r12,r0
+ lvx v24,r12,r0
.globl _restvr_25
_restvr_25:
li r12,-112
- lvx vr25,r12,r0
+ lvx v25,r12,r0
.globl _restvr_26
_restvr_26:
li r12,-96
- lvx vr26,r12,r0
+ lvx v26,r12,r0
.globl _restvr_27
_restvr_27:
li r12,-80
- lvx vr27,r12,r0
+ lvx v27,r12,r0
.globl _restvr_28
_restvr_28:
li r12,-64
- lvx vr28,r12,r0
+ lvx v28,r12,r0
.globl _restvr_29
_restvr_29:
li r12,-48
- lvx vr29,r12,r0
+ lvx v29,r12,r0
.globl _restvr_30
_restvr_30:
li r12,-32
- lvx vr30,r12,r0
+ lvx v30,r12,r0
.globl _restvr_31
_restvr_31:
li r12,-16
- lvx vr31,r12,r0
+ lvx v31,r12,r0
blr
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index 85aec08ab234..5d0cdbfbe3f2 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -184,16 +184,16 @@ _GLOBAL(do_stfd)
extab 2b,3b
#ifdef CONFIG_ALTIVEC
-/* Get the contents of vrN into vr0; N is in r3. */
+/* Get the contents of vrN into v0; N is in r3. */
_GLOBAL(get_vr)
mflr r0
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* vr0 is already in vr0 */
+ blr /* v0 is already in v0 */
nop
reg = 1
.rept 31
- vor vr0,reg,reg /* assembler doesn't know vmr? */
+ vor v0,reg,reg /* assembler doesn't know vmr? */
blr
reg = reg + 1
.endr
@@ -203,16 +203,16 @@ reg = reg + 1
mtlr r0
bctr
-/* Put the contents of vr0 into vrN; N is in r3. */
+/* Put the contents of v0 into vrN; N is in r3. */
_GLOBAL(put_vr)
mflr r0
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* vr0 is already in vr0 */
+ blr /* v0 is already in v0 */
nop
reg = 1
.rept 31
- vor reg,vr0,vr0
+ vor reg,v0,v0
blr
reg = reg + 1
.endr
@@ -234,13 +234,13 @@ _GLOBAL(do_lvx)
MTMSRD(r7)
isync
beq cr7,1f
- stvx vr0,r1,r8
+ stvx v0,r1,r8
1: li r9,-EFAULT
-2: lvx vr0,0,r4
+2: lvx v0,0,r4
li r9,0
3: beq cr7,4f
bl put_vr
- lvx vr0,r1,r8
+ lvx v0,r1,r8
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
@@ -262,13 +262,13 @@ _GLOBAL(do_stvx)
MTMSRD(r7)
isync
beq cr7,1f
- stvx vr0,r1,r8
+ stvx v0,r1,r8
bl get_vr
1: li r9,-EFAULT
-2: stvx vr0,0,r4
+2: stvx v0,0,r4
li r9,0
3: beq cr7,4f
- lvx vr0,r1,r8
+ lvx v0,r1,r8
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
@@ -280,12 +280,12 @@ _GLOBAL(do_stvx)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
-/* Get the contents of vsrN into vsr0; N is in r3. */
+/* Get the contents of vsN into vs0; N is in r3. */
_GLOBAL(get_vsr)
mflr r0
rlwinm r3,r3,3,0x1f8
bcl 20,31,1f
- blr /* vsr0 is already in vsr0 */
+ blr /* vs0 is already in vs0 */
nop
reg = 1
.rept 63
@@ -299,12 +299,12 @@ reg = reg + 1
mtlr r0
bctr
-/* Put the contents of vsr0 into vsrN; N is in r3. */
+/* Put the contents of vs0 into vsN; N is in r3. */
_GLOBAL(put_vsr)
mflr r0
rlwinm r3,r3,3,0x1f8
bcl 20,31,1f
- blr /* vr0 is already in vr0 */
+ blr /* v0 is already in v0 */
nop
reg = 1
.rept 63
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 170a0346f756..f7deebdf3365 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -41,6 +41,7 @@ void __spin_yield(arch_spinlock_t *lock)
plpar_hcall_norets(H_CONFER,
get_hard_smp_processor_id(holder_cpu), yield_count);
}
+EXPORT_SYMBOL_GPL(__spin_yield);
/*
* Waiting for a read lock or a write lock on a rwlock...
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0830587df16e..786234fd4e91 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -321,29 +321,29 @@ _GLOBAL(memcpy_power7)
li r11,48
bf cr7*4+3,5f
- lvx vr1,r0,r4
+ lvx v1,r0,r4
addi r4,r4,16
- stvx vr1,r0,r3
+ stvx v1,r0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
- lvx vr1,r0,r4
- lvx vr0,r4,r9
+ lvx v1,r0,r4
+ lvx v0,r4,r9
addi r4,r4,32
- stvx vr1,r0,r3
- stvx vr0,r3,r9
+ stvx v1,r0,r3
+ stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx vr3,r0,r4
- lvx vr2,r4,r9
- lvx vr1,r4,r10
- lvx vr0,r4,r11
+ lvx v3,r0,r4
+ lvx v2,r4,r9
+ lvx v1,r4,r10
+ lvx v0,r4,r11
addi r4,r4,64
- stvx vr3,r0,r3
- stvx vr2,r3,r9
- stvx vr1,r3,r10
- stvx vr0,r3,r11
+ stvx v3,r0,r3
+ stvx v2,r3,r9
+ stvx v1,r3,r10
+ stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -366,23 +366,23 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx vr7,r0,r4
- lvx vr6,r4,r9
- lvx vr5,r4,r10
- lvx vr4,r4,r11
- lvx vr3,r4,r12
- lvx vr2,r4,r14
- lvx vr1,r4,r15
- lvx vr0,r4,r16
+ lvx v7,r0,r4
+ lvx v6,r4,r9
+ lvx v5,r4,r10
+ lvx v4,r4,r11
+ lvx v3,r4,r12
+ lvx v2,r4,r14
+ lvx v1,r4,r15
+ lvx v0,r4,r16
addi r4,r4,128
- stvx vr7,r0,r3
- stvx vr6,r3,r9
- stvx vr5,r3,r10
- stvx vr4,r3,r11
- stvx vr3,r3,r12
- stvx vr2,r3,r14
- stvx vr1,r3,r15
- stvx vr0,r3,r16
+ stvx v7,r0,r3
+ stvx v6,r3,r9
+ stvx v5,r3,r10
+ stvx v4,r3,r11
+ stvx v3,r3,r12
+ stvx v2,r3,r14
+ stvx v1,r3,r15
+ stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx vr3,r0,r4
- lvx vr2,r4,r9
- lvx vr1,r4,r10
- lvx vr0,r4,r11
+ lvx v3,r0,r4
+ lvx v2,r4,r9
+ lvx v1,r4,r10
+ lvx v0,r4,r11
addi r4,r4,64
- stvx vr3,r0,r3
- stvx vr2,r3,r9
- stvx vr1,r3,r10
- stvx vr0,r3,r11
+ stvx v3,r0,r3
+ stvx v2,r3,r9
+ stvx v1,r3,r10
+ stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx vr1,r0,r4
- lvx vr0,r4,r9
+ lvx v1,r0,r4
+ lvx v0,r4,r9
addi r4,r4,32
- stvx vr1,r0,r3
- stvx vr0,r3,r9
+ stvx v1,r0,r3
+ stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx vr1,r0,r4
+ lvx v1,r0,r4
addi r4,r4,16
- stvx vr1,r0,r3
+ stvx v1,r0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -494,42 +494,42 @@ _GLOBAL(memcpy_power7)
li r10,32
li r11,48
- LVS(vr16,0,r4) /* Setup permute control vector */
- lvx vr0,0,r4
+ LVS(v16,0,r4) /* Setup permute control vector */
+ lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx vr8,r0,r3
+ stvx v8,r0,r3
addi r3,r3,16
- vor vr0,vr1,vr1
+ vor v0,v1,v1
5: bf cr7*4+2,6f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
- lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+ lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx vr8,r0,r3
- stvx vr9,r3,r9
+ stvx v8,r0,r3
+ stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
- lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
- lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
- lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+ lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+ lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+ lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx vr8,r0,r3
- stvx vr9,r3,r9
- stvx vr10,r3,r10
- stvx vr11,r3,r11
+ stvx v8,r0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -552,31 +552,31 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx vr7,r0,r4
- VPERM(vr8,vr0,vr7,vr16)
- lvx vr6,r4,r9
- VPERM(vr9,vr7,vr6,vr16)
- lvx vr5,r4,r10
- VPERM(vr10,vr6,vr5,vr16)
- lvx vr4,r4,r11
- VPERM(vr11,vr5,vr4,vr16)
- lvx vr3,r4,r12
- VPERM(vr12,vr4,vr3,vr16)
- lvx vr2,r4,r14
- VPERM(vr13,vr3,vr2,vr16)
- lvx vr1,r4,r15
- VPERM(vr14,vr2,vr1,vr16)
- lvx vr0,r4,r16
- VPERM(vr15,vr1,vr0,vr16)
+ lvx v7,r0,r4
+ VPERM(v8,v0,v7,v16)
+ lvx v6,r4,r9
+ VPERM(v9,v7,v6,v16)
+ lvx v5,r4,r10
+ VPERM(v10,v6,v5,v16)
+ lvx v4,r4,r11
+ VPERM(v11,v5,v4,v16)
+ lvx v3,r4,r12
+ VPERM(v12,v4,v3,v16)
+ lvx v2,r4,r14
+ VPERM(v13,v3,v2,v16)
+ lvx v1,r4,r15
+ VPERM(v14,v2,v1,v16)
+ lvx v0,r4,r16
+ VPERM(v15,v1,v0,v16)
addi r4,r4,128
- stvx vr8,r0,r3
- stvx vr9,r3,r9
- stvx vr10,r3,r10
- stvx vr11,r3,r11
- stvx vr12,r3,r12
- stvx vr13,r3,r14
- stvx vr14,r3,r15
- stvx vr15,r3,r16
+ stvx v8,r0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
+ stvx v12,r3,r12
+ stvx v13,r3,r14
+ stvx v14,r3,r15
+ stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
@@ -590,36 +590,36 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
- lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
- lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
- lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+ lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+ lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+ lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx vr8,r0,r3
- stvx vr9,r3,r9
- stvx vr10,r3,r10
- stvx vr11,r3,r11
+ stvx v8,r0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
- lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+ lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx vr8,r0,r3
- stvx vr9,r3,r9
+ stvx v8,r0,r3
+ stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx vr8,r0,r3
+ stvx v8,r0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index f993959647b5..c7f8e9586316 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -8,10 +8,6 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(cacheable_memcpy);
-EXPORT_SYMBOL(cacheable_memzero);
-#endif
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index a1060a868e69..69abf844c2c3 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(rh_create);
*/
void rh_destroy(rh_info_t * info)
{
- if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
+ if ((info->flags & RHIF_STATIC_BLOCK) == 0)
kfree(info->block);
if ((info->flags & RHIF_STATIC_INFO) == 0)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 438dcd3fd0d1..9c8770b5f96f 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-y += hugetlbpage.o
ifeq ($(CONFIG_HUGETLB_PAGE),y)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index d85e86aac7fb..169aba446a74 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
do {
SetPageReserved(page);
map_page(vaddr, page_to_phys(page),
- pgprot_noncached(PAGE_KERNEL));
+ pgprot_val(pgprot_noncached(PAGE_KERNEL)));
page++;
vaddr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index b46912fee7cd..9c90e66cffb6 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -181,7 +181,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
unsigned long cam_sz;
cam_sz = calc_cam_sz(ram, virt, phys);
- settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);
ram -= cam_sz;
amount_mapped += cam_sz;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2c2022d16059..fda236f908eb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1066,7 +1066,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
#endif /* CONFIG_PPC_64K_PAGES */
/* Get PTE and page size from page tables */
- ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
+ ptep = __find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
if (ptep == NULL || !pte_present(*ptep)) {
DBG_LOW(" no PTE !\n");
rc = 1;
@@ -1394,6 +1394,7 @@ tm_abort:
tm_abort(TM_CAUSE_TLBI);
}
#endif
+ return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 86686514ae13..43dafb9d6a46 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -33,7 +33,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* atomically mark the linux large page PMD busy and dirty
*/
do {
- pmd_t pmd = ACCESS_ONCE(*pmdp);
+ pmd_t pmd = READ_ONCE(*pmdp);
old_pmd = pmd_val(pmd);
/* If PMD busy, retry the access */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..3385e3d0506e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -109,7 +109,7 @@ int pgd_huge(pgd_t pgd)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
/* Only called for hugetlbfs pages, hence can ignore THP */
- return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
+ return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
+ mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -681,28 +682,42 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
} while (addr = next, addr != end);
}
+/*
+ * We are holding mmap_sem, so a parallel huge page collapse cannot run.
+ * To prevent hugepage split, disable irq.
+ */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
- pte_t *ptep;
- struct page *page;
+ pte_t *ptep, pte;
unsigned shift;
- unsigned long mask;
+ unsigned long mask, flags;
+ struct page *page = ERR_PTR(-EINVAL);
+
+ local_irq_save(flags);
+ ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+ if (!ptep)
+ goto no_page;
+ pte = READ_ONCE(*ptep);
/*
+ * Verify it is a huge page else bail.
* Transparent hugepages are handled by generic code. We can skip them
* here.
*/
- ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
-
- /* Verify it is a huge page else bail. */
- if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
- return ERR_PTR(-EINVAL);
+ if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+ goto no_page;
+ if (!pte_present(pte)) {
+ page = NULL;
+ goto no_page;
+ }
mask = (1UL << shift) - 1;
- page = pte_page(*ptep);
+ page = pte_page(pte);
if (page)
page += (address & mask) / PAGE_SIZE;
+no_page:
+ local_irq_restore(flags);
return page;
}
@@ -949,9 +964,12 @@ void flush_dcache_icache_hugepage(struct page *page)
*
* So long as we atomically load page table pointers we are safe against teardown,
* we can follow the address down to the page and take a ref on it.
+ * This function needs to be called with interrupts disabled. We use this
+ * variant when we have MSR[EE] = 0 but paca->soft_enabled = 1.
*/
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+ unsigned *shift)
{
pgd_t pgd, *pgdp;
pud_t pud, *pudp;
@@ -964,7 +982,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*shift = 0;
pgdp = pgdir + pgd_index(ea);
- pgd = ACCESS_ONCE(*pgdp);
+ pgd = READ_ONCE(*pgdp);
/*
* Always operate on the local stack value. This makes sure the
* value doesn't get updated by a parallel THP split/collapse,
@@ -1003,12 +1021,11 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
* A hugepage collapse is captured by pmd_none, because
* it marks the pmd none and does a hpte invalidate.
*
- * A hugepage split is captured by pmd_trans_splitting
- * because we mark the pmd trans splitting and do a
- * hpte invalidate
- *
+ * We don't worry about pmd_trans_splitting here; the caller,
+ * if it needs to handle the splitting case, should check
+ * for that.
*/
- if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ if (pmd_none(pmd))
return NULL;
if (pmd_huge(pmd) || pmd_large(pmd)) {
@@ -1030,7 +1047,7 @@ out:
*shift = pdshift;
return ret_pte;
}
-EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
+EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
@@ -1045,7 +1062,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
if (pte_end < end)
end = pte_end;
- pte = ACCESS_ONCE(*ptep);
+ pte = READ_ONCE(*ptep);
mask = _PAGE_PRESENT | _PAGE_USER;
if (write)
mask |= _PAGE_RW;
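In the reworked follow_huge_addr() above, mask = (1UL << shift) - 1 and page += (address & mask) / PAGE_SIZE step from the head of the compound page to the base page that actually backs the address. A worked stand-alone example of that arithmetic, assuming a 16MB huge page (shift = 24) and 4KB base pages; the address value is made up:

    #include <stdio.h>

    int main(void)
    {
        unsigned int shift = 24;              /* log2(huge page size) */
        unsigned long page_shift = 12;        /* 4KB base pages */
        unsigned long address = 0x10123456UL; /* some user VA, for illustration */
        unsigned long mask = (1UL << shift) - 1;

        /* index of the 4KB subpage inside the 16MB huge page */
        unsigned long subpage = (address & mask) >> page_shift;

        printf("subpage index = %lu (0x%lx)\n", subpage, subpage); /* 291 (0x123) */
        return 0;
    }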
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 10471f9bb63f..d747dd7bc90b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -132,6 +132,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
+ kfree(name);
pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b7285a5870f8..45fda71feb27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,7 +61,6 @@
#define CPU_FTR_NOEXECUTE 0
#endif
-int mem_init_done;
unsigned long long memory_limit;
#ifdef CONFIG_HIGHMEM
@@ -377,8 +376,6 @@ void __init mem_init(void)
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
-
- mem_init_done = 1;
}
void free_initmem(void)
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index cb8bdbe4972f..0f0502e12f6c 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -53,21 +53,20 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;
+
+ /* 8MB for 32bit, 1GB for 64bit */
+ if (is_32bit_task())
+ rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
+ else
+ rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
- if (current->flags & PF_RANDOMIZE) {
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
- else
- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
- }
return rnd << PAGE_SHIFT;
}
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -76,7 +75,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
/*
@@ -85,6 +84,11 @@ static inline unsigned long mmap_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
@@ -93,7 +97,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
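arch_mmap_rnd() above draws a random page count and shifts it back into bytes, which is where the "8MB for 32bit, 1GB for 64bit" comment comes from. A quick stand-alone check of that arithmetic, assuming 4KB pages (PAGE_SHIFT = 12):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long page_shift = 12;

        unsigned long span32 = (1UL << (23 - page_shift)) << page_shift;
        unsigned long span64 = (1UL << (30 - page_shift)) << page_shift;

        printf("32-bit randomisation span: %lu MB\n", span32 >> 20); /* 8 */
        printf("64-bit randomisation span: %lu MB\n", span64 >> 20); /* 1024 */
        return 0;
    }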
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 78c45f392f5b..085b66b10891 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -96,7 +96,7 @@ extern void _tlbia(void);
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags);
+ unsigned int size, pgprot_t prot);
extern int __map_without_bats;
extern int __allow_ioremap_reserved;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0257a7d659ef..5e80621d9324 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -958,6 +958,13 @@ void __init initmem_init(void)
memblock_dump_all();
+ /*
+ * Reduce the possible NUMA nodes to the online NUMA nodes,
+ * since we do not support node hotplug. This ensures that we
+ * lower the maximum NUMA node ID to what is actually present.
+ */
+ nodes_and(node_possible_map, node_possible_map, node_online_map);
+
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
@@ -1177,6 +1184,9 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
+
+#include "vphn.h"
+
struct topology_update_data {
struct topology_update_data *next;
unsigned int cpu;
@@ -1248,55 +1258,6 @@ static int update_cpu_associativity_changes_mask(void)
}
/*
- * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
- * the complete property we have to add the length in the first cell.
- */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
-
-/*
- * Convert the associativity domain numbers returned from the hypervisor
- * to the sequence they would appear in the ibm,associativity property.
- */
-static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
-{
- int i, nr_assoc_doms = 0;
- const __be16 *field = (const __be16 *) packed;
-
-#define VPHN_FIELD_UNUSED (0xffff)
-#define VPHN_FIELD_MSB (0x8000)
-#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
-
- for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
- if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
- /* All significant fields processed, and remaining
- * fields contain the reserved value of all 1's.
- * Just store them.
- */
- unpacked[i] = *((__be32 *)field);
- field += 2;
- } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
- /* Data is in the lower 15 bits of this field */
- unpacked[i] = cpu_to_be32(
- be16_to_cpup(field) & VPHN_FIELD_MASK);
- field++;
- nr_assoc_doms++;
- } else {
- /* Data is in the lower 15 bits of this field
- * concatenated with the next 16 bit field
- */
- unpacked[i] = *((__be32 *)field);
- field += 2;
- nr_assoc_doms++;
- }
- }
-
- /* The first cell contains the length of the property */
- unpacked[0] = cpu_to_be32(nr_assoc_doms);
-
- return nr_assoc_doms;
-}
-
-/*
* Retrieve the new associativity information for a virtual processor's
* home node.
*/
@@ -1306,11 +1267,8 @@ static long hcall_vphn(unsigned long cpu, __be32 *associativity)
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
u64 flags = 1;
int hwcpu = get_hard_smp_processor_id(cpu);
- int i;
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
- for (i = 0; i < 6; i++)
- retbuf[i] = cpu_to_be64(retbuf[i]);
vphn_unpack_associativity(retbuf, associativity);
return rc;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 03b1a3b0fbd5..7692d1bb1bc6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -54,9 +54,6 @@ extern char etext[], _stext[];
#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
-void setbat(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags);
-
#else /* !HAVE_BATS */
#define v_mapped_by_bats(x) (0UL)
#define p_mapped_by_bats(x) (0UL)
@@ -110,9 +107,8 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
- extern int mem_init_done;
- if (mem_init_done) {
+ if (slab_is_available()) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
} else {
pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
@@ -192,7 +188,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
/* Make sure we have the base flags */
if ((flags & _PAGE_PRESENT) == 0)
- flags |= PAGE_KERNEL;
+ flags |= pgprot_val(PAGE_KERNEL);
/* Non-cacheable page cannot be coherent */
if (flags & _PAGE_NO_CACHE)
@@ -219,9 +215,9 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
- if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+ if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
!(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
- printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
+ printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
@@ -247,7 +243,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
if ((v = p_mapped_by_tlbcam(p)))
goto out;
- if (mem_init_done) {
+ if (slab_is_available()) {
struct vm_struct *area;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (area == 0)
@@ -266,7 +262,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
err = map_page(v+i, p+i, flags);
if (err) {
- if (mem_init_done)
+ if (slab_is_available())
vunmap((void *)v);
return NULL;
}
@@ -327,7 +323,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
p = memstart_addr + s;
for (; s < top; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
- f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
+ f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
map_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
if (ktext)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 6957cc1ca0a7..6bfadf1aa5cb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -231,7 +231,7 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
if ((size == 0) || (paligned == 0))
return NULL;
- if (mem_init_done) {
+ if (slab_is_available()) {
struct vm_struct *area;
area = __get_vm_area_caller(size, VM_IOREMAP,
@@ -315,7 +315,7 @@ void __iounmap(volatile void __iomem *token)
{
void *addr;
- if (!mem_init_done)
+ if (!slab_is_available())
return;
addr = (void *) ((unsigned long __force)
@@ -723,7 +723,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
assert_spin_locked(&mm->page_table_lock);
WARN_ON(!pmd_trans_huge(pmd));
#endif
- trace_hugepage_set_pmd(addr, pmd);
+ trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
* hash fault look at them.
*/
memset(pgtable, 0, PTE_FRAG_SIZE);
+ /*
+ * Serialize against find_linux_pte_or_hugepte, which does a lock-less
+ * lookup in page tables with local interrupts disabled. For huge pages
+ * it casts pmd_t to pte_t. Since the format of pte_t differs from that
+ * of pmd_t, we want to prevent a transition from a pmd pointing to a
+ * page table to a pmd pointing to a huge page (and back) while
+ * interrupts are disabled. We clear the pmd so that it can be replaced
+ * with a page table pointer in different code paths, so make sure we
+ * wait for the parallel find_linux_pte_or_hugepte to finish.
+ */
+ kick_all_cpus_sync();
return old_pmd;
}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 5029dc19b517..6b2f3e457171 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -113,11 +113,12 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
* of 2 between 128k and 256M.
*/
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags)
+ unsigned int size, pgprot_t prot)
{
unsigned int bl;
int wimgxpp;
struct ppc_bat *bat = BATS[index];
+ unsigned long flags = pgprot_val(prot);
if ((flags & _PAGE_NO_CACHE) ||
(cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
@@ -224,7 +225,7 @@ void __init MMU_init_hw(void)
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
Hash = __va(memblock_alloc(Hash_size, Hash_size));
- cacheable_memzero(Hash, Hash_size);
+ memset(Hash, 0, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index d2a94b85dbc2..c522969f012d 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
continue;
pte = pte_val(*ptep);
if (hugepage_shift)
- trace_hugepage_invalidate(start, pte_val(pte));
+ trace_hugepage_invalidate(start, pte);
if (!(pte & _PAGE_HASHPTE))
continue;
if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
diff --git a/arch/powerpc/mm/vphn.c b/arch/powerpc/mm/vphn.c
new file mode 100644
index 000000000000..5f8ef50e5c66
--- /dev/null
+++ b/arch/powerpc/mm/vphn.c
@@ -0,0 +1,70 @@
+#include <asm/byteorder.h>
+#include "vphn.h"
+
+/*
+ * The associativity domain numbers are returned from the hypervisor as a
+ * stream of mixed 16-bit and 32-bit fields. The stream is terminated by the
+ * special value of "all ones" (aka. 0xffff) and its size may not exceed 48
+ * bytes.
+ *
+ * --- 16-bit fields -->
+ * _________________________
+ * | 0 | 1 | 2 | 3 | be_packed[0]
+ * ------+-----+-----+------
+ * _________________________
+ * | 4 | 5 | 6 | 7 | be_packed[1]
+ * -------------------------
+ * ...
+ * _________________________
+ * | 20 | 21 | 22 | 23 | be_packed[5]
+ * -------------------------
+ *
+ * Convert to the sequence they would appear in the ibm,associativity property.
+ */
+int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
+{
+ __be64 be_packed[VPHN_REGISTER_COUNT];
+ int i, nr_assoc_doms = 0;
+ const __be16 *field = (const __be16 *) be_packed;
+ u16 last = 0;
+ bool is_32bit = false;
+
+#define VPHN_FIELD_UNUSED (0xffff)
+#define VPHN_FIELD_MSB (0x8000)
+#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
+
+ /* Let's fix the values returned by plpar_hcall9() */
+ for (i = 0; i < VPHN_REGISTER_COUNT; i++)
+ be_packed[i] = cpu_to_be64(packed[i]);
+
+ for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
+ u16 new = be16_to_cpup(field++);
+
+ if (is_32bit) {
+ /* Let's concatenate the 16 bits of this field to the
+ * 15 lower bits of the previous field
+ */
+ unpacked[++nr_assoc_doms] =
+ cpu_to_be32(last << 16 | new);
+ is_32bit = false;
+ } else if (new == VPHN_FIELD_UNUSED)
+ /* This is the list terminator */
+ break;
+ else if (new & VPHN_FIELD_MSB) {
+ /* Data is in the lower 15 bits of this field */
+ unpacked[++nr_assoc_doms] =
+ cpu_to_be32(new & VPHN_FIELD_MASK);
+ } else {
+ /* Data is in the lower 15 bits of this field
+ * concatenated with the next 16 bit field
+ */
+ last = new;
+ is_32bit = true;
+ }
+ }
+
+ /* The first cell contains the length of the property */
+ unpacked[0] = cpu_to_be32(nr_assoc_doms);
+
+ return nr_assoc_doms;
+}
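The decode rules in vphn_unpack_associativity() above (MSB set: one 15-bit domain; MSB clear: the high half of a 32-bit domain completed by the next 16-bit field; 0xffff: list terminator) can be exercised with a small user-space sketch. The field stream below is invented purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* 0x8002          -> MSB set: one 15-bit domain, value 0x0002
         * 0x0001, 0x00f0  -> MSB clear: 32-bit domain 0x000100f0
         * 0xffff          -> terminator */
        uint16_t fields[] = { 0x8002, 0x0001, 0x00f0, 0xffff };
        uint32_t unpacked[8];
        int n = 0;

        for (unsigned i = 0; i < sizeof(fields) / sizeof(fields[0]); ) {
            uint16_t f = fields[i++];

            if (f == 0xffff)                        /* list terminator */
                break;
            if (f & 0x8000)                         /* 15-bit domain */
                unpacked[++n] = f & 0x7fff;
            else                                    /* 32-bit domain */
                unpacked[++n] = ((uint32_t)f << 16) | fields[i++];
        }
        unpacked[0] = n;                            /* first cell = length */

        for (int i = 0; i <= n; i++)
            printf("unpacked[%d] = 0x%x\n", i, unpacked[i]);
        return 0;
    }

This prints a length of 2 followed by 0x2 and 0x100f0, matching what the kernel routine would place in the ibm,associativity-style buffer for that stream.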
diff --git a/arch/powerpc/mm/vphn.h b/arch/powerpc/mm/vphn.h
new file mode 100644
index 000000000000..fe8b7805b78f
--- /dev/null
+++ b/arch/powerpc/mm/vphn.h
@@ -0,0 +1,16 @@
+#ifndef _ARCH_POWERPC_MM_VPHN_H_
+#define _ARCH_POWERPC_MM_VPHN_H_
+
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers.
+ */
+#define VPHN_REGISTER_COUNT 6
+
+/*
+ * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
+ * form the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
+
+extern int vphn_unpack_associativity(const long *packed, __be32 *unpacked);
+
+#endif
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index 266b3950c3ac..1306a58ac541 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -1,4 +1,4 @@
#
# Arch-specific network modules
#
-obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index c406aa95b2bc..889fd199a821 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -10,12 +10,25 @@
#ifndef _BPF_JIT_H
#define _BPF_JIT_H
+#ifdef CONFIG_PPC64
+#define BPF_PPC_STACK_R3_OFF 48
#define BPF_PPC_STACK_LOCALS 32
#define BPF_PPC_STACK_BASIC (48+64)
#define BPF_PPC_STACK_SAVE (18*8)
#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
BPF_PPC_STACK_SAVE)
#define BPF_PPC_SLOWPATH_FRAME (48+64)
+#else
+#define BPF_PPC_STACK_R3_OFF 24
+#define BPF_PPC_STACK_LOCALS 16
+#define BPF_PPC_STACK_BASIC (24+32)
+#define BPF_PPC_STACK_SAVE (18*4)
+#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+ BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (24+32)
+#endif
+
+#define REG_SZ (BITS_PER_LONG/8)
/*
* Generated code register usage:
@@ -57,7 +70,11 @@ DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);
DECLARE_LOAD_FUNC(sk_load_byte_msh);
+#ifdef CONFIG_PPC64
#define FUNCTION_DESCR_SIZE 24
+#else
+#define FUNCTION_DESCR_SIZE 0
+#endif
/*
* 16-bit immediate helper macros: HA() is for use with sign-extending instrs
@@ -86,7 +103,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
-
+#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
@@ -98,6 +120,17 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RA(base) | IMM_L(i))
#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \
___PPC_RA(base) | ___PPC_RB(b))
+
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+#else
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+#endif
+
/* Convenience helpers for the above with 'far' offsets: */
#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
else { PPC_ADDIS(r, base, IMM_HA(i)); \
@@ -115,6 +148,29 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
else { PPC_ADDIS(r, base, IMM_HA(i)); \
PPC_LHZ(r, r, IMM_L(i)); } } while(0)
+#ifdef CONFIG_PPC64
+#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
+#else
+#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
+#endif
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LOAD_CPU(r) \
+ do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
+ PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
+ } while (0)
+#else
+#define PPC_BPF_LOAD_CPU(r) \
+ do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
+ PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
+ offsetof(struct thread_info, cpu)); \
+ } while(0)
+#endif
+#else
+#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
+#endif
+
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
@@ -196,6 +252,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
} } while (0);
+#ifdef CONFIG_PPC64
+#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
+#else
+#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
+#endif
+
#define PPC_LHBRX_OFFS(r, base, i) \
do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
#ifdef __LITTLE_ENDIAN__
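The new 32-bit constants above mirror the 64-bit ones with half-sized registers and a smaller ABI frame; plugging them into the BPF_PPC_STACKFRAME formula gives 288 bytes on ppc64 and 144 bytes on ppc32. A stand-alone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* ppc64: BASIC = 48+64, LOCALS = 32, SAVE = 18*8 */
        int frame64 = (48 + 64) + 32 + 18 * 8;   /* 288 bytes */
        /* ppc32: BASIC = 24+32, LOCALS = 16, SAVE = 18*4 */
        int frame32 = (24 + 32) + 16 + 18 * 4;   /* 144 bytes */

        printf("ppc64 BPF JIT frame: %d bytes\n", frame64);
        printf("ppc32 BPF JIT frame: %d bytes\n", frame32);
        return 0;
    }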
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_asm.S
index 8f87d9217122..8ff5a3b5d1c3 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_asm.S
@@ -34,13 +34,13 @@
*/
.globl sk_load_word
sk_load_word:
- cmpdi r_addr, 0
+ PPC_LCMPI r_addr, 0
blt bpf_slow_path_word_neg
.globl sk_load_word_positive_offset
sk_load_word_positive_offset:
/* Are we accessing past headlen? */
subi r_scratch1, r_HL, 4
- cmpd r_scratch1, r_addr
+ PPC_LCMP r_scratch1, r_addr
blt bpf_slow_path_word
/* Nope, just hitting the header. cr0 here is eq or gt! */
#ifdef __LITTLE_ENDIAN__
@@ -52,12 +52,12 @@ sk_load_word_positive_offset:
.globl sk_load_half
sk_load_half:
- cmpdi r_addr, 0
+ PPC_LCMPI r_addr, 0
blt bpf_slow_path_half_neg
.globl sk_load_half_positive_offset
sk_load_half_positive_offset:
subi r_scratch1, r_HL, 2
- cmpd r_scratch1, r_addr
+ PPC_LCMP r_scratch1, r_addr
blt bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
lhbrx r_A, r_D, r_addr
@@ -68,11 +68,11 @@ sk_load_half_positive_offset:
.globl sk_load_byte
sk_load_byte:
- cmpdi r_addr, 0
+ PPC_LCMPI r_addr, 0
blt bpf_slow_path_byte_neg
.globl sk_load_byte_positive_offset
sk_load_byte_positive_offset:
- cmpd r_HL, r_addr
+ PPC_LCMP r_HL, r_addr
ble bpf_slow_path_byte
lbzx r_A, r_D, r_addr
blr
@@ -83,11 +83,11 @@ sk_load_byte_positive_offset:
*/
.globl sk_load_byte_msh
sk_load_byte_msh:
- cmpdi r_addr, 0
+ PPC_LCMPI r_addr, 0
blt bpf_slow_path_byte_msh_neg
.globl sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
- cmpd r_HL, r_addr
+ PPC_LCMP r_HL, r_addr
ble bpf_slow_path_byte_msh
lbzx r_X, r_D, r_addr
rlwinm r_X, r_X, 2, 32-4-2, 31-2
@@ -101,13 +101,13 @@ sk_load_byte_msh_positive_offset:
*/
#define bpf_slow_path_common(SIZE) \
mflr r0; \
- std r0, 16(r1); \
+ PPC_STL r0, PPC_LR_STKOFF(r1); \
/* R3 goes in parameter space of caller's frame */ \
- std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
- std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
- std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
- addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \
- stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
+ PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
+ PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
+ PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
+ addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \
+ PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r6, SIZE; \
@@ -115,19 +115,19 @@ sk_load_byte_msh_positive_offset:
nop; \
/* R3 = 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
- ld r0, 16(r1); \
- ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
- ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ PPC_LL r0, PPC_LR_STKOFF(r1); \
+ PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
+ PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
mtlr r0; \
- cmpdi r3, 0; \
+ PPC_LCMPI r3, 0; \
blt bpf_error; /* cr0 = LT */ \
- ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
/* Great success! */
bpf_slow_path_word:
bpf_slow_path_common(4)
/* Data value is on stack, and cr0 != LT */
- lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+ lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
blr
bpf_slow_path_half:
@@ -154,12 +154,12 @@ bpf_slow_path_byte_msh:
*/
#define sk_negative_common(SIZE) \
mflr r0; \
- std r0, 16(r1); \
+ PPC_STL r0, PPC_LR_STKOFF(r1); \
/* R3 goes in parameter space of caller's frame */ \
- std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
- std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
- std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
- stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
+ PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
+ PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
+ PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
+ PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r5, SIZE; \
@@ -167,19 +167,19 @@ bpf_slow_path_byte_msh:
nop; \
/* R3 != 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
- ld r0, 16(r1); \
- ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
- ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ PPC_LL r0, PPC_LR_STKOFF(r1); \
+ PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
+ PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
mtlr r0; \
- cmpldi r3, 0; \
+ PPC_LCMPLI r3, 0; \
beq bpf_error_slow; /* cr0 = EQ */ \
mr r_addr, r3; \
- ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
/* Great success! */
bpf_slow_path_word_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
- cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_word_negative_offset
sk_load_word_negative_offset:
@@ -189,7 +189,7 @@ sk_load_word_negative_offset:
bpf_slow_path_half_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
- cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_half_negative_offset
sk_load_half_negative_offset:
@@ -199,7 +199,7 @@ sk_load_half_negative_offset:
bpf_slow_path_byte_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
- cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_negative_offset
sk_load_byte_negative_offset:
@@ -209,7 +209,7 @@ sk_load_byte_negative_offset:
bpf_slow_path_byte_msh_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
- cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
@@ -221,7 +221,7 @@ sk_load_byte_msh_negative_offset:
bpf_error_slow:
/* fabricate a cr0 = lt */
li r_scratch1, -1
- cmpdi r_scratch1, 0
+ PPC_LCMPI r_scratch1, 0
bpf_error:
/* Entered with cr0 = lt */
li r3, 0
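
[Editorial sketch] The asm hunks above replace the 64-bit-only std/ld/stdu and cmpdi/cmpldi/cmpd forms with PPC_STL/PPC_LL/PPC_STLU and PPC_LCMPI/PPC_LCMPLI/PPC_LCMP, and scale every stack offset by REG_SZ, so the same slow-path helpers assemble on both ppc32 and ppc64. The macro definitions themselves are not part of this diff; the following is only a minimal sketch of how such width-selecting macros are conventionally defined, not a copy of the patched headers:

/* Illustrative sketch: select native-word load/store/compare forms. */
#ifdef __powerpc64__
#define REG_SZ		8	/* bytes per general-purpose register */
#define PPC_STL		std	/* store native word                  */
#define PPC_STLU	stdu	/* store native word with update      */
#define PPC_LL		ld	/* load native word                   */
#define PPC_LCMPI	cmpdi	/* signed compare immediate           */
#define PPC_LCMPLI	cmpldi	/* unsigned compare immediate         */
#define PPC_LCMP	cmpd	/* signed compare                     */
#else
#define REG_SZ		4
#define PPC_STL		stw
#define PPC_STLU	stwu
#define PPC_LL		lwz
#define PPC_LCMPI	cmpwi
#define PPC_LCMPLI	cmplwi
#define PPC_LCMP	cmpw
#endif
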
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d1916b577f2c..17cea18a09d3 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1,8 +1,9 @@
-/* bpf_jit_comp.c: BPF JIT compiler for PPC64
+/* bpf_jit_comp.c: BPF JIT compiler
*
* Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
*
* Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
+ * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
if (ctx->seen & SEEN_DATAREF) {
/* If we call any helpers (for loads), save LR */
EMIT(PPC_INST_MFLR | __PPC_RT(R0));
- PPC_STD(0, 1, 16);
+ PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
/* Back up non-volatile regs. */
- PPC_STD(r_D, 1, -(8*(32-r_D)));
- PPC_STD(r_HL, 1, -(8*(32-r_HL)));
+ PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
+ PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
}
if (ctx->seen & SEEN_MEM) {
/*
@@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
*/
for (i = r_M; i < (r_M+16); i++) {
if (ctx->seen & (1 << (i-r_M)))
- PPC_STD(i, 1, -(8*(32-i)));
+ PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
}
}
- EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
- (-BPF_PPC_STACKFRAME & 0xfffc));
+ PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
}
if (ctx->seen & SEEN_DATAREF) {
@@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
data_len));
PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
PPC_SUB(r_HL, r_HL, r_scratch1);
- PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
+ PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
}
if (ctx->seen & SEEN_XREG) {
@@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
if (ctx->seen & SEEN_DATAREF) {
- PPC_LD(0, 1, 16);
+ PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
PPC_MTLR(0);
- PPC_LD(r_D, 1, -(8*(32-r_D)));
- PPC_LD(r_HL, 1, -(8*(32-r_HL)));
+ PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
+ PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
}
if (ctx->seen & SEEN_MEM) {
/* Restore any saved non-vol registers */
for (i = r_M; i < (r_M+16); i++) {
if (ctx->seen & (1 << (i-r_M)))
- PPC_LD(i, 1, -(8*(32-i)));
+ PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
}
}
}
@@ -355,7 +355,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
type) != 2);
- PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+ PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
dev));
PPC_CMPDI(r_scratch1, 0);
if (ctx->pc_ret0 != -1) {
@@ -411,20 +411,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_SRWI(r_A, r_A, 5);
break;
case BPF_ANC | SKF_AD_CPU:
-#ifdef CONFIG_SMP
- /*
- * PACA ptr is r13:
- * raw_smp_processor_id() = local_paca->paca_index
- */
- BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
- paca_index) != 2);
- PPC_LHZ_OFFS(r_A, 13,
- offsetof(struct paca_struct, paca_index));
-#else
- PPC_LI(r_A, 0);
-#endif
+ PPC_BPF_LOAD_CPU(r_A);
break;
-
/*** Absolute loads from packet header/data ***/
case BPF_LD | BPF_W | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, sk_load_word);
@@ -437,7 +425,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
common_load:
/* Load from [K]. */
ctx->seen |= SEEN_DATAREF;
- PPC_LI64(r_scratch1, func);
+ PPC_FUNC_ADDR(r_scratch1, func);
PPC_MTLR(r_scratch1);
PPC_LI32(r_addr, K);
PPC_BLRL();
@@ -463,7 +451,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
* in the helper functions.
*/
ctx->seen |= SEEN_DATAREF | SEEN_XREG;
- PPC_LI64(r_scratch1, func);
+ PPC_FUNC_ADDR(r_scratch1, func);
PPC_MTLR(r_scratch1);
PPC_ADDI(r_addr, r_X, IMM_L(K));
if (K >= 32768)
@@ -685,9 +673,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
if (image) {
bpf_flush_icache(code_base, code_base + (proglen/4));
+#ifdef CONFIG_PPC64
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[1] = local_paca->kernel_toc;
+#endif
fp->bpf_func = (void *)image;
fp->jited = true;
}
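
[Editorial sketch] The last hunk above guards the function-descriptor writes with CONFIG_PPC64: on 64-bit ABIs that use descriptors, a C function pointer such as fp->bpf_func refers to a small record holding the code address and the TOC value, while on ppc32 it is simply the code address, so nothing extra is needed. A rough sketch of the record those two stores populate; the type and field names here are illustrative, not taken from this diff:

/* Sketch of a ppc64 function descriptor (descriptor-based ABIs). */
struct bpf_func_desc {			/* hypothetical name                    */
	unsigned long entry;		/* ((u64 *)image)[0] = code_base        */
	unsigned long toc;		/* ((u64 *)image)[1] = kernel TOC (r2)  */
};
/* Calling fp->bpf_func through an ordinary function pointer then loads
 * the TOC before branching to the JITed code that follows the
 * descriptor in the same image. */
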
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 1c27831df1ac..ed7b0977072a 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -22,6 +22,7 @@
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/file.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
@@ -322,18 +323,20 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
unsigned long app_cookie = 0;
unsigned int my_offset = 0;
struct vm_area_struct *vma;
+ struct file *exe_file;
struct mm_struct *mm = spu->mm;
if (!mm)
goto out;
- down_read(&mm->mmap_sem);
-
- if (mm->exe_file) {
- app_cookie = fast_get_dcookie(&mm->exe_file->f_path);
- pr_debug("got dcookie for %pD\n", mm->exe_file);
+ exe_file = get_mm_exe_file(mm);
+ if (exe_file) {
+ app_cookie = fast_get_dcookie(&exe_file->f_path);
+ pr_debug("got dcookie for %pD\n", exe_file);
+ fput(exe_file);
}
+ down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
continue;
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 2396dda282cd..ff09cde20cd2 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -111,41 +111,45 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
* interrupt context, so if the access faults, we read the page tables
* to find which page (if any) is mapped and access it directly.
*/
-static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
+static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
+ int ret = -EFAULT;
pgd_t *pgdir;
pte_t *ptep, pte;
unsigned shift;
unsigned long addr = (unsigned long) ptr;
unsigned long offset;
- unsigned long pfn;
+ unsigned long pfn, flags;
void *kaddr;
pgdir = current->mm->pgd;
if (!pgdir)
return -EFAULT;
+ local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
+ if (!ptep)
+ goto err_out;
if (!shift)
shift = PAGE_SHIFT;
/* align address to page boundary */
offset = addr & ((1UL << shift) - 1);
- addr -= offset;
- if (ptep == NULL)
- return -EFAULT;
- pte = *ptep;
+ pte = READ_ONCE(*ptep);
if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
- return -EFAULT;
+ goto err_out;
pfn = pte_pfn(pte);
if (!page_is_ram(pfn))
- return -EFAULT;
+ goto err_out;
/* no highmem to worry about here */
kaddr = pfn_to_kaddr(pfn);
- memcpy(ret, kaddr + offset, nb);
- return 0;
+ memcpy(buf, kaddr + offset, nb);
+ ret = 0;
+err_out:
+ local_irq_restore(flags);
+ return ret;
}
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
@@ -243,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
- for (;;) {
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned long __user *) sp;
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
return;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7c4f6690533a..12b638425bb9 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
-static void power_pmu_flush_branch_stack(void) {}
+static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
@@ -350,6 +350,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
cpuhw->bhrb_context = event->ctx;
}
cpuhw->bhrb_users++;
+ perf_sched_cb_inc(event->ctx->pmu);
}
static void power_pmu_bhrb_disable(struct perf_event *event)
@@ -361,6 +362,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
cpuhw->bhrb_users--;
WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+ perf_sched_cb_dec(event->ctx->pmu);
if (!cpuhw->disabled && !cpuhw->bhrb_users) {
/* BHRB cannot be turned off when other
@@ -375,9 +377,12 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries to
* mingle with the other process's entries during context switch.
*/
-static void power_pmu_flush_branch_stack(void)
+static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
- if (ppmu->bhrb_nr)
+ if (!ppmu->bhrb_nr)
+ return;
+
+ if (sched_in)
power_pmu_bhrb_reset();
}
/* Calculate the to address for a branch */
@@ -1832,8 +1837,10 @@ static int power_pmu_event_init(struct perf_event *event)
cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
- if(cpuhw->bhrb_filter == -1)
+ if (cpuhw->bhrb_filter == -1) {
+ put_cpu_var(cpu_hw_events);
return -EOPNOTSUPP;
+ }
}
put_cpu_var(cpu_hw_events);
@@ -1901,7 +1908,7 @@ static struct pmu power_pmu = {
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx,
- .flush_branch_stack = power_pmu_flush_branch_stack,
+ .sched_task = power_pmu_sched_task,
};
/*
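
[Editorial sketch] The core-book3s.c hunks above move branch-stack flushing from the removed flush_branch_stack hook to the generic sched_task callback, pairing it with perf_sched_cb_inc()/perf_sched_cb_dec() so the core only invokes the callback while at least one BHRB user exists. A minimal sketch of that pairing for a hypothetical PMU driver, assuming only the callback API visible in the hunks:

#include <linux/perf_event.h>

/* Hypothetical driver skeleton: the per-context-switch callback is only
 * called while the inc/dec count for this pmu is non-zero. */
static void my_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (sched_in)
		;	/* reset/flush hardware state here, e.g. the BHRB */
}

static void my_pmu_branch_user_add(struct perf_event *event)
{
	perf_sched_cb_inc(event->ctx->pmu);	/* callback now runs on ctxsw */
}

static void my_pmu_branch_user_del(struct perf_event *event)
{
	perf_sched_cb_dec(event->ctx->pmu);	/* stops once count reaches 0 */
}

static struct pmu my_pmu = {
	/* ... add/del/start/stop/read ... */
	.sched_task	= my_pmu_sched_task,
};
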
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 9445a824819e..ec2eb20631d1 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -142,6 +142,15 @@ static struct attribute_group event_long_desc_group = {
static struct kmem_cache *hv_page_cache;
+/*
+ * request_buffer and result_buffer are not required to be 4k aligned,
+ * but are not allowed to cross any 4k boundary. Aligning them to 4k is
+ * the simplest way to ensure that.
+ */
+#define H24x7_DATA_BUFFER_SIZE 4096
+DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+
static char *event_name(struct hv_24x7_event_data *ev, int *len)
{
*len = be16_to_cpu(ev->event_name_len) - 2;
@@ -152,6 +161,7 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len)
{
unsigned nl = be16_to_cpu(ev->event_name_len);
__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
+
*len = be16_to_cpu(*desc_len) - 2;
return (char *)ev->remainder + nl;
}
@@ -162,6 +172,7 @@ static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
unsigned desc_len = be16_to_cpu(*desc_len_);
__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
+
*len = be16_to_cpu(*long_desc_len) - 2;
return (char *)ev->remainder + nl + desc_len;
}
@@ -239,14 +250,12 @@ static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
unsigned long index)
{
pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
- phys_4096,
- version,
- index);
+ phys_4096, version, index);
+
WARN_ON(!IS_ALIGNED(phys_4096, 4096));
+
return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
- phys_4096,
- version,
- index);
+ phys_4096, version, index);
}
static unsigned long h_get_24x7_catalog_page(char page[],
@@ -300,6 +309,7 @@ static ssize_t device_show_string(struct device *dev,
struct dev_ext_attribute *d;
d = container_of(attr, struct dev_ext_attribute, attr);
+
return sprintf(buf, "%s\n", (char *)d->var);
}
@@ -314,6 +324,7 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
attr->attr.attr.name = name;
attr->attr.attr.mode = 0444;
attr->attr.show = device_show_string;
+
return &attr->attr.attr;
}
@@ -387,7 +398,6 @@ static struct attribute *event_to_attr(unsigned ix,
a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
(int)event_name_len, ev_name, ev_suffix, nonce);
-
if (!a_ev_name)
goto out_val;
@@ -637,7 +647,7 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
#define MAX_4K (SIZE_MAX / 4096)
-static void create_events_from_catalog(struct attribute ***events_,
+static int create_events_from_catalog(struct attribute ***events_,
struct attribute ***event_descs_,
struct attribute ***event_long_descs_)
{
@@ -655,19 +665,25 @@ static void create_events_from_catalog(struct attribute ***events_,
void *event_data, *end;
struct hv_24x7_event_data *event;
struct rb_root ev_uniq = RB_ROOT;
+ int ret = 0;
- if (!page)
+ if (!page) {
+ ret = -ENOMEM;
goto e_out;
+ }
hret = h_get_24x7_catalog_page(page, 0, 0);
- if (hret)
+ if (hret) {
+ ret = -EIO;
goto e_free;
+ }
catalog_version_num = be64_to_cpu(page_0->version);
catalog_page_len = be32_to_cpu(page_0->length);
if (MAX_4K < catalog_page_len) {
pr_err("invalid page count: %zu\n", catalog_page_len);
+ ret = -EIO;
goto e_free;
}
@@ -686,6 +702,7 @@ static void create_events_from_catalog(struct attribute ***events_,
|| (MAX_4K - event_data_offs < event_data_len)) {
pr_err("invalid event data offs %zu and/or len %zu\n",
event_data_offs, event_data_len);
+ ret = -EIO;
goto e_free;
}
@@ -694,12 +711,14 @@ static void create_events_from_catalog(struct attribute ***events_,
event_data_offs,
event_data_offs + event_data_len,
catalog_page_len);
+ ret = -EIO;
goto e_free;
}
if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
pr_err("event_entry_count %zu is invalid\n",
event_entry_count);
+ ret = -EIO;
goto e_free;
}
@@ -712,6 +731,7 @@ static void create_events_from_catalog(struct attribute ***events_,
event_data = vmalloc(event_data_bytes);
if (!event_data) {
pr_err("could not allocate event data\n");
+ ret = -ENOMEM;
goto e_free;
}
@@ -731,6 +751,7 @@ static void create_events_from_catalog(struct attribute ***events_,
if (hret) {
pr_err("failed to get event data in page %zu\n",
i + event_data_offs);
+ ret = -EIO;
goto e_event_data;
}
}
@@ -778,18 +799,24 @@ static void create_events_from_catalog(struct attribute ***events_,
event_idx_last, event_entry_count, junk_events);
events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
- if (!events)
+ if (!events) {
+ ret = -ENOMEM;
goto e_event_data;
+ }
event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
GFP_KERNEL);
- if (!event_descs)
+ if (!event_descs) {
+ ret = -ENOMEM;
goto e_event_attrs;
+ }
event_long_descs = kmalloc_array(event_idx + 1,
sizeof(*event_long_descs), GFP_KERNEL);
- if (!event_long_descs)
+ if (!event_long_descs) {
+ ret = -ENOMEM;
goto e_event_descs;
+ }
/* Iterate over the catalog filling in the attribute vector */
for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
@@ -843,7 +870,7 @@ static void create_events_from_catalog(struct attribute ***events_,
*events_ = events;
*event_descs_ = event_descs;
*event_long_descs_ = event_long_descs;
- return;
+ return 0;
e_event_descs:
kfree(event_descs);
@@ -857,6 +884,7 @@ e_out:
*events_ = NULL;
*event_descs_ = NULL;
*event_long_descs_ = NULL;
+ return ret;
}
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
@@ -872,6 +900,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
uint64_t catalog_version_num = 0;
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
struct hv_24x7_catalog_page_0 *page_0 = page;
+
if (!page)
return -ENOMEM;
@@ -976,31 +1005,104 @@ static const struct attribute_group *attr_groups[] = {
NULL,
};
-DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
-DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
+static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
+ struct hv_24x7_data_result_buffer *result_buffer,
+ unsigned long ret)
+{
+ struct hv_24x7_request *req;
+
+ req = &request_buffer->requests[0];
+ pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => "
+ "ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
+ req->performance_domain, req->data_offset,
+ req->starting_ix, req->starting_lpar_ix, ret, ret,
+ result_buffer->detailed_rc,
+ result_buffer->failing_request_ix);
+}
+
+/*
+ * Start the process for a new H_GET_24x7_DATA hcall.
+ */
+static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
+ struct hv_24x7_data_result_buffer *result_buffer)
+{
+
+ memset(request_buffer, 0, 4096);
+ memset(result_buffer, 0, 4096);
+
+ request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
+ /* memset above set request_buffer->num_requests to 0 */
+}
-static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
- u16 lpar, u64 *res,
- bool success_expected)
+/*
+ * Commit (i.e perform) the H_GET_24x7_DATA hcall using the data collected
+ * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
+ */
+static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
+ struct hv_24x7_data_result_buffer *result_buffer)
{
unsigned long ret;
/*
- * request_buffer and result_buffer are not required to be 4k aligned,
- * but are not allowed to cross any 4k boundary. Aligning them to 4k is
- * the simplest way to ensure that.
+ * NOTE: Due to variable number of array elements in request and
+ * result buffer(s), sizeof() is not reliable. Use the actual
+ * allocated buffer size, H24x7_DATA_BUFFER_SIZE.
*/
- struct reqb {
- struct hv_24x7_request_buffer buf;
- struct hv_24x7_request req;
- } __packed *request_buffer;
-
- struct {
- struct hv_24x7_data_result_buffer buf;
- struct hv_24x7_result res;
- struct hv_24x7_result_element elem;
- __be64 result;
- } __packed *result_buffer;
+ ret = plpar_hcall_norets(H_GET_24X7_DATA,
+ virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
+ virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
+
+ if (ret)
+ log_24x7_hcall(request_buffer, result_buffer, ret);
+
+ return ret;
+}
+
+/*
+ * Add the given @event to the next slot in the 24x7 request_buffer.
+ *
+ * Note that H_GET_24X7_DATA hcall allows reading several counters'
+ * values in a single HCALL. We expect the caller to add events to the
+ * request buffer one by one, make the HCALL and process the results.
+ */
+static int add_event_to_24x7_request(struct perf_event *event,
+ struct hv_24x7_request_buffer *request_buffer)
+{
+ u16 idx;
+ int i;
+ struct hv_24x7_request *req;
+
+ if (request_buffer->num_requests > 254) {
+ pr_devel("Too many requests for 24x7 HCALL %d\n",
+ request_buffer->num_requests);
+ return -EINVAL;
+ }
+
+ if (is_physical_domain(event_get_domain(event)))
+ idx = event_get_core(event);
+ else
+ idx = event_get_vcpu(event);
+
+ i = request_buffer->num_requests++;
+ req = &request_buffer->requests[i];
+
+ req->performance_domain = event_get_domain(event);
+ req->data_size = cpu_to_be16(8);
+ req->data_offset = cpu_to_be32(event_get_offset(event));
+ req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)),
+ req->max_num_lpars = cpu_to_be16(1);
+ req->starting_ix = cpu_to_be16(idx);
+ req->max_ix = cpu_to_be16(1);
+
+ return 0;
+}
+
+static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
+{
+ unsigned long ret;
+ struct hv_24x7_request_buffer *request_buffer;
+ struct hv_24x7_data_result_buffer *result_buffer;
+ struct hv_24x7_result *resb;
BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
@@ -1008,63 +1110,28 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
result_buffer = (void *)get_cpu_var(hv_24x7_resb);
- memset(request_buffer, 0, 4096);
- memset(result_buffer, 0, 4096);
-
- *request_buffer = (struct reqb) {
- .buf = {
- .interface_version = HV_24X7_IF_VERSION_CURRENT,
- .num_requests = 1,
- },
- .req = {
- .performance_domain = domain,
- .data_size = cpu_to_be16(8),
- .data_offset = cpu_to_be32(offset),
- .starting_lpar_ix = cpu_to_be16(lpar),
- .max_num_lpars = cpu_to_be16(1),
- .starting_ix = cpu_to_be16(ix),
- .max_ix = cpu_to_be16(1),
- }
- };
+ init_24x7_request(request_buffer, result_buffer);
- ret = plpar_hcall_norets(H_GET_24X7_DATA,
- virt_to_phys(request_buffer), sizeof(*request_buffer),
- virt_to_phys(result_buffer), sizeof(*result_buffer));
+ ret = add_event_to_24x7_request(event, request_buffer);
+ if (ret)
+ goto out;
+ ret = make_24x7_request(request_buffer, result_buffer);
if (ret) {
- if (success_expected)
- pr_err_ratelimited("hcall failed: %d %#x %#x %d => "
- "0x%lx (%ld) detail=0x%x failing ix=%x\n",
- domain, offset, ix, lpar, ret, ret,
- result_buffer->buf.detailed_rc,
- result_buffer->buf.failing_request_ix);
+ log_24x7_hcall(request_buffer, result_buffer, ret);
goto out;
}
- *res = be64_to_cpu(result_buffer->result);
+ /* process result from hcall */
+ resb = &result_buffer->results[0];
+ *count = be64_to_cpu(resb->elements[0].element_data[0]);
out:
+ put_cpu_var(hv_24x7_reqb);
+ put_cpu_var(hv_24x7_resb);
return ret;
}
-static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
- bool success_expected)
-{
- u16 idx;
- unsigned domain = event_get_domain(event);
-
- if (is_physical_domain(domain))
- idx = event_get_core(event);
- else
- idx = event_get_vcpu(event);
-
- return single_24x7_request(event_get_domain(event),
- event_get_offset(event),
- idx,
- event_get_lpar(event),
- res,
- success_expected);
-}
static int h_24x7_event_init(struct perf_event *event)
{
@@ -1126,14 +1193,14 @@ static int h_24x7_event_init(struct perf_event *event)
/* Physical domains & other lpars require extra capabilities */
if (!caps.collect_privileged && (is_physical_domain(domain) ||
(event_get_lpar(event) != event_get_lpar_max()))) {
- pr_devel("hv permisions disallow: is_physical_domain:%d, lpar=0x%llx\n",
+ pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
is_physical_domain(domain),
event_get_lpar(event));
return -EACCES;
}
/* see if the event complains */
- if (event_24x7_request(event, &ct, false)) {
+ if (single_24x7_request(event, &ct)) {
pr_devel("test hcall failed\n");
return -EIO;
}
@@ -1145,7 +1212,7 @@ static u64 h_24x7_get_value(struct perf_event *event)
{
unsigned long ret;
u64 ct;
- ret = event_24x7_request(event, &ct, true);
+ ret = single_24x7_request(event, &ct);
if (ret)
/* We checked this in event init, shouldn't fail here... */
return 0;
@@ -1153,15 +1220,22 @@ static u64 h_24x7_get_value(struct perf_event *event)
return ct;
}
-static void h_24x7_event_update(struct perf_event *event)
+static void update_event_count(struct perf_event *event, u64 now)
{
s64 prev;
- u64 now;
- now = h_24x7_get_value(event);
+
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
+static void h_24x7_event_read(struct perf_event *event)
+{
+ u64 now;
+
+ now = h_24x7_get_value(event);
+ update_event_count(event, now);
+}
+
static void h_24x7_event_start(struct perf_event *event, int flags)
{
if (flags & PERF_EF_RELOAD)
@@ -1170,7 +1244,7 @@ static void h_24x7_event_start(struct perf_event *event, int flags)
static void h_24x7_event_stop(struct perf_event *event, int flags)
{
- h_24x7_event_update(event);
+ h_24x7_event_read(event);
}
static int h_24x7_event_add(struct perf_event *event, int flags)
@@ -1191,7 +1265,7 @@ static struct pmu h_24x7_pmu = {
.del = h_24x7_event_stop,
.start = h_24x7_event_start,
.stop = h_24x7_event_stop,
- .read = h_24x7_event_update,
+ .read = h_24x7_event_read,
};
static int hv_24x7_init(void)
@@ -1219,10 +1293,13 @@ static int hv_24x7_init(void)
/* sampling not supported */
h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
- create_events_from_catalog(&event_group.attrs,
+ r = create_events_from_catalog(&event_group.attrs,
&event_desc_group.attrs,
&event_long_desc_group.attrs);
+ if (r)
+ return r;
+
r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
if (r)
return r;
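
[Editorial sketch] single_24x7_request() above only ever places one event in the buffer, but the refactoring splits the work into init_24x7_request(), add_event_to_24x7_request() and make_24x7_request() precisely so that several counters can be read with one H_GET_24X7_DATA hcall. A hedged sketch of what such a batched caller could look like, reusing the per-CPU buffers exactly as single_24x7_request() does; read_two_counters() is a hypothetical helper, and only the first result is decoded because later results are variable-sized, as the header below warns:

/* Hypothetical caller built only from the helpers introduced above. */
static int read_two_counters(struct perf_event *a, struct perf_event *b,
			     u64 *count_a)
{
	struct hv_24x7_request_buffer *request_buffer;
	struct hv_24x7_data_result_buffer *result_buffer;
	int rc;

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	init_24x7_request(request_buffer, result_buffer);

	rc = add_event_to_24x7_request(a, request_buffer);
	if (!rc)
		rc = add_event_to_24x7_request(b, request_buffer);
	if (!rc)
		rc = make_24x7_request(request_buffer, result_buffer);
	if (!rc)	/* decode only the first (fixed-offset) result */
		*count_a = be64_to_cpu(
		    result_buffer->results[0].elements[0].element_data[0]);

	put_cpu_var(hv_24x7_reqb);
	put_cpu_var(hv_24x7_resb);
	return rc;
}
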
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 69cd4e690f58..0f9fa21a29f2 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -50,7 +50,7 @@ struct hv_24x7_request_buffer {
__u8 interface_version;
__u8 num_requests;
__u8 reserved[0xE];
- struct hv_24x7_request requests[];
+ struct hv_24x7_request requests[1];
} __packed;
struct hv_24x7_result_element {
@@ -66,7 +66,7 @@ struct hv_24x7_result_element {
__be32 lpar_cfg_instance_id;
/* size = @result_element_data_size of cointaining result. */
- __u8 element_data[];
+ __u64 element_data[1];
} __packed;
struct hv_24x7_result {
@@ -87,7 +87,7 @@ struct hv_24x7_result {
/* WARNING: only valid for first result element due to variable sizes
* of result elements */
/* struct hv_24x7_result_element[@num_elements_returned] */
- struct hv_24x7_result_element elements[];
+ struct hv_24x7_result_element elements[1];
} __packed;
struct hv_24x7_data_result_buffer {
@@ -103,7 +103,7 @@ struct hv_24x7_data_result_buffer {
__u8 reserved2[0x8];
/* WARNING: only valid for the first result due to variable sizes of
* results */
- struct hv_24x7_result results[]; /* [@num_results] */
+ struct hv_24x7_result results[1]; /* [@num_results] */
} __packed;
#endif
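
[Editorial sketch] Switching the trailing members from flexible arrays ([]) to one-element arrays ([1]) gives each nested structure a complete type that can itself be used as an array element, and it changes what sizeof() reports: one element of each trailing array is now included, while the real payload still varies at run time. That is why make_24x7_request() above passes the fixed H24x7_DATA_BUFFER_SIZE to the hcall rather than sizeof(), as its comment notes. A small, generic illustration with placeholder types rather than the 24x7 structures:

#include <stdio.h>

struct with_flex { int n; long data[]; };	/* sizeof counts no elements  */
struct with_one  { int n; long data[1]; };	/* sizeof counts one element  */

int main(void)
{
	/* Neither value is the size of a populated buffer; the runtime
	 * length must be tracked separately (here, H24x7_DATA_BUFFER_SIZE). */
	printf("flex=%zu one=%zu\n",
	       sizeof(struct with_flex), sizeof(struct with_one));
	return 0;
}
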
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 4a9ad871a168..7bfb9b184dd4 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -40,6 +40,7 @@ static const struct of_device_id mpc85xx_common_ids[] __initconst = {
{ .compatible = "fsl,qoriq-pcie-v2.4", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
{ .compatible = "fsl,qoriq-pcie-v2.2", },
+ { .compatible = "fsl,fman", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 1f309ccb096e..9824d2cf79bd 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -88,6 +88,15 @@ static const struct of_device_id of_device_ids[] = {
.compatible = "simple-bus"
},
{
+ .compatible = "mdio-mux-gpio"
+ },
+ {
+ .compatible = "fsl,fpga-ngpixis"
+ },
+ {
+ .compatible = "fsl,fpga-qixis"
+ },
+ {
.compatible = "fsl,srio",
},
{
@@ -108,6 +117,9 @@ static const struct of_device_id of_device_ids[] = {
{
.compatible = "fsl,qe",
},
+ {
+ .compatible = "fsl,fman",
+ },
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 7a180f0308d5..680232d6ba48 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -50,14 +50,14 @@ void p1022rdk_set_pixel_clock(unsigned int pixclock)
/* Map the global utilities registers. */
guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
if (!guts_np) {
- pr_err("p1022rdk: missing global utilties device node\n");
+ pr_err("p1022rdk: missing global utilities device node\n");
return;
}
guts = of_iomap(guts_np, 0);
of_node_put(guts_np);
if (!guts) {
- pr_err("p1022rdk: could not map global utilties device\n");
+ pr_err("p1022rdk: could not map global utilities device\n");
return;
}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d7c1e69f3070..8631ac5f0e57 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -360,10 +360,10 @@ static void mpc85xx_smp_kexec_down(void *arg)
static void map_and_flush(unsigned long paddr)
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
- unsigned long kaddr = (unsigned long)kmap(page);
+ unsigned long kaddr = (unsigned long)kmap_atomic(page);
flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
- kunmap(page);
+ kunmap_atomic((void *)kaddr);
}
/**
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 391b3f6b54a3..b7f9c408bf24 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -72,11 +72,6 @@ config PPC_SMP_MUXED_IPI
cpu. This will enable the generic code to multiplex the 4
messages on to one ipi.
-config PPC_UDBG_BEAT
- bool "BEAT based debug console"
- depends on PPC_CELLEB
- default n
-
config IPIC
bool
default n
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 76483e3acd60..7264e91190be 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -2,6 +2,7 @@ config PPC64
bool "64-bit kernel"
default n
select HAVE_VIRT_CPU_ACCOUNTING
+ select ZLIB_DEFLATE
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
@@ -15,7 +16,7 @@ choice
The most common ones are the desktop and server CPUs (601, 603,
604, 740, 750, 74xx) CPUs from Freescale and IBM, with their
embedded 512x/52xx/82xx/83xx/86xx counterparts.
- The other embeeded parts, namely 4xx, 8xx, e200 (55xx) and e500
+ The other embedded parts, namely 4xx, 8xx, e200 (55xx) and e500
(85xx) each form a family of their own that is not compatible
with the others.
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 870b6dbd4d18..2f23133ab3d1 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -33,17 +33,6 @@ config PPC_IBM_CELL_BLADE
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
-config PPC_CELLEB
- bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
- depends on PPC64 && PPC_BOOK3S
- select PPC_CELL_NATIVE
- select PPC_OF_PLATFORM_PCI
- select PCI
- select HAS_TXX9_SERIAL
- select PPC_UDBG_BEAT
- select USB_OHCI_BIG_ENDIAN_MMIO
- select USB_EHCI_BIG_ENDIAN_MMIO
-
config PPC_CELL_QPACE
bool "IBM Cell - QPACE"
depends on PPC64 && PPC_BOOK3S
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 2d16884f67b9..34699bddfddd 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -29,18 +29,3 @@ obj-$(CONFIG_AXON_MSI) += axon_msi.o
# qpace setup
obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
-
-# celleb stuff
-ifeq ($(CONFIG_PPC_CELLEB),y)
-obj-y += celleb_setup.o \
- celleb_pci.o celleb_scc_epci.o \
- celleb_scc_pciex.o \
- celleb_scc_uhc.o \
- spider-pci.o beat.o beat_htab.o \
- beat_hvCall.o beat_interrupt.o \
- beat_iommu.o
-
-obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
-obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
-obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
-endif
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
deleted file mode 100644
index affcf566d460..000000000000
--- a/arch/powerpc/platforms/cell/beat.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Simple routines for Celleb/Beat
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/rtc.h>
-#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/reboot.h>
-
-#include <asm/hvconsole.h>
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/firmware.h>
-
-#include "beat_wrapper.h"
-#include "beat.h"
-#include "beat_interrupt.h"
-
-static int beat_pm_poweroff_flag;
-
-void beat_restart(char *cmd)
-{
- beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
-}
-
-void beat_power_off(void)
-{
- beat_shutdown_logical_partition(0);
-}
-
-u64 beat_halt_code = 0x1000000000000000UL;
-EXPORT_SYMBOL(beat_halt_code);
-
-void beat_halt(void)
-{
- beat_shutdown_logical_partition(beat_halt_code);
-}
-
-int beat_set_rtc_time(struct rtc_time *rtc_time)
-{
- u64 tim;
- tim = mktime(rtc_time->tm_year+1900,
- rtc_time->tm_mon+1, rtc_time->tm_mday,
- rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
- if (beat_rtc_write(tim))
- return -1;
- return 0;
-}
-
-void beat_get_rtc_time(struct rtc_time *rtc_time)
-{
- u64 tim;
-
- if (beat_rtc_read(&tim))
- tim = 0;
- to_tm(tim, rtc_time);
- rtc_time->tm_year -= 1900;
- rtc_time->tm_mon -= 1;
-}
-
-#define BEAT_NVRAM_SIZE 4096
-
-ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
-{
- unsigned int i;
- unsigned long len;
- char *p = buf;
-
- if (*index >= BEAT_NVRAM_SIZE)
- return -ENODEV;
- i = *index;
- if (i + count > BEAT_NVRAM_SIZE)
- count = BEAT_NVRAM_SIZE - i;
-
- for (; count != 0; count -= len) {
- len = count;
- if (len > BEAT_NVRW_CNT)
- len = BEAT_NVRW_CNT;
- if (beat_eeprom_read(i, len, p))
- return -EIO;
-
- p += len;
- i += len;
- }
- *index = i;
- return p - buf;
-}
-
-ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
-{
- unsigned int i;
- unsigned long len;
- char *p = buf;
-
- if (*index >= BEAT_NVRAM_SIZE)
- return -ENODEV;
- i = *index;
- if (i + count > BEAT_NVRAM_SIZE)
- count = BEAT_NVRAM_SIZE - i;
-
- for (; count != 0; count -= len) {
- len = count;
- if (len > BEAT_NVRW_CNT)
- len = BEAT_NVRW_CNT;
- if (beat_eeprom_write(i, len, p))
- return -EIO;
-
- p += len;
- i += len;
- }
- *index = i;
- return p - buf;
-}
-
-ssize_t beat_nvram_get_size(void)
-{
- return BEAT_NVRAM_SIZE;
-}
-
-int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
-{
- if (beat_set_dabr(dabr, dabrx))
- return -1;
- return 0;
-}
-
-int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
-{
- u64 db[2];
- s64 ret;
-
- ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
- if (ret == 0) {
- *t1 = db[0];
- *t2 = db[1];
- }
- return ret;
-}
-EXPORT_SYMBOL(beat_get_term_char);
-
-int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
-{
- u64 db[2];
-
- db[0] = t1;
- db[1] = t2;
- return beat_put_characters_to_console(vterm, len, (u8 *)db);
-}
-EXPORT_SYMBOL(beat_put_term_char);
-
-void beat_power_save(void)
-{
- beat_pause(0);
-}
-
-#ifdef CONFIG_KEXEC
-void beat_kexec_cpu_down(int crash, int secondary)
-{
- beatic_deinit_IRQ();
-}
-#endif
-
-static irqreturn_t beat_power_event(int virq, void *arg)
-{
- printk(KERN_DEBUG "Beat: power button pressed\n");
- beat_pm_poweroff_flag = 1;
- ctrl_alt_del();
- return IRQ_HANDLED;
-}
-
-static irqreturn_t beat_reset_event(int virq, void *arg)
-{
- printk(KERN_DEBUG "Beat: reset button pressed\n");
- beat_pm_poweroff_flag = 0;
- ctrl_alt_del();
- return IRQ_HANDLED;
-}
-
-static struct beat_event_list {
- const char *typecode;
- irq_handler_t handler;
- unsigned int virq;
-} beat_event_list[] = {
- { "power", beat_power_event, 0 },
- { "reset", beat_reset_event, 0 },
-};
-
-static int __init beat_register_event(void)
-{
- u64 path[4], data[2];
- int rc, i;
- unsigned int virq;
-
- for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
- struct beat_event_list *ev = &beat_event_list[i];
-
- if (beat_construct_event_receive_port(data) != 0) {
- printk(KERN_ERR "Beat: "
- "cannot construct event receive port for %s\n",
- ev->typecode);
- return -EINVAL;
- }
-
- virq = irq_create_mapping(NULL, data[0]);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Beat: failed to get virtual IRQ"
- " for event receive port for %s\n",
- ev->typecode);
- beat_destruct_event_receive_port(data[0]);
- return -EIO;
- }
- ev->virq = virq;
-
- rc = request_irq(virq, ev->handler, 0,
- ev->typecode, NULL);
- if (rc != 0) {
- printk(KERN_ERR "Beat: failed to request virtual IRQ"
- " for event receive port for %s\n",
- ev->typecode);
- beat_destruct_event_receive_port(data[0]);
- return rc;
- }
-
- path[0] = 0x1000000065780000ul; /* 1,ex */
- path[1] = 0x627574746f6e0000ul; /* button */
- path[2] = 0;
- strncpy((char *)&path[2], ev->typecode, 8);
- path[3] = 0;
- data[1] = 0;
-
- beat_create_repository_node(path, data);
- }
- return 0;
-}
-
-static int __init beat_event_init(void)
-{
- if (!firmware_has_feature(FW_FEATURE_BEAT))
- return -EINVAL;
-
- beat_pm_poweroff_flag = 0;
- return beat_register_event();
-}
-
-device_initcall(beat_event_init);
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
deleted file mode 100644
index bfcb8e351ae5..000000000000
--- a/arch/powerpc/platforms/cell/beat.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Guest OS Interfaces.
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_BEAT_H
-#define _CELLEB_BEAT_H
-
-int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
-int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
-int64_t beat_repository_encode(int, const char *, uint64_t[4]);
-void beat_restart(char *);
-void beat_power_off(void);
-void beat_halt(void);
-int beat_set_rtc_time(struct rtc_time *);
-void beat_get_rtc_time(struct rtc_time *);
-ssize_t beat_nvram_get_size(void);
-ssize_t beat_nvram_read(char *, size_t, loff_t *);
-ssize_t beat_nvram_write(char *, size_t, loff_t *);
-int beat_set_xdabr(unsigned long, unsigned long);
-void beat_power_save(void);
-void beat_kexec_cpu_down(int, int);
-
-#endif /* _CELLEB_BEAT_H */
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
deleted file mode 100644
index bee9232fe619..000000000000
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * "Cell Reference Set" HTAB support.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/lpar.c:
- * Copyright (C) 2001 Todd Inglett, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG_LOW
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-
-#include <asm/mmu.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-
-#include "beat_wrapper.h"
-
-#ifdef DEBUG_LOW
-#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while (0)
-#else
-#define DBG_LOW(fmt...) do { } while (0)
-#endif
-
-static DEFINE_RAW_SPINLOCK(beat_htab_lock);
-
-static inline unsigned int beat_read_mask(unsigned hpte_group)
-{
- unsigned long rmask = 0;
- u64 hpte_v[5];
-
- beat_read_htab_entries(0, hpte_group + 0, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x8000;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x4000;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x2000;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x1000;
- beat_read_htab_entries(0, hpte_group + 4, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x0800;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x0400;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x0200;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x0100;
- hpte_group = ~hpte_group & (htab_hash_mask * HPTES_PER_GROUP);
- beat_read_htab_entries(0, hpte_group + 0, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x80;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x40;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x20;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x10;
- beat_read_htab_entries(0, hpte_group + 4, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x08;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x04;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x02;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x01;
- return rmask;
-}
-
-static long beat_lpar_hpte_insert(unsigned long hpte_group,
- unsigned long vpn, unsigned long pa,
- unsigned long rflags, unsigned long vflags,
- int psize, int apsize, int ssize)
-{
- unsigned long lpar_rc;
- u64 hpte_v, hpte_r, slot;
-
- if (vflags & HPTE_V_SECONDARY)
- return -1;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
-
- hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
- vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
-
- if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~HPTE_R_M;
-
- raw_spin_lock(&beat_htab_lock);
- lpar_rc = beat_read_mask(hpte_group);
- if (lpar_rc == 0) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" full\n");
- raw_spin_unlock(&beat_htab_lock);
- return -1;
- }
-
- lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
- hpte_v, hpte_r, &slot);
- raw_spin_unlock(&beat_htab_lock);
-
- /*
- * Since we try and ioremap PHBs we don't own, the pte insert
- * will fail. However we must catch the failure in hash_page
- * or we will loop forever, so return -2 in this case.
- */
- if (unlikely(lpar_rc != 0)) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" lpar err %lx\n", lpar_rc);
- return -2;
- }
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" -> slot: %lx\n", slot);
-
- /* We have to pass down the secondary bucket bit here as well */
- return (slot ^ hpte_group) & 15;
-}
-
-static long beat_lpar_hpte_remove(unsigned long hpte_group)
-{
- DBG_LOW("hpte_remove(group=%lx)\n", hpte_group);
- return -1;
-}
-
-static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
-{
- unsigned long dword0;
- unsigned long lpar_rc;
- u64 dword[5];
-
- lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
-
- dword0 = dword[slot&3];
-
- BUG_ON(lpar_rc != 0);
-
- return dword0;
-}
-
-static void beat_lpar_hptab_clear(void)
-{
- unsigned long size_bytes = 1UL << ppc64_pft_size;
- unsigned long hpte_count = size_bytes >> 4;
- int i;
- u64 dummy0, dummy1;
-
- /* TODO: Use bulk call */
- for (i = 0; i < hpte_count; i++)
- beat_write_htab_entry(0, i, 0, 0, -1UL, -1UL, &dummy0, &dummy1);
-}
-
-/*
- * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
- * the low 3 bits of flags happen to line up. So no transform is needed.
- * We can probably optimize here and assume the high bits of newpp are
- * already zero. For now I am paranoid.
- */
-static long beat_lpar_hpte_updatepp(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int psize, int apsize,
- int ssize, unsigned long flags)
-{
- unsigned long lpar_rc;
- u64 dummy0, dummy1;
- unsigned long want_v;
-
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- DBG_LOW(" update: "
- "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
- want_v & HPTE_V_AVPN, slot, psize, newpp);
-
- raw_spin_lock(&beat_htab_lock);
- dummy0 = beat_lpar_hpte_getword0(slot);
- if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
- DBG_LOW("not found !\n");
- raw_spin_unlock(&beat_htab_lock);
- return -1;
- }
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
- &dummy1);
- raw_spin_unlock(&beat_htab_lock);
- if (lpar_rc != 0 || dummy0 == 0) {
- DBG_LOW("not found !\n");
- return -1;
- }
-
- DBG_LOW("ok %lx %lx\n", dummy0, dummy1);
-
- BUG_ON(lpar_rc != 0);
-
- return 0;
-}
-
-static long beat_lpar_hpte_find(unsigned long vpn, int psize)
-{
- unsigned long hash;
- unsigned long i, j;
- long slot;
- unsigned long want_v, hpte_v;
-
- hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- for (j = 0; j < 2; j++) {
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- for (i = 0; i < HPTES_PER_GROUP; i++) {
- hpte_v = beat_lpar_hpte_getword0(slot);
-
- if (HPTE_V_COMPARE(hpte_v, want_v)
- && (hpte_v & HPTE_V_VALID)
- && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
- /* HPTE matches */
- if (j)
- slot = -slot;
- return slot;
- }
- ++slot;
- }
- hash = ~hash;
- }
-
- return -1;
-}
-
-static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
- unsigned long ea,
- int psize, int ssize)
-{
- unsigned long vpn;
- unsigned long lpar_rc, slot, vsid;
- u64 dummy0, dummy1;
-
- vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
- vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
-
- raw_spin_lock(&beat_htab_lock);
- slot = beat_lpar_hpte_find(vpn, psize);
- BUG_ON(slot == -1);
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
- &dummy0, &dummy1);
- raw_spin_unlock(&beat_htab_lock);
-
- BUG_ON(lpar_rc != 0);
-}
-
-static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
- int psize, int apsize,
- int ssize, int local)
-{
- unsigned long want_v;
- unsigned long lpar_rc;
- u64 dummy1, dummy2;
- unsigned long flags;
-
- DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
- slot, va, psize, local);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- raw_spin_lock_irqsave(&beat_htab_lock, flags);
- dummy1 = beat_lpar_hpte_getword0(slot);
-
- if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
- DBG_LOW("not found !\n");
- raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
- return;
- }
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
- &dummy1, &dummy2);
- raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
-
- BUG_ON(lpar_rc != 0);
-}
-
-void __init hpte_init_beat(void)
-{
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
-}
-
-static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
- unsigned long vpn, unsigned long pa,
- unsigned long rflags, unsigned long vflags,
- int psize, int apsize, int ssize)
-{
- unsigned long lpar_rc;
- u64 hpte_v, hpte_r, slot;
-
- if (vflags & HPTE_V_SECONDARY)
- return -1;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, vpn, pa, rflags, vflags, psize);
-
- hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
- vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
-
- if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~HPTE_R_M;
-
- /* insert into not-volted entry */
- lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
- HPTE_V_BOLTED, 0, &slot);
- /*
- * Since we try and ioremap PHBs we don't own, the pte insert
- * will fail. However we must catch the failure in hash_page
- * or we will loop forever, so return -2 in this case.
- */
- if (unlikely(lpar_rc != 0)) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" lpar err %lx\n", lpar_rc);
- return -2;
- }
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" -> slot: %lx\n", slot);
-
- /* We have to pass down the secondary bucket bit here as well */
- return (slot ^ hpte_group) & 15;
-}
-
-/*
- * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
- * the low 3 bits of flags happen to line up. So no transform is needed.
- * We can probably optimize here and assume the high bits of newpp are
- * already zero. For now I am paranoid.
- */
-static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int psize, int apsize,
- int ssize, unsigned long flags)
-{
- unsigned long lpar_rc;
- unsigned long want_v;
- unsigned long pss;
-
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
-
- DBG_LOW(" update: "
- "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
- want_v & HPTE_V_AVPN, slot, psize, newpp);
-
- lpar_rc = beat_update_htab_permission3(0, slot, want_v, pss, 7, newpp);
-
- if (lpar_rc == 0xfffffff7) {
- DBG_LOW("not found !\n");
- return -1;
- }
-
- DBG_LOW("ok\n");
-
- BUG_ON(lpar_rc != 0);
-
- return 0;
-}
-
-static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
- int psize, int apsize,
- int ssize, int local)
-{
- unsigned long want_v;
- unsigned long lpar_rc;
- unsigned long pss;
-
- DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
- slot, vpn, psize, local);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
-
- lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
-
- /* E_busy can be valid output: page may be already replaced */
- BUG_ON(lpar_rc != 0 && lpar_rc != 0xfffffff7);
-}
-
-static int64_t _beat_lpar_hptab_clear_v3(void)
-{
- return beat_clear_htab3(0);
-}
-
-static void beat_lpar_hptab_clear_v3(void)
-{
- _beat_lpar_hptab_clear_v3();
-}
-
-void __init hpte_init_beat_v3(void)
-{
- if (_beat_lpar_hptab_clear_v3() == 0) {
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate_v3;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp_v3;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert_v3;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear_v3;
- } else {
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
- }
-}
diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S
deleted file mode 100644
index 96c801907126..000000000000
--- a/arch/powerpc/platforms/cell/beat_hvCall.S
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Beat hypervisor call I/F
- *
- * (C) Copyright 2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/hvCall.S.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <asm/ppc_asm.h>
-
-/* Not implemented on Beat, now */
-#define HCALL_INST_PRECALL
-#define HCALL_INST_POSTCALL
-
- .text
-
-#define HVSC .long 0x44000022
-
-/* Note: takes only 7 input parameters at maximum */
-_GLOBAL(beat_hcall_norets)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- mr r11,r3
- mr r3,r4
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes 8 input parameters at maximum */
-_GLOBAL(beat_hcall_norets8)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- mr r11,r3
- mr r3,r4
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
- ld r10,STK_PARAM(R10)(r1)
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 1 output parameters at maximum */
-_GLOBAL(beat_hcall1)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 2 output parameters at maximum */
-_GLOBAL(beat_hcall2)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 3 output parameters at maximum */
-_GLOBAL(beat_hcall3)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 4 output parameters at maximum */
-_GLOBAL(beat_hcall4)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 5 output parameters at maximum */
-_GLOBAL(beat_hcall5)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
- std r8, 32(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 6 output parameters at maximum */
-_GLOBAL(beat_hcall6)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
- std r8, 32(r12)
- std r9, 40(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
deleted file mode 100644
index 9e5dfbcc00af..000000000000
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Celleb/Beat Interrupt controller
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/percpu.h>
-#include <linux/types.h>
-
-#include <asm/machdep.h>
-
-#include "beat_interrupt.h"
-#include "beat_wrapper.h"
-
-#define MAX_IRQS NR_IRQS
-static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
-static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
-static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
-
-static struct irq_domain *beatic_host;
-
-/*
- * In this implementation, "virq" == "IRQ plug number",
- * "(irq_hw_number_t)hwirq" == "IRQ outlet number".
- */
-
-/* assumption: locked */
-static inline void beatic_update_irq_mask(unsigned int irq_plug)
-{
- int off;
- unsigned long masks[4];
-
- off = (irq_plug / 256) * 4;
- masks[0] = beatic_irq_mask_enable[off + 0]
- & beatic_irq_mask_ack[off + 0];
- masks[1] = beatic_irq_mask_enable[off + 1]
- & beatic_irq_mask_ack[off + 1];
- masks[2] = beatic_irq_mask_enable[off + 2]
- & beatic_irq_mask_ack[off + 2];
- masks[3] = beatic_irq_mask_enable[off + 3]
- & beatic_irq_mask_ack[off + 3];
- if (beat_set_interrupt_mask(irq_plug&~255UL,
- masks[0], masks[1], masks[2], masks[3]) != 0)
- panic("Failed to set mask IRQ!");
-}
-
-static void beatic_mask_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_unmask_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_ack_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_end_irq(struct irq_data *d)
-{
- s64 err;
- unsigned long flags;
-
- err = beat_downcount_of_interrupt(d->irq);
- if (err != 0) {
- if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
- panic("Failed to downcount IRQ! Error = %16llx", err);
-
- printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq);
- }
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static struct irq_chip beatic_pic = {
- .name = "CELL-BEAT",
- .irq_unmask = beatic_unmask_irq,
- .irq_mask = beatic_mask_irq,
- .irq_eoi = beatic_end_irq,
-};
-
-/*
- * Dispose binding hardware IRQ number (hw) and Virtual IRQ number (virq),
- * update flags.
- *
- * Note that the number (virq) is already assigned at upper layer.
- */
-static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
-{
- beat_destruct_irq_plug(virq);
-}
-
-/*
- * Create or update binding hardware IRQ number (hw) and Virtual
- * IRQ number (virq). This is called only once for a given mapping.
- *
- * Note that the number (virq) is already assigned at upper layer.
- */
-static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- int64_t err;
-
- err = beat_construct_and_connect_irq_plug(virq, hw);
- if (err < 0)
- return -EIO;
-
- irq_set_status_flags(virq, IRQ_LEVEL);
- irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
- return 0;
-}
-
-/*
- * Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
- * to pass away to irq_create_mapping().
- *
- * Called from irq_create_of_mapping() only.
- * Note: We have only 1 entry to translate.
- */
-static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_flags)
-{
- const u64 *intspec2 = (const u64 *)intspec;
-
- *out_hwirq = *intspec2;
- *out_flags |= IRQ_TYPE_LEVEL_LOW;
- return 0;
-}
-
-static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
-{
- /* Match all */
- return 1;
-}
-
-static const struct irq_domain_ops beatic_pic_host_ops = {
- .map = beatic_pic_host_map,
- .unmap = beatic_pic_host_unmap,
- .xlate = beatic_pic_host_xlate,
- .match = beatic_pic_host_match,
-};
-
-/*
- * Get an IRQ number
- * Note: returns VIRQ
- */
-static inline unsigned int beatic_get_irq_plug(void)
-{
- int i;
- uint64_t pending[4], ub;
-
- for (i = 0; i < MAX_IRQS; i += 256) {
- beat_detect_pending_interrupts(i, pending);
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[0] & beatic_irq_mask_enable[i/64+0]
- & beatic_irq_mask_ack[i/64+0]));
- if (ub != 64)
- return i + ub + 0;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[1] & beatic_irq_mask_enable[i/64+1]
- & beatic_irq_mask_ack[i/64+1]));
- if (ub != 64)
- return i + ub + 64;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[2] & beatic_irq_mask_enable[i/64+2]
- & beatic_irq_mask_ack[i/64+2]));
- if (ub != 64)
- return i + ub + 128;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[3] & beatic_irq_mask_enable[i/64+3]
- & beatic_irq_mask_ack[i/64+3]));
- if (ub != 64)
- return i + ub + 192;
- }
-
- return NO_IRQ;
-}
-unsigned int beatic_get_irq(void)
-{
- unsigned int ret;
-
- ret = beatic_get_irq_plug();
- if (ret != NO_IRQ)
- beatic_ack_irq(irq_get_irq_data(ret));
- return ret;
-}
-
-/*
- */
-void __init beatic_init_IRQ(void)
-{
- int i;
-
- memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable));
- memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack));
- for (i = 0; i < MAX_IRQS; i += 256)
- beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L);
-
-	/* Set our get_irq function */
- ppc_md.get_irq = beatic_get_irq;
-
- /* Allocate an irq host */
- beatic_host = irq_domain_add_nomap(NULL, ~0, &beatic_pic_host_ops, NULL);
- BUG_ON(beatic_host == NULL);
- irq_set_default_host(beatic_host);
-}
-
-void beatic_deinit_IRQ(void)
-{
- int i;
-
- for (i = 1; i < nr_irqs; i++)
- beat_destruct_irq_plug(i);
-}
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
deleted file mode 100644
index 3ce685568935..000000000000
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Support for IOMMU on Celleb platform.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-
-#include "beat_wrapper.h"
-
-#define DMA_FLAGS 0xf800000000000000UL /* r/w permitted, coherency required,
- strongest order */
-
-static int __init find_dma_window(u64 *io_space_id, u64 *ioid,
- u64 *base, u64 *size, u64 *io_page_size)
-{
- struct device_node *dn;
- const unsigned long *dma_window;
-
- for_each_node_by_type(dn, "ioif") {
- dma_window = of_get_property(dn, "toshiba,dma-window", NULL);
- if (dma_window) {
- *io_space_id = (dma_window[0] >> 32) & 0xffffffffUL;
- *ioid = dma_window[0] & 0x7ffUL;
- *base = dma_window[1];
- *size = dma_window[2];
- *io_page_size = 1 << dma_window[3];
- of_node_put(dn);
- return 1;
- }
- }
- return 0;
-}
-
-static unsigned long celleb_dma_direct_offset;
-
-static void __init celleb_init_direct_mapping(void)
-{
- u64 lpar_addr, io_addr;
- u64 io_space_id, ioid, dma_base, dma_size, io_page_size;
-
- if (!find_dma_window(&io_space_id, &ioid, &dma_base, &dma_size,
- &io_page_size)) {
- pr_info("No dma window found !\n");
- return;
- }
-
- for (lpar_addr = 0; lpar_addr < dma_size; lpar_addr += io_page_size) {
- io_addr = lpar_addr + dma_base;
- (void)beat_put_iopte(io_space_id, io_addr, lpar_addr,
- ioid, DMA_FLAGS);
- }
-
- celleb_dma_direct_offset = dma_base;
-}
-
-static void celleb_dma_dev_setup(struct device *dev)
-{
- set_dma_ops(dev, &dma_direct_ops);
- set_dma_offset(dev, celleb_dma_direct_offset);
-}
-
-static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
-{
- celleb_dma_dev_setup(&pdev->dev);
-}
-
-static int celleb_of_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
-
-	/* We are only interested in device addition */
- if (action != BUS_NOTIFY_ADD_DEVICE)
- return 0;
-
- celleb_dma_dev_setup(dev);
-
- return 0;
-}
-
-static struct notifier_block celleb_of_bus_notifier = {
- .notifier_call = celleb_of_bus_notify
-};
-
-static int __init celleb_init_iommu(void)
-{
- celleb_init_direct_mapping();
- ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
- bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
-
- return 0;
-}
-
-machine_arch_initcall(celleb_beat, celleb_init_iommu);
diff --git a/arch/powerpc/platforms/cell/beat_spu_priv1.c b/arch/powerpc/platforms/cell/beat_spu_priv1.c
deleted file mode 100644
index 13f52589d3a9..000000000000
--- a/arch/powerpc/platforms/cell/beat_spu_priv1.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * spu hypervisor abstraction for Beat
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <asm/types.h>
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-#include "beat_wrapper.h"
-
-static inline void _int_mask_set(struct spu *spu, int class, u64 mask)
-{
- spu->shadow_int_mask_RW[class] = mask;
- beat_set_irq_mask_for_spe(spu->spe_id, class, mask);
-}
-
-static inline u64 _int_mask_get(struct spu *spu, int class)
-{
- return spu->shadow_int_mask_RW[class];
-}
-
-static void int_mask_set(struct spu *spu, int class, u64 mask)
-{
- _int_mask_set(spu, class, mask);
-}
-
-static u64 int_mask_get(struct spu *spu, int class)
-{
- return _int_mask_get(spu, class);
-}
-
-static void int_mask_and(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
- old_mask = _int_mask_get(spu, class);
- _int_mask_set(spu, class, old_mask & mask);
-}
-
-static void int_mask_or(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
- old_mask = _int_mask_get(spu, class);
- _int_mask_set(spu, class, old_mask | mask);
-}
-
-static void int_stat_clear(struct spu *spu, int class, u64 stat)
-{
- beat_clear_interrupt_status_of_spe(spu->spe_id, class, stat);
-}
-
-static u64 int_stat_get(struct spu *spu, int class)
-{
- u64 int_stat;
- beat_get_interrupt_status_of_spe(spu->spe_id, class, &int_stat);
- return int_stat;
-}
-
-static void cpu_affinity_set(struct spu *spu, int cpu)
-{
- return;
-}
-
-static u64 mfc_dar_get(struct spu *spu)
-{
- u64 dar;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dar_RW), &dar);
- return dar;
-}
-
-static u64 mfc_dsisr_get(struct spu *spu)
-{
- u64 dsisr;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dsisr_RW), &dsisr);
- return dsisr;
-}
-
-static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dsisr_RW), dsisr);
-}
-
-static void mfc_sdr_setup(struct spu *spu)
-{
- return;
-}
-
-static void mfc_sr1_set(struct spu *spu, u64 sr1)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_sr1_RW), sr1);
-}
-
-static u64 mfc_sr1_get(struct spu *spu)
-{
- u64 sr1;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_sr1_RW), &sr1);
- return sr1;
-}
-
-static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_tclass_id_RW), tclass_id);
-}
-
-static u64 mfc_tclass_id_get(struct spu *spu)
-{
- u64 tclass_id;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_tclass_id_RW), &tclass_id);
- return tclass_id;
-}
-
-static void tlb_invalidate(struct spu *spu)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, tlb_invalidate_entry_W), 0ul);
-}
-
-static void resource_allocation_groupID_set(struct spu *spu, u64 id)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_groupID_RW),
- id);
-}
-
-static u64 resource_allocation_groupID_get(struct spu *spu)
-{
- u64 id;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_groupID_RW),
- &id);
- return id;
-}
-
-static void resource_allocation_enable_set(struct spu *spu, u64 enable)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_enable_RW),
- enable);
-}
-
-static u64 resource_allocation_enable_get(struct spu *spu)
-{
- u64 enable;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_enable_RW),
- &enable);
- return enable;
-}
-
-const struct spu_priv1_ops spu_priv1_beat_ops = {
- .int_mask_and = int_mask_and,
- .int_mask_or = int_mask_or,
- .int_mask_set = int_mask_set,
- .int_mask_get = int_mask_get,
- .int_stat_clear = int_stat_clear,
- .int_stat_get = int_stat_get,
- .cpu_affinity_set = cpu_affinity_set,
- .mfc_dar_get = mfc_dar_get,
- .mfc_dsisr_get = mfc_dsisr_get,
- .mfc_dsisr_set = mfc_dsisr_set,
- .mfc_sdr_setup = mfc_sdr_setup,
- .mfc_sr1_set = mfc_sr1_set,
- .mfc_sr1_get = mfc_sr1_get,
- .mfc_tclass_id_set = mfc_tclass_id_set,
- .mfc_tclass_id_get = mfc_tclass_id_get,
- .tlb_invalidate = tlb_invalidate,
- .resource_allocation_groupID_set = resource_allocation_groupID_set,
- .resource_allocation_groupID_get = resource_allocation_groupID_get,
- .resource_allocation_enable_set = resource_allocation_enable_set,
- .resource_allocation_enable_get = resource_allocation_enable_get,
-};
diff --git a/arch/powerpc/platforms/cell/beat_syscall.h b/arch/powerpc/platforms/cell/beat_syscall.h
deleted file mode 100644
index 8580dc7e1798..000000000000
--- a/arch/powerpc/platforms/cell/beat_syscall.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Beat hypervisor call numbers
- *
- * (C) Copyright 2004-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef BEAT_BEAT_syscall_H
-#define BEAT_BEAT_syscall_H
-
-#ifdef __ASSEMBLY__
-#define __BEAT_ADD_VENDOR_ID(__x, __v) ((__v)<<60|(__x))
-#else
-#define __BEAT_ADD_VENDOR_ID(__x, __v) ((u64)(__v)<<60|(__x))
-#endif
-#define HV_allocate_memory __BEAT_ADD_VENDOR_ID(0, 0)
-#define HV_construct_virtual_address_space __BEAT_ADD_VENDOR_ID(2, 0)
-#define HV_destruct_virtual_address_space __BEAT_ADD_VENDOR_ID(10, 0)
-#define HV_get_virtual_address_space_id_of_ppe __BEAT_ADD_VENDOR_ID(4, 0)
-#define HV_query_logical_partition_address_region_info \
- __BEAT_ADD_VENDOR_ID(6, 0)
-#define HV_release_memory __BEAT_ADD_VENDOR_ID(13, 0)
-#define HV_select_virtual_address_space __BEAT_ADD_VENDOR_ID(7, 0)
-#define HV_load_range_registers __BEAT_ADD_VENDOR_ID(68, 0)
-#define HV_set_ppe_l2cache_rmt_entry __BEAT_ADD_VENDOR_ID(70, 0)
-#define HV_set_ppe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(71, 0)
-#define HV_set_spe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(72, 0)
-#define HV_get_io_address_translation_fault_info __BEAT_ADD_VENDOR_ID(14, 0)
-#define HV_get_iopte __BEAT_ADD_VENDOR_ID(16, 0)
-#define HV_preload_iopt_cache __BEAT_ADD_VENDOR_ID(17, 0)
-#define HV_put_iopte __BEAT_ADD_VENDOR_ID(15, 0)
-#define HV_connect_event_ports __BEAT_ADD_VENDOR_ID(21, 0)
-#define HV_construct_event_receive_port __BEAT_ADD_VENDOR_ID(18, 0)
-#define HV_destruct_event_receive_port __BEAT_ADD_VENDOR_ID(19, 0)
-#define HV_destruct_event_send_port __BEAT_ADD_VENDOR_ID(22, 0)
-#define HV_get_state_of_event_send_port __BEAT_ADD_VENDOR_ID(25, 0)
-#define HV_request_to_connect_event_ports __BEAT_ADD_VENDOR_ID(20, 0)
-#define HV_send_event_externally __BEAT_ADD_VENDOR_ID(23, 0)
-#define HV_send_event_locally __BEAT_ADD_VENDOR_ID(24, 0)
-#define HV_construct_and_connect_irq_plug __BEAT_ADD_VENDOR_ID(28, 0)
-#define HV_destruct_irq_plug __BEAT_ADD_VENDOR_ID(29, 0)
-#define HV_detect_pending_interrupts __BEAT_ADD_VENDOR_ID(26, 0)
-#define HV_end_of_interrupt __BEAT_ADD_VENDOR_ID(27, 0)
-#define HV_assign_control_signal_notification_port __BEAT_ADD_VENDOR_ID(45, 0)
-#define HV_end_of_control_signal_processing __BEAT_ADD_VENDOR_ID(48, 0)
-#define HV_get_control_signal __BEAT_ADD_VENDOR_ID(46, 0)
-#define HV_set_irq_mask_for_spe __BEAT_ADD_VENDOR_ID(61, 0)
-#define HV_shutdown_logical_partition __BEAT_ADD_VENDOR_ID(44, 0)
-#define HV_connect_message_ports __BEAT_ADD_VENDOR_ID(35, 0)
-#define HV_destruct_message_port __BEAT_ADD_VENDOR_ID(36, 0)
-#define HV_receive_message __BEAT_ADD_VENDOR_ID(37, 0)
-#define HV_get_message_port_info __BEAT_ADD_VENDOR_ID(34, 0)
-#define HV_request_to_connect_message_ports __BEAT_ADD_VENDOR_ID(33, 0)
-#define HV_send_message __BEAT_ADD_VENDOR_ID(32, 0)
-#define HV_get_logical_ppe_id __BEAT_ADD_VENDOR_ID(69, 0)
-#define HV_pause __BEAT_ADD_VENDOR_ID(9, 0)
-#define HV_destruct_shared_memory_handle __BEAT_ADD_VENDOR_ID(51, 0)
-#define HV_get_shared_memory_info __BEAT_ADD_VENDOR_ID(52, 0)
-#define HV_permit_sharing_memory __BEAT_ADD_VENDOR_ID(50, 0)
-#define HV_request_to_attach_shared_memory __BEAT_ADD_VENDOR_ID(49, 0)
-#define HV_enable_logical_spe_execution __BEAT_ADD_VENDOR_ID(55, 0)
-#define HV_construct_logical_spe __BEAT_ADD_VENDOR_ID(53, 0)
-#define HV_disable_logical_spe_execution __BEAT_ADD_VENDOR_ID(56, 0)
-#define HV_destruct_logical_spe __BEAT_ADD_VENDOR_ID(54, 0)
-#define HV_sense_spe_execution_status __BEAT_ADD_VENDOR_ID(58, 0)
-#define HV_insert_htab_entry __BEAT_ADD_VENDOR_ID(101, 0)
-#define HV_read_htab_entries __BEAT_ADD_VENDOR_ID(95, 0)
-#define HV_write_htab_entry __BEAT_ADD_VENDOR_ID(94, 0)
-#define HV_assign_io_address_translation_fault_port \
- __BEAT_ADD_VENDOR_ID(100, 0)
-#define HV_set_interrupt_mask __BEAT_ADD_VENDOR_ID(73, 0)
-#define HV_get_logical_partition_id __BEAT_ADD_VENDOR_ID(74, 0)
-#define HV_create_repository_node2 __BEAT_ADD_VENDOR_ID(90, 0)
-#define HV_create_repository_node __BEAT_ADD_VENDOR_ID(90, 0) /* alias */
-#define HV_get_repository_node_value2 __BEAT_ADD_VENDOR_ID(91, 0)
-#define HV_get_repository_node_value __BEAT_ADD_VENDOR_ID(91, 0) /* alias */
-#define HV_modify_repository_node_value2 __BEAT_ADD_VENDOR_ID(92, 0)
-#define HV_modify_repository_node_value __BEAT_ADD_VENDOR_ID(92, 0) /* alias */
-#define HV_remove_repository_node2 __BEAT_ADD_VENDOR_ID(93, 0)
-#define HV_remove_repository_node __BEAT_ADD_VENDOR_ID(93, 0) /* alias */
-#define HV_cancel_shared_memory __BEAT_ADD_VENDOR_ID(104, 0)
-#define HV_clear_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(206, 0)
-#define HV_construct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(80, 0)
-#define HV_destruct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(81, 0)
-#define HV_disconnect_ipspc_service __BEAT_ADD_VENDOR_ID(88, 0)
-#define HV_execute_ipspc_command __BEAT_ADD_VENDOR_ID(86, 0)
-#define HV_get_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(205, 0)
-#define HV_get_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(208, 0)
-#define HV_permit_use_of_ipspc_service __BEAT_ADD_VENDOR_ID(85, 0)
-#define HV_reinitialize_logical_spe __BEAT_ADD_VENDOR_ID(82, 0)
-#define HV_request_ipspc_service __BEAT_ADD_VENDOR_ID(84, 0)
-#define HV_stop_ipspc_command __BEAT_ADD_VENDOR_ID(87, 0)
-#define HV_set_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(204, 0)
-#define HV_get_status_of_ipspc_service __BEAT_ADD_VENDOR_ID(203, 0)
-#define HV_put_characters_to_console __BEAT_ADD_VENDOR_ID(0x101, 1)
-#define HV_get_characters_from_console __BEAT_ADD_VENDOR_ID(0x102, 1)
-#define HV_get_base_clock __BEAT_ADD_VENDOR_ID(0x111, 1)
-#define HV_set_base_clock __BEAT_ADD_VENDOR_ID(0x112, 1)
-#define HV_get_frame_cycle __BEAT_ADD_VENDOR_ID(0x114, 1)
-#define HV_disable_console __BEAT_ADD_VENDOR_ID(0x115, 1)
-#define HV_disable_all_console __BEAT_ADD_VENDOR_ID(0x116, 1)
-#define HV_oneshot_timer __BEAT_ADD_VENDOR_ID(0x117, 1)
-#define HV_set_dabr __BEAT_ADD_VENDOR_ID(0x118, 1)
-#define HV_get_dabr __BEAT_ADD_VENDOR_ID(0x119, 1)
-#define HV_start_hv_stats __BEAT_ADD_VENDOR_ID(0x21c, 1)
-#define HV_stop_hv_stats __BEAT_ADD_VENDOR_ID(0x21d, 1)
-#define HV_get_hv_stats __BEAT_ADD_VENDOR_ID(0x21e, 1)
-#define HV_get_hv_error_stats __BEAT_ADD_VENDOR_ID(0x221, 1)
-#define HV_get_stats __BEAT_ADD_VENDOR_ID(0x224, 1)
-#define HV_get_heap_stats __BEAT_ADD_VENDOR_ID(0x225, 1)
-#define HV_get_memory_stats __BEAT_ADD_VENDOR_ID(0x227, 1)
-#define HV_get_memory_detail __BEAT_ADD_VENDOR_ID(0x228, 1)
-#define HV_set_priority_of_irq_outlet __BEAT_ADD_VENDOR_ID(0x122, 1)
-#define HV_get_physical_spe_by_reservation_id __BEAT_ADD_VENDOR_ID(0x128, 1)
-#define HV_get_spe_context __BEAT_ADD_VENDOR_ID(0x129, 1)
-#define HV_set_spe_context __BEAT_ADD_VENDOR_ID(0x12a, 1)
-#define HV_downcount_of_interrupt __BEAT_ADD_VENDOR_ID(0x12e, 1)
-#define HV_peek_spe_context __BEAT_ADD_VENDOR_ID(0x12f, 1)
-#define HV_read_bpa_register __BEAT_ADD_VENDOR_ID(0x131, 1)
-#define HV_write_bpa_register __BEAT_ADD_VENDOR_ID(0x132, 1)
-#define HV_map_context_table_of_spe __BEAT_ADD_VENDOR_ID(0x137, 1)
-#define HV_get_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x138, 1)
-#define HV_set_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x139, 1)
-#define HV_init_pm __BEAT_ADD_VENDOR_ID(0x150, 1)
-#define HV_set_pm_signal __BEAT_ADD_VENDOR_ID(0x151, 1)
-#define HV_get_pm_signal __BEAT_ADD_VENDOR_ID(0x152, 1)
-#define HV_set_pm_config __BEAT_ADD_VENDOR_ID(0x153, 1)
-#define HV_get_pm_config __BEAT_ADD_VENDOR_ID(0x154, 1)
-#define HV_get_inner_trace_data __BEAT_ADD_VENDOR_ID(0x155, 1)
-#define HV_set_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x156, 1)
-#define HV_get_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x157, 1)
-#define HV_set_pm_interrupt __BEAT_ADD_VENDOR_ID(0x158, 1)
-#define HV_get_pm_interrupt __BEAT_ADD_VENDOR_ID(0x159, 1)
-#define HV_kick_pm __BEAT_ADD_VENDOR_ID(0x160, 1)
-#define HV_construct_pm_context __BEAT_ADD_VENDOR_ID(0x164, 1)
-#define HV_destruct_pm_context __BEAT_ADD_VENDOR_ID(0x165, 1)
-#define HV_be_slow __BEAT_ADD_VENDOR_ID(0x170, 1)
-#define HV_assign_ipspc_server_connection_status_notification_port \
- __BEAT_ADD_VENDOR_ID(0x173, 1)
-#define HV_get_raid_of_physical_spe __BEAT_ADD_VENDOR_ID(0x174, 1)
-#define HV_set_physical_spe_to_rag __BEAT_ADD_VENDOR_ID(0x175, 1)
-#define HV_release_physical_spe_from_rag __BEAT_ADD_VENDOR_ID(0x176, 1)
-#define HV_rtc_read __BEAT_ADD_VENDOR_ID(0x190, 1)
-#define HV_rtc_write __BEAT_ADD_VENDOR_ID(0x191, 1)
-#define HV_eeprom_read __BEAT_ADD_VENDOR_ID(0x192, 1)
-#define HV_eeprom_write __BEAT_ADD_VENDOR_ID(0x193, 1)
-#define HV_insert_htab_entry3 __BEAT_ADD_VENDOR_ID(0x104, 1)
-#define HV_invalidate_htab_entry3 __BEAT_ADD_VENDOR_ID(0x105, 1)
-#define HV_update_htab_permission3 __BEAT_ADD_VENDOR_ID(0x106, 1)
-#define HV_clear_htab3 __BEAT_ADD_VENDOR_ID(0x107, 1)
-#endif
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
deleted file mode 100644
index 350735bc8888..000000000000
--- a/arch/powerpc/platforms/cell/beat_udbg.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * udbg function for Beat
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/console.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-
-#include "beat.h"
-
-#define celleb_vtermno 0
-
-static void udbg_putc_beat(char c)
-{
- unsigned long rc;
-
- if (c == '\n')
- udbg_putc_beat('\r');
-
- rc = beat_put_term_char(celleb_vtermno, 1, (uint64_t)c << 56, 0);
-}
-
-/* Buffered chars getc */
-static u64 inbuflen;
-static u64 inbuf[2]; /* must be 2 u64s */
-
-static int udbg_getc_poll_beat(void)
-{
- /* The interface is tricky because it may return up to 16 chars.
- * We save them statically for future calls to udbg_getc().
- */
- char ch, *buf = (char *)inbuf;
- int i;
- long rc;
- if (inbuflen == 0) {
- /* get some more chars. */
- inbuflen = 0;
- rc = beat_get_term_char(celleb_vtermno, &inbuflen,
- inbuf+0, inbuf+1);
- if (rc != 0)
- inbuflen = 0; /* otherwise inbuflen is garbage */
- }
- if (inbuflen <= 0 || inbuflen > 16) {
- /* Catch error case as well as other oddities (corruption) */
- inbuflen = 0;
- return -1;
- }
- ch = buf[0];
- for (i = 1; i < inbuflen; i++) /* shuffle them down. */
- buf[i-1] = buf[i];
- inbuflen--;
- return ch;
-}
-
-static int udbg_getc_beat(void)
-{
- int ch;
- for (;;) {
- ch = udbg_getc_poll_beat();
- if (ch == -1) {
- /* This shouldn't be needed...but... */
- volatile unsigned long delay;
- for (delay = 0; delay < 2000000; delay++)
- ;
- } else {
- return ch;
- }
- }
-}
-
-/* call this from early_init() for a working debug console on
- * vterm capable LPAR machines
- */
-void __init udbg_init_debug_beat(void)
-{
- udbg_putc = udbg_putc_beat;
- udbg_getc = udbg_getc_beat;
- udbg_getc_poll = udbg_getc_poll_beat;
-}
diff --git a/arch/powerpc/platforms/cell/beat_wrapper.h b/arch/powerpc/platforms/cell/beat_wrapper.h
deleted file mode 100644
index c1109969f242..000000000000
--- a/arch/powerpc/platforms/cell/beat_wrapper.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Beat hypervisor call I/F
- *
- * (C) Copyright 2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/plpar_wrapper.h.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#ifndef BEAT_HCALL
-#include <linux/string.h>
-#include "beat_syscall.h"
-
-/* defined in hvCall.S */
-extern s64 beat_hcall_norets(u64 opcode, ...);
-extern s64 beat_hcall_norets8(u64 opcode, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8);
-extern s64 beat_hcall1(u64 opcode, u64 retbuf[1], ...);
-extern s64 beat_hcall2(u64 opcode, u64 retbuf[2], ...);
-extern s64 beat_hcall3(u64 opcode, u64 retbuf[3], ...);
-extern s64 beat_hcall4(u64 opcode, u64 retbuf[4], ...);
-extern s64 beat_hcall5(u64 opcode, u64 retbuf[5], ...);
-extern s64 beat_hcall6(u64 opcode, u64 retbuf[6], ...);
-
-static inline s64 beat_downcount_of_interrupt(u64 plug_id)
-{
- return beat_hcall_norets(HV_downcount_of_interrupt, plug_id);
-}
-
-static inline s64 beat_set_interrupt_mask(u64 index,
- u64 val0, u64 val1, u64 val2, u64 val3)
-{
- return beat_hcall_norets(HV_set_interrupt_mask, index,
- val0, val1, val2, val3);
-}
-
-static inline s64 beat_destruct_irq_plug(u64 plug_id)
-{
- return beat_hcall_norets(HV_destruct_irq_plug, plug_id);
-}
-
-static inline s64 beat_construct_and_connect_irq_plug(u64 plug_id,
- u64 outlet_id)
-{
- return beat_hcall_norets(HV_construct_and_connect_irq_plug, plug_id,
- outlet_id);
-}
-
-static inline s64 beat_detect_pending_interrupts(u64 index, u64 *retbuf)
-{
- return beat_hcall4(HV_detect_pending_interrupts, retbuf, index);
-}
-
-static inline s64 beat_pause(u64 style)
-{
- return beat_hcall_norets(HV_pause, style);
-}
-
-static inline s64 beat_read_htab_entries(u64 htab_id, u64 index, u64 *retbuf)
-{
- return beat_hcall5(HV_read_htab_entries, retbuf, htab_id, index);
-}
-
-static inline s64 beat_insert_htab_entry(u64 htab_id, u64 group,
- u64 bitmask, u64 hpte_v, u64 hpte_r, u64 *slot)
-{
- u64 dummy[3];
- s64 ret;
-
- ret = beat_hcall3(HV_insert_htab_entry, dummy, htab_id, group,
- bitmask, hpte_v, hpte_r);
- *slot = dummy[0];
- return ret;
-}
-
-static inline s64 beat_write_htab_entry(u64 htab_id, u64 slot,
- u64 hpte_v, u64 hpte_r, u64 mask_v, u64 mask_r,
- u64 *ret_v, u64 *ret_r)
-{
- u64 dummy[2];
- s64 ret;
-
- ret = beat_hcall2(HV_write_htab_entry, dummy, htab_id, slot,
- hpte_v, hpte_r, mask_v, mask_r);
- *ret_v = dummy[0];
- *ret_r = dummy[1];
- return ret;
-}
-
-static inline s64 beat_insert_htab_entry3(u64 htab_id, u64 group,
- u64 hpte_v, u64 hpte_r, u64 mask_v, u64 value_v, u64 *slot)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_insert_htab_entry3, dummy, htab_id, group,
- hpte_v, hpte_r, mask_v, value_v);
- *slot = dummy[0];
- return ret;
-}
-
-static inline s64 beat_invalidate_htab_entry3(u64 htab_id, u64 group,
- u64 va, u64 pss)
-{
- return beat_hcall_norets(HV_invalidate_htab_entry3,
- htab_id, group, va, pss);
-}
-
-static inline s64 beat_update_htab_permission3(u64 htab_id, u64 group,
- u64 va, u64 pss, u64 ptel_mask, u64 ptel_value)
-{
- return beat_hcall_norets(HV_update_htab_permission3,
- htab_id, group, va, pss, ptel_mask, ptel_value);
-}
-
-static inline s64 beat_clear_htab3(u64 htab_id)
-{
- return beat_hcall_norets(HV_clear_htab3, htab_id);
-}
-
-static inline void beat_shutdown_logical_partition(u64 code)
-{
- (void)beat_hcall_norets(HV_shutdown_logical_partition, code);
-}
-
-static inline s64 beat_rtc_write(u64 time_from_epoch)
-{
- return beat_hcall_norets(HV_rtc_write, time_from_epoch);
-}
-
-static inline s64 beat_rtc_read(u64 *time_from_epoch)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_rtc_read, dummy);
- *time_from_epoch = dummy[0];
- return ret;
-}
-
-#define BEAT_NVRW_CNT (sizeof(u64) * 6)
-
-static inline s64 beat_eeprom_write(u64 index, u64 length, u8 *buffer)
-{
- u64 b[6];
-
- if (length > BEAT_NVRW_CNT)
- return -1;
- memcpy(b, buffer, sizeof(b));
- return beat_hcall_norets8(HV_eeprom_write, index, length,
- b[0], b[1], b[2], b[3], b[4], b[5]);
-}
-
-static inline s64 beat_eeprom_read(u64 index, u64 length, u8 *buffer)
-{
- u64 b[6];
- s64 ret;
-
- if (length > BEAT_NVRW_CNT)
- return -1;
- ret = beat_hcall6(HV_eeprom_read, b, index, length);
- memcpy(buffer, b, length);
- return ret;
-}
-
-static inline s64 beat_set_dabr(u64 value, u64 style)
-{
- return beat_hcall_norets(HV_set_dabr, value, style);
-}
-
-static inline s64 beat_get_characters_from_console(u64 termno, u64 *len,
- u8 *buffer)
-{
- u64 dummy[3];
- s64 ret;
-
- ret = beat_hcall3(HV_get_characters_from_console, dummy, termno, len);
- *len = dummy[0];
- memcpy(buffer, dummy + 1, *len);
- return ret;
-}
-
-static inline s64 beat_put_characters_to_console(u64 termno, u64 len,
- u8 *buffer)
-{
- u64 b[2];
-
- memcpy(b, buffer, len);
- return beat_hcall_norets(HV_put_characters_to_console, termno, len,
- b[0], b[1]);
-}
-
-static inline s64 beat_get_spe_privileged_state_1_registers(
- u64 id, u64 offsetof, u64 *value)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_get_spe_privileged_state_1_registers, dummy, id,
- offsetof);
- *value = dummy[0];
- return ret;
-}
-
-static inline s64 beat_set_irq_mask_for_spe(u64 id, u64 class, u64 mask)
-{
- return beat_hcall_norets(HV_set_irq_mask_for_spe, id, class, mask);
-}
-
-static inline s64 beat_clear_interrupt_status_of_spe(u64 id, u64 class,
- u64 mask)
-{
- return beat_hcall_norets(HV_clear_interrupt_status_of_spe,
- id, class, mask);
-}
-
-static inline s64 beat_set_spe_privileged_state_1_registers(
- u64 id, u64 offsetof, u64 value)
-{
- return beat_hcall_norets(HV_set_spe_privileged_state_1_registers,
- id, offsetof, value);
-}
-
-static inline s64 beat_get_interrupt_status_of_spe(u64 id, u64 class, u64 *val)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_get_interrupt_status_of_spe, dummy, id, class);
- *val = dummy[0];
- return ret;
-}
-
-static inline s64 beat_put_iopte(u64 ioas_id, u64 io_addr, u64 real_addr,
- u64 ioid, u64 flags)
-{
- return beat_hcall_norets(HV_put_iopte, ioas_id, io_addr, real_addr,
- ioid, flags);
-}
-
-static inline s64 beat_construct_event_receive_port(u64 *port)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_construct_event_receive_port, dummy);
- *port = dummy[0];
- return ret;
-}
-
-static inline s64 beat_destruct_event_receive_port(u64 port)
-{
- s64 ret;
-
- ret = beat_hcall_norets(HV_destruct_event_receive_port, port);
- return ret;
-}
-
-static inline s64 beat_create_repository_node(u64 path[4], u64 data[2])
-{
- s64 ret;
-
- ret = beat_hcall_norets(HV_create_repository_node2,
- path[0], path[1], path[2], path[3], data[0], data[1]);
- return ret;
-}
-
-static inline s64 beat_get_repository_node_value(u64 lpid, u64 path[4],
- u64 data[2])
-{
- s64 ret;
-
- ret = beat_hcall2(HV_get_repository_node_value2, data,
- lpid, path[0], path[1], path[2], path[3]);
- return ret;
-}
-
-#endif
diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h
new file mode 100644
index 000000000000..ef143dfee068
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cell.h
@@ -0,0 +1,24 @@
+/*
+ * Cell Platform common data structures
+ *
+ * Copyright 2015, Daniel Axtens, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CELL_H
+#define CELL_H
+
+#include <asm/pci-bridge.h>
+
+extern struct pci_controller_ops cell_pci_controller_ops;
+
+#endif
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
deleted file mode 100644
index 3ce70ded2d6a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * Support for PCI on Celleb platform.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/kernel/rtas_pci.c:
- * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
- * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <linux/pci_regs.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-#include "celleb_pci.h"
-
-#define MAX_PCI_DEVICES 32
-#define MAX_PCI_FUNCTIONS 8
-#define MAX_PCI_BASE_ADDRS 3 /* use 64 bit address */
-
-/* definition for fake pci configuration area for GbE, etc. */
-
-struct celleb_pci_resource {
- struct resource r[MAX_PCI_BASE_ADDRS];
-};
-
-struct celleb_pci_private {
- unsigned char *fake_config[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
- struct celleb_pci_resource *res[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
-};
-
-static inline u8 celleb_fake_config_readb(void *addr)
-{
- u8 *p = addr;
- return *p;
-}
-
-static inline u16 celleb_fake_config_readw(void *addr)
-{
- __le16 *p = addr;
- return le16_to_cpu(*p);
-}
-
-static inline u32 celleb_fake_config_readl(void *addr)
-{
- __le32 *p = addr;
- return le32_to_cpu(*p);
-}
-
-static inline void celleb_fake_config_writeb(u32 val, void *addr)
-{
- u8 *p = addr;
- *p = val;
-}
-
-static inline void celleb_fake_config_writew(u32 val, void *addr)
-{
- __le16 val16;
- __le16 *p = addr;
- val16 = cpu_to_le16(val);
- *p = val16;
-}
-
-static inline void celleb_fake_config_writel(u32 val, void *addr)
-{
- __le32 val32;
- __le32 *p = addr;
- val32 = cpu_to_le32(val);
- *p = val32;
-}
-
-static unsigned char *get_fake_config_start(struct pci_controller *hose,
- int devno, int fn)
-{
- struct celleb_pci_private *private = hose->private_data;
-
- if (private == NULL)
- return NULL;
-
- return private->fake_config[devno][fn];
-}
-
-static struct celleb_pci_resource *get_resource_start(
- struct pci_controller *hose,
- int devno, int fn)
-{
- struct celleb_pci_private *private = hose->private_data;
-
- if (private == NULL)
- return NULL;
-
- return private->res[devno][fn];
-}
-
-
-static void celleb_config_read_fake(unsigned char *config, int where,
- int size, u32 *val)
-{
- char *p = config + where;
-
- switch (size) {
- case 1:
- *val = celleb_fake_config_readb(p);
- break;
- case 2:
- *val = celleb_fake_config_readw(p);
- break;
- case 4:
- *val = celleb_fake_config_readl(p);
- break;
- }
-}
-
-static void celleb_config_write_fake(unsigned char *config, int where,
- int size, u32 val)
-{
- char *p = config + where;
-
- switch (size) {
- case 1:
- celleb_fake_config_writeb(val, p);
- break;
- case 2:
- celleb_fake_config_writew(val, p);
- break;
- case 4:
- celleb_fake_config_writel(val, p);
- break;
- }
-}
-
-static int celleb_fake_pci_read_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- char *config;
- struct pci_controller *hose = pci_bus_to_host(bus);
- unsigned int devno = devfn >> 3;
- unsigned int fn = devfn & 0x7;
-
-	/* alignment check */
- BUG_ON(where % size);
-
- pr_debug(" fake read: bus=0x%x, ", bus->number);
- config = get_fake_config_start(hose, devno, fn);
-
- pr_debug("devno=0x%x, where=0x%x, size=0x%x, ", devno, where, size);
- if (!config) {
- pr_debug("failed\n");
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- celleb_config_read_fake(config, where, size, val);
- pr_debug("val=0x%x\n", *val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-static int celleb_fake_pci_write_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- char *config;
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct celleb_pci_resource *res;
- unsigned int devno = devfn >> 3;
- unsigned int fn = devfn & 0x7;
-
-	/* alignment check */
- BUG_ON(where % size);
-
- config = get_fake_config_start(hose, devno, fn);
-
- if (!config)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (val == ~0) {
- int i = (where - PCI_BASE_ADDRESS_0) >> 3;
-
- switch (where) {
- case PCI_BASE_ADDRESS_0:
- case PCI_BASE_ADDRESS_2:
- if (size != 4)
- return PCIBIOS_DEVICE_NOT_FOUND;
- res = get_resource_start(hose, devno, fn);
- if (!res)
- return PCIBIOS_DEVICE_NOT_FOUND;
- celleb_config_write_fake(config, where, size,
- (res->r[i].end - res->r[i].start));
- return PCIBIOS_SUCCESSFUL;
- case PCI_BASE_ADDRESS_1:
- case PCI_BASE_ADDRESS_3:
- case PCI_BASE_ADDRESS_4:
- case PCI_BASE_ADDRESS_5:
- break;
- default:
- break;
- }
- }
-
- celleb_config_write_fake(config, where, size, val);
- pr_debug(" fake write: where=%x, size=%d, val=%x\n",
- where, size, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops celleb_fake_pci_ops = {
- .read = celleb_fake_pci_read_config,
- .write = celleb_fake_pci_write_config,
-};
-
-static inline void celleb_setup_pci_base_addrs(struct pci_controller *hose,
- unsigned int devno, unsigned int fn,
- unsigned int num_base_addr)
-{
- u32 val;
- unsigned char *config;
- struct celleb_pci_resource *res;
-
- config = get_fake_config_start(hose, devno, fn);
- res = get_resource_start(hose, devno, fn);
-
- if (!config || !res)
- return;
-
- switch (num_base_addr) {
- case 3:
- val = (res->r[2].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_4, 4, val);
- val = res->r[2].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_5, 4, val);
- /* FALLTHROUGH */
- case 2:
- val = (res->r[1].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_2, 4, val);
- val = res->r[1].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_3, 4, val);
- /* FALLTHROUGH */
- case 1:
- val = (res->r[0].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_0, 4, val);
- val = res->r[0].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_1, 4, val);
- break;
- }
-
- val = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- celleb_config_write_fake(config, PCI_COMMAND, 2, val);
-}
-
-static int __init celleb_setup_fake_pci_device(struct device_node *node,
- struct pci_controller *hose)
-{
- unsigned int rlen;
- int num_base_addr = 0;
- u32 val;
- const u32 *wi0, *wi1, *wi2, *wi3, *wi4;
- unsigned int devno, fn;
- struct celleb_pci_private *private = hose->private_data;
- unsigned char **config = NULL;
- struct celleb_pci_resource **res = NULL;
- const char *name;
- const unsigned long *li;
- int size, result;
-
- if (private == NULL) {
- printk(KERN_ERR "PCI: "
- "memory space for pci controller is not assigned\n");
- goto error;
- }
-
- name = of_get_property(node, "model", &rlen);
- if (!name) {
- printk(KERN_ERR "PCI: model property not found.\n");
- goto error;
- }
-
- wi4 = of_get_property(node, "reg", &rlen);
- if (wi4 == NULL)
- goto error;
-
- devno = ((wi4[0] >> 8) & 0xff) >> 3;
- fn = (wi4[0] >> 8) & 0x7;
-
- pr_debug("PCI: celleb_setup_fake_pci() %s devno=%x fn=%x\n", name,
- devno, fn);
-
- size = 256;
- config = &private->fake_config[devno][fn];
- *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
- if (*config == NULL) {
- printk(KERN_ERR "PCI: "
- "not enough memory for fake configuration space\n");
- goto error;
- }
- pr_debug("PCI: fake config area assigned 0x%016lx\n",
- (unsigned long)*config);
-
- size = sizeof(struct celleb_pci_resource);
- res = &private->res[devno][fn];
- *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
- if (*res == NULL) {
- printk(KERN_ERR
- "PCI: not enough memory for resource data space\n");
- goto error;
- }
- pr_debug("PCI: res assigned 0x%016lx\n", (unsigned long)*res);
-
- wi0 = of_get_property(node, "device-id", NULL);
- wi1 = of_get_property(node, "vendor-id", NULL);
- wi2 = of_get_property(node, "class-code", NULL);
- wi3 = of_get_property(node, "revision-id", NULL);
- if (!wi0 || !wi1 || !wi2 || !wi3) {
- printk(KERN_ERR "PCI: Missing device tree properties.\n");
- goto error;
- }
-
- celleb_config_write_fake(*config, PCI_DEVICE_ID, 2, wi0[0] & 0xffff);
- celleb_config_write_fake(*config, PCI_VENDOR_ID, 2, wi1[0] & 0xffff);
- pr_debug("class-code = 0x%08x\n", wi2[0]);
-
- celleb_config_write_fake(*config, PCI_CLASS_PROG, 1, wi2[0] & 0xff);
- celleb_config_write_fake(*config, PCI_CLASS_DEVICE, 2,
- (wi2[0] >> 8) & 0xffff);
- celleb_config_write_fake(*config, PCI_REVISION_ID, 1, wi3[0]);
-
- while (num_base_addr < MAX_PCI_BASE_ADDRS) {
- result = of_address_to_resource(node,
- num_base_addr, &(*res)->r[num_base_addr]);
- if (result)
- break;
- num_base_addr++;
- }
-
- celleb_setup_pci_base_addrs(hose, devno, fn, num_base_addr);
-
- li = of_get_property(node, "interrupts", &rlen);
- if (!li) {
- printk(KERN_ERR "PCI: interrupts not found.\n");
- goto error;
- }
- val = li[0];
- celleb_config_write_fake(*config, PCI_INTERRUPT_PIN, 1, 1);
- celleb_config_write_fake(*config, PCI_INTERRUPT_LINE, 1, val);
-
-#ifdef DEBUG
- pr_debug("PCI: %s irq=%ld\n", name, li[0]);
- for (i = 0; i < 6; i++) {
- celleb_config_read_fake(*config,
- PCI_BASE_ADDRESS_0 + 0x4 * i, 4,
- &val);
- pr_debug("PCI: %s fn=%d base_address_%d=0x%x\n",
- name, fn, i, val);
- }
-#endif
-
- celleb_config_write_fake(*config, PCI_HEADER_TYPE, 1,
- PCI_HEADER_TYPE_NORMAL);
-
- return 0;
-
-error:
- if (mem_init_done) {
- if (config && *config)
- kfree(*config);
- if (res && *res)
- kfree(*res);
-
- } else {
- if (config && *config) {
- size = 256;
- memblock_free(__pa(*config), size);
- }
- if (res && *res) {
- size = sizeof(struct celleb_pci_resource);
- memblock_free(__pa(*res), size);
- }
- }
-
- return 1;
-}
-
-static int __init phb_set_bus_ranges(struct device_node *dev,
- struct pci_controller *phb)
-{
- const int *bus_range;
- unsigned int len;
-
- bus_range = of_get_property(dev, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int))
- return 1;
-
- phb->first_busno = bus_range[0];
- phb->last_busno = bus_range[1];
-
- return 0;
-}
-
-static void __init celleb_alloc_private_mem(struct pci_controller *hose)
-{
- hose->private_data =
- zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
- GFP_KERNEL);
-}
-
-static int __init celleb_setup_fake_pci(struct device_node *dev,
- struct pci_controller *phb)
-{
- struct device_node *node;
-
- phb->ops = &celleb_fake_pci_ops;
- celleb_alloc_private_mem(phb);
-
- for (node = of_get_next_child(dev, NULL);
- node != NULL; node = of_get_next_child(dev, node))
- celleb_setup_fake_pci_device(node, phb);
-
- return 0;
-}
-
-static struct celleb_phb_spec celleb_fake_pci_spec __initdata = {
- .setup = celleb_setup_fake_pci,
-};
-
-static const struct of_device_id celleb_phb_match[] __initconst = {
- {
- .name = "pci-pseudo",
- .data = &celleb_fake_pci_spec,
- }, {
- .name = "epci",
- .data = &celleb_epci_spec,
- }, {
- .name = "pcie",
- .data = &celleb_pciex_spec,
- }, {
- },
-};
-
-int __init celleb_setup_phb(struct pci_controller *phb)
-{
- struct device_node *dev = phb->dn;
- const struct of_device_id *match;
- const struct celleb_phb_spec *phb_spec;
- int rc;
-
- match = of_match_node(celleb_phb_match, dev);
- if (!match)
- return 1;
-
- phb_set_bus_ranges(dev, phb);
- phb->buid = 1;
-
- phb_spec = match->data;
- rc = (*phb_spec->setup)(dev, phb);
- if (rc)
- return 1;
-
- if (phb_spec->ops)
- iowa_register_bus(phb, phb_spec->ops,
- phb_spec->iowa_init,
- phb_spec->iowa_data);
- return 0;
-}
-
-int celleb_pci_probe_mode(struct pci_bus *bus)
-{
- return PCI_PROBE_DEVTREE;
-}
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
deleted file mode 100644
index a801fcc5f389..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * pci prototypes for Celleb platform
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_PCI_H
-#define _CELLEB_PCI_H
-
-#include <linux/pci.h>
-
-#include <asm/pci-bridge.h>
-#include <asm/prom.h>
-#include <asm/ppc-pci.h>
-#include <asm/io-workarounds.h>
-
-struct iowa_bus;
-
-struct celleb_phb_spec {
- int (*setup)(struct device_node *, struct pci_controller *);
- struct ppc_pci_io *ops;
- int (*iowa_init)(struct iowa_bus *, void *);
- void *iowa_data;
-};
-
-extern int celleb_setup_phb(struct pci_controller *);
-extern int celleb_pci_probe_mode(struct pci_bus *);
-
-extern struct celleb_phb_spec celleb_epci_spec;
-extern struct celleb_phb_spec celleb_pciex_spec;
-
-#endif /* _CELLEB_PCI_H */
diff --git a/arch/powerpc/platforms/cell/celleb_scc.h b/arch/powerpc/platforms/cell/celleb_scc.h
deleted file mode 100644
index b596a711c348..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * SCC (Super Companion Chip) definitions
- *
- * (C) Copyright 2004-2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_SCC_H
-#define _CELLEB_SCC_H
-
-#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
-#define PCI_DEVICE_ID_TOSHIBA_SCC_PCIEXC_BRIDGE 0x01b0
-#define PCI_DEVICE_ID_TOSHIBA_SCC_EPCI_BRIDGE 0x01b1
-#define PCI_DEVICE_ID_TOSHIBA_SCC_BRIDGE 0x01b2
-#define PCI_DEVICE_ID_TOSHIBA_SCC_GBE 0x01b3
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
-#define PCI_DEVICE_ID_TOSHIBA_SCC_USB2 0x01b5
-#define PCI_DEVICE_ID_TOSHIBA_SCC_USB 0x01b6
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ENCDEC 0x01b7
-
-#define SCC_EPCI_REG 0x0000d000
-
-/* EPCI registers */
-#define SCC_EPCI_CNF10_REG 0x010
-#define SCC_EPCI_CNF14_REG 0x014
-#define SCC_EPCI_CNF18_REG 0x018
-#define SCC_EPCI_PVBAT 0x100
-#define SCC_EPCI_VPMBAT 0x104
-#define SCC_EPCI_VPIBAT 0x108
-#define SCC_EPCI_VCSR 0x110
-#define SCC_EPCI_VIENAB 0x114
-#define SCC_EPCI_VISTAT 0x118
-#define SCC_EPCI_VRDCOUNT 0x124
-#define SCC_EPCI_BAM0 0x12c
-#define SCC_EPCI_BAM1 0x134
-#define SCC_EPCI_BAM2 0x13c
-#define SCC_EPCI_IADR 0x164
-#define SCC_EPCI_CLKRST 0x800
-#define SCC_EPCI_INTSET 0x804
-#define SCC_EPCI_STATUS 0x808
-#define SCC_EPCI_ABTSET 0x80c
-#define SCC_EPCI_WATRP 0x810
-#define SCC_EPCI_DUMYRADR 0x814
-#define SCC_EPCI_SWRESP 0x818
-#define SCC_EPCI_CNTOPT 0x81c
-#define SCC_EPCI_ECMODE 0xf00
-#define SCC_EPCI_IOM_AC_NUM 5
-#define SCC_EPCI_IOM_ACTE(n) (0xf10 + (n) * 4)
-#define SCC_EPCI_IOT_AC_NUM 4
-#define SCC_EPCI_IOT_ACTE(n) (0xf30 + (n) * 4)
-#define SCC_EPCI_MAEA 0xf50
-#define SCC_EPCI_MAEC 0xf54
-#define SCC_EPCI_CKCTRL 0xff0
-
-/* bits for SCC_EPCI_VCSR */
-#define SCC_EPCI_VCSR_FRE 0x00020000
-#define SCC_EPCI_VCSR_FWE 0x00010000
-#define SCC_EPCI_VCSR_DR 0x00000400
-#define SCC_EPCI_VCSR_SR 0x00000008
-#define SCC_EPCI_VCSR_AT 0x00000004
-
-/* bits for SCC_EPCI_VIENAB/SCC_EPCI_VISTAT */
-#define SCC_EPCI_VISTAT_PMPE 0x00000008
-#define SCC_EPCI_VISTAT_PMFE 0x00000004
-#define SCC_EPCI_VISTAT_PRA 0x00000002
-#define SCC_EPCI_VISTAT_PRD 0x00000001
-#define SCC_EPCI_VISTAT_ALL 0x0000000f
-
-#define SCC_EPCI_VIENAB_PMPEE 0x00000008
-#define SCC_EPCI_VIENAB_PMFEE 0x00000004
-#define SCC_EPCI_VIENAB_PRA 0x00000002
-#define SCC_EPCI_VIENAB_PRD 0x00000001
-#define SCC_EPCI_VIENAB_ALL 0x0000000f
-
-/* bits for SCC_EPCI_CLKRST */
-#define SCC_EPCI_CLKRST_CKS_MASK 0x00030000
-#define SCC_EPCI_CLKRST_CKS_2 0x00000000
-#define SCC_EPCI_CLKRST_CKS_4 0x00010000
-#define SCC_EPCI_CLKRST_CKS_8 0x00020000
-#define SCC_EPCI_CLKRST_PCICRST 0x00000400
-#define SCC_EPCI_CLKRST_BC 0x00000200
-#define SCC_EPCI_CLKRST_PCIRST 0x00000100
-#define SCC_EPCI_CLKRST_PCKEN 0x00000001
-
-/* bits for SCC_EPCI_INTSET/SCC_EPCI_STATUS */
-#define SCC_EPCI_INT_2M 0x01000000
-#define SCC_EPCI_INT_RERR 0x00200000
-#define SCC_EPCI_INT_SERR 0x00100000
-#define SCC_EPCI_INT_PRTER 0x00080000
-#define SCC_EPCI_INT_SER 0x00040000
-#define SCC_EPCI_INT_PER 0x00020000
-#define SCC_EPCI_INT_PAI 0x00010000
-#define SCC_EPCI_INT_1M 0x00000100
-#define SCC_EPCI_INT_PME 0x00000010
-#define SCC_EPCI_INT_INTD 0x00000008
-#define SCC_EPCI_INT_INTC 0x00000004
-#define SCC_EPCI_INT_INTB 0x00000002
-#define SCC_EPCI_INT_INTA 0x00000001
-#define SCC_EPCI_INT_DEVINT 0x0000000f
-#define SCC_EPCI_INT_ALL 0x003f001f
-#define SCC_EPCI_INT_ALLERR 0x003f0000
-
-/* bits for SCC_EPCI_CKCTRL */
-#define SCC_EPCI_CKCTRL_CRST0 0x00010000
-#define SCC_EPCI_CKCTRL_CRST1 0x00020000
-#define SCC_EPCI_CKCTRL_OCLKEN 0x00000100
-#define SCC_EPCI_CKCTRL_LCLKEN 0x00000001
-
-#define SCC_EPCI_IDSEL_AD_TO_SLOT(ad) ((ad) - 10)
-#define SCC_EPCI_MAX_DEVNU SCC_EPCI_IDSEL_AD_TO_SLOT(32)
-
-/* bits for SCC_EPCI_CNTOPT */
-#define SCC_EPCI_CNTOPT_O2PMB 0x00000002
-
-/* SCC PCIEXC SMMIO registers */
-#define PEXCADRS 0x000
-#define PEXCWDATA 0x004
-#define PEXCRDATA 0x008
-#define PEXDADRS 0x010
-#define PEXDCMND 0x014
-#define PEXDWDATA 0x018
-#define PEXDRDATA 0x01c
-#define PEXREQID 0x020
-#define PEXTIDMAP 0x024
-#define PEXINTMASK 0x028
-#define PEXINTSTS 0x02c
-#define PEXAERRMASK 0x030
-#define PEXAERRSTS 0x034
-#define PEXPRERRMASK 0x040
-#define PEXPRERRSTS 0x044
-#define PEXPRERRID01 0x048
-#define PEXPRERRID23 0x04c
-#define PEXVDMASK 0x050
-#define PEXVDSTS 0x054
-#define PEXRCVCPLIDA 0x060
-#define PEXLENERRIDA 0x068
-#define PEXPHYPLLST 0x070
-#define PEXDMRDEN0 0x100
-#define PEXDMRDADR0 0x104
-#define PEXDMRDENX 0x110
-#define PEXDMRDADRX 0x114
-#define PEXECMODE 0xf00
-#define PEXMAEA(n) (0xf50 + (8 * n))
-#define PEXMAEC(n) (0xf54 + (8 * n))
-#define PEXCCRCTRL 0xff0
-
-/* SCC PCIEXC bits and shifts for PEXCADRS */
-#define PEXCADRS_BYTE_EN_SHIFT 20
-#define PEXCADRS_CMD_SHIFT 16
-#define PEXCADRS_CMD_READ (0xa << PEXCADRS_CMD_SHIFT)
-#define PEXCADRS_CMD_WRITE (0xb << PEXCADRS_CMD_SHIFT)
-
-/* SCC PCIEXC shifts for PEXDADRS */
-#define PEXDADRS_BUSNO_SHIFT 20
-#define PEXDADRS_DEVNO_SHIFT 15
-#define PEXDADRS_FUNCNO_SHIFT 12
-
-/* SCC PCIEXC bits and shifts for PEXDCMND */
-#define PEXDCMND_BYTE_EN_SHIFT 4
-#define PEXDCMND_IO_READ 0x2
-#define PEXDCMND_IO_WRITE 0x3
-#define PEXDCMND_CONFIG_READ 0xa
-#define PEXDCMND_CONFIG_WRITE 0xb
-
-/* SCC PCIEXC bits for PEXPHYPLLST */
-#define PEXPHYPLLST_PEXPHYAPLLST 0x00000001
-
-/* SCC PCIEXC bits for PEXECMODE */
-#define PEXECMODE_ALL_THROUGH 0x00000000
-#define PEXECMODE_ALL_8BIT 0x00550155
-#define PEXECMODE_ALL_16BIT 0x00aa02aa
-
-/* SCC PCIEXC bits for PEXCCRCTRL */
-#define PEXCCRCTRL_PEXIPCOREEN 0x00040000
-#define PEXCCRCTRL_PEXIPCONTEN 0x00020000
-#define PEXCCRCTRL_PEXPHYPLLEN 0x00010000
-#define PEXCCRCTRL_PCIEXCAOCKEN 0x00000100
-
-/* SCC PCIEXC port configuration registers */
-#define PEXTCERRCHK 0x21c
-#define PEXTAMAPB0 0x220
-#define PEXTAMAPL0 0x224
-#define PEXTAMAPB(n) (PEXTAMAPB0 + 8 * (n))
-#define PEXTAMAPL(n) (PEXTAMAPL0 + 8 * (n))
-#define PEXCHVC0P 0x500
-#define PEXCHVC0NP 0x504
-#define PEXCHVC0C 0x508
-#define PEXCDVC0P 0x50c
-#define PEXCDVC0NP 0x510
-#define PEXCDVC0C 0x514
-#define PEXCHVCXP 0x518
-#define PEXCHVCXNP 0x51c
-#define PEXCHVCXC 0x520
-#define PEXCDVCXP 0x524
-#define PEXCDVCXNP 0x528
-#define PEXCDVCXC 0x52c
-#define PEXCTTRG 0x530
-#define PEXTSCTRL 0x700
-#define PEXTSSTS 0x704
-#define PEXSKPCTRL 0x708
-
-/* UHC registers */
-#define SCC_UHC_CKRCTRL 0xff0
-#define SCC_UHC_ECMODE 0xf00
-
-/* bits for SCC_UHC_CKRCTRL */
-#define SCC_UHC_F48MCKLEN 0x00000001
-#define SCC_UHC_P_SUSPEND 0x00000002
-#define SCC_UHC_PHY_SUSPEND_SEL 0x00000004
-#define SCC_UHC_HCLKEN 0x00000100
-#define SCC_UHC_USBEN 0x00010000
-#define SCC_UHC_USBCEN 0x00020000
-#define SCC_UHC_PHYEN 0x00040000
-
-/* bits for SCC_UHC_ECMODE */
-#define SCC_UHC_ECMODE_BY_BYTE 0x00000555
-#define SCC_UHC_ECMODE_BY_WORD 0x00000aaa
-
-#endif /* _CELLEB_SCC_H */
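
(The composite interrupt masks and the IDSEL helper in the header above are plain bit arithmetic; the following stand-alone sketch is not taken from the removed driver, it only copies a few values from the definitions above to make the encoding explicit. Reading SCC_EPCI_IDSEL_AD_TO_SLOT() as "AD line minus 10 gives the slot number" is an assumption based solely on the macro body.)

/*
 * Illustrative host-side sanity check, not part of the removed sources.
 * Values copied from celleb_scc.h above.
 */
#include <assert.h>
#include <stdio.h>

#define SCC_EPCI_INT_INTA	0x00000001
#define SCC_EPCI_INT_INTB	0x00000002
#define SCC_EPCI_INT_INTC	0x00000004
#define SCC_EPCI_INT_INTD	0x00000008
#define SCC_EPCI_INT_DEVINT	0x0000000f

#define SCC_EPCI_IDSEL_AD_TO_SLOT(ad)	((ad) - 10)

int main(void)
{
	/* DEVINT is simply INTA..INTD OR-ed together. */
	assert(SCC_EPCI_INT_DEVINT ==
	       (SCC_EPCI_INT_INTA | SCC_EPCI_INT_INTB |
		SCC_EPCI_INT_INTC | SCC_EPCI_INT_INTD));

	/* Assumed mapping: IDSEL on AD(n) corresponds to slot n - 10. */
	printf("AD11 -> slot %d, AD31 -> slot %d\n",
	       SCC_EPCI_IDSEL_AD_TO_SLOT(11), SCC_EPCI_IDSEL_AD_TO_SLOT(31));
	return 0;
}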
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
deleted file mode 100644
index 9438bbed402f..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Support for SCC external PCI
- *
- * (C) Copyright 2004-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/pci_regs.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-#include "celleb_scc.h"
-#include "celleb_pci.h"
-
-#define MAX_PCI_DEVICES 32
-#define MAX_PCI_FUNCTIONS 8
-
-#define iob() __asm__ __volatile__("eieio; sync":::"memory")
-
-static inline PCI_IO_ADDR celleb_epci_get_epci_base(
- struct pci_controller *hose)
-{
- /*
- * Note:
- * Celleb epci uses cfg_addr as a base address for
- * epci control registers.
- */
-
- return hose->cfg_addr;
-}
-
-static inline PCI_IO_ADDR celleb_epci_get_epci_cfg(
- struct pci_controller *hose)
-{
- /*
- * Note:
- * Celleb epci uses cfg_data as a base address for
- * configuration area for epci devices.
- */
-
- return hose->cfg_data;
-}
-
-static inline void clear_and_disable_master_abort_interrupt(
- struct pci_controller *hose)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR reg;
- epci_base = celleb_epci_get_epci_base(hose);
- reg = epci_base + PCI_COMMAND;
- out_be32(reg, in_be32(reg) | (PCI_STATUS_REC_MASTER_ABORT << 16));
-}
-
-static int celleb_epci_check_abort(struct pci_controller *hose,
- PCI_IO_ADDR addr)
-{
- PCI_IO_ADDR reg;
- PCI_IO_ADDR epci_base;
- u32 val;
-
- iob();
- epci_base = celleb_epci_get_epci_base(hose);
-
- reg = epci_base + PCI_COMMAND;
- val = in_be32(reg);
-
- if (val & (PCI_STATUS_REC_MASTER_ABORT << 16)) {
- out_be32(reg,
- (val & 0xffff) | (PCI_STATUS_REC_MASTER_ABORT << 16));
-
- /* clear PCI Controller error, FRE, PMFE */
- reg = epci_base + SCC_EPCI_STATUS;
- out_be32(reg, SCC_EPCI_INT_PAI);
-
- reg = epci_base + SCC_EPCI_VCSR;
- val = in_be32(reg) & 0xffff;
- val |= SCC_EPCI_VCSR_FRE;
- out_be32(reg, val);
-
- reg = epci_base + SCC_EPCI_VISTAT;
- out_be32(reg, SCC_EPCI_VISTAT_PMFE);
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static PCI_IO_ADDR celleb_epci_make_config_addr(struct pci_bus *bus,
- struct pci_controller *hose, unsigned int devfn, int where)
-{
- PCI_IO_ADDR addr;
-
- if (bus != hose->bus)
- addr = celleb_epci_get_epci_cfg(hose) +
- (((bus->number & 0xff) << 16)
- | ((devfn & 0xff) << 8)
- | (where & 0xff)
- | 0x01000000);
- else
- addr = celleb_epci_get_epci_cfg(hose) +
- (((devfn & 0xff) << 8) | (where & 0xff));
-
- pr_debug("EPCI: config_addr = 0x%p\n", addr);
-
- return addr;
-}
-
-static int celleb_epci_read_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR addr;
- struct pci_controller *hose = pci_bus_to_host(bus);
-
-	/* alignment check */
- BUG_ON(where % size);
-
- if (!celleb_epci_get_epci_cfg(hose))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == hose->first_busno && devfn == 0) {
- /* EPCI controller self */
-
- epci_base = celleb_epci_get_epci_base(hose);
- addr = epci_base + where;
-
- switch (size) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_be16(addr);
- break;
- case 4:
- *val = in_be32(addr);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- } else {
-
- clear_and_disable_master_abort_interrupt(hose);
- addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
-
- switch (size) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_le16(addr);
- break;
- case 4:
- *val = in_le32(addr);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- }
-
- pr_debug("EPCI: "
- "addr=0x%p, devfn=0x%x, where=0x%x, size=0x%x, val=0x%x\n",
- addr, devfn, where, size, *val);
-
- return celleb_epci_check_abort(hose, NULL);
-}
-
-static int celleb_epci_write_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR addr;
- struct pci_controller *hose = pci_bus_to_host(bus);
-
-	/* alignment check */
- BUG_ON(where % size);
-
- if (!celleb_epci_get_epci_cfg(hose))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == hose->first_busno && devfn == 0) {
- /* EPCI controller self */
-
- epci_base = celleb_epci_get_epci_base(hose);
- addr = epci_base + where;
-
- switch (size) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_be16(addr, val);
- break;
- case 4:
- out_be32(addr, val);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- } else {
-
- clear_and_disable_master_abort_interrupt(hose);
- addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
-
- switch (size) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_le16(addr, val);
- break;
- case 4:
- out_le32(addr, val);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- }
-
- return celleb_epci_check_abort(hose, addr);
-}
-
-struct pci_ops celleb_epci_ops = {
- .read = celleb_epci_read_config,
- .write = celleb_epci_write_config,
-};
-
-/* to be moved in FW */
-static int __init celleb_epci_init(struct pci_controller *hose)
-{
- u32 val;
- PCI_IO_ADDR reg;
- PCI_IO_ADDR epci_base;
- int hwres = 0;
-
- epci_base = celleb_epci_get_epci_base(hose);
-
-	/* PCI core reset (internal bus and PCI clock) */
- reg = epci_base + SCC_EPCI_CKCTRL;
- val = in_be32(reg);
- if (val == 0x00030101)
- hwres = 1;
- else {
- val &= ~(SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
- out_be32(reg, val);
-
- /* set PCI core clock */
- val = in_be32(reg);
- val |= (SCC_EPCI_CKCTRL_OCLKEN | SCC_EPCI_CKCTRL_LCLKEN);
- out_be32(reg, val);
-
- /* release PCI core reset (internal bus) */
- val = in_be32(reg);
- val |= SCC_EPCI_CKCTRL_CRST0;
- out_be32(reg, val);
-
- /* set PCI clock select */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val &= ~SCC_EPCI_CLKRST_CKS_MASK;
- val |= SCC_EPCI_CLKRST_CKS_2;
- out_be32(reg, val);
-
- /* set arbiter */
- reg = epci_base + SCC_EPCI_ABTSET;
- out_be32(reg, 0x0f1f001f); /* temporary value */
-
- /* buffer on */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_BC;
- out_be32(reg, val);
-
- /* PCI clock enable */
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_PCKEN;
- out_be32(reg, val);
-
- /* release PCI core reset (all) */
- reg = epci_base + SCC_EPCI_CKCTRL;
- val = in_be32(reg);
- val |= (SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
- out_be32(reg, val);
-
- /* set base translation registers. (already set by Beat) */
-
- /* set base address masks. (already set by Beat) */
- }
-
- /* release interrupt masks and clear all interrupts */
- reg = epci_base + SCC_EPCI_INTSET;
- out_be32(reg, 0x013f011f); /* all interrupts enable */
- reg = epci_base + SCC_EPCI_VIENAB;
- val = SCC_EPCI_VIENAB_PMPEE | SCC_EPCI_VIENAB_PMFEE;
- out_be32(reg, val);
- reg = epci_base + SCC_EPCI_STATUS;
- out_be32(reg, 0xffffffff);
- reg = epci_base + SCC_EPCI_VISTAT;
- out_be32(reg, 0xffffffff);
-
- /* disable PCI->IB address translation */
- reg = epci_base + SCC_EPCI_VCSR;
- val = in_be32(reg);
- val &= ~(SCC_EPCI_VCSR_DR | SCC_EPCI_VCSR_AT);
- out_be32(reg, val);
-
- /* set base addresses. (no need to set?) */
-
- /* memory space, bus master enable */
- reg = epci_base + PCI_COMMAND;
- val = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- out_be32(reg, val);
-
- /* endian mode setup */
- reg = epci_base + SCC_EPCI_ECMODE;
- val = 0x00550155;
- out_be32(reg, val);
-
- /* set control option */
- reg = epci_base + SCC_EPCI_CNTOPT;
- val = in_be32(reg);
- val |= SCC_EPCI_CNTOPT_O2PMB;
- out_be32(reg, val);
-
-	/* XXX: temporary: set registers for address conversion setup */
- reg = epci_base + SCC_EPCI_CNF10_REG;
- out_be32(reg, 0x80000008);
- reg = epci_base + SCC_EPCI_CNF14_REG;
- out_be32(reg, 0x40000008);
-
- reg = epci_base + SCC_EPCI_BAM0;
- out_be32(reg, 0x80000000);
- reg = epci_base + SCC_EPCI_BAM1;
- out_be32(reg, 0xe0000000);
-
- reg = epci_base + SCC_EPCI_PVBAT;
- out_be32(reg, 0x80000000);
-
- if (!hwres) {
- /* release external PCI reset */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_PCIRST;
- out_be32(reg, val);
- }
-
- return 0;
-}
-
-static int __init celleb_setup_epci(struct device_node *node,
- struct pci_controller *hose)
-{
- struct resource r;
-
- pr_debug("PCI: celleb_setup_epci()\n");
-
- /*
- * Note:
- * Celleb epci uses cfg_addr and cfg_data member of
- * pci_controller structure in irregular way.
- *
- * cfg_addr is used to map for control registers of
- * celleb epci.
- *
- * cfg_data is used for configuration area of devices
- * on Celleb epci buses.
- */
-
- if (of_address_to_resource(node, 0, &r))
- goto error;
- hose->cfg_addr = ioremap(r.start, resource_size(&r));
- if (!hose->cfg_addr)
- goto error;
- pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_addr, resource_size(&r));
-
- if (of_address_to_resource(node, 2, &r))
- goto error;
- hose->cfg_data = ioremap(r.start, resource_size(&r));
- if (!hose->cfg_data)
- goto error;
- pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_data, resource_size(&r));
-
- hose->ops = &celleb_epci_ops;
- celleb_epci_init(hose);
-
- return 0;
-
-error:
- if (hose->cfg_addr)
- iounmap(hose->cfg_addr);
-
- if (hose->cfg_data)
- iounmap(hose->cfg_data);
- return 1;
-}
-
-struct celleb_phb_spec celleb_epci_spec __initdata = {
- .setup = celleb_setup_epci,
- .ops = &spiderpci_ops,
- .iowa_init = &spiderpci_iowa_init,
- .iowa_data = (void *)0,
-};
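
(celleb_epci_make_config_addr() above packs bus, devfn and register offset into a single offset from cfg_data, with an extra bit set for accesses that go beyond the root bus. A minimal user-space sketch of that encoding follows; the interpretation of the 0x01000000 bit as a type-1, behind-a-bridge marker is an assumption, since the removed driver never names it.)

/*
 * Illustrative only -- mirrors the offset arithmetic of
 * celleb_epci_make_config_addr(), nothing more.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t epci_cfg_offset(int root_bus, uint8_t bus, uint8_t devfn,
				uint8_t where)
{
	if (!root_bus)
		return ((uint32_t)bus << 16) | ((uint32_t)devfn << 8) |
		       where | 0x01000000;	/* assumed type-1 marker */
	return ((uint32_t)devfn << 8) | where;	/* device on the root bus */
}

int main(void)
{
	/* e.g. bus 1, device 2 function 0, offset 0x00 (vendor ID) */
	printf("0x%08x\n", epci_cfg_offset(0, 1, (2 << 3) | 0, 0x00));
	/* root bus, device 0 function 0, offset 0x04 (command register) */
	printf("0x%08x\n", epci_cfg_offset(1, 0, (0 << 3) | 0, 0x04));
	return 0;
}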
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
deleted file mode 100644
index 94170e4f2ce7..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * Support for Celleb PCI-Express.
- *
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/iommu.h>
-#include <asm/byteorder.h>
-
-#include "celleb_scc.h"
-#include "celleb_pci.h"
-
-#define PEX_IN(base, off) in_be32((void __iomem *)(base) + (off))
-#define PEX_OUT(base, off, data) out_be32((void __iomem *)(base) + (off), (data))
-
-static void scc_pciex_io_flush(struct iowa_bus *bus)
-{
- (void)PEX_IN(bus->phb->cfg_addr, PEXDMRDEN0);
-}
-
-/*
- * Memory space access to device on PCIEX
- */
-#define PCIEX_MMIO_READ(name, ret) \
-static ret scc_pciex_##name(const PCI_IO_ADDR addr) \
-{ \
- ret val = __do_##name(addr); \
- scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
- return val; \
-}
-
-#define PCIEX_MMIO_READ_STR(name) \
-static void scc_pciex_##name(const PCI_IO_ADDR addr, void *buf, \
- unsigned long count) \
-{ \
- __do_##name(addr, buf, count); \
- scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
-}
-
-PCIEX_MMIO_READ(readb, u8)
-PCIEX_MMIO_READ(readw, u16)
-PCIEX_MMIO_READ(readl, u32)
-PCIEX_MMIO_READ(readq, u64)
-PCIEX_MMIO_READ(readw_be, u16)
-PCIEX_MMIO_READ(readl_be, u32)
-PCIEX_MMIO_READ(readq_be, u64)
-PCIEX_MMIO_READ_STR(readsb)
-PCIEX_MMIO_READ_STR(readsw)
-PCIEX_MMIO_READ_STR(readsl)
-
-static void scc_pciex_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
- unsigned long n)
-{
- __do_memcpy_fromio(dest, src, n);
- scc_pciex_io_flush(iowa_mem_find_bus(src));
-}
-
-/*
- * I/O port access to devices on PCIEX.
- */
-
-static inline unsigned long get_bus_address(struct pci_controller *phb,
- unsigned long port)
-{
- return port - ((unsigned long)(phb->io_base_virt) - _IO_BASE);
-}
-
-static u32 scc_pciex_read_port(struct pci_controller *phb,
- unsigned long port, int size)
-{
- unsigned int byte_enable;
- unsigned int cmd, shift;
- unsigned long addr;
- u32 data, ret;
-
- BUG_ON(((port & 0x3ul) + size) > 4);
-
- addr = get_bus_address(phb, port);
- shift = addr & 0x3ul;
- byte_enable = ((1 << size) - 1) << shift;
- cmd = PEXDCMND_IO_READ | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
- PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
- PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
- data = PEX_IN(phb->cfg_addr, PEXDRDATA);
- ret = (data >> (shift * 8)) & (0xFFFFFFFF >> ((4 - size) * 8));
-
- pr_debug("PCIEX:PIO READ:port=0x%lx, addr=0x%lx, size=%d, be=%x,"
- " cmd=%x, data=%x, ret=%x\n", port, addr, size, byte_enable,
- cmd, data, ret);
-
- return ret;
-}
-
-static void scc_pciex_write_port(struct pci_controller *phb,
- unsigned long port, int size, u32 val)
-{
- unsigned int byte_enable;
- unsigned int cmd, shift;
- unsigned long addr;
- u32 data;
-
- BUG_ON(((port & 0x3ul) + size) > 4);
-
- addr = get_bus_address(phb, port);
- shift = addr & 0x3ul;
- byte_enable = ((1 << size) - 1) << shift;
- cmd = PEXDCMND_IO_WRITE | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
- data = (val & (0xFFFFFFFF >> (4 - size) * 8)) << (shift * 8);
- PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
- PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
- PEX_OUT(phb->cfg_addr, PEXDWDATA, data);
-
- pr_debug("PCIEX:PIO WRITE:port=0x%lx, addr=%lx, size=%d, val=%x,"
- " be=%x, cmd=%x, data=%x\n", port, addr, size, val,
- byte_enable, cmd, data);
-}
-
-static u8 __scc_pciex_inb(struct pci_controller *phb, unsigned long port)
-{
- return (u8)scc_pciex_read_port(phb, port, 1);
-}
-
-static u16 __scc_pciex_inw(struct pci_controller *phb, unsigned long port)
-{
- u32 data;
- if ((port & 0x3ul) < 3)
- data = scc_pciex_read_port(phb, port, 2);
- else {
- u32 d1 = scc_pciex_read_port(phb, port, 1);
- u32 d2 = scc_pciex_read_port(phb, port + 1, 1);
- data = d1 | (d2 << 8);
- }
- return (u16)data;
-}
-
-static u32 __scc_pciex_inl(struct pci_controller *phb, unsigned long port)
-{
- unsigned int mod = port & 0x3ul;
- u32 data;
- if (mod == 0)
- data = scc_pciex_read_port(phb, port, 4);
- else {
- u32 d1 = scc_pciex_read_port(phb, port, 4 - mod);
- u32 d2 = scc_pciex_read_port(phb, port + 1, mod);
- data = d1 | (d2 << (mod * 8));
- }
- return data;
-}
-
-static void __scc_pciex_outb(struct pci_controller *phb,
- u8 val, unsigned long port)
-{
- scc_pciex_write_port(phb, port, 1, (u32)val);
-}
-
-static void __scc_pciex_outw(struct pci_controller *phb,
- u16 val, unsigned long port)
-{
- if ((port & 0x3ul) < 3)
- scc_pciex_write_port(phb, port, 2, (u32)val);
- else {
- u32 d1 = val & 0x000000FF;
- u32 d2 = (val & 0x0000FF00) >> 8;
- scc_pciex_write_port(phb, port, 1, d1);
- scc_pciex_write_port(phb, port + 1, 1, d2);
- }
-}
-
-static void __scc_pciex_outl(struct pci_controller *phb,
- u32 val, unsigned long port)
-{
- unsigned int mod = port & 0x3ul;
- if (mod == 0)
- scc_pciex_write_port(phb, port, 4, val);
- else {
- u32 d1 = val & (0xFFFFFFFFul >> (mod * 8));
- u32 d2 = val >> ((4 - mod) * 8);
- scc_pciex_write_port(phb, port, 4 - mod, d1);
- scc_pciex_write_port(phb, port + 1, mod, d2);
- }
-}
-
-#define PCIEX_PIO_FUNC(size, name) \
-static u##size scc_pciex_in##name(unsigned long port) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(port); \
- u##size data = __scc_pciex_in##name(bus->phb, port); \
- scc_pciex_io_flush(bus); \
- return data; \
-} \
-static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(p); \
- __le##size *dst = b; \
- for (; c != 0; c--, dst++) \
- *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \
- scc_pciex_io_flush(bus); \
-} \
-static void scc_pciex_out##name(u##size val, unsigned long port) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(port); \
- __scc_pciex_out##name(bus->phb, val, port); \
-} \
-static void scc_pciex_outs##name(unsigned long p, const void *b, \
- unsigned long c) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(p); \
- const __le##size *src = b; \
- for (; c != 0; c--, src++) \
- __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \
-}
-#define __le8 u8
-#define cpu_to_le8(x) (x)
-#define le8_to_cpu(x) (x)
-PCIEX_PIO_FUNC(8, b)
-PCIEX_PIO_FUNC(16, w)
-PCIEX_PIO_FUNC(32, l)
-
-static struct ppc_pci_io scc_pciex_ops = {
- .readb = scc_pciex_readb,
- .readw = scc_pciex_readw,
- .readl = scc_pciex_readl,
- .readq = scc_pciex_readq,
- .readw_be = scc_pciex_readw_be,
- .readl_be = scc_pciex_readl_be,
- .readq_be = scc_pciex_readq_be,
- .readsb = scc_pciex_readsb,
- .readsw = scc_pciex_readsw,
- .readsl = scc_pciex_readsl,
- .memcpy_fromio = scc_pciex_memcpy_fromio,
- .inb = scc_pciex_inb,
- .inw = scc_pciex_inw,
- .inl = scc_pciex_inl,
- .outb = scc_pciex_outb,
- .outw = scc_pciex_outw,
- .outl = scc_pciex_outl,
- .insb = scc_pciex_insb,
- .insw = scc_pciex_insw,
- .insl = scc_pciex_insl,
- .outsb = scc_pciex_outsb,
- .outsw = scc_pciex_outsw,
- .outsl = scc_pciex_outsl,
-};
-
-static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
-{
- dma_addr_t dummy_page_da;
- void *dummy_page_va;
-
- dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!dummy_page_va) {
- pr_err("PCIEX:Alloc dummy_page_va failed\n");
- return -1;
- }
-
- dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
- pr_err("PCIEX:Map dummy page failed.\n");
- kfree(dummy_page_va);
- return -1;
- }
-
- PEX_OUT(bus->phb->cfg_addr, PEXDMRDADR0, dummy_page_da);
-
- return 0;
-}
-
-/*
- * config space access
- */
-#define MK_PEXDADRS(bus_no, dev_no, func_no, addr) \
- ((uint32_t)(((addr) & ~0x3UL) | \
- ((bus_no) << PEXDADRS_BUSNO_SHIFT) | \
- ((dev_no) << PEXDADRS_DEVNO_SHIFT) | \
- ((func_no) << PEXDADRS_FUNCNO_SHIFT)))
-
-#define MK_PEXDCMND_BYTE_EN(addr, size) \
- ((((0x1 << (size))-1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
-#define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))
-
-static uint32_t config_read_pciex_dev(unsigned int __iomem *base,
- uint64_t bus_no, uint64_t dev_no, uint64_t func_no,
- uint64_t off, uint64_t size)
-{
- uint32_t ret;
- uint32_t addr, cmd;
-
- addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
- cmd = MK_PEXDCMND(PEXDCMND_CONFIG_READ, off, size);
- PEX_OUT(base, PEXDADRS, addr);
- PEX_OUT(base, PEXDCMND, cmd);
- ret = (PEX_IN(base, PEXDRDATA)
- >> ((off & (4-size)) * 8)) & ((0x1 << (size * 8)) - 1);
- return ret;
-}
-
-static void config_write_pciex_dev(unsigned int __iomem *base, uint64_t bus_no,
- uint64_t dev_no, uint64_t func_no, uint64_t off, uint64_t size,
- uint32_t data)
-{
- uint32_t addr, cmd;
-
- addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
- cmd = MK_PEXDCMND(PEXDCMND_CONFIG_WRITE, off, size);
- PEX_OUT(base, PEXDADRS, addr);
- PEX_OUT(base, PEXDCMND, cmd);
- PEX_OUT(base, PEXDWDATA,
- (data & ((0x1 << (size * 8)) - 1)) << ((off & (4-size)) * 8));
-}
-
-#define MK_PEXCADRS_BYTE_EN(off, len) \
- ((((0x1 << (len)) - 1) << ((off) & 0x3)) << PEXCADRS_BYTE_EN_SHIFT)
-#define MK_PEXCADRS(cmd, addr, size) \
- ((cmd) | MK_PEXCADRS_BYTE_EN(addr, size) | ((addr) & ~0x3))
-static uint32_t config_read_pciex_rc(unsigned int __iomem *base,
- uint32_t where, uint32_t size)
-{
- PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_READ, where, size));
- return (PEX_IN(base, PEXCRDATA)
- >> ((where & (4 - size)) * 8)) & ((0x1 << (size * 8)) - 1);
-}
-
-static void config_write_pciex_rc(unsigned int __iomem *base, uint32_t where,
- uint32_t size, uint32_t val)
-{
- uint32_t data;
-
- data = (val & ((0x1 << (size * 8)) - 1)) << ((where & (4 - size)) * 8);
- PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_WRITE, where, size));
- PEX_OUT(base, PEXCWDATA, data);
-}
-
-/* Interfaces */
-/* Note: Work-around
- * On SCC PCIEXC, the same device is seen at all 32 dev_no values.
- * As SCC PCIEXC can have only one device on the bus, we look at only
- * one dev_no (dev_no = 1).
- */
-static int scc_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, unsigned int *val)
-{
- struct pci_controller *phb = pci_bus_to_host(bus);
-
- if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1) {
- *val = ~0;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- if (bus->number == 0 && PCI_SLOT(devfn) == 0)
- *val = config_read_pciex_rc(phb->cfg_addr, where, size);
- else
- *val = config_read_pciex_dev(phb->cfg_addr, bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, unsigned int val)
-{
- struct pci_controller *phb = pci_bus_to_host(bus);
-
- if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == 0 && PCI_SLOT(devfn) == 0)
- config_write_pciex_rc(phb->cfg_addr, where, size, val);
- else
- config_write_pciex_dev(phb->cfg_addr, bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops scc_pciex_pci_ops = {
- .read = scc_pciex_read_config,
- .write = scc_pciex_write_config,
-};
-
-static void pciex_clear_intr_all(unsigned int __iomem *base)
-{
- PEX_OUT(base, PEXAERRSTS, 0xffffffff);
- PEX_OUT(base, PEXPRERRSTS, 0xffffffff);
- PEX_OUT(base, PEXINTSTS, 0xffffffff);
-}
-
-#if 0
-static void pciex_disable_intr_all(unsigned int *base)
-{
- PEX_OUT(base, PEXINTMASK, 0x0);
- PEX_OUT(base, PEXAERRMASK, 0x0);
- PEX_OUT(base, PEXPRERRMASK, 0x0);
- PEX_OUT(base, PEXVDMASK, 0x0);
-}
-#endif
-
-static void pciex_enable_intr_all(unsigned int __iomem *base)
-{
- PEX_OUT(base, PEXINTMASK, 0x0000e7f1);
- PEX_OUT(base, PEXAERRMASK, 0x03ff01ff);
- PEX_OUT(base, PEXPRERRMASK, 0x0001010f);
- PEX_OUT(base, PEXVDMASK, 0x00000001);
-}
-
-static void pciex_check_status(unsigned int __iomem *base)
-{
- uint32_t err = 0;
- uint32_t intsts, aerr, prerr, rcvcp, lenerr;
- uint32_t maea, maec;
-
- intsts = PEX_IN(base, PEXINTSTS);
- aerr = PEX_IN(base, PEXAERRSTS);
- prerr = PEX_IN(base, PEXPRERRSTS);
- rcvcp = PEX_IN(base, PEXRCVCPLIDA);
- lenerr = PEX_IN(base, PEXLENERRIDA);
-
- if (intsts || aerr || prerr || rcvcp || lenerr)
- err = 1;
-
-	pr_info("PCIEXC interrupt!!\n");
- pr_info("PEXINTSTS :0x%08x\n", intsts);
- pr_info("PEXAERRSTS :0x%08x\n", aerr);
- pr_info("PEXPRERRSTS :0x%08x\n", prerr);
- pr_info("PEXRCVCPLIDA :0x%08x\n", rcvcp);
- pr_info("PEXLENERRIDA :0x%08x\n", lenerr);
-
- /* print detail of Protection Error */
- if (intsts & 0x00004000) {
- uint32_t i, n;
- for (i = 0; i < 4; i++) {
- n = 1 << i;
- if (prerr & n) {
- maea = PEX_IN(base, PEXMAEA(i));
- maec = PEX_IN(base, PEXMAEC(i));
- pr_info("PEXMAEC%d :0x%08x\n", i, maec);
- pr_info("PEXMAEA%d :0x%08x\n", i, maea);
- }
- }
- }
-
- if (err)
- pciex_clear_intr_all(base);
-}
-
-static irqreturn_t pciex_handle_internal_irq(int irq, void *dev_id)
-{
- struct pci_controller *phb = dev_id;
-
- pr_debug("PCIEX:pciex_handle_internal_irq(irq=%d)\n", irq);
-
- BUG_ON(phb->cfg_addr == NULL);
-
- pciex_check_status(phb->cfg_addr);
-
- return IRQ_HANDLED;
-}
-
-static __init int celleb_setup_pciex(struct device_node *node,
- struct pci_controller *phb)
-{
- struct resource r;
- int virq;
-
- /* SMMIO registers; used inside this file */
- if (of_address_to_resource(node, 0, &r)) {
- pr_err("PCIEXC:Failed to get config resource.\n");
- return 1;
- }
- phb->cfg_addr = ioremap(r.start, resource_size(&r));
- if (!phb->cfg_addr) {
- pr_err("PCIEXC:Failed to remap SMMIO region.\n");
- return 1;
- }
-
-	/* Don't use cfg_data; the cmd and data regs sit next to the address reg */
- phb->cfg_data = NULL;
-
- /* set pci_ops */
- phb->ops = &scc_pciex_pci_ops;
-
- /* internal interrupt handler */
- virq = irq_of_parse_and_map(node, 1);
- if (!virq) {
- pr_err("PCIEXC:Failed to map irq\n");
- goto error;
- }
- if (request_irq(virq, pciex_handle_internal_irq,
- 0, "pciex", (void *)phb)) {
- pr_err("PCIEXC:Failed to request irq\n");
- goto error;
- }
-
- /* enable all interrupts */
- pciex_clear_intr_all(phb->cfg_addr);
- pciex_enable_intr_all(phb->cfg_addr);
- /* MSI: TBD */
-
- return 0;
-
-error:
- phb->cfg_data = NULL;
- if (phb->cfg_addr)
- iounmap(phb->cfg_addr);
- phb->cfg_addr = NULL;
- return 1;
-}
-
-struct celleb_phb_spec celleb_pciex_spec __initdata = {
- .setup = celleb_setup_pciex,
- .ops = &scc_pciex_ops,
- .iowa_init = &scc_pciex_iowa_init,
-};
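
(Configuration access on the PCIEXC side is indirect: an address word (PEXDADRS) and a command word carrying the byte enables (PEXDCMND) are written before the data register is touched. The sketch below only reproduces the word layouts, with shift values copied from celleb_scc.h; it does not model the register write sequence or any hardware behaviour.)

/* Illustrative only -- word layouts of the indirect config access. */
#include <stdint.h>
#include <stdio.h>

#define PEXDADRS_BUSNO_SHIFT	20
#define PEXDADRS_DEVNO_SHIFT	15
#define PEXDADRS_FUNCNO_SHIFT	12
#define PEXDCMND_BYTE_EN_SHIFT	4
#define PEXDCMND_CONFIG_READ	0xa

static uint32_t mk_pexdadrs(uint32_t bus, uint32_t dev, uint32_t fn,
			    uint32_t off)
{
	return (off & ~0x3u) | (bus << PEXDADRS_BUSNO_SHIFT) |
	       (dev << PEXDADRS_DEVNO_SHIFT) | (fn << PEXDADRS_FUNCNO_SHIFT);
}

static uint32_t mk_pexdcmnd(uint32_t cmd, uint32_t off, uint32_t size)
{
	/* byte enables: 'size' ones, shifted to the byte lane of 'off' */
	return cmd |
	       ((((1u << size) - 1) << (off & 0x3)) << PEXDCMND_BYTE_EN_SHIFT);
}

int main(void)
{
	/* 16-bit config read of offset 0x2 (device ID) on bus 1, dev 1 */
	printf("PEXDADRS=0x%08x PEXDCMND=0x%08x\n",
	       mk_pexdadrs(1, 1, 0, 0x2),
	       mk_pexdcmnd(PEXDCMND_CONFIG_READ, 0x2, 2));
	return 0;
}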
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
deleted file mode 100644
index c8eb57193826..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * setup serial port in SCC
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/console.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-
-/* sio irq0=0xb00010022 irq1=0xb00010023 irq2=0xb00010024
- mmio=0xfff000-0x1000,0xff2000-0x1000 */
-static int txx9_serial_bitmap __initdata;
-
-static struct {
- uint32_t offset;
- uint32_t index;
-} txx9_scc_tab[3] __initdata = {
- { 0x300, 0 }, /* 0xFFF300 */
- { 0x400, 0 }, /* 0xFFF400 */
- { 0x800, 1 } /* 0xFF2800 */
-};
-
-static int __init txx9_serial_init(void)
-{
- extern int early_serial_txx9_setup(struct uart_port *port);
- struct device_node *node;
- int i;
- struct uart_port req;
- struct of_phandle_args irq;
- struct resource res;
-
- for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
- for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) {
- if (!(txx9_serial_bitmap & (1<<i)))
- continue;
-
- if (of_irq_parse_one(node, i, &irq))
- continue;
- if (of_address_to_resource(node,
- txx9_scc_tab[i].index, &res))
- continue;
-
- memset(&req, 0, sizeof(req));
- req.line = i;
- req.iotype = UPIO_MEM;
- req.mapbase = res.start + txx9_scc_tab[i].offset;
-#ifdef CONFIG_SERIAL_TXX9_CONSOLE
- req.membase = ioremap(req.mapbase, 0x24);
-#endif
- req.irq = irq_create_of_mapping(&irq);
- req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
- /*HAVE_CTS_LINE*/;
- req.uartclk = 83300000;
- early_serial_txx9_setup(&req);
- }
- }
-
- return 0;
-}
-
-static int __init txx9_serial_config(char *ptr)
-{
- int i;
-
- for (;;) {
- switch (get_option(&ptr, &i)) {
- default:
- return 0;
- case 2:
- txx9_serial_bitmap |= 1 << i;
- break;
- case 1:
- txx9_serial_bitmap |= 1 << i;
- return 0;
- }
- }
-}
-__setup("txx9_serial=", txx9_serial_config);
-
-console_initcall(txx9_serial_init);
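
(txx9_serial_config() above turns the comma-separated "txx9_serial=" parameter into a port bitmap using the kernel's get_option() helper. A rough user-space approximation with strtol(), shown only to illustrate the bitmap idea rather than the kernel API, is:)

/* Illustrative only -- not the kernel's get_option() based parser. */
#include <stdio.h>
#include <stdlib.h>

static int parse_serial_bitmap(const char *s)
{
	int bitmap = 0;
	char *end;

	while (*s) {
		long i = strtol(s, &end, 0);
		if (end == s)
			break;			/* no number found, stop */
		if (i >= 0 && i < 32)
			bitmap |= 1 << i;	/* enable port i */
		if (*end != ',')
			break;			/* last entry */
		s = end + 1;
	}
	return bitmap;
}

int main(void)
{
	/* "txx9_serial=0,2" would enable SCC UART ports 0 and 2 */
	printf("bitmap=0x%x\n", parse_serial_bitmap("0,2"));
	return 0;
}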
diff --git a/arch/powerpc/platforms/cell/celleb_scc_uhc.c b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
deleted file mode 100644
index d63b720bfe3a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_uhc.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SCC (Super Companion Chip) UHC setup
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-
-#include "celleb_scc.h"
-
-#define UHC_RESET_WAIT_MAX 10000
-
-static inline int uhc_clkctrl_ready(u32 val)
-{
-	const u32 mask = SCC_UHC_USBCEN | SCC_UHC_USBEN;
- return((val & mask) == mask);
-}
-
-/*
- * UHC (USB host controller) enable function.
- * Affects both the OHCI and EHCI core modules.
- */
-static void enable_scc_uhc(struct pci_dev *dev)
-{
- void __iomem *uhc_base;
- u32 __iomem *uhc_clkctrl;
- u32 __iomem *uhc_ecmode;
- u32 val = 0;
- int i;
-
- if (!machine_is(celleb_beat) &&
- !machine_is(celleb_native))
- return;
-
- uhc_base = ioremap(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
- if (!uhc_base) {
- printk(KERN_ERR "failed to map UHC register base.\n");
- return;
- }
- uhc_clkctrl = uhc_base + SCC_UHC_CKRCTRL;
- uhc_ecmode = uhc_base + SCC_UHC_ECMODE;
-
- /* setup for normal mode */
- val |= SCC_UHC_F48MCKLEN;
- out_be32(uhc_clkctrl, val);
- val |= SCC_UHC_PHY_SUSPEND_SEL;
- out_be32(uhc_clkctrl, val);
- udelay(10);
- val |= SCC_UHC_PHYEN;
- out_be32(uhc_clkctrl, val);
- udelay(50);
-
- /* disable reset */
- val |= SCC_UHC_HCLKEN;
- out_be32(uhc_clkctrl, val);
- val |= (SCC_UHC_USBCEN | SCC_UHC_USBEN);
- out_be32(uhc_clkctrl, val);
- i = 0;
- while (!uhc_clkctrl_ready(in_be32(uhc_clkctrl))) {
- udelay(10);
- if (i++ > UHC_RESET_WAIT_MAX) {
- printk(KERN_ERR "Failed to disable UHC reset %x\n",
- in_be32(uhc_clkctrl));
- break;
- }
- }
-
- /* Endian Conversion Mode for Master ALL area */
- out_be32(uhc_ecmode, SCC_UHC_ECMODE_BY_BYTE);
-
- iounmap(uhc_base);
-}
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
- PCI_DEVICE_ID_TOSHIBA_SCC_USB, enable_scc_uhc);
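
(enable_scc_uhc() above waits for the USB clock-enable bits with a bounded polling loop rather than an unbounded busy-wait. The same pattern is pulled out below into a self-contained form with a simulated register read; the ready mask 0x30000 matches SCC_UHC_USBEN | SCC_UHC_USBCEN from the header, while the simulation itself is invented purely for illustration.)

/* Illustrative only -- bounded poll-until-ready pattern. */
#include <stdio.h>

#define POLL_MAX 10000

/* Simulated register: pretend the bits come up after five reads. */
static unsigned int read_clkctrl(void)
{
	static int calls;
	return (++calls > 5) ? 0x30000u : 0;
}

static int wait_for_bits(unsigned int mask)
{
	int i;

	for (i = 0; i <= POLL_MAX; i++) {
		if ((read_clkctrl() & mask) == mask)
			return 0;		/* ready */
		/* on real hardware: udelay(10) between polls */
	}
	return -1;				/* timed out */
}

int main(void)
{
	printf("wait_for_bits: %s\n",
	       wait_for_bits(0x30000u) == 0 ? "ready" : "timeout");
	return 0;
}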
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
deleted file mode 100644
index 90be8ec51686..000000000000
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Celleb setup code
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/cell/setup.c:
- * Copyright (C) 1995 Linus Torvalds
- * Adapted from 'alpha' version by Gary Thomas
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- * Modified by PPC64 Team, IBM Corp
- * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#include <linux/console.h>
-#include <linux/of_platform.h>
-
-#include <asm/mmu.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/irq.h>
-#include <asm/time.h>
-#include <asm/spu_priv1.h>
-#include <asm/firmware.h>
-#include <asm/rtas.h>
-#include <asm/cell-regs.h>
-
-#include "beat_interrupt.h"
-#include "beat_wrapper.h"
-#include "beat.h"
-#include "celleb_pci.h"
-#include "interrupt.h"
-#include "pervasive.h"
-#include "ras.h"
-
-static char celleb_machine_type[128] = "Celleb";
-
-static void celleb_show_cpuinfo(struct seq_file *m)
-{
- struct device_node *root;
- const char *model = "";
-
- root = of_find_node_by_path("/");
- if (root)
- model = of_get_property(root, "model", NULL);
-	/* using "CHRP" here tricks anaconda into installing FCx onto Celleb */
- seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model);
- of_node_put(root);
-}
-
-static int __init celleb_machine_type_hack(char *ptr)
-{
- strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type));
- return 0;
-}
-
-__setup("celleb_machine_type_hack=", celleb_machine_type_hack);
-
-static void celleb_progress(char *s, unsigned short hex)
-{
- printk("*** %04x : %s\n", hex, s ? s : "");
-}
-
-static void __init celleb_setup_arch_common(void)
-{
- /* init to some ~sane value until calibrate_delay() runs */
- loops_per_jiffy = 50000000;
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-}
-
-static const struct of_device_id celleb_bus_ids[] __initconst = {
- { .type = "scc", },
- { .type = "ioif", }, /* old style */
- {},
-};
-
-static int __init celleb_publish_devices(void)
-{
- /* Publish OF platform devices for southbridge IOs */
- of_platform_bus_probe(NULL, celleb_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(celleb_beat, celleb_publish_devices);
-machine_device_initcall(celleb_native, celleb_publish_devices);
-
-
-/*
- * functions for Celleb-Beat
- */
-static void __init celleb_setup_arch_beat(void)
-{
-#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_beat_ops;
- spu_management_ops = &spu_management_of_ops;
-#endif
-
- celleb_setup_arch_common();
-}
-
-static int __init celleb_probe_beat(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "Beat"))
- return 0;
-
- powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
- | FW_FEATURE_BEAT | FW_FEATURE_LPAR;
- hpte_init_beat_v3();
- pm_power_off = beat_power_off;
-
- return 1;
-}
-
-
-/*
- * functions for Celleb-native
- */
-static void __init celleb_init_IRQ_native(void)
-{
- iic_init_IRQ();
- spider_init_IRQ();
-}
-
-static void __init celleb_setup_arch_native(void)
-{
-#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_mmio_ops;
- spu_management_ops = &spu_management_of_ops;
-#endif
-
- cbe_regs_init();
-
-#ifdef CONFIG_CBE_RAS
- cbe_ras_init();
-#endif
-
-#ifdef CONFIG_SMP
- smp_init_cell();
-#endif
-
- cbe_pervasive_init();
-
- /* XXX: nvram initialization should be added */
-
- celleb_setup_arch_common();
-}
-
-static int __init celleb_probe_native(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "Beat") ||
- !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
- return 0;
-
- powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
- hpte_init_native();
- pm_power_off = rtas_power_off;
-
- return 1;
-}
-
-
-/*
- * machine definitions
- */
-define_machine(celleb_beat) {
- .name = "Cell Reference Set (Beat)",
- .probe = celleb_probe_beat,
- .setup_arch = celleb_setup_arch_beat,
- .show_cpuinfo = celleb_show_cpuinfo,
- .restart = beat_restart,
- .halt = beat_halt,
- .get_rtc_time = beat_get_rtc_time,
- .set_rtc_time = beat_set_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = celleb_progress,
- .power_save = beat_power_save,
- .nvram_size = beat_nvram_get_size,
- .nvram_read = beat_nvram_read,
- .nvram_write = beat_nvram_write,
- .set_dabr = beat_set_xdabr,
- .init_IRQ = beatic_init_IRQ,
- .get_irq = beatic_get_irq,
- .pci_probe_mode = celleb_pci_probe_mode,
- .pci_setup_phb = celleb_setup_phb,
-#ifdef CONFIG_KEXEC
- .kexec_cpu_down = beat_kexec_cpu_down,
-#endif
-};
-
-define_machine(celleb_native) {
- .name = "Cell Reference Set (native)",
- .probe = celleb_probe_native,
- .setup_arch = celleb_setup_arch_native,
- .show_cpuinfo = celleb_show_cpuinfo,
- .restart = rtas_restart,
- .halt = rtas_halt,
- .get_boot_time = rtas_get_boot_time,
- .get_rtc_time = rtas_get_rtc_time,
- .set_rtc_time = rtas_set_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = celleb_progress,
- .pci_probe_mode = celleb_pci_probe_mode,
- .pci_setup_phb = celleb_setup_phb,
- .init_IRQ = celleb_init_IRQ_native,
-};
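
(The two define_machine() entries above are selected by their probe functions: a flat device tree root compatible with "Beat" picks the Beat machine, while a "TOSHIBA,Celleb" root that is not Beat picks the native one. Below is a simplified sketch of that decision using plain string equality; the real of_flat_dt_is_compatible() checks membership in a compatible list, so this is only an approximation.)

/* Illustrative only -- simplified celleb machine selection. */
#include <stdio.h>
#include <string.h>

static const char *pick_celleb_machine(const char *root_compatible)
{
	if (strcmp(root_compatible, "Beat") == 0)
		return "Cell Reference Set (Beat)";
	if (strcmp(root_compatible, "TOSHIBA,Celleb") == 0)
		return "Cell Reference Set (native)";
	return "no match";
}

int main(void)
{
	printf("%s\n", pick_celleb_machine("Beat"));
	printf("%s\n", pick_celleb_machine("TOSHIBA,Celleb"));
	return 0;
}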
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 4c11421847be..3af8324c122e 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
void iic_setup_cpu(void)
{
- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
+ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
}
u8 iic_get_target_id(int cpu)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index c7c8720aa39f..21b502398bf3 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -39,6 +39,7 @@
#include <asm/firmware.h>
#include <asm/cell-regs.h>
+#include "cell.h"
#include "interrupt.h"
/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
@@ -197,7 +198,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
+ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
mb();
@@ -857,7 +858,7 @@ static int __init cell_iommu_init_disabled(void)
cell_dma_direct_offset += base;
if (cell_dma_direct_offset != 0)
- ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
printk("iommu: disabled, direct DMA offset is 0x%lx\n",
cell_dma_direct_offset);
@@ -1197,8 +1198,8 @@ static int __init cell_iommu_init(void)
if (cell_iommu_init_disabled() == 0)
goto bail;
- /* Setup various ppc_md. callbacks */
- ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ /* Setup various callbacks */
+ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
ppc_md.tce_build = tce_build_cell;
ppc_md.tce_free = tce_free_cell;
@@ -1234,5 +1235,3 @@ static int __init cell_iommu_init(void)
return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
-machine_arch_initcall(celleb_native, cell_iommu_init);
-
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index d62aa982d530..36cff28d0293 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -54,6 +54,7 @@
#include <asm/cell-regs.h>
#include <asm/io-workarounds.h>
+#include "cell.h"
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"
@@ -126,6 +127,8 @@ static int cell_setup_phb(struct pci_controller *phb)
if (rc)
return rc;
+ phb->controller_ops = cell_pci_controller_ops;
+
np = phb->dn;
model = of_get_property(np, "model", NULL);
if (model == NULL || strcmp(np->name, "pci"))
@@ -279,3 +282,5 @@ define_machine(cell) {
.init_IRQ = cell_init_irq,
.pci_setup_phb = cell_setup_phb,
};
+
+struct pci_controller_ops cell_pci_controller_ops;
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index b64e7ead752f..895560f4be69 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -102,13 +102,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
return 1;
}
-static int __init smp_iic_probe(void)
-{
- iic_request_IPIs();
-
- return num_possible_cpus();
-}
-
static void smp_cell_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
@@ -139,7 +132,7 @@ static int smp_cell_kick_cpu(int nr)
static struct smp_ops_t bpa_iic_smp_ops = {
.message_pass = iic_message_pass,
- .probe = smp_iic_probe,
+ .probe = iic_request_IPIs,
.kick_cpu = smp_cell_kick_cpu,
.setup_cpu = smp_cell_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index b0ec78e8ad68..a494028b2cdf 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -39,6 +39,7 @@ static void *spu_syscall_table[] = {
#define PPC_SYS(func) sys_ni_syscall,
#define OLDSYS(func) sys_ni_syscall,
#define SYS32ONLY(func) sys_ni_syscall,
+#define PPC64ONLY(func) sys_ni_syscall,
#define SYSX(f, f3264, f32) sys_ni_syscall,
#define SYSCALL_SPU(func) sys_##func,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1a3429e1ccb5..1ba6307be4db 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -111,7 +111,7 @@ out:
static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if ((attr->ia_valid & ATTR_SIZE) &&
(attr->ia_size != inode->i_size))
@@ -163,14 +163,14 @@ static void spufs_prune_dir(struct dentry *dir)
{
struct dentry *dentry, *tmp;
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock(&d_inode(dir)->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
spin_lock(&dentry->d_lock);
- if (!(d_unhashed(dentry)) && dentry->d_inode) {
+ if (!(d_unhashed(dentry)) && d_really_is_positive(dentry)) {
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- simple_unlink(dir->d_inode, dentry);
+ simple_unlink(d_inode(dir), dentry);
/* XXX: what was dcache_lock protecting here? Other
* filesystems (IB, configfs) release dcache_lock
* before unlink */
@@ -180,7 +180,7 @@ static void spufs_prune_dir(struct dentry *dir)
}
}
shrink_dcache_parent(dir);
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
}
/* Caller must hold parent->i_mutex */
@@ -192,7 +192,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir)
d_drop(dir);
res = simple_rmdir(parent, dir);
/* We have to give up the mm_struct */
- spu_forget(SPUFS_I(dir->d_inode)->i_ctx);
+ spu_forget(SPUFS_I(d_inode(dir))->i_ctx);
return res;
}
@@ -222,8 +222,8 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
int ret;
dir = file->f_path.dentry;
- parent = dir->d_parent->d_inode;
- ctx = SPUFS_I(dir->d_inode)->i_ctx;
+ parent = d_inode(dir->d_parent);
+ ctx = SPUFS_I(d_inode(dir))->i_ctx;
mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
ret = spufs_rmdir(parent, dir);
@@ -460,7 +460,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
goto out_aff_unlock;
if (affinity) {
- spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
+ spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
neighbor);
if (neighbor)
put_spu_context(neighbor);
@@ -504,7 +504,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
d_instantiate(dentry, inode);
inc_nlink(dir);
- inc_nlink(dentry->d_inode);
+ inc_nlink(d_inode(dentry));
return ret;
out_iput:
@@ -561,7 +561,7 @@ static struct file_system_type spufs_type;
long spufs_create(struct path *path, struct dentry *dentry,
unsigned int flags, umode_t mode, struct file *filp)
{
- struct inode *dir = path->dentry->d_inode;
+ struct inode *dir = d_inode(path->dentry);
int ret;
/* check if we are on spufs */
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 860a59eb8ea2..15ebc4e8a151 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -253,7 +253,7 @@ static void briq_restart(char *cmd)
* But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
to the built-in serial node. Instead, a /failsafe node is created.
*/
-static void chrp_init_early(void)
+static __init void chrp_init_early(void)
{
struct device_node *node;
const char *property;
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index c6911ddc479f..eecfa182b06e 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -10,3 +10,5 @@ extern void maple_calibrate_decr(void);
extern void maple_pci_init(void);
extern void maple_pci_irq_fixup(struct pci_dev *dev);
extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
+
+extern struct pci_controller_ops maple_pci_controller_ops;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index d3a13067ec42..a923230e575b 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -510,6 +510,7 @@ static int __init maple_add_bridge(struct device_node *dev)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->controller_ops = maple_pci_controller_ops;
disp_name = NULL;
if (of_device_is_compatible(dev, "u3-agp")) {
@@ -660,3 +661,6 @@ static void quirk_ipr_msi(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
quirk_ipr_msi);
+
+struct pci_controller_ops maple_pci_controller_ops = {
+};
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 56b85cd61aaf..a837188544c8 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -203,7 +203,7 @@ static void __init maple_init_early(void)
{
DBG(" -> maple_init_early\n");
- iommu_init_early_dart();
+ iommu_init_early_dart(&maple_pci_controller_ops);
DBG(" <- maple_init_early\n");
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 2e576f2ae442..b8f567b2ea19 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -27,6 +27,8 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include "pasemi.h"
+
#define IOBMAP_PAGE_SHIFT 12
#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1)
@@ -248,8 +250,8 @@ void __init iommu_init_early_pasemi(void)
iob_init(NULL);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pasemi;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pasemi;
+ pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
+ pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
ppc_md.tce_build = iobmap_build;
ppc_md.tce_free = iobmap_free;
set_pci_dma_ops(&dma_iommu_ops);
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index ea65bf0eb897..11f230a48227 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -30,5 +30,6 @@ static inline void restore_astate(int cpu)
}
#endif
+extern struct pci_controller_ops pasemi_pci_controller_ops;
#endif /* _PASEMI_PASEMI_H */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index aa862713258c..f3a68a0fef23 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -31,6 +31,8 @@
#include <asm/ppc-pci.h>
+#include "pasemi.h"
+
#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
static inline int pa_pxp_offset_valid(u8 bus, u8 devfn, int offset)
@@ -199,6 +201,7 @@ static int __init pas_add_bridge(struct device_node *dev)
hose->first_busno = 0;
hose->last_busno = 0xff;
+ hose->controller_ops = pasemi_pci_controller_ops;
setup_pa_pxp(hose);
@@ -239,3 +242,5 @@ void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
return (void __iomem *)pa_pxp_cfg_addr(hose, dev->bus->number, dev->devfn, offset);
}
+
+struct pci_controller_ops pasemi_pci_controller_ops;
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 3e91ef538114..76f5013c35e5 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -246,7 +246,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
DBG(" detected display ! adding properties names !\n");
bootx_dt_add_string("linux,boot-display", mem_end);
bootx_dt_add_string("linux,opened", mem_end);
- strncpy(bootx_disp_path, namep, 255);
+ strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
}
/* get and store all property names */
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f4071a67ad00..59ab16fa600f 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -27,6 +27,8 @@
#include <asm/grackle.h>
#include <asm/ppc-pci.h>
+#include "pmac.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -798,6 +800,7 @@ static int __init pmac_add_bridge(struct device_node *dev)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->controller_ops = pmac_pci_controller_ops;
disp_name = NULL;
@@ -942,7 +945,7 @@ void __init pmac_pci_init(void)
}
#ifdef CONFIG_PPC32
-int pmac_pci_enable_device_hook(struct pci_dev *dev)
+static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
{
struct device_node* node;
int updatecfg = 0;
@@ -958,11 +961,11 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
&& !node) {
printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
pci_name(dev));
- return -EINVAL;
+ return false;
}
if (!node)
- return 0;
+ return true;
uninorth_child = node->parent &&
of_device_is_compatible(node->parent, "uni-north");
@@ -1003,7 +1006,7 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
L1_CACHE_BYTES >> 2);
}
- return 0;
+ return true;
}
void pmac_pci_fixup_ohci(struct pci_dev *dev)
@@ -1223,3 +1226,30 @@ static void fixup_u4_pcie(struct pci_dev* dev)
pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
+
+#ifdef CONFIG_PPC64
+static int pmac_pci_probe_mode(struct pci_bus *bus)
+{
+ struct device_node *node = pci_bus_to_OF_node(bus);
+
+ /* We need to use normal PCI probing for the AGP bus,
+ * since the device for the AGP bridge isn't in the tree.
+ * Same for the PCIe host on U4 and the HT host bridge.
+ */
+ if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
+ of_device_is_compatible(node, "u4-pcie") ||
+ of_device_is_compatible(node, "u3-ht")))
+ return PCI_PROBE_NORMAL;
+ return PCI_PROBE_DEVTREE;
+}
+#endif /* CONFIG_PPC64 */
+
+struct pci_controller_ops pmac_pci_controller_ops = {
+#ifdef CONFIG_PPC64
+ .probe_mode = pmac_pci_probe_mode,
+#endif
+#ifdef CONFIG_PPC32
+ .enable_device_hook = pmac_pci_enable_device_hook,
+#endif
+};
+
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 4c24bf60d39d..59cfc9d63c2d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -321,6 +321,9 @@ static void __init pmac_pic_probe_oldstyle(void)
max_irqs = max_real_irqs = 64;
/* We might have a second cascaded heathrow */
+
+ /* Compensate for of_node_put() in of_find_node_by_name() */
+ of_node_get(master);
slave = of_find_node_by_name(master, "mac-io");
/* Check ordering of master & slave */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 8327cce2bdb0..e7f8163d6769 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -25,7 +25,6 @@ extern void pmac_pci_init(void);
extern void pmac_nvram_update(void);
extern unsigned char pmac_nvram_read_byte(int addr);
extern void pmac_nvram_write_byte(int addr, unsigned char val);
-extern int pmac_pci_enable_device_hook(struct pci_dev *dev);
extern void pmac_pcibios_after_init(void);
extern int of_show_percpuinfo(struct seq_file *m, int i);
@@ -39,4 +38,6 @@ extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
extern void pmac_pic_init(void);
+extern struct pci_controller_ops pmac_pci_controller_ops;
+
#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 713d36d45d1d..8dd78f4e1af4 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -473,7 +473,7 @@ static void __init pmac_init_early(void)
udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
#ifdef CONFIG_PPC64
- iommu_init_early_dart();
+ iommu_init_early_dart(&pmac_pci_controller_ops);
#endif
/* SMP Init has to be done early as we need to patch up
@@ -637,24 +637,6 @@ static int __init pmac_probe(void)
return 1;
}
-#ifdef CONFIG_PPC64
-/* Move that to pci.c */
-static int pmac_pci_probe_mode(struct pci_bus *bus)
-{
- struct device_node *node = pci_bus_to_OF_node(bus);
-
- /* We need to use normal PCI probing for the AGP bus,
- * since the device for the AGP bridge isn't in the tree.
- * Same for the PCIe host on U4 and the HT host bridge.
- */
- if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
- of_device_is_compatible(node, "u4-pcie") ||
- of_device_is_compatible(node, "u3-ht")))
- return PCI_PROBE_NORMAL;
- return PCI_PROBE_DEVTREE;
-}
-#endif /* CONFIG_PPC64 */
-
define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
@@ -674,12 +656,10 @@ define_machine(powermac) {
.feature_call = pmac_do_feature_call,
.progress = udbg_progress,
#ifdef CONFIG_PPC64
- .pci_probe_mode = pmac_pci_probe_mode,
.power_save = power4_idle,
.enable_pmcs = power4_enable_pmcs,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
- .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index af094ae03dbb..28a147ca32ba 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -268,14 +268,14 @@ static void __init psurge_quad_init(void)
mdelay(33);
}
-static int __init smp_psurge_probe(void)
+static void __init smp_psurge_probe(void)
{
int i, ncpus;
struct device_node *dn;
/* We don't do SMP on the PPC601 -- paulus */
if (PVR_VER(mfspr(SPRN_PVR)) == 1)
- return 1;
+ return;
/*
* The powersurge cpu board can be used in the generation
@@ -289,7 +289,7 @@ static int __init smp_psurge_probe(void)
*/
dn = of_find_node_by_name(NULL, "hammerhead");
if (dn == NULL)
- return 1;
+ return;
of_node_put(dn);
hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
@@ -310,13 +310,13 @@ static int __init smp_psurge_probe(void)
/* not a dual-cpu card */
iounmap(hhead_base);
psurge_type = PSURGE_NONE;
- return 1;
+ return;
}
ncpus = 2;
}
if (psurge_secondary_ipi_init())
- return 1;
+ return;
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
@@ -332,8 +332,6 @@ static int __init smp_psurge_probe(void)
set_cpu_present(i, true);
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
-
- return ncpus;
}
static int __init smp_psurge_kick_cpu(int nr)
@@ -766,7 +764,7 @@ static void __init smp_core99_setup(int ncpus)
powersave_nap = 0;
}
-static int __init smp_core99_probe(void)
+static void __init smp_core99_probe(void)
{
struct device_node *cpus;
int ncpus = 0;
@@ -781,7 +779,7 @@ static int __init smp_core99_probe(void)
/* Nothing more to do if less than 2 of them */
if (ncpus <= 1)
- return 1;
+ return;
/* We need to perform some early initialisations before we can start
* setting up SMP as we are running before initcalls
@@ -797,8 +795,6 @@ static int __init smp_core99_probe(void)
/* Collect l2cr and l3cr values from CPU 0 */
core99_init_caches(0);
-
- return ncpus;
}
static int smp_core99_kick_cpu(int nr)
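The smp.c changes above switch the probe hooks from returning a CPU count to returning void; the number of CPUs is now conveyed purely through the cpu_present mask. A hedged sketch of what a minimal probe of this shape looks like (the CPU count and helper name are illustrative):

    /* Sketch only: mark CPUs present instead of returning a count. */
    static void __init example_smp_probe(void)
    {
    	int i, ncpus = 2;		/* illustrative value */

    	for (i = 0; i < ncpus; ++i)
    		set_cpu_present(i, true);
    	/* nothing to return -- the generic code reads the cpu_present mask */
    }
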
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 45a8ed0585cd..4b044d8cb49a 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -19,10 +19,3 @@ config PPC_POWERNV
select CPU_FREQ_GOV_CONSERVATIVE
select PPC_DOORBELL
default y
-
-config PPC_POWERNV_RTAS
- depends on PPC_POWERNV
- bool "Support for RTAS based PowerNV platforms such as BML"
- default y
- select PPC_ICS_RTAS
- select PPC_RTAS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 6f3c5d33c3af..33e44f37212f 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -5,7 +5,7 @@ obj-y += opal-msglog.o opal-hmi.o opal-power.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
-obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
+obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
deleted file mode 100644
index 2809c9895288..000000000000
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-/*
- * The file intends to implement the functions needed by EEH, which is
- * built on IODA compliant chip. Actually, lots of functions related
- * to EEH would be built based on the OPAL APIs.
- *
- * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/msi.h>
-#include <linux/notifier.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-
-#include <asm/eeh.h>
-#include <asm/eeh_event.h>
-#include <asm/io.h>
-#include <asm/iommu.h>
-#include <asm/msi_bitmap.h>
-#include <asm/opal.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-#include <asm/tce.h>
-
-#include "powernv.h"
-#include "pci.h"
-
-static int ioda_eeh_nb_init = 0;
-
-static int ioda_eeh_event(struct notifier_block *nb,
- unsigned long events, void *change)
-{
- uint64_t changed_evts = (uint64_t)change;
-
- /*
- * We simply send special EEH event if EEH has
- * been enabled, or clear pending events in
- * case that we enable EEH soon
- */
- if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
- !(events & OPAL_EVENT_PCI_ERROR))
- return 0;
-
- if (eeh_enabled())
- eeh_send_failure_event(NULL);
- else
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
-
- return 0;
-}
-
-static struct notifier_block ioda_eeh_nb = {
- .notifier_call = ioda_eeh_event,
- .next = NULL,
- .priority = 0
-};
-
-#ifdef CONFIG_DEBUG_FS
-static ssize_t ioda_eeh_ei_write(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct pci_controller *hose = filp->private_data;
- struct pnv_phb *phb = hose->private_data;
- struct eeh_dev *edev;
- struct eeh_pe *pe;
- int pe_no, type, func;
- unsigned long addr, mask;
- char buf[50];
- int ret;
-
- if (!phb->eeh_ops || !phb->eeh_ops->err_inject)
- return -ENXIO;
-
- ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
- if (!ret)
- return -EFAULT;
-
- /* Retrieve parameters */
- ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
- &pe_no, &type, &func, &addr, &mask);
- if (ret != 5)
- return -EINVAL;
-
- /* Retrieve PE */
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return -ENOMEM;
- edev->phb = hose;
- edev->pe_config_addr = pe_no;
- pe = eeh_pe_get(edev);
- kfree(edev);
- if (!pe)
- return -ENODEV;
-
- /* Do error injection */
- ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
- return ret < 0 ? ret : count;
-}
-
-static const struct file_operations ioda_eeh_ei_fops = {
- .open = simple_open,
- .llseek = no_llseek,
- .write = ioda_eeh_ei_write,
-};
-
-static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
-{
- struct pci_controller *hose = data;
- struct pnv_phb *phb = hose->private_data;
-
- out_be64(phb->regs + offset, val);
- return 0;
-}
-
-static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
-{
- struct pci_controller *hose = data;
- struct pnv_phb *phb = hose->private_data;
-
- *val = in_be64(phb->regs + offset);
- return 0;
-}
-
-static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xD10, val);
-}
-
-static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xD10, val);
-}
-
-static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xD90, val);
-}
-
-static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xD90, val);
-}
-
-static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xE10, val);
-}
-
-static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xE10, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
- ioda_eeh_outb_dbgfs_set, "0x%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
- ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
- ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
-#endif /* CONFIG_DEBUG_FS */
-
-
-/**
- * ioda_eeh_post_init - Chip dependent post initialization
- * @hose: PCI controller
- *
- * The function will be called after eeh PEs and devices
- * have been built. That means the EEH is ready to supply
- * service with I/O cache.
- */
-static int ioda_eeh_post_init(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- int ret;
-
- /* Register OPAL event notifier */
- if (!ioda_eeh_nb_init) {
- ret = opal_notifier_register(&ioda_eeh_nb);
- if (ret) {
- pr_err("%s: Can't register OPAL event notifier (%d)\n",
- __func__, ret);
- return ret;
- }
-
- ioda_eeh_nb_init = 1;
- }
-
-#ifdef CONFIG_DEBUG_FS
- if (!phb->has_dbgfs && phb->dbgfs) {
- phb->has_dbgfs = 1;
-
- debugfs_create_file("err_injct", 0200,
- phb->dbgfs, hose,
- &ioda_eeh_ei_fops);
-
- debugfs_create_file("err_injct_outbound", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_outb_dbgfs_ops);
- debugfs_create_file("err_injct_inboundA", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_inbA_dbgfs_ops);
- debugfs_create_file("err_injct_inboundB", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_inbB_dbgfs_ops);
- }
-#endif
-
- /* If EEH is enabled, we're going to rely on that.
- * Otherwise, we restore to conventional mechanism
- * to clear frozen PE during PCI config access.
- */
- if (eeh_enabled())
- phb->flags |= PNV_PHB_FLAG_EEH;
- else
- phb->flags &= ~PNV_PHB_FLAG_EEH;
-
- return 0;
-}
-
-/**
- * ioda_eeh_set_option - Set EEH operation or I/O setting
- * @pe: EEH PE
- * @option: options
- *
- * Enable or disable EEH option for the indicated PE. The
- * function also can be used to enable I/O or DMA for the
- * PE.
- */
-static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
-{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- bool freeze_pe = false;
- int enable, ret = 0;
- s64 rc;
-
- /* Check on PE number */
- if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
- pr_err("%s: PE address %x out of range [0, %x] "
- "on PHB#%x\n",
- __func__, pe->addr, phb->ioda.total_pe,
- hose->global_number);
- return -EINVAL;
- }
-
- switch (option) {
- case EEH_OPT_DISABLE:
- return -EPERM;
- case EEH_OPT_ENABLE:
- return 0;
- case EEH_OPT_THAW_MMIO:
- enable = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
- break;
- case EEH_OPT_THAW_DMA:
- enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
- break;
- case EEH_OPT_FREEZE_PE:
- freeze_pe = true;
- enable = OPAL_EEH_ACTION_SET_FREEZE_ALL;
- break;
- default:
- pr_warn("%s: Invalid option %d\n",
- __func__, option);
- return -EINVAL;
- }
-
- /* If PHB supports compound PE, to handle it */
- if (freeze_pe) {
- if (phb->freeze_pe) {
- phb->freeze_pe(phb, pe->addr);
- } else {
- rc = opal_pci_eeh_freeze_set(phb->opal_id,
- pe->addr,
- enable);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld freezing "
- "PHB#%x-PE#%x\n",
- __func__, rc,
- phb->hose->global_number, pe->addr);
- ret = -EIO;
- }
- }
- } else {
- if (phb->unfreeze_pe) {
- ret = phb->unfreeze_pe(phb, pe->addr, enable);
- } else {
- rc = opal_pci_eeh_freeze_clear(phb->opal_id,
- pe->addr,
- enable);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld enable %d "
- "for PHB#%x-PE#%x\n",
- __func__, rc, option,
- phb->hose->global_number, pe->addr);
- ret = -EIO;
- }
- }
- }
-
- return ret;
-}
-
-static void ioda_eeh_phb_diag(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- long rc;
-
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
- PNV_PCI_DIAG_BUF_SIZE);
- if (rc != OPAL_SUCCESS)
- pr_warn("%s: Failed to get diag-data for PHB#%x (%ld)\n",
- __func__, pe->phb->global_number, rc);
-}
-
-static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
- s64 rc;
- int result = 0;
-
- rc = opal_pci_eeh_freeze_status(phb->opal_id,
- pe->addr,
- &fstate,
- &pcierr,
- NULL);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld getting PHB#%x state\n",
- __func__, rc, phb->hose->global_number);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- /*
- * Check PHB state. If the PHB is frozen for the
- * first time, to dump the PHB diag-data.
- */
- if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- } else if (!(pe->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- }
-
- return result;
-}
-
-static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
- s64 rc;
- int result;
-
- /*
- * We don't clobber hardware frozen state until PE
- * reset is completed. In order to keep EEH core
- * moving forward, we have to return operational
- * state during PE reset.
- */
- if (pe->state & EEH_PE_RESET) {
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- return result;
- }
-
- /*
- * Fetch PE state from hardware. If the PHB
- * supports compound PE, let it handle that.
- */
- if (phb->get_pe_state) {
- fstate = phb->get_pe_state(phb, pe->addr);
- } else {
- rc = opal_pci_eeh_freeze_status(phb->opal_id,
- pe->addr,
- &fstate,
- &pcierr,
- NULL);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
- __func__, rc, phb->hose->global_number, pe->addr);
- return EEH_STATE_NOT_SUPPORT;
- }
- }
-
- /* Figure out state */
- switch (fstate) {
- case OPAL_EEH_STOPPED_NOT_FROZEN:
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- break;
- case OPAL_EEH_STOPPED_MMIO_FREEZE:
- result = (EEH_STATE_DMA_ACTIVE |
- EEH_STATE_DMA_ENABLED);
- break;
- case OPAL_EEH_STOPPED_DMA_FREEZE:
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_MMIO_ENABLED);
- break;
- case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
- result = 0;
- break;
- case OPAL_EEH_STOPPED_RESET:
- result = EEH_STATE_RESET_ACTIVE;
- break;
- case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
- result = EEH_STATE_UNAVAILABLE;
- break;
- case OPAL_EEH_STOPPED_PERM_UNAVAIL:
- result = EEH_STATE_NOT_SUPPORT;
- break;
- default:
- result = EEH_STATE_NOT_SUPPORT;
- pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
- __func__, phb->hose->global_number,
- pe->addr, fstate);
- }
-
- /*
- * If PHB supports compound PE, to freeze all
- * slave PEs for consistency.
- *
- * If the PE is switching to frozen state for the
- * first time, to dump the PHB diag-data.
- */
- if (!(result & EEH_STATE_NOT_SUPPORT) &&
- !(result & EEH_STATE_UNAVAILABLE) &&
- !(result & EEH_STATE_MMIO_ACTIVE) &&
- !(result & EEH_STATE_DMA_ACTIVE) &&
- !(pe->state & EEH_PE_ISOLATED)) {
- if (phb->freeze_pe)
- phb->freeze_pe(phb, pe->addr);
-
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- }
-
- return result;
-}
-
-/**
- * ioda_eeh_get_state - Retrieve the state of PE
- * @pe: EEH PE
- *
- * The PE's state should be retrieved from the PEEV, PEST
- * IODA tables. Since the OPAL has exported the function
- * to do it, it'd better to use that.
- */
-static int ioda_eeh_get_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
-
- /* Sanity check on PE number. PHB PE should have 0 */
- if (pe->addr < 0 ||
- pe->addr >= phb->ioda.total_pe) {
- pr_warn("%s: PHB#%x-PE#%x out of range [0, %x]\n",
- __func__, phb->hose->global_number,
- pe->addr, phb->ioda.total_pe);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- if (pe->type & EEH_PE_PHB)
- return ioda_eeh_get_phb_state(pe);
-
- return ioda_eeh_get_pe_state(pe);
-}
-
-static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
-{
- s64 rc = OPAL_HARDWARE;
-
- while (1) {
- rc = opal_pci_poll(phb->opal_id);
- if (rc <= 0)
- break;
-
- if (system_state < SYSTEM_RUNNING)
- udelay(1000 * rc);
- else
- msleep(rc);
- }
-
- return rc;
-}
-
-int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
-{
- struct pnv_phb *phb = hose->private_data;
- s64 rc = OPAL_HARDWARE;
-
- pr_debug("%s: Reset PHB#%x, option=%d\n",
- __func__, hose->global_number, option);
-
- /* Issue PHB complete reset request */
- if (option == EEH_RESET_FUNDAMENTAL ||
- option == EEH_RESET_HOT)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_COMPLETE,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_DEACTIVATE)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_COMPLETE,
- OPAL_DEASSERT_RESET);
- if (rc < 0)
- goto out;
-
- /*
- * Poll state of the PHB until the request is done
- * successfully. The PHB reset is usually PHB complete
- * reset followed by hot reset on root bus. So we also
- * need the PCI bus settlement delay.
- */
- rc = ioda_eeh_phb_poll(phb);
- if (option == EEH_RESET_DEACTIVATE) {
- if (system_state < SYSTEM_RUNNING)
- udelay(1000 * EEH_PE_RST_SETTLE_TIME);
- else
- msleep(EEH_PE_RST_SETTLE_TIME);
- }
-out:
- if (rc != OPAL_SUCCESS)
- return -EIO;
-
- return 0;
-}
-
-static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
-{
- struct pnv_phb *phb = hose->private_data;
- s64 rc = OPAL_SUCCESS;
-
- pr_debug("%s: Reset PHB#%x, option=%d\n",
- __func__, hose->global_number, option);
-
- /*
- * During the reset deassert time, we needn't care
- * the reset scope because the firmware does nothing
- * for fundamental or hot reset during deassert phase.
- */
- if (option == EEH_RESET_FUNDAMENTAL)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_FUNDAMENTAL,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_HOT)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_HOT,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_DEACTIVATE)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_HOT,
- OPAL_DEASSERT_RESET);
- if (rc < 0)
- goto out;
-
- /* Poll state of the PHB until the request is done */
- rc = ioda_eeh_phb_poll(phb);
- if (option == EEH_RESET_DEACTIVATE)
- msleep(EEH_PE_RST_SETTLE_TIME);
-out:
- if (rc != OPAL_SUCCESS)
- return -EIO;
-
- return 0;
-}
-
-static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option)
-
-{
- struct device_node *dn = pci_device_to_OF_node(dev);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
- int aer = edev ? edev->aer_cap : 0;
- u32 ctrl;
-
- pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
- __func__, pci_domain_nr(dev->bus),
- dev->bus->number, option);
-
- switch (option) {
- case EEH_RESET_FUNDAMENTAL:
- case EEH_RESET_HOT:
- /* Don't report linkDown event */
- if (aer) {
- eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, &ctrl);
- ctrl |= PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, ctrl);
- }
-
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
- ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
- msleep(EEH_PE_RST_HOLD_TIME);
-
- break;
- case EEH_RESET_DEACTIVATE:
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
- ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
- msleep(EEH_PE_RST_SETTLE_TIME);
-
- /* Continue reporting linkDown event */
- if (aer) {
- eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, &ctrl);
- ctrl &= ~PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, ctrl);
- }
-
- break;
- }
-
- return 0;
-}
-
-void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
-{
- struct pci_controller *hose;
-
- if (pci_is_root_bus(dev->bus)) {
- hose = pci_bus_to_host(dev->bus);
- ioda_eeh_root_reset(hose, EEH_RESET_HOT);
- ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
- } else {
- ioda_eeh_bridge_reset(dev, EEH_RESET_HOT);
- ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
- }
-}
-
-/**
- * ioda_eeh_reset - Reset the indicated PE
- * @pe: EEH PE
- * @option: reset option
- *
- * Do reset on the indicated PE. For PCI bus sensitive PE,
- * we need to reset the parent p2p bridge. The PHB has to
- * be reinitialized if the p2p bridge is root bridge. For
- * PCI device sensitive PE, we will try to reset the device
- * through FLR. For now, we don't have OPAL APIs to do HARD
- * reset yet, so all reset would be SOFT (HOT) reset.
- */
-static int ioda_eeh_reset(struct eeh_pe *pe, int option)
-{
- struct pci_controller *hose = pe->phb;
- struct pci_bus *bus;
- int ret;
-
- /*
- * For PHB reset, we always have complete reset. For those PEs whose
- * primary bus derived from root complex (root bus) or root port
- * (usually bus#1), we apply hot or fundamental reset on the root port.
- * For other PEs, we always have hot reset on the PE primary bus.
- *
- * Here, we have different design to pHyp, which always clear the
- * frozen state during PE reset. However, the good idea here from
- * benh is to keep frozen state before we get PE reset done completely
- * (until BAR restore). With the frozen state, HW drops illegal IO
- * or MMIO access, which can incur recrusive frozen PE during PE
- * reset. The side effect is that EEH core has to clear the frozen
- * state explicitly after BAR restore.
- */
- if (pe->type & EEH_PE_PHB) {
- ret = ioda_eeh_phb_reset(hose, option);
- } else {
- struct pnv_phb *phb;
- s64 rc;
-
- /*
- * The frozen PE might be caused by PAPR error injection
- * registers, which are expected to be cleared after hitting
- * frozen PE as stated in the hardware spec. Unfortunately,
- * that's not true on P7IOC. So we have to clear it manually
- * to avoid recursive EEH errors during recovery.
- */
- phb = hose->private_data;
- if (phb->model == PNV_PHB_MODEL_P7IOC &&
- (option == EEH_RESET_HOT ||
- option == EEH_RESET_FUNDAMENTAL)) {
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_ERROR,
- OPAL_ASSERT_RESET);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld clearing "
- "error injection registers\n",
- __func__, rc);
- return -EIO;
- }
- }
-
- bus = eeh_pe_bus_get(pe);
- if (pci_is_root_bus(bus) ||
- pci_is_root_bus(bus->parent))
- ret = ioda_eeh_root_reset(hose, option);
- else
- ret = ioda_eeh_bridge_reset(bus->self, option);
- }
-
- return ret;
-}
-
-/**
- * ioda_eeh_get_log - Retrieve error log
- * @pe: frozen PE
- * @severity: permanent or temporary error
- * @drv_log: device driver log
- * @len: length of device driver log
- *
- * Retrieve error log, which contains log from device driver
- * and firmware.
- */
-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
-{
- if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
-
- return 0;
-}
-
-/**
- * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
- * @pe: EEH PE
- *
- * For particular PE, it might have included PCI bridges. In order
- * to make the PE work properly, those PCI bridges should be configured
- * correctly. However, we need do nothing on P7IOC since the reset
- * function will do everything that should be covered by the function.
- */
-static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
-{
- return 0;
-}
-
-static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask)
-{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- s64 ret;
-
- /* Sanity check on error type */
- if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
- type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
- pr_warn("%s: Invalid error type %d\n",
- __func__, type);
- return -ERANGE;
- }
-
- if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
- func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
- pr_warn("%s: Invalid error function %d\n",
- __func__, func);
- return -ERANGE;
- }
-
- /* Firmware supports error injection ? */
- if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
- pr_warn("%s: Firmware doesn't support error injection\n",
- __func__);
- return -ENXIO;
- }
-
- /* Do error injection */
- ret = opal_pci_err_inject(phb->opal_id, pe->addr,
- type, func, addr, mask);
- if (ret != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld injecting error "
- "%d-%d to PHB#%x-PE#%x\n",
- __func__, ret, type, func,
- hose->global_number, pe->addr);
- return -EIO;
- }
-
- return 0;
-}
-
-static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
-{
- /* GEM */
- if (data->gemXfir || data->gemRfir ||
- data->gemRirqfir || data->gemMask || data->gemRwof)
- pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->gemXfir),
- be64_to_cpu(data->gemRfir),
- be64_to_cpu(data->gemRirqfir),
- be64_to_cpu(data->gemMask),
- be64_to_cpu(data->gemRwof));
-
- /* LEM */
- if (data->lemFir || data->lemErrMask ||
- data->lemAction0 || data->lemAction1 || data->lemWof)
- pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->lemFir),
- be64_to_cpu(data->lemErrMask),
- be64_to_cpu(data->lemAction0),
- be64_to_cpu(data->lemAction1),
- be64_to_cpu(data->lemWof));
-}
-
-static void ioda_eeh_hub_diag(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
- long rc;
-
- rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
- __func__, phb->hub_id, rc);
- return;
- }
-
- switch (data->type) {
- case OPAL_P7IOC_DIAG_TYPE_RGC:
- pr_info("P7IOC diag-data for RGC\n\n");
- ioda_eeh_hub_diag_common(data);
- if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
- pr_info(" RGC: %016llx %016llx\n",
- be64_to_cpu(data->rgc.rgcStatus),
- be64_to_cpu(data->rgc.rgcLdcp));
- break;
- case OPAL_P7IOC_DIAG_TYPE_BI:
- pr_info("P7IOC diag-data for BI %s\n\n",
- data->bi.biDownbound ? "Downbound" : "Upbound");
- ioda_eeh_hub_diag_common(data);
- if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
- data->bi.biLdcp2 || data->bi.biFenceStatus)
- pr_info(" BI: %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->bi.biLdcp0),
- be64_to_cpu(data->bi.biLdcp1),
- be64_to_cpu(data->bi.biLdcp2),
- be64_to_cpu(data->bi.biFenceStatus));
- break;
- case OPAL_P7IOC_DIAG_TYPE_CI:
- pr_info("P7IOC diag-data for CI Port %d\n\n",
- data->ci.ciPort);
- ioda_eeh_hub_diag_common(data);
- if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
- pr_info(" CI: %016llx %016llx\n",
- be64_to_cpu(data->ci.ciPortStatus),
- be64_to_cpu(data->ci.ciPortLdcp));
- break;
- case OPAL_P7IOC_DIAG_TYPE_MISC:
- pr_info("P7IOC diag-data for MISC\n\n");
- ioda_eeh_hub_diag_common(data);
- break;
- case OPAL_P7IOC_DIAG_TYPE_I2C:
- pr_info("P7IOC diag-data for I2C\n\n");
- ioda_eeh_hub_diag_common(data);
- break;
- default:
- pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
- __func__, phb->hub_id, data->type);
- }
-}
-
-static int ioda_eeh_get_pe(struct pci_controller *hose,
- u16 pe_no, struct eeh_pe **pe)
-{
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pnv_pe;
- struct eeh_pe *dev_pe;
- struct eeh_dev edev;
-
- /*
- * If PHB supports compound PE, to fetch
- * the master PE because slave PE is invisible
- * to EEH core.
- */
- pnv_pe = &phb->ioda.pe_array[pe_no];
- if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
- pnv_pe = pnv_pe->master;
- WARN_ON(!pnv_pe ||
- !(pnv_pe->flags & PNV_IODA_PE_MASTER));
- pe_no = pnv_pe->pe_number;
- }
-
- /* Find the PE according to PE# */
- memset(&edev, 0, sizeof(struct eeh_dev));
- edev.phb = hose;
- edev.pe_config_addr = pe_no;
- dev_pe = eeh_pe_get(&edev);
- if (!dev_pe)
- return -EEXIST;
-
- /* Freeze the (compound) PE */
- *pe = dev_pe;
- if (!(dev_pe->state & EEH_PE_ISOLATED))
- phb->freeze_pe(phb, pe_no);
-
- /*
- * At this point, we're sure the (compound) PE should
- * have been frozen. However, we still need poke until
- * hitting the frozen PE on top level.
- */
- dev_pe = dev_pe->parent;
- while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
- int ret;
- int active_flags = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE);
-
- ret = eeh_ops->get_state(dev_pe, NULL);
- if (ret <= 0 || (ret & active_flags) == active_flags) {
- dev_pe = dev_pe->parent;
- continue;
- }
-
- /* Frozen parent PE */
- *pe = dev_pe;
- if (!(dev_pe->state & EEH_PE_ISOLATED))
- phb->freeze_pe(phb, dev_pe->addr);
-
- /* Next one */
- dev_pe = dev_pe->parent;
- }
-
- return 0;
-}
-
-/**
- * ioda_eeh_next_error - Retrieve next error for EEH core to handle
- * @pe: The affected PE
- *
- * The function is expected to be called by EEH core while it gets
- * special EEH event (without binding PE). The function calls to
- * OPAL APIs for next error to handle. The informational error is
- * handled internally by platform. However, the dead IOC, dead PHB,
- * fenced PHB and frozen PE should be handled by EEH core eventually.
- */
-static int ioda_eeh_next_error(struct eeh_pe **pe)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct eeh_pe *phb_pe, *parent_pe;
- __be64 frozen_pe_no;
- __be16 err_type, severity;
- int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
- long rc;
- int state, ret = EEH_NEXT_ERR_NONE;
-
- /*
- * While running here, it's safe to purge the event queue.
- * And we should keep the cached OPAL notifier event sychronized
- * between the kernel and firmware.
- */
- eeh_remove_event(NULL, false);
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
-
- list_for_each_entry(hose, &hose_list, list_node) {
- /*
- * If the subordinate PCI buses of the PHB has been
- * removed or is exactly under error recovery, we
- * needn't take care of it any more.
- */
- phb = hose->private_data;
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
- continue;
-
- rc = opal_pci_next_error(phb->opal_id,
- &frozen_pe_no, &err_type, &severity);
-
- /* If OPAL API returns error, we needn't proceed */
- if (rc != OPAL_SUCCESS) {
- pr_devel("%s: Invalid return value on "
- "PHB#%x (0x%lx) from opal_pci_next_error",
- __func__, hose->global_number, rc);
- continue;
- }
-
- /* If the PHB doesn't have error, stop processing */
- if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
- be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
- pr_devel("%s: No error found on PHB#%x\n",
- __func__, hose->global_number);
- continue;
- }
-
- /*
- * Processing the error. We're expecting the error with
- * highest priority reported upon multiple errors on the
- * specific PHB.
- */
- pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
- __func__, be16_to_cpu(err_type), be16_to_cpu(severity),
- be64_to_cpu(frozen_pe_no), hose->global_number);
- switch (be16_to_cpu(err_type)) {
- case OPAL_EEH_IOC_ERROR:
- if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
- pr_err("EEH: dead IOC detected\n");
- ret = EEH_NEXT_ERR_DEAD_IOC;
- } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
- pr_info("EEH: IOC informative error "
- "detected\n");
- ioda_eeh_hub_diag(hose);
- ret = EEH_NEXT_ERR_NONE;
- }
-
- break;
- case OPAL_EEH_PHB_ERROR:
- if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
- *pe = phb_pe;
- pr_err("EEH: dead PHB#%x detected, "
- "location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_DEAD_PHB;
- } else if (be16_to_cpu(severity) ==
- OPAL_EEH_SEV_PHB_FENCED) {
- *pe = phb_pe;
- pr_err("EEH: Fenced PHB#%x detected, "
- "location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_FENCED_PHB;
- } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
- pr_info("EEH: PHB#%x informative error "
- "detected, location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ioda_eeh_phb_diag(phb_pe);
- pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
- ret = EEH_NEXT_ERR_NONE;
- }
-
- break;
- case OPAL_EEH_PE_ERROR:
- /*
- * If we can't find the corresponding PE, we
- * just try to unfreeze.
- */
- if (ioda_eeh_get_pe(hose,
- be64_to_cpu(frozen_pe_no), pe)) {
- /* Try best to clear it */
- pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
- hose->global_number, frozen_pe_no);
- pr_info("EEH: PHB location: %s\n",
- eeh_pe_loc_get(phb_pe));
- opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
- ret = EEH_NEXT_ERR_NONE;
- } else if ((*pe)->state & EEH_PE_ISOLATED ||
- eeh_pe_passed(*pe)) {
- ret = EEH_NEXT_ERR_NONE;
- } else {
- pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
- (*pe)->addr, (*pe)->phb->global_number);
- pr_err("EEH: PE location: %s, PHB location: %s\n",
- eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_FROZEN_PE;
- }
-
- break;
- default:
- pr_warn("%s: Unexpected error type %d\n",
- __func__, be16_to_cpu(err_type));
- }
-
- /*
- * EEH core will try recover from fenced PHB or
- * frozen PE. In the time for frozen PE, EEH core
- * enable IO path for that before collecting logs,
- * but it ruins the site. So we have to dump the
- * log in advance here.
- */
- if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
- ret == EEH_NEXT_ERR_FENCED_PHB) &&
- !((*pe)->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(*pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data((*pe)->phb,
- (*pe)->data);
- }
-
- /*
- * We probably have the frozen parent PE out there and
- * we need have to handle frozen parent PE firstly.
- */
- if (ret == EEH_NEXT_ERR_FROZEN_PE) {
- parent_pe = (*pe)->parent;
- while (parent_pe) {
- /* Hit the ceiling ? */
- if (parent_pe->type & EEH_PE_PHB)
- break;
-
- /* Frozen parent PE ? */
- state = ioda_eeh_get_state(parent_pe);
- if (state > 0 &&
- (state & active_flags) != active_flags)
- *pe = parent_pe;
-
- /* Next parent level */
- parent_pe = parent_pe->parent;
- }
-
- /* We possibly migrate to another PE */
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
- }
-
- /*
- * If we have no errors on the specific PHB or only
- * informative error there, we continue poking it.
- * Otherwise, we need actions to be taken by upper
- * layer.
- */
- if (ret > EEH_NEXT_ERR_INF)
- break;
- }
-
- return ret;
-}
-
-struct pnv_eeh_ops ioda_eeh_ops = {
- .post_init = ioda_eeh_post_init,
- .set_option = ioda_eeh_set_option,
- .get_state = ioda_eeh_get_state,
- .reset = ioda_eeh_reset,
- .get_log = ioda_eeh_get_log,
- .configure_bridge = ioda_eeh_configure_bridge,
- .err_inject = ioda_eeh_err_inject,
- .next_error = ioda_eeh_next_error
-};
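With eeh-ioda.c deleted, the extra pnv_eeh_ops indirection disappears: callers no longer bounce through phb->eeh_ops but reach the PowerNV handlers directly via the generic eeh_ops table. Roughly, the dispatch changes as sketched below (a comparison sketch, not code from the patch):

    /* Before: two levels of ops tables (per-PHB hook behind the generic one). */
    if (phb->eeh_ops && phb->eeh_ops->set_option)
    	ret = phb->eeh_ops->set_option(pe, option);

    /* After: the PowerNV implementation is the generic hook itself. */
    ret = eeh_ops->set_option(pe, option);
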
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index e261869adc86..ce738ab3d5a9 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -12,6 +12,7 @@
*/
#include <linux/atomic.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -38,12 +39,14 @@
#include "powernv.h"
#include "pci.h"
+static bool pnv_eeh_nb_init = false;
+
/**
- * powernv_eeh_init - EEH platform dependent initialization
+ * pnv_eeh_init - EEH platform dependent initialization
*
* EEH platform dependent initialization on powernv
*/
-static int powernv_eeh_init(void)
+static int pnv_eeh_init(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
@@ -85,37 +88,280 @@ static int powernv_eeh_init(void)
return 0;
}
+static int pnv_eeh_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ uint64_t changed_evts = (uint64_t)change;
+
+ /*
+ * We simply send a special EEH event if EEH has
+ * been enabled, or clear pending events in
+ * case we enable EEH soon
+ */
+ if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
+ !(events & OPAL_EVENT_PCI_ERROR))
+ return 0;
+
+ if (eeh_enabled())
+ eeh_send_failure_event(NULL);
+ else
+ opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
+
+ return 0;
+}
+
+static struct notifier_block pnv_eeh_nb = {
+ .notifier_call = pnv_eeh_event,
+ .next = NULL,
+ .priority = 0
+};
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t pnv_eeh_ei_write(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_controller *hose = filp->private_data;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ int pe_no, type, func;
+ unsigned long addr, mask;
+ char buf[50];
+ int ret;
+
+ if (!eeh_ops || !eeh_ops->err_inject)
+ return -ENXIO;
+
+ /* Copy over argument buffer */
+ ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (!ret)
+ return -EFAULT;
+
+ /* Retrieve parameters */
+ ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
+ &pe_no, &type, &func, &addr, &mask);
+ if (ret != 5)
+ return -EINVAL;
+
+ /* Retrieve PE */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+ edev->phb = hose;
+ edev->pe_config_addr = pe_no;
+ pe = eeh_pe_get(edev);
+ kfree(edev);
+ if (!pe)
+ return -ENODEV;
+
+ /* Do error injection */
+ ret = eeh_ops->err_inject(pe, type, func, addr, mask);
+ return ret < 0 ? ret : count;
+}
+
+static const struct file_operations pnv_eeh_ei_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = pnv_eeh_ei_write,
+};
+
+static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
+{
+ struct pci_controller *hose = data;
+ struct pnv_phb *phb = hose->private_data;
+
+ out_be64(phb->regs + offset, val);
+ return 0;
+}
+
+static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
+{
+ struct pci_controller *hose = data;
+ struct pnv_phb *phb = hose->private_data;
+
+ *val = in_be64(phb->regs + offset);
+ return 0;
+}
+
+static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xD10, val);
+}
+
+static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xD10, val);
+}
+
+static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xD90, val);
+}
+
+static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xD90, val);
+}
+
+static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xE10, val);
+}
+
+static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xE10, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
+ pnv_eeh_outb_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
+ pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
+ pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
+#endif /* CONFIG_DEBUG_FS */
+
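The debugfs block above relies on DEFINE_SIMPLE_ATTRIBUTE() to turn a get/set pair into file_operations that debugfs_create_file() can use. A self-contained sketch of that pattern; the file name, variable and fops names are made up for illustration:

    #include <linux/debugfs.h>
    #include <linux/fs.h>

    static u64 example_reg;			/* stand-in for a real register */

    static int example_get(void *data, u64 *val)
    {
    	*val = example_reg;
    	return 0;
    }

    static int example_set(void *data, u64 val)
    {
    	example_reg = val;
    	return 0;
    }

    DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%llx\n");

    /* Then, typically from an init path:
     *	debugfs_create_file("example", 0600, parent_dentry, NULL, &example_fops);
     */
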
/**
- * powernv_eeh_post_init - EEH platform dependent post initialization
+ * pnv_eeh_post_init - EEH platform dependent post initialization
*
* EEH platform dependent post initialization on powernv. When
* the function is called, the EEH PEs and devices should have
* been built. If the I/O cache staff has been built, EEH is
* ready to supply service.
*/
-static int powernv_eeh_post_init(void)
+static int pnv_eeh_post_init(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
int ret = 0;
+ /* Register OPAL event notifier */
+ if (!pnv_eeh_nb_init) {
+ ret = opal_notifier_register(&pnv_eeh_nb);
+ if (ret) {
+ pr_warn("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ pnv_eeh_nb_init = true;
+ }
+
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
- if (phb->eeh_ops && phb->eeh_ops->post_init) {
- ret = phb->eeh_ops->post_init(hose);
- if (ret)
- break;
- }
+ /*
+ * If EEH is enabled, we're going to rely on that.
+ * Otherwise, we fall back to the conventional mechanism
+ * of clearing frozen PEs during PCI config access.
+ */
+ if (eeh_enabled())
+ phb->flags |= PNV_PHB_FLAG_EEH;
+ else
+ phb->flags &= ~PNV_PHB_FLAG_EEH;
+
+ /* Create debugfs entries */
+#ifdef CONFIG_DEBUG_FS
+ if (phb->has_dbgfs || !phb->dbgfs)
+ continue;
+
+ phb->has_dbgfs = 1;
+ debugfs_create_file("err_injct", 0200,
+ phb->dbgfs, hose,
+ &pnv_eeh_ei_fops);
+
+ debugfs_create_file("err_injct_outbound", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_outb_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundA", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_inbA_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundB", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_inbB_dbgfs_ops);
+#endif /* CONFIG_DEBUG_FS */
}
+
return ret;
}
+static int pnv_eeh_cap_start(struct pci_dn *pdn)
+{
+ u32 status;
+
+ if (!pdn)
+ return 0;
+
+ pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ return PCI_CAPABILITY_LIST;
+}
+
+static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
+{
+ int pos = pnv_eeh_cap_start(pdn);
+ int cnt = 48; /* Maximal number of capabilities */
+ u32 id;
+
+ if (!pos)
+ return 0;
+
+ while (cnt--) {
+ pnv_pci_cfg_read(pdn, pos, 1, &pos);
+ if (pos < 0x40)
+ break;
+
+ pos &= ~3;
+ pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
+ if (id == 0xff)
+ break;
+
+ /* Found */
+ if (id == cap)
+ return pos;
+
+ /* Next one */
+ pos += PCI_CAP_LIST_NEXT;
+ }
+
+ return 0;
+}
+
+static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
+{
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ u32 header;
+ int pos = 256, ttl = (4096 - 256) / 8;
+
+ if (!edev || !edev->pcie_cap)
+ return 0;
+ if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ return 0;
+ else if (!header)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap && pos)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < 256)
+ break;
+
+ if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ break;
+ }
+
+ return 0;
+}
+
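The two helpers above walk the standard and extended capability chains with raw config-space reads because probing now happens from pci_dn, before a struct pci_dev exists. Once a pci_dev is available, the same lookups are what the generic helpers provide; the removed pci_dev-based probe further down used exactly these calls:

    /* Equivalent lookups via the generic PCI API when a pci_dev exists. */
    int pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
    int pcie_cap = pci_pcie_cap(dev);
    int aer_cap  = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
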
/**
- * powernv_eeh_dev_probe - Do probe on PCI device
- * @dev: PCI device
- * @flag: unused
+ * pnv_eeh_probe - Do probe on PCI device
+ * @pdn: PCI device node
+ * @data: unused
*
* When EEH module is installed during system boot, all PCI devices
* are checked one by one to see if it supports EEH. The function
@@ -129,12 +375,12 @@ static int powernv_eeh_post_init(void)
* was possiblly triggered by EEH core, the binding between EEH device
* and the PCI device isn't built yet.
*/
-static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
+static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pci_controller *hose = pdn->phb;
struct pnv_phb *phb = hose->private_data;
- struct device_node *dn = pci_device_to_OF_node(dev);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ uint32_t pcie_flags;
int ret;
/*
@@ -143,40 +389,42 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* the root bridge. So it's not reasonable to continue
* the probing.
*/
- if (!dn || !edev || edev->pe)
- return 0;
+ if (!edev || edev->pe)
+ return NULL;
/* Skip for PCI-ISA bridge */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
- return 0;
+ if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
+ return NULL;
/* Initialize eeh device */
- edev->class_code = dev->class;
+ edev->class_code = pdn->class_code;
edev->mode &= 0xFFFFFF00;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
+ edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
+ edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
+ if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
- edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (pci_is_pcie(dev)) {
- edev->pcie_cap = pci_pcie_cap(dev);
-
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- edev->mode |= EEH_DEV_ROOT_PORT;
- else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
- edev->mode |= EEH_DEV_DS_PORT;
-
- edev->aer_cap = pci_find_ext_capability(dev,
- PCI_EXT_CAP_ID_ERR);
+ if (edev->pcie_cap) {
+ pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
+ 2, &pcie_flags);
+ pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
+ edev->mode |= EEH_DEV_ROOT_PORT;
+ else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
+ edev->mode |= EEH_DEV_DS_PORT;
+ }
}
- edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
- edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
+ edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
+ edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
/* Create PE */
ret = eeh_add_to_parent_pe(edev);
if (ret) {
- pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
- __func__, pci_name(dev), ret);
- return ret;
+ pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
+ __func__, hose->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
+ return NULL;
}
/*
@@ -195,8 +443,10 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Broadcom Austin 4-ports NICs (14e4:1657)
* Broadcom Shiner 2-ports 10G NICs (14e4:168e)
*/
- if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
- (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
+ if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x1657) ||
+ (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x168e))
edev->pe->state |= EEH_PE_CFG_RESTRICTED;
/*
@@ -206,7 +456,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* to PE reset.
*/
if (!edev->pe->bus)
- edev->pe->bus = dev->bus;
+ edev->pe->bus = pci_find_bus(hose->global_number,
+ pdn->busno);
/*
* Enable EEH explicitly so that we will do EEH check
@@ -217,11 +468,11 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
/* Save memory bars */
eeh_save_bars(edev);
- return 0;
+ return NULL;
}
/**
- * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
+ * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
* @pe: EEH PE
* @option: operation to be issued
*
@@ -229,36 +480,236 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Currently, following options are support according to PAPR:
* Enable EEH, Disable EEH, Enable MMIO and Enable DMA
*/
-static int powernv_eeh_set_option(struct eeh_pe *pe, int option)
+static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ bool freeze_pe = false;
+ int opt, ret = 0;
+ s64 rc;
+
+ /* Sanity check on option */
+ switch (option) {
+ case EEH_OPT_DISABLE:
+ return -EPERM;
+ case EEH_OPT_ENABLE:
+ return 0;
+ case EEH_OPT_THAW_MMIO:
+ opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
+ break;
+ case EEH_OPT_THAW_DMA:
+ opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
+ break;
+ case EEH_OPT_FREEZE_PE:
+ freeze_pe = true;
+ opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
+ break;
+ default:
+ pr_warn("%s: Invalid option %d\n", __func__, option);
+ return -EINVAL;
+ }
- /*
- * What we need do is pass it down for hardware
- * implementation to handle it.
- */
- if (phb->eeh_ops && phb->eeh_ops->set_option)
- ret = phb->eeh_ops->set_option(pe, option);
+ /* If the PHB supports compound PEs, let it handle the freeze/unfreeze */
+ if (freeze_pe) {
+ if (phb->freeze_pe) {
+ phb->freeze_pe(phb, pe->addr);
+ } else {
+ rc = opal_pci_eeh_freeze_set(phb->opal_id,
+ pe->addr, opt);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld freezing "
+ "PHB#%x-PE#%x\n",
+ __func__, rc,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
+ }
+ } else {
+ if (phb->unfreeze_pe) {
+ ret = phb->unfreeze_pe(phb, pe->addr, opt);
+ } else {
+ rc = opal_pci_eeh_freeze_clear(phb->opal_id,
+ pe->addr, opt);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld enable %d "
+ "for PHB#%x-PE#%x\n",
+ __func__, rc, option,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
+ }
+ }
return ret;
}
/**
- * powernv_eeh_get_pe_addr - Retrieve PE address
+ * pnv_eeh_get_pe_addr - Retrieve PE address
* @pe: EEH PE
*
* Retrieve the PE address according to the given tranditional
* PCI BDF (Bus/Device/Function) address.
*/
-static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
+static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
return pe->addr;
}
+static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ s64 rc;
+
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
+ PNV_PCI_DIAG_BUF_SIZE);
+ if (rc != OPAL_SUCCESS)
+ pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
+ __func__, rc, pe->phb->global_number);
+}
+
+static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ u8 fstate;
+ __be16 pcierr;
+ s64 rc;
+ int result = 0;
+
+ rc = opal_pci_eeh_freeze_status(phb->opal_id,
+ pe->addr,
+ &fstate,
+ &pcierr,
+ NULL);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld getting PHB#%x state\n",
+ __func__, rc, phb->hose->global_number);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+
+ /*
+ * Check the PHB state. If the PHB is frozen for the
+ * first time, dump the PHB diag-data.
+ */
+ if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ } else if (!(pe->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+ }
+
+ return result;
+}
+
+static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ u8 fstate;
+ __be16 pcierr;
+ s64 rc;
+ int result;
+
+ /*
+ * We don't clobber hardware frozen state until PE
+ * reset is completed. In order to keep EEH core
+ * moving forward, we have to return operational
+ * state during PE reset.
+ */
+ if (pe->state & EEH_PE_RESET) {
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ return result;
+ }
+
+ /*
+ * Fetch PE state from hardware. If the PHB
+ * supports compound PE, let it handle that.
+ */
+ if (phb->get_pe_state) {
+ fstate = phb->get_pe_state(phb, pe->addr);
+ } else {
+ rc = opal_pci_eeh_freeze_status(phb->opal_id,
+ pe->addr,
+ &fstate,
+ &pcierr,
+ NULL);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
+ __func__, rc, phb->hose->global_number,
+ pe->addr);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+ }
+
+ /* Figure out state */
+ switch (fstate) {
+ case OPAL_EEH_STOPPED_NOT_FROZEN:
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_MMIO_FREEZE:
+ result = (EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_DMA_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_DMA_FREEZE:
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_MMIO_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
+ result = 0;
+ break;
+ case OPAL_EEH_STOPPED_RESET:
+ result = EEH_STATE_RESET_ACTIVE;
+ break;
+ case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
+ result = EEH_STATE_UNAVAILABLE;
+ break;
+ case OPAL_EEH_STOPPED_PERM_UNAVAIL:
+ result = EEH_STATE_NOT_SUPPORT;
+ break;
+ default:
+ result = EEH_STATE_NOT_SUPPORT;
+ pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
+ __func__, phb->hose->global_number,
+ pe->addr, fstate);
+ }
+
+ /*
+ * If the PHB supports compound PEs, freeze all
+ * slave PEs for consistency.
+ *
+ * If the PE is switching to the frozen state for the
+ * first time, dump the PHB diag-data.
+ */
+ if (!(result & EEH_STATE_NOT_SUPPORT) &&
+ !(result & EEH_STATE_UNAVAILABLE) &&
+ !(result & EEH_STATE_MMIO_ACTIVE) &&
+ !(result & EEH_STATE_DMA_ACTIVE) &&
+ !(pe->state & EEH_PE_ISOLATED)) {
+ if (phb->freeze_pe)
+ phb->freeze_pe(phb, pe->addr);
+
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+ }
+
+ return result;
+}
+
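The state value returned above is a bitmask; elsewhere in this patch a PE is treated as fully functional only when both the MMIO and DMA "active" bits are set. A small sketch of that test, mirroring the active_flags checks used in the patch (the helper name is made up):

    /* Sketch: mirrors the (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) checks. */
    static bool example_pe_fully_active(struct eeh_pe *pe)
    {
    	int active = EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE;
    	int state = pnv_eeh_get_pe_state(pe);

    	return state > 0 && (state & active) == active;
    }
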
/**
- * powernv_eeh_get_state - Retrieve PE state
+ * pnv_eeh_get_state - Retrieve PE state
* @pe: EEH PE
* @delay: delay while PE state is temporarily unavailable
*
@@ -267,64 +718,279 @@ static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
* we prefer passing down to hardware implementation to handle
* it.
*/
-static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay)
+static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
+{
+ int ret;
+
+ if (pe->type & EEH_PE_PHB)
+ ret = pnv_eeh_get_phb_state(pe);
+ else
+ ret = pnv_eeh_get_pe_state(pe);
+
+ if (!delay)
+ return ret;
+
+ /*
+ * If the PE state is temporarily unavailable,
+ * tell the EEH core to delay for the default
+ * period (1 second).
+ */
+ *delay = 0;
+ if (ret & EEH_STATE_UNAVAILABLE)
+ *delay = 1000;
+
+ return ret;
+}
+
+static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
+{
+ s64 rc = OPAL_HARDWARE;
+
+ while (1) {
+ rc = opal_pci_poll(phb->opal_id);
+ if (rc <= 0)
+ break;
+
+ if (system_state < SYSTEM_RUNNING)
+ udelay(1000 * rc);
+ else
+ msleep(rc);
+ }
+
+ return rc;
+}
+
+int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
- struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = EEH_STATE_NOT_SUPPORT;
+ s64 rc = OPAL_HARDWARE;
+
+ pr_debug("%s: Reset PHB#%x, option=%d\n",
+ __func__, hose->global_number, option);
+
+ /* Issue PHB complete reset request */
+ if (option == EEH_RESET_FUNDAMENTAL ||
+ option == EEH_RESET_HOT)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_COMPLETE,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_DEACTIVATE)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_COMPLETE,
+ OPAL_DEASSERT_RESET);
+ if (rc < 0)
+ goto out;
- if (phb->eeh_ops && phb->eeh_ops->get_state) {
- ret = phb->eeh_ops->get_state(pe);
+ /*
+ * Poll state of the PHB until the request is done
+ * successfully. The PHB reset is usually PHB complete
+ * reset followed by hot reset on root bus. So we also
+ * need the PCI bus settlement delay.
+ */
+ rc = pnv_eeh_phb_poll(phb);
+ if (option == EEH_RESET_DEACTIVATE) {
+ if (system_state < SYSTEM_RUNNING)
+ udelay(1000 * EEH_PE_RST_SETTLE_TIME);
+ else
+ msleep(EEH_PE_RST_SETTLE_TIME);
+ }
+out:
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
- /*
- * If the PE state is temporarily unavailable,
- * to inform the EEH core delay for default
- * period (1 second)
- */
- if (delay) {
- *delay = 0;
- if (ret & EEH_STATE_UNAVAILABLE)
- *delay = 1000;
+ return 0;
+}
+
+static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
+{
+ struct pnv_phb *phb = hose->private_data;
+ s64 rc = OPAL_HARDWARE;
+
+ pr_debug("%s: Reset PHB#%x, option=%d\n",
+ __func__, hose->global_number, option);
+
+ /*
+ * During the reset deassert time, we needn't care about
+ * the reset scope because the firmware does nothing
+ * for fundamental or hot reset during the deassert phase.
+ */
+ if (option == EEH_RESET_FUNDAMENTAL)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_FUNDAMENTAL,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_HOT)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_HOT,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_DEACTIVATE)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_HOT,
+ OPAL_DEASSERT_RESET);
+ if (rc < 0)
+ goto out;
+
+ /* Poll state of the PHB until the request is done */
+ rc = pnv_eeh_phb_poll(phb);
+ if (option == EEH_RESET_DEACTIVATE)
+ msleep(EEH_PE_RST_SETTLE_TIME);
+out:
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
+{
+ struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ int aer = edev ? edev->aer_cap : 0;
+ u32 ctrl;
+
+ pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
+ __func__, pci_domain_nr(dev->bus),
+ dev->bus->number, option);
+
+ switch (option) {
+ case EEH_RESET_FUNDAMENTAL:
+ case EEH_RESET_HOT:
+ /* Don't report linkDown event */
+ if (aer) {
+ eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, &ctrl);
+ ctrl |= PCI_ERR_UNC_SURPDN;
+ eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, ctrl);
}
+
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+
+ msleep(EEH_PE_RST_HOLD_TIME);
+ break;
+ case EEH_RESET_DEACTIVATE:
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+
+ msleep(EEH_PE_RST_SETTLE_TIME);
+
+ /* Continue reporting linkDown event */
+ if (aer) {
+ eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, &ctrl);
+ ctrl &= ~PCI_ERR_UNC_SURPDN;
+ eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, ctrl);
+ }
+
+ break;
}
- return ret;
+ return 0;
+}
+
+void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
+{
+ struct pci_controller *hose;
+
+ if (pci_is_root_bus(dev->bus)) {
+ hose = pci_bus_to_host(dev->bus);
+ pnv_eeh_root_reset(hose, EEH_RESET_HOT);
+ pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
+ } else {
+ pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
+ pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
+ }
}
/**
- * powernv_eeh_reset - Reset the specified PE
+ * pnv_eeh_reset - Reset the specified PE
* @pe: EEH PE
* @option: reset option
*
- * Reset the specified PE
+ * Reset the indicated PE. For a PCI-bus-sensitive PE, we need
+ * to reset the parent p2p bridge; the PHB has to be reinitialized
+ * if that bridge is the root bridge. For a PCI-device-sensitive
+ * PE, we try to reset the device through FLR. For now, we don't
+ * have OPAL APIs to do a HARD reset yet, so all resets are
+ * SOFT (HOT) resets.
*/
-static int powernv_eeh_reset(struct eeh_pe *pe, int option)
+static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ struct pci_bus *bus;
+ int ret;
+
+ /*
+	 * For a PHB reset, we always do a complete reset. For PEs whose
+	 * primary bus is derived from the root complex (root bus) or the
+	 * root port (usually bus#1), we apply a hot or fundamental reset
+	 * on the root port. For other PEs, we always do a hot reset on
+	 * the PE's primary bus.
+	 *
+	 * Our design differs from pHyp here: pHyp always clears the
+	 * frozen state during PE reset. The idea (from benh) is instead
+	 * to keep the frozen state until the PE reset is completely done
+	 * (until BAR restore). With the frozen state, the hardware drops
+	 * illegal IO or MMIO accesses, which could otherwise cause a
+	 * recursive frozen PE during the reset. The side effect is that
+	 * the EEH core has to clear the frozen state explicitly after
+	 * BAR restore.
+ */
+ if (pe->type & EEH_PE_PHB) {
+ ret = pnv_eeh_phb_reset(hose, option);
+ } else {
+ struct pnv_phb *phb;
+ s64 rc;
- if (phb->eeh_ops && phb->eeh_ops->reset)
- ret = phb->eeh_ops->reset(pe, option);
+ /*
+		 * The frozen PE might have been caused by the PAPR error
+		 * injection registers, which are expected to be cleared once
+		 * the PE is frozen, as stated in the hardware spec.
+		 * Unfortunately, that's not true on P7IOC, so we have to
+		 * clear them manually to avoid recursive EEH errors during
+		 * recovery.
+ */
+ phb = hose->private_data;
+ if (phb->model == PNV_PHB_MODEL_P7IOC &&
+ (option == EEH_RESET_HOT ||
+ option == EEH_RESET_FUNDAMENTAL)) {
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_ERROR,
+ OPAL_ASSERT_RESET);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld clearing "
+ "error injection registers\n",
+ __func__, rc);
+ return -EIO;
+ }
+ }
+
+ bus = eeh_pe_bus_get(pe);
+ if (pci_is_root_bus(bus) ||
+ pci_is_root_bus(bus->parent))
+ ret = pnv_eeh_root_reset(hose, option);
+ else
+ ret = pnv_eeh_bridge_reset(bus->self, option);
+ }
return ret;
}
/**
- * powernv_eeh_wait_state - Wait for PE state
+ * pnv_eeh_wait_state - Wait for PE state
* @pe: EEH PE
* @max_wait: maximal period in microsecond
*
* Wait for the state of associated PE. It might take some time
* to retrieve the PE's state.
*/
-static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
+static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
int ret;
int mwait;
while (1) {
- ret = powernv_eeh_get_state(pe, &mwait);
+ ret = pnv_eeh_get_state(pe, &mwait);
/*
* If the PE's state is temporarily unavailable,
@@ -348,7 +1014,7 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
}
/**
- * powernv_eeh_get_log - Retrieve error log
+ * pnv_eeh_get_log - Retrieve error log
* @pe: EEH PE
* @severity: temporary or permanent error log
* @drv_log: driver log to be combined with retrieved error log
@@ -356,41 +1022,30 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
*
* Retrieve the temporary or permanent error from the PE.
*/
-static int powernv_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
+static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
+ char *drv_log, unsigned long len)
{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
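+	/* Dump the PHB diag-data now unless the early dump already did it */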
+ if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- if (phb->eeh_ops && phb->eeh_ops->get_log)
- ret = phb->eeh_ops->get_log(pe, severity, drv_log, len);
-
- return ret;
+ return 0;
}
/**
- * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
+ * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
* @pe: EEH PE
*
* The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
* again.
*/
-static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
+static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = 0;
-
- if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
- ret = phb->eeh_ops->configure_bridge(pe);
-
- return ret;
+ return 0;
}
/**
- * powernv_pe_err_inject - Inject specified error to the indicated PE
+ * pnv_pe_err_inject - Inject specified error to the indicated PE
* @pe: the indicated PE
* @type: error type
* @func: specific error type
@@ -401,22 +1056,52 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
* determined by @type and @func, to the indicated PE for
* testing purpose.
*/
-static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask)
+static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask)
{
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ s64 rc;
+
+ /* Sanity check on error type */
+ if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
+ type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
+ pr_warn("%s: Invalid error type %d\n",
+ __func__, type);
+ return -ERANGE;
+ }
- if (phb->eeh_ops && phb->eeh_ops->err_inject)
- ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
+ if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
+ func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
+ pr_warn("%s: Invalid error function %d\n",
+ __func__, func);
+ return -ERANGE;
+ }
- return ret;
+ /* Firmware supports error injection ? */
+ if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
+ pr_warn("%s: Firmware doesn't support error injection\n",
+ __func__);
+ return -ENXIO;
+ }
+
+ /* Do error injection */
+ rc = opal_pci_err_inject(phb->opal_id, pe->addr,
+ type, func, addr, mask);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld injecting error "
+ "%d-%d to PHB#%x-PE#%x\n",
+ __func__, rc, type, func,
+ hose->global_number, pe->addr);
+ return -EIO;
+ }
+
+ return 0;
}
-static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
+static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
{
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
if (!edev || !edev->pe)
return false;
@@ -427,51 +1112,377 @@ static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
return false;
}
-static int powernv_eeh_read_config(struct device_node *dn,
- int where, int size, u32 *val)
+static int pnv_eeh_read_config(struct pci_dn *pdn,
+ int where, int size, u32 *val)
{
- if (powernv_eeh_cfg_blocked(dn)) {
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pnv_eeh_cfg_blocked(pdn)) {
*val = 0xFFFFFFFF;
return PCIBIOS_SET_FAILED;
}
- return pnv_pci_cfg_read(dn, where, size, val);
+ return pnv_pci_cfg_read(pdn, where, size, val);
}
-static int powernv_eeh_write_config(struct device_node *dn,
- int where, int size, u32 val)
+static int pnv_eeh_write_config(struct pci_dn *pdn,
+ int where, int size, u32 val)
{
- if (powernv_eeh_cfg_blocked(dn))
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pnv_eeh_cfg_blocked(pdn))
return PCIBIOS_SET_FAILED;
- return pnv_pci_cfg_write(dn, where, size, val);
+ return pnv_pci_cfg_write(pdn, where, size, val);
+}
+
+static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
+{
+ /* GEM */
+ if (data->gemXfir || data->gemRfir ||
+ data->gemRirqfir || data->gemMask || data->gemRwof)
+ pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->gemXfir),
+ be64_to_cpu(data->gemRfir),
+ be64_to_cpu(data->gemRirqfir),
+ be64_to_cpu(data->gemMask),
+ be64_to_cpu(data->gemRwof));
+
+ /* LEM */
+ if (data->lemFir || data->lemErrMask ||
+ data->lemAction0 || data->lemAction1 || data->lemWof)
+ pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->lemFir),
+ be64_to_cpu(data->lemErrMask),
+ be64_to_cpu(data->lemAction0),
+ be64_to_cpu(data->lemAction1),
+ be64_to_cpu(data->lemWof));
+}
+
+static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
+{
+ struct pnv_phb *phb = hose->private_data;
+ struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
+ long rc;
+
+ rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
+ __func__, phb->hub_id, rc);
+ return;
+ }
+
+ switch (data->type) {
+ case OPAL_P7IOC_DIAG_TYPE_RGC:
+ pr_info("P7IOC diag-data for RGC\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
+ pr_info(" RGC: %016llx %016llx\n",
+ be64_to_cpu(data->rgc.rgcStatus),
+ be64_to_cpu(data->rgc.rgcLdcp));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_BI:
+ pr_info("P7IOC diag-data for BI %s\n\n",
+ data->bi.biDownbound ? "Downbound" : "Upbound");
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
+ data->bi.biLdcp2 || data->bi.biFenceStatus)
+ pr_info(" BI: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->bi.biLdcp0),
+ be64_to_cpu(data->bi.biLdcp1),
+ be64_to_cpu(data->bi.biLdcp2),
+ be64_to_cpu(data->bi.biFenceStatus));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_CI:
+ pr_info("P7IOC diag-data for CI Port %d\n\n",
+ data->ci.ciPort);
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
+ pr_info(" CI: %016llx %016llx\n",
+ be64_to_cpu(data->ci.ciPortStatus),
+ be64_to_cpu(data->ci.ciPortLdcp));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_MISC:
+ pr_info("P7IOC diag-data for MISC\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_I2C:
+ pr_info("P7IOC diag-data for I2C\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ break;
+ default:
+ pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
+ __func__, phb->hub_id, data->type);
+ }
+}
+
+static int pnv_eeh_get_pe(struct pci_controller *hose,
+ u16 pe_no, struct eeh_pe **pe)
+{
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pnv_pe;
+ struct eeh_pe *dev_pe;
+ struct eeh_dev edev;
+
+ /*
+	 * If the PHB supports compound PEs, fetch the
+	 * master PE, because slave PEs are invisible
+	 * to the EEH core.
+ */
+ pnv_pe = &phb->ioda.pe_array[pe_no];
+ if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
+ pnv_pe = pnv_pe->master;
+ WARN_ON(!pnv_pe ||
+ !(pnv_pe->flags & PNV_IODA_PE_MASTER));
+ pe_no = pnv_pe->pe_number;
+ }
+
+ /* Find the PE according to PE# */
+ memset(&edev, 0, sizeof(struct eeh_dev));
+ edev.phb = hose;
+ edev.pe_config_addr = pe_no;
+ dev_pe = eeh_pe_get(&edev);
+ if (!dev_pe)
+ return -EEXIST;
+
+ /* Freeze the (compound) PE */
+ *pe = dev_pe;
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
+ phb->freeze_pe(phb, pe_no);
+
+ /*
+	 * At this point, the (compound) PE should have been
+	 * frozen. However, we still need to poke up the
+	 * hierarchy until we hit the topmost frozen PE.
+ */
+ dev_pe = dev_pe->parent;
+ while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
+ int ret;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE);
+
+ ret = eeh_ops->get_state(dev_pe, NULL);
+ if (ret <= 0 || (ret & active_flags) == active_flags) {
+ dev_pe = dev_pe->parent;
+ continue;
+ }
+
+ /* Frozen parent PE */
+ *pe = dev_pe;
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
+ phb->freeze_pe(phb, dev_pe->addr);
+
+ /* Next one */
+ dev_pe = dev_pe->parent;
+ }
+
+ return 0;
}
/**
- * powernv_eeh_next_error - Retrieve next EEH error to handle
+ * pnv_eeh_next_error - Retrieve next EEH error to handle
* @pe: Affected PE
*
- * Using OPAL API, to retrieve next EEH error for EEH core to handle
+ * The function is expected to be called by the EEH core when it gets
+ * a special EEH event (without a bound PE). It calls the OPAL APIs
+ * for the next error to handle. Informational errors are handled
+ * internally by the platform, while dead IOC, dead PHB, fenced PHB
+ * and frozen PE errors are eventually handled by the EEH core.
*/
-static int powernv_eeh_next_error(struct eeh_pe **pe)
+static int pnv_eeh_next_error(struct eeh_pe **pe)
{
struct pci_controller *hose;
- struct pnv_phb *phb = NULL;
+ struct pnv_phb *phb;
+ struct eeh_pe *phb_pe, *parent_pe;
+ __be64 frozen_pe_no;
+ __be16 err_type, severity;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+ long rc;
+ int state, ret = EEH_NEXT_ERR_NONE;
+
+ /*
+	 * While running here, it's safe to purge the event queue. We
+	 * should also keep the cached OPAL notifier event synchronized
+	 * between the kernel and the firmware.
+ */
+ eeh_remove_event(NULL, false);
+ opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
list_for_each_entry(hose, &hose_list, list_node) {
+ /*
+		 * If the subordinate PCI buses of the PHB have been
+		 * removed or are already under error recovery, we
+		 * needn't take care of them any more.
+ */
phb = hose->private_data;
- break;
- }
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
+ continue;
+
+ rc = opal_pci_next_error(phb->opal_id,
+ &frozen_pe_no, &err_type, &severity);
+ if (rc != OPAL_SUCCESS) {
+ pr_devel("%s: Invalid return value on "
+ "PHB#%x (0x%lx) from opal_pci_next_error",
+ __func__, hose->global_number, rc);
+ continue;
+ }
+
+ /* If the PHB doesn't have error, stop processing */
+ if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
+ be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
+ pr_devel("%s: No error found on PHB#%x\n",
+ __func__, hose->global_number);
+ continue;
+ }
+
+ /*
+		 * Process the error. When multiple errors are present on
+		 * a PHB, we expect the one with the highest priority to
+		 * be reported.
+ */
+ pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
+ __func__, be16_to_cpu(err_type),
+ be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
+ hose->global_number);
+ switch (be16_to_cpu(err_type)) {
+ case OPAL_EEH_IOC_ERROR:
+ if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
+ pr_err("EEH: dead IOC detected\n");
+ ret = EEH_NEXT_ERR_DEAD_IOC;
+ } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: IOC informative error "
+ "detected\n");
+ pnv_eeh_get_and_dump_hub_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+ case OPAL_EEH_PHB_ERROR:
+ if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
+ *pe = phb_pe;
+ pr_err("EEH: dead PHB#%x detected, "
+ "location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_DEAD_PHB;
+ } else if (be16_to_cpu(severity) ==
+ OPAL_EEH_SEV_PHB_FENCED) {
+ *pe = phb_pe;
+ pr_err("EEH: Fenced PHB#%x detected, "
+ "location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_FENCED_PHB;
+ } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: PHB#%x informative error "
+ "detected, location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ pnv_eeh_get_phb_diag(phb_pe);
+ pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
+ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+ case OPAL_EEH_PE_ERROR:
+ /*
+ * If we can't find the corresponding PE, we
+ * just try to unfreeze.
+ */
+ if (pnv_eeh_get_pe(hose,
+ be64_to_cpu(frozen_pe_no), pe)) {
+ /* Try best to clear it */
+ pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
+ hose->global_number, frozen_pe_no);
+ pr_info("EEH: PHB location: %s\n",
+ eeh_pe_loc_get(phb_pe));
+ opal_pci_eeh_freeze_clear(phb->opal_id,
+ frozen_pe_no,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ ret = EEH_NEXT_ERR_NONE;
+ } else if ((*pe)->state & EEH_PE_ISOLATED ||
+ eeh_pe_passed(*pe)) {
+ ret = EEH_NEXT_ERR_NONE;
+ } else {
+ pr_err("EEH: Frozen PE#%x "
+ "on PHB#%x detected\n",
+ (*pe)->addr,
+ (*pe)->phb->global_number);
+ pr_err("EEH: PE location: %s, "
+ "PHB location: %s\n",
+ eeh_pe_loc_get(*pe),
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_FROZEN_PE;
+ }
+
+ break;
+ default:
+ pr_warn("%s: Unexpected error type %d\n",
+ __func__, be16_to_cpu(err_type));
+ }
- if (phb && phb->eeh_ops->next_error)
- return phb->eeh_ops->next_error(pe);
+ /*
+		 * The EEH core will try to recover from a fenced PHB or
+		 * a frozen PE. For a frozen PE, the EEH core enables the
+		 * IO path before collecting logs, which disturbs the
+		 * error site. So we have to dump the log in advance
+		 * here.
+ */
+ if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
+ ret == EEH_NEXT_ERR_FENCED_PHB) &&
+ !((*pe)->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(*pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data((*pe)->phb,
+ (*pe)->data);
+ }
- return -EEXIST;
+ /*
+		 * A parent PE might be frozen as well; if so, we have
+		 * to handle the frozen parent PE first.
+ */
+ if (ret == EEH_NEXT_ERR_FROZEN_PE) {
+ parent_pe = (*pe)->parent;
+ while (parent_pe) {
+ /* Hit the ceiling ? */
+ if (parent_pe->type & EEH_PE_PHB)
+ break;
+
+ /* Frozen parent PE ? */
+ state = eeh_ops->get_state(parent_pe, NULL);
+ if (state > 0 &&
+ (state & active_flags) != active_flags)
+ *pe = parent_pe;
+
+ /* Next parent level */
+ parent_pe = parent_pe->parent;
+ }
+
+ /* We possibly migrate to another PE */
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ }
+
+ /*
+		 * If this PHB has no errors, or only informative ones,
+		 * we continue poking the next PHB. Otherwise, the upper
+		 * layer needs to take action.
+ */
+ if (ret > EEH_NEXT_ERR_INF)
+ break;
+ }
+
+ return ret;
}
-static int powernv_eeh_restore_config(struct device_node *dn)
+static int pnv_eeh_restore_config(struct pci_dn *pdn)
{
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
struct pnv_phb *phb;
s64 ret;
@@ -490,24 +1501,23 @@ static int powernv_eeh_restore_config(struct device_node *dn)
return 0;
}
-static struct eeh_ops powernv_eeh_ops = {
+static struct eeh_ops pnv_eeh_ops = {
.name = "powernv",
- .init = powernv_eeh_init,
- .post_init = powernv_eeh_post_init,
- .of_probe = NULL,
- .dev_probe = powernv_eeh_dev_probe,
- .set_option = powernv_eeh_set_option,
- .get_pe_addr = powernv_eeh_get_pe_addr,
- .get_state = powernv_eeh_get_state,
- .reset = powernv_eeh_reset,
- .wait_state = powernv_eeh_wait_state,
- .get_log = powernv_eeh_get_log,
- .configure_bridge = powernv_eeh_configure_bridge,
- .err_inject = powernv_eeh_err_inject,
- .read_config = powernv_eeh_read_config,
- .write_config = powernv_eeh_write_config,
- .next_error = powernv_eeh_next_error,
- .restore_config = powernv_eeh_restore_config
+ .init = pnv_eeh_init,
+ .post_init = pnv_eeh_post_init,
+ .probe = pnv_eeh_probe,
+ .set_option = pnv_eeh_set_option,
+ .get_pe_addr = pnv_eeh_get_pe_addr,
+ .get_state = pnv_eeh_get_state,
+ .reset = pnv_eeh_reset,
+ .wait_state = pnv_eeh_wait_state,
+ .get_log = pnv_eeh_get_log,
+ .configure_bridge = pnv_eeh_configure_bridge,
+ .err_inject = pnv_eeh_err_inject,
+ .read_config = pnv_eeh_read_config,
+ .write_config = pnv_eeh_write_config,
+ .next_error = pnv_eeh_next_error,
+ .restore_config = pnv_eeh_restore_config
};
/**
@@ -521,7 +1531,7 @@ static int __init eeh_powernv_init(void)
int ret = -EINVAL;
eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
- ret = eeh_ops_register(&powernv_eeh_ops);
+ ret = eeh_ops_register(&pnv_eeh_ops);
if (!ret)
pr_info("EEH: PowerNV platform initialized\n");
else
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 23260f7dfa7a..5aa9c1ce4de3 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -452,5 +452,6 @@ void __init opal_platform_dump_init(void)
return;
}
- opal_dump_resend_notification();
+ if (opal_check_token(OPAL_DUMP_RESEND))
+ opal_dump_resend_notification();
}
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 518fe95dbf24..38ce757e5e2a 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -313,7 +313,8 @@ int __init opal_elog_init(void)
}
/* We are now ready to pull error logs from opal. */
- opal_resend_pending_logs();
+ if (opal_check_token(OPAL_ELOG_RESEND))
+ opal_resend_pending_logs();
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 5c21d9c07f45..4ec6219287fc 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -120,7 +120,11 @@ static struct image_header_t image_header;
static struct image_data_t image_data;
static struct validate_flash_t validate_flash_data;
static struct manage_flash_t manage_flash_data;
-static struct update_flash_t update_flash_data;
+
+/* Initialize update_flash_data status to No Operation */
+static struct update_flash_t update_flash_data = {
+ .status = FLASH_NO_OP,
+};
static DEFINE_MUTEX(image_data_mutex);
@@ -542,7 +546,7 @@ static struct attribute_group image_op_attr_group = {
.attrs = image_op_attrs,
};
-void __init opal_flash_init(void)
+void __init opal_flash_update_init(void)
{
int ret;
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index f9896fd5d04a..9db4398ded5d 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <asm/opal.h>
+#include <asm/nvram.h>
#include <asm/machdep.h>
static unsigned int nvram_size;
@@ -62,6 +63,15 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
return count;
}
+static int __init opal_nvram_init_log_partitions(void)
+{
+ /* Scan nvram for partitions */
+ nvram_scan_partitions();
+ nvram_init_oops_partition(0);
+ return 0;
+}
+machine_arch_initcall(powernv, opal_nvram_init_log_partitions);
+
void __init opal_nvram_init(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index 48bf5b080bcf..ac46c2c24f99 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -29,8 +29,9 @@ static int opal_power_control_event(struct notifier_block *nb,
switch (type) {
case SOFT_REBOOT:
- /* Fall through. The service processor is responsible for
- * bringing the machine back up */
+ pr_info("OPAL: reboot requested\n");
+ orderly_reboot();
+ break;
case SOFT_OFF:
pr_info("OPAL: poweroff requested\n");
orderly_poweroff(true);
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index 4ab67ef7abc9..655250499d18 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -46,18 +46,28 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
mutex_lock(&opal_sensor_mutex);
ret = opal_sensor_read(sensor_hndl, token, &data);
- if (ret != OPAL_ASYNC_COMPLETION)
- goto out_token;
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_err("%s: Failed to wait for the async response, %d\n",
+ __func__, ret);
+ goto out_token;
+ }
- ret = opal_async_wait_response(token, &msg);
- if (ret) {
- pr_err("%s: Failed to wait for the async response, %d\n",
- __func__, ret);
- goto out_token;
- }
+ ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ *sensor_data = be32_to_cpu(data);
+ break;
+
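+	/* The sensor read completed synchronously; data is already valid */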
+ case OPAL_SUCCESS:
+ ret = 0;
+ *sensor_data = be32_to_cpu(data);
+ break;
- *sensor_data = be32_to_cpu(data);
- ret = be64_to_cpu(msg.params[1]);
+ default:
+ ret = opal_error_code(ret);
+ break;
+ }
out_token:
mutex_unlock(&opal_sensor_mutex);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index fcbe899fe299..a7ade94cdf87 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -286,9 +286,12 @@ OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
-OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
+OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE);
OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
+OPAL_CALL(opal_flash_read, OPAL_FLASH_READ);
+OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE);
+OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 18fd4e71c9c1..2241565b0739 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -23,6 +23,8 @@
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <asm/machdep.h>
#include <asm/opal.h>
@@ -58,6 +60,7 @@ static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
+static uint32_t opal_heartbeat;
static void opal_reinit_cores(void)
{
@@ -302,23 +305,26 @@ void opal_notifier_disable(void)
* Opal message notifier based on message type. Allow subscribers to get
 * notified for a specific message type.
*/
-int opal_message_notifier_register(enum OpalMessageType msg_type,
+int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb)
{
- if (!nb) {
- pr_warning("%s: Invalid argument (%p)\n",
- __func__, nb);
- return -EINVAL;
- }
- if (msg_type > OPAL_MSG_TYPE_MAX) {
- pr_warning("%s: Invalid message type argument (%d)\n",
+ if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
+ pr_warning("%s: Invalid arguments, msg_type:%d\n",
__func__, msg_type);
return -EINVAL;
}
+
return atomic_notifier_chain_register(
&opal_msg_notifier_head[msg_type], nb);
}
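+/* Unregister a notifier previously registered for the given message type */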
+int opal_message_notifier_unregister(enum opal_msg_type msg_type,
+ struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(
+ &opal_msg_notifier_head[msg_type], nb);
+}
+
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
/* notify subscribers */
@@ -351,7 +357,7 @@ static void opal_handle_message(void)
type = be32_to_cpu(msg.msg_type);
/* Sanity check */
- if (type > OPAL_MSG_TYPE_MAX) {
+ if (type >= OPAL_MSG_TYPE_MAX) {
pr_warning("%s: Unknown message type: %u\n", __func__, type);
return;
}
@@ -665,6 +671,9 @@ static void __init opal_dump_region_init(void)
uint64_t size;
int rc;
+ if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
+ return;
+
/* Register kernel log buffer */
addr = log_buf_addr_get();
if (addr == NULL)
@@ -684,6 +693,15 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
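+/* Create platform devices for the ibm,opal-flash nodes under the OPAL node */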
+static void opal_flash_init(struct device_node *opal_node)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(opal_node, np)
+ if (of_device_is_compatible(np, "ibm,opal-flash"))
+ of_platform_device_create(np, NULL, NULL);
+}
+
static void opal_ipmi_init(struct device_node *opal_node)
{
struct device_node *np;
@@ -741,6 +759,29 @@ static void __init opal_irq_init(struct device_node *dn)
}
}
+static int kopald(void *unused)
+{
+ set_freezable();
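+	/* Poll OPAL events periodically at the firmware-advertised heartbeat interval */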
+ do {
+ try_to_freeze();
+ opal_poll_events(NULL);
+ msleep_interruptible(opal_heartbeat);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static void opal_init_heartbeat(void)
+{
+	/* Old firmware: assume the HVC heartbeat is sufficient */
+ if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
+ &opal_heartbeat) != 0)
+ opal_heartbeat = 0;
+
+ if (opal_heartbeat)
+ kthread_run(kopald, NULL, "kopald");
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
@@ -769,6 +810,9 @@ static int __init opal_init(void)
/* Create i2c platform devices */
opal_i2c_create_devs();
+	/* Set up a heartbeat thread if requested by OPAL */
+ opal_init_heartbeat();
+
/* Find all OPAL interrupts and request them */
opal_irq_init(opal_node);
@@ -782,7 +826,7 @@ static int __init opal_init(void)
/* Setup error log interface */
rc = opal_elog_init();
/* Setup code update interface */
- opal_flash_init();
+ opal_flash_update_init();
/* Setup platform dump extract interface */
opal_platform_dump_init();
/* Setup system parameters interface */
@@ -791,8 +835,11 @@ static int __init opal_init(void)
opal_msglog_init();
}
+ /* Initialize OPAL IPMI backend */
opal_ipmi_init(opal_node);
+ opal_flash_init(opal_node);
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
@@ -823,13 +870,17 @@ void opal_shutdown(void)
}
/* Unregister memory dump region */
- opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
+ if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
+ opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
+EXPORT_SYMBOL_GPL(opal_flash_read);
+EXPORT_SYMBOL_GPL(opal_flash_write);
+EXPORT_SYMBOL_GPL(opal_flash_erase);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -894,6 +945,25 @@ void opal_free_sg_list(struct opal_sg_list *sg)
}
}
+int opal_error_code(int rc)
+{
+ switch (rc) {
+ case OPAL_SUCCESS: return 0;
+
+ case OPAL_PARAMETER: return -EINVAL;
+ case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
+ case OPAL_BUSY_EVENT: return -EBUSY;
+ case OPAL_NO_MEM: return -ENOMEM;
+
+ case OPAL_UNSUPPORTED: return -EIO;
+ case OPAL_HARDWARE: return -EIO;
+ case OPAL_INTERNAL_ERROR: return -EIO;
+ default:
+ pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
+ return -EIO;
+ }
+}
+
EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6c9ff2b95119..f8bc950efcae 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -44,6 +44,9 @@
#include "powernv.h"
#include "pci.h"
+/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
+#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
+
static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
{
@@ -56,11 +59,18 @@ static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV)
strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
- else
+ else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
sprintf(pfix, "%04x:%02x ",
pci_domain_nr(pe->pbus), pe->pbus->number);
+#ifdef CONFIG_PCI_IOV
+ else if (pe->flags & PNV_IODA_PE_VF)
+ sprintf(pfix, "%04x:%02x:%2x.%d",
+ pci_domain_nr(pe->parent_dev->bus),
+ (pe->rid & 0xff00) >> 8,
+ PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
+#endif /* CONFIG_PCI_IOV*/
printk("%spci %s: [PE# %.3d] %pV",
level, pfix, pe->pe_number, &vaf);
@@ -591,7 +601,7 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
bool is_add)
{
struct pnv_ioda_pe *slave;
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
int ret;
/*
@@ -630,8 +640,12 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
pdev = pe->pbus->self;
- else
+ else if (pe->flags & PNV_IODA_PE_DEV)
pdev = pe->pdev->bus->self;
+#ifdef CONFIG_PCI_IOV
+ else if (pe->flags & PNV_IODA_PE_VF)
+ pdev = pe->parent_dev->bus->self;
+#endif /* CONFIG_PCI_IOV */
while (pdev) {
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *parent;
@@ -649,6 +663,87 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
return 0;
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
+{
+ struct pci_dev *parent;
+ uint8_t bcomp, dcomp, fcomp;
+ int64_t rc;
+ long rid_end, rid;
+
+	/* Currently, we only deconfigure VF PEs; bus PEs are always there. */
+ if (pe->pbus) {
+ int count;
+
+ dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
+ parent = pe->pbus->self;
+ if (pe->flags & PNV_IODA_PE_BUS_ALL)
+ count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+ else
+ count = 1;
+
+ switch(count) {
+ case 1: bcomp = OpalPciBusAll; break;
+ case 2: bcomp = OpalPciBus7Bits; break;
+ case 4: bcomp = OpalPciBus6Bits; break;
+ case 8: bcomp = OpalPciBus5Bits; break;
+ case 16: bcomp = OpalPciBus4Bits; break;
+ case 32: bcomp = OpalPciBus3Bits; break;
+ default:
+ dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
+ count);
+ /* Do an exact match only */
+ bcomp = OpalPciBusAll;
+ }
+ rid_end = pe->rid + (count << 8);
+ } else {
+ if (pe->flags & PNV_IODA_PE_VF)
+ parent = pe->parent_dev;
+ else
+ parent = pe->pdev->bus->self;
+ bcomp = OpalPciBusAll;
+ dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
+ rid_end = pe->rid + 1;
+ }
+
+ /* Clear the reverse map */
+ for (rid = pe->rid; rid < rid_end; rid++)
+ phb->ioda.pe_rmap[rid] = 0;
+
+ /* Release from all parents PELT-V */
+ while (parent) {
+ struct pci_dn *pdn = pci_get_pdn(parent);
+ if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+ rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
+ pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+ /* XXX What to do in case of error ? */
+ }
+ parent = parent->bus->self;
+ }
+
+ opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+ /* Disassociate PE in PELT */
+ rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
+ pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
+ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
+ bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
+ if (rc)
+ pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+
+ pe->pbus = NULL;
+ pe->pdev = NULL;
+ pe->parent_dev = NULL;
+
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
struct pci_dev *parent;
@@ -675,15 +770,19 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
case 16: bcomp = OpalPciBus4Bits; break;
case 32: bcomp = OpalPciBus3Bits; break;
default:
- pr_err("%s: Number of subordinate busses %d"
- " unsupported\n",
- pci_name(pe->pbus->self), count);
+ dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
+ count);
/* Do an exact match only */
bcomp = OpalPciBusAll;
}
rid_end = pe->rid + (count << 8);
} else {
- parent = pe->pdev->bus->self;
+#ifdef CONFIG_PCI_IOV
+ if (pe->flags & PNV_IODA_PE_VF)
+ parent = pe->parent_dev;
+ else
+#endif /* CONFIG_PCI_IOV */
+ parent = pe->pdev->bus->self;
bcomp = OpalPciBusAll;
dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
@@ -774,6 +873,78 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
return 10;
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
+{
+ struct pci_dn *pdn = pci_get_pdn(dev);
+ int i;
+ struct resource *res, res2;
+ resource_size_t size;
+ u16 num_vfs;
+
+ if (!dev->is_physfn)
+ return -EINVAL;
+
+ /*
+ * "offset" is in VFs. The M64 windows are sized so that when they
+ * are segmented, each segment is the same size as the IOV BAR.
+ * Each segment is in a separate PE, and the high order bits of the
+ * address are the PE number. Therefore, each VF's BAR is in a
+ * separate PE, and changing the IOV BAR start address changes the
+ * range of PEs the VFs are in.
+ */
+ num_vfs = pdn->num_vfs;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ /*
+ * The actual IOV BAR range is determined by the start address
+		 * and the actual size needed for the num_vfs VF BARs. This check
+		 * makes sure that, after shifting, the range will not overlap
+ * with another device.
+ */
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2.flags = res->flags;
+ res2.start = res->start + (size * offset);
+ res2.end = res2.start + (size * num_vfs) - 1;
+
+ if (res2.end > res->end) {
+ dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
+ i, &res2, res, num_vfs, offset);
+ return -EBUSY;
+ }
+ }
+
+ /*
+	 * After doing so, there will be a "hole" in /proc/iomem when
+	 * offset is a positive value. It looks as if the device returned
+	 * some MMIO space to the system, though nobody can actually use it.
+ */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2 = *res;
+ res->start += size * offset;
+
+ dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
+ i, &res2, res, num_vfs, offset);
+ pci_update_resource(dev, i + PCI_IOV_RESOURCES);
+ }
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
@@ -857,7 +1028,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pci_name(dev));
continue;
}
- pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
pe->dma_weight += pnv_ioda_dma_weight(dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -916,6 +1086,10 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
return;
}
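+	/* The TCE table is now allocated per PE rather than embedded in the PE */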
+ pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
+ GFP_KERNEL, hose->node);
+ pe->tce32_table->data = pe;
+
/* Associate it with all child devices */
pnv_ioda_setup_same_PE(bus, pe);
@@ -974,6 +1148,441 @@ static void pnv_pci_ioda_setup_PEs(void)
}
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ int i, j;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ for (j = 0; j < M64_PER_IOV; j++) {
+ if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
+ continue;
+ opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
+ clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
+ pdn->m64_wins[i][j] = IODA_INVALID_M64;
+ }
+
+ return 0;
+}
+
+static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ unsigned int win;
+ struct resource *res;
+ int i, j;
+ int64_t rc;
+ int total_vfs;
+ resource_size_t size, start;
+ int pe_num;
+ int vf_groups;
+ int vf_per_group;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Initialize the m64_wins to IODA_INVALID_M64 */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ for (j = 0; j < M64_PER_IOV; j++)
+ pdn->m64_wins[i][j] = IODA_INVALID_M64;
+
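+	/* Work out how many M64 window groups are needed and how many VFs each covers */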
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs: M64_PER_IOV;
+ vf_per_group = (num_vfs <= M64_PER_IOV)? 1:
+ roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+ } else {
+ vf_groups = 1;
+ vf_per_group = 1;
+ }
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ for (j = 0; j < vf_groups; j++) {
+ do {
+ win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
+ phb->ioda.m64_bar_idx + 1, 0);
+
+ if (win >= phb->ioda.m64_bar_idx + 1)
+ goto m64_failed;
+ } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
+
+ pdn->m64_wins[i][j] = win;
+
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ size = pci_iov_resource_size(pdev,
+ PCI_IOV_RESOURCES + i);
+ size = size * vf_per_group;
+ start = res->start + size * j;
+ } else {
+ size = resource_size(res);
+ start = res->start;
+ }
+
+ /* Map the M64 here */
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ pe_num = pdn->offset + j;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ pe_num, OPAL_M64_WINDOW_TYPE,
+ pdn->m64_wins[i][j], 0);
+ }
+
+ rc = opal_pci_set_phb_mem_window(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ pdn->m64_wins[i][j],
+ start,
+ 0, /* unused */
+ size);
+
+
+ if (rc != OPAL_SUCCESS) {
+ dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
+ win, rc);
+ goto m64_failed;
+ }
+
+ if (pdn->m64_per_iov == M64_PER_IOV)
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
+ else
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);
+
+ if (rc != OPAL_SUCCESS) {
+ dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
+ win, rc);
+ goto m64_failed;
+ }
+ }
+ }
+ return 0;
+
+m64_failed:
+ pnv_pci_vf_release_m64(pdev);
+ return -EBUSY;
+}
+
+static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct iommu_table *tbl;
+ unsigned long addr;
+ int64_t rc;
+
+ bus = dev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ tbl = pe->tce32_table;
+ addr = tbl->it_base;
+
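+	/* Tear down the PE's 32-bit TCE window and 64-bit bypass window */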
+ opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ pe->pe_number << 1, 1, __pa(addr),
+ 0, 0x1000);
+
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ (pe->pe_number << 1) + 1,
+ pe->tce_bypass_base,
+ 0);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+
+ iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
+ free_pages(addr, get_order(TCE32_TABLE_SIZE));
+ pe->tce32_table = NULL;
+}
+
+static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe, *pe_n;
+ struct pci_dn *pdn;
+ u16 vf_index;
+ int64_t rc;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (!pdev->is_physfn)
+ return;
+
+ if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
+ int vf_group;
+ int vf_per_group;
+ int vf_index1;
+
+ vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+
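+		/* Unlink the VF PEs within each M64 group from one another's PELT-V */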
+ for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
+ for (vf_index = vf_group * vf_per_group;
+ vf_index < (vf_group + 1) * vf_per_group &&
+ vf_index < num_vfs;
+ vf_index++)
+ for (vf_index1 = vf_group * vf_per_group;
+ vf_index1 < (vf_group + 1) * vf_per_group &&
+ vf_index1 < num_vfs;
+ vf_index1++){
+
+ rc = opal_pci_set_peltv(phb->opal_id,
+ pdn->offset + vf_index,
+ pdn->offset + vf_index1,
+ OPAL_REMOVE_PE_FROM_DOMAIN);
+
+ if (rc)
+ dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
+ __func__,
+ pdn->offset + vf_index1, rc);
+ }
+ }
+
+ list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
+ if (pe->parent_dev != pdev)
+ continue;
+
+ pnv_pci_ioda2_release_dma_pe(pdev, pe);
+
+ /* Remove from list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_del(&pe->list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ pnv_ioda_deconfigure_pe(phb, pe);
+
+ pnv_ioda_free_pe(phb, pe->pe_number);
+ }
+}
+
+void pnv_pci_sriov_disable(struct pci_dev *pdev)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ struct pci_sriov *iov;
+ u16 num_vfs;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+ iov = pdev->sriov;
+ num_vfs = pdn->num_vfs;
+
+ /* Release VF PEs */
+ pnv_ioda_release_vf_PE(pdev, num_vfs);
+
+ if (phb->type == PNV_PHB_IODA2) {
+ if (pdn->m64_per_iov == 1)
+ pnv_pci_vf_resource_shift(pdev, -pdn->offset);
+
+ /* Release M64 windows */
+ pnv_pci_vf_release_m64(pdev);
+
+ /* Release PE numbers */
+ bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->offset = 0;
+ }
+}
+
+static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe);
+static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe;
+ int pe_num;
+ u16 vf_index;
+ struct pci_dn *pdn;
+ int64_t rc;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (!pdev->is_physfn)
+ return;
+
+ /* Reserve PE for each VF */
+ for (vf_index = 0; vf_index < num_vfs; vf_index++) {
+ pe_num = pdn->offset + vf_index;
+
+ pe = &phb->ioda.pe_array[pe_num];
+ pe->pe_number = pe_num;
+ pe->phb = phb;
+ pe->flags = PNV_IODA_PE_VF;
+ pe->pbus = NULL;
+ pe->parent_dev = pdev;
+ pe->tce32_seg = -1;
+ pe->mve_number = -1;
+ pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
+ pci_iov_virtfn_devfn(pdev, vf_index);
+
+ pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
+ hose->global_number, pdev->bus->number,
+ PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
+ PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
+
+ if (pnv_ioda_configure_pe(phb, pe)) {
+ /* XXX What do we do here ? */
+ if (pe_num)
+ pnv_ioda_free_pe(phb, pe_num);
+ pe->pdev = NULL;
+ continue;
+ }
+
+ pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
+ GFP_KERNEL, hose->node);
+ pe->tce32_table->data = pe;
+
+ /* Put PE to the list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_add_tail(&pe->list, &phb->ioda.pe_list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ }
+
+ if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
+ int vf_group;
+ int vf_per_group;
+ int vf_index1;
+
+ vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+
+ for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
+ for (vf_index = vf_group * vf_per_group;
+ vf_index < (vf_group + 1) * vf_per_group &&
+ vf_index < num_vfs;
+ vf_index++) {
+ for (vf_index1 = vf_group * vf_per_group;
+ vf_index1 < (vf_group + 1) * vf_per_group &&
+ vf_index1 < num_vfs;
+ vf_index1++) {
+
+ rc = opal_pci_set_peltv(phb->opal_id,
+ pdn->offset + vf_index,
+ pdn->offset + vf_index1,
+ OPAL_ADD_PE_TO_DOMAIN);
+
+ if (rc)
+ dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
+ __func__,
+ pdn->offset + vf_index1, rc);
+ }
+ }
+ }
+ }
+}
+
+int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ int ret;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (phb->type == PNV_PHB_IODA2) {
+		/* Reserve a contiguous range of PE numbers for the requested VFs */
+ mutex_lock(&phb->ioda.pe_alloc_mutex);
+ pdn->offset = bitmap_find_next_zero_area(
+ phb->ioda.pe_alloc, phb->ioda.total_pe,
+ 0, num_vfs, 0);
+ if (pdn->offset >= phb->ioda.total_pe) {
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+ dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
+ pdn->offset = 0;
+ return -EBUSY;
+ }
+ bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->num_vfs = num_vfs;
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+
+ /* Assign M64 window accordingly */
+ ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
+ if (ret) {
+ dev_info(&pdev->dev, "Not enough M64 window resources\n");
+ goto m64_failed;
+ }
+
+ /*
+ * When using one M64 BAR to map one IOV BAR, we need to shift
+ * the IOV BAR according to the PE# allocated to the VFs.
+ * Otherwise, the PE# for the VF will conflict with others.
+ */
+ if (pdn->m64_per_iov == 1) {
+ ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
+ if (ret)
+ goto m64_failed;
+ }
+ }
+
+ /* Setup VF PEs */
+ pnv_ioda_setup_vf_PE(pdev, num_vfs);
+
+ return 0;
+
+m64_failed:
+ bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->offset = 0;
+
+ return ret;
+}
+
+int pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ pnv_pci_sriov_disable(pdev);
+
+ /* Release PCI data */
+ remove_dev_pci_data(pdev);
+ return 0;
+}
+
+int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ /* Allocate PCI data */
+ add_dev_pci_data(pdev);
+
+ pnv_pci_sriov_enable(pdev, num_vfs);
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -989,7 +1598,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
- set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base_and_group(&pdev->dev, pe->tce32_table);
}
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
@@ -1016,7 +1625,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
} else {
dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(&pdev->dev, &dma_iommu_ops);
- set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base(&pdev->dev, pe->tce32_table);
}
*pdev->dev.dma_mask = dma_mask;
return 0;
@@ -1053,9 +1662,9 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
list_for_each_entry(dev, &bus->devices, bus_list) {
if (add_to_iommu_group)
set_iommu_table_base_and_group(&dev->dev,
- &pe->tce32_table);
+ pe->tce32_table);
else
- set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ set_iommu_table_base(&dev->dev, pe->tce32_table);
if (dev->subordinate)
pnv_ioda_setup_bus_dma(pe, dev->subordinate,
@@ -1145,8 +1754,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
__be64 *startp, __be64 *endp, bool rm)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32_table);
+ struct pnv_ioda_pe *pe = tbl->data;
struct pnv_phb *phb = pe->phb;
if (phb->type == PNV_PHB_IODA1)
@@ -1167,9 +1775,6 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
int64_t rc;
void *addr;
- /* 256M DMA window, 4K TCE pages, 8 bytes TCE */
-#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
-
/* XXX FIXME: Handle 64-bit only DMA devices */
/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
/* XXX FIXME: Allocate multi-level tables on PHB3 */
@@ -1212,7 +1817,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
}
/* Setup linux iommu table */
- tbl = &pe->tce32_table;
+ tbl = pe->tce32_table;
pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
base << 28, IOMMU_PAGE_SHIFT_4K);
@@ -1232,12 +1837,19 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
TCE_PCI_SWINV_PAIR);
}
iommu_init_table(tbl, phb->hose->node);
- iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- else
+ } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ } else if (pe->flags & PNV_IODA_PE_VF) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
+ }
return;
fail:
@@ -1250,8 +1862,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32_table);
+ struct pnv_ioda_pe *pe = tbl->data;
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -1296,10 +1907,10 @@ static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
pe->tce_bypass_base = 1ull << 59;
/* Install set_bypass callback for VFIO */
- pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+ pe->tce32_table->set_bypass = pnv_pci_ioda2_set_bypass;
/* Enable bypass by default */
- pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+ pnv_pci_ioda2_set_bypass(pe->tce32_table, true);
}
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
@@ -1347,7 +1958,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
}
/* Setup linux iommu table */
- tbl = &pe->tce32_table;
+ tbl = pe->tce32_table;
pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
IOMMU_PAGE_SHIFT_4K);
@@ -1365,12 +1976,19 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
}
iommu_init_table(tbl, phb->hose->node);
- iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- else
+ } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ } else if (pe->flags & PNV_IODA_PE_VF) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
+ }
/* Also create a bypass window */
if (!pnv_iommu_bypass_disabled)
@@ -1731,6 +2349,73 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_PCI_IOV
+static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
+{
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct resource *res;
+ int i;
+ resource_size_t size;
+ struct pci_dn *pdn;
+ int mul, total_vfs;
+
+ if (!pdev->is_physfn || pdev->is_added)
+ return;
+
+ hose = pci_bus_to_host(pdev->bus);
+ phb = hose->private_data;
+
+ pdn = pci_get_pdn(pdev);
+ pdn->vfs_expanded = 0;
+
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ pdn->m64_per_iov = 1;
+ mul = phb->ioda.total_pe;
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || res->parent)
+ continue;
+ if (!pnv_pci_is_mem_pref_64(res->flags)) {
+ dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
+ i, res);
+ continue;
+ }
+
+ size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
+
+ /* bigger than 64M */
+ if (size > (1 << 26)) {
+ dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, roundup power2\n",
+ i, res);
+ pdn->m64_per_iov = M64_PER_IOV;
+ mul = roundup_pow_of_two(total_vfs);
+ break;
+ }
+ }
+
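+	/* Expand each 64-bit prefetchable VF BAR so it spans one segment per PE */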
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || res->parent)
+ continue;
+ if (!pnv_pci_is_mem_pref_64(res->flags)) {
+ dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
+ i, res);
+ continue;
+ }
+
+ dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
+ size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
+ res->end = res->start + size * mul - 1;
+ dev_dbg(&pdev->dev, " %pR\n", res);
+ dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
+ i, res, mul);
+ }
+ pdn->vfs_expanded = mul;
+}
+#endif /* CONFIG_PCI_IOV */
+
/*
* This function is supposed to be called on basis of PE from top
 * to bottom style. So the I/O or MMIO segment assigned to
@@ -1777,7 +2462,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
region.start += phb->ioda.io_segsize;
index++;
}
- } else if (res->flags & IORESOURCE_MEM) {
+ } else if ((res->flags & IORESOURCE_MEM) &&
+ !pnv_pci_is_mem_pref_64(res->flags)) {
region.start = res->start -
hose->mem_offset[0] -
phb->ioda.m32_pci_base;
@@ -1907,10 +2593,29 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
return phb->ioda.io_segsize;
}
+#ifdef CONFIG_PCI_IOV
+static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
+ int resno)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ resource_size_t align, iov_align;
+
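+	/* Align the IOV BAR to its expanded size so each VF lands in its own PE segment */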
+ iov_align = resource_size(&pdev->resource[resno]);
+ if (iov_align)
+ return iov_align;
+
+ align = pci_iov_resource_size(pdev, resno);
+ if (pdn->vfs_expanded)
+ return pdn->vfs_expanded * align;
+
+ return align;
+}
+#endif /* CONFIG_PCI_IOV */
+
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
-static int pnv_pci_enable_device_hook(struct pci_dev *dev)
+static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -1922,13 +2627,13 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
* PEs isn't ready.
*/
if (!phb->initialized)
- return 0;
+ return true;
pdn = pci_get_pdn(dev);
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
- return -EINVAL;
+ return false;
- return 0;
+ return true;
}
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
@@ -1991,6 +2696,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = ioda_type;
+ mutex_init(&phb->ioda.pe_alloc_mutex);
/* Detect specific models for error handling */
if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
@@ -2050,6 +2756,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
INIT_LIST_HEAD(&phb->ioda.pe_list);
+ mutex_init(&phb->ioda.pe_list_mutex);
/* Calculate how many 32-bit TCE segments we have */
phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
@@ -2078,9 +2785,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->get_pe_state = pnv_ioda_get_pe_state;
phb->freeze_pe = pnv_ioda_freeze_pe;
phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
-#ifdef CONFIG_EEH
- phb->eeh_ops = &ioda_eeh_ops;
-#endif
/* Setup RID -> PE mapping function */
phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
@@ -2104,9 +2808,16 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
* the child P2P bridges) can form individual PE.
*/
ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
- ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
- ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
- ppc_md.pcibios_reset_secondary_bus = pnv_pci_reset_secondary_bus;
+ pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
+ pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
+ pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
+ hose->controller_ops = pnv_pci_controller_ops;
+
+#ifdef CONFIG_PCI_IOV
+ ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
+ ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
+#endif
+
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
/* Reset IODA tables to a clean state */
@@ -2121,8 +2832,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
*/
if (is_kdump_kernel()) {
pr_info(" Issue PHB reset ...\n");
- ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
- ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
+ pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
+ pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
}
/* Remove M64 resource if we can't configure it successfully */
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 6ef6d4d8e7e2..4729ca793813 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -133,6 +133,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
phb->hose->first_busno = 0;
phb->hose->last_busno = 0xff;
phb->hose->private_data = phb;
+ phb->hose->controller_ops = pnv_pci_controller_ops;
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = PNV_PHB_P5IOC2;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 54323d6b5166..bca2aeb6e4b6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -366,9 +366,9 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
spin_unlock_irqrestore(&phb->lock, flags);
}
-static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
- struct device_node *dn)
+static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
+ struct pnv_phb *phb = pdn->phb->private_data;
u8 fstate;
__be16 pcierr;
int pe_no;
@@ -379,7 +379,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
* setup that yet. So all ER errors should be mapped to
* reserved PE.
*/
- pe_no = PCI_DN(dn)->pe_number;
+ pe_no = pdn->pe_number;
if (pe_no == IODA_INVALID_PE) {
if (phb->type == PNV_PHB_P5IOC2)
pe_no = 0;
@@ -407,8 +407,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
}
cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
- (PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn),
- pe_no, fstate);
+ (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
/* Clear the frozen state if applicable */
if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
@@ -425,10 +424,9 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
}
}
-int pnv_pci_cfg_read(struct device_node *dn,
+int pnv_pci_cfg_read(struct pci_dn *pdn,
int where, int size, u32 *val)
{
- struct pci_dn *pdn = PCI_DN(dn);
struct pnv_phb *phb = pdn->phb->private_data;
u32 bdfn = (pdn->busno << 8) | pdn->devfn;
s64 rc;
@@ -462,10 +460,9 @@ int pnv_pci_cfg_read(struct device_node *dn,
return PCIBIOS_SUCCESSFUL;
}
-int pnv_pci_cfg_write(struct device_node *dn,
+int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val)
{
- struct pci_dn *pdn = PCI_DN(dn);
struct pnv_phb *phb = pdn->phb->private_data;
u32 bdfn = (pdn->busno << 8) | pdn->devfn;
@@ -489,18 +486,17 @@ int pnv_pci_cfg_write(struct device_node *dn,
}
#if CONFIG_EEH
-static bool pnv_pci_cfg_check(struct pci_controller *hose,
- struct device_node *dn)
+static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
struct eeh_dev *edev = NULL;
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pdn->phb->private_data;
/* EEH not enabled ? */
if (!(phb->flags & PNV_PHB_FLAG_EEH))
return true;
/* PE reset or device removed ? */
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn->edev;
if (edev) {
if (edev->pe &&
(edev->pe->state & EEH_PE_CFG_BLOCKED))
@@ -513,8 +509,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose,
return true;
}
#else
-static inline pnv_pci_cfg_check(struct pci_controller *hose,
- struct device_node *dn)
+static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
return true;
}
@@ -524,32 +519,26 @@ static int pnv_pci_read_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 *val)
{
- struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
struct pci_dn *pdn;
struct pnv_phb *phb;
- bool found = false;
int ret;
*val = 0xFFFFFFFF;
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn) {
- phb = pdn->phb->private_data;
- found = true;
- break;
- }
- }
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
- if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
+ if (!pnv_pci_cfg_check(pdn))
return PCIBIOS_DEVICE_NOT_FOUND;
- ret = pnv_pci_cfg_read(dn, where, size, val);
- if (phb->flags & PNV_PHB_FLAG_EEH) {
+ ret = pnv_pci_cfg_read(pdn, where, size, val);
+ phb = pdn->phb->private_data;
+ if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
if (*val == EEH_IO_ERROR_VALUE(size) &&
- eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
+ eeh_dev_check_failure(pdn->edev))
return PCIBIOS_DEVICE_NOT_FOUND;
} else {
- pnv_pci_config_check_eeh(phb, dn);
+ pnv_pci_config_check_eeh(pdn);
}
return ret;
@@ -559,27 +548,21 @@ static int pnv_pci_write_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 val)
{
- struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
struct pci_dn *pdn;
struct pnv_phb *phb;
- bool found = false;
int ret;
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn) {
- phb = pdn->phb->private_data;
- found = true;
- break;
- }
- }
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
- if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
+ if (!pnv_pci_cfg_check(pdn))
return PCIBIOS_DEVICE_NOT_FOUND;
- ret = pnv_pci_cfg_write(dn, where, size, val);
+ ret = pnv_pci_cfg_write(pdn, where, size, val);
+ phb = pdn->phb->private_data;
if (!(phb->flags & PNV_PHB_FLAG_EEH))
- pnv_pci_config_check_eeh(phb, dn);
+ pnv_pci_config_check_eeh(pdn);
return ret;
}
@@ -679,66 +662,31 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
tbl->it_type = TCE_PCI;
}
-static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
-{
- struct iommu_table *tbl;
- const __be64 *basep, *swinvp;
- const __be32 *sizep;
-
- basep = of_get_property(hose->dn, "linux,tce-base", NULL);
- sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
- if (basep == NULL || sizep == NULL) {
- pr_err("PCI: %s has missing tce entries !\n",
- hose->dn->full_name);
- return NULL;
- }
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
- if (WARN_ON(!tbl))
- return NULL;
- pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
- be32_to_cpup(sizep), 0, IOMMU_PAGE_SHIFT_4K);
- iommu_init_table(tbl, hose->node);
- iommu_register_group(tbl, pci_domain_nr(hose->bus), 0);
-
- /* Deal with SW invalidated TCEs when needed (BML way) */
- swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
- NULL);
- if (swinvp) {
- tbl->it_busno = be64_to_cpu(swinvp[1]);
- tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
- }
- return tbl;
-}
-
-static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
- struct pci_dev *pdev)
-{
- struct device_node *np = pci_bus_to_OF_node(hose->bus);
- struct pci_dn *pdn;
-
- if (np == NULL)
- return;
- pdn = PCI_DN(np);
- if (!pdn->iommu_table)
- pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
- if (!pdn->iommu_table)
- return;
- set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
-}
-
static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
+#ifdef CONFIG_PCI_IOV
+ struct pnv_ioda_pe *pe;
+ struct pci_dn *pdn;
+
+ /* Fix the VF pdn PE number */
+ if (pdev->is_virtfn) {
+ pdn = pci_get_pdn(pdev);
+ WARN_ON(pdn->pe_number != IODA_INVALID_PE);
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ if (pe->rid == ((pdev->bus->number << 8) |
+ (pdev->devfn & 0xff))) {
+ pdn->pe_number = pe->pe_number;
+ pe->pdev = pdev;
+ break;
+ }
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
- /* If we have no phb structure, try to setup a fallback based on
- * the device-tree (RTAS PCI for example)
- */
if (phb && phb->dma_dev_setup)
phb->dma_dev_setup(phb, pdev);
- else
- pnv_pci_dma_fallback_setup(hose, pdev);
}
int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
@@ -784,44 +732,36 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
void __init pnv_pci_init(void)
{
struct device_node *np;
+ bool found_ioda = false;
pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
- /* OPAL absent, try POPAL first then RTAS detection of PHBs */
- if (!firmware_has_feature(FW_FEATURE_OPAL)) {
-#ifdef CONFIG_PPC_POWERNV_RTAS
- init_pci_config_tokens();
- find_and_init_phbs();
-#endif /* CONFIG_PPC_POWERNV_RTAS */
- }
- /* OPAL is here, do our normal stuff */
- else {
- int found_ioda = 0;
+ /* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return;
- /* Look for IODA IO-Hubs. We don't support mixing IODA
- * and p5ioc2 due to the need to change some global
- * probing flags
- */
- for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
- pnv_pci_init_ioda_hub(np);
- found_ioda = 1;
- }
+ /* Look for IODA IO-Hubs. We don't support mixing IODA
+ * and p5ioc2 due to the need to change some global
+ * probing flags
+ */
+ for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
+ pnv_pci_init_ioda_hub(np);
+ found_ioda = true;
+ }
- /* Look for p5ioc2 IO-Hubs */
- if (!found_ioda)
- for_each_compatible_node(np, NULL, "ibm,p5ioc2")
- pnv_pci_init_p5ioc2_hub(np);
+ /* Look for p5ioc2 IO-Hubs */
+ if (!found_ioda)
+ for_each_compatible_node(np, NULL, "ibm,p5ioc2")
+ pnv_pci_init_p5ioc2_hub(np);
- /* Look for ioda2 built-in PHB3's */
- for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
- pnv_pci_init_ioda2_phb(np);
- }
+ /* Look for ioda2 built-in PHB3's */
+ for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
+ pnv_pci_init_ioda2_phb(np);
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
/* Configure IOMMU DMA hooks */
- ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
ppc_md.tce_build = pnv_tce_build_vm;
ppc_md.tce_free = pnv_tce_free_vm;
ppc_md.tce_build_rm = pnv_tce_build_rm;
@@ -837,3 +777,7 @@ void __init pnv_pci_init(void)
}
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
+
+struct pci_controller_ops pnv_pci_controller_ops = {
+ .dma_dev_setup = pnv_pci_dma_dev_setup,
+};
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 6c02ff8dd69f..070ee888fc95 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -23,6 +23,7 @@ enum pnv_phb_model {
#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
#define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */
#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
+#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
/* Data associated with a PE, including IOMMU tracking etc.. */
struct pnv_phb;
@@ -34,6 +35,9 @@ struct pnv_ioda_pe {
* entire bus (& children). In the former case, pdev
* is populated, in the later case, pbus is.
*/
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *parent_dev;
+#endif
struct pci_dev *pdev;
struct pci_bus *pbus;
@@ -53,7 +57,7 @@ struct pnv_ioda_pe {
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
int tce32_seg;
int tce32_segcount;
- struct iommu_table tce32_table;
+ struct iommu_table *tce32_table;
phys_addr_t tce_inval_reg_phys;
/* 64-bit TCE bypass region */
@@ -75,22 +79,6 @@ struct pnv_ioda_pe {
struct list_head list;
};
-/* IOC dependent EEH operations */
-#ifdef CONFIG_EEH
-struct pnv_eeh_ops {
- int (*post_init)(struct pci_controller *hose);
- int (*set_option)(struct eeh_pe *pe, int option);
- int (*get_state)(struct eeh_pe *pe);
- int (*reset)(struct eeh_pe *pe, int option);
- int (*get_log)(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len);
- int (*configure_bridge)(struct eeh_pe *pe);
- int (*err_inject)(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask);
- int (*next_error)(struct eeh_pe **pe);
-};
-#endif /* CONFIG_EEH */
-
#define PNV_PHB_FLAG_EEH (1 << 0)
struct pnv_phb {
@@ -104,10 +92,6 @@ struct pnv_phb {
int initialized;
spinlock_t lock;
-#ifdef CONFIG_EEH
- struct pnv_eeh_ops *eeh_ops;
-#endif
-
#ifdef CONFIG_DEBUG_FS
int has_dbgfs;
struct dentry *dbgfs;
@@ -165,6 +149,8 @@ struct pnv_phb {
/* PE allocation bitmap */
unsigned long *pe_alloc;
+ /* PE allocation mutex */
+ struct mutex pe_alloc_mutex;
/* M32 & IO segment maps */
unsigned int *m32_segmap;
@@ -179,6 +165,7 @@ struct pnv_phb {
* on the sequence of creation
*/
struct list_head pe_list;
+ struct mutex pe_list_mutex;
/* Reverse map of PEs, will have to extend if
* we are to support more than 256 PEs, indexed
@@ -213,15 +200,12 @@ struct pnv_phb {
};
extern struct pci_ops pnv_pci_ops;
-#ifdef CONFIG_EEH
-extern struct pnv_eeh_ops ioda_eeh_ops;
-#endif
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
unsigned char *log_buff);
-int pnv_pci_cfg_read(struct device_node *dn,
+int pnv_pci_cfg_read(struct pci_dn *pdn,
int where, int size, u32 *val);
-int pnv_pci_cfg_write(struct device_node *dn,
+int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
@@ -232,6 +216,6 @@ extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
__be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
-extern int ioda_eeh_phb_reset(struct pci_controller *hose, int option);
+extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 604c48e7879a..826d2c9bea56 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -29,6 +29,8 @@ static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
}
#endif
+extern struct pci_controller_ops pnv_pci_controller_ops;
+
extern u32 pnv_get_supported_cpuidle_states(void);
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 80db43944afe..6eb808ff637e 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -24,12 +24,22 @@
struct powernv_rng {
void __iomem *regs;
+ void __iomem *regs_real;
unsigned long mask;
};
static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
+int powernv_hwrng_present(void)
+{
+ struct powernv_rng *rng;
+
+ rng = get_cpu_var(powernv_rng);
+ put_cpu_var(rng);
+ return rng != NULL;
+}
+
static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
{
unsigned long parity;
@@ -46,6 +56,17 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
return val;
}
+int powernv_get_random_real_mode(unsigned long *v)
+{
+ struct powernv_rng *rng;
+
+ rng = raw_cpu_read(powernv_rng);
+
+ *v = rng_whiten(rng, in_rm64(rng->regs_real));
+
+ return 1;
+}
+
int powernv_get_random_long(unsigned long *v)
{
struct powernv_rng *rng;
@@ -80,12 +101,20 @@ static __init void rng_init_per_cpu(struct powernv_rng *rng,
static __init int rng_create(struct device_node *dn)
{
struct powernv_rng *rng;
+ struct resource res;
unsigned long val;
rng = kzalloc(sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
+ if (of_address_to_resource(dn, 0, &res)) {
+ kfree(rng);
+ return -ENXIO;
+ }
+
+ rng->regs_real = (void __iomem *)res.start;
+
rng->regs = of_iomap(dn, 0);
if (!rng->regs) {
kfree(rng);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index d2de7d5d7574..16fdcb23f4c3 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -32,7 +32,6 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/xics.h>
-#include <asm/rtas.h>
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
@@ -278,20 +277,6 @@ static void __init pnv_setup_machdep_opal(void)
ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
}
-#ifdef CONFIG_PPC_POWERNV_RTAS
-static void __init pnv_setup_machdep_rtas(void)
-{
- if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
- ppc_md.get_boot_time = rtas_get_boot_time;
- ppc_md.get_rtc_time = rtas_get_rtc_time;
- ppc_md.set_rtc_time = rtas_set_rtc_time;
- }
- ppc_md.restart = rtas_restart;
- pm_power_off = rtas_power_off;
- ppc_md.halt = rtas_halt;
-}
-#endif /* CONFIG_PPC_POWERNV_RTAS */
-
static u32 supported_cpuidle_states;
int pnv_save_sprs_for_winkle(void)
@@ -409,37 +394,39 @@ static int __init pnv_init_idle_states(void)
{
struct device_node *power_mgt;
int dt_idle_states;
- const __be32 *idle_state_flags;
- u32 len_flags, flags;
+ u32 *flags;
int i;
supported_cpuidle_states = 0;
if (cpuidle_disable != IDLE_NO_OVERRIDE)
- return 0;
+ goto out;
if (!firmware_has_feature(FW_FEATURE_OPALv3))
- return 0;
+ goto out;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
pr_warn("opal: PowerMgmt Node not found\n");
- return 0;
+ goto out;
+ }
+ dt_idle_states = of_property_count_u32_elems(power_mgt,
+ "ibm,cpu-idle-state-flags");
+ if (dt_idle_states < 0) {
+ pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ goto out;
}
- idle_state_flags = of_get_property(power_mgt,
- "ibm,cpu-idle-state-flags", &len_flags);
- if (!idle_state_flags) {
- pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
- return 0;
+ flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+ if (of_property_read_u32_array(power_mgt,
+ "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
+ goto out_free;
}
- dt_idle_states = len_flags / sizeof(u32);
+ for (i = 0; i < dt_idle_states; i++)
+ supported_cpuidle_states |= flags[i];
- for (i = 0; i < dt_idle_states; i++) {
- flags = be32_to_cpu(idle_state_flags[i]);
- supported_cpuidle_states |= flags;
- }
if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
patch_instruction(
(unsigned int *)pnv_fastsleep_workaround_at_entry,
@@ -449,6 +436,9 @@ static int __init pnv_init_idle_states(void)
PPC_INST_NOP);
}
pnv_alloc_idle_core_states();
+out_free:
+ kfree(flags);
+out:
return 0;
}
@@ -465,10 +455,6 @@ static int __init pnv_probe(void)
if (firmware_has_feature(FW_FEATURE_OPAL))
pnv_setup_machdep_opal();
-#ifdef CONFIG_PPC_POWERNV_RTAS
- else if (rtas.base)
- pnv_setup_machdep_rtas();
-#endif /* CONFIG_PPC_POWERNV_RTAS */
pr_debug("PowerNV detected !\n");
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 38a45088f633..8f70ba681a78 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -25,7 +25,6 @@
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
-#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
@@ -251,18 +250,6 @@ void __init pnv_smp_init(void)
{
smp_ops = &pnv_smp_ops;
- /* XXX We don't yet have a proper entry point from HAL, for
- * now we rely on kexec-style entry from BML
- */
-
-#ifdef CONFIG_PPC_RTAS
- /* Non-lpar has additional take/give timebase */
- if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = rtas_give_timebase;
- smp_ops->take_timebase = rtas_take_timebase;
- }
-#endif /* CONFIG_PPC_RTAS */
-
#ifdef CONFIG_HOTPLUG_CPU
ppc_md.cpu_die = pnv_smp_cpu_kill_self;
#endif
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index b358bec6c8cb..3c7707af3384 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -57,7 +57,7 @@ static void ps3_smp_message_pass(int cpu, int msg)
" (%d)\n", __func__, __LINE__, cpu, msg, result);
}
-static int __init ps3_smp_probe(void)
+static void __init ps3_smp_probe(void)
{
int cpu;
@@ -100,8 +100,6 @@ static int __init ps3_smp_probe(void)
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
}
-
- return 2;
}
void ps3_smp_cleanup_cpu(int cpu)
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index a758a9c3bbba..54c87d5d349d 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -16,7 +16,6 @@ config PPC_PSERIES
select PPC_UDBG_16550
select PPC_NATIVE
select PPC_PCI_CHOICE if EXPERT
- select ZLIB_DEFLATE
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index c22bb1b4beb8..019d34aaf054 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -10,6 +10,8 @@
* 2 as published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "dlpar: " fmt
+
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
@@ -410,6 +412,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
if (rc)
return -EINVAL;
+ rc = dlpar_acquire_drc(drc_index);
+ if (rc)
+ return -EINVAL;
+
parent = of_find_node_by_path("/cpus");
if (!parent)
return -ENODEV;
@@ -420,12 +426,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
of_node_put(parent);
- rc = dlpar_acquire_drc(drc_index);
- if (rc) {
- dlpar_free_cc_nodes(dn);
- return -EINVAL;
- }
-
rc = dlpar_attach_node(dn);
if (rc) {
dlpar_release_drc(drc_index);
@@ -535,13 +535,125 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
return count;
}
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
+static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+{
+ int rc;
+
+ /* pseries error logs are in BE format, convert to cpu type */
+ switch (hp_elog->id_type) {
+ case PSERIES_HP_ELOG_ID_DRC_COUNT:
+ hp_elog->_drc_u.drc_count =
+ be32_to_cpu(hp_elog->_drc_u.drc_count);
+ break;
+ case PSERIES_HP_ELOG_ID_DRC_INDEX:
+ hp_elog->_drc_u.drc_index =
+ be32_to_cpu(hp_elog->_drc_u.drc_index);
+ }
+
+ switch (hp_elog->resource) {
+ case PSERIES_HP_ELOG_RESOURCE_MEM:
+ rc = dlpar_memory(hp_elog);
+ break;
+ default:
+ pr_warn_ratelimited("Invalid resource (%d) specified\n",
+ hp_elog->resource);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
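
The fields of struct pseries_hp_errorlog that handle_dlpar_errorlog() above and dlpar_store() below rely on can be pictured roughly as in this simplified mirror; it is illustrative only (the authoritative definition and the PSERIES_HP_ELOG_* constants live in asm/rtas.h), and the DRC count/index arrive big-endian from firmware, which is why they are byte-swapped before dispatch:

#include <stdint.h>

/* Illustrative mirror only -- see arch/powerpc/include/asm/rtas.h
 * for the real layout. */
struct hp_errorlog_sketch {
	uint8_t resource;	/* e.g. PSERIES_HP_ELOG_RESOURCE_MEM */
	uint8_t action;		/* _ACTION_ADD or _ACTION_REMOVE */
	uint8_t id_type;	/* _ID_DRC_COUNT or _ID_DRC_INDEX */
	union {
		uint32_t drc_count;	/* big-endian until converted */
		uint32_t drc_index;	/* big-endian until converted */
	} _drc_u;
};
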
+static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pseries_hp_errorlog *hp_elog;
+ const char *arg;
+ int rc;
+
+ hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
+ if (!hp_elog) {
+ rc = -ENOMEM;
+ goto dlpar_store_out;
+ }
+
+ /* Parse out the request from the user; it will be in the form
+ * <resource> <action> <id_type> <id>
+ */
+ arg = buf;
+ if (!strncmp(arg, "memory", 6)) {
+ hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
+ arg += strlen("memory ");
+ } else {
+ pr_err("Invalid resource specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ if (!strncmp(arg, "add", 3)) {
+ hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
+ arg += strlen("add ");
+ } else if (!strncmp(arg, "remove", 6)) {
+ hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
+ arg += strlen("remove ");
+ } else {
+ pr_err("Invalid action specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ if (!strncmp(arg, "index", 5)) {
+ u32 index;
+
+ hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
+ arg += strlen("index ");
+ if (kstrtou32(arg, 0, &index)) {
+ rc = -EINVAL;
+ pr_err("Invalid drc_index specified: \"%s\"\n", buf);
+ goto dlpar_store_out;
+ }
+
+ hp_elog->_drc_u.drc_index = cpu_to_be32(index);
+ } else if (!strncmp(arg, "count", 5)) {
+ u32 count;
+
+ hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
+ arg += strlen("count ");
+ if (kstrtou32(arg, 0, &count)) {
+ rc = -EINVAL;
+ pr_err("Invalid count specified: \"%s\"\n", buf);
+ goto dlpar_store_out;
+ }
+
+ hp_elog->_drc_u.drc_count = cpu_to_be32(count);
+ } else {
+ pr_err("Invalid id_type specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ rc = handle_dlpar_errorlog(hp_elog);
+
+dlpar_store_out:
+ kfree(hp_elog);
+ return rc ? rc : count;
+}
+
+static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
+
static int __init pseries_dlpar_init(void)
{
+ int rc;
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ppc_md.cpu_probe = dlpar_cpu_probe;
ppc_md.cpu_release = dlpar_cpu_release;
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
- return 0;
+ rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
+
+ return rc;
}
machine_device_initcall(pseries, pseries_dlpar_init);
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
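
Because class_attr_dlpar is created on kernel_kobj, the new interface shows up as /sys/kernel/dlpar and takes the "<resource> <action> <id_type> <id>" string parsed above (only the memory resource is handled at this point). A small userspace sketch of driving it, with error handling trimmed and run as root:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hot-add one LMB by count; a removal by DRC index would be
	 * e.g. "memory remove index 0x80000002" (index is illustrative). */
	const char *req = "memory add count 1";
	int fd = open("/sys/kernel/dlpar", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/kernel/dlpar");
		return 1;
	}
	if (write(fd, req, strlen(req)) < 0)
		perror("dlpar request failed");
	close(fd);
	return 0;
}
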
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index a6c7e19f5eb3..2039397cc75d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -118,9 +118,8 @@ static int pseries_eeh_init(void)
return 0;
}
-static int pseries_eeh_cap_start(struct device_node *dn)
+static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
- struct pci_dn *pdn = PCI_DN(dn);
u32 status;
if (!pdn)
@@ -134,10 +133,9 @@ static int pseries_eeh_cap_start(struct device_node *dn)
}
-static int pseries_eeh_find_cap(struct device_node *dn, int cap)
+static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
- struct pci_dn *pdn = PCI_DN(dn);
- int pos = pseries_eeh_cap_start(dn);
+ int pos = pseries_eeh_cap_start(pdn);
int cnt = 48; /* Maximal number of capabilities */
u32 id;
@@ -160,10 +158,9 @@ static int pseries_eeh_find_cap(struct device_node *dn, int cap)
return 0;
}
-static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
+static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
- struct pci_dn *pdn = PCI_DN(dn);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
u32 header;
int pos = 256;
int ttl = (4096 - 256) / 8;
@@ -191,53 +188,44 @@ static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
}
/**
- * pseries_eeh_of_probe - EEH probe on the given device
- * @dn: OF node
- * @flag: Unused
+ * pseries_eeh_probe - EEH probe on the given device
+ * @pdn: PCI device node
+ * @data: Unused
*
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. This function
 * is introduced for that purpose.
*/
-static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
+static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
struct eeh_dev *edev;
struct eeh_pe pe;
- struct pci_dn *pdn = PCI_DN(dn);
- const __be32 *classp, *vendorp, *devicep;
- u32 class_code;
- const __be32 *regs;
u32 pcie_flags;
int enable = 0;
int ret;
/* Retrieve OF node and eeh device */
- edev = of_node_to_eeh_dev(dn);
- if (edev->pe || !of_device_is_available(dn))
+ edev = pdn_to_eeh_dev(pdn);
+ if (!edev || edev->pe)
return NULL;
- /* Retrieve class/vendor/device IDs */
- classp = of_get_property(dn, "class-code", NULL);
- vendorp = of_get_property(dn, "vendor-id", NULL);
- devicep = of_get_property(dn, "device-id", NULL);
-
- /* Skip for bad OF node or PCI-ISA bridge */
- if (!classp || !vendorp || !devicep)
- return NULL;
- if (dn->type && !strcmp(dn->type, "isa"))
+ /* Check class/vendor/device IDs */
+ if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
return NULL;
- class_code = of_read_number(classp, 1);
+ /* Skip for PCI-ISA bridge */
+ if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
+ return NULL;
/*
 * Update the class code and mode of the eeh device. We need to
 * correctly reflect whether the current device is a root port
 * or a PCIe switch downstream port.
*/
- edev->class_code = class_code;
- edev->pcix_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_PCIX);
- edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
- edev->aer_cap = pseries_eeh_find_ecap(dn, PCI_EXT_CAP_ID_ERR);
+ edev->class_code = pdn->class_code;
+ edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
+ edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
+ edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
edev->mode &= 0xFFFFFF00;
if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
@@ -252,24 +240,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
}
}
- /* Retrieve the device address */
- regs = of_get_property(dn, "reg", NULL);
- if (!regs) {
- pr_warn("%s: OF node property %s::reg not found\n",
- __func__, dn->full_name);
- return NULL;
- }
-
/* Initialize the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
pe.phb = edev->phb;
- pe.config_addr = of_read_number(regs, 1);
+ pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
/* Enable EEH on the device */
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (!ret) {
- edev->config_addr = of_read_number(regs, 1);
/* Retrieve PE address */
+ edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
pe.addr = edev->pe_config_addr;
@@ -285,16 +265,17 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
eeh_add_flag(EEH_ENABLED);
eeh_add_to_parent_pe(edev);
- pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
- __func__, dn->full_name, pe.phb->global_number,
- pe.addr, pe.config_addr);
- } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
- (of_node_to_eeh_dev(dn->parent))->pe) {
+ pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
+ __func__, pdn->busno, PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn), pe.phb->global_number,
+ pe.addr);
+ } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
+ (pdn_to_eeh_dev(pdn->parent))->pe) {
/* This device doesn't support EEH, but it may have an
* EEH parent, in which case we mark it as supported.
*/
- edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
- edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
+ edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr;
+ edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
eeh_add_to_parent_pe(edev);
}
}
@@ -670,45 +651,36 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
/**
* pseries_eeh_read_config - Read PCI config space
- * @dn: device node
+ * @pdn: PCI device node
* @where: PCI address
* @size: size to read
* @val: return value
*
 * Read config space from the specified device
*/
-static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
+static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
- struct pci_dn *pdn;
-
- pdn = PCI_DN(dn);
-
return rtas_read_config(pdn, where, size, val);
}
/**
* pseries_eeh_write_config - Write PCI config space
- * @dn: device node
+ * @pdn: PCI device node
* @where: PCI address
* @size: size to write
* @val: value to be written
*
* Write config space to the specified device
*/
-static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
+static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
- struct pci_dn *pdn;
-
- pdn = PCI_DN(dn);
-
return rtas_write_config(pdn, where, size, val);
}
static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
.init = pseries_eeh_init,
- .of_probe = pseries_eeh_of_probe,
- .dev_probe = NULL,
+ .probe = pseries_eeh_probe,
.set_option = pseries_eeh_set_option,
.get_pe_addr = pseries_eeh_get_pe_addr,
.get_state = pseries_eeh_get_state,
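
pseries_eeh_probe() above now builds the config address from the cached pci_dn instead of the OF "reg" property: (busno << 16) | (devfn << 8). A quick standalone check of that encoding for an illustrative device at bus 0x05, slot 3, function 1:

#include <stdio.h>

int main(void)
{
	unsigned int busno = 0x05, slot = 3, func = 1;
	unsigned int devfn = (slot << 3) | func;	/* same packing as PCI_DEVFN() */
	unsigned int config_addr = (busno << 16) | (devfn << 8);

	/* Prints 00051900: bus in bits 16-23, devfn in bits 8-15. */
	printf("%08x\n", config_addr);
	return 0;
}
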
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index fa41f0da5b6f..0ced387e1463 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -9,11 +9,14 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
+
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
+#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
@@ -21,6 +24,8 @@
#include <asm/sparsemem.h>
#include "pseries.h"
+static bool rtas_hp_event;
+
unsigned long pseries_memory_block_size(void)
{
struct device_node *np;
@@ -64,6 +69,67 @@ unsigned long pseries_memory_block_size(void)
return memblock_size;
}
+static void dlpar_free_drconf_property(struct property *prop)
+{
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+}
+
+static struct property *dlpar_clone_drconf_property(struct device_node *dn)
+{
+ struct property *prop, *new_prop;
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i;
+
+ prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
+ if (!prop)
+ return NULL;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return NULL;
+
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->value = kmalloc(prop->length, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value) {
+ dlpar_free_drconf_property(new_prop);
+ return NULL;
+ }
+
+ memcpy(new_prop->value, prop->value, prop->length);
+ new_prop->length = prop->length;
+
+ /* Convert the property to cpu endian-ness */
+ p = new_prop->value;
+ *p = be32_to_cpu(*p);
+
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
+ lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
+ }
+
+ return new_prop;
+}
+
+static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
+{
+ unsigned long section_nr;
+ struct mem_section *mem_sect;
+ struct memory_block *mem_block;
+
+ section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
+ mem_sect = __nr_to_section(section_nr);
+
+ mem_block = find_memory_block(mem_sect);
+ return mem_block;
+}
+
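
dlpar_clone_drconf_property() above copies ibm,dynamic-memory and byte-swaps the LMB count plus each cell's base_addr, drc_index and flags into CPU endianness so the add/remove paths further down can work on them in place; dlpar_update_drconf_property(), later in this hunk, swaps everything back before of_update_property(). A standalone illustration of that round trip on one cell-like record, using the userspace endian.h helpers in place of be32_to_cpu()/cpu_to_be32() and omitting the of_drconf_cell fields the hotplug code does not touch:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for of_drconf_cell: only the members converted
 * by the code above. The real cell has additional fields. */
struct drconf_cell_sketch {
	uint64_t base_addr;
	uint32_t drc_index;
	uint32_t flags;
};

int main(void)
{
	/* Pretend this came out of the device tree (big-endian). */
	struct drconf_cell_sketch lmb = {
		.base_addr = htobe64(0x10000000ULL),
		.drc_index = htobe32(0x80000002),
		.flags     = htobe32(0x8),	/* DRCONF_MEM_ASSIGNED; value illustrative */
	};

	/* Clone step: convert to CPU endianness for in-place manipulation. */
	lmb.base_addr = be64toh(lmb.base_addr);
	lmb.drc_index = be32toh(lmb.drc_index);
	lmb.flags     = be32toh(lmb.flags);
	printf("LMB at 0x%llx, drc index 0x%x, flags 0x%x\n",
	       (unsigned long long)lmb.base_addr, lmb.drc_index, lmb.flags);

	/* Update step: convert back to BE before writing the property. */
	lmb.base_addr = htobe64(lmb.base_addr);
	lmb.drc_index = htobe32(lmb.drc_index);
	lmb.flags     = htobe32(lmb.flags);
	return 0;
}
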
#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
@@ -122,6 +188,173 @@ static int pseries_remove_mem_node(struct device_node *np)
pseries_remove_memblock(base, lmb_size);
return 0;
}
+
+static bool lmb_is_removable(struct of_drconf_cell *lmb)
+{
+ int i, scns_per_block;
+ int rc = 1;
+ unsigned long pfn, block_sz;
+ u64 phys_addr;
+
+ if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
+ return false;
+
+ block_sz = memory_block_size_bytes();
+ scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+ phys_addr = lmb->base_addr;
+
+ for (i = 0; i < scns_per_block; i++) {
+ pfn = PFN_DOWN(phys_addr);
+ if (!pfn_present(pfn))
+ continue;
+
+ rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+ phys_addr += MIN_MEMORY_BLOCK_SIZE;
+ }
+
+ return rc ? true : false;
+}
+
+static int dlpar_add_lmb(struct of_drconf_cell *);
+
+static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
+{
+ struct memory_block *mem_block;
+ unsigned long block_sz;
+ int nid, rc;
+
+ if (!lmb_is_removable(lmb))
+ return -EINVAL;
+
+ mem_block = lmb_to_memblock(lmb);
+ if (!mem_block)
+ return -EINVAL;
+
+ rc = device_offline(&mem_block->dev);
+ put_device(&mem_block->dev);
+ if (rc)
+ return rc;
+
+ block_sz = pseries_memory_block_size();
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ remove_memory(nid, lmb->base_addr, block_sz);
+
+ /* Update memory regions for memory remove */
+ memblock_remove(lmb->base_addr, block_sz);
+
+ dlpar_release_drc(lmb->drc_index);
+
+ lmb->flags &= ~DRCONF_MEM_ASSIGNED;
+ return 0;
+}
+
+static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
+ struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ int lmbs_removed = 0;
+ int lmbs_available = 0;
+ u32 num_lmbs, *p;
+ int i, rc;
+
+ pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
+
+ if (lmbs_to_remove == 0)
+ return -EINVAL;
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ /* Validate that there are enough LMBs to satisfy the request */
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
+ lmbs_available++;
+ }
+
+ if (lmbs_available < lmbs_to_remove)
+ return -EINVAL;
+
+ for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ if (rc)
+ continue;
+
+ lmbs_removed++;
+
+ /* Mark this lmb so we can add it later if all of the
+ * requested LMBs cannot be removed.
+ */
+ lmbs[i].reserved = 1;
+ }
+
+ if (lmbs_removed != lmbs_to_remove) {
+ pr_err("Memory hot-remove failed, adding LMB's back\n");
+
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ rc = dlpar_add_lmb(&lmbs[i]);
+ if (rc)
+ pr_err("Failed to add LMB back, drc index %x\n",
+ lmbs[i].drc_index);
+
+ lmbs[i].reserved = 0;
+ }
+
+ rc = -EINVAL;
+ } else {
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ pr_info("Memory at %llx was hot-removed\n",
+ lmbs[i].base_addr);
+
+ lmbs[i].reserved = 0;
+ }
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int lmb_found;
+ int i, rc;
+
+ pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ lmb_found = 0;
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].drc_index == drc_index) {
+ lmb_found = 1;
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ break;
+ }
+ }
+
+ if (!lmb_found)
+ rc = -EINVAL;
+
+ if (rc)
+ pr_info("Failed to hot-remove memory at %llx\n",
+ lmbs[i].base_addr);
+ else
+ pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);
+
+ return rc;
+}
+
#else
static inline int pseries_remove_memblock(unsigned long base,
unsigned int memblock_size)
@@ -132,8 +365,261 @@ static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
+static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
+ struct property *prop)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_MEMORY_HOTREMOVE */
+static int dlpar_add_lmb(struct of_drconf_cell *lmb)
+{
+ struct memory_block *mem_block;
+ unsigned long block_sz;
+ int nid, rc;
+
+ if (lmb->flags & DRCONF_MEM_ASSIGNED)
+ return -EINVAL;
+
+ block_sz = memory_block_size_bytes();
+
+ rc = dlpar_acquire_drc(lmb->drc_index);
+ if (rc)
+ return rc;
+
+ /* Find the node id for this address */
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ /* Add the memory */
+ rc = add_memory(nid, lmb->base_addr, block_sz);
+ if (rc) {
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ /* Register this block of memory */
+ rc = memblock_add(lmb->base_addr, block_sz);
+ if (rc) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ mem_block = lmb_to_memblock(lmb);
+ if (!mem_block) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return -EINVAL;
+ }
+
+ rc = device_online(&mem_block->dev);
+ put_device(&mem_block->dev);
+ if (rc) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
+ return 0;
+}
+
+static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int lmbs_available = 0;
+ int lmbs_added = 0;
+ int i, rc;
+
+ pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
+
+ if (lmbs_to_add == 0)
+ return -EINVAL;
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ /* Validate that there are enough LMBs to satisfy the request */
+ for (i = 0; i < num_lmbs; i++) {
+ if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
+ lmbs_available++;
+ }
+
+ if (lmbs_available < lmbs_to_add)
+ return -EINVAL;
+
+ for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
+ rc = dlpar_add_lmb(&lmbs[i]);
+ if (rc)
+ continue;
+
+ lmbs_added++;
+
+ /* Mark this lmb so we can remove it later if all of the
+ * requested LMBs cannot be added.
+ */
+ lmbs[i].reserved = 1;
+ }
+
+ if (lmbs_added != lmbs_to_add) {
+ pr_err("Memory hot-add failed, removing any added LMBs\n");
+
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ if (rc)
+ pr_err("Failed to remove LMB, drc index %x\n",
+ be32_to_cpu(lmbs[i].drc_index));
+ }
+ rc = -EINVAL;
+ } else {
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ pr_info("Memory at %llx (drc index %x) was hot-added\n",
+ lmbs[i].base_addr, lmbs[i].drc_index);
+ lmbs[i].reserved = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i, lmb_found;
+ int rc;
+
+ pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ lmb_found = 0;
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].drc_index == drc_index) {
+ lmb_found = 1;
+ rc = dlpar_add_lmb(&lmbs[i]);
+ break;
+ }
+ }
+
+ if (!lmb_found)
+ rc = -EINVAL;
+
+ if (rc)
+ pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
+ else
+ pr_info("Memory at %llx (drc index %x) was hot-added\n",
+ lmbs[i].base_addr, drc_index);
+
+ return rc;
+}
+
+static void dlpar_update_drconf_property(struct device_node *dn,
+ struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i;
+
+ /* Convert the property back to BE */
+ p = prop->value;
+ num_lmbs = *p;
+ *p = cpu_to_be32(*p);
+ p++;
+
+ lmbs = (struct of_drconf_cell *)p;
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
+ lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
+ }
+
+ rtas_hp_event = true;
+ of_update_property(dn, prop);
+ rtas_hp_event = false;
+}
+
+int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+{
+ struct device_node *dn;
+ struct property *prop;
+ u32 count, drc_index;
+ int rc;
+
+ count = hp_elog->_drc_u.drc_count;
+ drc_index = hp_elog->_drc_u.drc_index;
+
+ lock_device_hotplug();
+
+ dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (!dn) {
+ rc = -EINVAL;
+ goto dlpar_memory_out;
+ }
+
+ prop = dlpar_clone_drconf_property(dn);
+ if (!prop) {
+ rc = -EINVAL;
+ goto dlpar_memory_out;
+ }
+
+ switch (hp_elog->action) {
+ case PSERIES_HP_ELOG_ACTION_ADD:
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+ rc = dlpar_memory_add_by_count(count, prop);
+ else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ rc = dlpar_memory_add_by_index(drc_index, prop);
+ else
+ rc = -EINVAL;
+ break;
+ case PSERIES_HP_ELOG_ACTION_REMOVE:
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+ rc = dlpar_memory_remove_by_count(count, prop);
+ else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ rc = dlpar_memory_remove_by_index(drc_index, prop);
+ else
+ rc = -EINVAL;
+ break;
+ default:
+ pr_err("Invalid action (%d) specified\n", hp_elog->action);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ dlpar_free_drconf_property(prop);
+ else
+ dlpar_update_drconf_property(dn, prop);
+
+dlpar_memory_out:
+ of_node_put(dn);
+ unlock_device_hotplug();
+ return rc;
+}
+
static int pseries_add_mem_node(struct device_node *np)
{
const char *type;
@@ -174,6 +660,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
__be32 *p;
int i, rc = -EINVAL;
+ if (rtas_hp_event)
+ return 0;
+
memblock_size = pseries_memory_block_size();
if (!memblock_size)
return -EINVAL;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 7803a19adb31..61d5a17f45c0 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -49,6 +49,7 @@
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
+#include "pseries.h"
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
__be64 *startp, __be64 *endp)
@@ -1307,16 +1308,16 @@ void iommu_init_early_pSeries(void)
ppc_md.tce_free = tce_free_pSeriesLP;
}
ppc_md.tce_get = tce_get_pSeriesLP;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
+ pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
+ pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
ppc_md.tce_get = tce_get_pseries;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
+ pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
+ pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
}
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 8f35d525cede..ceb18d349459 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -320,28 +320,34 @@ static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
{
u64 streamid;
int rc;
- int vasi_rc = 0;
rc = kstrtou64(buf, 0, &streamid);
if (rc)
return rc;
do {
- rc = rtas_ibm_suspend_me(streamid, &vasi_rc);
- if (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE)
+ rc = rtas_ibm_suspend_me(streamid);
+ if (rc == -EAGAIN)
ssleep(1);
- } while (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE);
+ } while (rc == -EAGAIN);
if (rc)
return rc;
- if (vasi_rc)
- return vasi_rc;
post_mobility_fixup();
return count;
}
+/*
+ * Used by drmgr to determine the kernel behavior of the migration interface.
+ *
+ * Version 1: Performs all PAPR requirements for migration including
+ * firmware activation and device tree update.
+ */
+#define MIGRATION_API_VERSION 1
+
static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store);
+static CLASS_ATTR_STRING(api_version, S_IRUGO, __stringify(MIGRATION_API_VERSION));
static int __init mobility_sysfs_init(void)
{
@@ -352,7 +358,13 @@ static int __init mobility_sysfs_init(void)
return -ENOMEM;
rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
+ if (rc)
+ pr_err("mobility: unable to create migration sysfs file (%d)\n", rc);
- return rc;
+ rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
+ if (rc)
+ pr_err("mobility: unable to create api_version sysfs file (%d)\n", rc);
+
+ return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);
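
With the api_version attribute added above, drmgr (or any other tool) can check the kernel's migration behaviour before writing a stream id. A hedged userspace sketch, assuming the attributes surface under /sys/kernel/mobility/ (mobility_kobj is created earlier in this file, outside the hunk shown):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char ver[8] = "";
	int fd = open("/sys/kernel/mobility/api_version", O_RDONLY);

	if (fd >= 0) {
		if (read(fd, ver, sizeof(ver) - 1) > 0)
			printf("migration API version: %s", ver);
		close(fd);
	}

	/* Starting a migration would be a write of the firmware-provided
	 * stream id to /sys/kernel/mobility/migration; per the hunk above,
	 * the kernel now retries internally while rtas_ibm_suspend_me()
	 * reports -EAGAIN (VASI not yet suspendable). */
	return 0;
}
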
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 691a154c286d..c8d24f9a6948 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -195,6 +195,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
/* Found our PE and assume 8 at that point. */
@@ -204,10 +205,11 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
return NULL;
/* Get the top level device in the PE */
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(PCI_DN(dn));
if (edev->pe)
edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
- dn = eeh_dev_to_of_node(edev);
+ pdn = eeh_dev_to_pdn(edev);
+ dn = pdn ? pdn->node : NULL;
if (!dn)
return NULL;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 054a0ed5c7ee..9f8184175c86 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -20,7 +20,6 @@
#include <linux/kmsg_dump.h>
#include <linux/pstore.h>
#include <linux/ctype.h>
-#include <linux/zlib.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -30,129 +29,17 @@
/* Max bytes to read/write in one go */
#define NVRW_CNT 0x20
-/*
- * Set oops header version to distinguish between old and new format header.
- * lnx,oops-log partition max size is 4000, header version > 4000 will
- * help in identifying new header.
- */
-#define OOPS_HDR_VERSION 5000
-
static unsigned int nvram_size;
static int nvram_fetch, nvram_store;
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
-struct err_log_info {
- __be32 error_type;
- __be32 seq_num;
-};
-
-struct nvram_os_partition {
- const char *name;
- int req_size; /* desired size, in bytes */
- int min_size; /* minimum acceptable size (0 means req_size) */
- long size; /* size of data portion (excluding err_log_info) */
- long index; /* offset of data portion of partition */
- bool os_partition; /* partition initialized by OS, not FW */
-};
-
-static struct nvram_os_partition rtas_log_partition = {
- .name = "ibm,rtas-log",
- .req_size = 2079,
- .min_size = 1055,
- .index = -1,
- .os_partition = true
-};
-
-static struct nvram_os_partition oops_log_partition = {
- .name = "lnx,oops-log",
- .req_size = 4000,
- .min_size = 2000,
- .index = -1,
- .os_partition = true
-};
-
-static const char *pseries_nvram_os_partitions[] = {
- "ibm,rtas-log",
- "lnx,oops-log",
- NULL
-};
-
-struct oops_log_info {
- __be16 version;
- __be16 report_length;
- __be64 timestamp;
-} __attribute__((packed));
-
-static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason);
-
-static struct kmsg_dumper nvram_kmsg_dumper = {
- .dump = oops_to_nvram
-};
-
/* See clobbering_unread_rtas_event() */
#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
-static unsigned long last_unread_rtas_event; /* timestamp */
-
-/*
- * For capturing and compressing an oops or panic report...
-
- * big_oops_buf[] holds the uncompressed text we're capturing.
- *
- * oops_buf[] holds the compressed text, preceded by a oops header.
- * oops header has u16 holding the version of oops header (to differentiate
- * between old and new format header) followed by u16 holding the length of
- * the compressed* text (*Or uncompressed, if compression fails.) and u64
- * holding the timestamp. oops_buf[] gets written to NVRAM.
- *
- * oops_log_info points to the header. oops_data points to the compressed text.
- *
- * +- oops_buf
- * | +- oops_data
- * v v
- * +-----------+-----------+-----------+------------------------+
- * | version | length | timestamp | text |
- * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
- * +-----------+-----------+-----------+------------------------+
- * ^
- * +- oops_log_info
- *
- * We preallocate these buffers during init to avoid kmalloc during oops/panic.
- */
-static size_t big_oops_buf_sz;
-static char *big_oops_buf, *oops_buf;
-static char *oops_data;
-static size_t oops_data_sz;
-
-/* Compression parameters */
-#define COMPR_LEVEL 6
-#define WINDOW_BITS 12
-#define MEM_LEVEL 4
-static struct z_stream_s stream;
+static time64_t last_unread_rtas_event; /* timestamp */
#ifdef CONFIG_PSTORE
-static struct nvram_os_partition of_config_partition = {
- .name = "of-config",
- .index = -1,
- .os_partition = false
-};
-
-static struct nvram_os_partition common_partition = {
- .name = "common",
- .index = -1,
- .os_partition = false
-};
-
-static enum pstore_type_id nvram_type_ids[] = {
- PSTORE_TYPE_DMESG,
- PSTORE_TYPE_PPC_RTAS,
- PSTORE_TYPE_PPC_OF,
- PSTORE_TYPE_PPC_COMMON,
- -1
-};
-static int read_type;
-static unsigned long last_rtas_event;
+time64_t last_rtas_event;
#endif
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
@@ -246,132 +133,26 @@ static ssize_t pSeries_nvram_get_size(void)
return nvram_size ? nvram_size : -ENODEV;
}
-
-/* nvram_write_os_partition, nvram_write_error_log
+/* nvram_write_error_log
*
* We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode. If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk. For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name | data |
- * |0 |1 |2 3|4 15|16 length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log |
- * |0 3|4 7|8 error_log_size-1|
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
+ * the failure information to decode.
*/
-static int nvram_write_os_partition(struct nvram_os_partition *part,
- char *buff, int length,
- unsigned int err_type,
- unsigned int error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (part->index == -1) {
- return -ESPIPE;
- }
-
- if (length > part->size) {
- length = part->size;
- }
-
- info.error_type = cpu_to_be32(err_type);
- info.seq_num = cpu_to_be32(error_log_cnt);
-
- tmp_index = part->index;
-
- rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
- return rc;
- }
-
- rc = ppc_md.nvram_write(buff, length, &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
- return rc;
- }
-
- return 0;
-}
-
int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int error_log_cnt)
{
int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
err_type, error_log_cnt);
if (!rc) {
- last_unread_rtas_event = get_seconds();
+ last_unread_rtas_event = ktime_get_real_seconds();
#ifdef CONFIG_PSTORE
- last_rtas_event = get_seconds();
+ last_rtas_event = ktime_get_real_seconds();
#endif
}
return rc;
}
-/* nvram_read_partition
- *
- * Reads nvram partition for at most 'length'
- */
-static int nvram_read_partition(struct nvram_os_partition *part, char *buff,
- int length, unsigned int *err_type,
- unsigned int *error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (part->index == -1)
- return -1;
-
- if (length > part->size)
- length = part->size;
-
- tmp_index = part->index;
-
- if (part->os_partition) {
- rc = ppc_md.nvram_read((char *)&info,
- sizeof(struct err_log_info),
- &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
- return rc;
- }
- }
-
- rc = ppc_md.nvram_read(buff, length, &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
- return rc;
- }
-
- if (part->os_partition) {
- *error_log_cnt = be32_to_cpu(info.seq_num);
- *err_type = be32_to_cpu(info.error_type);
- }
-
- return 0;
-}
-
/* nvram_read_error_log
*
* Reads nvram for error log for at most 'length'
@@ -407,67 +188,6 @@ int nvram_clear_error_log(void)
return 0;
}
-/* pseries_nvram_init_os_partition
- *
- * This sets up a partition with an "OS" signature.
- *
- * The general strategy is the following:
- * 1.) If a partition with the indicated name already exists...
- * - If it's large enough, use it.
- * - Otherwise, recycle it and keep going.
- * 2.) Search for a free partition that is large enough.
- * 3.) If there's not a free partition large enough, recycle any obsolete
- * OS partitions and try again.
- * 4.) Will first try getting a chunk that will satisfy the requested size.
- * 5.) If a chunk of the requested size cannot be allocated, then try finding
- * a chunk that will satisfy the minum needed.
- *
- * Returns 0 on success, else -1.
- */
-static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
- *part)
-{
- loff_t p;
- int size;
-
- /* Look for ours */
- p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
-
- /* Found one but too small, remove it */
- if (p && size < part->min_size) {
- pr_info("nvram: Found too small %s partition,"
- " removing it...\n", part->name);
- nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
- p = 0;
- }
-
- /* Create one if we didn't find */
- if (!p) {
- p = nvram_create_partition(part->name, NVRAM_SIG_OS,
- part->req_size, part->min_size);
- if (p == -ENOSPC) {
- pr_info("nvram: No room to create %s partition, "
- "deleting any obsolete OS partitions...\n",
- part->name);
- nvram_remove_partition(NULL, NVRAM_SIG_OS,
- pseries_nvram_os_partitions);
- p = nvram_create_partition(part->name, NVRAM_SIG_OS,
- part->req_size, part->min_size);
- }
- }
-
- if (p <= 0) {
- pr_err("nvram: Failed to find or create %s"
- " partition, err %d\n", part->name, (int)p);
- return -1;
- }
-
- part->index = p;
- part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
-
- return 0;
-}
-
/*
* Are we using the ibm,rtas-log for oops/panic reports? And if so,
* would logging this oops/panic overwrite an RTAS event that rtas_errd
@@ -476,321 +196,14 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
* We assume that if rtas_errd hasn't read the RTAS event in
* NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
*/
-static int clobbering_unread_rtas_event(void)
+int clobbering_unread_rtas_event(void)
{
return (oops_log_partition.index == rtas_log_partition.index
&& last_unread_rtas_event
- && get_seconds() - last_unread_rtas_event <=
+ && ktime_get_real_seconds() - last_unread_rtas_event <=
NVRAM_RTAS_READ_TIMEOUT);
}
-/* Derived from logfs_compress() */
-static int nvram_compress(const void *in, void *out, size_t inlen,
- size_t outlen)
-{
- int err, ret;
-
- ret = -EIO;
- err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
- MEM_LEVEL, Z_DEFAULT_STRATEGY);
- if (err != Z_OK)
- goto error;
-
- stream.next_in = in;
- stream.avail_in = inlen;
- stream.total_in = 0;
- stream.next_out = out;
- stream.avail_out = outlen;
- stream.total_out = 0;
-
- err = zlib_deflate(&stream, Z_FINISH);
- if (err != Z_STREAM_END)
- goto error;
-
- err = zlib_deflateEnd(&stream);
- if (err != Z_OK)
- goto error;
-
- if (stream.total_out >= stream.total_in)
- goto error;
-
- ret = stream.total_out;
-error:
- return ret;
-}
-
-/* Compress the text from big_oops_buf into oops_buf. */
-static int zip_oops(size_t text_len)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
- oops_data_sz);
- if (zipped_len < 0) {
- pr_err("nvram: compression failed; returned %d\n", zipped_len);
- pr_err("nvram: logging uncompressed oops/panic report\n");
- return -1;
- }
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(zipped_len);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
- return 0;
-}
-
-#ifdef CONFIG_PSTORE
-static int nvram_pstore_open(struct pstore_info *psi)
-{
- /* Reset the iterator to start reading partitions again */
- read_type = -1;
- return 0;
-}
-
-/**
- * nvram_pstore_write - pstore write callback for nvram
- * @type: Type of message logged
- * @reason: reason behind dump (oops/panic)
- * @id: identifier to indicate the write performed
- * @part: pstore writes data to registered buffer in parts,
- * part number will indicate the same.
- * @count: Indicates oops count
- * @compressed: Flag to indicate the log is compressed
- * @size: number of bytes written to the registered buffer
- * @psi: registered pstore_info structure
- *
- * Called by pstore_dump() when an oops or panic report is logged in the
- * printk buffer.
- * Returns 0 on successful write.
- */
-static int nvram_pstore_write(enum pstore_type_id type,
- enum kmsg_dump_reason reason,
- u64 *id, unsigned int part, int count,
- bool compressed, size_t size,
- struct pstore_info *psi)
-{
- int rc;
- unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
- struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
-
- /* part 1 has the recent messages from printk buffer */
- if (part > 1 || type != PSTORE_TYPE_DMESG ||
- clobbering_unread_rtas_event())
- return -1;
-
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(size);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
-
- if (compressed)
- err_type = ERR_TYPE_KERNEL_PANIC_GZ;
-
- rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + size), err_type, count);
-
- if (rc != 0)
- return rc;
-
- *id = part;
- return 0;
-}
-
-/*
- * Reads the oops/panic report, rtas, of-config and common partition.
- * Returns the length of the data we read from each partition.
- * Returns 0 if we've been called before.
- */
-static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
- int *count, struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi)
-{
- struct oops_log_info *oops_hdr;
- unsigned int err_type, id_no, size = 0;
- struct nvram_os_partition *part = NULL;
- char *buff = NULL;
- int sig = 0;
- loff_t p;
-
- read_type++;
-
- switch (nvram_type_ids[read_type]) {
- case PSTORE_TYPE_DMESG:
- part = &oops_log_partition;
- *type = PSTORE_TYPE_DMESG;
- break;
- case PSTORE_TYPE_PPC_RTAS:
- part = &rtas_log_partition;
- *type = PSTORE_TYPE_PPC_RTAS;
- time->tv_sec = last_rtas_event;
- time->tv_nsec = 0;
- break;
- case PSTORE_TYPE_PPC_OF:
- sig = NVRAM_SIG_OF;
- part = &of_config_partition;
- *type = PSTORE_TYPE_PPC_OF;
- *id = PSTORE_TYPE_PPC_OF;
- time->tv_sec = 0;
- time->tv_nsec = 0;
- break;
- case PSTORE_TYPE_PPC_COMMON:
- sig = NVRAM_SIG_SYS;
- part = &common_partition;
- *type = PSTORE_TYPE_PPC_COMMON;
- *id = PSTORE_TYPE_PPC_COMMON;
- time->tv_sec = 0;
- time->tv_nsec = 0;
- break;
- default:
- return 0;
- }
-
- if (!part->os_partition) {
- p = nvram_find_partition(part->name, sig, &size);
- if (p <= 0) {
- pr_err("nvram: Failed to find partition %s, "
- "err %d\n", part->name, (int)p);
- return 0;
- }
- part->index = p;
- part->size = size;
- }
-
- buff = kmalloc(part->size, GFP_KERNEL);
-
- if (!buff)
- return -ENOMEM;
-
- if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
- kfree(buff);
- return 0;
- }
-
- *count = 0;
-
- if (part->os_partition)
- *id = id_no;
-
- if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
- size_t length, hdr_size;
-
- oops_hdr = (struct oops_log_info *)buff;
- if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
- /* Old format oops header had 2-byte record size */
- hdr_size = sizeof(u16);
- length = be16_to_cpu(oops_hdr->version);
- time->tv_sec = 0;
- time->tv_nsec = 0;
- } else {
- hdr_size = sizeof(*oops_hdr);
- length = be16_to_cpu(oops_hdr->report_length);
- time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
- time->tv_nsec = 0;
- }
- *buf = kmalloc(length, GFP_KERNEL);
- if (*buf == NULL)
- return -ENOMEM;
- memcpy(*buf, buff + hdr_size, length);
- kfree(buff);
-
- if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
- *compressed = true;
- else
- *compressed = false;
- return length;
- }
-
- *buf = buff;
- return part->size;
-}
-
-static struct pstore_info nvram_pstore_info = {
- .owner = THIS_MODULE,
- .name = "nvram",
- .open = nvram_pstore_open,
- .read = nvram_pstore_read,
- .write = nvram_pstore_write,
-};
-
-static int nvram_pstore_init(void)
-{
- int rc = 0;
-
- nvram_pstore_info.buf = oops_data;
- nvram_pstore_info.bufsize = oops_data_sz;
-
- spin_lock_init(&nvram_pstore_info.buf_lock);
-
- rc = pstore_register(&nvram_pstore_info);
- if (rc != 0)
- pr_err("nvram: pstore_register() failed, defaults to "
- "kmsg_dump; returned %d\n", rc);
-
- return rc;
-}
-#else
-static int nvram_pstore_init(void)
-{
- return -1;
-}
-#endif
-
-static void __init nvram_init_oops_partition(int rtas_partition_exists)
-{
- int rc;
-
- rc = pseries_nvram_init_os_partition(&oops_log_partition);
- if (rc != 0) {
- if (!rtas_partition_exists)
- return;
- pr_notice("nvram: Using %s partition to log both"
- " RTAS errors and oops/panic reports\n",
- rtas_log_partition.name);
- memcpy(&oops_log_partition, &rtas_log_partition,
- sizeof(rtas_log_partition));
- }
- oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
- if (!oops_buf) {
- pr_err("nvram: No memory for %s partition\n",
- oops_log_partition.name);
- return;
- }
- oops_data = oops_buf + sizeof(struct oops_log_info);
- oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
-
- rc = nvram_pstore_init();
-
- if (!rc)
- return;
-
- /*
- * Figure compression (preceded by elimination of each line's <n>
- * severity prefix) will reduce the oops/panic report to at most
- * 45% of its original size.
- */
- big_oops_buf_sz = (oops_data_sz * 100) / 45;
- big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
- if (big_oops_buf) {
- stream.workspace = kmalloc(zlib_deflate_workspacesize(
- WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
- if (!stream.workspace) {
- pr_err("nvram: No memory for compression workspace; "
- "skipping compression of %s partition data\n",
- oops_log_partition.name);
- kfree(big_oops_buf);
- big_oops_buf = NULL;
- }
- } else {
- pr_err("No memory for uncompressed %s data; "
- "skipping compression\n", oops_log_partition.name);
- stream.workspace = NULL;
- }
-
- rc = kmsg_dump_register(&nvram_kmsg_dumper);
- if (rc != 0) {
- pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
- kfree(oops_buf);
- kfree(big_oops_buf);
- kfree(stream.workspace);
- }
-}
-
static int __init pseries_nvram_init_log_partitions(void)
{
int rc;
@@ -798,7 +211,7 @@ static int __init pseries_nvram_init_log_partitions(void)
/* Scan nvram for partitions */
nvram_scan_partitions();
- rc = pseries_nvram_init_os_partition(&rtas_log_partition);
+ rc = nvram_init_os_partition(&rtas_log_partition);
nvram_init_oops_partition(rc == 0);
return 0;
}
@@ -834,72 +247,3 @@ int __init pSeries_nvram_init(void)
return 0;
}
-
-/*
- * This is our kmsg_dump callback, called after an oops or panic report
- * has been written to the printk buffer. We want to capture as much
- * of the printk buffer as possible. First, capture as much as we can
- * that we think will compress sufficiently to fit in the lnx,oops-log
- * partition. If that's too much, go back and capture uncompressed text.
- */
-static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- static unsigned int oops_count = 0;
- static bool panicking = false;
- static DEFINE_SPINLOCK(lock);
- unsigned long flags;
- size_t text_len;
- unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
- int rc = -1;
-
- switch (reason) {
- case KMSG_DUMP_RESTART:
- case KMSG_DUMP_HALT:
- case KMSG_DUMP_POWEROFF:
- /* These are almost always orderly shutdowns. */
- return;
- case KMSG_DUMP_OOPS:
- break;
- case KMSG_DUMP_PANIC:
- panicking = true;
- break;
- case KMSG_DUMP_EMERG:
- if (panicking)
- /* Panic report already captured. */
- return;
- break;
- default:
- pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
- __func__, (int) reason);
- return;
- }
-
- if (clobbering_unread_rtas_event())
- return;
-
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (big_oops_buf) {
- kmsg_dump_get_buffer(dumper, false,
- big_oops_buf, big_oops_buf_sz, &text_len);
- rc = zip_oops(text_len);
- }
- if (rc != 0) {
- kmsg_dump_rewind(dumper);
- kmsg_dump_get_buffer(dumper, false,
- oops_data, oops_data_sz, &text_len);
- err_type = ERR_TYPE_KERNEL_PANIC;
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(text_len);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
- }
-
- (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + text_len), err_type,
- ++oops_count);
-
- spin_unlock_irqrestore(&lock, flags);
-}
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 89e23811199c..5d4a3df59d0c 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -32,6 +32,8 @@
#include <asm/firmware.h>
#include <asm/eeh.h>
+#include "pseries.h"
+
static struct pci_bus *
find_bus_among_children(struct pci_bus *bus,
struct device_node *dn)
@@ -75,6 +77,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
return NULL;
rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, dn, 0);
+ phb->controller_ops = pseries_pci_controller_ops;
pci_devs_phb_init_dynamic(phb);
@@ -82,7 +85,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
eeh_dev_phb_init_dynamic(phb);
if (dn->child)
- eeh_add_device_tree_early(dn);
+ eeh_add_device_tree_early(PCI_DN(dn));
pcibios_scan_phb(phb);
pcibios_finish_adding_to_bus(phb->bus);
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 1796c5438cc6..8411c27293e4 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -11,6 +11,7 @@
#define _PSERIES_PSERIES_H
#include <linux/interrupt.h>
+#include <asm/rtas.h>
struct device_node;
@@ -60,11 +61,24 @@ extern struct device_node *dlpar_configure_connector(__be32,
struct device_node *);
extern int dlpar_attach_node(struct device_node *);
extern int dlpar_detach_node(struct device_node *);
+extern int dlpar_acquire_drc(u32 drc_index);
+extern int dlpar_release_drc(u32 drc_index);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
+#else
+static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/* PCI root bridge prepare function override for pseries */
struct pci_host_bridge;
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
+extern struct pci_controller_ops pseries_pci_controller_ops;
+
unsigned long pseries_memory_block_size(void);
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index e445b6701f50..df6a7041922b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -265,7 +265,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
update_dn_pci_info(np, pci->phb);
/* Create EEH device for the OF node */
- eeh_dev_init(np, pci->phb);
+ eeh_dev_init(PCI_DN(np), pci->phb);
}
break;
default:
@@ -461,6 +461,47 @@ static long pseries_little_endian_exceptions(void)
}
#endif
+static void __init find_and_init_phbs(void)
+{
+ struct device_node *node;
+ struct pci_controller *phb;
+ struct device_node *root = of_find_node_by_path("/");
+
+ for_each_child_of_node(root, node) {
+ if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
+ strcmp(node->type, "pciex") != 0))
+ continue;
+
+ phb = pcibios_alloc_controller(node);
+ if (!phb)
+ continue;
+ rtas_setup_phb(phb);
+ pci_process_bridge_OF_ranges(phb, node, 0);
+ isa_bridge_find_early(phb);
+ phb->controller_ops = pseries_pci_controller_ops;
+ }
+
+ of_node_put(root);
+ pci_devs_phb_init();
+
+ /*
+ * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
+ * in chosen.
+ */
+ if (of_chosen) {
+ const int *prop;
+
+ prop = of_get_property(of_chosen,
+ "linux,pci-probe-only", NULL);
+ if (prop) {
+ if (*prop)
+ pci_add_flags(PCI_PROBE_ONLY);
+ else
+ pci_clear_flags(PCI_PROBE_ONLY);
+ }
+ }
+}
+
static void __init pSeries_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -793,6 +834,10 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
void pSeries_final_fixup(void) { }
#endif
+struct pci_controller_ops pseries_pci_controller_ops = {
+ .probe_mode = pSeries_pci_probe_mode,
+};
+
define_machine(pseries) {
.name = "pSeries",
.probe = pSeries_probe,
@@ -801,7 +846,6 @@ define_machine(pseries) {
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
.pcibios_fixup = pSeries_final_fixup,
- .pci_probe_mode = pSeries_pci_probe_mode,
.restart = rtas_restart,
.halt = rtas_halt,
.panic = rtas_os_term,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index a3555b10c1a5..6932ea803e33 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -197,16 +197,14 @@ static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
xics_cause_ipi(cpu, data);
}
-static __init int pSeries_smp_probe(void)
+static __init void pSeries_smp_probe(void)
{
- int ret = xics_smp_probe();
+ xics_smp_probe();
if (cpu_has_feature(CPU_FTR_DBELL)) {
xics_cause_ipi = smp_ops->cause_ipi;
smp_ops->cause_ipi = pSeries_cause_ipi_mux;
}
-
- return ret;
}
static struct smp_ops_t pSeries_mpic_smp_ops = {
diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl
deleted file mode 100755
index 3f46e8b9c56d..000000000000
--- a/arch/powerpc/relocs_check.pl
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/perl
-
-# Copyright © 2009 IBM Corporation
-
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version
-# 2 of the License, or (at your option) any later version.
-
-# This script checks the relocations of a vmlinux for "suspicious"
-# relocations.
-
-use strict;
-use warnings;
-
-if ($#ARGV != 1) {
- die "$0 [path to objdump] [path to vmlinux]\n";
-}
-
-# Have Kbuild supply the path to objdump so we handle cross compilation.
-my $objdump = shift;
-my $vmlinux = shift;
-my $bad_relocs_count = 0;
-my $bad_relocs = "";
-my $old_binutils = 0;
-
-open(FD, "$objdump -R $vmlinux|") or die;
-while (<FD>) {
- study $_;
-
- # Only look at relocation lines.
- next if (!/\s+R_/);
-
- # These relocations are okay
- # On PPC64:
- # R_PPC64_RELATIVE, R_PPC64_NONE, R_PPC64_ADDR64
- # On PPC:
- # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
- # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
- # R_PPC_NONE
-
- next if (/\bR_PPC64_RELATIVE\b/ or /\bR_PPC64_NONE\b/ or
- /\bR_PPC64_ADDR64\s+mach_/);
- next if (/\bR_PPC_ADDR16_LO\b/ or /\bR_PPC_ADDR16_HI\b/ or
- /\bR_PPC_ADDR16_HA\b/ or /\bR_PPC_RELATIVE\b/ or
- /\bR_PPC_NONE\b/);
-
- # If we see this type of relocation it's an idication that
- # we /may/ be using an old version of binutils.
- if (/R_PPC64_UADDR64/) {
- $old_binutils++;
- }
-
- $bad_relocs_count++;
- $bad_relocs .= $_;
-}
-
-if ($bad_relocs_count) {
- print "WARNING: $bad_relocs_count bad relocations\n";
- print $bad_relocs;
-}
-
-if ($old_binutils) {
- print "WARNING: You need at least binutils >= 2.19 to build a ".
- "CONFIG_RELOCATABLE kernel\n";
-}
diff --git a/arch/powerpc/relocs_check.sh b/arch/powerpc/relocs_check.sh
new file mode 100755
index 000000000000..2e4ebd0e25b3
--- /dev/null
+++ b/arch/powerpc/relocs_check.sh
@@ -0,0 +1,59 @@
+#!/bin/sh
+
+# Copyright © 2015 IBM Corporation
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks the relocations of a vmlinux for "suspicious"
+# relocations.
+
+# based on relocs_check.pl
+# Copyright © 2009 IBM Corporation
+
+if [ $# -lt 2 ]; then
+ echo "$0 [path to objdump] [path to vmlinux]" 1>&2
+ exit 1
+fi
+
+# Have Kbuild supply the path to objdump so we handle cross compilation.
+objdump="$1"
+vmlinux="$2"
+
+bad_relocs=$(
+"$objdump" -R "$vmlinux" |
+ # Only look at relocation lines.
+ grep -E '\<R_' |
+ # These relocations are okay
+ # On PPC64:
+ # R_PPC64_RELATIVE, R_PPC64_NONE
+ # R_PPC64_ADDR64 mach_<name>
+ # On PPC:
+ # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
+ # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
+ # R_PPC_NONE
+ grep -F -w -v 'R_PPC64_RELATIVE
+R_PPC64_NONE
+R_PPC_ADDR16_LO
+R_PPC_ADDR16_HI
+R_PPC_ADDR16_HA
+R_PPC_RELATIVE
+R_PPC_NONE' |
+ grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_'
+)
+
+if [ -z "$bad_relocs" ]; then
+ exit 0
+fi
+
+num_bad=$(echo "$bad_relocs" | wc -l)
+echo "WARNING: $num_bad bad relocations"
+echo "$bad_relocs"
+
+# If we see this type of relocation it's an indication that
+# we /may/ be using an old version of binutils.
+if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then
+ echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel"
+fi
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 9e5353ff6d1b..d00a5663e312 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -369,7 +369,7 @@ static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
-void __init iommu_init_early_dart(void)
+void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
{
struct device_node *dn;
@@ -395,8 +395,8 @@ void __init iommu_init_early_dart(void)
if (dart_is_u4)
ppc_md.dma_set_mask = dart_dma_set_mask;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
+ controller_ops->dma_dev_setup = pci_dma_dev_setup_dart;
+ controller_ops->dma_bus_setup = pci_dma_bus_setup_dart;
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_iommu_ops);
@@ -404,8 +404,8 @@ void __init iommu_init_early_dart(void)
bail:
/* If init failed, use direct iommu and null setup functions */
- ppc_md.pci_dma_dev_setup = NULL;
- ppc_md.pci_dma_bus_setup = NULL;
+ controller_ops->dma_dev_setup = NULL;
+ controller_ops->dma_bus_setup = NULL;
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_direct_ops);
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 2d8a101b6b9e..121e26fffd50 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -54,7 +54,7 @@ bool dcr_map_ok_generic(dcr_host_t host)
else if (host.type == DCR_HOST_MMIO)
return dcr_map_ok_mmio(host.host.mmio);
else
- return 0;
+ return false;
}
EXPORT_SYMBOL_GPL(dcr_map_ok_generic);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 4bbb4b8dfd09..f086c6f22dc9 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -162,7 +162,17 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
msg->address_lo = lower_32_bits(address);
msg->address_hi = upper_32_bits(address);
- msg->data = hwirq;
+ /*
+ * MPIC version 2.0 has erratum PIC1, which breaks both MSI
+ * and MSI-X. This is a workaround that lets MSI-X function
+ * properly; it only helps MSI-X, so MSI is prevented on
+ * buggy chips in fsl_setup_msi_irqs().
+ */
+ if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
+ msg->data = __swab32(hwirq);
+ else
+ msg->data = hwirq;
pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
(hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
@@ -180,8 +190,16 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct msi_msg msg;
struct fsl_msi *msi_data;
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("fslmsi: MSI-X untested, trying anyway.\n");
+ if (type == PCI_CAP_ID_MSI) {
+ /*
+ * MPIC version 2.0 has erratum PIC1, so plain MSI cannot
+ * work. Check for the erratum and prevent MSI from being
+ * used on boards that have it.
+ */
+ list_for_each_entry(msi_data, &msi_head, list)
+ if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
+ return -EINVAL;
+ }
/*
* If the PCI node has an fsl,msi property, then we need to use it
@@ -446,6 +464,11 @@ static int fsl_of_msi_probe(struct platform_device *dev)
msi->feature = features->fsl_pic_ip;
+ /* For erratum PIC1 on MPIC version 2.0 */
+ if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
+ && (fsl_mpic_primary_get_version() == 0x0200))
+ msi->feature |= MSI_HW_ERRATA_ENDIAN;
+
/*
* Remember the phandle, so that we can match with any PCI nodes
* that have an "fsl,msi" property.
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 420cfcbdac01..a67359d993e5 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -27,6 +27,8 @@
#define FSL_PIC_IP_IPIC 0x00000002
#define FSL_PIC_IP_VMPIC 0x00000003
+#define MSI_HW_ERRATA_ENDIAN 0x00000010
+
struct fsl_msi_cascade_data;
struct fsl_msi {
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4b74c276e427..9a8fcf0d79d7 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -111,6 +111,18 @@ static struct pci_ops fsl_indirect_pcie_ops =
#define MAX_PHYS_ADDR_BITS 40
static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
+#ifdef CONFIG_SWIOTLB
+static void setup_swiotlb_ops(struct pci_controller *hose)
+{
+ if (ppc_swiotlb_enable) {
+ hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ }
+}
+#else
+static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
+#endif
+
static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -548,6 +560,9 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
/* Setup PEX window registers */
setup_pci_atmu(hose);
+ /* Set up controller operations */
+ setup_swiotlb_ops(hose);
+
return 0;
no_bridge:
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index bbfbbf2025fd..b2b8447a227a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -655,7 +655,6 @@ static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
static inline void mpic_eoi(struct mpic *mpic)
{
mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
- (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
}
/*
@@ -1676,31 +1675,6 @@ void __init mpic_init(struct mpic *mpic)
mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
}
-void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
-{
- u32 v;
-
- v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
- v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
- v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
- mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
-}
-
-void __init mpic_set_serial_int(struct mpic *mpic, int enable)
-{
- unsigned long flags;
- u32 v;
-
- raw_spin_lock_irqsave(&mpic_lock, flags);
- v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
- if (enable)
- v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
- else
- v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
- mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
- raw_spin_unlock_irqrestore(&mpic_lock, flags);
-}
-
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
struct mpic *mpic = mpic_find(irq);
@@ -1923,7 +1897,7 @@ void smp_mpic_message_pass(int cpu, int msg)
msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}
-int __init smp_mpic_probe(void)
+void __init smp_mpic_probe(void)
{
int nr_cpus;
@@ -1935,8 +1909,6 @@ int __init smp_mpic_probe(void)
if (nr_cpus > 1)
mpic_request_ipis();
-
- return nr_cpus;
}
void smp_mpic_setup_cpu(int cpu)
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index d09994164daf..7ea0174f6d3d 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -190,28 +190,3 @@ int par_io_of_config(struct device_node *np)
return 0;
}
EXPORT_SYMBOL(par_io_of_config);
-
-#ifdef DEBUG
-static void dump_par_io(void)
-{
- unsigned int i;
-
- printk(KERN_INFO "%s: par_io=%p\n", __func__, par_io);
- for (i = 0; i < num_par_io_ports; i++) {
- printk(KERN_INFO " cpodr[%u]=%08x\n", i,
- in_be32(&par_io[i].cpodr));
- printk(KERN_INFO " cpdata[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdata));
- printk(KERN_INFO " cpdir1[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdir1));
- printk(KERN_INFO " cpdir2[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdir2));
- printk(KERN_INFO " cppar1[%u]=%08x\n", i,
- in_be32(&par_io[i].cppar1));
- printk(KERN_INFO " cppar2[%u]=%08x\n", i,
- in_be32(&par_io[i].cppar2));
- }
-
-}
-EXPORT_SYMBOL(dump_par_io);
-#endif /* DEBUG */
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index befaf1123f7f..5f91628209eb 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -43,11 +43,6 @@ u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
}
EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
-void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
-{
- out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
-}
-
void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
{
struct ucc_slow_info *us_info = uccs->us_info;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 125743b58c70..878a54036a25 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -140,15 +140,13 @@ static void xics_request_ipi(void)
IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
-int __init xics_smp_probe(void)
+void __init xics_smp_probe(void)
{
/* Setup cause_ipi callback based on which ICP is used */
smp_ops->cause_ipi = icp_ops->cause_ipi;
/* Register all the IPIs */
xics_request_ipi();
-
- return num_possible_cpus();
}
#endif /* CONFIG_SMP */
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 647c3eccc3d0..2938934c6518 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -4,6 +4,5 @@ obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_CRYPTO_HW) += crypto/
obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
-obj-$(CONFIG_MATHEMU) += math-emu/
obj-y += net/
obj-$(CONFIG_PCI) += pci/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 373cd5badf1c..b06dc3839268 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -35,7 +35,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
config ARCH_DMA_ADDR_T_64BIT
- def_bool 64BIT
+ def_bool y
config GENERIC_LOCKBREAK
def_bool y if SMP && PREEMPT
@@ -59,12 +59,13 @@ config PCI_QUIRKS
def_bool n
config ARCH_SUPPORTS_UPROBES
- def_bool 64BIT
+ def_bool y
config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SG_CHAIN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -110,19 +111,19 @@ config S390
select GENERIC_TIME_VSYSCALL
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+ select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
- select HAVE_BPF_JIT if 64BIT && PACK_STACK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
- select HAVE_DYNAMIC_FTRACE if 64BIT
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
- select HAVE_FUNCTION_TRACER if 64BIT
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@@ -132,7 +133,8 @@ config S390
select HAVE_KERNEL_XZ
select HAVE_KPROBES
select HAVE_KRETPROBES
- select HAVE_KVM if 64BIT
+ select HAVE_KVM
+ select HAVE_LIVEPATCH
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
@@ -141,7 +143,6 @@ config S390
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
@@ -155,10 +156,17 @@ config S390
config SCHED_OMIT_FRAME_POINTER
def_bool y
+config PGTABLE_LEVELS
+ int
+ default 4 if 64BIT
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
+source "kernel/livepatch/Kconfig"
+
menu "Processor type and features"
config HAVE_MARCH_Z900_FEATURES
@@ -190,18 +198,11 @@ config HAVE_MARCH_Z13_FEATURES
choice
prompt "Processor type"
- default MARCH_G5
-
-config MARCH_G5
- bool "System/390 model G5 and G6"
- depends on !64BIT
- help
- Select this to build a 31 bit kernel that works
- on all ESA/390 and z/Architecture machines.
+ default MARCH_Z900
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
- select HAVE_MARCH_Z900_FEATURES if 64BIT
+ select HAVE_MARCH_Z900_FEATURES
help
Select this to enable optimizations for model z800/z900 (2064 and
2066 series). This will enable some optimizations that are not
@@ -209,7 +210,7 @@ config MARCH_Z900
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
- select HAVE_MARCH_Z990_FEATURES if 64BIT
+ select HAVE_MARCH_Z990_FEATURES
help
Select this to enable optimizations for model z890/z990 (2084 and
2086 series). The kernel will be slightly faster but will not work
@@ -217,7 +218,7 @@ config MARCH_Z990
config MARCH_Z9_109
bool "IBM System z9"
- select HAVE_MARCH_Z9_109_FEATURES if 64BIT
+ select HAVE_MARCH_Z9_109_FEATURES
help
Select this to enable optimizations for IBM System z9 (2094 and
2096 series). The kernel will be slightly faster but will not work
@@ -225,7 +226,7 @@ config MARCH_Z9_109
config MARCH_Z10
bool "IBM System z10"
- select HAVE_MARCH_Z10_FEATURES if 64BIT
+ select HAVE_MARCH_Z10_FEATURES
help
Select this to enable optimizations for IBM System z10 (2097 and
2098 series). The kernel will be slightly faster but will not work
@@ -233,7 +234,7 @@ config MARCH_Z10
config MARCH_Z196
bool "IBM zEnterprise 114 and 196"
- select HAVE_MARCH_Z196_FEATURES if 64BIT
+ select HAVE_MARCH_Z196_FEATURES
help
Select this to enable optimizations for IBM zEnterprise 114 and 196
(2818 and 2817 series). The kernel will be slightly faster but will
@@ -241,7 +242,7 @@ config MARCH_Z196
config MARCH_ZEC12
bool "IBM zBC12 and zEC12"
- select HAVE_MARCH_ZEC12_FEATURES if 64BIT
+ select HAVE_MARCH_ZEC12_FEATURES
help
Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
2827 series). The kernel will be slightly faster but will not work on
@@ -249,7 +250,7 @@ config MARCH_ZEC12
config MARCH_Z13
bool "IBM z13"
- select HAVE_MARCH_Z13_FEATURES if 64BIT
+ select HAVE_MARCH_Z13_FEATURES
help
Select this to enable optimizations for IBM z13 (2964 series).
The kernel will be slightly faster but will not work on older
@@ -257,9 +258,6 @@ config MARCH_Z13
endchoice
-config MARCH_G5_TUNE
- def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT
-
config MARCH_Z900_TUNE
def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
@@ -298,9 +296,6 @@ config TUNE_DEFAULT
Tune the generated code for the target processor for which the kernel
will be compiled.
-config TUNE_G5
- bool "System/390 model G5 and G6"
-
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
@@ -326,21 +321,14 @@ endchoice
config 64BIT
def_bool y
- prompt "64 bit kernel"
- help
- Select this option if you have an IBM z/Architecture machine
- and want to use the 64 bit addressing mode.
-
-config 32BIT
- def_bool y if !64BIT
config COMPAT
def_bool y
prompt "Kernel support for 31 bit emulation"
- depends on 64BIT
select COMPAT_BINFMT_ELF if BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
+ depends on MULTIUSER
help
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
@@ -376,8 +364,7 @@ config NR_CPUS
int "Maximum number of CPUs (2-512)"
range 2 512
depends on SMP
- default "32" if !64BIT
- default "64" if 64BIT
+ default "64"
help
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 512 and the
@@ -418,15 +405,6 @@ config SCHED_TOPOLOGY
source kernel/Kconfig.preempt
-config MATHEMU
- def_bool y
- prompt "IEEE FPU emulation"
- depends on MARCH_G5
- help
- This option is required for IEEE compliant floating point arithmetic
- on older ESA/390 machines. Say Y unless you know your machine doesn't
- need this.
-
source kernel/Kconfig.hz
endmenu
@@ -437,7 +415,6 @@ config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
select SPARSEMEM_VMEMMAP
- select SPARSEMEM_STATIC if !64BIT
config ARCH_SPARSEMEM_DEFAULT
def_bool y
@@ -453,7 +430,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y
- depends on 64BIT
config FORCE_MAX_ZONEORDER
int
@@ -528,7 +504,6 @@ config QDIO
menuconfig PCI
bool "PCI support"
- depends on 64BIT
select HAVE_DMA_ATTRS
select PCI_MSI
help
@@ -598,7 +573,6 @@ config CHSC_SCH
config SCM_BUS
def_bool y
- depends on 64BIT
prompt "SCM bus driver"
help
Bus driver for Storage Class Memory.
@@ -620,7 +594,7 @@ menu "Dump support"
config CRASH_DUMP
bool "kernel crash dumps"
- depends on 64BIT && SMP
+ depends on SMP
select KEXEC
help
Generate crash dump after being started by kexec.
@@ -659,7 +633,7 @@ endmenu
menu "Power Management"
config ARCH_HIBERNATION_POSSIBLE
- def_bool y if 64BIT
+ def_bool y
source "kernel/power/Kconfig"
@@ -810,7 +784,6 @@ source "arch/s390/kvm/Kconfig"
config S390_GUEST
def_bool y
prompt "s390 support for virtio devices"
- depends on 64BIT
select TTY
select VIRTUALIZATION
select VIRTIO
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index acb6859c6a95..667b1bca5681 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -13,15 +13,6 @@
# Copyright (C) 1994 by Linus Torvalds
#
-ifndef CONFIG_64BIT
-LD_BFD := elf32-s390
-LDFLAGS := -m elf_s390
-KBUILD_CFLAGS += -m31
-KBUILD_AFLAGS += -m31
-UTS_MACHINE := s390
-STACK_SIZE := 8192
-CHECKFLAGS += -D__s390__ -msize-long
-else
LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fPIC
@@ -31,11 +22,9 @@ KBUILD_AFLAGS += -m64
UTS_MACHINE := s390x
STACK_SIZE := 16384
CHECKFLAGS += -D__s390__ -D__s390x__
-endif
export LD_BFD
-mflags-$(CONFIG_MARCH_G5) := -march=g5
mflags-$(CONFIG_MARCH_Z900) := -march=z900
mflags-$(CONFIG_MARCH_Z990) := -march=z990
mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
@@ -47,7 +36,6 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
-cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
@@ -104,7 +92,7 @@ KBUILD_AFLAGS += $(aflags-y)
OBJCOPYFLAGS := -O binary
head-y := arch/s390/kernel/head.o
-head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
+head-y += arch/s390/kernel/head64.o
# See arch/s390/Kbuild for content of core part of the kernel
core-y += arch/s390/
@@ -129,9 +117,7 @@ zfcpdump:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
vdso_install:
-ifeq ($(CONFIG_64BIT),y)
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
-endif
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
archclean:
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index f90d1fc6d603..d4788111c161 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -4,13 +4,11 @@
# create a compressed vmlinux image from the original vmlinux
#
-BITS := $(if $(CONFIG_64BIT),64,31)
-
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += misc.o piggy.o sizes.h head$(BITS).o
+targets += misc.o piggy.o sizes.h head.o
-KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
@@ -19,7 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
-OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o
+OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
@@ -34,8 +32,8 @@ quiet_cmd_sizes = GEN $@
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
-AFLAGS_head$(BITS).o += -I$(obj)
-$(obj)/head$(BITS).o: $(obj)/sizes.h
+AFLAGS_head.o += -I$(obj)
+$(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(obj)
$(obj)/misc.o: $(obj)/sizes.h
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head.S
index f86a4eef28a9..f86a4eef28a9 100644
--- a/arch/s390/boot/compressed/head64.S
+++ b/arch/s390/boot/compressed/head.S
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
deleted file mode 100644
index e8c9e18b8039..000000000000
--- a/arch/s390/boot/compressed/head31.S
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Startup glue code to uncompress the kernel
- *
- * Copyright IBM Corp. 2010
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include "sizes.h"
-
-__HEAD
-ENTRY(startup_continue)
- basr %r13,0 # get base
-.LPG1:
- # setup stack
- l %r15,.Lstack-.LPG1(%r13)
- ahi %r15,-96
- l %r1,.Ldecompress-.LPG1(%r13)
- basr %r14,%r1
- # setup registers for memory mover & branch to target
- lr %r4,%r2
- l %r2,.Loffset-.LPG1(%r13)
- la %r4,0(%r2,%r4)
- l %r3,.Lmvsize-.LPG1(%r13)
- lr %r5,%r3
- # move the memory mover someplace safe
- la %r1,0x200
- mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
- # decompress image is started at 0x11000
- lr %r6,%r2
- br %r1
-mover:
- mvcle %r2,%r4,0
- jo mover
- br %r6
-mover_end:
-
- .align 8
-.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
-.Ldecompress:
- .long decompress_kernel
-.Loffset:
- .long 0x11000
-.Lmvsize:
- .long SZ__bss_start
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 8e1fb8239287..747735f83426 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -1,12 +1,7 @@
#include <asm-generic/vmlinux.lds.h>
-#ifdef CONFIG_64BIT
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
-#else
-OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390:31-bit)
-#endif
ENTRY(startup)
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 6c5cc6da7111..d9c4c313fbc6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,9 +3,10 @@
*
* Support for s390 cryptographic instructions.
*
- * Copyright IBM Corp. 2003, 2007
+ * Copyright IBM Corp. 2003, 2015
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
+ * Harald Freudenberger (freude@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -28,15 +29,17 @@
#define CRYPT_S390_MSA 0x1
#define CRYPT_S390_MSA3 0x2
#define CRYPT_S390_MSA4 0x4
+#define CRYPT_S390_MSA5 0x8
/* s390 cryptographic operations */
enum crypt_s390_operations {
- CRYPT_S390_KM = 0x0100,
- CRYPT_S390_KMC = 0x0200,
- CRYPT_S390_KIMD = 0x0300,
- CRYPT_S390_KLMD = 0x0400,
- CRYPT_S390_KMAC = 0x0500,
- CRYPT_S390_KMCTR = 0x0600
+ CRYPT_S390_KM = 0x0100,
+ CRYPT_S390_KMC = 0x0200,
+ CRYPT_S390_KIMD = 0x0300,
+ CRYPT_S390_KLMD = 0x0400,
+ CRYPT_S390_KMAC = 0x0500,
+ CRYPT_S390_KMCTR = 0x0600,
+ CRYPT_S390_PPNO = 0x0700
};
/*
@@ -138,6 +141,16 @@ enum crypt_s390_kmac_func {
KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
};
+/*
+ * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
+ * OPERATION) instruction
+ */
+enum crypt_s390_ppno_func {
+ PPNO_QUERY = CRYPT_S390_PPNO | 0,
+ PPNO_SHA512_DRNG_GEN = CRYPT_S390_PPNO | 3,
+ PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
+};
+
/**
* crypt_s390_km:
* @func: the function code passed to KM; see crypt_s390_km_func
@@ -162,11 +175,11 @@ static inline int crypt_s390_km(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb92e0000,%3,%1\n" /* KM opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -198,11 +211,11 @@ static inline int crypt_s390_kmc(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb92f0000,%3,%1\n" /* KMC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -233,11 +246,11 @@ static inline int crypt_s390_kimd(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -267,11 +280,11 @@ static inline int crypt_s390_klmd(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -302,11 +315,11 @@ static inline int crypt_s390_kmac(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -340,11 +353,11 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
int ret = -1;
asm volatile(
- "0: .insn rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
"+a" (__ctr)
: "d" (__func), "a" (__param) : "cc", "memory");
@@ -354,6 +367,47 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
}
/**
+ * crypt_s390_ppno:
+ * @func: the function code passed to PPNO; see crypt_s390_ppno_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @dest_len: size of destination memory area in bytes
+ * @seed: address of seed data
+ * @seed_len: size of seed data in bytes
+ *
+ * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
+ * operation of the CPU.
+ *
+ * Returns -1 on failure, 0 for the query function, or the number of
+ * random bytes stored in the dest buffer for the generate function
+ */
+static inline int crypt_s390_ppno(long func, void *param,
+ u8 *dest, long dest_len,
+ const u8 *seed, long seed_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param; /* param block (240 bytes) */
+ register u8 *__dest asm("2") = dest; /* buf for recv random bytes */
+ register long __dest_len asm("3") = dest_len; /* requested random bytes */
+ register const u8 *__seed asm("4") = seed; /* buf with seed data */
+ register long __seed_len asm("5") = seed_len; /* bytes in seed buf */
+ int ret = -1;
+
+ asm volatile (
+ "0: .insn rre,0xb93c0000,%1,%5\n" /* PPNO opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (ret), "+a"(__dest), "+d"(__dest_len)
+ : "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
+ : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
+}
+
+/**
* crypt_s390_func_available:
* @func: the function code of the specific function; 0 if op in general
*
@@ -369,12 +423,11 @@ static inline int crypt_s390_func_available(int func,
if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
return 0;
-
- if (facility_mask & CRYPT_S390_MSA3 &&
- (!test_facility(2) || !test_facility(76)))
+ if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+ return 0;
+ if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
return 0;
- if (facility_mask & CRYPT_S390_MSA4 &&
- (!test_facility(2) || !test_facility(77)))
+ if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
return 0;
switch (func & CRYPT_S390_OP_MASK) {
@@ -394,8 +447,12 @@ static inline int crypt_s390_func_available(int func,
ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
break;
case CRYPT_S390_KMCTR:
- ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0,
- NULL);
+ ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
+ NULL, NULL, 0, NULL);
+ break;
+ case CRYPT_S390_PPNO:
+ ret = crypt_s390_ppno(PPNO_QUERY, &status,
+ NULL, 0, NULL, 0);
break;
default:
return 0;
@@ -423,15 +480,14 @@ static inline int crypt_s390_pcc(long func, void *param)
int ret = -1;
asm volatile(
- "0: .insn rre,0xb92c0000,0,0 \n" /* PCC opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb92c0000,0,0\n" /* PCC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (ret)
: "d" (__func), "a" (__param) : "cc", "memory");
return ret;
}
-
#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
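
For orientation, here is a minimal, hypothetical sketch of how the PPNO wrapper introduced above could be driven, following the query/seed/generate pattern the reworked prng driver below relies on. The wrapper and constant names (crypt_s390_func_available, crypt_s390_ppno, CRYPT_S390_MSA5, PPNO_SHA512_DRNG_SEED, PPNO_SHA512_DRNG_GEN) and the 240-byte parameter block size are taken from the hunks above; the example function, its seed handling, and the seed length are illustrative only, assume the usual kernel build environment, and are not part of this patch.

    /* Illustrative sketch only -- not part of this patch. */
    static int prng_ppno_example(u8 *buf, size_t len)
    {
            u8 parm[240] = {};      /* PPNO SHA-512 DRNG state (240-byte param block) */
            u8 seed[48];            /* some seed material; length chosen arbitrarily */
            int ret;

            /* both the PPNO instruction and the SHA-512 DRNG must be available */
            if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN, CRYPT_S390_MSA5))
                    return -EOPNOTSUPP;

            /* instantiate (seed) the DRNG state */
            get_random_bytes(seed, sizeof(seed));
            ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, parm,
                                  NULL, 0, seed, sizeof(seed));
            if (ret < 0)
                    return ret;

            /* pull len pseudorandom bytes into buf */
            ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, parm,
                                  buf, len, NULL, 0);
            return ret < 0 ? ret : 0;
    }
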
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 94a35a4c1b48..1f374b39a4ec 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -1,106 +1,529 @@
/*
- * Copyright IBM Corp. 2006, 2007
+ * Copyright IBM Corp. 2006, 2015
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ * Harald Freudenberger <freude@de.ibm.com>
* Driver for the s390 pseudo random number generator
*/
+
+#define KMSG_COMPONENT "prng"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/fs.h>
+#include <linux/fips.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
+#include <asm/timex.h>
#include "crypt_s390.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>");
+MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 PRNG interface");
-static int prng_chunk_size = 256;
-module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH);
+
+#define PRNG_MODE_AUTO 0
+#define PRNG_MODE_TDES 1
+#define PRNG_MODE_SHA512 2
+
+static unsigned int prng_mode = PRNG_MODE_AUTO;
+module_param_named(mode, prng_mode, int, 0);
+MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512");
+
+
+#define PRNG_CHUNKSIZE_TDES_MIN 8
+#define PRNG_CHUNKSIZE_TDES_MAX (64*1024)
+#define PRNG_CHUNKSIZE_SHA512_MIN 64
+#define PRNG_CHUNKSIZE_SHA512_MAX (64*1024)
+
+static unsigned int prng_chunk_size = 256;
+module_param_named(chunksize, prng_chunk_size, int, 0);
MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
-static int prng_entropy_limit = 4096;
-module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
-MODULE_PARM_DESC(prng_entropy_limit,
- "PRNG add entropy after that much bytes were produced");
+
+#define PRNG_RESEED_LIMIT_TDES 4096
+#define PRNG_RESEED_LIMIT_TDES_LOWER 4096
+#define PRNG_RESEED_LIMIT_SHA512 100000
+#define PRNG_RESEED_LIMIT_SHA512_LOWER 10000
+
+static unsigned int prng_reseed_limit;
+module_param_named(reseed_limit, prng_reseed_limit, int, 0);
+MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
+
/*
* Any one who considers arithmetical methods of producing random digits is,
* of course, in a state of sin. -- John von Neumann
*/
-struct s390_prng_data {
- unsigned long count; /* how many bytes were produced */
- char *buf;
+static int prng_errorflag;
+
+#define PRNG_GEN_ENTROPY_FAILED 1
+#define PRNG_SELFTEST_FAILED 2
+#define PRNG_INSTANTIATE_FAILED 3
+#define PRNG_SEED_FAILED 4
+#define PRNG_RESEED_FAILED 5
+#define PRNG_GEN_FAILED 6
+
+struct prng_ws_s {
+ u8 parm_block[32];
+ u32 reseed_counter;
+ u64 byte_counter;
};
-static struct s390_prng_data *p;
+struct ppno_ws_s {
+ u32 res;
+ u32 reseed_counter;
+ u64 stream_bytes;
+ u8 V[112];
+ u8 C[112];
+};
-/* copied from libica, use a non-zero initial parameter block */
-static unsigned char parm_block[32] = {
-0x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4,
-0x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0,
+struct prng_data_s {
+ struct mutex mutex;
+ union {
+ struct prng_ws_s prngws;
+ struct ppno_ws_s ppnows;
+ };
+ u8 *buf;
+ u32 rest;
+ u8 *prev;
};
-static int prng_open(struct inode *inode, struct file *file)
+static struct prng_data_s *prng_data;
+
+/* initial parameter block for tdes mode, copied from libica */
+static const u8 initial_parm_block[32] __initconst = {
+ 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
+ 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
+ 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
+ 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 };
+
+
+/*** helper functions ***/
+
+static int generate_entropy(u8 *ebuf, size_t nbytes)
{
- return nonseekable_open(inode, file);
+ int n, ret = 0;
+ u8 *pg, *h, hash[32];
+
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg) {
+ prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+ return -ENOMEM;
+ }
+
+ while (nbytes) {
+ /* fill page with urandom bytes */
+ get_random_bytes(pg, PAGE_SIZE);
+ /* exor page with stckf values */
+ for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
+ u64 *p = ((u64 *)pg) + n;
+ *p ^= get_tod_clock_fast();
+ }
+ n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
+ if (n < sizeof(hash))
+ h = hash;
+ else
+ h = ebuf;
+ /* generate sha256 from this page */
+ if (crypt_s390_kimd(KIMD_SHA_256, h,
+ pg, PAGE_SIZE) != PAGE_SIZE) {
+ prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+ if (n < sizeof(hash))
+ memcpy(ebuf, hash, n);
+ ret += n;
+ ebuf += n;
+ nbytes -= n;
+ }
+
+out:
+ free_page((unsigned long)pg);
+ return ret;
}
-static void prng_add_entropy(void)
+
+/*** tdes functions ***/
+
+static void prng_tdes_add_entropy(void)
{
__u64 entropy[4];
unsigned int i;
int ret;
for (i = 0; i < 16; i++) {
- ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy,
- (char *)entropy, sizeof(entropy));
+ ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+ (char *)entropy, (char *)entropy,
+ sizeof(entropy));
BUG_ON(ret < 0 || ret != sizeof(entropy));
- memcpy(parm_block, entropy, sizeof(entropy));
+ memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
}
}
-static void prng_seed(int nbytes)
+
+static void prng_tdes_seed(int nbytes)
{
char buf[16];
int i = 0;
- BUG_ON(nbytes > 16);
+ BUG_ON(nbytes > sizeof(buf));
+
get_random_bytes(buf, nbytes);
/* Add the entropy */
while (nbytes >= 8) {
- *((__u64 *)parm_block) ^= *((__u64 *)(buf+i));
- prng_add_entropy();
+ *((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i));
+ prng_tdes_add_entropy();
i += 8;
nbytes -= 8;
}
- prng_add_entropy();
+ prng_tdes_add_entropy();
+ prng_data->prngws.reseed_counter = 0;
+}
+
+
+static int __init prng_tdes_instantiate(void)
+{
+ int datalen;
+
+ pr_debug("prng runs in TDES mode with "
+ "chunksize=%d and reseed_limit=%u\n",
+ prng_chunk_size, prng_reseed_limit);
+
+ /* memory allocation, prng_data struct init, mutex init */
+ datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+ prng_data = kzalloc(datalen, GFP_KERNEL);
+ if (!prng_data) {
+ prng_errorflag = PRNG_INSTANTIATE_FAILED;
+ return -ENOMEM;
+ }
+ mutex_init(&prng_data->mutex);
+ prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+ memcpy(prng_data->prngws.parm_block, initial_parm_block, 32);
+
+ /* initialize the PRNG, add 128 bits of entropy */
+ prng_tdes_seed(16);
+
+ return 0;
}
-static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
- loff_t *ppos)
+
+static void prng_tdes_deinstantiate(void)
+{
+ pr_debug("The prng module stopped "
+ "after running in triple DES mode\n");
+ kzfree(prng_data);
+}
+
+
+/*** sha512 functions ***/
+
+static int __init prng_sha512_selftest(void)
{
- int chunk, n;
+ /* NIST DRBG testvector for Hash Drbg, Sha-512, Count #0 */
+ static const u8 seed[] __initconst = {
+ 0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a,
+ 0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22,
+ 0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b,
+ 0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c,
+ 0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa,
+ 0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 };
+ static const u8 V0[] __initconst = {
+ 0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76,
+ 0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22,
+ 0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60,
+ 0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1,
+ 0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95,
+ 0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b,
+ 0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79,
+ 0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03,
+ 0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd,
+ 0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36,
+ 0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0,
+ 0xd3, 0x89, 0xc6, 0xe0, 0xda, 0xc4, 0x81, 0x7e,
+ 0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea,
+ 0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 };
+ static const u8 C0[] __initconst = {
+ 0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95,
+ 0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e,
+ 0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94,
+ 0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86,
+ 0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50,
+ 0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f,
+ 0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9,
+ 0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43,
+ 0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee,
+ 0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a,
+ 0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9,
+ 0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09,
+ 0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc,
+ 0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e };
+ static const u8 random[] __initconst = {
+ 0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57,
+ 0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6,
+ 0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d,
+ 0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf,
+ 0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26,
+ 0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64,
+ 0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55,
+ 0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a,
+ 0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78,
+ 0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e,
+ 0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb,
+ 0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30,
+ 0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19,
+ 0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f,
+ 0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d,
+ 0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a,
+ 0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2,
+ 0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd,
+ 0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd,
+ 0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2,
+ 0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b,
+ 0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1,
+ 0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99,
+ 0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e,
+ 0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3,
+ 0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67,
+ 0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3,
+ 0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d,
+ 0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b,
+ 0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13,
+ 0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
+ 0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
+
int ret = 0;
- int tmp;
+ u8 buf[sizeof(random)];
+ struct ppno_ws_s ws;
+
+ memset(&ws, 0, sizeof(ws));
+
+ /* initial seed */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &ws, NULL, 0,
+ seed, sizeof(seed));
+ if (ret < 0) {
+ pr_err("The prng self test seed operation for the "
+ "SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* check working states V and C */
+ if (memcmp(ws.V, V0, sizeof(V0)) != 0
+ || memcmp(ws.C, C0, sizeof(C0)) != 0) {
+ pr_err("The prng self test state test "
+ "for the SHA-512 mode failed\n");
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* generate random bytes */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf),
+ NULL, 0);
+ if (ret < 0) {
+ pr_err("The prng self test generate operation for "
+ "the SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf),
+ NULL, 0);
+ if (ret < 0) {
+ pr_err("The prng self test generate operation for "
+ "the SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* check against expected data */
+ if (memcmp(buf, random, sizeof(random)) != 0) {
+ pr_err("The prng self test data test "
+ "for the SHA-512 mode failed\n");
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int __init prng_sha512_instantiate(void)
+{
+ int ret, datalen;
+ u8 seed[64];
+
+ pr_debug("prng runs in SHA-512 mode "
+ "with chunksize=%d and reseed_limit=%u\n",
+ prng_chunk_size, prng_reseed_limit);
+
+ /* memory allocation, prng_data struct init, mutex init */
+ datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+ if (fips_enabled)
+ datalen += prng_chunk_size;
+ prng_data = kzalloc(datalen, GFP_KERNEL);
+ if (!prng_data) {
+ prng_errorflag = PRNG_INSTANTIATE_FAILED;
+ return -ENOMEM;
+ }
+ mutex_init(&prng_data->mutex);
+ prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+
+ /* selftest */
+ ret = prng_sha512_selftest();
+ if (ret)
+ goto outfree;
+
+ /* generate initial seed bytestring, first 48 bytes of entropy */
+ ret = generate_entropy(seed, 48);
+ if (ret != 48)
+ goto outfree;
+ /* followed by 16 bytes of unique nonce */
+ get_tod_clock_ext(seed + 48);
+
+ /* initial seed of the ppno drng */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0,
+ seed, sizeof(seed));
+ if (ret < 0) {
+ prng_errorflag = PRNG_SEED_FAILED;
+ ret = -EIO;
+ goto outfree;
+ }
+
+ /* if fips mode is enabled, generate a first block of random
+ * bytes for the FIPS 140-2 Conditional Self Test */
+ if (fips_enabled) {
+ prng_data->prev = prng_data->buf + prng_chunk_size;
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows,
+ prng_data->prev,
+ prng_chunk_size,
+ NULL, 0);
+ if (ret < 0 || ret != prng_chunk_size) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ ret = -EIO;
+ goto outfree;
+ }
+ }
+
+ return 0;
+
+outfree:
+ kfree(prng_data);
+ return ret;
+}
+
+
+static void prng_sha512_deinstantiate(void)
+{
+ pr_debug("The prng module stopped after running in SHA-512 mode\n");
+ kzfree(prng_data);
+}
+
+
+static int prng_sha512_reseed(void)
+{
+ int ret;
+ u8 seed[32];
+
+ /* generate 32 bytes of fresh entropy */
+ ret = generate_entropy(seed, sizeof(seed));
+ if (ret != sizeof(seed))
+ return ret;
+
+ /* do a reseed of the ppno drng with this bytestring */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0,
+ seed, sizeof(seed));
+ if (ret) {
+ prng_errorflag = PRNG_RESEED_FAILED;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int prng_sha512_generate(u8 *buf, size_t nbytes)
+{
+ int ret;
+
+ /* reseed needed ? */
+ if (prng_data->ppnows.reseed_counter > prng_reseed_limit) {
+ ret = prng_sha512_reseed();
+ if (ret)
+ return ret;
+ }
+
+ /* PPNO generate */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows, buf, nbytes,
+ NULL, 0);
+ if (ret < 0 || ret != nbytes) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ return -EIO;
+ }
+
+ /* FIPS 140-2 Conditional Self Test */
+ if (fips_enabled) {
+ if (!memcmp(prng_data->prev, buf, nbytes)) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ return -EILSEQ;
+ }
+ memcpy(prng_data->prev, buf, nbytes);
+ }
+
+ return ret;
+}
+
+
+/*** file io functions ***/
+
+static int prng_open(struct inode *inode, struct file *file)
+{
+ return nonseekable_open(inode, file);
+}
+
+
+static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
+ size_t nbytes, loff_t *ppos)
+{
+ int chunk, n, tmp, ret = 0;
+
+ /* lock prng_data struct */
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
- /* nbytes can be arbitrary length, we split it into chunks */
while (nbytes) {
- /* same as in extract_entropy_user in random.c */
if (need_resched()) {
if (signal_pending(current)) {
if (ret == 0)
ret = -ERESTARTSYS;
break;
}
+ /* release the mutex before calling schedule() */
+ mutex_unlock(&prng_data->mutex);
schedule();
+ /* take the mutex again afterwards */
+ if (mutex_lock_interruptible(&prng_data->mutex)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
}
/*
@@ -112,12 +535,11 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
/* PRNG only likes multiples of 8 bytes */
n = (chunk + 7) & -8;
- if (p->count > prng_entropy_limit)
- prng_seed(8);
+ if (prng_data->prngws.reseed_counter > prng_reseed_limit)
+ prng_tdes_seed(8);
/* if the CPU supports PRNG stckf is present too */
- asm volatile(".insn s,0xb27c0000,%0"
- : "=m" (*((unsigned long long *)p->buf)) : : "cc");
+ *((unsigned long long *)prng_data->buf) = get_tod_clock_fast();
/*
* Beside the STCKF the input for the TDES-EDE is the output
@@ -132,35 +554,259 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
*/
- tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n);
- BUG_ON((tmp < 0) || (tmp != n));
+ tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+ prng_data->buf, prng_data->buf, n);
+ if (tmp < 0 || tmp != n) {
+ ret = -EIO;
+ break;
+ }
- p->count += n;
+ prng_data->prngws.byte_counter += n;
+ prng_data->prngws.reseed_counter += n;
- if (copy_to_user(ubuf, p->buf, chunk))
- return -EFAULT;
+ if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+ ret = -EFAULT;
+ break;
+ }
nbytes -= chunk;
ret += chunk;
ubuf += chunk;
}
+
+ /* unlock prng_data struct */
+ mutex_unlock(&prng_data->mutex);
+
return ret;
}
-static const struct file_operations prng_fops = {
+
+static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
+ size_t nbytes, loff_t *ppos)
+{
+ int n, ret = 0;
+ u8 *p;
+
+ /* if errorflag is set do nothing and return 'broken pipe' */
+ if (prng_errorflag)
+ return -EPIPE;
+
+ /* lock prng_data struct */
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+
+ while (nbytes) {
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* release the mutex before calling schedule() */
+ mutex_unlock(&prng_data->mutex);
+ schedule();
+ /* take the mutex again afterwards */
+ if (mutex_lock_interruptible(&prng_data->mutex)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ }
+ if (prng_data->rest) {
+ /* hand out leftover random bytes from the previous read */
+ p = prng_data->buf + prng_chunk_size - prng_data->rest;
+ n = (nbytes < prng_data->rest) ?
+ nbytes : prng_data->rest;
+ prng_data->rest -= n;
+ } else {
+ /* generate one chunk of random bytes into read buf */
+ p = prng_data->buf;
+ n = prng_sha512_generate(p, prng_chunk_size);
+ if (n < 0) {
+ ret = n;
+ break;
+ }
+ if (nbytes < prng_chunk_size) {
+ n = nbytes;
+ prng_data->rest = prng_chunk_size - n;
+ } else {
+ n = prng_chunk_size;
+ prng_data->rest = 0;
+ }
+ }
+ if (copy_to_user(ubuf, p, n)) {
+ ret = -EFAULT;
+ break;
+ }
+ ubuf += n;
+ nbytes -= n;
+ ret += n;
+ }
+
+ /* unlock prng_data struct */
+ mutex_unlock(&prng_data->mutex);
+
+ return ret;
+}
+
+
+/*** sysfs stuff ***/
+
+static const struct file_operations prng_sha512_fops = {
+ .owner = THIS_MODULE,
+ .open = &prng_open,
+ .release = NULL,
+ .read = &prng_sha512_read,
+ .llseek = noop_llseek,
+};
+static const struct file_operations prng_tdes_fops = {
.owner = THIS_MODULE,
.open = &prng_open,
.release = NULL,
- .read = &prng_read,
+ .read = &prng_tdes_read,
.llseek = noop_llseek,
};
-static struct miscdevice prng_dev = {
+static struct miscdevice prng_sha512_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &prng_sha512_fops,
+};
+static struct miscdevice prng_tdes_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
- .fops = &prng_fops,
+ .fops = &prng_tdes_fops,
};
+
+/* chunksize attribute (ro) */
+static ssize_t prng_chunksize_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
+}
+static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
+
+/* counter attribute (ro) */
+static ssize_t prng_counter_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 counter;
+
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+ if (prng_mode == PRNG_MODE_SHA512)
+ counter = prng_data->ppnows.stream_bytes;
+ else
+ counter = prng_data->prngws.byte_counter;
+ mutex_unlock(&prng_data->mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", counter);
+}
+static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
+
+/* errorflag attribute (ro) */
+static ssize_t prng_errorflag_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
+}
+static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
+
+/* mode attribute (ro) */
+static ssize_t prng_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (prng_mode == PRNG_MODE_TDES)
+ return snprintf(buf, PAGE_SIZE, "TDES\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "SHA512\n");
+}
+static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
+
+/* reseed attribute (w) */
+static ssize_t prng_reseed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+ prng_sha512_reseed();
+ mutex_unlock(&prng_data->mutex);
+
+ return count;
+}
+static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store);
+
+/* reseed limit attribute (rw) */
+static ssize_t prng_reseed_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
+}
+static ssize_t prng_reseed_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned limit;
+
+ if (sscanf(buf, "%u\n", &limit) != 1)
+ return -EINVAL;
+
+ if (prng_mode == PRNG_MODE_SHA512) {
+ if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+ return -EINVAL;
+ } else {
+ if (limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+ return -EINVAL;
+ }
+
+ prng_reseed_limit = limit;
+
+ return count;
+}
+static DEVICE_ATTR(reseed_limit, 0644,
+ prng_reseed_limit_show, prng_reseed_limit_store);
+
+/* strength attribute (ro) */
+static ssize_t prng_strength_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "256\n");
+}
+static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
+
+static struct attribute *prng_sha512_dev_attrs[] = {
+ &dev_attr_errorflag.attr,
+ &dev_attr_chunksize.attr,
+ &dev_attr_byte_counter.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_reseed.attr,
+ &dev_attr_reseed_limit.attr,
+ &dev_attr_strength.attr,
+ NULL
+};
+static struct attribute *prng_tdes_dev_attrs[] = {
+ &dev_attr_chunksize.attr,
+ &dev_attr_byte_counter.attr,
+ &dev_attr_mode.attr,
+ NULL
+};
+
+static struct attribute_group prng_sha512_dev_attr_group = {
+ .attrs = prng_sha512_dev_attrs
+};
+static struct attribute_group prng_tdes_dev_attr_group = {
+ .attrs = prng_tdes_dev_attrs
+};
+
+
+/*** module init and exit ***/
+
static int __init prng_init(void)
{
int ret;
@@ -169,43 +815,105 @@ static int __init prng_init(void)
if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
return -EOPNOTSUPP;
- if (prng_chunk_size < 8)
- return -EINVAL;
+ /* choose prng mode */
+ if (prng_mode != PRNG_MODE_TDES) {
+ /* check for MSA5 support for PPNO operations */
+ if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
+ CRYPT_S390_MSA5)) {
+ if (prng_mode == PRNG_MODE_SHA512) {
+ pr_err("The prng module cannot "
+ "start in SHA-512 mode\n");
+ return -EOPNOTSUPP;
+ }
+ prng_mode = PRNG_MODE_TDES;
+ } else
+ prng_mode = PRNG_MODE_SHA512;
+ }
- p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- p->count = 0;
+ if (prng_mode == PRNG_MODE_SHA512) {
- p->buf = kmalloc(prng_chunk_size, GFP_KERNEL);
- if (!p->buf) {
- ret = -ENOMEM;
- goto out_free;
- }
+ /* SHA512 mode */
- /* initialize the PRNG, add 128 bits of entropy */
- prng_seed(16);
+ if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN
+ || prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX)
+ return -EINVAL;
+ prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f;
- ret = misc_register(&prng_dev);
- if (ret)
- goto out_buf;
- return 0;
+ if (prng_reseed_limit == 0)
+ prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512;
+ else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+ return -EINVAL;
+
+ ret = prng_sha512_instantiate();
+ if (ret)
+ goto out;
+
+ ret = misc_register(&prng_sha512_dev);
+ if (ret) {
+ prng_sha512_deinstantiate();
+ goto out;
+ }
+ ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj,
+ &prng_sha512_dev_attr_group);
+ if (ret) {
+ misc_deregister(&prng_sha512_dev);
+ prng_sha512_deinstantiate();
+ goto out;
+ }
-out_buf:
- kfree(p->buf);
-out_free:
- kfree(p);
+ } else {
+
+ /* TDES mode */
+
+ if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN
+ || prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX)
+ return -EINVAL;
+ prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07;
+
+ if (prng_reseed_limit == 0)
+ prng_reseed_limit = PRNG_RESEED_LIMIT_TDES;
+ else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+ return -EINVAL;
+
+ ret = prng_tdes_instantiate();
+ if (ret)
+ goto out;
+
+ ret = misc_register(&prng_tdes_dev);
+ if (ret) {
+ prng_tdes_deinstantiate();
+ goto out;
+ }
+ ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj,
+ &prng_tdes_dev_attr_group);
+ if (ret) {
+ misc_deregister(&prng_tdes_dev);
+ prng_tdes_deinstantiate();
+ goto out;
+ }
+
+ }
+
+out:
return ret;
}
+
static void __exit prng_exit(void)
{
- /* wipe me */
- kzfree(p->buf);
- kfree(p);
-
- misc_deregister(&prng_dev);
+ if (prng_mode == PRNG_MODE_SHA512) {
+ sysfs_remove_group(&prng_sha512_dev.this_device->kobj,
+ &prng_sha512_dev_attr_group);
+ misc_deregister(&prng_sha512_dev);
+ prng_sha512_deinstantiate();
+ } else {
+ sysfs_remove_group(&prng_tdes_dev.this_device->kobj,
+ &prng_tdes_dev_attr_group);
+ misc_deregister(&prng_tdes_dev);
+ prng_tdes_deinstantiate();
+ }
}
+
module_init(prng_init);
module_exit(prng_exit);
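
The prng rework above keeps a single misc character device, so userspace reads random data the same way in both TDES and SHA-512 mode and only the sysfs attribute set differs. A minimal consumer sketch follows; the /dev/prandom path and the sysfs location under /sys/devices/virtual/misc/prandom/ are assumptions derived from the "prandom" miscdevice registration, not something the patch itself guarantees.

/* Minimal sketch of a userspace consumer of the s390 prng misc device.
 * Assumes udev creates /dev/prandom for the "prandom" miscdevice above;
 * adjust the path if your distribution names it differently.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[64];
	ssize_t n, i;
	int fd = open("/dev/prandom", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/prandom");
		return EXIT_FAILURE;
	}
	n = read(fd, buf, sizeof(buf));	/* kernel loops internally over prng_chunk_size pieces */
	if (n < 0) {
		perror("read");
		close(fd);
		return EXIT_FAILURE;
	}
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	putchar('\n');
	close(fd);
	return EXIT_SUCCESS;
}

The attributes defined above (chunksize, byte_counter, mode, and in SHA-512 mode also errorflag, reseed, reseed_limit and strength) can be read or written the same way with a plain open()/read()/write() on the corresponding sysfs files.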
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index d4c0d3717543..24c747a0fcc3 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -19,13 +19,9 @@
static void diag0c(struct hypfs_diag0c_entry *entry)
{
asm volatile (
-#ifdef CONFIG_64BIT
" sam31\n"
" diag %0,%0,0x0c\n"
" sam64\n"
-#else
- " diag %0,%0,0x0c\n"
-#endif
: /* no output register */
: "a" (entry)
: "memory");
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 99824ff8dd35..d3f896a35b98 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include <asm/ebcdic.h>
#include "hypfs.h"
@@ -48,7 +48,7 @@ static struct dentry *hypfs_last_dentry;
static void hypfs_update_update(struct super_block *sb)
{
struct hypfs_sb_info *sb_info = sb->s_fs_info;
- struct inode *inode = sb_info->update_file->d_inode;
+ struct inode *inode = d_inode(sb_info->update_file);
sb_info->last_update = get_seconds();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -64,7 +64,7 @@ static void hypfs_add_dentry(struct dentry *dentry)
static inline int hypfs_positive(struct dentry *dentry)
{
- return dentry->d_inode && !d_unhashed(dentry);
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
static void hypfs_remove(struct dentry *dentry)
@@ -72,16 +72,16 @@ static void hypfs_remove(struct dentry *dentry)
struct dentry *parent;
parent = dentry->d_parent;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
if (hypfs_positive(dentry)) {
if (d_is_dir(dentry))
- simple_rmdir(parent->d_inode, dentry);
+ simple_rmdir(d_inode(parent), dentry);
else
- simple_unlink(parent->d_inode, dentry);
+ simple_unlink(d_inode(parent), dentry);
}
d_delete(dentry);
dput(dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
}
static void hypfs_delete_tree(struct dentry *root)
@@ -336,7 +336,7 @@ static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
struct dentry *dentry;
struct inode *inode;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
dentry = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(dentry)) {
dentry = ERR_PTR(-ENOMEM);
@@ -357,14 +357,14 @@ static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
} else if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
- inc_nlink(parent->d_inode);
+ inc_nlink(d_inode(parent));
} else
BUG();
inode->i_private = data;
d_instantiate(dentry, inode);
dget(dentry);
fail:
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return dentry;
}
@@ -437,8 +437,6 @@ struct dentry *hypfs_create_str(struct dentry *dir,
static const struct file_operations hypfs_file_ops = {
.open = hypfs_open,
.release = hypfs_release,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = hypfs_read_iter,
.write_iter = hypfs_write_iter,
.llseek = no_llseek,
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 32a705987156..16887c5fd989 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -9,28 +9,6 @@
#include <asm/io.h>
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
-#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_REC 0x02
-#define APPLDATA_START_CONFIG_REC 0x03
-
-/*
- * Parameter list for DIAGNOSE X'DC'
- */
-struct appldata_parameter_list {
- u16 diag; /* The DIAGNOSE code X'00DC' */
- u8 function; /* The function code for the DIAGNOSE */
- u8 parlist_length; /* Length of the parameter list */
- u32 product_id_addr; /* Address of the 16-byte product ID */
- u16 reserved;
- u16 buffer_length; /* Length of the application data buffer */
- u32 buffer_addr; /* Address of the application data buffer */
-} __attribute__ ((packed));
-
-#else /* CONFIG_64BIT */
-
#define APPLDATA_START_INTERVAL_REC 0x80
#define APPLDATA_STOP_REC 0x81
#define APPLDATA_GEN_EVENT_REC 0x82
@@ -51,8 +29,6 @@ struct appldata_parameter_list {
u64 buffer_addr;
} __attribute__ ((packed));
-#endif /* CONFIG_64BIT */
-
struct appldata_product_id {
char prod_nr[7]; /* product number */
u16 prod_fn; /* product function */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa934fe080c1..adbe3802e377 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -160,8 +160,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) { (i) }
-#ifdef CONFIG_64BIT
-
#define __ATOMIC64_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -274,99 +272,6 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
#undef __ATOMIC64_LOOP
-#else /* CONFIG_64BIT */
-
-typedef struct {
- long long counter;
-} atomic64_t;
-
-static inline long long atomic64_read(const atomic64_t *v)
-{
- register_pair rp;
-
- asm volatile(
- " lm %0,%N0,%1"
- : "=&d" (rp) : "Q" (v->counter) );
- return rp.pair;
-}
-
-static inline void atomic64_set(atomic64_t *v, long long i)
-{
- register_pair rp = {.pair = i};
-
- asm volatile(
- " stm %1,%N1,%0"
- : "=Q" (v->counter) : "d" (rp) );
-}
-
-static inline long long atomic64_xchg(atomic64_t *v, long long new)
-{
- register_pair rp_new = {.pair = new};
- register_pair rp_old;
-
- asm volatile(
- " lm %0,%N0,%1\n"
- "0: cds %0,%2,%1\n"
- " jl 0b\n"
- : "=&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
- : "cc");
- return rp_old.pair;
-}
-
-static inline long long atomic64_cmpxchg(atomic64_t *v,
- long long old, long long new)
-{
- register_pair rp_old = {.pair = old};
- register_pair rp_new = {.pair = new};
-
- asm volatile(
- " cds %0,%2,%1"
- : "+&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
- : "cc");
- return rp_old.pair;
-}
-
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old + i;
- } while (atomic64_cmpxchg(v, old, new) != old);
- return new;
-}
-
-static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old | mask;
- } while (atomic64_cmpxchg(v, old, new) != old);
-}
-
-static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old & mask;
- } while (atomic64_cmpxchg(v, old, new) != old);
-}
-
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
- atomic64_add_return(i, v);
-}
-
-#endif /* CONFIG_64BIT */
-
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 520542477678..9b68e98a724f 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -51,32 +51,6 @@
#define __BITOPS_NO_BARRIER "\n"
-#ifndef CONFIG_64BIT
-
-#define __BITOPS_OR "or"
-#define __BITOPS_AND "nr"
-#define __BITOPS_XOR "xr"
-#define __BITOPS_BARRIER "\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- " l %0,%2\n" \
- "0: lr %1,%0\n" \
- __op_string " %1,%3\n" \
- " cs %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#else /* CONFIG_64BIT */
-
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __BITOPS_OR "laog"
@@ -125,8 +99,6 @@
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-#endif /* CONFIG_64BIT */
-
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 6259895fcd97..4eadec466b8c 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -80,15 +80,10 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
({ \
__typeof__(p1) __p1 = (p1); \
__typeof__(p2) __p2 = (p2); \
- int __ret; \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
- if (sizeof(long) == 4) \
- __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
- else \
- __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
- __ret; \
+ __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
})
#define system_has_cmpxchg_double() 1
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index b91e960e4045..221b454c734a 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -22,15 +22,7 @@ typedef unsigned long long __nocast cputime64_t;
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = n >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
- return rp.subreg.odd;
-#else /* CONFIG_64BIT */
return n / base;
-#endif /* CONFIG_64BIT */
}
#define cputime_one_jiffy jiffies_to_cputime(1)
@@ -101,17 +93,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
struct timespec *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
- value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
- value->tv_sec = rp.subreg.odd;
-#else
value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
value->tv_sec = __cputime / CPUTIME_PER_SEC;
-#endif
}
/*
@@ -129,17 +112,8 @@ static inline void cputime_to_timeval(const cputime_t cputime,
struct timeval *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
- value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
- value->tv_sec = rp.subreg.odd;
-#else
value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
value->tv_sec = __cputime / CPUTIME_PER_SEC;
-#endif
}
/*
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 31ab9f346d7e..cfad7fca01d6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -9,20 +9,12 @@
#include <linux/bug.h>
-#ifdef CONFIG_64BIT
-# define __CTL_LOAD "lctlg"
-# define __CTL_STORE "stctg"
-#else
-# define __CTL_LOAD "lctl"
-# define __CTL_STORE "stctl"
-#endif
-
#define __ctl_load(array, low, high) { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
- __CTL_LOAD " %1,%2,%0\n" \
+ " lctlg %1,%2,%0\n" \
: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
}
@@ -31,7 +23,7 @@
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
- __CTL_STORE " %1,%2,%0\n" \
+ " stctg %1,%2,%0\n" \
: "=Q" (*(addrtype *)(&array)) \
: "i" (low), "i" (high)); \
}
@@ -60,9 +52,7 @@ void smp_ctl_clear_bit(int cr, int bit);
union ctlreg0 {
unsigned long val;
struct {
-#ifdef CONFIG_64BIT
unsigned long : 32;
-#endif
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 709955ddaa4d..9d395961e713 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -42,7 +42,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
- return 0;
+ return false;
return addr + size - 1 <= *dev->dma_mask;
}
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9c875d9ed31..3ad48f22de78 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,11 +107,7 @@
/*
* These are used to set parameters in the core dumps.
*/
-#ifndef CONFIG_64BIT
-#define ELF_CLASS ELFCLASS32
-#else /* CONFIG_64BIT */
#define ELF_CLASS ELFCLASS64
-#endif /* CONFIG_64BIT */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
@@ -161,10 +157,11 @@ extern unsigned int vdso_enabled;
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-extern unsigned long randomize_et_dyn(void);
-#define ELF_ET_DYN_BASE randomize_et_dyn()
+ that it will "exec", and that there is sufficient room for the brk. 64-bit
+ tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? \
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
@@ -225,9 +222,6 @@ struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
#endif
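
The ELF_ET_DYN_BASE change above replaces the old randomize_et_dyn() hook with a fixed expression: two thirds of the stack top for 31-bit compat tasks, and the same value rounded down to a 4 GB boundary for 64-bit tasks. A small sketch of the rounding arithmetic, using a made-up STACK_TOP value purely for illustration:

/* Illustration of the ELF_ET_DYN_BASE rounding above; stack_top is a
 * placeholder, not the real s390 STACK_TOP constant.
 */
#include <stdio.h>

int main(void)
{
	unsigned long stack_top = 0x20000000000UL;	/* hypothetical */
	unsigned long base_31bit = stack_top / 3 * 2;
	unsigned long base_64bit = (stack_top / 3 * 2) & ~((1UL << 32) - 1);

	printf("31-bit task base: %#lx\n", base_31bit);
	printf("64-bit task base: %#lx (low 32 bits cleared)\n", base_64bit);
	return 0;
}

Masking with ~((1UL << 32) - 1) simply clears the low 32 bits, which is what the comment means by 64-bit tasks being aligned to 4GB.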
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index ea5a6e45fd93..a7b2d7504049 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -19,11 +19,7 @@
#include <asm/cio.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
-#else
-#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
-#endif
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
/*
@@ -32,11 +28,7 @@
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
-#ifdef CONFIG_64BIT
return ((__pa(vaddr) + length - 1) >> 31) != 0;
-#else
- return 0;
-#endif
}
@@ -77,7 +69,6 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
-#ifdef CONFIG_64BIT
unsigned int nridaws;
unsigned long *idal;
@@ -93,7 +84,6 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
ccw->flags |= CCW_FLAG_IDA;
vaddr = idal;
}
-#endif
ccw->cda = (__u32)(unsigned long) vaddr;
return 0;
}
@@ -104,12 +94,10 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
-#ifdef CONFIG_64BIT
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
}
-#endif
ccw->cda = 0;
}
@@ -181,12 +169,8 @@ idal_buffer_free(struct idal_buffer *ib)
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
-#ifdef CONFIG_64BIT
return ib->size > (4096ul << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
-#else
- return ib->size > (4096ul << ib->page_order);
-#endif
}
/*
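
With the 31-bit fallback gone, idal_is_needed() reduces to a single predicate: an indirect data address list is required whenever the buffer's physical address range reaches the 2 GB line, i.e. when any bit at or above bit 31 of __pa(vaddr) + length - 1 is set. A standalone sketch of that check, with a stand-in value for __pa(vaddr):

/* Mirrors the 64-bit-only check above; phys stands in for __pa(vaddr)
 * and the sample values are illustrative only.
 */
#include <stdio.h>

static int idal_is_needed(unsigned long phys, unsigned int length)
{
	return ((phys + length - 1) >> 31) != 0;
}

int main(void)
{
	/* crosses the 2 GB boundary -> IDAL needed */
	printf("%d\n", idal_is_needed(0x7ff00000UL, 0x200000));
	/* stays below 2 GB -> direct addressing is fine */
	printf("%d\n", idal_is_needed(0x10000000UL, 0x1000));
	return 0;
}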
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 343ea7c987aa..ff95d15a2384 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -57,7 +57,6 @@ enum interruption_class {
IRQIO_TAP,
IRQIO_VMR,
IRQIO_LCS,
- IRQIO_CLW,
IRQIO_CTC,
IRQIO_APB,
IRQIO_ADM,
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 2b77e235b5fb..69972b7957ee 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -8,14 +8,6 @@
#define JUMP_LABEL_NOP_SIZE 6
#define JUMP_LABEL_NOP_OFFSET 2
-#ifdef CONFIG_64BIT
-#define ASM_PTR ".quad"
-#define ASM_ALIGN ".balign 8"
-#else
-#define ASM_PTR ".long"
-#define ASM_ALIGN ".balign 4"
-#endif
-
/*
* We use a brcl 0,2 instruction for jump labels at compile time so it
* can be easily distinguished from a hotpatch generated instruction.
@@ -24,8 +16,8 @@ static __always_inline bool arch_static_branch(struct static_key *key)
{
asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
".pushsection __jump_table, \"aw\"\n"
- ASM_ALIGN "\n"
- ASM_PTR " 0b, %l[label], %0\n"
+ ".balign 8\n"
+ ".quad 0b, %l[label], %0\n"
".popsection\n"
: : "X" (key) : : label);
return false;
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 694bcd6bd927..2f924bc30e35 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -26,6 +26,9 @@
/* Not more than 2GB */
#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
+/* Allocate control page with GFP_DMA */
+#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+
/* Maximum address we can use for the crash control pages */
#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
new file mode 100644
index 000000000000..7aa799134a11
--- /dev/null
+++ b/arch/s390/include/asm/livepatch.h
@@ -0,0 +1,43 @@
+/*
+ * livepatch.h - s390-specific Kernel Live Patching Core
+ *
+ * Copyright (c) 2013-2015 SUSE
+ * Authors: Jiri Kosina
+ * Vojtech Pavlik
+ * Jiri Slaby
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef ASM_LIVEPATCH_H
+#define ASM_LIVEPATCH_H
+
+#include <linux/module.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+ return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+ type, unsigned long loc, unsigned long value)
+{
+ /* not supported yet */
+ return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->psw.addr = ip;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 34fbcac61133..663f23e37460 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -13,163 +13,6 @@
#include <asm/cpu.h>
#include <asm/types.h>
-#ifdef CONFIG_32BIT
-
-#define LC_ORDER 0
-#define LC_PAGES 1
-
-struct save_area {
- u32 ext_save;
- u64 timer;
- u64 clk_cmp;
- u8 pad1[24];
- u8 psw[8];
- u32 pref_reg;
- u8 pad2[20];
- u32 acc_regs[16];
- u64 fp_regs[4];
- u32 gp_regs[16];
- u32 ctrl_regs[16];
-} __packed;
-
-struct save_area_ext {
- struct save_area sa;
- __vector128 vx_regs[32];
-};
-
-struct _lowcore {
- psw_t restart_psw; /* 0x0000 */
- psw_t restart_old_psw; /* 0x0008 */
- __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
- __u32 ipl_parmblock_ptr; /* 0x0014 */
- psw_t external_old_psw; /* 0x0018 */
- psw_t svc_old_psw; /* 0x0020 */
- psw_t program_old_psw; /* 0x0028 */
- psw_t mcck_old_psw; /* 0x0030 */
- psw_t io_old_psw; /* 0x0038 */
- __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */
- psw_t external_new_psw; /* 0x0058 */
- psw_t svc_new_psw; /* 0x0060 */
- psw_t program_new_psw; /* 0x0068 */
- psw_t mcck_new_psw; /* 0x0070 */
- psw_t io_new_psw; /* 0x0078 */
- __u32 ext_params; /* 0x0080 */
- __u16 ext_cpu_addr; /* 0x0084 */
- __u16 ext_int_code; /* 0x0086 */
- __u16 svc_ilc; /* 0x0088 */
- __u16 svc_code; /* 0x008a */
- __u16 pgm_ilc; /* 0x008c */
- __u16 pgm_code; /* 0x008e */
- __u32 trans_exc_code; /* 0x0090 */
- __u16 mon_class_num; /* 0x0094 */
- __u8 per_code; /* 0x0096 */
- __u8 per_atmid; /* 0x0097 */
- __u32 per_address; /* 0x0098 */
- __u32 monitor_code; /* 0x009c */
- __u8 exc_access_id; /* 0x00a0 */
- __u8 per_access_id; /* 0x00a1 */
- __u8 op_access_id; /* 0x00a2 */
- __u8 ar_mode_id; /* 0x00a3 */
- __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
- __u16 subchannel_id; /* 0x00b8 */
- __u16 subchannel_nr; /* 0x00ba */
- __u32 io_int_parm; /* 0x00bc */
- __u32 io_int_word; /* 0x00c0 */
- __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
- __u32 stfl_fac_list; /* 0x00c8 */
- __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */
- __u32 extended_save_area_addr; /* 0x00d4 */
- __u32 cpu_timer_save_area[2]; /* 0x00d8 */
- __u32 clock_comp_save_area[2]; /* 0x00e0 */
- __u32 mcck_interruption_code[2]; /* 0x00e8 */
- __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
- __u32 external_damage_code; /* 0x00f4 */
- __u32 failing_storage_address; /* 0x00f8 */
- __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
- psw_t psw_save_area; /* 0x0100 */
- __u32 prefixreg_save_area; /* 0x0108 */
- __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
-
- /* CPU register save area: defined by architecture */
- __u32 access_regs_save_area[16]; /* 0x0120 */
- __u32 floating_pt_save_area[8]; /* 0x0160 */
- __u32 gpregs_save_area[16]; /* 0x0180 */
- __u32 cregs_save_area[16]; /* 0x01c0 */
-
- /* Save areas. */
- __u32 save_area_sync[8]; /* 0x0200 */
- __u32 save_area_async[8]; /* 0x0220 */
- __u32 save_area_restart[1]; /* 0x0240 */
-
- /* CPU flags. */
- __u32 cpu_flags; /* 0x0244 */
-
- /* Return psws. */
- psw_t return_psw; /* 0x0248 */
- psw_t return_mcck_psw; /* 0x0250 */
-
- /* CPU time accounting values */
- __u64 sync_enter_timer; /* 0x0258 */
- __u64 async_enter_timer; /* 0x0260 */
- __u64 mcck_enter_timer; /* 0x0268 */
- __u64 exit_timer; /* 0x0270 */
- __u64 user_timer; /* 0x0278 */
- __u64 system_timer; /* 0x0280 */
- __u64 steal_timer; /* 0x0288 */
- __u64 last_update_timer; /* 0x0290 */
- __u64 last_update_clock; /* 0x0298 */
- __u64 int_clock; /* 0x02a0 */
- __u64 mcck_clock; /* 0x02a8 */
- __u64 clock_comparator; /* 0x02b0 */
-
- /* Current process. */
- __u32 current_task; /* 0x02b8 */
- __u32 thread_info; /* 0x02bc */
- __u32 kernel_stack; /* 0x02c0 */
-
- /* Interrupt, panic and restart stack. */
- __u32 async_stack; /* 0x02c4 */
- __u32 panic_stack; /* 0x02c8 */
- __u32 restart_stack; /* 0x02cc */
-
- /* Restart function and parameter. */
- __u32 restart_fn; /* 0x02d0 */
- __u32 restart_data; /* 0x02d4 */
- __u32 restart_source; /* 0x02d8 */
-
- /* Address space pointer. */
- __u32 kernel_asce; /* 0x02dc */
- __u32 user_asce; /* 0x02e0 */
- __u32 current_pid; /* 0x02e4 */
-
- /* SMP info area */
- __u32 cpu_nr; /* 0x02e8 */
- __u32 softirq_pending; /* 0x02ec */
- __u32 percpu_offset; /* 0x02f0 */
- __u32 machine_flags; /* 0x02f4 */
- __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */
- __u32 spinlock_lockval; /* 0x02fc */
-
- __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
-
- /*
- * 0xe00 contains the address of the IPL Parameter Information
- * block. Dump tools need IPIB for IPL after dump.
- * Note: do not change the position of any fields in 0x0e00-0x0f00
- */
- __u32 ipib; /* 0x0e00 */
- __u32 ipib_checksum; /* 0x0e04 */
- __u32 vmcore_info; /* 0x0e08 */
- __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */
- __u32 os_info; /* 0x0e18 */
- __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */
-
- /* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
-} __packed;
-
-#else /* CONFIG_32BIT */
-
#define LC_ORDER 1
#define LC_PAGES 2
@@ -354,8 +197,6 @@ struct _lowcore {
__u8 vector_save_area[1024]; /* 0x1c00 */
} __packed;
-#endif /* CONFIG_32BIT */
-
#define S390_lowcore (*((struct _lowcore *) 0))
extern struct _lowcore *lowcore_ptr[];
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 9977e08df5bd..b55a59e1d134 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -8,7 +8,7 @@
#include <uapi/asm/mman.h>
-#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
+#ifndef __ASSEMBLY__
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
#endif
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index a5e656260a70..d29ad9545b41 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -14,7 +14,9 @@ typedef struct {
unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
- /* The mmu context has extended page tables. */
+ /* The mmu context allocates 4K page tables. */
+ unsigned int alloc_pgste:1;
+ /* The mmu context uses extended page tables. */
unsigned int has_pgste:1;
/* The mmu context uses storage keys. */
unsigned int use_skey:1;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8fb3802f8fad..fb1b93ea3e3f 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -19,11 +19,12 @@ static inline int init_new_context(struct task_struct *tsk,
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
-#endif
+#ifdef CONFIG_PGSTE
+ mm->context.alloc_pgste = page_table_allocate_pgste;
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
+#endif
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
@@ -110,10 +111,8 @@ static inline void activate_mm(struct mm_struct *prev,
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
-#ifdef CONFIG_64BIT
if (oldmm->context.asce_limit < mm->context.asce_limit)
crst_table_downgrade(mm, oldmm->context.asce_limit);
-#endif
}
static inline void arch_exit_mmap(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index ef803c202d42..a648338c434a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -7,6 +7,7 @@
#define PCI_BAR_COUNT 6
#include <linux/pci.h>
+#include <linux/mutex.h>
#include <asm-generic/pci.h>
#include <asm-generic/pci-dma-compat.h>
#include <asm/pci_clp.h>
@@ -44,10 +45,6 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
- /* software counters */
- atomic64_t allocated_pages;
- atomic64_t mapped_pages;
- atomic64_t unmapped_pages;
} __packed __aligned(16);
enum zpci_state {
@@ -80,6 +77,7 @@ struct zpci_dev {
u8 pft; /* pci function type */
u16 domain;
+ struct mutex lock;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
@@ -111,6 +109,10 @@ struct zpci_dev {
/* Function measurement block */
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
+ /* software counters */
+ atomic64_t allocated_pages;
+ atomic64_t mapped_pages;
+ atomic64_t unmapped_pages;
enum pci_bus_speed max_bus_speed;
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 933355e0d091..6d6556ca24aa 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,8 +10,6 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
-#ifdef CONFIG_64BIT
-
/*
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
@@ -183,8 +181,6 @@
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif /* CONFIG_64BIT */
-
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 159a8ec6da9a..4cb19fe76dd9 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -9,8 +9,6 @@
#ifndef _ASM_S390_PERF_EVENT_H
#define _ASM_S390_PERF_EVENT_H
-#ifdef CONFIG_64BIT
-
#include <linux/perf_event.h>
#include <linux/device.h>
#include <asm/cpu_mf.h>
@@ -92,5 +90,4 @@ struct sf_raw_sample {
int perf_reserve_sampling(void);
void perf_release_sampling(void);
-#endif /* CONFIG_64BIT */
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 3009c2ba46d2..7b7858f158b4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -21,6 +21,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
+extern int page_table_allocate_pgste;
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq);
@@ -33,11 +34,7 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
*s = val;
n = (n / 256) - 1;
asm volatile(
-#ifdef CONFIG_64BIT
" mvc 8(248,%0),0(%0)\n"
-#else
- " mvc 4(252,%0),0(%0)\n"
-#endif
"0: mvc 256(256,%0),0(%0)\n"
" la %0,256(%0)\n"
" brct %1,0b\n"
@@ -50,24 +47,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
clear_table(crst, entry, sizeof(unsigned long)*2048);
}
-#ifndef CONFIG_64BIT
-
-static inline unsigned long pgd_entry_type(struct mm_struct *mm)
-{
- return _SEGMENT_ENTRY_EMPTY;
-}
-
-#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
-#define pud_free(mm, x) do { } while (0)
-
-#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(mm, x) do { } while (0)
-
-#define pgd_populate(mm, pgd, pud) BUG()
-#define pud_populate(mm, pud, pmd) BUG()
-
-#else /* CONFIG_64BIT */
-
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
if (mm->context.asce_limit <= (1UL << 31))
@@ -119,8 +98,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
-#endif /* CONFIG_64BIT */
-
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e08ec38f8c6e..fc642399b489 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -12,12 +12,9 @@
#define _ASM_S390_PGTABLE_H
/*
- * The Linux memory management assumes a three-level page table setup. For
- * s390 31 bit we "fold" the mid level into the top-level page table, so
- * that we physically have the same two-level page table as the s390 mmu
- * expects in 31 bit mode. For s390 64 bit we use three of the five levels
- * the hardware provides (region first and region second tables are not
- * used).
+ * The Linux memory management assumes a three-level page table setup.
+ * For s390 64 bit we use up to four of the five levels the hardware
+ * provides (region first tables are not used).
*
* The "pgd_xxx()" functions are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
@@ -66,15 +63,9 @@ extern unsigned long zero_page_mask;
* table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
-#ifndef CONFIG_64BIT
-# define PMD_SHIFT 20
-# define PUD_SHIFT 20
-# define PGDIR_SHIFT 20
-#else /* CONFIG_64BIT */
-# define PMD_SHIFT 20
-# define PUD_SHIFT 31
-# define PGDIR_SHIFT 42
-#endif /* CONFIG_64BIT */
+#define PMD_SHIFT 20
+#define PUD_SHIFT 31
+#define PGDIR_SHIFT 42
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -90,15 +81,8 @@ extern unsigned long zero_page_mask;
* that leads to 1024 pte per pgd
*/
#define PTRS_PER_PTE 256
-#ifndef CONFIG_64BIT
-#define __PAGETABLE_PUD_FOLDED
-#define PTRS_PER_PMD 1
-#define __PAGETABLE_PMD_FOLDED
-#define PTRS_PER_PUD 1
-#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
#define PTRS_PER_PUD 2048
-#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0UL
@@ -114,8 +98,8 @@ extern unsigned long zero_page_mask;
#ifndef __ASSEMBLY__
/*
- * The vmalloc and module area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
+ * The vmalloc and module area will always be on the topmost area of the
+ * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
* On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
* modules will reside. That makes sure that inter module branches always
* happen without trampolines and in addition the placement within a 2GB frame
@@ -127,59 +111,23 @@ extern struct page *vmemmap;
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
-#endif
static inline int is_module_addr(void *addr)
{
-#ifdef CONFIG_64BIT
BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
if (addr < (void *)MODULES_VADDR)
return 0;
if (addr > (void *)MODULES_END)
return 0;
-#endif
return 1;
}
/*
- * A 31 bit pagetable entry of S390 has following format:
- * | PFRA | | OS |
- * 0 0IP0
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Page-Invalid Bit: Page is not available for address-translation
- * P Page-Protection Bit: Store access not possible for page
- *
- * A 31 bit segmenttable entry of S390 has following format:
- * | P-table origin | |PTL
- * 0 IC
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Segment-Invalid Bit: Segment is not available for address-translation
- * C Common-Segment Bit: Segment is not private (PoP 3-30)
- * PTL Page-Table-Length: Page-table length (PTL+1*16 entries -> up to 256)
- *
- * The 31 bit segmenttable origin of S390 has following format:
- *
- * |S-table origin | | STL |
- * X **GPS
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * X Space-Switch event:
- * G Segment-Invalid Bit: *
- * P Private-Space Bit: Segment is not private (PoP 3-30)
- * S Storage-Alteration:
- * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048)
- *
* A 64 bit pagetable entry of S390 has following format:
* | PFRA |0IPC| OS |
* 0000000000111111111122222222223333333333444444444455555555556666
@@ -237,7 +185,6 @@ static inline int is_module_addr(void *addr)
/* Software bits in the page table entry */
#define _PAGE_PRESENT 0x001 /* SW pte present bit */
-#define _PAGE_TYPE 0x002 /* SW pte type bit */
#define _PAGE_YOUNG 0x004 /* SW pte young bit */
#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
#define _PAGE_READ 0x010 /* SW pte read bit */
@@ -257,83 +204,36 @@ static inline int is_module_addr(void *addr)
* table lock held.
*
* The following table gives the different possible bit combinations for
- * the pte hardware and software bits in the last 12 bits of a pte:
+ * the pte hardware and software bits in the last 12 bits of a pte
+ * (. unassigned bit, x don't care, t swap type):
*
* 842100000000
* 000084210000
* 000000008421
- * .IR...wrdytp
- * empty .10...000000
- * swap .10...xxxx10
- * file .11...xxxxx0
- * prot-none, clean, old .11...000001
- * prot-none, clean, young .11...000101
- * prot-none, dirty, old .10...001001
- * prot-none, dirty, young .10...001101
- * read-only, clean, old .11...010001
- * read-only, clean, young .01...010101
- * read-only, dirty, old .11...011001
- * read-only, dirty, young .01...011101
- * read-write, clean, old .11...110001
- * read-write, clean, young .01...110101
- * read-write, dirty, old .10...111001
- * read-write, dirty, young .00...111101
+ * .IR.uswrdy.p
+ * empty .10.00000000
+ * swap .11..ttttt.0
+ * prot-none, clean, old .11.xx0000.1
+ * prot-none, clean, young .11.xx0001.1
+ * prot-none, dirty, old .10.xx0010.1
+ * prot-none, dirty, young .10.xx0011.1
+ * read-only, clean, old .11.xx0100.1
+ * read-only, clean, young .01.xx0101.1
+ * read-only, dirty, old .11.xx0110.1
+ * read-only, dirty, young .01.xx0111.1
+ * read-write, clean, old .11.xx1100.1
+ * read-write, clean, young .01.xx1101.1
+ * read-write, dirty, old .10.xx1110.1
+ * read-write, dirty, young .00.xx1111.1
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ * u unused, l large
*
- * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
- * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
- * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
+ * pte_none is true for the bit pattern .10.00000000, pte == 0x400
+ * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
+ * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
*/
-#ifndef CONFIG_64BIT
-
-/* Bits in the segment table address-space-control-element */
-#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
-#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
-#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
-#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
-#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
-
-/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
-#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
-#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
-#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
-#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
-
-#define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */
-#define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */
-#define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */
-#define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */
-#define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */
-#define _SEGMENT_ENTRY_BITS_LARGE 0
-#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
-
-#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
-
-/*
- * Segment table entry encoding (I = invalid, R = read-only bit):
- * ..R...I.....
- * prot-none ..1...1.....
- * read-only ..1...0.....
- * read-write ..0...0.....
- * empty ..0...1.....
- */
-
-/* Page status table bits for virtualization */
-#define PGSTE_ACC_BITS 0xf0000000UL
-#define PGSTE_FP_BIT 0x08000000UL
-#define PGSTE_PCL_BIT 0x00800000UL
-#define PGSTE_HR_BIT 0x00400000UL
-#define PGSTE_HC_BIT 0x00200000UL
-#define PGSTE_GR_BIT 0x00040000UL
-#define PGSTE_GC_BIT 0x00020000UL
-#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
-
-#else /* CONFIG_64BIT */
-
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
@@ -402,6 +302,8 @@ static inline int is_module_addr(void *addr)
* read-write, dirty, young 11..0...0...11
* The segment table origin is used to distinguish empty (origin==0) from
* read-write, old segment table entries (origin!=0)
+ * HW-bits: R read-only, I invalid
+ * SW-bits: y young, d dirty, r read, w write
*/
#define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */
@@ -417,8 +319,6 @@ static inline int is_module_addr(void *addr)
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
-#endif /* CONFIG_64BIT */
-
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
@@ -492,6 +392,15 @@ static inline int mm_has_pgste(struct mm_struct *mm)
return 0;
}
+static inline int mm_alloc_pgste(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (unlikely(mm->context.alloc_pgste))
+ return 1;
+#endif
+ return 0;
+}
+
/*
* In the case that a guest uses storage keys
* faults should no longer be backed by zero pages
@@ -509,19 +418,6 @@ static inline int mm_use_skey(struct mm_struct *mm)
/*
* pgd/pmd/pte query functions
*/
-#ifndef CONFIG_64BIT
-
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-
-static inline int pud_present(pud_t pud) { return 1; }
-static inline int pud_none(pud_t pud) { return 0; }
-static inline int pud_large(pud_t pud) { return 0; }
-static inline int pud_bad(pud_t pud) { return 0; }
-
-#else /* CONFIG_64BIT */
-
static inline int pgd_present(pgd_t pgd)
{
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
@@ -583,8 +479,6 @@ static inline int pud_bad(pud_t pud)
return (pud_val(pud) & mask) != 0;
}
-#endif /* CONFIG_64BIT */
-
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
@@ -666,10 +560,9 @@ static inline int pte_none(pte_t pte)
static inline int pte_swap(pte_t pte)
{
- /* Bit pattern: (pte & 0x603) == 0x402 */
- return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
- _PAGE_TYPE | _PAGE_PRESENT))
- == (_PAGE_INVALID | _PAGE_TYPE);
+ /* Bit pattern: (pte & 0x201) == 0x200 */
+ return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
+ == _PAGE_PROTECT;
}
static inline int pte_special(pte_t pte)
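As a cross-check of the pte encoding table and the new pte_swap() test above, here is a minimal userspace sketch of the three predicates. The 0x400/0x200/0x001 constants mirror _PAGE_INVALID, _PAGE_PROTECT and _PAGE_PRESENT from this header; the model_* names and the standalone main() are illustrative only, not kernel code.

#include <assert.h>

#define MODEL_PAGE_INVALID	0x400UL	/* HW invalid bit (I) */
#define MODEL_PAGE_PROTECT	0x200UL	/* HW read-only/protect bit (R) */
#define MODEL_PAGE_PRESENT	0x001UL	/* SW present bit (p) */

static int model_pte_none(unsigned long pte)
{
	return pte == MODEL_PAGE_INVALID;		/* pte == 0x400 */
}

static int model_pte_swap(unsigned long pte)
{
	return (pte & (MODEL_PAGE_PROTECT | MODEL_PAGE_PRESENT)) ==
		MODEL_PAGE_PROTECT;			/* (pte & 0x201) == 0x200 */
}

static int model_pte_present(unsigned long pte)
{
	return (pte & MODEL_PAGE_PRESENT) != 0;		/* (pte & 0x001) == 0x001 */
}

int main(void)
{
	assert(model_pte_none(0x400));			/* "empty" row of the table */
	assert(model_pte_swap(0x600 | (3 << 2)));	/* swap entry with type 3 */
	assert(model_pte_present(0x215));		/* "read-only, clean, young" row */
	return 0;
}

All three asserts correspond to rows of the bit-combination table in the hunk above.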
@@ -916,18 +809,14 @@ static inline int pte_unused(pte_t pte)
static inline void pgd_clear(pgd_t *pgd)
{
-#ifdef CONFIG_64BIT
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
-#endif
}
static inline void pud_clear(pud_t *pud)
{
-#ifdef CONFIG_64BIT
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
-#endif
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -1026,10 +915,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + global TLB flush for the pte */
asm volatile(
" ipte %2,%3"
@@ -1040,10 +925,6 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + local TLB flush for the pte */
asm volatile(
" .insn rrf,0xb2210000,%2,%3,0,1"
@@ -1054,10 +935,6 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidate a range of ptes + global TLB flush of the ptes */
do {
asm volatile(
@@ -1376,17 +1253,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#ifndef CONFIG_64BIT
-
-#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
-#define pud_deref(pmd) ({ BUG(); 0UL; })
-#define pgd_deref(pmd) ({ BUG(); 0UL; })
-
-#define pud_offset(pgd, address) ((pud_t *) pgd)
-#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
-
-#else /* CONFIG_64BIT */
-
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
@@ -1407,8 +1273,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return pmd + pmd_index(address);
}
-#endif /* CONFIG_64BIT */
-
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -1699,53 +1563,51 @@ static inline int has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * 31 bit swap entry format:
- * A page-table entry has some bits we have to treat in a special way.
- * Bits 0, 20 and bit 23 have to be zero, otherwise an specification
- * exception will occur instead of a page translation exception. The
- * specifiation exception has the bad habit not to store necessary
- * information in the lowcore.
- * Bits 21, 22, 30 and 31 are used to indicate the page type.
- * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
- * This leaves the bits 1-19 and bits 24-29 to store type and offset.
- * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
- * plus 24 for the offset.
- * 0| offset |0110|o|type |00|
- * 0 0000000001111111111 2222 2 22222 33
- * 0 1234567890123456789 0123 4 56789 01
- *
* 64 bit swap entry format:
* A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
* information in the lowcore.
- * Bits 53, 54, 62 and 63 are used to indicate the page type.
- * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
- * This leaves the bits 0-51 and bits 56-61 to store type and offset.
- * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
- * plus 56 for the offset.
- * | offset |0110|o|type |00|
- * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
- * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
+ * Bits 54 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
+ * This leaves the bits 0-51 and bits 56-62 to store type and offset.
+ * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
+ * for the offset.
+ * | offset |01100|type |00|
+ * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
+ * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
*/
-#ifndef CONFIG_64BIT
-#define __SWP_OFFSET_MASK (~0UL >> 12)
-#else
-#define __SWP_OFFSET_MASK (~0UL >> 11)
-#endif
+
+#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
+#define __SWP_OFFSET_SHIFT 12
+#define __SWP_TYPE_MASK ((1UL << 5) - 1)
+#define __SWP_TYPE_SHIFT 2
+
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
- offset &= __SWP_OFFSET_MASK;
- pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
- ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
+
+ pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
+ pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
+ pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
return pte;
}
-#define __swp_type(entry) (((entry).val >> 2) & 0x1f)
-#define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1))
-#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
+static inline unsigned long __swp_type(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
+}
+
+static inline unsigned long __swp_offset(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
+}
+
+static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
+{
+ return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
+}
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
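To make the new 64-bit swap layout concrete, here is a small standalone sketch that packs and unpacks an entry the same way mk_swap_pte(), __swp_type() and __swp_offset() above do. The shift/mask constants are copied from the hunk; the pack_swap() helper name and the hard-coded 0x400/0x200 values (_PAGE_INVALID/_PAGE_PROTECT) are assumptions made for this illustration.

#include <assert.h>

#define SWP_OFFSET_MASK		((1UL << 52) - 1)
#define SWP_OFFSET_SHIFT	12
#define SWP_TYPE_MASK		((1UL << 5) - 1)
#define SWP_TYPE_SHIFT		2

static unsigned long pack_swap(unsigned long type, unsigned long offset)
{
	unsigned long pte = 0x400UL | 0x200UL;	/* _PAGE_INVALID | _PAGE_PROTECT */

	pte |= (offset & SWP_OFFSET_MASK) << SWP_OFFSET_SHIFT;
	pte |= (type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT;
	return pte;
}

int main(void)
{
	unsigned long pte = pack_swap(5, 0x12345);

	assert(((pte >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == 5);		/* __swp_type()   */
	assert(((pte >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == 0x12345);	/* __swp_offset() */
	assert((pte & 0x201) == 0x200);		/* satisfies the pte_swap() test */
	return 0;
}

Because the type sits in bits 57-61 (value shift 2) and the offset in bits 0-51 (value shift 12), neither field can touch the protect, invalid or present bits, which is what lets pte_swap() get by with the single (pte & 0x201) == 0x200 check.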
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e7cbbdcdee13..dedb6218544b 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -19,7 +19,6 @@
#define _CIF_ASCE (1<<CIF_ASCE)
#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
-
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -66,13 +65,6 @@ extern void execve_tail(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
*/
-#ifndef CONFIG_64BIT
-
-#define TASK_SIZE (1UL << 31)
-#define TASK_MAX_SIZE (1UL << 31)
-#define TASK_UNMAPPED_BASE (1UL << 30)
-
-#else /* CONFIG_64BIT */
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
@@ -80,15 +72,8 @@ extern void execve_tail(void);
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_MAX_SIZE (1UL << 53)
-#endif /* CONFIG_64BIT */
-
-#ifndef CONFIG_64BIT
-#define STACK_TOP (1UL << 31)
-#define STACK_TOP_MAX (1UL << 31)
-#else /* CONFIG_64BIT */
#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX (1UL << 42)
-#endif /* CONFIG_64BIT */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
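Spelled out, the 64-bit-only limits kept above are plain powers of two: the comment's 4 TB and 8 PT figures are 1UL << 42 and 1UL << 53 (TASK_MAX_SIZE), and STACK_TOP is 1UL << 42 for 64-bit tasks or 1UL << 31 = 2 GB for 31-bit compat tasks. A couple of compile-time checks, as an illustrative sketch only:

/* Illustrative sanity checks of the power-of-two limits above. */
_Static_assert((1ULL << 31) == 2ULL * 1024 * 1024 * 1024,
	       "1 << 31 is 2 GiB (31-bit STACK_TOP)");
_Static_assert((1ULL << 42) == 4ULL * 1024 * 1024 * 1024 * 1024,
	       "1 << 42 is 4 TiB (64-bit STACK_TOP / STACK_TOP_MAX)");
_Static_assert((1ULL << 53) == 8ULL * 1024 * 1024 * 1024 * 1024 * 1024,
	       "1 << 53 is 8 PiB (TASK_MAX_SIZE)");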
@@ -115,10 +100,8 @@ struct thread_struct {
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
int ri_signum;
-#ifdef CONFIG_64BIT
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
__vector128 *vxrs; /* Vector register save area */
-#endif
};
/* Flag to disable transactions. */
@@ -181,11 +164,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;
-#ifdef CONFIG_64BIT
-extern void show_cacheinfo(struct seq_file *m);
-#else
-static inline void show_cacheinfo(struct seq_file *m) { }
-#endif
+void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -229,11 +208,7 @@ static inline void psw_set_key(unsigned int key)
*/
static inline void __load_psw(psw_t psw)
{
-#ifndef CONFIG_64BIT
- asm volatile("lpsw %0" : : "Q" (psw) : "cc");
-#else
asm volatile("lpswe %0" : : "Q" (psw) : "cc");
-#endif
}
/*
@@ -247,22 +222,12 @@ static inline void __load_psw_mask (unsigned long mask)
psw.mask = mask;
-#ifndef CONFIG_64BIT
- asm volatile(
- " basr %0,0\n"
- "0: ahi %0,1f-0b\n"
- " st %0,%O1+4(%R1)\n"
- " lpsw %1\n"
- "1:"
- : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#else /* CONFIG_64BIT */
asm volatile(
" larl %0,1f\n"
" stg %0,%O1+8(%R1)\n"
" lpswe %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#endif /* CONFIG_64BIT */
}
/*
@@ -270,20 +235,12 @@ static inline void __load_psw_mask (unsigned long mask)
*/
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
-#ifndef CONFIG_64BIT
- if (psw.addr & PSW_ADDR_AMODE)
- /* 31 bit mode */
- return (psw.addr - ilc) | PSW_ADDR_AMODE;
- /* 24 bit mode */
- return (psw.addr - ilc) & ((1UL << 24) - 1);
-#else
unsigned long mask;
mask = (psw.mask & PSW_MASK_EA) ? -1UL :
(psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
(1UL << 24) - 1;
return (psw.addr - ilc) & mask;
-#endif
}
/*
@@ -305,26 +262,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
* Store status and then load disabled wait psw,
* the processor is dead afterwards
*/
-#ifndef CONFIG_64BIT
- asm volatile(
- " stctl 0,0,0(%2)\n"
- " ni 0(%2),0xef\n" /* switch off protection */
- " lctl 0,0,0(%2)\n"
- " stpt 0xd8\n" /* store timer */
- " stckc 0xe0\n" /* store clock comparator */
- " stpx 0x108\n" /* store prefix register */
- " stam 0,15,0x120\n" /* store access registers */
- " std 0,0x160\n" /* store f0 */
- " std 2,0x168\n" /* store f2 */
- " std 4,0x170\n" /* store f4 */
- " std 6,0x178\n" /* store f6 */
- " stm 0,15,0x180\n" /* store general registers */
- " stctl 0,15,0x1c0\n" /* store control registers */
- " oi 0x1c0,0x10\n" /* fake protection bit */
- " lpsw 0(%1)"
- : "=m" (ctl_buf)
- : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
-#else /* CONFIG_64BIT */
asm volatile(
" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */
@@ -357,7 +294,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
" lpswe 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
-#endif /* CONFIG_64BIT */
while (1);
}
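The __rewind_psw() helper kept a little above is the one place in this file that still has to care about 24-, 31- and 64-bit addressing at run time, because the PSW of a compat task can be in any of the three modes. Below is a behavioural sketch of the mask selection; the two flag parameters stand in for the PSW_MASK_EA/PSW_MASK_BA tests, and the model_ name is made up — this is a model, not the kernel code.

static unsigned long model_rewind_addr(unsigned long addr, unsigned long ilc,
				       int ea, int ba)
{
	unsigned long mask;

	mask = ea ? -1UL :			/* 64-bit mode: full address */
	       ba ? (1UL << 31) - 1 :		/* 31-bit mode: low 31 bits  */
		   (1UL << 24) - 1;		/* 24-bit mode: low 24 bits  */
	return (addr - ilc) & mask;
}

For example, rewinding a 4-byte instruction past address 0 in 24-bit mode wraps within the low 24 bits: model_rewind_addr(0x2, 4, 0, 0) == 0xfffffe rather than an underflowed 64-bit value.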
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index be317feff7ac..6feda2599282 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -40,12 +40,8 @@ struct psw_bits {
unsigned long long ri : 1; /* Runtime Instrumentation */
unsigned long long : 6;
unsigned long long eaba : 2; /* Addressing Mode */
-#ifdef CONFIG_64BIT
unsigned long long : 31;
unsigned long long ia : 64;/* Instruction Address */
-#else
- unsigned long long ia : 31;/* Instruction Address */
-#endif
};
enum {
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 06f3034605a1..998b61cd0e56 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -211,11 +211,6 @@ struct qdio_buffer_element {
u8 scount;
u8 sflags;
u32 length;
-#ifdef CONFIG_32BIT
- /* private: */
- void *res2;
- /* public: */
-#endif
void *addr;
} __attribute__ ((packed, aligned(16)));
@@ -232,11 +227,6 @@ struct qdio_buffer {
* @sbal: absolute SBAL address
*/
struct sl_element {
-#ifdef CONFIG_32BIT
- /* private: */
- unsigned long reserved;
- /* public: */
-#endif
unsigned long sbal;
} __attribute__ ((packed));
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 830da737ff85..402ad6df4897 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -72,27 +72,19 @@ static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
{
-#ifdef CONFIG_64BIT
if (cb_prev)
store_runtime_instr_cb(cb_prev);
-#endif
}
static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
struct runtime_instr_cb *cb_prev)
{
-#ifdef CONFIG_64BIT
if (cb_next)
load_runtime_instr_cb(cb_next);
else if (cb_prev)
load_runtime_instr_cb(&runtime_instr_empty_cb);
-#endif
}
-#ifdef CONFIG_64BIT
-extern void exit_thread_runtime_instr(void);
-#else
-static inline void exit_thread_runtime_instr(void) { }
-#endif
+void exit_thread_runtime_instr(void);
#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 487f9b64efb9..4b43ee7e6776 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -39,17 +39,10 @@
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
-#ifndef CONFIG_64BIT
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
-#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
@@ -61,19 +54,11 @@ static inline void __down_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -89,15 +74,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: ltr %1,%0\n"
- " jm 1f\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b\n"
- "1:"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: ltgr %1,%0\n"
" jm 1f\n"
@@ -105,7 +81,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" csg %0,%1,%2\n"
" jl 0b\n"
"1:"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -121,19 +96,11 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -154,19 +121,11 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
signed long old;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%1\n"
- "0: ltr %0,%0\n"
- " jnz 1f\n"
- " cs %0,%3,%1\n"
- " jl 0b\n"
-#else /* CONFIG_64BIT */
" lg %0,%1\n"
"0: ltgr %0,%0\n"
" jnz 1f\n"
" csg %0,%3,%1\n"
" jl 0b\n"
-#endif /* CONFIG_64BIT */
"1:"
: "=&d" (old), "=Q" (sem->count)
: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -182,19 +141,11 @@ static inline void __up_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -212,19 +163,11 @@ static inline void __up_write(struct rw_semaphore *sem)
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -242,19 +185,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
tmp = -RWSEM_WAITING_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -270,19 +205,11 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ar %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
@@ -296,19 +223,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ar %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b8d1e54b4733..b8ffc1bd0a9f 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -15,19 +15,11 @@
#include <asm/lowcore.h>
#include <asm/types.h>
-#ifndef CONFIG_64BIT
-#define IPL_DEVICE (*(unsigned long *) (0x10404))
-#define INITRD_START (*(unsigned long *) (0x1040C))
-#define INITRD_SIZE (*(unsigned long *) (0x10414))
-#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
-#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
-#else /* CONFIG_64BIT */
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
#define INITRD_SIZE (*(unsigned long *) (0x10410))
#define OLDMEM_BASE (*(unsigned long *) (0x10418))
#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
-#endif /* CONFIG_64BIT */
#define COMMAND_LINE ((char *) (0x10480))
extern int memory_end_set;
@@ -68,26 +60,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
-#ifndef CONFIG_64BIT
-#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
-#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
-#define MACHINE_HAS_IDTE (0)
-#define MACHINE_HAS_DIAG44 (1)
-#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
-#define MACHINE_HAS_EDAT1 (0)
-#define MACHINE_HAS_EDAT2 (0)
-#define MACHINE_HAS_LPP (0)
-#define MACHINE_HAS_TOPOLOGY (0)
-#define MACHINE_HAS_TE (0)
-#define MACHINE_HAS_TLB_LC (0)
-#define MACHINE_HAS_VX (0)
-#define MACHINE_HAS_CAD (0)
-#else /* CONFIG_64BIT */
-#define MACHINE_HAS_IEEE (1)
-#define MACHINE_HAS_CSP (1)
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
-#define MACHINE_HAS_MVPG (1)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
@@ -96,7 +70,6 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
-#endif /* CONFIG_64BIT */
/*
* Console mode. Override with conmode=
@@ -135,19 +108,11 @@ extern void (*_machine_power_off)(void);
#else /* __ASSEMBLY__ */
-#ifndef CONFIG_64BIT
-#define IPL_DEVICE 0x10404
-#define INITRD_START 0x1040C
-#define INITRD_SIZE 0x10414
-#define OLDMEM_BASE 0x1041C
-#define OLDMEM_SIZE 0x10424
-#else /* CONFIG_64BIT */
#define IPL_DEVICE 0x10400
#define INITRD_START 0x10408
#define INITRD_SIZE 0x10410
#define OLDMEM_BASE 0x10418
#define OLDMEM_SIZE 0x10420
-#endif /* CONFIG_64BIT */
#define COMMAND_LINE 0x10480
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 5959bfb3b693..c8b7cf9d6279 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,6 @@
wl = __wl; \
})
-#ifdef CONFIG_64BIT
#define udiv_qrnnd(q, r, n1, n0, d) \
do { unsigned long __n; \
unsigned int __r, __d; \
@@ -60,15 +59,6 @@
(q) = __n / __d; \
(r) = __n % __d; \
} while (0)
-#else
-#define udiv_qrnnd(q, r, n1, n0, d) \
- do { unsigned int __r; \
- (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
- (r) = __r; \
- } while (0)
-extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
- unsigned int , unsigned int);
-#endif
#define UDIV_NEEDS_NORMALIZATION 0
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index a60d085ddb4d..487428b6d099 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -1,16 +1,7 @@
#ifndef _ASM_S390_SPARSEMEM_H
#define _ASM_S390_SPARSEMEM_H
-#ifdef CONFIG_64BIT
-
#define SECTION_SIZE_BITS 28
#define MAX_PHYSMEM_BITS 46
-#else
-
-#define SECTION_SIZE_BITS 25
-#define MAX_PHYSMEM_BITS 31
-
-#endif /* CONFIG_64BIT */
-
#endif /* _ASM_S390_SPARSEMEM_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 2542a7e4c8b4..d62e7a69605f 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -18,9 +18,6 @@ static inline int test_fp_ctl(u32 fpc)
u32 orig_fpc;
int rc;
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
" efpc %1\n"
" sfpc %2\n"
@@ -35,9 +32,6 @@ static inline int test_fp_ctl(u32 fpc)
static inline void save_fp_ctl(u32 *fpc)
{
- if (!MACHINE_HAS_IEEE)
- return;
-
asm volatile(
" stfpc %0\n"
: "+Q" (*fpc));
@@ -47,9 +41,6 @@ static inline int restore_fp_ctl(u32 *fpc)
{
int rc;
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
" lfpc %1\n"
"0: la %0,0\n"
@@ -65,8 +56,6 @@ static inline void save_fp_regs(freg_t *fprs)
asm volatile("std 2,%0" : "=Q" (fprs[2]));
asm volatile("std 4,%0" : "=Q" (fprs[4]));
asm volatile("std 6,%0" : "=Q" (fprs[6]));
- if (!MACHINE_HAS_IEEE)
- return;
asm volatile("std 1,%0" : "=Q" (fprs[1]));
asm volatile("std 3,%0" : "=Q" (fprs[3]));
asm volatile("std 5,%0" : "=Q" (fprs[5]));
@@ -87,8 +76,6 @@ static inline void restore_fp_regs(freg_t *fprs)
asm volatile("ld 2,%0" : : "Q" (fprs[2]));
asm volatile("ld 4,%0" : : "Q" (fprs[4]));
asm volatile("ld 6,%0" : : "Q" (fprs[6]));
- if (!MACHINE_HAS_IEEE)
- return;
asm volatile("ld 1,%0" : : "Q" (fprs[1]));
asm volatile("ld 3,%0" : : "Q" (fprs[3]));
asm volatile("ld 5,%0" : : "Q" (fprs[5]));
@@ -140,22 +127,18 @@ static inline void restore_vx_regs(__vector128 *vxrs)
static inline void save_fp_vx_regs(struct task_struct *task)
{
-#ifdef CONFIG_64BIT
if (task->thread.vxrs)
save_vx_regs(task->thread.vxrs);
else
-#endif
- save_fp_regs(task->thread.fp_regs.fprs);
+ save_fp_regs(task->thread.fp_regs.fprs);
}
static inline void restore_fp_vx_regs(struct task_struct *task)
{
-#ifdef CONFIG_64BIT
if (task->thread.vxrs)
restore_vx_regs(task->thread.vxrs);
else
-#endif
- restore_fp_regs(task->thread.fp_regs.fprs);
+ restore_fp_regs(task->thread.fp_regs.fprs);
}
static inline void save_access_regs(unsigned int *acrs)
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 5bc12598ae9e..6ba0bf928909 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -95,6 +95,6 @@ static inline int syscall_get_arch(void)
if (test_tsk_thread_flag(current, TIF_31BIT))
return AUDIT_ARCH_S390;
#endif
- return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
+ return AUDIT_ARCH_S390X;
}
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ef1df718642d..4c27ec764c36 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -10,13 +10,8 @@
/*
* Size of kernel stack for each process
*/
-#ifndef CONFIG_64BIT
-#define THREAD_ORDER 1
-#define ASYNC_ORDER 1
-#else /* CONFIG_64BIT */
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
-#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
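With the 31-bit orders gone, the kernel stack size follows directly from THREAD_ORDER = 2 and the page size. Assuming the usual s390 4 KiB pages (PAGE_SHIFT = 12 is an assumption here, not shown in this hunk), the arithmetic works out as in this sketch:

#define MODEL_PAGE_SIZE		(1UL << 12)		/* assumed 4 KiB pages */
#define MODEL_THREAD_ORDER	2
#define MODEL_THREAD_SIZE	(MODEL_PAGE_SIZE << MODEL_THREAD_ORDER)

_Static_assert(MODEL_THREAD_SIZE == 16 * 1024,
	       "THREAD_ORDER 2 means a 16 KiB kernel stack");

ASYNC_SIZE works out the same way, so the async stack keeps the same 16 KiB size as the process stack.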
@@ -34,7 +29,6 @@
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
@@ -51,7 +45,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
@@ -66,6 +59,8 @@ static inline struct thread_info *current_thread_info(void)
return (struct thread_info *) S390_lowcore.thread_info;
}
+void arch_release_task_struct(struct task_struct *tsk);
+
#define THREAD_SIZE_ORDER THREAD_ORDER
#endif
@@ -99,10 +94,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#ifdef CONFIG_64BIT
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
-#else
-#define is_32bit_task() (1)
-#endif
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 06d8741ad6f4..7a92e69c50bc 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -118,12 +118,10 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
-#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
-#endif
}
/*
@@ -136,11 +134,9 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
-#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
tlb_remove_table(tlb, pud);
-#endif
}
#define tlb_start_vma(tlb, vma) do { } while (0)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 16c9c88658c8..ca148f7c3eaa 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -49,13 +49,6 @@ static inline void __tlb_flush_global(void)
register unsigned long reg4 asm("4");
long dummy;
-#ifndef CONFIG_64BIT
- if (!MACHINE_HAS_CSP) {
- smp_ptlb_all();
- return;
- }
-#endif /* CONFIG_64BIT */
-
dummy = 0;
reg2 = reg3 = 0;
reg4 = ((unsigned long) &dummy) + 1;
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index dccef3ca91fa..6740f4f9781f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -8,21 +8,4 @@
#include <uapi/asm/types.h>
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-
-#ifndef __ASSEMBLY__
-
-#ifndef CONFIG_64BIT
-typedef union {
- unsigned long long pair;
- struct {
- unsigned long even;
- unsigned long odd;
- } subreg;
-} register_pair;
-
-#endif /* ! CONFIG_64BIT */
-#endif /* __ASSEMBLY__ */
#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cd4c68e0398d..d64a7a62164f 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -372,5 +372,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
}
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
+void s390_kernel_write(void *dst, const void *src, size_t size);
#endif /* __S390_UACCESS_H */
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 651886353551..91f56b1d8156 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,11 +9,7 @@
#include <uapi/asm/unistd.h>
-#ifndef CONFIG_64BIT
-#define __IGNORE_select
-#else
#define __IGNORE_time
-#endif
/* Ignore NUMA system calls. Not wired up on s390. */
#define __IGNORE_mbind
@@ -43,10 +39,6 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-# ifndef CONFIG_64BIT
-# define __ARCH_WANT_STAT64
-# define __ARCH_WANT_SYS_TIME
-# endif
# ifdef CONFIG_COMPAT
# define __ARCH_WANT_COMPAT_SYS_TIME
# endif
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a62526d09201..787acd4f9668 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -42,10 +42,8 @@ struct vdso_per_cpu_data {
extern struct vdso_data *vdso_data;
-#ifdef CONFIG_64BIT
int vdso_alloc_per_cpu(struct _lowcore *lowcore);
void vdso_free_per_cpu(struct _lowcore *lowcore);
-#endif
#endif /* __ASSEMBLY__ */
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 31fab2676fe9..ffb87617a36c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -26,25 +26,21 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
+CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += dumpstack.o
+obj-y += runtime_instr.o cache.o dumpstack.o
+obj-y += entry.o reipl.o relocate_kernel.o
-obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
-obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
-obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
-
-extra-y += head.o vmlinux.lds
-extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
+extra-y += head.o head64.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_BOOK) += topology.o
-obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
+obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
@@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
-ifdef CONFIG_64BIT
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
- perf_cpum_cf_events.o
-obj-y += runtime_instr.o cache.o
-endif
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
# vdso
-obj-$(CONFIG_64BIT) += vdso64/
-obj-$(CONFIG_32BIT) += vdso32/
+obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 8dc4db10d160..c7d1b9d09011 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -34,7 +34,6 @@ int main(void)
DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
- DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
@@ -166,9 +165,6 @@ int main(void)
DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
-#ifdef CONFIG_32BIT
- DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
-#else /* CONFIG_32BIT */
DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
@@ -184,6 +180,5 @@ int main(void)
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
-#endif /* CONFIG_32BIT */
return 0;
}
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f74a53d339b0..daed3fde42ec 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -11,8 +11,6 @@
#include <asm/ptrace.h>
#include <asm/sigp.h>
-#ifdef CONFIG_64BIT
-
ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
@@ -131,77 +129,3 @@ ENTRY(diag308_reset)
.Lfpctl:
.long 0
.previous
-
-#else /* CONFIG_64BIT */
-
-ENTRY(s390_base_mcck_handler)
- basr %r13,0
-0: l %r15,__LC_PANIC_STACK # load panic stack
- ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
- lpsw __LC_MCK_OLD_PSW
-
-2: .long s390_base_mcck_handler_fn
-
- .section .bss
- .align 4
- .globl s390_base_mcck_handler_fn
-s390_base_mcck_handler_fn:
- .long 0
- .previous
-
-ENTRY(s390_base_ext_handler)
- stm %r0,%r15,__LC_SAVE_AREA_ASYNC
- basr %r13,0
-0: ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC
- ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
- lpsw __LC_EXT_OLD_PSW
-
-2: .long s390_base_ext_handler_fn
-
- .section .bss
- .align 4
- .globl s390_base_ext_handler_fn
-s390_base_ext_handler_fn:
- .long 0
- .previous
-
-ENTRY(s390_base_pgm_handler)
- stm %r0,%r15,__LC_SAVE_AREA_SYNC
- basr %r13,0
-0: ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
- lm %r0,%r15,__LC_SAVE_AREA_SYNC
- lpsw __LC_PGM_OLD_PSW
-
-1: lpsw disabled_wait_psw-0b(%r13)
-
-2: .long s390_base_pgm_handler_fn
-
-disabled_wait_psw:
- .align 8
- .long 0x000a0000,0x00000000 + s390_base_pgm_handler
-
- .section .bss
- .align 4
- .globl s390_base_pgm_handler_fn
-s390_base_pgm_handler_fn:
- .long 0
- .previous
-
-#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 0969d113b3d6..bff5e3b6d822 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -70,6 +70,8 @@ void show_cacheinfo(struct seq_file *m)
struct cacheinfo *cache;
int idx;
+ if (!test_facility(34))
+ return;
get_online_cpus();
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
@@ -159,6 +161,8 @@ int populate_cache_leaves(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
+ if (!test_facility(34))
+ return -EOPNOTSUPP;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index bc1df12dd4f8..fe8d6924efaa 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -370,16 +370,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
-static inline int map_signal(int sig)
-{
- if (current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32)
- return current_thread_info()->exec_domain->signal_invmap[sig];
- else
- return sig;
-}
-
static int setup_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
@@ -449,7 +439,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler;
- regs->gprs[2] = map_signal(sig);
+ regs->gprs[2] = sig;
regs->gprs[3] = (__force __u64) &frame->sc;
/* We forgot to include these in the sigcontext.
@@ -532,7 +522,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler;
- regs->gprs[2] = map_signal(ksig->sig);
+ regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d7b0c4d27880..199ec92ef4fe 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen)
register unsigned long reg3 asm ("3") = cmdlen;
asm volatile(
-#ifndef CONFIG_64BIT
- " diag %1,%0,0x8\n"
-#else /* CONFIG_64BIT */
" sam31\n"
" diag %1,%0,0x8\n"
" sam64\n"
-#endif /* CONFIG_64BIT */
: "+d" (reg3) : "d" (reg2) : "cc");
return reg3;
}
@@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
register unsigned long reg5 asm ("5") = *rlen;
asm volatile(
-#ifndef CONFIG_64BIT
- " diag %2,%0,0x8\n"
- " brc 8,1f\n"
- " ar %1,%4\n"
-#else /* CONFIG_64BIT */
" sam31\n"
" diag %2,%0,0x8\n"
" sam64\n"
" brc 8,1f\n"
" agr %1,%4\n"
-#endif /* CONFIG_64BIT */
"1:\n"
: "+d" (reg4), "+d" (reg5)
: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 8237fc07ac79..2f69243bf700 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
int rc = 0;
asm volatile(
-#ifdef CONFIG_64BIT
" sam31\n"
" diag %2,2,0x14\n"
" sam64\n"
-#else
- " diag %2,2,0x14\n"
-#endif
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc), "+d" (_ry2)
@@ -52,7 +48,6 @@ int diag210(struct diag210 *addr)
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
-#ifdef CONFIG_64BIT
asm volatile(
" lhi %0,-1\n"
" sam31\n"
@@ -62,16 +57,6 @@ int diag210(struct diag210 *addr)
"1: sam64\n"
EX_TABLE(0b, 1b)
: "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#else
- asm volatile(
- " lhi %0,-1\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#endif
*addr = diag210_tmp;
spin_unlock_irqrestore(&diag210_lock, flags);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 533430307da8..8140d10c6785 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -32,12 +32,6 @@
#include <asm/debug.h>
#include <asm/irq.h>
-#ifndef CONFIG_64BIT
-#define ONELONG "%08lx: "
-#else /* CONFIG_64BIT */
-#define ONELONG "%016lx: "
-#endif /* CONFIG_64BIT */
-
enum {
UNUSED, /* Indicates the end of the operand list */
R_8, /* GPR starting at position 8 */
@@ -536,12 +530,10 @@ static char *long_insn_name[] = {
};
static struct s390_insn opcode[] = {
-#ifdef CONFIG_64BIT
{ "bprp", 0xc5, INSTR_MII_UPI },
{ "bpp", 0xc7, INSTR_SMI_U0RDP },
{ "trtr", 0xd0, INSTR_SS_L0RDRD },
{ "lmd", 0xef, INSTR_SS_RRRDRD3 },
-#endif
{ "spm", 0x04, INSTR_RR_R0 },
{ "balr", 0x05, INSTR_RR_RR },
{ "bctr", 0x06, INSTR_RR_RR },
@@ -725,11 +717,9 @@ static struct s390_insn opcode[] = {
};
static struct s390_insn opcode_01[] = {
-#ifdef CONFIG_64BIT
{ "ptff", 0x04, INSTR_E },
{ "pfpo", 0x0a, INSTR_E },
{ "sam64", 0x0e, INSTR_E },
-#endif
{ "pr", 0x01, INSTR_E },
{ "upt", 0x02, INSTR_E },
{ "sckpf", 0x07, INSTR_E },
@@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = {
};
static struct s390_insn opcode_a5[] = {
-#ifdef CONFIG_64BIT
{ "iihh", 0x00, INSTR_RI_RU },
{ "iihl", 0x01, INSTR_RI_RU },
{ "iilh", 0x02, INSTR_RI_RU },
@@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = {
{ "llihl", 0x0d, INSTR_RI_RU },
{ "llilh", 0x0e, INSTR_RI_RU },
{ "llill", 0x0f, INSTR_RI_RU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_a7[] = {
-#ifdef CONFIG_64BIT
{ "tmhh", 0x02, INSTR_RI_RU },
{ "tmhl", 0x03, INSTR_RI_RU },
{ "brctg", 0x07, INSTR_RI_RP },
@@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = {
{ "aghi", 0x0b, INSTR_RI_RI },
{ "mghi", 0x0d, INSTR_RI_RI },
{ "cghi", 0x0f, INSTR_RI_RI },
-#endif
{ "tmlh", 0x00, INSTR_RI_RU },
{ "tmll", 0x01, INSTR_RI_RU },
{ "brc", 0x04, INSTR_RI_UP },
@@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = {
};
static struct s390_insn opcode_aa[] = {
-#ifdef CONFIG_64BIT
{ { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
{ "rion", 0x01, INSTR_RI_RI },
{ "tric", 0x02, INSTR_RI_RI },
{ "rioff", 0x03, INSTR_RI_RI },
{ { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_b2[] = {
-#ifdef CONFIG_64BIT
{ "stckf", 0x7c, INSTR_S_RD },
{ "lpp", 0x80, INSTR_S_RD },
{ "lcctl", 0x84, INSTR_S_RD },
@@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = {
{ "tend", 0xf8, INSTR_S_00 },
{ "niai", 0xfa, INSTR_IE_UU },
{ { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
-#endif
{ "stidp", 0x02, INSTR_S_RD },
{ "sck", 0x04, INSTR_S_RD },
{ "stck", 0x05, INSTR_S_RD },
@@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = {
};
static struct s390_insn opcode_b3[] = {
-#ifdef CONFIG_64BIT
{ "maylr", 0x38, INSTR_RRF_F0FF },
{ "mylr", 0x39, INSTR_RRF_F0FF },
{ "mayr", 0x3a, INSTR_RRF_F0FF },
@@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = {
{ "qaxtr", 0xfd, INSTR_RRF_FUFF },
{ "iextr", 0xfe, INSTR_RRF_F0FR },
{ "rrxtr", 0xff, INSTR_RRF_FFRU },
-#endif
{ "lpebr", 0x00, INSTR_RRE_FF },
{ "lnebr", 0x01, INSTR_RRE_FF },
{ "ltebr", 0x02, INSTR_RRE_FF },
@@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = {
};
static struct s390_insn opcode_b9[] = {
-#ifdef CONFIG_64BIT
{ "lpgr", 0x00, INSTR_RRE_RR },
{ "lngr", 0x01, INSTR_RRE_RR },
{ "ltgr", 0x02, INSTR_RRE_RR },
@@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = {
{ "srk", 0xf9, INSTR_RRF_R0RR2 },
{ "alrk", 0xfa, INSTR_RRF_R0RR2 },
{ "slrk", 0xfb, INSTR_RRF_R0RR2 },
-#endif
{ "kmac", 0x1e, INSTR_RRE_RR },
{ "lrvr", 0x1f, INSTR_RRE_RR },
{ "km", 0x2e, INSTR_RRE_RR },
@@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = {
};
static struct s390_insn opcode_c0[] = {
-#ifdef CONFIG_64BIT
{ "lgfi", 0x01, INSTR_RIL_RI },
{ "xihf", 0x06, INSTR_RIL_RU },
{ "xilf", 0x07, INSTR_RIL_RU },
@@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = {
{ "oilf", 0x0d, INSTR_RIL_RU },
{ "llihf", 0x0e, INSTR_RIL_RU },
{ "llilf", 0x0f, INSTR_RIL_RU },
-#endif
{ "larl", 0x00, INSTR_RIL_RP },
{ "brcl", 0x04, INSTR_RIL_UP },
{ "brasl", 0x05, INSTR_RIL_RP },
@@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = {
};
static struct s390_insn opcode_c2[] = {
-#ifdef CONFIG_64BIT
{ "msgfi", 0x00, INSTR_RIL_RI },
{ "msfi", 0x01, INSTR_RIL_RI },
{ "slgfi", 0x04, INSTR_RIL_RU },
@@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = {
{ "cfi", 0x0d, INSTR_RIL_RI },
{ "clgfi", 0x0e, INSTR_RIL_RU },
{ "clfi", 0x0f, INSTR_RIL_RU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c4[] = {
-#ifdef CONFIG_64BIT
{ "llhrl", 0x02, INSTR_RIL_RP },
{ "lghrl", 0x04, INSTR_RIL_RP },
{ "lhrl", 0x05, INSTR_RIL_RP },
@@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = {
{ "lrl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
{ "strl", 0x0f, INSTR_RIL_RP },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c6[] = {
-#ifdef CONFIG_64BIT
{ "exrl", 0x00, INSTR_RIL_RP },
{ "pfdrl", 0x02, INSTR_RIL_UP },
{ "cghrl", 0x04, INSTR_RIL_RP },
@@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = {
{ "crl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
{ "clrl", 0x0f, INSTR_RIL_RP },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c8[] = {
-#ifdef CONFIG_64BIT
{ "mvcos", 0x00, INSTR_SSF_RRDRD },
{ "ectg", 0x01, INSTR_SSF_RRDRD },
{ "csst", 0x02, INSTR_SSF_RRDRD },
{ "lpd", 0x04, INSTR_SSF_RRDRD2 },
{ "lpdg", 0x05, INSTR_SSF_RRDRD2 },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_cc[] = {
-#ifdef CONFIG_64BIT
{ "brcth", 0x06, INSTR_RIL_RP },
{ "aih", 0x08, INSTR_RIL_RI },
{ "alsih", 0x0a, INSTR_RIL_RI },
{ { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
{ "cih", 0x0d, INSTR_RIL_RI },
{ "clih", 0x0f, INSTR_RIL_RI },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_e3[] = {
-#ifdef CONFIG_64BIT
{ "ltg", 0x02, INSTR_RXY_RRRD },
{ "lrag", 0x03, INSTR_RXY_RRRD },
{ "lg", 0x04, INSTR_RXY_RRRD },
@@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = {
{ "clhf", 0xcf, INSTR_RXY_RRRD },
{ { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
{ { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
-#endif
{ "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD },
{ "strv", 0x3e, INSTR_RXY_RRRD },
@@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = {
};
static struct s390_insn opcode_e5[] = {
-#ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD },
{ "mvhhi", 0x44, INSTR_SIL_RDI },
{ "mvghi", 0x48, INSTR_SIL_RDI },
@@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = {
{ { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
-#endif
{ "lasp", 0x00, INSTR_SSE_RDRD },
{ "tprot", 0x01, INSTR_SSE_RDRD },
{ "mvcsk", 0x0e, INSTR_SSE_RDRD },
@@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = {
};
static struct s390_insn opcode_e7[] = {
-#ifdef CONFIG_64BIT
{ "lcbb", 0x27, INSTR_RXE_RRRDM },
{ "vgef", 0x13, INSTR_VRV_VVRDM },
{ "vgeg", 0x12, INSTR_VRV_VVRDM },
@@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = {
{ "vfsq", 0xce, INSTR_VRR_VV000MM },
{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
{ "vftci", 0x4a, INSTR_VRI_VVIMM },
-#endif
};
static struct s390_insn opcode_eb[] = {
-#ifdef CONFIG_64BIT
{ "lmg", 0x04, INSTR_RSY_RRRD },
{ "srag", 0x0a, INSTR_RSY_RRRD },
{ "slag", 0x0b, INSTR_RSY_RRRD },
@@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = {
{ "stric", 0x61, INSTR_RSY_RDRM },
{ "mric", 0x62, INSTR_RSY_RDRM },
{ { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
-#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
{ "tp", 0xc0, INSTR_RSL_R0RD },
@@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = {
};
static struct s390_insn opcode_ec[] = {
-#ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP },
{ { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
@@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = {
{ "clgib", 0xfd, INSTR_RIS_RURDU },
{ "cib", 0xfe, INSTR_RIS_RURDI },
{ "clib", 0xff, INSTR_RIS_RURDU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_ed[] = {
-#ifdef CONFIG_64BIT
{ "mayl", 0x38, INSTR_RXF_FRRDF },
{ "myl", 0x39, INSTR_RXF_FRRDF },
{ "may", 0x3a, INSTR_RXF_FRRDF },
@@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = {
{ "czxt", 0xa9, INSTR_RSL_LRDFU },
{ "cdzt", 0xaa, INSTR_RSL_LRDFU },
{ "cxzt", 0xab, INSTR_RSL_LRDFU },
-#endif
{ "ldeb", 0x04, INSTR_RXE_FRRD },
{ "lxdb", 0x05, INSTR_RXE_FRRD },
{ "lxeb", 0x06, INSTR_RXE_FRRD },
@@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
- ptr += sprintf(ptr, ONELONG, addr);
+ ptr += sprintf(ptr, "%016lx: ", addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index a99852e96a77..dc8e20473484 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -18,16 +18,6 @@
#include <asm/dis.h>
#include <asm/ipl.h>
-#ifndef CONFIG_64BIT
-#define LONG "%08lx "
-#define FOURLONG "%08lx %08lx %08lx %08lx\n"
-static int kstack_depth_to_print = 12;
-#else /* CONFIG_64BIT */
-#define LONG "%016lx "
-#define FOURLONG "%016lx %016lx %016lx %016lx\n"
-static int kstack_depth_to_print = 20;
-#endif /* CONFIG_64BIT */
-
/*
 * For show_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflown
@@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
else
stack = sp;
- for (i = 0; i < kstack_depth_to_print; i++) {
+ for (i = 0; i < 20; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
if ((i * sizeof(long) % 32) == 0)
printk("%s ", i == 0 ? "" : "\n");
- printk(LONG, *stack++);
+ printk("%016lx ", *stack++);
}
printk("\n");
show_trace(task, sp);
@@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
static void show_last_breaking_event(struct pt_regs *regs)
{
-#ifdef CONFIG_64BIT
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
-#endif
}
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs)
mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-#ifdef CONFIG_64BIT
printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
-#endif
- printk("\n%s GPRS: " FOURLONG, mode,
+ printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
show_code(regs);
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4427ab7ac23a..549a73a4b543 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,7 +64,6 @@ asm(
" .align 4\n"
" .type savesys_ipl_nss, @function\n"
"savesys_ipl_nss:\n"
-#ifdef CONFIG_64BIT
" stmg 6,15,48(15)\n"
" lgr 14,3\n"
" sam31\n"
@@ -72,13 +71,6 @@ asm(
" sam64\n"
" lgr 2,14\n"
" lmg 6,15,48(15)\n"
-#else
- " stm 6,15,24(15)\n"
- " lr 14,3\n"
- " diag 2,14,0x8\n"
- " lr 2,14\n"
- " lm 6,15,24(15)\n"
-#endif
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n"
" .previous\n");
@@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void)
static __init void setup_topology(void)
{
-#ifdef CONFIG_64BIT
int max_mnest;
if (!test_facility(11))
@@ -251,7 +242,6 @@ static __init void setup_topology(void)
break;
}
topology_max_mnest = max_mnest;
-#endif
}
static void early_pgm_check_handler(void)
@@ -290,58 +280,6 @@ static noinline __init void setup_facility_list(void)
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
}
-static __init void detect_mvpg(void)
-{
-#ifndef CONFIG_64BIT
- int rc;
-
- asm volatile(
- " la 0,0\n"
- " mvpg %2,%2\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
-#endif
-}
-
-static __init void detect_ieee(void)
-{
-#ifndef CONFIG_64BIT
- int rc, tmp;
-
- asm volatile(
- " efpc %1,0\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
-#endif
-}
-
-static __init void detect_csp(void)
-{
-#ifndef CONFIG_64BIT
- int rc;
-
- asm volatile(
- " la 0,0\n"
- " la 1,0\n"
- " la 2,4\n"
- " csp 0,2\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
-#endif
-}
-
static __init void detect_diag9c(void)
{
unsigned int cpu_address;
@@ -360,7 +298,6 @@ static __init void detect_diag9c(void)
static __init void detect_diag44(void)
{
-#ifdef CONFIG_64BIT
int rc;
asm volatile(
@@ -371,12 +308,10 @@ static __init void detect_diag44(void)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
if (!rc)
S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
-#endif
}
static __init void detect_machine_facilities(void)
{
-#ifdef CONFIG_64BIT
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
__ctl_set_bit(0, 23);
@@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-#endif
}
static int __init cad_setup(char *str)
@@ -501,9 +435,6 @@ void __init startup_init(void)
ipl_update_parameters();
setup_boot_command_line();
create_kernel_nss();
- detect_mvpg();
- detect_ieee();
- detect_csp();
detect_diag9c();
detect_diag44();
detect_machine_facilities();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 398329b2b518..99b44acbfcc7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -22,27 +22,28 @@
#include <asm/irq.h>
__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 4
-__PT_R2 = __PT_GPRS + 8
-__PT_R3 = __PT_GPRS + 12
-__PT_R4 = __PT_GPRS + 16
-__PT_R5 = __PT_GPRS + 20
-__PT_R6 = __PT_GPRS + 24
-__PT_R7 = __PT_GPRS + 28
-__PT_R8 = __PT_GPRS + 32
-__PT_R9 = __PT_GPRS + 36
-__PT_R10 = __PT_GPRS + 40
-__PT_R11 = __PT_GPRS + 44
-__PT_R12 = __PT_GPRS + 48
-__PT_R13 = __PT_GPRS + 52
-__PT_R14 = __PT_GPRS + 56
-__PT_R15 = __PT_GPRS + 60
+__PT_R1 = __PT_GPRS + 8
+__PT_R2 = __PT_GPRS + 16
+__PT_R3 = __PT_GPRS + 24
+__PT_R4 = __PT_GPRS + 32
+__PT_R5 = __PT_GPRS + 40
+__PT_R6 = __PT_GPRS + 48
+__PT_R7 = __PT_GPRS + 56
+__PT_R8 = __PT_GPRS + 64
+__PT_R9 = __PT_GPRS + 72
+__PT_R10 = __PT_GPRS + 80
+__PT_R11 = __PT_GPRS + 88
+__PT_R12 = __PT_GPRS + 96
+__PT_R13 = __PT_GPRS + 104
+__PT_R14 = __PT_GPRS + 112
+__PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
+ _TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
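
From here on, the flag words tested against _TIF_WORK/_CIF_WORK/_PIF_WORK are 8 bytes wide, which is why the single-byte tm tests later in the patch move from offset +3 to +7: on big-endian s390 the low-order flag bits sit in the last byte of the field. A purely illustrative C example (the flag constant below is made up):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TIF_FLAG (1UL << 0)	/* illustrative value only */

int main(void)
{
	uint64_t flags = EXAMPLE_TIF_FLAG;
	uint8_t mem[8];

	/* store the value in big-endian byte order, as the hardware does */
	for (int i = 0; i < 8; i++)
		mem[i] = (uint8_t)(flags >> (8 * (7 - i)));
	printf("byte 3: %02x  byte 7: %02x\n", mem[3], mem[7]);
	return 0;
}
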
@@ -53,16 +54,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lc_hardirqs_on)
- basr %r14,%r1 # call trace_hardirqs_on_caller
+ brasl %r14,trace_hardirqs_on_caller
#endif
.endm
.macro TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lc_hardirqs_off)
- basr %r14,%r1 # call trace_hardirqs_off_caller
+ brasl %r14,trace_hardirqs_off_caller
#endif
.endm
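
A pattern repeated in the hunks below: the 31-bit sequence "l %r1,BASED(.Lc_xxx)" followed by "basr %r14,%r1" becomes a single "brasl %r14,symbol", since the 64-bit relative call reaches the target directly and the .Lc_* literal pool near the end of the file can be dropped. As a loose C analogy only (an assumption for illustration, not how the assembler works), this is the difference between calling through an address table and calling the symbol directly:

#include <stdio.h>

static void do_signal_stub(void) { puts("do_signal"); }

/* 31-bit style: the routine's address is kept in a pool and loaded first */
static void (*const literal_pool[])(void) = { do_signal_stub };

int main(void)
{
	literal_pool[0]();	/* "l + basr" through the pool */
	do_signal_stub();	/* "brasl"-style direct, pc-relative call */
	return 0;
}
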
@@ -70,73 +69,104 @@ _PIF_WORK = (_PIF_PER_TRAP)
#ifdef CONFIG_LOCKDEP
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jz .+10
- l %r1,BASED(.Lc_lockdep_sys_exit)
- basr %r14,%r1 # call lockdep_sys_exit
+ brasl %r14,lockdep_sys_exit
+#endif
+ .endm
+
+ .macro LPP newpp
+#if IS_ENABLED(CONFIG_KVM)
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
+ jz .+8
+ .insn s,0xb2800000,\newpp
+#endif
+ .endm
+
+ .macro HANDLE_SIE_INTERCEPT scratch,reason
+#if IS_ENABLED(CONFIG_KVM)
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz .+62
+ lgr \scratch,%r9
+ slg \scratch,BASED(.Lsie_critical)
+ clg \scratch,BASED(.Lsie_critical_length)
+ .if \reason==1
+ # Some program interrupts are suppressing (e.g. protection).
+ # We must also check the instruction after SIE in that case.
+ # do_protection_exception will rewind to .Lrewind_pad
+ jh .+42
+ .else
+ jhe .+42
+ .endif
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ LPP __SF_EMPTY+16(%r15) # set host id
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+ mvi __SF_EMPTY+31(%r15),\reason # set exit reason
#endif
.endm
.macro CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
tml %r15,\stacksize - CONFIG_STACK_GUARD
- la %r14,\savearea
+ lghi %r14,\savearea
jz stack_overflow
#endif
.endm
.macro SWITCH_ASYNC savearea,stack,shift
- tmh %r8,0x0001 # interrupting from user ?
+ tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
- lr %r14,%r9
- sl %r14,BASED(.Lc_critical_start)
- cl %r14,BASED(.Lc_critical_length)
+ lgr %r14,%r9
+ slg %r14,BASED(.Lcritical_start)
+ clg %r14,BASED(.Lcritical_length)
jhe 0f
- la %r11,\savearea # inside critical section, do cleanup
- bras %r14,cleanup_critical
- tmh %r8,0x0001 # retest problem state after cleanup
+ lghi %r11,\savearea # inside critical section, do cleanup
+ brasl %r14,cleanup_critical
+ tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f
-0: l %r14,\stack # are we already on the target stack?
- slr %r14,%r15
- sra %r14,\shift
+0: lg %r14,\stack # are we already on the target stack?
+ slgr %r14,%r15
+ srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: l %r15,\stack # load target stack
+1: lg %r15,\stack # load target stack
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
- .macro ADD64 high,low,timer
- al \high,\timer
- al \low,4+\timer
- brc 12,.+8
- ahi \high,1
- .endm
-
- .macro SUB64 high,low,timer
- sl \high,\timer
- sl \low,4+\timer
- brc 3,.+8
- ahi \high,-1
+ .macro UPDATE_VTIME scratch,enter_timer
+ lg \scratch,__LC_EXIT_TIMER
+ slg \scratch,\enter_timer
+ alg \scratch,__LC_USER_TIMER
+ stg \scratch,__LC_USER_TIMER
+ lg \scratch,__LC_LAST_UPDATE_TIMER
+ slg \scratch,__LC_EXIT_TIMER
+ alg \scratch,__LC_SYSTEM_TIMER
+ stg \scratch,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
- .macro UPDATE_VTIME high,low,enter_timer
- lm \high,\low,__LC_EXIT_TIMER
- SUB64 \high,\low,\enter_timer
- ADD64 \high,\low,__LC_USER_TIMER
- stm \high,\low,__LC_USER_TIMER
- lm \high,\low,__LC_LAST_UPDATE_TIMER
- SUB64 \high,\low,__LC_EXIT_TIMER
- ADD64 \high,\low,__LC_SYSTEM_TIMER
- stm \high,\low,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
+ .macro LAST_BREAK scratch
+ srag \scratch,%r10,23
+ jz .+10
+ stg %r10,__TI_last_break(%r12)
.endm
.macro REENABLE_IRQS
- st %r8,__LC_RETURN_PSW
+ stg %r8,__LC_RETURN_PSW
ni __LC_RETURN_PSW,0xbf
ssm __LC_RETURN_PSW
.endm
+ .macro STCK savearea
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ .insn s,0xb27c0000,\savearea # store clock fast
+#else
+ .insn s,0xb2050000,\savearea # store clock
+#endif
+ .endm
+
.section .kprobes.text, "ax"
/*
@@ -147,19 +177,19 @@ _PIF_WORK = (_PIF_PER_TRAP)
* gpr2 = prev
*/
ENTRY(__switch_to)
- stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- l %r4,__THREAD_info(%r2) # get thread_info of prev
- l %r5,__THREAD_info(%r3) # get thread_info of next
- lr %r15,%r5
- ahi %r15,STACK_INIT # end of kernel stack of next
- st %r3,__LC_CURRENT # store task struct of next
- st %r5,__LC_THREAD_INFO # store thread info of next
- st %r15,__LC_KERNEL_STACK # store end of kernel stack
+ stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
+ stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
+ lg %r4,__THREAD_info(%r2) # get thread_info of prev
+ lg %r5,__THREAD_info(%r3) # get thread_info of next
+ lgr %r15,%r5
+ aghi %r15,STACK_INIT # end of kernel stack of next
+ stg %r3,__LC_CURRENT # store task struct of next
+ stg %r5,__LC_THREAD_INFO # store thread info of next
+ stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
- l %r15,__THREAD_ksp(%r3) # load kernel stack of next
- lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
+ mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
+ lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
+ lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14
.L__critical_start:
@@ -170,75 +200,83 @@ ENTRY(__switch_to)
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stm:
- stm %r8,%r15,__LC_SAVE_AREA_SYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lhi %r14,_PIF_SYSCALL
+.Lsysc_stmg:
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ lghi %r14,_PIF_SYSCALL
.Lsysc_per:
- l %r15,__LC_KERNEL_STACK
+ lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
.Lsysc_vtime:
- UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
+ UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r13
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
- st %r14,__PT_FLAGS(%r11)
+ stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
- l %r10,__TI_sysc_table(%r12) # 31 bit system call table
- lh %r8,__PT_INT_CODE+2(%r11)
- sla %r8,2 # shift and test for svc0
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
+ llgh %r8,__PT_INT_CODE+2(%r11)
+ slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
- cl %r1,BASED(.Lnr_syscalls)
+ llgfr %r1,%r1 # clear high word in r1
+ cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
- lr %r8,%r1
- sla %r8,2
+ slag %r8,%r1,2
.Lsysc_nr_ok:
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- st %r2,__PT_ORIG_GPR2(%r11)
- st %r7,STACK_FRAME_OVERHEAD(%r15)
- l %r9,0(%r8,%r10) # get system call addr.
- tm __TI_flags+3(%r12),_TIF_TRACE
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stg %r2,__PT_ORIG_GPR2(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lgf %r9,0(%r8,%r10) # get system call add.
+ tm __TI_flags+7(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
basr %r14,%r9 # call sys_xxxx
- st %r2,__PT_R2(%r11) # store return value
+ stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
LOCKDEP_SYS_EXIT
.Lsysc_tif:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lsysc_restore
- tm __PT_FLAGS+3(%r11),_PIF_WORK
+ tm __PT_FLAGS+7(%r11),_PIF_WORK
jnz .Lsysc_work
- tm __TI_flags+3(%r12),_TIF_WORK
- jnz .Lsysc_work # check for thread work
- tm __LC_CPU_FLAGS+3,_CIF_WORK
+ tm __TI_flags+7(%r12),_TIF_WORK
+ jnz .Lsysc_work # check for work
+ tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz .Lsysc_work
.Lsysc_restore:
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
- lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
.Lsysc_done:
#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
- tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo .Lsysc_mcck_pending
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo .Lsysc_reschedule
- tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
+#ifdef CONFIG_UPROBES
+ tm __TI_flags+7(%r12),_TIF_UPROBE
+ jo .Lsysc_uprobe_notify
+#endif
+ tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
jo .Lsysc_singlestep
- tm __TI_flags+3(%r12),_TIF_SIGPENDING
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo .Lsysc_sigpending
- tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo .Lsysc_notify_resume
- tm __LC_CPU_FLAGS+3,_CIF_ASCE
+ tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo .Lsysc_uaccess
j .Lsysc_return # beware of critical section cleanup
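
A hedged C restatement of the work-bit dispatch order implemented by .Lsysc_work above, including the newly added _TIF_UPROBE case; every name and bit value below is illustrative rather than the kernel's:

#include <stdio.h>

#define X_CIF_MCCK_PENDING	0x01
#define X_TIF_NEED_RESCHED	0x02
#define X_TIF_UPROBE		0x04
#define X_PIF_PER_TRAP		0x08
#define X_TIF_SIGPENDING	0x10
#define X_TIF_NOTIFY_RESUME	0x20
#define X_CIF_ASCE		0x40

static const char *sysc_work(unsigned cif, unsigned tif, unsigned pif)
{
	if (cif & X_CIF_MCCK_PENDING)	return "handle machine check";
	if (tif & X_TIF_NEED_RESCHED)	return "schedule";
	if (tif & X_TIF_UPROBE)		return "uprobe_notify_resume";	/* new in this patch */
	if (pif & X_PIF_PER_TRAP)	return "do_per_trap";
	if (tif & X_TIF_SIGPENDING)	return "do_signal";
	if (tif & X_TIF_NOTIFY_RESUME)	return "do_notify_resume";
	if (cif & X_CIF_ASCE)		return "reload user asce";
	return "return to user";
}

int main(void)
{
	printf("%s\n", sysc_work(0, X_TIF_UPROBE, 0));
	return 0;
}
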
@@ -246,109 +284,109 @@ ENTRY(system_call)
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
- l %r1,BASED(.Lc_schedule)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call schedule
+ larl %r14,.Lsysc_return
+ jg schedule
#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
- l %r1,BASED(.Lc_handle_mcck)
- la %r14,BASED(.Lsysc_return)
- br %r1 # TIF bit will be cleared by handler
+ larl %r14,.Lsysc_return
+ jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
- ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
- lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lsysc_return
#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_signal)
- basr %r14,%r1 # call do_signal
- tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
+ tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
jno .Lsysc_return
- lm %r2,%r7,__PT_R2(%r11) # load svc arguments
- l %r10,__TI_sysc_table(%r12) # 31 bit system call table
- xr %r8,%r8 # svc 0 returns -ENOSYS
- clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+ lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
+ lghi %r8,0 # svc 0 returns -ENOSYS
+ llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
+ cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
- lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
- sla %r8,2
+ slag %r8,%r1,2
j .Lsysc_nr_ok # restart svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_notify_resume)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_notify_resume
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_notify_resume
+
+#
+# _TIF_UPROBE is set, call uprobe_notify_resume
+#
+#ifdef CONFIG_UPROBES
+.Lsysc_uprobe_notify:
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg uprobe_notify_resume
+#endif
#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
- ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_per_trap)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_per_trap
+ ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_per_trap
#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
- l %r1,BASED(.Lc_trace_enter)
- lr %r2,%r11 # pass pointer to pt_regs
+ lgr %r2,%r11 # pass pointer to pt_regs
la %r3,0
- xr %r0,%r0
- icm %r0,3,__PT_INT_CODE+2(%r11)
- st %r0,__PT_R2(%r11)
- basr %r14,%r1 # call do_syscall_trace_enter
- cl %r2,BASED(.Lnr_syscalls)
- jnl .Lsysc_tracenogo
- lr %r8,%r2
- sll %r8,2
- l %r9,0(%r8,%r10)
+ llgh %r0,__PT_INT_CODE+2(%r11)
+ stg %r0,__PT_R2(%r11)
+ brasl %r14,do_syscall_trace_enter
+ lghi %r0,NR_syscalls
+ clgr %r0,%r2
+ jnh .Lsysc_tracenogo
+ sllg %r8,%r2,2
+ lgf %r9,0(%r8,%r10)
.Lsysc_tracego:
- lm %r3,%r7,__PT_R3(%r11)
- st %r7,STACK_FRAME_OVERHEAD(%r15)
- l %r2,__PT_ORIG_GPR2(%r11)
+ lmg %r3,%r7,__PT_R3(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lg %r2,__PT_ORIG_GPR2(%r11)
basr %r14,%r9 # call sys_xxx
- st %r2,__PT_R2(%r11) # store return value
+ stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
- tm __TI_flags+3(%r12),_TIF_TRACE
+ tm __TI_flags+7(%r12),_TIF_TRACE
jz .Lsysc_return
- l %r1,BASED(.Lc_trace_exit)
- lr %r2,%r11 # pass pointer to pt_regs
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_syscall_trace_exit
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_syscall_trace_exit
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- l %r1,BASED(.Lc_schedule_tail)
- basr %r14,%r1 # call schedule_tail
+ lg %r12,__LC_THREAD_INFO
+ brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
jne .Lsysc_tracenogo
# it's a kernel thread
- lm %r9,%r10,__PT_R9(%r11) # load gprs
+ lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
basr %r14,%r9
@@ -360,46 +398,54 @@ ENTRY(kernel_thread_starter)
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_SYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_PGM_OLD_PSW
- tmh %r8,0x0001 # test problem state bit
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_PGM_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,1
+ tmhh %r8,0x0001 # test problem state bit
jnz 1f # -> fault in user space
- tmh %r8,0x4000 # PER bit set in old PSW ?
+ tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 0f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
- l %r15,__LC_KERNEL_STACK
+1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r14
+ lg %r15,__LC_KERNEL_STACK
+ lg %r14,__TI_task(%r12)
+ lghi %r13,__LC_PGM_TDB
+ tm __LC_PGM_ILC+2,0x02 # check for transaction abort
+ jz 2f
+ mvc __THREAD_trap_tdb(256,%r14),0(%r13)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
- mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 0f
- l %r1,__TI_task(%r12)
- tmh %r8,0x0001 # kernel per event ?
+ tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
- oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
- mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
- mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
- mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
+ oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+ mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
+ mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
0: REENABLE_IRQS
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_jump_table)
- la %r10,0x7f
- n %r10,__PT_INT_CODE(%r11)
- je .Lsysc_return
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ larl %r1,pgm_check_table
+ llgh %r10,__PT_INT_CODE+2(%r11)
+ nill %r10,0x007f
sll %r10,2
- l %r1,0(%r10,%r1) # load address of handler routine
- lr %r2,%r11 # pass pointer to pt_regs
+ je .Lsysc_return
+ lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
j .Lsysc_return
@@ -408,54 +454,55 @@ ENTRY(pgm_check_handler)
#
.Lpgm_kprobe:
REENABLE_IRQS
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_do_per_trap)
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_per_trap
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_per_trap
j .Lsysc_return
#
# single stepped system call
#
.Lpgm_svcper:
- mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
- mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
- lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+ mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+ larl %r14,.Lsysc_per
+ stg %r14,__LC_RETURN_PSW+8
+ lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
+ lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
/*
* IO interrupt handler routine
*/
-
ENTRY(io_int_handler)
- stck __LC_INT_CLOCK
+ STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_ASYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_IO_OLD_PSW
- tmh %r8,0x0001 # interrupting from user ?
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_IO_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,2
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user?
jz .Lio_skip
- UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
.Lio_skip:
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
- l %r1,BASED(.Lc_do_IRQ)
- lr %r2,%r11 # pass pointer to pt_regs
- lhi %r3,IO_INTERRUPT
+ lgr %r2,%r11 # pass pointer to pt_regs
+ lghi %r3,IO_INTERRUPT
tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
jz .Lio_call
- lhi %r3,THIN_INTERRUPT
+ lghi %r3,THIN_INTERRUPT
.Lio_call:
- basr %r14,%r1 # call do_IRQ
- tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
+ brasl %r14,do_IRQ
+ tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
jz .Lio_return
tpi 0
jz .Lio_return
@@ -465,21 +512,26 @@ ENTRY(io_int_handler)
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
.Lio_tif:
- tm __TI_flags+3(%r12),_TIF_WORK
+ tm __TI_flags+7(%r12),_TIF_WORK
jnz .Lio_work # there is work to do (signals etc.)
- tm __LC_CPU_FLAGS+3,_CIF_WORK
+ tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz .Lio_work
.Lio_restore:
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
- lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
.Lio_done:
#
# There is work todo, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and preemptive scheduling is enabled check
+# 2) if we return to kernel code and kvm is enabled check if we need to
+# modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
# the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
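
Restating the comment above as a hedged C sketch of the decision taken in the next hunk (case 2, leaving SIE, is already handled by HANDLE_SIE_INTERCEPT at interrupt entry and is omitted; names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static const char *io_work(bool to_user, bool preempt_enabled,
			   int preempt_count, bool need_resched)
{
	if (to_user)
		return "switch to kernel stack, handle _TIF_WORK bits";
	if (preempt_enabled && preempt_count == 0 && need_resched)
		return "preempt_schedule_irq";	/* CONFIG_PREEMPT only */
	return "restore registers, lpswe back to interrupted context";
}

int main(void)
{
	printf("%s\n", io_work(false, true, 0, true));
	return 0;
}
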
@@ -489,21 +541,20 @@ ENTRY(io_int_handler)
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
- jnz .Lio_restore # preemption disabled
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ jnz .Lio_restore # preemption is disabled
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
# switch to kernel stack
- l %r1,__PT_R15(%r11)
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ lg %r1,__PT_R15(%r11)
+ aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
- lr %r15,%r1
+ lgr %r15,%r1
# TRACE_IRQS_ON already done at .Lio_return, call
# TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_preempt_irq)
- basr %r14,%r1 # call preempt_schedule_irq
+ brasl %r14,preempt_schedule_irq
j .Lio_return
#else
j .Lio_restore
@@ -513,25 +564,25 @@ ENTRY(io_int_handler)
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
- l %r1,__LC_KERNEL_STACK
+ lg %r1,__LC_KERNEL_STACK
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
- lr %r15,%r1
+ lgr %r15,%r1
#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
- tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo .Lio_mcck_pending
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo .Lio_reschedule
- tm __TI_flags+3(%r12),_TIF_SIGPENDING
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo .Lio_sigpending
- tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo .Lio_notify_resume
- tm __LC_CPU_FLAGS+3,_CIF_ASCE
+ tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo .Lio_uaccess
j .Lio_return # beware of critical section cleanup
@@ -540,8 +591,7 @@ ENTRY(io_int_handler)
#
.Lio_mcck_pending:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_handle_mcck)
- basr %r14,%r1 # TIF bit will be cleared by handler
+ brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
TRACE_IRQS_OFF
j .Lio_return
@@ -549,8 +599,8 @@ ENTRY(io_int_handler)
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
- ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
- lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lio_return
#
@@ -558,35 +608,32 @@ ENTRY(io_int_handler)
#
.Lio_reschedule:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_schedule)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- basr %r14,%r1 # call scheduler
+ brasl %r14,schedule # call scheduler
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
#
-# _TIF_SIGPENDING is set, call do_signal
+# _TIF_SIGPENDING or is set, call do_signal
#
.Lio_sigpending:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_do_signal)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_signal
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
#
-# _TIF_SIGPENDING is set, call do_signal
+# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
#
.Lio_notify_resume:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_do_notify_resume)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_notify_resume
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_notify_resume
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
@@ -594,45 +641,47 @@ ENTRY(io_int_handler)
/*
* External interrupt handler routine
*/
-
ENTRY(ext_int_handler)
- stck __LC_INT_CLOCK
+ STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_ASYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_EXT_OLD_PSW
- tmh %r8,0x0001 # interrupting from user ?
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_EXT_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,3
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user ?
jz .Lext_skip
- UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
.Lext_skip:
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+ lghi %r1,__LC_EXT_PARAMS2
mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_do_IRQ)
- lr %r2,%r11 # pass pointer to pt_regs
- lhi %r3,EXT_INTERRUPT
- basr %r14,%r1 # call do_IRQ
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ lghi %r3,EXT_INTERRUPT
+ brasl %r14,do_IRQ
j .Lio_return
/*
* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
*/
ENTRY(psw_idle)
- st %r3,__SF_EMPTY(%r15)
- basr %r1,0
- la %r1,.Lpsw_idle_lpsw+4-.(%r1)
- st %r1,__SF_EMPTY+4(%r15)
- oi __SF_EMPTY+4(%r15),0x80
- stck __CLOCK_IDLE_ENTER(%r2)
+ stg %r3,__SF_EMPTY(%r15)
+ larl %r1,.Lpsw_idle_lpsw+4
+ stg %r1,__SF_EMPTY+8(%r15)
+ STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
- lpsw __SF_EMPTY(%r15)
+ lpswe __SF_EMPTY(%r15)
br %r14
.Lpsw_idle_end:
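
psw_idle stores the TOD clock and CPU timer at idle entry and loads a wait PSW; if an interrupt lands inside that window, .Lcleanup_idle further down repairs the accounting. A hedged C sketch of that bookkeeping, keeping in mind that the s390 CPU timer counts down (all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct acct {
	uint64_t steal_timer, system_timer;
	uint64_t last_update_clock, last_update_timer;
};

static void cleanup_idle(struct acct *a,
			 uint64_t clock_idle_enter, uint64_t clock_idle_exit,
			 uint64_t timer_idle_enter, uint64_t timer_idle_exit)
{
	a->steal_timer  += clock_idle_enter - a->last_update_clock;
	a->last_update_clock = clock_idle_exit;
	a->system_timer += a->last_update_timer - timer_idle_enter;
	a->last_update_timer = timer_idle_exit;
}

int main(void)
{
	struct acct a = { 0, 0, 100, 500 };

	cleanup_idle(&a, 140, 150, 480, 470);
	printf("steal %llu system %llu\n",
	       (unsigned long long)a.steal_timer,
	       (unsigned long long)a.system_timer);
	return 0;
}
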
@@ -641,17 +690,19 @@ ENTRY(psw_idle)
/*
* Machine check handler routines
*/
-
ENTRY(mcck_int_handler)
- stck __LC_MCCK_CLOCK
- spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
- lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_MCK_OLD_PSW
+ STCK __LC_MCCK_CLOCK
+ la %r1,4095 # revalidate r1
+ spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_MCK_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,4
tm __LC_MCCK_CODE,0x80 # system damage?
jo .Lmcck_panic # yes -> rest of mcck code invalid
- la %r14,__LC_CPU_TIMER_SAVE_AREA
+ lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
jo 3f
@@ -669,76 +720,76 @@ ENTRY(mcck_int_handler)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno .Lmcck_panic # no -> skip cleanup critical
+ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
tm %r8,0x0001 # interrupting from user ?
jz .Lmcck_skip
- UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+ LAST_BREAK %r14
.Lmcck_skip:
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
- stm %r8,%r9,__PT_PSW(%r11)
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_do_machine_check)
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call s390_do_machine_check
+ lghi %r14,__LC_GPREGS_SAVE_AREA+64
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,s390_do_machine_check
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lmcck_return
- l %r1,__LC_KERNEL_STACK # switch to kernel stack
+ lg %r1,__LC_KERNEL_STACK # switch to kernel stack
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- lr %r15,%r1
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
+ lgr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jno .Lmcck_return
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_handle_mcck)
- basr %r14,%r1 # call s390_handle_mcck
+ brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
.Lmcck_return:
- mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
- lm %r0,%r15,__PT_R0(%r11)
stpt __LC_EXIT_TIMER
- lpsw __LC_RETURN_MCCK_PSW
-0: lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_MCCK_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+0: lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_MCCK_PSW
.Lmcck_panic:
- l %r14,__LC_PANIC_STACK
- slr %r14,%r15
- sra %r14,PAGE_SHIFT
+ lg %r14,__LC_PANIC_STACK
+ slgr %r14,%r15
+ srag %r14,%r14,PAGE_SHIFT
jz 0f
- l %r15,__LC_PANIC_STACK
- j .Lmcck_skip
-0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ lg %r15,__LC_PANIC_STACK
+0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j .Lmcck_skip
#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
- st %r15,__LC_SAVE_AREA_RESTART
- l %r15,__LC_RESTART_STACK
- ahi %r15,-__PT_SIZE # create pt_regs on stack
+ stg %r15,__LC_SAVE_AREA_RESTART
+ lg %r15,__LC_RESTART_STACK
+ aghi %r15,-__PT_SIZE # create pt_regs on stack
xc 0(__PT_SIZE,%r15),0(%r15)
- stm %r0,%r14,__PT_R0(%r15)
- mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
- mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
- ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
+ stmg %r0,%r14,__PT_R0(%r15)
+ mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+ mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
+ aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- l %r1,__LC_RESTART_FN # load fn, parm & source cpu
- l %r2,__LC_RESTART_DATA
- l %r3,__LC_RESTART_SOURCE
- ltr %r3,%r3 # test source cpu address
+ lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
+ lg %r2,__LC_RESTART_DATA
+ lg %r3,__LC_RESTART_SOURCE
+ ltgr %r3,%r3 # test source cpu address
jm 1f # negative -> skip source stop
0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
brc 10,0b # wait for status stored
1: basr %r14,%r1 # call function
stap __SF_EMPTY(%r15) # store cpu address
- lh %r3,__SF_EMPTY(%r15)
+ llgh %r3,__SF_EMPTY(%r15)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
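
The cleanup table below changes from ".long sym + 0x80000000" to plain ".quad sym": the added high bit was the 31-bit addressing-mode bit that PSW-style addresses carry in ESA/390 mode, which the 64-bit entries no longer need. cleanup_critical then compares the interrupted address in %r9 against successive table entries and branches to the matching fixup, roughly as in this illustrative sketch (not kernel code):

#include <stdint.h>
#include <stdio.h>

static const char *cleanup_critical(uintptr_t addr, const uintptr_t *table,
				    const char *const *fixup, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		if (addr < table[i])
			return fixup[i];	/* "none" for already-safe ranges */
	return "none";
}

int main(void)
{
	const uintptr_t table[] = { 0x100, 0x200 };
	const char *const fixup[] = { "none", ".Lcleanup_system_call" };

	printf("%s\n", cleanup_critical(0x180, table, fixup, 2));
	return 0;
}
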
@@ -752,215 +803,257 @@ ENTRY(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- l %r15,__LC_PANIC_STACK # change to panic stack
+ lg %r15,__LC_PANIC_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
- stm %r0,%r7,__PT_R0(%r11)
- stm %r8,%r9,__PT_PSW(%r11)
- mvc __PT_R8(32,%r11),0(%r14)
- l %r1,BASED(1f)
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- lr %r2,%r11 # pass pointer to pt_regs
- br %r1 # branch to kernel_stack_overflow
-1: .long kernel_stack_overflow
+ stmg %r0,%r7,__PT_R0(%r11)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+ stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ jg kernel_stack_overflow
#endif
+ .align 8
.Lcleanup_table:
- .long system_call + 0x80000000
- .long .Lsysc_do_svc + 0x80000000
- .long .Lsysc_tif + 0x80000000
- .long .Lsysc_restore + 0x80000000
- .long .Lsysc_done + 0x80000000
- .long .Lio_tif + 0x80000000
- .long .Lio_restore + 0x80000000
- .long .Lio_done + 0x80000000
- .long psw_idle + 0x80000000
- .long .Lpsw_idle_end + 0x80000000
+ .quad system_call
+ .quad .Lsysc_do_svc
+ .quad .Lsysc_tif
+ .quad .Lsysc_restore
+ .quad .Lsysc_done
+ .quad .Lio_tif
+ .quad .Lio_restore
+ .quad .Lio_done
+ .quad psw_idle
+ .quad .Lpsw_idle_end
cleanup_critical:
- cl %r9,BASED(.Lcleanup_table) # system_call
+ clg %r9,BASED(.Lcleanup_table) # system_call
jl 0f
- cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
+ clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
jl .Lcleanup_system_call
- cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
+ clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
jl 0f
- cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
+ clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
jl .Lcleanup_sysc_tif
- cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
+ clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
jl .Lcleanup_sysc_restore
- cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
+ clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
jl 0f
- cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
+ clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
jl .Lcleanup_io_tif
- cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
+ clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
jl .Lcleanup_io_restore
- cl %r9,BASED(.Lcleanup_table+32) # psw_idle
+ clg %r9,BASED(.Lcleanup_table+64) # psw_idle
jl 0f
- cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
+ clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
jl .Lcleanup_idle
0: br %r14
+
.Lcleanup_system_call:
# check if stpt has been executed
- cl %r9,BASED(.Lcleanup_system_call_insn)
+ clg %r9,BASED(.Lcleanup_system_call_insn)
jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- chi %r11,__LC_SAVE_AREA_ASYNC
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stm has been executed
- cl %r9,BASED(.Lcleanup_system_call_insn+4)
+0: # check if stmg has been executed
+ clg %r9,BASED(.Lcleanup_system_call_insn+8)
jh 0f
- mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
-0: # set up saved registers r12, and r13
- st %r12,16(%r11) # r12 thread-info pointer
- st %r13,20(%r11) # r13 literal-pool pointer
- # check if the user time calculation has been done
- cl %r9,BASED(.Lcleanup_system_call_insn+8)
+ mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
+0: # check if base register setup + TIF bit load has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+16)
+ jhe 0f
+ # set up saved registers r10 and r12
+ stg %r10,16(%r11) # r10 last break
+ stg %r12,32(%r11) # r12 thread-info pointer
+0: # check if the user time update has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
- l %r10,__LC_EXIT_TIMER
- l %r15,__LC_EXIT_TIMER+4
- SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
- ADD64 %r10,%r15,__LC_USER_TIMER
- st %r10,__LC_USER_TIMER
- st %r15,__LC_USER_TIMER+4
-0: # check if the system time calculation has been done
- cl %r9,BASED(.Lcleanup_system_call_insn+12)
+ lg %r15,__LC_EXIT_TIMER
+ slg %r15,__LC_SYNC_ENTER_TIMER
+ alg %r15,__LC_USER_TIMER
+ stg %r15,__LC_USER_TIMER
+0: # check if the system time update has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+32)
jh 0f
- l %r10,__LC_LAST_UPDATE_TIMER
- l %r15,__LC_LAST_UPDATE_TIMER+4
- SUB64 %r10,%r15,__LC_EXIT_TIMER
- ADD64 %r10,%r15,__LC_SYSTEM_TIMER
- st %r10,__LC_SYSTEM_TIMER
- st %r15,__LC_SYSTEM_TIMER+4
+ lg %r15,__LC_LAST_UPDATE_TIMER
+ slg %r15,__LC_EXIT_TIMER
+ alg %r15,__LC_SYSTEM_TIMER
+ stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- # set up saved register 11
- l %r15,__LC_KERNEL_STACK
+ # do LAST_BREAK
+ lg %r9,16(%r11)
+ srag %r9,%r9,23
+ jz 0f
+ mvc __TI_last_break(8,%r12),16(%r11)
+0: # set up saved register r11
+ lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
- st %r9,12(%r11) # r11 pt_regs pointer
+ stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
- stm %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL
- # setup saved register 15
- st %r15,28(%r11) # r15 stack pointer
+ xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
+ mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
+ # setup saved register r15
+ stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
- l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
+ larl %r9,.Lsysc_do_svc
br %r14
.Lcleanup_system_call_insn:
- .long system_call + 0x80000000
- .long .Lsysc_stm + 0x80000000
- .long .Lsysc_vtime + 0x80000000 + 36
- .long .Lsysc_vtime + 0x80000000 + 76
+ .quad system_call
+ .quad .Lsysc_stmg
+ .quad .Lsysc_per
+ .quad .Lsysc_vtime+18
+ .quad .Lsysc_vtime+42
.Lcleanup_sysc_tif:
- l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
+ larl %r9,.Lsysc_tif
br %r14
.Lcleanup_sysc_restore:
- cl %r9,BASED(.Lcleanup_sysc_restore_insn)
- jhe 0f
- l %r9,12(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
- mvc 0(32,%r11),__PT_R8(%r9)
- lm %r0,%r7,__PT_R0(%r9)
-0: lm %r8,%r9,__LC_RETURN_PSW
+ clg %r9,BASED(.Lcleanup_sysc_restore_insn)
+ je 0f
+ lg %r9,24(%r11) # get saved pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_sysc_restore_insn:
- .long .Lsysc_done - 4 + 0x80000000
+ .quad .Lsysc_done - 4
.Lcleanup_io_tif:
- l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
+ larl %r9,.Lio_tif
br %r14
.Lcleanup_io_restore:
- cl %r9,BASED(.Lcleanup_io_restore_insn)
- jhe 0f
- l %r9,12(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
- mvc 0(32,%r11),__PT_R8(%r9)
- lm %r0,%r7,__PT_R0(%r9)
-0: lm %r8,%r9,__LC_RETURN_PSW
+ clg %r9,BASED(.Lcleanup_io_restore_insn)
+ je 0f
+ lg %r9,24(%r11) # get saved r11 pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_io_restore_insn:
- .long .Lio_done - 4 + 0x80000000
+ .quad .Lio_done - 4
.Lcleanup_idle:
# copy interrupt clock & cpu timer
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- chi %r11,__LC_SAVE_AREA_ASYNC
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck has been executed
- cl %r9,BASED(.Lcleanup_idle_insn)
+0: # check if stck & stpt have been executed
+ clg %r9,BASED(.Lcleanup_idle_insn)
jhe 1f
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1: # account system time going idle
- lm %r9,%r10,__LC_STEAL_TIMER
- ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
- SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
- stm %r9,%r10,__LC_STEAL_TIMER
+ lg %r9,__LC_STEAL_TIMER
+ alg %r9,__CLOCK_IDLE_ENTER(%r2)
+ slg %r9,__LC_LAST_UPDATE_CLOCK
+ stg %r9,__LC_STEAL_TIMER
mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lm %r9,%r10,__LC_SYSTEM_TIMER
- ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
- SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
- stm %r9,%r10,__LC_SYSTEM_TIMER
+ lg %r9,__LC_SYSTEM_TIMER
+ alg %r9,__LC_LAST_UPDATE_TIMER
+ slg %r9,__TIMER_IDLE_ENTER(%r2)
+ stg %r9,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
- n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
- l %r9,24(%r11) # return from psw_idle
+ nihh %r8,0xfcfd # clear irq & wait state bits
+ lg %r9,48(%r11) # return from psw_idle
br %r14
.Lcleanup_idle_insn:
- .long .Lpsw_idle_lpsw + 0x80000000
-.Lcleanup_idle_wait:
- .long 0xfcfdffff
+ .quad .Lpsw_idle_lpsw
/*
* Integer constants
*/
- .align 4
-.Lnr_syscalls:
- .long NR_syscalls
-.Lvtimer_max:
- .quad 0x7fffffffffffffff
+ .align 8
+.Lcritical_start:
+ .quad .L__critical_start
+.Lcritical_length:
+ .quad .L__critical_end - .L__critical_start
+
+#if IS_ENABLED(CONFIG_KVM)
/*
- * Symbol constants
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
*/
-.Lc_do_machine_check: .long s390_do_machine_check
-.Lc_handle_mcck: .long s390_handle_mcck
-.Lc_do_IRQ: .long do_IRQ
-.Lc_do_signal: .long do_signal
-.Lc_do_notify_resume: .long do_notify_resume
-.Lc_do_per_trap: .long do_per_trap
-.Lc_jump_table: .long pgm_check_table
-.Lc_schedule: .long schedule
-#ifdef CONFIG_PREEMPT
-.Lc_preempt_irq: .long preempt_schedule_irq
-#endif
-.Lc_trace_enter: .long do_syscall_trace_enter
-.Lc_trace_exit: .long do_syscall_trace_exit
-.Lc_schedule_tail: .long schedule_tail
-.Lc_sysc_per: .long .Lsysc_per + 0x80000000
-#ifdef CONFIG_TRACE_IRQFLAGS
-.Lc_hardirqs_on: .long trace_hardirqs_on_caller
-.Lc_hardirqs_off: .long trace_hardirqs_off_caller
-#endif
-#ifdef CONFIG_LOCKDEP
-.Lc_lockdep_sys_exit: .long lockdep_sys_exit
+ENTRY(sie64a)
+ stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ stg %r2,__SF_EMPTY(%r15) # save control block pointer
+ stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
+ xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+ lg %r14,__LC_GMAP # get gmap pointer
+ ltgr %r14,%r14
+ jz .Lsie_gmap
+ lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
+.Lsie_gmap:
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
+ tm __SIE_PROG20+3(%r14),1 # last exit...
+ jnz .Lsie_done
+ LPP __SF_EMPTY(%r15) # set guest id
+ sie 0(%r14)
+.Lsie_done:
+ LPP __SF_EMPTY+16(%r15) # set host id
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+.Lrewind_pad:
+ nop 0
+ .globl sie_exit
+sie_exit:
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
+ br %r14
+.Lsie_fault:
+ lghi %r14,-EFAULT
+ stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
+ j sie_exit
+
+ .align 8
+.Lsie_critical:
+ .quad .Lsie_gmap
+.Lsie_critical_length:
+ .quad .Lsie_done - .Lsie_gmap
+
+ EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(sie_exit,.Lsie_fault)
#endif
-.Lc_critical_start: .long .L__critical_start + 0x80000000
-.Lc_critical_length: .long .L__critical_end - .L__critical_start
- .section .rodata, "a"
-#define SYSCALL(esa,esame,emu) .long esa
+ .section .rodata, "a"
+#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
+
+#ifdef CONFIG_COMPAT
+
+#define SYSCALL(esame,emu) .long emu
+ .globl sys_call_table_emu
+sys_call_table_emu:
+#include "syscalls.S"
+#undef SYSCALL
+#endif
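
With the 31-bit column gone, syscalls.S is included once for the native sys_call_table and, under CONFIG_COMPAT, a second time with SYSCALL() redefined to pick the compat column for sys_call_table_emu; entry64.S is deleted below because its contents became the new entry.S. The two-column trick works like this hedged, self-contained C illustration (the syscall names are examples only):

#include <stdio.h>

#define SYSCALL_LIST \
	SYSCALL(sys_read_native,  compat_sys_read)  \
	SYSCALL(sys_write_native, compat_sys_write)

static void sys_read_native(void)   { puts("read");         }
static void sys_write_native(void)  { puts("write");        }
static void compat_sys_read(void)   { puts("compat read");  }
static void compat_sys_write(void)  { puts("compat write"); }

#define SYSCALL(native, emu) native,
static void (*const table_native[])(void) = { SYSCALL_LIST };
#undef SYSCALL

#define SYSCALL(native, emu) emu,
static void (*const table_emu[])(void) = { SYSCALL_LIST };
#undef SYSCALL

int main(void)
{
	table_native[0]();	/* native entry 0 */
	table_emu[1]();		/* compat entry 1 */
	return 0;
}
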
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
deleted file mode 100644
index c329446a951d..000000000000
--- a/arch/s390/kernel/entry64.S
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * S390 low-level entry points.
- *
- * Copyright IBM Corp. 1999, 2012
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Hartmut Penner (hp@de.ibm.com),
- * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/errno.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/page.h>
-#include <asm/sigp.h>
-#include <asm/irq.h>
-
-__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 8
-__PT_R2 = __PT_GPRS + 16
-__PT_R3 = __PT_GPRS + 24
-__PT_R4 = __PT_GPRS + 32
-__PT_R5 = __PT_GPRS + 40
-__PT_R6 = __PT_GPRS + 48
-__PT_R7 = __PT_GPRS + 56
-__PT_R8 = __PT_GPRS + 64
-__PT_R9 = __PT_GPRS + 72
-__PT_R10 = __PT_GPRS + 80
-__PT_R11 = __PT_GPRS + 88
-__PT_R12 = __PT_GPRS + 96
-__PT_R13 = __PT_GPRS + 104
-__PT_R14 = __PT_GPRS + 112
-__PT_R15 = __PT_GPRS + 120
-
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
-STACK_SIZE = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_UPROBE)
-_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
- _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
-_PIF_WORK = (_PIF_PER_TRAP)
-
-#define BASED(name) name-system_call(%r13)
-
- .macro TRACE_IRQS_ON
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_on_caller
-#endif
- .endm
-
- .macro TRACE_IRQS_OFF
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_off_caller
-#endif
- .endm
-
- .macro LOCKDEP_SYS_EXIT
-#ifdef CONFIG_LOCKDEP
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jz .+10
- brasl %r14,lockdep_sys_exit
-#endif
- .endm
-
- .macro LPP newpp
-#if IS_ENABLED(CONFIG_KVM)
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
- jz .+8
- .insn s,0xb2800000,\newpp
-#endif
- .endm
-
- .macro HANDLE_SIE_INTERCEPT scratch,reason
-#if IS_ENABLED(CONFIG_KVM)
- tmhh %r8,0x0001 # interrupting from user ?
- jnz .+62
- lgr \scratch,%r9
- slg \scratch,BASED(.Lsie_critical)
- clg \scratch,BASED(.Lsie_critical_length)
- .if \reason==1
- # Some program interrupts are suppressing (e.g. protection).
- # We must also check the instruction after SIE in that case.
- # do_protection_exception will rewind to .Lrewind_pad
- jh .+42
- .else
- jhe .+42
- .endif
- lg %r14,__SF_EMPTY(%r15) # get control block pointer
- LPP __SF_EMPTY+16(%r15) # set host id
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- larl %r9,sie_exit # skip forward to sie_exit
- mvi __SF_EMPTY+31(%r15),\reason # set exit reason
-#endif
- .endm
-
- .macro CHECK_STACK stacksize,savearea
-#ifdef CONFIG_CHECK_STACK
- tml %r15,\stacksize - CONFIG_STACK_GUARD
- lghi %r14,\savearea
- jz stack_overflow
-#endif
- .endm
-
- .macro SWITCH_ASYNC savearea,stack,shift
- tmhh %r8,0x0001 # interrupting from user ?
- jnz 1f
- lgr %r14,%r9
- slg %r14,BASED(.Lcritical_start)
- clg %r14,BASED(.Lcritical_length)
- jhe 0f
- lghi %r11,\savearea # inside critical section, do cleanup
- brasl %r14,cleanup_critical
- tmhh %r8,0x0001 # retest problem state after cleanup
- jnz 1f
-0: lg %r14,\stack # are we already on the target stack?
- slgr %r14,%r15
- srag %r14,%r14,\shift
- jnz 1f
- CHECK_STACK 1<<\shift,\savearea
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 2f
-1: lg %r15,\stack # load target stack
-2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- .endm
-
- .macro UPDATE_VTIME scratch,enter_timer
- lg \scratch,__LC_EXIT_TIMER
- slg \scratch,\enter_timer
- alg \scratch,__LC_USER_TIMER
- stg \scratch,__LC_USER_TIMER
- lg \scratch,__LC_LAST_UPDATE_TIMER
- slg \scratch,__LC_EXIT_TIMER
- alg \scratch,__LC_SYSTEM_TIMER
- stg \scratch,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
- .endm
-
- .macro LAST_BREAK scratch
- srag \scratch,%r10,23
- jz .+10
- stg %r10,__TI_last_break(%r12)
- .endm
-
- .macro REENABLE_IRQS
- stg %r8,__LC_RETURN_PSW
- ni __LC_RETURN_PSW,0xbf
- ssm __LC_RETURN_PSW
- .endm
-
- .macro STCK savearea
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- .insn s,0xb27c0000,\savearea # store clock fast
-#else
- .insn s,0xb2050000,\savearea # store clock
-#endif
- .endm
-
- .section .kprobes.text, "ax"
-
-/*
- * Scheduler resume function, called by switch_to
- * gpr2 = (task_struct *) prev
- * gpr3 = (task_struct *) next
- * Returns:
- * gpr2 = prev
- */
-ENTRY(__switch_to)
- stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- lg %r4,__THREAD_info(%r2) # get thread_info of prev
- lg %r5,__THREAD_info(%r3) # get thread_info of next
- lgr %r15,%r5
- aghi %r15,STACK_INIT # end of kernel stack of next
- stg %r3,__LC_CURRENT # store task struct of next
- stg %r5,__LC_THREAD_INFO # store thread info of next
- stg %r15,__LC_KERNEL_STACK # store end of kernel stack
- lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
- lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
- lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
- br %r14
-
-.L__critical_start:
-/*
- * SVC interrupt handler routine. System calls are synchronous events and
- * are executed with interrupts enabled.
- */
-
-ENTRY(system_call)
- stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stmg:
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- lghi %r14,_PIF_SYSCALL
-.Lsysc_per:
- lg %r15,__LC_KERNEL_STACK
- la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-.Lsysc_vtime:
- UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
- LAST_BREAK %r13
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
- stg %r14,__PT_FLAGS(%r11)
-.Lsysc_do_svc:
- lg %r10,__TI_sysc_table(%r12) # address of system call table
- llgh %r8,__PT_INT_CODE+2(%r11)
- slag %r8,%r8,2 # shift and test for svc 0
- jnz .Lsysc_nr_ok
- # svc 0: system call number in %r1
- llgfr %r1,%r1 # clear high word in r1
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok
- sth %r1,__PT_INT_CODE+2(%r11)
- slag %r8,%r1,2
-.Lsysc_nr_ok:
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- stg %r2,__PT_ORIG_GPR2(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lgf %r9,0(%r8,%r10) # get system call add.
- tm __TI_flags+7(%r12),_TIF_TRACE
- jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
- stg %r2,__PT_R2(%r11) # store return value
-
-.Lsysc_return:
- LOCKDEP_SYS_EXIT
-.Lsysc_tif:
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lsysc_restore
- tm __PT_FLAGS+7(%r11),_PIF_WORK
- jnz .Lsysc_work
- tm __TI_flags+7(%r12),_TIF_WORK
- jnz .Lsysc_work # check for work
- tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz .Lsysc_work
-.Lsysc_restore:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
-.Lsysc_done:
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lsysc_work:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo .Lsysc_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo .Lsysc_reschedule
-#ifdef CONFIG_UPROBES
- tm __TI_flags+7(%r12),_TIF_UPROBE
- jo .Lsysc_uprobe_notify
-#endif
- tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- jo .Lsysc_singlestep
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo .Lsysc_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo .Lsysc_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo .Lsysc_uaccess
- j .Lsysc_return # beware of critical section cleanup
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lsysc_reschedule:
- larl %r14,.Lsysc_return
- jg schedule
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lsysc_mcck_pending:
- larl %r14,.Lsysc_return
- jg s390_handle_mcck # TIF bit will be cleared by handler
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lsysc_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lsysc_return
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lsysc_sigpending:
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
- jno .Lsysc_return
- lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
- lg %r10,__TI_sysc_table(%r12) # address of system call table
- lghi %r8,0 # svc 0 returns -ENOSYS
- llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
- slag %r8,%r1,2
- j .Lsysc_nr_ok # restart svc
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lsysc_notify_resume:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_notify_resume
-
-#
-# _TIF_UPROBE is set, call uprobe_notify_resume
-#
-#ifdef CONFIG_UPROBES
-.Lsysc_uprobe_notify:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg uprobe_notify_resume
-#endif
-
-#
-# _PIF_PER_TRAP is set, call do_per_trap
-#
-.Lsysc_singlestep:
- ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_per_trap
-
-#
-# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
-# and after the system call
-#
-.Lsysc_tracesys:
- lgr %r2,%r11 # pass pointer to pt_regs
- la %r3,0
- llgh %r0,__PT_INT_CODE+2(%r11)
- stg %r0,__PT_R2(%r11)
- brasl %r14,do_syscall_trace_enter
- lghi %r0,NR_syscalls
- clgr %r0,%r2
- jnh .Lsysc_tracenogo
- sllg %r8,%r2,2
- lgf %r9,0(%r8,%r10)
-.Lsysc_tracego:
- lmg %r3,%r7,__PT_R3(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
- stg %r2,__PT_R2(%r11) # store return value
-.Lsysc_tracenogo:
- tm __TI_flags+7(%r12),_TIF_TRACE
- jz .Lsysc_return
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_syscall_trace_exit
-
-#
-# a new process exits the kernel with ret_from_fork
-#
-ENTRY(ret_from_fork)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- lg %r12,__LC_THREAD_INFO
- brasl %r14,schedule_tail
- TRACE_IRQS_ON
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
- jne .Lsysc_tracenogo
- # it's a kernel thread
- lmg %r9,%r10,__PT_R9(%r11) # load gprs
-ENTRY(kernel_thread_starter)
- la %r2,0(%r10)
- basr %r14,%r9
- j .Lsysc_tracenogo
-
-/*
- * Program check handler routine
- */
-
-ENTRY(pgm_check_handler)
- stpt __LC_SYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_PGM_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,1
- tmhh %r8,0x0001 # test problem state bit
- jnz 1f # -> fault in user space
- tmhh %r8,0x4000 # PER bit set in old PSW ?
- jnz 0f # -> enabled, can't be a double fault
- tm __LC_PGM_ILC+3,0x80 # check for per exception
- jnz .Lpgm_svcper # -> single stepped svc
-0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 2f
-1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
- LAST_BREAK %r14
- lg %r15,__LC_KERNEL_STACK
- lg %r14,__TI_task(%r12)
- lghi %r13,__LC_PGM_TDB
- tm __LC_PGM_ILC+2,0x02 # check for transaction abort
- jz 2f
- mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
- mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- stg %r10,__PT_ARGS(%r11)
- tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 0f
- tmhh %r8,0x0001 # kernel per event ?
- jz .Lpgm_kprobe
- oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
- mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
- mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-0: REENABLE_IRQS
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- larl %r1,pgm_check_table
- llgh %r10,__PT_INT_CODE+2(%r11)
- nill %r10,0x007f
- sll %r10,2
- je .Lsysc_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
- lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
- j .Lsysc_return
-
-#
-# PER event in supervisor state, must be kprobes
-#
-.Lpgm_kprobe:
- REENABLE_IRQS
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_per_trap
- j .Lsysc_return
-
-#
-# single stepped system call
-#
-.Lpgm_svcper:
- mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
- larl %r14,.Lsysc_per
- stg %r14,__LC_RETURN_PSW+8
- lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
-
-/*
- * IO interrupt handler routine
- */
-ENTRY(io_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_IO_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,2
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- tmhh %r8,0x0001 # interrupting from user?
- jz .Lio_skip
- UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
- LAST_BREAK %r14
-.Lio_skip:
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-.Lio_loop:
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,IO_INTERRUPT
- tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
- jz .Lio_call
- lghi %r3,THIN_INTERRUPT
-.Lio_call:
- brasl %r14,do_IRQ
- tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
- jz .Lio_return
- tpi 0
- jz .Lio_return
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- j .Lio_loop
-.Lio_return:
- LOCKDEP_SYS_EXIT
- TRACE_IRQS_ON
-.Lio_tif:
- tm __TI_flags+7(%r12),_TIF_WORK
- jnz .Lio_work # there is work to do (signals etc.)
- tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz .Lio_work
-.Lio_restore:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
-.Lio_done:
-
-#
-# There is work to do, find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and kvm is enabled check if we need to
-# modify the psw to leave SIE
-# 3) if we return to kernel code and preemptive scheduling is enabled check
-# the preemption counter and if it is zero call preempt_schedule_irq
-# Before any work can be done, a switch to the kernel stack is required.
-#
-.Lio_work:
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jo .Lio_work_user # yes -> do resched & signal
-#ifdef CONFIG_PREEMPT
- # check for preemptive scheduling
- icm %r0,15,__TI_precount(%r12)
- jnz .Lio_restore # preemption is disabled
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jno .Lio_restore
- # switch to kernel stack
- lg %r1,__PT_R15(%r11)
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
- # TRACE_IRQS_ON already done at .Lio_return, call
- # TRACE_IRQS_OFF to keep things symmetrical
- TRACE_IRQS_OFF
- brasl %r14,preempt_schedule_irq
- j .Lio_return
-#else
- j .Lio_restore
-#endif
-
-#
-# Need to do work before returning to userspace, switch to kernel stack
-#
-.Lio_work_user:
- lg %r1,__LC_KERNEL_STACK
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lio_work_tif:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo .Lio_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo .Lio_reschedule
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo .Lio_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo .Lio_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo .Lio_uaccess
- j .Lio_return # beware of critical section cleanup
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lio_mcck_pending:
- # TRACE_IRQS_ON already done at .Lio_return
- brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lio_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lio_return
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lio_reschedule:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- brasl %r14,schedule # call scheduler
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lio_sigpending:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lio_notify_resume:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_notify_resume
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-/*
- * External interrupt handler routine
- */
-ENTRY(ext_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_EXT_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,3
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- tmhh %r8,0x0001 # interrupting from user ?
- jz .Lext_skip
- UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
- LAST_BREAK %r14
-.Lext_skip:
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- lghi %r1,__LC_EXT_PARAMS2
- mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
- mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
- mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,EXT_INTERRUPT
- brasl %r14,do_IRQ
- j .Lio_return
-
-/*
- * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
- */
-ENTRY(psw_idle)
- stg %r3,__SF_EMPTY(%r15)
- larl %r1,.Lpsw_idle_lpsw+4
- stg %r1,__SF_EMPTY+8(%r15)
- STCK __CLOCK_IDLE_ENTER(%r2)
- stpt __TIMER_IDLE_ENTER(%r2)
-.Lpsw_idle_lpsw:
- lpswe __SF_EMPTY(%r15)
- br %r14
-.Lpsw_idle_end:
-
-.L__critical_end:
-
-/*
- * Machine check handler routines
- */
-ENTRY(mcck_int_handler)
- STCK __LC_MCCK_CLOCK
- la %r1,4095 # revalidate r1
- spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_MCK_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,4
- tm __LC_MCCK_CODE,0x80 # system damage?
- jo .Lmcck_panic # yes -> rest of mcck code invalid
- lghi %r14,__LC_CPU_TIMER_SAVE_AREA
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
- tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
- jo 3f
- la %r14,__LC_SYNC_ENTER_TIMER
- clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
- jl 0f
- la %r14,__LC_ASYNC_ENTER_TIMER
-0: clc 0(8,%r14),__LC_EXIT_TIMER
- jl 1f
- la %r14,__LC_EXIT_TIMER
-1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
- jl 2f
- la %r14,__LC_LAST_UPDATE_TIMER
-2: spt 0(%r14)
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
- jno .Lmcck_panic # no -> skip cleanup critical
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
- tm %r8,0x0001 # interrupting from user ?
- jz .Lmcck_skip
- UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
- LAST_BREAK %r14
-.Lmcck_skip:
- lghi %r14,__LC_GPREGS_SAVE_AREA+64
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),0(%r14)
- stmg %r8,%r9,__PT_PSW(%r11)
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,s390_do_machine_check
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lmcck_return
- lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
- ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jno .Lmcck_return
- TRACE_IRQS_OFF
- brasl %r14,s390_handle_mcck
- TRACE_IRQS_ON
-.Lmcck_return:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
- tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
- jno 0f
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-0: lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_MCCK_PSW
-
-.Lmcck_panic:
- lg %r14,__LC_PANIC_STACK
- slgr %r14,%r15
- srag %r14,%r14,PAGE_SHIFT
- jz 0f
- lg %r15,__LC_PANIC_STACK
-0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j .Lmcck_skip
-
-#
-# PSW restart interrupt handler
-#
-ENTRY(restart_int_handler)
- stg %r15,__LC_SAVE_AREA_RESTART
- lg %r15,__LC_RESTART_STACK
- aghi %r15,-__PT_SIZE # create pt_regs on stack
- xc 0(__PT_SIZE,%r15),0(%r15)
- stmg %r0,%r14,__PT_R0(%r15)
- mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
- mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
- aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
- xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
- lg %r2,__LC_RESTART_DATA
- lg %r3,__LC_RESTART_SOURCE
- ltgr %r3,%r3 # test source cpu address
- jm 1f # negative -> skip source stop
-0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
- brc 10,0b # wait for status stored
-1: basr %r14,%r1 # call function
- stap __SF_EMPTY(%r15) # store cpu address
- llgh %r3,__SF_EMPTY(%r15)
-2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
- brc 2,2b
-3: j 3b
-
- .section .kprobes.text, "ax"
-
-#ifdef CONFIG_CHECK_STACK
-/*
- * The synchronous or the asynchronous stack overflowed. We are dead.
- * No need to properly save the registers, we are going to panic anyway.
- * Setup a pt_regs so that show_trace can provide a good call trace.
- */
-stack_overflow:
- lg %r15,__LC_PANIC_STACK # change to panic stack
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_R8(64,%r11),0(%r14)
- stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- jg kernel_stack_overflow
-#endif
-
- .align 8
-.Lcleanup_table:
- .quad system_call
- .quad .Lsysc_do_svc
- .quad .Lsysc_tif
- .quad .Lsysc_restore
- .quad .Lsysc_done
- .quad .Lio_tif
- .quad .Lio_restore
- .quad .Lio_done
- .quad psw_idle
- .quad .Lpsw_idle_end
-
-cleanup_critical:
- clg %r9,BASED(.Lcleanup_table) # system_call
- jl 0f
- clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
- jl .Lcleanup_system_call
- clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
- jl .Lcleanup_sysc_tif
- clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
- jl .Lcleanup_sysc_restore
- clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
- jl .Lcleanup_io_tif
- clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
- jl .Lcleanup_io_restore
- clg %r9,BASED(.Lcleanup_table+64) # psw_idle
- jl 0f
- clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
- jl .Lcleanup_idle
-0: br %r14
-
-
-.Lcleanup_system_call:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn)
- jh 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stmg has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn+8)
- jh 0f
- mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
-0: # check if base register setup + TIF bit load has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+16)
- jhe 0f
- # set up saved registers r10 and r12
- stg %r10,16(%r11) # r10 last break
- stg %r12,32(%r11) # r12 thread-info pointer
-0: # check if the user time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+24)
- jh 0f
- lg %r15,__LC_EXIT_TIMER
- slg %r15,__LC_SYNC_ENTER_TIMER
- alg %r15,__LC_USER_TIMER
- stg %r15,__LC_USER_TIMER
-0: # check if the system time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+32)
- jh 0f
- lg %r15,__LC_LAST_UPDATE_TIMER
- slg %r15,__LC_EXIT_TIMER
- alg %r15,__LC_SYSTEM_TIMER
- stg %r15,__LC_SYSTEM_TIMER
-0: # update accounting time stamp
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- # do LAST_BREAK
- lg %r9,16(%r11)
- srag %r9,%r9,23
- jz 0f
- mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
- lg %r15,__LC_KERNEL_STACK
- la %r9,STACK_FRAME_OVERHEAD(%r15)
- stg %r9,24(%r11) # r11 pt_regs pointer
- # fill pt_regs
- mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
- # setup saved register r15
- stg %r15,56(%r11) # r15 stack pointer
- # set new psw address and exit
- larl %r9,.Lsysc_do_svc
- br %r14
-.Lcleanup_system_call_insn:
- .quad system_call
- .quad .Lsysc_stmg
- .quad .Lsysc_per
- .quad .Lsysc_vtime+18
- .quad .Lsysc_vtime+42
-
-.Lcleanup_sysc_tif:
- larl %r9,.Lsysc_tif
- br %r14
-
-.Lcleanup_sysc_restore:
- clg %r9,BASED(.Lcleanup_sysc_restore_insn)
- je 0f
- lg %r9,24(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
-.Lcleanup_sysc_restore_insn:
- .quad .Lsysc_done - 4
-
-.Lcleanup_io_tif:
- larl %r9,.Lio_tif
- br %r14
-
-.Lcleanup_io_restore:
- clg %r9,BASED(.Lcleanup_io_restore_insn)
- je 0f
- lg %r9,24(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
-.Lcleanup_io_restore_insn:
- .quad .Lio_done - 4
-
-.Lcleanup_idle:
- # copy interrupt clock & cpu timer
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck & stpt have been executed
- clg %r9,BASED(.Lcleanup_idle_insn)
- jhe 1f
- mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1: # account system time going idle
- lg %r9,__LC_STEAL_TIMER
- alg %r9,__CLOCK_IDLE_ENTER(%r2)
- slg %r9,__LC_LAST_UPDATE_CLOCK
- stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lg %r9,__LC_SYSTEM_TIMER
- alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__TIMER_IDLE_ENTER(%r2)
- stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
- # prepare return psw
- nihh %r8,0xfcfd # clear irq & wait state bits
- lg %r9,48(%r11) # return from psw_idle
- br %r14
-.Lcleanup_idle_insn:
- .quad .Lpsw_idle_lpsw
-
-/*
- * Integer constants
- */
- .align 8
-.Lcritical_start:
- .quad .L__critical_start
-.Lcritical_length:
- .quad .L__critical_end - .L__critical_start
-
-
-#if IS_ENABLED(CONFIG_KVM)
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
-ENTRY(sie64a)
- stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
- stg %r2,__SF_EMPTY(%r15) # save control block pointer
- stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
- xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
- lmg %r0,%r13,0(%r3) # load guest gprs 0-13
- lg %r14,__LC_GMAP # get gmap pointer
- ltgr %r14,%r14
- jz .Lsie_gmap
- lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
-.Lsie_gmap:
- lg %r14,__SF_EMPTY(%r15) # get control block pointer
- oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
- tm __SIE_PROG20+3(%r14),1 # last exit...
- jnz .Lsie_done
- LPP __SF_EMPTY(%r15) # set guest id
- sie 0(%r14)
-.Lsie_done:
- LPP __SF_EMPTY+16(%r15) # set host id
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-.Lrewind_pad:
- nop 0
- .globl sie_exit
-sie_exit:
- lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
- lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
- lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
- br %r14
-.Lsie_fault:
- lghi %r14,-EFAULT
- stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
- j sie_exit
-
- .align 8
-.Lsie_critical:
- .quad .Lsie_gmap
-.Lsie_critical_length:
- .quad .Lsie_done - .Lsie_gmap
-
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
- EX_TABLE(sie_exit,.Lsie_fault)
-#endif
-
- .section .rodata, "a"
-#define SYSCALL(esa,esame,emu) .long esame
- .globl sys_call_table
-sys_call_table:
-#include "syscalls.S"
-#undef SYSCALL
-
-#ifdef CONFIG_COMPAT
-
-#define SYSCALL(esa,esame,emu) .long emu
- .globl sys_call_table_emu
-sys_call_table_emu:
-#include "syscalls.S"
-#undef SYSCALL
-#endif
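
The sys_call_table construction at the end of the removed entry code works by redefining SYSCALL() before each include of syscalls.S, so that only one column of each per-syscall macro invocation survives. A minimal user-space sketch of the same column-selection trick, using invented demo_* names rather than the real syscall entry points:

	#include <stdio.h>

	/* Stand-ins for real syscall entry points; names are invented. */
	static long demo_read(void)  { return 0; }
	static long demo_write(void) { return 1; }

	/* The "syscalls.S" analogue: one macro invocation per syscall,
	 * with one column per ABI. Columns that are never selected do
	 * not even need to exist. */
	#define DEMO_SYSCALL_LIST \
		SYSCALL(demo_read_31,  demo_read,  demo_read_compat)  \
		SYSCALL(demo_write_31, demo_write, demo_write_compat)

	/* Select the middle (64-bit) column, as sys_call_table does. */
	#define SYSCALL(esa, esame, emu) esame,
	static long (*demo_call_table[])(void) = { DEMO_SYSCALL_LIST };
	#undef SYSCALL

	int main(void)
	{
		printf("%ld\n", demo_call_table[1]());	/* calls demo_write */
		return 0;
	}
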
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 6c79f1b44fe7..e0eaf11134b4 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -130,8 +130,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
- if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
- return -EPERM;
+ s390_kernel_write((void *) rec->ip, &new, sizeof(new));
return 0;
}
@@ -159,8 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
- if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
- return -EPERM;
+ s390_kernel_write((void *) rec->ip, &new, sizeof(new));
return 0;
}
@@ -231,14 +229,16 @@ int ftrace_enable_ftrace_graph_caller(void)
{
u8 op = 0x04; /* set mask field to zero */
- return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
u8 op = 0xf4; /* set mask field to all ones */
- return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ return 0;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
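
For context, both ftrace hunks follow the same compare-then-patch shape: verify that the call site still contains the expected code, then overwrite it; with s390_kernel_write() the write step is no longer treated as something that can fail. A rough user-space sketch of that shape (demo_* names are invented, and memcpy stands in for the real write helper):

	#include <errno.h>
	#include <string.h>

	struct demo_insn { unsigned char bytes[6]; };	/* one 6-byte instruction */

	/* Placeholder for s390_kernel_write(), which bypasses write protection. */
	static void demo_patch_write(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);
	}

	static int demo_patch_site(struct demo_insn *site,
				   const struct demo_insn *expected,
				   const struct demo_insn *replacement)
	{
		/* Refuse to patch if the site no longer holds the expected code. */
		if (memcmp(site, expected, sizeof(*expected)))
			return -EINVAL;
		demo_patch_write(site, replacement, sizeof(*replacement));
		return 0;
	}
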
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 132f4c9ade60..59b7c6470567 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,11 +27,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4
-#else
-#define ARCH_OFFSET 0
-#endif
__HEAD
@@ -67,7 +63,6 @@ __HEAD
# subroutine to set architecture mode
#
.Lsetmode:
-#ifdef CONFIG_64BIT
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
@@ -76,16 +71,12 @@ __HEAD
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam31 # switch to 31 bit addressing mode
-#else
- mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
-#endif
br %r14
#
# subroutine to wait for end I/O
#
.Lirqwait:
-#ifdef CONFIG_64BIT
mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw
lpsw .Lwaitpsw
.Lioint:
@@ -93,15 +84,6 @@ __HEAD
.align 8
.Lnewpsw:
.quad 0x0000000080000000,.Lioint
-#else
- mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
- lpsw .Lwaitpsw
-.Lioint:
- br %r14
- .align 8
-.Lnewpsw:
- .long 0x00080000,0x80000000+.Lioint
-#endif
.Lwaitpsw:
.long 0x020a0000,0x80000000+.Lioint
@@ -375,7 +357,6 @@ ENTRY(startup)
ENTRY(startup_kdump)
j .Lep_startup_kdump
.Lep_startup_normal:
-#ifdef CONFIG_64BIT
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
@@ -384,9 +365,6 @@ ENTRY(startup_kdump)
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam31 # switch to 31 bit addressing mode
-#else
- mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
-#endif
basr %r13,0 # get base
.LPG0:
xc 0x200(256),0x200 # partially clear lowcore
@@ -396,7 +374,6 @@ ENTRY(startup_kdump)
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
-#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
@@ -435,7 +412,6 @@ ENTRY(startup_kdump)
# the kernel will crash. Format is number of facility words with bits set,
# followed by the facility words.
-#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_Z13)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_ZEC12)
@@ -451,35 +427,10 @@ ENTRY(startup_kdump)
#elif defined(CONFIG_MARCH_Z900)
.long 1, 0xc0000000
#endif
-#else
-#if defined(CONFIG_MARCH_ZEC12)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z196)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z10)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z9_109)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z990)
- .long 1, 0x80002000
-#elif defined(CONFIG_MARCH_Z900)
- .long 1, 0x80000000
-#endif
-#endif
4:
-#endif
-
-#ifdef CONFIG_64BIT
/* Continue with 64bit startup code in head64.S */
sam64 # switch to 64 bit mode
jg startup_continue
-#else
- /* Continue with 31bit startup code in head31.S */
- l %r13,5f-.LPG0(%r13)
- b 0(%r13)
- .align 8
-5: .long startup_continue
-#endif
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
deleted file mode 100644
index 6dbe80983a24..000000000000
--- a/arch/s390/kernel/head31.S
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright IBM Corp. 2005, 2010
- *
- * Author(s): Hartmut Penner <hp@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Rob van der Heij <rvdhei@iae.nl>
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-
-__HEAD
-ENTRY(startup_continue)
- basr %r13,0 # get base
-.LPG1:
-
- l %r1,.Lbase_cc-.LPG1(%r13)
- mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
- lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
- # move IPL device to lowcore
-#
-# Setup stack
-#
- l %r15,.Linittu-.LPG1(%r13)
- st %r15,__LC_THREAD_INFO # cache thread info in lowcore
- mvc __LC_CURRENT(4),__TI_task(%r15)
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
- st %r15,__LC_KERNEL_STACK # set end of kernel stack
- ahi %r15,-96
-#
-# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
-# and create a kernel NSS if the SAVESYS= parm is defined
-#
- l %r14,.Lstartup_init-.LPG1(%r13)
- basr %r14,%r14
- lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
- .align 8
-.Lentry:.long 0x00080000,0x80000000 + _stext
-.Lctl: .long 0x04b50000 # cr0: various things
- .long 0 # cr1: primary space segment table
- .long .Lduct # cr2: dispatchable unit control table
- .long 0 # cr3: instruction authorization
- .long 0 # cr4: instruction authorization
- .long .Lduct # cr5: primary-aste origin
- .long 0 # cr6: I/O interrupts
- .long 0 # cr7: secondary space segment table
- .long 0 # cr8: access registers translation
- .long 0 # cr9: tracing off
- .long 0 # cr10: tracing off
- .long 0 # cr11: tracing off
- .long 0 # cr12: tracing off
- .long 0 # cr13: home space segment table
- .long 0xc0000000 # cr14: machine check handling off
- .long 0 # cr15: linkage stack operations
-.Lbss_bgn: .long __bss_start
-.Lbss_end: .long _end
-.Lparmaddr: .long PARMAREA
-.Linittu: .long init_thread_union
-.Lstartup_init:
- .long startup_init
- .align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
- .long 0,0,0,0,0,0,0,0
- .align 128
-.Lduald:.rept 8
- .long 0x80000000,0,0,0 # invalid access-list entries
- .endr
-.Lbase_cc:
- .long sched_clock_base_cc
-
-ENTRY(_ehead)
-
- .org 0x100000 - 0x11000 # head.o ends at 0x11000
-#
-# startup-code, running in absolute addressing mode
-#
-ENTRY(_stext)
- basr %r13,0 # get base
-.LPG3:
-# check control registers
- stctl %c0,%c15,0(%r15)
- oi 2(%r15),0x60 # enable sigp emergency & external call
- oi 0(%r15),0x10 # switch on low address protection
- lctl %c0,%c15,0(%r15)
-
-#
- lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
- l %r14,.Lstart-.LPG3(%r13)
- basr %r14,%r14 # call start_kernel
-#
-# We returned from start_kernel ?!? PANIK
-#
- basr %r13,0
- lpsw .Ldw-.(%r13) # load disabled wait psw
-#
- .align 8
-.Ldw: .long 0x000a0000,0x00000000
-.Lstart:.long start_kernel
-.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index 085a95eb315f..d05950f02c34 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -92,17 +92,9 @@ startup_kdump_relocated:
#else
.align 2
.Lep_startup_kdump:
-#ifdef CONFIG_64BIT
larl %r13,startup_kdump_crash
lpswe 0(%r13)
.align 8
startup_kdump_crash:
.quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
-#else
- basr %r13,0
-0: lpsw startup_kdump_crash-0b(%r13)
-.align 8
-startup_kdump_crash:
- .long 0x000a0000,0x00000000 + startup_kdump_crash
-#endif /* CONFIG_64BIT */
#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5c8651f36509..52fbef91d1d9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -182,24 +182,21 @@ EXPORT_SYMBOL_GPL(diag308);
/* SYSFS */
-#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
+#define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) \
static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *page) \
{ \
- return sprintf(page, _format, _value); \
-} \
+ return snprintf(page, PAGE_SIZE, _format, ##args); \
+}
+
+#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
+IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
- __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
+ __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL)
#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
-static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- return sprintf(page, _fmt_out, \
- (unsigned long long) _value); \
-} \
+IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
@@ -213,15 +210,10 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
- sys_##_prefix##_##_name##_store);
+ sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
-static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- return sprintf(page, _fmt_out, _value); \
-} \
+IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
@@ -233,7 +225,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
- sys_##_prefix##_##_name##_store);
+ sys_##_prefix##_##_name##_store)
static void make_attrs_ro(struct attribute **attrs)
{
@@ -415,15 +407,9 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
IPL_PARMBLOCK_SIZE);
}
-
-static struct bin_attribute ipl_parameter_attr = {
- .attr = {
- .name = "binary_parameter",
- .mode = S_IRUGO,
- },
- .size = PAGE_SIZE,
- .read = &ipl_parameter_read,
-};
+static struct bin_attribute ipl_parameter_attr =
+ __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL,
+ PAGE_SIZE);
static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
@@ -434,14 +420,13 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
+static struct bin_attribute ipl_scp_data_attr =
+ __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE);
-static struct bin_attribute ipl_scp_data_attr = {
- .attr = {
- .name = "scp_data",
- .mode = S_IRUGO,
- },
- .size = PAGE_SIZE,
- .read = ipl_scp_data_read,
+static struct bin_attribute *ipl_fcp_bin_attrs[] = {
+ &ipl_parameter_attr,
+ &ipl_scp_data_attr,
+ NULL,
};
/* FCP ipl device attributes */
@@ -484,6 +469,7 @@ static struct attribute *ipl_fcp_attrs[] = {
static struct attribute_group ipl_fcp_attr_group = {
.attrs = ipl_fcp_attrs,
+ .bin_attrs = ipl_fcp_bin_attrs,
};
/* CCW ipl device attributes */
@@ -540,28 +526,6 @@ static struct attribute_group ipl_unknown_attr_group = {
static struct kset *ipl_kset;
-static int __init ipl_register_fcp_files(void)
-{
- int rc;
-
- rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
- if (rc)
- goto out;
- rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
- if (rc)
- goto out_ipl_parm;
- rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
- if (!rc)
- goto out;
-
- sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
-
-out_ipl_parm:
- sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
-out:
- return rc;
-}
-
static void __ipl_run(void *unused)
{
diag308(DIAG308_IPL, NULL);
@@ -596,7 +560,7 @@ static int __init ipl_init(void)
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
- rc = ipl_register_fcp_files();
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
case IPL_TYPE_NSS:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
@@ -744,15 +708,13 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
return count;
}
+static struct bin_attribute sys_reipl_fcp_scp_data_attr =
+ __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
+ reipl_fcp_scpdata_write, PAGE_SIZE);
-static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
- .attr = {
- .name = "scp_data",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = PAGE_SIZE,
- .read = reipl_fcp_scpdata_read,
- .write = reipl_fcp_scpdata_write,
+static struct bin_attribute *reipl_fcp_bin_attrs[] = {
+ &sys_reipl_fcp_scp_data_attr,
+ NULL,
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
@@ -841,6 +803,7 @@ static struct attribute *reipl_fcp_attrs[] = {
static struct attribute_group reipl_fcp_attr_group = {
.attrs = reipl_fcp_attrs,
+ .bin_attrs = reipl_fcp_bin_attrs,
};
/* CCW reipl device attributes */
@@ -1261,15 +1224,6 @@ static int __init reipl_fcp_init(void)
return rc;
}
- rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
- &sys_reipl_fcp_scp_data_attr);
- if (rc) {
- sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
- kset_unregister(reipl_fcp_kset);
- free_page((unsigned long) reipl_block_fcp);
- return rc;
- }
-
if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
/*
@@ -1713,9 +1667,7 @@ static ssize_t on_reboot_store(struct kobject *kobj,
{
return set_trigger(buf, &on_reboot_trigger, len);
}
-
-static struct kobj_attribute on_reboot_attr =
- __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
+static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot);
static void do_machine_restart(char *__unused)
{
@@ -1741,9 +1693,7 @@ static ssize_t on_panic_store(struct kobject *kobj,
{
return set_trigger(buf, &on_panic_trigger, len);
}
-
-static struct kobj_attribute on_panic_attr =
- __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic);
static void do_panic(void)
{
@@ -1769,9 +1719,7 @@ static ssize_t on_restart_store(struct kobject *kobj,
{
return set_trigger(buf, &on_restart_trigger, len);
}
-
-static struct kobj_attribute on_restart_attr =
- __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
+static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
static void __do_restart(void *ignore)
{
@@ -1808,10 +1756,7 @@ static ssize_t on_halt_store(struct kobject *kobj,
{
return set_trigger(buf, &on_halt_trigger, len);
}
-
-static struct kobj_attribute on_halt_attr =
- __ATTR(on_halt, 0644, on_halt_show, on_halt_store);
-
+static struct kobj_attribute on_halt_attr = __ATTR_RW(on_halt);
static void do_machine_halt(void)
{
@@ -1837,10 +1782,7 @@ static ssize_t on_poff_store(struct kobject *kobj,
{
return set_trigger(buf, &on_poff_trigger, len);
}
-
-static struct kobj_attribute on_poff_attr =
- __ATTR(on_poff, 0644, on_poff_show, on_poff_store);
-
+static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff);
static void do_machine_power_off(void)
{
@@ -1850,26 +1792,27 @@ static void do_machine_power_off(void)
}
void (*_machine_power_off)(void) = do_machine_power_off;
+static struct attribute *shutdown_action_attrs[] = {
+ &on_restart_attr.attr,
+ &on_reboot_attr.attr,
+ &on_panic_attr.attr,
+ &on_halt_attr.attr,
+ &on_poff_attr.attr,
+ NULL,
+};
+
+static struct attribute_group shutdown_action_attr_group = {
+ .attrs = shutdown_action_attrs,
+};
+
static void __init shutdown_triggers_init(void)
{
shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
firmware_kobj);
if (!shutdown_actions_kset)
goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_reboot_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_panic_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_halt_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_poff_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_restart_attr.attr))
+ if (sysfs_create_group(&shutdown_actions_kset->kobj,
+ &shutdown_action_attr_group))
goto fail;
return;
fail:
@@ -2062,12 +2005,10 @@ static void do_reset_calls(void)
{
struct reset_call *reset;
-#ifdef CONFIG_64BIT
if (diag308_set_works) {
diag308_reset();
return;
}
-#endif
list_for_each_entry(reset, &rcall, list)
reset->fn();
}
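
The ipl.c changes above repeatedly replace per-file sysfs_create_file()/sysfs_create_bin_file() calls with a single attribute group whose .attrs and .bin_attrs arrays are registered by one sysfs_create_group() call. A condensed sketch of that pattern with invented demo_* attributes (not the real ipl ones):

	#include <linux/kobject.h>
	#include <linux/mm.h>
	#include <linux/stat.h>
	#include <linux/sysfs.h>

	static ssize_t demo_plain_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
	{
		return snprintf(page, PAGE_SIZE, "%d\n", 42);
	}
	static struct kobj_attribute demo_plain_attr = __ATTR_RO(demo_plain);

	static ssize_t demo_blob_read(struct file *filp, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t off, size_t count)
	{
		return 0;	/* nothing to read in this sketch */
	}
	static struct bin_attribute demo_blob_attr =
		__BIN_ATTR(demo_blob, S_IRUGO, demo_blob_read, NULL, PAGE_SIZE);

	static struct attribute *demo_attrs[] = { &demo_plain_attr.attr, NULL };
	static struct bin_attribute *demo_bin_attrs[] = { &demo_blob_attr, NULL };

	static struct attribute_group demo_attr_group = {
		.attrs		= demo_attrs,
		.bin_attrs	= demo_bin_attrs,
	};

	/* Registration then collapses to a single call:
	 *	rc = sysfs_create_group(kobj, &demo_attr_group);
	 */
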
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f238720690f3..e9d9addfaa44 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -56,7 +56,7 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
* /proc/interrupts.
* In addition this list contains non external / I/O events like NMIs.
*/
-static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
+static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
@@ -79,7 +79,6 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
{.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
- {.irq = IRQIO_CLW, .name = "CLW", .desc = "[I/O] CLAW"},
{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
{.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
@@ -94,6 +93,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
void __init init_IRQ(void)
{
+ BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
init_cio_interrupts();
init_airq_interrupts();
init_ext_interrupts();
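
The irq.c change drops the explicit NR_ARCH_IRQS array bound and instead asserts at compile time that the initializer still has the expected number of entries, so the interrupt-class list and the enum cannot drift apart unnoticed when an entry such as IRQIO_CLW is removed. A minimal sketch of that pattern with invented names:

	#include <linux/bug.h>
	#include <linux/kernel.h>

	enum { DEMO_IRQ_A, DEMO_IRQ_B, NR_DEMO_IRQS };

	/* Let the initializer determine the array length... */
	static const char *const demo_irq_names[] = {
		[DEMO_IRQ_A] = "A",
		[DEMO_IRQ_B] = "B",
	};

	static void demo_init(void)
	{
		/* ...and fail the build if it ever disagrees with the counter. */
		BUILD_BUG_ON(ARRAY_SIZE(demo_irq_names) != NR_DEMO_IRQS);
	}
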
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 830066f936c8..a90299600483 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -78,7 +78,7 @@ static void __jump_label_transform(struct jump_entry *entry,
if (memcmp((void *)entry->code, &old, sizeof(old)))
jump_label_bug(entry, &old, &new);
}
- probe_kernel_write((void *)entry->code, &new, sizeof(new));
+ s390_kernel_write((void *)entry->code, &new, sizeof(new));
}
static int __sm_arch_jump_label_transform(void *data)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index f516edc1fbe3..389db56a2208 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -178,7 +178,7 @@ static int swap_instruction(void *data)
}
skip_ftrace:
kcb->kprobe_status = KPROBE_SWAP_INST;
- probe_kernel_write(p->addr, &new_insn, len);
+ s390_kernel_write(p->addr, &new_insn, len);
kcb->kprobe_status = status;
return 0;
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 2ca95862e336..0c1a679314dd 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -38,13 +38,8 @@
#define DEBUGP(fmt , ...)
#endif
-#ifndef CONFIG_64BIT
-#define PLT_ENTRY_SIZE 12
-#else /* CONFIG_64BIT */
#define PLT_ENTRY_SIZE 20
-#endif /* CONFIG_64BIT */
-#ifdef CONFIG_64BIT
void *module_alloc(unsigned long size)
{
if (PAGE_ALIGN(size) > MODULES_LEN)
@@ -53,7 +48,6 @@ void *module_alloc(unsigned long size)
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
-#endif
void module_arch_freeing_init(struct module *mod)
{
@@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->module_core + me->arch.plt_offset +
info->plt_offset;
-#ifndef CONFIG_64BIT
- ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
- ip[1] = 0x100607f1;
- ip[2] = val;
-#else /* CONFIG_64BIT */
ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
ip[1] = 0x100a0004;
ip[2] = 0x07f10000;
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
-#endif /* CONFIG_64BIT */
info->plt_initialized = 1;
}
if (r_type == R_390_PLTOFF16 ||
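
The 64-bit PLT entry written in the module.c hunk above is 20 bytes: the instruction sequence "basr 1,0; lg 1,10(1); br 1" (plus two bytes of padding) followed by the 64-bit target address split into two 32-bit halves. A small illustrative sketch of that store pattern, not the real relocation code:

	#include <stdint.h>

	/* Fill a 20-byte s390 PLT entry (five 32-bit words). */
	static void demo_fill_plt(uint32_t *ip, uint64_t target)
	{
		ip[0] = 0x0d10e310;		/* basr %r1,0 + first half of lg */
		ip[1] = 0x100a0004;		/* rest of lg %r1,10(%r1)        */
		ip[2] = 0x07f10000;		/* br %r1, then 2 bytes padding  */
		ip[3] = (uint32_t)(target >> 32);	/* address, high word */
		ip[4] = (uint32_t)target;		/* address, low word  */
	}
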
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3f51cf4e8f02..505c17c0ae1a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/
kill_task = 1;
}
-#ifndef CONFIG_64BIT
+ fpt_save_area = &S390_lowcore.floating_pt_save_area;
+ fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
+ if (!mci->fc) {
+ /*
+ * Floating point control register can't be restored.
+ * Task will be terminated.
+ */
+ asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
+ kill_task = 1;
+ } else
+ asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+
asm volatile(
" ld 0,0(%0)\n"
- " ld 2,8(%0)\n"
- " ld 4,16(%0)\n"
- " ld 6,24(%0)"
- : : "a" (&S390_lowcore.floating_pt_save_area));
-#endif
-
- if (MACHINE_HAS_IEEE) {
-#ifdef CONFIG_64BIT
- fpt_save_area = &S390_lowcore.floating_pt_save_area;
- fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
-#else
- fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
- fpt_creg_save_area = fpt_save_area + 128;
-#endif
- if (!mci->fc) {
- /*
- * Floating point control register can't be restored.
- * Task will be terminated.
- */
- asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
- kill_task = 1;
-
- } else
- asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
-
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 1,8(%0)\n"
- " ld 2,16(%0)\n"
- " ld 3,24(%0)\n"
- " ld 4,32(%0)\n"
- " ld 5,40(%0)\n"
- " ld 6,48(%0)\n"
- " ld 7,56(%0)\n"
- " ld 8,64(%0)\n"
- " ld 9,72(%0)\n"
- " ld 10,80(%0)\n"
- " ld 11,88(%0)\n"
- " ld 12,96(%0)\n"
- " ld 13,104(%0)\n"
- " ld 14,112(%0)\n"
- " ld 15,120(%0)\n"
- : : "a" (fpt_save_area));
- }
-
-#ifdef CONFIG_64BIT
+ " ld 1,8(%0)\n"
+ " ld 2,16(%0)\n"
+ " ld 3,24(%0)\n"
+ " ld 4,32(%0)\n"
+ " ld 5,40(%0)\n"
+ " ld 6,48(%0)\n"
+ " ld 7,56(%0)\n"
+ " ld 8,64(%0)\n"
+ " ld 9,72(%0)\n"
+ " ld 10,80(%0)\n"
+ " ld 11,88(%0)\n"
+ " ld 12,96(%0)\n"
+ " ld 13,104(%0)\n"
+ " ld 14,112(%0)\n"
+ " ld 15,120(%0)\n"
+ : : "a" (fpt_save_area));
/* Revalidate vector registers */
if (MACHINE_HAS_VX && current->thread.vxrs) {
if (!mci->vr) {
@@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
restore_vx_regs((__vector128 *)
S390_lowcore.vector_save_area_addr);
}
-#endif
/* Revalidate access registers */
asm volatile(
" lam 0,15,0(%0)"
@@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/
s390_handle_damage("invalid control registers.");
} else {
-#ifdef CONFIG_64BIT
asm volatile(
" lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area));
-#else
- asm volatile(
- " lctl 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
-#endif
}
/*
* We don't even try to revalidate the TOD register, since we simply
* can't write something sensible into that register.
*/
-#ifdef CONFIG_64BIT
/*
* See if we can revalidate the TOD programmable register with its
* old contents (should be zero) otherwise set it to zero.
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
" sckpf"
: : "a" (&S390_lowcore.tod_progreg_save_area)
: "0", "cc");
-#endif
/* Revalidate clock comparator register */
set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
@@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
if (mci->b) {
/* Processing backup -> verify if we can survive this */
u64 z_mcic, o_mcic, t_mcic;
-#ifdef CONFIG_64BIT
z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
1ULL<<16);
-#else
- z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
- 1ULL<<29);
- o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
- 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
- 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
-#endif
t_mcic = *(u64 *)mci;
if (((t_mcic & z_mcic) != 0) ||
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index f6f8886399f6..036aa01d06a9 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -6,19 +6,13 @@
#include <linux/linkage.h>
-#ifdef CONFIG_32BIT
-#define PGM_CHECK_64BIT(handler) .long default_trap_handler
-#else
-#define PGM_CHECK_64BIT(handler) .long handler
-#endif
-
#define PGM_CHECK(handler) .long handler
#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
/*
* The program check table contains exactly 128 (0x00-0x7f) entries. Each
- * line defines the 31 and/or 64 bit function to be called corresponding
- * to the program check interruption code.
+ * line defines the function to be called corresponding to the program check
+ * interruption code.
*/
.section .rodata, "a"
ENTRY(pgm_check_table)
@@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */
PGM_CHECK(operand_exception) /* 15 */
PGM_CHECK_DEFAULT /* 16 */
PGM_CHECK_DEFAULT /* 17 */
-PGM_CHECK_64BIT(transaction_exception) /* 18 */
+PGM_CHECK(transaction_exception) /* 18 */
PGM_CHECK_DEFAULT /* 19 */
PGM_CHECK_DEFAULT /* 1a */
-PGM_CHECK_64BIT(vector_exception) /* 1b */
+PGM_CHECK(vector_exception) /* 1b */
PGM_CHECK(space_switch_exception) /* 1c */
PGM_CHECK(hfp_sqrt_exception) /* 1d */
PGM_CHECK_DEFAULT /* 1e */
@@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK_64BIT(do_dat_exception) /* 38 */
-PGM_CHECK_64BIT(do_dat_exception) /* 39 */
-PGM_CHECK_64BIT(do_dat_exception) /* 3a */
-PGM_CHECK_64BIT(do_dat_exception) /* 3b */
+PGM_CHECK(do_dat_exception) /* 38 */
+PGM_CHECK(do_dat_exception) /* 39 */
+PGM_CHECK(do_dat_exception) /* 3a */
+PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK_DEFAULT /* 3d */
PGM_CHECK_DEFAULT /* 3e */
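
As the pgm_check_handler code in the entry code removed earlier in this patch shows, this table is indexed by the low seven bits of the program interruption code, giving exactly 128 handler slots. A compact C sketch of that dispatch, with invented demo_* names:

	typedef void (*demo_pgm_handler_t)(void *regs);

	static void demo_default_trap_handler(void *regs) { }

	/* 128 entries, one per possible interruption code 0x00-0x7f. */
	static demo_pgm_handler_t demo_pgm_check_table[128] = {
		[0 ... 127] = demo_default_trap_handler,  /* GCC range initializer */
	};

	static void demo_dispatch_pgm_check(unsigned int int_code, void *regs)
	{
		demo_pgm_check_table[int_code & 0x7f](regs);
	}
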
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 13fc0978ca7e..dc5edc29b73a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task)
{
}
-#ifdef CONFIG_64BIT
void arch_release_task_struct(struct task_struct *tsk)
{
if (tsk->thread.vxrs)
kfree(tsk->thread.vxrs);
}
-#endif
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
@@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.ri_signum = 0;
frame->childregs.psw.mask &= ~PSW_MASK_RI;
-#ifndef CONFIG_64BIT
- /*
- * save fprs to current->thread.fp_regs to merge them with
- * the emulated registers and then copy the result to the child.
- */
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
- memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
- sizeof(s390_fp_regs));
- /* Set a new TLS ? */
- if (clone_flags & CLONE_SETTLS)
- p->thread.acrs[0] = frame->childregs.gprs[6];
-#else /* CONFIG_64BIT */
/* Save the fpu registers to new thread structure. */
save_fp_ctl(&p->thread.fp_regs.fpc);
save_fp_regs(p->thread.fp_regs.fprs);
@@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.acrs[1] = (unsigned int)tls;
}
}
-#endif /* CONFIG_64BIT */
return 0;
}
asmlinkage void execve_tail(void)
{
current->thread.fp_regs.fpc = 0;
- if (MACHINE_HAS_IEEE)
- asm volatile("sfpc %0,%0" : : "d" (0));
+ asm volatile("sfpc %0,%0" : : "d" (0));
}
/*
@@ -188,18 +171,8 @@ asmlinkage void execve_tail(void)
*/
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
-#ifndef CONFIG_64BIT
- /*
- * save fprs to current->thread.fp_regs to merge them with
- * the emulated registers and then copy the result to the dump.
- */
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
- memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
-#else /* CONFIG_64BIT */
save_fp_ctl(&fpregs->fpc);
save_fp_regs(fpregs->fprs);
-#endif /* CONFIG_64BIT */
return 1;
}
EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index eabfb4594517..d363c9c322a1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task)
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
-#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
unsigned long cr, cr_new;
@@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task)
__ctl_load(cr_new, 2, 2);
}
}
-#endif
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task)
new.control |= PER_EVENT_BRANCH;
else
new.control |= PER_EVENT_IFETCH;
-#ifdef CONFIG_64BIT
new.control |= PER_CONTROL_SUSPENSION;
new.control |= PER_EVENT_TRANSACTION_END;
-#endif
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
new.control |= PER_EVENT_IFETCH;
new.start = 0;
@@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task)
task->thread.per_flags = 0;
}
-#ifndef CONFIG_64BIT
-# define __ADDR_MASK 3
-#else
-# define __ADDR_MASK 7
-#endif
+#define __ADDR_MASK 7
static inline unsigned long __peek_user_per(struct task_struct *child,
addr_t addr)
@@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
@@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
if (addr == (addr_t) &dummy->regs.acrs[15])
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
else
-#endif
- tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
+ tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
tmp = *(addr_t *)
((addr_t) child->thread.vxrs + 2*offset);
else
-#endif
tmp = *(addr_t *)
((addr_t) &child->thread.fp_regs.fprs + offset);
@@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
-#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
@@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
if (addr == (addr_t) &dummy->regs.acrs[15])
child->thread.acrs[15] = (unsigned int) (data >> 32);
else
-#endif
- *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
+ *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
*(addr_t *)((addr_t)
child->thread.vxrs + 2*offset) = data;
else
-#endif
*(addr_t *)((addr_t)
&child->thread.fp_regs.fprs + offset) = data;
@@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
-#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
tmp = *(__u32 *)
((addr_t) child->thread.vxrs + 2*offset);
else
-#endif
tmp = *(__u32 *)
((addr_t) &child->thread.fp_regs.fprs + offset);
@@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child,
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
*(__u32 *)((addr_t)
child->thread.vxrs + 2*offset) = tmp;
else
-#endif
*(__u32 *)((addr_t)
&child->thread.fp_regs.fprs + offset) = tmp;
@@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target,
if (target == current) {
save_fp_ctl(&target->thread.fp_regs.fpc);
save_fp_regs(target->thread.fp_regs.fprs);
- }
-#ifdef CONFIG_64BIT
- else if (target->thread.vxrs) {
+ } else if (target->thread.vxrs) {
int i;
for (i = 0; i < __NUM_VXRS_LOW; i++)
target->thread.fp_regs.fprs[i] =
*(freg_t *)(target->thread.vxrs + i);
}
-#endif
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
}
@@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current) {
restore_fp_ctl(&target->thread.fp_regs.fpc);
restore_fp_regs(target->thread.fp_regs.fprs);
- }
-#ifdef CONFIG_64BIT
- else if (target->thread.vxrs) {
+ } else if (target->thread.vxrs) {
int i;
for (i = 0; i < __NUM_VXRS_LOW; i++)
*(freg_t *)(target->thread.vxrs + i) =
target->thread.fp_regs.fprs[i];
}
-#endif
}
return rc;
}
-#ifdef CONFIG_64BIT
-
static int s390_last_break_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target,
return rc;
}
-#endif
-
static int s390_system_call_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = {
.get = s390_system_call_get,
.set = s390_system_call_set,
},
-#ifdef CONFIG_64BIT
{
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
@@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = {
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
-#endif
};
static const struct user_regset_view user_s390_view = {
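
Several ptrace hunks above access a floating-point register through the vector array, e.g. *(freg_t *)(child->thread.vxrs + 2*offset): when the vector facility is in use, FP registers 0-15 live in the leftmost eight bytes of vector registers 0-15, so doubling the byte offset from the fprs base lands on the matching 16-byte vector slot and only one of the two copies is authoritative. A small sketch of that aliasing, assuming a 16-byte vector type and an 8-byte FP image (names invented):

	#include <stdint.h>
	#include <string.h>

	typedef struct { uint64_t hi, lo; } demo_vector128;	/* 16-byte VR */
	typedef uint64_t demo_freg;				/* 8-byte FPR image */

	/* FPR n overlays the leftmost 8 bytes of VR n. */
	static demo_freg demo_fpr_from_vr(const demo_vector128 *vrs, int n)
	{
		demo_freg f;
		memcpy(&f, &vrs[n], sizeof(f));	/* first 8 bytes of the VR */
		return f;
	}

	static void demo_fpr_to_vr(demo_vector128 *vrs, int n, demo_freg f)
	{
		memcpy(&vrs[n], &f, sizeof(f));	/* leave the low half untouched */
	}
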
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index dd8016b0477e..52aab0bd84f8 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -1,7 +1,7 @@
/*
- * S390 version
- * Copyright IBM Corp. 2000
- * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ * Copyright IBM Corp 2000, 2011
+ * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ * Denis Joseph Barrow,
*/
#include <linux/linkage.h>
@@ -9,43 +9,90 @@
#include <asm/sigp.h>
#
-# store_status: Empty implementation until kdump is supported on 31 bit
+# store_status
+#
+# Prerequisites to run this function:
+# - Prefix register is set to zero
+# - Original prefix register is stored in "dump_prefix_page"
+# - Lowcore protection is off
#
ENTRY(store_status)
- br %r14
+ /* Save register one and load save area base */
+ stg %r1,__LC_SAVE_AREA_RESTART
+ lghi %r1,SAVE_AREA_BASE
+ /* General purpose registers */
+ stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ lg %r2,__LC_SAVE_AREA_RESTART
+ stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+ /* Control registers */
+ stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Access registers */
+ stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point registers */
+ std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point control register */
+ stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* CPU timer */
+ stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Saved prefix register */
+ larl %r2,dump_prefix_page
+ mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+ /* Clock comparator - seven bytes */
+ larl %r2,.Lclkcmp
+ stckc 0(%r2)
+ mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+ /* Program status word */
+ epsw %r2,%r3
+ st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
+ st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+ larl %r2,store_status
+ stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+ br %r14
+
+ .section .bss
+ .align 8
+.Lclkcmp: .quad 0x0000000000000000
+ .previous
#
# do_reipl_asm
# Parameter: r2 = schid of reipl device
#
+
ENTRY(do_reipl_asm)
basr %r13,0
-.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
-.Lpg1: # do store status of all registers
+.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
+.Lpg1: brasl %r14,store_status
- stm %r0,%r15,__LC_GPREGS_SAVE_AREA
- stctl %c0,%c15,__LC_CREGS_SAVE_AREA
- stam %a0,%a15,__LC_AREGS_SAVE_AREA
- l %r10,.Ldump_pfx-.Lpg0(%r13)
- mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
- stckc .Lclkcmp-.Lpg0(%r13)
- mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
- stpt __LC_CPU_TIMER_SAVE_AREA
- st %r13, __LC_PSW_SAVE_AREA+4
- lctl %c6,%c6,.Lall-.Lpg0(%r13)
- lr %r1,%r2
- mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
+ lctlg %c6,%c6,.Lall-.Lpg0(%r13)
+ lgr %r1,%r2
+ mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
stsch .Lschib-.Lpg0(%r13)
oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
+.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
msch .Lschib-.Lpg0(%r13)
- lhi %r0,5
+ lghi %r0,5
.Lssch: ssch .Liplorb-.Lpg0(%r13)
jz .L001
brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
-.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
+.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
+.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
.Lcont: c %r1,__LC_SUBCHANNEL_ID
jnz .Ltpi
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
@@ -58,20 +105,36 @@ ENTRY(do_reipl_asm)
jz .L003
bas %r14,.Ldisab-.Lpg0(%r13)
.L003: st %r1,__LC_SUBCHANNEL_ID
+ lhi %r1,0 # mode 0 = esa
+ slr %r0,%r0 # set cpuid to zero
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
lpsw 0
- sigp 0,0,SIGP_RESTART
-.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
- lpsw .Ldispsw-.Lpg0(%r13)
+.Ldisab: sll %r14,1
+ srl %r14,1 # need to kill hi bit to avoid specification exceptions.
+ st %r14,.Ldispsw+12-.Lpg0(%r13)
+ lpswe .Ldispsw-.Lpg0(%r13)
.align 8
-.Lclkcmp: .quad 0x0000000000000000
-.Lall: .long 0xff000000
-.Ldump_pfx: .long dump_prefix_page
- .align 8
-.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
-.Lpcnew: .long 0x00080000,0x80000000+.Lecs
-.Lionew: .long 0x00080000,0x80000000+.Lcont
-.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
-.Ldispsw: .long 0x000a0000,0x00000000
+.Lall: .quad 0x00000000ff000000
+ .align 16
+/*
+ * These addresses have to be 31 bit, otherwise
+ * the sigp will throw a specification exception
+ * when switching to ESA mode, as bit 31 would be
+ * set in the ESA psw.
+ * Bit 31 of the addresses has to be 0 for the
+ * 31-bit lpswe instruction, a fact that appears to
+ * have been omitted from the PoP.
+ */
+.Lnewpsw: .quad 0x0000000080000000
+ .quad .Lpg1
+.Lpcnew: .quad 0x0000000080000000
+ .quad .Lecs
+.Lionew: .quad 0x0000000080000000
+ .quad .Lcont
+.Lwaitpsw: .quad 0x0202000080000000
+ .quad .Ltpi
+.Ldispsw: .quad 0x0002000080000000
+ .quad 0x0000000000000000
.Liplccws: .long 0x02000000,0x60000018
.long 0x08000008,0x20000001
.Liplorb: .long 0x0049504c,0x0040ff80
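
The hunk above replaces the 8-byte short-format PSW constants (loaded with lpsw) with the 16-byte extended-format PSWs that lpswe expects, which is why each .long pair becomes a pair of .quad values. As a rough orientation only, a C sketch of the two layouts; the struct and field names here are made up for illustration, not kernel definitions:

#include <stdint.h>

/* Short-format PSW: 64 bits, loaded with lpsw. The second word carries
 * the addressing-mode bit plus a 31-bit instruction address, which is
 * why the old table used .long pairs like 0x00080000,0x80000000+.Lpg1.
 */
struct psw_short {
	uint32_t mask;	/* system mask, key, machine state */
	uint32_t addr;	/* AMODE bit + 31-bit address */
};

/* Extended-format PSW: 128 bits, loaded with lpswe. The address gets a
 * full 64-bit word of its own, matching the new
 * .quad 0x0000000080000000 / .quad .Lpg1 pairs above.
 */
struct psw_ext {
	uint64_t mask;	/* system mask plus addressing-mode bits */
	uint64_t addr;	/* 64-bit instruction address */
};
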
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
deleted file mode 100644
index dc3b1273c4dc..000000000000
--- a/arch/s390/kernel/reipl64.S
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright IBM Corp 2000, 2011
- * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- * Denis Joseph Barrow,
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/sigp.h>
-
-#
-# store_status
-#
-# Prerequisites to run this function:
-# - Prefix register is set to zero
-# - Original prefix register is stored in "dump_prefix_page"
-# - Lowcore protection is off
-#
-ENTRY(store_status)
- /* Save register one and load save area base */
- stg %r1,__LC_SAVE_AREA_RESTART
- lghi %r1,SAVE_AREA_BASE
- /* General purpose registers */
- stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- lg %r2,__LC_SAVE_AREA_RESTART
- stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
- /* Control registers */
- stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Access registers */
- stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Floating point registers */
- std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Floating point control register */
- stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* CPU timer */
- stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Saved prefix register */
- larl %r2,dump_prefix_page
- mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
- /* Clock comparator - seven bytes */
- larl %r2,.Lclkcmp
- stckc 0(%r2)
- mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
- /* Program status word */
- epsw %r2,%r3
- st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
- st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
- larl %r2,store_status
- stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
- br %r14
-
- .section .bss
- .align 8
-.Lclkcmp: .quad 0x0000000000000000
- .previous
-
-#
-# do_reipl_asm
-# Parameter: r2 = schid of reipl device
-#
-
-ENTRY(do_reipl_asm)
- basr %r13,0
-.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
-.Lpg1: brasl %r14,store_status
-
- lctlg %c6,%c6,.Lall-.Lpg0(%r13)
- lgr %r1,%r2
- mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
- stsch .Lschib-.Lpg0(%r13)
- oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
- msch .Lschib-.Lpg0(%r13)
- lghi %r0,5
-.Lssch: ssch .Liplorb-.Lpg0(%r13)
- jz .L001
- brct %r0,.Lssch
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
-.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
-.Lcont: c %r1,__LC_SUBCHANNEL_ID
- jnz .Ltpi
- clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
- jnz .Ltpi
- tsch .Liplirb-.Lpg0(%r13)
- tm .Liplirb+9-.Lpg0(%r13),0xbf
- jz .L002
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
- jz .L003
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L003: st %r1,__LC_SUBCHANNEL_ID
- lhi %r1,0 # mode 0 = esa
- slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
- lpsw 0
-.Ldisab: sll %r14,1
- srl %r14,1 # need to kill hi bit to avoid specification exceptions.
- st %r14,.Ldispsw+12-.Lpg0(%r13)
- lpswe .Ldispsw-.Lpg0(%r13)
- .align 8
-.Lall: .quad 0x00000000ff000000
- .align 16
-/*
- * These addresses have to be 31 bit otherwise
- * the sigp will throw a specifcation exception
- * when switching to ESA mode as bit 31 be set
- * in the ESA psw.
- * Bit 31 of the addresses has to be 0 for the
- * 31bit lpswe instruction a fact they appear to have
- * omitted from the pop.
- */
-.Lnewpsw: .quad 0x0000000080000000
- .quad .Lpg1
-.Lpcnew: .quad 0x0000000080000000
- .quad .Lecs
-.Lionew: .quad 0x0000000080000000
- .quad .Lcont
-.Lwaitpsw: .quad 0x0202000080000000
- .quad .Ltpi
-.Ldispsw: .quad 0x0002000080000000
- .quad 0x0000000000000000
-.Liplccws: .long 0x02000000,0x60000018
- .long 0x08000008,0x20000001
-.Liplorb: .long 0x0049504c,0x0040ff80
- .long 0x00000000+.Liplccws
-.Lschib: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
-.Liplirb: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index f4e6f20e117a..cfac28330b03 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -19,7 +19,8 @@
* %r7 = PAGE_SIZE
* %r8 holds the source address
* %r9 = PAGE_SIZE
- * %r10 is a page mask
+ *
+ * 0xf000 is a page_mask
*/
.text
@@ -27,46 +28,47 @@ ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
stnsm sys_msk-.base(%r13),0xfb # disable DAT
- stctl %c0,%c15,ctlregs-.base(%r13)
- stm %r0,%r15,gprregs-.base(%r13)
+ stctg %c0,%c15,ctlregs-.base(%r13)
+ stmg %r0,%r15,gprregs-.base(%r13)
+ lghi %r0,3
+ sllg %r0,%r0,31
+ stg %r0,0x1d0(%r0)
+ la %r0,.back_pgm-.base(%r13)
+ stg %r0,0x1d8(%r0)
la %r1,load_psw-.base(%r13)
mvc 0(8,%r0),0(%r1)
la %r0,.back-.base(%r13)
st %r0,4(%r0)
oi 4(%r0),0x80
- mvc 0x68(8,%r0),0(%r1)
- la %r0,.back_pgm-.base(%r13)
- st %r0,0x6c(%r0)
- oi 0x6c(%r0),0x80
- lhi %r0,0
+ lghi %r0,0
diag %r0,%r0,0x308
.back:
+ lhi %r1,1 # mode 1 = esame
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
+ sam64 # switch to 64 bit addressing mode
basr %r13,0
.back_base:
oi have_diag308-.back_base(%r13),0x01
- lctl %c0,%c15,ctlregs-.back_base(%r13)
- lm %r0,%r15,gprregs-.back_base(%r13)
- j .start_reloc
+ lctlg %c0,%c15,ctlregs-.back_base(%r13)
+ lmg %r0,%r15,gprregs-.back_base(%r13)
+ j .top
.back_pgm:
- lm %r0,%r15,gprregs-.base(%r13)
- .start_reloc:
- lhi %r10,-1 # preparing the mask
- sll %r10,12 # shift it such that it becomes 0xf000
+ lmg %r0,%r15,gprregs-.base(%r13)
.top:
- lhi %r7,4096 # load PAGE_SIZE in r7
- lhi %r9,4096 # load PAGE_SIZE in r9
- l %r5,0(%r2) # read another word for indirection page
- ahi %r2,4 # increment pointer
+ lghi %r7,4096 # load PAGE_SIZE in r7
+ lghi %r9,4096 # load PAGE_SIZE in r9
+ lg %r5,0(%r2) # read another word for indirection page
+ aghi %r2,8 # increment pointer
tml %r5,0x1 # is it a destination page?
je .indir_check # NO, goto "indir_check"
- lr %r6,%r5 # r6 = r5
- nr %r6,%r10 # mask it out and...
+ lgr %r6,%r5 # r6 = r5
+ nill %r6,0xf000 # mask it out and...
j .top # ...next iteration
.indir_check:
tml %r5,0x2 # is it a indirection page?
je .done_test # NO, goto "done_test"
- nr %r5,%r10 # YES, mask out,
- lr %r2,%r5 # move it into the right register,
+ nill %r5,0xf000 # YES, mask out,
+ lgr %r2,%r5 # move it into the right register,
j .top # and read next...
.done_test:
tml %r5,0x4 # is it the done indicator?
@@ -75,13 +77,13 @@ ENTRY(relocate_kernel)
.source_test:
tml %r5,0x8 # it should be a source indicator...
je .top # NO, ignore it...
- lr %r8,%r5 # r8 = r5
- nr %r8,%r10 # masking
+ lgr %r8,%r5 # r8 = r5
+ nill %r8,0xf000 # masking
0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
jo 0b
j .top
.done:
- sr %r0,%r0 # clear register r0
+ sgr %r0,%r0 # clear register r0
la %r4,load_psw-.base(%r13) # load psw-address into the register
o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
@@ -90,8 +92,9 @@ ENTRY(relocate_kernel)
jno .no_diag308
diag %r0,%r0,0x308
.no_diag308:
- sr %r1,%r1 # clear %r1
- sr %r2,%r2 # clear %r2
+ sam31 # 31 bit mode
+ sr %r1,%r1 # erase register r1
+ sr %r2,%r2 # erase register r2
sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
lpsw 0 # hopefully start new kernel...
@@ -102,11 +105,11 @@ ENTRY(relocate_kernel)
.quad 0
ctlregs:
.rept 16
- .long 0
+ .quad 0
.endr
gprregs:
.rept 16
- .long 0
+ .quad 0
.endr
have_diag308:
.byte 0
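
For orientation, the .top loop above is the generic kexec page walk: each 8-byte kimage entry is either the next destination page, a pointer to a further indirection page, a source page to copy, or the done marker, distinguished by the low flag bits the tml instructions test. A C sketch of the same walk (the constants simply mirror the 0x1/0x2/0x4/0x8 masks from the assembly; the function name is made up):

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define IND_DESTINATION	0x1UL	/* entry names the next destination page */
#define IND_INDIRECTION	0x2UL	/* entry points at another entry page */
#define IND_DONE	0x4UL	/* end of the image */
#define IND_SOURCE	0x8UL	/* entry names a source page to copy */

/* C rendition of the assembler loop: walk the indirection list and copy
 * each source page to the current destination, advancing the destination
 * by one page after every copy (as mvcle does with %r6).
 */
static void walk_kimage(unsigned long *entry)
{
	char *dest = NULL;	/* a well-formed list sets this first */

	for (;;) {
		unsigned long val = *entry++;

		if (val & IND_DESTINATION)
			dest = (char *)(val & PAGE_MASK);
		else if (val & IND_INDIRECTION)
			entry = (unsigned long *)(val & PAGE_MASK);
		else if (val & IND_DONE)
			break;
		else if (val & IND_SOURCE) {
			memcpy(dest, (void *)(val & PAGE_MASK), PAGE_SIZE);
			dest += PAGE_SIZE;
		}
	}
}
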
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
deleted file mode 100644
index cfac28330b03..000000000000
--- a/arch/s390/kernel/relocate_kernel64.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright IBM Corp. 2005
- *
- * Author(s): Rolf Adelsberger,
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/sigp.h>
-
-/*
- * moves the new kernel to its destination...
- * %r2 = pointer to first kimage_entry_t
- * %r3 = start address - where to jump to after the job is done...
- *
- * %r5 will be used as temp. storage
- * %r6 holds the destination address
- * %r7 = PAGE_SIZE
- * %r8 holds the source address
- * %r9 = PAGE_SIZE
- *
- * 0xf000 is a page_mask
- */
-
- .text
-ENTRY(relocate_kernel)
- basr %r13,0 # base address
- .base:
- stnsm sys_msk-.base(%r13),0xfb # disable DAT
- stctg %c0,%c15,ctlregs-.base(%r13)
- stmg %r0,%r15,gprregs-.base(%r13)
- lghi %r0,3
- sllg %r0,%r0,31
- stg %r0,0x1d0(%r0)
- la %r0,.back_pgm-.base(%r13)
- stg %r0,0x1d8(%r0)
- la %r1,load_psw-.base(%r13)
- mvc 0(8,%r0),0(%r1)
- la %r0,.back-.base(%r13)
- st %r0,4(%r0)
- oi 4(%r0),0x80
- lghi %r0,0
- diag %r0,%r0,0x308
- .back:
- lhi %r1,1 # mode 1 = esame
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
- sam64 # switch to 64 bit addressing mode
- basr %r13,0
- .back_base:
- oi have_diag308-.back_base(%r13),0x01
- lctlg %c0,%c15,ctlregs-.back_base(%r13)
- lmg %r0,%r15,gprregs-.back_base(%r13)
- j .top
- .back_pgm:
- lmg %r0,%r15,gprregs-.base(%r13)
- .top:
- lghi %r7,4096 # load PAGE_SIZE in r7
- lghi %r9,4096 # load PAGE_SIZE in r9
- lg %r5,0(%r2) # read another word for indirection page
- aghi %r2,8 # increment pointer
- tml %r5,0x1 # is it a destination page?
- je .indir_check # NO, goto "indir_check"
- lgr %r6,%r5 # r6 = r5
- nill %r6,0xf000 # mask it out and...
- j .top # ...next iteration
- .indir_check:
- tml %r5,0x2 # is it a indirection page?
- je .done_test # NO, goto "done_test"
- nill %r5,0xf000 # YES, mask out,
- lgr %r2,%r5 # move it into the right register,
- j .top # and read next...
- .done_test:
- tml %r5,0x4 # is it the done indicator?
- je .source_test # NO! Well, then it should be the source indicator...
- j .done # ok, lets finish it here...
- .source_test:
- tml %r5,0x8 # it should be a source indicator...
- je .top # NO, ignore it...
- lgr %r8,%r5 # r8 = r5
- nill %r8,0xf000 # masking
- 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
- jo 0b
- j .top
- .done:
- sgr %r0,%r0 # clear register r0
- la %r4,load_psw-.base(%r13) # load psw-address into the register
- o %r3,4(%r4) # or load address into psw
- st %r3,4(%r4)
- mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
- tm have_diag308-.base(%r13),0x01
- jno .no_diag308
- diag %r0,%r0,0x308
- .no_diag308:
- sam31 # 31 bit mode
- sr %r1,%r1 # erase register r1
- sr %r2,%r2 # erase register r2
- sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
- lpsw 0 # hopefully start new kernel...
-
- .align 8
- load_psw:
- .long 0x00080000,0x80000000
- sys_msk:
- .quad 0
- ctlregs:
- .rept 16
- .quad 0
- .endr
- gprregs:
- .rept 16
- .quad 0
- .endr
- have_diag308:
- .byte 0
- .align 8
- relocate_kernel_end:
- .align 8
- .globl relocate_kernel_len
- relocate_kernel_len:
- .quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 7e77e03378f3..43c3169ea49c 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -36,21 +36,17 @@ _sclp_wait_int:
ahi %r15,-96 # create stack frame
la %r8,LC_EXT_NEW_PSW # register int handler
la %r9,.LextpswS1-.LbaseS1(%r13)
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa1
la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
la %r9,.LextpswS1_64-.LbaseS1(%r13)
.Lesa1:
-#endif
mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
mvc 0(16,%r8),0(%r9)
-#ifdef CONFIG_64BIT
epsw %r6,%r7 # set current addressing mode
nill %r6,0x1 # in new psw (31 or 64 bit mode)
nilh %r7,0x8000
stm %r6,%r7,0(%r8)
-#endif
lhi %r6,0x0200 # cr mask for ext int (cr0.54)
ltr %r2,%r2
jz .LsetctS1
@@ -92,10 +88,8 @@ _sclp_wait_int:
.long 0, 0, 0, 0 # old ext int PSW
.LextpswS1:
.long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
-#ifdef CONFIG_64BIT
.LextpswS1_64:
.quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
-#endif
.LwaitpswS1:
.long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
.LtimeS1:
@@ -272,13 +266,11 @@ _sclp_print:
ENTRY(_sclp_print_early)
stm %r6,%r15,24(%r15) # save registers
ahi %r15,-96 # create stack frame
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa2
ahi %r15,-80
stmh %r6,%r15,96(%r15) # store upper register halves
.Lesa2:
-#endif
lr %r10,%r2 # save string pointer
lhi %r2,0
bras %r14,_sclp_setup # enable console
@@ -291,14 +283,12 @@ ENTRY(_sclp_print_early)
lhi %r2,1
bras %r14,_sclp_setup # disable console
.LendS5:
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
lgfr %r2,%r2 # sign extend return value
lmh %r6,%r15,96(%r15) # restore upper register halves
ahi %r15,80
.Lesa3:
-#endif
lm %r6,%r15,120(%r15) # restore registers
br %r14
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5ea8bc17cb3..7262fe438c99 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
-#ifdef CONFIG_64BIT
unsigned long MODULES_VADDR;
unsigned long MODULES_END;
-#endif
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
@@ -334,19 +332,10 @@ static void __init setup_lowcore(void)
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- lc->extended_save_area_addr = (__u32)
- __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
- /* enable extended save area */
- __ctl_set_bit(14, 29);
- }
-#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
-#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -450,7 +439,6 @@ static void __init setup_memory_end(void)
unsigned long vmax, vmalloc_size, tmp;
/* Choose kernel address space layout: 2, 3, or 4 levels. */
-#ifdef CONFIG_64BIT
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
@@ -462,12 +450,6 @@ static void __init setup_memory_end(void)
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
-#else
- vmalloc_size = VMALLOC_END ?: 96UL << 20;
- vmax = 1UL << 31; /* 2-level kernel page table */
- /* vmalloc area is at the end of the kernel address space. */
- VMALLOC_END = vmax;
-#endif
VMALLOC_START = vmax - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
@@ -754,7 +736,6 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;
-#if defined(CONFIG_64BIT)
/*
* 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9.
@@ -772,22 +753,15 @@ static void __init setup_hwcaps(void)
*/
if (test_facility(129))
elf_hwcap |= HWCAP_S390_VXRS;
-#endif
-
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
case 0x9672:
-#if !defined(CONFIG_64BIT)
- default: /* Use "g5" as default for 31 bit kernels. */
-#endif
strcpy(elf_platform, "g5");
break;
case 0x2064:
case 0x2066:
-#if defined(CONFIG_64BIT)
default: /* Use "z900" as default for 64 bit kernels. */
-#endif
strcpy(elf_platform, "z900");
break;
case 0x2084:
@@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p)
/*
* print what head.S has found out about the machine
*/
-#ifndef CONFIG_64BIT
- if (MACHINE_IS_VM)
- pr_info("Linux is running as a z/VM "
- "guest operating system in 31-bit mode\n");
- else if (MACHINE_IS_LPAR)
- pr_info("Linux is running natively in 31-bit mode\n");
- if (MACHINE_HAS_IEEE)
- pr_info("The hardware system has IEEE compatible "
- "floating point units\n");
- else
- pr_info("The hardware system has no IEEE compatible "
- "floating point units\n");
-#else /* CONFIG_64BIT */
if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
@@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
-#endif /* CONFIG_64BIT */
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
@@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p)
/* Add system specific data to the random pool */
setup_randomness();
}
-
-#ifdef CONFIG_32BIT
-static int no_removal_warning __initdata;
-
-static int __init parse_no_removal_warning(char *str)
-{
- no_removal_warning = 1;
- return 0;
-}
-__setup("no_removal_warning", parse_no_removal_warning);
-
-static int __init removal_warning(void)
-{
- if (no_removal_warning)
- return 0;
- printk(KERN_ALERT "\n\n");
- printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n");
- printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n");
- printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n");
- printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n");
- printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n");
- printk(KERN_CONT "please let us know. Please write to:\n");
- printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n");
- printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n");
- printk(KERN_CONT "Thank you!\n\n");
- printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n");
- printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n");
- schedule_timeout_uninterruptible(300 * HZ);
- return 0;
-}
-early_initcall(removal_warning);
-#endif
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index b3ae6f70c6d6..c551f22ce066 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -106,7 +106,6 @@ static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
save_fp_ctl(&current->thread.fp_regs.fpc);
-#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
@@ -115,7 +114,6 @@ static void store_sigregs(void)
current->thread.fp_regs.fprs[i] =
*(freg_t *)(current->thread.vxrs + i);
} else
-#endif
save_fp_regs(current->thread.fp_regs.fprs);
}
@@ -124,7 +122,6 @@ static void load_sigregs(void)
{
restore_access_regs(current->thread.acrs);
/* restore_fp_ctl is done in restore_sigregs */
-#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
@@ -133,7 +130,6 @@ static void load_sigregs(void)
current->thread.fp_regs.fprs[i];
restore_vx_regs(current->thread.vxrs);
} else
-#endif
restore_fp_regs(current->thread.fp_regs.fprs);
}
@@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
static int save_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
-#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
@@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
-#endif
return 0;
}
static int restore_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
-#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
@@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs,
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
}
-#endif
return 0;
}
@@ -309,16 +301,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
-static inline int map_signal(int sig)
-{
- if (current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32)
- return current_thread_info()->exec_domain->signal_invmap[sig];
- else
- return sig;
-}
-
static int setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
{
@@ -386,7 +368,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
- regs->gprs[2] = map_signal(sig);
+ regs->gprs[2] = sig;
regs->gprs[3] = (unsigned long) &frame->sc;
/* We forgot to include these in the sigcontext.
@@ -416,13 +398,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
* included in the signal frame on a 31-bit system.
*/
uc_flags = 0;
-#ifdef CONFIG_64BIT
if (MACHINE_HAS_VX) {
frame_size += sizeof(_sigregs_ext);
if (current->thread.vxrs)
uc_flags |= UC_VXRS;
}
-#endif
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
@@ -468,7 +448,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE;
- regs->gprs[2] = map_signal(ksig->sig);
+ regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
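
The loops taken out of their #ifdef above rely on the vector-facility register overlay: floating-point registers 0-15 are the leftmost 64 bits of vector registers 0-15, so the signal frame keeps the classic FPR view and stores only the missing pieces (the right halves of V0-V15, plus V16-V31 in full). A small illustrative sketch of that split; the type and helper here are made up for illustration, not kernel definitions:

#include <stdint.h>

/* One 128-bit vector register as two 64-bit doublewords;
 * "left" is the doubleword that doubles as the FPR.
 */
typedef struct { uint64_t left; uint64_t right; } vreg_t;

static void split_vx_state(const vreg_t vxrs[32],
			   uint64_t fprs[16],		/* classic FPR view */
			   uint64_t vxrs_low[16],	/* right halves of V0-V15 */
			   vreg_t vxrs_high[16])	/* V16-V31 */
{
	for (int i = 0; i < 16; i++) {
		fprs[i] = vxrs[i].left;		/* same bits the FPRs see */
		vxrs_low[i] = vxrs[i].right;
		vxrs_high[i] = vxrs[i + 16];
	}
}
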
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index db8f1115a3bf..efd2c1968000 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
- if (!lc->extended_save_area_addr)
- goto out;
- }
-#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
if (vdso_alloc_per_cpu(lc))
goto out;
-#endif
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
@@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
{
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- struct _lowcore *lc = pcpu->lowcore;
-
- free_page((unsigned long) lc->extended_save_area_addr);
- lc->extended_save_area_addr = 0;
- }
-#else
vdso_free_per_cpu(pcpu->lowcore);
-#endif
if (pcpu == &pcpu_devices[0])
return;
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
@@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu)
pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
-#ifndef CONFIG_64BIT
-/*
- * this function sends a 'purge tlb' signal to another CPU.
- */
-static void smp_ptlb_callback(void *info)
-{
- __tlb_flush_local();
-}
-
-void smp_ptlb_all(void)
-{
- on_each_cpu(smp_ptlb_callback, NULL, 1);
-}
-EXPORT_SYMBOL(smp_ptlb_all);
-#endif /* ! CONFIG_64BIT */
-
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
@@ -851,7 +818,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
- while (!cpu_online(cpu))
+ /* Wait until cpu puts itself in the online & active maps */
+ while (!cpu_online(cpu) || !cpu_active(cpu))
cpu_relax();
return 0;
}
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 1c4c5accd220..d3236c9e226b 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
/* Always save lowcore pages (LC protection might be enabled). */
if (pfn <= LC_PAGES)
@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp.S
index ca6294645dd3..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp.S
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 23eb222c1658..f145490cce54 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
return sys_ipc(call, first, second, third, ptr, third);
}
-#ifdef CONFIG_64BIT
SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret;
@@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
return ret;
}
-#endif /* CONFIG_64BIT */
-
-/*
- * Wrapper function for sys_fadvise64/fadvise64_64
- */
-#ifndef CONFIG_64BIT
-
-SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
- size_t, len, int, advice)
-{
- return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
- len, advice);
-}
-
-struct fadvise64_64_args {
- int fd;
- long long offset;
- long long len;
- int advice;
-};
-
-SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
-{
- struct fadvise64_64_args a;
-
- if ( copy_from_user(&a, args, sizeof(a)) )
- return -EFAULT;
- return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
-}
-
-/*
- * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
- * 64 bit argument "len" is split into the upper and lower 32 bits. The
- * system call wrapper in the user space loads the value to %r6/%r7.
- * The code in entry.S keeps the values in %r2 - %r6 where they are and
- * stores %r7 to 96(%r15). But the standard C linkage requires that
- * the whole 64 bit value for len is stored on the stack and doesn't
- * use %r6 at all. So s390_fallocate has to convert the arguments from
- * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
- * to
- * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
- */
-SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset,
- u32, len_high, u32, len_low)
-{
- return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
-}
-#endif
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 939ec474b1dd..1acad02681c4 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -1,365 +1,365 @@
/*
* definitions for sys_call_table, each line represents an
- * entry in the table in the form
- * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall)
+ * entry in the table in the form
+ * SYSCALL(64 bit syscall, 31 bit emulated syscall)
*
- * this file is meant to be included from entry.S and entry64.S
+ * this file is meant to be included from entry.S
*/
-#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
+#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
-NI_SYSCALL /* 0 */
-SYSCALL(sys_exit,sys_exit,compat_sys_exit)
-SYSCALL(sys_fork,sys_fork,sys_fork)
-SYSCALL(sys_read,sys_read,compat_sys_s390_read)
-SYSCALL(sys_write,sys_write,compat_sys_s390_write)
-SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */
-SYSCALL(sys_close,sys_close,compat_sys_close)
-SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
-SYSCALL(sys_creat,sys_creat,compat_sys_creat)
-SYSCALL(sys_link,sys_link,compat_sys_link)
-SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */
-SYSCALL(sys_execve,sys_execve,compat_sys_execve)
-SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir)
-SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */
-SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod)
-SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */
-SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
-NI_SYSCALL /* old break syscall holder */
-NI_SYSCALL /* old stat syscall holder */
-SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
-SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
-SYSCALL(sys_mount,sys_mount,compat_sys_mount)
-SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount)
-SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
-SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
-SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
-SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace)
-SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm)
-NI_SYSCALL /* old fstat syscall */
-SYSCALL(sys_pause,sys_pause,sys_pause)
-SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */
-NI_SYSCALL /* old stty syscall */
-NI_SYSCALL /* old gtty syscall */
-SYSCALL(sys_access,sys_access,compat_sys_access)
-SYSCALL(sys_nice,sys_nice,compat_sys_nice)
-NI_SYSCALL /* 35 old ftime syscall */
-SYSCALL(sys_sync,sys_sync,sys_sync)
-SYSCALL(sys_kill,sys_kill,compat_sys_kill)
-SYSCALL(sys_rename,sys_rename,compat_sys_rename)
-SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir)
-SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */
-SYSCALL(sys_dup,sys_dup,compat_sys_dup)
-SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe)
-SYSCALL(sys_times,sys_times,compat_sys_times)
-NI_SYSCALL /* old prof syscall */
-SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */
-SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
-SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
-SYSCALL(sys_signal,sys_signal,compat_sys_signal)
-SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
-SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
-SYSCALL(sys_acct,sys_acct,compat_sys_acct)
-SYSCALL(sys_umount,sys_umount,compat_sys_umount)
-NI_SYSCALL /* old lock syscall */
-SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl)
-SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */
-NI_SYSCALL /* intel mpx syscall */
-SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid)
-NI_SYSCALL /* old ulimit syscall */
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */
-SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot)
-SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat)
-SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2)
-SYSCALL(sys_getppid,sys_getppid,sys_getppid)
-SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */
-SYSCALL(sys_setsid,sys_setsid,sys_setsid)
-SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction)
-NI_SYSCALL /* old sgetmask syscall*/
-NI_SYSCALL /* old ssetmask syscall*/
-SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
-SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
-SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend)
-SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending)
-SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname)
-SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */
-SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit)
-SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage)
-SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday)
-SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday)
-SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
-SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
-NI_SYSCALL /* old select syscall */
-SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink)
-NI_SYSCALL /* old lstat syscall */
-SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */
-SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib)
-SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon)
-SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot)
-SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
-SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
-SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap)
-SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
-SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod)
-SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority)
-SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority)
-NI_SYSCALL /* old profil syscall */
-SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs)
-SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */
-NI_SYSCALL /* ioperm for i386 */
-SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall)
-SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog)
-SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer)
-SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */
-SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat)
-SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat)
-SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat)
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
-SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
-NI_SYSCALL /* old "idle" system call */
-NI_SYSCALL /* vm86old for i386 */
-SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
-SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */
-SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo)
-SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync)
-SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn)
-SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */
-SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname)
-SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname)
-NI_SYSCALL /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex)
-SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */
-SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask)
-NI_SYSCALL /* old "create module" */
-SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module)
-SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module)
-NI_SYSCALL /* 130: old get_kernel_syms */
-SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl)
-SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid)
-SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir)
-SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush)
-SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */
-SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality)
-NI_SYSCALL /* for afs_syscall */
-SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
-SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
-SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */
-SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents)
-SYSCALL(sys_select,sys_select,compat_sys_select)
-SYSCALL(sys_flock,sys_flock,compat_sys_flock)
-SYSCALL(sys_msync,sys_msync,compat_sys_msync)
-SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */
-SYSCALL(sys_writev,sys_writev,compat_sys_writev)
-SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid)
-SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync)
-SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
-SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */
-SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock)
-SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall)
-SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
-SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam)
-SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
-SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler)
-SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler)
-SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
-SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
-SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
-SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep)
-SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap)
-SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
-SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
-NI_SYSCALL /* for vm86 */
-NI_SYSCALL /* old sys_query_module */
-SYSCALL(sys_poll,sys_poll,compat_sys_poll)
-NI_SYSCALL /* old nfsservctl */
-SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
-SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
-SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl)
-SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn)
-SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction)
-SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
-SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending)
-SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
-SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
-SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
-SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */
-SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64)
-SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
-SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd)
-SYSCALL(sys_capget,sys_capget,compat_sys_capget)
-SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */
-SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
-SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile)
-NI_SYSCALL /* streams1 */
-NI_SYSCALL /* streams2 */
-SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
-SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit)
-SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2)
-SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64)
-SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64)
-SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
-SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64)
-SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64)
-SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown)
-SYSCALL(sys_getuid,sys_getuid,sys_getuid)
-SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */
-SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
-SYSCALL(sys_getegid,sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid)
-SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid)
-SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */
-SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups)
-SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown)
-SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid)
-SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid)
-SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */
-SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid)
-SYSCALL(sys_chown,sys_chown,compat_sys_chown)
-SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid)
-SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid)
-SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */
-SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid)
-SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root)
-SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore)
-SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise)
-SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */
-SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64)
-SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead)
-SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64)
-SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr)
-SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
-SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr)
-SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr)
-SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr)
-SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr)
-SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */
-SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr)
-SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr)
-SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr)
-SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr)
-SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
-SYSCALL(sys_gettid,sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill)
-SYSCALL(sys_futex,sys_futex,compat_sys_futex)
-SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity)
-SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill)
-NI_SYSCALL /* reserved for TUX */
-SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup)
-SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy)
-SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */
-SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit)
-SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel)
-SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group)
-SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create)
-SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
-SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait)
-SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address)
-SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64)
-SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create)
-SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */
-SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime)
-SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun)
-SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete)
-SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime)
-SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
-SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres)
-SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep)
-NI_SYSCALL /* reserved for vserver */
-SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64)
-SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64)
-SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64)
-SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages)
-NI_SYSCALL /* 268 sys_mbind */
-NI_SYSCALL /* 269 sys_get_mempolicy */
-NI_SYSCALL /* 270 sys_set_mempolicy */
-SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open)
-SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink)
-SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend)
-SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive)
-SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */
-SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr)
-SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load)
-SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key)
-SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key)
-SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
-SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set)
-SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get)
-SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
-SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
-SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
-NI_SYSCALL /* 287 sys_migrate_pages */
-SYSCALL(sys_openat,sys_openat,compat_sys_openat)
-SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat)
-SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */
-SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat)
-SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat)
-SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64)
-SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat)
-SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */
-SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat)
-SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat)
-SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat)
-SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat)
-SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */
-SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6)
-SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll)
-SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare)
-SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list)
-SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
-SYSCALL(sys_splice,sys_splice,compat_sys_splice)
-SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range)
-SYSCALL(sys_tee,sys_tee,compat_sys_tee)
-SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice)
-NI_SYSCALL /* 310 sys_move_pages */
-SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu)
-SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait)
-SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes)
-SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate)
-SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */
-SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd)
+NI_SYSCALL /* 0 */
+SYSCALL(sys_exit,compat_sys_exit)
+SYSCALL(sys_fork,sys_fork)
+SYSCALL(sys_read,compat_sys_s390_read)
+SYSCALL(sys_write,compat_sys_s390_write)
+SYSCALL(sys_open,compat_sys_open) /* 5 */
+SYSCALL(sys_close,compat_sys_close)
+SYSCALL(sys_restart_syscall,sys_restart_syscall)
+SYSCALL(sys_creat,compat_sys_creat)
+SYSCALL(sys_link,compat_sys_link)
+SYSCALL(sys_unlink,compat_sys_unlink) /* 10 */
+SYSCALL(sys_execve,compat_sys_execve)
+SYSCALL(sys_chdir,compat_sys_chdir)
+SYSCALL(sys_ni_syscall,compat_sys_time) /* old time syscall */
+SYSCALL(sys_mknod,compat_sys_mknod)
+SYSCALL(sys_chmod,compat_sys_chmod) /* 15 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
+NI_SYSCALL /* old break syscall holder */
+NI_SYSCALL /* old stat syscall holder */
+SYSCALL(sys_lseek,compat_sys_lseek)
+SYSCALL(sys_getpid,sys_getpid) /* 20 */
+SYSCALL(sys_mount,compat_sys_mount)
+SYSCALL(sys_oldumount,compat_sys_oldumount)
+SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
+SYSCALL(sys_ptrace,compat_sys_ptrace)
+SYSCALL(sys_alarm,compat_sys_alarm)
+NI_SYSCALL /* old fstat syscall */
+SYSCALL(sys_pause,sys_pause)
+SYSCALL(sys_utime,compat_sys_utime) /* 30 */
+NI_SYSCALL /* old stty syscall */
+NI_SYSCALL /* old gtty syscall */
+SYSCALL(sys_access,compat_sys_access)
+SYSCALL(sys_nice,compat_sys_nice)
+NI_SYSCALL /* 35 old ftime syscall */
+SYSCALL(sys_sync,sys_sync)
+SYSCALL(sys_kill,compat_sys_kill)
+SYSCALL(sys_rename,compat_sys_rename)
+SYSCALL(sys_mkdir,compat_sys_mkdir)
+SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */
+SYSCALL(sys_dup,compat_sys_dup)
+SYSCALL(sys_pipe,compat_sys_pipe)
+SYSCALL(sys_times,compat_sys_times)
+NI_SYSCALL /* old prof syscall */
+SYSCALL(sys_brk,compat_sys_brk) /* 45 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
+SYSCALL(sys_signal,compat_sys_signal)
+SYSCALL(sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
+SYSCALL(sys_acct,compat_sys_acct)
+SYSCALL(sys_umount,compat_sys_umount)
+NI_SYSCALL /* old lock syscall */
+SYSCALL(sys_ioctl,compat_sys_ioctl)
+SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */
+NI_SYSCALL /* intel mpx syscall */
+SYSCALL(sys_setpgid,compat_sys_setpgid)
+NI_SYSCALL /* old ulimit syscall */
+NI_SYSCALL /* old uname syscall */
+SYSCALL(sys_umask,compat_sys_umask) /* 60 */
+SYSCALL(sys_chroot,compat_sys_chroot)
+SYSCALL(sys_ustat,compat_sys_ustat)
+SYSCALL(sys_dup2,compat_sys_dup2)
+SYSCALL(sys_getppid,sys_getppid)
+SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */
+SYSCALL(sys_setsid,sys_setsid)
+SYSCALL(sys_sigaction,compat_sys_sigaction)
+NI_SYSCALL /* old sgetmask syscall*/
+NI_SYSCALL /* old ssetmask syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
+SYSCALL(sys_sigsuspend,compat_sys_sigsuspend)
+SYSCALL(sys_sigpending,compat_sys_sigpending)
+SYSCALL(sys_sethostname,compat_sys_sethostname)
+SYSCALL(sys_setrlimit,compat_sys_setrlimit) /* 75 */
+SYSCALL(sys_getrlimit,compat_sys_old_getrlimit)
+SYSCALL(sys_getrusage,compat_sys_getrusage)
+SYSCALL(sys_gettimeofday,compat_sys_gettimeofday)
+SYSCALL(sys_settimeofday,compat_sys_settimeofday)
+SYSCALL(sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
+NI_SYSCALL /* old select syscall */
+SYSCALL(sys_symlink,compat_sys_symlink)
+NI_SYSCALL /* old lstat syscall */
+SYSCALL(sys_readlink,compat_sys_readlink) /* 85 */
+SYSCALL(sys_uselib,compat_sys_uselib)
+SYSCALL(sys_swapon,compat_sys_swapon)
+SYSCALL(sys_reboot,compat_sys_reboot)
+SYSCALL(sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
+SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
+SYSCALL(sys_munmap,compat_sys_munmap)
+SYSCALL(sys_truncate,compat_sys_truncate)
+SYSCALL(sys_ftruncate,compat_sys_ftruncate)
+SYSCALL(sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,compat_sys_getpriority)
+SYSCALL(sys_setpriority,compat_sys_setpriority)
+NI_SYSCALL /* old profil syscall */
+SYSCALL(sys_statfs,compat_sys_statfs)
+SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */
+NI_SYSCALL /* ioperm for i386 */
+SYSCALL(sys_socketcall,compat_sys_socketcall)
+SYSCALL(sys_syslog,compat_sys_syslog)
+SYSCALL(sys_setitimer,compat_sys_setitimer)
+SYSCALL(sys_getitimer,compat_sys_getitimer) /* 105 */
+SYSCALL(sys_newstat,compat_sys_newstat)
+SYSCALL(sys_newlstat,compat_sys_newlstat)
+SYSCALL(sys_newfstat,compat_sys_newfstat)
+NI_SYSCALL /* old uname syscall */
+SYSCALL(sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
+SYSCALL(sys_vhangup,sys_vhangup)
+NI_SYSCALL /* old "idle" system call */
+NI_SYSCALL /* vm86old for i386 */
+SYSCALL(sys_wait4,compat_sys_wait4)
+SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */
+SYSCALL(sys_sysinfo,compat_sys_sysinfo)
+SYSCALL(sys_s390_ipc,compat_sys_s390_ipc)
+SYSCALL(sys_fsync,compat_sys_fsync)
+SYSCALL(sys_sigreturn,compat_sys_sigreturn)
+SYSCALL(sys_clone,compat_sys_clone) /* 120 */
+SYSCALL(sys_setdomainname,compat_sys_setdomainname)
+SYSCALL(sys_newuname,compat_sys_newuname)
+NI_SYSCALL /* modify_ldt for i386 */
+SYSCALL(sys_adjtimex,compat_sys_adjtimex)
+SYSCALL(sys_mprotect,compat_sys_mprotect) /* 125 */
+SYSCALL(sys_sigprocmask,compat_sys_sigprocmask)
+NI_SYSCALL /* old "create module" */
+SYSCALL(sys_init_module,compat_sys_init_module)
+SYSCALL(sys_delete_module,compat_sys_delete_module)
+NI_SYSCALL /* 130: old get_kernel_syms */
+SYSCALL(sys_quotactl,compat_sys_quotactl)
+SYSCALL(sys_getpgid,compat_sys_getpgid)
+SYSCALL(sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_bdflush,compat_sys_bdflush)
+SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */
+SYSCALL(sys_s390_personality,compat_sys_s390_personality)
+NI_SYSCALL /* for afs_syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
+SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */
+SYSCALL(sys_getdents,compat_sys_getdents)
+SYSCALL(sys_select,compat_sys_select)
+SYSCALL(sys_flock,compat_sys_flock)
+SYSCALL(sys_msync,compat_sys_msync)
+SYSCALL(sys_readv,compat_sys_readv) /* 145 */
+SYSCALL(sys_writev,compat_sys_writev)
+SYSCALL(sys_getsid,compat_sys_getsid)
+SYSCALL(sys_fdatasync,compat_sys_fdatasync)
+SYSCALL(sys_sysctl,compat_sys_sysctl)
+SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */
+SYSCALL(sys_munlock,compat_sys_munlock)
+SYSCALL(sys_mlockall,compat_sys_mlockall)
+SYSCALL(sys_munlockall,sys_munlockall)
+SYSCALL(sys_sched_setparam,compat_sys_sched_setparam)
+SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
+SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler)
+SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler)
+SYSCALL(sys_sched_yield,sys_sched_yield)
+SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
+SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
+SYSCALL(sys_nanosleep,compat_sys_nanosleep)
+SYSCALL(sys_mremap,compat_sys_mremap)
+SYSCALL(sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
+NI_SYSCALL /* for vm86 */
+NI_SYSCALL /* old sys_query_module */
+SYSCALL(sys_poll,compat_sys_poll)
+NI_SYSCALL /* old nfsservctl */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
+SYSCALL(sys_prctl,compat_sys_prctl)
+SYSCALL(sys_rt_sigreturn,compat_sys_rt_sigreturn)
+SYSCALL(sys_rt_sigaction,compat_sys_rt_sigaction)
+SYSCALL(sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
+SYSCALL(sys_rt_sigpending,compat_sys_rt_sigpending)
+SYSCALL(sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
+SYSCALL(sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
+SYSCALL(sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
+SYSCALL(sys_pread64,compat_sys_s390_pread64) /* 180 */
+SYSCALL(sys_pwrite64,compat_sys_s390_pwrite64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
+SYSCALL(sys_getcwd,compat_sys_getcwd)
+SYSCALL(sys_capget,compat_sys_capget)
+SYSCALL(sys_capset,compat_sys_capset) /* 185 */
+SYSCALL(sys_sigaltstack,compat_sys_sigaltstack)
+SYSCALL(sys_sendfile64,compat_sys_sendfile)
+NI_SYSCALL /* streams1 */
+NI_SYSCALL /* streams2 */
+SYSCALL(sys_vfork,sys_vfork) /* 190 */
+SYSCALL(sys_getrlimit,compat_sys_getrlimit)
+SYSCALL(sys_mmap2,compat_sys_s390_mmap2)
+SYSCALL(sys_ni_syscall,compat_sys_s390_truncate64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_ftruncate64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_lstat64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_fstat64)
+SYSCALL(sys_lchown,compat_sys_lchown)
+SYSCALL(sys_getuid,sys_getuid)
+SYSCALL(sys_getgid,sys_getgid) /* 200 */
+SYSCALL(sys_geteuid,sys_geteuid)
+SYSCALL(sys_getegid,sys_getegid)
+SYSCALL(sys_setreuid,compat_sys_setreuid)
+SYSCALL(sys_setregid,compat_sys_setregid)
+SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */
+SYSCALL(sys_setgroups,compat_sys_setgroups)
+SYSCALL(sys_fchown,compat_sys_fchown)
+SYSCALL(sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_getresuid,compat_sys_getresuid)
+SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */
+SYSCALL(sys_getresgid,compat_sys_getresgid)
+SYSCALL(sys_chown,compat_sys_chown)
+SYSCALL(sys_setuid,compat_sys_setuid)
+SYSCALL(sys_setgid,compat_sys_setgid)
+SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */
+SYSCALL(sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_pivot_root,compat_sys_pivot_root)
+SYSCALL(sys_mincore,compat_sys_mincore)
+SYSCALL(sys_madvise,compat_sys_madvise)
+SYSCALL(sys_getdents64,compat_sys_getdents64) /* 220 */
+SYSCALL(sys_ni_syscall,compat_sys_fcntl64)
+SYSCALL(sys_readahead,compat_sys_s390_readahead)
+SYSCALL(sys_ni_syscall,compat_sys_sendfile64)
+SYSCALL(sys_setxattr,compat_sys_setxattr)
+SYSCALL(sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
+SYSCALL(sys_fsetxattr,compat_sys_fsetxattr)
+SYSCALL(sys_getxattr,compat_sys_getxattr)
+SYSCALL(sys_lgetxattr,compat_sys_lgetxattr)
+SYSCALL(sys_fgetxattr,compat_sys_fgetxattr)
+SYSCALL(sys_listxattr,compat_sys_listxattr) /* 230 */
+SYSCALL(sys_llistxattr,compat_sys_llistxattr)
+SYSCALL(sys_flistxattr,compat_sys_flistxattr)
+SYSCALL(sys_removexattr,compat_sys_removexattr)
+SYSCALL(sys_lremovexattr,compat_sys_lremovexattr)
+SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
+SYSCALL(sys_gettid,sys_gettid)
+SYSCALL(sys_tkill,compat_sys_tkill)
+SYSCALL(sys_futex,compat_sys_futex)
+SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity)
+SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
+SYSCALL(sys_tgkill,compat_sys_tgkill)
+NI_SYSCALL /* reserved for TUX */
+SYSCALL(sys_io_setup,compat_sys_io_setup)
+SYSCALL(sys_io_destroy,compat_sys_io_destroy)
+SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */
+SYSCALL(sys_io_submit,compat_sys_io_submit)
+SYSCALL(sys_io_cancel,compat_sys_io_cancel)
+SYSCALL(sys_exit_group,compat_sys_exit_group)
+SYSCALL(sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
+SYSCALL(sys_epoll_wait,compat_sys_epoll_wait)
+SYSCALL(sys_set_tid_address,compat_sys_set_tid_address)
+SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64)
+SYSCALL(sys_timer_create,compat_sys_timer_create)
+SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */
+SYSCALL(sys_timer_gettime,compat_sys_timer_gettime)
+SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_clock_settime,compat_sys_clock_settime)
+SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
+SYSCALL(sys_clock_getres,compat_sys_clock_getres)
+SYSCALL(sys_clock_nanosleep,compat_sys_clock_nanosleep)
+NI_SYSCALL /* reserved for vserver */
+SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64)
+SYSCALL(sys_statfs64,compat_sys_statfs64)
+SYSCALL(sys_fstatfs64,compat_sys_fstatfs64)
+SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages)
+NI_SYSCALL /* 268 sys_mbind */
+NI_SYSCALL /* 269 sys_get_mempolicy */
+NI_SYSCALL /* 270 sys_set_mempolicy */
+SYSCALL(sys_mq_open,compat_sys_mq_open)
+SYSCALL(sys_mq_unlink,compat_sys_mq_unlink)
+SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend)
+SYSCALL(sys_mq_timedreceive,compat_sys_mq_timedreceive)
+SYSCALL(sys_mq_notify,compat_sys_mq_notify) /* 275 */
+SYSCALL(sys_mq_getsetattr,compat_sys_mq_getsetattr)
+SYSCALL(sys_kexec_load,compat_sys_kexec_load)
+SYSCALL(sys_add_key,compat_sys_add_key)
+SYSCALL(sys_request_key,compat_sys_request_key)
+SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */
+SYSCALL(sys_waitid,compat_sys_waitid)
+SYSCALL(sys_ioprio_set,compat_sys_ioprio_set)
+SYSCALL(sys_ioprio_get,compat_sys_ioprio_get)
+SYSCALL(sys_inotify_init,sys_inotify_init)
+SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
+SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
+NI_SYSCALL /* 287 sys_migrate_pages */
+SYSCALL(sys_openat,compat_sys_openat)
+SYSCALL(sys_mkdirat,compat_sys_mkdirat)
+SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */
+SYSCALL(sys_fchownat,compat_sys_fchownat)
+SYSCALL(sys_futimesat,compat_sys_futimesat)
+SYSCALL(sys_newfstatat,compat_sys_s390_fstatat64)
+SYSCALL(sys_unlinkat,compat_sys_unlinkat)
+SYSCALL(sys_renameat,compat_sys_renameat) /* 295 */
+SYSCALL(sys_linkat,compat_sys_linkat)
+SYSCALL(sys_symlinkat,compat_sys_symlinkat)
+SYSCALL(sys_readlinkat,compat_sys_readlinkat)
+SYSCALL(sys_fchmodat,compat_sys_fchmodat)
+SYSCALL(sys_faccessat,compat_sys_faccessat) /* 300 */
+SYSCALL(sys_pselect6,compat_sys_pselect6)
+SYSCALL(sys_ppoll,compat_sys_ppoll)
+SYSCALL(sys_unshare,compat_sys_unshare)
+SYSCALL(sys_set_robust_list,compat_sys_set_robust_list)
+SYSCALL(sys_get_robust_list,compat_sys_get_robust_list)
+SYSCALL(sys_splice,compat_sys_splice)
+SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range)
+SYSCALL(sys_tee,compat_sys_tee)
+SYSCALL(sys_vmsplice,compat_sys_vmsplice)
+NI_SYSCALL /* 310 sys_move_pages */
+SYSCALL(sys_getcpu,compat_sys_getcpu)
+SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait)
+SYSCALL(sys_utimes,compat_sys_utimes)
+SYSCALL(sys_fallocate,compat_sys_s390_fallocate)
+SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */
+SYSCALL(sys_signalfd,compat_sys_signalfd)
NI_SYSCALL /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd)
-SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create)
-SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
-SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
-SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2)
-SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1)
-SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */
-SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3)
-SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1)
-SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv)
-SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
-SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
-SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open)
-SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
-SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64)
-SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
-SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
-SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime)
-SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs)
-SYSCALL(sys_setns,sys_setns,compat_sys_setns)
-SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
-SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev)
-SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
-SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp)
-SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
-SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
-SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
-SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
-SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
-SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
-SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
-SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
-SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
-SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
-SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
+SYSCALL(sys_eventfd,compat_sys_eventfd)
+SYSCALL(sys_timerfd_create,compat_sys_timerfd_create)
+SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
+SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime)
+SYSCALL(sys_signalfd4,compat_sys_signalfd4)
+SYSCALL(sys_eventfd2,compat_sys_eventfd2)
+SYSCALL(sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */
+SYSCALL(sys_dup3,compat_sys_dup3)
+SYSCALL(sys_epoll_create1,compat_sys_epoll_create1)
+SYSCALL(sys_preadv,compat_sys_preadv)
+SYSCALL(sys_pwritev,compat_sys_pwritev)
+SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
+SYSCALL(sys_perf_event_open,compat_sys_perf_event_open)
+SYSCALL(sys_fanotify_init,compat_sys_fanotify_init)
+SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark)
+SYSCALL(sys_prlimit64,compat_sys_prlimit64)
+SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
+SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at)
+SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime)
+SYSCALL(sys_syncfs,compat_sys_syncfs)
+SYSCALL(sys_setns,compat_sys_setns)
+SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
+SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev)
+SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_kcmp,compat_sys_kcmp)
+SYSCALL(sys_finit_module,compat_sys_finit_module)
+SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
+SYSCALL(sys_sched_getattr,compat_sys_sched_getattr)
+SYSCALL(sys_renameat2,compat_sys_renameat2)
+SYSCALL(sys_seccomp,compat_sys_seccomp)
+SYSCALL(sys_getrandom,compat_sys_getrandom)
+SYSCALL(sys_memfd_create,compat_sys_memfd_create) /* 350 */
+SYSCALL(sys_bpf,compat_sys_bpf)
+SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
+SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,compat_sys_execveat)
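With the 31-bit kernel gone, each entry above carries only two columns: the 64-bit entry point and its compat wrapper (the leading column, which held the 31-bit entry point, has been dropped). As a rough, self-contained illustration of how a two-argument SYSCALL() macro can be expanded twice over one list to emit both tables (hypothetical names only, not the kernel's actual table generation):

/*
 * Toy model: one two-argument SYSCALL() macro, expanded twice over the same
 * list, yields the native 64-bit table and the compat table.  Illustrative
 * only; the names and the mechanism are not the kernel's.
 */
#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_foo(void)        { return 164; }  /* stands in for a 64-bit entry point */
static long compat_sys_foo(void) { return 131; }  /* stands in for its compat wrapper */
static long sys_bar(void)        { return 264; }
static long compat_sys_bar(void) { return 231; }

#define SYSCALL_LIST \
	SYSCALL(sys_foo, compat_sys_foo) \
	SYSCALL(sys_bar, compat_sys_bar)

#define SYSCALL(native, compat) native,
static const syscall_fn sys_call_table[] = { SYSCALL_LIST };
#undef SYSCALL

#define SYSCALL(native, compat) compat,
static const syscall_fn sys_call_table_emu[] = { SYSCALL_LIST };
#undef SYSCALL

int main(void)
{
	printf("native[1]=%ld compat[1]=%ld\n",
	       sys_call_table[1](), sys_call_table_emu[1]());
	return 0;
}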
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 14da43b801d9..5728c5bd44a8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -421,7 +421,7 @@ int topology_cpu_init(struct cpu *cpu)
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
-const struct cpumask *cpu_thread_mask(int cpu)
+static const struct cpumask *cpu_thread_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).thread_mask;
}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f081cf1157c3..4d96c9f53455 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -26,7 +26,6 @@ int show_unhandled_signals = 1;
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
-#ifdef CONFIG_64BIT
unsigned long address;
if (regs->int_code & 0x200)
@@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
address = regs->psw.addr;
return (void __user *)
((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
-#else
- return (void __user *)
- ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
-#endif
}
static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
"privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
"special operation exception")
-
-#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
"transaction constraint exception")
-#endif
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
@@ -182,7 +174,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
void translation_exception(struct pt_regs *regs)
{
/* May never happen. */
- die(regs, "Translation exception");
+ panic("Translation exception");
}
void illegal_op(struct pt_regs *regs)
@@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs)
} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
is_uprobe_insn = 1;
#endif
-#ifdef CONFIG_MATHEMU
- } else if (opcode[0] == 0xb3) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_b3(opcode, regs);
- } else if (opcode[0] == 0xed) {
- if (get_user(*((__u32 *) (opcode+2)),
- (__u32 __user *)(location+1)))
- return;
- signal = math_emu_ed(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb299) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_srnm(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb29c) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_stfpc(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb29d) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_lfpc(opcode, regs);
-#endif
} else
signal = SIGILL;
}
@@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs)
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
-
-#ifdef CONFIG_MATHEMU
- if (signal == SIGFPE)
- do_fp_trap(regs, current->thread.fp_regs.fpc);
- else if (signal == SIGSEGV)
- do_trap(regs, signal, SEGV_MAPERR, "user address fault");
- else
-#endif
if (signal)
do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
NOKPROBE_SYMBOL(illegal_op);
-#ifdef CONFIG_MATHEMU
-void specification_exception(struct pt_regs *regs)
-{
- __u8 opcode[6];
- __u16 __user *location = NULL;
- int signal = 0;
-
- location = (__u16 __user *) get_trap_ip(regs);
-
- if (user_mode(regs)) {
- get_user(*((__u16 *) opcode), location);
- switch (opcode[0]) {
- case 0x28: /* LDR Rx,Ry */
- signal = math_emu_ldr(opcode);
- break;
- case 0x38: /* LER Rx,Ry */
- signal = math_emu_ler(opcode);
- break;
- case 0x60: /* STD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_std(opcode, regs);
- break;
- case 0x68: /* LD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ld(opcode, regs);
- break;
- case 0x70: /* STE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ste(opcode, regs);
- break;
- case 0x78: /* LE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_le(opcode, regs);
- break;
- default:
- signal = SIGILL;
- break;
- }
- } else
- signal = SIGILL;
-
- if (signal == SIGFPE)
- do_fp_trap(regs, current->thread.fp_regs.fpc);
- else if (signal)
- do_trap(regs, signal, ILL_ILLOPN, "specification exception");
-}
-#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
-#endif
-#ifdef CONFIG_64BIT
int alloc_vector_registers(struct task_struct *tsk)
{
__vector128 *vxrs;
@@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str)
return 1;
}
__setup("novx", disable_vector_extension);
-#endif
void data_exception(struct pt_regs *regs)
{
@@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs)
location = get_trap_ip(regs);
- if (MACHINE_HAS_IEEE)
- asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
-
-#ifdef CONFIG_MATHEMU
- else if (user_mode(regs)) {
- __u8 opcode[6];
- get_user(*((__u16 *) opcode), location);
- switch (opcode[0]) {
- case 0x28: /* LDR Rx,Ry */
- signal = math_emu_ldr(opcode);
- break;
- case 0x38: /* LER Rx,Ry */
- signal = math_emu_ler(opcode);
- break;
- case 0x60: /* STD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_std(opcode, regs);
- break;
- case 0x68: /* LD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ld(opcode, regs);
- break;
- case 0x70: /* STE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ste(opcode, regs);
- break;
- case 0x78: /* LE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_le(opcode, regs);
- break;
- case 0xb3:
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_b3(opcode, regs);
- break;
- case 0xed:
- get_user(*((__u32 *) (opcode+2)),
- (__u32 __user *)(location+1));
- signal = math_emu_ed(opcode, regs);
- break;
- case 0xb2:
- if (opcode[1] == 0x99) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_srnm(opcode, regs);
- } else if (opcode[1] == 0x9c) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_stfpc(opcode, regs);
- } else if (opcode[1] == 0x9d) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_lfpc(opcode, regs);
- } else
- signal = SIGILL;
- break;
- default:
- signal = SIGILL;
- break;
- }
- }
-#endif
-#ifdef CONFIG_64BIT
+ asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
@@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs)
clear_pt_regs_flag(regs, PIF_PER_TRAP);
return;
}
-#endif
-
if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
signal = SIGILL;
- if (signal == SIGFPE)
+ if (signal == SIGFPE)
do_fp_trap(regs, current->thread.fp_regs.fpc);
else if (signal)
do_trap(regs, signal, ILL_ILLOPN, "data exception");
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index cc7328080b60..66956c09d5bf 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -188,7 +188,9 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
if (__rc == 0) \
- sim_stor_event(regs, __ptr, mask + 1); \
+ sim_stor_event(regs, \
+ (void __force *)__ptr, \
+ mask + 1); \
__rc; \
})
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0bbb7e027c5a..0d58269ff425 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -32,19 +32,17 @@
#include <asm/vdso.h>
#include <asm/facility.h>
-#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif
-#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
-#endif /* CONFIG_64BIT */
/*
* Should the kernel map a VDSO page into processes and pass its
@@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd)
vd->ectg_available = test_facility(31);
}
-#ifdef CONFIG_64BIT
/*
* Allocate/free per cpu vdso data.
*/
@@ -169,7 +166,6 @@ static void vdso_init_cr5(void)
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
}
-#endif /* CONFIG_64BIT */
/*
* This is called from binfmt_elf, we create the special vma for the
@@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!uses_interp)
return 0;
-#ifdef CONFIG_64BIT
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
@@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_pages = vdso32_pages;
}
#endif
-#else
- vdso_pagelist = vdso32_pagelist;
- vdso_pages = vdso32_pages;
-#endif
-
/*
* vDSO has a problem and was disabled, just don't "enable" it for
* the process
@@ -268,7 +258,7 @@ static int __init vdso_init(void)
if (!vdso_enabled)
return 0;
vdso_init_data(vdso_data);
-#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
/* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -287,7 +277,6 @@ static int __init vdso_init(void)
vdso32_pagelist[vdso32_pages] = NULL;
#endif
-#ifdef CONFIG_64BIT
/* Calculate the size of the 64 bit vDSO */
vdso64_pages = ((&vdso64_end - &vdso64_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -307,7 +296,6 @@ static int __init vdso_init(void)
if (vdso_alloc_per_cpu(&S390_lowcore))
BUG();
vdso_init_cr5();
-#endif /* CONFIG_64BIT */
get_page(virt_to_page(vdso_data));
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 35b13ed0af5f..445657fe658c 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -6,17 +6,10 @@
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
-#ifndef CONFIG_64BIT
-OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390:31-bit)
-ENTRY(startup)
-jiffies = jiffies_64 + 4;
-#else
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
ENTRY(startup)
jiffies = jiffies_64;
-#endif
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index afa2bd750ffc..8cd8e7b288c5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -110,7 +110,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
0xffe6fffbfcfdfc40UL,
- 0x205c800000000000UL,
+ 0x005c800000000000UL,
};
unsigned long kvm_s390_fac_list_mask_size(void)
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 653a7ec09ef5..3208d33a48cb 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -10,6 +10,13 @@
#define TRACE_INCLUDE_FILE trace-s390
/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR kvm_s390
+
+/*
* Trace point for the creation of the kvm instance.
*/
TRACE_EVENT(kvm_s390_create_vm,
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index a01df233856f..0e8fefe5b0ce 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,8 +3,7 @@
#
lib-y += delay.o string.o uaccess.o find.o
-obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
-obj-$(CONFIG_64BIT) += mem64.o
+obj-y += mem.o
lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
deleted file mode 100644
index 261152f83242..000000000000
--- a/arch/s390/lib/div64.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * __div64_32 implementation for 31 bit.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-
-#ifdef CONFIG_MARCH_G5
-
-/*
- * Function to divide an unsigned 64 bit integer by an unsigned
- * 31 bit integer using signed 64/32 bit division.
- */
-static uint32_t __div64_31(uint64_t *n, uint32_t base)
-{
- register uint32_t reg2 asm("2");
- register uint32_t reg3 asm("3");
- uint32_t *words = (uint32_t *) n;
- uint32_t tmp;
-
- /* Special case base==1, remainder = 0, quotient = n */
- if (base == 1)
- return 0;
- /*
- * Special case base==0 will cause a fixed point divide exception
- * on the dr instruction and may not happen anyway. For the
- * following calculation we can assume base > 1. The first
- * signed 64 / 32 bit division with an upper half of 0 will
- * give the correct upper half of the 64 bit quotient.
- */
- reg2 = 0UL;
- reg3 = words[0];
- asm volatile(
- " dr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[0] = reg3;
- reg3 = words[1];
- /*
- * To get the lower half of the 64 bit quotient and the 32 bit
- * remainder we have to use a little trick. Since we only have
- * a signed division the quotient can get too big. To avoid this
- * the 64 bit dividend is halved, then the signed division will
- * work. Afterwards the quotient and the remainder are doubled.
- * If the last bit of the dividend has been one the remainder
- * is increased by one then checked against the base. If the
- * remainder has overflown subtract base and increase the
- * quotient. Simple, no ?
- */
- asm volatile(
- " nr %2,%1\n"
- " srdl %0,1\n"
- " dr %0,%3\n"
- " alr %0,%0\n"
- " alr %1,%1\n"
- " alr %0,%2\n"
- " clr %0,%3\n"
- " jl 0f\n"
- " slr %0,%3\n"
- " ahi %1,1\n"
- "0:\n"
- : "+d" (reg2), "+d" (reg3), "=d" (tmp)
- : "d" (base), "2" (1UL) : "cc" );
- words[1] = reg3;
- return reg2;
-}
-
-/*
- * Function to divide an unsigned 64 bit integer by an unsigned
- * 32 bit integer using the unsigned 64/31 bit division.
- */
-uint32_t __div64_32(uint64_t *n, uint32_t base)
-{
- uint32_t r;
-
- /*
- * If the most significant bit of base is set, divide n by
- * (base/2). That allows to use 64/31 bit division and gives a
- * good approximation of the result: n = (base/2)*q + r. The
- * result needs to be corrected with two simple transformations.
- * If base is already < 2^31-1 __div64_31 can be used directly.
- */
- r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
- if ((signed) base < 0) {
- uint64_t q = *n;
- /*
- * First transformation:
- * n = (base/2)*q + r
- * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
- * Since r < (base/2), r + (base/2) < base.
- * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
- * n = ((base/2)*2)*q1 + r1 with r1 < base.
- */
- if (q & 1)
- r += base/2;
- q >>= 1;
- /*
- * Second transformation. ((base/2)*2) could have lost the
- * last bit.
- * n = ((base/2)*2)*q1 + r1
- * = base*q1 - ((base&1) ? q1 : 0) + r1
- */
- if (base & 1) {
- int64_t rx = r - q;
- /*
- * base is >= 2^31. The worst case for the while
- * loop is n=2^64-1 base=2^31+1. That gives a
- * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
- * base >= 2^31 the loop is finished after a maximum
- * of three iterations.
- */
- while (rx < 0) {
- rx += base;
- q--;
- }
- r = rx;
- }
- *n = q;
- }
- return r;
-}
-
-#else /* MARCH_G5 */
-
-uint32_t __div64_32(uint64_t *n, uint32_t base)
-{
- register uint32_t reg2 asm("2");
- register uint32_t reg3 asm("3");
- uint32_t *words = (uint32_t *) n;
-
- reg2 = 0UL;
- reg3 = words[0];
- asm volatile(
- " dlr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[0] = reg3;
- reg3 = words[1];
- asm volatile(
- " dlr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[1] = reg3;
- return reg2;
-}
-
-#endif /* MARCH_G5 */
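The comments in the removed __div64_32()/__div64_31() describe how a divisor with the most significant bit set is handled: divide by base/2 first, then fold the halving back in with two correction steps. Below is a small self-contained C model of just those correction steps, where a native 64-bit divide stands in for the removed 64/31-bit assembler division (illustration only, not the removed code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the removed correction steps; a plain 64-bit divide replaces
 * the 64/31-bit division that __div64_31() implemented with "dr". */
static uint32_t div64_32_model(uint64_t *n, uint32_t base)
{
	uint32_t d = (base & 0x80000000u) ? base / 2 : base;
	uint64_t q = *n / d;			/* n = d*q + r, r < d */
	uint32_t r = (uint32_t)(*n % d);

	if (base & 0x80000000u) {
		/* first transformation:
		 * n = (2*(base/2))*(q/2) + ((q&1) ? base/2 : 0) + r */
		if (q & 1)
			r += base / 2;
		q >>= 1;
		/* second transformation: 2*(base/2) = base - (base & 1) */
		if (base & 1) {
			int64_t rx = (int64_t)r - (int64_t)q;

			while (rx < 0) {	/* a few rounds at most */
				rx += base;
				q--;
			}
			r = (uint32_t)rx;
		}
	}
	*n = q;					/* quotient */
	return r;				/* remainder, 0 <= r < base */
}

int main(void)
{
	uint64_t n = 0xfedcba9876543210ULL;
	uint32_t base = 0x80000001u;		/* MSB set and odd: both fix-ups run */
	uint64_t q = n;
	uint32_t r = div64_32_model(&q, base);

	assert(q == n / base);
	assert(r == n % base);
	printf("q=%llu r=%u\n", (unsigned long long)q, r);
	return 0;
}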
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem.S
index c6d553e85ab1..c6d553e85ab1 100644
--- a/arch/s390/lib/mem64.S
+++ b/arch/s390/lib/mem.S
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
deleted file mode 100644
index 14ca9244b615..000000000000
--- a/arch/s390/lib/mem32.S
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * String handling functions.
- *
- * Copyright IBM Corp. 2012
- */
-
-#include <linux/linkage.h>
-
-/*
- * memset implementation
- *
- * This code corresponds to the C construct below. We do distinguish
- * between clearing (c == 0) and setting a memory array (c != 0) simply
- * because nearly all memset invocations in the kernel clear memory and
- * the xc instruction is preferred in such cases.
- *
- * void *memset(void *s, int c, size_t n)
- * {
- * if (likely(c == 0))
- * return __builtin_memset(s, 0, n);
- * return __builtin_memset(s, c, n);
- * }
- */
-ENTRY(memset)
- basr %r5,%r0
-.Lmemset_base:
- ltr %r4,%r4
- bzr %r14
- ltr %r3,%r3
- jnz .Lmemset_fill
- ahi %r4,-1
- lr %r3,%r4
- srl %r3,8
- ltr %r3,%r3
- lr %r1,%r2
- je .Lmemset_clear_rest
-.Lmemset_clear_loop:
- xc 0(256,%r1),0(%r1)
- la %r1,256(%r1)
- brct %r3,.Lmemset_clear_loop
-.Lmemset_clear_rest:
- ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
- br %r14
-.Lmemset_fill:
- stc %r3,0(%r2)
- chi %r4,1
- lr %r1,%r2
- ber %r14
- ahi %r4,-2
- lr %r3,%r4
- srl %r3,8
- ltr %r3,%r3
- je .Lmemset_fill_rest
-.Lmemset_fill_loop:
- mvc 1(256,%r1),0(%r1)
- la %r1,256(%r1)
- brct %r3,.Lmemset_fill_loop
-.Lmemset_fill_rest:
- ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
- br %r14
-.Lmemset_xc:
- xc 0(1,%r1),0(%r1)
-.Lmemset_mvc:
- mvc 1(1,%r1),0(%r1)
-
-/*
- * memcpy implementation
- *
- * void *memcpy(void *dest, const void *src, size_t n)
- */
-ENTRY(memcpy)
- basr %r5,%r0
-.Lmemcpy_base:
- ltr %r4,%r4
- bzr %r14
- ahi %r4,-1
- lr %r0,%r4
- srl %r0,8
- ltr %r0,%r0
- lr %r1,%r2
- jnz .Lmemcpy_loop
-.Lmemcpy_rest:
- ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
- br %r14
-.Lmemcpy_loop:
- mvc 0(256,%r1),0(%r3)
- la %r1,256(%r1)
- la %r3,256(%r3)
- brct %r0,.Lmemcpy_loop
- j .Lmemcpy_rest
-.Lmemcpy_mvc:
- mvc 0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
deleted file mode 100644
index d321329130ec..000000000000
--- a/arch/s390/lib/qrnnd.S
+++ /dev/null
@@ -1,78 +0,0 @@
-# S/390 __udiv_qrnnd
-
-#include <linux/linkage.h>
-
-# r2 : &__r
-# r3 : upper half of 64 bit word n
-# r4 : lower half of 64 bit word n
-# r5 : divisor d
-# the remainder r of the division is to be stored to &__r and
-# the quotient q is to be returned
-
- .text
-ENTRY(__udiv_qrnnd)
- st %r2,24(%r15) # store pointer to remainder for later
- lr %r0,%r3 # reload n
- lr %r1,%r4
- ltr %r2,%r5 # reload and test divisor
- jp 5f
- # divisor >= 0x80000000
- srdl %r0,2 # n/4
- srl %r2,1 # d/2
- slr %r1,%r2 # special case if last bit of d is set
- brc 3,0f # (n/4) div (n/2) can overflow by 1
- ahi %r0,-1 # trick: subtract n/2, then divide
-0: dr %r0,%r2 # signed division
- ahi %r1,1 # trick part 2: add 1 to the quotient
- # now (n >> 2) = (d >> 1) * %r1 + %r0
- lhi %r3,1
- nr %r3,%r1 # test last bit of q
- jz 1f
- alr %r0,%r2 # add (d>>1) to r
-1: srl %r1,1 # q >>= 1
- # now (n >> 2) = (d&-2) * %r1 + %r0
- lhi %r3,1
- nr %r3,%r5 # test last bit of d
- jz 2f
- slr %r0,%r1 # r -= q
- brc 3,2f # borrow ?
- alr %r0,%r5 # r += d
- ahi %r1,-1
-2: # now (n >> 2) = d * %r1 + %r0
- alr %r1,%r1 # q <<= 1
- alr %r0,%r0 # r <<= 1
- brc 12,3f # overflow on r ?
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-3: lhi %r3,2
- nr %r3,%r4 # test next to last bit of n
- jz 4f
- ahi %r0,1 # r += 1
-4: clr %r0,%r5 # r >= d ?
- jl 6f
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
- # now (n >> 1) = d * %r1 + %r0
- j 6f
-5: # divisor < 0x80000000
- srdl %r0,1
- dr %r0,%r2 # signed division
- # now (n >> 1) = d * %r1 + %r0
-6: alr %r1,%r1 # q <<= 1
- alr %r0,%r0 # r <<= 1
- brc 12,7f # overflow on r ?
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-7: lhi %r3,1
- nr %r3,%r4 # isolate last bit of n
- alr %r0,%r3 # r += (n & 1)
- clr %r0,%r5 # r >= d ?
- jl 8f
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-8: # now n = d * %r1 + %r0
- l %r2,24(%r15)
- st %r0,0(%r2)
- lr %r2,%r1
- br %r14
- .end __udiv_qrnnd
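The header comments of the removed __udiv_qrnnd spell out its calling convention: r3:r4 hold the 64-bit dividend n, r5 the divisor d, the quotient is returned and the remainder is stored through the pointer in r2. A C model of that contract, using a native 64-bit divide in place of the step-wise 32-bit assembler, and assuming (as such helpers usually do) that the upper half of n is below d so the quotient fits in 32 bits:

#include <stdint.h>
#include <stdio.h>

/* Model of the removed __udiv_qrnnd contract: divide nh:nl by d, store the
 * remainder through r, return the quotient.  Assumes nh < d. */
static uint32_t udiv_qrnnd_model(uint32_t *r, uint32_t nh, uint32_t nl, uint32_t d)
{
	uint64_t n = ((uint64_t)nh << 32) | nl;

	*r = (uint32_t)(n % d);
	return (uint32_t)(n / d);
}

int main(void)
{
	uint32_t rem;
	uint32_t quot = udiv_qrnnd_model(&rem, 0x1234u, 0x89abcdefu, 0x87654321u);

	printf("q=0x%x r=0x%x\n", (unsigned)quot, (unsigned)rem);
	return 0;
}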
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 53dd5d7a0c96..4614d415bb58 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,20 +15,6 @@
#include <asm/mmu_context.h>
#include <asm/facility.h>
-#ifndef CONFIG_64BIT
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
@@ -41,29 +27,29 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"9: jz 7f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 4f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
- "10:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "4:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ "10:slgr %0,%4\n"
+ " algr %2,%4\n"
+ "4: lghi %4,-1\n"
+ " algr %4,%0\n" /* copy remaining size, subtract 1 */
" bras %3,6f\n" /* memset loop */
" xc 0(1,%2),0(%2)\n"
"5: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
- "6:"AHI" %4,-256\n"
+ "6: aghi %4,-256\n"
" jnm 5b\n"
" ex %4,0(%3)\n"
" j 8f\n"
- "7:"SLR" %0,%0\n"
+ "7:slgr %0,%0\n"
"8:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -82,32 +68,32 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
" sacf 0\n"
"0: mvcp 0(%0,%2),0(%1),%3\n"
"10:jz 8f\n"
- "1:"ALR" %0,%3\n"
+ "1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%3\n"
"11:jnz 1b\n"
" j 8f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
+ " lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"4: mvcp 0(%4,%2),0(%1),%3\n"
- "12:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "5:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ "12:slgr %0,%4\n"
+ " algr %2,%4\n"
+ "5: lghi %4,-1\n"
+ " algr %4,%0\n" /* copy remaining size, subtract 1 */
" bras %3,7f\n" /* memset loop */
" xc 0(1,%2),0(%2)\n"
"6: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
- "7:"AHI" %4,-256\n"
+ "7: aghi %4,-256\n"
" jnm 6b\n"
" ex %4,0(%3)\n"
" j 9f\n"
- "8:"SLR" %0,%0\n"
+ "8:slgr %0,%0\n"
"9: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
@@ -134,19 +120,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"6: jz 4f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
- "7:"SLR" %0,%4\n"
+ "7: slgr %0,%4\n"
" j 5f\n"
- "4:"SLR" %0,%0\n"
+ "4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -165,22 +151,22 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
" sacf 0\n"
"0: mvcs 0(%0,%1),0(%2),%3\n"
"7: jz 5f\n"
- "1:"ALR" %0,%3\n"
+ "1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcs 0(%0,%1),0(%2),%3\n"
"8: jnz 1b\n"
" j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
+ " lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"4: mvcs 0(%4,%1),0(%2),%3\n"
- "9:"SLR" %0,%4\n"
+ "9: slgr %0,%4\n"
" j 6f\n"
- "5:"SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
@@ -208,11 +194,11 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
" jz 2f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
- "2:"SLR" %0,%0\n"
+ "2:slgr %0,%0\n"
"3: \n"
EX_TABLE(0b,3b)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
@@ -228,23 +214,23 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
load_kernel_asce();
asm volatile(
" sacf 256\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
- "0:"AHI" %0,257\n"
+ "0: aghi %0,257\n"
"1: mvc 0(1,%1),0(%2)\n"
" la %1,1(%1)\n"
" la %2,1(%2)\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jnz 1b\n"
" j 5f\n"
"2: mvc 0(256,%1),0(%2)\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
- "3:"AHI" %0,-256\n"
+ "3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,1b-0b(%3)\n"
- "5: "SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
@@ -269,18 +255,18 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
" jz 4f\n"
- "1:"ALR" %0,%2\n"
- " "SLR" %1,%2\n"
+ "1: algr %0,%2\n"
+ " slgr %1,%2\n"
" j 0b\n"
"2: la %3,4095(%1)\n"/* %4 = to + 4095 */
" nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
- " "SLR" %3,%1\n"
- " "CLR" %0,%3\n" /* copy crosses next page boundary? */
+ " slgr %3,%1\n"
+ " clgr %0,%3\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
- " "SLR" %0,%3\n"
+ " slgr %0,%3\n"
" j 5f\n"
- "4:"SLR" %0,%0\n"
+ "4:slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
@@ -295,28 +281,28 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
load_kernel_asce();
asm volatile(
" sacf 256\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
" xc 0(1,%1),0(%1)\n"
- "0:"AHI" %0,257\n"
+ "0: aghi %0,257\n"
" la %2,255(%1)\n" /* %2 = ptr + 255 */
" srl %2,12\n"
" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
- " "SLR" %2,%1\n"
- " "CLR" %0,%2\n" /* clear crosses next page boundary? */
+ " slgr %2,%1\n"
+ " clgr %0,%2\n" /* clear crosses next page boundary? */
" jnh 5f\n"
- " "AHI" %2,-1\n"
+ " aghi %2,-1\n"
"1: ex %2,0(%3)\n"
- " "AHI" %2,1\n"
- " "SLR" %0,%2\n"
+ " aghi %2,1\n"
+ " slgr %0,%2\n"
" j 5f\n"
"2: xc 0(256,%1),0(%1)\n"
" la %1,256(%1)\n"
- "3:"AHI" %0,-256\n"
+ "3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,0(%3)\n"
- "5: "SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
@@ -341,12 +327,12 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
asm volatile(
" la %2,0(%1)\n"
" la %3,0(%0,%1)\n"
- " "SLR" %0,%0\n"
+ " slgr %0,%0\n"
" sacf 256\n"
"0: srst %3,%2\n"
" jo 0b\n"
" la %0,1(%3)\n" /* strnlen_user result includes \0 */
- " "SLR" %0,%1\n"
+ " slgr %0,%1\n"
"1: sacf 768\n"
EX_TABLE(0b,1b)
: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
@@ -399,7 +385,7 @@ early_param("uaccess_primary", parse_uaccess_pt);
static int __init uaccess_init(void)
{
- if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+ if (!uaccess_primary && test_facility(27))
static_key_slow_inc(&have_mvcos);
return 0;
}
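A recurring pattern in the rewritten fault-handling paths above is the computation "%4 = (ptr + 4095) & -4096" followed by subtracting ptr, i.e. the number of bytes left before the next 4 KB page boundary; the copy is then retried only up to that boundary and, in the copy_from_user variants, whatever could not be copied is cleared. The same arithmetic in C (illustration only):

#include <stdio.h>

/* Bytes from ptr up to the next 4 KB page boundary:
 * ((ptr + 4095) & -4096) - ptr, as in the asm comments above. */
static unsigned long bytes_to_next_page(unsigned long ptr)
{
	return ((ptr + 4095UL) & ~4095UL) - ptr;
}

int main(void)
{
	printf("%lu\n", bytes_to_next_page(0x12345678UL));	/* 0x988 = 2440 */
	printf("%lu\n", bytes_to_next_page(0x12345000UL));	/* page aligned: 0 */
	return 0;
}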
diff --git a/arch/s390/lib/ucmpdi2.c b/arch/s390/lib/ucmpdi2.c
deleted file mode 100644
index 3e05ff532582..000000000000
--- a/arch/s390/lib/ucmpdi2.c
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/module.h>
-
-union ull_union {
- unsigned long long ull;
- struct {
- unsigned int high;
- unsigned int low;
- } ui;
-};
-
-int __ucmpdi2(unsigned long long a, unsigned long long b)
-{
- union ull_union au = {.ull = a};
- union ull_union bu = {.ull = b};
-
- if (au.ui.high < bu.ui.high)
- return 0;
- else if (au.ui.high > bu.ui.high)
- return 2;
- if (au.ui.low < bu.ui.low)
- return 0;
- else if (au.ui.low > bu.ui.low)
- return 2;
- return 1;
-}
-EXPORT_SYMBOL(__ucmpdi2);
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
deleted file mode 100644
index 51d399549f60..000000000000
--- a/arch/s390/math-emu/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the FPU instruction emulation.
-#
-
-obj-$(CONFIG_MATHEMU) := math.o
-
-ccflags-y := -I$(src) -Iinclude/math-emu -w
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
deleted file mode 100644
index a6ba0d724335..000000000000
--- a/arch/s390/math-emu/math.c
+++ /dev/null
@@ -1,2255 +0,0 @@
-/*
- * S390 version
- * Copyright IBM Corp. 1999, 2001
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *
- * 'math.c' emulates IEEE instructions on a S390 processor
- * that does not have the IEEE fpu (all processors before G5).
- */
-
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/uaccess.h>
-#include <asm/lowcore.h>
-
-#include <asm/sfp-util.h>
-#include <math-emu/soft-fp.h>
-#include <math-emu/single.h>
-#include <math-emu/double.h>
-#include <math-emu/quad.h>
-
-#define FPC_VALID_MASK 0xF8F8FF03
-
-/*
- * I miss a macro to round a floating point number to the
- * nearest integer in the same floating point format.
- */
-#define _FP_TO_FPINT_ROUND(fs, wc, X) \
- do { \
- switch (X##_c) \
- { \
- case FP_CLS_NORMAL: \
- if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs) \
- { /* floating point number has no bits after the dot. */ \
- } \
- else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs && \
- X##_e > _FP_EXPBIAS_##fs) \
- { /* some bits before the dot, some after it. */ \
- _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs, \
- X##_e - _FP_EXPBIAS_##fs \
- + _FP_FRACBITS_##fs); \
- _FP_ROUND(wc, X); \
- _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs \
- + _FP_FRACBITS_##fs); \
- } \
- else \
- { /* all bits after the dot. */ \
- FP_SET_EXCEPTION(FP_EX_INEXACT); \
- X##_c = FP_CLS_ZERO; \
- } \
- break; \
- case FP_CLS_NAN: \
- case FP_CLS_INF: \
- case FP_CLS_ZERO: \
- break; \
- } \
- } while (0)
-
-#define FP_TO_FPINT_ROUND_S(X) _FP_TO_FPINT_ROUND(S,1,X)
-#define FP_TO_FPINT_ROUND_D(X) _FP_TO_FPINT_ROUND(D,2,X)
-#define FP_TO_FPINT_ROUND_Q(X) _FP_TO_FPINT_ROUND(Q,4,X)
-
-typedef union {
- long double ld;
- struct {
- __u64 high;
- __u64 low;
- } w;
-} mathemu_ldcv;
-
-#ifdef CONFIG_SYSCTL
-int sysctl_ieee_emulation_warnings=1;
-#endif
-
-#define mathemu_put_user(x, p) \
- do { \
- if (put_user((x),(p))) \
- return SIGSEGV; \
- } while (0)
-
-#define mathemu_get_user(x, p) \
- do { \
- if (get_user((x),(p))) \
- return SIGSEGV; \
- } while (0)
-
-#define mathemu_copy_from_user(d, s, n)\
- do { \
- if (copy_from_user((d),(s),(n)) != 0) \
- return SIGSEGV; \
- } while (0)
-
-#define mathemu_copy_to_user(d, s, n) \
- do { \
- if (copy_to_user((d),(s),(n)) != 0) \
- return SIGSEGV; \
- } while (0)
-
-static void display_emulation_not_implemented(struct pt_regs *regs, char *instr)
-{
- __u16 *location;
-
-#ifdef CONFIG_SYSCTL
- if(sysctl_ieee_emulation_warnings)
-#endif
- {
- location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
- printk("%s ieee fpu instruction not emulated "
- "process name: %s pid: %d \n",
- instr, current->comm, current->pid);
- printk("%s's PSW: %08lx %08lx\n", instr,
- (unsigned long) regs->psw.mask,
- (unsigned long) location);
- }
-}
-
-static inline void emu_set_CC (struct pt_regs *regs, int cc)
-{
- regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12);
-}
-
-/*
- * Set the condition code in the user psw.
- * 0 : Result is zero
- * 1 : Result is less than zero
- * 2 : Result is greater than zero
- * 3 : Result is NaN or INF
- */
-static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign)
-{
- switch (class) {
- case FP_CLS_NORMAL:
- case FP_CLS_INF:
- emu_set_CC(regs, sign ? 1 : 2);
- break;
- case FP_CLS_ZERO:
- emu_set_CC(regs, 0);
- break;
- case FP_CLS_NAN:
- emu_set_CC(regs, 3);
- break;
- }
-}
-
-/* Add long double */
-static int emu_axbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_ADD_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Add double */
-static int emu_adbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_ADD_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Add double */
-static int emu_adb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_ADD_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Add float */
-static int emu_aebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_ADD_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Add float */
-static int emu_aeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_ADD_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Compare long double */
-static int emu_cxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB);
- mathemu_ldcv cvt;
- int IR;
-
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_RAW_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_RAW_QP(QB, &cvt.ld);
- FP_CMP_Q(IR, QA, QB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare double */
-static int emu_cdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare double */
-static int emu_cdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, val);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare float */
-static int emu_cebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare float */
-static int emu_ceb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, val);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare and signal long double */
-static int emu_kxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int IR;
-
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_RAW_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_CMP_Q(IR, QA, QB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal double */
-static int emu_kdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal double */
-static int emu_kdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, val);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal float */
-static int emu_kebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal float */
-static int emu_keb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, val);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Convert from fixed long double */
-static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- __s32 si;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- si = regs->gprs[ry];
- FP_FROM_INT_Q(QR, si, 32, int);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Convert from fixed double */
-static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DR);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- si = regs->gprs[ry];
- FP_FROM_INT_D(DR, si, 32, int);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Convert from fixed float */
-static int emu_cefbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SR);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- si = regs->gprs[ry];
- FP_FROM_INT_S(SR, si, 32, int);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Convert to fixed long double */
-static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) {
- FP_DECL_Q(QA);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = current->thread.fp_regs.fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_TO_INT_ROUND_Q(si, QA, 32, 1);
- regs->gprs[rx] = si;
- emu_set_CC_cs(regs, QA_c, QA_s);
- return _fex;
-}
-
-/* Convert to fixed double */
-static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) {
- FP_DECL_D(DA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = current->thread.fp_regs.fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_TO_INT_ROUND_D(si, DA, 32, 1);
- regs->gprs[rx] = si;
- emu_set_CC_cs(regs, DA_c, DA_s);
- return _fex;
-}
-
-/* Convert to fixed float */
-static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) {
- FP_DECL_S(SA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = current->thread.fp_regs.fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_TO_INT_ROUND_S(si, SA, 32, 1);
- regs->gprs[rx] = si;
- emu_set_CC_cs(regs, SA_c, SA_s);
- return _fex;
-}
-
-/* Divide long double */
-static int emu_dxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_DIV_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Divide double */
-static int emu_ddbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_DIV_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Divide double */
-static int emu_ddb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_DIV_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Divide float */
-static int emu_debr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_DIV_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Divide float */
-static int emu_deb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_DIV_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Divide to integer double */
-static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) {
- display_emulation_not_implemented(regs, "didbr");
- return 0;
-}
-
-/* Divide to integer float */
-static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) {
- display_emulation_not_implemented(regs, "diebr");
- return 0;
-}
-
-/* Extract fpc */
-static int emu_efpc (struct pt_regs *regs, int rx, int ry) {
- regs->gprs[rx] = current->thread.fp_regs.fpc;
- return 0;
-}
-
-/* Load and test long double */
-static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- mathemu_ldcv cvt;
- FP_DECL_Q(QA);
- FP_DECL_EX;
-
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui;
- emu_set_CC_cs(regs, QA_c, QA_s);
- return _fex;
-}
-
-/* Load and test double */
-static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_D(DA);
- FP_DECL_EX;
-
- FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
- fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- emu_set_CC_cs(regs, DA_c, DA_s);
- return _fex;
-}
-
-/* Load and test float */
-static int emu_ltebr (struct pt_regs *regs, int rx, int ry) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_S(SA);
- FP_DECL_EX;
-
- FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
- fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- emu_set_CC_cs(regs, SA_c, SA_s);
- return _fex;
-}
-
-/* Load complement long double */
-static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_NEG_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Load complement double */
-static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_NEG_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Load complement float */
-static int emu_lcebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_NEG_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Load floating point integer long double */
-static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_Q(QA);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = fp_regs->fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- cvt.w.high = fp_regs->fprs[ry].ui;
- cvt.w.low = fp_regs->fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_TO_FPINT_ROUND_Q(QA);
- FP_PACK_QP(&cvt.ld, QA);
- fp_regs->fprs[rx].ui = cvt.w.high;
- fp_regs->fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load floating point integer double */
-static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) {
- /* FIXME: rounding mode !! */
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_D(DA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = fp_regs->fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
- FP_TO_FPINT_ROUND_D(DA);
- FP_PACK_DP(&fp_regs->fprs[rx].d, DA);
- return _fex;
-}
-
-/* Load floating point integer float */
-static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_S(SA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = fp_regs->fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
- FP_TO_FPINT_ROUND_S(SA);
- FP_PACK_SP(&fp_regs->fprs[rx].f, SA);
- return _fex;
-}
-
-/* Load lengthened double to long double */
-static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_CONV (Q, D, 4, 2, QR, DA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened double to long double */
-static int emu_lxdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, val);
- FP_CONV (Q, D, 4, 2, QR, DA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened float to long double */
-static int emu_lxebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_CONV (Q, S, 4, 1, QR, SA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened float to long double */
-static int emu_lxeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, val);
- FP_CONV (Q, S, 4, 1, QR, SA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened float to double */
-static int emu_ldebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_CONV (D, S, 2, 1, DR, SA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Load lengthened float to double */
-static int emu_ldeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, val);
- FP_CONV (D, S, 2, 1, DR, SA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Load negative long double */
-static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- if (QA_s == 0) {
- FP_NEG_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- } else {
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- current->thread.fp_regs.fprs[rx+2].ui =
- current->thread.fp_regs.fprs[ry+2].ui;
- }
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Load negative double */
-static int emu_lndbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- if (DA_s == 0) {
- FP_NEG_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Load negative float */
-static int emu_lnebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- if (SA_s == 0) {
- FP_NEG_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Load positive long double */
-static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- if (QA_s != 0) {
- FP_NEG_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- } else{
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- current->thread.fp_regs.fprs[rx+2].ui =
- current->thread.fp_regs.fprs[ry+2].ui;
- }
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Load positive double */
-static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- if (DA_s != 0) {
- FP_NEG_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Load positive float */
-static int emu_lpebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- if (SA_s != 0) {
- FP_NEG_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Load rounded long double to double */
-static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_D(DR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_CONV (D, Q, 2, 4, DR, QA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].f, DR);
- return _fex;
-}
-
-/* Load rounded long double to float */
-static int emu_lexbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_S(SR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_CONV (S, Q, 1, 4, SR, QA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Load rounded double to float */
-static int emu_ledbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_CONV (S, D, 1, 2, SR, DA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Multiply long double */
-static int emu_mxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_MUL_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Multiply double */
-static int emu_mdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply double */
-static int emu_mdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply double to long double */
-static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_CONV (Q, D, 4, 2, QA, DA);
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_CONV (Q, D, 4, 2, QB, DA);
- FP_MUL_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Multiply double to long double */
-static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_UNPACK_QP(QB, val);
- FP_MUL_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Multiply float */
-static int emu_meebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_MUL_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Multiply float */
-static int emu_meeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_MUL_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Multiply float to double */
-static int emu_mdebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_CONV (D, S, 2, 1, DA, SA);
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_CONV (D, S, 2, 1, DB, SA);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply float to double */
-static int emu_mdeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_CONV (D, S, 2, 1, DA, SA);
- FP_UNPACK_SP(SA, val);
- FP_CONV (D, S, 2, 1, DB, SA);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply and add double */
-static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_ADD_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and add double */
-static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_ADD_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and add float */
-static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_ADD_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Multiply and add float */
-static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_ADD_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Multiply and subtract double */
-static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_SUB_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and subtract double */
-static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_SUB_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and subtract float */
-static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_SUB_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Multiply and subtract float */
-static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_SUB_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Set floating point control word */
-static int emu_sfpc (struct pt_regs *regs, int rx, int ry) {
- __u32 temp;
-
- temp = regs->gprs[rx];
- if ((temp & ~FPC_VALID_MASK) != 0)
- return SIGILL;
- current->thread.fp_regs.fpc = temp;
- return 0;
-}
-
-/* Square root long double */
-static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_SQRT_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Square root double */
-static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_SQRT_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Square root double */
-static int emu_sqdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, val);
- FP_SQRT_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Square root float */
-static int emu_sqebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_SQRT_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Square root float */
-static int emu_sqeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, val);
- FP_SQRT_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Subtract long double */
-static int emu_sxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_SUB_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Subtract double */
-static int emu_sdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_SUB_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Subtract double */
-static int emu_sdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_SUB_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Subtract float */
-static int emu_sebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_SUB_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Subtract float */
-static int emu_seb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_SUB_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Test data class long double */
-static int emu_tcxb (struct pt_regs *regs, int rx, long val) {
- FP_DECL_Q(QA);
- mathemu_ldcv cvt;
- int bit;
-
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_RAW_QP(QA, &cvt.ld);
- switch (QA_e) {
- default:
- bit = 8; /* normalized number */
- break;
- case 0:
- if (_FP_FRAC_ZEROP_4(QA))
- bit = 10; /* zero */
- else
- bit = 6; /* denormalized number */
- break;
- case _FP_EXPMAX_Q:
- if (_FP_FRAC_ZEROP_4(QA))
- bit = 4; /* infinity */
- else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q)
- bit = 2; /* quiet NAN */
- else
- bit = 0; /* signaling NAN */
- break;
- }
- if (!QA_s)
- bit++;
- emu_set_CC(regs, ((__u32) val >> bit) & 1);
- return 0;
-}
-
-/* Test data class double */
-static int emu_tcdb (struct pt_regs *regs, int rx, long val) {
- FP_DECL_D(DA);
- int bit;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- switch (DA_e) {
- default:
- bit = 8; /* normalized number */
- break;
- case 0:
- if (_FP_FRAC_ZEROP_2(DA))
- bit = 10; /* zero */
- else
- bit = 6; /* denormalized number */
- break;
- case _FP_EXPMAX_D:
- if (_FP_FRAC_ZEROP_2(DA))
- bit = 4; /* infinity */
- else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D)
- bit = 2; /* quiet NAN */
- else
- bit = 0; /* signaling NAN */
- break;
- }
- if (!DA_s)
- bit++;
- emu_set_CC(regs, ((__u32) val >> bit) & 1);
- return 0;
-}
-
-/* Test data class float */
-static int emu_tceb (struct pt_regs *regs, int rx, long val) {
- FP_DECL_S(SA);
- int bit;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- switch (SA_e) {
- default:
- bit = 8; /* normalized number */
- break;
- case 0:
- if (_FP_FRAC_ZEROP_1(SA))
- bit = 10; /* zero */
- else
- bit = 6; /* denormalized number */
- break;
- case _FP_EXPMAX_S:
- if (_FP_FRAC_ZEROP_1(SA))
- bit = 4; /* infinity */
- else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S)
- bit = 2; /* quiet NAN */
- else
- bit = 0; /* signaling NAN */
- break;
- }
- if (!SA_s)
- bit++;
- emu_set_CC(regs, ((__u32) val >> bit) & 1);
- return 0;
-}
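
The three test-data-class handlers above all follow the same recipe: classify the operand, map the (class, sign) pair to a bit number, and report the matching bit of the 12-bit mask taken from the operand address as the condition code. A rough user-space rendering of that selection, using the host's fpclassify() in place of the soft-float unpack macros, so the quiet/signaling NaN split collapses into one case:

#include <math.h>
#include <stdio.h>

static int class_bit(double d)
{
	int bit;

	switch (fpclassify(d)) {
	case FP_NORMAL:    bit = 8;  break;
	case FP_ZERO:      bit = 10; break;
	case FP_SUBNORMAL: bit = 6;  break;	/* "denormalized" above */
	case FP_INFINITE:  bit = 4;  break;
	default:           bit = 2;  break;	/* NaN; no quiet/signaling split here */
	}
	if (!signbit(d))
		bit++;				/* positive operands use the next bit up */
	return bit;
}

int main(void)
{
	unsigned int mask = (1u << 10) | (1u << 11);	/* "zero of either sign?" */

	printf("cc(+0.0) = %u\n", (mask >> class_bit(+0.0)) & 1);	/* 1 */
	printf("cc(-2.5) = %u\n", (mask >> class_bit(-2.5)) & 1);	/* 0 */
	return 0;
}
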
-
-static inline void emu_load_regd(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* load reg from fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " ld 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
- : "1");
-}
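
The (reg & 9) test used by emu_load_regd() and the three helpers that follow packs "reg is one of 0, 2, 4, 6" into a single mask: bit 0 rejects odd register numbers and bit 3 rejects 8..15. A stand-alone user-space check of that equivalence for all sixteen register numbers (plain C, nothing kernel-specific):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (int reg = 0; reg < 16; reg++) {
		int in_set = (reg == 0 || reg == 2 || reg == 4 || reg == 6);

		/* bit 0 set -> odd number, bit 3 set -> 8..15 */
		assert(((reg & 9) == 0) == in_set);
		printf("reg %2d: %s\n", reg,
		       in_set ? "synced by the helpers" : "early return");
	}
	return 0;
}
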
-
-static inline void emu_load_rege(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* load reg from fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " le 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
- : "1");
-}
-
-static inline void emu_store_regd(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* store reg to fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " std 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
- : "1");
-}
-
-
-static inline void emu_store_rege(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* store reg to fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " ste 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
- : "1");
-}
-
-int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
- int _fex = 0;
- static const __u8 format_table[256] = {
- [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03,
- [0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d,
- [0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03,
- [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06,
- [0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02,
- [0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03,
- [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02,
- [0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05,
- [0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01,
- [0x44] = 0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04,
- [0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01,
- [0x4c] = 0x01,[0x4d] = 0x01,[0x53] = 0x06,[0x57] = 0x06,
- [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13,
- [0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c,
- [0x99] = 0x0b,[0x9a] = 0x0a
- };
- static const void *jump_table[256]= {
- [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr,
- [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr,
- [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr,
- [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr,
- [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr,
- [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr,
- [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr,
- [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr,
- [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr,
- [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr,
- [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr,
- [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr,
- [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr,
- [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr,
- [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr,
- [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr,
- [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc,
- [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr,
- [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr,
- [0x9a] = emu_cfxbr
- };
-
- switch (format_table[opcode[1]]) {
- case 1: /* RRE format, long double operation */
- if (opcode[3] & 0x22)
- return SIGILL;
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(((opcode[3] >> 4) & 15) + 2);
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *,int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- emu_load_regd(opcode[3] & 15);
- emu_load_regd((opcode[3] & 15) + 2);
- break;
- case 2: /* RRE format, double operation */
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(opcode[3] & 15);
- break;
- case 3: /* RRE format, float operation */
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- emu_load_rege(opcode[3] & 15);
- break;
- case 4: /* RRF format, long double operation */
- if (opcode[3] & 0x22)
- return SIGILL;
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(((opcode[3] >> 4) & 15) + 2);
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- emu_load_regd(opcode[3] & 15);
- emu_load_regd((opcode[3] & 15) + 2);
- break;
- case 5: /* RRF format, double operation */
- emu_store_regd((opcode[2] >> 4) & 15);
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- emu_load_regd((opcode[2] >> 4) & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(opcode[3] & 15);
- break;
- case 6: /* RRF format, float operation */
- emu_store_rege((opcode[2] >> 4) & 15);
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- emu_load_rege((opcode[2] >> 4) & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- emu_load_rege(opcode[3] & 15);
- break;
- case 7: /* RRE format, cxfbr instruction */
- /* call the emulation function */
- if (opcode[3] & 0x20)
- return SIGILL;
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- break;
- case 8: /* RRE format, cdfbr instruction */
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- break;
- case 9: /* RRE format, cefbr instruction */
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- break;
- case 10: /* RRF format, cfxbr instruction */
- if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
- /* mask of { 2,3,8-15 } is invalid */
- return SIGILL;
- if (opcode[3] & 2)
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- break;
- case 11: /* RRF format, cfdbr instruction */
- if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
- /* mask of { 2,3,8-15 } is invalid */
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- break;
- case 12: /* RRF format, cfebr instruction */
- if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
- /* mask of { 2,3,8-15 } is invalid */
- return SIGILL;
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- break;
- case 13: /* RRE format, ldxbr & mdxbr instruction */
- /* double store but long double load */
- if (opcode[3] & 0x20)
- return SIGILL;
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- break;
- case 14: /* RRE format, lxebr instruction */
- /* float store but long double load */
- if (opcode[3] & 0x20)
- return SIGILL;
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- break;
- case 15: /* RRE format, ldebr & mdebr instruction */
- /* float store but double load */
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- break;
- case 16: /* RRE format, ldxbr instruction */
- /* long double store but double load */
- if (opcode[3] & 2)
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- break;
- case 17: /* RRE format, lexbr instruction */
- /* long double store but float load */
- if (opcode[3] & 2)
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- break;
- case 18: /* RRE format, ledbr instruction */
- /* double store but float load */
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- break;
- case 19: /* RRE format, efpc & sfpc instruction */
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- break;
- default: /* invalid operation */
- return SIGILL;
- }
- if (_fex != 0) {
- current->thread.fp_regs.fpc |= _fex;
- if (current->thread.fp_regs.fpc & (_fex << 8))
- return SIGFPE;
- }
- return 0;
-}
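
Both dispatchers finish the same way: the exception bits collected by the soft-float macros are ORed into the emulated FPC as flags, and SIGFPE is only returned when the matching enable bit, sitting eight positions higher in this representation, is also set. A compact model of that decision; the EX_* bit values are invented for the sketch and are not the architected FPC layout:

#include <stdio.h>

#define EX_INEXACT	0x01	/* illustrative bit assignments */
#define EX_OVERFLOW	0x04

static int post_exception(unsigned int *fpc, unsigned int fex)
{
	if (!fex)
		return 0;
	*fpc |= fex;			/* always record the flag */
	if (*fpc & (fex << 8))		/* corresponding trap enabled? */
		return 1;		/* caller turns this into SIGFPE */
	return 0;
}

int main(void)
{
	unsigned int fpc = EX_OVERFLOW << 8;	/* overflow trap enabled */

	printf("%d\n", post_exception(&fpc, EX_INEXACT));	/* 0: flag only */
	printf("%d\n", post_exception(&fpc, EX_OVERFLOW));	/* 1: would trap */
	return 0;
}
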
-
-static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp)
-{
- addr_t addr;
-
- rx &= 15;
- rb &= 15;
- addr = disp & 0xfff;
- addr += (rx != 0) ? regs->gprs[rx] : 0; /* + index */
- addr += (rb != 0) ? regs->gprs[rb] : 0; /* + base */
- return (void*) addr;
-}
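
calc_addr() is the usual RX-style address arithmetic: a 12-bit displacement, plus the index register when its number is non-zero, plus the base register when its number is non-zero; register 0 never contributes. A self-contained version of the same computation, with an ordinary array standing in for the general registers and a made-up encoding in main():

#include <stdint.h>
#include <stdio.h>

static uint64_t rx_address(const uint64_t gprs[16], uint32_t opc)
{
	unsigned int x = (opc >> 16) & 15;	/* index register number */
	unsigned int b = (opc >> 12) & 15;	/* base register number */
	uint64_t addr = opc & 0xfff;		/* displacement */

	if (x)
		addr += gprs[x];
	if (b)
		addr += gprs[b];
	return addr;
}

int main(void)
{
	uint64_t gprs[16] = { 0 };

	gprs[5] = 0x1000;			/* base */
	gprs[7] = 0x20;				/* index */
	/* hypothetical encoding: X=7, B=5, D=0x034 in the low 20 bits */
	printf("%#llx\n", (unsigned long long)
	       rx_address(gprs, (7u << 16) | (5u << 12) | 0x034));
	return 0;
}
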
-
-int math_emu_ed(__u8 *opcode, struct pt_regs * regs) {
- int _fex = 0;
-
- static const __u8 format_table[256] = {
- [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05,
- [0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02,
- [0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04,
- [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02,
- [0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01,
- [0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01,
- [0x1e] = 0x03,[0x1f] = 0x03,
- };
- static const void *jump_table[]= {
- [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb,
- [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb,
- [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb,
- [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb,
- [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb,
- [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb,
- [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb,
- [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb,
- [0x1e] = emu_madb,[0x1f] = emu_msdb
- };
-
- switch (format_table[opcode[5]]) {
- case 1: /* RXE format, double constant */ {
- __u64 *dxb, temp;
- __u32 opc;
-
- emu_store_regd((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&temp, dxb, 8);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, double *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (double *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- break;
- }
- case 2: /* RXE format, float constant */ {
- __u32 *dxb, temp;
- __u32 opc;
-
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp);
- emu_load_rege((opcode[1] >> 4) & 15);
- break;
- }
- case 3: /* RXF format, double constant */ {
- __u64 *dxb, temp;
- __u32 opc;
-
- emu_store_regd((opcode[1] >> 4) & 15);
- emu_store_regd((opcode[4] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&temp, dxb, 8);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, double *, int))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4);
- emu_load_regd((opcode[1] >> 4) & 15);
- break;
- }
- case 4: /* RXF format, float constant */ {
- __u32 *dxb, temp;
- __u32 opc;
-
- emu_store_rege((opcode[1] >> 4) & 15);
- emu_store_rege((opcode[4] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *, int))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4);
- emu_load_rege((opcode[4] >> 4) & 15);
- break;
- }
- case 5: /* RXE format, double constant */
- /* store double and load long double */
- {
- __u64 *dxb, temp;
- __u32 opc;
- if ((opcode[1] >> 4) & 0x20)
- return SIGILL;
- emu_store_regd((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&temp, dxb, 8);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, double *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (double *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- emu_load_regd(((opcode[1] >> 4) & 15) + 2);
- break;
- }
- case 6: /* RXE format, float constant */
- /* store float and load double */
- {
- __u32 *dxb, temp;
- __u32 opc;
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- break;
- }
- case 7: /* RXE format, float constant */
- /* store float and load long double */
- {
- __u32 *dxb, temp;
- __u32 opc;
- if ((opcode[1] >> 4) & 0x20)
- return SIGILL;
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- emu_load_regd(((opcode[1] >> 4) & 15) + 2);
- break;
- }
- case 8: /* RXE format, RX address used as int value */ {
- __u64 dxb;
- __u32 opc;
-
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, long))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, dxb);
- break;
- }
- case 9: /* RXE format, RX address used as int value */ {
- __u64 dxb;
- __u32 opc;
-
- emu_store_regd((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, long))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, dxb);
- break;
- }
- case 10: /* RXE format, RX address used as int value */ {
- __u64 dxb;
- __u32 opc;
-
- if ((opcode[1] >> 4) & 2)
- return SIGILL;
- emu_store_regd((opcode[1] >> 4) & 15);
- emu_store_regd(((opcode[1] >> 4) & 15) + 2);
- opc = *((__u32 *) opcode);
- dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, long))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, dxb);
- break;
- }
- default: /* invalid operation */
- return SIGILL;
- }
- if (_fex != 0) {
- current->thread.fp_regs.fpc |= _fex;
- if (current->thread.fp_regs.fpc & (_fex << 8))
- return SIGFPE;
- }
- return 0;
-}
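
math_emu_b3() and math_emu_ed() share one structure: a 256-entry table classifies the opcode's operand format, a second table holds the handler, and the handler is invoked through a cast chosen per format. A stripped-down model of that table-driven dispatch; the opcodes, formats and handlers here are invented for the example:

#include <stdio.h>

typedef int (*handler_t)(int rx, int ry);

static int op_add(int rx, int ry) { printf("add %d,%d\n", rx, ry); return 0; }
static int op_mul(int rx, int ry) { printf("mul %d,%d\n", rx, ry); return 0; }

static const unsigned char format_table[256] = { [0x0a] = 1, [0x1c] = 2 };
static const handler_t jump_table[256]       = { [0x0a] = op_add, [0x1c] = op_mul };

static int dispatch(unsigned char opcode, unsigned char regs)
{
	if (!format_table[opcode])
		return -1;			/* SIGILL in the real code */
	/* both formats take (rx, ry) here; the kernel switches on the format
	 * to pick the right cast and register save/restore sequence */
	return jump_table[opcode](regs >> 4, regs & 15);
}

int main(void)
{
	dispatch(0x0a, 0x24);
	dispatch(0x1c, 0x60);
	printf("%d\n", dispatch(0x42, 0x00));	/* unknown opcode -> -1 */
	return 0;
}
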
-
-/*
- * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6}
- */
-int math_emu_ldr(__u8 *opcode) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u16 opc = *((__u16 *) opcode);
-
- if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
- /* we got an exception therefore ry can't be in {0,2,4,6} */
- asm volatile( /* load rx from fp_regs.fprs[ry] */
- " bras 1,0f\n"
- " ld 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d)
- : "1");
- } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
- asm volatile ( /* store ry to fp_regs.fprs[rx] */
- " bras 1,0f\n"
- " std 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" ((opc & 0xf) << 4),
- "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
- : "1");
- } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
- fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
- return 0;
-}
-
-/*
- * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6}
- */
-int math_emu_ler(__u8 *opcode) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u16 opc = *((__u16 *) opcode);
-
- if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
- /* we got an exception therefore ry can't be in {0,2,4,6} */
- asm volatile( /* load rx from fp_regs.fprs[ry] */
- " bras 1,0f\n"
- " le 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f)
- : "1");
- } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
- asm volatile( /* store ry to fp_regs.fprs[rx] */
- " bras 1,0f\n"
- " ste 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" ((opc & 0xf) << 4),
- "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
- : "1");
- } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
- fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
- return 0;
-}
-
-/*
- * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_ld(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u64 *dxb;
-
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&fp_regs->fprs[(opc >> 20) & 0xf].d, dxb, 8);
- return 0;
-}
-
-/*
- * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_le(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u32 *mem, *dxb;
-
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
- mathemu_get_user(mem[0], dxb);
- return 0;
-}
-
-/*
- * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_std(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u64 *dxb;
-
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_to_user(dxb, &fp_regs->fprs[(opc >> 20) & 0xf].d, 8);
- return 0;
-}
-
-/*
- * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_ste(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u32 *mem, *dxb;
-
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
- mathemu_put_user(mem[0], dxb);
- return 0;
-}
-
-/*
- * Emulate LFPC D(B)
- */
-int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) {
- __u32 opc = *((__u32 *) opcode);
- __u32 *dxb, temp;
-
- dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc);
- mathemu_get_user(temp, dxb);
- if ((temp & ~FPC_VALID_MASK) != 0)
- return SIGILL;
- current->thread.fp_regs.fpc = temp;
- return 0;
-}
-
-/*
- * Emulate STFPC D(B)
- */
-int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) {
- __u32 opc = *((__u32 *) opcode);
- __u32 *dxb;
-
- dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc);
- mathemu_put_user(current->thread.fp_regs.fpc, dxb);
- return 0;
-}
-
-/*
- * Emulate SRNM D(B)
- */
-int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) {
- __u32 opc = *((__u32 *) opcode);
- __u32 temp;
-
- temp = calc_addr(regs, 0, opc>>12, opc);
- current->thread.fp_regs.fpc &= ~3;
- current->thread.fp_regs.fpc |= (temp & 3);
- return 0;
-}
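
The SRNM handler keeps only the two low bits of the computed address and splices them into the two low bits of the emulated FPC, which is exactly where the emu_* routines earlier in the file read the rounding mode (fpc & 3). A small sketch of that update; the mode names follow the usual FP_RND_* numbering of the soft-float headers and are illustrative rather than authoritative:

#include <stdio.h>

static const char *mode_name[4] = {
	"to nearest",		/* FP_RND_NEAREST */
	"toward zero",		/* FP_RND_ZERO */
	"toward +inf",		/* FP_RND_PINF */
	"toward -inf",		/* FP_RND_MINF */
};

static unsigned int set_rounding(unsigned int fpc, unsigned long addr)
{
	return (fpc & ~3u) | (addr & 3u);
}

int main(void)
{
	unsigned int fpc = 0;

	fpc = set_rounding(fpc, 0x7fe2);	/* only the low two bits matter */
	printf("mode %u: %s\n", fpc & 3, mode_name[fpc & 3]);
	return 0;
}
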
-
-/* broken compiler ... */
-long long
-__negdi2 (long long u)
-{
-
- union lll {
- long long ll;
- long s[2];
- };
-
- union lll w,uu;
-
- uu.ll = u;
-
- w.s[1] = -uu.s[1];
- w.s[0] = -uu.s[0] - ((int) w.s[1] != 0);
-
- return w.ll;
-}
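
__negdi2() is the textbook split-word negation: negate the low word, then negate the high word and subtract the borrow the low word produced (there is one unless the low word was zero). The same computation with fixed-width types, checked against native 64-bit negation, assuming the 32-bit long of the 31-bit builds this file was compiled for:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int64_t negdi2(int64_t u)
{
	uint32_t lo = (uint32_t)u;
	uint32_t hi = (uint32_t)((uint64_t)u >> 32);
	uint32_t nlo = -lo;			/* negated low word */
	uint32_t nhi = -hi - (lo != 0);		/* negated high word minus borrow */

	return (int64_t)(((uint64_t)nhi << 32) | nlo);
}

int main(void)
{
	int64_t samples[] = { 0, 1, -1, 0x100000000LL, -0x123456789abcdefLL };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(negdi2(samples[i]) == -samples[i]);
	printf("ok\n");
	return 0;
}
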
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cadeda204..8556d6be9b54 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
KERNEL_END_NR,
VMEMMAP_NR,
VMALLOC_NR,
-#ifdef CONFIG_64BIT
MODULES_NR,
-#endif
};
static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
[KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
[VMEMMAP_NR] = {0, "vmemmap Area"},
[VMALLOC_NR] = {0, "vmalloc Area"},
-#ifdef CONFIG_64BIT
[MODULES_NR] = {0, "Modules Area"},
-#endif
{ -1, NULL }
};
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
}
}
-#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
-#else
-#define _PMD_PROT_MASK 0
-#endif
-
static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pud_t *pud, unsigned long addr)
{
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
- prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+ prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
note_page(m, st, prot, 3);
} else
walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
}
}
-#ifdef CONFIG_64BIT
-#define _PUD_PROT_MASK _REGION3_ENTRY_RO
-#else
-#define _PUD_PROT_MASK 0
-#endif
-
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pgd_t *pgd, unsigned long addr)
{
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
- prot = pud_val(*pud) & _PUD_PROT_MASK;
+ prot = pud_val(*pud) & _REGION3_ENTRY_RO;
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
-#ifdef CONFIG_32BIT
- max_addr = 1UL << 31;
-#else
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[MODULES_NR].start_address = MODULES_VADDR;
-#endif
address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 519bba716cc3..23c496957c22 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -51,7 +51,6 @@ struct qout64 {
struct qrange range[6];
};
-#ifdef CONFIG_64BIT
struct qrange_old {
unsigned int start; /* last byte type */
unsigned int end; /* last byte reserved */
@@ -65,7 +64,6 @@ struct qout64_old {
int segrcnt;
struct qrange_old range[6];
};
-#endif
struct qin64 {
char qopcode;
@@ -103,7 +101,6 @@ static int scode_set;
static int
dcss_set_subcodes(void)
{
-#ifdef CONFIG_64BIT
char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
unsigned long rx, ry;
int rc;
@@ -135,7 +132,6 @@ dcss_set_subcodes(void)
segext_scode = DCSS_SEGEXTX;
return 0;
}
-#endif
/* Diag x'64' new subcodes are not supported, set to old subcodes */
loadshr_scode = DCSS_LOADNOLY;
loadnsr_scode = DCSS_LOADNSR;
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter,
rx = (unsigned long) parameter;
ry = (unsigned long) *func;
-#ifdef CONFIG_64BIT
/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
if (*func > DCSS_SEGEXT)
asm volatile(
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter,
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#else
- asm volatile(
- " diag %0,%1,0x64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#endif
*ret1 = rx;
*ret2 = ry;
return rc;
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg)
goto out_free;
}
-#ifdef CONFIG_64BIT
/* Only old format of output area of Diagnose x'64' is supported,
copy data for the new format. */
if (segext_scode == DCSS_SEGEXT) {
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg)
}
kfree(qout_old);
}
-#endif
if (qout->segcnt > 6) {
rc = -EOPNOTSUPP;
goto out_free;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3ff86533f7db..76515bcea2f1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -36,15 +36,9 @@
#include <asm/facility.h>
#include "../kernel/entry.h"
-#ifndef CONFIG_64BIT
-#define __FAIL_ADDR_MASK 0x7ffff000
-#define __SUBCODE_MASK 0x0200
-#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_64BIT */
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
@@ -54,7 +48,6 @@
static unsigned long store_indication __read_mostly;
-#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
if (test_facility(75))
@@ -62,7 +55,6 @@ static int __init fault_init(void)
return 0;
}
early_initcall(fault_init);
-#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -133,7 +125,6 @@ static int bad_address(void *p)
return probe_kernel_address((unsigned long *)p, dummy);
}
-#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
unsigned long *table = __va(asce & PAGE_MASK);
@@ -187,33 +178,6 @@ bad:
pr_cont("BAD\n");
}
-#else /* CONFIG_64BIT */
-
-static void dump_pagetable(unsigned long asce, unsigned long address)
-{
- unsigned long *table = __va(asce & PAGE_MASK);
-
- pr_alert("AS:%08lx ", asce);
- table = table + ((address >> 20) & 0x7ff);
- if (bad_address(table))
- goto bad;
- pr_cont("S:%08lx ", *table);
- if (*table & _SEGMENT_ENTRY_INVALID)
- goto out;
- table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- table = table + ((address >> 12) & 0xff);
- if (bad_address(table))
- goto bad;
- pr_cont("P:%08lx ", *table);
-out:
- pr_cont("\n");
- return;
-bad:
- pr_cont("BAD\n");
-}
-
-#endif /* CONFIG_64BIT */
-
static void dump_fault_info(struct pt_regs *regs)
{
unsigned long asce;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5c586c78ca8d..1eb41bb3010c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
pmd_t *pmdp, pmd;
pmdp = (pmd_t *) pudp;
-#ifdef CONFIG_64BIT
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pmdp = (pmd_t *) pud_deref(pud);
pmdp += pmd_index(addr);
-#endif
do {
pmd = *pmdp;
barrier();
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
pud_t *pudp, pud;
pudp = (pud_t *) pgdp;
-#ifdef CONFIG_64BIT
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pudp = (pud_t *) pgd_deref(pgd);
pudp += pud_index(addr);
-#endif
do {
pud = *pudp;
barrier();
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 210ffede0153..e617e74b7be2 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -14,20 +14,23 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
/*
* Convert encoding pte bits pmd bits
- * .IR...wrdytp dy..R...I...wr
- * empty .10...000000 -> 00..0...1...00
- * prot-none, clean, old .11...000001 -> 00..1...1...00
- * prot-none, clean, young .11...000101 -> 01..1...1...00
- * prot-none, dirty, old .10...001001 -> 10..1...1...00
- * prot-none, dirty, young .10...001101 -> 11..1...1...00
- * read-only, clean, old .11...010001 -> 00..1...1...01
- * read-only, clean, young .01...010101 -> 01..1...0...01
- * read-only, dirty, old .11...011001 -> 10..1...1...01
- * read-only, dirty, young .01...011101 -> 11..1...0...01
- * read-write, clean, old .11...110001 -> 00..0...1...11
- * read-write, clean, young .01...110101 -> 01..0...0...11
- * read-write, dirty, old .10...111001 -> 10..0...1...11
- * read-write, dirty, young .00...111101 -> 11..0...0...11
+ * lIR.uswrdy.p dy..R...I...wr
+ * empty 010.000000.0 -> 00..0...1...00
+ * prot-none, clean, old 111.000000.1 -> 00..1...1...00
+ * prot-none, clean, young 111.000001.1 -> 01..1...1...00
+ * prot-none, dirty, old 111.000010.1 -> 10..1...1...00
+ * prot-none, dirty, young 111.000011.1 -> 11..1...1...00
+ * read-only, clean, old 111.000100.1 -> 00..1...1...01
+ * read-only, clean, young 101.000101.1 -> 01..1...0...01
+ * read-only, dirty, old 111.000110.1 -> 10..1...1...01
+ * read-only, dirty, young 101.000111.1 -> 11..1...0...01
+ * read-write, clean, old 111.001100.1 -> 00..1...1...11
+ * read-write, clean, young 101.001101.1 -> 01..1...0...11
+ * read-write, dirty, old 110.001110.1 -> 10..0...1...11
+ * read-write, dirty, young 100.001111.1 -> 11..0...0...11
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ * u unused, l large
*/
if (pte_present(pte)) {
pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
@@ -48,20 +51,23 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
/*
* Convert encoding pmd bits pte bits
- * dy..R...I...wr .IR...wrdytp
- * empty 00..0...1...00 -> .10...001100
- * prot-none, clean, old 00..0...1...00 -> .10...000001
- * prot-none, clean, young 01..0...1...00 -> .10...000101
- * prot-none, dirty, old 10..0...1...00 -> .10...001001
- * prot-none, dirty, young 11..0...1...00 -> .10...001101
- * read-only, clean, old 00..1...1...01 -> .11...010001
- * read-only, clean, young 01..1...1...01 -> .11...010101
- * read-only, dirty, old 10..1...1...01 -> .11...011001
- * read-only, dirty, young 11..1...1...01 -> .11...011101
- * read-write, clean, old 00..0...1...11 -> .10...110001
- * read-write, clean, young 01..0...1...11 -> .10...110101
- * read-write, dirty, old 10..0...1...11 -> .10...111001
- * read-write, dirty, young 11..0...1...11 -> .10...111101
+ * dy..R...I...wr lIR.uswrdy.p
+ * empty 00..0...1...00 -> 010.000000.0
+ * prot-none, clean, old 00..1...1...00 -> 111.000000.1
+ * prot-none, clean, young 01..1...1...00 -> 111.000001.1
+ * prot-none, dirty, old 10..1...1...00 -> 111.000010.1
+ * prot-none, dirty, young 11..1...1...00 -> 111.000011.1
+ * read-only, clean, old 00..1...1...01 -> 111.000100.1
+ * read-only, clean, young 01..1...0...01 -> 101.000101.1
+ * read-only, dirty, old 10..1...1...01 -> 111.000110.1
+ * read-only, dirty, young 11..1...0...01 -> 101.000111.1
+ * read-write, clean, old 00..1...1...11 -> 111.001100.1
+ * read-write, clean, young 01..1...0...11 -> 101.001101.1
+ * read-write, dirty, old 10..0...1...11 -> 110.001110.1
+ * read-write, dirty, young 11..0...0...11 -> 100.001111.1
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ * u unused, l large
*/
if (pmd_present(pmd)) {
pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
@@ -70,8 +76,8 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
+ pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
+ pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d35b15113b17..80875c43a4a4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,6 @@ void __init paging_init(void)
unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
-#ifdef CONFIG_64BIT
if (VMALLOC_END > (1UL << 42)) {
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION2_ENTRY_EMPTY;
@@ -113,10 +112,6 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
-#else
- asce_bits = _ASCE_TABLE_LENGTH;
- pgd_type = _SEGMENT_ENTRY_EMPTY;
-#endif
S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 2eb34bdfc613..8a993a53fcd6 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -1,7 +1,7 @@
/*
* Access kernel memory without faulting -- s390 specific implementation.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2015
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
@@ -16,51 +16,55 @@
#include <asm/ctl_reg.h>
#include <asm/io.h>
-/*
- * This function writes to kernel memory bypassing DAT and possible
- * write protection. It copies one to four bytes from src to dst
- * using the stura instruction.
- * Returns the number of bytes copied or -EFAULT.
- */
-static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
+static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
- unsigned long count, aligned;
- int offset, mask;
- int rc = -EFAULT;
+ unsigned long aligned, offset, count;
+ char tmp[8];
- aligned = (unsigned long) dst & ~3UL;
- offset = (unsigned long) dst & 3;
- count = min_t(unsigned long, 4 - offset, size);
- mask = (0xf << (4 - count)) & 0xf;
- mask >>= offset;
+ aligned = (unsigned long) dst & ~7UL;
+ offset = (unsigned long) dst & 7UL;
+ size = min(8UL - offset, size);
+ count = size - 1;
asm volatile(
" bras 1,0f\n"
- " icm 0,0,0(%3)\n"
- "0: l 0,0(%1)\n"
- " lra %1,0(%1)\n"
- "1: ex %2,0(1)\n"
- "2: stura 0,%1\n"
- " la %0,0\n"
- "3:\n"
- EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
- : "+d" (rc), "+a" (aligned)
- : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
- return rc ? rc : count;
+ " mvc 0(1,%4),0(%5)\n"
+ "0: mvc 0(8,%3),0(%0)\n"
+ " ex %1,0(1)\n"
+ " lg %1,0(%3)\n"
+ " lra %0,0(%0)\n"
+ " sturg %1,%0\n"
+ : "+&a" (aligned), "+&a" (count), "=m" (tmp)
+ : "a" (&tmp), "a" (&tmp[offset]), "a" (src)
+ : "cc", "memory", "1");
+ return size;
}
-long probe_kernel_write(void *dst, const void *src, size_t size)
+/*
+ * s390_kernel_write - write to kernel memory bypassing DAT
+ * @dst: destination address
+ * @src: source address
+ * @size: number of bytes to copy
+ *
+ * This function writes to kernel memory bypassing DAT and possible page table
+ * write protection. It writes to the destination using the sturg instruction.
+ * Therefore we have a read-modify-write sequence: the function reads eight
+ * bytes from the destination at an eight-byte boundary, modifies the
+ * requested bytes and writes the result back in a loop.
+ *
+ * Note: this means that this function may not be called concurrently on
+ * several cpus with overlapping words, since this may potentially
+ * cause data corruption.
+ */
+void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
- long copied = 0;
+ long copied;
while (size) {
- copied = probe_kernel_write_odd(dst, src, size);
- if (copied < 0)
- break;
+ copied = s390_kernel_write_odd(dst, src, size);
dst += copied;
src += copied;
size -= copied;
}
- return copied < 0 ? -EFAULT : 0;
}
static int __memcpy_real(void *dest, void *src, size_t count)
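
For illustration (this sketch is not part of the patch): the new s390_kernel_write() above loops over s390_kernel_write_odd(), which handles at most the bytes up to the next doubleword boundary per call. The stand-alone C program below models only that offset/length split; the real routine performs the actual copy with mvc/sturg in inline assembly and bypasses DAT.

#include <stdio.h>
#include <stddef.h>

/* Bytes the next s390_kernel_write_odd() call would cover for (dst, size). */
static size_t chunk_len(unsigned long dst, size_t size)
{
        unsigned long offset = dst & 7UL;       /* position inside the 8-byte word */

        return (8UL - offset) < size ? (8UL - offset) : size;
}

int main(void)
{
        unsigned long dst = 0x1005UL;   /* hypothetical, unaligned destination */
        size_t size = 10;

        while (size) {
                size_t n = chunk_len(dst, size);

                printf("write %zu byte(s) at 0x%lx (aligned word 0x%lx)\n",
                       n, dst, dst & ~7UL);
                dst += n;
                size -= n;
        }
        return 0;       /* prints chunks of 3 and 7 bytes for this example */
}
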
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 5535cfe0ee11..0f3604395805 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void)
memsize = rzm * rnmax;
if (!rzm)
rzm = 1ULL << 17;
- if (IS_ENABLED(CONFIG_32BIT)) {
- rzm = min(ADDR2G, rzm);
- memsize = min(ADDR2G, memsize);
- }
max_physmem_end = memsize;
addr = 0;
/* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 179a2c20b01f..6e552af08c76 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -32,7 +32,7 @@
#include <asm/pgalloc.h>
unsigned long mmap_rnd_mask;
-unsigned long mmap_align_mask;
+static unsigned long mmap_align_mask;
static unsigned long stack_maxrandom_size(void)
{
@@ -60,22 +60,20 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
if (is_32bit_task())
return (get_random_int() & 0x7ff) << PAGE_SHIFT;
else
return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}
-static unsigned long mmap_base_legacy(void)
+static unsigned long mmap_base_legacy(unsigned long rnd)
{
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + rnd;
}
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -84,7 +82,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
gap &= PAGE_MASK;
- return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
+ return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}
unsigned long
@@ -179,40 +177,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
-unsigned long randomize_et_dyn(void)
-{
- unsigned long base;
-
- base = STACK_TOP / 3 * 2;
- if (!is_32bit_task())
- /* Align to 4GB */
- base &= ~((1UL << 32) - 1);
- return base + mmap_rnd();
-}
-
-#ifndef CONFIG_64BIT
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
-
-#else
-
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
@@ -273,15 +237,20 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
+ mm->mmap_base = mmap_base_legacy(random_factor);
mm->get_unmapped_area = s390_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
}
}
@@ -317,5 +286,3 @@ static int __init setup_mmap_rnd(void)
return 0;
}
early_initcall(setup_mmap_rnd);
-
-#endif
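
For a rough sense of the address-space randomization ranges in the arch_mmap_rnd() hunk above (sketch only, not part of the patch): the compat mask 0x7ff comes straight from the code, while the 64-bit mmap_rnd_mask is set up elsewhere in this file and is replaced by a placeholder value of 0x3ff here.

#include <stdio.h>

#define PAGE_SHIFT 12                           /* 4 KiB pages on s390 */

int main(void)
{
        unsigned long compat_mask = 0x7ff;      /* from arch_mmap_rnd() above */
        unsigned long rnd_mask_64 = 0x3ff;      /* placeholder for mmap_rnd_mask */

        /* mask + 1 equally likely page offsets -> randomization span */
        printf("compat tasks: %lu MiB span\n",
               ((compat_mask + 1) << PAGE_SHIFT) >> 20);
        printf("64-bit tasks: %lu MiB span (mask 0x%lx)\n",
               ((rnd_mask_64 + 1) << PAGE_SHIFT) >> 20, rnd_mask_64);
        return 0;
}
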
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 426c9d462d1c..749c98407b41 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
int i;
- if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
+ if (test_facility(13)) {
__ptep_ipte_range(address, nr - 1, pte);
return;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542f2ba2..b33f66110ca9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
+#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>
@@ -27,14 +28,8 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-#ifndef CONFIG_64BIT
-#define ALLOC_ORDER 1
-#define FRAG_MASK 0x0f
-#else
#define ALLOC_ORDER 2
#define FRAG_MASK 0x03
-#endif
-
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
@@ -50,7 +45,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
free_pages((unsigned long) table, ALLOC_ORDER);
}
-#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
@@ -140,7 +134,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
if (current->active_mm == mm)
set_user_asce(mm);
}
-#endif
#ifdef CONFIG_PGSTE
@@ -928,6 +921,40 @@ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
}
EXPORT_SYMBOL(get_guest_storage_key);
+static int page_table_allocate_pgste_min = 0;
+static int page_table_allocate_pgste_max = 1;
+int page_table_allocate_pgste = 0;
+EXPORT_SYMBOL(page_table_allocate_pgste);
+
+static struct ctl_table page_table_sysctl[] = {
+ {
+ .procname = "allocate_pgste",
+ .data = &page_table_allocate_pgste,
+ .maxlen = sizeof(int),
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = proc_dointvec,
+ .extra1 = &page_table_allocate_pgste_min,
+ .extra2 = &page_table_allocate_pgste_max,
+ },
+ { }
+};
+
+static struct ctl_table page_table_sysctl_dir[] = {
+ {
+ .procname = "vm",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = page_table_sysctl,
+ },
+ { }
+};
+
+static int __init page_table_register_sysctl(void)
+{
+ return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+}
+__initcall(page_table_register_sysctl);
+
#else /* CONFIG_PGSTE */
static inline int page_table_with_pgste(struct page *page)
@@ -971,7 +998,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
struct page *uninitialized_var(page);
unsigned int mask, bit;
- if (mm_has_pgste(mm))
+ if (mm_alloc_pgste(mm))
return page_table_alloc_pgste(mm);
/* Allocate fragments of a 4K page as 1K/2K page table */
spin_lock_bh(&mm->context.list_lock);
@@ -1173,116 +1200,25 @@ static inline void thp_split_mm(struct mm_struct *mm)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
- struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end)
-{
- unsigned long next, *table, *new;
- struct page *page;
- spinlock_t *ptl;
- pmd_t *pmd;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
-again:
- if (pmd_none_or_clear_bad(pmd))
- continue;
- table = (unsigned long *) pmd_deref(*pmd);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- if (page_table_with_pgste(page))
- continue;
- /* Allocate new page table with pgstes */
- new = page_table_alloc_pgste(mm);
- if (!new)
- return -ENOMEM;
-
- ptl = pmd_lock(mm, pmd);
- if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
- /* Nuke pmd entry pointing to the "short" page table */
- pmdp_flush_lazy(mm, addr, pmd);
- pmd_clear(pmd);
- /* Copy ptes from old table to new table */
- memcpy(new, table, PAGE_SIZE/2);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- /* Establish new table */
- pmd_populate(mm, pmd, (pte_t *) new);
- /* Free old table with rcu, there might be a walker! */
- page_table_free_rcu(tlb, table, addr);
- new = NULL;
- }
- spin_unlock(ptl);
- if (new) {
- page_table_free_pgste(new);
- goto again;
- }
- } while (pmd++, addr = next, addr != end);
-
- return addr;
-}
-
-static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
- struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end)
-{
- unsigned long next;
- pud_t *pud;
-
- pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
- if (unlikely(IS_ERR_VALUE(next)))
- return next;
- } while (pud++, addr = next, addr != end);
-
- return addr;
-}
-
-static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long addr, unsigned long end)
-{
- unsigned long next;
- pgd_t *pgd;
-
- pgd = pgd_offset(mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
- if (unlikely(IS_ERR_VALUE(next)))
- return next;
- } while (pgd++, addr = next, addr != end);
-
- return 0;
-}
-
/*
* switch on pgstes for its userspace process (for kvm)
*/
int s390_enable_sie(void)
{
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- struct mmu_gather tlb;
+ struct mm_struct *mm = current->mm;
/* Do we have pgstes? if yes, we are done */
- if (mm_has_pgste(tsk->mm))
+ if (mm_has_pgste(mm))
return 0;
-
+ /* Fail if the page tables are 2K */
+ if (!mm_alloc_pgste(mm))
+ return -EINVAL;
down_write(&mm->mmap_sem);
+ mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
- /* Reallocate the page tables with pgstes */
- tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
- if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
- mm->context.has_pgste = 1;
- tlb_finish_mmu(&tlb, 0, TASK_SIZE);
up_write(&mm->mmap_sem);
- return mm->context.has_pgste ? 0 : -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
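
The ctl_table pair added above should appear as /proc/sys/vm/allocate_pgste through the usual sysctl/procfs mapping. Below is a minimal user-space sketch (not part of the patch) that turns the knob on; note that with the reworked s390_enable_sie(), an mm whose page tables were not allocated with pgstes is no longer upgraded and gets -EINVAL, so as far as this hunk shows the setting has to be in effect when the mm of the prospective KVM process is created.

#include <stdio.h>

int main(void)
{
        /* "vm" directory + "allocate_pgste" entry registered above,
         * mode S_IRUGO | S_IWUSR, handled by proc_dointvec with min 0 / max 1. */
        FILE *f = fopen("/proc/sys/vm/allocate_pgste", "w");

        if (!f) {
                perror("allocate_pgste");
                return 1;
        }
        fputs("1\n", f);
        return fclose(f) ? 1 : 0;
}
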
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2f751a..ef7d6c8fea66 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void)
{
pud_t *pud = NULL;
-#ifdef CONFIG_64BIT
pud = vmem_alloc_pages(2);
if (!pud)
return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
return pud;
}
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
-#ifdef CONFIG_64BIT
pmd = vmem_alloc_pages(2);
if (!pmd)
return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
return pmd;
}
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pud_val(*pu_dir) = __pa(address) |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pmd_val(*pm_dir) = __pa(address) |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
-#ifdef CONFIG_64BIT
/* Use 1MB frames for vmemmap if available. We always
* use large frames even if they are only partially
* used.
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
-#endif
pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index ba44c9f55346..a1c917d881ec 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -1,134 +1,115 @@
/*
* BPF Jit compiler for s390, help functions.
*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012,2015
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
+
#include <linux/linkage.h>
+#include "bpf_jit.h"
/*
* Calling convention:
- * registers %r2, %r6-%r8, %r10-%r11, %r13, %r15 are call saved
- * %r2: skb pointer
- * %r3: offset parameter
- * %r5: BPF A accumulator
- * %r8: return address
- * %r9: save register for skb pointer
- * %r10: skb->data
- * %r11: skb->len - skb->data_len (headlen)
- * %r12: BPF X accumulator
+ * registers %r7-%r10, %r11, %r13, and %r15 are call saved
+ *
+ * Input (64 bit):
+ * %r3 (%b2) = offset into skb data
+ * %r6 (%b5) = return address
+ * %r7 (%b6) = skb pointer
+ * %r12 = skb data pointer
+ *
+ * Output:
+ * %r14 = %b0 = return value (read skb value)
+ *
+ * Work registers: %r2,%r4,%r5,%r14
*
* skb_copy_bits takes 4 parameters:
* %r2 = skb pointer
* %r3 = offset into skb data
* %r4 = pointer to temp buffer
* %r5 = length to copy
+ * Return value in %r2: 0 = ok
+ *
+ * bpf_internal_load_pointer_neg_helper takes 3 parameters:
+ * %r2 = skb pointer
+ * %r3 = offset into data
+ * %r4 = length to copy
+ * Return value in %r2: Pointer to data
*/
-#define SKBDATA %r8
-
- /* A = *(u32 *) (skb->data+K+X) */
-ENTRY(sk_load_word_ind)
- ar %r3,%r12 # offset += X
- bmr %r8 # < 0 -> return with cc
-
- /* A = *(u32 *) (skb->data+K) */
-ENTRY(sk_load_word)
- llgfr %r1,%r3 # extend offset
- ahi %r3,4 # offset + 4
- clr %r11,%r3 # hlen <= offset + 4 ?
- jl sk_load_word_slow
- l %r5,0(%r1,%r10) # get word from skb
- xr %r1,%r1 # set cc to zero
- br %r8
-sk_load_word_slow:
- lgr %r9,%r2 # save %r2
- lgr %r3,%r1 # offset
- la %r4,160(%r15) # pointer to temp buffer
- lghi %r5,4 # 4 bytes
- brasl %r14,skb_copy_bits # get data from skb
- l %r5,160(%r15) # load result from temp buffer
- ltgr %r2,%r2 # set cc to (%r2 != 0)
- lgr %r2,%r9 # restore %r2
- br %r8
+#define SKF_MAX_NEG_OFF -0x200000 /* SKF_LL_OFF from filter.h */
- /* A = *(u16 *) (skb->data+K+X) */
-ENTRY(sk_load_half_ind)
- ar %r3,%r12 # offset += X
- bmr %r8 # < 0 -> return with cc
-
- /* A = *(u16 *) (skb->data+K) */
-ENTRY(sk_load_half)
- llgfr %r1,%r3 # extend offset
- ahi %r3,2 # offset + 2
- clr %r11,%r3 # hlen <= offset + 2 ?
- jl sk_load_half_slow
- llgh %r5,0(%r1,%r10) # get half from skb
- xr %r1,%r1 # set cc to zero
- br %r8
-
-sk_load_half_slow:
- lgr %r9,%r2 # save %r2
- lgr %r3,%r1 # offset
- la %r4,162(%r15) # pointer to temp buffer
- lghi %r5,2 # 2 bytes
- brasl %r14,skb_copy_bits # get data from skb
- xc 160(2,%r15),160(%r15)
- l %r5,160(%r15) # load result from temp buffer
- ltgr %r2,%r2 # set cc to (%r2 != 0)
- lgr %r2,%r9 # restore %r2
- br %r8
+/*
+ * Load SIZE bytes from SKB
+ */
+#define sk_load_common(NAME, SIZE, LOAD) \
+ENTRY(sk_load_##NAME); \
+ ltgr %r3,%r3; /* Is offset negative? */ \
+ jl sk_load_##NAME##_slow_neg; \
+ENTRY(sk_load_##NAME##_pos); \
+ aghi %r3,SIZE; /* Offset + SIZE */ \
+ clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
+ jh sk_load_##NAME##_slow; \
+ LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
+ b OFF_OK(%r6); /* Return */ \
+ \
+sk_load_##NAME##_slow:; \
+ lgr %r2,%r7; /* Arg1 = skb pointer */ \
+ aghi %r3,-SIZE; /* Arg2 = offset */ \
+ la %r4,STK_OFF_TMP(%r15); /* Arg3 = temp buffer */ \
+ lghi %r5,SIZE; /* Arg4 = size */ \
+ brasl %r14,skb_copy_bits; /* Get data from skb */ \
+ LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp buffer */ \
+ ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
+ br %r6; /* Return */
- /* A = *(u8 *) (skb->data+K+X) */
-ENTRY(sk_load_byte_ind)
- ar %r3,%r12 # offset += X
- bmr %r8 # < 0 -> return with cc
+sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
+sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
- /* A = *(u8 *) (skb->data+K) */
+/*
+ * Load 1 byte from SKB (optimized version)
+ */
+ /* r14 = *(u8 *) (skb->data+offset) */
ENTRY(sk_load_byte)
- llgfr %r1,%r3 # extend offset
- clr %r11,%r3 # hlen < offset ?
- jle sk_load_byte_slow
- lhi %r5,0
- ic %r5,0(%r1,%r10) # get byte from skb
- xr %r1,%r1 # set cc to zero
- br %r8
+ ltgr %r3,%r3 # Is offset negative?
+ jl sk_load_byte_slow_neg
+ENTRY(sk_load_byte_pos)
+ clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
+ jnl sk_load_byte_slow
+ llgc %r14,0(%r3,%r12) # Get byte from skb
+ b OFF_OK(%r6) # Return OK
sk_load_byte_slow:
- lgr %r9,%r2 # save %r2
- lgr %r3,%r1 # offset
- la %r4,163(%r15) # pointer to temp buffer
- lghi %r5,1 # 1 byte
- brasl %r14,skb_copy_bits # get data from skb
- xc 160(3,%r15),160(%r15)
- l %r5,160(%r15) # load result from temp buffer
- ltgr %r2,%r2 # set cc to (%r2 != 0)
- lgr %r2,%r9 # restore %r2
- br %r8
+ lgr %r2,%r7 # Arg1 = skb pointer
+ # Arg2 = offset
+ la %r4,STK_OFF_TMP(%r15) # Arg3 = pointer to temp buffer
+ lghi %r5,1 # Arg4 = size (1 byte)
+ brasl %r14,skb_copy_bits # Get data from skb
+ llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
+ ltgr %r2,%r2 # Set cc to (%r2 != 0)
+ br %r6 # Return cc
+
+#define sk_negative_common(NAME, SIZE, LOAD) \
+sk_load_##NAME##_slow_neg:; \
+ cgfi %r3,SKF_MAX_NEG_OFF; \
+ jl bpf_error; \
+ lgr %r2,%r7; /* Arg1 = skb pointer */ \
+ /* Arg2 = offset */ \
+ lghi %r4,SIZE; /* Arg3 = size */ \
+ brasl %r14,bpf_internal_load_pointer_neg_helper; \
+ ltgr %r2,%r2; \
+ jz bpf_error; \
+ LOAD %r14,0(%r2); /* Get data from pointer */ \
+ xr %r3,%r3; /* Set cc to zero */ \
+ br %r6; /* Return cc */
- /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
-ENTRY(sk_load_byte_msh)
- llgfr %r1,%r3 # extend offset
- clr %r11,%r3 # hlen < offset ?
- jle sk_load_byte_msh_slow
- lhi %r12,0
- ic %r12,0(%r1,%r10) # get byte from skb
- nill %r12,0x0f
- sll %r12,2
- xr %r1,%r1 # set cc to zero
- br %r8
+sk_negative_common(word, 4, llgf)
+sk_negative_common(half, 2, llgh)
+sk_negative_common(byte, 1, llgc)
-sk_load_byte_msh_slow:
- lgr %r9,%r2 # save %r2
- lgr %r3,%r1 # offset
- la %r4,163(%r15) # pointer to temp buffer
- lghi %r5,1 # 1 byte
- brasl %r14,skb_copy_bits # get data from skb
- xc 160(3,%r15),160(%r15)
- l %r12,160(%r15) # load result from temp buffer
- nill %r12,0x0f
- sll %r12,2
- ltgr %r2,%r2 # set cc to (%r2 != 0)
- lgr %r2,%r9 # restore %r2
- br %r8
+bpf_error:
+# force a return 0 from jit handler
+ ltgr %r15,%r15 # Set condition code
+ br %r6
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
new file mode 100644
index 000000000000..ba8593a515ba
--- /dev/null
+++ b/arch/s390/net/bpf_jit.h
@@ -0,0 +1,58 @@
+/*
+ * BPF Jit compiler defines
+ *
+ * Copyright IBM Corp. 2012,2015
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#ifndef __ARCH_S390_NET_BPF_JIT_H
+#define __ARCH_S390_NET_BPF_JIT_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/filter.h>
+#include <linux/types.h>
+
+extern u8 sk_load_word_pos[], sk_load_half_pos[], sk_load_byte_pos[];
+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Stackframe layout (packed stack):
+ *
+ * ^ high
+ * +---------------+ |
+ * | old backchain | |
+ * +---------------+ |
+ * | r15 - r6 | |
+ * BFP -> +===============+ |
+ * | | |
+ * | BPF stack | |
+ * | | |
+ * +---------------+ |
+ * | 8 byte hlen | |
+ * R15+168 -> +---------------+ |
+ * | 4 byte align | |
+ * +---------------+ |
+ * | 4 byte temp | |
+ * | for bpf_jit.S | |
+ * R15+160 -> +---------------+ |
+ * | new backchain | |
+ * R15+152 -> +---------------+ |
+ * | + 152 byte SA | |
+ * R15 -> +---------------+ + low
+ *
+ * We get 160 bytes of stack space from the calling function, but only use
+ * 11 * 8 bytes (old backchain + r15 - r6) of it for storing registers.
+ */
+#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8))
+#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
+#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
+
+/* Offset to skip condition code check */
+#define OFF_OK 4
+
+#endif /* __ARCH_S390_NET_BPF_JIT_H */
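
To make the layout above concrete (sketch only, not part of the patch): assuming MAX_BPF_STACK is 512, which is an assumption here since it comes from linux/filter.h and is not shown in this diff, STK_OFF evaluates as follows.

#include <stdio.h>

#define MAX_BPF_STACK 512       /* assumed; the real value comes from linux/filter.h */
#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8))

int main(void)
{
        /* 512 (BPF stack) + 8 (hlen) + 4 (align) + 4 (temp buffer)
         * + 72 (unused part of the 160-byte save area) = 600 */
        printf("STK_OFF = %d bytes\n", STK_OFF);
        return 0;
}
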
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bbd1981cc150..7690dc8e1ab5 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1,817 +1,1209 @@
/*
* BPF Jit compiler for s390.
*
- * Copyright IBM Corp. 2012
+ * Minimum build requirements:
+ *
+ * - HAVE_MARCH_Z196_FEATURES: laal, laalg
+ * - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
+ * - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
+ * - PACK_STACK
+ * - 64BIT
+ *
+ * Copyright IBM Corp. 2012,2015
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
+
+#define KMSG_COMPONENT "bpf_jit"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/netdevice.h>
-#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
-#include <asm/facility.h>
#include <asm/dis.h>
+#include "bpf_jit.h"
-/*
- * Conventions:
- * %r2 = skb pointer
- * %r3 = offset parameter
- * %r4 = scratch register / length parameter
- * %r5 = BPF A accumulator
- * %r8 = return address
- * %r9 = save register for skb pointer
- * %r10 = skb->data
- * %r11 = skb->len - skb->data_len (headlen)
- * %r12 = BPF X accumulator
- * %r13 = literal pool pointer
- * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
- */
int bpf_jit_enable __read_mostly;
+struct bpf_jit {
+ u32 seen; /* Flags to remember seen eBPF instructions */
+ u32 seen_reg[16]; /* Array to remember which registers are used */
+ u32 *addrs; /* Array with relative instruction addresses */
+ u8 *prg_buf; /* Start of program */
+ int size; /* Size of program and literal pool */
+ int size_prg; /* Size of program */
+ int prg; /* Current position in program */
+ int lit_start; /* Start of literal pool */
+ int lit; /* Current position in literal pool */
+ int base_ip; /* Base address for literal pool */
+ int ret0_ip; /* Address of return 0 */
+ int exit_ip; /* Address of exit */
+};
+
+#define BPF_SIZE_MAX 4096 /* Max size for program */
+
+#define SEEN_SKB 1 /* skb access */
+#define SEEN_MEM 2 /* use mem[] for temporary storage */
+#define SEEN_RET0 4 /* ret0_ip points to a valid return 0 */
+#define SEEN_LITERAL 8 /* code uses literals */
+#define SEEN_FUNC 16 /* calls C functions */
+#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
+
/*
- * assembly code in arch/x86/net/bpf_jit.S
+ * s390 registers
*/
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
-extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
+#define REG_W0 (__MAX_BPF_REG+0) /* Work register 1 (even) */
+#define REG_W1 (__MAX_BPF_REG+1) /* Work register 2 (odd) */
+#define REG_SKB_DATA (__MAX_BPF_REG+2) /* SKB data register */
+#define REG_L (__MAX_BPF_REG+3) /* Literal pool register */
+#define REG_15 (__MAX_BPF_REG+4) /* Register 15 */
+#define REG_0 REG_W0 /* Register 0 */
+#define REG_2 BPF_REG_1 /* Register 2 */
+#define REG_14 BPF_REG_0 /* Register 14 */
-struct bpf_jit {
- unsigned int seen;
- u8 *start;
- u8 *prg;
- u8 *mid;
- u8 *lit;
- u8 *end;
- u8 *base_ip;
- u8 *ret0_ip;
- u8 *exit_ip;
- unsigned int off_load_word;
- unsigned int off_load_half;
- unsigned int off_load_byte;
- unsigned int off_load_bmsh;
- unsigned int off_load_iword;
- unsigned int off_load_ihalf;
- unsigned int off_load_ibyte;
+/*
+ * Mapping of BPF registers to s390 registers
+ */
+static const int reg2hex[] = {
+ /* Return code */
+ [BPF_REG_0] = 14,
+ /* Function parameters */
+ [BPF_REG_1] = 2,
+ [BPF_REG_2] = 3,
+ [BPF_REG_3] = 4,
+ [BPF_REG_4] = 5,
+ [BPF_REG_5] = 6,
+ /* Call saved registers */
+ [BPF_REG_6] = 7,
+ [BPF_REG_7] = 8,
+ [BPF_REG_8] = 9,
+ [BPF_REG_9] = 10,
+ /* BPF stack pointer */
+ [BPF_REG_FP] = 13,
+ /* SKB data pointer */
+ [REG_SKB_DATA] = 12,
+ /* Work registers for s390x backend */
+ [REG_W0] = 0,
+ [REG_W1] = 1,
+ [REG_L] = 11,
+ [REG_15] = 15,
};
-#define BPF_SIZE_MAX 4096 /* Max size for program */
+static inline u32 reg(u32 dst_reg, u32 src_reg)
+{
+ return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
+}
+
+static inline u32 reg_high(u32 reg)
+{
+ return reg2hex[reg] << 4;
+}
+
+static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+{
+ u32 r1 = reg2hex[b1];
+
+ if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
+ jit->seen_reg[r1] = 1;
+}
+
+#define REG_SET_SEEN(b1) \
+({ \
+ reg_set_seen(jit, b1); \
+})
+
+#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
+
+/*
+ * EMIT macros for code generation
+ */
+
+#define _EMIT2(op) \
+({ \
+ if (jit->prg_buf) \
+ *(u16 *) (jit->prg_buf + jit->prg) = op; \
+ jit->prg += 2; \
+})
-#define SEEN_DATAREF 1 /* might call external helpers */
-#define SEEN_XREG 2 /* ebx is used */
-#define SEEN_MEM 4 /* use mem[] for temporary storage */
-#define SEEN_RET0 8 /* pc_ret0 points to a valid return 0 */
-#define SEEN_LITERAL 16 /* code uses literals */
-#define SEEN_LOAD_WORD 32 /* code uses sk_load_word */
-#define SEEN_LOAD_HALF 64 /* code uses sk_load_half */
-#define SEEN_LOAD_BYTE 128 /* code uses sk_load_byte */
-#define SEEN_LOAD_BMSH 256 /* code uses sk_load_byte_msh */
-#define SEEN_LOAD_IWORD 512 /* code uses sk_load_word_ind */
-#define SEEN_LOAD_IHALF 1024 /* code uses sk_load_half_ind */
-#define SEEN_LOAD_IBYTE 2048 /* code uses sk_load_byte_ind */
-
-#define EMIT2(op) \
-({ \
- if (jit->prg + 2 <= jit->mid) \
- *(u16 *) jit->prg = op; \
- jit->prg += 2; \
+#define EMIT2(op, b1, b2) \
+({ \
+ _EMIT2(op | reg(b1, b2)); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
})
-#define EMIT4(op) \
-({ \
- if (jit->prg + 4 <= jit->mid) \
- *(u32 *) jit->prg = op; \
- jit->prg += 4; \
+#define _EMIT4(op) \
+({ \
+ if (jit->prg_buf) \
+ *(u32 *) (jit->prg_buf + jit->prg) = op; \
+ jit->prg += 4; \
})
-#define EMIT4_DISP(op, disp) \
-({ \
- unsigned int __disp = (disp) & 0xfff; \
- EMIT4(op | __disp); \
+#define EMIT4(op, b1, b2) \
+({ \
+ _EMIT4(op | reg(b1, b2)); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
})
-#define EMIT4_IMM(op, imm) \
-({ \
- unsigned int __imm = (imm) & 0xffff; \
- EMIT4(op | __imm); \
+#define EMIT4_RRF(op, b1, b2, b3) \
+({ \
+ _EMIT4(op | reg_high(b3) << 8 | reg(b1, b2)); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
+ REG_SET_SEEN(b3); \
})
-#define EMIT4_PCREL(op, pcrel) \
-({ \
- long __pcrel = ((pcrel) >> 1) & 0xffff; \
- EMIT4(op | __pcrel); \
+#define _EMIT4_DISP(op, disp) \
+({ \
+ unsigned int __disp = (disp) & 0xfff; \
+ _EMIT4(op | __disp); \
})
-#define EMIT6(op1, op2) \
-({ \
- if (jit->prg + 6 <= jit->mid) { \
- *(u32 *) jit->prg = op1; \
- *(u16 *) (jit->prg + 4) = op2; \
- } \
- jit->prg += 6; \
+#define EMIT4_DISP(op, b1, b2, disp) \
+({ \
+ _EMIT4_DISP(op | reg_high(b1) << 16 | \
+ reg_high(b2) << 8, disp); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
})
-#define EMIT6_DISP(op1, op2, disp) \
-({ \
- unsigned int __disp = (disp) & 0xfff; \
- EMIT6(op1 | __disp, op2); \
+#define EMIT4_IMM(op, b1, imm) \
+({ \
+ unsigned int __imm = (imm) & 0xffff; \
+ _EMIT4(op | reg_high(b1) << 16 | __imm); \
+ REG_SET_SEEN(b1); \
})
-#define EMIT6_IMM(op, imm) \
-({ \
- unsigned int __imm = (imm); \
- EMIT6(op | (__imm >> 16), __imm & 0xffff); \
+#define EMIT4_PCREL(op, pcrel) \
+({ \
+ long __pcrel = ((pcrel) >> 1) & 0xffff; \
+ _EMIT4(op | __pcrel); \
})
-#define EMIT_CONST(val) \
-({ \
- unsigned int ret; \
- ret = (unsigned int) (jit->lit - jit->base_ip); \
- jit->seen |= SEEN_LITERAL; \
- if (jit->lit + 4 <= jit->end) \
- *(u32 *) jit->lit = val; \
- jit->lit += 4; \
- ret; \
+#define _EMIT6(op1, op2) \
+({ \
+ if (jit->prg_buf) { \
+ *(u32 *) (jit->prg_buf + jit->prg) = op1; \
+ *(u16 *) (jit->prg_buf + jit->prg + 4) = op2; \
+ } \
+ jit->prg += 6; \
})
-#define EMIT_FN_CONST(bit, fn) \
-({ \
- unsigned int ret; \
- ret = (unsigned int) (jit->lit - jit->base_ip); \
- if (jit->seen & bit) { \
- jit->seen |= SEEN_LITERAL; \
- if (jit->lit + 8 <= jit->end) \
- *(void **) jit->lit = fn; \
- jit->lit += 8; \
- } \
- ret; \
+#define _EMIT6_DISP(op1, op2, disp) \
+({ \
+ unsigned int __disp = (disp) & 0xfff; \
+ _EMIT6(op1 | __disp, op2); \
})
-static void bpf_jit_fill_hole(void *area, unsigned int size)
+#define EMIT6_DISP(op1, op2, b1, b2, b3, disp) \
+({ \
+ _EMIT6_DISP(op1 | reg(b1, b2) << 16 | \
+ reg_high(b3) << 8, op2, disp); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
+ REG_SET_SEEN(b3); \
+})
+
+#define _EMIT6_DISP_LH(op1, op2, disp) \
+({ \
+ unsigned int __disp_h = ((u32)disp) & 0xff000; \
+ unsigned int __disp_l = ((u32)disp) & 0x00fff; \
+ _EMIT6(op1 | __disp_l, op2 | __disp_h >> 4); \
+})
+
+#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \
+({ \
+ _EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 | \
+ reg_high(b3) << 8, op2, disp); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
+ REG_SET_SEEN(b3); \
+})
+
+#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
+({ \
+ /* Branch instruction needs 6 bytes */ \
+ int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
+ _EMIT6(op1 | reg(b1, b2) << 16 | rel, op2 | mask); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
+})
+
+#define _EMIT6_IMM(op, imm) \
+({ \
+ unsigned int __imm = (imm); \
+ _EMIT6(op | (__imm >> 16), __imm & 0xffff); \
+})
+
+#define EMIT6_IMM(op, b1, imm) \
+({ \
+ _EMIT6_IMM(op | reg_high(b1) << 16, imm); \
+ REG_SET_SEEN(b1); \
+})
+
+#define EMIT_CONST_U32(val) \
+({ \
+ unsigned int ret; \
+ ret = jit->lit - jit->base_ip; \
+ jit->seen |= SEEN_LITERAL; \
+ if (jit->prg_buf) \
+ *(u32 *) (jit->prg_buf + jit->lit) = (u32) val; \
+ jit->lit += 4; \
+ ret; \
+})
+
+#define EMIT_CONST_U64(val) \
+({ \
+ unsigned int ret; \
+ ret = jit->lit - jit->base_ip; \
+ jit->seen |= SEEN_LITERAL; \
+ if (jit->prg_buf) \
+ *(u64 *) (jit->prg_buf + jit->lit) = (u64) val; \
+ jit->lit += 8; \
+ ret; \
+})
+
+#define EMIT_ZERO(b1) \
+({ \
+ /* llgfr %dst,%dst (zero extend to 64 bit) */ \
+ EMIT4(0xb9160000, b1, b1); \
+ REG_SET_SEEN(b1); \
+})
+
+/*
+ * Fill whole space with illegal instructions
+ */
+static void jit_fill_hole(void *area, unsigned int size)
{
- /* Fill whole space with illegal instructions */
memset(area, 0, size);
}
-static void bpf_jit_prologue(struct bpf_jit *jit)
+/*
+ * Save registers from "rs" (register start) to "re" (register end) on stack
+ */
+static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
+{
+ u32 off = 72 + (rs - 6) * 8;
+
+ if (rs == re)
+ /* stg %rs,off(%r15) */
+ _EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
+ else
+ /* stmg %rs,%re,off(%r15) */
+ _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
+}
+
+/*
+ * Restore registers from "rs" (register start) to "re" (register end) on stack
+ */
+static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
- /* Save registers and create stack frame if necessary */
- if (jit->seen & SEEN_DATAREF) {
- /* stmg %r8,%r15,88(%r15) */
- EMIT6(0xeb8ff058, 0x0024);
- /* lgr %r14,%r15 */
- EMIT4(0xb90400ef);
- /* aghi %r15,<offset> */
- EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
- /* stg %r14,152(%r15) */
- EMIT6(0xe3e0f098, 0x0024);
- } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
- /* stmg %r12,%r13,120(%r15) */
- EMIT6(0xebcdf078, 0x0024);
- else if (jit->seen & SEEN_XREG)
- /* stg %r12,120(%r15) */
- EMIT6(0xe3c0f078, 0x0024);
- else if (jit->seen & SEEN_LITERAL)
- /* stg %r13,128(%r15) */
- EMIT6(0xe3d0f080, 0x0024);
+ u32 off = 72 + (rs - 6) * 8;
+
+ if (jit->seen & SEEN_STACK)
+ off += STK_OFF;
+
+ if (rs == re)
+ /* lg %rs,off(%r15) */
+ _EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
+ else
+ /* lmg %rs,%re,off(%r15) */
+ _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
+}
+/*
+ * Return first seen register (from start)
+ */
+static int get_start(struct bpf_jit *jit, int start)
+{
+ int i;
+
+ for (i = start; i <= 15; i++) {
+ if (jit->seen_reg[i])
+ return i;
+ }
+ return 0;
+}
+
+/*
+ * Return last seen register (from start) (gap >= 2)
+ */
+static int get_end(struct bpf_jit *jit, int start)
+{
+ int i;
+
+ for (i = start; i < 15; i++) {
+ if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
+ return i - 1;
+ }
+ return jit->seen_reg[15] ? 15 : 14;
+}
+
+#define REGS_SAVE 1
+#define REGS_RESTORE 0
+/*
+ * Save and restore clobbered registers (6-15) on the stack.
+ * We save/restore registers in chunks; a chunk ends at a gap of two or more unused registers.
+ */
+static void save_restore_regs(struct bpf_jit *jit, int op)
+{
+
+ int re = 6, rs;
+
+ do {
+ rs = get_start(jit, re);
+ if (!rs)
+ break;
+ re = get_end(jit, rs + 1);
+ if (op == REGS_SAVE)
+ save_regs(jit, rs, re);
+ else
+ restore_regs(jit, rs, re);
+ re++;
+ } while (re <= 15);
+}
+
+/*
+ * Emit function prologue
+ *
+ * Save registers and create stack frame if necessary.
+ * See stack frame layout description in "bpf_jit.h"!
+ */
+static void bpf_jit_prologue(struct bpf_jit *jit)
+{
+ /* Save registers */
+ save_restore_regs(jit, REGS_SAVE);
/* Setup literal pool */
if (jit->seen & SEEN_LITERAL) {
/* basr %r13,0 */
- EMIT2(0x0dd0);
+ EMIT2(0x0d00, REG_L, REG_0);
jit->base_ip = jit->prg;
}
- jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
- jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
- jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
- jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
- jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
- jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
- jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);
-
- /* Filter needs to access skb data */
- if (jit->seen & SEEN_DATAREF) {
- /* l %r11,<len>(%r2) */
- EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
- /* s %r11,<data_len>(%r2) */
- EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
- /* lg %r10,<data>(%r2) */
- EMIT6_DISP(0xe3a02000, 0x0004,
- offsetof(struct sk_buff, data));
+ /* Setup stack and backchain */
+ if (jit->seen & SEEN_STACK) {
+ /* lgr %bfp,%r15 (BPF frame pointer) */
+ EMIT4(0xb9040000, BPF_REG_FP, REG_15);
+ /* aghi %r15,-STK_OFF */
+ EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
+ if (jit->seen & SEEN_FUNC)
+ /* stg %bfp,152(%r15) (backchain) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0,
+ REG_15, 152);
+ }
+ /*
+ * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
+ * we store the SKB header length on the stack and the SKB data
+ * pointer in REG_SKB_DATA.
+ */
+ if (jit->seen & SEEN_SKB) {
+ /* Header length: llgf %w1,<len>(%b1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
+ offsetof(struct sk_buff, len));
+ /* s %w1,<data_len>(%b1) */
+ EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
+ offsetof(struct sk_buff, data_len));
+ /* stg %w1,STK_OFF_HLEN(%r0,%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+ STK_OFF_HLEN);
+ /* lg %skb_data,data_off(%b1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+ BPF_REG_1, offsetof(struct sk_buff, data));
}
+ /* BPF compatibility: clear A (%b7) and X (%b8) registers */
+ if (REG_SEEN(BPF_REG_7))
+ /* lghi %b7,0 */
+ EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
+ if (REG_SEEN(BPF_REG_8))
+ /* lghi %b8,0 */
+ EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
}
+/*
+ * Function epilogue
+ */
static void bpf_jit_epilogue(struct bpf_jit *jit)
{
/* Return 0 */
if (jit->seen & SEEN_RET0) {
jit->ret0_ip = jit->prg;
- /* lghi %r2,0 */
- EMIT4(0xa7290000);
+ /* lghi %b0,0 */
+ EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
}
jit->exit_ip = jit->prg;
+ /* Load exit code: lgr %r2,%b0 */
+ EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
- if (jit->seen & SEEN_DATAREF)
- /* lmg %r8,%r15,<offset>(%r15) */
- EMIT6_DISP(0xeb8ff000, 0x0004,
- (jit->seen & SEEN_MEM) ? 200 : 168);
- else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
- /* lmg %r12,%r13,120(%r15) */
- EMIT6(0xebcdf078, 0x0004);
- else if (jit->seen & SEEN_XREG)
- /* lg %r12,120(%r15) */
- EMIT6(0xe3c0f078, 0x0004);
- else if (jit->seen & SEEN_LITERAL)
- /* lg %r13,128(%r15) */
- EMIT6(0xe3d0f080, 0x0004);
+ save_restore_regs(jit, REGS_RESTORE);
/* br %r14 */
- EMIT2(0x07fe);
+ _EMIT2(0x07fe);
}
/*
- * make sure we dont leak kernel information to user
+ * Compile one eBPF instruction into s390x code
*/
-static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
+static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
{
- /* Clear temporary memory if (seen & SEEN_MEM) */
- if (jit->seen & SEEN_MEM)
- /* xc 0(64,%r15),0(%r15) */
- EMIT6(0xd73ff000, 0xf000);
- /* Clear X if (seen & SEEN_XREG) */
- if (jit->seen & SEEN_XREG)
- /* lhi %r12,0 */
- EMIT4(0xa7c80000);
- /* Clear A if the first register does not set it. */
- switch (filter[0].code) {
- case BPF_LD | BPF_W | BPF_ABS:
- case BPF_LD | BPF_H | BPF_ABS:
- case BPF_LD | BPF_B | BPF_ABS:
- case BPF_LD | BPF_W | BPF_LEN:
- case BPF_LD | BPF_W | BPF_IND:
- case BPF_LD | BPF_H | BPF_IND:
- case BPF_LD | BPF_B | BPF_IND:
- case BPF_LD | BPF_IMM:
- case BPF_LD | BPF_MEM:
- case BPF_MISC | BPF_TXA:
- case BPF_RET | BPF_K:
- /* first instruction sets A register */
- break;
- default: /* A = 0 */
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
- }
-}
+ struct bpf_insn *insn = &fp->insnsi[i];
+ int jmp_off, last, insn_count = 1;
+ unsigned int func_addr, mask;
+ u32 dst_reg = insn->dst_reg;
+ u32 src_reg = insn->src_reg;
+ u32 *addrs = jit->addrs;
+ s32 imm = insn->imm;
+ s16 off = insn->off;
-static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
- unsigned int *addrs, int i, int last)
-{
- unsigned int K;
- int offset;
- unsigned int mask;
- u16 code;
-
- K = filter->k;
- code = bpf_anc_helper(filter);
-
- switch (code) {
- case BPF_ALU | BPF_ADD | BPF_X: /* A += X */
- jit->seen |= SEEN_XREG;
- /* ar %r5,%r12 */
- EMIT2(0x1a5c);
- break;
- case BPF_ALU | BPF_ADD | BPF_K: /* A += K */
- if (!K)
+ switch (insn->code) {
+ /*
+ * BPF_MOV
+ */
+ case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
+ /* llgfr %dst,%src */
+ EMIT4(0xb9160000, dst_reg, src_reg);
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+ /* lgr %dst,%src */
+ EMIT4(0xb9040000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
+ /* llilf %dst,imm */
+ EMIT6_IMM(0xc00f0000, dst_reg, imm);
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
+ /* lgfi %dst,imm */
+ EMIT6_IMM(0xc0010000, dst_reg, imm);
+ break;
+ /*
+ * BPF_LD 64
+ */
+ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
+ {
+ /* 16 byte instruction that uses two 'struct bpf_insn' */
+ u64 imm64;
+
+ imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
+ /* lg %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm64));
+ insn_count = 2;
+ break;
+ }
+ /*
+ * BPF_ADD
+ */
+ case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
+ /* ar %dst,%src */
+ EMIT2(0x1a00, dst_reg, src_reg);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
+ /* agr %dst,%src */
+ EMIT4(0xb9080000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
+ if (!imm)
break;
- if (K <= 16383)
- /* ahi %r5,<K> */
- EMIT4_IMM(0xa75a0000, K);
- else if (test_facility(21))
- /* alfi %r5,<K> */
- EMIT6_IMM(0xc25b0000, K);
- else
- /* a %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
+ /* alfi %dst,imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
+ if (!imm)
+ break;
+ /* agfi %dst,imm */
+ EMIT6_IMM(0xc2080000, dst_reg, imm);
+ break;
+ /*
+ * BPF_SUB
+ */
+ case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
+ /* sr %dst,%src */
+ EMIT2(0x1b00, dst_reg, src_reg);
+ EMIT_ZERO(dst_reg);
break;
- case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */
- jit->seen |= SEEN_XREG;
- /* sr %r5,%r12 */
- EMIT2(0x1b5c);
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
+ /* sgr %dst,%src */
+ EMIT4(0xb9090000, dst_reg, src_reg);
break;
- case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
- if (!K)
+ case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
+ if (!imm)
break;
- if (K <= 16384)
- /* ahi %r5,-K */
- EMIT4_IMM(0xa75a0000, -K);
- else if (test_facility(21))
- /* alfi %r5,-K */
- EMIT6_IMM(0xc25b0000, -K);
- else
- /* s %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
- break;
- case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */
- jit->seen |= SEEN_XREG;
- /* msr %r5,%r12 */
- EMIT4(0xb252005c);
- break;
- case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
- if (K <= 16383)
- /* mhi %r5,K */
- EMIT4_IMM(0xa75c0000, K);
- else if (test_facility(34))
- /* msfi %r5,<K> */
- EMIT6_IMM(0xc2510000, K);
- else
- /* ms %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x7150d000, EMIT_CONST(K));
+ /* alfi %dst,-imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ EMIT_ZERO(dst_reg);
break;
- case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */
- jit->seen |= SEEN_XREG | SEEN_RET0;
- /* ltr %r12,%r12 */
- EMIT2(0x12cc);
- /* jz <ret0> */
- EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
- /* lhi %r4,0 */
- EMIT4(0xa7480000);
- /* dlr %r4,%r12 */
- EMIT4(0xb997004c);
- break;
- case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
- if (K == 1)
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
+ if (!imm)
+ break;
+ /* agfi %dst,-imm */
+ EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ break;
+ /*
+ * BPF_MUL
+ */
+ case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
+ /* msr %dst,%src */
+ EMIT4(0xb2520000, dst_reg, src_reg);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
+ /* msgr %dst,%src */
+ EMIT4(0xb90c0000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
+ if (imm == 1)
+ break;
+ /* msfi %dst,imm */
+ EMIT6_IMM(0xc2010000, dst_reg, imm);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
+ if (imm == 1)
break;
- /* lhi %r4,0 */
- EMIT4(0xa7480000);
- /* dl %r4,<d(K)>(%r13) */
- EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
- break;
- case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */
- jit->seen |= SEEN_XREG | SEEN_RET0;
- /* ltr %r12,%r12 */
- EMIT2(0x12cc);
+ /* msgfi %dst,imm */
+ EMIT6_IMM(0xc2000000, dst_reg, imm);
+ break;
+ /*
+ * BPF_DIV / BPF_MOD
+ */
+ case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
+ case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
+ {
+ int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
+
+ jit->seen |= SEEN_RET0;
+ /* ltr %src,%src (if src == 0 goto fail) */
+ EMIT2(0x1200, src_reg, src_reg);
+ /* jz <ret0> */
+ EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
+ /* lhi %w0,0 */
+ EMIT4_IMM(0xa7080000, REG_W0, 0);
+ /* lr %w1,%dst */
+ EMIT2(0x1800, REG_W1, dst_reg);
+ /* dlr %w0,%src */
+ EMIT4(0xb9970000, REG_W0, src_reg);
+ /* llgfr %dst,%rc */
+ EMIT4(0xb9160000, dst_reg, rc_reg);
+ break;
+ }
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
+ {
+ int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
+
+ jit->seen |= SEEN_RET0;
+ /* ltgr %src,%src (if src == 0 goto fail) */
+ EMIT4(0xb9020000, src_reg, src_reg);
/* jz <ret0> */
- EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
- /* lhi %r4,0 */
- EMIT4(0xa7480000);
- /* dlr %r4,%r12 */
- EMIT4(0xb997004c);
- /* lr %r5,%r4 */
- EMIT2(0x1854);
- break;
- case BPF_ALU | BPF_MOD | BPF_K: /* A %= K */
- if (K == 1) {
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
+ EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
+ /* lghi %w0,0 */
+ EMIT4_IMM(0xa7090000, REG_W0, 0);
+ /* lgr %w1,%dst */
+ EMIT4(0xb9040000, REG_W1, dst_reg);
+ /* llgfr %dst,%src (u32 cast) */
+ EMIT4(0xb9160000, dst_reg, src_reg);
+ /* dlgr %w0,%dst */
+ EMIT4(0xb9870000, REG_W0, dst_reg);
+ /* lgr %dst,%rc */
+ EMIT4(0xb9040000, dst_reg, rc_reg);
+ break;
+ }
+ case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
+ case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
+ {
+ int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
+
+ if (imm == 1) {
+ if (BPF_OP(insn->code) == BPF_MOD)
+ /* lghi %dst,0 */
+ EMIT4_IMM(0xa7090000, dst_reg, 0);
break;
}
- /* lhi %r4,0 */
- EMIT4(0xa7480000);
- /* dl %r4,<d(K)>(%r13) */
- EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
- /* lr %r5,%r4 */
- EMIT2(0x1854);
- break;
- case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
- jit->seen |= SEEN_XREG;
- /* nr %r5,%r12 */
- EMIT2(0x145c);
- break;
- case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
- if (test_facility(21))
- /* nilf %r5,<K> */
- EMIT6_IMM(0xc05b0000, K);
- else
- /* n %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5450d000, EMIT_CONST(K));
- break;
- case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
- jit->seen |= SEEN_XREG;
- /* or %r5,%r12 */
- EMIT2(0x165c);
- break;
- case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
- if (test_facility(21))
- /* oilf %r5,<K> */
- EMIT6_IMM(0xc05d0000, K);
- else
- /* o %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5650d000, EMIT_CONST(K));
+ /* lhi %w0,0 */
+ EMIT4_IMM(0xa7080000, REG_W0, 0);
+ /* lr %w1,%dst */
+ EMIT2(0x1800, REG_W1, dst_reg);
+ /* dl %w0,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
+ EMIT_CONST_U32(imm));
+ /* llgfr %dst,%rc */
+ EMIT4(0xb9160000, dst_reg, rc_reg);
+ break;
+ }
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
+ {
+ int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
+
+ if (imm == 1) {
+ if (BPF_OP(insn->code) == BPF_MOD)
+ /* lghi %dst,0 */
+ EMIT4_IMM(0xa7090000, dst_reg, 0);
+ break;
+ }
+ /* lghi %w0,0 */
+ EMIT4_IMM(0xa7090000, REG_W0, 0);
+ /* lgr %w1,%dst */
+ EMIT4(0xb9040000, REG_W1, dst_reg);
+ /* dlg %w0,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
+ EMIT_CONST_U64((u32) imm));
+ /* lgr %dst,%rc */
+ EMIT4(0xb9040000, dst_reg, rc_reg);
+ break;
+ }
+ /*
+ * BPF_AND
+ */
+ case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
+ /* nr %dst,%src */
+ EMIT2(0x1400, dst_reg, src_reg);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
+ /* ngr %dst,%src */
+ EMIT4(0xb9800000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
+ /* nilf %dst,imm */
+ EMIT6_IMM(0xc00b0000, dst_reg, imm);
+ EMIT_ZERO(dst_reg);
break;
- case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
- case BPF_ALU | BPF_XOR | BPF_X:
- jit->seen |= SEEN_XREG;
- /* xr %r5,%r12 */
- EMIT2(0x175c);
+ case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
+ /* ng %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
break;
- case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
- if (!K)
+ /*
+ * BPF_OR
+ */
+ case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
+ /* or %dst,%src */
+ EMIT2(0x1600, dst_reg, src_reg);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
+ /* ogr %dst,%src */
+ EMIT4(0xb9810000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
+ /* oilf %dst,imm */
+ EMIT6_IMM(0xc00d0000, dst_reg, imm);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
+ /* og %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ break;
+ /*
+ * BPF_XOR
+ */
+ case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
+ /* xr %dst,%src */
+ EMIT2(0x1700, dst_reg, src_reg);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
+ /* xgr %dst,%src */
+ EMIT4(0xb9820000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
+ if (!imm)
break;
- /* x %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5750d000, EMIT_CONST(K));
+ /* xilf %dst,imm */
+ EMIT6_IMM(0xc0070000, dst_reg, imm);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
+ /* xg %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ break;
+ /*
+ * BPF_LSH
+ */
+ case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
+ /* sll %dst,0(%src) */
+ EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
+ EMIT_ZERO(dst_reg);
break;
- case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
- jit->seen |= SEEN_XREG;
- /* sll %r5,0(%r12) */
- EMIT4(0x8950c000);
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
+ /* sllg %dst,%dst,0(%src) */
+ EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
break;
- case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
- if (K == 0)
+ case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
+ if (imm == 0)
break;
- /* sll %r5,K */
- EMIT4_DISP(0x89500000, K);
+ /* sll %dst,imm(%r0) */
+ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ EMIT_ZERO(dst_reg);
break;
- case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
- jit->seen |= SEEN_XREG;
- /* srl %r5,0(%r12) */
- EMIT4(0x8850c000);
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
+ if (imm == 0)
+ break;
+ /* sllg %dst,%dst,imm(%r0) */
+ EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
+ break;
+ /*
+ * BPF_RSH
+ */
+ case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
+ /* srl %dst,0(%src) */
+ EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
+ EMIT_ZERO(dst_reg);
break;
- case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
- if (K == 0)
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
+ /* srlg %dst,%dst,0(%src) */
+ EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
+ if (imm == 0)
break;
- /* srl %r5,K */
- EMIT4_DISP(0x88500000, K);
- break;
- case BPF_ALU | BPF_NEG: /* A = -A */
- /* lcr %r5,%r5 */
- EMIT2(0x1355);
- break;
- case BPF_JMP | BPF_JA: /* ip += K */
- offset = addrs[i + K] + jit->start - jit->prg;
- EMIT4_PCREL(0xa7f40000, offset);
- break;
- case BPF_JMP | BPF_JGT | BPF_K: /* ip += (A > K) ? jt : jf */
- mask = 0x200000; /* jh */
- goto kbranch;
- case BPF_JMP | BPF_JGE | BPF_K: /* ip += (A >= K) ? jt : jf */
- mask = 0xa00000; /* jhe */
- goto kbranch;
- case BPF_JMP | BPF_JEQ | BPF_K: /* ip += (A == K) ? jt : jf */
- mask = 0x800000; /* je */
-kbranch: /* Emit compare if the branch targets are different */
- if (filter->jt != filter->jf) {
- if (test_facility(21))
- /* clfi %r5,<K> */
- EMIT6_IMM(0xc25f0000, K);
- else
- /* cl %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5550d000, EMIT_CONST(K));
- }
-branch: if (filter->jt == filter->jf) {
- if (filter->jt == 0)
- break;
- /* j <jt> */
- offset = addrs[i + filter->jt] + jit->start - jit->prg;
- EMIT4_PCREL(0xa7f40000, offset);
+ /* srl %dst,imm(%r0) */
+ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
+ if (imm == 0)
break;
- }
- if (filter->jt != 0) {
- /* brc <mask>,<jt> */
- offset = addrs[i + filter->jt] + jit->start - jit->prg;
- EMIT4_PCREL(0xa7040000 | mask, offset);
- }
- if (filter->jf != 0) {
- /* brc <mask^15>,<jf> */
- offset = addrs[i + filter->jf] + jit->start - jit->prg;
- EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
- }
+ /* srlg %dst,%dst,imm(%r0) */
+ EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
break;
- case BPF_JMP | BPF_JSET | BPF_K: /* ip += (A & K) ? jt : jf */
- mask = 0x700000; /* jnz */
- /* Emit test if the branch targets are different */
- if (filter->jt != filter->jf) {
- if (K > 65535) {
- /* lr %r4,%r5 */
- EMIT2(0x1845);
- /* n %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5440d000, EMIT_CONST(K));
- } else
- /* tmll %r5,K */
- EMIT4_IMM(0xa7510000, K);
- }
- goto branch;
- case BPF_JMP | BPF_JGT | BPF_X: /* ip += (A > X) ? jt : jf */
- mask = 0x200000; /* jh */
- goto xbranch;
- case BPF_JMP | BPF_JGE | BPF_X: /* ip += (A >= X) ? jt : jf */
- mask = 0xa00000; /* jhe */
- goto xbranch;
- case BPF_JMP | BPF_JEQ | BPF_X: /* ip += (A == X) ? jt : jf */
- mask = 0x800000; /* je */
-xbranch: /* Emit compare if the branch targets are different */
- if (filter->jt != filter->jf) {
- jit->seen |= SEEN_XREG;
- /* clr %r5,%r12 */
- EMIT2(0x155c);
- }
- goto branch;
- case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
- mask = 0x700000; /* jnz */
- /* Emit test if the branch targets are different */
- if (filter->jt != filter->jf) {
- jit->seen |= SEEN_XREG;
- /* lr %r4,%r5 */
- EMIT2(0x1845);
- /* nr %r4,%r12 */
- EMIT2(0x144c);
+ /*
+ * BPF_ARSH
+ */
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
+ /* srag %dst,%dst,0(%src) */
+ EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
+ if (imm == 0)
+ break;
+ /* srag %dst,%dst,imm(%r0) */
+ EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
+ break;
+ /*
+ * BPF_NEG
+ */
+ case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
+ /* lcr %dst,%dst */
+ EMIT2(0x1300, dst_reg, dst_reg);
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_NEG: /* dst = -dst */
+ /* lcgr %dst,%dst */
+ EMIT4(0xb9130000, dst_reg, dst_reg);
+ break;
+ /*
+ * BPF_FROM_BE/LE
+ */
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ /* s390 is big endian, therefore only clear high order bytes */
+ switch (imm) {
+ case 16: /* dst = (u16) cpu_to_be16(dst) */
+ /* llghr %dst,%dst */
+ EMIT4(0xb9850000, dst_reg, dst_reg);
+ break;
+ case 32: /* dst = (u32) cpu_to_be32(dst) */
+ /* llgfr %dst,%dst */
+ EMIT4(0xb9160000, dst_reg, dst_reg);
+ break;
+ case 64: /* dst = (u64) cpu_to_be64(dst) */
+ break;
}
- goto branch;
- case BPF_LD | BPF_W | BPF_ABS: /* A = *(u32 *) (skb->data+K) */
- jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
- offset = jit->off_load_word;
- goto load_abs;
- case BPF_LD | BPF_H | BPF_ABS: /* A = *(u16 *) (skb->data+K) */
- jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
- offset = jit->off_load_half;
- goto load_abs;
- case BPF_LD | BPF_B | BPF_ABS: /* A = *(u8 *) (skb->data+K) */
- jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
- offset = jit->off_load_byte;
-load_abs: if ((int) K < 0)
- goto out;
-call_fn: /* lg %r1,<d(function)>(%r13) */
- EMIT6_DISP(0xe310d000, 0x0004, offset);
- /* l %r3,<d(K)>(%r13) */
- EMIT4_DISP(0x5830d000, EMIT_CONST(K));
- /* basr %r8,%r1 */
- EMIT2(0x0d81);
- /* jnz <ret0> */
- EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
break;
- case BPF_LD | BPF_W | BPF_IND: /* A = *(u32 *) (skb->data+K+X) */
- jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
- offset = jit->off_load_iword;
- goto call_fn;
- case BPF_LD | BPF_H | BPF_IND: /* A = *(u16 *) (skb->data+K+X) */
- jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
- offset = jit->off_load_ihalf;
- goto call_fn;
- case BPF_LD | BPF_B | BPF_IND: /* A = *(u8 *) (skb->data+K+X) */
- jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
- offset = jit->off_load_ibyte;
- goto call_fn;
- case BPF_LDX | BPF_B | BPF_MSH:
- /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
- jit->seen |= SEEN_RET0;
- if ((int) K < 0) {
- /* j <ret0> */
- EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ switch (imm) {
+ case 16: /* dst = (u16) cpu_to_le16(dst) */
+ /* lrvr %dst,%dst */
+ EMIT4(0xb91f0000, dst_reg, dst_reg);
+ /* srl %dst,16(%r0) */
+ EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
+ /* llghr %dst,%dst */
+ EMIT4(0xb9850000, dst_reg, dst_reg);
+ break;
+ case 32: /* dst = (u32) cpu_to_le32(dst) */
+ /* lrvr %dst,%dst */
+ EMIT4(0xb91f0000, dst_reg, dst_reg);
+ /* llgfr %dst,%dst */
+ EMIT4(0xb9160000, dst_reg, dst_reg);
+ break;
+ case 64: /* dst = (u64) cpu_to_le64(dst) */
+ /* lrvgr %dst,%dst */
+ EMIT4(0xb90f0000, dst_reg, dst_reg);
break;
}
- jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
- offset = jit->off_load_bmsh;
- goto call_fn;
- case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
- /* l %r5,<d(len)>(%r2) */
- EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
- break;
- case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
- jit->seen |= SEEN_XREG;
- /* l %r12,<d(len)>(%r2) */
- EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
- break;
- case BPF_LD | BPF_IMM: /* A = K */
- if (K <= 16383)
- /* lhi %r5,K */
- EMIT4_IMM(0xa7580000, K);
- else if (test_facility(21))
- /* llilf %r5,<K> */
- EMIT6_IMM(0xc05f0000, K);
- else
- /* l %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5850d000, EMIT_CONST(K));
- break;
- case BPF_LDX | BPF_IMM: /* X = K */
- jit->seen |= SEEN_XREG;
- if (K <= 16383)
- /* lhi %r12,<K> */
- EMIT4_IMM(0xa7c80000, K);
- else if (test_facility(21))
- /* llilf %r12,<K> */
- EMIT6_IMM(0xc0cf0000, K);
- else
- /* l %r12,<d(K)>(%r13) */
- EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
break;
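Since s390 is big endian, the FROM_LE cases above are where actual byte reversal happens. As a quick sanity check, a plain C trace of the 16-bit sequence (a sketch for illustration, not taken from the patch; __builtin_bswap32 stands in for lrvr):

#include <stdio.h>
#include <stdint.h>

/* lrvr = 32-bit byte reverse, srl 16 = logical shift right by 16,
 * llghr = zero-extend the low 16 bits into a 64-bit register */
static uint64_t from_le16(uint64_t dst)
{
	uint32_t w = (uint32_t) dst;

	w = __builtin_bswap32(w);	/* lrvr  %dst,%dst    */
	w >>= 16;			/* srl   %dst,16(%r0) */
	return (uint16_t) w;		/* llghr %dst,%dst    */
}

int main(void)
{
	/* the low halfword 0x7788 comes back byte swapped as 0x8877 */
	printf("%#llx\n", (unsigned long long) from_le16(0x1122334455667788ULL));
	return 0;
}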
- case BPF_LD | BPF_MEM: /* A = mem[K] */
+ /*
+ * BPF_ST(X)
+ */
+ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
+ /* stcy %src,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
+ /* sthy %src,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
+ /* sty %src,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
+ /* stg %src,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+ /* lhi %w0,imm */
+ EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
+ /* stcy %w0,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
+ /* lhi %w0,imm */
+ EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
+ /* sthy %w0,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
jit->seen |= SEEN_MEM;
- /* l %r5,<K>(%r15) */
- EMIT4_DISP(0x5850f000,
- (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
- break;
- case BPF_LDX | BPF_MEM: /* X = mem[K] */
- jit->seen |= SEEN_XREG | SEEN_MEM;
- /* l %r12,<K>(%r15) */
- EMIT4_DISP(0x58c0f000,
- (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
- break;
- case BPF_MISC | BPF_TAX: /* X = A */
- jit->seen |= SEEN_XREG;
- /* lr %r12,%r5 */
- EMIT2(0x18c5);
- break;
- case BPF_MISC | BPF_TXA: /* A = X */
- jit->seen |= SEEN_XREG;
- /* lr %r5,%r12 */
- EMIT2(0x185c);
- break;
- case BPF_RET | BPF_K:
- if (K == 0) {
- jit->seen |= SEEN_RET0;
- if (last)
- break;
- /* j <ret0> */
- EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
- } else {
- if (K <= 16383)
- /* lghi %r2,K */
- EMIT4_IMM(0xa7290000, K);
- else
- /* llgf %r2,<K>(%r13) */
- EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
- /* j <exit> */
- if (last && !(jit->seen & SEEN_RET0))
- break;
- EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
- }
break;
- case BPF_RET | BPF_A:
- /* llgfr %r2,%r5 */
- EMIT4(0xb9160025);
+ case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+ /* llilf %w0,imm */
+ EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
+ /* sty %w0,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+ /* lgfi %w0,imm */
+ EMIT6_IMM(0xc0010000, REG_W0, imm);
+ /* stg %w0,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ /*
+ * BPF_STX XADD (atomic_add)
+ */
+ case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
+ /* laal %w0,%src,off(%dst) */
+ EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
+ dst_reg, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
+ /* laalg %w0,%src,off(%dst) */
+ EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
+ dst_reg, off);
+ jit->seen |= SEEN_MEM;
+ break;
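For orientation, these XADD cases back the usual atomic-counter idiom in C-written eBPF programs: the LLVM BPF backend typically lowers __sync_fetch_and_add() to BPF_STX | BPF_XADD, which then lands on laal/laalg here. A minimal sketch of that source form (the pointer name is illustrative, not from the patch):

/* the 64-bit variant of this ends up as BPF_STX | BPF_XADD | BPF_DW */
static void bump(long *counter)
{
	__sync_fetch_and_add(counter, 1);
}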
+ /*
+ * BPF_LDX
+ */
+ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
+ /* llgc %dst,0(off,%src) */
+ EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
+ /* llgh %dst,0(off,%src) */
+ EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
+ jit->seen |= SEEN_MEM;
+ break;
+ case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
+ /* llgf %dst,off(%src) */
+ jit->seen |= SEEN_MEM;
+ EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
+ break;
+ case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
+ /* lg %dst,0(off,%src) */
+ jit->seen |= SEEN_MEM;
+ EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
+ break;
+ /*
+ * BPF_JMP / CALL
+ */
+ case BPF_JMP | BPF_CALL:
+ {
+ /*
+ * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5)
+ */
+ const u64 func = (u64)__bpf_call_base + imm;
+
+ REG_SET_SEEN(BPF_REG_5);
+ jit->seen |= SEEN_FUNC;
+ /* lg %w1,<d(func)>(%l) */
+ EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+ EMIT_CONST_U64(func));
+ /* basr %r14,%w1 */
+ EMIT2(0x0d00, REG_14, REG_W1);
+ /* lgr %b0,%r2: load return value into %b0 */
+ EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ break;
+ }
+ case BPF_JMP | BPF_EXIT: /* return b0 */
+ last = (i == fp->len - 1) ? 1 : 0;
+ if (last && !(jit->seen & SEEN_RET0))
+ break;
/* j <exit> */
EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
break;
- case BPF_ST: /* mem[K] = A */
- jit->seen |= SEEN_MEM;
- /* st %r5,<K>(%r15) */
- EMIT4_DISP(0x5050f000,
- (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
- break;
- case BPF_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
- jit->seen |= SEEN_XREG | SEEN_MEM;
- /* st %r12,<K>(%r15) */
- EMIT4_DISP(0x50c0f000,
- (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
- break;
- case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
- /* icm %r5,3,<d(protocol)>(%r2) */
- EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
- break;
- case BPF_ANC | SKF_AD_IFINDEX: /* if (!skb->dev) return 0;
- * A = skb->dev->ifindex */
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
- jit->seen |= SEEN_RET0;
- /* lg %r1,<d(dev)>(%r2) */
- EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
- /* ltgr %r1,%r1 */
- EMIT4(0xb9020011);
- /* jz <ret0> */
- EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
- /* l %r5,<d(ifindex)>(%r1) */
- EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
- break;
- case BPF_ANC | SKF_AD_MARK: /* A = skb->mark */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
- /* l %r5,<d(mark)>(%r2) */
- EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
- break;
- case BPF_ANC | SKF_AD_QUEUE: /* A = skb->queue_mapping */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
- /* icm %r5,3,<d(queue_mapping)>(%r2) */
- EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
- break;
- case BPF_ANC | SKF_AD_HATYPE: /* if (!skb->dev) return 0;
- * A = skb->dev->type */
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
- jit->seen |= SEEN_RET0;
- /* lg %r1,<d(dev)>(%r2) */
- EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
- /* ltgr %r1,%r1 */
- EMIT4(0xb9020011);
- /* jz <ret0> */
- EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
- /* icm %r5,3,<d(type)>(%r1) */
- EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
- break;
- case BPF_ANC | SKF_AD_RXHASH: /* A = skb->hash */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- /* l %r5,<d(hash)>(%r2) */
- EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
- break;
- case BPF_ANC | SKF_AD_VLAN_TAG:
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
- /* icm %r5,3,<d(vlan_tci)>(%r2) */
- EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
- if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
- /* nill %r5,0xefff */
- EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
- } else {
- /* nill %r5,0x1000 */
- EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
- /* srl %r5,12 */
- EMIT4_DISP(0x88500000, 12);
- }
+ /*
+ * Branch relative to an offset (given as the number of skipped
+ * instructions) on condition.
+ *
+ * Condition code to mask mapping:
+ *
+ * CC | Description | Mask
+ * ------------------------------
+ * 0 | Operands equal | 8
+ * 1 | First operand low | 4
+ * 2 | First operand high | 2
+ * 3 | Unused | 1
+ *
+ * For s390x relative branches: ip = ip + off_bytes
+ * For BPF relative branches: insn = insn + off_insns + 1
+ *
+ * For example, with offset 0 an s390x branch jumps to the branch
+ * instruction itself (an endless loop), while a BPF branch with
+ * offset 0 goes to the instruction that follows the branch.
+ */
+ case BPF_JMP | BPF_JA: /* if (true) */
+ mask = 0xf000; /* j */
+ goto branch_oc;
+ case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
+ mask = 0x2000; /* jh */
+ goto branch_ks;
+ case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
+ mask = 0xa000; /* jhe */
+ goto branch_ks;
+ case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
+ mask = 0x2000; /* jh */
+ goto branch_ku;
+ case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
+ mask = 0xa000; /* jhe */
+ goto branch_ku;
+ case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
+ mask = 0x7000; /* jne */
+ goto branch_ku;
+ case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
+ mask = 0x8000; /* je */
+ goto branch_ku;
+ case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
+ mask = 0x7000; /* jnz */
+ /* lgfi %w1,imm (load sign extend imm) */
+ EMIT6_IMM(0xc0010000, REG_W1, imm);
+ /* ngr %w1,%dst */
+ EMIT4(0xb9800000, REG_W1, dst_reg);
+ goto branch_oc;
+
+ case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
+ mask = 0x2000; /* jh */
+ goto branch_xs;
+ case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
+ mask = 0xa000; /* jhe */
+ goto branch_xs;
+ case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
+ mask = 0x2000; /* jh */
+ goto branch_xu;
+ case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
+ mask = 0xa000; /* jhe */
+ goto branch_xu;
+ case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
+ mask = 0x7000; /* jne */
+ goto branch_xu;
+ case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
+ mask = 0x8000; /* je */
+ goto branch_xu;
+ case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
+ mask = 0x7000; /* jnz */
+ /* ngrk %w1,%dst,%src */
+ EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
+ goto branch_oc;
+branch_ks:
+ /* lgfi %w1,imm (load sign extend imm) */
+ EMIT6_IMM(0xc0010000, REG_W1, imm);
+ /* cgrj %dst,%w1,mask,off */
+ EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
+ break;
+branch_ku:
+ /* lgfi %w1,imm (load sign extend imm) */
+ EMIT6_IMM(0xc0010000, REG_W1, imm);
+ /* clgrj %dst,%w1,mask,off */
+ EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
+ break;
+branch_xs:
+ /* cgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
+ break;
+branch_xu:
+ /* clgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
+ break;
+branch_oc:
+ /* brc mask,jmp_off (branch instruction needs 4 bytes) */
+ jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
+ EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
break;
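To make the comment block above concrete, a standalone sketch of how the condition mask and the branch displacement are formed (illustration only, not taken from the patch; the addrs[] values are made up):

#include <stdio.h>

/* The 4-bit condition mask is an OR of the CC bits from the table above
 * (CC0 = 8, CC1 = 4, CC2 = 2, CC3 = 1), kept pre-shifted into bits 12-15,
 * which is why "jhe" is 0xa000 and why branch_oc shifts the mask by a
 * further 8 bits into the brc opcode. */
static unsigned int brc_opcode(unsigned int mask)
{
	return 0xa7040000 | (mask << 8);
}

/* addrs[k + 1] holds the byte offset of the code emitted after BPF insn k,
 * so addrs[i + 1] - 4 is the address of the 4-byte brc emitted for insn i
 * and the result is the byte distance from the brc to its target. */
static int jmp_off(const int *addrs, int i, int off)
{
	return addrs[i + off + 1] - (addrs[i + 1] - 4);
}

int main(void)
{
	unsigned int jhe = (8 | 2) << 12;		/* CC0 | CC2 = 0xa000 */
	int addrs[] = { 0x00, 0x58, 0x60, 0x68 };	/* made-up layout */

	printf("%#x\n", brc_opcode(jhe));	/* 0xa7a40000, i.e. "jhe" */
	printf("%d\n", jmp_off(addrs, 1, 0));	/* 4: the very next insn */
	printf("%d\n", jmp_off(addrs, 1, 1));	/* 12: skip one insn */
	return 0;
}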
- case BPF_ANC | SKF_AD_PKTTYPE:
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
- /* ic %r5,<d(pkt_type_offset)>(%r2) */
- EMIT4_DISP(0x43502000, PKT_TYPE_OFFSET());
- /* srl %r5,5 */
- EMIT4_DISP(0x88500000, 5);
- break;
- case BPF_ANC | SKF_AD_CPU: /* A = smp_processor_id() */
-#ifdef CONFIG_SMP
- /* l %r5,<d(cpu_nr)> */
- EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
-#else
- /* lhi %r5,0 */
- EMIT4(0xa7580000);
-#endif
+ /*
+ * BPF_LD
+ */
+ case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */
+ case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */
+ if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
+ func_addr = __pa(sk_load_byte_pos);
+ else
+ func_addr = __pa(sk_load_byte);
+ goto call_fn;
+ case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */
+ case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */
+ if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
+ func_addr = __pa(sk_load_half_pos);
+ else
+ func_addr = __pa(sk_load_half);
+ goto call_fn;
+ case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */
+ case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */
+ if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
+ func_addr = __pa(sk_load_word_pos);
+ else
+ func_addr = __pa(sk_load_word);
+ goto call_fn;
+call_fn:
+ jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC;
+ REG_SET_SEEN(REG_14); /* Return address of possible func call */
+
+ /*
+ * Implicit input:
+ * BPF_REG_6 (R7) : skb pointer
+ * REG_SKB_DATA (R12): skb data pointer
+ *
+ * Calculated input:
+ * BPF_REG_2 (R3) : offset of byte(s) to fetch in skb
+ * BPF_REG_5 (R6) : return address
+ *
+ * Output:
+ * BPF_REG_0 (R14): data read from skb
+ *
+ * Scratch registers (BPF_REG_1-5)
+ */
+
+ /* Call function: llilf %w1,func_addr */
+ EMIT6_IMM(0xc00f0000, REG_W1, func_addr);
+
+ /* Offset: lgfi %b2,imm */
+ EMIT6_IMM(0xc0010000, BPF_REG_2, imm);
+ if (BPF_MODE(insn->code) == BPF_IND)
+ /* agfr %b2,%src (%src is s32 here) */
+ EMIT4(0xb9180000, BPF_REG_2, src_reg);
+
+ /* basr %b5,%w1 (%b5 is call saved) */
+ EMIT2(0x0d00, BPF_REG_5, REG_W1);
+
+ /*
+ * Note: For fast access we jump directly after the
+ * jnz instruction from bpf_jit.S
+ */
+ /* jnz <ret0> */
+ EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg);
break;
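For context: a classic socket-filter load such as "ldh [12]" (fetch the EtherType) survives the classic-to-eBPF conversion as BPF_LD | BPF_ABS | BPF_H with imm = 12, so the code above resolves func_addr to sk_load_half_pos and the halfword is returned in BPF_REG_0. Written with the in-kernel insn macros from <linux/filter.h> (a sketch for illustration, not part of the patch):

#include <linux/filter.h>

/* eBPF form of the classic "ldh [12]": R0 = ntohs(*(u16 *)(skb->data + 12)) */
static const struct bpf_insn ethertype_load[] = {
	BPF_LD_ABS(BPF_H, 12),
};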
default: /* too complex, give up */
- goto out;
+ pr_err("Unknown opcode %02x\n", insn->code);
+ return -1;
+ }
+ return insn_count;
+}
+
+/*
+ * Compile eBPF program into s390x code
+ */
+static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
+{
+ int i, insn_count;
+
+ jit->lit = jit->lit_start;
+ jit->prg = 0;
+
+ bpf_jit_prologue(jit);
+ for (i = 0; i < fp->len; i += insn_count) {
+ insn_count = bpf_jit_insn(jit, fp, i);
+ if (insn_count < 0)
+ return -1;
+ jit->addrs[i + 1] = jit->prg; /* Next instruction address */
}
- addrs[i] = jit->prg - jit->start;
+ bpf_jit_epilogue(jit);
+
+ jit->lit_start = jit->prg;
+ jit->size = jit->lit;
+ jit->size_prg = jit->prg;
return 0;
-out:
- return -1;
}
+/*
+ * Classic BPF function stub. BPF programs will be converted into
+ * eBPF and then bpf_int_jit_compile() will be called.
+ */
void bpf_jit_compile(struct bpf_prog *fp)
{
- struct bpf_binary_header *header = NULL;
- unsigned long size, prg_len, lit_len;
- struct bpf_jit jit, cjit;
- unsigned int *addrs;
- int pass, i;
+}
+
+/*
+ * Compile eBPF program "fp"
+ */
+void bpf_int_jit_compile(struct bpf_prog *fp)
+{
+ struct bpf_binary_header *header;
+ struct bpf_jit jit;
+ int pass;
if (!bpf_jit_enable)
return;
- addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL)
+ memset(&jit, 0, sizeof(jit));
+ jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
+ if (jit.addrs == NULL)
return;
- memset(&jit, 0, sizeof(cjit));
- memset(&cjit, 0, sizeof(cjit));
-
- for (pass = 0; pass < 10; pass++) {
- jit.prg = jit.start;
- jit.lit = jit.mid;
-
- bpf_jit_prologue(&jit);
- bpf_jit_noleaks(&jit, fp->insns);
- for (i = 0; i < fp->len; i++) {
- if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
- i == fp->len - 1))
- goto out;
- }
- bpf_jit_epilogue(&jit);
- if (jit.start) {
- WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
- if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
- break;
- } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
- prg_len = jit.prg - jit.start;
- lit_len = jit.lit - jit.mid;
- size = prg_len + lit_len;
- if (size >= BPF_SIZE_MAX)
- goto out;
- header = bpf_jit_binary_alloc(size, &jit.start,
- 2, bpf_jit_fill_hole);
- if (!header)
- goto out;
- jit.prg = jit.mid = jit.start + prg_len;
- jit.lit = jit.end = jit.start + prg_len + lit_len;
- jit.base_ip += (unsigned long) jit.start;
- jit.exit_ip += (unsigned long) jit.start;
- jit.ret0_ip += (unsigned long) jit.start;
- }
- cjit = jit;
+ /*
+ * Three initial passes:
+ * - 1/2: Determine clobbered registers
+ * - 3: Calculate program size and addrs array
+ */
+ for (pass = 1; pass <= 3; pass++) {
+ if (bpf_jit_prog(&jit, fp))
+ goto free_addrs;
}
+ /*
+ * Final pass: Allocate and generate program
+ */
+ if (jit.size >= BPF_SIZE_MAX)
+ goto free_addrs;
+ header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
+ if (!header)
+ goto free_addrs;
+ if (bpf_jit_prog(&jit, fp))
+ goto free_addrs;
if (bpf_jit_enable > 1) {
- bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
- if (jit.start)
- print_fn_code(jit.start, jit.mid - jit.start);
+ bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
+ if (jit.prg_buf)
+ print_fn_code(jit.prg_buf, jit.size_prg);
}
- if (jit.start) {
+ if (jit.prg_buf) {
set_memory_ro((unsigned long)header, header->pages);
- fp->bpf_func = (void *) jit.start;
+ fp->bpf_func = (void *) jit.prg_buf;
fp->jited = true;
}
-out:
- kfree(addrs);
+free_addrs:
+ kfree(jit.addrs);
}
+/*
+ * Free eBPF program
+ */
void bpf_jit_free(struct bpf_prog *fp)
{
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 524c4b615821..1bd23017191e 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -7,4 +7,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_64BIT) += hwsampler.o
+oprofile-y += hwsampler.o
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 9ffe645d5989..bc927a09a172 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -21,8 +21,6 @@
extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
-#ifdef CONFIG_64BIT
-
#include "hwsampler.h"
#include "op_counter.h"
@@ -495,14 +493,10 @@ static void oprofile_hwsampler_exit(void)
hwsampler_shutdown();
}
-#endif /* CONFIG_64BIT */
-
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = s390_backtrace;
-#ifdef CONFIG_64BIT
-
/*
* -ENODEV is not reported to the caller. The module itself
* will use the timer mode sampling as fallback and this is
@@ -511,14 +505,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
hwsampler_available = oprofile_hwsampler_init(ops) == 0;
return 0;
-#else
- return -ENODEV;
-#endif
}
void oprofile_arch_exit(void)
{
-#ifdef CONFIG_64BIT
oprofile_hwsampler_exit();
-#endif
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index b2c76f64c530..598f023cf8a6 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -190,6 +190,11 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
return -ENOMEM;
WARN_ON((u64) zdev->fmb & 0xf);
+ /* reset software counters */
+ atomic64_set(&zdev->allocated_pages, 0);
+ atomic64_set(&zdev->mapped_pages, 0);
+ atomic64_set(&zdev->unmapped_pages, 0);
+
args.fmb_addr = virt_to_phys(zdev->fmb);
return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}
@@ -822,6 +827,7 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out;
+ mutex_init(&zdev->lock);
if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
rc = zpci_enable_device(zdev);
if (rc)
@@ -913,8 +919,7 @@ static int __init pci_base_init(void)
if (!s390_pci_probe)
return 0;
- if (!test_facility(2) || !test_facility(69)
- || !test_facility(71) || !test_facility(72))
+ if (!test_facility(69) || !test_facility(71) || !test_facility(72))
return 0;
rc = zpci_debug_init();
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 3229a2e570df..4129b0a5fd78 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -31,12 +31,25 @@ static char *pci_perf_names[] = {
"Refresh operations",
"DMA read bytes",
"DMA write bytes",
- /* software counters */
+};
+
+static char *pci_sw_names[] = {
"Allocated pages",
"Mapped pages",
"Unmapped pages",
};
+static void pci_sw_counter_show(struct seq_file *m)
+{
+ struct zpci_dev *zdev = m->private;
+ atomic64_t *counter = &zdev->allocated_pages;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
+ seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+ atomic64_read(counter));
+}
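pci_sw_counter_show() above walks the three software counters as one contiguous array starting at &zdev->allocated_pages, so it quietly depends on their declaration order in struct zpci_dev matching pci_sw_names[]. A userspace sketch of the same walk and of the resulting output format (illustration only; the struct below merely stands in for the real atomic64_t members):

#include <stdio.h>

struct counters {			/* stand-in for the zpci_dev fields */
	long long allocated_pages;
	long long mapped_pages;
	long long unmapped_pages;
};

int main(void)
{
	static const char *names[] = {
		"Allocated pages", "Mapped pages", "Unmapped pages",
	};
	struct counters c = { 3, 2, 1 };
	long long *counter = &c.allocated_pages;
	int i;

	/* same pointer walk and format string as pci_sw_counter_show() */
	for (i = 0; i < 3; i++, counter++)
		printf("%26s:\t%llu\n", names[i], (unsigned long long) *counter);
	return 0;
}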
+
static int pci_perf_show(struct seq_file *m, void *v)
{
struct zpci_dev *zdev = m->private;
@@ -45,8 +58,13 @@ static int pci_perf_show(struct seq_file *m, void *v)
if (!zdev)
return 0;
- if (!zdev->fmb)
- return seq_printf(m, "FMB statistics disabled\n");
+
+ mutex_lock(&zdev->lock);
+ if (!zdev->fmb) {
+ mutex_unlock(&zdev->lock);
+ seq_puts(m, "FMB statistics disabled\n");
+ return 0;
+ }
/* header */
seq_printf(m, "FMB @ %p\n", zdev->fmb);
@@ -63,12 +81,9 @@ static int pci_perf_show(struct seq_file *m, void *v)
for (i = 4; i < 6; i++)
seq_printf(m, "%26s:\t%llu\n",
pci_perf_names[i], *(stat + i));
- /* software counters */
- for (i = 6; i < ARRAY_SIZE(pci_perf_names); i++)
- seq_printf(m, "%26s:\t%llu\n",
- pci_perf_names[i],
- atomic64_read((atomic64_t *) (stat + i)));
+ pci_sw_counter_show(m);
+ mutex_unlock(&zdev->lock);
return 0;
}
@@ -86,19 +101,17 @@ static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
if (rc)
return rc;
+ mutex_lock(&zdev->lock);
switch (val) {
case 0:
rc = zpci_fmb_disable_device(zdev);
- if (rc)
- return rc;
break;
case 1:
rc = zpci_fmb_enable_device(zdev);
- if (rc)
- return rc;
break;
}
- return count;
+ mutex_unlock(&zdev->lock);
+ return rc ? rc : count;
}
static int pci_perf_seq_open(struct inode *inode, struct file *filp)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 4cbb29a4d615..6fd8d5836138 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -300,7 +300,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
flags |= ZPCI_TABLE_PROTECTED;
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
- atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
+ atomic64_add(nr_pages, &zdev->mapped_pages);
return dma_addr + (offset & ~PAGE_MASK);
}
@@ -328,7 +328,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
zpci_err_hex(&dma_addr, sizeof(dma_addr));
}
- atomic64_add(npages, &zdev->fmb->unmapped_pages);
+ atomic64_add(npages, &zdev->unmapped_pages);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
dma_free_iommu(zdev, iommu_page_index, npages);
}
@@ -357,7 +357,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL;
}
- atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
+ atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
if (dma_handle)
*dma_handle = map;
return (void *) pa;
@@ -370,7 +370,7 @@ static void s390_dma_free(struct device *dev, size_t size,
struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
size = PAGE_ALIGN(size);
- atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
+ atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long) pa, get_order(size));
}
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index 33864fa2a8d4..7d9ffb15c477 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -28,7 +28,6 @@
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
@@ -53,7 +52,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
diff --git a/arch/score/kernel/asm-offsets.c b/arch/score/kernel/asm-offsets.c
index b4d5214a7a7e..52794f9421e2 100644
--- a/arch/score/kernel/asm-offsets.c
+++ b/arch/score/kernel/asm-offsets.c
@@ -100,7 +100,6 @@ void output_thread_info_defines(void)
{
COMMENT("SCORE thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
- OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index eb4ef274ae9b..50057fed819d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -162,6 +162,10 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
def_bool y
+config PGTABLE_LEVELS
+ default 3 if X2TLB
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 669df51a82e3..324599bfad14 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -17,6 +17,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/io.h>
+#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/sh_mobile_sdhi.h>
@@ -243,10 +244,10 @@ static struct platform_device sh_mmcif_device = {
};
/* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI_RX,
- .tmio_caps = MMC_CAP_SD_HIGHSPEED,
+static struct tmio_mmc_data sdhi_info = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI_RX,
+ .capabilities = MMC_CAP_SD_HIGHSPEED,
};
static struct resource sdhi_resources[] = {
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index d4b01d4cc102..cbd2a9f02a91 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -18,6 +18,7 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
+#include <linux/mfd/tmio.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/regulator/fixed.h>
@@ -447,8 +448,8 @@ static struct resource sdhi0_cn3_resources[] = {
},
};
-static struct sh_mobile_sdhi_info sdhi0_cn3_data = {
- .tmio_caps = MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sdhi0_cn3_data = {
+ .capabilities = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi0_cn3_device = {
@@ -474,8 +475,8 @@ static struct resource sdhi1_cn7_resources[] = {
},
};
-static struct sh_mobile_sdhi_info sdhi1_cn7_data = {
- .tmio_caps = MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sdhi1_cn7_data = {
+ .capabilities = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi1_cn7_device = {
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 0d3049244cd3..d531791f06ff 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -601,12 +601,12 @@ static struct platform_device sdhi0_power = {
},
};
-static struct sh_mobile_sdhi_info sdhi0_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
+static struct tmio_mmc_data sdhi0_info = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX,
+ .capabilities = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
MMC_CAP_NEEDS_POLL,
- .tmio_flags = TMIO_MMC_USE_GPIO_CD,
+ .flags = TMIO_MMC_USE_GPIO_CD,
.cd_gpio = GPIO_PTY7,
};
@@ -635,12 +635,12 @@ static struct platform_device sdhi0_device = {
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* SDHI1 */
-static struct sh_mobile_sdhi_info sdhi1_info = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
- .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
+static struct tmio_mmc_data sdhi1_info = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI1_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI1_RX,
+ .capabilities = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
MMC_CAP_NEEDS_POLL,
- .tmio_flags = TMIO_MMC_USE_GPIO_CD,
+ .flags = TMIO_MMC_USE_GPIO_CD,
.cd_gpio = GPIO_PTW7,
};
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 1df4398f8375..7d997cec09c5 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -373,11 +373,11 @@ static struct resource kfr2r09_sh_sdhi0_resources[] = {
},
};
-static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
- .tmio_caps = MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi0_data = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX,
+ .flags = TMIO_MMC_WRPROTECT_DISABLE,
+ .capabilities = MMC_CAP_SDIO_IRQ,
};
static struct platform_device kfr2r09_sh_sdhi0_device = {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 8b73194ed2ce..29b7c0dcfc51 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -15,6 +15,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
+#include <linux/mfd/tmio.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
#include <linux/regulator/fixed.h>
@@ -408,10 +409,10 @@ static struct resource sdhi_cn9_resources[] = {
},
};
-static struct sh_mobile_sdhi_info sh7724_sdhi_data = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .tmio_caps = MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi_data = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX,
+ .capabilities = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi_cn9_device = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 1162bc6945a3..4f6635a075f2 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/regulator/fixed.h>
@@ -468,10 +469,10 @@ static struct resource sdhi0_cn7_resources[] = {
},
};
-static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .tmio_caps = MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi0_data = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX,
+ .capabilities = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi0_cn7_device = {
@@ -497,10 +498,10 @@ static struct resource sdhi1_cn8_resources[] = {
},
};
-static struct sh_mobile_sdhi_info sh7724_sdhi1_data = {
- .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
- .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
- .tmio_caps = MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi1_data = {
+ .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI1_TX,
+ .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI1_RX,
+ .capabilities = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi1_cn8_device = {
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index b9d9489a5012..9f417feaf6e8 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -99,7 +99,7 @@ static inline int init_new_context(struct task_struct *tsk,
{
int i;
- for (i = 0; i < num_online_cpus(); i++)
+ for_each_online_cpu(i)
cpu_context(i, mm) = NO_CONTEXT;
return 0;
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 657c03919627..2afa321157be 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -27,7 +27,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 status; /* thread synchronous flags */
__u32 cpu;
@@ -56,7 +55,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.status = 0, \
.cpu = 0, \
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index 542225fedb11..4bd44da910f3 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -21,7 +21,6 @@ int main(void)
{
/* offsets into the thread_info struct */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 67a049e75ec1..9d209a07235e 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -993,7 +993,7 @@ static struct unwinder dwarf_unwinder = {
.rating = 150,
};
-static void dwarf_unwinder_cleanup(void)
+static void __init dwarf_unwinder_cleanup(void)
{
struct dwarf_fde *fde, *next_fde;
struct dwarf_cie *cie, *next_cie;
@@ -1009,6 +1009,10 @@ static void dwarf_unwinder_cleanup(void)
rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
kfree(cie);
+ if (dwarf_reg_pool)
+ mempool_destroy(dwarf_reg_pool);
+ if (dwarf_frame_pool)
+ mempool_destroy(dwarf_frame_pool);
kmem_cache_destroy(dwarf_reg_cachep);
kmem_cache_destroy(dwarf_frame_cachep);
}
@@ -1176,17 +1180,13 @@ static int __init dwarf_unwinder_init(void)
sizeof(struct dwarf_reg), 0,
SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
- dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
- mempool_alloc_slab,
- mempool_free_slab,
- dwarf_frame_cachep);
+ dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
+ dwarf_frame_cachep);
if (!dwarf_frame_pool)
goto out;
- dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
- mempool_alloc_slab,
- mempool_free_slab,
- dwarf_reg_cachep);
+ dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ,
+ dwarf_reg_cachep);
if (!dwarf_reg_pool)
goto out;
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 65a1ecd77f96..eb10ff84015c 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -124,7 +124,6 @@ void irq_ctx_init(int cpu)
irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
irqctx->tinfo.task = NULL;
- irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
@@ -133,7 +132,6 @@ void irq_ctx_init(int cpu)
irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
irqctx->tinfo.task = NULL;
- irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = 0;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 0b34f2a704fe..f7c3d5c25caf 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -267,19 +267,12 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
{
struct sigframe __user *frame;
int err = 0, sig = ksig->sig;
- int signal;
frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
if (_NSIG_WORDS > 1)
@@ -313,7 +306,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
/* Set up registers for signal handler */
regs->regs[15] = (unsigned long) frame;
- regs->regs[4] = signal; /* Arg for signal handler */
+ regs->regs[4] = sig; /* Arg for signal handler */
regs->regs[5] = 0;
regs->regs[6] = (unsigned long) &frame->sc;
@@ -329,8 +322,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
if (err)
return -EFAULT;
- set_fs(USER_DS);
-
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
@@ -342,19 +333,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
- int signal;
frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Create the ucontext. */
@@ -392,7 +376,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Set up registers for signal handler */
regs->regs[15] = (unsigned long) frame;
- regs->regs[4] = signal; /* Arg for signal handler */
+ regs->regs[4] = sig; /* Arg for signal handler */
regs->regs[5] = (unsigned long) &frame->info;
regs->regs[6] = (unsigned long) &frame->uc;
@@ -408,8 +392,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (err)
return -EFAULT;
- set_fs(USER_DS);
-
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 71993c6a7d94..d8a3f0d22809 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -385,12 +385,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
/* Give up earlier as i386, in case */
@@ -441,7 +435,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
- regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+ regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
/* FIXME:
The glibc profiling support for SH-5 needs to be passed a sigcontext
@@ -457,11 +451,9 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
- set_fs(USER_DS);
-
/* Broken %016Lx */
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- signal, current->comm, current->pid, frame,
+ sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
@@ -473,19 +465,12 @@ static int setup_rt_frame(struct ksignal *kig, sigset_t *set,
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
- int signal;
frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
@@ -542,15 +527,13 @@ static int setup_rt_frame(struct ksignal *kig, sigset_t *set,
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
- regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+ regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
- set_fs(USER_DS);
-
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- signal, current->comm, current->pid, frame,
+ sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index fc5acfc93c92..de6be008fc01 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -363,7 +363,7 @@ void flush_tlb_mm(struct mm_struct *mm)
smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
} else {
int i;
- for (i = 0; i < num_online_cpus(); i++)
+ for_each_online_cpu(i)
if (smp_processor_id() != i)
cpu_context(i, mm) = 0;
}
@@ -400,7 +400,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
} else {
int i;
- for (i = 0; i < num_online_cpus(); i++)
+ for_each_online_cpu(i)
if (smp_processor_id() != i)
cpu_context(i, mm) = 0;
}
@@ -443,7 +443,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
} else {
int i;
- for (i = 0; i < num_online_cpus(); i++)
+ for_each_online_cpu(i)
if (smp_processor_id() != i)
cpu_context(i, vma->vm_mm) = 0;
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index efb00ec75805..e49502acbab4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -146,6 +146,10 @@ config GENERIC_ISA_DMA
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y if SPARC64
+config PGTABLE_LEVELS
+ default 4 if 64BIT
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index 2b9321ab064d..cd0d69fa7592 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -16,6 +16,7 @@
#define IOPTE_WRITE 0x0000000000000002UL
#define IOMMU_NUM_CTXS 4096
+#include <linux/iommu-common.h>
struct iommu_arena {
unsigned long *map;
@@ -24,11 +25,10 @@ struct iommu_arena {
};
struct iommu {
+ struct iommu_map_table tbl;
spinlock_t lock;
- struct iommu_arena arena;
- void (*flush_all)(struct iommu *);
+ u32 dma_addr_mask;
iopte_t *page_table;
- u32 page_table_map_base;
unsigned long iommu_control;
unsigned long iommu_tsbbase;
unsigned long iommu_flush;
@@ -40,7 +40,6 @@ struct iommu {
unsigned long dummy_page_pa;
unsigned long ctx_lowest_free;
DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
- u32 dma_addr_mask;
};
struct strbuf {
diff --git a/arch/sparc/include/asm/seccomp.h b/arch/sparc/include/asm/seccomp.h
index adca1bce41d4..5ef8826d44f8 100644
--- a/arch/sparc/include/asm/seccomp.h
+++ b/arch/sparc/include/asm/seccomp.h
@@ -1,15 +1,10 @@
#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
#include <linux/unistd.h>
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_read
-#define __NR_seccomp_write_32 __NR_write
-#define __NR_seccomp_exit_32 __NR_exit
#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+#include <asm-generic/seccomp.h>
+
#endif /* _ASM_SECCOMP_H */
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fd7bd0a440ca..229475f0d7ce 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -27,7 +27,6 @@
struct thread_info {
unsigned long uwinmask;
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
@@ -35,6 +34,8 @@ struct thread_info {
int softirq_count;
int hardirq_count;
+ u32 __unused;
+
/* Context switch saved kernel state. */
unsigned long ksp; /* ... ksp __attribute__ ((aligned (8))); */
unsigned long kpc;
@@ -56,7 +57,6 @@ struct thread_info {
{ \
.uwinmask = 0, \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
@@ -85,12 +85,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
*/
#define TI_UWINMASK 0x00 /* uwinmask */
#define TI_TASK 0x04
-#define TI_EXECDOMAIN 0x08 /* exec_domain */
-#define TI_FLAGS 0x0c
-#define TI_CPU 0x10
-#define TI_PREEMPT 0x14 /* preempt_count */
-#define TI_SOFTIRQ 0x18 /* softirq_count */
-#define TI_HARDIRQ 0x1c /* hardirq_count */
+#define TI_FLAGS 0x08
+#define TI_CPU 0x0c
+#define TI_PREEMPT 0x10 /* preempt_count */
+#define TI_SOFTIRQ 0x14 /* softirq_count */
+#define TI_HARDIRQ 0x18 /* hardirq_count */
#define TI_KSP 0x20 /* ksp */
#define TI_KPC 0x24 /* kpc (ldd'ed with kpc) */
#define TI_KPSR 0x28 /* kpsr */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index ff455164732a..bde59825d06c 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -31,7 +31,6 @@
#include <asm/types.h>
struct task_struct;
-struct exec_domain;
struct thread_info {
/* D$ line 1 */
@@ -44,7 +43,6 @@ struct thread_info {
/* D$ line 2 */
unsigned long fault_address;
struct pt_regs *kregs;
- struct exec_domain *exec_domain;
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u8 new_child;
__u8 current_ds;
@@ -80,18 +78,17 @@ struct thread_info {
#define TI_KSP 0x00000018
#define TI_FAULT_ADDR 0x00000020
#define TI_KREGS 0x00000028
-#define TI_EXEC_DOMAIN 0x00000030
-#define TI_PRE_COUNT 0x00000038
-#define TI_NEW_CHILD 0x0000003c
-#define TI_CURRENT_DS 0x0000003d
-#define TI_CPU 0x0000003e
-#define TI_UTRAPS 0x00000040
-#define TI_REG_WINDOW 0x00000048
-#define TI_RWIN_SPTRS 0x000003c8
-#define TI_GSR 0x00000400
-#define TI_XFSR 0x00000438
-#define TI_KUNA_REGS 0x00000470
-#define TI_KUNA_INSN 0x00000478
+#define TI_PRE_COUNT 0x00000030
+#define TI_NEW_CHILD 0x00000034
+#define TI_CURRENT_DS 0x00000035
+#define TI_CPU 0x00000036
+#define TI_UTRAPS 0x00000038
+#define TI_REG_WINDOW 0x00000040
+#define TI_RWIN_SPTRS 0x000003c0
+#define TI_GSR 0x000003f8
+#define TI_XFSR 0x00000430
+#define TI_KUNA_REGS 0x00000468
+#define TI_KUNA_INSN 0x00000470
#define TI_FPREGS 0x00000480
/* We embed this in the uppermost byte of thread_info->flags */
@@ -119,7 +116,6 @@ struct thread_info {
{ \
.task = &tsk, \
.current_ds = ASI_P, \
- .exec_domain = &default_exec_domain, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index bfa4d0c2df42..5320689c06e9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
+#include <linux/iommu-common.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -45,8 +46,9 @@
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
-static void iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
+ struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
@@ -87,94 +89,6 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
iopte_val(*iopte) = val;
}
-/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
- * facility it must all be done in one pass while under the iommu lock.
- *
- * On sun4u platforms, we only flush the IOMMU once every time we've passed
- * over the entire page table doing allocations. Therefore we only ever advance
- * the hint and cannot backtrack it.
- */
-unsigned long iommu_range_alloc(struct device *dev,
- struct iommu *iommu,
- unsigned long npages,
- unsigned long *handle)
-{
- unsigned long n, end, start, limit, boundary_size;
- struct iommu_arena *arena = &iommu->arena;
- int pass = 0;
-
- /* This allocator was derived from x86_64's bit string search */
-
- /* Sanity check */
- if (unlikely(npages == 0)) {
- if (printk_ratelimit())
- WARN_ON(1);
- return DMA_ERROR_CODE;
- }
-
- if (handle && *handle)
- start = *handle;
- else
- start = arena->hint;
-
- limit = arena->limit;
-
- /* The case below can happen if we have a small segment appended
- * to a large, or when the previous alloc was at the very end of
- * the available space. If so, go back to the beginning and flush.
- */
- if (start >= limit) {
- start = 0;
- if (iommu->flush_all)
- iommu->flush_all(iommu);
- }
-
- again:
-
- if (dev)
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << IO_PAGE_SHIFT);
- else
- boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
-
- n = iommu_area_alloc(arena->map, limit, start, npages,
- iommu->page_table_map_base >> IO_PAGE_SHIFT,
- boundary_size >> IO_PAGE_SHIFT, 0);
- if (n == -1) {
- if (likely(pass < 1)) {
- /* First failure, rescan from the beginning. */
- start = 0;
- if (iommu->flush_all)
- iommu->flush_all(iommu);
- pass++;
- goto again;
- } else {
- /* Second failure, give up */
- return DMA_ERROR_CODE;
- }
- }
-
- end = n + npages;
-
- arena->hint = end;
-
- /* Update handle for SG allocations */
- if (handle)
- *handle = end;
-
- return n;
-}
-
-void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long entry;
-
- entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-
- bitmap_clear(arena->map, entry, npages);
-}
-
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node)
@@ -187,22 +101,20 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
+ iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
- if (!iommu->arena.map) {
- printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
+ iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+ if (!iommu->tbl.map)
return -ENOMEM;
- }
- memset(iommu->arena.map, 0, sz);
- iommu->arena.limit = num_tsb_entries;
+ memset(iommu->tbl.map, 0, sz);
- if (tlb_type != hypervisor)
- iommu->flush_all = iommu_flushall;
+ iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+ (tlb_type != hypervisor ? iommu_flushall : NULL),
+ false, 1, false);
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
@@ -235,18 +147,20 @@ out_free_dummy_page:
iommu->dummy_page = 0UL;
out_free_map:
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->tbl.map);
+ iommu->tbl.map = NULL;
return -ENOMEM;
}
-static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+static inline iopte_t *alloc_npages(struct device *dev,
+ struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ (unsigned long)(-1), 0);
if (unlikely(entry == DMA_ERROR_CODE))
return NULL;
@@ -284,7 +198,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
struct dma_attrs *attrs)
{
- unsigned long flags, order, first_page;
+ unsigned long order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
@@ -306,16 +220,14 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
- *dma_addrp = (iommu->page_table_map_base +
+ *dma_addrp = (iommu->tbl.table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
@@ -336,16 +248,12 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
struct dma_attrs *attrs)
{
struct iommu *iommu;
- unsigned long flags, order, npages;
+ unsigned long order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, dvma, npages);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
order = get_order(size);
if (order < 10)
@@ -375,8 +283,8 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(dev, iommu, npages);
+ spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
@@ -385,7 +293,7 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
if (unlikely(!base))
goto bad;
- bus_addr = (iommu->page_table_map_base +
+ bus_addr = (iommu->tbl.table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -496,7 +404,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
@@ -515,11 +423,10 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
- iommu_range_free(iommu, bus_addr, npages);
-
iommu_free_ctx(iommu, ctx);
-
spin_unlock_irqrestore(&iommu->lock, flags);
+
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -567,7 +474,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+ base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
@@ -581,7 +488,8 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_range_alloc(dev, iommu, npages, &handle);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ &handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -594,7 +502,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->page_table_map_base +
+ dma_addr = iommu->tbl.table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
@@ -654,15 +562,17 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_range_free(iommu, vaddr, npages);
- entry = (vaddr - iommu->page_table_map_base)
+ entry = (vaddr - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
+ iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ DMA_ERROR_CODE);
+
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
@@ -684,10 +594,11 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
+ struct iommu_map_table *tbl = &iommu->tbl;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
@@ -723,9 +634,8 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
- iommu_range_free(iommu, dma_handle, npages);
- entry = ((dma_handle - iommu->page_table_map_base)
+ entry = ((dma_handle - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
@@ -737,6 +647,8 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
+ iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ DMA_ERROR_CODE);
sg = sg_next(sg);
}
@@ -770,9 +682,10 @@ static void dma_4u_sync_single_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
+ struct iommu_map_table *tbl = &iommu->tbl;
iopte = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
+ ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
@@ -805,9 +718,10 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
+ struct iommu_map_table *tbl = &iommu->tbl;
- iopte = iommu->page_table +
- ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ iopte = iommu->page_table + ((sglist[0].dma_address -
+ tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 1ec0de4156e7..f4be0d724fc6 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -48,12 +48,4 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
-unsigned long iommu_range_alloc(struct device *dev,
- struct iommu *iommu,
- unsigned long npages,
- unsigned long *handle);
-void iommu_range_free(struct iommu *iommu,
- dma_addr_t dma_addr,
- unsigned long npages);
-
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 274a9f59d95c..7d3ca30fcd15 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
+#include <linux/iommu-common.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
@@ -27,6 +28,10 @@
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 22, 2008"
+#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
+#define COOKIE_PGSZ_CODE_SHIFT 60ULL
+
+
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE 64
@@ -98,10 +103,10 @@ static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
- /* Protects arena alloc/free. */
+ /* Protects ldc_unmap. */
spinlock_t lock;
- struct iommu_arena arena;
struct ldc_mtable_entry *page_table;
+ struct iommu_map_table iommu_map_table;
};
struct ldc_channel {
@@ -998,31 +1003,59 @@ static void free_queue(unsigned long num_entries, struct ldc_packet *q)
free_pages((unsigned long)q, order);
}
+static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
+{
+ u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
+ /* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
+
+ cookie &= ~COOKIE_PGSZ_CODE;
+
+ return (cookie >> (13ULL + (szcode * 3ULL)));
+}
+
+static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
+ unsigned long entry, unsigned long npages)
+{
+ struct ldc_mtable_entry *base;
+ unsigned long i, shift;
+
+ shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
+ base = iommu->page_table + entry;
+ for (i = 0; i < npages; i++) {
+ if (base->cookie)
+ sun4v_ldc_revoke(id, cookie + (i << shift),
+ base->cookie);
+ base->mte = 0;
+ }
+}
+
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE (8 * 1024)
-static int ldc_iommu_init(struct ldc_channel *lp)
+static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
{
unsigned long sz, num_tsb_entries, tsbsize, order;
- struct ldc_iommu *iommu = &lp->iommu;
+ struct ldc_iommu *ldc_iommu = &lp->iommu;
+ struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
struct ldc_mtable_entry *table;
unsigned long hv_err;
int err;
num_tsb_entries = LDC_IOTABLE_SIZE;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
-
- spin_lock_init(&iommu->lock);
+ spin_lock_init(&ldc_iommu->lock);
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
+ iommu->map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->map) {
printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
return -ENOMEM;
}
-
- iommu->arena.limit = num_tsb_entries;
+ iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
+ NULL, false /* no large pool */,
+ 1 /* npools */,
+ true /* skip span boundary check */);
order = get_order(tsbsize);
@@ -1037,7 +1070,7 @@ static int ldc_iommu_init(struct ldc_channel *lp)
memset(table, 0, PAGE_SIZE << order);
- iommu->page_table = table;
+ ldc_iommu->page_table = table;
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
num_tsb_entries);
@@ -1049,31 +1082,32 @@ static int ldc_iommu_init(struct ldc_channel *lp)
out_free_table:
free_pages((unsigned long) table, order);
- iommu->page_table = NULL;
+ ldc_iommu->page_table = NULL;
out_free_map:
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->map);
+ iommu->map = NULL;
return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
- struct ldc_iommu *iommu = &lp->iommu;
+ struct ldc_iommu *ldc_iommu = &lp->iommu;
+ struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
unsigned long num_tsb_entries, tsbsize, order;
(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
- num_tsb_entries = iommu->arena.limit;
+ num_tsb_entries = iommu->poolsize * iommu->nr_pools;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
order = get_order(tsbsize);
- free_pages((unsigned long) iommu->page_table, order);
- iommu->page_table = NULL;
+ free_pages((unsigned long) ldc_iommu->page_table, order);
+ ldc_iommu->page_table = NULL;
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->map);
+ iommu->map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
@@ -1140,7 +1174,7 @@ struct ldc_channel *ldc_alloc(unsigned long id,
lp->id = id;
- err = ldc_iommu_init(lp);
+ err = ldc_iommu_init(name, lp);
if (err)
goto out_free_ldc;
@@ -1885,40 +1919,6 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
}
EXPORT_SYMBOL(ldc_read);
-static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long n, start, end, limit;
- int pass;
-
- limit = arena->limit;
- start = arena->hint;
- pass = 0;
-
-again:
- n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
- end = n + npages;
- if (unlikely(end >= limit)) {
- if (likely(pass < 1)) {
- limit = start;
- start = 0;
- pass++;
- goto again;
- } else {
- /* Scanned the whole thing, give up. */
- return -1;
- }
- }
- bitmap_set(arena->map, n, npages);
-
- arena->hint = end;
-
- return n;
-}
-
-#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
-#define COOKIE_PGSZ_CODE_SHIFT 60ULL
-
static u64 pagesize_code(void)
{
switch (PAGE_SIZE) {
@@ -1945,23 +1945,14 @@ static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
page_offset);
}
-static u64 cookie_to_index(u64 cookie, unsigned long *shift)
-{
- u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
-
- cookie &= ~COOKIE_PGSZ_CODE;
-
- *shift = szcode * 3;
-
- return (cookie >> (13ULL + (szcode * 3ULL)));
-}
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
unsigned long npages)
{
long entry;
- entry = arena_alloc(iommu, npages);
+ entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
+ npages, NULL, (unsigned long)-1, 0);
if (unlikely(entry < 0))
return NULL;
@@ -2090,7 +2081,7 @@ int ldc_map_sg(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long i, npages, flags;
+ unsigned long i, npages;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
@@ -2109,9 +2100,7 @@ int ldc_map_sg(struct ldc_channel *lp,
iommu = &lp->iommu;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2136,7 +2125,7 @@ int ldc_map_single(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long npages, pa, flags;
+ unsigned long npages, pa;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
@@ -2152,9 +2141,7 @@ int ldc_map_single(struct ldc_channel *lp,
iommu = &lp->iommu;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2172,35 +2159,25 @@ int ldc_map_single(struct ldc_channel *lp,
}
EXPORT_SYMBOL(ldc_map_single);
+
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
u64 cookie, u64 size)
{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long i, shift, index, npages;
- struct ldc_mtable_entry *base;
+ unsigned long npages, entry;
npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
- index = cookie_to_index(cookie, &shift);
- base = iommu->page_table + index;
- BUG_ON(index > arena->limit ||
- (index + npages) > arena->limit);
-
- for (i = 0; i < npages; i++) {
- if (base->cookie)
- sun4v_ldc_revoke(id, cookie + (i << shift),
- base->cookie);
- base->mte = 0;
- __clear_bit(index + i, arena->map);
- }
+ entry = ldc_cookie_to_index(cookie, iommu);
+ ldc_demap(iommu, id, cookie, entry, npages);
+ iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
int ncookies)
{
struct ldc_iommu *iommu = &lp->iommu;
- unsigned long flags;
int i;
+ unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
for (i = 0; i < ncookies; i++) {
@@ -2313,7 +2290,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
if (len & (8UL - 1))
return ERR_PTR(-EINVAL);
- buf = kzalloc(len, GFP_KERNEL);
+ buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
return ERR_PTR(-ENOMEM);
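The ldc.c hunks above decode map cookies with ldc_cookie_to_index(): the top nibble (COOKIE_PGSZ_CODE, shifted by 60) carries the page-size code, and the map-table index sits above a page offset of 13 + 3 * szcode bits. A small worked example, under the assumption that szcode 0 denotes the 13-bit base page, is sketched below; it restates the function shown in the diff rather than adding new kernel code.

/*
 * Worked example (hypothetical cookie value, szcode 0 assumed to be
 * the 13-bit base page size):
 *
 *   cookie = 0x0000000000004000
 *   szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT          = 0
 *   shift  = 13 + 3 * szcode                           = 13
 *   index  = (cookie & ~COOKIE_PGSZ_CODE) >> shift     = 2
 *
 * ldc_demap() then revokes npages entries starting at that index and
 * iommu_tbl_range_free() clears the matching bits in the map.
 */
static unsigned long example_cookie_to_index(u64 cookie)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;	/* top nibble */

	cookie &= ~COOKIE_PGSZ_CODE;			/* drop size code */
	return cookie >> (13ULL + szcode * 3ULL);	/* strip page offset */
}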
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 99632a87e697..26c80e18d7b1 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -130,26 +130,26 @@ static struct mdesc_mem_ops memblock_mdesc_ops = {
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
unsigned int handle_size;
+ struct mdesc_handle *hp;
+ unsigned long addr;
void *base;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
+ /*
+ * Allocation has to succeed because mdesc update would be missed
+ * and such events are not retransmitted.
+ */
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
- if (base) {
- struct mdesc_handle *hp;
- unsigned long addr;
-
- addr = (unsigned long)base;
- addr = (addr + 15UL) & ~15UL;
- hp = (struct mdesc_handle *) addr;
+ addr = (unsigned long)base;
+ addr = (addr + 15UL) & ~15UL;
+ hp = (struct mdesc_handle *) addr;
- mdesc_handle_init(hp, handle_size, base);
- return hp;
- }
+ mdesc_handle_init(hp, handle_size, base);
- return NULL;
+ return hp;
}
static void mdesc_kfree(struct mdesc_handle *hp)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 47ddbd496a1e..d2fe57dad433 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
+#include <linux/iommu-common.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -155,15 +156,13 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ (unsigned long)(-1), 0);
if (unlikely(entry == DMA_ERROR_CODE))
goto range_alloc_fail;
- *dma_addrp = (iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT));
+ *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
@@ -188,45 +187,46 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
return ret;
iommu_map_fail:
- /* Interrupts are disabled. */
- spin_lock(&iommu->lock);
- iommu_range_free(iommu, *dma_addrp, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
+static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
+ unsigned long npages)
+{
+ u32 devhandle = *(u32 *)demap_arg;
+ unsigned long num, flags;
+
+ local_irq_save(flags);
+ do {
+ num = pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages);
+
+ entry += num;
+ npages -= num;
+ } while (npages != 0);
+ local_irq_restore(flags);
+}
+
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
dma_addr_t dvma, struct dma_attrs *attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long flags, order, npages, entry;
+ unsigned long order, npages, entry;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
- entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, dvma, npages);
-
- do {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- } while (npages != 0);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
+ entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
+ dma_4v_iommu_demap(&devhandle, entry, npages);
+ iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
@@ -253,15 +253,13 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- spin_lock_irqsave(&iommu->lock, flags);
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ (unsigned long)(-1), 0);
if (unlikely(entry == DMA_ERROR_CODE))
goto bad;
- bus_addr = (iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT));
+ bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
@@ -290,11 +288,7 @@ bad:
return DMA_ERROR_CODE;
iommu_map_fail:
- /* Interrupts are disabled. */
- spin_lock(&iommu->lock);
- iommu_range_free(iommu, bus_addr, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
return DMA_ERROR_CODE;
}
@@ -304,7 +298,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long flags, npages;
+ unsigned long npages;
long entry;
u32 devhandle;
@@ -321,22 +315,9 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, bus_addr, npages);
-
- entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- do {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- } while (npages != 0);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
+ dma_4v_iommu_demap(&devhandle, entry, npages);
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -371,14 +352,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
- spin_lock_irqsave(&iommu->lock, flags);
+ local_irq_save(flags);
iommu_batch_start(dev, prot, ~0UL);
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+ base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -391,7 +372,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_range_alloc(dev, iommu, npages, &handle);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ &handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -404,8 +386,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
iommu_batch_new_entry(entry);
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT);
+ dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
@@ -451,7 +432,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(err < 0L))
goto iommu_map_failed;
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
if (outcount < incount) {
outs = sg_next(outs);
@@ -469,7 +450,8 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_range_free(iommu, vaddr, npages);
+ iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ DMA_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -477,7 +459,7 @@ iommu_map_failed:
if (s == outs)
break;
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
return 0;
}
@@ -489,7 +471,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
- unsigned long flags;
+ unsigned long flags, entry;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
@@ -498,33 +480,27 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
- spin_lock_irqsave(&iommu->lock, flags);
+ local_irq_save(flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
- unsigned long npages, entry;
+ unsigned long npages;
+ struct iommu_map_table *tbl = &iommu->tbl;
+ unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
- iommu_range_free(iommu, dma_handle, npages);
-
- entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
- while (npages) {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- }
-
+ entry = ((dma_handle - tbl->table_map_base) >> shift);
+ dma_4v_iommu_demap(&devhandle, entry, npages);
+ iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ DMA_ERROR_CODE);
sg = sg_next(sg);
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
}
static struct dma_map_ops sun4v_dma_ops = {
@@ -550,30 +526,33 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
}
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
- struct iommu *iommu)
+ struct iommu_map_table *iommu)
{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long i, cnt = 0;
+ struct iommu_pool *pool;
+ unsigned long i, pool_nr, cnt = 0;
u32 devhandle;
devhandle = pbm->devhandle;
- for (i = 0; i < arena->limit; i++) {
- unsigned long ret, io_attrs, ra;
-
- ret = pci_sun4v_iommu_getmap(devhandle,
- HV_PCI_TSBID(0, i),
- &io_attrs, &ra);
- if (ret == HV_EOK) {
- if (page_in_phys_avail(ra)) {
- pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0, i), 1);
- } else {
- cnt++;
- __set_bit(i, arena->map);
+ for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
+ pool = &(iommu->pools[pool_nr]);
+ for (i = pool->start; i <= pool->end; i++) {
+ unsigned long ret, io_attrs, ra;
+
+ ret = pci_sun4v_iommu_getmap(devhandle,
+ HV_PCI_TSBID(0, i),
+ &io_attrs, &ra);
+ if (ret == HV_EOK) {
+ if (page_in_phys_avail(ra)) {
+ pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0,
+ i), 1);
+ } else {
+ cnt++;
+ __set_bit(i, iommu->map);
+ }
}
}
}
-
return cnt;
}
@@ -603,20 +582,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
+ iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */
sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
+ iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->tbl.map) {
printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
- iommu->arena.limit = num_tsb_entries;
-
- sz = probe_existing_entries(pbm, iommu);
+ iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+ NULL, false /* no large_pool */,
+ 0 /* default npools */,
+ false /* want span boundary checking */);
+ sz = probe_existing_entries(pbm, &iommu->tbl);
if (sz)
printk("%s: Imported %lu TSB entries from OBP\n",
pbm->name, sz);
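In the dma_4v_* hunks above, the global iommu->lock disappears from the allocation paths: iommu_tbl_range_alloc()/iommu_tbl_range_free() serialize on a pool-level lock internally, and only the batched hypervisor demap keeps local interrupts disabled. A rough sketch of the resulting unmap sequence is shown below; the example_unmap_4v wrapper is hypothetical and simply mirrors dma_4v_unmap_page from this diff.

/*
 * Sketch of the sun4v unmap path after the conversion: no iommu->lock,
 * interrupts are only masked inside the demap batch.
 */
static void example_unmap_4v(struct iommu *iommu, u32 devhandle,
			     dma_addr_t bus_addr, unsigned long npages)
{
	long entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;

	dma_4v_iommu_demap(&devhandle, entry, npages);	/* irqs off inside */
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
}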
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 86eebfa3b158..59cf917a77b5 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -737,25 +737,9 @@ static void sparc_vt_write_pmc(int idx, u64 val)
{
u64 pcr;
- /* There seems to be an internal latch on the overflow event
- * on SPARC-T4 that prevents it from triggering unless you
- * update the PIC exactly as we do here. The requirement
- * seems to be that you have to turn off event counting in the
- * PCR around the PIC update.
- *
- * For example, after the following sequence:
- *
- * 1) set PIC to -1
- * 2) enable event counting and overflow reporting in PCR
- * 3) overflow triggers, softint 15 handler invoked
- * 4) clear OV bit in PCR
- * 5) write PIC to -1
- *
- * a subsequent overflow event will not trigger. This
- * sequence works on SPARC-T3 and previous chips.
- */
pcr = pcr_ops->read_pcr(idx);
- pcr_ops->write_pcr(idx, PCR_N4_PICNPT);
+ /* ensure ov and ntc are reset */
+ pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
pcr_ops->write_pic(idx, val & 0xffffffff);
@@ -792,25 +776,12 @@ static const struct sparc_pmu niagara4_pmu = {
.num_pic_regs = 4,
};
-static void sparc_m7_write_pmc(int idx, u64 val)
-{
- u64 pcr;
-
- pcr = pcr_ops->read_pcr(idx);
- /* ensure ov and ntc are reset */
- pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
-
- pcr_ops->write_pic(idx, val & 0xffffffff);
-
- pcr_ops->write_pcr(idx, pcr);
-}
-
static const struct sparc_pmu sparc_m7_pmu = {
.event_map = niagara4_event_map,
.cache_map = &niagara4_cache_map,
.max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
.read_pmc = sparc_vt_read_pmc,
- .write_pmc = sparc_m7_write_pmc,
+ .write_pmc = sparc_vt_write_pmc,
.upper_shift = 5,
.lower_shift = 5,
.event_mask = 0x7ff,
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 18147a5523d9..8caf45ee81d9 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -194,7 +194,7 @@ static __init int setup_timer_cs(void)
static void percpu_ce_setup(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- int cpu = __first_cpu(evt->cpumask);
+ int cpu = cpumask_first(evt->cpumask);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -214,7 +214,7 @@ static void percpu_ce_setup(enum clock_event_mode mode,
static int percpu_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- int cpu = __first_cpu(evt->cpumask);
+ int cpu = cpumask_first(evt->cpumask);
unsigned int next = (unsigned int)delta;
sparc_config.load_profile_irq(cpu, next);
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 6fd386c5232a..4f21df7d4f13 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -433,7 +433,6 @@ void trap_init(void)
/* Force linker to barf if mismatched */
if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) ||
TI_TASK != offsetof(struct thread_info, task) ||
- TI_EXECDOMAIN != offsetof(struct thread_info, exec_domain) ||
TI_FLAGS != offsetof(struct thread_info, flags) ||
TI_CPU != offsetof(struct thread_info, cpu) ||
TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 0e699745d643..d21cd625c0de 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2691,8 +2691,6 @@ void __init trap_init(void)
fault_address) ||
TI_KREGS != offsetof(struct thread_info, kregs) ||
TI_UTRAPS != offsetof(struct thread_info, utraps) ||
- TI_EXEC_DOMAIN != offsetof(struct thread_info,
- exec_domain) ||
TI_REG_WINDOW != offsetof(struct thread_info,
reg_window) ||
TI_RWIN_SPTRS != offsetof(struct thread_info,
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 7cca41842a9e..a07e31b50d3f 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -27,6 +27,7 @@ config TILE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_DEBUG_STACKOVERFLOW
select ARCH_WANT_FRAME_POINTERS
+ select HAVE_CONTEXT_TRACKING
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
@@ -147,6 +148,11 @@ config ARCH_DEFCONFIG
default "arch/tile/configs/tilepro_defconfig" if !TILEGX
default "arch/tile/configs/tilegx_defconfig" if TILEGX
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index 6f00e9850636..ee186e13dfe6 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -456,7 +456,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
cycles_t cycles = get_cycles();
return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
@@ -466,7 +466,7 @@ int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
- struct timespec *ts)
+ struct timespec64 *ts)
{
int ret;
cycles_t cycles_prev, cycles_now, clock_rate;
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index b4c488b65745..f5433e0e34e0 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
-generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/tile/include/asm/ftrace.h b/arch/tile/include/asm/ftrace.h
index 13a9bb81a8ab..738d239b792f 100644
--- a/arch/tile/include/asm/ftrace.h
+++ b/arch/tile/include/asm/ftrace.h
@@ -23,6 +23,8 @@
#ifndef __ASSEMBLY__
extern void __mcount(void);
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
#ifdef CONFIG_DYNAMIC_FTRACE
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
diff --git a/arch/tile/include/asm/irq_work.h b/arch/tile/include/asm/irq_work.h
new file mode 100644
index 000000000000..48af33a61a2c
--- /dev/null
+++ b/arch/tile/include/asm/irq_work.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_IRQ_WORK_H
+#define __ASM_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+#ifdef CONFIG_SMP
+ extern bool self_interrupt_ok;
+ return self_interrupt_ok;
+#else
+ return false;
+#endif
+}
+
+#endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h
index 9a326b64f7ae..735e7f144733 100644
--- a/arch/tile/include/asm/smp.h
+++ b/arch/tile/include/asm/smp.h
@@ -69,6 +69,7 @@ static inline int xy_to_cpu(int x, int y)
#define MSG_TAG_STOP_CPU 2
#define MSG_TAG_CALL_FUNCTION_MANY 3
#define MSG_TAG_CALL_FUNCTION_SINGLE 4
+#define MSG_TAG_IRQ_WORK 5
/* Hook for the generic smp_call_function_many() routine. */
static inline void arch_send_call_function_ipi_mask(struct cpumask *mask)
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 96c14c1430d8..f804c39a5e4d 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -26,7 +26,6 @@
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 homecache_cpu; /* CPU we are homecached on */
@@ -51,7 +50,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
@@ -126,6 +124,7 @@ extern void _cpu_idle(void);
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */
#define TIF_POLLING_NRFLAG 10 /* idle is polling for TIF_NEED_RESCHED */
+#define TIF_NOHZ 11 /* in adaptive nohz mode */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -138,14 +137,16 @@ extern void _cpu_idle(void);
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
- (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
- _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
+ (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP | \
+ _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME | _TIF_NOHZ)
/* Work to do at syscall entry. */
-#define _TIF_SYSCALL_ENTRY_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_ENTRY_WORK \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
/* Work to do at syscall exit. */
#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
index e37cf4f0cffd..73e83a187866 100644
--- a/arch/tile/include/gxio/mpipe.h
+++ b/arch/tile/include/gxio/mpipe.h
@@ -1830,7 +1830,7 @@ extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
* code.
*/
extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
- struct timespec *ts);
+ struct timespec64 *ts);
/* Set the timestamp of mPIPE.
*
@@ -1840,7 +1840,7 @@ extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
* code.
*/
extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
- const struct timespec *ts);
+ const struct timespec64 *ts);
/* Adjust the timestamp of mPIPE.
*
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index dfcdeb61ba34..e0e6af4e783b 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -961,7 +961,11 @@ typedef enum {
HV_INQ_TILES_HFH_CACHE = 2,
/** The set of tiles that can be legally used as a LOTAR for a PTE. */
- HV_INQ_TILES_LOTAR = 3
+ HV_INQ_TILES_LOTAR = 3,
+
+ /** The set of "shared" driver tiles that the hypervisor may
+ * periodically interrupt. */
+ HV_INQ_TILES_SHARED = 4
} HV_InqTileSet;
/** Returns specific information about various sets of tiles within the
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 8c5abf2e4794..e8c2c04143cd 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -68,7 +68,7 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *fr
if (from->si_code < 0) {
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
+ err |= __put_user(from->si_int, &to->si_int);
} else {
/*
* First 32bits of unions are always present:
@@ -93,8 +93,7 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *fr
break;
case __SI_TIMER >> 16:
err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(ptr_to_compat(from->si_ptr),
- &to->si_ptr);
+ err |= __put_user(from->si_int, &to->si_int);
break;
/* This is not generated by the kernel as of now. */
case __SI_RT >> 16:
@@ -110,19 +109,19 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *fr
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
int err;
- u32 ptr32;
if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
return -EFAULT;
+ memset(to, 0, sizeof(*to));
+
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(ptr32, &from->si_ptr);
- to->si_ptr = compat_ptr(ptr32);
+ err |= __get_user(to->si_int, &from->si_int);
return err;
}
@@ -196,19 +195,12 @@ int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
unsigned long restorer;
struct compat_rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
- int usig;
frame = compat_get_sigframe(&ksig->ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto err;
- usig = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
/* Always write at least the signal number for the stack backtracer. */
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
/* At sigreturn time, restore the callee-save registers too. */
@@ -243,7 +235,7 @@ int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
regs->sp = ptr_to_compat_reg(frame);
regs->lr = restorer;
- regs->regs[0] = (unsigned long) usig;
+ regs->regs[0] = (unsigned long) sig;
regs->regs[1] = ptr_to_compat_reg(&frame->info);
regs->regs[2] = ptr_to_compat_reg(&frame->uc);
regs->flags |= PT_FLAGS_CALLER_SAVES;
diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c
index 8d52d83cc516..0c0996175b1e 100644
--- a/arch/tile/kernel/ftrace.c
+++ b/arch/tile/kernel/ftrace.c
@@ -74,7 +74,11 @@ static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
create_JumpOff_X1(pcrel_by_instr);
}
- if (addr == FTRACE_ADDR) {
+ /*
+ * Also put { move r10, lr; jal ftrace_stub } in a bundle, which
+ * is used to replace the instruction in address ftrace_call.
+ */
+ if (addr == FTRACE_ADDR || addr == (unsigned long)ftrace_stub) {
/* opcode: or r10, lr, zero */
opcode_x0 =
create_Dest_X0(10) |
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
index 3c2b8d5e1d1a..6c6702451962 100644
--- a/arch/tile/kernel/mcount_64.S
+++ b/arch/tile/kernel/mcount_64.S
@@ -81,7 +81,12 @@ STD_ENTRY(ftrace_caller)
/* arg1: self return address */
/* arg2: parent's return address */
- { move r0, lr; move r1, r10 }
+ /* arg3: ftrace_ops */
+ /* arg4: regs (but make it NULL) */
+ { move r0, lr; moveli r2, hw2_last(function_trace_op) }
+ { move r1, r10; shl16insli r2, r2, hw1(function_trace_op) }
+ { movei r3, 0; shl16insli r2, r2, hw0(function_trace_op) }
+ ld r2,r2
.global ftrace_call
ftrace_call:
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 48e5773dd0b7..b403c2e3e263 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
+#include <linux/context_tracking.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
@@ -474,6 +475,8 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
if (!user_mode(regs))
return 0;
+ user_exit();
+
/* Enable interrupts; they are disabled again on return to caller. */
local_irq_enable();
@@ -496,11 +499,12 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
tracehook_notify_resume(regs);
return 1;
}
- if (thread_info_flags & _TIF_SINGLESTEP) {
+ if (thread_info_flags & _TIF_SINGLESTEP)
single_step_once(regs);
- return 0;
- }
- panic("work_pending: bad flags %#x\n", thread_info_flags);
+
+ user_enter();
+
+ return 0;
}
unsigned long get_wchan(struct task_struct *p)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index de98c6ddf136..f84eed8243da 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -22,6 +22,7 @@
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>
+#include <linux/context_tracking.h>
#include <asm/traps.h>
#include <arch/chip.h>
@@ -252,12 +253,21 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
int do_syscall_trace_enter(struct pt_regs *regs)
{
- if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ u32 work = ACCESS_ONCE(current_thread_info()->flags);
+
+ /*
+ * If TIF_NOHZ is set, we are required to call user_exit() before
+ * doing anything that could touch RCU.
+ */
+ if (work & _TIF_NOHZ)
+ user_exit();
+
+ if (work & _TIF_SYSCALL_TRACE) {
if (tracehook_report_syscall_entry(regs))
regs->regs[TREG_SYSCALL_NR] = -1;
}
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ if (work & _TIF_SYSCALL_TRACEPOINT)
trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
return regs->regs[TREG_SYSCALL_NR];
@@ -268,6 +278,12 @@ void do_syscall_trace_exit(struct pt_regs *regs)
long errno;
/*
+ * We may come here right after calling schedule_user()
+ * in which case we can be in RCU user mode.
+ */
+ user_exit();
+
+ /*
* The standard tile calling convention returns the value (or negative
* errno) in r0, and zero (or positive errno) in r1.
* It saves a couple of cycles on the hot path to do this work in
@@ -303,5 +319,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
/* Handle synthetic interrupt delivered only by the simulator. */
void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
{
+ enum ctx_state prev_state = exception_enter();
send_sigtrap(current, regs);
+ exception_exit(prev_state);
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index f1f579914952..d366675e4bf8 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -32,6 +32,7 @@
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
#include <linux/screen_info.h>
+#include <linux/tick.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -773,7 +774,7 @@ static void __init zone_sizes_init(void)
* though, there'll be no lowmem, so we just alloc_bootmem
* the memmap. There will be no percpu memory either.
*/
- if (i != 0 && cpu_isset(i, isolnodes)) {
+ if (i != 0 && node_isset(i, isolnodes)) {
node_memmap_pfn[i] =
alloc_bootmem_pfn(0, memmap_size, 0);
BUG_ON(node_percpu[i] != 0);
@@ -1390,6 +1391,28 @@ static int __init dataplane(char *str)
early_param("dataplane", dataplane);
+#ifdef CONFIG_NO_HZ_FULL
+/* Warn if hypervisor shared cpus are marked as nohz_full. */
+static int __init check_nohz_full_cpus(void)
+{
+ struct cpumask shared;
+ int cpu;
+
+ if (hv_inquire_tiles(HV_INQ_TILES_SHARED,
+ (HV_VirtAddr) shared.bits, sizeof(shared)) < 0) {
+ pr_warn("WARNING: No support for inquiring hv shared tiles\n");
+ return 0;
+ }
+ for_each_cpu(cpu, &shared) {
+ if (tick_nohz_full_cpu(cpu))
+ pr_warn("WARNING: nohz_full cpu %d receives hypervisor interrupts!\n",
+ cpu);
+ }
+ return 0;
+}
+arch_initcall(check_nohz_full_cpus);
+#endif
+
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 8a524e332c1a..87299a6cfec8 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -151,19 +151,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
unsigned long restorer;
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
- int usig;
frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto err;
- usig = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
/* Always write at least the signal number for the stack backtracer. */
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
/* At sigreturn time, restore the callee-save registers too. */
@@ -198,7 +191,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
regs->sp = (unsigned long) frame;
regs->lr = restorer;
- regs->regs[0] = (unsigned long) usig;
+ regs->regs[0] = (unsigned long) sig;
regs->regs[1] = (unsigned long) &frame->info;
regs->regs[2] = (unsigned long) &frame->uc;
regs->flags |= PT_FLAGS_CALLER_SAVES;
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 862973074bf9..53f7b9def07b 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/err.h>
#include <linux/prctl.h>
+#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -738,6 +739,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
+ enum ctx_state prev_state = exception_enter();
unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
struct thread_info *info = (void *)current_thread_info();
int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
@@ -754,6 +756,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
send_sigtrap(current, regs);
}
+ exception_exit(prev_state);
}
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index d3c4ed780ce2..07e3ff5cc740 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irq_work.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/homecache.h>
@@ -33,6 +34,8 @@ EXPORT_SYMBOL(smp_topology);
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif
+/* Does messaging work correctly to the local cpu? */
+bool self_interrupt_ok;
/*
* Top-level send_IPI*() functions to send messages to other cpus.
@@ -147,6 +150,10 @@ void evaluate_message(int tag)
generic_smp_call_function_single_interrupt();
break;
+ case MSG_TAG_IRQ_WORK: /* Invoke IRQ work */
+ irq_work_run();
+ break;
+
default:
panic("Unknown IPI message tag %d", tag);
break;
@@ -186,6 +193,15 @@ void flush_icache_range(unsigned long start, unsigned long end)
EXPORT_SYMBOL(flush_icache_range);
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ if (arch_irq_work_has_interrupt())
+ send_IPI_single(smp_processor_id(), MSG_TAG_IRQ_WORK);
+}
+#endif
+
+
/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
@@ -203,8 +219,22 @@ static struct irqaction resched_action = {
void __init ipi_init(void)
{
+ int cpu = smp_processor_id();
+ HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu),
+ .state = HV_TO_BE_SENT };
+ int tag = MSG_TAG_CALL_FUNCTION_SINGLE;
+
+ /*
+ * Test if we can message ourselves for arch_irq_work_raise.
+ * This functionality is only available in the Tilera hypervisor
+ * in versions 4.3.4 and following.
+ */
+ if (hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)) == 1)
+ self_interrupt_ok = true;
+ else
+ pr_warn("Older hypervisor: disabling fast irq_work_raise\n");
+
#if CHIP_HAS_IPI()
- int cpu;
/* Map IPI trigger MMIO addresses. */
for_each_possible_cpu(cpu) {
HV_Coord tile;
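The tile hunks above wire irq_work to a self-IPI: ipi_init() probes whether the hypervisor (4.3.4 or later) can deliver a message back to the sending tile and records the answer in self_interrupt_ok, which arch_irq_work_has_interrupt() reports. The sketch below is a condensed, non-authoritative restatement of that raise path, not additional kernel code.

/*
 * Condensed view of the irq_work path added in smp.c and
 * <asm/irq_work.h> above.
 */
void arch_irq_work_raise(void)
{
	/* self_interrupt_ok was probed in ipi_init() by sending a
	 * MSG_TAG_CALL_FUNCTION_SINGLE message to the boot cpu itself. */
	if (arch_irq_work_has_interrupt())
		send_IPI_single(smp_processor_id(), MSG_TAG_IRQ_WORK);
	/* evaluate_message() handles MSG_TAG_IRQ_WORK by calling
	 * irq_work_run(); without hypervisor support the generic code
	 * falls back to running pending irq_work from the timer tick. */
}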
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 7ff5afdbd3aa..c42dce50acd8 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -108,14 +108,15 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
p->sp < PAGE_OFFSET && p->sp != 0) {
if (kbt->verbose)
pr_err(" <%s while in user mode>\n", fault);
- } else if (kbt->verbose) {
- pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
- p->pc, p->sp, p->ex1);
- p = NULL;
+ } else {
+ if (kbt->verbose)
+ pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
+ p->pc, p->sp, p->ex1);
+ return NULL;
}
- if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
- return p;
- return NULL;
+ if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
+ return NULL;
+ return p;
}
/* Is the pc pointing to a sigreturn trampoline? */
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index bf841ca517bb..312fc134c1cb 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -20,6 +20,7 @@
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+#include <linux/context_tracking.h>
#include <asm/stack.h>
#include <asm/traps.h>
#include <asm/setup.h>
@@ -253,6 +254,7 @@ static int do_bpt(struct pt_regs *regs)
void __kprobes do_trap(struct pt_regs *regs, int fault_num,
unsigned long reason)
{
+ enum ctx_state prev_state = exception_enter();
siginfo_t info = { 0 };
int signo, code;
unsigned long address = 0;
@@ -261,7 +263,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
/* Handle breakpoints, etc. */
if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
- return;
+ goto done;
/* Re-enable interrupts, if they were previously enabled. */
if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
@@ -275,7 +277,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
const char *name;
char buf[100];
if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
- return;
+ goto done;
if (fault_num >= 0 &&
fault_num < ARRAY_SIZE(int_name) &&
int_name[fault_num] != NULL)
@@ -294,7 +296,6 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
fault_num, name, regs->pc, buf);
show_regs(regs);
do_exit(SIGKILL); /* FIXME: implement i386 die() */
- return;
}
switch (fault_num) {
@@ -308,7 +309,6 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
pr_err("Unreadable instruction for INT_ILL: %#lx\n",
regs->pc);
do_exit(SIGKILL);
- return;
}
if (!special_ill(instr, &signo, &code)) {
signo = SIGILL;
@@ -319,7 +319,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
case INT_GPV:
#if CHIP_HAS_TILE_DMA()
if (retry_gpv(reason))
- return;
+ goto done;
#endif
/*FALLTHROUGH*/
case INT_UDN_ACCESS:
@@ -346,7 +346,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
if (!state ||
(void __user *)(regs->pc) != state->buffer) {
single_step_once(regs);
- return;
+ goto done;
}
}
#endif
@@ -380,7 +380,6 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
#endif
default:
panic("Unexpected do_trap interrupt number %d", fault_num);
- return;
}
info.si_signo = signo;
@@ -391,6 +390,9 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
if (signo != SIGTRAP)
trace_unhandled_signal("trap", regs, address, signo);
force_sig_info(signo, &info, current);
+
+done:
+ exception_exit(prev_state);
}
void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index 7d9a83be0aca..d075f92ccee0 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/prctl.h>
+#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -1448,6 +1449,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
void do_unaligned(struct pt_regs *regs, int vecnum)
{
+ enum ctx_state prev_state = exception_enter();
tilegx_bundle_bits __user *pc;
tilegx_bundle_bits bundle;
struct thread_info *info = current_thread_info();
@@ -1487,12 +1489,11 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
(int)unaligned_fixup,
(unsigned long long)regs->ex1,
(unsigned long long)regs->pc);
- return;
+ } else {
+ /* Not fixable. Go panic. */
+ panic("Unalign exception in Kernel. pc=%lx",
+ regs->pc);
}
- /* Not fixable. Go panic. */
- panic("Unalign exception in Kernel. pc=%lx",
- regs->pc);
- return;
} else {
/*
* Try to fix the exception. If we can't, panic the
@@ -1501,8 +1502,8 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
bundle = GX_INSN_BSWAP(
*((tilegx_bundle_bits *)(regs->pc)));
jit_bundle_gen(regs, bundle, align_ctl);
- return;
}
+ goto done;
}
/*
@@ -1526,7 +1527,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
force_sig_info(info.si_signo, &info, current);
- return;
+ goto done;
}
@@ -1543,7 +1544,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
trace_unhandled_signal("segfault in unalign fixup", regs,
(unsigned long)info.si_addr, SIGSEGV);
force_sig_info(info.si_signo, &info, current);
- return;
+ goto done;
}
if (!info->unalign_jit_base) {
@@ -1578,7 +1579,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
if (IS_ERR((void __force *)user_page)) {
pr_err("Out of kernel pages trying do_mmap\n");
- return;
+ goto done;
}
/* Save the address in the thread_info struct */
@@ -1591,6 +1592,9 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
/* Generate unalign JIT */
jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
+
+done:
+ exception_exit(prev_state);
}
#endif /* __tilegx__ */
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index 23f044e8a7ab..f7ddae3725a4 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -17,6 +17,7 @@
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/mman.h>
+#include <linux/file.h>
#include <linux/elf.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -39,30 +40,34 @@ static void sim_notify_exec(const char *binary_name)
static int notify_exec(struct mm_struct *mm)
{
+ int ret = 0;
char *buf, *path;
struct vm_area_struct *vma;
+ struct file *exe_file;
if (!sim_is_simulator())
return 1;
- if (mm->exe_file == NULL)
- return 0;
-
- for (vma = current->mm->mmap; ; vma = vma->vm_next) {
- if (vma == NULL)
- return 0;
- if (vma->vm_file == mm->exe_file)
- break;
- }
-
buf = (char *) __get_free_page(GFP_KERNEL);
if (buf == NULL)
return 0;
- path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
- if (IS_ERR(path)) {
- free_page((unsigned long)buf);
- return 0;
+ exe_file = get_mm_exe_file(mm);
+ if (exe_file == NULL)
+ goto done_free;
+
+ path = d_path(&exe_file->f_path, buf, PAGE_SIZE);
+ if (IS_ERR(path))
+ goto done_put;
+
+ down_read(&mm->mmap_sem);
+ for (vma = current->mm->mmap; ; vma = vma->vm_next) {
+ if (vma == NULL) {
+ up_read(&mm->mmap_sem);
+ goto done_put;
+ }
+ if (vma->vm_file == exe_file)
+ break;
}
/*
@@ -80,14 +85,20 @@ static int notify_exec(struct mm_struct *mm)
__insn_mtspr(SPR_SIM_CONTROL,
(SIM_CONTROL_DLOPEN
| (c << _SIM_CONTROL_OPERATOR_BITS)));
- if (c == '\0')
+ if (c == '\0') {
+ ret = 1; /* success */
break;
+ }
}
}
+ up_read(&mm->mmap_sem);
sim_notify_exec(path);
+done_put:
+ fput(exe_file);
+done_free:
free_page((unsigned long)buf);
- return 1;
+ return ret;
}
/* Notify a running simulator, if any, that we loaded an interpreter. */
@@ -109,8 +120,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct mm_struct *mm = current->mm;
int retval = 0;
- down_write(&mm->mmap_sem);
-
/*
* Notify the simulator that an exec just occurred.
* If we can't find the filename of the mapping, just use
@@ -119,6 +128,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
if (!notify_exec(mm))
sim_notify_exec(bprm->filename);
+ down_write(&mm->mmap_sem);
+
retval = setup_vdso_pages();
#ifndef __tilegx__
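
[Editor's note] The notify_exec() rework above replaces the bare mm->exe_file dereference with a pinned reference from get_mm_exe_file()/fput(), and holds mmap_sem only around the VMA walk. A minimal sketch of that reference-safe lookup, using a helper name of my own (exe_path_of()); the called functions are the stock kernel APIs:

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/dcache.h>
#include <linux/err.h>

/* Hypothetical helper: resolve a task's executable path via a pinned
 * struct file reference, as the reworked notify_exec() does. */
static char *exe_path_of(struct mm_struct *mm, char *buf, int buflen)
{
	struct file *exe_file = get_mm_exe_file(mm);	/* takes a reference */
	char *path;

	if (!exe_file)
		return NULL;
	path = d_path(&exe_file->f_path, buf, buflen);	/* may return ERR_PTR */
	fput(exe_file);					/* drop the reference */
	return IS_ERR(path) ? NULL : path;
}

Note the related change at the end of the elf.c diff: arch_setup_additional_pages() now takes mmap_sem for writing only after notify_exec() has run, since the helper now takes mmap_sem for reading itself.
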
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 0f61a73534e6..e83cc999da02 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -35,6 +35,7 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/context_tracking.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
@@ -702,6 +703,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
unsigned long address, unsigned long write)
{
int is_page_fault;
+ enum ctx_state prev_state = exception_enter();
#ifdef CONFIG_KPROBES
/*
@@ -711,7 +713,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
regs->faultnum, SIGSEGV) == NOTIFY_STOP)
- return;
+ goto done;
#endif
#ifdef __tilegx__
@@ -750,7 +752,6 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
current->comm, current->pid, pc, address);
show_regs(regs);
do_group_exit(SIGKILL);
- return;
}
}
#else
@@ -834,12 +835,15 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
async->is_fault = is_page_fault;
async->is_write = write;
async->address = address;
- return;
+ goto done;
}
}
#endif
handle_page_fault(regs, fault_num, is_page_fault, address, write);
+
+done:
+ exception_exit(prev_state);
}
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index ace32d7d3864..5bd252e3fdc5 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -233,9 +233,12 @@ static pgprot_t __init init_pgprot(ulong address)
if (kdata_huge)
return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
- /* We map the aliased pages of permanent text inaccessible. */
+ /*
+ * We map the aliased pages of permanent text so we can
+ * update them if necessary, for ftrace, etc.
+ */
if (address < (ulong) _sinittext - CODE_DELTA)
- return PAGE_NONE;
+ return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
/* We map read-only data non-coherent for performance. */
if ((address >= (ulong) __start_rodata &&
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index a7520c90f62d..6e67847f5272 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -95,48 +95,6 @@ config MAGIC_SYSRQ
The keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
-config SMP
- bool "Symmetric multi-processing support"
- default n
- depends on BROKEN
- help
- This option enables UML SMP support.
- It is NOT related to having a real SMP box. Not directly, at least.
-
- UML implements virtual SMP by allowing as many processes to run
- simultaneously on the host as there are virtual processors configured.
-
- Obviously, if the host is a uniprocessor, those processes will
- timeshare, but, inside UML, will appear to be running simultaneously.
- If the host is a multiprocessor, then UML processes may run
- simultaneously, depending on the host scheduler.
-
- This, however, is supported only in TT mode. So, if you use the SKAS
- patch on your host, switching to TT mode and enabling SMP usually
- gives you worse performances.
- Also, since the support for SMP has been under-developed, there could
- be some bugs being exposed by enabling SMP.
-
- If you don't know what to do, say N.
-
-config NR_CPUS
- int "Maximum number of CPUs (2-32)"
- range 2 32
- depends on SMP
- default "32"
-
-config HIGHMEM
- bool "Highmem support"
- depends on !64BIT && BROKEN
- default n
- help
- This was used to allow UML to run with big amounts of memory.
- Currently it is unstable, so if unsure say N.
-
- To use big amounts of memory, it is recommended enable static
- linking (i.e. CONFIG_STATIC_LINK) - this should allow the
- guest to use up to 2.75G of memory.
-
config KERNEL_STACK_ORDER
int "Kernel stack size order"
default 1 if 64BIT
@@ -155,3 +113,8 @@ config MMAPPER
config NO_DMA
def_bool y
+
+config PGTABLE_LEVELS
+ int
+ default 3 if 3_LEVEL_PGTABLES
+ default 2
diff --git a/arch/um/Makefile b/arch/um/Makefile
index e4b1a9639c4d..17d4460b1af3 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -43,8 +43,8 @@ endif
HOST_DIR := arch/$(HEADER_ARCH)
-include $(srctree)/$(ARCH_DIR)/Makefile-skas
-include $(srctree)/$(HOST_DIR)/Makefile.um
+include $(ARCH_DIR)/Makefile-skas
+include $(HOST_DIR)/Makefile.um
core-y += $(HOST_DIR)/um/
@@ -73,7 +73,7 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
$(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
#This will adjust *FLAGS accordingly to the platform.
-include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+include $(ARCH_DIR)/Makefile-os-$(OS)
KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/include \
-I$(srctree)/$(HOST_DIR)/include/uapi \
diff --git a/arch/um/Makefile-ia64 b/arch/um/Makefile-ia64
deleted file mode 100644
index f84dc23b0f6e..000000000000
--- a/arch/um/Makefile-ia64
+++ /dev/null
@@ -1 +0,0 @@
-START_ADDR = 0x1000000000000000
diff --git a/arch/um/Makefile-ppc b/arch/um/Makefile-ppc
deleted file mode 100644
index 66fd2003e165..000000000000
--- a/arch/um/Makefile-ppc
+++ /dev/null
@@ -1,9 +0,0 @@
-ifeq ($(CONFIG_HOST_2G_2G), y)
-START_ADDR = 0x80000000
-else
-START_ADDR = 0xc0000000
-endif
-ARCH_CFLAGS = -U__powerpc__ -D__UM_PPC__
-
-# The arch is ppc, but the elf32 name is powerpc
-ELF_SUBARCH = powerpc
diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h
index 3094ea3c73b0..1761fd75bf13 100644
--- a/arch/um/include/asm/fixmap.h
+++ b/arch/um/include/asm/fixmap.h
@@ -33,10 +33,6 @@
* fix-mapped?
*/
enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
__end_of_fixed_addresses
};
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 2324b624f195..18eb9924dda3 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -47,11 +47,7 @@ extern unsigned long end_iomem;
#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#endif
+#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index cbc5edd5a901..2d1e0dd5bb0b 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -98,16 +98,8 @@ struct cpuinfo_um {
extern struct cpuinfo_um boot_cpu_data;
-#define my_cpu_data cpu_data[smp_processor_id()]
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_um cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
-#endif
-
#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
extern unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/um/include/asm/smp.h b/arch/um/include/asm/smp.h
index e4507938d8cf..9c3be355ed01 100644
--- a/arch/um/include/asm/smp.h
+++ b/arch/um/include/asm/smp.h
@@ -1,32 +1,6 @@
#ifndef __UM_SMP_H
#define __UM_SMP_H
-#ifdef CONFIG_SMP
-
-#include <linux/bitops.h>
-#include <asm/current.h>
-#include <linux/cpumask.h>
-
-#define raw_smp_processor_id() (current_thread->cpu)
-
-#define cpu_logical_map(n) (n)
-#define cpu_number_map(n) (n)
-extern int hard_smp_processor_id(void);
-#define NO_PROC_ID -1
-
-extern int ncpus;
-
-
-static inline void smp_cpus_done(unsigned int maxcpus)
-{
-}
-
-extern struct task_struct *idle_threads[NR_CPUS];
-
-#else
-
#define hard_smp_processor_id() 0
#endif
-
-#endif
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index e04114c4fcd9..b30c85b141d9 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -14,7 +14,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable,
@@ -28,7 +27,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 41c8c774ec10..ca1843e1df15 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -56,6 +56,7 @@ extern unsigned long brk_start;
extern unsigned long host_task_size;
extern int linux_main(int argc, char **argv);
+extern void uml_finishsetup(void);
struct siginfo;
extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *);
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 08eec0b691b0..d824528f6f62 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -174,7 +174,6 @@ extern unsigned long long os_makedev(unsigned major, unsigned minor);
/* start_up.c */
extern void os_early_checks(void);
-extern void can_do_skas(void);
extern void os_check_bugs(void);
extern void check_host_supports_tls(int *supports_tls, int *tls_min);
@@ -187,7 +186,6 @@ extern int os_process_parent(int pid);
extern void os_stop_process(int pid);
extern void os_kill_process(int pid, int reap_child);
extern void os_kill_ptraced_process(int pid, int reap_child);
-extern long os_ptrace_ldt(long pid, long addr, long data);
extern int os_getpid(void);
extern int os_getpgrp(void);
diff --git a/arch/um/include/shared/skas/proc_mm.h b/arch/um/include/shared/skas/proc_mm.h
deleted file mode 100644
index 902809209603..000000000000
--- a/arch/um/include/shared/skas/proc_mm.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SKAS_PROC_MM_H
-#define __SKAS_PROC_MM_H
-
-#define MM_MMAP 54
-#define MM_MUNMAP 55
-#define MM_MPROTECT 56
-#define MM_COPY_SEGMENTS 57
-
-struct mm_mmap {
- unsigned long addr;
- unsigned long len;
- unsigned long prot;
- unsigned long flags;
- unsigned long fd;
- unsigned long offset;
-};
-
-struct mm_munmap {
- unsigned long addr;
- unsigned long len;
-};
-
-struct mm_mprotect {
- unsigned long addr;
- unsigned long len;
- unsigned int prot;
-};
-
-struct proc_mm_op {
- int op;
- union {
- struct mm_mmap mmap;
- struct mm_munmap munmap;
- struct mm_mprotect mprotect;
- int copy_segments;
- } u;
-};
-
-#endif
diff --git a/arch/um/include/shared/skas/skas.h b/arch/um/include/shared/skas/skas.h
index c45df961c874..911f3c45ad1f 100644
--- a/arch/um/include/shared/skas/skas.h
+++ b/arch/um/include/shared/skas/skas.h
@@ -9,13 +9,10 @@
#include <sysdep/ptrace.h>
extern int userspace_pid[];
-extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
-extern int skas_needs_stub;
extern int user_thread(unsigned long stack, int flags);
extern void new_thread_handler(void);
extern void handle_syscall(struct uml_pt_regs *regs);
-extern int new_mm(unsigned long stack);
extern long execute_syscall_skas(void *r);
extern unsigned long current_stub_stack(void);
diff --git a/arch/um/include/shared/skas_ptrace.h b/arch/um/include/shared/skas_ptrace.h
deleted file mode 100644
index 630a9c92b93c..000000000000
--- a/arch/um/include/shared/skas_ptrace.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __SKAS_PTRACE_H
-#define __SKAS_PTRACE_H
-
-#define PTRACE_FAULTINFO 52
-#define PTRACE_SWITCH_MM 55
-
-#include <sysdep/skas_ptrace.h>
-
-#endif
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 2d840a070c8b..a6a5e42caaef 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -12,8 +12,8 @@ clean-files :=
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
- signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
- um_arch.o umid.o maccess.o skas/
+ signal.o syscall.o sysrq.o time.o tlb.o trap.o \
+ um_arch.o umid.o maccess.o kmsg_dump.o skas/
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 1d8505b1e290..23cb9350d47e 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -35,9 +35,6 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
struct irq_fd *irq_fd;
int n;
- if (smp_sigio_handler())
- return;
-
while (1) {
n = os_waiting_for_events(active_fds);
if (n <= 0) {
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
new file mode 100644
index 000000000000..407d49251d6f
--- /dev/null
+++ b/arch/um/kernel/kmsg_dump.c
@@ -0,0 +1,43 @@
+#include <linux/kmsg_dump.h>
+#include <linux/console.h>
+#include <shared/init.h>
+#include <shared/kern.h>
+#include <os.h>
+
+static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ static char line[1024];
+
+ size_t len = 0;
+ bool con_available = false;
+
+ /* only dump kmsg when no console is available */
+ if (!console_trylock())
+ return;
+
+ if (console_drivers != NULL)
+ con_available = true;
+
+ console_unlock();
+
+ if (con_available == true)
+ return;
+
+ printf("kmsg_dump:\n");
+ while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
+ line[len] = '\0';
+ printf("%s", line);
+ }
+}
+
+static struct kmsg_dumper kmsg_dumper = {
+ .dump = kmsg_dumper_stdout
+};
+
+int __init kmsg_dumper_stdout_init(void)
+{
+ return kmsg_dump_register(&kmsg_dumper);
+}
+
+__uml_postsetup(kmsg_dumper_stdout_init);
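
[Editor's note] The dumper above only fires when something calls kmsg_dump(); the um_arch.c hunk later in this patch does that from the panic notifier. A minimal sketch of that trigger side with hypothetical names; the chain registration and kmsg_dump() calls are the stock APIs:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/notifier.h>

/* Hypothetical notifier: flush the log to registered dumpers on panic. */
static int example_panic_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	kmsg_dump(KMSG_DUMP_PANIC);	/* runs every registered dumper */
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_notify,
};

static int __init example_panic_nb_init(void)
{
	return atomic_notifier_chain_register(&panic_notifier_list,
					      &example_panic_nb);
}
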
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 8636e905426f..b2a2dff50b4e 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -38,19 +38,6 @@ int kmalloc_ok = 0;
/* Used during early boot */
static unsigned long brk_end;
-#ifdef CONFIG_HIGHMEM
-static void setup_highmem(unsigned long highmem_start,
- unsigned long highmem_len)
-{
- unsigned long highmem_pfn;
- int i;
-
- highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
- for (i = 0; i < highmem_len >> PAGE_SHIFT; i++)
- free_highmem_page(&mem_map[highmem_pfn + i]);
-}
-#endif
-
void __init mem_init(void)
{
/* clear the zero-page */
@@ -67,9 +54,6 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
free_all_bootmem();
max_low_pfn = totalram_pages;
-#ifdef CONFIG_HIGHMEM
- setup_highmem(end_iomem, highmem);
-#endif
max_pfn = totalram_pages;
mem_init_print_info(NULL);
kmalloc_ok = 1;
@@ -127,49 +111,6 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
}
}
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
- (vaddr)), (vaddr))
-
-static void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
- kmap_prot = PAGE_KERNEL;
-}
-
-static void __init init_highmem(void)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long vaddr;
-
- /*
- * Permanent kmaps:
- */
- vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
-
- pgd = swapper_pg_dir + pgd_index(vaddr);
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- pkmap_page_table = pte;
-
- kmap_init();
-}
-#endif /* CONFIG_HIGHMEM */
-
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
@@ -211,9 +152,6 @@ void __init paging_init(void)
zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
(uml_physmem >> PAGE_SHIFT);
-#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
-#endif
free_area_init(zones_size);
/*
@@ -224,10 +162,6 @@ void __init paging_init(void)
fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
fixaddr_user_init();
-
-#ifdef CONFIG_HIGHMEM
- init_highmem();
-#endif
}
/*
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 549ecf3f5857..9034fc8056b4 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -57,22 +57,51 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
extern int __syscall_stub_start;
+/**
+ * setup_physmem() - Set up physical memory for UML
+ * @start: Start address of the physical kernel memory,
+ * i.e. the start address of the executable image.
+ * @reserve_end: End address of the physical kernel memory.
+ * @len: Length of total physical memory that should be mapped/made
+ * available, in bytes.
+ * @highmem: Number of highmem bytes that should be mapped/made available.
+ *
+ * Creates an unlinked temporary file of size (len + highmem) and memory maps
+ * it at the last executable image address (uml_reserved).
+ *
+ * The offset is needed because the length of the total physical memory
+ * (len + highmem) includes the size of the memory used by the executable image,
+ * but the mapped-to address is the last address of the executable image
+ * (uml_reserved == end address of executable image).
+ *
+ * The mapped memory of the temporary file is used as backing memory
+ * for all user space processes and kernel tasks.
+ */
void __init setup_physmem(unsigned long start, unsigned long reserve_end,
unsigned long len, unsigned long long highmem)
{
unsigned long reserve = reserve_end - start;
- int pfn = PFN_UP(__pa(reserve_end));
- int delta = (len - reserve) >> PAGE_SHIFT;
- int err, offset, bootmap_size;
+ unsigned long pfn = PFN_UP(__pa(reserve_end));
+ unsigned long delta = (len - reserve) >> PAGE_SHIFT;
+ unsigned long offset, bootmap_size;
+ long map_size;
+ int err;
+
+ offset = uml_reserved - uml_physmem;
+ map_size = len - offset;
+ if (map_size <= 0) {
+ printf("Too little physical memory! Needed=%lu, given=%lu\n",
+ offset, len);
+ exit(1);
+ }
physmem_fd = create_mem_file(len + highmem);
- offset = uml_reserved - uml_physmem;
err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
- len - offset, 1, 1, 1);
+ map_size, 1, 1, 1);
if (err < 0) {
printf("setup_physmem - mapping %ld bytes of memory at 0x%p "
- "failed - errno = %d\n", len - offset,
+ "failed - errno = %d\n", map_size,
(void *) uml_reserved, err);
exit(1);
}
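
[Editor's note] A worked example of the offset arithmetic described in the new kernel-doc comment, using made-up addresses purely for illustration:

/* Illustration only: the arithmetic setup_physmem() performs, with
 * hypothetical addresses plugged in. */
static void example_physmem_layout(void)
{
	unsigned long uml_physmem_ex  = 0x60000000UL; /* start of kernel image */
	unsigned long uml_reserved_ex = 0x60800000UL; /* end of kernel image   */
	unsigned long len_ex          = 0x08000000UL; /* 128 MiB requested     */

	unsigned long offset   = uml_reserved_ex - uml_physmem_ex; /* 8 MiB   */
	long          map_size = len_ex - offset;                  /* 120 MiB */

	/* The backing file is created with size len (+ highmem), but only
	 * map_size bytes are mapped, at uml_reserved, starting at file
	 * offset "offset": those first "offset" bytes of physical memory
	 * are already occupied by the loaded executable image. */
	(void)map_size;
}
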
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f17bca8ed2ce..68b9119841cd 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -259,17 +259,6 @@ int strlen_user_proc(char __user *str)
return strlen_user(str);
}
-int smp_sigio_handler(void)
-{
-#ifdef CONFIG_SMP
- int cpu = current_thread_info()->cpu;
- IPI_handler(cpu);
- if (cpu != 0)
- return 1;
-#endif
- return 0;
-}
-
int cpu(void)
{
return current_thread_info()->cpu;
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 62435ef003d9..174ee5017264 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -8,9 +8,6 @@
#include <linux/sched.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h>
-#include <skas_ptrace.h>
-
-
void user_enable_single_step(struct task_struct *child)
{
@@ -104,35 +101,6 @@ long arch_ptrace(struct task_struct *child, long request,
ret = ptrace_set_thread_area(child, addr, vp);
break;
- case PTRACE_FAULTINFO: {
- /*
- * Take the info from thread->arch->faultinfo,
- * but transfer max. sizeof(struct ptrace_faultinfo).
- * On i386, ptrace_faultinfo is smaller!
- */
- ret = copy_to_user(p, &child->thread.arch.faultinfo,
- sizeof(struct ptrace_faultinfo)) ?
- -EIO : 0;
- break;
- }
-
-#ifdef PTRACE_LDT
- case PTRACE_LDT: {
- struct ptrace_ldt ldt;
-
- if (copy_from_user(&ldt, p, sizeof(ldt))) {
- ret = -EIO;
- break;
- }
-
- /*
- * This one is confusing, so just punt and return -EIO for
- * now
- */
- ret = -EIO;
- break;
- }
-#endif
default:
ret = ptrace_request(child, request, addr, data);
if (ret == -EIO)
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index ced8903921ae..9bdf67a092a5 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -15,28 +15,21 @@ void (*pm_power_off)(void);
static void kill_off_processes(void)
{
- if (proc_mm)
- /*
- * FIXME: need to loop over userspace_pids
- */
- os_kill_ptraced_process(userspace_pid[0], 1);
- else {
- struct task_struct *p;
- int pid;
-
- read_lock(&tasklist_lock);
- for_each_process(p) {
- struct task_struct *t;
-
- t = find_lock_task_mm(p);
- if (!t)
- continue;
- pid = t->mm->context.id.u.pid;
- task_unlock(t);
- os_kill_ptraced_process(pid, 1);
- }
- read_unlock(&tasklist_lock);
+ struct task_struct *p;
+ int pid;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ struct task_struct *t;
+
+ t = find_lock_task_mm(p);
+ if (!t)
+ continue;
+ pid = t->mm->context.id.u.pid;
+ task_unlock(t);
+ os_kill_ptraced_process(pid, 1);
}
+ read_unlock(&tasklist_lock);
}
void uml_cleanup(void)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 007d5503f49b..94abdcc1d6ad 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -54,35 +54,22 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
unsigned long stack = 0;
int ret = -ENOMEM;
- if (skas_needs_stub) {
- stack = get_zeroed_page(GFP_KERNEL);
- if (stack == 0)
- goto out;
- }
+ stack = get_zeroed_page(GFP_KERNEL);
+ if (stack == 0)
+ goto out;
to_mm->id.stack = stack;
if (current->mm != NULL && current->mm != &init_mm)
from_mm = &current->mm->context;
- if (proc_mm) {
- ret = new_mm(stack);
- if (ret < 0) {
- printk(KERN_ERR "init_new_context_skas - "
- "new_mm failed, errno = %d\n", ret);
- goto out_free;
- }
- to_mm->id.u.mm_fd = ret;
- }
- else {
- if (from_mm)
- to_mm->id.u.pid = copy_context_skas0(stack,
- from_mm->id.u.pid);
- else to_mm->id.u.pid = start_userspace(stack);
-
- if (to_mm->id.u.pid < 0) {
- ret = to_mm->id.u.pid;
- goto out_free;
- }
+ if (from_mm)
+ to_mm->id.u.pid = copy_context_skas0(stack,
+ from_mm->id.u.pid);
+ else to_mm->id.u.pid = start_userspace(stack);
+
+ if (to_mm->id.u.pid < 0) {
+ ret = to_mm->id.u.pid;
+ goto out_free;
}
ret = init_new_ldt(to_mm, from_mm);
@@ -105,9 +92,6 @@ void uml_setup_stubs(struct mm_struct *mm)
{
int err, ret;
- if (!skas_needs_stub)
- return;
-
ret = init_stub_pte(mm, STUB_CODE,
(unsigned long) &__syscall_stub_start);
if (ret)
@@ -154,25 +138,19 @@ void destroy_context(struct mm_struct *mm)
{
struct mm_context *mmu = &mm->context;
- if (proc_mm)
- os_close_file(mmu->id.u.mm_fd);
- else {
- /*
- * If init_new_context wasn't called, this will be
- * zero, resulting in a kill(0), which will result in the
- * whole UML suddenly dying. Also, cover negative and
- * 1 cases, since they shouldn't happen either.
- */
- if (mmu->id.u.pid < 2) {
- printk(KERN_ERR "corrupt mm_context - pid = %d\n",
- mmu->id.u.pid);
- return;
- }
- os_kill_ptraced_process(mmu->id.u.pid, 1);
+ /*
+ * If init_new_context wasn't called, this will be
+ * zero, resulting in a kill(0), which will result in the
+ * whole UML suddenly dying. Also, cover negative and
+ * 1 cases, since they shouldn't happen either.
+ */
+ if (mmu->id.u.pid < 2) {
+ printk(KERN_ERR "corrupt mm_context - pid = %d\n",
+ mmu->id.u.pid);
+ return;
}
+ os_kill_ptraced_process(mmu->id.u.pid, 1);
- if (skas_needs_stub)
- free_page(mmu->id.stack);
-
+ free_page(mmu->id.stack);
free_ldt(mmu);
}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 4da11b3c8ddb..527fa5881915 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -10,25 +10,6 @@
#include <os.h>
#include <skas.h>
-int new_mm(unsigned long stack)
-{
- int fd, err;
-
- fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
- if (fd < 0)
- return fd;
-
- if (skas_needs_stub) {
- err = map_stub_pages(fd, STUB_CODE, STUB_DATA, stack);
- if (err) {
- os_close_file(fd);
- return err;
- }
- }
-
- return fd;
-}
-
extern void start_kernel(void);
static int __init start_kernel_proc(void *unused)
@@ -40,9 +21,7 @@ static int __init start_kernel_proc(void *unused)
cpu_tasks[0].pid = pid;
cpu_tasks[0].task = current;
-#ifdef CONFIG_SMP
- init_cpu_online(get_cpu_mask(0));
-#endif
+
start_kernel();
return 0;
}
@@ -55,14 +34,6 @@ int __init start_uml(void)
{
stack_protections((unsigned long) &cpu0_irqstack);
set_sigstack(cpu0_irqstack, THREAD_SIZE);
- if (proc_mm) {
- userspace_pid[0] = start_userspace(0);
- if (userspace_pid[0] < 0) {
- printf("start_uml - start_userspace returned %d\n",
- userspace_pid[0]);
- exit(1);
- }
- }
init_new_thread_signals();
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
deleted file mode 100644
index 5c8c3ea7db7b..000000000000
--- a/arch/um/kernel/smp.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <linux/percpu.h>
-#include <asm/pgalloc.h>
-#include <asm/tlb.h>
-
-#ifdef CONFIG_SMP
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/threads.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/hardirq.h>
-#include <asm/smp.h>
-#include <asm/processor.h>
-#include <asm/spinlock.h>
-#include <kern.h>
-#include <irq_user.h>
-#include <os.h>
-
-/* Per CPU bogomips and other parameters
- * The only piece used here is the ipi pipe, which is set before SMP is
- * started and never changed.
- */
-struct cpuinfo_um cpu_data[NR_CPUS];
-
-/* A statistic, can be a little off */
-int num_reschedules_sent = 0;
-
-/* Not changed after boot */
-struct task_struct *idle_threads[NR_CPUS];
-
-void smp_send_reschedule(int cpu)
-{
- os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1);
- num_reschedules_sent++;
-}
-
-void smp_send_stop(void)
-{
- int i;
-
- printk(KERN_INFO "Stopping all CPUs...");
- for (i = 0; i < num_online_cpus(); i++) {
- if (i == current_thread->cpu)
- continue;
- os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
- }
- printk(KERN_CONT "done\n");
-}
-
-static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
-static cpumask_t cpu_callin_map = CPU_MASK_NONE;
-
-static int idle_proc(void *cpup)
-{
- int cpu = (int) cpup, err;
-
- err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
- if (err < 0)
- panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
-
- os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);
-
- wmb();
- if (cpu_test_and_set(cpu, cpu_callin_map)) {
- printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
- BUG();
- }
-
- while (!cpu_isset(cpu, smp_commenced_mask))
- cpu_relax();
-
- notify_cpu_starting(cpu);
- set_cpu_online(cpu, true);
- default_idle();
- return 0;
-}
-
-static struct task_struct *idle_thread(int cpu)
-{
- struct task_struct *new_task;
-
- current->thread.request.u.thread.proc = idle_proc;
- current->thread.request.u.thread.arg = (void *) cpu;
- new_task = fork_idle(cpu);
- if (IS_ERR(new_task))
- panic("copy_process failed in idle_thread, error = %ld",
- PTR_ERR(new_task));
-
- cpu_tasks[cpu] = ((struct cpu_task)
- { .pid = new_task->thread.mode.tt.extern_pid,
- .task = new_task } );
- idle_threads[cpu] = new_task;
- panic("skas mode doesn't support SMP");
- return new_task;
-}
-
-void smp_prepare_cpus(unsigned int maxcpus)
-{
- struct task_struct *idle;
- unsigned long waittime;
- int err, cpu, me = smp_processor_id();
- int i;
-
- for (i = 0; i < ncpus; ++i)
- set_cpu_possible(i, true);
-
- set_cpu_online(me, true);
- cpu_set(me, cpu_callin_map);
-
- err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
- if (err < 0)
- panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
-
- os_set_fd_async(cpu_data[me].ipi_pipe[0]);
-
- for (cpu = 1; cpu < ncpus; cpu++) {
- printk(KERN_INFO "Booting processor %d...\n", cpu);
-
- idle = idle_thread(cpu);
-
- init_idle(idle, cpu);
-
- waittime = 200000000;
- while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
- cpu_relax();
-
- printk(KERN_INFO "%s\n",
- cpu_isset(cpu, cpu_callin_map) ? "done" : "failed");
- }
-}
-
-void smp_prepare_boot_cpu(void)
-{
- set_cpu_online(smp_processor_id(), true);
-}
-
-int __cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
- cpu_set(cpu, smp_commenced_mask);
- while (!cpu_online(cpu))
- mb();
- return 0;
-}
-
-int setup_profiling_timer(unsigned int multiplier)
-{
- printk(KERN_INFO "setup_profiling_timer\n");
- return 0;
-}
-
-void smp_call_function_slave(int cpu);
-
-void IPI_handler(int cpu)
-{
- unsigned char c;
- int fd;
-
- fd = cpu_data[cpu].ipi_pipe[0];
- while (os_read_file(fd, &c, 1) == 1) {
- switch (c) {
- case 'C':
- smp_call_function_slave(cpu);
- break;
-
- case 'R':
- scheduler_ipi();
- break;
-
- case 'S':
- printk(KERN_INFO "CPU#%d stopping\n", cpu);
- while (1)
- pause();
- break;
-
- default:
- printk(KERN_ERR "CPU#%d received unknown IPI [%c]!\n",
- cpu, c);
- break;
- }
- }
-}
-
-int hard_smp_processor_id(void)
-{
- return pid_to_processor_id(os_getpid());
-}
-
-static DEFINE_SPINLOCK(call_lock);
-static atomic_t scf_started;
-static atomic_t scf_finished;
-static void (*func)(void *info);
-static void *info;
-
-void smp_call_function_slave(int cpu)
-{
- atomic_inc(&scf_started);
- (*func)(info);
- atomic_inc(&scf_finished);
-}
-
-int smp_call_function(void (*_func)(void *info), void *_info, int wait)
-{
- int cpus = num_online_cpus() - 1;
- int i;
-
- if (!cpus)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- spin_lock_bh(&call_lock);
- atomic_set(&scf_started, 0);
- atomic_set(&scf_finished, 0);
- func = _func;
- info = _info;
-
- for_each_online_cpu(i)
- os_write_file(cpu_data[i].ipi_pipe[1], "C", 1);
-
- while (atomic_read(&scf_started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&scf_finished) != cpus)
- barrier();
-
- spin_unlock_bh(&call_lock);
- return 0;
-}
-
-#endif
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 894c8d303cda..aa1b56f5ac68 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -29,7 +29,7 @@ static const struct stacktrace_ops stackops = {
void show_stack(struct task_struct *task, unsigned long *stack)
{
- unsigned long *sp = stack, bp = 0;
+ unsigned long *sp = stack;
struct pt_regs *segv_regs = current->thread.segv_regs;
int i;
@@ -39,10 +39,6 @@ void show_stack(struct task_struct *task, unsigned long *stack)
return;
}
-#ifdef CONFIG_FRAME_POINTER
- bp = get_frame_pointer(task, segv_regs);
-#endif
-
if (!stack)
sp = get_stack_pointer(task, segv_regs);
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 209617302df8..8e4daf44e980 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
panic("Segfault with no mm");
}
- if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
+ if (SEGV_IS_FIXABLE(&fi))
err = handle_page_fault(address, ip, is_write, is_user,
&si.si_code);
else {
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 9274eae6ae7b..07f798f4bcee 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/utsname.h>
#include <linux/sched.h>
+#include <linux/kmsg_dump.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
@@ -66,12 +67,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
int index = 0;
-#ifdef CONFIG_SMP
- index = (struct cpuinfo_um *) v - cpu_data;
- if (!cpu_online(index))
- return 0;
-#endif
-
seq_printf(m, "processor\t: %d\n", index);
seq_printf(m, "vendor_id\t: User Mode Linux\n");
seq_printf(m, "model name\t: UML\n");
@@ -168,23 +163,6 @@ __uml_setup("debug", no_skas_debug_setup,
" this flag is not needed to run gdb on UML in skas mode\n\n"
);
-#ifdef CONFIG_SMP
-static int __init uml_ncpus_setup(char *line, int *add)
-{
- if (!sscanf(line, "%d", &ncpus)) {
- printf("Couldn't parse [%s]\n", line);
- return -1;
- }
-
- return 0;
-}
-
-__uml_setup("ncpus=", uml_ncpus_setup,
-"ncpus=<# of desired CPUs>\n"
-" This tells an SMP kernel how many virtual processors to start.\n\n"
-);
-#endif
-
static int __init Usage(char *line, int *add)
{
const char **p;
@@ -234,6 +212,7 @@ static void __init uml_postsetup(void)
static int panic_exit(struct notifier_block *self, unsigned long unused1,
void *unused2)
{
+ kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(1);
bust_spinlocks(0);
uml_exitcode = 1;
@@ -247,6 +226,16 @@ static struct notifier_block panic_exit_notifier = {
.priority = 0
};
+void uml_finishsetup(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &panic_exit_notifier);
+
+ uml_postsetup();
+
+ new_thread_handler();
+}
+
/* Set during early boot */
unsigned long task_size;
EXPORT_SYMBOL(task_size);
@@ -268,7 +257,6 @@ int __init linux_main(int argc, char **argv)
unsigned long stack;
unsigned int i;
int add;
- char * mode;
for (i = 1; i < argc; i++) {
if ((i == 1) && (argv[i][0] == ' '))
@@ -291,15 +279,6 @@ int __init linux_main(int argc, char **argv)
/* OS sanity checks that need to happen before the kernel runs */
os_early_checks();
- can_do_skas();
-
- if (proc_mm && ptrace_faultinfo)
- mode = "SKAS3";
- else
- mode = "SKAS0";
-
- printf("UML running in %s mode\n", mode);
-
brk_start = (unsigned long) sbrk(0);
/*
@@ -334,11 +313,6 @@ int __init linux_main(int argc, char **argv)
if (physmem_size + iomem_size > max_physmem) {
highmem = physmem_size + iomem_size - max_physmem;
physmem_size -= highmem;
-#ifndef CONFIG_HIGHMEM
- highmem = 0;
- printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
- "to %Lu bytes\n", physmem_size);
-#endif
}
high_physmem = uml_physmem + physmem_size;
@@ -362,11 +336,6 @@ int __init linux_main(int argc, char **argv)
printf("Kernel virtual memory size shrunk to %lu bytes\n",
virtmem_size);
- atomic_notifier_chain_register(&panic_notifier_list,
- &panic_exit_notifier);
-
- uml_postsetup();
-
stack_protections((unsigned long) &init_thread_info);
os_flush_stdout();
@@ -390,15 +359,3 @@ void __init check_bugs(void)
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}
-
-#ifdef CONFIG_SMP
-void alternatives_smp_module_add(struct module *mod, char *name,
- void *locks, void *locks_end,
- void *text, void *text_end)
-{
-}
-
-void alternatives_smp_module_del(struct module *mod)
-{
-}
-#endif
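
[Editor's note] Taken together with the start_idle_thread() change further down (JB_IP now points at uml_finishsetup() rather than new_thread_handler()), the hunks above move the panic-notifier registration and uml_postsetup() off the host stack and onto the kernel's init thread. A call-flow sketch reconstructed only from the hunks in this patch; steps not shown in these hunks are elided:

/*
 * Illustration only, reconstructed from the hunks in this patch:
 *
 *   linux_main()              runs on the host process stack; no longer
 *                             registers the panic notifier or calls
 *                             uml_postsetup()
 *   start_idle_thread()       jumps to JB_IP, which now points at
 *                             uml_finishsetup()
 *   uml_finishsetup()         now on the kernel side:
 *     atomic_notifier_chain_register(&panic_notifier_list, ...);
 *     uml_postsetup();
 *     new_thread_handler();   from here the boot proceeds as before
 */
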
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 33496fe2bb52..8408aba915b2 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -16,7 +16,6 @@
#include <init.h>
#include <longjmp.h>
#include <os.h>
-#include <skas_ptrace.h>
#define ARBITRARY_ADDR -1
#define FAILURE_PID -1
@@ -102,21 +101,6 @@ void os_kill_process(int pid, int reap_child)
CATCH_EINTR(waitpid(pid, NULL, __WALL));
}
-/* This is here uniquely to have access to the userspace errno, i.e. the one
- * used by ptrace in case of error.
- */
-
-long os_ptrace_ldt(long pid, long addr, long data)
-{
- int ret;
-
- ret = ptrace(PTRACE_LDT, pid, addr, data);
-
- if (ret < 0)
- return -errno;
- return ret;
-}
-
/* Kill off a ptraced child by all means available. kill it normally first,
* then PTRACE_KILL it, then PTRACE_CONT it in case it's in a run state from
* which it can't exit directly.
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index 689b18db798f..e7f8c945a573 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -12,7 +12,6 @@
#include <as-layout.h>
#include <mm_id.h>
#include <os.h>
-#include <proc_mm.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
@@ -46,8 +45,6 @@ static int __init init_syscall_regs(void)
__initcall(init_syscall_regs);
-extern int proc_mm;
-
static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
{
int n, i;
@@ -56,10 +53,6 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
unsigned long * syscall;
int err, pid = mm_idp->u.pid;
- if (proc_mm)
- /* FIXME: Need to look up userspace_pid by cpu */
- pid = userspace_pid[0];
-
n = ptrace_setregs(pid, syscall_regs);
if (n < 0) {
printk(UM_KERN_ERR "Registers - \n");
@@ -178,38 +171,12 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, int prot,
int phys_fd, unsigned long long offset, int done, void **data)
{
int ret;
+ unsigned long args[] = { virt, len, prot,
+ MAP_SHARED | MAP_FIXED, phys_fd,
+ MMAP_OFFSET(offset) };
- if (proc_mm) {
- struct proc_mm_op map;
- int fd = mm_idp->u.mm_fd;
-
- map = ((struct proc_mm_op) { .op = MM_MMAP,
- .u =
- { .mmap =
- { .addr = virt,
- .len = len,
- .prot = prot,
- .flags = MAP_SHARED |
- MAP_FIXED,
- .fd = phys_fd,
- .offset= offset
- } } } );
- CATCH_EINTR(ret = write(fd, &map, sizeof(map)));
- if (ret != sizeof(map)) {
- ret = -errno;
- printk(UM_KERN_ERR "map : /proc/mm map failed, "
- "err = %d\n", -ret);
- }
- else ret = 0;
- }
- else {
- unsigned long args[] = { virt, len, prot,
- MAP_SHARED | MAP_FIXED, phys_fd,
- MMAP_OFFSET(offset) };
-
- ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
- data, done);
- }
+ ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
+ data, done);
return ret;
}
@@ -218,32 +185,11 @@ int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
int done, void **data)
{
int ret;
+ unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
+ 0 };
- if (proc_mm) {
- struct proc_mm_op unmap;
- int fd = mm_idp->u.mm_fd;
-
- unmap = ((struct proc_mm_op) { .op = MM_MUNMAP,
- .u =
- { .munmap =
- { .addr =
- (unsigned long) addr,
- .len = len } } } );
- CATCH_EINTR(ret = write(fd, &unmap, sizeof(unmap)));
- if (ret != sizeof(unmap)) {
- ret = -errno;
- printk(UM_KERN_ERR "unmap - proc_mm write returned "
- "%d\n", ret);
- }
- else ret = 0;
- }
- else {
- unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
- 0 };
-
- ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
- data, done);
- }
+ ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
+ data, done);
return ret;
}
@@ -251,33 +197,11 @@ int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
unsigned int prot, int done, void **data)
{
- struct proc_mm_op protect;
int ret;
+ unsigned long args[] = { addr, len, prot, 0, 0, 0 };
- if (proc_mm) {
- int fd = mm_idp->u.mm_fd;
-
- protect = ((struct proc_mm_op) { .op = MM_MPROTECT,
- .u =
- { .mprotect =
- { .addr =
- (unsigned long) addr,
- .len = len,
- .prot = prot } } } );
-
- CATCH_EINTR(ret = write(fd, &protect, sizeof(protect)));
- if (ret != sizeof(protect)) {
- ret = -errno;
- printk(UM_KERN_ERR "protect failed, err = %d", -ret);
- }
- else ret = 0;
- }
- else {
- unsigned long args[] = { addr, len, prot, 0, 0, 0 };
-
- ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
- data, done);
- }
+ ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
+ data, done);
return ret;
}
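
[Editor's note] With the /proc/mm write paths removed, map(), unmap() and protect() above always marshal their parameters into a six-slot args[] array for run_syscall_stub(), which has the child execute the corresponding syscall through the syscall stub. A sketch of how those slots line up with the equivalent direct host calls (illustration only; the stub path is what actually runs):

/*
 * Illustration only: what each args[] array above would correspond to
 * if the same operation were issued as a direct host system call.
 *
 *   map():     { virt, len, prot, MAP_SHARED|MAP_FIXED, phys_fd, MMAP_OFFSET(offset) }
 *                -> mmap(virt, len, prot, MAP_SHARED|MAP_FIXED, phys_fd, offset)
 *   unmap():   { addr, len, 0, 0, 0, 0 }
 *                -> munmap(addr, len)
 *   protect(): { addr, len, prot, 0, 0, 0 }
 *                -> mprotect(addr, len, prot)
 *
 * Trailing zero slots are simply ignored by the syscalls that take
 * fewer arguments; the stub always receives all six values.
 */
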
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 908579f2b0ab..7a9777570a62 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -16,11 +16,9 @@
#include <kern_util.h>
#include <mem.h>
#include <os.h>
-#include <proc_mm.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
-#include <skas_ptrace.h>
#include <sysdep/stub.h>
int is_skas_winch(int pid, int fd, void *data)
@@ -91,50 +89,33 @@ extern unsigned long current_stub_stack(void);
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
int err;
+ unsigned long fpregs[FP_SIZE];
- if (ptrace_faultinfo) {
- err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
- if (err) {
- printk(UM_KERN_ERR "get_skas_faultinfo - "
- "PTRACE_FAULTINFO failed, errno = %d\n", errno);
- fatal_sigsegv();
- }
-
- /* Special handling for i386, which has different structs */
- if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
- memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
- sizeof(struct faultinfo) -
- sizeof(struct ptrace_faultinfo));
+ err = get_fp_registers(pid, fpregs);
+ if (err < 0) {
+ printk(UM_KERN_ERR "save_fp_registers returned %d\n",
+ err);
+ fatal_sigsegv();
}
- else {
- unsigned long fpregs[FP_SIZE];
-
- err = get_fp_registers(pid, fpregs);
- if (err < 0) {
- printk(UM_KERN_ERR "save_fp_registers returned %d\n",
- err);
- fatal_sigsegv();
- }
- err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
- if (err) {
- printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
- "errno = %d\n", pid, errno);
- fatal_sigsegv();
- }
- wait_stub_done(pid);
+ err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
+ if (err) {
+ printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
+ "errno = %d\n", pid, errno);
+ fatal_sigsegv();
+ }
+ wait_stub_done(pid);
- /*
- * faultinfo is prepared by the stub-segv-handler at start of
- * the stub stack page. We just have to copy it.
- */
- memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
+ /*
+ * faultinfo is prepared by the stub-segv-handler at start of
+ * the stub stack page. We just have to copy it.
+ */
+ memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
- err = put_fp_registers(pid, fpregs);
- if (err < 0) {
- printk(UM_KERN_ERR "put_fp_registers returned %d\n",
- err);
- fatal_sigsegv();
- }
+ err = put_fp_registers(pid, fpregs);
+ if (err < 0) {
+ printk(UM_KERN_ERR "put_fp_registers returned %d\n",
+ err);
+ fatal_sigsegv();
}
}
@@ -198,7 +179,8 @@ extern int __syscall_stub_start;
static int userspace_tramp(void *stack)
{
void *addr;
- int err;
+ int err, fd;
+ unsigned long long offset;
ptrace(PTRACE_TRACEME, 0, 0, 0);
@@ -211,36 +193,32 @@ static int userspace_tramp(void *stack)
exit(1);
}
- if (!proc_mm) {
- /*
- * This has a pte, but it can't be mapped in with the usual
- * tlb_flush mechanism because this is part of that mechanism
- */
- int fd;
- unsigned long long offset;
- fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
- addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
- PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
+ /*
+ * This has a pte, but it can't be mapped in with the usual
+ * tlb_flush mechanism because this is part of that mechanism
+ */
+ fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
+ addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
+ PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
+ if (addr == MAP_FAILED) {
+ printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
+ "errno = %d\n", STUB_CODE, errno);
+ exit(1);
+ }
+
+ if (stack != NULL) {
+ fd = phys_mapping(to_phys(stack), &offset);
+ addr = mmap((void *) STUB_DATA,
+ UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, fd, offset);
if (addr == MAP_FAILED) {
- printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
- "errno = %d\n", STUB_CODE, errno);
+ printk(UM_KERN_ERR "mapping segfault stack "
+ "at 0x%lx failed, errno = %d\n",
+ STUB_DATA, errno);
exit(1);
}
-
- if (stack != NULL) {
- fd = phys_mapping(to_phys(stack), &offset);
- addr = mmap((void *) STUB_DATA,
- UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_SHARED, fd, offset);
- if (addr == MAP_FAILED) {
- printk(UM_KERN_ERR "mapping segfault stack "
- "at 0x%lx failed, errno = %d\n",
- STUB_DATA, errno);
- exit(1);
- }
- }
}
- if (!ptrace_faultinfo && (stack != NULL)) {
+ if (stack != NULL) {
struct sigaction sa;
unsigned long v = STUB_CODE +
@@ -286,11 +264,7 @@ int start_userspace(unsigned long stub_stack)
sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
- flags = CLONE_FILES;
- if (proc_mm)
- flags |= CLONE_VM;
- else
- flags |= SIGCHLD;
+ flags = CLONE_FILES | SIGCHLD;
pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
if (pid < 0) {
@@ -413,8 +387,7 @@ void userspace(struct uml_pt_regs *regs)
switch (sig) {
case SIGSEGV:
- if (PTRACE_FULL_FAULTINFO ||
- !ptrace_faultinfo) {
+ if (PTRACE_FULL_FAULTINFO) {
get_skas_faultinfo(pid,
&regs->faultinfo);
(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
@@ -571,67 +544,6 @@ int copy_context_skas0(unsigned long new_stack, int pid)
return err;
}
-/*
- * This is used only, if stub pages are needed, while proc_mm is
- * available. Opening /proc/mm creates a new mm_context, which lacks
- * the stub-pages. Thus, we map them using /proc/mm-fd
- */
-int map_stub_pages(int fd, unsigned long code, unsigned long data,
- unsigned long stack)
-{
- struct proc_mm_op mmop;
- int n;
- unsigned long long code_offset;
- int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
- &code_offset);
-
- mmop = ((struct proc_mm_op) { .op = MM_MMAP,
- .u =
- { .mmap =
- { .addr = code,
- .len = UM_KERN_PAGE_SIZE,
- .prot = PROT_EXEC,
- .flags = MAP_FIXED | MAP_PRIVATE,
- .fd = code_fd,
- .offset = code_offset
- } } });
- CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
- if (n != sizeof(mmop)) {
- n = errno;
- printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
- "offset = %llx\n", code, code_fd,
- (unsigned long long) code_offset);
- printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
- "failed, err = %d\n", n);
- return -n;
- }
-
- if (stack) {
- unsigned long long map_offset;
- int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
- mmop = ((struct proc_mm_op)
- { .op = MM_MMAP,
- .u =
- { .mmap =
- { .addr = data,
- .len = UM_KERN_PAGE_SIZE,
- .prot = PROT_READ | PROT_WRITE,
- .flags = MAP_FIXED | MAP_SHARED,
- .fd = map_fd,
- .offset = map_offset
- } } });
- CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
- if (n != sizeof(mmop)) {
- n = errno;
- printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
- "data failed, err = %d\n", n);
- return -n;
- }
- }
-
- return 0;
-}
-
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
(*buf)[0].JB_IP = (unsigned long) handler;
@@ -674,7 +586,7 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
n = setjmp(initial_jmpbuf);
switch (n) {
case INIT_JMP_NEW_THREAD:
- (*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
+ (*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
(*switch_buf)[0].JB_SP = (unsigned long) stack +
UM_THREAD_SIZE - sizeof(void *);
break;
@@ -728,17 +640,5 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
- int err;
-
- /* FIXME: need cpu pid in __switch_mm */
- if (proc_mm) {
- err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
- mm_idp->u.mm_fd);
- if (err) {
- printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
- "failed, errno = %d\n", errno);
- fatal_sigsegv();
- }
- }
- else userspace_pid[0] = mm_idp->u.pid;
+ userspace_pid[0] = mm_idp->u.pid;
}
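
[Editor's note] With the PTRACE_FAULTINFO branch gone, get_skas_faultinfo() always follows the stub path shown above: save the child's FP registers, resume it with SIGSEGV so the stub's SEGV handler records the fault information at the top of the stub stack, wait for it to stop, copy the faultinfo out, and restore the FP registers. A host-side sketch of just the resume step, with a hypothetical helper name and error handling trimmed:

#include <sys/ptrace.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical helper: deliver SIGSEGV to the traced child so the stub's
 * SEGV handler runs and writes faultinfo to the stub stack page. */
static int resume_stub_with_segv(int pid)
{
	if (ptrace(PTRACE_CONT, pid, 0, SIGSEGV) < 0) {
		fprintf(stderr, "PTRACE_CONT failed, errno = %d\n", errno);
		return -errno;
	}
	return 0;	/* caller then waits for the stub to trap again */
}
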
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 337518c5042a..47f1ff056a54 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -24,7 +24,6 @@
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
-#include <skas_ptrace.h>
static void ptrace_child(void)
{
@@ -143,44 +142,6 @@ static int stop_ptraced_child(int pid, int exitcode, int mustexit)
}
/* Changed only during early boot */
-int ptrace_faultinfo;
-static int disable_ptrace_faultinfo;
-
-int ptrace_ldt;
-static int disable_ptrace_ldt;
-
-int proc_mm;
-static int disable_proc_mm;
-
-int have_switch_mm;
-static int disable_switch_mm;
-
-int skas_needs_stub;
-
-static int __init skas0_cmd_param(char *str, int* add)
-{
- disable_ptrace_faultinfo = 1;
- disable_ptrace_ldt = 1;
- disable_proc_mm = 1;
- disable_switch_mm = 1;
-
- return 0;
-}
-
-/* The two __uml_setup would conflict, without this stupid alias. */
-
-static int __init mode_skas0_cmd_param(char *str, int* add)
- __attribute__((alias("skas0_cmd_param")));
-
-__uml_setup("skas0", skas0_cmd_param,
-"skas0\n"
-" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used\n\n");
-
-__uml_setup("mode=skas0", mode_skas0_cmd_param,
-"mode=skas0\n"
-" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used.\n\n");
-
-/* Changed only during early boot */
static int force_sysemu_disabled = 0;
static int __init nosysemu_cmd_param(char *str, int* add)
@@ -376,121 +337,6 @@ void __init os_early_checks(void)
stop_ptraced_child(pid, 1, 1);
}
-static int __init noprocmm_cmd_param(char *str, int* add)
-{
- disable_proc_mm = 1;
- return 0;
-}
-
-__uml_setup("noprocmm", noprocmm_cmd_param,
-"noprocmm\n"
-" Turns off usage of /proc/mm, even if host supports it.\n"
-" To support /proc/mm, the host needs to be patched using\n"
-" the current skas3 patch.\n\n");
-
-static int __init noptracefaultinfo_cmd_param(char *str, int* add)
-{
- disable_ptrace_faultinfo = 1;
- return 0;
-}
-
-__uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param,
-"noptracefaultinfo\n"
-" Turns off usage of PTRACE_FAULTINFO, even if host supports\n"
-" it. To support PTRACE_FAULTINFO, the host needs to be patched\n"
-" using the current skas3 patch.\n\n");
-
-static int __init noptraceldt_cmd_param(char *str, int* add)
-{
- disable_ptrace_ldt = 1;
- return 0;
-}
-
-__uml_setup("noptraceldt", noptraceldt_cmd_param,
-"noptraceldt\n"
-" Turns off usage of PTRACE_LDT, even if host supports it.\n"
-" To support PTRACE_LDT, the host needs to be patched using\n"
-" the current skas3 patch.\n\n");
-
-static inline void check_skas3_ptrace_faultinfo(void)
-{
- struct ptrace_faultinfo fi;
- int pid, n;
-
- non_fatal(" - PTRACE_FAULTINFO...");
- pid = start_ptraced_child();
-
- n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
- if (n < 0) {
- if (errno == EIO)
- non_fatal("not found\n");
- else
- perror("not found");
- } else if (disable_ptrace_faultinfo)
- non_fatal("found but disabled on command line\n");
- else {
- ptrace_faultinfo = 1;
- non_fatal("found\n");
- }
-
- stop_ptraced_child(pid, 1, 1);
-}
-
-static inline void check_skas3_ptrace_ldt(void)
-{
-#ifdef PTRACE_LDT
- int pid, n;
- unsigned char ldtbuf[40];
- struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
- .func = 2, /* read default ldt */
- .ptr = ldtbuf,
- .bytecount = sizeof(ldtbuf)};
-
- non_fatal(" - PTRACE_LDT...");
- pid = start_ptraced_child();
-
- n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
- if (n < 0) {
- if (errno == EIO)
- non_fatal("not found\n");
- else
- perror("not found");
- } else if (disable_ptrace_ldt)
- non_fatal("found, but use is disabled\n");
- else {
- ptrace_ldt = 1;
- non_fatal("found\n");
- }
-
- stop_ptraced_child(pid, 1, 1);
-#endif
-}
-
-static inline void check_skas3_proc_mm(void)
-{
- non_fatal(" - /proc/mm...");
- if (access("/proc/mm", W_OK) < 0)
- perror("not found");
- else if (disable_proc_mm)
- non_fatal("found but disabled on command line\n");
- else {
- proc_mm = 1;
- non_fatal("found\n");
- }
-}
-
-void can_do_skas(void)
-{
- non_fatal("Checking for the skas3 patch in the host:\n");
-
- check_skas3_proc_mm();
- check_skas3_ptrace_faultinfo();
- check_skas3_ptrace_ldt();
-
- if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt)
- skas_needs_stub = 1;
-}
-
int __init parse_iomem(char *str, int *add)
{
struct iomem_region *new;
diff --git a/arch/um/sys-ia64/Makefile b/arch/um/sys-ia64/Makefile
deleted file mode 100644
index d02f4c265232..000000000000
--- a/arch/um/sys-ia64/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-OBJ = built-in.o
-
-OBJS =
-
-all: $(OBJ)
-
-$(OBJ): $(OBJS)
- rm -f $@
- $(LD) $(LINKFLAGS) --start-group $^ --end-group -o $@
-
-clean-files := $(OBJS) link.ld
diff --git a/arch/um/sys-ia64/sysdep/ptrace.h b/arch/um/sys-ia64/sysdep/ptrace.h
deleted file mode 100644
index 0f0f4e6fd334..000000000000
--- a/arch/um/sys-ia64/sysdep/ptrace.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_IA64_PTRACE_H
-#define __SYSDEP_IA64_PTRACE_H
-
-struct sys_pt_regs {
- int foo;
-};
-
-#define EMPTY_REGS { 0 }
-
-#endif
-
diff --git a/arch/um/sys-ia64/sysdep/sigcontext.h b/arch/um/sys-ia64/sysdep/sigcontext.h
deleted file mode 100644
index 76b43161e779..000000000000
--- a/arch/um/sys-ia64/sysdep/sigcontext.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_IA64_SIGCONTEXT_H
-#define __SYSDEP_IA64_SIGCONTEXT_H
-
-#endif
-
diff --git a/arch/um/sys-ia64/sysdep/skas_ptrace.h b/arch/um/sys-ia64/sysdep/skas_ptrace.h
deleted file mode 100644
index 25a38e715702..000000000000
--- a/arch/um/sys-ia64/sysdep/skas_ptrace.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_IA64_SKAS_PTRACE_H
-#define __SYSDEP_IA64_SKAS_PTRACE_H
-
-struct ptrace_faultinfo {
- int is_write;
- unsigned long addr;
-};
-
-struct ptrace_ldt {
- int func;
- void *ptr;
- unsigned long bytecount;
-};
-
-#define PTRACE_LDT 54
-
-#endif
diff --git a/arch/um/sys-ia64/sysdep/syscalls.h b/arch/um/sys-ia64/sysdep/syscalls.h
deleted file mode 100644
index 5f6700c41558..000000000000
--- a/arch/um/sys-ia64/sysdep/syscalls.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_IA64_SYSCALLS_H
-#define __SYSDEP_IA64_SYSCALLS_H
-
-#endif
-
diff --git a/arch/um/sys-ppc/Makefile b/arch/um/sys-ppc/Makefile
deleted file mode 100644
index 20d363bd7004..000000000000
--- a/arch/um/sys-ppc/Makefile
+++ /dev/null
@@ -1,65 +0,0 @@
-OBJ = built-in.o
-
-.S.o:
- $(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
-
-OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \
- ptrace_user.o sysrq.o
-
-asflags-y := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
-
-all: $(OBJ)
-
-$(OBJ): $(OBJS)
- rm -f $@
- $(LD) $(LINKFLAGS) --start-group $^ --end-group -o $@
-
-ptrace_user.o: ptrace_user.c
- $(CC) -D__KERNEL__ $(USER_CFLAGS) $(ccflags-y) -c -o $@ $<
-
-sigcontext.o: sigcontext.c
- $(CC) $(USER_CFLAGS) $(ccflags-y) -c -o $@ $<
-
-checksum.S:
- rm -f $@
- ln -s $(srctree)/arch/ppc/lib/$@ $@
-
-mk_defs.c:
- rm -f $@
- ln -s $(srctree)/arch/ppc/kernel/$@ $@
-
-ppc_defs.head:
- rm -f $@
- ln -s $(srctree)/arch/ppc/kernel/$@ $@
-
-ppc_defs.h: mk_defs.c ppc_defs.head \
- $(srctree)/include/asm-ppc/mmu.h \
- $(srctree)/include/asm-ppc/processor.h \
- $(srctree)/include/asm-ppc/pgtable.h \
- $(srctree)/include/asm-ppc/ptrace.h
-# $(CC) $(CFLAGS) -S mk_defs.c
- cp ppc_defs.head ppc_defs.h
-# for bk, this way we can write to the file even if it's not checked out
- echo '#define THREAD 608' >> ppc_defs.h
- echo '#define PT_REGS 8' >> ppc_defs.h
- echo '#define CLONE_VM 256' >> ppc_defs.h
-# chmod u+w ppc_defs.h
-# grep '^#define' mk_defs.s >> ppc_defs.h
-# rm mk_defs.s
-
-# the asm link is horrible, and breaks the other targets. This is also
-# not going to work with parallel makes.
-
-checksum.o: checksum.S
- rm -f asm
- ln -s $(srctree)/include/asm-ppc asm
- $(CC) $(asflags-y) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
- rm -f asm
-
-misc.o: misc.S ppc_defs.h
- rm -f asm
- ln -s $(srctree)/include/asm-ppc asm
- $(CC) $(asflags-y) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
- rm -f asm
-
-clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c
diff --git a/arch/um/sys-ppc/asm/archparam.h b/arch/um/sys-ppc/asm/archparam.h
deleted file mode 100644
index 4269d8a37b4f..000000000000
--- a/arch/um/sys-ppc/asm/archparam.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __UM_ARCHPARAM_PPC_H
-#define __UM_ARCHPARAM_PPC_H
-
-/********* Bits for asm-um/string.h **********/
-
-#define __HAVE_ARCH_STRRCHR
-
-#endif
diff --git a/arch/um/sys-ppc/asm/elf.h b/arch/um/sys-ppc/asm/elf.h
deleted file mode 100644
index 8aacaf56508d..000000000000
--- a/arch/um/sys-ppc/asm/elf.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __UM_ELF_PPC_H
-#define __UM_ELF_PPC_H
-
-
-extern long elf_aux_hwcap;
-#define ELF_HWCAP (elf_aux_hwcap)
-
-#define SET_PERSONALITY(ex) do ; while(0)
-
-#define ELF_EXEC_PAGESIZE 4096
-
-#define elf_check_arch(x) (1)
-
-#ifdef CONFIG_64BIT
-#define ELF_CLASS ELFCLASS64
-#else
-#define ELF_CLASS ELFCLASS32
-#endif
-
-#define R_386_NONE 0
-#define R_386_32 1
-#define R_386_PC32 2
-#define R_386_GOT32 3
-#define R_386_PLT32 4
-#define R_386_COPY 5
-#define R_386_GLOB_DAT 6
-#define R_386_JMP_SLOT 7
-#define R_386_RELATIVE 8
-#define R_386_GOTOFF 9
-#define R_386_GOTPC 10
-#define R_386_NUM 11
-
-#define ELF_PLATFORM (0)
-
-#define ELF_ET_DYN_BASE (0x08000000)
-
-/* the following stolen from asm-ppc/elf.h */
-#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
-#define ELF_NFPREG 33 /* includes fpscr */
-/* General registers */
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-/* Floating point registers */
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-#define ELF_DATA ELFDATA2MSB
-#define ELF_ARCH EM_PPC
-
-#endif
diff --git a/arch/um/sys-ppc/asm/processor.h b/arch/um/sys-ppc/asm/processor.h
deleted file mode 100644
index 959323151229..000000000000
--- a/arch/um/sys-ppc/asm/processor.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __UM_PROCESSOR_PPC_H
-#define __UM_PROCESSOR_PPC_H
-
-#if defined(__ASSEMBLY__)
-
-#define CONFIG_PPC_MULTIPLATFORM
-#include "arch/processor.h"
-
-#else
-
-#include "asm/processor-generic.h"
-
-#endif
-
-#endif
diff --git a/arch/um/sys-ppc/misc.S b/arch/um/sys-ppc/misc.S
deleted file mode 100644
index 1364b7da578c..000000000000
--- a/arch/um/sys-ppc/misc.S
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * This file contains miscellaneous low-level functions.
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
- * and Paul Mackerras.
- *
- * A couple of functions stolen from arch/ppc/kernel/misc.S for UML
- * by Chris Emerson.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <asm/processor.h>
-#include "ppc_asm.h"
-
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx)
-#define CACHE_LINE_SIZE 16
-#define LG_CACHE_LINE_SIZE 4
-#define MAX_COPY_PREFETCH 1
-#else
-#define CACHE_LINE_SIZE 32
-#define LG_CACHE_LINE_SIZE 5
-#define MAX_COPY_PREFETCH 4
-#endif /* CONFIG_4xx || CONFIG_8xx */
-
- .text
-
-/*
- * Clear a page using the dcbz instruction, which doesn't cause any
- * memory traffic (except to write out any cache lines which get
- * displaced). This only works on cacheable memory.
- */
-_GLOBAL(clear_page)
- li r0,4096/CACHE_LINE_SIZE
- mtctr r0
-#ifdef CONFIG_8xx
- li r4, 0
-1: stw r4, 0(r3)
- stw r4, 4(r3)
- stw r4, 8(r3)
- stw r4, 12(r3)
-#else
-1: dcbz 0,r3
-#endif
- addi r3,r3,CACHE_LINE_SIZE
- bdnz 1b
- blr
-
-/*
- * Copy a whole page. We use the dcbz instruction on the destination
- * to reduce memory traffic (it eliminates the unnecessary reads of
- * the destination into cache). This requires that the destination
- * is cacheable.
- */
-#define COPY_16_BYTES \
- lwz r6,4(r4); \
- lwz r7,8(r4); \
- lwz r8,12(r4); \
- lwzu r9,16(r4); \
- stw r6,4(r3); \
- stw r7,8(r3); \
- stw r8,12(r3); \
- stwu r9,16(r3)
-
-_GLOBAL(copy_page)
- addi r3,r3,-4
- addi r4,r4,-4
- li r5,4
-
-#ifndef CONFIG_8xx
-#if MAX_COPY_PREFETCH > 1
- li r0,MAX_COPY_PREFETCH
- li r11,4
- mtctr r0
-11: dcbt r11,r4
- addi r11,r11,CACHE_LINE_SIZE
- bdnz 11b
-#else /* MAX_COPY_PREFETCH == 1 */
- dcbt r5,r4
- li r11,CACHE_LINE_SIZE+4
-#endif /* MAX_COPY_PREFETCH */
-#endif /* CONFIG_8xx */
-
- li r0,4096/CACHE_LINE_SIZE
- mtctr r0
-1:
-#ifndef CONFIG_8xx
- dcbt r11,r4
- dcbz r5,r3
-#endif
- COPY_16_BYTES
-#if CACHE_LINE_SIZE >= 32
- COPY_16_BYTES
-#if CACHE_LINE_SIZE >= 64
- COPY_16_BYTES
- COPY_16_BYTES
-#if CACHE_LINE_SIZE >= 128
- COPY_16_BYTES
- COPY_16_BYTES
- COPY_16_BYTES
- COPY_16_BYTES
-#endif
-#endif
-#endif
- bdnz 1b
- blr
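The comments in the deleted misc.S above spell out the trick both routines rely on: dcbz zeroes a whole cache line in place, so clear_page and copy_page can touch the destination without ever reading it from memory, which only works on cacheable pages. A rough C rendering of the clear_page loop structure, as an illustrative sketch only (dcbz has no portable C equivalent, so plain stores stand in for it):

	#include <string.h>

	#define PAGE_SIZE	4096
	#define CACHE_LINE_SIZE	32	/* 16 on 4xx/8xx, per the #ifdef above */

	/* walk the page one cache line at a time, like the "bdnz 1b" loop */
	static void clear_page_by_lines(void *page)
	{
		char *p = page;
		unsigned int i;

		for (i = 0; i < PAGE_SIZE / CACHE_LINE_SIZE; i++) {
			memset(p, 0, CACHE_LINE_SIZE);	/* stands in for "dcbz 0,r3" */
			p += CACHE_LINE_SIZE;
		}
	}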
diff --git a/arch/um/sys-ppc/miscthings.c b/arch/um/sys-ppc/miscthings.c
deleted file mode 100644
index 25908d26ce07..000000000000
--- a/arch/um/sys-ppc/miscthings.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/threads.h>
-#include <linux/stddef.h> // for NULL
-#include <linux/elf.h> // for AT_NULL
-
-/* The following function nicked from arch/ppc/kernel/process.c and
- * adapted slightly */
-/*
- * XXX ld.so expects the auxiliary table to start on
- * a 16-byte boundary, so we have to find it and
- * move it up. :-(
- */
-void shove_aux_table(unsigned long sp)
-{
- int argc;
- char *p;
- unsigned long e;
- unsigned long aux_start, offset;
-
- argc = *(int *)sp;
- sp += sizeof(int) + (argc + 1) * sizeof(char *);
- /* skip over the environment pointers */
- do {
- p = *(char **)sp;
- sp += sizeof(char *);
- } while (p != NULL);
- aux_start = sp;
- /* skip to the end of the auxiliary table */
- do {
- e = *(unsigned long *)sp;
- sp += 2 * sizeof(unsigned long);
- } while (e != AT_NULL);
- offset = ((aux_start + 15) & ~15) - aux_start;
- if (offset != 0) {
- do {
- sp -= sizeof(unsigned long);
- e = *(unsigned long *)sp;
- *(unsigned long *)(sp + offset) = e;
- } while (sp > aux_start);
- }
-}
-/* END stuff taken from arch/ppc/kernel/process.c */
-
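The comment block in the deleted miscthings.c explains the reason for the walk: ld.so expects the auxiliary vector to start on a 16-byte boundary, so shove_aux_table() skips argc, the argv and envp pointer arrays, finds the end of the auxv, and then copies it upward by the misalignment, highest word first so nothing is overwritten before it is read. A worked example of the alignment arithmetic, using an illustrative address that is not from the patch:

	unsigned long aux_start = 0x7ffffff8UL;	/* illustrative value */
	unsigned long offset;

	offset = ((aux_start + 15) & ~15UL) - aux_start;
	/* (0x7ffffff8 + 15) & ~15 = 0x80000000, so offset == 8 and every
	 * auxv word is copied 8 bytes higher */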
diff --git a/arch/um/sys-ppc/ptrace.c b/arch/um/sys-ppc/ptrace.c
deleted file mode 100644
index 8245df41b201..000000000000
--- a/arch/um/sys-ppc/ptrace.c
+++ /dev/null
@@ -1,58 +0,0 @@
-#include <linux/sched.h>
-#include "asm/ptrace.h"
-
-int putreg(struct task_struct *child, unsigned long regno,
- unsigned long value)
-{
- child->thread.process_regs.regs[regno >> 2] = value;
- return 0;
-}
-
-int poke_user(struct task_struct *child, long addr, long data)
-{
- if ((addr & 3) || addr < 0)
- return -EIO;
-
- if (addr < MAX_REG_OFFSET)
- return putreg(child, addr, data);
-
- else if((addr >= offsetof(struct user, u_debugreg[0])) &&
- (addr <= offsetof(struct user, u_debugreg[7]))){
- addr -= offsetof(struct user, u_debugreg[0]);
- addr = addr >> 2;
- if((addr == 4) || (addr == 5)) return -EIO;
- child->thread.arch.debugregs[addr] = data;
- return 0;
- }
- return -EIO;
-}
-
-unsigned long getreg(struct task_struct *child, unsigned long regno)
-{
- unsigned long retval = ~0UL;
-
- retval &= child->thread.process_regs.regs[regno >> 2];
- return retval;
-}
-
-int peek_user(struct task_struct *child, long addr, long data)
-{
- /* read the word at location addr in the USER area. */
- unsigned long tmp;
-
- if ((addr & 3) || addr < 0)
- return -EIO;
-
- tmp = 0; /* Default return condition */
- if(addr < MAX_REG_OFFSET){
- tmp = getreg(child, addr);
- }
- else if((addr >= offsetof(struct user, u_debugreg[0])) &&
- (addr <= offsetof(struct user, u_debugreg[7]))){
- addr -= offsetof(struct user, u_debugreg[0]);
- addr = addr >> 2;
- tmp = child->thread.arch.debugregs[addr];
- }
- return put_user(tmp, (unsigned long *) data);
-}
-
diff --git a/arch/um/sys-ppc/ptrace_user.c b/arch/um/sys-ppc/ptrace_user.c
deleted file mode 100644
index 4601b9296aa7..000000000000
--- a/arch/um/sys-ppc/ptrace_user.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <errno.h>
-#include <asm/ptrace.h>
-#include <sysdep/ptrace.h>
-
-int ptrace_getregs(long pid, unsigned long *regs_out)
-{
- int i;
- for (i=0; i < sizeof(struct sys_pt_regs)/sizeof(PPC_REG); ++i) {
- errno = 0;
- regs_out->regs[i] = ptrace(PTRACE_PEEKUSR, pid, i*4, 0);
- if (errno) {
- return -errno;
- }
- }
- return 0;
-}
-
-int ptrace_setregs(long pid, unsigned long *regs_in)
-{
- int i;
- for (i=0; i < sizeof(struct sys_pt_regs)/sizeof(PPC_REG); ++i) {
- if (i != 34 /* FIXME: PT_ORIG_R3 */ && i <= PT_MQ) {
- if (ptrace(PTRACE_POKEUSR, pid, i*4, regs_in->regs[i]) < 0) {
- return -errno;
- }
- }
- }
- return 0;
-}
diff --git a/arch/um/sys-ppc/shared/sysdep/ptrace.h b/arch/um/sys-ppc/shared/sysdep/ptrace.h
deleted file mode 100644
index efe0c1a3ea9c..000000000000
--- a/arch/um/sys-ppc/shared/sysdep/ptrace.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed under the GPL
- */
-
-#ifndef __SYS_PTRACE_PPC_H
-#define __SYS_PTRACE_PPC_H
-
-#include <linux/types.h>
-
-/* the following taken from <asm-ppc/ptrace.h> */
-
-#ifdef CONFIG_PPC64
-#define PPC_REG unsigned long /*long*/
-#else
-#define PPC_REG unsigned long
-#endif
-struct sys_pt_regs_s {
- PPC_REG gpr[32];
- PPC_REG nip;
- PPC_REG msr;
- PPC_REG orig_gpr3; /* Used for restarting system calls */
- PPC_REG ctr;
- PPC_REG link;
- PPC_REG xer;
- PPC_REG ccr;
- PPC_REG mq; /* 601 only (not used at present) */
- /* Used on APUS to hold IPL value. */
- PPC_REG trap; /* Reason for being here */
- PPC_REG dar; /* Fault registers */
- PPC_REG dsisr;
- PPC_REG result; /* Result of a system call */
-};
-
-#define NUM_REGS (sizeof(struct sys_pt_regs_s) / sizeof(PPC_REG))
-
-struct sys_pt_regs {
- PPC_REG regs[sizeof(struct sys_pt_regs_s) / sizeof(PPC_REG)];
-};
-
-#define UM_MAX_REG (PT_FPR0)
-#define UM_MAX_REG_OFFSET (UM_MAX_REG * sizeof(PPC_REG))
-
-#define EMPTY_REGS { { [ 0 ... NUM_REGS - 1] = 0 } }
-
-#define UM_REG(r, n) ((r)->regs[n])
-
-#define UM_SYSCALL_RET(r) UM_REG(r, PT_R3)
-#define UM_SP(r) UM_REG(r, PT_R1)
-#define UM_IP(r) UM_REG(r, PT_NIP)
-#define UM_ELF_ZERO(r) UM_REG(r, PT_FPSCR)
-#define UM_SYSCALL_NR(r) UM_REG(r, PT_R0)
-#define UM_SYSCALL_ARG1(r) UM_REG(r, PT_ORIG_R3)
-#define UM_SYSCALL_ARG2(r) UM_REG(r, PT_R4)
-#define UM_SYSCALL_ARG3(r) UM_REG(r, PT_R5)
-#define UM_SYSCALL_ARG4(r) UM_REG(r, PT_R6)
-#define UM_SYSCALL_ARG5(r) UM_REG(r, PT_R7)
-#define UM_SYSCALL_ARG6(r) UM_REG(r, PT_R8)
-
-#define UM_SYSCALL_NR_OFFSET (PT_R0 * sizeof(PPC_REG))
-#define UM_SYSCALL_RET_OFFSET (PT_R3 * sizeof(PPC_REG))
-#define UM_SYSCALL_ARG1_OFFSET (PT_R3 * sizeof(PPC_REG))
-#define UM_SYSCALL_ARG2_OFFSET (PT_R4 * sizeof(PPC_REG))
-#define UM_SYSCALL_ARG3_OFFSET (PT_R5 * sizeof(PPC_REG))
-#define UM_SYSCALL_ARG4_OFFSET (PT_R6 * sizeof(PPC_REG))
-#define UM_SYSCALL_ARG5_OFFSET (PT_R7 * sizeof(PPC_REG))
-#define UM_SYSCALL_ARG6_OFFSET (PT_R8 * sizeof(PPC_REG))
-#define UM_SP_OFFSET (PT_R1 * sizeof(PPC_REG))
-#define UM_IP_OFFSET (PT_NIP * sizeof(PPC_REG))
-#define UM_ELF_ZERO_OFFSET (PT_R3 * sizeof(PPC_REG))
-
-#define UM_SET_SYSCALL_RETURN(_regs, result) \
-do { \
- if (result < 0) { \
- (_regs)->regs[PT_CCR] |= 0x10000000; \
- UM_SYSCALL_RET((_regs)) = -result; \
- } else { \
- UM_SYSCALL_RET((_regs)) = result; \
- } \
-} while(0)
-
-extern void shove_aux_table(unsigned long sp);
-#define UM_FIX_EXEC_STACK(sp) shove_aux_table(sp);
-
-/* These aren't actually defined. The undefs are just to make sure
- * everyone's clear on the concept.
- */
-#undef UML_HAVE_GETREGS
-#undef UML_HAVE_GETFPREGS
-#undef UML_HAVE_SETREGS
-#undef UML_HAVE_SETFPREGS
-
-#endif
-
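UM_SET_SYSCALL_RETURN above encodes the PPC user-visible syscall convention: a failure is reported as a positive errno in r3 with the summary-overflow bit of cr0 set, a success as the plain value in r3. A small worked example of what the macro stores, using an illustrative handler result (PT_R3 and PT_CCR are the same ptrace offsets the header uses, and the struct comes from the definitions above):

	struct sys_pt_regs regs = EMPTY_REGS;
	long result = -ENOMEM;			/* example handler return value */

	if (result < 0) {
		regs.regs[PT_CCR] |= 0x10000000;	/* cr0.SO = 1: syscall failed */
		regs.regs[PT_R3] = -result;		/* r3 = 12, the positive errno */
	} else {
		regs.regs[PT_R3] = result;		/* r3 = plain return value */
	}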
diff --git a/arch/um/sys-ppc/shared/sysdep/sigcontext.h b/arch/um/sys-ppc/shared/sysdep/sigcontext.h
deleted file mode 100644
index b7286f0a1e00..000000000000
--- a/arch/um/sys-ppc/shared/sysdep/sigcontext.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYS_SIGCONTEXT_PPC_H
-#define __SYS_SIGCONTEXT_PPC_H
-
-#define DSISR_WRITE 0x02000000
-
-#define SC_FAULT_ADDR(sc) ({ \
- struct sigcontext *_sc = (sc); \
- long retval = -1; \
- switch (_sc->regs->trap) { \
- case 0x300: \
- /* data exception */ \
- retval = _sc->regs->dar; \
- break; \
- case 0x400: \
- /* instruction exception */ \
- retval = _sc->regs->nip; \
- break; \
- default: \
- panic("SC_FAULT_ADDR: unhandled trap type\n"); \
- } \
- retval; \
- })
-
-#define SC_FAULT_WRITE(sc) ({ \
- struct sigcontext *_sc = (sc); \
- long retval = -1; \
- switch (_sc->regs->trap) { \
- case 0x300: \
- /* data exception */ \
- retval = !!(_sc->regs->dsisr & DSISR_WRITE); \
- break; \
- case 0x400: \
- /* instruction exception: not a write */ \
- retval = 0; \
- break; \
- default: \
- panic("SC_FAULT_ADDR: unhandled trap type\n"); \
- } \
- retval; \
- })
-
-#define SC_IP(sc) ((sc)->regs->nip)
-#define SC_SP(sc) ((sc)->regs->gpr[1])
-#define SEGV_IS_FIXABLE(sc) (1)
-
-#endif
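The two macros above decode a PPC fault from the signal frame: a 0x300 trap is a data access fault whose address sits in DAR and whose direction is the DSISR_WRITE bit of DSISR, while a 0x400 trap is an instruction fetch fault at NIP and is never a write. A minimal, hypothetical consumer just to show the intended use (the helper name and out-parameters are assumptions, not from the patch):

	/* hypothetical helper illustrating how the macros are meant to be used */
	static void decode_fault(struct sigcontext *sc,
				 unsigned long *addr, int *is_write)
	{
		*addr = SC_FAULT_ADDR(sc);	/* DAR for trap 0x300, NIP for 0x400 */
		*is_write = SC_FAULT_WRITE(sc);	/* DSISR_WRITE bit, data faults only */
	}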
-
diff --git a/arch/um/sys-ppc/shared/sysdep/skas_ptrace.h b/arch/um/sys-ppc/shared/sysdep/skas_ptrace.h
deleted file mode 100644
index d9fbbac10de0..000000000000
--- a/arch/um/sys-ppc/shared/sysdep/skas_ptrace.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_PPC_SKAS_PTRACE_H
-#define __SYSDEP_PPC_SKAS_PTRACE_H
-
-struct ptrace_faultinfo {
- int is_write;
- unsigned long addr;
-};
-
-struct ptrace_ldt {
- int func;
- void *ptr;
- unsigned long bytecount;
-};
-
-#define PTRACE_LDT 54
-
-#endif
diff --git a/arch/um/sys-ppc/shared/sysdep/syscalls.h b/arch/um/sys-ppc/shared/sysdep/syscalls.h
deleted file mode 100644
index 1ff81552251c..000000000000
--- a/arch/um/sys-ppc/shared/sysdep/syscalls.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-typedef long syscall_handler_t(unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4,
- unsigned long arg5, unsigned long arg6);
-
-#define EXECUTE_SYSCALL(syscall, regs) \
- (*sys_call_table[syscall])(UM_SYSCALL_ARG1(&regs), \
- UM_SYSCALL_ARG2(&regs), \
- UM_SYSCALL_ARG3(&regs), \
- UM_SYSCALL_ARG4(&regs), \
- UM_SYSCALL_ARG5(&regs), \
- UM_SYSCALL_ARG6(&regs))
-
-extern syscall_handler_t sys_mincore;
-extern syscall_handler_t sys_madvise;
-
-/* old_mmap needs the correct prototype since syscall_kern.c includes
- * this file.
- */
-int old_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long offset);
-
-#define ARCH_SYSCALLS \
- [ __NR_modify_ldt ] = sys_ni_syscall, \
- [ __NR_pciconfig_read ] = sys_ni_syscall, \
- [ __NR_pciconfig_write ] = sys_ni_syscall, \
- [ __NR_pciconfig_iobase ] = sys_ni_syscall, \
- [ __NR_pivot_root ] = sys_ni_syscall, \
- [ __NR_multiplexer ] = sys_ni_syscall, \
- [ __NR_mmap ] = old_mmap, \
- [ __NR_madvise ] = sys_madvise, \
- [ __NR_mincore ] = sys_mincore, \
- [ __NR_iopl ] = (syscall_handler_t *) sys_ni_syscall, \
- [ __NR_utimes ] = (syscall_handler_t *) sys_utimes, \
- [ __NR_fadvise64 ] = (syscall_handler_t *) sys_fadvise64,
-
-#define LAST_ARCH_SYSCALL __NR_fadvise64
-
diff --git a/arch/um/sys-ppc/sigcontext.c b/arch/um/sys-ppc/sigcontext.c
deleted file mode 100644
index aac6c83fe44e..000000000000
--- a/arch/um/sys-ppc/sigcontext.c
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "asm/ptrace.h"
-#include "asm/sigcontext.h"
-#include <sysdep/ptrace.h>
-
diff --git a/arch/um/sys-ppc/sysrq.c b/arch/um/sys-ppc/sysrq.c
deleted file mode 100644
index 1ff1ad7f27da..000000000000
--- a/arch/um/sys-ppc/sysrq.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
- * Licensed under the GPL
- */
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include "asm/ptrace.h"
-#include "sysrq.h"
-
-void show_regs(struct pt_regs_subarch *regs)
-{
- printk("\n");
- show_regs_print_info(KERN_DEFAULT);
-
- printk("show_regs(): insert regs here.\n");
-#if 0
- printk("\n");
- printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs, regs->eip,
- smp_processor_id());
- if (regs->xcs & 3)
- printk(" ESP: %04x:%08lx",0xffff & regs->xss, regs->esp);
- printk(" EFLAGS: %08lx\n", regs->eflags);
- printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
- regs->eax, regs->ebx, regs->ecx, regs->edx);
- printk("ESI: %08lx EDI: %08lx EBP: %08lx",
- regs->esi, regs->edi, regs->ebp);
- printk(" DS: %04x ES: %04x\n",
- 0xffff & regs->xds, 0xffff & regs->xes);
-#endif
-
- show_trace(current, &regs->gpr[1]);
-}
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index 63e2839dfeb8..e79ad6d5b5b2 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
struct task_struct;
-struct exec_domain;
#include <asm/types.h>
@@ -71,7 +70,6 @@ struct thread_info {
/* <0 => bug */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
__u32 cpu; /* cpu */
struct cpu_context_save cpu_context; /* cpu context */
__u32 syscall; /* syscall number */
@@ -84,7 +82,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
diff --git a/arch/unicore32/kernel/asm-offsets.c b/arch/unicore32/kernel/asm-offsets.c
index ffcbe7536ca7..80d50c4651e3 100644
--- a/arch/unicore32/kernel/asm-offsets.c
+++ b/arch/unicore32/kernel/asm-offsets.c
@@ -42,7 +42,6 @@ int main(void)
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c
index d329f85766cc..4ae51cf15ade 100644
--- a/arch/unicore32/kernel/signal.c
+++ b/arch/unicore32/kernel/signal.c
@@ -330,13 +330,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs,
}
/*
- * translate the signal
- */
- if (usig < 32 && thread->exec_domain
- && thread->exec_domain->signal_invmap)
- usig = thread->exec_domain->signal_invmap[usig];
-
- /*
* Set up the stack frame
*/
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index faff6934c05a..226d5696e1d1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@ config X86_64
### Arch settings
config X86
def_bool y
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_FAST_MULTIPLIER
@@ -87,7 +88,7 @@ config X86
select HAVE_ARCH_KMEMCHECK
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
select HAVE_USER_RETURN_NOTIFIER
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select SPARSE_IRQ
@@ -99,6 +100,7 @@ config X86
select IRQ_FORCED_THREADING
select HAVE_BPF_JIT if X86_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_HUGE_VMAP if X86_64 || (X86_32 && X86_PAE)
select ARCH_HAS_SG_CHAIN
select CLKEVT_I8253
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -177,7 +179,7 @@ config SBUS
config NEED_DMA_MAP_STATE
def_bool y
- depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
+ depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
config NEED_SG_DMA_LENGTH
def_bool y
@@ -277,6 +279,12 @@ config ARCH_SUPPORTS_UPROBES
config FIX_EARLYCON_MEM
def_bool y
+config PGTABLE_LEVELS
+ int
+ default 4 if X86_64
+ default 3 if X86_PAE
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -714,17 +722,6 @@ endif #HYPERVISOR_GUEST
config NO_BOOTMEM
def_bool y
-config MEMTEST
- bool "Memtest"
- ---help---
- This option adds a kernel parameter 'memtest', which allows memtest
- to be set.
- memtest=0, mean disabled; -- default
- memtest=1, mean do 1 test pattern;
- ...
- memtest=4, mean do 4 test patterns.
- If you are unsure how to answer this question, answer N.
-
source "arch/x86/Kconfig.cpu"
config HPET_TIMER
@@ -1425,6 +1422,16 @@ config ILLEGAL_POINTER_VALUE
source "mm/Kconfig"
+config X86_PMEM_LEGACY
+ bool "Support non-standard NVDIMMs and ADR protected memory"
+ help
+ Treat memory marked using the non-standard e820 type of 12 as used
+ by the Intel Sandy Bridge-EP reference BIOS as protected memory.
+ The kernel will offer these regions to the 'pmem' driver so
+ they can be used for persistent storage.
+
+ Say Y if unsure.
+
config HIGHPTE
bool "Allocate 3rd-level pagetables from highmem"
depends on HIGHMEM
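The PGTABLE_LEVELS symbol added above gives generic code one number to key on: 4 on x86-64, 3 with PAE, 2 on classic 32-bit. A minimal sketch of how a consumer can branch on it at preprocessor time (the symbol is the one defined above; the comments describing each layout are illustrative):

	#if CONFIG_PGTABLE_LEVELS > 3
		/* four levels: pgd -> pud -> pmd -> pte (x86-64) */
	#elif CONFIG_PGTABLE_LEVELS > 2
		/* three levels: pgd -> pmd -> pte (x86 PAE) */
	#else
		/* two levels: pgd -> pte (classic 32-bit) */
	#endif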
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 20028da8ae18..72484a645f05 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -43,10 +43,6 @@ config EARLY_PRINTK
with klogd/syslogd or the X server. You should normally say N here,
unless you want to debug such a crash.
-config EARLY_PRINTK_INTEL_MID
- bool "Early printk for Intel MID platform support"
- depends on EARLY_PRINTK && X86_INTEL_MID
-
config EARLY_PRINTK_DBGP
bool "Early printk via EHCI debug port"
depends on EARLY_PRINTK && PCI
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5ba2d9ce82dc..2fda005bb334 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -63,7 +63,7 @@ ifeq ($(CONFIG_X86_32),y)
$(call cc-option,-fno-unit-at-a-time))
# CPU-specific tuning. Anything which can be shared with UML should go here.
- include $(srctree)/arch/x86/Makefile_32.cpu
+ include arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
# temporary until string.h is fixed
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 95eba554baf9..5b7e898ffd9a 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -18,7 +18,7 @@ LDS_EXTRA := -Ui386
export LDS_EXTRA
# First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
-include $(srctree)/arch/x86/Makefile_32.cpu
+include arch/x86/Makefile_32.cpu
# prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ef17683484e9..48304b89b601 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c)
if (!cmdline_ptr)
goto fail;
hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
+ /* Fill in upper bits of command line address, NOP on 32 bit */
+ boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
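The added line above completes the split of a possibly 64-bit command-line address across the two boot_params fields: hdr->cmd_line_ptr keeps the low 32 bits and ext_cmd_line_ptr now carries the high 32 bits, so the shift is a no-op on 32-bit builds. Worked through with an illustrative address (values are not from the patch):

	u64 addr = 0x000000012345a000ULL;		/* illustrative pointer value */
	u32 cmd_line_ptr     = (u32)addr;		/* 0x2345a000, low 32 bits */
	u32 ext_cmd_line_ptr = (u32)(addr >> 32);	/* 0x00000001, high 32 bits */

	/* the reading side can reassemble the full address as
	 *	((u64)ext_cmd_line_ptr << 32) | cmd_line_ptr
	 */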
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 54f60ab41c63..112cefacf2af 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -797,7 +797,9 @@ static int rfc4106_init(struct crypto_tfm *tfm)
PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
struct crypto_aead *cryptd_child;
struct aesni_rfc4106_gcm_ctx *child_ctx;
- cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
+ cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
@@ -890,15 +892,12 @@ out_free_ablkcipher:
return ret;
}
-static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
- unsigned int key_len)
+static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
+ unsigned int key_len)
{
int ret = 0;
- struct crypto_tfm *tfm = crypto_aead_tfm(parent);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
- struct aesni_rfc4106_gcm_ctx *child_ctx =
- aesni_rfc4106_gcm_ctx_get(cryptd_child);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
u8 *new_key_align, *new_key_mem = NULL;
if (key_len < 4) {
@@ -943,20 +942,31 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
goto exit;
}
ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
- memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
kfree(new_key_mem);
return ret;
}
-/* This is the Integrity Check Value (aka the authentication tag length and can
- * be 8, 12 or 16 bytes long. */
-static int rfc4106_set_authsize(struct crypto_aead *parent,
- unsigned int authsize)
+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
+ unsigned int key_len)
{
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+ struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
+ struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
+ struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
+ int ret;
+ ret = crypto_aead_setkey(child, key, key_len);
+ if (!ret) {
+ memcpy(ctx, c_ctx, sizeof(*ctx));
+ ctx->cryptd_tfm = cryptd_tfm;
+ }
+ return ret;
+}
+
+static int common_rfc4106_set_authsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
switch (authsize) {
case 8:
case 12:
@@ -965,51 +975,23 @@ static int rfc4106_set_authsize(struct crypto_aead *parent,
default:
return -EINVAL;
}
- crypto_aead_crt(parent)->authsize = authsize;
- crypto_aead_crt(cryptd_child)->authsize = authsize;
+ crypto_aead_crt(aead)->authsize = authsize;
return 0;
}
-static int rfc4106_encrypt(struct aead_request *req)
-{
- int ret;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
-
- if (!irq_fpu_usable()) {
- struct aead_request *cryptd_req =
- (struct aead_request *) aead_request_ctx(req);
- memcpy(cryptd_req, req, sizeof(*req));
- aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
- return crypto_aead_encrypt(cryptd_req);
- } else {
- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
- kernel_fpu_begin();
- ret = cryptd_child->base.crt_aead.encrypt(req);
- kernel_fpu_end();
- return ret;
- }
-}
-
-static int rfc4106_decrypt(struct aead_request *req)
+/* This is the Integrity Check Value (aka the authentication tag length and can
+ * be 8, 12 or 16 bytes long. */
+static int rfc4106_set_authsize(struct crypto_aead *parent,
+ unsigned int authsize)
{
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+ struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
int ret;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
- if (!irq_fpu_usable()) {
- struct aead_request *cryptd_req =
- (struct aead_request *) aead_request_ctx(req);
- memcpy(cryptd_req, req, sizeof(*req));
- aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
- return crypto_aead_decrypt(cryptd_req);
- } else {
- struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
- kernel_fpu_begin();
- ret = cryptd_child->base.crt_aead.decrypt(req);
- kernel_fpu_end();
- return ret;
- }
+ ret = crypto_aead_setauthsize(child, authsize);
+ if (!ret)
+ crypto_aead_crt(parent)->authsize = authsize;
+ return ret;
}
static int __driver_rfc4106_encrypt(struct aead_request *req)
@@ -1185,6 +1167,78 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
}
return retval;
}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+ int ret;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+
+ if (!irq_fpu_usable()) {
+ struct aead_request *cryptd_req =
+ (struct aead_request *) aead_request_ctx(req);
+
+ memcpy(cryptd_req, req, sizeof(*req));
+ aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+ ret = crypto_aead_encrypt(cryptd_req);
+ } else {
+ kernel_fpu_begin();
+ ret = __driver_rfc4106_encrypt(req);
+ kernel_fpu_end();
+ }
+ return ret;
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+ int ret;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+
+ if (!irq_fpu_usable()) {
+ struct aead_request *cryptd_req =
+ (struct aead_request *) aead_request_ctx(req);
+
+ memcpy(cryptd_req, req, sizeof(*req));
+ aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+ ret = crypto_aead_decrypt(cryptd_req);
+ } else {
+ kernel_fpu_begin();
+ ret = __driver_rfc4106_decrypt(req);
+ kernel_fpu_end();
+ }
+ return ret;
+}
+
+static int helper_rfc4106_encrypt(struct aead_request *req)
+{
+ int ret;
+
+ if (unlikely(!irq_fpu_usable())) {
+ WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
+ ret = -EINVAL;
+ } else {
+ kernel_fpu_begin();
+ ret = __driver_rfc4106_encrypt(req);
+ kernel_fpu_end();
+ }
+ return ret;
+}
+
+static int helper_rfc4106_decrypt(struct aead_request *req)
+{
+ int ret;
+
+ if (unlikely(!irq_fpu_usable())) {
+ WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
+ ret = -EINVAL;
+ } else {
+ kernel_fpu_begin();
+ ret = __driver_rfc4106_decrypt(req);
+ kernel_fpu_end();
+ }
+ return ret;
+}
#endif
static struct crypto_alg aesni_algs[] = { {
@@ -1210,7 +1264,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_name = "__aes-aesni",
.cra_driver_name = "__driver-aes-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
AESNI_ALIGN - 1,
@@ -1229,7 +1283,8 @@ static struct crypto_alg aesni_algs[] = { {
.cra_name = "__ecb-aes-aesni",
.cra_driver_name = "__driver-ecb-aes-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
AESNI_ALIGN - 1,
@@ -1249,7 +1304,8 @@ static struct crypto_alg aesni_algs[] = { {
.cra_name = "__cbc-aes-aesni",
.cra_driver_name = "__driver-cbc-aes-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
AESNI_ALIGN - 1,
@@ -1313,7 +1369,8 @@ static struct crypto_alg aesni_algs[] = { {
.cra_name = "__ctr-aes-aesni",
.cra_driver_name = "__driver-ctr-aes-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
AESNI_ALIGN - 1,
@@ -1357,7 +1414,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_name = "__gcm-aes-aesni",
.cra_driver_name = "__driver-gcm-aes-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
AESNI_ALIGN,
@@ -1366,8 +1423,12 @@ static struct crypto_alg aesni_algs[] = { {
.cra_module = THIS_MODULE,
.cra_u = {
.aead = {
- .encrypt = __driver_rfc4106_encrypt,
- .decrypt = __driver_rfc4106_decrypt,
+ .setkey = common_rfc4106_set_key,
+ .setauthsize = common_rfc4106_set_authsize,
+ .encrypt = helper_rfc4106_encrypt,
+ .decrypt = helper_rfc4106_decrypt,
+ .ivsize = 8,
+ .maxauthsize = 16,
},
},
}, {
@@ -1423,7 +1484,8 @@ static struct crypto_alg aesni_algs[] = { {
.cra_name = "__lrw-aes-aesni",
.cra_driver_name = "__driver-lrw-aes-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesni_lrw_ctx),
.cra_alignmask = 0,
@@ -1444,7 +1506,8 @@ static struct crypto_alg aesni_algs[] = { {
.cra_name = "__xts-aes-aesni",
.cra_driver_name = "__driver-xts-aes-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesni_xts_ctx),
.cra_alignmask = 0,
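The recurring change in this and the following glue files is CRYPTO_ALG_INTERNAL: the "__driver-*" and "__gcm-aes-aesni" helpers must only run with the FPU already secured, so they are flagged internal and the code that instantiates them (cryptd_alloc_aead() above, cryptd_alloc_ahash() and mcryptd_alloc_ahash() further down) now has to request that flag explicitly. Ordinary users keep going through the public wrapper; a minimal sketch of that side, assuming the standard AEAD API behaves as usual:

	/* ordinary user view: allocate the public wrapper, not a __driver helper */
	struct crypto_aead *tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ICV, i.e. authentication tag length: the setauthsize switch above
	 * accepts exactly 8, 12 or 16 bytes */
	err = crypto_aead_setauthsize(tfm, 16);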
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 9a07fafe3831..baf0ac21ace5 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -343,7 +343,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__ecb-camellia-aesni-avx2",
.cra_driver_name = "__driver-ecb-camellia-aesni-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
@@ -362,7 +363,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__cbc-camellia-aesni-avx2",
.cra_driver_name = "__driver-cbc-camellia-aesni-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
@@ -381,7 +383,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__ctr-camellia-aesni-avx2",
.cra_driver_name = "__driver-ctr-camellia-aesni-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
@@ -401,7 +404,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__lrw-camellia-aesni-avx2",
.cra_driver_name = "__driver-lrw-camellia-aesni-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_lrw_ctx),
.cra_alignmask = 0,
@@ -424,7 +428,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__xts-camellia-aesni-avx2",
.cra_driver_name = "__driver-xts-camellia-aesni-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_xts_ctx),
.cra_alignmask = 0,
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index ed38d959add6..78818a1e73e3 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -335,7 +335,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__ecb-camellia-aesni",
.cra_driver_name = "__driver-ecb-camellia-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
@@ -354,7 +355,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__cbc-camellia-aesni",
.cra_driver_name = "__driver-cbc-camellia-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
@@ -373,7 +375,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__ctr-camellia-aesni",
.cra_driver_name = "__driver-ctr-camellia-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
@@ -393,7 +396,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__lrw-camellia-aesni",
.cra_driver_name = "__driver-lrw-camellia-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_lrw_ctx),
.cra_alignmask = 0,
@@ -416,7 +420,8 @@ static struct crypto_alg cmll_algs[10] = { {
.cra_name = "__xts-camellia-aesni",
.cra_driver_name = "__driver-xts-camellia-aesni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_xts_ctx),
.cra_alignmask = 0,
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 60ada677a928..236c80974457 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -341,7 +341,8 @@ static struct crypto_alg cast5_algs[6] = { {
.cra_name = "__ecb-cast5-avx",
.cra_driver_name = "__driver-ecb-cast5-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 0,
@@ -360,7 +361,8 @@ static struct crypto_alg cast5_algs[6] = { {
.cra_name = "__cbc-cast5-avx",
.cra_driver_name = "__driver-cbc-cast5-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 0,
@@ -379,7 +381,8 @@ static struct crypto_alg cast5_algs[6] = { {
.cra_name = "__ctr-cast5-avx",
.cra_driver_name = "__driver-ctr-cast5-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 0,
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 0160f68a57ff..f448810ca4ac 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -372,7 +372,8 @@ static struct crypto_alg cast6_algs[10] = { {
.cra_name = "__ecb-cast6-avx",
.cra_driver_name = "__driver-ecb-cast6-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 0,
@@ -391,7 +392,8 @@ static struct crypto_alg cast6_algs[10] = { {
.cra_name = "__cbc-cast6-avx",
.cra_driver_name = "__driver-cbc-cast6-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 0,
@@ -410,7 +412,8 @@ static struct crypto_alg cast6_algs[10] = { {
.cra_name = "__ctr-cast6-avx",
.cra_driver_name = "__driver-ctr-cast6-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 0,
@@ -430,7 +433,8 @@ static struct crypto_alg cast6_algs[10] = { {
.cra_name = "__lrw-cast6-avx",
.cra_driver_name = "__driver-lrw-cast6-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_lrw_ctx),
.cra_alignmask = 0,
@@ -453,7 +457,8 @@ static struct crypto_alg cast6_algs[10] = { {
.cra_name = "__xts-cast6-avx",
.cra_driver_name = "__driver-xts-cast6-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_xts_ctx),
.cra_alignmask = 0,
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 8253d85aa165..2079baf06bdd 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -154,7 +154,8 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__ghash-pclmulqdqni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -261,7 +262,9 @@ static int ghash_async_init_tfm(struct crypto_tfm *tfm)
struct cryptd_ahash *cryptd_tfm;
struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
- cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni", 0, 0);
+ cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 432f1d76ceb8..6a85598931b5 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -232,7 +232,6 @@ static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
le128_to_be128((be128 *)walk->iv, &ctrblk);
}
-EXPORT_SYMBOL_GPL(glue_ctr_crypt_final_128bit);
static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 437e47a4d302..2f63dc89e7a9 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -309,7 +309,8 @@ static struct crypto_alg srp_algs[10] = { {
.cra_name = "__ecb-serpent-avx2",
.cra_driver_name = "__driver-ecb-serpent-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -329,7 +330,8 @@ static struct crypto_alg srp_algs[10] = { {
.cra_name = "__cbc-serpent-avx2",
.cra_driver_name = "__driver-cbc-serpent-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -349,7 +351,8 @@ static struct crypto_alg srp_algs[10] = { {
.cra_name = "__ctr-serpent-avx2",
.cra_driver_name = "__driver-ctr-serpent-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -370,7 +373,8 @@ static struct crypto_alg srp_algs[10] = { {
.cra_name = "__lrw-serpent-avx2",
.cra_driver_name = "__driver-lrw-serpent-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
.cra_alignmask = 0,
@@ -394,7 +398,8 @@ static struct crypto_alg srp_algs[10] = { {
.cra_name = "__xts-serpent-avx2",
.cra_driver_name = "__driver-xts-serpent-avx2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_xts_ctx),
.cra_alignmask = 0,
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 7e217398b4eb..c8d478af8456 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -378,7 +378,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__ecb-serpent-avx",
.cra_driver_name = "__driver-ecb-serpent-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -397,7 +398,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__cbc-serpent-avx",
.cra_driver_name = "__driver-cbc-serpent-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -416,7 +418,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__ctr-serpent-avx",
.cra_driver_name = "__driver-ctr-serpent-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -436,7 +439,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__lrw-serpent-avx",
.cra_driver_name = "__driver-lrw-serpent-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
.cra_alignmask = 0,
@@ -459,7 +463,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__xts-serpent-avx",
.cra_driver_name = "__driver-xts-serpent-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_xts_ctx),
.cra_alignmask = 0,
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index bf025adaea01..3643dd508f45 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -387,7 +387,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__ecb-serpent-sse2",
.cra_driver_name = "__driver-ecb-serpent-sse2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -406,7 +407,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__cbc-serpent-sse2",
.cra_driver_name = "__driver-cbc-serpent-sse2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -425,7 +427,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__ctr-serpent-sse2",
.cra_driver_name = "__driver-ctr-serpent-sse2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 0,
@@ -445,7 +448,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__lrw-serpent-sse2",
.cra_driver_name = "__driver-lrw-serpent-sse2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
.cra_alignmask = 0,
@@ -468,7 +472,8 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_name = "__xts-serpent-sse2",
.cra_driver_name = "__driver-xts-serpent-sse2",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_xts_ctx),
.cra_alignmask = 0,
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index fd9f6b035b16..e510b1c5d690 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -694,7 +694,8 @@ static struct shash_alg sha1_mb_shash_alg = {
* use ASYNC flag as some buffers in multi-buffer
* algo may not have completed before hashing thread sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
@@ -770,7 +771,9 @@ static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
struct mcryptd_hash_ctx *mctx;
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 0, 0);
+ mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
if (IS_ERR(mcryptd_tfm))
return PTR_ERR(mcryptd_tfm);
mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
@@ -828,7 +831,7 @@ static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
while (!list_empty(&cstate->work_list)) {
rctx = list_entry(cstate->work_list.next,
struct mcryptd_hash_request_ctx, waiter);
- if time_before(cur_time, rctx->tag.expire)
+ if (time_before(cur_time, rctx->tag.expire))
break;
kernel_fpu_begin();
sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c
index 4ca7e166a2aa..822acb5b464c 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c
@@ -56,7 +56,7 @@
void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
{
unsigned int j;
- state->unused_lanes = 0xF76543210;
+ state->unused_lanes = 0xF76543210ULL;
for (j = 0; j < 8; j++) {
state->lens[j] = 0xFFFFFFFF;
state->ldata[j].job_in_lane = NULL;
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 6c20fe04a738..33d1b9dc14cc 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -28,7 +28,7 @@
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
-#include <asm/byteorder.h>
+#include <crypto/sha1_base.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
@@ -44,132 +44,51 @@ asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */
asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
- unsigned int rounds);
+ unsigned int rounds);
#endif
-static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);
-
-
-static int sha1_ssse3_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
-
-static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int done = 0;
-
- sctx->count += len;
-
- if (partial) {
- done = SHA1_BLOCK_SIZE - partial;
- memcpy(sctx->buffer + partial, data, done);
- sha1_transform_asm(sctx->state, sctx->buffer, 1);
- }
-
- if (len - done >= SHA1_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
-
- sha1_transform_asm(sctx->state, data + done, rounds);
- done += rounds * SHA1_BLOCK_SIZE;
- }
-
- memcpy(sctx->buffer, data + done, len - done);
-
- return 0;
-}
+static void (*sha1_transform_asm)(u32 *, const char *, unsigned int);
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
- int res;
- /* Handle the fast case right here */
- if (partial + len < SHA1_BLOCK_SIZE) {
- sctx->count += len;
- memcpy(sctx->buffer + partial, data, len);
+ if (!irq_fpu_usable() ||
+ (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
+ return crypto_sha1_update(desc, data, len);
- return 0;
- }
+ /* make sure casting to sha1_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
- if (!irq_fpu_usable()) {
- res = crypto_sha1_update(desc, data, len);
- } else {
- kernel_fpu_begin();
- res = __sha1_ssse3_update(desc, data, len, partial);
- kernel_fpu_end();
- }
-
- return res;
-}
-
-
-/* Add padding and return the message digest. */
-static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 and append length */
- index = sctx->count % SHA1_BLOCK_SIZE;
- padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
- if (!irq_fpu_usable()) {
- crypto_sha1_update(desc, padding, padlen);
- crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
- } else {
- kernel_fpu_begin();
- /* We need to fill a whole block for __sha1_ssse3_update() */
- if (padlen <= 56) {
- sctx->count += padlen;
- memcpy(sctx->buffer + index, padding, padlen);
- } else {
- __sha1_ssse3_update(desc, padding, padlen, index);
- }
- __sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
- kernel_fpu_end();
- }
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
+ kernel_fpu_begin();
+ sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_transform_asm);
+ kernel_fpu_end();
return 0;
}
-static int sha1_ssse3_export(struct shash_desc *desc, void *out)
+static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
+ if (!irq_fpu_usable())
+ return crypto_sha1_finup(desc, data, len, out);
- memcpy(out, sctx, sizeof(*sctx));
+ kernel_fpu_begin();
+ if (len)
+ sha1_base_do_update(desc, data, len,
+ (sha1_block_fn *)sha1_transform_asm);
+ sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_asm);
+ kernel_fpu_end();
- return 0;
+ return sha1_base_finish(desc, out);
}
-static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
+/* Add padding and return the message digest. */
+static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
+ return sha1_ssse3_finup(desc, NULL, 0, out);
}
#ifdef CONFIG_AS_AVX2
@@ -186,13 +105,11 @@ static void sha1_apply_transform_avx2(u32 *digest, const char *data,
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_ssse3_init,
+ .init = sha1_base_init,
.update = sha1_ssse3_update,
.final = sha1_ssse3_final,
- .export = sha1_ssse3_export,
- .import = sha1_ssse3_import,
+ .finup = sha1_ssse3_finup,
.descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ssse3",
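The rewritten glue above defers to the generic C path when the FPU is unavailable or the input does not complete a block, and otherwise feeds the assembly through the new sha1_base helpers. The one subtle point is the cast of sha1_transform_asm to sha1_block_fn, which passes a struct sha1_state pointer where the assembly expects the bare u32 digest array; that is only sound because state[] is the first member, which is exactly what the BUILD_BUG_ON checks. A sketch of the layout assumption (struct shape paraphrased from <crypto/sha.h>, sizes shown as literals for illustration):

	struct sha1_state {
		u32 state[5];	/* must stay at offset 0 for the cast to work */
		u64 count;
		u8 buffer[64];
	};

	/* for any struct sha1_state *sctx:
	 *	(void *)sctx == (void *)sctx->state
	 * so a void (*)(u32 *, const char *, int) can stand in for a
	 * void (*)(struct sha1_state *, const u8 *, int), guarded by
	 * BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0) */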
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 642f15687a0a..92b3b5d75ba9 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -96,10 +96,10 @@ SHUF_DC00 = %xmm12 # shuffle xDxC -> DC00
BYTE_FLIP_MASK = %xmm13
NUM_BLKS = %rdx # 3rd arg
-CTX = %rsi # 2nd arg
-INP = %rdi # 1st arg
+INP = %rsi # 2nd arg
+CTX = %rdi # 1st arg
-SRND = %rdi # clobbers INP
+SRND = %rsi # clobbers INP
c = %ecx
d = %r8d
e = %edx
@@ -342,8 +342,8 @@ a = TMP_
########################################################################
## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks)
-## arg 1 : pointer to input data
-## arg 2 : pointer to digest
+## arg 1 : pointer to digest
+## arg 2 : pointer to input data
## arg 3 : Num blocks
########################################################################
.text
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 9e86944c539d..570ec5ec62d7 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -91,12 +91,12 @@ BYTE_FLIP_MASK = %ymm13
X_BYTE_FLIP_MASK = %xmm13 # XMM version of BYTE_FLIP_MASK
NUM_BLKS = %rdx # 3rd arg
-CTX = %rsi # 2nd arg
-INP = %rdi # 1st arg
+INP = %rsi # 2nd arg
+CTX = %rdi # 1st arg
c = %ecx
d = %r8d
e = %edx # clobbers NUM_BLKS
-y3 = %edi # clobbers INP
+y3 = %esi # clobbers INP
TBL = %rbp
@@ -523,8 +523,8 @@ STACK_SIZE = _RSP + _RSP_SIZE
########################################################################
## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks)
-## arg 1 : pointer to input data
-## arg 2 : pointer to digest
+## arg 1 : pointer to digest
+## arg 2 : pointer to input data
## arg 3 : Num blocks
########################################################################
.text
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index f833b74d902b..2cedc44e8121 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -88,10 +88,10 @@ SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00
BYTE_FLIP_MASK = %xmm12
NUM_BLKS = %rdx # 3rd arg
-CTX = %rsi # 2nd arg
-INP = %rdi # 1st arg
+INP = %rsi # 2nd arg
+CTX = %rdi # 1st arg
-SRND = %rdi # clobbers INP
+SRND = %rsi # clobbers INP
c = %ecx
d = %r8d
e = %edx
@@ -348,8 +348,8 @@ a = TMP_
########################################################################
## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks)
-## arg 1 : pointer to input data
-## arg 2 : pointer to digest
+## arg 1 : pointer to digest
+## arg 2 : pointer to input data
## arg 3 : Num blocks
########################################################################
.text
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 8fad72f4dfd2..ccc338881ee8 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -36,195 +36,74 @@
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
-#include <asm/byteorder.h>
+#include <crypto/sha256_base.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <linux/string.h>
-asmlinkage void sha256_transform_ssse3(const char *data, u32 *digest,
- u64 rounds);
+asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
+ u64 rounds);
#ifdef CONFIG_AS_AVX
-asmlinkage void sha256_transform_avx(const char *data, u32 *digest,
+asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
u64 rounds);
#endif
#ifdef CONFIG_AS_AVX2
-asmlinkage void sha256_transform_rorx(const char *data, u32 *digest,
- u64 rounds);
+asmlinkage void sha256_transform_rorx(u32 *digest, const char *data,
+ u64 rounds);
#endif
-static asmlinkage void (*sha256_transform_asm)(const char *, u32 *, u64);
-
-
-static int sha256_ssse3_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
-
- return 0;
-}
-
-static int __sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int done = 0;
-
- sctx->count += len;
-
- if (partial) {
- done = SHA256_BLOCK_SIZE - partial;
- memcpy(sctx->buf + partial, data, done);
- sha256_transform_asm(sctx->buf, sctx->state, 1);
- }
-
- if (len - done >= SHA256_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
-
- sha256_transform_asm(data + done, sctx->state, (u64) rounds);
-
- done += rounds * SHA256_BLOCK_SIZE;
- }
-
- memcpy(sctx->buf, data + done, len - done);
-
- return 0;
-}
+static void (*sha256_transform_asm)(u32 *, const char *, u64);
static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
- int res;
- /* Handle the fast case right here */
- if (partial + len < SHA256_BLOCK_SIZE) {
- sctx->count += len;
- memcpy(sctx->buf + partial, data, len);
+ if (!irq_fpu_usable() ||
+ (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
+ return crypto_sha256_update(desc, data, len);
- return 0;
- }
-
- if (!irq_fpu_usable()) {
- res = crypto_sha256_update(desc, data, len);
- } else {
- kernel_fpu_begin();
- res = __sha256_ssse3_update(desc, data, len, partial);
- kernel_fpu_end();
- }
-
- return res;
-}
+ /* make sure casting to sha256_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
-
-/* Add padding and return the message digest. */
-static int sha256_ssse3_final(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 and append length */
- index = sctx->count % SHA256_BLOCK_SIZE;
- padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
-
- if (!irq_fpu_usable()) {
- crypto_sha256_update(desc, padding, padlen);
- crypto_sha256_update(desc, (const u8 *)&bits, sizeof(bits));
- } else {
- kernel_fpu_begin();
- /* We need to fill a whole block for __sha256_ssse3_update() */
- if (padlen <= 56) {
- sctx->count += padlen;
- memcpy(sctx->buf + index, padding, padlen);
- } else {
- __sha256_ssse3_update(desc, padding, padlen, index);
- }
- __sha256_ssse3_update(desc, (const u8 *)&bits,
- sizeof(bits), 56);
- kernel_fpu_end();
- }
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
+ kernel_fpu_begin();
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_transform_asm);
+ kernel_fpu_end();
return 0;
}
-static int sha256_ssse3_export(struct shash_desc *desc, void *out)
+static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ if (!irq_fpu_usable())
+ return crypto_sha256_finup(desc, data, len, out);
- memcpy(out, sctx, sizeof(*sctx));
+ kernel_fpu_begin();
+ if (len)
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_transform_asm);
+ sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_transform_asm);
+ kernel_fpu_end();
- return 0;
+ return sha256_base_finish(desc, out);
}
-static int sha256_ssse3_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha224_ssse3_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
-
- return 0;
-}
-
-static int sha224_ssse3_final(struct shash_desc *desc, u8 *hash)
+/* Add padding and return the message digest. */
+static int sha256_ssse3_final(struct shash_desc *desc, u8 *out)
{
- u8 D[SHA256_DIGEST_SIZE];
-
- sha256_ssse3_final(desc, D);
-
- memcpy(hash, D, SHA224_DIGEST_SIZE);
- memzero_explicit(D, SHA256_DIGEST_SIZE);
-
- return 0;
+ return sha256_ssse3_finup(desc, NULL, 0, out);
}
static struct shash_alg algs[] = { {
.digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_ssse3_init,
+ .init = sha256_base_init,
.update = sha256_ssse3_update,
.final = sha256_ssse3_final,
- .export = sha256_ssse3_export,
- .import = sha256_ssse3_import,
+ .finup = sha256_ssse3_finup,
.descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ssse3",
@@ -235,13 +114,11 @@ static struct shash_alg algs[] = { {
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_ssse3_init,
+ .init = sha224_base_init,
.update = sha256_ssse3_update,
- .final = sha224_ssse3_final,
- .export = sha256_ssse3_export,
- .import = sha256_ssse3_import,
+ .final = sha256_ssse3_final,
+ .finup = sha256_ssse3_finup,
.descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-ssse3",
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 974dde9bc6cd..565274d6a641 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -54,9 +54,9 @@
# Virtual Registers
# ARG1
-msg = %rdi
+digest = %rdi
# ARG2
-digest = %rsi
+msg = %rsi
# ARG3
msglen = %rdx
T1 = %rcx
@@ -271,7 +271,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm
########################################################################
-# void sha512_transform_avx(const void* M, void* D, u64 L)
+# void sha512_transform_avx(void* D, const void* M, u64 L)
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
# The size of the message pointed to by M must be an integer multiple of SHA512
# message blocks.
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index 568b96105f5c..1f20b35d8573 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -70,16 +70,16 @@ XFER = YTMP0
BYTE_FLIP_MASK = %ymm9
# 1st arg
-INP = %rdi
+CTX = %rdi
# 2nd arg
-CTX = %rsi
+INP = %rsi
# 3rd arg
NUM_BLKS = %rdx
c = %rcx
d = %r8
e = %rdx
-y3 = %rdi
+y3 = %rsi
TBL = %rbp
@@ -562,7 +562,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm
########################################################################
-# void sha512_transform_rorx(const void* M, void* D, uint64_t L)#
+# void sha512_transform_rorx(void* D, const void* M, uint64_t L)#
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
# The size of the message pointed to by M must be an integer multiple of SHA512
# message blocks.
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index fb56855d51f5..e610e29cbc81 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -53,9 +53,9 @@
# Virtual Registers
# ARG1
-msg = %rdi
+digest = %rdi
# ARG2
-digest = %rsi
+msg = %rsi
# ARG3
msglen = %rdx
T1 = %rcx
@@ -269,7 +269,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm
########################################################################
-# void sha512_transform_ssse3(const void* M, void* D, u64 L)#
+# void sha512_transform_ssse3(void* D, const void* M, u64 L)#
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
# The size of the message pointed to by M must be an integer multiple of SHA512
# message blocks.
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 0b6af26832bf..d9fa4c1e063f 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -34,205 +34,75 @@
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
-#include <asm/byteorder.h>
+#include <crypto/sha512_base.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <linux/string.h>
-asmlinkage void sha512_transform_ssse3(const char *data, u64 *digest,
- u64 rounds);
+asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
+ u64 rounds);
#ifdef CONFIG_AS_AVX
-asmlinkage void sha512_transform_avx(const char *data, u64 *digest,
+asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
u64 rounds);
#endif
#ifdef CONFIG_AS_AVX2
-asmlinkage void sha512_transform_rorx(const char *data, u64 *digest,
- u64 rounds);
+asmlinkage void sha512_transform_rorx(u64 *digest, const char *data,
+ u64 rounds);
#endif
-static asmlinkage void (*sha512_transform_asm)(const char *, u64 *, u64);
-
-
-static int sha512_ssse3_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA512_H0;
- sctx->state[1] = SHA512_H1;
- sctx->state[2] = SHA512_H2;
- sctx->state[3] = SHA512_H3;
- sctx->state[4] = SHA512_H4;
- sctx->state[5] = SHA512_H5;
- sctx->state[6] = SHA512_H6;
- sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
+static void (*sha512_transform_asm)(u64 *, const char *, u64);
-static int __sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, unsigned int partial)
+static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int done = 0;
-
- sctx->count[0] += len;
- if (sctx->count[0] < len)
- sctx->count[1]++;
- if (partial) {
- done = SHA512_BLOCK_SIZE - partial;
- memcpy(sctx->buf + partial, data, done);
- sha512_transform_asm(sctx->buf, sctx->state, 1);
- }
-
- if (len - done >= SHA512_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
+ if (!irq_fpu_usable() ||
+ (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
+ return crypto_sha512_update(desc, data, len);
- sha512_transform_asm(data + done, sctx->state, (u64) rounds);
-
- done += rounds * SHA512_BLOCK_SIZE;
- }
+ /* make sure casting to sha512_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
- memcpy(sctx->buf, data + done, len - done);
+ kernel_fpu_begin();
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_transform_asm);
+ kernel_fpu_end();
return 0;
}
-static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
- int res;
-
- /* Handle the fast case right here */
- if (partial + len < SHA512_BLOCK_SIZE) {
- sctx->count[0] += len;
- if (sctx->count[0] < len)
- sctx->count[1]++;
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
- }
+ if (!irq_fpu_usable())
+ return crypto_sha512_finup(desc, data, len, out);
- if (!irq_fpu_usable()) {
- res = crypto_sha512_update(desc, data, len);
- } else {
- kernel_fpu_begin();
- res = __sha512_ssse3_update(desc, data, len, partial);
- kernel_fpu_end();
- }
+ kernel_fpu_begin();
+ if (len)
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_transform_asm);
+ sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_transform_asm);
+ kernel_fpu_end();
- return res;
+ return sha512_base_finish(desc, out);
}
-
/* Add padding and return the message digest. */
static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be64 *dst = (__be64 *)out;
- __be64 bits[2];
- static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
-
- /* save number of bits */
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
-
- /* Pad out to 112 mod 128 and append length */
- index = sctx->count[0] & 0x7f;
- padlen = (index < 112) ? (112 - index) : ((128+112) - index);
-
- if (!irq_fpu_usable()) {
- crypto_sha512_update(desc, padding, padlen);
- crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits));
- } else {
- kernel_fpu_begin();
- /* We need to fill a whole block for __sha512_ssse3_update() */
- if (padlen <= 112) {
- sctx->count[0] += padlen;
- if (sctx->count[0] < padlen)
- sctx->count[1]++;
- memcpy(sctx->buf + index, padding, padlen);
- } else {
- __sha512_ssse3_update(desc, padding, padlen, index);
- }
- __sha512_ssse3_update(desc, (const u8 *)&bits,
- sizeof(bits), 112);
- kernel_fpu_end();
- }
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be64(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_ssse3_export(struct shash_desc *desc, void *out)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_ssse3_import(struct shash_desc *desc, const void *in)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha384_ssse3_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA384_H0;
- sctx->state[1] = SHA384_H1;
- sctx->state[2] = SHA384_H2;
- sctx->state[3] = SHA384_H3;
- sctx->state[4] = SHA384_H4;
- sctx->state[5] = SHA384_H5;
- sctx->state[6] = SHA384_H6;
- sctx->state[7] = SHA384_H7;
-
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static int sha384_ssse3_final(struct shash_desc *desc, u8 *hash)
-{
- u8 D[SHA512_DIGEST_SIZE];
-
- sha512_ssse3_final(desc, D);
-
- memcpy(hash, D, SHA384_DIGEST_SIZE);
- memzero_explicit(D, SHA512_DIGEST_SIZE);
-
- return 0;
+ return sha512_ssse3_finup(desc, NULL, 0, out);
}
static struct shash_alg algs[] = { {
.digestsize = SHA512_DIGEST_SIZE,
- .init = sha512_ssse3_init,
+ .init = sha512_base_init,
.update = sha512_ssse3_update,
.final = sha512_ssse3_final,
- .export = sha512_ssse3_export,
- .import = sha512_ssse3_import,
+ .finup = sha512_ssse3_finup,
.descsize = sizeof(struct sha512_state),
- .statesize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-ssse3",
@@ -243,13 +113,11 @@ static struct shash_alg algs[] = { {
}
}, {
.digestsize = SHA384_DIGEST_SIZE,
- .init = sha384_ssse3_init,
+ .init = sha384_base_init,
.update = sha512_ssse3_update,
- .final = sha384_ssse3_final,
- .export = sha512_ssse3_export,
- .import = sha512_ssse3_import,
+ .final = sha512_ssse3_final,
+ .finup = sha512_ssse3_finup,
.descsize = sizeof(struct sha512_state),
- .statesize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-ssse3",
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 1ac531ea9bcc..b5e2d5651851 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -340,7 +340,8 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_name = "__ecb-twofish-avx",
.cra_driver_name = "__driver-ecb-twofish-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
@@ -359,7 +360,8 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_name = "__cbc-twofish-avx",
.cra_driver_name = "__driver-cbc-twofish-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
@@ -378,7 +380,8 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_name = "__ctr-twofish-avx",
.cra_driver_name = "__driver-ctr-twofish-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
@@ -398,7 +401,8 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_name = "__lrw-twofish-avx",
.cra_driver_name = "__driver-lrw-twofish-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_lrw_ctx),
.cra_alignmask = 0,
@@ -421,7 +425,8 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_name = "__xts-twofish-avx",
.cra_driver_name = "__driver-xts-twofish-avx",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_xts_ctx),
.cra_alignmask = 0,
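
The CRYPTO_ALG_INTERNAL additions mark these FPU-only helper ciphers so that an ordinary algorithm lookup is not supposed to hand them out; only callers that explicitly request the internal bit (the SIMD/cryptd wrappers) should see them. A toy model of that type/mask matching, assuming the usual "(flags ^ type) & mask" style of comparison rather than quoting the crypto core verbatim:

#include <stdbool.h>
#include <stdio.h>

#define TOY_ALG_INTERNAL 0x2000u                 /* illustrative flag value */

struct toy_alg { const char *name; unsigned int flags; };

static bool toy_matches(const struct toy_alg *alg, unsigned int type, unsigned int mask)
{
	return ((alg->flags ^ type) & mask) == 0; /* bits selected by mask must agree */
}

int main(void)
{
	struct toy_alg helper = { "__ecb-twofish-avx", TOY_ALG_INTERNAL };

	/* ordinary user: does not ask for internal algorithms -> filtered out */
	printf("plain lookup matches: %d\n",
	       toy_matches(&helper, 0, TOY_ALG_INTERNAL));

	/* wrapper: requests type = mask = INTERNAL -> allowed through */
	printf("internal lookup matches: %d\n",
	       toy_matches(&helper, TOY_ALG_INTERNAL, TOY_ALG_INTERNAL));
	return 0;
}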
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index a821b1cd4fa7..72bf2680f819 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -427,6 +427,13 @@ sysretl_from_sys_call:
* cs and ss are loaded from MSRs.
* (Note: 32bit->32bit SYSRET is different: since r11
* does not exist, it merely sets eflags.IF=1).
+ *
+ * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
+ * descriptor is not reinitialized. This means that we must
+ * avoid SYSRET with SS == NULL, which could happen if we schedule,
+ * exit the kernel, and re-enter using an interrupt vector. (All
+ * interrupt entries on x86_64 set SS to NULL.) We prevent that
+ * from happening by reloading SS in __switch_to.
*/
USERGS_SYSRET32
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index d2b12988d2ed..bf2caa1dedc5 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -34,8 +34,6 @@ extern int _debug_hotplug_cpu(int cpu, int action);
#endif
#endif
-DECLARE_PER_CPU(int, cpu_state);
-
int mwait_usable(const struct cpuinfo_x86 *);
#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 854c04b3c9c2..3d6606fb97d0 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -12,7 +12,7 @@
#include <asm/disabled-features.h>
#endif
-#define NCAPINTS 11 /* N 32-bit words worth of info */
+#define NCAPINTS 13 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -195,6 +195,7 @@
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
+#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -226,6 +227,7 @@
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
@@ -244,6 +246,12 @@
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
/*
* BUG word(s)
*/
@@ -257,6 +265,7 @@
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 779c2efe2e97..3ab0537872fb 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -40,14 +40,6 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
}
#endif
-#ifdef CONFIG_MEMTEST
-extern void early_memtest(unsigned long start, unsigned long end);
-#else
-static inline void early_memtest(unsigned long start, unsigned long end)
-{
-}
-#endif
-
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 sizet, u64 align);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 935588d95c82..f161c189c27b 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -339,9 +339,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
/*
* True on X86_32 or when emulating IA32 on X86_64
*/
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index e42f758a0fbd..055ea9941dd5 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper;
/* Recognized hypervisors */
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_xen;
extern const struct hypervisor_x86 x86_hyper_kvm;
extern void init_hypervisor(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 705d35708a50..7c5af123bdbd 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -136,9 +136,6 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
#define SFI_MTMR_MAX_NUM 8
#define SFI_MRTC_MAX 8
-extern struct console early_hsu_console;
-extern void hsu_early_console_init(const char *);
-
extern void intel_scu_devices_create(void);
extern void intel_scu_devices_destroy(void);
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index e2d4a4afa8c3..3bbc07a57a31 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -20,13 +20,10 @@ extern unsigned long switcher_addr;
/* Found in switcher.S */
extern unsigned long default_idt_entries[];
-/* Declarations for definitions in lguest_guest.S */
-extern char lguest_noirq_start[], lguest_noirq_end[];
+/* Declarations for definitions in arch/x86/lguest/head_32.S */
+extern char lguest_noirq_iret[];
extern const char lgstart_cli[], lgend_cli[];
-extern const char lgstart_sti[], lgend_sti[];
-extern const char lgstart_popf[], lgend_popf[];
extern const char lgstart_pushf[], lgend_pushf[];
-extern const char lgstart_iret[], lgend_iret[];
extern void lguest_iret(void);
extern void lguest_init(void);
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index a455a53d789a..2d29197bd2fb 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -32,8 +32,8 @@ static inline int klp_check_compiler_support(void)
#endif
return 0;
}
-extern int klp_write_module_reloc(struct module *mod, unsigned long type,
- unsigned long loc, unsigned long value);
+int klp_write_module_reloc(struct module *mod, unsigned long type,
+ unsigned long loc, unsigned long value);
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index f97fbe3abb67..c7c712f2648b 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -40,8 +40,10 @@
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
#else
#include <asm/page_32_types.h>
+#define IOREMAP_MAX_ORDER (PMD_SHIFT)
#endif /* CONFIG_X86_64 */
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5f6051d5d139..8957810ad7d1 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -545,7 +545,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
pmdval_t ret;
@@ -585,7 +585,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
val);
}
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
pudval_t ret;
@@ -636,9 +636,9 @@ static inline void pud_clear(pud_t *pudp)
set_pud(pudp, __pud(0));
}
-#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
-#endif /* PAGETABLE_LEVELS >= 3 */
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7549b8b369e4..f7b0b5c112f2 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -294,7 +294,7 @@ struct pv_mmu_ops {
struct paravirt_callee_save pgd_val;
struct paravirt_callee_save make_pgd;
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
@@ -308,13 +308,13 @@ struct pv_mmu_ops {
struct paravirt_callee_save pmd_val;
struct paravirt_callee_save make_pmd;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
struct paravirt_callee_save pud_val;
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif /* PAGETABLE_LEVELS == 4 */
-#endif /* PAGETABLE_LEVELS >= 3 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
struct pv_lazy_ops lazy_mode;
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index c4412e972bbd..bf7f8b55b0f9 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -77,7 +77,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
#define pmd_pgtable(pmd) pmd_page(pmd)
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *page;
@@ -116,7 +116,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#endif /* CONFIG_X86_PAE */
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
@@ -142,7 +142,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
___pud_free_tlb(tlb, pud);
}
-#endif /* PAGETABLE_LEVELS > 3 */
-#endif /* PAGETABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* _ASM_X86_PGALLOC_H */
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index daacc23e3fb9..392576433e77 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -17,7 +17,6 @@ typedef union {
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
-#define PAGETABLE_LEVELS 2
/*
* traditional i386 two-level paging structure:
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 1bd5876c8649..bcc89625ebe5 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -24,8 +24,6 @@ typedef union {
#define SHARED_KERNEL_PMD 1
#endif
-#define PAGETABLE_LEVELS 3
-
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a0c35bf6cb92..fe57e7a98839 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -551,7 +551,7 @@ static inline unsigned long pages_to_mb(unsigned long npg)
return npg >> (20 - PAGE_SHIFT);
}
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
@@ -594,9 +594,9 @@ static inline int pud_large(pud_t pud)
{
return 0;
}
-#endif /* PAGETABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
@@ -633,7 +633,7 @@ static inline int pgd_none(pgd_t pgd)
{
return !native_pgd_val(pgd);
}
-#endif /* PAGETABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 602b6028c5b6..e6844dfb4471 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -20,7 +20,6 @@ typedef struct { pteval_t pte; } pte_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
-#define PAGETABLE_LEVELS 4
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8c7c10802e9c..78f0c8cbe316 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -234,7 +234,7 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
static inline pud_t native_make_pud(pmdval_t val)
@@ -255,7 +255,7 @@ static inline pudval_t native_pud_val(pud_t pud)
}
#endif
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;
static inline pmd_t native_make_pmd(pmdval_t val)
diff --git a/arch/x86/include/asm/resume-trace.h b/arch/x86/include/asm/pm-trace.h
index 3ff1c2cb1da5..7b7ac42c3661 100644
--- a/arch/x86/include/asm/resume-trace.h
+++ b/arch/x86/include/asm/pm-trace.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_RESUME_TRACE_H
-#define _ASM_X86_RESUME_TRACE_H
+#ifndef _ASM_X86_PM_TRACE_H
+#define _ASM_X86_PM_TRACE_H
#include <asm/asm.h>
@@ -14,8 +14,10 @@ do { \
".previous" \
:"=r" (tracedata) \
: "i" (__LINE__), "i" (__FILE__)); \
- generate_resume_trace(tracedata, user); \
+ generate_pm_trace(tracedata, user); \
} \
} while (0)
-#endif /* _ASM_X86_RESUME_TRACE_H */
+#define TRACE_SUSPEND(user) TRACE_RESUME(user)
+
+#endif /* _ASM_X86_PM_TRACE_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d2203b5d9538..23ba6765b718 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -109,6 +109,9 @@ struct cpuinfo_x86 {
/* in KB - valid for CPUS which support this call: */
int x86_cache_size;
int x86_cache_alignment; /* In bytes */
+ /* Cache QoS architectural values: */
+ int x86_cache_max_rmid; /* max index */
+ int x86_cache_occ_scale; /* scale to bytes */
int x86_power;
unsigned long loops_per_jiffy;
/* cpuid returned max cores value: */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 25b1cc07d496..d6b078e9fa28 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,7 +95,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
- u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h
index 0f3d7f099224..0c8c7c8861b4 100644
--- a/arch/x86/include/asm/seccomp.h
+++ b/arch/x86/include/asm/seccomp.h
@@ -1,5 +1,20 @@
+#ifndef _ASM_X86_SECCOMP_H
+#define _ASM_X86_SECCOMP_H
+
+#include <asm/unistd.h>
+
#ifdef CONFIG_X86_32
-# include <asm/seccomp_32.h>
-#else
-# include <asm/seccomp_64.h>
+#define __NR_seccomp_sigreturn __NR_sigreturn
#endif
+
+#ifdef CONFIG_COMPAT
+#include <asm/ia32_unistd.h>
+#define __NR_seccomp_read_32 __NR_ia32_read
+#define __NR_seccomp_write_32 __NR_ia32_write
+#define __NR_seccomp_exit_32 __NR_ia32_exit
+#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
+#endif
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_X86_SECCOMP_H */
diff --git a/arch/x86/include/asm/seccomp_32.h b/arch/x86/include/asm/seccomp_32.h
deleted file mode 100644
index b811d6f5780c..000000000000
--- a/arch/x86/include/asm/seccomp_32.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_X86_SECCOMP_32_H
-#define _ASM_X86_SECCOMP_32_H
-
-#include <linux/unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_sigreturn
-
-#endif /* _ASM_X86_SECCOMP_32_H */
diff --git a/arch/x86/include/asm/seccomp_64.h b/arch/x86/include/asm/seccomp_64.h
deleted file mode 100644
index 84ec1bd161a5..000000000000
--- a/arch/x86/include/asm/seccomp_64.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_X86_SECCOMP_64_H
-#define _ASM_X86_SECCOMP_64_H
-
-#include <linux/unistd.h>
-#include <asm/ia32_unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_ia32_read
-#define __NR_seccomp_write_32 __NR_ia32_write
-#define __NR_seccomp_exit_32 __NR_ia32_exit
-#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
-
-#endif /* _ASM_X86_SECCOMP_64_H */
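
The per-arch seccomp headers can be deleted because the new asm/seccomp.h only overrides what differs on x86 (the 32-bit sigreturn number and the compat aliases) and then includes asm-generic/seccomp.h, which is expected to supply defaults for anything still undefined. The pattern, reduced to a standalone toy (macro names and values are invented; the real defaults live in the generic header):

#include <stdio.h>

/* "arch" part: override only what is special, as the x86 header does above */
#define TOY_NR_seccomp_sigreturn 119             /* pretend arch-specific value */

/* "generic" part: provide fallbacks only for macros left undefined */
#ifndef TOY_NR_seccomp_read
#define TOY_NR_seccomp_read 0
#endif
#ifndef TOY_NR_seccomp_sigreturn
#define TOY_NR_seccomp_sigreturn 15              /* would be the generic default */
#endif

int main(void)
{
	printf("read=%d sigreturn=%d\n", TOY_NR_seccomp_read, TOY_NR_seccomp_sigreturn);
	return 0;
}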
diff --git a/arch/x86/include/asm/serial.h b/arch/x86/include/asm/serial.h
index 460b84f64556..8378b8c9109c 100644
--- a/arch/x86/include/asm/serial.h
+++ b/arch/x86/include/asm/serial.h
@@ -12,11 +12,11 @@
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
-# define STD_COMX_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-# define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | 0 | ASYNC_AUTO_IRQ)
+# define STD_COMX_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
+# define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | 0 | UPF_AUTO_IRQ)
#else
-# define STD_COMX_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | 0 )
-# define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | 0 | 0 )
+# define STD_COMX_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | 0 )
+# define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | 0 | 0 )
#endif
#define SERIAL_PORT_DFNS \
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 81d02fc7dafa..17a8dced12da 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -150,13 +150,13 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
}
void cpu_disable_common(void);
-void cpu_die_common(unsigned int cpu);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
+int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index cf87de3fc390..64b611782ef0 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
struct __raw_tickets tmp = READ_ONCE(lock->tickets);
tmp.head &= ~TICKET_SLOWPATH_FLAG;
- return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended arch_spin_is_contended
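
The added (__ticket_t) cast matters because tail and head are narrow unsigned types: the subtraction is promoted to int, so once the ticket counters wrap the difference can come out negative and the contention check silently reports false. A standalone illustration with toy 16-bit tickets:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t toy_ticket_t;
#define TOY_TICKET_LOCK_INC 2

int main(void)
{
	struct { toy_ticket_t head, tail; } tmp = { .head = 0xfffe, .tail = 0x0002 };

	/* promoted to int: 2 - 65534 = -65532, so "> INC" is wrongly false */
	printf("without cast: %d\n", (tmp.tail - tmp.head) > TOY_TICKET_LOCK_INC);

	/* truncated back to ticket width: (uint16_t)(2 - 65534) = 4, contention is seen */
	printf("with cast:    %d\n",
	       (toy_ticket_t)(tmp.tail - tmp.head) > TOY_TICKET_LOCK_INC);
	return 0;
}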
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ea2dbe82cba3..b4bdec3e9523 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -46,13 +46,11 @@
*/
#ifndef __ASSEMBLY__
struct task_struct;
-struct exec_domain;
#include <asm/processor.h>
#include <linux/atomic.h>
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
__u32 flags; /* low level flags */
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
@@ -66,7 +64,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.saved_preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 358dcd338915..c44a5d53e464 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
return false;
}
+static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+ return __get_free_pages(__GFP_NOWARN, order);
+}
+
#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
index d993e33f5236..960a8a9dc4ab 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -33,6 +33,16 @@
#define E820_NVS 4
#define E820_UNUSABLE 5
+/*
+ * This is a non-standardized way to represent ADR or NVDIMM regions that
+ * persist over a reboot. The kernel will ignore their special capabilities
+ * unless the CONFIG_X86_PMEM_LEGACY=y option is set.
+ *
+ * ( Note that older platforms also used 6 for the same type of memory,
+ * but newer versions switched to 12 as 6 was assigned differently. Some
+ * time they will learn... )
+ */
+#define E820_PRAM 12
/*
* reserved RAM used by kernel itself
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 90c458e66e13..ce6068dbcfbc 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -225,6 +225,8 @@
#define HV_STATUS_INVALID_HYPERCALL_CODE 2
#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
#define HV_STATUS_INVALID_ALIGNMENT 4
+#define HV_STATUS_INSUFFICIENT_MEMORY 11
+#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
typedef struct _HV_REFERENCE_TSC_PAGE {
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 3ce079136c11..c469490db4a8 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -61,6 +61,9 @@
#define MSR_OFFCORE_RSP_1 0x000001a7
#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae
+#define MSR_TURBO_RATIO_LIMIT 0x000001ad
+#define MSR_TURBO_RATIO_LIMIT1 0x000001ae
+#define MSR_TURBO_RATIO_LIMIT2 0x000001af
#define MSR_LBR_SELECT 0x000001c8
#define MSR_LBR_TOS 0x000001c9
@@ -74,6 +77,24 @@
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
+#define MSR_IA32_RTIT_CTL 0x00000570
+#define RTIT_CTL_TRACEEN BIT(0)
+#define RTIT_CTL_OS BIT(2)
+#define RTIT_CTL_USR BIT(3)
+#define RTIT_CTL_CR3EN BIT(7)
+#define RTIT_CTL_TOPA BIT(8)
+#define RTIT_CTL_TSC_EN BIT(10)
+#define RTIT_CTL_DISRETC BIT(11)
+#define RTIT_CTL_BRANCH_EN BIT(13)
+#define MSR_IA32_RTIT_STATUS 0x00000571
+#define RTIT_STATUS_CONTEXTEN BIT(1)
+#define RTIT_STATUS_TRIGGEREN BIT(2)
+#define RTIT_STATUS_ERROR BIT(4)
+#define RTIT_STATUS_STOPPED BIT(5)
+#define MSR_IA32_RTIT_CR3_MATCH 0x00000572
+#define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560
+#define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561
+
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
#define MSR_MTRRfix16K_A0000 0x00000259
@@ -147,6 +168,11 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
+#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
+#define MSR_PKG_ANY_CORE_C0_RES 0x00000659
+#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
+#define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
+
#define MSR_CORE_C1_RES 0x00000660
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
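
As a quick sanity check on the new Processor Trace definitions, the control bits above compose into an MSR value like this (plain arithmetic only; actually programming MSR_IA32_RTIT_CTL is the PT driver's business, and the bit positions are simply copied from the #defines in this hunk):

#include <stdint.h>
#include <stdio.h>

#define TOY_BIT(n) (1ULL << (n))

int main(void)
{
	uint64_t ctl = TOY_BIT(0)                /* RTIT_CTL_TRACEEN   */
		     | TOY_BIT(8)                /* RTIT_CTL_TOPA      */
		     | TOY_BIT(10)               /* RTIT_CTL_TSC_EN    */
		     | TOY_BIT(13);              /* RTIT_CTL_BRANCH_EN */

	printf("RTIT_CTL = %#llx\n", (unsigned long long)ctl);   /* prints 0x2501 */
	return 0;
}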
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index c887cd944f0c..9bcd0b56ca17 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
+obj-$(CONFIG_X86_PMEM_LEGACY) += pmem.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 803b684676ff..dbe76a14c3c9 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -757,7 +757,7 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index d9d0bd2faaf4..ab3219b3fbda 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -171,8 +171,8 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
for_each_online_cpu(cpu) {
if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
continue;
- __cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
- __cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
+ cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+ cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
}
free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
free_cpumask_var(per_cpu(ipi_mask, this_cpu));
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 80091ae54c2b..9bff68798836 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
endif
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o perf_event_intel_cqm.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_pt.o perf_event_intel_bts.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
perf_event_intel_uncore_snb.o \
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index fd470ebf924e..e4cf63301ff4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -720,6 +720,9 @@ static void init_amd(struct cpuinfo_x86 *c)
if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
+
+ /* AMD CPUs don't reset SS attributes on SYSRET */
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3f70538012e2..a62cf04dac8a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -646,6 +646,30 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_capability[10] = eax;
}
+ /* Additional Intel-defined flags: level 0x0000000F */
+ if (c->cpuid_level >= 0x0000000F) {
+ u32 eax, ebx, ecx, edx;
+
+ /* QoS sub-leaf, EAX=0Fh, ECX=0 */
+ cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
+ c->x86_capability[11] = edx;
+ if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
+ /* will be overridden if occupancy monitoring exists */
+ c->x86_cache_max_rmid = ebx;
+
+ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+ cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
+ c->x86_capability[12] = edx;
+ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
+ c->x86_cache_max_rmid = ecx;
+ c->x86_cache_occ_scale = ebx;
+ }
+ } else {
+ c->x86_cache_max_rmid = -1;
+ c->x86_cache_occ_scale = -1;
+ }
+ }
+
/* AMD-defined flags: level 0x80000001 */
xlvl = cpuid_eax(0x80000000);
c->extended_cpuid_level = xlvl;
@@ -834,6 +858,20 @@ static void generic_identify(struct cpuinfo_x86 *c)
detect_nopl(c);
}
+static void x86_init_cache_qos(struct cpuinfo_x86 *c)
+{
+ /*
+ * The heavy lifting of max_rmid and cache_occ_scale is handled
+ * in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu
+ * in case CQM bits really aren't there in this CPU.
+ */
+ if (c != &boot_cpu_data) {
+ boot_cpu_data.x86_cache_max_rmid =
+ min(boot_cpu_data.x86_cache_max_rmid,
+ c->x86_cache_max_rmid);
+ }
+}
+
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
@@ -923,6 +961,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
init_hypervisor(c);
x86_init_rdrand(c);
+ x86_init_cache_qos(c);
/*
* Clear/Set all flags overriden by options, need do it
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 36ce402a3fa5..d820d8eae96b 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -27,8 +27,8 @@
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
-#ifdef CONFIG_XEN_PVHVM
- &x86_hyper_xen_hvm,
+#ifdef CONFIG_XEN
+ &x86_hyper_xen,
#endif
&x86_hyper_vmware,
&x86_hyper_ms_hyperv,
diff --git a/arch/x86/kernel/cpu/intel_pt.h b/arch/x86/kernel/cpu/intel_pt.h
new file mode 100644
index 000000000000..1c338b0eba05
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_pt.h
@@ -0,0 +1,131 @@
+/*
+ * Intel(R) Processor Trace PMU driver for perf
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Intel PT is specified in the Intel Architecture Instruction Set Extensions
+ * Programming Reference:
+ * http://software.intel.com/en-us/intel-isa-extensions
+ */
+
+#ifndef __INTEL_PT_H__
+#define __INTEL_PT_H__
+
+/*
+ * Single-entry ToPA: when this close to region boundary, switch
+ * buffers to avoid losing data.
+ */
+#define TOPA_PMI_MARGIN 512
+
+/*
+ * Table of Physical Addresses bits
+ */
+enum topa_sz {
+ TOPA_4K = 0,
+ TOPA_8K,
+ TOPA_16K,
+ TOPA_32K,
+ TOPA_64K,
+ TOPA_128K,
+ TOPA_256K,
+ TOPA_512K,
+ TOPA_1MB,
+ TOPA_2MB,
+ TOPA_4MB,
+ TOPA_8MB,
+ TOPA_16MB,
+ TOPA_32MB,
+ TOPA_64MB,
+ TOPA_128MB,
+ TOPA_SZ_END,
+};
+
+static inline unsigned int sizes(enum topa_sz tsz)
+{
+ return 1 << (tsz + 12);
+};
+
+struct topa_entry {
+ u64 end : 1;
+ u64 rsvd0 : 1;
+ u64 intr : 1;
+ u64 rsvd1 : 1;
+ u64 stop : 1;
+ u64 rsvd2 : 1;
+ u64 size : 4;
+ u64 rsvd3 : 2;
+ u64 base : 36;
+ u64 rsvd4 : 16;
+};
+
+#define TOPA_SHIFT 12
+#define PT_CPUID_LEAVES 2
+
+enum pt_capabilities {
+ PT_CAP_max_subleaf = 0,
+ PT_CAP_cr3_filtering,
+ PT_CAP_topa_output,
+ PT_CAP_topa_multiple_entries,
+ PT_CAP_payloads_lip,
+};
+
+struct pt_pmu {
+ struct pmu pmu;
+ u32 caps[4 * PT_CPUID_LEAVES];
+};
+
+/**
+ * struct pt_buffer - buffer configuration; one buffer per task_struct or
+ * cpu, depending on perf event configuration
+ * @cpu: cpu for per-cpu allocation
+ * @tables: list of ToPA tables in this buffer
+ * @first: shorthand for first topa table
+ * @last: shorthand for last topa table
+ * @cur: current topa table
+ * @nr_pages: buffer size in pages
+ * @cur_idx: current output region's index within @cur table
+ * @output_off: offset within the current output region
+ * @data_size: running total of the amount of data in this buffer
+ * @lost: if data was lost/truncated
+ * @head: logical write offset inside the buffer
+ * @snapshot: if this is for a snapshot/overwrite counter
+ * @stop_pos: STOP topa entry in the buffer
+ * @intr_pos: INT topa entry in the buffer
+ * @data_pages: array of pages from perf
+ * @topa_index: table of topa entries indexed by page offset
+ */
+struct pt_buffer {
+ int cpu;
+ struct list_head tables;
+ struct topa *first, *last, *cur;
+ unsigned int cur_idx;
+ size_t output_off;
+ unsigned long nr_pages;
+ local_t data_size;
+ local_t lost;
+ local64_t head;
+ bool snapshot;
+ unsigned long stop_pos, intr_pos;
+ void **data_pages;
+ struct topa_entry *topa_index[0];
+};
+
+/**
+ * struct pt - per-cpu pt context
+ * @handle: perf output handle
+ * @handle_nmi: handle PT PMIs on this cpu; set while there's an active event
+ */
+struct pt {
+ struct perf_output_handle handle;
+ int handle_nmi;
+};
+
+#endif /* __INTEL_PT_H__ */
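
The sizes() helper in the new intel_pt.h encodes each ToPA region size as 4KB shifted left by the enum value, i.e. 1 << (tsz + 12). A standalone check of a few representative entries (enum values mirrored from the table above):

#include <stdio.h>

enum toy_topa_sz { TOY_TOPA_4K = 0, TOY_TOPA_2MB = 9, TOY_TOPA_128MB = 15 };

static unsigned int toy_sizes(enum toy_topa_sz tsz)
{
	return 1u << (tsz + 12);                 /* same formula as sizes() above */
}

int main(void)
{
	printf("%u %u %u\n",
	       toy_sizes(TOY_TOPA_4K),           /* 4096                */
	       toy_sizes(TOY_TOPA_2MB),          /* 2097152   (2 MB)    */
	       toy_sizes(TOY_TOPA_128MB));       /* 134217728 (128 MB)  */
	return 0;
}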
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index a041e094b8b9..d76f13d6d8d6 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -404,11 +404,10 @@ static const struct file_operations mtrr_fops = {
static int mtrr_seq_show(struct seq_file *seq, void *offset)
{
char factor;
- int i, max, len;
+ int i, max;
mtrr_type type;
unsigned long base, size;
- len = 0;
max = num_var_ranges;
for (i = 0; i < max; i++) {
mtrr_if->get(i, &base, &size, &type);
@@ -425,11 +424,10 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
size >>= 20 - PAGE_SHIFT;
}
/* Base can be > 32bit */
- len += seq_printf(seq, "reg%02i: base=0x%06lx000 "
- "(%5luMB), size=%5lu%cB, count=%d: %s\n",
- i, base, base >> (20 - PAGE_SHIFT), size,
- factor, mtrr_usage_table[i],
- mtrr_attrib_to_str(type));
+ seq_printf(seq, "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
+ i, base, base >> (20 - PAGE_SHIFT),
+ size, factor,
+ mtrr_usage_table[i], mtrr_attrib_to_str(type));
}
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e2888a3ad1e3..87848ebe2bb7 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -263,6 +263,14 @@ static void hw_perf_event_destroy(struct perf_event *event)
}
}
+void hw_perf_lbr_event_destroy(struct perf_event *event)
+{
+ hw_perf_event_destroy(event);
+
+ /* undo the lbr/bts event accounting */
+ x86_del_exclusive(x86_lbr_exclusive_lbr);
+}
+
static inline int x86_pmu_initialized(void)
{
return x86_pmu.handle_irq != NULL;
@@ -302,6 +310,35 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
return x86_pmu_extra_regs(val, event);
}
+/*
+ * Check if we can create event of a certain type (that no conflicting events
+ * are present).
+ */
+int x86_add_exclusive(unsigned int what)
+{
+ int ret = -EBUSY, i;
+
+ if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
+ return 0;
+
+ mutex_lock(&pmc_reserve_mutex);
+ for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
+ if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+ goto out;
+
+ atomic_inc(&x86_pmu.lbr_exclusive[what]);
+ ret = 0;
+
+out:
+ mutex_unlock(&pmc_reserve_mutex);
+ return ret;
+}
+
+void x86_del_exclusive(unsigned int what)
+{
+ atomic_dec(&x86_pmu.lbr_exclusive[what]);
+}
+
int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
@@ -346,6 +383,12 @@ int x86_setup_perfctr(struct perf_event *event)
/* BTS is currently only allowed for user-mode. */
if (!attr->exclude_kernel)
return -EOPNOTSUPP;
+
+ /* disallow bts if conflicting events are present */
+ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+ return -EBUSY;
+
+ event->destroy = hw_perf_lbr_event_destroy;
}
hwc->config |= config;
@@ -399,39 +442,41 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
- /*
- * check that PEBS LBR correction does not conflict with
- * whatever the user is asking with attr->branch_sample_type
- */
- if (event->attr.precise_ip > 1 &&
- x86_pmu.intel_cap.pebs_format < 2) {
- u64 *br_type = &event->attr.branch_sample_type;
-
- if (has_branch_stack(event)) {
- if (!precise_br_compat(event))
- return -EOPNOTSUPP;
-
- /* branch_sample_type is compatible */
-
- } else {
- /*
- * user did not specify branch_sample_type
- *
- * For PEBS fixups, we capture all
- * the branches at the priv level of the
- * event.
- */
- *br_type = PERF_SAMPLE_BRANCH_ANY;
-
- if (!event->attr.exclude_user)
- *br_type |= PERF_SAMPLE_BRANCH_USER;
-
- if (!event->attr.exclude_kernel)
- *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
- }
+ }
+ /*
+ * check that PEBS LBR correction does not conflict with
+ * whatever the user is asking with attr->branch_sample_type
+ */
+ if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
+ u64 *br_type = &event->attr.branch_sample_type;
+
+ if (has_branch_stack(event)) {
+ if (!precise_br_compat(event))
+ return -EOPNOTSUPP;
+
+ /* branch_sample_type is compatible */
+
+ } else {
+ /*
+ * user did not specify branch_sample_type
+ *
+ * For PEBS fixups, we capture all
+ * the branches at the priv level of the
+ * event.
+ */
+ *br_type = PERF_SAMPLE_BRANCH_ANY;
+
+ if (!event->attr.exclude_user)
+ *br_type |= PERF_SAMPLE_BRANCH_USER;
+
+ if (!event->attr.exclude_kernel)
+ *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
}
}
+ if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
+ event->attach_state |= PERF_ATTACH_TASK_DATA;
+
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
@@ -449,6 +494,12 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+ if (event->attr.sample_period && x86_pmu.limit_period) {
+ if (x86_pmu.limit_period(event, event->attr.sample_period) >
+ event->attr.sample_period)
+ return -EINVAL;
+ }
+
return x86_setup_perfctr(event);
}
@@ -728,14 +779,17 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
struct event_constraint *c;
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
struct perf_event *e;
- int i, wmin, wmax, num = 0;
+ int i, wmin, wmax, unsched = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ if (x86_pmu.start_scheduling)
+ x86_pmu.start_scheduling(cpuc);
+
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
hwc = &cpuc->event_list[i]->hw;
- c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+ c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
hwc->constraint = c;
wmin = min(wmin, c->weight);
@@ -768,24 +822,30 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
/* slow path */
if (i != n)
- num = perf_assign_events(cpuc->event_list, n, wmin,
- wmax, assign);
+ unsched = perf_assign_events(cpuc->event_list, n, wmin,
+ wmax, assign);
/*
- * Mark the event as committed, so we do not put_constraint()
- * in case new events are added and fail scheduling.
+ * In case of success (unsched = 0), mark events as committed,
+ * so we do not put_constraint() in case new events are added
+ * and fail to be scheduled
+ *
+ * We invoke the lower level commit callback to lock the resource
+ *
+ * We do not need to do all of this in case we are called to
+ * validate an event group (assign == NULL)
*/
- if (!num && assign) {
+ if (!unsched && assign) {
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
e->hw.flags |= PERF_X86_EVENT_COMMITTED;
+ if (x86_pmu.commit_scheduling)
+ x86_pmu.commit_scheduling(cpuc, e, assign[i]);
}
}
- /*
- * scheduling failed or is just a simulation,
- * free resources if necessary
- */
- if (!assign || num) {
+
+ if (!assign || unsched) {
+
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
/*
@@ -795,11 +855,18 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
continue;
+ /*
+ * release events that failed scheduling
+ */
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, e);
}
}
- return num ? -EINVAL : 0;
+
+ if (x86_pmu.stop_scheduling)
+ x86_pmu.stop_scheduling(cpuc);
+
+ return unsched ? -EINVAL : 0;
}
/*
@@ -986,6 +1053,9 @@ int x86_perf_event_set_period(struct perf_event *event)
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
+ if (x86_pmu.limit_period)
+ left = x86_pmu.limit_period(event, left);
+
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
@@ -1033,7 +1103,6 @@ static int x86_pmu_add(struct perf_event *event, int flags)
hwc = &event->hw;
- perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
ret = n = collect_events(cpuc, event, false);
if (ret < 0)
@@ -1071,7 +1140,6 @@ done_collect:
ret = 0;
out:
- perf_pmu_enable(event->pmu);
return ret;
}
@@ -1103,7 +1171,7 @@ static void x86_pmu_start(struct perf_event *event, int flags)
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
- u64 pebs;
+ u64 pebs, debugctl;
struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
@@ -1121,14 +1189,20 @@ void perf_event_print_debug(void)
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
- rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
- pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
+ if (x86_pmu.pebs_constraints) {
+ rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
+ pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
+ }
+ if (x86_pmu.lbr_nr) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
+ }
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
@@ -1321,11 +1395,12 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int ret = NOTIFY_OK;
+ int i, ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- cpuc->kfree_on_online = NULL;
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+ cpuc->kfree_on_online[i] = NULL;
if (x86_pmu.cpu_prepare)
ret = x86_pmu.cpu_prepare(cpu);
break;
@@ -1336,7 +1411,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
- kfree(cpuc->kfree_on_online);
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+ kfree(cpuc->kfree_on_online[i]);
+ cpuc->kfree_on_online[i] = NULL;
+ }
break;
case CPU_DYING:
@@ -1712,7 +1790,7 @@ static int validate_event(struct perf_event *event)
if (IS_ERR(fake_cpuc))
return PTR_ERR(fake_cpuc);
- c = x86_pmu.get_event_constraints(fake_cpuc, event);
+ c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
if (!c || !c->weight)
ret = -EINVAL;
@@ -1914,10 +1992,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
NULL,
};
-static void x86_pmu_flush_branch_stack(void)
+static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
- if (x86_pmu.flush_branch_stack)
- x86_pmu.flush_branch_stack();
+ if (x86_pmu.sched_task)
+ x86_pmu.sched_task(ctx, sched_in);
}
void perf_check_microcode(void)
@@ -1949,7 +2027,8 @@ static struct pmu pmu = {
.commit_txn = x86_pmu_commit_txn,
.event_idx = x86_pmu_event_idx,
- .flush_branch_stack = x86_pmu_flush_branch_stack,
+ .sched_task = x86_pmu_sched_task,
+ .task_ctx_size = sizeof(struct x86_perf_task_context),
};
void arch_perf_update_userpage(struct perf_event *event,
@@ -1968,13 +2047,23 @@ void arch_perf_update_userpage(struct perf_event *event,
data = cyc2ns_read_begin();
+ /*
+ * Internal timekeeping for enabled/running/stopped times
+ * is always in the local_clock domain.
+ */
userpg->cap_user_time = 1;
userpg->time_mult = data->cyc2ns_mul;
userpg->time_shift = data->cyc2ns_shift;
userpg->time_offset = data->cyc2ns_offset - now;
- userpg->cap_user_time_zero = 1;
- userpg->time_zero = data->cyc2ns_offset;
+ /*
+ * cap_user_time_zero doesn't make sense when we're using a different
+ * time base for the records.
+ */
+ if (event->clock == &local_clock) {
+ userpg->cap_user_time_zero = 1;
+ userpg->time_zero = data->cyc2ns_offset;
+ }
cyc2ns_read_end(data);
}
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index df525d2be1e8..6ac5cb7a9e14 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -65,13 +65,15 @@ struct event_constraint {
/*
* struct hw_perf_event.flags flags
*/
-#define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */
-#define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style datala, store */
-#define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */
-#define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_RDPMC_ALLOWED 0x40 /* grant rdpmc permission */
+#define PERF_X86_EVENT_PEBS_LDLAT 0x0001 /* ld+ldlat data address sampling */
+#define PERF_X86_EVENT_PEBS_ST 0x0002 /* st data address sampling */
+#define PERF_X86_EVENT_PEBS_ST_HSW 0x0004 /* haswell style datala, store */
+#define PERF_X86_EVENT_COMMITTED 0x0008 /* event passed commit_txn */
+#define PERF_X86_EVENT_PEBS_LD_HSW 0x0010 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW 0x0020 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */
+#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
struct amd_nb {
@@ -123,8 +125,37 @@ struct intel_shared_regs {
unsigned core_id; /* per-core: core id */
};
+enum intel_excl_state_type {
+ INTEL_EXCL_UNUSED = 0, /* counter is unused */
+ INTEL_EXCL_SHARED = 1, /* counter can be used by both threads */
+ INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
+};
+
+struct intel_excl_states {
+ enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
+ enum intel_excl_state_type state[X86_PMC_IDX_MAX];
+	int num_alloc_cntrs; /* #counters allocated */
+	int max_alloc_cntrs; /* max #counters allowed */
+ bool sched_started; /* true if scheduling has started */
+};
+
+struct intel_excl_cntrs {
+ raw_spinlock_t lock;
+
+ struct intel_excl_states states[2];
+
+ int refcnt; /* per-core: #HT threads */
+ unsigned core_id; /* per-core: core id */
+};
+
#define MAX_LBR_ENTRIES 16
+enum {
+ X86_PERF_KFREE_SHARED = 0,
+ X86_PERF_KFREE_EXCL = 1,
+ X86_PERF_KFREE_MAX
+};
+
struct cpu_hw_events {
/*
* Generic x86 PMC bits
@@ -179,6 +210,12 @@ struct cpu_hw_events {
* used on Intel NHM/WSM/SNB
*/
struct intel_shared_regs *shared_regs;
+ /*
+	 * manage exclusive counter access between hyperthreads
+ */
+ struct event_constraint *constraint_list; /* in enable order */
+ struct intel_excl_cntrs *excl_cntrs;
+ int excl_thread_id; /* 0 or 1 */
/*
* AMD specific bits
@@ -187,7 +224,7 @@ struct cpu_hw_events {
/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
u64 perf_ctr_virt_mask;
- void *kfree_on_online;
+ void *kfree_on_online[X86_PERF_KFREE_MAX];
};
#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
@@ -202,6 +239,10 @@ struct cpu_hw_events {
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
+#define INTEL_EXCLEVT_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
+ 0, PERF_X86_EVENT_EXCL)
+
/*
* The overlap flag marks event constraints with overlapping counter
* masks. This is the case if the counter mask of such an event is not
@@ -259,6 +300,10 @@ struct cpu_hw_events {
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
+
#define INTEL_PLD_CONSTRAINT(c, n) \
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
@@ -283,22 +328,40 @@ struct cpu_hw_events {
/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
- __EVENT_CONSTRAINT(code, n, \
+ __EVENT_CONSTRAINT(code, n, \
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, \
+ PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
+
/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
__EVENT_CONSTRAINT(code, n, \
INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, \
+ PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
+
/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
__EVENT_CONSTRAINT(code, n, \
INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, \
+ PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
+
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
__EVENT_CONSTRAINT(code, n, \
@@ -408,6 +471,13 @@ union x86_pmu_config {
#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
+enum {
+ x86_lbr_exclusive_lbr,
+ x86_lbr_exclusive_bts,
+ x86_lbr_exclusive_pt,
+ x86_lbr_exclusive_max,
+};
+
/*
* struct x86_pmu - generic x86 pmu
*/
@@ -443,14 +513,25 @@ struct x86_pmu {
u64 max_period;
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
+ int idx,
struct perf_event *event);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
+
+ void (*commit_scheduling)(struct cpu_hw_events *cpuc,
+ struct perf_event *event,
+ int cntr);
+
+ void (*start_scheduling)(struct cpu_hw_events *cpuc);
+
+ void (*stop_scheduling)(struct cpu_hw_events *cpuc);
+
struct event_constraint *event_constraints;
struct x86_pmu_quirk *quirks;
int perfctr_second_write;
bool late_ack;
+ unsigned (*limit_period)(struct perf_event *event, unsigned l);
/*
* sysfs attrs
@@ -472,7 +553,8 @@ struct x86_pmu {
void (*cpu_dead)(int cpu);
void (*check_microcode)(void);
- void (*flush_branch_stack)(void);
+ void (*sched_task)(struct perf_event_context *ctx,
+ bool sched_in);
/*
* Intel Arch Perfmon v2+
@@ -504,10 +586,15 @@ struct x86_pmu {
bool lbr_double_abort; /* duplicated lbr aborts */
/*
+ * Intel PT/LBR/BTS are exclusive
+ */
+ atomic_t lbr_exclusive[x86_lbr_exclusive_max];
+
+ /*
* Extra registers for events
*/
struct extra_reg *extra_regs;
- unsigned int er_flags;
+ unsigned int flags;
/*
* Intel host/guest support (KVM)
@@ -515,6 +602,13 @@ struct x86_pmu {
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
+struct x86_perf_task_context {
+ u64 lbr_from[MAX_LBR_ENTRIES];
+ u64 lbr_to[MAX_LBR_ENTRIES];
+ int lbr_callstack_users;
+ int lbr_stack_state;
+};
+
#define x86_add_quirk(func_) \
do { \
static struct x86_pmu_quirk __quirk __initdata = { \
@@ -524,8 +618,13 @@ do { \
x86_pmu.quirks = &__quirk; \
} while (0)
-#define ERF_NO_HT_SHARING 1
-#define ERF_HAS_RSP_1 2
+/*
+ * x86_pmu flags
+ */
+#define PMU_FL_NO_HT_SHARING 0x1 /* no hyper-threading resource sharing */
+#define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */
+#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
+#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -546,6 +645,12 @@ static struct perf_pmu_events_attr event_attr_##v = { \
extern struct x86_pmu x86_pmu __read_mostly;
+static inline bool x86_pmu_has_lbr_callstack(void)
+{
+ return x86_pmu.lbr_sel_map &&
+ x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
+}
+
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
int x86_perf_event_set_period(struct perf_event *event);
@@ -588,6 +693,12 @@ static inline int x86_pmu_rdpmc_index(int index)
return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
+int x86_add_exclusive(unsigned int what);
+
+void x86_del_exclusive(unsigned int what);
+
+void hw_perf_lbr_event_destroy(struct perf_event *event);
+
int x86_setup_perfctr(struct perf_event *event);
int x86_pmu_hw_config(struct perf_event *event);
@@ -674,10 +785,34 @@ static inline int amd_pmu_init(void)
#ifdef CONFIG_CPU_SUP_INTEL
+static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
+{
+ /* user explicitly requested branch sampling */
+ if (has_branch_stack(event))
+ return true;
+
+ /* implicit branch sampling to correct PEBS skid */
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
+ x86_pmu.intel_cap.pebs_format < 2)
+ return true;
+
+ return false;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+ if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+ !event->attr.freq && event->hw.sample_period == 1)
+ return true;
+
+ return false;
+}
+
int intel_pmu_save_and_restart(struct perf_event *event);
struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);
+x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event);
struct intel_shared_regs *allocate_shared_regs(int cpu);
@@ -727,13 +862,15 @@ void intel_pmu_pebs_disable_all(void);
void intel_ds_init(void);
+void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
+
void intel_pmu_lbr_reset(void);
void intel_pmu_lbr_enable(struct perf_event *event);
void intel_pmu_lbr_disable(struct perf_event *event);
-void intel_pmu_lbr_enable_all(void);
+void intel_pmu_lbr_enable_all(bool pmi);
void intel_pmu_lbr_disable_all(void);
@@ -747,8 +884,18 @@ void intel_pmu_lbr_init_atom(void);
void intel_pmu_lbr_init_snb(void);
+void intel_pmu_lbr_init_hsw(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
+void intel_pt_interrupt(void);
+
+int intel_bts_interrupt(void);
+
+void intel_bts_enable_local(void);
+
+void intel_bts_disable_local(void);
+
int p4_pmu_init(void);
int p6_pmu_init(void);
@@ -758,6 +905,10 @@ int knc_pmu_init(void);
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
+static inline int is_ht_workaround_enabled(void)
+{
+ return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
+}
#else /* CONFIG_CPU_SUP_INTEL */
static inline void reserve_ds_buffers(void)
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 28926311aac1..1cee5d2d7ece 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -382,6 +382,7 @@ static int amd_pmu_cpu_prepare(int cpu)
static void amd_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
struct amd_nb *nb;
int i, nb_id;
@@ -399,7 +400,7 @@ static void amd_pmu_cpu_starting(int cpu)
continue;
if (nb->nb_id == nb_id) {
- cpuc->kfree_on_online = cpuc->amd_nb;
+ *onln = cpuc->amd_nb;
cpuc->amd_nb = nb;
break;
}
@@ -429,7 +430,8 @@ static void amd_pmu_cpu_dead(int cpu)
}
static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
/*
* if not NB event or no NB, then no constraints
@@ -537,7 +539,8 @@ static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
static struct event_constraint *
-amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
+amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int event_code = amd_get_event_code(hwc);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index a61f5c6911da..989d3c215d2b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -796,7 +796,7 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
* the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
* is using the new offset.
*/
-static int force_ibs_eilvt_setup(void)
+static void force_ibs_eilvt_setup(void)
{
int offset;
int ret;
@@ -811,26 +811,24 @@ static int force_ibs_eilvt_setup(void)
if (offset == APIC_EILVT_NR_MAX) {
printk(KERN_DEBUG "No EILVT entry available\n");
- return -EBUSY;
+ return;
}
ret = setup_ibs_ctl(offset);
if (ret)
goto out;
- if (!ibs_eilvt_valid()) {
- ret = -EFAULT;
+ if (!ibs_eilvt_valid())
goto out;
- }
pr_info("IBS: LVT offset %d assigned\n", offset);
- return 0;
+ return;
out:
preempt_disable();
put_eilvt(offset);
preempt_enable();
- return ret;
+ return;
}
static void ibs_eilvt_setup(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 258990688a5e..3998131d1a68 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/watchdog.h>
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
@@ -113,6 +114,12 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
EVENT_CONSTRAINT_END
};
@@ -131,15 +138,12 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
- /*
- * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
- * siblings; disable these events because they can corrupt unrelated
- * counters.
- */
- INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
EVENT_CONSTRAINT_END
};
@@ -217,6 +221,21 @@ static struct event_constraint intel_hsw_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
+
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
+ EVENT_CONSTRAINT_END
+};
+
+struct event_constraint intel_bdw_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
+ INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
EVENT_CONSTRAINT_END
};
@@ -415,6 +434,202 @@ static __initconst const u64 snb_hw_cache_event_ids
};
+/*
+ * Notes on the events:
+ * - data reads do not include code reads (comparable to earlier tables)
+ * - data counts include speculative execution (except L1 write, dtlb, bpu)
+ * - remote node access includes remote memory, remote cache, remote mmio.
+ * - prefetches are not included in the counts because they are not
+ * reliably counted.
+ */
+
+#define HSW_DEMAND_DATA_RD BIT_ULL(0)
+#define HSW_DEMAND_RFO BIT_ULL(1)
+#define HSW_ANY_RESPONSE BIT_ULL(16)
+#define HSW_SUPPLIER_NONE BIT_ULL(17)
+#define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
+#define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
+#define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
+#define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
+#define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
+ HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
+ HSW_L3_MISS_REMOTE_HOP2P)
+#define HSW_SNOOP_NONE BIT_ULL(31)
+#define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
+#define HSW_SNOOP_MISS BIT_ULL(33)
+#define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
+#define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
+#define HSW_SNOOP_HITM BIT_ULL(36)
+#define HSW_SNOOP_NON_DRAM BIT_ULL(37)
+#define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
+ HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
+ HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
+ HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
+#define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
+#define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
+#define HSW_DEMAND_WRITE HSW_DEMAND_RFO
+#define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
+ HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
+#define HSW_LLC_ACCESS HSW_ANY_RESPONSE
+
+#define BDW_L3_MISS_LOCAL BIT(26)
+#define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
+ HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
+ HSW_L3_MISS_REMOTE_HOP2P)
+
+
+static __initconst const u64 hsw_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
+ [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+};
+
+static __initconst const u64 hsw_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
+ HSW_LLC_ACCESS,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
+ HSW_L3_MISS|HSW_ANY_SNOOP,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
+ HSW_LLC_ACCESS,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
+ HSW_L3_MISS|HSW_ANY_SNOOP,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
+ HSW_L3_MISS_LOCAL_DRAM|
+ HSW_SNOOP_DRAM,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
+ HSW_L3_MISS_REMOTE|
+ HSW_SNOOP_DRAM,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
+ HSW_L3_MISS_LOCAL_DRAM|
+ HSW_SNOOP_DRAM,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
+ HSW_L3_MISS_REMOTE|
+ HSW_SNOOP_DRAM,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+};
+
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -919,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
- [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
+ [ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
@@ -969,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
@@ -1002,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
- [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
+ [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -1029,21 +1243,10 @@ static __initconst const u64 slm_hw_cache_event_ids
},
};
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
-{
- /* user explicitly requested branch sampling */
- if (has_branch_stack(event))
- return true;
-
- /* implicit branch sampling to correct PEBS skid */
- if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
- x86_pmu.intel_cap.pebs_format < 2)
- return true;
-
- return false;
-}
-
-static void intel_pmu_disable_all(void)
+/*
+ * Use from PMIs where the LBRs are already disabled.
+ */
+static void __intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1051,17 +1254,24 @@ static void intel_pmu_disable_all(void)
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
+ else
+ intel_bts_disable_local();
intel_pmu_pebs_disable_all();
+}
+
+static void intel_pmu_disable_all(void)
+{
+ __intel_pmu_disable_all();
intel_pmu_lbr_disable_all();
}
-static void intel_pmu_enable_all(int added)
+static void __intel_pmu_enable_all(int added, bool pmi)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
intel_pmu_pebs_enable_all();
- intel_pmu_lbr_enable_all();
+ intel_pmu_lbr_enable_all(pmi);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
@@ -1073,7 +1283,13 @@ static void intel_pmu_enable_all(int added)
return;
intel_pmu_enable_bts(event->hw.config);
- }
+ } else
+ intel_bts_enable_local();
+}
+
+static void intel_pmu_enable_all(int added)
+{
+ __intel_pmu_enable_all(added, false);
}
/*
@@ -1207,7 +1423,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
* must disable before any actual event
* because any event may be combined with LBR
*/
- if (intel_pmu_needs_lbr_smpl(event))
+ if (needs_branch_stack(event))
intel_pmu_lbr_disable(event);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -1268,7 +1484,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
* must enabled before any actual event
* because any event may be combined with LBR
*/
- if (intel_pmu_needs_lbr_smpl(event))
+ if (needs_branch_stack(event))
intel_pmu_lbr_enable(event);
if (event->attr.exclude_host)
@@ -1334,6 +1550,18 @@ static void intel_pmu_reset(void)
if (ds)
ds->bts_index = ds->bts_buffer_base;
+ /* Ack all overflows and disable fixed counters */
+ if (x86_pmu.version >= 2) {
+ intel_pmu_ack_status(intel_pmu_get_status());
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ }
+
+ /* Reset LBRs and LBR freezing */
+ if (x86_pmu.lbr_nr) {
+ update_debugctlmsr(get_debugctlmsr() &
+ ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
+ }
+
local_irq_restore(flags);
}
@@ -1357,8 +1585,9 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
*/
if (!x86_pmu.late_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI);
- intel_pmu_disable_all();
+ __intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
+ handled += intel_bts_interrupt();
status = intel_pmu_get_status();
if (!status)
goto done;
@@ -1399,6 +1628,14 @@ again:
}
/*
+ * Intel PT
+ */
+ if (__test_and_clear_bit(55, (unsigned long *)&status)) {
+ handled++;
+ intel_pt_interrupt();
+ }
+
+ /*
* Checkpointed counters can lead to 'spurious' PMIs because the
* rollback caused by the PMI will have cleared the overflow status
* bit. Therefore always force probe these counters.
@@ -1433,7 +1670,7 @@ again:
goto again;
done:
- intel_pmu_enable_all(0);
+ __intel_pmu_enable_all(0, true);
/*
* Only unmask the NMI after the overflow counters
* have been reset. This avoids spurious NMIs on
@@ -1464,7 +1701,7 @@ intel_bts_constraints(struct perf_event *event)
static int intel_alt_er(int idx)
{
- if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
+ if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
return idx;
if (idx == EXTRA_REG_RSP_0)
@@ -1624,7 +1861,8 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
}
struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
struct event_constraint *c;
@@ -1641,7 +1879,8 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
}
static struct event_constraint *
-intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
struct event_constraint *c;
@@ -1657,7 +1896,278 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
- return x86_get_event_constraints(cpuc, event);
+ return x86_get_event_constraints(cpuc, idx, event);
+}
+
+static void
+intel_start_scheduling(struct cpu_hw_events *cpuc)
+{
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xl, *xlo;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid; /* sibling thread */
+
+ /*
+ * nothing needed if in group validation mode
+ */
+ if (cpuc->is_fake || !is_ht_workaround_enabled())
+ return;
+
+ /*
+ * no exclusion needed
+ */
+ if (!excl_cntrs)
+ return;
+
+ xlo = &excl_cntrs->states[o_tid];
+ xl = &excl_cntrs->states[tid];
+
+ xl->sched_started = true;
+ xl->num_alloc_cntrs = 0;
+ /*
+	 * lock the shared state until we are done scheduling,
+	 * i.e. until intel_stop_scheduling() runs;
+	 * this makes scheduling appear as a transaction
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ raw_spin_lock(&excl_cntrs->lock);
+
+ /*
+ * save initial state of sibling thread
+ */
+ memcpy(xlo->init_state, xlo->state, sizeof(xlo->init_state));
+}
+
+static void
+intel_stop_scheduling(struct cpu_hw_events *cpuc)
+{
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xl, *xlo;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid; /* sibling thread */
+
+ /*
+ * nothing needed if in group validation mode
+ */
+ if (cpuc->is_fake || !is_ht_workaround_enabled())
+ return;
+ /*
+ * no exclusion needed
+ */
+ if (!excl_cntrs)
+ return;
+
+ xlo = &excl_cntrs->states[o_tid];
+ xl = &excl_cntrs->states[tid];
+
+ /*
+ * make new sibling thread state visible
+ */
+ memcpy(xlo->state, xlo->init_state, sizeof(xlo->state));
+
+ xl->sched_started = false;
+ /*
+ * release shared state lock (acquired in intel_start_scheduling())
+ */
+ raw_spin_unlock(&excl_cntrs->lock);
+}
+
+static struct event_constraint *
+intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ int idx, struct event_constraint *c)
+{
+ struct event_constraint *cx;
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xl, *xlo;
+ int is_excl, i;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid; /* alternate */
+
+ /*
+ * validating a group does not require
+ * enforcing cross-thread exclusion
+ */
+ if (cpuc->is_fake || !is_ht_workaround_enabled())
+ return c;
+
+ /*
+ * no exclusion needed
+ */
+ if (!excl_cntrs)
+ return c;
+ /*
+ * event requires exclusive counter access
+ * across HT threads
+ */
+ is_excl = c->flags & PERF_X86_EVENT_EXCL;
+
+ /*
+ * xl = state of current HT
+ * xlo = state of sibling HT
+ */
+ xl = &excl_cntrs->states[tid];
+ xlo = &excl_cntrs->states[o_tid];
+
+ /*
+	 * do not allow scheduling of more than max_alloc_cntrs,
+	 * which is set to half the available generic counters.
+	 * This helps avoid counter starvation of the sibling thread
+	 * by ensuring that at most half the counters can be in
+	 * exclusive mode. There are no designated counters for the
+	 * limits: any N/2 counters can be used. This helps with
+	 * events that have specific counter constraints.
+ */
+ if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs)
+ return &emptyconstraint;
+
+ cx = c;
+
+ /*
+ * because we modify the constraint, we need
+ * to make a copy. Static constraints come
+ * from static const tables.
+ *
+	 * only needed when the constraint has not yet
+	 * been cloned (marked dynamic)
+ */
+ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+
+ /* sanity check */
+ if (idx < 0)
+ return &emptyconstraint;
+
+ /*
+ * grab pre-allocated constraint entry
+ */
+ cx = &cpuc->constraint_list[idx];
+
+ /*
+ * initialize dynamic constraint
+ * with static constraint
+ */
+ memcpy(cx, c, sizeof(*cx));
+
+ /*
+ * mark constraint as dynamic, so we
+ * can free it later on
+ */
+ cx->flags |= PERF_X86_EVENT_DYNAMIC;
+ }
+
+ /*
+ * From here on, the constraint is dynamic.
+ * Either it was just allocated above, or it
+	 * was allocated during an earlier invocation
+ * of this function
+ */
+
+ /*
+ * Modify static constraint with current dynamic
+ * state of thread
+ *
+ * EXCLUSIVE: sibling counter measuring exclusive event
+ * SHARED : sibling counter measuring non-exclusive event
+ * UNUSED : sibling counter unused
+ */
+ for_each_set_bit(i, cx->idxmsk, X86_PMC_IDX_MAX) {
+ /*
+		 * exclusive event in the sibling counter:
+		 * our corresponding counter cannot be used,
+		 * regardless of our event
+ */
+ if (xl->state[i] == INTEL_EXCL_EXCLUSIVE)
+ __clear_bit(i, cx->idxmsk);
+ /*
+		 * if we are measuring an exclusive event and the
+		 * sibling is measuring a non-exclusive one, then
+		 * the counter cannot be used
+ */
+ if (is_excl && xl->state[i] == INTEL_EXCL_SHARED)
+ __clear_bit(i, cx->idxmsk);
+ }
+
+ /*
+ * recompute actual bit weight for scheduling algorithm
+ */
+ cx->weight = hweight64(cx->idxmsk64);
+
+ /*
+ * if we return an empty mask, then switch
+ * back to static empty constraint to avoid
+ * the cost of freeing later on
+ */
+ if (cx->weight == 0)
+ cx = &emptyconstraint;
+
+ return cx;
+}
+
+static struct event_constraint *
+intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c1 = event->hw.constraint;
+ struct event_constraint *c2;
+
+ /*
+ * first time only
+ * - static constraint: no change across incremental scheduling calls
+ * - dynamic constraint: handled by intel_get_excl_constraints()
+ */
+ c2 = __intel_get_event_constraints(cpuc, idx, event);
+ if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
+ bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
+ c1->weight = c2->weight;
+ c2 = c1;
+ }
+
+ if (cpuc->excl_cntrs)
+ return intel_get_excl_constraints(cpuc, event, idx, c2);
+
+ return c2;
+}
+
+static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xlo, *xl;
+ unsigned long flags = 0; /* keep compiler happy */
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid;
+
+ /*
+ * nothing needed if in group validation mode
+ */
+ if (cpuc->is_fake)
+ return;
+
+ WARN_ON_ONCE(!excl_cntrs);
+
+ if (!excl_cntrs)
+ return;
+
+ xl = &excl_cntrs->states[tid];
+ xlo = &excl_cntrs->states[o_tid];
+
+ /*
+	 * put_constraint may be called from x86_schedule_events(),
+	 * which already holds the lock, so make locking
+	 * conditional here
+ */
+ if (!xl->sched_started)
+ raw_spin_lock_irqsave(&excl_cntrs->lock, flags);
+
+ /*
+ * if event was actually assigned, then mark the
+ * counter state as unused now
+ */
+ if (hwc->idx >= 0)
+ xlo->state[hwc->idx] = INTEL_EXCL_UNUSED;
+
+ if (!xl->sched_started)
+ raw_spin_unlock_irqrestore(&excl_cntrs->lock, flags);
}
static void
@@ -1678,7 +2188,57 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
+ struct event_constraint *c = event->hw.constraint;
+
intel_put_shared_regs_event_constraints(cpuc, event);
+
+ /*
+	 * if the PMU has exclusive counter restrictions, then
+	 * all events are subject to them and must call the
+	 * put_excl_constraints() routine
+ */
+ if (c && cpuc->excl_cntrs)
+ intel_put_excl_constraints(cpuc, event);
+
+ /* cleanup dynamic constraint */
+ if (c && (c->flags & PERF_X86_EVENT_DYNAMIC))
+ event->hw.constraint = NULL;
+}
+
+static void intel_commit_scheduling(struct cpu_hw_events *cpuc,
+ struct perf_event *event, int cntr)
+{
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct event_constraint *c = event->hw.constraint;
+ struct intel_excl_states *xlo, *xl;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid;
+ int is_excl;
+
+ if (cpuc->is_fake || !c)
+ return;
+
+ is_excl = c->flags & PERF_X86_EVENT_EXCL;
+
+ if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
+ return;
+
+ WARN_ON_ONCE(!excl_cntrs);
+
+ if (!excl_cntrs)
+ return;
+
+ xl = &excl_cntrs->states[tid];
+ xlo = &excl_cntrs->states[o_tid];
+
+ WARN_ON_ONCE(!raw_spin_is_locked(&excl_cntrs->lock));
+
+ if (cntr >= 0) {
+ if (is_excl)
+ xlo->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+ else
+ xlo->init_state[cntr] = INTEL_EXCL_SHARED;
+ }
}
static void intel_pebs_aliases_core2(struct perf_event *event)
@@ -1747,10 +2307,21 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip && x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);
- if (intel_pmu_needs_lbr_smpl(event)) {
+ if (needs_branch_stack(event)) {
ret = intel_pmu_setup_lbr_filter(event);
if (ret)
return ret;
+
+ /*
+ * BTS is set up earlier in this path, so don't account twice
+ */
+ if (!intel_pmu_has_bts(event)) {
+ /* disallow lbr if conflicting events are present */
+ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+ return -EBUSY;
+
+ event->destroy = hw_perf_lbr_event_destroy;
+ }
}
if (event->attr.type != PERF_TYPE_RAW)
@@ -1891,9 +2462,12 @@ static struct event_constraint counter2_constraint =
EVENT_CONSTRAINT(0, 0x4, 0);
static struct event_constraint *
-hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
- struct event_constraint *c = intel_get_event_constraints(cpuc, event);
+ struct event_constraint *c;
+
+ c = intel_get_event_constraints(cpuc, idx, event);
/* Handle special quirk on in_tx_checkpointed only in counter 2 */
if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
@@ -1905,6 +2479,32 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
return c;
}
+/*
+ * Broadwell:
+ *
+ * The INST_RETIRED.ALL period always needs to have the lowest 6 bits cleared
+ * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
+ * the two to enforce a minimum period of 128 (the smallest value that has bits
+ * 0-5 cleared and >= 100).
+ *
+ * Because of how the code in x86_perf_event_set_period() works, the truncation
+ * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
+ * to make up for the 'lost' events due to carrying the 'error' in period_left.
+ *
+ * Therefore the effective (average) period matches the requested period,
+ * despite coarser hardware granularity.
+ */
+static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+{
+ if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+ X86_CONFIG(.event=0xc0, .umask=0x01)) {
+ if (left < 128)
+ left = 128;
+ left &= ~0x3fu;
+ }
+ return left;
+}
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@@ -1932,34 +2532,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
return x86_event_sysfs_show(page, config, event);
}
-static __initconst const struct x86_pmu core_pmu = {
- .name = "core",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = core_pmu_enable_all,
- .enable = core_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = x86_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = intel_pmu_event_map,
- .max_events = ARRAY_SIZE(intel_perfmon_event_map),
- .apic = 1,
- /*
- * Intel PMCs cannot be accessed sanely above 32 bit width,
- * so we install an artificial 1<<31 period regardless of
- * the generic event period:
- */
- .max_period = (1ULL << 31) - 1,
- .get_event_constraints = intel_get_event_constraints,
- .put_event_constraints = intel_put_event_constraints,
- .event_constraints = intel_core_event_constraints,
- .guest_get_msrs = core_guest_get_msrs,
- .format_attrs = intel_arch_formats_attr,
- .events_sysfs_show = intel_event_sysfs_show,
-};
-
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
struct intel_shared_regs *regs;
@@ -1979,16 +2551,52 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
return regs;
}
+static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
+{
+ struct intel_excl_cntrs *c;
+ int i;
+
+ c = kzalloc_node(sizeof(struct intel_excl_cntrs),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (c) {
+ raw_spin_lock_init(&c->lock);
+ for (i = 0; i < X86_PMC_IDX_MAX; i++) {
+ c->states[0].state[i] = INTEL_EXCL_UNUSED;
+ c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
+
+ c->states[1].state[i] = INTEL_EXCL_UNUSED;
+ c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
+ }
+ c->core_id = -1;
+ }
+ return c;
+}
+
static int intel_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
- return NOTIFY_OK;
+ if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+ cpuc->shared_regs = allocate_shared_regs(cpu);
+ if (!cpuc->shared_regs)
+ return NOTIFY_BAD;
+ }
- cpuc->shared_regs = allocate_shared_regs(cpu);
- if (!cpuc->shared_regs)
- return NOTIFY_BAD;
+ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+
+ cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+ if (!cpuc->constraint_list)
+ return NOTIFY_BAD;
+
+ cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
+ if (!cpuc->excl_cntrs) {
+ kfree(cpuc->constraint_list);
+ kfree(cpuc->shared_regs);
+ return NOTIFY_BAD;
+ }
+ cpuc->excl_thread_id = 0;
+ }
return NOTIFY_OK;
}
@@ -2010,13 +2618,15 @@ static void intel_pmu_cpu_starting(int cpu)
if (!cpuc->shared_regs)
return;
- if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
+ if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
+ void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
+
for_each_cpu(i, topology_thread_cpumask(cpu)) {
struct intel_shared_regs *pc;
pc = per_cpu(cpu_hw_events, i).shared_regs;
if (pc && pc->core_id == core_id) {
- cpuc->kfree_on_online = cpuc->shared_regs;
+ *onln = cpuc->shared_regs;
cpuc->shared_regs = pc;
break;
}
@@ -2027,6 +2637,44 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.lbr_sel_map)
cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
+
+ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ int h = x86_pmu.num_counters >> 1;
+
+ for_each_cpu(i, topology_thread_cpumask(cpu)) {
+ struct intel_excl_cntrs *c;
+
+ c = per_cpu(cpu_hw_events, i).excl_cntrs;
+ if (c && c->core_id == core_id) {
+ cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
+ cpuc->excl_cntrs = c;
+ cpuc->excl_thread_id = 1;
+ break;
+ }
+ }
+ cpuc->excl_cntrs->core_id = core_id;
+ cpuc->excl_cntrs->refcnt++;
+ /*
+ * set hard limit to half the number of generic counters
+ */
+ cpuc->excl_cntrs->states[0].max_alloc_cntrs = h;
+ cpuc->excl_cntrs->states[1].max_alloc_cntrs = h;
+ }
+}
+
+static void free_excl_cntrs(int cpu)
+{
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_excl_cntrs *c;
+
+ c = cpuc->excl_cntrs;
+ if (c) {
+ if (c->core_id == -1 || --c->refcnt == 0)
+ kfree(c);
+ cpuc->excl_cntrs = NULL;
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
+ }
}
static void intel_pmu_cpu_dying(int cpu)
@@ -2041,19 +2689,9 @@ static void intel_pmu_cpu_dying(int cpu)
cpuc->shared_regs = NULL;
}
- fini_debug_store_on_cpu(cpu);
-}
+ free_excl_cntrs(cpu);
-static void intel_pmu_flush_branch_stack(void)
-{
- /*
- * Intel LBR does not tag entries with the
- * PID of the current task, then we need to
- * flush it on ctxsw
- * For now, we simply reset it
- */
- if (x86_pmu.lbr_nr)
- intel_pmu_lbr_reset();
+ fini_debug_store_on_cpu(cpu);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -2076,6 +2714,44 @@ static struct attribute *intel_arch3_formats_attr[] = {
NULL,
};
+static __initconst const struct x86_pmu core_pmu = {
+ .name = "core",
+ .handle_irq = x86_pmu_handle_irq,
+ .disable_all = x86_pmu_disable_all,
+ .enable_all = core_pmu_enable_all,
+ .enable = core_pmu_enable_event,
+ .disable = x86_pmu_disable_event,
+ .hw_config = x86_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
+ .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .event_map = intel_pmu_event_map,
+ .max_events = ARRAY_SIZE(intel_perfmon_event_map),
+ .apic = 1,
+ /*
+ * Intel PMCs cannot be accessed sanely above 32-bit width,
+ * so we install an artificial 1<<31 period regardless of
+ * the generic event period:
+ */
+ .max_period = (1ULL<<31) - 1,
+ .get_event_constraints = intel_get_event_constraints,
+ .put_event_constraints = intel_put_event_constraints,
+ .event_constraints = intel_core_event_constraints,
+ .guest_get_msrs = core_guest_get_msrs,
+ .format_attrs = intel_arch_formats_attr,
+ .events_sysfs_show = intel_event_sysfs_show,
+
+ /*
+	 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
+	 * together with PMU version 1 and thus be using core_pmu with
+	 * shared_regs. We need the following callbacks here to allocate
+	 * it properly.
+ */
+ .cpu_prepare = intel_pmu_cpu_prepare,
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
+};
+
static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
@@ -2107,7 +2783,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
.guest_get_msrs = intel_guest_get_msrs,
- .flush_branch_stack = intel_pmu_flush_branch_stack,
+ .sched_task = intel_pmu_lbr_sched_task,
};
static __init void intel_clovertown_quirk(void)
@@ -2264,6 +2940,27 @@ static __init void intel_nehalem_quirk(void)
}
}
+/*
+ * enable software workaround for errata:
+ * SNB: BJ122
+ * IVB: BV98
+ * HSW: HSD29
+ *
+ * Only needed when HT is enabled. However, detecting
+ * whether HT is enabled is difficult (model specific), so instead
+ * we enable the workaround during early boot and verify in a
+ * later initcall phase, once valid topology information is
+ * available, whether it is actually needed.
+ */
+static __init void intel_ht_bug(void)
+{
+ x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
+
+ x86_pmu.commit_scheduling = intel_commit_scheduling;
+ x86_pmu.start_scheduling = intel_start_scheduling;
+ x86_pmu.stop_scheduling = intel_stop_scheduling;
+}
+
EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
@@ -2443,7 +3140,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
x86_pmu.extra_regs = intel_slm_extra_regs;
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
pr_cont("Silvermont events, ");
break;
@@ -2461,7 +3158,7 @@ __init int intel_pmu_init(void)
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu.extra_regs = intel_westmere_extra_regs;
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.cpu_events = nhm_events_attrs;
@@ -2478,6 +3175,7 @@ __init int intel_pmu_init(void)
case 42: /* 32nm SandyBridge */
case 45: /* 32nm SandyBridge-E/EN/EP */
x86_add_quirk(intel_sandybridge_quirk);
+ x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@@ -2492,9 +3190,11 @@ __init int intel_pmu_init(void)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
+
+
/* all extra regs are per-cpu when HT is on */
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
- x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.cpu_events = snb_events_attrs;
@@ -2510,6 +3210,7 @@ __init int intel_pmu_init(void)
case 58: /* 22nm IvyBridge */
case 62: /* 22nm IvyBridge-EP/EX */
+ x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
/* dTLB-load-misses on IVB is different than SNB */
@@ -2528,8 +3229,8 @@ __init int intel_pmu_init(void)
else
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
- x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.cpu_events = snb_events_attrs;
@@ -2545,19 +3246,20 @@ __init int intel_pmu_init(void)
case 63: /* 22nm Haswell Server */
case 69: /* 22nm Haswell ULT */
case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+ x86_add_quirk(intel_ht_bug);
x86_pmu.late_ack = true;
- memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
- memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
- intel_pmu_lbr_init_snb();
+ intel_pmu_lbr_init_hsw();
x86_pmu.event_constraints = intel_hsw_event_constraints;
x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
/* all extra regs are per-cpu when HT is on */
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
- x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
@@ -2566,6 +3268,39 @@ __init int intel_pmu_init(void)
pr_cont("Haswell events, ");
break;
+ case 61: /* 14nm Broadwell Core-M */
+ case 86: /* 14nm Broadwell Xeon D */
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+
+ /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
+ hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
+ BDW_L3_MISS|HSW_SNOOP_DRAM;
+ hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
+ HSW_SNOOP_DRAM;
+ hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
+ BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
+ hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
+ BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
+
+ intel_pmu_lbr_init_hsw();
+
+ x86_pmu.event_constraints = intel_bdw_event_constraints;
+ x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = hsw_get_event_constraints;
+ x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.limit_period = bdw_limit_period;
+ pr_cont("Broadwell events, ");
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
@@ -2651,3 +3386,47 @@ __init int intel_pmu_init(void)
return 0;
}
+
+/*
+ * HT bug: phase 2 init
+ * Called once we have valid topology information to check
+ * whether or not HT is enabled.
+ * If HT is off, then we disable the workaround.
+ */
+static __init int fixup_ht_bug(void)
+{
+ int cpu = smp_processor_id();
+ int w, c;
+ /*
+ * problem not present on this CPU model, nothing to do
+ */
+ if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
+ return 0;
+
+ w = cpumask_weight(topology_thread_cpumask(cpu));
+ if (w > 1) {
+ pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
+ return 0;
+ }
+
+ watchdog_nmi_disable_all();
+
+ x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
+
+ x86_pmu.commit_scheduling = NULL;
+ x86_pmu.start_scheduling = NULL;
+ x86_pmu.stop_scheduling = NULL;
+
+ watchdog_nmi_enable_all();
+
+ get_online_cpus();
+
+ for_each_online_cpu(c) {
+ free_excl_cntrs(c);
+ }
+
+ put_online_cpus();
+ pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
+ return 0;
+}
+subsys_initcall(fixup_ht_bug)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
new file mode 100644
index 000000000000..ac1f0c55f379
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -0,0 +1,525 @@
+/*
+ * BTS PMU driver for perf
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef DEBUG
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/coredump.h>
+
+#include <asm-generic/sizes.h>
+#include <asm/perf_event.h>
+
+#include "perf_event.h"
+
+struct bts_ctx {
+ struct perf_output_handle handle;
+ struct debug_store ds_back;
+ int started;
+};
+
+static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
+
+#define BTS_RECORD_SIZE 24
+#define BTS_SAFETY_MARGIN 4080
+
+struct bts_phys {
+ struct page *page;
+ unsigned long size;
+ unsigned long offset;
+ unsigned long displacement;
+};
+
+struct bts_buffer {
+ size_t real_size; /* multiple of BTS_RECORD_SIZE */
+ unsigned int nr_pages;
+ unsigned int nr_bufs;
+ unsigned int cur_buf;
+ bool snapshot;
+ local_t data_size;
+ local_t lost;
+ local_t head;
+ unsigned long end;
+ void **data_pages;
+ struct bts_phys buf[0];
+};
+
+struct pmu bts_pmu;
+
+void intel_pmu_enable_bts(u64 config);
+void intel_pmu_disable_bts(void);
+
+static size_t buf_size(struct page *page)
+{
+ return 1 << (PAGE_SHIFT + page_private(page));
+}
+
+static void *
+bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
+{
+ struct bts_buffer *buf;
+ struct page *page;
+ int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
+ unsigned long offset;
+ size_t size = nr_pages << PAGE_SHIFT;
+ int pg, nbuf, pad;
+
+ /* count all the high order buffers */
+ for (pg = 0, nbuf = 0; pg < nr_pages;) {
+ page = virt_to_page(pages[pg]);
+ if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
+ return NULL;
+ pg += 1 << page_private(page);
+ nbuf++;
+ }
+
+ /*
+	 * to avoid interrupts in overwrite mode, only allow one physical buffer
+ */
+ if (overwrite && nbuf > 1)
+ return NULL;
+
+ buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->nr_pages = nr_pages;
+ buf->nr_bufs = nbuf;
+ buf->snapshot = overwrite;
+ buf->data_pages = pages;
+ buf->real_size = size - size % BTS_RECORD_SIZE;
+
+ for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
+ unsigned int __nr_pages;
+
+ page = virt_to_page(pages[pg]);
+ __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
+ buf->buf[nbuf].page = page;
+ buf->buf[nbuf].offset = offset;
+ buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
+ buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
+ pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
+ buf->buf[nbuf].size -= pad;
+
+ pg += __nr_pages;
+ offset += __nr_pages << PAGE_SHIFT;
+ }
+
+ return buf;
+}
+
+static void bts_buffer_free_aux(void *data)
+{
+ kfree(data);
+}
+
+static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
+{
+ return buf->buf[idx].offset + buf->buf[idx].displacement;
+}
+
+static void
+bts_config_buffer(struct bts_buffer *buf)
+{
+ int cpu = raw_smp_processor_id();
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ struct bts_phys *phys = &buf->buf[buf->cur_buf];
+ unsigned long index, thresh = 0, end = phys->size;
+ struct page *page = phys->page;
+
+ index = local_read(&buf->head);
+
+ if (!buf->snapshot) {
+ if (buf->end < phys->offset + buf_size(page))
+ end = buf->end - phys->offset - phys->displacement;
+
+ index -= phys->offset + phys->displacement;
+
+ if (end - index > BTS_SAFETY_MARGIN)
+ thresh = end - BTS_SAFETY_MARGIN;
+ else if (end - index > BTS_RECORD_SIZE)
+ thresh = end - BTS_RECORD_SIZE;
+ else
+ thresh = end;
+ }
+
+ ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
+ ds->bts_index = ds->bts_buffer_base + index;
+ ds->bts_absolute_maximum = ds->bts_buffer_base + end;
+ ds->bts_interrupt_threshold = !buf->snapshot
+ ? ds->bts_buffer_base + thresh
+ : ds->bts_absolute_maximum + BTS_RECORD_SIZE;
+}
+
+static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
+{
+ unsigned long index = head - phys->offset;
+
+ memset(page_address(phys->page) + index, 0, phys->size - index);
+}
+
+static bool bts_buffer_is_full(struct bts_buffer *buf, struct bts_ctx *bts)
+{
+ if (buf->snapshot)
+ return false;
+
+ if (local_read(&buf->data_size) >= bts->handle.size ||
+ bts->handle.size - local_read(&buf->data_size) < BTS_RECORD_SIZE)
+ return true;
+
+ return false;
+}
+
+static void bts_update(struct bts_ctx *bts)
+{
+ int cpu = raw_smp_processor_id();
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ struct bts_buffer *buf = perf_get_aux(&bts->handle);
+ unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;
+
+ if (!buf)
+ return;
+
+ head = index + bts_buffer_offset(buf, buf->cur_buf);
+ old = local_xchg(&buf->head, head);
+
+ if (!buf->snapshot) {
+ if (old == head)
+ return;
+
+ if (ds->bts_index >= ds->bts_absolute_maximum)
+ local_inc(&buf->lost);
+
+ /*
+ * old and head are always in the same physical buffer, so we
+ * can subtract them to get the data size.
+ */
+ local_add(head - old, &buf->data_size);
+ } else {
+ local_set(&buf->data_size, head);
+ }
+}
+
+static void __bts_event_start(struct perf_event *event)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_buffer *buf = perf_get_aux(&bts->handle);
+ u64 config = 0;
+
+ if (!buf || bts_buffer_is_full(buf, bts))
+ return;
+
+ event->hw.state = 0;
+
+ if (!buf->snapshot)
+ config |= ARCH_PERFMON_EVENTSEL_INT;
+ if (!event->attr.exclude_kernel)
+ config |= ARCH_PERFMON_EVENTSEL_OS;
+ if (!event->attr.exclude_user)
+ config |= ARCH_PERFMON_EVENTSEL_USR;
+
+ bts_config_buffer(buf);
+
+ /*
+ * local barrier to make sure that ds configuration made it
+ * before we enable BTS
+ */
+ wmb();
+
+ intel_pmu_enable_bts(config);
+}
+
+static void bts_event_start(struct perf_event *event, int flags)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ __bts_event_start(event);
+
+ /* PMI handler: this counter is running and likely generating PMIs */
+ ACCESS_ONCE(bts->started) = 1;
+}
+
+static void __bts_event_stop(struct perf_event *event)
+{
+ /*
+ * No extra synchronization is mandated by the documentation to have
+ * BTS data stores globally visible.
+ */
+ intel_pmu_disable_bts();
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
+}
+
+static void bts_event_stop(struct perf_event *event, int flags)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ /* PMI handler: don't restart this counter */
+ ACCESS_ONCE(bts->started) = 0;
+
+ __bts_event_stop(event);
+
+ if (flags & PERF_EF_UPDATE)
+ bts_update(bts);
+}
+
+void intel_bts_enable_local(void)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ if (bts->handle.event && bts->started)
+ __bts_event_start(bts->handle.event);
+}
+
+void intel_bts_disable_local(void)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ if (bts->handle.event)
+ __bts_event_stop(bts->handle.event);
+}
+
+static int
+bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
+{
+ unsigned long head, space, next_space, pad, gap, skip, wakeup;
+ unsigned int next_buf;
+ struct bts_phys *phys, *next_phys;
+ int ret;
+
+ if (buf->snapshot)
+ return 0;
+
+ head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+ if (WARN_ON_ONCE(head != local_read(&buf->head)))
+ return -EINVAL;
+
+ phys = &buf->buf[buf->cur_buf];
+ space = phys->offset + phys->displacement + phys->size - head;
+ pad = space;
+ if (space > handle->size) {
+ space = handle->size;
+ space -= space % BTS_RECORD_SIZE;
+ }
+ if (space <= BTS_SAFETY_MARGIN) {
+ /* See if next phys buffer has more space */
+ next_buf = buf->cur_buf + 1;
+ if (next_buf >= buf->nr_bufs)
+ next_buf = 0;
+ next_phys = &buf->buf[next_buf];
+ gap = buf_size(phys->page) - phys->displacement - phys->size +
+ next_phys->displacement;
+ skip = pad + gap;
+ if (handle->size >= skip) {
+ next_space = next_phys->size;
+ if (next_space + skip > handle->size) {
+ next_space = handle->size - skip;
+ next_space -= next_space % BTS_RECORD_SIZE;
+ }
+ if (next_space > space || !space) {
+ if (pad)
+ bts_buffer_pad_out(phys, head);
+ ret = perf_aux_output_skip(handle, skip);
+ if (ret)
+ return ret;
+ /* Advance to next phys buffer */
+ phys = next_phys;
+ space = next_space;
+ head = phys->offset + phys->displacement;
+ /*
+ * After this, cur_buf and head won't match ds
+ * anymore, so we must not be racing with
+ * bts_update().
+ */
+ buf->cur_buf = next_buf;
+ local_set(&buf->head, head);
+ }
+ }
+ }
+
+ /* Don't go far beyond wakeup watermark */
+ wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
+ handle->head;
+ if (space > wakeup) {
+ space = wakeup;
+ space -= space % BTS_RECORD_SIZE;
+ }
+
+ buf->end = head + space;
+
+ /*
+ * If we have no space, the lost notification would have been sent when
+ * we hit absolute_maximum - see bts_update()
+ */
+ if (!space)
+ return -ENOSPC;
+
+ return 0;
+}
+
+int intel_bts_interrupt(void)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct perf_event *event = bts->handle.event;
+ struct bts_buffer *buf;
+ s64 old_head;
+ int err;
+
+ if (!event || !bts->started)
+ return 0;
+
+ buf = perf_get_aux(&bts->handle);
+ /*
+ * Skip snapshot counters: they don't use the interrupt, but
+ * there's no other way of telling, because the pointer will
+ * keep moving
+ */
+ if (!buf || buf->snapshot)
+ return 0;
+
+ old_head = local_read(&buf->head);
+ bts_update(bts);
+
+ /* no new data */
+ if (old_head == local_read(&buf->head))
+ return 0;
+
+ perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
+ !!local_xchg(&buf->lost, 0));
+
+ buf = perf_aux_output_begin(&bts->handle, event);
+ if (!buf)
+ return 1;
+
+ err = bts_buffer_reset(buf, &bts->handle);
+ if (err)
+ perf_aux_output_end(&bts->handle, 0, false);
+
+ return 1;
+}
+
+static void bts_event_del(struct perf_event *event, int mode)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_buffer *buf = perf_get_aux(&bts->handle);
+
+ bts_event_stop(event, PERF_EF_UPDATE);
+
+ if (buf) {
+ if (buf->snapshot)
+ bts->handle.head =
+ local_xchg(&buf->data_size,
+ buf->nr_pages << PAGE_SHIFT);
+ perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
+ !!local_xchg(&buf->lost, 0));
+ }
+
+ cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
+ cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
+ cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
+ cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
+}
+
+static int bts_event_add(struct perf_event *event, int mode)
+{
+ struct bts_buffer *buf;
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int ret = -EBUSY;
+
+ event->hw.state = PERF_HES_STOPPED;
+
+ if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+ return -EBUSY;
+
+ if (bts->handle.event)
+ return -EBUSY;
+
+ buf = perf_aux_output_begin(&bts->handle, event);
+ if (!buf)
+ return -EINVAL;
+
+ ret = bts_buffer_reset(buf, &bts->handle);
+ if (ret) {
+ perf_aux_output_end(&bts->handle, 0, false);
+ return ret;
+ }
+
+ bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
+ bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
+ bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;
+
+ if (mode & PERF_EF_START) {
+ bts_event_start(event, 0);
+ if (hwc->state & PERF_HES_STOPPED) {
+ bts_event_del(event, 0);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void bts_event_destroy(struct perf_event *event)
+{
+ x86_del_exclusive(x86_lbr_exclusive_bts);
+}
+
+static int bts_event_init(struct perf_event *event)
+{
+ if (event->attr.type != bts_pmu.type)
+ return -ENOENT;
+
+ if (x86_add_exclusive(x86_lbr_exclusive_bts))
+ return -EBUSY;
+
+ event->destroy = bts_event_destroy;
+
+ return 0;
+}
+
+static void bts_event_read(struct perf_event *event)
+{
+}
+
+static __init int bts_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
+ return -ENODEV;
+
+ bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
+ bts_pmu.task_ctx_nr = perf_sw_context;
+ bts_pmu.event_init = bts_event_init;
+ bts_pmu.add = bts_event_add;
+ bts_pmu.del = bts_event_del;
+ bts_pmu.start = bts_event_start;
+ bts_pmu.stop = bts_event_stop;
+ bts_pmu.read = bts_event_read;
+ bts_pmu.setup_aux = bts_buffer_setup_aux;
+ bts_pmu.free_aux = bts_buffer_free_aux;
+
+ return perf_pmu_register(&bts_pmu, "intel_bts", -1);
+}
+
+module_init(bts_init);
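
bts_config_buffer() above picks the interrupt threshold with three fallbacks: keep BTS_SAFETY_MARGIN bytes of headroom when the region is large enough, otherwise leave room for at least one record, otherwise interrupt only at the very end. A minimal sketch of just that decision, using the same constants as the driver (the helper name is made up for illustration):

#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080

static unsigned long bts_pick_threshold(unsigned long index, unsigned long end)
{
	if (end - index > BTS_SAFETY_MARGIN)
		return end - BTS_SAFETY_MARGIN;	/* normal case: generous headroom */
	if (end - index > BTS_RECORD_SIZE)
		return end - BTS_RECORD_SIZE;	/* room for one last record */
	return end;				/* region is effectively full */
}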
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
new file mode 100644
index 000000000000..e4d1b8b738fa
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -0,0 +1,1379 @@
+/*
+ * Intel Cache Quality-of-Service Monitoring (CQM) support.
+ *
+ * Based very, very heavily on work by Peter Zijlstra.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include "perf_event.h"
+
+#define MSR_IA32_PQR_ASSOC 0x0c8f
+#define MSR_IA32_QM_CTR 0x0c8e
+#define MSR_IA32_QM_EVTSEL 0x0c8d
+
+static unsigned int cqm_max_rmid = -1;
+static unsigned int cqm_l3_scale; /* supposedly cacheline size */
+
+struct intel_cqm_state {
+ raw_spinlock_t lock;
+ int rmid;
+ int cnt;
+};
+
+static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state);
+
+/*
+ * Protects cache_groups, cqm_rmid_free_lru and cqm_rmid_limbo_lru.
+ * Also protects event->hw.cqm_rmid
+ *
+ * Hold either for stability, both for modification of ->hw.cqm_rmid.
+ */
+static DEFINE_MUTEX(cache_mutex);
+static DEFINE_RAW_SPINLOCK(cache_lock);
+
+/*
+ * Groups of events that have the same target(s), one RMID per group.
+ */
+static LIST_HEAD(cache_groups);
+
+/*
+ * Mask of CPUs for reading CQM values. We only need one per socket.
+ */
+static cpumask_t cqm_cpumask;
+
+#define RMID_VAL_ERROR (1ULL << 63)
+#define RMID_VAL_UNAVAIL (1ULL << 62)
+
+#define QOS_L3_OCCUP_EVENT_ID (1 << 0)
+
+#define QOS_EVENT_MASK QOS_L3_OCCUP_EVENT_ID
+
+/*
+ * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
+ *
+ * This rmid is always free and is guaranteed to have an associated
+ * near-zero occupancy value, i.e. no cachelines are tagged with this
+ * RMID, once __intel_cqm_rmid_rotate() returns.
+ */
+static unsigned int intel_cqm_rotation_rmid;
+
+#define INVALID_RMID (-1)
+
+/*
+ * Is @rmid valid for programming the hardware?
+ *
+ * rmid 0 is reserved by the hardware for all non-monitored tasks, which
+ * means that we should never come across an rmid with that value.
+ * Likewise, an rmid value of -1 is used to indicate "no rmid currently
+ * assigned" and is used as part of the rotation code.
+ */
+static inline bool __rmid_valid(unsigned int rmid)
+{
+ if (!rmid || rmid == INVALID_RMID)
+ return false;
+
+ return true;
+}
+
+static u64 __rmid_read(unsigned int rmid)
+{
+ u64 val;
+
+ /*
+ * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
+ * it just says that to increase confusion.
+ */
+ wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
+ rdmsrl(MSR_IA32_QM_CTR, val);
+
+ /*
+ * Aside from the ERROR and UNAVAIL bits, assume this thing returns
+ * the number of cachelines tagged with @rmid.
+ */
+ return val;
+}
+
+enum rmid_recycle_state {
+ RMID_YOUNG = 0,
+ RMID_AVAILABLE,
+ RMID_DIRTY,
+};
+
+struct cqm_rmid_entry {
+ unsigned int rmid;
+ enum rmid_recycle_state state;
+ struct list_head list;
+ unsigned long queue_time;
+};
+
+/*
+ * cqm_rmid_free_lru - A least recently used list of RMIDs.
+ *
+ * Oldest entry at the head, newest (most recently used) entry at the
+ * tail. This list is never traversed, it's only used to keep track of
+ * the lru order. That is, we only pick entries off the head or insert
+ * them on the tail.
+ *
+ * All entries on the list are 'free', and their RMIDs are not currently
+ * in use. To mark an RMID as in use, remove its entry from the lru
+ * list.
+ *
+ *
+ * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
+ *
+ * This list contains RMIDs that no one is currently using but that
+ * may have a non-zero occupancy value associated with them. The
+ * rotation worker moves RMIDs from the limbo list to the free list once
+ * the occupancy value drops below __intel_cqm_threshold.
+ *
+ * Both lists are protected by cache_mutex.
+ */
+static LIST_HEAD(cqm_rmid_free_lru);
+static LIST_HEAD(cqm_rmid_limbo_lru);
+
+/*
+ * We use a simple array of pointers so that we can look up a struct
+ * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid()
+ * and __put_rmid() from having to worry about dealing with struct
+ * cqm_rmid_entry - they just deal with rmids, i.e. integers.
+ *
+ * Once this array is initialized it is read-only. No locks are required
+ * to access it.
+ *
+ * All entries for all RMIDs can be looked up in this array at all
+ * times.
+ */
+static struct cqm_rmid_entry **cqm_rmid_ptrs;
+
+static inline struct cqm_rmid_entry *__rmid_entry(int rmid)
+{
+ struct cqm_rmid_entry *entry;
+
+ entry = cqm_rmid_ptrs[rmid];
+ WARN_ON(entry->rmid != rmid);
+
+ return entry;
+}
+
+/*
+ * Returns < 0 on fail.
+ *
+ * We expect to be called with cache_mutex held.
+ */
+static int __get_rmid(void)
+{
+ struct cqm_rmid_entry *entry;
+
+ lockdep_assert_held(&cache_mutex);
+
+ if (list_empty(&cqm_rmid_free_lru))
+ return INVALID_RMID;
+
+ entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
+ list_del(&entry->list);
+
+ return entry->rmid;
+}
+
+static void __put_rmid(unsigned int rmid)
+{
+ struct cqm_rmid_entry *entry;
+
+ lockdep_assert_held(&cache_mutex);
+
+ WARN_ON(!__rmid_valid(rmid));
+ entry = __rmid_entry(rmid);
+
+ entry->queue_time = jiffies;
+ entry->state = RMID_YOUNG;
+
+ list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
+}
+
+static int intel_cqm_setup_rmid_cache(void)
+{
+ struct cqm_rmid_entry *entry;
+ unsigned int nr_rmids;
+ int r = 0;
+
+ nr_rmids = cqm_max_rmid + 1;
+ cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
+ nr_rmids, GFP_KERNEL);
+ if (!cqm_rmid_ptrs)
+ return -ENOMEM;
+
+ for (; r <= cqm_max_rmid; r++) {
+ struct cqm_rmid_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto fail;
+
+ INIT_LIST_HEAD(&entry->list);
+ entry->rmid = r;
+ cqm_rmid_ptrs[r] = entry;
+
+ list_add_tail(&entry->list, &cqm_rmid_free_lru);
+ }
+
+ /*
+ * RMID 0 is special and is always allocated. It's used for all
+ * tasks that are not monitored.
+ */
+ entry = __rmid_entry(0);
+ list_del(&entry->list);
+
+ mutex_lock(&cache_mutex);
+ intel_cqm_rotation_rmid = __get_rmid();
+ mutex_unlock(&cache_mutex);
+
+ return 0;
+fail:
+ while (r--)
+ kfree(cqm_rmid_ptrs[r]);
+
+ kfree(cqm_rmid_ptrs);
+ return -ENOMEM;
+}
+
+/*
+ * Determine if @a and @b measure the same set of tasks.
+ *
+ * If @a and @b measure the same set of tasks then we want to share a
+ * single RMID.
+ */
+static bool __match_event(struct perf_event *a, struct perf_event *b)
+{
+ /* Per-cpu and task events don't mix */
+ if ((a->attach_state & PERF_ATTACH_TASK) !=
+ (b->attach_state & PERF_ATTACH_TASK))
+ return false;
+
+#ifdef CONFIG_CGROUP_PERF
+ if (a->cgrp != b->cgrp)
+ return false;
+#endif
+
+ /* If not task event, we're machine wide */
+ if (!(b->attach_state & PERF_ATTACH_TASK))
+ return true;
+
+ /*
+	 * Events that target the same task are placed into the same cache group.
+ */
+ if (a->hw.target == b->hw.target)
+ return true;
+
+ /*
+ * Are we an inherited event?
+ */
+ if (b->parent == a)
+ return true;
+
+ return false;
+}
+
+#ifdef CONFIG_CGROUP_PERF
+static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
+{
+ if (event->attach_state & PERF_ATTACH_TASK)
+ return perf_cgroup_from_task(event->hw.target);
+
+ return event->cgrp;
+}
+#endif
+
+/*
+ * Determine if @a's tasks intersect with @b's tasks
+ *
+ * There are combinations of events that we explicitly prohibit,
+ *
+ * PROHIBITS
+ * system-wide -> cgroup and task
+ * cgroup -> system-wide
+ * -> task in cgroup
+ * task -> system-wide
+ * -> task in cgroup
+ *
+ * Call this function before allocating an RMID.
+ */
+static bool __conflict_event(struct perf_event *a, struct perf_event *b)
+{
+#ifdef CONFIG_CGROUP_PERF
+ /*
+ * We can have any number of cgroups but only one system-wide
+ * event at a time.
+ */
+ if (a->cgrp && b->cgrp) {
+ struct perf_cgroup *ac = a->cgrp;
+ struct perf_cgroup *bc = b->cgrp;
+
+ /*
+ * This condition should have been caught in
+ * __match_event() and we should be sharing an RMID.
+ */
+ WARN_ON_ONCE(ac == bc);
+
+ if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
+ cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
+ return true;
+
+ return false;
+ }
+
+ if (a->cgrp || b->cgrp) {
+ struct perf_cgroup *ac, *bc;
+
+ /*
+ * cgroup and system-wide events are mutually exclusive
+ */
+ if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
+ (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
+ return true;
+
+ /*
+ * Ensure neither event is part of the other's cgroup
+ */
+ ac = event_to_cgroup(a);
+ bc = event_to_cgroup(b);
+ if (ac == bc)
+ return true;
+
+ /*
+ * Must have cgroup and non-intersecting task events.
+ */
+ if (!ac || !bc)
+ return false;
+
+ /*
+ * We have cgroup and task events, and the task belongs
+		 * to a cgroup. Check for overlap.
+ */
+ if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
+ cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
+ return true;
+
+ return false;
+ }
+#endif
+ /*
+ * If one of them is not a task, same story as above with cgroups.
+ */
+ if (!(a->attach_state & PERF_ATTACH_TASK) ||
+ !(b->attach_state & PERF_ATTACH_TASK))
+ return true;
+
+ /*
+ * Must be non-overlapping.
+ */
+ return false;
+}
+
+struct rmid_read {
+ unsigned int rmid;
+ atomic64_t value;
+};
+
+static void __intel_cqm_event_count(void *info);
+
+/*
+ * Exchange the RMID of a group of events.
+ */
+static unsigned int
+intel_cqm_xchg_rmid(struct perf_event *group, unsigned int rmid)
+{
+ struct perf_event *event;
+ unsigned int old_rmid = group->hw.cqm_rmid;
+ struct list_head *head = &group->hw.cqm_group_entry;
+
+ lockdep_assert_held(&cache_mutex);
+
+ /*
+ * If our RMID is being deallocated, perform a read now.
+ */
+ if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
+ struct rmid_read rr = {
+ .value = ATOMIC64_INIT(0),
+ .rmid = old_rmid,
+ };
+
+ on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
+ &rr, 1);
+ local64_set(&group->count, atomic64_read(&rr.value));
+ }
+
+ raw_spin_lock_irq(&cache_lock);
+
+ group->hw.cqm_rmid = rmid;
+ list_for_each_entry(event, head, hw.cqm_group_entry)
+ event->hw.cqm_rmid = rmid;
+
+ raw_spin_unlock_irq(&cache_lock);
+
+ return old_rmid;
+}
+
+/*
+ * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
+ * cachelines are still tagged with RMIDs in limbo, we progressively
+ * increment the threshold until we find an RMID in limbo with <=
+ * __intel_cqm_threshold lines tagged. This is designed to mitigate the
+ * problem where cachelines tagged with an RMID are not steadily being
+ * evicted.
+ *
+ * On successful rotations we decrease the threshold back towards zero.
+ *
+ * __intel_cqm_max_threshold provides an upper bound on the threshold,
+ * and is measured in bytes because it's exposed to userland.
+ */
+static unsigned int __intel_cqm_threshold;
+static unsigned int __intel_cqm_max_threshold;
+
+/*
+ * Test whether an RMID has a zero occupancy value on this cpu.
+ */
+static void intel_cqm_stable(void *arg)
+{
+ struct cqm_rmid_entry *entry;
+
+ list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
+ if (entry->state != RMID_AVAILABLE)
+ break;
+
+ if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
+ entry->state = RMID_DIRTY;
+ }
+}
+
+/*
+ * If we have group events waiting for an RMID that don't conflict with
+ * events already running, assign @rmid.
+ */
+static bool intel_cqm_sched_in_event(unsigned int rmid)
+{
+ struct perf_event *leader, *event;
+
+ lockdep_assert_held(&cache_mutex);
+
+ leader = list_first_entry(&cache_groups, struct perf_event,
+ hw.cqm_groups_entry);
+ event = leader;
+
+ list_for_each_entry_continue(event, &cache_groups,
+ hw.cqm_groups_entry) {
+ if (__rmid_valid(event->hw.cqm_rmid))
+ continue;
+
+ if (__conflict_event(event, leader))
+ continue;
+
+ intel_cqm_xchg_rmid(event, rmid);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Initially use this constant for both the limbo queue time and the
+ * rotation timer interval, pmu::hrtimer_interval_ms.
+ *
+ * They don't need to be the same, but the two are related since if you
+ * rotate faster than you recycle RMIDs, you may run out of available
+ * RMIDs.
+ */
+#define RMID_DEFAULT_QUEUE_TIME 250 /* ms */
+
+static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
+
+/*
+ * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
+ * @available: number of freeable RMIDs on the limbo list
+ *
+ * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
+ * cachelines are tagged with those RMIDs. After this we can reuse them
+ * and know that the current set of active RMIDs is stable.
+ *
+ * Return %true or %false depending on whether stabilization needs to be
+ * reattempted.
+ *
+ * If we return %true then @available is updated to indicate the
+ * number of RMIDs on the limbo list that have been queued for the
+ * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
+ * are above __intel_cqm_threshold.
+ */
+static bool intel_cqm_rmid_stabilize(unsigned int *available)
+{
+ struct cqm_rmid_entry *entry, *tmp;
+
+ lockdep_assert_held(&cache_mutex);
+
+ *available = 0;
+ list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
+ unsigned long min_queue_time;
+ unsigned long now = jiffies;
+
+ /*
+ * We hold RMIDs placed into limbo for a minimum queue
+ * time. Before the minimum queue time has elapsed we do
+ * not recycle RMIDs.
+ *
+ * The reasoning is that until a sufficient time has
+ * passed since we stopped using an RMID, any RMID
+ * placed onto the limbo list will likely still have
+ * data tagged in the cache, which means we'll probably
+ * fail to recycle it anyway.
+ *
+ * We can save ourselves an expensive IPI by skipping
+ * any RMIDs that have not been queued for the minimum
+ * time.
+ */
+ min_queue_time = entry->queue_time +
+ msecs_to_jiffies(__rmid_queue_time_ms);
+
+ if (time_after(min_queue_time, now))
+ break;
+
+ entry->state = RMID_AVAILABLE;
+ (*available)++;
+ }
+
+ /*
+ * Fast return if none of the RMIDs on the limbo list have been
+ * sitting on the queue for the minimum queue time.
+ */
+ if (!*available)
+ return false;
+
+ /*
+ * Test whether an RMID is free for each package.
+ */
+ on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);
+
+ list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
+ /*
+ * Exhausted all RMIDs that have waited min queue time.
+ */
+ if (entry->state == RMID_YOUNG)
+ break;
+
+ if (entry->state == RMID_DIRTY)
+ continue;
+
+ list_del(&entry->list); /* remove from limbo */
+
+ /*
+ * The rotation RMID gets priority if it's
+		 * currently invalid, in which case skip adding
+		 * the RMID to the free lru.
+ */
+ if (!__rmid_valid(intel_cqm_rotation_rmid)) {
+ intel_cqm_rotation_rmid = entry->rmid;
+ continue;
+ }
+
+ /*
+ * If we have groups waiting for RMIDs, hand
+ * them one now provided they don't conflict.
+ */
+ if (intel_cqm_sched_in_event(entry->rmid))
+ continue;
+
+ /*
+ * Otherwise place it onto the free list.
+ */
+ list_add_tail(&entry->list, &cqm_rmid_free_lru);
+ }
+
+
+ return __rmid_valid(intel_cqm_rotation_rmid);
+}
+
+/*
+ * Pick a victim group and move it to the tail of the group list.
+ * @next: The first group without an RMID
+ */
+static void __intel_cqm_pick_and_rotate(struct perf_event *next)
+{
+ struct perf_event *rotor;
+ unsigned int rmid;
+
+ lockdep_assert_held(&cache_mutex);
+
+ rotor = list_first_entry(&cache_groups, struct perf_event,
+ hw.cqm_groups_entry);
+
+ /*
+ * The group at the front of the list should always have a valid
+ * RMID. If it doesn't then no groups have RMIDs assigned and we
+ * don't need to rotate the list.
+ */
+ if (next == rotor)
+ return;
+
+ rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
+ __put_rmid(rmid);
+
+ list_rotate_left(&cache_groups);
+}
+
+/*
+ * Deallocate the RMIDs from any events that conflict with @event, and
+ * place them on the back of the group list.
+ */
+static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
+{
+ struct perf_event *group, *g;
+ unsigned int rmid;
+
+ lockdep_assert_held(&cache_mutex);
+
+ list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
+ if (group == event)
+ continue;
+
+ rmid = group->hw.cqm_rmid;
+
+ /*
+ * Skip events that don't have a valid RMID.
+ */
+ if (!__rmid_valid(rmid))
+ continue;
+
+ /*
+ * No conflict? No problem! Leave the event alone.
+ */
+ if (!__conflict_event(group, event))
+ continue;
+
+ intel_cqm_xchg_rmid(group, INVALID_RMID);
+ __put_rmid(rmid);
+ }
+}
+
+/*
+ * Attempt to rotate the groups and assign new RMIDs.
+ *
+ * We rotate for two reasons,
+ * 1. To handle the scheduling of conflicting events
+ * 2. To recycle RMIDs
+ *
+ * Rotating RMIDs is complicated because the hardware doesn't give us
+ * any clues.
+ *
+ * There are problems with the hardware interface; when you change the
+ * task:RMID map cachelines retain their 'old' tags, giving a skewed
+ * picture. In order to work around this, we must always keep one free
+ * RMID - intel_cqm_rotation_rmid.
+ *
+ * Rotation works by taking away an RMID from a group (the old RMID),
+ * and assigning the free RMID to another group (the new RMID). We must
+ * then wait for the old RMID to not be used (no cachelines tagged).
+ * This ensures that all cachelines are tagged with 'active' RMIDs. At
+ * this point we can start reading values for the new RMID and treat the
+ * old RMID as the free RMID for the next rotation.
+ *
+ * Return %true or %false depending on whether we did any rotating.
+ */
+static bool __intel_cqm_rmid_rotate(void)
+{
+ struct perf_event *group, *start = NULL;
+ unsigned int threshold_limit;
+ unsigned int nr_needed = 0;
+ unsigned int nr_available;
+ bool rotated = false;
+
+ mutex_lock(&cache_mutex);
+
+again:
+ /*
+ * Fast path through this function if there are no groups and no
+ * RMIDs that need cleaning.
+ */
+ if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
+ goto out;
+
+ list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
+ if (!__rmid_valid(group->hw.cqm_rmid)) {
+ if (!start)
+ start = group;
+ nr_needed++;
+ }
+ }
+
+ /*
+ * We have some event groups, but they all have RMIDs assigned
+ * and no RMIDs need cleaning.
+ */
+ if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
+ goto out;
+
+ if (!nr_needed)
+ goto stabilize;
+
+ /*
+ * We have more event groups without RMIDs than available RMIDs,
+ * or we have event groups that conflict with the ones currently
+ * scheduled.
+ *
+ * We force deallocate the rmid of the group at the head of
+ * cache_groups. The first event group without an RMID then gets
+ * assigned intel_cqm_rotation_rmid. This ensures we always make
+ * forward progress.
+ *
+ * Rotate the cache_groups list so the previous head is now the
+ * tail.
+ */
+ __intel_cqm_pick_and_rotate(start);
+
+ /*
+ * If the rotation is going to succeed, reduce the threshold so
+ * that we don't needlessly reuse dirty RMIDs.
+ */
+ if (__rmid_valid(intel_cqm_rotation_rmid)) {
+ intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
+ intel_cqm_rotation_rmid = __get_rmid();
+
+ intel_cqm_sched_out_conflicting_events(start);
+
+ if (__intel_cqm_threshold)
+ __intel_cqm_threshold--;
+ }
+
+ rotated = true;
+
+stabilize:
+ /*
+	 * We now need to stabilize the RMID we freed above (if any) to
+ * ensure that the next time we rotate we have an RMID with zero
+ * occupancy value.
+ *
+ * Alternatively, if we didn't need to perform any rotation,
+ * we'll have a bunch of RMIDs in limbo that need stabilizing.
+ */
+ threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;
+
+ while (intel_cqm_rmid_stabilize(&nr_available) &&
+ __intel_cqm_threshold < threshold_limit) {
+ unsigned int steal_limit;
+
+ /*
+ * Don't spin if nobody is actively waiting for an RMID,
+ * the rotation worker will be kicked as soon as an
+ * event needs an RMID anyway.
+ */
+ if (!nr_needed)
+ break;
+
+ /* Allow max 25% of RMIDs to be in limbo. */
+ steal_limit = (cqm_max_rmid + 1) / 4;
+
+ /*
+ * We failed to stabilize any RMIDs so our rotation
+ * logic is now stuck. In order to make forward progress
+ * we have a few options:
+ *
+ * 1. rotate ("steal") another RMID
+ * 2. increase the threshold
+ * 3. do nothing
+ *
+ * We do both of 1. and 2. until we hit the steal limit.
+ *
+ * The steal limit prevents all RMIDs ending up on the
+ * limbo list. This can happen if every RMID has a
+ * non-zero occupancy above threshold_limit, and the
+ * occupancy values aren't dropping fast enough.
+ *
+ * Note that there is prioritisation at work here - we'd
+ * rather increase the number of RMIDs on the limbo list
+ * than increase the threshold, because increasing the
+ * threshold skews the event data (because we reuse
+ * dirty RMIDs) - threshold bumps are a last resort.
+ */
+ if (nr_available < steal_limit)
+ goto again;
+
+ __intel_cqm_threshold++;
+ }
+
+out:
+ mutex_unlock(&cache_mutex);
+ return rotated;
+}
+
+static void intel_cqm_rmid_rotate(struct work_struct *work);
+
+static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);
+
+static struct pmu intel_cqm_pmu;
+
+static void intel_cqm_rmid_rotate(struct work_struct *work)
+{
+ unsigned long delay;
+
+ __intel_cqm_rmid_rotate();
+
+ delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
+ schedule_delayed_work(&intel_cqm_rmid_work, delay);
+}
+
+/*
+ * Find a group and setup RMID.
+ *
+ * If we're part of a group, we use the group's RMID.
+ */
+static void intel_cqm_setup_event(struct perf_event *event,
+ struct perf_event **group)
+{
+ struct perf_event *iter;
+ unsigned int rmid;
+ bool conflict = false;
+
+ list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
+ rmid = iter->hw.cqm_rmid;
+
+ if (__match_event(iter, event)) {
+ /* All tasks in a group share an RMID */
+ event->hw.cqm_rmid = rmid;
+ *group = iter;
+ return;
+ }
+
+ /*
+ * We only care about conflicts for events that are
+ * actually scheduled in (and hence have a valid RMID).
+ */
+ if (__conflict_event(iter, event) && __rmid_valid(rmid))
+ conflict = true;
+ }
+
+ if (conflict)
+ rmid = INVALID_RMID;
+ else
+ rmid = __get_rmid();
+
+ event->hw.cqm_rmid = rmid;
+}
+
+static void intel_cqm_event_read(struct perf_event *event)
+{
+ unsigned long flags;
+ unsigned int rmid;
+ u64 val;
+
+ /*
+ * Task events are handled by intel_cqm_event_count().
+ */
+ if (event->cpu == -1)
+ return;
+
+ raw_spin_lock_irqsave(&cache_lock, flags);
+ rmid = event->hw.cqm_rmid;
+
+ if (!__rmid_valid(rmid))
+ goto out;
+
+ val = __rmid_read(rmid);
+
+ /*
+ * Ignore this reading on error states and do not update the value.
+ */
+ if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
+ goto out;
+
+ local64_set(&event->count, val);
+out:
+ raw_spin_unlock_irqrestore(&cache_lock, flags);
+}
+
+static void __intel_cqm_event_count(void *info)
+{
+ struct rmid_read *rr = info;
+ u64 val;
+
+ val = __rmid_read(rr->rmid);
+
+ if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
+ return;
+
+ atomic64_add(val, &rr->value);
+}
+
+static inline bool cqm_group_leader(struct perf_event *event)
+{
+ return !list_empty(&event->hw.cqm_groups_entry);
+}
+
+static u64 intel_cqm_event_count(struct perf_event *event)
+{
+ unsigned long flags;
+ struct rmid_read rr = {
+ .value = ATOMIC64_INIT(0),
+ };
+
+ /*
+ * We only need to worry about task events. System-wide events
+	 * are handled as usual, i.e. entirely with
+ * intel_cqm_event_read().
+ */
+ if (event->cpu != -1)
+ return __perf_event_count(event);
+
+ /*
+ * Only the group leader gets to report values. This stops us
+ * reporting duplicate values to userspace, and gives us a clear
+ * rule for which task gets to report the values.
+ *
+ * Note that it is impossible to attribute these values to
+ * specific packages - we forfeit that ability when we create
+ * task events.
+ */
+ if (!cqm_group_leader(event))
+ return 0;
+
+ /*
+ * Notice that we don't perform the reading of an RMID
+ * atomically, because we can't hold a spin lock across the
+ * IPIs.
+ *
+ * Speculatively perform the read, since @event might be
+ * assigned a different (possibly invalid) RMID while we're
+	 * busy performing the IPI calls. It's therefore necessary to
+ * check @event's RMID afterwards, and if it has changed,
+ * discard the result of the read.
+ */
+ rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);
+
+ if (!__rmid_valid(rr.rmid))
+ goto out;
+
+ on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);
+
+ raw_spin_lock_irqsave(&cache_lock, flags);
+ if (event->hw.cqm_rmid == rr.rmid)
+ local64_set(&event->count, atomic64_read(&rr.value));
+ raw_spin_unlock_irqrestore(&cache_lock, flags);
+out:
+ return __perf_event_count(event);
+}
+
+static void intel_cqm_event_start(struct perf_event *event, int mode)
+{
+ struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
+ unsigned int rmid = event->hw.cqm_rmid;
+ unsigned long flags;
+
+ if (!(event->hw.cqm_state & PERF_HES_STOPPED))
+ return;
+
+ event->hw.cqm_state &= ~PERF_HES_STOPPED;
+
+ raw_spin_lock_irqsave(&state->lock, flags);
+
+ if (state->cnt++)
+ WARN_ON_ONCE(state->rmid != rmid);
+ else
+ WARN_ON_ONCE(state->rmid);
+
+ state->rmid = rmid;
+ wrmsrl(MSR_IA32_PQR_ASSOC, state->rmid);
+
+ raw_spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static void intel_cqm_event_stop(struct perf_event *event, int mode)
+{
+ struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
+ unsigned long flags;
+
+ if (event->hw.cqm_state & PERF_HES_STOPPED)
+ return;
+
+ event->hw.cqm_state |= PERF_HES_STOPPED;
+
+ raw_spin_lock_irqsave(&state->lock, flags);
+ intel_cqm_event_read(event);
+
+ if (!--state->cnt) {
+ state->rmid = 0;
+ wrmsrl(MSR_IA32_PQR_ASSOC, 0);
+ } else {
+ WARN_ON_ONCE(!state->rmid);
+ }
+
+ raw_spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int intel_cqm_event_add(struct perf_event *event, int mode)
+{
+ unsigned long flags;
+ unsigned int rmid;
+
+ raw_spin_lock_irqsave(&cache_lock, flags);
+
+ event->hw.cqm_state = PERF_HES_STOPPED;
+ rmid = event->hw.cqm_rmid;
+
+ if (__rmid_valid(rmid) && (mode & PERF_EF_START))
+ intel_cqm_event_start(event, mode);
+
+ raw_spin_unlock_irqrestore(&cache_lock, flags);
+
+ return 0;
+}
+
+static void intel_cqm_event_del(struct perf_event *event, int mode)
+{
+ intel_cqm_event_stop(event, mode);
+}
+
+static void intel_cqm_event_destroy(struct perf_event *event)
+{
+ struct perf_event *group_other = NULL;
+
+ mutex_lock(&cache_mutex);
+
+ /*
+ * If there's another event in this group...
+ */
+ if (!list_empty(&event->hw.cqm_group_entry)) {
+ group_other = list_first_entry(&event->hw.cqm_group_entry,
+ struct perf_event,
+ hw.cqm_group_entry);
+ list_del(&event->hw.cqm_group_entry);
+ }
+
+ /*
+ * And we're the group leader..
+ */
+ if (cqm_group_leader(event)) {
+ /*
+ * If there was a group_other, make that leader, otherwise
+ * destroy the group and return the RMID.
+ */
+ if (group_other) {
+ list_replace(&event->hw.cqm_groups_entry,
+ &group_other->hw.cqm_groups_entry);
+ } else {
+ unsigned int rmid = event->hw.cqm_rmid;
+
+ if (__rmid_valid(rmid))
+ __put_rmid(rmid);
+ list_del(&event->hw.cqm_groups_entry);
+ }
+ }
+
+ mutex_unlock(&cache_mutex);
+}
+
+static int intel_cqm_event_init(struct perf_event *event)
+{
+ struct perf_event *group = NULL;
+ bool rotate = false;
+
+ if (event->attr.type != intel_cqm_pmu.type)
+ return -ENOENT;
+
+ if (event->attr.config & ~QOS_EVENT_MASK)
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&event->hw.cqm_group_entry);
+ INIT_LIST_HEAD(&event->hw.cqm_groups_entry);
+
+ event->destroy = intel_cqm_event_destroy;
+
+ mutex_lock(&cache_mutex);
+
+ /* Will also set rmid */
+ intel_cqm_setup_event(event, &group);
+
+ if (group) {
+ list_add_tail(&event->hw.cqm_group_entry,
+ &group->hw.cqm_group_entry);
+ } else {
+ list_add_tail(&event->hw.cqm_groups_entry,
+ &cache_groups);
+
+ /*
+ * All RMIDs are either in use or have recently been
+ * used. Kick the rotation worker to clean/free some.
+ *
+ * We only do this for the group leader, rather than for
+ * every event in a group to save on needless work.
+ */
+ if (!__rmid_valid(event->hw.cqm_rmid))
+ rotate = true;
+ }
+
+ mutex_unlock(&cache_mutex);
+
+ if (rotate)
+ schedule_delayed_work(&intel_cqm_rmid_work, 0);
+
+ return 0;
+}
+
+EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
+EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
+EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
+EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
+EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");
+
+static struct attribute *intel_cqm_events_attr[] = {
+ EVENT_PTR(intel_cqm_llc),
+ EVENT_PTR(intel_cqm_llc_pkg),
+ EVENT_PTR(intel_cqm_llc_unit),
+ EVENT_PTR(intel_cqm_llc_scale),
+ EVENT_PTR(intel_cqm_llc_snapshot),
+ NULL,
+};
+
+static struct attribute_group intel_cqm_events_group = {
+ .name = "events",
+ .attrs = intel_cqm_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+static struct attribute *intel_cqm_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group intel_cqm_format_group = {
+ .name = "format",
+ .attrs = intel_cqm_formats_attr,
+};
+
+static ssize_t
+max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ ssize_t rv;
+
+ mutex_lock(&cache_mutex);
+ rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold);
+ mutex_unlock(&cache_mutex);
+
+ return rv;
+}
+
+static ssize_t
+max_recycle_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int bytes, cachelines;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &bytes);
+ if (ret)
+ return ret;
+
+ mutex_lock(&cache_mutex);
+
+ __intel_cqm_max_threshold = bytes;
+ cachelines = bytes / cqm_l3_scale;
+
+ /*
+ * The new maximum takes effect immediately.
+ */
+ if (__intel_cqm_threshold > cachelines)
+ __intel_cqm_threshold = cachelines;
+
+ mutex_unlock(&cache_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_recycle_threshold);
+
+static struct attribute *intel_cqm_attrs[] = {
+ &dev_attr_max_recycle_threshold.attr,
+ NULL,
+};
+
+static const struct attribute_group intel_cqm_group = {
+ .attrs = intel_cqm_attrs,
+};
+
+static const struct attribute_group *intel_cqm_attr_groups[] = {
+ &intel_cqm_events_group,
+ &intel_cqm_format_group,
+ &intel_cqm_group,
+ NULL,
+};
+
+static struct pmu intel_cqm_pmu = {
+ .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
+ .attr_groups = intel_cqm_attr_groups,
+ .task_ctx_nr = perf_sw_context,
+ .event_init = intel_cqm_event_init,
+ .add = intel_cqm_event_add,
+ .del = intel_cqm_event_del,
+ .start = intel_cqm_event_start,
+ .stop = intel_cqm_event_stop,
+ .read = intel_cqm_event_read,
+ .count = intel_cqm_event_count,
+};
+
+static inline void cqm_pick_event_reader(int cpu)
+{
+ int phys_id = topology_physical_package_id(cpu);
+ int i;
+
+ for_each_cpu(i, &cqm_cpumask) {
+ if (phys_id == topology_physical_package_id(i))
+ return; /* already got reader for this socket */
+ }
+
+ cpumask_set_cpu(cpu, &cqm_cpumask);
+}
+
+static void intel_cqm_cpu_prepare(unsigned int cpu)
+{
+ struct intel_cqm_state *state = &per_cpu(cqm_state, cpu);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ raw_spin_lock_init(&state->lock);
+ state->rmid = 0;
+ state->cnt = 0;
+
+ WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
+ WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
+}
+
+static void intel_cqm_cpu_exit(unsigned int cpu)
+{
+ int phys_id = topology_physical_package_id(cpu);
+ int i;
+
+ /*
+ * Is @cpu a designated cqm reader?
+ */
+ if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
+ return;
+
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+
+ if (phys_id == topology_physical_package_id(i)) {
+ cpumask_set_cpu(i, &cqm_cpumask);
+ break;
+ }
+ }
+}
+
+static int intel_cqm_cpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ intel_cqm_cpu_prepare(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ intel_cqm_cpu_exit(cpu);
+ break;
+ case CPU_STARTING:
+ cqm_pick_event_reader(cpu);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct x86_cpu_id intel_cqm_match[] = {
+ { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
+ {}
+};
+
+static int __init intel_cqm_init(void)
+{
+ char *str, scale[20];
+ int i, cpu, ret;
+
+ if (!x86_match_cpu(intel_cqm_match))
+ return -ENODEV;
+
+ cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;
+
+ /*
+ * It's possible that not all resources support the same number
+ * of RMIDs. Instead of making scheduling much more complicated
+ * (where we have to match a task's RMID to a cpu that supports
+ * that many RMIDs) just find the minimum RMIDs supported across
+ * all cpus.
+ *
+ * Also, check that the scales match on all cpus.
+ */
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->x86_cache_max_rmid < cqm_max_rmid)
+ cqm_max_rmid = c->x86_cache_max_rmid;
+
+ if (c->x86_cache_occ_scale != cqm_l3_scale) {
+ pr_err("Multiple LLC scale values, disabling\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /*
+ * A reasonable upper limit on the max threshold is the number
+ * of lines tagged per RMID if all RMIDs have the same number of
+ * lines tagged in the LLC.
+ *
+ * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
+ */
+ __intel_cqm_max_threshold =
+ boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);
+
+ snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
+ str = kstrdup(scale, GFP_KERNEL);
+ if (!str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ event_attr_intel_cqm_llc_scale.event_str = str;
+
+ ret = intel_cqm_setup_rmid_cache();
+ if (ret)
+ goto out;
+
+ for_each_online_cpu(i) {
+ intel_cqm_cpu_prepare(i);
+ cqm_pick_event_reader(i);
+ }
+
+ __perf_cpu_notifier(intel_cqm_cpu_notifier);
+
+ ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
+ if (ret)
+ pr_err("Intel CQM perf registration failed: %d\n", ret);
+ else
+ pr_info("Intel CQM monitoring enabled\n");
+
+out:
+ cpu_notifier_register_done();
+
+ return ret;
+}
+device_initcall(intel_cqm_init);
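
One detail worth calling out from intel_cqm_rmid_stabilize() above: an RMID parked on the limbo list is only considered for reuse once it has waited at least __rmid_queue_time_ms, since cachelines tagged with it are unlikely to have been evicted any sooner. A hedged, self-contained sketch of that single check (the helper is illustrative, not from the patch):

#include <linux/jiffies.h>

static bool rmid_min_queue_time_elapsed(unsigned long queue_time,
					unsigned int queue_time_ms)
{
	unsigned long min_queue_time = queue_time +
				       msecs_to_jiffies(queue_time_ms);

	/* Mirrors the time_after(min_queue_time, now) early-out above. */
	return !time_after(min_queue_time, jiffies);
}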
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 073983398364..813f75d71175 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -461,7 +461,8 @@ void intel_pmu_enable_bts(u64 config)
debugctlmsr |= DEBUGCTLMSR_TR;
debugctlmsr |= DEBUGCTLMSR_BTS;
- debugctlmsr |= DEBUGCTLMSR_BTINT;
+ if (config & ARCH_PERFMON_EVENTSEL_INT)
+ debugctlmsr |= DEBUGCTLMSR_BTINT;
if (!(config & ARCH_PERFMON_EVENTSEL_OS))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
@@ -557,6 +558,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
EVENT_CONSTRAINT_END
};
@@ -564,6 +567,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
EVENT_CONSTRAINT_END
};
@@ -587,6 +592,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@@ -602,6 +609,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@@ -611,6 +620,10 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
@@ -622,6 +635,10 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
@@ -633,16 +650,16 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
- INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
- INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
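
The 0x108000c0 entries added above encode INST_RETIRED.ANY_P (event 0xc0) with the invert bit set and a counter mask of 16, i.e. the raw form of "cycles:p". Purely as an illustration of how that value decomposes (field positions follow the architectural PERFEVTSEL layout; the macros are not part of the patch):

#define EVTSEL_EVENT(c)		((c) & 0xffULL)		/* bits  0-7  */
#define EVTSEL_UMASK(c)		(((c) >> 8) & 0xffULL)	/* bits  8-15 */
#define EVTSEL_INV(c)		(((c) >> 23) & 0x1ULL)	/* bit  23    */
#define EVTSEL_CMASK(c)		(((c) >> 24) & 0xffULL)	/* bits 24-31 */

/* 0x108000c0: event = 0xc0, umask = 0x00, inv = 1, cmask = 0x10 (16) */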
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 58f1a94beaf0..94e5b506caa6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -39,6 +39,7 @@ static enum {
#define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
#define LBR_FAR_BIT 8 /* do not capture far branches */
+#define LBR_CALL_STACK_BIT 9 /* enable call stack */
#define LBR_KERNEL (1 << LBR_KERNEL_BIT)
#define LBR_USER (1 << LBR_USER_BIT)
@@ -49,6 +50,7 @@ static enum {
#define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
#define LBR_FAR (1 << LBR_FAR_BIT)
+#define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT)
#define LBR_PLM (LBR_KERNEL | LBR_USER)
@@ -69,33 +71,31 @@ static enum {
#define LBR_FROM_FLAG_IN_TX (1ULL << 62)
#define LBR_FROM_FLAG_ABORT (1ULL << 61)
-#define for_each_branch_sample_type(x) \
- for ((x) = PERF_SAMPLE_BRANCH_USER; \
- (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)
-
/*
* x86control flow change classification
* x86control flow changes include branches, interrupts, traps, faults
*/
enum {
- X86_BR_NONE = 0, /* unknown */
-
- X86_BR_USER = 1 << 0, /* branch target is user */
- X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
-
- X86_BR_CALL = 1 << 2, /* call */
- X86_BR_RET = 1 << 3, /* return */
- X86_BR_SYSCALL = 1 << 4, /* syscall */
- X86_BR_SYSRET = 1 << 5, /* syscall return */
- X86_BR_INT = 1 << 6, /* sw interrupt */
- X86_BR_IRET = 1 << 7, /* return from interrupt */
- X86_BR_JCC = 1 << 8, /* conditional */
- X86_BR_JMP = 1 << 9, /* jump */
- X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
- X86_BR_IND_CALL = 1 << 11,/* indirect calls */
- X86_BR_ABORT = 1 << 12,/* transaction abort */
- X86_BR_IN_TX = 1 << 13,/* in transaction */
- X86_BR_NO_TX = 1 << 14,/* not in transaction */
+ X86_BR_NONE = 0, /* unknown */
+
+ X86_BR_USER = 1 << 0, /* branch target is user */
+ X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
+
+ X86_BR_CALL = 1 << 2, /* call */
+ X86_BR_RET = 1 << 3, /* return */
+ X86_BR_SYSCALL = 1 << 4, /* syscall */
+ X86_BR_SYSRET = 1 << 5, /* syscall return */
+ X86_BR_INT = 1 << 6, /* sw interrupt */
+ X86_BR_IRET = 1 << 7, /* return from interrupt */
+ X86_BR_JCC = 1 << 8, /* conditional */
+ X86_BR_JMP = 1 << 9, /* jump */
+ X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
+ X86_BR_IND_CALL = 1 << 11,/* indirect calls */
+ X86_BR_ABORT = 1 << 12,/* transaction abort */
+ X86_BR_IN_TX = 1 << 13,/* in transaction */
+ X86_BR_NO_TX = 1 << 14,/* not in transaction */
+ X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
+ X86_BR_CALL_STACK = 1 << 16,/* call stack */
};
#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
@@ -112,13 +112,15 @@ enum {
X86_BR_JMP |\
X86_BR_IRQ |\
X86_BR_ABORT |\
- X86_BR_IND_CALL)
+ X86_BR_IND_CALL |\
+ X86_BR_ZERO_CALL)
#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
#define X86_BR_ANY_CALL \
(X86_BR_CALL |\
X86_BR_IND_CALL |\
+ X86_BR_ZERO_CALL |\
X86_BR_SYSCALL |\
X86_BR_IRQ |\
X86_BR_INT)
@@ -130,17 +132,32 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
* otherwise it becomes near impossible to get a reliable stack.
*/
-static void __intel_pmu_lbr_enable(void)
+static void __intel_pmu_lbr_enable(bool pmi)
{
- u64 debugctl;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 debugctl, lbr_select = 0, orig_debugctl;
- if (cpuc->lbr_sel)
- wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
+ /*
+ * No need to reprogram LBR_SELECT in a PMI, as it
+ * did not change.
+ */
+ if (cpuc->lbr_sel && !pmi) {
+ lbr_select = cpuc->lbr_sel->config;
+ wrmsrl(MSR_LBR_SELECT, lbr_select);
+ }
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
- debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ orig_debugctl = debugctl;
+ debugctl |= DEBUGCTLMSR_LBR;
+ /*
+ * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
+ * If FREEZE_LBRS_ON_PMI is set, a PMI arriving near call/return
+ * instructions may cause a superfluous increase/decrease of LBR_TOS.
+ */
+ if (!(lbr_select & LBR_CALL_STACK))
+ debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+ if (orig_debugctl != debugctl)
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void __intel_pmu_lbr_disable(void)
@@ -181,9 +198,116 @@ void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_64();
}
+/*
+ * TOS = most recently recorded branch
+ */
+static inline u64 intel_pmu_lbr_tos(void)
+{
+ u64 tos;
+
+ rdmsrl(x86_pmu.lbr_tos, tos);
+ return tos;
+}
+
+enum {
+ LBR_NONE,
+ LBR_VALID,
+};
+
+static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
+{
+ int i;
+ unsigned lbr_idx, mask;
+ u64 tos;
+
+ if (task_ctx->lbr_callstack_users == 0 ||
+ task_ctx->lbr_stack_state == LBR_NONE) {
+ intel_pmu_lbr_reset();
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ tos = intel_pmu_lbr_tos();
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ }
+ task_ctx->lbr_stack_state = LBR_NONE;
+}
+
+static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
+{
+ int i;
+ unsigned lbr_idx, mask;
+ u64 tos;
+
+ if (task_ctx->lbr_callstack_users == 0) {
+ task_ctx->lbr_stack_state = LBR_NONE;
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ tos = intel_pmu_lbr_tos();
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ }
+ task_ctx->lbr_stack_state = LBR_VALID;
+}
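
Both helpers above walk the LBR ring downwards from the top-of-stack (TOS) index, wrapping with a power-of-two mask so the newest entry comes first. A self-contained sketch of that indexing, with a hypothetical 16-deep LBR and plain arrays standing in for the from/to MSRs (editorial illustration, not part of this patch):

	#include <stdint.h>

	#define LBR_NR	16			/* hypothetical LBR depth */

	static void lbr_walk_from_tos(unsigned int tos,
				      const uint64_t msr_from[LBR_NR],
				      const uint64_t msr_to[LBR_NR],
				      uint64_t from[LBR_NR], uint64_t to[LBR_NR])
	{
		unsigned int mask = LBR_NR - 1, idx, i;

		for (i = 0; i < LBR_NR; i++) {
			idx = (tos - i) & mask;	/* newest entry first, wraps */
			from[i] = msr_from[idx];
			to[i]   = msr_to[idx];
		}
	}
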
+
+void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx;
+
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ /*
+ * If LBR callstack feature is enabled and the stack was saved when
+ * the task was scheduled out, restore the stack. Otherwise flush
+ * the LBR stack.
+ */
+ task_ctx = ctx ? ctx->task_ctx_data : NULL;
+ if (task_ctx) {
+ if (sched_in) {
+ __intel_pmu_lbr_restore(task_ctx);
+ cpuc->lbr_context = ctx;
+ } else {
+ __intel_pmu_lbr_save(task_ctx);
+ }
+ return;
+ }
+
+ /*
+ * When sampling the branch stack in system-wide mode, it may be
+ * necessary to flush the stack on context switch. This happens
+ * when the branch stack does not tag its entries with the pid
+ * of the current task. Otherwise it becomes impossible to
+ * associate a branch entry with a task. This ambiguity is more
+ * likely to appear when the branch stack supports priv level
+ * filtering and the user sets it to monitor only at the user
+ * level (which could be a useful measurement in system-wide
+ * mode). In that case, the risk is high of having a branch
+ * stack containing branches from multiple tasks.
+ */
+ if (sched_in) {
+ intel_pmu_lbr_reset();
+ cpuc->lbr_context = ctx;
+ }
+}
+
+static inline bool branch_user_callstack(unsigned br_sel)
+{
+ return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
+}
+
void intel_pmu_lbr_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
@@ -198,18 +322,33 @@ void intel_pmu_lbr_enable(struct perf_event *event)
}
cpuc->br_sel = event->hw.branch_reg.reg;
+ if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
+ event->ctx->task_ctx_data) {
+ task_ctx = event->ctx->task_ctx_data;
+ task_ctx->lbr_callstack_users++;
+ }
+
cpuc->lbr_users++;
+ perf_sched_cb_inc(event->ctx->pmu);
}
void intel_pmu_lbr_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
+ if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
+ event->ctx->task_ctx_data) {
+ task_ctx = event->ctx->task_ctx_data;
+ task_ctx->lbr_callstack_users--;
+ }
+
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
+ perf_sched_cb_dec(event->ctx->pmu);
if (cpuc->enabled && !cpuc->lbr_users) {
__intel_pmu_lbr_disable();
@@ -218,12 +357,12 @@ void intel_pmu_lbr_disable(struct perf_event *event)
}
}
-void intel_pmu_lbr_enable_all(void)
+void intel_pmu_lbr_enable_all(bool pmi)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->lbr_users)
- __intel_pmu_lbr_enable();
+ __intel_pmu_lbr_enable(pmi);
}
void intel_pmu_lbr_disable_all(void)
@@ -234,18 +373,6 @@ void intel_pmu_lbr_disable_all(void)
__intel_pmu_lbr_disable();
}
-/*
- * TOS = most recently recorded branch
- */
-static inline u64 intel_pmu_lbr_tos(void)
-{
- u64 tos;
-
- rdmsrl(x86_pmu.lbr_tos, tos);
-
- return tos;
-}
-
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
unsigned long mask = x86_pmu.lbr_nr - 1;
@@ -350,7 +477,7 @@ void intel_pmu_lbr_read(void)
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
+static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
@@ -387,11 +514,21 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_COND)
mask |= X86_BR_JCC;
+ if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
+ if (!x86_pmu_has_lbr_callstack())
+ return -EOPNOTSUPP;
+ if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
+ return -EINVAL;
+ mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
+ X86_BR_CALL_STACK;
+ }
+
/*
* stash actual user request into reg, it may
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
+ return 0;
}
/*
@@ -403,14 +540,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
struct hw_perf_event_extra *reg;
u64 br_type = event->attr.branch_sample_type;
- u64 mask = 0, m;
- u64 v;
+ u64 mask = 0, v;
+ int i;
- for_each_branch_sample_type(m) {
- if (!(br_type & m))
+ for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
+ if (!(br_type & (1ULL << i)))
continue;
- v = x86_pmu.lbr_sel_map[m];
+ v = x86_pmu.lbr_sel_map[i];
if (v == LBR_NOT_SUPP)
return -EOPNOTSUPP;
@@ -420,8 +557,12 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
reg = &event->hw.branch_reg;
reg->idx = EXTRA_REG_LBR;
- /* LBR_SELECT operates in suppress mode so invert mask */
- reg->config = ~mask & x86_pmu.lbr_sel_mask;
+ /*
+ * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
+ * in suppress mode. So LBR_SELECT should be set to
+ * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+ */
+ reg->config = mask ^ x86_pmu.lbr_sel_mask;
return 0;
}
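
The XOR works because the low nine LBR_SELECT bits (LBR_SEL_MASK) are suppress-mode filters while the new call-stack bit is enable-mode, so only the low nine bits must be inverted. A worked calculation using the LBR_* bit values defined at the top of this file (editorial illustration):

	request: PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_CALL_STACK
	mask	= LBR_USER | LBR_REL_CALL | LBR_IND_CALL
		  | LBR_RETURN | LBR_CALL_STACK		= 0x23a
	config	= mask ^ LBR_SEL_MASK (0x1ff)		= 0x3c5

i.e. the suppress bits for kernel, JCC, IND_JMP, REL_JMP and FAR end up set (those branch types are filtered out), the call/return suppress bits stay clear (captured), and bit 9 enables call-stack mode.
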
@@ -439,7 +580,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
/*
* setup SW LBR filter
*/
- intel_pmu_setup_sw_lbr_filter(event);
+ ret = intel_pmu_setup_sw_lbr_filter(event);
+ if (ret)
+ return ret;
/*
* setup HW LBR filter, if any
@@ -568,6 +711,12 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_INT;
break;
case 0xe8: /* call near rel */
+ insn_get_immediate(&insn);
+ if (insn.immediate1.value == 0) {
+ /* zero length call */
+ ret = X86_BR_ZERO_CALL;
+ break;
+ }
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
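
A zero-length call is the five-byte encoding e8 00 00 00 00: a near call whose rel32 displacement is zero, so it "calls" the very next instruction. It is the classic 32-bit position-independent-code idiom for reading the current instruction pointer and is immediately followed by a pop rather than a matching ret, which is presumably why it is classified separately instead of as a real call. A minimal sketch of the idiom (editorial illustration; the helper name is made up):

	/* The call below encodes as e8 00 00 00 00: rel32 == 0. */
	static unsigned long current_ip(void)
	{
		unsigned long ip;

		asm volatile("call 1f\n\t"
			     "1: pop %0"
			     : "=r" (ip));
		return ip;
	}
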
@@ -678,35 +827,49 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
/*
* Map interface branch filters onto LBR filters
*/
-static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
- [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
- [PERF_SAMPLE_BRANCH_USER] = LBR_USER,
- [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
- [PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
- [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP
- | LBR_IND_JMP | LBR_FAR,
+static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
+ [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
+ [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
+ [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
+ [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
+ [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
+ | LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
*/
- [PERF_SAMPLE_BRANCH_ANY_CALL] =
+ [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include IND_JMP to capture IND_CALL
*/
- [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
- [PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
+ [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
+ [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
};
-static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
- [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
- [PERF_SAMPLE_BRANCH_USER] = LBR_USER,
- [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
- [PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
- [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR,
- [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL
- | LBR_FAR,
- [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL,
- [PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
+static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
+ [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
+ [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
+ [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
+ [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
+ [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
+ | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
+ [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
+};
+
+static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
+ [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
+ [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
+ [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
+ [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
+ [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
+ | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
+ [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
+ [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
+ | LBR_RETURN | LBR_CALL_STACK,
};
/* core */
@@ -765,6 +928,20 @@ void __init intel_pmu_lbr_init_snb(void)
pr_cont("16-deep LBR, ");
}
+/* haswell */
+void intel_pmu_lbr_init_hsw(void)
+{
+ x86_pmu.lbr_nr = 16;
+ x86_pmu.lbr_tos = MSR_LBR_TOS;
+ x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
+ x86_pmu.lbr_to = MSR_LBR_NHM_TO;
+
+ x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
+ x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
+
+ pr_cont("16-deep LBR, ");
+}
+
/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
new file mode 100644
index 000000000000..ffe666c2c6b5
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -0,0 +1,1100 @@
+/*
+ * Intel(R) Processor Trace PMU driver for perf
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Intel PT is specified in the Intel Architecture Instruction Set Extensions
+ * Programming Reference:
+ * http://software.intel.com/en-us/intel-isa-extensions
+ */
+
+#undef DEBUG
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include <asm/perf_event.h>
+#include <asm/insn.h>
+#include <asm/io.h>
+
+#include "perf_event.h"
+#include "intel_pt.h"
+
+static DEFINE_PER_CPU(struct pt, pt_ctx);
+
+static struct pt_pmu pt_pmu;
+
+enum cpuid_regs {
+ CR_EAX = 0,
+ CR_ECX,
+ CR_EDX,
+ CR_EBX
+};
+
+/*
+ * Capabilities of Intel PT hardware, such as the number of address bits or
+ * supported output schemes, are cached and exported to userspace as the "caps"
+ * attribute group of the pt pmu device
+ * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
+ * the relevant bits together with the intel_pt traces.
+ *
+ * These are needed both for trace decoding (payloads_lip indicates the address
+ * width encoded in IP-related packets) and for event configuration (bitmasks
+ * with permitted values for certain bit fields).
+ */
+#define PT_CAP(_n, _l, _r, _m) \
+ [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l, \
+ .reg = _r, .mask = _m }
+
+static struct pt_cap_desc {
+ const char *name;
+ u32 leaf;
+ u8 reg;
+ u32 mask;
+} pt_caps[] = {
+ PT_CAP(max_subleaf, 0, CR_EAX, 0xffffffff),
+ PT_CAP(cr3_filtering, 0, CR_EBX, BIT(0)),
+ PT_CAP(topa_output, 0, CR_ECX, BIT(0)),
+ PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)),
+ PT_CAP(payloads_lip, 0, CR_ECX, BIT(31)),
+};
+
+static u32 pt_cap_get(enum pt_capabilities cap)
+{
+ struct pt_cap_desc *cd = &pt_caps[cap];
+ u32 c = pt_pmu.caps[cd->leaf * 4 + cd->reg];
+ unsigned int shift = __ffs(cd->mask);
+
+ return (c & cd->mask) >> shift;
+}
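
As a worked example, payloads_lip is declared just above as ECX bit 31 of CPUID leaf 0x14, subleaf 0 (the 20 passed to cpuid_count() in pt_pmu_hw_init() below); pt_cap_get() therefore reads caps[0 * 4 + CR_ECX], masks it with BIT(31) and shifts right by __ffs(BIT(31)) = 31, returning 0 or 1.
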
+
+static ssize_t pt_cap_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ enum pt_capabilities cap = (long)ea->var;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
+}
+
+static struct attribute_group pt_cap_group = {
+ .name = "caps",
+};
+
+PMU_FORMAT_ATTR(tsc, "config:10" );
+PMU_FORMAT_ATTR(noretcomp, "config:11" );
+
+static struct attribute *pt_formats_attr[] = {
+ &format_attr_tsc.attr,
+ &format_attr_noretcomp.attr,
+ NULL,
+};
+
+static struct attribute_group pt_format_group = {
+ .name = "format",
+ .attrs = pt_formats_attr,
+};
+
+static const struct attribute_group *pt_attr_groups[] = {
+ &pt_cap_group,
+ &pt_format_group,
+ NULL,
+};
+
+static int __init pt_pmu_hw_init(void)
+{
+ struct dev_ext_attribute *de_attrs;
+ struct attribute **attrs;
+ size_t size;
+ int ret;
+ long i;
+
+ attrs = NULL;
+ ret = -ENODEV;
+ if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
+ goto fail;
+
+ for (i = 0; i < PT_CPUID_LEAVES; i++) {
+ cpuid_count(20, i,
+ &pt_pmu.caps[CR_EAX + i*4],
+ &pt_pmu.caps[CR_EBX + i*4],
+ &pt_pmu.caps[CR_ECX + i*4],
+ &pt_pmu.caps[CR_EDX + i*4]);
+ }
+
+ ret = -ENOMEM;
+ size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
+ attrs = kzalloc(size, GFP_KERNEL);
+ if (!attrs)
+ goto fail;
+
+ size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
+ de_attrs = kzalloc(size, GFP_KERNEL);
+ if (!de_attrs)
+ goto fail;
+
+ for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
+ struct dev_ext_attribute *de_attr = de_attrs + i;
+
+ de_attr->attr.attr.name = pt_caps[i].name;
+
+ sysfs_attr_init(&de_attrs->attr.attr);
+
+ de_attr->attr.attr.mode = S_IRUGO;
+ de_attr->attr.show = pt_cap_show;
+ de_attr->var = (void *)i;
+
+ attrs[i] = &de_attr->attr.attr;
+ }
+
+ pt_cap_group.attrs = attrs;
+
+ return 0;
+
+fail:
+ kfree(attrs);
+
+ return ret;
+}
+
+#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC)
+
+static bool pt_event_valid(struct perf_event *event)
+{
+ u64 config = event->attr.config;
+
+ if ((config & PT_CONFIG_MASK) != config)
+ return false;
+
+ return true;
+}
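
For user space, the two format attributes exported above mean that only config bits 10 (tsc) and 11 (noretcomp) may be set; anything else is rejected by pt_event_valid(). A hypothetical sketch of filling in perf_event_attr accordingly; the intel_pt_type value is assumed to come from /sys/bus/event_source/devices/intel_pt/type and the helper is not part of this patch:

	#include <string.h>
	#include <linux/perf_event.h>

	static void pt_attr_init(struct perf_event_attr *attr, int intel_pt_type)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size   = sizeof(*attr);
		attr->type   = intel_pt_type;	/* dynamic PMU type from sysfs */
		attr->config = 1ULL << 10;	/* "tsc": emit timing packets */
	}
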
+
+/*
+ * PT configuration helpers
+ * These all are cpu affine and operate on a local PT
+ */
+
+static bool pt_is_running(void)
+{
+ u64 ctl;
+
+ rdmsrl(MSR_IA32_RTIT_CTL, ctl);
+
+ return !!(ctl & RTIT_CTL_TRACEEN);
+}
+
+static void pt_config(struct perf_event *event)
+{
+ u64 reg;
+
+ reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;
+
+ if (!event->attr.exclude_kernel)
+ reg |= RTIT_CTL_OS;
+ if (!event->attr.exclude_user)
+ reg |= RTIT_CTL_USR;
+
+ reg |= (event->attr.config & PT_CONFIG_MASK);
+
+ wrmsrl(MSR_IA32_RTIT_CTL, reg);
+}
+
+static void pt_config_start(bool start)
+{
+ u64 ctl;
+
+ rdmsrl(MSR_IA32_RTIT_CTL, ctl);
+ if (start)
+ ctl |= RTIT_CTL_TRACEEN;
+ else
+ ctl &= ~RTIT_CTL_TRACEEN;
+ wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+
+ /*
+ * A wrmsr that disables trace generation serializes other PT
+ * registers and causes all data packets to be written to memory,
+ * but a fence is required for the data to become globally visible.
+ *
+ * The WMB below, separating the data store and the aux_head store, matches
+ * the consumer's RMB that separates aux_head load and data load.
+ */
+ if (!start)
+ wmb();
+}
+
+static void pt_config_buffer(void *buf, unsigned int topa_idx,
+ unsigned int output_off)
+{
+ u64 reg;
+
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));
+
+ reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);
+
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
+}
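
The value written to MSR_IA32_RTIT_OUTPUT_MASK packs three fields: bits 6:0 are all ones, bits 31:7 hold the index of the current ToPA table entry, and bits 63:32 hold the byte offset within that output region; pt_read_offset() below parses the same layout back. A small sketch of the encode/decode pair (editorial illustration):

	#include <stdint.h>

	static uint64_t pt_output_encode(unsigned int topa_idx, unsigned int off)
	{
		return 0x7f | ((uint64_t)topa_idx << 7) | ((uint64_t)off << 32);
	}

	static void pt_output_decode(uint64_t reg, unsigned int *topa_idx,
				     unsigned int *off)
	{
		*topa_idx = (reg & 0xffffff80) >> 7;	/* entry index	    */
		*off	  = reg >> 32;			/* offset in region */
	}
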
+
+/*
+ * Keep ToPA table-related metadata on the same page as the actual table,
+ * taking up a few words from the top
+ */
+
+#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
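
Assuming the usual 4 KiB page size and 8-byte ToPA entries, this evaluates to (4096 - 40) / 8 - 1 = 506 entries per table, which keeps struct topa (the entry array plus the metadata fields declared below) within a single page; topa_table_full() additionally treats the final slot as reserved for the END link that chains tables together.
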
+
+/**
+ * struct topa - page-sized ToPA table with metadata at the top
+ * @table: actual ToPA table entries, as understood by PT hardware
+ * @list: linkage to struct pt_buffer's list of tables
+ * @phys: physical address of this page
+ * @offset: offset of the first entry in this table in the buffer
+ * @size: total size of all entries in this table
+ * @last: index of the last initialized entry in this table
+ */
+struct topa {
+ struct topa_entry table[TENTS_PER_PAGE];
+ struct list_head list;
+ u64 phys;
+ u64 offset;
+ size_t size;
+ int last;
+};
+
+/* make -1 stand for the last table entry */
+#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
+
+/**
+ * topa_alloc() - allocate page-sized ToPA table
+ * @cpu: CPU on which to allocate.
+ * @gfp: Allocation flags.
+ *
+ * Return: On success, return the pointer to ToPA table page.
+ */
+static struct topa *topa_alloc(int cpu, gfp_t gfp)
+{
+ int node = cpu_to_node(cpu);
+ struct topa *topa;
+ struct page *p;
+
+ p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
+ if (!p)
+ return NULL;
+
+ topa = page_address(p);
+ topa->last = 0;
+ topa->phys = page_to_phys(p);
+
+ /*
+ * In case of single-entry ToPA, always put the self-referencing END
+ * link as the 2nd entry in the table
+ */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(topa, 1)->end = 1;
+ }
+
+ return topa;
+}
+
+/**
+ * topa_free() - free a page-sized ToPA table
+ * @topa: Table to deallocate.
+ */
+static void topa_free(struct topa *topa)
+{
+ free_page((unsigned long)topa);
+}
+
+/**
+ * topa_insert_table() - insert a ToPA table into a buffer
+ * @buf: PT buffer that's being extended.
+ * @topa: New topa table to be inserted.
+ *
+ * If it's the first table in this buffer, set up buffer's pointers
+ * accordingly; otherwise, add an END=1 link entry for @topa to the current
+ * "last" table and adjust the last table pointer to @topa.
+ */
+static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
+{
+ struct topa *last = buf->last;
+
+ list_add_tail(&topa->list, &buf->tables);
+
+ if (!buf->first) {
+ buf->first = buf->last = buf->cur = topa;
+ return;
+ }
+
+ topa->offset = last->offset + last->size;
+ buf->last = topa;
+
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ return;
+
+ BUG_ON(last->last != TENTS_PER_PAGE - 1);
+
+ TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(last, -1)->end = 1;
+}
+
+/**
+ * topa_table_full() - check if a ToPA table is filled up
+ * @topa: ToPA table.
+ */
+static bool topa_table_full(struct topa *topa)
+{
+ /* single-entry ToPA is a special case */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ return !!topa->last;
+
+ return topa->last == TENTS_PER_PAGE - 1;
+}
+
+/**
+ * topa_insert_pages() - create a list of ToPA tables
+ * @buf: PT buffer being initialized.
+ * @gfp: Allocation flags.
+ *
+ * This initializes a list of ToPA tables with entries from
+ * the data_pages provided by rb_alloc_aux().
+ *
+ * Return: 0 on success or error code.
+ */
+static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
+{
+ struct topa *topa = buf->last;
+ int order = 0;
+ struct page *p;
+
+ p = virt_to_page(buf->data_pages[buf->nr_pages]);
+ if (PagePrivate(p))
+ order = page_private(p);
+
+ if (topa_table_full(topa)) {
+ topa = topa_alloc(buf->cpu, gfp);
+ if (!topa)
+ return -ENOMEM;
+
+ topa_insert_table(buf, topa);
+ }
+
+ TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
+ TOPA_ENTRY(topa, -1)->size = order;
+ if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(topa, -1)->intr = 1;
+ TOPA_ENTRY(topa, -1)->stop = 1;
+ }
+
+ topa->last++;
+ topa->size += sizes(order);
+
+ buf->nr_pages += 1ul << order;
+
+ return 0;
+}
+
+/**
+ * pt_topa_dump() - print ToPA tables and their entries
+ * @buf: PT buffer.
+ */
+static void pt_topa_dump(struct pt_buffer *buf)
+{
+ struct topa *topa;
+
+ list_for_each_entry(topa, &buf->tables, list) {
+ int i;
+
+ pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
+ topa->phys, topa->offset, topa->size);
+ for (i = 0; i < TENTS_PER_PAGE; i++) {
+ pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
+ &topa->table[i],
+ (unsigned long)topa->table[i].base << TOPA_SHIFT,
+ sizes(topa->table[i].size),
+ topa->table[i].end ? 'E' : ' ',
+ topa->table[i].intr ? 'I' : ' ',
+ topa->table[i].stop ? 'S' : ' ',
+ *(u64 *)&topa->table[i]);
+ if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
+ topa->table[i].stop) ||
+ topa->table[i].end)
+ break;
+ }
+ }
+}
+
+/**
+ * pt_buffer_advance() - advance to the next output region
+ * @buf: PT buffer.
+ *
+ * Advance the current pointers in the buffer to the next ToPA entry.
+ */
+static void pt_buffer_advance(struct pt_buffer *buf)
+{
+ buf->output_off = 0;
+ buf->cur_idx++;
+
+ if (buf->cur_idx == buf->cur->last) {
+ if (buf->cur == buf->last)
+ buf->cur = buf->first;
+ else
+ buf->cur = list_entry(buf->cur->list.next, struct topa,
+ list);
+ buf->cur_idx = 0;
+ }
+}
+
+/**
+ * pt_update_head() - calculate current offsets and sizes
+ * @pt: Per-cpu pt context.
+ *
+ * Update buffer's current write pointer position and data size.
+ */
+static void pt_update_head(struct pt *pt)
+{
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ u64 topa_idx, base, old;
+
+ /* offset of the first region in this table from the beginning of buf */
+ base = buf->cur->offset + buf->output_off;
+
+ /* offset of the current output region within this table */
+ for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
+ base += sizes(buf->cur->table[topa_idx].size);
+
+ if (buf->snapshot) {
+ local_set(&buf->data_size, base);
+ } else {
+ old = (local64_xchg(&buf->head, base) &
+ ((buf->nr_pages << PAGE_SHIFT) - 1));
+ if (base < old)
+ base += buf->nr_pages << PAGE_SHIFT;
+
+ local_add(base - old, &buf->data_size);
+ }
+}
+
+/**
+ * pt_buffer_region() - obtain current output region's address
+ * @buf: PT buffer.
+ */
+static void *pt_buffer_region(struct pt_buffer *buf)
+{
+ return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
+}
+
+/**
+ * pt_buffer_region_size() - obtain current output region's size
+ * @buf: PT buffer.
+ */
+static size_t pt_buffer_region_size(struct pt_buffer *buf)
+{
+ return sizes(buf->cur->table[buf->cur_idx].size);
+}
+
+/**
+ * pt_handle_status() - take care of possible status conditions
+ * @pt: Per-cpu pt context.
+ */
+static void pt_handle_status(struct pt *pt)
+{
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ int advance = 0;
+ u64 status;
+
+ rdmsrl(MSR_IA32_RTIT_STATUS, status);
+
+ if (status & RTIT_STATUS_ERROR) {
+ pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
+ pt_topa_dump(buf);
+ status &= ~RTIT_STATUS_ERROR;
+ }
+
+ if (status & RTIT_STATUS_STOPPED) {
+ status &= ~RTIT_STATUS_STOPPED;
+
+ /*
+ * On systems that only do single-entry ToPA, hitting STOP
+ * means we are already losing data; need to let the decoder
+ * know.
+ */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
+ buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+ local_inc(&buf->lost);
+ advance++;
+ }
+ }
+
+ /*
+ * Also, on single-entry ToPA implementations the interrupt will come
+ * before the output reaches its output region's boundary.
+ */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
+ pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
+ void *head = pt_buffer_region(buf);
+
+ /* everything within this margin needs to be zeroed out */
+ memset(head + buf->output_off, 0,
+ pt_buffer_region_size(buf) -
+ buf->output_off);
+ advance++;
+ }
+
+ if (advance)
+ pt_buffer_advance(buf);
+
+ wrmsrl(MSR_IA32_RTIT_STATUS, status);
+}
+
+/**
+ * pt_read_offset() - translate registers into buffer pointers
+ * @buf: PT buffer.
+ *
+ * Set buffer's output pointers from MSR values.
+ */
+static void pt_read_offset(struct pt_buffer *buf)
+{
+ u64 offset, base_topa;
+
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
+ buf->cur = phys_to_virt(base_topa);
+
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
+ /* offset within current output region */
+ buf->output_off = offset >> 32;
+ /* index of current output region within this table */
+ buf->cur_idx = (offset & 0xffffff80) >> 7;
+}
+
+/**
+ * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
+ * @buf: PT buffer.
+ * @pg: Page offset in the buffer.
+ *
+ * When advancing to the next output region (ToPA entry), given a page offset
+ * into the buffer, we need to find the offset of the first page in the next
+ * region.
+ */
+static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
+{
+ struct topa_entry *te = buf->topa_index[pg];
+
+ /* one region */
+ if (buf->first == buf->last && buf->first->last == 1)
+ return pg;
+
+ do {
+ pg++;
+ pg &= buf->nr_pages - 1;
+ } while (buf->topa_index[pg] == te);
+
+ return pg;
+}
+
+/**
+ * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
+ * @buf: PT buffer.
+ * @handle: Current output handle.
+ *
+ * Place INT and STOP marks to prevent overwriting old data that the consumer
+ * hasn't yet collected.
+ */
+static int pt_buffer_reset_markers(struct pt_buffer *buf,
+ struct perf_output_handle *handle)
+
+{
+ unsigned long idx, npages, end;
+
+ if (buf->snapshot)
+ return 0;
+
+ /* can't stop in the middle of an output region */
+ if (buf->output_off + handle->size + 1 <
+ sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
+ return -EINVAL;
+
+
+ /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ return 0;
+
+ /* clear STOP and INT from current entry */
+ buf->topa_index[buf->stop_pos]->stop = 0;
+ buf->topa_index[buf->intr_pos]->intr = 0;
+
+ if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ npages = (handle->size + 1) >> PAGE_SHIFT;
+ end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages;
+ /*if (end > handle->wakeup >> PAGE_SHIFT)
+ end = handle->wakeup >> PAGE_SHIFT;*/
+ idx = end & (buf->nr_pages - 1);
+ buf->stop_pos = idx;
+ idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1;
+ idx &= buf->nr_pages - 1;
+ buf->intr_pos = idx;
+ }
+
+ buf->topa_index[buf->stop_pos]->stop = 1;
+ buf->topa_index[buf->intr_pos]->intr = 1;
+
+ return 0;
+}
+
+/**
+ * pt_buffer_setup_topa_index() - build topa_index[] table of regions
+ * @buf: PT buffer.
+ *
+ * topa_index[] references output regions indexed by offset into the
+ * buffer for purposes of quick reverse lookup.
+ */
+static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
+{
+ struct topa *cur = buf->first, *prev = buf->last;
+ struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
+ *te_prev = TOPA_ENTRY(prev, prev->last - 1);
+ int pg = 0, idx = 0, ntopa = 0;
+
+ while (pg < buf->nr_pages) {
+ int tidx;
+
+ /* pages within one topa entry */
+ for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
+ buf->topa_index[pg] = te_prev;
+
+ te_prev = te_cur;
+
+ if (idx == cur->last - 1) {
+ /* advance to next topa table */
+ idx = 0;
+ cur = list_entry(cur->list.next, struct topa, list);
+ ntopa++;
+ } else
+ idx++;
+ te_cur = TOPA_ENTRY(cur, idx);
+ }
+
+}
+
+/**
+ * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
+ * @buf: PT buffer.
+ * @head: Write pointer (aux_head) from AUX buffer.
+ *
+ * Find the ToPA table and entry corresponding to given @head and set buffer's
+ * "current" pointers accordingly.
+ */
+static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
+{
+ int pg;
+
+ if (buf->snapshot)
+ head &= (buf->nr_pages << PAGE_SHIFT) - 1;
+
+ pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
+ pg = pt_topa_next_entry(buf, pg);
+
+ buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
+ buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
+ (unsigned long)buf->cur) / sizeof(struct topa_entry);
+ buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);
+
+ local64_set(&buf->head, head);
+ local_set(&buf->data_size, 0);
+}
+
+/**
+ * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
+ * @buf: PT buffer.
+ */
+static void pt_buffer_fini_topa(struct pt_buffer *buf)
+{
+ struct topa *topa, *iter;
+
+ list_for_each_entry_safe(topa, iter, &buf->tables, list) {
+ /*
+ * right now, this is in free_aux() path only, so
+ * no need to unlink this table from the list
+ */
+ topa_free(topa);
+ }
+}
+
+/**
+ * pt_buffer_init_topa() - initialize ToPA table for pt buffer
+ * @buf: PT buffer.
+ * @nr_pages: Total number of buffer pages to map via this ToPA.
+ * @gfp: Allocation flags.
+ */
+static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
+ gfp_t gfp)
+{
+ struct topa *topa;
+ int err;
+
+ topa = topa_alloc(buf->cpu, gfp);
+ if (!topa)
+ return -ENOMEM;
+
+ topa_insert_table(buf, topa);
+
+ while (buf->nr_pages < nr_pages) {
+ err = topa_insert_pages(buf, gfp);
+ if (err) {
+ pt_buffer_fini_topa(buf);
+ return -ENOMEM;
+ }
+ }
+
+ pt_buffer_setup_topa_index(buf);
+
+ /* link last table to the first one, unless we're double buffering */
+ if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(buf->last, -1)->end = 1;
+ }
+
+ pt_topa_dump(buf);
+ return 0;
+}
+
+/**
+ * pt_buffer_setup_aux() - set up topa tables for a PT buffer
+ * @cpu: Cpu on which to allocate, -1 means current.
+ * @pages: Array of pointers to buffer pages passed from perf core.
+ * @nr_pages: Number of pages in the buffer.
+ * @snapshot: If this is a snapshot/overwrite counter.
+ *
+ * This is a pmu::setup_aux callback that sets up ToPA tables and all the
+ * bookkeeping for an AUX buffer.
+ *
+ * Return: Our private PT buffer structure.
+ */
+static void *
+pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
+{
+ struct pt_buffer *buf;
+ int node, ret;
+
+ if (!nr_pages)
+ return NULL;
+
+ if (cpu == -1)
+ cpu = raw_smp_processor_id();
+ node = cpu_to_node(cpu);
+
+ buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
+ GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->cpu = cpu;
+ buf->snapshot = snapshot;
+ buf->data_pages = pages;
+
+ INIT_LIST_HEAD(&buf->tables);
+
+ ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
+ if (ret) {
+ kfree(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
+/**
+ * pt_buffer_free_aux() - perf AUX deallocation path callback
+ * @data: PT buffer.
+ */
+static void pt_buffer_free_aux(void *data)
+{
+ struct pt_buffer *buf = data;
+
+ pt_buffer_fini_topa(buf);
+ kfree(buf);
+}
+
+/**
+ * pt_buffer_is_full() - check if the buffer is full
+ * @buf: PT buffer.
+ * @pt: Per-cpu pt handle.
+ *
+ * If the user hasn't read data from the output region that aux_head
+ * points to, the buffer is considered full: the user needs to read at
+ * least this region and update aux_tail to point past it.
+ */
+static bool pt_buffer_is_full(struct pt_buffer *buf, struct pt *pt)
+{
+ if (buf->snapshot)
+ return false;
+
+ if (local_read(&buf->data_size) >= pt->handle.size)
+ return true;
+
+ return false;
+}
+
+/**
+ * intel_pt_interrupt() - PT PMI handler
+ */
+void intel_pt_interrupt(void)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf;
+ struct perf_event *event = pt->handle.event;
+
+ /*
+ * There may be a dangling PT bit in the interrupt status register
+ * after PT has been disabled by pt_event_stop(). Make sure we don't
+ * do anything (particularly, re-enable) for this event here.
+ */
+ if (!ACCESS_ONCE(pt->handle_nmi))
+ return;
+
+ pt_config_start(false);
+
+ if (!event)
+ return;
+
+ buf = perf_get_aux(&pt->handle);
+ if (!buf)
+ return;
+
+ pt_read_offset(buf);
+
+ pt_handle_status(pt);
+
+ pt_update_head(pt);
+
+ perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
+ local_xchg(&buf->lost, 0));
+
+ if (!event->hw.state) {
+ int ret;
+
+ buf = perf_aux_output_begin(&pt->handle, event);
+ if (!buf) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ pt_buffer_reset_offsets(buf, pt->handle.head);
+ ret = pt_buffer_reset_markers(buf, &pt->handle);
+ if (ret) {
+ perf_aux_output_end(&pt->handle, 0, true);
+ return;
+ }
+
+ pt_config_buffer(buf->cur->table, buf->cur_idx,
+ buf->output_off);
+ wrmsrl(MSR_IA32_RTIT_STATUS, 0);
+ pt_config(event);
+ }
+}
+
+/*
+ * PMU callbacks
+ */
+
+static void pt_event_start(struct perf_event *event, int mode)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+
+ if (pt_is_running() || !buf || pt_buffer_is_full(buf, pt)) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ ACCESS_ONCE(pt->handle_nmi) = 1;
+ event->hw.state = 0;
+
+ pt_config_buffer(buf->cur->table, buf->cur_idx,
+ buf->output_off);
+ wrmsrl(MSR_IA32_RTIT_STATUS, 0);
+ pt_config(event);
+}
+
+static void pt_event_stop(struct perf_event *event, int mode)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+
+ /*
+ * Protect against the PMI racing with disabling wrmsr,
+ * see comment in intel_pt_interrupt().
+ */
+ ACCESS_ONCE(pt->handle_nmi) = 0;
+ pt_config_start(false);
+
+ if (event->hw.state == PERF_HES_STOPPED)
+ return;
+
+ event->hw.state = PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_UPDATE) {
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+
+ if (!buf)
+ return;
+
+ if (WARN_ON_ONCE(pt->handle.event != event))
+ return;
+
+ pt_read_offset(buf);
+
+ pt_handle_status(pt);
+
+ pt_update_head(pt);
+ }
+}
+
+static void pt_event_del(struct perf_event *event, int mode)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf;
+
+ pt_event_stop(event, PERF_EF_UPDATE);
+
+ buf = perf_get_aux(&pt->handle);
+
+ if (buf) {
+ if (buf->snapshot)
+ pt->handle.head =
+ local_xchg(&buf->data_size,
+ buf->nr_pages << PAGE_SHIFT);
+ perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
+ local_xchg(&buf->lost, 0));
+ }
+}
+
+static int pt_event_add(struct perf_event *event, int mode)
+{
+ struct pt_buffer *buf;
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct hw_perf_event *hwc = &event->hw;
+ int ret = -EBUSY;
+
+ if (pt->handle.event)
+ goto fail;
+
+ buf = perf_aux_output_begin(&pt->handle, event);
+ ret = -EINVAL;
+ if (!buf)
+ goto fail_stop;
+
+ pt_buffer_reset_offsets(buf, pt->handle.head);
+ if (!buf->snapshot) {
+ ret = pt_buffer_reset_markers(buf, &pt->handle);
+ if (ret)
+ goto fail_end_stop;
+ }
+
+ if (mode & PERF_EF_START) {
+ pt_event_start(event, 0);
+ ret = -EBUSY;
+ if (hwc->state == PERF_HES_STOPPED)
+ goto fail_end_stop;
+ } else {
+ hwc->state = PERF_HES_STOPPED;
+ }
+
+ return 0;
+
+fail_end_stop:
+ perf_aux_output_end(&pt->handle, 0, true);
+fail_stop:
+ hwc->state = PERF_HES_STOPPED;
+fail:
+ return ret;
+}
+
+static void pt_event_read(struct perf_event *event)
+{
+}
+
+static void pt_event_destroy(struct perf_event *event)
+{
+ x86_del_exclusive(x86_lbr_exclusive_pt);
+}
+
+static int pt_event_init(struct perf_event *event)
+{
+ if (event->attr.type != pt_pmu.pmu.type)
+ return -ENOENT;
+
+ if (!pt_event_valid(event))
+ return -EINVAL;
+
+ if (x86_add_exclusive(x86_lbr_exclusive_pt))
+ return -EBUSY;
+
+ event->destroy = pt_event_destroy;
+
+ return 0;
+}
+
+static __init int pt_init(void)
+{
+ int ret, cpu, prior_warn = 0;
+
+ BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ u64 ctl;
+
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
+ if (!ret && (ctl & RTIT_CTL_TRACEEN))
+ prior_warn++;
+ }
+ put_online_cpus();
+
+ if (prior_warn) {
+ x86_add_exclusive(x86_lbr_exclusive_pt);
+ pr_warn("PT is enabled at boot time, doing nothing\n");
+
+ return -EBUSY;
+ }
+
+ ret = pt_pmu_hw_init();
+ if (ret)
+ return ret;
+
+ if (!pt_cap_get(PT_CAP_topa_output)) {
+ pr_warn("ToPA output is not supported on this CPU\n");
+ return -ENODEV;
+ }
+
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ pt_pmu.pmu.capabilities =
+ PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
+
+ pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
+ pt_pmu.pmu.attr_groups = pt_attr_groups;
+ pt_pmu.pmu.task_ctx_nr = perf_sw_context;
+ pt_pmu.pmu.event_init = pt_event_init;
+ pt_pmu.pmu.add = pt_event_add;
+ pt_pmu.pmu.del = pt_event_del;
+ pt_pmu.pmu.start = pt_event_start;
+ pt_pmu.pmu.stop = pt_event_stop;
+ pt_pmu.pmu.read = pt_event_read;
+ pt_pmu.pmu.setup_aux = pt_buffer_setup_aux;
+ pt_pmu.pmu.free_aux = pt_buffer_free_aux;
+ ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
+
+ return ret;
+}
+
+module_init(pt_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index c4bb8b8e5017..358c54ad20d4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -62,6 +62,14 @@
#define RAPL_IDX_PP1_NRG_STAT 3 /* gpu */
#define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */
+#define NR_RAPL_DOMAINS 0x4
+static const char *rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
+ "pp0-core",
+ "package",
+ "dram",
+ "pp1-gpu",
+};
+
/* Clients have PP0, PKG */
#define RAPL_IDX_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
1<<RAPL_IDX_PKG_NRG_STAT|\
@@ -112,7 +120,6 @@ static struct perf_pmu_events_attr event_attr_##v = { \
struct rapl_pmu {
spinlock_t lock;
- int hw_unit; /* 1/2^hw_unit Joule */
int n_active; /* number of active events */
struct list_head active_list;
struct pmu *pmu; /* pointer to rapl_pmu_class */
@@ -120,6 +127,7 @@ struct rapl_pmu {
struct hrtimer hrtimer;
};
+static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly; /* 1/2^hw_unit Joule */
static struct pmu rapl_pmu_class;
static cpumask_t rapl_cpu_mask;
static int rapl_cntr_mask;
@@ -127,6 +135,7 @@ static int rapl_cntr_mask;
static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu);
static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free);
+static struct x86_pmu_quirk *rapl_quirks;
static inline u64 rapl_read_counter(struct perf_event *event)
{
u64 raw;
@@ -134,15 +143,28 @@ static inline u64 rapl_read_counter(struct perf_event *event)
return raw;
}
-static inline u64 rapl_scale(u64 v)
+#define rapl_add_quirk(func_) \
+do { \
+ static struct x86_pmu_quirk __quirk __initdata = { \
+ .func = func_, \
+ }; \
+ __quirk.next = rapl_quirks; \
+ rapl_quirks = &__quirk; \
+} while (0)
+
+static inline u64 rapl_scale(u64 v, int cfg)
{
+ if (cfg > NR_RAPL_DOMAINS) {
+ pr_warn("invalid domain %d, failed to scale data\n", cfg);
+ return v;
+ }
/*
* scale delta to smallest unit (1/2^32)
* users must then scale back: count * 1/(1e9*2^32) to get Joules
* or use ldexp(count, -32).
* Watts = Joules/Time delta
*/
- return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
+ return v << (32 - rapl_hw_unit[cfg - 1]);
}
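
Because every domain's delta is normalized to units of 2^-32 Joules, user space applies one fixed conversion regardless of the per-domain hardware unit: with a hypothetical hw unit of 2^-14 J, a raw delta of 3 ticks is shifted left by 32 - 14 = 18 bits here and still decodes to 3 * 2^-14 J. A minimal user-side sketch (editorial illustration):

	#include <math.h>

	/* Convert a scaled RAPL count (units of 2^-32 Joules) to Joules. */
	static double rapl_scaled_to_joules(unsigned long long scaled)
	{
		return ldexp((double)scaled, -32);
	}
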
static u64 rapl_event_update(struct perf_event *event)
@@ -173,7 +195,7 @@ again:
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
- sdelta = rapl_scale(delta);
+ sdelta = rapl_scale(delta, event->hw.config);
local64_add(sdelta, &event->count);
@@ -546,12 +568,22 @@ static void rapl_cpu_init(int cpu)
cpumask_set_cpu(cpu, &rapl_cpu_mask);
}
+static __init void rapl_hsw_server_quirk(void)
+{
+ /*
+ * DRAM domain on HSW server has a fixed energy unit which can
+ * differ from the unit reported by the power unit MSR.
+ * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
+ * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
+ */
+ rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;
+}
+
static int rapl_cpu_prepare(int cpu)
{
struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
int phys_id = topology_physical_package_id(cpu);
u64 ms;
- u64 msr_rapl_power_unit_bits;
if (pmu)
return 0;
@@ -559,24 +591,13 @@ static int rapl_cpu_prepare(int cpu)
if (phys_id < 0)
return -1;
- /* protect rdmsrl() to handle virtualization */
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
- return -1;
-
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
if (!pmu)
return -1;
-
spin_lock_init(&pmu->lock);
INIT_LIST_HEAD(&pmu->active_list);
- /*
- * grab power unit as: 1/2^unit Joules
- *
- * we cache in local PMU instance
- */
- pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
pmu->pmu = &rapl_pmu_class;
/*
@@ -586,8 +607,8 @@ static int rapl_cpu_prepare(int cpu)
* divide interval by 2 to avoid lockstep (2 * 100)
* if hw unit is 32, then we use 2 ms 1/200/2
*/
- if (pmu->hw_unit < 32)
- ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1));
+ if (rapl_hw_unit[0] < 32)
+ ms = (1000 / (2 * 100)) * (1ULL << (32 - rapl_hw_unit[0] - 1));
else
ms = 2;
@@ -655,6 +676,20 @@ static int rapl_cpu_notifier(struct notifier_block *self,
return NOTIFY_OK;
}
+static int rapl_check_hw_unit(void)
+{
+ u64 msr_rapl_power_unit_bits;
+ int i;
+
+ /* protect rdmsrl() to handle virtualization */
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+ return -1;
+ for (i = 0; i < NR_RAPL_DOMAINS; i++)
+ rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
+
+ return 0;
+}
+
static const struct x86_cpu_id rapl_cpu_match[] = {
[0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
[1] = {},
@@ -664,6 +699,8 @@ static int __init rapl_pmu_init(void)
{
struct rapl_pmu *pmu;
int cpu, ret;
+ struct x86_pmu_quirk *quirk;
+ int i;
/*
* check for Intel processor family 6
@@ -678,8 +715,14 @@ static int __init rapl_pmu_init(void)
rapl_cntr_mask = RAPL_IDX_CLN;
rapl_pmu_events_group.attrs = rapl_events_cln_attr;
break;
+ case 63: /* Haswell-Server */
+ rapl_add_quirk(rapl_hsw_server_quirk);
+ rapl_cntr_mask = RAPL_IDX_SRV;
+ rapl_pmu_events_group.attrs = rapl_events_srv_attr;
+ break;
case 60: /* Haswell */
case 69: /* Haswell-Celeron */
+ case 61: /* Broadwell */
rapl_cntr_mask = RAPL_IDX_HSW;
rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
break;
@@ -693,7 +736,13 @@ static int __init rapl_pmu_init(void)
/* unsupported */
return 0;
}
+ ret = rapl_check_hw_unit();
+ if (ret)
+ return ret;
+ /* run cpu model quirks */
+ for (quirk = rapl_quirks; quirk; quirk = quirk->next)
+ quirk->func();
cpu_notifier_register_begin();
for_each_online_cpu(cpu) {
@@ -714,14 +763,18 @@ static int __init rapl_pmu_init(void)
pmu = __this_cpu_read(rapl_pmu);
- pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
+ pr_info("RAPL PMU detected,"
" API unit is 2^-32 Joules,"
" %d fixed counters"
" %llu ms ovfl timer\n",
- pmu->hw_unit,
hweight32(rapl_cntr_mask),
ktime_to_ms(pmu->timer_interval));
-
+ for (i = 0; i < NR_RAPL_DOMAINS; i++) {
+ if (rapl_cntr_mask & (1 << i)) {
+ pr_info("hw unit of domain %s 2^-%d Joules\n",
+ rapl_domain_names[i], rapl_hw_unit[i]);
+ }
+ }
out:
cpu_notifier_register_done();
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
index 3001015b755c..4562e9e22c60 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -1,6 +1,13 @@
/* Nehalem/SandyBridge/Haswell uncore support */
#include "perf_event_intel_uncore.h"
+/* Uncore IMC PCI IDs */
+#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
+#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
+#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
+#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
+#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
+
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
@@ -472,6 +479,10 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ },
};
@@ -502,6 +513,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */
IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
+ IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
{ /* end marker */ }
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 21af6149edf2..12d9548457e7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -1132,8 +1132,7 @@ static int snbep_pci2phy_map_init(int devid)
}
}
- if (ubox_dev)
- pci_dev_put(ubox_dev);
+ pci_dev_put(ubox_dev);
return err ? pcibios_err_to_errno(err) : 0;
}
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 60639093d536..3d423a101fae 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -41,6 +41,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{ X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
{ X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
{ X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
+ { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 7d46bb260334..e2ce85db2283 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -149,6 +149,9 @@ static void __init e820_print_type(u32 type)
case E820_UNUSABLE:
printk(KERN_CONT "unusable");
break;
+ case E820_PRAM:
+ printk(KERN_CONT "persistent (type %u)", type);
+ break;
default:
printk(KERN_CONT "type %u", type);
break;
@@ -343,7 +346,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
* continue building up new bios map based on this
* information
*/
- if (current_type != last_type) {
+ if (current_type != last_type || current_type == E820_PRAM) {
if (last_type != 0) {
new_bios[new_bios_entry].size =
change_point[chgidx]->addr - last_addr;
@@ -688,6 +691,7 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
register_nosave_region(pfn, PFN_UP(ei->addr));
pfn = PFN_DOWN(ei->addr + ei->size);
+
if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
register_nosave_region(PFN_UP(ei->addr), pfn);
@@ -748,7 +752,7 @@ u64 __init early_reserve_e820(u64 size, u64 align)
/*
* Find the highest page frame number we have available
*/
-static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
+static unsigned long __init e820_end_pfn(unsigned long limit_pfn)
{
int i;
unsigned long last_pfn = 0;
@@ -759,7 +763,11 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
unsigned long start_pfn;
unsigned long end_pfn;
- if (ei->type != type)
+ /*
+ * Persistent memory is accounted as ram for purposes of
+ * establishing max_pfn and mem_map.
+ */
+ if (ei->type != E820_RAM && ei->type != E820_PRAM)
continue;
start_pfn = ei->addr >> PAGE_SHIFT;
@@ -784,12 +792,12 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
}
unsigned long __init e820_end_of_ram_pfn(void)
{
- return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
+ return e820_end_pfn(MAX_ARCH_PFN);
}
unsigned long __init e820_end_of_low_ram_pfn(void)
{
- return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
+ return e820_end_pfn(1UL << (32-PAGE_SHIFT));
}
static void early_panic(char *msg)
@@ -866,6 +874,9 @@ static int __init parse_memmap_one(char *p)
} else if (*p == '$') {
start_at = memparse(p+1, &p);
e820_add_region(start_at, mem_size, E820_RESERVED);
+ } else if (*p == '!') {
+ start_at = memparse(p+1, &p);
+ e820_add_region(start_at, mem_size, E820_PRAM);
} else
e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
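
Following the same memmap=<size><op><address> syntax as the '$' case above, the new '!' form marks a range as E820_PRAM; for example, a hypothetical boot parameter

	memmap=4G!12G

would treat the 4 GiB of address space starting at 12 GiB as persistent memory, to be picked up by the pmem platform-device registration added in arch/x86/kernel/pmem.c below.
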
@@ -907,6 +918,7 @@ static inline const char *e820_type_to_string(int e820_type)
case E820_ACPI: return "ACPI Tables";
case E820_NVS: return "ACPI Non-volatile Storage";
case E820_UNUSABLE: return "Unusable memory";
+ case E820_PRAM: return "Persistent RAM";
default: return "reserved";
}
}
@@ -940,7 +952,9 @@ void __init e820_reserve_resources(void)
* pci device BAR resource and insert them later in
* pcibios_resource_survey()
*/
- if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
+ if (((e820.map[i].type != E820_RESERVED) &&
+ (e820.map[i].type != E820_PRAM)) ||
+ res->start < (1ULL<<20)) {
res->flags |= IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
}
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 49ff55ef9b26..89427d8d4fc5 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -375,12 +375,6 @@ static int __init setup_early_printk(char *buf)
if (!strncmp(buf, "xen", 3))
early_console_register(&xenboot_console, keep);
#endif
-#ifdef CONFIG_EARLY_PRINTK_INTEL_MID
- if (!strncmp(buf, "hsu", 3)) {
- hsu_early_console_init(buf + 3);
- early_console_register(&early_hsu_console, keep);
- }
-#endif
#ifdef CONFIG_EARLY_PRINTK_EFI
if (!strncmp(buf, "efi", 3))
early_console_register(&early_efi_console, keep);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c7b238494b31..02c2eff7478d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -295,6 +295,15 @@ system_call_fastpath:
* rflags from r11 (but RF and VM bits are forced to 0),
* cs and ss are loaded from MSRs.
* Restoration of rflags re-enables interrupts.
+ *
+ * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
+ * descriptor is not reinitialized. This means that we should
+ * avoid SYSRET with SS == NULL, which could happen if we schedule,
+ * exit the kernel, and re-enter using an interrupt vector. (All
+ * interrupt entries on x86_64 set SS to NULL.) We prevent that
+ * from happening by reloading SS in __switch_to. (Actually
+ * detecting the failure in 64-bit userspace is tricky but can be
+ * done.)
*/
USERGS_SYSRET64
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 367f39d35e9c..009183276bb7 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -341,7 +341,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
+ struct xsave_struct *xsave;
int ret;
if (!cpu_has_xsave)
@@ -351,6 +351,8 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
if (ret)
return ret;
+ xsave = &target->thread.fpu.state->xsave;
+
/*
* Copy the 48bytes defined by the software first into the xstate
* memory layout in the thread struct, so that we can copy the entire
@@ -369,7 +371,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
+ struct xsave_struct *xsave;
int ret;
if (!cpu_has_xsave)
@@ -379,6 +381,8 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
if (ret)
return ret;
+ xsave = &target->thread.fpu.state->xsave;
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 24d079604fd5..1deffe6cc873 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -354,6 +354,7 @@ int __copy_instruction(u8 *dest, u8 *src)
{
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
+ int length;
unsigned long recovered_insn =
recover_probed_instruction(buf, (unsigned long)src);
@@ -361,16 +362,18 @@ int __copy_instruction(u8 *dest, u8 *src)
return 0;
kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
insn_get_length(&insn);
+ length = insn.length;
+
/* Another subsystem puts a breakpoint, failed to recover */
if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
return 0;
- memcpy(dest, insn.kaddr, insn.length);
+ memcpy(dest, insn.kaddr, length);
#ifdef CONFIG_X86_64
if (insn_rip_relative(&insn)) {
s64 newdisp;
u8 *disp;
- kernel_insn_init(&insn, dest, insn.length);
+ kernel_insn_init(&insn, dest, length);
insn_get_displacement(&insn);
/*
* The copied instruction uses the %rip-relative addressing
@@ -394,7 +397,7 @@ int __copy_instruction(u8 *dest, u8 *src)
*(s32 *) disp = (s32) newdisp;
}
#endif
- return insn.length;
+ return length;
}
static int arch_copy_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index e354cc6446ab..9435620062df 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -513,7 +513,7 @@ void __init kvm_guest_init(void)
* can get false positives too easily, for example if the host is
* overcommitted.
*/
- watchdog_enable_hardlockup_detector(false);
+ hardlockup_detector_disable();
}
static noinline uint32_t __kvm_cpuid_base(void)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 548d25f00c90..c614dd492f5f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -443,7 +443,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
.pte_clear = native_pte_clear,
@@ -454,13 +454,13 @@ struct pv_mmu_ops pv_mmu_ops = {
.pmd_val = PTE_IDENT,
.make_pmd = PTE_IDENT,
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
.pud_val = PTE_IDENT,
.make_pud = PTE_IDENT,
.set_pgd = native_set_pgd,
#endif
-#endif /* PAGETABLE_LEVELS >= 3 */
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
.pte_val = PTE_IDENT,
.pgd_val = PTE_IDENT,
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
new file mode 100644
index 000000000000..3420c874ddc5
--- /dev/null
+++ b/arch/x86/kernel/pmem.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015, Christoph Hellwig.
+ */
+#include <linux/memblock.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <asm/e820.h>
+#include <asm/page_types.h>
+#include <asm/setup.h>
+
+static __init void register_pmem_device(struct resource *res)
+{
+ struct platform_device *pdev;
+ int error;
+
+ pdev = platform_device_alloc("pmem", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return;
+
+ error = platform_device_add_resources(pdev, res, 1);
+ if (error)
+ goto out_put_pdev;
+
+ error = platform_device_add(pdev);
+ if (error)
+ goto out_put_pdev;
+ return;
+
+out_put_pdev:
+ dev_warn(&pdev->dev, "failed to add 'pmem' (persistent memory) device!\n");
+ platform_device_put(pdev);
+}
+
+static __init int register_pmem_devices(void)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->type == E820_PRAM) {
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ .start = ei->addr,
+ .end = ei->addr + ei->size - 1,
+ };
+ register_pmem_device(&res);
+ }
+ }
+
+ return 0;
+}
+device_initcall(register_pmem_devices);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8213da62b1b7..6e338e3b1dc0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss);
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -156,11 +156,13 @@ void flush_thread(void)
/* FPU state will be reallocated lazily at the first use. */
drop_fpu(tsk);
free_thread_xstate(tsk);
- } else if (!used_math()) {
- /* kthread execs. TODO: cleanup this horror. */
- if (WARN_ON(init_fpu(tsk)))
- force_sig(SIGKILL, tsk);
- user_fpu_begin();
+ } else {
+ if (!tsk_used_math(tsk)) {
+ /* kthread execs. TODO: cleanup this horror. */
+ if (WARN_ON(init_fpu(tsk)))
+ force_sig(SIGKILL, tsk);
+ user_fpu_begin();
+ }
restore_init_xstate();
}
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4baaa972f52a..ddfdbf74f174 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -419,6 +419,34 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
+ if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
+ /*
+ * AMD CPUs have a misfeature: SYSRET sets the SS selector but
+ * does not update the cached descriptor. As a result, if we
+ * do SYSRET while SS is NULL, we'll end up in user mode with
+ * SS apparently equal to __USER_DS but actually unusable.
+ *
+ * The straightforward workaround would be to fix it up just
+ * before SYSRET, but that would slow down the system call
+ * fast paths. Instead, we ensure that SS is never NULL in
+ * system call context. We do this by replacing NULL SS
+ * selectors at every context switch. SYSCALL sets up a valid
+ * SS, so the only way to get NULL is to re-enter the kernel
+ * from CPL 3 through an interrupt. Since that can't happen
+ * in the same task as a running syscall, we are guaranteed to
+ * context switch between every interrupt vector entry and a
+ * subsequent SYSRET.
+ *
+ * We read SS first because SS reads are much faster than
+ * writes. Out of caution, we force SS to __KERNEL_DS even if
+ * it previously had a different non-NULL value.
+ */
+ unsigned short ss_sel;
+ savesegment(ss, ss_sel);
+ if (ss_sel != __KERNEL_DS)
+ loadsegment(ss, __KERNEL_DS);
+ }
+
return prev_p;
}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index e5ecd20e72dd..2f355d229a58 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,46 +141,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
-static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
-
-static struct pvclock_vsyscall_time_info *
-pvclock_get_vsyscall_user_time_info(int cpu)
-{
- if (!pvclock_vdso_info) {
- BUG();
- return NULL;
- }
-
- return &pvclock_vdso_info[cpu];
-}
-
-struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
-{
- return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
-}
-
#ifdef CONFIG_X86_64
-static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
- void *v)
-{
- struct task_migration_notifier *mn = v;
- struct pvclock_vsyscall_time_info *pvti;
-
- pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
-
- /* this is NULL when pvclock vsyscall is not initialized */
- if (unlikely(pvti == NULL))
- return NOTIFY_DONE;
-
- pvti->migrate_count++;
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block pvclock_migrate = {
- .notifier_call = pvclock_task_migrate,
-};
-
/*
* Initialize the generic pvclock vsyscall state. This will allocate
* a/some page(s) for the per-vcpu pvclock information, set up a
@@ -194,17 +155,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
- pvclock_vdso_info = i;
-
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}
-
- register_task_migration_notifier(&pvclock_migrate);
-
return 0;
}
#endif
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 3e581865c8e2..1ea14fd53933 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -593,24 +593,10 @@ badframe:
return 0;
}
-/*
- * OK, we're invoking a handler:
- */
-static int signr_convert(int sig)
-{
-#ifdef CONFIG_X86_32
- struct thread_info *info = current_thread_info();
-
- if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32)
- return info->exec_domain->signal_invmap[sig];
-#endif /* CONFIG_X86_32 */
- return sig;
-}
-
static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
- int usig = signr_convert(ksig->sig);
+ int usig = ksig->sig;
sigset_t *set = sigmask_to_save();
compat_sigset_t *cset = (compat_sigset_t *) set;
@@ -630,7 +616,8 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
- bool failed;
+ bool stepping, failed;
+
/* Are we from a system call? */
if (syscall_get_nr(current, regs) >= 0) {
/* If so, check system call restarting.. */
@@ -654,12 +641,13 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
}
/*
- * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
- * flag so that register information in the sigcontext is correct.
+ * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
+ * so that register information in the sigcontext is correct and
+ * then notify the tracer before entering the signal handler.
*/
- if (unlikely(regs->flags & X86_EFLAGS_TF) &&
- likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
- regs->flags &= ~X86_EFLAGS_TF;
+ stepping = test_thread_flag(TIF_SINGLESTEP);
+ if (stepping)
+ user_disable_single_step(current);
failed = (setup_rt_frame(ksig, regs) < 0);
if (!failed) {
@@ -670,10 +658,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* it might disable possible debug exception from the
* signal handler.
*
- * Clear TF when entering the signal handler, but
- * notify any tracer that was single-stepping it.
- * The tracer may want to single-step inside the
- * handler too.
+ * Clear TF for the case when it wasn't set by debugger to
+ * avoid the recursive send_sigtrap() in SIGTRAP handler.
*/
regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
/*
@@ -682,7 +668,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
if (used_math())
fpu_reset_state(current);
}
- signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
+ signal_setup_done(failed, ksig, stepping);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7035f6b21c3f..50e547eac8cd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,9 +77,6 @@
#include <asm/realmode.h>
#include <asm/misc.h>
-/* State of each CPU */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
@@ -257,7 +254,7 @@ static void notrace start_secondary(void *unused)
lock_vector_lock();
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
- per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+ cpu_set_state_online(smp_processor_id());
x86_platform.nmi_init();
/* enable local interrupts */
@@ -954,7 +951,10 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
*/
mtrr_save_state();
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+ /* x86 CPUs take themselves offline, so delayed offline is OK. */
+ err = cpu_check_up_prepare(cpu);
+ if (err && err != -EBUSY)
+ return err;
/* the FPU context is blank, nobody can own it */
__cpu_disable_lazy_restore(cpu);
@@ -1197,7 +1197,7 @@ void __init native_smp_prepare_boot_cpu(void)
switch_to_new_gdt(me);
/* already set me in cpu_online_mask in boot_cpu_init() */
cpumask_set_cpu(me, cpu_callout_mask);
- per_cpu(cpu_state, me) = CPU_ONLINE;
+ cpu_set_state_online(me);
}
void __init native_smp_cpus_done(unsigned int max_cpus)
@@ -1324,14 +1324,10 @@ static void __ref remove_cpu_from_maps(int cpu)
numa_remove_cpu(cpu);
}
-static DEFINE_PER_CPU(struct completion, die_complete);
-
void cpu_disable_common(void)
{
int cpu = smp_processor_id();
- init_completion(&per_cpu(die_complete, smp_processor_id()));
-
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
@@ -1355,24 +1351,27 @@ int native_cpu_disable(void)
return 0;
}
-void cpu_die_common(unsigned int cpu)
+int common_cpu_die(unsigned int cpu)
{
- wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
-}
+ int ret = 0;
-void native_cpu_die(unsigned int cpu)
-{
/* We don't do anything here: idle task is faking death itself. */
- cpu_die_common(cpu);
-
/* They ack this in play_dead() by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (cpu_wait_death(cpu, 5)) {
if (system_state == SYSTEM_RUNNING)
pr_info("CPU %u is now offline\n", cpu);
} else {
pr_err("CPU %u didn't die...\n", cpu);
+ ret = -1;
}
+
+ return ret;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+ common_cpu_die(cpu);
}
void play_dead_common(void)
@@ -1381,10 +1380,8 @@ void play_dead_common(void)
reset_lazy_tlbstate();
amd_e400_remove_cpu(raw_smp_processor_id());
- mb();
/* Ack it */
- __this_cpu_write(cpu_state, CPU_DEAD);
- complete(&per_cpu(die_complete, smp_processor_id()));
+ (void)cpu_report_death();
/*
* With physical CPU hotplug, we should halt the cpu
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index b79133abda48..5ecbfe5099da 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -57,7 +57,7 @@ int rodata_test(void)
/* test 3: check the value hasn't changed */
/* If this test fails, we managed to overwrite the data */
if (!rodata_test_data) {
- printk(KERN_ERR "rodata_test: Test 3 failes (end data)\n");
+ printk(KERN_ERR "rodata_test: Test 3 fails (end data)\n");
return -ENODEV;
}
/* test 4: check if the rodata section is 4Kb aligned */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index f4fa991406cd..324ab5247687 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -123,7 +123,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
* but we need to notify RCU.
*/
rcu_nmi_enter();
- prev_state = IN_KERNEL; /* the value is irrelevant. */
+ prev_state = CONTEXT_KERNEL; /* the value is irrelevant. */
}
/*
diff --git a/arch/x86/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c
index 6eb5c20ee373..d090ecf08809 100644
--- a/arch/x86/kvm/assigned-dev.c
+++ b/arch/x86/kvm/assigned-dev.c
@@ -666,7 +666,7 @@ static int probe_sysfs_permissions(struct pci_dev *dev)
if (r)
return r;
- inode = path.dentry->d_inode;
+ inode = d_backing_inode(path.dentry);
r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
path_put(&path);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d67206a7b99a..629af0f1c5c4 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -683,8 +683,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
unsigned long bitmap = 1;
struct kvm_lapic **dst;
int i;
- bool ret = false;
- bool x2apic_ipi = src && apic_x2apic_mode(src);
+ bool ret, x2apic_ipi;
*r = -1;
@@ -696,16 +695,18 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
if (irq->shorthand)
return false;
+ x2apic_ipi = src && apic_x2apic_mode(src);
if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
return false;
+ ret = true;
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
- if (!map)
+ if (!map) {
+ ret = false;
goto out;
-
- ret = true;
+ }
if (irq->dest_mode == APIC_DEST_PHYSICAL) {
if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 146f295ee322..d43867c33bc4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4481,9 +4481,11 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
pfn = spte_to_pfn(*sptep);
/*
- * Only EPT supported for now; otherwise, one would need to
- * find out efficiently whether the guest page tables are
- * also using huge pages.
+ * We cannot do huge page mapping for indirect shadow pages,
+ * which are found on the last rmap (level = 1) when not using
+ * tdp; such shadow pages are synced with the page table in
+ * the guest, and the guest page table is using 4K page size
+ * mapping if the indirect sp has level = 1.
*/
if (sp->role.direct &&
!kvm_is_reserved_pfn(pfn) &&
@@ -4504,19 +4506,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool flush = false;
unsigned long *rmapp;
unsigned long last_index, index;
- gfn_t gfn_start, gfn_end;
spin_lock(&kvm->mmu_lock);
- gfn_start = memslot->base_gfn;
- gfn_end = memslot->base_gfn + memslot->npages - 1;
-
- if (gfn_start >= gfn_end)
- goto out;
-
rmapp = memslot->arch.rmap[0];
- last_index = gfn_to_index(gfn_end, memslot->base_gfn,
- PT_PAGE_TABLE_LEVEL);
+ last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
+ memslot->base_gfn, PT_PAGE_TABLE_LEVEL);
for (index = 0; index <= last_index; ++index, ++rmapp) {
if (*rmapp)
@@ -4534,7 +4529,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
if (flush)
kvm_flush_remote_tlbs(kvm);
-out:
spin_unlock(&kvm->mmu_lock);
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f5e8dce8046c..f7b61687bd79 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3622,8 +3622,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
- KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+ /*
+ * Pass through host's Machine Check Enable value to hw_cr4, which
+ * is in force while we are in guest mode. Do not let guests control
+ * this bit, even if host CR4.MCE == 0.
+ */
+ unsigned long hw_cr4 =
+ (cr4_read_shadow() & X86_CR4_MCE) |
+ (cr4 & ~X86_CR4_MCE) |
+ (to_vmx(vcpu)->rmode.vm86_active ?
+ KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
if (cr4 & X86_CR4_VMXE) {
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e1a81267f3f6..c73efcd03e29 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1669,12 +1669,28 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
&guest_hv_clock, sizeof(guest_hv_clock))))
return 0;
- /*
- * The interface expects us to write an even number signaling that the
- * update is finished. Since the guest won't see the intermediate
- * state, we just increase by 2 at the end.
+ /* This VCPU is paused, but it's legal for a guest to read another
+ * VCPU's kvmclock, so we really have to follow the specification where
+ * it says that version is odd if data is being modified, and even after
+ * it is consistent.
+ *
+ * Version field updates must be kept separate. This is because
+ * kvm_write_guest_cached might use a "rep movs" instruction, and
+ * writes within a string instruction are weakly ordered. So there
+ * are three writes overall.
+ *
+ * As a small optimization, only write the version field in the first
+ * and third write. The vcpu->pv_time cache is still valid, because the
+ * version field is the first in the struct.
*/
- vcpu->hv_clock.version = guest_hv_clock.version + 2;
+ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+
+ vcpu->hv_clock.version = guest_hv_clock.version + 1;
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
+
+ smp_wmb();
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
@@ -1695,6 +1711,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
+
+ smp_wmb();
+
+ vcpu->hv_clock.version++;
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
return 0;
}
@@ -5799,7 +5822,6 @@ int kvm_arch_init(void *opaque)
kvm_set_mmio_spte_mask();
kvm_x86_ops = ops;
- kvm_init_msr_list();
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0);
@@ -7253,7 +7275,14 @@ void kvm_arch_hardware_disable(void)
int kvm_arch_hardware_setup(void)
{
- return kvm_x86_ops->hardware_setup();
+ int r;
+
+ r = kvm_x86_ops->hardware_setup();
+ if (r != 0)
+ return r;
+
+ kvm_init_msr_list();
+ return 0;
}
void kvm_arch_hardware_unsetup(void)
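The kvm_guest_time_update() change above implements a seqlock-like protocol: the version is bumped to an odd value before the payload is written and back to an even value afterwards, with smp_wmb() keeping the three writes ordered. A hedged sketch of the matching guest-side reader (essentially what the pvclock read path does, shown here under an assumed helper name) makes the odd/even rule concrete:

/* Sketch only: retry until a consistent, even-versioned snapshot is read. */
static u64 read_pvclock_sketch(const struct pvclock_vcpu_time_info *pvti)
{
	u32 version;
	u64 system_time;

	do {
		version = READ_ONCE(pvti->version);
		smp_rmb();				/* pairs with the writer's smp_wmb() */
		system_time = pvti->system_time;	/* payload guarded by the version */
		smp_rmb();
		/* Odd version: update in flight. Changed version: a full update raced us. */
	} while ((version & 1) || version != READ_ONCE(pvti->version));

	return system_time;
}

Keeping the version writes separate from the "rep movs"-style payload write is what makes this retry loop sufficient.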
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 717908b16037..8f9a133cc099 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -87,8 +87,7 @@
struct lguest_data lguest_data = {
.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
- .noirq_start = (u32)lguest_noirq_start,
- .noirq_end = (u32)lguest_noirq_end,
+ .noirq_iret = (u32)lguest_noirq_iret,
.kernel_address = PAGE_OFFSET,
.blocked_interrupts = { 1 }, /* Block timer interrupts */
.syscall_vec = SYSCALL_VECTOR,
@@ -262,7 +261,7 @@ PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
/*:*/
-/* These are in i386_head.S */
+/* These are in head_32.S */
extern void lg_irq_enable(void);
extern void lg_restore_fl(unsigned long flags);
@@ -1368,7 +1367,7 @@ static void lguest_restart(char *reason)
* fit comfortably.
*
* First we need assembly templates of each of the patchable Guest operations,
- * and these are in i386_head.S.
+ * and these are in head_32.S.
*/
/*G:060 We construct a table from the assembler templates: */
diff --git a/arch/x86/lguest/head_32.S b/arch/x86/lguest/head_32.S
index 6ddfe4fc23c3..d5ae63f5ec5d 100644
--- a/arch/x86/lguest/head_32.S
+++ b/arch/x86/lguest/head_32.S
@@ -84,7 +84,7 @@ ENTRY(lg_irq_enable)
* set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we
* jump to send_interrupts, otherwise we're done.
*/
- testl $0, lguest_data+LGUEST_DATA_irq_pending
+ cmpl $0, lguest_data+LGUEST_DATA_irq_pending
jnz send_interrupts
/*
* One cool thing about x86 is that you can do many things without using
@@ -133,9 +133,8 @@ ENTRY(lg_restore_fl)
ret
/*:*/
-/* These demark the EIP range where host should never deliver interrupts. */
-.global lguest_noirq_start
-.global lguest_noirq_end
+/* This marks the EIP where the host should never deliver interrupts. */
+.global lguest_noirq_iret
/*M:004
* When the Host reflects a trap or injects an interrupt into the Guest, it
@@ -168,29 +167,26 @@ ENTRY(lg_restore_fl)
* So we have to copy eflags from the stack to lguest_data.irq_enabled before
* we do the "iret".
*
- * There are two problems with this: firstly, we need to use a register to do
- * the copy and secondly, the whole thing needs to be atomic. The first
- * problem is easy to solve: push %eax on the stack so we can use it, and then
- * restore it at the end just before the real "iret".
+ * There are two problems with this: firstly, we can't clobber any registers
+ * and secondly, the whole thing needs to be atomic. The first problem
+ * is solved by using "push memory"/"pop memory" instruction pair for copying.
*
* The second is harder: copying eflags to lguest_data.irq_enabled will turn
* interrupts on before we're finished, so we could be interrupted before we
- * return to userspace or wherever. Our solution to this is to surround the
- * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the
+ * return to userspace or wherever. Our solution to this is to tell the
* Host that it is *never* to interrupt us there, even if interrupts seem to be
- * enabled.
+ * enabled. (It's not necessary to protect the pop instruction, since the
+ * data gets updated only after it completes, so we only need to protect a
+ * single instruction, the iret.)
*/
ENTRY(lguest_iret)
- pushl %eax
- movl 12(%esp), %eax
-lguest_noirq_start:
+ pushl 2*4(%esp)
/*
* Note the %ss: segment prefix here. Normal data accesses use the
* "ds" segment, but that will have already been restored for whatever
* we're returning to (such as userspace): we can't trust it. The %ss:
* prefix makes sure we use the stack segment, which is still valid.
*/
- movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
- popl %eax
+ popl %ss:lguest_data+LGUEST_DATA_irq_enabled
+lguest_noirq_iret:
iret
-lguest_noirq_end:
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 1f33b3d1fd68..0a42327a59d7 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -82,7 +82,7 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
clac();
/* If the destination is a kernel buffer, we always clear the end */
- if ((unsigned long)to >= TASK_SIZE_MAX)
+ if (!__addr_ok(to))
memset(to, 0, len);
return len;
}
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index c4cc74006c61..a482d105172b 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -32,6 +32,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
-obj-$(CONFIG_MEMTEST) += memtest.o
-
obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index fdf617c00e2f..70e7444c6835 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -67,8 +67,13 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
/*
* Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * address space. It transparently creates a kernel huge I/O mapping when
+ * the physical address is aligned to a huge page size (1GB or 2MB) and
+ * the requested size is at least the huge page size.
+ *
+ * NOTE: MTRRs can override PAT memory types with 4KB granularity.
+ * Therefore, the mapping code falls back to smaller pages, down to 4KB,
+ * when a mapping range is covered by a non-WB type of MTRR.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
@@ -326,24 +331,40 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
+int arch_ioremap_pud_supported(void)
+{
+#ifdef CONFIG_X86_64
+ return cpu_has_gbpages;
+#else
+ return 0;
+#endif
+}
+
+int arch_ioremap_pmd_supported(void)
+{
+ return cpu_has_pse;
+}
+
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
- void *addr;
- unsigned long start = phys & PAGE_MASK;
+ unsigned long start = phys & PAGE_MASK;
+ unsigned long offset = phys & ~PAGE_MASK;
+ unsigned long vaddr;
/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
if (page_is_ram(start >> PAGE_SHIFT))
return __va(phys);
- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
- if (addr)
- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+ vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+ /* Only add the offset on success and return NULL if the ioremap() failed: */
+ if (vaddr)
+ vaddr += offset;
- return addr;
+ return (void *)vaddr;
}
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
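The new ioremap() comment above says huge I/O mappings are only attempted when the physical address and requested size line up with a huge page, with pud_set_huge()/pmd_set_huge() (added further below in pgtable.c) still refusing when the range is covered by a non-WB MTRR. As a hedged sketch of that decision, not the exact generic mapping code:

/* Sketch: when is a 2MB (PMD) I/O mapping even attempted? */
static bool want_pmd_ioremap_sketch(phys_addr_t phys, unsigned long vaddr,
				    unsigned long size)
{
	if (!arch_ioremap_pmd_supported())	/* cpu_has_pse on x86 */
		return false;
	if (!IS_ALIGNED(phys, PMD_SIZE) || !IS_ALIGNED(vaddr, PMD_SIZE))
		return false;
	if (size < PMD_SIZE)
		return false;
	/*
	 * Even then, pmd_set_huge() returns 0 for ranges covered by a non-WB
	 * MTRR, and the mapping falls back to 4KB pages.
	 */
	return true;
}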
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index df4552bd239e..9d518d693b4b 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -65,24 +65,23 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;
/*
- * 8 bits of randomness in 32bit mmaps, 20 address space bits
- * 28 bits of randomness in 64bit mmaps, 40 address space bits
- */
- if (current->flags & PF_RANDOMIZE) {
- if (mmap_is_ia32())
- rnd = get_random_int() % (1<<8);
- else
- rnd = get_random_int() % (1<<28);
- }
+ * 8 bits of randomness in 32bit mmaps, 20 address space bits
+ * 28 bits of randomness in 64bit mmaps, 40 address space bits
+ */
+ if (mmap_is_ia32())
+ rnd = (unsigned long)get_random_int() % (1<<8);
+ else
+ rnd = (unsigned long)get_random_int() % (1<<28);
+
return rnd << PAGE_SHIFT;
}
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -91,19 +90,19 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
/*
* Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
* does, but not when emulating X86_32
*/
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(unsigned long rnd)
{
if (mmap_is_ia32())
return TASK_UNMAPPED_BASE;
else
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + rnd;
}
/*
@@ -112,13 +111,18 @@ static unsigned long mmap_legacy_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
- mm->mmap_legacy_base = mmap_legacy_base();
- mm->mmap_base = mmap_base();
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ mm->mmap_legacy_base = mmap_legacy_base(random_factor);
if (mmap_is_legacy()) {
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
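The arch_mmap_rnd() rework keeps the existing entropy: 8 random bits for 32-bit tasks and 28 bits for 64-bit tasks, shifted left by PAGE_SHIFT, which yields 20 and 40 bits of address-space randomization respectively. A worked example of the 64-bit case, assuming 4KB pages:

/* Worked example of the 64-bit case (assumes PAGE_SHIFT == 12). */
static unsigned long mmap_rnd_example(void)
{
	unsigned long rnd = get_random_int() % (1UL << 28);	/* 0 .. 2^28 - 1 */

	return rnd << 12;	/* 0 .. 2^40 - 4096: a ~1TB randomization window */
}

mmap_base() then subtracts this offset (plus the stack gap) from TASK_SIZE, so the top-down mmap base can land anywhere inside that window.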
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5a7e5252c878..0b97d2c75df3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -4,6 +4,7 @@
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#include <asm/mtrr.h>
#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
@@ -58,7 +59,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
tlb_remove_page(tlb, pte);
}
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
struct page *page = virt_to_page(pmd);
@@ -74,14 +75,14 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
tlb_remove_page(tlb, page);
}
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
tlb_remove_page(tlb, virt_to_page(pud));
}
-#endif /* PAGETABLE_LEVELS > 3 */
-#endif /* PAGETABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
static inline void pgd_list_add(pgd_t *pgd)
{
@@ -117,9 +118,9 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
/* If the pgd points to a shared pagetable level (either the
ptes in non-PAE, or shared PMD in PAE), then just copy the
references from swapper_pg_dir. */
- if (PAGETABLE_LEVELS == 2 ||
- (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
- PAGETABLE_LEVELS == 4) {
+ if (CONFIG_PGTABLE_LEVELS == 2 ||
+ (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+ CONFIG_PGTABLE_LEVELS == 4) {
clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS);
@@ -560,3 +561,67 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
{
__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ u8 mtrr;
+
+ /*
+ * Do not use a huge page when the range is covered by non-WB type
+ * of MTRRs.
+ */
+ mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
+ if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+ return 0;
+
+ prot = pgprot_4k_2_large(prot);
+
+ set_pte((pte_t *)pud, pfn_pte(
+ (u64)addr >> PAGE_SHIFT,
+ __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ u8 mtrr;
+
+ /*
+ * Do not use a huge page when the range is covered by non-WB type
+ * of MTRRs.
+ */
+ mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
+ if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+ return 0;
+
+ prot = pgprot_4k_2_large(prot);
+
+ set_pte((pte_t *)pmd, pfn_pte(
+ (u64)addr >> PAGE_SHIFT,
+ __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_large(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (pmd_large(*pmd)) {
+ pmd_clear(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 987514396c1e..99f76103c6b7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (is_ereg(dst_reg))
EMIT1(0x41);
EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
+
+ /* emit 'movzwl eax, ax' */
+ if (is_ereg(dst_reg))
+ EMIT3(0x45, 0x0F, 0xB7);
+ else
+ EMIT2(0x0F, 0xB7);
+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
break;
case 32:
/* emit 'bswap eax' to swap lower 4 bytes */
@@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
break;
case BPF_ALU | BPF_END | BPF_FROM_LE:
+ switch (imm32) {
+ case 16:
+ /* emit 'movzwl eax, ax' to zero extend 16-bit
+ * into 64 bit
+ */
+ if (is_ereg(dst_reg))
+ EMIT3(0x45, 0x0F, 0xB7);
+ else
+ EMIT2(0x0F, 0xB7);
+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
+ break;
+ case 32:
+ /* emit 'mov eax, eax' to clear upper 32-bits */
+ if (is_ereg(dst_reg))
+ EMIT1(0x45);
+ EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
+ break;
+ case 64:
+ /* nop */
+ break;
+ }
break;
/* ST: *(u8*)(dst_reg + off) = imm */
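The BPF_END hunks above emit the endianness-conversion instructions directly: BPF_FROM_BE needs a byte swap plus zero-extension, while BPF_FROM_LE on the little-endian x86 host reduces to a truncation (movzwl for 16 bits, a self-move to clear the upper half for 32 bits, and a nop for 64 bits). A hedged sketch of the C semantics the emitted code implements:

/* Sketch: semantics of BPF_ALU | BPF_END on a little-endian host. */
static u64 bpf_end_semantics_sketch(u64 dst, u32 imm32, bool from_be)
{
	switch (imm32) {
	case 16:
		dst = from_be ? swab16((u16)dst) : (u16)dst;	/* zero-extended result */
		break;
	case 32:
		dst = from_be ? swab32((u32)dst) : (u32)dst;	/* upper 32 bits cleared */
		break;
	case 64:
		dst = from_be ? swab64(dst) : dst;		/* LE 64-bit is a nop */
		break;
	}
	return dst;
}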
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index e4695985f9de..d93963340c3c 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
kfree(info);
}
+/*
+ * An IO port or MMIO resource assigned to a PCI host bridge may be
+ * consumed by the host bridge itself or available to its child
+ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
+ * to tell whether the resource is consumed by the host bridge itself,
+ * but firmware hasn't used that bit consistently, so we can't rely on it.
+ *
+ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
+ * to be available to child bus/devices except one special case:
+ * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
+ * to access PCI configuration space.
+ *
+ * So explicitly filter out the PCI CFG IO ports [0xCF8-0xCFF].
+ */
+static bool resource_is_pcicfg_ioport(struct resource *res)
+{
+ return (res->flags & IORESOURCE_IO) &&
+ res->start == 0xCF8 && res->end == 0xCFF;
+}
+
static void probe_pci_root_info(struct pci_root_info *info,
struct acpi_device *device,
int busnum, int domain,
@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
"no IO and memory resources present in _CRS\n");
else
resource_list_for_each_entry_safe(entry, tmp, list) {
- if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
- (entry->res->flags & IORESOURCE_DISABLED))
+ if ((entry->res->flags & IORESOURCE_DISABLED) ||
+ resource_is_pcicfg_ioport(entry->res))
resource_list_destroy_entry(entry);
else
entry->res->name = info->name;
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index 0a8ee703b9fa..0ce1b1913673 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,5 +1,4 @@
obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o
-obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o
# SFI specific code
ifdef CONFIG_X86_INTEL_MID
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
deleted file mode 100644
index 4e720829ab90..000000000000
--- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * early_printk_intel_mid.c - early consoles for Intel MID platforms
- *
- * Copyright (c) 2008-2010, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-/*
- * This file implements early console named hsu.
- * hsu is based on a High Speed UART device which only exists in the Medfield
- * platform
- */
-
-#include <linux/serial_reg.h>
-#include <linux/serial_mfd.h>
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/intel-mid.h>
-
-/*
- * Following is the early console based on Medfield HSU (High
- * Speed UART) device.
- */
-#define HSU_PORT_BASE 0xffa28080
-
-static void __iomem *phsu;
-
-void hsu_early_console_init(const char *s)
-{
- unsigned long paddr, port = 0;
- u8 lcr;
-
- /*
- * Select the early HSU console port if specified by user in the
- * kernel command line.
- */
- if (*s && !kstrtoul(s, 10, &port))
- port = clamp_val(port, 0, 2);
-
- paddr = HSU_PORT_BASE + port * 0x80;
- phsu = (void __iomem *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
-
- /* Disable FIFO */
- writeb(0x0, phsu + UART_FCR);
-
- /* Set to default 115200 bps, 8n1 */
- lcr = readb(phsu + UART_LCR);
- writeb((0x80 | lcr), phsu + UART_LCR);
- writeb(0x18, phsu + UART_DLL);
- writeb(lcr, phsu + UART_LCR);
- writel(0x3600, phsu + UART_MUL*4);
-
- writeb(0x8, phsu + UART_MCR);
- writeb(0x7, phsu + UART_FCR);
- writeb(0x3, phsu + UART_LCR);
-
- /* Clear IRQ status */
- readb(phsu + UART_LSR);
- readb(phsu + UART_RX);
- readb(phsu + UART_IIR);
- readb(phsu + UART_MSR);
-
- /* Enable FIFO */
- writeb(0x7, phsu + UART_FCR);
-}
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-static void early_hsu_putc(char ch)
-{
- unsigned int timeout = 10000; /* 10ms */
- u8 status;
-
- while (--timeout) {
- status = readb(phsu + UART_LSR);
- if (status & BOTH_EMPTY)
- break;
- udelay(1);
- }
-
- /* Only write the char when there was no timeout */
- if (timeout)
- writeb(ch, phsu + UART_TX);
-}
-
-static void early_hsu_write(struct console *con, const char *str, unsigned n)
-{
- int i;
-
- for (i = 0; i < n && *str; i++) {
- if (*str == '\n')
- early_hsu_putc('\r');
- early_hsu_putc(*str);
- str++;
- }
-}
-
-struct console early_hsu_console = {
- .name = "earlyhsu",
- .write = early_hsu_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
index 3323c2745248..a55abb9f6c5e 100644
--- a/arch/x86/syscalls/Makefile
+++ b/arch/x86/syscalls/Makefile
@@ -19,6 +19,9 @@ quiet_cmd_syshdr = SYSHDR $@
quiet_cmd_systbl = SYSTBL $@
cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
+quiet_cmd_hypercalls = HYPERCALLS $@
+ cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^)
+
syshdr_abi_unistd_32 := i386
$(uapi)/unistd_32.h: $(syscall32) $(syshdr)
$(call if_changed,syshdr)
@@ -47,10 +50,16 @@ $(out)/syscalls_32.h: $(syscall32) $(systbl)
$(out)/syscalls_64.h: $(syscall64) $(systbl)
$(call if_changed,systbl)
+$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh
+ $(call if_changed,hypercalls)
+
+$(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h
+
uapisyshdr-y += unistd_32.h unistd_64.h unistd_x32.h
syshdr-y += syscalls_32.h
syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
syshdr-$(CONFIG_X86_64) += syscalls_64.h
+syshdr-$(CONFIG_XEN) += xen-hypercalls.h
targets += $(uapisyshdr-y) $(syshdr-y)
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index eafa324eb7a5..acb384d24669 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_BINFMT_ELF) += elfcore.o
subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
-subarch-$(CONFIG_HIGHMEM) += ../mm/highmem_32.o
else
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 8ffd2146fa6a..7e8a1a650435 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -36,22 +36,11 @@
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb() barrier()
-#ifdef CONFIG_SMP
-
-#define smp_mb() mb()
-#define smp_rmb() dma_rmb()
-#define smp_wmb() barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-
-#else /* CONFIG_SMP */
-
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif /* CONFIG_SMP */
-
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
index 25a1022dd793..0a656b727b1a 100644
--- a/arch/x86/um/asm/elf.h
+++ b/arch/x86/um/asm/elf.h
@@ -210,7 +210,7 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_EXEC_PAGESIZE 4096
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
extern long elf_aux_hwcap;
#define ELF_HWCAP (elf_aux_hwcap)
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 8e08176f0bcb..5c0b711d2433 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -8,9 +8,7 @@
#include <linux/slab.h>
#include <asm/unistd.h>
#include <os.h>
-#include <proc_mm.h>
#include <skas.h>
-#include <skas_ptrace.h>
#include <sysdep/tls.h>
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
@@ -19,105 +17,20 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
struct user_desc *desc, void **addr, int done)
{
long res;
-
- if (proc_mm) {
- /*
- * This is a special handling for the case, that the mm to
- * modify isn't current->active_mm.
- * If this is called directly by modify_ldt,
- * (current->active_mm->context.skas.u == mm_idp)
- * will be true. So no call to __switch_mm(mm_idp) is done.
- * If this is called in case of init_new_ldt or PTRACE_LDT,
- * mm_idp won't belong to current->active_mm, but child->mm.
- * So we need to switch child's mm into our userspace, then
- * later switch back.
- *
- * Note: I'm unsure: should interrupts be disabled here?
- */
- if (!current->active_mm || current->active_mm == &init_mm ||
- mm_idp != &current->active_mm->context.id)
- __switch_mm(mm_idp);
- }
-
- if (ptrace_ldt) {
- struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
- .func = func,
- .ptr = desc,
- .bytecount = sizeof(*desc)};
- u32 cpu;
- int pid;
-
- if (!proc_mm)
- pid = mm_idp->u.pid;
- else {
- cpu = get_cpu();
- pid = userspace_pid[cpu];
- }
-
- res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
-
- if (proc_mm)
- put_cpu();
- }
- else {
- void *stub_addr;
- res = syscall_stub_data(mm_idp, (unsigned long *)desc,
- (sizeof(*desc) + sizeof(long) - 1) &
- ~(sizeof(long) - 1),
- addr, &stub_addr);
- if (!res) {
- unsigned long args[] = { func,
- (unsigned long)stub_addr,
- sizeof(*desc),
- 0, 0, 0 };
- res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
- 0, addr, done);
- }
+ void *stub_addr;
+ res = syscall_stub_data(mm_idp, (unsigned long *)desc,
+ (sizeof(*desc) + sizeof(long) - 1) &
+ ~(sizeof(long) - 1),
+ addr, &stub_addr);
+ if (!res) {
+ unsigned long args[] = { func,
+ (unsigned long)stub_addr,
+ sizeof(*desc),
+ 0, 0, 0 };
+ res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
+ 0, addr, done);
}
- if (proc_mm) {
- /*
- * This is the second part of special handling, that makes
- * PTRACE_LDT possible to implement.
- */
- if (current->active_mm && current->active_mm != &init_mm &&
- mm_idp != &current->active_mm->context.id)
- __switch_mm(&current->active_mm->context.id);
- }
-
- return res;
-}
-
-static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
-{
- int res, n;
- struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
- .func = 0,
- .bytecount = bytecount,
- .ptr = kmalloc(bytecount, GFP_KERNEL)};
- u32 cpu;
-
- if (ptrace_ldt.ptr == NULL)
- return -ENOMEM;
-
- /*
- * This is called from sys_modify_ldt only, so userspace_pid gives
- * us the right number
- */
-
- cpu = get_cpu();
- res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
- put_cpu();
- if (res < 0)
- goto out;
-
- n = copy_to_user(ptr, ptrace_ldt.ptr, res);
- if (n != 0)
- res = -EFAULT;
-
- out:
- kfree(ptrace_ldt.ptr);
-
return res;
}
@@ -145,9 +58,6 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
err = bytecount;
- if (ptrace_ldt)
- return read_ldt_from_host(ptr, bytecount);
-
mutex_lock(&ldt->lock);
if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
@@ -229,17 +139,11 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
goto out;
}
- if (!ptrace_ldt)
- mutex_lock(&ldt->lock);
+ mutex_lock(&ldt->lock);
err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
if (err)
goto out_unlock;
- else if (ptrace_ldt) {
- /* With PTRACE_LDT available, this is used as a flag only */
- ldt->entry_count = 1;
- goto out;
- }
if (ldt_info.entry_number >= ldt->entry_count &&
ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
@@ -393,91 +297,56 @@ long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
int i;
long page, err=0;
void *addr = NULL;
- struct proc_mm_op copy;
- if (!ptrace_ldt)
- mutex_init(&new_mm->arch.ldt.lock);
+ mutex_init(&new_mm->arch.ldt.lock);
if (!from_mm) {
memset(&desc, 0, sizeof(desc));
/*
- * We have to initialize a clean ldt.
+ * Now we try to retrieve info about the ldt, we
+ * inherited from the host. All ldt-entries found
+ * will be reset in the following loop
*/
- if (proc_mm) {
- /*
- * If the new mm was created using proc_mm, host's
- * default-ldt currently is assigned, which normally
- * contains the call-gates for lcall7 and lcall27.
- * To remove these gates, we simply write an empty
- * entry as number 0 to the host.
- */
- err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
- }
- else{
- /*
- * Now we try to retrieve info about the ldt, we
- * inherited from the host. All ldt-entries found
- * will be reset in the following loop
- */
- ldt_get_host_info();
- for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
- desc.entry_number = *num_p;
- err = write_ldt_entry(&new_mm->id, 1, &desc,
- &addr, *(num_p + 1) == -1);
- if (err)
- break;
- }
+ ldt_get_host_info();
+ for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
+ desc.entry_number = *num_p;
+ err = write_ldt_entry(&new_mm->id, 1, &desc,
+ &addr, *(num_p + 1) == -1);
+ if (err)
+ break;
}
new_mm->arch.ldt.entry_count = 0;
goto out;
}
- if (proc_mm) {
- /*
- * We have a valid from_mm, so we now have to copy the LDT of
- * from_mm to new_mm, because using proc_mm an new mm with
- * an empty/default LDT was created in new_mm()
- */
- copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
- .u =
- { .copy_segments =
- from_mm->id.u.mm_fd } } );
- i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
- if (i != sizeof(copy))
- printk(KERN_ERR "new_mm : /proc/mm copy_segments "
- "failed, err = %d\n", -i);
- }
-
- if (!ptrace_ldt) {
- /*
- * Our local LDT is used to supply the data for
- * modify_ldt(READLDT), if PTRACE_LDT isn't available,
- * i.e., we have to use the stub for modify_ldt, which
- * can't handle the big read buffer of up to 64kB.
- */
- mutex_lock(&from_mm->arch.ldt.lock);
- if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
- memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
- sizeof(new_mm->arch.ldt.u.entries));
- else {
- i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
- while (i-->0) {
- page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- if (!page) {
- err = -ENOMEM;
- break;
- }
- new_mm->arch.ldt.u.pages[i] =
- (struct ldt_entry *) page;
- memcpy(new_mm->arch.ldt.u.pages[i],
- from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
+ /*
+ * Our local LDT is used to supply the data for
+ * modify_ldt(READLDT), if PTRACE_LDT isn't available,
+ * i.e., we have to use the stub for modify_ldt, which
+ * can't handle the big read buffer of up to 64kB.
+ */
+ mutex_lock(&from_mm->arch.ldt.lock);
+ if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
+ memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
+ sizeof(new_mm->arch.ldt.u.entries));
+ else {
+ i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+ while (i-->0) {
+ page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+ if (!page) {
+ err = -ENOMEM;
+ break;
}
+ new_mm->arch.ldt.u.pages[i] =
+ (struct ldt_entry *) page;
+ memcpy(new_mm->arch.ldt.u.pages[i],
+ from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
}
- new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
- mutex_unlock(&from_mm->arch.ldt.lock);
}
+ new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
+ mutex_unlock(&from_mm->arch.ldt.lock);
out:
return err;
@@ -488,7 +357,7 @@ void free_ldt(struct mm_context *mm)
{
int i;
- if (!ptrace_ldt && mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
+ if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
while (i-- > 0)
free_page((long) mm->arch.ldt.u.pages[i]);
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index a26086b8a800..b6f2437ec29c 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -27,9 +27,6 @@ struct faultinfo {
/* This is Page Fault */
#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
-/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
-#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)
-
#define PTRACE_FULL_FAULTINFO 0
#endif
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index f811cbe15d62..ee88f88974ea 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -27,9 +27,6 @@ struct faultinfo {
/* This is Page Fault */
#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
-/* No broken SKAS API, which doesn't pass trap_no, here. */
-#define SEGV_MAYBE_FIXABLE(fi) 0
-
#define PTRACE_FULL_FAULTINFO 1
#endif
diff --git a/arch/x86/um/shared/sysdep/skas_ptrace.h b/arch/x86/um/shared/sysdep/skas_ptrace.h
deleted file mode 100644
index 453febe98993..000000000000
--- a/arch/x86/um/shared/sysdep/skas_ptrace.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_X86_SKAS_PTRACE_H
-#define __SYSDEP_X86_SKAS_PTRACE_H
-
-struct ptrace_faultinfo {
- int is_write;
- unsigned long addr;
-};
-
-struct ptrace_ldt {
- int func;
- void *ptr;
- unsigned long bytecount;
-};
-
-#define PTRACE_LDT 54
-
-#endif
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 0c8c32bfd792..592491d1d70d 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -549,13 +549,6 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
if (err)
return err;
- /* Set up registers for signal handler */
- {
- struct exec_domain *ed = current_thread_info()->exec_domain;
- if (unlikely(ed && ed->signal_invmap && sig < 32))
- sig = ed->signal_invmap[sig];
- }
-
PT_REGS_SP(regs) = (unsigned long) frame;
PT_REGS_DI(regs) = sig;
/* In case the signal handler was declared without prototypes */
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 275a3a8b78af..e97032069f88 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
-HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 40d2473836c9..9793322751e0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,15 +82,18 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
- u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;
/*
- * When looping to get a consistent (time-info, tsc) pair, we
- * also need to deal with the possibility we can switch vcpus,
- * so make sure we always re-fetch time-info for the current vcpu.
+ * Note: the hypervisor must guarantee that:
+ * 1. the cpu ID number maps 1:1 to per-CPU pvclock time info;
+ * 2. the per-CPU pvclock time info is updated whenever the
+ * underlying CPU changes;
+ * 3. the version is increased whenever the underlying CPU
+ * changes.
+ *
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -99,27 +102,20 @@ static notrace cycle_t vread_pvclock(int *mode)
* __getcpu() calls (Gleb).
*/
- /* Make sure migrate_count will change if we leave the VCPU. */
- do {
- pvti = get_pvti(cpu);
- migrate_count = pvti->migrate_count;
-
- cpu1 = cpu;
- cpu = __getcpu() & VGETCPU_CPU_MASK;
- } while (unlikely(cpu != cpu1));
+ pvti = get_pvti(cpu);
version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
/*
* Test we're still on the cpu as well as the version.
- * - We must read TSC of pvti's VCPU.
- * - KVM doesn't follow the versioning protocol, so data could
- * change before version if we left the VCPU.
+ * We could have been migrated just after the first
+ * vgetcpu but before fetching the version, so we
+ * wouldn't notice a version change.
*/
- smp_rmb();
- } while (unlikely((pvti->pvti.version & 1) ||
- pvti->pvti.version != version ||
- pvti->migrate_count != migrate_count));
+ cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+ } while (unlikely(cpu != cpu1 ||
+ (pvti->pvti.version & 1) ||
+ pvti->pvti.version != version));
if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 7005ced5d1ad..70e060ad879a 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -7,6 +7,7 @@
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include "xen-ops.h"
+#include "smp.h"
static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
{
@@ -28,7 +29,186 @@ static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
return 0xfd;
}
+static unsigned long xen_set_apic_id(unsigned int x)
+{
+ WARN_ON(1);
+ return x;
+}
+
+static unsigned int xen_get_apic_id(unsigned long x)
+{
+ return ((x)>>24) & 0xFFu;
+}
+
+static u32 xen_apic_read(u32 reg)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_get_cpuinfo,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.pcpu_info.xen_cpuid = 0,
+ };
+ int ret = 0;
+
+ /* Shouldn't need this as APIC is turned off for PV, and we only
+ * get called on the bootup processor. But just in case. */
+ if (!xen_initial_domain() || smp_processor_id())
+ return 0;
+
+ if (reg == APIC_LVR)
+ return 0x10;
+#ifdef CONFIG_X86_32
+ if (reg == APIC_LDR)
+ return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+#endif
+ if (reg != APIC_ID)
+ return 0;
+
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ return 0;
+
+ return op.u.pcpu_info.apic_id << 24;
+}
+
+static void xen_apic_write(u32 reg, u32 val)
+{
+ /* Warn to see if there's any stray references */
+ WARN(1,"register: %x, value: %x\n", reg, val);
+}
+
+static u64 xen_apic_icr_read(void)
+{
+ return 0;
+}
+
+static void xen_apic_icr_write(u32 low, u32 id)
+{
+ /* Warn to see if there's any stray references */
+ WARN_ON(1);
+}
+
+static u32 xen_safe_apic_wait_icr_idle(void)
+{
+ return 0;
+}
+
+static int xen_apic_probe_pv(void)
+{
+ if (xen_pv_domain())
+ return 1;
+
+ return 0;
+}
+
+static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ return xen_pv_domain();
+}
+
+static int xen_id_always_valid(int apicid)
+{
+ return 1;
+}
+
+static int xen_id_always_registered(void)
+{
+ return 1;
+}
+
+static int xen_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+ return initial_apic_id >> index_msb;
+}
+
+#ifdef CONFIG_X86_32
+static int xen_x86_32_early_logical_apicid(int cpu)
+{
+ /* Match with APIC_LDR read. Otherwise setup_local_APIC complains. */
+ return 1 << cpu;
+}
+#endif
+
+static void xen_noop(void)
+{
+}
+
+static void xen_silent_inquire(int apicid)
+{
+}
+
+static struct apic xen_pv_apic = {
+ .name = "Xen PV",
+ .probe = xen_apic_probe_pv,
+ .acpi_madt_oem_check = xen_madt_oem_check,
+ .apic_id_valid = xen_id_always_valid,
+ .apic_id_registered = xen_id_always_registered,
+
+ /* .irq_delivery_mode - used in native_compose_msi_msg only */
+ /* .irq_dest_mode - used in native_compose_msi_msg only */
+
+ .target_cpus = default_target_cpus,
+ .disable_esr = 0,
+ /* .dest_logical - default_send_IPI_ use it but we use our own. */
+ .check_apicid_used = default_check_apicid_used, /* Used on 32-bit */
+
+ .vector_allocation_domain = flat_vector_allocation_domain,
+ .init_apic_ldr = xen_noop, /* setup_local_APIC calls it */
+
+ .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
+ .setup_apic_routing = NULL,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */
+ .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */
+ .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */
+
+ .get_apic_id = xen_get_apic_id,
+ .set_apic_id = xen_set_apic_id, /* Can be NULL on 32-bit. */
+ .apic_id_mask = 0xFF << 24, /* Used by verify_local_APIC. Match with what xen_get_apic_id does. */
+
+ .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
+
+#ifdef CONFIG_SMP
+ .send_IPI_mask = xen_send_IPI_mask,
+ .send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself,
+ .send_IPI_allbutself = xen_send_IPI_allbutself,
+ .send_IPI_all = xen_send_IPI_all,
+ .send_IPI_self = xen_send_IPI_self,
+#endif
+ /* .wait_for_init_deassert- used by AP bootup - smp_callin which we don't use */
+ .inquire_remote_apic = xen_silent_inquire,
+
+ .read = xen_apic_read,
+ .write = xen_apic_write,
+ .eoi_write = xen_apic_write,
+
+ .icr_read = xen_apic_icr_read,
+ .icr_write = xen_apic_icr_write,
+ .wait_icr_idle = xen_noop,
+ .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle,
+
+#ifdef CONFIG_X86_32
+ /* generic_processor_info and setup_local_APIC. */
+ .x86_32_early_logical_apicid = xen_x86_32_early_logical_apicid,
+#endif
+};
+
+static void __init xen_apic_check(void)
+{
+ if (apic == &xen_pv_apic)
+ return;
+
+ pr_info("Switched APIC routing from %s to %s.\n", apic->name,
+ xen_pv_apic.name);
+ apic = &xen_pv_apic;
+}
void __init xen_init_apic(void)
{
x86_io_apic_ops.read = xen_io_apic_read;
+ /* On PV guests the APIC CPUID bit is disabled so none of the
+ * routines end up executing. */
+ if (!xen_initial_domain())
+ apic = &xen_pv_apic;
+
+ x86_platform.apic_post_init = xen_apic_check;
}
+apic_driver(xen_pv_apic);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 81665c9f2132..46957ead3060 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -928,92 +928,6 @@ static void xen_io_delay(void)
{
}
-#ifdef CONFIG_X86_LOCAL_APIC
-static unsigned long xen_set_apic_id(unsigned int x)
-{
- WARN_ON(1);
- return x;
-}
-static unsigned int xen_get_apic_id(unsigned long x)
-{
- return ((x)>>24) & 0xFFu;
-}
-static u32 xen_apic_read(u32 reg)
-{
- struct xen_platform_op op = {
- .cmd = XENPF_get_cpuinfo,
- .interface_version = XENPF_INTERFACE_VERSION,
- .u.pcpu_info.xen_cpuid = 0,
- };
- int ret = 0;
-
- /* Shouldn't need this as APIC is turned off for PV, and we only
- * get called on the bootup processor. But just in case. */
- if (!xen_initial_domain() || smp_processor_id())
- return 0;
-
- if (reg == APIC_LVR)
- return 0x10;
-
- if (reg != APIC_ID)
- return 0;
-
- ret = HYPERVISOR_dom0_op(&op);
- if (ret)
- return 0;
-
- return op.u.pcpu_info.apic_id << 24;
-}
-
-static void xen_apic_write(u32 reg, u32 val)
-{
- /* Warn to see if there's any stray references */
- WARN_ON(1);
-}
-
-static u64 xen_apic_icr_read(void)
-{
- return 0;
-}
-
-static void xen_apic_icr_write(u32 low, u32 id)
-{
- /* Warn to see if there's any stray references */
- WARN_ON(1);
-}
-
-static void xen_apic_wait_icr_idle(void)
-{
- return;
-}
-
-static u32 xen_safe_apic_wait_icr_idle(void)
-{
- return 0;
-}
-
-static void set_xen_basic_apic_ops(void)
-{
- apic->read = xen_apic_read;
- apic->write = xen_apic_write;
- apic->icr_read = xen_apic_icr_read;
- apic->icr_write = xen_apic_icr_write;
- apic->wait_icr_idle = xen_apic_wait_icr_idle;
- apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
- apic->set_apic_id = xen_set_apic_id;
- apic->get_apic_id = xen_get_apic_id;
-
-#ifdef CONFIG_SMP
- apic->send_IPI_allbutself = xen_send_IPI_allbutself;
- apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
- apic->send_IPI_mask = xen_send_IPI_mask;
- apic->send_IPI_all = xen_send_IPI_all;
- apic->send_IPI_self = xen_send_IPI_self;
-#endif
-}
-
-#endif
-
static void xen_clts(void)
{
struct multicall_space mcs;
@@ -1619,7 +1533,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
/*
* set up the basic apic ops.
*/
- set_xen_basic_apic_ops();
+ xen_init_apic();
#endif
if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
@@ -1732,8 +1646,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
if (HYPERVISOR_dom0_op(&op) == 0)
boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;
- xen_init_apic();
-
/* Make sure ACS will be enabled */
pci_request_acs();
@@ -1848,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = {
static void __init xen_hvm_guest_init(void)
{
+ if (xen_pv_domain())
+ return;
+
init_hvm_pv_info();
xen_hvm_init_shared_info();
@@ -1863,6 +1778,7 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
}
+#endif
static bool xen_nopv = false;
static __init int xen_parse_nopv(char *arg)
@@ -1872,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg)
}
early_param("xen_nopv", xen_parse_nopv);
-static uint32_t __init xen_hvm_platform(void)
+static uint32_t __init xen_platform(void)
{
if (xen_nopv)
return 0;
- if (xen_pv_domain())
- return 0;
-
return xen_cpuid_base();
}
@@ -1897,11 +1810,19 @@ bool xen_hvm_need_lapic(void)
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
-const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
- .name = "Xen HVM",
- .detect = xen_hvm_platform,
+static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+{
+ if (xen_pv_domain())
+ clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+const struct hypervisor_x86 x86_hyper_xen = {
+ .name = "Xen",
+ .detect = xen_platform,
+#ifdef CONFIG_XEN_PVHVM
.init_platform = xen_hvm_guest_init,
+#endif
.x2apic_available = xen_x2apic_para_available,
+ .set_cpu_features = xen_set_cpu_features,
};
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
-#endif
+EXPORT_SYMBOL(x86_hyper_xen);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index adca9e2b6553..dd151b2045b0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -502,7 +502,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
return pte_mfn_to_pfn(pud.pud);
@@ -589,7 +589,7 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
-#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
/*
* (Yet another) pagetable walker. This one is intended for pinning a
@@ -1628,7 +1628,7 @@ static void xen_release_pmd(unsigned long pfn)
xen_release_ptpage(pfn, PT_PMD);
}
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2046,7 +2046,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
#endif
@@ -2056,7 +2056,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
pv_mmu_ops.release_pte = xen_release_pte;
pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
pv_mmu_ops.alloc_pud = xen_alloc_pud;
pv_mmu_ops.release_pud = xen_release_pud;
#endif
@@ -2122,14 +2122,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
-#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
.activate_mm = xen_activate_mm,
.dup_mmap = xen_dup_mmap,
@@ -2436,99 +2436,11 @@ void __init xen_hvm_init_mmu_ops(void)
}
#endif
-#ifdef CONFIG_XEN_PVH
-/*
- * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user
- * space creating new guest on pvh dom0 and needing to map domU pages.
- */
-static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
- unsigned int domid)
-{
- int rc, err = 0;
- xen_pfn_t gpfn = lpfn;
- xen_ulong_t idx = fgfn;
-
- struct xen_add_to_physmap_range xatp = {
- .domid = DOMID_SELF,
- .foreign_domid = domid,
- .size = 1,
- .space = XENMAPSPACE_gmfn_foreign,
- };
- set_xen_guest_handle(xatp.idxs, &idx);
- set_xen_guest_handle(xatp.gpfns, &gpfn);
- set_xen_guest_handle(xatp.errs, &err);
-
- rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
- if (rc < 0)
- return rc;
- return err;
-}
-
-static int xlate_remove_from_p2m(unsigned long spfn, int count)
-{
- struct xen_remove_from_physmap xrp;
- int i, rc;
-
- for (i = 0; i < count; i++) {
- xrp.domid = DOMID_SELF;
- xrp.gpfn = spfn+i;
- rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
- if (rc)
- break;
- }
- return rc;
-}
-
-struct xlate_remap_data {
- unsigned long fgfn; /* foreign domain's gfn */
- pgprot_t prot;
- domid_t domid;
- int index;
- struct page **pages;
-};
-
-static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
-{
- int rc;
- struct xlate_remap_data *remap = data;
- unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
- pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
-
- rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
- if (rc)
- return rc;
- native_set_pte(ptep, pteval);
-
- return 0;
-}
-
-static int xlate_remap_gfn_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long mfn,
- int nr, pgprot_t prot, unsigned domid,
- struct page **pages)
-{
- int err;
- struct xlate_remap_data pvhdata;
-
- BUG_ON(!pages);
-
- pvhdata.fgfn = mfn;
- pvhdata.prot = prot;
- pvhdata.domid = domid;
- pvhdata.index = 0;
- pvhdata.pages = pages;
- err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
- xlate_map_pte_fn, &pvhdata);
- flush_tlb_all();
- return err;
-}
-#endif
-
#define REMAP_BATCH_SIZE 16
struct remap_data {
- unsigned long mfn;
+ xen_pfn_t *mfn;
+ bool contiguous;
pgprot_t prot;
struct mmu_update *mmu_update;
};
@@ -2537,7 +2449,14 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
unsigned long addr, void *data)
{
struct remap_data *rmd = data;
- pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));
+ pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+
+ /* If we have a contiguous range, just update the mfn itself,
+ else update the pointer to the "next mfn". */
+ if (rmd->contiguous)
+ (*rmd->mfn)++;
+ else
+ rmd->mfn++;
rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
rmd->mmu_update->val = pte_val_ma(pte);
@@ -2546,26 +2465,26 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
return 0;
}
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t mfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages)
-
+static int do_remap_mfn(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages)
{
+ int err = 0;
struct remap_data rmd;
struct mmu_update mmu_update[REMAP_BATCH_SIZE];
- int batch;
unsigned long range;
- int err = 0;
+ int mapped = 0;
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
/* We need to update the local page tables and the xen HAP */
- return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
- domid, pages);
+ return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+ prot, domid, pages);
#else
return -EINVAL;
#endif
@@ -2573,9 +2492,15 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
rmd.mfn = mfn;
rmd.prot = prot;
+ /* We use the err_ptr to indicate whether we are doing a contiguous
+ * mapping or a discontiguous mapping. */
+ rmd.contiguous = !err_ptr;
while (nr) {
- batch = min(REMAP_BATCH_SIZE, nr);
+ int index = 0;
+ int done = 0;
+ int batch = min(REMAP_BATCH_SIZE, nr);
+ int batch_left = batch;
range = (unsigned long)batch << PAGE_SHIFT;
rmd.mmu_update = mmu_update;
@@ -2584,23 +2509,72 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
if (err)
goto out;
- err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
- if (err < 0)
- goto out;
+ /* We record the error for each page that fails, but
+ * continue mapping until the whole set is done. */
+ do {
+ int i;
+
+ err = HYPERVISOR_mmu_update(&mmu_update[index],
+ batch_left, &done, domid);
+
+ /*
+ * @err_ptr may be the same buffer as @mfn, so
+ * only clear it after each chunk of @mfn is
+ * used.
+ */
+ if (err_ptr) {
+ for (i = index; i < index + done; i++)
+ err_ptr[i] = 0;
+ }
+ if (err < 0) {
+ if (!err_ptr)
+ goto out;
+ err_ptr[i] = err;
+ done++; /* Skip failed frame. */
+ } else
+ mapped += done;
+ batch_left -= done;
+ index += done;
+ } while (batch_left);
nr -= batch;
addr += range;
+ if (err_ptr)
+ err_ptr += batch;
}
-
- err = 0;
out:
xen_flush_tlb_all();
- return err;
+ return err < 0 ? err : mapped;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
+{
+ return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid, struct page **pages)
+{
+ /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
+ * and it is otherwise quite hard to detect later what the actual
+ * cause of "wrong memory was mapped in" was.
+ */
+ BUG_ON(err_ptr == NULL);
+ return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
+
/* Returns: 0 success */
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages)
@@ -2609,22 +2583,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
return 0;
#ifdef CONFIG_XEN_PVH
- while (numpgs--) {
- /*
- * The mmu has already cleaned up the process mmu
- * resources at this point (lookup_address will return
- * NULL).
- */
- unsigned long pfn = page_to_pfn(pages[numpgs]);
-
- xlate_remove_from_p2m(pfn, 1);
- }
- /*
- * We don't need to flush tlbs because as part of
- * xlate_remove_from_p2m, the hypervisor will do tlb flushes
- * after removing the p2m entries from the EPT/NPT
- */
- return 0;
+ return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
#else
return -EINVAL;
#endif
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 7413ee3706d0..86484384492e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -90,14 +90,10 @@ static void cpu_bringup(void)
set_cpu_online(cpu, true);
- this_cpu_write(cpu_state, CPU_ONLINE);
-
- wmb();
+ cpu_set_state_online(cpu); /* Implies full memory barrier. */
/* We can take interrupts now: we're officially "up". */
local_irq_enable();
-
- wmb(); /* make sure everything is out */
}
/*
@@ -451,7 +447,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
xen_setup_timer(cpu);
xen_init_lock_cpu(cpu);
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+ /*
+ * PV VCPUs are always successfully taken down (see 'while' loop
+ * in xen_cpu_die()), so -EBUSY is an error.
+ */
+ rc = cpu_check_up_prepare(cpu);
+ if (rc)
+ return rc;
/* make sure interrupts start blocked */
per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -467,10 +469,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
BUG_ON(rc);
- while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+ while (cpu_report_state(cpu) != CPU_ONLINE)
HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
- barrier();
- }
return 0;
}
@@ -499,11 +499,11 @@ static void xen_cpu_die(unsigned int cpu)
schedule_timeout(HZ/10);
}
- cpu_die_common(cpu);
-
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- xen_teardown_timer(cpu);
+ if (common_cpu_die(cpu) == 0) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
+ }
}
static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -735,6 +735,16 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
+
+ /*
+ * This can happen if CPU was offlined earlier and
+ * offlining timed out in common_cpu_die().
+ */
+ if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ }
+
/*
* xen_smp_intr_init() needs to run before native_cpu_up()
* so that IPI vectors are set up on the booting CPU before
@@ -756,12 +766,6 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
return rc;
}
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
- xen_cpu_die(cpu);
- native_cpu_die(cpu);
-}
-
void __init xen_hvm_smp_init(void)
{
if (!xen_have_vector_callback)
@@ -769,7 +773,7 @@ void __init xen_hvm_smp_init(void)
smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
smp_ops.cpu_up = xen_hvm_cpu_up;
- smp_ops.cpu_die = xen_hvm_cpu_die;
+ smp_ops.cpu_die = xen_cpu_die;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index d9497698645a..53b4c0811f4f 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data)
tick_resume_local();
}
+static void xen_vcpu_notify_suspend(void *data)
+{
+ tick_suspend_local();
+}
+
void xen_arch_resume(void)
{
on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
}
+
+void xen_arch_suspend(void)
+{
+ on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
+}
diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c
index 520022d1a181..a702ec2f5931 100644
--- a/arch/x86/xen/trace.c
+++ b/arch/x86/xen/trace.c
@@ -1,54 +1,12 @@
#include <linux/ftrace.h>
#include <xen/interface/xen.h>
+#include <xen/interface/xen-mca.h>
-#define N(x) [__HYPERVISOR_##x] = "("#x")"
+#define HYPERCALL(x) [__HYPERVISOR_##x] = "("#x")",
static const char *xen_hypercall_names[] = {
- N(set_trap_table),
- N(mmu_update),
- N(set_gdt),
- N(stack_switch),
- N(set_callbacks),
- N(fpu_taskswitch),
- N(sched_op_compat),
- N(dom0_op),
- N(set_debugreg),
- N(get_debugreg),
- N(update_descriptor),
- N(memory_op),
- N(multicall),
- N(update_va_mapping),
- N(set_timer_op),
- N(event_channel_op_compat),
- N(xen_version),
- N(console_io),
- N(physdev_op_compat),
- N(grant_table_op),
- N(vm_assist),
- N(update_va_mapping_otherdomain),
- N(iret),
- N(vcpu_op),
- N(set_segment_base),
- N(mmuext_op),
- N(acm_op),
- N(nmi_op),
- N(sched_op),
- N(callback_op),
- N(xenoprof_op),
- N(event_channel_op),
- N(physdev_op),
- N(hvm_op),
-
-/* Architecture-specific hypercall definitions. */
- N(arch_0),
- N(arch_1),
- N(arch_2),
- N(arch_3),
- N(arch_4),
- N(arch_5),
- N(arch_6),
- N(arch_7),
+#include <asm/xen-hypercalls.h>
};
-#undef N
+#undef HYPERCALL
static const char *xen_hypercall_name(unsigned op)
{
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 674b222544b7..8afdfccf6086 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -12,6 +12,8 @@
#include <xen/interface/elfnote.h>
#include <xen/interface/features.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/xen-mca.h>
#include <asm/xen/interface.h>
#ifdef CONFIG_XEN_PVH
@@ -85,59 +87,14 @@ ENTRY(xen_pvh_early_cpu_init)
.pushsection .text
.balign PAGE_SIZE
ENTRY(hypercall_page)
-#define NEXT_HYPERCALL(x) \
- ENTRY(xen_hypercall_##x) \
- .skip 32
-
-NEXT_HYPERCALL(set_trap_table)
-NEXT_HYPERCALL(mmu_update)
-NEXT_HYPERCALL(set_gdt)
-NEXT_HYPERCALL(stack_switch)
-NEXT_HYPERCALL(set_callbacks)
-NEXT_HYPERCALL(fpu_taskswitch)
-NEXT_HYPERCALL(sched_op_compat)
-NEXT_HYPERCALL(platform_op)
-NEXT_HYPERCALL(set_debugreg)
-NEXT_HYPERCALL(get_debugreg)
-NEXT_HYPERCALL(update_descriptor)
-NEXT_HYPERCALL(ni)
-NEXT_HYPERCALL(memory_op)
-NEXT_HYPERCALL(multicall)
-NEXT_HYPERCALL(update_va_mapping)
-NEXT_HYPERCALL(set_timer_op)
-NEXT_HYPERCALL(event_channel_op_compat)
-NEXT_HYPERCALL(xen_version)
-NEXT_HYPERCALL(console_io)
-NEXT_HYPERCALL(physdev_op_compat)
-NEXT_HYPERCALL(grant_table_op)
-NEXT_HYPERCALL(vm_assist)
-NEXT_HYPERCALL(update_va_mapping_otherdomain)
-NEXT_HYPERCALL(iret)
-NEXT_HYPERCALL(vcpu_op)
-NEXT_HYPERCALL(set_segment_base)
-NEXT_HYPERCALL(mmuext_op)
-NEXT_HYPERCALL(xsm_op)
-NEXT_HYPERCALL(nmi_op)
-NEXT_HYPERCALL(sched_op)
-NEXT_HYPERCALL(callback_op)
-NEXT_HYPERCALL(xenoprof_op)
-NEXT_HYPERCALL(event_channel_op)
-NEXT_HYPERCALL(physdev_op)
-NEXT_HYPERCALL(hvm_op)
-NEXT_HYPERCALL(sysctl)
-NEXT_HYPERCALL(domctl)
-NEXT_HYPERCALL(kexec_op)
-NEXT_HYPERCALL(tmem_op) /* 38 */
-ENTRY(xen_hypercall_rsvr)
- .skip 320
-NEXT_HYPERCALL(mca) /* 48 */
-NEXT_HYPERCALL(arch_1)
-NEXT_HYPERCALL(arch_2)
-NEXT_HYPERCALL(arch_3)
-NEXT_HYPERCALL(arch_4)
-NEXT_HYPERCALL(arch_5)
-NEXT_HYPERCALL(arch_6)
- .balign PAGE_SIZE
+ .skip PAGE_SIZE
+
+#define HYPERCALL(n) \
+ .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
+ .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
+#include <asm/xen-hypercalls.h>
+#undef HYPERCALL
+
.popsection
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e31d4949124a..87be10e8b57a 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE
If unsure, leave the default value here.
+config XTFPGA_LCD
+ bool "Enable XTFPGA LCD driver"
+ depends on XTENSA_PLATFORM_XTFPGA
+ default n
+ help
+ There's a 2x16 LCD on most XTFPGA boards; the kernel may output
+ progress messages there during bootup/shutdown. It may be useful
+ during board bringup.
+
+ If unsure, say N.
+
+config XTFPGA_LCD_BASE_ADDR
+ hex "XTFPGA LCD base address"
+ depends on XTFPGA_LCD
+ default "0x0d0c0000"
+ help
+ Base address of the LCD controller inside the KIO region.
+ Different boards from the XTFPGA family have the LCD controller at
+ different addresses. Please consult the prototyping user guide for your
+ board for the correct address. A wrong address here may lead to a
+ hardware lockup.
+
+config XTFPGA_LCD_8BIT_ACCESS
+ bool "Use 8-bit access to XTFPGA LCD"
+ depends on XTFPGA_LCD
+ default n
+ help
+ The LCD may be connected with a 4- or 8-bit interface; 8-bit access
+ may only be used with an 8-bit interface. Please consult the
+ prototyping user guide for your board for the correct interface width.
+
endmenu
menu "Executable file formats"
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
index dec9178840f6..cd0b9e34adc8 100644
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -40,6 +40,12 @@
#clock-cells = <0>;
compatible = "fixed-clock";
};
+
+ clk54: clk54 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <54000000>;
+ };
};
soc {
@@ -65,5 +71,63 @@
local-mac-address = [00 50 c2 13 6f 00];
clocks = <&osc>;
};
+
+ i2s0: xtfpga-i2s@0d080000 {
+ #sound-dai-cells = <0>;
+ compatible = "cdns,xtfpga-i2s";
+ reg = <0x0d080000 0x40>;
+ interrupts = <2 1>; /* external irq 2 */
+ clocks = <&cdce706 4>;
+ };
+
+ i2c0: i2c-master@0d090000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0d090000 0x20>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ interrupts = <4 1>;
+ clocks = <&osc>;
+
+ cdce706: clock-synth@69 {
+ compatible = "ti,cdce706";
+ #clock-cells = <1>;
+ reg = <0x69>;
+ clocks = <&clk54>;
+ clock-names = "clk_in0";
+ };
+ };
+
+ spi0: spi-master@0d0a0000 {
+ compatible = "cdns,xtfpga-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0d0a0000 0xc>;
+
+ tlv320aic23: sound-codec@0 {
+ #sound-dai-cells = <0>;
+ compatible = "tlv320aic23";
+ reg = <0>;
+ spi-max-frequency = <12500000>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&tlv320aic23>;
+ simple-audio-card,bitclock-master = <0>;
+ simple-audio-card,frame-master = <0>;
+ clocks = <&cdce706 4>;
+ };
};
};
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
new file mode 100644
index 000000000000..c4904db15582
--- /dev/null
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -0,0 +1,142 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_kc705_hifi"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_OCORES=y
+CONFIG_SPI=y
+CONFIG_SPI_XTENSA_XTFPGA=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_XTFPGA_I2S=y
+CONFIG_SND_SOC_TLV320AIC23_SPI=y
+CONFIG_SND_SIMPLE_CARD=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_COMMON_CLK_CDCE706=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index a9b5d3ba196c..9ad12c617184 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -44,7 +44,6 @@ typedef struct xtregs_coprocessor {
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 cpu; /* current CPU */
@@ -61,17 +60,6 @@ struct thread_info {
xtregs_user_t xtregs_user;
};
-#else /* !__ASSEMBLY__ */
-
-/* offsets into the thread_info struct for assembly code access */
-#define TI_TASK 0x00000000
-#define TI_EXEC_DOMAIN 0x00000004
-#define TI_FLAGS 0x00000008
-#define TI_STATUS 0x0000000C
-#define TI_CPU 0x00000010
-#define TI_PRE_COUNT 0x00000014
-#define TI_ADDR_LIMIT 0x00000018
-
#endif
/*
@@ -83,7 +71,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index db5bb72e2f4e..b95c30594355 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
__SYSCALL(324, sys_name_to_handle_at, 5)
#define __NR_open_by_handle_at 325
__SYSCALL(325, sys_open_by_handle_at, 3)
-#define __NR_sync_file_range 326
+#define __NR_sync_file_range2 326
__SYSCALL(326, sys_sync_file_range2, 6)
#define __NR_perf_event_open 327
__SYSCALL(327, sys_perf_event_open, 5)
@@ -749,8 +749,12 @@ __SYSCALL(337, sys_seccomp, 3)
__SYSCALL(338, sys_getrandom, 3)
#define __NR_memfd_create 339
__SYSCALL(339, sys_memfd_create, 2)
+#define __NR_bpf 340
+__SYSCALL(340, sys_bpf, 3)
+#define __NR_execveat 341
+__SYSCALL(341, sys_execveat, 5)
-#define __NR_syscall_count 340
+#define __NR_syscall_count 342
/*
* sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 18d962a8c0c2..d3a0f0fd56dd 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -29,6 +29,7 @@ AFLAGS_head.o += -mtext-section-literals
sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
-e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
+ -e 's/\*(\(\.text .*\))/*(.literal \1)/g' \
-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
quiet_cmd__cpp_lds_S = LDS $@
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 1915c7c889ba..b123ace3b67c 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -77,6 +77,14 @@ int main(void)
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
+ /* offsets in thread_info struct */
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_STATUS, thread_info, status);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+
/* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 3d733ba16f28..e87adaa07ff3 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -336,7 +336,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe *frame;
int err = 0, sig = ksig->sig;
- int signal;
unsigned long sp, ra, tp;
sp = regs->areg[1];
@@ -354,12 +353,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
}
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
-
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
}
@@ -400,19 +393,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
* Note: PS.CALLINC is set to one by start_thread
*/
regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
- regs->areg[6] = (unsigned long) signal;
+ regs->areg[6] = (unsigned long) sig;
regs->areg[7] = (unsigned long) &frame->info;
regs->areg[8] = (unsigned long) &frame->uc;
regs->threadptr = tp;
- /* Set access mode to USER_DS. Nomenclature is outdated, but
- * functionality is used in uaccess.h
- */
- set_fs(USER_DS);
-
#if DEBUG_SIG
printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
- current->comm, current->pid, signal, frame, regs->pc);
+ current->comm, current->pid, sig, frame, regs->pc);
#endif
return 0;
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index d05f8feeb8d7..17b1ef3232e4 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
{
struct iss_net_private *lp = (struct iss_net_private *)priv;
- spin_lock(&lp->lock);
iss_net_poll();
+ spin_lock(&lp->lock);
mod_timer(&lp->timer, jiffies + lp->timer_val);
spin_unlock(&lp->lock);
}
@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
struct iss_net_private *lp = netdev_priv(dev);
int err;
- spin_lock(&lp->lock);
+ spin_lock_bh(&lp->lock);
err = lp->tp.open(lp);
if (err < 0)
@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
while ((err = iss_net_rx(dev)) > 0)
;
- spin_lock(&opened_lock);
+ spin_unlock_bh(&lp->lock);
+ spin_lock_bh(&opened_lock);
list_add(&lp->opened_list, &opened);
- spin_unlock(&opened_lock);
+ spin_unlock_bh(&opened_lock);
+ spin_lock_bh(&lp->lock);
init_timer(&lp->timer);
lp->timer_val = ISS_NET_TIMER_VALUE;
@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
mod_timer(&lp->timer, jiffies + lp->timer_val);
out:
- spin_unlock(&lp->lock);
+ spin_unlock_bh(&lp->lock);
return err;
}
@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
- spin_lock(&lp->lock);
+ spin_lock_bh(&lp->lock);
spin_lock(&opened_lock);
list_del(&opened);
@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
lp->tp.close(lp);
- spin_unlock(&lp->lock);
+ spin_unlock_bh(&lp->lock);
return 0;
}
static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
- unsigned long flags;
int len;
netif_stop_queue(dev);
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_bh(&lp->lock);
len = lp->tp.write(lp, &skb);
@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
}
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_bh(&lp->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(hwaddr->sa_data))
return -EADDRNOTAVAIL;
- spin_lock(&lp->lock);
+ spin_lock_bh(&lp->lock);
memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
- spin_unlock(&lp->lock);
+ spin_unlock_bh(&lp->lock);
return 0;
}
@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
*lp = (struct iss_net_private) {
.device_list = LIST_HEAD_INIT(lp->device_list),
.opened_list = LIST_HEAD_INIT(lp->opened_list),
- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
.dev = dev,
.index = index,
- };
+ };
+ spin_lock_init(&lp->lock);
/*
* If this name ends up conflicting with an existing registered
* netdevice, that is OK, register_netdev{,ice}() will notice this
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
index b9ae206340cd..7839d38b2337 100644
--- a/arch/xtensa/platforms/xtfpga/Makefile
+++ b/arch/xtensa/platforms/xtfpga/Makefile
@@ -6,4 +6,5 @@
#
# Note 2! The CFLAGS definitions are in the main makefile...
-obj-y = setup.o lcd.o
+obj-y += setup.o
+obj-$(CONFIG_XTFPGA_LCD) += lcd.o
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index 6edd20bb4565..0a55bb9c5420 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -40,9 +40,6 @@
/* UART */
#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
-/* LCD instruction and data addresses. */
-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
/* Misc. */
#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
@@ -62,4 +59,7 @@
/* 5*rx buffs + 5*tx buffs */
#define OETH_SRAMBUFF_SIZE (5 * 0x600 + 5 * 0x600)
+#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
+#define C67X00_SIZE 0x10
+#define C67X00_IRQ 5
#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
index 0e435645af5a..4c8541ed1139 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
@@ -11,10 +11,25 @@
#ifndef __XTENSA_XTAVNET_LCD_H
#define __XTENSA_XTAVNET_LCD_H
+#ifdef CONFIG_XTFPGA_LCD
/* Display string STR at position POS on the LCD. */
void lcd_disp_at_pos(char *str, unsigned char pos);
/* Shift the contents of the LCD display left or right. */
void lcd_shiftleft(void);
void lcd_shiftright(void);
+#else
+static inline void lcd_disp_at_pos(char *str, unsigned char pos)
+{
+}
+
+static inline void lcd_shiftleft(void)
+{
+}
+
+static inline void lcd_shiftright(void)
+{
+}
+#endif
+
#endif
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
index 2872301598df..4dc0c1b43f4b 100644
--- a/arch/xtensa/platforms/xtfpga/lcd.c
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -1,50 +1,63 @@
/*
- * Driver for the LCD display on the Tensilica LX60 Board.
+ * Driver for the LCD display on the Tensilica XTFPGA board family.
+ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2006 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/
-/*
- *
- * FIXME: this code is from the examples from the LX60 user guide.
- *
- * The lcd_pause function does busy waiting, which is probably not
- * great. Maybe the code could be changed to use kernel timers, or
- * change the hardware to not need to wait.
- */
-
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <platform/hardware.h>
#include <platform/lcd.h>
-#include <linux/delay.h>
-#define LCD_PAUSE_ITERATIONS 4000
+/* LCD instruction and data addresses. */
+#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
+#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
+
#define LCD_CLEAR 0x1
#define LCD_DISPLAY_ON 0xc
/* 8bit and 2 lines display */
#define LCD_DISPLAY_MODE8BIT 0x38
+#define LCD_DISPLAY_MODE4BIT 0x28
#define LCD_DISPLAY_POS 0x80
#define LCD_SHIFT_LEFT 0x18
#define LCD_SHIFT_RIGHT 0x1c
+static void lcd_put_byte(u8 *addr, u8 data)
+{
+#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ ACCESS_ONCE(*addr) = data;
+#else
+ ACCESS_ONCE(*addr) = data & 0xf0;
+ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+#endif
+}
+
static int __init lcd_init(void)
{
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
mdelay(5);
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
udelay(200);
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ udelay(50);
+#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+ udelay(50);
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
+#endif
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
udelay(50);
- *LCD_INSTR_ADDR = LCD_CLEAR;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
mdelay(10);
lcd_disp_at_pos("XTENSA LINUX", 0);
return 0;
@@ -52,10 +65,10 @@ static int __init lcd_init(void)
void lcd_disp_at_pos(char *str, unsigned char pos)
{
- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
udelay(100);
while (*str != 0) {
- *LCD_DATA_ADDR = *str;
+ lcd_put_byte(LCD_DATA_ADDR, *str);
udelay(200);
str++;
}
@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
void lcd_shiftleft(void)
{
- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
udelay(50);
}
void lcd_shiftright(void)
{
- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
udelay(50);
}
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 57fd08b36f51..b4cf70e535ab 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -189,6 +189,7 @@ void __init platform_calibrate_ccount(void)
#include <linux/serial_8250.h>
#include <linux/if.h>
#include <net/ethoc.h>
+#include <linux/usb/c67x00.h>
/*----------------------------------------------------------------------------
* Ethernet -- OpenCores Ethernet MAC (ethoc driver)
@@ -233,6 +234,38 @@ static struct platform_device ethoc_device = {
};
/*----------------------------------------------------------------------------
+ * USB Host/Device -- Cypress CY7C67300
+ */
+
+static struct resource c67x00_res[] = {
+ [0] = { /* register space */
+ .start = C67X00_PADDR,
+ .end = C67X00_PADDR + C67X00_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* IRQ number */
+ .start = C67X00_IRQ,
+ .end = C67X00_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct c67x00_platform_data c67x00_pdata = {
+ .sie_config = C67X00_SIE1_HOST | C67X00_SIE2_UNUSED,
+ .hpi_regstep = 4,
+};
+
+static struct platform_device c67x00_device = {
+ .name = "c67x00",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(c67x00_res),
+ .resource = c67x00_res,
+ .dev = {
+ .platform_data = &c67x00_pdata,
+ },
+};
+
+/*----------------------------------------------------------------------------
* UART
*/
@@ -268,6 +301,7 @@ static struct platform_device xtavnet_uart = {
/* platform devices */
static struct platform_device *platform_devices[] __initdata = {
&ethoc_device,
+ &c67x00_device,
&xtavnet_uart,
};
diff --git a/block/blk-core.c b/block/blk-core.c
index 794c3e7f01cf..7871603f0a29 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -552,11 +552,25 @@ void blk_cleanup_queue(struct request_queue *q)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
+ bdi_destroy(&q->backing_dev_info);
+
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
+/* Allocate memory local to the request queue */
+static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+{
+ int nid = (int)(long)data;
+ return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+}
+
+static void free_request_struct(void *element, void *unused)
+{
+ kmem_cache_free(request_cachep, element);
+}
+
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask)
{
@@ -569,9 +583,10 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
- mempool_free_slab, request_cachep,
- gfp_mask, q->node);
+ rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
+ free_request_struct,
+ (void *)(long)q->node, gfp_mask,
+ q->node);
if (!rl->rq_pool)
return -ENOMEM;
diff --git a/block/blk-map.c b/block/blk-map.c
index b8d2725324a6..da310a105429 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -124,10 +124,10 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
{
struct iovec iov;
struct iov_iter i;
+ int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
- iov.iov_base = ubuf;
- iov.iov_len = len;
- iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);
+ if (unlikely(ret < 0))
+ return ret;
return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 1630a20d5dcf..b79685e06b70 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -436,6 +436,7 @@ int blk_mq_register_disk(struct gendisk *disk)
return 0;
}
+EXPORT_SYMBOL_GPL(blk_mq_register_disk);
void blk_mq_sysfs_unregister(struct request_queue *q)
{
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 33c428530193..e68b71b85a7e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);
/*
* Check if any of the ctx's have pending work in this hardware queue
@@ -42,7 +41,7 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
unsigned int i;
- for (i = 0; i < hctx->ctx_map.map_size; i++)
+ for (i = 0; i < hctx->ctx_map.size; i++)
if (hctx->ctx_map.map[i].word)
return true;
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
{
while (true) {
int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
if (percpu_ref_tryget_live(&q->mq_usage_counter))
return 0;
+ if (!(gfp & __GFP_WAIT))
+ return -EBUSY;
+
ret = wait_event_interruptible(q->mq_freeze_wq,
!q->mq_freeze_depth || blk_queue_dying(q));
if (blk_queue_dying(q))
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
if (freeze) {
percpu_ref_kill(&q->mq_usage_counter);
- blk_mq_run_queues(q);
+ blk_mq_run_hw_queues(q, false);
}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
struct blk_mq_alloc_data alloc_data;
int ret;
- ret = blk_mq_queue_enter(q);
+ ret = blk_mq_queue_enter(q, gfp);
if (ret)
return ERR_PTR(ret);
@@ -675,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv)
data.next = blk_rq_timeout(round_jiffies_up(data.next));
mod_timer(&q->timeout, data.next);
} else {
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_tag_idle(hctx);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ /* the hctx may be unmapped, so check it here */
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_idle(hctx);
+ }
}
}
@@ -728,7 +733,7 @@ static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
struct blk_mq_ctx *ctx;
int i;
- for (i = 0; i < hctx->ctx_map.map_size; i++) {
+ for (i = 0; i < hctx->ctx_map.size; i++) {
struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
unsigned int off, bit;
@@ -853,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
spin_lock(&hctx->lock);
list_splice(&rq_list, &hctx->dispatch);
spin_unlock(&hctx->lock);
+ /*
+ * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
+ * it's possible the queue is stopped and restarted again
+ * before this. A queue restart will dispatch requests, and since
+ * requests in rq_list aren't added into hctx->dispatch yet,
+ * the requests in rq_list might get lost.
+ *
+ * blk_mq_run_hw_queue() already checks the STOPPED bit.
+ */
+ blk_mq_run_hw_queue(hctx, true);
}
}
@@ -904,7 +919,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
&hctx->run_work, 0);
}
-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx;
int i;
@@ -915,9 +930,10 @@ static void blk_mq_run_queues(struct request_queue *q)
test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
- blk_mq_run_hw_queue(hctx, false);
+ blk_mq_run_hw_queue(hctx, async);
}
}
+EXPORT_SYMBOL(blk_mq_run_hw_queues);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
@@ -1186,7 +1202,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
int rw = bio_data_dir(bio);
struct blk_mq_alloc_data alloc_data;
- if (unlikely(blk_mq_queue_enter(q))) {
+ if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
bio_endio(bio, -EIO);
return NULL;
}
@@ -1517,8 +1533,6 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
if (!bitmap->map)
return -ENOMEM;
- bitmap->map_size = num_maps;
-
total = nr_cpu_ids;
for (i = 0; i < num_maps; i++) {
bitmap->map[i].depth = min(total, bitmap->bits_per_word);
@@ -1570,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
return NOTIFY_OK;
}
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
- struct request_queue *q = hctx->queue;
- struct blk_mq_tag_set *set = q->tag_set;
-
- if (set->tags[hctx->queue_num])
- return NOTIFY_OK;
-
- set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
- if (!set->tags[hctx->queue_num])
- return NOTIFY_STOP;
-
- hctx->tags = set->tags[hctx->queue_num];
- return NOTIFY_OK;
-}
-
static int blk_mq_hctx_notify(void *data, unsigned long action,
unsigned int cpu)
{
@@ -1593,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
return blk_mq_hctx_cpu_offline(hctx, cpu);
- else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
- return blk_mq_hctx_cpu_online(hctx, cpu);
+
+ /*
+ * In case of CPU online, tags may be reallocated
+ * in blk_mq_map_swqueue() after mapping is updated.
+ */
return NOTIFY_OK;
}
@@ -1759,8 +1760,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
continue;
hctx = q->mq_ops->map_queue(q, i);
- cpumask_set_cpu(i, hctx->cpumask);
- hctx->nr_ctx++;
/*
* Set local node, IFF we have more than one hw queue. If
@@ -1776,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
unsigned int i;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
+ struct blk_mq_tag_set *set = q->tag_set;
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
@@ -1797,21 +1797,34 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
queue_for_each_hw_ctx(q, hctx, i) {
+ struct blk_mq_ctxmap *map = &hctx->ctx_map;
+
/*
* If no software queues are mapped to this hardware queue,
* disable it and free the request entries.
*/
if (!hctx->nr_ctx) {
- struct blk_mq_tag_set *set = q->tag_set;
-
if (set->tags[i]) {
blk_mq_free_rq_map(set, set->tags[i], i);
set->tags[i] = NULL;
- hctx->tags = NULL;
}
+ hctx->tags = NULL;
continue;
}
+ /* unmapped hw queue can be remapped after CPU topo changed */
+ if (!set->tags[i])
+ set->tags[i] = blk_mq_init_rq_map(set, i);
+ hctx->tags = set->tags[i];
+ WARN_ON(!hctx->tags);
+
+ /*
+ * Set the map size to the number of mapped software queues.
+ * This is more accurate and more efficient than looping
+ * over all possibly mapped software queues.
+ */
+ map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
+
/*
* Initialize batch roundrobin counts
*/
@@ -1889,9 +1902,25 @@ void blk_mq_release(struct request_queue *q)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
+ struct request_queue *uninit_q, *q;
+
+ uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+ if (!uninit_q)
+ return ERR_PTR(-ENOMEM);
+
+ q = blk_mq_init_allocated_queue(set, uninit_q);
+ if (IS_ERR(q))
+ blk_cleanup_queue(uninit_q);
+
+ return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q)
+{
struct blk_mq_hw_ctx **hctxs;
struct blk_mq_ctx __percpu *ctx;
- struct request_queue *q;
unsigned int *map;
int i;
@@ -1926,20 +1955,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
hctxs[i]->queue_num = i;
}
- q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
- if (!q)
- goto err_hctxs;
-
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
* See blk_register_queue() for details.
*/
if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
- goto err_mq_usage;
+ goto err_hctxs;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
- blk_queue_rq_timeout(q, 30000);
+ blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
q->nr_queues = nr_cpu_ids;
q->nr_hw_queues = set->nr_hw_queues;
@@ -1965,9 +1990,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
else
blk_queue_make_request(q, blk_sq_make_request);
- if (set->timeout)
- blk_queue_rq_timeout(q, set->timeout);
-
/*
* Do this after blk_queue_make_request() overrides it...
*/
@@ -1979,7 +2001,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
if (blk_mq_init_hw_queues(q, set))
- goto err_mq_usage;
+ goto err_hctxs;
mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list);
@@ -1991,8 +2013,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
return q;
-err_mq_usage:
- blk_cleanup_queue(q);
err_hctxs:
kfree(map);
for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2007,7 +2027,7 @@ err_percpu:
free_percpu(ctx);
return ERR_PTR(-ENOMEM);
}
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
void blk_mq_free_queue(struct request_queue *q)
{
@@ -2075,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
*/
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_freeze_queue_start(q);
- list_for_each_entry(q, &all_q_list, all_q_node)
+ list_for_each_entry(q, &all_q_list, all_q_node) {
blk_mq_freeze_queue_wait(q);
+ /*
+ * timeout handler can't touch hw queue during the
+ * reinitialization
+ */
+ del_timer_sync(&q->timeout);
+ }
+
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_queue_reinit(q);
@@ -2159,7 +2186,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+ if (!set->ops->queue_rq || !set->ops->map_queue)
return -EINVAL;
if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index faaf36ade7eb..2b8fd302f677 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
blk_trace_shutdown(q);
- bdi_destroy(&q->backing_dev_info);
-
ida_simple_remove(&blk_queue_ida, q->id);
call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
diff --git a/block/bounce.c b/block/bounce.c
index ab21ba203d5c..ed9dd8067120 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -221,8 +221,8 @@ bounce:
if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
continue;
- inc_zone_page_state(to->bv_page, NR_BOUNCE);
to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+ inc_zone_page_state(to->bv_page, NR_BOUNCE);
if (rw == WRITE) {
char *vto, *vfrom;
diff --git a/block/elevator.c b/block/elevator.c
index 59794d0d38e3..8985038f398c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
if (unlikely(!eq))
- goto err;
+ return NULL;
eq->type = e;
kobject_init(&eq->kobj, &elv_ktype);
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
hash_init(eq->hash);
return eq;
-err:
- kfree(eq);
- elevator_put(e);
- return NULL;
}
EXPORT_SYMBOL(elevator_alloc);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e1f71c396193..55b6f15dac90 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -335,16 +335,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
struct iov_iter i;
struct iovec *iov = NULL;
- ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
- 0, NULL, &iov);
- if (ret < 0) {
- kfree(iov);
+ ret = import_iovec(rq_data_dir(rq),
+ hdr->dxferp, hdr->iovec_count,
+ 0, &iov, &i);
+ if (ret < 0)
goto out_free_cdb;
- }
/* SG_IO howto says that the shorter of the two wins */
- iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
- min_t(unsigned, ret, hdr->dxfer_len));
+ iov_iter_truncate(&i, hdr->dxfer_len);
ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
kfree(iov);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 50f4da44a304..8aaf298a80e1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -436,6 +436,14 @@ config CRYPTO_MD5_OCTEON
MD5 message digest algorithm (RFC1321) implemented
using OCTEON crypto instructions, when available.
+config CRYPTO_MD5_PPC
+ tristate "MD5 digest algorithm (PPC)"
+ depends on PPC
+ select CRYPTO_HASH
+ help
+ MD5 message digest algorithm (RFC1321) implemented
+ in PPC assembler.
+
config CRYPTO_MD5_SPARC64
tristate "MD5 digest algorithm (SPARC64)"
depends on SPARC64
@@ -546,34 +554,23 @@ config CRYPTO_SHA512_SSSE3
Extensions version 1 (AVX1), or Advanced Vector Extensions
version 2 (AVX2) instructions, when available.
-config CRYPTO_SHA1_SPARC64
- tristate "SHA1 digest algorithm (SPARC64)"
- depends on SPARC64
- select CRYPTO_SHA1
- select CRYPTO_HASH
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using sparc64 crypto instructions, when available.
-
-config CRYPTO_SHA1_ARM
- tristate "SHA1 digest algorithm (ARM-asm)"
- depends on ARM
+config CRYPTO_SHA1_OCTEON
+ tristate "SHA1 digest algorithm (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
select CRYPTO_SHA1
select CRYPTO_HASH
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using optimized ARM assembler.
+ using OCTEON crypto instructions, when available.
-config CRYPTO_SHA1_ARM_NEON
- tristate "SHA1 digest algorithm (ARM NEON)"
- depends on ARM && KERNEL_MODE_NEON
- select CRYPTO_SHA1_ARM
+config CRYPTO_SHA1_SPARC64
+ tristate "SHA1 digest algorithm (SPARC64)"
+ depends on SPARC64
select CRYPTO_SHA1
select CRYPTO_HASH
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using optimized ARM NEON assembly, when NEON instructions are
- available.
+ using sparc64 crypto instructions, when available.
config CRYPTO_SHA1_PPC
tristate "SHA1 digest algorithm (powerpc)"
@@ -582,6 +579,13 @@ config CRYPTO_SHA1_PPC
This is the powerpc hardware accelerated implementation of the
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
+config CRYPTO_SHA1_PPC_SPE
+ tristate "SHA1 digest algorithm (PPC SPE)"
+ depends on PPC && SPE
+ help
+ SHA-1 secure hash standard (DFIPS 180-4) implemented
+ using powerpc SPE SIMD instruction set.
+
config CRYPTO_SHA1_MB
tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
depends on X86 && 64BIT
@@ -610,6 +614,24 @@ config CRYPTO_SHA256
This code also includes SHA-224, a 224 bit hash with 112 bits
of security against collision attacks.
+config CRYPTO_SHA256_PPC_SPE
+ tristate "SHA224 and SHA256 digest algorithm (PPC SPE)"
+ depends on PPC && SPE
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ SHA224 and SHA256 secure hash standard (DFIPS 180-2)
+ implemented using powerpc SPE SIMD instruction set.
+
+config CRYPTO_SHA256_OCTEON
+ tristate "SHA224 and SHA256 digest algorithm (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ SHA-256 secure hash standard (DFIPS 180-2) implemented
+ using OCTEON crypto instructions, when available.
+
config CRYPTO_SHA256_SPARC64
tristate "SHA224 and SHA256 digest algorithm (SPARC64)"
depends on SPARC64
@@ -631,29 +653,23 @@ config CRYPTO_SHA512
This code also includes SHA-384, a 384 bit hash with 192 bits
of security against collision attacks.
-config CRYPTO_SHA512_SPARC64
- tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
- depends on SPARC64
+config CRYPTO_SHA512_OCTEON
+ tristate "SHA384 and SHA512 digest algorithms (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
select CRYPTO_SHA512
select CRYPTO_HASH
help
SHA-512 secure hash standard (DFIPS 180-2) implemented
- using sparc64 crypto instructions, when available.
+ using OCTEON crypto instructions, when available.
-config CRYPTO_SHA512_ARM_NEON
- tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
- depends on ARM && KERNEL_MODE_NEON
+config CRYPTO_SHA512_SPARC64
+ tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
+ depends on SPARC64
select CRYPTO_SHA512
select CRYPTO_HASH
help
SHA-512 secure hash standard (DFIPS 180-2) implemented
- using ARM NEON instructions, when available.
-
- This version of SHA implements a 512 bit hash with 256 bits of
- security against collision attacks.
-
- This code also includes SHA-384, a 384 bit hash with 192 bits
- of security against collision attacks.
+ using sparc64 crypto instructions, when available.
config CRYPTO_TGR192
tristate "Tiger digest algorithms"
@@ -817,45 +833,18 @@ config CRYPTO_AES_SPARC64
for some popular block cipher mode is supported too, including
ECB and CBC.
-config CRYPTO_AES_ARM
- tristate "AES cipher algorithms (ARM-asm)"
- depends on ARM
- select CRYPTO_ALGAPI
- select CRYPTO_AES
- help
- Use optimized AES assembler routines for ARM platforms.
-
- AES cipher algorithms (FIPS-197). AES uses the Rijndael
- algorithm.
-
- Rijndael appears to be consistently a very good performer in
- both hardware and software across a wide range of computing
- environments regardless of its use in feedback or non-feedback
- modes. Its key setup time is excellent, and its key agility is
- good. Rijndael's very low memory requirements make it very well
- suited for restricted-space environments, in which it also
- demonstrates excellent performance. Rijndael's operations are
- among the easiest to defend against power and timing attacks.
-
- The AES specifies three key sizes: 128, 192 and 256 bits
-
- See <http://csrc.nist.gov/encryption/aes/> for more information.
-
-config CRYPTO_AES_ARM_BS
- tristate "Bit sliced AES using NEON instructions"
- depends on ARM && KERNEL_MODE_NEON
- select CRYPTO_ALGAPI
- select CRYPTO_AES_ARM
- select CRYPTO_ABLK_HELPER
+config CRYPTO_AES_PPC_SPE
+ tristate "AES cipher algorithms (PPC SPE)"
+ depends on PPC && SPE
help
- Use a faster and more secure NEON based implementation of AES in CBC,
- CTR and XTS modes
-
- Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
- and for XTS mode encryption, CBC and XTS mode decryption speedup is
- around 25%. (CBC encryption speed is not affected by this driver.)
- This implementation does not rely on any lookup tables so it is
- believed to be invulnerable to cache timing attacks.
+	  AES cipher algorithms (FIPS-197). Additionally, acceleration for the
+	  popular block cipher modes ECB, CBC, CTR and XTS is supported.
+	  This module should only be used for low-power (router) devices
+	  without hardware AES acceleration (e.g. caam crypto). It reduces the
+	  size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
+	  timing attacks. Nevertheless it might not be as secure as other
+	  architecture-specific assembler implementations that work on 1KB
+	  tables or 256-byte S-boxes.
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
@@ -1199,7 +1188,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
Keys are allowed to be from 0 to 256 bits in length, in steps
of 8 bits.
- This module provides Serpent cipher algorithm that processes eigth
+ This module provides Serpent cipher algorithm that processes eight
blocks parallel using SSE2 instruction set.
See also:
@@ -1523,6 +1512,15 @@ config CRYPTO_USER_API_RNG
This option enables the user-spaces interface for random
number generator algorithms.
+config CRYPTO_USER_API_AEAD
+ tristate "User-space interface for AEAD cipher algorithms"
+ depends on NET
+ select CRYPTO_AEAD
+ select CRYPTO_USER_API
+ help
+	  This option enables the user-space interface for AEAD
+ cipher algorithms.
+
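+# Illustrative .config fragment (assumed values, not part of this change)
+# showing what the entry above pulls in through its depends/select lines:
+#
+#   CONFIG_NET=y
+#   CONFIG_CRYPTO_AEAD=y
+#   CONFIG_CRYPTO_USER_API=m
+#   CONFIG_CRYPTO_USER_API_AEAD=m
+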
config CRYPTO_HASH_INFO
bool
diff --git a/crypto/Makefile b/crypto/Makefile
index ba19465f9ad3..97b7d3ac87e7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
+obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
#
# generic algorithms and the async_tx api
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index ffe7278d4bd8..e1fcf53bb931 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -124,7 +124,8 @@ int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_ablkcipher *cryptd_tfm;
- cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
+ cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 7f8b7edcadca..f22cc56fd1b3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -358,8 +358,8 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (WARN_ON(npages == 0))
return -EINVAL;
-
- sg_init_table(sgl->sg, npages);
+ /* Add one extra for linking */
+ sg_init_table(sgl->sg, npages + 1);
for (i = 0, len = n; i < npages; i++) {
int plen = min_t(int, len, PAGE_SIZE - off);
@@ -369,18 +369,26 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
off = 0;
len -= plen;
}
+ sg_mark_end(sgl->sg + npages - 1);
+ sgl->npages = npages;
+
return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+{
+ sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+ sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+}
+EXPORT_SYMBOL_GPL(af_alg_link_sg);
+
void af_alg_free_sg(struct af_alg_sgl *sgl)
{
int i;
- i = 0;
- do {
+ for (i = 0; i < sgl->npages; i++)
put_page(sgl->pages[i]);
- } while (!sg_is_last(sgl->sg + (i++)));
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 83b04e0884b1..d2627a3d4ed8 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -64,6 +64,8 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_priority < 0)
return -EINVAL;
+ atomic_set(&alg->cra_refcnt, 1);
+
return crypto_set_driver_name(alg);
}
@@ -99,10 +101,9 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
return &n->list == stack ? top : &n->inst->alg.cra_users;
}
-static void crypto_remove_spawn(struct crypto_spawn *spawn,
- struct list_head *list)
+static void crypto_remove_instance(struct crypto_instance *inst,
+ struct list_head *list)
{
- struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
if (crypto_is_dead(&inst->alg))
@@ -167,7 +168,7 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
if (spawn->alg)
list_move(&spawn->list, &spawn->alg->cra_users);
else
- crypto_remove_spawn(spawn, list);
+ crypto_remove_instance(spawn->inst, list);
}
}
EXPORT_SYMBOL_GPL(crypto_remove_spawns);
@@ -188,7 +189,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
ret = -EEXIST;
- atomic_set(&alg->cra_refcnt, 1);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg)
goto err;
@@ -523,11 +523,14 @@ int crypto_register_instance(struct crypto_template *tmpl,
err = crypto_check_alg(&inst->alg);
if (err)
- goto err;
+ return err;
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
+ if (unlikely(!crypto_mod_get(&inst->alg)))
+ return -EAGAIN;
+
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(&inst->alg);
@@ -545,37 +548,30 @@ unlock:
goto err;
crypto_wait_for_test(larval);
+
+ /* Remove instance if test failed */
+ if (!(inst->alg.cra_flags & CRYPTO_ALG_TESTED))
+ crypto_unregister_instance(inst);
err = 0;
err:
+ crypto_mod_put(&inst->alg);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
-int crypto_unregister_instance(struct crypto_alg *alg)
+int crypto_unregister_instance(struct crypto_instance *inst)
{
- int err;
- struct crypto_instance *inst = (void *)alg;
- struct crypto_template *tmpl = inst->tmpl;
- LIST_HEAD(users);
-
- if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
- return -EINVAL;
-
- BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+ LIST_HEAD(list);
down_write(&crypto_alg_sem);
- hlist_del_init(&inst->list);
- err = crypto_remove_alg(alg, &users);
+ crypto_remove_spawns(&inst->alg, &list, NULL);
+ crypto_remove_instance(inst, &list);
up_write(&crypto_alg_sem);
- if (err)
- return err;
-
- tmpl->free(inst);
- crypto_remove_final(&users);
+ crypto_remove_final(&list);
return 0;
}
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
new file mode 100644
index 000000000000..69abada22373
--- /dev/null
+++ b/crypto/algif_aead.c
@@ -0,0 +1,663 @@
+/*
+ * algif_aead: User-space interface for AEAD algorithms
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ *
+ * This file provides the user-space API for AEAD ciphers.
+ *
+ * This file is derived from algif_skcipher.c.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/scatterwalk.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct aead_sg_list {
+ unsigned int cur;
+ struct scatterlist sg[ALG_MAX_PAGES];
+};
+
+struct aead_ctx {
+ struct aead_sg_list tsgl;
+ /*
+	 * RSGL_MAX_ENTRIES is an artificial limit: at most, user space can
+	 * cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
+	 * pages.
+ */
+#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
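+	/*
+	 * With ALG_MAX_PAGES == 16 and 4 KiB pages (illustrative
+	 * assumptions), this bounds the receive buffers at
+	 * 16 * 16 * 4 KiB = 1 MiB per request.
+	 */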
+ struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
+
+ void *iv;
+
+ struct af_alg_completion completion;
+
+ unsigned long used;
+
+ unsigned int len;
+ bool more;
+ bool merge;
+ bool enc;
+
+ size_t aead_assoclen;
+ struct aead_request aead_req;
+};
+
+static inline int aead_sndbuf(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+
+ return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+ ctx->used, 0);
+}
+
+static inline bool aead_writable(struct sock *sk)
+{
+ return PAGE_SIZE <= aead_sndbuf(sk);
+}
+
+static inline bool aead_sufficient_data(struct aead_ctx *ctx)
+{
+ unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
+
+ return (ctx->used >= (ctx->aead_assoclen + (ctx->enc ? 0 : as)));
+}
+
+static void aead_put_sgl(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ struct aead_sg_list *sgl = &ctx->tsgl;
+ struct scatterlist *sg = sgl->sg;
+ unsigned int i;
+
+ for (i = 0; i < sgl->cur; i++) {
+ if (!sg_page(sg + i))
+ continue;
+
+ put_page(sg_page(sg + i));
+ sg_assign_page(sg + i, NULL);
+ }
+ sgl->cur = 0;
+ ctx->used = 0;
+ ctx->more = 0;
+ ctx->merge = 0;
+}
+
+static void aead_wmem_wakeup(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ if (!aead_writable(sk))
+ return;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ rcu_read_unlock();
+}
+
+static int aead_wait_for_data(struct sock *sk, unsigned flags)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ long timeout;
+ DEFINE_WAIT(wait);
+ int err = -ERESTARTSYS;
+
+ if (flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+ for (;;) {
+ if (signal_pending(current))
+ break;
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (sk_wait_event(sk, &timeout, !ctx->more)) {
+ err = 0;
+ break;
+ }
+ }
+ finish_wait(sk_sleep(sk), &wait);
+
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+ return err;
+}
+
+static void aead_data_wakeup(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ struct socket_wq *wq;
+
+ if (ctx->more)
+ return;
+ if (!ctx->used)
+ return;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
+}
+
+static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ unsigned ivsize =
+ crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
+ struct aead_sg_list *sgl = &ctx->tsgl;
+ struct af_alg_control con = {};
+ long copied = 0;
+ bool enc = 0;
+ bool init = 0;
+ int err = -EINVAL;
+
+ if (msg->msg_controllen) {
+ err = af_alg_cmsg_send(msg, &con);
+ if (err)
+ return err;
+
+ init = 1;
+ switch (con.op) {
+ case ALG_OP_ENCRYPT:
+ enc = 1;
+ break;
+ case ALG_OP_DECRYPT:
+ enc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (con.iv && con.iv->ivlen != ivsize)
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+
+ if (init) {
+ ctx->enc = enc;
+ if (con.iv)
+ memcpy(ctx->iv, con.iv->iv, ivsize);
+
+ ctx->aead_assoclen = con.aead_assoclen;
+ }
+
+ while (size) {
+ unsigned long len = size;
+ struct scatterlist *sg = NULL;
+
+ /* use the existing memory in an allocated page */
+ if (ctx->merge) {
+ sg = sgl->sg + sgl->cur - 1;
+ len = min_t(unsigned long, len,
+ PAGE_SIZE - sg->offset - sg->length);
+ err = memcpy_from_msg(page_address(sg_page(sg)) +
+ sg->offset + sg->length,
+ msg, len);
+ if (err)
+ goto unlock;
+
+ sg->length += len;
+ ctx->merge = (sg->offset + sg->length) &
+ (PAGE_SIZE - 1);
+
+ ctx->used += len;
+ copied += len;
+ size -= len;
+ continue;
+ }
+
+ if (!aead_writable(sk)) {
+ /* user space sent too much data */
+ aead_put_sgl(sk);
+ err = -EMSGSIZE;
+ goto unlock;
+ }
+
+ /* allocate a new page */
+ len = min_t(unsigned long, size, aead_sndbuf(sk));
+ while (len) {
+ int plen = 0;
+
+ if (sgl->cur >= ALG_MAX_PAGES) {
+ aead_put_sgl(sk);
+ err = -E2BIG;
+ goto unlock;
+ }
+
+ sg = sgl->sg + sgl->cur;
+ plen = min_t(int, len, PAGE_SIZE);
+
+ sg_assign_page(sg, alloc_page(GFP_KERNEL));
+ err = -ENOMEM;
+ if (!sg_page(sg))
+ goto unlock;
+
+ err = memcpy_from_msg(page_address(sg_page(sg)),
+ msg, plen);
+ if (err) {
+ __free_page(sg_page(sg));
+ sg_assign_page(sg, NULL);
+ goto unlock;
+ }
+
+ sg->offset = 0;
+ sg->length = plen;
+ len -= plen;
+ ctx->used += plen;
+ copied += plen;
+ sgl->cur++;
+ size -= plen;
+ ctx->merge = plen & (PAGE_SIZE - 1);
+ }
+ }
+
+ err = 0;
+
+ ctx->more = msg->msg_flags & MSG_MORE;
+ if (!ctx->more && !aead_sufficient_data(ctx)) {
+ aead_put_sgl(sk);
+ err = -EMSGSIZE;
+ }
+
+unlock:
+ aead_data_wakeup(sk);
+ release_sock(sk);
+
+ return err ?: copied;
+}
+
+static ssize_t aead_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ struct aead_sg_list *sgl = &ctx->tsgl;
+ int err = -EINVAL;
+
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
+ if (sgl->cur >= ALG_MAX_PAGES)
+ return -E2BIG;
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+
+ if (!size)
+ goto done;
+
+ if (!aead_writable(sk)) {
+ /* user space sent too much data */
+ aead_put_sgl(sk);
+ err = -EMSGSIZE;
+ goto unlock;
+ }
+
+ ctx->merge = 0;
+
+ get_page(page);
+ sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+ sgl->cur++;
+ ctx->used += size;
+
+ err = 0;
+
+done:
+ ctx->more = flags & MSG_MORE;
+ if (!ctx->more && !aead_sufficient_data(ctx)) {
+ aead_put_sgl(sk);
+ err = -EMSGSIZE;
+ }
+
+unlock:
+ aead_data_wakeup(sk);
+ release_sock(sk);
+
+ return err ?: size;
+}
+
+static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ unsigned bs = crypto_aead_blocksize(crypto_aead_reqtfm(&ctx->aead_req));
+ unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
+ struct aead_sg_list *sgl = &ctx->tsgl;
+ struct scatterlist *sg = NULL;
+ struct scatterlist assoc[ALG_MAX_PAGES];
+ size_t assoclen = 0;
+ unsigned int i = 0;
+ int err = -EINVAL;
+ unsigned long used = 0;
+ size_t outlen = 0;
+ size_t usedpages = 0;
+ unsigned int cnt = 0;
+
+ /* Limit number of IOV blocks to be accessed below */
+ if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
+ return -ENOMSG;
+
+ lock_sock(sk);
+
+ /*
+ * AEAD memory structure: For encryption, the tag is appended to the
+ * ciphertext which implies that the memory allocated for the ciphertext
+ * must be increased by the tag length. For decryption, the tag
+ * is expected to be concatenated to the ciphertext. The plaintext
+ * therefore has a memory size of the ciphertext minus the tag length.
+ *
+ * The memory structure for cipher operation has the following
+ * structure:
+ * AEAD encryption input: assoc data || plaintext
+	 * AEAD encryption output: ciphertext || auth tag
+ * AEAD decryption input: assoc data || ciphertext || auth tag
+ * AEAD decryption output: plaintext
+ */
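+	/*
+	 * Illustrative example (assumed figures, e.g. gcm(aes) with a
+	 * 16-byte tag): encrypting 32 bytes of plaintext with 8 bytes of
+	 * assoc data means user space writes 8 + 32 = 40 bytes and must
+	 * supply room for 32 + 16 = 48 bytes of output.
+	 */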
+
+ if (ctx->more) {
+ err = aead_wait_for_data(sk, flags);
+ if (err)
+ goto unlock;
+ }
+
+ used = ctx->used;
+
+ /*
+	 * Make sure sufficient data is present -- note, the same check is
+	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
+	 * inform the data sender that something is wrong, but they are
+	 * irrelevant for maintaining kernel integrity. We need this check
+	 * here too in case user space decides not to honor the error returned
+	 * by sendmsg/sendpage and still calls recvmsg. This check protects
+	 * kernel integrity.
+ */
+ if (!aead_sufficient_data(ctx))
+ goto unlock;
+
+ /*
+ * The cipher operation input data is reduced by the associated data
+ * length as this data is processed separately later on.
+ */
+ used -= ctx->aead_assoclen;
+
+ if (ctx->enc) {
+ /* round up output buffer to multiple of block size */
+ outlen = ((used + bs - 1) / bs * bs);
+ /* add the size needed for the auth tag to be created */
+ outlen += as;
+ } else {
+ /* output data size is input without the authentication tag */
+ outlen = used - as;
+ /* round up output buffer to multiple of block size */
+ outlen = ((outlen + bs - 1) / bs * bs);
+ }
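+	/*
+	 * E.g. (hypothetical numbers): with bs = 16 and as = 16,
+	 * encrypting used = 20 bytes gives outlen = 32 + 16 = 48, while
+	 * decrypting used = 48 bytes gives outlen = 48 - 16 = 32.
+	 */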
+
+ /* convert iovecs of output buffers into scatterlists */
+ while (iov_iter_count(&msg->msg_iter)) {
+ size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
+ (outlen - usedpages));
+
+ /* make one iovec available as scatterlist */
+ err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
+ seglen);
+ if (err < 0)
+ goto unlock;
+ usedpages += err;
+ /* chain the new scatterlist with previous one */
+ if (cnt)
+ af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
+
+ /* we do not need more iovecs as we have sufficient memory */
+ if (outlen <= usedpages)
+ break;
+ iov_iter_advance(&msg->msg_iter, err);
+ cnt++;
+ }
+
+ err = -EINVAL;
+ /* ensure output buffer is sufficiently large */
+ if (usedpages < outlen)
+ goto unlock;
+
+ sg_init_table(assoc, ALG_MAX_PAGES);
+ assoclen = ctx->aead_assoclen;
+ /*
+ * Split scatterlist into two: first part becomes AD, second part
+ * is plaintext / ciphertext. The first part is assigned to assoc
+ * scatterlist. When this loop finishes, sg points to the start of the
+ * plaintext / ciphertext.
+ */
+ for (i = 0; i < ctx->tsgl.cur; i++) {
+ sg = sgl->sg + i;
+ if (sg->length <= assoclen) {
+ /* AD is larger than one page */
+ sg_set_page(assoc + i, sg_page(sg),
+ sg->length, sg->offset);
+ assoclen -= sg->length;
+ if (i >= ctx->tsgl.cur)
+ goto unlock;
+ } else if (!assoclen) {
+			/* current page is the start of plaintext / ciphertext */
+ if (i)
+ /* AD terminates at page boundary */
+ sg_mark_end(assoc + i - 1);
+ else
+ /* AD size is zero */
+ sg_mark_end(assoc);
+ break;
+ } else {
+ /* AD does not terminate at page boundary */
+ sg_set_page(assoc + i, sg_page(sg),
+ assoclen, sg->offset);
+ sg_mark_end(assoc + i);
+ /* plaintext / ciphertext starts after AD */
+ sg->length -= assoclen;
+ sg->offset += assoclen;
+ break;
+ }
+ }
+
+ aead_request_set_assoc(&ctx->aead_req, assoc, ctx->aead_assoclen);
+ aead_request_set_crypt(&ctx->aead_req, sg, ctx->rsgl[0].sg, used,
+ ctx->iv);
+
+ err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_aead_encrypt(&ctx->aead_req) :
+ crypto_aead_decrypt(&ctx->aead_req),
+ &ctx->completion);
+
+ if (err) {
+ /* EBADMSG implies a valid cipher operation took place */
+ if (err == -EBADMSG)
+ aead_put_sgl(sk);
+ goto unlock;
+ }
+
+ aead_put_sgl(sk);
+
+ err = 0;
+
+unlock:
+ for (i = 0; i < cnt; i++)
+ af_alg_free_sg(&ctx->rsgl[i]);
+
+ aead_wmem_wakeup(sk);
+ release_sock(sk);
+
+ return err ? err : outlen;
+}
+
+static unsigned int aead_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ unsigned int mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ if (!ctx->more)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (aead_writable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
+}
+
+static struct proto_ops algif_aead_ops = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+
+ .release = af_alg_release,
+ .sendmsg = aead_sendmsg,
+ .sendpage = aead_sendpage,
+ .recvmsg = aead_recvmsg,
+ .poll = aead_poll,
+};
+
+static void *aead_bind(const char *name, u32 type, u32 mask)
+{
+ return crypto_alloc_aead(name, type, mask);
+}
+
+static void aead_release(void *private)
+{
+ crypto_free_aead(private);
+}
+
+static int aead_setauthsize(void *private, unsigned int authsize)
+{
+ return crypto_aead_setauthsize(private, authsize);
+}
+
+static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+ return crypto_aead_setkey(private, key, keylen);
+}
+
+static void aead_sock_destruct(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct aead_ctx *ctx = ask->private;
+ unsigned int ivlen = crypto_aead_ivsize(
+ crypto_aead_reqtfm(&ctx->aead_req));
+
+ aead_put_sgl(sk);
+ sock_kzfree_s(sk, ctx->iv, ivlen);
+ sock_kfree_s(sk, ctx, ctx->len);
+ af_alg_release_parent(sk);
+}
+
+static int aead_accept_parent(void *private, struct sock *sk)
+{
+ struct aead_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
+ unsigned int ivlen = crypto_aead_ivsize(private);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ memset(ctx, 0, len);
+
+ ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
+ if (!ctx->iv) {
+ sock_kfree_s(sk, ctx, len);
+ return -ENOMEM;
+ }
+ memset(ctx->iv, 0, ivlen);
+
+ ctx->len = len;
+ ctx->used = 0;
+ ctx->more = 0;
+ ctx->merge = 0;
+ ctx->enc = 0;
+ ctx->tsgl.cur = 0;
+ ctx->aead_assoclen = 0;
+ af_alg_init_completion(&ctx->completion);
+ sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
+
+ ask->private = ctx;
+
+ aead_request_set_tfm(&ctx->aead_req, private);
+ aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
+
+ sk->sk_destruct = aead_sock_destruct;
+
+ return 0;
+}
+
+static const struct af_alg_type algif_type_aead = {
+ .bind = aead_bind,
+ .release = aead_release,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .accept = aead_accept_parent,
+ .ops = &algif_aead_ops,
+ .name = "aead",
+ .owner = THIS_MODULE
+};
+
+static int __init algif_aead_init(void)
+{
+ return af_alg_register_type(&algif_type_aead);
+}
+
+static void __exit algif_aead_exit(void)
+{
+ int err = af_alg_unregister_type(&algif_type_aead);
+ BUG_ON(err);
+}
+
+module_init(algif_aead_init);
+module_exit(algif_aead_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
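For reference, a minimal user-space sketch of driving the interface added above. This is not part of the patch: the gcm(aes) name, the all-zero key, the 16-byte tag length and the ALG_SET_AEAD_* socket options are assumptions taken from the surrounding code and the uapi headers, and error handling is omitted.

	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	#ifndef SOL_ALG
	#define SOL_ALG 279
	#endif

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "aead",
			.salg_name   = "gcm(aes)",	/* assumed AEAD implementation */
		};
		unsigned char key[16] = { 0 };		/* demo key only */
		int tfmfd, opfd;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

		/* configure the transform: key and expected auth tag length */
		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
		setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);

		/* one request socket per operation context */
		opfd = accept(tfmfd, NULL, 0);

		/*
		 * Data is then submitted with sendmsg(), carrying ALG_SET_OP,
		 * ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control messages plus
		 * "assoc data || plaintext" as payload; the ciphertext with the
		 * appended tag is collected with read(), matching the layout
		 * documented in aead_recvmsg() above.
		 */

		close(opfd);
		close(tfmfd);
		return 0;
	}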
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 01da360bdb55..1396ad0787fc 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,8 +34,8 @@ struct hash_ctx {
struct ahash_request req;
};
-static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
- struct msghdr *msg, size_t ignored)
+static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored)
{
int limit = ALG_MAX_PAGES * PAGE_SIZE;
struct sock *sk = sock->sk;
@@ -56,8 +56,8 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->more = 0;
- while (iov_iter_count(&msg->msg_iter)) {
- int len = iov_iter_count(&msg->msg_iter);
+ while (msg_data_left(msg)) {
+ int len = msg_data_left(msg);
if (len > limit)
len = limit;
@@ -139,8 +139,8 @@ unlock:
return err ?: size;
}
-static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 67f612cfed97..8109aaad2726 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -55,8 +55,8 @@ struct rng_ctx {
struct crypto_rng *drng;
};
-static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -87,7 +87,7 @@ static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
return genlen;
err = memcpy_to_msg(msg, result, len);
- memzero_explicit(result, genlen);
+ memzero_explicit(result, len);
return err ? err : len;
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 0c8a1e5ccadf..945075292bc9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -39,6 +39,7 @@ struct skcipher_ctx {
struct af_alg_completion completion;
+ atomic_t inflight;
unsigned used;
unsigned int len;
@@ -49,9 +50,65 @@ struct skcipher_ctx {
struct ablkcipher_request req;
};
+struct skcipher_async_rsgl {
+ struct af_alg_sgl sgl;
+ struct list_head list;
+};
+
+struct skcipher_async_req {
+ struct kiocb *iocb;
+ struct skcipher_async_rsgl first_sgl;
+ struct list_head list;
+ struct scatterlist *tsg;
+ char iv[];
+};
+
+#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
+ crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
+
+#define GET_REQ_SIZE(ctx) \
+ crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
+#define GET_IV_SIZE(ctx) \
+ crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
+static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
+{
+ struct skcipher_async_rsgl *rsgl, *tmp;
+ struct scatterlist *sgl;
+ struct scatterlist *sg;
+ int i, n;
+
+ list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
+ af_alg_free_sg(&rsgl->sgl);
+ if (rsgl != &sreq->first_sgl)
+ kfree(rsgl);
+ }
+ sgl = sreq->tsg;
+ n = sg_nents(sgl);
+ for_each_sg(sgl, sg, n, i)
+ put_page(sg_page(sg));
+
+ kfree(sreq->tsg);
+}
+
+static void skcipher_async_cb(struct crypto_async_request *req, int err)
+{
+ struct sock *sk = req->data;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+ struct kiocb *iocb = sreq->iocb;
+
+ atomic_dec(&ctx->inflight);
+ skcipher_free_async_sgls(sreq);
+ kfree(req);
+ iocb->ki_complete(iocb, err, err);
+}
+
static inline int skcipher_sndbuf(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
@@ -96,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
return 0;
}
-static void skcipher_pull_sgl(struct sock *sk, int used)
+static void skcipher_pull_sgl(struct sock *sk, int used, int put)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
@@ -123,8 +180,8 @@ static void skcipher_pull_sgl(struct sock *sk, int used)
if (sg[i].length)
return;
-
- put_page(sg_page(sg + i));
+ if (put)
+ put_page(sg_page(sg + i));
sg_assign_page(sg + i, NULL);
}
@@ -143,7 +200,7 @@ static void skcipher_free_sgl(struct sock *sk)
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- skcipher_pull_sgl(sk, ctx->used);
+ skcipher_pull_sgl(sk, ctx->used, 1);
}
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
@@ -239,8 +296,8 @@ static void skcipher_data_wakeup(struct sock *sk)
rcu_read_unlock();
}
-static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
- struct msghdr *msg, size_t size)
+static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t size)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -424,8 +481,153 @@ unlock:
return err ?: size;
}
-static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
- struct msghdr *msg, size_t ignored, int flags)
+static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
+{
+ struct skcipher_sg_list *sgl;
+ struct scatterlist *sg;
+ int nents = 0;
+
+ list_for_each_entry(sgl, &ctx->tsgl, list) {
+ sg = sgl->sg;
+
+ while (!sg->length)
+ sg++;
+
+ nents += sg_nents(sg);
+ }
+ return nents;
+}
+
+static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_sg_list *sgl;
+ struct scatterlist *sg;
+ struct skcipher_async_req *sreq;
+ struct ablkcipher_request *req;
+ struct skcipher_async_rsgl *last_rsgl = NULL;
+ unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+ unsigned int reqlen = sizeof(struct skcipher_async_req) +
+ GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+ int err = -ENOMEM;
+ bool mark = false;
+
+ lock_sock(sk);
+ req = kmalloc(reqlen, GFP_KERNEL);
+ if (unlikely(!req))
+ goto unlock;
+
+ sreq = GET_SREQ(req, ctx);
+ sreq->iocb = msg->msg_iocb;
+ memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
+ INIT_LIST_HEAD(&sreq->list);
+ sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
+ if (unlikely(!sreq->tsg)) {
+ kfree(req);
+ goto unlock;
+ }
+ sg_init_table(sreq->tsg, tx_nents);
+ memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
+ ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
+ ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_async_cb, sk);
+
+ while (iov_iter_count(&msg->msg_iter)) {
+ struct skcipher_async_rsgl *rsgl;
+ int used;
+
+ if (!ctx->used) {
+ err = skcipher_wait_for_data(sk, flags);
+ if (err)
+ goto free;
+ }
+ sgl = list_first_entry(&ctx->tsgl,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg;
+
+ while (!sg->length)
+ sg++;
+
+ used = min_t(unsigned long, ctx->used,
+ iov_iter_count(&msg->msg_iter));
+ used = min_t(unsigned long, used, sg->length);
+
+ if (txbufs == tx_nents) {
+ struct scatterlist *tmp;
+ int x;
+			/* Ran out of tx slots in async request;
+			 * need to expand. */
+ tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
+ GFP_KERNEL);
+ if (!tmp)
+ goto free;
+
+ sg_init_table(tmp, tx_nents * 2);
+ for (x = 0; x < tx_nents; x++)
+ sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
+ sreq->tsg[x].length,
+ sreq->tsg[x].offset);
+ kfree(sreq->tsg);
+ sreq->tsg = tmp;
+ tx_nents *= 2;
+ mark = true;
+ }
+ /* Need to take over the tx sgl from ctx
+		 * to the async req - these sgls will be freed later */
+ sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
+ sg->offset);
+
+ if (list_empty(&sreq->list)) {
+ rsgl = &sreq->first_sgl;
+ list_add_tail(&rsgl->list, &sreq->list);
+ } else {
+ rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
+ if (!rsgl) {
+ err = -ENOMEM;
+ goto free;
+ }
+ list_add_tail(&rsgl->list, &sreq->list);
+ }
+
+ used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
+ err = used;
+ if (used < 0)
+ goto free;
+ if (last_rsgl)
+ af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+ last_rsgl = rsgl;
+ len += used;
+ skcipher_pull_sgl(sk, used, 0);
+ iov_iter_advance(&msg->msg_iter, used);
+ }
+
+ if (mark)
+ sg_mark_end(sreq->tsg + txbufs - 1);
+
+ ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+ len, sreq->iv);
+ err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
+ if (err == -EINPROGRESS) {
+ atomic_inc(&ctx->inflight);
+ err = -EIOCBQUEUED;
+ goto unlock;
+ }
+free:
+ skcipher_free_async_sgls(sreq);
+ kfree(req);
+unlock:
+ skcipher_wmem_wakeup(sk);
+ release_sock(sk);
+ return err;
+}
+
+static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
+ int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -439,7 +641,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
long copied = 0;
lock_sock(sk);
- while (iov_iter_count(&msg->msg_iter)) {
+ while (msg_data_left(msg)) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
@@ -453,7 +655,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
goto unlock;
}
- used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));
+ used = min_t(unsigned long, ctx->used, msg_data_left(msg));
used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
err = used;
@@ -484,7 +686,7 @@ free:
goto unlock;
copied += used;
- skcipher_pull_sgl(sk, used);
+ skcipher_pull_sgl(sk, used, 1);
iov_iter_advance(&msg->msg_iter, used);
}
@@ -497,6 +699,13 @@ unlock:
return copied ?: err;
}
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
+ skcipher_recvmsg_async(sock, msg, flags) :
+ skcipher_recvmsg_sync(sock, msg, flags);
+}
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
poll_table *wait)
@@ -555,12 +764,25 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
return crypto_ablkcipher_setkey(private, key, keylen);
}
+static void skcipher_wait(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ int ctr = 0;
+
+ while (atomic_read(&ctx->inflight) && ctr++ < 100)
+ msleep(100);
+}
+
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+ if (atomic_read(&ctx->inflight))
+ skcipher_wait(sk);
+
skcipher_free_sgl(sk);
sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
@@ -592,6 +814,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
+ atomic_set(&ctx->inflight, 0);
af_alg_init_completion(&ctx->completion);
ask->private = ctx;
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 6f5bebc9bf01..765fe7609348 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -210,7 +210,11 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
byte_count = DEFAULT_BLK_SZ;
}
- err = byte_count;
+ /*
+ * Return 0 in case of success as mandated by the kernel
+ * crypto API interface definition.
+ */
+ err = 0;
dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
byte_count, ctx);
diff --git a/crypto/api.c b/crypto/api.c
index 2a81e98a0021..afe4610afc4b 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -257,6 +257,16 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
mask |= CRYPTO_ALG_TESTED;
}
+ /*
+	 * If the internal flag is set for a cipher, require a caller to
+	 * invoke the cipher with the internal flag to use that cipher.
+ * Also, if a caller wants to allocate a cipher that may or may
+ * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
+ * !(mask & CRYPTO_ALG_INTERNAL).
+ */
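+	/*
+	 * Example (for illustration): cryptd passes CRYPTO_ALG_INTERNAL in
+	 * both type and mask when wrapping an internal implementation, so
+	 * the mask is left untouched below; a plain crypto_alloc_*() call
+	 * passes neither bit, and the CRYPTO_ALG_INTERNAL added to the mask
+	 * here hides internal-only implementations from it.
+	 */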
+ if (!((type | mask) & CRYPTO_ALG_INTERNAL))
+ mask |= CRYPTO_ALG_INTERNAL;
+
larval = crypto_larval_lookup(name, type, mask);
if (IS_ERR(larval) || !crypto_is_larval(larval))
return larval;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index d05327caf69d..5d355e0c2633 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -124,6 +124,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
{
void **srcs;
int i;
+ int start = -1, stop = disks - 3;
if (submit->scribble)
srcs = submit->scribble;
@@ -134,10 +135,21 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
srcs[i] = (void*)raid6_empty_zero_page;
- } else
+ } else {
srcs[i] = page_address(blocks[i]) + offset;
+ if (i < disks - 2) {
+ stop = i;
+ if (start == -1)
+ start = i;
+ }
+ }
}
- raid6_call.gen_syndrome(disks, len, srcs);
+ if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
+ BUG_ON(!raid6_call.xor_syndrome);
+ if (start >= 0)
+ raid6_call.xor_syndrome(disks, start, stop, len, srcs);
+ } else
+ raid6_call.gen_syndrome(disks, len, srcs);
async_tx_sync_epilog(submit);
}
@@ -178,7 +190,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
- if (unmap &&
+ /* XORing P/Q is only implemented in software */
+ if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 650afac10fd7..b0602ba03111 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -168,6 +168,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
return ictx->queue;
}
+static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
+ u32 *mask)
+{
+ struct crypto_attr_type *algt;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return;
+ if ((algt->type & CRYPTO_ALG_INTERNAL))
+ *type |= CRYPTO_ALG_INTERNAL;
+ if ((algt->mask & CRYPTO_ALG_INTERNAL))
+ *mask |= CRYPTO_ALG_INTERNAL;
+}
+
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
const u8 *key, unsigned int keylen)
{
@@ -321,10 +335,13 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl,
struct cryptd_instance_ctx *ctx;
struct crypto_instance *inst;
struct crypto_alg *alg;
+ u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
int err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
- CRYPTO_ALG_TYPE_MASK);
+ cryptd_check_internal(tb, &type, &mask);
+
+ alg = crypto_get_attr_alg(tb, type, mask);
if (IS_ERR(alg))
return PTR_ERR(alg);
@@ -341,7 +358,10 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl,
if (err)
goto out_free_inst;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+ type |= CRYPTO_ALG_INTERNAL;
+ inst->alg.cra_flags = type;
inst->alg.cra_type = &crypto_ablkcipher_type;
inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
@@ -577,9 +597,13 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
struct ahash_instance *inst;
struct shash_alg *salg;
struct crypto_alg *alg;
+ u32 type = 0;
+ u32 mask = 0;
int err;
- salg = shash_attr_alg(tb[1], 0, 0);
+ cryptd_check_internal(tb, &type, &mask);
+
+ salg = shash_attr_alg(tb[1], type, mask);
if (IS_ERR(salg))
return PTR_ERR(salg);
@@ -598,7 +622,10 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ type = CRYPTO_ALG_ASYNC;
+ if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+ type |= CRYPTO_ALG_INTERNAL;
+ inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
@@ -719,10 +746,13 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
struct aead_instance_ctx *ctx;
struct crypto_instance *inst;
struct crypto_alg *alg;
+ u32 type = CRYPTO_ALG_TYPE_AEAD;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
int err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
- CRYPTO_ALG_TYPE_MASK);
+ cryptd_check_internal(tb, &type, &mask);
+
+ alg = crypto_get_attr_alg(tb, type, mask);
if (IS_ERR(alg))
return PTR_ERR(alg);
@@ -739,7 +769,10 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
if (err)
goto out_free_inst;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+ type |= CRYPTO_ALG_INTERNAL;
+ inst->alg.cra_flags = type;
inst->alg.cra_type = alg->cra_type;
inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
inst->alg.cra_init = cryptd_aead_init_tfm;
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index c5148a35ae0a..41dfe762b7fb 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -62,10 +62,14 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
else if (!exact)
match = !strcmp(q->cra_name, p->cru_name);
- if (match) {
- alg = q;
- break;
- }
+ if (!match)
+ continue;
+
+ if (unlikely(!crypto_mod_get(q)))
+ continue;
+
+ alg = q;
+ break;
}
up_read(&crypto_alg_sem);
@@ -205,9 +209,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
if (!alg)
return -ENOENT;
+ err = -ENOMEM;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
- return -ENOMEM;
+ goto drop_alg;
info.in_skb = in_skb;
info.out_skb = skb;
@@ -215,6 +220,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
info.nlmsg_flags = 0;
err = crypto_report_alg(alg, &info);
+
+drop_alg:
+ crypto_mod_put(alg);
+
if (err)
return err;
@@ -284,6 +293,7 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
up_write(&crypto_alg_sem);
+ crypto_mod_put(alg);
crypto_remove_final(&list);
return 0;
@@ -294,6 +304,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
+ int err;
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
@@ -310,13 +321,19 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
* if we try to unregister. Unregistering such an algorithm without
* removing the module is not possible, so we restrict to crypto
* instances that are build from templates. */
+ err = -EINVAL;
if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
- return -EINVAL;
+ goto drop_alg;
- if (atomic_read(&alg->cra_refcnt) != 1)
- return -EBUSY;
+ err = -EBUSY;
+ if (atomic_read(&alg->cra_refcnt) > 2)
+ goto drop_alg;
- return crypto_unregister_instance(alg);
+ err = crypto_unregister_instance((struct crypto_instance *)alg);
+
+drop_alg:
+ crypto_mod_put(alg);
+ return err;
}
static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
@@ -395,8 +412,10 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
alg = crypto_alg_match(p, exact);
- if (alg)
+ if (alg) {
+ crypto_mod_put(alg);
return -EEXIST;
+ }
if (strlen(p->cru_driver_name))
name = p->cru_driver_name;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index d8ff16e5c322..b69409cb7e6a 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -119,19 +119,19 @@ static const struct drbg_core drbg_cores[] = {
.statelen = 32, /* 256 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes128",
- .backend_cra_name = "ecb(aes)",
+ .backend_cra_name = "aes",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH192,
.statelen = 40, /* 320 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes192",
- .backend_cra_name = "ecb(aes)",
+ .backend_cra_name = "aes",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH256,
.statelen = 48, /* 384 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes256",
- .backend_cra_name = "ecb(aes)",
+ .backend_cra_name = "aes",
},
#endif /* CONFIG_CRYPTO_DRBG_CTR */
#ifdef CONFIG_CRYPTO_DRBG_HASH
@@ -308,9 +308,6 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
drbg_string_fill(&data, out, drbg_blocklen(drbg));
- /* 10.4.3 step 1 */
- memset(out, 0, drbg_blocklen(drbg));
-
/* 10.4.3 step 2 / 4 */
list_for_each_entry(curr, in, list) {
const unsigned char *pos = curr->buf;
@@ -406,7 +403,6 @@ static int drbg_ctr_df(struct drbg_state *drbg,
memset(pad, 0, drbg_blocklen(drbg));
memset(iv, 0, drbg_blocklen(drbg));
- memset(temp, 0, drbg_statelen(drbg));
/* 10.4.2 step 1 is implicit as we work byte-wise */
@@ -523,7 +519,6 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
unsigned int len = 0;
struct drbg_string cipherin;
- memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
if (3 > reseed)
memset(df_data, 0, drbg_statelen(drbg));
@@ -585,8 +580,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
int ret = 0;
struct drbg_string data;
- memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
-
/* 10.2.1.5.2 step 2 */
if (addtl && !list_empty(addtl)) {
ret = drbg_ctr_update(drbg, addtl, 2);
@@ -761,7 +754,6 @@ static struct drbg_state_ops drbg_hmac_ops = {
.generate = drbg_hmac_generate,
.crypto_init = drbg_init_hash_kernel,
.crypto_fini = drbg_fini_hash_kernel,
-
};
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
@@ -838,8 +830,6 @@ static int drbg_hash_df(struct drbg_state *drbg,
unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
struct drbg_string data;
- memset(tmp, 0, drbg_blocklen(drbg));
-
/* 10.4.1 step 3 */
input[0] = 1;
drbg_cpu_to_be32((outlen * 8), &input[1]);
@@ -879,7 +869,6 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
unsigned char *V = drbg->scratchpad;
unsigned char prefix = DRBG_PREFIX1;
- memset(drbg->scratchpad, 0, drbg_statelen(drbg));
if (!seed)
return -EINVAL;
@@ -921,9 +910,6 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
LIST_HEAD(datalist);
unsigned char prefix = DRBG_PREFIX2;
- /* this is value w as per documentation */
- memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
-
/* 10.1.1.4 step 2 */
if (!addtl || list_empty(addtl))
return 0;
@@ -959,9 +945,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
struct drbg_string data;
LIST_HEAD(datalist);
- memset(src, 0, drbg_statelen(drbg));
- memset(dst, 0, drbg_blocklen(drbg));
-
/* 10.1.1.4 step hashgen 2 */
memcpy(src, drbg->V, drbg_statelen(drbg));
@@ -1018,7 +1001,6 @@ static int drbg_hash_generate(struct drbg_state *drbg,
len = drbg_hash_hashgen(drbg, buf, buflen);
/* this is the value H as documented in 10.1.1.4 */
- memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
/* 10.1.1.4 step 4 */
drbg_string_fill(&data1, &prefix, 1);
list_add_tail(&data1.list, &datalist);
@@ -1298,7 +1280,7 @@ static void drbg_restore_shadow(struct drbg_state *drbg,
* as defined in SP800-90A. The additional input is mixed into
* the state in addition to the pulled entropy.
*
- * return: generated number of bytes
+ * return: 0 when all bytes are generated; < 0 in case of an error
*/
static int drbg_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
@@ -1437,6 +1419,11 @@ static int drbg_generate(struct drbg_state *drbg,
}
#endif
+ /*
+	 * All operations were successful; return 0 as mandated by
+ * the kernel crypto API interface.
+ */
+ len = 0;
err:
shadow->d_ops->crypto_fini(shadow);
drbg_restore_shadow(drbg, &shadow);
@@ -1644,24 +1631,24 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
int ret = 0;
- struct crypto_blkcipher *tfm;
+ struct crypto_cipher *tfm;
- tfm = crypto_alloc_blkcipher(drbg->core->backend_cra_name, 0, 0);
+ tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
pr_info("DRBG: could not allocate cipher TFM handle\n");
return PTR_ERR(tfm);
}
- BUG_ON(drbg_blocklen(drbg) != crypto_blkcipher_blocksize(tfm));
+ BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
drbg->priv_data = tfm;
return ret;
}
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
- struct crypto_blkcipher *tfm =
- (struct crypto_blkcipher *)drbg->priv_data;
+ struct crypto_cipher *tfm =
+ (struct crypto_cipher *)drbg->priv_data;
if (tfm)
- crypto_free_blkcipher(tfm);
+ crypto_free_cipher(tfm);
drbg->priv_data = NULL;
return 0;
}
@@ -1669,21 +1656,14 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
unsigned char *outval, const struct drbg_string *in)
{
- int ret = 0;
- struct scatterlist sg_in, sg_out;
- struct blkcipher_desc desc;
- struct crypto_blkcipher *tfm =
- (struct crypto_blkcipher *)drbg->priv_data;
-
- desc.tfm = tfm;
- desc.flags = 0;
- crypto_blkcipher_setkey(tfm, key, (drbg_keylen(drbg)));
- /* there is only component in *in */
- sg_init_one(&sg_in, in->buf, in->len);
- sg_init_one(&sg_out, outval, drbg_blocklen(drbg));
- ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, in->len);
+ struct crypto_cipher *tfm =
+ (struct crypto_cipher *)drbg->priv_data;
- return ret;
+ crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
+	/* there is only one component in *in */
+ BUG_ON(in->len < drbg_blocklen(drbg));
+ crypto_cipher_encrypt_one(tfm, outval, in->buf);
+ return 0;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index a8e870444ea9..fe5b495a434d 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -258,6 +258,20 @@ out_free_inst:
goto out;
}
+static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
+ u32 *mask)
+{
+ struct crypto_attr_type *algt;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return;
+ if ((algt->type & CRYPTO_ALG_INTERNAL))
+ *type |= CRYPTO_ALG_INTERNAL;
+ if ((algt->mask & CRYPTO_ALG_INTERNAL))
+ *mask |= CRYPTO_ALG_INTERNAL;
+}
+
static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
@@ -480,9 +494,13 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
struct ahash_instance *inst;
struct shash_alg *salg;
struct crypto_alg *alg;
+ u32 type = 0;
+ u32 mask = 0;
int err;
- salg = shash_attr_alg(tb[1], 0, 0);
+ mcryptd_check_internal(tb, &type, &mask);
+
+ salg = shash_attr_alg(tb[1], type, mask);
if (IS_ERR(salg))
return PTR_ERR(salg);
@@ -502,7 +520,10 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ type = CRYPTO_ALG_ASYNC;
+ if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+ type |= CRYPTO_ALG_INTERNAL;
+ inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
diff --git a/crypto/proc.c b/crypto/proc.c
index 4a0a7aad2204..4ffe73b51612 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -89,6 +89,9 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "selftest : %s\n",
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
+ seq_printf(m, "internal : %s\n",
+ (alg->cra_flags & CRYPTO_ALG_INTERNAL) ?
+ "yes" : "no");
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
seq_printf(m, "type : larval\n");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index a3e50c37eb6f..39e3acc438d9 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -23,111 +23,49 @@
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
+#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
-static int sha1_init(struct shash_desc *desc)
+static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
+ int blocks)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
+ u32 temp[SHA_WORKSPACE_WORDS];
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
+ while (blocks--) {
+ sha_transform(sst->state, src, temp);
+ src += SHA1_BLOCK_SIZE;
+ }
+ memzero_explicit(temp, sizeof(temp));
}
int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
-
- partial = sctx->count % SHA1_BLOCK_SIZE;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) >= SHA1_BLOCK_SIZE) {
- u32 temp[SHA_WORKSPACE_WORDS];
-
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data,
- done + SHA1_BLOCK_SIZE);
- src = sctx->buffer;
- }
-
- do {
- sha_transform(sctx->state, src, temp);
- done += SHA1_BLOCK_SIZE;
- src = data + done;
- } while (done + SHA1_BLOCK_SIZE <= len);
-
- memzero_explicit(temp, sizeof(temp));
- partial = 0;
- }
- memcpy(sctx->buffer + partial, src, len - done);
-
- return 0;
+ return sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
}
EXPORT_SYMBOL(crypto_sha1_update);
-
-/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- u32 i, index, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64 */
- index = sctx->count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- crypto_sha1_update(desc, padding, padlen);
-
- /* Append length */
- crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof *sctx);
-
- return 0;
+ sha1_base_do_finalize(desc, sha1_generic_block_fn);
+ return sha1_base_finish(desc, out);
}
-static int sha1_export(struct shash_desc *desc, void *out)
+int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int sha1_import(struct shash_desc *desc, const void *in)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
+ return sha1_final(desc, out);
}
+EXPORT_SYMBOL(crypto_sha1_finup);
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_init,
+ .init = sha1_base_init,
.update = crypto_sha1_update,
.final = sha1_final,
- .export = sha1_export,
- .import = sha1_import,
+ .finup = crypto_sha1_finup,
.descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index b001ff5c2efc..78431163ed3c 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -214,138 +215,43 @@ static void sha256_transform(u32 *state, const u8 *input)
memzero_explicit(W, 64 * sizeof(u32));
}
-static int sha224_init(struct shash_desc *desc)
+static void sha256_generic_block_fn(struct sha256_state *sst, u8 const *src,
+ int blocks)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
-
- return 0;
-}
-
-static int sha256_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
-
- return 0;
+ while (blocks--) {
+ sha256_transform(sst->state, src);
+ src += SHA256_BLOCK_SIZE;
+ }
}
int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) > 63) {
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data, done + 64);
- src = sctx->buf;
- }
-
- do {
- sha256_transform(sctx->state, src);
- done += 64;
- src = data + done;
- } while (done + 63 < len);
-
- partial = 0;
- }
- memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
+ return sha256_base_do_update(desc, data, len, sha256_generic_block_fn);
}
EXPORT_SYMBOL(crypto_sha256_update);
static int sha256_final(struct shash_desc *desc, u8 *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- unsigned int index, pad_len;
- int i;
- static const u8 padding[64] = { 0x80, };
-
- /* Save number of bits */
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64. */
- index = sctx->count & 0x3f;
- pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
- crypto_sha256_update(desc, padding, pad_len);
-
- /* Append length (before padding) */
- crypto_sha256_update(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be32(sctx->state[i]);
-
- /* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
+ sha256_base_do_finalize(desc, sha256_generic_block_fn);
+ return sha256_base_finish(desc, out);
}
-static int sha224_final(struct shash_desc *desc, u8 *hash)
+int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash)
{
- u8 D[SHA256_DIGEST_SIZE];
-
- sha256_final(desc, D);
-
- memcpy(hash, D, SHA224_DIGEST_SIZE);
- memzero_explicit(D, SHA256_DIGEST_SIZE);
-
- return 0;
-}
-
-static int sha256_export(struct shash_desc *desc, void *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int sha256_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ sha256_base_do_update(desc, data, len, sha256_generic_block_fn);
+ return sha256_final(desc, hash);
}
+EXPORT_SYMBOL(crypto_sha256_finup);
static struct shash_alg sha256_algs[2] = { {
.digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_init,
+ .init = sha256_base_init,
.update = crypto_sha256_update,
.final = sha256_final,
- .export = sha256_export,
- .import = sha256_import,
+ .finup = crypto_sha256_finup,
.descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
@@ -355,9 +261,10 @@ static struct shash_alg sha256_algs[2] = { {
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_init,
+ .init = sha224_base_init,
.update = crypto_sha256_update,
- .final = sha224_final,
+ .final = sha256_final,
+ .finup = crypto_sha256_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
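Note: the sha256_generic.c hunks remove sha224_init()/sha224_final() entirely; SHA-224 now reuses sha256_final(), and the base finish helper emits only .digestsize bytes. A toy illustration of how one finish routine can serve both digest sizes by truncating (local names, not the kernel's sha256_base_finish()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be32(unsigned char *p, uint32_t v)
{
	p[0] = (unsigned char)(v >> 24);
	p[1] = (unsigned char)(v >> 16);
	p[2] = (unsigned char)(v >> 8);
	p[3] = (unsigned char)v;
}

/* state[] holds the final 8 hash words; digest_size is 28 or 32. */
static void toy_sha2_finish(uint32_t state[8], unsigned char *out,
			    size_t digest_size)
{
	unsigned char full[32];
	size_t i;

	for (i = 0; i < 8; i++)
		put_be32(full + 4 * i, state[i]);

	memcpy(out, full, digest_size);		/* SHA-224 simply truncates */
	memset(full, 0, sizeof(full));
	memset(state, 0, 8 * sizeof(state[0]));	/* wipe sensitive state */
}

int main(void)
{
	uint32_t state[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char out[32];
	size_t i;

	toy_sha2_finish(state, out, 28);	/* SHA-224-sized output */
	for (i = 0; i < 28; i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}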
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 1c3c3767e079..eba965d18bfc 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -18,6 +18,7 @@
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/sha.h>
+#include <crypto/sha512_base.h>
#include <linux/percpu.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -130,125 +131,42 @@ sha512_transform(u64 *state, const u8 *input)
a = b = c = d = e = f = g = h = t1 = t2 = 0;
}
-static int
-sha512_init(struct shash_desc *desc)
+static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
+ int blocks)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- sctx->state[0] = SHA512_H0;
- sctx->state[1] = SHA512_H1;
- sctx->state[2] = SHA512_H2;
- sctx->state[3] = SHA512_H3;
- sctx->state[4] = SHA512_H4;
- sctx->state[5] = SHA512_H5;
- sctx->state[6] = SHA512_H6;
- sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static int
-sha384_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- sctx->state[0] = SHA384_H0;
- sctx->state[1] = SHA384_H1;
- sctx->state[2] = SHA384_H2;
- sctx->state[3] = SHA384_H3;
- sctx->state[4] = SHA384_H4;
- sctx->state[5] = SHA384_H5;
- sctx->state[6] = SHA384_H6;
- sctx->state[7] = SHA384_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
+ while (blocks--) {
+ sha512_transform(sst->state, src);
+ src += SHA512_BLOCK_SIZE;
+ }
}
int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- unsigned int i, index, part_len;
-
- /* Compute number of bytes mod 128 */
- index = sctx->count[0] & 0x7f;
-
- /* Update number of bytes */
- if ((sctx->count[0] += len) < len)
- sctx->count[1]++;
-
- part_len = 128 - index;
-
- /* Transform as many times as possible. */
- if (len >= part_len) {
- memcpy(&sctx->buf[index], data, part_len);
- sha512_transform(sctx->state, sctx->buf);
-
- for (i = part_len; i + 127 < len; i+=128)
- sha512_transform(sctx->state, &data[i]);
-
- index = 0;
- } else {
- i = 0;
- }
-
- /* Buffer remaining input */
- memcpy(&sctx->buf[index], &data[i], len - i);
-
- return 0;
+ return sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
}
EXPORT_SYMBOL(crypto_sha512_update);
-static int
-sha512_final(struct shash_desc *desc, u8 *hash)
+static int sha512_final(struct shash_desc *desc, u8 *hash)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- static u8 padding[128] = { 0x80, };
- __be64 *dst = (__be64 *)hash;
- __be64 bits[2];
- unsigned int index, pad_len;
- int i;
-
- /* Save number of bits */
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
-
- /* Pad out to 112 mod 128. */
- index = sctx->count[0] & 0x7f;
- pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
- crypto_sha512_update(desc, padding, pad_len);
-
- /* Append length (before padding) */
- crypto_sha512_update(desc, (const u8 *)bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be64(sctx->state[i]);
-
- /* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(struct sha512_state));
-
- return 0;
+ sha512_base_do_finalize(desc, sha512_generic_block_fn);
+ return sha512_base_finish(desc, hash);
}
-static int sha384_final(struct shash_desc *desc, u8 *hash)
+int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash)
{
- u8 D[64];
-
- sha512_final(desc, D);
-
- memcpy(hash, D, 48);
- memzero_explicit(D, 64);
-
- return 0;
+ sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
+ return sha512_final(desc, hash);
}
+EXPORT_SYMBOL(crypto_sha512_finup);
static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
- .init = sha512_init,
+ .init = sha512_base_init,
.update = crypto_sha512_update,
.final = sha512_final,
+ .finup = crypto_sha512_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
@@ -259,9 +177,10 @@ static struct shash_alg sha512_algs[2] = { {
}
}, {
.digestsize = SHA384_DIGEST_SIZE,
- .init = sha384_init,
+ .init = sha384_base_init,
.update = crypto_sha512_update,
- .final = sha384_final,
+ .final = sha512_final,
+ .finup = crypto_sha512_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 4b9e23fa4204..1a2800107fc8 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1155,9 +1155,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
goto out_free_req;
}
- sg_init_table(sg, TVMEMSIZE);
-
k = *keysize + *b_size;
+ sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));
+
if (k > PAGE_SIZE) {
sg_set_buf(sg, tvmem[0] + *keysize,
PAGE_SIZE - *keysize);
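Note: the tcrypt.c change initializes only as many scatterlist entries as the key plus block actually spans, DIV_ROUND_UP(k, PAGE_SIZE), instead of the full TVMEMSIZE, so the end marker lands on the last entry in use. A small userspace sketch of that entry-count arithmetic (PAGE_SIZE and DIV_ROUND_UP redefined locally for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long sizes[] = { 16 + 16, 24 + 1024, 32 + 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long k = sizes[i];	/* keysize + block size */

		printf("k=%lu -> %lu sg entries\n",
		       k, DIV_ROUND_UP(k, PAGE_SIZE));
	}
	return 0;
}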
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f4ed6d4205e7..f9bce3d7ee7f 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1474,11 +1474,11 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
for (j = 0; j < template[i].loops; j++) {
err = crypto_rng_get_bytes(tfm, result,
template[i].rlen);
- if (err != template[i].rlen) {
+ if (err < 0) {
printk(KERN_ERR "alg: cprng: Failed to obtain "
"the correct amount of random data for "
- "%s (requested %d, got %d)\n", algo,
- template[i].rlen, err);
+ "%s (requested %d)\n", algo,
+ template[i].rlen);
goto out;
}
}
@@ -1505,7 +1505,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
struct crypto_aead *tfm;
int err = 0;
- tfm = crypto_alloc_aead(driver, type, mask);
+ tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1534,7 +1534,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
struct crypto_cipher *tfm;
int err = 0;
- tfm = crypto_alloc_cipher(driver, type, mask);
+ tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1563,7 +1563,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
struct crypto_ablkcipher *tfm;
int err = 0;
- tfm = crypto_alloc_ablkcipher(driver, type, mask);
+ tfm = crypto_alloc_ablkcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: skcipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1636,7 +1636,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
struct crypto_ahash *tfm;
int err;
- tfm = crypto_alloc_ahash(driver, type, mask);
+ tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1664,7 +1664,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
if (err)
goto out;
- tfm = crypto_alloc_shash(driver, type, mask);
+ tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1706,7 +1706,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
struct crypto_rng *rng;
int err;
- rng = crypto_alloc_rng(driver, type, mask);
+ rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
if (IS_ERR(rng)) {
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
@@ -1733,7 +1733,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
if (!buf)
return -ENOMEM;
- drng = crypto_alloc_rng(driver, type, mask);
+ drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
if (IS_ERR(drng)) {
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
@@ -1759,7 +1759,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
ret = crypto_drbg_get_bytes_addtl(drng,
buf, test->expectedlen, &addtl);
}
- if (ret <= 0) {
+ if (ret < 0) {
printk(KERN_ERR "alg: drbg: could not obtain random data for "
"driver %s\n", driver);
goto outbuf;
@@ -1774,7 +1774,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
ret = crypto_drbg_get_bytes_addtl(drng,
buf, test->expectedlen, &addtl);
}
- if (ret <= 0) {
+ if (ret < 0) {
printk(KERN_ERR "alg: drbg: could not obtain random data for "
"driver %s\n", driver);
goto outbuf;
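Note: the testmgr.c RNG hunks change the success checks from comparing the return value against the requested length to testing for a negative errno, consistent with a generate call that reports success as 0 rather than as a byte count. A minimal sketch of that convention (toy_rng_generate() is a hypothetical stand-in, not a kernel API):

#include <stdio.h>
#include <string.h>

static int toy_rng_generate(unsigned char *buf, size_t len)
{
	memset(buf, 0xa5, len);		/* stand-in for real randomness */
	return 0;			/* 0 == success, <0 == -errno */
}

int main(void)
{
	unsigned char buf[32];
	int err = toy_rng_generate(buf, sizeof(buf));

	if (err < 0) {			/* not: err != sizeof(buf) */
		fprintf(stderr, "generate failed: %d\n", err);
		return 1;
	}
	printf("got %zu bytes\n", sizeof(buf));
	return 0;
}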
diff --git a/drivers/Makefile b/drivers/Makefile
index 527a6da8d539..46d2554be404 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -163,5 +163,5 @@ obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_RAS) += ras/
obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
-obj-$(CONFIG_CORESIGHT) += coresight/
+obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-$(CONFIG_ANDROID) += android/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e6c3ddd92665..ab2cbb51c6aa 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,7 +5,7 @@
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on !IA64_HP_SIM
- depends on IA64 || X86
+ depends on IA64 || X86 || (ARM64 && EXPERT)
depends on PCI
select PNP
default y
@@ -48,9 +48,16 @@ config ACPI_LEGACY_TABLES_LOOKUP
config ARCH_MIGHT_HAVE_ACPI_PDC
bool
+config ACPI_GENERIC_GSI
+ bool
+
+config ACPI_SYSTEM_POWER_STATES_SUPPORT
+ bool
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
+ depends on ACPI_SYSTEM_POWER_STATES_SUPPORT
default y
config ACPI_PROCFS_POWER
@@ -163,6 +170,7 @@ config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
select CPU_IDLE
+ depends on X86 || IA64
default y
help
This driver installs ACPI as the idle handler for Linux and uses
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 623b117ad1a2..8a063e276530 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -23,7 +23,7 @@ acpi-y += nvs.o
# Power management related files
acpi-y += wakeup.o
-acpi-y += sleep.o
+acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o
acpi-y += device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
@@ -56,6 +56,7 @@ ifdef CONFIG_ACPI_VIDEO
acpi-y += video_detect.o
endif
acpi-y += acpi_lpat.o
+acpi-$(CONFIG_ACPI_GENERIC_GSI) += gsi.o
# These are (potentially) separate modules
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 1284138e42ab..4bf75597f732 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -102,7 +102,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
pdevinfo.id = -1;
pdevinfo.res = resources;
pdevinfo.num_res = count;
- pdevinfo.acpi_node.companion = adev;
+ pdevinfo.fwnode = acpi_fwnode_handle(adev);
pdevinfo.dma_mask = DMA_BIT_MASK(32);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev))
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index b193f8425999..ff6d8adc9cda 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"PNPb006"},
/* cs423x-pnpbios */
{"CSC0100"},
+ {"CSC0103"},
+ {"CSC0110"},
{"CSC0000"},
{"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */
/* es18xx-pnpbios */
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1020b1b53a17..58f335ca2e75 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
- if (pr->phys_id == -1)
+ if (pr->phys_id == PHYS_CPUID_INVALID)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -215,7 +215,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int phys_id, cpu_index, device_declaration = 0;
+ phys_cpuid_t phys_id;
+ int cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -263,7 +264,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
- if (phys_id < 0)
+ if (phys_id == PHYS_CPUID_INVALID)
acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
pr->phys_id = phys_id;
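Note: phys_id becomes a dedicated phys_cpuid_t and is compared against PHYS_CPUID_INVALID rather than tested with "< 0", since the new type may be unsigned and a "< 0" check would then be always false. A toy sketch of the sentinel pattern (the typedef and constant below are invented stand-ins, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int toy_phys_cpuid_t;
#define TOY_PHYS_CPUID_INVALID ((toy_phys_cpuid_t)-1)

static bool toy_phys_id_valid(toy_phys_cpuid_t id)
{
	/* "id < 0" would never be true for an unsigned type */
	return id != TOY_PHYS_CPUID_INVALID;
}

int main(void)
{
	printf("%d %d\n", toy_phys_id_valid(3),
	       toy_phys_id_valid(TOY_PHYS_CPUID_INVALID));
	return 0;
}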
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index d863016565b5..e9f0833e818d 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -64,15 +64,15 @@
/* Macros for signons and file headers */
#define ACPI_COMMON_SIGNON(utility_name) \
- "\n%s\n%s version %8.8X%s [%s]\n%s\n\n", \
+ "\n%s\n%s version %8.8X%s\n%s\n\n", \
ACPICA_NAME, \
- utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, __DATE__, \
+ utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, \
ACPICA_COPYRIGHT
#define ACPI_COMMON_HEADER(utility_name, prefix) \
- "%s%s\n%s%s version %8.8X%s [%s]\n%s%s\n%s\n", \
+ "%s%s\n%s%s version %8.8X%s\n%s%s\n%s\n", \
prefix, ACPICA_NAME, \
- prefix, utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, __DATE__, \
+ prefix, utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, \
prefix, ACPICA_COPYRIGHT, \
prefix
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index a165d25343e8..a0c478784314 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -306,6 +306,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_db_output_flags, ACPI_DB_CONSOLE_OUTPUT);
ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
@@ -321,9 +322,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_db_terminate_threads, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_tables);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_stats);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_ini_methods);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_ini_methods);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
ACPI_GLOBAL(u8, acpi_gbl_db_output_to_file);
ACPI_GLOBAL(char *, acpi_gbl_db_buffer);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 7add32e5d8c5..87b27521fcac 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
/* Total number of aml opcodes defined */
-#define AML_NUM_OPCODES 0x81
+#define AML_NUM_OPCODES 0x82
/* Forward declarations */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index cf607fe69dbd..c240bdf824f2 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -63,23 +63,12 @@
#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
/*
- * printf() format helpers. These macros are workarounds for the difficulties
+ * printf() format helper. This macro is a workaround for the difficulties
* with emitting 64-bit integers and 64-bit pointers with the same code
* for both 32-bit and 64-bit hosts.
*/
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
-#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i)
-#define ACPI_PRINTF_UINT "0x%8.8X%8.8X"
-
-#else
-#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i)
-#define ACPI_FORMAT_TO_UINT(i) (u32) (i)
-#define ACPI_PRINTF_UINT "0x%8.8X"
-#endif
-
/*
* Macros for moving data around to/from buffers that are possibly unaligned.
* If the hardware supports the transfer of unaligned data, just do the store.
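Note: with the native-width helpers removed from acmacros.h, addresses are printed as full 64-bit values everywhere via ACPI_FORMAT_UINT64, which expands to the high and low 32-bit halves matched by a "%8.8X%8.8X" format. A userspace illustration of that hi/lo split (local macro names, not ACPICA's):

#include <stdint.h>
#include <stdio.h>

#define TOY_HIDWORD(v) ((unsigned int)((uint64_t)(v) >> 32))
#define TOY_LODWORD(v) ((unsigned int)((uint64_t)(v) & 0xffffffffu))
#define TOY_FORMAT_UINT64(v) TOY_HIDWORD(v), TOY_LODWORD(v)

int main(void)
{
	uint64_t address = 0x00000000bfec2000ULL;

	/* Prints 0x00000000BFEC2000 regardless of host pointer width */
	printf("Addr 0x%8.8X%8.8X\n", TOY_FORMAT_UINT64(address));
	return 0;
}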
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index a5f17de45ac6..fd85ad05a24a 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -111,6 +111,7 @@
#define ARGP_DWORD_OP ARGP_LIST1 (ARGP_DWORDDATA)
#define ARGP_ELSE_OP ARGP_LIST2 (ARGP_PKGLENGTH, ARGP_TERMLIST)
#define ARGP_EVENT_OP ARGP_LIST1 (ARGP_NAME)
+#define ARGP_EXTERNAL_OP ARGP_LIST3 (ARGP_NAMESTRING, ARGP_BYTEDATA, ARGP_BYTEDATA)
#define ARGP_FATAL_OP ARGP_LIST3 (ARGP_BYTEDATA, ARGP_DWORDDATA, ARGP_TERMARG)
#define ARGP_FIELD_OP ARGP_LIST4 (ARGP_PKGLENGTH, ARGP_NAMESTRING, ARGP_BYTEDATA, ARGP_FIELDLIST)
#define ARGP_FIND_SET_LEFT_BIT_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
@@ -243,6 +244,7 @@
#define ARGI_DWORD_OP ARGI_INVALID_OPCODE
#define ARGI_ELSE_OP ARGI_INVALID_OPCODE
#define ARGI_EVENT_OP ARGI_INVALID_OPCODE
+#define ARGI_EXTERNAL_OP ARGI_LIST3 (ARGI_STRING, ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_FATAL_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_FIELD_OP ARGI_INVALID_OPCODE
#define ARGI_FIND_SET_LEFT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index efc4c7124ccc..6357efb01b93 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -299,11 +299,13 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
union aml_resource *aml);
/*
- * rsdump
+ * rsdump - Debugger support
*/
+#ifdef ACPI_DEBUGGER
void acpi_rs_dump_resource_list(struct acpi_resource *resource);
-void acpi_rs_dump_irq_list(u8 * route_table);
+void acpi_rs_dump_irq_list(u8 *route_table);
+#endif
/*
* Resource conversion tables
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index d14b547b7cd5..87c7860b3394 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -68,11 +68,6 @@
#define ACPI_WALK_METHOD 0x01
#define ACPI_WALK_METHOD_RESTART 0x02
-/* Flags for iASL compiler only */
-
-#define ACPI_WALK_CONST_REQUIRED 0x10
-#define ACPI_WALK_CONST_OPTIONAL 0x20
-
struct acpi_walk_state {
struct acpi_walk_state *next; /* Next walk_state in list */
u8 descriptor_type; /* To differentiate various internal objs */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 1c127a43017b..7e0b6f1bec9c 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -58,7 +58,9 @@ u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
/*
* tbdata - table data structure management
*/
-acpi_status acpi_tb_get_next_root_index(u32 *table_index);
+acpi_status
+acpi_tb_get_next_table_descriptor(u32 *table_index,
+ struct acpi_table_desc **table_desc);
void
acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
@@ -119,11 +121,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
u8 flags,
u8 reload, u8 override, u32 *table_index);
-acpi_status
-acpi_tb_store_table(acpi_physical_address address,
- struct acpi_table_header *table,
- u32 length, u8 flags, u32 *table_index);
-
void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc);
void acpi_tb_terminate(void);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index c2f03e8774ad..2b3c5bd222f1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -502,6 +502,9 @@ const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name);
+void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes);
+
+#if (defined ACPI_ASL_COMPILER || defined ACPI_HELP_APP)
const union acpi_predefined_info *acpi_ut_match_resource_name(char *name);
void
@@ -509,9 +512,8 @@ acpi_ut_display_predefined_method(char *buffer,
const union acpi_predefined_info *this_name,
u8 multi_line);
-void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes);
-
u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types);
+#endif
/*
* utstate - Generic state creation/cache routines
@@ -539,14 +541,6 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
u16 action,
union acpi_generic_state **state_list);
-#ifdef ACPI_FUTURE_USAGE
-acpi_status
-acpi_ut_create_pkg_state_and_push(void *internal_object,
- void *external_object,
- u16 index,
- union acpi_generic_state **state_list);
-#endif /* ACPI_FUTURE_USAGE */
-
union acpi_generic_state *acpi_ut_create_control_state(void);
void acpi_ut_delete_generic_state(union acpi_generic_state *state);
@@ -570,7 +564,9 @@ const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
u8 acpi_ut_is_pci_root_bridge(char *id);
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
+#endif
acpi_status
acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
@@ -629,15 +625,19 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
*/
void acpi_ut_strupr(char *src_string);
+#ifdef ACPI_ASL_COMPILER
void acpi_ut_strlwr(char *src_string);
int acpi_ut_stricmp(char *string1, char *string2);
+#endif
acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
void acpi_ut_print_string(char *string, u16 max_length);
+#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
void ut_convert_backslashes(char *pathname);
+#endif
u8 acpi_ut_valid_acpi_name(char *name);
@@ -785,6 +785,8 @@ int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...);
/*
* utuuid -- UUID support functions
*/
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP)
void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer);
+#endif
#endif /* _ACUTILS_H */
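Note: several utility prototypes above are now wrapped in ACPI_ASL_COMPILER / ACPI_EXEC_APP / ACPI_HELP_APP guards so they are only compiled into the tools that actually use them, keeping the in-kernel footprint smaller. A toy sketch of the guard pattern (TOY_TOOL_APP is an invented configuration symbol):

#include <ctype.h>
#include <stdio.h>

#if defined(TOY_TOOL_APP)
/* Only built into the offline tool, not the core library. */
static void toy_strlwr(char *s)
{
	for (; *s; s++)
		*s = (char)tolower((unsigned char)*s);
}
#endif

int main(void)
{
	char name[] = "ACPI";

#if defined(TOY_TOOL_APP)
	toy_strlwr(name);
#endif
	puts(name);
	return 0;
}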
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 3a95068fc119..be9fd009cb28 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -65,6 +65,7 @@
#define AML_PACKAGE_OP (u16) 0x12
#define AML_VAR_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */
#define AML_METHOD_OP (u16) 0x14
+#define AML_EXTERNAL_OP (u16) 0x15 /* ACPI 6.0 */
#define AML_DUAL_NAME_PREFIX (u16) 0x2e
#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f
#define AML_NAME_CHAR_SUBSEQ (u16) 0x30
@@ -206,7 +207,6 @@
#define AML_INT_RESERVEDFIELD_OP (u16) 0x0031
#define AML_INT_ACCESSFIELD_OP (u16) 0x0032
#define AML_INT_BYTELIST_OP (u16) 0x0033
-#define AML_INT_STATICSTRING_OP (u16) 0x0034
#define AML_INT_METHODCALL_OP (u16) 0x0035
#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 77244182ff02..ea0cc4e08f80 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
/* Now the address and length are valid for this opregion */
@@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_NOT_EXIST);
}
- obj_desc->region.address =
- (acpi_physical_address) ACPI_TO_INTEGER(table);
+ obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
obj_desc->region.length = table->length;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
/* Now the address and length are valid for this opregion */
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index e5ff89bcb3f5..deeddd6d2f05 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -564,6 +564,17 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
acpi_operand_object,
acpi_gbl_root_node);
status = AE_OK;
+ } else if (parent_op->common.aml_opcode ==
+ AML_EXTERNAL_OP) {
+
+ /* TBD: May only be temporary */
+
+ obj_desc =
+ acpi_ut_create_string_object((acpi_size) name_length);
+
+ ACPI_STRNCPY(obj_desc->string.pointer,
+ name_string, name_length);
+ status = AE_OK;
} else {
/*
* We just plain didn't find it -- which is a
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 5ed064e8673c..ccf793247447 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
ACPI_SET_BIT(gpe_register_info->enable_for_run,
(u8)register_bit);
}
+ gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
return_ACPI_STATUS(AE_OK);
}
@@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
/* Enable the requested GPE */
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
@@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
if (ACPI_SUCCESS(status)) {
status =
acpi_hw_low_set_gpe(gpe_event_info,
- ACPI_GPE_DISABLE_SAVE);
+ ACPI_GPE_DISABLE);
}
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 9abace3401f9..2ba28a63fb68 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -272,7 +272,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
&region_obj->region.handler->address_space, handler,
- ACPI_FORMAT_NATIVE_UINT(address),
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(region_obj->region.
space_id)));
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index df06a23c4197..faad911d46b5 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -356,7 +356,8 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
}
if (in_byte) {
- local_event_status |= ACPI_EVENT_FLAG_ENABLED;
+ local_event_status |=
+ (ACPI_EVENT_FLAG_ENABLED | ACPI_EVENT_FLAG_ENABLE_SET);
}
/* Fixed event currently active? */
@@ -369,7 +370,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
}
if (in_byte) {
- local_event_status |= ACPI_EVENT_FLAG_SET;
+ local_event_status |= ACPI_EVENT_FLAG_STATUS_SET;
}
(*event_status) = local_event_status;
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 7c213b6b6472..1da52bef632e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -767,8 +767,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
acpi_os_printf("\n");
} else {
acpi_os_printf(" base %8.8X%8.8X Length %X\n",
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
- address),
+ ACPI_FORMAT_UINT64(obj_desc->region.
+ address),
obj_desc->region.length);
}
break;
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 49479927e7f7..725a3746a2df 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
- " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id,
obj_desc->common_field.access_byte_width,
obj_desc->common_field.base_byte_offset,
- field_datum_byte_offset, ACPI_CAST_PTR(void,
- (rgn_desc->
- region.
- address +
- region_offset))));
+ field_datum_byte_offset,
+ ACPI_FORMAT_UINT64(rgn_desc->region.address +
+ region_offset)));
/* Invoke the appropriate address_space/op_region handler */
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index b813fed95e56..1c64a988cbee 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -114,7 +114,18 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
/* Might return while OS is shutting down, just continue */
ACPI_FREE(fatal);
- break;
+ goto cleanup;
+
+ case AML_EXTERNAL_OP:
+ /*
+ * If the interpreter sees this opcode, just ignore it. The External
+ * op is intended for use by disassemblers in order to properly
+ * disassemble control method invocations. The opcode or group of
+ * opcodes should be surrounded by an "if (0)" clause to ensure that
+ * AML interpreters never see the opcode.
+ */
+ status = AE_OK;
+ goto cleanup;
default:
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 0fe188e238ef..f6c2f5499935 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -165,8 +165,8 @@ acpi_ex_system_memory_space_handler(u32 function,
* one page, which is similar to the original code that used a 4k
* maximum window.
*/
- page_boundary_map_length =
- ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;
+ page_boundary_map_length = (acpi_size)
+ (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
if (page_boundary_map_length == 0) {
page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
}
@@ -177,12 +177,13 @@ acpi_ex_system_memory_space_handler(u32 function,
/* Create a new mapping starting at the address given */
- mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
+ mem_info->mapped_logical_address =
+ acpi_os_map_memory(address, map_length);
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
"Could not map memory at 0x%8.8X%8.8X, size %u",
- ACPI_FORMAT_NATIVE_UINT(address),
- (u32) map_length));
+ ACPI_FORMAT_UINT64(address),
+ (u32)map_length));
mem_info->mapped_length = 0;
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -202,8 +203,7 @@ acpi_ex_system_memory_space_handler(u32 function,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
- bit_width, function,
- ACPI_FORMAT_NATIVE_UINT(address)));
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/*
* Perform the memory read or write
@@ -318,8 +318,7 @@ acpi_ex_system_io_space_handler(u32 function,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
- bit_width, function,
- ACPI_FORMAT_NATIVE_UINT(address)));
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/* Decode the function parameter */
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 84bc550f4f1d..73cfa5947ff3 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
* RETURN: Status
*
* DESCRIPTION: Enable or disable a single GPE in the parent enable register.
+ * The enable_mask field of the involved GPE register must be
+ * updated by the caller if necessary.
*
******************************************************************************/
@@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Set or clear just the bit that corresponds to this GPE */
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
- switch (action & ~ACPI_GPE_SAVE_MASK) {
+ switch (action) {
case ACPI_GPE_CONDITIONAL_ENABLE:
/* Only enable if the corresponding enable_mask bit is set */
@@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Write the updated enable mask */
status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
- if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
- gpe_register_info->enable_mask = (u8)enable_mask;
- }
return (status);
}
@@ -250,6 +249,17 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
}
+ /* GPE currently enabled (enable bit == 1)? */
+
+ status = acpi_hw_read(&in_byte, &gpe_register_info->enable_address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ if (register_bit & in_byte) {
+ local_event_status |= ACPI_EVENT_FLAG_ENABLE_SET;
+ }
+
/* GPE currently active (status bit == 1)? */
status = acpi_hw_read(&in_byte, &gpe_register_info->status_address);
@@ -258,7 +268,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
}
if (register_bit & in_byte) {
- local_event_status |= ACPI_EVENT_FLAG_SET;
+ local_event_status |= ACPI_EVENT_FLAG_STATUS_SET;
}
/* Set return value */
@@ -286,10 +296,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
{
acpi_status status;
+ gpe_register_info->enable_mask = enable_mask;
status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
- if (ACPI_SUCCESS(status)) {
- gpe_register_info->enable_mask = enable_mask;
- }
return (status);
}
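Note: acpi_hw_low_set_gpe() no longer saves the mask it writes; the cached enable_mask is now kept in sync by the callers, acpi_ev_update_gpe_enable_mask() and acpi_hw_gpe_enable_write(), before the hardware write. A simplified model of that split, with the low-level routine touching only the "hardware" and the wrapper owning the cache (toy types, not ACPICA's):

#include <stdint.h>
#include <stdio.h>

struct toy_gpe_register {
	uint8_t enable_mask;	/* software copy of the enable register */
	uint8_t hw_enable;	/* stand-in for the real hardware register */
};

/* Low-level write: no side effect on the cached enable_mask. */
static int toy_hw_write_enable(struct toy_gpe_register *reg, uint8_t value)
{
	reg->hw_enable = value;
	return 0;
}

/* Wrapper: update the cache first, then push it to hardware. */
static int toy_gpe_enable_write(struct toy_gpe_register *reg, uint8_t mask)
{
	reg->enable_mask = mask;
	return toy_hw_write_enable(reg, mask);
}

int main(void)
{
	struct toy_gpe_register reg = { 0, 0 };

	toy_gpe_enable_write(&reg, 0x41);
	printf("cache=%02X hw=%02X\n", reg.enable_mask, reg.hw_enable);
	return 0;
}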
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 2bd33fe56cb3..29033d71417b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
byte_width = ACPI_DIV_8(bit_width);
last_address = address + byte_width - 1;
- ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
- ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
- last_address),
- byte_width));
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
+ ACPI_FORMAT_UINT64(address),
+ ACPI_FORMAT_UINT64(last_address), byte_width));
/* Maximum 16-bit address in I/O space */
if (last_address > ACPI_UINT16_MAX) {
ACPI_ERROR((AE_INFO,
- "Illegal I/O port address/length above 64K: %p/0x%X",
- ACPI_CAST_PTR(void, address), byte_width));
+ "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
+ ACPI_FORMAT_UINT64(address), byte_width));
return_ACPI_STATUS(AE_LIMIT);
}
@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
- ACPI_CAST_PTR(void, address),
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
port_info->end));
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 80f097eb7381..d259393505fa 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
switch (type) {
case ACPI_TYPE_PROCESSOR:
- acpi_os_printf("ID %02X Len %02X Addr %p\n",
+ acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
obj_desc->processor.proc_id,
obj_desc->processor.length,
- ACPI_CAST_PTR(void,
- obj_desc->processor.
- address));
+ ACPI_FORMAT_UINT64(obj_desc->processor.
+ address));
break;
case ACPI_TYPE_DEVICE:
@@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
space_id));
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
- ACPI_FORMAT_NATIVE_UINT
- (obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->
+ region.
+ address),
obj_desc->region.length);
} else {
acpi_os_printf
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1af4a405e351..ed90fddf2487 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -646,7 +646,13 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+
+/* ACPI 6.0 opcodes */
+
+ /* 81 */ ACPI_OP("External", ARGP_EXTERNAL_OP, ARGI_EXTERNAL_OP,
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE, /* ? */
+ AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R)
/*! [End] no source code translation !*/
};
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index e18e7c47f482..20e1a35169fc 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -210,7 +210,7 @@ const u8 acpi_gbl_short_op_index[256] = {
/* 8 9 A B C D E F */
/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
-/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
+/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, 0x81, _UNK, _UNK,
/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 1539394c8c52..c428bb33204e 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -1,6 +1,6 @@
/*******************************************************************************
*
- * Module Name: rsdump - Functions to display the resource structures.
+ * Module Name: rsdump - AML debugger support for resource structures.
*
******************************************************************************/
@@ -48,7 +48,10 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdump")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
+/*
+ * All functions in this module are used by the AML Debugger only
+ */
+#if defined(ACPI_DEBUGGER)
/* Local prototypes */
static void acpi_rs_out_string(char *title, char *value);
@@ -80,6 +83,116 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
/*******************************************************************************
*
+ * FUNCTION: acpi_rs_dump_resource_list
+ *
+ * PARAMETERS: resource_list - Pointer to a resource descriptor list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dispatches the structure to the correct dump routine.
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
+{
+ u32 count = 0;
+ u32 type;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
+ return;
+ }
+
+ /* Walk list and dump all resource descriptors (END_TAG terminates) */
+
+ do {
+ acpi_os_printf("\n[%02X] ", count);
+ count++;
+
+ /* Validate Type before dispatch */
+
+ type = resource_list->type;
+ if (type > ACPI_RESOURCE_TYPE_MAX) {
+ acpi_os_printf
+ ("Invalid descriptor type (%X) in resource list\n",
+ resource_list->type);
+ return;
+ }
+
+ /* Sanity check the length. It must not be zero, or we loop forever */
+
+ if (!resource_list->length) {
+ acpi_os_printf
+ ("Invalid zero length descriptor in resource list\n");
+ return;
+ }
+
+ /* Dump the resource descriptor */
+
+ if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_serial_bus_dispatch
+ [resource_list->data.
+ common_serial_bus.type]);
+ } else {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_resource_dispatch
+ [type]);
+ }
+
+ /* Point to the next resource structure */
+
+ resource_list = ACPI_NEXT_RESOURCE(resource_list);
+
+ /* Exit when END_TAG descriptor is reached */
+
+ } while (type != ACPI_RESOURCE_TYPE_END_TAG);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_dump_irq_list
+ *
+ * PARAMETERS: route_table - Pointer to the routing table to dump.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print IRQ routing table
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_irq_list(u8 *route_table)
+{
+ struct acpi_pci_routing_table *prt_element;
+ u8 count;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
+ return;
+ }
+
+ prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
+
+ /* Dump all table elements, Exit on zero length element */
+
+ for (count = 0; prt_element->length; count++) {
+ acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
+ count);
+ acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
+
+ prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
+ prt_element, prt_element->length);
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_rs_dump_descriptor
*
* PARAMETERS: resource - Buffer containing the resource
@@ -357,116 +470,6 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
/*******************************************************************************
*
- * FUNCTION: acpi_rs_dump_resource_list
- *
- * PARAMETERS: resource_list - Pointer to a resource descriptor list
- *
- * RETURN: None
- *
- * DESCRIPTION: Dispatches the structure to the correct dump routine.
- *
- ******************************************************************************/
-
-void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
-{
- u32 count = 0;
- u32 type;
-
- ACPI_FUNCTION_ENTRY();
-
- /* Check if debug output enabled */
-
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
- return;
- }
-
- /* Walk list and dump all resource descriptors (END_TAG terminates) */
-
- do {
- acpi_os_printf("\n[%02X] ", count);
- count++;
-
- /* Validate Type before dispatch */
-
- type = resource_list->type;
- if (type > ACPI_RESOURCE_TYPE_MAX) {
- acpi_os_printf
- ("Invalid descriptor type (%X) in resource list\n",
- resource_list->type);
- return;
- }
-
- /* Sanity check the length. It must not be zero, or we loop forever */
-
- if (!resource_list->length) {
- acpi_os_printf
- ("Invalid zero length descriptor in resource list\n");
- return;
- }
-
- /* Dump the resource descriptor */
-
- if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_serial_bus_dispatch
- [resource_list->data.
- common_serial_bus.type]);
- } else {
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_resource_dispatch
- [type]);
- }
-
- /* Point to the next resource structure */
-
- resource_list = ACPI_NEXT_RESOURCE(resource_list);
-
- /* Exit when END_TAG descriptor is reached */
-
- } while (type != ACPI_RESOURCE_TYPE_END_TAG);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_rs_dump_irq_list
- *
- * PARAMETERS: route_table - Pointer to the routing table to dump.
- *
- * RETURN: None
- *
- * DESCRIPTION: Print IRQ routing table
- *
- ******************************************************************************/
-
-void acpi_rs_dump_irq_list(u8 * route_table)
-{
- struct acpi_pci_routing_table *prt_element;
- u8 count;
-
- ACPI_FUNCTION_ENTRY();
-
- /* Check if debug output enabled */
-
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
- return;
- }
-
- prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
-
- /* Dump all table elements, Exit on zero length element */
-
- for (count = 0; prt_element->length; count++) {
- acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
- count);
- acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
-
- prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
- prt_element, prt_element->length);
- }
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_rs_out*
*
* PARAMETERS: title - Name of the resource field
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 6a144957aadd..d7f8386455bd 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -113,9 +113,9 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
- table =
- ACPI_CAST_PTR(struct acpi_table_header,
- table_desc->address);
+ table = ACPI_CAST_PTR(struct acpi_table_header,
+ ACPI_PHYSADDR_TO_PTR(table_desc->
+ address));
break;
default:
@@ -214,7 +214,8 @@ acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
- table_header = ACPI_CAST_PTR(struct acpi_table_header, address);
+ table_header = ACPI_CAST_PTR(struct acpi_table_header,
+ ACPI_PHYSADDR_TO_PTR(address));
if (!table_header) {
return (AE_NO_MEMORY);
}
@@ -398,14 +399,14 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
table_desc->length);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
- "%4.4s " ACPI_PRINTF_UINT
+ "%4.4s 0x%8.8X%8.8X"
" Attempted table install failed",
acpi_ut_valid_acpi_name(table_desc->
signature.
ascii) ?
table_desc->signature.ascii : "????",
- ACPI_FORMAT_TO_UINT(table_desc->
- address)));
+ ACPI_FORMAT_UINT64(table_desc->
+ address)));
goto invalidate_and_exit;
}
}
@@ -483,19 +484,23 @@ acpi_status acpi_tb_resize_root_table_list(void)
/*******************************************************************************
*
- * FUNCTION: acpi_tb_get_next_root_index
+ * FUNCTION: acpi_tb_get_next_table_descriptor
*
* PARAMETERS: table_index - Where table index is returned
+ * table_desc - Where table descriptor is returned
*
- * RETURN: Status and table index.
+ * RETURN: Status and table index/descriptor.
*
* DESCRIPTION: Allocate a new ACPI table entry to the global table list
*
******************************************************************************/
-acpi_status acpi_tb_get_next_root_index(u32 *table_index)
+acpi_status
+acpi_tb_get_next_table_descriptor(u32 *table_index,
+ struct acpi_table_desc **table_desc)
{
acpi_status status;
+ u32 i;
/* Ensure that there is room for the table in the Root Table List */
@@ -507,8 +512,16 @@ acpi_status acpi_tb_get_next_root_index(u32 *table_index)
}
}
- *table_index = acpi_gbl_root_table_list.current_table_count;
+ i = acpi_gbl_root_table_list.current_table_count;
acpi_gbl_root_table_list.current_table_count++;
+
+ if (table_index) {
+ *table_index = i;
+ }
+ if (table_desc) {
+ *table_desc = &acpi_gbl_root_table_list.tables[i];
+ }
+
return (AE_OK);
}
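Note: acpi_tb_get_next_table_descriptor() replaces acpi_tb_get_next_root_index() and hands back the table index and/or the descriptor pointer, each only when the caller passed a non-NULL out-parameter. A toy sketch of that optional out-parameter style (the table array below is invented, not the ACPICA root table list):

#include <stdio.h>

struct toy_desc { int id; };

static struct toy_desc toy_tables[8];
static unsigned int toy_count;

static int toy_get_next_descriptor(unsigned int *index_out,
				   struct toy_desc **desc_out)
{
	unsigned int i;

	if (toy_count >= sizeof(toy_tables) / sizeof(toy_tables[0]))
		return -1;

	i = toy_count++;
	if (index_out)
		*index_out = i;		/* only filled in when requested */
	if (desc_out)
		*desc_out = &toy_tables[i];
	return 0;
}

int main(void)
{
	unsigned int idx;
	struct toy_desc *desc;

	if (!toy_get_next_descriptor(&idx, &desc))
		printf("slot %u at %p\n", idx, (void *)desc);
	if (!toy_get_next_descriptor(NULL, &desc))	/* index not needed */
		printf("second slot at %p\n", (void *)desc);
	return 0;
}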
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 9bad45e63a45..008a251780f4 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -187,8 +187,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
status = acpi_tb_acquire_temp_table(&new_table_desc, address,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
- ACPI_CAST_PTR(void, address)));
+ ACPI_ERROR((AE_INFO,
+ "Could not acquire table length at %8.8X%8.8X",
+ ACPI_FORMAT_UINT64(address)));
return_ACPI_STATUS(status);
}
@@ -246,8 +247,9 @@ acpi_tb_install_standard_table(acpi_physical_address address,
status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not acquire table length at %p",
- ACPI_CAST_PTR(void, address)));
+ ACPI_ERROR((AE_INFO,
+ "Could not acquire table length at %8.8X%8.8X",
+ ACPI_FORMAT_UINT64(address)));
return_ACPI_STATUS(status);
}
@@ -258,9 +260,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
- ACPI_INFO((AE_INFO, "Ignoring installation of %4.4s at %p",
- new_table_desc.signature.ascii, ACPI_CAST_PTR(void,
- address)));
+ ACPI_INFO((AE_INFO,
+ "Ignoring installation of %4.4s at %8.8X%8.8X",
+ new_table_desc.signature.ascii,
+ ACPI_FORMAT_UINT64(address)));
goto release_and_exit;
}
@@ -346,7 +349,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
*/
acpi_tb_uninstall_table(&new_table_desc);
*table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_OK);
}
}
@@ -354,7 +356,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Add the table to the global root table list */
- status = acpi_tb_get_next_root_index(&i);
+ status = acpi_tb_get_next_table_descriptor(&i, NULL);
if (ACPI_FAILURE(status)) {
goto release_and_exit;
}
@@ -429,11 +431,11 @@ finish_override:
return;
}
- ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
- " %s table override, new table: " ACPI_PRINTF_UINT,
+ ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
+ " %s table override, new table: 0x%8.8X%8.8X",
old_table_desc->signature.ascii,
- ACPI_FORMAT_TO_UINT(old_table_desc->address),
- override_type, ACPI_FORMAT_TO_UINT(new_table_desc.address)));
+ ACPI_FORMAT_UINT64(old_table_desc->address),
+ override_type, ACPI_FORMAT_UINT64(new_table_desc.address)));
/* We can now uninstall the original table */
@@ -455,43 +457,6 @@ finish_override:
/*******************************************************************************
*
- * FUNCTION: acpi_tb_store_table
- *
- * PARAMETERS: address - Table address
- * table - Table header
- * length - Table length
- * flags - Install flags
- * table_index - Where the table index is returned
- *
- * RETURN: Status and table index.
- *
- * DESCRIPTION: Add an ACPI table to the global table list
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_store_table(acpi_physical_address address,
- struct acpi_table_header * table,
- u32 length, u8 flags, u32 *table_index)
-{
- acpi_status status;
- struct acpi_table_desc *table_desc;
-
- status = acpi_tb_get_next_root_index(table_index);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Initialize added table */
-
- table_desc = &acpi_gbl_root_table_list.tables[*table_index];
- acpi_tb_init_table_descriptor(table_desc, address, flags, table);
- table_desc->pointer = table;
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_tb_uninstall_table
*
* PARAMETERS: table_desc - Table descriptor
@@ -517,7 +482,7 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
- ACPI_FREE(ACPI_CAST_PTR(void, table_desc->address));
+ ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
}
table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index ef16c06e5091..77ba5c71c6e7 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -127,18 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
- /*
- * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
- * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
- * The %p specifier does not emit uniform output on all hosts. On some,
- * leading zeros are not supported.
- */
if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
- header->signature, ACPI_FORMAT_TO_UINT(address),
+ ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
+ header->signature, ACPI_FORMAT_UINT64(address),
header->length));
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
@@ -149,9 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO,
- "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
- ACPI_FORMAT_TO_UINT(address),
+ ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
+ ACPI_FORMAT_UINT64(address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -165,9 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%-4.4s " ACPI_PRINTF_UINT
+ "%-4.4s 0x%8.8X%8.8X"
" %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
- local_header.signature, ACPI_FORMAT_TO_UINT(address),
+ local_header.signature, ACPI_FORMAT_UINT64(address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
local_header.oem_revision,
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index eac52cf14f1a..fa76a3603aa1 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -142,7 +142,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
*
******************************************************************************/
-acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
+acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
{
u8 *table_ptr;
u8 *mem_rover;
@@ -200,7 +200,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
physical_address +=
(u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
- *table_address = physical_address;
+ *table_address =
+ (acpi_physical_address) physical_address;
return_ACPI_STATUS(AE_OK);
}
}
@@ -233,7 +234,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
(ACPI_HI_RSDP_WINDOW_BASE +
ACPI_PTR_DIFF(mem_rover, table_ptr));
- *table_address = physical_address;
+ *table_address = (acpi_physical_address) physical_address;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 1279f50da757..911ea8e7fe87 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
acpi_gbl_address_range_list[space_id] = range_info;
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+ "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
acpi_ut_get_node_name(range_info->region_node),
- ACPI_CAST_PTR(void, address),
- ACPI_CAST_PTR(void, range_info->end_address)));
+ ACPI_FORMAT_UINT64(address),
+ ACPI_FORMAT_UINT64(range_info->end_address)));
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
}
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+ "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
acpi_ut_get_node_name(range_info->
region_node),
- ACPI_CAST_PTR(void,
- range_info->
- start_address),
- ACPI_CAST_PTR(void,
- range_info->
- end_address)));
+ ACPI_FORMAT_UINT64(range_info->
+ start_address),
+ ACPI_FORMAT_UINT64(range_info->
+ end_address)));
ACPI_FREE(range_info);
return_VOID;
@@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
region_node);
ACPI_WARNING((AE_INFO,
- "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
+ "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
acpi_ut_get_region_name(space_id),
- ACPI_CAST_PTR(void, address),
- ACPI_CAST_PTR(void, end_address),
- ACPI_CAST_PTR(void,
- range_info->
- start_address),
- ACPI_CAST_PTR(void,
- range_info->
- end_address),
+ ACPI_FORMAT_UINT64(address),
+ ACPI_FORMAT_UINT64(end_address),
+ ACPI_FORMAT_UINT64(range_info->
+ start_address),
+ ACPI_FORMAT_UINT64(range_info->
+ end_address),
pathname));
ACPI_FREE(pathname);
}
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 242bd071f007..a8c39643e618 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -150,6 +150,14 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
return;
}
+ /*
+ * Add comment characters so rest of line is ignored when
+ * compiled
+ */
+ if (j == 0) {
+ acpi_os_printf("// ");
+ }
+
buf_char = buffer[(acpi_size) i + j];
if (ACPI_IS_PRINT(buf_char)) {
acpi_os_printf("%c", buf_char);
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 56bbacd576f2..cbb7034d28d8 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -75,6 +75,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
return (FALSE);
}
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
/*******************************************************************************
*
* FUNCTION: acpi_ut_is_aml_table
@@ -102,6 +103,7 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
return (FALSE);
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 574cd3118313..44035abdbf29 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -100,6 +100,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
+ {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 82ca9142e10d..2be6bd4bdc09 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -357,11 +357,11 @@ int
acpi_ut_vsnprintf(char *string,
acpi_size size, const char *format, va_list args)
{
- u8 base = 10;
- u8 type = 0;
- s32 width = -1;
- s32 precision = -1;
- char qualifier = 0;
+ u8 base;
+ u8 type;
+ s32 width;
+ s32 precision;
+ char qualifier;
u64 number;
char *pos;
char *end;
@@ -380,6 +380,9 @@ acpi_ut_vsnprintf(char *string,
continue;
}
+ type = 0;
+ base = 10;
+
/* Process sign */
do {
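
The utprint.c change above exists because base and type were previously initialised only once per acpi_ut_vsnprintf() call, so state from one conversion (say, base 16 from %x) could leak into the next; resetting them for every '%' fixes that. A simplified, standalone sketch of the corrected pattern, with a toy parser standing in for the real one:

#include <stdio.h>

static void parse_format(const char *fmt)
{
	int base;  /* deliberately not initialised here, as in the patch */

	for (; *fmt; fmt++) {
		if (*fmt != '%')
			continue;

		base = 10;              /* reset for every conversion */
		fmt++;
		if (!*fmt)
			break;
		if (*fmt == 'x' || *fmt == 'X')
			base = 16;
		else if (*fmt == 'o')
			base = 8;

		printf("specifier '%c' -> base %d\n", *fmt, base);
	}
}

int main(void)
{
	parse_format("%x then %d");  /* base 16, then base 10 again */
	return 0;
}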
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 8274cc16edc3..f201171c5dda 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -49,39 +49,6 @@ ACPI_MODULE_NAME("utstate")
/*******************************************************************************
*
- * FUNCTION: acpi_ut_create_pkg_state_and_push
- *
- * PARAMETERS: object - Object to be added to the new state
- * action - Increment/Decrement
- * state_list - List the state will be added to
- *
- * RETURN: Status
- *
- * DESCRIPTION: Create a new state and push it
- *
- ******************************************************************************/
-acpi_status
-acpi_ut_create_pkg_state_and_push(void *internal_object,
- void *external_object,
- u16 index,
- union acpi_generic_state **state_list)
-{
- union acpi_generic_state *state;
-
- ACPI_FUNCTION_ENTRY();
-
- state =
- acpi_ut_create_pkg_state(internal_object, external_object, index);
- if (!state) {
- return (AE_NO_MEMORY);
- }
-
- acpi_ut_push_generic_state(state_list, state);
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_push_generic_state
*
* PARAMETERS: list_head - Head of the state stack
@@ -92,7 +59,6 @@ acpi_ut_create_pkg_state_and_push(void *internal_object,
* DESCRIPTION: Push a state object onto a state stack
*
******************************************************************************/
-
void
acpi_ut_push_generic_state(union acpi_generic_state **list_head,
union acpi_generic_state *state)
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index c6149a212149..e6cab669bd9c 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -47,6 +47,7 @@
#define _COMPONENT ACPI_COMPILER
ACPI_MODULE_NAME("utuuid")
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP)
/*
* UUID support functions.
*
@@ -94,3 +95,4 @@ void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer)
1]);
}
}
+#endif
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 672263a3832c..63d43677f644 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -531,8 +531,8 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
- printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
- " invalid.\n");
+ printk_once(KERN_WARNING FW_BUG
+ "battery: (dis)charge rate invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 9b693d54c743..1d1791935c31 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -215,6 +215,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{
.callback = dmi_disable_osi_vista,
+ .ident = "VGN-SR19XN",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
.ident = "Toshiba Satellite L355",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 8b67bd0f6bb5..c412fdb28d34 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -448,6 +448,9 @@ static int __init acpi_bus_init_irq(void)
case ACPI_IRQ_MODEL_IOSAPIC:
message = "IOSAPIC";
break;
+ case ACPI_IRQ_MODEL_GIC:
+ message = "GIC";
+ break;
case ACPI_IRQ_MODEL_PLATFORM:
message = "platform specific model";
break;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index d9339b442a4e..a688aa243f6c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -615,7 +615,7 @@ void acpi_dock_add(struct acpi_device *adev)
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.name = "dock";
pdevinfo.id = dock_station_count;
- pdevinfo.acpi_node.companion = adev;
+ pdevinfo.fwnode = acpi_fwnode_handle(adev);
pdevinfo.data = &ds;
pdevinfo.size_data = sizeof(ds);
dd = platform_device_register_full(&pdevinfo);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a8dd2f763382..5e8fed448850 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -137,6 +137,50 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
/* --------------------------------------------------------------------------
+ * Logging/Debugging
+ * -------------------------------------------------------------------------- */
+
+/*
+ * Splitters used by the developers to track the boundary of the EC
+ * handling processes.
+ */
+#ifdef DEBUG
+#define EC_DBG_SEP " "
+#define EC_DBG_DRV "+++++"
+#define EC_DBG_STM "====="
+#define EC_DBG_REQ "*****"
+#define EC_DBG_EVT "#####"
+#else
+#define EC_DBG_SEP ""
+#define EC_DBG_DRV
+#define EC_DBG_STM
+#define EC_DBG_REQ
+#define EC_DBG_EVT
+#endif
+
+#define ec_log_raw(fmt, ...) \
+ pr_info(fmt "\n", ##__VA_ARGS__)
+#define ec_dbg_raw(fmt, ...) \
+ pr_debug(fmt "\n", ##__VA_ARGS__)
+#define ec_log(filter, fmt, ...) \
+ ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
+#define ec_dbg(filter, fmt, ...) \
+ ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
+
+#define ec_log_drv(fmt, ...) \
+ ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
+#define ec_dbg_drv(fmt, ...) \
+ ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
+#define ec_dbg_stm(fmt, ...) \
+ ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
+#define ec_dbg_req(fmt, ...) \
+ ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
+#define ec_dbg_evt(fmt, ...) \
+ ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
+#define ec_dbg_ref(ec, fmt, ...) \
+ ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
+
+/* --------------------------------------------------------------------------
* Device Flags
* -------------------------------------------------------------------------- */
@@ -159,14 +203,14 @@ static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
u8 x = inb(ec->command_addr);
- pr_debug("EC_SC(R) = 0x%2.2x "
- "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
- x,
- !!(x & ACPI_EC_FLAG_SCI),
- !!(x & ACPI_EC_FLAG_BURST),
- !!(x & ACPI_EC_FLAG_CMD),
- !!(x & ACPI_EC_FLAG_IBF),
- !!(x & ACPI_EC_FLAG_OBF));
+ ec_dbg_raw("EC_SC(R) = 0x%2.2x "
+ "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
+ x,
+ !!(x & ACPI_EC_FLAG_SCI),
+ !!(x & ACPI_EC_FLAG_BURST),
+ !!(x & ACPI_EC_FLAG_CMD),
+ !!(x & ACPI_EC_FLAG_IBF),
+ !!(x & ACPI_EC_FLAG_OBF));
return x;
}
@@ -175,20 +219,20 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
u8 x = inb(ec->data_addr);
ec->curr->timestamp = jiffies;
- pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
+ ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
- pr_debug("EC_SC(W) = 0x%2.2x\n", command);
+ ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
outb(command, ec->command_addr);
ec->curr->timestamp = jiffies;
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
- pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
+ ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
outb(data, ec->data_addr);
ec->curr->timestamp = jiffies;
}
@@ -240,7 +284,7 @@ static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
* software need to manually trigger a pseudo GPE event on
* EN=1 writes.
*/
- pr_debug("***** Polling quirk *****\n");
+ ec_dbg_raw("Polling quirk");
advance_transaction(ec);
}
}
@@ -299,7 +343,7 @@ static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
{
if (!test_bit(flag, &ec->flags)) {
acpi_ec_disable_gpe(ec, false);
- pr_debug("+++++ Polling enabled +++++\n");
+ ec_dbg_drv("Polling enabled");
set_bit(flag, &ec->flags);
}
}
@@ -309,7 +353,7 @@ static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
if (test_bit(flag, &ec->flags)) {
clear_bit(flag, &ec->flags);
acpi_ec_enable_gpe(ec, false);
- pr_debug("+++++ Polling disabled +++++\n");
+ ec_dbg_drv("Polling disabled");
}
}
@@ -335,7 +379,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
- pr_debug("***** Event started *****\n");
+ ec_dbg_req("Event started");
schedule_work(&ec->work);
}
}
@@ -344,7 +388,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
{
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- pr_debug("***** Event stopped *****\n");
+ ec_dbg_req("Event stopped");
}
}
@@ -366,8 +410,8 @@ static void advance_transaction(struct acpi_ec *ec)
u8 status;
bool wakeup = false;
- pr_debug("===== %s (%d) =====\n",
- in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
+ ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
+ smp_processor_id());
/*
* By always clearing STS before handling all indications, we can
* ensure a hardware STS 0->1 change after this clearing can always
@@ -390,8 +434,8 @@ static void advance_transaction(struct acpi_ec *ec)
if (t->rlen == t->ri) {
t->flags |= ACPI_EC_COMMAND_COMPLETE;
if (t->command == ACPI_EC_COMMAND_QUERY)
- pr_debug("***** Command(%s) hardware completion *****\n",
- acpi_ec_cmd_string(t->command));
+ ec_dbg_req("Command(%s) hardware completion",
+ acpi_ec_cmd_string(t->command));
wakeup = true;
}
} else
@@ -410,8 +454,8 @@ static void advance_transaction(struct acpi_ec *ec)
acpi_ec_complete_query(ec);
t->rdata[t->ri++] = 0x00;
t->flags |= ACPI_EC_COMMAND_COMPLETE;
- pr_debug("***** Command(%s) software completion *****\n",
- acpi_ec_cmd_string(t->command));
+ ec_dbg_req("Command(%s) software completion",
+ acpi_ec_cmd_string(t->command));
wakeup = true;
} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
acpi_ec_write_cmd(ec, t->command);
@@ -502,21 +546,21 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ret = -EINVAL;
goto unlock;
}
+ ec_dbg_ref(ec, "Increase command");
/* following two actions should be kept atomic */
ec->curr = t;
- pr_debug("***** Command(%s) started *****\n",
- acpi_ec_cmd_string(t->command));
+ ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
- pr_debug("***** Command(%s) stopped *****\n",
- acpi_ec_cmd_string(t->command));
+ ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
acpi_ec_complete_request(ec);
+ ec_dbg_ref(ec, "Decrease command");
unlock:
spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
@@ -676,11 +720,13 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
spin_lock_irqsave(&ec->lock, flags);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
- pr_debug("+++++ Starting EC +++++\n");
+ ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
- if (!resuming)
+ if (!resuming) {
acpi_ec_submit_request(ec);
- pr_debug("EC started\n");
+ ec_dbg_ref(ec, "Increase driver");
+ }
+ ec_log_drv("EC started");
}
spin_unlock_irqrestore(&ec->lock, flags);
}
@@ -702,17 +748,19 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec)) {
- pr_debug("+++++ Stopping EC +++++\n");
+ ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
spin_unlock_irqrestore(&ec->lock, flags);
wait_event(ec->wait, acpi_ec_stopped(ec));
spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
- if (!suspending)
+ if (!suspending) {
acpi_ec_complete_request(ec);
+ ec_dbg_ref(ec, "Decrease driver");
+ }
clear_bit(EC_FLAGS_STARTED, &ec->flags);
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
- pr_debug("EC stopped\n");
+ ec_log_drv("EC stopped");
}
spin_unlock_irqrestore(&ec->lock, flags);
}
@@ -813,7 +861,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
}
}
mutex_unlock(&ec->mutex);
- list_for_each_entry(handler, &free_list, node)
+ list_for_each_entry_safe(handler, tmp, &free_list, node)
acpi_ec_put_query_handler(handler);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
@@ -824,12 +872,12 @@ static void acpi_ec_run(void *cxt)
if (!handler)
return;
- pr_debug("##### Query(0x%02x) started #####\n", handler->query_bit);
+ ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
if (handler->func)
handler->func(handler->data);
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
- pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
+ ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
acpi_ec_put_query_handler(handler);
}
@@ -861,8 +909,8 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
if (value == handler->query_bit) {
/* have custom handler for this bit */
handler = acpi_ec_get_query_handler(handler);
- pr_debug("##### Query(0x%02x) scheduled #####\n",
- handler->query_bit);
+ ec_dbg_evt("Query(0x%02x) scheduled",
+ handler->query_bit);
status = acpi_os_execute((handler->func) ?
OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
acpi_ec_run, handler);
@@ -1099,6 +1147,9 @@ static int acpi_ec_add(struct acpi_device *device)
ret = ec_install_handlers(ec);
+ /* Reprobe devices depending on the EC */
+ acpi_walk_dep_device_list(ec->handle);
+
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
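
The ec.c hunks introduce the ec_log*/ec_dbg* helpers so the "+++++"/"=====" style markers are added (and dropped in non-DEBUG builds) in one place instead of being hand-written in every message. A rough userspace analogue of how the macros compose, relying on the same GNU ##__VA_ARGS__ extension the kernel uses:

#include <stdio.h>

/* Userspace analogue of the ec_dbg()/ec_log() helpers added above */
#ifdef DEBUG
#define EC_DBG_SEP " "
#define EC_DBG_DRV "+++++"
#else
#define EC_DBG_SEP ""
#define EC_DBG_DRV
#endif

#define ec_log_raw(fmt, ...)    printf(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_log_drv(fmt, ...)    ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)

int main(void)
{
	/* With -DDEBUG: "+++++ EC started +++++"; without: "EC started" */
	ec_log_drv("EC started");
	return 0;
}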
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f774c65ecb8b..39c485b0c25c 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -168,7 +168,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
unsigned int node_id;
int retval = -EINVAL;
- if (ACPI_COMPANION(dev)) {
+ if (has_acpi_companion(dev)) {
if (acpi_dev) {
dev_warn(dev, "ACPI companion already set\n");
return -EINVAL;
@@ -220,7 +220,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
- if (!ACPI_COMPANION(dev))
+ if (!has_acpi_companion(dev))
ACPI_COMPANION_SET(dev, acpi_dev);
acpi_physnode_link_name(physical_node_name, node_id);
diff --git a/drivers/acpi/gsi.c b/drivers/acpi/gsi.c
new file mode 100644
index 000000000000..38208f2d0e69
--- /dev/null
+++ b/drivers/acpi/gsi.c
@@ -0,0 +1,105 @@
+/*
+ * ACPI GSI IRQ layer
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+
+enum acpi_irq_model_id acpi_irq_model;
+
+static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity)
+{
+ switch (polarity) {
+ case ACPI_ACTIVE_LOW:
+ return trigger == ACPI_EDGE_SENSITIVE ?
+ IRQ_TYPE_EDGE_FALLING :
+ IRQ_TYPE_LEVEL_LOW;
+ case ACPI_ACTIVE_HIGH:
+ return trigger == ACPI_EDGE_SENSITIVE ?
+ IRQ_TYPE_EDGE_RISING :
+ IRQ_TYPE_LEVEL_HIGH;
+ case ACPI_ACTIVE_BOTH:
+ if (trigger == ACPI_EDGE_SENSITIVE)
+ return IRQ_TYPE_EDGE_BOTH;
+ default:
+ return IRQ_TYPE_NONE;
+ }
+}
+
+/**
+ * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
+ * @gsi: GSI IRQ number to map
+ * @irq: pointer where linux IRQ number is stored
+ *
+ * irq location updated with irq value [>0 on success, 0 on failure]
+ *
+ * Returns: linux IRQ number on success (>0)
+ * -EINVAL on failure
+ */
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+{
+ /*
+ * Only default domain is supported at present, always find
+ * the mapping corresponding to default domain by passing NULL
+ * as irq_domain parameter
+ */
+ *irq = irq_find_mapping(NULL, gsi);
+ /*
+ * *irq == 0 means no mapping, that should
+ * be reported as a failure
+ */
+ return (*irq > 0) ? *irq : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+/**
+ * acpi_register_gsi() - Map a GSI to a linux IRQ number
+ * @dev: device for which IRQ has to be mapped
+ * @gsi: GSI IRQ number
+ * @trigger: trigger type of the GSI number to be mapped
+ * @polarity: polarity of the GSI to be mapped
+ *
+ * Returns: a valid linux IRQ number on success
+ * -EINVAL on failure
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+ int polarity)
+{
+ unsigned int irq;
+ unsigned int irq_type = acpi_gsi_get_irq_type(trigger, polarity);
+
+ /*
+ * There is no way at present to look-up the IRQ domain on ACPI,
+ * hence always create mapping referring to the default domain
+ * by passing NULL as irq_domain parameter
+ */
+ irq = irq_create_mapping(NULL, gsi);
+ if (!irq)
+ return -EINVAL;
+
+ /* Set irq type if specified and different than the current one */
+ if (irq_type != IRQ_TYPE_NONE &&
+ irq_type != irq_get_trigger_type(irq))
+ irq_set_irq_type(irq, irq_type);
+ return irq;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+/**
+ * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
+ * @gsi: GSI IRQ number
+ */
+void acpi_unregister_gsi(u32 gsi)
+{
+ int irq = irq_find_mapping(NULL, gsi);
+
+ irq_dispose_mapping(irq);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
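
For context, a hedged sketch of how a driver on an ACPI/GIC system might consume this new mapping layer; the GSI value, handler and function names are invented for illustration, and level-high is used only as a common default:

#include <linux/acpi.h>
#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	/* Acknowledge the (hypothetical) device here */
	return IRQ_HANDLED;
}

static int demo_hook_gsi(struct device *dev, u32 gsi)
{
	int irq;

	/* Map the GSI via the default irqdomain created by the GIC driver */
	irq = acpi_register_gsi(dev, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0)
		return irq;

	return request_irq(irq, demo_isr, 0, "demo", NULL);
}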
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 56b321aa2b1c..ba4a61e964be 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -161,7 +161,11 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
/*--------------------------------------------------------------------------
Suspend/Resume
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
extern int acpi_sleep_init(void);
+#else
+static inline int acpi_sleep_init(void) { return -ENXIO; }
+#endif
#ifdef CONFIG_ACPI_SLEEP
int acpi_sleep_proc_init(void);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f9eeae871593..7ccba395c9dd 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
request_mem_region(addr, length, desc);
}
-static int __init acpi_reserve_resources(void)
+static void __init acpi_reserve_resources(void)
{
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1a_EVT_BLK");
@@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void)
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
-
- return 0;
}
-device_initcall(acpi_reserve_resources);
void acpi_os_printf(const char *fmt, ...)
{
@@ -336,11 +333,11 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
return NULL;
}
-#ifndef CONFIG_IA64
-#define should_use_kmap(pfn) page_is_ram(pfn)
-#else
+#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn) 0
+#else
+#define should_use_kmap(pfn) page_is_ram(pfn)
#endif
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
@@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
+ acpi_reserve_resources();
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index ef7d8ff95abe..42df46a86c25 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -207,5 +207,5 @@ static int __init intel_crc_pmic_opregion_driver_init(void)
}
module_init(intel_crc_pmic_opregion_driver_init);
-MODULE_DESCRIPTION("CrystalCove ACPI opration region driver");
+MODULE_DESCRIPTION("CrystalCove ACPI operation region driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 7962651cdbd4..b1ec78b8a645 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -32,7 +32,7 @@ static struct acpi_table_madt *get_madt_table(void)
}
static int map_lapic_id(struct acpi_subtable_header *entry,
- u32 acpi_id, int *apic_id)
+ u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_apic *lapic =
container_of(entry, struct acpi_madt_local_apic, header);
@@ -48,7 +48,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, int *apic_id)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_x2apic *apic =
container_of(entry, struct acpi_madt_local_x2apic, header);
@@ -65,7 +65,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, int *apic_id)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
container_of(entry, struct acpi_madt_local_sapic, header);
@@ -83,10 +83,35 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
return 0;
}
-static int map_madt_entry(int type, u32 acpi_id)
+/*
+ * Retrieve the ARM CPU physical identifier (MPIDR)
+ */
+static int map_gicc_mpidr(struct acpi_subtable_header *entry,
+ int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ container_of(entry, struct acpi_madt_generic_interrupt, header);
+
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return -ENODEV;
+
+ /* device_declaration means Device object in DSDT, in the
+ * GIC interrupt model, logical processors are required to
+ * have a Processor Device object in the DSDT, so we should
+ * check device_declaration here
+ */
+ if (device_declaration && (gicc->uid == acpi_id)) {
+ *mpidr = gicc->arm_mpidr;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static phys_cpuid_t map_madt_entry(int type, u32 acpi_id)
{
unsigned long madt_end, entry;
- int phys_id = -1; /* CPU hardware ID */
+ phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
struct acpi_table_madt *madt;
madt = get_madt_table();
@@ -111,18 +136,21 @@ static int map_madt_entry(int type, u32 acpi_id)
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
+ } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
+ if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
+ break;
}
entry += header->length;
}
return phys_id;
}
-static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
- int phys_id = -1;
+ phys_cpuid_t phys_id = PHYS_CPUID_INVALID;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@@ -143,33 +171,35 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
map_x2apic_id(header, type, acpi_id, &phys_id);
+ else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
+ map_gicc_mpidr(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return phys_id;
}
-int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
- int phys_id;
+ phys_cpuid_t phys_id;
phys_id = map_mat_entry(handle, type, acpi_id);
- if (phys_id == -1)
+ if (phys_id == PHYS_CPUID_INVALID)
phys_id = map_madt_entry(type, acpi_id);
return phys_id;
}
-int acpi_map_cpuid(int phys_id, u32 acpi_id)
+int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
- if (phys_id == -1) {
+ if (phys_id == PHYS_CPUID_INVALID) {
/*
* On UP processor, there is no _MAT or MADT table.
- * So above phys_id is always set to -1.
+ * So above phys_id is always set to PHYS_CPUID_INVALID.
*
* BIOS may define multiple CPU handles even for UP processor.
* For example,
@@ -190,7 +220,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
- return phys_id;
+ return -1;
}
#ifdef CONFIG_SMP
@@ -208,7 +238,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id)
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
- int phys_id;
+ phys_cpuid_t phys_id;
phys_id = acpi_get_phys_id(handle, type, acpi_id);
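
The processor_core.c changes replace int hardware IDs with phys_cpuid_t plus a PHYS_CPUID_INVALID sentinel, because an ARM MPIDR does not fit in an int and -1 can no longer double as the error marker. A standalone sketch of the difference, using a local typedef that only mirrors the kernel's idea rather than its per-architecture definition:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_cpuid_t;                 /* illustrative, not the kernel's */
#define PHYS_CPUID_INVALID ((phys_cpuid_t)-1)

int main(void)
{
	uint64_t mpidr = 0x0000000100000000ULL;  /* Aff2 = 1, other fields 0 */

	int truncated = (int)mpidr;              /* old code: collapses to 0 */
	phys_cpuid_t id = (phys_cpuid_t)mpidr;   /* new code: value preserved */

	printf("as int: %d, as phys_cpuid_t: 0x%llx, invalid? %s\n",
	       truncated, (unsigned long long)id,
	       id == PHYS_CPUID_INVALID ? "yes" : "no");
	return 0;
}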
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 5589a6e2a023..8244f013f210 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
* @ares: Input ACPI resource object.
* @types: Valid resource types of IORESOURCE_XXX
*
- * This is a hepler function to support acpi_dev_get_resources(), which filters
+ * This is a helper function to support acpi_dev_get_resources(), which filters
* ACPI resource objects according to resource types.
*/
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index cd827625cf07..01504c819e8f 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -684,7 +684,7 @@ static int acpi_sbs_add(struct acpi_device *device)
if (!sbs_manager_broken) {
result = acpi_manager_get_info(sbs);
if (!result) {
- sbs->manager_present = 0;
+ sbs->manager_present = 1;
for (id = 0; id < MAX_SBS_BAT; ++id)
if ((sbs->batteries_supported & (1 << id)))
acpi_battery_add(sbs, id);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 26e5b5060523..bf034f8b7c1a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/dmi.h>
#include "sbshc.h"
#define PREFIX "ACPI: "
@@ -87,6 +88,8 @@ enum acpi_smb_offset {
ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
};
+static bool macbook;
+
static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
{
return ec_read(hc->offset + address, data);
@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
}
mutex_lock(&hc->lock);
+ if (macbook)
+ udelay(5);
if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
goto end;
if (temp) {
@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
+static int macbook_dmi_match(const struct dmi_system_id *d)
+{
+ pr_debug("Detected MacBook, enabling workaround\n");
+ macbook = true;
+ return 0;
+}
+
+static struct dmi_system_id acpi_smbus_dmi_table[] = {
+ { macbook_dmi_match, "Apple MacBook", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
+ },
+ { },
+};
+
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
unsigned long long val;
struct acpi_smb_hc *hc;
+ dmi_check_system(acpi_smbus_dmi_table);
+
if (!device)
return -EINVAL;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index bbca7830e18a..03141aa4ea95 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -114,7 +114,12 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
return 0;
}
-/*
+/**
+ * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
* char *modalias: "acpi:IBM0001:ACPI0001"
@@ -122,68 +127,98 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
* -EINVAL: output error
* -ENOMEM: output is truncated
*/
-static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
- int size)
+static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+ int size)
{
int len;
int count;
struct acpi_hardware_id *id;
- if (list_empty(&acpi_dev->pnp.ids))
- return 0;
-
/*
- * If the device has PRP0001 we expose DT compatible modalias
- * instead in form of of:NnameTCcompatible.
+ * Since we skip PRP0001 from the modalias below, 0 should be returned
+ * if PRP0001 is the only ACPI/PNP ID in the device's list.
*/
- if (acpi_dev->data.of_compatible) {
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- const union acpi_object *of_compatible, *obj;
- int i, nval;
- char *c;
-
- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
- /* DT strings are all in lower case */
- for (c = buf.pointer; *c != '\0'; c++)
- *c = tolower(*c);
-
- len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
- ACPI_FREE(buf.pointer);
-
- of_compatible = acpi_dev->data.of_compatible;
- if (of_compatible->type == ACPI_TYPE_PACKAGE) {
- nval = of_compatible->package.count;
- obj = of_compatible->package.elements;
- } else { /* Must be ACPI_TYPE_STRING. */
- nval = 1;
- obj = of_compatible;
- }
- for (i = 0; i < nval; i++, obj++) {
- count = snprintf(&modalias[len], size, "C%s",
- obj->string.pointer);
- if (count < 0)
- return -EINVAL;
- if (count >= size)
- return -ENOMEM;
-
- len += count;
- size -= count;
- }
- } else {
- len = snprintf(modalias, size, "acpi:");
- size -= len;
+ count = 0;
+ list_for_each_entry(id, &acpi_dev->pnp.ids, list)
+ if (strcmp(id->id, "PRP0001"))
+ count++;
- list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
- count = snprintf(&modalias[len], size, "%s:", id->id);
- if (count < 0)
- return -EINVAL;
- if (count >= size)
- return -ENOMEM;
- len += count;
- size -= count;
- }
+ if (!count)
+ return 0;
+
+ len = snprintf(modalias, size, "acpi:");
+ if (len <= 0)
+ return len;
+
+ size -= len;
+
+ list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
+ if (!strcmp(id->id, "PRP0001"))
+ continue;
+
+ count = snprintf(&modalias[len], size, "%s:", id->id);
+ if (count < 0)
+ return -EINVAL;
+
+ if (count >= size)
+ return -ENOMEM;
+
+ len += count;
+ size -= count;
}
+ modalias[len] = '\0';
+ return len;
+}
+/**
+ * create_of_modalias - Creates DT compatible string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
+ * Expose DT compatible modalias as of:NnameTCcompatible. This function should
+ * only be called for devices having PRP0001 in their list of ACPI/PNP IDs.
+ */
+static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+ int size)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ const union acpi_object *of_compatible, *obj;
+ int len, count;
+ int i, nval;
+ char *c;
+
+ acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ /* DT strings are all in lower case */
+ for (c = buf.pointer; *c != '\0'; c++)
+ *c = tolower(*c);
+
+ len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+ ACPI_FREE(buf.pointer);
+
+ if (len <= 0)
+ return len;
+
+ of_compatible = acpi_dev->data.of_compatible;
+ if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+ nval = of_compatible->package.count;
+ obj = of_compatible->package.elements;
+ } else { /* Must be ACPI_TYPE_STRING. */
+ nval = 1;
+ obj = of_compatible;
+ }
+ for (i = 0; i < nval; i++, obj++) {
+ count = snprintf(&modalias[len], size, "C%s",
+ obj->string.pointer);
+ if (count < 0)
+ return -EINVAL;
+
+ if (count >= size)
+ return -ENOMEM;
+
+ len += count;
+ size -= count;
+ }
modalias[len] = '\0';
return len;
}
@@ -194,7 +229,8 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
*
* Check if the given device has an ACPI companion and if that companion has
* a valid list of PNP IDs, and if the device is the first (primary) physical
- * device associated with it.
+ * device associated with it. Return the companion pointer if that's the case
+ * or NULL otherwise.
*
* If multiple physical devices are attached to a single ACPI companion, we need
* to be careful. The usage scenario for this kind of relationship is that all
@@ -208,88 +244,129 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
* resources available from it but they will be matched normally using functions
* provided by their bus types (and analogously for their modalias).
*/
-static bool acpi_companion_match(const struct device *dev)
+static struct acpi_device *acpi_companion_match(const struct device *dev)
{
struct acpi_device *adev;
- bool ret;
+ struct mutex *physical_node_lock;
adev = ACPI_COMPANION(dev);
if (!adev)
- return false;
+ return NULL;
if (list_empty(&adev->pnp.ids))
- return false;
+ return NULL;
- mutex_lock(&adev->physical_node_lock);
+ physical_node_lock = &adev->physical_node_lock;
+ mutex_lock(physical_node_lock);
if (list_empty(&adev->physical_node_list)) {
- ret = false;
+ adev = NULL;
} else {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
- ret = node->dev == dev;
+ if (node->dev != dev)
+ adev = NULL;
}
- mutex_unlock(&adev->physical_node_lock);
+ mutex_unlock(physical_node_lock);
- return ret;
+ return adev;
}
-/*
- * Creates uevent modalias field for ACPI enumerated devices.
- * Because the other buses does not support ACPI HIDs & CIDs.
- * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
- * "acpi:IBM0001:ACPI0001"
- */
-int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+static int __acpi_device_uevent_modalias(struct acpi_device *adev,
+ struct kobj_uevent_env *env)
{
int len;
- if (!acpi_companion_match(dev))
+ if (!adev)
return -ENODEV;
+ if (list_empty(&adev->pnp.ids))
+ return 0;
+
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
- len = create_modalias(ACPI_COMPANION(dev), &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len <= 0)
+
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len < 0)
return len;
+
env->buflen += len;
+ if (!adev->data.of_compatible)
+ return 0;
+
+ if (len > 0 && add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len < 0)
+ return len;
+
+ env->buflen += len;
+
return 0;
}
-EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
/*
- * Creates modalias sysfs attribute for ACPI enumerated devices.
+ * Creates uevent modalias field for ACPI enumerated devices.
 * Because the other buses do not support ACPI HIDs & CIDs.
* e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
* "acpi:IBM0001:ACPI0001"
*/
-int acpi_device_modalias(struct device *dev, char *buf, int size)
+int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
{
- int len;
+ return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
+}
+EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
- if (!acpi_companion_match(dev))
+static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
+{
+ int len, count;
+
+ if (!adev)
return -ENODEV;
- len = create_modalias(ACPI_COMPANION(dev), buf, size -1);
- if (len <= 0)
+ if (list_empty(&adev->pnp.ids))
+ return 0;
+
+ len = create_pnp_modalias(adev, buf, size - 1);
+ if (len < 0) {
+ return len;
+ } else if (len > 0) {
+ buf[len++] = '\n';
+ size -= len;
+ }
+ if (!adev->data.of_compatible)
return len;
- buf[len++] = '\n';
+
+ count = create_of_modalias(adev, buf + len, size - 1);
+ if (count < 0) {
+ return count;
+ } else if (count > 0) {
+ len += count;
+ buf[len++] = '\n';
+ }
+
return len;
}
+
+/*
+ * Creates modalias sysfs attribute for ACPI enumerated devices.
+ * Because the other buses do not support ACPI HIDs & CIDs.
+ * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * "acpi:IBM0001:ACPI0001"
+ */
+int acpi_device_modalias(struct device *dev, char *buf, int size)
+{
+ return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
+}
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- int len;
-
- len = create_modalias(acpi_dev, buf, 1024);
- if (len <= 0)
- return len;
- buf[len++] = '\n';
- return len;
+ return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
@@ -298,7 +375,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
struct acpi_device_physical_node *pn;
bool offline = true;
- mutex_lock(&adev->physical_node_lock);
+ /*
+ * acpi_container_offline() calls this for all of the container's
+ * children under the container's physical_node_lock lock.
+ */
+ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
list_for_each_entry(pn, &adev->physical_node_list, node)
if (device_supports_offline(pn->dev) && !pn->dev->offline) {
@@ -894,8 +975,51 @@ static void acpi_device_remove_files(struct acpi_device *dev)
ACPI Bus operations
-------------------------------------------------------------------------- */
+/**
+ * acpi_of_match_device - Match device object using the "compatible" property.
+ * @adev: ACPI device object to match.
+ * @of_match_table: List of device IDs to match against.
+ *
+ * If @dev has an ACPI companion which has the special PRP0001 device ID in its
+ * list of identifiers and a _DSD object with the "compatible" property, use
+ * that property to match against the given list of identifiers.
+ */
+static bool acpi_of_match_device(struct acpi_device *adev,
+ const struct of_device_id *of_match_table)
+{
+ const union acpi_object *of_compatible, *obj;
+ int i, nval;
+
+ if (!adev)
+ return false;
+
+ of_compatible = adev->data.of_compatible;
+ if (!of_match_table || !of_compatible)
+ return false;
+
+ if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+ nval = of_compatible->package.count;
+ obj = of_compatible->package.elements;
+ } else { /* Must be ACPI_TYPE_STRING. */
+ nval = 1;
+ obj = of_compatible;
+ }
+ /* Now we can look for the driver DT compatible strings */
+ for (i = 0; i < nval; i++, obj++) {
+ const struct of_device_id *id;
+
+ for (id = of_match_table; id->compatible[0]; id++)
+ if (!strcasecmp(obj->string.pointer, id->compatible))
+ return true;
+ }
+
+ return false;
+}
+
static const struct acpi_device_id *__acpi_match_device(
- struct acpi_device *device, const struct acpi_device_id *ids)
+ struct acpi_device *device,
+ const struct acpi_device_id *ids,
+ const struct of_device_id *of_ids)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
@@ -904,14 +1028,27 @@ static const struct acpi_device_id *__acpi_match_device(
* If the device is not present, it is unnecessary to load device
* driver for it.
*/
- if (!device->status.present)
+ if (!device || !device->status.present)
return NULL;
- for (id = ids; id->id[0]; id++)
- list_for_each_entry(hwid, &device->pnp.ids, list)
+ list_for_each_entry(hwid, &device->pnp.ids, list) {
+ /* First, check the ACPI/PNP IDs provided by the caller. */
+ for (id = ids; id->id[0]; id++)
if (!strcmp((char *) id->id, hwid->id))
return id;
+ /*
+ * Next, check the special "PRP0001" ID and try to match the
+ * "compatible" property if found.
+ *
+ * The id returned by the below is not valid, but the only
+ * caller passing non-NULL of_ids here is only interested in
+ * whether or not the return value is NULL.
+ */
+ if (!strcmp("PRP0001", hwid->id)
+ && acpi_of_match_device(device, of_ids))
+ return id;
+ }
return NULL;
}
@@ -929,68 +1066,26 @@ static const struct acpi_device_id *__acpi_match_device(
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
- struct acpi_device *adev;
- acpi_handle handle = ACPI_HANDLE(dev);
-
- if (!ids || !handle || acpi_bus_get_device(handle, &adev))
- return NULL;
-
- if (!acpi_companion_match(dev))
- return NULL;
-
- return __acpi_match_device(adev, ids);
+ return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
}
EXPORT_SYMBOL_GPL(acpi_match_device);
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
- return __acpi_match_device(device, ids) ? 0 : -ENOENT;
+ return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
-/* Performs match against special "PRP0001" shoehorn ACPI ID */
-static bool acpi_of_driver_match_device(struct device *dev,
- const struct device_driver *drv)
-{
- const union acpi_object *of_compatible, *obj;
- struct acpi_device *adev;
- int i, nval;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return false;
-
- of_compatible = adev->data.of_compatible;
- if (!drv->of_match_table || !of_compatible)
- return false;
-
- if (of_compatible->type == ACPI_TYPE_PACKAGE) {
- nval = of_compatible->package.count;
- obj = of_compatible->package.elements;
- } else { /* Must be ACPI_TYPE_STRING. */
- nval = 1;
- obj = of_compatible;
- }
- /* Now we can look for the driver DT compatible strings */
- for (i = 0; i < nval; i++, obj++) {
- const struct of_device_id *id;
-
- for (id = drv->of_match_table; id->compatible[0]; id++)
- if (!strcasecmp(obj->string.pointer, id->compatible))
- return true;
- }
-
- return false;
-}
-
bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
if (!drv->acpi_match_table)
- return acpi_of_driver_match_device(dev, drv);
+ return acpi_of_match_device(ACPI_COMPANION(dev),
+ drv->of_match_table);
- return !!acpi_match_device(drv->acpi_match_table, dev);
+ return !!__acpi_match_device(acpi_companion_match(dev),
+ drv->acpi_match_table, drv->of_match_table);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
@@ -1031,20 +1126,7 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- int len;
-
- if (list_empty(&acpi_dev->pnp.ids))
- return 0;
-
- if (add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
- len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len <= 0)
- return len;
- env->buflen += len;
- return 0;
+ return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
}
static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
@@ -1062,10 +1144,10 @@ static void acpi_device_notify_fixed(void *data)
acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
}
-static acpi_status acpi_device_fixed_event(void *data)
+static u32 acpi_device_fixed_event(void *data)
{
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
- return AE_OK;
+ return ACPI_INTERRUPT_HANDLED;
}
static int acpi_device_install_notify_handler(struct acpi_device *device)
@@ -2310,9 +2392,6 @@ static void acpi_default_enumeration(struct acpi_device *device)
struct list_head resource_list;
bool is_spi_i2c_slave = false;
- if (!device->pnp.type.platform_id || device->handler)
- return;
-
/*
	 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
* respective parents.
@@ -2325,6 +2404,29 @@ static void acpi_default_enumeration(struct acpi_device *device)
acpi_create_platform_device(device);
}
+static const struct acpi_device_id generic_device_ids[] = {
+ {"PRP0001", },
+ {"", },
+};
+
+static int acpi_generic_device_attach(struct acpi_device *adev,
+ const struct acpi_device_id *not_used)
+{
+ /*
+ * Since PRP0001 is the only ID handled here, the test below can be
+ * unconditional.
+ */
+ if (adev->data.of_compatible)
+ acpi_default_enumeration(adev);
+
+ return 1;
+}
+
+static struct acpi_scan_handler generic_device_handler = {
+ .ids = generic_device_ids,
+ .attach = acpi_generic_device_attach,
+};
+
static int acpi_scan_attach_handler(struct acpi_device *device)
{
struct acpi_hardware_id *hwid;
@@ -2350,8 +2452,6 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
break;
}
}
- if (!ret)
- acpi_default_enumeration(device);
return ret;
}
@@ -2393,6 +2493,9 @@ static void acpi_bus_attach(struct acpi_device *device)
ret = device_attach(&device->dev);
if (ret < 0)
return;
+
+ if (!ret && device->pnp.type.platform_id)
+ acpi_default_enumeration(device);
}
device->flags.visited = true;
@@ -2551,6 +2654,8 @@ int __init acpi_scan_init(void)
acpi_pnp_init();
acpi_int340x_thermal_init();
+ acpi_scan_add_handler(&generic_device_handler);
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
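
The scan.c rework splits modalias generation into a PNP form and a DT form and drops the special PRP0001 ID from the former. A simplified userspace sketch of create_pnp_modalias()'s skip logic; the ID list is made up, and the real function additionally handles truncation and emits the separate of:N...T...C... string:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *ids[] = { "PRP0001", "IBM0001", "ACPI0001" };
	char modalias[64];
	int len = snprintf(modalias, sizeof(modalias), "acpi:");

	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		if (!strcmp(ids[i], "PRP0001"))
			continue;               /* skipped, as in the patch */
		len += snprintf(modalias + len, sizeof(modalias) - len,
				"%s:", ids[i]);
	}
	printf("%s\n", modalias);               /* "acpi:IBM0001:ACPI0001:" */
	return 0;
}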
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7f251dd1a687..2f0d4db40a9e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -629,6 +629,7 @@ static int acpi_freeze_begin(void)
static int acpi_freeze_prepare(void)
{
+ acpi_enable_wakeup_devices(ACPI_STATE_S0);
acpi_enable_all_wakeup_gpes();
acpi_os_wait_events_complete();
enable_irq_wake(acpi_gbl_FADT.sci_interrupt);
@@ -637,6 +638,7 @@ static int acpi_freeze_prepare(void)
static void acpi_freeze_restore(void)
{
+ acpi_disable_wakeup_devices(ACPI_STATE_S0);
disable_irq_wake(acpi_gbl_FADT.sci_interrupt);
acpi_enable_all_runtime_gpes();
}
@@ -806,21 +808,6 @@ static void acpi_sleep_hibernate_setup(void)
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */
-int acpi_suspend(u32 acpi_state)
-{
- suspend_state_t states[] = {
- [1] = PM_SUSPEND_STANDBY,
- [3] = PM_SUSPEND_MEM,
- [5] = PM_SUSPEND_MAX
- };
-
- if (acpi_state < 6 && states[acpi_state])
- return pm_suspend(states[acpi_state]);
- if (acpi_state == 4)
- return hibernate();
- return -EINVAL;
-}
-
static void acpi_power_off_prepare(void)
{
/* Prepare to power off the system */
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 0143540a2519..c797ffa568d5 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,6 +1,4 @@
-extern int acpi_suspend(u32 state);
-
extern void acpi_enable_wakeup_devices(u8 sleep_state);
extern void acpi_disable_wakeup_devices(u8 sleep_state);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 13e577c80201..0876d77b3206 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -527,7 +527,7 @@ static ssize_t counter_show(struct kobject *kobj,
acpi_irq_not_handled;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
acpi_gpe_count;
- size = sprintf(buf, "%8d", all_counters[index].count);
+ size = sprintf(buf, "%8u", all_counters[index].count);
/* "gpe_all" or "sci" */
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 93b81523a2fe..2e19189da0ee 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -23,6 +23,8 @@
*
*/
+/* Uncomment next line to get verbose printout */
+/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
@@ -61,9 +63,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic *p =
(struct acpi_madt_local_apic *)header;
- pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
- p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+ p->processor_id, p->id,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -71,9 +73,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_x2apic *p =
(struct acpi_madt_local_x2apic *)header;
- pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
- p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
+ p->local_apic_id, p->uid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -81,8 +83,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_apic *p =
(struct acpi_madt_io_apic *)header;
- pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
- p->id, p->address, p->global_irq_base);
+ pr_debug("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+ p->id, p->address, p->global_irq_base);
}
break;
@@ -155,9 +157,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_sapic *p =
(struct acpi_madt_io_sapic *)header;
- pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
- p->id, (void *)(unsigned long)p->address,
- p->global_irq_base);
+ pr_debug("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+ p->id, (void *)(unsigned long)p->address,
+ p->global_irq_base);
}
break;
@@ -165,9 +167,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_sapic *p =
(struct acpi_madt_local_sapic *)header;
- pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
- p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+ p->processor_id, p->id, p->eid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -183,6 +185,28 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
break;
+ case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
+ {
+ struct acpi_madt_generic_interrupt *p =
+ (struct acpi_madt_generic_interrupt *)header;
+ pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
+ p->uid, p->base_address,
+ p->arm_mpidr,
+ (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+
+ }
+ break;
+
+ case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR:
+ {
+ struct acpi_madt_generic_distributor *p =
+ (struct acpi_madt_generic_distributor *)header;
+ pr_debug("GIC Distributor (gic_id[0x%04x] address[%llx] gsi_base[%d])\n",
+ p->gic_id, p->base_address,
+ p->global_irq_base);
+ }
+ break;
+
default:
pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
header->type);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 26eb70c8f518..cc79d3fedfb2 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -82,9 +82,15 @@ module_param(allow_duplicates, bool, 0644);
* For Windows 8 systems: used to decide if video module
* should skip registering backlight interface of its own.
*/
-static int use_native_backlight_param = -1;
+enum {
+ NATIVE_BACKLIGHT_NOT_SET = -1,
+ NATIVE_BACKLIGHT_OFF,
+ NATIVE_BACKLIGHT_ON,
+};
+
+static int use_native_backlight_param = NATIVE_BACKLIGHT_NOT_SET;
module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
-static bool use_native_backlight_dmi = true;
+static int use_native_backlight_dmi = NATIVE_BACKLIGHT_NOT_SET;
static int register_count;
static struct mutex video_list_lock;
@@ -237,15 +243,16 @@ static void acpi_video_switch_brightness(struct work_struct *work);
static bool acpi_video_use_native_backlight(void)
{
- if (use_native_backlight_param != -1)
+ if (use_native_backlight_param != NATIVE_BACKLIGHT_NOT_SET)
return use_native_backlight_param;
- else
+ else if (use_native_backlight_dmi != NATIVE_BACKLIGHT_NOT_SET)
return use_native_backlight_dmi;
+ return acpi_osi_is_win8();
}
bool acpi_video_verify_backlight_support(void)
{
- if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
+ if (acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
@@ -414,7 +421,13 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
static int __init video_disable_native_backlight(const struct dmi_system_id *d)
{
- use_native_backlight_dmi = false;
+ use_native_backlight_dmi = NATIVE_BACKLIGHT_OFF;
+ return 0;
+}
+
+static int __init video_enable_native_backlight(const struct dmi_system_id *d)
+{
+ use_native_backlight_dmi = NATIVE_BACKLIGHT_ON;
return 0;
}
@@ -559,6 +572,17 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
},
},
+
+ /* Non win8 machines which need native backlight nevertheless */
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
+ .callback = video_enable_native_backlight,
+ .ident = "Lenovo Ideapad Z570",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
+ },
+ },
{}
};
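
The video.c hunks turn the native-backlight decision into a three-level precedence: module parameter, then DMI quirk, then the Windows 8 OSI heuristic. A small sketch of that precedence as a pure function (the enum and names are local to this example):

#include <stdbool.h>
#include <stdio.h>

enum { NOT_SET = -1, OFF = 0, ON = 1 };

static bool use_native_backlight(int param, int dmi, bool osi_is_win8)
{
	if (param != NOT_SET)        /* 1. explicit module parameter wins */
		return param;
	if (dmi != NOT_SET)          /* 2. then a matching DMI quirk */
		return dmi;
	return osi_is_win8;          /* 3. otherwise the Win8 heuristic */
}

int main(void)
{
	/* Ideapad Z570 case: pre-Win8 firmware, but DMI forces native on */
	printf("%d\n", use_native_backlight(NOT_SET, ON, false));
	return 0;
}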
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 27c43499977a..c42feb2bacd0 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -174,14 +174,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
},
},
- {
- .callback = video_detect_force_vendor,
- .ident = "Lenovo IdeaPad Z570",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
- },
- },
{ },
};
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index c6dc3548e5d1..b0b688c481e8 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -25,49 +25,50 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <soc/tegra/ahb.h>
#define DRV_NAME "tegra-ahb"
-#define AHB_ARBITRATION_DISABLE 0x00
-#define AHB_ARBITRATION_PRIORITY_CTRL 0x04
+#define AHB_ARBITRATION_DISABLE 0x04
+#define AHB_ARBITRATION_PRIORITY_CTRL 0x08
#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
#define PRIORITY_SELECT_USB BIT(6)
#define PRIORITY_SELECT_USB2 BIT(18)
#define PRIORITY_SELECT_USB3 BIT(17)
-#define AHB_GIZMO_AHB_MEM 0x0c
+#define AHB_GIZMO_AHB_MEM 0x10
#define ENB_FAST_REARBITRATE BIT(2)
#define DONT_SPLIT_AHB_WR BIT(7)
-#define AHB_GIZMO_APB_DMA 0x10
-#define AHB_GIZMO_IDE 0x18
-#define AHB_GIZMO_USB 0x1c
-#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20
-#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24
-#define AHB_GIZMO_COP_AHB_BRIDGE 0x28
-#define AHB_GIZMO_XBAR_APB_CTLR 0x2c
-#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30
-#define AHB_GIZMO_NAND 0x3c
-#define AHB_GIZMO_SDMMC4 0x44
-#define AHB_GIZMO_XIO 0x48
-#define AHB_GIZMO_BSEV 0x60
-#define AHB_GIZMO_BSEA 0x70
-#define AHB_GIZMO_NOR 0x74
-#define AHB_GIZMO_USB2 0x78
-#define AHB_GIZMO_USB3 0x7c
+#define AHB_GIZMO_APB_DMA 0x14
+#define AHB_GIZMO_IDE 0x1c
+#define AHB_GIZMO_USB 0x20
+#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x24
+#define AHB_GIZMO_CPU_AHB_BRIDGE 0x28
+#define AHB_GIZMO_COP_AHB_BRIDGE 0x2c
+#define AHB_GIZMO_XBAR_APB_CTLR 0x30
+#define AHB_GIZMO_VCP_AHB_BRIDGE 0x34
+#define AHB_GIZMO_NAND 0x40
+#define AHB_GIZMO_SDMMC4 0x48
+#define AHB_GIZMO_XIO 0x4c
+#define AHB_GIZMO_BSEV 0x64
+#define AHB_GIZMO_BSEA 0x74
+#define AHB_GIZMO_NOR 0x78
+#define AHB_GIZMO_USB2 0x7c
+#define AHB_GIZMO_USB3 0x80
#define IMMEDIATE BIT(18)
-#define AHB_GIZMO_SDMMC1 0x80
-#define AHB_GIZMO_SDMMC2 0x84
-#define AHB_GIZMO_SDMMC3 0x88
-#define AHB_MEM_PREFETCH_CFG_X 0xd8
-#define AHB_ARBITRATION_XBAR_CTRL 0xdc
-#define AHB_MEM_PREFETCH_CFG3 0xe0
-#define AHB_MEM_PREFETCH_CFG4 0xe4
-#define AHB_MEM_PREFETCH_CFG1 0xec
-#define AHB_MEM_PREFETCH_CFG2 0xf0
+#define AHB_GIZMO_SDMMC1 0x84
+#define AHB_GIZMO_SDMMC2 0x88
+#define AHB_GIZMO_SDMMC3 0x8c
+#define AHB_MEM_PREFETCH_CFG_X 0xdc
+#define AHB_ARBITRATION_XBAR_CTRL 0xe0
+#define AHB_MEM_PREFETCH_CFG3 0xe4
+#define AHB_MEM_PREFETCH_CFG4 0xe8
+#define AHB_MEM_PREFETCH_CFG1 0xf0
+#define AHB_MEM_PREFETCH_CFG2 0xf4
#define PREFETCH_ENB BIT(31)
#define MST_ID(x) (((x) & 0x1f) << 26)
#define AHBDMA_MST_ID MST_ID(5)
@@ -77,10 +78,20 @@
#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
-#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xfc
#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
+/*
+ * INCORRECT_BASE_ADDR_LOW_BYTE: Legacy kernel DT files for Tegra SoCs
+ * prior to Tegra124 generally use a physical base address ending in
+ * 0x4 for the AHB IP block. According to the TRM, the low byte
+ * should be 0x0. During device probing, this macro is used to detect
+ * whether the passed-in physical address is incorrect, and if so, to
+ * correct it.
+ */
+#define INCORRECT_BASE_ADDR_LOW_BYTE 0x4
+
static struct platform_driver tegra_ahb_driver;
static const u32 tegra_ahb_gizmo[] = {
@@ -257,6 +268,15 @@ static int tegra_ahb_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Correct the IP block base address if necessary */
+ if (res &&
+ (res->start & INCORRECT_BASE_ADDR_LOW_BYTE) ==
+ INCORRECT_BASE_ADDR_LOW_BYTE) {
+ dev_warn(&pdev->dev, "incorrect AHB base address in DT data - enabling workaround\n");
+ res->start -= INCORRECT_BASE_ADDR_LOW_BYTE;
+ }
+
ahb->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ahb->regs))
return PTR_ERR(ahb->regs);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5f601553b9b0..9dca4b995be0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -270,6 +270,7 @@ config ATA_PIIX
config SATA_DWC
tristate "DesignWare Cores SATA support"
depends on 460EX
+ select DW_DMAC
help
This option enables support for the on-chip SATA controller of the
AppliedMicro processor 460EX.
@@ -729,15 +730,6 @@ config PATA_SC1200
If unsure, say N.
-config PATA_SCC
- tristate "Toshiba's Cell Reference Set IDE support"
- depends on PCI && PPC_CELLEB
- help
- This option enables support for the built-in IDE controller on
- Toshiba Cell Reference Board.
-
- If unsure, say N.
-
config PATA_SCH
tristate "Intel SCH PATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index b67e995179a9..40f7865f20a1 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -75,7 +75,6 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
obj-$(CONFIG_PATA_RDC) += pata_rdc.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
-obj-$(CONFIG_PATA_SCC) += pata_scc.o
obj-$(CONFIG_PATA_SCH) += pata_sch.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c7a92a743ed0..65ee94454bbd 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -66,6 +66,7 @@ enum board_ids {
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
+ board_ahci_avn,
board_ahci_mcp65,
board_ahci_mcp77,
board_ahci_mcp89,
@@ -84,6 +85,8 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
static bool is_mcp89_apple(struct pci_dev *pdev);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
@@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
.hardreset = ahci_p5wdh_hardreset,
};
+static struct ata_port_operations ahci_avn_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_avn_hardreset,
+};
+
static const struct ata_port_info ahci_port_info[] = {
/* by features */
[board_ahci] = {
@@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
/* by chipsets */
+ [board_ahci_avn] = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_avn_ops,
+ },
[board_ahci_mcp65] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
AHCI_HFLAG_YES_NCQ),
@@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
+/*
+ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
+ *
+ * It has been observed with some SSDs that the timing of events in the
+ * link synchronization phase can leave the port in a state that cannot
+ * be recovered by a SATA hard reset alone. The failing signature is
+ * SStatus.DET stuck at 1 ("Device presence detected but Phy
+ * communication not established"). It was found that unloading and
+ * reloading the driver when this problem occurs allows the drive
+ * connection to be recovered (DET advanced to 0x3). The critical
+ * component of reloading the driver is that the port state machines are
+ * reset by bouncing "port enable" in the AHCI PCS configuration
+ * register. So, reproduce that effect by bouncing a port whenever we
+ * see DET==1 after a reset.
+ */
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ unsigned long tmo = deadline - jiffies;
+ struct ata_taskfile tf;
+ bool online;
+ int rc, i;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ for (i = 0; i < 2; i++) {
+ u16 val;
+ u32 sstatus;
+ int port = ap->port_no;
+ struct ata_host *host = ap->host;
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = ATA_BUSY;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
+ (sstatus & 0xf) != 1)
+ break;
+
+ ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
+ port);
+
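+ /* bounce "port enable" for this port in the PCS register (config offset 0x92) */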
+ pci_read_config_word(pdev, 0x92, &val);
+ val &= ~(1 << port);
+ pci_write_config_word(pdev, 0x92, val);
+ ata_msleep(ap, 1000);
+ val |= 1 << port;
+ pci_write_config_word(pdev, 0x92, val);
+ deadline += tmo;
+ }
+
+ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
+
#ifdef CONFIG_PM
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index ea0ff005b86c..8ff428fe8e0f 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -37,7 +37,6 @@ struct st_ahci_drv_data {
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;
- struct ahci_host_priv *hpriv;
};
static void st_ahci_configure_oob(void __iomem *mmio)
@@ -55,9 +54,10 @@ static void st_ahci_configure_oob(void __iomem *mmio)
writel(new_val, mmio + ST_AHCI_OOBR);
}
-static int st_ahci_deassert_resets(struct device *dev)
+static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv,
+ struct device *dev)
{
- struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ struct st_ahci_drv_data *drv_data = hpriv->plat_data;
int err;
if (drv_data->pwr) {
@@ -90,8 +90,8 @@ static int st_ahci_deassert_resets(struct device *dev)
static void st_ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
+ struct st_ahci_drv_data *drv_data = hpriv->plat_data;
struct device *dev = host->dev;
- struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
int err;
if (drv_data->pwr) {
@@ -103,29 +103,30 @@ static void st_ahci_host_stop(struct ata_host *host)
ahci_platform_disable_resources(hpriv);
}
-static int st_ahci_probe_resets(struct platform_device *pdev)
+static int st_ahci_probe_resets(struct ahci_host_priv *hpriv,
+ struct device *dev)
{
- struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct st_ahci_drv_data *drv_data = hpriv->plat_data;
- drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn");
+ drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn");
if (IS_ERR(drv_data->pwr)) {
- dev_info(&pdev->dev, "power reset control not defined\n");
+ dev_info(dev, "power reset control not defined\n");
drv_data->pwr = NULL;
}
- drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst");
+ drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst");
if (IS_ERR(drv_data->sw_rst)) {
- dev_info(&pdev->dev, "soft reset control not defined\n");
+ dev_info(dev, "soft reset control not defined\n");
drv_data->sw_rst = NULL;
}
- drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst");
+ drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst");
if (IS_ERR(drv_data->pwr_rst)) {
- dev_dbg(&pdev->dev, "power soft reset control not defined\n");
+ dev_dbg(dev, "power soft reset control not defined\n");
drv_data->pwr_rst = NULL;
}
- return st_ahci_deassert_resets(&pdev->dev);
+ return st_ahci_deassert_resets(hpriv, dev);
}
static struct ata_port_operations st_ahci_port_ops = {
@@ -154,15 +155,12 @@ static int st_ahci_probe(struct platform_device *pdev)
if (!drv_data)
return -ENOMEM;
- platform_set_drvdata(pdev, drv_data);
-
hpriv = ahci_platform_get_resources(pdev);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
+ hpriv->plat_data = drv_data;
- drv_data->hpriv = hpriv;
-
- err = st_ahci_probe_resets(pdev);
+ err = st_ahci_probe_resets(hpriv, &pdev->dev);
if (err)
return err;
@@ -170,7 +168,7 @@ static int st_ahci_probe(struct platform_device *pdev)
if (err)
return err;
- st_ahci_configure_oob(drv_data->hpriv->mmio);
+ st_ahci_configure_oob(hpriv->mmio);
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
@@ -185,8 +183,9 @@ static int st_ahci_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
- struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
- struct ahci_host_priv *hpriv = drv_data->hpriv;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct st_ahci_drv_data *drv_data = hpriv->plat_data;
int err;
err = ahci_platform_suspend_host(dev);
@@ -208,21 +207,21 @@ static int st_ahci_suspend(struct device *dev)
static int st_ahci_resume(struct device *dev)
{
- struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
- struct ahci_host_priv *hpriv = drv_data->hpriv;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
int err;
err = ahci_platform_enable_resources(hpriv);
if (err)
return err;
- err = st_ahci_deassert_resets(dev);
+ err = st_ahci_deassert_resets(hpriv, dev);
if (err) {
ahci_platform_disable_resources(hpriv);
return err;
}
- st_ahci_configure_oob(drv_data->hpriv->mmio);
+ st_ahci_configure_oob(hpriv->mmio);
return ahci_platform_resume_host(dev);
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 61a9c07e0dff..287c4ba0219f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
- /* if LPM is enabled, PHYRDY doesn't mean anything */
- if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
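+ /* PHYRDY changes can be side effects of LPM transitions; let libata decide whether to ignore them */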
+ if (sata_lpm_ignore_phy_events(&ap->link)) {
status &= ~PORT_IRQ_PHYRDY;
ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f6cb1f1b30b7..577849c6611a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4235,7 +4235,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
@@ -6752,6 +6752,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
return tmp;
}
+/**
+ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
+ * @link: Link receiving the event
+ *
+ * Test whether the received PHY event has to be ignored.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the event has to be ignored.
+ */
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
+{
+ unsigned long lpm_timeout = link->last_lpm_change +
+ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
+
+ /* if LPM is enabled, PHYRDY doesn't mean anything */
+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
+ return true;
+
+ /* ignore the first PHY event after the LPM policy changed
+ * as it might be spurious
+ */
+ if ((link->flags & ATA_LFLAG_CHANGED) &&
+ time_before(jiffies, lpm_timeout))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+
/*
* Dummy port_ops
*/
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 07f41be38fbe..cf0022ec07f2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3597,6 +3597,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
}
}
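+ /* record when the LPM policy changed so the first PHY event after the
+ * change can be ignored as potentially spurious */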
+ link->last_lpm_change = jiffies;
+ link->flags |= ATA_LFLAG_CHANGED;
+
return 0;
fail:
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index b33d1f99b3a4..994f168b54a8 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -128,20 +128,8 @@ static struct pnp_driver isapnp_driver = {
.remove = isapnp_remove_one,
};
-static int __init isapnp_init(void)
-{
- return pnp_register_driver(&isapnp_driver);
-}
-
-static void __exit isapnp_exit(void)
-{
- pnp_unregister_driver(&isapnp_driver);
-}
-
+module_pnp_driver(isapnp_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(isapnp_init);
-module_exit(isapnp_exit);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index a02f76fdcfcd..b0028588ff1c 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -540,9 +540,9 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
BUG_ON (pi++ >= MAX_DCMDS);
len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
- st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE);
- st_le16(&table->req_count, len);
- st_le32(&table->phy_addr, addr);
+ table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
+ table->req_count = cpu_to_le16(len);
+ table->phy_addr = cpu_to_le32(addr);
table->cmd_dep = 0;
table->xfer_status = 0;
table->res_count = 0;
@@ -557,12 +557,12 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
/* Convert the last command to an input/output */
table--;
- st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST);
+ table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
table++;
/* Add the stop command to the end of the list */
memset(table, 0, sizeof(struct dbdma_cmd));
- st_le16(&table->command, DBDMA_STOP);
+ table->command = cpu_to_le16(DBDMA_STOP);
dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
}
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
deleted file mode 100644
index 5cd60d6388ec..000000000000
--- a/drivers/ata/pata_scc.c
+++ /dev/null
@@ -1,1110 +0,0 @@
-/*
- * Support for IDE interfaces on Celleb platform
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This code is based on drivers/ata/ata_piix.c:
- * Copyright 2003-2005 Red Hat Inc
- * Copyright 2003-2005 Jeff Garzik
- * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat Inc
- *
- * and drivers/ata/ahci.c:
- * Copyright 2004-2005 Red Hat, Inc.
- *
- * and drivers/ata/libata-core.c:
- * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
- * Copyright 2003-2004 Jeff Garzik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-
-#define DRV_NAME "pata_scc"
-#define DRV_VERSION "0.3"
-
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
-
-/* PCI BARs */
-#define SCC_CTRL_BAR 0
-#define SCC_BMID_BAR 1
-
-/* offset of CTRL registers */
-#define SCC_CTL_PIOSHT 0x000
-#define SCC_CTL_PIOCT 0x004
-#define SCC_CTL_MDMACT 0x008
-#define SCC_CTL_MCRCST 0x00C
-#define SCC_CTL_SDMACT 0x010
-#define SCC_CTL_SCRCST 0x014
-#define SCC_CTL_UDENVT 0x018
-#define SCC_CTL_TDVHSEL 0x020
-#define SCC_CTL_MODEREG 0x024
-#define SCC_CTL_ECMODE 0xF00
-#define SCC_CTL_MAEA0 0xF50
-#define SCC_CTL_MAEC0 0xF54
-#define SCC_CTL_CCKCTRL 0xFF0
-
-/* offset of BMID registers */
-#define SCC_DMA_CMD 0x000
-#define SCC_DMA_STATUS 0x004
-#define SCC_DMA_TABLE_OFS 0x008
-#define SCC_DMA_INTMASK 0x010
-#define SCC_DMA_INTST 0x014
-#define SCC_DMA_PTERADD 0x018
-#define SCC_REG_CMD_ADDR 0x020
-#define SCC_REG_DATA 0x000
-#define SCC_REG_ERR 0x004
-#define SCC_REG_FEATURE 0x004
-#define SCC_REG_NSECT 0x008
-#define SCC_REG_LBAL 0x00C
-#define SCC_REG_LBAM 0x010
-#define SCC_REG_LBAH 0x014
-#define SCC_REG_DEVICE 0x018
-#define SCC_REG_STATUS 0x01C
-#define SCC_REG_CMD 0x01C
-#define SCC_REG_ALTSTATUS 0x020
-
-/* register value */
-#define TDVHSEL_MASTER 0x00000001
-#define TDVHSEL_SLAVE 0x00000004
-
-#define MODE_JCUSFEN 0x00000080
-
-#define ECMODE_VALUE 0x01
-
-#define CCKCTRL_ATARESET 0x00040000
-#define CCKCTRL_BUFCNT 0x00020000
-#define CCKCTRL_CRST 0x00010000
-#define CCKCTRL_OCLKEN 0x00000100
-#define CCKCTRL_ATACLKOEN 0x00000002
-#define CCKCTRL_LCLKEN 0x00000001
-
-#define QCHCD_IOS_SS 0x00000001
-
-#define QCHSD_STPDIAG 0x00020000
-
-#define INTMASK_MSK 0xD1000012
-#define INTSTS_SERROR 0x80000000
-#define INTSTS_PRERR 0x40000000
-#define INTSTS_RERR 0x10000000
-#define INTSTS_ICERR 0x01000000
-#define INTSTS_BMSINT 0x00000010
-#define INTSTS_BMHE 0x00000008
-#define INTSTS_IOIRQS 0x00000004
-#define INTSTS_INTRQ 0x00000002
-#define INTSTS_ACTEINT 0x00000001
-
-
-/* PIO transfer mode table */
-/* JCHST */
-static const unsigned long JCHSTtbl[2][7] = {
- {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
- {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
-};
-
-/* JCHHT */
-static const unsigned long JCHHTtbl[2][7] = {
- {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
- {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
-};
-
-/* JCHCT */
-static const unsigned long JCHCTtbl[2][7] = {
- {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
- {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
-};
-
-/* DMA transfer mode table */
-/* JCHDCTM/JCHDCTS */
-static const unsigned long JCHDCTxtbl[2][7] = {
- {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
- {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
-};
-
-/* JCSTWTM/JCSTWTS */
-static const unsigned long JCSTWTxtbl[2][7] = {
- {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
- {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
-};
-
-/* JCTSS */
-static const unsigned long JCTSStbl[2][7] = {
- {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
- {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
-};
-
-/* JCENVT */
-static const unsigned long JCENVTtbl[2][7] = {
- {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
- {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
-};
-
-/* JCACTSELS/JCACTSELM */
-static const unsigned long JCACTSELtbl[2][7] = {
- {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
-};
-
-static const struct pci_device_id scc_pci_tbl[] = {
- { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
- { } /* terminate list */
-};
-
-/**
- * scc_set_piomode - Initialize host controller PATA PIO timings
- * @ap: Port whose timings we are configuring
- * @adev: um
- *
- * Set PIO mode for device.
- *
- * LOCKING:
- * None (inherited from caller).
- */
-
-static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
-{
- unsigned int pio = adev->pio_mode - XFER_PIO_0;
- void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
- void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
- void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
- void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
- unsigned long reg;
- int offset;
-
- reg = in_be32(cckctrl_port);
- if (reg & CCKCTRL_ATACLKOEN)
- offset = 1; /* 133MHz */
- else
- offset = 0; /* 100MHz */
-
- reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
- out_be32(piosht_port, reg);
- reg = JCHCTtbl[offset][pio];
- out_be32(pioct_port, reg);
-}
-
-/**
- * scc_set_dmamode - Initialize host controller PATA DMA timings
- * @ap: Port whose timings we are configuring
- * @adev: um
- *
- * Set UDMA mode for device.
- *
- * LOCKING:
- * None (inherited from caller).
- */
-
-static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
-{
- unsigned int udma = adev->dma_mode;
- unsigned int is_slave = (adev->devno != 0);
- u8 speed = udma;
- void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
- void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
- void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
- void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
- void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
- void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
- void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
- void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
- int offset, idx;
-
- if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
- offset = 1; /* 133MHz */
- else
- offset = 0; /* 100MHz */
-
- if (speed >= XFER_UDMA_0)
- idx = speed - XFER_UDMA_0;
- else
- return;
-
- if (is_slave) {
- out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
- out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
- out_be32(tdvhsel_port,
- (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
- } else {
- out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
- out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
- out_be32(tdvhsel_port,
- (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
- }
- out_be32(udenvt_port,
- JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
-}
-
-unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
-{
- /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
- if (adev->class == ATA_DEV_ATAPI &&
- (mask & (0xE0 << ATA_SHIFT_UDMA))) {
- printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
- mask &= ~(0xE0 << ATA_SHIFT_UDMA);
- }
- return mask;
-}
-
-/**
- * scc_tf_load - send taskfile registers to host controller
- * @ap: Port to which output is sent
- * @tf: ATA taskfile register set
- *
- * Note: Original code is ata_sff_tf_load().
- */
-
-static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
-
- if (tf->ctl != ap->last_ctl) {
- out_be32(ioaddr->ctl_addr, tf->ctl);
- ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
- }
-
- if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
- out_be32(ioaddr->feature_addr, tf->hob_feature);
- out_be32(ioaddr->nsect_addr, tf->hob_nsect);
- out_be32(ioaddr->lbal_addr, tf->hob_lbal);
- out_be32(ioaddr->lbam_addr, tf->hob_lbam);
- out_be32(ioaddr->lbah_addr, tf->hob_lbah);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
- }
-
- if (is_addr) {
- out_be32(ioaddr->feature_addr, tf->feature);
- out_be32(ioaddr->nsect_addr, tf->nsect);
- out_be32(ioaddr->lbal_addr, tf->lbal);
- out_be32(ioaddr->lbam_addr, tf->lbam);
- out_be32(ioaddr->lbah_addr, tf->lbah);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
- }
-
- if (tf->flags & ATA_TFLAG_DEVICE) {
- out_be32(ioaddr->device_addr, tf->device);
- VPRINTK("device 0x%X\n", tf->device);
- }
-
- ata_wait_idle(ap);
-}
-
-/**
- * scc_check_status - Read device status reg & clear interrupt
- * @ap: port where the device is
- *
- * Note: Original code is ata_check_status().
- */
-
-static u8 scc_check_status (struct ata_port *ap)
-{
- return in_be32(ap->ioaddr.status_addr);
-}
-
-/**
- * scc_tf_read - input device's ATA taskfile shadow registers
- * @ap: Port from which input is read
- * @tf: ATA taskfile register set for storing input
- *
- * Note: Original code is ata_sff_tf_read().
- */
-
-static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- tf->command = scc_check_status(ap);
- tf->feature = in_be32(ioaddr->error_addr);
- tf->nsect = in_be32(ioaddr->nsect_addr);
- tf->lbal = in_be32(ioaddr->lbal_addr);
- tf->lbam = in_be32(ioaddr->lbam_addr);
- tf->lbah = in_be32(ioaddr->lbah_addr);
- tf->device = in_be32(ioaddr->device_addr);
-
- if (tf->flags & ATA_TFLAG_LBA48) {
- out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
- tf->hob_feature = in_be32(ioaddr->error_addr);
- tf->hob_nsect = in_be32(ioaddr->nsect_addr);
- tf->hob_lbal = in_be32(ioaddr->lbal_addr);
- tf->hob_lbam = in_be32(ioaddr->lbam_addr);
- tf->hob_lbah = in_be32(ioaddr->lbah_addr);
- out_be32(ioaddr->ctl_addr, tf->ctl);
- ap->last_ctl = tf->ctl;
- }
-}
-
-/**
- * scc_exec_command - issue ATA command to host controller
- * @ap: port to which command is being issued
- * @tf: ATA taskfile register set
- *
- * Note: Original code is ata_sff_exec_command().
- */
-
-static void scc_exec_command (struct ata_port *ap,
- const struct ata_taskfile *tf)
-{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
- out_be32(ap->ioaddr.command_addr, tf->command);
- ata_sff_pause(ap);
-}
-
-/**
- * scc_check_altstatus - Read device alternate status reg
- * @ap: port where the device is
- */
-
-static u8 scc_check_altstatus (struct ata_port *ap)
-{
- return in_be32(ap->ioaddr.altstatus_addr);
-}
-
-/**
- * scc_dev_select - Select device 0/1 on ATA bus
- * @ap: ATA channel to manipulate
- * @device: ATA device (numbered from zero) to select
- *
- * Note: Original code is ata_sff_dev_select().
- */
-
-static void scc_dev_select (struct ata_port *ap, unsigned int device)
-{
- u8 tmp;
-
- if (device == 0)
- tmp = ATA_DEVICE_OBS;
- else
- tmp = ATA_DEVICE_OBS | ATA_DEV1;
-
- out_be32(ap->ioaddr.device_addr, tmp);
- ata_sff_pause(ap);
-}
-
-/**
- * scc_set_devctl - Write device control reg
- * @ap: port where the device is
- * @ctl: value to write
- */
-
-static void scc_set_devctl(struct ata_port *ap, u8 ctl)
-{
- out_be32(ap->ioaddr.ctl_addr, ctl);
-}
-
-/**
- * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_setup().
- */
-
-static void scc_bmdma_setup (struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
- u8 dmactl;
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- /* load PRD table addr */
- out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
-
- /* specify data direction, triple-check start bit is clear */
- dmactl = in_be32(mmio + SCC_DMA_CMD);
- dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
- if (!rw)
- dmactl |= ATA_DMA_WR;
- out_be32(mmio + SCC_DMA_CMD, dmactl);
-
- /* issue r/w command */
- ap->ops->sff_exec_command(ap, &qc->tf);
-}
-
-/**
- * scc_bmdma_start - Start a PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_start().
- */
-
-static void scc_bmdma_start (struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- u8 dmactl;
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- /* start host DMA transaction */
- dmactl = in_be32(mmio + SCC_DMA_CMD);
- out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
-}
-
-/**
- * scc_devchk - PATA device presence detection
- * @ap: ATA channel to examine
- * @device: Device to examine (starting at zero)
- *
- * Note: Original code is ata_devchk().
- */
-
-static unsigned int scc_devchk (struct ata_port *ap,
- unsigned int device)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 nsect, lbal;
-
- ap->ops->sff_dev_select(ap, device);
-
- out_be32(ioaddr->nsect_addr, 0x55);
- out_be32(ioaddr->lbal_addr, 0xaa);
-
- out_be32(ioaddr->nsect_addr, 0xaa);
- out_be32(ioaddr->lbal_addr, 0x55);
-
- out_be32(ioaddr->nsect_addr, 0x55);
- out_be32(ioaddr->lbal_addr, 0xaa);
-
- nsect = in_be32(ioaddr->nsect_addr);
- lbal = in_be32(ioaddr->lbal_addr);
-
- if ((nsect == 0x55) && (lbal == 0xaa))
- return 1; /* we found a device */
-
- return 0; /* nothing found */
-}
-
-/**
- * scc_wait_after_reset - wait for devices to become ready after reset
- *
- * Note: Original code is ata_sff_wait_after_reset
- */
-
-static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
- unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int dev0 = devmask & (1 << 0);
- unsigned int dev1 = devmask & (1 << 1);
- int rc, ret = 0;
-
- /* Spec mandates ">= 2ms" before checking status. We wait
- * 150ms, because that was the magic delay used for ATAPI
- * devices in Hale Landis's ATADRVR, for the period of time
- * between when the ATA command register is written, and then
- * status is checked. Because waiting for "a while" before
- * checking status is fine, post SRST, we perform this magic
- * delay here as well.
- *
- * Old drivers/ide uses the 2mS rule and then waits for ready.
- */
- ata_msleep(ap, 150);
-
- /* always check readiness of the master device */
- rc = ata_sff_wait_ready(link, deadline);
- /* -ENODEV means the odd clown forgot the D7 pulldown resistor
- * and TF status is 0xff, bail out on it too.
- */
- if (rc)
- return rc;
-
- /* if device 1 was found in ata_devchk, wait for register
- * access briefly, then wait for BSY to clear.
- */
- if (dev1) {
- int i;
-
- ap->ops->sff_dev_select(ap, 1);
-
- /* Wait for register access. Some ATAPI devices fail
- * to set nsect/lbal after reset, so don't waste too
- * much time on it. We're gonna wait for !BSY anyway.
- */
- for (i = 0; i < 2; i++) {
- u8 nsect, lbal;
-
- nsect = in_be32(ioaddr->nsect_addr);
- lbal = in_be32(ioaddr->lbal_addr);
- if ((nsect == 1) && (lbal == 1))
- break;
- ata_msleep(ap, 50); /* give drive a breather */
- }
-
- rc = ata_sff_wait_ready(link, deadline);
- if (rc) {
- if (rc != -ENODEV)
- return rc;
- ret = rc;
- }
- }
-
- /* is all this really necessary? */
- ap->ops->sff_dev_select(ap, 0);
- if (dev1)
- ap->ops->sff_dev_select(ap, 1);
- if (dev0)
- ap->ops->sff_dev_select(ap, 0);
-
- return ret;
-}
-
-/**
- * scc_bus_softreset - PATA device software reset
- *
- * Note: Original code is ata_bus_softreset().
- */
-
-static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
- unsigned long deadline)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
- /* software reset. causes dev0 to be selected */
- out_be32(ioaddr->ctl_addr, ap->ctl);
- udelay(20);
- out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
- udelay(20);
- out_be32(ioaddr->ctl_addr, ap->ctl);
-
- return scc_wait_after_reset(&ap->link, devmask, deadline);
-}
-
-/**
- * scc_softreset - reset host port via ATA SRST
- * @ap: port to reset
- * @classes: resulting classes of attached devices
- * @deadline: deadline jiffies for the operation
- *
- * Note: Original code is ata_sff_softreset().
- */
-
-static int scc_softreset(struct ata_link *link, unsigned int *classes,
- unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- unsigned int devmask = 0;
- int rc;
- u8 err;
-
- DPRINTK("ENTER\n");
-
- /* determine if device 0/1 are present */
- if (scc_devchk(ap, 0))
- devmask |= (1 << 0);
- if (slave_possible && scc_devchk(ap, 1))
- devmask |= (1 << 1);
-
- /* select device 0 again */
- ap->ops->sff_dev_select(ap, 0);
-
- /* issue bus reset */
- DPRINTK("about to softreset, devmask=%x\n", devmask);
- rc = scc_bus_softreset(ap, devmask, deadline);
- if (rc) {
- ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
- return -EIO;
- }
-
- /* determine by signature whether we have ATA or ATAPI devices */
- classes[0] = ata_sff_dev_classify(&ap->link.device[0],
- devmask & (1 << 0), &err);
- if (slave_possible && err != 0x81)
- classes[1] = ata_sff_dev_classify(&ap->link.device[1],
- devmask & (1 << 1), &err);
-
- DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
- return 0;
-}
-
-/**
- * scc_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @qc: Command we are ending DMA for
- */
-
-static void scc_bmdma_stop (struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
- void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
- u32 reg;
-
- while (1) {
- reg = in_be32(bmid_base + SCC_DMA_INTST);
-
- if (reg & INTSTS_SERROR) {
- printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
- out_be32(bmid_base + SCC_DMA_CMD,
- in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
- continue;
- }
-
- if (reg & INTSTS_PRERR) {
- u32 maea0, maec0;
- maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
- maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
- printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
- out_be32(bmid_base + SCC_DMA_CMD,
- in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
- continue;
- }
-
- if (reg & INTSTS_RERR) {
- printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
- out_be32(bmid_base + SCC_DMA_CMD,
- in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
- continue;
- }
-
- if (reg & INTSTS_ICERR) {
- out_be32(bmid_base + SCC_DMA_CMD,
- in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
- printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
- continue;
- }
-
- if (reg & INTSTS_BMSINT) {
- unsigned int classes;
- unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
- printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
- /* TBD: SW reset */
- scc_softreset(&ap->link, &classes, deadline);
- continue;
- }
-
- if (reg & INTSTS_BMHE) {
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
- continue;
- }
-
- if (reg & INTSTS_ACTEINT) {
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
- continue;
- }
-
- if (reg & INTSTS_IOIRQS) {
- out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
- continue;
- }
- break;
- }
-
- /* clear start/stop bit */
- out_be32(bmid_base + SCC_DMA_CMD,
- in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
-
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap); /* dummy read */
-}
-
-/**
- * scc_bmdma_status - Read PCI IDE BMDMA status
- * @ap: Port associated with this ATA transaction.
- */
-
-static u8 scc_bmdma_status (struct ata_port *ap)
-{
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
- u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
- u32 int_status = in_be32(mmio + SCC_DMA_INTST);
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
- static int retry = 0;
-
- /* return if IOS_SS is cleared */
- if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
- return host_stat;
-
- /* errata A252,A308 workaround: Step4 */
- if ((scc_check_altstatus(ap) & ATA_ERR)
- && (int_status & INTSTS_INTRQ))
- return (host_stat | ATA_DMA_INTR);
-
- /* errata A308 workaround Step5 */
- if (int_status & INTSTS_IOIRQS) {
- host_stat |= ATA_DMA_INTR;
-
- /* We don't check ATAPI DMA because it is limited to UDMA4 */
- if ((qc->tf.protocol == ATA_PROT_DMA &&
- qc->dev->xfer_mode > XFER_UDMA_4)) {
- if (!(int_status & INTSTS_ACTEINT)) {
- printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
- ap->print_id);
- host_stat |= ATA_DMA_ERR;
- if (retry++)
- ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
- } else
- retry = 0;
- }
- }
-
- return host_stat;
-}
-
-/**
- * scc_data_xfer - Transfer data by PIO
- * @dev: device for this I/O
- * @buf: data buffer
- * @buflen: buffer length
- * @rw: read/write
- *
- * Note: Original code is ata_sff_data_xfer().
- */
-
-static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
- unsigned int buflen, int rw)
-{
- struct ata_port *ap = dev->link->ap;
- unsigned int words = buflen >> 1;
- unsigned int i;
- __le16 *buf16 = (__le16 *) buf;
- void __iomem *mmio = ap->ioaddr.data_addr;
-
- /* Transfer multiple of 2 bytes */
- if (rw == READ)
- for (i = 0; i < words; i++)
- buf16[i] = cpu_to_le16(in_be32(mmio));
- else
- for (i = 0; i < words; i++)
- out_be32(mmio, le16_to_cpu(buf16[i]));
-
- /* Transfer trailing 1 byte, if any. */
- if (unlikely(buflen & 0x01)) {
- __le16 align_buf[1] = { 0 };
- unsigned char *trailing_buf = buf + buflen - 1;
-
- if (rw == READ) {
- align_buf[0] = cpu_to_le16(in_be32(mmio));
- memcpy(trailing_buf, align_buf, 1);
- } else {
- memcpy(align_buf, trailing_buf, 1);
- out_be32(mmio, le16_to_cpu(align_buf[0]));
- }
- words++;
- }
-
- return words << 1;
-}
-
-/**
- * scc_postreset - standard postreset callback
- * @ap: the target ata_port
- * @classes: classes of attached devices
- *
- * Note: Original code is ata_sff_postreset().
- */
-
-static void scc_postreset(struct ata_link *link, unsigned int *classes)
-{
- struct ata_port *ap = link->ap;
-
- DPRINTK("ENTER\n");
-
- /* is double-select really necessary? */
- if (classes[0] != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 1);
- if (classes[1] != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 0);
-
- /* bail out if no device is present */
- if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
- DPRINTK("EXIT, no device\n");
- return;
- }
-
- /* set up device control */
- out_be32(ap->ioaddr.ctl_addr, ap->ctl);
-
- DPRINTK("EXIT\n");
-}
-
-/**
- * scc_irq_clear - Clear PCI IDE BMDMA interrupt.
- * @ap: Port associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_irq_clear().
- */
-
-static void scc_irq_clear (struct ata_port *ap)
-{
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- if (!mmio)
- return;
-
- out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
-}
-
-/**
- * scc_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Allocate space for PRD table using ata_bmdma_port_start().
- * Set PRD table address for PTERADD. (PRD Transfer End Read)
- */
-
-static int scc_port_start (struct ata_port *ap)
-{
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
- int rc;
-
- rc = ata_bmdma_port_start(ap);
- if (rc)
- return rc;
-
- out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
- return 0;
-}
-
-/**
- * scc_port_stop - Undo scc_port_start()
- * @ap: Port to shut down
- *
- * Reset PTERADD.
- */
-
-static void scc_port_stop (struct ata_port *ap)
-{
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- out_be32(mmio + SCC_DMA_PTERADD, 0);
-}
-
-static struct scsi_host_template scc_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations scc_pata_ops = {
- .inherits = &ata_bmdma_port_ops,
-
- .set_piomode = scc_set_piomode,
- .set_dmamode = scc_set_dmamode,
- .mode_filter = scc_mode_filter,
-
- .sff_tf_load = scc_tf_load,
- .sff_tf_read = scc_tf_read,
- .sff_exec_command = scc_exec_command,
- .sff_check_status = scc_check_status,
- .sff_check_altstatus = scc_check_altstatus,
- .sff_dev_select = scc_dev_select,
- .sff_set_devctl = scc_set_devctl,
-
- .bmdma_setup = scc_bmdma_setup,
- .bmdma_start = scc_bmdma_start,
- .bmdma_stop = scc_bmdma_stop,
- .bmdma_status = scc_bmdma_status,
- .sff_data_xfer = scc_data_xfer,
-
- .cable_detect = ata_cable_80wire,
- .softreset = scc_softreset,
- .postreset = scc_postreset,
-
- .sff_irq_clear = scc_irq_clear,
-
- .port_start = scc_port_start,
- .port_stop = scc_port_stop,
-};
-
-static struct ata_port_info scc_port_info[] = {
- {
- .flags = ATA_FLAG_SLAVE_POSS,
- .pio_mask = ATA_PIO4,
- /* No MWDMA */
- .udma_mask = ATA_UDMA6,
- .port_ops = &scc_pata_ops,
- },
-};
-
-/**
- * scc_reset_controller - initialize SCC PATA controller.
- */
-
-static int scc_reset_controller(struct ata_host *host)
-{
- void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR];
- void __iomem *bmid_base = host->iomap[SCC_BMID_BAR];
- void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
- void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
- void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
- void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
- void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
- u32 reg = 0;
-
- out_be32(cckctrl_port, reg);
- reg |= CCKCTRL_ATACLKOEN;
- out_be32(cckctrl_port, reg);
- reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
- out_be32(cckctrl_port, reg);
- reg |= CCKCTRL_CRST;
- out_be32(cckctrl_port, reg);
-
- for (;;) {
- reg = in_be32(cckctrl_port);
- if (reg & CCKCTRL_CRST)
- break;
- udelay(5000);
- }
-
- reg |= CCKCTRL_ATARESET;
- out_be32(cckctrl_port, reg);
- out_be32(ecmode_port, ECMODE_VALUE);
- out_be32(mode_port, MODE_JCUSFEN);
- out_be32(intmask_port, INTMASK_MSK);
-
- if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
- printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
- * @ioaddr: IO address structure to be initialized
- * @base: base address of BMID region
- */
-
-static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
-{
- ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
- ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
- ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
- ioaddr->bmdma_addr = base;
- ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
- ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
- ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
- ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
- ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
- ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
- ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
- ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
- ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
- ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
-}
-
-static int scc_host_init(struct ata_host *host)
-{
- struct pci_dev *pdev = to_pci_dev(host->dev);
- int rc;
-
- rc = scc_reset_controller(host);
- if (rc)
- return rc;
-
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
-
- scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]);
-
- pci_set_master(pdev);
-
- return 0;
-}
-
-/**
- * scc_init_one - Register SCC PATA device with kernel services
- * @pdev: PCI device to register
- * @ent: Entry in scc_pci_tbl matching with @pdev
- *
- * LOCKING:
- * Inherited from PCI layer (may sleep).
- *
- * RETURNS:
- * Zero on success, or -ERRNO value.
- */
-
-static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- unsigned int board_idx = (unsigned int) ent->driver_data;
- const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
- struct ata_host *host;
- int rc;
-
- ata_print_version_once(&pdev->dev, DRV_VERSION);
-
- host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
- if (!host)
- return -ENOMEM;
-
- rc = pcim_enable_device(pdev);
- if (rc)
- return rc;
-
- rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
- if (rc == -EBUSY)
- pcim_pin_device(pdev);
- if (rc)
- return rc;
- host->iomap = pcim_iomap_table(pdev);
-
- ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
- ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");
-
- rc = scc_host_init(host);
- if (rc)
- return rc;
-
- return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
- IRQF_SHARED, &scc_sht);
-}
-
-static struct pci_driver scc_pci_driver = {
- .name = DRV_NAME,
- .id_table = scc_pci_tbl,
- .probe = scc_init_one,
- .remove = ata_pci_remove_one,
-#ifdef CONFIG_PM_SLEEP
- .suspend = ata_pci_device_suspend,
- .resume = ata_pci_device_resume,
-#endif
-};
-
-module_pci_driver(scc_pci_driver);
-
-MODULE_AUTHOR("Toshiba corp");
-MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index ff8307b30ff0..ff614be55d0f 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -47,11 +47,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi.h>
#include <linux/libata.h>
-
-#ifdef CONFIG_PPC_OF
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#endif /* CONFIG_PPC_OF */
+#include <linux/of.h>
#define DRV_NAME "sata_svw"
#define DRV_VERSION "2.3"
@@ -320,7 +316,6 @@ static u8 k2_stat_check_status(struct ata_port *ap)
return readl(ap->ioaddr.status_addr);
}
-#ifdef CONFIG_PPC_OF
static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct ata_port *ap;
@@ -350,14 +345,10 @@ static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
}
return 0;
}
-#endif /* CONFIG_PPC_OF */
-
static struct scsi_host_template k2_sata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
-#ifdef CONFIG_PPC_OF
.show_info = k2_sata_show_info,
-#endif
};
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b7e1cc0a97c8..ddc4ceb85fc5 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -73,9 +73,6 @@
#undef GENERAL_DEBUG
#undef EXTRA_DEBUG
-#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
- you're going to use only raw ATM */
-
/* Do not touch these */
#ifdef TX_DEBUG
@@ -138,11 +135,6 @@ static void process_tsq(ns_dev * card);
static void drain_scq(ns_dev * card, scq_info * scq, int pos);
static void process_rsq(ns_dev * card);
static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
-#ifdef NS_USE_DESTRUCTORS
-static void ns_sb_destructor(struct sk_buff *sb);
-static void ns_lb_destructor(struct sk_buff *lb);
-static void ns_hb_destructor(struct sk_buff *hb);
-#endif /* NS_USE_DESTRUCTORS */
static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
@@ -2169,9 +2161,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
} else {
skb_put(skb, len);
dequeue_sm_buf(card, skb);
-#ifdef NS_USE_DESTRUCTORS
- skb->destructor = ns_sb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
ATM_SKB(skb)->vcc = vcc;
__net_timestamp(skb);
vcc->push(vcc, skb);
@@ -2190,9 +2179,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
} else {
skb_put(sb, len);
dequeue_sm_buf(card, sb);
-#ifdef NS_USE_DESTRUCTORS
- sb->destructor = ns_sb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
ATM_SKB(sb)->vcc = vcc;
__net_timestamp(sb);
vcc->push(vcc, sb);
@@ -2208,9 +2194,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
atomic_inc(&vcc->stats->rx_drop);
} else {
dequeue_lg_buf(card, skb);
-#ifdef NS_USE_DESTRUCTORS
- skb->destructor = ns_lb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
skb_push(skb, NS_SMBUFSIZE);
skb_copy_from_linear_data(sb, skb->data,
NS_SMBUFSIZE);
@@ -2322,9 +2305,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
card->index);
#endif /* EXTRA_DEBUG */
ATM_SKB(hb)->vcc = vcc;
-#ifdef NS_USE_DESTRUCTORS
- hb->destructor = ns_hb_destructor;
-#endif /* NS_USE_DESTRUCTORS */
__net_timestamp(hb);
vcc->push(vcc, hb);
atomic_inc(&vcc->stats->rx);
@@ -2337,68 +2317,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
}
-#ifdef NS_USE_DESTRUCTORS
-
-static void ns_sb_destructor(struct sk_buff *sb)
-{
- ns_dev *card;
- u32 stat;
-
- card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
- stat = readl(card->membase + STAT);
- card->sbfqc = ns_stat_sfbqc_get(stat);
- card->lbfqc = ns_stat_lfbqc_get(stat);
-
- do {
- sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
- if (sb == NULL)
- break;
- NS_PRV_BUFTYPE(sb) = BUF_SM;
- skb_queue_tail(&card->sbpool.queue, sb);
- skb_reserve(sb, NS_AAL0_HEADER);
- push_rxbufs(card, sb);
- } while (card->sbfqc < card->sbnr.min);
-}
-
-static void ns_lb_destructor(struct sk_buff *lb)
-{
- ns_dev *card;
- u32 stat;
-
- card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
- stat = readl(card->membase + STAT);
- card->sbfqc = ns_stat_sfbqc_get(stat);
- card->lbfqc = ns_stat_lfbqc_get(stat);
-
- do {
- lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
- if (lb == NULL)
- break;
- NS_PRV_BUFTYPE(lb) = BUF_LG;
- skb_queue_tail(&card->lbpool.queue, lb);
- skb_reserve(lb, NS_SMBUFSIZE);
- push_rxbufs(card, lb);
- } while (card->lbfqc < card->lbnr.min);
-}
-
-static void ns_hb_destructor(struct sk_buff *hb)
-{
- ns_dev *card;
-
- card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
-
- while (card->hbpool.count < card->hbnr.init) {
- hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
- if (hb == NULL)
- break;
- NS_PRV_BUFTYPE(hb) = BUF_NONE;
- skb_queue_tail(&card->hbpool.queue, hb);
- card->hbpool.count++;
- }
-}
-
-#endif /* NS_USE_DESTRUCTORS */
-
static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
{
if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
@@ -2427,9 +2345,6 @@ static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
{
skb_unlink(sb, &card->sbpool.queue);
-#ifdef NS_USE_DESTRUCTORS
- if (card->sbfqc < card->sbnr.min)
-#else
if (card->sbfqc < card->sbnr.init) {
struct sk_buff *new_sb;
if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@@ -2440,7 +2355,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
}
}
if (card->sbfqc < card->sbnr.init)
-#endif /* NS_USE_DESTRUCTORS */
{
struct sk_buff *new_sb;
if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@@ -2455,9 +2369,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
{
skb_unlink(lb, &card->lbpool.queue);
-#ifdef NS_USE_DESTRUCTORS
- if (card->lbfqc < card->lbnr.min)
-#else
if (card->lbfqc < card->lbnr.init) {
struct sk_buff *new_lb;
if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
@@ -2468,7 +2379,6 @@ static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
}
}
if (card->lbfqc < card->lbnr.init)
-#endif /* NS_USE_DESTRUCTORS */
{
struct sk_buff *new_lb;
if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cadf165651d8..21d13038534e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -2144,3 +2145,53 @@ define_dev_printk_level(dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
#endif
+
+static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
+{
+ return fwnode && !IS_ERR(fwnode->secondary);
+}
+
+/**
+ * set_primary_fwnode - Change the primary firmware node of a given device.
+ * @dev: Device to handle.
+ * @fwnode: New primary firmware node of the device.
+ *
+ * Set the device's firmware node pointer to @fwnode, but if a secondary
+ * firmware node of the device is present, preserve it.
+ */
+void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+{
+ if (fwnode) {
+ struct fwnode_handle *fn = dev->fwnode;
+
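+ /* preserve any existing secondary node and chain it behind the new primary */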
+ if (fwnode_is_primary(fn))
+ fn = fn->secondary;
+
+ fwnode->secondary = fn;
+ dev->fwnode = fwnode;
+ } else {
+ dev->fwnode = fwnode_is_primary(dev->fwnode) ?
+ dev->fwnode->secondary : NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(set_primary_fwnode);
+
+/**
+ * set_secondary_fwnode - Change the secondary firmware node of a given device.
+ * @dev: Device to handle.
+ * @fwnode: New secondary firmware node of the device.
+ *
+ * If a primary firmware node of the device is present, set its secondary
+ * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
+ * @fwnode.
+ */
+void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+{
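+ /* an ERR_PTR(-ENODEV) secondary marks @fwnode itself as a secondary node */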
+ if (fwnode)
+ fwnode->secondary = ERR_PTR(-ENODEV);
+
+ if (fwnode_is_primary(dev->fwnode))
+ dev->fwnode->secondary = fwnode;
+ else
+ dev->fwnode = fwnode;
+}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 49a4a12fafef..e843fdbe4925 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -298,6 +298,12 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
+ if (dev->pm_domain && dev->pm_domain->activate) {
+ ret = dev->pm_domain->activate(dev);
+ if (ret)
+ goto probe_failed;
+ }
+
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
@@ -308,6 +314,9 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
+ if (dev->pm_domain && dev->pm_domain->sync)
+ dev->pm_domain->sync(dev);
+
driver_bound(dev);
ret = 1;
pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
@@ -319,6 +328,8 @@ probe_failed:
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
+ if (dev->pm_domain && dev->pm_domain->dismiss)
+ dev->pm_domain->dismiss(dev);
switch (ret) {
case -EPROBE_DEFER:
@@ -529,6 +540,9 @@ static void __device_release_driver(struct device *dev)
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
+ if (dev->pm_domain && dev->pm_domain->dismiss)
+ dev->pm_domain->dismiss(dev);
+
klist_remove(&dev->p->knode_driver);
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
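
really_probe() now brackets driver binding with three optional PM-domain hooks:
->activate before the bus/driver probe runs, ->sync after a successful probe,
and ->dismiss when probing fails or the driver is released. A minimal sketch of
a provider filling these in (names are hypothetical; only the callback fields
dereferenced above are assumed):

	#include <linux/device.h>
	#include <linux/pm.h>

	static int example_domain_activate(struct device *dev)
	{
		/* Power up whatever the device needs before ->probe() runs. */
		return 0;
	}

	static void example_domain_sync(struct device *dev)
	{
		/* Probe succeeded; release or park resources that turned out
		 * not to be needed. */
	}

	static void example_domain_dismiss(struct device *dev)
	{
		/* Probe failed or the driver was unbound; undo activate(). */
	}

	static struct dev_pm_domain example_pm_domain = {
		.activate = example_domain_activate,
		.sync     = example_domain_sync,
		.dismiss  = example_domain_dismiss,
	};
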
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 25798db14553..68f03141e432 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -157,10 +157,10 @@ static int dev_mkdir(const char *name, umode_t mode)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ err = vfs_mkdir(d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
- dentry->d_inode->i_private = &thread;
+ d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
return err;
}
@@ -207,7 +207,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev->devt);
+ err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt);
if (!err) {
struct iattr newattrs;
@@ -215,12 +215,12 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_uid = uid;
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
notify_change(dentry, &newattrs, NULL);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
/* mark as kernel-created inode */
- dentry->d_inode->i_private = &thread;
+ d_inode(dentry)->i_private = &thread;
}
done_path_create(&path, dentry);
return err;
@@ -235,16 +235,16 @@ static int dev_rmdir(const char *name)
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (dentry->d_inode) {
- if (dentry->d_inode->i_private == &thread)
- err = vfs_rmdir(parent.dentry->d_inode, dentry);
+ if (d_really_is_positive(dentry)) {
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(d_inode(parent.dentry), dentry);
else
err = -EPERM;
} else {
err = -ENOENT;
}
dput(dentry);
- mutex_unlock(&parent.dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent.dentry)->i_mutex);
path_put(&parent);
return err;
}
@@ -306,11 +306,11 @@ static int handle_remove(const char *nodename, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
struct kstat stat;
struct path p = {.mnt = parent.mnt, .dentry = dentry};
err = vfs_getattr(&p, &stat);
- if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
struct iattr newattrs;
/*
* before unlinking this node, reset permissions
@@ -321,10 +321,10 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_mode = stat.mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
notify_change(dentry, &newattrs, NULL);
- mutex_unlock(&dentry->d_inode->i_mutex);
- err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
+ err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
@@ -332,7 +332,7 @@ static int handle_remove(const char *nodename, struct device *dev)
err = -ENOENT;
}
dput(dentry);
- mutex_unlock(&parent.dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent.dentry)->i_mutex);
path_put(&parent);
if (deleted && strchr(nodename, '/'))
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index af9c911cd6b5..2804aed3f416 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -219,6 +219,7 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
+ * Must already be protected by mem_hotplug_begin().
*/
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
@@ -228,7 +229,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
struct page *first_page;
int ret;
- start_pfn = phys_index << PFN_SECTION_SHIFT;
+ start_pfn = section_nr_to_pfn(phys_index);
first_page = pfn_to_page(start_pfn);
switch (action) {
@@ -286,6 +287,7 @@ static int memory_subsys_online(struct device *dev)
if (mem->online_type < 0)
mem->online_type = MMOP_ONLINE_KEEP;
+ /* Already under protection of mem_hotplug_begin() */
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
/* clear online_type */
@@ -328,17 +330,19 @@ store_mem_state(struct device *dev,
goto err;
}
+ /*
+ * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
+ * the correct memory block to online before doing device_online(dev),
+	 * which will take dev->mutex. Take the lock early to prevent an
+	 * inversion; the memory_subsys_online() callback is written on the
+	 * assumption that it is already protected.
+ */
+ mem_hotplug_begin();
+
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
case MMOP_ONLINE_KEEP:
- /*
- * mem->online_type is not protected so there can be a
- * race here. However, when racing online, the first
- * will succeed and the second will just return as the
- * block will already be online. The online type
- * could be either one, but that is expected.
- */
mem->online_type = online_type;
ret = device_online(&mem->dev);
break;
@@ -349,6 +353,7 @@ store_mem_state(struct device *dev,
ret = -EINVAL; /* should never happen */
}
+ mem_hotplug_done();
err:
unlock_device_hotplug();
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e68ab79df28b..ebf034b97278 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full(
goto err_alloc;
pdev->dev.parent = pdevinfo->parent;
- ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);
+ pdev->dev.fwnode = pdevinfo->fwnode;
if (pdevinfo->dma_mask) {
/*
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 45937f88e77c..2327613d4539 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -68,7 +68,36 @@ static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
return genpd;
}
-struct generic_pm_domain *dev_to_genpd(struct device *dev)
+/*
+ * Get the generic PM domain for a particular struct device.
+ * This validates the struct device pointer, the PM domain pointer,
+ * and checks that the PM domain pointer is a real generic PM domain.
+ * Any failure results in NULL being returned.
+ */
+struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
+{
+ struct generic_pm_domain *genpd = NULL, *gpd;
+
+ if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
+ return NULL;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (&gpd->domain == dev->pm_domain) {
+ genpd = gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return genpd;
+}
+
+/*
+ * This should only be used where we are certain that the pm_domain
+ * attached to the device is a genpd domain.
+ */
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
return ERR_PTR(-EINVAL);
@@ -173,8 +202,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
genpd->power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
genpd_recalc_cpu_exit_latency(genpd);
- pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
- genpd->name, "on", elapsed_ns);
+ pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+ genpd->name, "on", elapsed_ns);
return ret;
}
@@ -199,8 +228,8 @@ static int genpd_power_off(struct generic_pm_domain *genpd)
genpd->power_off_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
- pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
- genpd->name, "off", elapsed_ns);
+ pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+ genpd->name, "off", elapsed_ns);
return ret;
}
@@ -1513,9 +1542,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
dev_dbg(dev, "%s()\n", __func__);
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
- || IS_ERR_OR_NULL(dev->pm_domain)
- || pd_to_genpd(dev->pm_domain) != genpd)
+ if (!genpd || genpd != pm_genpd_lookup_dev(dev))
return -EINVAL;
/* The above validation also means we have existing domain_data. */
@@ -2093,21 +2120,10 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
*/
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
- struct generic_pm_domain *pd = NULL, *gpd;
+ struct generic_pm_domain *pd;
int ret = 0;
- if (!dev->pm_domain)
- return;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (&gpd->domain == dev->pm_domain) {
- pd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
-
+ pd = pm_genpd_lookup_dev(dev);
if (!pd)
return;
@@ -2130,6 +2146,17 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
genpd_queue_power_off_work(pd);
}
+static void genpd_dev_pm_sync(struct device *dev)
+{
+ struct generic_pm_domain *pd;
+
+ pd = dev_to_genpd(dev);
+ if (IS_ERR(pd))
+ return;
+
+ genpd_queue_power_off_work(pd);
+}
+
/**
* genpd_dev_pm_attach - Attach a device to its PM domain using DT.
* @dev: Device to attach.
@@ -2196,6 +2223,7 @@ int genpd_dev_pm_attach(struct device *dev)
}
dev->pm_domain->detach = genpd_dev_pm_detach;
+ dev->pm_domain->sync = genpd_dev_pm_sync;
pm_genpd_poweron(pd);
return 0;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9717d5f20139..3d874eca7104 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -23,7 +23,7 @@
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/resume-trace.h>
+#include <linux/pm-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
@@ -1017,6 +1017,9 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
char *info = NULL;
int error = 0;
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
if (async_error)
goto Complete;
@@ -1057,6 +1060,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
Complete:
complete_all(&dev->power.completion);
+ TRACE_SUSPEND(error);
return error;
}
@@ -1078,7 +1082,7 @@ static int device_suspend_noirq(struct device *dev)
{
reinit_completion(&dev->power.completion);
- if (pm_async_enabled && dev->power.async_suspend) {
+ if (is_async(dev)) {
get_device(dev);
async_schedule(async_suspend_noirq, dev);
return 0;
@@ -1157,6 +1161,9 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
char *info = NULL;
int error = 0;
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
__pm_runtime_disable(dev, false);
if (async_error)
@@ -1198,6 +1205,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
async_error = error;
Complete:
+ TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
return error;
}
@@ -1219,7 +1227,7 @@ static int device_suspend_late(struct device *dev)
{
reinit_completion(&dev->power.completion);
- if (pm_async_enabled && dev->power.async_suspend) {
+ if (is_async(dev)) {
get_device(dev);
async_schedule(async_suspend_late, dev);
return 0;
@@ -1338,6 +1346,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
int error = 0;
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
dpm_wait_for_children(dev, async);
if (async_error)
@@ -1444,6 +1455,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (error)
async_error = error;
+ TRACE_SUSPEND(error);
return error;
}
@@ -1465,7 +1477,7 @@ static int device_suspend(struct device *dev)
{
reinit_completion(&dev->power.completion);
- if (pm_async_enabled && dev->power.async_suspend) {
+ if (is_async(dev)) {
get_device(dev);
async_schedule(async_suspend, dev);
return 0;
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index d94a1f5121cf..a311cfa4c5bd 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -7,7 +7,7 @@
* devices may be working.
*/
-#include <linux/resume-trace.h>
+#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
@@ -154,7 +154,7 @@ EXPORT_SYMBOL(set_trace_device);
* it's not any guarantee, but it's a high _likelihood_ that
* the match is valid).
*/
-void generate_resume_trace(const void *tracedata, unsigned int user)
+void generate_pm_trace(const void *tracedata, unsigned int user)
{
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
@@ -164,7 +164,7 @@ void generate_resume_trace(const void *tracedata, unsigned int user)
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
}
-EXPORT_SYMBOL(generate_resume_trace);
+EXPORT_SYMBOL(generate_pm_trace);
extern char __tracedata_start, __tracedata_end;
static int show_file_hash(unsigned int value)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index aab7158d2afe..77262009f89d 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -843,7 +843,6 @@ static int print_wakeup_source_stats(struct seq_file *m,
unsigned long active_count;
ktime_t active_time;
ktime_t prevent_sleep_time;
- int ret;
spin_lock_irqsave(&ws->lock, flags);
@@ -866,17 +865,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
active_time = ktime_set(0, 0);
}
- ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
- "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
- ws->name, active_count, ws->event_count,
- ws->wakeup_count, ws->expire_count,
- ktime_to_ms(active_time), ktime_to_ms(total_time),
- ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
- ktime_to_ms(prevent_sleep_time));
+ seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+ ws->name, active_count, ws->event_count,
+ ws->wakeup_count, ws->expire_count,
+ ktime_to_ms(active_time), ktime_to_ms(total_time),
+ ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+ ktime_to_ms(prevent_sleep_time));
spin_unlock_irqrestore(&ws->lock, flags);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 423df593f262..1d0b116cae95 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -10,10 +10,102 @@
* published by the Free Software Foundation.
*/
-#include <linux/property.h>
-#include <linux/export.h>
#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/property.h>
+
+/**
+ * device_add_property_set - Add a collection of properties to a device object.
+ * @dev: Device to add properties to.
+ * @pset: Collection of properties to add.
+ *
+ * Associate a collection of device properties represented by @pset with @dev
+ * as its secondary firmware node.
+ */
+void device_add_property_set(struct device *dev, struct property_set *pset)
+{
+ if (pset)
+ pset->fwnode.type = FWNODE_PDATA;
+
+ set_secondary_fwnode(dev, &pset->fwnode);
+}
+EXPORT_SYMBOL_GPL(device_add_property_set);
+
+static inline bool is_pset(struct fwnode_handle *fwnode)
+{
+ return fwnode && fwnode->type == FWNODE_PDATA;
+}
+
+static inline struct property_set *to_pset(struct fwnode_handle *fwnode)
+{
+ return is_pset(fwnode) ?
+ container_of(fwnode, struct property_set, fwnode) : NULL;
+}
+
+static struct property_entry *pset_prop_get(struct property_set *pset,
+ const char *name)
+{
+ struct property_entry *prop;
+
+ if (!pset || !pset->properties)
+ return NULL;
+
+ for (prop = pset->properties; prop->name; prop++)
+ if (!strcmp(name, prop->name))
+ return prop;
+
+ return NULL;
+}
+
+static int pset_prop_read_array(struct property_set *pset, const char *name,
+ enum dev_prop_type type, void *val, size_t nval)
+{
+ struct property_entry *prop;
+ unsigned int item_size;
+
+ prop = pset_prop_get(pset, name);
+ if (!prop)
+ return -ENODATA;
+
+ if (prop->type != type)
+ return -EPROTO;
+
+ if (!val)
+ return prop->nval;
+
+ if (prop->nval < nval)
+ return -EOVERFLOW;
+
+ switch (type) {
+ case DEV_PROP_U8:
+ item_size = sizeof(u8);
+ break;
+ case DEV_PROP_U16:
+ item_size = sizeof(u16);
+ break;
+ case DEV_PROP_U32:
+ item_size = sizeof(u32);
+ break;
+ case DEV_PROP_U64:
+ item_size = sizeof(u64);
+ break;
+ case DEV_PROP_STRING:
+ item_size = sizeof(const char *);
+ break;
+ default:
+ return -EINVAL;
+ }
+ memcpy(val, prop->value.raw_data, nval * item_size);
+ return 0;
+}
+
+static inline struct fwnode_handle *dev_fwnode(struct device *dev)
+{
+ return IS_ENABLED(CONFIG_OF) && dev->of_node ?
+ &dev->of_node->fwnode : dev->fwnode;
+}
/**
* device_property_present - check if a property of a device is present
@@ -24,10 +116,7 @@
*/
bool device_property_present(struct device *dev, const char *propname)
{
- if (IS_ENABLED(CONFIG_OF) && dev->of_node)
- return of_property_read_bool(dev->of_node, propname);
-
- return !acpi_dev_prop_get(ACPI_COMPANION(dev), propname, NULL);
+ return fwnode_property_present(dev_fwnode(dev), propname);
}
EXPORT_SYMBOL_GPL(device_property_present);
@@ -43,32 +132,22 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
else if (is_acpi_node(fwnode))
return !acpi_dev_prop_get(acpi_node(fwnode), propname, NULL);
- return false;
+ return !!pset_prop_get(to_pset(fwnode), propname);
}
EXPORT_SYMBOL_GPL(fwnode_property_present);
-#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \
- (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \
- : of_property_count_elems_of_size((node), (propname), sizeof(type))
-
-#define DEV_PROP_READ_ARRAY(_dev_, _propname_, _type_, _proptype_, _val_, _nval_) \
- IS_ENABLED(CONFIG_OF) && _dev_->of_node ? \
- (OF_DEV_PROP_READ_ARRAY(_dev_->of_node, _propname_, _type_, \
- _val_, _nval_)) : \
- acpi_dev_prop_read(ACPI_COMPANION(_dev_), _propname_, \
- _proptype_, _val_, _nval_)
-
/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u8 properties with @propname from the device
* firmware description and stores them to @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -77,7 +156,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval)
{
- return DEV_PROP_READ_ARRAY(dev, propname, u8, DEV_PROP_U8, val, nval);
+ return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u8_array);
@@ -85,13 +164,14 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);
* device_property_read_u16_array - return a u16 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u16 properties with @propname from the device
* firmware description and stores them to @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -100,7 +180,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);
int device_property_read_u16_array(struct device *dev, const char *propname,
u16 *val, size_t nval)
{
- return DEV_PROP_READ_ARRAY(dev, propname, u16, DEV_PROP_U16, val, nval);
+ return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u16_array);
@@ -108,13 +188,14 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);
* device_property_read_u32_array - return a u32 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u32 properties with @propname from the device
* firmware description and stores them to @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -123,7 +204,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);
int device_property_read_u32_array(struct device *dev, const char *propname,
u32 *val, size_t nval)
{
- return DEV_PROP_READ_ARRAY(dev, propname, u32, DEV_PROP_U32, val, nval);
+ return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u32_array);
@@ -131,13 +212,14 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);
* device_property_read_u64_array - return a u64 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u64 properties with @propname from the device
* firmware description and stores them to @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -146,7 +228,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);
int device_property_read_u64_array(struct device *dev, const char *propname,
u64 *val, size_t nval)
{
- return DEV_PROP_READ_ARRAY(dev, propname, u64, DEV_PROP_U64, val, nval);
+ return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u64_array);
@@ -154,13 +236,14 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);
* device_property_read_string_array - return a string array property of device
* @dev: Device to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of string properties with @propname from the device
* firmware description and stores them to @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
@@ -169,10 +252,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);
int device_property_read_string_array(struct device *dev, const char *propname,
const char **val, size_t nval)
{
- return IS_ENABLED(CONFIG_OF) && dev->of_node ?
- of_property_read_string_array(dev->of_node, propname, val, nval) :
- acpi_dev_prop_read(ACPI_COMPANION(dev), propname,
- DEV_PROP_STRING, val, nval);
+ return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_string_array);
@@ -193,13 +273,14 @@ EXPORT_SYMBOL_GPL(device_property_read_string_array);
int device_property_read_string(struct device *dev, const char *propname,
const char **val)
{
- return IS_ENABLED(CONFIG_OF) && dev->of_node ?
- of_property_read_string(dev->of_node, propname, val) :
- acpi_dev_prop_read(ACPI_COMPANION(dev), propname,
- DEV_PROP_STRING, val, 1);
+ return fwnode_property_read_string(dev_fwnode(dev), propname, val);
}
EXPORT_SYMBOL_GPL(device_property_read_string);
+#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \
+ (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \
+ : of_property_count_elems_of_size((node), (propname), sizeof(type))
+
#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \
({ \
int _ret_; \
@@ -210,7 +291,8 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
_ret_ = acpi_dev_prop_read(acpi_node(_fwnode_), _propname_, \
_proptype_, _val_, _nval_); \
else \
- _ret_ = -ENXIO; \
+ _ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
+ _proptype_, _val_, _nval_); \
_ret_; \
})
@@ -218,13 +300,14 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
* fwnode_property_read_u8_array - return a u8 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
 * Read an array of u8 properties with @propname from @fwnode and store them to
* @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -243,13 +326,14 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
* fwnode_property_read_u16_array - return a u16 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u16 properties with @propname from @fwnode and store them to
* @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -268,13 +352,14 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
* fwnode_property_read_u32_array - return a u32 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
 * Read an array of u32 properties with @propname from @fwnode and store them to
* @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -293,13 +378,14 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
 * fwnode_property_read_u64_array - return a u64 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u64 properties with @propname from @fwnode and store them to
* @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
@@ -318,13 +404,14 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
* fwnode_property_read_string_array - return string array property of a node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
- * @val: The values are stored here
+ * @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
 * Read a string list property @propname from the given firmware node and store
* them to @val if found.
*
- * Return: %0 if the property was found (success),
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of strings,
@@ -336,13 +423,16 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
size_t nval)
{
if (is_of_node(fwnode))
- return of_property_read_string_array(of_node(fwnode), propname,
- val, nval);
+ return val ?
+ of_property_read_string_array(of_node(fwnode), propname,
+ val, nval) :
+ of_property_count_strings(of_node(fwnode), propname);
else if (is_acpi_node(fwnode))
return acpi_dev_prop_read(acpi_node(fwnode), propname,
DEV_PROP_STRING, val, nval);
- return -ENXIO;
+ return pset_prop_read_array(to_pset(fwnode), propname,
+ DEV_PROP_STRING, val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
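
Together with device_add_property_set() above, the pset_* helpers let built-in
board code hand a device static properties that are then served through the same
device_property_*() accessors used for DT and ACPI. A rough sketch, assuming the
struct property_entry / struct property_set layout implied by the lookups above
(name, type, nval and value.raw_data); the property and symbol names are made up:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/property.h>

	static u32 example_rate[] = { 100000 };

	static struct property_entry example_props[] = {
		{
			.name = "clock-frequency",
			.type = DEV_PROP_U32,
			.nval = ARRAY_SIZE(example_rate),
			.value.raw_data = example_rate,
		},
		{ }	/* sentinel: pset_prop_get() stops at a NULL name */
	};

	static struct property_set example_pset = {
		.properties = example_props,
	};

	static void example_register(struct device *dev)
	{
		u32 rate;

		device_add_property_set(dev, &example_pset);

		/* Falls through to pset_prop_read_array() when the device has
		 * no DT node and no ACPI companion. */
		if (!device_property_read_u32(dev, "clock-frequency", &rate))
			dev_info(dev, "clock-frequency: %u\n", rate);
	}
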
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 0ee48be23837..fc6ffcfa8061 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE
config BCMA_HOST_PCI
bool "Support for BCMA on PCI-host bus"
depends on BCMA_HOST_PCI_POSSIBLE
+ select BCMA_DRIVER_PCI
default y
config BCMA_DRIVER_PCI_HOSTMODE
@@ -44,6 +45,22 @@ config BCMA_HOST_SOC
If unsure, say N
+config BCMA_DRIVER_PCI
+ bool "BCMA Broadcom PCI core driver"
+ depends on BCMA && PCI
+ default y
+ help
+	  A BCMA bus may contain one of several versions of the PCIe core.
+	  This driver supports:
+	  1) the PCIe core working in clientmode
+	  2) the PCIe Gen 2 clientmode core
+
+	  In general, a PCIe (Gen 2) clientmode core is required on
+	  PCIe-hosted buses. It is responsible for initialization and
+	  basic hardware management.
+	  This driver is also a prerequisite for hostmode PCIe core
+	  support.
+
config BCMA_DRIVER_MIPS
bool "BCMA Broadcom MIPS core driver"
depends on BCMA && MIPS
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 838b4b9d352f..f32af9b76bcd 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -3,8 +3,8 @@ bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
bcma-y += driver_chipcommon_b.o
bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o
bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o
-bcma-y += driver_pci.o
-bcma-y += driver_pcie2.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pci.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pcie2.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index ac6c5fca906d..15f2b2e242ea 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -26,6 +26,7 @@ bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout);
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
void bcma_init_bus(struct bcma_bus *bus);
+void bcma_unregister_cores(struct bcma_bus *bus);
int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus);
@@ -42,6 +43,9 @@ int bcma_bus_scan(struct bcma_bus *bus);
int bcma_sprom_get(struct bcma_bus *bus);
/* driver_chipcommon.c */
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
#ifdef CONFIG_BCMA_DRIVER_MIPS
void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
extern struct platform_device bcma_pflash_dev;
@@ -52,6 +56,8 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb);
/* driver_chipcommon_pmu.c */
+void bcma_pmu_early_init(struct bcma_drv_cc *cc);
+void bcma_pmu_init(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
@@ -100,7 +106,35 @@ static inline void __exit bcma_host_soc_unregister_driver(void)
#endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */
/* driver_pci.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_up(struct bcma_drv_pci *pc);
+void bcma_core_pci_down(struct bcma_drv_pci *pc);
+#else
+static inline void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
+{
+ WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+static inline void bcma_core_pci_init(struct bcma_drv_pci *pc)
+{
+ /* Initialization is required for PCI hosted bus */
+ WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
+
+/* driver_pcie2.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
+#else
+static inline void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
+{
+ /* Initialization is required for PCI hosted bus */
+ WARN_ON(pcie2->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
@@ -117,6 +151,39 @@ static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
}
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+/**************************************************
+ * driver_mips.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+unsigned int bcma_core_mips_irq(struct bcma_device *dev);
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
+void bcma_core_mips_init(struct bcma_drv_mips *mcore);
+#else
+static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+{
+ return 0;
+}
+static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+}
+static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore)
+{
+}
+#endif
+
+/**************************************************
+ * driver_gmac_cmn.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
+#else
+static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+}
+#endif
+
#ifdef CONFIG_BCMA_DRIVER_GPIO
/* driver_gpio.c */
int bcma_gpio_init(struct bcma_drv_cc *cc);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 598a6cd9028a..74ccb02e0f10 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -17,6 +17,8 @@
#include "bcma_private.h"
+#define BCMA_GPIO_MAX_PINS 32
+
static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
{
return container_of(chip, struct bcma_drv_cc, gpio);
@@ -76,7 +78,7 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
}
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
@@ -204,6 +206,7 @@ static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
int bcma_gpio_init(struct bcma_drv_cc *cc)
{
+ struct bcma_bus *bus = cc->core->bus;
struct gpio_chip *chip = &cc->gpio;
int err;
@@ -215,14 +218,14 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
chip->to_irq = bcma_gpio_to_irq;
#endif
#if IS_BUILTIN(CONFIG_OF)
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
chip->of_node = cc->core->dev.of_node;
#endif
- switch (cc->core->bus->chipinfo.id) {
+ switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
chip->ngpio = 32;
@@ -231,13 +234,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->ngpio = 16;
}
- /* There is just one SoC in one device and its GPIO addresses should be
- * deterministic to address them more easily. The other buses could get
- * a random base number. */
- if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
- chip->base = 0;
- else
- chip->base = -1;
+ /*
+ * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
+ * pin numbers. We don't have Device Tree there and we can't really use
+ * relative (per chip) numbers.
+	 * So let's use a predictable base for BCM47XX and "random" for all others.
+ */
+#if IS_BUILTIN(CONFIG_BCM47XX)
+ chip->base = bus->num * BCMA_GPIO_MAX_PINS;
+#else
+ chip->base = -1;
+#endif
err = bcma_gpio_irq_domain_init(cc);
if (err)
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 04faf6df959f..24424f3fef96 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -21,7 +21,7 @@
#include <linux/serial_reg.h>
#include <linux/time.h>
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
#endif
enum bcma_boot_dev {
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 786666488a2d..f499a469e66d 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -282,39 +282,6 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
}
EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
-int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
- bool enable)
-{
- struct pci_dev *pdev;
- u32 coremask, tmp;
- int err = 0;
-
- if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
- /* This bcma device is not on a PCI host-bus. So the IRQs are
- * not routed through the PCI core.
- * So we must not enable routing through the PCI core. */
- goto out;
- }
-
- pdev = pc->core->bus->host_pci;
-
- err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
- if (err)
- goto out;
-
- coremask = BIT(core->core_index) << 8;
- if (enable)
- tmp |= coremask;
- else
- tmp &= ~coremask;
-
- err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
-
static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
u32 w;
@@ -328,28 +295,12 @@ static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
}
-void bcma_core_pci_up(struct bcma_bus *bus)
+void bcma_core_pci_up(struct bcma_drv_pci *pc)
{
- struct bcma_drv_pci *pc;
-
- if (bus->hosttype != BCMA_HOSTTYPE_PCI)
- return;
-
- pc = &bus->drv_pci[0];
-
bcma_core_pci_extend_L1timer(pc, true);
}
-EXPORT_SYMBOL_GPL(bcma_core_pci_up);
-void bcma_core_pci_down(struct bcma_bus *bus)
+void bcma_core_pci_down(struct bcma_drv_pci *pc)
{
- struct bcma_drv_pci *pc;
-
- if (bus->hosttype != BCMA_HOSTTYPE_PCI)
- return;
-
- pc = &bus->drv_pci[0];
-
bcma_core_pci_extend_L1timer(pc, false);
}
-EXPORT_SYMBOL_GPL(bcma_core_pci_down);
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index c8a6b741967b..c42cec7c7ecc 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -11,6 +11,7 @@
#include "bcma_private.h"
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>
diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c
index e4be537b0c66..b1a6e327cb23 100644
--- a/drivers/bcma/driver_pcie2.c
+++ b/drivers/bcma/driver_pcie2.c
@@ -10,6 +10,7 @@
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
+#include <linux/pci.h>
/**************************************************
* R/W ops.
@@ -156,14 +157,23 @@ static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
{
- struct bcma_chipinfo *ci = &pcie2->core->bus->chipinfo;
+ struct bcma_bus *bus = pcie2->core->bus;
+ struct bcma_chipinfo *ci = &bus->chipinfo;
u32 tmp;
tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
if ((tmp & 0xe) >> 1 == 2)
bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);
- /* TODO: Do we need pcie_reqsize? */
+ switch (bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM4360:
+ case BCMA_CHIP_ID_BCM4352:
+ pcie2->reqsize = 1024;
+ break;
+ default:
+ pcie2->reqsize = 128;
+ break;
+ }
if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
@@ -173,3 +183,18 @@ void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
pciedev_crwlpciegen2_180(pcie2);
pciedev_crwlpciegen2_182(pcie2);
}
+
+/**************************************************
+ * Runtime ops.
+ **************************************************/
+
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
+{
+ struct bcma_bus *bus = pcie2->core->bus;
+ struct pci_dev *dev = bus->host_pci;
+ int err;
+
+ err = pcie_set_readrq(dev, pcie2->reqsize);
+ if (err)
+ bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
+}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 53c6a8a58859..0856189c065f 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -213,16 +213,26 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
/* Initialize struct, detect chip */
bcma_init_bus(bus);
+ /* Scan bus to find out generation of PCIe core */
+ err = bcma_bus_scan(bus);
+ if (err)
+ goto err_pci_unmap_mmio;
+
+ if (bcma_find_core(bus, BCMA_CORE_PCIE2))
+ bus->host_is_pcie2 = true;
+
/* Register */
err = bcma_bus_register(bus);
if (err)
- goto err_pci_unmap_mmio;
+ goto err_unregister_cores;
pci_set_drvdata(dev, bus);
out:
return err;
+err_unregister_cores:
+ bcma_unregister_cores(bus);
err_pci_unmap_mmio:
pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
@@ -283,9 +293,12 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xa8db, BCM43217 (sic!) */
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) }, /* 0xa8dc */
@@ -310,3 +323,65 @@ void __exit bcma_host_pci_exit(void)
{
pci_unregister_driver(&bcma_pci_bridge_driver);
}
+
+/**************************************************
+ * Runtime ops for drivers.
+ **************************************************/
+
+/* See also pcicore_up */
+void bcma_host_pci_up(struct bcma_bus *bus)
+{
+ if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+ return;
+
+ if (bus->host_is_pcie2)
+ bcma_core_pcie2_up(&bus->drv_pcie2);
+ else
+ bcma_core_pci_up(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_up);
+
+/* See also pcicore_down */
+void bcma_host_pci_down(struct bcma_bus *bus)
+{
+ if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+ return;
+
+ if (!bus->host_is_pcie2)
+ bcma_core_pci_down(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_down);
+
+/* See also si_pci_setup */
+int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
+ bool enable)
+{
+ struct pci_dev *pdev;
+ u32 coremask, tmp;
+ int err = 0;
+
+ if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
+ /* This bcma device is not on a PCI host-bus. So the IRQs are
+ * not routed through the PCI core.
+ * So we must not enable routing through the PCI core. */
+ goto out;
+ }
+
+ pdev = bus->host_pci;
+
+ err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+ if (err)
+ goto out;
+
+ coremask = BIT(core->core_index) << 8;
+ if (enable)
+ tmp |= coremask;
+ else
+ tmp &= ~coremask;
+
+ err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
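
The bus-level wrappers above replace the removed bcma_core_pci_irq_ctl() and
bcma_core_pci_up()/down() exports: callers now pass the struct bcma_bus and the
wrapper picks the PCIe or PCIe Gen 2 core internally. A hedged sketch of a caller
on a PCI-hosted bus (the function name is made up; the declarations are assumed
to live in <linux/bcma/bcma.h>):

	#include <linux/bcma/bcma.h>

	static int example_core_up(struct bcma_device *core)
	{
		struct bcma_bus *bus = core->bus;
		int err;

		/* Route this core's interrupt through the host PCI core. */
		err = bcma_host_pci_irq_ctl(bus, core, true);
		if (err)
			return err;

		/* Apply the PCIe (or PCIe Gen 2) "up" tuning for the bus. */
		bcma_host_pci_up(bus);
		return 0;
	}
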
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 38bde6eab8a4..9635f1033ce5 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -363,7 +363,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
return 0;
}
-static void bcma_unregister_cores(struct bcma_bus *bus)
+void bcma_unregister_cores(struct bcma_bus *bus)
{
struct bcma_device *core, *tmp;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 1b8094d4d7af..eb1fed5bd516 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -404,6 +404,17 @@ config BLK_DEV_RAM_DAX
and will prevent RAM block device backing store memory from being
allocated from highmem (only a problem for highmem systems).
+config BLK_DEV_PMEM
+ tristate "Persistent memory block device support"
+ help
+ Saying Y here will allow you to use a contiguous range of reserved
+ memory as one or more persistent block devices.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 'pmem'.
+
+ If unsure, say N.
+
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media"
depends on !UML
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 02b688d1438d..9cc6c18a1c7e 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_PS3_VRAM) += ps3vram.o
obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
+obj-$(CONFIG_BLK_DEV_PMEM) += pmem.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 9a950022ff88..a6ee3d750c30 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -424,7 +424,7 @@ static int in_flight_summary_show(struct seq_file *m, void *pos)
* So we have our own inline version of it above. :-( */
static inline int debugfs_positive(struct dentry *dentry)
{
- return dentry->d_inode && !d_unhashed(dentry);
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
/* make sure at *open* time that the respective object won't go away. */
@@ -439,15 +439,15 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
* or has debugfs_remove() already been called? */
parent = file->f_path.dentry->d_parent;
/* not sure if this can happen: */
- if (!parent || !parent->d_inode)
+ if (!parent || d_really_is_negative(parent))
goto out;
/* serialize with d_delete() */
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
/* Make sure the object is still alive */
if (debugfs_positive(file->f_path.dentry)
&& kref_get_unless_zero(kref))
ret = 0;
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
if (!ret) {
ret = single_open(file, show, data);
if (ret)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1fc83427199c..81fde9ef7f8e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2107,13 +2107,12 @@ static int drbd_create_mempools(void)
if (drbd_md_io_page_pool == NULL)
goto Enomem;
- drbd_request_mempool = mempool_create(number,
- mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
+ drbd_request_mempool = mempool_create_slab_pool(number,
+ drbd_request_cache);
if (drbd_request_mempool == NULL)
goto Enomem;
- drbd_ee_mempool = mempool_create(number,
- mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
+ drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
if (drbd_ee_mempool == NULL)
goto Enomem;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 34f2f0ba409b..3907202fb9d9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -52,9 +52,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
{
struct drbd_request *req;
- req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
+ req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
if (!req)
return NULL;
+ memset(req, 0, sizeof(*req));
drbd_req_make_private_bio(req, bio_src);
req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
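
The drop of __GFP_ZERO in favour of an explicit memset() above is not cosmetic:
when mempool_alloc() hands out an element from the pool's reserve, the object is
recycled as-is and allocation flags such as __GFP_ZERO are not applied, so the
caller has to zero it itself. A minimal sketch of the resulting pattern (the
struct is a stand-in for the real, much larger drbd_request):

	#include <linux/gfp.h>
	#include <linux/mempool.h>
	#include <linux/string.h>

	struct example_request {
		unsigned int rq_state;
	};

	static struct example_request *example_req_alloc(mempool_t *pool)
	{
		struct example_request *req;

		/* Elements recycled from the pool's reserve bypass the slab
		 * allocator, so __GFP_ZERO would not zero them; do it by hand. */
		req = mempool_alloc(pool, GFP_NOIO);
		if (!req)
			return NULL;
		memset(req, 0, sizeof(*req));
		return req;
	}
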
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d1f168b73634..d7173cb1ea76 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,6 +75,7 @@
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
+#include <linux/uio.h>
#include "loop.h"
#include <asm/uaccess.h>
@@ -87,28 +88,6 @@ static int part_shift;
static struct workqueue_struct *loop_wq;
-/*
- * Transfer functions
- */
-static int transfer_none(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block)
-{
- char *raw_buf = kmap_atomic(raw_page) + raw_off;
- char *loop_buf = kmap_atomic(loop_page) + loop_off;
-
- if (cmd == READ)
- memcpy(loop_buf, raw_buf, size);
- else
- memcpy(raw_buf, loop_buf, size);
-
- kunmap_atomic(loop_buf);
- kunmap_atomic(raw_buf);
- cond_resched();
- return 0;
-}
-
static int transfer_xor(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
@@ -147,14 +126,13 @@ static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
static struct loop_func_table none_funcs = {
.number = LO_CRYPT_NONE,
- .transfer = transfer_none,
-};
+};
static struct loop_func_table xor_funcs = {
.number = LO_CRYPT_XOR,
.transfer = transfer_xor,
.init = xor_init
-};
+};
/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
@@ -214,206 +192,169 @@ lo_do_transfer(struct loop_device *lo, int cmd,
struct page *lpage, unsigned loffs,
int size, sector_t rblock)
{
- if (unlikely(!lo->transfer))
+ int ret;
+
+ ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
+ if (likely(!ret))
return 0;
- return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
+ printk_ratelimited(KERN_ERR
+ "loop: Transfer error at byte offset %llu, length %i.\n",
+ (unsigned long long)rblock << 9, size);
+ return ret;
}
-/**
- * __do_lo_send_write - helper for writing data to a loop device
- *
- * This helper just factors out common code between do_lo_send_direct_write()
- * and do_lo_send_write().
- */
-static int __do_lo_send_write(struct file *file,
- u8 *buf, const int len, loff_t pos)
+static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
+ struct iov_iter i;
ssize_t bw;
- mm_segment_t old_fs = get_fs();
+
+ iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
file_start_write(file);
- set_fs(get_ds());
- bw = file->f_op->write(file, buf, len, &pos);
- set_fs(old_fs);
+ bw = vfs_iter_write(file, &i, ppos);
file_end_write(file);
- if (likely(bw == len))
+
+ if (likely(bw == bvec->bv_len))
return 0;
- printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
- (unsigned long long)pos, len);
+
+ printk_ratelimited(KERN_ERR
+ "loop: Write error at byte offset %llu, length %i.\n",
+ (unsigned long long)*ppos, bvec->bv_len);
if (bw >= 0)
bw = -EIO;
return bw;
}
-/**
- * do_lo_send_direct_write - helper for writing data to a loop device
- *
- * This is the fast, non-transforming version that does not need double
- * buffering.
- */
-static int do_lo_send_direct_write(struct loop_device *lo,
- struct bio_vec *bvec, loff_t pos, struct page *page)
+static int lo_write_simple(struct loop_device *lo, struct request *rq,
+ loff_t pos)
{
- ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
- kmap(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len, pos);
- kunmap(bvec->bv_page);
- cond_resched();
- return bw;
+ struct bio_vec bvec;
+ struct req_iterator iter;
+ int ret = 0;
+
+ rq_for_each_segment(bvec, rq, iter) {
+ ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
+ if (ret < 0)
+ break;
+ cond_resched();
+ }
+
+ return ret;
}
-/**
- * do_lo_send_write - helper for writing data to a loop device
- *
+/*
* This is the slow, transforming version that needs to double buffer the
* data as it cannot do the transformations in place without having direct
* access to the destination pages of the backing file.
*/
-static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
- loff_t pos, struct page *page)
-{
- int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len, pos >> 9);
- if (likely(!ret))
- return __do_lo_send_write(lo->lo_backing_file,
- page_address(page), bvec->bv_len,
- pos);
- printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
- "length %i.\n", (unsigned long long)pos, bvec->bv_len);
- if (ret > 0)
- ret = -EIO;
- return ret;
-}
-
-static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos)
+static int lo_write_transfer(struct loop_device *lo, struct request *rq,
+ loff_t pos)
{
- int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
- struct page *page);
- struct bio_vec bvec;
+ struct bio_vec bvec, b;
struct req_iterator iter;
- struct page *page = NULL;
+ struct page *page;
int ret = 0;
- if (lo->transfer != transfer_none) {
- page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page))
- goto fail;
- kmap(page);
- do_lo_send = do_lo_send_write;
- } else {
- do_lo_send = do_lo_send_direct_write;
- }
+ page = alloc_page(GFP_NOIO);
+ if (unlikely(!page))
+ return -ENOMEM;
rq_for_each_segment(bvec, rq, iter) {
- ret = do_lo_send(lo, &bvec, pos, page);
+ ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len, pos >> 9);
+ if (unlikely(ret))
+ break;
+
+ b.bv_page = page;
+ b.bv_offset = 0;
+ b.bv_len = bvec.bv_len;
+ ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
if (ret < 0)
break;
- pos += bvec.bv_len;
- }
- if (page) {
- kunmap(page);
- __free_page(page);
}
-out:
+
+ __free_page(page);
return ret;
-fail:
- printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
- ret = -ENOMEM;
- goto out;
}
-struct lo_read_data {
- struct loop_device *lo;
- struct page *page;
- unsigned offset;
- int bsize;
-};
+static int lo_read_simple(struct loop_device *lo, struct request *rq,
+ loff_t pos)
+{
+ struct bio_vec bvec;
+ struct req_iterator iter;
+ struct iov_iter i;
+ ssize_t len;
-static int
-lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
- struct splice_desc *sd)
-{
- struct lo_read_data *p = sd->u.data;
- struct loop_device *lo = p->lo;
- struct page *page = buf->page;
- sector_t IV;
- int size;
-
- IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
- (buf->offset >> 9);
- size = sd->len;
- if (size > p->bsize)
- size = p->bsize;
-
- if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
- printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
- page->index);
- size = -EINVAL;
- }
+ rq_for_each_segment(bvec, rq, iter) {
+ iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
+ len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+ if (len < 0)
+ return len;
- flush_dcache_page(p->page);
+ flush_dcache_page(bvec.bv_page);
- if (size > 0)
- p->offset += size;
+ if (len != bvec.bv_len) {
+ struct bio *bio;
- return size;
-}
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
+ break;
+ }
+ cond_resched();
+ }
-static int
-lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
-{
- return __splice_from_pipe(pipe, sd, lo_splice_actor);
+ return 0;
}
-static ssize_t
-do_lo_receive(struct loop_device *lo,
- struct bio_vec *bvec, int bsize, loff_t pos)
+static int lo_read_transfer(struct loop_device *lo, struct request *rq,
+ loff_t pos)
{
- struct lo_read_data cookie;
- struct splice_desc sd;
- struct file *file;
- ssize_t retval;
+ struct bio_vec bvec, b;
+ struct req_iterator iter;
+ struct iov_iter i;
+ struct page *page;
+ ssize_t len;
+ int ret = 0;
- cookie.lo = lo;
- cookie.page = bvec->bv_page;
- cookie.offset = bvec->bv_offset;
- cookie.bsize = bsize;
+ page = alloc_page(GFP_NOIO);
+ if (unlikely(!page))
+ return -ENOMEM;
- sd.len = 0;
- sd.total_len = bvec->bv_len;
- sd.flags = 0;
- sd.pos = pos;
- sd.u.data = &cookie;
+ rq_for_each_segment(bvec, rq, iter) {
+ loff_t offset = pos;
- file = lo->lo_backing_file;
- retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
+ b.bv_page = page;
+ b.bv_offset = 0;
+ b.bv_len = bvec.bv_len;
- return retval;
-}
+ iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
+ len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+ if (len < 0) {
+ ret = len;
+ goto out_free_page;
+ }
-static int
-lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- ssize_t s;
+ ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
+ bvec.bv_offset, len, offset >> 9);
+ if (ret)
+ goto out_free_page;
- rq_for_each_segment(bvec, rq, iter) {
- s = do_lo_receive(lo, &bvec, bsize, pos);
- if (s < 0)
- return s;
+ flush_dcache_page(bvec.bv_page);
- if (s != bvec.bv_len) {
+ if (len != bvec.bv_len) {
struct bio *bio;
__rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
break;
}
- pos += bvec.bv_len;
}
- return 0;
+
+ ret = 0;
+out_free_page:
+ __free_page(page);
+ return ret;
}
static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
@@ -462,10 +403,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
ret = lo_req_flush(lo, rq);
else if (rq->cmd_flags & REQ_DISCARD)
ret = lo_discard(lo, rq, pos);
+ else if (lo->transfer)
+ ret = lo_write_transfer(lo, rq, pos);
else
- ret = lo_send(lo, rq, pos);
- } else
- ret = lo_receive(lo, rq, lo->lo_blocksize, pos);
+ ret = lo_write_simple(lo, rq, pos);
+
+ } else {
+ if (lo->transfer)
+ ret = lo_read_transfer(lo, rq, pos);
+ else
+ ret = lo_read_simple(lo, rq, pos);
+ }
return ret;
}
@@ -767,7 +715,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
goto out_putf;
if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
- !file->f_op->write)
+ !file->f_op->write_iter)
lo_flags |= LO_FLAGS_READ_ONLY;
lo_blocksize = S_ISBLK(inode->i_mode) ?
@@ -786,7 +734,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->transfer = transfer_none;
+ lo->transfer = NULL;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
@@ -1005,7 +953,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
info->lo_encrypt_key_size);
lo->lo_key_owner = uid;
- }
+ }
return 0;
}
@@ -1672,8 +1620,8 @@ out:
static void loop_remove(struct loop_device *lo)
{
- del_gendisk(lo->lo_disk);
blk_cleanup_queue(lo->lo_queue);
+ del_gendisk(lo->lo_disk);
blk_mq_free_tag_set(&lo->tag_set);
put_disk(lo->lo_disk);
kfree(lo);
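
The loop rewrite above replaces the splice-actor read path and the per-bvec write helpers with plain vectored I/O: each bio_vec is wrapped in a one-segment iov_iter and handed to vfs_iter_read()/vfs_iter_write() on the backing file. A minimal userspace analogue of that vectored-read idea, using preadv() on an ordinary file (illustrative only; the file name and buffer sizes are arbitrary, and this is not the kernel path itself):

/* Vectored read sketch: one call fills several segments, much as the loop
 * driver now issues one vfs_iter_read() per bio_vec segment. */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(int argc, char **argv)
{
        char a[512], b[512];
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };
        int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* read into both segments starting at offset 0 */
        ssize_t n = preadv(fd, iov, 2, 0);
        if (n < 0)
                perror("preadv");
        else
                printf("read %zd bytes into 2 segments\n", n);
        close(fd);
        return 0;
}
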
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a98c41f72c63..39e5f7fae3ef 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -32,28 +32,36 @@
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
+#include <linux/types.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/nbd.h>
-#define NBD_MAGIC 0x68797548
+struct nbd_device {
+ int flags;
+ int harderror; /* Code of hard error */
+ struct socket * sock; /* If == NULL, device is not ready, yet */
+ int magic;
+
+ spinlock_t queue_lock;
+ struct list_head queue_head; /* Requests waiting result */
+ struct request *active_req;
+ wait_queue_head_t active_wq;
+ struct list_head waiting_queue; /* Requests to be sent */
+ wait_queue_head_t waiting_wq;
+
+ struct mutex tx_lock;
+ struct gendisk *disk;
+ int blksize;
+ loff_t bytesize;
+ pid_t pid; /* pid of nbd-client, if attached */
+ int xmit_timeout;
+ int disconnect; /* a disconnect has been requested by user */
+};
-#ifdef NDEBUG
-#define dprintk(flags, fmt...)
-#else /* NDEBUG */
-#define dprintk(flags, fmt...) do { \
- if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
-} while (0)
-#define DBG_IOCTL 0x0004
-#define DBG_INIT 0x0010
-#define DBG_EXIT 0x0020
-#define DBG_BLKDEV 0x0100
-#define DBG_RX 0x0200
-#define DBG_TX 0x0400
-static unsigned int debugflags;
-#endif /* NDEBUG */
+#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
@@ -71,25 +79,9 @@ static int max_part;
*/
static DEFINE_SPINLOCK(nbd_lock);
-#ifndef NDEBUG
-static const char *ioctl_cmd_to_ascii(int cmd)
+static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
- switch (cmd) {
- case NBD_SET_SOCK: return "set-sock";
- case NBD_SET_BLKSIZE: return "set-blksize";
- case NBD_SET_SIZE: return "set-size";
- case NBD_SET_TIMEOUT: return "set-timeout";
- case NBD_SET_FLAGS: return "set-flags";
- case NBD_DO_IT: return "do-it";
- case NBD_CLEAR_SOCK: return "clear-sock";
- case NBD_CLEAR_QUE: return "clear-que";
- case NBD_PRINT_DEBUG: return "print-debug";
- case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
- case NBD_DISCONNECT: return "disconnect";
- case BLKROSET: return "set-read-only";
- case BLKFLSBUF: return "flush-buffer-cache";
- }
- return "unknown";
+ return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
@@ -103,30 +95,26 @@ static const char *nbdcmd_to_ascii(int cmd)
}
return "invalid";
}
-#endif /* NDEBUG */
-static void nbd_end_request(struct request *req)
+static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
int error = req->errors ? -EIO : 0;
struct request_queue *q = req->q;
unsigned long flags;
- dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
- req, error ? "failed" : "done");
+ dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
+ error ? "failed" : "done");
spin_lock_irqsave(q->queue_lock, flags);
__blk_end_request_all(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
}
+/*
+ * Forcibly shutdown the socket causing all listeners to error
+ */
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
- /* Forcibly shutdown the socket causing all listeners
- * to error
- *
- * FIXME: This code is duplicated from sys_shutdown, but
- * there should be a more generic interface rather than
- * calling socket ops directly here */
if (lock)
mutex_lock(&nbd->tx_lock);
if (nbd->sock) {
@@ -253,17 +241,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
}
memcpy(request.handle, &req, sizeof(req));
- dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
- nbd->disk->disk_name, req,
- nbdcmd_to_ascii(nbd_cmd(req)),
- (unsigned long long)blk_rq_pos(req) << 9,
- blk_rq_bytes(req));
+ dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
+ req, nbdcmd_to_ascii(nbd_cmd(req)),
+ (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, 1, &request, sizeof(request),
(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
- goto error_out;
+ return -EIO;
}
if (nbd_cmd(req) == NBD_CMD_WRITE) {
@@ -277,21 +263,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
flags = 0;
if (!rq_iter_last(bvec, iter))
flags = MSG_MORE;
- dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
- nbd->disk->disk_name, req, bvec.bv_len);
+ dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
+ req, bvec.bv_len);
result = sock_send_bvec(nbd, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
- goto error_out;
+ return -EIO;
}
}
}
return 0;
-
-error_out:
- return -EIO;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
@@ -302,7 +285,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
if (unlikely(err))
- goto out;
+ return ERR_PTR(err);
spin_lock(&nbd->queue_lock);
list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
@@ -314,10 +297,7 @@ static struct request *nbd_find_request(struct nbd_device *nbd,
}
spin_unlock(&nbd->queue_lock);
- err = -ENOENT;
-
-out:
- return ERR_PTR(err);
+ return ERR_PTR(-ENOENT);
}
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
@@ -371,8 +351,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
return req;
}
- dprintk(DBG_RX, "%s: request %p: got reply\n",
- nbd->disk->disk_name, req);
+ dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
if (nbd_cmd(req) == NBD_CMD_READ) {
struct req_iterator iter;
struct bio_vec bvec;
@@ -385,8 +364,8 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
req->errors++;
return req;
}
- dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
- nbd->disk->disk_name, req, bvec.bv_len);
+ dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
+ req, bvec.bv_len);
}
}
return req;
@@ -426,7 +405,7 @@ static int nbd_do_it(struct nbd_device *nbd)
}
while ((req = nbd_read_stat(nbd)) != NULL)
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->pid = 0;
@@ -455,7 +434,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
queuelist);
list_del_init(&req->queuelist);
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
}
while (!list_empty(&nbd->waiting_queue)) {
@@ -463,7 +442,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
queuelist);
list_del_init(&req->queuelist);
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
}
}
@@ -507,7 +486,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
if (nbd_send_req(nbd, req) != 0) {
dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
} else {
spin_lock(&nbd->queue_lock);
list_add_tail(&req->queuelist, &nbd->queue_head);
@@ -522,7 +501,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
error_out:
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
}
static int nbd_thread(void *data)
@@ -570,18 +549,18 @@ static void do_nbd_request(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
- dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
- req->rq_disk->disk_name, req, req->cmd_type);
-
nbd = req->rq_disk->private_data;
BUG_ON(nbd->magic != NBD_MAGIC);
+ dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
+ req, req->cmd_type);
+
if (unlikely(!nbd->sock)) {
dev_err(disk_to_dev(nbd->disk),
"Attempted send on closed socket\n");
req->errors++;
- nbd_end_request(req);
+ nbd_end_request(nbd, req);
spin_lock_irq(q->queue_lock);
continue;
}
@@ -706,13 +685,13 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
else
blk_queue_flush(nbd->disk->queue, 0);
- thread = kthread_create(nbd_thread, nbd, "%s",
- nbd->disk->disk_name);
+ thread = kthread_run(nbd_thread, nbd, "%s",
+ nbd->disk->disk_name);
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
return PTR_ERR(thread);
}
- wake_up_process(thread);
+
error = nbd_do_it(nbd);
kthread_stop(thread);
@@ -768,10 +747,6 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
BUG_ON(nbd->magic != NBD_MAGIC);
- /* Anyone capable of this syscall can do *real bad* things */
- dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
- nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
-
mutex_lock(&nbd->tx_lock);
error = __nbd_ioctl(bdev, nbd, cmd, arg);
mutex_unlock(&nbd->tx_lock);
@@ -861,7 +836,6 @@ static int __init nbd_init(void)
}
printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
- dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
@@ -920,7 +894,3 @@ module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
-#ifndef NDEBUG
-module_param(debugflags, int, 0644);
-MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
-#endif
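
The nbd cleanup above drops the home-grown dprintk()/debugflags machinery in favour of dev_dbg() keyed off a small nbd_to_dev() accessor, so messages go through the device logging infrastructure instead of a module parameter. A rough userspace sketch of that pattern, with invented struct and macro names standing in for the kernel's dev_dbg()/disk_to_dev():

/* Route every message through one accessor that maps the driver object to
 * the thing you log against, instead of ad-hoc debug flags. */
#include <stdio.h>

struct disk  { const char *name; };
struct mydev { struct disk *disk; };

static inline const char *dev_name_of(struct mydev *d)
{
        return d->disk->name;        /* analogue of nbd_to_dev()/disk_to_dev() */
}

#define dev_dbg_like(d, fmt, ...) \
        fprintf(stderr, "%s: " fmt, dev_name_of(d), ##__VA_ARGS__)

int main(void)
{
        struct disk  disk = { .name = "nbd0" };
        struct mydev dev  = { .disk = &disk };

        dev_dbg_like(&dev, "request %p: %s\n", (void *)&dev, "done");
        return 0;
}
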
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e23be20a3417..85b8036deaa3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -44,7 +44,7 @@
#define NVME_MINORS (1U << MINORBITS)
#define NVME_Q_DEPTH 1024
-#define NVME_AQ_DEPTH 64
+#define NVME_AQ_DEPTH 256
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT (admin_timeout * HZ)
@@ -152,6 +152,7 @@ struct nvme_cmd_info {
*/
#define NVME_INT_PAGES 2
#define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->page_size)
+#define NVME_INT_MASK 0x01
/*
* Will slightly overestimate the number of pages needed. This is OK
@@ -257,7 +258,7 @@ static void *iod_get_private(struct nvme_iod *iod)
*/
static bool iod_should_kfree(struct nvme_iod *iod)
{
- return (iod->private & 0x01) == 0;
+ return (iod->private & NVME_INT_MASK) == 0;
}
/* Special values must be less than 0x1000 */
@@ -301,8 +302,6 @@ static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
- struct request *req = ctx;
-
u32 result = le32_to_cpup(&cqe->result);
u16 status = le16_to_cpup(&cqe->status) >> 1;
@@ -311,8 +310,6 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
if (status == NVME_SC_SUCCESS)
dev_warn(nvmeq->q_dmadev,
"async event result %08x\n", result);
-
- blk_mq_free_hctx_request(nvmeq->hctx, req);
}
static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -432,7 +429,6 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
{
unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
sizeof(struct nvme_dsm_range);
- unsigned long mask = 0;
struct nvme_iod *iod;
if (rq->nr_phys_segments <= NVME_INT_PAGES &&
@@ -440,9 +436,8 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
iod = cmd->iod;
- mask = 0x01;
iod_init(iod, size, rq->nr_phys_segments,
- (unsigned long) rq | 0x01);
+ (unsigned long) rq | NVME_INT_MASK);
return iod;
}
@@ -522,8 +517,6 @@ static void nvme_dif_remap(struct request *req,
return;
pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
- if (!pmap)
- return;
p = pmap;
virt = bip_get_seed(bip);
@@ -645,12 +638,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
- int offset = offset_in_page(dma_addr);
+ u32 page_size = dev->page_size;
+ int offset = dma_addr & (page_size - 1);
__le64 *prp_list;
__le64 **list = iod_list(iod);
dma_addr_t prp_dma;
int nprps, i;
- u32 page_size = dev->page_size;
length -= (page_size - offset);
if (length <= 0)
@@ -1028,18 +1021,19 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
struct nvme_cmd_info *cmd_info;
struct request *req;
- req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false);
+ req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, true);
if (IS_ERR(req))
return PTR_ERR(req);
req->cmd_flags |= REQ_NO_TIMEOUT;
cmd_info = blk_mq_rq_to_pdu(req);
- nvme_set_info(cmd_info, req, async_req_completion);
+ nvme_set_info(cmd_info, NULL, async_req_completion);
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
c.common.command_id = req->tag;
+ blk_mq_free_hctx_request(nvmeq->hctx, req);
return __nvme_submit_cmd(nvmeq, &c);
}
@@ -1347,6 +1341,9 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
+ if (!nvmeq->qid && nvmeq->dev->admin_q)
+ blk_mq_freeze_queue_start(nvmeq->dev->admin_q);
+
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
@@ -1378,8 +1375,6 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
adapter_delete_sq(dev, qid);
adapter_delete_cq(dev, qid);
}
- if (!qid && dev->admin_q)
- blk_mq_freeze_queue_start(dev->admin_q);
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
@@ -1583,6 +1578,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.ops = &nvme_mq_admin_ops;
dev->admin_tagset.nr_hw_queues = 1;
dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
+ dev->admin_tagset.reserved_tags = 1;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
@@ -1749,25 +1745,31 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_dev *dev = ns->dev;
struct nvme_user_io io;
struct nvme_command c;
- unsigned length, meta_len;
- int status, i;
- struct nvme_iod *iod, *meta_iod = NULL;
- dma_addr_t meta_dma_addr;
- void *meta, *uninitialized_var(meta_mem);
+ unsigned length, meta_len, prp_len;
+ int status, write;
+ struct nvme_iod *iod;
+ dma_addr_t meta_dma = 0;
+ void *meta = NULL;
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
length = (io.nblocks + 1) << ns->lba_shift;
meta_len = (io.nblocks + 1) * ns->ms;
- if (meta_len && ((io.metadata & 3) || !io.metadata))
+ if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext)
return -EINVAL;
+ else if (meta_len && ns->ext) {
+ length += meta_len;
+ meta_len = 0;
+ }
+
+ write = io.opcode & 1;
switch (io.opcode) {
case nvme_cmd_write:
case nvme_cmd_read:
case nvme_cmd_compare:
- iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+ iod = nvme_map_user_pages(dev, write, io.addr, length);
break;
default:
return -EINVAL;
@@ -1776,6 +1778,27 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
if (IS_ERR(iod))
return PTR_ERR(iod);
+ prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+ if (length != prp_len) {
+ status = -ENOMEM;
+ goto unmap;
+ }
+ if (meta_len) {
+ meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+ &meta_dma, GFP_KERNEL);
+ if (!meta) {
+ status = -ENOMEM;
+ goto unmap;
+ }
+ if (write) {
+ if (copy_from_user(meta, (void __user *)io.metadata,
+ meta_len)) {
+ status = -EFAULT;
+ goto unmap;
+ }
+ }
+ }
+
memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
c.rw.flags = io.flags;
@@ -1787,75 +1810,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.reftag = cpu_to_le32(io.reftag);
c.rw.apptag = cpu_to_le16(io.apptag);
c.rw.appmask = cpu_to_le16(io.appmask);
-
- if (meta_len) {
- meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
- meta_len);
- if (IS_ERR(meta_iod)) {
- status = PTR_ERR(meta_iod);
- meta_iod = NULL;
- goto unmap;
- }
-
- meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
- &meta_dma_addr, GFP_KERNEL);
- if (!meta_mem) {
- status = -ENOMEM;
- goto unmap;
- }
-
- if (io.opcode & 1) {
- int meta_offset = 0;
-
- for (i = 0; i < meta_iod->nents; i++) {
- meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
- meta_iod->sg[i].offset;
- memcpy(meta_mem + meta_offset, meta,
- meta_iod->sg[i].length);
- kunmap_atomic(meta);
- meta_offset += meta_iod->sg[i].length;
- }
- }
-
- c.rw.metadata = cpu_to_le64(meta_dma_addr);
- }
-
- length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
c.rw.prp2 = cpu_to_le64(iod->first_dma);
-
- if (length != (io.nblocks + 1) << ns->lba_shift)
- status = -ENOMEM;
- else
- status = nvme_submit_io_cmd(dev, ns, &c, NULL);
-
- if (meta_len) {
- if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
- int meta_offset = 0;
-
- for (i = 0; i < meta_iod->nents; i++) {
- meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
- meta_iod->sg[i].offset;
- memcpy(meta, meta_mem + meta_offset,
- meta_iod->sg[i].length);
- kunmap_atomic(meta);
- meta_offset += meta_iod->sg[i].length;
- }
- }
-
- dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
- meta_dma_addr);
- }
-
+ c.rw.metadata = cpu_to_le64(meta_dma);
+ status = nvme_submit_io_cmd(dev, ns, &c, NULL);
unmap:
- nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+ nvme_unmap_user_pages(dev, write, iod);
nvme_free_iod(dev, iod);
-
- if (meta_iod) {
- nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
- nvme_free_iod(dev, meta_iod);
+ if (meta) {
+ if (status == NVME_SC_SUCCESS && !write) {
+ if (copy_to_user((void __user *)io.metadata, meta,
+ meta_len))
+ status = -EFAULT;
+ }
+ dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
}
-
return status;
}
@@ -2018,7 +1987,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id;
dma_addr_t dma_addr;
- int lbaf, pi_type, old_ms;
+ u8 lbaf, pi_type;
+ u16 old_ms;
unsigned short bs;
id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
@@ -2039,6 +2009,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
ns->lba_shift = id->lbaf[lbaf].ds;
ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+ ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
/*
* If identify namespace failed, use default 512 byte block size so
@@ -2055,14 +2026,14 @@ static int nvme_revalidate_disk(struct gendisk *disk)
if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
ns->ms != old_ms ||
bs != queue_logical_block_size(disk->queue) ||
- (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+ (ns->ms && ns->ext)))
blk_integrity_unregister(disk);
ns->pi_type = pi_type;
blk_queue_logical_block_size(ns->queue, bs);
if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
- !(id->flbas & NVME_NS_FLBAS_META_EXT))
+ !ns->ext)
nvme_init_integrity(ns);
if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
@@ -2334,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->oncs = le16_to_cpup(&ctrl->oncs);
dev->abort_limit = ctrl->acl + 1;
dev->vwc = ctrl->vwc;
- dev->event_limit = min(ctrl->aerl + 1, 8);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2881,6 +2851,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
nvme_set_irq_hints(dev);
+ dev->event_limit = 1;
return result;
free_tags:
@@ -3166,8 +3137,10 @@ static int __init nvme_init(void)
nvme_char_major = result;
nvme_class = class_create(THIS_MODULE, "nvme");
- if (!nvme_class)
+ if (IS_ERR(nvme_class)) {
+ result = PTR_ERR(nvme_class);
goto unregister_chrdev;
+ }
result = pci_register_driver(&nvme_driver);
if (result)
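
One recurring detail in the nvme-core changes above is NVME_INT_MASK: the low bit of iod->private records whether the iod was allocated inline with the command, relying on the struct request pointer being at least 2-byte aligned. A small userspace sketch of that low-bit pointer tagging; the struct and helper names here are invented for the example:

/* Store a boolean in bit 0 of an aligned pointer and mask it off on use. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define INT_MASK 0x01UL

struct req { int id; };

static uintptr_t tag_inline(struct req *rq)  { return (uintptr_t)rq | INT_MASK; }
static struct req *untag(uintptr_t priv)     { return (struct req *)(priv & ~INT_MASK); }
static int is_inline(uintptr_t priv)         { return (priv & INT_MASK) != 0; }

int main(void)
{
        struct req *rq = malloc(sizeof(*rq));   /* malloc is pointer-aligned */

        rq->id = 42;
        uintptr_t priv = tag_inline(rq);
        printf("inline=%d id=%d\n", is_inline(priv), untag(priv)->id);
        free(rq);
        return 0;
}
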
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index e10196e0182d..88f13c525712 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -55,6 +55,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#define VPD_SERIAL_NUMBER 0x80
#define VPD_DEVICE_IDENTIFIERS 0x83
#define VPD_EXTENDED_INQUIRY 0x86
+#define VPD_BLOCK_LIMITS 0xB0
#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
/* CDB offsets */
@@ -132,9 +133,10 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
+#define INQ_BDEV_LIMITS_PAGE 0xB0
#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
#define INQ_SERIAL_NUMBER_LENGTH 0x14
-#define INQ_NUM_SUPPORTED_VPD_PAGES 5
+#define INQ_NUM_SUPPORTED_VPD_PAGES 6
#define VERSION_SPC_4 0x06
#define ACA_UNSUPPORTED 0
#define STANDARD_INQUIRY_LENGTH 36
@@ -747,6 +749,7 @@ static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+ inq_response[9] = INQ_BDEV_LIMITS_PAGE;
xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -938,6 +941,26 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
return res;
}
+static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *inq_response, int alloc_len)
+{
+ __be32 max_sectors = cpu_to_be32(
+ nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
+ __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
+ __be32 discard_desc_count = cpu_to_be32(0x100);
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = VPD_BLOCK_LIMITS;
+ inq_response[3] = 0x3c; /* Page Length */
+ memcpy(&inq_response[8], &max_sectors, sizeof(u32));
+ memcpy(&inq_response[20], &max_discard, sizeof(u32));
+
+ if (max_discard)
+ memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
+
+ return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
+}
+
static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int alloc_len)
{
@@ -2268,6 +2291,10 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
case VPD_EXTENDED_INQUIRY:
res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
break;
+ case VPD_BLOCK_LIMITS:
+ res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
case VPD_BLOCK_DEV_CHARACTERISTICS:
res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
break;
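
nvme_trans_bdev_limits_page() above builds a SCSI Block Limits VPD page (0xB0) by copying big-endian 32-bit limits to fixed byte offsets. A hedged userspace sketch of the same buffer layout, with htobe32() standing in for cpu_to_be32() and example limit values only:

/* Lay out a VPD 0xB0 page: page code, page length, then big-endian fields
 * at their defined offsets. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t page[64];
        uint32_t max_xfer   = htobe32(2048);     /* max transfer length, blocks */
        uint32_t max_unmap  = htobe32(65536);    /* max unmap LBA count */
        uint32_t unmap_desc = htobe32(0x100);    /* max unmap descriptor count */

        memset(page, 0, sizeof(page));
        page[1] = 0xB0;                          /* page code */
        page[3] = 0x3c;                          /* page length */
        memcpy(&page[8],  &max_xfer,   sizeof(uint32_t));
        memcpy(&page[20], &max_unmap,  sizeof(uint32_t));
        memcpy(&page[24], &unmap_desc, sizeof(uint32_t));

        printf("VPD page 0x%02x, length %u bytes\n",
               (unsigned)page[1], (unsigned)page[3]);
        return 0;
}
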
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 2ce3dfd7e6b9..876d0c3eaf58 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -137,7 +137,7 @@
*/
-static bool verbose = 0;
+static int verbose;
static int major = PG_MAJOR;
static char *name = PG_NAME;
static int disable = 0;
@@ -168,7 +168,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <asm/uaccess.h>
-module_param(verbose, bool, 0644);
+module_param(verbose, int, 0644);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
diff --git a/drivers/block/pmem.c b/drivers/block/pmem.c
new file mode 100644
index 000000000000..eabf4a8d0085
--- /dev/null
+++ b/drivers/block/pmem.c
@@ -0,0 +1,262 @@
+/*
+ * Persistent Memory Driver
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
+ * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+
+#define PMEM_MINORS 16
+
+struct pmem_device {
+ struct request_queue *pmem_queue;
+ struct gendisk *pmem_disk;
+
+ /* One contiguous memory region per device */
+ phys_addr_t phys_addr;
+ void *virt_addr;
+ size_t size;
+};
+
+static int pmem_major;
+static atomic_t pmem_index;
+
+static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
+ unsigned int len, unsigned int off, int rw,
+ sector_t sector)
+{
+ void *mem = kmap_atomic(page);
+ size_t pmem_off = sector << 9;
+
+ if (rw == READ) {
+ memcpy(mem + off, pmem->virt_addr + pmem_off, len);
+ flush_dcache_page(page);
+ } else {
+ flush_dcache_page(page);
+ memcpy(pmem->virt_addr + pmem_off, mem + off, len);
+ }
+
+ kunmap_atomic(mem);
+}
+
+static void pmem_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct block_device *bdev = bio->bi_bdev;
+ struct pmem_device *pmem = bdev->bd_disk->private_data;
+ int rw;
+ struct bio_vec bvec;
+ sector_t sector;
+ struct bvec_iter iter;
+ int err = 0;
+
+ if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) {
+ err = -EIO;
+ goto out;
+ }
+
+ BUG_ON(bio->bi_rw & REQ_DISCARD);
+
+ rw = bio_data_dir(bio);
+ sector = bio->bi_iter.bi_sector;
+ bio_for_each_segment(bvec, bio, iter) {
+ pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
+ rw, sector);
+ sector += bvec.bv_len >> 9;
+ }
+
+out:
+ bio_endio(bio, err);
+}
+
+static int pmem_rw_page(struct block_device *bdev, sector_t sector,
+ struct page *page, int rw)
+{
+ struct pmem_device *pmem = bdev->bd_disk->private_data;
+
+ pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ page_endio(page, rw & WRITE, 0);
+
+ return 0;
+}
+
+static long pmem_direct_access(struct block_device *bdev, sector_t sector,
+ void **kaddr, unsigned long *pfn, long size)
+{
+ struct pmem_device *pmem = bdev->bd_disk->private_data;
+ size_t offset = sector << 9;
+
+ if (!pmem)
+ return -ENODEV;
+
+ *kaddr = pmem->virt_addr + offset;
+ *pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
+
+ return pmem->size - offset;
+}
+
+static const struct block_device_operations pmem_fops = {
+ .owner = THIS_MODULE,
+ .rw_page = pmem_rw_page,
+ .direct_access = pmem_direct_access,
+};
+
+static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res)
+{
+ struct pmem_device *pmem;
+ struct gendisk *disk;
+ int idx, err;
+
+ err = -ENOMEM;
+ pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
+ if (!pmem)
+ goto out;
+
+ pmem->phys_addr = res->start;
+ pmem->size = resource_size(res);
+
+ err = -EINVAL;
+ if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) {
+ dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", &pmem->phys_addr, pmem->size);
+ goto out_free_dev;
+ }
+
+ /*
+ * Map the memory as non-cachable, as we can't write back the contents
+ * of the CPU caches in case of a crash.
+ */
+ err = -ENOMEM;
+ pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size);
+ if (!pmem->virt_addr)
+ goto out_release_region;
+
+ pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
+ if (!pmem->pmem_queue)
+ goto out_unmap;
+
+ blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
+ blk_queue_max_hw_sectors(pmem->pmem_queue, 1024);
+ blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
+
+ disk = alloc_disk(PMEM_MINORS);
+ if (!disk)
+ goto out_free_queue;
+
+ idx = atomic_inc_return(&pmem_index) - 1;
+
+ disk->major = pmem_major;
+ disk->first_minor = PMEM_MINORS * idx;
+ disk->fops = &pmem_fops;
+ disk->private_data = pmem;
+ disk->queue = pmem->pmem_queue;
+ disk->flags = GENHD_FL_EXT_DEVT;
+ sprintf(disk->disk_name, "pmem%d", idx);
+ disk->driverfs_dev = dev;
+ set_capacity(disk, pmem->size >> 9);
+ pmem->pmem_disk = disk;
+
+ add_disk(disk);
+
+ return pmem;
+
+out_free_queue:
+ blk_cleanup_queue(pmem->pmem_queue);
+out_unmap:
+ iounmap(pmem->virt_addr);
+out_release_region:
+ release_mem_region(pmem->phys_addr, pmem->size);
+out_free_dev:
+ kfree(pmem);
+out:
+ return ERR_PTR(err);
+}
+
+static void pmem_free(struct pmem_device *pmem)
+{
+ del_gendisk(pmem->pmem_disk);
+ put_disk(pmem->pmem_disk);
+ blk_cleanup_queue(pmem->pmem_queue);
+ iounmap(pmem->virt_addr);
+ release_mem_region(pmem->phys_addr, pmem->size);
+ kfree(pmem);
+}
+
+static int pmem_probe(struct platform_device *pdev)
+{
+ struct pmem_device *pmem;
+ struct resource *res;
+
+ if (WARN_ON(pdev->num_resources > 1))
+ return -ENXIO;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ pmem = pmem_alloc(&pdev->dev, res);
+ if (IS_ERR(pmem))
+ return PTR_ERR(pmem);
+
+ platform_set_drvdata(pdev, pmem);
+
+ return 0;
+}
+
+static int pmem_remove(struct platform_device *pdev)
+{
+ struct pmem_device *pmem = platform_get_drvdata(pdev);
+
+ pmem_free(pmem);
+ return 0;
+}
+
+static struct platform_driver pmem_driver = {
+ .probe = pmem_probe,
+ .remove = pmem_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "pmem",
+ },
+};
+
+static int __init pmem_init(void)
+{
+ int error;
+
+ pmem_major = register_blkdev(0, "pmem");
+ if (pmem_major < 0)
+ return pmem_major;
+
+ error = platform_driver_register(&pmem_driver);
+ if (error)
+ unregister_blkdev(pmem_major, "pmem");
+ return error;
+}
+module_init(pmem_init);
+
+static void pmem_exit(void)
+{
+ platform_driver_unregister(&pmem_driver);
+ unregister_blkdev(pmem_major, "pmem");
+}
+module_exit(pmem_exit);
+
+MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
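
The heart of the new pmem driver is pmem_do_bvec(), which copies between the kmapped page of a bio_vec and the ioremapped persistent-memory region at sector << 9. A userspace analogue with a heap buffer standing in for the ioremapped range; names, sizes, and the enum are illustrative only:

/* Copy between a "page" buffer and a byte-addressable region at
 * sector << 9, in either direction. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SECTOR_SHIFT 9

enum { MY_READ, MY_WRITE };

static void do_bvec(char *pmem, char *page, size_t len, size_t off,
                    int rw, unsigned long long sector)
{
        size_t pmem_off = (size_t)sector << SECTOR_SHIFT;

        if (rw == MY_READ)
                memcpy(page + off, pmem + pmem_off, len);
        else
                memcpy(pmem + pmem_off, page + off, len);
}

int main(void)
{
        char *pmem = calloc(1, 1 << 20);         /* 1 MiB fake pmem region */
        char page[4096] = "hello, pmem";

        if (!pmem)
                return 1;
        do_bvec(pmem, page, sizeof(page), 0, MY_WRITE, 2);  /* write at sector 2 */
        memset(page, 0, sizeof(page));
        do_bvec(pmem, page, sizeof(page), 0, MY_READ, 2);   /* read it back */
        printf("%s\n", page);
        free(pmem);
        return 0;
}
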
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b40af3203089..ec6c5c6e1ac9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
result, xferred);
if (!img_request->result)
img_request->result = result;
+ /*
+ * Need to end I/O on the entire obj_request worth of
+ * bytes in case of error.
+ */
+ xferred = obj_request->length;
}
/* Image object requests don't own their page array */
@@ -3762,8 +3767,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
goto out_tag_set;
}
- /* We use the default size, but let's be explicit about it. */
- blk_queue_physical_block_size(q, SECTOR_SIZE);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
/* set io sizes to object size */
segment_size = rbd_obj_bytes(&rbd_dev->header);
@@ -5301,8 +5306,13 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
if (mapping) {
ret = rbd_dev_header_watch_sync(rbd_dev);
- if (ret)
+ if (ret) {
+ if (ret == -ENOENT)
+ pr_info("image %s/%s does not exist\n",
+ rbd_dev->spec->pool_name,
+ rbd_dev->spec->image_name);
goto out_header_name;
+ }
}
ret = rbd_dev_header_info(rbd_dev);
@@ -5319,8 +5329,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
ret = rbd_spec_fill_snap_id(rbd_dev);
else
ret = rbd_spec_fill_names(rbd_dev);
- if (ret)
+ if (ret) {
+ if (ret == -ENOENT)
+ pr_info("snap %s/%s@%s does not exist\n",
+ rbd_dev->spec->pool_name,
+ rbd_dev->spec->image_name,
+ rbd_dev->spec->snap_name);
goto err_out_probe;
+ }
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
@@ -5390,8 +5406,11 @@ static ssize_t do_rbd_add(struct bus_type *bus,
/* pick the pool */
rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENOENT)
+ pr_info("pool %s does not exist\n", spec->pool_name);
goto err_out_client;
+ }
spec->pool_id = (u64)rc;
/* The ceph file layout needs to fit pool id in 32 bits */
@@ -5673,7 +5692,7 @@ static int __init rbd_init(void)
/*
* The number of active work items is limited by the number of
- * rbd devices, so leave @max_active at default.
+ * rbd devices * queue depth, so leave @max_active at default.
*/
rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
if (!rbd_wq) {
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 523ee8fd4c15..c264f2d284a7 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -440,9 +440,9 @@ static inline void seek_track(struct floppy_state *fs, int n)
static inline void init_dma(struct dbdma_cmd *cp, int cmd,
void *buf, int count)
{
- st_le16(&cp->req_count, count);
- st_le16(&cp->command, cmd);
- st_le32(&cp->phy_addr, virt_to_bus(buf));
+ cp->req_count = cpu_to_le16(count);
+ cp->command = cpu_to_le16(cmd);
+ cp->phy_addr = cpu_to_le32(virt_to_bus(buf));
cp->xfer_status = 0;
}
@@ -771,8 +771,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
}
/* turn off DMA */
out_le32(&dr->control, (RUN | PAUSE) << 16);
- stat = ld_le16(&cp->xfer_status);
- resid = ld_le16(&cp->res_count);
+ stat = le16_to_cpu(cp->xfer_status);
+ resid = le16_to_cpu(cp->res_count);
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
@@ -1170,7 +1170,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
- st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);
if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
swim3_mb_event(mdev, MB_FD);
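
The swim3 change above swaps the old powerpc st_le16()/ld_le16() accessors for explicit cpu_to_le16()/le16_to_cpu() conversions on the DBDMA descriptor fields. A userspace sketch of the same idea using htole16()/le16toh(); the struct below is a trimmed stand-in for struct dbdma_cmd, not the real layout:

/* Store descriptor fields in little-endian form and convert back on read. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct dbdma_cmd_like {
        uint16_t req_count;     /* little-endian in the descriptor */
        uint16_t command;
};

int main(void)
{
        struct dbdma_cmd_like cp;

        cp.req_count = htole16(512);    /* analogue of cpu_to_le16() */
        cp.command   = htole16(0x20);
        printf("count=%u cmd=0x%x\n",
               le16toh(cp.req_count), le16toh(cp.command));
        return 0;
}
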
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 655e570b9b31..5ea2f0bbbc7c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -342,7 +342,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
- u64 capacity, size;
+ u64 capacity;
/* Host must always specify the capacity. */
virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
@@ -354,9 +354,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
- size = capacity * queue_logical_block_size(q);
- string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+ string_get_size(capacity, queue_logical_block_size(q),
+ STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(capacity, queue_logical_block_size(q),
+ STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
"new size: %llu %d-byte logical blocks (%s/%s)\n",
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2a04d341e598..713fc9ff1149 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -34,6 +34,8 @@
* IN THE SOFTWARE.
*/
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
@@ -211,7 +213,7 @@ static int add_persistent_gnt(struct xen_blkif *blkif,
else if (persistent_gnt->gnt > this->gnt)
new = &((*new)->rb_right);
else {
- pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+ pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
return -EINVAL;
}
}
@@ -242,7 +244,7 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
node = node->rb_right;
else {
if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
- pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
+ pr_alert_ratelimited("requesting a grant already in use\n");
return NULL;
}
set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
@@ -257,23 +259,12 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
struct persistent_gnt *persistent_gnt)
{
if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
- pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
+ pr_alert_ratelimited("freeing a grant already unused\n");
set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
atomic_dec(&blkif->persistent_gnt_in_use);
}
-static void free_persistent_gnts_unmap_callback(int result,
- struct gntab_unmap_queue_data *data)
-{
- struct completion *c = data->data;
-
- /* BUG_ON used to reproduce existing behaviour,
- but is this the best way to deal with this? */
- BUG_ON(result);
- complete(c);
-}
-
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
unsigned int num)
{
@@ -283,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
struct rb_node *n;
int segs_to_unmap = 0;
struct gntab_unmap_queue_data unmap_data;
- struct completion unmap_completion;
-
- init_completion(&unmap_completion);
- unmap_data.data = &unmap_completion;
- unmap_data.done = &free_persistent_gnts_unmap_callback;
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
@@ -308,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
!rb_next(&persistent_gnt->node)) {
unmap_data.count = segs_to_unmap;
- gnttab_unmap_refs_async(&unmap_data);
- wait_for_completion(&unmap_completion);
+ BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -327,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
- int ret, segs_to_unmap = 0;
+ int segs_to_unmap = 0;
struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+ struct gntab_unmap_queue_data unmap_data;
+
+ unmap_data.pages = pages;
+ unmap_data.unmap_ops = unmap;
+ unmap_data.kunmap_ops = NULL;
while(!list_empty(&blkif->persistent_purge_list)) {
persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -344,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, NULL, pages,
- segs_to_unmap);
- BUG_ON(ret);
+ unmap_data.count = segs_to_unmap;
+ BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
}
kfree(persistent_gnt);
}
if (segs_to_unmap > 0) {
- ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
- BUG_ON(ret);
+ unmap_data.count = segs_to_unmap;
+ BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap);
}
}
@@ -374,7 +363,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
}
if (work_pending(&blkif->persistent_purge_work)) {
- pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+ pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
return;
}
@@ -396,7 +385,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
total = num_clean;
- pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
+ pr_debug("Going to purge %u persistent grants\n", num_clean);
BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
@@ -428,13 +417,13 @@ purge_list:
* with the requested num
*/
if (!scan_used && !clean_used) {
- pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
+ pr_debug("Still missing %u purged frames\n", num_clean);
scan_used = true;
goto purge_list;
}
finished:
if (!clean_used) {
- pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
+ pr_debug("Finished scanning for grants to clean, removing used flag\n");
clean_used = true;
goto purge_list;
}
@@ -444,7 +433,7 @@ finished:
/* We can defer this work */
schedule_work(&blkif->persistent_purge_work);
- pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
+ pr_debug("Purged %u/%u\n", (total - num_clean), total);
return;
}
@@ -520,20 +509,20 @@ static void xen_vbd_resize(struct xen_blkif *blkif)
struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
unsigned long long new_size = vbd_sz(vbd);
- pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
+ pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
- pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
+ pr_info("VBD Resize: new size %llu\n", new_size);
vbd->size = new_size;
again:
err = xenbus_transaction_start(&xbt);
if (err) {
- pr_warn(DRV_PFX "Error starting transaction");
+ pr_warn("Error starting transaction\n");
return;
}
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(vbd));
if (err) {
- pr_warn(DRV_PFX "Error writing new size");
+ pr_warn("Error writing new size\n");
goto abort;
}
/*
@@ -543,7 +532,7 @@ again:
*/
err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
if (err) {
- pr_warn(DRV_PFX "Error writing the state");
+ pr_warn("Error writing the state\n");
goto abort;
}
@@ -551,7 +540,7 @@ again:
if (err == -EAGAIN)
goto again;
if (err)
- pr_warn(DRV_PFX "Error ending transaction");
+ pr_warn("Error ending transaction\n");
return;
abort:
xenbus_transaction_end(xbt, 1);
@@ -578,7 +567,7 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
static void print_stats(struct xen_blkif *blkif)
{
- pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
+ pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
" | ds %4llu | pg: %4u/%4d\n",
current->comm, blkif->st_oo_req,
blkif->st_rd_req, blkif->st_wr_req,
@@ -855,7 +844,7 @@ again:
/* This is a newly mapped grant */
BUG_ON(new_map_idx >= segs_to_map);
if (unlikely(map[new_map_idx].status != 0)) {
- pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+ pr_debug("invalid buffer -- could not remap it\n");
put_free_pages(blkif, &pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
ret |= 1;
@@ -891,14 +880,14 @@ again:
goto next;
}
pages[seg_idx]->persistent_gnt = persistent_gnt;
- pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+ pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
persistent_gnt->gnt, blkif->persistent_gnt_c,
xen_blkif_max_pgrants);
goto next;
}
if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
blkif->vbd.overflow_max_grants = 1;
- pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+ pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
blkif->domid, blkif->vbd.handle);
}
/*
@@ -916,7 +905,7 @@ next:
return ret;
out_of_memory:
- pr_alert(DRV_PFX "%s: out of memory\n", __func__);
+ pr_alert("%s: out of memory\n", __func__);
put_free_pages(blkif, pages_to_gnt, segs_to_map);
return -ENOMEM;
}
@@ -996,7 +985,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
err = xen_vbd_translate(&preq, blkif, WRITE);
if (err) {
- pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+ pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
preq.sector_number,
preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
goto fail_response;
@@ -1012,7 +1001,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
GFP_KERNEL, secure);
fail_response:
if (err == -EOPNOTSUPP) {
- pr_debug(DRV_PFX "discard op failed, not supported\n");
+ pr_debug("discard op failed, not supported\n");
status = BLKIF_RSP_EOPNOTSUPP;
} else if (err)
status = BLKIF_RSP_ERROR;
@@ -1056,16 +1045,16 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
/* An error fails the entire request. */
if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
(error == -EOPNOTSUPP)) {
- pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
+ pr_debug("flush diskcache op failed, not supported\n");
xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
(error == -EOPNOTSUPP)) {
- pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+ pr_debug("write barrier op failed, not supported\n");
xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
} else if (error) {
- pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
+ pr_debug("Buffer not up-to-date at end of operation,"
" error=%d\n", error);
pending_req->status = BLKIF_RSP_ERROR;
}
@@ -1110,7 +1099,7 @@ __do_block_io_op(struct xen_blkif *blkif)
if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
rc = blk_rings->common.rsp_prod_pvt;
- pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+ pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
rp, rc, rp - rc, blkif->vbd.pdevice);
return -EACCES;
}
@@ -1217,8 +1206,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
if ((req->operation == BLKIF_OP_INDIRECT) &&
(req_operation != BLKIF_OP_READ) &&
(req_operation != BLKIF_OP_WRITE)) {
- pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
- req_operation);
+ pr_debug("Invalid indirect operation (%u)\n", req_operation);
goto fail_response;
}
@@ -1252,8 +1240,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
(nseg > MAX_INDIRECT_SEGMENTS))) {
- pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
- nseg);
+ pr_debug("Bad number of segments in request (%d)\n", nseg);
/* Haven't submitted any bio's yet. */
goto fail_response;
}
@@ -1288,7 +1275,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
}
if (xen_vbd_translate(&preq, blkif, operation) != 0) {
- pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
+ pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
operation == READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects,
@@ -1303,7 +1290,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
for (i = 0; i < nseg; i++) {
if (((int)preq.sector_number|(int)seg[i].nsec) &
((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
- pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
+ pr_debug("Misaligned I/O request from domain %d\n",
blkif->domid);
goto fail_response;
}
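
Most of the xen-blkback churn above comes from dropping the DRV_PFX string in favour of a pr_fmt() definition at the top of each file, so every pr_* call picks up the "xen-blkback: " prefix automatically. A userspace sketch of how such a prefix macro composes with the format string; the pr_*_like macros are simplified stand-ins for the kernel's pr_* family:

/* Define the prefix once; string-literal concatenation splices it into
 * every format string at each call site. */
#include <stdio.h>

#define pr_fmt(fmt) "xen-blkback: " fmt

#define pr_info_like(fmt, ...)  printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn_like(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        pr_info_like("ring-ref %ld, event-channel %d\n", 8L, 3);
        pr_warn_like("Error writing new size\n");
        return 0;
}
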
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 375d28851860..f620b5d3f77c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -44,12 +44,6 @@
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
-#define DRV_PFX "xen-blkback:"
-#define DPRINTK(fmt, args...) \
- pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
- __func__, __LINE__, ##args)
-
-
/*
* This is the maximum number of segments that would be allowed in indirect
* requests. This value will also be passed to the frontend.
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index e3afe97280b1..6ab69ad61ee1 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -14,6 +14,8 @@
*/
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
@@ -21,6 +23,9 @@
#include <xen/grant_table.h>
#include "common.h"
+/* Enlarge the array size in order to fully show blkback name. */
+#define BLKBACK_NAME_LEN (20)
+
struct backend_info {
struct xenbus_device *dev;
struct xen_blkif *blkif;
@@ -70,7 +75,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
else
devname = devpath;
- snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
+ snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname);
kfree(devpath);
return 0;
@@ -79,7 +84,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
int err;
- char name[TASK_COMM_LEN];
+ char name[BLKBACK_NAME_LEN];
/* Not ready to connect? */
if (!blkif->irq || !blkif->vbd.bdev)
@@ -193,7 +198,7 @@ fail:
return ERR_PTR(-ENOMEM);
}
-static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
+static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
unsigned int evtchn)
{
int err;
@@ -202,7 +207,8 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
if (blkif->irq)
return 0;
- err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
+ err = xenbus_map_ring_valloc(blkif->be->dev, &gref, 1,
+ &blkif->blk_ring);
if (err < 0)
return err;
@@ -423,14 +429,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
FMODE_READ : FMODE_WRITE, NULL);
if (IS_ERR(bdev)) {
- DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
+ pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
vbd->bdev = bdev;
if (vbd->bdev->bd_disk == NULL) {
- DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
+ pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
return -ENOENT;
@@ -449,7 +455,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && blk_queue_secdiscard(q))
vbd->discard_secure = true;
- DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
+ pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
}
@@ -457,7 +463,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
- DPRINTK("");
+ pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
if (be->major || be->minor)
xenvbd_sysfs_delif(dev);
@@ -563,6 +569,10 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
int err;
struct backend_info *be = kzalloc(sizeof(struct backend_info),
GFP_KERNEL);
+
+ /* match the pr_debug in xen_blkbk_remove */
+ pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
+
if (!be) {
xenbus_dev_fatal(dev, -ENOMEM,
"allocating backend structure");
@@ -594,7 +604,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
return 0;
fail:
- DPRINTK("failed");
+ pr_warn("%s failed\n", __func__);
xen_blkbk_remove(dev);
return err;
}
@@ -618,7 +628,7 @@ static void backend_changed(struct xenbus_watch *watch,
unsigned long handle;
char *device_type;
- DPRINTK("");
+ pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
&major, &minor);
@@ -637,7 +647,7 @@ static void backend_changed(struct xenbus_watch *watch,
if (be->major | be->minor) {
if (be->major != major || be->minor != minor)
- pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+ pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
be->major, be->minor, major, minor);
return;
}
@@ -698,13 +708,12 @@ static void frontend_changed(struct xenbus_device *dev,
struct backend_info *be = dev_get_drvdata(&dev->dev);
int err;
- DPRINTK("%s", xenbus_strstate(frontend_state));
+ pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));
switch (frontend_state) {
case XenbusStateInitialising:
if (dev->state == XenbusStateClosed) {
- pr_info(DRV_PFX "%s: prepare for reconnect\n",
- dev->nodename);
+ pr_info("%s: prepare for reconnect\n", dev->nodename);
xenbus_switch_state(dev, XenbusStateInitWait);
}
break;
@@ -771,7 +780,7 @@ static void connect(struct backend_info *be)
int err;
struct xenbus_device *dev = be->dev;
- DPRINTK("%s", dev->otherend);
+ pr_debug("%s %s\n", __func__, dev->otherend);
/* Supply the information about the device the frontend needs */
again:
@@ -857,7 +866,7 @@ static int connect_ring(struct backend_info *be)
char protocol[64] = "";
int err;
- DPRINTK("%s", dev->otherend);
+ pr_debug("%s %s\n", __func__, dev->otherend);
err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
&ring_ref, "event-channel", "%u", &evtchn, NULL);
@@ -892,7 +901,7 @@ static int connect_ring(struct backend_info *be)
be->blkif->vbd.feature_gnt_persistent = pers_grants;
be->blkif->vbd.overflow_max_grants = 0;
- pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
+ pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
ring_ref, evtchn, be->blkif->blk_protocol, protocol,
pers_grants ? "persistent grants" : "");
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 37779e4c4585..2c61cf8c6f61 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1245,6 +1245,7 @@ static int setup_blkring(struct xenbus_device *dev,
struct blkfront_info *info)
{
struct blkif_sring *sring;
+ grant_ref_t gref;
int err;
info->ring_ref = GRANT_INVALID_REF;
@@ -1257,13 +1258,13 @@ static int setup_blkring(struct xenbus_device *dev,
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+ err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref);
if (err < 0) {
free_page((unsigned long)sring);
info->ring.sring = NULL;
goto fail;
}
- info->ring_ref = err;
+ info->ring_ref = gref;
err = xenbus_alloc_evtchn(dev, &info->evtchn);
if (err)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 871bd3550cb0..8dcbced0eafd 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -43,11 +43,22 @@ static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
+static inline void deprecated_attr_warn(const char *name)
+{
+ pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
+ task_pid_nr(current),
+ current->comm,
+ name,
+ "See zram documentation.");
+}
+
#define ZRAM_ATTR_RO(name) \
static ssize_t name##_show(struct device *d, \
struct device_attribute *attr, char *b) \
{ \
struct zram *zram = dev_to_zram(d); \
+ \
+ deprecated_attr_warn(__stringify(name)); \
return scnprintf(b, PAGE_SIZE, "%llu\n", \
(u64)atomic64_read(&zram->stats.name)); \
} \
@@ -63,6 +74,27 @@ static inline struct zram *dev_to_zram(struct device *dev)
return (struct zram *)dev_to_disk(dev)->private_data;
}
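+/* Writing any value to the "compact" sysfs attribute compacts the zsmalloc
+ * pool and adds the number of migrated objects reported by zs_compact()
+ * to the num_migrated counter; the written value itself is ignored.
+ */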
+static ssize_t compact_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ unsigned long nr_migrated;
+ struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ meta = zram->meta;
+ nr_migrated = zs_compact(meta->mem_pool);
+ atomic64_add(nr_migrated, &zram->stats.num_migrated);
+ up_read(&zram->init_lock);
+
+ return len;
+}
+
static ssize_t disksize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -89,6 +121,7 @@ static ssize_t orig_data_size_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
+ deprecated_attr_warn("orig_data_size");
return scnprintf(buf, PAGE_SIZE, "%llu\n",
(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
@@ -99,6 +132,7 @@ static ssize_t mem_used_total_show(struct device *dev,
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
+ deprecated_attr_warn("mem_used_total");
down_read(&zram->init_lock);
if (init_done(zram)) {
struct zram_meta *meta = zram->meta;
@@ -128,6 +162,7 @@ static ssize_t mem_limit_show(struct device *dev,
u64 val;
struct zram *zram = dev_to_zram(dev);
+ deprecated_attr_warn("mem_limit");
down_read(&zram->init_lock);
val = zram->limit_pages;
up_read(&zram->init_lock);
@@ -159,6 +194,7 @@ static ssize_t mem_used_max_show(struct device *dev,
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
+ deprecated_attr_warn("mem_used_max");
down_read(&zram->init_lock);
if (init_done(zram))
val = atomic_long_read(&zram->stats.max_used_pages);
@@ -670,8 +706,12 @@ out:
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, int rw)
{
+ unsigned long start_time = jiffies;
int ret;
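+ /* Account this request in the generic block-layer I/O statistics for
+ * the zram disk (part0), matched by generic_end_io_acct() below.
+ */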
+ generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
+ &zram->disk->part0);
+
if (rw == READ) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset);
@@ -680,6 +720,8 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset);
}
+ generic_end_io_acct(rw, &zram->disk->part0, start_time);
+
if (unlikely(ret)) {
if (rw == READ)
atomic64_inc(&zram->stats.failed_reads);
@@ -1017,6 +1059,7 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
+static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
@@ -1027,6 +1070,55 @@ static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
+static ssize_t io_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
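+ /* Columns: failed_reads failed_writes invalid_io notify_free */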
+ ret = scnprintf(buf, PAGE_SIZE,
+ "%8llu %8llu %8llu %8llu\n",
+ (u64)atomic64_read(&zram->stats.failed_reads),
+ (u64)atomic64_read(&zram->stats.failed_writes),
+ (u64)atomic64_read(&zram->stats.invalid_io),
+ (u64)atomic64_read(&zram->stats.notify_free));
+ up_read(&zram->init_lock);
+
+ return ret;
+}
+
+static ssize_t mm_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 orig_size, mem_used = 0;
+ long max_used;
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ if (init_done(zram))
+ mem_used = zs_get_total_pages(zram->meta->mem_pool);
+
+ orig_size = atomic64_read(&zram->stats.pages_stored);
+ max_used = atomic_long_read(&zram->stats.max_used_pages);
+
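+ /* Columns: orig_data_size compr_data_size mem_used_total mem_limit
+ * mem_used_max zero_pages num_migrated
+ */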
+ ret = scnprintf(buf, PAGE_SIZE,
+ "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
+ orig_size << PAGE_SHIFT,
+ (u64)atomic64_read(&zram->stats.compr_data_size),
+ mem_used << PAGE_SHIFT,
+ zram->limit_pages << PAGE_SHIFT,
+ max_used << PAGE_SHIFT,
+ (u64)atomic64_read(&zram->stats.zero_pages),
+ (u64)atomic64_read(&zram->stats.num_migrated));
+ up_read(&zram->init_lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(io_stat);
+static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
@@ -1044,6 +1136,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_num_writes.attr,
&dev_attr_failed_reads.attr,
&dev_attr_failed_writes.attr,
+ &dev_attr_compact.attr,
&dev_attr_invalid_io.attr,
&dev_attr_notify_free.attr,
&dev_attr_zero_pages.attr,
@@ -1054,6 +1147,8 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_used_max.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
+ &dev_attr_io_stat.attr,
+ &dev_attr_mm_stat.attr,
NULL,
};
@@ -1082,6 +1177,7 @@ static int create_device(struct zram *zram, int device_id)
if (!zram->disk) {
pr_warn("Error allocating disk structure for device %d\n",
device_id);
+ ret = -ENOMEM;
goto out_free_queue;
}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 17056e589146..570c598f4ce9 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -84,6 +84,7 @@ struct zram_stats {
atomic64_t compr_data_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
atomic64_t num_writes; /* --do-- */
+ atomic64_t num_migrated; /* no. of migrated objects */
atomic64_t failed_reads; /* can happen when memory is too low */
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 364f080768d0..ed5c2738bea2 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -2,9 +2,17 @@
menu "Bluetooth device drivers"
depends on BT
+config BT_INTEL
+ tristate
+
+config BT_BCM
+ tristate
+ select FW_LOADER
+
config BT_HCIBTUSB
tristate "HCI USB driver"
depends on USB
+ select BT_INTEL
help
Bluetooth HCI USB driver.
This driver is required if you want to use Bluetooth devices with
@@ -13,6 +21,17 @@ config BT_HCIBTUSB
Say Y here to compile support for Bluetooth USB devices into the
kernel or say M to compile it as module (btusb).
+config BT_HCIBTUSB_BCM
+ bool "Broadcom protocol support"
+ depends on BT_HCIBTUSB
+ select BT_BCM
+ default y
+ help
+ The Broadcom protocol support enables firmware and patchram
+ downloads for Broadcom Bluetooth controllers.
+
+ Say Y here to compile support for the Broadcom protocol.
+
config BT_HCIBTSDIO
tristate "HCI SDIO driver"
depends on MMC
@@ -62,6 +81,7 @@ config BT_HCIUART_BCSP
config BT_HCIUART_ATH3K
bool "Atheros AR300x serial support"
depends on BT_HCIUART
+ select BT_HCIUART_H4
help
HCIATH3K (HCI Atheros AR300x) is a serial protocol for
communication between host and Atheros AR300x Bluetooth devices.
@@ -94,6 +114,27 @@ config BT_HCIUART_3WIRE
Say Y here to compile support for Three-wire UART protocol.
+config BT_HCIUART_INTEL
+ bool "Intel protocol support"
+ depends on BT_HCIUART
+ select BT_INTEL
+ help
+ The Intel protocol support enables Bluetooth HCI over a serial
+ port interface for Intel Bluetooth controllers.
+
+ Say Y here to compile support for the Intel protocol.
+
+config BT_HCIUART_BCM
+ bool "Broadcom protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ select BT_BCM
+ help
+ The Broadcom protocol support enables Bluetooth HCI over a serial
+ port interface for Broadcom Bluetooth controllers.
+
+ Say Y here to compile support for the Broadcom protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 9fe8a875a827..dd0d9c40b999 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -15,10 +15,12 @@ obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o
obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
+obj-$(CONFIG_BT_INTEL) += btintel.o
obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_BCM) += btbcm.o
btmrvl-y := btmrvl_main.o
btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
@@ -29,6 +31,8 @@ hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
+hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
+hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index de4c8499cbac..288547a3c566 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0489, 0xE027) },
{ USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x04F2, 0xAFF1) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0CF3, 0x3002) },
{ USB_DEVICE(0x0CF3, 0xE019) },
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 4f7e8d400bc0..6de97b3871b0 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -227,7 +227,6 @@ static void bt3c_receive(struct bt3c_info *info)
iobase = info->p_dev->resource[0]->start;
avail = bt3c_read(iobase, 0x7006);
- //printk("bt3c_cs: receiving %d bytes\n", avail);
bt3c_address(iobase, 0x7480);
while (size < avail) {
@@ -250,7 +249,6 @@ static void bt3c_receive(struct bt3c_info *info)
bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
inb(iobase + DATA_H);
- //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -364,7 +362,6 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
if (stat & 0x0001)
bt3c_receive(info);
if (stat & 0x0002) {
- //BT_ERR("Ack (stat=0x%04x)", stat);
clear_bit(XMIT_SENDING, &(info->tx_state));
bt3c_write_wakeup(info);
}
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
new file mode 100644
index 000000000000..4bba86677adc
--- /dev/null
+++ b/drivers/bluetooth/btbcm.c
@@ -0,0 +1,397 @@
+/*
+ *
+ * Bluetooth support for Broadcom devices
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btbcm.h"
+
+#define VERSION "0.1"
+
+#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
+
+int btbcm_check_bdaddr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_bd_addr *bda;
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ int err = PTR_ERR(skb);
+ BT_ERR("%s: BCM: Reading device address failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*bda)) {
+ BT_ERR("%s: BCM: Device address length mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (bda->status) {
+ BT_ERR("%s: BCM: Device address result failed (%02x)",
+ hdev->name, bda->status);
+ kfree_skb(skb);
+ return -bt_to_errno(bda->status);
+ }
+
+ /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
+ * with no configured address.
+ */
+ if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0)) {
+ BT_INFO("%s: BCM: Using default device address (%pMR)",
+ hdev->name, &bda->bdaddr);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_check_bdaddr);
+
+int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: BCM: Change address command failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
+
+int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
+{
+ const struct hci_command_hdr *cmd;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ struct sk_buff *skb;
+ u16 opcode;
+ int err;
+
+ err = request_firmware(&fw, firmware, &hdev->dev);
+ if (err < 0) {
+ BT_INFO("%s: BCM: Patch %s not found", hdev->name, firmware);
+ return err;
+ }
+
+ /* Start Download */
+ skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
+ hdev->name, err);
+ goto done;
+ }
+ kfree_skb(skb);
+
+ /* 50 msec delay after Download Minidrv completes */
+ msleep(50);
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
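+ /* The .hcd patch file is a stream of raw HCI commands; replay each
+ * command (header plus parameters) against the controller.
+ */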
+ while (fw_size >= sizeof(*cmd)) {
+ const u8 *cmd_param;
+
+ cmd = (struct hci_command_hdr *)fw_ptr;
+ fw_ptr += sizeof(*cmd);
+ fw_size -= sizeof(*cmd);
+
+ if (fw_size < cmd->plen) {
+ BT_ERR("%s: BCM: Patch %s is corrupted", hdev->name,
+ firmware);
+ err = -EINVAL;
+ goto done;
+ }
+
+ cmd_param = fw_ptr;
+ fw_ptr += cmd->plen;
+ fw_size -= cmd->plen;
+
+ opcode = le16_to_cpu(cmd->opcode);
+
+ skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: BCM: Patch command %04x failed (%d)",
+ hdev->name, opcode, err);
+ goto done;
+ }
+ kfree_skb(skb);
+ }
+
+ /* 250 msec delay after Launch Ram completes */
+ msleep(250);
+
+done:
+ release_firmware(fw);
+ return err;
+}
+EXPORT_SYMBOL(btbcm_patchram);
+
+static int btbcm_reset(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ int err = PTR_ERR(skb);
+ BT_ERR("%s: BCM: Reset failed (%d)", hdev->name, err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: BCM: Reading local version info failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return skb;
+ }
+
+ if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+ BT_ERR("%s: BCM: Local version length mismatch", hdev->name);
+ kfree_skb(skb);
+ return ERR_PTR(-EIO);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *btbcm_read_verbose_config(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: BCM: Read verbose config info failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return skb;
+ }
+
+ if (skb->len != 7) {
+ BT_ERR("%s: BCM: Verbose config length mismatch", hdev->name);
+ kfree_skb(skb);
+ return ERR_PTR(-EIO);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc5a, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: BCM: Read USB product info failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return skb;
+ }
+
+ if (skb->len != 5) {
+ BT_ERR("%s: BCM: USB product length mismatch", hdev->name);
+ kfree_skb(skb);
+ return ERR_PTR(-EIO);
+ }
+
+ return skb;
+}
+
+static const struct {
+ u16 subver;
+ const char *name;
+} bcm_uart_subver_table[] = {
+ { 0x410e, "BCM43341B0" }, /* 002.001.014 */
+ { }
+};
+
+static const struct {
+ u16 subver;
+ const char *name;
+} bcm_usb_subver_table[] = {
+ { 0x210b, "BCM43142A0" }, /* 001.001.011 */
+ { 0x2112, "BCM4314A0" }, /* 001.001.018 */
+ { 0x2118, "BCM20702A0" }, /* 001.001.024 */
+ { 0x2126, "BCM4335A0" }, /* 001.001.038 */
+ { 0x220e, "BCM20702A1" }, /* 001.002.014 */
+ { 0x230f, "BCM4354A2" }, /* 001.003.015 */
+ { 0x4106, "BCM4335B0" }, /* 002.001.006 */
+ { 0x410e, "BCM20702B0" }, /* 002.001.014 */
+ { 0x6109, "BCM4335C0" }, /* 003.001.009 */
+ { 0x610c, "BCM4354" }, /* 003.001.012 */
+ { }
+};
+
+int btbcm_setup_patchram(struct hci_dev *hdev)
+{
+ char fw_name[64];
+ u16 subver, rev, pid, vid;
+ const char *hw_name = NULL;
+ struct sk_buff *skb;
+ struct hci_rp_read_local_version *ver;
+ int i, err;
+
+ /* Reset */
+ err = btbcm_reset(hdev);
+ if (err)
+ return err;
+
+ /* Read Local Version Info */
+ skb = btbcm_read_local_version(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ver = (struct hci_rp_read_local_version *)skb->data;
+ rev = le16_to_cpu(ver->hci_rev);
+ subver = le16_to_cpu(ver->lmp_subver);
+ kfree_skb(skb);
+
+ /* Read Verbose Config Version Info */
+ skb = btbcm_read_verbose_config(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
+ switch ((rev & 0xf000) >> 12) {
+ case 0:
+ for (i = 0; bcm_uart_subver_table[i].name; i++) {
+ if (subver == bcm_uart_subver_table[i].subver) {
+ hw_name = bcm_uart_subver_table[i].name;
+ break;
+ }
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "brcm/%s.hcd",
+ hw_name ? : "BCM");
+ break;
+ case 1:
+ case 2:
+ /* Read USB Product Info */
+ skb = btbcm_read_usb_product(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ vid = get_unaligned_le16(skb->data + 1);
+ pid = get_unaligned_le16(skb->data + 3);
+ kfree_skb(skb);
+
+ for (i = 0; bcm_usb_subver_table[i].name; i++) {
+ if (subver == bcm_usb_subver_table[i].subver) {
+ hw_name = bcm_usb_subver_table[i].name;
+ break;
+ }
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd",
+ hw_name ? : "BCM", vid, pid);
+ break;
+ default:
+ return 0;
+ }
+
+ BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+ hw_name ? : "BCM", (subver & 0x7000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+ err = btbcm_patchram(hdev, fw_name);
+ if (err == -ENOENT)
+ return 0;
+
+ /* Reset */
+ err = btbcm_reset(hdev);
+ if (err)
+ return err;
+
+ /* Read Local Version Info */
+ skb = btbcm_read_local_version(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ver = (struct hci_rp_read_local_version *)skb->data;
+ rev = le16_to_cpu(ver->hci_rev);
+ subver = le16_to_cpu(ver->lmp_subver);
+ kfree_skb(skb);
+
+ BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
+ hw_name ? : "BCM", (subver & 0x7000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+ btbcm_check_bdaddr(hdev);
+
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
+
+int btbcm_setup_apple(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Read Verbose Config Version Info */
+ skb = btbcm_read_verbose_config(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+ get_unaligned_le16(skb->data + 5));
+ kfree_skb(skb);
+
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_setup_apple);
+
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
+MODULE_DESCRIPTION("Bluetooth support for Broadcom devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
new file mode 100644
index 000000000000..eb6ab5f9483d
--- /dev/null
+++ b/drivers/bluetooth/btbcm.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * Bluetooth support for Broadcom devices
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#if IS_ENABLED(CONFIG_BT_BCM)
+
+int btbcm_check_bdaddr(struct hci_dev *hdev);
+int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int btbcm_patchram(struct hci_dev *hdev, const char *firmware);
+
+int btbcm_setup_patchram(struct hci_dev *hdev);
+int btbcm_setup_apple(struct hci_dev *hdev);
+
+#else
+
+static inline int btbcm_check_bdaddr(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btbcm_setup_patchram(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static inline int btbcm_setup_apple(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
new file mode 100644
index 000000000000..2d43d4279b00
--- /dev/null
+++ b/drivers/bluetooth/btintel.c
@@ -0,0 +1,101 @@
+/*
+ *
+ * Bluetooth support for Intel devices
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btintel.h"
+
+#define VERSION "0.1"
+
+#define BDADDR_INTEL (&(bdaddr_t) {{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
+
+int btintel_check_bdaddr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_bd_addr *bda;
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ int err = PTR_ERR(skb);
+ BT_ERR("%s: Reading Intel device address failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*bda)) {
+ BT_ERR("%s: Intel device address length mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (bda->status) {
+ BT_ERR("%s: Intel device address result failed (%02x)",
+ hdev->name, bda->status);
+ kfree_skb(skb);
+ return -bt_to_errno(bda->status);
+ }
+
+ /* For some Intel based controllers, the default Bluetooth device
+ * address 00:03:19:9E:8B:00 can be found. These controllers are
+ * fully operational, but have the danger of duplicate addresses
+ * and that in turn can cause problems with Bluetooth operation.
+ */
+ if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
+ BT_ERR("%s: Found Intel default device address (%pMR)",
+ hdev->name, &bda->bdaddr);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
+
+int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Changing Intel device address failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
+
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
+MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
new file mode 100644
index 000000000000..4bda6ab34f60
--- /dev/null
+++ b/drivers/bluetooth/btintel.h
@@ -0,0 +1,89 @@
+/*
+ *
+ * Bluetooth support for Intel devices
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+struct intel_version {
+ u8 status;
+ u8 hw_platform;
+ u8 hw_variant;
+ u8 hw_revision;
+ u8 fw_variant;
+ u8 fw_revision;
+ u8 fw_build_num;
+ u8 fw_build_ww;
+ u8 fw_build_yy;
+ u8 fw_patch_num;
+} __packed;
+
+struct intel_boot_params {
+ __u8 status;
+ __u8 otp_format;
+ __u8 otp_content;
+ __u8 otp_patch;
+ __le16 dev_revid;
+ __u8 secure_boot;
+ __u8 key_from_hdr;
+ __u8 key_type;
+ __u8 otp_lock;
+ __u8 api_lock;
+ __u8 debug_lock;
+ bdaddr_t otp_bdaddr;
+ __u8 min_fw_build_nn;
+ __u8 min_fw_build_cw;
+ __u8 min_fw_build_yy;
+ __u8 limited_cce;
+ __u8 unlocked_state;
+} __packed;
+
+struct intel_bootup {
+ __u8 zero;
+ __u8 num_cmds;
+ __u8 source;
+ __u8 reset_type;
+ __u8 reset_reason;
+ __u8 ddc_status;
+} __packed;
+
+struct intel_secure_send_result {
+ __u8 result;
+ __le16 opcode;
+ __u8 status;
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_INTEL)
+
+int btintel_check_bdaddr(struct hci_dev *hdev);
+int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+
+#else
+
+static inline int btintel_check_bdaddr(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index e75f8ee2512c..086f0ec89580 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -111,6 +111,7 @@ struct btmrvl_private {
/* Vendor specific Bluetooth commands */
#define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03
+#define BT_CMD_ROUTE_SCO_TO_HOST 0xFC1D
#define BT_CMD_SET_BDADDR 0xFC22
#define BT_CMD_AUTO_SLEEP_MODE 0xFC23
#define BT_CMD_HOST_SLEEP_CONFIG 0xFC59
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 413597789c61..de05deb444ce 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -230,6 +230,18 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
}
EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
+static int btmrvl_enable_sco_routing_to_host(struct btmrvl_private *priv)
+{
+ int ret;
+ u8 subcmd = 0;
+
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_ROUTE_SCO_TO_HOST, &subcmd, 1);
+ if (ret)
+ BT_ERR("BT_CMD_ROUTE_SCO_TO_HOST command failed: %#x", ret);
+
+ return ret;
+}
+
int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd)
{
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
@@ -558,6 +570,8 @@ static int btmrvl_setup(struct hci_dev *hdev)
btmrvl_check_device_tree(priv);
+ btmrvl_enable_sco_routing_to_host(priv);
+
btmrvl_pscan_window_reporting(priv, 0x01);
priv->btmrvl_dev.psmode = 1;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8bfc4c2bba87..d21f3b4176d3 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -24,11 +24,15 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/firmware.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define VERSION "0.7"
+#include "btintel.h"
+#include "btbcm.h"
+
+#define VERSION "0.8"
static bool disable_scofix;
static bool force_scofix;
@@ -52,6 +56,9 @@ static struct usb_driver btusb_driver;
#define BTUSB_SWAVE 0x1000
#define BTUSB_INTEL_NEW 0x2000
#define BTUSB_AMP 0x4000
+#define BTUSB_QCA_ROME 0x8000
+#define BTUSB_BCM_APPLE 0x10000
+#define BTUSB_REALTEK 0x20000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -61,7 +68,8 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
/* Apple-specific (Broadcom) devices */
- { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_APPLE },
/* MediaTek MT76x0E */
{ USB_DEVICE(0x0e8d, 0x763f) },
@@ -107,13 +115,7 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
- { USB_DEVICE(0x0489, 0xe042) },
- { USB_DEVICE(0x04ca, 0x2003) },
- { USB_DEVICE(0x0b05, 0x17b5) },
- { USB_DEVICE(0x0b05, 0x17cb) },
{ USB_DEVICE(0x413c, 0x8197) },
- { USB_DEVICE(0x13d3, 0x3404),
- .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM20702B0 (Dynex/Insignia) */
{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
@@ -135,10 +137,12 @@ static const struct usb_device_id btusb_table[] = {
.driver_info = BTUSB_BCM_PATCHRAM },
/* Belkin F8065bf - Broadcom based */
- { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* IMC Networks - Broadcom based */
- { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
@@ -159,6 +163,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
@@ -212,6 +217,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+ /* QCA ROME chipset */
+ { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -281,6 +290,28 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
+ /* Realtek Bluetooth devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
+ .driver_info = BTUSB_REALTEK },
+
+ /* Additional Realtek 8723AE Bluetooth devices */
+ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
+
+ /* Additional Realtek 8723BE Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+
+ /* Additional Realtek 8821AE Bluetooth devices */
+ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3458), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+
{ } /* Terminating entry */
};
@@ -337,17 +368,9 @@ struct btusb_data {
int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
-};
-static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout,
- unsigned mode)
-{
- might_sleep();
- if (!test_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout,
- mode, timeout);
-}
+ int (*setup_on_usb)(struct hci_dev *hdev);
+};
static inline void btusb_free_frags(struct btusb_data *data)
{
@@ -888,6 +911,15 @@ static int btusb_open(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ /* Patch the USB firmware before any URBs are started on the HCI
+ * path; it is safer to use the USB bulk channel for downloading
+ * the USB patch.
+ */
+ if (data->setup_on_usb) {
+ err = data->setup_on_usb(hdev);
+ if (err < 0)
+ return err;
+ }
+
err = usb_autopm_get_interface(data->intf);
if (err < 0)
return err;
@@ -1263,6 +1295,28 @@ static void btusb_waker(struct work_struct *work)
usb_autopm_put_interface(data->intf);
}
+static struct sk_buff *btusb_read_local_version(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return skb;
+ }
+
+ if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+ hdev->name);
+ kfree_skb(skb);
+ return ERR_PTR(-EIO);
+ }
+
+ return skb;
+}
+
static int btusb_setup_bcm92035(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -1287,12 +1341,9 @@ static int btusb_setup_csr(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
- skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("Reading local version failed (%ld)", -PTR_ERR(skb));
+ skb = btusb_read_local_version(hdev);
+ if (IS_ERR(skb))
return -PTR_ERR(skb);
- }
rp = (struct hci_rp_read_local_version *)skb->data;
@@ -1318,39 +1369,378 @@ static int btusb_setup_csr(struct hci_dev *hdev)
return ret;
}
-struct intel_version {
- u8 status;
- u8 hw_platform;
- u8 hw_variant;
- u8 hw_revision;
- u8 fw_variant;
- u8 fw_revision;
- u8 fw_build_num;
- u8 fw_build_ww;
- u8 fw_build_yy;
- u8 fw_patch_num;
+#define RTL_FRAG_LEN 252
+
+struct rtl_download_cmd {
+ __u8 index;
+ __u8 data[RTL_FRAG_LEN];
+} __packed;
+
+struct rtl_download_response {
+ __u8 status;
+ __u8 index;
} __packed;
-struct intel_boot_params {
- __u8 status;
- __u8 otp_format;
- __u8 otp_content;
- __u8 otp_patch;
- __le16 dev_revid;
- __u8 secure_boot;
- __u8 key_from_hdr;
- __u8 key_type;
- __u8 otp_lock;
- __u8 api_lock;
- __u8 debug_lock;
- bdaddr_t otp_bdaddr;
- __u8 min_fw_build_nn;
- __u8 min_fw_build_cw;
- __u8 min_fw_build_yy;
- __u8 limited_cce;
- __u8 unlocked_state;
+struct rtl_rom_version_evt {
+ __u8 status;
+ __u8 version;
} __packed;
+struct rtl_epatch_header {
+ __u8 signature[8];
+ __le32 fw_version;
+ __le16 num_patches;
+} __packed;
+
+#define RTL_EPATCH_SIGNATURE "Realtech"
+#define RTL_ROM_LMP_3499 0x3499
+#define RTL_ROM_LMP_8723A 0x1200
+#define RTL_ROM_LMP_8723B 0x8723
+#define RTL_ROM_LMP_8821A 0x8821
+#define RTL_ROM_LMP_8761A 0x8761
+
+static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
+{
+ struct rtl_rom_version_evt *rom_version;
+ struct sk_buff *skb;
+ int ret;
+
+ /* Read RTL ROM version command */
+ skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Read ROM version failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*rom_version)) {
+ BT_ERR("%s: RTL version event length mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ rom_version = (struct rtl_rom_version_evt *)skb->data;
+ BT_INFO("%s: rom_version status=%x version=%x",
+ hdev->name, rom_version->status, rom_version->version);
+
+ ret = rom_version->status;
+ if (ret == 0)
+ *version = rom_version->version;
+
+ kfree_skb(skb);
+ return ret;
+}
+
+static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
+ const struct firmware *fw,
+ unsigned char **_buf)
+{
+ const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+ struct rtl_epatch_header *epatch_info;
+ unsigned char *buf;
+ int i, ret, len;
+ size_t min_size;
+ u8 opcode, length, data, rom_version = 0;
+ int project_id = -1;
+ const unsigned char *fwptr, *chip_id_base;
+ const unsigned char *patch_length_base, *patch_offset_base;
+ u32 patch_offset = 0;
+ u16 patch_length, num_patches;
+ const u16 project_id_to_lmp_subver[] = {
+ RTL_ROM_LMP_8723A,
+ RTL_ROM_LMP_8723B,
+ RTL_ROM_LMP_8821A,
+ RTL_ROM_LMP_8761A
+ };
+
+ ret = rtl_read_rom_version(hdev, &rom_version);
+ if (ret)
+ return -bt_to_errno(ret);
+
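+ /* The file must hold at least the epatch header, the extension
+ * signature and one 3-byte (data, length, opcode) instruction.
+ */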
+ min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ fwptr = fw->data + fw->size - sizeof(extension_sig);
+ if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
+ BT_ERR("%s: extension section signature mismatch", hdev->name);
+ return -EINVAL;
+ }
+
+ /* Walk backwards from the end of the firmware, parsing instructions
+ * until we find one that identifies the "project ID" for the
+ * hardware supported by this firmware file.
+ * Once we have that, double-check that the project ID is suitable
+ * for the hardware we are working with.
+ */
+ while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+ opcode = *--fwptr;
+ length = *--fwptr;
+ data = *--fwptr;
+
+ BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
+
+ if (opcode == 0xff) /* EOF */
+ break;
+
+ if (length == 0) {
+ BT_ERR("%s: found instruction with length 0",
+ hdev->name);
+ return -EINVAL;
+ }
+
+ if (opcode == 0 && length == 1) {
+ project_id = data;
+ break;
+ }
+
+ fwptr -= length;
+ }
+
+ if (project_id < 0) {
+ BT_ERR("%s: failed to find version instruction", hdev->name);
+ return -EINVAL;
+ }
+
+ if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+ BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+ return -EINVAL;
+ }
+
+ if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+ BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
+ project_id_to_lmp_subver[project_id], lmp_subver);
+ return -EINVAL;
+ }
+
+ epatch_info = (struct rtl_epatch_header *)fw->data;
+ if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
+ BT_ERR("%s: bad EPATCH signature", hdev->name);
+ return -EINVAL;
+ }
+
+ num_patches = le16_to_cpu(epatch_info->num_patches);
+ BT_DBG("fw_version=%x, num_patches=%d",
+ le32_to_cpu(epatch_info->fw_version), num_patches);
+
+ /* After the rtl_epatch_header there is a funky patch metadata section.
+ * Assuming 2 patches, the layout is:
+ * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
+ *
+ * Find the right patch for this chip.
+ */
+ min_size += 8 * num_patches;
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+ patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
+ patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
+ for (i = 0; i < num_patches; i++) {
+ u16 chip_id = get_unaligned_le16(chip_id_base +
+ (i * sizeof(u16)));
+ if (chip_id == rom_version + 1) {
+ patch_length = get_unaligned_le16(patch_length_base +
+ (i * sizeof(u16)));
+ patch_offset = get_unaligned_le32(patch_offset_base +
+ (i * sizeof(u32)));
+ break;
+ }
+ }
+
+ if (!patch_offset) {
+ BT_ERR("%s: didn't find patch for chip id %d",
+ hdev->name, rom_version);
+ return -EINVAL;
+ }
+
+ BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
+ min_size = patch_offset + patch_length;
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ /* Copy the firmware into a new buffer and write the version at
+ * the end.
+ */
+ len = patch_length;
+ buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+ *_buf = buf;
+ return len;
+}
+
+static int rtl_download_firmware(struct hci_dev *hdev,
+ const unsigned char *data, int fw_len)
+{
+ struct rtl_download_cmd *dl_cmd;
+ int frag_num = fw_len / RTL_FRAG_LEN + 1;
+ int frag_len = RTL_FRAG_LEN;
+ int ret = 0;
+ int i;
+
+ dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+ if (!dl_cmd)
+ return -ENOMEM;
+
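+ /* Send the firmware in RTL_FRAG_LEN sized fragments; the final
+ * fragment carries the remainder and has bit 0x80 set in its index
+ * to mark the end of the data.
+ */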
+ for (i = 0; i < frag_num; i++) {
+ struct rtl_download_response *dl_resp;
+ struct sk_buff *skb;
+
+ BT_DBG("download fw (%d/%d)", i, frag_num);
+
+ dl_cmd->index = i;
+ if (i == (frag_num - 1)) {
+ dl_cmd->index |= 0x80; /* data end */
+ frag_len = fw_len % RTL_FRAG_LEN;
+ }
+ memcpy(dl_cmd->data, data, frag_len);
+
+ /* Send download command */
+ skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: download fw command failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ ret = -PTR_ERR(skb);
+ goto out;
+ }
+
+ if (skb->len != sizeof(*dl_resp)) {
+ BT_ERR("%s: download fw event length mismatch",
+ hdev->name);
+ kfree_skb(skb);
+ ret = -EIO;
+ goto out;
+ }
+
+ dl_resp = (struct rtl_download_response *)skb->data;
+ if (dl_resp->status != 0) {
+ kfree_skb(skb);
+ ret = bt_to_errno(dl_resp->status);
+ goto out;
+ }
+
+ kfree_skb(skb);
+ data += RTL_FRAG_LEN;
+ }
+
+out:
+ kfree(dl_cmd);
+ return ret;
+}
+
+static int btusb_setup_rtl8723a(struct hci_dev *hdev)
+{
+ struct btusb_data *data = dev_get_drvdata(&hdev->dev);
+ struct usb_device *udev = interface_to_usbdev(data->intf);
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
+ ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
+ if (ret < 0) {
+ BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
+ return ret;
+ }
+
+ if (fw->size < 8) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check that the firmware doesn't have the epatch signature
+ * (which is only for RTL8723B and newer).
+ */
+ if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
+ BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rtl_download_firmware(hdev, fw->data, fw->size);
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
+ const char *fw_name)
+{
+ struct btusb_data *data = dev_get_drvdata(&hdev->dev);
+ struct usb_device *udev = interface_to_usbdev(data->intf);
+ unsigned char *fw_data = NULL;
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
+ ret = request_firmware(&fw, fw_name, &udev->dev);
+ if (ret < 0) {
+ BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
+ return ret;
+ }
+
+ ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+ if (ret < 0)
+ goto out;
+
+ ret = rtl_download_firmware(hdev, fw_data, ret);
+ kfree(fw_data);
+ if (ret < 0)
+ goto out;
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int btusb_setup_realtek(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct hci_rp_read_local_version *resp;
+ u16 lmp_subver;
+
+ skb = btusb_read_local_version(hdev);
+ if (IS_ERR(skb))
+ return -PTR_ERR(skb);
+
+ resp = (struct hci_rp_read_local_version *)skb->data;
+ BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+ "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
+ resp->lmp_ver, resp->lmp_subver);
+
+ lmp_subver = le16_to_cpu(resp->lmp_subver);
+ kfree_skb(skb);
+
+ /* Match a set of subver values that correspond to stock firmware,
+ * which is not compatible with standard btusb.
+ * If matched, upload an alternative firmware that does conform to
+ * standard btusb. Once that firmware is uploaded, the subver changes
+ * to a different value.
+ */
+ switch (lmp_subver) {
+ case RTL_ROM_LMP_8723A:
+ case RTL_ROM_LMP_3499:
+ return btusb_setup_rtl8723a(hdev);
+ case RTL_ROM_LMP_8723B:
+ return btusb_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8723b_fw.bin");
+ case RTL_ROM_LMP_8821A:
+ return btusb_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8821a_fw.bin");
+ case RTL_ROM_LMP_8761A:
+ return btusb_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8761a_fw.bin");
+ default:
+ BT_INFO("rtl: assuming no firmware upload needed.");
+ return 0;
+ }
+}
+
static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -1507,51 +1897,6 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
return 0;
}
-#define BDADDR_INTEL (&(bdaddr_t) {{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
-
-static int btusb_check_bdaddr_intel(struct hci_dev *hdev)
-{
- struct sk_buff *skb;
- struct hci_rp_read_bd_addr *rp;
-
- skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s reading Intel device address failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return PTR_ERR(skb);
- }
-
- if (skb->len != sizeof(*rp)) {
- BT_ERR("%s Intel device address length mismatch", hdev->name);
- kfree_skb(skb);
- return -EIO;
- }
-
- rp = (struct hci_rp_read_bd_addr *)skb->data;
- if (rp->status) {
- BT_ERR("%s Intel device address result failed (%02x)",
- hdev->name, rp->status);
- kfree_skb(skb);
- return -bt_to_errno(rp->status);
- }
-
- /* For some Intel based controllers, the default Bluetooth device
- * address 00:03:19:9E:8B:00 can be found. These controllers are
- * fully operational, but have the danger of duplicate addresses
- * and that in turn can cause problems with Bluetooth operation.
- */
- if (!bacmp(&rp->bdaddr, BDADDR_INTEL)) {
- BT_ERR("%s found Intel default device address (%pMR)",
- hdev->name, &rp->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
- }
-
- kfree_skb(skb);
-
- return 0;
-}
-
static int btusb_setup_intel(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -1624,7 +1969,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
BT_INFO("%s: Intel device is already patched. patch num: %02x",
hdev->name, ver->fw_patch_num);
kfree_skb(skb);
- btusb_check_bdaddr_intel(hdev);
+ btintel_check_bdaddr(hdev);
return 0;
}
@@ -1637,7 +1982,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
fw = btusb_setup_intel_get_fw(hdev, ver);
if (!fw) {
kfree_skb(skb);
- btusb_check_bdaddr_intel(hdev);
+ btintel_check_bdaddr(hdev);
return 0;
}
fw_ptr = fw->data;
@@ -1718,7 +2063,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
hdev->name);
- btusb_check_bdaddr_intel(hdev);
+ btintel_check_bdaddr(hdev);
return 0;
exit_mfg_disable:
@@ -1734,7 +2079,7 @@ exit_mfg_disable:
BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
- btusb_check_bdaddr_intel(hdev);
+ btintel_check_bdaddr(hdev);
return 0;
exit_mfg_deactivate:
@@ -1755,7 +2100,7 @@ exit_mfg_deactivate:
BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
hdev->name);
- btusb_check_bdaddr_intel(hdev);
+ btintel_check_bdaddr(hdev);
return 0;
}
@@ -1797,6 +2142,38 @@ static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
return btusb_recv_bulk(data, buffer, count);
}
+static void btusb_intel_bootup(struct btusb_data *data, const void *ptr,
+ unsigned int len)
+{
+ const struct intel_bootup *evt = ptr;
+
+ if (len != sizeof(*evt))
+ return;
+
+ if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&data->flags, BTUSB_BOOTING);
+ }
+}
+
+static void btusb_intel_secure_send_result(struct btusb_data *data,
+ const void *ptr, unsigned int len)
+{
+ const struct intel_secure_send_result *evt = ptr;
+
+ if (len != sizeof(*evt))
+ return;
+
+ if (evt->result)
+ set_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
+
+ if (test_and_clear_bit(BTUSB_DOWNLOADING, &data->flags) &&
+ test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
+ }
+}
+
static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -1804,32 +2181,27 @@ static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
struct hci_event_hdr *hdr = (void *)skb->data;
- /* When the firmware loading completes the device sends
- * out a vendor specific event indicating the result of
- * the firmware loading.
- */
- if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
- skb->data[2] == 0x06) {
- if (skb->data[3] != 0x00)
- test_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
-
- if (test_and_clear_bit(BTUSB_DOWNLOADING,
- &data->flags) &&
- test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
- smp_mb__after_atomic();
- wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
- }
- }
-
- /* When switching to the operational firmware the device
- * sends a vendor specific event indicating that the bootup
- * completed.
- */
- if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
- skb->data[2] == 0x02) {
- if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
- smp_mb__after_atomic();
- wake_up_bit(&data->flags, BTUSB_BOOTING);
+ if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
+ hdr->plen > 0) {
+ const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
+ unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+
+ switch (skb->data[2]) {
+ case 0x02:
+ /* When switching to the operational firmware
+ * the device sends a vendor specific event
+ * indicating that the bootup completed.
+ */
+ btusb_intel_bootup(data, ptr, len);
+ break;
+ case 0x06:
+ /* When the firmware loading completes the
+ * device sends out a vendor specific event
+ * indicating the result of the firmware
+ * loading.
+ */
+ btusb_intel_secure_send_result(data, ptr, len);
+ break;
}
}
}
@@ -2031,7 +2403,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (ver->fw_variant == 0x23) {
kfree_skb(skb);
clear_bit(BTUSB_BOOTLOADER, &data->flags);
- btusb_check_bdaddr_intel(hdev);
+ btintel_check_bdaddr(hdev);
return 0;
}
@@ -2197,9 +2569,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* and thus just timeout if that happens and fail the setup
* of this device.
*/
- err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
- msecs_to_jiffies(5000),
- TASK_INTERRUPTIBLE);
+ err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(5000));
if (err == 1) {
BT_ERR("%s: Firmware loading interrupted", hdev->name);
err = -EINTR;
@@ -2250,9 +2622,9 @@ done:
*/
BT_INFO("%s: Waiting for device to boot", hdev->name);
- err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
- msecs_to_jiffies(1000),
- TASK_INTERRUPTIBLE);
+ err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(1000));
if (err == 1) {
BT_ERR("%s: Device boot interrupted", hdev->name);
@@ -2315,15 +2687,19 @@ static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
kfree_skb(skb);
}
-static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+static int btusb_shutdown_intel(struct hci_dev *hdev)
{
struct sk_buff *skb;
long ret;
- skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
+ /* Some platforms have an issue with the BT LED when the interface is
+ * down or the BT radio is turned off: it takes 5 seconds for the
+ * BT LED to go off. This command turns off the BT LED immediately.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
- BT_ERR("%s: changing Intel device address failed (%ld)",
+ BT_ERR("%s: turning off Intel device LED failed (%ld)",
hdev->name, ret);
return ret;
}
@@ -2355,232 +2731,279 @@ static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
return 0;
}
-#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
-
-static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
+static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct usb_device *udev = data->udev;
- char fw_name[64];
- const struct firmware *fw;
- const u8 *fw_ptr;
- size_t fw_size;
- const struct hci_command_hdr *cmd;
- const u8 *cmd_param;
- u16 opcode;
struct sk_buff *skb;
- struct hci_rp_read_local_version *ver;
- struct hci_rp_read_bd_addr *bda;
+ u8 buf[10];
long ret;
- snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd",
- udev->product ? udev->product : "BCM",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
-
- ret = request_firmware(&fw, fw_name, &hdev->dev);
- if (ret < 0) {
- BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
- return 0;
- }
+ buf[0] = 0x01;
+ buf[1] = 0x01;
+ buf[2] = 0x00;
+ buf[3] = sizeof(bdaddr_t);
+ memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
- /* Reset */
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
- BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
- goto done;
+ BT_ERR("%s: Change address command failed (%ld)",
+ hdev->name, ret);
+ return ret;
}
kfree_skb(skb);
- /* Read Local Version Info */
- skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
- hdev->name, ret);
- goto done;
- }
+ return 0;
+}
- if (skb->len != sizeof(*ver)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
- hdev->name);
- kfree_skb(skb);
- ret = -EIO;
- goto done;
- }
+#define QCA_DFU_PACKET_LEN 4096
- ver = (struct hci_rp_read_local_version *)skb->data;
- BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
- "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
- ver->lmp_ver, ver->lmp_subver);
- kfree_skb(skb);
+#define QCA_GET_TARGET_VERSION 0x09
+#define QCA_CHECK_STATUS 0x05
+#define QCA_DFU_DOWNLOAD 0x01
- /* Start Download */
- skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: BCM: Download Minidrv command failed (%ld)",
- hdev->name, ret);
- goto reset_fw;
- }
- kfree_skb(skb);
+#define QCA_SYSCFG_UPDATED 0x40
+#define QCA_PATCH_UPDATED 0x80
+#define QCA_DFU_TIMEOUT 3000
- /* 50 msec delay after Download Minidrv completes */
- msleep(50);
+struct qca_version {
+ __le32 rom_version;
+ __le32 patch_version;
+ __le32 ram_version;
+ __le32 ref_clock;
+ __u8 reserved[4];
+} __packed;
- fw_ptr = fw->data;
- fw_size = fw->size;
-
- while (fw_size >= sizeof(*cmd)) {
- cmd = (struct hci_command_hdr *)fw_ptr;
- fw_ptr += sizeof(*cmd);
- fw_size -= sizeof(*cmd);
-
- if (fw_size < cmd->plen) {
- BT_ERR("%s: BCM: patch %s is corrupted",
- hdev->name, fw_name);
- ret = -EINVAL;
- goto reset_fw;
- }
+struct qca_rampatch_version {
+ __le16 rom_version;
+ __le16 patch_version;
+} __packed;
- cmd_param = fw_ptr;
- fw_ptr += cmd->plen;
- fw_size -= cmd->plen;
+struct qca_device_info {
+ u32 rom_version;
+ u8 rampatch_hdr; /* length of header in rampatch */
+ u8 nvm_hdr; /* length of header in NVM */
+ u8 ver_offset; /* offset of version structure in rampatch */
+};
- opcode = le16_to_cpu(cmd->opcode);
+static const struct qca_device_info qca_devices_table[] = {
+ { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
+ { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
+ { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
+ { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
+ { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
+};
- skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: BCM: patch command %04x failed (%ld)",
- hdev->name, opcode, ret);
- goto reset_fw;
- }
- kfree_skb(skb);
- }
+static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request,
+ void *data, u16 size)
+{
+ struct btusb_data *btdata = hci_get_drvdata(hdev);
+ struct usb_device *udev = btdata->udev;
+ int pipe, err;
+ u8 *buf;
- /* 250 msec delay after Launch Ram completes */
- msleep(250);
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
-reset_fw:
- /* Reset */
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
+ /* Some USB hosts have interoperability (IOT) issues with these
+ * controllers, so do not wait until the HCI layer is ready before
+ * issuing this vendor request.
+ */
+ pipe = usb_rcvctrlpipe(udev, 0);
+ err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN,
+ 0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
+ BT_ERR("%s: Failed to access otp area (%d)", hdev->name, err);
goto done;
}
- kfree_skb(skb);
- /* Read Local Version Info */
- skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
- hdev->name, ret);
- goto done;
- }
+ memcpy(data, buf, size);
- if (skb->len != sizeof(*ver)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
- hdev->name);
- kfree_skb(skb);
- ret = -EIO;
- goto done;
- }
+done:
+ kfree(buf);
- ver = (struct hci_rp_read_local_version *)skb->data;
- BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
- "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
- ver->lmp_ver, ver->lmp_subver);
- kfree_skb(skb);
+ return err;
+}
- /* Read BD Address */
- skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: HCI_OP_READ_BD_ADDR failed (%ld)",
- hdev->name, ret);
+static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
+ const struct firmware *firmware,
+ size_t hdr_size)
+{
+ struct btusb_data *btdata = hci_get_drvdata(hdev);
+ struct usb_device *udev = btdata->udev;
+ size_t count, size, sent = 0;
+ int pipe, len, err;
+ u8 *buf;
+
+ buf = kmalloc(QCA_DFU_PACKET_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ count = firmware->size;
+
+ size = min_t(size_t, count, hdr_size);
+ memcpy(buf, firmware->data, size);
+
+ /* Firmware patches are delivered to the controller over USB, since
+ * the binary format is laid out for that transport: the patch header
+ * goes over the USB control path and the patch body over the USB
+ * bulk endpoint.
+ */
+ pipe = usb_sndctrlpipe(udev, 0);
+ err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR,
+ 0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send headers (%d)", hdev->name, err);
goto done;
}
- if (skb->len != sizeof(*bda)) {
- BT_ERR("%s: HCI_OP_READ_BD_ADDR event length mismatch",
- hdev->name);
- kfree_skb(skb);
- ret = -EIO;
- goto done;
+ sent += size;
+ count -= size;
+
+ while (count) {
+ size = min_t(size_t, count, QCA_DFU_PACKET_LEN);
+
+ memcpy(buf, firmware->data + sent, size);
+
+ pipe = usb_sndbulkpipe(udev, 0x02);
+ err = usb_bulk_msg(udev, pipe, buf, size, &len,
+ QCA_DFU_TIMEOUT);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send body at %zd of %zd (%d)",
+ hdev->name, sent, firmware->size, err);
+ break;
+ }
+
+ if (size != len) {
+ BT_ERR("%s: Failed to get bulk buffer", hdev->name);
+ err = -EILSEQ;
+ break;
+ }
+
+ sent += size;
+ count -= size;
}
- bda = (struct hci_rp_read_bd_addr *)skb->data;
- if (bda->status) {
- BT_ERR("%s: HCI_OP_READ_BD_ADDR error status (%02x)",
- hdev->name, bda->status);
- kfree_skb(skb);
- ret = -bt_to_errno(bda->status);
- goto done;
+done:
+ kfree(buf);
+ return err;
+}
+
+static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
+ struct qca_version *ver,
+ const struct qca_device_info *info)
+{
+ struct qca_rampatch_version *rver;
+ const struct firmware *fw;
+ u32 ver_rom, ver_patch;
+ u16 rver_rom, rver_patch;
+ char fwname[64];
+ int err;
+
+ ver_rom = le32_to_cpu(ver->rom_version);
+ ver_patch = le32_to_cpu(ver->patch_version);
+
+ snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err) {
+ BT_ERR("%s: failed to request rampatch file: %s (%d)",
+ hdev->name, fwname, err);
+ return err;
}
- /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
- * with no configured address.
- */
- if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0)) {
- BT_INFO("%s: BCM: using default device address (%pMR)",
- hdev->name, &bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ BT_INFO("%s: using rampatch file: %s", hdev->name, fwname);
+
+ rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset);
+ rver_rom = le16_to_cpu(rver->rom_version);
+ rver_patch = le16_to_cpu(rver->patch_version);
+
+ BT_INFO("%s: QCA: patch rome 0x%x build 0x%x, firmware rome 0x%x "
+ "build 0x%x", hdev->name, rver_rom, rver_patch, ver_rom,
+ ver_patch);
+
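+ /* The rampatch must target the same ROM version as the controller and
+ * carry a strictly newer patch build than the one already running.
+ */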
+ if (rver_rom != ver_rom || rver_patch <= ver_patch) {
+ BT_ERR("%s: rampatch file version did not match with firmware",
+ hdev->name);
+ err = -EINVAL;
+ goto done;
}
- kfree_skb(skb);
+ err = btusb_setup_qca_download_fw(hdev, fw, info->rampatch_hdr);
done:
release_firmware(fw);
- return ret;
+ return err;
}
-static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
+ struct qca_version *ver,
+ const struct qca_device_info *info)
{
- struct sk_buff *skb;
- long ret;
+ const struct firmware *fw;
+ char fwname[64];
+ int err;
- skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: BCM: Change address command failed (%ld)",
- hdev->name, ret);
- return ret;
+ snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
+ le32_to_cpu(ver->rom_version));
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err) {
+ BT_ERR("%s: failed to request NVM file: %s (%d)",
+ hdev->name, fwname, err);
+ return err;
}
- kfree_skb(skb);
- return 0;
+ BT_INFO("%s: using NVM file: %s", hdev->name, fwname);
+
+ err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr);
+
+ release_firmware(fw);
+
+ return err;
}
-static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
- const bdaddr_t *bdaddr)
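+/* Setup flow for QCA ROME controllers: read the version information via a
+ * vendor request, look up the matching entry in qca_devices_table, then
+ * download the rampatch and/or NVM configuration, skipping either step if
+ * the reported status bits show it is already in place.
+ */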
+static int btusb_setup_qca(struct hci_dev *hdev)
{
- struct sk_buff *skb;
- u8 buf[10];
- long ret;
+ const struct qca_device_info *info = NULL;
+ struct qca_version ver;
+ u32 ver_rom;
+ u8 status;
+ int i, err;
- buf[0] = 0x01;
- buf[1] = 0x01;
- buf[2] = 0x00;
- buf[3] = sizeof(bdaddr_t);
- memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
+ err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver,
+ sizeof(ver));
+ if (err < 0)
+ return err;
- skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- BT_ERR("%s: Change address command failed (%ld)",
- hdev->name, ret);
- return ret;
+ ver_rom = le32_to_cpu(ver.rom_version);
+ for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) {
+ if (ver_rom == qca_devices_table[i].rom_version)
+ info = &qca_devices_table[i];
+ }
+ if (!info) {
+ BT_ERR("%s: don't support firmware rome 0x%x", hdev->name,
+ ver_rom);
+ return -ENODEV;
+ }
+
+ err = btusb_qca_send_vendor_req(hdev, QCA_CHECK_STATUS, &status,
+ sizeof(status));
+ if (err < 0)
+ return err;
+
+ if (!(status & QCA_PATCH_UPDATED)) {
+ err = btusb_setup_qca_load_rampatch(hdev, &ver, info);
+ if (err < 0)
+ return err;
+ }
+
+ if (!(status & QCA_SYSCFG_UPDATED)) {
+ err = btusb_setup_qca_load_nvm(hdev, &ver, info);
+ if (err < 0)
+ return err;
}
- kfree_skb(skb);
return 0;
}
@@ -2701,23 +3124,29 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
+#ifdef CONFIG_BT_HCIBTUSB_BCM
if (id->driver_info & BTUSB_BCM_PATCHRAM) {
- hdev->setup = btusb_setup_bcm_patchram;
- hdev->set_bdaddr = btusb_set_bdaddr_bcm;
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hdev->setup = btbcm_setup_patchram;
+ hdev->set_bdaddr = btbcm_set_bdaddr;
}
+ if (id->driver_info & BTUSB_BCM_APPLE)
+ hdev->setup = btbcm_setup_apple;
+#endif
+
if (id->driver_info & BTUSB_INTEL) {
hdev->setup = btusb_setup_intel;
- hdev->set_bdaddr = btusb_set_bdaddr_intel;
+ hdev->shutdown = btusb_shutdown_intel;
+ hdev->set_bdaddr = btintel_set_bdaddr;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
}
if (id->driver_info & BTUSB_INTEL_NEW) {
hdev->send = btusb_send_frame_intel;
hdev->setup = btusb_setup_intel_new;
hdev->hw_error = btusb_hw_error_intel;
- hdev->set_bdaddr = btusb_set_bdaddr_intel;
+ hdev->set_bdaddr = btintel_set_bdaddr;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
@@ -2734,9 +3163,18 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_ATH3012) {
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
+ if (id->driver_info & BTUSB_QCA_ROME) {
+ data->setup_on_usb = btusb_setup_qca;
+ hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+ }
+
+ if (id->driver_info & BTUSB_REALTEK)
+ hdev->setup = btusb_setup_realtek;
+
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
data->isoc = NULL;
@@ -2772,6 +3210,8 @@ static int btusb_probe(struct usb_interface *intf,
/* Fake CSR devices with broken commands */
if (bcdDevice <= 0x100)
hdev->setup = btusb_setup_csr;
+
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
}
if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 9c4dcf4c62ea..ec8fa0e0f036 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -45,6 +45,7 @@ struct ath_struct {
struct hci_uart *hu;
unsigned int cur_sleep;
+ struct sk_buff *rx_skb;
struct sk_buff_head txq;
struct work_struct ctxtsw;
};
@@ -94,7 +95,6 @@ static void ath_hci_uart_work(struct work_struct *work)
hci_uart_tx_wakeup(hu);
}
-/* Initialize protocol */
static int ath_open(struct hci_uart *hu)
{
struct ath_struct *ath;
@@ -115,8 +115,7 @@ static int ath_open(struct hci_uart *hu)
return 0;
}
-/* Flush protocol data */
-static int ath_flush(struct hci_uart *hu)
+static int ath_close(struct hci_uart *hu)
{
struct ath_struct *ath = hu->priv;
@@ -124,11 +123,17 @@ static int ath_flush(struct hci_uart *hu)
skb_queue_purge(&ath->txq);
+ kfree_skb(ath->rx_skb);
+
+ cancel_work_sync(&ath->ctxtsw);
+
+ hu->priv = NULL;
+ kfree(ath);
+
return 0;
}
-/* Close protocol */
-static int ath_close(struct hci_uart *hu)
+static int ath_flush(struct hci_uart *hu)
{
struct ath_struct *ath = hu->priv;
@@ -136,17 +141,65 @@ static int ath_close(struct hci_uart *hu)
skb_queue_purge(&ath->txq);
- cancel_work_sync(&ath->ctxtsw);
+ return 0;
+}
- hu->priv = NULL;
- kfree(ath);
+static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 buf[10];
+ int err;
+
+ buf[0] = 0x01;
+ buf[1] = 0x01;
+ buf[2] = 0x00;
+ buf[3] = sizeof(bdaddr_t);
+ memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
+
+ skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Change address command failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+ kfree_skb(skb);
return 0;
}
+static int ath_setup(struct hci_uart *hu)
+{
+ BT_DBG("hu %p", hu);
+
+ hu->hdev->set_bdaddr = ath_set_bdaddr;
+
+ return 0;
+}
+
+static const struct h4_recv_pkt ath_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+static int ath_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct ath_struct *ath = hu->priv;
+
+ ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
+ if (IS_ERR(ath->rx_skb)) {
+ int err = PTR_ERR(ath->rx_skb);
+ BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ return err;
+ }
+
+ return count;
+}
+
#define HCI_OP_ATH_SLEEP 0xFC04
-/* Enqueue frame for transmittion */
static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct ath_struct *ath = hu->priv;
@@ -156,8 +209,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
return 0;
}
- /*
- * Update power management enable flag with parameters of
+ /* Update power management enable flag with parameters of
* HCI sleep enable vendor specific HCI command.
*/
if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
@@ -187,40 +239,21 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
return skb_dequeue(&ath->txq);
}
-/* Recv data */
-static int ath_recv(struct hci_uart *hu, void *data, int count)
-{
- int ret;
-
- ret = hci_recv_stream_fragment(hu->hdev, data, count);
- if (ret < 0) {
- BT_ERR("Frame Reassembly Failed");
- return ret;
- }
-
- return count;
-}
-
-static struct hci_uart_proto athp = {
- .id = HCI_UART_ATH3K,
- .open = ath_open,
- .close = ath_close,
- .recv = ath_recv,
- .enqueue = ath_enqueue,
- .dequeue = ath_dequeue,
- .flush = ath_flush,
+static const struct hci_uart_proto athp = {
+ .id = HCI_UART_ATH3K,
+ .name = "ATH3K",
+ .open = ath_open,
+ .close = ath_close,
+ .flush = ath_flush,
+ .setup = ath_setup,
+ .recv = ath_recv,
+ .enqueue = ath_enqueue,
+ .dequeue = ath_dequeue,
};
int __init ath_init(void)
{
- int err = hci_uart_register_proto(&athp);
-
- if (!err)
- BT_INFO("HCIATH3K protocol initialized");
- else
- BT_ERR("HCIATH3K protocol registration failed");
-
- return err;
+ return hci_uart_register_proto(&athp);
}
int __exit ath_deinit(void)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
new file mode 100644
index 000000000000..1ec0b4a5ffa6
--- /dev/null
+++ b/drivers/bluetooth/hci_bcm.c
@@ -0,0 +1,153 @@
+/*
+ *
+ * Bluetooth HCI UART driver for Broadcom devices
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btbcm.h"
+#include "hci_uart.h"
+
+struct bcm_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+};
+
+static int bcm_open(struct hci_uart *hu)
+{
+ struct bcm_data *bcm;
+
+ BT_DBG("hu %p", hu);
+
+ bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
+ if (!bcm)
+ return -ENOMEM;
+
+ skb_queue_head_init(&bcm->txq);
+
+ hu->priv = bcm;
+ return 0;
+}
+
+static int bcm_close(struct hci_uart *hu)
+{
+ struct bcm_data *bcm = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&bcm->txq);
+ kfree_skb(bcm->rx_skb);
+ kfree(bcm);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int bcm_flush(struct hci_uart *hu)
+{
+ struct bcm_data *bcm = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&bcm->txq);
+
+ return 0;
+}
+
+static int bcm_setup(struct hci_uart *hu)
+{
+ BT_DBG("hu %p", hu);
+
+ hu->hdev->set_bdaddr = btbcm_set_bdaddr;
+
+ return btbcm_setup_patchram(hu->hdev);
+}
+
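+/* Standard H4 framing: reuse the shared h4_recv_buf() helper with the
+ * generic ACL/SCO/event descriptors and pass complete frames on to
+ * hci_recv_frame().
+ */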
+static const struct h4_recv_pkt bcm_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+static int bcm_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct bcm_data *bcm = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+ bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
+ if (IS_ERR(bcm->rx_skb)) {
+ int err = PTR_ERR(bcm->rx_skb);
+ BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ return err;
+ }
+
+ return count;
+}
+
+static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct bcm_data *bcm = hu->priv;
+
+ BT_DBG("hu %p skb %p", hu, skb);
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ skb_queue_tail(&bcm->txq, skb);
+
+ return 0;
+}
+
+static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
+{
+ struct bcm_data *bcm = hu->priv;
+
+ return skb_dequeue(&bcm->txq);
+}
+
+static const struct hci_uart_proto bcm_proto = {
+ .id = HCI_UART_BCM,
+ .name = "BCM",
+ .open = bcm_open,
+ .close = bcm_close,
+ .flush = bcm_flush,
+ .setup = bcm_setup,
+ .recv = bcm_recv,
+ .enqueue = bcm_enqueue,
+ .dequeue = bcm_dequeue,
+};
+
+int __init bcm_init(void)
+{
+ return hci_uart_register_proto(&bcm_proto);
+}
+
+int __exit bcm_deinit(void)
+{
+ return hci_uart_unregister_proto(&bcm_proto);
+}
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 21cc45b34f13..dc8e3d4356a0 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -47,8 +47,6 @@
#include "hci_uart.h"
-#define VERSION "0.3"
-
static bool txcrc = 1;
static bool hciextn = 1;
@@ -554,10 +552,10 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
}
/* Recv data */
-static int bcsp_recv(struct hci_uart *hu, void *data, int count)
+static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
{
struct bcsp_struct *bcsp = hu->priv;
- unsigned char *ptr;
+ const unsigned char *ptr;
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
@@ -735,8 +733,9 @@ static int bcsp_close(struct hci_uart *hu)
return 0;
}
-static struct hci_uart_proto bcsp = {
+static const struct hci_uart_proto bcsp = {
.id = HCI_UART_BCSP,
+ .name = "BCSP",
.open = bcsp_open,
.close = bcsp_close,
.enqueue = bcsp_enqueue,
@@ -747,14 +746,7 @@ static struct hci_uart_proto bcsp = {
int __init bcsp_init(void)
{
- int err = hci_uart_register_proto(&bcsp);
-
- if (!err)
- BT_INFO("HCI BCSP protocol initialized");
- else
- BT_ERR("HCI BCSP protocol registration failed");
-
- return err;
+ return hci_uart_register_proto(&bcsp);
}
int __exit bcsp_deinit(void)
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 66db9a803373..f7190f01e135 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -40,17 +40,14 @@
#include <linux/signal.h>
#include <linux/ioctl.h>
#include <linux/skbuff.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
-#define VERSION "1.2"
-
struct h4_struct {
- unsigned long rx_state;
- unsigned long rx_count;
struct sk_buff *rx_skb;
struct sk_buff_head txq;
};
@@ -117,18 +114,26 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
return 0;
}
+static const struct h4_recv_pkt h4_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
/* Recv data */
-static int h4_recv(struct hci_uart *hu, void *data, int count)
+static int h4_recv(struct hci_uart *hu, const void *data, int count)
{
- int ret;
+ struct h4_struct *h4 = hu->priv;
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ret = hci_recv_stream_fragment(hu->hdev, data, count);
- if (ret < 0) {
- BT_ERR("Frame Reassembly Failed");
- return ret;
+ h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+ h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
+ if (IS_ERR(h4->rx_skb)) {
+ int err = PTR_ERR(h4->rx_skb);
+ BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ return err;
}
return count;
@@ -140,8 +145,9 @@ static struct sk_buff *h4_dequeue(struct hci_uart *hu)
return skb_dequeue(&h4->txq);
}
-static struct hci_uart_proto h4p = {
+static const struct hci_uart_proto h4p = {
.id = HCI_UART_H4,
+ .name = "H4",
.open = h4_open,
.close = h4_close,
.recv = h4_recv,
@@ -152,17 +158,105 @@ static struct hci_uart_proto h4p = {
int __init h4_init(void)
{
- int err = hci_uart_register_proto(&h4p);
-
- if (!err)
- BT_INFO("HCI H4 protocol initialized");
- else
- BT_ERR("HCI H4 protocol registration failed");
-
- return err;
+ return hci_uart_register_proto(&h4p);
}
int __exit h4_deinit(void)
{
return hci_uart_unregister_proto(&h4p);
}
+
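+/* Reassemble H4 frames from a raw byte stream.  While no packet is in
+ * progress, the first byte selects a descriptor from @pkts and an skb of
+ * the descriptor's maximum length is allocated.  Bytes are accumulated
+ * until the fixed header is complete, at which point the expected length
+ * is extended by the variable-length field (0, 1 or 2 octets at @loff).
+ * Complete frames are handed to the descriptor's recv() callback; a
+ * partially received skb is returned so the caller can pass it back in
+ * on the next call, and an ERR_PTR is returned on framing errors.
+ */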
+struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+ const unsigned char *buffer, int count,
+ const struct h4_recv_pkt *pkts, int pkts_count)
+{
+ while (count) {
+ int i, len;
+
+ if (!skb) {
+ for (i = 0; i < pkts_count; i++) {
+ if (buffer[0] != (&pkts[i])->type)
+ continue;
+
+ skb = bt_skb_alloc((&pkts[i])->maxlen,
+ GFP_ATOMIC);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ bt_cb(skb)->pkt_type = (&pkts[i])->type;
+ bt_cb(skb)->expect = (&pkts[i])->hlen;
+ break;
+ }
+
+ /* Check for invalid packet type */
+ if (!skb)
+ return ERR_PTR(-EILSEQ);
+
+ count -= 1;
+ buffer += 1;
+ }
+
+ len = min_t(uint, bt_cb(skb)->expect - skb->len, count);
+ memcpy(skb_put(skb, len), buffer, len);
+
+ count -= len;
+ buffer += len;
+
+ /* Check for partial packet */
+ if (skb->len < bt_cb(skb)->expect)
+ continue;
+
+ for (i = 0; i < pkts_count; i++) {
+ if (bt_cb(skb)->pkt_type == (&pkts[i])->type)
+ break;
+ }
+
+ if (i >= pkts_count) {
+ kfree_skb(skb);
+ return ERR_PTR(-EILSEQ);
+ }
+
+ if (skb->len == (&pkts[i])->hlen) {
+ u16 dlen;
+
+ switch ((&pkts[i])->lsize) {
+ case 0:
+ /* No variable data length */
+ (&pkts[i])->recv(hdev, skb);
+ skb = NULL;
+ break;
+ case 1:
+ /* Single octet variable length */
+ dlen = skb->data[(&pkts[i])->loff];
+ bt_cb(skb)->expect += dlen;
+
+ if (skb_tailroom(skb) < dlen) {
+ kfree_skb(skb);
+ return ERR_PTR(-EMSGSIZE);
+ }
+ break;
+ case 2:
+ /* Double octet variable length */
+ dlen = get_unaligned_le16(skb->data +
+ (&pkts[i])->loff);
+ bt_cb(skb)->expect += dlen;
+
+ if (skb_tailroom(skb) < dlen) {
+ kfree_skb(skb);
+ return ERR_PTR(-EMSGSIZE);
+ }
+ break;
+ default:
+ /* Unsupported variable length */
+ kfree_skb(skb);
+ return ERR_PTR(-EILSEQ);
+ }
+ } else {
+ /* Complete frame */
+ (&pkts[i])->recv(hdev, skb);
+ skb = NULL;
+ }
+ }
+
+ return skb;
+}
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index ec0fa7732c0d..3455cecc9ecf 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -511,10 +511,10 @@ static void h5_reset_rx(struct h5 *h5)
clear_bit(H5_RX_ESC, &h5->flags);
}
-static int h5_recv(struct hci_uart *hu, void *data, int count)
+static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
struct h5 *h5 = hu->priv;
- unsigned char *ptr = data;
+ const unsigned char *ptr = data;
BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
count);
@@ -743,8 +743,9 @@ static int h5_flush(struct hci_uart *hu)
return 0;
}
-static struct hci_uart_proto h5p = {
+static const struct hci_uart_proto h5p = {
.id = HCI_UART_3WIRE,
+ .name = "Three-wire (H5)",
.open = h5_open,
.close = h5_close,
.recv = h5_recv,
@@ -755,14 +756,7 @@ static struct hci_uart_proto h5p = {
int __init h5_init(void)
{
- int err = hci_uart_register_proto(&h5p);
-
- if (!err)
- BT_INFO("HCI Three-wire UART (H5) protocol initialized");
- else
- BT_ERR("HCI Three-wire UART (H5) protocol init failed");
-
- return err;
+ return hci_uart_register_proto(&h5p);
}
int __exit h5_deinit(void)
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
new file mode 100644
index 000000000000..5dd07bf05236
--- /dev/null
+++ b/drivers/bluetooth/hci_intel.c
@@ -0,0 +1,31 @@
+/*
+ *
+ * Bluetooth HCI UART driver for Intel devices
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index dc487b5d1156..5c9a73f02664 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -44,13 +44,15 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include "btintel.h"
+#include "btbcm.h"
#include "hci_uart.h"
-#define VERSION "2.2"
+#define VERSION "2.3"
-static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
+static const struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
-int hci_uart_register_proto(struct hci_uart_proto *p)
+int hci_uart_register_proto(const struct hci_uart_proto *p)
{
if (p->id >= HCI_UART_MAX_PROTO)
return -EINVAL;
@@ -60,10 +62,12 @@ int hci_uart_register_proto(struct hci_uart_proto *p)
hup[p->id] = p;
+ BT_INFO("HCI UART protocol %s registered", p->name);
+
return 0;
}
-int hci_uart_unregister_proto(struct hci_uart_proto *p)
+int hci_uart_unregister_proto(const struct hci_uart_proto *p)
{
if (p->id >= HCI_UART_MAX_PROTO)
return -EINVAL;
@@ -76,7 +80,7 @@ int hci_uart_unregister_proto(struct hci_uart_proto *p)
return 0;
}
-static struct hci_uart_proto *hci_uart_get_proto(unsigned int id)
+static const struct hci_uart_proto *hci_uart_get_proto(unsigned int id)
{
if (id >= HCI_UART_MAX_PROTO)
return NULL;
@@ -261,6 +265,54 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
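+/* Generic setup callback for UART controllers.  A protocol-specific setup
+ * routine takes precedence; otherwise, when the line discipline was opened
+ * with HCI_UART_VND_DETECT, the local version is read and the manufacturer
+ * field (2 = Intel, 15 = Broadcom) selects the vendor-specific
+ * set_bdaddr/check_bdaddr helpers.
+ */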
+static int hci_uart_setup(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct hci_rp_read_local_version *ver;
+ struct sk_buff *skb;
+
+ if (hu->proto->setup)
+ return hu->proto->setup(hu);
+
+ if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading local version information failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return 0;
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s: Event length mismatch for version information",
+ hdev->name);
+ goto done;
+ }
+
+ ver = (struct hci_rp_read_local_version *)skb->data;
+
+ switch (le16_to_cpu(ver->manufacturer)) {
+#ifdef CONFIG_BT_HCIUART_INTEL
+ case 2:
+ hdev->set_bdaddr = btintel_set_bdaddr;
+ btintel_check_bdaddr(hdev);
+ break;
+#endif
+#ifdef CONFIG_BT_HCIUART_BCM
+ case 15:
+ hdev->set_bdaddr = btbcm_set_bdaddr;
+ btbcm_check_bdaddr(hdev);
+ break;
+#endif
+ }
+
+done:
+ kfree_skb(skb);
+ return 0;
+}
+
/* ------ LDISC part ------ */
/* hci_uart_tty_open
*
@@ -316,7 +368,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
*/
static void hci_uart_tty_close(struct tty_struct *tty)
{
- struct hci_uart *hu = (void *)tty->disc_data;
+ struct hci_uart *hu = tty->disc_data;
struct hci_dev *hdev;
BT_DBG("tty %p", tty);
@@ -355,7 +407,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
*/
static void hci_uart_tty_wakeup(struct tty_struct *tty)
{
- struct hci_uart *hu = (void *)tty->disc_data;
+ struct hci_uart *hu = tty->disc_data;
BT_DBG("");
@@ -383,9 +435,10 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
*
* Return Value: None
*/
-static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
{
- struct hci_uart *hu = (void *)tty->disc_data;
+ struct hci_uart *hu = tty->disc_data;
if (!hu || tty != hu->tty)
return;
@@ -394,7 +447,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
return;
spin_lock(&hu->rx_lock);
- hu->proto->recv(hu, (void *) data, count);
+ hu->proto->recv(hu, data, count);
if (hu->hdev)
hu->hdev->stat.byte_rx += count;
@@ -426,6 +479,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
hdev->close = hci_uart_close;
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
+ hdev->setup = hci_uart_setup;
SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
@@ -458,7 +512,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
static int hci_uart_set_proto(struct hci_uart *hu, int id)
{
- struct hci_uart_proto *p;
+ const struct hci_uart_proto *p;
int err;
p = hci_uart_get_proto(id);
@@ -486,9 +540,10 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
BIT(HCI_UART_RESET_ON_INIT) |
BIT(HCI_UART_CREATE_AMP) |
BIT(HCI_UART_INIT_PENDING) |
- BIT(HCI_UART_EXT_CONFIG);
+ BIT(HCI_UART_EXT_CONFIG) |
+ BIT(HCI_UART_VND_DETECT);
- if ((flags & ~valid_flags))
+ if (flags & ~valid_flags)
return -EINVAL;
hu->hdev_flags = flags;
@@ -509,10 +564,10 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
*
* Return Value: Command dependent
*/
-static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
- unsigned int cmd, unsigned long arg)
+static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
- struct hci_uart *hu = (void *)tty->disc_data;
+ struct hci_uart *hu = tty->disc_data;
int err = 0;
BT_DBG("");
@@ -566,19 +621,19 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
* We don't provide read/write/poll interface for user space.
*/
static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr)
+ unsigned char __user *buf, size_t nr)
{
return 0;
}
static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *data, size_t count)
+ const unsigned char *data, size_t count)
{
return 0;
}
static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
- struct file *filp, poll_table *wait)
+ struct file *filp, poll_table *wait)
{
return 0;
}
@@ -626,6 +681,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_3WIRE
h5_init();
#endif
+#ifdef CONFIG_BT_HCIUART_BCM
+ bcm_init();
+#endif
return 0;
}
@@ -649,6 +707,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_3WIRE
h5_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_BCM
+ bcm_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 69a90b1b5ff5..9ee24b075f79 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -370,10 +370,10 @@ static inline int ll_check_data_len(struct hci_dev *hdev, struct ll_struct *ll,
}
/* Recv data */
-static int ll_recv(struct hci_uart *hu, void *data, int count)
+static int ll_recv(struct hci_uart *hu, const void *data, int count)
{
struct ll_struct *ll = hu->priv;
- char *ptr;
+ const char *ptr;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
@@ -505,8 +505,9 @@ static struct sk_buff *ll_dequeue(struct hci_uart *hu)
return skb_dequeue(&ll->txq);
}
-static struct hci_uart_proto llp = {
+static const struct hci_uart_proto llp = {
.id = HCI_UART_LL,
+ .name = "LL",
.open = ll_open,
.close = ll_close,
.recv = ll_recv,
@@ -517,14 +518,7 @@ static struct hci_uart_proto llp = {
int __init ll_init(void)
{
- int err = hci_uart_register_proto(&llp);
-
- if (!err)
- BT_INFO("HCILL protocol initialized");
- else
- BT_ERR("HCILL protocol registration failed");
-
- return err;
+ return hci_uart_register_proto(&llp);
}
int __exit ll_deinit(void)
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 247488edcbf9..72120a5ba13c 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 6
+#define HCI_UART_MAX_PROTO 8
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -43,21 +43,26 @@
#define HCI_UART_H4DS 3
#define HCI_UART_LL 4
#define HCI_UART_ATH3K 5
+#define HCI_UART_INTEL 6
+#define HCI_UART_BCM 7
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
#define HCI_UART_CREATE_AMP 2
#define HCI_UART_INIT_PENDING 3
#define HCI_UART_EXT_CONFIG 4
+#define HCI_UART_VND_DETECT 5
struct hci_uart;
struct hci_uart_proto {
unsigned int id;
+ const char *name;
int (*open)(struct hci_uart *hu);
int (*close)(struct hci_uart *hu);
int (*flush)(struct hci_uart *hu);
- int (*recv)(struct hci_uart *hu, void *data, int len);
+ int (*setup)(struct hci_uart *hu);
+ int (*recv)(struct hci_uart *hu, const void *data, int len);
int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb);
struct sk_buff *(*dequeue)(struct hci_uart *hu);
};
@@ -71,7 +76,7 @@ struct hci_uart {
struct work_struct init_ready;
struct work_struct write_work;
- struct hci_uart_proto *proto;
+ const struct hci_uart_proto *proto;
void *priv;
struct sk_buff *tx_skb;
@@ -87,14 +92,48 @@ struct hci_uart {
#define HCI_UART_SENDING 1
#define HCI_UART_TX_WAKEUP 2
-int hci_uart_register_proto(struct hci_uart_proto *p);
-int hci_uart_unregister_proto(struct hci_uart_proto *p);
+int hci_uart_register_proto(const struct hci_uart_proto *p);
+int hci_uart_unregister_proto(const struct hci_uart_proto *p);
int hci_uart_tx_wakeup(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
#ifdef CONFIG_BT_HCIUART_H4
int h4_init(void);
int h4_deinit(void);
+
+struct h4_recv_pkt {
+ u8 type; /* Packet type */
+ u8 hlen; /* Header length */
+ u8 loff; /* Data length offset in header */
+ u8 lsize; /* Data length field size */
+ u16 maxlen; /* Max overall packet length */
+ int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
+};
+
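+/* Descriptors for the three standard HCI packet types: header length,
+ * offset and width of the payload-length field within that header, and
+ * the maximum frame size used when allocating the receive skb.
+ */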
+#define H4_RECV_ACL \
+ .type = HCI_ACLDATA_PKT, \
+ .hlen = HCI_ACL_HDR_SIZE, \
+ .loff = 2, \
+ .lsize = 2, \
+ .maxlen = HCI_MAX_FRAME_SIZE \
+
+#define H4_RECV_SCO \
+ .type = HCI_SCODATA_PKT, \
+ .hlen = HCI_SCO_HDR_SIZE, \
+ .loff = 2, \
+ .lsize = 1, \
+ .maxlen = HCI_MAX_SCO_SIZE
+
+#define H4_RECV_EVENT \
+ .type = HCI_EVENT_PKT, \
+ .hlen = HCI_EVENT_HDR_SIZE, \
+ .loff = 1, \
+ .lsize = 1, \
+ .maxlen = HCI_MAX_EVENT_SIZE
+
+struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+ const unsigned char *buffer, int count,
+ const struct h4_recv_pkt *pkts, int pkts_count);
#endif
#ifdef CONFIG_BT_HCIUART_BCSP
@@ -116,3 +155,8 @@ int ath_deinit(void);
int h5_init(void);
int h5_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_BCM
+int bcm_init(void);
+int bcm_deinit(void);
+#endif
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index b99729e36860..a1d4af6df3f5 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -4,6 +4,41 @@
menu "Bus devices"
+config ARM_CCI
+ bool
+
+config ARM_CCI400_COMMON
+ bool
+ select ARM_CCI
+
+config ARM_CCI400_PMU
+ bool "ARM CCI400 PMU support"
+ default y
+ depends on ARM || ARM64
+ depends on HW_PERF_EVENTS
+ select ARM_CCI400_COMMON
+ help
 Support for PMU event monitoring on the ARM CCI cache coherent
+ interconnect.
+
+ If unsure, say Y
+
+config ARM_CCI400_PORT_CTRL
+ bool
+ depends on ARM && OF && CPU_V7
+ select ARM_CCI400_COMMON
+ help
+ Low level power management driver for CCI400 cache coherent
+ interconnect for ARM platforms.
+
+config ARM_CCN
+ bool "ARM CCN driver support"
+ depends on ARM || ARM64
+ depends on PERF_EVENTS
+ help
+ PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
+ interconnect.
+
config BRCMSTB_GISB_ARB
bool "Broadcom STB GISB bus arbiter"
depends on ARM || MIPS
@@ -20,6 +55,19 @@ config IMX_WEIM
The WEIM(Wireless External Interface Module) works like a bus.
You can attach many different devices on it, such as NOR, onenand.
+config MIPS_CDMM
+ bool "MIPS Common Device Memory Map (CDMM) Driver"
+ depends on CPU_MIPSR2
+ help
+ Driver needed for the MIPS Common Device Memory Map bus in MIPS
+ cores. This bus is for per-CPU tightly coupled devices such as the
+ Fast Debug Channel (FDC).
+
+ For this to work, either your bootloader needs to enable the CDMM
+ region at an unused physical address on the boot CPU, or else your
+ platform code needs to implement mips_cdmm_phys_base() (see
+ asm/cdmm.h).
+
config MVEBU_MBUS
bool
depends on PLAT_ORION
@@ -27,15 +75,6 @@ config MVEBU_MBUS
Driver needed for the MBus configuration on Marvell EBU SoCs
(Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP).
-config OMAP_OCP2SCP
- tristate "OMAP OCP2SCP DRIVER"
- depends on ARCH_OMAP2PLUS
- help
- Driver to enable ocp2scp module which transforms ocp interface
- protocol to scp protocol. In OMAP4, USB PHY is connected via
- OCP2SCP and in OMAP5, both USB PHY and SATA PHY is connected via
- OCP2SCP.
-
config OMAP_INTERCONNECT
tristate "OMAP INTERCONNECT DRIVER"
depends on ARCH_OMAP2PLUS
@@ -43,20 +82,27 @@ config OMAP_INTERCONNECT
help
Driver to enable OMAP interconnect error handling driver.
-config ARM_CCI
- bool "ARM CCI driver support"
- depends on ARM && OF && CPU_V7
+config OMAP_OCP2SCP
+ tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
help
- Driver supporting the CCI cache coherent interconnect for ARM
- platforms.
+ Driver to enable ocp2scp module which transforms ocp interface
+ protocol to scp protocol. In OMAP4, USB PHY is connected via
+ OCP2SCP and in OMAP5, both USB PHY and SATA PHY is connected via
+ OCP2SCP.
-config ARM_CCN
- bool "ARM CCN driver support"
- depends on ARM || ARM64
- depends on PERF_EVENTS
+config SIMPLE_PM_BUS
+ bool "Simple Power-Managed Bus Driver"
+ depends on OF && PM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
- PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
- interconnect.
+ Driver for transparent buses that don't need a real driver, but
+ where the bus controller is part of a PM domain, or under the control
+ of a functional clock, and thus relies on runtime PM for managing
+ this PM domain and/or clock.
+ An example of such a bus controller is the Renesas Bus State
+ Controller (BSC, sometimes called "LBSC within Bus Bridge", or
+ "External Bus Interface") as found on several Renesas ARM SoCs.
config VEXPRESS_CONFIG
bool "Versatile Express configuration bus"
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 2973c18cbcc2..790e7b933fb2 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -2,16 +2,18 @@
# Makefile for the bus drivers.
#
+# Interconnect bus drivers for ARM platforms
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
+obj-$(CONFIG_ARM_CCN) += arm-ccn.o
+
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
-obj-$(CONFIG_IMX_WEIM) += imx-weim.o
-obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
-obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_IMX_WEIM) += imx-weim.o
+obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
+obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
# Interconnect bus driver for OMAP SoCs.
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
-# Interconnect bus drivers for ARM platforms
-obj-$(CONFIG_ARM_CCI) += arm-cci.o
-obj-$(CONFIG_ARM_CCN) += arm-ccn.o
-
+obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 84fd66057dad..5340604b23a4 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -29,41 +29,36 @@
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
-#define DRIVER_NAME "CCI-400"
-#define DRIVER_NAME_PMU DRIVER_NAME " PMU"
-
-#define CCI_PORT_CTRL 0x0
-#define CCI_CTRL_STATUS 0xc
-
-#define CCI_ENABLE_SNOOP_REQ 0x1
-#define CCI_ENABLE_DVM_REQ 0x2
-#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+static void __iomem *cci_ctrl_base;
+static unsigned long cci_ctrl_phys;
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
struct cci_nb_ports {
unsigned int nb_ace;
unsigned int nb_ace_lite;
};
-enum cci_ace_port_type {
- ACE_INVALID_PORT = 0x0,
- ACE_PORT,
- ACE_LITE_PORT,
+static const struct cci_nb_ports cci400_ports = {
+ .nb_ace = 2,
+ .nb_ace_lite = 3
};
-struct cci_ace_port {
- void __iomem *base;
- unsigned long phys;
- enum cci_ace_port_type type;
- struct device_node *dn;
-};
+#define CCI400_PORTS_DATA (&cci400_ports)
+#else
+#define CCI400_PORTS_DATA (NULL)
+#endif
-static struct cci_ace_port *ports;
-static unsigned int nb_cci_ports;
+static const struct of_device_id arm_cci_matches[] = {
+#ifdef CONFIG_ARM_CCI400_COMMON
+ {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
+#endif
+ {},
+};
-static void __iomem *cci_ctrl_base;
-static unsigned long cci_ctrl_phys;
+#ifdef CONFIG_ARM_CCI400_PMU
-#ifdef CONFIG_HW_PERF_EVENTS
+#define DRIVER_NAME "CCI-400"
+#define DRIVER_NAME_PMU DRIVER_NAME " PMU"
#define CCI_PMCR 0x0100
#define CCI_PID2 0x0fe8
@@ -75,20 +70,6 @@ static unsigned long cci_ctrl_phys;
#define CCI_PID2_REV_MASK 0xf0
#define CCI_PID2_REV_SHIFT 4
-/* Port ids */
-#define CCI_PORT_S0 0
-#define CCI_PORT_S1 1
-#define CCI_PORT_S2 2
-#define CCI_PORT_S3 3
-#define CCI_PORT_S4 4
-#define CCI_PORT_M0 5
-#define CCI_PORT_M1 6
-#define CCI_PORT_M2 7
-
-#define CCI_REV_R0 0
-#define CCI_REV_R1 1
-#define CCI_REV_R1_PX 5
-
#define CCI_PMU_EVT_SEL 0x000
#define CCI_PMU_CNTR 0x004
#define CCI_PMU_CNTR_CTRL 0x008
@@ -100,76 +81,22 @@ static unsigned long cci_ctrl_phys;
#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1)
-/*
- * Instead of an event id to monitor CCI cycles, a dedicated counter is
- * provided. Use 0xff to represent CCI cycles and hope that no future revisions
- * make use of this event in hardware.
- */
-enum cci400_perf_events {
- CCI_PMU_CYCLES = 0xff
-};
-
-#define CCI_PMU_EVENT_MASK 0xff
+#define CCI_PMU_EVENT_MASK 0xffUL
#define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7)
#define CCI_PMU_EVENT_CODE(event) (event & 0x1f)
#define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */
-#define CCI_PMU_CYCLE_CNTR_IDX 0
-#define CCI_PMU_CNTR0_IDX 1
-#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
-
-/*
- * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
- * ports and bits 4:0 are event codes. There are different event codes
- * associated with each port type.
- *
- * Additionally, the range of events associated with the port types changed
- * between Rev0 and Rev1.
- *
- * The constants below define the range of valid codes for each port type for
- * the different revisions and are used to validate the event to be monitored.
- */
-
-#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
-#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
-#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
-#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a
-
-#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
-#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
-#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
-#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11
-
-struct pmu_port_event_ranges {
- u8 slave_min;
- u8 slave_max;
- u8 master_min;
- u8 master_max;
-};
-
-static struct pmu_port_event_ranges port_event_range[] = {
- [CCI_REV_R0] = {
- .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
- .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
- .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
- .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
- },
- [CCI_REV_R1] = {
- .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
- .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
- .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
- .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
- },
+/* Types of interfaces that can generate events */
+enum {
+ CCI_IF_SLAVE,
+ CCI_IF_MASTER,
+ CCI_IF_MAX,
};
-/*
- * Export different PMU names for the different revisions so userspace knows
- * because the event ids are different
- */
-static char *const pmu_names[] = {
- [CCI_REV_R0] = "CCI_400",
- [CCI_REV_R1] = "CCI_400_r1",
+struct event_range {
+ u32 min;
+ u32 max;
};
struct cci_pmu_hw_events {
@@ -178,13 +105,20 @@ struct cci_pmu_hw_events {
raw_spinlock_t pmu_lock;
};
+struct cci_pmu_model {
+ char *name;
+ struct event_range event_ranges[CCI_IF_MAX];
+};
+
+static struct cci_pmu_model cci_pmu_models[];
+
struct cci_pmu {
void __iomem *base;
struct pmu pmu;
int nr_irqs;
int irqs[CCI_PMU_MAX_HW_EVENTS];
unsigned long active_irqs;
- struct pmu_port_event_ranges *port_ranges;
+ const struct cci_pmu_model *model;
struct cci_pmu_hw_events hw_events;
struct platform_device *plat_device;
int num_events;
@@ -196,52 +130,63 @@ static struct cci_pmu *pmu;
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
-static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
-{
- int i;
-
- for (i = 0; i < nr_irqs; i++)
- if (irq == irqs[i])
- return true;
-
- return false;
-}
+/* Port ids */
+#define CCI_PORT_S0 0
+#define CCI_PORT_S1 1
+#define CCI_PORT_S2 2
+#define CCI_PORT_S3 3
+#define CCI_PORT_S4 4
+#define CCI_PORT_M0 5
+#define CCI_PORT_M1 6
+#define CCI_PORT_M2 7
-static int probe_cci_revision(void)
-{
- int rev;
- rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
- rev >>= CCI_PID2_REV_SHIFT;
+#define CCI_REV_R0 0
+#define CCI_REV_R1 1
+#define CCI_REV_R1_PX 5
- if (rev < CCI_REV_R1_PX)
- return CCI_REV_R0;
- else
- return CCI_REV_R1;
-}
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+ CCI_PMU_CYCLES = 0xff
+};
-static struct pmu_port_event_ranges *port_range_by_rev(void)
-{
- int rev = probe_cci_revision();
+#define CCI_PMU_CYCLE_CNTR_IDX 0
+#define CCI_PMU_CNTR0_IDX 1
+#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
- return &port_event_range[rev];
-}
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
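+/*
+ * Example: event id 0x33 decodes to source (0x33 >> 5) & 0x7 = 1, i.e.
+ * slave interface S1, and code 0x33 & 0x1f = 0x13, which lies inside the
+ * Rev0 slave port range defined below.
+ */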
-static int pmu_is_valid_slave_event(u8 ev_code)
-{
- return pmu->port_ranges->slave_min <= ev_code &&
- ev_code <= pmu->port_ranges->slave_max;
-}
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a
-static int pmu_is_valid_master_event(u8 ev_code)
-{
- return pmu->port_ranges->master_min <= ev_code &&
- ev_code <= pmu->port_ranges->master_max;
-}
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11
-static int pmu_validate_hw_event(u8 hw_event)
+static int pmu_validate_hw_event(unsigned long hw_event)
{
u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+ int if_type;
+
+ if (hw_event & ~CCI_PMU_EVENT_MASK)
+ return -ENOENT;
switch (ev_source) {
case CCI_PORT_S0:
@@ -250,21 +195,44 @@ static int pmu_validate_hw_event(u8 hw_event)
case CCI_PORT_S3:
case CCI_PORT_S4:
/* Slave Interface */
- if (pmu_is_valid_slave_event(ev_code))
- return hw_event;
+ if_type = CCI_IF_SLAVE;
break;
case CCI_PORT_M0:
case CCI_PORT_M1:
case CCI_PORT_M2:
/* Master Interface */
- if (pmu_is_valid_master_event(ev_code))
- return hw_event;
+ if_type = CCI_IF_MASTER;
break;
+ default:
+ return -ENOENT;
}
+ if (ev_code >= pmu->model->event_ranges[if_type].min &&
+ ev_code <= pmu->model->event_ranges[if_type].max)
+ return hw_event;
+
return -ENOENT;
}
+static int probe_cci_revision(void)
+{
+ int rev;
+ rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+ rev >>= CCI_PID2_REV_SHIFT;
+
+ if (rev < CCI_REV_R1_PX)
+ return CCI_REV_R0;
+ else
+ return CCI_REV_R1;
+}
+
+static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
+{
+ if (platform_has_secure_cci_access())
+ return &cci_pmu_models[probe_cci_revision()];
+ return NULL;
+}
+
static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
@@ -293,7 +261,6 @@ static void pmu_enable_counter(int idx)
static void pmu_set_event(int idx, unsigned long event)
{
- event &= CCI_PMU_EVENT_MASK;
pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
}
@@ -310,7 +277,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
struct hw_perf_event *hw_event = &event->hw;
- unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+ unsigned long cci_event = hw_event->config_base;
int idx;
if (cci_event == CCI_PMU_CYCLES) {
@@ -331,7 +298,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
static int pmu_map_event(struct perf_event *event)
{
int mapping;
- u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+ unsigned long config = event->attr.config;
if (event->attr.type < PERF_TYPE_MAX)
return -ENOENT;
@@ -660,12 +627,21 @@ static void cci_pmu_del(struct perf_event *event, int flags)
}
static int
-validate_event(struct cci_pmu_hw_events *hw_events,
- struct perf_event *event)
+validate_event(struct pmu *cci_pmu,
+ struct cci_pmu_hw_events *hw_events,
+ struct perf_event *event)
{
if (is_software_event(event))
return 1;
+ /*
+ * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+ * core perf code won't check that the pmu->ctx == leader->ctx
+ * until after pmu->event_init(event).
+ */
+ if (event->pmu != cci_pmu)
+ return 0;
+
if (event->state < PERF_EVENT_STATE_OFF)
return 1;
@@ -684,18 +660,18 @@ validate_group(struct perf_event *event)
* Initialise the fake PMU. We only need to populate the
* used_mask for the purposes of validation.
*/
- .used_mask = CPU_BITS_NONE,
+ .used_mask = { 0 },
};
- if (!validate_event(&fake_pmu, leader))
+ if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
- if (!validate_event(&fake_pmu, sibling))
+ if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
- if (!validate_event(&fake_pmu, event))
+ if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
@@ -831,9 +807,9 @@ static const struct attribute_group *pmu_attr_groups[] = {
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
- char *name = pmu_names[probe_cci_revision()];
+ char *name = cci_pmu->model->name;
cci_pmu->pmu = (struct pmu) {
- .name = pmu_names[probe_cci_revision()],
+ .name = cci_pmu->model->name,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = cci_pmu_enable,
.pmu_disable = cci_pmu_disable,
@@ -886,22 +862,93 @@ static struct notifier_block cci_pmu_cpu_nb = {
.priority = CPU_PRI_PERF + 1,
};
+static struct cci_pmu_model cci_pmu_models[] = {
+ [CCI_REV_R0] = {
+ .name = "CCI_400",
+ .event_ranges = {
+ [CCI_IF_SLAVE] = {
+ CCI_REV_R0_SLAVE_PORT_MIN_EV,
+ CCI_REV_R0_SLAVE_PORT_MAX_EV,
+ },
+ [CCI_IF_MASTER] = {
+ CCI_REV_R0_MASTER_PORT_MIN_EV,
+ CCI_REV_R0_MASTER_PORT_MAX_EV,
+ },
+ },
+ },
+ [CCI_REV_R1] = {
+ .name = "CCI_400_r1",
+ .event_ranges = {
+ [CCI_IF_SLAVE] = {
+ CCI_REV_R1_SLAVE_PORT_MIN_EV,
+ CCI_REV_R1_SLAVE_PORT_MAX_EV,
+ },
+ [CCI_IF_MASTER] = {
+ CCI_REV_R1_MASTER_PORT_MIN_EV,
+ CCI_REV_R1_MASTER_PORT_MAX_EV,
+ },
+ },
+ },
+};
+
static const struct of_device_id arm_cci_pmu_matches[] = {
{
.compatible = "arm,cci-400-pmu",
+ .data = NULL,
+ },
+ {
+ .compatible = "arm,cci-400-pmu,r0",
+ .data = &cci_pmu_models[CCI_REV_R0],
+ },
+ {
+ .compatible = "arm,cci-400-pmu,r1",
+ .data = &cci_pmu_models[CCI_REV_R1],
},
{},
};
+static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
+{
+ const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
+ pdev->dev.of_node);
+ if (!match)
+ return NULL;
+ if (match->data)
+ return match->data;
+
+ dev_warn(&pdev->dev, "DEPRECATED compatible property,"
+ "requires secure access to CCI registers");
+ return probe_cci_model(pdev);
+}
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++)
+ if (irq == irqs[i])
+ return true;
+
+ return false;
+}
+
static int cci_pmu_probe(struct platform_device *pdev)
{
struct resource *res;
int i, ret, irq;
+ const struct cci_pmu_model *model;
+
+ model = get_cci_model(pdev);
+ if (!model) {
+ dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+ return -ENODEV;
+ }
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
+ pmu->model = model;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pmu->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pmu->base))
@@ -933,12 +980,6 @@ static int cci_pmu_probe(struct platform_device *pdev)
return -EINVAL;
}
- pmu->port_ranges = port_range_by_rev();
- if (!pmu->port_ranges) {
- dev_warn(&pdev->dev, "CCI PMU version not supported\n");
- return -EINVAL;
- }
-
raw_spin_lock_init(&pmu->hw_events.pmu_lock);
mutex_init(&pmu->reserve_mutex);
atomic_set(&pmu->active_events, 0);
@@ -952,6 +993,7 @@ static int cci_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pr_info("ARM %s PMU driver probed", pmu->model->name);
return 0;
}
@@ -963,7 +1005,66 @@ static int cci_platform_probe(struct platform_device *pdev)
return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}
-#endif /* CONFIG_HW_PERF_EVENTS */
+static struct platform_driver cci_pmu_driver = {
+ .driver = {
+ .name = DRIVER_NAME_PMU,
+ .of_match_table = arm_cci_pmu_matches,
+ },
+ .probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cci_pmu_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&cci_platform_driver);
+}
+
+#else /* !CONFIG_ARM_CCI400_PMU */
+
+static int __init cci_platform_init(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ARM_CCI400_PMU */
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+
+#define CCI_PORT_CTRL 0x0
+#define CCI_CTRL_STATUS 0xc
+
+#define CCI_ENABLE_SNOOP_REQ 0x1
+#define CCI_ENABLE_DVM_REQ 0x2
+#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+
+enum cci_ace_port_type {
+ ACE_INVALID_PORT = 0x0,
+ ACE_PORT,
+ ACE_LITE_PORT,
+};
+
+struct cci_ace_port {
+ void __iomem *base;
+ unsigned long phys;
+ enum cci_ace_port_type type;
+ struct device_node *dn;
+};
+
+static struct cci_ace_port *ports;
+static unsigned int nb_cci_ports;
struct cpu_port {
u64 mpidr;
@@ -1284,36 +1385,20 @@ int notrace __cci_control_port_by_index(u32 port, bool enable)
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
-static const struct cci_nb_ports cci400_ports = {
- .nb_ace = 2,
- .nb_ace_lite = 3
-};
-
-static const struct of_device_id arm_cci_matches[] = {
- {.compatible = "arm,cci-400", .data = &cci400_ports },
- {},
-};
-
static const struct of_device_id arm_cci_ctrl_if_matches[] = {
{.compatible = "arm,cci-400-ctrl-if", },
{},
};
-static int cci_probe(void)
+static int cci_probe_ports(struct device_node *np)
{
struct cci_nb_ports const *cci_config;
int ret, i, nb_ace = 0, nb_ace_lite = 0;
- struct device_node *np, *cp;
+ struct device_node *cp;
struct resource res;
const char *match_str;
bool is_ace;
- np = of_find_matching_node(NULL, arm_cci_matches);
- if (!np)
- return -ENODEV;
-
- if (!of_device_is_available(np))
- return -ENODEV;
cci_config = of_match_node(arm_cci_matches, np)->data;
if (!cci_config)
@@ -1325,17 +1410,6 @@ static int cci_probe(void)
if (!ports)
return -ENOMEM;
- ret = of_address_to_resource(np, 0, &res);
- if (!ret) {
- cci_ctrl_base = ioremap(res.start, resource_size(&res));
- cci_ctrl_phys = res.start;
- }
- if (ret || !cci_ctrl_base) {
- WARN(1, "unable to ioremap CCI ctrl\n");
- ret = -ENXIO;
- goto memalloc_err;
- }
-
for_each_child_of_node(np, cp) {
if (!of_match_node(arm_cci_ctrl_if_matches, cp))
continue;
@@ -1395,12 +1469,37 @@ static int cci_probe(void)
sync_cache_w(&cpu_port);
__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
pr_info("ARM CCI driver probed\n");
+
return 0;
+}
+#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
+static inline int cci_probe_ports(struct device_node *np)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
-memalloc_err:
+static int cci_probe(void)
+{
+ int ret;
+ struct device_node *np;
+ struct resource res;
+
+ np = of_find_matching_node(NULL, arm_cci_matches);
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
- kfree(ports);
- return ret;
+ ret = of_address_to_resource(np, 0, &res);
+ if (!ret) {
+ cci_ctrl_base = ioremap(res.start, resource_size(&res));
+ cci_ctrl_phys = res.start;
+ }
+ if (ret || !cci_ctrl_base) {
+ WARN(1, "unable to ioremap CCI ctrl\n");
+ return -ENXIO;
+ }
+
+ return cci_probe_ports(np);
}
static int cci_init_status = -EAGAIN;
@@ -1418,42 +1517,6 @@ static int cci_init(void)
return cci_init_status;
}
-#ifdef CONFIG_HW_PERF_EVENTS
-static struct platform_driver cci_pmu_driver = {
- .driver = {
- .name = DRIVER_NAME_PMU,
- .of_match_table = arm_cci_pmu_matches,
- },
- .probe = cci_pmu_probe,
-};
-
-static struct platform_driver cci_platform_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = arm_cci_matches,
- },
- .probe = cci_platform_probe,
-};
-
-static int __init cci_platform_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&cci_pmu_driver);
- if (ret)
- return ret;
-
- return platform_driver_register(&cci_platform_driver);
-}
-
-#else
-
-static int __init cci_platform_init(void)
-{
- return 0;
-}
-
-#endif
/*
* To sort out early init call ordering, a helper function is provided to
* check if the CCI driver has been initialized. The function checks if the driver
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 0958b6981773..e98d15eaa799 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -142,7 +142,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
struct device_node *child;
- int ret;
+ int ret, have_child = 0;
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);
@@ -155,14 +155,15 @@ static int __init weim_parse_dt(struct platform_device *pdev,
continue;
ret = weim_timing_setup(child, base, devtype);
- if (ret) {
- dev_err(&pdev->dev, "%s set timing failed.\n",
+ if (ret)
+ dev_warn(&pdev->dev, "%s set timing failed.\n",
child->full_name);
- return ret;
- }
+ else
+ have_child = 1;
}
- ret = of_platform_populate(pdev->dev.of_node,
+ if (have_child)
+ ret = of_platform_populate(pdev->dev.of_node,
of_default_bus_match_table,
NULL, &pdev->dev);
if (ret)
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
new file mode 100644
index 000000000000..5bd792c68f9b
--- /dev/null
+++ b/drivers/bus/mips_cdmm.c
@@ -0,0 +1,716 @@
+/*
+ * Bus driver for MIPS Common Device Memory Map (CDMM).
+ *
+ * Copyright (C) 2014-2015 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/cdmm.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+/* Access control and status register fields */
+#define CDMM_ACSR_DEVTYPE_SHIFT 24
+#define CDMM_ACSR_DEVTYPE (255ul << CDMM_ACSR_DEVTYPE_SHIFT)
+#define CDMM_ACSR_DEVSIZE_SHIFT 16
+#define CDMM_ACSR_DEVSIZE (31ul << CDMM_ACSR_DEVSIZE_SHIFT)
+#define CDMM_ACSR_DEVREV_SHIFT 12
+#define CDMM_ACSR_DEVREV (15ul << CDMM_ACSR_DEVREV_SHIFT)
+#define CDMM_ACSR_UW (1ul << 3)
+#define CDMM_ACSR_UR (1ul << 2)
+#define CDMM_ACSR_SW (1ul << 1)
+#define CDMM_ACSR_SR (1ul << 0)
+
+/* Each block of device registers is 64 bytes */
+#define CDMM_DRB_SIZE 64
+
+#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+
+/* Default physical base address */
+static phys_addr_t mips_cdmm_default_base;
+
+/* Bus operations */
+
+static const struct mips_cdmm_device_id *
+mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
+ struct mips_cdmm_device *dev)
+{
+ int ret = 0;
+
+ for (; table->type; ++table) {
+ ret = (dev->type == table->type);
+ if (ret)
+ break;
+ }
+
+ return ret ? table : NULL;
+}
+
+static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+
+ return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
+}
+
+static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ int retval = 0;
+
+ retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_TYPE=0x%02x", cdev->type);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_REV=%u", cdev->rev);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=mipscdmm:t%02X", cdev->type);
+ return retval;
+}
+
+/* Device attributes */
+
+#define CDMM_ATTR(name, fmt, arg...) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev); \
+ return sprintf(buf, fmt, arg); \
+} \
+static DEVICE_ATTR_RO(name);
+
+CDMM_ATTR(cpu, "%u\n", dev->cpu);
+CDMM_ATTR(type, "0x%02x\n", dev->type);
+CDMM_ATTR(revision, "%u\n", dev->rev);
+CDMM_ATTR(modalias, "mipscdmm:t%02X\n", dev->type);
+CDMM_ATTR(resource, "\t%016llx\t%016llx\t%016lx\n",
+ (unsigned long long)dev->res.start,
+ (unsigned long long)dev->res.end,
+ dev->res.flags);
+
+static struct attribute *mips_cdmm_dev_attrs[] = {
+ &dev_attr_cpu.attr,
+ &dev_attr_type.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_resource.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mips_cdmm_dev);
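For readers unfamiliar with the CDMM_ATTR() helper above, each invocation generates a read-only sysfs attribute for the device; CDMM_ATTR(cpu, "%u\n", dev->cpu), for instance, expands to roughly the following (the attribute then shows up in the device's sysfs directory on the "cdmm" bus):

static ssize_t cpu_show(struct device *_dev,
			struct device_attribute *attr, char *buf)
{
	struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev);
	return sprintf(buf, "%u\n", dev->cpu);
}
static DEVICE_ATTR_RO(cpu);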
+
+struct bus_type mips_cdmm_bustype = {
+ .name = "cdmm",
+ .dev_groups = mips_cdmm_dev_groups,
+ .match = mips_cdmm_match,
+ .uevent = mips_cdmm_uevent,
+};
+EXPORT_SYMBOL_GPL(mips_cdmm_bustype);
+
+/*
+ * Standard driver callback helpers.
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the standard driver callbacks we need a work function
+ * (mips_cdmm_{void,int}_work()) to do the actual call from the right CPU, and a
+ * wrapper function (generated with BUILD_PERCPU_HELPER) to arrange for the work
+ * function to be called on that CPU.
+ */
+
+/**
+ * struct mips_cdmm_work_dev - Data for per-device call work.
+ * @fn: CDMM driver callback function to call for the device.
+ * @dev: CDMM device to pass to @fn.
+ */
+struct mips_cdmm_work_dev {
+ void *fn;
+ struct mips_cdmm_device *dev;
+};
+
+/**
+ * mips_cdmm_void_work() - Call a void returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which doesn't return a value.
+ */
+static long mips_cdmm_void_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ void (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ fn(work->dev);
+ return 0;
+}
+
+/**
+ * mips_cdmm_int_work() - Call an int returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which returns an int.
+ */
+static long mips_cdmm_int_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ int (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ return fn(work->dev);
+}
+
+#define _BUILD_RET_void
+#define _BUILD_RET_int return
+
+/**
+ * BUILD_PERCPU_HELPER() - Helper to call a CDMM driver callback on right CPU.
+ * @_ret: Return type (void or int).
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a specific device callback function to call a CDMM driver callback
+ * function on the appropriate CPU for the device, and if applicable return the
+ * result.
+ */
+#define BUILD_PERCPU_HELPER(_ret, _name) \
+static _ret mips_cdmm_##_name(struct device *dev) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver); \
+ struct mips_cdmm_work_dev work = { \
+ .fn = cdrv->_name, \
+ .dev = cdev, \
+ }; \
+ \
+ _BUILD_RET_##_ret work_on_cpu(cdev->cpu, \
+ mips_cdmm_##_ret##_work, &work); \
+}
+
+/* Driver callback functions */
+BUILD_PERCPU_HELPER(int, probe) /* int mips_cdmm_probe(struct device) */
+BUILD_PERCPU_HELPER(int, remove) /* int mips_cdmm_remove(struct device) */
+BUILD_PERCPU_HELPER(void, shutdown) /* void mips_cdmm_shutdown(struct device) */
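To make the macro above easier to follow, BUILD_PERCPU_HELPER(int, probe) expands to roughly this wrapper, which bounces the driver's probe callback onto the device's own CPU via work_on_cpu():

static int mips_cdmm_probe(struct device *dev)
{
	struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
	struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver);
	struct mips_cdmm_work_dev work = {
		.fn	= cdrv->probe,
		.dev	= cdev,
	};

	return work_on_cpu(cdev->cpu, mips_cdmm_int_work, &work);
}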
+
+
+/* Driver registration */
+
+/**
+ * mips_cdmm_driver_register() - Register a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Register a CDMM driver with the CDMM subsystem. The driver will be informed
+ * of matching devices which are discovered.
+ *
+ * Returns: 0 on success.
+ */
+int mips_cdmm_driver_register(struct mips_cdmm_driver *drv)
+{
+ drv->drv.bus = &mips_cdmm_bustype;
+
+ if (drv->probe)
+ drv->drv.probe = mips_cdmm_probe;
+ if (drv->remove)
+ drv->drv.remove = mips_cdmm_remove;
+ if (drv->shutdown)
+ drv->drv.shutdown = mips_cdmm_shutdown;
+
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_register);
+
+/**
+ * mips_cdmm_driver_unregister() - Unregister a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Unregister a CDMM driver from the CDMM subsystem.
+ */
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_unregister);
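As a hedged sketch of how a client driver would use this API (the exact layouts of struct mips_cdmm_driver and struct mips_cdmm_device_id live in asm/cdmm.h; the driver name and the 0x3 type code below are made up purely for illustration):

static const struct mips_cdmm_device_id my_cdmm_ids[] = {
	{ .type = 0x3 },	/* hypothetical CDMM device type code */
	{ }			/* a zero type terminates the table */
};

static int my_cdmm_probe(struct mips_cdmm_device *cdev)
{
	/* Runs on cdev->cpu; cdev->res describes the device's register window. */
	pr_info("%s: type 0x%02x, rev %u\n",
		dev_name(&cdev->dev), cdev->type, cdev->rev);
	return 0;
}

static struct mips_cdmm_driver my_cdmm_driver = {
	.drv		= { .name = "my-cdmm-dev" },
	.probe		= my_cdmm_probe,
	.id_table	= my_cdmm_ids,
};

static int __init my_cdmm_driver_init(void)
{
	return mips_cdmm_driver_register(&my_cdmm_driver);
}
module_init(my_cdmm_driver_init);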
+
+
+/* CDMM initialisation and bus discovery */
+
+/**
+ * struct mips_cdmm_bus - Info about CDMM bus.
+ * @phys: Physical address at which it is mapped.
+ * @regs: Virtual address where registers can be accessed.
+ * @drbs: Total number of DRBs.
+ * @drbs_reserved: Number of DRBs reserved.
+ * @discovered: Whether the devices on the bus have been discovered yet.
+ * @offline: Whether the CDMM bus is going offline (or very early
+ * coming back online), in which case it should be
+ * reconfigured each time.
+ */
+struct mips_cdmm_bus {
+ phys_addr_t phys;
+ void __iomem *regs;
+ unsigned int drbs;
+ unsigned int drbs_reserved;
+ bool discovered;
+ bool offline;
+};
+
+static struct mips_cdmm_bus mips_cdmm_boot_bus;
+static DEFINE_PER_CPU(struct mips_cdmm_bus *, mips_cdmm_buses);
+static atomic_t mips_cdmm_next_id = ATOMIC_INIT(-1);
+
+/**
+ * mips_cdmm_get_bus() - Get the per-CPU CDMM bus information.
+ *
+ * Get information about the per-CPU CDMM bus, if the bus is present.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: Pointer to CDMM bus information for the current CPU.
+ * May return ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+static struct mips_cdmm_bus *mips_cdmm_get_bus(void)
+{
+ struct mips_cdmm_bus *bus, **bus_p;
+ unsigned long flags;
+ unsigned int cpu;
+
+ if (!cpu_has_cdmm)
+ return ERR_PTR(-ENODEV);
+
+ cpu = smp_processor_id();
+ /* Avoid early use of per-cpu primitives before initialised */
+ if (cpu == 0)
+ return &mips_cdmm_boot_bus;
+
+ /* Get bus pointer */
+ bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
+ local_irq_save(flags);
+ bus = *bus_p;
+ /* Attempt allocation if NULL */
+ if (unlikely(!bus)) {
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
+ if (unlikely(!bus))
+ bus = ERR_PTR(-ENOMEM);
+ else
+ *bus_p = bus;
+ }
+ local_irq_restore(flags);
+ return bus;
+}
+
+/**
+ * mips_cdmm_cur_base() - Find current physical base address of CDMM region.
+ *
+ * Returns: Physical base address of CDMM region according to cdmmbase CP0
+ * register, or 0 if the CDMM region is disabled.
+ */
+static phys_addr_t mips_cdmm_cur_base(void)
+{
+ unsigned long cdmmbase = read_c0_cdmmbase();
+
+ if (!(cdmmbase & MIPS_CDMMBASE_EN))
+ return 0;
+
+ return (cdmmbase >> MIPS_CDMMBASE_ADDR_SHIFT)
+ << MIPS_CDMMBASE_ADDR_START;
+}
+
+/**
+ * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
+ * @bus: Pointer to bus information for current CPU.
+ * IS_ERR(bus) is checked, so no need for caller to check.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
+{
+ unsigned long cdmmbase, flags;
+ int ret = 0;
+
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ local_irq_save(flags);
+ /* Don't set up bus a second time unless marked offline */
+ if (bus->offline) {
+ /* If CDMM region is still set up, nothing to do */
+ if (bus->phys == mips_cdmm_cur_base())
+ goto out;
+ /*
+ * The CDMM region isn't set up as expected, so it needs
+ * reconfiguring, but then we can stop checking it.
+ */
+ bus->offline = false;
+ } else if (bus->phys > 1) {
+ goto out;
+ }
+
+ /* If the CDMM region is already configured, inherit that setup */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_cur_base();
+ /* Otherwise, ask platform code for suggestions */
+ if (!bus->phys && mips_cdmm_phys_base)
+ bus->phys = mips_cdmm_phys_base();
+ /* Otherwise, copy what other CPUs have done */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_default_base;
+ /* Otherwise, complain once */
+ if (!bus->phys) {
+ bus->phys = 1;
+ /*
+ * If you hit this, either your bootloader needs to set up the
+ * CDMM on the boot CPU, or else you need to implement
+ * mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
+ */
+ pr_err("cdmm%u: Failed to choose a physical base\n",
+ smp_processor_id());
+ }
+ /* Already complained? */
+ if (bus->phys == 1) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Record our success for other CPUs to copy */
+ mips_cdmm_default_base = bus->phys;
+
+ pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
+ smp_processor_id(), &bus->phys);
+
+ /* Enable CDMM */
+ cdmmbase = read_c0_cdmmbase();
+ cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
+ cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
+ << MIPS_CDMMBASE_ADDR_SHIFT;
+ cdmmbase |= MIPS_CDMMBASE_EN;
+ write_c0_cdmmbase(cdmmbase);
+ tlbw_use_hazard();
+
+ bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
+ bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
+ MIPS_CDMMBASE_SIZE_SHIFT);
+ bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);
+
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+/**
+ * mips_cdmm_early_probe() - Minimally probe for a specific device on CDMM.
+ * @dev_type: CDMM type code to look for.
+ *
+ * Minimally configure the in-CPU Common Device Memory Map (CDMM) and look for a
+ * specific device. This can be used to find a device very early in boot for
+ * example to configure an early FDC console device.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: MMIO pointer to device memory. The caller can read the ACSR
+ * register to find more information about the device (such as the
+ * version number or the number of blocks).
+ * May return IOMEM_ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
+{
+ struct mips_cdmm_bus *bus;
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size;
+ int err;
+
+ if (WARN_ON(!dev_type))
+ return IOMEM_ERR_PTR(-ENODEV);
+
+ bus = mips_cdmm_get_bus();
+ err = mips_cdmm_setup(bus);
+ if (err)
+ return IOMEM_ERR_PTR(err);
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Look for a specific device type */
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ if (type == dev_type)
+ return cdmm + drb * CDMM_DRB_SIZE;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ }
+
+ return IOMEM_ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_early_probe);
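As a sketch of the early-console use case mentioned in the kerneldoc above (the 0x3 type code is again purely illustrative), a caller pinned to one CPU might do:

	void __iomem *regs;
	u32 acsr;

	preempt_disable();	/* the API requires the caller to prevent migration */
	regs = mips_cdmm_early_probe(0x3);
	if (!IS_ERR(regs)) {
		/* The first word of the device's block is its ACSR. */
		acsr = readl(regs);
		pr_info("early CDMM device rev %lu\n",
			(acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT);
	}
	preempt_enable();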
+
+/**
+ * mips_cdmm_release() - Release a removed CDMM device.
+ * @dev: Device object
+ *
+ * Clean up the struct mips_cdmm_device for an unused CDMM device. This is
+ * called automatically by the driver core when a device is removed.
+ */
+static void mips_cdmm_release(struct device *dev)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+
+ kfree(cdev);
+}
+
+/**
+ * mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
+ * @bus: CDMM bus information, must already be set up.
+ */
+static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
+{
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size, rev;
+ struct mips_cdmm_device *dev;
+ unsigned int cpu = smp_processor_id();
+ int ret = 0;
+ int id = 0;
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Discover devices */
+ bus->discovered = true;
+ pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
+
+ if (!type)
+ continue;
+
+ pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
+ cpu, id, drb, drb * CDMM_DRB_SIZE,
+ (drb + size + 1) * CDMM_DRB_SIZE - 1,
+ type, rev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ break;
+
+ dev->cpu = cpu;
+ dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
+ dev->res.end = bus->phys +
+ (drb + size + 1) * CDMM_DRB_SIZE - 1;
+ dev->res.flags = IORESOURCE_MEM;
+ dev->type = type;
+ dev->rev = rev;
+ dev->dev.parent = get_cpu_device(cpu);
+ dev->dev.bus = &mips_cdmm_bustype;
+ dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
+ dev->dev.release = mips_cdmm_release;
+
+ dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
+ ++id;
+ ret = device_register(&dev->dev);
+ if (ret) {
+ put_device(&dev->dev);
+ kfree(dev);
+ }
+ }
+}
+
+
+/*
+ * CPU hotplug and initialisation
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the CPU callbacks, they need to be called for all devices on
+ * that CPU, so the work function calls bus_for_each_dev, using a helper
+ * (generated with BUILD_PERDEV_HELPER) to call the driver callback if the
+ * device's CPU matches.
+ */
+
+/**
+ * BUILD_PERDEV_HELPER() - Helper to call a CDMM driver callback if CPU matches.
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a bus_for_each_dev callback function to call a specific CDMM driver
+ * callback function for the device if the device's CPU matches that pointed to
+ * by the data argument.
+ *
+ * This is used for informing drivers for all devices on a given CPU of some
+ * event (such as the CPU going online/offline).
+ *
+ * It is expected to already be called from the appropriate CPU.
+ */
+#define BUILD_PERDEV_HELPER(_name) \
+static int mips_cdmm_##_name##_helper(struct device *dev, void *data) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv; \
+ unsigned int cpu = *(unsigned int *)data; \
+ \
+ if (cdev->cpu != cpu || !dev->driver) \
+ return 0; \
+ \
+ cdrv = to_mips_cdmm_driver(dev->driver); \
+ if (!cdrv->_name) \
+ return 0; \
+ return cdrv->_name(cdev); \
+}
+
+/* bus_for_each_dev callback helper functions */
+BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
+BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
+
+/**
+ * mips_cdmm_bus_down() - Tear down the CDMM bus.
+ * @data: Pointer to unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to call the
+ * CDMM driver cpu_down callback for all devices on that CPU.
+ */
+static long mips_cdmm_bus_down(void *data)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ mips_cdmm_cpu_down_helper);
+
+ /*
+ * While the bus is offline, each use of it should reconfigure it, just in
+ * case it is the first use after coming back online again.
+ */
+ bus = mips_cdmm_get_bus();
+ if (!IS_ERR(bus))
+ bus->offline = true;
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_bus_up() - Bring up the CDMM bus.
+ * @data: Pointer to unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to discover
+ * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
+ * devices already discovered on that CPU.
+ *
+ * It is used during initialisation and when CPUs are brought online.
+ */
+static long mips_cdmm_bus_up(void *data)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ bus = mips_cdmm_get_bus();
+ ret = mips_cdmm_setup(bus);
+ if (ret)
+ return ret;
+
+ /* Bus now set up, so we can drop the offline flag if still set */
+ bus->offline = false;
+
+ if (!bus->discovered)
+ mips_cdmm_bus_discover(bus);
+ else
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ mips_cdmm_cpu_up_helper);
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline.
+ * @nb: CPU notifier block.
+ * @action: Event that has taken place (CPU_*).
+ * @data: CPU number.
+ *
+ * This notifier is used to keep the CDMM buses updated as CPUs are offlined and
+ * onlined. When CPUs go offline or come back online, so does their CDMM bus, so
+ * devices must be informed. Also when CPUs come online for the first time the
+ * devices on the CDMM bus need discovering.
+ *
+ * Returns: NOTIFY_OK if event was used.
+ * NOTIFY_DONE if we didn't care.
+ */
+static int mips_cdmm_cpu_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned int cpu = (unsigned int)data;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ work_on_cpu(cpu, mips_cdmm_bus_down, &cpu);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mips_cdmm_cpu_nb = {
+ .notifier_call = mips_cdmm_cpu_notify,
+};
+
+/**
+ * mips_cdmm_init() - Initialise CDMM bus.
+ *
+ * Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
+ * hotplug notifications so the CDMM drivers can be kept up to date.
+ */
+static int __init mips_cdmm_init(void)
+{
+ unsigned int cpu;
+ int ret;
+
+ /* Register the bus */
+ ret = bus_register(&mips_cdmm_bustype);
+ if (ret)
+ return ret;
+
+ /* We want to be notified about new CPUs */
+ ret = register_cpu_notifier(&mips_cdmm_cpu_nb);
+ if (ret) {
+ pr_warn("cdmm: Failed to register CPU notifier\n");
+ goto out;
+ }
+
+ /* Discover devices on CDMM of online CPUs */
+ for_each_online_cpu(cpu)
+ work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+
+ return 0;
+out:
+ bus_unregister(&mips_cdmm_bustype);
+ return ret;
+}
+subsys_initcall(mips_cdmm_init);
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 723ec06ad2c8..9f1856948758 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -16,6 +16,7 @@
*
*/
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -23,6 +24,9 @@
#include <linux/of.h>
#include <linux/of_platform.h>
+#define OCP2SCP_TIMING 0x18
+#define SYNC2_MASK 0xf
+
static int ocp2scp_remove_devices(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -35,6 +39,9 @@ static int ocp2scp_remove_devices(struct device *dev, void *c)
static int omap_ocp2scp_probe(struct platform_device *pdev)
{
int ret;
+ u32 reg;
+ void __iomem *regs;
+ struct resource *res;
struct device_node *np = pdev->dev.of_node;
if (np) {
@@ -47,6 +54,32 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
+ /*
+ * As per AM572x TRM: http://www.ti.com/lit/ug/spruhz6/spruhz6.pdf
+ * under section 26.3.2.2, table 26-26 OCP2SCP TIMING Caution;
+ * As per OMAP4430 TRM: http://www.ti.com/lit/ug/swpu231ap/swpu231ap.pdf
+ * under section 23.12.6.2.2 , Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP4460 TRM: http://www.ti.com/lit/ug/swpu235ab/swpu235ab.pdf
+ * under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP543x TRM http://www.ti.com/lit/pdf/swpu249
+ * under section 27.3.2.2, Table 27-27 OCP2SCP TIMING Caution;
+ *
+ * The OCP2SCP read path does not work properly due to the low reset value
+ * of the SYNC2 parameter in OCP2SCP. The suggested reset value is 0x6 or more.
+ */
+ if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ goto err0;
+
+ pm_runtime_get_sync(&pdev->dev);
+ reg = readl_relaxed(regs + OCP2SCP_TIMING);
+ reg &= ~(SYNC2_MASK);
+ reg |= 0x6;
+ writel_relaxed(reg, regs + OCP2SCP_TIMING);
+ pm_runtime_put_sync(&pdev->dev);
+ }
return 0;
@@ -67,6 +100,7 @@ static int omap_ocp2scp_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id omap_ocp2scp_id_table[] = {
{ .compatible = "ti,omap-ocp2scp" },
+ { .compatible = "ti,am437x-ocp2scp" },
{}
};
MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index 11f7982cbdb3..ebee57d715d2 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,7 +1,7 @@
/*
* OMAP L3 Interconnect error handling driver
*
- * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sricharan <r.sricharan@ti.com>
*
@@ -233,7 +233,8 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
}
static const struct of_device_id l3_noc_match[] = {
- {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data},
+ {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+ {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
{},
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index 95254585db86..73431f81da28 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,7 +1,7 @@
/*
* OMAP L3 Interconnect error handling driver header
*
- * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* sricharan <r.sricharan@ti.com>
*
@@ -175,16 +175,14 @@ static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
};
-static struct l3_target_data omap_l3_target_data_clk3[] = {
- {0x0100, "EMUSS",},
- {0x0300, "DEBUG SOURCE",},
- {0x0, "HOST CLK3",},
+static struct l3_target_data omap4_l3_target_data_clk3[] = {
+ {0x0100, "DEBUGSS",},
};
-static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
+static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
.offset = 0x0200,
- .l3_targ = omap_l3_target_data_clk3,
- .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3),
+ .l3_targ = omap4_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
};
static struct l3_masters_data omap_l3_masters[] = {
@@ -215,21 +213,49 @@ static struct l3_masters_data omap_l3_masters[] = {
{ 0x32, "USBHOSTFS"}
};
-static struct l3_flagmux_data *omap_l3_flagmux[] = {
+static struct l3_flagmux_data *omap4_l3_flagmux[] = {
&omap_l3_flagmux_clk1,
&omap_l3_flagmux_clk2,
- &omap_l3_flagmux_clk3,
+ &omap4_l3_flagmux_clk3,
};
-static const struct omap_l3 omap_l3_data = {
- .l3_flagmux = omap_l3_flagmux,
- .num_modules = ARRAY_SIZE(omap_l3_flagmux),
+static const struct omap_l3 omap4_l3_data = {
+ .l3_flagmux = omap4_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap4_l3_flagmux),
.l3_masters = omap_l3_masters,
.num_masters = ARRAY_SIZE(omap_l3_masters),
/* The 6 MSBs of register field used to distinguish initiator */
.mst_addr_mask = 0xFC,
};
+/* OMAP5 data */
+static struct l3_target_data omap5_l3_target_data_clk3[] = {
+ {0x0100, "L3INSTR",},
+ {0x0300, "DEBUGSS",},
+ {0x0, "HOSTCLK3",},
+};
+
+static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap5_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
+};
+
+static struct l3_flagmux_data *omap5_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap5_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap5_l3_data = {
+ .l3_flagmux = omap5_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap5_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0x7E0,
+};
+
/* DRA7 data */
static struct l3_target_data dra_l3_target_data_clk1[] = {
{0x2a00, "AES1",},
@@ -274,7 +300,7 @@ static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
static struct l3_target_data dra_l3_target_data_clk2[] = {
{0x0, "HOST CLK1",},
- {0x0, "HOST CLK2",},
+ {0x800000, "HOST CLK2",},
{0xdead, L3_TARGET_NOT_SUPPORTED,},
{0x3400, "SHA2_2",},
{0x0900, "BB2D",},
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
new file mode 100644
index 000000000000..c5eb46cbf388
--- /dev/null
+++ b/drivers/bus/simple-pm-bus.c
@@ -0,0 +1,58 @@
+/*
+ * Simple Power-Managed Bus Driver
+ *
+ * Copyright (C) 2014-2015 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+
+static int simple_pm_bus_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ pm_runtime_enable(&pdev->dev);
+
+ if (np)
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int simple_pm_bus_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id simple_pm_bus_of_match[] = {
+ { .compatible = "simple-pm-bus", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
+
+static struct platform_driver simple_pm_bus_driver = {
+ .probe = simple_pm_bus_probe,
+ .remove = simple_pm_bus_remove,
+ .driver = {
+ .name = "simple-pm-bus",
+ .of_match_table = simple_pm_bus_of_match,
+ },
+};
+
+module_platform_driver(simple_pm_bus_driver);
+
+MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index de57b38809c7..f48cf11c655e 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -101,6 +101,19 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
+config HW_RANDOM_IPROC_RNG200
+ tristate "Broadcom iProc RNG200 support"
+ depends on ARCH_BCM_IPROC
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the RNG200
+ hardware found on the Broadcom iProc SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iproc-rng200.
+
+ If unsure, say Y.
+
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
depends on X86_32 && PCI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 0b4cd57f4e24..055bb01510ad 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -28,5 +28,6 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ba6a65ac023b..4b31f1387f37 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -13,24 +13,37 @@
#include <linux/platform_device.h>
#include <linux/hw_random.h>
-#include <bcm63xx_io.h>
-#include <bcm63xx_regs.h>
+#define RNG_CTRL 0x00
+#define RNG_EN (1 << 0)
+
+#define RNG_STAT 0x04
+#define RNG_AVAIL_MASK (0xff000000)
+
+#define RNG_DATA 0x08
+#define RNG_THRES 0x0c
+#define RNG_MASK 0x10
struct bcm63xx_rng_priv {
+ struct hwrng rng;
struct clk *clk;
void __iomem *regs;
};
-#define to_rng_priv(rng) ((struct bcm63xx_rng_priv *)rng->priv)
+#define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng)
static int bcm63xx_rng_init(struct hwrng *rng)
{
struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
u32 val;
+ int error;
+
+ error = clk_prepare_enable(priv->clk);
+ if (error)
+ return error;
- val = bcm_readl(priv->regs + RNG_CTRL);
+ val = __raw_readl(priv->regs + RNG_CTRL);
val |= RNG_EN;
- bcm_writel(val, priv->regs + RNG_CTRL);
+ __raw_writel(val, priv->regs + RNG_CTRL);
return 0;
}
@@ -40,23 +53,25 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng)
struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
u32 val;
- val = bcm_readl(priv->regs + RNG_CTRL);
+ val = __raw_readl(priv->regs + RNG_CTRL);
val &= ~RNG_EN;
- bcm_writel(val, priv->regs + RNG_CTRL);
+ __raw_writel(val, priv->regs + RNG_CTRL);
+
+ clk_disable_unprepare(priv->clk);
}
static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
{
struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- return bcm_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
+ return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
}
static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
{
struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- *data = bcm_readl(priv->regs + RNG_DATA);
+ *data = __raw_readl(priv->regs + RNG_DATA);
return 4;
}
@@ -72,94 +87,53 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no iomem resource\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "no memory for private structure\n");
- ret = -ENOMEM;
- goto out;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = bcm63xx_rng_init;
+ priv->rng.cleanup = bcm63xx_rng_cleanup;
+ priv->rng.data_present = bcm63xx_rng_data_present;
+ priv->rng.data_read = bcm63xx_rng_data_read;
+
+ priv->clk = devm_clk_get(&pdev->dev, "ipsec");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+ return ret;
}
- rng = kzalloc(sizeof(*rng), GFP_KERNEL);
- if (!rng) {
- dev_err(&pdev->dev, "no memory for rng structure\n");
- ret = -ENOMEM;
- goto out_free_priv;
- }
-
- platform_set_drvdata(pdev, rng);
- rng->priv = (unsigned long)priv;
- rng->name = pdev->name;
- rng->init = bcm63xx_rng_init;
- rng->cleanup = bcm63xx_rng_cleanup;
- rng->data_present = bcm63xx_rng_data_present;
- rng->data_read = bcm63xx_rng_data_read;
-
- clk = clk_get(&pdev->dev, "ipsec");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "no clock for device\n");
- ret = PTR_ERR(clk);
- goto out_free_rng;
- }
-
- priv->clk = clk;
-
if (!devm_request_mem_region(&pdev->dev, r->start,
resource_size(r), pdev->name)) {
dev_err(&pdev->dev, "request mem failed");
- ret = -ENOMEM;
- goto out_free_rng;
+ return -EBUSY;
}
priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
resource_size(r));
if (!priv->regs) {
dev_err(&pdev->dev, "ioremap failed");
- ret = -ENOMEM;
- goto out_free_rng;
+ return -ENOMEM;
}
- clk_enable(clk);
-
- ret = hwrng_register(rng);
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
if (ret) {
- dev_err(&pdev->dev, "failed to register rng device\n");
- goto out_clk_disable;
+ dev_err(&pdev->dev, "failed to register rng device: %d\n",
+ ret);
+ return ret;
}
dev_info(&pdev->dev, "registered RNG driver\n");
return 0;
-
-out_clk_disable:
- clk_disable(clk);
-out_free_rng:
- kfree(rng);
-out_free_priv:
- kfree(priv);
-out:
- return ret;
-}
-
-static int bcm63xx_rng_remove(struct platform_device *pdev)
-{
- struct hwrng *rng = platform_get_drvdata(pdev);
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- hwrng_unregister(rng);
- clk_disable(priv->clk);
- kfree(priv);
- kfree(rng);
-
- return 0;
}
static struct platform_driver bcm63xx_rng_driver = {
.probe = bcm63xx_rng_probe,
- .remove = bcm63xx_rng_remove,
.driver = {
.name = "bcm63xx-rng",
},
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 32a8a867f7f8..da8faf78536a 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -179,7 +179,8 @@ skip_init:
add_early_randomness(rng);
current_quality = rng->quality ? : default_quality;
- current_quality &= 1023;
+ if (current_quality > 1024)
+ current_quality = 1024;
if (current_quality == 0 && hwrng_fill)
kthread_stop(hwrng_fill);
@@ -299,11 +300,14 @@ static const struct file_operations rng_chrdev_ops = {
.llseek = noop_llseek,
};
+static const struct attribute_group *rng_dev_groups[];
+
static struct miscdevice rng_miscdev = {
.minor = RNG_MISCDEV_MINOR,
.name = RNG_MODULE_NAME,
.nodename = "hwrng",
.fops = &rng_chrdev_ops,
+ .groups = rng_dev_groups,
};
@@ -376,37 +380,22 @@ static DEVICE_ATTR(rng_available, S_IRUGO,
hwrng_attr_available_show,
NULL);
+static struct attribute *rng_dev_attrs[] = {
+ &dev_attr_rng_current.attr,
+ &dev_attr_rng_available.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rng_dev);
static void __exit unregister_miscdev(void)
{
- device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
- device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
misc_deregister(&rng_miscdev);
}
static int __init register_miscdev(void)
{
- int err;
-
- err = misc_register(&rng_miscdev);
- if (err)
- goto out;
- err = device_create_file(rng_miscdev.this_device,
- &dev_attr_rng_current);
- if (err)
- goto err_misc_dereg;
- err = device_create_file(rng_miscdev.this_device,
- &dev_attr_rng_available);
- if (err)
- goto err_remove_current;
-out:
- return err;
-
-err_remove_current:
- device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
-err_misc_dereg:
- misc_deregister(&rng_miscdev);
- goto out;
+ return misc_register(&rng_miscdev);
}
static int hwrng_fillfn(void *unused)
@@ -536,6 +525,48 @@ void hwrng_unregister(struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
+static void devm_hwrng_release(struct device *dev, void *res)
+{
+ hwrng_unregister(*(struct hwrng **)res);
+}
+
+static int devm_hwrng_match(struct device *dev, void *res, void *data)
+{
+ struct hwrng **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+int devm_hwrng_register(struct device *dev, struct hwrng *rng)
+{
+ struct hwrng **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = hwrng_register(rng);
+ if (error) {
+ devres_free(ptr);
+ return error;
+ }
+
+ *ptr = rng;
+ devres_add(dev, ptr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hwrng_register);
+
+void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
+{
+ devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
+}
+EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
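The exynos and msm conversions further down show the intended usage of the new devres helpers; as a minimal sketch (the foo_rng names are hypothetical), a probe routine can now drop its remove-time unregister entirely:

static int foo_rng_probe(struct platform_device *pdev)
{
	struct foo_rng *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->rng.name = "foo-rng";
	priv->rng.read = foo_rng_read;

	/* Unregistered automatically when the device is unbound. */
	return devm_hwrng_register(&pdev->dev, &priv->rng);
}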
+
static int __init hwrng_modinit(void)
{
return register_miscdev();
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index fed0830bf724..dc4701fd814f 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -131,16 +131,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return hwrng_register(&exynos_rng->rng);
-}
-
-static int exynos_rng_remove(struct platform_device *pdev)
-{
- struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
-
- hwrng_unregister(&exynos_rng->rng);
-
- return 0;
+ return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
}
#ifdef CONFIG_PM
@@ -172,7 +163,6 @@ static struct platform_driver exynos_rng_driver = {
.pm = &exynos_rng_pm_ops,
},
.probe = exynos_rng_probe,
- .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
new file mode 100644
index 000000000000..3eaf7cb96d36
--- /dev/null
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -0,0 +1,239 @@
+/*
+* Copyright (C) 2015 Broadcom Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation version 2.
+*
+* This program is distributed "as is" WITHOUT ANY WARRANTY of any
+* kind, whether express or implied; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+/*
+ * DESCRIPTION: The Broadcom iProc RNG200 Driver
+ */
+
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+/* Registers */
+#define RNG_CTRL_OFFSET 0x00
+#define RNG_CTRL_RNG_RBGEN_MASK 0x00001FFF
+#define RNG_CTRL_RNG_RBGEN_ENABLE 0x00000001
+#define RNG_CTRL_RNG_RBGEN_DISABLE 0x00000000
+
+#define RNG_SOFT_RESET_OFFSET 0x04
+#define RNG_SOFT_RESET 0x00000001
+
+#define RBG_SOFT_RESET_OFFSET 0x08
+#define RBG_SOFT_RESET 0x00000001
+
+#define RNG_INT_STATUS_OFFSET 0x18
+#define RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK 0x80000000
+#define RNG_INT_STATUS_STARTUP_TRANSITIONS_MET_IRQ_MASK 0x00020000
+#define RNG_INT_STATUS_NIST_FAIL_IRQ_MASK 0x00000020
+#define RNG_INT_STATUS_TOTAL_BITS_COUNT_IRQ_MASK 0x00000001
+
+#define RNG_FIFO_DATA_OFFSET 0x20
+
+#define RNG_FIFO_COUNT_OFFSET 0x24
+#define RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK 0x000000FF
+
+struct iproc_rng200_dev {
+ struct hwrng rng;
+ void __iomem *base;
+};
+
+#define to_rng_priv(rng) container_of(rng, struct iproc_rng200_dev, rng)
+
+static void iproc_rng200_restart(void __iomem *rng_base)
+{
+ uint32_t val;
+
+ /* Disable RBG */
+ val = ioread32(rng_base + RNG_CTRL_OFFSET);
+ val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+ val |= RNG_CTRL_RNG_RBGEN_DISABLE;
+ iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+
+ /* Clear all interrupt status */
+ iowrite32(0xFFFFFFFFUL, rng_base + RNG_INT_STATUS_OFFSET);
+
+ /* Reset RNG and RBG */
+ val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET);
+ val |= RBG_SOFT_RESET;
+ iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
+
+ val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET);
+ val |= RNG_SOFT_RESET;
+ iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET);
+
+ val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET);
+ val &= ~RNG_SOFT_RESET;
+ iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET);
+
+ val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET);
+ val &= ~RBG_SOFT_RESET;
+ iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
+
+ /* Enable RBG */
+ val = ioread32(rng_base + RNG_CTRL_OFFSET);
+ val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+ val |= RNG_CTRL_RNG_RBGEN_ENABLE;
+ iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+}
+
+static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct iproc_rng200_dev *priv = to_rng_priv(rng);
+ uint32_t num_remaining = max;
+ uint32_t status;
+
+ #define MAX_RESETS_PER_READ 1
+ uint32_t num_resets = 0;
+
+ #define MAX_IDLE_TIME (1 * HZ)
+ unsigned long idle_endtime = jiffies + MAX_IDLE_TIME;
+
+ while ((num_remaining > 0) && time_before(jiffies, idle_endtime)) {
+
+ /* Is RNG sane? If not, reset it. */
+ status = ioread32(priv->base + RNG_INT_STATUS_OFFSET);
+ if ((status & (RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK |
+ RNG_INT_STATUS_NIST_FAIL_IRQ_MASK)) != 0) {
+
+ if (num_resets >= MAX_RESETS_PER_READ)
+ return max - num_remaining;
+
+ iproc_rng200_restart(priv->base);
+ num_resets++;
+ }
+
+ /* Are there any random numbers available? */
+ if ((ioread32(priv->base + RNG_FIFO_COUNT_OFFSET) &
+ RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK) > 0) {
+
+ if (num_remaining >= sizeof(uint32_t)) {
+ /* Buffer has room to store entire word */
+ *(uint32_t *)buf = ioread32(priv->base +
+ RNG_FIFO_DATA_OFFSET);
+ buf += sizeof(uint32_t);
+ num_remaining -= sizeof(uint32_t);
+ } else {
+ /* Buffer can only store partial word */
+ uint32_t rnd_number = ioread32(priv->base +
+ RNG_FIFO_DATA_OFFSET);
+ memcpy(buf, &rnd_number, num_remaining);
+ buf += num_remaining;
+ num_remaining = 0;
+ }
+
+ /* Reset the IDLE timeout */
+ idle_endtime = jiffies + MAX_IDLE_TIME;
+ } else {
+ if (!wait)
+ /* Cannot wait, return immediately */
+ return max - num_remaining;
+
+ /* Can wait, give others chance to run */
+ usleep_range(min(num_remaining * 10, 500U), 500);
+ }
+ }
+
+ return max - num_remaining;
+}
+
+static int iproc_rng200_init(struct hwrng *rng)
+{
+ struct iproc_rng200_dev *priv = to_rng_priv(rng);
+ uint32_t val;
+
+ /* Setup RNG. */
+ val = ioread32(priv->base + RNG_CTRL_OFFSET);
+ val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+ val |= RNG_CTRL_RNG_RBGEN_ENABLE;
+ iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+
+ return 0;
+}
+
+static void iproc_rng200_cleanup(struct hwrng *rng)
+{
+ struct iproc_rng200_dev *priv = to_rng_priv(rng);
+ uint32_t val;
+
+ /* Disable RNG hardware */
+ val = ioread32(priv->base + RNG_CTRL_OFFSET);
+ val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+ val |= RNG_CTRL_RNG_RBGEN_DISABLE;
+ iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+}
+
+static int iproc_rng200_probe(struct platform_device *pdev)
+{
+ struct iproc_rng200_dev *priv;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Map peripheral */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get rng resources\n");
+ return -EINVAL;
+ }
+
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base)) {
+ dev_err(dev, "failed to remap rng regs\n");
+ return PTR_ERR(priv->base);
+ }
+
+ priv->rng.name = "iproc-rng200",
+ priv->rng.read = iproc_rng200_read,
+ priv->rng.init = iproc_rng200_init,
+ priv->rng.cleanup = iproc_rng200_cleanup,
+
+ /* Register driver */
+ ret = devm_hwrng_register(dev, &priv->rng);
+ if (ret) {
+ dev_err(dev, "hwrng registration failed\n");
+ return ret;
+ }
+
+ dev_info(dev, "hwrng registered\n");
+
+ return 0;
+}
+
+static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,iproc-rng200", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, iproc_rng200_of_match);
+
+static struct platform_driver iproc_rng200_driver = {
+ .driver = {
+ .name = "iproc-rng200",
+ .of_match_table = iproc_rng200_of_match,
+ },
+ .probe = iproc_rng200_probe,
+};
+module_platform_driver(iproc_rng200_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("iProc RNG200 Random Number Generator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index cea1c703d62f..96fb986402eb 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -157,7 +157,7 @@ static int msm_rng_probe(struct platform_device *pdev)
rng->hwrng.cleanup = msm_rng_cleanup,
rng->hwrng.read = msm_rng_read,
- ret = hwrng_register(&rng->hwrng);
+ ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
if (ret) {
dev_err(&pdev->dev, "failed to register hwrng\n");
return ret;
@@ -166,14 +166,6 @@ static int msm_rng_probe(struct platform_device *pdev)
return 0;
}
-static int msm_rng_remove(struct platform_device *pdev)
-{
- struct msm_rng *rng = platform_get_drvdata(pdev);
-
- hwrng_unregister(&rng->hwrng);
- return 0;
-}
-
static const struct of_device_id msm_rng_of_match[] = {
{ .compatible = "qcom,prng", },
{}
@@ -182,7 +174,6 @@ MODULE_DEVICE_TABLE(of, msm_rng_of_match);
static struct platform_driver msm_rng_driver = {
.probe = msm_rng_probe,
- .remove = msm_rng_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(msm_rng_of_match),
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index be1c3f607398..6234a4a19b56 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -105,7 +105,7 @@ static int octeon_rng_probe(struct platform_device *pdev)
return 0;
}
-static int __exit octeon_rng_remove(struct platform_device *pdev)
+static int octeon_rng_remove(struct platform_device *pdev)
{
struct hwrng *rng = platform_get_drvdata(pdev);
@@ -119,7 +119,7 @@ static struct platform_driver octeon_rng_driver = {
.name = "octeon_rng",
},
.probe = octeon_rng_probe,
- .remove = __exit_p(octeon_rng_remove),
+ .remove = octeon_rng_remove,
};
module_platform_driver(octeon_rng_driver);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d14dcf788f17..8a1432e8bb80 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -236,7 +236,7 @@ static int omap4_rng_init(struct omap_rng_dev *priv)
u32 val;
/* Return if RNG is already running. */
- if (omap_rng_read(priv, RNG_CONFIG_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
return 0;
val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
@@ -262,7 +262,7 @@ static void omap4_rng_cleanup(struct omap_rng_dev *priv)
val = omap_rng_read(priv, RNG_CONTROL_REG);
val &= ~RNG_CONTROL_ENABLE_TRNG_MASK;
- omap_rng_write(priv, RNG_CONFIG_REG, val);
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
}
static irqreturn_t omap4_rng_irq(int irq, void *dev_id)
@@ -408,7 +408,7 @@ err_ioremap:
return ret;
}
-static int __exit omap_rng_remove(struct platform_device *pdev)
+static int omap_rng_remove(struct platform_device *pdev)
{
struct omap_rng_dev *priv = platform_get_drvdata(pdev);
@@ -422,9 +422,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-static int omap_rng_suspend(struct device *dev)
+static int __maybe_unused omap_rng_suspend(struct device *dev)
{
struct omap_rng_dev *priv = dev_get_drvdata(dev);
@@ -434,7 +432,7 @@ static int omap_rng_suspend(struct device *dev)
return 0;
}
-static int omap_rng_resume(struct device *dev)
+static int __maybe_unused omap_rng_resume(struct device *dev)
{
struct omap_rng_dev *priv = dev_get_drvdata(dev);
@@ -445,22 +443,15 @@ static int omap_rng_resume(struct device *dev)
}
static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
-#define OMAP_RNG_PM (&omap_rng_pm)
-
-#else
-
-#define OMAP_RNG_PM NULL
-
-#endif
static struct platform_driver omap_rng_driver = {
.driver = {
.name = "omap_rng",
- .pm = OMAP_RNG_PM,
+ .pm = &omap_rng_pm,
.of_match_table = of_match_ptr(omap_rng_of_match),
},
.probe = omap_rng_probe,
- .remove = __exit_p(omap_rng_remove),
+ .remove = omap_rng_remove,
};
module_platform_driver(omap_rng_driver);
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 3eb7bdd7f93b..51cb1d5cc489 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -133,7 +133,7 @@ static int rng_remove(struct platform_device *dev)
return 0;
}
-static struct of_device_id rng_match[] = {
+static const struct of_device_id rng_match[] = {
{ .compatible = "1682m-rng", },
{ .compatible = "pasemi,pwrficient-rng", },
{ },
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 3f4f63204560..263a5bb8e605 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -61,7 +61,7 @@ static int powernv_rng_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id powernv_rng_match[] = {
+static const struct of_device_id powernv_rng_match[] = {
{ .compatible = "ibm,power-rng",},
{},
};
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index c85d31a5f9e3..b2cfda0fa93e 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -123,7 +123,7 @@ static int ppc4xx_rng_remove(struct platform_device *dev)
return 0;
}
-static struct of_device_id ppc4xx_rng_match[] = {
+static const struct of_device_id ppc4xx_rng_match[] = {
{ .compatible = "ppc4xx-rng", },
{ .compatible = "amcc,ppc460ex-rng", },
{ .compatible = "amcc,ppc440epx-rng", },
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index bcf86f91800a..63ce51d09af1 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -61,13 +61,13 @@ static struct hwrng pseries_rng = {
.read = pseries_rng_read,
};
-static int __init pseries_rng_probe(struct vio_dev *dev,
+static int pseries_rng_probe(struct vio_dev *dev,
const struct vio_device_id *id)
{
return hwrng_register(&pseries_rng);
}
-static int __exit pseries_rng_remove(struct vio_dev *dev)
+static int pseries_rng_remove(struct vio_dev *dev)
{
hwrng_unregister(&pseries_rng);
return 0;
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 23caa05380a8..c37cf754a985 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
@@ -310,6 +311,14 @@ static int xgene_rng_init(struct hwrng *rng)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_rng_acpi_match[] = {
+ { "APMC0D18", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_rng_acpi_match);
+#endif
+
static struct hwrng xgene_rng_func = {
.name = "xgene-rng",
.init = xgene_rng_init,
@@ -415,6 +424,7 @@ static struct platform_driver xgene_rng_driver = {
.driver = {
.name = "xgene-rng",
.of_match_table = xgene_rng_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_rng_acpi_match),
},
};
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 24cc4ed9a780..a43048b5b05f 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -510,13 +510,15 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
* 9) AC power
* 10) Fn Key status
*/
- return seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
- I8K_PROC_FMT,
- bios_version,
- i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
- cpu_temp,
- left_fan, right_fan, left_speed, right_speed,
- ac_power, fn_key);
+ seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
+ I8K_PROC_FMT,
+ bios_version,
+ i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ cpu_temp,
+ left_fan, right_fan, left_speed, right_speed,
+ ac_power, fn_key);
+
+ return 0;
}
static int i8k_open_fs(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9bb592872532..bf75f6361773 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2000,7 +2000,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
seq_printf(m, " %x", intf->channels[i].address);
seq_putc(m, '\n');
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2023,7 +2023,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
ipmi_version_major(&intf->bmc->id),
ipmi_version_minor(&intf->bmc->id));
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_version_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 518585c1ce94..8a45e92ff60c 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -942,8 +942,7 @@ static void sender(void *send_info,
* If we are running to completion, start it and run
* transactions until everything is clear.
*/
- smi_info->curr_msg = msg;
- smi_info->waiting_msg = NULL;
+ smi_info->waiting_msg = msg;
/*
* Run to completion means we are single-threaded, no
@@ -2244,7 +2243,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
- int rv;
+ int rv = -EINVAL;
acpi_dev = pnp_acpi_device(dev);
if (!acpi_dev)
@@ -2262,8 +2261,10 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
/* _IFT tells us the interface type: KCS, BT, etc */
status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
goto err_free;
+ }
switch (tmp) {
case 1:
@@ -2276,6 +2277,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
info->si_type = SI_BT;
break;
case 4: /* SSIF, just ignore */
+ rv = -ENODEV;
goto err_free;
default:
dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
@@ -2336,7 +2338,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
err_free:
kfree(info);
- return -EINVAL;
+ return rv;
}
static void ipmi_pnp_remove(struct pnp_dev *dev)
@@ -2667,7 +2669,7 @@ static struct pci_driver ipmi_pci_driver = {
};
#endif /* CONFIG_PCI */
-static struct of_device_id ipmi_match[];
+static const struct of_device_id ipmi_match[];
static int ipmi_probe(struct platform_device *dev)
{
#ifdef CONFIG_OF
@@ -2764,7 +2766,7 @@ static int ipmi_remove(struct platform_device *dev)
return 0;
}
-static struct of_device_id ipmi_match[] =
+static const struct of_device_id ipmi_match[] =
{
{ .type = "ipmi", .compatible = "ipmi-kcs",
.data = (void *)(unsigned long) SI_KCS },
@@ -3080,7 +3082,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
seq_printf(m, "%s\n", si_to_str[smi->si_type]);
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3153,7 +3155,7 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
smi->irq,
smi->slave_addr);
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_params_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f40e3bd2c69c..207689c444a8 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -31,7 +31,6 @@
* interface into the I2C driver, I believe.
*/
-#include <linux/version.h>
#if defined(MODVERSIONS)
#include <linux/modversions.h>
#endif
@@ -166,6 +165,9 @@ enum ssif_stat_indexes {
/* Number of watchdog pretimeouts. */
SSIF_STAT_watchdog_pretimeouts,
+ /* Number of alerts received. */
+ SSIF_STAT_alerts,
+
/* Always add statistics before this value, it must be last. */
SSIF_NUM_STATS
};
@@ -214,7 +216,16 @@ struct ssif_info {
#define WDT_PRE_TIMEOUT_INT 0x08
unsigned char msg_flags;
+ u8 global_enables;
bool has_event_buffer;
+ bool supports_alert;
+
+ /*
+ * Used to tell what we should do with alerts. If we are
+ * waiting on a response, read the data immediately.
+ */
+ bool got_alert;
+ bool waiting_alert;
/*
* If set to true, this will request events the next time the
@@ -478,13 +489,13 @@ static int ipmi_ssif_thread(void *data)
if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
result = i2c_smbus_write_block_data(
- ssif_info->client, SSIF_IPMI_REQUEST,
+ ssif_info->client, ssif_info->i2c_command,
ssif_info->i2c_data[0],
ssif_info->i2c_data + 1);
ssif_info->done_handler(ssif_info, result, NULL, 0);
} else {
result = i2c_smbus_read_block_data(
- ssif_info->client, SSIF_IPMI_RESPONSE,
+ ssif_info->client, ssif_info->i2c_command,
ssif_info->i2c_data);
if (result < 0)
ssif_info->done_handler(ssif_info, result,
@@ -518,15 +529,12 @@ static int ssif_i2c_send(struct ssif_info *ssif_info,
static void msg_done_handler(struct ssif_info *ssif_info, int result,
unsigned char *data, unsigned int len);
-static void retry_timeout(unsigned long data)
+static void start_get(struct ssif_info *ssif_info)
{
- struct ssif_info *ssif_info = (void *) data;
int rv;
- if (ssif_info->stopping)
- return;
-
ssif_info->rtc_us_timer = 0;
+ ssif_info->multi_pos = 0;
rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
SSIF_IPMI_RESPONSE,
@@ -540,6 +548,46 @@ static void retry_timeout(unsigned long data)
}
}
+static void retry_timeout(unsigned long data)
+{
+ struct ssif_info *ssif_info = (void *) data;
+ unsigned long oflags, *flags;
+ bool waiting;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ waiting = ssif_info->waiting_alert;
+ ssif_info->waiting_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (waiting)
+ start_get(ssif_info);
+}
+
+
+static void ssif_alert(struct i2c_client *client, unsigned int data)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ unsigned long oflags, *flags;
+ bool do_get = false;
+
+ ssif_inc_stat(ssif_info, alerts);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->waiting_alert) {
+ ssif_info->waiting_alert = false;
+ del_timer(&ssif_info->retry_timer);
+ do_get = true;
+ } else if (ssif_info->curr_msg) {
+ ssif_info->got_alert = true;
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (do_get)
+ start_get(ssif_info);
+}
+
static int start_resend(struct ssif_info *ssif_info);
static void msg_done_handler(struct ssif_info *ssif_info, int result,
@@ -559,9 +607,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
if (ssif_info->retries_left > 0) {
ssif_inc_stat(ssif_info, receive_retries);
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->waiting_alert = true;
+ ssif_info->rtc_us_timer = SSIF_MSG_USEC;
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_JIFFIES);
- ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -581,9 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_inc_stat(ssif_info, received_message_parts);
/* Remove the multi-part read marker. */
- for (i = 0; i < (len-2); i++)
- ssif_info->data[i] = data[i+2];
len -= 2;
+ for (i = 0; i < len; i++)
+ ssif_info->data[i] = data[i+2];
ssif_info->multi_len = len;
ssif_info->multi_pos = 1;
@@ -610,9 +661,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
goto continue_op;
}
- blocknum = data[ssif_info->multi_len];
+ blocknum = data[0];
- if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) {
+ if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
/* Received message too big, abort the operation. */
result = -E2BIG;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -622,15 +673,15 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
/* Remove the blocknum from the data. */
- for (i = 0; i < (len-1); i++)
- ssif_info->data[i+ssif_info->multi_len] = data[i+1];
len--;
+ for (i = 0; i < len; i++)
+ ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
ssif_info->multi_len += len;
if (blocknum == 0xff) {
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if ((blocknum+1) != ssif_info->multi_pos) {
+ } else if (blocknum + 1 != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
@@ -650,7 +701,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
if (rv < 0) {
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
pr_info(PFX
- "Error from i2c_non_blocking_op(2)\n");
+ "Error from ssif_i2c_send\n");
result = -EIO;
} else
@@ -830,7 +881,11 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
}
if (ssif_info->multi_data) {
- /* In the middle of a multi-data write. */
+ /*
+ * In the middle of a multi-data write. See the comment
+ * in the SSIF_MULTI_n_PART case in the probe function
+ * for details on the intricacies of this.
+ */
int left;
ssif_inc_stat(ssif_info, sent_messages_parts);
@@ -864,15 +919,32 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
msg_done_handler(ssif_info, -EIO, NULL, 0);
}
} else {
+ unsigned long oflags, *flags;
+ bool got_alert;
+
ssif_inc_stat(ssif_info, sent_messages);
ssif_inc_stat(ssif_info, sent_messages_parts);
- /* Wait a jiffie then request the next message */
- ssif_info->retries_left = SSIF_RECV_RETRIES;
- ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
- mod_timer(&ssif_info->retry_timer,
- jiffies + SSIF_MSG_PART_JIFFIES);
- return;
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ got_alert = ssif_info->got_alert;
+ if (got_alert) {
+ ssif_info->got_alert = false;
+ ssif_info->waiting_alert = false;
+ }
+
+ if (got_alert) {
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ /* The alert already happened, try now. */
+ retry_timeout((unsigned long) ssif_info);
+ } else {
+ /* Wait a jiffie then request the next message */
+ ssif_info->waiting_alert = true;
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+ ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
}
}
@@ -881,6 +953,8 @@ static int start_resend(struct ssif_info *ssif_info)
int rv;
int command;
+ ssif_info->got_alert = false;
+
if (ssif_info->data_len > 32) {
command = SSIF_IPMI_MULTI_PART_REQUEST_START;
ssif_info->multi_data = ssif_info->data;
@@ -915,7 +989,7 @@ static int start_send(struct ssif_info *ssif_info,
return -E2BIG;
ssif_info->retries_left = SSIF_SEND_RETRIES;
- memcpy(ssif_info->data+1, data, len);
+ memcpy(ssif_info->data + 1, data, len);
ssif_info->data_len = len;
return start_resend(ssif_info);
}
@@ -1200,7 +1274,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
{
seq_puts(m, "ssif\n");
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -1243,6 +1317,8 @@ static int smi_stats_proc_show(struct seq_file *m, void *v)
ssif_get_stat(ssif_info, events));
seq_printf(m, "watchdog_pretimeouts: %u\n",
ssif_get_stat(ssif_info, watchdog_pretimeouts));
+ seq_printf(m, "alerts: %u\n",
+ ssif_get_stat(ssif_info, alerts));
return 0;
}
@@ -1258,6 +1334,23 @@ static const struct file_operations smi_stats_proc_ops = {
.release = single_release,
};
+static int strcmp_nospace(char *s1, char *s2)
+{
+ while (*s1 && *s2) {
+ while (isspace(*s1))
+ s1++;
+ while (isspace(*s2))
+ s2++;
+ if (*s1 > *s2)
+ return 1;
+ if (*s1 < *s2)
+ return -1;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
static struct ssif_addr_info *ssif_info_find(unsigned short addr,
char *adapter_name,
bool match_null_name)
@@ -1272,8 +1365,10 @@ restart:
/* One is NULL and one is not */
continue;
}
- if (strcmp(info->adapter_name, adapter_name))
- /* Names to not match */
+ if (adapter_name &&
+ strcmp_nospace(info->adapter_name,
+ adapter_name))
+ /* Names do not match */
continue;
}
found = info;
@@ -1306,6 +1401,12 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
return false;
}
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
+
static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
unsigned char msg[3];
@@ -1391,13 +1492,33 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
break;
case SSIF_MULTI_2_PART:
- if (ssif_info->max_xmit_msg_size > 64)
- ssif_info->max_xmit_msg_size = 64;
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
if (ssif_info->max_recv_msg_size > 62)
ssif_info->max_recv_msg_size = 62;
break;
case SSIF_MULTI_n_PART:
+ /*
+ * The specification is rather confusing at
+ * this point, but I think I understand what
+ * is meant. At least I have a workable
+ * solution. With multi-part messages, you
+ * cannot send a message that is a multiple of
+ * 32-bytes in length, because the start and
+ * middle messages are 32-bytes and the end
+ * message must be at least one byte. You
+ * can't fudge on an extra byte, that would
+ * screw up things like fru data writes. So
+ * we limit the length to 63 bytes. That way
+ * a 32-byte message gets sent as a single
+ * part. A larger message will be a 32-byte
+ * start and the next message is always going
+ * to be 1-31 bytes in length. Not ideal, but
+ * it should work.
+ */
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
break;
default:
@@ -1407,7 +1528,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
} else {
no_support:
/* Assume no multi-part or PEC support */
- pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+ pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
rv, len, resp[2]);
ssif_info->max_xmit_msg_size = 32;
@@ -1436,6 +1557,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto found;
}
+ ssif_info->global_enables = resp[3];
+
if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
ssif_info->has_event_buffer = true;
/* buffer is already enabled, nothing to do. */
@@ -1444,18 +1567,37 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
- msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
rv = do_cmd(client, 3, msg, &len, resp);
if (rv || (len < 2)) {
- pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+ pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
rv, len, resp[2]);
rv = 0; /* Not fatal */
goto found;
}
- if (resp[2] == 0)
+ if (resp[2] == 0) {
/* A successful return means the event buffer is supported. */
ssif_info->has_event_buffer = true;
+ ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+ pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0) {
+ /* A successful return means the alert is supported. */
+ ssif_info->supports_alert = true;
+ ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
+ }
found:
ssif_info->intf_num = atomic_inc_return(&next_intf);
@@ -1813,6 +1955,7 @@ static struct i2c_driver ssif_i2c_driver = {
},
.probe = ssif_probe,
.remove = ssif_remove,
+ .alert = ssif_alert,
.id_table = ssif_id,
.detect = ssif_detect
};
@@ -1832,7 +1975,7 @@ static int init_ipmi_ssif(void)
rv = new_ssif_client(addr[i], adapter_name[i],
dbg[i], slave_addrs[i],
SI_HARDCODED);
- if (!rv)
+ if (rv)
pr_err(PFX
"Couldn't add hardcoded device at addr 0x%x\n",
addr[i]);
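
The SSIF_MULTI_n_PART comment above reasons about why the transmit size is capped at 63 bytes. The standalone userspace sketch below is not part of the patch (all names are hypothetical); it simply works through that framing arithmetic for a few message lengths, assuming 32-byte start/middle parts and a 1-31 byte end part as the comment describes.

/*
 * Start and middle parts carry exactly 32 bytes and the end part 1-31
 * bytes, so a message whose length is a multiple of 32 (and larger than
 * 32) cannot be framed at all, which is why the driver caps
 * max_xmit_msg_size at 63: a 32-byte message goes out as a single part,
 * 33-63 bytes as a 32-byte start plus a 1-31 byte end.
 */
#include <stdio.h>

static void ssif_parts(int len)
{
	int middles, end;

	if (len <= 32) {
		printf("%3d bytes: single part\n", len);
		return;
	}
	if (len % 32 == 0) {
		printf("%3d bytes: would need a 0-byte end part, not representable\n",
		       len);
		return;
	}
	end = (len - 32) % 32;		/* 1-31 bytes */
	middles = (len - 32) / 32;	/* full 32-byte middle parts */
	printf("%3d bytes: 32-byte start + %d middle(s) + %d-byte end\n",
	       len, middles, end);
}

int main(void)
{
	int lens[] = { 30, 32, 33, 63, 64, 70 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		ssif_parts(lens[i]);
	return 0;
}
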
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 297110c12635..6b1721f978c2 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -26,7 +26,7 @@
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include <linux/uaccess.h>
@@ -607,16 +607,16 @@ static ssize_t write_null(struct file *file, const char __user *buf,
return count;
}
-static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
return 0;
}
-static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
- return iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(from);
+ iov_iter_advance(from, count);
+ return count;
}
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
@@ -718,7 +718,7 @@ static int open_port(struct inode *inode, struct file *filp)
#define zero_lseek null_lseek
#define full_lseek null_lseek
#define write_zero write_null
-#define aio_write_zero aio_write_null
+#define write_iter_zero write_iter_null
#define open_mem open_port
#define open_kmem open_mem
@@ -750,8 +750,8 @@ static const struct file_operations null_fops = {
.llseek = null_lseek,
.read = read_null,
.write = write_null,
- .aio_read = aio_read_null,
- .aio_write = aio_write_null,
+ .read_iter = read_iter_null,
+ .write_iter = write_iter_null,
.splice_write = splice_write_null,
};
@@ -764,10 +764,9 @@ static const struct file_operations __maybe_unused port_fops = {
static const struct file_operations zero_fops = {
.llseek = zero_lseek,
- .read = new_sync_read,
.write = write_zero,
.read_iter = read_iter_zero,
- .aio_write = aio_write_zero,
+ .write_iter = write_iter_zero,
.mmap = mmap_zero,
#ifndef CONFIG_MMU
.mmap_capabilities = zero_mmap_capabilities,
@@ -776,7 +775,6 @@ static const struct file_operations zero_fops = {
static const struct file_operations full_fops = {
.llseek = full_lseek,
- .read = new_sync_read,
.read_iter = read_iter_zero,
.write = write_full,
};
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index ffa97d261cf3..9fd5a91e0d81 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -140,12 +140,17 @@ static int misc_open(struct inode * inode, struct file * file)
goto fail;
}
+ /*
+ * Place the miscdevice in the file's
+ * private_data so it can be used by the
+ * file operations, including f_op->open below
+ */
+ file->private_data = c;
+
err = 0;
replace_fops(file, new_fops);
- if (file->f_op->open) {
- file->private_data = c;
+ if (file->f_op->open)
err = file->f_op->open(inode,file);
- }
fail:
mutex_unlock(&misc_mtx);
return err;
@@ -169,7 +174,9 @@ static const struct file_operations misc_fops = {
* the minor number requested is used.
*
* The structure passed is linked into the kernel and may not be
- * destroyed until it has been unregistered.
+ * destroyed until it has been unregistered. By default, an open()
+ * syscall to the device sets file->private_data to point to the
+ * structure. Drivers don't need open in fops for this.
*
* A zero is returned on success and a negative errno code for
* failure.
@@ -205,8 +212,9 @@ int misc_register(struct miscdevice * misc)
dev = MKDEV(MISC_MAJOR, misc->minor);
- misc->this_device = device_create(misc_class, misc->parent, dev,
- misc, "%s", misc->name);
+ misc->this_device =
+ device_create_with_groups(misc_class, misc->parent, dev,
+ misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
int i = DYNAMIC_MINORS - misc->minor - 1;
if (i < DYNAMIC_MINORS && i >= 0)
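
The misc_register() kerneldoc above now documents that open() pre-populates file->private_data with the miscdevice. Below is a minimal sketch of a hypothetical misc driver relying on that behaviour; it is not part of the patch, the device name, buffer and helper names are invented, and only standard miscdevice/file_operations calls are used.

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/string.h>

struct demo_dev {
	struct miscdevice misc;		/* recovered via container_of() */
	char banner[16];
};

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	/* misc_open() already stored the miscdevice in private_data. */
	struct miscdevice *misc = file->private_data;
	struct demo_dev *demo = container_of(misc, struct demo_dev, misc);

	return simple_read_from_buffer(buf, count, ppos, demo->banner,
				       strlen(demo->banner));
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.read	= demo_read,
	/* no .open method is needed just to get at private_data */
};

static struct demo_dev demo = {
	.misc = {
		.minor	= MISC_DYNAMIC_MINOR,
		.name	= "demo-misc",
		.fops	= &demo_fops,
	},
	.banner = "hello\n",
};

static int __init demo_init(void)
{
	return misc_register(&demo.misc);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo.misc);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
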
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 6e29bf2db536..5fc291c6157e 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -282,9 +282,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
#endif
static const struct file_operations raw_fops = {
- .read = new_sync_read,
.read_iter = blkdev_read_iter,
- .write = new_sync_write,
.write_iter = blkdev_write_iter,
.fsync = blkdev_fsync,
.open = raw_open,
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 02e76ac6d282..69f6b4acc377 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -27,7 +27,6 @@
#include <linux/types.h> /* size_t */
#include <linux/proc_fs.h>
#include <linux/fcntl.h> /* O_ACCMODE */
-#include <linux/aio.h>
#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9d4e37549eb2..3b84a8b1bfbe 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -25,13 +25,14 @@ menuconfig TCG_TPM
if TCG_TPM
config TCG_TIS
- tristate "TPM Interface Specification 1.2 Interface"
+ tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
depends on X86
---help---
If you have a TPM security chip that is compliant with the
- TCG TIS 1.2 TPM specification say Yes and it will be accessible
- from within Linux. To compile this driver as a module, choose
- M here; the module will be called tpm_tis.
+ TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux. To compile this driver as a module, choose M here;
+ the module will be called tpm_tis.
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
@@ -100,16 +101,6 @@ config TCG_IBMVTPM
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
-config TCG_TIS_I2C_ST33
- tristate "TPM Interface Specification 1.2 Interface (I2C - STMicroelectronics)"
- depends on I2C
- depends on GPIOLIB
- ---help---
- If you have a TPM security chip from STMicroelectronics working with
- an I2C bus say Yes and it will be accessible from within Linux.
- To compile this driver as a module, choose M here; the module will be
- called tpm_i2c_stm_st33.
-
config TCG_XEN
tristate "XEN TPM Interface"
depends on TCG_TPM && XEN
@@ -131,4 +122,5 @@ config TCG_CRB
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_crb.
+source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 990cf183931d..56e8f1f3dc7e 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -20,6 +20,6 @@ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
-obj-$(CONFIG_TCG_TIS_I2C_ST33) += tpm_i2c_stm_st33.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig
new file mode 100644
index 000000000000..09cb727864f0
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Kconfig
@@ -0,0 +1,30 @@
+config TCG_TIS_ST33ZP24
+ tristate "STMicroelectronics TPM Interface Specification 1.2 Interface"
+ depends on GPIOLIB
+ ---help---
+ STMicroelectronics ST33ZP24 core driver. It implements the core
+ TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will
+ register against it.
+
+ To compile this driver as a module, choose m here. The module will be called
+ tpm_st33zp24.
+
+config TCG_TIS_ST33ZP24_I2C
+ tristate "TPM 1.2 ST33ZP24 I2C support"
+ depends on TCG_TIS_ST33ZP24
+ depends on I2C
+ ---help---
+ This module adds support for the STMicroelectronics TPM security chip
+ ST33ZP24 with i2c interface.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_st33zp24_i2c.
+
+config TCG_TIS_ST33ZP24_SPI
+ tristate "TPM 1.2 ST33ZP24 SPI support"
+ depends on TCG_TIS_ST33ZP24
+ depends on SPI
+ ---help---
+ This module adds support for the STMicroelectronics TPM security chip
+ ST33ZP24 with spi interface.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_st33zp24_spi.
diff --git a/drivers/char/tpm/st33zp24/Makefile b/drivers/char/tpm/st33zp24/Makefile
new file mode 100644
index 000000000000..74a722e5e068
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for ST33ZP24 TPM 1.2 driver
+#
+
+tpm_st33zp24-objs = st33zp24.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += tpm_st33zp24.o
+
+tpm_st33zp24_i2c-objs = i2c.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_I2C) += tpm_st33zp24_i2c.o
+
+tpm_st33zp24_spi-objs = spi.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_SPI) += tpm_st33zp24_spi.o
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
new file mode 100644
index 000000000000..ad1ee180e0c2
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -0,0 +1,276 @@
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/tpm.h>
+#include <linux/platform_data/st33zp24.h>
+
+#include "st33zp24.h"
+
+#define TPM_DUMMY_BYTE 0xAA
+
+struct st33zp24_i2c_phy {
+ struct i2c_client *client;
+ u8 buf[TPM_BUFSIZE + 1];
+ int io_lpcpd;
+};
+
+/*
+ * write8_reg
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: Returns negative errno, or else the number of bytes written.
+ */
+static int write8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+ struct st33zp24_i2c_phy *phy = phy_id;
+
+ phy->buf[0] = tpm_register;
+ memcpy(phy->buf + 1, tpm_data, tpm_size);
+ return i2c_master_send(phy->client, phy->buf, tpm_size + 1);
+} /* write8_reg() */
+
+/*
+ * read8_reg
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the number of bytes read successfully; should be one on success.
+ */
+static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+ struct st33zp24_i2c_phy *phy = phy_id;
+ u8 status = 0;
+ u8 data;
+
+ data = TPM_DUMMY_BYTE;
+ status = write8_reg(phy, tpm_register, &data, 1);
+ if (status == 2)
+ status = i2c_master_recv(phy->client, tpm_data, tpm_size);
+ return status;
+} /* read8_reg() */
+
+/*
+ * st33zp24_i2c_send
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, the length of the data
+ * @return: the number of bytes written successfully; should be one on success.
+ */
+static int st33zp24_i2c_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ return write8_reg(phy_id, tpm_register | TPM_WRITE_DIRECTION, tpm_data,
+ tpm_size);
+}
+
+/*
+ * st33zp24_i2c_recv
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the number of bytes read successfully; should be one on success.
+ */
+static int st33zp24_i2c_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ return read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+}
+
+static const struct st33zp24_phy_ops i2c_phy_ops = {
+ .send = st33zp24_i2c_send,
+ .recv = st33zp24_i2c_recv,
+};
+
+#ifdef CONFIG_OF
+static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy)
+{
+ struct device_node *pp;
+ struct i2c_client *client = phy->client;
+ int gpio;
+ int ret;
+
+ pp = client->dev.of_node;
+ if (!pp) {
+ dev_err(&client->dev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ /* Get GPIO from device tree */
+ gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
+ if (gpio < 0) {
+ dev_err(&client->dev,
+ "Failed to retrieve lpcpd-gpios from dts.\n");
+ phy->io_lpcpd = -1;
+ /*
+ * lpcpd pin is not specified. This is not an issue as
+ * power management can also be managed by TPM-specific
+ * commands. So leave with a success status code.
+ */
+ return 0;
+ }
+ /* GPIO request and configuration */
+ ret = devm_gpio_request_one(&client->dev, gpio,
+ GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
+ if (ret) {
+ dev_err(&client->dev, "Failed to request lpcpd pin\n");
+ return -ENODEV;
+ }
+ phy->io_lpcpd = gpio;
+
+ return 0;
+}
+#else
+static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy)
+{
+ return -ENODEV;
+}
+#endif
+
+static int st33zp24_i2c_request_resources(struct i2c_client *client,
+ struct st33zp24_i2c_phy *phy)
+{
+ struct st33zp24_platform_data *pdata;
+ int ret;
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->dev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ /* store for later use */
+ phy->io_lpcpd = pdata->io_lpcpd;
+
+ if (gpio_is_valid(pdata->io_lpcpd)) {
+ ret = devm_gpio_request_one(&client->dev,
+ pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
+ "TPM IO_LPCPD");
+ if (ret) {
+ dev_err(&client->dev, "Failed to request lpcpd pin\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * st33zp24_i2c_probe initializes the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @param: id, the i2c_device_id struct.
+ * @return: 0 in case of success.
+ * a negative error code in other cases.
+ */
+static int st33zp24_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct st33zp24_platform_data *pdata;
+ struct st33zp24_i2c_phy *phy;
+
+ if (!client) {
+ pr_info("%s: i2c client is NULL. Device not accessible.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_info(&client->dev, "client not i2c capable\n");
+ return -ENODEV;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct st33zp24_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->client = client;
+ pdata = client->dev.platform_data;
+ if (!pdata && client->dev.of_node) {
+ ret = st33zp24_i2c_of_request_resources(phy);
+ if (ret)
+ return ret;
+ } else if (pdata) {
+ ret = st33zp24_i2c_request_resources(client, phy);
+ if (ret)
+ return ret;
+ }
+
+ return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq,
+ phy->io_lpcpd);
+}
+
+/*
+ * st33zp24_i2c_remove removes the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @return: 0 in case of success.
+ */
+static int st33zp24_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ return st33zp24_remove(chip);
+}
+
+static const struct i2c_device_id st33zp24_i2c_id[] = {
+ {TPM_ST33_I2C, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_st33zp24_i2c_match[] = {
+ { .compatible = "st,st33zp24-i2c", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend,
+ st33zp24_pm_resume);
+
+static struct i2c_driver st33zp24_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = TPM_ST33_I2C,
+ .pm = &st33zp24_i2c_ops,
+ .of_match_table = of_match_ptr(of_st33zp24_i2c_match),
+ },
+ .probe = st33zp24_i2c_probe,
+ .remove = st33zp24_i2c_remove,
+ .id_table = st33zp24_i2c_id
+};
+
+module_i2c_driver(st33zp24_i2c_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
new file mode 100644
index 000000000000..f0184a1b0c1c
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -0,0 +1,399 @@
+/*
+ * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/tpm.h>
+#include <linux/platform_data/st33zp24.h>
+
+#include "st33zp24.h"
+
+#define TPM_DATA_FIFO 0x24
+#define TPM_INTF_CAPABILITY 0x14
+
+#define TPM_DUMMY_BYTE 0x00
+
+#define MAX_SPI_LATENCY 15
+#define LOCALITY0 0
+
+#define ST33ZP24_OK 0x5A
+#define ST33ZP24_UNDEFINED_ERR 0x80
+#define ST33ZP24_BADLOCALITY 0x81
+#define ST33ZP24_TISREGISTER_UKNOWN 0x82
+#define ST33ZP24_LOCALITY_NOT_ACTIVATED 0x83
+#define ST33ZP24_HASH_END_BEFORE_HASH_START 0x84
+#define ST33ZP24_BAD_COMMAND_ORDER 0x85
+#define ST33ZP24_INCORECT_RECEIVED_LENGTH 0x86
+#define ST33ZP24_TPM_FIFO_OVERFLOW 0x89
+#define ST33ZP24_UNEXPECTED_READ_FIFO 0x8A
+#define ST33ZP24_UNEXPECTED_WRITE_FIFO 0x8B
+#define ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END 0x90
+#define ST33ZP24_DUMMY_BYTES 0x00
+
+/*
+ * A TPM command can be up to 2048 bytes and a TPM response can be up
+ * to 1024 bytes.
+ * Between command and response there are latency bytes (up to 15;
+ * on the st33zp24, 2 are usually enough).
+ *
+ * Overall, when sending a command and expecting an answer, the worst
+ * case is:
+ * 2048 (for the TPM command) + 1024 (for the TPM answer), plus
+ * some latency bytes before the answer is available (max 15).
+ * That gives 2048 + 1024 + 15.
+ */
+#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\
+ MAX_SPI_LATENCY)
+
+
+struct st33zp24_spi_phy {
+ struct spi_device *spi_device;
+ struct spi_transfer spi_xfer;
+ u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+ u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+
+ int io_lpcpd;
+ int latency;
+};
+
+static int st33zp24_status_to_errno(u8 code)
+{
+ switch (code) {
+ case ST33ZP24_OK:
+ return 0;
+ case ST33ZP24_UNDEFINED_ERR:
+ case ST33ZP24_BADLOCALITY:
+ case ST33ZP24_TISREGISTER_UKNOWN:
+ case ST33ZP24_LOCALITY_NOT_ACTIVATED:
+ case ST33ZP24_HASH_END_BEFORE_HASH_START:
+ case ST33ZP24_BAD_COMMAND_ORDER:
+ case ST33ZP24_UNEXPECTED_READ_FIFO:
+ case ST33ZP24_UNEXPECTED_WRITE_FIFO:
+ case ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END:
+ return -EPROTO;
+ case ST33ZP24_INCORECT_RECEIVED_LENGTH:
+ case ST33ZP24_TPM_FIFO_OVERFLOW:
+ return -EMSGSIZE;
+ case ST33ZP24_DUMMY_BYTES:
+ return -ENOSYS;
+ }
+ return code;
+}
+
+/*
+ * st33zp24_spi_send
+ * Send byte to the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: should be zero if success else a negative error code.
+ */
+static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ u8 data = 0;
+ int total_length = 0, nbr_dummy_bytes = 0, ret = 0;
+ struct st33zp24_spi_phy *phy = phy_id;
+ struct spi_device *dev = phy->spi_device;
+ u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf;
+ u8 *rx_buf = phy->spi_xfer.rx_buf;
+
+ /* Pre-Header */
+ data = TPM_WRITE_DIRECTION | LOCALITY0;
+ memcpy(tx_buf + total_length, &data, sizeof(data));
+ total_length++;
+ data = tpm_register;
+ memcpy(tx_buf + total_length, &data, sizeof(data));
+ total_length++;
+
+ if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) {
+ tx_buf[total_length++] = tpm_size >> 8;
+ tx_buf[total_length++] = tpm_size;
+ }
+
+ memcpy(&tx_buf[total_length], tpm_data, tpm_size);
+ total_length += tpm_size;
+
+ nbr_dummy_bytes = phy->latency;
+ memset(&tx_buf[total_length], TPM_DUMMY_BYTE, nbr_dummy_bytes);
+
+ phy->spi_xfer.len = total_length + nbr_dummy_bytes;
+
+ ret = spi_sync_transfer(dev, &phy->spi_xfer, 1);
+ if (ret == 0)
+ ret = rx_buf[total_length + nbr_dummy_bytes - 1];
+
+ return st33zp24_status_to_errno(ret);
+} /* st33zp24_spi_send() */
+
+/*
+ * read8_reg
+ * Recv byte from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: should be zero if success else a negative error code.
+ */
+static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+ u8 data = 0;
+ int total_length = 0, nbr_dummy_bytes, ret;
+ struct st33zp24_spi_phy *phy = phy_id;
+ struct spi_device *dev = phy->spi_device;
+ u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf;
+ u8 *rx_buf = phy->spi_xfer.rx_buf;
+
+ /* Pre-Header */
+ data = LOCALITY0;
+ memcpy(tx_buf + total_length, &data, sizeof(data));
+ total_length++;
+ data = tpm_register;
+ memcpy(tx_buf + total_length, &data, sizeof(data));
+ total_length++;
+
+ nbr_dummy_bytes = phy->latency;
+ memset(&tx_buf[total_length], TPM_DUMMY_BYTE,
+ nbr_dummy_bytes + tpm_size);
+
+ phy->spi_xfer.len = total_length + nbr_dummy_bytes + tpm_size;
+
+ /* header + status byte + size of the data + status byte */
+ ret = spi_sync_transfer(dev, &phy->spi_xfer, 1);
+ if (tpm_size > 0 && ret == 0) {
+ ret = rx_buf[total_length + nbr_dummy_bytes - 1];
+
+ memcpy(tpm_data, rx_buf + total_length + nbr_dummy_bytes,
+ tpm_size);
+ }
+
+ return ret;
+} /* read8_reg() */
+
+/*
+ * st33zp24_spi_recv
+ * Recv byte from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the number of bytes read successfully; should be one on success.
+ */
+static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ int ret;
+
+ ret = read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+ if (!st33zp24_status_to_errno(ret))
+ return tpm_size;
+ return ret;
+} /* st33zp24_spi_recv() */
+
+static int evaluate_latency(void *phy_id)
+{
+ struct st33zp24_spi_phy *phy = phy_id;
+ int latency = 1, status = 0;
+ u8 data = 0;
+
+ while (!status && latency < MAX_SPI_LATENCY) {
+ phy->latency = latency;
+ status = read8_reg(phy_id, TPM_INTF_CAPABILITY, &data, 1);
+ latency++;
+ }
+ return latency - 1;
+} /* evaluate_latency() */
+
+static const struct st33zp24_phy_ops spi_phy_ops = {
+ .send = st33zp24_spi_send,
+ .recv = st33zp24_spi_recv,
+};
+
+#ifdef CONFIG_OF
+static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy)
+{
+ struct device_node *pp;
+ struct spi_device *dev = phy->spi_device;
+ int gpio;
+ int ret;
+
+ pp = dev->dev.of_node;
+ if (!pp) {
+ dev_err(&dev->dev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ /* Get GPIO from device tree */
+ gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
+ if (gpio < 0) {
+ dev_err(&dev->dev,
+ "Failed to retrieve lpcpd-gpios from dts.\n");
+ phy->io_lpcpd = -1;
+ /*
+ * lpcpd pin is not specified. This is not an issue as
+ * power management can also be managed by TPM-specific
+ * commands. So leave with a success status code.
+ */
+ return 0;
+ }
+ /* GPIO request and configuration */
+ ret = devm_gpio_request_one(&dev->dev, gpio,
+ GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
+ if (ret) {
+ dev_err(&dev->dev, "Failed to request lpcpd pin\n");
+ return -ENODEV;
+ }
+ phy->io_lpcpd = gpio;
+
+ return 0;
+}
+#else
+static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy)
+{
+ return -ENODEV;
+}
+#endif
+
+static int tpm_stm_spi_request_resources(struct spi_device *dev,
+ struct st33zp24_spi_phy *phy)
+{
+ struct st33zp24_platform_data *pdata;
+ int ret;
+
+ pdata = dev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&dev->dev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ /* store for later use */
+ phy->io_lpcpd = pdata->io_lpcpd;
+
+ if (gpio_is_valid(pdata->io_lpcpd)) {
+ ret = devm_gpio_request_one(&dev->dev,
+ pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
+ "TPM IO_LPCPD");
+ if (ret) {
+ dev_err(&dev->dev, "%s : reset gpio_request failed\n",
+ __FILE__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * tpm_st33_spi_probe initializes the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ * @return: 0 in case of success.
+ * or a negative value describing the error.
+ */
+static int
+tpm_st33_spi_probe(struct spi_device *dev)
+{
+ int ret;
+ struct st33zp24_platform_data *pdata;
+ struct st33zp24_spi_phy *phy;
+
+ /* Check SPI platform functionalities */
+ if (!dev) {
+ pr_info("%s: dev is NULL. Device is not accessible.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct st33zp24_spi_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->spi_device = dev;
+ pdata = dev->dev.platform_data;
+ if (!pdata && dev->dev.of_node) {
+ ret = tpm_stm_spi_of_request_resources(phy);
+ if (ret)
+ return ret;
+ } else if (pdata) {
+ ret = tpm_stm_spi_request_resources(dev, phy);
+ if (ret)
+ return ret;
+ }
+
+ phy->spi_xfer.tx_buf = phy->tx_buf;
+ phy->spi_xfer.rx_buf = phy->rx_buf;
+
+ phy->latency = evaluate_latency(phy);
+ if (phy->latency <= 0)
+ return -ENODEV;
+
+ return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq,
+ phy->io_lpcpd);
+}
+
+/*
+ * tpm_st33_spi_remove removes the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ * @return: 0 in case of success.
+ */
+static int tpm_st33_spi_remove(struct spi_device *dev)
+{
+ struct tpm_chip *chip = spi_get_drvdata(dev);
+
+ return st33zp24_remove(chip);
+}
+
+static const struct spi_device_id st33zp24_spi_id[] = {
+ {TPM_ST33_SPI, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, st33zp24_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_st33zp24_spi_match[] = {
+ { .compatible = "st,st33zp24-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend,
+ st33zp24_pm_resume);
+
+static struct spi_driver tpm_st33_spi_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = TPM_ST33_SPI,
+ .pm = &st33zp24_spi_ops,
+ .of_match_table = of_match_ptr(of_st33zp24_spi_match),
+ },
+ .probe = tpm_st33_spi_probe,
+ .remove = tpm_st33_spi_remove,
+ .id_table = st33zp24_spi_id,
+};
+
+module_spi_driver(tpm_st33_spi_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
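
st33zp24_spi_send() above frames a register write as a pre-header (direction and locality, then the register), an optional 16-bit length for TPM_DATA_FIFO writes, the payload, and trailing latency dummies. The userspace sketch below is not part of the patch; the TPM_WRITE_DIRECTION value and the sample payload are assumptions, and it only reproduces the transmit-buffer layout so the framing can be inspected.

#include <stdio.h>
#include <string.h>

#define TPM_WRITE_DIRECTION	0x80	/* assumed to match st33zp24.h */
#define LOCALITY0		0x00
#define TPM_DATA_FIFO		0x24
#define TPM_DUMMY_BYTE		0x00

/* Builds [dir|locality][reg][len_hi][len_lo][payload...][dummies]. */
static int build_write_frame(unsigned char *tx, unsigned char reg,
			     const unsigned char *data, int len, int latency)
{
	int total = 0;

	tx[total++] = TPM_WRITE_DIRECTION | LOCALITY0;	/* pre-header */
	tx[total++] = reg;
	if (len > 0 && reg == TPM_DATA_FIFO) {		/* 16-bit FIFO length */
		tx[total++] = len >> 8;
		tx[total++] = len;
	}
	memcpy(tx + total, data, len);
	total += len;
	memset(tx + total, TPM_DUMMY_BYTE, latency);	/* latency bytes */
	return total + latency;				/* becomes spi_xfer.len */
}

int main(void)
{
	unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char tx[64];
	int i, n;

	n = build_write_frame(tx, TPM_DATA_FIFO, payload,
			      sizeof(payload), 2 /* latency */);
	for (i = 0; i < n; i++)
		printf("%02x ", tx[i]);
	printf("\n");
	return 0;
}
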
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
new file mode 100644
index 000000000000..8d626784cd8d
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -0,0 +1,698 @@
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/freezer.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_ACCESS 0x0
+#define TPM_STS 0x18
+#define TPM_DATA_FIFO 0x24
+#define TPM_INTF_CAPABILITY 0x14
+#define TPM_INT_STATUS 0x10
+#define TPM_INT_ENABLE 0x08
+
+#define LOCALITY0 0
+
+enum st33zp24_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum st33zp24_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum st33zp24_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_FIFO_AVALAIBLE_INT = 0x040,
+ TPM_INTF_WAKE_UP_READY_INT = 0x020,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_SHORT_TIMEOUT = 750,
+ TIS_LONG_TIMEOUT = 2000,
+};
+
+struct st33zp24_dev {
+ struct tpm_chip *chip;
+ void *phy_id;
+ const struct st33zp24_phy_ops *ops;
+ u32 intrs;
+ int io_lpcpd;
+};
+
+/*
+ * clear_interruption clears the pending interrupt.
+ * @param: tpm_dev, the tpm device.
+ * @return: the interrupt status value.
+ */
+static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
+{
+ u8 interrupt;
+
+ tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+ return interrupt;
+} /* clear_interruption() */
+
+/*
+ * st33zp24_cancel cancels the current command execution or
+ * sets STS to COMMAND READY.
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ */
+static void st33zp24_cancel(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev;
+ u8 data;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ data = TPM_STS_COMMAND_READY;
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+} /* st33zp24_cancel() */
+
+/*
+ * st33zp24_status returns the TPM_STS register
+ * @param: chip, the tpm chip description
+ * @return: the TPM_STS register value.
+ */
+static u8 st33zp24_status(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev;
+ u8 data;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
+ return data;
+} /* st33zp24_status() */
+
+/*
+ * check_locality checks if the locality is active
+ * @param: chip, the tpm chip description
+ * @return: the active locality or -EACCES.
+ */
+static int check_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev;
+ u8 data;
+ u8 status;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+ if (status && (data &
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+ return chip->vendor.locality;
+
+ return -EACCES;
+} /* check_locality() */
+
+/*
+ * request_locality requests the TPM locality
+ * @param: chip, the chip description
+ * @return: the active locality or a negative value.
+ */
+static int request_locality(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ long ret;
+ struct st33zp24_dev *tpm_dev;
+ u8 data;
+
+ if (check_locality(chip) == chip->vendor.locality)
+ return chip->vendor.locality;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ data = TPM_ACCESS_REQUEST_USE;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ stop = jiffies + chip->vendor.timeout_a;
+
+ /* Request locality is usually effective after the request */
+ do {
+ if (check_locality(chip) >= 0)
+ return chip->vendor.locality;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+
+ /* could not get locality */
+ return -EACCES;
+} /* request_locality() */
+
+/*
+ * release_locality releases the active locality
+ * @param: chip, the tpm chip description.
+ */
+static void release_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev;
+ u8 data;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+ data = TPM_ACCESS_ACTIVE_LOCALITY;
+
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+}
+
+/*
+ * get_burstcount returns the burstcount value
+ * @param: chip, the chip description
+ * @return: the burstcount or a negative value.
+ */
+static int get_burstcount(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ int burstcnt, status;
+ u8 tpm_reg, temp;
+ struct st33zp24_dev *tpm_dev;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ stop = jiffies + chip->vendor.timeout_d;
+ do {
+ tpm_reg = TPM_STS + 1;
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1);
+ if (status < 0)
+ return -EBUSY;
+
+ tpm_reg = TPM_STS + 2;
+ burstcnt = temp;
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1);
+ if (status < 0)
+ return -EBUSY;
+
+ burstcnt |= temp << 8;
+ if (burstcnt)
+ return burstcnt;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+} /* get_burstcount() */
+
+
+/*
+ * wait_for_tpm_stat_cond
+ * @param: chip, chip description
+ * @param: mask, expected mask value
+ * @param: check_cancel, is the command expected to be canceled?
+ * @param: canceled, did we receive a cancel request?
+ * @return: true if status == mask or if the command is canceled.
+ * false in other cases.
+ */
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * wait_for_stat waits for a TPM_STS value
+ * @param: chip, the tpm chip description
+ * @param: mask, the status mask to wait for
+ * @param: timeout, the timeout
+ * @param: queue, the wait queue.
+ * @param: check_cancel, can the command be cancelled?
+ * @return: the tpm status, 0 if success, -ETIME if timeout is reached.
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue, bool check_cancel)
+{
+ unsigned long stop;
+ int ret = 0;
+ bool canceled = false;
+ bool condition;
+ u32 cur_intrs;
+ u8 status;
+ struct st33zp24_dev *tpm_dev;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ /* check current status */
+ status = st33zp24_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->vendor.irq) {
+ cur_intrs = tpm_dev->intrs;
+ clear_interruption(tpm_dev);
+ enable_irq(chip->vendor.irq);
+
+ do {
+ if (ret == -ERESTARTSYS && freezing(current))
+ clear_thread_flag(TIF_SIGPENDING);
+
+ timeout = stop - jiffies;
+ if ((long) timeout <= 0)
+ return -1;
+
+ ret = wait_event_interruptible_timeout(*queue,
+ cur_intrs != tpm_dev->intrs,
+ timeout);
+ clear_interruption(tpm_dev);
+ condition = wait_for_tpm_stat_cond(chip, mask,
+ check_cancel, &canceled);
+ if (ret >= 0 && condition) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ } while (ret == -ERESTARTSYS && freezing(current));
+
+ disable_irq_nosync(chip->vendor.irq);
+
+ } else {
+ do {
+ msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+
+ return -ETIME;
+} /* wait_for_stat() */
+
+/*
+ * recv_data receives data from the TPM FIFO
+ * @param: chip, the tpm chip description
+ * @param: buf, the buffer where the received data is stored
+ * @param: count, the number of bytes to receive
+ * @return: the number of bytes read from TPM FIFO.
+ */
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int size = 0, burstcnt, len, ret;
+ struct st33zp24_dev *tpm_dev;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ while (size < count &&
+ wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue, true) == 0) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ len = min_t(int, burstcnt, count - size);
+ ret = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + size, len);
+ if (ret < 0)
+ return ret;
+
+ size += len;
+ }
+ return size;
+}
+
+/*
+ * tpm_ioserirq_handler the serirq irq handler
+ * @param: irq, the interrupt number
+ * @param: dev_id, the tpm chip description
+ * @return: the status of the handler.
+ */
+static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct st33zp24_dev *tpm_dev;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ tpm_dev->intrs++;
+ wake_up_interruptible(&chip->vendor.read_queue);
+ disable_irq_nosync(chip->vendor.irq);
+
+ return IRQ_HANDLED;
+} /* tpm_ioserirq_handler() */
+
+/*
+ * st33zp24_send sends TPM commands through the TPM phy.
+ *
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @param: buf, the buffer to send.
+ * @param: count, the number of bytes to send.
+ * @return: In case of success the number of bytes sent.
+ * In other case, a < 0 value describing the issue.
+ */
+static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
+ size_t len)
+{
+ u32 status, i, size, ordinal;
+ int burstcnt = 0;
+ int ret;
+ u8 data;
+ struct st33zp24_dev *tpm_dev;
+
+ if (!chip)
+ return -EBUSY;
+ if (len < TPM_HEADER_SIZE)
+ return -EBUSY;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ ret = request_locality(chip);
+ if (ret < 0)
+ return ret;
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ st33zp24_cancel(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
+ &chip->vendor.read_queue, false) < 0) {
+ ret = -ETIME;
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < len - 1;) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ size = min_t(int, len - i - 1, burstcnt);
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + i, size);
+ if (ret < 0)
+ goto out_err;
+
+ i += size;
+ }
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + len - 1, 1);
+ if (ret < 0)
+ goto out_err;
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ data = TPM_STS_GO;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+ if (ret < 0)
+ goto out_err;
+
+ if (chip->vendor.irq) {
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+ ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ tpm_calc_ordinal_duration(chip, ordinal),
+ &chip->vendor.read_queue, false);
+ if (ret < 0)
+ goto out_err;
+ }
+
+ return len;
+out_err:
+ st33zp24_cancel(chip);
+ release_locality(chip);
+ return ret;
+}
+
+/*
+ * st33zp24_recv receives the TPM response through the TPM phy.
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @param: buf, the buffer to store the data.
+ * @param: count, the maximum number of bytes to receive.
+ * @return: In case of success the number of bytes received.
+ * In other case, a < 0 value describing the issue.
+ */
+static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+ size_t count)
+{
+ int size = 0;
+ int expected;
+
+ if (!chip)
+ return -EBUSY;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *)(buf + 2));
+ if (expected > count) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ }
+
+out:
+ st33zp24_cancel(chip);
+ release_locality(chip);
+ return size;
+}
+
+/*
+ * st33zp24_req_canceled
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @param: status, the TPM status.
+ * @return: true if the TPM is ready to accept a new command.
+ */
+static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops st33zp24_tpm = {
+ .send = st33zp24_send,
+ .recv = st33zp24_recv,
+ .cancel = st33zp24_cancel,
+ .status = st33zp24_status,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = st33zp24_req_canceled,
+};
+
+/*
+ * st33zp24_probe initializes the TPM device
+ * @param: phy_id, the phy description.
+ * @param: ops, the phy send/recv operations.
+ * @param: dev, the associated device.
+ * @param: irq, the interrupt line (0 if unused).
+ * @param: io_lpcpd, the lpcpd gpio.
+ * @return: 0 in case of success.
+ * a negative error code in other cases.
+ */
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+ struct device *dev, int irq, int io_lpcpd)
+{
+ int ret;
+ u8 intmask = 0;
+ struct tpm_chip *chip;
+ struct st33zp24_dev *tpm_dev;
+
+ chip = tpmm_chip_alloc(dev, &st33zp24_tpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ tpm_dev = devm_kzalloc(dev, sizeof(struct st33zp24_dev),
+ GFP_KERNEL);
+ if (!tpm_dev)
+ return -ENOMEM;
+
+ TPM_VPRIV(chip) = tpm_dev;
+ tpm_dev->phy_id = phy_id;
+ tpm_dev->ops = ops;
+ /* Keep the lpcpd gpio so st33zp24_pm_suspend/resume can drive it. */
+ tpm_dev->io_lpcpd = io_lpcpd;
+
+ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ chip->vendor.locality = LOCALITY0;
+
+ if (irq) {
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&chip->vendor.read_queue);
+ tpm_dev->intrs = 0;
+
+ if (request_locality(chip) != LOCALITY0) {
+ ret = -ENODEV;
+ goto _tpm_clean_answer;
+ }
+
+ clear_interruption(tpm_dev);
+ ret = devm_request_irq(dev, irq, tpm_ioserirq_handler,
+ IRQF_TRIGGER_HIGH, "TPM SERIRQ management",
+ chip);
+ if (ret < 0) {
+ dev_err(&chip->dev, "TPM SERIRQ signals %d not available\n",
+ irq);
+ goto _tpm_clean_answer;
+ }
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_STS_VALID_INT
+ | TPM_INTF_DATA_AVAIL_INT;
+
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_ENABLE,
+ &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
+
+ intmask = TPM_GLOBAL_INT_ENABLE;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, (TPM_INT_ENABLE + 3),
+ &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
+
+ chip->vendor.irq = irq;
+
+ disable_irq_nosync(chip->vendor.irq);
+
+ tpm_gen_interrupt(chip);
+ }
+
+ tpm_get_timeouts(chip);
+ tpm_do_selftest(chip);
+
+ return tpm_chip_register(chip);
+_tpm_clean_answer:
+ dev_info(&chip->dev, "TPM initialization failed\n");
+ return ret;
+}
+EXPORT_SYMBOL(st33zp24_probe);
+
+/*
+ * st33zp24_remove removes the TPM device.
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @return: 0 in case of success.
+ */
+int st33zp24_remove(struct tpm_chip *chip)
+{
+ tpm_chip_unregister(chip);
+ return 0;
+}
+EXPORT_SYMBOL(st33zp24_remove);
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * st33zp24_pm_suspend suspends the TPM device.
+ * @param: dev, the device to suspend.
+ * @return: 0 in case of success.
+ */
+int st33zp24_pm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_dev *tpm_dev;
+ int ret = 0;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ if (gpio_is_valid(tpm_dev->io_lpcpd))
+ gpio_set_value(tpm_dev->io_lpcpd, 0);
+ else
+ ret = tpm_pm_suspend(dev);
+
+ return ret;
+} /* st33zp24_pm_suspend() */
+EXPORT_SYMBOL(st33zp24_pm_suspend);
+
+/*
+ * st33zp24_pm_resume resumes the TPM device.
+ * @param: dev, the device to resume.
+ * @return: 0 in case of success.
+ */
+int st33zp24_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_dev *tpm_dev;
+ int ret = 0;
+
+ tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+
+ if (gpio_is_valid(tpm_dev->io_lpcpd)) {
+ gpio_set_value(tpm_dev->io_lpcpd, 1);
+ ret = wait_for_stat(chip,
+ TPM_STS_VALID, chip->vendor.timeout_b,
+ &chip->vendor.read_queue, false);
+ } else {
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm_do_selftest(chip);
+ }
+ return ret;
+} /* st33zp24_pm_resume() */
+EXPORT_SYMBOL(st33zp24_pm_resume);
+#endif
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
new file mode 100644
index 000000000000..c207cebf67dd
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -0,0 +1,37 @@
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_ST33ZP24_H__
+#define __LOCAL_ST33ZP24_H__
+
+#define TPM_WRITE_DIRECTION 0x80
+#define TPM_BUFSIZE 2048
+
+struct st33zp24_phy_ops {
+ int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+ int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+};
+
+#ifdef CONFIG_PM_SLEEP
+int st33zp24_pm_suspend(struct device *dev);
+int st33zp24_pm_resume(struct device *dev);
+#endif
+
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+ struct device *dev, int irq, int io_lpcpd);
+int st33zp24_remove(struct tpm_chip *chip);
+#endif /* __LOCAL_ST33ZP24_H__ */
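The header above is the whole contract between the st33zp24 core and its bus backends: a backend supplies register-level send/recv callbacks in st33zp24_phy_ops and hands them to st33zp24_probe(). As a rough sketch only (the helper names and the exact bus sequencing below are invented for illustration, not taken from the patch), an I2C backend could look like this:

#include <linux/i2c.h>
#include <linux/string.h>
#include "st33zp24.h"

struct st33zp24_i2c_phy {
	struct i2c_client *client;
	u8 buf[TPM_BUFSIZE + 1];
};

/* Write tpm_size bytes to a TIS register over I2C. */
static int st33zp24_i2c_send(void *phy_id, u8 tpm_register,
			     u8 *tpm_data, int tpm_size)
{
	struct st33zp24_i2c_phy *phy = phy_id;

	phy->buf[0] = tpm_register | TPM_WRITE_DIRECTION;
	memcpy(phy->buf + 1, tpm_data, tpm_size);
	return i2c_master_send(phy->client, phy->buf, tpm_size + 1);
}

/* Select a TIS register, then read tpm_size bytes back. */
static int st33zp24_i2c_recv(void *phy_id, u8 tpm_register,
			     u8 *tpm_data, int tpm_size)
{
	struct st33zp24_i2c_phy *phy = phy_id;
	int ret;

	phy->buf[0] = tpm_register;
	ret = i2c_master_send(phy->client, phy->buf, 1);
	if (ret < 0)
		return ret;
	return i2c_master_recv(phy->client, tpm_data, tpm_size);
}

static const struct st33zp24_phy_ops i2c_phy_ops = {
	.send = st33zp24_i2c_send,
	.recv = st33zp24_i2c_recv,
};

/*
 * The backend's own probe() would allocate a struct st33zp24_i2c_phy,
 * fill in ->client, and simply return
 *	st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq, gpio);
 */

Because the core never touches the bus directly, an SPI backend only has to provide a different pair of callbacks.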
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e096e9cddb40..283f00a7f036 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -170,6 +170,41 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
device_unregister(&chip->dev);
}
+static int tpm1_chip_register(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ rc = tpm_sysfs_add_device(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm_add_ppi(chip);
+ if (rc) {
+ tpm_sysfs_del_device(chip);
+ return rc;
+ }
+
+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
+
+ return 0;
+}
+
+static void tpm1_chip_unregister(struct tpm_chip *chip)
+{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
+
+ if (chip->bios_dir)
+ tpm_bios_log_teardown(chip->bios_dir);
+
+ tpm_remove_ppi(chip);
+
+ tpm_sysfs_del_device(chip);
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -185,22 +220,13 @@ int tpm_chip_register(struct tpm_chip *chip)
{
int rc;
- /* Populate sysfs for TPM1 devices. */
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- rc = tpm_sysfs_add_device(chip);
- if (rc)
- goto del_misc;
-
- rc = tpm_add_ppi(chip);
- if (rc)
- goto del_sysfs;
-
- chip->bios_dir = tpm_bios_log_setup(chip->devname);
- }
+ rc = tpm1_chip_register(chip);
+ if (rc)
+ return rc;
rc = tpm_dev_add_device(chip);
if (rc)
- return rc;
+ goto out_err;
/* Make the chip available. */
spin_lock(&driver_lock);
@@ -210,10 +236,8 @@ int tpm_chip_register(struct tpm_chip *chip)
chip->flags |= TPM_CHIP_FLAG_REGISTERED;
return 0;
-del_sysfs:
- tpm_sysfs_del_device(chip);
-del_misc:
- tpm_dev_del_device(chip);
+out_err:
+ tpm1_chip_unregister(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
@@ -238,13 +262,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
spin_unlock(&driver_lock);
synchronize_rcu();
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- if (chip->bios_dir)
- tpm_bios_log_teardown(chip->bios_dir);
- tpm_remove_ppi(chip);
- tpm_sysfs_del_device(chip);
- }
-
+ tpm1_chip_unregister(chip);
tpm_dev_del_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
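Folding the TPM 1.x-only sysfs, PPI and event-log handling into tpm1_chip_register()/tpm1_chip_unregister() leaves the caller-facing lifecycle unchanged: a driver still pairs tpmm_chip_alloc() plus tpm_chip_register() in probe with tpm_chip_unregister() in remove, exactly as the st33zp24 core above does. A condensed sketch, with a hypothetical driver name:

static int example_tpm_probe(struct device *dev)
{
	struct tpm_chip *chip;

	/* devres-managed allocation; the chip also becomes dev's drvdata */
	chip = tpmm_chip_alloc(dev, &example_tpm_ops);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* ... program timeouts, request the IRQ, run the self-test ... */

	/* TPM1 sysfs/PPI/event log first, then the character device */
	return tpm_chip_register(chip);
}

static int example_tpm_remove(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_chip_unregister(chip);	/* tears the same pieces down */
	return 0;
}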
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
deleted file mode 100644
index 612845b36c29..000000000000
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ /dev/null
@@ -1,911 +0,0 @@
-/*
- * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
- * Copyright (C) 2009, 2010, 2014 STMicroelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * STMicroelectronics version 1.2.1, Copyright (C) 2014
- * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
- * This is free software, and you are welcome to redistribute it
- * under certain conditions.
- *
- * @Author: Christophe RICARD tpmsupport@st.com
- *
- * @File: tpm_stm_st33_i2c.c
- *
- * @Synopsis:
- * 09/15/2010: First shot driver tpm_tis driver for
- * lpc is used as model.
- */
-
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/sysfs.h>
-#include <linux/gpio.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
-
-#include <linux/platform_data/tpm_stm_st33.h>
-#include "tpm.h"
-
-#define TPM_ACCESS 0x0
-#define TPM_STS 0x18
-#define TPM_HASH_END 0x20
-#define TPM_DATA_FIFO 0x24
-#define TPM_HASH_DATA 0x24
-#define TPM_HASH_START 0x28
-#define TPM_INTF_CAPABILITY 0x14
-#define TPM_INT_STATUS 0x10
-#define TPM_INT_ENABLE 0x08
-
-#define TPM_DUMMY_BYTE 0xAA
-#define TPM_WRITE_DIRECTION 0x80
-#define TPM_HEADER_SIZE 10
-#define TPM_BUFSIZE 2048
-
-#define LOCALITY0 0
-
-
-enum stm33zp24_access {
- TPM_ACCESS_VALID = 0x80,
- TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
- TPM_ACCESS_REQUEST_PENDING = 0x04,
- TPM_ACCESS_REQUEST_USE = 0x02,
-};
-
-enum stm33zp24_status {
- TPM_STS_VALID = 0x80,
- TPM_STS_COMMAND_READY = 0x40,
- TPM_STS_GO = 0x20,
- TPM_STS_DATA_AVAIL = 0x10,
- TPM_STS_DATA_EXPECT = 0x08,
-};
-
-enum stm33zp24_int_flags {
- TPM_GLOBAL_INT_ENABLE = 0x80,
- TPM_INTF_CMD_READY_INT = 0x080,
- TPM_INTF_FIFO_AVALAIBLE_INT = 0x040,
- TPM_INTF_WAKE_UP_READY_INT = 0x020,
- TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
- TPM_INTF_STS_VALID_INT = 0x002,
- TPM_INTF_DATA_AVAIL_INT = 0x001,
-};
-
-enum tis_defaults {
- TIS_SHORT_TIMEOUT = 750,
- TIS_LONG_TIMEOUT = 2000,
-};
-
-struct tpm_stm_dev {
- struct i2c_client *client;
- struct tpm_chip *chip;
- u8 buf[TPM_BUFSIZE + 1];
- u32 intrs;
- int io_lpcpd;
-};
-
-/*
- * write8_reg
- * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
- * @param: tpm_register, the tpm tis register where the data should be written
- * @param: tpm_data, the tpm_data to write inside the tpm_register
- * @param: tpm_size, The length of the data
- * @return: Returns negative errno, or else the number of bytes written.
- */
-static int write8_reg(struct tpm_stm_dev *tpm_dev, u8 tpm_register,
- u8 *tpm_data, u16 tpm_size)
-{
- tpm_dev->buf[0] = tpm_register;
- memcpy(tpm_dev->buf + 1, tpm_data, tpm_size);
- return i2c_master_send(tpm_dev->client, tpm_dev->buf, tpm_size + 1);
-} /* write8_reg() */
-
-/*
- * read8_reg
- * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
- * @param: tpm_register, the tpm tis register where the data should be read
- * @param: tpm_data, the TPM response
- * @param: tpm_size, tpm TPM response size to read.
- * @return: number of byte read successfully: should be one if success.
- */
-static int read8_reg(struct tpm_stm_dev *tpm_dev, u8 tpm_register,
- u8 *tpm_data, int tpm_size)
-{
- u8 status = 0;
- u8 data;
-
- data = TPM_DUMMY_BYTE;
- status = write8_reg(tpm_dev, tpm_register, &data, 1);
- if (status == 2)
- status = i2c_master_recv(tpm_dev->client, tpm_data, tpm_size);
- return status;
-} /* read8_reg() */
-
-/*
- * I2C_WRITE_DATA
- * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
- * @param: tpm_dev, the chip description
- * @param: tpm_register, the tpm tis register where the data should be written
- * @param: tpm_data, the tpm_data to write inside the tpm_register
- * @param: tpm_size, The length of the data
- * @return: number of byte written successfully: should be one if success.
- */
-#define I2C_WRITE_DATA(tpm_dev, tpm_register, tpm_data, tpm_size) \
- (write8_reg(tpm_dev, tpm_register | \
- TPM_WRITE_DIRECTION, tpm_data, tpm_size))
-
-/*
- * I2C_READ_DATA
- * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
- * @param: tpm_dev, the chip description
- * @param: tpm_register, the tpm tis register where the data should be read
- * @param: tpm_data, the TPM response
- * @param: tpm_size, tpm TPM response size to read.
- * @return: number of byte read successfully: should be one if success.
- */
-#define I2C_READ_DATA(tpm_dev, tpm_register, tpm_data, tpm_size) \
- (read8_reg(tpm_dev, tpm_register, tpm_data, tpm_size))
-
-/*
- * clear_interruption
- * clear the TPM interrupt register.
- * @param: tpm, the chip description
- * @return: the TPM_INT_STATUS value
- */
-static u8 clear_interruption(struct tpm_stm_dev *tpm_dev)
-{
- u8 interrupt;
-
- I2C_READ_DATA(tpm_dev, TPM_INT_STATUS, &interrupt, 1);
- I2C_WRITE_DATA(tpm_dev, TPM_INT_STATUS, &interrupt, 1);
- return interrupt;
-} /* clear_interruption() */
-
-/*
- * tpm_stm_i2c_cancel, cancel is not implemented.
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
- */
-static void tpm_stm_i2c_cancel(struct tpm_chip *chip)
-{
- struct tpm_stm_dev *tpm_dev;
- u8 data;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- data = TPM_STS_COMMAND_READY;
- I2C_WRITE_DATA(tpm_dev, TPM_STS, &data, 1);
-} /* tpm_stm_i2c_cancel() */
-
-/*
- * tpm_stm_spi_status return the TPM_STS register
- * @param: chip, the tpm chip description
- * @return: the TPM_STS register value.
- */
-static u8 tpm_stm_i2c_status(struct tpm_chip *chip)
-{
- struct tpm_stm_dev *tpm_dev;
- u8 data;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- I2C_READ_DATA(tpm_dev, TPM_STS, &data, 1);
- return data;
-} /* tpm_stm_i2c_status() */
-
-
-/*
- * check_locality if the locality is active
- * @param: chip, the tpm chip description
- * @return: the active locality or -EACCESS.
- */
-static int check_locality(struct tpm_chip *chip)
-{
- struct tpm_stm_dev *tpm_dev;
- u8 data;
- u8 status;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- status = I2C_READ_DATA(tpm_dev, TPM_ACCESS, &data, 1);
- if (status && (data &
- (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
- (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
- return chip->vendor.locality;
-
- return -EACCES;
-} /* check_locality() */
-
-/*
- * request_locality request the TPM locality
- * @param: chip, the chip description
- * @return: the active locality or EACCESS.
- */
-static int request_locality(struct tpm_chip *chip)
-{
- unsigned long stop;
- long ret;
- struct tpm_stm_dev *tpm_dev;
- u8 data;
-
- if (check_locality(chip) == chip->vendor.locality)
- return chip->vendor.locality;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- data = TPM_ACCESS_REQUEST_USE;
- ret = I2C_WRITE_DATA(tpm_dev, TPM_ACCESS, &data, 1);
- if (ret < 0)
- goto end;
-
- stop = jiffies + chip->vendor.timeout_a;
-
- /* Request locality is usually effective after the request */
- do {
- if (check_locality(chip) >= 0)
- return chip->vendor.locality;
- msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
- ret = -EACCES;
-end:
- return ret;
-} /* request_locality() */
-
-/*
- * release_locality release the active locality
- * @param: chip, the tpm chip description.
- */
-static void release_locality(struct tpm_chip *chip)
-{
- struct tpm_stm_dev *tpm_dev;
- u8 data;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
- data = TPM_ACCESS_ACTIVE_LOCALITY;
-
- I2C_WRITE_DATA(tpm_dev, TPM_ACCESS, &data, 1);
-}
-
-/*
- * get_burstcount return the burstcount address 0x19 0x1A
- * @param: chip, the chip description
- * return: the burstcount.
- */
-static int get_burstcount(struct tpm_chip *chip)
-{
- unsigned long stop;
- int burstcnt, status;
- u8 tpm_reg, temp;
- struct tpm_stm_dev *tpm_dev;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- stop = jiffies + chip->vendor.timeout_d;
- do {
- tpm_reg = TPM_STS + 1;
- status = I2C_READ_DATA(tpm_dev, tpm_reg, &temp, 1);
- if (status < 0)
- goto end;
-
- tpm_reg = tpm_reg + 1;
- burstcnt = temp;
- status = I2C_READ_DATA(tpm_dev, tpm_reg, &temp, 1);
- if (status < 0)
- goto end;
-
- burstcnt |= temp << 8;
- if (burstcnt)
- return burstcnt;
- msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
-
-end:
- return -EBUSY;
-} /* get_burstcount() */
-
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
- bool check_cancel, bool *canceled)
-{
- u8 status = chip->ops->status(chip);
-
- *canceled = false;
- if ((status & mask) == mask)
- return true;
- if (check_cancel && chip->ops->req_canceled(chip, status)) {
- *canceled = true;
- return true;
- }
- return false;
-}
-
-/*
- * interrupt_to_status
- * @param: irq_mask, the irq mask value to wait
- * @return: the corresponding tpm_sts value
- */
-static u8 interrupt_to_status(u8 irq_mask)
-{
- u8 status = 0;
-
- if ((irq_mask & TPM_INTF_STS_VALID_INT) == TPM_INTF_STS_VALID_INT)
- status |= TPM_STS_VALID;
- if ((irq_mask & TPM_INTF_DATA_AVAIL_INT) == TPM_INTF_DATA_AVAIL_INT)
- status |= TPM_STS_DATA_AVAIL;
- if ((irq_mask & TPM_INTF_CMD_READY_INT) == TPM_INTF_CMD_READY_INT)
- status |= TPM_STS_COMMAND_READY;
-
- return status;
-} /* status_to_interrupt() */
-
-/*
- * wait_for_stat wait for a TPM_STS value
- * @param: chip, the tpm chip description
- * @param: mask, the value mask to wait
- * @param: timeout, the timeout
- * @param: queue, the wait queue.
- * @param: check_cancel, does the command can be cancelled ?
- * @return: the tpm status, 0 if success, -ETIME if timeout is reached.
- */
-static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel)
-{
- unsigned long stop;
- int ret;
- bool canceled = false;
- bool condition;
- u32 cur_intrs;
- u8 interrupt, status;
- struct tpm_stm_dev *tpm_dev;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- /* check current status */
- status = tpm_stm_i2c_status(chip);
- if ((status & mask) == mask)
- return 0;
-
- stop = jiffies + timeout;
-
- if (chip->vendor.irq) {
- cur_intrs = tpm_dev->intrs;
- interrupt = clear_interruption(tpm_dev);
- enable_irq(chip->vendor.irq);
-
-again:
- timeout = stop - jiffies;
- if ((long) timeout <= 0)
- return -1;
-
- ret = wait_event_interruptible_timeout(*queue,
- cur_intrs != tpm_dev->intrs, timeout);
-
- interrupt |= clear_interruption(tpm_dev);
- status = interrupt_to_status(interrupt);
- condition = wait_for_tpm_stat_cond(chip, mask,
- check_cancel, &canceled);
-
- if (ret >= 0 && condition) {
- if (canceled)
- return -ECANCELED;
- return 0;
- }
- if (ret == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- disable_irq_nosync(chip->vendor.irq);
-
- } else {
- do {
- msleep(TPM_TIMEOUT);
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
- }
-
- return -ETIME;
-} /* wait_for_stat() */
-
-/*
- * recv_data receive data
- * @param: chip, the tpm chip description
- * @param: buf, the buffer where the data are received
- * @param: count, the number of data to receive
- * @return: the number of bytes read from TPM FIFO.
- */
-static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
-{
- int size = 0, burstcnt, len, ret;
- struct tpm_stm_dev *tpm_dev;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- while (size < count &&
- wait_for_stat(chip,
- TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- chip->vendor.timeout_c,
- &chip->vendor.read_queue, true) == 0) {
- burstcnt = get_burstcount(chip);
- if (burstcnt < 0)
- return burstcnt;
- len = min_t(int, burstcnt, count - size);
- ret = I2C_READ_DATA(tpm_dev, TPM_DATA_FIFO, buf + size, len);
- if (ret < 0)
- return ret;
-
- size += len;
- }
- return size;
-}
-
-/*
- * tpm_ioserirq_handler the serirq irq handler
- * @param: irq, the tpm chip description
- * @param: dev_id, the description of the chip
- * @return: the status of the handler.
- */
-static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
-{
- struct tpm_chip *chip = dev_id;
- struct tpm_stm_dev *tpm_dev;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
-
- tpm_dev->intrs++;
- wake_up_interruptible(&chip->vendor.read_queue);
- disable_irq_nosync(chip->vendor.irq);
-
- return IRQ_HANDLED;
-} /* tpm_ioserirq_handler() */
-
-
-/*
- * tpm_stm_i2c_send send TPM commands through the I2C bus.
- *
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
- * @param: buf, the buffer to send.
- * @param: count, the number of bytes to send.
- * @return: In case of success the number of bytes sent.
- * In other case, a < 0 value describing the issue.
- */
-static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
- size_t len)
-{
- u32 status, i, size;
- int burstcnt = 0;
- int ret;
- u8 data;
- struct i2c_client *client;
- struct tpm_stm_dev *tpm_dev;
-
- if (!chip)
- return -EBUSY;
- if (len < TPM_HEADER_SIZE)
- return -EBUSY;
-
- tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
- client = tpm_dev->client;
-
- client->flags = 0;
-
- ret = request_locality(chip);
- if (ret < 0)
- return ret;
-
- status = tpm_stm_i2c_status(chip);
- if ((status & TPM_STS_COMMAND_READY) == 0) {
- tpm_stm_i2c_cancel(chip);
- if (wait_for_stat
- (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
- &chip->vendor.read_queue, false) < 0) {
- ret = -ETIME;
- goto out_err;
- }
- }
-
- for (i = 0; i < len - 1;) {
- burstcnt = get_burstcount(chip);
- if (burstcnt < 0)
- return burstcnt;
- size = min_t(int, len - i - 1, burstcnt);
- ret = I2C_WRITE_DATA(tpm_dev, TPM_DATA_FIFO, buf + i, size);
- if (ret < 0)
- goto out_err;
-
- i += size;
- }
-
- status = tpm_stm_i2c_status(chip);
- if ((status & TPM_STS_DATA_EXPECT) == 0) {
- ret = -EIO;
- goto out_err;
- }
-
- ret = I2C_WRITE_DATA(tpm_dev, TPM_DATA_FIFO, buf + len - 1, 1);
- if (ret < 0)
- goto out_err;
-
- status = tpm_stm_i2c_status(chip);
- if ((status & TPM_STS_DATA_EXPECT) != 0) {
- ret = -EIO;
- goto out_err;
- }
-
- data = TPM_STS_GO;
- I2C_WRITE_DATA(tpm_dev, TPM_STS, &data, 1);
-
- return len;
-out_err:
- tpm_stm_i2c_cancel(chip);
- release_locality(chip);
- return ret;
-}
-
-/*
- * tpm_stm_i2c_recv received TPM response through the I2C bus.
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @param: buf, the buffer to store datas.
- * @param: count, the number of bytes to send.
- * @return: In case of success the number of bytes received.
- * In other case, a < 0 value describing the issue.
- */
-static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
- size_t count)
-{
- int size = 0;
- int expected;
-
- if (!chip)
- return -EBUSY;
-
- if (count < TPM_HEADER_SIZE) {
- size = -EIO;
- goto out;
- }
-
- size = recv_data(chip, buf, TPM_HEADER_SIZE);
- if (size < TPM_HEADER_SIZE) {
- dev_err(chip->pdev, "Unable to read header\n");
- goto out;
- }
-
- expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if (expected > count) {
- size = -EIO;
- goto out;
- }
-
- size += recv_data(chip, &buf[TPM_HEADER_SIZE],
- expected - TPM_HEADER_SIZE);
- if (size < expected) {
- dev_err(chip->pdev, "Unable to read remainder of result\n");
- size = -ETIME;
- goto out;
- }
-
-out:
- chip->ops->cancel(chip);
- release_locality(chip);
- return size;
-}
-
-static bool tpm_stm_i2c_req_canceled(struct tpm_chip *chip, u8 status)
-{
- return (status == TPM_STS_COMMAND_READY);
-}
-
-static const struct tpm_class_ops st_i2c_tpm = {
- .send = tpm_stm_i2c_send,
- .recv = tpm_stm_i2c_recv,
- .cancel = tpm_stm_i2c_cancel,
- .status = tpm_stm_i2c_status,
- .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_canceled = tpm_stm_i2c_req_canceled,
-};
-
-#ifdef CONFIG_OF
-static int tpm_stm_i2c_of_request_resources(struct tpm_chip *chip)
-{
- struct device_node *pp;
- struct tpm_stm_dev *tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
- struct i2c_client *client = tpm_dev->client;
- int gpio;
- int ret;
-
- pp = client->dev.of_node;
- if (!pp) {
- dev_err(chip->pdev, "No platform data\n");
- return -ENODEV;
- }
-
- /* Get GPIO from device tree */
- gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
- if (gpio < 0) {
- dev_err(chip->pdev, "Failed to retrieve lpcpd-gpios from dts.\n");
- tpm_dev->io_lpcpd = -1;
- /*
- * lpcpd pin is not specified. This is not an issue as
- * power management can be also managed by TPM specific
- * commands. So leave with a success status code.
- */
- return 0;
- }
- /* GPIO request and configuration */
- ret = devm_gpio_request_one(&client->dev, gpio,
- GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
- if (ret) {
- dev_err(chip->pdev, "Failed to request lpcpd pin\n");
- return -ENODEV;
- }
- tpm_dev->io_lpcpd = gpio;
-
- return 0;
-}
-#else
-static int tpm_stm_i2c_of_request_resources(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-
-static int tpm_stm_i2c_request_resources(struct i2c_client *client,
- struct tpm_chip *chip)
-{
- struct st33zp24_platform_data *pdata;
- struct tpm_stm_dev *tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);
- int ret;
-
- pdata = client->dev.platform_data;
- if (!pdata) {
- dev_err(chip->pdev, "No platform data\n");
- return -ENODEV;
- }
-
- /* store for late use */
- tpm_dev->io_lpcpd = pdata->io_lpcpd;
-
- if (gpio_is_valid(pdata->io_lpcpd)) {
- ret = devm_gpio_request_one(&client->dev,
- pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
- "TPM IO_LPCPD");
- if (ret) {
- dev_err(chip->pdev, "%s : reset gpio_request failed\n",
- __FILE__);
- return ret;
- }
- }
-
- return 0;
-}
-
-/*
- * tpm_stm_i2c_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
- * @param: id, the i2c_device_id struct.
- * @return: 0 in case of success.
- * -1 in other case.
- */
-static int
-tpm_stm_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
-{
- int ret;
- u8 intmask = 0;
- struct tpm_chip *chip;
- struct st33zp24_platform_data *platform_data;
- struct tpm_stm_dev *tpm_dev;
-
- if (!client) {
- pr_info("%s: i2c client is NULL. Device not accessible.\n",
- __func__);
- return -ENODEV;
- }
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_info(&client->dev, "client not i2c capable\n");
- return -ENODEV;
- }
-
- tpm_dev = devm_kzalloc(&client->dev, sizeof(struct tpm_stm_dev),
- GFP_KERNEL);
- if (!tpm_dev)
- return -ENOMEM;
-
- chip = tpmm_chip_alloc(&client->dev, &st_i2c_tpm);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- TPM_VPRIV(chip) = tpm_dev;
- tpm_dev->client = client;
-
- platform_data = client->dev.platform_data;
- if (!platform_data && client->dev.of_node) {
- ret = tpm_stm_i2c_of_request_resources(chip);
- if (ret)
- goto _tpm_clean_answer;
- } else if (platform_data) {
- ret = tpm_stm_i2c_request_resources(client, chip);
- if (ret)
- goto _tpm_clean_answer;
- }
-
- chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
-
- chip->vendor.locality = LOCALITY0;
-
- if (client->irq) {
- /* INTERRUPT Setup */
- init_waitqueue_head(&chip->vendor.read_queue);
- tpm_dev->intrs = 0;
-
- if (request_locality(chip) != LOCALITY0) {
- ret = -ENODEV;
- goto _tpm_clean_answer;
- }
-
- clear_interruption(tpm_dev);
- ret = devm_request_irq(&client->dev, client->irq,
- tpm_ioserirq_handler,
- IRQF_TRIGGER_HIGH,
- "TPM SERIRQ management", chip);
- if (ret < 0) {
- dev_err(chip->pdev, "TPM SERIRQ signals %d not available\n",
- client->irq);
- goto _tpm_clean_answer;
- }
-
- intmask |= TPM_INTF_CMD_READY_INT
- | TPM_INTF_STS_VALID_INT
- | TPM_INTF_DATA_AVAIL_INT;
-
- ret = I2C_WRITE_DATA(tpm_dev, TPM_INT_ENABLE, &intmask, 1);
- if (ret < 0)
- goto _tpm_clean_answer;
-
- intmask = TPM_GLOBAL_INT_ENABLE;
- ret = I2C_WRITE_DATA(tpm_dev, (TPM_INT_ENABLE + 3),
- &intmask, 1);
- if (ret < 0)
- goto _tpm_clean_answer;
-
- chip->vendor.irq = client->irq;
-
- disable_irq_nosync(chip->vendor.irq);
-
- tpm_gen_interrupt(chip);
- }
-
- tpm_get_timeouts(chip);
- tpm_do_selftest(chip);
-
- return tpm_chip_register(chip);
-_tpm_clean_answer:
- dev_info(chip->pdev, "TPM I2C initialisation fail\n");
- return ret;
-}
-
-/*
- * tpm_stm_i2c_remove remove the TPM device
- * @param: client, the i2c_client description (TPM I2C description).
- * @return: 0 in case of success.
- */
-static int tpm_stm_i2c_remove(struct i2c_client *client)
-{
- struct tpm_chip *chip =
- (struct tpm_chip *) i2c_get_clientdata(client);
-
- if (chip)
- tpm_chip_unregister(chip);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-/*
- * tpm_stm_i2c_pm_suspend suspend the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
- * @param: mesg, the power management message.
- * @return: 0 in case of success.
- */
-static int tpm_stm_i2c_pm_suspend(struct device *dev)
-{
- struct st33zp24_platform_data *pin_infos = dev->platform_data;
- int ret = 0;
-
- if (gpio_is_valid(pin_infos->io_lpcpd))
- gpio_set_value(pin_infos->io_lpcpd, 0);
- else
- ret = tpm_pm_suspend(dev);
-
- return ret;
-} /* tpm_stm_i2c_suspend() */
-
-/*
- * tpm_stm_i2c_pm_resume resume the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
- * @return: 0 in case of success.
- */
-static int tpm_stm_i2c_pm_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct st33zp24_platform_data *pin_infos = dev->platform_data;
-
- int ret = 0;
-
- if (gpio_is_valid(pin_infos->io_lpcpd)) {
- gpio_set_value(pin_infos->io_lpcpd, 1);
- ret = wait_for_stat(chip,
- TPM_STS_VALID, chip->vendor.timeout_b,
- &chip->vendor.read_queue, false);
- } else {
- ret = tpm_pm_resume(dev);
- if (!ret)
- tpm_do_selftest(chip);
- }
- return ret;
-} /* tpm_stm_i2c_pm_resume() */
-#endif
-
-static const struct i2c_device_id tpm_stm_i2c_id[] = {
- {TPM_ST33_I2C, 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, tpm_stm_i2c_id);
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_st33zp24_i2c_match[] = {
- { .compatible = "st,st33zp24-i2c", },
- {}
-};
-MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpm_stm_i2c_ops, tpm_stm_i2c_pm_suspend,
- tpm_stm_i2c_pm_resume);
-
-static struct i2c_driver tpm_stm_i2c_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = TPM_ST33_I2C,
- .pm = &tpm_stm_i2c_ops,
- .of_match_table = of_match_ptr(of_st33zp24_i2c_match),
- },
- .probe = tpm_stm_i2c_probe,
- .remove = tpm_stm_i2c_remove,
- .id_table = tpm_stm_i2c_id
-};
-
-module_i2c_driver(tpm_stm_i2c_driver);
-
-MODULE_AUTHOR("Christophe Ricard (tpmsupport@st.com)");
-MODULE_DESCRIPTION("STM TPM I2C ST33 Driver");
-MODULE_VERSION("1.2.1");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 6d492132ad2b..6c488e635fdd 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -591,27 +591,8 @@ static void tpm_inf_pnp_remove(struct pnp_dev *dev)
}
}
-static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
-{
- struct tpm_chip *chip = pnp_get_drvdata(dev);
- int rc;
- if (chip) {
- u8 savestate[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 10, /* blob length (in bytes) */
- 0, 0, 0, 152 /* TPM_ORD_SaveState */
- };
- dev_info(&dev->dev, "saving TPM state\n");
- rc = tpm_inf_send(chip, savestate, sizeof(savestate));
- if (rc < 0) {
- dev_err(&dev->dev, "error while saving TPM state\n");
- return rc;
- }
- }
- return 0;
-}
-
-static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+#ifdef CONFIG_PM_SLEEP
+static int tpm_inf_resume(struct device *dev)
{
/* Re-configure TPM after suspending */
tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
@@ -625,30 +606,22 @@ static int tpm_inf_pnp_resume(struct pnp_dev *dev)
tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
/* disable RESET, LP and IRQC */
tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
- return tpm_pm_resume(&dev->dev);
+ return tpm_pm_resume(dev);
}
+#endif
+static SIMPLE_DEV_PM_OPS(tpm_inf_pm, tpm_pm_suspend, tpm_inf_resume);
static struct pnp_driver tpm_inf_pnp_driver = {
.name = "tpm_inf_pnp",
.id_table = tpm_inf_pnp_tbl,
.probe = tpm_inf_pnp_probe,
- .suspend = tpm_inf_pnp_suspend,
- .resume = tpm_inf_pnp_resume,
- .remove = tpm_inf_pnp_remove
+ .remove = tpm_inf_pnp_remove,
+ .driver = {
+ .pm = &tpm_inf_pm,
+ }
};
-static int __init init_inf(void)
-{
- return pnp_register_driver(&tpm_inf_pnp_driver);
-}
-
-static void __exit cleanup_inf(void)
-{
- pnp_unregister_driver(&tpm_inf_pnp_driver);
-}
-
-module_init(init_inf);
-module_exit(cleanup_inf);
+module_pnp_driver(tpm_inf_pnp_driver);
MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index c3b4f5a5ac10..3111f2778079 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -193,6 +193,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
struct xenbus_transaction xbt;
const char *message = NULL;
int rv;
+ grant_ref_t gref;
priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (!priv->shr) {
@@ -200,11 +201,11 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
return -ENOMEM;
}
- rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
+ rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
if (rv < 0)
return rv;
- priv->ring_ref = rv;
+ priv->ring_ref = gref;
rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
if (rv)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 72d7028f779b..50754d203310 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -355,7 +355,7 @@ static inline bool use_multiport(struct ports_device *portdev)
* early_init
*/
if (!portdev->vdev)
- return 0;
+ return false;
return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
}
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index b827fa095f1b..77d6c127e691 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1237,6 +1237,8 @@ static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
unsigned char *tail;
int i;
+ howmany = 0;
+
end_offset_plus1 = bufpos >>
channel->log2_element_size;
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 2002a3a28146..781865084dc1 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillybus_of";
/* Match table for of_platform binding */
-static struct of_device_id xillybus_of_match[] = {
+static const struct of_device_id xillybus_of_match[] = {
{ .compatible = "xillybus,xillybus-1.00.a", },
{ .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
{}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 0b474a04730f..9897f353bf1a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -130,6 +130,13 @@ config COMMON_CLK_PALMAS
This driver supports TI Palmas devices 32KHz output KG and KG_AUDIO
using common clock framework.
+config COMMON_CLK_PWM
+ tristate "Clock driver for PWMs used as clock outputs"
+ depends on PWM
+ ---help---
+ Adapter driver so that any PWM output can be (mis)used as a clock
+ signal at 50% duty cycle.
+
config COMMON_CLK_PXA
def_bool COMMON_CLK && ARCH_PXA
---help---
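For the COMMON_CLK_PWM entry added above, the only arithmetic involved is that a PWM toggling with period T nanoseconds at 50% duty cycle looks like a square-wave clock of 10^9 / T Hz. A minimal sketch of that relationship (the helper name is made up here, it is not part of the driver):

/* A PWM with period period_ns, driven at 50% duty cycle, behaves as a
 * square-wave clock running at 1e9 / period_ns Hz, e.g. 40 ns -> 25 MHz. */
static unsigned long pwm_period_to_clk_rate(unsigned long period_ns)
{
	return 1000000000UL / period_ns;	/* NSEC_PER_SEC / period */
}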
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d478ceb69c5f..3d00c25382c5 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_COMMON_CLK_MAX_GEN) += clk-max-gen.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX77802) += clk-max77802.o
+obj-$(CONFIG_ARCH_MB86S7X) += clk-mb86s7x.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
@@ -42,6 +43,7 @@ obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
+obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
@@ -54,6 +56,7 @@ obj-$(CONFIG_ARCH_MMP) += mmp/
endif
obj-$(CONFIG_PLAT_ORION) += mvebu/
obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
obj-$(CONFIG_COMMON_CLK_PXA) += pxa/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index a23ac0c724f0..0b7c3e8840ba 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
}
-static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_hw)
{
- unsigned long div;
+ struct clk *parent = NULL;
+ long best_rate = -EINVAL;
+ unsigned long tmp_rate;
+ int best_diff = -1;
+ int tmp_diff;
+ int i;
- if (!rate)
- return -EINVAL;
+ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+ int div;
- if (rate >= *parent_rate)
- return *parent_rate;
+ parent = clk_get_parent_by_index(hw->clk, i);
+ if (!parent)
+ continue;
+
+ for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
+ unsigned long tmp_parent_rate;
+
+ tmp_parent_rate = rate * div;
+ tmp_parent_rate = __clk_round_rate(parent,
+ tmp_parent_rate);
+ tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
+ if (tmp_rate < rate)
+ tmp_diff = rate - tmp_rate;
+ else
+ tmp_diff = tmp_rate - rate;
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ *best_parent_rate = tmp_parent_rate;
+ *best_parent_hw = __clk_get_hw(parent);
+ }
+
+ if (!best_diff || tmp_rate < rate)
+ break;
+ }
- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
- if (div > SAM9X5_USB_MAX_DIV + 1)
- div = SAM9X5_USB_MAX_DIV + 1;
+ if (!best_diff)
+ break;
+ }
- return DIV_ROUND_CLOSEST(*parent_rate, div);
+ return best_rate;
}
static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops at91sam9x5_usb_ops = {
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
- .round_rate = at91sam9x5_clk_usb_round_rate,
+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
.get_parent = at91sam9x5_clk_usb_get_parent,
.set_parent = at91sam9x5_clk_usb_set_parent,
.set_rate = at91sam9x5_clk_usb_set_rate,
@@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
.disable = at91sam9n12_clk_usb_disable,
.is_enabled = at91sam9n12_clk_usb_is_enabled,
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
- .round_rate = at91sam9x5_clk_usb_round_rate,
+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
.set_rate = at91sam9x5_clk_usb_set_rate,
};
@@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.ops = &at91sam9x5_usb_ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
usb->hw.init = &init;
usb->pmc = pmc;
@@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.ops = &at91sam9n12_usb_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.flags = CLK_SET_RATE_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
usb->hw.init = &init;
usb->pmc = pmc;
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 05abae89262e..a0ef4f75d457 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -15,6 +15,7 @@
#include "clk-kona.h"
#include <linux/delay.h>
+#include <linux/kernel.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -51,21 +52,6 @@ static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
/* Divider and scaling helpers */
-/*
- * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
- * unsigned. Note that unlike do_div(), the remainder is discarded
- * and the return value is the quotient (not the remainder).
- */
-u64 do_div_round_closest(u64 dividend, unsigned long divisor)
-{
- u64 result;
-
- result = dividend + ((u64)divisor >> 1);
- (void)do_div(result, divisor);
-
- return result;
-}
-
/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
@@ -87,7 +73,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
combined = (u64)div_value * BILLION + billionths;
combined <<= div->u.s.frac_width;
- return do_div_round_closest(combined, BILLION);
+ return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
}
/* The scaled minimum divisor representable by a divider */
@@ -731,7 +717,7 @@ static unsigned long clk_recalc_rate(struct ccu_data *ccu,
scaled_rate = scale_rate(pre_div, parent_rate);
scaled_rate = scale_rate(div, scaled_rate);
scaled_div = divider_read_scaled(ccu, pre_div);
- scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
scaled_div);
} else {
scaled_parent_rate = scale_rate(div, parent_rate);
@@ -743,7 +729,7 @@ static unsigned long clk_recalc_rate(struct ccu_data *ccu,
* rate.
*/
scaled_div = divider_read_scaled(ccu, div);
- result = do_div_round_closest(scaled_parent_rate, scaled_div);
+ result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);
return (unsigned long)result;
}
@@ -790,7 +776,7 @@ static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
scaled_rate = scale_rate(pre_div, parent_rate);
scaled_rate = scale_rate(div, scaled_rate);
scaled_pre_div = divider_read_scaled(ccu, pre_div);
- scaled_parent_rate = do_div_round_closest(scaled_rate,
+ scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
scaled_pre_div);
} else {
scaled_parent_rate = scale_rate(div, parent_rate);
@@ -802,7 +788,7 @@ static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
* the best we can do.
*/
if (!divider_is_fixed(div)) {
- best_scaled_div = do_div_round_closest(scaled_parent_rate,
+ best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
rate);
min_scaled_div = scaled_div_min(div);
max_scaled_div = scaled_div_max(div);
@@ -815,7 +801,7 @@ static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
}
/* OK, figure out the resulting rate */
- result = do_div_round_closest(scaled_parent_rate, best_scaled_div);
+ result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);
if (scaled_div)
*scaled_div = best_scaled_div;
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index 2537b3072910..6849a64baf6d 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -503,7 +503,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
-extern u64 do_div_round_closest(u64 dividend, unsigned long divisor);
extern u64 scaled_div_max(struct bcm_clk_div *div);
extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
u32 billionths);
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index c386ad25beb4..b8e4f8a822e9 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -58,7 +58,7 @@
#define CDCE706_CLKOUT_DIVIDER_MASK 0x7
#define CDCE706_CLKOUT_ENABLE_MASK 0x8
-static struct regmap_config cdce706_regmap_config = {
+static const struct regmap_config cdce706_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index aad4796aa3ed..48a65b2b4027 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -13,7 +13,6 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/printk.h>
-#include "clk.h"
static int __set_clk_parents(struct device_node *node, bool clk_supplier)
{
@@ -39,7 +38,7 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
}
if (clkspec.np == node && !clk_supplier)
return 0;
- pclk = of_clk_get_by_clkspec(&clkspec);
+ pclk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(pclk)) {
pr_warn("clk: couldn't get parent clock %d for %s\n",
index, node->full_name);
@@ -54,7 +53,7 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
rc = 0;
goto err;
}
- clk = of_clk_get_by_clkspec(&clkspec);
+ clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
pr_warn("clk: couldn't get parent clock %d for %s\n",
index, node->full_name);
@@ -98,7 +97,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
if (clkspec.np == node && !clk_supplier)
return 0;
- clk = of_clk_get_by_clkspec(&clkspec);
+ clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
pr_warn("clk: couldn't get clock %d for %s\n",
index, node->full_name);
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 82a59d0086cc..6aa72d9d79ba 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -36,6 +36,9 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
m = (val & fd->mmask) >> fd->mshift;
n = (val & fd->nmask) >> fd->nshift;
+ if (!n || !m)
+ return parent_rate;
+
ret = (u64)parent_rate * m;
do_div(ret, n);
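The check added above matters because the fractional divider's output is parent_rate * m / n, and the m/n bit-fields can legitimately read back as zero (for example before the divider has been programmed), which would otherwise divide by zero. The same arithmetic, spelled out as a standalone sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* rate = parent_rate * m / n, falling back to the parent rate while the
 * m/n fields still hold an all-zero value. */
static uint64_t fd_recalc_rate(uint64_t parent_rate, uint32_t m, uint32_t n)
{
	if (!m || !n)
		return parent_rate;
	return parent_rate * m / n;
}

int main(void)
{
	/* 24 MHz parent, m/n = 3/4 -> 18 MHz; unprogrammed -> 24 MHz */
	printf("%llu\n", (unsigned long long)fd_recalc_rate(24000000, 3, 4));
	printf("%llu\n", (unsigned long long)fd_recalc_rate(24000000, 0, 0));
	return 0;
}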
diff --git a/drivers/clk/clk-gpio-gate.c b/drivers/clk/clk-gpio-gate.c
index 08e43224fd52..a71cabedda93 100644
--- a/drivers/clk/clk-gpio-gate.c
+++ b/drivers/clk/clk-gpio-gate.c
@@ -65,10 +65,12 @@ EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of this clock's parent
- * @gpiod: gpio descriptor to gate this clock
+ * @gpio: gpio number to gate this clock
+ * @active_low: true if gpio should be set to 0 to enable clock
+ * @flags: clock flags
*/
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpiod,
+ const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags)
{
struct clk_gpio *clk_gpio = NULL;
@@ -77,20 +79,19 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
unsigned long gpio_flags;
int err;
- if (gpiod_is_active_low(gpiod))
- gpio_flags = GPIOF_OUT_INIT_HIGH;
+ if (active_low)
+ gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
else
gpio_flags = GPIOF_OUT_INIT_LOW;
if (dev)
- err = devm_gpio_request_one(dev, desc_to_gpio(gpiod),
- gpio_flags, name);
+ err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
else
- err = gpio_request_one(desc_to_gpio(gpiod), gpio_flags, name);
+ err = gpio_request_one(gpio, gpio_flags, name);
if (err) {
pr_err("%s: %s: Error requesting clock control gpio %u\n",
- __func__, name, desc_to_gpio(gpiod));
+ __func__, name, gpio);
return ERR_PTR(err);
}
@@ -111,7 +112,7 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
- clk_gpio->gpiod = gpiod;
+ clk_gpio->gpiod = gpio_to_desc(gpio);
clk_gpio->hw.init = &init;
clk = clk_register(dev, &clk_gpio->hw);
@@ -123,7 +124,8 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
kfree(clk_gpio);
clk_register_gpio_gate_err:
- gpiod_put(gpiod);
+ if (!dev)
+ gpio_free(gpio);
return clk;
}
@@ -149,8 +151,8 @@ static struct clk *of_clk_gpio_gate_delayed_register_get(
struct clk *clk;
const char *clk_name = data->node->name;
const char *parent_name;
- struct gpio_desc *gpiod;
int gpio;
+ enum of_gpio_flags of_flags;
mutex_lock(&data->lock);
@@ -159,7 +161,8 @@ static struct clk *of_clk_gpio_gate_delayed_register_get(
return data->clk;
}
- gpio = of_get_named_gpio_flags(data->node, "enable-gpios", 0, NULL);
+ gpio = of_get_named_gpio_flags(data->node, "enable-gpios", 0,
+ &of_flags);
if (gpio < 0) {
mutex_unlock(&data->lock);
if (gpio != -EPROBE_DEFER)
@@ -167,11 +170,11 @@ static struct clk *of_clk_gpio_gate_delayed_register_get(
__func__, clk_name);
return ERR_PTR(gpio);
}
- gpiod = gpio_to_desc(gpio);
parent_name = of_clk_get_parent_name(data->node, 0);
- clk = clk_register_gpio_gate(NULL, clk_name, parent_name, gpiod, 0);
+ clk = clk_register_gpio_gate(NULL, clk_name, parent_name, gpio,
+ of_flags & OF_GPIO_ACTIVE_LOW, 0);
if (IS_ERR(clk)) {
mutex_unlock(&data->lock);
return clk;
diff --git a/drivers/clk/clk-mb86s7x.c b/drivers/clk/clk-mb86s7x.c
new file mode 100644
index 000000000000..f39c25a22f43
--- /dev/null
+++ b/drivers/clk/clk-mb86s7x.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2013-2015 FUJITSU SEMICONDUCTOR LIMITED
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/topology.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+
+#include <soc/mb86s7x/scb_mhu.h>
+
+#define to_crg_clk(p) container_of(p, struct crg_clk, hw)
+#define to_clc_clk(p) container_of(p, struct cl_clk, hw)
+
+struct mb86s7x_peri_clk {
+ u32 payload_size;
+ u32 cntrlr;
+ u32 domain;
+ u32 port;
+ u32 en;
+ u64 frequency;
+} __packed __aligned(4);
+
+struct hack_rate {
+ unsigned clk_id;
+ unsigned long rate;
+ int gated;
+};
+
+struct crg_clk {
+ struct clk_hw hw;
+ u8 cntrlr, domain, port;
+};
+
+static int crg_gate_control(struct clk_hw *hw, int en)
+{
+ struct crg_clk *crgclk = to_crg_clk(hw);
+ struct mb86s7x_peri_clk cmd;
+ int ret;
+
+ cmd.payload_size = sizeof(cmd);
+ cmd.cntrlr = crgclk->cntrlr;
+ cmd.domain = crgclk->domain;
+ cmd.port = crgclk->port;
+ cmd.en = en;
+
+ /* Port is UngatedCLK */
+ if (cmd.port == 8)
+ return en ? 0 : -EINVAL;
+
+ pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u En-%u}\n",
+ __func__, __LINE__, cmd.cntrlr,
+ cmd.domain, cmd.port, cmd.en);
+
+ ret = mb86s7x_send_packet(CMD_PERI_CLOCK_GATE_SET_REQ,
+ &cmd, sizeof(cmd));
+ if (ret < 0) {
+ pr_err("%s:%d failed!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u En-%u}\n",
+ __func__, __LINE__, cmd.cntrlr,
+ cmd.domain, cmd.port, cmd.en);
+
+ /* If the request was rejected */
+ if (cmd.en != en)
+ ret = -EINVAL;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int crg_port_prepare(struct clk_hw *hw)
+{
+ return crg_gate_control(hw, 1);
+}
+
+static void crg_port_unprepare(struct clk_hw *hw)
+{
+ crg_gate_control(hw, 0);
+}
+
+static int
+crg_rate_control(struct clk_hw *hw, int set, unsigned long *rate)
+{
+ struct crg_clk *crgclk = to_crg_clk(hw);
+ struct mb86s7x_peri_clk cmd;
+ int code, ret;
+
+ cmd.payload_size = sizeof(cmd);
+ cmd.cntrlr = crgclk->cntrlr;
+ cmd.domain = crgclk->domain;
+ cmd.port = crgclk->port;
+ cmd.frequency = *rate;
+
+ if (set) {
+ code = CMD_PERI_CLOCK_RATE_SET_REQ;
+ pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u Rate-SET %lluHz}\n",
+ __func__, __LINE__, cmd.cntrlr,
+ cmd.domain, cmd.port, cmd.frequency);
+ } else {
+ code = CMD_PERI_CLOCK_RATE_GET_REQ;
+ pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u Rate-GET}\n",
+ __func__, __LINE__, cmd.cntrlr,
+ cmd.domain, cmd.port);
+ }
+
+ ret = mb86s7x_send_packet(code, &cmd, sizeof(cmd));
+ if (ret < 0) {
+ pr_err("%s:%d failed!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ if (set)
+ pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u Rate-SET %lluHz}\n",
+ __func__, __LINE__, cmd.cntrlr,
+ cmd.domain, cmd.port, cmd.frequency);
+ else
+ pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u Rate-GOT %lluHz}\n",
+ __func__, __LINE__, cmd.cntrlr,
+ cmd.domain, cmd.port, cmd.frequency);
+
+ *rate = cmd.frequency;
+ return 0;
+}
+
+static unsigned long
+crg_port_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ unsigned long rate;
+
+ crg_rate_control(hw, 0, &rate);
+
+ return rate;
+}
+
+static long
+crg_port_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *pr)
+{
+ return rate;
+}
+
+static int
+crg_port_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ return crg_rate_control(hw, 1, &rate);
+}
+
+const struct clk_ops crg_port_ops = {
+ .prepare = crg_port_prepare,
+ .unprepare = crg_port_unprepare,
+ .recalc_rate = crg_port_recalc_rate,
+ .round_rate = crg_port_round_rate,
+ .set_rate = crg_port_set_rate,
+};
+
+struct mb86s70_crg11 {
+ struct mutex lock; /* protects CLK populating and searching */
+};
+
+static struct clk *crg11_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct mb86s70_crg11 *crg11 = data;
+ struct clk_init_data init;
+ u32 cntrlr, domain, port;
+ struct crg_clk *crgclk;
+ struct clk *clk;
+ char clkp[20];
+
+ if (clkspec->args_count != 3)
+ return ERR_PTR(-EINVAL);
+
+ cntrlr = clkspec->args[0];
+ domain = clkspec->args[1];
+ port = clkspec->args[2];
+
+ if (port > 7)
+ snprintf(clkp, 20, "UngatedCLK%d_%X", cntrlr, domain);
+ else
+ snprintf(clkp, 20, "CLK%d_%X_%d", cntrlr, domain, port);
+
+ mutex_lock(&crg11->lock);
+
+ clk = __clk_lookup(clkp);
+ if (clk) {
+ mutex_unlock(&crg11->lock);
+ return clk;
+ }
+
+ crgclk = kzalloc(sizeof(*crgclk), GFP_KERNEL);
+ if (!crgclk) {
+ mutex_unlock(&crg11->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = clkp;
+ init.num_parents = 0;
+ init.ops = &crg_port_ops;
+ init.flags = CLK_IS_ROOT;
+ crgclk->hw.init = &init;
+ crgclk->cntrlr = cntrlr;
+ crgclk->domain = domain;
+ crgclk->port = port;
+ clk = clk_register(NULL, &crgclk->hw);
+ if (IS_ERR(clk))
+ pr_err("%s:%d Error!\n", __func__, __LINE__);
+ else
+ pr_debug("Registered %s\n", clkp);
+
+ clk_register_clkdev(clk, clkp, NULL);
+ mutex_unlock(&crg11->lock);
+ return clk;
+}
+
+static void __init crg_port_init(struct device_node *node)
+{
+ struct mb86s70_crg11 *crg11;
+
+ crg11 = kzalloc(sizeof(*crg11), GFP_KERNEL);
+ if (!crg11)
+ return;
+
+ mutex_init(&crg11->lock);
+
+ of_clk_add_provider(node, crg11_get, crg11);
+}
+CLK_OF_DECLARE(crg11_gate, "fujitsu,mb86s70-crg11", crg_port_init);
+
+struct cl_clk {
+ struct clk_hw hw;
+ int cluster;
+};
+
+struct mb86s7x_cpu_freq {
+ u32 payload_size;
+ u32 cluster_class;
+ u32 cluster_id;
+ u32 cpu_id;
+ u64 frequency;
+};
+
+static void mhu_cluster_rate(struct clk_hw *hw, unsigned long *rate, int get)
+{
+ struct cl_clk *clc = to_clc_clk(hw);
+ struct mb86s7x_cpu_freq cmd;
+ int code, ret;
+
+ cmd.payload_size = sizeof(cmd);
+ cmd.cluster_class = 0;
+ cmd.cluster_id = clc->cluster;
+ cmd.cpu_id = 0;
+ cmd.frequency = *rate;
+
+ if (get)
+ code = CMD_CPU_CLOCK_RATE_GET_REQ;
+ else
+ code = CMD_CPU_CLOCK_RATE_SET_REQ;
+
+ pr_debug("%s:%d CMD Cl_Class-%u CL_ID-%u CPU_ID-%u Freq-%llu}\n",
+ __func__, __LINE__, cmd.cluster_class,
+ cmd.cluster_id, cmd.cpu_id, cmd.frequency);
+
+ ret = mb86s7x_send_packet(code, &cmd, sizeof(cmd));
+ if (ret < 0) {
+ pr_err("%s:%d failed!\n", __func__, __LINE__);
+ return;
+ }
+
+ pr_debug("%s:%d REP Cl_Class-%u CL_ID-%u CPU_ID-%u Freq-%llu}\n",
+ __func__, __LINE__, cmd.cluster_class,
+ cmd.cluster_id, cmd.cpu_id, cmd.frequency);
+
+ *rate = cmd.frequency;
+}
+
+static unsigned long
+clc_recalc_rate(struct clk_hw *hw, unsigned long unused)
+{
+ unsigned long rate;
+
+ mhu_cluster_rate(hw, &rate, 1);
+ return rate;
+}
+
+static long
+clc_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *unused)
+{
+ return rate;
+}
+
+static int
+clc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long unused)
+{
+ unsigned long res = rate;
+
+ mhu_cluster_rate(hw, &res, 0);
+
+ return (res == rate) ? 0 : -EINVAL;
+}
+
+static struct clk_ops clk_clc_ops = {
+ .recalc_rate = clc_recalc_rate,
+ .round_rate = clc_round_rate,
+ .set_rate = clc_set_rate,
+};
+
+struct clk *mb86s7x_clclk_register(struct device *cpu_dev)
+{
+ struct clk_init_data init;
+ struct cl_clk *clc;
+
+ clc = kzalloc(sizeof(*clc), GFP_KERNEL);
+ if (!clc)
+ return ERR_PTR(-ENOMEM);
+
+ clc->hw.init = &init;
+ clc->cluster = topology_physical_package_id(cpu_dev->id);
+
+ init.name = dev_name(cpu_dev);
+ init.ops = &clk_clc_ops;
+ init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+
+ return devm_clk_register(cpu_dev, &clc->hw);
+}
+
+static int mb86s7x_clclk_of_init(void)
+{
+ int cpu, ret = -ENODEV;
+ struct device_node *np;
+ struct clk *clk;
+
+ np = of_find_compatible_node(NULL, NULL, "fujitsu,mb86s70-scb-1.0");
+ if (!np || !of_device_is_available(np))
+ goto exit;
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", cpu);
+ continue;
+ }
+
+ clk = mb86s7x_clclk_register(cpu_dev);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register cpu%d clock\n", cpu);
+ continue;
+ }
+ if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
+ pr_err("failed to register cpu%d clock lookup\n", cpu);
+ continue;
+ }
+ pr_debug("registered clk for %s\n", dev_name(cpu_dev));
+ }
+ ret = 0;
+
+ platform_device_register_simple("arm-bL-cpufreq-dt", -1, NULL, 0);
+exit:
+ of_node_put(np);
+ return ret;
+}
+module_init(mb86s7x_clclk_of_init);
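
Both crg_port_ops and clk_clc_ops above follow the usual clk-provider pattern: a driver-private structure embeds struct clk_hw, each callback recovers it with container_of(), and registration goes through clk_register() with a clk_init_data filled on the stack. A minimal sketch of that pattern, with hypothetical names (my_clk, my_recalc_rate, my_clk_register) and a cached rate standing in for the firmware round-trips the driver performs:

struct my_clk {
	struct clk_hw hw;
	unsigned long cached_rate;	/* stand-in for the firmware-provided rate */
};

#define to_my_clk(_hw) container_of(_hw, struct my_clk, hw)

static unsigned long my_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	return to_my_clk(hw)->cached_rate;
}

static const struct clk_ops my_ops = {
	.recalc_rate	= my_recalc_rate,
};

static struct clk *my_clk_register(struct device *dev, unsigned long rate)
{
	struct clk_init_data init = {
		.name		= "my_root_clk",
		.ops		= &my_ops,
		.flags		= CLK_IS_ROOT,	/* no parents, as above */
		.num_parents	= 0,
	};
	struct my_clk *myclk;

	myclk = kzalloc(sizeof(*myclk), GFP_KERNEL);
	if (!myclk)
		return ERR_PTR(-ENOMEM);

	myclk->cached_rate = rate;
	myclk->hw.init = &init;

	return clk_register(dev, &myclk->hw);
}
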
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 8d459923a15f..45a535ab48aa 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -161,7 +161,7 @@ static struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
},
};
-static struct of_device_id palmas_clks_of_match[] = {
+static const struct of_device_id palmas_clks_of_match[] = {
{
.compatible = "ti,palmas-clk32kg",
.data = &palmas_of_clk32kg,
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
new file mode 100644
index 000000000000..328fcfcefd8c
--- /dev/null
+++ b/drivers/clk/clk-pwm.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * PWM (mis)used as clock output
+ */
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+struct clk_pwm {
+ struct clk_hw hw;
+ struct pwm_device *pwm;
+ u32 fixed_rate;
+};
+
+static inline struct clk_pwm *to_clk_pwm(struct clk_hw *hw)
+{
+ return container_of(hw, struct clk_pwm, hw);
+}
+
+static int clk_pwm_prepare(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+
+ return pwm_enable(clk_pwm->pwm);
+}
+
+static void clk_pwm_unprepare(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+
+ pwm_disable(clk_pwm->pwm);
+}
+
+static unsigned long clk_pwm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+
+ return clk_pwm->fixed_rate;
+}
+
+static const struct clk_ops clk_pwm_ops = {
+ .prepare = clk_pwm_prepare,
+ .unprepare = clk_pwm_unprepare,
+ .recalc_rate = clk_pwm_recalc_rate,
+};
+
+static int clk_pwm_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_init_data init;
+ struct clk_pwm *clk_pwm;
+ struct pwm_device *pwm;
+ const char *clk_name;
+ struct clk *clk;
+ int ret;
+
+ clk_pwm = devm_kzalloc(&pdev->dev, sizeof(*clk_pwm), GFP_KERNEL);
+ if (!clk_pwm)
+ return -ENOMEM;
+
+ pwm = devm_pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm))
+ return PTR_ERR(pwm);
+
+ if (!pwm->period) {
+ dev_err(&pdev->dev, "invalid PWM period\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate))
+ clk_pwm->fixed_rate = NSEC_PER_SEC / pwm->period;
+
+ if (pwm->period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
+ pwm->period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
+ dev_err(&pdev->dev,
+ "clock-frequency does not match PWM period\n");
+ return -EINVAL;
+ }
+
+ ret = pwm_config(pwm, (pwm->period + 1) >> 1, pwm->period);
+ if (ret < 0)
+ return ret;
+
+ clk_name = node->name;
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ init.name = clk_name;
+ init.ops = &clk_pwm_ops;
+ init.flags = CLK_IS_BASIC | CLK_IS_ROOT;
+ init.num_parents = 0;
+
+ clk_pwm->pwm = pwm;
+ clk_pwm->hw.init = &init;
+ clk = devm_clk_register(&pdev->dev, &clk_pwm->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+
+static int clk_pwm_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id clk_pwm_dt_ids[] = {
+ { .compatible = "pwm-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_pwm_dt_ids);
+
+static struct platform_driver clk_pwm_driver = {
+ .probe = clk_pwm_probe,
+ .remove = clk_pwm_remove,
+ .driver = {
+ .name = "pwm-clock",
+ .of_match_table = of_match_ptr(clk_pwm_dt_ids),
+ },
+};
+
+module_platform_driver(clk_pwm_driver);
+
+MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
+MODULE_DESCRIPTION("PWM clock driver");
+MODULE_LICENSE("GPL");
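
clk_pwm_probe() above derives the output rate as NSEC_PER_SEC / period when no "clock-frequency" property is given, and otherwise insists that the property and the PWM period agree up to integer truncation in either direction. A small standalone sketch of that check with assumed numbers (a 33333 ns period giving 30000 Hz):

#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long period = 33333;			/* assumed PWM period, ns */
	unsigned long rate = NSEC_PER_SEC / period;	/* 30000 Hz */

	/* same test as clk_pwm_probe(): the period must map back to the rate */
	if (period != NSEC_PER_SEC / rate &&
	    period != DIV_ROUND_UP(NSEC_PER_SEC, rate))
		printf("clock-frequency does not match PWM period\n");
	else
		printf("rate %lu Hz accepted for period %lu ns\n", rate, period);

	return 0;
}
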
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 3b2a66f78755..30335d3b99af 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -68,16 +68,16 @@ struct si5351_driver_data {
struct si5351_hw_data *clkout;
};
-static const char const *si5351_input_names[] = {
+static const char * const si5351_input_names[] = {
"xtal", "clkin"
};
-static const char const *si5351_pll_names[] = {
+static const char * const si5351_pll_names[] = {
"plla", "pllb", "vxco"
};
-static const char const *si5351_msynth_names[] = {
+static const char * const si5351_msynth_names[] = {
"ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
};
-static const char const *si5351_clkout_names[] = {
+static const char * const si5351_clkout_names[] = {
"clk0", "clk1", "clk2", "clk3", "clk4", "clk5", "clk6", "clk7"
};
@@ -207,7 +207,7 @@ static bool si5351_regmap_is_writeable(struct device *dev, unsigned int reg)
return true;
}
-static struct regmap_config si5351_regmap_config = {
+static const struct regmap_config si5351_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
@@ -1128,13 +1128,6 @@ static int si5351_dt_parse(struct i2c_client *client,
if (!pdata)
return -ENOMEM;
- pdata->clk_xtal = of_clk_get(np, 0);
- if (!IS_ERR(pdata->clk_xtal))
- clk_put(pdata->clk_xtal);
- pdata->clk_clkin = of_clk_get(np, 1);
- if (!IS_ERR(pdata->clk_clkin))
- clk_put(pdata->clk_clkin);
-
/*
* property silabs,pll-source : <num src>, [<..>]
* allow to selectively set pll source
@@ -1328,8 +1321,22 @@ static int si5351_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, drvdata);
drvdata->client = client;
drvdata->variant = variant;
- drvdata->pxtal = pdata->clk_xtal;
- drvdata->pclkin = pdata->clk_clkin;
+ drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
+ drvdata->pclkin = devm_clk_get(&client->dev, "clkin");
+
+ if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
+ PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /*
+ * Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL,
+ * VARIANT_C can have CLKIN instead.
+ */
+ if (IS_ERR(drvdata->pxtal) &&
+ (drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) {
+ dev_err(&client->dev, "missing parent clock\n");
+ return -EINVAL;
+ }
drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
if (IS_ERR(drvdata->regmap)) {
@@ -1393,6 +1400,11 @@ static int si5351_i2c_probe(struct i2c_client *client,
}
}
+ if (!IS_ERR(drvdata->pxtal))
+ clk_prepare_enable(drvdata->pxtal);
+ if (!IS_ERR(drvdata->pclkin))
+ clk_prepare_enable(drvdata->pclkin);
+
/* register xtal input clock gate */
memset(&init, 0, sizeof(init));
init.name = si5351_input_names[0];
@@ -1407,7 +1419,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
clk = devm_clk_register(&client->dev, &drvdata->xtal);
if (IS_ERR(clk)) {
dev_err(&client->dev, "unable to register %s\n", init.name);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
/* register clkin input clock gate */
@@ -1425,7 +1438,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (IS_ERR(clk)) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
}
@@ -1447,7 +1461,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
if (IS_ERR(clk)) {
dev_err(&client->dev, "unable to register %s\n", init.name);
- return -EINVAL;
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
/* register PLLB or VXCO (Si5351B) */
@@ -1471,7 +1486,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
if (IS_ERR(clk)) {
dev_err(&client->dev, "unable to register %s\n", init.name);
- return -EINVAL;
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
/* register clk multisync and clk out divider */
@@ -1492,8 +1508,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
- !drvdata->onecell.clks))
- return -ENOMEM;
+ !drvdata->onecell.clks)) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
for (n = 0; n < num_clocks; n++) {
drvdata->msynth[n].num = n;
@@ -1511,7 +1529,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (IS_ERR(clk)) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
- return -EINVAL;
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
}
@@ -1538,7 +1557,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (IS_ERR(clk)) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
- return -EINVAL;
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
drvdata->onecell.clks[n] = clk;
@@ -1557,10 +1577,17 @@ static int si5351_i2c_probe(struct i2c_client *client,
&drvdata->onecell);
if (ret) {
dev_err(&client->dev, "unable to add clk provider\n");
- return ret;
+ goto err_clk;
}
return 0;
+
+err_clk:
+ if (!IS_ERR(drvdata->pxtal))
+ clk_disable_unprepare(drvdata->pxtal);
+ if (!IS_ERR(drvdata->pclkin))
+ clk_disable_unprepare(drvdata->pclkin);
+ return ret;
}
static const struct i2c_device_id si5351_i2c_ids[] = {
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index fc167b3f8919..20a5aec98b1a 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -393,7 +393,7 @@ static bool si570_regmap_is_writeable(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config si570_regmap_config = {
+static const struct regmap_config si570_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 237f23f68bfc..5b0f41868b42 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -77,13 +77,16 @@ struct clk_core {
struct kref ref;
};
+#define CREATE_TRACE_POINTS
+#include <trace/events/clk.h>
+
struct clk {
struct clk_core *core;
const char *dev_id;
const char *con_id;
unsigned long min_rate;
unsigned long max_rate;
- struct hlist_node child_node;
+ struct hlist_node clks_node;
};
/*** locking ***/
@@ -480,6 +483,8 @@ static void clk_unprepare_unused_subtree(struct clk_core *clk)
{
struct clk_core *child;
+ lockdep_assert_held(&prepare_lock);
+
hlist_for_each_entry(child, &clk->children, child_node)
clk_unprepare_unused_subtree(child);
@@ -490,10 +495,12 @@ static void clk_unprepare_unused_subtree(struct clk_core *clk)
return;
if (clk_core_is_prepared(clk)) {
+ trace_clk_unprepare(clk);
if (clk->ops->unprepare_unused)
clk->ops->unprepare_unused(clk->hw);
else if (clk->ops->unprepare)
clk->ops->unprepare(clk->hw);
+ trace_clk_unprepare_complete(clk);
}
}
@@ -503,6 +510,8 @@ static void clk_disable_unused_subtree(struct clk_core *clk)
struct clk_core *child;
unsigned long flags;
+ lockdep_assert_held(&prepare_lock);
+
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
@@ -520,10 +529,12 @@ static void clk_disable_unused_subtree(struct clk_core *clk)
* back to .disable
*/
if (clk_core_is_enabled(clk)) {
+ trace_clk_disable(clk);
if (clk->ops->disable_unused)
clk->ops->disable_unused(clk->hw);
else if (clk->ops->disable)
clk->ops->disable(clk->hw);
+ trace_clk_disable_complete(clk);
}
unlock_out:
@@ -851,10 +862,10 @@ static void clk_core_get_boundaries(struct clk_core *clk,
*min_rate = 0;
*max_rate = ULONG_MAX;
- hlist_for_each_entry(clk_user, &clk->clks, child_node)
+ hlist_for_each_entry(clk_user, &clk->clks, clks_node)
*min_rate = max(*min_rate, clk_user->min_rate);
- hlist_for_each_entry(clk_user, &clk->clks, child_node)
+ hlist_for_each_entry(clk_user, &clk->clks, clks_node)
*max_rate = min(*max_rate, clk_user->max_rate);
}
@@ -903,9 +914,12 @@ static void clk_core_unprepare(struct clk_core *clk)
WARN_ON(clk->enable_count > 0);
+ trace_clk_unprepare(clk);
+
if (clk->ops->unprepare)
clk->ops->unprepare(clk->hw);
+ trace_clk_unprepare_complete(clk);
clk_core_unprepare(clk->parent);
}
@@ -943,12 +957,16 @@ static int clk_core_prepare(struct clk_core *clk)
if (ret)
return ret;
- if (clk->ops->prepare) {
+ trace_clk_prepare(clk);
+
+ if (clk->ops->prepare)
ret = clk->ops->prepare(clk->hw);
- if (ret) {
- clk_core_unprepare(clk->parent);
- return ret;
- }
+
+ trace_clk_prepare_complete(clk);
+
+ if (ret) {
+ clk_core_unprepare(clk->parent);
+ return ret;
}
}
@@ -995,9 +1013,13 @@ static void clk_core_disable(struct clk_core *clk)
if (--clk->enable_count > 0)
return;
+ trace_clk_disable(clk);
+
if (clk->ops->disable)
clk->ops->disable(clk->hw);
+ trace_clk_disable_complete(clk);
+
clk_core_disable(clk->parent);
}
@@ -1050,12 +1072,16 @@ static int clk_core_enable(struct clk_core *clk)
if (ret)
return ret;
- if (clk->ops->enable) {
+ trace_clk_enable(clk);
+
+ if (clk->ops->enable)
ret = clk->ops->enable(clk->hw);
- if (ret) {
- clk_core_disable(clk->parent);
- return ret;
- }
+
+ trace_clk_enable_complete(clk);
+
+ if (ret) {
+ clk_core_disable(clk->parent);
+ return ret;
}
}
@@ -1106,6 +1132,8 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
struct clk_core *parent;
struct clk_hw *parent_hw;
+ lockdep_assert_held(&prepare_lock);
+
if (!clk)
return 0;
@@ -1245,6 +1273,8 @@ static void __clk_recalc_accuracies(struct clk_core *clk)
unsigned long parent_accuracy = 0;
struct clk_core *child;
+ lockdep_assert_held(&prepare_lock);
+
if (clk->parent)
parent_accuracy = clk->parent->accuracy;
@@ -1318,6 +1348,8 @@ static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
unsigned long parent_rate = 0;
struct clk_core *child;
+ lockdep_assert_held(&prepare_lock);
+
old_rate = clk->rate;
if (clk->parent)
@@ -1443,8 +1475,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
*/
if (clk->prepare_count) {
clk_core_prepare(parent);
+ flags = clk_enable_lock();
clk_core_enable(parent);
clk_core_enable(clk);
+ clk_enable_unlock(flags);
}
/* update the clk tree topology */
@@ -1459,13 +1493,17 @@ static void __clk_set_parent_after(struct clk_core *core,
struct clk_core *parent,
struct clk_core *old_parent)
{
+ unsigned long flags;
+
/*
* Finish the migration of prepare state and undo the changes done
* for preventing a race with clk_enable().
*/
if (core->prepare_count) {
+ flags = clk_enable_lock();
clk_core_disable(core);
clk_core_disable(old_parent);
+ clk_enable_unlock(flags);
clk_core_unprepare(old_parent);
}
}
@@ -1479,18 +1517,24 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
old_parent = __clk_set_parent_before(clk, parent);
+ trace_clk_set_parent(clk, parent);
+
/* change clock input source */
if (parent && clk->ops->set_parent)
ret = clk->ops->set_parent(clk->hw, p_index);
+ trace_clk_set_parent_complete(clk, parent);
+
if (ret) {
flags = clk_enable_lock();
clk_reparent(clk, old_parent);
clk_enable_unlock(flags);
if (clk->prepare_count) {
+ flags = clk_enable_lock();
clk_core_disable(clk);
clk_core_disable(parent);
+ clk_enable_unlock(flags);
clk_core_unprepare(parent);
}
return ret;
@@ -1524,6 +1568,8 @@ static int __clk_speculate_rates(struct clk_core *clk,
unsigned long new_rate;
int ret = NOTIFY_DONE;
+ lockdep_assert_held(&prepare_lock);
+
new_rate = clk_recalc(clk, parent_rate);
/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
@@ -1580,6 +1626,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
+ long ret;
/* sanity */
if (IS_ERR_OR_NULL(clk))
@@ -1595,15 +1642,23 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
/* find the closest rate and parent clk/rate */
if (clk->ops->determine_rate) {
parent_hw = parent ? parent->hw : NULL;
- new_rate = clk->ops->determine_rate(clk->hw, rate,
- min_rate,
- max_rate,
- &best_parent_rate,
- &parent_hw);
+ ret = clk->ops->determine_rate(clk->hw, rate,
+ min_rate,
+ max_rate,
+ &best_parent_rate,
+ &parent_hw);
+ if (ret < 0)
+ return NULL;
+
+ new_rate = ret;
parent = parent_hw ? parent_hw->core : NULL;
} else if (clk->ops->round_rate) {
- new_rate = clk->ops->round_rate(clk->hw, rate,
- &best_parent_rate);
+ ret = clk->ops->round_rate(clk->hw, rate,
+ &best_parent_rate);
+ if (ret < 0)
+ return NULL;
+
+ new_rate = ret;
if (new_rate < min_rate || new_rate > max_rate)
return NULL;
} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
@@ -1706,6 +1761,7 @@ static void clk_change_rate(struct clk_core *clk)
if (clk->new_parent && clk->new_parent != clk->parent) {
old_parent = __clk_set_parent_before(clk, clk->new_parent);
+ trace_clk_set_parent(clk, clk->new_parent);
if (clk->ops->set_rate_and_parent) {
skip_set_rate = true;
@@ -1716,12 +1772,17 @@ static void clk_change_rate(struct clk_core *clk)
clk->ops->set_parent(clk->hw, clk->new_parent_index);
}
+ trace_clk_set_parent_complete(clk, clk->new_parent);
__clk_set_parent_after(clk, clk->new_parent, old_parent);
}
+ trace_clk_set_rate(clk, clk->new_rate);
+
if (!skip_set_rate && clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
+ trace_clk_set_rate_complete(clk, clk->new_rate);
+
clk->rate = clk_recalc(clk, best_parent_rate);
if (clk->notifier_count && old_rate != clk->rate)
@@ -2010,16 +2071,18 @@ static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
if (!clk)
return 0;
- /* verify ops for for multi-parent clks */
- if ((clk->num_parents > 1) && (!clk->ops->set_parent))
- return -ENOSYS;
-
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (clk->parent == parent)
goto out;
+ /* verify ops for multi-parent clks */
+ if ((clk->num_parents > 1) && (!clk->ops->set_parent)) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
/* check that we are allowed to re-parent if the clock is in use */
if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
ret = -EBUSY;
@@ -2110,10 +2173,10 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
*/
int clk_set_phase(struct clk *clk, int degrees)
{
- int ret = 0;
+ int ret = -EINVAL;
if (!clk)
- goto out;
+ return 0;
/* sanity check degrees */
degrees %= 360;
@@ -2122,18 +2185,18 @@ int clk_set_phase(struct clk *clk, int degrees)
clk_prepare_lock();
- if (!clk->core->ops->set_phase)
- goto out_unlock;
+ trace_clk_set_phase(clk->core, degrees);
- ret = clk->core->ops->set_phase(clk->core->hw, degrees);
+ if (clk->core->ops->set_phase)
+ ret = clk->core->ops->set_phase(clk->core->hw, degrees);
+
+ trace_clk_set_phase_complete(clk->core, degrees);
if (!ret)
clk->core->phase = degrees;
-out_unlock:
clk_prepare_unlock();
-out:
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
@@ -2401,7 +2464,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
clk->max_rate = ULONG_MAX;
clk_prepare_lock();
- hlist_add_head(&clk->child_node, &hw->core->clks);
+ hlist_add_head(&clk->clks_node, &hw->core->clks);
clk_prepare_unlock();
return clk;
@@ -2410,7 +2473,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
void __clk_free_clk(struct clk *clk)
{
clk_prepare_lock();
- hlist_del(&clk->child_node);
+ hlist_del(&clk->clks_node);
clk_prepare_unlock();
kfree(clk);
@@ -2513,6 +2576,8 @@ static void __clk_release(struct kref *ref)
struct clk_core *clk = container_of(ref, struct clk_core, ref);
int i = clk->num_parents;
+ lockdep_assert_held(&prepare_lock);
+
kfree(clk->parents);
while (--i >= 0)
kfree_const(clk->parent_names[i]);
@@ -2688,7 +2753,7 @@ void __clk_put(struct clk *clk)
clk_prepare_lock();
- hlist_del(&clk->child_node);
+ hlist_del(&clk->clks_node);
if (clk->min_rate > clk->core->req_rate ||
clk->max_rate < clk->core->req_rate)
clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
@@ -2834,17 +2899,6 @@ static const struct of_device_id __clk_of_table_sentinel
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
-/* of_clk_provider list locking helpers */
-void of_clk_lock(void)
-{
- mutex_lock(&of_clk_mutex);
-}
-
-void of_clk_unlock(void)
-{
- mutex_unlock(&of_clk_mutex);
-}
-
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -2928,7 +2982,11 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-EPROBE_DEFER);
+ if (!clkspec)
+ return ERR_PTR(-EINVAL);
+
/* Check if we have such a provider in our array */
+ mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np)
clk = provider->get(clkspec, provider->data);
@@ -2944,19 +3002,22 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
break;
}
}
+ mutex_unlock(&of_clk_mutex);
return clk;
}
+/**
+ * of_clk_get_from_provider() - Lookup a clock from a clock provider
+ * @clkspec: pointer to a clock specifier data structure
+ *
+ * This function looks up a struct clk from the registered list of clock
+ * providers; the input is a clock specifier data structure as returned
+ * from the of_parse_phandle_with_args() function call.
+ */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
- struct clk *clk;
-
- mutex_lock(&of_clk_mutex);
- clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
- mutex_unlock(&of_clk_mutex);
-
- return clk;
+ return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
int of_clk_get_parent_count(struct device_node *np)
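
For context, a sketch of the consumer path the kernel-doc above describes: the specifier comes from of_parse_phandle_with_args() and is passed straight to of_clk_get_from_provider(), which is essentially what __of_clk_get() in clkdev.c does after this change. The wrapper name example_of_clk_get() is hypothetical:

static struct clk *example_of_clk_get(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct clk *clk;
	int rc;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
					index, &clkspec);
	if (rc)
		return ERR_PTR(rc);

	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	return clk;
}
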
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index ba845408cc3e..00b35a13cdf3 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -12,11 +12,8 @@
struct clk_hw;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
-struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
const char *dev_id, const char *con_id);
-void of_clk_lock(void);
-void of_clk_unlock(void);
#endif
#ifdef CONFIG_COMMON_CLK
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 043fd3633373..1fcb6ef2cdac 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -28,34 +28,6 @@ static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
-
-static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec,
- const char *dev_id, const char *con_id)
-{
- struct clk *clk;
-
- if (!clkspec)
- return ERR_PTR(-EINVAL);
-
- of_clk_lock();
- clk = __of_clk_get_from_provider(clkspec, dev_id, con_id);
- of_clk_unlock();
- return clk;
-}
-
-/**
- * of_clk_get_by_clkspec() - Lookup a clock form a clock provider
- * @clkspec: pointer to a clock specifier data structure
- *
- * This function looks up a struct clk from the registered list of clock
- * providers, an input is a clock specifier data structure as returned
- * from the of_parse_phandle_with_args() function call.
- */
-struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
-{
- return __of_clk_get_by_clkspec(clkspec, NULL, __func__);
-}
-
static struct clk *__of_clk_get(struct device_node *np, int index,
const char *dev_id, const char *con_id)
{
@@ -71,7 +43,7 @@ static struct clk *__of_clk_get(struct device_node *np, int index,
if (rc)
return ERR_PTR(rc);
- clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id);
+ clk = __of_clk_get_from_provider(&clkspec, dev_id, con_id);
of_node_put(clkspec.np);
return clk;
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 2e4f6d432beb..472dd2cb10b3 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -38,44 +38,44 @@
#include "clk.h"
/* clock parent list */
-static const char *timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
-static const char *timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
-static const char *timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
-static const char *timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
-static const char *timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
-static const char *timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
-static const char *timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
-static const char *timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
-static const char *timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
-static const char *timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
-static const char *uart0_mux_p[] __initconst = { "osc26m", "pclk", };
-static const char *uart1_mux_p[] __initconst = { "osc26m", "pclk", };
-static const char *uart2_mux_p[] __initconst = { "osc26m", "pclk", };
-static const char *uart3_mux_p[] __initconst = { "osc26m", "pclk", };
-static const char *uart4_mux_p[] __initconst = { "osc26m", "pclk", };
-static const char *spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
-static const char *spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
-static const char *spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", };
+static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", };
+static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", };
+static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", };
+static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", };
+static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", };
+static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", };
+static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", };
+static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", };
+static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", };
+static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
/* share axi parent */
-static const char *saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
-static const char *pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
-static const char *pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
-static const char *sd_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
-static const char *g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *venc_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
+static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", };
+static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", };
+static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", };
+static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", };
+static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4",
"armpll3", "armpll5", };
-static const char *edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
+static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4",
"armpll3", "armpll5", };
-static const char *rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
-static const char *mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
-static const char *mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", };
+static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", };
/* fixed rate clocks */
diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
index 3f369c60fe56..f1d239435826 100644
--- a/drivers/clk/hisilicon/clk-hix5hd2.c
+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
@@ -46,15 +46,15 @@ static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = {
{ HIX5HD2_FIXED_83M, "83m", NULL, CLK_IS_ROOT, 83333333, },
};
-static const char *sfc_mux_p[] __initconst = {
+static const char *sfc_mux_p[] __initdata = {
"24m", "150m", "200m", "100m", "75m", };
static u32 sfc_mux_table[] = {0, 4, 5, 6, 7};
-static const char *sdio_mux_p[] __initconst = {
+static const char *sdio_mux_p[] __initdata = {
"75m", "100m", "50m", "15m", };
static u32 sdio_mux_table[] = {0, 1, 2, 3};
-static const char *fephy_mux_p[] __initconst = { "25m", "125m"};
+static const char *fephy_mux_p[] __initdata = { "25m", "125m"};
static u32 fephy_mux_table[] = {0, 1};
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 3b34dba9178d..27696255486d 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -21,6 +21,10 @@ config ARMADA_38X_CLK
bool
select MVEBU_CLK_COMMON
+config ARMADA_39X_CLK
+ bool
+ select MVEBU_CLK_COMMON
+
config ARMADA_XP_CLK
bool
select MVEBU_CLK_COMMON
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index a9a56fc01901..645ac7ea3565 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o
obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o
obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o
obj-$(CONFIG_ARMADA_38X_CLK) += armada-38x.o
+obj-$(CONFIG_ARMADA_39X_CLK) += armada-39x.o
obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o
obj-$(CONFIG_DOVE_CLK) += dove.o
obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o
diff --git a/drivers/clk/mvebu/armada-39x.c b/drivers/clk/mvebu/armada-39x.c
new file mode 100644
index 000000000000..efb974df9822
--- /dev/null
+++ b/drivers/clk/mvebu/armada-39x.c
@@ -0,0 +1,156 @@
+/*
+ * Marvell Armada 39x SoC clocks
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Andrew Lunn <andrew@lunn.ch>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include "common.h"
+
+/*
+ * SARL[14:10] : Ratios between CPU, NBCLK, HCLK and DCLK.
+ *
+ * SARL[15] : TCLK frequency
+ * 0 = 250 MHz
+ * 1 = 200 MHz
+ *
+ * SARH[0] : Reference clock frequency
+ * 0 = 25 MHz
+ * 1 = 40 MHz
+ */
+
+#define SARL 0
+#define SARL_A390_TCLK_FREQ_OPT 15
+#define SARL_A390_TCLK_FREQ_OPT_MASK 0x1
+#define SARL_A390_CPU_DDR_L2_FREQ_OPT 10
+#define SARL_A390_CPU_DDR_L2_FREQ_OPT_MASK 0x1F
+#define SARH 4
+#define SARH_A390_REFCLK_FREQ BIT(0)
+
+static const u32 armada_39x_tclk_frequencies[] __initconst = {
+ 250000000,
+ 200000000,
+};
+
+static u32 __init armada_39x_get_tclk_freq(void __iomem *sar)
+{
+ u8 tclk_freq_select;
+
+ tclk_freq_select = ((readl(sar + SARL) >> SARL_A390_TCLK_FREQ_OPT) &
+ SARL_A390_TCLK_FREQ_OPT_MASK);
+ return armada_39x_tclk_frequencies[tclk_freq_select];
+}
+
+static const u32 armada_39x_cpu_frequencies[] __initconst = {
+ [0x0] = 666 * 1000 * 1000,
+ [0x2] = 800 * 1000 * 1000,
+ [0x3] = 800 * 1000 * 1000,
+ [0x4] = 1066 * 1000 * 1000,
+ [0x5] = 1066 * 1000 * 1000,
+ [0x6] = 1200 * 1000 * 1000,
+ [0x8] = 1332 * 1000 * 1000,
+ [0xB] = 1600 * 1000 * 1000,
+ [0xC] = 1600 * 1000 * 1000,
+ [0x12] = 1800 * 1000 * 1000,
+ [0x1E] = 1800 * 1000 * 1000,
+};
+
+static u32 __init armada_39x_get_cpu_freq(void __iomem *sar)
+{
+ u8 cpu_freq_select;
+
+ cpu_freq_select = ((readl(sar + SARL) >> SARL_A390_CPU_DDR_L2_FREQ_OPT) &
+ SARL_A390_CPU_DDR_L2_FREQ_OPT_MASK);
+ if (cpu_freq_select >= ARRAY_SIZE(armada_39x_cpu_frequencies)) {
+ pr_err("Selected CPU frequency (%d) unsupported\n",
+ cpu_freq_select);
+ return 0;
+ }
+
+ return armada_39x_cpu_frequencies[cpu_freq_select];
+}
+
+enum { A390_CPU_TO_NBCLK, A390_CPU_TO_HCLK, A390_CPU_TO_DCLK };
+
+static const struct coreclk_ratio armada_39x_coreclk_ratios[] __initconst = {
+ { .id = A390_CPU_TO_NBCLK, .name = "nbclk" },
+ { .id = A390_CPU_TO_HCLK, .name = "hclk" },
+ { .id = A390_CPU_TO_DCLK, .name = "dclk" },
+};
+
+static void __init armada_39x_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case A390_CPU_TO_NBCLK:
+ *mult = 1;
+ *div = 2;
+ break;
+ case A390_CPU_TO_HCLK:
+ *mult = 1;
+ *div = 4;
+ break;
+ case A390_CPU_TO_DCLK:
+ *mult = 1;
+ *div = 2;
+ break;
+ }
+}
+
+static u32 __init armada_39x_refclk_ratio(void __iomem *sar)
+{
+ if (readl(sar + SARH) & SARH_A390_REFCLK_FREQ)
+ return 40 * 1000 * 1000;
+ else
+ return 25 * 1000 * 1000;
+}
+
+static const struct coreclk_soc_desc armada_39x_coreclks = {
+ .get_tclk_freq = armada_39x_get_tclk_freq,
+ .get_cpu_freq = armada_39x_get_cpu_freq,
+ .get_clk_ratio = armada_39x_get_clk_ratio,
+ .get_refclk_freq = armada_39x_refclk_ratio,
+ .ratios = armada_39x_coreclk_ratios,
+ .num_ratios = ARRAY_SIZE(armada_39x_coreclk_ratios),
+};
+
+static void __init armada_39x_coreclk_init(struct device_node *np)
+{
+ mvebu_coreclk_setup(np, &armada_39x_coreclks);
+}
+CLK_OF_DECLARE(armada_39x_core_clk, "marvell,armada-390-core-clock",
+ armada_39x_coreclk_init);
+
+/*
+ * Clock Gating Control
+ */
+static const struct clk_gating_soc_desc armada_39x_gating_desc[] __initconst = {
+ { "pex1", NULL, 5 },
+ { "pex2", NULL, 6 },
+ { "pex3", NULL, 7 },
+ { "pex0", NULL, 8 },
+ { "usb3h0", NULL, 9 },
+ { "sdio", NULL, 17 },
+ { "xor0", NULL, 22 },
+ { "xor1", NULL, 28 },
+ { }
+};
+
+static void __init armada_39x_clk_gating_init(struct device_node *np)
+{
+ mvebu_clk_gating_setup(np, armada_39x_gating_desc);
+}
+CLK_OF_DECLARE(armada_39x_clk_gating, "marvell,armada-390-gating-clock",
+ armada_39x_clk_gating_init);
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 0d4d1216f2dd..15b370ff3748 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -121,6 +121,11 @@ void __init mvebu_coreclk_setup(struct device_node *np,
/* Allocate struct for TCLK, cpu clk, and core ratio clocks */
clk_data.clk_num = 2 + desc->num_ratios;
+
+ /* One more clock for the optional refclk */
+ if (desc->get_refclk_freq)
+ clk_data.clk_num += 1;
+
clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
GFP_KERNEL);
if (WARN_ON(!clk_data.clks)) {
@@ -162,6 +167,18 @@ void __init mvebu_coreclk_setup(struct device_node *np,
WARN_ON(IS_ERR(clk_data.clks[2+n]));
};
+ /* Register optional refclk */
+ if (desc->get_refclk_freq) {
+ const char *name = "refclk";
+ of_property_read_string_index(np, "clock-output-names",
+ 2 + desc->num_ratios, &name);
+ rate = desc->get_refclk_freq(base);
+ clk_data.clks[2 + desc->num_ratios] =
+ clk_register_fixed_rate(NULL, name, NULL,
+ CLK_IS_ROOT, rate);
+ WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
+ }
+
/* SAR register isn't needed anymore */
iounmap(base);
diff --git a/drivers/clk/mvebu/common.h b/drivers/clk/mvebu/common.h
index 783b5631a453..f0de6c8a494a 100644
--- a/drivers/clk/mvebu/common.h
+++ b/drivers/clk/mvebu/common.h
@@ -30,6 +30,7 @@ struct coreclk_soc_desc {
u32 (*get_tclk_freq)(void __iomem *sar);
u32 (*get_cpu_freq)(void __iomem *sar);
void (*get_clk_ratio)(void __iomem *sar, int id, int *mult, int *div);
+ u32 (*get_refclk_freq)(void __iomem *sar);
bool (*is_sscg_enabled)(void __iomem *sar);
u32 (*fix_sscg_deviation)(u32 system_clk);
const struct coreclk_ratio *ratios;
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index 9fc9359f5133..22d136aa699f 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -77,12 +77,12 @@ static void __init clk_misc_init(void)
writel_relaxed(30 << BP_FRAC_IOFRAC, FRAC + SET);
}
-static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
-static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
-static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
-static const char *sel_io[] __initconst = { "ref_io", "ref_xtal", };
-static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
-static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+static const char *sel_pll[] __initdata = { "pll", "ref_xtal", };
+static const char *sel_cpu[] __initdata = { "ref_cpu", "ref_xtal", };
+static const char *sel_pix[] __initdata = { "ref_pix", "ref_xtal", };
+static const char *sel_io[] __initdata = { "ref_io", "ref_xtal", };
+static const char *cpu_sels[] __initdata = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initdata = { "emi_pll", "emi_xtal", };
enum imx23_clk {
ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index a6c35010e4e5..b1be3746ce95 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -125,15 +125,15 @@ static void __init clk_misc_init(void)
writel_relaxed(val, FRAC0);
}
-static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
-static const char *sel_io0[] __initconst = { "ref_io0", "ref_xtal", };
-static const char *sel_io1[] __initconst = { "ref_io1", "ref_xtal", };
-static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
-static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
-static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", };
-static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
-static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
-static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", };
+static const char *sel_cpu[] __initdata = { "ref_cpu", "ref_xtal", };
+static const char *sel_io0[] __initdata = { "ref_io0", "ref_xtal", };
+static const char *sel_io1[] __initdata = { "ref_io1", "ref_xtal", };
+static const char *sel_pix[] __initdata = { "ref_pix", "ref_xtal", };
+static const char *sel_gpmi[] __initdata = { "ref_gpmi", "ref_xtal", };
+static const char *sel_pll0[] __initdata = { "pll0", "ref_xtal", };
+static const char *cpu_sels[] __initdata = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initdata = { "emi_pll", "emi_xtal", };
+static const char *ptp_sels[] __initdata = { "ref_xtal", "pll0", };
enum imx28_clk {
ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
diff --git a/drivers/clk/pistachio/Makefile b/drivers/clk/pistachio/Makefile
new file mode 100644
index 000000000000..f1e151fbef65
--- /dev/null
+++ b/drivers/clk/pistachio/Makefile
@@ -0,0 +1,3 @@
+obj-y += clk.o
+obj-y += clk-pll.o
+obj-y += clk-pistachio.o
diff --git a/drivers/clk/pistachio/clk-pistachio.c b/drivers/clk/pistachio/clk-pistachio.c
new file mode 100644
index 000000000000..8c0fe8828f99
--- /dev/null
+++ b/drivers/clk/pistachio/clk-pistachio.c
@@ -0,0 +1,329 @@
+/*
+ * Pistachio SoC clock controllers
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+
+#include <dt-bindings/clock/pistachio-clk.h>
+
+#include "clk.h"
+
+static struct pistachio_gate pistachio_gates[] __initdata = {
+ GATE(CLK_MIPS, "mips", "mips_div", 0x104, 0),
+ GATE(CLK_AUDIO_IN, "audio_in", "audio_clk_in_gate", 0x104, 1),
+ GATE(CLK_AUDIO, "audio", "audio_div", 0x104, 2),
+ GATE(CLK_I2S, "i2s", "i2s_div", 0x104, 3),
+ GATE(CLK_SPDIF, "spdif", "spdif_div", 0x104, 4),
+ GATE(CLK_AUDIO_DAC, "audio_dac", "audio_dac_div", 0x104, 5),
+ GATE(CLK_RPU_V, "rpu_v", "rpu_v_div", 0x104, 6),
+ GATE(CLK_RPU_L, "rpu_l", "rpu_l_div", 0x104, 7),
+ GATE(CLK_RPU_SLEEP, "rpu_sleep", "rpu_sleep_div", 0x104, 8),
+ GATE(CLK_WIFI_PLL_GATE, "wifi_pll_gate", "wifi_pll_mux", 0x104, 9),
+ GATE(CLK_RPU_CORE, "rpu_core", "rpu_core_div", 0x104, 10),
+ GATE(CLK_WIFI_ADC, "wifi_adc", "wifi_div8_mux", 0x104, 11),
+ GATE(CLK_WIFI_DAC, "wifi_dac", "wifi_div4_mux", 0x104, 12),
+ GATE(CLK_USB_PHY, "usb_phy", "usb_phy_div", 0x104, 13),
+ GATE(CLK_ENET_IN, "enet_in", "enet_clk_in_gate", 0x104, 14),
+ GATE(CLK_ENET, "enet", "enet_div", 0x104, 15),
+ GATE(CLK_UART0, "uart0", "uart0_div", 0x104, 16),
+ GATE(CLK_UART1, "uart1", "uart1_div", 0x104, 17),
+ GATE(CLK_PERIPH_SYS, "periph_sys", "sys_internal_div", 0x104, 18),
+ GATE(CLK_SPI0, "spi0", "spi0_div", 0x104, 19),
+ GATE(CLK_SPI1, "spi1", "spi1_div", 0x104, 20),
+ GATE(CLK_EVENT_TIMER, "event_timer", "event_timer_div", 0x104, 21),
+ GATE(CLK_AUX_ADC_INTERNAL, "aux_adc_internal", "sys_internal_div",
+ 0x104, 22),
+ GATE(CLK_AUX_ADC, "aux_adc", "aux_adc_div", 0x104, 23),
+ GATE(CLK_SD_HOST, "sd_host", "sd_host_div", 0x104, 24),
+ GATE(CLK_BT, "bt", "bt_div", 0x104, 25),
+ GATE(CLK_BT_DIV4, "bt_div4", "bt_div4_div", 0x104, 26),
+ GATE(CLK_BT_DIV8, "bt_div8", "bt_div8_div", 0x104, 27),
+ GATE(CLK_BT_1MHZ, "bt_1mhz", "bt_1mhz_div", 0x104, 28),
+};
+
+static struct pistachio_fixed_factor pistachio_ffs[] __initdata = {
+ FIXED_FACTOR(CLK_WIFI_DIV4, "wifi_div4", "wifi_pll", 4),
+ FIXED_FACTOR(CLK_WIFI_DIV8, "wifi_div8", "wifi_pll", 8),
+};
+
+static struct pistachio_div pistachio_divs[] __initdata = {
+ DIV(CLK_MIPS_INTERNAL_DIV, "mips_internal_div", "mips_pll_mux",
+ 0x204, 2),
+ DIV(CLK_MIPS_DIV, "mips_div", "mips_internal_div", 0x208, 8),
+ DIV_F(CLK_AUDIO_DIV, "audio_div", "audio_mux",
+ 0x20c, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_I2S_DIV, "i2s_div", "audio_pll_mux",
+ 0x210, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_SPDIF_DIV, "spdif_div", "audio_pll_mux",
+ 0x214, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_AUDIO_DAC_DIV, "audio_dac_div", "audio_pll_mux",
+ 0x218, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV(CLK_RPU_V_DIV, "rpu_v_div", "rpu_v_pll_mux", 0x21c, 2),
+ DIV(CLK_RPU_L_DIV, "rpu_l_div", "rpu_l_mux", 0x220, 2),
+ DIV(CLK_RPU_SLEEP_DIV, "rpu_sleep_div", "xtal", 0x224, 10),
+ DIV(CLK_RPU_CORE_DIV, "rpu_core_div", "rpu_core_mux", 0x228, 3),
+ DIV(CLK_USB_PHY_DIV, "usb_phy_div", "sys_internal_div", 0x22c, 6),
+ DIV(CLK_ENET_DIV, "enet_div", "enet_mux", 0x230, 6),
+ DIV_F(CLK_UART0_INTERNAL_DIV, "uart0_internal_div", "sys_pll_mux",
+ 0x234, 3, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_UART0_DIV, "uart0_div", "uart0_internal_div", 0x238, 10,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_UART1_INTERNAL_DIV, "uart1_internal_div", "sys_pll_mux",
+ 0x23c, 3, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(CLK_UART1_DIV, "uart1_div", "uart1_internal_div", 0x240, 10,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV(CLK_SYS_INTERNAL_DIV, "sys_internal_div", "sys_pll_mux", 0x244, 3),
+ DIV(CLK_SPI0_INTERNAL_DIV, "spi0_internal_div", "sys_pll_mux",
+ 0x248, 3),
+ DIV(CLK_SPI0_DIV, "spi0_div", "spi0_internal_div", 0x24c, 7),
+ DIV(CLK_SPI1_INTERNAL_DIV, "spi1_internal_div", "sys_pll_mux",
+ 0x250, 3),
+ DIV(CLK_SPI1_DIV, "spi1_div", "spi1_internal_div", 0x254, 7),
+ DIV(CLK_EVENT_TIMER_INTERNAL_DIV, "event_timer_internal_div",
+ "event_timer_mux", 0x258, 3),
+ DIV(CLK_EVENT_TIMER_DIV, "event_timer_div", "event_timer_internal_div",
+ 0x25c, 12),
+ DIV(CLK_AUX_ADC_INTERNAL_DIV, "aux_adc_internal_div",
+ "aux_adc_internal", 0x260, 3),
+ DIV(CLK_AUX_ADC_DIV, "aux_adc_div", "aux_adc_internal_div", 0x264, 10),
+ DIV(CLK_SD_HOST_DIV, "sd_host_div", "sd_host_mux", 0x268, 6),
+ DIV(CLK_BT_DIV, "bt_div", "bt_pll_mux", 0x26c, 6),
+ DIV(CLK_BT_DIV4_DIV, "bt_div4_div", "bt_pll_mux", 0x270, 6),
+ DIV(CLK_BT_DIV8_DIV, "bt_div8_div", "bt_pll_mux", 0x274, 6),
+ DIV(CLK_BT_1MHZ_INTERNAL_DIV, "bt_1mhz_internal_div", "bt_pll_mux",
+ 0x278, 3),
+ DIV(CLK_BT_1MHZ_DIV, "bt_1mhz_div", "bt_1mhz_internal_div", 0x27c, 10),
+};
+
+PNAME(mux_xtal_audio_refclk) = { "xtal", "audio_clk_in_gate" };
+PNAME(mux_xtal_mips) = { "xtal", "mips_pll" };
+PNAME(mux_xtal_audio) = { "xtal", "audio_pll", "audio_in" };
+PNAME(mux_audio_debug) = { "audio_pll_mux", "debug_mux" };
+PNAME(mux_xtal_rpu_v) = { "xtal", "rpu_v_pll" };
+PNAME(mux_xtal_rpu_l) = { "xtal", "rpu_l_pll" };
+PNAME(mux_rpu_l_mips) = { "rpu_l_pll_mux", "mips_pll_mux" };
+PNAME(mux_xtal_wifi) = { "xtal", "wifi_pll" };
+PNAME(mux_xtal_wifi_div4) = { "xtal", "wifi_div4" };
+PNAME(mux_xtal_wifi_div8) = { "xtal", "wifi_div8" };
+PNAME(mux_wifi_div4_rpu_l) = { "wifi_pll_gate", "wifi_div4_mux",
+ "rpu_l_pll_mux" };
+PNAME(mux_xtal_sys) = { "xtal", "sys_pll" };
+PNAME(mux_sys_enet) = { "sys_internal_div", "enet_in" };
+PNAME(mux_audio_sys) = { "audio_pll_mux", "sys_internal_div" };
+PNAME(mux_sys_bt) = { "sys_internal_div", "bt_pll_mux" };
+PNAME(mux_xtal_bt) = { "xtal", "bt_pll" };
+
+static struct pistachio_mux pistachio_muxes[] __initdata = {
+ MUX(CLK_AUDIO_REF_MUX, "audio_refclk_mux", mux_xtal_audio_refclk,
+ 0x200, 0),
+ MUX(CLK_MIPS_PLL_MUX, "mips_pll_mux", mux_xtal_mips, 0x200, 1),
+ MUX(CLK_AUDIO_PLL_MUX, "audio_pll_mux", mux_xtal_audio, 0x200, 2),
+ MUX(CLK_AUDIO_MUX, "audio_mux", mux_audio_debug, 0x200, 4),
+ MUX(CLK_RPU_V_PLL_MUX, "rpu_v_pll_mux", mux_xtal_rpu_v, 0x200, 5),
+ MUX(CLK_RPU_L_PLL_MUX, "rpu_l_pll_mux", mux_xtal_rpu_l, 0x200, 6),
+ MUX(CLK_RPU_L_MUX, "rpu_l_mux", mux_rpu_l_mips, 0x200, 7),
+ MUX(CLK_WIFI_PLL_MUX, "wifi_pll_mux", mux_xtal_wifi, 0x200, 8),
+ MUX(CLK_WIFI_DIV4_MUX, "wifi_div4_mux", mux_xtal_wifi_div4, 0x200, 9),
+ MUX(CLK_WIFI_DIV8_MUX, "wifi_div8_mux", mux_xtal_wifi_div8, 0x200, 10),
+ MUX(CLK_RPU_CORE_MUX, "rpu_core_mux", mux_wifi_div4_rpu_l, 0x200, 11),
+ MUX(CLK_SYS_PLL_MUX, "sys_pll_mux", mux_xtal_sys, 0x200, 13),
+ MUX(CLK_ENET_MUX, "enet_mux", mux_sys_enet, 0x200, 14),
+ MUX(CLK_EVENT_TIMER_MUX, "event_timer_mux", mux_audio_sys, 0x200, 15),
+ MUX(CLK_SD_HOST_MUX, "sd_host_mux", mux_sys_bt, 0x200, 16),
+ MUX(CLK_BT_PLL_MUX, "bt_pll_mux", mux_xtal_bt, 0x200, 17),
+};
+
+static struct pistachio_pll pistachio_plls[] __initdata = {
+ PLL_FIXED(CLK_MIPS_PLL, "mips_pll", "xtal", PLL_GF40LP_LAINT, 0x0),
+ PLL_FIXED(CLK_AUDIO_PLL, "audio_pll", "audio_refclk_mux",
+ PLL_GF40LP_FRAC, 0xc),
+ PLL_FIXED(CLK_RPU_V_PLL, "rpu_v_pll", "xtal", PLL_GF40LP_LAINT, 0x20),
+ PLL_FIXED(CLK_RPU_L_PLL, "rpu_l_pll", "xtal", PLL_GF40LP_LAINT, 0x2c),
+ PLL_FIXED(CLK_SYS_PLL, "sys_pll", "xtal", PLL_GF40LP_FRAC, 0x38),
+ PLL_FIXED(CLK_WIFI_PLL, "wifi_pll", "xtal", PLL_GF40LP_FRAC, 0x4c),
+ PLL_FIXED(CLK_BT_PLL, "bt_pll", "xtal", PLL_GF40LP_LAINT, 0x60),
+};
+
+PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux",
+ "rpu_l_pll_mux", "sys_pll_mux",
+ "wifi_pll_mux", "bt_pll_mux" };
+static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
+
+static unsigned int pistachio_critical_clks[] __initdata = {
+ CLK_MIPS,
+ CLK_PERIPH_SYS,
+};
+
+static void __init pistachio_clk_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+ struct clk *debug_clk;
+
+ p = pistachio_clk_alloc_provider(np, CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_pll(p, pistachio_plls,
+ ARRAY_SIZE(pistachio_plls));
+ pistachio_clk_register_mux(p, pistachio_muxes,
+ ARRAY_SIZE(pistachio_muxes));
+ pistachio_clk_register_div(p, pistachio_divs,
+ ARRAY_SIZE(pistachio_divs));
+ pistachio_clk_register_fixed_factor(p, pistachio_ffs,
+ ARRAY_SIZE(pistachio_ffs));
+ pistachio_clk_register_gate(p, pistachio_gates,
+ ARRAY_SIZE(pistachio_gates));
+
+ debug_clk = clk_register_mux_table(NULL, "debug_mux", mux_debug,
+ ARRAY_SIZE(mux_debug),
+ CLK_SET_RATE_NO_REPARENT,
+ p->base + 0x200, 18, 0x1f, 0,
+ mux_debug_idx, NULL);
+ p->clk_data.clks[CLK_DEBUG_MUX] = debug_clk;
+
+ pistachio_clk_register_provider(p);
+
+ pistachio_clk_force_enable(p, pistachio_critical_clks,
+ ARRAY_SIZE(pistachio_critical_clks));
+}
+CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init);
+
+static struct pistachio_gate pistachio_periph_gates[] __initdata = {
+ GATE(PERIPH_CLK_SYS, "sys", "periph_sys", 0x100, 0),
+ GATE(PERIPH_CLK_SYS_BUS, "bus_sys", "periph_sys", 0x100, 1),
+ GATE(PERIPH_CLK_DDR, "ddr", "periph_sys", 0x100, 2),
+ GATE(PERIPH_CLK_ROM, "rom", "rom_div", 0x100, 3),
+ GATE(PERIPH_CLK_COUNTER_FAST, "counter_fast", "counter_fast_div",
+ 0x100, 4),
+ GATE(PERIPH_CLK_COUNTER_SLOW, "counter_slow", "counter_slow_div",
+ 0x100, 5),
+ GATE(PERIPH_CLK_IR, "ir", "ir_div", 0x100, 6),
+ GATE(PERIPH_CLK_WD, "wd", "wd_div", 0x100, 7),
+ GATE(PERIPH_CLK_PDM, "pdm", "pdm_div", 0x100, 8),
+ GATE(PERIPH_CLK_PWM, "pwm", "pwm_div", 0x100, 9),
+ GATE(PERIPH_CLK_I2C0, "i2c0", "i2c0_div", 0x100, 10),
+ GATE(PERIPH_CLK_I2C1, "i2c1", "i2c1_div", 0x100, 11),
+ GATE(PERIPH_CLK_I2C2, "i2c2", "i2c2_div", 0x100, 12),
+ GATE(PERIPH_CLK_I2C3, "i2c3", "i2c3_div", 0x100, 13),
+};
+
+static struct pistachio_div pistachio_periph_divs[] __initdata = {
+ DIV(PERIPH_CLK_ROM_DIV, "rom_div", "periph_sys", 0x10c, 7),
+ DIV(PERIPH_CLK_COUNTER_FAST_DIV, "counter_fast_div", "periph_sys",
+ 0x110, 7),
+ DIV(PERIPH_CLK_COUNTER_SLOW_PRE_DIV, "counter_slow_pre_div",
+ "periph_sys", 0x114, 7),
+ DIV(PERIPH_CLK_COUNTER_SLOW_DIV, "counter_slow_div",
+ "counter_slow_pre_div", 0x118, 7),
+ DIV_F(PERIPH_CLK_IR_PRE_DIV, "ir_pre_div", "periph_sys", 0x11c, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(PERIPH_CLK_IR_DIV, "ir_div", "ir_pre_div", 0x120, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(PERIPH_CLK_WD_PRE_DIV, "wd_pre_div", "periph_sys", 0x124, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_F(PERIPH_CLK_WD_DIV, "wd_div", "wd_pre_div", 0x128, 7,
+ CLK_DIVIDER_ROUND_CLOSEST),
+ DIV(PERIPH_CLK_PDM_PRE_DIV, "pdm_pre_div", "periph_sys", 0x12c, 7),
+ DIV(PERIPH_CLK_PDM_DIV, "pdm_div", "pdm_pre_div", 0x130, 7),
+ DIV(PERIPH_CLK_PWM_PRE_DIV, "pwm_pre_div", "periph_sys", 0x134, 7),
+ DIV(PERIPH_CLK_PWM_DIV, "pwm_div", "pwm_pre_div", 0x138, 7),
+ DIV(PERIPH_CLK_I2C0_PRE_DIV, "i2c0_pre_div", "periph_sys", 0x13c, 7),
+ DIV(PERIPH_CLK_I2C0_DIV, "i2c0_div", "i2c0_pre_div", 0x140, 7),
+ DIV(PERIPH_CLK_I2C1_PRE_DIV, "i2c1_pre_div", "periph_sys", 0x144, 7),
+ DIV(PERIPH_CLK_I2C1_DIV, "i2c1_div", "i2c1_pre_div", 0x148, 7),
+ DIV(PERIPH_CLK_I2C2_PRE_DIV, "i2c2_pre_div", "periph_sys", 0x14c, 7),
+ DIV(PERIPH_CLK_I2C2_DIV, "i2c2_div", "i2c2_pre_div", 0x150, 7),
+ DIV(PERIPH_CLK_I2C3_PRE_DIV, "i2c3_pre_div", "periph_sys", 0x154, 7),
+ DIV(PERIPH_CLK_I2C3_DIV, "i2c3_div", "i2c3_pre_div", 0x158, 7),
+};
+
+static void __init pistachio_clk_periph_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+
+ p = pistachio_clk_alloc_provider(np, PERIPH_CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_div(p, pistachio_periph_divs,
+ ARRAY_SIZE(pistachio_periph_divs));
+ pistachio_clk_register_gate(p, pistachio_periph_gates,
+ ARRAY_SIZE(pistachio_periph_gates));
+
+ pistachio_clk_register_provider(p);
+}
+CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph",
+ pistachio_clk_periph_init);
+
+static struct pistachio_gate pistachio_sys_gates[] __initdata = {
+ GATE(SYS_CLK_I2C0, "i2c0_sys", "sys", 0x8, 0),
+ GATE(SYS_CLK_I2C1, "i2c1_sys", "sys", 0x8, 1),
+ GATE(SYS_CLK_I2C2, "i2c2_sys", "sys", 0x8, 2),
+ GATE(SYS_CLK_I2C3, "i2c3_sys", "sys", 0x8, 3),
+ GATE(SYS_CLK_I2S_IN, "i2s_in_sys", "sys", 0x8, 4),
+ GATE(SYS_CLK_PAUD_OUT, "paud_out_sys", "sys", 0x8, 5),
+ GATE(SYS_CLK_SPDIF_OUT, "spdif_out_sys", "sys", 0x8, 6),
+ GATE(SYS_CLK_SPI0_MASTER, "spi0_master_sys", "sys", 0x8, 7),
+ GATE(SYS_CLK_SPI0_SLAVE, "spi0_slave_sys", "sys", 0x8, 8),
+ GATE(SYS_CLK_PWM, "pwm_sys", "sys", 0x8, 9),
+ GATE(SYS_CLK_UART0, "uart0_sys", "sys", 0x8, 10),
+ GATE(SYS_CLK_UART1, "uart1_sys", "sys", 0x8, 11),
+ GATE(SYS_CLK_SPI1, "spi1_sys", "sys", 0x8, 12),
+ GATE(SYS_CLK_MDC, "mdc_sys", "sys", 0x8, 13),
+ GATE(SYS_CLK_SD_HOST, "sd_host_sys", "sys", 0x8, 14),
+ GATE(SYS_CLK_ENET, "enet_sys", "sys", 0x8, 15),
+ GATE(SYS_CLK_IR, "ir_sys", "sys", 0x8, 16),
+ GATE(SYS_CLK_WD, "wd_sys", "sys", 0x8, 17),
+ GATE(SYS_CLK_TIMER, "timer_sys", "sys", 0x8, 18),
+ GATE(SYS_CLK_I2S_OUT, "i2s_out_sys", "sys", 0x8, 24),
+ GATE(SYS_CLK_SPDIF_IN, "spdif_in_sys", "sys", 0x8, 25),
+ GATE(SYS_CLK_EVENT_TIMER, "event_timer_sys", "sys", 0x8, 26),
+ GATE(SYS_CLK_HASH, "hash_sys", "sys", 0x8, 27),
+};
+
+static void __init pistachio_cr_periph_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+
+ p = pistachio_clk_alloc_provider(np, SYS_CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_gate(p, pistachio_sys_gates,
+ ARRAY_SIZE(pistachio_sys_gates));
+
+ pistachio_clk_register_provider(p);
+}
+CLK_OF_DECLARE(pistachio_cr_periph, "img,pistachio-cr-periph",
+ pistachio_cr_periph_init);
+
+static struct pistachio_gate pistachio_ext_gates[] __initdata = {
+ GATE(EXT_CLK_ENET_IN, "enet_clk_in_gate", "enet_clk_in", 0x58, 5),
+ GATE(EXT_CLK_AUDIO_IN, "audio_clk_in_gate", "audio_clk_in", 0x58, 8)
+};
+
+static void __init pistachio_cr_top_init(struct device_node *np)
+{
+ struct pistachio_clk_provider *p;
+
+ p = pistachio_clk_alloc_provider(np, EXT_CLK_NR_CLKS);
+ if (!p)
+ return;
+
+ pistachio_clk_register_gate(p, pistachio_ext_gates,
+ ARRAY_SIZE(pistachio_ext_gates));
+
+ pistachio_clk_register_provider(p);
+}
+CLK_OF_DECLARE(pistachio_cr_top, "img,pistachio-cr-top",
+ pistachio_cr_top_init);
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
new file mode 100644
index 000000000000..de537560bf70
--- /dev/null
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define PLL_STATUS 0x0
+#define PLL_STATUS_LOCK BIT(0)
+
+#define PLL_CTRL1 0x4
+#define PLL_CTRL1_REFDIV_SHIFT 0
+#define PLL_CTRL1_REFDIV_MASK 0x3f
+#define PLL_CTRL1_FBDIV_SHIFT 6
+#define PLL_CTRL1_FBDIV_MASK 0xfff
+#define PLL_INT_CTRL1_POSTDIV1_SHIFT 18
+#define PLL_INT_CTRL1_POSTDIV1_MASK 0x7
+#define PLL_INT_CTRL1_POSTDIV2_SHIFT 21
+#define PLL_INT_CTRL1_POSTDIV2_MASK 0x7
+#define PLL_INT_CTRL1_PD BIT(24)
+#define PLL_INT_CTRL1_DSMPD BIT(25)
+#define PLL_INT_CTRL1_FOUTPOSTDIVPD BIT(26)
+#define PLL_INT_CTRL1_FOUTVCOPD BIT(27)
+
+#define PLL_CTRL2 0x8
+#define PLL_FRAC_CTRL2_FRAC_SHIFT 0
+#define PLL_FRAC_CTRL2_FRAC_MASK 0xffffff
+#define PLL_FRAC_CTRL2_POSTDIV1_SHIFT 24
+#define PLL_FRAC_CTRL2_POSTDIV1_MASK 0x7
+#define PLL_FRAC_CTRL2_POSTDIV2_SHIFT 27
+#define PLL_FRAC_CTRL2_POSTDIV2_MASK 0x7
+#define PLL_INT_CTRL2_BYPASS BIT(28)
+
+#define PLL_CTRL3 0xc
+#define PLL_FRAC_CTRL3_PD BIT(0)
+#define PLL_FRAC_CTRL3_DACPD BIT(1)
+#define PLL_FRAC_CTRL3_DSMPD BIT(2)
+#define PLL_FRAC_CTRL3_FOUTPOSTDIVPD BIT(3)
+#define PLL_FRAC_CTRL3_FOUT4PHASEPD BIT(4)
+#define PLL_FRAC_CTRL3_FOUTVCOPD BIT(5)
+
+#define PLL_CTRL4 0x10
+#define PLL_FRAC_CTRL4_BYPASS BIT(28)
+
+struct pistachio_clk_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ struct pistachio_pll_rate_table *rates;
+ unsigned int nr_rates;
+};
+
+static inline u32 pll_readl(struct pistachio_clk_pll *pll, u32 reg)
+{
+ return readl(pll->base + reg);
+}
+
+static inline void pll_writel(struct pistachio_clk_pll *pll, u32 val, u32 reg)
+{
+ writel(val, pll->base + reg);
+}
+
+static inline u32 do_div_round_closest(u64 dividend, u32 divisor)
+{
+ dividend += divisor / 2;
+ do_div(dividend, divisor);
+
+ return dividend;
+}
+
+static inline struct pistachio_clk_pll *to_pistachio_pll(struct clk_hw *hw)
+{
+ return container_of(hw, struct pistachio_clk_pll, hw);
+}
+
+static struct pistachio_pll_rate_table *
+pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
+ unsigned long fout)
+{
+ unsigned int i;
+
+ for (i = 0; i < pll->nr_rates; i++) {
+ if (pll->rates[i].fref == fref && pll->rates[i].fout == fout)
+ return &pll->rates[i];
+ }
+
+ return NULL;
+}
+
+static long pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ unsigned int i;
+
+ for (i = 0; i < pll->nr_rates; i++) {
+ if (i > 0 && pll->rates[i].fref == *parent_rate &&
+ pll->rates[i].fout <= rate)
+ return pll->rates[i - 1].fout;
+ }
+
+ return pll->rates[0].fout;
+}
+
+static int pll_gf40lp_frac_enable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL3);
+ val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD |
+ PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
+ PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD);
+ pll_writel(pll, val, PLL_CTRL3);
+
+ val = pll_readl(pll, PLL_CTRL4);
+ val &= ~PLL_FRAC_CTRL4_BYPASS;
+ pll_writel(pll, val, PLL_CTRL4);
+
+ return 0;
+}
+
+static void pll_gf40lp_frac_disable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL3);
+ val |= PLL_FRAC_CTRL3_PD;
+ pll_writel(pll, val, PLL_CTRL3);
+}
+
+static int pll_gf40lp_frac_is_enabled(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+
+ return !(pll_readl(pll, PLL_CTRL3) & PLL_FRAC_CTRL3_PD);
+}
+
+static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ struct pistachio_pll_rate_table *params;
+ bool was_enabled;
+ u32 val;
+
+ params = pll_get_params(pll, parent_rate, rate);
+ if (!params)
+ return -EINVAL;
+
+ was_enabled = pll_gf40lp_frac_is_enabled(hw);
+ if (!was_enabled)
+ pll_gf40lp_frac_enable(hw);
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) |
+ (PLL_CTRL1_FBDIV_MASK << PLL_CTRL1_FBDIV_SHIFT));
+ val |= (params->refdiv << PLL_CTRL1_REFDIV_SHIFT) |
+ (params->fbdiv << PLL_CTRL1_FBDIV_SHIFT);
+ pll_writel(pll, val, PLL_CTRL1);
+
+ val = pll_readl(pll, PLL_CTRL2);
+ val &= ~((PLL_FRAC_CTRL2_FRAC_MASK << PLL_FRAC_CTRL2_FRAC_SHIFT) |
+ (PLL_FRAC_CTRL2_POSTDIV1_MASK <<
+ PLL_FRAC_CTRL2_POSTDIV1_SHIFT) |
+ (PLL_FRAC_CTRL2_POSTDIV2_MASK <<
+ PLL_FRAC_CTRL2_POSTDIV2_SHIFT));
+ val |= (params->frac << PLL_FRAC_CTRL2_FRAC_SHIFT) |
+ (params->postdiv1 << PLL_FRAC_CTRL2_POSTDIV1_SHIFT) |
+ (params->postdiv2 << PLL_FRAC_CTRL2_POSTDIV2_SHIFT);
+ pll_writel(pll, val, PLL_CTRL2);
+
+ while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK))
+ cpu_relax();
+
+ if (!was_enabled)
+ pll_gf40lp_frac_disable(hw);
+
+ return 0;
+}
+
+static unsigned long pll_gf40lp_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val, prediv, fbdiv, frac, postdiv1, postdiv2;
+ u64 rate = parent_rate;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ prediv = (val >> PLL_CTRL1_REFDIV_SHIFT) & PLL_CTRL1_REFDIV_MASK;
+ fbdiv = (val >> PLL_CTRL1_FBDIV_SHIFT) & PLL_CTRL1_FBDIV_MASK;
+
+ val = pll_readl(pll, PLL_CTRL2);
+ postdiv1 = (val >> PLL_FRAC_CTRL2_POSTDIV1_SHIFT) &
+ PLL_FRAC_CTRL2_POSTDIV1_MASK;
+ postdiv2 = (val >> PLL_FRAC_CTRL2_POSTDIV2_SHIFT) &
+ PLL_FRAC_CTRL2_POSTDIV2_MASK;
+ frac = (val >> PLL_FRAC_CTRL2_FRAC_SHIFT) & PLL_FRAC_CTRL2_FRAC_MASK;
+
+ rate *= (fbdiv << 24) + frac;
+ rate = do_div_round_closest(rate, (prediv * postdiv1 * postdiv2) << 24);
+
+ return rate;
+}
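A worked form of the rate relation that pll_gf40lp_frac_recalc_rate() above computes (a sketch for orientation only; the register values in the example are hypothetical):

    /*
     * fout = fref * (fbdiv + frac / 2^24) / (refdiv * postdiv1 * postdiv2)
     *
     * e.g. assuming fref = 52 MHz, refdiv = 1, fbdiv = 8, frac = 0 and
     * postdiv1 = postdiv2 = 1, recalc_rate() returns 52 MHz * 8 = 416 MHz,
     * with the division rounded to the closest value by
     * do_div_round_closest().
     */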
+
+static struct clk_ops pll_gf40lp_frac_ops = {
+ .enable = pll_gf40lp_frac_enable,
+ .disable = pll_gf40lp_frac_disable,
+ .is_enabled = pll_gf40lp_frac_is_enabled,
+ .recalc_rate = pll_gf40lp_frac_recalc_rate,
+ .round_rate = pll_round_rate,
+ .set_rate = pll_gf40lp_frac_set_rate,
+};
+
+static struct clk_ops pll_gf40lp_frac_fixed_ops = {
+ .enable = pll_gf40lp_frac_enable,
+ .disable = pll_gf40lp_frac_disable,
+ .is_enabled = pll_gf40lp_frac_is_enabled,
+ .recalc_rate = pll_gf40lp_frac_recalc_rate,
+};
+
+static int pll_gf40lp_laint_enable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD |
+ PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD);
+ pll_writel(pll, val, PLL_CTRL1);
+
+ val = pll_readl(pll, PLL_CTRL2);
+ val &= ~PLL_INT_CTRL2_BYPASS;
+ pll_writel(pll, val, PLL_CTRL2);
+
+ return 0;
+}
+
+static void pll_gf40lp_laint_disable(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val |= PLL_INT_CTRL1_PD;
+ pll_writel(pll, val, PLL_CTRL1);
+}
+
+static int pll_gf40lp_laint_is_enabled(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+
+ return !(pll_readl(pll, PLL_CTRL1) & PLL_INT_CTRL1_PD);
+}
+
+static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ struct pistachio_pll_rate_table *params;
+ bool was_enabled;
+ u32 val;
+
+ params = pll_get_params(pll, parent_rate, rate);
+ if (!params)
+ return -EINVAL;
+
+ was_enabled = pll_gf40lp_laint_is_enabled(hw);
+ if (!was_enabled)
+ pll_gf40lp_laint_enable(hw);
+
+ val = pll_readl(pll, PLL_CTRL1);
+ val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) |
+ (PLL_CTRL1_FBDIV_MASK << PLL_CTRL1_FBDIV_SHIFT) |
+ (PLL_INT_CTRL1_POSTDIV1_MASK << PLL_INT_CTRL1_POSTDIV1_SHIFT) |
+ (PLL_INT_CTRL1_POSTDIV2_MASK << PLL_INT_CTRL1_POSTDIV2_SHIFT));
+ val |= (params->refdiv << PLL_CTRL1_REFDIV_SHIFT) |
+ (params->fbdiv << PLL_CTRL1_FBDIV_SHIFT) |
+ (params->postdiv1 << PLL_INT_CTRL1_POSTDIV1_SHIFT) |
+ (params->postdiv2 << PLL_INT_CTRL1_POSTDIV2_SHIFT);
+ pll_writel(pll, val, PLL_CTRL1);
+
+ while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK))
+ cpu_relax();
+
+ if (!was_enabled)
+ pll_gf40lp_laint_disable(hw);
+
+ return 0;
+}
+
+static unsigned long pll_gf40lp_laint_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val, prediv, fbdiv, postdiv1, postdiv2;
+ u64 rate = parent_rate;
+
+ val = pll_readl(pll, PLL_CTRL1);
+ prediv = (val >> PLL_CTRL1_REFDIV_SHIFT) & PLL_CTRL1_REFDIV_MASK;
+ fbdiv = (val >> PLL_CTRL1_FBDIV_SHIFT) & PLL_CTRL1_FBDIV_MASK;
+ postdiv1 = (val >> PLL_INT_CTRL1_POSTDIV1_SHIFT) &
+ PLL_INT_CTRL1_POSTDIV1_MASK;
+ postdiv2 = (val >> PLL_INT_CTRL1_POSTDIV2_SHIFT) &
+ PLL_INT_CTRL1_POSTDIV2_MASK;
+
+ rate *= fbdiv;
+ rate = do_div_round_closest(rate, prediv * postdiv1 * postdiv2);
+
+ return rate;
+}
+
+static struct clk_ops pll_gf40lp_laint_ops = {
+ .enable = pll_gf40lp_laint_enable,
+ .disable = pll_gf40lp_laint_disable,
+ .is_enabled = pll_gf40lp_laint_is_enabled,
+ .recalc_rate = pll_gf40lp_laint_recalc_rate,
+ .round_rate = pll_round_rate,
+ .set_rate = pll_gf40lp_laint_set_rate,
+};
+
+static struct clk_ops pll_gf40lp_laint_fixed_ops = {
+ .enable = pll_gf40lp_laint_enable,
+ .disable = pll_gf40lp_laint_disable,
+ .is_enabled = pll_gf40lp_laint_is_enabled,
+ .recalc_rate = pll_gf40lp_laint_recalc_rate,
+};
+
+static struct clk *pll_register(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *base,
+ enum pistachio_pll_type type,
+ struct pistachio_pll_rate_table *rates,
+ unsigned int nr_rates)
+{
+ struct pistachio_clk_pll *pll;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags | CLK_GET_RATE_NOCACHE;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ switch (type) {
+ case PLL_GF40LP_FRAC:
+ if (rates)
+ init.ops = &pll_gf40lp_frac_ops;
+ else
+ init.ops = &pll_gf40lp_frac_fixed_ops;
+ break;
+ case PLL_GF40LP_LAINT:
+ if (rates)
+ init.ops = &pll_gf40lp_laint_ops;
+ else
+ init.ops = &pll_gf40lp_laint_fixed_ops;
+ break;
+ default:
+ pr_err("Unrecognized PLL type %u\n", type);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll->hw.init = &init;
+ pll->base = base;
+ pll->rates = rates;
+ pll->nr_rates = nr_rates;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+void pistachio_clk_register_pll(struct pistachio_clk_provider *p,
+ struct pistachio_pll *pll,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = pll_register(pll[i].name, pll[i].parent,
+ 0, p->base + pll[i].reg_base,
+ pll[i].type, pll[i].rates,
+ pll[i].nr_rates);
+ p->clk_data.clks[pll[i].id] = clk;
+ }
+}
diff --git a/drivers/clk/pistachio/clk.c b/drivers/clk/pistachio/clk.c
new file mode 100644
index 000000000000..85faa83e1bd7
--- /dev/null
+++ b/drivers/clk/pistachio/clk.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+struct pistachio_clk_provider *
+pistachio_clk_alloc_provider(struct device_node *node, unsigned int num_clks)
+{
+ struct pistachio_clk_provider *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return p;
+
+ p->clk_data.clks = kcalloc(num_clks, sizeof(struct clk *), GFP_KERNEL);
+ if (!p->clk_data.clks)
+ goto free_provider;
+ p->clk_data.clk_num = num_clks;
+ p->node = node;
+ p->base = of_iomap(node, 0);
+ if (!p->base) {
+ pr_err("Failed to map clock provider registers\n");
+ goto free_clks;
+ }
+
+ return p;
+
+free_clks:
+ kfree(p->clk_data.clks);
+free_provider:
+ kfree(p);
+ return NULL;
+}
+
+void pistachio_clk_register_provider(struct pistachio_clk_provider *p)
+{
+ unsigned int i;
+
+ for (i = 0; i < p->clk_data.clk_num; i++) {
+ if (IS_ERR(p->clk_data.clks[i]))
+ pr_warn("Failed to register clock %d: %ld\n", i,
+ PTR_ERR(p->clk_data.clks[i]));
+ }
+
+ of_clk_add_provider(p->node, of_clk_src_onecell_get, &p->clk_data);
+}
+
+void pistachio_clk_register_gate(struct pistachio_clk_provider *p,
+ struct pistachio_gate *gate,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_gate(NULL, gate[i].name, gate[i].parent,
+ CLK_SET_RATE_PARENT,
+ p->base + gate[i].reg, gate[i].shift,
+ 0, NULL);
+ p->clk_data.clks[gate[i].id] = clk;
+ }
+}
+
+void pistachio_clk_register_mux(struct pistachio_clk_provider *p,
+ struct pistachio_mux *mux,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_mux(NULL, mux[i].name, mux[i].parents,
+ mux[i].num_parents,
+ CLK_SET_RATE_NO_REPARENT,
+ p->base + mux[i].reg, mux[i].shift,
+ get_count_order(mux[i].num_parents),
+ 0, NULL);
+ p->clk_data.clks[mux[i].id] = clk;
+ }
+}
+
+void pistachio_clk_register_div(struct pistachio_clk_provider *p,
+ struct pistachio_div *div,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_divider(NULL, div[i].name, div[i].parent,
+ 0, p->base + div[i].reg, 0,
+ div[i].width, div[i].div_flags,
+ NULL);
+ p->clk_data.clks[div[i].id] = clk;
+ }
+}
+
+void pistachio_clk_register_fixed_factor(struct pistachio_clk_provider *p,
+ struct pistachio_fixed_factor *ff,
+ unsigned int num)
+{
+ struct clk *clk;
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ clk = clk_register_fixed_factor(NULL, ff[i].name, ff[i].parent,
+ 0, 1, ff[i].div);
+ p->clk_data.clks[ff[i].id] = clk;
+ }
+}
+
+void pistachio_clk_force_enable(struct pistachio_clk_provider *p,
+ unsigned int *clk_ids, unsigned int num)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < num; i++) {
+ struct clk *clk = p->clk_data.clks[clk_ids[i]];
+
+ if (IS_ERR(clk))
+ continue;
+
+ err = clk_prepare_enable(clk);
+ if (err)
+ pr_err("Failed to enable clock %s: %d\n",
+ __clk_get_name(clk), err);
+ }
+}
diff --git a/drivers/clk/pistachio/clk.h b/drivers/clk/pistachio/clk.h
new file mode 100644
index 000000000000..52fabbc24624
--- /dev/null
+++ b/drivers/clk/pistachio/clk.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef __PISTACHIO_CLK_H
+#define __PISTACHIO_CLK_H
+
+#include <linux/clk-provider.h>
+
+struct pistachio_gate {
+ unsigned int id;
+ unsigned long reg;
+ unsigned int shift;
+ const char *name;
+ const char *parent;
+};
+
+#define GATE(_id, _name, _pname, _reg, _shift) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .shift = _shift, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_mux {
+ unsigned int id;
+ unsigned long reg;
+ unsigned int shift;
+ unsigned int num_parents;
+ const char *name;
+ const char **parents;
+};
+
+#define PNAME(x) static const char *x[] __initconst
+
+#define MUX(_id, _name, _pnames, _reg, _shift) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .shift = _shift, \
+ .name = _name, \
+ .parents = _pnames, \
+ .num_parents = ARRAY_SIZE(_pnames) \
+ }
+
+struct pistachio_div {
+ unsigned int id;
+ unsigned long reg;
+ unsigned int width;
+ unsigned int div_flags;
+ const char *name;
+ const char *parent;
+};
+
+#define DIV(_id, _name, _pname, _reg, _width) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .width = _width, \
+ .div_flags = 0, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+#define DIV_F(_id, _name, _pname, _reg, _width, _div_flags) \
+ { \
+ .id = _id, \
+ .reg = _reg, \
+ .width = _width, \
+ .div_flags = _div_flags, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_fixed_factor {
+ unsigned int id;
+ unsigned int div;
+ const char *name;
+ const char *parent;
+};
+
+#define FIXED_FACTOR(_id, _name, _pname, _div) \
+ { \
+ .id = _id, \
+ .div = _div, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_pll_rate_table {
+ unsigned long fref;
+ unsigned long fout;
+ unsigned int refdiv;
+ unsigned int fbdiv;
+ unsigned int postdiv1;
+ unsigned int postdiv2;
+ unsigned int frac;
+};
+
+enum pistachio_pll_type {
+ PLL_GF40LP_LAINT,
+ PLL_GF40LP_FRAC,
+};
+
+struct pistachio_pll {
+ unsigned int id;
+ unsigned long reg_base;
+ enum pistachio_pll_type type;
+ struct pistachio_pll_rate_table *rates;
+ unsigned int nr_rates;
+ const char *name;
+ const char *parent;
+};
+
+#define PLL(_id, _name, _pname, _type, _reg, _rates) \
+ { \
+ .id = _id, \
+ .reg_base = _reg, \
+ .type = _type, \
+ .rates = _rates, \
+ .nr_rates = ARRAY_SIZE(_rates), \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+#define PLL_FIXED(_id, _name, _pname, _type, _reg) \
+ { \
+ .id = _id, \
+ .reg_base = _reg, \
+ .type = _type, \
+ .rates = NULL, \
+ .nr_rates = 0, \
+ .name = _name, \
+ .parent = _pname, \
+ }
+
+struct pistachio_clk_provider {
+ struct device_node *node;
+ void __iomem *base;
+ struct clk_onecell_data clk_data;
+};
+
+extern struct pistachio_clk_provider *
+pistachio_clk_alloc_provider(struct device_node *node, unsigned int num_clks);
+extern void pistachio_clk_register_provider(struct pistachio_clk_provider *p);
+
+extern void pistachio_clk_register_gate(struct pistachio_clk_provider *p,
+ struct pistachio_gate *gate,
+ unsigned int num);
+extern void pistachio_clk_register_mux(struct pistachio_clk_provider *p,
+ struct pistachio_mux *mux,
+ unsigned int num);
+extern void pistachio_clk_register_div(struct pistachio_clk_provider *p,
+ struct pistachio_div *div,
+ unsigned int num);
+extern void
+pistachio_clk_register_fixed_factor(struct pistachio_clk_provider *p,
+ struct pistachio_fixed_factor *ff,
+ unsigned int num);
+extern void pistachio_clk_register_pll(struct pistachio_clk_provider *p,
+ struct pistachio_pll *pll,
+ unsigned int num);
+
+extern void pistachio_clk_force_enable(struct pistachio_clk_provider *p,
+ unsigned int *clk_ids, unsigned int num);
+
+#endif
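For orientation, a minimal sketch of how a SoC clock table would use the helpers declared above; it follows the same pattern as pistachio_cr_periph_init() earlier in this patch, but the clock ID, register offset and compatible string here are hypothetical:

    static struct pistachio_gate example_gates[] __initdata = {
        /* GATE(id, name, parent, reg, shift) -- values are illustrative */
        GATE(0, "uart0_example", "sys", 0x8, 0),
    };

    static void __init example_cr_init(struct device_node *np)
    {
        struct pistachio_clk_provider *p;

        /* one clk_data.clks[] slot per clock ID */
        p = pistachio_clk_alloc_provider(np, ARRAY_SIZE(example_gates));
        if (!p)
            return;

        pistachio_clk_register_gate(p, example_gates,
                                    ARRAY_SIZE(example_gates));

        /* exposes the clocks to DT consumers via of_clk_src_onecell_get */
        pistachio_clk_register_provider(p);
    }
    CLK_OF_DECLARE(example_cr, "img,example-cr", example_cr_init);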
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index 323965430111..b04c5b9c0ea8 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -14,7 +14,7 @@
#define _CLK_PXA_
#define PARENTS(name) \
- static const char *name ## _parents[] __initconst
+ static const char *name ## _parents[] __initdata
#define MUX_RO_RATE_RO_OPS(name, clk_name) \
static struct clk_hw name ## _mux_hw; \
static struct clk_hw name ## _rate_hw; \
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 39f891bba09a..4b93a1efb36d 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -336,6 +336,9 @@ static void __init pxa3xx_base_clocks_init(void)
clk_register_clk_pxa3xx_smemc();
clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
(void __iomem *)&OSCC, 11, 0, NULL);
+ clkdev_pxa_register(CLK_OSTIMER, "OSTIMER0", NULL,
+ clk_register_fixed_factor(NULL, "os-timer0",
+ "osc_13mhz", 0, 1, 4));
}
int __init pxa3xx_clocks_init(void)
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 0d7ab52b7ab0..59d16668bdf5 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,6 +1,7 @@
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
+ depends on ARCH_QCOM || COMPILE_TEST
select REGMAP_MMIO
select RESET_CONTROLLER
@@ -46,6 +47,14 @@ config MSM_GCC_8660
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, SD/eMMC, etc.
+config MSM_GCC_8916
+ tristate "MSM8916 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8916 devices.
+ Say Y if you want to use devices such as UART, SPI, I2C, USB,
+ SD/eMMC, display, graphics, camera, etc.
+
config MSM_GCC_8960
tristate "APQ8064/MSM8960 Global Clock Controller"
depends on COMMON_CLK_QCOM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 617826469595..50b337a24a87 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index b4325f65a1bf..245d5063a385 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -71,12 +71,8 @@ static int clk_pll_enable(struct clk_hw *hw)
udelay(50);
/* Enable PLL output. */
- ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_OUTCTRL,
+ return regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_OUTCTRL,
PLL_OUTCTRL);
- if (ret)
- return ret;
-
- return 0;
}
static void clk_pll_disable(struct clk_hw *hw)
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 0039bd7d3965..7b3d62674203 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -47,15 +47,20 @@ static u8 clk_rcg_get_parent(struct clk_hw *hw)
struct clk_rcg *rcg = to_clk_rcg(hw);
int num_parents = __clk_get_num_parents(hw->clk);
u32 ns;
- int i;
+ int i, ret;
- regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ if (ret)
+ goto err;
ns = ns_to_src(&rcg->s, ns);
for (i = 0; i < num_parents; i++)
- if (ns == rcg->s.parent_map[i])
+ if (ns == rcg->s.parent_map[i].cfg)
return i;
- return -EINVAL;
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, __clk_get_name(hw->clk));
+ return 0;
}
static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank)
@@ -70,21 +75,28 @@ static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
int num_parents = __clk_get_num_parents(hw->clk);
u32 ns, reg;
int bank;
- int i;
+ int i, ret;
struct src_sel *s;
- regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+ ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+ if (ret)
+ goto err;
bank = reg_to_bank(rcg, reg);
s = &rcg->s[bank];
- regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
+ ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
+ if (ret)
+ goto err;
ns = ns_to_src(s, ns);
for (i = 0; i < num_parents; i++)
- if (ns == s->parent_map[i])
+ if (ns == s->parent_map[i].cfg)
return i;
- return -EINVAL;
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, __clk_get_name(hw->clk));
+ return 0;
}
static int clk_rcg_set_parent(struct clk_hw *hw, u8 index)
@@ -93,7 +105,7 @@ static int clk_rcg_set_parent(struct clk_hw *hw, u8 index)
u32 ns;
regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
- ns = src_to_ns(&rcg->s, rcg->s.parent_map[index], ns);
+ ns = src_to_ns(&rcg->s, rcg->s.parent_map[index].cfg, ns);
regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
return 0;
@@ -191,10 +203,10 @@ static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val)
return val;
}
-static void configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
+static int configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
{
u32 ns, md, reg;
- int bank, new_bank;
+ int bank, new_bank, ret, index;
struct mn *mn;
struct pre_div *p;
struct src_sel *s;
@@ -206,38 +218,56 @@ static void configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
enabled = __clk_is_enabled(hw->clk);
- regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+ ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+ if (ret)
+ return ret;
bank = reg_to_bank(rcg, reg);
new_bank = enabled ? !bank : bank;
ns_reg = rcg->ns_reg[new_bank];
- regmap_read(rcg->clkr.regmap, ns_reg, &ns);
+ ret = regmap_read(rcg->clkr.regmap, ns_reg, &ns);
+ if (ret)
+ return ret;
if (banked_mn) {
mn = &rcg->mn[new_bank];
md_reg = rcg->md_reg[new_bank];
ns |= BIT(mn->mnctr_reset_bit);
- regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ if (ret)
+ return ret;
- regmap_read(rcg->clkr.regmap, md_reg, &md);
+ ret = regmap_read(rcg->clkr.regmap, md_reg, &md);
+ if (ret)
+ return ret;
md = mn_to_md(mn, f->m, f->n, md);
- regmap_write(rcg->clkr.regmap, md_reg, md);
-
+ ret = regmap_write(rcg->clkr.regmap, md_reg, md);
+ if (ret)
+ return ret;
ns = mn_to_ns(mn, f->m, f->n, ns);
- regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ if (ret)
+ return ret;
/* Two NS registers means mode control is in NS register */
if (rcg->ns_reg[0] != rcg->ns_reg[1]) {
ns = mn_to_reg(mn, f->m, f->n, ns);
- regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ if (ret)
+ return ret;
} else {
reg = mn_to_reg(mn, f->m, f->n, reg);
- regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg);
+ ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg,
+ reg);
+ if (ret)
+ return ret;
}
ns &= ~BIT(mn->mnctr_reset_bit);
- regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ if (ret)
+ return ret;
}
if (banked_p) {
@@ -246,14 +276,24 @@ static void configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
}
s = &rcg->s[new_bank];
- ns = src_to_ns(s, s->parent_map[f->src], ns);
- regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ index = qcom_find_src_index(hw, s->parent_map, f->src);
+ if (index < 0)
+ return index;
+ ns = src_to_ns(s, s->parent_map[index].cfg, ns);
+ ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
+ if (ret)
+ return ret;
if (enabled) {
- regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+ ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+ if (ret)
+ return ret;
reg ^= BIT(rcg->mux_sel_bit);
- regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg);
+ ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg);
+ if (ret)
+ return ret;
}
+ return 0;
}
static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index)
@@ -279,10 +319,8 @@ static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index)
if (banked_p)
f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;
- f.src = index;
- configure_bank(rcg, &f);
-
- return 0;
+ f.src = qcom_find_src_index(hw, rcg->s[bank].parent_map, index);
+ return configure_bank(rcg, &f);
}
/*
@@ -369,17 +407,23 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
static long _freq_tbl_determine_rate(struct clk_hw *hw,
const struct freq_tbl *f, unsigned long rate,
unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p_hw)
+ unsigned long *p_rate, struct clk_hw **p_hw,
+ const struct parent_map *parent_map)
{
unsigned long clk_flags;
struct clk *p;
+ int index;
f = qcom_find_freq(f, rate);
if (!f)
return -EINVAL;
+ index = qcom_find_src_index(hw, parent_map, f->src);
+ if (index < 0)
+ return index;
+
clk_flags = __clk_get_flags(hw->clk);
- p = clk_get_parent_by_index(hw->clk, f->src);
+ p = clk_get_parent_by_index(hw->clk, index);
if (clk_flags & CLK_SET_RATE_PARENT) {
rate = rate * f->pre_div;
if (f->n) {
@@ -404,7 +448,7 @@ static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
struct clk_rcg *rcg = to_clk_rcg(hw);
return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
- max_rate, p_rate, p);
+ max_rate, p_rate, p, rcg->s.parent_map);
}
static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
@@ -412,9 +456,16 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *p_rate, struct clk_hw **p)
{
struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ u32 reg;
+ int bank;
+ struct src_sel *s;
+
+ regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
+ bank = reg_to_bank(rcg, reg);
+ s = &rcg->s[bank];
return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
- max_rate, p_rate, p);
+ max_rate, p_rate, p, s->parent_map);
}
static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
@@ -424,8 +475,9 @@ static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
struct clk_rcg *rcg = to_clk_rcg(hw);
const struct freq_tbl *f = rcg->freq_tbl;
struct clk *p;
+ int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src);
- p = clk_get_parent_by_index(hw->clk, f->src);
+ p = clk_get_parent_by_index(hw->clk, index);
*p_hw = __clk_get_hw(p);
*p_rate = __clk_round_rate(p, rate);
@@ -495,6 +547,57 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
}
+/*
+ * This type of clock has a glitch-free mux that switches between the output of
+ * the M/N counter and an always-on clock source (XO). When clk_set_rate() is
+ * called we need to make sure that we don't switch to the M/N counter if it
+ * isn't clocking, because the mux will get stuck and the output will stop
+ * toggling. This can happen if the framework isn't aware that this clock is
+ * on and so clk_set_rate() doesn't turn on the new parent. To fix this we
+ * switch the mux in the enable/disable ops and reprogram the M/N counter in
+ * the set_rate op. We also make sure to switch away from the M/N counter in
+ * set_rate if software thinks the clock is off.
+ */
+static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ const struct freq_tbl *f;
+ int ret;
+ u32 gfm = BIT(10);
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /* Switch to XO to avoid glitches */
+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
+ ret = __clk_rcg_set_rate(rcg, f);
+ /* Switch back to M/N if it's clocking */
+ if (__clk_is_enabled(hw->clk))
+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
+
+ return ret;
+}
+
+static int clk_rcg_lcc_enable(struct clk_hw *hw)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 gfm = BIT(10);
+
+ /* Use M/N */
+ return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
+}
+
+static void clk_rcg_lcc_disable(struct clk_hw *hw)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 gfm = BIT(10);
+
+ /* Use XO */
+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
+}
+
static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
{
struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
@@ -504,9 +607,7 @@ static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
if (!f)
return -EINVAL;
- configure_bank(rcg, f);
-
- return 0;
+ return configure_bank(rcg, f);
}
static int clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -543,6 +644,17 @@ const struct clk_ops clk_rcg_bypass_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
+const struct clk_ops clk_rcg_lcc_ops = {
+ .enable = clk_rcg_lcc_enable,
+ .disable = clk_rcg_lcc_disable,
+ .get_parent = clk_rcg_get_parent,
+ .set_parent = clk_rcg_set_parent,
+ .recalc_rate = clk_rcg_recalc_rate,
+ .determine_rate = clk_rcg_determine_rate,
+ .set_rate = clk_rcg_lcc_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);
+
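A consumer-side sketch of the ordering described in the glitch-free mux comment above (hypothetical driver code, not part of the patch; assumes the usual <linux/clk.h> and <linux/err.h> includes). Enabling the clock selects the M/N output, and clk_set_rate() parks the mux on XO while the counter is reprogrammed, switching back only if the framework sees the clock as enabled:

    static int example_start_lcc_clk(struct device *dev)
    {
        struct clk *clk;
        int ret;

        clk = devm_clk_get(dev, "mi2s_osr");    /* name is illustrative */
        if (IS_ERR(clk))
            return PTR_ERR(clk);

        /* selects the M/N output via clk_rcg_lcc_enable() */
        ret = clk_prepare_enable(clk);
        if (ret)
            return ret;

        /* clk_rcg_lcc_set_rate() switches to XO, reprograms M/N, then
         * switches back because the clock is known to be enabled */
        return clk_set_rate(clk, 1536000);
    }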
const struct clk_ops clk_dyn_rcg_ops = {
.enable = clk_enable_regmap,
.is_enabled = clk_is_enabled_regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 687e41f91d7c..56028bb31d87 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -26,6 +26,16 @@ struct freq_tbl {
};
/**
+ * struct parent_map - map table for PLL source select configuration values
+ * @src: source PLL
+ * @cfg: configuration value
+ */
+struct parent_map {
+ u8 src;
+ u8 cfg;
+};
+
+/**
* struct mn - M/N:D counter
* @mnctr_en_bit: bit to enable mn counter
* @mnctr_reset_bit: bit to assert mn counter reset
@@ -65,7 +75,7 @@ struct pre_div {
struct src_sel {
u8 src_sel_shift;
#define SRC_SEL_MASK 0x7
- const u8 *parent_map;
+ const struct parent_map *parent_map;
};
/**
@@ -96,6 +106,7 @@ struct clk_rcg {
extern const struct clk_ops clk_rcg_ops;
extern const struct clk_ops clk_rcg_bypass_ops;
+extern const struct clk_ops clk_rcg_lcc_ops;
#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
@@ -150,7 +161,7 @@ struct clk_rcg2 {
u32 cmd_rcgr;
u8 mnd_width;
u8 hid_width;
- const u8 *parent_map;
+ const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
struct clk_regmap clkr;
};
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 742acfa18d63..b95d17fbb8d7 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -69,16 +69,19 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
if (ret)
- return ret;
+ goto err;
cfg &= CFG_SRC_SEL_MASK;
cfg >>= CFG_SRC_SEL_SHIFT;
for (i = 0; i < num_parents; i++)
- if (cfg == rcg->parent_map[i])
+ if (cfg == rcg->parent_map[i].cfg)
return i;
- return -EINVAL;
+err:
+ pr_debug("%s: Clock %s has invalid parent, using default.\n",
+ __func__, __clk_get_name(hw->clk));
+ return 0;
}
static int update_config(struct clk_rcg2 *rcg)
@@ -111,10 +114,10 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int ret;
+ u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
- CFG_SRC_SEL_MASK,
- rcg->parent_map[index] << CFG_SRC_SEL_SHIFT);
+ CFG_SRC_SEL_MASK, cfg);
if (ret)
return ret;
@@ -179,13 +182,19 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
{
unsigned long clk_flags;
struct clk *p;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int index;
f = qcom_find_freq(f, rate);
if (!f)
return -EINVAL;
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ if (index < 0)
+ return index;
+
clk_flags = __clk_get_flags(hw->clk);
- p = clk_get_parent_by_index(hw->clk, f->src);
+ p = clk_get_parent_by_index(hw->clk, index);
if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
rate /= 2;
@@ -219,7 +228,11 @@ static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg, mask;
- int ret;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ if (index < 0)
+ return index;
if (rcg->mnd_width && f->n) {
mask = BIT(rcg->mnd_width) - 1;
@@ -242,8 +255,8 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
mask = BIT(rcg->hid_width) - 1;
mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
- cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
- if (rcg->mnd_width && f->n)
+ cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
ret = regmap_update_bits(rcg->clkr.regmap,
rcg->cmd_rcgr + CFG_REG, mask, cfg);
@@ -374,9 +387,10 @@ static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
s64 request;
u32 mask = BIT(rcg->hid_width) - 1;
u32 hid_div;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
/* Force the correct parent */
- *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, f->src));
+ *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, index));
if (src_rate == 810000000)
frac = frac_table_810m;
@@ -420,6 +434,7 @@ static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f = rcg->freq_tbl;
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
unsigned long parent_rate, div;
u32 mask = BIT(rcg->hid_width) - 1;
struct clk *p;
@@ -427,7 +442,7 @@ static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
if (rate == 0)
return -EINVAL;
- p = clk_get_parent_by_index(hw->clk, f->src);
+ p = clk_get_parent_by_index(hw->clk, index);
*p_hw = __clk_get_hw(p);
*p_rate = parent_rate = __clk_round_rate(p, rate);
@@ -489,7 +504,8 @@ static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
int delta = 100000;
const struct freq_tbl *f = rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- struct clk *parent = clk_get_parent_by_index(hw->clk, f->src);
+ int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ struct clk *parent = clk_get_parent_by_index(hw->clk, index);
*p = __clk_get_hw(parent);
@@ -518,7 +534,8 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
int delta = 100000;
u32 mask = BIT(rcg->hid_width) - 1;
u32 hid_div;
- struct clk *parent = clk_get_parent_by_index(hw->clk, f.src);
+ int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
+ struct clk *parent = clk_get_parent_by_index(hw->clk, index);
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index e20d947db3e5..f7101e330b1d 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -43,6 +43,18 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
}
EXPORT_SYMBOL_GPL(qcom_find_freq);
+int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
+{
+ int i, num_parents = __clk_get_num_parents(hw->clk);
+
+ for (i = 0; i < num_parents; i++)
+ if (src == map[i].src)
+ return i;
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_src_index);
+
struct regmap *
qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
{
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index f519322acdf3..7a0e73713063 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -19,6 +19,8 @@ struct clk_regmap;
struct qcom_reset_map;
struct regmap;
struct freq_tbl;
+struct clk_hw;
+struct parent_map;
struct qcom_cc_desc {
const struct regmap_config *config;
@@ -30,6 +32,8 @@ struct qcom_cc_desc {
extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
unsigned long rate);
+extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
+ u8 src);
extern struct regmap *qcom_cc_map(struct platform_device *pdev,
const struct qcom_cc_desc *desc);
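To illustrate how the new struct parent_map and qcom_find_src_index() fit together, a small sketch mirroring gcc_xo_gpll0_gpll4_map in the gcc-apq8084.c hunk below (not part of the patch; the enum is local to the sketch):

    enum { P_XO, P_GPLL0, P_GPLL4 };    /* logical sources used in freq_tbl */

    /* { .src = logical source, .cfg = value programmed into SRC_SEL } */
    static const struct parent_map example_map[] = {
        { P_XO, 0 },
        { P_GPLL0, 1 },
        { P_GPLL4, 5 },
    };

    /*
     * qcom_find_src_index(hw, example_map, P_GPLL4) returns 2, the position
     * in the mux/parent_names[] array, while example_map[2].cfg (5) is what
     * gets written to the register.  Decoupling those two values is what the
     * per-SoC conversions below rely on.
     */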
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index e3ef90264214..54a756b90a37 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -32,18 +32,20 @@
#include "clk-branch.h"
#include "reset.h"
-#define P_XO 0
-#define P_GPLL0 1
-#define P_GPLL1 1
-#define P_GPLL4 2
-#define P_PCIE_0_1_PIPE_CLK 1
-#define P_SATA_ASIC0_CLK 1
-#define P_SATA_RX_CLK 1
-#define P_SLEEP_CLK 1
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL1,
+ P_GPLL4,
+ P_PCIE_0_1_PIPE_CLK,
+ P_SATA_ASIC0_CLK,
+ P_SATA_RX_CLK,
+ P_SLEEP_CLK,
+};
-static const u8 gcc_xo_gpll0_map[] = {
- [P_XO] = 0,
- [P_GPLL0] = 1,
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 }
};
static const char *gcc_xo_gpll0[] = {
@@ -51,10 +53,10 @@ static const char *gcc_xo_gpll0[] = {
"gpll0_vote",
};
-static const u8 gcc_xo_gpll0_gpll4_map[] = {
- [P_XO] = 0,
- [P_GPLL0] = 1,
- [P_GPLL4] = 5,
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 }
};
static const char *gcc_xo_gpll0_gpll4[] = {
@@ -63,9 +65,9 @@ static const char *gcc_xo_gpll0_gpll4[] = {
"gpll4_vote",
};
-static const u8 gcc_xo_sata_asic0_map[] = {
- [P_XO] = 0,
- [P_SATA_ASIC0_CLK] = 2,
+static const struct parent_map gcc_xo_sata_asic0_map[] = {
+ { P_XO, 0 },
+ { P_SATA_ASIC0_CLK, 2 }
};
static const char *gcc_xo_sata_asic0[] = {
@@ -73,9 +75,9 @@ static const char *gcc_xo_sata_asic0[] = {
"sata_asic0_clk",
};
-static const u8 gcc_xo_sata_rx_map[] = {
- [P_XO] = 0,
- [P_SATA_RX_CLK] = 2,
+static const struct parent_map gcc_xo_sata_rx_map[] = {
+ { P_XO, 0 },
+ { P_SATA_RX_CLK, 2 }
};
static const char *gcc_xo_sata_rx[] = {
@@ -83,9 +85,9 @@ static const char *gcc_xo_sata_rx[] = {
"sata_rx_clk",
};
-static const u8 gcc_xo_pcie_map[] = {
- [P_XO] = 0,
- [P_PCIE_0_1_PIPE_CLK] = 2,
+static const struct parent_map gcc_xo_pcie_map[] = {
+ { P_XO, 0 },
+ { P_PCIE_0_1_PIPE_CLK, 2 }
};
static const char *gcc_xo_pcie[] = {
@@ -93,9 +95,9 @@ static const char *gcc_xo_pcie[] = {
"pcie_pipe",
};
-static const u8 gcc_xo_pcie_sleep_map[] = {
- [P_XO] = 0,
- [P_SLEEP_CLK] = 6,
+static const struct parent_map gcc_xo_pcie_sleep_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 }
};
static const char *gcc_xo_pcie_sleep[] = {
@@ -1263,9 +1265,9 @@ static const struct freq_tbl ftbl_gcc_usb_hsic_clk[] = {
{ }
};
-static u8 usb_hsic_clk_src_map[] = {
- [P_XO] = 0,
- [P_GPLL1] = 4,
+static const struct parent_map usb_hsic_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 4 }
};
static struct clk_rcg2 usb_hsic_clk_src = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index cbdc31dea7f4..a50936a17376 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -140,15 +140,17 @@ static struct clk_regmap pll14_vote = {
},
};
-#define P_PXO 0
-#define P_PLL8 1
-#define P_PLL3 1
-#define P_PLL0 2
-#define P_CXO 2
+enum {
+ P_PXO,
+ P_PLL8,
+ P_PLL3,
+ P_PLL0,
+ P_CXO,
+};
-static const u8 gcc_pxo_pll8_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
+static const struct parent_map gcc_pxo_pll8_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 }
};
static const char *gcc_pxo_pll8[] = {
@@ -156,10 +158,10 @@ static const char *gcc_pxo_pll8[] = {
"pll8_vote",
};
-static const u8 gcc_pxo_pll8_cxo_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
- [P_CXO] = 5,
+static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 },
+ { P_CXO, 5 }
};
static const char *gcc_pxo_pll8_cxo[] = {
@@ -168,14 +170,14 @@ static const char *gcc_pxo_pll8_cxo[] = {
"cxo",
};
-static const u8 gcc_pxo_pll3_map[] = {
- [P_PXO] = 0,
- [P_PLL3] = 1,
+static const struct parent_map gcc_pxo_pll3_map[] = {
+ { P_PXO, 0 },
+ { P_PLL3, 1 }
};
-static const u8 gcc_pxo_pll3_sata_map[] = {
- [P_PXO] = 0,
- [P_PLL3] = 6,
+static const struct parent_map gcc_pxo_pll3_sata_map[] = {
+ { P_PXO, 0 },
+ { P_PLL3, 6 }
};
static const char *gcc_pxo_pll3[] = {
@@ -183,10 +185,10 @@ static const char *gcc_pxo_pll3[] = {
"pll3",
};
-static const u8 gcc_pxo_pll8_pll0[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
- [P_PLL0] = 2,
+static const struct parent_map gcc_pxo_pll8_pll0[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 },
+ { P_PLL0, 2 }
};
static const char *gcc_pxo_pll8_pll0_map[] = {
@@ -525,8 +527,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 10800000, P_PXO, 1, 2, 5 },
{ 15060000, P_PLL8, 1, 2, 51 },
{ 24000000, P_PLL8, 4, 1, 4 },
+ { 25000000, P_PXO, 1, 0, 0 },
{ 25600000, P_PLL8, 1, 1, 15 },
- { 27000000, P_PXO, 1, 0, 0 },
{ 48000000, P_PLL8, 4, 1, 2 },
{ 51200000, P_PLL8, 1, 2, 15 },
{ }
@@ -2170,6 +2172,36 @@ static struct clk_branch usb_fs1_h_clk = {
},
};
+static struct clk_branch ebi2_clk = {
+ .hwcg_reg = 0x3b00,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3b00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ebi2_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch ebi2_aon_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x3b00,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "ebi2_always_on_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
static struct clk_regmap *gcc_ipq806x_clks[] = {
[PLL0] = &pll0.clkr,
[PLL0_VOTE] = &pll0_vote,
@@ -2273,6 +2305,8 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
[USB_FS1_XCVR_SRC] = &usb_fs1_xcvr_clk_src.clkr,
[USB_FS1_XCVR_CLK] = &usb_fs1_xcvr_clk.clkr,
[USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr,
+ [EBI2_CLK] = &ebi2_clk.clkr,
+ [EBI2_AON_CLK] = &ebi2_aon_clk.clkr,
};
static const struct qcom_reset_map gcc_ipq806x_resets[] = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f366e68f7316..fc6b12da5b30 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -59,13 +59,15 @@ static struct clk_regmap pll8_vote = {
},
};
-#define P_PXO 0
-#define P_PLL8 1
-#define P_CXO 2
+enum {
+ P_PXO,
+ P_PLL8,
+ P_CXO,
+};
-static const u8 gcc_pxo_pll8_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
+static const struct parent_map gcc_pxo_pll8_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 }
};
static const char *gcc_pxo_pll8[] = {
@@ -73,10 +75,10 @@ static const char *gcc_pxo_pll8[] = {
"pll8_vote",
};
-static const u8 gcc_pxo_pll8_cxo_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
- [P_CXO] = 5,
+static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 },
+ { P_CXO, 5 }
};
static const char *gcc_pxo_pll8_cxo[] = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
new file mode 100644
index 000000000000..c66f7bc2ae87
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -0,0 +1,2868 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8916.h>
+#include <dt-bindings/reset/qcom,gcc-msm8916.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL0_AUX,
+ P_BIMC,
+ P_GPLL1,
+ P_GPLL1_AUX,
+ P_GPLL2,
+ P_GPLL2_AUX,
+ P_SLEEP_CLK,
+ P_DSI0_PHYPLL_BYTE,
+ P_DSI0_PHYPLL_DSI,
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const char *gcc_xo_gpll0[] = {
+ "xo",
+ "gpll0_vote",
+};
+
+static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_BIMC, 2 },
+};
+
+static const char *gcc_xo_gpll0_bimc[] = {
+ "xo",
+ "gpll0_vote",
+ "bimc_pll_vote",
+};
+
+static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 3 },
+ { P_GPLL1, 1 },
+ { P_GPLL2_AUX, 2 },
+};
+
+static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll1_vote",
+ "gpll2_vote",
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+};
+
+static const char *gcc_xo_gpll0_gpll2[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll2_vote",
+};
+
+static const struct parent_map gcc_xo_gpll0a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+};
+
+static const char *gcc_xo_gpll0a[] = {
+ "xo",
+ "gpll0_vote",
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const char *gcc_xo_gpll0_gpll1a_sleep[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll1_vote",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+};
+
+static const char *gcc_xo_gpll0_gpll1a[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll1_vote",
+};
+
+static const struct parent_map gcc_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0_PHYPLL_BYTE, 2 },
+};
+
+static const char *gcc_xo_dsibyte[] = {
+ "xo",
+ "dsi0pllbyte",
+};
+
+static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+ { P_DSI0_PHYPLL_BYTE, 1 },
+};
+
+static const char *gcc_xo_gpll0a_dsibyte[] = {
+ "xo",
+ "gpll0_vote",
+ "dsi0pllbyte",
+};
+
+static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_DSI0_PHYPLL_DSI, 2 },
+};
+
+static const char *gcc_xo_gpll0_dsiphy[] = {
+ "xo",
+ "gpll0_vote",
+ "dsi0pll",
+};
+
+static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+ { P_DSI0_PHYPLL_DSI, 1 },
+};
+
+static const char *gcc_xo_gpll0a_dsiphy[] = {
+ "xo",
+ "gpll0_vote",
+ "dsi0pll",
+};
+
+static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 1 },
+ { P_GPLL1, 3 },
+ { P_GPLL2, 2 },
+};
+
+static const char *gcc_xo_gpll0a_gpll1_gpll2[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll1_vote",
+ "gpll2_vote",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static struct clk_pll gpll0 = {
+ .l_reg = 0x21004,
+ .m_reg = 0x21008,
+ .n_reg = 0x2100c,
+ .config_reg = 0x21014,
+ .mode_reg = 0x21000,
+ .status_reg = 0x2101c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x20004,
+ .m_reg = 0x20008,
+ .n_reg = 0x2000c,
+ .config_reg = 0x20014,
+ .mode_reg = 0x20000,
+ .status_reg = 0x2001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_vote",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll2 = {
+ .l_reg = 0x4a004,
+ .m_reg = 0x4a008,
+ .n_reg = 0x4a00c,
+ .config_reg = 0x4a014,
+ .mode_reg = 0x4a000,
+ .status_reg = 0x4a01c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll2_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_vote",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll bimc_pll = {
+ .l_reg = 0x23004,
+ .m_reg = 0x23008,
+ .n_reg = 0x2300c,
+ .config_reg = 0x23014,
+ .mode_reg = 0x23000,
+ .status_reg = 0x2301c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_pll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap bimc_pll_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_pll_vote",
+ .parent_names = (const char *[]){ "bimc_pll" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x27000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x26004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_bfdcd_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = {
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_ahb_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_apss_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0_1_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_AUX, 16, 0, 0),
+ F(80000000, P_GPLL0_AUX, 10, 0, 0),
+ F(100000000, P_GPLL0_AUX, 8, 0, 0),
+ F(160000000, P_GPLL0_AUX, 5, 0, 0),
+ F(177780000, P_GPLL0_AUX, 4.5, 0, 0),
+ F(200000000, P_GPLL0_AUX, 4, 0, 0),
+ F(266670000, P_GPLL0_AUX, 3, 0, 0),
+ F(294912000, P_GPLL1, 3, 0, 0),
+ F(310000000, P_GPLL2, 3, 0, 0),
+ F(400000000, P_GPLL0_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_gpll1_gpll2a_map,
+ .freq_tbl = ftbl_gcc_oxili_gfx3d_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_names = gcc_xo_gpll0a_gpll1_gpll2a,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_vfe0_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_map,
+ .freq_tbl = ftbl_gcc_camss_vfe0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_i2c_apps_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_spi_apps_clk[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
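+/*
+ * The UART sources use a 16-bit M/N counter, which lets the standard
+ * baud-rate multiples (3.6864 MHz and up) be derived from GPLL0 with
+ * fractions such as 72/15625.
+ */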
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_6_apps_clk[] = {
+ F(3686400, P_GPLL0, 1, 72, 15625),
+ F(7372800, P_GPLL0, 1, 144, 15625),
+ F(14745600, P_GPLL0, 1, 288, 15625),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_map,
+ .freq_tbl = ftbl_gcc_camss_cci_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_names = gcc_xo_gpll0a,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_jpeg0_clk[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x57000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_jpeg0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
+ F(9600000, P_XO, 2, 0, 0),
+ F(23880000, P_GPLL0, 1, 2, 67),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0_1phytimer_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4f000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = {
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x58018,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_map,
+ .freq_tbl = ftbl_gcc_camss_cpp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_crypto_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_crypto_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "crypto_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
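+/*
+ * The MDSS byte and pixel sources track the DSI PHY PLL, so their
+ * frequency tables only name the parent; the rate itself comes in
+ * through CLK_SET_RATE_PARENT and the byte/pixel clock ops.
+ */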
+static struct freq_tbl ftbl_gcc_mdss_byte0_clk[] = {
+ { .src = P_DSI0_PHYPLL_BYTE },
+ { }
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsibyte_map,
+ .freq_tbl = ftbl_gcc_mdss_byte0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_names = gcc_xo_gpll0a_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_byte_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_esc0_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_dsibyte_map,
+ .freq_tbl = ftbl_gcc_mdss_esc0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_names = gcc_xo_dsibyte,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_dsiphy_map,
+ .freq_tbl = ftbl_gcc_mdss_mdp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_names = gcc_xo_gpll0_dsiphy,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_gcc_mdss_pclk[] = {
+ { .src = P_DSI0_PHYPLL_DSI },
+ { }
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsiphy_map,
+ .freq_tbl = ftbl_gcc_mdss_pclk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_names = gcc_xo_gpll0a_dsiphy,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_vsync_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_map,
+ .freq_tbl = ftbl_gcc_mdss_vsync_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_names = gcc_xo_gpll0a,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_pdm2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
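+/*
+ * Besides the full transfer rates, the SDCC tables keep the low
+ * 144 kHz and 400 kHz rates used during card initialization.
+ */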
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apss_tcu_clk[] = {
+ F(155000000, P_GPLL2, 6, 0, 0),
+ F(310000000, P_GPLL2, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_tcu_clk_src = {
+ .cmd_rcgr = 0x1207c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_gpll1_gpll2_map,
+ .freq_tbl = ftbl_gcc_apss_tcu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_tcu_clk_src",
+ .parent_names = gcc_xo_gpll0a_gpll1_gpll2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_venus0_vcodec0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
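+/*
+ * Branch (gate) clocks. Most are controlled through their own enable
+ * register; the ones marked BRANCH_HALT_VOTED are enabled via a
+ * per-clock bit in the shared vote register at 0x45004.
+ */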
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x01004,
+ .clkr = {
+ .enable_reg = 0x01004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04020,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0401c,
+ .clkr = {
+ .enable_reg = 0x0401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05020,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0501c,
+ .clkr = {
+ .enable_reg = 0x0501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06020,
+ .clkr = {
+ .enable_reg = 0x06020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0601c,
+ .clkr = {
+ .enable_reg = 0x0601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x07020,
+ .clkr = {
+ .enable_reg = 0x07020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x0701c,
+ .clkr = {
+ .enable_reg = 0x0701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_ahb_clk = {
+ .halt_reg = 0x5101c,
+ .clkr = {
+ .enable_reg = 0x5101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_clk = {
+ .halt_reg = 0x51018,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_clk",
+ .parent_names = (const char *[]){
+ "cci_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_names = (const char *[]){
+ "vfe0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_gp0_clk",
+ .parent_names = (const char *[]){
+ "camss_gp0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_gp1_clk",
+ .parent_names = (const char *[]){
+ "camss_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg0_clk = {
+ .halt_reg = 0x57020,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg0_clk",
+ .parent_names = (const char *[]){
+ "jpeg0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_ahb_clk = {
+ .halt_reg = 0x57024,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_axi_clk = {
+ .halt_reg = 0x57028,
+ .clkr = {
+ .enable_reg = 0x57028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_names = (const char *[]){
+ "mclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_names = (const char *[]){
+ "mclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_micro_ahb_clk = {
+ .halt_reg = 0x5600c,
+ .clkr = {
+ .enable_reg = 0x5600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_micro_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi0phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x4f01c,
+ .clkr = {
+ .enable_reg = 0x4f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi1phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x56004,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_ahb_clk = {
+ .halt_reg = 0x58040,
+ .clkr = {
+ .enable_reg = 0x58040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cpp_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_clk = {
+ .halt_reg = 0x5803c,
+ .clkr = {
+ .enable_reg = 0x5803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cpp_clk",
+ .parent_names = (const char *[]){
+ "cpp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe0_clk",
+ .parent_names = (const char *[]){
+ "vfe0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_ahb_clk = {
+ .halt_reg = 0x58044,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe_ahb_clk",
+ .parent_names = (const char *[]){
+ "camss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_axi_clk = {
+ .halt_reg = 0x58048,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_names = (const char *[]){
+ "crypto_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gmem_clk = {
+ .halt_reg = 0x59024,
+ .clkr = {
+ .enable_reg = 0x59024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_gmem_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
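+/*
+ * The TCU/TBU clocks below likewise share a single enable register
+ * (0x4500c), each with its own bit.
+ */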
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gtcu_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_jpeg_tbu_clk = {
+ .halt_reg = 0x12034,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_jpeg_tbu_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_tbu_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_cfg_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_tbu_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vfe_tbu_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_system_clk",
+ .parent_names = (const char *[]){
+ "usb_hs_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_names = (const char *[]){
+ "vcodec0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
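+/* Clocks provided by this driver, indexed by their DT clock IDs. */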
+static struct clk_regmap *gcc_msm8916_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_VOTE] = &gpll0_vote,
+ [BIMC_PLL] = &bimc_pll.clkr,
+ [BIMC_PLL_VOTE] = &bimc_pll_vote,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_VOTE] = &gpll1_vote,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_VOTE] = &gpll2_vote,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [APSS_TCU_CLK_SRC] = &apss_tcu_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr,
+ [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_OXILI_GMEM_CLK] = &gcc_oxili_gmem_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+};
+
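+/* Block control resets (BCRs), indexed by their DT reset IDs. */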
+static const struct qcom_reset_map gcc_msm8916_resets[] = {
+ [GCC_BLSP1_BCR] = { 0x01000 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02038 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03008 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04018 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05018 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06018 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07018 },
+ [GCC_IMEM_BCR] = { 0x0e000 },
+ [GCC_SMMU_BCR] = { 0x12000 },
+ [GCC_APSS_TCU_BCR] = { 0x12050 },
+ [GCC_SMMU_XPU_BCR] = { 0x12054 },
+ [GCC_PCNOC_TBU_BCR] = { 0x12058 },
+ [GCC_PRNG_BCR] = { 0x13000 },
+ [GCC_BOOT_ROM_BCR] = { 0x13008 },
+ [GCC_CRYPTO_BCR] = { 0x16000 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000 },
+ [GCC_AUDIO_CORE_BCR] = { 0x1c008 },
+ [GCC_ULT_AUDIO_BCR] = { 0x1c0b4 },
+ [GCC_DEHR_BCR] = { 0x1f000 },
+ [GCC_SYSTEM_NOC_BCR] = { 0x26000 },
+ [GCC_PCNOC_BCR] = { 0x27018 },
+ [GCC_TCSR_BCR] = { 0x28000 },
+ [GCC_QDSS_BCR] = { 0x29000 },
+ [GCC_DCD_BCR] = { 0x2a000 },
+ [GCC_MSG_RAM_BCR] = { 0x2b000 },
+ [GCC_MPM_BCR] = { 0x2c000 },
+ [GCC_SPMI_BCR] = { 0x2e000 },
+ [GCC_SPDM_BCR] = { 0x2f000 },
+ [GCC_MM_SPDM_BCR] = { 0x2f024 },
+ [GCC_BIMC_BCR] = { 0x31000 },
+ [GCC_RBCPR_BCR] = { 0x33000 },
+ [GCC_TLMM_BCR] = { 0x34000 },
+ [GCC_USB_HS_BCR] = { 0x41000 },
+ [GCC_USB2A_PHY_BCR] = { 0x41028 },
+ [GCC_SDCC1_BCR] = { 0x42000 },
+ [GCC_SDCC2_BCR] = { 0x43000 },
+ [GCC_PDM_BCR] = { 0x44000 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048 },
+ [GCC_MMSS_BCR] = { 0x4b000 },
+ [GCC_VENUS0_BCR] = { 0x4c014 },
+ [GCC_MDSS_BCR] = { 0x4d074 },
+ [GCC_CAMSS_PHY0_BCR] = { 0x4e018 },
+ [GCC_CAMSS_CSI0_BCR] = { 0x4e038 },
+ [GCC_CAMSS_CSI0PHY_BCR] = { 0x4e044 },
+ [GCC_CAMSS_CSI0RDI_BCR] = { 0x4e04c },
+ [GCC_CAMSS_CSI0PIX_BCR] = { 0x4e054 },
+ [GCC_CAMSS_PHY1_BCR] = { 0x4f018 },
+ [GCC_CAMSS_CSI1_BCR] = { 0x4f038 },
+ [GCC_CAMSS_CSI1PHY_BCR] = { 0x4f044 },
+ [GCC_CAMSS_CSI1RDI_BCR] = { 0x4f04c },
+ [GCC_CAMSS_CSI1PIX_BCR] = { 0x4f054 },
+ [GCC_CAMSS_ISPIF_BCR] = { 0x50000 },
+ [GCC_CAMSS_CCI_BCR] = { 0x51014 },
+ [GCC_CAMSS_MCLK0_BCR] = { 0x52014 },
+ [GCC_CAMSS_MCLK1_BCR] = { 0x53014 },
+ [GCC_CAMSS_GP0_BCR] = { 0x54014 },
+ [GCC_CAMSS_GP1_BCR] = { 0x55014 },
+ [GCC_CAMSS_TOP_BCR] = { 0x56000 },
+ [GCC_CAMSS_MICRO_BCR] = { 0x56008 },
+ [GCC_CAMSS_JPEG_BCR] = { 0x57018 },
+ [GCC_CAMSS_VFE_BCR] = { 0x58030 },
+ [GCC_CAMSS_CSI_VFE0_BCR] = { 0x5804c },
+ [GCC_OXILI_BCR] = { 0x59018 },
+ [GCC_GMEM_BCR] = { 0x5902c },
+ [GCC_CAMSS_AHB_BCR] = { 0x5a018 },
+ [GCC_MDP_TBU_BCR] = { 0x62000 },
+ [GCC_GFX_TBU_BCR] = { 0x63000 },
+ [GCC_GFX_TCU_BCR] = { 0x64000 },
+ [GCC_MSS_TBU_AXI_BCR] = { 0x65000 },
+ [GCC_MSS_TBU_GSS_AXI_BCR] = { 0x66000 },
+ [GCC_MSS_TBU_Q6_AXI_BCR] = { 0x67000 },
+ [GCC_GTCU_AHB_BCR] = { 0x68000 },
+ [GCC_SMMU_CFG_BCR] = { 0x69000 },
+ [GCC_VFE_TBU_BCR] = { 0x6a000 },
+ [GCC_VENUS_TBU_BCR] = { 0x6b000 },
+ [GCC_JPEG_TBU_BCR] = { 0x6c000 },
+ [GCC_PRONTO_TBU_BCR] = { 0x6d000 },
+ [GCC_SMMU_CATS_BCR] = { 0x7c000 },
+};
+
+static const struct regmap_config gcc_msm8916_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8916_desc = {
+ .config = &gcc_msm8916_regmap_config,
+ .clks = gcc_msm8916_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8916_clocks),
+ .resets = gcc_msm8916_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8916_resets),
+};
+
+static const struct of_device_id gcc_msm8916_match_table[] = {
+ { .compatible = "qcom,gcc-msm8916" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8916_match_table);
+
+static int gcc_msm8916_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct device *dev = &pdev->dev;
+
+ /* Temporary until RPM clocks supported */
+ clk = clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk = clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
+ CLK_IS_ROOT, 32768);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return qcom_cc_probe(pdev, &gcc_msm8916_desc);
+}
+
+static int gcc_msm8916_remove(struct platform_device *pdev)
+{
+ qcom_cc_remove(pdev);
+ return 0;
+}
+
+static struct platform_driver gcc_msm8916_driver = {
+ .probe = gcc_msm8916_probe,
+ .remove = gcc_msm8916_remove,
+ .driver = {
+ .name = "gcc-msm8916",
+ .of_match_table = gcc_msm8916_match_table,
+ },
+};
+
+static int __init gcc_msm8916_init(void)
+{
+ return platform_driver_register(&gcc_msm8916_driver);
+}
+core_initcall(gcc_msm8916_init);
+
+static void __exit gcc_msm8916_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8916_driver);
+}
+module_exit(gcc_msm8916_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8916 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8916");
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index e60feffc10a1..eb6a4f9fa107 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -113,14 +113,16 @@ static struct clk_regmap pll14_vote = {
},
};
-#define P_PXO 0
-#define P_PLL8 1
-#define P_PLL3 2
-#define P_CXO 2
+enum {
+ P_PXO,
+ P_PLL8,
+ P_PLL3,
+ P_CXO,
+};
-static const u8 gcc_pxo_pll8_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
+static const struct parent_map gcc_pxo_pll8_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 }
};
static const char *gcc_pxo_pll8[] = {
@@ -128,10 +130,10 @@ static const char *gcc_pxo_pll8[] = {
"pll8_vote",
};
-static const u8 gcc_pxo_pll8_cxo_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
- [P_CXO] = 5,
+static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 },
+ { P_CXO, 5 }
};
static const char *gcc_pxo_pll8_cxo[] = {
@@ -140,10 +142,10 @@ static const char *gcc_pxo_pll8_cxo[] = {
"cxo",
};
-static const u8 gcc_pxo_pll8_pll3_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 3,
- [P_PLL3] = 6,
+static const struct parent_map gcc_pxo_pll8_pll3_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 3 },
+ { P_PLL3, 6 }
};
static const char *gcc_pxo_pll8_pll3[] = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index a6937fe78d8a..c39d09874e74 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -32,14 +32,16 @@
#include "clk-branch.h"
#include "reset.h"
-#define P_XO 0
-#define P_GPLL0 1
-#define P_GPLL1 1
-#define P_GPLL4 2
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL1,
+ P_GPLL4,
+};
-static const u8 gcc_xo_gpll0_map[] = {
- [P_XO] = 0,
- [P_GPLL0] = 1,
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 }
};
static const char *gcc_xo_gpll0[] = {
@@ -47,10 +49,10 @@ static const char *gcc_xo_gpll0[] = {
"gpll0_vote",
};
-static const u8 gcc_xo_gpll0_gpll4_map[] = {
- [P_XO] = 0,
- [P_GPLL0] = 1,
- [P_GPLL4] = 5,
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 }
};
static const char *gcc_xo_gpll0_gpll4[] = {
@@ -984,9 +986,9 @@ static const struct freq_tbl ftbl_gcc_usb_hsic_clk[] = {
{ }
};
-static u8 usb_hsic_clk_src_map[] = {
- [P_XO] = 0,
- [P_GPLL1] = 4,
+static const struct parent_map usb_hsic_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 4 }
};
static struct clk_rcg2 usb_hsic_clk_src = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index c9ff27b4648b..47f0ac16d149 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -61,12 +61,14 @@ static const struct pll_config pll4_config = {
.main_output_mask = BIT(23),
};
-#define P_PXO 0
-#define P_PLL4 1
+enum {
+ P_PXO,
+ P_PLL4,
+};
-static const u8 lcc_pxo_pll4_map[] = {
- [P_PXO] = 0,
- [P_PLL4] = 2,
+static const struct parent_map lcc_pxo_pll4_map[] = {
+ { P_PXO, 0 },
+ { P_PLL4, 2 }
};
static const char *lcc_pxo_pll4[] = {
@@ -294,14 +296,14 @@ static struct clk_regmap_mux pcm_clk = {
};
static struct freq_tbl clk_tbl_aif_osr[] = {
- { 22050, P_PLL4, 1, 147, 20480 },
- { 32000, P_PLL4, 1, 1, 96 },
- { 44100, P_PLL4, 1, 147, 10240 },
- { 48000, P_PLL4, 1, 1, 64 },
- { 88200, P_PLL4, 1, 147, 5120 },
- { 96000, P_PLL4, 1, 1, 32 },
- { 176400, P_PLL4, 1, 147, 2560 },
- { 192000, P_PLL4, 1, 1, 16 },
+ { 2822400, P_PLL4, 1, 147, 20480 },
+ { 4096000, P_PLL4, 1, 1, 96 },
+ { 5644800, P_PLL4, 1, 147, 10240 },
+ { 6144000, P_PLL4, 1, 1, 64 },
+ { 11289600, P_PLL4, 1, 147, 5120 },
+ { 12288000, P_PLL4, 1, 1, 32 },
+ { 22579200, P_PLL4, 1, 147, 2560 },
+ { 24576000, P_PLL4, 1, 1, 16 },
{ },
};
@@ -360,7 +362,7 @@ static struct clk_branch spdif_clk = {
};
static struct freq_tbl clk_tbl_ahbix[] = {
- { 131072, P_PLL4, 1, 1, 3 },
+ { 131072000, P_PLL4, 1, 1, 3 },
{ },
};
@@ -386,13 +388,12 @@ static struct clk_rcg ahbix_clk = {
.freq_tbl = clk_tbl_ahbix,
.clkr = {
.enable_reg = 0x38,
- .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
+ .enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "ahbix",
.parent_names = lcc_pxo_pll4,
.num_parents = 2,
- .ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
+ .ops = &clk_rcg_lcc_ops,
},
},
};
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index e2c863295f00..d0df9d5fc3af 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -47,12 +47,14 @@ static struct clk_pll pll4 = {
},
};
-#define P_PXO 0
-#define P_PLL4 1
+enum {
+ P_PXO,
+ P_PLL4,
+};
-static const u8 lcc_pxo_pll4_map[] = {
- [P_PXO] = 0,
- [P_PLL4] = 2,
+static const struct parent_map lcc_pxo_pll4_map[] = {
+ { P_PXO, 0 },
+ { P_PLL4, 2 }
};
static const char *lcc_pxo_pll4[] = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 157139a5c1ca..1b17df2cb0af 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -27,28 +27,30 @@
#include "clk-branch.h"
#include "reset.h"
-#define P_XO 0
-#define P_MMPLL0 1
-#define P_EDPLINK 1
-#define P_MMPLL1 2
-#define P_HDMIPLL 2
-#define P_GPLL0 3
-#define P_EDPVCO 3
-#define P_MMPLL4 4
-#define P_DSI0PLL 4
-#define P_DSI0PLL_BYTE 4
-#define P_MMPLL2 4
-#define P_MMPLL3 4
-#define P_GPLL1 5
-#define P_DSI1PLL 5
-#define P_DSI1PLL_BYTE 5
-#define P_MMSLEEP 6
-
-static const u8 mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
+enum {
+ P_XO,
+ P_MMPLL0,
+ P_EDPLINK,
+ P_MMPLL1,
+ P_HDMIPLL,
+ P_GPLL0,
+ P_EDPVCO,
+ P_MMPLL4,
+ P_DSI0PLL,
+ P_DSI0PLL_BYTE,
+ P_MMPLL2,
+ P_MMPLL3,
+ P_GPLL1,
+ P_DSI1PLL,
+ P_DSI1PLL_BYTE,
+ P_MMSLEEP,
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 }
};
static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
@@ -58,13 +60,13 @@ static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
"mmss_gpll0_vote",
};
-static const u8 mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_HDMIPLL] = 4,
- [P_GPLL0] = 5,
- [P_DSI0PLL] = 2,
- [P_DSI1PLL] = 3,
+static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_HDMIPLL, 4 },
+ { P_GPLL0, 5 },
+ { P_DSI0PLL, 2 },
+ { P_DSI1PLL, 3 }
};
static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
@@ -76,12 +78,12 @@ static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
"dsi1pll",
};
-static const u8 mmcc_xo_mmpll0_1_2_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
- [P_MMPLL2] = 3,
+static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_MMPLL2, 3 }
};
static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
@@ -92,12 +94,12 @@ static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
"mmpll2",
};
-static const u8 mmcc_xo_mmpll0_1_3_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
- [P_MMPLL3] = 3,
+static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_MMPLL3, 3 }
};
static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
@@ -108,13 +110,13 @@ static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
"mmpll3",
};
-static const u8 mmcc_xo_dsi_hdmi_edp_map[] = {
- [P_XO] = 0,
- [P_EDPLINK] = 4,
- [P_HDMIPLL] = 3,
- [P_EDPVCO] = 5,
- [P_DSI0PLL] = 1,
- [P_DSI1PLL] = 2,
+static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = {
+ { P_XO, 0 },
+ { P_EDPLINK, 4 },
+ { P_HDMIPLL, 3 },
+ { P_EDPVCO, 5 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
};
static const char *mmcc_xo_dsi_hdmi_edp[] = {
@@ -126,13 +128,13 @@ static const char *mmcc_xo_dsi_hdmi_edp[] = {
"dsi1pll",
};
-static const u8 mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
- [P_XO] = 0,
- [P_EDPLINK] = 4,
- [P_HDMIPLL] = 3,
- [P_GPLL0] = 5,
- [P_DSI0PLL] = 1,
- [P_DSI1PLL] = 2,
+static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_EDPLINK, 4 },
+ { P_HDMIPLL, 3 },
+ { P_GPLL0, 5 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
};
static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
@@ -144,13 +146,13 @@ static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
"dsi1pll",
};
-static const u8 mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
- [P_XO] = 0,
- [P_EDPLINK] = 4,
- [P_HDMIPLL] = 3,
- [P_GPLL0] = 5,
- [P_DSI0PLL_BYTE] = 1,
- [P_DSI1PLL_BYTE] = 2,
+static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_EDPLINK, 4 },
+ { P_HDMIPLL, 3 },
+ { P_GPLL0, 5 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 }
};
static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
@@ -162,12 +164,12 @@ static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
"dsi1pllbyte",
};
-static const u8 mmcc_xo_mmpll0_1_4_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
- [P_MMPLL4] = 3,
+static const struct parent_map mmcc_xo_mmpll0_1_4_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_MMPLL4, 3 }
};
static const char *mmcc_xo_mmpll0_1_4_gpll0[] = {
@@ -178,13 +180,13 @@ static const char *mmcc_xo_mmpll0_1_4_gpll0[] = {
"gpll0",
};
-static const u8 mmcc_xo_mmpll0_1_4_gpll1_0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_MMPLL4] = 3,
- [P_GPLL0] = 5,
- [P_GPLL1] = 4,
+static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_MMPLL4, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL1, 4 }
};
static const char *mmcc_xo_mmpll0_1_4_gpll1_0[] = {
@@ -196,14 +198,14 @@ static const char *mmcc_xo_mmpll0_1_4_gpll1_0[] = {
"gpll0",
};
-static const u8 mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_MMPLL4] = 3,
- [P_GPLL0] = 5,
- [P_GPLL1] = 4,
- [P_MMSLEEP] = 6,
+static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_MMPLL4, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL1, 4 },
+ { P_MMSLEEP, 6 }
};
static const char *mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index e8b33bbc362f..9711bca9cc06 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -33,18 +33,21 @@
#include "clk-branch.h"
#include "reset.h"
-#define P_PXO 0
-#define P_PLL8 1
-#define P_PLL2 2
-#define P_PLL3 3
-#define P_PLL15 3
+enum {
+ P_PXO,
+ P_PLL8,
+ P_PLL2,
+ P_PLL3,
+ P_PLL15,
+ P_HDMI_PLL,
+};
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
-static u8 mmcc_pxo_pll8_pll2_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 2,
- [P_PLL2] = 1,
+static const struct parent_map mmcc_pxo_pll8_pll2_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 }
};
static const char *mmcc_pxo_pll8_pll2[] = {
@@ -53,11 +56,11 @@ static const char *mmcc_pxo_pll8_pll2[] = {
"pll2",
};
-static u8 mmcc_pxo_pll8_pll2_pll3_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 2,
- [P_PLL2] = 1,
- [P_PLL3] = 3,
+static const struct parent_map mmcc_pxo_pll8_pll2_pll3_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 },
+ { P_PLL3, 3 }
};
static const char *mmcc_pxo_pll8_pll2_pll15[] = {
@@ -67,11 +70,11 @@ static const char *mmcc_pxo_pll8_pll2_pll15[] = {
"pll15",
};
-static u8 mmcc_pxo_pll8_pll2_pll15_map[] = {
- [P_PXO] = 0,
- [P_PLL8] = 2,
- [P_PLL2] = 1,
- [P_PLL15] = 3,
+static const struct parent_map mmcc_pxo_pll8_pll2_pll15_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 },
+ { P_PLL15, 3 }
};
static const char *mmcc_pxo_pll8_pll2_pll3[] = {
@@ -1377,11 +1380,9 @@ static struct clk_branch rot_clk = {
},
};
-#define P_HDMI_PLL 1
-
-static u8 mmcc_pxo_hdmi_map[] = {
- [P_PXO] = 0,
- [P_HDMI_PLL] = 3,
+static const struct parent_map mmcc_pxo_hdmi_map[] = {
+ { P_PXO, 0 },
+ { P_HDMI_PLL, 3 }
};
static const char *mmcc_pxo_hdmi[] = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index be94c54a9a4f..07f4cc159ad3 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -32,26 +32,28 @@
#include "clk-branch.h"
#include "reset.h"
-#define P_XO 0
-#define P_MMPLL0 1
-#define P_EDPLINK 1
-#define P_MMPLL1 2
-#define P_HDMIPLL 2
-#define P_GPLL0 3
-#define P_EDPVCO 3
-#define P_GPLL1 4
-#define P_DSI0PLL 4
-#define P_DSI0PLL_BYTE 4
-#define P_MMPLL2 4
-#define P_MMPLL3 4
-#define P_DSI1PLL 5
-#define P_DSI1PLL_BYTE 5
-
-static const u8 mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
+enum {
+ P_XO,
+ P_MMPLL0,
+ P_EDPLINK,
+ P_MMPLL1,
+ P_HDMIPLL,
+ P_GPLL0,
+ P_EDPVCO,
+ P_GPLL1,
+ P_DSI0PLL,
+ P_DSI0PLL_BYTE,
+ P_MMPLL2,
+ P_MMPLL3,
+ P_DSI1PLL,
+ P_DSI1PLL_BYTE,
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 }
};
static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
@@ -61,13 +63,13 @@ static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
"mmss_gpll0_vote",
};
-static const u8 mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_HDMIPLL] = 4,
- [P_GPLL0] = 5,
- [P_DSI0PLL] = 2,
- [P_DSI1PLL] = 3,
+static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_HDMIPLL, 4 },
+ { P_GPLL0, 5 },
+ { P_DSI0PLL, 2 },
+ { P_DSI1PLL, 3 }
};
static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
@@ -79,12 +81,12 @@ static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
"dsi1pll",
};
-static const u8 mmcc_xo_mmpll0_1_2_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
- [P_MMPLL2] = 3,
+static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_MMPLL2, 3 }
};
static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
@@ -95,12 +97,12 @@ static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
"mmpll2",
};
-static const u8 mmcc_xo_mmpll0_1_3_gpll0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
- [P_MMPLL3] = 3,
+static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_MMPLL3, 3 }
};
static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
@@ -111,12 +113,12 @@ static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
"mmpll3",
};
-static const u8 mmcc_xo_mmpll0_1_gpll1_0_map[] = {
- [P_XO] = 0,
- [P_MMPLL0] = 1,
- [P_MMPLL1] = 2,
- [P_GPLL0] = 5,
- [P_GPLL1] = 4,
+static const struct parent_map mmcc_xo_mmpll0_1_gpll1_0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL1, 4 }
};
static const char *mmcc_xo_mmpll0_1_gpll1_0[] = {
@@ -127,13 +129,13 @@ static const char *mmcc_xo_mmpll0_1_gpll1_0[] = {
"gpll1_vote",
};
-static const u8 mmcc_xo_dsi_hdmi_edp_map[] = {
- [P_XO] = 0,
- [P_EDPLINK] = 4,
- [P_HDMIPLL] = 3,
- [P_EDPVCO] = 5,
- [P_DSI0PLL] = 1,
- [P_DSI1PLL] = 2,
+static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = {
+ { P_XO, 0 },
+ { P_EDPLINK, 4 },
+ { P_HDMIPLL, 3 },
+ { P_EDPVCO, 5 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
};
static const char *mmcc_xo_dsi_hdmi_edp[] = {
@@ -145,13 +147,13 @@ static const char *mmcc_xo_dsi_hdmi_edp[] = {
"dsi1pll",
};
-static const u8 mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
- [P_XO] = 0,
- [P_EDPLINK] = 4,
- [P_HDMIPLL] = 3,
- [P_GPLL0] = 5,
- [P_DSI0PLL] = 1,
- [P_DSI1PLL] = 2,
+static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_EDPLINK, 4 },
+ { P_HDMIPLL, 3 },
+ { P_GPLL0, 5 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
};
static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
@@ -163,13 +165,13 @@ static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
"dsi1pll",
};
-static const u8 mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
- [P_XO] = 0,
- [P_EDPLINK] = 4,
- [P_HDMIPLL] = 3,
- [P_GPLL0] = 5,
- [P_DSI0PLL_BYTE] = 1,
- [P_DSI1PLL_BYTE] = 2,
+static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_EDPLINK, 4 },
+ { P_HDMIPLL, 3 },
+ { P_GPLL0, 5 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 }
};
static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 7eb684c50d42..556ce041d371 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -704,7 +704,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
GATE(ACLK_GPS, "aclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
};
-static const char *rk3188_critical_clocks[] __initconst = {
+static const char *const rk3188_critical_clocks[] __initconst = {
"aclk_cpu",
"aclk_peri",
"hclk_peri",
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 05d7a0bc0599..d17eb4528a28 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -771,7 +771,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS),
};
-static const char *rk3288_critical_clocks[] __initconst = {
+static const char *const rk3288_critical_clocks[] __initconst = {
"aclk_cpu",
"aclk_peri",
"hclk_peri",
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 20e05bbb3a67..edb5d489ae61 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -317,7 +317,8 @@ void __init rockchip_clk_register_armclk(unsigned int lookup_id,
rockchip_clk_add_lookup(clk, lookup_id);
}
-void __init rockchip_clk_protect_critical(const char *clocks[], int nclocks)
+void __init rockchip_clk_protect_critical(const char *const clocks[],
+ int nclocks)
{
int i;
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 58d2e3bdf22f..e63cafe893e1 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -182,7 +182,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
const char **parent_names, u8 num_parents,
void __iomem *reg, int shift);
-#define PNAME(x) static const char *x[] __initconst
+#define PNAME(x) static const char *x[] __initdata
enum rockchip_clk_branch_type {
branch_composite,
@@ -407,7 +407,7 @@ void rockchip_clk_register_armclk(unsigned int lookup_id, const char *name,
const struct rockchip_cpuclk_reg_data *reg_data,
const struct rockchip_cpuclk_rate_table *rates,
int nrates);
-void rockchip_clk_protect_critical(const char *clocks[], int nclocks);
+void rockchip_clk_protect_critical(const char *const clocks[], int nclocks);
void rockchip_register_restart_notifier(unsigned int reg);
#define ROCKCHIP_SOFTRST_HIWORD_MASK BIT(0)
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 006c6f294310..a17683b2cf27 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
+obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos5433.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 3a7cb2506731..03a52228b6d1 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -142,6 +142,8 @@ CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
exynos4_clkout_init);
CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
exynos4_clkout_init);
+CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
+ exynos4_clkout_init);
static void __init exynos5_clkout_init(struct device_node *node)
{
@@ -151,3 +153,5 @@ CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
exynos5_clkout_init);
CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
exynos5_clkout_init);
+CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
+ exynos5_clkout_init);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index cc4c348d8a24..538de66a759e 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -894,3 +894,166 @@ static void __init exynos3250_cmu_dmc_init(struct device_node *np)
}
CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
exynos3250_cmu_dmc_init);
+
+
+/*
+ * CMU ISP
+ */
+
+#define DIV_ISP0 0x300
+#define DIV_ISP1 0x304
+#define GATE_IP_ISP0 0x800
+#define GATE_IP_ISP1 0x804
+#define GATE_SCLK_ISP 0x900
+
+static struct samsung_div_clock isp_div_clks[] __initdata = {
+ /*
+ * NOTE: The following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+ /* DIV_ISP0 */
+ DIV(CLK_DIV_ISP1, "div_isp1", "mout_aclk_266_sub", DIV_ISP0, 4, 3),
+ DIV(CLK_DIV_ISP0, "div_isp0", "mout_aclk_266_sub", DIV_ISP0, 0, 3),
+
+ /* DIV_ISP1 */
+ DIV(CLK_DIV_MCUISP1, "div_mcuisp1", "mout_aclk_400_mcuisp_sub",
+ DIV_ISP1, 8, 3),
+ DIV(CLK_DIV_MCUISP0, "div_mcuisp0", "mout_aclk_400_mcuisp_sub",
+ DIV_ISP1, 4, 3),
+ DIV(CLK_DIV_MPWM, "div_mpwm", "div_isp1", DIV_ISP1, 0, 3),
+};
+
+static struct samsung_gate_clock isp_gate_clks[] __initdata = {
+ /*
+ * NOTE: The following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+
+ /* GATE_IP_ISP0 */
+ GATE(CLK_UART_ISP, "uart_isp", "uart_isp_top",
+ GATE_IP_ISP0, 31, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_WDT_ISP, "wdt_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 30, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PWM_ISP, "pwm_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 28, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_I2C1_ISP, "i2c1_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 26, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_I2C0_ISP, "i2c0_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_MPWM_ISP, "mpwm_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_MCUCTL_ISP, "mcuctl_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMUISPX, "ppmuispx", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMUISPMX, "ppmuispmx", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_LITE1, "qe_lite1", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_LITE0, "qe_lite0", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_FD, "qe_fd", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_DRC, "qe_drc", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_ISP, "qe_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CSIS1, "csis1", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_LITE1, "smmu_lite1", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_LITE0, "smmu_lite0", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_FD, "smmu_fd", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_DRC, "smmu_drc", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_ISP, "smmu_isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GICISP, "gicisp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CSIS0, "csis0", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_MCUISP, "mcuisp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_LITE1, "lite1", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_LITE0, "lite0", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_FD, "fd", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_DRC, "drc", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ISP, "isp", "mout_aclk_266_sub",
+ GATE_IP_ISP0, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* GATE_IP_ISP1 */
+ GATE(CLK_QE_ISPCX, "qe_ispcx", "uart_isp_top",
+ GATE_IP_ISP0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_SCALERP, "qe_scalerp", "uart_isp_top",
+ GATE_IP_ISP0, 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_QE_SCALERC, "qe_scalerc", "uart_isp_top",
+ GATE_IP_ISP0, 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_SCALERP, "smmu_scalerp", "uart_isp_top",
+ GATE_IP_ISP0, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_SCALERC, "smmu_scalerc", "uart_isp_top",
+ GATE_IP_ISP0, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCALERP, "scalerp", "uart_isp_top",
+ GATE_IP_ISP0, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCALERC, "scalerc", "uart_isp_top",
+ GATE_IP_ISP0, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SPI1_ISP, "spi1_isp", "uart_isp_top",
+ GATE_IP_ISP0, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SPI0_ISP, "spi0_isp", "uart_isp_top",
+ GATE_IP_ISP0, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMU_ISPCX, "smmu_ispcx", "uart_isp_top",
+ GATE_IP_ISP0, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNCAXIM, "asyncaxim", "uart_isp_top",
+ GATE_IP_ISP0, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* GATE_SCLK_ISP */
+ GATE(CLK_SCLK_MPWM_ISP, "sclk_mpwm_isp", "div_mpwm",
+ GATE_SCLK_ISP, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info isp_cmu_info __initdata = {
+ .div_clks = isp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(isp_div_clks),
+ .gate_clks = isp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(isp_gate_clks),
+ .nr_clk_ids = NR_CLKS_ISP,
+};
+
+static int __init exynos3250_cmu_isp_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ samsung_cmu_register_one(np, &isp_cmu_info);
+ return 0;
+}
+
+static const struct of_device_id exynos3250_cmu_isp_of_match[] = {
+ { .compatible = "samsung,exynos3250-cmu-isp", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver exynos3250_cmu_isp_driver = {
+ .driver = {
+ .name = "exynos3250-cmu-isp",
+ .of_match_table = exynos3250_cmu_isp_of_match,
+ },
+};
+
+static int __init exynos3250_cmu_platform_init(void)
+{
+ return platform_driver_probe(&exynos3250_cmu_isp_driver,
+ exynos3250_cmu_isp_probe);
+}
+subsys_initcall(exynos3250_cmu_platform_init);
+
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 51462e85675f..714d6ba782c8 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
VPLL_LOCK, VPLL_CON0, NULL),
};
-static void __init exynos4_core_down_clock(enum exynos4_soc soc)
+static void __init exynos4x12_core_down_clock(void)
{
unsigned int tmp;
@@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
__raw_writel(tmp, reg_base + PWR_CTRL1);
/*
- * Disable the clock up feature on Exynos4x12, in case it was
- * enabled by bootloader.
+ * Disable the clock up feature in case it was enabled by bootloader.
*/
- if (exynos4_soc == EXYNOS4X12)
- __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+ __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
}
/* register exynos4 clocks */
@@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_alias(ctx, exynos4_aliases,
ARRAY_SIZE(exynos4_aliases));
- exynos4_core_down_clock(soc);
+ if (soc == EXYNOS4X12)
+ exynos4x12_core_down_clock();
exynos4_clk_sleep_init();
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 07d666cc6a29..bea4a173eef5 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
{ .offset = SRC_MASK_PERIC0, .value = 0x11111110, },
{ .offset = SRC_MASK_PERIC1, .value = 0x11111100, },
{ .offset = SRC_MASK_ISP, .value = 0x11111000, },
+ { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
{ .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
{ .offset = GATE_IP_PERIC, .value = 0xffffffff, },
};
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
new file mode 100644
index 000000000000..9e04ae2bb4d7
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -0,0 +1,5423 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos5433 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include <dt-bindings/clock/exynos5433.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+/*
+ * Register offset definitions for CMU_TOP
+ */
+#define ISP_PLL_LOCK 0x0000
+#define AUD_PLL_LOCK 0x0004
+#define ISP_PLL_CON0 0x0100
+#define ISP_PLL_CON1 0x0104
+#define ISP_PLL_FREQ_DET 0x0108
+#define AUD_PLL_CON0 0x0110
+#define AUD_PLL_CON1 0x0114
+#define AUD_PLL_CON2 0x0118
+#define AUD_PLL_FREQ_DET 0x011c
+#define MUX_SEL_TOP0 0x0200
+#define MUX_SEL_TOP1 0x0204
+#define MUX_SEL_TOP2 0x0208
+#define MUX_SEL_TOP3 0x020c
+#define MUX_SEL_TOP4 0x0210
+#define MUX_SEL_TOP_MSCL 0x0220
+#define MUX_SEL_TOP_CAM1 0x0224
+#define MUX_SEL_TOP_DISP 0x0228
+#define MUX_SEL_TOP_FSYS0 0x0230
+#define MUX_SEL_TOP_FSYS1 0x0234
+#define MUX_SEL_TOP_PERIC0 0x0238
+#define MUX_SEL_TOP_PERIC1 0x023c
+#define MUX_ENABLE_TOP0 0x0300
+#define MUX_ENABLE_TOP1 0x0304
+#define MUX_ENABLE_TOP2 0x0308
+#define MUX_ENABLE_TOP3 0x030c
+#define MUX_ENABLE_TOP4 0x0310
+#define MUX_ENABLE_TOP_MSCL 0x0320
+#define MUX_ENABLE_TOP_CAM1 0x0324
+#define MUX_ENABLE_TOP_DISP 0x0328
+#define MUX_ENABLE_TOP_FSYS0 0x0330
+#define MUX_ENABLE_TOP_FSYS1 0x0334
+#define MUX_ENABLE_TOP_PERIC0 0x0338
+#define MUX_ENABLE_TOP_PERIC1 0x033c
+#define MUX_STAT_TOP0 0x0400
+#define MUX_STAT_TOP1 0x0404
+#define MUX_STAT_TOP2 0x0408
+#define MUX_STAT_TOP3 0x040c
+#define MUX_STAT_TOP4 0x0410
+#define MUX_STAT_TOP_MSCL 0x0420
+#define MUX_STAT_TOP_CAM1 0x0424
+#define MUX_STAT_TOP_FSYS0 0x0430
+#define MUX_STAT_TOP_FSYS1 0x0434
+#define MUX_STAT_TOP_PERIC0 0x0438
+#define MUX_STAT_TOP_PERIC1 0x043c
+#define DIV_TOP0 0x0600
+#define DIV_TOP1 0x0604
+#define DIV_TOP2 0x0608
+#define DIV_TOP3 0x060c
+#define DIV_TOP4 0x0610
+#define DIV_TOP_MSCL 0x0618
+#define DIV_TOP_CAM10 0x061c
+#define DIV_TOP_CAM11 0x0620
+#define DIV_TOP_FSYS0 0x062c
+#define DIV_TOP_FSYS1 0x0630
+#define DIV_TOP_FSYS2 0x0634
+#define DIV_TOP_PERIC0 0x0638
+#define DIV_TOP_PERIC1 0x063c
+#define DIV_TOP_PERIC2 0x0640
+#define DIV_TOP_PERIC3 0x0644
+#define DIV_TOP_PERIC4 0x0648
+#define DIV_TOP_PLL_FREQ_DET 0x064c
+#define DIV_STAT_TOP0 0x0700
+#define DIV_STAT_TOP1 0x0704
+#define DIV_STAT_TOP2 0x0708
+#define DIV_STAT_TOP3 0x070c
+#define DIV_STAT_TOP4 0x0710
+#define DIV_STAT_TOP_MSCL 0x0718
+#define DIV_STAT_TOP_CAM10 0x071c
+#define DIV_STAT_TOP_CAM11 0x0720
+#define DIV_STAT_TOP_FSYS0 0x072c
+#define DIV_STAT_TOP_FSYS1 0x0730
+#define DIV_STAT_TOP_FSYS2 0x0734
+#define DIV_STAT_TOP_PERIC0 0x0738
+#define DIV_STAT_TOP_PERIC1 0x073c
+#define DIV_STAT_TOP_PERIC2 0x0740
+#define DIV_STAT_TOP_PERIC3 0x0744
+#define DIV_STAT_TOP_PLL_FREQ_DET 0x074c
+#define ENABLE_ACLK_TOP 0x0800
+#define ENABLE_SCLK_TOP 0x0a00
+#define ENABLE_SCLK_TOP_MSCL 0x0a04
+#define ENABLE_SCLK_TOP_CAM1 0x0a08
+#define ENABLE_SCLK_TOP_DISP 0x0a0c
+#define ENABLE_SCLK_TOP_FSYS 0x0a10
+#define ENABLE_SCLK_TOP_PERIC 0x0a14
+#define ENABLE_IP_TOP 0x0b00
+#define ENABLE_CMU_TOP 0x0c00
+#define ENABLE_CMU_TOP_DIV_STAT 0x0c04
+
+static unsigned long top_clk_regs[] __initdata = {
+ ISP_PLL_LOCK,
+ AUD_PLL_LOCK,
+ ISP_PLL_CON0,
+ ISP_PLL_CON1,
+ ISP_PLL_FREQ_DET,
+ AUD_PLL_CON0,
+ AUD_PLL_CON1,
+ AUD_PLL_CON2,
+ AUD_PLL_FREQ_DET,
+ MUX_SEL_TOP0,
+ MUX_SEL_TOP1,
+ MUX_SEL_TOP2,
+ MUX_SEL_TOP3,
+ MUX_SEL_TOP4,
+ MUX_SEL_TOP_MSCL,
+ MUX_SEL_TOP_CAM1,
+ MUX_SEL_TOP_DISP,
+ MUX_SEL_TOP_FSYS0,
+ MUX_SEL_TOP_FSYS1,
+ MUX_SEL_TOP_PERIC0,
+ MUX_SEL_TOP_PERIC1,
+ MUX_ENABLE_TOP0,
+ MUX_ENABLE_TOP1,
+ MUX_ENABLE_TOP2,
+ MUX_ENABLE_TOP3,
+ MUX_ENABLE_TOP4,
+ MUX_ENABLE_TOP_MSCL,
+ MUX_ENABLE_TOP_CAM1,
+ MUX_ENABLE_TOP_DISP,
+ MUX_ENABLE_TOP_FSYS0,
+ MUX_ENABLE_TOP_FSYS1,
+ MUX_ENABLE_TOP_PERIC0,
+ MUX_ENABLE_TOP_PERIC1,
+ MUX_STAT_TOP0,
+ MUX_STAT_TOP1,
+ MUX_STAT_TOP2,
+ MUX_STAT_TOP3,
+ MUX_STAT_TOP4,
+ MUX_STAT_TOP_MSCL,
+ MUX_STAT_TOP_CAM1,
+ MUX_STAT_TOP_FSYS0,
+ MUX_STAT_TOP_FSYS1,
+ MUX_STAT_TOP_PERIC0,
+ MUX_STAT_TOP_PERIC1,
+ DIV_TOP0,
+ DIV_TOP1,
+ DIV_TOP2,
+ DIV_TOP3,
+ DIV_TOP4,
+ DIV_TOP_MSCL,
+ DIV_TOP_CAM10,
+ DIV_TOP_CAM11,
+ DIV_TOP_FSYS0,
+ DIV_TOP_FSYS1,
+ DIV_TOP_FSYS2,
+ DIV_TOP_PERIC0,
+ DIV_TOP_PERIC1,
+ DIV_TOP_PERIC2,
+ DIV_TOP_PERIC3,
+ DIV_TOP_PERIC4,
+ DIV_TOP_PLL_FREQ_DET,
+ DIV_STAT_TOP0,
+ DIV_STAT_TOP1,
+ DIV_STAT_TOP2,
+ DIV_STAT_TOP3,
+ DIV_STAT_TOP4,
+ DIV_STAT_TOP_MSCL,
+ DIV_STAT_TOP_CAM10,
+ DIV_STAT_TOP_CAM11,
+ DIV_STAT_TOP_FSYS0,
+ DIV_STAT_TOP_FSYS1,
+ DIV_STAT_TOP_FSYS2,
+ DIV_STAT_TOP_PERIC0,
+ DIV_STAT_TOP_PERIC1,
+ DIV_STAT_TOP_PERIC2,
+ DIV_STAT_TOP_PERIC3,
+ DIV_STAT_TOP_PLL_FREQ_DET,
+ ENABLE_ACLK_TOP,
+ ENABLE_SCLK_TOP,
+ ENABLE_SCLK_TOP_MSCL,
+ ENABLE_SCLK_TOP_CAM1,
+ ENABLE_SCLK_TOP_DISP,
+ ENABLE_SCLK_TOP_FSYS,
+ ENABLE_SCLK_TOP_PERIC,
+ ENABLE_IP_TOP,
+ ENABLE_CMU_TOP,
+ ENABLE_CMU_TOP_DIV_STAT,
+};
+
+/* list of all parent clock lists */
+PNAME(mout_aud_pll_p) = { "oscclk", "fout_aud_pll", };
+PNAME(mout_isp_pll_p) = { "oscclk", "fout_isp_pll", };
+PNAME(mout_aud_pll_user_p) = { "oscclk", "mout_aud_pll", };
+PNAME(mout_mphy_pll_user_p) = { "oscclk", "sclk_mphy_pll", };
+PNAME(mout_mfc_pll_user_p) = { "oscclk", "sclk_mfc_pll", };
+PNAME(mout_bus_pll_user_p) = { "oscclk", "sclk_bus_pll", };
+PNAME(mout_bus_pll_user_t_p) = { "oscclk", "mout_bus_pll_user", };
+PNAME(mout_mphy_pll_user_t_p) = { "oscclk", "mout_mphy_pll_user", };
+
+PNAME(mout_bus_mfc_pll_user_p) = { "mout_bus_pll_user", "mout_mfc_pll_user",};
+PNAME(mout_mfc_bus_pll_user_p) = { "mout_mfc_pll_user", "mout_bus_pll_user",};
+PNAME(mout_aclk_cam1_552_b_p) = { "mout_aclk_cam1_552_a",
+ "mout_mfc_pll_user", };
+PNAME(mout_aclk_cam1_552_a_p) = { "mout_isp_pll", "mout_bus_pll_user", };
+
+PNAME(mout_aclk_mfc_400_c_p) = { "mout_aclk_mfc_400_b",
+ "mout_mphy_pll_user", };
+PNAME(mout_aclk_mfc_400_b_p) = { "mout_aclk_mfc_400_a",
+ "mout_bus_pll_user", };
+PNAME(mout_aclk_mfc_400_a_p) = { "mout_mfc_pll_user", "mout_isp_pll", };
+
+PNAME(mout_bus_mphy_pll_user_p) = { "mout_bus_pll_user",
+ "mout_mphy_pll_user", };
+PNAME(mout_aclk_mscl_b_p) = { "mout_aclk_mscl_400_a",
+ "mout_mphy_pll_user", };
+PNAME(mout_aclk_g2d_400_b_p) = { "mout_aclk_g2d_400_a",
+ "mout_mphy_pll_user", };
+
+PNAME(mout_sclk_jpeg_c_p) = { "mout_sclk_jpeg_b", "mout_mphy_pll_user",};
+PNAME(mout_sclk_jpeg_b_p) = { "mout_sclk_jpeg_a", "mout_mfc_pll_user", };
+
+PNAME(mout_sclk_mmc2_b_p) = { "mout_sclk_mmc2_a", "mout_mfc_pll_user",};
+PNAME(mout_sclk_mmc1_b_p) = { "mout_sclk_mmc1_a", "mout_mfc_pll_user",};
+PNAME(mout_sclk_mmc0_d_p) = { "mout_sclk_mmc0_c", "mout_isp_pll", };
+PNAME(mout_sclk_mmc0_c_p) = { "mout_sclk_mmc0_b", "mout_mphy_pll_user",};
+PNAME(mout_sclk_mmc0_b_p) = { "mout_sclk_mmc0_a", "mout_mfc_pll_user", };
+
+PNAME(mout_sclk_spdif_p) = { "sclk_audio0", "sclk_audio1",
+ "oscclk", "ioclk_spdif_extclk", };
+PNAME(mout_sclk_audio1_p) = { "ioclk_audiocdclk1", "oscclk",
+ "mout_aud_pll_user_t",};
+PNAME(mout_sclk_audio0_p) = { "ioclk_audiocdclk0", "oscclk",
+ "mout_aud_pll_user_t",};
+
+PNAME(mout_sclk_hdmi_spdif_p) = { "sclk_audio1", "ioclk_spdif_extclk", };
+
+static struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initdata = {
+ FFACTOR(0, "oscclk_efuse_common", "oscclk", 1, 1, 0),
+};
+
+static struct samsung_fixed_rate_clock top_fixed_clks[] __initdata = {
+ /* Xi2s{0|1}CDCLK input clock for I2S/PCM */
+ FRATE(0, "ioclk_audiocdclk1", NULL, CLK_IS_ROOT, 100000000),
+ FRATE(0, "ioclk_audiocdclk0", NULL, CLK_IS_ROOT, 100000000),
+ /* Xi2s1SDI input clock for SPDIF */
+ FRATE(0, "ioclk_spdif_extclk", NULL, CLK_IS_ROOT, 100000000),
+ /* XspiCLK[4:0] input clock for SPI */
+ FRATE(0, "ioclk_spi4_clk_in", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_spi3_clk_in", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_spi2_clk_in", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_spi1_clk_in", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_spi0_clk_in", NULL, CLK_IS_ROOT, 50000000),
+ /* Xi2s1SCLK input clock for I2S1_BCLK */
+ FRATE(0, "ioclk_i2s1_bclk_in", NULL, CLK_IS_ROOT, 12288000),
+};
+
+static struct samsung_mux_clock top_mux_clks[] __initdata = {
+ /* MUX_SEL_TOP0 */
+ MUX(CLK_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p, MUX_SEL_TOP0,
+ 4, 1),
+ MUX(CLK_MOUT_ISP_PLL, "mout_isp_pll", mout_isp_pll_p, MUX_SEL_TOP0,
+ 0, 1),
+
+ /* MUX_SEL_TOP1 */
+ MUX(CLK_MOUT_AUD_PLL_USER_T, "mout_aud_pll_user_t",
+ mout_aud_pll_user_p, MUX_SEL_TOP1, 12, 1),
+ MUX(CLK_MOUT_MPHY_PLL_USER, "mout_mphy_pll_user", mout_mphy_pll_user_p,
+ MUX_SEL_TOP1, 8, 1),
+ MUX(CLK_MOUT_MFC_PLL_USER, "mout_mfc_pll_user", mout_mfc_pll_user_p,
+ MUX_SEL_TOP1, 4, 1),
+ MUX(CLK_MOUT_BUS_PLL_USER, "mout_bus_pll_user", mout_bus_pll_user_p,
+ MUX_SEL_TOP1, 0, 1),
+
+ /* MUX_SEL_TOP2 */
+ MUX(CLK_MOUT_ACLK_HEVC_400, "mout_aclk_hevc_400",
+ mout_bus_mfc_pll_user_p, MUX_SEL_TOP2, 28, 1),
+ MUX(CLK_MOUT_ACLK_CAM1_333, "mout_aclk_cam1_333",
+ mout_mfc_bus_pll_user_p, MUX_SEL_TOP2, 16, 1),
+ MUX(CLK_MOUT_ACLK_CAM1_552_B, "mout_aclk_cam1_552_b",
+ mout_aclk_cam1_552_b_p, MUX_SEL_TOP2, 12, 1),
+ MUX(CLK_MOUT_ACLK_CAM1_552_A, "mout_aclk_cam1_552_a",
+ mout_aclk_cam1_552_a_p, MUX_SEL_TOP2, 8, 1),
+ MUX(CLK_MOUT_ACLK_ISP_DIS_400, "mout_aclk_isp_dis_400",
+ mout_bus_mfc_pll_user_p, MUX_SEL_TOP2, 4, 1),
+ MUX(CLK_MOUT_ACLK_ISP_400, "mout_aclk_isp_400",
+ mout_bus_mfc_pll_user_p, MUX_SEL_TOP2, 0, 1),
+
+ /* MUX_SEL_TOP3 */
+ MUX(CLK_MOUT_ACLK_BUS0_400, "mout_aclk_bus0_400",
+ mout_bus_mphy_pll_user_p, MUX_SEL_TOP3, 20, 1),
+ MUX(CLK_MOUT_ACLK_MSCL_400_B, "mout_aclk_mscl_400_b",
+ mout_aclk_mscl_b_p, MUX_SEL_TOP3, 16, 1),
+ MUX(CLK_MOUT_ACLK_MSCL_400_A, "mout_aclk_mscl_400_a",
+ mout_bus_mfc_pll_user_p, MUX_SEL_TOP3, 12, 1),
+ MUX(CLK_MOUT_ACLK_GSCL_333, "mout_aclk_gscl_333",
+ mout_mfc_bus_pll_user_p, MUX_SEL_TOP3, 8, 1),
+ MUX(CLK_MOUT_ACLK_G2D_400_B, "mout_aclk_g2d_400_b",
+ mout_aclk_g2d_400_b_p, MUX_SEL_TOP3, 4, 1),
+ MUX(CLK_MOUT_ACLK_G2D_400_A, "mout_aclk_g2d_400_a",
+ mout_bus_mfc_pll_user_p, MUX_SEL_TOP3, 0, 1),
+
+ /* MUX_SEL_TOP4 */
+ MUX(CLK_MOUT_ACLK_MFC_400_C, "mout_aclk_mfc_400_c",
+ mout_aclk_mfc_400_c_p, MUX_SEL_TOP4, 8, 1),
+ MUX(CLK_MOUT_ACLK_MFC_400_B, "mout_aclk_mfc_400_b",
+ mout_aclk_mfc_400_b_p, MUX_SEL_TOP4, 4, 1),
+ MUX(CLK_MOUT_ACLK_MFC_400_A, "mout_aclk_mfc_400_a",
+ mout_aclk_mfc_400_a_p, MUX_SEL_TOP4, 0, 1),
+
+ /* MUX_SEL_TOP_MSCL */
+ MUX(CLK_MOUT_SCLK_JPEG_C, "mout_sclk_jpeg_c", mout_sclk_jpeg_c_p,
+ MUX_SEL_TOP_MSCL, 8, 1),
+ MUX(CLK_MOUT_SCLK_JPEG_B, "mout_sclk_jpeg_b", mout_sclk_jpeg_b_p,
+ MUX_SEL_TOP_MSCL, 4, 1),
+ MUX(CLK_MOUT_SCLK_JPEG_A, "mout_sclk_jpeg_a", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_MSCL, 0, 1),
+
+ /* MUX_SEL_TOP_CAM1 */
+ MUX(CLK_MOUT_SCLK_ISP_SENSOR2, "mout_sclk_isp_sensor2",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_CAM1, 24, 1),
+ MUX(CLK_MOUT_SCLK_ISP_SENSOR1, "mout_sclk_isp_sensor1",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_CAM1, 20, 1),
+ MUX(CLK_MOUT_SCLK_ISP_SENSOR0, "mout_sclk_isp_sensor0",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_CAM1, 16, 1),
+ MUX(CLK_MOUT_SCLK_ISP_UART, "mout_sclk_isp_uart",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_CAM1, 8, 1),
+ MUX(CLK_MOUT_SCLK_ISP_SPI1, "mout_sclk_isp_spi1",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_CAM1, 4, 1),
+ MUX(CLK_MOUT_SCLK_ISP_SPI0, "mout_sclk_isp_spi0",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_CAM1, 0, 1),
+
+ /* MUX_SEL_TOP_FSYS0 */
+ MUX(CLK_MOUT_SCLK_MMC2_B, "mout_sclk_mmc2_b", mout_sclk_mmc2_b_p,
+ MUX_SEL_TOP_FSYS0, 28, 1),
+ MUX(CLK_MOUT_SCLK_MMC2_A, "mout_sclk_mmc2_a", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_FSYS0, 24, 1),
+ MUX(CLK_MOUT_SCLK_MMC1_B, "mout_sclk_mmc1_b", mout_sclk_mmc1_b_p,
+ MUX_SEL_TOP_FSYS0, 20, 1),
+ MUX(CLK_MOUT_SCLK_MMC1_A, "mout_sclk_mmc1_a", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_FSYS0, 16, 1),
+ MUX(CLK_MOUT_SCLK_MMC0_D, "mout_sclk_mmc0_d", mout_sclk_mmc0_d_p,
+ MUX_SEL_TOP_FSYS0, 12, 1),
+ MUX(CLK_MOUT_SCLK_MMC0_C, "mout_sclk_mmc0_c", mout_sclk_mmc0_c_p,
+ MUX_SEL_TOP_FSYS0, 8, 1),
+ MUX(CLK_MOUT_SCLK_MMC0_B, "mout_sclk_mmc0_b", mout_sclk_mmc0_b_p,
+ MUX_SEL_TOP_FSYS0, 4, 1),
+ MUX(CLK_MOUT_SCLK_MMC0_A, "mout_sclk_mmc0_a", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_FSYS0, 0, 1),
+
+ /* MUX_SEL_TOP_FSYS1 */
+ MUX(CLK_MOUT_SCLK_PCIE_100, "mout_sclk_pcie_100", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_FSYS1, 12, 1),
+ MUX(CLK_MOUT_SCLK_UFSUNIPRO, "mout_sclk_ufsunipro",
+ mout_mphy_pll_user_t_p, MUX_SEL_TOP_FSYS1, 8, 1),
+ MUX(CLK_MOUT_SCLK_USBHOST30, "mout_sclk_usbhost30",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_FSYS1, 4, 1),
+ MUX(CLK_MOUT_SCLK_USBDRD30, "mout_sclk_usbdrd30",
+ mout_bus_pll_user_t_p, MUX_SEL_TOP_FSYS1, 0, 1),
+
+ /* MUX_SEL_TOP_PERIC0 */
+ MUX(CLK_MOUT_SCLK_SPI4, "mout_sclk_spi4", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 28, 1),
+ MUX(CLK_MOUT_SCLK_SPI3, "mout_sclk_spi3", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 24, 1),
+ MUX(CLK_MOUT_SCLK_UART2, "mout_sclk_uart2", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 20, 1),
+ MUX(CLK_MOUT_SCLK_UART1, "mout_sclk_uart1", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 16, 1),
+ MUX(CLK_MOUT_SCLK_UART0, "mout_sclk_uart0", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 12, 1),
+ MUX(CLK_MOUT_SCLK_SPI2, "mout_sclk_spi2", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 8, 1),
+ MUX(CLK_MOUT_SCLK_SPI1, "mout_sclk_spi1", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 4, 1),
+ MUX(CLK_MOUT_SCLK_SPI0, "mout_sclk_spi0", mout_bus_pll_user_t_p,
+ MUX_SEL_TOP_PERIC0, 0, 1),
+
+ /* MUX_SEL_TOP_PERIC1 */
+ MUX(CLK_MOUT_SCLK_SLIMBUS, "mout_sclk_slimbus", mout_aud_pll_user_p,
+ MUX_SEL_TOP_PERIC1, 16, 1),
+ MUX(CLK_MOUT_SCLK_SPDIF, "mout_sclk_spdif", mout_sclk_spdif_p,
+ MUX_SEL_TOP_PERIC1, 12, 2),
+ MUX(CLK_MOUT_SCLK_AUDIO1, "mout_sclk_audio1", mout_sclk_audio1_p,
+ MUX_SEL_TOP_PERIC1, 4, 2),
+ MUX(CLK_MOUT_SCLK_AUDIO0, "mout_sclk_audio0", mout_sclk_audio0_p,
+ MUX_SEL_TOP_PERIC1, 0, 2),
+
+ /* MUX_SEL_TOP_DISP */
+ MUX(CLK_MOUT_SCLK_HDMI_SPDIF, "mout_sclk_hdmi_spdif",
+ mout_sclk_hdmi_spdif_p, MUX_SEL_TOP_DISP, 0, 1),
+};
+
+static struct samsung_div_clock top_div_clks[] __initdata = {
+ /* DIV_TOP0 */
+ DIV(CLK_DIV_ACLK_CAM1_333, "div_aclk_cam1_333", "mout_aclk_cam1_333",
+ DIV_TOP0, 28, 3),
+ DIV(CLK_DIV_ACLK_CAM1_400, "div_aclk_cam1_400", "mout_bus_pll_user",
+ DIV_TOP0, 24, 3),
+ DIV(CLK_DIV_ACLK_CAM1_552, "div_aclk_cam1_552", "mout_aclk_cam1_552_b",
+ DIV_TOP0, 20, 3),
+ DIV(CLK_DIV_ACLK_CAM0_333, "div_aclk_cam0_333", "mout_mfc_pll_user",
+ DIV_TOP0, 16, 3),
+ DIV(CLK_DIV_ACLK_CAM0_400, "div_aclk_cam0_400", "mout_bus_pll_user",
+ DIV_TOP0, 12, 3),
+ DIV(CLK_DIV_ACLK_CAM0_552, "div_aclk_cam0_552", "mout_isp_pll",
+ DIV_TOP0, 8, 3),
+ DIV(CLK_DIV_ACLK_ISP_DIS_400, "div_aclk_isp_dis_400",
+ "mout_aclk_isp_dis_400", DIV_TOP0, 4, 4),
+ DIV(CLK_DIV_ACLK_ISP_400, "div_aclk_isp_400",
+ "mout_aclk_isp_400", DIV_TOP0, 0, 4),
+
+ /* DIV_TOP1 */
+ DIV(CLK_DIV_ACLK_GSCL_111, "div_aclk_gscl_111", "mout_aclk_gscl_333",
+ DIV_TOP1, 28, 3),
+ DIV(CLK_DIV_ACLK_GSCL_333, "div_aclk_gscl_333", "mout_aclk_gscl_333",
+ DIV_TOP1, 24, 3),
+ DIV(CLK_DIV_ACLK_HEVC_400, "div_aclk_hevc_400", "mout_aclk_hevc_400",
+ DIV_TOP1, 20, 3),
+ DIV(CLK_DIV_ACLK_MFC_400, "div_aclk_mfc_400", "mout_aclk_mfc_400_c",
+ DIV_TOP1, 12, 3),
+ DIV(CLK_DIV_ACLK_G2D_266, "div_aclk_g2d_266", "mout_bus_pll_user",
+ DIV_TOP1, 8, 3),
+ DIV(CLK_DIV_ACLK_G2D_400, "div_aclk_g2d_400", "mout_aclk_g2d_400_b",
+ DIV_TOP1, 0, 3),
+
+ /* DIV_TOP2 */
+ DIV(CLK_DIV_ACLK_MSCL_400, "div_aclk_mscl_400", "mout_aclk_mscl_400_b",
+ DIV_TOP2, 4, 3),
+ DIV(CLK_DIV_ACLK_FSYS_200, "div_aclk_fsys_200", "mout_bus_pll_user",
+ DIV_TOP2, 0, 3),
+
+ /* DIV_TOP3 */
+ DIV(CLK_DIV_ACLK_IMEM_SSSX_266, "div_aclk_imem_sssx_266",
+ "mout_bus_pll_user", DIV_TOP3, 24, 3),
+ DIV(CLK_DIV_ACLK_IMEM_200, "div_aclk_imem_200",
+ "mout_bus_pll_user", DIV_TOP3, 20, 3),
+ DIV(CLK_DIV_ACLK_IMEM_266, "div_aclk_imem_266",
+ "mout_bus_pll_user", DIV_TOP3, 16, 3),
+ DIV(CLK_DIV_ACLK_PERIC_66_B, "div_aclk_peric_66_b",
+ "div_aclk_peric_66_a", DIV_TOP3, 12, 3),
+ DIV(CLK_DIV_ACLK_PERIC_66_A, "div_aclk_peric_66_a",
+ "mout_bus_pll_user", DIV_TOP3, 8, 3),
+ DIV(CLK_DIV_ACLK_PERIS_66_B, "div_aclk_peris_66_b",
+ "div_aclk_peris_66_a", DIV_TOP3, 4, 3),
+ DIV(CLK_DIV_ACLK_PERIS_66_A, "div_aclk_peris_66_a",
+ "mout_bus_pll_user", DIV_TOP3, 0, 3),
+
+ /* DIV_TOP4 */
+ DIV(CLK_DIV_ACLK_G3D_400, "div_aclk_g3d_400", "mout_bus_pll_user",
+ DIV_TOP4, 8, 3),
+ DIV(CLK_DIV_ACLK_BUS0_400, "div_aclk_bus0_400", "mout_aclk_bus0_400",
+ DIV_TOP4, 4, 3),
+ DIV(CLK_DIV_ACLK_BUS1_400, "div_aclk_bus1_400", "mout_bus_pll_user",
+ DIV_TOP4, 0, 3),
+
+ /* DIV_TOP_MSCL */
+ DIV(CLK_DIV_SCLK_JPEG, "div_sclk_jpeg", "mout_sclk_jpeg_c",
+ DIV_TOP_MSCL, 0, 4),
+
+ /* DIV_TOP_CAM10 */
+ DIV(CLK_DIV_SCLK_ISP_UART, "div_sclk_isp_uart", "mout_sclk_isp_uart",
+ DIV_TOP_CAM10, 24, 5),
+ DIV(CLK_DIV_SCLK_ISP_SPI1_B, "div_sclk_isp_spi1_b",
+ "div_sclk_isp_spi1_a", DIV_TOP_CAM10, 16, 8),
+ DIV(CLK_DIV_SCLK_ISP_SPI1_A, "div_sclk_isp_spi1_a",
+ "mout_sclk_isp_spi1", DIV_TOP_CAM10, 12, 4),
+ DIV(CLK_DIV_SCLK_ISP_SPI0_B, "div_sclk_isp_spi0_b",
+ "div_sclk_isp_spi0_a", DIV_TOP_CAM10, 4, 8),
+ DIV(CLK_DIV_SCLK_ISP_SPI0_A, "div_sclk_isp_spi0_a",
+ "mout_sclk_isp_spi0", DIV_TOP_CAM10, 0, 4),
+
+ /* DIV_TOP_CAM11 */
+ DIV(CLK_DIV_SCLK_ISP_SENSOR2_B, "div_sclk_isp_sensor2_b",
+ "div_sclk_isp_sensor2_a", DIV_TOP_CAM11, 20, 4),
+ DIV(CLK_DIV_SCLK_ISP_SENSOR2_A, "div_sclk_isp_sensor2_a",
+ "mout_sclk_isp_sensor2", DIV_TOP_CAM11, 16, 4),
+ DIV(CLK_DIV_SCLK_ISP_SENSOR1_B, "div_sclk_isp_sensor1_b",
+ "div_sclk_isp_sensor1_a", DIV_TOP_CAM11, 12, 4),
+ DIV(CLK_DIV_SCLK_ISP_SENSOR1_A, "div_sclk_isp_sensor1_a",
+ "mout_sclk_isp_sensor1", DIV_TOP_CAM11, 8, 4),
+ DIV(CLK_DIV_SCLK_ISP_SENSOR0_B, "div_sclk_isp_sensor0_b",
+ "div_sclk_isp_sensor0_a", DIV_TOP_CAM11, 12, 4),
+ DIV(CLK_DIV_SCLK_ISP_SENSOR0_A, "div_sclk_isp_sensor0_a",
+ "mout_sclk_isp_sensor0", DIV_TOP_CAM11, 8, 4),
+
+ /* DIV_TOP_FSYS0 */
+ DIV(CLK_DIV_SCLK_MMC1_B, "div_sclk_mmc1_b", "div_sclk_mmc1_a",
+ DIV_TOP_FSYS0, 16, 8),
+ DIV(CLK_DIV_SCLK_MMC1_A, "div_sclk_mmc1_a", "mout_sclk_mmc1_b",
+ DIV_TOP_FSYS0, 12, 4),
+ DIV_F(CLK_DIV_SCLK_MMC0_B, "div_sclk_mmc0_b", "div_sclk_mmc0_a",
+ DIV_TOP_FSYS0, 4, 8, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DIV_SCLK_MMC0_A, "div_sclk_mmc0_a", "mout_sclk_mmc0_d",
+ DIV_TOP_FSYS0, 0, 4, CLK_SET_RATE_PARENT, 0),
+
+ /* DIV_TOP_FSYS1 */
+ DIV(CLK_DIV_SCLK_MMC2_B, "div_sclk_mmc2_b", "div_sclk_mmc2_a",
+ DIV_TOP_FSYS1, 4, 8),
+ DIV(CLK_DIV_SCLK_MMC2_A, "div_sclk_mmc2_a", "mout_sclk_mmc2_b",
+ DIV_TOP_FSYS1, 0, 4),
+
+ /* DIV_TOP_FSYS2 */
+ DIV(CLK_DIV_SCLK_PCIE_100, "div_sclk_pcie_100", "mout_sclk_pcie_100",
+ DIV_TOP_FSYS2, 12, 3),
+ DIV(CLK_DIV_SCLK_USBHOST30, "div_sclk_usbhost30",
+ "mout_sclk_usbhost30", DIV_TOP_FSYS2, 8, 4),
+ DIV(CLK_DIV_SCLK_UFSUNIPRO, "div_sclk_ufsunipro",
+ "mout_sclk_ufsunipro", DIV_TOP_FSYS2, 4, 4),
+ DIV(CLK_DIV_SCLK_USBDRD30, "div_sclk_usbdrd30", "mout_sclk_usbdrd30",
+ DIV_TOP_FSYS2, 0, 4),
+
+ /* DIV_TOP_PERIC0 */
+ DIV(CLK_DIV_SCLK_SPI1_B, "div_sclk_spi1_b", "div_sclk_spi1_a",
+ DIV_TOP_PERIC0, 16, 8),
+ DIV(CLK_DIV_SCLK_SPI1_A, "div_sclk_spi1_a", "mout_sclk_spi1",
+ DIV_TOP_PERIC0, 12, 4),
+ DIV(CLK_DIV_SCLK_SPI0_B, "div_sclk_spi0_b", "div_sclk_spi0_a",
+ DIV_TOP_PERIC0, 4, 8),
+ DIV(CLK_DIV_SCLK_SPI0_A, "div_sclk_spi0_a", "mout_sclk_spi0",
+ DIV_TOP_PERIC0, 0, 4),
+
+ /* DIV_TOP_PERIC1 */
+ DIV(CLK_DIV_SCLK_SPI2_B, "div_sclk_spi2_b", "div_sclk_spi2_a",
+ DIV_TOP_PERIC1, 4, 8),
+ DIV(CLK_DIV_SCLK_SPI2_A, "div_sclk_spi2_a", "mout_sclk_spi2",
+ DIV_TOP_PERIC1, 0, 4),
+
+ /* DIV_TOP_PERIC2 */
+ DIV(CLK_DIV_SCLK_UART2, "div_sclk_uart2", "mout_sclk_uart2",
+ DIV_TOP_PERIC2, 8, 4),
+ DIV(CLK_DIV_SCLK_UART1, "div_sclk_uart1", "mout_sclk_uart1",
+ DIV_TOP_PERIC2, 4, 4),
+ DIV(CLK_DIV_SCLK_UART0, "div_sclk_uart0", "mout_sclk_uart0",
+ DIV_TOP_PERIC2, 0, 4),
+
+ /* DIV_TOP_PERIC3 */
+ DIV(CLK_DIV_SCLK_I2S1, "div_sclk_i2s1", "sclk_audio1",
+ DIV_TOP_PERIC3, 16, 6),
+ DIV(CLK_DIV_SCLK_PCM1, "div_sclk_pcm1", "sclk_audio1",
+ DIV_TOP_PERIC3, 8, 8),
+ DIV(CLK_DIV_SCLK_AUDIO1, "div_sclk_audio1", "mout_sclk_audio1",
+ DIV_TOP_PERIC3, 4, 4),
+ DIV(CLK_DIV_SCLK_AUDIO0, "div_sclk_audio0", "mout_sclk_audio0",
+ DIV_TOP_PERIC3, 0, 4),
+
+ /* DIV_TOP_PERIC4 */
+ DIV(CLK_DIV_SCLK_SPI4_B, "div_sclk_spi4_b", "div_sclk_spi4_a",
+ DIV_TOP_PERIC4, 16, 8),
+ DIV(CLK_DIV_SCLK_SPI4_A, "div_sclk_spi4_a", "mout_sclk_spi4",
+ DIV_TOP_PERIC4, 12, 4),
+ DIV(CLK_DIV_SCLK_SPI3_B, "div_sclk_spi3_b", "div_sclk_spi3_a",
+ DIV_TOP_PERIC4, 4, 8),
+ DIV(CLK_DIV_SCLK_SPI3_A, "div_sclk_spi3_a", "mout_sclk_spi3",
+ DIV_TOP_PERIC4, 0, 4),
+};
+
+static struct samsung_gate_clock top_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_TOP */
+ GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
+ ENABLE_ACLK_TOP, 30, 0, 0),
+ GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266",
+ "div_aclk_imem_sssx_266", ENABLE_ACLK_TOP,
+ 29, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400",
+ ENABLE_ACLK_TOP, 26,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400",
+ ENABLE_ACLK_TOP, 25,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266",
+ ENABLE_ACLK_TOP, 24,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200",
+ ENABLE_ACLK_TOP, 23,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b",
+ ENABLE_ACLK_TOP, 22,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PERIS_66, "aclk_peris_66", "div_aclk_peris_66_b",
+ ENABLE_ACLK_TOP, 21,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MSCL_400, "aclk_mscl_400", "div_aclk_mscl_400",
+ ENABLE_ACLK_TOP, 19,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_FSYS_200, "aclk_fsys_200", "div_aclk_fsys_200",
+ ENABLE_ACLK_TOP, 18,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_GSCL_111, "aclk_gscl_111", "div_aclk_gscl_111",
+ ENABLE_ACLK_TOP, 15,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_GSCL_333, "aclk_gscl_333", "div_aclk_gscl_333",
+ ENABLE_ACLK_TOP, 14,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM1_333, "aclk_cam1_333", "div_aclk_cam1_333",
+ ENABLE_ACLK_TOP, 13,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM1_400, "aclk_cam1_400", "div_aclk_cam1_400",
+ ENABLE_ACLK_TOP, 12,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM1_552, "aclk_cam1_552", "div_aclk_cam1_552",
+ ENABLE_ACLK_TOP, 11,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM0_333, "aclk_cam0_333", "div_aclk_cam0_333",
+ ENABLE_ACLK_TOP, 10,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM0_400, "aclk_cam0_400", "div_aclk_cam0_400",
+ ENABLE_ACLK_TOP, 9,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM0_552, "aclk_cam0_552", "div_aclk_cam0_552",
+ ENABLE_ACLK_TOP, 8,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ISP_DIS_400, "aclk_isp_dis_400", "div_aclk_isp_dis_400",
+ ENABLE_ACLK_TOP, 7,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ISP_400, "aclk_isp_400", "div_aclk_isp_400",
+ ENABLE_ACLK_TOP, 6,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_HEVC_400, "aclk_hevc_400", "div_aclk_hevc_400",
+ ENABLE_ACLK_TOP, 5,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MFC_400, "aclk_mfc_400", "div_aclk_mfc_400",
+ ENABLE_ACLK_TOP, 3,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_G2D_266, "aclk_g2d_266", "div_aclk_g2d_266",
+ ENABLE_ACLK_TOP, 2,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_G2D_400, "aclk_g2d_400", "div_aclk_g2d_400",
+ ENABLE_ACLK_TOP, 0,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_TOP_MSCL */
+ GATE(CLK_SCLK_JPEG_MSCL, "sclk_jpeg_mscl", "div_sclk_jpeg",
+ ENABLE_SCLK_TOP_MSCL, 0, 0, 0),
+
+ /* ENABLE_SCLK_TOP_CAM1 */
+ GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "div_sclk_isp_sensor2_b",
+ ENABLE_SCLK_TOP_CAM1, 7, 0, 0),
+ GATE(CLK_SCLK_ISP_SENSOR1, "sclk_isp_sensor1", "div_sclk_isp_sensor1_b",
+ ENABLE_SCLK_TOP_CAM1, 6, 0, 0),
+ GATE(CLK_SCLK_ISP_SENSOR0, "sclk_isp_sensor0", "div_sclk_isp_sensor0_b",
+ ENABLE_SCLK_TOP_CAM1, 5, 0, 0),
+ GATE(CLK_SCLK_ISP_MCTADC_CAM1, "sclk_isp_mctadc_cam1", "oscclk",
+ ENABLE_SCLK_TOP_CAM1, 4, 0, 0),
+ GATE(CLK_SCLK_ISP_UART_CAM1, "sclk_isp_uart_cam1", "div_sclk_isp_uart",
+ ENABLE_SCLK_TOP_CAM1, 2, 0, 0),
+ GATE(CLK_SCLK_ISP_SPI1_CAM1, "sclk_isp_spi1_cam1", "div_sclk_isp_spi1_b",
+ ENABLE_SCLK_TOP_CAM1, 1, 0, 0),
+ GATE(CLK_SCLK_ISP_SPI0_CAM1, "sclk_isp_spi0_cam1", "div_sclk_isp_spi0_b",
+ ENABLE_SCLK_TOP_CAM1, 0, 0, 0),
+
+ /* ENABLE_SCLK_TOP_DISP */
+ GATE(CLK_SCLK_HDMI_SPDIF_DISP, "sclk_hdmi_spdif_disp",
+ "mout_sclk_hdmi_spdif", ENABLE_SCLK_TOP_DISP, 0,
+ CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_TOP_FSYS */
+ GATE(CLK_SCLK_PCIE_100_FSYS, "sclk_pcie_100_fsys", "div_sclk_pcie_100",
+ ENABLE_SCLK_TOP_FSYS, 7, 0, 0),
+ GATE(CLK_SCLK_MMC2_FSYS, "sclk_mmc2_fsys", "div_sclk_mmc2_b",
+ ENABLE_SCLK_TOP_FSYS, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC1_FSYS, "sclk_mmc1_fsys", "div_sclk_mmc1_b",
+ ENABLE_SCLK_TOP_FSYS, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC0_FSYS, "sclk_mmc0_fsys", "div_sclk_mmc0_b",
+ ENABLE_SCLK_TOP_FSYS, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UFSUNIPRO_FSYS, "sclk_ufsunipro_fsys",
+ "div_sclk_ufsunipro", ENABLE_SCLK_TOP_FSYS,
+ 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_USBHOST30_FSYS, "sclk_usbhost30_fsys",
+ "div_sclk_usbhost30", ENABLE_SCLK_TOP_FSYS,
+ 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_USBDRD30_FSYS, "sclk_usbdrd30_fsys",
+ "div_sclk_usbdrd30", ENABLE_SCLK_TOP_FSYS,
+ 0, CLK_SET_RATE_PARENT, 0),
+
+ /* ENABLE_SCLK_TOP_PERIC */
+ GATE(CLK_SCLK_SPI4_PERIC, "sclk_spi4_peric", "div_sclk_spi4_b",
+ ENABLE_SCLK_TOP_PERIC, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI3_PERIC, "sclk_spi3_peric", "div_sclk_spi3_b",
+ ENABLE_SCLK_TOP_PERIC, 11, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPDIF_PERIC, "sclk_spdif_peric", "mout_sclk_spdif",
+ ENABLE_SCLK_TOP_PERIC, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_I2S1_PERIC, "sclk_i2s1_peric", "div_sclk_i2s1",
+ ENABLE_SCLK_TOP_PERIC, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_PCM1_PERIC, "sclk_pcm1_peric", "div_sclk_pcm1",
+ ENABLE_SCLK_TOP_PERIC, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART2_PERIC, "sclk_uart2_peric", "div_sclk_uart2",
+ ENABLE_SCLK_TOP_PERIC, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART1_PERIC, "sclk_uart1_peric", "div_sclk_uart1",
+ ENABLE_SCLK_TOP_PERIC, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART0_PERIC, "sclk_uart0_peric", "div_sclk_uart0",
+ ENABLE_SCLK_TOP_PERIC, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI2_PERIC, "sclk_spi2_peric", "div_sclk_spi2_b",
+ ENABLE_SCLK_TOP_PERIC, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI1_PERIC, "sclk_spi1_peric", "div_sclk_spi1_b",
+ ENABLE_SCLK_TOP_PERIC, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI0_PERIC, "sclk_spi0_peric", "div_sclk_spi0_b",
+ ENABLE_SCLK_TOP_PERIC, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* MUX_ENABLE_TOP_PERIC1 */
+ GATE(CLK_SCLK_SLIMBUS, "sclk_slimbus", "mout_sclk_slimbus",
+ MUX_ENABLE_TOP_PERIC1, 16, 0, 0),
+ GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_sclk_audio1",
+ MUX_ENABLE_TOP_PERIC1, 4, 0, 0),
+ GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_sclk_audio0",
+ MUX_ENABLE_TOP_PERIC1, 0, 0, 0),
+};
+
+/*
+ * ATLAS_PLL & APOLLO_PLL & MEM0_PLL & MEM1_PLL & BUS_PLL & MFC_PLL
+ * & MPHY_PLL & G3D_PLL & DISP_PLL & ISP_PLL
+ */
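+/*
+ * These pll_35xx entries follow rate = (mdiv * oscclk) / (pdiv * 2^sdiv),
+ * assuming the usual 24 MHz oscclk; e.g. the first entry below is
+ * 24 MHz * 625 / (6 * 2^0) = 2.5 GHz.
+ */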
+static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
+ PLL_35XX_RATE(2500000000U, 625, 6, 0),
+ PLL_35XX_RATE(2400000000U, 500, 5, 0),
+ PLL_35XX_RATE(2300000000U, 575, 6, 0),
+ PLL_35XX_RATE(2200000000U, 550, 6, 0),
+ PLL_35XX_RATE(2100000000U, 350, 4, 0),
+ PLL_35XX_RATE(2000000000U, 500, 6, 0),
+ PLL_35XX_RATE(1900000000U, 475, 6, 0),
+ PLL_35XX_RATE(1800000000U, 375, 5, 0),
+ PLL_35XX_RATE(1700000000U, 425, 6, 0),
+ PLL_35XX_RATE(1600000000U, 400, 6, 0),
+ PLL_35XX_RATE(1500000000U, 250, 4, 0),
+ PLL_35XX_RATE(1400000000U, 350, 6, 0),
+ PLL_35XX_RATE(1332000000U, 222, 4, 0),
+ PLL_35XX_RATE(1300000000U, 325, 6, 0),
+ PLL_35XX_RATE(1200000000U, 500, 5, 1),
+ PLL_35XX_RATE(1100000000U, 550, 6, 1),
+ PLL_35XX_RATE(1086000000U, 362, 4, 1),
+ PLL_35XX_RATE(1066000000U, 533, 6, 1),
+ PLL_35XX_RATE(1000000000U, 500, 6, 1),
+ PLL_35XX_RATE(933000000U, 311, 4, 1),
+ PLL_35XX_RATE(921000000U, 307, 4, 1),
+ PLL_35XX_RATE(900000000U, 375, 5, 1),
+ PLL_35XX_RATE(825000000U, 275, 4, 1),
+ PLL_35XX_RATE(800000000U, 400, 6, 1),
+ PLL_35XX_RATE(733000000U, 733, 12, 1),
+ PLL_35XX_RATE(700000000U, 175, 3, 1),
+ PLL_35XX_RATE(667000000U, 222, 4, 1),
+ PLL_35XX_RATE(633000000U, 211, 4, 1),
+ PLL_35XX_RATE(600000000U, 500, 5, 2),
+ PLL_35XX_RATE(552000000U, 460, 5, 2),
+ PLL_35XX_RATE(550000000U, 550, 6, 2),
+ PLL_35XX_RATE(543000000U, 362, 4, 2),
+ PLL_35XX_RATE(533000000U, 533, 6, 2),
+ PLL_35XX_RATE(500000000U, 500, 6, 2),
+ PLL_35XX_RATE(444000000U, 370, 5, 2),
+ PLL_35XX_RATE(420000000U, 350, 5, 2),
+ PLL_35XX_RATE(400000000U, 400, 6, 2),
+ PLL_35XX_RATE(350000000U, 350, 6, 2),
+ PLL_35XX_RATE(333000000U, 222, 4, 2),
+ PLL_35XX_RATE(300000000U, 500, 5, 3),
+ PLL_35XX_RATE(266000000U, 532, 6, 3),
+ PLL_35XX_RATE(200000000U, 400, 6, 3),
+ PLL_35XX_RATE(166000000U, 332, 6, 3),
+ PLL_35XX_RATE(160000000U, 320, 6, 3),
+ PLL_35XX_RATE(133000000U, 532, 6, 4),
+ PLL_35XX_RATE(100000000U, 400, 6, 4),
+ { /* sentinel */ }
+};
+
+/* AUD_PLL */
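+/*
+ * pll_36xx entries add a signed fractional kdiv term: assuming a 24 MHz
+ * oscclk, rate = ((mdiv + kdiv / 65536) * oscclk) / (pdiv * 2^sdiv),
+ * e.g. (197 - 25690 / 65536) * 24 MHz / (3 * 2^2) ~= 393.216 MHz.
+ */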
+static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = {
+ PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
+ PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
+ PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
+ PLL_36XX_RATE(338688000U, 113, 2, 2, -6816),
+ PLL_36XX_RATE(294912000U, 98, 1, 3, 19923),
+ PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
+ PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_clock top_pll_clks[] __initdata = {
+ PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "oscclk",
+ ISP_PLL_LOCK, ISP_PLL_CON0, exynos5443_pll_rates),
+ PLL(pll_36xx, CLK_FOUT_AUD_PLL, "fout_aud_pll", "oscclk",
+ AUD_PLL_LOCK, AUD_PLL_CON0, exynos5443_aud_pll_rates),
+};
+
+static struct samsung_cmu_info top_cmu_info __initdata = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .fixed_clks = top_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .nr_clk_ids = TOP_NR_CLK,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
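+/*
+ * samsung_cmu_register_one() registers all of the clock tables above with
+ * the common Samsung clock framework and uses the clk_regs list to
+ * save/restore the CMU registers across suspend; CLK_OF_DECLARE() ties the
+ * init routine to the compatible string so the clocks exist at early boot.
+ */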
+static void __init exynos5433_cmu_top_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &top_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_top, "samsung,exynos5433-cmu-top",
+ exynos5433_cmu_top_init);
+
+/*
+ * Register offset definitions for CMU_CPIF
+ */
+#define MPHY_PLL_LOCK 0x0000
+#define MPHY_PLL_CON0 0x0100
+#define MPHY_PLL_CON1 0x0104
+#define MPHY_PLL_FREQ_DET 0x010c
+#define MUX_SEL_CPIF0 0x0200
+#define DIV_CPIF 0x0600
+#define ENABLE_SCLK_CPIF 0x0a00
+
+static unsigned long cpif_clk_regs[] __initdata = {
+ MPHY_PLL_LOCK,
+ MPHY_PLL_CON0,
+ MPHY_PLL_CON1,
+ MPHY_PLL_FREQ_DET,
+ MUX_SEL_CPIF0,
+ ENABLE_SCLK_CPIF,
+};
+
+/* list of parent clocks */
+PNAME(mout_mphy_pll_p) = { "oscclk", "fout_mphy_pll", };
+
+static struct samsung_pll_clock cpif_pll_clks[] __initdata = {
+ PLL(pll_35xx, CLK_FOUT_MPHY_PLL, "fout_mphy_pll", "oscclk",
+ MPHY_PLL_LOCK, MPHY_PLL_CON0, exynos5443_pll_rates),
+};
+
+static struct samsung_mux_clock cpif_mux_clks[] __initdata = {
+ /* MUX_SEL_CPIF0 */
+ MUX(CLK_MOUT_MPHY_PLL, "mout_mphy_pll", mout_mphy_pll_p, MUX_SEL_CPIF0,
+ 0, 1),
+};
+
+static struct samsung_div_clock cpif_div_clks[] __initdata = {
+ /* DIV_CPIF */
+ DIV(CLK_DIV_SCLK_MPHY, "div_sclk_mphy", "mout_mphy_pll", DIV_CPIF,
+ 0, 6),
+};
+
+static struct samsung_gate_clock cpif_gate_clks[] __initdata = {
+ /* ENABLE_SCLK_CPIF */
+ GATE(CLK_SCLK_MPHY_PLL, "sclk_mphy_pll", "mout_mphy_pll",
+ ENABLE_SCLK_CPIF, 9, 0, 0),
+ GATE(CLK_SCLK_UFS_MPHY, "sclk_ufs_mphy", "div_sclk_mphy",
+ ENABLE_SCLK_CPIF, 4, 0, 0),
+};
+
+static struct samsung_cmu_info cpif_cmu_info __initdata = {
+ .pll_clks = cpif_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpif_pll_clks),
+ .mux_clks = cpif_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpif_mux_clks),
+ .div_clks = cpif_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpif_div_clks),
+ .gate_clks = cpif_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cpif_gate_clks),
+ .nr_clk_ids = CPIF_NR_CLK,
+ .clk_regs = cpif_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpif_clk_regs),
+};
+
+static void __init exynos5433_cmu_cpif_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cpif_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_cpif, "samsung,exynos5433-cmu-cpif",
+ exynos5433_cmu_cpif_init);
+
+/*
+ * Register offset definitions for CMU_MIF
+ */
+#define MEM0_PLL_LOCK 0x0000
+#define MEM1_PLL_LOCK 0x0004
+#define BUS_PLL_LOCK 0x0008
+#define MFC_PLL_LOCK 0x000c
+#define MEM0_PLL_CON0 0x0100
+#define MEM0_PLL_CON1 0x0104
+#define MEM0_PLL_FREQ_DET 0x010c
+#define MEM1_PLL_CON0 0x0110
+#define MEM1_PLL_CON1 0x0114
+#define MEM1_PLL_FREQ_DET 0x011c
+#define BUS_PLL_CON0 0x0120
+#define BUS_PLL_CON1 0x0124
+#define BUS_PLL_FREQ_DET 0x012c
+#define MFC_PLL_CON0 0x0130
+#define MFC_PLL_CON1 0x0134
+#define MFC_PLL_FREQ_DET 0x013c
+#define MUX_SEL_MIF0 0x0200
+#define MUX_SEL_MIF1 0x0204
+#define MUX_SEL_MIF2 0x0208
+#define MUX_SEL_MIF3 0x020c
+#define MUX_SEL_MIF4 0x0210
+#define MUX_SEL_MIF5 0x0214
+#define MUX_SEL_MIF6 0x0218
+#define MUX_SEL_MIF7 0x021c
+#define MUX_ENABLE_MIF0 0x0300
+#define MUX_ENABLE_MIF1 0x0304
+#define MUX_ENABLE_MIF2 0x0308
+#define MUX_ENABLE_MIF3 0x030c
+#define MUX_ENABLE_MIF4 0x0310
+#define MUX_ENABLE_MIF5 0x0314
+#define MUX_ENABLE_MIF6 0x0318
+#define MUX_ENABLE_MIF7 0x031c
+#define MUX_STAT_MIF0 0x0400
+#define MUX_STAT_MIF1 0x0404
+#define MUX_STAT_MIF2 0x0408
+#define MUX_STAT_MIF3 0x040c
+#define MUX_STAT_MIF4 0x0410
+#define MUX_STAT_MIF5 0x0414
+#define MUX_STAT_MIF6 0x0418
+#define MUX_STAT_MIF7 0x041c
+#define DIV_MIF1 0x0604
+#define DIV_MIF2 0x0608
+#define DIV_MIF3 0x060c
+#define DIV_MIF4 0x0610
+#define DIV_MIF5 0x0614
+#define DIV_MIF_PLL_FREQ_DET 0x0618
+#define DIV_STAT_MIF1 0x0704
+#define DIV_STAT_MIF2 0x0708
+#define DIV_STAT_MIF3 0x070c
+#define DIV_STAT_MIF4 0x0710
+#define DIV_STAT_MIF5 0x0714
+#define DIV_STAT_MIF_PLL_FREQ_DET 0x0718
+#define ENABLE_ACLK_MIF0 0x0800
+#define ENABLE_ACLK_MIF1 0x0804
+#define ENABLE_ACLK_MIF2 0x0808
+#define ENABLE_ACLK_MIF3 0x080c
+#define ENABLE_PCLK_MIF 0x0900
+#define ENABLE_PCLK_MIF_SECURE_DREX0_TZ 0x0904
+#define ENABLE_PCLK_MIF_SECURE_DREX1_TZ 0x0908
+#define ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT 0x090c
+#define ENABLE_PCLK_MIF_SECURE_RTC 0x0910
+#define ENABLE_SCLK_MIF 0x0a00
+#define ENABLE_IP_MIF0 0x0b00
+#define ENABLE_IP_MIF1 0x0b04
+#define ENABLE_IP_MIF2 0x0b08
+#define ENABLE_IP_MIF3 0x0b0c
+#define ENABLE_IP_MIF_SECURE_DREX0_TZ 0x0b10
+#define ENABLE_IP_MIF_SECURE_DREX1_TZ 0x0b14
+#define ENABLE_IP_MIF_SECURE_MONOTONIC_CNT 0x0b18
+#define ENABLE_IP_MIF_SECURE_RTC 0x0b1c
+#define CLKOUT_CMU_MIF 0x0c00
+#define CLKOUT_CMU_MIF_DIV_STAT 0x0c04
+#define DREX_FREQ_CTRL0 0x1000
+#define DREX_FREQ_CTRL1 0x1004
+#define PAUSE 0x1008
+#define DDRPHY_LOCK_CTRL 0x100c
+
+static unsigned long mif_clk_regs[] __initdata = {
+ MEM0_PLL_LOCK,
+ MEM1_PLL_LOCK,
+ BUS_PLL_LOCK,
+ MFC_PLL_LOCK,
+ MEM0_PLL_CON0,
+ MEM0_PLL_CON1,
+ MEM0_PLL_FREQ_DET,
+ MEM1_PLL_CON0,
+ MEM1_PLL_CON1,
+ MEM1_PLL_FREQ_DET,
+ BUS_PLL_CON0,
+ BUS_PLL_CON1,
+ BUS_PLL_FREQ_DET,
+ MFC_PLL_CON0,
+ MFC_PLL_CON1,
+ MFC_PLL_FREQ_DET,
+ MUX_SEL_MIF0,
+ MUX_SEL_MIF1,
+ MUX_SEL_MIF2,
+ MUX_SEL_MIF3,
+ MUX_SEL_MIF4,
+ MUX_SEL_MIF5,
+ MUX_SEL_MIF6,
+ MUX_SEL_MIF7,
+ MUX_ENABLE_MIF0,
+ MUX_ENABLE_MIF1,
+ MUX_ENABLE_MIF2,
+ MUX_ENABLE_MIF3,
+ MUX_ENABLE_MIF4,
+ MUX_ENABLE_MIF5,
+ MUX_ENABLE_MIF6,
+ MUX_ENABLE_MIF7,
+ MUX_STAT_MIF0,
+ MUX_STAT_MIF1,
+ MUX_STAT_MIF2,
+ MUX_STAT_MIF3,
+ MUX_STAT_MIF4,
+ MUX_STAT_MIF5,
+ MUX_STAT_MIF6,
+ MUX_STAT_MIF7,
+ DIV_MIF1,
+ DIV_MIF2,
+ DIV_MIF3,
+ DIV_MIF4,
+ DIV_MIF5,
+ DIV_MIF_PLL_FREQ_DET,
+ DIV_STAT_MIF1,
+ DIV_STAT_MIF2,
+ DIV_STAT_MIF3,
+ DIV_STAT_MIF4,
+ DIV_STAT_MIF5,
+ DIV_STAT_MIF_PLL_FREQ_DET,
+ ENABLE_ACLK_MIF0,
+ ENABLE_ACLK_MIF1,
+ ENABLE_ACLK_MIF2,
+ ENABLE_ACLK_MIF3,
+ ENABLE_PCLK_MIF,
+ ENABLE_PCLK_MIF_SECURE_DREX0_TZ,
+ ENABLE_PCLK_MIF_SECURE_DREX1_TZ,
+ ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT,
+ ENABLE_PCLK_MIF_SECURE_RTC,
+ ENABLE_SCLK_MIF,
+ ENABLE_IP_MIF0,
+ ENABLE_IP_MIF1,
+ ENABLE_IP_MIF2,
+ ENABLE_IP_MIF3,
+ ENABLE_IP_MIF_SECURE_DREX0_TZ,
+ ENABLE_IP_MIF_SECURE_DREX1_TZ,
+ ENABLE_IP_MIF_SECURE_MONOTONIC_CNT,
+ ENABLE_IP_MIF_SECURE_RTC,
+ CLKOUT_CMU_MIF,
+ CLKOUT_CMU_MIF_DIV_STAT,
+ DREX_FREQ_CTRL0,
+ DREX_FREQ_CTRL1,
+ PAUSE,
+ DDRPHY_LOCK_CTRL,
+};
+
+static struct samsung_pll_clock mif_pll_clks[] __initdata = {
+ PLL(pll_35xx, CLK_FOUT_MEM0_PLL, "fout_mem0_pll", "oscclk",
+ MEM0_PLL_LOCK, MEM0_PLL_CON0, exynos5443_pll_rates),
+ PLL(pll_35xx, CLK_FOUT_MEM1_PLL, "fout_mem1_pll", "oscclk",
+ MEM1_PLL_LOCK, MEM1_PLL_CON0, exynos5443_pll_rates),
+ PLL(pll_35xx, CLK_FOUT_BUS_PLL, "fout_bus_pll", "oscclk",
+ BUS_PLL_LOCK, BUS_PLL_CON0, exynos5443_pll_rates),
+ PLL(pll_35xx, CLK_FOUT_MFC_PLL, "fout_mfc_pll", "oscclk",
+ MFC_PLL_LOCK, MFC_PLL_CON0, exynos5443_pll_rates),
+};
+
+/* list of parent clocks */
+PNAME(mout_mfc_pll_div2_p) = { "mout_mfc_pll", "dout_mfc_pll", };
+PNAME(mout_bus_pll_div2_p) = { "mout_bus_pll", "dout_bus_pll", };
+PNAME(mout_mem1_pll_div2_p) = { "mout_mem1_pll", "dout_mem1_pll", };
+PNAME(mout_mem0_pll_div2_p) = { "mout_mem0_pll", "dout_mem0_pll", };
+PNAME(mout_mfc_pll_p) = { "oscclk", "fout_mfc_pll", };
+PNAME(mout_bus_pll_p) = { "oscclk", "fout_bus_pll", };
+PNAME(mout_mem1_pll_p) = { "oscclk", "fout_mem1_pll", };
+PNAME(mout_mem0_pll_p) = { "oscclk", "fout_mem0_pll", };
+
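+/*
+ * The DREX PHY clocks (clk2x/clkm) are picked through the chained _a/_b/_c
+ * muxes below, which together reach the BUS, MFC, MEM0 and MEM1 PLL paths.
+ */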
+PNAME(mout_clk2x_phy_c_p) = { "mout_mem0_pll_div2", "mout_clkm_phy_b", };
+PNAME(mout_clk2x_phy_b_p) = { "mout_bus_pll_div2", "mout_clkm_phy_a", };
+PNAME(mout_clk2x_phy_a_p) = { "mout_bus_pll_div2", "mout_mfc_pll_div2", };
+PNAME(mout_clkm_phy_b_p) = { "mout_mem1_pll_div2", "mout_clkm_phy_a", };
+
+PNAME(mout_aclk_mifnm_200_p) = { "mout_mem0_pll_div2", "div_mif_pre", };
+PNAME(mout_aclk_mifnm_400_p) = { "mout_mem1_pll_div2", "mout_bus_pll_div2", };
+
+PNAME(mout_aclk_disp_333_b_p) = { "mout_aclk_disp_333_a",
+ "mout_bus_pll_div2", };
+PNAME(mout_aclk_disp_333_a_p) = { "mout_mfc_pll_div2", "sclk_mphy_pll", };
+
+PNAME(mout_sclk_decon_vclk_c_p) = { "mout_sclk_decon_vclk_b",
+ "sclk_mphy_pll", };
+PNAME(mout_sclk_decon_vclk_b_p) = { "mout_sclk_decon_vclk_a",
+ "mout_mfc_pll_div2", };
+PNAME(mout_sclk_decon_p) = { "oscclk", "mout_bus_pll_div2", };
+PNAME(mout_sclk_decon_eclk_c_p) = { "mout_sclk_decon_eclk_b",
+ "sclk_mphy_pll", };
+PNAME(mout_sclk_decon_eclk_b_p) = { "mout_sclk_decon_eclk_a",
+ "mout_mfc_pll_div2", };
+
+PNAME(mout_sclk_decon_tv_eclk_c_p) = { "mout_sclk_decon_tv_eclk_b",
+ "sclk_mphy_pll", };
+PNAME(mout_sclk_decon_tv_eclk_b_p) = { "mout_sclk_decon_tv_eclk_a",
+ "mout_mfc_pll_div2", };
+PNAME(mout_sclk_dsd_c_p) = { "mout_sclk_dsd_b", "mout_bus_pll_div2", };
+PNAME(mout_sclk_dsd_b_p) = { "mout_sclk_dsd_a", "sclk_mphy_pll", };
+PNAME(mout_sclk_dsd_a_p) = { "oscclk", "mout_mfc_pll_div2", };
+
+PNAME(mout_sclk_dsim0_c_p) = { "mout_sclk_dsim0_b", "sclk_mphy_pll", };
+PNAME(mout_sclk_dsim0_b_p) = { "mout_sclk_dsim0_a", "mout_mfc_pll_div2", };
+
+PNAME(mout_sclk_decon_tv_vclk_c_p) = { "mout_sclk_decon_tv_vclk_b",
+ "sclk_mphy_pll", };
+PNAME(mout_sclk_decon_tv_vclk_b_p) = { "mout_sclk_decon_tv_vclk_a",
+ "mout_mfc_pll_div2", };
+PNAME(mout_sclk_dsim1_c_p) = { "mout_sclk_dsim1_b", "sclk_mphy_pll", };
+PNAME(mout_sclk_dsim1_b_p) = { "mout_sclk_dsim1_a", "mout_mfc_pll_div2", };
+
+static struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initdata = {
+ /* dout_{mfc|bus|mem1|mem0}_pll is half fixed rate from parent mux */
+ FFACTOR(CLK_DOUT_MFC_PLL, "dout_mfc_pll", "mout_mfc_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_BUS_PLL, "dout_bus_pll", "mout_bus_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_MEM1_PLL, "dout_mem1_pll", "mout_mem1_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_MEM0_PLL, "dout_mem0_pll", "mout_mem0_pll", 1, 1, 0),
+};
+
+static struct samsung_mux_clock mif_mux_clks[] __initdata = {
+ /* MUX_SEL_MIF0 */
+ MUX(CLK_MOUT_MFC_PLL_DIV2, "mout_mfc_pll_div2", mout_mfc_pll_div2_p,
+ MUX_SEL_MIF0, 28, 1),
+ MUX(CLK_MOUT_BUS_PLL_DIV2, "mout_bus_pll_div2", mout_bus_pll_div2_p,
+ MUX_SEL_MIF0, 24, 1),
+ MUX(CLK_MOUT_MEM1_PLL_DIV2, "mout_mem1_pll_div2", mout_mem1_pll_div2_p,
+ MUX_SEL_MIF0, 20, 1),
+ MUX(CLK_MOUT_MEM0_PLL_DIV2, "mout_mem0_pll_div2", mout_mem0_pll_div2_p,
+ MUX_SEL_MIF0, 16, 1),
+ MUX(CLK_MOUT_MFC_PLL, "mout_mfc_pll", mout_mfc_pll_p, MUX_SEL_MIF0,
+ 12, 1),
+ MUX(CLK_MOUT_BUS_PLL, "mout_bus_pll", mout_bus_pll_p, MUX_SEL_MIF0,
+ 8, 1),
+ MUX(CLK_MOUT_MEM1_PLL, "mout_mem1_pll", mout_mem1_pll_p, MUX_SEL_MIF0,
+ 4, 1),
+ MUX(CLK_MOUT_MEM0_PLL, "mout_mem0_pll", mout_mem0_pll_p, MUX_SEL_MIF0,
+ 0, 1),
+
+ /* MUX_SEL_MIF1 */
+ MUX(CLK_MOUT_CLK2X_PHY_C, "mout_clk2x_phy_c", mout_clk2x_phy_c_p,
+ MUX_SEL_MIF1, 24, 1),
+ MUX(CLK_MOUT_CLK2X_PHY_B, "mout_clk2x_phy_b", mout_clk2x_phy_b_p,
+ MUX_SEL_MIF1, 20, 1),
+ MUX(CLK_MOUT_CLK2X_PHY_A, "mout_clk2x_phy_a", mout_clk2x_phy_a_p,
+ MUX_SEL_MIF1, 16, 1),
+ MUX(CLK_MOUT_CLKM_PHY_C, "mout_clkm_phy_c", mout_clk2x_phy_c_p,
+ MUX_SEL_MIF1, 12, 1),
+ MUX(CLK_MOUT_CLKM_PHY_B, "mout_clkm_phy_b", mout_clkm_phy_b_p,
+ MUX_SEL_MIF1, 8, 1),
+ MUX(CLK_MOUT_CLKM_PHY_A, "mout_clkm_phy_a", mout_clk2x_phy_a_p,
+ MUX_SEL_MIF1, 4, 1),
+
+ /* MUX_SEL_MIF2 */
+ MUX(CLK_MOUT_ACLK_MIFNM_200, "mout_aclk_mifnm_200",
+ mout_aclk_mifnm_200_p, MUX_SEL_MIF2, 8, 1),
+ MUX(CLK_MOUT_ACLK_MIFNM_400, "mout_aclk_mifnm_400",
+ mout_aclk_mifnm_400_p, MUX_SEL_MIF2, 0, 1),
+
+ /* MUX_SEL_MIF3 */
+ MUX(CLK_MOUT_ACLK_DISP_333_B, "mout_aclk_disp_333_b",
+ mout_aclk_disp_333_b_p, MUX_SEL_MIF3, 4, 1),
+ MUX(CLK_MOUT_ACLK_DISP_333_A, "mout_aclk_disp_333_a",
+ mout_aclk_disp_333_a_p, MUX_SEL_MIF3, 0, 1),
+
+ /* MUX_SEL_MIF4 */
+ MUX(CLK_MOUT_SCLK_DECON_VCLK_C, "mout_sclk_decon_vclk_c",
+ mout_sclk_decon_vclk_c_p, MUX_SEL_MIF4, 24, 1),
+ MUX(CLK_MOUT_SCLK_DECON_VCLK_B, "mout_sclk_decon_vclk_b",
+ mout_sclk_decon_vclk_b_p, MUX_SEL_MIF4, 20, 1),
+ MUX(CLK_MOUT_SCLK_DECON_VCLK_A, "mout_sclk_decon_vclk_a",
+ mout_sclk_decon_p, MUX_SEL_MIF4, 16, 1),
+ MUX(CLK_MOUT_SCLK_DECON_ECLK_C, "mout_sclk_decon_eclk_c",
+ mout_sclk_decon_eclk_c_p, MUX_SEL_MIF4, 8, 1),
+ MUX(CLK_MOUT_SCLK_DECON_ECLK_B, "mout_sclk_decon_eclk_b",
+ mout_sclk_decon_eclk_b_p, MUX_SEL_MIF4, 4, 1),
+ MUX(CLK_MOUT_SCLK_DECON_ECLK_A, "mout_sclk_decon_eclk_a",
+ mout_sclk_decon_p, MUX_SEL_MIF4, 0, 1),
+
+ /* MUX_SEL_MIF5 */
+ MUX(CLK_MOUT_SCLK_DECON_TV_ECLK_C, "mout_sclk_decon_tv_eclk_c",
+ mout_sclk_decon_tv_eclk_c_p, MUX_SEL_MIF5, 24, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_ECLK_B, "mout_sclk_decon_tv_eclk_b",
+ mout_sclk_decon_tv_eclk_b_p, MUX_SEL_MIF5, 20, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_ECLK_A, "mout_sclk_decon_tv_eclk_a",
+ mout_sclk_decon_p, MUX_SEL_MIF5, 16, 1),
+ MUX(CLK_MOUT_SCLK_DSD_C, "mout_sclk_dsd_c", mout_sclk_dsd_c_p,
+ MUX_SEL_MIF5, 8, 1),
+ MUX(CLK_MOUT_SCLK_DSD_B, "mout_sclk_dsd_b", mout_sclk_dsd_b_p,
+ MUX_SEL_MIF5, 4, 1),
+ MUX(CLK_MOUT_SCLK_DSD_A, "mout_sclk_dsd_a", mout_sclk_dsd_a_p,
+ MUX_SEL_MIF5, 0, 1),
+
+ /* MUX_SEL_MIF6 */
+ MUX(CLK_MOUT_SCLK_DSIM0_C, "mout_sclk_dsim0_c", mout_sclk_dsim0_c_p,
+ MUX_SEL_MIF6, 8, 1),
+ MUX(CLK_MOUT_SCLK_DSIM0_B, "mout_sclk_dsim0_b", mout_sclk_dsim0_b_p,
+ MUX_SEL_MIF6, 4, 1),
+ MUX(CLK_MOUT_SCLK_DSIM0_A, "mout_sclk_dsim0_a", mout_sclk_decon_p,
+ MUX_SEL_MIF6, 0, 1),
+
+ /* MUX_SEL_MIF7 */
+ MUX(CLK_MOUT_SCLK_DECON_TV_VCLK_C, "mout_sclk_decon_tv_vclk_c",
+ mout_sclk_decon_tv_vclk_c_p, MUX_SEL_MIF7, 24, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_VCLK_B, "mout_sclk_decon_tv_vclk_b",
+ mout_sclk_decon_tv_vclk_b_p, MUX_SEL_MIF7, 20, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_VCLK_A, "mout_sclk_decon_tv_vclk_a",
+ mout_sclk_decon_p, MUX_SEL_MIF7, 16, 1),
+ MUX(CLK_MOUT_SCLK_DSIM1_C, "mout_sclk_dsim1_c", mout_sclk_dsim1_c_p,
+ MUX_SEL_MIF7, 8, 1),
+ MUX(CLK_MOUT_SCLK_DSIM1_B, "mout_sclk_dsim1_b", mout_sclk_dsim1_b_p,
+ MUX_SEL_MIF7, 4, 1),
+ MUX(CLK_MOUT_SCLK_DSIM1_A, "mout_sclk_dsim1_a", mout_sclk_decon_p,
+ MUX_SEL_MIF7, 0, 1),
+};
+
+static struct samsung_div_clock mif_div_clks[] __initdata = {
+ /* DIV_MIF1 */
+ DIV(CLK_DIV_SCLK_HPM_MIF, "div_sclk_hpm_mif", "div_clk2x_phy",
+ DIV_MIF1, 16, 2),
+ DIV(CLK_DIV_ACLK_DREX1, "div_aclk_drex1", "div_clk2x_phy", DIV_MIF1,
+ 12, 2),
+ DIV(CLK_DIV_ACLK_DREX0, "div_aclk_drex0", "div_clk2x_phy", DIV_MIF1,
+ 8, 2),
+ DIV(CLK_DIV_CLK2XPHY, "div_clk2x_phy", "mout_clk2x_phy_c", DIV_MIF1,
+ 4, 4),
+
+ /* DIV_MIF2 */
+ DIV(CLK_DIV_ACLK_MIF_266, "div_aclk_mif_266", "mout_bus_pll_div2",
+ DIV_MIF2, 20, 3),
+ DIV(CLK_DIV_ACLK_MIFND_133, "div_aclk_mifnd_133", "div_mif_pre",
+ DIV_MIF2, 16, 4),
+ DIV(CLK_DIV_ACLK_MIF_133, "div_aclk_mif_133", "div_mif_pre",
+ DIV_MIF2, 12, 4),
+ DIV(CLK_DIV_ACLK_MIFNM_200, "div_aclk_mifnm_200",
+ "mout_aclk_mifnm_200", DIV_MIF2, 8, 3),
+ DIV(CLK_DIV_ACLK_MIF_200, "div_aclk_mif_200", "div_aclk_mif_400",
+ DIV_MIF2, 4, 2),
+ DIV(CLK_DIV_ACLK_MIF_400, "div_aclk_mif_400", "mout_aclk_mifnm_400",
+ DIV_MIF2, 0, 3),
+
+ /* DIV_MIF3 */
+ DIV(CLK_DIV_ACLK_BUS2_400, "div_aclk_bus2_400", "div_mif_pre",
+ DIV_MIF3, 16, 4),
+ DIV(CLK_DIV_ACLK_DISP_333, "div_aclk_disp_333", "mout_aclk_disp_333_b",
+ DIV_MIF3, 4, 3),
+ DIV(CLK_DIV_ACLK_CPIF_200, "div_aclk_cpif_200", "mout_aclk_mifnm_200",
+ DIV_MIF3, 0, 3),
+
+ /* DIV_MIF4 */
+ DIV(CLK_DIV_SCLK_DSIM1, "div_sclk_dsim1", "mout_sclk_dsim1_c",
+ DIV_MIF4, 24, 4),
+ DIV(CLK_DIV_SCLK_DECON_TV_VCLK, "div_sclk_decon_tv_vclk",
+ "mout_sclk_decon_tv_vclk_c", DIV_MIF4, 20, 4),
+ DIV(CLK_DIV_SCLK_DSIM0, "div_sclk_dsim0", "mout_sclk_dsim0_c",
+ DIV_MIF4, 16, 4),
+ DIV(CLK_DIV_SCLK_DSD, "div_sclk_dsd", "mout_sclk_dsd_c",
+ DIV_MIF4, 12, 4),
+ DIV(CLK_DIV_SCLK_DECON_TV_ECLK, "div_sclk_decon_tv_eclk",
+ "mout_sclk_decon_tv_eclk_c", DIV_MIF4, 8, 4),
+ DIV(CLK_DIV_SCLK_DECON_VCLK, "div_sclk_decon_vclk",
+ "mout_sclk_decon_vclk_c", DIV_MIF4, 4, 4),
+ DIV(CLK_DIV_SCLK_DECON_ECLK, "div_sclk_decon_eclk",
+ "mout_sclk_decon_eclk_c", DIV_MIF4, 0, 4),
+
+ /* DIV_MIF5 */
+ DIV(CLK_DIV_MIF_PRE, "div_mif_pre", "mout_bus_pll_div2", DIV_MIF5,
+ 0, 3),
+};
+
+static struct samsung_gate_clock mif_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_MIF0 */
+ GATE(CLK_CLK2X_PHY1, "clk2x_phy1", "div_clk2x_phy", ENABLE_ACLK_MIF0,
+ 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CLK2X_PHY0, "clk2x_phy0", "div_clk2x_phy", ENABLE_ACLK_MIF0,
+ 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CLKM_PHY1, "clkm_phy1", "mout_clkm_phy_c", ENABLE_ACLK_MIF0,
+ 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CLKM_PHY0, "clkm_phy0", "mout_clkm_phy_c", ENABLE_ACLK_MIF0,
+ 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_RCLK_DREX1, "rclk_drex1", "oscclk", ENABLE_ACLK_MIF0,
+ 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_RCLK_DREX0, "rclk_drex0", "oscclk", ENABLE_ACLK_MIF0,
+ 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX1_TZ, "aclk_drex1_tz", "div_aclk_drex1",
+ ENABLE_ACLK_MIF0, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX0_TZ, "aclk_drex0_tz", "div_aclk_drex0",
+ ENABLE_ACLK_MIF0, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX1_PEREV, "aclk_drex1_perev", "div_aclk_drex1",
+ ENABLE_ACLK_MIF0, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX0_PEREV, "aclk_drex0_perev", "div_aclk_drex0",
+ ENABLE_ACLK_MIF0, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX1_MEMIF, "aclk_drex1_memif", "div_aclk_drex1",
+ ENABLE_ACLK_MIF0, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX0_MEMIF, "aclk_drex0_memif", "div_aclk_drex0",
+ ENABLE_ACLK_MIF0, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX1_SCH, "aclk_drex1_sch", "div_aclk_drex1",
+ ENABLE_ACLK_MIF0, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX0_SCH, "aclk_drex0_sch", "div_aclk_drex0",
+ ENABLE_ACLK_MIF0, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX1_BUSIF, "aclk_drex1_busif", "div_aclk_drex1",
+ ENABLE_ACLK_MIF0, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX0_BUSIF, "aclk_drex0_busif", "div_aclk_drex0",
+ ENABLE_ACLK_MIF0, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX1_BUSIF_RD, "aclk_drex1_busif_rd", "div_aclk_drex1",
+ ENABLE_ACLK_MIF0, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX0_BUSIF_RD, "aclk_drex0_busif_rd", "div_aclk_drex0",
+ ENABLE_ACLK_MIF0, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX1, "aclk_drex1", "div_aclk_drex1",
+ ENABLE_ACLK_MIF0, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DREX0, "aclk_drex0", "div_aclk_drex0",
+ ENABLE_ACLK_MIF0, 1, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_MIF1 */
+ GATE(CLK_ACLK_ASYNCAXIS_MIF_IMEM, "aclk_asyncaxis_mif_imem",
+ "div_aclk_mif_200", ENABLE_ACLK_MIF1, 28,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_NOC_P_CCI, "aclk_asyncaxis_noc_p_cci",
+ "div_aclk_mif_200", ENABLE_ACLK_MIF1,
+ 27, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_NOC_P_CCI, "aclk_asyncaxim_noc_p_cci",
+ "div_aclk_mif_133", ENABLE_ACLK_MIF1,
+ 26, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_CP1, "aclk_asyncaxis_cp1",
+ "div_aclk_mifnm_200", ENABLE_ACLK_MIF1,
+ 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_CP1, "aclk_asyncaxim_cp1",
+ "div_aclk_drex1", ENABLE_ACLK_MIF1,
+ 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_CP0, "aclk_asyncaxis_cp0",
+ "div_aclk_mifnm_200", ENABLE_ACLK_MIF1,
+ 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_CP0, "aclk_asyncaxim_cp0",
+ "div_aclk_drex0", ENABLE_ACLK_MIF1,
+ 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DREX1_3, "aclk_asyncaxis_drex1_3",
+ "div_aclk_mif_133", ENABLE_ACLK_MIF1,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DREX1_3, "aclk_asyncaxim_drex1_3",
+ "div_aclk_drex1", ENABLE_ACLK_MIF1,
+ 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DREX1_1, "aclk_asyncaxis_drex1_1",
+ "div_aclk_mif_133", ENABLE_ACLK_MIF1,
+ 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DREX1_1, "aclk_asyncaxim_drex1_1",
+ "div_aclk_drex1", ENABLE_ACLK_MIF1,
+ 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DREX1_0, "aclk_asyncaxis_drex1_0",
+ "div_aclk_mif_133", ENABLE_ACLK_MIF1,
+ 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DREX1_0, "aclk_asyncaxim_drex1_0",
+ "div_aclk_drex1", ENABLE_ACLK_MIF1,
+ 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DREX0_3, "aclk_asyncaxis_drex0_3",
+ "div_aclk_mif_133", ENABLE_ACLK_MIF1,
+ 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DREX0_3, "aclk_asyncaxim_drex0_3",
+ "div_aclk_drex0", ENABLE_ACLK_MIF1,
+ 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DREX0_1, "aclk_asyncaxis_drex0_1",
+ "div_aclk_mif_133", ENABLE_ACLK_MIF1,
+ 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DREX0_1, "aclk_asyncaxim_drex0_1",
+ "div_aclk_drex0", ENABLE_ACLK_MIF1,
+ 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DREX0_0, "aclk_asyncaxis_drex0_0",
+ "div_aclk_mif_133", ENABLE_ACLK_MIF1,
+ 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DREX0_0, "aclk_asyncaxim_drex0_0",
+ "div_aclk_drex0", ENABLE_ACLK_MIF1,
+ 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_MIF2P, "aclk_ahb2apb_mif2p", "div_aclk_mif_133",
+ ENABLE_ACLK_MIF1, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_MIF1P, "aclk_ahb2apb_mif1p", "div_aclk_mif_133",
+ ENABLE_ACLK_MIF1, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_MIF0P, "aclk_ahb2apb_mif0p", "div_aclk_mif_133",
+ ENABLE_ACLK_MIF1, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_IXIU_CCI, "aclk_ixiu_cci", "div_aclk_mif_400",
+ ENABLE_ACLK_MIF1, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_MIFSFRX, "aclk_xiu_mifsfrx", "div_aclk_mif_200",
+ ENABLE_ACLK_MIF1, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MIFNP_133, "aclk_mifnp_133", "div_aclk_mif_133",
+ ENABLE_ACLK_MIF1, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MIFNM_200, "aclk_mifnm_200", "div_aclk_mifnm_200",
+ ENABLE_ACLK_MIF1, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MIFND_133, "aclk_mifnd_133", "div_aclk_mifnd_133",
+ ENABLE_ACLK_MIF1, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MIFND_400, "aclk_mifnd_400", "div_aclk_mif_400",
+ ENABLE_ACLK_MIF1, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CCI, "aclk_cci", "div_aclk_mif_400", ENABLE_ACLK_MIF1,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_MIF2 */
+ GATE(CLK_ACLK_MIFND_266, "aclk_mifnd_266", "div_aclk_mif_266",
+ ENABLE_ACLK_MIF2, 20, 0, 0),
+ GATE(CLK_ACLK_PPMU_DREX1S3, "aclk_ppmu_drex1s3", "div_aclk_drex1",
+ ENABLE_ACLK_MIF2, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PPMU_DREX1S1, "aclk_ppmu_drex1s1", "div_aclk_drex1",
+ ENABLE_ACLK_MIF2, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PPMU_DREX1S0, "aclk_ppmu_drex1s0", "div_aclk_drex1",
+ ENABLE_ACLK_MIF2, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PPMU_DREX0S3, "aclk_ppmu_drex0s3", "div_aclk_drex0",
+ ENABLE_ACLK_MIF2, 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PPMU_DREX0S1, "aclk_ppmu_drex0s1", "div_aclk_drex0",
+ ENABLE_ACLK_MIF2, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PPMU_DREX0S0, "aclk_ppmu_drex0s0", "div_aclk_drex0",
+ ENABLE_ACLK_MIF2, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIDS_CCI_MIFSFRX, "aclk_axids_cci_mifsfrx",
+ "div_aclk_mif_200", ENABLE_ACLK_MIF2, 7,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXISYNCDNS_CCI, "aclk_axisyncdns_cci",
+ "div_aclk_mif_400", ENABLE_ACLK_MIF2,
+ 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXISYNCDN_CCI, "aclk_axisyncdn_cci", "div_aclk_mif_400",
+ ENABLE_ACLK_MIF2, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXISYNCDN_NOC_D, "aclk_axisyncdn_noc_d",
+ "div_aclk_mif_200", ENABLE_ACLK_MIF2,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_MIF_CSSYS, "aclk_asyncapbs_mif_cssys",
+ "div_aclk_mifnd_133", ENABLE_ACLK_MIF2, 0, 0, 0),
+
+ /* ENABLE_ACLK_MIF3 */
+ GATE(CLK_ACLK_BUS2_400, "aclk_bus2_400", "div_aclk_bus2_400",
+ ENABLE_ACLK_MIF3, 4,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_DISP_333, "aclk_disp_333", "div_aclk_disp_333",
+ ENABLE_ACLK_MIF3, 1,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_ACLK_CPIF_200, "aclk_cpif_200", "div_aclk_cpif_200",
+ ENABLE_ACLK_MIF3, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+
+ /* ENABLE_PCLK_MIF */
+ GATE(CLK_PCLK_PPMU_DREX1S3, "pclk_ppmu_drex1s3", "div_aclk_drex1",
+ ENABLE_PCLK_MIF, 29, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PPMU_DREX1S1, "pclk_ppmu_drex1s1", "div_aclk_drex1",
+ ENABLE_PCLK_MIF, 28, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PPMU_DREX1S0, "pclk_ppmu_drex1s0", "div_aclk_drex1",
+ ENABLE_PCLK_MIF, 27, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PPMU_DREX0S3, "pclk_ppmu_drex0s3", "div_aclk_drex0",
+ ENABLE_PCLK_MIF, 26, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PPMU_DREX0S1, "pclk_ppmu_drex0s1", "div_aclk_drex0",
+ ENABLE_PCLK_MIF, 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PPMU_DREX0S0, "pclk_ppmu_drex0s0", "div_aclk_drex0",
+ ENABLE_PCLK_MIF, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_NOC_P_CCI, "pclk_asyncaxi_noc_p_cci",
+ "div_aclk_mif_133", ENABLE_PCLK_MIF, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_CP1, "pclk_asyncaxi_cp1", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 19, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_CP0, "pclk_asyncaxi_cp0", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 18, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DREX1_3, "pclk_asyncaxi_drex1_3",
+ "div_aclk_mif_133", ENABLE_PCLK_MIF, 17, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DREX1_1, "pclk_asyncaxi_drex1_1",
+ "div_aclk_mif_133", ENABLE_PCLK_MIF, 16, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DREX1_0, "pclk_asyncaxi_drex1_0",
+ "div_aclk_mif_133", ENABLE_PCLK_MIF, 15, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DREX0_3, "pclk_asyncaxi_drex0_3",
+ "div_aclk_mif_133", ENABLE_PCLK_MIF, 14, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DREX0_1, "pclk_asyncaxi_drex0_1",
+ "div_aclk_mif_133", ENABLE_PCLK_MIF, 13, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DREX0_0, "pclk_asyncaxi_drex0_0",
+ "div_aclk_mif_133", ENABLE_PCLK_MIF, 12, 0, 0),
+ GATE(CLK_PCLK_MIFSRVND_133, "pclk_mifsrvnd_133", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 11, 0, 0),
+ GATE(CLK_PCLK_PMU_MIF, "pclk_pmu_mif", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_MIF, "pclk_sysreg_mif", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_GPIO_ALIVE, "pclk_gpio_alive", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ABB, "pclk_abb", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 7, 0, 0),
+ GATE(CLK_PCLK_PMU_APBIF, "pclk_pmu_apbif", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_DDR_PHY1, "pclk_ddr_phy1", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 5, 0, 0),
+ GATE(CLK_PCLK_DREX1, "pclk_drex1", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_DDR_PHY0, "pclk_ddr_phy0", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 2, 0, 0),
+ GATE(CLK_PCLK_DREX0, "pclk_drex0", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_MIF_SECURE_DREX0_TZ */
+ GATE(CLK_PCLK_DREX0_TZ, "pclk_drex0_tz", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF_SECURE_DREX0_TZ, 0, 0, 0),
+
+ /* ENABLE_PCLK_MIF_SECURE_DREX1_TZ */
+ GATE(CLK_PCLK_DREX1_TZ, "pclk_drex1_tz", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF_SECURE_DREX1_TZ, 0, 0, 0),
+
+ /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */
+ GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT, 0, 0, 0),
+
+ /* ENABLE_PCLK_MIF_SECURE_RTC */
+ GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133",
+ ENABLE_PCLK_MIF_SECURE_RTC, 0, 0, 0),
+
+ /* ENABLE_SCLK_MIF */
+ GATE(CLK_SCLK_DSIM1_DISP, "sclk_dsim1_disp", "div_sclk_dsim1",
+ ENABLE_SCLK_MIF, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_DECON_TV_VCLK_DISP, "sclk_decon_tv_vclk_disp",
+ "div_sclk_decon_tv_vclk", ENABLE_SCLK_MIF,
+ 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_DSIM0_DISP, "sclk_dsim0_disp", "div_sclk_dsim0",
+ ENABLE_SCLK_MIF, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_DSD_DISP, "sclk_dsd_disp", "div_sclk_dsd",
+ ENABLE_SCLK_MIF, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_DECON_TV_ECLK_DISP, "sclk_decon_tv_eclk_disp",
+ "div_sclk_decon_tv_eclk", ENABLE_SCLK_MIF,
+ 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_DECON_VCLK_DISP, "sclk_decon_vclk_disp",
+ "div_sclk_decon_vclk", ENABLE_SCLK_MIF,
+ 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_DECON_ECLK_DISP, "sclk_decon_eclk_disp",
+ "div_sclk_decon_eclk", ENABLE_SCLK_MIF,
+ 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_HPM_MIF, "sclk_hpm_mif", "div_sclk_hpm_mif",
+ ENABLE_SCLK_MIF, 4,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MFC_PLL, "sclk_mfc_pll", "mout_mfc_pll_div2",
+ ENABLE_SCLK_MIF, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_BUS_PLL, "sclk_bus_pll", "mout_bus_pll_div2",
+ ENABLE_SCLK_MIF, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_BUS_PLL_APOLLO, "sclk_bus_pll_apollo", "sclk_bus_pll",
+ ENABLE_SCLK_MIF, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_BUS_PLL_ATLAS, "sclk_bus_pll_atlas", "sclk_bus_pll",
+ ENABLE_SCLK_MIF, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info mif_cmu_info __initdata = {
+ .pll_clks = mif_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(mif_pll_clks),
+ .mux_clks = mif_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mif_mux_clks),
+ .div_clks = mif_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mif_div_clks),
+ .gate_clks = mif_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mif_gate_clks),
+ .fixed_factor_clks = mif_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(mif_fixed_factor_clks),
+ .nr_clk_ids = MIF_NR_CLK,
+ .clk_regs = mif_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mif_clk_regs),
+};
+
+static void __init exynos5433_cmu_mif_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &mif_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_mif, "samsung,exynos5433-cmu-mif",
+ exynos5433_cmu_mif_init);
+
+/*
+ * Register offset definitions for CMU_PERIC
+ */
+#define DIV_PERIC 0x0600
+#define DIV_STAT_PERIC 0x0700
+#define ENABLE_ACLK_PERIC 0x0800
+#define ENABLE_PCLK_PERIC0 0x0900
+#define ENABLE_PCLK_PERIC1 0x0904
+#define ENABLE_SCLK_PERIC 0x0a00
+#define ENABLE_IP_PERIC0 0x0b00
+#define ENABLE_IP_PERIC1 0x0b04
+#define ENABLE_IP_PERIC2 0x0b08
+
+static unsigned long peric_clk_regs[] __initdata = {
+ DIV_PERIC,
+ DIV_STAT_PERIC,
+ ENABLE_ACLK_PERIC,
+ ENABLE_PCLK_PERIC0,
+ ENABLE_PCLK_PERIC1,
+ ENABLE_SCLK_PERIC,
+ ENABLE_IP_PERIC0,
+ ENABLE_IP_PERIC1,
+ ENABLE_IP_PERIC2,
+};
+
+static struct samsung_div_clock peric_div_clks[] __initdata = {
+ /* DIV_PERIC */
+ DIV(CLK_DIV_SCLK_SCI, "div_sclk_sci", "oscclk", DIV_PERIC, 4, 4),
+ DIV(CLK_DIV_SCLK_SC_IN, "div_sclk_sc_in", "oscclk", DIV_PERIC, 0, 4),
+};
+
+static struct samsung_gate_clock peric_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_PERIC */
+ GATE(CLK_ACLK_AHB2APB_PERIC2P, "aclk_ahb2apb_peric2p", "aclk_peric_66",
+ ENABLE_ACLK_PERIC, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_PERIC1P, "aclk_ahb2apb_peric1p", "aclk_peric_66",
+ ENABLE_ACLK_PERIC, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_PERIC0P, "aclk_ahb2apb_peric0p", "aclk_peric_66",
+ ENABLE_ACLK_PERIC, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PERICNP_66, "aclk_pericnp_66", "aclk_peric_66",
+ ENABLE_ACLK_PERIC, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_PERIC0 */
+ GATE(CLK_PCLK_SCI, "pclk_sci", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 31, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_GPIO_FINGER, "pclk_gpio_finger", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 30, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_GPIO_ESE, "pclk_gpio_ese", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 29, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PWM, "pclk_pwm", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 28, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_SPDIF, "pclk_spdif", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 26, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_PCM1, "pclk_pcm1", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 25, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2S1, "pclk_i2s1", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 24, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_SPI2, "pclk_spi2", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 23, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_SPI1, "pclk_spi1", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 22, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_SPI0, "pclk_spi0", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_ADCIF, "pclk_adcif", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 20, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_GPIO_TOUCH, "pclk_gpio_touch", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_GPIO_NFC, "pclk_gpio_nfc", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_GPIO_PERIC, "pclk_gpio_peric", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_PERIC, "pclk_pmu_peric", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_SYSREG_PERIC, "pclk_sysreg_peric", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 15,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_UART2, "pclk_uart2", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 14, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_UART1, "pclk_uart1", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 13, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_UART0, "pclk_uart0", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 12, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C3, "pclk_hsi2c3", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 11, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C2, "pclk_hsi2c2", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 10, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C1, "pclk_hsi2c1", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C0, "pclk_hsi2c0", "aclk_peric_66",
+ ENABLE_PCLK_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C7, "pclk_i2c7", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C6, "pclk_i2c6", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C5, "pclk_i2c5", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C4, "pclk_i2c4", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C3, "pclk_i2c3", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C2, "pclk_i2c2", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C1, "pclk_i2c1", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_I2C0, "pclk_i2c0", "aclk_peric_66", ENABLE_PCLK_PERIC0,
+ 0, CLK_SET_RATE_PARENT, 0),
+
+ /* ENABLE_PCLK_PERIC1 */
+ GATE(CLK_PCLK_SPI4, "pclk_spi4", "aclk_peric_66", ENABLE_PCLK_PERIC1,
+ 9, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_SPI3, "pclk_spi3", "aclk_peric_66", ENABLE_PCLK_PERIC1,
+ 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C11, "pclk_hsi2c11", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C10, "pclk_hsi2c10", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C9, "pclk_hsi2c9", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C8, "pclk_hsi2c8", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C7, "pclk_hsi2c7", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C6, "pclk_hsi2c6", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C5, "pclk_hsi2c5", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_PCLK_HSI2C4, "pclk_hsi2c4", "aclk_peric_66",
+ ENABLE_PCLK_PERIC1, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* ENABLE_SCLK_PERIC */
+ GATE(CLK_SCLK_IOCLK_SPI4, "sclk_ioclk_spi4", "ioclk_spi4_clk_in",
+ ENABLE_SCLK_PERIC, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_IOCLK_SPI3, "sclk_ioclk_spi3", "ioclk_spi3_clk_in",
+ ENABLE_SCLK_PERIC, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI4, "sclk_spi4", "sclk_spi4_peric", ENABLE_SCLK_PERIC,
+ 19, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI3, "sclk_spi3", "sclk_spi3_peric", ENABLE_SCLK_PERIC,
+ 18, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SCI, "sclk_sci", "div_sclk_sci", ENABLE_SCLK_PERIC,
+ 17, 0, 0),
+ GATE(CLK_SCLK_SC_IN, "sclk_sc_in", "div_sclk_sc_in", ENABLE_SCLK_PERIC,
+ 16, 0, 0),
+ GATE(CLK_SCLK_PWM, "sclk_pwm", "oscclk", ENABLE_SCLK_PERIC, 15, 0, 0),
+ GATE(CLK_SCLK_IOCLK_SPI2, "sclk_ioclk_spi2", "ioclk_spi2_clk_in",
+ ENABLE_SCLK_PERIC, 13, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_IOCLK_SPI1, "sclk_ioclk_spi1", "ioclk_spi1_clk_in",
+ ENABLE_SCLK_PERIC, 12,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_IOCLK_SPI0, "sclk_ioclk_spi0", "ioclk_spi0_clk_in",
+ ENABLE_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_IOCLK_I2S1_BCLK, "sclk_ioclk_i2s1_bclk",
+ "ioclk_i2s1_bclk_in", ENABLE_SCLK_PERIC, 10,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "sclk_spdif_peric",
+ ENABLE_SCLK_PERIC, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric",
+ ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric",
+ ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC,
+ 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC,
+ 4, CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "sclk_spi0_peric", ENABLE_SCLK_PERIC,
+ 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "sclk_uart2_peric",
+ ENABLE_SCLK_PERIC, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "sclk_uart1_peric",
+ ENABLE_SCLK_PERIC, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "sclk_uart0_peric",
+ ENABLE_SCLK_PERIC, 0, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct samsung_cmu_info peric_cmu_info __initdata = {
+ .div_clks = peric_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric_div_clks),
+ .gate_clks = peric_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric_gate_clks),
+ .nr_clk_ids = PERIC_NR_CLK,
+ .clk_regs = peric_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric_clk_regs),
+};
+
+static void __init exynos5433_cmu_peric_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &peric_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos5433_cmu_peric, "samsung,exynos5433-cmu-peric",
+ exynos5433_cmu_peric_init);
+
+/*
+ * Register offset definitions for CMU_PERIS
+ */
+#define ENABLE_ACLK_PERIS 0x0800
+#define ENABLE_PCLK_PERIS 0x0900
+#define ENABLE_PCLK_PERIS_SECURE_TZPC 0x0904
+#define ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF 0x0908
+#define ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF 0x090c
+#define ENABLE_PCLK_PERIS_SECURE_TOPRTC 0x0910
+#define ENABLE_PCLK_PERIS_SECURE_CUSTOM_EFUSE_APBIF 0x0914
+#define ENABLE_PCLK_PERIS_SECURE_ANTIRBK_CNT_APBIF 0x0918
+#define ENABLE_PCLK_PERIS_SECURE_OTP_CON_APBIF 0x091c
+#define ENABLE_SCLK_PERIS 0x0a00
+#define ENABLE_SCLK_PERIS_SECURE_SECKEY 0x0a04
+#define ENABLE_SCLK_PERIS_SECURE_CHIPID 0x0a08
+#define ENABLE_SCLK_PERIS_SECURE_TOPRTC 0x0a0c
+#define ENABLE_SCLK_PERIS_SECURE_CUSTOM_EFUSE 0x0a10
+#define ENABLE_SCLK_PERIS_SECURE_ANTIRBK_CNT 0x0a14
+#define ENABLE_SCLK_PERIS_SECURE_OTP_CON 0x0a18
+#define ENABLE_IP_PERIS0 0x0b00
+#define ENABLE_IP_PERIS1 0x0b04
+#define ENABLE_IP_PERIS_SECURE_TZPC 0x0b08
+#define ENABLE_IP_PERIS_SECURE_SECKEY 0x0b0c
+#define ENABLE_IP_PERIS_SECURE_CHIPID 0x0b10
+#define ENABLE_IP_PERIS_SECURE_TOPRTC 0x0b14
+#define ENABLE_IP_PERIS_SECURE_CUSTOM_EFUSE 0x0b18
+#define ENABLE_IP_PERIS_SECURE_ANTIRBK_CNT 0x0b1c
+#define ENABLE_IP_PERIS_SECURE_OTP_CON 0x0b20
+
+static unsigned long peris_clk_regs[] __initdata = {
+ ENABLE_ACLK_PERIS,
+ ENABLE_PCLK_PERIS,
+ ENABLE_PCLK_PERIS_SECURE_TZPC,
+ ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF,
+ ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF,
+ ENABLE_PCLK_PERIS_SECURE_TOPRTC,
+ ENABLE_PCLK_PERIS_SECURE_CUSTOM_EFUSE_APBIF,
+ ENABLE_PCLK_PERIS_SECURE_ANTIRBK_CNT_APBIF,
+ ENABLE_PCLK_PERIS_SECURE_OTP_CON_APBIF,
+ ENABLE_SCLK_PERIS,
+ ENABLE_SCLK_PERIS_SECURE_SECKEY,
+ ENABLE_SCLK_PERIS_SECURE_CHIPID,
+ ENABLE_SCLK_PERIS_SECURE_TOPRTC,
+ ENABLE_SCLK_PERIS_SECURE_CUSTOM_EFUSE,
+ ENABLE_SCLK_PERIS_SECURE_ANTIRBK_CNT,
+ ENABLE_SCLK_PERIS_SECURE_OTP_CON,
+ ENABLE_IP_PERIS0,
+ ENABLE_IP_PERIS1,
+ ENABLE_IP_PERIS_SECURE_TZPC,
+ ENABLE_IP_PERIS_SECURE_SECKEY,
+ ENABLE_IP_PERIS_SECURE_CHIPID,
+ ENABLE_IP_PERIS_SECURE_TOPRTC,
+ ENABLE_IP_PERIS_SECURE_CUSTOM_EFUSE,
+ ENABLE_IP_PERIS_SECURE_ANTIRBK_CNT,
+ ENABLE_IP_PERIS_SECURE_OTP_CON,
+};
+
+static struct samsung_gate_clock peris_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_PERIS */
+ GATE(CLK_ACLK_AHB2APB_PERIS1P, "aclk_ahb2apb_peris1p", "aclk_peris_66",
+ ENABLE_ACLK_PERIS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_PERIS0P, "aclk_ahb2apb_peris0p", "aclk_peris_66",
+ ENABLE_ACLK_PERIS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PERISNP_66, "aclk_perisnp_66", "aclk_peris_66",
+ ENABLE_ACLK_PERIS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_PERIS */
+ GATE(CLK_PCLK_HPM_APBIF, "pclk_hpm_apbif", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 30, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_TMU1_APBIF, "pclk_tmu1_apbif", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_TMU0_APBIF, "pclk_tmu0_apbif", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_PERIS, "pclk_pmu_peris", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_PERIS, "pclk_sysreg_peris", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_CMU_TOP_APBIF, "pclk_cmu_top_apbif", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_WDT_APOLLO, "pclk_wdt_apollo", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_WDT_ATLAS, "pclk_wdt_atlas", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_MCT, "pclk_mct", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_HDMI_CEC, "pclk_hdmi_cec", "aclk_peris_66",
+ ENABLE_PCLK_PERIS, 14, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_PERIS_SECURE_TZPC */
+ GATE(CLK_PCLK_TZPC12, "pclk_tzpc12", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 12, 0, 0),
+ GATE(CLK_PCLK_TZPC11, "pclk_tzpc11", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 11, 0, 0),
+ GATE(CLK_PCLK_TZPC10, "pclk_tzpc10", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 10, 0, 0),
+ GATE(CLK_PCLK_TZPC9, "pclk_tzpc9", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 9, 0, 0),
+ GATE(CLK_PCLK_TZPC8, "pclk_tzpc8", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 8, 0, 0),
+ GATE(CLK_PCLK_TZPC7, "pclk_tzpc7", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 7, 0, 0),
+ GATE(CLK_PCLK_TZPC6, "pclk_tzpc6", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 6, 0, 0),
+ GATE(CLK_PCLK_TZPC5, "pclk_tzpc5", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 5, 0, 0),
+ GATE(CLK_PCLK_TZPC4, "pclk_tzpc4", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 4, 0, 0),
+ GATE(CLK_PCLK_TZPC3, "pclk_tzpc3", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 3, 0, 0),
+ GATE(CLK_PCLK_TZPC2, "pclk_tzpc2", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 2, 0, 0),
+ GATE(CLK_PCLK_TZPC1, "pclk_tzpc1", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 1, 0, 0),
+ GATE(CLK_PCLK_TZPC0, "pclk_tzpc0", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TZPC, 0, 0, 0),
+
+ /* ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF */
+ GATE(CLK_PCLK_SECKEY_APBIF, "pclk_seckey_apbif", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF, 0, 0, 0),
+
+ /* ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF */
+ GATE(CLK_PCLK_CHIPID_APBIF, "pclk_chipid_apbif", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF, 0, 0, 0),
+
+ /* ENABLE_PCLK_PERIS_SECURE_TOPRTC */
+ GATE(CLK_PCLK_TOPRTC, "pclk_toprtc", "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_TOPRTC, 0, 0, 0),
+
+ /* ENABLE_PCLK_PERIS_SECURE_CUSTOM_EFUSE_APBIF */
+ GATE(CLK_PCLK_CUSTOM_EFUSE_APBIF, "pclk_custom_efuse_apbif",
+ "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_CUSTOM_EFUSE_APBIF, 0, 0, 0),
+
+ /* ENABLE_PCLK_PERIS_SECURE_ANTIRBK_CNT_APBIF */
+ GATE(CLK_PCLK_ANTIRBK_CNT_APBIF, "pclk_antirbk_cnt_apbif",
+ "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_ANTIRBK_CNT_APBIF, 0, 0, 0),
+
+ /* ENABLE_PCLK_PERIS_SECURE_OTP_CON_APBIF */
+ GATE(CLK_PCLK_OTP_CON_APBIF, "pclk_otp_con_apbif",
+ "aclk_peris_66",
+ ENABLE_PCLK_PERIS_SECURE_OTP_CON_APBIF, 0, 0, 0),
+
+ /* ENABLE_SCLK_PERIS */
+ GATE(CLK_SCLK_ASV_TB, "sclk_asv_tb", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS, 10, 0, 0),
+ GATE(CLK_SCLK_TMU1, "sclk_tmu1", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS, 4, 0, 0),
+ GATE(CLK_SCLK_TMU0, "sclk_tmu0", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS, 3, 0, 0),
+
+ /* ENABLE_SCLK_PERIS_SECURE_SECKEY */
+ GATE(CLK_SCLK_SECKEY, "sclk_seckey", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS_SECURE_SECKEY, 0, 0, 0),
+
+ /* ENABLE_SCLK_PERIS_SECURE_CHIPID */
+ GATE(CLK_SCLK_CHIPID, "sclk_chipid", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
+
+ /* ENABLE_SCLK_PERIS_SECURE_TOPRTC */
+ GATE(CLK_SCLK_TOPRTC, "sclk_toprtc", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS_SECURE_TOPRTC, 0, 0, 0),
+
+ /* ENABLE_SCLK_PERIS_SECURE_CUSTOM_EFUSE */
+ GATE(CLK_SCLK_CUSTOM_EFUSE, "sclk_custom_efuse", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS_SECURE_CUSTOM_EFUSE, 0, 0, 0),
+
+ /* ENABLE_SCLK_PERIS_SECURE_ANTIRBK_CNT */
+ GATE(CLK_SCLK_ANTIRBK_CNT, "sclk_antirbk_cnt", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS_SECURE_ANTIRBK_CNT, 0, 0, 0),
+
+ /* ENABLE_SCLK_PERIS_SECURE_OTP_CON */
+ GATE(CLK_SCLK_OTP_CON, "sclk_otp_con", "oscclk_efuse_common",
+ ENABLE_SCLK_PERIS_SECURE_OTP_CON, 0, 0, 0),
+};
+
+static struct samsung_cmu_info peris_cmu_info __initdata = {
+ .gate_clks = peris_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
+ .nr_clk_ids = PERIS_NR_CLK,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+};
+
+static void __init exynos5433_cmu_peris_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &peris_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos5433_cmu_peris, "samsung,exynos5433-cmu-peris",
+ exynos5433_cmu_peris_init);
+
+/*
+ * Register offset definitions for CMU_FSYS
+ */
+#define MUX_SEL_FSYS0 0x0200
+#define MUX_SEL_FSYS1 0x0204
+#define MUX_SEL_FSYS2 0x0208
+#define MUX_SEL_FSYS3 0x020c
+#define MUX_SEL_FSYS4 0x0210
+#define MUX_ENABLE_FSYS0 0x0300
+#define MUX_ENABLE_FSYS1 0x0304
+#define MUX_ENABLE_FSYS2 0x0308
+#define MUX_ENABLE_FSYS3 0x030c
+#define MUX_ENABLE_FSYS4 0x0310
+#define MUX_STAT_FSYS0 0x0400
+#define MUX_STAT_FSYS1 0x0404
+#define MUX_STAT_FSYS2 0x0408
+#define MUX_STAT_FSYS3 0x040c
+#define MUX_STAT_FSYS4 0x0410
+#define MUX_IGNORE_FSYS2 0x0508
+#define MUX_IGNORE_FSYS3 0x050c
+#define ENABLE_ACLK_FSYS0 0x0800
+#define ENABLE_ACLK_FSYS1 0x0804
+#define ENABLE_PCLK_FSYS 0x0900
+#define ENABLE_SCLK_FSYS 0x0a00
+#define ENABLE_IP_FSYS0 0x0b00
+#define ENABLE_IP_FSYS1 0x0b04
+
+/* list of parent clocks */
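+/*
+ * The *_user muxes select between the external input (from CMU_TOP or a
+ * PHY) and oscclk, so the FSYS leaf clocks keep a valid parent while the
+ * external source is gated.
+ */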
+PNAME(mout_sclk_ufs_mphy_user_p) = { "oscclk", "sclk_ufs_mphy", };
+PNAME(mout_aclk_fsys_200_user_p) = { "oscclk", "div_aclk_fsys_200", };
+PNAME(mout_sclk_pcie_100_user_p) = { "oscclk", "sclk_pcie_100_fsys", };
+PNAME(mout_sclk_ufsunipro_user_p) = { "oscclk", "sclk_ufsunipro_fsys", };
+PNAME(mout_sclk_mmc2_user_p) = { "oscclk", "sclk_mmc2_fsys", };
+PNAME(mout_sclk_mmc1_user_p) = { "oscclk", "sclk_mmc1_fsys", };
+PNAME(mout_sclk_mmc0_user_p) = { "oscclk", "sclk_mmc0_fsys", };
+PNAME(mout_sclk_usbhost30_user_p) = { "oscclk", "sclk_usbhost30_fsys",};
+PNAME(mout_sclk_usbdrd30_user_p) = { "oscclk", "sclk_usbdrd30_fsys", };
+
+PNAME(mout_phyclk_usbhost30_uhost30_pipe_pclk_user_p)
+ = { "oscclk", "phyclk_usbhost30_uhost30_pipe_pclk_phy", };
+PNAME(mout_phyclk_usbhost30_uhost30_phyclock_user_p)
+ = { "oscclk", "phyclk_usbhost30_uhost30_phyclock_phy", };
+PNAME(mout_phyclk_usbhost20_phy_hsic1_p)
+ = { "oscclk", "phyclk_usbhost20_phy_hsic1_phy", };
+PNAME(mout_phyclk_usbhost20_phy_clk48mohci_user_p)
+ = { "oscclk", "phyclk_usbhost20_phy_clk48mohci_phy", };
+PNAME(mout_phyclk_usbhost20_phy_phyclock_user_p)
+ = { "oscclk", "phyclk_usbhost20_phy_phyclock_phy", };
+PNAME(mout_phyclk_usbhost20_phy_freeclk_user_p)
+ = { "oscclk", "phyclk_usbhost20_phy_freeclk_phy", };
+PNAME(mout_phyclk_usbdrd30_udrd30_pipe_pclk_p)
+ = { "oscclk", "phyclk_usbdrd30_udrd30_pipe_pclk_phy", };
+PNAME(mout_phyclk_usbdrd30_udrd30_phyclock_user_p)
+ = { "oscclk", "phyclk_usbdrd30_udrd30_phyclock_phy", };
+PNAME(mout_phyclk_ufs_rx1_symbol_user_p)
+ = { "oscclk", "phyclk_ufs_rx1_symbol_phy", };
+PNAME(mout_phyclk_ufs_rx0_symbol_user_p)
+ = { "oscclk", "phyclk_ufs_rx0_symbol_phy", };
+PNAME(mout_phyclk_ufs_tx1_symbol_user_p)
+ = { "oscclk", "phyclk_ufs_tx1_symbol_phy", };
+PNAME(mout_phyclk_ufs_tx0_symbol_user_p)
+ = { "oscclk", "phyclk_ufs_tx0_symbol_phy", };
+PNAME(mout_phyclk_lli_mphy_to_ufs_user_p)
+ = { "oscclk", "phyclk_lli_mphy_to_ufs_phy", };
+PNAME(mout_sclk_mphy_p)
+ = { "mout_sclk_ufs_mphy_user",
+ "mout_phyclk_lli_mphy_to_ufs_user", };
+
+static unsigned long fsys_clk_regs[] __initdata = {
+ MUX_SEL_FSYS0,
+ MUX_SEL_FSYS1,
+ MUX_SEL_FSYS2,
+ MUX_SEL_FSYS3,
+ MUX_SEL_FSYS4,
+ MUX_ENABLE_FSYS0,
+ MUX_ENABLE_FSYS1,
+ MUX_ENABLE_FSYS2,
+ MUX_ENABLE_FSYS3,
+ MUX_ENABLE_FSYS4,
+ MUX_STAT_FSYS0,
+ MUX_STAT_FSYS1,
+ MUX_STAT_FSYS2,
+ MUX_STAT_FSYS3,
+ MUX_STAT_FSYS4,
+ MUX_IGNORE_FSYS2,
+ MUX_IGNORE_FSYS3,
+ ENABLE_ACLK_FSYS0,
+ ENABLE_ACLK_FSYS1,
+ ENABLE_PCLK_FSYS,
+ ENABLE_SCLK_FSYS,
+ ENABLE_IP_FSYS0,
+ ENABLE_IP_FSYS1,
+};
+
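+/*
+ * Clocks fed back from the USB, UFS and LLI PHY blocks have no parent
+ * inside CMU_FSYS, so they are modelled here as fixed-rate root clocks.
+ */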
+static struct samsung_fixed_rate_clock fsys_fixed_clks[] __initdata = {
+ /* PHY clocks from USBDRD30_PHY */
+ FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY,
+ "phyclk_usbdrd30_udrd30_phyclock_phy", NULL,
+ CLK_IS_ROOT, 60000000),
+ FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY,
+ "phyclk_usbdrd30_udrd30_pipe_pclk_phy", NULL,
+ CLK_IS_ROOT, 125000000),
+ /* PHY clocks from USBHOST30_PHY */
+ FRATE(CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY,
+ "phyclk_usbhost30_uhost30_phyclock_phy", NULL,
+ CLK_IS_ROOT, 60000000),
+ FRATE(CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY,
+ "phyclk_usbhost30_uhost30_pipe_pclk_phy", NULL,
+ CLK_IS_ROOT, 125000000),
+ /* PHY clocks from USBHOST20_PHY */
+ FRATE(CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY,
+ "phyclk_usbhost20_phy_freeclk_phy", NULL, CLK_IS_ROOT,
+ 60000000),
+ FRATE(CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY,
+ "phyclk_usbhost20_phy_phyclock_phy", NULL, CLK_IS_ROOT,
+ 60000000),
+ FRATE(CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY,
+ "phyclk_usbhost20_phy_clk48mohci_phy", NULL,
+ CLK_IS_ROOT, 48000000),
+ FRATE(CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY,
+ "phyclk_usbhost20_phy_hsic1_phy", NULL, CLK_IS_ROOT,
+ 60000000),
+ /* PHY clocks from UFS_PHY */
+ FRATE(CLK_PHYCLK_UFS_TX0_SYMBOL_PHY, "phyclk_ufs_tx0_symbol_phy",
+ NULL, CLK_IS_ROOT, 300000000),
+ FRATE(CLK_PHYCLK_UFS_RX0_SYMBOL_PHY, "phyclk_ufs_rx0_symbol_phy",
+ NULL, CLK_IS_ROOT, 300000000),
+ FRATE(CLK_PHYCLK_UFS_TX1_SYMBOL_PHY, "phyclk_ufs_tx1_symbol_phy",
+ NULL, CLK_IS_ROOT, 300000000),
+ FRATE(CLK_PHYCLK_UFS_RX1_SYMBOL_PHY, "phyclk_ufs_rx1_symbol_phy",
+ NULL, CLK_IS_ROOT, 300000000),
+ /* PHY clocks from LLI_PHY */
+ FRATE(CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY, "phyclk_lli_mphy_to_ufs_phy",
+ NULL, CLK_IS_ROOT, 26000000),
+};
+
+static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
+ /* MUX_SEL_FSYS0 */
+ MUX(CLK_MOUT_SCLK_UFS_MPHY_USER, "mout_sclk_ufs_mphy_user",
+ mout_sclk_ufs_mphy_user_p, MUX_SEL_FSYS0, 4, 1),
+ MUX(CLK_MOUT_ACLK_FSYS_200_USER, "mout_aclk_fsys_200_user",
+ mout_aclk_fsys_200_user_p, MUX_SEL_FSYS0, 0, 1),
+
+ /* MUX_SEL_FSYS1 */
+ MUX(CLK_MOUT_SCLK_PCIE_100_USER, "mout_sclk_pcie_100_user",
+ mout_sclk_pcie_100_user_p, MUX_SEL_FSYS1, 28, 1),
+ MUX(CLK_MOUT_SCLK_UFSUNIPRO_USER, "mout_sclk_ufsunipro_user",
+ mout_sclk_ufsunipro_user_p, MUX_SEL_FSYS1, 24, 1),
+ MUX(CLK_MOUT_SCLK_MMC2_USER, "mout_sclk_mmc2_user",
+ mout_sclk_mmc2_user_p, MUX_SEL_FSYS1, 20, 1),
+ MUX(CLK_MOUT_SCLK_MMC1_USER, "mout_sclk_mmc1_user",
+ mout_sclk_mmc1_user_p, MUX_SEL_FSYS1, 16, 1),
+ MUX(CLK_MOUT_SCLK_MMC0_USER, "mout_sclk_mmc0_user",
+ mout_sclk_mmc0_user_p, MUX_SEL_FSYS1, 12, 1),
+ MUX(CLK_MOUT_SCLK_USBHOST30_USER, "mout_sclk_usbhost30_user",
+ mout_sclk_usbhost30_user_p, MUX_SEL_FSYS1, 4, 1),
+ MUX(CLK_MOUT_SCLK_USBDRD30_USER, "mout_sclk_usbdrd30_user",
+ mout_sclk_usbdrd30_user_p, MUX_SEL_FSYS1, 0, 1),
+
+ /* MUX_SEL_FSYS2 */
+ MUX(CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_USER,
+ "mout_phyclk_usbhost30_uhost30_pipe_pclk_user",
+ mout_phyclk_usbhost30_uhost30_pipe_pclk_user_p,
+ MUX_SEL_FSYS2, 28, 1),
+ MUX(CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_USER,
+ "mout_phyclk_usbhost30_uhost30_phyclock_user",
+ mout_phyclk_usbhost30_uhost30_phyclock_user_p,
+ MUX_SEL_FSYS2, 24, 1),
+ MUX(CLK_MOUT_PHYCLK_USBHOST20_PHY_HSIC1_USER,
+ "mout_phyclk_usbhost20_phy_hsic1",
+ mout_phyclk_usbhost20_phy_hsic1_p,
+ MUX_SEL_FSYS2, 20, 1),
+ MUX(CLK_MOUT_PHYCLK_USBHOST20_PHY_CLK48MOHCI_USER,
+ "mout_phyclk_usbhost20_phy_clk48mohci_user",
+ mout_phyclk_usbhost20_phy_clk48mohci_user_p,
+ MUX_SEL_FSYS2, 16, 1),
+ MUX(CLK_MOUT_PHYCLK_USBHOST20_PHY_PHYCLOCK_USER,
+ "mout_phyclk_usbhost20_phy_phyclock_user",
+ mout_phyclk_usbhost20_phy_phyclock_user_p,
+ MUX_SEL_FSYS2, 12, 1),
+ MUX(CLK_MOUT_PHYCLK_USBHOST20_PHY_PHY_FREECLK_USER,
+ "mout_phyclk_usbhost20_phy_freeclk_user",
+ mout_phyclk_usbhost20_phy_freeclk_user_p,
+ MUX_SEL_FSYS2, 8, 1),
+ MUX(CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_USER,
+ "mout_phyclk_usbdrd30_udrd30_pipe_pclk_user",
+ mout_phyclk_usbdrd30_udrd30_pipe_pclk_p,
+ MUX_SEL_FSYS2, 4, 1),
+ MUX(CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_USER,
+ "mout_phyclk_usbdrd30_udrd30_phyclock_user",
+ mout_phyclk_usbdrd30_udrd30_phyclock_user_p,
+ MUX_SEL_FSYS2, 0, 1),
+
+ /* MUX_SEL_FSYS3 */
+ MUX(CLK_MOUT_PHYCLK_UFS_RX1_SYMBOL_USER,
+ "mout_phyclk_ufs_rx1_symbol_user",
+ mout_phyclk_ufs_rx1_symbol_user_p,
+ MUX_SEL_FSYS3, 16, 1),
+ MUX(CLK_MOUT_PHYCLK_UFS_RX0_SYMBOL_USER,
+ "mout_phyclk_ufs_rx0_symbol_user",
+ mout_phyclk_ufs_rx0_symbol_user_p,
+ MUX_SEL_FSYS3, 12, 1),
+ MUX(CLK_MOUT_PHYCLK_UFS_TX1_SYMBOL_USER,
+ "mout_phyclk_ufs_tx1_symbol_user",
+ mout_phyclk_ufs_tx1_symbol_user_p,
+ MUX_SEL_FSYS3, 8, 1),
+ MUX(CLK_MOUT_PHYCLK_UFS_TX0_SYMBOL_USER,
+ "mout_phyclk_ufs_tx0_symbol_user",
+ mout_phyclk_ufs_tx0_symbol_user_p,
+ MUX_SEL_FSYS3, 4, 1),
+ MUX(CLK_MOUT_PHYCLK_LLI_MPHY_TO_UFS_USER,
+ "mout_phyclk_lli_mphy_to_ufs_user",
+ mout_phyclk_lli_mphy_to_ufs_user_p,
+ MUX_SEL_FSYS3, 0, 1),
+
+ /* MUX_SEL_FSYS4 */
+ MUX(CLK_MOUT_SCLK_MPHY, "mout_sclk_mphy", mout_sclk_mphy_p,
+ MUX_SEL_FSYS4, 0, 1),
+};
+
+static struct samsung_gate_clock fsys_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_FSYS0 */
+ GATE(CLK_ACLK_PCIE, "aclk_pcie", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_TSI, "aclk_tsi", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MMC1, "aclk_mmc1", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MMC0, "aclk_mmc0", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_UFS, "aclk_ufs", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_USBHOST20, "aclk_usbhost20", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_USBHOST30, "aclk_usbhost30", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_USBDRD30, "aclk_usbdrd30", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS0, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_FSYS1 */
+ GATE(CLK_ACLK_XIU_FSYSPX, "aclk_xiu_fsyspx", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 27, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB_USBLINKH1, "aclk_ahb_usblinkh1",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 26, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_PDMA1, "aclk_smmu_pdma1", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_PCIE, "aclk_bts_pcie", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 24, 0, 0),
+ GATE(CLK_ACLK_AXIUS_PDMA1, "aclk_axius_pdma1",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_PDMA0, "aclk_smmu_pdma0", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_UFS, "aclk_bts_ufs", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_USBHOST30, "aclk_bts_usbhost30",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 13, 0, 0),
+ GATE(CLK_ACLK_BTS_USBDRD30, "aclk_bts_usbdrd30",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 12, 0, 0),
+ GATE(CLK_ACLK_AXIUS_PDMA0, "aclk_axius_pdma0",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_USBHS, "aclk_axius_usbhs",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_FSYSSX, "aclk_axius_fsyssx",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_FSYSP, "aclk_ahb2apb_fsysp",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2AXI_USBHS, "aclk_ahb2axi_usbhs",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB_USBLINKH0, "aclk_ahb_usblinkh0",
+ "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
+ 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB_USBHS, "aclk_ahb_usbhs", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB_FSYSH, "aclk_ahb_fsysh", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_FSYSX, "aclk_xiu_fsysx", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_FSYSSX, "aclk_xiu_fsyssx", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_FSYSNP_200, "aclk_fsysnp_200", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_FSYSND_200, "aclk_fsysnd_200", "mout_aclk_fsys_200_user",
+ ENABLE_ACLK_FSYS1, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_FSYS */
+ GATE(CLK_PCLK_PCIE_CTRL, "pclk_pcie_ctrl", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 17, 0, 0),
+ GATE(CLK_PCLK_SMMU_PDMA1, "pclk_smmu_pdma1", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PCIE_PHY, "pclk_pcie_phy", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 14, 0, 0),
+ GATE(CLK_PCLK_BTS_PCIE, "pclk_bts_pcie", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 13, 0, 0),
+ GATE(CLK_PCLK_SMMU_PDMA0, "pclk_smmu_pdma0", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_UFS, "pclk_bts_ufs", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 5, 0, 0),
+ GATE(CLK_PCLK_BTS_USBHOST30, "pclk_bts_usbhost30",
+ "mout_aclk_fsys_200_user", ENABLE_PCLK_FSYS, 4, 0, 0),
+ GATE(CLK_PCLK_BTS_USBDRD30, "pclk_bts_usbdrd30",
+ "mout_aclk_fsys_200_user", ENABLE_PCLK_FSYS, 3, 0, 0),
+ GATE(CLK_PCLK_GPIO_FSYS, "pclk_gpio_fsys", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_FSYS, "pclk_pmu_fsys", "mout_aclk_fsys_200_user",
+ ENABLE_PCLK_FSYS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_FSYS, "pclk_sysreg_fsys",
+ "mout_aclk_fsys_200_user", ENABLE_PCLK_FSYS,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_FSYS */
+ GATE(CLK_SCLK_PCIE_100, "sclk_pcie_100", "mout_sclk_pcie_100_user",
+ ENABLE_SCLK_FSYS, 21, 0, 0),
+ GATE(CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK,
+ "phyclk_usbhost30_uhost30_pipe_pclk",
+ "mout_phyclk_usbhost30_uhost30_pipe_pclk_user",
+ ENABLE_SCLK_FSYS, 18, 0, 0),
+ GATE(CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK,
+ "phyclk_usbhost30_uhost30_phyclock",
+ "mout_phyclk_usbhost30_uhost30_phyclock_user",
+ ENABLE_SCLK_FSYS, 17, 0, 0),
+ GATE(CLK_PHYCLK_UFS_RX1_SYMBOL, "phyclk_ufs_rx1_symbol",
+ "mout_phyclk_ufs_rx1_symbol_user", ENABLE_SCLK_FSYS,
+ 16, 0, 0),
+ GATE(CLK_PHYCLK_UFS_RX0_SYMBOL, "phyclk_ufs_rx0_symbol",
+ "mout_phyclk_ufs_rx0_symbol_user", ENABLE_SCLK_FSYS,
+ 15, 0, 0),
+ GATE(CLK_PHYCLK_UFS_TX1_SYMBOL, "phyclk_ufs_tx1_symbol",
+ "mout_phyclk_ufs_tx1_symbol_user", ENABLE_SCLK_FSYS,
+ 14, 0, 0),
+ GATE(CLK_PHYCLK_UFS_TX0_SYMBOL, "phyclk_ufs_tx0_symbol",
+ "mout_phyclk_ufs_tx0_symbol_user", ENABLE_SCLK_FSYS,
+ 13, 0, 0),
+ GATE(CLK_PHYCLK_USBHOST20_PHY_HSIC1, "phyclk_usbhost20_phy_hsic1",
+ "mout_phyclk_usbhost20_phy_hsic1", ENABLE_SCLK_FSYS,
+ 12, 0, 0),
+ GATE(CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI,
+ "phyclk_usbhost20_phy_clk48mohci",
+ "mout_phyclk_usbhost20_phy_clk48mohci_user",
+ ENABLE_SCLK_FSYS, 11, 0, 0),
+ GATE(CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK,
+ "phyclk_usbhost20_phy_phyclock",
+ "mout_phyclk_usbhost20_phy_phyclock_user",
+ ENABLE_SCLK_FSYS, 10, 0, 0),
+ GATE(CLK_PHYCLK_USBHOST20_PHY_FREECLK,
+ "phyclk_usbhost20_phy_freeclk",
+ "mout_phyclk_usbhost20_phy_freeclk_user",
+ ENABLE_SCLK_FSYS, 9, 0, 0),
+ GATE(CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK,
+ "phyclk_usbdrd30_udrd30_pipe_pclk",
+ "mout_phyclk_usbdrd30_udrd30_pipe_pclk_user",
+ ENABLE_SCLK_FSYS, 8, 0, 0),
+ GATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK,
+ "phyclk_usbdrd30_udrd30_phyclock",
+ "mout_phyclk_usbdrd30_udrd30_phyclock_user",
+ ENABLE_SCLK_FSYS, 7, 0, 0),
+ GATE(CLK_SCLK_MPHY, "sclk_mphy", "mout_sclk_mphy",
+ ENABLE_SCLK_FSYS, 6, 0, 0),
+ GATE(CLK_SCLK_UFSUNIPRO, "sclk_ufsunipro", "mout_sclk_ufsunipro_user",
+ ENABLE_SCLK_FSYS, 5, 0, 0),
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "mout_sclk_mmc2_user",
+ ENABLE_SCLK_FSYS, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "mout_sclk_mmc1_user",
+ ENABLE_SCLK_FSYS, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "mout_sclk_mmc0_user",
+ ENABLE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_USBHOST30, "sclk_usbhost30", "mout_sclk_usbhost30_user",
+ ENABLE_SCLK_FSYS, 1, 0, 0),
+ GATE(CLK_SCLK_USBDRD30, "sclk_usbdrd30", "mout_sclk_usbdrd30_user",
+ ENABLE_SCLK_FSYS, 0, 0, 0),
+
+ /* ENABLE_IP_FSYS0 */
+ GATE(CLK_PDMA1, "pdma1", "aclk_pdma1", ENABLE_IP_FSYS0, 15, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "aclk_pdma0", ENABLE_IP_FSYS0, 0, 0, 0),
+};
+
+static struct samsung_cmu_info fsys_cmu_info __initdata = {
+ .mux_clks = fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys_mux_clks),
+ .gate_clks = fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys_gate_clks),
+ .fixed_clks = fsys_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(fsys_fixed_clks),
+ .nr_clk_ids = FSYS_NR_CLK,
+ .clk_regs = fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+};
+
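+/*
+ * Early OF init callback: registers all of the CMU_FSYS clocks described
+ * above through the common samsung_cmu_register_one() helper. The other
+ * CMU blocks below follow the same pattern.
+ */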
+static void __init exynos5433_cmu_fsys_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &fsys_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys",
+ exynos5433_cmu_fsys_init);
+
+/*
+ * Register offset definitions for CMU_G2D
+ */
+#define MUX_SEL_G2D0 0x0200
+#define MUX_SEL_ENABLE_G2D0 0x0300
+#define MUX_SEL_STAT_G2D0 0x0400
+#define DIV_G2D 0x0600
+#define DIV_STAT_G2D 0x0700
+#define DIV_ENABLE_ACLK_G2D 0x0800
+#define DIV_ENABLE_ACLK_G2D_SECURE_SMMU_G2D 0x0804
+#define DIV_ENABLE_PCLK_G2D 0x0900
+#define DIV_ENABLE_PCLK_G2D_SECURE_SMMU_G2D 0x0904
+#define DIV_ENABLE_IP_G2D0 0x0b00
+#define DIV_ENABLE_IP_G2D1 0x0b04
+#define DIV_ENABLE_IP_G2D_SECURE_SMMU_G2D 0x0b08
+
+static unsigned long g2d_clk_regs[] __initdata = {
+ MUX_SEL_G2D0,
+ MUX_SEL_ENABLE_G2D0,
+ MUX_SEL_STAT_G2D0,
+ DIV_G2D,
+ DIV_STAT_G2D,
+ DIV_ENABLE_ACLK_G2D,
+ DIV_ENABLE_ACLK_G2D_SECURE_SMMU_G2D,
+ DIV_ENABLE_PCLK_G2D,
+ DIV_ENABLE_PCLK_G2D_SECURE_SMMU_G2D,
+ DIV_ENABLE_IP_G2D0,
+ DIV_ENABLE_IP_G2D1,
+ DIV_ENABLE_IP_G2D_SECURE_SMMU_G2D,
+};
+
+/* list of parent clocks */
+PNAME(mout_aclk_g2d_266_user_p) = { "oscclk", "aclk_g2d_266", };
+PNAME(mout_aclk_g2d_400_user_p) = { "oscclk", "aclk_g2d_400", };
+
+static struct samsung_mux_clock g2d_mux_clks[] __initdata = {
+ /* MUX_SEL_G2D0 */
+ MUX(CLK_MUX_ACLK_G2D_266_USER, "mout_aclk_g2d_266_user",
+ mout_aclk_g2d_266_user_p, MUX_SEL_G2D0, 4, 1),
+ MUX(CLK_MUX_ACLK_G2D_400_USER, "mout_aclk_g2d_400_user",
+ mout_aclk_g2d_400_user_p, MUX_SEL_G2D0, 0, 1),
+};
+
+static struct samsung_div_clock g2d_div_clks[] __initdata = {
+ /* DIV_G2D */
+ DIV(CLK_DIV_PCLK_G2D, "div_pclk_g2d", "mout_aclk_g2d_266_user",
+ DIV_G2D, 0, 2),
+};
+
+static struct samsung_gate_clock g2d_gate_clks[] __initdata = {
+ /* DIV_ENABLE_ACLK_G2D */
+ GATE(CLK_ACLK_SMMU_MDMA1, "aclk_smmu_mdma1", "mout_aclk_g2d_266_user",
+ DIV_ENABLE_ACLK_G2D, 12, 0, 0),
+ GATE(CLK_ACLK_BTS_MDMA1, "aclk_bts_mdma1", "mout_aclk_g2d_266_user",
+ DIV_ENABLE_ACLK_G2D, 11, 0, 0),
+ GATE(CLK_ACLK_BTS_G2D, "aclk_bts_g2d", "mout_aclk_g2d_400_user",
+ DIV_ENABLE_ACLK_G2D, 10, 0, 0),
+ GATE(CLK_ACLK_ALB_G2D, "aclk_alb_g2d", "mout_aclk_g2d_400_user",
+ DIV_ENABLE_ACLK_G2D, 9, 0, 0),
+ GATE(CLK_ACLK_AXIUS_G2DX, "aclk_axius_g2dx", "mout_aclk_g2d_400_user",
+ DIV_ENABLE_ACLK_G2D, 8, 0, 0),
+ GATE(CLK_ACLK_ASYNCAXI_SYSX, "aclk_asyncaxi_sysx",
+ "mout_aclk_g2d_400_user", DIV_ENABLE_ACLK_G2D,
+ 7, 0, 0),
+ GATE(CLK_ACLK_AHB2APB_G2D1P, "aclk_ahb2apb_g2d1p", "div_pclk_g2d",
+ DIV_ENABLE_ACLK_G2D, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_G2D0P, "aclk_ahb2apb_g2d0p", "div_pclk_g2d",
+ DIV_ENABLE_ACLK_G2D, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_G2DX, "aclk_xiu_g2dx", "mout_aclk_g2d_400_user",
+ DIV_ENABLE_ACLK_G2D, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_G2DNP_133, "aclk_g2dnp_133", "div_pclk_g2d",
+ DIV_ENABLE_ACLK_G2D, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_G2DND_400, "aclk_g2dnd_400", "mout_aclk_g2d_400_user",
+ DIV_ENABLE_ACLK_G2D, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MDMA1, "aclk_mdma1", "mout_aclk_g2d_266_user",
+ DIV_ENABLE_ACLK_G2D, 1, 0, 0),
+ GATE(CLK_ACLK_G2D, "aclk_g2d", "mout_aclk_g2d_400_user",
+ DIV_ENABLE_ACLK_G2D, 0, 0, 0),
+
+ /* DIV_ENABLE_ACLK_G2D_SECURE_SMMU_G2D */
+ GATE(CLK_ACLK_SMMU_G2D, "aclk_smmu_g2d", "mout_aclk_g2d_400_user",
+ DIV_ENABLE_ACLK_G2D_SECURE_SMMU_G2D, 0, 0, 0),
+
+ /* DIV_ENABLE_PCLK_G2D */
+ GATE(CLK_PCLK_SMMU_MDMA1, "pclk_smmu_mdma1", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D, 7, 0, 0),
+ GATE(CLK_PCLK_BTS_MDMA1, "pclk_bts_mdma1", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D, 6, 0, 0),
+ GATE(CLK_PCLK_BTS_G2D, "pclk_bts_g2d", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D, 5, 0, 0),
+ GATE(CLK_PCLK_ALB_G2D, "pclk_alb_g2d", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D, 4, 0, 0),
+ GATE(CLK_PCLK_ASYNCAXI_SYSX, "pclk_asyncaxi_sysx", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D, 3, 0, 0),
+ GATE(CLK_PCLK_PMU_G2D, "pclk_pmu_g2d", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_G2D, "pclk_sysreg_g2d", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_G2D, "pclk_g2d", "div_pclk_g2d", DIV_ENABLE_PCLK_G2D,
+ 0, 0, 0),
+
+ /* DIV_ENABLE_PCLK_G2D_SECURE_SMMU_G2D */
+ GATE(CLK_PCLK_SMMU_G2D, "pclk_smmu_g2d", "div_pclk_g2d",
+ DIV_ENABLE_PCLK_G2D_SECURE_SMMU_G2D, 0, 0, 0),
+};
+
+static struct samsung_cmu_info g2d_cmu_info __initdata = {
+ .mux_clks = g2d_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(g2d_mux_clks),
+ .div_clks = g2d_div_clks,
+ .nr_div_clks = ARRAY_SIZE(g2d_div_clks),
+ .gate_clks = g2d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(g2d_gate_clks),
+ .nr_clk_ids = G2D_NR_CLK,
+ .clk_regs = g2d_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(g2d_clk_regs),
+};
+
+static void __init exynos5433_cmu_g2d_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &g2d_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos5433_cmu_g2d, "samsung,exynos5433-cmu-g2d",
+ exynos5433_cmu_g2d_init);
+
+/*
+ * Register offset definitions for CMU_DISP
+ */
+#define DISP_PLL_LOCK 0x0000
+#define DISP_PLL_CON0 0x0100
+#define DISP_PLL_CON1 0x0104
+#define DISP_PLL_FREQ_DET 0x0108
+#define MUX_SEL_DISP0 0x0200
+#define MUX_SEL_DISP1 0x0204
+#define MUX_SEL_DISP2 0x0208
+#define MUX_SEL_DISP3 0x020c
+#define MUX_SEL_DISP4 0x0210
+#define MUX_ENABLE_DISP0 0x0300
+#define MUX_ENABLE_DISP1 0x0304
+#define MUX_ENABLE_DISP2 0x0308
+#define MUX_ENABLE_DISP3 0x030c
+#define MUX_ENABLE_DISP4 0x0310
+#define MUX_STAT_DISP0 0x0400
+#define MUX_STAT_DISP1 0x0404
+#define MUX_STAT_DISP2 0x0408
+#define MUX_STAT_DISP3 0x040c
+#define MUX_STAT_DISP4 0x0410
+#define MUX_IGNORE_DISP2 0x0508
+#define DIV_DISP 0x0600
+#define DIV_DISP_PLL_FREQ_DET 0x0604
+#define DIV_STAT_DISP 0x0700
+#define DIV_STAT_DISP_PLL_FREQ_DET 0x0704
+#define ENABLE_ACLK_DISP0 0x0800
+#define ENABLE_ACLK_DISP1 0x0804
+#define ENABLE_PCLK_DISP 0x0900
+#define ENABLE_SCLK_DISP 0x0a00
+#define ENABLE_IP_DISP0 0x0b00
+#define ENABLE_IP_DISP1 0x0b04
+#define CLKOUT_CMU_DISP 0x0c00
+#define CLKOUT_CMU_DISP_DIV_STAT 0x0c04
+
+static unsigned long disp_clk_regs[] __initdata = {
+ DISP_PLL_LOCK,
+ DISP_PLL_CON0,
+ DISP_PLL_CON1,
+ DISP_PLL_FREQ_DET,
+ MUX_SEL_DISP0,
+ MUX_SEL_DISP1,
+ MUX_SEL_DISP2,
+ MUX_SEL_DISP3,
+ MUX_SEL_DISP4,
+ MUX_ENABLE_DISP0,
+ MUX_ENABLE_DISP1,
+ MUX_ENABLE_DISP2,
+ MUX_ENABLE_DISP3,
+ MUX_ENABLE_DISP4,
+ MUX_STAT_DISP0,
+ MUX_STAT_DISP1,
+ MUX_STAT_DISP2,
+ MUX_STAT_DISP3,
+ MUX_STAT_DISP4,
+ MUX_IGNORE_DISP2,
+ DIV_DISP,
+ DIV_DISP_PLL_FREQ_DET,
+ DIV_STAT_DISP,
+ DIV_STAT_DISP_PLL_FREQ_DET,
+ ENABLE_ACLK_DISP0,
+ ENABLE_ACLK_DISP1,
+ ENABLE_PCLK_DISP,
+ ENABLE_SCLK_DISP,
+ ENABLE_IP_DISP0,
+ ENABLE_IP_DISP1,
+ CLKOUT_CMU_DISP,
+ CLKOUT_CMU_DISP_DIV_STAT,
+};
+
+/* list of parent clocks */
+PNAME(mout_disp_pll_p) = { "oscclk", "fout_disp_pll", };
+PNAME(mout_sclk_dsim1_user_p) = { "oscclk", "sclk_dsim1_disp", };
+PNAME(mout_sclk_dsim0_user_p) = { "oscclk", "sclk_dsim0_disp", };
+PNAME(mout_sclk_dsd_user_p) = { "oscclk", "sclk_dsd_disp", };
+PNAME(mout_sclk_decon_tv_eclk_user_p) = { "oscclk",
+ "sclk_decon_tv_eclk_disp", };
+PNAME(mout_sclk_decon_vclk_user_p) = { "oscclk",
+ "sclk_decon_vclk_disp", };
+PNAME(mout_sclk_decon_eclk_user_p) = { "oscclk",
+ "sclk_decon_eclk_disp", };
+PNAME(mout_sclk_decon_tv_vclk_user_p) = { "oscclk",
+ "sclk_decon_tv_vclk_disp", };
+PNAME(mout_aclk_disp_333_user_p) = { "oscclk", "aclk_disp_333", };
+
+PNAME(mout_phyclk_mipidphy1_bitclkdiv8_user_p) = { "oscclk",
+ "phyclk_mipidphy1_bitclkdiv8_phy", };
+PNAME(mout_phyclk_mipidphy1_rxclkesc0_user_p) = { "oscclk",
+ "phyclk_mipidphy1_rxclkesc0_phy", };
+PNAME(mout_phyclk_mipidphy0_bitclkdiv8_user_p) = { "oscclk",
+ "phyclk_mipidphy0_bitclkdiv8_phy", };
+PNAME(mout_phyclk_mipidphy0_rxclkesc0_user_p) = { "oscclk",
+ "phyclk_mipidphy0_rxclkesc0_phy", };
+PNAME(mout_phyclk_hdmiphy_tmds_clko_user_p) = { "oscclk",
+ "phyclk_hdmiphy_tmds_clko_phy", };
+PNAME(mout_phyclk_hdmiphy_pixel_clko_user_p) = { "oscclk",
+ "phyclk_hdmiphy_pixel_clko_phy", };
+
+PNAME(mout_sclk_dsim0_p) = { "mout_disp_pll",
+ "mout_sclk_dsim0_user", };
+PNAME(mout_sclk_decon_tv_eclk_p) = { "mout_disp_pll",
+ "mout_sclk_decon_tv_eclk_user", };
+PNAME(mout_sclk_decon_vclk_p) = { "mout_disp_pll",
+ "mout_sclk_decon_vclk_user", };
+PNAME(mout_sclk_decon_eclk_p) = { "mout_disp_pll",
+ "mout_sclk_decon_eclk_user", };
+
+PNAME(mout_sclk_dsim1_b_disp_p) = { "mout_sclk_dsim1_a_disp",
+ "mout_sclk_dsim1_user", };
+PNAME(mout_sclk_decon_tv_vclk_c_disp_p) = {
+ "mout_phyclk_hdmiphy_pixel_clko_user",
+ "mout_sclk_decon_tv_vclk_b_disp", };
+PNAME(mout_sclk_decon_tv_vclk_b_disp_p) = { "mout_sclk_decon_tv_vclk_a_disp",
+ "mout_sclk_decon_tv_vclk_user", };
+
+static struct samsung_pll_clock disp_pll_clks[] __initdata = {
+ PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", "oscclk",
+ DISP_PLL_LOCK, DISP_PLL_CON0, exynos5443_pll_rates),
+};
+
+static struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initdata = {
+ /*
+ * sclk_rgb_{vclk|tv_vclk} is half clock of sclk_decon_{vclk|tv_vclk}.
+ * The divider has fixed value (2) between sclk_rgb_{vclk|tv_vclk}
+ * and sclk_decon_{vclk|tv_vclk}.
+ */
+ FFACTOR(CLK_SCLK_RGB_VCLK, "sclk_rgb_vclk", "sclk_decon_vclk",
+ 1, 2, 0),
+ FFACTOR(CLK_SCLK_RGB_TV_VCLK, "sclk_rgb_tv_vclk", "sclk_decon_tv_vclk",
+ 1, 2, 0),
+};
+
+static struct samsung_fixed_rate_clock disp_fixed_clks[] __initdata = {
+ /* PHY clocks from MIPI_DPHY1 */
+ FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, CLK_IS_ROOT,
+ 188000000),
+ FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, CLK_IS_ROOT,
+ 100000000),
+ /* PHY clocks from MIPI_DPHY0 */
+ FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, CLK_IS_ROOT,
+ 188000000),
+ FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, CLK_IS_ROOT,
+ 100000000),
+ /* PHY clocks from HDMI_PHY */
+ FRATE(0, "phyclk_hdmiphy_tmds_clko_phy", NULL, CLK_IS_ROOT, 300000000),
+ FRATE(0, "phyclk_hdmiphy_pixel_clko_phy", NULL, CLK_IS_ROOT, 166000000),
+};
+
+static struct samsung_mux_clock disp_mux_clks[] __initdata = {
+ /* MUX_SEL_DISP0 */
+ MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p, MUX_SEL_DISP0,
+ 0, 1),
+
+ /* MUX_SEL_DISP1 */
+ MUX(CLK_MOUT_SCLK_DSIM1_USER, "mout_sclk_dsim1_user",
+ mout_sclk_dsim1_user_p, MUX_SEL_DISP1, 28, 1),
+ MUX(CLK_MOUT_SCLK_DSIM0_USER, "mout_sclk_dsim0_user",
+ mout_sclk_dsim0_user_p, MUX_SEL_DISP1, 24, 1),
+ MUX(CLK_MOUT_SCLK_DSD_USER, "mout_sclk_dsd_user", mout_sclk_dsd_user_p,
+ MUX_SEL_DISP1, 20, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_ECLK_USER, "mout_sclk_decon_tv_eclk_user",
+ mout_sclk_decon_tv_eclk_user_p, MUX_SEL_DISP1, 16, 1),
+ MUX(CLK_MOUT_SCLK_DECON_VCLK_USER, "mout_sclk_decon_vclk_user",
+ mout_sclk_decon_vclk_user_p, MUX_SEL_DISP1, 12, 1),
+ MUX(CLK_MOUT_SCLK_DECON_ECLK_USER, "mout_sclk_decon_eclk_user",
+ mout_sclk_decon_eclk_user_p, MUX_SEL_DISP1, 8, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_VCLK_USER, "mout_sclk_decon_tv_vclk_user",
+ mout_sclk_decon_tv_vclk_user_p, MUX_SEL_DISP1, 4, 1),
+ MUX(CLK_MOUT_ACLK_DISP_333_USER, "mout_aclk_disp_333_user",
+ mout_aclk_disp_333_user_p, MUX_SEL_DISP1, 0, 1),
+
+ /* MUX_SEL_DISP2 */
+ MUX(CLK_MOUT_PHYCLK_MIPIDPHY1_BITCLKDIV8_USER,
+ "mout_phyclk_mipidphy1_bitclkdiv8_user",
+ mout_phyclk_mipidphy1_bitclkdiv8_user_p, MUX_SEL_DISP2,
+ 20, 1),
+ MUX(CLK_MOUT_PHYCLK_MIPIDPHY1_RXCLKESC0_USER,
+ "mout_phyclk_mipidphy1_rxclkesc0_user",
+ mout_phyclk_mipidphy1_rxclkesc0_user_p, MUX_SEL_DISP2,
+ 16, 1),
+ MUX(CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER,
+ "mout_phyclk_mipidphy0_bitclkdiv8_user",
+ mout_phyclk_mipidphy0_bitclkdiv8_user_p, MUX_SEL_DISP2,
+ 12, 1),
+ MUX(CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER,
+ "mout_phyclk_mipidphy0_rxclkesc0_user",
+ mout_phyclk_mipidphy0_rxclkesc0_user_p, MUX_SEL_DISP2,
+ 8, 1),
+ MUX(CLK_MOUT_PHYCLK_HDMIPHY_TMDS_CLKO_USER,
+ "mout_phyclk_hdmiphy_tmds_clko_user",
+ mout_phyclk_hdmiphy_tmds_clko_user_p, MUX_SEL_DISP2,
+ 4, 1),
+ MUX(CLK_MOUT_PHYCLK_HDMIPHY_PIXEL_CLKO_USER,
+ "mout_phyclk_hdmiphy_pixel_clko_user",
+ mout_phyclk_hdmiphy_pixel_clko_user_p, MUX_SEL_DISP2,
+ 0, 1),
+
+ /* MUX_SEL_DISP3 */
+ MUX(CLK_MOUT_SCLK_DSIM0, "mout_sclk_dsim0", mout_sclk_dsim0_p,
+ MUX_SEL_DISP3, 12, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_ECLK, "mout_sclk_decon_tv_eclk",
+ mout_sclk_decon_tv_eclk_p, MUX_SEL_DISP3, 8, 1),
+ MUX(CLK_MOUT_SCLK_DECON_VCLK, "mout_sclk_decon_vclk",
+ mout_sclk_decon_vclk_p, MUX_SEL_DISP3, 4, 1),
+ MUX(CLK_MOUT_SCLK_DECON_ECLK, "mout_sclk_decon_eclk",
+ mout_sclk_decon_eclk_p, MUX_SEL_DISP3, 0, 1),
+
+ /* MUX_SEL_DISP4 */
+ MUX(CLK_MOUT_SCLK_DSIM1_B_DISP, "mout_sclk_dsim1_b_disp",
+ mout_sclk_dsim1_b_disp_p, MUX_SEL_DISP4, 16, 1),
+ MUX(CLK_MOUT_SCLK_DSIM1_A_DISP, "mout_sclk_dsim1_a_disp",
+ mout_sclk_dsim0_p, MUX_SEL_DISP4, 12, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_VCLK_C_DISP,
+ "mout_sclk_decon_tv_vclk_c_disp",
+ mout_sclk_decon_tv_vclk_c_disp_p, MUX_SEL_DISP4, 8, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_VCLK_B_DISP,
+ "mout_sclk_decon_tv_vclk_b_disp",
+ mout_sclk_decon_tv_vclk_b_disp_p, MUX_SEL_DISP4, 4, 1),
+ MUX(CLK_MOUT_SCLK_DECON_TV_VCLK_A_DISP,
+ "mout_sclk_decon_tv_vclk_a_disp",
+ mout_sclk_decon_vclk_p, MUX_SEL_DISP4, 0, 1),
+};
+
+static struct samsung_div_clock disp_div_clks[] __initdata = {
+ /* DIV_DISP */
+ DIV(CLK_DIV_SCLK_DSIM1_DISP, "div_sclk_dsim1_disp",
+ "mout_sclk_dsim1_b_disp", DIV_DISP, 24, 3),
+ DIV(CLK_DIV_SCLK_DECON_TV_VCLK_DISP, "div_sclk_decon_tv_vclk_disp",
+ "mout_sclk_decon_tv_vclk_c_disp", DIV_DISP, 20, 3),
+ DIV(CLK_DIV_SCLK_DSIM0_DISP, "div_sclk_dsim0_disp", "mout_sclk_dsim0",
+ DIV_DISP, 16, 3),
+ DIV(CLK_DIV_SCLK_DECON_TV_ECLK_DISP, "div_sclk_decon_tv_eclk_disp",
+ "mout_sclk_decon_tv_eclk", DIV_DISP, 12, 3),
+ DIV(CLK_DIV_SCLK_DECON_VCLK_DISP, "div_sclk_decon_vclk_disp",
+ "mout_sclk_decon_vclk", DIV_DISP, 8, 3),
+ DIV(CLK_DIV_SCLK_DECON_ECLK_DISP, "div_sclk_decon_eclk_disp",
+ "mout_sclk_decon_eclk", DIV_DISP, 4, 3),
+ DIV(CLK_DIV_PCLK_DISP, "div_pclk_disp", "mout_aclk_disp_333_user",
+ DIV_DISP, 0, 2),
+};
+
+static struct samsung_gate_clock disp_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_DISP0 */
+ GATE(CLK_ACLK_DECON_TV, "aclk_decon_tv", "mout_aclk_disp_333_user",
+ ENABLE_ACLK_DISP0, 2, 0, 0),
+ GATE(CLK_ACLK_DECON, "aclk_decon", "mout_aclk_disp_333_user",
+ ENABLE_ACLK_DISP0, 0, 0, 0),
+
+ /* ENABLE_ACLK_DISP1 */
+ GATE(CLK_ACLK_SMMU_TV1X, "aclk_smmu_tv1x", "mout_aclk_disp_333_user",
+ ENABLE_ACLK_DISP1, 25, 0, 0),
+ GATE(CLK_ACLK_SMMU_TV0X, "aclk_smmu_tv0x", "mout_aclk_disp_333_user",
+ ENABLE_ACLK_DISP1, 24, 0, 0),
+ GATE(CLK_ACLK_SMMU_DECON1X, "aclk_smmu_decon1x",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 23, 0, 0),
+ GATE(CLK_ACLK_SMMU_DECON0X, "aclk_smmu_decon0x",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 22, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_TV_M3, "aclk_bts_decon_tv_m3",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 21, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_TV_M2, "aclk_bts_decon_tv_m2",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 20, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_TV_M1, "aclk_bts_decon_tv_m1",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 19, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_TV_M0, "aclk_bts_decon_tv_m0",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 18, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_NM4, "aclk_bts_decon_nm4",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 17, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_NM3, "aclk_bts_decon_nm3",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 16, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_NM2, "aclk_bts_decon_nm2",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 15, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_NM1, "aclk_bts_decon_nm1",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 14, 0, 0),
+ GATE(CLK_ACLK_BTS_DECON_NM0, "aclk_bts_decon_nm0",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 13, 0, 0),
+ GATE(CLK_ACLK_AHB2APB_DISPSFR2P, "aclk_ahb2apb_dispsfr2p",
+ "div_pclk_disp", ENABLE_ACLK_DISP1,
+ 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_DISPSFR1P, "aclk_ahb2apb_dispsfr1p",
+ "div_pclk_disp", ENABLE_ACLK_DISP1,
+ 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_DISPSFR0P, "aclk_ahb2apb_dispsfr0p",
+ "div_pclk_disp", ENABLE_ACLK_DISP1,
+ 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB_DISPH, "aclk_ahb_disph", "div_pclk_disp",
+ ENABLE_ACLK_DISP1, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_TV1X, "aclk_xiu_tv1x", "mout_aclk_disp_333_user",
+ ENABLE_ACLK_DISP1, 7, 0, 0),
+ GATE(CLK_ACLK_XIU_TV0X, "aclk_xiu_tv0x", "mout_aclk_disp_333_user",
+ ENABLE_ACLK_DISP1, 6, 0, 0),
+ GATE(CLK_ACLK_XIU_DECON1X, "aclk_xiu_decon1x",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 5, 0, 0),
+ GATE(CLK_ACLK_XIU_DECON0X, "aclk_xiu_decon0x",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 4, 0, 0),
+ GATE(CLK_ACLK_XIU_DISP1X, "aclk_xiu_disp1x", "mout_aclk_disp_333_user",
+ ENABLE_ACLK_DISP1, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_DISPNP_100, "aclk_xiu_dispnp_100", "div_pclk_disp",
+ ENABLE_ACLK_DISP1, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DISP1ND_333, "aclk_disp1nd_333",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_DISP0ND_333, "aclk_disp0nd_333",
+ "mout_aclk_disp_333_user", ENABLE_ACLK_DISP1,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_DISP */
+ GATE(CLK_PCLK_SMMU_TV1X, "pclk_smmu_tv1x", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 23, 0, 0),
+ GATE(CLK_PCLK_SMMU_TV0X, "pclk_smmu_tv0x", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 22, 0, 0),
+ GATE(CLK_PCLK_SMMU_DECON1X, "pclk_smmu_decon1x", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 21, 0, 0),
+ GATE(CLK_PCLK_SMMU_DECON0X, "pclk_smmu_decon0x", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 20, 0, 0),
+ GATE(CLK_PCLK_BTS_DECON_TV_M3, "pclk_bts_decon_tv_m3", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 19, 0, 0),
+ GATE(CLK_PCLK_BTS_DECON_TV_M2, "pclk_bts_decon_tv_m2", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 18, 0, 0),
+ GATE(CLK_PCLK_BTS_DECON_TV_M1, "pclk_bts_decon_tv_m1", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 17, 0, 0),
+ GATE(CLK_PCLK_BTS_DECON_TV_M0, "pclk_bts_decon_tv_m0", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 16, 0, 0),
+ GATE(CLK_PCLK_BTS_DECONM4, "pclk_bts_deconm4", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 15, 0, 0),
+ GATE(CLK_PCLK_BTS_DECONM3, "pclk_bts_deconm3", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 14, 0, 0),
+ GATE(CLK_PCLK_BTS_DECONM2, "pclk_bts_deconm2", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 13, 0, 0),
+ GATE(CLK_PCLK_BTS_DECONM1, "pclk_bts_deconm1", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 12, 0, 0),
+ GATE(CLK_PCLK_BTS_DECONM0, "pclk_bts_deconm0", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 11, 0, 0),
+ GATE(CLK_PCLK_MIC1, "pclk_mic1", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 10, 0, 0),
+ GATE(CLK_PCLK_PMU_DISP, "pclk_pmu_disp", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_DISP, "pclk_sysreg_disp", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_HDMIPHY, "pclk_hdmiphy", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 7, 0, 0),
+ GATE(CLK_PCLK_HDMI, "pclk_hdmi", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 6, 0, 0),
+ GATE(CLK_PCLK_MIC0, "pclk_mic0", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 5, 0, 0),
+ GATE(CLK_PCLK_DSIM1, "pclk_dsim1", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 3, 0, 0),
+ GATE(CLK_PCLK_DSIM0, "pclk_dsim0", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 2, 0, 0),
+ GATE(CLK_PCLK_DECON_TV, "pclk_decon_tv", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 1, 0, 0),
+
+ /* ENABLE_SCLK_DISP */
+ GATE(CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8, "phyclk_mipidphy1_bitclkdiv8",
+ "mout_phyclk_mipidphy1_bitclkdiv8_user",
+ ENABLE_SCLK_DISP, 26, 0, 0),
+ GATE(CLK_PHYCLK_MIPIDPHY1_RXCLKESC0, "phyclk_mipidphy1_rxclkesc0",
+ "mout_phyclk_mipidphy1_rxclkesc0_user",
+ ENABLE_SCLK_DISP, 25, 0, 0),
+ GATE(CLK_SCLK_RGB_TV_VCLK_TO_DSIM1, "sclk_rgb_tv_vclk_to_dsim1",
+ "sclk_rgb_tv_vclk", ENABLE_SCLK_DISP, 24, 0, 0),
+ GATE(CLK_SCLK_RGB_TV_VCLK_TO_MIC1, "sclk_rgb_tv_vclk_to_mic1",
+ "sclk_rgb_tv_vclk", ENABLE_SCLK_DISP, 23, 0, 0),
+ GATE(CLK_SCLK_DSIM1, "sclk_dsim1", "div_sclk_dsim1_disp",
+ ENABLE_SCLK_DISP, 22, 0, 0),
+ GATE(CLK_SCLK_DECON_TV_VCLK, "sclk_decon_tv_vclk",
+ "div_sclk_decon_tv_vclk_disp",
+ ENABLE_SCLK_DISP, 21, 0, 0),
+ GATE(CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8, "phyclk_mipidphy0_bitclkdiv8",
+ "mout_phyclk_mipidphy0_bitclkdiv8_user",
+ ENABLE_SCLK_DISP, 15, 0, 0),
+ GATE(CLK_PHYCLK_MIPIDPHY0_RXCLKESC0, "phyclk_mipidphy0_rxclkesc0",
+ "mout_phyclk_mipidphy0_rxclkesc0_user",
+ ENABLE_SCLK_DISP, 14, 0, 0),
+ GATE(CLK_PHYCLK_HDMIPHY_TMDS_CLKO, "phyclk_hdmiphy_tmds_clko",
+ "mout_phyclk_hdmiphy_tmds_clko_user",
+ ENABLE_SCLK_DISP, 13, 0, 0),
+ GATE(CLK_PHYCLK_HDMI_PIXEL, "phyclk_hdmi_pixel",
+ "sclk_rgb_tv_vclk", ENABLE_SCLK_DISP, 12, 0, 0),
+ GATE(CLK_SCLK_RGB_VCLK_TO_SMIES, "sclk_rgb_vclk_to_smies",
+ "sclk_rgb_vclk", ENABLE_SCLK_DISP, 11, 0, 0),
+ GATE(CLK_SCLK_RGB_VCLK_TO_DSIM0, "sclk_rgb_vclk_to_dsim0",
+ "sclk_rgb_vclk", ENABLE_SCLK_DISP, 9, 0, 0),
+ GATE(CLK_SCLK_RGB_VCLK_TO_MIC0, "sclk_rgb_vclk_to_mic0",
+ "sclk_rgb_vclk", ENABLE_SCLK_DISP, 8, 0, 0),
+ GATE(CLK_SCLK_DSD, "sclk_dsd", "mout_sclk_dsd_user",
+ ENABLE_SCLK_DISP, 7, 0, 0),
+ GATE(CLK_SCLK_HDMI_SPDIF, "sclk_hdmi_spdif", "sclk_hdmi_spdif_disp",
+ ENABLE_SCLK_DISP, 6, 0, 0),
+ GATE(CLK_SCLK_DSIM0, "sclk_dsim0", "div_sclk_dsim0_disp",
+ ENABLE_SCLK_DISP, 5, 0, 0),
+ GATE(CLK_SCLK_DECON_TV_ECLK, "sclk_decon_tv_eclk",
+ "div_sclk_decon_tv_eclk_disp",
+ ENABLE_SCLK_DISP, 4, 0, 0),
+ GATE(CLK_SCLK_DECON_VCLK, "sclk_decon_vclk",
+ "div_sclk_decon_vclk_disp", ENABLE_SCLK_DISP, 3, 0, 0),
+ GATE(CLK_SCLK_DECON_ECLK, "sclk_decon_eclk",
+ "div_sclk_decon_eclk_disp", ENABLE_SCLK_DISP, 2, 0, 0),
+};
+
+static struct samsung_cmu_info disp_cmu_info __initdata = {
+ .pll_clks = disp_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(disp_pll_clks),
+ .mux_clks = disp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(disp_mux_clks),
+ .div_clks = disp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(disp_div_clks),
+ .gate_clks = disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(disp_gate_clks),
+ .fixed_clks = disp_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(disp_fixed_clks),
+ .fixed_factor_clks = disp_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(disp_fixed_factor_clks),
+ .nr_clk_ids = DISP_NR_CLK,
+ .clk_regs = disp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(disp_clk_regs),
+};
+
+static void __init exynos5433_cmu_disp_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &disp_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos5433_cmu_disp, "samsung,exynos5433-cmu-disp",
+ exynos5433_cmu_disp_init);
+
+/*
+ * Register offset definitions for CMU_AUD
+ */
+#define MUX_SEL_AUD0 0x0200
+#define MUX_SEL_AUD1 0x0204
+#define MUX_ENABLE_AUD0 0x0300
+#define MUX_ENABLE_AUD1 0x0304
+#define MUX_STAT_AUD0 0x0400
+#define DIV_AUD0 0x0600
+#define DIV_AUD1 0x0604
+#define DIV_STAT_AUD0 0x0700
+#define DIV_STAT_AUD1 0x0704
+#define ENABLE_ACLK_AUD 0x0800
+#define ENABLE_PCLK_AUD 0x0900
+#define ENABLE_SCLK_AUD0 0x0a00
+#define ENABLE_SCLK_AUD1 0x0a04
+#define ENABLE_IP_AUD0 0x0b00
+#define ENABLE_IP_AUD1 0x0b04
+
+static unsigned long aud_clk_regs[] __initdata = {
+ MUX_SEL_AUD0,
+ MUX_SEL_AUD1,
+ MUX_ENABLE_AUD0,
+ MUX_ENABLE_AUD1,
+ MUX_STAT_AUD0,
+ DIV_AUD0,
+ DIV_AUD1,
+ DIV_STAT_AUD0,
+ DIV_STAT_AUD1,
+ ENABLE_ACLK_AUD,
+ ENABLE_PCLK_AUD,
+ ENABLE_SCLK_AUD0,
+ ENABLE_SCLK_AUD1,
+ ENABLE_IP_AUD0,
+ ENABLE_IP_AUD1,
+};
+
+/* list of parent clocks */
+PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", };
+PNAME(mout_sclk_aud_pcm_p) = { "mout_aud_pll_user", "ioclk_audiocdclk0", };
+
+static struct samsung_fixed_rate_clock aud_fixed_clks[] __initdata = {
+ FRATE(0, "ioclk_jtag_tclk", NULL, CLK_IS_ROOT, 33000000),
+ FRATE(0, "ioclk_slimbus_clk", NULL, CLK_IS_ROOT, 25000000),
+ FRATE(0, "ioclk_i2s_bclk", NULL, CLK_IS_ROOT, 50000000),
+};
+
+static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+ /* MUX_SEL_AUD0 */
+ MUX(CLK_MOUT_AUD_PLL_USER, "mout_aud_pll_user",
+ mout_aud_pll_user_aud_p, MUX_SEL_AUD0, 0, 1),
+
+ /* MUX_SEL_AUD1 */
+ MUX(CLK_MOUT_SCLK_AUD_PCM, "mout_sclk_aud_pcm", mout_sclk_aud_pcm_p,
+ MUX_SEL_AUD1, 8, 1),
+ MUX(CLK_MOUT_SCLK_AUD_I2S, "mout_sclk_aud_i2s", mout_sclk_aud_pcm_p,
+ MUX_SEL_AUD1, 0, 1),
+};
+
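+/*
+ * div_aud_ca5 divides mout_aud_pll_user and clocks the audio subsystem
+ * CPU ("ca5", Cortex-A5); the ATCLK, ACLK and PCLK_DBG dividers below
+ * are derived from it.
+ */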
+static struct samsung_div_clock aud_div_clks[] __initdata = {
+ /* DIV_AUD0 */
+ DIV(CLK_DIV_ATCLK_AUD, "div_atclk_aud", "div_aud_ca5", DIV_AUD0,
+ 12, 4),
+ DIV(CLK_DIV_PCLK_DBG_AUD, "div_pclk_dbg_aud", "div_aud_ca5", DIV_AUD0,
+ 8, 4),
+ DIV(CLK_DIV_ACLK_AUD, "div_aclk_aud", "div_aud_ca5", DIV_AUD0,
+ 4, 4),
+ DIV(CLK_DIV_AUD_CA5, "div_aud_ca5", "mout_aud_pll_user", DIV_AUD0,
+ 0, 4),
+
+ /* DIV_AUD1 */
+ DIV(CLK_DIV_SCLK_AUD_SLIMBUS, "div_sclk_aud_slimbus",
+ "mout_aud_pll_user", DIV_AUD1, 16, 5),
+ DIV(CLK_DIV_SCLK_AUD_UART, "div_sclk_aud_uart", "mout_aud_pll_user",
+ DIV_AUD1, 12, 4),
+ DIV(CLK_DIV_SCLK_AUD_PCM, "div_sclk_aud_pcm", "mout_sclk_aud_pcm",
+ DIV_AUD1, 4, 8),
+ DIV(CLK_DIV_SCLK_AUD_I2S, "div_sclk_aud_i2s", "mout_sclk_aud_i2s",
+ DIV_AUD1, 0, 4),
+};
+
+static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_AUD */
+ GATE(CLK_ACLK_INTR_CTRL, "aclk_intr_ctrl", "div_aclk_aud",
+ ENABLE_ACLK_AUD, 12, 0, 0),
+ GATE(CLK_ACLK_SMMU_LPASSX, "aclk_smmu_lpassx", "div_aclk_aud",
+ ENABLE_ACLK_AUD, 7, 0, 0),
+ GATE(CLK_ACLK_XIU_LPASSX, "aclk_xiu_lpassx", "div_aclk_aud",
+ ENABLE_ACLK_AUD, 4, 0, 0),
+ GATE(CLK_ACLK_AUDNP_133, "aclk_audnp_133", "div_aclk_aud",
+ ENABLE_ACLK_AUD, 3, 0, 0),
+ GATE(CLK_ACLK_AUDND_133, "aclk_audnd_133", "div_aclk_aud",
+ ENABLE_ACLK_AUD, 2, 0, 0),
+ GATE(CLK_ACLK_SRAMC, "aclk_sramc", "div_aclk_aud", ENABLE_ACLK_AUD,
+ 1, 0, 0),
+ GATE(CLK_ACLK_DMAC, "aclk_dmac", "div_aclk_aud", ENABLE_ACLK_AUD,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_AUD */
+ GATE(CLK_PCLK_WDT1, "pclk_wdt1", "div_aclk_aud", ENABLE_PCLK_AUD,
+ 13, 0, 0),
+ GATE(CLK_PCLK_WDT0, "pclk_wdt0", "div_aclk_aud", ENABLE_PCLK_AUD,
+ 12, 0, 0),
+ GATE(CLK_PCLK_SFR1, "pclk_sfr1", "div_aclk_aud", ENABLE_PCLK_AUD,
+ 11, 0, 0),
+ GATE(CLK_PCLK_SMMU_LPASSX, "pclk_smmu_lpassx", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 10, 0, 0),
+ GATE(CLK_PCLK_GPIO_AUD, "pclk_gpio_aud", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_AUD, "pclk_pmu_aud", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_AUD, "pclk_sysreg_aud", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_AUD_SLIMBUS, "pclk_aud_slimbus", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 6, 0, 0),
+ GATE(CLK_PCLK_AUD_UART, "pclk_aud_uart", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 5, 0, 0),
+ GATE(CLK_PCLK_AUD_PCM, "pclk_aud_pcm", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 4, 0, 0),
+ GATE(CLK_PCLK_AUD_I2S, "pclk_aud_i2s", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 3, 0, 0),
+ GATE(CLK_PCLK_TIMER, "pclk_timer", "div_aclk_aud", ENABLE_PCLK_AUD,
+ 2, 0, 0),
+ GATE(CLK_PCLK_SFR0_CTRL, "pclk_sfr0_ctrl", "div_aclk_aud",
+ ENABLE_PCLK_AUD, 0, 0, 0),
+
+ /* ENABLE_SCLK_AUD0 */
+ GATE(CLK_ATCLK_AUD, "atclk_aud", "div_atclk_aud", ENABLE_SCLK_AUD0,
+ 2, 0, 0),
+ GATE(CLK_PCLK_DBG_AUD, "pclk_dbg_aud", "div_pclk_dbg_aud",
+ ENABLE_SCLK_AUD0, 1, 0, 0),
+ GATE(CLK_SCLK_AUD_CA5, "sclk_aud_ca5", "div_aud_ca5", ENABLE_SCLK_AUD0,
+ 0, 0, 0),
+
+ /* ENABLE_SCLK_AUD1 */
+ GATE(CLK_SCLK_JTAG_TCK, "sclk_jtag_tck", "ioclk_jtag_tclk",
+ ENABLE_SCLK_AUD1, 6, 0, 0),
+ GATE(CLK_SCLK_SLIMBUS_CLKIN, "sclk_slimbus_clkin", "ioclk_slimbus_clk",
+ ENABLE_SCLK_AUD1, 5, 0, 0),
+ GATE(CLK_SCLK_AUD_SLIMBUS, "sclk_aud_slimbus", "div_sclk_aud_slimbus",
+ ENABLE_SCLK_AUD1, 4, 0, 0),
+ GATE(CLK_SCLK_AUD_UART, "sclk_aud_uart", "div_sclk_aud_uart",
+ ENABLE_SCLK_AUD1, 3, 0, 0),
+ GATE(CLK_SCLK_AUD_PCM, "sclk_aud_pcm", "div_sclk_aud_pcm",
+ ENABLE_SCLK_AUD1, 2, 0, 0),
+ GATE(CLK_SCLK_I2S_BCLK, "sclk_i2s_bclk", "ioclk_i2s_bclk",
+ ENABLE_SCLK_AUD1, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_AUD_I2S, "sclk_aud_i2s", "div_sclk_aud_i2s",
+ ENABLE_SCLK_AUD1, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info aud_cmu_info __initdata = {
+ .mux_clks = aud_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
+ .div_clks = aud_div_clks,
+ .nr_div_clks = ARRAY_SIZE(aud_div_clks),
+ .gate_clks = aud_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
+ .fixed_clks = aud_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(aud_fixed_clks),
+ .nr_clk_ids = AUD_NR_CLK,
+ .clk_regs = aud_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
+};
+
+static void __init exynos5433_cmu_aud_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &aud_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_aud, "samsung,exynos5433-cmu-aud",
+ exynos5433_cmu_aud_init);
+
+/*
+ * Register offset definitions for CMU_BUS{0|1|2}
+ */
+#define DIV_BUS 0x0600
+#define DIV_STAT_BUS 0x0700
+#define ENABLE_ACLK_BUS 0x0800
+#define ENABLE_PCLK_BUS 0x0900
+#define ENABLE_IP_BUS0 0x0b00
+#define ENABLE_IP_BUS1 0x0b04
+
+#define MUX_SEL_BUS2 0x0200 /* Only for CMU_BUS2 */
+#define MUX_ENABLE_BUS2 0x0300 /* Only for CMU_BUS2 */
+#define MUX_STAT_BUS2 0x0400 /* Only for CMU_BUS2 */
+
+/* list of parent clocks */
+PNAME(mout_aclk_bus2_400_p) = { "oscclk", "aclk_bus2_400", };
+
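+/* register offsets shared by CMU_BUS0, CMU_BUS1 and CMU_BUS2 */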
+#define CMU_BUS_COMMON_CLK_REGS \
+ DIV_BUS, \
+ DIV_STAT_BUS, \
+ ENABLE_ACLK_BUS, \
+ ENABLE_PCLK_BUS, \
+ ENABLE_IP_BUS0, \
+ ENABLE_IP_BUS1
+
+static unsigned long bus01_clk_regs[] __initdata = {
+ CMU_BUS_COMMON_CLK_REGS,
+};
+
+static unsigned long bus2_clk_regs[] __initdata = {
+ MUX_SEL_BUS2,
+ MUX_ENABLE_BUS2,
+ MUX_STAT_BUS2,
+ CMU_BUS_COMMON_CLK_REGS,
+};
+
+static struct samsung_div_clock bus0_div_clks[] __initdata = {
+ /* DIV_BUS0 */
+ DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus0_133", "aclk_bus0_400",
+ DIV_BUS, 0, 3),
+};
+
+/* CMU_BUS0 clocks */
+static struct samsung_gate_clock bus0_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_BUS0 */
+ GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus0p", "div_pclk_bus0_133",
+ ENABLE_ACLK_BUS, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUSNP_133, "aclk_bus0np_133", "div_pclk_bus0_133",
+ ENABLE_ACLK_BUS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUSND_400, "aclk_bus0nd_400", "aclk_bus0_400",
+ ENABLE_ACLK_BUS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_BUS0 */
+ GATE(CLK_PCLK_BUSSRVND_133, "pclk_bus0srvnd_133", "div_pclk_bus0_133",
+ ENABLE_PCLK_BUS, 2, 0, 0),
+ GATE(CLK_PCLK_PMU_BUS, "pclk_pmu_bus0", "div_pclk_bus0_133",
+ ENABLE_PCLK_BUS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_BUS, "pclk_sysreg_bus0", "div_pclk_bus0_133",
+ ENABLE_PCLK_BUS, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+/* CMU_BUS1 clocks */
+static struct samsung_div_clock bus1_div_clks[] __initdata = {
+ /* DIV_BUS1 */
+ DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus1_133", "aclk_bus1_400",
+ DIV_BUS, 0, 3),
+};
+
+static struct samsung_gate_clock bus1_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_BUS1 */
+ GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus1p", "div_pclk_bus1_133",
+ ENABLE_ACLK_BUS, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUSNP_133, "aclk_bus1np_133", "div_pclk_bus1_133",
+ ENABLE_ACLK_BUS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUSND_400, "aclk_bus1nd_400", "aclk_bus1_400",
+ ENABLE_ACLK_BUS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_BUS1 */
+ GATE(CLK_PCLK_BUSSRVND_133, "pclk_bus1srvnd_133", "div_pclk_bus1_133",
+ ENABLE_PCLK_BUS, 2, 0, 0),
+ GATE(CLK_PCLK_PMU_BUS, "pclk_pmu_bus1", "div_pclk_bus1_133",
+ ENABLE_PCLK_BUS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_BUS, "pclk_sysreg_bus1", "div_pclk_bus1_133",
+ ENABLE_PCLK_BUS, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+/* CMU_BUS2 clocks */
+static struct samsung_mux_clock bus2_mux_clks[] __initdata = {
+ /* MUX_SEL_BUS2 */
+ MUX(CLK_MOUT_ACLK_BUS2_400_USER, "mout_aclk_bus2_400_user",
+ mout_aclk_bus2_400_p, MUX_SEL_BUS2, 0, 1),
+};
+
+static struct samsung_div_clock bus2_div_clks[] __initdata = {
+ /* DIV_BUS2 */
+ DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus2_133",
+ "mout_aclk_bus2_400_user", DIV_BUS, 0, 3),
+};
+
+static struct samsung_gate_clock bus2_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_BUS2 */
+ GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus2p", "div_pclk_bus2_133",
+ ENABLE_ACLK_BUS, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUSNP_133, "aclk_bus2np_133", "div_pclk_bus2_133",
+ ENABLE_ACLK_BUS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUS2BEND_400, "aclk_bus2bend_400",
+ "mout_aclk_bus2_400_user", ENABLE_ACLK_BUS,
+ 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BUS2RTND_400, "aclk_bus2rtnd_400",
+ "mout_aclk_bus2_400_user", ENABLE_ACLK_BUS,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_BUS2 */
+ GATE(CLK_PCLK_BUSSRVND_133, "pclk_bus2srvnd_133", "div_pclk_bus2_133",
+ ENABLE_PCLK_BUS, 2, 0, 0),
+ GATE(CLK_PCLK_PMU_BUS, "pclk_pmu_bus2", "div_pclk_bus2_133",
+ ENABLE_PCLK_BUS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_BUS, "pclk_sysreg_bus2", "div_pclk_bus2_133",
+ ENABLE_PCLK_BUS, 0, CLK_IGNORE_UNUSED, 0),
+};
+
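+/*
+ * samsung_cmu_info fields common to the three bus CMUs; only clk_regs
+ * (and, for CMU_BUS2, the mux clocks) differ between the instances.
+ */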
+#define CMU_BUS_INFO_CLKS(id) \
+ .div_clks = bus##id##_div_clks, \
+ .nr_div_clks = ARRAY_SIZE(bus##id##_div_clks), \
+ .gate_clks = bus##id##_gate_clks, \
+ .nr_gate_clks = ARRAY_SIZE(bus##id##_gate_clks), \
+ .nr_clk_ids = BUSx_NR_CLK
+
+static struct samsung_cmu_info bus0_cmu_info __initdata = {
+ CMU_BUS_INFO_CLKS(0),
+ .clk_regs = bus01_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(bus01_clk_regs),
+};
+
+static struct samsung_cmu_info bus1_cmu_info __initdata = {
+ CMU_BUS_INFO_CLKS(1),
+ .clk_regs = bus01_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(bus01_clk_regs),
+};
+
+static struct samsung_cmu_info bus2_cmu_info __initdata = {
+ CMU_BUS_INFO_CLKS(2),
+ .mux_clks = bus2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(bus2_mux_clks),
+ .clk_regs = bus2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(bus2_clk_regs),
+};
+
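+/* generate the OF init callback and CLK_OF_DECLARE entry for each bus CMU */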
+#define exynos5433_cmu_bus_init(id) \
+static void __init exynos5433_cmu_bus##id##_init(struct device_node *np)\
+{ \
+ samsung_cmu_register_one(np, &bus##id##_cmu_info); \
+} \
+CLK_OF_DECLARE(exynos5433_cmu_bus##id, \
+ "samsung,exynos5433-cmu-bus"#id, \
+ exynos5433_cmu_bus##id##_init)
+
+exynos5433_cmu_bus_init(0);
+exynos5433_cmu_bus_init(1);
+exynos5433_cmu_bus_init(2);
+
+/*
+ * Register offset definitions for CMU_G3D
+ */
+#define G3D_PLL_LOCK 0x0000
+#define G3D_PLL_CON0 0x0100
+#define G3D_PLL_CON1 0x0104
+#define G3D_PLL_FREQ_DET 0x010c
+#define MUX_SEL_G3D 0x0200
+#define MUX_ENABLE_G3D 0x0300
+#define MUX_STAT_G3D 0x0400
+#define DIV_G3D 0x0600
+#define DIV_G3D_PLL_FREQ_DET 0x0604
+#define DIV_STAT_G3D 0x0700
+#define DIV_STAT_G3D_PLL_FREQ_DET 0x0704
+#define ENABLE_ACLK_G3D 0x0800
+#define ENABLE_PCLK_G3D 0x0900
+#define ENABLE_SCLK_G3D 0x0a00
+#define ENABLE_IP_G3D0 0x0b00
+#define ENABLE_IP_G3D1 0x0b04
+#define CLKOUT_CMU_G3D 0x0c00
+#define CLKOUT_CMU_G3D_DIV_STAT 0x0c04
+#define CLK_STOPCTRL 0x1000
+
+static unsigned long g3d_clk_regs[] __initdata = {
+ G3D_PLL_LOCK,
+ G3D_PLL_CON0,
+ G3D_PLL_CON1,
+ G3D_PLL_FREQ_DET,
+ MUX_SEL_G3D,
+ MUX_ENABLE_G3D,
+ MUX_STAT_G3D,
+ DIV_G3D,
+ DIV_G3D_PLL_FREQ_DET,
+ DIV_STAT_G3D,
+ DIV_STAT_G3D_PLL_FREQ_DET,
+ ENABLE_ACLK_G3D,
+ ENABLE_PCLK_G3D,
+ ENABLE_SCLK_G3D,
+ ENABLE_IP_G3D0,
+ ENABLE_IP_G3D1,
+ CLKOUT_CMU_G3D,
+ CLKOUT_CMU_G3D_DIV_STAT,
+ CLK_STOPCTRL,
+};
+
+/* list of parent clocks */
+PNAME(mout_aclk_g3d_400_p) = { "mout_g3d_pll", "aclk_g3d_400", };
+PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll", };
+
+static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
+ PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ G3D_PLL_LOCK, G3D_PLL_CON0, exynos5443_pll_rates),
+};
+
+static struct samsung_mux_clock g3d_mux_clks[] __initdata = {
+ /* MUX_SEL_G3D */
+ MUX(CLK_MOUT_ACLK_G3D_400, "mout_aclk_g3d_400", mout_aclk_g3d_400_p,
+ MUX_SEL_G3D, 8, 1),
+ MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
+ MUX_SEL_G3D, 0, 1),
+};
+
+static struct samsung_div_clock g3d_div_clks[] __initdata = {
+ /* DIV_G3D */
+ DIV(CLK_DIV_SCLK_HPM_G3D, "div_sclk_hpm_g3d", "mout_g3d_pll", DIV_G3D,
+ 8, 2),
+ DIV(CLK_DIV_PCLK_G3D, "div_pclk_g3d", "div_aclk_g3d", DIV_G3D,
+ 4, 3),
+ DIV(CLK_DIV_ACLK_G3D, "div_aclk_g3d", "mout_aclk_g3d_400", DIV_G3D,
+ 0, 3),
+};
+
+static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_G3D */
+ GATE(CLK_ACLK_BTS_G3D1, "aclk_bts_g3d1", "div_aclk_g3d",
+ ENABLE_ACLK_G3D, 7, 0, 0),
+ GATE(CLK_ACLK_BTS_G3D0, "aclk_bts_g3d0", "div_aclk_g3d",
+ ENABLE_ACLK_G3D, 6, 0, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_G3D, "aclk_asyncapbs_g3d", "div_pclk_g3d",
+ ENABLE_ACLK_G3D, 5, 0, 0),
+ GATE(CLK_ACLK_ASYNCAPBM_G3D, "aclk_asyncapbm_g3d", "div_aclk_g3d",
+ ENABLE_ACLK_G3D, 4, 0, 0),
+ GATE(CLK_ACLK_AHB2APB_G3DP, "aclk_ahb2apb_g3dp", "div_pclk_g3d",
+ ENABLE_ACLK_G3D, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_G3DNP_150, "aclk_g3dnp_150", "div_pclk_g3d",
+ ENABLE_ACLK_G3D, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_G3DND_600, "aclk_g3dnd_600", "div_aclk_g3d",
+ ENABLE_ACLK_G3D, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_G3D, "aclk_g3d", "div_aclk_g3d",
+ ENABLE_ACLK_G3D, 0, 0, 0),
+
+ /* ENABLE_PCLK_G3D */
+ GATE(CLK_PCLK_BTS_G3D1, "pclk_bts_g3d1", "div_pclk_g3d",
+ ENABLE_PCLK_G3D, 3, 0, 0),
+ GATE(CLK_PCLK_BTS_G3D0, "pclk_bts_g3d0", "div_pclk_g3d",
+ ENABLE_PCLK_G3D, 2, 0, 0),
+ GATE(CLK_PCLK_PMU_G3D, "pclk_pmu_g3d", "div_pclk_g3d",
+ ENABLE_PCLK_G3D, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_G3D, "pclk_sysreg_g3d", "div_pclk_g3d",
+ ENABLE_PCLK_G3D, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_G3D */
+ GATE(CLK_SCLK_HPM_G3D, "sclk_hpm_g3d", "div_sclk_hpm_g3d",
+ ENABLE_SCLK_G3D, 0, 0, 0),
+};
+
+static struct samsung_cmu_info g3d_cmu_info __initdata = {
+ .pll_clks = g3d_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(g3d_pll_clks),
+ .mux_clks = g3d_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(g3d_mux_clks),
+ .div_clks = g3d_div_clks,
+ .nr_div_clks = ARRAY_SIZE(g3d_div_clks),
+ .gate_clks = g3d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(g3d_gate_clks),
+ .nr_clk_ids = G3D_NR_CLK,
+ .clk_regs = g3d_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
+};
+
+static void __init exynos5433_cmu_g3d_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &g3d_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d",
+ exynos5433_cmu_g3d_init);
+
+/*
+ * Register offset definitions for CMU_GSCL
+ */
+#define MUX_SEL_GSCL 0x0200
+#define MUX_ENABLE_GSCL 0x0300
+#define MUX_STAT_GSCL 0x0400
+#define ENABLE_ACLK_GSCL 0x0800
+#define ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL0 0x0804
+#define ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL1 0x0808
+#define ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL2 0x080c
+#define ENABLE_PCLK_GSCL 0x0900
+#define ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0 0x0904
+#define ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1 0x0908
+#define ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2 0x090c
+#define ENABLE_IP_GSCL0 0x0b00
+#define ENABLE_IP_GSCL1 0x0b04
+#define ENABLE_IP_GSCL_SECURE_SMMU_GSCL0 0x0b08
+#define ENABLE_IP_GSCL_SECURE_SMMU_GSCL1 0x0b0c
+#define ENABLE_IP_GSCL_SECURE_SMMU_GSCL2 0x0b10
+
+static unsigned long gscl_clk_regs[] __initdata = {
+ MUX_SEL_GSCL,
+ MUX_ENABLE_GSCL,
+ MUX_STAT_GSCL,
+ ENABLE_ACLK_GSCL,
+ ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL0,
+ ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL1,
+ ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL2,
+ ENABLE_PCLK_GSCL,
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0,
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1,
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2,
+ ENABLE_IP_GSCL0,
+ ENABLE_IP_GSCL1,
+ ENABLE_IP_GSCL_SECURE_SMMU_GSCL0,
+ ENABLE_IP_GSCL_SECURE_SMMU_GSCL1,
+ ENABLE_IP_GSCL_SECURE_SMMU_GSCL2,
+};
+
+/* list of parent clocks */
+PNAME(aclk_gscl_111_user_p) = { "oscclk", "aclk_gscl_111", };
+PNAME(aclk_gscl_333_user_p) = { "oscclk", "aclk_gscl_333", };
+
+static struct samsung_mux_clock gscl_mux_clks[] __initdata = {
+ /* MUX_SEL_GSCL */
+ MUX(CLK_MOUT_ACLK_GSCL_111_USER, "mout_aclk_gscl_111_user",
+ aclk_gscl_111_user_p, MUX_SEL_GSCL, 4, 1),
+ MUX(CLK_MOUT_ACLK_GSCL_333_USER, "mout_aclk_gscl_333_user",
+ aclk_gscl_333_user_p, MUX_SEL_GSCL, 0, 1),
+};
+
+static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_GSCL */
+ GATE(CLK_ACLK_BTS_GSCL2, "aclk_bts_gscl2", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 11, 0, 0),
+ GATE(CLK_ACLK_BTS_GSCL1, "aclk_bts_gscl1", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 10, 0, 0),
+ GATE(CLK_ACLK_BTS_GSCL0, "aclk_bts_gscl0", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 9, 0, 0),
+ GATE(CLK_ACLK_AHB2APB_GSCLP, "aclk_ahb2apb_gsclp",
+ "mout_aclk_gscl_111_user", ENABLE_ACLK_GSCL,
+ 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_GSCLX, "aclk_xiu_gsclx", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 7, 0, 0),
+ GATE(CLK_ACLK_GSCLNP_111, "aclk_gsclnp_111", "mout_aclk_gscl_111_user",
+ ENABLE_ACLK_GSCL, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_GSCLRTND_333, "aclk_gsclrtnd_333",
+ "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 5, 0, 0),
+ GATE(CLK_ACLK_GSCLBEND_333, "aclk_gsclbend_333",
+ "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 4, 0, 0),
+ GATE(CLK_ACLK_GSD, "aclk_gsd", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 3, 0, 0),
+ GATE(CLK_ACLK_GSCL2, "aclk_gscl2", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 2, 0, 0),
+ GATE(CLK_ACLK_GSCL1, "aclk_gscl1", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 1, 0, 0),
+ GATE(CLK_ACLK_GSCL0, "aclk_gscl0", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL, 0, 0, 0),
+
+ /* ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL0 */
+ GATE(CLK_ACLK_SMMU_GSCL0, "aclk_smmu_gscl0", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL0, 0, 0, 0),
+
+ /* ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL1 */
+ GATE(CLK_ACLK_SMMU_GSCL1, "aclk_smmu_gscl1", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL1, 0, 0, 0),
+
+ /* ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL2 */
+ GATE(CLK_ACLK_SMMU_GSCL2, "aclk_smmu_gscl2", "mout_aclk_gscl_333_user",
+ ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL2, 0, 0, 0),
+
+ /* ENABLE_PCLK_GSCL */
+ GATE(CLK_PCLK_BTS_GSCL2, "pclk_bts_gscl2", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL, 7, 0, 0),
+ GATE(CLK_PCLK_BTS_GSCL1, "pclk_bts_gscl1", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL, 6, 0, 0),
+ GATE(CLK_PCLK_BTS_GSCL0, "pclk_bts_gscl0", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL, 5, 0, 0),
+ GATE(CLK_PCLK_PMU_GSCL, "pclk_pmu_gscl", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_GSCL, "pclk_sysreg_gscl",
+ "mout_aclk_gscl_111_user", ENABLE_PCLK_GSCL,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_GSCL2, "pclk_gscl2", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL, 2, 0, 0),
+ GATE(CLK_PCLK_GSCL1, "pclk_gscl1", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL, 1, 0, 0),
+ GATE(CLK_PCLK_GSCL0, "pclk_gscl0", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL, 0, 0, 0),
+
+ /* ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0 */
+ GATE(CLK_PCLK_SMMU_GSCL0, "pclk_smmu_gscl0", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0, 0, 0, 0),
+
+ /* ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1 */
+ GATE(CLK_PCLK_SMMU_GSCL1, "pclk_smmu_gscl1", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1, 0, 0, 0),
+
+ /* ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2 */
+ GATE(CLK_PCLK_SMMU_GSCL2, "pclk_smmu_gscl2", "mout_aclk_gscl_111_user",
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2, 0, 0, 0),
+};
+
+static struct samsung_cmu_info gscl_cmu_info __initdata = {
+ .mux_clks = gscl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(gscl_mux_clks),
+ .gate_clks = gscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(gscl_gate_clks),
+ .nr_clk_ids = GSCL_NR_CLK,
+ .clk_regs = gscl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(gscl_clk_regs),
+};
+
+static void __init exynos5433_cmu_gscl_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &gscl_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_gscl, "samsung,exynos5433-cmu-gscl",
+ exynos5433_cmu_gscl_init);
+
+/*
+ * Register offset definitions for CMU_APOLLO
+ */
+#define APOLLO_PLL_LOCK 0x0000
+#define APOLLO_PLL_CON0 0x0100
+#define APOLLO_PLL_CON1 0x0104
+#define APOLLO_PLL_FREQ_DET 0x010c
+#define MUX_SEL_APOLLO0 0x0200
+#define MUX_SEL_APOLLO1 0x0204
+#define MUX_SEL_APOLLO2 0x0208
+#define MUX_ENABLE_APOLLO0 0x0300
+#define MUX_ENABLE_APOLLO1 0x0304
+#define MUX_ENABLE_APOLLO2 0x0308
+#define MUX_STAT_APOLLO0 0x0400
+#define MUX_STAT_APOLLO1 0x0404
+#define MUX_STAT_APOLLO2 0x0408
+#define DIV_APOLLO0 0x0600
+#define DIV_APOLLO1 0x0604
+#define DIV_APOLLO_PLL_FREQ_DET 0x0608
+#define DIV_STAT_APOLLO0 0x0700
+#define DIV_STAT_APOLLO1 0x0704
+#define DIV_STAT_APOLLO_PLL_FREQ_DET 0x0708
+#define ENABLE_ACLK_APOLLO 0x0800
+#define ENABLE_PCLK_APOLLO 0x0900
+#define ENABLE_SCLK_APOLLO 0x0a00
+#define ENABLE_IP_APOLLO0 0x0b00
+#define ENABLE_IP_APOLLO1 0x0b04
+#define CLKOUT_CMU_APOLLO 0x0c00
+#define CLKOUT_CMU_APOLLO_DIV_STAT 0x0c04
+#define ARMCLK_STOPCTRL 0x1000
+#define APOLLO_PWR_CTRL 0x1020
+#define APOLLO_PWR_CTRL2 0x1024
+#define APOLLO_INTR_SPREAD_ENABLE 0x1080
+#define APOLLO_INTR_SPREAD_USE_STANDBYWFI 0x1084
+#define APOLLO_INTR_SPREAD_BLOCKING_DURATION 0x1088
+
+static unsigned long apollo_clk_regs[] __initdata = {
+ APOLLO_PLL_LOCK,
+ APOLLO_PLL_CON0,
+ APOLLO_PLL_CON1,
+ APOLLO_PLL_FREQ_DET,
+ MUX_SEL_APOLLO0,
+ MUX_SEL_APOLLO1,
+ MUX_SEL_APOLLO2,
+ MUX_ENABLE_APOLLO0,
+ MUX_ENABLE_APOLLO1,
+ MUX_ENABLE_APOLLO2,
+ MUX_STAT_APOLLO0,
+ MUX_STAT_APOLLO1,
+ MUX_STAT_APOLLO2,
+ DIV_APOLLO0,
+ DIV_APOLLO1,
+ DIV_APOLLO_PLL_FREQ_DET,
+ DIV_STAT_APOLLO0,
+ DIV_STAT_APOLLO1,
+ DIV_STAT_APOLLO_PLL_FREQ_DET,
+ ENABLE_ACLK_APOLLO,
+ ENABLE_PCLK_APOLLO,
+ ENABLE_SCLK_APOLLO,
+ ENABLE_IP_APOLLO0,
+ ENABLE_IP_APOLLO1,
+ CLKOUT_CMU_APOLLO,
+ CLKOUT_CMU_APOLLO_DIV_STAT,
+ ARMCLK_STOPCTRL,
+ APOLLO_PWR_CTRL,
+ APOLLO_PWR_CTRL2,
+ APOLLO_INTR_SPREAD_ENABLE,
+ APOLLO_INTR_SPREAD_USE_STANDBYWFI,
+ APOLLO_INTR_SPREAD_BLOCKING_DURATION,
+};
+
+/* list of parent clocks */
+PNAME(mout_apollo_pll_p) = { "oscclk", "fout_apollo_pll", };
+PNAME(mout_bus_pll_apollo_user_p) = { "oscclk", "sclk_bus_pll_apollo", };
+PNAME(mout_apollo_p) = { "mout_apollo_pll",
+ "mout_bus_pll_apollo_user", };
+
+static struct samsung_pll_clock apollo_pll_clks[] __initdata = {
+ PLL(pll_35xx, CLK_FOUT_APOLLO_PLL, "fout_apollo_pll", "oscclk",
+ APOLLO_PLL_LOCK, APOLLO_PLL_CON0, exynos5443_pll_rates),
+};
+
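+/*
+ * Clocks for the Apollo (Cortex-A53) CPU cluster. The muxes and dividers
+ * are registered read-only, so the clock framework only reads back their
+ * settings and never reparents or re-divides them here.
+ */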
+static struct samsung_mux_clock apollo_mux_clks[] __initdata = {
+ /* MUX_SEL_APOLLO0 */
+ MUX_F(CLK_MOUT_APOLLO_PLL, "mout_apollo_pll", mout_apollo_pll_p,
+ MUX_SEL_APOLLO0, 0, 1, 0, CLK_MUX_READ_ONLY),
+
+ /* MUX_SEL_APOLLO1 */
+ MUX(CLK_MOUT_BUS_PLL_APOLLO_USER, "mout_bus_pll_apollo_user",
+ mout_bus_pll_apollo_user_p, MUX_SEL_APOLLO1, 0, 1),
+
+ /* MUX_SEL_APOLLO2 */
+ MUX_F(CLK_MOUT_APOLLO, "mout_apollo", mout_apollo_p, MUX_SEL_APOLLO2,
+ 0, 1, 0, CLK_MUX_READ_ONLY),
+};
+
+static struct samsung_div_clock apollo_div_clks[] __initdata = {
+ /* DIV_APOLLO0 */
+ DIV_F(CLK_DIV_CNTCLK_APOLLO, "div_cntclk_apollo", "div_apollo2",
+ DIV_APOLLO0, 24, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_PCLK_DBG_APOLLO, "div_pclk_dbg_apollo", "div_apollo2",
+ DIV_APOLLO0, 20, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_ATCLK_APOLLO, "div_atclk_apollo", "div_apollo2",
+ DIV_APOLLO0, 16, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_PCLK_APOLLO, "div_pclk_apollo", "div_apollo2",
+ DIV_APOLLO0, 12, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_ACLK_APOLLO, "div_aclk_apollo", "div_apollo2",
+ DIV_APOLLO0, 8, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_APOLLO2, "div_apollo2", "div_apollo1",
+ DIV_APOLLO0, 4, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_APOLLO1, "div_apollo1", "mout_apollo",
+ DIV_APOLLO0, 0, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+
+ /* DIV_APOLLO1 */
+ DIV_F(CLK_DIV_SCLK_HPM_APOLLO, "div_sclk_hpm_apollo", "mout_apollo",
+ DIV_APOLLO1, 4, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_APOLLO_PLL, "div_apollo_pll", "mout_apollo",
+ DIV_APOLLO1, 0, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+};
+
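+/*
+ * Most gates below carry CLK_IGNORE_UNUSED so that clk_disable_unused()
+ * does not switch them off at late init even when no driver has claimed
+ * them.
+ */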
+static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_APOLLO */
+ GATE(CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS, "aclk_asatbslv_apollo_3_cssys",
+ "div_atclk_apollo", ENABLE_ACLK_APOLLO,
+ 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASATBSLV_APOLLO_2_CSSYS, "aclk_asatbslv_apollo_2_cssys",
+ "div_atclk_apollo", ENABLE_ACLK_APOLLO,
+ 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASATBSLV_APOLLO_1_CSSYS, "aclk_asatbslv_apollo_1_cssys",
+ "div_atclk_apollo", ENABLE_ACLK_APOLLO,
+ 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASATBSLV_APOLLO_0_CSSYS, "aclk_asatbslv_apollo_0_cssys",
+ "div_atclk_apollo", ENABLE_ACLK_APOLLO,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCACES_APOLLO_CCI, "aclk_asyncaces_apollo_cci",
+ "div_aclk_apollo", ENABLE_ACLK_APOLLO,
+ 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_APOLLOP, "aclk_ahb2apb_apollop",
+ "div_pclk_apollo", ENABLE_ACLK_APOLLO,
+ 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_APOLLONP_200, "aclk_apollonp_200",
+ "div_pclk_apollo", ENABLE_ACLK_APOLLO,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_APOLLO */
+ GATE(CLK_PCLK_ASAPBMST_CSSYS_APOLLO, "pclk_asapbmst_cssys_apollo",
+ "div_pclk_dbg_apollo", ENABLE_PCLK_APOLLO,
+ 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_APOLLO, "pclk_pmu_apollo", "div_pclk_apollo",
+ ENABLE_PCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_APOLLO, "pclk_sysreg_apollo",
+ "div_pclk_apollo", ENABLE_PCLK_APOLLO,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_APOLLO */
+ GATE(CLK_CNTCLK_APOLLO, "cntclk_apollo", "div_cntclk_apollo",
+ ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
+ ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
+ ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info apollo_cmu_info __initdata = {
+ .pll_clks = apollo_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(apollo_pll_clks),
+ .mux_clks = apollo_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(apollo_mux_clks),
+ .div_clks = apollo_div_clks,
+ .nr_div_clks = ARRAY_SIZE(apollo_div_clks),
+ .gate_clks = apollo_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(apollo_gate_clks),
+ .nr_clk_ids = APOLLO_NR_CLK,
+ .clk_regs = apollo_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(apollo_clk_regs),
+};
+
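+/*
+ * Each CMU is described by its own device-tree node. CLK_OF_DECLARE() ties
+ * the init function to the compatible string so the provider is registered
+ * early in boot, and samsung_cmu_register_one() maps the CMU registers and
+ * registers all PLL, mux, divider and gate clocks from the
+ * samsung_cmu_info above in one call.
+ */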
+static void __init exynos5433_cmu_apollo_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &apollo_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_apollo, "samsung,exynos5433-cmu-apollo",
+ exynos5433_cmu_apollo_init);
+
+/*
+ * Register offset definitions for CMU_ATLAS
+ */
+#define ATLAS_PLL_LOCK 0x0000
+#define ATLAS_PLL_CON0 0x0100
+#define ATLAS_PLL_CON1 0x0104
+#define ATLAS_PLL_FREQ_DET 0x010c
+#define MUX_SEL_ATLAS0 0x0200
+#define MUX_SEL_ATLAS1 0x0204
+#define MUX_SEL_ATLAS2 0x0208
+#define MUX_ENABLE_ATLAS0 0x0300
+#define MUX_ENABLE_ATLAS1 0x0304
+#define MUX_ENABLE_ATLAS2 0x0308
+#define MUX_STAT_ATLAS0 0x0400
+#define MUX_STAT_ATLAS1 0x0404
+#define MUX_STAT_ATLAS2 0x0408
+#define DIV_ATLAS0 0x0600
+#define DIV_ATLAS1 0x0604
+#define DIV_ATLAS_PLL_FREQ_DET 0x0608
+#define DIV_STAT_ATLAS0 0x0700
+#define DIV_STAT_ATLAS1 0x0704
+#define DIV_STAT_ATLAS_PLL_FREQ_DET 0x0708
+#define ENABLE_ACLK_ATLAS 0x0800
+#define ENABLE_PCLK_ATLAS 0x0900
+#define ENABLE_SCLK_ATLAS 0x0a00
+#define ENABLE_IP_ATLAS0 0x0b00
+#define ENABLE_IP_ATLAS1 0x0b04
+#define CLKOUT_CMU_ATLAS 0x0c00
+#define CLKOUT_CMU_ATLAS_DIV_STAT 0x0c04
+#define ARMCLK_STOPCTRL 0x1000
+#define ATLAS_PWR_CTRL 0x1020
+#define ATLAS_PWR_CTRL2 0x1024
+#define ATLAS_INTR_SPREAD_ENABLE 0x1080
+#define ATLAS_INTR_SPREAD_USE_STANDBYWFI 0x1084
+#define ATLAS_INTR_SPREAD_BLOCKING_DURATION 0x1088
+
+static unsigned long atlas_clk_regs[] __initdata = {
+ ATLAS_PLL_LOCK,
+ ATLAS_PLL_CON0,
+ ATLAS_PLL_CON1,
+ ATLAS_PLL_FREQ_DET,
+ MUX_SEL_ATLAS0,
+ MUX_SEL_ATLAS1,
+ MUX_SEL_ATLAS2,
+ MUX_ENABLE_ATLAS0,
+ MUX_ENABLE_ATLAS1,
+ MUX_ENABLE_ATLAS2,
+ MUX_STAT_ATLAS0,
+ MUX_STAT_ATLAS1,
+ MUX_STAT_ATLAS2,
+ DIV_ATLAS0,
+ DIV_ATLAS1,
+ DIV_ATLAS_PLL_FREQ_DET,
+ DIV_STAT_ATLAS0,
+ DIV_STAT_ATLAS1,
+ DIV_STAT_ATLAS_PLL_FREQ_DET,
+ ENABLE_ACLK_ATLAS,
+ ENABLE_PCLK_ATLAS,
+ ENABLE_SCLK_ATLAS,
+ ENABLE_IP_ATLAS0,
+ ENABLE_IP_ATLAS1,
+ CLKOUT_CMU_ATLAS,
+ CLKOUT_CMU_ATLAS_DIV_STAT,
+ ARMCLK_STOPCTRL,
+ ATLAS_PWR_CTRL,
+ ATLAS_PWR_CTRL2,
+ ATLAS_INTR_SPREAD_ENABLE,
+ ATLAS_INTR_SPREAD_USE_STANDBYWFI,
+ ATLAS_INTR_SPREAD_BLOCKING_DURATION,
+};
+
+/* list of all parent clocks */
+PNAME(mout_atlas_pll_p) = { "oscclk", "fout_atlas_pll", };
+PNAME(mout_bus_pll_atlas_user_p) = { "oscclk", "sclk_bus_pll_atlas", };
+PNAME(mout_atlas_p) = { "mout_atlas_pll",
+ "mout_bus_pll_atlas_user", };
+
+static struct samsung_pll_clock atlas_pll_clks[] __initdata = {
+ PLL(pll_35xx, CLK_FOUT_ATLAS_PLL, "fout_atlas_pll", "oscclk",
+ ATLAS_PLL_LOCK, ATLAS_PLL_CON0, exynos5443_pll_rates),
+};
+
+static struct samsung_mux_clock atlas_mux_clks[] __initdata = {
+ /* MUX_SEL_ATLAS0 */
+ MUX_F(CLK_MOUT_ATLAS_PLL, "mout_atlas_pll", mout_atlas_pll_p,
+ MUX_SEL_ATLAS0, 0, 1, 0, CLK_MUX_READ_ONLY),
+
+ /* MUX_SEL_ATLAS1 */
+ MUX(CLK_MOUT_BUS_PLL_ATLAS_USER, "mout_bus_pll_atlas_user",
+ mout_bus_pll_atlas_user_p, MUX_SEL_ATLAS1, 0, 1),
+
+ /* MUX_SEL_ATLAS2 */
+ MUX_F(CLK_MOUT_ATLAS, "mout_atlas", mout_atlas_p, MUX_SEL_ATLAS2,
+ 0, 1, 0, CLK_MUX_READ_ONLY),
+};
+
+static struct samsung_div_clock atlas_div_clks[] __initdata = {
+ /* DIV_ATLAS0 */
+ DIV_F(CLK_DIV_CNTCLK_ATLAS, "div_cntclk_atlas", "div_atlas2",
+ DIV_ATLAS0, 24, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_PCLK_DBG_ATLAS, "div_pclk_dbg_atlas", "div_atclk_atlas",
+ DIV_ATLAS0, 20, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_ATCLK_ATLASO, "div_atclk_atlas", "div_atlas2",
+ DIV_ATLAS0, 16, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_PCLK_ATLAS, "div_pclk_atlas", "div_atlas2",
+ DIV_ATLAS0, 12, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_ACLK_ATLAS, "div_aclk_atlas", "div_atlas2",
+ DIV_ATLAS0, 8, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_ATLAS2, "div_atlas2", "div_atlas1",
+ DIV_ATLAS0, 4, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_ATLAS1, "div_atlas1", "mout_atlas",
+ DIV_ATLAS0, 0, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+
+ /* DIV_ATLAS1 */
+ DIV_F(CLK_DIV_SCLK_HPM_ATLAS, "div_sclk_hpm_atlas", "mout_atlas",
+ DIV_ATLAS1, 4, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+ DIV_F(CLK_DIV_ATLAS_PLL, "div_atlas_pll", "mout_atlas",
+ DIV_ATLAS1, 0, 3, CLK_GET_RATE_NOCACHE,
+ CLK_DIVIDER_READ_ONLY),
+};
+
+static struct samsung_gate_clock atlas_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_ATLAS */
+ GATE(CLK_ACLK_ATB_AUD_CSSYS, "aclk_atb_aud_cssys",
+ "div_atclk_atlas", ENABLE_ACLK_ATLAS,
+ 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ATB_APOLLO3_CSSYS, "aclk_atb_apollo3_cssys",
+ "div_atclk_atlas", ENABLE_ACLK_ATLAS,
+ 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ATB_APOLLO2_CSSYS, "aclk_atb_apollo2_cssys",
+ "div_atclk_atlas", ENABLE_ACLK_ATLAS,
+ 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ATB_APOLLO1_CSSYS, "aclk_atb_apollo1_cssys",
+ "div_atclk_atlas", ENABLE_ACLK_ATLAS,
+ 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ATB_APOLLO0_CSSYS, "aclk_atb_apollo0_cssys",
+ "div_atclk_atlas", ENABLE_ACLK_ATLAS,
+ 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAHBS_CSSYS_SSS, "aclk_asyncahbs_cssys_sss",
+ "div_atclk_atlas", ENABLE_ACLK_ATLAS,
+ 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_CSSYS_CCIX, "aclk_asyncaxis_cssys_ccix",
+ "div_pclk_dbg_atlas", ENABLE_ACLK_ATLAS,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCACES_ATLAS_CCI, "aclk_asyncaces_atlas_cci",
+ "div_aclk_atlas", ENABLE_ACLK_ATLAS,
+ 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_ATLASP, "aclk_ahb2apb_atlasp", "div_pclk_atlas",
+ ENABLE_ACLK_ATLAS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ATLASNP_200, "aclk_atlasnp_200", "div_pclk_atlas",
+ ENABLE_ACLK_ATLAS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_ATLAS */
+ GATE(CLK_PCLK_ASYNCAPB_AUD_CSSYS, "pclk_asyncapb_aud_cssys",
+ "div_pclk_dbg_atlas", ENABLE_PCLK_ATLAS,
+ 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAPB_ISP_CSSYS, "pclk_asyncapb_isp_cssys",
+ "div_pclk_dbg_atlas", ENABLE_PCLK_ATLAS,
+ 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAPB_APOLLO_CSSYS, "pclk_asyncapb_apollo_cssys",
+ "div_pclk_dbg_atlas", ENABLE_PCLK_ATLAS,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_ATLAS, "pclk_pmu_atlas", "div_pclk_atlas",
+ ENABLE_PCLK_ATLAS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_ATLAS, "pclk_sysreg_atlas", "div_pclk_atlas",
+ ENABLE_PCLK_ATLAS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SECJTAG, "pclk_secjtag", "div_pclk_dbg_atlas",
+ ENABLE_PCLK_ATLAS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_ATLAS */
+ GATE(CLK_CNTCLK_ATLAS, "cntclk_atlas", "div_cntclk_atlas",
+ ENABLE_SCLK_ATLAS, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_HPM_ATLAS, "sclk_hpm_atlas", "div_sclk_hpm_atlas",
+ ENABLE_SCLK_ATLAS, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TRACECLK, "traceclk", "div_atclk_atlas",
+ ENABLE_SCLK_ATLAS, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CTMCLK, "ctmclk", "div_atclk_atlas",
+ ENABLE_SCLK_ATLAS, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_HCLK_CSSYS, "hclk_cssys", "div_atclk_atlas",
+ ENABLE_SCLK_ATLAS, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_DBG_CSSYS, "pclk_dbg_cssys", "div_pclk_dbg_atlas",
+ ENABLE_SCLK_ATLAS, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_DBG, "pclk_dbg", "div_pclk_dbg_atlas",
+ ENABLE_SCLK_ATLAS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ATCLK, "atclk", "div_atclk_atlas",
+ ENABLE_SCLK_ATLAS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_ATLAS, "sclk_atlas", "div_atlas2",
+ ENABLE_SCLK_ATLAS, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info atlas_cmu_info __initdata = {
+ .pll_clks = atlas_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(atlas_pll_clks),
+ .mux_clks = atlas_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(atlas_mux_clks),
+ .div_clks = atlas_div_clks,
+ .nr_div_clks = ARRAY_SIZE(atlas_div_clks),
+ .gate_clks = atlas_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(atlas_gate_clks),
+ .nr_clk_ids = ATLAS_NR_CLK,
+ .clk_regs = atlas_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(atlas_clk_regs),
+};
+
+static void __init exynos5433_cmu_atlas_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &atlas_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
+ exynos5433_cmu_atlas_init);
+
+/*
+ * Register offset definitions for CMU_MSCL
+ */
+#define MUX_SEL_MSCL0 0x0200
+#define MUX_SEL_MSCL1 0x0204
+#define MUX_ENABLE_MSCL0 0x0300
+#define MUX_ENABLE_MSCL1 0x0304
+#define MUX_STAT_MSCL0 0x0400
+#define MUX_STAT_MSCL1 0x0404
+#define DIV_MSCL 0x0600
+#define DIV_STAT_MSCL 0x0700
+#define ENABLE_ACLK_MSCL 0x0800
+#define ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER0 0x0804
+#define ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER1 0x0808
+#define ENABLE_ACLK_MSCL_SECURE_SMMU_JPEG 0x080c
+#define ENABLE_PCLK_MSCL 0x0900
+#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0 0x0904
+#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1 0x0908
+#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x090c
+#define ENABLE_SCLK_MSCL 0x0a00
+#define ENABLE_IP_MSCL0 0x0b00
+#define ENABLE_IP_MSCL1 0x0b04
+#define ENABLE_IP_MSCL_SECURE_SMMU_M2MSCALER0 0x0b08
+#define ENABLE_IP_MSCL_SECURE_SMMU_M2MSCALER1 0x0b0c
+#define ENABLE_IP_MSCL_SECURE_SMMU_JPEG 0x0b10
+
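+/*
+ * The *_SECURE_SMMU_* enable registers gate the clocks of the SysMMUs
+ * (IOMMUs) in front of the M2M scalers and the JPEG block; they are saved
+ * and restored together with the other CMU_MSCL registers.
+ */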
+static unsigned long mscl_clk_regs[] __initdata = {
+ MUX_SEL_MSCL0,
+ MUX_SEL_MSCL1,
+ MUX_ENABLE_MSCL0,
+ MUX_ENABLE_MSCL1,
+ MUX_STAT_MSCL0,
+ MUX_STAT_MSCL1,
+ DIV_MSCL,
+ DIV_STAT_MSCL,
+ ENABLE_ACLK_MSCL,
+ ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER0,
+ ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER1,
+ ENABLE_ACLK_MSCL_SECURE_SMMU_JPEG,
+ ENABLE_PCLK_MSCL,
+ ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0,
+ ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1,
+ ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG,
+ ENABLE_SCLK_MSCL,
+ ENABLE_IP_MSCL0,
+ ENABLE_IP_MSCL1,
+ ENABLE_IP_MSCL_SECURE_SMMU_M2MSCALER0,
+ ENABLE_IP_MSCL_SECURE_SMMU_M2MSCALER1,
+ ENABLE_IP_MSCL_SECURE_SMMU_JPEG,
+};
+
+/* list of all parent clocks */
+PNAME(mout_sclk_jpeg_user_p) = { "oscclk", "sclk_jpeg_mscl", };
+PNAME(mout_aclk_mscl_400_user_p) = { "oscclk", "aclk_mscl_400", };
+PNAME(mout_sclk_jpeg_p) = { "mout_sclk_jpeg_user",
+ "mout_aclk_mscl_400_user", };
+
+static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
+ /* MUX_SEL_MSCL0 */
+ MUX(CLK_MOUT_SCLK_JPEG_USER, "mout_sclk_jpeg_user",
+ mout_sclk_jpeg_user_p, MUX_SEL_MSCL0, 4, 1),
+ MUX(CLK_MOUT_ACLK_MSCL_400_USER, "mout_aclk_mscl_400_user",
+ mout_aclk_mscl_400_user_p, MUX_SEL_MSCL0, 0, 1),
+
+ /* MUX_SEL_MSCL1 */
+ MUX(CLK_MOUT_SCLK_JPEG, "mout_sclk_jpeg", mout_sclk_jpeg_p,
+ MUX_SEL_MSCL1, 0, 1),
+};
+
+static struct samsung_div_clock mscl_div_clks[] __initdata = {
+ /* DIV_MSCL */
+ DIV(CLK_DIV_PCLK_MSCL, "div_pclk_mscl", "mout_aclk_mscl_400_user",
+ DIV_MSCL, 0, 3),
+};
+
+static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_MSCL */
+ GATE(CLK_ACLK_BTS_JPEG, "aclk_bts_jpeg", "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL, 9, 0, 0),
+ GATE(CLK_ACLK_BTS_M2MSCALER1, "aclk_bts_m2mscaler1",
+ "mout_aclk_mscl_400_user", ENABLE_ACLK_MSCL, 8, 0, 0),
+ GATE(CLK_ACLK_BTS_M2MSCALER0, "aclk_bts_m2mscaler0",
+ "mout_aclk_mscl_400_user", ENABLE_ACLK_MSCL, 7, 0, 0),
+ GATE(CLK_ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p", "div_pclk_mscl",
+ ENABLE_ACLK_MSCL, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_MSCLX, "aclk_xiu_msclx", "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MSCLNP_100, "aclk_msclnp_100", "div_pclk_mscl",
+ ENABLE_ACLK_MSCL, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MSCLND_400, "aclk_msclnd_400", "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_JPEG, "aclk_jpeg", "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL, 2, 0, 0),
+ GATE(CLK_ACLK_M2MSCALER1, "aclk_m2mscaler1", "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL, 1, 0, 0),
+ GATE(CLK_ACLK_M2MSCALER0, "aclk_m2mscaler0", "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL, 0, 0, 0),
+
+ /* ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER0 */
+ GATE(CLK_ACLK_SMMU_M2MSCALER0, "aclk_smmu_m2mscaler0",
+ "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER0,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER1 */
+ GATE(CLK_ACLK_SMMU_M2MSCALER1, "aclk_smmu_m2mscaler1",
+ "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER1,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_MSCL_SECURE_SMMU_JPEG */
+ GATE(CLK_ACLK_SMMU_JPEG, "aclk_smmu_jpeg", "mout_aclk_mscl_400_user",
+ ENABLE_ACLK_MSCL_SECURE_SMMU_JPEG,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_MSCL */
+ GATE(CLK_PCLK_BTS_JPEG, "pclk_bts_jpeg", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 7, 0, 0),
+ GATE(CLK_PCLK_BTS_M2MSCALER1, "pclk_bts_m2mscaler1", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 6, 0, 0),
+ GATE(CLK_PCLK_BTS_M2MSCALER0, "pclk_bts_m2mscaler0", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 5, 0, 0),
+ GATE(CLK_PCLK_PMU_MSCL, "pclk_pmu_mscl", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_MSCL, "pclk_sysreg_mscl", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_JPEG, "pclk_jpeg", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 2, 0, 0),
+ GATE(CLK_PCLK_M2MSCALER1, "pclk_m2mscaler1", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 1, 0, 0),
+ GATE(CLK_PCLK_M2MSCALER0, "pclk_m2mscaler0", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL, 0, 0, 0),
+
+ /* ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0 */
+ GATE(CLK_PCLK_SMMU_M2MSCALER0, "pclk_smmu_m2mscaler0", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1 */
+ GATE(CLK_PCLK_SMMU_M2MSCALER1, "pclk_smmu_m2mscaler1", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG */
+ GATE(CLK_PCLK_SMMU_JPEG, "pclk_smmu_jpeg", "div_pclk_mscl",
+ ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_MSCL */
+ GATE(CLK_SCLK_JPEG, "sclk_jpeg", "mout_sclk_jpeg", ENABLE_SCLK_MSCL, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+};
+
+static struct samsung_cmu_info mscl_cmu_info __initdata = {
+ .mux_clks = mscl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mscl_mux_clks),
+ .div_clks = mscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mscl_div_clks),
+ .gate_clks = mscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mscl_gate_clks),
+ .nr_clk_ids = MSCL_NR_CLK,
+ .clk_regs = mscl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mscl_clk_regs),
+};
+
+static void __init exynos5433_cmu_mscl_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &mscl_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl",
+ exynos5433_cmu_mscl_init);
+
+/*
+ * Register offset definitions for CMU_MFC
+ */
+#define MUX_SEL_MFC 0x0200
+#define MUX_ENABLE_MFC 0x0300
+#define MUX_STAT_MFC 0x0400
+#define DIV_MFC 0x0600
+#define DIV_STAT_MFC 0x0700
+#define ENABLE_ACLK_MFC 0x0800
+#define ENABLE_ACLK_MFC_SECURE_SMMU_MFC 0x0804
+#define ENABLE_PCLK_MFC 0x0900
+#define ENABLE_PCLK_MFC_SECURE_SMMU_MFC 0x0904
+#define ENABLE_IP_MFC0 0x0b00
+#define ENABLE_IP_MFC1 0x0b04
+#define ENABLE_IP_MFC_SECURE_SMMU_MFC 0x0b08
+
+static unsigned long mfc_clk_regs[] __initdata = {
+ MUX_SEL_MFC,
+ MUX_ENABLE_MFC,
+ MUX_STAT_MFC,
+ DIV_MFC,
+ DIV_STAT_MFC,
+ ENABLE_ACLK_MFC,
+ ENABLE_ACLK_MFC_SECURE_SMMU_MFC,
+ ENABLE_PCLK_MFC,
+ ENABLE_PCLK_MFC_SECURE_SMMU_MFC,
+ ENABLE_IP_MFC0,
+ ENABLE_IP_MFC1,
+ ENABLE_IP_MFC_SECURE_SMMU_MFC,
+};
+
+PNAME(mout_aclk_mfc_400_user_p) = { "oscclk", "aclk_mfc_400", };
+
+static struct samsung_mux_clock mfc_mux_clks[] __initdata = {
+ /* MUX_SEL_MFC */
+ MUX(CLK_MOUT_ACLK_MFC_400_USER, "mout_aclk_mfc_400_user",
+ mout_aclk_mfc_400_user_p, MUX_SEL_MFC, 0, 1),
+};
+
+static struct samsung_div_clock mfc_div_clks[] __initdata = {
+ /* DIV_MFC */
+ DIV(CLK_DIV_PCLK_MFC, "div_pclk_mfc", "mout_aclk_mfc_400_user",
+ DIV_MFC, 0, 2),
+};
+
+static struct samsung_gate_clock mfc_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_MFC */
+ GATE(CLK_ACLK_BTS_MFC_1, "aclk_bts_mfc_1", "mout_aclk_mfc_400_user",
+ ENABLE_ACLK_MFC, 6, 0, 0),
+ GATE(CLK_ACLK_BTS_MFC_0, "aclk_bts_mfc_0", "mout_aclk_mfc_400_user",
+ ENABLE_ACLK_MFC, 5, 0, 0),
+ GATE(CLK_ACLK_AHB2APB_MFCP, "aclk_ahb2apb_mfcp", "div_pclk_mfc",
+ ENABLE_ACLK_MFC, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_MFCX, "aclk_xiu_mfcx", "mout_aclk_mfc_400_user",
+ ENABLE_ACLK_MFC, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MFCNP_100, "aclk_mfcnp_100", "div_pclk_mfc",
+ ENABLE_ACLK_MFC, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MFCND_400, "aclk_mfcnd_400", "mout_aclk_mfc_400_user",
+ ENABLE_ACLK_MFC, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_MFC, "aclk_mfc", "mout_aclk_mfc_400_user",
+ ENABLE_ACLK_MFC, 0, 0, 0),
+
+ /* ENABLE_ACLK_MFC_SECURE_SMMU_MFC */
+ GATE(CLK_ACLK_SMMU_MFC_1, "aclk_smmu_mfc_1", "mout_aclk_mfc_400_user",
+ ENABLE_ACLK_MFC_SECURE_SMMU_MFC,
+ 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_MFC_0, "aclk_smmu_mfc_0", "mout_aclk_mfc_400_user",
+ ENABLE_ACLK_MFC_SECURE_SMMU_MFC,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_MFC */
+ GATE(CLK_PCLK_BTS_MFC_1, "pclk_bts_mfc_1", "div_pclk_mfc",
+ ENABLE_PCLK_MFC, 4, 0, 0),
+ GATE(CLK_PCLK_BTS_MFC_0, "pclk_bts_mfc_0", "div_pclk_mfc",
+ ENABLE_PCLK_MFC, 3, 0, 0),
+ GATE(CLK_PCLK_PMU_MFC, "pclk_pmu_mfc", "div_pclk_mfc",
+ ENABLE_PCLK_MFC, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_MFC, "pclk_sysreg_mfc", "div_pclk_mfc",
+ ENABLE_PCLK_MFC, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_MFC, "pclk_mfc", "div_pclk_mfc",
+ ENABLE_PCLK_MFC, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_MFC_SECURE_SMMU_MFC */
+ GATE(CLK_PCLK_SMMU_MFC_1, "pclk_smmu_mfc_1", "div_pclk_mfc",
+ ENABLE_PCLK_MFC_SECURE_SMMU_MFC,
+ 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_MFC_0, "pclk_smmu_mfc_0", "div_pclk_mfc",
+ ENABLE_PCLK_MFC_SECURE_SMMU_MFC,
+ 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info mfc_cmu_info __initdata = {
+ .mux_clks = mfc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfc_mux_clks),
+ .div_clks = mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfc_div_clks),
+ .gate_clks = mfc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mfc_gate_clks),
+ .nr_clk_ids = MFC_NR_CLK,
+ .clk_regs = mfc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
+};
+
+static void __init exynos5433_cmu_mfc_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &mfc_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc",
+ exynos5433_cmu_mfc_init);
+
+/*
+ * Register offset definitions for CMU_HEVC
+ */
+#define MUX_SEL_HEVC 0x0200
+#define MUX_ENABLE_HEVC 0x0300
+#define MUX_STAT_HEVC 0x0400
+#define DIV_HEVC 0x0600
+#define DIV_STAT_HEVC 0x0700
+#define ENABLE_ACLK_HEVC 0x0800
+#define ENABLE_ACLK_HEVC_SECURE_SMMU_HEVC 0x0804
+#define ENABLE_PCLK_HEVC 0x0900
+#define ENABLE_PCLK_HEVC_SECURE_SMMU_HEVC 0x0904
+#define ENABLE_IP_HEVC0 0x0b00
+#define ENABLE_IP_HEVC1 0x0b04
+#define ENABLE_IP_HEVC_SECURE_SMMU_HEVC 0x0b08
+
+static unsigned long hevc_clk_regs[] __initdata = {
+ MUX_SEL_HEVC,
+ MUX_ENABLE_HEVC,
+ MUX_STAT_HEVC,
+ DIV_HEVC,
+ DIV_STAT_HEVC,
+ ENABLE_ACLK_HEVC,
+ ENABLE_ACLK_HEVC_SECURE_SMMU_HEVC,
+ ENABLE_PCLK_HEVC,
+ ENABLE_PCLK_HEVC_SECURE_SMMU_HEVC,
+ ENABLE_IP_HEVC0,
+ ENABLE_IP_HEVC1,
+ ENABLE_IP_HEVC_SECURE_SMMU_HEVC,
+};
+
+PNAME(mout_aclk_hevc_400_user_p) = { "oscclk", "aclk_hevc_400", };
+
+static struct samsung_mux_clock hevc_mux_clks[] __initdata = {
+ /* MUX_SEL_HEVC */
+ MUX(CLK_MOUT_ACLK_HEVC_400_USER, "mout_aclk_hevc_400_user",
+ mout_aclk_hevc_400_user_p, MUX_SEL_HEVC, 0, 1),
+};
+
+static struct samsung_div_clock hevc_div_clks[] __initdata = {
+ /* DIV_HEVC */
+ DIV(CLK_DIV_PCLK_HEVC, "div_pclk_hevc", "mout_aclk_hevc_400_user",
+ DIV_HEVC, 0, 2),
+};
+
+static struct samsung_gate_clock hevc_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_HEVC */
+ GATE(CLK_ACLK_BTS_HEVC_1, "aclk_bts_hevc_1", "mout_aclk_hevc_400_user",
+ ENABLE_ACLK_HEVC, 6, 0, 0),
+ GATE(CLK_ACLK_BTS_HEVC_0, "aclk_bts_hevc_0", "mout_aclk_hevc_400_user",
+ ENABLE_ACLK_HEVC, 5, 0, 0),
+ GATE(CLK_ACLK_AHB2APB_HEVCP, "aclk_ahb2apb_hevcp", "div_pclk_hevc",
+ ENABLE_ACLK_HEVC, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_HEVCX, "aclk_xiu_hevcx", "mout_aclk_hevc_400_user",
+ ENABLE_ACLK_HEVC, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_HEVCNP_100, "aclk_hevcnp_100", "div_pclk_hevc",
+ ENABLE_ACLK_HEVC, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_HEVCND_400, "aclk_hevcnd_400", "mout_aclk_hevc_400_user",
+ ENABLE_ACLK_HEVC, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_HEVC, "aclk_hevc", "mout_aclk_hevc_400_user",
+ ENABLE_ACLK_HEVC, 0, 0, 0),
+
+ /* ENABLE_ACLK_HEVC_SECURE_SMMU_HEVC */
+ GATE(CLK_ACLK_SMMU_HEVC_1, "aclk_smmu_hevc_1",
+ "mout_aclk_hevc_400_user",
+ ENABLE_ACLK_HEVC_SECURE_SMMU_HEVC,
+ 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_HEVC_0, "aclk_smmu_hevc_0",
+ "mout_aclk_hevc_400_user",
+ ENABLE_ACLK_HEVC_SECURE_SMMU_HEVC,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_HEVC */
+ GATE(CLK_PCLK_BTS_HEVC_1, "pclk_bts_hevc_1", "div_pclk_hevc",
+ ENABLE_PCLK_HEVC, 4, 0, 0),
+ GATE(CLK_PCLK_BTS_HEVC_0, "pclk_bts_hevc_0", "div_pclk_hevc",
+ ENABLE_PCLK_HEVC, 3, 0, 0),
+ GATE(CLK_PCLK_PMU_HEVC, "pclk_pmu_hevc", "div_pclk_hevc",
+ ENABLE_PCLK_HEVC, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_HEVC, "pclk_sysreg_hevc", "div_pclk_hevc",
+ ENABLE_PCLK_HEVC, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_HEVC, "pclk_hevc", "div_pclk_hevc",
+ ENABLE_PCLK_HEVC, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_HEVC_SECURE_SMMU_HEVC */
+ GATE(CLK_PCLK_SMMU_HEVC_1, "pclk_smmu_hevc_1", "div_pclk_hevc",
+ ENABLE_PCLK_HEVC_SECURE_SMMU_HEVC,
+ 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_HEVC_0, "pclk_smmu_hevc_0", "div_pclk_hevc",
+ ENABLE_PCLK_HEVC_SECURE_SMMU_HEVC,
+ 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info hevc_cmu_info __initdata = {
+ .mux_clks = hevc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hevc_mux_clks),
+ .div_clks = hevc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hevc_div_clks),
+ .gate_clks = hevc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hevc_gate_clks),
+ .nr_clk_ids = HEVC_NR_CLK,
+ .clk_regs = hevc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hevc_clk_regs),
+};
+
+static void __init exynos5433_cmu_hevc_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &hevc_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc",
+ exynos5433_cmu_hevc_init);
+
+/*
+ * Register offset definitions for CMU_ISP
+ */
+#define MUX_SEL_ISP 0x0200
+#define MUX_ENABLE_ISP 0x0300
+#define MUX_STAT_ISP 0x0400
+#define DIV_ISP 0x0600
+#define DIV_STAT_ISP 0x0700
+#define ENABLE_ACLK_ISP0 0x0800
+#define ENABLE_ACLK_ISP1 0x0804
+#define ENABLE_ACLK_ISP2 0x0808
+#define ENABLE_PCLK_ISP 0x0900
+#define ENABLE_SCLK_ISP 0x0a00
+#define ENABLE_IP_ISP0 0x0b00
+#define ENABLE_IP_ISP1 0x0b04
+#define ENABLE_IP_ISP2 0x0b08
+#define ENABLE_IP_ISP3 0x0b0c
+
+static unsigned long isp_clk_regs[] __initdata = {
+ MUX_SEL_ISP,
+ MUX_ENABLE_ISP,
+ MUX_STAT_ISP,
+ DIV_ISP,
+ DIV_STAT_ISP,
+ ENABLE_ACLK_ISP0,
+ ENABLE_ACLK_ISP1,
+ ENABLE_ACLK_ISP2,
+ ENABLE_PCLK_ISP,
+ ENABLE_SCLK_ISP,
+ ENABLE_IP_ISP0,
+ ENABLE_IP_ISP1,
+ ENABLE_IP_ISP2,
+ ENABLE_IP_ISP3,
+};
+
+PNAME(mout_aclk_isp_dis_400_user_p) = { "oscclk", "aclk_isp_dis_400", };
+PNAME(mout_aclk_isp_400_user_p) = { "oscclk", "aclk_isp_400", };
+
+static struct samsung_mux_clock isp_mux_clks[] __initdata = {
+ /* MUX_SEL_ISP */
+ MUX(CLK_MOUT_ACLK_ISP_DIS_400_USER, "mout_aclk_isp_dis_400_user",
+ mout_aclk_isp_dis_400_user_p, MUX_SEL_ISP, 4, 1),
+ MUX(CLK_MOUT_ACLK_ISP_400_USER, "mout_aclk_isp_400_user",
+ mout_aclk_isp_400_user_p, MUX_SEL_ISP, 0, 1),
+};
+
+static struct samsung_div_clock isp_div_clks[] __initdata = {
+ /* DIV_ISP */
+ DIV(CLK_DIV_PCLK_ISP_DIS, "div_pclk_isp_dis",
+ "mout_aclk_isp_dis_400_user", DIV_ISP, 12, 3),
+ DIV(CLK_DIV_PCLK_ISP, "div_pclk_isp", "mout_aclk_isp_400_user",
+ DIV_ISP, 8, 3),
+ DIV(CLK_DIV_ACLK_ISP_D_200, "div_aclk_isp_d_200",
+ "mout_aclk_isp_400_user", DIV_ISP, 4, 3),
+ DIV(CLK_DIV_ACLK_ISP_C_200, "div_aclk_isp_c_200",
+ "mout_aclk_isp_400_user", DIV_ISP, 0, 3),
+};
+
+static struct samsung_gate_clock isp_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_ISP0 */
+ GATE(CLK_ACLK_ISP_D_GLUE, "aclk_isp_d_glue", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP0, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SCALERP, "aclk_scalerp", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP0, 5, 0, 0),
+ GATE(CLK_ACLK_3DNR, "aclk_3dnr", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP0, 4, 0, 0),
+ GATE(CLK_ACLK_DIS, "aclk_dis", "mout_aclk_isp_dis_400_user",
+ ENABLE_ACLK_ISP0, 3, 0, 0),
+ GATE(CLK_ACLK_SCALERC, "aclk_scalerc", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP0, 2, 0, 0),
+ GATE(CLK_ACLK_DRC, "aclk_drc", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP0, 1, 0, 0),
+ GATE(CLK_ACLK_ISP, "aclk_isp", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP0, 0, 0, 0),
+
+ /* ENABLE_ACLK_ISP1 */
+ GATE(CLK_ACLK_AXIUS_SCALERP, "aclk_axius_scalerp",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP1,
+ 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_SCALERC, "aclk_axius_scalerc",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP1,
+ 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_DRC, "aclk_axius_drc",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP1,
+ 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAHBM_ISP2P, "aclk_asyncahbm_isp2p",
+ "div_pclk_isp", ENABLE_ACLK_ISP1,
+ 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAHBM_ISP1P, "aclk_asyncahbm_isp1p",
+ "div_pclk_isp", ENABLE_ACLK_ISP1,
+ 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DIS1, "aclk_asyncaxis_dis1",
+ "mout_aclk_isp_dis_400_user", ENABLE_ACLK_ISP1,
+ 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_DIS0, "aclk_asyncaxis_dis0",
+ "mout_aclk_isp_dis_400_user", ENABLE_ACLK_ISP1,
+ 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DIS1, "aclk_asyncaxim_dis1",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP1,
+ 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_DIS0, "aclk_asyncaxim_dis0",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP1,
+ 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_ISP2P, "aclk_asyncaxim_isp2p",
+ "div_aclk_isp_d_200", ENABLE_ACLK_ISP1,
+ 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_ISP1P, "aclk_asyncaxim_isp1p",
+ "div_aclk_isp_c_200", ENABLE_ACLK_ISP1,
+ 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_ISP2P, "aclk_ahb2apb_isp2p", "div_pclk_isp",
+ ENABLE_ACLK_ISP1, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_ISP1P, "aclk_ahb2apb_isp1p", "div_pclk_isp",
+ ENABLE_ACLK_ISP1, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI2APB_ISP2P, "aclk_axi2apb_isp2p",
+ "div_aclk_isp_d_200", ENABLE_ACLK_ISP1,
+ 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI2APB_ISP1P, "aclk_axi2apb_isp1p",
+ "div_aclk_isp_c_200", ENABLE_ACLK_ISP1,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_ISPEX1, "aclk_xiu_ispex1", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP1, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_ISPEX0, "aclk_xiu_ispex0", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP1, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ISPND_400, "aclk_ispnd_400", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP1, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_ISP2 */
+ GATE(CLK_ACLK_SMMU_SCALERP, "aclk_smmu_scalerp",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP2,
+ 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_3DNR, "aclk_smmu_3dnr", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_DIS1, "aclk_smmu_dis1", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_DIS0, "aclk_smmu_dis0", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_SCALERC, "aclk_smmu_scalerc",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP2,
+ 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_DRC, "aclk_smmu_drc", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_ISP, "aclk_smmu_isp", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_SCALERP, "aclk_bts_scalerp",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP2,
+ 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_3DR, "aclk_bts_3dnr", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_DIS1, "aclk_bts_dis1", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_DIS0, "aclk_bts_dis0", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_SCALERC, "aclk_bts_scalerc",
+ "mout_aclk_isp_400_user", ENABLE_ACLK_ISP2,
+ 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_DRC, "aclk_bts_drc", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_ISP, "aclk_bts_isp", "mout_aclk_isp_400_user",
+ ENABLE_ACLK_ISP2, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_ISP */
+ GATE(CLK_PCLK_SMMU_SCALERP, "pclk_smmu_scalerp", "div_aclk_isp_d_200",
+ ENABLE_PCLK_ISP, 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_3DNR, "pclk_smmu_3dnr", "div_aclk_isp_d_200",
+ ENABLE_PCLK_ISP, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_DIS1, "pclk_smmu_dis1", "div_aclk_isp_d_200",
+ ENABLE_PCLK_ISP, 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_DIS0, "pclk_smmu_dis0", "div_aclk_isp_d_200",
+ ENABLE_PCLK_ISP, 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_SCALERC, "pclk_smmu_scalerc", "div_aclk_isp_c_200",
+ ENABLE_PCLK_ISP, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_DRC, "pclk_smmu_drc", "div_aclk_isp_c_200",
+ ENABLE_PCLK_ISP, 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_ISP, "pclk_smmu_isp", "div_aclk_isp_c_200",
+ ENABLE_PCLK_ISP, 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_SCALERP, "pclk_bts_scalerp", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_3DNR, "pclk_bts_3dnr", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_DIS1, "pclk_bts_dis1", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_DIS0, "pclk_bts_dis0", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_SCALERC, "pclk_bts_scalerc", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_DRC, "pclk_bts_drc", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_ISP, "pclk_bts_isp", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DIS1, "pclk_asyncaxi_dis1", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_DIS0, "pclk_asyncaxi_dis0", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_ISP, "pclk_pmu_isp", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_ISP, "pclk_sysreg_isp", "div_pclk_isp",
+ ENABLE_PCLK_ISP, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_CMU_ISP_LOCAL, "pclk_cmu_isp_local",
+ "div_aclk_isp_c_200", ENABLE_PCLK_ISP,
+ 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SCALERP, "pclk_scalerp", "div_aclk_isp_d_200",
+ ENABLE_PCLK_ISP, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_3DNR, "pclk_3dnr", "div_aclk_isp_d_200",
+ ENABLE_PCLK_ISP, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_DIS_CORE, "pclk_dis_core", "div_pclk_isp_dis",
+ ENABLE_PCLK_ISP, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_DIS, "pclk_dis", "div_aclk_isp_d_200",
+ ENABLE_PCLK_ISP, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SCALERC, "pclk_scalerc", "div_aclk_isp_c_200",
+ ENABLE_PCLK_ISP, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_DRC, "pclk_drc", "div_aclk_isp_c_200",
+ ENABLE_PCLK_ISP, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP, "pclk_isp", "div_aclk_isp_c_200",
+ ENABLE_PCLK_ISP, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_ISP */
+ GATE(CLK_SCLK_PIXELASYNCS_DIS, "sclk_pixelasyncs_dis",
+ "mout_aclk_isp_dis_400_user", ENABLE_SCLK_ISP,
+ 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_DIS, "sclk_pixelasyncm_dis",
+ "mout_aclk_isp_dis_400_user", ENABLE_SCLK_ISP,
+ 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_PIXELASYNCS_SCALERP, "sclk_pixelasyncs_scalerp",
+ "mout_aclk_isp_400_user", ENABLE_SCLK_ISP,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_ISPD, "sclk_pixelasyncm_ispd",
+ "mout_aclk_isp_400_user", ENABLE_SCLK_ISP,
+ 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_PIXELASYNCS_ISPC, "sclk_pixelasyncs_ispc",
+ "mout_aclk_isp_400_user", ENABLE_SCLK_ISP,
+ 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_ISPC, "sclk_pixelasyncm_ispc",
+ "mout_aclk_isp_400_user", ENABLE_SCLK_ISP,
+ 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static struct samsung_cmu_info isp_cmu_info __initdata = {
+ .mux_clks = isp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(isp_mux_clks),
+ .div_clks = isp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(isp_div_clks),
+ .gate_clks = isp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(isp_gate_clks),
+ .nr_clk_ids = ISP_NR_CLK,
+ .clk_regs = isp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(isp_clk_regs),
+};
+
+static void __init exynos5433_cmu_isp_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &isp_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_isp, "samsung,exynos5433-cmu-isp",
+ exynos5433_cmu_isp_init);
+
+/*
+ * Register offset definitions for CMU_CAM0
+ */
+#define MUX_SEL_CAM00 0x0200
+#define MUX_SEL_CAM01 0x0204
+#define MUX_SEL_CAM02 0x0208
+#define MUX_SEL_CAM03 0x020c
+#define MUX_SEL_CAM04 0x0210
+#define MUX_ENABLE_CAM00 0x0300
+#define MUX_ENABLE_CAM01 0x0304
+#define MUX_ENABLE_CAM02 0x0308
+#define MUX_ENABLE_CAM03 0x030c
+#define MUX_ENABLE_CAM04 0x0310
+#define MUX_STAT_CAM00 0x0400
+#define MUX_STAT_CAM01 0x0404
+#define MUX_STAT_CAM02 0x0408
+#define MUX_STAT_CAM03 0x040c
+#define MUX_STAT_CAM04 0x0410
+#define MUX_IGNORE_CAM01 0x0504
+#define DIV_CAM00 0x0600
+#define DIV_CAM01 0x0604
+#define DIV_CAM02 0x0608
+#define DIV_CAM03 0x060c
+#define DIV_STAT_CAM00 0x0700
+#define DIV_STAT_CAM01 0x0704
+#define DIV_STAT_CAM02 0x0708
+#define DIV_STAT_CAM03 0x070c
+#define ENABLE_ACLK_CAM00 0x0800
+#define ENABLE_ACLK_CAM01 0x0804
+#define ENABLE_ACLK_CAM02 0x0808
+#define ENABLE_PCLK_CAM0 0x0900
+#define ENABLE_SCLK_CAM0 0x0a00
+#define ENABLE_IP_CAM00 0x0b00
+#define ENABLE_IP_CAM01 0x0b04
+#define ENABLE_IP_CAM02 0x0b08
+#define ENABLE_IP_CAM03 0x0b0c
+
+static unsigned long cam0_clk_regs[] __initdata = {
+ MUX_SEL_CAM00,
+ MUX_SEL_CAM01,
+ MUX_SEL_CAM02,
+ MUX_SEL_CAM03,
+ MUX_SEL_CAM04,
+ MUX_ENABLE_CAM00,
+ MUX_ENABLE_CAM01,
+ MUX_ENABLE_CAM02,
+ MUX_ENABLE_CAM03,
+ MUX_ENABLE_CAM04,
+ MUX_STAT_CAM00,
+ MUX_STAT_CAM01,
+ MUX_STAT_CAM02,
+ MUX_STAT_CAM03,
+ MUX_STAT_CAM04,
+ MUX_IGNORE_CAM01,
+ DIV_CAM00,
+ DIV_CAM01,
+ DIV_CAM02,
+ DIV_CAM03,
+ DIV_STAT_CAM00,
+ DIV_STAT_CAM01,
+ DIV_STAT_CAM02,
+ DIV_STAT_CAM03,
+ ENABLE_ACLK_CAM00,
+ ENABLE_ACLK_CAM01,
+ ENABLE_ACLK_CAM02,
+ ENABLE_PCLK_CAM0,
+ ENABLE_SCLK_CAM0,
+ ENABLE_IP_CAM00,
+ ENABLE_IP_CAM01,
+ ENABLE_IP_CAM02,
+ ENABLE_IP_CAM03,
+};
+
+PNAME(mout_aclk_cam0_333_user_p) = { "oscclk", "aclk_cam0_333", };
+PNAME(mout_aclk_cam0_400_user_p) = { "oscclk", "aclk_cam0_400", };
+PNAME(mout_aclk_cam0_552_user_p) = { "oscclk", "aclk_cam0_552", };
+
+PNAME(mout_phyclk_rxbyteclkhs0_s4_user_p) = { "oscclk",
+ "phyclk_rxbyteclkhs0_s4_phy", };
+PNAME(mout_phyclk_rxbyteclkhs0_s2a_user_p) = { "oscclk",
+ "phyclk_rxbyteclkhs0_s2a_phy", };
+
+PNAME(mout_aclk_lite_d_b_p) = { "mout_aclk_lite_d_a",
+ "mout_aclk_cam0_333_user", };
+PNAME(mout_aclk_lite_d_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+PNAME(mout_aclk_lite_b_b_p) = { "mout_aclk_lite_b_a",
+ "mout_aclk_cam0_333_user", };
+PNAME(mout_aclk_lite_b_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+PNAME(mout_aclk_lite_a_b_p) = { "mout_aclk_lite_a_a",
+ "mout_aclk_cam0_333_user", };
+PNAME(mout_aclk_lite_a_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+PNAME(mout_aclk_cam0_400_p) = { "mout_aclk_cam0_400_user",
+ "mout_aclk_cam0_333_user", };
+
+PNAME(mout_aclk_csis1_b_p) = { "mout_aclk_csis1_a",
+ "mout_aclk_cam0_333_user" };
+PNAME(mout_aclk_csis1_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+PNAME(mout_aclk_csis0_b_p) = { "mout_aclk_csis0_a",
+ "mout_aclk_cam0_333_user", };
+PNAME(mout_aclk_csis0_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk-cam0_400_user", };
+PNAME(mout_aclk_3aa1_b_p) = { "mout_aclk_3aa1_a",
+ "mout_aclk_cam0_333_user", };
+PNAME(mout_aclk_3aa1_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+PNAME(mout_aclk_3aa0_b_p) = { "mout_aclk_3aa0_a",
+ "mout_aclk_cam0_333_user", };
+PNAME(mout_aclk_3aa0_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+
+PNAME(mout_sclk_lite_freecnt_c_p) = { "mout_sclk_lite_freecnt_b",
+ "div_pclk_lite_d", };
+PNAME(mout_sclk_lite_freecnt_b_p) = { "mout_sclk_lite_freecnt_a",
+ "div_pclk_pixelasync_lite_c", };
+PNAME(mout_sclk_lite_freecnt_a_p) = { "div_pclk_lite_a",
+ "div_pclk_lite_b", };
+PNAME(mout_sclk_pixelasync_lite_c_b_p) = { "mout_sclk_pixelasync_lite_c_a",
+ "mout_aclk_cam0_333_user", };
+PNAME(mout_sclk_pixelasync_lite_c_a_p) = { "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+PNAME(mout_sclk_pixelasync_lite_c_init_b_p) = {
+ "mout_sclk_pixelasync_lite_c_init_a",
+ "mout_aclk_cam0_400_user", };
+PNAME(mout_sclk_pixelasync_lite_c_init_a_p) = {
+ "mout_aclk_cam0_552_user",
+ "mout_aclk_cam0_400_user", };
+
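+/*
+ * The MIPI CSI-2 PHY byte clocks are produced by the PHY itself, so they
+ * are modelled here as fixed-rate root clocks; the 100 MHz value is a
+ * nominal rate rather than something this CMU controls.
+ */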
+static struct samsung_fixed_rate_clock cam0_fixed_clks[] __initdata = {
+ FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY, "phyclk_rxbyteclkhs0_s4_phy",
+ NULL, CLK_IS_ROOT, 100000000),
+ FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY, "phyclk_rxbyteclkhs0_s2a_phy",
+ NULL, CLK_IS_ROOT, 100000000),
+};
+
+static struct samsung_mux_clock cam0_mux_clks[] __initdata = {
+ /* MUX_SEL_CAM00 */
+ MUX(CLK_MOUT_ACLK_CAM0_333_USER, "mout_aclk_cam0_333_user",
+ mout_aclk_cam0_333_user_p, MUX_SEL_CAM00, 8, 1),
+ MUX(CLK_MOUT_ACLK_CAM0_400_USER, "mout_aclk_cam0_400_user",
+ mout_aclk_cam0_400_user_p, MUX_SEL_CAM00, 4, 1),
+ MUX(CLK_MOUT_ACLK_CAM0_552_USER, "mout_aclk_cam0_552_user",
+ mout_aclk_cam0_552_user_p, MUX_SEL_CAM00, 0, 1),
+
+ /* MUX_SEL_CAM01 */
+ MUX(CLK_MOUT_PHYCLK_RXBYTECLKHS0_S4_USER,
+ "mout_phyclk_rxbyteclkhs0_s4_user",
+ mout_phyclk_rxbyteclkhs0_s4_user_p,
+ MUX_SEL_CAM01, 4, 1),
+ MUX(CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2A_USER,
+ "mout_phyclk_rxbyteclkhs0_s2a_user",
+ mout_phyclk_rxbyteclkhs0_s2a_user_p,
+ MUX_SEL_CAM01, 0, 1),
+
+ /* MUX_SEL_CAM02 */
+ MUX(CLK_MOUT_ACLK_LITE_D_B, "mout_aclk_lite_d_b", mout_aclk_lite_d_b_p,
+ MUX_SEL_CAM02, 24, 1),
+ MUX(CLK_MOUT_ACLK_LITE_D_A, "mout_aclk_lite_d_a", mout_aclk_lite_d_a_p,
+ MUX_SEL_CAM02, 20, 1),
+ MUX(CLK_MOUT_ACLK_LITE_B_B, "mout_aclk_lite_b_b", mout_aclk_lite_b_b_p,
+ MUX_SEL_CAM02, 16, 1),
+ MUX(CLK_MOUT_ACLK_LITE_B_A, "mout_aclk_lite_b_a", mout_aclk_lite_b_a_p,
+ MUX_SEL_CAM02, 12, 1),
+ MUX(CLK_MOUT_ACLK_LITE_A_B, "mout_aclk_lite_a_b", mout_aclk_lite_a_b_p,
+ MUX_SEL_CAM02, 8, 1),
+ MUX(CLK_MOUT_ACLK_LITE_A_A, "mout_aclk_lite_a_a", mout_aclk_lite_a_a_p,
+ MUX_SEL_CAM02, 4, 1),
+ MUX(CLK_MOUT_ACLK_CAM0_400, "mout_aclk_cam0_400", mout_aclk_cam0_400_p,
+ MUX_SEL_CAM02, 0, 1),
+
+ /* MUX_SEL_CAM03 */
+ MUX(CLK_MOUT_ACLK_CSIS1_B, "mout_aclk_csis1_b", mout_aclk_csis1_b_p,
+ MUX_SEL_CAM03, 28, 1),
+ MUX(CLK_MOUT_ACLK_CSIS1_A, "mout_aclk_csis1_a", mout_aclk_csis1_a_p,
+ MUX_SEL_CAM03, 24, 1),
+ MUX(CLK_MOUT_ACLK_CSIS0_B, "mout_aclk_csis0_b", mout_aclk_csis0_b_p,
+ MUX_SEL_CAM03, 20, 1),
+ MUX(CLK_MOUT_ACLK_CSIS0_A, "mout_aclk_csis0_a", mout_aclk_csis0_a_p,
+ MUX_SEL_CAM03, 16, 1),
+ MUX(CLK_MOUT_ACLK_3AA1_B, "mout_aclk_3aa1_b", mout_aclk_3aa1_b_p,
+ MUX_SEL_CAM03, 12, 1),
+ MUX(CLK_MOUT_ACLK_3AA1_A, "mout_aclk_3aa1_a", mout_aclk_3aa1_a_p,
+ MUX_SEL_CAM03, 8, 1),
+ MUX(CLK_MOUT_ACLK_3AA0_B, "mout_aclk_3aa0_b", mout_aclk_3aa0_b_p,
+ MUX_SEL_CAM03, 4, 1),
+ MUX(CLK_MOUT_ACLK_3AA0_A, "mout_aclk_3aa0_a", mout_aclk_3aa0_a_p,
+ MUX_SEL_CAM03, 0, 1),
+
+ /* MUX_SEL_CAM04 */
+ MUX(CLK_MOUT_SCLK_LITE_FREECNT_C, "mout_sclk_lite_freecnt_c",
+ mout_sclk_lite_freecnt_c_p, MUX_SEL_CAM04, 24, 1),
+ MUX(CLK_MOUT_SCLK_LITE_FREECNT_B, "mout_sclk_lite_freecnt_b",
+ mout_sclk_lite_freecnt_b_p, MUX_SEL_CAM04, 20, 1),
+ MUX(CLK_MOUT_SCLK_LITE_FREECNT_A, "mout_sclk_lite_freecnt_a",
+ mout_sclk_lite_freecnt_a_p, MUX_SEL_CAM04, 16, 1),
+ MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B, "mout_sclk_pixelasync_lite_c_b",
+ mout_sclk_pixelasync_lite_c_b_p, MUX_SEL_CAM04, 12, 1),
+ MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A, "mout_sclk_pixelasync_lite_c_a",
+ mout_sclk_pixelasync_lite_c_a_p, MUX_SEL_CAM04, 8, 1),
+ MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B,
+ "mout_sclk_pixelasync_lite_c_init_b",
+ mout_sclk_pixelasync_lite_c_init_b_p,
+ MUX_SEL_CAM04, 4, 1),
+ MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A,
+ "mout_sclk_pixelasync_lite_c_init_a",
+ mout_sclk_pixelasync_lite_c_init_a_p,
+ MUX_SEL_CAM04, 0, 1),
+};
+
+static struct samsung_div_clock cam0_div_clks[] __initdata = {
+ /* DIV_CAM00 */
+ DIV(CLK_DIV_PCLK_CAM0_50, "div_pclk_cam0_50", "div_aclk_cam0_200",
+ DIV_CAM00, 8, 2),
+ DIV(CLK_DIV_ACLK_CAM0_200, "div_aclk_cam0_200", "mout_aclk_cam0_400",
+ DIV_CAM00, 4, 3),
+ DIV(CLK_DIV_ACLK_CAM0_BUS_400, "div_aclk_cam0_bus_400",
+ "mout_aclk_cam0_400", DIV_CAM00, 0, 3),
+
+ /* DIV_CAM01 */
+ DIV(CLK_DIV_PCLK_LITE_D, "div_pclk_lite_d", "div_aclk_lite_d",
+ DIV_CAM01, 20, 2),
+ DIV(CLK_DIV_ACLK_LITE_D, "div_aclk_lite_d", "mout_aclk_lite_d_b",
+ DIV_CAM01, 16, 3),
+ DIV(CLK_DIV_PCLK_LITE_B, "div_pclk_lite_b", "div_aclk_lite_b",
+ DIV_CAM01, 12, 2),
+ DIV(CLK_DIV_ACLK_LITE_B, "div_aclk_lite_b", "mout_aclk_lite_b_b",
+ DIV_CAM01, 8, 3),
+ DIV(CLK_DIV_PCLK_LITE_A, "div_pclk_lite_a", "div_aclk_lite_a",
+ DIV_CAM01, 4, 2),
+ DIV(CLK_DIV_ACLK_LITE_A, "div_aclk_lite_a", "mout_aclk_lite_a_b",
+ DIV_CAM01, 0, 3),
+
+ /* DIV_CAM02 */
+ DIV(CLK_DIV_ACLK_CSIS1, "div_aclk_csis1", "mout_aclk_csis1_b",
+ DIV_CAM02, 20, 3),
+ DIV(CLK_DIV_ACLK_CSIS0, "div_aclk_csis0", "mout_aclk_csis0_b",
+ DIV_CAM02, 16, 3),
+ DIV(CLK_DIV_PCLK_3AA1, "div_pclk_3aa1", "div_aclk_3aa1",
+ DIV_CAM02, 12, 2),
+ DIV(CLK_DIV_ACLK_3AA1, "div_aclk_3aa1", "mout_aclk_3aa1_b",
+ DIV_CAM02, 8, 3),
+ DIV(CLK_DIV_PCLK_3AA0, "div_pclk_3aa0", "div_aclk_3aa0",
+ DIV_CAM02, 4, 2),
+ DIV(CLK_DIV_ACLK_3AA0, "div_aclk_3aa0", "mout_aclk_3aa0_b",
+ DIV_CAM02, 0, 3),
+
+ /* DIV_CAM03 */
+ DIV(CLK_DIV_SCLK_PIXELASYNC_LITE_C, "div_sclk_pixelasync_lite_c",
+ "mout_sclk_pixelasync_lite_c_b", DIV_CAM03, 8, 3),
+ DIV(CLK_DIV_PCLK_PIXELASYNC_LITE_C, "div_pclk_pixelasync_lite_c",
+ "div_sclk_pixelasync_lite_c_init", DIV_CAM03, 4, 2),
+ DIV(CLK_DIV_SCLK_PIXELASYNC_LITE_C_INIT,
+ "div_sclk_pixelasync_lite_c_init",
+ "mout_sclk_pixelasync_lite_c_init_b", DIV_CAM03, 0, 3),
+};
+
+static struct samsung_gate_clock cam0_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_CAM00 */
+ GATE(CLK_ACLK_CSIS1, "aclk_csis1", "div_aclk_csis1", ENABLE_ACLK_CAM00,
+ 6, 0, 0),
+ GATE(CLK_ACLK_CSIS0, "aclk_csis0", "div_aclk_csis0", ENABLE_ACLK_CAM00,
+ 5, 0, 0),
+ GATE(CLK_ACLK_3AA1, "aclk_3aa1", "div_aclk_3aa1", ENABLE_ACLK_CAM00,
+ 4, 0, 0),
+ GATE(CLK_ACLK_3AA0, "aclk_3aa0", "div_aclk_3aa0", ENABLE_ACLK_CAM00,
+ 3, 0, 0),
+ GATE(CLK_ACLK_LITE_D, "aclk_lite_d", "div_aclk_lite_d",
+ ENABLE_ACLK_CAM00, 2, 0, 0),
+ GATE(CLK_ACLK_LITE_B, "aclk_lite_b", "div_aclk_lite_b",
+ ENABLE_ACLK_CAM00, 1, 0, 0),
+ GATE(CLK_ACLK_LITE_A, "aclk_lite_a", "div_aclk_lite_a",
+ ENABLE_ACLK_CAM00, 0, 0, 0),
+
+ /* ENABLE_ACLK_CAM01 */
+ GATE(CLK_ACLK_AHBSYNCDN, "aclk_ahbsyncdn", "div_aclk_cam0_200",
+ ENABLE_ACLK_CAM01, 31, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_LITE_D, "aclk_axius_lite_d", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM01, 30, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_LITE_B, "aclk_axius_lite_b", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM01, 29, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_LITE_A, "aclk_axius_lite_a", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM01, 28, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBM_3AA1, "aclk_asyncapbm_3aa1", "div_pclk_3aa1",
+ ENABLE_ACLK_CAM01, 27, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_3AA1, "aclk_asyncapbs_3aa1", "div_aclk_3aa1",
+ ENABLE_ACLK_CAM01, 26, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBM_3AA0, "aclk_asyncapbm_3aa0", "div_pclk_3aa0",
+ ENABLE_ACLK_CAM01, 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_3AA0, "aclk_asyncapbs_3aa0", "div_aclk_3aa0",
+ ENABLE_ACLK_CAM01, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBM_LITE_D, "aclk_asyncapbm_lite_d",
+ "div_pclk_lite_d", ENABLE_ACLK_CAM01,
+ 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_LITE_D, "aclk_asyncapbs_lite_d",
+ "div_aclk_cam0_200", ENABLE_ACLK_CAM01,
+ 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBM_LITE_B, "aclk_asyncapbm_lite_b",
+ "div_pclk_lite_b", ENABLE_ACLK_CAM01,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_LITE_B, "aclk_asyncapbs_lite_b",
+ "div_aclk_cam0_200", ENABLE_ACLK_CAM01,
+ 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBM_LITE_A, "aclk_asyncapbm_lite_a",
+ "div_pclk_lite_a", ENABLE_ACLK_CAM01,
+ 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_LITE_A, "aclk_asyncapbs_lite_a",
+ "div_aclk_cam0_200", ENABLE_ACLK_CAM01,
+ 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_ISP0P, "aclk_asyncaxim_isp0p",
+ "div_aclk_cam0_200", ENABLE_ACLK_CAM01,
+ 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_3AA1, "aclk_asyncaxim_3aa1",
+ "div_aclk_cam0_bus_400", ENABLE_ACLK_CAM01,
+ 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_3AA1, "aclk_asyncaxis_3aa1",
+ "div_aclk_3aa1", ENABLE_ACLK_CAM01,
+ 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_3AA0, "aclk_asyncaxim_3aa0",
+ "div_aclk_cam0_bus_400", ENABLE_ACLK_CAM01,
+ 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_3AA0, "aclk_asyncaxis_3aa0",
+ "div_aclk_3aa0", ENABLE_ACLK_CAM01,
+ 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_LITE_D, "aclk_asyncaxim_lite_d",
+ "div_aclk_cam0_bus_400", ENABLE_ACLK_CAM01,
+ 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_LITE_D, "aclk_asyncaxis_lite_d",
+ "div_aclk_lite_d", ENABLE_ACLK_CAM01,
+ 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_LITE_B, "aclk_asyncaxim_lite_b",
+ "div_aclk_cam0_bus_400", ENABLE_ACLK_CAM01,
+ 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_LITE_B, "aclk_asyncaxis_lite_b",
+ "div_aclk_lite_b", ENABLE_ACLK_CAM01,
+ 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_LITE_A, "aclk_asyncaxim_lite_a",
+ "div_aclk_cam0_bus_400", ENABLE_ACLK_CAM01,
+ 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_LITE_A, "aclk_asyncaxis_lite_a",
+ "div_aclk_lite_a", ENABLE_ACLK_CAM01,
+ 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_ISPSFRP, "aclk_ahb2apb_ispsfrp",
+ "div_pclk_cam0_50", ENABLE_ACLK_CAM01,
+ 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI2APB_ISP0P, "aclk_axi2apb_isp0p", "div_aclk_cam0_200",
+ ENABLE_ACLK_CAM01, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI2AHB_ISP0P, "aclk_axi2ahb_isp0p", "div_aclk_cam0_200",
+ ENABLE_ACLK_CAM01, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_IS0X, "aclk_xiu_is0x", "div_aclk_cam0_200",
+ ENABLE_ACLK_CAM01, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_ISP0EX, "aclk_xiu_isp0ex", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM01, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM0NP_276, "aclk_cam0np_276", "div_aclk_cam0_200",
+ ENABLE_ACLK_CAM01, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM0ND_400, "aclk_cam0nd_400", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM01, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_CAM02 */
+ GATE(CLK_ACLK_SMMU_3AA1, "aclk_smmu_3aa1", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_3AA0, "aclk_smmu_3aa0", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_LITE_D, "aclk_smmu_lite_d", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_LITE_B, "aclk_smmu_lite_b", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_LITE_A, "aclk_smmu_lite_a", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_3AA1, "aclk_bts_3aa1", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_3AA0, "aclk_bts_3aa0", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_LITE_D, "aclk_bts_lite_d", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_LITE_B, "aclk_bts_lite_b", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_LITE_A, "aclk_bts_lite_a", "div_aclk_cam0_bus_400",
+ ENABLE_ACLK_CAM02, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_CAM0 */
+ GATE(CLK_PCLK_SMMU_3AA1, "pclk_smmu_3aa1", "div_aclk_cam0_200",
+ ENABLE_PCLK_CAM0, 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_3AA0, "pclk_smmu_3aa0", "div_aclk_cam0_200",
+ ENABLE_PCLK_CAM0, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_LITE_D, "pclk_smmu_lite_d", "div_aclk_cam0_200",
+ ENABLE_PCLK_CAM0, 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_LITE_B, "pclk_smmu_lite_b", "div_aclk_cam0_200",
+ ENABLE_PCLK_CAM0, 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_LITE_A, "pclk_smmu_lite_a", "div_aclk_cam0_200",
+ ENABLE_PCLK_CAM0, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_3AA1, "pclk_bts_3aa1", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_3AA0, "pclk_bts_3aa0", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_LITE_D, "pclk_bts_lite_d", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_LITE_B, "pclk_bts_lite_b", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_LITE_A, "pclk_bts_lite_a", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_CAM1, "pclk_asyncaxi_cam1", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_3AA1, "pclk_asyncaxi_3aa1", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_3AA0, "pclk_asyncaxi_3aa0", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_LITE_D, "pclk_asyncaxi_lite_d",
+ "div_pclk_cam0_50", ENABLE_PCLK_CAM0,
+ 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_LITE_B, "pclk_asyncaxi_lite_b",
+ "div_pclk_cam0_50", ENABLE_PCLK_CAM0,
+ 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXI_LITE_A, "pclk_asyncaxi_lite_a",
+ "div_pclk_cam0_50", ENABLE_PCLK_CAM0,
+ 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_CAM0, "pclk_pmu_cam0", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_CAM0, "pclk_sysreg_cam0", "div_pclk_cam0_50",
+ ENABLE_PCLK_CAM0, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_CMU_CAM0_LOCAL, "pclk_cmu_cam0_local",
+ "div_aclk_cam0_200", ENABLE_PCLK_CAM0,
+ 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_CSIS1, "pclk_csis1", "div_aclk_cam0_200",
+ ENABLE_PCLK_CAM0, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_CSIS0, "pclk_csis0", "div_aclk_cam0_200",
+ ENABLE_PCLK_CAM0, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_3AA1, "pclk_3aa1", "div_pclk_3aa1",
+ ENABLE_PCLK_CAM0, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_3AA0, "pclk_3aa0", "div_pclk_3aa0",
+ ENABLE_PCLK_CAM0, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_LITE_D, "pclk_lite_d", "div_pclk_lite_d",
+ ENABLE_PCLK_CAM0, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_LITE_B, "pclk_lite_b", "div_pclk_lite_b",
+ ENABLE_PCLK_CAM0, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_LITE_A, "pclk_lite_a", "div_pclk_lite_a",
+ ENABLE_PCLK_CAM0, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_CAM0 */
+ GATE(CLK_PHYCLK_RXBYTECLKHS0_S4, "phyclk_rxbyteclkhs0_s4",
+ "mout_phyclk_rxbyteclkhs0_s4_user",
+ ENABLE_SCLK_CAM0, 8, 0, 0),
+ GATE(CLK_PHYCLK_RXBYTECLKHS0_S2A, "phyclk_rxbyteclkhs0_s2a",
+ "mout_phyclk_rxbyteclkhs0_s2a_user",
+ ENABLE_SCLK_CAM0, 7, 0, 0),
+ GATE(CLK_SCLK_LITE_FREECNT, "sclk_lite_freecnt",
+ "mout_sclk_lite_freecnt_c", ENABLE_SCLK_CAM0, 6, 0, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_3AA1, "sclk_pixelasyncm_3aa1",
+ "div_aclk_3aa1", ENABLE_SCLK_CAM0, 5, 0, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_3AA0, "sclk_pixelasyncm_3aa0",
+ "div_aclk_3aa0", ENABLE_SCLK_CAM0, 4, 0, 0),
+ GATE(CLK_SCLK_PIXELASYNCS_3AA0, "sclk_pixelasyncs_3aa0",
+ "div_aclk_3aa0", ENABLE_SCLK_CAM0, 3, 0, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_LITE_C, "sclk_pixelasyncm_lite_c",
+ "div_sclk_pixelasync_lite_c",
+ ENABLE_SCLK_CAM0, 2, 0, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_LITE_C_INIT, "sclk_pixelasyncm_lite_c_init",
+ "div_sclk_pixelasync_lite_c_init",
+ ENABLE_SCLK_CAM0, 1, 0, 0),
+ GATE(CLK_SCLK_PIXELASYNCS_LITE_C_INIT, "sclk_pixelasyncs_lite_c_init",
+ "div_sclk_pixelasync_lite_c",
+ ENABLE_SCLK_CAM0, 0, 0, 0),
+};
+
+static struct samsung_cmu_info cam0_cmu_info __initdata = {
+ .mux_clks = cam0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cam0_mux_clks),
+ .div_clks = cam0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cam0_div_clks),
+ .gate_clks = cam0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cam0_gate_clks),
+ .fixed_clks = cam0_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(cam0_fixed_clks),
+ .nr_clk_ids = CAM0_NR_CLK,
+ .clk_regs = cam0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cam0_clk_regs),
+};
+
+static void __init exynos5433_cmu_cam0_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cam0_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_cam0, "samsung,exynos5433-cmu-cam0",
+ exynos5433_cmu_cam0_init);
+
+/*
+ * Register offset definitions for CMU_CAM1
+ */
+#define MUX_SEL_CAM10 0x0200
+#define MUX_SEL_CAM11 0x0204
+#define MUX_SEL_CAM12 0x0208
+#define MUX_ENABLE_CAM10 0x0300
+#define MUX_ENABLE_CAM11 0x0304
+#define MUX_ENABLE_CAM12 0x0308
+#define MUX_STAT_CAM10 0x0400
+#define MUX_STAT_CAM11 0x0404
+#define MUX_STAT_CAM12 0x0408
+#define MUX_IGNORE_CAM11 0x0504
+#define DIV_CAM10 0x0600
+#define DIV_CAM11 0x0604
+#define DIV_STAT_CAM10 0x0700
+#define DIV_STAT_CAM11 0x0704
+#define ENABLE_ACLK_CAM10 0x0800
+#define ENABLE_ACLK_CAM11 0x0804
+#define ENABLE_ACLK_CAM12 0x0808
+#define ENABLE_PCLK_CAM1 0x0900
+#define ENABLE_SCLK_CAM1 0x0a00
+#define ENABLE_IP_CAM10 0x0b00
+#define ENABLE_IP_CAM11 0x0b04
+#define ENABLE_IP_CAM12 0x0b08
+
+static unsigned long cam1_clk_regs[] __initdata = {
+ MUX_SEL_CAM10,
+ MUX_SEL_CAM11,
+ MUX_SEL_CAM12,
+ MUX_ENABLE_CAM10,
+ MUX_ENABLE_CAM11,
+ MUX_ENABLE_CAM12,
+ MUX_STAT_CAM10,
+ MUX_STAT_CAM11,
+ MUX_STAT_CAM12,
+ MUX_IGNORE_CAM11,
+ DIV_CAM10,
+ DIV_CAM11,
+ DIV_STAT_CAM10,
+ DIV_STAT_CAM11,
+ ENABLE_ACLK_CAM10,
+ ENABLE_ACLK_CAM11,
+ ENABLE_ACLK_CAM12,
+ ENABLE_PCLK_CAM1,
+ ENABLE_SCLK_CAM1,
+ ENABLE_IP_CAM10,
+ ENABLE_IP_CAM11,
+ ENABLE_IP_CAM12,
+};
+
+PNAME(mout_sclk_isp_uart_user_p) = { "oscclk", "sclk_isp_uart_cam1", };
+PNAME(mout_sclk_isp_spi1_user_p) = { "oscclk", "sclk_isp_spi1_cam1", };
+PNAME(mout_sclk_isp_spi0_user_p) = { "oscclk", "sclk_isp_spi0_cam1", };
+
+PNAME(mout_aclk_cam1_333_user_p) = { "oscclk", "aclk_cam1_333", };
+PNAME(mout_aclk_cam1_400_user_p) = { "oscclk", "aclk_cam1_400", };
+PNAME(mout_aclk_cam1_552_user_p) = { "oscclk", "aclk_cam1_552", };
+
+PNAME(mout_phyclk_rxbyteclkhs0_s2b_user_p) = { "oscclk",
+ "phyclk_rxbyteclkhs0_s2b_phy", };
+
+PNAME(mout_aclk_csis2_b_p) = { "mout_aclk_csis2_a",
+ "mout_aclk_cam1_333_user", };
+PNAME(mout_aclk_csis2_a_p) = { "mout_aclk_cam1_552_user",
+ "mout_aclk_cam1_400_user", };
+
+PNAME(mout_aclk_fd_b_p) = { "mout_aclk_fd_a",
+ "mout_aclk_cam1_333_user", };
+PNAME(mout_aclk_fd_a_p) = { "mout_aclk_cam1_552_user",
+ "mout_aclk_cam1_400_user", };
+
+PNAME(mout_aclk_lite_c_b_p) = { "mout_aclk_lite_c_a",
+ "mout_aclk_cam1_333_user", };
+PNAME(mout_aclk_lite_c_a_p) = { "mout_aclk_cam1_552_user",
+ "mout_aclk_cam1_400_user", };
+
+static struct samsung_fixed_rate_clock cam1_fixed_clks[] __initdata = {
+ FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2B, "phyclk_rxbyteclkhs0_s2b_phy", NULL,
+ CLK_IS_ROOT, 100000000),
+};
+
+static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
+ /* MUX_SEL_CAM10 */
+ MUX(CLK_MOUT_SCLK_ISP_UART_USER, "mout_sclk_isp_uart_user",
+ mout_sclk_isp_uart_user_p, MUX_SEL_CAM10, 20, 1),
+ MUX(CLK_MOUT_SCLK_ISP_SPI1_USER, "mout_sclk_isp_spi1_user",
+ mout_sclk_isp_spi1_user_p, MUX_SEL_CAM10, 16, 1),
+ MUX(CLK_MOUT_SCLK_ISP_SPI0_USER, "mout_sclk_isp_spi0_user",
+ mout_sclk_isp_spi0_user_p, MUX_SEL_CAM10, 12, 1),
+ MUX(CLK_MOUT_ACLK_CAM1_333_USER, "mout_aclk_cam1_333_user",
+ mout_aclk_cam1_333_user_p, MUX_SEL_CAM10, 8, 1),
+ MUX(CLK_MOUT_ACLK_CAM1_400_USER, "mout_aclk_cam1_400_user",
+ mout_aclk_cam1_400_user_p, MUX_SEL_CAM10, 4, 1),
+ MUX(CLK_MOUT_ACLK_CAM1_552_USER, "mout_aclk_cam1_552_user",
+ mout_aclk_cam1_552_user_p, MUX_SEL_CAM10, 0, 1),
+
+ /* MUX_SEL_CAM11 */
+ MUX(CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER,
+ "mout_phyclk_rxbyteclkhs0_s2b_user",
+ mout_phyclk_rxbyteclkhs0_s2b_user_p,
+ MUX_SEL_CAM11, 0, 1),
+
+ /* MUX_SEL_CAM12 */
+ MUX(CLK_MOUT_ACLK_CSIS2_B, "mout_aclk_csis2_b", mout_aclk_csis2_b_p,
+ MUX_SEL_CAM12, 20, 1),
+ MUX(CLK_MOUT_ACLK_CSIS2_A, "mout_aclk_csis2_a", mout_aclk_csis2_a_p,
+ MUX_SEL_CAM12, 16, 1),
+ MUX(CLK_MOUT_ACLK_FD_B, "mout_aclk_fd_b", mout_aclk_fd_b_p,
+ MUX_SEL_CAM12, 12, 1),
+ MUX(CLK_MOUT_ACLK_FD_A, "mout_aclk_fd_a", mout_aclk_fd_a_p,
+ MUX_SEL_CAM12, 8, 1),
+ MUX(CLK_MOUT_ACLK_LITE_C_B, "mout_aclk_lite_c_b", mout_aclk_lite_c_b_p,
+ MUX_SEL_CAM12, 4, 1),
+ MUX(CLK_MOUT_ACLK_LITE_C_A, "mout_aclk_lite_c_a", mout_aclk_lite_c_a_p,
+ MUX_SEL_CAM12, 0, 1),
+};
+
+static struct samsung_div_clock cam1_div_clks[] __initdata = {
+ /* DIV_CAM10 */
+ DIV(CLK_DIV_SCLK_ISP_WPWM, "div_sclk_isp_wpwm",
+ "div_pclk_cam1_83", DIV_CAM10, 16, 2),
+ DIV(CLK_DIV_PCLK_CAM1_83, "div_pclk_cam1_83",
+ "mout_aclk_cam1_333_user", DIV_CAM10, 12, 2),
+ DIV(CLK_DIV_PCLK_CAM1_166, "div_pclk_cam1_166",
+ "mout_aclk_cam1_333_user", DIV_CAM10, 8, 2),
+ DIV(CLK_DIV_PCLK_DBG_CAM1, "div_pclk_dbg_cam1",
+ "mout_aclk_cam1_552_user", DIV_CAM10, 4, 3),
+ DIV(CLK_DIV_ATCLK_CAM1, "div_atclk_cam1", "mout_aclk_cam1_552_user",
+ DIV_CAM10, 0, 3),
+
+ /* DIV_CAM11 */
+ DIV(CLK_DIV_ACLK_CSIS2, "div_aclk_csis2", "mout_aclk_csis2_b",
+ DIV_CAM11, 16, 3),
+ DIV(CLK_DIV_PCLK_FD, "div_pclk_fd", "div_aclk_fd", DIV_CAM11, 12, 2),
+ DIV(CLK_DIV_ACLK_FD, "div_aclk_fd", "mout_aclk_fd_b", DIV_CAM11, 8, 3),
+ DIV(CLK_DIV_PCLK_LITE_C, "div_pclk_lite_c", "div_aclk_lite_c",
+ DIV_CAM11, 4, 2),
+ DIV(CLK_DIV_ACLK_LITE_C, "div_aclk_lite_c", "mout_aclk_lite_c_b",
+ DIV_CAM11, 0, 3),
+};
+
+static struct samsung_gate_clock cam1_gate_clks[] __initdata = {
+ /* ENABLE_ACLK_CAM10 */
+ GATE(CLK_ACLK_ISP_GIC, "aclk_isp_gic", "mout_aclk_cam1_333_user",
+ ENABLE_ACLK_CAM10, 4, 0, 0),
+ GATE(CLK_ACLK_FD, "aclk_fd", "div_aclk_fd",
+ ENABLE_ACLK_CAM10, 3, 0, 0),
+ GATE(CLK_ACLK_LITE_C, "aclk_lite_c", "div_aclk_lite_c",
+ ENABLE_ACLK_CAM10, 1, 0, 0),
+ GATE(CLK_ACLK_CSIS2, "aclk_csis2", "div_aclk_csis2",
+ ENABLE_ACLK_CAM10, 0, 0, 0),
+
+ /* ENABLE_ACLK_CAM11 */
+ GATE(CLK_ACLK_ASYNCAPBM_FD, "aclk_asyncapbm_fd", "div_pclk_fd",
+ ENABLE_ACLK_CAM11, 29, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_FD, "aclk_asyncapbs_fd", "div_pclk_cam1_166",
+ ENABLE_ACLK_CAM11, 28, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBM_LITE_C, "aclk_asyncapbm_lite_c",
+ "div_pclk_lite_c", ENABLE_ACLK_CAM11,
+ 27, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAPBS_LITE_C, "aclk_asyncapbs_lite_c",
+ "div_pclk_cam1_166", ENABLE_ACLK_CAM11,
+ 26, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAHBS_SFRISP2H2, "aclk_asyncahbs_sfrisp2h2",
+ "div_pclk_cam1_83", ENABLE_ACLK_CAM11,
+ 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAHBS_SFRISP2H1, "aclk_asyncahbs_sfrisp2h1",
+ "div_pclk_cam1_83", ENABLE_ACLK_CAM11,
+ 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_CA5, "aclk_asyncaxim_ca5",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM11,
+ 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_CA5, "aclk_asyncaxis_ca5",
+ "mout_aclk_cam1_552_user", ENABLE_ACLK_CAM11,
+ 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_ISPX2, "aclk_asyncaxis_ispx2",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM11,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_ISPX1, "aclk_asyncaxis_ispx1",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM11,
+ 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_ISPX0, "aclk_asyncaxis_ispx0",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM11,
+ 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_ISPEX, "aclk_asyncaxim_ispex",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM11,
+ 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_ISP3P, "aclk_asyncaxim_isp3p",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM11,
+ 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_ISP3P, "aclk_asyncaxis_isp3p",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM11,
+ 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_FD, "aclk_asyncaxim_fd",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM11,
+ 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_FD, "aclk_asyncaxis_fd", "div_aclk_fd",
+ ENABLE_ACLK_CAM11, 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIM_LITE_C, "aclk_asyncaxim_lite_c",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM11,
+ 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_ASYNCAXIS_LITE_C, "aclk_asyncaxis_lite_c",
+ "div_aclk_lite_c", ENABLE_ACLK_CAM11,
+ 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_ISP5P, "aclk_ahb2apb_isp5p", "div_pclk_cam1_83",
+ ENABLE_ACLK_CAM11, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB2APB_ISP3P, "aclk_ahb2apb_isp3p", "div_pclk_cam1_83",
+ ENABLE_ACLK_CAM11, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI2APB_ISP3P, "aclk_axi2apb_isp3p",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM11,
+ 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHB_SFRISP2H, "aclk_ahb_sfrisp2h", "div_pclk_cam1_83",
+ ENABLE_ACLK_CAM11, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI_ISP_HX_R, "aclk_axi_isp_hx_r", "div_pclk_cam1_166",
+ ENABLE_ACLK_CAM11, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI_ISP_CX_R, "aclk_axi_isp_cx_r", "div_pclk_cam1_166",
+ ENABLE_ACLK_CAM11, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI_ISP_HX, "aclk_axi_isp_hx", "mout_aclk_cam1_333_user",
+ ENABLE_ACLK_CAM11, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXI_ISP_CX, "aclk_axi_isp_cx", "mout_aclk_cam1_333_user",
+ ENABLE_ACLK_CAM11, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_ISPX, "aclk_xiu_ispx", "mout_aclk_cam1_333_user",
+ ENABLE_ACLK_CAM11, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_XIU_ISPEX, "aclk_xiu_ispex", "mout_aclk_cam1_400_user",
+ ENABLE_ACLK_CAM11, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM1NP_333, "aclk_cam1np_333", "mout_aclk_cam1_333_user",
+ ENABLE_ACLK_CAM11, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_CAM1ND_400, "aclk_cam1nd_400", "mout_aclk_cam1_400_user",
+ ENABLE_ACLK_CAM11, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_ACLK_CAM12 */
+ GATE(CLK_ACLK_SMMU_ISPCPU, "aclk_smmu_ispcpu",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM12,
+ 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_FD, "aclk_smmu_fd", "mout_aclk_cam1_400_user",
+ ENABLE_ACLK_CAM12, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_SMMU_LITE_C, "aclk_smmu_lite_c",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM12,
+ 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_ISP3P, "aclk_bts_isp3p", "mout_aclk_cam1_400_user",
+ ENABLE_ACLK_CAM12, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_FD, "aclk_bts_fd", "mout_aclk_cam1_400_user",
+ ENABLE_ACLK_CAM12, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_BTS_LITE_C, "aclk_bts_lite_c", "mout_aclk_cam1_400_user",
+ ENABLE_ACLK_CAM12, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHBDN_SFRISP2H, "aclk_ahbdn_sfrisp2h",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM12,
+ 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AHBDN_ISP5P, "aclk_aclk-shbdn_isp5p",
+ "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM12,
+ 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_ISP3P, "aclk_axius_isp3p",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM12,
+ 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_FD, "aclk_axius_fd", "mout_aclk_cam1_400_user",
+ ENABLE_ACLK_CAM12, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ACLK_AXIUS_LITE_C, "aclk_axius_lite_c",
+ "mout_aclk_cam1_400_user", ENABLE_ACLK_CAM12,
+ 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_CAM1 */
+ GATE(CLK_PCLK_SMMU_ISPCPU, "pclk_smmu_ispcpu", "div_pclk_cam1_166",
+ ENABLE_PCLK_CAM1, 27, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_FD, "pclk_smmu_fd", "div_pclk_cam1_166",
+ ENABLE_PCLK_CAM1, 26, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SMMU_LITE_C, "pclk_smmu_lite_c", "div_pclk_cam1_166",
+ ENABLE_PCLK_CAM1, 25, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_ISP3P, "pclk_bts_isp3p", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_FD, "pclk_bts_fd", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_BTS_LITE_C, "pclk_bts_lite_c", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXIM_CA5, "pclk_asyncaxim_ca5", "div_pclk_cam1_166",
+ ENABLE_PCLK_CAM1, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXIM_ISPEX, "pclk_asyncaxim_ispex",
+ "div_pclk_cam1_83", ENABLE_PCLK_CAM1,
+ 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXIM_ISP3P, "pclk_asyncaxim_isp3p",
+ "div_pclk_cam1_83", ENABLE_PCLK_CAM1,
+ 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXIM_FD, "pclk_asyncaxim_fd", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ASYNCAXIM_LITE_C, "pclk_asyncaxim_lite_c",
+ "div_pclk_cam1_83", ENABLE_PCLK_CAM1,
+ 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_PMU_CAM1, "pclk_pmu_cam1", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_SYSREG_CAM1, "pclk_sysreg_cam1", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 15, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_CMU_CAM1_LOCAL, "pclk_cmu_cam1_local",
+ "div_pclk_cam1_166", ENABLE_PCLK_CAM1,
+ 14, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_MCTADC, "pclk_isp_mctadc", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 13, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_WDT, "pclk_isp_wdt", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 12, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_PWM, "pclk_isp_pwm", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 11, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_UART, "pclk_isp_uart", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 10, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_MCUCTL, "pclk_isp_mcuctl", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_SPI1, "pclk_isp_spi1", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 8, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_SPI0, "pclk_isp_spi0", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_I2C2, "pclk_isp_i2c2", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_I2C1, "pclk_isp_i2c1", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_I2C0, "pclk_isp_i2c0", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_ISP_MPWM, "pclk_isp_wpwm", "div_pclk_cam1_83",
+ ENABLE_PCLK_CAM1, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_FD, "pclk_fd", "div_pclk_fd",
+ ENABLE_PCLK_CAM1, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_LITE_C, "pclk_lite_c", "div_pclk_lite_c",
+ ENABLE_PCLK_CAM1, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PCLK_CSIS2, "pclk_csis2", "div_pclk_cam1_166",
+ ENABLE_PCLK_CAM1, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_SCLK_CAM1 */
+ GATE(CLK_SCLK_ISP_I2C2, "sclk_isp_i2c2", "oscclk", ENABLE_SCLK_CAM1,
+ 15, 0, 0),
+ GATE(CLK_SCLK_ISP_I2C1, "sclk_isp_i2c1", "oscclk", ENABLE_SCLK_CAM1,
+ 14, 0, 0),
+ GATE(CLK_SCLK_ISP_I2C0, "sclk_isp_i2c0", "oscclk", ENABLE_SCLK_CAM1,
+ 13, 0, 0),
+ GATE(CLK_SCLK_ISP_PWM, "sclk_isp_pwm", "oscclk", ENABLE_SCLK_CAM1,
+ 12, 0, 0),
+ GATE(CLK_PHYCLK_RXBYTECLKHS0_S2B, "phyclk_rxbyteclkhs0_s2b",
+ "mout_phyclk_rxbyteclkhs0_s2b_user",
+ ENABLE_SCLK_CAM1, 11, 0, 0),
+ GATE(CLK_SCLK_LITE_C_FREECNT, "sclk_lite_c_freecnt", "div_pclk_lite_c",
+ ENABLE_SCLK_CAM1, 10, 0, 0),
+ GATE(CLK_SCLK_PIXELASYNCM_FD, "sclk_pixelasyncm_fd", "div_aclk_fd",
+ ENABLE_SCLK_CAM1, 9, 0, 0),
+ GATE(CLK_SCLK_ISP_MCTADC, "sclk_isp_mctadc", "sclk_isp_mctadc_cam1",
+ ENABLE_SCLK_CAM1, 7, 0, 0),
+ GATE(CLK_SCLK_ISP_UART, "sclk_isp_uart", "mout_sclk_isp_uart_user",
+ ENABLE_SCLK_CAM1, 6, 0, 0),
+ GATE(CLK_SCLK_ISP_SPI1, "sclk_isp_spi1", "mout_sclk_isp_spi1_user",
+ ENABLE_SCLK_CAM1, 5, 0, 0),
+ GATE(CLK_SCLK_ISP_SPI0, "sclk_isp_spi0", "mout_sclk_isp_spi0_user",
+ ENABLE_SCLK_CAM1, 4, 0, 0),
+ GATE(CLK_SCLK_ISP_MPWM, "sclk_isp_wpwm", "div_sclk_isp_wpwm",
+ ENABLE_SCLK_CAM1, 3, 0, 0),
+ GATE(CLK_PCLK_DBG_ISP, "sclk_dbg_isp", "div_pclk_dbg_cam1",
+ ENABLE_SCLK_CAM1, 2, 0, 0),
+ GATE(CLK_ATCLK_ISP, "atclk_isp", "div_atclk_cam1",
+ ENABLE_SCLK_CAM1, 1, 0, 0),
+ GATE(CLK_SCLK_ISP_CA5, "sclk_isp_ca5", "mout_aclk_cam1_552_user",
+ ENABLE_SCLK_CAM1, 0, 0, 0),
+};
+
+static struct samsung_cmu_info cam1_cmu_info __initdata = {
+ .mux_clks = cam1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cam1_mux_clks),
+ .div_clks = cam1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cam1_div_clks),
+ .gate_clks = cam1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cam1_gate_clks),
+ .fixed_clks = cam1_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(cam1_fixed_clks),
+ .nr_clk_ids = CAM1_NR_CLK,
+ .clk_regs = cam1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cam1_clk_regs),
+};
+
+static void __init exynos5433_cmu_cam1_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cam1_cmu_info);
+}
+CLK_OF_DECLARE(exynos5433_cmu_cam1, "samsung,exynos5433-cmu-cam1",
+ exynos5433_cmu_cam1_init);
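
For orientation, a minimal kernel-context sketch of what one row of the gate table above boils down to once samsung_cmu_register_one() has mapped and registered CMU_CAM1. It is not part of the patch; cmu_cam1_base and cam1_lock are stand-ins for the driver's internal register base and spinlock, and the helper name is made up.

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

/* illustrative stand-ins for the mapped CMU_CAM1 base and the driver lock */
static void __iomem *cmu_cam1_base;
static DEFINE_SPINLOCK(cam1_lock);

static struct clk *register_sclk_isp_spi0_sketch(void)
{
	/*
	 * The CLK_SCLK_ISP_SPI0 row above describes a gate at bit 4 of
	 * ENABLE_SCLK_CAM1 (offset 0x0a00), parented to
	 * "mout_sclk_isp_spi0_user"; registering that one clock by hand
	 * would look roughly like this.
	 */
	return clk_register_gate(NULL, "sclk_isp_spi0",
				 "mout_sclk_isp_spi0_user", 0,
				 cmu_cam1_base + 0x0a00, 4, 0, &cam1_lock);
}
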
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index d270a2084644..e668e479a697 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -169,44 +169,44 @@ static inline void s5pv210_clk_sleep_init(void) { }
#endif
/* Mux parent lists. */
-static const char *fin_pll_p[] __initconst = {
+static const char *fin_pll_p[] __initdata = {
"xxti",
"xusbxti"
};
-static const char *mout_apll_p[] __initconst = {
+static const char *mout_apll_p[] __initdata = {
"fin_pll",
"fout_apll"
};
-static const char *mout_mpll_p[] __initconst = {
+static const char *mout_mpll_p[] __initdata = {
"fin_pll",
"fout_mpll"
};
-static const char *mout_epll_p[] __initconst = {
+static const char *mout_epll_p[] __initdata = {
"fin_pll",
"fout_epll"
};
-static const char *mout_vpllsrc_p[] __initconst = {
+static const char *mout_vpllsrc_p[] __initdata = {
"fin_pll",
"sclk_hdmi27m"
};
-static const char *mout_vpll_p[] __initconst = {
+static const char *mout_vpll_p[] __initdata = {
"mout_vpllsrc",
"fout_vpll"
};
-static const char *mout_group1_p[] __initconst = {
+static const char *mout_group1_p[] __initdata = {
"dout_a2m",
"mout_mpll",
"mout_epll",
"mout_vpll"
};
-static const char *mout_group2_p[] __initconst = {
+static const char *mout_group2_p[] __initdata = {
"xxti",
"xusbxti",
"sclk_hdmi27m",
@@ -218,7 +218,7 @@ static const char *mout_group2_p[] __initconst = {
"mout_vpll",
};
-static const char *mout_audio0_p[] __initconst = {
+static const char *mout_audio0_p[] __initdata = {
"xxti",
"pcmcdclk0",
"sclk_hdmi27m",
@@ -230,7 +230,7 @@ static const char *mout_audio0_p[] __initconst = {
"mout_vpll",
};
-static const char *mout_audio1_p[] __initconst = {
+static const char *mout_audio1_p[] __initdata = {
"i2scdclk1",
"pcmcdclk1",
"sclk_hdmi27m",
@@ -242,7 +242,7 @@ static const char *mout_audio1_p[] __initconst = {
"mout_vpll",
};
-static const char *mout_audio2_p[] __initconst = {
+static const char *mout_audio2_p[] __initdata = {
"i2scdclk2",
"pcmcdclk2",
"sclk_hdmi27m",
@@ -254,63 +254,63 @@ static const char *mout_audio2_p[] __initconst = {
"mout_vpll",
};
-static const char *mout_spdif_p[] __initconst = {
+static const char *mout_spdif_p[] __initdata = {
"dout_audio0",
"dout_audio1",
"dout_audio3",
};
-static const char *mout_group3_p[] __initconst = {
+static const char *mout_group3_p[] __initdata = {
"mout_apll",
"mout_mpll"
};
-static const char *mout_group4_p[] __initconst = {
+static const char *mout_group4_p[] __initdata = {
"mout_mpll",
"dout_a2m"
};
-static const char *mout_flash_p[] __initconst = {
+static const char *mout_flash_p[] __initdata = {
"dout_hclkd",
"dout_hclkp"
};
-static const char *mout_dac_p[] __initconst = {
+static const char *mout_dac_p[] __initdata = {
"mout_vpll",
"sclk_hdmiphy"
};
-static const char *mout_hdmi_p[] __initconst = {
+static const char *mout_hdmi_p[] __initdata = {
"sclk_hdmiphy",
"dout_tblk"
};
-static const char *mout_mixer_p[] __initconst = {
+static const char *mout_mixer_p[] __initdata = {
"mout_dac",
"mout_hdmi"
};
-static const char *mout_vpll_6442_p[] __initconst = {
+static const char *mout_vpll_6442_p[] __initdata = {
"fin_pll",
"fout_vpll"
};
-static const char *mout_mixer_6442_p[] __initconst = {
+static const char *mout_mixer_6442_p[] __initdata = {
"mout_vpll",
"dout_mixer"
};
-static const char *mout_d0sync_6442_p[] __initconst = {
+static const char *mout_d0sync_6442_p[] __initdata = {
"mout_dsys",
"div_apll"
};
-static const char *mout_d1sync_6442_p[] __initconst = {
+static const char *mout_d1sync_6442_p[] __initdata = {
"mout_psys",
"div_apll"
};
-static const char *mout_group2_6442_p[] __initconst = {
+static const char *mout_group2_6442_p[] __initdata = {
"fin_pll",
"none",
"none",
@@ -322,7 +322,7 @@ static const char *mout_group2_6442_p[] __initconst = {
"mout_vpll",
};
-static const char *mout_audio0_6442_p[] __initconst = {
+static const char *mout_audio0_6442_p[] __initdata = {
"fin_pll",
"pcmcdclk0",
"none",
@@ -334,7 +334,7 @@ static const char *mout_audio0_6442_p[] __initconst = {
"mout_vpll",
};
-static const char *mout_audio1_6442_p[] __initconst = {
+static const char *mout_audio1_6442_p[] __initdata = {
"i2scdclk1",
"pcmcdclk1",
"none",
@@ -347,7 +347,7 @@ static const char *mout_audio1_6442_p[] __initconst = {
"fin_pll",
};
-static const char *mout_clksel_p[] __initconst = {
+static const char *mout_clksel_p[] __initdata = {
"fout_apll_clkout",
"fout_mpll_clkout",
"fout_epll",
@@ -370,7 +370,7 @@ static const char *mout_clksel_p[] __initconst = {
"div_dclk"
};
-static const char *mout_clksel_6442_p[] __initconst = {
+static const char *mout_clksel_6442_p[] __initdata = {
"fout_apll_clkout",
"fout_mpll_clkout",
"fout_epll",
@@ -393,7 +393,7 @@ static const char *mout_clksel_6442_p[] __initconst = {
"div_dclk"
};
-static const char *mout_clkout_p[] __initconst = {
+static const char *mout_clkout_p[] __initdata = {
"dout_clkout",
"none",
"xxti",
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
index 0689d7fb2666..97c71c885e4f 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/shmobile/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o
obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o
+obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o
obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o
diff --git a/drivers/clk/shmobile/clk-r8a7778.c b/drivers/clk/shmobile/clk-r8a7778.c
new file mode 100644
index 000000000000..cb33b57274bf
--- /dev/null
+++ b/drivers/clk/shmobile/clk-r8a7778.c
@@ -0,0 +1,143 @@
+/*
+ * r8a7778 Core CPG Clocks
+ *
+ * Copyright (C) 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/of_address.h>
+
+struct r8a7778_cpg {
+ struct clk_onecell_data data;
+ spinlock_t lock;
+ void __iomem *reg;
+};
+
+/* PLL multipliers per bits 11, 12, and 18 of MODEMR */
+struct {
+ unsigned long plla_mult;
+ unsigned long pllb_mult;
+} r8a7778_rates[] __initdata = {
+ [0] = { 21, 21 },
+ [1] = { 24, 24 },
+ [2] = { 28, 28 },
+ [3] = { 32, 32 },
+ [5] = { 24, 21 },
+ [6] = { 28, 21 },
+ [7] = { 32, 24 },
+};
+
+/* Clock dividers per bits 1 and 2 of MODEMR */
+struct {
+ const char *name;
+ unsigned int div[4];
+} r8a7778_divs[6] __initdata = {
+ { "b", { 12, 12, 16, 18 } },
+ { "out", { 12, 12, 16, 18 } },
+ { "p", { 16, 12, 16, 12 } },
+ { "s", { 4, 3, 4, 3 } },
+ { "s1", { 8, 6, 8, 6 } },
+};
+
+static u32 cpg_mode_rates __initdata;
+static u32 cpg_mode_divs __initdata;
+
+static struct clk * __init
+r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
+ const char *name)
+{
+ if (!strcmp(name, "plla")) {
+ return clk_register_fixed_factor(NULL, "plla",
+ of_clk_get_parent_name(np, 0), 0,
+ r8a7778_rates[cpg_mode_rates].plla_mult, 1);
+ } else if (!strcmp(name, "pllb")) {
+ return clk_register_fixed_factor(NULL, "pllb",
+ of_clk_get_parent_name(np, 0), 0,
+ r8a7778_rates[cpg_mode_rates].pllb_mult, 1);
+ } else {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(r8a7778_divs); i++) {
+ if (!strcmp(name, r8a7778_divs[i].name)) {
+ return clk_register_fixed_factor(NULL,
+ r8a7778_divs[i].name,
+ "plla", 0, 1,
+ r8a7778_divs[i].div[cpg_mode_divs]);
+ }
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+
+static void __init r8a7778_cpg_clocks_init(struct device_node *np)
+{
+ struct r8a7778_cpg *cpg;
+ struct clk **clks;
+ unsigned int i;
+ int num_clks;
+
+ num_clks = of_property_count_strings(np, "clock-output-names");
+ if (num_clks < 0) {
+ pr_err("%s: failed to count clocks\n", __func__);
+ return;
+ }
+
+ cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
+ if (cpg == NULL || clks == NULL) {
+ /* We're leaking memory on purpose; there's no point in cleaning
+ * up as the system won't boot anyway.
+ */
+ return;
+ }
+
+ spin_lock_init(&cpg->lock);
+
+ cpg->data.clks = clks;
+ cpg->data.clk_num = num_clks;
+
+ cpg->reg = of_iomap(np, 0);
+ if (WARN_ON(cpg->reg == NULL))
+ return;
+
+ for (i = 0; i < num_clks; ++i) {
+ const char *name;
+ struct clk *clk;
+
+ of_property_read_string_index(np, "clock-output-names", i,
+ &name);
+
+ clk = r8a7778_cpg_register_clock(np, cpg, name);
+ if (IS_ERR(clk))
+ pr_err("%s: failed to register %s %s clock (%ld)\n",
+ __func__, np->name, name, PTR_ERR(clk));
+ else
+ cpg->data.clks[i] = clk;
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+
+CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks",
+ r8a7778_cpg_clocks_init);
+
+void __init r8a7778_clocks_init(u32 mode)
+{
+ BUG_ON(!(mode & BIT(19)));
+
+ cpg_mode_rates = (!!(mode & BIT(18)) << 2) |
+ (!!(mode & BIT(12)) << 1) |
+ (!!(mode & BIT(11)));
+ cpg_mode_divs = (!!(mode & BIT(2)) << 1) |
+ (!!(mode & BIT(1)));
+
+ of_clk_init(NULL);
+}
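
To make the MODEMR decoding above concrete, here is a small standalone sketch (not from the patch) that applies the same bit extraction to a hypothetical mode value; BIT() is a local stand-in for the kernel macro.

#include <stdio.h>

#define BIT(n) (1u << (n))	/* local stand-in for the kernel macro */

int main(void)
{
	unsigned int mode = BIT(19) | BIT(11) | BIT(1);	/* hypothetical MODEMR */
	unsigned int rates = (!!(mode & BIT(18)) << 2) |
			     (!!(mode & BIT(12)) << 1) |
			      !!(mode & BIT(11));	/* index 1: PLLA x24, PLLB x24 */
	unsigned int divs  = (!!(mode & BIT(2)) << 1) |
			      !!(mode & BIT(1));	/* index 1: b/12, p/12, s/3, s1/6 */

	printf("rates index %u, divs index %u\n", rates, divs);
	return 0;
}

With those indices, plla and pllb become fixed-factor x24 clocks and the "b", "out", "p", "s" and "s1" outputs pick the second column of r8a7778_divs.
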
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index af94ed82cfcb..a917c4c7eaa9 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -1057,7 +1057,7 @@ static struct clk * __init st_clk_register_quadfs_fsynth(
return clk;
}
-static struct of_device_id quadfs_of_match[] = {
+static const struct of_device_id quadfs_of_match[] = {
{
.compatible = "st,stih416-quadfs216",
.data = &st_fs216c65_416
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 9a15ec344a85..fdcff10f6d30 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -341,7 +341,7 @@ static struct clkgena_divmux_data st_divmux_c32odf3 = {
.fb_start_bit_idx = 24,
};
-static struct of_device_id clkgena_divmux_of_match[] = {
+static const struct of_device_id clkgena_divmux_of_match[] = {
{
.compatible = "st,clkgena-divmux-c65-hs",
.data = &st_divmux_c65hs,
@@ -479,7 +479,7 @@ static struct clkgena_prediv_data prediv_c32_data = {
.table = prediv_table16,
};
-static struct of_device_id clkgena_prediv_of_match[] = {
+static const struct of_device_id clkgena_prediv_of_match[] = {
{ .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
{ .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
{}
@@ -586,7 +586,7 @@ static struct clkgen_mux_data stih407_a9_mux_data = {
.width = 2,
};
-static struct of_device_id mux_of_match[] = {
+static const struct of_device_id mux_of_match[] = {
{
.compatible = "st,stih416-clkgenc-vcc-hd",
.data = &clkgen_mux_c_vcc_hd_416,
@@ -693,7 +693,7 @@ static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
.lock = &clkgenf_lock,
};
-static struct of_device_id vcc_of_match[] = {
+static const struct of_device_id vcc_of_match[] = {
{ .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
{ .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
{}
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 29769d79e306..d204ba85db3a 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -593,7 +593,7 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
return clk;
}
-static struct of_device_id c32_pll_of_match[] = {
+static const struct of_device_id c32_pll_of_match[] = {
{
.compatible = "st,plls-c32-a1x-0",
.data = &st_pll3200c32_a1x_0,
@@ -708,7 +708,7 @@ err:
}
CLK_OF_DECLARE(clkgen_c32_pll, "st,clkgen-plls-c32", clkgen_c32_pll_setup);
-static struct of_device_id c32_gpu_pll_of_match[] = {
+static const struct of_device_id c32_gpu_pll_of_match[] = {
{
.compatible = "st,stih415-gpu-pll-c32",
.data = &st_pll1200c32_gpu_415,
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 3a5292e3fcf8..058f273d6154 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -9,6 +9,7 @@ obj-y += clk-mod0.o
obj-y += clk-sun8i-mbus.o
obj-y += clk-sun9i-core.o
obj-y += clk-sun9i-mmc.o
+obj-y += clk-usb.o
obj-$(CONFIG_MFD_SUN6I_PRCM) += \
clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 379324eb5486..7e1e2bd189b6 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -482,6 +482,45 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
}
/**
+ * sun5i_a13_get_ahb_factors() - calculates p factor for AHB
+ * AHB rate is calculated as follows
+ * rate = parent_rate >> p
+ */
+
+static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u32 div;
+
+ /* divide only */
+ if (parent_rate < *freq)
+ *freq = parent_rate;
+
+ /*
+ * user manual says valid speed is 8k ~ 276M, but tests show it
+ * can work at speeds up to 300M, just after reparenting to pll6
+ */
+ if (*freq < 8000)
+ *freq = 8000;
+ if (*freq > 300000000)
+ *freq = 300000000;
+
+ div = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+
+ /* p = 0 ~ 3 */
+ if (div > 3)
+ div = 3;
+
+ *freq = parent_rate >> div;
+
+ /* we were called to round the frequency, we can now return */
+ if (p == NULL)
+ return;
+
+ *p = div;
+}
+
+/**
* sun4i_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
@@ -616,6 +655,11 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
.n_start = 1,
};
+static struct clk_factors_config sun5i_a13_ahb_config = {
+ .pshift = 4,
+ .pwidth = 2,
+};
+
static struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
@@ -676,6 +720,13 @@ static const struct factors_data sun6i_a31_pll6_data __initconst = {
.name = "pll6x2",
};
+static const struct factors_data sun5i_a13_ahb_data __initconst = {
+ .mux = 6,
+ .muxmask = BIT(1) | BIT(0),
+ .table = &sun5i_a13_ahb_config,
+ .getter = sun5i_a13_get_ahb_factors,
+};
+
static const struct factors_data sun4i_apb1_data __initconst = {
.mux = 24,
.muxmask = BIT(1) | BIT(0),
@@ -838,59 +889,6 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
/**
- * sunxi_gates_reset... - reset bits in leaf gate clk registers handling
- */
-
-struct gates_reset_data {
- void __iomem *reg;
- spinlock_t *lock;
- struct reset_controller_dev rcdev;
-};
-
-static int sunxi_gates_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct gates_reset_data *data = container_of(rcdev,
- struct gates_reset_data,
- rcdev);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(data->lock, flags);
-
- reg = readl(data->reg);
- writel(reg & ~BIT(id), data->reg);
-
- spin_unlock_irqrestore(data->lock, flags);
-
- return 0;
-}
-
-static int sunxi_gates_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct gates_reset_data *data = container_of(rcdev,
- struct gates_reset_data,
- rcdev);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(data->lock, flags);
-
- reg = readl(data->reg);
- writel(reg | BIT(id), data->reg);
-
- spin_unlock_irqrestore(data->lock, flags);
-
- return 0;
-}
-
-static struct reset_control_ops sunxi_gates_reset_ops = {
- .assert = sunxi_gates_reset_assert,
- .deassert = sunxi_gates_reset_deassert,
-};
-
-/**
* sunxi_gates_clk_setup() - Setup function for leaf gates on clocks
*/
@@ -898,7 +896,6 @@ static struct reset_control_ops sunxi_gates_reset_ops = {
struct gates_data {
DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
- u32 reset_mask;
};
static const struct gates_data sun4i_axi_gates_data __initconst = {
@@ -997,26 +994,10 @@ static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
.mask = {0x1F0007},
};
-static const struct gates_data sun4i_a10_usb_gates_data __initconst = {
- .mask = {0x1C0},
- .reset_mask = 0x07,
-};
-
-static const struct gates_data sun5i_a13_usb_gates_data __initconst = {
- .mask = {0x140},
- .reset_mask = 0x03,
-};
-
-static const struct gates_data sun6i_a31_usb_gates_data __initconst = {
- .mask = { BIT(18) | BIT(17) | BIT(16) | BIT(10) | BIT(9) | BIT(8) },
- .reset_mask = BIT(2) | BIT(1) | BIT(0),
-};
-
static void __init sunxi_gates_clk_setup(struct device_node *node,
struct gates_data *data)
{
struct clk_onecell_data *clk_data;
- struct gates_reset_data *reset_data;
const char *clk_parent;
const char *clk_name;
void __iomem *reg;
@@ -1057,21 +1038,6 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
clk_data->clk_num = i;
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
-
- /* Register a reset controler for gates with reset bits */
- if (data->reset_mask == 0)
- return;
-
- reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
- if (!reset_data)
- return;
-
- reset_data->reg = reg;
- reset_data->lock = &clk_lock;
- reset_data->rcdev.nr_resets = __fls(data->reset_mask) + 1;
- reset_data->rcdev.ops = &sunxi_gates_reset_ops;
- reset_data->rcdev.of_node = node;
- reset_controller_register(&reset_data->rcdev);
}
@@ -1080,13 +1046,20 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
* sunxi_divs_clk_setup() helper data
*/
-#define SUNXI_DIVS_MAX_QTY 2
+#define SUNXI_DIVS_MAX_QTY 4
#define SUNXI_DIVISOR_WIDTH 2
struct divs_data {
const struct factors_data *factors; /* data for the factor clock */
- int ndivs; /* number of children */
+ int ndivs; /* number of outputs */
+ /*
+ * List of outputs. Refer to the diagram for sunxi_divs_clk_setup():
+ * self or base factor clock refers to the output from the pll
+ * itself. The remaining refer to fixed or configurable divider
+ * outputs.
+ */
struct {
+ u8 self; /* is it the base factor clock? (only one) */
u8 fixed; /* is it a fixed divisor? if not... */
struct clk_div_table *table; /* is it a table based divisor? */
u8 shift; /* otherwise it's a normal divisor with this shift */
@@ -1109,23 +1082,27 @@ static const struct divs_data pll5_divs_data __initconst = {
.div = {
{ .shift = 0, .pow = 0, }, /* M, DDR */
{ .shift = 16, .pow = 1, }, /* P, other */
+ /* No output for the base factor clock */
}
};
static const struct divs_data pll6_divs_data __initconst = {
.factors = &sun4i_pll6_data,
- .ndivs = 2,
+ .ndivs = 4,
.div = {
{ .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
{ .fixed = 2 }, /* P, other */
+ { .self = 1 }, /* base factor clock, 2x */
+ { .fixed = 4 }, /* pll6 / 4, used as ahb input */
}
};
static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
.factors = &sun6i_a31_pll6_data,
- .ndivs = 1,
+ .ndivs = 2,
.div = {
{ .fixed = 2 }, /* normal output */
+ { .self = 1 }, /* base factor clock, 2x */
}
};
@@ -1156,6 +1133,10 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
int ndivs = SUNXI_DIVS_MAX_QTY, i = 0;
int flags, clkflags;
+ /* if number of children known, use it */
+ if (data->ndivs)
+ ndivs = data->ndivs;
+
/* Set up factor clock that we will be dividing */
pclk = sunxi_factors_clk_setup(node, data->factors);
parent = __clk_get_name(pclk);
@@ -1166,7 +1147,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
if (!clk_data)
return;
- clks = kzalloc((SUNXI_DIVS_MAX_QTY+1) * sizeof(*clks), GFP_KERNEL);
+ clks = kcalloc(ndivs, sizeof(*clks), GFP_KERNEL);
if (!clks)
goto free_clkdata;
@@ -1176,15 +1157,17 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
* our RAM clock! */
clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT;
- /* if number of children known, use it */
- if (data->ndivs)
- ndivs = data->ndivs;
-
for (i = 0; i < ndivs; i++) {
if (of_property_read_string_index(node, "clock-output-names",
i, &clk_name) != 0)
break;
+ /* If this is the base factor clock, only update clks */
+ if (data->div[i].self) {
+ clk_data->clks[i] = pclk;
+ continue;
+ }
+
gate_hw = NULL;
rate_hw = NULL;
rate_ops = NULL;
@@ -1243,9 +1226,6 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
clk_register_clkdev(clks[i], clk_name, NULL);
}
- /* The last clock available on the getter is the parent */
- clks[i++] = pclk;
-
/* Adjust to the real max */
clk_data->clk_num = i;
@@ -1269,6 +1249,7 @@ static const struct of_device_id clk_factors_match[] __initconst = {
{.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
{.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
{.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
+ {.compatible = "allwinner,sun5i-a13-ahb-clk", .data = &sun5i_a13_ahb_data,},
{.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
{.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
{}
@@ -1324,9 +1305,6 @@ static const struct of_device_id clk_gates_match[] __initconst = {
{.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,},
{.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
{.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
- {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
- {.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,},
- {.compatible = "allwinner,sun6i-a31-usb-clk", .data = &sun6i_a31_usb_gates_data,},
{}
};
@@ -1348,15 +1326,15 @@ static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
{
unsigned int i;
+ /* Register divided output clocks */
+ of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
+
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
/* Register divider clocks */
of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
- /* Register divided output clocks */
- of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
-
/* Register mux clocks */
of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
@@ -1385,6 +1363,7 @@ static void __init sun4i_a10_init_clocks(struct device_node *node)
CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
static const char *sun5i_critical_clocks[] __initdata = {
+ "cpu",
"pll5_ddr",
"ahb_sdram",
};
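
As a quick check of the AHB rounding added above, a standalone sketch (not from the patch) with local stand-ins for DIV_ROUND_UP() and order_base_2(); the 600 MHz parent and 150 MHz request are hypothetical.

#include <stdio.h>

/* local stand-ins for the kernel helpers used by sun5i_a13_get_ahb_factors() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
static unsigned int order_base_2(unsigned int n)
{
	unsigned int o = 0;

	while ((1u << o) < n)
		o++;
	return o;
}

int main(void)
{
	unsigned int parent = 600000000, freq = 150000000;	/* hypothetical rates */
	unsigned int div = order_base_2(DIV_ROUND_UP(parent, freq));

	if (div > 3)
		div = 3;		/* p is a 2-bit field */
	printf("p = %u, rounded rate = %u\n", div, parent >> div);
	return 0;
}

A request of, say, 100 MHz from the same parent would round to 75 MHz, since p can only be 0..3 and the divider is a power of two.
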
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
new file mode 100644
index 000000000000..a86ed2f8d7af
--- /dev/null
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2013-2015 Emilio López
+ *
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+
+/**
+ * sunxi_usb_reset... - handling of the reset bits in the USB clock registers
+ */
+
+struct usb_reset_data {
+ void __iomem *reg;
+ spinlock_t *lock;
+ struct clk *clk;
+ struct reset_controller_dev rcdev;
+};
+
+static int sunxi_usb_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct usb_reset_data *data = container_of(rcdev,
+ struct usb_reset_data,
+ rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ clk_prepare_enable(data->clk);
+ spin_lock_irqsave(data->lock, flags);
+
+ reg = readl(data->reg);
+ writel(reg & ~BIT(id), data->reg);
+
+ spin_unlock_irqrestore(data->lock, flags);
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct usb_reset_data *data = container_of(rcdev,
+ struct usb_reset_data,
+ rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ clk_prepare_enable(data->clk);
+ spin_lock_irqsave(data->lock, flags);
+
+ reg = readl(data->reg);
+ writel(reg | BIT(id), data->reg);
+
+ spin_unlock_irqrestore(data->lock, flags);
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static struct reset_control_ops sunxi_usb_reset_ops = {
+ .assert = sunxi_usb_reset_assert,
+ .deassert = sunxi_usb_reset_deassert,
+};
+
+/**
+ * sunxi_usb_clk_setup() - Setup function for usb gate clocks
+ */
+
+#define SUNXI_USB_MAX_SIZE 32
+
+struct usb_clk_data {
+ u32 clk_mask;
+ u32 reset_mask;
+ bool reset_needs_clk;
+};
+
+static void __init sunxi_usb_clk_setup(struct device_node *node,
+ const struct usb_clk_data *data,
+ spinlock_t *lock)
+{
+ struct clk_onecell_data *clk_data;
+ struct usb_reset_data *reset_data;
+ const char *clk_parent;
+ const char *clk_name;
+ void __iomem *reg;
+ int qty;
+ int i = 0;
+ int j = 0;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg))
+ return;
+
+ clk_parent = of_clk_get_parent_name(node, 0);
+ if (!clk_parent)
+ return;
+
+ /* Worst-case size approximation and memory allocation */
+ qty = find_last_bit((unsigned long *)&data->clk_mask,
+ SUNXI_USB_MAX_SIZE);
+
+ clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clks = kzalloc((qty+1) * sizeof(struct clk *), GFP_KERNEL);
+ if (!clk_data->clks) {
+ kfree(clk_data);
+ return;
+ }
+
+ for_each_set_bit(i, (unsigned long *)&data->clk_mask,
+ SUNXI_USB_MAX_SIZE) {
+ of_property_read_string_index(node, "clock-output-names",
+ j, &clk_name);
+ clk_data->clks[i] = clk_register_gate(NULL, clk_name,
+ clk_parent, 0,
+ reg, i, 0, lock);
+ WARN_ON(IS_ERR(clk_data->clks[i]));
+
+ j++;
+ }
+
+ /* Adjust to the real max */
+ clk_data->clk_num = i;
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ /* Register a reset controller for usb with reset bits */
+ if (data->reset_mask == 0)
+ return;
+
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+ if (!reset_data)
+ return;
+
+ if (data->reset_needs_clk) {
+ reset_data->clk = of_clk_get(node, 0);
+ if (IS_ERR(reset_data->clk)) {
+ pr_err("Could not get clock for reset controls\n");
+ kfree(reset_data);
+ return;
+ }
+ }
+
+ reset_data->reg = reg;
+ reset_data->lock = lock;
+ reset_data->rcdev.nr_resets = __fls(data->reset_mask) + 1;
+ reset_data->rcdev.ops = &sunxi_usb_reset_ops;
+ reset_data->rcdev.of_node = node;
+ reset_controller_register(&reset_data->rcdev);
+}
+
+static const struct usb_clk_data sun4i_a10_usb_clk_data __initconst = {
+ .clk_mask = BIT(8) | BIT(7) | BIT(6),
+ .reset_mask = BIT(2) | BIT(1) | BIT(0),
+};
+
+static DEFINE_SPINLOCK(sun4i_a10_usb_lock);
+
+static void __init sun4i_a10_usb_setup(struct device_node *node)
+{
+ sunxi_usb_clk_setup(node, &sun4i_a10_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun4i_a10_usb, "allwinner,sun4i-a10-usb-clk", sun4i_a10_usb_setup);
+
+static const struct usb_clk_data sun5i_a13_usb_clk_data __initconst = {
+ .clk_mask = BIT(8) | BIT(6),
+ .reset_mask = BIT(1) | BIT(0),
+};
+
+static void __init sun5i_a13_usb_setup(struct device_node *node)
+{
+ sunxi_usb_clk_setup(node, &sun5i_a13_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun5i_a13_usb, "allwinner,sun5i-a13-usb-clk", sun5i_a13_usb_setup);
+
+static const struct usb_clk_data sun6i_a31_usb_clk_data __initconst = {
+ .clk_mask = BIT(18) | BIT(17) | BIT(16) | BIT(10) | BIT(9) | BIT(8),
+ .reset_mask = BIT(2) | BIT(1) | BIT(0),
+};
+
+static void __init sun6i_a31_usb_setup(struct device_node *node)
+{
+ sunxi_usb_clk_setup(node, &sun6i_a31_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun6i_a31_usb, "allwinner,sun6i-a31-usb-clk", sun6i_a31_usb_setup);
+
+static const struct usb_clk_data sun9i_a80_usb_mod_data __initconst = {
+ .clk_mask = BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1),
+ .reset_mask = BIT(19) | BIT(18) | BIT(17),
+ .reset_needs_clk = 1,
+};
+
+static DEFINE_SPINLOCK(a80_usb_mod_lock);
+
+static void __init sun9i_a80_usb_mod_setup(struct device_node *node)
+{
+ sunxi_usb_clk_setup(node, &sun9i_a80_usb_mod_data, &a80_usb_mod_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_usb_mod, "allwinner,sun9i-a80-usb-mod-clk", sun9i_a80_usb_mod_setup);
+
+static const struct usb_clk_data sun9i_a80_usb_phy_data __initconst = {
+ .clk_mask = BIT(10) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1),
+ .reset_mask = BIT(21) | BIT(20) | BIT(19) | BIT(18) | BIT(17),
+ .reset_needs_clk = 1,
+};
+
+static DEFINE_SPINLOCK(a80_usb_phy_lock);
+
+static void __init sun9i_a80_usb_phy_setup(struct device_node *node)
+{
+ sunxi_usb_clk_setup(node, &sun9i_a80_usb_phy_data, &a80_usb_phy_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_usb_phy, "allwinner,sun9i-a80-usb-phy-clk", sun9i_a80_usb_phy_setup);
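
A standalone sketch (not from the patch) of the sizing arithmetic sunxi_usb_clk_setup() performs for the sun5i data above; fls_hi() is a local stand-in for the kernel's __fls()/find_last_bit() on a single 32-bit word.

#include <stdio.h>

/* local stand-in for __fls()/find_last_bit() on one 32-bit word */
static unsigned int fls_hi(unsigned int x)
{
	unsigned int i = 0;

	while (x >>= 1)
		i++;
	return i;
}

int main(void)
{
	unsigned int clk_mask   = (1u << 8) | (1u << 6);	/* sun5i_a13_usb_clk_data */
	unsigned int reset_mask = (1u << 1) | (1u << 0);

	/* gates are registered for bits 6 and 8, taken in order from the
	 * node's "clock-output-names"; the clks array holds qty + 1 slots */
	printf("qty = %u, nr_resets = %u\n",
	       fls_hi(clk_mask), fls_hi(reset_mask) + 1);	/* 8 and 2 */
	return 0;
}
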
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index bfef9abdf232..05c6d08a6695 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -981,7 +981,7 @@ static int clk_pllxc_set_rate(struct clk_hw *hw, unsigned long rate,
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg, old_cfg;
unsigned long flags = 0;
- int ret = 0;
+ int ret;
ret = _pll_ramp_calc_pll(hw, &cfg, rate, parent_rate);
if (ret < 0)
@@ -1005,7 +1005,7 @@ static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct tegra_clk_pll_freq_table cfg;
- int ret = 0, p_div;
+ int ret, p_div;
u64 output_rate = *prate;
ret = _pll_ramp_calc_pll(hw, &cfg, rate, *prate);
@@ -1073,7 +1073,7 @@ static int clk_pllc_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
u32 val;
- int ret = 0;
+ int ret;
unsigned long flags = 0;
if (pll->lock)
@@ -1223,6 +1223,7 @@ static long _pllre_calc_rate(struct tegra_clk_pll *pll,
return output_rate;
}
+
static int clk_pllre_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index f3b773833429..605676d368eb 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -30,13 +30,12 @@
#define OSC_CTRL_OSC_FREQ_SHIFT 28
#define OSC_CTRL_PLL_REF_DIV_SHIFT 26
-int __init tegra_osc_clk_init(void __iomem *clk_base,
- struct tegra_clk *tegra_clks,
- unsigned long *input_freqs, int num,
- unsigned long *osc_freq,
- unsigned long *pll_ref_freq)
+int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
+ unsigned long *input_freqs, unsigned int num,
+ unsigned int clk_m_div, unsigned long *osc_freq,
+ unsigned long *pll_ref_freq)
{
- struct clk *clk;
+ struct clk *clk, *osc;
struct clk **dt_clk;
u32 val, pll_ref_div;
unsigned osc_idx;
@@ -54,22 +53,25 @@ int __init tegra_osc_clk_init(void __iomem *clk_base,
return -EINVAL;
}
- dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, tegra_clks);
+ osc = clk_register_fixed_rate(NULL, "osc", NULL, CLK_IS_ROOT,
+ *osc_freq);
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, clks);
if (!dt_clk)
return 0;
- clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
- *osc_freq);
+ clk = clk_register_fixed_factor(NULL, "clk_m", "osc",
+ 0, 1, clk_m_div);
*dt_clk = clk;
/* pll_ref */
val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
pll_ref_div = 1 << val;
- dt_clk = tegra_lookup_dt_id(tegra_clk_pll_ref, tegra_clks);
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_ref, clks);
if (!dt_clk)
return 0;
- clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "osc",
0, 1, pll_ref_div);
*dt_clk = clk;
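
For a feel of the new osc/clk_m split, a standalone sketch (not from the patch) of the rates the reworked tegra_osc_clk_init() produces for a hypothetical 12 MHz crystal; the Tegra30/114/124 callers in this series all pass clk_m_div = 1, and the PLL_REF_DIV field value below is made up.

#include <stdio.h>

int main(void)
{
	unsigned long osc_freq = 12000000;	/* hypothetical 12 MHz crystal */
	unsigned int clk_m_div = 1;		/* what the callers in this series pass */
	unsigned int pll_ref_field = 0;		/* hypothetical OSC_CTRL pll_ref_div field */

	printf("osc     = %lu\n", osc_freq);
	printf("clk_m   = %lu\n", osc_freq / clk_m_div);
	printf("pll_ref = %lu\n", osc_freq / (1ul << pll_ref_field));
	return 0;
}
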
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index cef0727b9eec..46af9244ba74 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -218,7 +218,7 @@
.clk_id = _clk_id, \
.p.parent_name = _parent_name, \
.periph = TEGRA_CLK_PERIPH(0, 0, 0, 0, 0, 0, 0, \
- _clk_num, _gate_flags, 0, NULL), \
+ _clk_num, _gate_flags, NULL, NULL), \
.flags = _flags \
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index d0766423a5d6..8237d16b4075 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -940,36 +940,6 @@ static struct clk **clks;
static unsigned long osc_freq;
static unsigned long pll_ref_freq;
-static int __init tegra114_osc_clk_init(void __iomem *clk_base)
-{
- struct clk *clk;
- u32 val, pll_ref_div;
-
- val = readl_relaxed(clk_base + OSC_CTRL);
-
- osc_freq = tegra114_input_freq[val >> OSC_CTRL_OSC_FREQ_SHIFT];
- if (!osc_freq) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- /* clk_m */
- clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
- osc_freq);
- clks[TEGRA114_CLK_CLK_M] = clk;
-
- /* pll_ref */
- val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
- pll_ref_div = 1 << val;
- clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
- CLK_SET_RATE_PARENT, 1, pll_ref_div);
- clks[TEGRA114_CLK_PLL_REF] = clk;
-
- pll_ref_freq = osc_freq / pll_ref_div;
-
- return 0;
-}
-
static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
{
struct clk *clk;
@@ -1263,6 +1233,7 @@ static void tegra114_wait_cpu_in_reset(u32 cpu)
cpu_relax();
} while (!(reg & (1 << cpu))); /* check CPU been reset or not */
}
+
static void tegra114_disable_cpu_clock(u32 cpu)
{
/* flow controller would take care in the power sequence. */
@@ -1351,7 +1322,6 @@ static void __init tegra114_clock_apply_init_table(void)
tegra_init_from_table(init_table, clks, TEGRA114_CLK_CLK_MAX);
}
-
/**
* tegra114_car_barrier - wait for pending writes to the CAR to complete
*
@@ -1505,7 +1475,9 @@ static void __init tegra114_clock_init(struct device_node *np)
if (!clks)
return;
- if (tegra114_osc_clk_init(clk_base) < 0)
+ if (tegra_osc_clk_init(clk_base, tegra114_clks, tegra114_input_freq,
+ ARRAY_SIZE(tegra114_input_freq), 1, &osc_freq,
+ &pll_ref_freq) < 0)
return;
tegra114_fixed_clk_init(clk_base);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 9a893f2fe8e9..11f857cd5f6a 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1014,6 +1014,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "fuse", .dt_id = TEGRA124_CLK_FUSE },
{ .dev_id = "rtc-tegra", .dt_id = TEGRA124_CLK_RTC },
{ .dev_id = "timer", .dt_id = TEGRA124_CLK_TIMER },
+ { .con_id = "hda", .dt_id = TEGRA124_CLK_HDA },
+ { .con_id = "hda2codec_2x", .dt_id = TEGRA124_CLK_HDA2CODEC_2X },
+ { .con_id = "hda2hdmi", .dt_id = TEGRA124_CLK_HDA2HDMI },
};
static struct clk **clks;
@@ -1110,16 +1113,18 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
1, 2);
clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
- clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
+ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
- clks[TEGRA124_CLK_PLLD_DSI] = clk;
+ clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk;
- clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
- 0, 48, periph_clk_enb_refcnt);
+ clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0,
+ clk_base, 0, 48,
+ periph_clk_enb_refcnt);
clks[TEGRA124_CLK_DSIA] = clk;
- clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
- 0, 82, periph_clk_enb_refcnt);
+ clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0,
+ clk_base, 0, 82,
+ periph_clk_enb_refcnt);
clks[TEGRA124_CLK_DSIB] = clk;
/* emc mux */
@@ -1395,6 +1400,8 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
static struct tegra_clk_init_table tegra124_init_table[] __initdata = {
{TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
{TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+ {TEGRA124_CLK_HDA, TEGRA124_CLK_PLL_P, 102000000, 0},
+ {TEGRA124_CLK_HDA2CODEC_2X, TEGRA124_CLK_PLL_P, 48000000, 0},
/* This MUST be the last entry. */
{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
};
@@ -1475,7 +1482,8 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
return;
if (tegra_osc_clk_init(clk_base, tegra124_clks, tegra124_input_freq,
- ARRAY_SIZE(tegra124_input_freq), &osc_freq, &pll_ref_freq) < 0)
+ ARRAY_SIZE(tegra124_input_freq), 1, &osc_freq,
+ &pll_ref_freq) < 0)
return;
tegra_fixed_clk_init(tegra124_clks);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 4b9d8bd3d0bf..4b26509fc218 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -657,16 +657,16 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "fuse_burn", .dev_id = "fuse-tegra", .dt_id = TEGRA30_CLK_FUSE_BURN },
{ .con_id = "apbif", .dev_id = "tegra30-ahub", .dt_id = TEGRA30_CLK_APBIF },
{ .con_id = "hda2hdmi", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2HDMI },
- { .dev_id = "tegra-apbdma", .dt_id = TEGRA30_CLK_APBDMA },
- { .dev_id = "rtc-tegra", .dt_id = TEGRA30_CLK_RTC },
- { .dev_id = "timer", .dt_id = TEGRA30_CLK_TIMER },
- { .dev_id = "tegra-kbc", .dt_id = TEGRA30_CLK_KBC },
- { .dev_id = "fsl-tegra-udc", .dt_id = TEGRA30_CLK_USBD },
- { .dev_id = "tegra-ehci.1", .dt_id = TEGRA30_CLK_USB2 },
- { .dev_id = "tegra-ehci.2", .dt_id = TEGRA30_CLK_USB2 },
- { .dev_id = "kfuse-tegra", .dt_id = TEGRA30_CLK_KFUSE },
- { .dev_id = "tegra_sata_cold", .dt_id = TEGRA30_CLK_SATA_COLD },
- { .dev_id = "dtv", .dt_id = TEGRA30_CLK_DTV },
+ { .dev_id = "tegra-apbdma", .dt_id = TEGRA30_CLK_APBDMA },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA30_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA30_CLK_TIMER },
+ { .dev_id = "tegra-kbc", .dt_id = TEGRA30_CLK_KBC },
+ { .dev_id = "fsl-tegra-udc", .dt_id = TEGRA30_CLK_USBD },
+ { .dev_id = "tegra-ehci.1", .dt_id = TEGRA30_CLK_USB2 },
+ { .dev_id = "tegra-ehci.2", .dt_id = TEGRA30_CLK_USB2 },
+ { .dev_id = "kfuse-tegra", .dt_id = TEGRA30_CLK_KFUSE },
+ { .dev_id = "tegra_sata_cold", .dt_id = TEGRA30_CLK_SATA_COLD },
+ { .dev_id = "dtv", .dt_id = TEGRA30_CLK_DTV },
{ .dev_id = "tegra30-i2s.0", .dt_id = TEGRA30_CLK_I2S0 },
{ .dev_id = "tegra30-i2s.1", .dt_id = TEGRA30_CLK_I2S1 },
{ .dev_id = "tegra30-i2s.2", .dt_id = TEGRA30_CLK_I2S2 },
@@ -1434,7 +1434,8 @@ static void __init tegra30_clock_init(struct device_node *np)
return;
if (tegra_osc_clk_init(clk_base, tegra30_clks, tegra30_input_freq,
- ARRAY_SIZE(tegra30_input_freq), &input_freq, NULL) < 0)
+ ARRAY_SIZE(tegra30_input_freq), 1, &input_freq,
+ NULL) < 0)
return;
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 9ddb7547cb43..41cd87c67be6 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -30,6 +30,7 @@
#define CLK_OUT_ENB_V 0x360
#define CLK_OUT_ENB_W 0x364
#define CLK_OUT_ENB_X 0x280
+#define CLK_OUT_ENB_Y 0x298
#define CLK_OUT_ENB_SET_L 0x320
#define CLK_OUT_ENB_CLR_L 0x324
#define CLK_OUT_ENB_SET_H 0x328
@@ -42,6 +43,8 @@
#define CLK_OUT_ENB_CLR_W 0x44c
#define CLK_OUT_ENB_SET_X 0x284
#define CLK_OUT_ENB_CLR_X 0x288
+#define CLK_OUT_ENB_SET_Y 0x29c
+#define CLK_OUT_ENB_CLR_Y 0x2a0
#define RST_DEVICES_L 0x004
#define RST_DEVICES_H 0x008
@@ -50,6 +53,7 @@
#define RST_DEVICES_V 0x358
#define RST_DEVICES_W 0x35C
#define RST_DEVICES_X 0x28C
+#define RST_DEVICES_Y 0x2a4
#define RST_DEVICES_SET_L 0x300
#define RST_DEVICES_CLR_L 0x304
#define RST_DEVICES_SET_H 0x308
@@ -62,6 +66,8 @@
#define RST_DEVICES_CLR_W 0x43c
#define RST_DEVICES_SET_X 0x290
#define RST_DEVICES_CLR_X 0x294
+#define RST_DEVICES_SET_Y 0x2a8
+#define RST_DEVICES_CLR_Y 0x2ac
/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
@@ -122,6 +128,14 @@ static struct tegra_clk_periph_regs periph_regs[] = {
.rst_set_reg = RST_DEVICES_SET_X,
.rst_clr_reg = RST_DEVICES_CLR_X,
},
+ [6] = {
+ .enb_reg = CLK_OUT_ENB_Y,
+ .enb_set_reg = CLK_OUT_ENB_SET_Y,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_Y,
+ .rst_reg = RST_DEVICES_Y,
+ .rst_set_reg = RST_DEVICES_SET_Y,
+ .rst_clr_reg = RST_DEVICES_CLR_Y,
+ },
};
static void __iomem *clk_base;
@@ -272,7 +286,7 @@ void __init tegra_add_of_provider(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
rst_ctlr.of_node = np;
- rst_ctlr.nr_resets = clk_num * 32;
+ rst_ctlr.nr_resets = periph_banks * 32;
reset_controller_register(&rst_ctlr);
}
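
A hedged reading of why nr_resets changes to periph_banks * 32: as far as the shared CAR reset helpers go, a reset id selects a bank via id / 32 and a bit via id % 32, so adding the Y bank at index [6] extends the valid id range by another 32. The id in this standalone sketch (not from the patch) is hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int id = 6 * 32 + 3;	/* hypothetical reset in the new Y bank */

	/* bank 6 corresponds to the RST_DEVICES_Y registers added above */
	printf("bank %u, bit %u\n", id / 32, id % 32);
	return 0;
}
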
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 4e458aa8d45c..d6ac00647faf 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -548,7 +548,7 @@ struct clk *tegra_clk_register_super_mux(const char *name,
u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock);
/**
- * struct clk_init_tabel - clock initialization table
+ * struct clk_init_table - clock initialization table
* @clk_id: clock id as mentioned in device tree bindings
* @parent_id: parent clock id as mentioned in device tree bindings
* @rate: rate to set
@@ -615,10 +615,10 @@ void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
void tegra_pmc_clk_init(void __iomem *pmc_base, struct tegra_clk *tegra_clks);
void tegra_fixed_clk_init(struct tegra_clk *tegra_clks);
-int tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *tegra_clks,
- unsigned long *input_freqs, int num,
- unsigned long *osc_freq,
- unsigned long *pll_ref_freq);
+int tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
+ unsigned long *input_freqs, unsigned int num,
+ unsigned int clk_m_div, unsigned long *osc_freq,
+ unsigned long *pll_ref_freq);
void tegra_super_clk_gen4_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *pll_params);
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 72d97279eae1..49baf3831546 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -203,7 +203,7 @@ static void __init of_dra7_apll_setup(struct device_node *node)
ad->control_reg = ti_clk_get_reg_addr(node, 0);
ad->idlest_reg = ti_clk_get_reg_addr(node, 1);
- if (!ad->control_reg || !ad->idlest_reg)
+ if (IS_ERR(ad->control_reg) || IS_ERR(ad->idlest_reg))
goto cleanup;
ad->idlest_mask = 0x1;
@@ -384,7 +384,8 @@ static void __init of_omap2_apll_setup(struct device_node *node)
ad->autoidle_reg = ti_clk_get_reg_addr(node, 1);
ad->idlest_reg = ti_clk_get_reg_addr(node, 2);
- if (!ad->control_reg || !ad->autoidle_reg || !ad->idlest_reg)
+ if (IS_ERR(ad->control_reg) || IS_ERR(ad->autoidle_reg) ||
+ IS_ERR(ad->idlest_reg))
goto cleanup;
clk = clk_register(NULL, &clk_hw->hw);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 8912ff80af34..e75c64c9e81c 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -119,7 +119,7 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
clk->name = node->name;
clk->reg = ti_clk_get_reg_addr(node, 0);
- if (!clk->reg) {
+ if (IS_ERR(clk->reg)) {
kfree(clk);
return -EINVAL;
}
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
index e0732a4c8f26..0b61548d569b 100644
--- a/drivers/clk/ti/clk-3xxx-legacy.c
+++ b/drivers/clk/ti/clk-3xxx-legacy.c
@@ -4320,7 +4320,6 @@ static struct ti_clk_alias omap3xxx_clks[] = {
CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
CLK(NULL, "sys_altclk", &sys_altclk),
- CLK(NULL, "mcbsp_clks", &mcbsp_clks),
CLK(NULL, "sys_clkout1", &sys_clkout1),
CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
CLK(NULL, "core_ck", &core_ck),
@@ -4369,8 +4368,6 @@ static struct ti_clk_alias omap3xxx_clks[] = {
CLK(NULL, "i2c3_fck", &i2c3_fck),
CLK(NULL, "i2c2_fck", &i2c2_fck),
CLK(NULL, "i2c1_fck", &i2c1_fck),
- CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
- CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
CLK(NULL, "core_48m_fck", &core_48m_fck),
CLK(NULL, "mcspi4_fck", &mcspi4_fck),
CLK(NULL, "mcspi3_fck", &mcspi3_fck),
@@ -4409,8 +4406,6 @@ static struct ti_clk_alias omap3xxx_clks[] = {
CLK(NULL, "uart1_ick", &uart1_ick),
CLK(NULL, "gpt11_ick", &gpt11_ick),
CLK(NULL, "gpt10_ick", &gpt10_ick),
- CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
- CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
CLK(NULL, "omapctrl_ick", &omapctrl_ick),
@@ -4467,15 +4462,22 @@ static struct ti_clk_alias omap3xxx_clks[] = {
CLK(NULL, "gpt4_ick", &gpt4_ick),
CLK(NULL, "gpt3_ick", &gpt3_ick),
CLK(NULL, "gpt2_ick", &gpt2_ick),
+ CLK(NULL, "mcbsp_clks", &mcbsp_clks),
+ CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
- CLK(NULL, "mcbsp4_ick", &mcbsp2_ick),
+ CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
+ CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
+ CLK(NULL, "mcbsp2_ick", &mcbsp2_ick),
CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
- CLK(NULL, "mcbsp2_ick", &mcbsp4_ick),
+ CLK(NULL, "mcbsp4_ick", &mcbsp4_ick),
+ CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
+ CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
+ CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
CLK("etb", "emu_src_ck", &emu_src_ck),
CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 383a06e49b09..757636d166cf 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -34,7 +34,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "omap_96m_alwon_fck", "omap_96m_alwon_fck"),
DT_CLK("etb", "emu_core_alwon_ck", "emu_core_alwon_ck"),
DT_CLK(NULL, "sys_altclk", "sys_altclk"),
- DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
DT_CLK(NULL, "sys_clkout1", "sys_clkout1"),
DT_CLK(NULL, "dpll1_ck", "dpll1_ck"),
DT_CLK(NULL, "dpll1_x2_ck", "dpll1_x2_ck"),
@@ -82,8 +81,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "i2c3_fck", "i2c3_fck"),
DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
- DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
- DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
DT_CLK(NULL, "core_48m_fck", "core_48m_fck"),
DT_CLK(NULL, "mcspi4_fck", "mcspi4_fck"),
DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
@@ -122,10 +119,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "uart1_ick", "uart1_ick"),
DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
- DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"),
- DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"),
- DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
- DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
DT_CLK(NULL, "dss_tv_fck", "dss_tv_fck"),
DT_CLK(NULL, "dss_96m_fck", "dss_96m_fck"),
@@ -179,15 +172,17 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
- DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"),
- DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"),
- DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"),
- DT_CLK(NULL, "mcbsp4_ick", "mcbsp2_ick"),
+ DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
+ DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "mcbsp2_ick", "mcbsp2_ick"),
DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
- DT_CLK(NULL, "mcbsp2_ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp4_ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
+ DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
+ DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
DT_CLK("etb", "emu_src_ck", "emu_src_ck"),
DT_CLK(NULL, "emu_src_ck", "emu_src_ck"),
DT_CLK(NULL, "pclk_fck", "pclk_fck"),
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 4f4c87751db5..581db7711f51 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -249,17 +249,6 @@ static struct ti_dt_clk omap44xx_clks[] = {
DT_CLK("usbhs_tll", "usbtll_fck", "dummy_ck"),
DT_CLK("omap_wdt", "ick", "dummy_ck"),
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
- DT_CLK("omap_timer.1", "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK("omap_timer.2", "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK("omap_timer.3", "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK("omap_timer.4", "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK("omap_timer.9", "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK("omap_timer.10", "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK("omap_timer.11", "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK("omap_timer.5", "timer_sys_ck", "syc_clk_div_ck"),
- DT_CLK("omap_timer.6", "timer_sys_ck", "syc_clk_div_ck"),
- DT_CLK("omap_timer.7", "timer_sys_ck", "syc_clk_div_ck"),
- DT_CLK("omap_timer.8", "timer_sys_ck", "syc_clk_div_ck"),
DT_CLK("4a318000.timer", "timer_sys_ck", "sys_clkin_ck"),
DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin_ck"),
DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin_ck"),
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 14160b223548..96c69a335975 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -208,17 +208,17 @@ static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
DT_CLK("omap_wdt", "ick", "dummy_ck"),
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
- DT_CLK("omap_timer.1", "sys_ck", "sys_clkin"),
- DT_CLK("omap_timer.2", "sys_ck", "sys_clkin"),
- DT_CLK("omap_timer.3", "sys_ck", "sys_clkin"),
- DT_CLK("omap_timer.4", "sys_ck", "sys_clkin"),
- DT_CLK("omap_timer.9", "sys_ck", "sys_clkin"),
- DT_CLK("omap_timer.10", "sys_ck", "sys_clkin"),
- DT_CLK("omap_timer.11", "sys_ck", "sys_clkin"),
- DT_CLK("omap_timer.5", "sys_ck", "dss_syc_gfclk_div"),
- DT_CLK("omap_timer.6", "sys_ck", "dss_syc_gfclk_div"),
- DT_CLK("omap_timer.7", "sys_ck", "dss_syc_gfclk_div"),
- DT_CLK("omap_timer.8", "sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("4ae18000.timer", "timer_sys_ck", "sys_clkin"),
+ DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin"),
+ DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin"),
+ DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin"),
+ DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin"),
+ DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin"),
+ DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin"),
+ DT_CLK("40138000.timer", "timer_sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("4013a000.timer", "timer_sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("4013c000.timer", "timer_sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("4013e000.timer", "timer_sys_ck", "dss_syc_gfclk_div"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index ee32f4deebf4..5d2217ae4478 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -289,17 +289,21 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
DT_CLK("omap_wdt", "ick", "dummy_ck"),
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
- DT_CLK("4ae18000.timer", "timer_sys_ck", "sys_clkin2"),
- DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin2"),
- DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin2"),
- DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin2"),
- DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin2"),
- DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin2"),
- DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("4ae18000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48032000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48034000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48036000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("4803e000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48086000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48088000.timer", "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK("48820000.timer", "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK("48822000.timer", "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK("48824000.timer", "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK("48826000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48828000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("4882a000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("4882c000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("4882e000.timer", "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 59bb4b39d12e..d86bc46b93bd 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -294,7 +294,7 @@ static int of_dra7_atl_clk_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id of_dra7_atl_clk_match_tbl[] = {
+static const struct of_device_id of_dra7_atl_clk_match_tbl[] = {
{ .compatible = "ti,dra7-atl", },
{},
};
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index e22b95646e09..0ebe5c51062b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -103,7 +103,8 @@ int __init ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
* @index: register index from the clock node
*
* Builds clock register address from device tree information. This
- * is a struct of type clk_omap_reg.
+ * is a struct of type clk_omap_reg. Returns a pointer to the register
+ * address, or an error pointer (ERR_PTR) on failure.
*/
void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
{
@@ -121,14 +122,14 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
if (i == CLK_MAX_MEMMAPS) {
pr_err("clk-provider not found for %s!\n", node->name);
- return NULL;
+ return ERR_PTR(-ENOENT);
}
reg->index = i;
if (of_property_read_u32_index(node, "reg", index, &val)) {
pr_err("%s must have reg[%d]!\n", node->name, index);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
reg->offset = val;
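With this change ti_clk_get_reg_addr() reports failure through an error pointer instead of NULL, which is why the callers in apll.c, autoidle.c, divider.c, dpll.c, gate.c, interface.c and mux.c switch to IS_ERR() checks. A self-contained C sketch of that calling convention, using simplified userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers; the lookup function and register table are hypothetical:

#include <stdio.h>
#include <errno.h>

/* Simplified userspace stand-ins for the kernel's err.h helpers */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int regs[4] = { 0x100, 0x104, 0x108, 0x10c };

/* Hypothetical lookup: returns a register pointer or an error pointer */
static int *get_reg_addr(int index)
{
	if (index < 0 || index >= 4)
		return ERR_PTR(-EINVAL);
	return &regs[index];
}

int main(void)
{
	int *reg = get_reg_addr(7);

	/* Caller pattern used throughout the hunks above */
	if (IS_ERR(reg)) {
		printf("lookup failed: %ld\n", PTR_ERR(reg));
		return 1;
	}
	printf("reg value: 0x%x\n", *reg);
	return 0;
}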
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index b4c5faccaece..35fe1085480c 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -52,7 +52,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
}
}
-static struct of_device_id ti_clkdm_match_table[] __initdata = {
+static const struct of_device_id ti_clkdm_match_table[] __initconst = {
{ .compatible = "ti,clockdomain" },
{ }
};
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 3654f61912eb..96f83cedb4b3 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -69,7 +69,7 @@ struct component_clk {
struct list_head link;
};
-static const char * __initconst component_clk_types[] = {
+static const char * const component_clk_types[] __initconst = {
"gate", "divider", "mux"
};
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 6211893c0980..ff5f117950a9 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -530,8 +530,8 @@ static int __init ti_clk_divider_populate(struct device_node *node,
u32 val;
*reg = ti_clk_get_reg_addr(node, 0);
- if (!*reg)
- return -EINVAL;
+ if (IS_ERR(*reg))
+ return PTR_ERR(*reg);
if (!of_property_read_u32(node, "ti,bit-shift", &val))
*shift = val;
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 81dc4698dc41..11478a501c30 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -390,18 +390,18 @@ static void __init of_ti_dpll_setup(struct device_node *node,
#endif
} else {
dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
- if (!dd->idlest_reg)
+ if (IS_ERR(dd->idlest_reg))
goto cleanup;
dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
}
- if (!dd->control_reg || !dd->mult_div1_reg)
+ if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg))
goto cleanup;
if (dd->autoidle_mask) {
dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
- if (!dd->autoidle_reg)
+ if (IS_ERR(dd->autoidle_reg))
goto cleanup;
}
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index d21640634adf..ffcd8e09e85b 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -11,19 +11,27 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
-#include <asm/div64.h>
/* FAPLL Control Register PLL_CTRL */
+#define FAPLL_MAIN_MULT_N_SHIFT 16
+#define FAPLL_MAIN_DIV_P_SHIFT 8
#define FAPLL_MAIN_LOCK BIT(7)
#define FAPLL_MAIN_PLLEN BIT(3)
#define FAPLL_MAIN_BP BIT(2)
#define FAPLL_MAIN_LOC_CTL BIT(0)
+#define FAPLL_MAIN_MAX_MULT_N 0xffff
+#define FAPLL_MAIN_MAX_DIV_P 0xff
+#define FAPLL_MAIN_CLEAR_MASK \
+ ((FAPLL_MAIN_MAX_MULT_N << FAPLL_MAIN_MULT_N_SHIFT) | \
+ (FAPLL_MAIN_MAX_DIV_P << FAPLL_MAIN_DIV_P_SHIFT) | \
+ FAPLL_MAIN_LOC_CTL)
+
/* FAPLL powerdown register PWD */
#define FAPLL_PWD_OFFSET 4
@@ -49,6 +57,10 @@
/* Synthesizer frequency register */
#define SYNTH_LDFREQ BIT(31)
+#define SYNTH_PHASE_K 8
+#define SYNTH_MAX_INT_DIV 0xf
+#define SYNTH_MAX_DIV_M 0xff
+
struct fapll_data {
struct clk_hw hw;
void __iomem *base;
@@ -79,6 +91,48 @@ static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
return !!(v & FAPLL_MAIN_BP);
}
+static void ti_fapll_set_bypass(struct fapll_data *fd)
+{
+ u32 v = readl_relaxed(fd->base);
+
+ if (fd->bypass_bit_inverted)
+ v &= ~FAPLL_MAIN_BP;
+ else
+ v |= FAPLL_MAIN_BP;
+ writel_relaxed(v, fd->base);
+}
+
+static void ti_fapll_clear_bypass(struct fapll_data *fd)
+{
+ u32 v = readl_relaxed(fd->base);
+
+ if (fd->bypass_bit_inverted)
+ v |= FAPLL_MAIN_BP;
+ else
+ v &= ~FAPLL_MAIN_BP;
+ writel_relaxed(v, fd->base);
+}
+
+static int ti_fapll_wait_lock(struct fapll_data *fd)
+{
+ int retries = FAPLL_MAX_RETRIES;
+ u32 v;
+
+ while ((v = readl_relaxed(fd->base))) {
+ if (v & FAPLL_MAIN_LOCK)
+ return 0;
+
+ if (retries-- <= 0)
+ break;
+
+ udelay(1);
+ }
+
+ pr_err("%s failed to lock\n", fd->name);
+
+ return -ETIMEDOUT;
+}
+
static int ti_fapll_enable(struct clk_hw *hw)
{
struct fapll_data *fd = to_fapll(hw);
@@ -86,6 +140,7 @@ static int ti_fapll_enable(struct clk_hw *hw)
v |= FAPLL_MAIN_PLLEN;
writel_relaxed(v, fd->base);
+ ti_fapll_wait_lock(fd);
return 0;
}
@@ -141,12 +196,85 @@ static u8 ti_fapll_get_parent(struct clk_hw *hw)
return 0;
}
+static int ti_fapll_set_div_mult(unsigned long rate,
+ unsigned long parent_rate,
+ u32 *pre_div_p, u32 *mult_n)
+{
+ /*
+ * So far no luck getting decent clock with PLL divider,
+ * PLL does not seem to lock and the signal does not look
+ * right. It seems the divider can only be used together
+ * with the multiplier?
+ */
+ if (rate < parent_rate) {
+ pr_warn("FAPLL main divider rates unsupported\n");
+ return -EINVAL;
+ }
+
+ *mult_n = rate / parent_rate;
+ if (*mult_n > FAPLL_MAIN_MAX_MULT_N)
+ return -EINVAL;
+ *pre_div_p = 1;
+
+ return 0;
+}
+
+static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 pre_div_p, mult_n;
+ int error;
+
+ if (!rate)
+ return -EINVAL;
+
+ error = ti_fapll_set_div_mult(rate, *parent_rate,
+ &pre_div_p, &mult_n);
+ if (error)
+ return error;
+
+ rate = *parent_rate / pre_div_p;
+ rate *= mult_n;
+
+ return rate;
+}
+
+static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct fapll_data *fd = to_fapll(hw);
+ u32 pre_div_p, mult_n, v;
+ int error;
+
+ if (!rate)
+ return -EINVAL;
+
+ error = ti_fapll_set_div_mult(rate, parent_rate,
+ &pre_div_p, &mult_n);
+ if (error)
+ return error;
+
+ ti_fapll_set_bypass(fd);
+ v = readl_relaxed(fd->base);
+ v &= ~FAPLL_MAIN_CLEAR_MASK;
+ v |= pre_div_p << FAPLL_MAIN_DIV_P_SHIFT;
+ v |= mult_n << FAPLL_MAIN_MULT_N_SHIFT;
+ writel_relaxed(v, fd->base);
+ if (ti_fapll_is_enabled(hw))
+ ti_fapll_wait_lock(fd);
+ ti_fapll_clear_bypass(fd);
+
+ return 0;
+}
+
static struct clk_ops ti_fapll_ops = {
.enable = ti_fapll_enable,
.disable = ti_fapll_disable,
.is_enabled = ti_fapll_is_enabled,
.recalc_rate = ti_fapll_recalc_rate,
.get_parent = ti_fapll_get_parent,
+ .round_rate = ti_fapll_round_rate,
+ .set_rate = ti_fapll_set_rate,
};
static int ti_fapll_synth_enable(struct clk_hw *hw)
@@ -204,7 +332,7 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
/*
* Synth frequency integer and fractional divider.
* Note that the phase output K is 8, so the result needs
- * to be multiplied by 8.
+ * to be multiplied by SYNTH_PHASE_K.
*/
if (synth->freq) {
u32 v, synth_int_div, synth_frac_div, synth_div_freq;
@@ -215,14 +343,138 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
rate *= 10000000;
do_div(rate, synth_div_freq);
- rate *= 8;
+ rate *= SYNTH_PHASE_K;
}
- /* Synth ost-divider M */
- synth_div_m = readl_relaxed(synth->div) & 0xff;
- do_div(rate, synth_div_m);
+ /* Synth post-divider M */
+ synth_div_m = readl_relaxed(synth->div) & SYNTH_MAX_DIV_M;
- return rate;
+ return DIV_ROUND_UP_ULL(rate, synth_div_m);
+}
+
+static unsigned long ti_fapll_synth_get_frac_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ unsigned long current_rate, frac_rate;
+ u32 post_div_m;
+
+ current_rate = ti_fapll_synth_recalc_rate(hw, parent_rate);
+ post_div_m = readl_relaxed(synth->div) & SYNTH_MAX_DIV_M;
+ frac_rate = current_rate * post_div_m;
+
+ return frac_rate;
+}
+
+static u32 ti_fapll_synth_set_frac_rate(struct fapll_synth *synth,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 post_div_m, synth_int_div = 0, synth_frac_div = 0, v;
+
+ post_div_m = DIV_ROUND_UP_ULL((u64)parent_rate * SYNTH_PHASE_K, rate);
+ post_div_m = post_div_m / SYNTH_MAX_INT_DIV;
+ if (post_div_m > SYNTH_MAX_DIV_M)
+ return -EINVAL;
+ if (!post_div_m)
+ post_div_m = 1;
+
+ for (; post_div_m < SYNTH_MAX_DIV_M; post_div_m++) {
+ synth_int_div = DIV_ROUND_UP_ULL((u64)parent_rate *
+ SYNTH_PHASE_K *
+ 10000000,
+ rate * post_div_m);
+ synth_frac_div = synth_int_div % 10000000;
+ synth_int_div /= 10000000;
+
+ if (synth_int_div <= SYNTH_MAX_INT_DIV)
+ break;
+ }
+
+ if (synth_int_div > SYNTH_MAX_INT_DIV)
+ return -EINVAL;
+
+ v = readl_relaxed(synth->freq);
+ v &= ~0x1fffffff;
+ v |= (synth_int_div & SYNTH_MAX_INT_DIV) << 24;
+ v |= (synth_frac_div & 0xffffff);
+ v |= SYNTH_LDFREQ;
+ writel_relaxed(v, synth->freq);
+
+ return post_div_m;
+}
+
+static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ struct fapll_data *fd = synth->fd;
+ unsigned long r;
+
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ return -EINVAL;
+
+ /* Only post divider m available with no fractional divider? */
+ if (!synth->freq) {
+ unsigned long frac_rate;
+ u32 synth_post_div_m;
+
+ frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate);
+ synth_post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ r = DIV_ROUND_UP(frac_rate, synth_post_div_m);
+ goto out;
+ }
+
+ r = *parent_rate * SYNTH_PHASE_K;
+ if (rate > r)
+ goto out;
+
+ r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M);
+ if (rate < r)
+ goto out;
+
+ r = rate;
+out:
+ return r;
+}
+
+static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ struct fapll_data *fd = synth->fd;
+ unsigned long frac_rate, post_rate = 0;
+ u32 post_div_m = 0, v;
+
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ return -EINVAL;
+
+ /* Produce the rate with just post divider M? */
+ frac_rate = ti_fapll_synth_get_frac_rate(hw, parent_rate);
+ if (frac_rate < rate) {
+ if (!synth->freq)
+ return -EINVAL;
+ } else {
+ post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ if (post_div_m && (post_div_m <= SYNTH_MAX_DIV_M))
+ post_rate = DIV_ROUND_UP(frac_rate, post_div_m);
+ if (!synth->freq && !post_rate)
+ return -EINVAL;
+ }
+
+ /* Need to recalculate the fractional divider? */
+ if ((post_rate != rate) && synth->freq)
+ post_div_m = ti_fapll_synth_set_frac_rate(synth,
+ rate,
+ parent_rate);
+
+ v = readl_relaxed(synth->div);
+ v &= ~SYNTH_MAX_DIV_M;
+ v |= post_div_m;
+ v |= SYNTH_LDMDIV1;
+ writel_relaxed(v, synth->div);
+
+ return 0;
}
static struct clk_ops ti_fapll_synt_ops = {
@@ -230,6 +482,8 @@ static struct clk_ops ti_fapll_synt_ops = {
.disable = ti_fapll_synth_disable,
.is_enabled = ti_fapll_synth_is_enabled,
.recalc_rate = ti_fapll_synth_recalc_rate,
+ .round_rate = ti_fapll_synth_round_rate,
+ .set_rate = ti_fapll_synth_set_rate,
};
static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
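The synthesizer rate computed by ti_fapll_synth_recalc_rate() above combines an integer-plus-fractional divider, the fixed phase multiplier SYNTH_PHASE_K and the post-divider M. A userspace sketch of the same arithmetic, assuming made-up register field values:

#include <stdio.h>
#include <stdint.h>

#define SYNTH_PHASE_K 8

/*
 * Mirrors the math in ti_fapll_synth_recalc_rate(): the fractional
 * divider is int_div + frac_div / 10000000, the phase output
 * multiplies by K, and the post-divider M divides the result
 * (rounded up).
 */
static uint64_t synth_rate(uint64_t parent_rate, uint32_t int_div,
			   uint32_t frac_div, uint32_t post_div_m)
{
	uint64_t div_freq = (uint64_t)int_div * 10000000 + frac_div;
	uint64_t rate = parent_rate * 10000000;

	rate /= div_freq;		/* divide by int_div.frac_div */
	rate *= SYNTH_PHASE_K;		/* phase output K is 8 */

	return (rate + post_div_m - 1) / post_div_m;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* Example values only: 27 MHz input, divider 4.5, post-divider 2 */
	printf("%llu Hz\n",
	       (unsigned long long)synth_rate(27000000, 4, 5000000, 2));
	return 0;
}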
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index d493307b73f4..0c6fdfcd5f93 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -225,7 +225,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
if (ops != &omap_gate_clkdm_clk_ops) {
reg = ti_clk_get_reg_addr(node, 0);
- if (!reg)
+ if (IS_ERR(reg))
return;
if (!of_property_read_u32(node, "ti,bit-shift", &val))
@@ -264,7 +264,7 @@ _of_ti_composite_gate_clk_setup(struct device_node *node,
return;
gate->enable_reg = ti_clk_get_reg_addr(node, 0);
- if (!gate->enable_reg)
+ if (IS_ERR(gate->enable_reg))
goto cleanup;
of_property_read_u32(node, "ti,bit-shift", &val);
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 265d91f071c5..c76230d8dd04 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -111,7 +111,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
u32 val;
reg = ti_clk_get_reg_addr(node, 0);
- if (!reg)
+ if (IS_ERR(reg))
return;
if (!of_property_read_u32(node, "ti,bit-shift", &val))
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 728e253606bc..5cdeed538b08 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -210,7 +210,7 @@ static void of_mux_clk_setup(struct device_node *node)
reg = ti_clk_get_reg_addr(node, 0);
- if (!reg)
+ if (IS_ERR(reg))
goto cleanup;
of_property_read_u32(node, "ti,bit-shift", &shift);
@@ -283,7 +283,7 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
mux->reg = ti_clk_get_reg_addr(node, 0);
- if (!mux->reg)
+ if (IS_ERR(mux->reg))
goto cleanup;
if (!of_property_read_u32(node, "ti,bit-shift", &val))
diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c
index a76981e88cb6..7a4f8635bd1e 100644
--- a/drivers/clk/versatile/clk-versatile.c
+++ b/drivers/clk/versatile/clk-versatile.c
@@ -69,7 +69,7 @@ static void __init cm_osc_setup(struct device_node *np,
struct device_node *parent;
parent = of_get_parent(np);
- if (!np) {
+ if (!parent) {
pr_err("no parent on core module clock\n");
return;
}
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 765f1e0eeeb2..89c0609e180b 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -110,7 +110,7 @@ static int vexpress_osc_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id vexpress_osc_of_match[] = {
+static const struct of_device_id vexpress_osc_of_match[] = {
{ .compatible = "arm,vexpress-osc", },
{}
};
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index f870aad57711..40cb113be6af 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -85,22 +85,22 @@ static DEFINE_SPINLOCK(canmioclk_lock);
static DEFINE_SPINLOCK(dbgclk_lock);
static DEFINE_SPINLOCK(aperclk_lock);
-static const char *armpll_parents[] __initconst = {"armpll_int", "ps_clk"};
-static const char *ddrpll_parents[] __initconst = {"ddrpll_int", "ps_clk"};
-static const char *iopll_parents[] __initconst = {"iopll_int", "ps_clk"};
-static const char *gem0_mux_parents[] __initconst = {"gem0_div1", "dummy_name"};
-static const char *gem1_mux_parents[] __initconst = {"gem1_div1", "dummy_name"};
-static const char *can0_mio_mux2_parents[] __initconst = {"can0_gate",
+static const char *armpll_parents[] __initdata = {"armpll_int", "ps_clk"};
+static const char *ddrpll_parents[] __initdata = {"ddrpll_int", "ps_clk"};
+static const char *iopll_parents[] __initdata = {"iopll_int", "ps_clk"};
+static const char *gem0_mux_parents[] __initdata = {"gem0_div1", "dummy_name"};
+static const char *gem1_mux_parents[] __initdata = {"gem1_div1", "dummy_name"};
+static const char *can0_mio_mux2_parents[] __initdata = {"can0_gate",
"can0_mio_mux"};
-static const char *can1_mio_mux2_parents[] __initconst = {"can1_gate",
+static const char *can1_mio_mux2_parents[] __initdata = {"can1_gate",
"can1_mio_mux"};
-static const char *dbg_emio_mux_parents[] __initconst = {"dbg_div",
+static const char *dbg_emio_mux_parents[] __initdata = {"dbg_div",
"dummy_name"};
-static const char *dbgtrc_emio_input_names[] __initconst = {"trace_emio_clk"};
-static const char *gem0_emio_input_names[] __initconst = {"gem0_emio_clk"};
-static const char *gem1_emio_input_names[] __initconst = {"gem1_emio_clk"};
-static const char *swdt_ext_clk_input_names[] __initconst = {"swdt_ext_clk"};
+static const char *dbgtrc_emio_input_names[] __initdata = {"trace_emio_clk"};
+static const char *gem0_emio_input_names[] __initdata = {"gem0_emio_clk"};
+static const char *gem1_emio_input_names[] __initdata = {"gem1_emio_clk"};
+static const char *swdt_ext_clk_input_names[] __initdata = {"swdt_ext_clk"};
static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a0b036ccb118..51d7865fdddb 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -143,6 +143,11 @@ config ATMEL_PIT
select CLKSRC_OF if OF
def_bool SOC_AT91SAM9 || SOC_SAMA5
+config ATMEL_ST
+ bool
+ select CLKSRC_OF
+ select MFD_SYSCON
+
config CLKSRC_METAG_GENERIC
def_bool y if METAG
help
@@ -233,7 +238,7 @@ config CLKSRC_QCOM
config CLKSRC_VERSATILE
bool "ARM Versatile (Express) reference platforms clock source"
- depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+ depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select CLKSRC_OF
default y if MFD_VEXPRESS_SYSREG
help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 752d5c70b0ef..5b85f6adb258 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
+obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 266469691e58..0aa135ddbf80 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
+#include <linux/acpi.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -371,8 +372,12 @@ arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
if (arch_timer_rate)
return;
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
+ /*
+ * Try to determine the frequency from the device tree or CNTFRQ;
+ * if ACPI is enabled, get the frequency from CNTFRQ only.
+ */
+ if (!acpi_disabled ||
+ of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
if (cntbase)
arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
else
@@ -691,28 +696,8 @@ static void __init arch_timer_common_init(void)
arch_timer_arch_init();
}
-static void __init arch_timer_init(struct device_node *np)
+static void __init arch_timer_init(void)
{
- int i;
-
- if (arch_timers_present & ARCH_CP15_TIMER) {
- pr_warn("arch_timer: multiple nodes in dt, skipping\n");
- return;
- }
-
- arch_timers_present |= ARCH_CP15_TIMER;
- for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
- arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
- arch_timer_detect_rate(NULL, np);
-
- /*
- * If we cannot rely on firmware initializing the timer registers then
- * we should use the physical timers instead.
- */
- if (IS_ENABLED(CONFIG_ARM) &&
- of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
- arch_timer_use_virtual = false;
-
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
@@ -731,13 +716,39 @@ static void __init arch_timer_init(struct device_node *np)
}
}
- arch_timer_c3stop = !of_property_read_bool(np, "always-on");
-
arch_timer_register();
arch_timer_common_init();
}
-CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
-CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
+
+static void __init arch_timer_of_init(struct device_node *np)
+{
+ int i;
+
+ if (arch_timers_present & ARCH_CP15_TIMER) {
+ pr_warn("arch_timer: multiple nodes in dt, skipping\n");
+ return;
+ }
+
+ arch_timers_present |= ARCH_CP15_TIMER;
+ for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+ arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+ arch_timer_detect_rate(NULL, np);
+
+ arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
+ /*
+ * If we cannot rely on firmware initializing the timer registers then
+ * we should use the physical timers instead.
+ */
+ if (IS_ENABLED(CONFIG_ARM) &&
+ of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
+ arch_timer_use_virtual = false;
+
+ arch_timer_init();
+}
+CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
+CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
static void __init arch_timer_mem_init(struct device_node *np)
{
@@ -804,3 +815,70 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
+
+#ifdef CONFIG_ACPI
+static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
+{
+ int trigger, polarity;
+
+ if (!interrupt)
+ return 0;
+
+ trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
+ : ACPI_LEVEL_SENSITIVE;
+
+ polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
+ : ACPI_ACTIVE_HIGH;
+
+ return acpi_register_gsi(NULL, interrupt, trigger, polarity);
+}
+
+/* Initialize per-processor generic timer */
+static int __init arch_timer_acpi_init(struct acpi_table_header *table)
+{
+ struct acpi_table_gtdt *gtdt;
+
+ if (arch_timers_present & ARCH_CP15_TIMER) {
+ pr_warn("arch_timer: already initialized, skipping\n");
+ return -EINVAL;
+ }
+
+ gtdt = container_of(table, struct acpi_table_gtdt, header);
+
+ arch_timers_present |= ARCH_CP15_TIMER;
+
+ arch_timer_ppi[PHYS_SECURE_PPI] =
+ map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
+ gtdt->secure_el1_flags);
+
+ arch_timer_ppi[PHYS_NONSECURE_PPI] =
+ map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
+ gtdt->non_secure_el1_flags);
+
+ arch_timer_ppi[VIRT_PPI] =
+ map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
+ gtdt->virtual_timer_flags);
+
+ arch_timer_ppi[HYP_PPI] =
+ map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
+ gtdt->non_secure_el2_flags);
+
+ /* Get the frequency from CNTFRQ */
+ arch_timer_detect_rate(NULL, NULL);
+
+ /* Always-on capability */
+ arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
+
+ arch_timer_init();
+ return 0;
+}
+
+/* Initialize all the generic timers present in the GTDT */
+void __init acpi_generic_timer_init(void)
+{
+ if (acpi_disabled)
+ return;
+
+ acpi_table_parse(ACPI_SIG_GTDT, arch_timer_acpi_init);
+}
+#endif
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index f3656a6b0382..35a88097af3c 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -117,7 +117,8 @@ static void apbt_set_mode(enum clock_event_mode mode,
unsigned long period;
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
- pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask),
+ pr_debug("%s CPU %d mode=%d\n", __func__,
+ cpumask_first(evt->cpumask),
mode);
switch (mode) {
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 3bd31b1321f6..b81ed1a5342d 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
@@ -133,6 +134,9 @@ static void __init __gic_clocksource_init(void)
clocksource_register_hz(&gic_clocksource, gic_frequency);
gic_clockevent_init();
+
+ /* And finally start the counter */
+ gic_start_count();
}
void __init gic_clocksource_init(unsigned int frequency)
@@ -146,11 +150,18 @@ void __init gic_clocksource_init(unsigned int frequency)
static void __init gic_clocksource_of_init(struct device_node *node)
{
+ struct clk *clk;
+
if (WARN_ON(!gic_present || !node->parent ||
!of_device_is_compatible(node->parent, "mti,gic")))
return;
- if (of_property_read_u32(node, "clock-frequency", &gic_frequency)) {
+ clk = of_clk_get(node, 0);
+ if (!IS_ERR(clk)) {
+ gic_frequency = clk_get_rate(clk);
+ clk_put(clk);
+ } else if (of_property_read_u32(node, "clock-frequency",
+ &gic_frequency)) {
pr_err("GIC frequency not specified.\n");
return;
}
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/drivers/clocksource/timer-atmel-st.c
index b00d09555f2b..1692e17e096b 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -24,19 +24,17 @@
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-st.h>
#include <linux/of_irq.h>
-
-#include <asm/mach/time.h>
-
-#include <mach/at91_st.h>
-#include <mach/hardware.h>
+#include <linux/regmap.h>
static unsigned long last_crtr;
static u32 irqmask;
static struct clock_event_device clkevt;
+static struct regmap *regmap_st;
+#define AT91_SLOW_CLOCK 32768
#define RM9200_TIMER_LATCH ((AT91_SLOW_CLOCK + HZ/2) / HZ)
/*
@@ -46,11 +44,11 @@ static struct clock_event_device clkevt;
*/
static inline unsigned long read_CRTR(void)
{
- unsigned long x1, x2;
+ unsigned int x1, x2;
- x1 = at91_st_read(AT91_ST_CRTR);
+ regmap_read(regmap_st, AT91_ST_CRTR, &x1);
do {
- x2 = at91_st_read(AT91_ST_CRTR);
+ regmap_read(regmap_st, AT91_ST_CRTR, &x2);
if (x1 == x2)
break;
x1 = x2;
@@ -63,7 +61,10 @@ static inline unsigned long read_CRTR(void)
*/
static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
{
- u32 sr = at91_st_read(AT91_ST_SR) & irqmask;
+ u32 sr;
+
+ regmap_read(regmap_st, AT91_ST_SR, &sr);
+ sr &= irqmask;
/*
* irqs should be disabled here, but as the irq is shared they are only
@@ -92,13 +93,6 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct irqaction at91rm9200_timer_irq = {
- .name = "at91_tick",
- .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = at91rm9200_timer_interrupt,
- .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
-};
-
static cycle_t read_clk32k(struct clocksource *cs)
{
return read_CRTR();
@@ -115,23 +109,25 @@ static struct clocksource clk32k = {
static void
clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
{
+ unsigned int val;
+
/* Disable and flush pending timer interrupts */
- at91_st_write(AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS);
- at91_st_read(AT91_ST_SR);
+ regmap_write(regmap_st, AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS);
+ regmap_read(regmap_st, AT91_ST_SR, &val);
last_crtr = read_CRTR();
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/* PIT for periodic irqs; fixed rate of 1/HZ */
irqmask = AT91_ST_PITS;
- at91_st_write(AT91_ST_PIMR, RM9200_TIMER_LATCH);
+ regmap_write(regmap_st, AT91_ST_PIMR, RM9200_TIMER_LATCH);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* ALM for oneshot irqs, set by next_event()
* before 32 seconds have passed
*/
irqmask = AT91_ST_ALMS;
- at91_st_write(AT91_ST_RTAR, last_crtr);
+ regmap_write(regmap_st, AT91_ST_RTAR, last_crtr);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
@@ -139,7 +135,7 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
irqmask = 0;
break;
}
- at91_st_write(AT91_ST_IER, irqmask);
+ regmap_write(regmap_st, AT91_ST_IER, irqmask);
}
static int
@@ -147,6 +143,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
{
u32 alm;
int status = 0;
+ unsigned int val;
BUG_ON(delta < 2);
@@ -162,12 +159,12 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
alm = read_CRTR();
/* Cancel any pending alarm; flush any pending IRQ */
- at91_st_write(AT91_ST_RTAR, alm);
- at91_st_read(AT91_ST_SR);
+ regmap_write(regmap_st, AT91_ST_RTAR, alm);
+ regmap_read(regmap_st, AT91_ST_SR, &val);
/* Schedule alarm by writing RTAR. */
alm += delta;
- at91_st_write(AT91_ST_RTAR, alm);
+ regmap_write(regmap_st, AT91_ST_RTAR, alm);
return status;
}
@@ -180,66 +177,40 @@ static struct clock_event_device clkevt = {
.set_mode = clkevt32k_mode,
};
-void __iomem *at91_st_base;
-EXPORT_SYMBOL_GPL(at91_st_base);
-
-static const struct of_device_id at91rm9200_st_timer_ids[] = {
- { .compatible = "atmel,at91rm9200-st" },
- { /* sentinel */ }
-};
-
-static int __init of_at91rm9200_st_init(void)
-{
- struct device_node *np;
- int ret;
-
- np = of_find_matching_node(NULL, at91rm9200_st_timer_ids);
- if (!np)
- goto err;
-
- at91_st_base = of_iomap(np, 0);
- if (!at91_st_base)
- goto node_err;
-
- /* Get the interrupts property */
- ret = irq_of_parse_and_map(np, 0);
- if (!ret)
- goto ioremap_err;
- at91rm9200_timer_irq.irq = ret;
-
- of_node_put(np);
-
- return 0;
-
-ioremap_err:
- iounmap(at91_st_base);
-node_err:
- of_node_put(np);
-err:
- return -EINVAL;
-}
-
/*
* ST (system timer) module supports both clockevents and clocksource.
*/
-void __init at91rm9200_timer_init(void)
+static void __init atmel_st_timer_init(struct device_node *node)
{
- /* For device tree enabled device: initialize here */
- of_at91rm9200_st_init();
+ unsigned int val;
+ int irq, ret;
+
+ regmap_st = syscon_node_to_regmap(node);
+ if (IS_ERR(regmap_st))
+ panic(pr_fmt("Unable to get regmap\n"));
/* Disable all timer interrupts, and clear any pending ones */
- at91_st_write(AT91_ST_IDR,
+ regmap_write(regmap_st, AT91_ST_IDR,
AT91_ST_PITS | AT91_ST_WDOVF | AT91_ST_RTTINC | AT91_ST_ALMS);
- at91_st_read(AT91_ST_SR);
+ regmap_read(regmap_st, AT91_ST_SR, &val);
+
+ /* Get the interrupts property */
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq)
+ panic(pr_fmt("Unable to get IRQ from DT\n"));
/* Make IRQs happen for the system timer */
- setup_irq(at91rm9200_timer_irq.irq, &at91rm9200_timer_irq);
+ ret = request_irq(irq, at91rm9200_timer_interrupt,
+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+ "at91_tick", regmap_st);
+ if (ret)
+ panic(pr_fmt("Unable to setup IRQ\n"));
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
* directly for the clocksource and all clockevents, after adjusting
* its prescaler from the 1 Hz default.
*/
- at91_st_write(AT91_ST_RTMR, 1);
+ regmap_write(regmap_st, AT91_ST_RTMR, 1);
/* Setup timer clockevent, with minimum of two ticks (important!!) */
clkevt.cpumask = cpumask_of(0);
@@ -249,3 +220,5 @@ void __init at91rm9200_timer_init(void)
/* register clocksource */
clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
}
+CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
+ atmel_st_timer_init);
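read_CRTR() above re-reads the slow-clock counter until two consecutive reads agree, since the register updates asynchronously to the CPU. A minimal userspace sketch of that read-until-stable pattern; the counter source is a stand-in rather than the ST regmap:

#include <stdio.h>

/* Stand-in for a counter register that may be caught mid-update */
static unsigned int fake_counter_reads[] = { 0x1000, 0x1001, 0x1001 };
static int read_idx;

static unsigned int read_counter(void)
{
	return fake_counter_reads[read_idx++];
}

/* Same shape as read_CRTR(): re-read until two reads match */
static unsigned int read_stable(void)
{
	unsigned int x1, x2;

	x1 = read_counter();
	do {
		x2 = read_counter();
		if (x1 == x2)
			break;
		x1 = x2;
	} while (1);

	return x1;
}

int main(void)
{
	printf("stable counter value: 0x%x\n", read_stable());
	return 0;
}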
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a171fef2c2b6..659879a56dba 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -293,5 +293,13 @@ config SH_CPU_FREQ
If unsure, say N.
endif
+config QORIQ_CPUFREQ
+ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
+ depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+ select CLK_QORIQ
+ help
+ This adds the CPUFreq driver support for Freescale QorIQ SoCs
+ which are capable of changing the CPU's frequency dynamically.
+
endif
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1b06fc4640e2..4f3dbc8cf729 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -108,6 +108,15 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
+config ARM_HISI_ACPU_CPUFREQ
+ tristate "Hisilicon ACPU CPUfreq driver"
+ depends on ARCH_HISI && CPUFREQ_DT
+ select PM_OPP
+ help
+ This enables the Hisilicon ACPU CPUfreq driver.
+
+ If in doubt, say N.
+
config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 7ea24413cee6..3a0595b41eab 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -23,15 +23,6 @@ config CPU_FREQ_MAPLE
This adds support for frequency switching on Maple 970FX
Evaluation Board and compatible boards (IBM JS2x blades).
-config PPC_CORENET_CPUFREQ
- tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
- depends on PPC_E500MC && OF && COMMON_CLK
- select CLK_QORIQ
- help
- This adds the CPUFreq driver support for Freescale e500mc,
- e5500 and e6500 series SoCs which are capable of changing
- the CPU's frequency dynamically.
-
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 82a1821471fd..cdce92ae2e8b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -59,6 +59,7 @@ arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
@@ -85,7 +86,7 @@ obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
-obj-$(CONFIG_PPC_CORENET_CPUFREQ) += ppc-corenet-cpufreq.o
+obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o
diff --git a/drivers/cpufreq/hisi-acpu-cpufreq.c b/drivers/cpufreq/hisi-acpu-cpufreq.c
new file mode 100644
index 000000000000..026d5b2224de
--- /dev/null
+++ b/drivers/cpufreq/hisi-acpu-cpufreq.c
@@ -0,0 +1,42 @@
+/*
+ * Hisilicon Platforms Using ACPU CPUFreq Support
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static int __init hisi_acpu_cpufreq_driver_init(void)
+{
+ struct platform_device *pdev;
+
+ if (!of_machine_is_compatible("hisilicon,hi6220"))
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ return PTR_ERR_OR_ZERO(pdev);
+}
+module_init(hisi_acpu_cpufreq_driver_init);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 872c5772c5d3..6414661ac1c4 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -31,6 +31,7 @@
#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
+#include <asm/cpufeature.h>
#define BYT_RATIOS 0x66a
#define BYT_VIDS 0x66b
@@ -614,6 +615,19 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
+static int knl_get_turbo_pstate(void)
+{
+ u64 value;
+ int nont, ret;
+
+ rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+ nont = core_get_max_pstate();
+ ret = (((value) >> 8) & 0xFF);
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
static struct cpu_defaults core_params = {
.pid_policy = {
.sample_rate_ms = 10,
@@ -636,7 +650,7 @@ static struct cpu_defaults byt_params = {
.pid_policy = {
.sample_rate_ms = 10,
.deadband = 0,
- .setpoint = 97,
+ .setpoint = 60,
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
@@ -651,6 +665,23 @@ static struct cpu_defaults byt_params = {
},
};
+static struct cpu_defaults knl_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 20,
+ .d_gain_pct = 0,
+ .i_gain_pct = 0,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = knl_get_turbo_pstate,
+ .set = core_set_pstate,
+ },
+};
+
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
@@ -865,6 +896,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x4e, core_params),
ICPU(0x4f, core_params),
ICPU(0x56, core_params),
+ ICPU(0x57, knl_params),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -1024,25 +1056,11 @@ static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
- /* Check that all the msr's we are using are valid. */
- u64 aperf, mperf, tmp;
-
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
-
if (!pstate_funcs.get_max() ||
!pstate_funcs.get_min() ||
!pstate_funcs.get_turbo())
return -ENODEV;
- rdmsrl(MSR_IA32_APERF, tmp);
- if (!(tmp - aperf))
- return -ENODEV;
-
- rdmsrl(MSR_IA32_MPERF, tmp);
- if (!(tmp - mperf))
- return -ENODEV;
-
return 0;
}
@@ -1183,8 +1201,7 @@ static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
const struct x86_cpu_id *id;
- struct cpu_defaults *cpu_info;
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ struct cpu_defaults *cpu_def;
if (no_load)
return -ENODEV;
@@ -1200,10 +1217,10 @@ static int __init intel_pstate_init(void)
if (intel_pstate_platform_pwr_mgmt_exists())
return -ENODEV;
- cpu_info = (struct cpu_defaults *)id->driver_data;
+ cpu_def = (struct cpu_defaults *)id->driver_data;
- copy_pid_params(&cpu_info->pid_policy);
- copy_cpu_funcs(&cpu_info->funcs);
+ copy_pid_params(&cpu_def->pid_policy);
+ copy_cpu_funcs(&cpu_def->funcs);
if (intel_pstate_msrs_not_valid())
return -ENODEV;
@@ -1214,7 +1231,7 @@ static int __init intel_pstate_init(void)
if (!all_cpu_data)
return -ENOMEM;
- if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
+ if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
intel_pstate_hwp_enable();
if (!hwp_active && hwp_only)
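knl_get_turbo_pstate() above takes bits 15:8 of MSR_NHM_TURBO_RATIO_LIMIT as the 1-core turbo ratio and never reports less than the non-turbo maximum. A userspace sketch of that extraction and clamp, using a fabricated MSR value rather than a real rdmsr:

#include <stdio.h>
#include <stdint.h>

/* Mirrors knl_get_turbo_pstate(): bits 15:8 hold the 1-core turbo ratio */
static int knl_turbo_pstate(uint64_t turbo_ratio_limit, int max_nonturbo)
{
	int ret = (turbo_ratio_limit >> 8) & 0xFF;

	if (ret <= max_nonturbo)
		ret = max_nonturbo;	/* never report less than the base max */
	return ret;
}

int main(void)
{
	/* Example only: base max pstate 14, turbo field encodes 18 */
	uint64_t msr = 18ULL << 8;

	printf("turbo pstate: %d\n", knl_turbo_pstate(msr, 14));
	return 0;
}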
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 2dfd4fdb5a52..ebef0d8279c7 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -34,9 +34,13 @@
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#define POWERNV_MAX_PSTATES 256
+#define PMSR_PSAFE_ENABLE (1UL << 30)
+#define PMSR_SPR_EM_DISABLE (1UL << 31)
+#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define PMSR_LP(x) ((x >> 48) & 0xFF)
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-static bool rebooting;
+static bool rebooting, throttled;
/*
* Note: The set of pstates consists of contiguous integers, the
@@ -294,6 +298,44 @@ static inline unsigned int get_nominal_index(void)
return powernv_pstate_info.max - powernv_pstate_info.nominal;
}
+static void powernv_cpufreq_throttle_check(unsigned int cpu)
+{
+ unsigned long pmsr;
+ int pmsr_pmax, pmsr_lp;
+
+ pmsr = get_pmspr(SPRN_PMSR);
+
+ /* Check for Pmax Capping */
+ pmsr_pmax = (s8)PMSR_MAX(pmsr);
+ if (pmsr_pmax != powernv_pstate_info.max) {
+ throttled = true;
+ pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
+ pr_info("Max allowed Pstate is capped\n");
+ }
+
+ /*
+ * Check for Psafe by reading LocalPstate
+ * or check if Psafe_mode_active is set in PMSR.
+ */
+ pmsr_lp = (s8)PMSR_LP(pmsr);
+ if ((pmsr_lp < powernv_pstate_info.min) ||
+ (pmsr & PMSR_PSAFE_ENABLE)) {
+ throttled = true;
+ pr_info("Pstate set to safe frequency\n");
+ }
+
+ /* Check if SPR_EM_DISABLE is set in PMSR */
+ if (pmsr & PMSR_SPR_EM_DISABLE) {
+ throttled = true;
+ pr_info("Frequency Control disabled from OS\n");
+ }
+
+ if (throttled) {
+ pr_info("PMSR = %16lx\n", pmsr);
+ pr_crit("CPU Frequency could be throttled\n");
+ }
+}
+
/*
* powernv_cpufreq_target_index: Sets the frequency corresponding to
* the cpufreq table entry indexed by new_index on the cpus in the
@@ -307,6 +349,9 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
+ if (!throttled)
+ powernv_cpufreq_throttle_check(smp_processor_id());
+
freq_data.pstate_id = powernv_freqs[new_index].driver_data;
/*
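The PMSR_MAX()/PMSR_LP() macros above extract 8-bit pstate fields from the 64-bit PMSR, and the driver casts them to s8 because POWER pstates can be negative. A userspace sketch of that decode with a fabricated PMSR value:

#include <stdio.h>
#include <stdint.h>

#define PMSR_PSAFE_ENABLE	(1UL << 30)
#define PMSR_MAX(x)		(((x) >> 32) & 0xFF)
#define PMSR_LP(x)		(((x) >> 48) & 0xFF)

int main(void)
{
	/* Fabricated PMSR: max pstate -2, local pstate -5, Psafe bit set */
	uint64_t pmsr = ((uint64_t)(uint8_t)-2 << 32) |
			((uint64_t)(uint8_t)-5 << 48) |
			PMSR_PSAFE_ENABLE;

	/* Cast to signed 8-bit, as the driver does with (s8) */
	int pmax = (int8_t)PMSR_MAX(pmsr);
	int lp   = (int8_t)PMSR_LP(pmsr);

	printf("Pmax %d, LocalPstate %d, Psafe %s\n",
	       pmax, lp, (pmsr & PMSR_PSAFE_ENABLE) ? "active" : "inactive");
	return 0;
}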
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 7cb4b766cf94..88b21ae0d6b0 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -1,7 +1,7 @@
/*
* Copyright 2013 Freescale Semiconductor, Inc.
*
- * CPU Frequency Scaling driver for Freescale PowerPC corenet SoCs.
+ * CPU Frequency Scaling driver for Freescale QorIQ SoCs.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,12 +20,13 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
-#include <sysdev/fsl_soc.h>
+#if !defined(CONFIG_ARM)
#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */
+#endif
/**
- * struct cpu_data - per CPU data struct
+ * struct cpu_data
* @parent: the parent node of cpu clock
* @table: frequency table
*/
@@ -69,17 +70,78 @@ static const struct soc_data sdata[] = {
static u32 min_cpufreq;
static const u32 *fmask;
-static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
+#if defined(CONFIG_ARM)
+static int get_cpu_physical_id(int cpu)
+{
+ return topology_core_id(cpu);
+}
+#else
+static int get_cpu_physical_id(int cpu)
+{
+ return get_hard_smp_processor_id(cpu);
+}
+#endif
-/* cpumask in a cluster */
-static DEFINE_PER_CPU(cpumask_var_t, cpu_mask);
+static u32 get_bus_freq(void)
+{
+ struct device_node *soc;
+ u32 sysfreq;
+
+ soc = of_find_node_by_type(NULL, "soc");
+ if (!soc)
+ return 0;
+
+ if (of_property_read_u32(soc, "bus-frequency", &sysfreq))
+ sysfreq = 0;
-#ifndef CONFIG_SMP
-static inline const struct cpumask *cpu_core_mask(int cpu)
+ of_node_put(soc);
+
+ return sysfreq;
+}
+
+static struct device_node *cpu_to_clk_node(int cpu)
{
- return cpumask_of(0);
+ struct device_node *np, *clk_np;
+
+ if (!cpu_present(cpu))
+ return NULL;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ return NULL;
+
+ clk_np = of_parse_phandle(np, "clocks", 0);
+ if (!clk_np)
+ return NULL;
+
+ of_node_put(np);
+
+ return clk_np;
+}
+
+/* traverse CPU nodes to get the mask of CPUs sharing the same clock wire */
+static void set_affected_cpus(struct cpufreq_policy *policy)
+{
+ struct device_node *np, *clk_np;
+ struct cpumask *dstp = policy->cpus;
+ int i;
+
+ np = cpu_to_clk_node(policy->cpu);
+ if (!np)
+ return;
+
+ for_each_present_cpu(i) {
+ clk_np = cpu_to_clk_node(i);
+ if (!clk_np)
+ continue;
+
+ if (clk_np == np)
+ cpumask_set_cpu(i, dstp);
+
+ of_node_put(clk_np);
+ }
+ of_node_put(np);
}
-#endif
/* reduce the duplicated frequencies in frequency table */
static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
@@ -107,6 +169,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
int i, j, ind;
unsigned int freq, max_freq;
struct cpufreq_frequency_table table;
+
for (i = 0; i < count - 1; i++) {
max_freq = freq_table[i].frequency;
ind = i;
@@ -131,7 +194,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
}
}
-static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
+static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct device_node *np;
int i, count, ret;
@@ -147,10 +210,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- pr_err("%s: no memory\n", __func__);
+ if (!data)
goto err_np;
- }
policy->clk = of_clk_get(np, 0);
if (IS_ERR(policy->clk)) {
@@ -172,7 +233,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
if (fmask)
- mask = fmask[get_hard_smp_processor_id(cpu)];
+ mask = fmask[get_cpu_physical_id(cpu)];
else
mask = 0x0;
@@ -203,13 +264,12 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
data->table = table;
/* update ->cpus if we have cluster, no harm if not */
- cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
- for_each_cpu(i, per_cpu(cpu_mask, cpu))
- per_cpu(cpu_data, i) = data;
+ set_affected_cpus(policy);
+ policy->driver_data = data;
/* Minimum transition latency is 12 platform clocks */
u64temp = 12ULL * NSEC_PER_SEC;
- do_div(u64temp, fsl_get_sys_freq());
+ do_div(u64temp, get_bus_freq());
policy->cpuinfo.transition_latency = u64temp + 1;
of_node_put(np);
@@ -221,7 +281,7 @@ err_nomem1:
err_node:
of_node_put(data->parent);
err_nomem2:
- per_cpu(cpu_data, cpu) = NULL;
+ policy->driver_data = NULL;
kfree(data);
err_np:
of_node_put(np);
@@ -229,43 +289,40 @@ err_np:
return -ENODEV;
}
-static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
- unsigned int cpu;
+ struct cpu_data *data = policy->driver_data;
of_node_put(data->parent);
kfree(data->table);
kfree(data);
-
- for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu))
- per_cpu(cpu_data, cpu) = NULL;
+ policy->driver_data = NULL;
return 0;
}
-static int corenet_cpufreq_target(struct cpufreq_policy *policy,
+static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct clk *parent;
- struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
+ struct cpu_data *data = policy->driver_data;
parent = of_clk_get(data->parent, data->table[index].driver_data);
return clk_set_parent(policy->clk, parent);
}
-static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
- .name = "ppc_cpufreq",
+static struct cpufreq_driver qoriq_cpufreq_driver = {
+ .name = "qoriq_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
- .init = corenet_cpufreq_cpu_init,
- .exit = __exit_p(corenet_cpufreq_cpu_exit),
+ .init = qoriq_cpufreq_cpu_init,
+ .exit = __exit_p(qoriq_cpufreq_cpu_exit),
.verify = cpufreq_generic_frequency_table_verify,
- .target_index = corenet_cpufreq_target,
+ .target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
.attr = cpufreq_generic_attr,
};
-static const struct of_device_id node_matches[] __initdata = {
+static const struct of_device_id node_matches[] __initconst = {
{ .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
{ .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
{ .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
@@ -275,61 +332,43 @@ static const struct of_device_id node_matches[] __initdata = {
{}
};
-static int __init ppc_corenet_cpufreq_init(void)
+static int __init qoriq_cpufreq_init(void)
{
int ret;
struct device_node *np;
const struct of_device_id *match;
const struct soc_data *data;
- unsigned int cpu;
np = of_find_matching_node(NULL, node_matches);
if (!np)
return -ENODEV;
- for_each_possible_cpu(cpu) {
- if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL))
- goto err_mask;
- cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu));
- }
-
match = of_match_node(node_matches, np);
data = match->data;
if (data) {
if (data->flag)
fmask = data->freq_mask;
- min_cpufreq = fsl_get_sys_freq();
+ min_cpufreq = get_bus_freq();
} else {
- min_cpufreq = fsl_get_sys_freq() / 2;
+ min_cpufreq = get_bus_freq() / 2;
}
of_node_put(np);
- ret = cpufreq_register_driver(&ppc_corenet_cpufreq_driver);
+ ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
if (!ret)
- pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n");
+ pr_info("Freescale QorIQ CPU frequency scaling driver\n");
return ret;
-
-err_mask:
- for_each_possible_cpu(cpu)
- free_cpumask_var(per_cpu(cpu_mask, cpu));
-
- return -ENOMEM;
}
-module_init(ppc_corenet_cpufreq_init);
+module_init(qoriq_cpufreq_init);
-static void __exit ppc_corenet_cpufreq_exit(void)
+static void __exit qoriq_cpufreq_exit(void)
{
- unsigned int cpu;
-
- for_each_possible_cpu(cpu)
- free_cpumask_var(per_cpu(cpu_mask, cpu));
-
- cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver);
+ cpufreq_unregister_driver(&qoriq_cpufreq_driver);
}
-module_exit(ppc_corenet_cpufreq_exit);
+module_exit(qoriq_cpufreq_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
-MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs");
+MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
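
The conversion above also drops the per-CPU cpu_data/cpu_mask bookkeeping in favour of hanging the driver's private state off each policy via policy->driver_data. A minimal sketch of that idiom only (the my_* names are hypothetical and not part of the patch):

#include <linux/cpufreq.h>
#include <linux/slab.h>

struct my_data {
	struct cpufreq_frequency_table *table;
};

static int my_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* one private object per policy, shared by every CPU in policy->cpus */
	policy->driver_data = data;
	return 0;
}

static int my_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	kfree(policy->driver_data);
	policy->driver_data = NULL;
	return 0;
}
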
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c5029c1209b4..8c7930b5a65f 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -29,15 +29,10 @@ config DT_IDLE_STATES
bool
menu "ARM CPU Idle Drivers"
-depends on ARM
+depends on ARM || ARM64
source "drivers/cpuidle/Kconfig.arm"
endmenu
-menu "ARM64 CPU Idle Drivers"
-depends on ARM64
-source "drivers/cpuidle/Kconfig.arm64"
-endmenu
-
menu "MIPS CPU Idle Drivers"
depends on MIPS
source "drivers/cpuidle/Kconfig.mips"
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 8e07c9419153..21340e0be73e 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -1,10 +1,20 @@
#
# ARM CPU Idle drivers
#
+config ARM_CPUIDLE
+ bool "Generic ARM/ARM64 CPU idle Driver"
+ select DT_IDLE_STATES
+ help
+	  Select this to enable the generic cpuidle driver for ARM.
+ It provides a generic idle driver whose idle states are configured
+ at run-time through DT nodes. The CPUidle suspend backend is
+ initialized by calling the CPU operations init idle hook
+ provided by architecture code.
+
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
- depends on MCPM
+ depends on MCPM && !ARM64
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
select DT_IDLE_STATES
@@ -16,51 +26,51 @@ config ARM_BIG_LITTLE_CPUIDLE
config ARM_CLPS711X_CPUIDLE
bool "CPU Idle Driver for CLPS711X processors"
- depends on ARCH_CLPS711X || COMPILE_TEST
+ depends on ARCH_CLPS711X && !ARM64 || COMPILE_TEST
help
Select this to enable cpuidle on Cirrus Logic CLPS711X SOCs.
config ARM_HIGHBANK_CPUIDLE
bool "CPU Idle Driver for Calxeda processors"
- depends on ARM_PSCI
+ depends on ARM_PSCI && !ARM64
select ARM_CPU_SUSPEND
help
Select this to enable cpuidle on Calxeda processors.
config ARM_KIRKWOOD_CPUIDLE
bool "CPU Idle Driver for Marvell Kirkwood SoCs"
- depends on MACH_KIRKWOOD
+ depends on MACH_KIRKWOOD && !ARM64
help
This adds the CPU Idle driver for Marvell Kirkwood SoCs.
config ARM_ZYNQ_CPUIDLE
bool "CPU Idle Driver for Xilinx Zynq processors"
- depends on ARCH_ZYNQ
+ depends on ARCH_ZYNQ && !ARM64
help
Select this to enable cpuidle on Xilinx Zynq processors.
config ARM_U8500_CPUIDLE
bool "Cpu Idle Driver for the ST-E u8500 processors"
- depends on ARCH_U8500
+ depends on ARCH_U8500 && !ARM64
help
Select this to enable cpuidle for ST-E u8500 processors
config ARM_AT91_CPUIDLE
bool "Cpu Idle Driver for the AT91 processors"
default y
- depends on ARCH_AT91
+ depends on ARCH_AT91 && !ARM64
help
Select this to enable cpuidle for AT91 processors
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
- depends on ARCH_EXYNOS
+ depends on ARCH_EXYNOS && !ARM64
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
Select this to enable cpuidle for Exynos processors
config ARM_MVEBU_V7_CPUIDLE
bool "CPU Idle Driver for mvebu v7 family processors"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64
deleted file mode 100644
index 6effb3656735..000000000000
--- a/drivers/cpuidle/Kconfig.arm64
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# ARM64 CPU Idle drivers
-#
-
-config ARM64_CPUIDLE
- bool "Generic ARM64 CPU idle Driver"
- select DT_IDLE_STATES
- help
- Select this to enable generic cpuidle driver for ARM64.
- It provides a generic idle driver whose idle states are configured
- at run-time through DT nodes. The CPUidle suspend backend is
- initialized by calling the CPU operations init idle hook
- provided by architecture code.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 4d177b916f75..3ba81b1dffad 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -17,16 +17,13 @@ obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
+obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
###############################################################################
# MIPS drivers
obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
###############################################################################
-# ARM64 drivers
-obj-$(CONFIG_ARM64_CPUIDLE) += cpuidle-arm64.o
-
-###############################################################################
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 73fe2f8d7f96..7936dce4b878 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -292,7 +292,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
*/
smp_rmb();
- for_each_cpu_mask(i, coupled->coupled_cpus)
+ for_each_cpu(i, &coupled->coupled_cpus)
if (cpu_online(i) && coupled->requested_state[i] < state)
state = coupled->requested_state[i];
@@ -338,7 +338,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
{
int cpu;
- for_each_cpu_mask(cpu, coupled->coupled_cpus)
+ for_each_cpu(cpu, &coupled->coupled_cpus)
if (cpu != this_cpu && cpu_online(cpu))
cpuidle_coupled_poke(cpu);
}
@@ -638,7 +638,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
if (cpumask_empty(&dev->coupled_cpus))
return 0;
- for_each_cpu_mask(cpu, dev->coupled_cpus) {
+ for_each_cpu(cpu, &dev->coupled_cpus) {
other_dev = per_cpu(cpuidle_devices, cpu);
if (other_dev && other_dev->coupled) {
coupled = other_dev->coupled;
diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm.c
index 39a2c62716c3..545069d5fdfb 100644
--- a/drivers/cpuidle/cpuidle-arm64.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -1,5 +1,5 @@
/*
- * ARM64 generic CPU idle driver.
+ * ARM/ARM64 generic CPU idle driver.
*
* Copyright (C) 2014 ARM Ltd.
* Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*/
-#define pr_fmt(fmt) "CPUidle arm64: " fmt
+#define pr_fmt(fmt) "CPUidle arm: " fmt
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
@@ -17,13 +17,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include <asm/cpuidle.h>
#include "dt_idle_states.h"
/*
- * arm64_enter_idle_state - Programs CPU to enter the specified state
+ * arm_enter_idle_state - Programs CPU to enter the specified state
*
* dev: cpuidle device
* drv: cpuidle driver
@@ -32,8 +33,8 @@
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int arm64_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static int arm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
int ret;
@@ -49,7 +50,7 @@ static int arm64_enter_idle_state(struct cpuidle_device *dev,
* call the CPU ops suspend protocol with idle index as a
* parameter.
*/
- ret = cpu_suspend(idx);
+ arm_cpuidle_suspend(idx);
cpu_pm_exit();
}
@@ -57,8 +58,8 @@ static int arm64_enter_idle_state(struct cpuidle_device *dev,
return ret ? -1 : idx;
}
-static struct cpuidle_driver arm64_idle_driver = {
- .name = "arm64_idle",
+static struct cpuidle_driver arm_idle_driver = {
+ .name = "arm_idle",
.owner = THIS_MODULE,
/*
* State at index 0 is standby wfi and considered standard
@@ -68,32 +69,33 @@ static struct cpuidle_driver arm64_idle_driver = {
* handler for idle state index 0.
*/
.states[0] = {
- .enter = arm64_enter_idle_state,
+ .enter = arm_enter_idle_state,
.exit_latency = 1,
.target_residency = 1,
.power_usage = UINT_MAX,
.name = "WFI",
- .desc = "ARM64 WFI",
+ .desc = "ARM WFI",
}
};
-static const struct of_device_id arm64_idle_state_match[] __initconst = {
+static const struct of_device_id arm_idle_state_match[] __initconst = {
{ .compatible = "arm,idle-state",
- .data = arm64_enter_idle_state },
+ .data = arm_enter_idle_state },
{ },
};
/*
- * arm64_idle_init
+ * arm_idle_init
*
- * Registers the arm64 specific cpuidle driver with the cpuidle
+ * Registers the arm specific cpuidle driver with the cpuidle
* framework. It relies on core code to parse the idle states
* and initialize them using driver data structures accordingly.
*/
-static int __init arm64_idle_init(void)
+static int __init arm_idle_init(void)
{
int cpu, ret;
- struct cpuidle_driver *drv = &arm64_idle_driver;
+ struct cpuidle_driver *drv = &arm_idle_driver;
+ struct cpuidle_device *dev;
/*
* Initialize idle states data, starting at index 1.
@@ -101,22 +103,61 @@ static int __init arm64_idle_init(void)
* let the driver initialization fail accordingly since there is no
* reason to initialize the idle driver if only wfi is supported.
*/
- ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
+ ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0)
return ret ? : -ENODEV;
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver\n");
+ return ret;
+ }
+
/*
* Call arch CPU operations in order to initialize
* idle states suspend back-end specific data
*/
for_each_possible_cpu(cpu) {
- ret = cpu_init_idle(cpu);
+ ret = arm_cpuidle_init(cpu);
+
+ /*
+ * Skip the cpuidle device initialization if the reported
+ * failure is a HW misconfiguration/breakage (-ENXIO).
+ */
+ if (ret == -ENXIO)
+ continue;
+
if (ret) {
pr_err("CPU %d failed to init idle CPU ops\n", cpu);
- return ret;
+ goto out_fail;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ pr_err("Failed to allocate cpuidle device\n");
+ goto out_fail;
+ }
+ dev->cpu = cpu;
+
+ ret = cpuidle_register_device(dev);
+ if (ret) {
+ pr_err("Failed to register cpuidle device for CPU %d\n",
+ cpu);
+ kfree(dev);
+ goto out_fail;
}
}
- return cpuidle_register(drv, NULL);
+ return 0;
+out_fail:
+ while (--cpu >= 0) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ cpuidle_unregister_device(dev);
+ kfree(dev);
+ }
+
+ cpuidle_unregister_driver(drv);
+
+ return ret;
}
-device_initcall(arm64_idle_init);
+device_initcall(arm_idle_init);
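
The reworked init path above registers the driver first and then one cpuidle device per possible CPU, unwinding both if anything fails. A condensed sketch of that register/rollback shape, assuming drv points at an already populated driver (demo_idle_setup is an illustrative name, not from the patch):

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

static int demo_idle_setup(struct cpuidle_driver *drv)
{
	struct cpuidle_device *dev;
	int cpu, ret;

	ret = cpuidle_register_driver(drv);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev) {
			ret = -ENOMEM;
			goto out_fail;
		}
		dev->cpu = cpu;

		ret = cpuidle_register_device(dev);
		if (ret) {
			kfree(dev);
			goto out_fail;
		}
	}
	return 0;

out_fail:
	/* unregister the devices already set up, then the driver itself */
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		cpuidle_unregister_device(dev);
		kfree(dev);
	}
	cpuidle_unregister_driver(drv);
	return ret;
}
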
diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c
index aae7bfc1ea36..f2446c78d87c 100644
--- a/drivers/cpuidle/cpuidle-at91.c
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -19,7 +19,6 @@
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <linux/export.h>
-#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
#define AT91_MAX_STATES 2
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 26f5f29fdb03..b5f0a9cc8185 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -19,7 +19,6 @@
#include <linux/of.h>
#include <linux/platform_data/cpuidle-exynos.h>
-#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
@@ -117,7 +116,8 @@ static int exynos_cpuidle_probe(struct platform_device *pdev)
{
int ret;
- if (of_machine_is_compatible("samsung,exynos4210")) {
+ if (IS_ENABLED(CONFIG_SMP) &&
+ of_machine_is_compatible("samsung,exynos4210")) {
exynos_cpuidle_pdata = pdev->dev.platform_data;
ret = cpuidle_register(&exynos_coupled_idle_driver,
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index cea0a6c4b1db..d23d8f468c12 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -21,7 +21,6 @@
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <linux/export.h>
-#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
#define KIRKWOOD_MAX_STATES 2
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index 66f81e410f0d..8bf895c0017d 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index 002b8c9f98f5..543292b1d38e 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -28,7 +28,6 @@
#include <linux/init.h>
#include <linux/cpuidle.h>
#include <linux/platform_device.h>
-#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
#define ZYNQ_MAX_STATES 2
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7a73a279e179..61c417b9e53f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int entered_state;
struct cpuidle_state *target_state = &drv->states[index];
+ bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
s64 diff;
+ /*
+ * Tell the time framework to switch to a broadcast timer because our
+ * local timer will be shut down. If a local timer is used from another
+ * CPU as a broadcast timer, this call may fail if it is not available.
+ */
+ if (broadcast && tick_broadcast_enter())
+ return -EBUSY;
+
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
@@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_end = ktime_get();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+ if (broadcast) {
+ if (WARN_ON_ONCE(!irqs_disabled()))
+ local_irq_disable();
+
+ tick_broadcast_exit();
+ }
+
if (!cpuidle_state_is_coupled(dev, drv, entered_state))
local_irq_enable();
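
With the hunk above, the core brackets any state flagged CPUIDLE_FLAG_TIMER_STOP with tick_broadcast_enter()/tick_broadcast_exit(), so a driver only has to set the flag on states that shut down the local timer. A purely illustrative state definition under that assumption (the demo_* names and latency figures are made up):

#include <linux/cpuidle.h>

static int demo_enter_deep(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int idx)
{
	/* the platform-specific low-power entry would go here */
	return idx;
}

static struct cpuidle_state demo_deep_state = {
	.enter			= demo_enter_deep,
	.exit_latency		= 300,		/* us, made up */
	.target_residency	= 1000,		/* us, made up */
	.flags			= CPUIDLE_FLAG_TIMER_STOP,
	.name			= "DEEP",
	.desc			= "local timer stops in this state",
};
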
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 40580794e23d..b8a5fa15ca24 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -190,12 +190,6 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
-/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
-static u64 div_round64(u64 dividend, u32 divisor)
-{
- return div_u64(dividend + (divisor / 2), divisor);
-}
-
/*
* Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set
@@ -317,7 +311,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* operands are 32 bits.
* Make sure to round up for half microseconds.
*/
- data->predicted_us = div_round64((uint64_t)data->next_timer_us *
+ data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
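
DIV_ROUND_CLOSEST_ULL() produces the same round-to-nearest result as the removed div_round64() helper: add half the divisor before the truncating division. A small stand-alone C illustration of that arithmetic (not kernel code):

#include <stdio.h>
#include <stdint.h>

/* same scheme as the removed helper: bias by divisor/2, then truncate */
static uint64_t div_round_closest_ull(uint64_t dividend, uint32_t divisor)
{
	return (dividend + divisor / 2) / divisor;
}

int main(void)
{
	/* 7/2 = 3.5 rounds to 4, 5/3 = 1.67 rounds to 2 */
	printf("%llu %llu\n",
	       (unsigned long long)div_round_closest_ull(7, 2),
	       (unsigned long long)div_round_closest_ull(5, 3));
	return 0;
}
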
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 2fb0fdfc87df..033c0c86f6ec 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
- depends on (X86 && PCI) || ARM64
+ depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
default n
help
The AMD Cryptographic Coprocessor provides hardware support
@@ -436,4 +436,27 @@ config CRYPTO_DEV_QCE
hardware. To compile this driver as a module, choose M here. The
module will be called qcrypto.
+config CRYPTO_DEV_VMX
+ bool "Support for VMX cryptographic acceleration instructions"
+ depends on PPC64
+ default n
+ help
+ Support for VMX cryptographic acceleration instructions.
+
+source "drivers/crypto/vmx/Kconfig"
+
+config CRYPTO_DEV_IMGTEC_HASH
+ tristate "Imagination Technologies hardware hash accelerator"
+ depends on MIPS || COMPILE_TEST
+ depends on HAS_DMA
+ select CRYPTO_ALGAPI
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ This driver interfaces with the Imagination Technologies
+ hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
+	  hardware hash accelerator, supporting MD5/SHA1/SHA224/SHA256
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 3924f93d5774..fb84be7e6be5 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
@@ -25,3 +26,4 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index d02b77150070..3b28e8c3de28 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1155,7 +1155,7 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
/**
* Module Initialization Routine
*/
-static int __init crypto4xx_probe(struct platform_device *ofdev)
+static int crypto4xx_probe(struct platform_device *ofdev)
{
int rc;
struct resource res;
@@ -1263,7 +1263,7 @@ err_alloc_dev:
return rc;
}
-static int __exit crypto4xx_remove(struct platform_device *ofdev)
+static int crypto4xx_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
@@ -1291,7 +1291,7 @@ static struct platform_driver crypto4xx_driver = {
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
- .remove = __exit_p(crypto4xx_remove),
+ .remove = crypto4xx_remove,
};
module_platform_driver(crypto4xx_driver);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6597aac9905d..0f9a9dc06a83 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -315,10 +315,10 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
dd->dma_size = length;
- if (!(dd->flags & AES_FLAGS_FAST)) {
- dma_sync_single_for_device(dd->dev, dma_addr_in, length,
- DMA_TO_DEVICE);
- }
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(dd->dev, dma_addr_out, length,
+ DMA_FROM_DEVICE);
if (dd->flags & AES_FLAGS_CFB8) {
dd->dma_lch_in.dma_conf.dst_addr_width =
@@ -391,6 +391,11 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
dd->flags &= ~AES_FLAGS_DMA;
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
+ dd->dma_size, DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
/* use cache buffers */
dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
if (!dd->nb_in_sg)
@@ -459,6 +464,9 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
dd->flags |= AES_FLAGS_FAST;
} else {
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
+ dd->dma_size, DMA_TO_DEVICE);
+
/* use cache buffers */
count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
dd->buf_in, dd->buflen, dd->total, 0);
@@ -619,7 +627,7 @@ static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
- dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
@@ -1246,6 +1254,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x200:
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_cfb64 = 1;
+ dd->caps.max_burst_size = 4;
+ break;
case 0x130:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
@@ -1336,6 +1349,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, aes_dd);
INIT_LIST_HEAD(&aes_dd->list);
+ spin_lock_init(&aes_dd->lock);
tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
(unsigned long)aes_dd);
@@ -1374,7 +1388,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
/* Initializing the clock */
aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
if (IS_ERR(aes_dd->iclk)) {
- dev_err(dev, "clock intialization failed.\n");
+ dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
goto clk_err;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 34db04addc18..5b35433c5399 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -163,8 +163,20 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
count = min(ctx->sg->length - ctx->offset, ctx->total);
count = min(count, ctx->buflen - ctx->bufcnt);
- if (count <= 0)
- break;
+ if (count <= 0) {
+ /*
+ * Check if count <= 0 because the buffer is full or
+	 * because the sg length is 0. In the latter case,
+	 * check if there is another sg in the list; a 0 length
+ * sg doesn't necessarily mean the end of the sg list.
+ */
+ if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
+ ctx->sg = sg_next(ctx->sg);
+ continue;
+ } else {
+ break;
+ }
+ }
scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
ctx->offset, count, 0);
@@ -420,14 +432,8 @@ static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
ctx->digcnt[1], ctx->digcnt[0], length1, final);
- if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
- SHA_FLAGS_SHA256)) {
- dd->dma_lch_in.dma_conf.src_maxburst = 16;
- dd->dma_lch_in.dma_conf.dst_maxburst = 16;
- } else {
- dd->dma_lch_in.dma_conf.src_maxburst = 32;
- dd->dma_lch_in.dma_conf.dst_maxburst = 32;
- }
+ dd->dma_lch_in.dma_conf.src_maxburst = 16;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 16;
dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
@@ -529,7 +535,7 @@ static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
if (final)
atmel_sha_fill_padding(ctx, 0);
- if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ if (final || (ctx->bufcnt == ctx->buflen)) {
count = ctx->bufcnt;
ctx->bufcnt = 0;
return atmel_sha_xmit_dma_map(dd, ctx, count, final);
@@ -1266,6 +1272,12 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x420:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ dd->caps.has_sha_384_512 = 1;
+ break;
case 0x410:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
@@ -1349,6 +1361,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sha_dd);
INIT_LIST_HEAD(&sha_dd->list);
+ spin_lock_init(&sha_dd->lock);
tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
(unsigned long)sha_dd);
@@ -1385,7 +1398,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
/* Initializing the clock */
sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
if (IS_ERR(sha_dd->iclk)) {
- dev_err(dev, "clock intialization failed.\n");
+ dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
goto clk_err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 258772d9b22f..ca2999709eb4 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1370,6 +1370,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdes_dd);
INIT_LIST_HEAD(&tdes_dd->list);
+ spin_lock_init(&tdes_dd->lock);
tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
(unsigned long)tdes_dd);
@@ -1408,7 +1409,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
/* Initializing the clock */
tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk");
if (IS_ERR(tdes_dd->iclk)) {
- dev_err(dev, "clock intialization failed.\n");
+ dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
goto clk_err;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f347ab7eea95..ba0532efd3ae 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1172,6 +1172,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
return -ENOMEM;
}
+ edesc->sec4_sg_bytes = 0;
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index ae31e555793c..26a544b505f1 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (10 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
@@ -90,8 +90,8 @@ static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
struct device *jrdev = ctx->jrdev;
if (ctx->sh_desc_dma)
- dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN,
- DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, ctx->sh_desc_dma,
+ desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
rng_unmap_buf(jrdev, &ctx->bufs[0]);
rng_unmap_buf(jrdev, &ctx->bufs[1]);
}
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 7f592d8d07bb..55a1f3951578 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,11 +1,6 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o
-ifdef CONFIG_X86
-ccp-objs += ccp-pci.o
-endif
-ifdef CONFIG_ARM64
-ccp-objs += ccp-platform.o
-endif
+ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o
+ccp-$(CONFIG_PCI) += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 8e162ad82085..ea7e8446956a 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -23,7 +23,6 @@
#include "ccp-crypto.h"
-
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
int ret)
{
@@ -38,11 +37,13 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
if (rctx->hash_rem) {
/* Save remaining data to buffer */
unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
scatterwalk_map_and_copy(rctx->buf, rctx->src,
offset, rctx->hash_rem, 0);
rctx->buf_count = rctx->hash_rem;
- } else
+ } else {
rctx->buf_count = 0;
+ }
/* Update result area if supplied */
if (req->result)
@@ -202,7 +203,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
}
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int key_len)
+ unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct ccp_crypto_ahash_alg *alg =
@@ -292,7 +293,8 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
cipher_tfm = crypto_alloc_cipher("aes", 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(cipher_tfm)) {
pr_warn("could not load aes cipher driver\n");
return PTR_ERR(cipher_tfm);
@@ -354,7 +356,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
ret = crypto_register_ahash(alg);
if (ret) {
pr_err("%s ahash algorithm registration error (%d)\n",
- base->cra_name, ret);
+ base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0cc5594b7de3..52c7395cb8d8 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -21,7 +21,6 @@
#include "ccp-crypto.h"
-
struct ccp_aes_xts_def {
const char *name;
const char *drv_name;
@@ -216,7 +215,6 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
ctx->u.aes.tfm_ablkcipher = NULL;
}
-
static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
{
@@ -255,7 +253,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
ret = crypto_register_alg(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index e46490db0f63..7984f910884d 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -22,7 +22,6 @@
#include "ccp-crypto.h"
-
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
@@ -345,7 +344,7 @@ static int ccp_register_aes_alg(struct list_head *head,
ret = crypto_register_alg(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 4d4e016d755b..bdec01ec608f 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -33,7 +33,6 @@ static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
-
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
@@ -48,6 +47,7 @@ struct ccp_crypto_queue {
struct list_head *backlog;
unsigned int cmd_count;
};
+
#define CCP_CRYPTO_MAX_QLEN 100
static struct ccp_crypto_queue req_queue;
@@ -77,7 +77,6 @@ struct ccp_crypto_cpu {
int err;
};
-
static inline bool ccp_crypto_success(int err)
{
if (err && (err != -EINPROGRESS) && (err != -EBUSY))
@@ -143,7 +142,7 @@ static void ccp_crypto_complete(void *data, int err)
int ret;
if (err == -EINPROGRESS) {
- /* Only propogate the -EINPROGRESS if necessary */
+ /* Only propagate the -EINPROGRESS if necessary */
if (crypto_cmd->ret == -EBUSY) {
crypto_cmd->ret = -EINPROGRESS;
req->complete(req, -EINPROGRESS);
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 96531571f7cf..507b34e0cc19 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -23,7 +23,6 @@
#include "ccp-crypto.h"
-
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
struct ahash_request *req = ahash_request_cast(async_req);
@@ -37,11 +36,13 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
if (rctx->hash_rem) {
/* Save remaining data to buffer */
unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
scatterwalk_map_and_copy(rctx->buf, rctx->src,
offset, rctx->hash_rem, 0);
rctx->buf_count = rctx->hash_rem;
- } else
+ } else {
rctx->buf_count = 0;
+ }
/* Update result area if supplied */
if (req->result)
@@ -227,8 +228,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
}
key_len = digest_size;
- } else
+ } else {
memcpy(ctx->u.sha.key, key, key_len);
+ }
for (i = 0; i < block_size; i++) {
ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
@@ -355,7 +357,7 @@ static int ccp_register_hmac_alg(struct list_head *head,
ret = crypto_register_ahash(alg);
if (ret) {
pr_err("%s ahash algorithm registration error (%d)\n",
- base->cra_name, ret);
+ base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
@@ -410,7 +412,7 @@ static int ccp_register_sha_alg(struct list_head *head,
ret = crypto_register_ahash(alg);
if (ret) {
pr_err("%s ahash algorithm registration error (%d)\n",
- base->cra_name, ret);
+ base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 9aa4ae184f7f..76a96f0f44c6 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -13,7 +13,6 @@
#ifndef __CCP_CRYPTO_H__
#define __CCP_CRYPTO_H__
-
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/pci.h>
@@ -25,7 +24,6 @@
#include <crypto/hash.h>
#include <crypto/sha.h>
-
#define CCP_CRA_PRIORITY 300
struct ccp_crypto_ablkcipher_alg {
@@ -68,7 +66,6 @@ static inline struct ccp_crypto_ahash_alg *
return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg);
}
-
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index ca29c120b85f..861bacc1bb94 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -37,7 +37,6 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
-
static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
@@ -296,11 +295,9 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
{
struct ccp_device *ccp;
- ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
- if (ccp == NULL) {
- dev_err(dev, "unable to allocate device struct\n");
+ ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
+ if (!ccp)
return NULL;
- }
ccp->dev = dev;
INIT_LIST_HEAD(&ccp->cmd);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 62ff35a6b9ec..6ff89031fb96 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -21,7 +21,7 @@
#include <linux/wait.h>
#include <linux/dmapool.h>
#include <linux/hw_random.h>
-
+#include <linux/bitops.h>
#define MAX_DMAPOOL_NAME_LEN 32
@@ -33,7 +33,6 @@
#define CACHE_NONE 0x00
#define CACHE_WB_NO_ALLOC 0xb7
-
/****** Register Mappings ******/
#define Q_MASK_REG 0x000
#define TRNG_OUT_REG 0x00c
@@ -54,8 +53,8 @@
#define CMD_Q_CACHE_BASE 0x228
#define CMD_Q_CACHE_INC 0x20
-#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f);
-#define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f);
+#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
+#define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f)
/****** REQ0 Related Values ******/
#define REQ0_WAIT_FOR_WRITE 0x00000004
@@ -103,7 +102,6 @@
/****** REQ6 Related Values ******/
#define REQ6_MEMTYPE_SHIFT 16
-
/****** Key Storage Block ******/
#define KSB_START 77
#define KSB_END 127
@@ -114,7 +112,7 @@
#define CCP_JOBID_MASK 0x0000003f
#define CCP_DMAPOOL_MAX_SIZE 64
-#define CCP_DMAPOOL_ALIGN (1 << 5)
+#define CCP_DMAPOOL_ALIGN BIT(5)
#define CCP_REVERSE_BUF_SIZE 64
@@ -142,7 +140,6 @@
#define CCP_ECC_RESULT_OFFSET 60
#define CCP_ECC_RESULT_SUCCESS 0x0001
-
struct ccp_device;
struct ccp_cmd;
@@ -261,7 +258,6 @@ struct ccp_device {
unsigned int axcache;
};
-
int ccp_pci_init(void);
void ccp_pci_exit(void);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 8729364261d7..71f2e3c89424 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -27,7 +27,6 @@
#include "ccp-dev.h"
-
enum ccp_memtype {
CCP_MEMTYPE_SYSTEM = 0,
CCP_MEMTYPE_KSB,
@@ -515,7 +514,6 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
if (!wa->dma_count)
return -ENOMEM;
-
return 0;
}
@@ -763,8 +761,9 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
op_len = min(sg_src_len, sg_dst_len);
- } else
+ } else {
op_len = sg_src_len;
+ }
/* The data operation length will be at least block_size in length
* or the smaller of available sg room remaining for the source or
@@ -1131,9 +1130,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
goto e_ctx;
- if (in_place)
+ if (in_place) {
dst = src;
- else {
+ } else {
ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
@@ -1304,9 +1303,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
goto e_ctx;
- if (in_place)
+ if (in_place) {
dst = src;
- else {
+ } else {
ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
unit_size, DMA_FROM_DEVICE);
if (ret)
@@ -1451,8 +1450,9 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_ctx;
}
memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
- } else
+ } else {
ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ }
ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -1732,9 +1732,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
goto e_mask;
- if (in_place)
+ if (in_place) {
dst = src;
- else {
+ } else {
ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
if (ret)
@@ -1974,7 +1974,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
src.address += CCP_ECC_OPERAND_SIZE;
	/* Set the first point Z coordinate to 1 */
- *(src.address) = 0x01;
+ *src.address = 0x01;
src.address += CCP_ECC_OPERAND_SIZE;
if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
@@ -1989,7 +1989,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
src.address += CCP_ECC_OPERAND_SIZE;
	/* Set the second point Z coordinate to 1 */
- *(src.address) = 0x01;
+ *src.address = 0x01;
src.address += CCP_ECC_OPERAND_SIZE;
} else {
/* Copy the Domain "a" parameter */
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 7f89c946adfe..af190d4795a8 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -174,11 +174,10 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ccp)
goto e_err;
- ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
- if (!ccp_pci) {
- ret = -ENOMEM;
- goto e_free1;
- }
+ ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
+ if (!ccp_pci)
+ goto e_err;
+
ccp->dev_specific = ccp_pci;
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -186,7 +185,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = pci_request_regions(pdev, "ccp");
if (ret) {
dev_err(dev, "pci_request_regions failed (%d)\n", ret);
- goto e_free2;
+ goto e_err;
}
ret = pci_enable_device(pdev);
@@ -204,7 +203,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = -EIO;
ccp->io_map = pci_iomap(pdev, bar, 0);
- if (ccp->io_map == NULL) {
+ if (!ccp->io_map) {
dev_err(dev, "pci_iomap failed\n");
goto e_device;
}
@@ -239,12 +238,6 @@ e_device:
e_regions:
pci_release_regions(pdev);
-e_free2:
- kfree(ccp_pci);
-
-e_free1:
- kfree(ccp);
-
e_err:
dev_notice(dev, "initialization failed\n");
return ret;
@@ -266,8 +259,6 @@ static void ccp_pci_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
- kfree(ccp);
-
dev_notice(dev, "disabled\n");
}
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 8c50bad25f7e..b1c20b2b5647 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -23,9 +23,15 @@
#include <linux/delay.h>
#include <linux/ccp.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/acpi.h>
#include "ccp-dev.h"
+struct ccp_platform {
+ int use_acpi;
+ int coherent;
+};
static int ccp_get_irq(struct ccp_device *ccp)
{
@@ -84,10 +90,64 @@ static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
return NULL;
}
+#ifdef CONFIG_ACPI
+static int ccp_acpi_support(struct ccp_device *ccp)
+{
+ struct ccp_platform *ccp_platform = ccp->dev_specific;
+ struct acpi_device *adev = ACPI_COMPANION(ccp->dev);
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long data;
+ int cca;
+
+ /* Retrieve the device cache coherency value */
+ handle = adev->handle;
+ do {
+ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+ if (!ACPI_FAILURE(status)) {
+ cca = data;
+ break;
+ }
+ } while (!ACPI_FAILURE(status));
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(ccp->dev, "error obtaining acpi coherency value\n");
+ return -EINVAL;
+ }
+
+ ccp_platform->coherent = !!cca;
+
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static int ccp_acpi_support(struct ccp_device *ccp)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_OF
+static int ccp_of_support(struct ccp_device *ccp)
+{
+ struct ccp_platform *ccp_platform = ccp->dev_specific;
+
+ ccp_platform->coherent = of_dma_is_coherent(ccp->dev->of_node);
+
+ return 0;
+}
+#else
+static int ccp_of_support(struct ccp_device *ccp)
+{
+ return -EINVAL;
+}
+#endif
+
static int ccp_platform_probe(struct platform_device *pdev)
{
struct ccp_device *ccp;
+ struct ccp_platform *ccp_platform;
struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct resource *ior;
int ret;
@@ -96,24 +156,40 @@ static int ccp_platform_probe(struct platform_device *pdev)
if (!ccp)
goto e_err;
- ccp->dev_specific = NULL;
+ ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
+ if (!ccp_platform)
+ goto e_err;
+
+ ccp->dev_specific = ccp_platform;
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
+ ccp_platform->use_acpi = (!adev || acpi_disabled) ? 0 : 1;
+
ior = ccp_find_mmio_area(ccp);
ccp->io_map = devm_ioremap_resource(dev, ior);
if (IS_ERR(ccp->io_map)) {
ret = PTR_ERR(ccp->io_map);
- goto e_free;
+ goto e_err;
}
ccp->io_regs = ccp->io_map;
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
- *(dev->dma_mask) = DMA_BIT_MASK(48);
- dev->coherent_dma_mask = DMA_BIT_MASK(48);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ if (ccp_platform->use_acpi)
+ ret = ccp_acpi_support(ccp);
+ else
+ ret = ccp_of_support(ccp);
+ if (ret)
+ goto e_err;
- if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ if (ccp_platform->coherent)
ccp->axcache = CACHE_WB_NO_ALLOC;
else
ccp->axcache = CACHE_NONE;
@@ -122,15 +198,12 @@ static int ccp_platform_probe(struct platform_device *pdev)
ret = ccp_init(ccp);
if (ret)
- goto e_free;
+ goto e_err;
dev_notice(dev, "enabled\n");
return 0;
-e_free:
- kfree(ccp);
-
e_err:
dev_notice(dev, "initialization failed\n");
return ret;
@@ -143,8 +216,6 @@ static int ccp_platform_remove(struct platform_device *pdev)
ccp_destroy(ccp);
- kfree(ccp);
-
dev_notice(dev, "disabled\n");
return 0;
@@ -200,15 +271,29 @@ static int ccp_platform_resume(struct platform_device *pdev)
}
#endif
-static const struct of_device_id ccp_platform_ids[] = {
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ccp_acpi_match[] = {
+ { "AMDI0C00", 0 },
+ { },
+};
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id ccp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a" },
{ },
};
+#endif
static struct platform_driver ccp_platform_driver = {
.driver = {
.name = "AMD Cryptographic Coprocessor",
- .of_match_table = ccp_platform_ids,
+#ifdef CONFIG_ACPI
+ .acpi_match_table = ccp_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = ccp_of_match,
+#endif
},
.probe = ccp_platform_probe,
.remove = ccp_platform_remove,
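
One detail in the ccp-platform.c changes above is the move from open-coded dma_mask writes to dma_set_mask_and_coherent(). A minimal sketch of that idiom with the same 48-bit mask as the patch (demo_set_dma_mask is an illustrative name):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int demo_set_dma_mask(struct device *dev)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));

	if (ret)
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
	return ret;
}
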
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
new file mode 100644
index 000000000000..ad47d0d61098
--- /dev/null
+++ b/drivers/crypto/img-hash.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (c) 2014 Imagination Technologies
+ * Authors: Will Thomas, James Hartley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Interface structure taken from omap-sham driver
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#define CR_RESET 0
+#define CR_RESET_SET 1
+#define CR_RESET_UNSET 0
+
+#define CR_MESSAGE_LENGTH_H 0x4
+#define CR_MESSAGE_LENGTH_L 0x8
+
+#define CR_CONTROL 0xc
+#define CR_CONTROL_BYTE_ORDER_3210 0
+#define CR_CONTROL_BYTE_ORDER_0123 1
+#define CR_CONTROL_BYTE_ORDER_2310 2
+#define CR_CONTROL_BYTE_ORDER_1032 3
+#define CR_CONTROL_BYTE_ORDER_SHIFT 8
+#define CR_CONTROL_ALGO_MD5 0
+#define CR_CONTROL_ALGO_SHA1 1
+#define CR_CONTROL_ALGO_SHA224 2
+#define CR_CONTROL_ALGO_SHA256 3
+
+#define CR_INTSTAT 0x10
+#define CR_INTENAB 0x14
+#define CR_INTCLEAR 0x18
+#define CR_INT_RESULTS_AVAILABLE BIT(0)
+#define CR_INT_NEW_RESULTS_SET BIT(1)
+#define CR_INT_RESULT_READ_ERR BIT(2)
+#define CR_INT_MESSAGE_WRITE_ERROR BIT(3)
+#define CR_INT_STATUS BIT(8)
+
+#define CR_RESULT_QUEUE 0x1c
+#define CR_RSD0 0x40
+#define CR_CORE_REV 0x50
+#define CR_CORE_DES1 0x60
+#define CR_CORE_DES2 0x70
+
+#define DRIVER_FLAGS_BUSY BIT(0)
+#define DRIVER_FLAGS_FINAL BIT(1)
+#define DRIVER_FLAGS_DMA_ACTIVE BIT(2)
+#define DRIVER_FLAGS_OUTPUT_READY BIT(3)
+#define DRIVER_FLAGS_INIT BIT(4)
+#define DRIVER_FLAGS_CPU BIT(5)
+#define DRIVER_FLAGS_DMA_READY BIT(6)
+#define DRIVER_FLAGS_ERROR BIT(7)
+#define DRIVER_FLAGS_SG BIT(8)
+#define DRIVER_FLAGS_SHA1 BIT(18)
+#define DRIVER_FLAGS_SHA224 BIT(19)
+#define DRIVER_FLAGS_SHA256 BIT(20)
+#define DRIVER_FLAGS_MD5 BIT(21)
+
+#define IMG_HASH_QUEUE_LENGTH 20
+#define IMG_HASH_DMA_THRESHOLD 64
+
+#ifdef __LITTLE_ENDIAN
+#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_3210
+#else
+#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_0123
+#endif
+
+struct img_hash_dev;
+
+struct img_hash_request_ctx {
+ struct img_hash_dev *hdev;
+ u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+ unsigned long flags;
+ size_t digsize;
+
+ dma_addr_t dma_addr;
+ size_t dma_ct;
+
+ /* sg root */
+ struct scatterlist *sgfirst;
+ /* walk state */
+ struct scatterlist *sg;
+ size_t nents;
+ size_t offset;
+ unsigned int total;
+ size_t sent;
+
+ unsigned long op;
+
+ size_t bufcnt;
+ u8 buffer[0] __aligned(sizeof(u32));
+ struct ahash_request fallback_req;
+};
+
+struct img_hash_ctx {
+ struct img_hash_dev *hdev;
+ unsigned long flags;
+ struct crypto_ahash *fallback;
+};
+
+struct img_hash_dev {
+ struct list_head list;
+ struct device *dev;
+ struct clk *hash_clk;
+ struct clk *sys_clk;
+ void __iomem *io_base;
+
+ phys_addr_t bus_addr;
+ void __iomem *cpu_addr;
+
+ spinlock_t lock;
+ int err;
+ struct tasklet_struct done_task;
+ struct tasklet_struct dma_task;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+
+ struct dma_chan *dma_lch;
+};
+
+struct img_hash_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+static struct img_hash_drv img_hash = {
+ .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
+};
+
+static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
+{
+ return readl_relaxed(hdev->io_base + offset);
+}
+
+static inline void img_hash_write(struct img_hash_dev *hdev,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, hdev->io_base + offset);
+}
+
+static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
+{
+ return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
+}
+
+static void img_hash_start(struct img_hash_dev *hdev, bool dma)
+{
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+ u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
+
+ if (ctx->flags & DRIVER_FLAGS_MD5)
+ cr |= CR_CONTROL_ALGO_MD5;
+ else if (ctx->flags & DRIVER_FLAGS_SHA1)
+ cr |= CR_CONTROL_ALGO_SHA1;
+ else if (ctx->flags & DRIVER_FLAGS_SHA224)
+ cr |= CR_CONTROL_ALGO_SHA224;
+ else if (ctx->flags & DRIVER_FLAGS_SHA256)
+ cr |= CR_CONTROL_ALGO_SHA256;
+ dev_dbg(hdev->dev, "Starting hash process\n");
+ img_hash_write(hdev, CR_CONTROL, cr);
+
+ /*
+ * The hardware block requires two cycles between writing the control
+	 * register and writing the first word of data in non-DMA mode. To
+	 * ensure the first data write is not grouped in a burst with the control
+	 * register write, a read is issued to 'flush' the bus.
+ */
+ if (!dma)
+ img_hash_read(hdev, CR_CONTROL);
+}
+
+static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
+ size_t length, int final)
+{
+ u32 count, len32;
+ const u32 *buffer = (const u32 *)buf;
+
+ dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);
+
+ if (final)
+ hdev->flags |= DRIVER_FLAGS_FINAL;
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ for (count = 0; count < len32; count++)
+ writel_relaxed(buffer[count], hdev->cpu_addr);
+
+ return -EINPROGRESS;
+}
+
+static void img_hash_dma_callback(void *data)
+{
+ struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+ if (ctx->bufcnt) {
+ img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
+ ctx->bufcnt = 0;
+ }
+ if (ctx->sg)
+ tasklet_schedule(&hdev->dma_task);
+}
+
+static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+ ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
+ if (ctx->dma_ct == 0) {
+ dev_err(hdev->dev, "Invalid DMA sg\n");
+ hdev->err = -EINVAL;
+ return -EINVAL;
+ }
+
+ desc = dmaengine_prep_slave_sg(hdev->dma_lch,
+ sg,
+ ctx->dma_ct,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(hdev->dev, "Null DMA descriptor\n");
+ hdev->err = -EINVAL;
+ dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
+ return -EINVAL;
+ }
+ desc->callback = img_hash_dma_callback;
+ desc->callback_param = hdev;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(hdev->dma_lch);
+
+ return 0;
+}
+
+static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
+{
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+ ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
+ ctx->buffer, hdev->req->nbytes);
+
+ ctx->total = hdev->req->nbytes;
+ ctx->bufcnt = 0;
+
+ hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
+
+ img_hash_start(hdev, false);
+
+ return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
+}
+
+static int img_hash_finish(struct ahash_request *req)
+{
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return -EINVAL;
+
+ memcpy(req->result, ctx->digest, ctx->digsize);
+
+ return 0;
+}
+
+static void img_hash_copy_hash(struct ahash_request *req)
+{
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)ctx->digest;
+ int i;
+
+ for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
+ hash[i] = img_hash_read_result_queue(ctx->hdev);
+}
+
+static void img_hash_finish_req(struct ahash_request *req, int err)
+{
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+ struct img_hash_dev *hdev = ctx->hdev;
+
+ if (!err) {
+ img_hash_copy_hash(req);
+ if (DRIVER_FLAGS_FINAL & hdev->flags)
+ err = img_hash_finish(req);
+ } else {
+ dev_warn(hdev->dev, "Hash failed with error %d\n", err);
+ ctx->flags |= DRIVER_FLAGS_ERROR;
+ }
+
+ hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
+ DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+static int img_hash_write_via_dma(struct img_hash_dev *hdev)
+{
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+ img_hash_start(hdev, true);
+
+ dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
+
+ if (!ctx->total)
+ hdev->flags |= DRIVER_FLAGS_FINAL;
+
+ hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
+
+ tasklet_schedule(&hdev->dma_task);
+
+ return -EINPROGRESS;
+}
+
+static int img_hash_dma_init(struct img_hash_dev *hdev)
+{
+ struct dma_slave_config dma_conf;
+ int err = -EINVAL;
+
+ hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
+ if (!hdev->dma_lch) {
+		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
+ return -EBUSY;
+ }
+ dma_conf.direction = DMA_MEM_TO_DEV;
+ dma_conf.dst_addr = hdev->bus_addr;
+ dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_conf.dst_maxburst = 16;
+ dma_conf.device_fc = false;
+
+ err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
+ if (err) {
+ dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
+ dma_release_channel(hdev->dma_lch);
+ return err;
+ }
+
+ return 0;
+}
+
+static void img_hash_dma_task(unsigned long d)
+{
+ struct img_hash_dev *hdev = (struct img_hash_dev *)d;
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+ u8 *addr;
+ size_t nbytes, bleft, wsend, len, tbc;
+ struct scatterlist tsg;
+
+ if (!ctx->sg)
+ return;
+
+ addr = sg_virt(ctx->sg);
+ nbytes = ctx->sg->length - ctx->offset;
+
+ /*
+ * The hash accelerator does not support a data valid mask. This means
+ * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
+ * padding bytes in the last word written by that dma would erroneously
+ * be included in the hash. To avoid this we round down the transfer,
+ * and add the excess to the start of the next dma. It does not matter
+ * that the final dma may not be a multiple of 4 bytes as the hashing
+ * block is programmed to accept the correct number of bytes.
+ */
+
+ bleft = nbytes % 4;
+ wsend = (nbytes / 4);
+
+ if (wsend) {
+ sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
+ if (img_hash_xmit_dma(hdev, &tsg)) {
+ dev_err(hdev->dev, "DMA failed, falling back to CPU");
+ ctx->flags |= DRIVER_FLAGS_CPU;
+ hdev->err = 0;
+ img_hash_xmit_cpu(hdev, addr + ctx->offset,
+ wsend * 4, 0);
+ ctx->sent += wsend * 4;
+ wsend = 0;
+ } else {
+ ctx->sent += wsend * 4;
+ }
+ }
+
+ if (bleft) {
+ ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
+ ctx->buffer, bleft, ctx->sent);
+ tbc = 0;
+ ctx->sg = sg_next(ctx->sg);
+ while (ctx->sg && (ctx->bufcnt < 4)) {
+ len = ctx->sg->length;
+ if (likely(len > (4 - ctx->bufcnt)))
+ len = 4 - ctx->bufcnt;
+ tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
+ ctx->buffer + ctx->bufcnt, len,
+ ctx->sent + ctx->bufcnt);
+ ctx->bufcnt += tbc;
+ if (tbc >= ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ tbc = 0;
+ }
+ }
+
+ ctx->sent += ctx->bufcnt;
+ ctx->offset = tbc;
+
+ if (!wsend)
+ img_hash_dma_callback(hdev);
+ } else {
+ ctx->offset = 0;
+ ctx->sg = sg_next(ctx->sg);
+ }
+}
+
+static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
+{
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+ if (ctx->flags & DRIVER_FLAGS_SG)
+ dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static int img_hash_process_data(struct img_hash_dev *hdev)
+{
+ struct ahash_request *req = hdev->req;
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+ int err = 0;
+
+ ctx->bufcnt = 0;
+
+ if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
+ dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
+ req->nbytes);
+ err = img_hash_write_via_dma(hdev);
+ } else {
+ dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
+ req->nbytes);
+ err = img_hash_write_via_cpu(hdev);
+ }
+ return err;
+}
+
+static int img_hash_hw_init(struct img_hash_dev *hdev)
+{
+ unsigned long long nbits;
+ u32 u, l;
+
+ img_hash_write(hdev, CR_RESET, CR_RESET_SET);
+ img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
+ img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
+
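+ /* Program the total message length in bits, split across the H/L registers. */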
+ nbits = (u64)hdev->req->nbytes << 3;
+ u = nbits >> 32;
+ l = nbits;
+ img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
+ img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
+
+ if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
+ hdev->flags |= DRIVER_FLAGS_INIT;
+ hdev->err = 0;
+ }
+ dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
+ return 0;
+}
+
+static int img_hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ rctx->fallback_req.base.flags = req->base.flags
+ & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int img_hash_handle_queue(struct img_hash_dev *hdev,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct img_hash_request_ctx *ctx;
+ unsigned long flags;
+ int err = 0, res = 0;
+
+ spin_lock_irqsave(&hdev->lock, flags);
+
+ if (req)
+ res = ahash_enqueue_request(&hdev->queue, req);
+
+ if (DRIVER_FLAGS_BUSY & hdev->flags) {
+ spin_unlock_irqrestore(&hdev->lock, flags);
+ return res;
+ }
+
+ backlog = crypto_get_backlog(&hdev->queue);
+ async_req = crypto_dequeue_request(&hdev->queue);
+ if (async_req)
+ hdev->flags |= DRIVER_FLAGS_BUSY;
+
+ spin_unlock_irqrestore(&hdev->lock, flags);
+
+ if (!async_req)
+ return res;
+
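+ /* Let the owner of a backlogged request know it is now being processed. */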
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+ hdev->req = req;
+
+ ctx = ahash_request_ctx(req);
+
+ dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
+ ctx->op, req->nbytes);
+
+ err = img_hash_hw_init(hdev);
+
+ if (!err)
+ err = img_hash_process_data(hdev);
+
+ if (err != -EINPROGRESS) {
+ /* done_task will not finish so do it here */
+ img_hash_finish_req(req, err);
+ }
+ return res;
+}
+
+static int img_hash_update(struct ahash_request *req)
+{
+ struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ rctx->fallback_req.base.flags = req->base.flags
+ & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int img_hash_final(struct ahash_request *req)
+{
+ struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ rctx->fallback_req.base.flags = req->base.flags
+ & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int img_hash_finup(struct ahash_request *req)
+{
+ struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ rctx->fallback_req.base.flags = req->base.flags
+ & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int img_hash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+ struct img_hash_dev *hdev = NULL;
+ struct img_hash_dev *tmp;
+ int err;
+
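+ /* Bind the tfm to the first registered accelerator if none is set yet. */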
+ spin_lock(&img_hash.lock);
+ if (!tctx->hdev) {
+ list_for_each_entry(tmp, &img_hash.dev_list, list) {
+ hdev = tmp;
+ break;
+ }
+ tctx->hdev = hdev;
+
+ } else {
+ hdev = tctx->hdev;
+ }
+
+ spin_unlock(&img_hash.lock);
+ ctx->hdev = hdev;
+ ctx->flags = 0;
+ ctx->digsize = crypto_ahash_digestsize(tfm);
+
+ switch (ctx->digsize) {
+ case SHA1_DIGEST_SIZE:
+ ctx->flags |= DRIVER_FLAGS_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->flags |= DRIVER_FLAGS_SHA256;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= DRIVER_FLAGS_SHA224;
+ break;
+ case MD5_DIGEST_SIZE:
+ ctx->flags |= DRIVER_FLAGS_MD5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->bufcnt = 0;
+ ctx->offset = 0;
+ ctx->sent = 0;
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->sgfirst = req->src;
+ ctx->nents = sg_nents(ctx->sg);
+
+ err = img_hash_handle_queue(tctx->hdev, req);
+
+ return err;
+}
+
+static int img_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+ int err = -ENOMEM;
+
+ ctx->fallback = crypto_alloc_ahash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback)) {
+ pr_err("img_hash: Could not load fallback driver.\n");
+ err = PTR_ERR(ctx->fallback);
+ goto err;
+ }
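+ /*
+ * Reserve room in the request context for buffering data written
+ * through the CPU port (requests below the DMA threshold and the
+ * sub-word remainders of DMA transfers).
+ */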
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct img_hash_request_ctx) +
+ IMG_HASH_DMA_THRESHOLD);
+
+ return 0;
+
+err:
+ return err;
+}
+
+static void img_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(tctx->fallback);
+}
+
+static irqreturn_t img_irq_handler(int irq, void *dev_id)
+{
+ struct img_hash_dev *hdev = dev_id;
+ u32 reg;
+
+ reg = img_hash_read(hdev, CR_INTSTAT);
+ img_hash_write(hdev, CR_INTCLEAR, reg);
+
+ if (reg & CR_INT_NEW_RESULTS_SET) {
+ dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
+ if (DRIVER_FLAGS_BUSY & hdev->flags) {
+ hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
+ if (!(DRIVER_FLAGS_CPU & hdev->flags))
+ hdev->flags |= DRIVER_FLAGS_DMA_READY;
+ tasklet_schedule(&hdev->done_task);
+ } else {
+ dev_warn(hdev->dev,
+ "HASH interrupt when no active requests.\n");
+ }
+ } else if (reg & CR_INT_RESULTS_AVAILABLE) {
+ dev_warn(hdev->dev,
+ "IRQ triggered before the hash had completed\n");
+ } else if (reg & CR_INT_RESULT_READ_ERR) {
+ dev_warn(hdev->dev,
+ "Attempt to read from an empty result queue\n");
+ } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
+ dev_warn(hdev->dev,
+ "Data written before the hardware was configured\n");
+ }
+ return IRQ_HANDLED;
+}
+
+static struct ahash_alg img_algs[] = {
+ {
+ .init = img_hash_init,
+ .update = img_hash_update,
+ .final = img_hash_final,
+ .finup = img_hash_finup,
+ .digest = img_hash_digest,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "img-md5",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct img_hash_ctx),
+ .cra_init = img_hash_cra_init,
+ .cra_exit = img_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = img_hash_init,
+ .update = img_hash_update,
+ .final = img_hash_final,
+ .finup = img_hash_finup,
+ .digest = img_hash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "img-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct img_hash_ctx),
+ .cra_init = img_hash_cra_init,
+ .cra_exit = img_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = img_hash_init,
+ .update = img_hash_update,
+ .final = img_hash_final,
+ .finup = img_hash_finup,
+ .digest = img_hash_digest,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "img-sha224",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct img_hash_ctx),
+ .cra_init = img_hash_cra_init,
+ .cra_exit = img_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = img_hash_init,
+ .update = img_hash_update,
+ .final = img_hash_final,
+ .finup = img_hash_finup,
+ .digest = img_hash_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "img-sha256",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct img_hash_ctx),
+ .cra_init = img_hash_cra_init,
+ .cra_exit = img_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+static int img_register_algs(struct img_hash_dev *hdev)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
+ err = crypto_register_ahash(&img_algs[i]);
+ if (err)
+ goto err_reg;
+ }
+ return 0;
+
+err_reg:
+ for (; i--; )
+ crypto_unregister_ahash(&img_algs[i]);
+
+ return err;
+}
+
+static int img_unregister_algs(struct img_hash_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(img_algs); i++)
+ crypto_unregister_ahash(&img_algs[i]);
+ return 0;
+}
+
+static void img_hash_done_task(unsigned long data)
+{
+ struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+ int err = 0;
+
+ if (hdev->err == -EINVAL) {
+ err = hdev->err;
+ goto finish;
+ }
+
+ if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
+ img_hash_handle_queue(hdev, NULL);
+ return;
+ }
+
+ if (DRIVER_FLAGS_CPU & hdev->flags) {
+ if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
+ hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
+ goto finish;
+ }
+ } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
+ if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
+ hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
+ img_hash_write_via_dma_stop(hdev);
+ if (hdev->err) {
+ err = hdev->err;
+ goto finish;
+ }
+ }
+ if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
+ hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
+ DRIVER_FLAGS_OUTPUT_READY);
+ goto finish;
+ }
+ }
+ return;
+
+finish:
+ img_hash_finish_req(hdev->req, err);
+}
+
+static const struct of_device_id img_hash_match[] = {
+ { .compatible = "img,hash-accelerator" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, img_hash_match);
+
+static int img_hash_probe(struct platform_device *pdev)
+{
+ struct img_hash_dev *hdev;
+ struct device *dev = &pdev->dev;
+ struct resource *hash_res;
+ int irq;
+ int err;
+
+ hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
+ if (hdev == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&hdev->lock);
+
+ hdev->dev = dev;
+
+ platform_set_drvdata(pdev, hdev);
+
+ INIT_LIST_HEAD(&hdev->list);
+
+ tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
+ tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
+
+ crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
+
+ /* Register bank */
+ hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ hdev->io_base = devm_ioremap_resource(dev, hash_res);
+ if (IS_ERR(hdev->io_base)) {
+ err = PTR_ERR(hdev->io_base);
+ dev_err(dev, "can't ioremap, returned %d\n", err);
+
+ goto res_err;
+ }
+
+ /* Write port (DMA or CPU) */
+ hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
+ if (IS_ERR(hdev->cpu_addr)) {
+ dev_err(dev, "can't ioremap write port\n");
+ err = PTR_ERR(hdev->cpu_addr);
+ goto res_err;
+ }
+ hdev->bus_addr = hash_res->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = irq;
+ goto res_err;
+ }
+
+ err = devm_request_irq(dev, irq, img_irq_handler, 0,
+ dev_name(dev), hdev);
+ if (err) {
+ dev_err(dev, "unable to request irq\n");
+ goto res_err;
+ }
+ dev_dbg(dev, "using IRQ channel %d\n", irq);
+
+ hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
+ if (IS_ERR(hdev->hash_clk)) {
+ dev_err(dev, "clock initialization failed.\n");
+ err = PTR_ERR(hdev->hash_clk);
+ goto res_err;
+ }
+
+ hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
+ if (IS_ERR(hdev->sys_clk)) {
+ dev_err(dev, "clock initialization failed.\n");
+ err = PTR_ERR(hdev->sys_clk);
+ goto res_err;
+ }
+
+ err = clk_prepare_enable(hdev->hash_clk);
+ if (err)
+ goto res_err;
+
+ err = clk_prepare_enable(hdev->sys_clk);
+ if (err)
+ goto clk_err;
+
+ err = img_hash_dma_init(hdev);
+ if (err)
+ goto dma_err;
+
+ dev_dbg(dev, "using %s for DMA transfers\n",
+ dma_chan_name(hdev->dma_lch));
+
+ spin_lock(&img_hash.lock);
+ list_add_tail(&hdev->list, &img_hash.dev_list);
+ spin_unlock(&img_hash.lock);
+
+ err = img_register_algs(hdev);
+ if (err)
+ goto err_algs;
+ dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
+
+ return 0;
+
+err_algs:
+ spin_lock(&img_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&img_hash.lock);
+ dma_release_channel(hdev->dma_lch);
+dma_err:
+ clk_disable_unprepare(hdev->sys_clk);
+clk_err:
+ clk_disable_unprepare(hdev->hash_clk);
+res_err:
+ tasklet_kill(&hdev->done_task);
+ tasklet_kill(&hdev->dma_task);
+
+ return err;
+}
+
+static int img_hash_remove(struct platform_device *pdev)
+{
+ static struct img_hash_dev *hdev;
+
+ hdev = platform_get_drvdata(pdev);
+ spin_lock(&img_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&img_hash.lock);
+
+ img_unregister_algs(hdev);
+
+ tasklet_kill(&hdev->done_task);
+ tasklet_kill(&hdev->dma_task);
+
+ dma_release_channel(hdev->dma_lch);
+
+ clk_disable_unprepare(hdev->hash_clk);
+ clk_disable_unprepare(hdev->sys_clk);
+
+ return 0;
+}
+
+static struct platform_driver img_hash_driver = {
+ .probe = img_hash_probe,
+ .remove = img_hash_remove,
+ .driver = {
+ .name = "img-hash-accelerator",
+ .of_match_table = of_match_ptr(img_hash_match),
+ }
+};
+module_platform_driver(img_hash_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
+MODULE_AUTHOR("Will Thomas.");
+MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 829d6394fb33..59ed54e464a9 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -153,7 +153,7 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
struct dcp *sdcp = global_sdcp;
const int chan = actx->chan;
uint32_t stat;
- int ret;
+ unsigned long ret;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index afd136b45f49..10a9aeff1666 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1754,7 +1754,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
dev->dev.of_node->full_name);
return -EINVAL;
}
- cpu_set(*id, p->sharing);
+ cpumask_set_cpu(*id, &p->sharing);
table[*id] = p;
}
return 0;
@@ -1776,7 +1776,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
return -ENOMEM;
}
- cpus_clear(p->sharing);
+ cpumask_clear(&p->sharing);
spin_lock_init(&p->lock);
p->q_type = q_type;
INIT_LIST_HEAD(&p->jobs);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 42f95a4326b0..9a28b7e07c71 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
return err;
}
-static int omap_aes_check_aligned(struct scatterlist *sg)
+static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
+ int len = 0;
+
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
return -1;
if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
return -1;
+
+ len += sg->length;
sg = sg_next(sg);
}
+
+ if (len != total)
+ return -1;
+
return 0;
}
@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->in_sg = req->src;
dd->out_sg = req->dst;
- if (omap_aes_check_aligned(dd->in_sg) ||
- omap_aes_check_aligned(dd->out_sg)) {
+ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
+ omap_aes_check_aligned(dd->out_sg, dd->total)) {
if (omap_aes_copy_sgs(dd))
pr_err("Failed to copy SGs for unaligned cases\n");
dd->sgs_copied = 1;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 3c76696ee578..4d63e0d4da9a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -640,6 +640,7 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
while (ctx->sg) {
vaddr = kmap_atomic(sg_page(ctx->sg));
+ vaddr += ctx->sg->offset;
count = omap_sham_append_buffer(ctx,
vaddr + ctx->offset,
@@ -1945,6 +1946,7 @@ static int omap_sham_probe(struct platform_device *pdev)
dd->flags |= dd->pdata->flags;
pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
pm_runtime_get_sync(dev);
rev = omap_sham_read(dd, SHA_REG_REV(dd));
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 19c0efa29ab3..f22ce7169fa5 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -52,7 +52,6 @@
#include <linux/io.h>
#include "adf_cfg_common.h"
-#define PCI_VENDOR_ID_INTEL 0x8086
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_PCI_MAX_BARS 3
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index c77453b900a3..7f8b66c915ed 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -60,36 +60,40 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
&accel_dev->accel_pci_dev.pci_dev->dev)) {
- pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name);
+ dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
+ hw_device->fw_name);
return -EFAULT;
}
uof_size = loader_data->uof_fw->size;
uof_addr = (void *)loader_data->uof_fw->data;
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
- pr_err("QAT: Failed to map UOF\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
goto out_err;
}
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
- pr_err("QAT: Failed to map UOF\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
goto out_err;
}
return 0;
out_err:
- release_firmware(loader_data->uof_fw);
+ adf_ae_fw_release(accel_dev);
return -EFAULT;
}
-int adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
- release_firmware(loader_data->uof_fw);
qat_uclo_del_uof_obj(loader_data->fw_loader);
qat_hal_deinit(loader_data->fw_loader);
+
+ if (loader_data->uof_fw)
+ release_firmware(loader_data->uof_fw);
+
+ loader_data->uof_fw = NULL;
loader_data->fw_loader = NULL;
- return 0;
}
int adf_ae_start(struct adf_accel_dev *accel_dev)
@@ -104,8 +108,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
ae_ctr++;
}
}
- pr_info("QAT: qat_dev%d started %d acceleration engines\n",
- accel_dev->accel_id, ae_ctr);
+ dev_info(&GET_DEV(accel_dev),
+ "qat_dev%d started %d acceleration engines\n",
+ accel_dev->accel_id, ae_ctr);
return 0;
}
@@ -121,8 +126,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
ae_ctr++;
}
}
- pr_info("QAT: qat_dev%d stopped %d acceleration engines\n",
- accel_dev->accel_id, ae_ctr);
+ dev_info(&GET_DEV(accel_dev),
+ "qat_dev%d stopped %d acceleration engines\n",
+ accel_dev->accel_id, ae_ctr);
return 0;
}
@@ -147,12 +153,12 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
accel_dev->fw_loader = loader_data;
if (qat_hal_init(accel_dev)) {
- pr_err("QAT: Failed to init the AEs\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
kfree(loader_data);
return -EFAULT;
}
if (adf_ae_reset(accel_dev, 0)) {
- pr_err("QAT: Failed to reset the AEs\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
qat_hal_deinit(loader_data->fw_loader);
kfree(loader_data);
return -EFAULT;
@@ -162,6 +168,9 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+ qat_hal_deinit(loader_data->fw_loader);
kfree(accel_dev->fw_loader);
accel_dev->fw_loader = NULL;
return 0;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index fa1fef824de2..2dbc733b8ab2 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -60,14 +60,14 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
- pr_info("QAT: Acceleration driver hardware error detected.\n");
+ dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
if (!accel_dev) {
- pr_err("QAT: Can't find acceleration device\n");
+ dev_err(&pdev->dev, "Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
if (state == pci_channel_io_perm_failure) {
- pr_err("QAT: Can't recover from device error\n");
+ dev_err(&pdev->dev, "Can't recover from device error\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -88,10 +88,12 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
struct pci_dev *parent = pdev->bus->self;
uint16_t bridge_ctl = 0;
- pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
+ dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+ accel_dev->accel_id);
if (!pci_wait_for_pending_transaction(pdev))
- pr_info("QAT: Transaction still in progress. Proceeding\n");
+ dev_info(&GET_DEV(accel_dev),
+ "Transaction still in progress. Proceeding\n");
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
@@ -158,7 +160,8 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
unsigned long timeout = wait_for_completion_timeout(
&reset_data->compl, wait_jiffies);
if (!timeout) {
- pr_err("QAT: Reset device timeout expired\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Reset device timeout expired\n");
ret = -EFAULT;
}
kfree(reset_data);
@@ -184,8 +187,8 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
static void adf_resume(struct pci_dev *pdev)
{
- pr_info("QAT: Acceleration driver reset completed\n");
- pr_info("QAT: Device is up and runnig\n");
+ dev_info(&pdev->dev, "Acceleration driver reset completed\n");
+ dev_info(&pdev->dev, "Device is up and runnig\n");
}
static struct pci_error_handlers adf_err_handler = {
@@ -236,7 +239,7 @@ EXPORT_SYMBOL_GPL(adf_disable_aer);
int adf_init_aer(void)
{
device_reset_wq = create_workqueue("qat_device_reset_wq");
- return (device_reset_wq == NULL) ? -EFAULT : 0;
+ return !device_reset_wq ? -EFAULT : 0;
}
void adf_exit_aer(void)
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index de16da9070a5..ab65bc274561 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -142,7 +142,8 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
dev_cfg_data,
&qat_dev_cfg_fops);
if (!dev_cfg_data->debug) {
- pr_err("QAT: Failed to create qat cfg debugfs entry.\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create qat cfg debugfs entry.\n");
kfree(dev_cfg_data);
accel_dev->cfg = NULL;
return -EFAULT;
@@ -305,7 +306,7 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"0x%lx", (unsigned long)val);
} else {
- pr_err("QAT: Unknown type given.\n");
+ dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
kfree(key_val);
return -1;
}
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index c7ac758ebc90..13575111382c 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -59,7 +59,7 @@
#define ADF_RING_SYM_TX "RingSymTx"
#define ADF_RING_RND_TX "RingNrbgTx"
#define ADF_RING_ASYM_RX "RingAsymRx"
-#define ADF_RING_SYM_RX "RinSymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
#define ADF_RING_RND_RX "RingNrbgRx"
#define ADF_RING_DC_TX "RingTx"
#define ADF_RING_DC_RX "RingRx"
@@ -69,15 +69,15 @@
#define ADF_DC "Dc"
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
- ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
- ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
- ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
- ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_ACCEL_STR "Accelerator%d"
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index a62e485c8786..0666ee6a3360 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -115,7 +115,7 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
int adf_ae_init(struct adf_accel_dev *accel_dev);
int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
-int adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
int adf_ae_start(struct adf_accel_dev *accel_dev);
int adf_ae_stop(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 74207a6f0516..cb5f066e93a6 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -77,14 +77,14 @@ struct adf_ctl_drv_info {
struct class *drv_class;
};
-static struct adf_ctl_drv_info adt_ctl_drv;
+static struct adf_ctl_drv_info adf_ctl_drv;
static void adf_chr_drv_destroy(void)
{
- device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0));
- cdev_del(&adt_ctl_drv.drv_cdev);
- class_destroy(adt_ctl_drv.drv_class);
- unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1);
+ device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
+ cdev_del(&adf_ctl_drv.drv_cdev);
+ class_destroy(adf_ctl_drv.drv_class);
+ unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}
static int adf_chr_drv_create(void)
@@ -97,20 +97,20 @@ static int adf_chr_drv_create(void)
return -EFAULT;
}
- adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
- if (IS_ERR(adt_ctl_drv.drv_class)) {
+ adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(adf_ctl_drv.drv_class)) {
pr_err("QAT: class_create failed for adf_ctl\n");
goto err_chrdev_unreg;
}
- adt_ctl_drv.major = MAJOR(dev_id);
- cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops);
- if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) {
+ adf_ctl_drv.major = MAJOR(dev_id);
+ cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
+ if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
pr_err("QAT: cdev add failed\n");
goto err_class_destr;
}
- drv_device = device_create(adt_ctl_drv.drv_class, NULL,
- MKDEV(adt_ctl_drv.major, 0),
+ drv_device = device_create(adf_ctl_drv.drv_class, NULL,
+ MKDEV(adf_ctl_drv.major, 0),
NULL, DEVICE_NAME);
if (IS_ERR(drv_device)) {
pr_err("QAT: failed to create device\n");
@@ -118,9 +118,9 @@ static int adf_chr_drv_create(void)
}
return 0;
err_cdev_del:
- cdev_del(&adt_ctl_drv.drv_cdev);
+ cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
- class_destroy(adt_ctl_drv.drv_class);
+ class_destroy(adf_ctl_drv.drv_class);
err_chrdev_unreg:
unregister_chrdev_region(dev_id, 1);
return -EFAULT;
@@ -159,14 +159,16 @@ static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
if (adf_cfg_add_key_value_param(accel_dev, section,
key_val->key, (void *)val,
key_val->type)) {
- pr_err("QAT: failed to add keyvalue.\n");
+ dev_err(&GET_DEV(accel_dev),
+ "failed to add hex keyvalue.\n");
return -EFAULT;
}
} else {
if (adf_cfg_add_key_value_param(accel_dev, section,
key_val->key, key_val->val,
key_val->type)) {
- pr_err("QAT: failed to add keyvalue.\n");
+ dev_err(&GET_DEV(accel_dev),
+ "failed to add keyvalue.\n");
return -EFAULT;
}
}
@@ -185,12 +187,14 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
while (section_head) {
if (copy_from_user(&section, (void __user *)section_head,
sizeof(*section_head))) {
- pr_err("QAT: failed to copy section info\n");
+ dev_err(&GET_DEV(accel_dev),
+ "failed to copy section info\n");
goto out_err;
}
if (adf_cfg_section_add(accel_dev, section.name)) {
- pr_err("QAT: failed to add section.\n");
+ dev_err(&GET_DEV(accel_dev),
+ "failed to add section.\n");
goto out_err;
}
@@ -199,7 +203,8 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
while (params_head) {
if (copy_from_user(&key_val, (void __user *)params_head,
sizeof(key_val))) {
- pr_err("QAT: Failed to copy keyvalue.\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to copy keyvalue.\n");
goto out_err;
}
if (adf_add_key_value_data(accel_dev, section.name,
@@ -258,8 +263,9 @@ static int adf_ctl_is_device_in_use(int id)
if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
- pr_info("QAT: device qat_dev%d is busy\n",
- dev->accel_id);
+ dev_info(&GET_DEV(dev),
+ "device qat_dev%d is busy\n",
+ dev->accel_id);
return -EBUSY;
}
}
@@ -280,7 +286,8 @@ static int adf_ctl_stop_devices(uint32_t id)
continue;
if (adf_dev_stop(accel_dev)) {
- pr_err("QAT: Failed to stop qat_dev%d\n", id);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to stop qat_dev%d\n", id);
ret = -EFAULT;
} else {
adf_dev_shutdown(accel_dev);
@@ -343,17 +350,20 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
}
if (!adf_dev_started(accel_dev)) {
- pr_info("QAT: Starting acceleration device qat_dev%d.\n",
- ctl_data->device_id);
+ dev_info(&GET_DEV(accel_dev),
+ "Starting acceleration device qat_dev%d.\n",
+ ctl_data->device_id);
ret = adf_dev_init(accel_dev);
if (!ret)
ret = adf_dev_start(accel_dev);
} else {
- pr_info("QAT: Acceleration device qat_dev%d already started.\n",
- ctl_data->device_id);
+ dev_info(&GET_DEV(accel_dev),
+ "Acceleration device qat_dev%d already started.\n",
+ ctl_data->device_id);
}
if (ret) {
- pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
+ dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+ ctl_data->device_id);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
}
@@ -408,7 +418,7 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
if (copy_to_user((void __user *)arg, &dev_info,
sizeof(struct adf_dev_status_info))) {
- pr_err("QAT: failed to copy status.\n");
+ dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
return -EFAULT;
}
return 0;
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 4a0a829d4500..3f0ff9e7d840 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -67,7 +67,8 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
struct list_head *itr;
if (num_devices == ADF_MAX_DEVICES) {
- pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES);
+ dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
+ ADF_MAX_DEVICES);
return -EFAULT;
}
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 8f0ca498ab87..245f43237a2d 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -124,12 +124,12 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
if (!hw_data) {
dev_err(&GET_DEV(accel_dev),
- "QAT: Failed to init device - hw_data not set\n");
+ "Failed to init device - hw_data not set\n");
return -EFAULT;
}
if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
- pr_info("QAT: Device not configured\n");
+ dev_err(&GET_DEV(accel_dev), "Device not configured\n");
return -EFAULT;
}
@@ -151,20 +151,21 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
hw_data->enable_ints(accel_dev);
if (adf_ae_init(accel_dev)) {
- pr_err("QAT: Failed to initialise Acceleration Engine\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to initialise Acceleration Engine\n");
return -EFAULT;
}
set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
if (adf_ae_fw_load(accel_dev)) {
- pr_err("QAT: Failed to load acceleration FW\n");
- adf_ae_fw_release(accel_dev);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load acceleration FW\n");
return -EFAULT;
}
set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
if (hw_data->alloc_irq(accel_dev)) {
- pr_err("QAT: Failed to allocate interrupts\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
return -EFAULT;
}
set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
@@ -179,8 +180,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
if (!service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
- pr_err("QAT: Failed to initialise service %s\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to initialise service %s\n",
+ service->name);
return -EFAULT;
}
set_bit(accel_dev->accel_id, &service->init_status);
@@ -190,8 +192,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
if (service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
- pr_err("QAT: Failed to initialise service %s\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to initialise service %s\n",
+ service->name);
return -EFAULT;
}
set_bit(accel_dev->accel_id, &service->init_status);
@@ -221,7 +224,7 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
if (adf_ae_start(accel_dev)) {
- pr_err("QAT: AE Start Failed\n");
+ dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
return -EFAULT;
}
set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
@@ -231,8 +234,9 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
if (!service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
- pr_err("QAT: Failed to start service %s\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to start service %s\n",
+ service->name);
return -EFAULT;
}
set_bit(accel_dev->accel_id, &service->start_status);
@@ -242,8 +246,9 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
if (service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
- pr_err("QAT: Failed to start service %s\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to start service %s\n",
+ service->name);
return -EFAULT;
}
set_bit(accel_dev->accel_id, &service->start_status);
@@ -253,7 +258,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (qat_algs_register()) {
- pr_err("QAT: Failed to register crypto algs\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to register crypto algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
return -EFAULT;
@@ -287,7 +293,8 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (qat_algs_unregister())
- pr_err("QAT: Failed to unregister crypto algs\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to unregister crypto algs\n");
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
@@ -310,8 +317,9 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (!test_bit(accel_dev->accel_id, &service->start_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_STOP))
- pr_err("QAT: Failed to shutdown service %s\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to shutdown service %s\n",
+ service->name);
else
clear_bit(accel_dev->accel_id, &service->start_status);
}
@@ -321,7 +329,7 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
if (adf_ae_stop(accel_dev))
- pr_err("QAT: failed to stop AE\n");
+ dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
else
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
@@ -350,16 +358,14 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
}
if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
- if (adf_ae_fw_release(accel_dev))
- pr_err("QAT: Failed to release the ucode\n");
- else
- clear_bit(ADF_STATUS_AE_UCODE_LOADED,
- &accel_dev->status);
+ adf_ae_fw_release(accel_dev);
+ clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
}
if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
if (adf_ae_shutdown(accel_dev))
- pr_err("QAT: Failed to shutdown Accel Engine\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to shutdown Accel Engine\n");
else
clear_bit(ADF_STATUS_AE_INITIALISED,
&accel_dev->status);
@@ -372,8 +378,9 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (!test_bit(accel_dev->accel_id, &service->init_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
- pr_err("QAT: Failed to shutdown service %s\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to shutdown service %s\n",
+ service->name);
else
clear_bit(accel_dev->accel_id, &service->init_status);
}
@@ -384,8 +391,9 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (!test_bit(accel_dev->accel_id, &service->init_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
- pr_err("QAT: Failed to shutdown service %s\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to shutdown service %s\n",
+ service->name);
else
clear_bit(accel_dev->accel_id, &service->init_status);
}
@@ -419,16 +427,18 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
if (service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
- pr_err("QAT: Failed to restart service %s.\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to restart service %s.\n",
+ service->name);
}
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (!service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
- pr_err("QAT: Failed to restart service %s.\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to restart service %s.\n",
+ service->name);
}
return 0;
}
@@ -443,16 +453,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
if (service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
- pr_err("QAT: Failed to restart service %s.\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to restart service %s.\n",
+ service->name);
}
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (!service->admin)
continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
- pr_err("QAT: Failed to restart service %s.\n",
- service->name);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to restart service %s.\n",
+ service->name);
}
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 7dd54aaee9fa..ccec327489da 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -195,7 +195,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
memset(ring->base_addr, 0x7F, ring_size_bytes);
/* The base_addr has to be aligned to the size of the buffer */
if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
- pr_err("QAT: Ring address not aligned\n");
+ dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
ring->base_addr, ring->dma_addr);
return -EFAULT;
@@ -242,32 +242,33 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
int ret;
if (bank_num >= GET_MAX_BANKS(accel_dev)) {
- pr_err("QAT: Invalid bank number\n");
+ dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
return -EFAULT;
}
if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
- pr_err("QAT: Invalid msg size\n");
+ dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
return -EFAULT;
}
if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
- pr_err("QAT: Invalid ring size for given msg size\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid ring size for given msg size\n");
return -EFAULT;
}
if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
- pr_err("QAT: Section %s, no such entry : %s\n",
- section, ring_name);
+ dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
+ section, ring_name);
return -EFAULT;
}
if (kstrtouint(val, 10, &ring_num)) {
- pr_err("QAT: Can't get ring number\n");
+ dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
return -EFAULT;
}
bank = &transport_data->banks[bank_num];
if (adf_reserve_ring(bank, ring_num)) {
- pr_err("QAT: Ring %d, %s already exists.\n",
- ring_num, ring_name);
+ dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
+ ring_num, ring_name);
return -EFAULT;
}
ring = &bank->rings[ring_num];
@@ -287,7 +288,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
accel_dev->hw_device->hw_arb_ring_enable(ring);
if (adf_ring_debugfs_add(ring, ring_name)) {
- pr_err("QAT: Couldn't add ring debugfs entry\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't add ring debugfs entry\n");
ret = -EFAULT;
goto err;
}
@@ -428,7 +430,8 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
goto err;
} else {
if (i < hw_data->tx_rx_gap) {
- pr_err("QAT: Invalid tx rings mask config\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid tx rings mask config\n");
goto err;
}
tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
@@ -436,7 +439,8 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
}
}
if (adf_bank_debugfs_add(bank)) {
- pr_err("QAT: Failed to add bank debugfs entry\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add bank debugfs entry\n");
goto err;
}
@@ -492,7 +496,8 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
etr_data->debug = debugfs_create_dir("transport",
accel_dev->debugfs_dir);
if (!etr_data->debug) {
- pr_err("QAT: Unable to create transport debugfs entry\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to create transport debugfs entry\n");
ret = -ENOENT;
goto err_bank_debug;
}
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 6b6974553514..e41986967294 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -100,6 +100,8 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
empty = READ_CSR_E_STAT(csr, bank->bank_number);
seq_puts(sfile, "------- Ring configuration -------\n");
+ seq_printf(sfile, "ring name: %s\n",
+ ring->ring_debug->ring_name);
seq_printf(sfile, "ring num %d, bank num %d\n",
ring->ring_number, ring->bank->bank_number);
seq_printf(sfile, "head %x, tail %x, empty: %d\n",
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index 68f191b653b0..121d5e6e46ca 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -145,7 +145,7 @@ struct icp_qat_hw_auth_setup {
};
#define QAT_HW_DEFAULT_ALIGNMENT 8
-#define QAT_HW_ROUND_UP(val, n) (((val) + ((n)-1)) & (~(n-1)))
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
#define ICP_QAT_HW_NULL_STATE1_SZ 32
#define ICP_QAT_HW_MD5_STATE1_SZ 16
#define ICP_QAT_HW_SHA1_STATE1_SZ 20
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 828f2a686aab..3bd705ca5973 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -110,13 +110,13 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list);
if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
- dev_to_node(&GET_DEV(accel_dev)) < 0)
- && adf_dev_started(accel_dev))
+ dev_to_node(&GET_DEV(accel_dev)) < 0) &&
+ adf_dev_started(accel_dev))
break;
accel_dev = NULL;
}
if (!accel_dev) {
- pr_err("QAT: Could not find device on node %d\n", node);
+ pr_err("QAT: Could not find a device on node %d\n", node);
accel_dev = adf_devmgr_get_first();
}
if (!accel_dev || !adf_dev_started(accel_dev))
@@ -137,7 +137,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
if (atomic_add_return(1, &inst_best->refctr) == 1) {
if (adf_dev_get(accel_dev)) {
atomic_dec(&inst_best->refctr);
- pr_err("QAT: Could increment dev refctr\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Could not increment dev refctr\n");
return NULL;
}
}
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index b818c19713bf..274ff7e9de6e 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -434,8 +434,8 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
}
-#define ESRAM_AUTO_TINIT (1<<2)
-#define ESRAM_AUTO_TINIT_DONE (1<<3)
+#define ESRAM_AUTO_TINIT BIT(2)
+#define ESRAM_AUTO_TINIT_DONE BIT(3)
#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
@@ -718,7 +718,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
handle->hal_handle->ae_max_num = max_en_ae_id + 1;
/* take all AEs out of reset */
if (qat_hal_clr_reset(handle)) {
- pr_err("QAT: qat_hal_clr_reset error\n");
+ dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
goto out_err;
}
if (qat_hal_clear_gpr(handle))
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
index 53c491b59f07..e4666065c399 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
@@ -93,7 +93,8 @@ int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
memcpy(out, admin->virt_addr + offset +
ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
else
- pr_err("QAT: Failed to send admin msg to accelerator\n");
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send admin msg to accelerator\n");
mutex_unlock(&admin->lock);
return received ? 0 : -EFAULT;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 6a735d5c0e37..b1386922d7a2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -150,7 +150,8 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
*arb_map_config = thrd_to_arb_map_sku6;
break;
default:
- pr_err("QAT: The configuration doesn't match any SKU");
+ dev_err(&GET_DEV(accel_dev),
+ "The configuration doesn't match any SKU");
*arb_map_config = NULL;
}
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 01e0be21e93a..25269a9f24a2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -73,11 +73,11 @@
/* Error detection and correction */
#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28)
-#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12)
+#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_DH895XCC_ERRSSMSH_EN (1 << 3)
+#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
/* Admin Messages Registers */
#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 8ffdb95c9804..9decea2779c6 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -236,7 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
- dev_to_node(&pdev->dev));
+ dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
@@ -379,7 +379,7 @@ out_err:
return ret;
}
-static void __exit adf_remove(struct pci_dev *pdev)
+static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index fe8f89697ad8..0d03c109c2d3 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -73,7 +73,7 @@ static int adf_enable_msix(struct adf_accel_dev *accel_dev)
if (pci_enable_msix_exact(pci_dev_info->pci_dev,
pci_dev_info->msix_entries.entries,
msix_num_entries)) {
- pr_err("QAT: Failed to enable MSIX IRQ\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n");
return -EFAULT;
}
return 0;
@@ -97,7 +97,8 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
- pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id);
+ dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+ accel_dev->accel_id);
return IRQ_HANDLED;
}
@@ -121,8 +122,9 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
ret = request_irq(msixe[i].vector,
adf_msix_isr_bundle, 0, name, bank);
if (ret) {
- pr_err("QAT: failed to enable irq %d for %s\n",
- msixe[i].vector, name);
+ dev_err(&GET_DEV(accel_dev),
+ "failed to enable irq %d for %s\n",
+ msixe[i].vector, name);
return ret;
}
@@ -136,8 +138,9 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
"qat%d-ae-cluster", accel_dev->accel_id);
ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
if (ret) {
- pr_err("QAT: failed to enable irq %d, for %s\n",
- msixe[i].vector, name);
+ dev_err(&GET_DEV(accel_dev),
+ "failed to enable irq %d, for %s\n",
+ msixe[i].vector, name);
return ret;
}
return ret;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 290a7f0a681f..6be377f6b9e7 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -479,6 +479,7 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
struct scatterlist *sg;
int ret;
int i, j;
+ int idx = 0;
/* Copy new key if necessary */
if (ctx->flags & FLAGS_NEW_KEY) {
@@ -486,17 +487,20 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
ctx->flags &= ~FLAGS_NEW_KEY;
if (dev->flags & FLAGS_CBC) {
- dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
- dev->hw_desc[0]->p1 = dev->iv_phys_base;
+ dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+ dev->hw_desc[idx]->p1 = dev->iv_phys_base;
} else {
- dev->hw_desc[0]->len1 = 0;
- dev->hw_desc[0]->p1 = 0;
+ dev->hw_desc[idx]->len1 = 0;
+ dev->hw_desc[idx]->p1 = 0;
}
- dev->hw_desc[0]->len2 = ctx->keylen;
- dev->hw_desc[0]->p2 = dev->key_phys_base;
- dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+ dev->hw_desc[idx]->len2 = ctx->keylen;
+ dev->hw_desc[idx]->p2 = dev->key_phys_base;
+ dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+
+ dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+
+ idx++;
}
- dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);
dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
@@ -520,7 +524,7 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
}
/* Create input links */
- dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
+ dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
sg = dev->in_sg;
for (i = 0; i < dev->nb_in_sg; i++) {
dev->hw_link[i]->len = sg->length;
@@ -534,7 +538,7 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
}
/* Create output links */
- dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
+ dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
sg = dev->out_sg;
for (j = i; j < dev->nb_out_sg + i; j++) {
dev->hw_link[j]->len = sg->length;
@@ -548,10 +552,10 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
}
/* Fill remaining fields of hw_desc[1] */
- dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
- dev->hw_desc[1]->len1 = dev->total;
- dev->hw_desc[1]->len2 = dev->total;
- dev->hw_desc[1]->next = 0;
+ dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
+ dev->hw_desc[idx]->len1 = dev->total;
+ dev->hw_desc[idx]->len2 = dev->total;
+ dev->hw_desc[idx]->next = 0;
sahara_dump_descriptors(dev);
sahara_dump_links(dev);
@@ -576,6 +580,7 @@ static int sahara_aes_process(struct ablkcipher_request *req)
struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx;
int ret;
+ unsigned long timeout;
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
@@ -601,10 +606,12 @@ static int sahara_aes_process(struct ablkcipher_request *req)
reinit_completion(&dev->dma_completion);
ret = sahara_hw_descriptor_create(dev);
+ if (ret)
+ return -EINVAL;
- ret = wait_for_completion_timeout(&dev->dma_completion,
+ timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!ret) {
+ if (!timeout) {
dev_err(dev->device, "AES timeout\n");
return -ETIMEDOUT;
}
@@ -1044,7 +1051,8 @@ static int sahara_sha_process(struct ahash_request *req)
{
struct sahara_dev *dev = dev_ptr;
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
- int ret = -EINPROGRESS;
+ int ret;
+ unsigned long timeout;
ret = sahara_sha_prepare_request(req);
if (!ret)
@@ -1070,9 +1078,9 @@ static int sahara_sha_process(struct ahash_request *req)
sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
- ret = wait_for_completion_timeout(&dev->dma_completion,
+ timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!ret) {
+ if (!timeout) {
dev_err(dev->device, "SHA timeout\n");
return -ETIMEDOUT;
}
@@ -1092,15 +1100,20 @@ static int sahara_queue_manage(void *data)
{
struct sahara_dev *dev = (struct sahara_dev *)data;
struct crypto_async_request *async_req;
+ struct crypto_async_request *backlog;
int ret = 0;
do {
__set_current_state(TASK_INTERRUPTIBLE);
mutex_lock(&dev->queue_mutex);
+ backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
mutex_unlock(&dev->queue_mutex);
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
if (async_req) {
if (crypto_tfm_alg_type(async_req->tfm) ==
CRYPTO_ALG_TYPE_AHASH) {
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index ebbae8d3ce0d..857414afa29a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -637,8 +637,6 @@ static void talitos_unregister_rng(struct device *dev)
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define MD5_BLOCK_SIZE 64
-
struct talitos_ctx {
struct device *dev;
int ch;
@@ -2195,7 +2193,7 @@ static struct talitos_alg_template driver_algs[] = {
.halg.base = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
- .cra_blocksize = MD5_BLOCK_SIZE,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
}
@@ -2285,7 +2283,7 @@ static struct talitos_alg_template driver_algs[] = {
.halg.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
- .cra_blocksize = MD5_BLOCK_SIZE,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
}
@@ -2706,20 +2704,16 @@ static int talitos_probe(struct platform_device *ofdev)
goto err_out;
}
+ priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
+
for (i = 0; i < priv->num_channels; i++) {
priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
if (!priv->irq[1] || !(i & 1))
priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
- }
- for (i = 0; i < priv->num_channels; i++) {
spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->chan[i].tail_lock);
- }
- priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
-
- for (i = 0; i < priv->num_channels; i++) {
priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
priv->fifo_len, GFP_KERNEL);
if (!priv->chan[i].fifo) {
@@ -2727,11 +2721,10 @@ static int talitos_probe(struct platform_device *ofdev)
err = -ENOMEM;
goto err_out;
}
- }
- for (i = 0; i < priv->num_channels; i++)
atomic_set(&priv->chan[i].submit_count,
-(priv->chfifo_len - 1));
+ }
dma_set_mask(dev, DMA_BIT_MASK(36));
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 187a8fd7eee7..5f5f360628fc 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -184,7 +184,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(ctx->device->dev,
- "%s: device_prep_slave_sg() failed!\n", __func__);
+ "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
return -EFAULT;
}
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
new file mode 100644
index 000000000000..771babf16aa0
--- /dev/null
+++ b/drivers/crypto/vmx/Kconfig
@@ -0,0 +1,8 @@
+config CRYPTO_DEV_VMX_ENCRYPT
+ tristate "Encryption acceleration support on P8 CPU"
+ depends on PPC64 && CRYPTO_DEV_VMX
+ default y
+ help
+ Support for VMX cryptographic acceleration instructions on the POWER8
+ CPU. This module provides acceleration for AES and GHASH in hardware.
+ If you choose 'M' here, the module will be called vmx-crypto.
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
new file mode 100644
index 000000000000..c699c6e6c82e
--- /dev/null
+++ b/drivers/crypto/vmx/Makefile
@@ -0,0 +1,19 @@
+obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
+vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o ghash.o
+
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+TARGET := linux-ppc64le
+else
+TARGET := linux-ppc64
+endif
+
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
+
+$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
+ $(call cmd,perl)
+
+$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
+ $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
new file mode 100644
index 000000000000..ab300ea19434
--- /dev/null
+++ b/drivers/crypto/vmx/aes.c
@@ -0,0 +1,139 @@
+/**
+ * AES routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_ctx {
+ struct crypto_cipher *fallback;
+ struct aes_key enc_key;
+ struct aes_key dec_key;
+};
+
+static int p8_aes_init(struct crypto_tfm *tfm)
+{
+ const char *alg;
+ struct crypto_cipher *fallback;
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!(alg = crypto_tfm_alg_name(tfm))) {
+ printk(KERN_ERR "Failed to get algorithm name.\n");
+ return -ENOENT;
+ }
+
+ fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+ printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_cipher_set_flags(fallback,
+ crypto_cipher_get_flags((struct crypto_cipher *) tfm));
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void p8_aes_exit(struct crypto_tfm *tfm)
+{
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ crypto_free_cipher(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+}
+
+static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ int ret;
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pagefault_disable();
+ enable_kernel_altivec();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ pagefault_enable();
+
+ ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
+ return ret;
+}
+
+static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (in_interrupt()) {
+ crypto_cipher_encrypt_one(ctx->fallback, dst, src);
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
+ aes_p8_encrypt(src, dst, &ctx->enc_key);
+ pagefault_enable();
+ }
+}
+
+static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (in_interrupt()) {
+ crypto_cipher_decrypt_one(ctx->fallback, dst, src);
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
+ aes_p8_decrypt(src, dst, &ctx->dec_key);
+ pagefault_enable();
+ }
+}
+
+struct crypto_alg p8_aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "p8_aes",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 1000,
+ .cra_type = NULL,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = 0,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_aes_ctx),
+ .cra_init = p8_aes_init,
+ .cra_exit = p8_aes_exit,
+ .cra_cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = p8_aes_setkey,
+ .cia_encrypt = p8_aes_encrypt,
+ .cia_decrypt = p8_aes_decrypt,
+ },
+};
+
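
The p8_aes cipher above registers under the generic name "aes" with cra_priority 1000, so single-block users pick it up automatically once the module is loaded on a POWER8 machine, and fall back to another implementation otherwise. A minimal usage sketch, assuming a 128-bit key; the function name and buffers are illustrative, not part of the patch:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <crypto/aes.h>

    /* Sketch only: encrypt one 16-byte block through whatever "aes"
     * implementation has the highest priority (p8_aes when available). */
    static int example_aes_one_block(const u8 key[AES_KEYSIZE_128],
                                     const u8 in[AES_BLOCK_SIZE],
                                     u8 out[AES_BLOCK_SIZE])
    {
            struct crypto_cipher *tfm;
            int err;

            tfm = crypto_alloc_cipher("aes", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
            if (!err)
                    crypto_cipher_encrypt_one(tfm, out, in);

            crypto_free_cipher(tfm);
            return err;
    }
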
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
new file mode 100644
index 000000000000..1a559b7dddb5
--- /dev/null
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -0,0 +1,184 @@
+/**
+ * AES CBC routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_cbc_ctx {
+ struct crypto_blkcipher *fallback;
+ struct aes_key enc_key;
+ struct aes_key dec_key;
+};
+
+static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+{
+ const char *alg;
+ struct crypto_blkcipher *fallback;
+ struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!(alg = crypto_tfm_alg_name(tfm))) {
+ printk(KERN_ERR "Failed to get algorithm name.\n");
+ return -ENOENT;
+ }
+
+ fallback = crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+ printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_blkcipher_set_flags(fallback,
+ crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm));
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
+{
+ struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ crypto_free_blkcipher(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+}
+
+static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ int ret;
+ struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pagefault_disable();
+ enable_kernel_altivec();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ pagefault_enable();
+
+ ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ return ret;
+}
+
+static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ int ret;
+ struct blkcipher_walk walk;
+ struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
+ crypto_blkcipher_tfm(desc->tfm));
+ struct blkcipher_desc fallback_desc = {
+ .tfm = ctx->fallback,
+ .info = desc->info,
+ .flags = desc->flags
+ };
+
+ if (in_interrupt()) {
+ ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+ while ((nbytes = walk.nbytes)) {
+ aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ pagefault_enable();
+ }
+
+ return ret;
+}
+
+static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ int ret;
+ struct blkcipher_walk walk;
+ struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
+ crypto_blkcipher_tfm(desc->tfm));
+ struct blkcipher_desc fallback_desc = {
+ .tfm = ctx->fallback,
+ .info = desc->info,
+ .flags = desc->flags
+ };
+
+ if (in_interrupt()) {
+ ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+ while ((nbytes = walk.nbytes)) {
+ aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ pagefault_enable();
+ }
+
+ return ret;
+}
+
+
+struct crypto_alg p8_aes_cbc_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "p8_aes_cbc",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 1000,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = 0,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+ .cra_init = p8_aes_cbc_init,
+ .cra_exit = p8_aes_cbc_exit,
+ .cra_blkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = p8_aes_cbc_setkey,
+ .encrypt = p8_aes_cbc_encrypt,
+ .decrypt = p8_aes_cbc_decrypt,
+ },
+};
+
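
p8_aes_cbc plugs into the synchronous blkcipher interface, so a caller requesting "cbc(aes)" is routed to it by priority. A minimal usage sketch, assuming an in-place buffer whose length is a multiple of AES_BLOCK_SIZE; the helper name and 256-bit key size are illustrative only:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <crypto/aes.h>

    /* Sketch only: one-shot CBC encryption of an in-place buffer via the
     * blkcipher interface this driver registers against. */
    static int example_cbc_encrypt(u8 *buf, unsigned int len,
                                   const u8 key[AES_KEYSIZE_256],
                                   const u8 iv[AES_BLOCK_SIZE])
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_256);
            if (err)
                    goto out;

            crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);

            desc.tfm = tfm;
            desc.flags = 0;
            sg_init_one(&sg, buf, len);     /* len must be a block multiple */
            err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
    out:
            crypto_free_blkcipher(tfm);
            return err;
    }
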
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
new file mode 100644
index 000000000000..96dbee4bf4a6
--- /dev/null
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -0,0 +1,167 @@
+/**
+ * AES CTR routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include "aesp8-ppc.h"
+
+struct p8_aes_ctr_ctx {
+ struct crypto_blkcipher *fallback;
+ struct aes_key enc_key;
+};
+
+static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+{
+ const char *alg;
+ struct crypto_blkcipher *fallback;
+ struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!(alg = crypto_tfm_alg_name(tfm))) {
+ printk(KERN_ERR "Failed to get algorithm name.\n");
+ return -ENOENT;
+ }
+
+ fallback = crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+ printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_blkcipher_set_flags(fallback,
+ crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm));
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
+{
+ struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ crypto_free_blkcipher(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+}
+
+static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ int ret;
+ struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pagefault_disable();
+ enable_kernel_altivec();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ pagefault_enable();
+
+ ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ return ret;
+}
+
+static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
+ struct blkcipher_walk *walk)
+{
+ u8 *ctrblk = walk->iv;
+ u8 keystream[AES_BLOCK_SIZE];
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ unsigned int nbytes = walk->nbytes;
+
+ pagefault_disable();
+ enable_kernel_altivec();
+ aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+ pagefault_enable();
+
+ crypto_xor(keystream, src, nbytes);
+ memcpy(dst, keystream, nbytes);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
+static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ int ret;
+ struct blkcipher_walk walk;
+ struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(
+ crypto_blkcipher_tfm(desc->tfm));
+ struct blkcipher_desc fallback_desc = {
+ .tfm = ctx->fallback,
+ .info = desc->info,
+ .flags = desc->flags
+ };
+
+ if (in_interrupt()) {
+ ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
+ } else {
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ pagefault_disable();
+ enable_kernel_altivec();
+ aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
+ (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
+ pagefault_enable();
+
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
+ p8_aes_ctr_final(ctx, &walk);
+ ret = blkcipher_walk_done(desc, &walk, 0);
+ }
+ }
+
+ return ret;
+}
+
+struct crypto_alg p8_aes_ctr_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "p8_aes_ctr",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 1000,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = 0,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+ .cra_init = p8_aes_ctr_init,
+ .cra_exit = p8_aes_ctr_exit,
+ .cra_blkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = p8_aes_ctr_setkey,
+ .encrypt = p8_aes_ctr_crypt,
+ .decrypt = p8_aes_ctr_crypt,
+ },
+};
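
p8_aes_ctr_final() handles the trailing partial block the usual CTR way: encrypt the counter block once, XOR only the bytes that remain, then bump the counter. A minimal C sketch of that step, with aes_encrypt_block() as a stand-in for the real cipher call:

    #include <linux/types.h>
    #include <crypto/aes.h>
    #include <crypto/algapi.h>      /* crypto_inc() */

    /* Sketch only: CTR tail handling for nbytes < AES_BLOCK_SIZE. */
    static void ctr_final_sketch(void (*aes_encrypt_block)(const u8 *in, u8 *out),
                                 u8 ctr[AES_BLOCK_SIZE],
                                 const u8 *src, u8 *dst, unsigned int nbytes)
    {
            u8 keystream[AES_BLOCK_SIZE];
            unsigned int i;

            aes_encrypt_block(ctr, keystream);      /* one keystream block */
            for (i = 0; i < nbytes; i++)
                    dst[i] = src[i] ^ keystream[i]; /* XOR only the tail bytes */
            crypto_inc(ctr, AES_BLOCK_SIZE);        /* big-endian counter + 1 */
    }
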
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
new file mode 100644
index 000000000000..e963945a83e1
--- /dev/null
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -0,0 +1,20 @@
+#include <linux/types.h>
+#include <crypto/aes.h>
+
+#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
+
+struct aes_key {
+ u8 key[AES_MAX_KEYLENGTH];
+ int rounds;
+};
+
+int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+ struct aes_key *key);
+int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
+ struct aes_key *key);
+void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key);
+void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key);
+void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key, u8 *iv, const int enc);
+void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
+ size_t len, const struct aes_key *key, const u8 *iv);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
new file mode 100644
index 000000000000..6c5c20c6108e
--- /dev/null
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -0,0 +1,1930 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for AES instructions as per PowerISA
+# specification version 2.07, first implemented by the POWER8 processor.
+# The module is endian-agnostic in the sense that it supports both big-
+# and little-endian cases. Data alignment in parallelizable modes is
+# handled with VSX loads and stores, which implies that the MSR.VSX flag
+# is set. It should also be noted that the ISA specification doesn't
+# prohibit alignment exceptions for these instructions on page
+# boundaries. Initially alignment was handled in the pure AltiVec/VMX
+# way [data is aligned programmatically, which in turn guarantees
+# exception-free execution], but that turned out to hamper performance
+# when vcipher instructions are interleaved. It's reckoned that the
+# eventual misalignment penalties at page boundaries are on average
+# lower than the additional overhead of the pure AltiVec approach.
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T =8;
+ $LRSAVE =2*$SIZE_T;
+ $STU ="stdu";
+ $POP ="ld";
+ $PUSH ="std";
+ $UCMP ="cmpld";
+ $SHL ="sldi";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T =4;
+ $LRSAVE =$SIZE_T;
+ $STU ="stwu";
+ $POP ="lwz";
+ $PUSH ="stw";
+ $UCMP ="cmplw";
+ $SHL ="slwi";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=8*$SIZE_T;
+$prefix="aes_p8";
+
+$sp="r1";
+$vrsave="r12";
+
+#########################################################################
+{{{ # Key setup procedures #
+my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
+my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
+my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
+
+$code.=<<___;
+.machine "any"
+
+.text
+
+.align 7
+rcon:
+.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 ?rev
+.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev
+.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev
+.long 0,0,0,0 ?asis
+Lconsts:
+ mflr r0
+ bcl 20,31,\$+4
+ mflr $ptr #vvvvv "distance between . and rcon
+ addi $ptr,$ptr,-0x48
+ mtlr r0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+.asciz "AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl .${prefix}_set_encrypt_key
+Lset_encrypt_key:
+ mflr r11
+ $PUSH r11,$LRSAVE($sp)
+
+ li $ptr,-1
+ ${UCMP}i $inp,0
+ beq- Lenc_key_abort # if ($inp==0) return -1;
+ ${UCMP}i $out,0
+ beq- Lenc_key_abort # if ($out==0) return -1;
+ li $ptr,-2
+ cmpwi $bits,128
+ blt- Lenc_key_abort
+ cmpwi $bits,256
+ bgt- Lenc_key_abort
+ andi. r0,$bits,0x3f
+ bne- Lenc_key_abort
+
+ lis r0,0xfff0
+ mfspr $vrsave,256
+ mtspr 256,r0
+
+ bl Lconsts
+ mtlr r11
+
+ neg r9,$inp
+ lvx $in0,0,$inp
+ addi $inp,$inp,15 # 15 is not typo
+ lvsr $key,0,r9 # borrow $key
+ li r8,0x20
+ cmpwi $bits,192
+ lvx $in1,0,$inp
+ le?vspltisb $mask,0x0f # borrow $mask
+ lvx $rcon,0,$ptr
+ le?vxor $key,$key,$mask # adjust for byte swap
+ lvx $mask,r8,$ptr
+ addi $ptr,$ptr,0x10
+ vperm $in0,$in0,$in1,$key # align [and byte swap in LE]
+ li $cnt,8
+ vxor $zero,$zero,$zero
+ mtctr $cnt
+
+ ?lvsr $outperm,0,$out
+ vspltisb $outmask,-1
+ lvx $outhead,0,$out
+ ?vperm $outmask,$zero,$outmask,$outperm
+
+ blt Loop128
+ addi $inp,$inp,8
+ beq L192
+ addi $inp,$inp,8
+ b L256
+
+.align 4
+Loop128:
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+ bdnz Loop128
+
+ lvx $rcon,0,$ptr # last two round keys
+
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vxor $in0,$in0,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,0x50
+
+ li $rounds,10
+ b Ldone
+
+.align 4
+L192:
+ lvx $tmp,0,$inp
+ li $cnt,4
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $out,$out,16
+ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
+ vspltisb $key,8 # borrow $key
+ mtctr $cnt
+ vsububm $mask,$mask,$key # adjust the mask
+
+Loop192:
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vcipherlast $key,$key,$rcon
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+
+ vsldoi $stage,$zero,$in1,8
+ vspltw $tmp,$in0,3
+ vxor $tmp,$tmp,$in1
+ vsldoi $in1,$zero,$in1,12 # >>32
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in1,$in1,$tmp
+ vxor $in0,$in0,$key
+ vxor $in1,$in1,$key
+ vsldoi $stage,$stage,$in0,8
+
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$stage,$stage,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vsldoi $stage,$in0,$in1,8
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vperm $outtail,$stage,$stage,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vspltw $tmp,$in0,3
+ vxor $tmp,$tmp,$in1
+ vsldoi $in1,$zero,$in1,12 # >>32
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in1,$in1,$tmp
+ vxor $in0,$in0,$key
+ vxor $in1,$in1,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,16
+ bdnz Loop192
+
+ li $rounds,12
+ addi $out,$out,0x20
+ b Ldone
+
+.align 4
+L256:
+ lvx $tmp,0,$inp
+ li $cnt,7
+ li $rounds,14
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $out,$out,16
+ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
+ mtctr $cnt
+
+Loop256:
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in1,$in1,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,16
+ bdz Ldone
+
+ vspltw $key,$in0,3 # just splat
+ vsldoi $tmp,$zero,$in1,12 # >>32
+ vsbox $key,$key
+
+ vxor $in1,$in1,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in1,$in1,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in1,$in1,$tmp
+
+ vxor $in1,$in1,$key
+ b Loop256
+
+.align 4
+Ldone:
+ lvx $in1,0,$inp # redundant in aligned case
+ vsel $in1,$outhead,$in1,$outmask
+ stvx $in1,0,$inp
+ li $ptr,0
+ mtspr 256,$vrsave
+ stw $rounds,0($out)
+
+Lenc_key_abort:
+ mr r3,$ptr
+ blr
+ .long 0
+ .byte 0,12,0x14,1,0,0,3,0
+ .long 0
+.size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
+
+.globl .${prefix}_set_decrypt_key
+ $STU $sp,-$FRAME($sp)
+ mflr r10
+ $PUSH r10,$FRAME+$LRSAVE($sp)
+ bl Lset_encrypt_key
+ mtlr r10
+
+ cmpwi r3,0
+ bne- Ldec_key_abort
+
+ slwi $cnt,$rounds,4
+ subi $inp,$out,240 # first round key
+ srwi $rounds,$rounds,1
+ add $out,$inp,$cnt # last round key
+ mtctr $rounds
+
+Ldeckey:
+ lwz r0, 0($inp)
+ lwz r6, 4($inp)
+ lwz r7, 8($inp)
+ lwz r8, 12($inp)
+ addi $inp,$inp,16
+ lwz r9, 0($out)
+ lwz r10,4($out)
+ lwz r11,8($out)
+ lwz r12,12($out)
+ stw r0, 0($out)
+ stw r6, 4($out)
+ stw r7, 8($out)
+ stw r8, 12($out)
+ subi $out,$out,16
+ stw r9, -16($inp)
+ stw r10,-12($inp)
+ stw r11,-8($inp)
+ stw r12,-4($inp)
+ bdnz Ldeckey
+
+ xor r3,r3,r3 # return value
+Ldec_key_abort:
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,1,0x80,0,3,0
+ .long 0
+.size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
+___
+}}}
+#########################################################################
+{{{ # Single block en- and decrypt procedures #
+sub gen_block () {
+my $dir = shift;
+my $n = $dir eq "de" ? "n" : "";
+my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
+
+$code.=<<___;
+.globl .${prefix}_${dir}crypt
+ lwz $rounds,240($key)
+ lis r0,0xfc00
+ mfspr $vrsave,256
+ li $idx,15 # 15 is not typo
+ mtspr 256,r0
+
+ lvx v0,0,$inp
+ neg r11,$out
+ lvx v1,$idx,$inp
+ lvsl v2,0,$inp # inpperm
+ le?vspltisb v4,0x0f
+ ?lvsl v3,0,r11 # outperm
+ le?vxor v2,v2,v4
+ li $idx,16
+ vperm v0,v0,v1,v2 # align [and byte swap in LE]
+ lvx v1,0,$key
+ ?lvsl v5,0,$key # keyperm
+ srwi $rounds,$rounds,1
+ lvx v2,$idx,$key
+ addi $idx,$idx,16
+ subi $rounds,$rounds,1
+ ?vperm v1,v1,v2,v5 # align round key
+
+ vxor v0,v0,v1
+ lvx v1,$idx,$key
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Loop_${dir}c:
+ ?vperm v2,v2,v1,v5
+ v${n}cipher v0,v0,v2
+ lvx v2,$idx,$key
+ addi $idx,$idx,16
+ ?vperm v1,v1,v2,v5
+ v${n}cipher v0,v0,v1
+ lvx v1,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_${dir}c
+
+ ?vperm v2,v2,v1,v5
+ v${n}cipher v0,v0,v2
+ lvx v2,$idx,$key
+ ?vperm v1,v1,v2,v5
+ v${n}cipherlast v0,v0,v1
+
+ vspltisb v2,-1
+ vxor v1,v1,v1
+ li $idx,15 # 15 is not typo
+ ?vperm v2,v1,v2,v3 # outmask
+ le?vxor v3,v3,v4
+ lvx v1,0,$out # outhead
+ vperm v0,v0,v0,v3 # rotate [and byte swap in LE]
+ vsel v1,v1,v0,v2
+ lvx v4,$idx,$out
+ stvx v1,0,$out
+ vsel v0,v0,v4,v2
+ stvx v0,$idx,$out
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
+#########################################################################
+{{{ # CBC en- and decrypt procedures #
+my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)=
+ map("v$_",(4..10));
+$code.=<<___;
+.globl .${prefix}_cbc_encrypt
+ ${UCMP}i $len,16
+ bltlr-
+
+ cmpwi $enc,0 # test direction
+ lis r0,0xffe0
+ mfspr $vrsave,256
+ mtspr 256,r0
+
+ li $idx,15
+ vxor $rndkey0,$rndkey0,$rndkey0
+ le?vspltisb $tmp,0x0f
+
+ lvx $ivec,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $ivec,$ivec,$inptail,$inpperm
+
+ neg r11,$inp
+ ?lvsl $keyperm,0,$key # prepare for unaligned key
+ lwz $rounds,240($key)
+
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inptail,0,$inp
+ addi $inp,$inp,15 # 15 is not typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ?lvsr $outperm,0,$out # prepare for unaligned store
+ vspltisb $outmask,-1
+ lvx $outhead,0,$out
+ ?vperm $outmask,$rndkey0,$outmask,$outperm
+ le?vxor $outperm,$outperm,$tmp
+
+ srwi $rounds,$rounds,1
+ li $idx,16
+ subi $rounds,$rounds,1
+ beq Lcbc_dec
+
+Lcbc_enc:
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ mtctr $rounds
+ subi $len,$len,16 # len-=16
+
+ lvx $rndkey0,0,$key
+ vperm $inout,$inout,$inptail,$inpperm
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ vxor $inout,$inout,$ivec
+
+Loop_cbc_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_cbc_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $ivec,$inout,$rndkey0
+ ${UCMP}i $len,16
+
+ vperm $tmp,$ivec,$ivec,$outperm
+ vsel $inout,$outhead,$tmp,$outmask
+ vmr $outhead,$tmp
+ stvx $inout,0,$out
+ addi $out,$out,16
+ bge Lcbc_enc
+
+ b Lcbc_done
+
+.align 4
+Lcbc_dec:
+ ${UCMP}i $len,128
+ bge _aesp8_cbc_decrypt8x
+ vmr $tmp,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ mtctr $rounds
+ subi $len,$len,16 # len-=16
+
+ lvx $rndkey0,0,$key
+ vperm $tmp,$tmp,$inptail,$inpperm
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$tmp,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+
+Loop_cbc_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_cbc_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipherlast $inout,$inout,$rndkey0
+ ${UCMP}i $len,16
+
+ vxor $inout,$inout,$ivec
+ vmr $ivec,$tmp
+ vperm $tmp,$inout,$inout,$outperm
+ vsel $inout,$outhead,$tmp,$outmask
+ vmr $outhead,$tmp
+ stvx $inout,0,$out
+ addi $out,$out,16
+ bge Lcbc_dec
+
+Lcbc_done:
+ addi $out,$out,-1
+ lvx $inout,0,$out # redundant in aligned case
+ vsel $inout,$outhead,$inout,$outmask
+ stvx $inout,0,$out
+
+ neg $enc,$ivp # write [unaligned] iv
+ li $idx,15 # 15 is not typo
+ vxor $rndkey0,$rndkey0,$rndkey0
+ vspltisb $outmask,-1
+ le?vspltisb $tmp,0x0f
+ ?lvsl $outperm,0,$enc
+ ?vperm $outmask,$rndkey0,$outmask,$outperm
+ le?vxor $outperm,$outperm,$tmp
+ lvx $outhead,0,$ivp
+ vperm $ivec,$ivec,$ivec,$outperm
+ vsel $inout,$outhead,$ivec,$outmask
+ lvx $inptail,$idx,$ivp
+ stvx $inout,0,$ivp
+ vsel $inout,$ivec,$inptail,$outmask
+ stvx $inout,$idx,$ivp
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,6,0
+ .long 0
+___
+#########################################################################
+{{ # Optimized CBC decrypt procedure #
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21));
+my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
+ # v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
+
+$code.=<<___;
+.align 5
+_aesp8_cbc_decrypt8x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ li r10,`$FRAME+8*16+15`
+ li r11,`$FRAME+8*16+31`
+ stvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ stvx v21,r11,$sp
+ addi r11,r11,32
+ stvx v22,r10,$sp
+ addi r10,r10,32
+ stvx v23,r11,$sp
+ addi r11,r11,32
+ stvx v24,r10,$sp
+ addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
+ stvx v26,r10,$sp
+ addi r10,r10,32
+ stvx v27,r11,$sp
+ addi r11,r11,32
+ stvx v28,r10,$sp
+ addi r10,r10,32
+ stvx v29,r11,$sp
+ addi r11,r11,32
+ stvx v30,r10,$sp
+ stvx v31,r11,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+ subi $len,$len,128 # bias
+
+ lvx $rndkey0,$x00,$key # load key schedule
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ lvx v31,$x00,$key
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_cbc_dec_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_cbc_dec_key
+
+ lvx v26,$x10,$key
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key
+ ?vperm v29,v29,v30,$keyperm
+ lvx $out0,$x70,$key # borrow $out0
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$out0,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ #lvx $inptail,0,$inp # "caller" already did this
+ #addi $inp,$inp,15 # 15 is not typo
+ subi $inp,$inp,15 # undo "caller"
+
+ le?li $idx,8
+ lvx_u $in0,$x00,$inp # load first 8 "words"
+ le?lvsl $inpperm,0,$idx
+ le?vspltisb $tmp,0x0f
+ lvx_u $in1,$x10,$inp
+ le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
+ lvx_u $in2,$x20,$inp
+ le?vperm $in0,$in0,$in0,$inpperm
+ lvx_u $in3,$x30,$inp
+ le?vperm $in1,$in1,$in1,$inpperm
+ lvx_u $in4,$x40,$inp
+ le?vperm $in2,$in2,$in2,$inpperm
+ vxor $out0,$in0,$rndkey0
+ lvx_u $in5,$x50,$inp
+ le?vperm $in3,$in3,$in3,$inpperm
+ vxor $out1,$in1,$rndkey0
+ lvx_u $in6,$x60,$inp
+ le?vperm $in4,$in4,$in4,$inpperm
+ vxor $out2,$in2,$rndkey0
+ lvx_u $in7,$x70,$inp
+ addi $inp,$inp,0x80
+ le?vperm $in5,$in5,$in5,$inpperm
+ vxor $out3,$in3,$rndkey0
+ le?vperm $in6,$in6,$in6,$inpperm
+ vxor $out4,$in4,$rndkey0
+ le?vperm $in7,$in7,$in7,$inpperm
+ vxor $out5,$in5,$rndkey0
+ vxor $out6,$in6,$rndkey0
+ vxor $out7,$in7,$rndkey0
+
+ mtctr $rounds
+ b Loop_cbc_dec8x
+.align 5
+Loop_cbc_dec8x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_cbc_dec8x
+
+ subic $len,$len,128 # $len-=128
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+
+ and r0,r0,$len
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vncipher $out4,$out4,v26
+ vncipher $out5,$out5,v26
+ vncipher $out6,$out6,v26
+ vncipher $out7,$out7,v26
+
+ add $inp,$inp,r0 # $inp is adjusted in such
+ # way that at exit from the
+ # loop inX-in7 are loaded
+ # with last "words"
+ vncipher $out0,$out0,v27
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vncipher $out4,$out4,v27
+ vncipher $out5,$out5,v27
+ vncipher $out6,$out6,v27
+ vncipher $out7,$out7,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vncipher $out4,$out4,v28
+ vncipher $out5,$out5,v28
+ vncipher $out6,$out6,v28
+ vncipher $out7,$out7,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ vncipher $out1,$out1,v29
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vncipher $out4,$out4,v29
+ vncipher $out5,$out5,v29
+ vncipher $out6,$out6,v29
+ vncipher $out7,$out7,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+
+ vncipher $out0,$out0,v30
+ vxor $ivec,$ivec,v31 # xor with last round key
+ vncipher $out1,$out1,v30
+ vxor $in0,$in0,v31
+ vncipher $out2,$out2,v30
+ vxor $in1,$in1,v31
+ vncipher $out3,$out3,v30
+ vxor $in2,$in2,v31
+ vncipher $out4,$out4,v30
+ vxor $in3,$in3,v31
+ vncipher $out5,$out5,v30
+ vxor $in4,$in4,v31
+ vncipher $out6,$out6,v30
+ vxor $in5,$in5,v31
+ vncipher $out7,$out7,v30
+ vxor $in6,$in6,v31
+
+ vncipherlast $out0,$out0,$ivec
+ vncipherlast $out1,$out1,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vncipherlast $out2,$out2,$in1
+ lvx_u $in1,$x10,$inp
+ vncipherlast $out3,$out3,$in2
+ le?vperm $in0,$in0,$in0,$inpperm
+ lvx_u $in2,$x20,$inp
+ vncipherlast $out4,$out4,$in3
+ le?vperm $in1,$in1,$in1,$inpperm
+ lvx_u $in3,$x30,$inp
+ vncipherlast $out5,$out5,$in4
+ le?vperm $in2,$in2,$in2,$inpperm
+ lvx_u $in4,$x40,$inp
+ vncipherlast $out6,$out6,$in5
+ le?vperm $in3,$in3,$in3,$inpperm
+ lvx_u $in5,$x50,$inp
+ vncipherlast $out7,$out7,$in6
+ le?vperm $in4,$in4,$in4,$inpperm
+ lvx_u $in6,$x60,$inp
+ vmr $ivec,$in7
+ le?vperm $in5,$in5,$in5,$inpperm
+ lvx_u $in7,$x70,$inp
+ addi $inp,$inp,0x80
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $in6,$in6,$in6,$inpperm
+ vxor $out0,$in0,$rndkey0
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $in7,$in7,$in7,$inpperm
+ vxor $out1,$in1,$rndkey0
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$rndkey0
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$rndkey0
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$rndkey0
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x50,$out
+ vxor $out5,$in5,$rndkey0
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x60,$out
+ vxor $out6,$in6,$rndkey0
+ stvx_u $out7,$x70,$out
+ addi $out,$out,0x80
+ vxor $out7,$in7,$rndkey0
+
+ mtctr $rounds
+ beq Loop_cbc_dec8x # did $len-=128 borrow?
+
+ addic. $len,$len,128
+ beq Lcbc_dec8x_done
+ nop
+ nop
+
+Loop_cbc_dec8x_tail: # up to 7 "words" tail...
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_cbc_dec8x_tail
+
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+
+ vncipher $out1,$out1,v26
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vncipher $out4,$out4,v26
+ vncipher $out5,$out5,v26
+ vncipher $out6,$out6,v26
+ vncipher $out7,$out7,v26
+
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vncipher $out4,$out4,v27
+ vncipher $out5,$out5,v27
+ vncipher $out6,$out6,v27
+ vncipher $out7,$out7,v27
+
+ vncipher $out1,$out1,v28
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vncipher $out4,$out4,v28
+ vncipher $out5,$out5,v28
+ vncipher $out6,$out6,v28
+ vncipher $out7,$out7,v28
+
+ vncipher $out1,$out1,v29
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vncipher $out4,$out4,v29
+ vncipher $out5,$out5,v29
+ vncipher $out6,$out6,v29
+ vncipher $out7,$out7,v29
+
+ vncipher $out1,$out1,v30
+ vxor $ivec,$ivec,v31 # last round key
+ vncipher $out2,$out2,v30
+ vxor $in1,$in1,v31
+ vncipher $out3,$out3,v30
+ vxor $in2,$in2,v31
+ vncipher $out4,$out4,v30
+ vxor $in3,$in3,v31
+ vncipher $out5,$out5,v30
+ vxor $in4,$in4,v31
+ vncipher $out6,$out6,v30
+ vxor $in5,$in5,v31
+ vncipher $out7,$out7,v30
+ vxor $in6,$in6,v31
+
+ cmplwi $len,32 # switch($len)
+ blt Lcbc_dec8x_one
+ nop
+ beq Lcbc_dec8x_two
+ cmplwi $len,64
+ blt Lcbc_dec8x_three
+ nop
+ beq Lcbc_dec8x_four
+ cmplwi $len,96
+ blt Lcbc_dec8x_five
+ nop
+ beq Lcbc_dec8x_six
+
+Lcbc_dec8x_seven:
+ vncipherlast $out1,$out1,$ivec
+ vncipherlast $out2,$out2,$in1
+ vncipherlast $out3,$out3,$in2
+ vncipherlast $out4,$out4,$in3
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out1,$out1,$out1,$inpperm
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x00,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x10,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x20,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x30,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x40,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x50,$out
+ stvx_u $out7,$x60,$out
+ addi $out,$out,0x70
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_six:
+ vncipherlast $out2,$out2,$ivec
+ vncipherlast $out3,$out3,$in2
+ vncipherlast $out4,$out4,$in3
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out2,$out2,$out2,$inpperm
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x00,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x10,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x20,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x30,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x40,$out
+ stvx_u $out7,$x50,$out
+ addi $out,$out,0x60
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_five:
+ vncipherlast $out3,$out3,$ivec
+ vncipherlast $out4,$out4,$in3
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out3,$out3,$out3,$inpperm
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x00,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x10,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x20,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x30,$out
+ stvx_u $out7,$x40,$out
+ addi $out,$out,0x50
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_four:
+ vncipherlast $out4,$out4,$ivec
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out4,$out4,$out4,$inpperm
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x00,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x10,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x20,$out
+ stvx_u $out7,$x30,$out
+ addi $out,$out,0x40
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_three:
+ vncipherlast $out5,$out5,$ivec
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out5,$out5,$out5,$inpperm
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x00,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x10,$out
+ stvx_u $out7,$x20,$out
+ addi $out,$out,0x30
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_two:
+ vncipherlast $out6,$out6,$ivec
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out6,$out6,$out6,$inpperm
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x00,$out
+ stvx_u $out7,$x10,$out
+ addi $out,$out,0x20
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_one:
+ vncipherlast $out7,$out7,$ivec
+ vmr $ivec,$in7
+
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out7,0,$out
+ addi $out,$out,0x10
+
+Lcbc_dec8x_done:
+ le?vperm $ivec,$ivec,$ivec,$inpperm
+ stvx_u $ivec,0,$ivp # write [unaligned] iv
+
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $inpperm,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
+___
+}} }}}
+
+#########################################################################
+{{{ # CTR procedure[s] #
+my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
+ map("v$_",(4..11));
+my $dat=$tmp;
+
+$code.=<<___;
+.globl .${prefix}_ctr32_encrypt_blocks
+ ${UCMP}i $len,1
+ bltlr-
+
+ lis r0,0xfff0
+ mfspr $vrsave,256
+ mtspr 256,r0
+
+ li $idx,15
+ vxor $rndkey0,$rndkey0,$rndkey0
+ le?vspltisb $tmp,0x0f
+
+ lvx $ivec,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ vspltisb $one,1
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $ivec,$ivec,$inptail,$inpperm
+ vsldoi $one,$rndkey0,$one,1
+
+ neg r11,$inp
+ ?lvsl $keyperm,0,$key # prepare for unaligned key
+ lwz $rounds,240($key)
+
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inptail,0,$inp
+ addi $inp,$inp,15 # 15 is not typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ srwi $rounds,$rounds,1
+ li $idx,16
+ subi $rounds,$rounds,1
+
+ ${UCMP}i $len,8
+ bge _aesp8_ctr32_encrypt8x
+
+ ?lvsr $outperm,0,$out # prepare for unaligned store
+ vspltisb $outmask,-1
+ lvx $outhead,0,$out
+ ?vperm $outmask,$rndkey0,$outmask,$outperm
+ le?vxor $outperm,$outperm,$tmp
+
+ lvx $rndkey0,0,$key
+ mtctr $rounds
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$ivec,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ b Loop_ctr32_enc
+
+.align 5
+Loop_ctr32_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_ctr32_enc
+
+ vadduwm $ivec,$ivec,$one
+ vmr $dat,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ subic. $len,$len,1 # blocks--
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ vperm $dat,$dat,$inptail,$inpperm
+ li $idx,16
+ ?vperm $rndkey1,$rndkey0,$rndkey1,$keyperm
+ lvx $rndkey0,0,$key
+ vxor $dat,$dat,$rndkey1 # last round key
+ vcipherlast $inout,$inout,$dat
+
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inout,$outperm
+ vsel $dat,$outhead,$inout,$outmask
+ mtctr $rounds
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vmr $outhead,$inout
+ vxor $inout,$ivec,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ stvx $dat,0,$out
+ addi $out,$out,16
+ bne Loop_ctr32_enc
+
+ addi $out,$out,-1
+ lvx $inout,0,$out # redundant in aligned case
+ vsel $inout,$outhead,$inout,$outmask
+ stvx $inout,0,$out
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,6,0
+ .long 0
+___
+#########################################################################
+{{ # Optimized CTR procedure #
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22));
+my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
+ # v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
+my ($two,$three,$four)=($outhead,$outperm,$outmask);
+
+$code.=<<___;
+.align 5
+_aesp8_ctr32_encrypt8x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ li r10,`$FRAME+8*16+15`
+ li r11,`$FRAME+8*16+31`
+ stvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ stvx v21,r11,$sp
+ addi r11,r11,32
+ stvx v22,r10,$sp
+ addi r10,r10,32
+ stvx v23,r11,$sp
+ addi r11,r11,32
+ stvx v24,r10,$sp
+ addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
+ stvx v26,r10,$sp
+ addi r10,r10,32
+ stvx v27,r11,$sp
+ addi r11,r11,32
+ stvx v28,r10,$sp
+ addi r10,r10,32
+ stvx v29,r11,$sp
+ addi r11,r11,32
+ stvx v30,r10,$sp
+ stvx v31,r11,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key # load key schedule
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ lvx v31,$x00,$key
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_ctr32_enc_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_ctr32_enc_key
+
+ lvx v26,$x10,$key
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key
+ ?vperm v29,v29,v30,$keyperm
+ lvx $out0,$x70,$key # borrow $out0
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$out0,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ vadduwm $two,$one,$one
+ subi $inp,$inp,15 # undo "caller"
+ $SHL $len,$len,4
+
+ vadduwm $out1,$ivec,$one # counter values ...
+ vadduwm $out2,$ivec,$two
+ vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
+ le?li $idx,8
+ vadduwm $out3,$out1,$two
+ vxor $out1,$out1,$rndkey0
+ le?lvsl $inpperm,0,$idx
+ vadduwm $out4,$out2,$two
+ vxor $out2,$out2,$rndkey0
+ le?vspltisb $tmp,0x0f
+ vadduwm $out5,$out3,$two
+ vxor $out3,$out3,$rndkey0
+ le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
+ vadduwm $out6,$out4,$two
+ vxor $out4,$out4,$rndkey0
+ vadduwm $out7,$out5,$two
+ vxor $out5,$out5,$rndkey0
+ vadduwm $ivec,$out6,$two # next counter value
+ vxor $out6,$out6,$rndkey0
+ vxor $out7,$out7,$rndkey0
+
+ mtctr $rounds
+ b Loop_ctr32_enc8x
+.align 5
+Loop_ctr32_enc8x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+ vcipher $out6,$out6,v24
+ vcipher $out7,$out7,v24
+Loop_ctr32_enc8x_middle:
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+ vcipher $out6,$out6,v25
+ vcipher $out7,$out7,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_ctr32_enc8x
+
+ subic r11,$len,256 # $len-256, borrow $key_
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+ vcipher $out6,$out6,v24
+ vcipher $out7,$out7,v24
+
+ subfe r0,r0,r0 # borrow?-1:0
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+ vcipher $out6,$out6,v25
+ vcipher $out7,$out7,v25
+
+ and r0,r0,r11
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v26
+ vcipher $out1,$out1,v26
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ vcipher $out4,$out4,v26
+ vcipher $out5,$out5,v26
+ vcipher $out6,$out6,v26
+ vcipher $out7,$out7,v26
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ subic $len,$len,129 # $len-=129
+ vcipher $out0,$out0,v27
+ addi $len,$len,1 # $len-=128 really
+ vcipher $out1,$out1,v27
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vcipher $out4,$out4,v27
+ vcipher $out5,$out5,v27
+ vcipher $out6,$out6,v27
+ vcipher $out7,$out7,v27
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+
+ vcipher $out0,$out0,v28
+ lvx_u $in0,$x00,$inp # load input
+ vcipher $out1,$out1,v28
+ lvx_u $in1,$x10,$inp
+ vcipher $out2,$out2,v28
+ lvx_u $in2,$x20,$inp
+ vcipher $out3,$out3,v28
+ lvx_u $in3,$x30,$inp
+ vcipher $out4,$out4,v28
+ lvx_u $in4,$x40,$inp
+ vcipher $out5,$out5,v28
+ lvx_u $in5,$x50,$inp
+ vcipher $out6,$out6,v28
+ lvx_u $in6,$x60,$inp
+ vcipher $out7,$out7,v28
+ lvx_u $in7,$x70,$inp
+ addi $inp,$inp,0x80
+
+ vcipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$inpperm
+ vcipher $out1,$out1,v29
+ le?vperm $in1,$in1,$in1,$inpperm
+ vcipher $out2,$out2,v29
+ le?vperm $in2,$in2,$in2,$inpperm
+ vcipher $out3,$out3,v29
+ le?vperm $in3,$in3,$in3,$inpperm
+ vcipher $out4,$out4,v29
+ le?vperm $in4,$in4,$in4,$inpperm
+ vcipher $out5,$out5,v29
+ le?vperm $in5,$in5,$in5,$inpperm
+ vcipher $out6,$out6,v29
+ le?vperm $in6,$in6,$in6,$inpperm
+ vcipher $out7,$out7,v29
+ le?vperm $in7,$in7,$in7,$inpperm
+
+ add $inp,$inp,r0 # $inp is adjusted in such
+ # way that at exit from the
+ # loop inX-in7 are loaded
+ # with last "words"
+ subfe. r0,r0,r0 # borrow?-1:0
+ vcipher $out0,$out0,v30
+ vxor $in0,$in0,v31 # xor with last round key
+ vcipher $out1,$out1,v30
+ vxor $in1,$in1,v31
+ vcipher $out2,$out2,v30
+ vxor $in2,$in2,v31
+ vcipher $out3,$out3,v30
+ vxor $in3,$in3,v31
+ vcipher $out4,$out4,v30
+ vxor $in4,$in4,v31
+ vcipher $out5,$out5,v30
+ vxor $in5,$in5,v31
+ vcipher $out6,$out6,v30
+ vxor $in6,$in6,v31
+ vcipher $out7,$out7,v30
+ vxor $in7,$in7,v31
+
+ bne Lctr32_enc8x_break # did $len-129 borrow?
+
+ vcipherlast $in0,$out0,$in0
+ vcipherlast $in1,$out1,$in1
+ vadduwm $out1,$ivec,$one # counter values ...
+ vcipherlast $in2,$out2,$in2
+ vadduwm $out2,$ivec,$two
+ vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
+ vcipherlast $in3,$out3,$in3
+ vadduwm $out3,$out1,$two
+ vxor $out1,$out1,$rndkey0
+ vcipherlast $in4,$out4,$in4
+ vadduwm $out4,$out2,$two
+ vxor $out2,$out2,$rndkey0
+ vcipherlast $in5,$out5,$in5
+ vadduwm $out5,$out3,$two
+ vxor $out3,$out3,$rndkey0
+ vcipherlast $in6,$out6,$in6
+ vadduwm $out6,$out4,$two
+ vxor $out4,$out4,$rndkey0
+ vcipherlast $in7,$out7,$in7
+ vadduwm $out7,$out5,$two
+ vxor $out5,$out5,$rndkey0
+ le?vperm $in0,$in0,$in0,$inpperm
+ vadduwm $ivec,$out6,$two # next counter value
+ vxor $out6,$out6,$rndkey0
+ le?vperm $in1,$in1,$in1,$inpperm
+ vxor $out7,$out7,$rndkey0
+ mtctr $rounds
+
+ vcipher $out0,$out0,v24
+ stvx_u $in0,$x00,$out
+ le?vperm $in2,$in2,$in2,$inpperm
+ vcipher $out1,$out1,v24
+ stvx_u $in1,$x10,$out
+ le?vperm $in3,$in3,$in3,$inpperm
+ vcipher $out2,$out2,v24
+ stvx_u $in2,$x20,$out
+ le?vperm $in4,$in4,$in4,$inpperm
+ vcipher $out3,$out3,v24
+ stvx_u $in3,$x30,$out
+ le?vperm $in5,$in5,$in5,$inpperm
+ vcipher $out4,$out4,v24
+ stvx_u $in4,$x40,$out
+ le?vperm $in6,$in6,$in6,$inpperm
+ vcipher $out5,$out5,v24
+ stvx_u $in5,$x50,$out
+ le?vperm $in7,$in7,$in7,$inpperm
+ vcipher $out6,$out6,v24
+ stvx_u $in6,$x60,$out
+ vcipher $out7,$out7,v24
+ stvx_u $in7,$x70,$out
+ addi $out,$out,0x80
+
+ b Loop_ctr32_enc8x_middle
+
+.align 5
+Lctr32_enc8x_break:
+ cmpwi $len,-0x60
+ blt Lctr32_enc8x_one
+ nop
+ beq Lctr32_enc8x_two
+ cmpwi $len,-0x40
+ blt Lctr32_enc8x_three
+ nop
+ beq Lctr32_enc8x_four
+ cmpwi $len,-0x20
+ blt Lctr32_enc8x_five
+ nop
+ beq Lctr32_enc8x_six
+ cmpwi $len,0x00
+ blt Lctr32_enc8x_seven
+
+Lctr32_enc8x_eight:
+ vcipherlast $out0,$out0,$in0
+ vcipherlast $out1,$out1,$in1
+ vcipherlast $out2,$out2,$in2
+ vcipherlast $out3,$out3,$in3
+ vcipherlast $out4,$out4,$in4
+ vcipherlast $out5,$out5,$in5
+ vcipherlast $out6,$out6,$in6
+ vcipherlast $out7,$out7,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x50,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x60,$out
+ stvx_u $out7,$x70,$out
+ addi $out,$out,0x80
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_seven:
+ vcipherlast $out0,$out0,$in1
+ vcipherlast $out1,$out1,$in2
+ vcipherlast $out2,$out2,$in3
+ vcipherlast $out3,$out3,$in4
+ vcipherlast $out4,$out4,$in5
+ vcipherlast $out5,$out5,$in6
+ vcipherlast $out6,$out6,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x50,$out
+ stvx_u $out6,$x60,$out
+ addi $out,$out,0x70
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_six:
+ vcipherlast $out0,$out0,$in2
+ vcipherlast $out1,$out1,$in3
+ vcipherlast $out2,$out2,$in4
+ vcipherlast $out3,$out3,$in5
+ vcipherlast $out4,$out4,$in6
+ vcipherlast $out5,$out5,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ stvx_u $out5,$x50,$out
+ addi $out,$out,0x60
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_five:
+ vcipherlast $out0,$out0,$in3
+ vcipherlast $out1,$out1,$in4
+ vcipherlast $out2,$out2,$in5
+ vcipherlast $out3,$out3,$in6
+ vcipherlast $out4,$out4,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_four:
+ vcipherlast $out0,$out0,$in4
+ vcipherlast $out1,$out1,$in5
+ vcipherlast $out2,$out2,$in6
+ vcipherlast $out3,$out3,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_three:
+ vcipherlast $out0,$out0,$in5
+ vcipherlast $out1,$out1,$in6
+ vcipherlast $out2,$out2,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+	b		Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_two:
+ vcipherlast $out0,$out0,$in6
+ vcipherlast $out1,$out1,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+	b		Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_one:
+ vcipherlast $out0,$out0,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ stvx_u $out0,0,$out
+ addi $out,$out,0x10
+
+Lctr32_enc8x_done:
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $inpperm,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
+___
+}} }}}
+
+my $consts=1;
+foreach(split("\n",$code)) {
+ s/\`([^\`]*)\`/eval($1)/geo;
+
+ # constants table endian-specific conversion
+ if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
+ my $conv=$3;
+ my @bytes=();
+
+ # convert to endian-agnostic format
+ if ($1 eq "long") {
+ foreach (split(/,\s*/,$2)) {
+ my $l = /^0/?oct:int;
+ push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+ }
+ } else {
+ @bytes = map(/^0/?oct:int,split(/,\s*/,$2));
+ }
+
+ # little-endian conversion
+ if ($flavour =~ /le$/o) {
+ SWITCH: for($conv) {
+ /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
+ /\?rev/ && do { @bytes=reverse(@bytes); last; };
+ }
+ }
+
+ #emit
+ print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+ next;
+ }
+ $consts=0 if (m/Lconsts:/o); # end of table
+
+ # instructions prefixed with '?' are endian-specific and need
+ # to be adjusted accordingly...
+ if ($flavour =~ /le$/o) { # little-endian
+ s/le\?//o or
+ s/be\?/#be#/o or
+ s/\?lvsr/lvsl/o or
+ s/\?lvsl/lvsr/o or
+ s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+ s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+ s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+ } else { # big-endian
+ s/le\?/#le#/o or
+ s/be\?//o or
+ s/\?([a-z]+)/$1/o;
+ }
+
+ print $_,"\n";
+}
+
+close STDOUT;
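For reference, the post-processing loop above is what drives the endianness handling: lines prefixed with "le?" are kept (prefix dropped) on little-endian builds and commented out as "#le#" on big-endian ones, "be?" works the other way round, and the "?lvsl"/"?lvsr"/"?vperm"/"?vsldoi"/"?vspltw" forms are rewritten on little-endian but merely have the '?' stripped on big-endian. A minimal standalone Perl sketch of the "le?" case only, using a made-up sample line and flavour strings that are not part of the patch:

	#!/usr/bin/env perl
	# Illustrative only: apply the same le? substitution as the loop above
	# to one hypothetical assembly line, for both flavours.
	for my $flavour ("linux-ppc64le", "linux-ppc64") {
		my $out = "le?vperm \$in0,\$in0,\$in0,\$inpperm";
		if ($flavour =~ /le$/) {	# little-endian: keep, drop the prefix
			$out =~ s/le\?//;
		} else {			# big-endian: comment the line out
			$out =~ s/le\?/#le#/;
		}
		print "$flavour:\t$out\n";
	}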
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
new file mode 100644
index 000000000000..d0ffe277af5c
--- /dev/null
+++ b/drivers/crypto/vmx/ghash.c
@@ -0,0 +1,214 @@
+/*
+ * GHASH routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <crypto/b128ops.h>
+
+#define IN_INTERRUPT in_interrupt()
+
+#define GHASH_BLOCK_SIZE (16)
+#define GHASH_DIGEST_SIZE (16)
+#define GHASH_KEY_LEN (16)
+
+void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
+void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
+void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
+		  const u8 *in, size_t len);
+
+struct p8_ghash_ctx {
+ u128 htable[16];
+ struct crypto_shash *fallback;
+};
+
+struct p8_ghash_desc_ctx {
+ u64 shash[2];
+ u8 buffer[GHASH_DIGEST_SIZE];
+ int bytes;
+ struct shash_desc fallback_desc;
+};
+
+static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+{
+ const char *alg;
+ struct crypto_shash *fallback;
+ struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!(alg = crypto_tfm_alg_name(tfm))) {
+ printk(KERN_ERR "Failed to get algorithm name.\n");
+ return -ENOENT;
+ }
+
+	fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+ printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
+
+ crypto_shash_set_flags(fallback,
+ crypto_shash_get_flags((struct crypto_shash *) tfm));
+ ctx->fallback = fallback;
+
+ shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
+ + crypto_shash_descsize(fallback);
+
+ return 0;
+}
+
+static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ crypto_free_shash(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+}
+
+static int p8_ghash_init(struct shash_desc *desc)
+{
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->bytes = 0;
+ memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+ dctx->fallback_desc.tfm = ctx->fallback;
+ dctx->fallback_desc.flags = desc->flags;
+ return crypto_shash_init(&dctx->fallback_desc);
+}
+
+static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
+
+ if (keylen != GHASH_KEY_LEN)
+ return -EINVAL;
+
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_init_p8(ctx->htable, (const u64 *) key);
+ pagefault_enable();
+ return crypto_shash_setkey(ctx->fallback, key, keylen);
+}
+
+static int p8_ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ unsigned int len;
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (IN_INTERRUPT) {
+ return crypto_shash_update(&dctx->fallback_desc, src, srclen);
+ } else {
+ if (dctx->bytes) {
+ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+ memcpy(dctx->buffer + dctx->bytes, src, srclen);
+ dctx->bytes += srclen;
+ return 0;
+ }
+ memcpy(dctx->buffer + dctx->bytes, src,
+ GHASH_DIGEST_SIZE - dctx->bytes);
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
+ GHASH_DIGEST_SIZE);
+ pagefault_enable();
+ src += GHASH_DIGEST_SIZE - dctx->bytes;
+ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+ dctx->bytes = 0;
+ }
+ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+ if (len) {
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+ pagefault_enable();
+ src += len;
+ srclen -= len;
+ }
+ if (srclen) {
+ memcpy(dctx->buffer, src, srclen);
+ dctx->bytes = srclen;
+ }
+ return 0;
+ }
+}
+
+static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+{
+ int i;
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (IN_INTERRUPT) {
+ return crypto_shash_final(&dctx->fallback_desc, out);
+ } else {
+ if (dctx->bytes) {
+ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+ dctx->buffer[i] = 0;
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
+ GHASH_DIGEST_SIZE);
+ pagefault_enable();
+ dctx->bytes = 0;
+ }
+ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+ return 0;
+ }
+}
+
+struct shash_alg p8_ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = p8_ghash_init,
+ .update = p8_ghash_update,
+ .final = p8_ghash_final,
+ .setkey = p8_ghash_setkey,
+ .descsize = sizeof(struct p8_ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "p8_ghash",
+ .cra_priority = 1000,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = p8_ghash_init_tfm,
+ .cra_exit = p8_ghash_exit_tfm,
+ },
+};
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
new file mode 100644
index 000000000000..0a6f899839dd
--- /dev/null
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -0,0 +1,228 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# GHASH for PowerISA v2.07.
+#
+# July 2014
+#
+# Accurate performance measurements are problematic, because it's
+# always a virtualized setup with a possibly throttled processor.
+# Relative comparison is therefore more informative. This initial
+# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
+# faster than "4-bit" integer-only compiler-generated 64-bit code.
+# "Initial version" means that there is room for further improvement.
+
+$flavour=shift;
+$output =shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T=8;
+ $LRSAVE=2*$SIZE_T;
+ $STU="stdu";
+ $POP="ld";
+ $PUSH="std";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T=4;
+ $LRSAVE=$SIZE_T;
+ $STU="stwu";
+ $POP="lwz";
+ $PUSH="stw";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";
+
+my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block
+
+my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
+my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
+my $vrsave="r12";
+
+$code=<<___;
+.machine "any"
+
+.text
+
+.globl .gcm_init_p8
+ lis r0,0xfff0
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $H,0,r4 # load H
+
+ vspltisb $xC2,-16 # 0xf0
+ vspltisb $t0,1 # one
+ vaddubm $xC2,$xC2,$xC2 # 0xe0
+ vxor $zero,$zero,$zero
+ vor $xC2,$xC2,$t0 # 0xe1
+ vsldoi $xC2,$xC2,$zero,15 # 0xe1...
+ vsldoi $t1,$zero,$t0,1 # ...1
+ vaddubm $xC2,$xC2,$xC2 # 0xc2...
+ vspltisb $t2,7
+ vor $xC2,$xC2,$t1 # 0xc2....01
+ vspltb $t1,$H,0 # most significant byte
+ vsl $H,$H,$t0 # H<<=1
+ vsrab $t1,$t1,$t2 # broadcast carry bit
+ vand $t1,$t1,$xC2
+ vxor $H,$H,$t1 # twisted H
+
+ vsldoi $H,$H,$H,8 # twist even more ...
+ vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
+ vsldoi $Hl,$zero,$H,8 # ... and split
+ vsldoi $Hh,$H,$zero,8
+
+ stvx_u $xC2,0,r3 # save pre-computed table
+ stvx_u $Hl,r8,r3
+ stvx_u $H, r9,r3
+ stvx_u $Hh,r10,r3
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .gcm_init_p8,.-.gcm_init_p8
+
+.globl .gcm_gmult_p8
+ lis r0,0xfff8
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $IN,0,$Xip # load Xi
+
+ lvx_u $Hl,r8,$Htbl # load pre-computed table
+ le?lvsl $lemask,r0,r0
+ lvx_u $H, r9,$Htbl
+ le?vspltisb $t0,0x07
+ lvx_u $Hh,r10,$Htbl
+ le?vxor $lemask,$lemask,$t0
+ lvx_u $xC2,0,$Htbl
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $zero,$zero,$zero
+
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+
+ vpmsumd $t2,$Xl,$xC2 # 1st phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vxor $Xl,$Xl,$t2
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd phase
+ vpmsumd $Xl,$Xl,$xC2
+ vxor $t1,$t1,$Xh
+ vxor $Xl,$Xl,$t1
+
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ stvx_u $Xl,0,$Xip # write out Xi
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .gcm_gmult_p8,.-.gcm_gmult_p8
+
+.globl .gcm_ghash_p8
+ lis r0,0xfff8
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $Xl,0,$Xip # load Xi
+
+ lvx_u $Hl,r8,$Htbl # load pre-computed table
+ le?lvsl $lemask,r0,r0
+ lvx_u $H, r9,$Htbl
+ le?vspltisb $t0,0x07
+ lvx_u $Hh,r10,$Htbl
+ le?vxor $lemask,$lemask,$t0
+ lvx_u $xC2,0,$Htbl
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ vxor $zero,$zero,$zero
+
+ lvx_u $IN,0,$inp
+ addi $inp,$inp,16
+ subi $len,$len,16
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $IN,$IN,$Xl
+ b Loop
+
+.align 5
+Loop:
+ subic $len,$len,16
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ subfe. r0,r0,r0 # borrow?-1:0
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ and r0,r0,$len
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+ add $inp,$inp,r0
+
+ vpmsumd $t2,$Xl,$xC2 # 1st phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vxor $Xl,$Xl,$t2
+ lvx_u $IN,0,$inp
+ addi $inp,$inp,16
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd phase
+ vpmsumd $Xl,$Xl,$xC2
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $t1,$t1,$Xh
+ vxor $IN,$IN,$t1
+ vxor $IN,$IN,$Xl
+ beq Loop # did $len-=16 borrow?
+
+ vxor $Xl,$Xl,$t1
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ stvx_u $Xl,0,$Xip # write out Xi
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+.size .gcm_ghash_p8,.-.gcm_ghash_p8
+
+.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+foreach (split("\n",$code)) {
+ if ($flavour =~ /le$/o) { # little-endian
+ s/le\?//o or
+ s/be\?/#be#/o;
+ } else {
+ s/le\?/#le#/o or
+ s/be\?//o;
+ }
+ print $_,"\n";
+}
+
+close STDOUT; # enforce flush
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
new file mode 100644
index 000000000000..a59188494af8
--- /dev/null
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -0,0 +1,207 @@
+#!/usr/bin/env perl
+
+# PowerPC assembler distiller by <appro>.
+
+my $flavour = shift;
+my $output = shift;
+open STDOUT,">$output" or die "can't open $output: $!";
+
+my %GLOBALS;
+my $dotinlocallabels=($flavour=~/linux/)?1:0;
+
+################################################################
+# directives which need special treatment on different platforms
+################################################################
+my $globl = sub {
+ my $junk = shift;
+ my $name = shift;
+ my $global = \$GLOBALS{$name};
+ my $ret;
+
+ $name =~ s|^[\.\_]||;
+
+ SWITCH: for ($flavour) {
+ /aix/ && do { $name = ".$name";
+ last;
+ };
+ /osx/ && do { $name = "_$name";
+ last;
+ };
+ /linux/
+ && do { $ret = "_GLOBAL($name)";
+ last;
+ };
+ }
+
+	$ret = ".globl	$name\n.align	5\n$name:" if (!$ret);
+ $$global = $name;
+ $ret;
+};
+my $text = sub {
+ my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text";
+ $ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/);
+ $ret;
+};
+my $machine = sub {
+ my $junk = shift;
+ my $arch = shift;
+ if ($flavour =~ /osx/)
+ { $arch =~ s/\"//g;
+ $arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any");
+ }
+ ".machine $arch";
+};
+my $size = sub {
+ if ($flavour =~ /linux/)
+ { shift;
+ my $name = shift; $name =~ s|^[\.\_]||;
+ my $ret = ".size $name,.-".($flavour=~/64$/?".":"").$name;
+ $ret .= "\n.size .$name,.-.$name" if ($flavour=~/64$/);
+ $ret;
+ }
+ else
+ { ""; }
+};
+my $asciz = sub {
+ shift;
+ my $line = join(",",@_);
+ if ($line =~ /^"(.*)"$/)
+ { ".byte " . join(",",unpack("C*",$1),0) . "\n.align 2"; }
+ else
+ { ""; }
+};
+my $quad = sub {
+ shift;
+ my @ret;
+ my ($hi,$lo);
+ for (@_) {
+ if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io)
+ { $hi=$1?"0x$1":"0"; $lo="0x$2"; }
+ elsif (/^([0-9]+)$/o)
+ { $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl
+ else
+ { $hi=undef; $lo=$_; }
+
+ if (defined($hi))
+ { push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); }
+ else
+ { push(@ret,".quad $lo"); }
+ }
+ join("\n",@ret);
+};
+
+################################################################
+# simplified mnemonics not handled by at least one assembler
+################################################################
+my $cmplw = sub {
+ my $f = shift;
+ my $cr = 0; $cr = shift if ($#_>1);
+ # Some out-of-date 32-bit GNU assembler just can't handle cmplw...
+ ($flavour =~ /linux.*32/) ?
+ " .long ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 :
+ " cmplw ".join(',',$cr,@_);
+};
+my $bdnz = sub {
+ my $f = shift;
+ my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint
+ " bc $bo,0,".shift;
+} if ($flavour!~/linux/);
+my $bltlr = sub {
+ my $f = shift;
+ my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint
+ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
+ " .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 :
+ " bclr $bo,0";
+};
+my $bnelr = sub {
+ my $f = shift;
+ my $bo = $f=~/\-/ ? 4+2 : 4; # optional "not to be taken" hint
+ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
+ " .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 :
+ " bclr $bo,2";
+};
+my $beqlr = sub {
+ my $f = shift;
+ my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint
+ ($flavour =~ /linux/) ? # GNU as doesn't allow most recent hints
+ " .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 :
+ " bclr $bo,2";
+};
+# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two
+# arguments is 64, with "operand out of range" error.
+my $extrdi = sub {
+ my ($f,$ra,$rs,$n,$b) = @_;
+ $b = ($b+$n)&63; $n = 64-$n;
+ " rldicl $ra,$rs,$b,$n";
+};
+my $vmr = sub {
+ my ($f,$vx,$vy) = @_;
+ " vor $vx,$vy,$vy";
+};
+
+# PowerISA 2.06 stuff
+sub vsxmem_op {
+ my ($f, $vrt, $ra, $rb, $op) = @_;
+ " .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
+}
+# made-up unaligned memory reference AltiVec/VMX instructions
+my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x
+my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x
+my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx
+my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx
+my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x
+my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x
+
+# PowerISA 2.07 stuff
+sub vcrypto_op {
+ my ($f, $vrt, $vra, $vrb, $op) = @_;
+ " .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op;
+}
+my $vcipher = sub { vcrypto_op(@_, 1288); };
+my $vcipherlast = sub { vcrypto_op(@_, 1289); };
+my $vncipher = sub { vcrypto_op(@_, 1352); };
+my $vncipherlast= sub { vcrypto_op(@_, 1353); };
+my $vsbox = sub { vcrypto_op(@_, 0, 1480); };
+my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); };
+my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); };
+my $vpmsumb = sub { vcrypto_op(@_, 1032); };
+my $vpmsumd = sub { vcrypto_op(@_, 1224); };
+my $vpmsumh	= sub { vcrypto_op(@_, 1096); };
+my $vpmsumw = sub { vcrypto_op(@_, 1160); };
+my $vaddudm = sub { vcrypto_op(@_, 192); };
+
+my $mtsle = sub {
+ my ($f, $arg) = @_;
+ " .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2);
+};
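The vsxmem_op()/vcrypto_op() helpers above let the translator emit PowerISA 2.06/2.07 instructions as raw .long words for assemblers that do not know the mnemonics. A small standalone Perl check, reproducing vcrypto_op() for a hypothetical "vpmsumd v0,v3,v7" whose operand numbers are chosen purely for illustration:

	#!/usr/bin/env perl
	# Illustrative only: same VX-form encoding as vcrypto_op() above.
	sub vcrypto_op {
		my ($f, $vrt, $vra, $vrb, $op) = @_;
		"\t.long\t".sprintf("0x%X", (4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op);
	}
	# vpmsumd uses XO 1224 in the table above; v0,v3,v7 encodes to 0x10033CC8.
	print vcrypto_op("", 0, 3, 7, 1224), "\n";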
+
+print "#include <asm/ppc_asm.h>\n" if $flavour =~ /linux/;
+
+while($line=<>) {
+
+ $line =~ s|[#!;].*$||; # get rid of asm-style comments...
+ $line =~ s|/\*.*\*/||; # ... and C-style comments...
+ $line =~ s|^\s+||; # ... and skip white spaces in beginning...
+ $line =~ s|\s+$||; # ... and at the end
+
+ {
+ $line =~ s|\b\.L(\w+)|L$1|g; # common denominator for Locallabel
+ $line =~ s|\bL(\w+)|\.L$1|g if ($dotinlocallabels);
+ }
+
+ {
+ $line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||;
+ my $c = $1; $c = "\t" if ($c eq "");
+ my $mnemonic = $2;
+ my $f = $3;
+ my $opcode = eval("\$$mnemonic");
+ $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/);
+ if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); }
+ elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; }
+ }
+
+ print $line if ($line);
+ print "\n";
+}
+
+close STDOUT;
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
new file mode 100644
index 000000000000..44d8d5cfe40d
--- /dev/null
+++ b/drivers/crypto/vmx/vmx.c
@@ -0,0 +1,88 @@
+/*
+ * Routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <asm/cputable.h>
+#include <crypto/internal/hash.h>
+
+extern struct shash_alg p8_ghash_alg;
+extern struct crypto_alg p8_aes_alg;
+extern struct crypto_alg p8_aes_cbc_alg;
+extern struct crypto_alg p8_aes_ctr_alg;
+static struct crypto_alg *algs[] = {
+ &p8_aes_alg,
+ &p8_aes_cbc_alg,
+ &p8_aes_ctr_alg,
+ NULL,
+};
+
+int __init p8_init(void)
+{
+ int ret = 0;
+ struct crypto_alg **alg_it;
+
+ if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
+ return -ENODEV;
+
+ for (alg_it = algs; *alg_it; alg_it++) {
+ ret = crypto_register_alg(*alg_it);
+ printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
+ (*alg_it)->cra_name, ret);
+ if (ret) {
+ for (alg_it--; alg_it >= algs; alg_it--)
+ crypto_unregister_alg(*alg_it);
+ break;
+ }
+ }
+ if (ret)
+ return ret;
+
+ ret = crypto_register_shash(&p8_ghash_alg);
+ if (ret) {
+ for (alg_it = algs; *alg_it; alg_it++)
+ crypto_unregister_alg(*alg_it);
+ }
+ return ret;
+}
+
+void __exit p8_exit(void)
+{
+ struct crypto_alg **alg_it;
+
+ for (alg_it = algs; *alg_it; alg_it++) {
+ printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
+ crypto_unregister_alg(*alg_it);
+ }
+ crypto_unregister_shash(&p8_ghash_alg);
+}
+
+module_init(p8_init);
+module_exit(p8_exit);
+
+MODULE_AUTHOR("Marcelo Cerri <mhcerri@br.ibm.com>");
+MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions support on Power 8");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 30b538d8cc90..ca1b362d77e2 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -392,7 +392,6 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
/**
* _remove_devfreq() - Remove devfreq from the list and release its resources.
* @devfreq: the devfreq struct
- * @skip: skip calling device_unregister().
*/
static void _remove_devfreq(struct devfreq *devfreq)
{
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index ad8347385f53..7d99d13bacd8 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -194,7 +194,7 @@ static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
return 0;
}
-static struct devfreq_event_ops exynos_ppmu_ops = {
+static const struct devfreq_event_ops exynos_ppmu_ops = {
.disable = exynos_ppmu_disable,
.set_event = exynos_ppmu_set_event,
.get_event = exynos_ppmu_get_event,
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 34790961af5a..13a1a6e8108c 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -62,7 +62,8 @@
#define ACTMON_BELOW_WMARK_WINDOW 3
#define ACTMON_BOOST_FREQ_STEP 16000
-/* activity counter is incremented every 256 memory transactions, and each
+/*
+ * Activity counter is incremented every 256 memory transactions, and each
+ * transaction takes 4 EMC clocks for Tegra124; so the COUNT_WEIGHT is
* 4 * 256 = 1024.
*/
@@ -85,16 +86,25 @@
* struct tegra_devfreq_device_config - configuration specific to an ACTMON
* device
*
- * Coefficients and thresholds are in %
+ * Coefficients and thresholds are percentages unless otherwise noted
*/
struct tegra_devfreq_device_config {
u32 offset;
u32 irq_mask;
+ /* Factors applied to boost_freq every consecutive watermark breach */
unsigned int boost_up_coeff;
unsigned int boost_down_coeff;
+
+ /* Define the watermark bounds when applied to the current avg */
unsigned int boost_up_threshold;
unsigned int boost_down_threshold;
+
+ /*
+ * Threshold of activity (cycles) below which the CPU frequency isn't
+ * to be taken into account. This is to avoid increasing the EMC
+ * frequency when the CPU is very busy but not accessing the bus often.
+ */
u32 avg_dependency_threshold;
};
@@ -105,7 +115,7 @@ enum tegra_actmon_device {
static struct tegra_devfreq_device_config actmon_device_configs[] = {
{
- /* MCALL */
+ /* MCALL: All memory accesses (including from the CPUs) */
.offset = 0x1c0,
.irq_mask = 1 << 26,
.boost_up_coeff = 200,
@@ -114,7 +124,7 @@ static struct tegra_devfreq_device_config actmon_device_configs[] = {
.boost_down_threshold = 40,
},
{
- /* MCCPU */
+ /* MCCPU: memory accesses from the CPUs */
.offset = 0x200,
.irq_mask = 1 << 25,
.boost_up_coeff = 800,
@@ -132,25 +142,29 @@ static struct tegra_devfreq_device_config actmon_device_configs[] = {
*/
struct tegra_devfreq_device {
const struct tegra_devfreq_device_config *config;
+ void __iomem *regs;
+ spinlock_t lock;
+
+ /* Average event count sampled in the last interrupt */
+ u32 avg_count;
- void __iomem *regs;
- u32 avg_band_freq;
- u32 avg_count;
+ /*
+ * Extra frequency to increase the target by due to consecutive
+ * watermark breaches.
+ */
+ unsigned long boost_freq;
- unsigned long target_freq;
- unsigned long boost_freq;
+ /* Optimal frequency calculated from the stats for this device */
+ unsigned long target_freq;
};
struct tegra_devfreq {
struct devfreq *devfreq;
- struct platform_device *pdev;
struct reset_control *reset;
struct clk *clock;
void __iomem *regs;
- spinlock_t lock;
-
struct clk *emc_clock;
unsigned long max_freq;
unsigned long cur_freq;
@@ -174,19 +188,43 @@ static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
{ 250000, 100000 },
};
+static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
+{
+ return readl(tegra->regs + offset);
+}
+
+static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
+{
+ writel(val, tegra->regs + offset);
+}
+
+static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
+{
+ return readl(dev->regs + offset);
+}
+
+static void device_writel(struct tegra_devfreq_device *dev, u32 val,
+ u32 offset)
+{
+ writel(val, dev->regs + offset);
+}
+
static unsigned long do_percent(unsigned long val, unsigned int pct)
{
return val * pct / 100;
}
-static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
+static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
{
u32 avg = dev->avg_count;
- u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;
+ u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
+ u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
+
+ device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
- writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
- avg = max(avg, band);
- writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
+ avg = max(dev->avg_count, band);
+ device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
}
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
@@ -194,96 +232,96 @@ static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
{
u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
- writel(do_percent(val, dev->config->boost_up_threshold),
- dev->regs + ACTMON_DEV_UPPER_WMARK);
+ device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
+ ACTMON_DEV_UPPER_WMARK);
- writel(do_percent(val, dev->config->boost_down_threshold),
- dev->regs + ACTMON_DEV_LOWER_WMARK);
+ device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
+ ACTMON_DEV_LOWER_WMARK);
}
static void actmon_write_barrier(struct tegra_devfreq *tegra)
{
/* ensure the update has reached the ACTMON */
wmb();
- readl(tegra->regs + ACTMON_GLB_STATUS);
+ actmon_readl(tegra, ACTMON_GLB_STATUS);
}
-static irqreturn_t actmon_isr(int irq, void *data)
+static void actmon_isr_device(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
{
- struct tegra_devfreq *tegra = data;
- struct tegra_devfreq_device *dev = NULL;
unsigned long flags;
- u32 val;
- unsigned int i;
-
- val = readl(tegra->regs + ACTMON_GLB_STATUS);
-
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- if (val & tegra->devices[i].config->irq_mask) {
- dev = tegra->devices + i;
- break;
- }
- }
-
- if (!dev)
- return IRQ_NONE;
+ u32 intr_status, dev_ctrl;
- spin_lock_irqsave(&tegra->lock, flags);
+ spin_lock_irqsave(&dev->lock, flags);
- dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
- tegra_devfreq_update_avg_wmark(dev);
+ dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
+ tegra_devfreq_update_avg_wmark(tegra, dev);
- val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
- if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
- val = readl(dev->regs + ACTMON_DEV_CTRL) |
- ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
- ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
+ dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
+ if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
/*
* new_boost = min(old_boost * up_coef + step, max_freq)
*/
dev->boost_freq = do_percent(dev->boost_freq,
dev->config->boost_up_coeff);
dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
- if (dev->boost_freq >= tegra->max_freq) {
- dev->boost_freq = tegra->max_freq;
- val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
- }
- writel(val, dev->regs + ACTMON_DEV_CTRL);
- } else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
- val = readl(dev->regs + ACTMON_DEV_CTRL) |
- ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
- ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+
+ if (dev->boost_freq >= tegra->max_freq)
+ dev->boost_freq = tegra->max_freq;
+ else
+ dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+ } else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
/*
* new_boost = old_boost * down_coef
* or 0 if (old_boost * down_coef < step / 2)
*/
dev->boost_freq = do_percent(dev->boost_freq,
dev->config->boost_down_coeff);
- if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
+
+ dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+ if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
dev->boost_freq = 0;
- val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- }
- writel(val, dev->regs + ACTMON_DEV_CTRL);
+ else
+ dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
}
if (dev->config->avg_dependency_threshold) {
- val = readl(dev->regs + ACTMON_DEV_CTRL);
if (dev->avg_count >= dev->config->avg_dependency_threshold)
- val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
else if (dev->boost_freq == 0)
- val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- writel(val, dev->regs + ACTMON_DEV_CTRL);
+ dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
}
- writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
+ device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
+
+ device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
actmon_write_barrier(tegra);
- spin_unlock_irqrestore(&tegra->lock, flags);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
- return IRQ_WAKE_THREAD;
+static irqreturn_t actmon_isr(int irq, void *data)
+{
+ struct tegra_devfreq *tegra = data;
+ bool handled = false;
+ unsigned int i;
+ u32 val;
+
+ val = actmon_readl(tegra, ACTMON_GLB_STATUS);
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ if (val & tegra->devices[i].config->irq_mask) {
+ actmon_isr_device(tegra, tegra->devices + i);
+ handled = true;
+ }
+ }
+
+ return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
}
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
@@ -317,7 +355,7 @@ static void actmon_update_target(struct tegra_devfreq *tegra,
static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
}
- spin_lock_irqsave(&tegra->lock, flags);
+ spin_lock_irqsave(&dev->lock, flags);
dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
@@ -327,7 +365,7 @@ static void actmon_update_target(struct tegra_devfreq *tegra,
if (dev->avg_count >= dev->config->avg_dependency_threshold)
dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
- spin_unlock_irqrestore(&tegra->lock, flags);
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static irqreturn_t actmon_thread_isr(int irq, void *data)
@@ -345,131 +383,110 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
unsigned long action, void *ptr)
{
struct clk_notifier_data *data = ptr;
- struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
- rate_change_nb);
+ struct tegra_devfreq *tegra;
+ struct tegra_devfreq_device *dev;
unsigned int i;
unsigned long flags;
- spin_lock_irqsave(&tegra->lock, flags);
+ if (action != POST_RATE_CHANGE)
+ return NOTIFY_OK;
- switch (action) {
- case POST_RATE_CHANGE:
- tegra->cur_freq = data->new_rate / KHZ;
+ tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
- tegra_devfreq_update_wmark(tegra, tegra->devices + i);
+ tegra->cur_freq = data->new_rate / KHZ;
- actmon_write_barrier(tegra);
- break;
- case PRE_RATE_CHANGE:
- /* fall through */
- case ABORT_RATE_CHANGE:
- break;
- };
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ dev = &tegra->devices[i];
- spin_unlock_irqrestore(&tegra->lock, flags);
+ spin_lock_irqsave(&dev->lock, flags);
+ tegra_devfreq_update_wmark(tegra, dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+
+ actmon_write_barrier(tegra);
return NOTIFY_OK;
}
-static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
- struct tegra_devfreq_device *dev)
+static void tegra_actmon_enable_interrupts(struct tegra_devfreq *tegra)
{
+ struct tegra_devfreq_device *dev;
u32 val;
+ unsigned int i;
- dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
- dev->target_freq = tegra->cur_freq;
-
- dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
- writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);
-
- tegra_devfreq_update_avg_wmark(dev);
- tegra_devfreq_update_wmark(tegra, dev);
-
- writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
- writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
-
- val = 0;
- val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
- ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
- ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
- val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
- << ACTMON_DEV_CTRL_K_VAL_SHIFT;
- val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
- << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
- val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
- << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
- val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
- ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
-
- writel(val, dev->regs + ACTMON_DEV_CTRL);
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ dev = &tegra->devices[i];
- actmon_write_barrier(tegra);
+ val = device_readl(dev, ACTMON_DEV_CTRL);
+ val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+ val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+ val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
- val = readl(dev->regs + ACTMON_DEV_CTRL);
- val |= ACTMON_DEV_CTRL_ENB;
- writel(val, dev->regs + ACTMON_DEV_CTRL);
+ device_writel(dev, val, ACTMON_DEV_CTRL);
+ }
actmon_write_barrier(tegra);
}
-static int tegra_devfreq_suspend(struct device *dev)
+static void tegra_actmon_disable_interrupts(struct tegra_devfreq *tegra)
{
- struct platform_device *pdev;
- struct tegra_devfreq *tegra;
- struct tegra_devfreq_device *actmon_dev;
- unsigned int i;
+ struct tegra_devfreq_device *dev;
u32 val;
-
- pdev = container_of(dev, struct platform_device, dev);
- tegra = platform_get_drvdata(pdev);
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- actmon_dev = &tegra->devices[i];
-
- val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
- val &= ~ACTMON_DEV_CTRL_ENB;
- writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);
+ dev = &tegra->devices[i];
- writel(ACTMON_INTR_STATUS_CLEAR,
- actmon_dev->regs + ACTMON_DEV_INTR_STATUS);
+ val = device_readl(dev, ACTMON_DEV_CTRL);
+ val &= ~ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+ val &= ~ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+ val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
- actmon_write_barrier(tegra);
+ device_writel(dev, val, ACTMON_DEV_CTRL);
}
- return 0;
+ actmon_write_barrier(tegra);
}
-static int tegra_devfreq_resume(struct device *dev)
+static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
{
- struct platform_device *pdev;
- struct tegra_devfreq *tegra;
- struct tegra_devfreq_device *actmon_dev;
- unsigned int i;
+ u32 val = 0;
- pdev = container_of(dev, struct platform_device, dev);
- tegra = platform_get_drvdata(pdev);
+ dev->target_freq = tegra->cur_freq;
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- actmon_dev = &tegra->devices[i];
+ dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
- tegra_actmon_configure_device(tegra, actmon_dev);
- }
+ tegra_devfreq_update_avg_wmark(tegra, dev);
+ tegra_devfreq_update_wmark(tegra, dev);
- return 0;
+ device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
+ device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
+
+ val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
+ val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
+ << ACTMON_DEV_CTRL_K_VAL_SHIFT;
+ val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
+ << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
+ val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
+ << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
+ val |= ACTMON_DEV_CTRL_ENB;
+
+ device_writel(dev, val, ACTMON_DEV_CTRL);
+
+ actmon_write_barrier(tegra);
}
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct platform_device *pdev;
- struct tegra_devfreq *tegra;
+ struct tegra_devfreq *tegra = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
unsigned long rate = *freq * KHZ;
- pdev = container_of(dev, struct platform_device, dev);
- tegra = platform_get_drvdata(pdev);
-
rcu_read_lock();
opp = devfreq_recommended_opp(dev, &rate, flags);
if (IS_ERR(opp)) {
@@ -480,10 +497,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
rate = dev_pm_opp_get_freq(opp);
rcu_read_unlock();
- /* TODO: Once we have per-user clk constraints, set a floor */
- clk_set_rate(tegra->emc_clock, rate);
-
- /* TODO: Set voltage as well */
+ clk_set_min_rate(tegra->emc_clock, rate);
+ clk_set_rate(tegra->emc_clock, 0);
return 0;
}
@@ -491,13 +506,9 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
static int tegra_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
- struct platform_device *pdev;
- struct tegra_devfreq *tegra;
+ struct tegra_devfreq *tegra = dev_get_drvdata(dev);
struct tegra_devfreq_device *actmon_dev;
- pdev = container_of(dev, struct platform_device, dev);
- tegra = platform_get_drvdata(pdev);
-
stat->current_frequency = tegra->cur_freq;
/* To be used by the tegra governor */
@@ -508,7 +519,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
actmon_dev = &tegra->devices[MCALL];
/* Number of cycles spent on memory access */
- stat->busy_time = actmon_dev->avg_count;
+ stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
/* The bus can be considered to be saturated way before 100% */
stat->busy_time *= 100 / BUS_SATURATION_RATIO;
@@ -516,11 +527,19 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
/* Number of cycles in a sampling period */
stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
+ stat->busy_time = min(stat->busy_time, stat->total_time);
+
return 0;
}
-static int tegra_devfreq_get_target(struct devfreq *devfreq,
- unsigned long *freq)
+static struct devfreq_dev_profile tegra_devfreq_profile = {
+ .polling_ms = 0,
+ .target = tegra_devfreq_target,
+ .get_dev_status = tegra_devfreq_get_dev_status,
+};
+
+static int tegra_governor_get_target(struct devfreq *devfreq,
+ unsigned long *freq)
{
struct devfreq_dev_status stat;
struct tegra_devfreq *tegra;
@@ -548,22 +567,43 @@ static int tegra_devfreq_get_target(struct devfreq *devfreq,
return 0;
}
-static int tegra_devfreq_event_handler(struct devfreq *devfreq,
- unsigned int event, void *data)
+static int tegra_governor_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
{
- return 0;
+ struct tegra_devfreq *tegra;
+ int ret = 0;
+
+ tegra = dev_get_drvdata(devfreq->dev.parent);
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ devfreq_monitor_start(devfreq);
+ tegra_actmon_enable_interrupts(tegra);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ tegra_actmon_disable_interrupts(tegra);
+ devfreq_monitor_stop(devfreq);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ tegra_actmon_disable_interrupts(tegra);
+ devfreq_monitor_suspend(devfreq);
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ devfreq_monitor_resume(devfreq);
+ tegra_actmon_enable_interrupts(tegra);
+ break;
+ }
+
+ return ret;
}
static struct devfreq_governor tegra_devfreq_governor = {
- .name = "tegra",
- .get_target_freq = tegra_devfreq_get_target,
- .event_handler = tegra_devfreq_event_handler,
-};
-
-static struct devfreq_dev_profile tegra_devfreq_profile = {
- .polling_ms = 0,
- .target = tegra_devfreq_target,
- .get_dev_status = tegra_devfreq_get_dev_status,
+ .name = "tegra_actmon",
+ .get_target_freq = tegra_governor_get_target,
+ .event_handler = tegra_governor_event_handler,
};
static int tegra_devfreq_probe(struct platform_device *pdev)
@@ -571,8 +611,8 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
struct resource *res;
- unsigned long max_freq;
unsigned int i;
+ unsigned long rate;
int irq;
int err;
@@ -580,19 +620,11 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
if (!tegra)
return -ENOMEM;
- spin_lock_init(&tegra->lock);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get regs resource\n");
- return -ENODEV;
- }
tegra->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tegra->regs)) {
- dev_err(&pdev->dev, "Failed to get IO memory\n");
+ if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
- }
tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
if (IS_ERR(tegra->reset)) {
@@ -612,11 +644,7 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return PTR_ERR(tegra->emc_clock);
}
- err = of_init_opp_table(&pdev->dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init operating point table\n");
- return err;
- }
+ clk_set_rate(tegra->emc_clock, ULONG_MAX);
tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
@@ -630,43 +658,41 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
err = clk_prepare_enable(tegra->clock);
if (err) {
- reset_control_deassert(tegra->reset);
+ dev_err(&pdev->dev,
+ "Failed to prepare and enable ACTMON clock\n");
return err;
}
reset_control_deassert(tegra->reset);
- max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
- tegra->max_freq = max_freq / KHZ;
-
- clk_set_rate(tegra->emc_clock, max_freq);
-
+ tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
- writel(ACTMON_SAMPLING_PERIOD - 1,
- tegra->regs + ACTMON_GLB_PERIOD_CTRL);
+ actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+ ACTMON_GLB_PERIOD_CTRL);
for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
dev = tegra->devices + i;
dev->config = actmon_device_configs + i;
dev->regs = tegra->regs + dev->config->offset;
+ spin_lock_init(&dev->lock);
- tegra_actmon_configure_device(tegra, tegra->devices + i);
+ tegra_actmon_configure_device(tegra, dev);
}
- err = devfreq_add_governor(&tegra_devfreq_governor);
- if (err) {
- dev_err(&pdev->dev, "Failed to add governor\n");
- return err;
+ for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
+ rate = clk_round_rate(tegra->emc_clock, rate);
+ dev_pm_opp_add(&pdev->dev, rate, 0);
}
- tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
- tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
- &tegra_devfreq_profile,
- "tegra",
- NULL);
-
irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, tegra);
+
err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
actmon_thread_isr, IRQF_SHARED,
"tegra-devfreq", tegra);
@@ -675,7 +701,11 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, tegra);
+ tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
+ tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
+ &tegra_devfreq_profile,
+ "tegra_actmon",
+ NULL);
return 0;
}
@@ -683,6 +713,19 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
static int tegra_devfreq_remove(struct platform_device *pdev)
{
struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ u32 val;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+ val = device_readl(&tegra->devices[i], ACTMON_DEV_CTRL);
+ val &= ~ACTMON_DEV_CTRL_ENB;
+ device_writel(&tegra->devices[i], val, ACTMON_DEV_CTRL);
+ }
+
+ actmon_write_barrier(tegra);
+
+ devm_free_irq(&pdev->dev, irq, tegra);
clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
@@ -691,28 +734,52 @@ static int tegra_devfreq_remove(struct platform_device *pdev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
- tegra_devfreq_suspend,
- tegra_devfreq_resume);
-
-static struct of_device_id tegra_devfreq_of_match[] = {
+static const struct of_device_id tegra_devfreq_of_match[] = {
{ .compatible = "nvidia,tegra124-actmon" },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
+
static struct platform_driver tegra_devfreq_driver = {
.probe = tegra_devfreq_probe,
.remove = tegra_devfreq_remove,
.driver = {
- .name = "tegra-devfreq",
- .owner = THIS_MODULE,
+ .name = "tegra-devfreq",
.of_match_table = tegra_devfreq_of_match,
- .pm = &tegra_devfreq_pm_ops,
},
};
-module_platform_driver(tegra_devfreq_driver);
-MODULE_LICENSE("GPL");
+static int __init tegra_devfreq_init(void)
+{
+ int ret = 0;
+
+ ret = devfreq_add_governor(&tegra_devfreq_governor);
+ if (ret) {
+ pr_err("%s: failed to add governor: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&tegra_devfreq_driver);
+ if (ret)
+ devfreq_remove_governor(&tegra_devfreq_governor);
+
+ return ret;
+}
+module_init(tegra_devfreq_init)
+
+static void __exit tegra_devfreq_exit(void)
+{
+ int ret = 0;
+
+ platform_driver_unregister(&tegra_devfreq_driver);
+
+ ret = devfreq_remove_governor(&tegra_devfreq_governor);
+ if (ret)
+ pr_err("%s: failed to remove governor: %d\n", __func__, ret);
+}
+module_exit(tegra_devfreq_exit)
+
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Tegra devfreq driver");
MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
-MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5be225c2ba98..c5a9138a6a8d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -265,43 +265,40 @@ static inline int is_dma_buf_file(struct file *file)
}
/**
- * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
* with this buffer, so it can be exported.
* Also connect the allocator specific data and ops to the buffer.
* Additionally, provide a name string for exporter; useful in debugging.
*
- * @priv: [in] Attach private data of allocator to this buffer
- * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
- * @size: [in] Size of the buffer
- * @flags: [in] mode flags for the file.
- * @exp_name: [in] name of the exporting module - useful for debugging.
- * @resv: [in] reservation-object, NULL to allocate default one.
+ * @exp_info: [in] holds all the export related information provided
+ * by the exporter. see struct dma_buf_export_info
+ * for further details.
*
* Returns, on success, a newly created dma_buf object, which wraps the
* supplied private data and operations for dma_buf_ops. On either missing
* ops, or error in allocating struct dma_buf, will return negative error.
*
*/
-struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
- size_t size, int flags, const char *exp_name,
- struct reservation_object *resv)
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
struct dma_buf *dmabuf;
+ struct reservation_object *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
- if (!resv)
+ if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
- if (WARN_ON(!priv || !ops
- || !ops->map_dma_buf
- || !ops->unmap_dma_buf
- || !ops->release
- || !ops->kmap_atomic
- || !ops->kmap
- || !ops->mmap)) {
+ if (WARN_ON(!exp_info->priv
+ || !exp_info->ops
+ || !exp_info->ops->map_dma_buf
+ || !exp_info->ops->unmap_dma_buf
+ || !exp_info->ops->release
+ || !exp_info->ops->kmap_atomic
+ || !exp_info->ops->kmap
+ || !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
}
@@ -309,10 +306,10 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
if (dmabuf == NULL)
return ERR_PTR(-ENOMEM);
- dmabuf->priv = priv;
- dmabuf->ops = ops;
- dmabuf->size = size;
- dmabuf->exp_name = exp_name;
+ dmabuf->priv = exp_info->priv;
+ dmabuf->ops = exp_info->ops;
+ dmabuf->size = exp_info->size;
+ dmabuf->exp_name = exp_info->exp_name;
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
@@ -323,7 +320,8 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
}
dmabuf->resv = resv;
- file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+ file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
+ exp_info->flags);
if (IS_ERR(file)) {
kfree(dmabuf);
return ERR_CAST(file);
@@ -341,8 +339,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
return dmabuf;
}
-EXPORT_SYMBOL_GPL(dma_buf_export_named);
-
+EXPORT_SYMBOL_GPL(dma_buf_export);
/**
* dma_buf_fd - returns a file descriptor for the given dma_buf
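
As a rough usage sketch (not part of this patch): all parameters formerly passed to dma_buf_export_named() are now carried in a single struct dma_buf_export_info, whose fields mirror those read by dma_buf_export() above. The names my_export() and my_dma_buf_ops below are hypothetical and only illustrate the calling convention.

#include <linux/dma-buf.h>
#include <linux/fcntl.h>

/* Hypothetical exporter: hand an already-set-up buffer to dma_buf_export().
 * my_dma_buf_ops is assumed to exist and to provide map_dma_buf, unmap_dma_buf,
 * release, kmap, kmap_atomic and mmap, as checked by the WARN_ON() above. */
static struct dma_buf *my_export(void *priv, size_t size)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME,	/* exporter name, useful for debugging */
		.ops      = &my_dma_buf_ops,
		.size     = size,
		.flags    = O_RDWR | O_CLOEXEC,	/* mode flags for the anon file */
		.resv     = NULL,		/* NULL: dma_buf_export() allocates one */
		.priv     = priv,
	};

	return dma_buf_export(&exp_info);
}

One practical effect of the new interface: adding a field to the export info no longer changes every caller's argument list.
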
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 942ca541dcbd..bda2cb06dc7a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -112,6 +112,19 @@ config FSL_DMA
EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
some Txxx and Bxxx parts.
+config FSL_RAID
+ tristate "Freescale RAID engine Support"
+ depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ ---help---
Enable support for the Freescale RAID Engine. The RAID Engine is
available on some QorIQ SoCs (like P5020/P5040). It can offload
memcpy, xor and pq computation for raid5/6.
+
+source "drivers/dma/hsu/Kconfig"
+
config MPC512X_DMA
tristate "Freescale MPC512x built-in DMA engine support"
depends on PPC_MPC512x || PPC_MPC831x
@@ -345,6 +358,16 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config DMA_JZ4780
+ tristate "JZ4780 DMA support"
+ depends on MACH_JZ4780
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+ If you have a board based on such a SoC and wish to use DMA for
+ devices which can use the DMA controller, say Y or M here.
+
config K3_DMA
tristate "Hisilicon K3 DMA support"
depends on ARCH_HI3xxx
@@ -412,6 +435,15 @@ config IMG_MDC_DMA
help
Enable support for the IMG multi-threaded DMA controller (MDC).
+config XGENE_DMA
+ tristate "APM X-Gene DMA support"
+ depends on ARCH_XGENE || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Enable support for the APM X-Gene SoC DMA engine.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 539d4825bd76..69f77d5ba53b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
@@ -40,9 +41,11 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-y += xilinx/
@@ -50,3 +53,4 @@ obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 83aa55d6fa5d..49d396ec06e5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -15,10 +15,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is in this distribution in the file
* called COPYING.
*
@@ -1195,11 +1191,6 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
/*
* The DMA ENGINE API
*/
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -2066,7 +2057,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
- pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
@@ -2085,7 +2075,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
pl08x->slave.dev = &adev->dev;
- pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 0b4fc6fb48ce..57b2141ddddc 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -65,6 +65,21 @@ static void atc_issue_pending(struct dma_chan *chan);
/*----------------------------------------------------------------------*/
+static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
+ size_t len)
+{
+ unsigned int width;
+
+ if (!((src | dst | len) & 3))
+ width = 2;
+ else if (!((src | dst | len) & 1))
+ width = 1;
+ else
+ width = 0;
+
+ return width;
+}
+
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
return list_first_entry(&atchan->active_list,
@@ -659,16 +674,10 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* We can be a lot more clever here, but this should take care
* of the most common optimization.
*/
- if (!((src | dest | len) & 3)) {
- ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
- src_width = dst_width = 2;
- } else if (!((src | dest | len) & 1)) {
- ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
- src_width = dst_width = 1;
- } else {
- ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
- src_width = dst_width = 0;
- }
+ src_width = dst_width = atc_get_xfer_width(src, dest, len);
+
+ ctrla = ATC_SRC_WIDTH(src_width) |
+ ATC_DST_WIDTH(dst_width);
for (offset = 0; offset < len; offset += xfer_count << src_width) {
xfer_count = min_t(size_t, (len - offset) >> src_width,
@@ -862,6 +871,144 @@ err:
}
/**
+ * atc_prep_dma_sg - prepare a memory-to-memory scatter-gather operation
+ * @chan: the channel to prepare operation on
+ * @dst_sg: destination scatterlist
+ * @dst_nents: number of destination scatterlist entries
+ * @src_sg: source scatterlist
+ * @src_nents: number of source scatterlist entries
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_desc *desc = NULL;
+ struct at_desc *first = NULL;
+ struct at_desc *prev = NULL;
+ unsigned int src_width;
+ unsigned int dst_width;
+ size_t xfer_count;
+ u32 ctrla;
+ u32 ctrlb;
+ size_t dst_len = 0, src_len = 0;
+ dma_addr_t dst = 0, src = 0;
+ size_t len = 0, total_len = 0;
+
+ if (unlikely(dst_nents == 0 || src_nents == 0))
+ return NULL;
+
+ if (unlikely(dst_sg == NULL || src_sg == NULL))
+ return NULL;
+
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
+ | ATC_SRC_ADDR_MODE_INCR
+ | ATC_DST_ADDR_MODE_INCR
+ | ATC_FC_MEM2MEM;
+
+ /*
+ * loop until there is either no more source or no more destination
+ * scatterlist entry
+ */
+ while (true) {
+
+ /* prepare the next transfer */
+ if (dst_len == 0) {
+
+ /* no more destination scatterlist entries */
+ if (!dst_sg || !dst_nents)
+ break;
+
+ dst = sg_dma_address(dst_sg);
+ dst_len = sg_dma_len(dst_sg);
+
+ dst_sg = sg_next(dst_sg);
+ dst_nents--;
+ }
+
+ if (src_len == 0) {
+
+ /* no more source scatterlist entries */
+ if (!src_sg || !src_nents)
+ break;
+
+ src = sg_dma_address(src_sg);
+ src_len = sg_dma_len(src_sg);
+
+ src_sg = sg_next(src_sg);
+ src_nents--;
+ }
+
+ len = min_t(size_t, src_len, dst_len);
+ if (len == 0)
+ continue;
+
+ /* take care of the alignment */
+ src_width = dst_width = atc_get_xfer_width(src, dst, len);
+
+ ctrla = ATC_SRC_WIDTH(src_width) |
+ ATC_DST_WIDTH(dst_width);
+
+ /*
+ * The number of transfers to set up refers to the source width,
+ * which depends on the alignment.
+ */
+ xfer_count = len >> src_width;
+ if (xfer_count > ATC_BTSIZE_MAX) {
+ xfer_count = ATC_BTSIZE_MAX;
+ len = ATC_BTSIZE_MAX << src_width;
+ }
+
+ /* create the transfer */
+ desc = atc_desc_get(atchan);
+ if (!desc)
+ goto err_desc_get;
+
+ desc->lli.saddr = src;
+ desc->lli.daddr = dst;
+ desc->lli.ctrla = ctrla | xfer_count;
+ desc->lli.ctrlb = ctrlb;
+
+ desc->txd.cookie = 0;
+ desc->len = len;
+
+ /*
+ * Although we only need the transfer width for the first and
+ * the last descriptor, it's easier to set it on all descriptors.
+ */
+ desc->tx_width = src_width;
+
+ atc_desc_chain(&first, &prev, desc);
+
+ /* update the lengths and addresses for the next loop cycle */
+ dst_len -= len;
+ src_len -= len;
+ dst += len;
+ src += len;
+
+ total_len += len;
+ }
+
+ /* The first descriptor of the chain embeds additional information */
+ first->txd.cookie = -EBUSY;
+ first->total_len = total_len;
+
+ /* set end-of-link on the last link descriptor of the list */
+ set_desc_eol(desc);
+
+ first->txd.flags = flags; /* client is in control of this ack */
+
+ return &first->txd;
+
+err_desc_get:
+ atc_desc_put(atchan, first);
+ return NULL;
+}
+
+/**
* atc_dma_cyclic_check_values
* Check for too big/unaligned periods and unaligned DMA buffer
*/
@@ -1461,8 +1608,10 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* setup platform data for each SoC */
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+ dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
/* get DMA parameters from controller type */
plat_dat = at_dma_get_driver_data(pdev);
@@ -1582,11 +1731,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
}
+ if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
+ atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
+
dma_writel(atdma, EN, AT_DMA_ENABLE);
- dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
+ dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
+ dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
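
A stand-alone sketch (not driver code) of the alignment rule that atc_get_xfer_width() factors out above: the lowest common alignment of source, destination and length selects the bus width code (0 = byte, 1 = half-word, 2 = word).

#include <stdio.h>

static unsigned int xfer_width(unsigned long src, unsigned long dst, unsigned long len)
{
	if (!((src | dst | len) & 3))
		return 2;	/* everything 4-byte aligned: word transfers */
	if (!((src | dst | len) & 1))
		return 1;	/* everything 2-byte aligned: half-word transfers */
	return 0;		/* otherwise fall back to byte transfers */
}

int main(void)
{
	printf("%u\n", xfer_width(0x1000, 0x2000, 64));	/* prints 2 */
	printf("%u\n", xfer_width(0x1002, 0x2004, 62));	/* prints 1 */
	printf("%u\n", xfer_width(0x1001, 0x2000, 64));	/* prints 0 */
	return 0;
}
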
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index d9891d3461f6..933e4b338459 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1154,8 +1154,10 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s\n", __func__);
spin_lock_bh(&atchan->lock);
- if (!at_xdmac_chan_is_paused(atchan))
+ if (!at_xdmac_chan_is_paused(atchan)) {
+ spin_unlock_bh(&atchan->lock);
return 0;
+ }
at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index fa378d88f6c8..180fedb418cc 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -30,7 +30,7 @@
#define DRIVER_NAME "bestcomm-core"
/* MPC5200 device tree match tables */
-static struct of_device_id mpc52xx_sram_ids[] = {
+static const struct of_device_id mpc52xx_sram_ids[] = {
{ .compatible = "fsl,mpc5200-sram", },
{ .compatible = "mpc5200-sram", },
{}
@@ -481,7 +481,7 @@ static int mpc52xx_bcom_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc52xx_bcom_of_match[] = {
+static const struct of_device_id mpc52xx_bcom_of_match[] = {
{ .compatible = "fsl,mpc5200-bestcomm", },
{ .compatible = "mpc5200-bestcomm", },
{},
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 84884418fd30..7638b24ce8d0 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -7,10 +7,6 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/dmaengine.h>
@@ -343,7 +339,7 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
{
spin_lock(&chan->vchan.lock);
if (chan->desc) {
- if (chan->desc && chan->desc->cyclic) {
+ if (chan->desc->cyclic) {
vchan_cyclic_callback(&chan->desc->vdesc);
} else {
if (chan->next_sg == chan->desc->num_sgs) {
@@ -496,11 +492,6 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
return status;
}
-static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
-{
- return 0;
-}
-
static void jz4740_dma_free_chan_resources(struct dma_chan *c)
{
vchan_free_chan_resources(to_virt_chan(c));
@@ -543,7 +534,6 @@ static int jz4740_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, dd->cap_mask);
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
- dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
dd->device_tx_status = jz4740_dma_tx_status;
dd->device_issue_pending = jz4740_dma_issue_pending;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
new file mode 100644
index 000000000000..26d2f0e09ea3
--- /dev/null
+++ b/drivers/dma/dma-jz4780.c
@@ -0,0 +1,877 @@
+/*
+ * Ingenic JZ4780 DMA controller
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define JZ_DMA_NR_CHANNELS 32
+
+/* Global registers. */
+#define JZ_DMA_REG_DMAC 0x1000
+#define JZ_DMA_REG_DIRQP 0x1004
+#define JZ_DMA_REG_DDR 0x1008
+#define JZ_DMA_REG_DDRS 0x100c
+#define JZ_DMA_REG_DMACP 0x101c
+#define JZ_DMA_REG_DSIRQP 0x1020
+#define JZ_DMA_REG_DSIRQM 0x1024
+#define JZ_DMA_REG_DCIRQP 0x1028
+#define JZ_DMA_REG_DCIRQM 0x102c
+
+/* Per-channel registers. */
+#define JZ_DMA_REG_CHAN(n) (n * 0x20)
+#define JZ_DMA_REG_DSA(n) (0x00 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DTA(n) (0x04 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DTC(n) (0x08 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DRT(n) (0x0c + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DCS(n) (0x10 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DCM(n) (0x14 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DDA(n) (0x18 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DSD(n) (0x1c + JZ_DMA_REG_CHAN(n))
+
+#define JZ_DMA_DMAC_DMAE BIT(0)
+#define JZ_DMA_DMAC_AR BIT(2)
+#define JZ_DMA_DMAC_HLT BIT(3)
+#define JZ_DMA_DMAC_FMSC BIT(31)
+
+#define JZ_DMA_DRT_AUTO 0x8
+
+#define JZ_DMA_DCS_CTE BIT(0)
+#define JZ_DMA_DCS_HLT BIT(2)
+#define JZ_DMA_DCS_TT BIT(3)
+#define JZ_DMA_DCS_AR BIT(4)
+#define JZ_DMA_DCS_DES8 BIT(30)
+
+#define JZ_DMA_DCM_LINK BIT(0)
+#define JZ_DMA_DCM_TIE BIT(1)
+#define JZ_DMA_DCM_STDE BIT(2)
+#define JZ_DMA_DCM_TSZ_SHIFT 8
+#define JZ_DMA_DCM_TSZ_MASK (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
+#define JZ_DMA_DCM_DP_SHIFT 12
+#define JZ_DMA_DCM_SP_SHIFT 14
+#define JZ_DMA_DCM_DAI BIT(22)
+#define JZ_DMA_DCM_SAI BIT(23)
+
+#define JZ_DMA_SIZE_4_BYTE 0x0
+#define JZ_DMA_SIZE_1_BYTE 0x1
+#define JZ_DMA_SIZE_2_BYTE 0x2
+#define JZ_DMA_SIZE_16_BYTE 0x3
+#define JZ_DMA_SIZE_32_BYTE 0x4
+#define JZ_DMA_SIZE_64_BYTE 0x5
+#define JZ_DMA_SIZE_128_BYTE 0x6
+
+#define JZ_DMA_WIDTH_32_BIT 0x0
+#define JZ_DMA_WIDTH_8_BIT 0x1
+#define JZ_DMA_WIDTH_16_BIT 0x2
+
+#define JZ_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+/**
+ * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
+ * @dcm: value for the DCM (channel command) register
+ * @dsa: source address
+ * @dta: target address
+ * @dtc: transfer count (number of blocks of the transfer size specified in DCM
+ * to transfer) in the low 24 bits, offset of the next descriptor from the
+ * descriptor base address in the upper 8 bits.
+ * @sd: target/source stride difference (in stride transfer mode).
+ * @drt: request type
+ */
+struct jz4780_dma_hwdesc {
+ uint32_t dcm;
+ uint32_t dsa;
+ uint32_t dta;
+ uint32_t dtc;
+ uint32_t sd;
+ uint32_t drt;
+ uint32_t reserved[2];
+};
+
+/* Size of allocations for hardware descriptor blocks. */
+#define JZ_DMA_DESC_BLOCK_SIZE PAGE_SIZE
+#define JZ_DMA_MAX_DESC \
+ (JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
+
+struct jz4780_dma_desc {
+ struct virt_dma_desc vdesc;
+
+ struct jz4780_dma_hwdesc *desc;
+ dma_addr_t desc_phys;
+ unsigned int count;
+ enum dma_transaction_type type;
+ uint32_t status;
+};
+
+struct jz4780_dma_chan {
+ struct virt_dma_chan vchan;
+ unsigned int id;
+ struct dma_pool *desc_pool;
+
+ uint32_t transfer_type;
+ uint32_t transfer_shift;
+ struct dma_slave_config config;
+
+ struct jz4780_dma_desc *desc;
+ unsigned int curr_hwdesc;
+};
+
+struct jz4780_dma_dev {
+ struct dma_device dma_device;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int irq;
+
+ uint32_t chan_reserved;
+ struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
+};
+
+struct jz4780_dma_data {
+ uint32_t transfer_type;
+ int channel;
+};
+
+static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct jz4780_dma_chan, vchan.chan);
+}
+
+static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
+ struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct jz4780_dma_desc, vdesc);
+}
+
+static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
+ struct jz4780_dma_chan *jzchan)
+{
+ return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
+ dma_device);
+}
+
+static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
+ unsigned int reg)
+{
+ return readl(jzdma->base + reg);
+}
+
+static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
+ unsigned int reg, uint32_t val)
+{
+ writel(val, jzdma->base + reg);
+}
+
+static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
+ struct jz4780_dma_chan *jzchan, unsigned int count,
+ enum dma_transaction_type type)
+{
+ struct jz4780_dma_desc *desc;
+
+ if (count > JZ_DMA_MAX_DESC)
+ return NULL;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
+ &desc->desc_phys);
+ if (!desc->desc) {
+ kfree(desc);
+ return NULL;
+ }
+
+ desc->count = count;
+ desc->type = type;
+ return desc;
+}
+
+static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);
+
+ dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
+ kfree(desc);
+}
+
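+/*
+ * Map the largest power-of-two block size that divides @val to the
+ * hardware TSZ encoding, reporting the corresponding shift via @ord.
+ * There is no encoding for 8-byte blocks (order 3), so such values
+ * fall through to the error case.
+ */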
+static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
+{
+ *ord = ffs(val) - 1;
+
+ switch (*ord) {
+ case 0:
+ return JZ_DMA_SIZE_1_BYTE;
+ case 1:
+ return JZ_DMA_SIZE_2_BYTE;
+ case 2:
+ return JZ_DMA_SIZE_4_BYTE;
+ case 4:
+ return JZ_DMA_SIZE_16_BYTE;
+ case 5:
+ return JZ_DMA_SIZE_32_BYTE;
+ case 6:
+ return JZ_DMA_SIZE_64_BYTE;
+ case 7:
+ return JZ_DMA_SIZE_128_BYTE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
+ struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
+ enum dma_transfer_direction direction)
+{
+ struct dma_slave_config *config = &jzchan->config;
+ uint32_t width, maxburst, tsz;
+ int ord;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ desc->dcm = JZ_DMA_DCM_SAI;
+ desc->dsa = addr;
+ desc->dta = config->dst_addr;
+ desc->drt = jzchan->transfer_type;
+
+ width = config->dst_addr_width;
+ maxburst = config->dst_maxburst;
+ } else {
+ desc->dcm = JZ_DMA_DCM_DAI;
+ desc->dsa = config->src_addr;
+ desc->dta = addr;
+ desc->drt = jzchan->transfer_type;
+
+ width = config->src_addr_width;
+ maxburst = config->src_maxburst;
+ }
+
+ /*
+ * This calculates the maximum transfer size that can be used with the
+ * given address, length, width and maximum burst size. The address
+ * must be aligned to the transfer size, the total length must be
+ * divisible by the transfer size, and we must not use more than the
+ * maximum burst specified by the user.
+ */
+ tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
+ jzchan->transfer_shift = ord;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ width = JZ_DMA_WIDTH_32_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
+ desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
+ desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
+
+ desc->dtc = len >> ord;
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_desc *desc;
+ unsigned int i;
+ int err;
+
+ desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < sg_len; i++) {
+ err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
+ sg_dma_address(&sgl[i]),
+ sg_dma_len(&sgl[i]),
+ direction);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
+
+ if (i != (sg_len - 1)) {
+ /* Automatically proceed to the next descriptor. */
+ desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
+
+ /*
+ * The upper 8 bits of the DTC field in the descriptor
+ * must be set to (offset from descriptor base of next
+ * descriptor >> 4).
+ */
+ desc->desc[i].dtc |=
+ (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
+ }
+ }
+
+ return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_desc *desc;
+ unsigned int periods, i;
+ int err;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ periods = buf_len / period_len;
+
+ desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
+ period_len, direction);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ buf_addr += period_len;
+
+ /*
+ * Set the link bit to indicate that the controller should
+ * automatically proceed to the next descriptor. In
+ * jz4780_dma_begin(), this will be cleared if we need to issue
+ * an interrupt after each period.
+ */
+ desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;
+
+ /*
+ * The upper 8 bits of the DTC field in the descriptor must be
+ * set to (offset from descriptor base of next descriptor >> 4).
+ * If this is the last descriptor, link it back to the first,
+ * i.e. leave offset set to 0, otherwise point to the next one.
+ */
+ if (i != (periods - 1)) {
+ desc->desc[i].dtc |=
+ (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
+ }
+ }
+
+ return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_desc *desc;
+ uint32_t tsz;
+ int ord;
+
+ desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
+ if (!desc)
+ return NULL;
+
+ tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
+ if (tsz < 0)
+ return ERR_PTR(tsz);
+
+ desc->desc[0].dsa = src;
+ desc->desc[0].dta = dest;
+ desc->desc[0].drt = JZ_DMA_DRT_AUTO;
+ desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
+ tsz << JZ_DMA_DCM_TSZ_SHIFT |
+ JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
+ JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
+ desc->desc[0].dtc = len >> ord;
+
+ return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
+}
+
+static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
+{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ struct virt_dma_desc *vdesc;
+ unsigned int i;
+ dma_addr_t desc_phys;
+
+ if (!jzchan->desc) {
+ vdesc = vchan_next_desc(&jzchan->vchan);
+ if (!vdesc)
+ return;
+
+ list_del(&vdesc->node);
+
+ jzchan->desc = to_jz4780_dma_desc(vdesc);
+ jzchan->curr_hwdesc = 0;
+
+ if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
+ /*
+ * The DMA controller doesn't support triggering an
+ * interrupt after processing each descriptor, only
+ * after processing an entire terminated list of
+ * descriptors. For a cyclic DMA setup the list of
+ * descriptors is not terminated so we can never get an
+ * interrupt.
+ *
+ * If the user requested a callback for a cyclic DMA
+ * setup then we work around this hardware limitation
+ * here by degrading to a set of unlinked descriptors
+ * which we will submit in sequence in response to the
+ * completion of processing the previous descriptor.
+ */
+ for (i = 0; i < jzchan->desc->count; i++)
+ jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
+ }
+ } else {
+ /*
+ * There is an existing transfer, therefore this must be one
+ * for which we unlinked the descriptors above. Advance to the
+ * next one in the list.
+ */
+ jzchan->curr_hwdesc =
+ (jzchan->curr_hwdesc + 1) % jzchan->desc->count;
+ }
+
+ /* Use 8-word descriptors. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
+
+ /* Write descriptor address and initiate descriptor fetch. */
+ desc_phys = jzchan->desc->desc_phys +
+ (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
+
+ /* Enable the channel. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
+ JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
+}
+
+static void jz4780_dma_issue_pending(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
+ jz4780_dma_begin(jzchan);
+
+ spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+}
+
+static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan)
+{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
+ /* Clear the DMA status and stop the transfer. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ if (jzchan->desc) {
+ jz4780_dma_desc_free(&jzchan->desc->vdesc);
+ jzchan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&jzchan->vchan, &head);
+
+ spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&jzchan->vchan, &head);
+ return 0;
+}
+
+static int jz4780_dma_slave_config(struct jz4780_dma_chan *jzchan,
+ const struct dma_slave_config *config)
+{
+ if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
+ return -EINVAL;
+
+ /* Copy the rest of the slave configuration; it is used later. */
+ memcpy(&jzchan->config, config, sizeof(jzchan->config));
+
+ return 0;
+}
+
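+/*
+ * Sum up the bytes still to be transferred: all hardware descriptors from
+ * @next_sg onwards, plus (when a descriptor is already in flight) whatever
+ * the channel's DTC register reports as outstanding.
+ */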
+static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
+ struct jz4780_dma_desc *desc, unsigned int next_sg)
+{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ unsigned int residue, count;
+ unsigned int i;
+
+ residue = 0;
+
+ for (i = next_sg; i < desc->count; i++)
+ residue += desc->desc[i].dtc << jzchan->transfer_shift;
+
+ if (next_sg != 0) {
+ count = jz4780_dma_readl(jzdma,
+ JZ_DMA_REG_DTC(jzchan->id));
+ residue += count << jzchan->transfer_shift;
+ }
+
+ return residue;
+}
+
+static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if ((status == DMA_COMPLETE) || (txstate == NULL))
+ return status;
+
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
+ vdesc = vchan_find_desc(&jzchan->vchan, cookie);
+ if (vdesc) {
+ /* On the issued list, so hasn't been processed yet */
+ txstate->residue = jz4780_dma_desc_residue(jzchan,
+ to_jz4780_dma_desc(vdesc), 0);
+ } else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
+ txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
+ (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
+ } else
+ txstate->residue = 0;
+
+ if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
+ && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+ status = DMA_ERROR;
+
+ spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+ return status;
+}
+
+static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+ struct jz4780_dma_chan *jzchan)
+{
+ uint32_t dcs;
+
+ spin_lock(&jzchan->vchan.lock);
+
+ dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+
+ if (dcs & JZ_DMA_DCS_AR) {
+ dev_warn(&jzchan->vchan.chan.dev->device,
+ "address error (DCS=0x%x)\n", dcs);
+ }
+
+ if (dcs & JZ_DMA_DCS_HLT) {
+ dev_warn(&jzchan->vchan.chan.dev->device,
+ "channel halt (DCS=0x%x)\n", dcs);
+ }
+
+ if (jzchan->desc) {
+ jzchan->desc->status = dcs;
+
+ if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
+ if (jzchan->desc->type == DMA_CYCLIC) {
+ vchan_cyclic_callback(&jzchan->desc->vdesc);
+ } else {
+ vchan_cookie_complete(&jzchan->desc->vdesc);
+ jzchan->desc = NULL;
+ }
+
+ jz4780_dma_begin(jzchan);
+ }
+ } else {
+ dev_err(&jzchan->vchan.chan.dev->device,
+ "channel IRQ with no active transfer\n");
+ }
+
+ spin_unlock(&jzchan->vchan.lock);
+}
+
+static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
+{
+ struct jz4780_dma_dev *jzdma = data;
+ uint32_t pending, dmac;
+ int i;
+
+ pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
+
+ for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ if (!(pending & (1<<i)))
+ continue;
+
+ jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+ }
+
+ /* Clear halt and address error status of all channels. */
+ dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
+ dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+
+ /* Clear interrupt pending status. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
+ chan->device->dev,
+ JZ_DMA_DESC_BLOCK_SIZE,
+ PAGE_SIZE, 0);
+ if (!jzchan->desc_pool) {
+ dev_err(&chan->dev->device,
+ "failed to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ vchan_free_chan_resources(&jzchan->vchan);
+ dma_pool_destroy(jzchan->desc_pool);
+ jzchan->desc_pool = NULL;
+}
+
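+/*
+ * Channel filter: if the slave requested a specific channel via the DT,
+ * only accept that channel; otherwise accept any channel that is not in
+ * the reserved mask. Record the requested transfer type on success.
+ */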
+static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ struct jz4780_dma_data *data = param;
+
+ if (data->channel > -1) {
+ if (data->channel != jzchan->id)
+ return false;
+ } else if (jzdma->chan_reserved & BIT(jzchan->id)) {
+ return false;
+ }
+
+ jzchan->transfer_type = data->transfer_type;
+
+ return true;
+}
+
+static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
+ struct jz4780_dma_data data;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ data.transfer_type = dma_spec->args[0];
+ data.channel = dma_spec->args[1];
+
+ if (data.channel > -1) {
+ if (data.channel >= JZ_DMA_NR_CHANNELS) {
+ dev_err(jzdma->dma_device.dev,
+ "device requested non-existent channel %u\n",
+ data.channel);
+ return NULL;
+ }
+
+ /* Can only select a channel marked as reserved. */
+ if (!(jzdma->chan_reserved & BIT(data.channel))) {
+ dev_err(jzdma->dma_device.dev,
+ "device requested unreserved channel %u\n",
+ data.channel);
+ return NULL;
+ }
+ }
+
+ return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+}
+
+static int jz4780_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct jz4780_dma_dev *jzdma;
+ struct jz4780_dma_chan *jzchan;
+ struct dma_device *dd;
+ struct resource *res;
+ int i, ret;
+
+ jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+ if (!jzdma)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, jzdma);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O memory\n");
+ return -EINVAL;
+ }
+
+ jzdma->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->base))
+ return PTR_ERR(jzdma->base);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get IRQ: %d\n", ret);
+ return ret;
+ }
+ jzdma->irq = ret;
+
+ ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
+ dev_name(dev), jzdma);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
+ return -EINVAL;
+ }
+
+ jzdma->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(jzdma->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(jzdma->clk);
+ }
+
+ clk_prepare_enable(jzdma->clk);
+
+ /* Property is optional, if it doesn't exist the value will remain 0. */
+ of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
+ 0, &jzdma->chan_reserved);
+
+ dd = &jzdma->dma_device;
+
+ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+ dma_cap_set(DMA_SLAVE, dd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+
+ dd->dev = dev;
+ dd->copy_align = 2; /* 2^2 = 4 byte alignment */
+ dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
+ dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
+ dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
+ dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
+ dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
+ dd->device_config = jz4780_dma_slave_config;
+ dd->device_terminate_all = jz4780_dma_terminate_all;
+ dd->device_tx_status = jz4780_dma_tx_status;
+ dd->device_issue_pending = jz4780_dma_issue_pending;
+ dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
+ dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
+ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ /*
+ * Enable DMA controller, mark all channels as not programmable.
+ * Also set the FMSC bit - it increases MSC performance, so it makes
+ * little sense not to enable it.
+ */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
+ JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
+
+ INIT_LIST_HEAD(&dd->channels);
+
+ for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ jzchan = &jzdma->chan[i];
+ jzchan->id = i;
+
+ vchan_init(&jzchan->vchan, dd);
+ jzchan->vchan.desc_free = jz4780_dma_desc_free;
+ }
+
+ ret = dma_async_device_register(dd);
+ if (ret) {
+ dev_err(dev, "failed to register device\n");
+ goto err_disable_clk;
+ }
+
+ /* Register with OF DMA helpers. */
+ ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
+ jzdma);
+ if (ret) {
+ dev_err(dev, "failed to register OF DMA controller\n");
+ goto err_unregister_dev;
+ }
+
+ dev_info(dev, "JZ4780 DMA controller initialised\n");
+ return 0;
+
+err_unregister_dev:
+ dma_async_device_unregister(dd);
+
+err_disable_clk:
+ clk_disable_unprepare(jzdma->clk);
+ return ret;
+}
+
+static int jz4780_dma_remove(struct platform_device *pdev)
+{
+ struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
+ dma_async_device_unregister(&jzdma->dma_device);
+ return 0;
+}
+
+static const struct of_device_id jz4780_dma_dt_match[] = {
+ { .compatible = "ingenic,jz4780-dma", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
+
+static struct platform_driver jz4780_dma_driver = {
+ .probe = jz4780_dma_probe,
+ .remove = jz4780_dma_remove,
+ .driver = {
+ .name = "jz4780-dma",
+ .of_match_table = of_match_ptr(jz4780_dma_dt_match),
+ },
+};
+
+static int __init jz4780_dma_init(void)
+{
+ return platform_driver_register(&jz4780_dma_driver);
+}
+subsys_initcall(jz4780_dma_init);
+
+static void __exit jz4780_dma_exit(void)
+{
+ platform_driver_unregister(&jz4780_dma_driver);
+}
+module_exit(jz4780_dma_exit);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
+MODULE_LICENSE("GPL");
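
A stand-alone sketch (not driver code) of the DTC encoding used by the prep routines above, assuming the 8-word (32-byte) struct jz4780_dma_hwdesc declared in this file: the low 24 bits carry the block count (length >> transfer_shift) and the upper 8 bits carry the byte offset of the next linked descriptor divided by 16.

#include <stdint.h>
#include <stdio.h>

#define HWDESC_SIZE 32u	/* sizeof(struct jz4780_dma_hwdesc): eight 32-bit words */

/* Encode the DTC word for descriptor i of a linked chain. */
static uint32_t encode_dtc(unsigned int i, size_t len, unsigned int transfer_shift)
{
	uint32_t dtc = (uint32_t)(len >> transfer_shift);	/* low 24 bits: block count */

	/* upper 8 bits: offset of the next descriptor from the base, >> 4 */
	dtc |= ((((i + 1) * HWDESC_SIZE) >> 4) & 0xffu) << 24;
	return dtc;
}

int main(void)
{
	/* 4 KiB segment moved as 32-byte blocks (transfer_shift = 5), desc 0 linking to desc 1. */
	printf("0x%08x\n", encode_dtc(0, 4096, 5));	/* prints 0x02000080 */
	return 0;
}

For the last descriptor the driver either leaves the offset at 0 (cyclic mode, wrapping back to the first descriptor) or simply does not set JZ_DMA_DCM_LINK (slave_sg mode).
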
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ac336a961dea..2890d744bb1b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -355,20 +351,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
}
EXPORT_SYMBOL(dma_find_channel);
-/*
- * net_dma_find_channel - find a channel for net_dma
- * net_dma has alignment requirements
- */
-struct dma_chan *net_dma_find_channel(void)
-{
- struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
- if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
- return NULL;
-
- return chan;
-}
-EXPORT_SYMBOL(net_dma_find_channel);
-
/**
* dma_issue_pending_all - flush all pending operations across all channels
*/
@@ -589,11 +571,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
chan = private_candidate(&mask, device, NULL, NULL);
if (chan) {
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
err = dma_chan_get(chan);
if (err) {
pr_debug("%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
chan = NULL;
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
}
}
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index dcfe964cc8dc..36e02f0f645e 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -3,7 +3,7 @@
#
config DW_DMAC_CORE
- tristate "Synopsys DesignWare AHB DMA support"
+ tristate
select DMA_ENGINE
config DW_DMAC
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index a8ad05291b27..1022c2e1a2b0 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -230,7 +230,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
- "BUG: Attempted to start non-idle channel\n");
+ "%s: BUG: Attempted to start non-idle channel\n",
+ __func__);
dwc_dump_chan_regs(dwc);
/* The tasklet will hopefully advance the queue... */
@@ -814,11 +815,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
+ if (!desc)
goto err_desc_get;
- }
desc->lli.sar = mem;
desc->lli.dar = reg;
@@ -874,11 +872,8 @@ slave_sg_todev_fill_desc:
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
+ if (!desc)
goto err_desc_get;
- }
desc->lli.sar = reg;
desc->lli.dar = mem;
@@ -922,6 +917,8 @@ slave_sg_fromdev_fill_desc:
return &first->txd;
err_desc_get:
+ dev_err(chan2dev(chan),
+ "not enough descriptors available. Direction %d\n", direction);
dwc_desc_put(dwc, first);
return NULL;
}
@@ -1261,7 +1258,8 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
/* Assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
- "BUG: Attempted to start non-idle channel\n");
+ "%s: BUG: Attempted to start non-idle channel\n",
+ __func__);
dwc_dump_chan_regs(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 53dbd3b3384c..bf09db7ca9ee 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -812,7 +812,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
LIST_HEAD(descs);
a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
- chan, EVENTQ_DEFAULT);
+ echan, EVENTQ_DEFAULT);
if (a_ch_num < 0) {
ret = -ENODEV;
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
new file mode 100644
index 000000000000..4d9470f16552
--- /dev/null
+++ b/drivers/dma/fsl_raid.c
@@ -0,0 +1,904 @@
+/*
+ * drivers/dma/fsl_raid.c
+ *
+ * Freescale RAID Engine device driver
+ *
+ * Author:
+ * Harninder Rai <harninder.rai@freescale.com>
+ * Naveen Burmi <naveenburmi@freescale.com>
+ *
+ * Rewrite:
+ * Xuelin Shi <xuelin.shi@freescale.com>
+ *
+ * Copyright (c) 2010-2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Theory of operation:
+ *
+ * General capabilities:
+ * The RAID Engine (RE) block can offload the XOR, memcpy and P/Q
+ * calculations required by RAID5 and RAID6 operations. The RE driver
+ * registers with Linux's ASYNC layer as a DMA driver. The RE hardware
+ * maintains strict ordering of requests through chained command
+ * queueing.
+ *
+ * Data flow:
+ * The software RAID layer of Linux (the MD layer) maintains RAID
+ * partitions, strips, stripes etc. It sends requests to the underlying
+ * ASYNC layer, which passes them on to the RE driver. The ASYNC layer
+ * decides which request goes to which job ring of the RE hardware. For
+ * every request processed by the RAID Engine the driver gets an
+ * interrupt, unless coalescing is set. The per-job-ring interrupt
+ * handler checks the status register for errors, clears the interrupt
+ * and leaves the post-interrupt processing to the IRQ thread.
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "fsl_raid.h"
+
+#define FSL_RE_MAX_XOR_SRCS 16
+#define FSL_RE_MAX_PQ_SRCS 16
+#define FSL_RE_MIN_DESCS 256
+#define FSL_RE_MAX_DESCS (4 * FSL_RE_MIN_DESCS)
+#define FSL_RE_FRAME_FORMAT 0x1
+#define FSL_RE_MAX_DATA_LEN (1024*1024)
+
+#define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
+
+/* Add descriptors into per chan software queue - submit_q */
+static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct fsl_re_desc *desc;
+ struct fsl_re_chan *re_chan;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ desc = to_fsl_re_dma_desc(tx);
+ re_chan = container_of(tx->chan, struct fsl_re_chan, chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &re_chan->submit_q);
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+
+ return cookie;
+}
+
+/* Copy descriptor from per chan software queue into hardware job ring */
+static void fsl_re_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_re_chan *re_chan;
+ int avail;
+ struct fsl_re_desc *desc, *_desc;
+ unsigned long flags;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ avail = FSL_RE_SLOT_AVAIL(
+ in_be32(&re_chan->jrregs->inbring_slot_avail));
+
+ list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
+ if (!avail)
+ break;
+
+ list_move_tail(&desc->node, &re_chan->active_q);
+
+ memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
+ &desc->hwdesc, sizeof(struct fsl_re_hw_desc));
+
+ re_chan->inb_count = (re_chan->inb_count + 1) &
+ FSL_RE_RING_SIZE_MASK;
+ out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
+ avail--;
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+}
+
+static void fsl_re_desc_done(struct fsl_re_desc *desc)
+{
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ dma_cookie_complete(&desc->async_tx);
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback)
+ callback(callback_param);
+
+ dma_descriptor_unmap(&desc->async_tx);
+}
+
+static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
+{
+ struct fsl_re_desc *desc, *_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
+ if (async_tx_test_ack(&desc->async_tx))
+ list_move_tail(&desc->node, &re_chan->free_q);
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+
+ fsl_re_issue_pending(&re_chan->chan);
+}
+
+static void fsl_re_dequeue(unsigned long data)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc, *_desc;
+ struct fsl_re_hw_desc *hwdesc;
+ unsigned long flags;
+ unsigned int count, oub_count;
+ int found;
+
+ re_chan = dev_get_drvdata((struct device *)data);
+
+ fsl_re_cleanup_descs(re_chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
+ while (count--) {
+ found = 0;
+ hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
+ list_for_each_entry_safe(desc, _desc, &re_chan->active_q,
+ node) {
+ /* compare the hw dma addr to find the completed descriptor */
+ if (desc->hwdesc.lbea32 == hwdesc->lbea32 &&
+ desc->hwdesc.addr_low == hwdesc->addr_low) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ fsl_re_desc_done(desc);
+ list_move_tail(&desc->node, &re_chan->ack_q);
+ } else {
+ dev_err(re_chan->dev,
+ "found hwdesc not in sw queue, discard it\n");
+ }
+
+ oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
+ re_chan->oub_count = oub_count;
+
+ out_be32(&re_chan->jrregs->oubring_job_rmvd,
+ FSL_RE_RMVD_JOB(1));
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+}
+
+/* Per Job Ring interrupt handler */
+static irqreturn_t fsl_re_isr(int irq, void *data)
+{
+ struct fsl_re_chan *re_chan;
+ u32 irqstate, status;
+
+ re_chan = dev_get_drvdata((struct device *)data);
+
+ irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
+ if (!irqstate)
+ return IRQ_NONE;
+
+ /*
+ * There's no way for the upper layer (i.e. the MD layer) to recover
+ * from error conditions except by restarting everything. In the long
+ * term we need to do something better than just crashing here.
+ */
+ if (irqstate & FSL_RE_ERROR) {
+ status = in_be32(&re_chan->jrregs->jr_status);
+ dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
+ irqstate, status);
+ }
+
+ /* Clear interrupt */
+ out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);
+
+ tasklet_schedule(&re_chan->irqtask);
+
+ return IRQ_HANDLED;
+}
+
+static enum dma_status fsl_re_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
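+/*
+ * Fill one entry of a compound frame: the length and the final-frame flag
+ * go into efrl32, and the 64-bit buffer address is split into its high and
+ * low 32-bit words.
+ */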
+static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index,
+ size_t length, dma_addr_t addr, bool final)
+{
+ u32 efrl = length & FSL_RE_CF_LENGTH_MASK;
+
+ efrl |= final << FSL_RE_CF_FINAL_SHIFT;
+ cf[index].efrl32 = efrl;
+ cf[index].addr_high = upper_32_bits(addr);
+ cf[index].addr_low = lower_32_bits(addr);
+}
+
+static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan,
+ struct fsl_re_desc *desc,
+ void *cf, dma_addr_t paddr)
+{
+ desc->re_chan = re_chan;
+ desc->async_tx.tx_submit = fsl_re_tx_submit;
+ dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);
+ INIT_LIST_HEAD(&desc->node);
+
+ desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT;
+ desc->hwdesc.lbea32 = upper_32_bits(paddr);
+ desc->hwdesc.addr_low = lower_32_bits(paddr);
+ desc->cf_addr = cf;
+ desc->cf_paddr = paddr;
+
+ desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE);
+ desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE;
+
+ return desc;
+}
+
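+/*
+ * Get a descriptor for a new request: reuse one from the channel's free
+ * list when possible, otherwise allocate a fresh one together with its
+ * compound frame/CDB buffer from the DMA pool.
+ */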
+static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
+ unsigned long flags)
+{
+ struct fsl_re_desc *desc = NULL;
+ void *cf;
+ dma_addr_t paddr;
+ unsigned long lock_flag;
+
+ fsl_re_cleanup_descs(re_chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
+ if (!list_empty(&re_chan->free_q)) {
+ /* take one desc from free_q */
+ desc = list_first_entry(&re_chan->free_q,
+ struct fsl_re_desc, node);
+ list_del(&desc->node);
+
+ desc->async_tx.flags = flags;
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
+
+ if (!desc) {
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
+ &paddr);
+ if (!cf) {
+ kfree(desc);
+ return NULL;
+ }
+
+ desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
+ desc->async_tx.flags = flags;
+
+ spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
+ re_chan->alloc_count++;
+ spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
+ }
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ struct fsl_re_xor_cdb *xor;
+ struct fsl_re_cmpnd_frame *cf;
+ u32 cdb;
+ unsigned int i, j;
+ unsigned int save_src_cnt = src_cnt;
+ int cont_q = 0;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ if (len > FSL_RE_MAX_DATA_LEN) {
+ dev_err(re_chan->dev, "genq tx length %lu, max length %d\n",
+ len, FSL_RE_MAX_DATA_LEN);
+ return NULL;
+ }
+
+ desc = fsl_re_chan_alloc_desc(re_chan, flags);
+ if (!desc)
+ return NULL;
+
+ if (scf && (flags & DMA_PREP_CONTINUE)) {
+ cont_q = 1;
+ src_cnt += 1;
+ }
+
+ /* Filling xor CDB */
+ cdb = FSL_RE_XOR_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
+ cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
+ cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
+ cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
+ cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
+ xor = desc->cdb_addr;
+ xor->cdb32 = cdb;
+
+ if (scf) {
+ /* compute q = src0*coef0^src1*coef1^..., * is GF(8) mult */
+ for (i = 0; i < save_src_cnt; i++)
+ xor->gfm[i] = scf[i];
+ if (cont_q)
+ xor->gfm[i++] = 1;
+ } else {
+ /* compute P, that is XOR all srcs */
+ for (i = 0; i < src_cnt; i++)
+ xor->gfm[i] = 1;
+ }
+
+ /* Filling frame 0 of compound frame descriptor with CDB */
+ cf = desc->cf_addr;
+ fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0);
+
+ /* Fill CFD's 1st frame with dest buffer */
+ fill_cfd_frame(cf, 1, len, dest, 0);
+
+ /* Fill CFD's rest of the frames with source buffers */
+ for (i = 2, j = 0; j < save_src_cnt; i++, j++)
+ fill_cfd_frame(cf, i, len, src[j], 0);
+
+ if (cont_q)
+ fill_cfd_frame(cf, i++, len, dest, 0);
+
+ /* Setting the final bit in the last source buffer frame in CFD */
+ cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
+
+ return &desc->async_tx;
+}
+
+/*
+ * Prep function for P parity calculation. In RAID Engine terminology,
+ * an XOR calculation is called a GenQ calculation and is done through
+ * the GenQ command.
+ */
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_xor(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ /* NULL let genq take all coef as 1 */
+ return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags);
+}
+
+/*
+ * Prep function for P/Q parity calculation. In RAID Engine terminology,
+ * a P/Q calculation is called GenQQ and is done through the GenQQ command.
+ */
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
+ struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ struct fsl_re_pq_cdb *pq;
+ struct fsl_re_cmpnd_frame *cf;
+ u32 cdb;
+ u8 *p;
+ int gfmq_len, i, j;
+ unsigned int save_src_cnt = src_cnt;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ if (len > FSL_RE_MAX_DATA_LEN) {
+ dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n",
+ len, FSL_RE_MAX_DATA_LEN);
+ return NULL;
+ }
+
+ /*
+ * The RE requires at least 2 sources; if given only one source, we pass
+ * the same source twice as the second one.
+ * With only one source, generating P is meaningless, so only generate Q.
+ */
+ if (src_cnt == 1) {
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_src[2];
+ unsigned char coef[2];
+
+ dma_src[0] = *src;
+ coef[0] = *scf;
+ dma_src[1] = *src;
+ coef[1] = 0;
+ tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len,
+ flags);
+ if (tx)
+ desc = to_fsl_re_dma_desc(tx);
+
+ return tx;
+ }
+
+ /*
+ * During RAID6 array creation, Linux's MD layer gets P and Q
+ * calculated separately in two steps. But our RAID Engine has
+ * the capability to calculate both P and Q with a single command.
+ * Hence, to merge well with the MD layer, we provide a hook here
+ * and call the fsl_re_prep_dma_genq() function.
+ */
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt,
+ scf, len, flags);
+
+ if (flags & DMA_PREP_CONTINUE)
+ src_cnt += 3;
+
+ desc = fsl_re_chan_alloc_desc(re_chan, flags);
+ if (desc <= 0)
+ return NULL;
+
+ /* Filling GenQQ CDB */
+ cdb = FSL_RE_PQ_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
+ cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
+ cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
+ cdb |= FSL_RE_BUFFER_OUTPUT << FSL_RE_CDB_BUFFER_SHIFT;
+ cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
+
+ pq = desc->cdb_addr;
+ pq->cdb32 = cdb;
+
+ p = pq->gfm_q1;
+ /* Init gfm_q1[] */
+ for (i = 0; i < src_cnt; i++)
+ p[i] = 1;
+
+ /* Align gfm[] to 32bit */
+ gfmq_len = ALIGN(src_cnt, 4);
+
+ /* Init gfm_q2[] */
+ p += gfmq_len;
+ for (i = 0; i < src_cnt; i++)
+ p[i] = scf[i];
+
+ /* Filling frame 0 of compound frame descriptor with CDB */
+ cf = desc->cf_addr;
+ fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0);
+
+ /* Fill CFD's 1st & 2nd frame with dest buffers */
+ for (i = 1, j = 0; i < 3; i++, j++)
+ fill_cfd_frame(cf, i, len, dest[j], 0);
+
+ /* Fill CFD's rest of the frames with source buffers */
+ for (i = 3, j = 0; j < save_src_cnt; i++, j++)
+ fill_cfd_frame(cf, i, len, src[j], 0);
+
+ /* PQ computation continuation */
+ if (flags & DMA_PREP_CONTINUE) {
+ if (src_cnt - save_src_cnt == 3) {
+ p[save_src_cnt] = 0;
+ p[save_src_cnt + 1] = 0;
+ p[save_src_cnt + 2] = 1;
+ fill_cfd_frame(cf, i++, len, dest[0], 0);
+ fill_cfd_frame(cf, i++, len, dest[1], 0);
+ fill_cfd_frame(cf, i++, len, dest[1], 0);
+ } else {
+ dev_err(re_chan->dev, "PQ tx continuation error!\n");
+ return NULL;
+ }
+ }
+
+ /* Setting the final bit in the last source buffer frame in CFD */
+ cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
+
+ return &desc->async_tx;
+}
+
+/*
+ * Prep function for memcpy. In the RAID Engine, memcpy is done through the
+ * MOVE command. The logic of this function will need to be modified once
+ * multipage support is added to Linux's MD/ASYNC layer.
+ */
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ size_t length;
+ struct fsl_re_cmpnd_frame *cf;
+ struct fsl_re_move_cdb *move;
+ u32 cdb;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+
+ if (len > FSL_RE_MAX_DATA_LEN) {
+ dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n",
+ len, FSL_RE_MAX_DATA_LEN);
+ return NULL;
+ }
+
+ desc = fsl_re_chan_alloc_desc(re_chan, flags);
+ if (desc <= 0)
+ return NULL;
+
+ /* Filling move CDB */
+ cdb = FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
+ cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
+ cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
+ cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
+
+ move = desc->cdb_addr;
+ move->cdb32 = cdb;
+
+ /* Filling frame 0 of CFD with move CDB */
+ cf = desc->cf_addr;
+ fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0);
+
+ length = min_t(size_t, len, FSL_RE_MAX_DATA_LEN);
+
+ /* Fill CFD's 1st frame with dest buffer */
+ fill_cfd_frame(cf, 1, length, dest, 0);
+
+ /* Fill CFD's 2nd frame with src buffer */
+ fill_cfd_frame(cf, 2, length, src, 1);
+
+ return &desc->async_tx;
+}
+
+static int fsl_re_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ void *cf;
+ dma_addr_t paddr;
+ int i;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ for (i = 0; i < FSL_RE_MIN_DESCS; i++) {
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ break;
+
+ cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL,
+ &paddr);
+ if (!cf) {
+ kfree(desc);
+ break;
+ }
+
+ INIT_LIST_HEAD(&desc->node);
+ fsl_re_init_desc(re_chan, desc, cf, paddr);
+
+ list_add_tail(&desc->node, &re_chan->free_q);
+ re_chan->alloc_count++;
+ }
+ return re_chan->alloc_count;
+}
+
+static void fsl_re_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ while (re_chan->alloc_count--) {
+ desc = list_first_entry(&re_chan->free_q,
+ struct fsl_re_desc,
+ node);
+
+ list_del(&desc->node);
+ dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
+ desc->cf_paddr);
+ kfree(desc);
+ }
+
+ if (!list_empty(&re_chan->free_q))
+ dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
+}
+
+static int fsl_re_chan_probe(struct platform_device *ofdev,
+ struct device_node *np, u8 q, u32 off)
+{
+ struct device *dev, *chandev;
+ struct fsl_re_drv_private *re_priv;
+ struct fsl_re_chan *chan;
+ struct dma_device *dma_dev;
+ u32 ptr;
+ u32 status;
+ int ret = 0, rc;
+ struct platform_device *chan_ofdev;
+
+ dev = &ofdev->dev;
+ re_priv = dev_get_drvdata(dev);
+ dma_dev = &re_priv->dma_dev;
+
+ chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ /* create platform device for chan node */
+ chan_ofdev = of_platform_device_create(np, NULL, dev);
+ if (!chan_ofdev) {
+ dev_err(dev, "Not able to create ofdev for jr %d\n", q);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ /* read reg property from dts */
+ rc = of_property_read_u32(np, "reg", &ptr);
+ if (rc) {
+ dev_err(dev, "Reg property not found in jr %d\n", q);
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs +
+ off + ptr);
+
+ /* read irq property from dts */
+ chan->irq = irq_of_parse_and_map(np, 0);
+ if (chan->irq == NO_IRQ) {
+ dev_err(dev, "No IRQ defined for JR %d\n", q);
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
+
+ chandev = &chan_ofdev->dev;
+ tasklet_init(&chan->irqtask, fsl_re_dequeue, (unsigned long)chandev);
+
+ ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
+ if (ret) {
+ dev_err(dev, "Unable to register interrupt for JR %d\n", q);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ re_priv->re_jrs[q] = chan;
+ chan->chan.device = dma_dev;
+ chan->chan.private = chan;
+ chan->dev = chandev;
+ chan->re_dev = re_priv;
+
+ spin_lock_init(&chan->desc_lock);
+ INIT_LIST_HEAD(&chan->ack_q);
+ INIT_LIST_HEAD(&chan->active_q);
+ INIT_LIST_HEAD(&chan->submit_q);
+ INIT_LIST_HEAD(&chan->free_q);
+
+ chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
+ GFP_KERNEL, &chan->inb_phys_addr);
+ if (!chan->inb_ring_virt_addr) {
+ dev_err(dev, "No dma memory for inb_ring_virt_addr\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
+ GFP_KERNEL, &chan->oub_phys_addr);
+ if (!chan->oub_ring_virt_addr) {
+ dev_err(dev, "No dma memory for oub_ring_virt_addr\n");
+ ret = -ENOMEM;
+ goto err_free_1;
+ }
+
+ /* Program the Inbound/Outbound ring base addresses and size */
+ out_be32(&chan->jrregs->inbring_base_h,
+ chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK);
+ out_be32(&chan->jrregs->oubring_base_h,
+ chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK);
+ out_be32(&chan->jrregs->inbring_base_l,
+ chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
+ out_be32(&chan->jrregs->oubring_base_l,
+ chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
+ out_be32(&chan->jrregs->inbring_size,
+ FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
+ out_be32(&chan->jrregs->oubring_size,
+ FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
+
+ /* Read LIODN value from u-boot */
+ status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK;
+
+ /* Program the CFG reg */
+ out_be32(&chan->jrregs->jr_config_1,
+ FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status);
+
+ dev_set_drvdata(chandev, chan);
+
+ /* Enable RE/CHAN */
+ out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE);
+
+ return 0;
+
+err_free_1:
+ dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
+ chan->inb_phys_addr);
+err_free:
+ return ret;
+}
+
+/* Probe function for RAID Engine */
+static int fsl_re_probe(struct platform_device *ofdev)
+{
+ struct fsl_re_drv_private *re_priv;
+ struct device_node *np;
+ struct device_node *child;
+ u32 off;
+ u8 ridx = 0;
+ struct dma_device *dma_dev;
+ struct resource *res;
+ int rc;
+ struct device *dev = &ofdev->dev;
+
+ re_priv = devm_kzalloc(dev, sizeof(*re_priv), GFP_KERNEL);
+ if (!re_priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ /* IOMAP the entire RAID Engine region */
+ re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!re_priv->re_regs)
+ return -EBUSY;
+
+ /* Program the RE mode */
+ out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE);
+
+ /* Program Galois Field polynomial */
+ out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY);
+
+ dev_info(dev, "version %x, mode %x, gfp %x\n",
+ in_be32(&re_priv->re_regs->re_version_id),
+ in_be32(&re_priv->re_regs->global_config),
+ in_be32(&re_priv->re_regs->galois_field_config));
+
+ dma_dev = &re_priv->dma_dev;
+ dma_dev->dev = dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+ dma_set_mask(dev, DMA_BIT_MASK(40));
+
+ dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources;
+ dma_dev->device_tx_status = fsl_re_tx_status;
+ dma_dev->device_issue_pending = fsl_re_issue_pending;
+
+ dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS;
+ dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor;
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+
+ dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS;
+ dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq;
+ dma_cap_set(DMA_PQ, dma_dev->cap_mask);
+
+ dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+
+ dma_dev->device_free_chan_resources = fsl_re_free_chan_resources;
+
+ re_priv->total_chans = 0;
+
+ re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev,
+ FSL_RE_CF_CDB_SIZE,
+ FSL_RE_CF_CDB_ALIGN, 0);
+
+ if (!re_priv->cf_desc_pool) {
+ dev_err(dev, "No memory for fsl re_cf desc pool\n");
+ return -ENOMEM;
+ }
+
+ re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev,
+ sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE,
+ FSL_RE_FRAME_ALIGN, 0);
+ if (!re_priv->hw_desc_pool) {
+ dev_err(dev, "No memory for fsl re_hw desc pool\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, re_priv);
+
+ /* Parse Device tree to find out the total number of JQs present */
+ for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") {
+ rc = of_property_read_u32(np, "reg", &off);
+ if (rc) {
+ dev_err(dev, "Reg property not found in JQ node\n");
+ return -ENODEV;
+ }
+ /* Find out the Job Rings present under each JQ */
+ for_each_child_of_node(np, child) {
+ rc = of_device_is_compatible(child,
+ "fsl,raideng-v1.0-job-ring");
+ if (rc) {
+ fsl_re_chan_probe(ofdev, child, ridx++, off);
+ re_priv->total_chans++;
+ }
+ }
+ }
+
+ dma_async_device_register(dma_dev);
+
+ return 0;
+}
+
+static void fsl_re_remove_chan(struct fsl_re_chan *chan)
+{
+ dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
+ chan->inb_phys_addr);
+
+ dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr,
+ chan->oub_phys_addr);
+}
+
+static int fsl_re_remove(struct platform_device *ofdev)
+{
+ struct fsl_re_drv_private *re_priv;
+ struct device *dev;
+ int i;
+
+ dev = &ofdev->dev;
+ re_priv = dev_get_drvdata(dev);
+
+ /* Cleanup chan related memory areas */
+ for (i = 0; i < re_priv->total_chans; i++)
+ fsl_re_remove_chan(re_priv->re_jrs[i]);
+
+ /* Unregister the driver */
+ dma_async_device_unregister(&re_priv->dma_dev);
+
+ return 0;
+}
+
+static struct of_device_id fsl_re_ids[] = {
+ { .compatible = "fsl,raideng-v1.0", },
+ {}
+};
+
+static struct platform_driver fsl_re_driver = {
+ .driver = {
+ .name = "fsl-raideng",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_re_ids,
+ },
+ .probe = fsl_re_probe,
+ .remove = fsl_re_remove,
+};
+
+module_platform_driver(fsl_re_driver);
+
+MODULE_AUTHOR("Harninder Rai <harninder.rai@freescale.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Freescale RAID Engine Device Driver");
diff --git a/drivers/dma/fsl_raid.h b/drivers/dma/fsl_raid.h
new file mode 100644
index 000000000000..69d743c04973
--- /dev/null
+++ b/drivers/dma/fsl_raid.h
@@ -0,0 +1,306 @@
+/*
+ * drivers/dma/fsl_raid.h
+ *
+ * Freescale RAID Engine device driver
+ *
+ * Author:
+ * Harninder Rai <harninder.rai@freescale.com>
+ * Naveen Burmi <naveenburmi@freescale.com>
+ *
+ * Rewrite:
+ * Xuelin Shi <xuelin.shi@freescale.com>
+ *
+ * Copyright (c) 2010-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define FSL_RE_MAX_CHANS 4
+#define FSL_RE_DPAA_MODE BIT(30)
+#define FSL_RE_NON_DPAA_MODE BIT(31)
+#define FSL_RE_GFM_POLY 0x1d000000
+#define FSL_RE_ADD_JOB(x) ((x) << 16)
+#define FSL_RE_RMVD_JOB(x) ((x) << 16)
+#define FSL_RE_CFG1_CBSI 0x08000000
+#define FSL_RE_CFG1_CBS0 0x00080000
+#define FSL_RE_SLOT_FULL_SHIFT 8
+#define FSL_RE_SLOT_FULL(x) ((x) >> FSL_RE_SLOT_FULL_SHIFT)
+#define FSL_RE_SLOT_AVAIL_SHIFT 8
+#define FSL_RE_SLOT_AVAIL(x) ((x) >> FSL_RE_SLOT_AVAIL_SHIFT)
+#define FSL_RE_PQ_OPCODE 0x1B
+#define FSL_RE_XOR_OPCODE 0x1A
+#define FSL_RE_MOVE_OPCODE 0x8
+#define FSL_RE_FRAME_ALIGN 16
+#define FSL_RE_BLOCK_SIZE 0x3 /* 4096 bytes */
+#define FSL_RE_CACHEABLE_IO 0x0
+#define FSL_RE_BUFFER_OUTPUT 0x0
+#define FSL_RE_INTR_ON_ERROR 0x1
+#define FSL_RE_DATA_DEP 0x1
+#define FSL_RE_ENABLE_DPI 0x0
+#define FSL_RE_RING_SIZE 0x400
+#define FSL_RE_RING_SIZE_MASK (FSL_RE_RING_SIZE - 1)
+#define FSL_RE_RING_SIZE_SHIFT 8
+#define FSL_RE_ADDR_BIT_SHIFT 4
+#define FSL_RE_ADDR_BIT_MASK (BIT(FSL_RE_ADDR_BIT_SHIFT) - 1)
+#define FSL_RE_ERROR 0x40000000
+#define FSL_RE_INTR 0x80000000
+#define FSL_RE_CLR_INTR 0x80000000
+#define FSL_RE_PAUSE 0x80000000
+#define FSL_RE_ENABLE 0x80000000
+#define FSL_RE_REG_LIODN_MASK 0x00000FFF
+
+#define FSL_RE_CDB_OPCODE_MASK 0xF8000000
+#define FSL_RE_CDB_OPCODE_SHIFT 27
+#define FSL_RE_CDB_EXCLEN_MASK 0x03000000
+#define FSL_RE_CDB_EXCLEN_SHIFT 24
+#define FSL_RE_CDB_EXCLQ1_MASK 0x00F00000
+#define FSL_RE_CDB_EXCLQ1_SHIFT 20
+#define FSL_RE_CDB_EXCLQ2_MASK 0x000F0000
+#define FSL_RE_CDB_EXCLQ2_SHIFT 16
+#define FSL_RE_CDB_BLKSIZE_MASK 0x0000C000
+#define FSL_RE_CDB_BLKSIZE_SHIFT 14
+#define FSL_RE_CDB_CACHE_MASK 0x00003000
+#define FSL_RE_CDB_CACHE_SHIFT 12
+#define FSL_RE_CDB_BUFFER_MASK 0x00000800
+#define FSL_RE_CDB_BUFFER_SHIFT 11
+#define FSL_RE_CDB_ERROR_MASK 0x00000400
+#define FSL_RE_CDB_ERROR_SHIFT 10
+#define FSL_RE_CDB_NRCS_MASK 0x0000003C
+#define FSL_RE_CDB_NRCS_SHIFT 6
+#define FSL_RE_CDB_DEPEND_MASK 0x00000008
+#define FSL_RE_CDB_DEPEND_SHIFT 3
+#define FSL_RE_CDB_DPI_MASK 0x00000004
+#define FSL_RE_CDB_DPI_SHIFT 2
+
+/*
+ * The largest CF block is 19 * sizeof(struct cmpnd_frame), which is 304 bytes;
+ * here 19 = 1 (cdb) + 2 (dest) + 16 (src). Aligned to 64 bytes, that is 320
+ * bytes. The largest CDB block is struct pq_cdb, which is 180 bytes; added to
+ * the CF block, 320 + 180 = 500, which aligned to 64 bytes is 512 bytes.
+ */
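+/*
+ * Worked out from the structures below (assuming no compiler padding):
+ *   sizeof(struct fsl_re_cmpnd_frame) = 4 * 4 = 16, 19 * 16 = 304,
+ *   ALIGN(304, 64) = 320;
+ *   sizeof(struct fsl_re_pq_cdb) = 4 + 16 + 16 + 2 * 8 + 16 * 8 = 180,
+ *   ALIGN(320 + 180, 64) = 512.
+ */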
+#define FSL_RE_CF_DESC_SIZE 320
+#define FSL_RE_CF_CDB_SIZE 512
+#define FSL_RE_CF_CDB_ALIGN 64
+
+struct fsl_re_ctrl {
+ /* General Configuration Registers */
+ __be32 global_config; /* Global Configuration Register */
+ u8 rsvd1[4];
+ __be32 galois_field_config; /* Galois Field Configuration Register */
+ u8 rsvd2[4];
+ __be32 jq_wrr_config; /* WRR Configuration register */
+ u8 rsvd3[4];
+ __be32 crc_config; /* CRC Configuration register */
+ u8 rsvd4[228];
+ __be32 system_reset; /* System Reset Register */
+ u8 rsvd5[252];
+ __be32 global_status; /* Global Status Register */
+ u8 rsvd6[832];
+ __be32 re_liodn_base; /* LIODN Base Register */
+ u8 rsvd7[1712];
+ __be32 re_version_id; /* Version ID register of RE */
+ __be32 re_version_id_2; /* Version ID 2 register of RE */
+ u8 rsvd8[512];
+ __be32 host_config; /* Host I/F Configuration Register */
+};
+
+struct fsl_re_chan_cfg {
+ /* Registers for JR interface */
+ __be32 jr_config_0; /* Job Queue Configuration 0 Register */
+ __be32 jr_config_1; /* Job Queue Configuration 1 Register */
+ __be32 jr_interrupt_status; /* Job Queue Interrupt Status Register */
+ u8 rsvd1[4];
+ __be32 jr_command; /* Job Queue Command Register */
+ u8 rsvd2[4];
+ __be32 jr_status; /* Job Queue Status Register */
+ u8 rsvd3[228];
+
+ /* Input Ring */
+ __be32 inbring_base_h; /* Inbound Ring Base Address Register - High */
+ __be32 inbring_base_l; /* Inbound Ring Base Address Register - Low */
+ __be32 inbring_size; /* Inbound Ring Size Register */
+ u8 rsvd4[4];
+ __be32 inbring_slot_avail; /* Inbound Ring Slot Available Register */
+ u8 rsvd5[4];
+ __be32 inbring_add_job; /* Inbound Ring Add Job Register */
+ u8 rsvd6[4];
+ __be32 inbring_cnsmr_indx; /* Inbound Ring Consumer Index Register */
+ u8 rsvd7[220];
+
+ /* Output Ring */
+ __be32 oubring_base_h; /* Outbound Ring Base Address Register - High */
+ __be32 oubring_base_l; /* Outbound Ring Base Address Register - Low */
+ __be32 oubring_size; /* Outbound Ring Size Register */
+ u8 rsvd8[4];
+ __be32 oubring_job_rmvd; /* Outbound Ring Job Removed Register */
+ u8 rsvd9[4];
+ __be32 oubring_slot_full; /* Outbound Ring Slot Full Register */
+ u8 rsvd10[4];
+ __be32 oubring_prdcr_indx; /* Outbound Ring Producer Index */
+};
+
+/*
+ * Command Descriptor Block (CDB) for the unicast move command.
+ * In RAID Engine terms, memcpy is done through the move command.
+ */
+struct fsl_re_move_cdb {
+ __be32 cdb32;
+};
+
+/* Data protection/integrity related fields */
+#define FSL_RE_DPI_APPS_MASK 0xC0000000
+#define FSL_RE_DPI_APPS_SHIFT 30
+#define FSL_RE_DPI_REF_MASK 0x30000000
+#define FSL_RE_DPI_REF_SHIFT 28
+#define FSL_RE_DPI_GUARD_MASK 0x0C000000
+#define FSL_RE_DPI_GUARD_SHIFT 26
+#define FSL_RE_DPI_ATTR_MASK 0x03000000
+#define FSL_RE_DPI_ATTR_SHIFT 24
+#define FSL_RE_DPI_META_MASK 0x0000FFFF
+
+struct fsl_re_dpi {
+ __be32 dpi32;
+ __be32 ref;
+};
+
+/*
+ * CDB for GenQ command. In RAID Engine terminology, XOR is
+ * done through this command.
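+ * gfm[] holds one GF(2^8) coefficient per source: all ones for a plain
+ * XOR/P computation, or the caller-supplied scf[] values for Q, see
+ * fsl_re_prep_dma_genq().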
+ */
+struct fsl_re_xor_cdb {
+ __be32 cdb32;
+ u8 gfm[16];
+ struct fsl_re_dpi dpi_dest_spec;
+ struct fsl_re_dpi dpi_src_spec[16];
+};
+
+/* CDB for no-op command */
+struct fsl_re_noop_cdb {
+ __be32 cdb32;
+};
+
+/*
+ * CDB for GenQQ command. In RAID Engine terminology, P/Q is
+ * done through this command.
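+ * The driver writes the P coefficients (all ones) first and then, at the
+ * next 32-bit boundary, the Q coefficients taken from scf[], see
+ * fsl_re_prep_dma_pq().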
+ */
+struct fsl_re_pq_cdb {
+ __be32 cdb32;
+ u8 gfm_q1[16];
+ u8 gfm_q2[16];
+ struct fsl_re_dpi dpi_dest_spec[2];
+ struct fsl_re_dpi dpi_src_spec[16];
+};
+
+/* Compound frame */
+#define FSL_RE_CF_ADDR_HIGH_MASK 0x000000FF
+#define FSL_RE_CF_EXT_MASK 0x80000000
+#define FSL_RE_CF_EXT_SHIFT 31
+#define FSL_RE_CF_FINAL_MASK 0x40000000
+#define FSL_RE_CF_FINAL_SHIFT 30
+#define FSL_RE_CF_LENGTH_MASK 0x000FFFFF
+#define FSL_RE_CF_BPID_MASK 0x00FF0000
+#define FSL_RE_CF_BPID_SHIFT 16
+#define FSL_RE_CF_OFFSET_MASK 0x00001FFF
+
+struct fsl_re_cmpnd_frame {
+ __be32 addr_high;
+ __be32 addr_low;
+ __be32 efrl32;
+ __be32 rbro32;
+};
+
+/* Frame descriptor */
+#define FSL_RE_HWDESC_LIODN_MASK 0x3F000000
+#define FSL_RE_HWDESC_LIODN_SHIFT 24
+#define FSL_RE_HWDESC_BPID_MASK 0x00FF0000
+#define FSL_RE_HWDESC_BPID_SHIFT 16
+#define FSL_RE_HWDESC_ELIODN_MASK 0x0000F000
+#define FSL_RE_HWDESC_ELIODN_SHIFT 12
+#define FSL_RE_HWDESC_FMT_SHIFT 29
+#define FSL_RE_HWDESC_FMT_MASK (0x3 << FSL_RE_HWDESC_FMT_SHIFT)
+
+struct fsl_re_hw_desc {
+ __be32 lbea32;
+ __be32 addr_low;
+ __be32 fmt32;
+ __be32 status;
+};
+
+/* Raid Engine device private data */
+struct fsl_re_drv_private {
+ u8 total_chans;
+ struct dma_device dma_dev;
+ struct fsl_re_ctrl *re_regs;
+ struct fsl_re_chan *re_jrs[FSL_RE_MAX_CHANS];
+ struct dma_pool *cf_desc_pool;
+ struct dma_pool *hw_desc_pool;
+};
+
+/* Per job ring data structure */
+struct fsl_re_chan {
+ char name[16];
+ spinlock_t desc_lock; /* queue lock */
+ struct list_head ack_q; /* wait-to-be-acked queue */
+ struct list_head active_q; /* already issued on hw, not completed */
+ struct list_head submit_q;
+ struct list_head free_q; /* alloc available queue */
+ struct device *dev;
+ struct fsl_re_drv_private *re_dev;
+ struct dma_chan chan;
+ struct fsl_re_chan_cfg *jrregs;
+ int irq;
+ struct tasklet_struct irqtask;
+ u32 alloc_count;
+
+ /* hw descriptor ring for inbound queue */
+ dma_addr_t inb_phys_addr;
+ struct fsl_re_hw_desc *inb_ring_virt_addr;
+ u32 inb_count;
+
+ /* hw descriptor ring for outbound queue */
+ dma_addr_t oub_phys_addr;
+ struct fsl_re_hw_desc *oub_ring_virt_addr;
+ u32 oub_count;
+};
+
+/* Async transaction descriptor */
+struct fsl_re_desc {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head node;
+ struct fsl_re_hw_desc hwdesc;
+ struct fsl_re_chan *re_chan;
+
+ /* hwdesc will point to cf_addr */
+ void *cf_addr;
+ dma_addr_t cf_paddr;
+
+ void *cdb_addr;
+ dma_addr_t cdb_paddr;
+ int status;
+};
diff --git a/drivers/dma/hsu/Kconfig b/drivers/dma/hsu/Kconfig
new file mode 100644
index 000000000000..2810dca70612
--- /dev/null
+++ b/drivers/dma/hsu/Kconfig
@@ -0,0 +1,14 @@
+# DMA engine configuration for hsu
+config HSU_DMA
+ tristate
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
+config HSU_DMA_PCI
+ tristate "High Speed UART DMA PCI driver"
+ depends on PCI
+ select HSU_DMA
+ help
+ Support the High Speed UART DMA on the platforms that
+ enumerate it as a PCI device. For example, Intel Medfield
+ has integrated this HSU DMA controller.
diff --git a/drivers/dma/hsu/Makefile b/drivers/dma/hsu/Makefile
new file mode 100644
index 000000000000..b8f9af032ef1
--- /dev/null
+++ b/drivers/dma/hsu/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_HSU_DMA) += hsu_dma.o
+hsu_dma-objs := hsu.o
+
+obj-$(CONFIG_HSU_DMA_PCI) += hsu_dma_pci.o
+hsu_dma_pci-objs := pci.o
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
new file mode 100644
index 000000000000..9b84def7a353
--- /dev/null
+++ b/drivers/dma/hsu/hsu.c
@@ -0,0 +1,495 @@
+/*
+ * Core driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * Partially based on the bits found in drivers/tty/serial/mfd.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * DMA channel allocation:
+ * 1. Even-numbered channels are used for DMA Read (UART TX), odd channels
+ *    for DMA Write (UART RX).
+ * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
+ *    4/5 to port 2, and so on.
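+ *    (i.e. a given port's TX path uses channel port * 2 and its RX path
+ *    uses channel port * 2 + 1.)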
+ */
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "hsu.h"
+
+#define HSU_DMA_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)
+
+static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
+{
+ hsu_chan_writel(hsuc, HSU_CH_CR, 0);
+}
+
+static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
+{
+ u32 cr = HSU_CH_CR_CHA;
+
+ if (hsuc->direction == DMA_MEM_TO_DEV)
+ cr &= ~HSU_CH_CR_CHD;
+ else if (hsuc->direction == DMA_DEV_TO_MEM)
+ cr |= HSU_CH_CR_CHD;
+
+ hsu_chan_writel(hsuc, HSU_CH_CR, cr);
+}
+
+static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
+{
+ struct dma_slave_config *config = &hsuc->config;
+ struct hsu_dma_desc *desc = hsuc->desc;
+ u32 bsr = 0, mtsr = 0; /* to shut the compiler up */
+ u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
+ unsigned int i, count;
+
+ if (hsuc->direction == DMA_MEM_TO_DEV) {
+ bsr = config->dst_maxburst;
+ mtsr = config->dst_addr_width;
+ } else if (hsuc->direction == DMA_DEV_TO_MEM) {
+ bsr = config->src_maxburst;
+ mtsr = config->src_addr_width;
+ }
+
+ hsu_chan_disable(hsuc);
+
+ hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
+ hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
+ hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
+
+ /* Set descriptors */
+ count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC;
+ for (i = 0; i < count; i++) {
+ hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
+ hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
+
+ /* Prepare value for DCR */
+ dcr |= HSU_CH_DCR_DESCA(i);
+ dcr |= HSU_CH_DCR_CHTOI(i); /* timeout bit, see HSU Errata 1 */
+
+ desc->active++;
+ }
+ /* Only for the last descriptor in the chain */
+ dcr |= HSU_CH_DCR_CHSOD(count - 1);
+ dcr |= HSU_CH_DCR_CHDI(count - 1);
+
+ hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);
+
+ hsu_chan_enable(hsuc);
+}
+
+static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_chan_disable(hsuc);
+ hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_dma_chan_start(hsuc);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
+{
+ struct virt_dma_desc *vdesc;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&hsuc->vchan);
+ if (!vdesc) {
+ hsuc->desc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+ hsuc->desc = to_hsu_dma_desc(vdesc);
+
+ /* Start the channel with a new descriptor */
+ hsu_dma_start_channel(hsuc);
+}
+
+static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+ u32 sr;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+
+ return sr;
+}
+
+irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+{
+ struct hsu_dma_chan *hsuc;
+ struct hsu_dma_desc *desc;
+ unsigned long flags;
+ u32 sr;
+
+ /* Sanity check */
+ if (nr >= chip->pdata->nr_channels)
+ return IRQ_NONE;
+
+ hsuc = &chip->hsu->chan[nr];
+
+ /*
+ * In any case the IRQ status must be read in order to clear it.
+ * There is a hardware bug, see Errata 5, HSD 2900918.
+ */
+ sr = hsu_dma_chan_get_sr(hsuc);
+ if (!sr)
+ return IRQ_NONE;
+
+ /* A timeout IRQ requires a short wait, see Errata 2 */
+ if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
+ udelay(2);
+
+ sr &= ~HSU_CH_SR_DESCTO_ANY;
+ if (!sr)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ desc = hsuc->desc;
+ if (desc) {
+ if (sr & HSU_CH_SR_CHE) {
+ desc->status = DMA_ERROR;
+ } else if (desc->active < desc->nents) {
+ hsu_dma_start_channel(hsuc);
+ } else {
+ vchan_cookie_complete(&desc->vdesc);
+ desc->status = DMA_COMPLETE;
+ hsu_dma_start_transfer(hsuc);
+ }
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_irq);
+
+static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
+{
+ struct hsu_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
+ if (!desc->sg) {
+ kfree(desc);
+ return NULL;
+ }
+
+ return desc;
+}
+
+static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);
+
+ kfree(desc->sg);
+ kfree(desc);
+}
+
+static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ struct hsu_dma_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ desc = hsu_dma_alloc_desc(sg_len);
+ if (!desc)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg[i].addr = sg_dma_address(sg);
+ desc->sg[i].len = sg_dma_len(sg);
+ }
+
+ desc->nents = sg_len;
+ desc->direction = direction;
+ /* desc->active = 0 by kzalloc */
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
+}
+
+static void hsu_dma_issue_pending(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
+ hsu_dma_start_transfer(hsuc);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+}
+
+static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
+{
+ size_t bytes = 0;
+ unsigned int i;
+
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ return bytes;
+}
+
+static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
+{
+ struct hsu_dma_desc *desc = hsuc->desc;
+ size_t bytes = hsu_dma_desc_size(desc);
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ do {
+ bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
+ } while (--i >= 0);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+
+ return bytes;
+}
+
+static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ size_t bytes;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, state);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ vdesc = vchan_find_desc(&hsuc->vchan, cookie);
+ if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
+ bytes = hsu_dma_active_desc_size(hsuc);
+ dma_set_residue(state, bytes);
+ status = hsuc->desc->status;
+ } else if (vdesc) {
+ bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+ dma_set_residue(state, bytes);
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return status;
+}
+
+static int hsu_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+
+ /* Check if chan will be configured for slave transfers */
+ if (!is_slave_direction(config->direction))
+ return -EINVAL;
+
+ memcpy(&hsuc->config, config, sizeof(hsuc->config));
+
+ return 0;
+}
+
+static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_chan_disable(hsuc);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_chan_enable(hsuc);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static int hsu_dma_pause(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
+ hsu_dma_chan_deactivate(hsuc);
+ hsuc->desc->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return 0;
+}
+
+static int hsu_dma_resume(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
+ hsuc->desc->status = DMA_IN_PROGRESS;
+ hsu_dma_chan_activate(hsuc);
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return 0;
+}
+
+static int hsu_dma_terminate_all(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+
+ hsu_dma_stop_channel(hsuc);
+ hsuc->desc = NULL;
+
+ vchan_get_all_descriptors(&hsuc->vchan, &head);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+ vchan_dma_desc_free_list(&hsuc->vchan, &head);
+
+ return 0;
+}
+
+static void hsu_dma_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+int hsu_dma_probe(struct hsu_dma_chip *chip)
+{
+ struct hsu_dma *hsu;
+ struct hsu_dma_platform_data *pdata = chip->pdata;
+ void __iomem *addr = chip->regs + chip->offset;
+ unsigned short i;
+ int ret;
+
+ hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
+ if (!hsu)
+ return -ENOMEM;
+
+ chip->hsu = hsu;
+
+ if (!pdata) {
+ pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ chip->pdata = pdata;
+
+ /* Guess nr_channels from the IO space length */
+ pdata->nr_channels = (chip->length - chip->offset) /
+ HSU_DMA_CHAN_LENGTH;
+ }
+
+ hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels,
+ sizeof(*hsu->chan), GFP_KERNEL);
+ if (!hsu->chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hsu->dma.channels);
+ for (i = 0; i < pdata->nr_channels; i++) {
+ struct hsu_dma_chan *hsuc = &hsu->chan[i];
+
+ hsuc->vchan.desc_free = hsu_dma_desc_free;
+ vchan_init(&hsuc->vchan, &hsu->dma);
+
+ hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
+
+ spin_lock_init(&hsuc->lock);
+ }
+
+ dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);
+
+ hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;
+
+ hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;
+
+ hsu->dma.device_issue_pending = hsu_dma_issue_pending;
+ hsu->dma.device_tx_status = hsu_dma_tx_status;
+
+ hsu->dma.device_config = hsu_dma_slave_config;
+ hsu->dma.device_pause = hsu_dma_pause;
+ hsu->dma.device_resume = hsu_dma_resume;
+ hsu->dma.device_terminate_all = hsu_dma_terminate_all;
+
+ hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
+ hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
+ hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ hsu->dma.dev = chip->dev;
+
+ ret = dma_async_device_register(&hsu->dma);
+ if (ret)
+ return ret;
+
+ dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_probe);
+
+int hsu_dma_remove(struct hsu_dma_chip *chip)
+{
+ struct hsu_dma *hsu = chip->hsu;
+ unsigned short i;
+
+ dma_async_device_unregister(&hsu->dma);
+
+ for (i = 0; i < chip->pdata->nr_channels; i++) {
+ struct hsu_dma_chan *hsuc = &hsu->chan[i];
+
+ tasklet_kill(&hsuc->vchan.task);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("High Speed UART DMA core driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
new file mode 100644
index 000000000000..0275233cf550
--- /dev/null
+++ b/drivers/dma/hsu/hsu.h
@@ -0,0 +1,118 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * Partially based on the bits found in drivers/tty/serial/mfd.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_HSU_H__
+#define __DMA_HSU_H__
+
+#include <linux/spinlock.h>
+#include <linux/dma/hsu.h>
+
+#include "../virt-dma.h"
+
+#define HSU_CH_SR 0x00 /* channel status */
+#define HSU_CH_CR 0x04 /* channel control */
+#define HSU_CH_DCR 0x08 /* descriptor control */
+#define HSU_CH_BSR 0x10 /* FIFO buffer size */
+#define HSU_CH_MTSR 0x14 /* minimum transfer size */
+#define HSU_CH_DxSAR(x) (0x20 + 8 * (x)) /* desc start addr */
+#define HSU_CH_DxTSR(x) (0x24 + 8 * (x)) /* desc transfer size */
+#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */
+#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */
+#define HSU_CH_D1SAR 0x28
+#define HSU_CH_D1TSR 0x2c
+#define HSU_CH_D2SAR 0x30
+#define HSU_CH_D2TSR 0x34
+#define HSU_CH_D3SAR 0x38
+#define HSU_CH_D3TSR 0x3c
+
+#define HSU_DMA_CHAN_NR_DESC 4
+#define HSU_DMA_CHAN_LENGTH 0x40
+
+/* Bits in HSU_CH_SR */
+#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
+#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define HSU_CH_SR_CHE BIT(15)
+
+/* Bits in HSU_CH_CR */
+#define HSU_CH_CR_CHA BIT(0)
+#define HSU_CH_CR_CHD BIT(1)
+
+/* Bits in HSU_CH_DCR */
+#define HSU_CH_DCR_DESCA(x) BIT(0 + (x))
+#define HSU_CH_DCR_CHSOD(x) BIT(8 + (x))
+#define HSU_CH_DCR_CHSOTO BIT(14)
+#define HSU_CH_DCR_CHSOE BIT(15)
+#define HSU_CH_DCR_CHDI(x) BIT(16 + (x))
+#define HSU_CH_DCR_CHEI BIT(23)
+#define HSU_CH_DCR_CHTOI(x) BIT(24 + (x))
+
+struct hsu_dma_sg {
+ dma_addr_t addr;
+ unsigned int len;
+};
+
+struct hsu_dma_desc {
+ struct virt_dma_desc vdesc;
+ enum dma_transfer_direction direction;
+ struct hsu_dma_sg *sg;
+ unsigned int nents;
+ unsigned int active;
+ enum dma_status status;
+};
+
+static inline struct hsu_dma_desc *to_hsu_dma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct hsu_dma_desc, vdesc);
+}
+
+struct hsu_dma_chan {
+ struct virt_dma_chan vchan;
+
+ void __iomem *reg;
+ spinlock_t lock;
+
+ /* hardware configuration */
+ enum dma_transfer_direction direction;
+ struct dma_slave_config config;
+
+ struct hsu_dma_desc *desc;
+};
+
+static inline struct hsu_dma_chan *to_hsu_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct hsu_dma_chan, vchan.chan);
+}
+
+static inline u32 hsu_chan_readl(struct hsu_dma_chan *hsuc, int offset)
+{
+ return readl(hsuc->reg + offset);
+}
+
+static inline void hsu_chan_writel(struct hsu_dma_chan *hsuc, int offset,
+ u32 value)
+{
+ writel(value, hsuc->reg + offset);
+}
+
+struct hsu_dma {
+ struct dma_device dma;
+
+ /* channels */
+ struct hsu_dma_chan *chan;
+};
+
+static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev)
+{
+ return container_of(ddev, struct hsu_dma, dma);
+}
+
+#endif /* __DMA_HSU_H__ */
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
new file mode 100644
index 000000000000..77879e6ddc4c
--- /dev/null
+++ b/drivers/dma/hsu/pci.c
@@ -0,0 +1,124 @@
+/*
+ * PCI driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * Partially based on the bits found in drivers/tty/serial/mfd.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "hsu.h"
+
+#define HSU_PCI_DMASR 0x00
+#define HSU_PCI_DMAISR 0x04
+
+#define HSU_PCI_CHAN_OFFSET 0x100
+
+static irqreturn_t hsu_pci_irq(int irq, void *dev)
+{
+ struct hsu_dma_chip *chip = dev;
+ u32 dmaisr;
+ unsigned short i;
+ irqreturn_t ret = IRQ_NONE;
+
+ dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
+ for (i = 0; i < chip->pdata->nr_channels; i++) {
+ if (dmaisr & 0x1)
+ ret |= hsu_dma_irq(chip, i);
+ dmaisr >>= 1;
+ }
+
+ return ret;
+}
+
+static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hsu_dma_chip *chip;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regs = pcim_iomap_table(pdev)[0];
+ chip->length = pci_resource_len(pdev, 0);
+ chip->offset = HSU_PCI_CHAN_OFFSET;
+ chip->irq = pdev->irq;
+
+ pci_enable_msi(pdev);
+
+ ret = hsu_dma_probe(chip);
+ if (ret)
+ return ret;
+
+ ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
+ if (ret)
+ goto err_register_irq;
+
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+
+err_register_irq:
+ hsu_dma_remove(chip);
+ return ret;
+}
+
+static void hsu_pci_remove(struct pci_dev *pdev)
+{
+ struct hsu_dma_chip *chip = pci_get_drvdata(pdev);
+
+ free_irq(chip->irq, chip);
+ hsu_dma_remove(chip);
+}
+
+static const struct pci_device_id hsu_pci_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x081e), 0 },
+ { PCI_VDEVICE(INTEL, 0x1192), 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, hsu_pci_id_table);
+
+static struct pci_driver hsu_pci_driver = {
+ .name = "hsu_dma_pci",
+ .id_table = hsu_pci_id_table,
+ .probe = hsu_pci_probe,
+ .remove = hsu_pci_remove,
+};
+
+module_pci_driver(hsu_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("High Speed UART DMA PCI driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index ed045a9ad634..9ca56830cc63 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -689,11 +689,6 @@ static int mdc_slave_config(struct dma_chan *chan,
return 0;
}
-static int mdc_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void mdc_free_chan_resources(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
@@ -910,7 +905,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
- mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 66a0efb9651d..62bbd79338e0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1260,6 +1260,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -1306,6 +1307,9 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
case 2:
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
break;
+ case 3:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
+ break;
default:
dev_err(sdma->dev, "unknown firmware version\n");
goto err_firmware;
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 3b55bb8d969a..ea1e107ae884 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 940c1502a8b5..ee0aa9f4ccfa 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index d63f68b1aa35..30f5c7eede16 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 695483e6be32..69c7dfcad023 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 470292767e68..bf24ebe874b0 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 194ec20c9408..64790a45ef5d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -15,10 +15,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 02177ecf09f8..a3e731edce57 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 5501eb072d69..76f0dc688a19 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 2f1cfa0f1f47..909352f74c89 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 263d9f6a207e..998826854fdd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
/*
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 6f7f43529ccb..647e362f01fd 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -313,11 +313,6 @@ static void k3_dma_tasklet(unsigned long arg)
}
}
-static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
struct k3_dma_chan *c = to_k3_chan(chan);
@@ -654,7 +649,7 @@ static void k3_dma_free_desc(struct virt_dma_desc *vd)
kfree(ds);
}
-static struct of_device_id k3_pdma_dt_ids[] = {
+static const struct of_device_id k3_pdma_dt_ids[] = {
{ .compatible = "hisilicon,k3-dma-1.0", },
{}
};
@@ -728,7 +723,6 @@ static int k3_dma_probe(struct platform_device *op)
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
d->slave.dev = &op->dev;
- d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
d->slave.device_tx_status = k3_dma_tx_status;
d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index eb410044e1af..462a0229a743 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -973,7 +973,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
return 0;
}
-static struct of_device_id mmp_pdma_dt_ids[] = {
+static const struct of_device_id mmp_pdma_dt_ids[] = {
{ .compatible = "marvell,pdma-1.0", },
{}
};
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index b6f4e1fc9c78..449e785def17 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -613,7 +613,7 @@ struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
}
-static struct of_device_id mmp_tdma_dt_ids[] = {
+static const struct of_device_id mmp_tdma_dt_ids[] = {
{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
{}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 57d2457545f3..e6281e7aa46e 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -21,10 +21,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -1072,7 +1068,7 @@ static int mpc_dma_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc_dma_match[] = {
+static const struct of_device_id mpc_dma_match[] = {
{ .compatible = "fsl,mpc5121-dma", },
{ .compatible = "fsl,mpc8308-dma", },
{},
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index b03e8137b918..1c56001df676 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -10,10 +10,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/init.h>
@@ -1249,7 +1245,7 @@ static int mv_xor_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id mv_xor_dt_ids[] = {
+static const struct of_device_id mv_xor_dt_ids[] = {
{ .compatible = "marvell,orion-xor", },
{},
};
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 78edc7e44569..91958dba39a2 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -9,10 +9,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef MV_XOR_H
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index ca31f1b45366..cbd4a8aff120 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -194,6 +194,7 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
return ERR_PTR(ret_no_channel);
}
+EXPORT_SYMBOL_GPL(of_dma_request_slave_channel);
/**
* of_dma_simple_xlate - Simple DMA engine translation function
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 35c143cb88da..b859792dde95 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -949,6 +949,7 @@ err_free_res:
err_disable_pdev:
pci_disable_device(pdev);
err_free_mem:
+ kfree(pd);
return err;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 0e1f56772855..a7d9d3029b14 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -556,7 +556,7 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAADDH;
buf[0] |= (da << 1);
- *((u16 *)&buf[1]) = val;
+ *((__le16 *)&buf[1]) = cpu_to_le16(val);
PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
da == 1 ? "DA" : "SA", val);
@@ -710,7 +710,7 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAMOV;
buf[1] = dst;
- *((u32 *)&buf[2]) = val;
+ *((__le32 *)&buf[2]) = cpu_to_le32(val);
PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -888,7 +888,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
buf[1] = chan & 0x7;
- *((u32 *)&buf[2]) = addr;
+ *((__le32 *)&buf[2]) = cpu_to_le32(addr);
return SZ_DMAGO;
}
@@ -928,7 +928,7 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
}
writel(val, regs + DBGINST0);
- val = *((u32 *)&insn[2]);
+ val = le32_to_cpu(*((__le32 *)&insn[2]));
writel(val, regs + DBGINST1);
/* If timed out due to halted state-machine */
@@ -2162,7 +2162,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
* DMA transfer again. This pause feature was implemented to
* allow safely read residue before channel termination.
*/
-int pl330_pause(struct dma_chan *chan)
+static int pl330_pause(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
@@ -2203,8 +2203,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
-int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
- struct dma_pl330_desc *desc)
+static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+ struct dma_pl330_desc *desc)
{
struct pl330_thread *thrd = pch->thread;
struct pl330_dmac *pl330 = pch->dmac;
@@ -2259,7 +2259,17 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
transferred = 0;
residual += desc->bytes_requested - transferred;
if (desc->txd.cookie == cookie) {
- ret = desc->status;
+ switch (desc->status) {
+ case DONE:
+ ret = DMA_COMPLETE;
+ break;
+ case PREP:
+ case BUSY:
+ ret = DMA_IN_PROGRESS;
+ break;
+ default:
+ WARN_ON(1);
+ }
break;
}
if (desc->last)
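
The pl330_tx_status hunk stops returning the driver's private descriptor states (DONE/PREP/BUSY) straight through the dmaengine API and translates them into enum dma_status values that clients understand. The translation in isolation, with hypothetical internal states:

#include <linux/bug.h>
#include <linux/dmaengine.h>

enum foo_desc_state { FOO_PREP, FOO_BUSY, FOO_DONE };	/* driver-internal */

static enum dma_status foo_to_dma_status(enum foo_desc_state s)
{
	switch (s) {
	case FOO_DONE:
		return DMA_COMPLETE;
	case FOO_PREP:
	case FOO_BUSY:
		return DMA_IN_PROGRESS;
	default:
		WARN_ON_ONCE(1);
		return DMA_ERROR;
	}
}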
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fa764a39cd36..9217f893b0d1 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -16,10 +16,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 9c914d625906..5a250cdc8376 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -171,6 +171,35 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = {
[BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
};
+static const struct reg_offset_data bam_v1_7_reg_info[] = {
+ [BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 },
+ [BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 },
+ [BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 },
+ [BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 },
+ [BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 },
+ [BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 },
+ [BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 },
+ [BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 },
+ [BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 },
+ [BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 },
+ [BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 },
+ [BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 },
+ [BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 },
+ [BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 },
+};
+
/* BAM CTRL */
#define BAM_SW_RST BIT(0)
#define BAM_EN BIT(1)
@@ -1051,6 +1080,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
static const struct of_device_id bam_of_match[] = {
{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
+ { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
{}
};
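
With the v1.7.0 entry added, each compatible string now carries its own register-layout table through the .data pointer, and probe picks the one matching the SoC it is running on. A hedged sketch of the usual lookup (foo_probe is illustrative; bam_of_match and struct reg_offset_data are the ones defined in this file):

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct reg_offset_data *layout;

	match = of_match_device(bam_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	/* e.g. bam_v1_7_reg_info when the node is "qcom,bam-v1.7.0" */
	layout = match->data;

	/* ... compute register addresses from layout ... */
	return 0;
}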
@@ -1113,7 +1143,7 @@ static int bam_dma_probe(struct platform_device *pdev)
if (!bdev->channels) {
ret = -ENOMEM;
- goto err_disable_clk;
+ goto err_tasklet_kill;
}
/* allocate and initialize channels */
@@ -1125,7 +1155,7 @@ static int bam_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
IRQF_TRIGGER_HIGH, "bam_dma", bdev);
if (ret)
- goto err_disable_clk;
+ goto err_bam_channel_exit;
/* set max dma segment size */
bdev->common.dev = bdev->dev;
@@ -1133,7 +1163,7 @@ static int bam_dma_probe(struct platform_device *pdev)
ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
if (ret) {
dev_err(bdev->dev, "cannot set maximum segment size\n");
- goto err_disable_clk;
+ goto err_bam_channel_exit;
}
platform_set_drvdata(pdev, bdev);
@@ -1161,7 +1191,7 @@ static int bam_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&bdev->common);
if (ret) {
dev_err(bdev->dev, "failed to register dma async device\n");
- goto err_disable_clk;
+ goto err_bam_channel_exit;
}
ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
@@ -1173,8 +1203,14 @@ static int bam_dma_probe(struct platform_device *pdev)
err_unregister_dma:
dma_async_device_unregister(&bdev->common);
+err_bam_channel_exit:
+ for (i = 0; i < bdev->num_channels; i++)
+ tasklet_kill(&bdev->channels[i].vc.task);
+err_tasklet_kill:
+ tasklet_kill(&bdev->task);
err_disable_clk:
clk_disable_unprepare(bdev->bamclk);
+
return ret;
}
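
The new err_bam_channel_exit and err_tasklet_kill labels extend the standard probe unwind ladder: labels sit in reverse order of acquisition, so a failure at any step falls through and releases exactly what has been set up so far (here: per-channel tasklets, then the device tasklet, then the clock). The bare pattern, with hypothetical helpers:

/* Hypothetical resources standing in for tasklets, IRQs and clocks. */
int foo_take_a(void);
int foo_take_b(void);
int foo_take_c(void);
void foo_release_a(void);
void foo_release_b(void);

static int foo_probe(void)
{
	int ret;

	ret = foo_take_a();
	if (ret)
		return ret;

	ret = foo_take_b();
	if (ret)
		goto err_release_a;

	ret = foo_take_c();
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	foo_release_b();	/* undo step b ... */
err_release_a:
	foo_release_a();	/* ... then step a, nothing else */
	return ret;
}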
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 2f91da3db836..01dcaf21b988 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -749,11 +749,6 @@ unlock:
return ret;
}
-static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -1238,7 +1233,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
if (!s3cdma->phy_chans)
return -ENOMEM;
- /* aquire irqs and clocks for all physical channels */
+ /* acquire irqs and clocks for all physical channels */
for (i = 0; i < pdata->num_phy_channels; i++) {
struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
char clk_name[6];
@@ -1266,7 +1261,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
sprintf(clk_name, "dma.%d", i);
phy->clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(phy->clk) && sdata->has_clocks) {
- dev_err(&pdev->dev, "unable to aquire clock for channel %d, error %lu",
+ dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
i, PTR_ERR(phy->clk));
continue;
}
@@ -1290,8 +1285,6 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
s3cdma->memcpy.dev = &pdev->dev;
- s3cdma->memcpy.device_alloc_chan_resources =
- s3c24xx_dma_alloc_chan_resources;
s3cdma->memcpy.device_free_chan_resources =
s3c24xx_dma_free_chan_resources;
s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
@@ -1305,8 +1298,6 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
s3cdma->slave.dev = &pdev->dev;
- s3cdma->slave.device_alloc_chan_resources =
- s3c24xx_dma_alloc_chan_resources;
s3cdma->slave.device_free_chan_resources =
s3c24xx_dma_free_chan_resources;
s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 5adf5407a8cb..43db255050d2 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -389,11 +389,6 @@ static void sa11x0_dma_tasklet(unsigned long arg)
}
-static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
@@ -835,7 +830,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
INIT_LIST_HEAD(&dmadev->channels);
dmadev->dev = dev;
- dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
dmadev->device_config = sa11x0_dma_device_config;
dmadev->device_pause = sa11x0_dma_device_pause;
@@ -948,6 +942,12 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
+ d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
+ d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
if (ret) {
dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
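
Filling in directions, residue_granularity and the address-width masks lets the dmaengine core answer capability queries for sa11x0 channels rather than leaving clients to guess. A hedged sketch of the query a client can now make (the foo_* helper is illustrative):

#include <linux/dmaengine.h>

static bool foo_chan_supports_16bit_dev_to_mem(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
	       (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_2_BYTES));
}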
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 8190ad225a1b..0f371524a4d9 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -51,12 +51,6 @@ config RCAR_HPB_DMAE
help
Enable support for the Renesas R-Car series DMA controllers.
-config RCAR_AUDMAC_PP
- tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
- depends on SH_DMAE_BASE
- help
- Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
-
config RCAR_DMAC
tristate "Renesas R-Car Gen2 DMA Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -64,3 +58,12 @@ config RCAR_DMAC
help
This driver supports the general purpose DMA controller found in the
Renesas R-Car second generation SoCs.
+
+config RENESAS_USB_DMAC
+ tristate "Renesas USB-DMA Controller"
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select RENESAS_DMA
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This driver supports the USB-DMA controller found in the Renesas
+ SoCs.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 2852f9db61a4..b8a598066ce2 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -15,5 +15,5 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_SUDMAC) += sudmac.o
obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
-obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
deleted file mode 100644
index d95bbdd721f4..000000000000
--- a/drivers/dma/sh/rcar-audmapp.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * This is for Renesas R-Car Audio-DMAC-peri-peri.
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * based on the drivers/dma/sh/shdma.c
- *
- * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
- * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-#include <linux/of_dma.h>
-#include <linux/platform_data/dma-rcar-audmapp.h>
-#include <linux/platform_device.h>
-#include <linux/shdma-base.h>
-
-/*
- * DMA register
- */
-#define PDMASAR 0x00
-#define PDMADAR 0x04
-#define PDMACHCR 0x0c
-
-/* PDMACHCR */
-#define PDMACHCR_DE (1 << 0)
-
-#define AUDMAPP_MAX_CHANNELS 29
-
-/* Default MEMCPY transfer size = 2^2 = 4 bytes */
-#define LOG2_DEFAULT_XFER_SIZE 2
-#define AUDMAPP_SLAVE_NUMBER 256
-#define AUDMAPP_LEN_MAX (16 * 1024 * 1024)
-
-struct audmapp_chan {
- struct shdma_chan shdma_chan;
- void __iomem *base;
- dma_addr_t slave_addr;
- u32 chcr;
-};
-
-struct audmapp_device {
- struct shdma_dev shdma_dev;
- struct audmapp_pdata *pdata;
- struct device *dev;
- void __iomem *chan_reg;
-};
-
-struct audmapp_desc {
- struct shdma_desc shdma_desc;
- dma_addr_t src;
- dma_addr_t dst;
-};
-
-#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
-
-#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
-#define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc)
-#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
- struct audmapp_device, shdma_dev.dma_dev)
-
-static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
-{
- struct audmapp_device *audev = to_dev(auchan);
- struct device *dev = audev->dev;
-
- dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
-
- iowrite32(data, auchan->base + reg);
-}
-
-static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
-{
- return ioread32(auchan->base + reg);
-}
-
-static void audmapp_halt(struct shdma_chan *schan)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- int i;
-
- audmapp_write(auchan, 0, PDMACHCR);
-
- for (i = 0; i < 1024; i++) {
- if (0 == audmapp_read(auchan, PDMACHCR))
- return;
- udelay(1);
- }
-}
-
-static void audmapp_start_xfer(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- struct audmapp_device *audev = to_dev(auchan);
- struct audmapp_desc *desc = to_desc(sdesc);
- struct device *dev = audev->dev;
- u32 chcr = auchan->chcr | PDMACHCR_DE;
-
- dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n",
- &desc->src, &desc->dst, chcr);
-
- audmapp_write(auchan, desc->src, PDMASAR);
- audmapp_write(auchan, desc->dst, PDMADAR);
- audmapp_write(auchan, chcr, PDMACHCR);
-}
-
-static int audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
- u32 *chcr, dma_addr_t *dst)
-{
- struct audmapp_device *audev = to_dev(auchan);
- struct audmapp_pdata *pdata = audev->pdata;
- struct audmapp_slave_config *cfg;
- int i;
-
- *chcr = 0;
- *dst = 0;
-
- if (!pdata) { /* DT */
- *chcr = ((u32)slave_id) << 16;
- auchan->shdma_chan.slave_id = (slave_id) >> 8;
- return 0;
- }
-
- /* non-DT */
-
- if (slave_id >= AUDMAPP_SLAVE_NUMBER)
- return -ENXIO;
-
- for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
- if (cfg->slave_id == slave_id) {
- *chcr = cfg->chcr;
- *dst = cfg->dst;
- return 0;
- }
-
- return -ENXIO;
-}
-
-static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
- dma_addr_t slave_addr, bool try)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- u32 chcr;
- dma_addr_t dst;
- int ret;
-
- ret = audmapp_get_config(auchan, slave_id, &chcr, &dst);
- if (ret < 0)
- return ret;
-
- if (try)
- return 0;
-
- auchan->chcr = chcr;
- auchan->slave_addr = slave_addr ? : dst;
-
- return 0;
-}
-
-static int audmapp_desc_setup(struct shdma_chan *schan,
- struct shdma_desc *sdesc,
- dma_addr_t src, dma_addr_t dst, size_t *len)
-{
- struct audmapp_desc *desc = to_desc(sdesc);
-
- if (*len > (size_t)AUDMAPP_LEN_MAX)
- *len = (size_t)AUDMAPP_LEN_MAX;
-
- desc->src = src;
- desc->dst = dst;
-
- return 0;
-}
-
-static void audmapp_setup_xfer(struct shdma_chan *schan,
- int slave_id)
-{
-}
-
-static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
-{
- struct audmapp_chan *auchan = to_chan(schan);
-
- return auchan->slave_addr;
-}
-
-static bool audmapp_channel_busy(struct shdma_chan *schan)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- u32 chcr = audmapp_read(auchan, PDMACHCR);
-
- return chcr & ~PDMACHCR_DE;
-}
-
-static bool audmapp_desc_completed(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- return true;
-}
-
-static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
-{
- return &((struct audmapp_desc *)buf)[i].shdma_desc;
-}
-
-static const struct shdma_ops audmapp_shdma_ops = {
- .halt_channel = audmapp_halt,
- .desc_setup = audmapp_desc_setup,
- .set_slave = audmapp_set_slave,
- .start_xfer = audmapp_start_xfer,
- .embedded_desc = audmapp_embedded_desc,
- .setup_xfer = audmapp_setup_xfer,
- .slave_addr = audmapp_slave_addr,
- .channel_busy = audmapp_channel_busy,
- .desc_completed = audmapp_desc_completed,
-};
-
-static int audmapp_chan_probe(struct platform_device *pdev,
- struct audmapp_device *audev, int id)
-{
- struct shdma_dev *sdev = &audev->shdma_dev;
- struct audmapp_chan *auchan;
- struct shdma_chan *schan;
- struct device *dev = audev->dev;
-
- auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
- if (!auchan)
- return -ENOMEM;
-
- schan = &auchan->shdma_chan;
- schan->max_xfer_len = AUDMAPP_LEN_MAX;
-
- shdma_chan_probe(sdev, schan, id);
-
- auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
- dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg);
-
- return 0;
-}
-
-static void audmapp_chan_remove(struct audmapp_device *audev)
-{
- struct shdma_chan *schan;
- int i;
-
- shdma_for_each_chan(schan, &audev->shdma_dev, i) {
- BUG_ON(!schan);
- shdma_chan_remove(schan);
- }
-}
-
-static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- dma_cap_mask_t mask;
- struct dma_chan *chan;
- u32 chcr = dma_spec->args[0];
-
- if (dma_spec->args_count != 1)
- return NULL;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- chan = dma_request_channel(mask, shdma_chan_filter, NULL);
- if (chan)
- to_shdma_chan(chan)->hw_req = chcr;
-
- return chan;
-}
-
-static int audmapp_probe(struct platform_device *pdev)
-{
- struct audmapp_pdata *pdata = pdev->dev.platform_data;
- struct device_node *np = pdev->dev.of_node;
- struct audmapp_device *audev;
- struct shdma_dev *sdev;
- struct dma_device *dma_dev;
- struct resource *res;
- int err, i;
-
- if (np)
- of_dma_controller_register(np, audmapp_of_xlate, pdev);
- else if (!pdata)
- return -ENODEV;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
- if (!audev)
- return -ENOMEM;
-
- audev->dev = &pdev->dev;
- audev->pdata = pdata;
- audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(audev->chan_reg))
- return PTR_ERR(audev->chan_reg);
-
- sdev = &audev->shdma_dev;
- sdev->ops = &audmapp_shdma_ops;
- sdev->desc_size = sizeof(struct audmapp_desc);
-
- dma_dev = &sdev->dma_dev;
- dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
- dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
-
- err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
- if (err < 0)
- return err;
-
- platform_set_drvdata(pdev, audev);
-
- /* Create DMA Channel */
- for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
- err = audmapp_chan_probe(pdev, audev, i);
- if (err)
- goto chan_probe_err;
- }
-
- err = dma_async_device_register(dma_dev);
- if (err < 0)
- goto chan_probe_err;
-
- return err;
-
-chan_probe_err:
- audmapp_chan_remove(audev);
- shdma_cleanup(sdev);
-
- return err;
-}
-
-static int audmapp_remove(struct platform_device *pdev)
-{
- struct audmapp_device *audev = platform_get_drvdata(pdev);
- struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
-
- dma_async_device_unregister(dma_dev);
-
- audmapp_chan_remove(audev);
- shdma_cleanup(&audev->shdma_dev);
-
- return 0;
-}
-
-static const struct of_device_id audmapp_of_match[] = {
- { .compatible = "renesas,rcar-audmapp", },
- {},
-};
-
-static struct platform_driver audmapp_driver = {
- .probe = audmapp_probe,
- .remove = audmapp_remove,
- .driver = {
- .name = "rcar-audmapp-engine",
- .of_match_table = audmapp_of_match,
- },
-};
-module_platform_driver(audmapp_driver);
-
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 8ee383d339a5..10fcabad80f3 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -171,8 +171,7 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
return NULL;
}
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
- dma_addr_t slave_addr)
+static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
@@ -183,25 +182,23 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
ret = ops->set_slave(schan, match, slave_addr, true);
if (ret < 0)
return ret;
-
- slave_id = schan->slave_id;
} else {
- match = slave_id;
+ match = schan->real_slave_id;
}
- if (slave_id < 0 || slave_id >= slave_num)
+ if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
return -EINVAL;
- if (test_and_set_bit(slave_id, shdma_slave_used))
+ if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
return -EBUSY;
ret = ops->set_slave(schan, match, slave_addr, false);
if (ret < 0) {
- clear_bit(slave_id, shdma_slave_used);
+ clear_bit(schan->real_slave_id, shdma_slave_used);
return ret;
}
- schan->slave_id = slave_id;
+ schan->slave_id = schan->real_slave_id;
return 0;
}
@@ -221,10 +218,12 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
*/
if (slave) {
/* Legacy mode: .private is set in filter */
- ret = shdma_setup_slave(schan, slave->slave_id, 0);
+ schan->real_slave_id = slave->slave_id;
+ ret = shdma_setup_slave(schan, 0);
if (ret < 0)
goto esetslave;
} else {
+ /* Normal mode: real_slave_id was set by filter */
schan->slave_id = -EINVAL;
}
@@ -258,11 +257,14 @@ esetslave:
/*
* This is the standard shdma filter function to be used as a replacement to the
- * "old" method, using the .private pointer. If for some reason you allocate a
- * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
+ * "old" method, using the .private pointer.
+ * You always have to pass a valid slave id as the argument; old drivers that
+ * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config
+ * need to be updated so we can remove the slave_id field from dma_slave_config.
* If this filter is used, the slave driver, after calling
* dma_request_channel(), will also have to call dmaengine_slave_config() with
- * .slave_id, .direction, and either .src_addr or .dst_addr set.
+ * .direction, and either .src_addr or .dst_addr set.
+ *
* NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
* capability! If this becomes a requirement, hardware glue drivers, using this
* services would have to provide their own filters, which first would check
@@ -276,7 +278,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
struct shdma_chan *schan;
struct shdma_dev *sdev;
- int match = (long)arg;
+ int slave_id = (long)arg;
int ret;
/* Only support channels handled by this driver. */
@@ -284,19 +286,39 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
shdma_alloc_chan_resources)
return false;
- if (match < 0)
+ schan = to_shdma_chan(chan);
+ sdev = to_shdma_dev(chan->device);
+
+ /*
+ * For DT, the schan->slave_id field is generated by the
+ * set_slave function from the slave ID that is passed in
+ * from xlate. For the non-DT case, the slave ID is
+ * directly passed into the filter function by the driver
+ */
+ if (schan->dev->of_node) {
+ ret = sdev->ops->set_slave(schan, slave_id, 0, true);
+ if (ret < 0)
+ return false;
+
+ schan->real_slave_id = schan->slave_id;
+ return true;
+ }
+
+ if (slave_id < 0) {
/* No slave requested - arbitrary channel */
+ dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
return true;
+ }
- schan = to_shdma_chan(chan);
- if (!schan->dev->of_node && match >= slave_num)
+ if (slave_id >= slave_num)
return false;
- sdev = to_shdma_dev(schan->dma_chan.device);
- ret = sdev->ops->set_slave(schan, match, 0, true);
+ ret = sdev->ops->set_slave(schan, slave_id, 0, true);
if (ret < 0)
return false;
+ schan->real_slave_id = slave_id;
+
return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
@@ -452,6 +474,8 @@ static void shdma_free_chan_resources(struct dma_chan *chan)
chan->private = NULL;
}
+ schan->real_slave_id = 0;
+
spin_lock_irq(&schan->chan_lock);
list_splice_init(&schan->ld_free, &list);
@@ -764,11 +788,20 @@ static int shdma_config(struct dma_chan *chan,
*/
if (!config)
return -EINVAL;
+
+ /*
+ * overriding the slave_id through dma_slave_config is deprecated,
+ * but possibly some out-of-tree drivers still do it.
+ */
+ if (WARN_ON_ONCE(config->slave_id &&
+ config->slave_id != schan->real_slave_id))
+ schan->real_slave_id = config->slave_id;
+
/*
* We could lock this, but you shouldn't be configuring the
* channel, while using it...
*/
- return shdma_setup_slave(schan, config->slave_id,
+ return shdma_setup_slave(schan,
config->direction == DMA_DEV_TO_MEM ?
config->src_addr : config->dst_addr);
}
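
After this rework the slave id comes from the filter argument (or from DT via the xlate path); overriding it through dma_slave_config still works but is now warned about as a deprecated path. A hedged sketch of what a client is expected to provide (address and width are illustrative):

#include <linux/dmaengine.h>

static int foo_config_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		/* no .slave_id: the filter / OF xlate already selected it */
	};

	return dmaengine_slave_config(chan, &cfg);
}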
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 9f1d4c7dbab8..11707df1a689 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -443,7 +443,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
return ret;
}
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = data;
@@ -689,7 +689,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
const struct sh_dmae_pdata *pdata;
unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int chan_irq[SH_DMAE_MAX_CHANNELS];
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
unsigned long irqflags = 0;
int errirq;
#endif
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
new file mode 100644
index 000000000000..ebd8a5f398b0
--- /dev/null
+++ b/drivers/dma/sh/usb-dmac.c
@@ -0,0 +1,912 @@
+/*
+ * Renesas USB DMA Controller Driver
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * based on rcar-dmac.c
+ * Copyright (C) 2014 Renesas Electronics Inc.
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/*
+ * struct usb_dmac_sg - Descriptor for a hardware transfer
+ * @mem_addr: memory address
+ * @size: transfer size in bytes
+ */
+struct usb_dmac_sg {
+ dma_addr_t mem_addr;
+ u32 size;
+};
+
+/*
+ * struct usb_dmac_desc - USB DMA Transfer Descriptor
+ * @vd: base virtual channel DMA transaction descriptor
+ * @direction: direction of the DMA transfer
+ * @sg_allocated_len: length of allocated sg
+ * @sg_len: length of sg
+ * @sg_index: index of sg
+ * @residue: residue after the DMAC completed a transfer
+ * @node: node for desc_got and desc_freed
+ * @done_cookie: cookie after the DMAC completed a transfer
+ * @sg: information for the transfer
+ */
+struct usb_dmac_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction direction;
+ unsigned int sg_allocated_len;
+ unsigned int sg_len;
+ unsigned int sg_index;
+ u32 residue;
+ struct list_head node;
+ dma_cookie_t done_cookie;
+ struct usb_dmac_sg sg[0];
+};
+
+#define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd)
+
+/*
+ * struct usb_dmac_chan - USB DMA Controller Channel
+ * @vc: base virtual DMA channel object
+ * @iomem: channel I/O memory base
+ * @index: index of this channel in the controller
+ * @irq: irq number of this channel
+ * @desc: the current descriptor
+ * @descs_allocated: number of descriptors allocated
+ * @desc_got: got descriptors
+ * @desc_freed: freed descriptors after the DMAC completed a transfer
+ */
+struct usb_dmac_chan {
+ struct virt_dma_chan vc;
+ void __iomem *iomem;
+ unsigned int index;
+ int irq;
+ struct usb_dmac_desc *desc;
+ int descs_allocated;
+ struct list_head desc_got;
+ struct list_head desc_freed;
+};
+
+#define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan)
+
+/*
+ * struct usb_dmac - USB DMA Controller
+ * @engine: base DMA engine object
+ * @dev: the hardware device
+ * @iomem: remapped I/O memory base
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
+ */
+struct usb_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *iomem;
+
+ unsigned int n_channels;
+ struct usb_dmac_chan *channels;
+};
+
+#define to_usb_dmac(d) container_of(d, struct usb_dmac, engine)
+
+/* -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define USB_DMAC_CHAN_OFFSET(i) (0x20 + 0x20 * (i))
+
+#define USB_DMASWR 0x0008
+#define USB_DMASWR_SWR (1 << 0)
+#define USB_DMAOR 0x0060
+#define USB_DMAOR_AE (1 << 2)
+#define USB_DMAOR_DME (1 << 0)
+
+#define USB_DMASAR 0x0000
+#define USB_DMADAR 0x0004
+#define USB_DMATCR 0x0008
+#define USB_DMATCR_MASK 0x00ffffff
+#define USB_DMACHCR 0x0014
+#define USB_DMACHCR_FTE (1 << 24)
+#define USB_DMACHCR_NULLE (1 << 16)
+#define USB_DMACHCR_NULL (1 << 12)
+#define USB_DMACHCR_TS_8B ((0 << 7) | (0 << 6))
+#define USB_DMACHCR_TS_16B ((0 << 7) | (1 << 6))
+#define USB_DMACHCR_TS_32B ((1 << 7) | (0 << 6))
+#define USB_DMACHCR_IE (1 << 5)
+#define USB_DMACHCR_SP (1 << 2)
+#define USB_DMACHCR_TE (1 << 1)
+#define USB_DMACHCR_DE (1 << 0)
+#define USB_DMATEND 0x0018
+
+/* Hardcode the xfer_shift to 5 (32bytes) */
+#define USB_DMAC_XFER_SHIFT 5
+#define USB_DMAC_XFER_SIZE (1 << USB_DMAC_XFER_SHIFT)
+#define USB_DMAC_CHCR_TS USB_DMACHCR_TS_32B
+#define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES
+
+/* for descriptors */
+#define USB_DMAC_INITIAL_NR_DESC 16
+#define USB_DMAC_INITIAL_NR_SG 8
+
+/* -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
+{
+ writel(data, dmac->iomem + reg);
+}
+
+static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
+{
+ return readl(dmac->iomem + reg);
+}
+
+static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
+{
+ return readl(chan->iomem + reg);
+}
+
+static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
+{
+ writel(data, chan->iomem + reg);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and configuration
+ */
+
+static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
+{
+ u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+
+ return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
+}
+
+static u32 usb_dmac_calc_tend(u32 size)
+{
+ /*
+ * Please refer to the Figure "Example of Final Transaction Valid
+ * Data Transfer Enable (EDTEN) Setting" in the data sheet.
+ */
+ return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
+ USB_DMAC_XFER_SIZE));
+}
+
+/* This function is already held by vc.lock */
+static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
+ unsigned int index)
+{
+ struct usb_dmac_desc *desc = chan->desc;
+ struct usb_dmac_sg *sg = desc->sg + index;
+ dma_addr_t src_addr = 0, dst_addr = 0;
+
+ WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));
+
+ if (desc->direction == DMA_DEV_TO_MEM)
+ dst_addr = sg->mem_addr;
+ else
+ src_addr = sg->mem_addr;
+
+ dev_dbg(chan->vc.chan.device->dev,
+ "chan%u: queue sg %p: %u@%pad -> %pad\n",
+ chan->index, sg, sg->size, &src_addr, &dst_addr);
+
+ usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
+ usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
+ usb_dmac_chan_write(chan, USB_DMATCR,
+ DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
+ usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));
+
+ usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
+ USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
+}
+
+/* This function is already held by vc.lock */
+static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ chan->desc = NULL;
+ return;
+ }
+
+ /*
+ * Remove this request from vc->desc_issued. Otherwise, this driver
+ * will get the previous value from vchan_next_desc() after a transfer
+ * was completed.
+ */
+ list_del(&vd->node);
+
+ chan->desc = to_usb_dmac_desc(vd);
+ chan->desc->sg_index = 0;
+ usb_dmac_chan_start_sg(chan, 0);
+}
+
+static int usb_dmac_init(struct usb_dmac *dmac)
+{
+ u16 dmaor;
+
+ /* Clear all channels and enable the DMAC globally. */
+ usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
+
+ dmaor = usb_dmac_read(dmac, USB_DMAOR);
+ if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
+ dev_warn(dmac->dev, "DMAOR initialization failed.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors allocation and free
+ */
+static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
+ gfp_t gfp)
+{
+ struct usb_dmac_desc *desc;
+ unsigned long flags;
+
+ desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->sg_allocated_len = sg_len;
+ INIT_LIST_HEAD(&desc->node);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_add_tail(&desc->node, &chan->desc_freed);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
+{
+ struct usb_dmac_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ list_splice_init(&chan->desc_freed, &list);
+ list_splice_init(&chan->desc_got, &list);
+
+ list_for_each_entry_safe(desc, _desc, &list, node) {
+ list_del(&desc->node);
+ kfree(desc);
+ }
+ chan->descs_allocated = 0;
+}
+
+static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
+ unsigned int sg_len, gfp_t gfp)
+{
+ struct usb_dmac_desc *desc = NULL;
+ unsigned long flags;
+
+ /* Get a freed descriptor */
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_for_each_entry(desc, &chan->desc_freed, node) {
+ if (sg_len <= desc->sg_allocated_len) {
+ list_move_tail(&desc->node, &chan->desc_got);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ return desc;
+ }
+ }
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* Allocate a new descriptor */
+ if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
+ /* If allocated the desc, it was added to tail of the list */
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
+ node);
+ list_move_tail(&desc->node, &chan->desc_got);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ return desc;
+ }
+
+ return NULL;
+}
+
+static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
+ struct usb_dmac_desc *desc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_move_tail(&desc->node, &chan->desc_freed);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Stop and reset
+ */
+
+static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
+{
+ struct dma_chan *chan = &uchan->vc.chan;
+ struct usb_dmac *dmac = to_usb_dmac(chan->device);
+ int i;
+
+ /* Don't issue soft reset if any one of channels is busy */
+ for (i = 0; i < dmac->n_channels; ++i) {
+ if (usb_dmac_chan_is_busy(uchan))
+ return;
+ }
+
+ usb_dmac_write(dmac, USB_DMAOR, 0);
+ usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
+ udelay(100);
+ usb_dmac_write(dmac, USB_DMASWR, 0);
+ usb_dmac_write(dmac, USB_DMAOR, 1);
+}
+
+static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
+{
+ u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+
+ chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr);
+
+ usb_dmac_soft_reset(chan);
+}
+
+static void usb_dmac_stop(struct usb_dmac *dmac)
+{
+ usb_dmac_write(dmac, USB_DMAOR, 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ int ret;
+
+ while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
+ ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
+ GFP_KERNEL);
+ if (ret < 0) {
+ usb_dmac_desc_free(uchan);
+ return ret;
+ }
+ uchan->descs_allocated++;
+ }
+
+ return pm_runtime_get_sync(chan->device->dev);
+}
+
+static void usb_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ unsigned long flags;
+
+ /* Protect against ISR */
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ usb_dmac_chan_halt(uchan);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+
+ usb_dmac_desc_free(uchan);
+ vchan_free_chan_resources(&uchan->vc);
+
+ pm_runtime_put(chan->device->dev);
+}
+
+static struct dma_async_tx_descriptor *
+usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long dma_flags, void *context)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct usb_dmac_desc *desc;
+ struct scatterlist *sg;
+ int i;
+
+ if (!sg_len) {
+ dev_warn(chan->device->dev,
+ "%s: bad parameter: len=%d\n", __func__, sg_len);
+ return NULL;
+ }
+
+ desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->direction = dir;
+ desc->sg_len = sg_len;
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg[i].mem_addr = sg_dma_address(sg);
+ desc->sg[i].size = sg_dma_len(sg);
+ }
+
+ return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
+}
+
+static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct usb_dmac_desc *desc;
+ unsigned long flags;
+ LIST_HEAD(head);
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ usb_dmac_chan_halt(uchan);
+ vchan_get_all_descriptors(&uchan->vc, &head);
+ if (uchan->desc)
+ uchan->desc = NULL;
+ list_splice_init(&uchan->desc_got, &list);
+ list_for_each_entry(desc, &list, node)
+ list_move_tail(&desc->node, &uchan->desc_freed);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&uchan->vc, &head);
+
+ return 0;
+}
+
+static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
+ struct usb_dmac_desc *desc,
+ int sg_index)
+{
+ struct usb_dmac_sg *sg = desc->sg + sg_index;
+ u32 mem_addr = sg->mem_addr & 0xffffffff;
+ unsigned int residue = sg->size;
+
+ /*
+ * We cannot use USB_DMATCR to calculate the residue because it does not
+ * hold a value suitable for that calculation.
+ */
+ if (desc->direction == DMA_DEV_TO_MEM)
+ residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
+ else
+ residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;
+
+ return residue;
+}
+
+static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct usb_dmac_desc *desc;
+ u32 residue = 0;
+
+ list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
+ if (desc->done_cookie == cookie) {
+ residue = desc->residue;
+ break;
+ }
+ }
+
+ return residue;
+}
+
+static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
+ dma_cookie_t cookie)
+{
+ u32 residue = 0;
+ struct virt_dma_desc *vd;
+ struct usb_dmac_desc *desc = chan->desc;
+ int i;
+
+ if (!desc) {
+ vd = vchan_find_desc(&chan->vc, cookie);
+ if (!vd)
+ return 0;
+ desc = to_usb_dmac_desc(vd);
+ }
+
+ /* Compute the size of all usb_dmac_sg still to be transferred */
+ for (i = desc->sg_index + 1; i < desc->sg_len; i++)
+ residue += desc->sg[i].size;
+
+ /* Add the residue for the current sg */
+ residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);
+
+ return residue;
+}
+
+static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ enum dma_status status;
+ unsigned int residue = 0;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ /* a client driver will get residue after DMA_COMPLETE */
+ if (!txstate)
+ return status;
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ if (status == DMA_COMPLETE)
+ residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
+ else
+ residue = usb_dmac_chan_get_residue(uchan, cookie);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+
+ dma_set_residue(txstate, residue);
+
+ return status;
+}
+
+static void usb_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
+ usb_dmac_chan_start_desc(uchan);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+}
+
+static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
+{
+ struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
+ struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);
+
+ usb_dmac_desc_put(chan, desc);
+}
+
+/* -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
+{
+ struct usb_dmac_desc *desc = chan->desc;
+
+ BUG_ON(!desc);
+
+ if (++desc->sg_index < desc->sg_len) {
+ usb_dmac_chan_start_sg(chan, desc->sg_index);
+ } else {
+ desc->residue = usb_dmac_get_current_residue(chan, desc,
+ desc->sg_index - 1);
+ desc->done_cookie = desc->vd.tx.cookie;
+ vchan_cookie_complete(&desc->vd);
+
+ /* Restart the next transfer if this driver has a next desc */
+ usb_dmac_chan_start_desc(chan);
+ }
+}
+
+static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
+{
+ struct usb_dmac_chan *chan = dev;
+ irqreturn_t ret = IRQ_NONE;
+ u32 mask = USB_DMACHCR_TE;
+ u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+ u32 chcr;
+
+ spin_lock(&chan->vc.lock);
+
+ chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+ if (chcr & check_bits)
+ mask |= USB_DMACHCR_DE | check_bits;
+ if (chcr & USB_DMACHCR_NULL) {
+ /* An interruption of TE will happen after we set FTE */
+ mask |= USB_DMACHCR_NULL;
+ chcr |= USB_DMACHCR_FTE;
+ ret |= IRQ_HANDLED;
+ }
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+
+ if (chcr & check_bits) {
+ usb_dmac_isr_transfer_end(chan);
+ ret |= IRQ_HANDLED;
+ }
+
+ spin_unlock(&chan->vc.lock);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct of_phandle_args *dma_spec = arg;
+
+ if (dma_spec->np != chan->device->dev->of_node)
+ return false;
+
+ /* USB-DMAC should be used with fixed usb controller's FIFO */
+ if (uchan->index != dma_spec->args[0])
+ return false;
+
+ return true;
+}
+
+static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct usb_dmac_chan *uchan;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec);
+ if (!chan)
+ return NULL;
+
+ uchan = to_usb_dmac_chan(chan);
+
+ return chan;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM
+static int usb_dmac_runtime_suspend(struct device *dev)
+{
+ struct usb_dmac *dmac = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < dmac->n_channels; ++i)
+ usb_dmac_chan_halt(&dmac->channels[i]);
+
+ return 0;
+}
+
+static int usb_dmac_runtime_resume(struct device *dev)
+{
+ struct usb_dmac *dmac = dev_get_drvdata(dev);
+
+ return usb_dmac_init(dmac);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops usb_dmac_pm = {
+ SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
+ NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int usb_dmac_chan_probe(struct usb_dmac *dmac,
+ struct usb_dmac_chan *uchan,
+ unsigned int index)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ uchan->index = index;
+ uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", index);
+ uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (uchan->irq < 0) {
+ dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+ return -ENODEV;
+ }
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), index);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
+ IRQF_SHARED, irqname, uchan);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ uchan->irq, ret);
+ return ret;
+ }
+
+ uchan->vc.desc_free = usb_dmac_virt_desc_free;
+ vchan_init(&uchan->vc, &dmac->engine);
+ INIT_LIST_HEAD(&uchan->desc_freed);
+ INIT_LIST_HEAD(&uchan->desc_got);
+
+ return 0;
+}
+
+static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+ dev_err(dev, "invalid number of channels %u\n",
+ dmac->n_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int usb_dmac_probe(struct platform_device *pdev)
+{
+ const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
+ struct dma_device *engine;
+ struct usb_dmac *dmac;
+ struct resource *mem;
+ unsigned int i;
+ int ret;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+
+ ret = usb_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources. */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dmac->iomem))
+ return PTR_ERR(dmac->iomem);
+
+ /* Enable runtime PM and initialize the device. */
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = usb_dmac_init(dmac);
+ pm_runtime_put(&pdev->dev);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset device\n");
+ goto error;
+ }
+
+ /* Initialize the channels. */
+ INIT_LIST_HEAD(&dmac->engine.channels);
+
+ for (i = 0; i < dmac->n_channels; ++i) {
+ ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * Register the DMA engine device.
+ *
+ * Default transfer size of 32 bytes requires 32-byte alignment.
+ */
+ engine = &dmac->engine;
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+ engine->dev = &pdev->dev;
+
+ engine->src_addr_widths = widths;
+ engine->dst_addr_widths = widths;
+ engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = usb_dmac_free_chan_resources;
+ engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
+ engine->device_terminate_all = usb_dmac_chan_terminate_all;
+ engine->device_tx_status = usb_dmac_tx_status;
+ engine->device_issue_pending = usb_dmac_issue_pending;
+
+ ret = dma_async_device_register(engine);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ of_dma_controller_free(pdev->dev.of_node);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static void usb_dmac_chan_remove(struct usb_dmac *dmac,
+ struct usb_dmac_chan *uchan)
+{
+ usb_dmac_chan_halt(uchan);
+ devm_free_irq(dmac->dev, uchan->irq, uchan);
+}
+
+static int usb_dmac_remove(struct platform_device *pdev)
+{
+ struct usb_dmac *dmac = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < dmac->n_channels; ++i)
+ usb_dmac_chan_remove(dmac, &dmac->channels[i]);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static void usb_dmac_shutdown(struct platform_device *pdev)
+{
+ struct usb_dmac *dmac = platform_get_drvdata(pdev);
+
+ usb_dmac_stop(dmac);
+}
+
+static const struct of_device_id usb_dmac_of_ids[] = {
+ { .compatible = "renesas,usb-dmac", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
+
+static struct platform_driver usb_dmac_driver = {
+ .driver = {
+ .pm = &usb_dmac_pm,
+ .name = "usb-dmac",
+ .of_match_table = usb_dmac_of_ids,
+ },
+ .probe = usb_dmac_probe,
+ .remove = usb_dmac_remove,
+ .shutdown = usb_dmac_shutdown,
+};
+
+module_platform_driver(usb_dmac_driver);
+
+MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_LICENSE("GPL v2");
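
A detail worth noting in the new driver: a completed descriptor keeps its residue and done_cookie after it moves back to desc_freed, so usb_dmac_tx_status() can still report a residue for a cookie that is already DMA_COMPLETE. On the client side that information arrives through the generic status call; a hedged sketch (foo_bytes_left is illustrative):

#include <linux/dmaengine.h>

static u32 foo_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		return 0;

	/* valid while in progress and, with this driver, after completion too */
	return state.residue;
}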
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d0086e9f2082..a1afda43b8ef 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -896,7 +896,7 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};
-static struct of_device_id sirfsoc_dma_match[] = {
+static const struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
{ .compatible = "sirf,marco-dmac", },
{},
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1332b1d4d541..3c10f034d4b9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2514,7 +2514,8 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
sg_dma_len(&dst_sg) = size;
sg_dma_len(&src_sg) = size;
- return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
+ return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
+ DMA_MEM_TO_MEM, dma_flags);
}
static struct dma_async_tx_descriptor *
@@ -2526,7 +2527,8 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
if (dst_nents != src_nents)
return NULL;
- return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
+ return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
+ DMA_MEM_TO_MEM, dma_flags);
}
static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 7ebcf9bec698..11e536586812 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -796,11 +796,6 @@ static void sun6i_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
-static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
@@ -896,7 +891,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_vchans = 37,
};
-static struct of_device_id sun6i_dma_match[] = {
+static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
{ /* sentinel */ }
@@ -957,7 +952,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
INIT_LIST_HEAD(&sdc->slave.channels);
- sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources;
sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
sdc->slave.device_tx_status = sun6i_dma_tx_status;
sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
new file mode 100755
index 000000000000..f52e37502254
--- /dev/null
+++ b/drivers/dma/xgene-dma.c
@@ -0,0 +1,2089 @@
+/*
+ * Applied Micro X-Gene SoC DMA engine Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
+ * Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * NOTE: PM support is currently not available.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include "dmaengine.h"
+
+/* X-Gene DMA ring csr registers and bit definitions */
+#define XGENE_DMA_RING_CONFIG 0x04
+#define XGENE_DMA_RING_ENABLE BIT(31)
+#define XGENE_DMA_RING_ID 0x08
+#define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31))
+#define XGENE_DMA_RING_ID_BUF 0x0C
+#define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21))
+#define XGENE_DMA_RING_THRESLD0_SET1 0x30
+#define XGENE_DMA_RING_THRESLD0_SET1_VAL 0X64
+#define XGENE_DMA_RING_THRESLD1_SET1 0x34
+#define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8
+#define XGENE_DMA_RING_HYSTERESIS 0x68
+#define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF
+#define XGENE_DMA_RING_STATE 0x6C
+#define XGENE_DMA_RING_STATE_WR_BASE 0x70
+#define XGENE_DMA_RING_NE_INT_MODE 0x017C
+#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \
+ ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
+#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \
+ ((m) &= (~BIT(31 - (v))))
+#define XGENE_DMA_RING_CLKEN 0xC208
+#define XGENE_DMA_RING_SRST 0xC200
+#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
+#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
+#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
+#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
+#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
+#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
+#define XGENE_DMA_RING_CMD_OFFSET 0x2C
+#define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6)
+#define XGENE_DMA_RING_COHERENT_SET(m) \
+ (((u32 *)(m))[2] |= BIT(4))
+#define XGENE_DMA_RING_ADDRL_SET(m, v) \
+ (((u32 *)(m))[2] |= (((v) >> 8) << 5))
+#define XGENE_DMA_RING_ADDRH_SET(m, v) \
+ (((u32 *)(m))[3] |= ((v) >> 35))
+#define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
+ (((u32 *)(m))[3] |= BIT(19))
+#define XGENE_DMA_RING_SIZE_SET(m, v) \
+ (((u32 *)(m))[3] |= ((v) << 23))
+#define XGENE_DMA_RING_RECOMBBUF_SET(m) \
+ (((u32 *)(m))[3] |= BIT(27))
+#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \
+ (((u32 *)(m))[3] |= (0x7 << 28))
+#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \
+ (((u32 *)(m))[4] |= 0x3)
+#define XGENE_DMA_RING_SELTHRSH_SET(m) \
+ (((u32 *)(m))[4] |= BIT(3))
+#define XGENE_DMA_RING_TYPE_SET(m, v) \
+ (((u32 *)(m))[4] |= ((v) << 19))
+
+/* X-Gene DMA device csr registers and bit definitions */
+#define XGENE_DMA_IPBRR 0x0
+#define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF)
+#define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3)
+#define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3)
+#define XGENE_DMA_GCR 0x10
+#define XGENE_DMA_CH_SETUP(v) \
+ ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
+#define XGENE_DMA_ENABLE(v) ((v) |= BIT(31))
+#define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31))
+#define XGENE_DMA_RAID6_CONT 0x14
+#define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24)
+#define XGENE_DMA_INT 0x70
+#define XGENE_DMA_INT_MASK 0x74
+#define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF
+#define XGENE_DMA_INT_ALL_UNMASK 0x0
+#define XGENE_DMA_INT_MASK_SHIFT 0x14
+#define XGENE_DMA_RING_INT0_MASK 0x90A0
+#define XGENE_DMA_RING_INT1_MASK 0x90A8
+#define XGENE_DMA_RING_INT2_MASK 0x90B0
+#define XGENE_DMA_RING_INT3_MASK 0x90B8
+#define XGENE_DMA_RING_INT4_MASK 0x90C0
+#define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0
+#define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF
+#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
+#define XGENE_DMA_BLK_MEM_RDY 0xD074
+#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
+
+/* X-Gene SoC EFUSE csr register and bit definition */
+#define XGENE_SOC_JTAG1_SHADOW 0x18
+#define XGENE_DMA_PQ_DISABLE_MASK BIT(13)
+
+/* X-Gene DMA Descriptor format */
+#define XGENE_DMA_DESC_NV_BIT BIT_ULL(50)
+#define XGENE_DMA_DESC_IN_BIT BIT_ULL(55)
+#define XGENE_DMA_DESC_C_BIT BIT_ULL(63)
+#define XGENE_DMA_DESC_DR_BIT BIT_ULL(61)
+#define XGENE_DMA_DESC_ELERR_POS 46
+#define XGENE_DMA_DESC_RTYPE_POS 56
+#define XGENE_DMA_DESC_LERR_POS 60
+#define XGENE_DMA_DESC_FLYBY_POS 4
+#define XGENE_DMA_DESC_BUFLEN_POS 48
+#define XGENE_DMA_DESC_HOENQ_NUM_POS 48
+
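+/*
+ * The helpers below build the 32B hw descriptor in place, treating it as an
+ * array of four 64-bit words (struct xgene_dma_desc_hw); a second 32B
+ * descriptor extends it to the 64B format used for multi-source operations
+ */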
+#define XGENE_DMA_DESC_NV_SET(m) \
+ (((u64 *)(m))[0] |= XGENE_DMA_DESC_NV_BIT)
+#define XGENE_DMA_DESC_IN_SET(m) \
+ (((u64 *)(m))[0] |= XGENE_DMA_DESC_IN_BIT)
+#define XGENE_DMA_DESC_RTYPE_SET(m, v) \
+ (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_RTYPE_POS))
+#define XGENE_DMA_DESC_BUFADDR_SET(m, v) \
+ (((u64 *)(m))[0] |= (v))
+#define XGENE_DMA_DESC_BUFLEN_SET(m, v) \
+ (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_BUFLEN_POS))
+#define XGENE_DMA_DESC_C_SET(m) \
+ (((u64 *)(m))[1] |= XGENE_DMA_DESC_C_BIT)
+#define XGENE_DMA_DESC_FLYBY_SET(m, v) \
+ (((u64 *)(m))[2] |= ((v) << XGENE_DMA_DESC_FLYBY_POS))
+#define XGENE_DMA_DESC_MULTI_SET(m, v, i) \
+ (((u64 *)(m))[2] |= ((u64)(v) << (((i) + 1) * 8)))
+#define XGENE_DMA_DESC_DR_SET(m) \
+ (((u64 *)(m))[2] |= XGENE_DMA_DESC_DR_BIT)
+#define XGENE_DMA_DESC_DST_ADDR_SET(m, v) \
+ (((u64 *)(m))[3] |= (v))
+#define XGENE_DMA_DESC_H0ENQ_NUM_SET(m, v) \
+ (((u64 *)(m))[3] |= ((u64)(v) << XGENE_DMA_DESC_HOENQ_NUM_POS))
+#define XGENE_DMA_DESC_ELERR_RD(m) \
+ (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
+#define XGENE_DMA_DESC_LERR_RD(m) \
+ (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
+#define XGENE_DMA_DESC_STATUS(elerr, lerr) \
+ (((elerr) << 4) | (lerr))
+
+/* X-Gene DMA descriptor empty s/w signature */
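+/*
+ * The driver stamps every Rx ring slot with this all-ones signature; the hw
+ * overwrites the slot when it writes back a completed descriptor, so
+ * XGENE_DMA_DESC_IS_EMPTY() tells pending slots from completed ones, and
+ * processed slots are re-stamped as empty during cleanup
+ */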
+#define XGENE_DMA_DESC_EMPTY_INDEX 0
+#define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL
+#define XGENE_DMA_DESC_SET_EMPTY(m) \
+ (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] = \
+ XGENE_DMA_DESC_EMPTY_SIGNATURE)
+#define XGENE_DMA_DESC_IS_EMPTY(m) \
+ (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] == \
+ XGENE_DMA_DESC_EMPTY_SIGNATURE)
+
+/* X-Gene DMA configurable parameters defines */
+#define XGENE_DMA_RING_NUM 512
+#define XGENE_DMA_BUFNUM 0x0
+#define XGENE_DMA_CPU_BUFNUM 0x18
+#define XGENE_DMA_RING_OWNER_DMA 0x03
+#define XGENE_DMA_RING_OWNER_CPU 0x0F
+#define XGENE_DMA_RING_TYPE_REGULAR 0x01
+#define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */
+#define XGENE_DMA_RING_NUM_CONFIG 5
+#define XGENE_DMA_MAX_CHANNEL 4
+#define XGENE_DMA_XOR_CHANNEL 0
+#define XGENE_DMA_PQ_CHANNEL 1
+#define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */
+#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
+#define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */
+#define XGENE_DMA_MAX_XOR_SRC 5
+#define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0
+#define XGENE_DMA_INVALID_LEN_CODE 0x7800
+
+/* X-Gene DMA descriptor error codes */
+#define ERR_DESC_AXI 0x01
+#define ERR_BAD_DESC 0x02
+#define ERR_READ_DATA_AXI 0x03
+#define ERR_WRITE_DATA_AXI 0x04
+#define ERR_FBP_TIMEOUT 0x05
+#define ERR_ECC 0x06
+#define ERR_DIFF_SIZE 0x08
+#define ERR_SCT_GAT_LEN 0x09
+#define ERR_CRC_ERR 0x11
+#define ERR_CHKSUM 0x12
+#define ERR_DIF 0x13
+
+/* X-Gene DMA error interrupt codes */
+#define ERR_DIF_SIZE_INT 0x0
+#define ERR_GS_ERR_INT 0x1
+#define ERR_FPB_TIMEO_INT 0x2
+#define ERR_WFIFO_OVF_INT 0x3
+#define ERR_RFIFO_OVF_INT 0x4
+#define ERR_WR_TIMEO_INT 0x5
+#define ERR_RD_TIMEO_INT 0x6
+#define ERR_WR_ERR_INT 0x7
+#define ERR_RD_ERR_INT 0x8
+#define ERR_BAD_DESC_INT 0x9
+#define ERR_DESC_DST_INT 0xA
+#define ERR_DESC_SRC_INT 0xB
+
+/* X-Gene DMA flyby operation code */
+#define FLYBY_2SRC_XOR 0x8
+#define FLYBY_3SRC_XOR 0x9
+#define FLYBY_4SRC_XOR 0xA
+#define FLYBY_5SRC_XOR 0xB
+
+/* X-Gene DMA SW descriptor flags */
+#define XGENE_DMA_FLAG_64B_DESC BIT(0)
+
+/* Define to dump X-Gene DMA descriptor */
+#define XGENE_DMA_DESC_DUMP(desc, m) \
+ print_hex_dump(KERN_ERR, (m), \
+ DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
+
+#define to_dma_desc_sw(tx) \
+ container_of(tx, struct xgene_dma_desc_sw, tx)
+#define to_dma_chan(dchan) \
+ container_of(dchan, struct xgene_dma_chan, dma_chan)
+
+#define chan_dbg(chan, fmt, arg...) \
+ dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
+#define chan_err(chan, fmt, arg...) \
+ dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
+
+struct xgene_dma_desc_hw {
+ u64 m0;
+ u64 m1;
+ u64 m2;
+ u64 m3;
+};
+
+enum xgene_dma_ring_cfgsize {
+ XGENE_DMA_RING_CFG_SIZE_512B,
+ XGENE_DMA_RING_CFG_SIZE_2KB,
+ XGENE_DMA_RING_CFG_SIZE_16KB,
+ XGENE_DMA_RING_CFG_SIZE_64KB,
+ XGENE_DMA_RING_CFG_SIZE_512KB,
+ XGENE_DMA_RING_CFG_SIZE_INVALID
+};
+
+struct xgene_dma_ring {
+ struct xgene_dma *pdma;
+ u8 buf_num;
+ u16 id;
+ u16 num;
+ u16 head;
+ u16 owner;
+ u16 slots;
+ u16 dst_ring_num;
+ u32 size;
+ void __iomem *cmd;
+ void __iomem *cmd_base;
+ dma_addr_t desc_paddr;
+ u32 state[XGENE_DMA_RING_NUM_CONFIG];
+ enum xgene_dma_ring_cfgsize cfgsize;
+ union {
+ void *desc_vaddr;
+ struct xgene_dma_desc_hw *desc_hw;
+ };
+};
+
+struct xgene_dma_desc_sw {
+ struct xgene_dma_desc_hw desc1;
+ struct xgene_dma_desc_hw desc2;
+ u32 flags;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor tx;
+};
+
+/**
+ * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
+ * @dma_chan: dmaengine channel object member
+ * @pdma: X-Gene DMA device structure reference
+ * @dev: struct device reference for dma mapping api
+ * @id: raw id of this channel
+ * @rx_irq: channel IRQ
+ * @name: name of X-Gene DMA channel
+ * @lock: serializes enqueue/dequeue operations to the descriptor pool
+ * @pending: number of transaction requests pushed to the DMA controller for
+ * execution, but still waiting for completion
+ * @max_outstanding: max number of outstanding requests we can push to channel
+ * @ld_pending: descriptors which are queued to run, but have not yet been
+ * submitted to the hardware for execution
+ * @ld_running: descriptors which are currently being executed by the hardware
+ * @ld_completed: descriptors which have finished execution by the hardware.
+ * These descriptors have already had their cleanup actions run. They
+ * are waiting for the ACK bit to be set by the async tx API.
+ * @desc_pool: descriptor pool for DMA operations
+ * @tasklet: bottom half where completed descriptors are cleaned up
+ * @tx_ring: transmit ring descriptor that we use to prepare the actual
+ * hw descriptors for execution
+ * @rx_ring: receive ring descriptor that we use to retrieve completed DMA
+ * descriptors during cleanup
+ */
+struct xgene_dma_chan {
+ struct dma_chan dma_chan;
+ struct xgene_dma *pdma;
+ struct device *dev;
+ int id;
+ int rx_irq;
+ char name[10];
+ spinlock_t lock;
+ int pending;
+ int max_outstanding;
+ struct list_head ld_pending;
+ struct list_head ld_running;
+ struct list_head ld_completed;
+ struct dma_pool *desc_pool;
+ struct tasklet_struct tasklet;
+ struct xgene_dma_ring tx_ring;
+ struct xgene_dma_ring rx_ring;
+};
+
+/**
+ * struct xgene_dma - internal representation of an X-Gene DMA device
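+ * @dev: struct device of the DMA device
+ * @clk: reference to the DMA engine clock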
+ * @err_irq: DMA error irq number
+ * @ring_num: start id number for DMA ring
+ * @csr_dma: base for DMA register access
+ * @csr_ring: base for DMA ring register access
+ * @csr_ring_cmd: base for DMA ring command register access
+ * @csr_efuse: base for efuse register access
+ * @dma_dev: embedded struct dma_device
+ * @chan: reference to X-Gene DMA channels
+ */
+struct xgene_dma {
+ struct device *dev;
+ struct clk *clk;
+ int err_irq;
+ int ring_num;
+ void __iomem *csr_dma;
+ void __iomem *csr_ring;
+ void __iomem *csr_ring_cmd;
+ void __iomem *csr_efuse;
+ struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
+ struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
+};
+
+static const char * const xgene_dma_desc_err[] = {
+ [ERR_DESC_AXI] = "AXI error when reading src/dst link list",
+ [ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
+ [ERR_READ_DATA_AXI] = "AXI error when reading data",
+ [ERR_WRITE_DATA_AXI] = "AXI error when writing data",
+ [ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
+ [ERR_ECC] = "ECC double bit error",
+ [ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
+ [ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
+ [ERR_CRC_ERR] = "CRC error",
+ [ERR_CHKSUM] = "Checksum error",
+ [ERR_DIF] = "DIF error",
+};
+
+static const char * const xgene_dma_err[] = {
+ [ERR_DIF_SIZE_INT] = "DIF size error",
+ [ERR_GS_ERR_INT] = "Gather scatter not same size error",
+ [ERR_FPB_TIMEO_INT] = "Free pool time out error",
+ [ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
+ [ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
+ [ERR_WR_TIMEO_INT] = "Write time out error",
+ [ERR_RD_TIMEO_INT] = "Read time out error",
+ [ERR_WR_ERR_INT] = "HBF bus write error",
+ [ERR_RD_ERR_INT] = "HBF bus read error",
+ [ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
+ [ERR_DESC_DST_INT] = "HFB reading dst link address error",
+ [ERR_DESC_SRC_INT] = "HFB reading src link address error",
+};
+
+static bool is_pq_enabled(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
+ return !(val & XGENE_DMA_PQ_DISABLE_MASK);
+}
+
+static void xgene_dma_cpu_to_le64(u64 *desc, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ desc[i] = cpu_to_le64(desc[i]);
+}
+
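+/*
+ * Encode a buffer length for the hw descriptor: lengths below the 16 KB
+ * maximum are stored as-is, while a full 16 KB chunk uses the special
+ * length code 0x0 (XGENE_DMA_16K_BUFFER_LEN_CODE)
+ */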
+static u16 xgene_dma_encode_len(u32 len)
+{
+ return (len < XGENE_DMA_MAX_BYTE_CNT) ?
+ len : XGENE_DMA_16K_BUFFER_LEN_CODE;
+}
+
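+/*
+ * Map the XOR source count (2..5) to the hw flyby opcode; the first two
+ * table entries are placeholders so the source count can index directly
+ */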
+static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
+{
+ static u8 flyby_type[] = {
+ FLYBY_2SRC_XOR, /* Dummy */
+ FLYBY_2SRC_XOR, /* Dummy */
+ FLYBY_2SRC_XOR,
+ FLYBY_3SRC_XOR,
+ FLYBY_4SRC_XOR,
+ FLYBY_5SRC_XOR
+ };
+
+ return flyby_type[src_cnt];
+}
+
+static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state = ioread32(&cmd_base[1]);
+
+ return XGENE_DMA_RING_DESC_CNT(ring_state);
+}
+
+static void xgene_dma_set_src_buffer(void *ext8, size_t *len,
+ dma_addr_t *paddr)
+{
+ size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
+ *len : XGENE_DMA_MAX_BYTE_CNT;
+
+ XGENE_DMA_DESC_BUFADDR_SET(ext8, *paddr);
+ XGENE_DMA_DESC_BUFLEN_SET(ext8, xgene_dma_encode_len(nbytes));
+ *len -= nbytes;
+ *paddr += nbytes;
+}
+
+static void xgene_dma_invalidate_buffer(void *ext8)
+{
+ XGENE_DMA_DESC_BUFLEN_SET(ext8, XGENE_DMA_INVALID_LEN_CODE);
+}
+
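+/*
+ * The extended source buffer fields of the second 32B descriptor are laid
+ * out with each pair of 64-bit words swapped, so map the logical buffer
+ * index to the word that actually holds it
+ */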
+static void *xgene_dma_lookup_ext8(u64 *desc, int idx)
+{
+ return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1);
+}
+
+static void xgene_dma_init_desc(void *desc, u16 dst_ring_num)
+{
+ XGENE_DMA_DESC_C_SET(desc); /* Coherent IO */
+ XGENE_DMA_DESC_IN_SET(desc);
+ XGENE_DMA_DESC_H0ENQ_NUM_SET(desc, dst_ring_num);
+ XGENE_DMA_DESC_RTYPE_SET(desc, XGENE_DMA_RING_OWNER_DMA);
+}
+
+static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc_sw,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len)
+{
+ void *desc1, *desc2;
+ int i;
+
+ /* Get 1st descriptor */
+ desc1 = &desc_sw->desc1;
+ xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
+
+ /* Set destination address */
+ XGENE_DMA_DESC_DR_SET(desc1);
+ XGENE_DMA_DESC_DST_ADDR_SET(desc1, dst);
+
+ /* Set 1st source address */
+ xgene_dma_set_src_buffer(desc1 + 8, &len, &src);
+
+ if (len <= 0) {
+ desc2 = NULL;
+ goto skip_additional_src;
+ }
+
+ /*
+ * We need to split this source buffer,
+ * and need to use 2nd descriptor
+ */
+ desc2 = &desc_sw->desc2;
+ XGENE_DMA_DESC_NV_SET(desc1);
+
+ /* Set 2nd to 5th source address */
+ for (i = 0; i < 4 && len; i++)
+ xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
+ &len, &src);
+
+ /* Invalidate unused source address field */
+ for (; i < 4; i++)
+ xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
+
+ /* Update the flag to indicate we have prepared a 64B descriptor */
+ desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
+
+skip_additional_src:
+ /* Hardware stores descriptor in little endian format */
+ xgene_dma_cpu_to_le64(desc1, 4);
+ if (desc2)
+ xgene_dma_cpu_to_le64(desc2, 4);
+}
+
+static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc_sw,
+ dma_addr_t *dst, dma_addr_t *src,
+ u32 src_cnt, size_t *nbytes,
+ const u8 *scf)
+{
+ void *desc1, *desc2;
+ size_t len = *nbytes;
+ int i;
+
+ desc1 = &desc_sw->desc1;
+ desc2 = &desc_sw->desc2;
+
+ /* Initialize DMA descriptor */
+ xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
+
+ /* Set destination address */
+ XGENE_DMA_DESC_DR_SET(desc1);
+ XGENE_DMA_DESC_DST_ADDR_SET(desc1, *dst);
+
+ /* We have multiple source addresses, so we need to set the NV bit */
+ XGENE_DMA_DESC_NV_SET(desc1);
+
+ /* Set flyby opcode */
+ XGENE_DMA_DESC_FLYBY_SET(desc1, xgene_dma_encode_xor_flyby(src_cnt));
+
+ /* Set 1st to 5th source addresses */
+ for (i = 0; i < src_cnt; i++) {
+ len = *nbytes;
+ xgene_dma_set_src_buffer((i == 0) ? (desc1 + 8) :
+ xgene_dma_lookup_ext8(desc2, i - 1),
+ &len, &src[i]);
+ XGENE_DMA_DESC_MULTI_SET(desc1, scf[i], i);
+ }
+
+ /* Hardware stores descriptor in little endian format */
+ xgene_dma_cpu_to_le64(desc1, 4);
+ xgene_dma_cpu_to_le64(desc2, 4);
+
+ /* Update meta data */
+ *nbytes = len;
+ *dst += XGENE_DMA_MAX_BYTE_CNT;
+
+ /* We always need a 64B descriptor to perform XOR or PQ operations */
+ desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
+}
+
+static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xgene_dma_desc_sw *desc;
+ struct xgene_dma_chan *chan;
+ dma_cookie_t cookie;
+
+ if (unlikely(!tx))
+ return -EINVAL;
+
+ chan = to_dma_chan(tx->chan);
+ desc = to_dma_desc_sw(tx);
+
+ spin_lock_bh(&chan->lock);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Add this transaction list onto the tail of the pending queue */
+ list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
+
+ spin_unlock_bh(&chan->lock);
+
+ return cookie;
+}
+
+static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc)
+{
+ list_del(&desc->node);
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
+}
+
+static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
+ struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_desc_sw *desc;
+ dma_addr_t phys;
+
+ desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
+ if (!desc) {
+ chan_err(chan, "Failed to allocate LDs\n");
+ return NULL;
+ }
+
+ memset(desc, 0, sizeof(*desc));
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ desc->tx.phys = phys;
+ desc->tx.tx_submit = xgene_dma_tx_submit;
+ dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);
+
+ chan_dbg(chan, "LD %p allocated\n", desc);
+
+ return desc;
+}
+
+/**
+ * xgene_dma_clean_completed_descriptor - free all descriptors which
+ * have been completed and acked
+ * @chan: X-Gene DMA channel
+ *
+ * This function is used on all completed and acked descriptors.
+ */
+static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_desc_sw *desc, *_desc;
+
+ /* Free each completed and acked descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
+ if (async_tx_test_ack(&desc->tx))
+ xgene_dma_clean_descriptor(chan, desc);
+ }
+}
+
+/**
+ * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
+ * @chan: X-Gene DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks and submit any dependencies.
+ */
+static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc)
+{
+ struct dma_async_tx_descriptor *tx = &desc->tx;
+
+ /*
+ * If this is not the last transaction in the group,
+ * then there is no need to complete the cookie or run any
+ * callback, as this is not the tx descriptor that was returned
+ * to the caller of this DMA request
+ */
+
+ if (tx->cookie == 0)
+ return;
+
+ dma_cookie_complete(tx);
+
+ /* Run the link descriptor callback function */
+ if (tx->callback)
+ tx->callback(tx->callback_param);
+
+ dma_descriptor_unmap(tx);
+
+ /* Run any dependencies */
+ dma_run_dependencies(tx);
+}
+
+/**
+ * xgene_dma_clean_running_descriptor - move the completed descriptor from
+ * ld_running to ld_completed
+ * @chan: X-Gene DMA channel
+ * @desc: the descriptor which is completed
+ *
+ * Free the descriptor directly if acked by async_tx api,
+ * else move it to queue ld_completed.
+ */
+static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc)
+{
+ /* Remove from the list of running transactions */
+ list_del(&desc->node);
+
+ /*
+ * the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->tx)) {
+ /*
+ * Move this descriptor to the list of descriptors which is
+ * completed, but still awaiting the 'ack' bit to be set.
+ */
+ list_add_tail(&desc->node, &chan->ld_completed);
+ return;
+ }
+
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
+}
+
+static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
+ struct xgene_dma_desc_sw *desc_sw)
+{
+ struct xgene_dma_desc_hw *desc_hw;
+
+ /* Check if we can push more descriptors to hw for execution */
+ if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
+ return -EBUSY;
+
+ /* Get hw descriptor from DMA tx ring */
+ desc_hw = &ring->desc_hw[ring->head];
+
+ /*
+ * Increment the head count to point to the next
+ * descriptor for the next time
+ */
+ if (++ring->head == ring->slots)
+ ring->head = 0;
+
+ /* Copy prepared sw descriptor data to hw descriptor */
+ memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));
+
+ /*
+ * Check if we have prepared a 64B descriptor,
+ * in which case we need one more hw descriptor
+ */
+ if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
+ desc_hw = &ring->desc_hw[ring->head];
+
+ if (++ring->head == ring->slots)
+ ring->head = 0;
+
+ memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
+ }
+
+ /* Notify the hw that we have descriptors ready for execution */
+ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
+ 2 : 1, ring->cmd);
+
+ return 0;
+}
+
+/**
+ * xgene_chan_xfer_ld_pending - push any pending transactions to hw
+ * @chan : X-Gene DMA channel
+ *
+ * LOCKING: must hold chan->lock
+ */
+static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
+ int ret;
+
+ /*
+ * If the list of pending descriptors is empty, then we
+ * don't need to do any work at all
+ */
+ if (list_empty(&chan->ld_pending)) {
+ chan_dbg(chan, "No pending LDs\n");
+ return;
+ }
+
+ /*
+ * Move elements from the queue of pending transactions onto the list
+ * of running transactions and push them to hw for execution
+ */
+ list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
+ /*
+ * Check if we have already pushed the maximum number of
+ * transactions the hw can accept; if so, stop here and push
+ * the remaining elements of the pending ld queue once some of
+ * the already pushed descriptors have completed
+ */
+ if (chan->pending >= chan->max_outstanding)
+ return;
+
+ ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
+ if (ret)
+ return;
+
+ /*
+ * Delete this element from ld pending queue and append it to
+ * ld running queue
+ */
+ list_move_tail(&desc_sw->node, &chan->ld_running);
+
+ /* Increment the pending transaction count */
+ chan->pending++;
+ }
+}
+
+/**
+ * xgene_dma_cleanup_descriptors - cleanup link descriptors which have completed
+ * and move them to ld_completed so they can be freed once the 'ack' flag is set
+ * @chan: X-Gene DMA channel
+ *
+ * This function is used on descriptors which have been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, then
+ * free these descriptors if flag 'ack' is set.
+ */
+static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_ring *ring = &chan->rx_ring;
+ struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
+ struct xgene_dma_desc_hw *desc_hw;
+ u8 status;
+
+ /* Clean already completed and acked descriptors */
+ xgene_dma_clean_completed_descriptor(chan);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
+ /* Get subsequent hw descriptor from DMA rx ring */
+ desc_hw = &ring->desc_hw[ring->head];
+
+ /* Check if this descriptor has been completed */
+ if (unlikely(XGENE_DMA_DESC_IS_EMPTY(desc_hw)))
+ break;
+
+ if (++ring->head == ring->slots)
+ ring->head = 0;
+
+ /* Check if we have any error with DMA transactions */
+ status = XGENE_DMA_DESC_STATUS(
+ XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
+ desc_hw->m0)),
+ XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
+ desc_hw->m0)));
+ if (status) {
+ /* Print the DMA error type */
+ chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
+
+ /*
+ * We have a DMA transaction error here. Dump the DMA
+ * Tx and Rx descriptors for this request
+ */
+ XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
+ "X-Gene DMA TX DESC1: ");
+
+ if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
+ XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
+ "X-Gene DMA TX DESC2: ");
+
+ XGENE_DMA_DESC_DUMP(desc_hw,
+ "X-Gene DMA RX ERR DESC: ");
+ }
+
+ /* Notify the hw about this completed descriptor */
+ iowrite32(-1, ring->cmd);
+
+ /* Mark this hw descriptor as processed */
+ XGENE_DMA_DESC_SET_EMPTY(desc_hw);
+
+ xgene_dma_run_tx_complete_actions(chan, desc_sw);
+
+ xgene_dma_clean_running_descriptor(chan, desc_sw);
+
+ /*
+ * Decrement the pending transaction count
+ * as we have processed one
+ */
+ chan->pending--;
+ }
+
+ /*
+ * Start any pending transactions automatically.
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
+ xgene_chan_xfer_ld_pending(chan);
+}
+
+static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xgene_dma_chan *chan = to_dma_chan(dchan);
+
+ /* Has this channel already been allocated? */
+ if (chan->desc_pool)
+ return 1;
+
+ chan->desc_pool = dma_pool_create(chan->name, chan->dev,
+ sizeof(struct xgene_dma_desc_sw),
+ 0, 0);
+ if (!chan->desc_pool) {
+ chan_err(chan, "Failed to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ chan_dbg(chan, "Allocate descripto pool\n");
+
+ return 1;
+}
+
+/**
+ * xgene_dma_free_desc_list - Free all descriptors in a queue
+ * @chan: X-Gene DMA channel
+ * @list: the list to free
+ *
+ * LOCKING: must hold chan->lock
+ */
+static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xgene_dma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node)
+ xgene_dma_clean_descriptor(chan, desc);
+}
+
+static void xgene_dma_free_tx_desc_list(struct xgene_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xgene_dma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node)
+ xgene_dma_clean_descriptor(chan, desc);
+}
+
+static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xgene_dma_chan *chan = to_dma_chan(dchan);
+
+ chan_dbg(chan, "Free all resources\n");
+
+ if (!chan->desc_pool)
+ return;
+
+ spin_lock_bh(&chan->lock);
+
+ /* Process all running descriptors */
+ xgene_dma_cleanup_descriptors(chan);
+
+ /* Clean all link descriptor queues */
+ xgene_dma_free_desc_list(chan, &chan->ld_pending);
+ xgene_dma_free_desc_list(chan, &chan->ld_running);
+ xgene_dma_free_desc_list(chan, &chan->ld_completed);
+
+ spin_unlock_bh(&chan->lock);
+
+ /* Delete this channel DMA pool */
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
+ struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new;
+ struct xgene_dma_chan *chan;
+ size_t copy;
+
+ if (unlikely(!dchan || !len))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ /* Create the largest transaction possible */
+ copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
+
+ /* Prepare DMA descriptor */
+ xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* Update metadata */
+ len -= copy;
+ dst += copy;
+ src += copy;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ u32 dst_nents, struct scatterlist *src_sg,
+ u32 src_nents, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new = NULL;
+ struct xgene_dma_chan *chan;
+ size_t dst_avail, src_avail;
+ dma_addr_t dst, src;
+ size_t len;
+
+ if (unlikely(!dchan))
+ return NULL;
+
+ if (unlikely(!dst_nents || !src_nents))
+ return NULL;
+
+ if (unlikely(!dst_sg || !src_sg))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ /* Get prepared for the loop */
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+ dst_nents--;
+ src_nents--;
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Create the largest transaction possible */
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
+ if (len == 0)
+ goto fetch;
+
+ dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
+ src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
+
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ /* Prepare DMA descriptor */
+ xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* update metadata */
+ dst_avail -= len;
+ src_avail -= len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+
+fetch:
+ /* fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ /* no more entries: we're done */
+ if (dst_nents == 0)
+ break;
+
+ /* fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (!dst_sg)
+ break;
+
+ dst_nents--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+
+ /* fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ /* no more entries: we're done */
+ if (src_nents == 0)
+ break;
+
+ /* fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (!src_sg)
+ break;
+
+ src_nents--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ if (!new)
+ return NULL;
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
+ struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
+ u32 src_cnt, size_t len, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new;
+ struct xgene_dma_chan *chan;
+ static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01};
+
+ if (unlikely(!dchan || !len))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ /* Prepare xor DMA descriptor */
+ xgene_dma_prep_xor_desc(chan, new, &dst, src,
+ src_cnt, &len, multi);
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
+ struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
+ u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new;
+ struct xgene_dma_chan *chan;
+ size_t _len = len;
+ dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
+ static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};
+
+ if (unlikely(!dchan || !len))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ /*
+ * Save the source addresses in a local array; we may have to
+ * prepare two descriptors to generate P and Q if both are
+ * enabled in the flags by the client
+ */
+ memcpy(_src, src, sizeof(*src) * src_cnt);
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ len = 0;
+
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ _len = 0;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+
+ /*
+ * Prepare DMA descriptor to generate P,
+ * if DMA_PREP_PQ_DISABLE_P flag is not set
+ */
+ if (len) {
+ xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
+ src_cnt, &len, multi);
+ continue;
+ }
+
+ /*
+ * Prepare DMA descriptor to generate Q,
+ * if DMA_PREP_PQ_DISABLE_Q flag is not set
+ */
+ if (_len) {
+ xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
+ src_cnt, &_len, scf);
+ }
+ } while (len || _len);
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static void xgene_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct xgene_dma_chan *chan = to_dma_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ xgene_chan_xfer_ld_pending(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+static void xgene_dma_tasklet_cb(unsigned long data)
+{
+ struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
+
+ spin_lock_bh(&chan->lock);
+
+ /* Run all cleanup for descriptors which have been completed */
+ xgene_dma_cleanup_descriptors(chan);
+
+ /* Re-enable DMA channel IRQ */
+ enable_irq(chan->rx_irq);
+
+ spin_unlock_bh(&chan->lock);
+}
+
+static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
+{
+ struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;
+
+ BUG_ON(!chan);
+
+ /*
+ * Disable DMA channel IRQ until we process completed
+ * descriptors
+ */
+ disable_irq_nosync(chan->rx_irq);
+
+ /*
+ * Schedule the tasklet to handle all cleanup of the current
+ * transaction. It will start a new transaction if there is
+ * one pending.
+ */
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgene_dma_err_isr(int irq, void *id)
+{
+ struct xgene_dma *pdma = (struct xgene_dma *)id;
+ unsigned long int_mask;
+ u32 val, i;
+
+ val = ioread32(pdma->csr_dma + XGENE_DMA_INT);
+
+ /* Clear DMA interrupts */
+ iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);
+
+ /* Print DMA error info */
+ int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
+ for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
+ dev_err(pdma->dev,
+ "Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
+
+ return IRQ_HANDLED;
+}
+
+static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
+{
+ int i;
+
+ iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
+
+ for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
+ iowrite32(ring->state[i], ring->pdma->csr_ring +
+ XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
+}
+
+static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
+{
+ memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
+ xgene_dma_wr_ring_state(ring);
+}
+
+static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
+{
+ void *ring_cfg = ring->state;
+ u64 addr = ring->desc_paddr;
+ void *desc;
+ u32 i, val;
+
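+ /*
+ * The ring configuration is accumulated in the cached ring->state
+ * words and flushed to the hw by xgene_dma_wr_ring_state() below
+ */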
+ ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
+
+ /* Clear DMA ring state */
+ xgene_dma_clr_ring_state(ring);
+
+ /* Set DMA ring type */
+ XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
+
+ if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
+ /* Set recombination buffer and timeout */
+ XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
+ XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
+ XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
+ }
+
+ /* Initialize DMA ring state */
+ XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
+ XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
+ XGENE_DMA_RING_COHERENT_SET(ring_cfg);
+ XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
+ XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
+ XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
+
+ /* Write DMA ring configurations */
+ xgene_dma_wr_ring_state(ring);
+
+ /* Set DMA ring id */
+ iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
+ ring->pdma->csr_ring + XGENE_DMA_RING_ID);
+
+ /* Set DMA ring buffer */
+ iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
+ ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
+
+ if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
+ return;
+
+ /* Set empty signature to DMA Rx ring descriptors */
+ for (i = 0; i < ring->slots; i++) {
+ desc = &ring->desc_hw[i];
+ XGENE_DMA_DESC_SET_EMPTY(desc);
+ }
+
+ /* Enable DMA Rx ring interrupt */
+ val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
+ XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
+ iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
+}
+
+static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
+{
+ u32 ring_id, val;
+
+ if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
+ /* Disable DMA Rx ring interrupt */
+ val = ioread32(ring->pdma->csr_ring +
+ XGENE_DMA_RING_NE_INT_MODE);
+ XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
+ iowrite32(val, ring->pdma->csr_ring +
+ XGENE_DMA_RING_NE_INT_MODE);
+ }
+
+ /* Clear DMA ring state */
+ ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
+ iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
+
+ iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
+ xgene_dma_clr_ring_state(ring);
+}
+
+static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
+{
+ ring->cmd_base = ring->pdma->csr_ring_cmd +
+ XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
+ XGENE_DMA_RING_NUM));
+
+ ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
+}
+
+static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
+ enum xgene_dma_ring_cfgsize cfgsize)
+{
+ int size;
+
+ switch (cfgsize) {
+ case XGENE_DMA_RING_CFG_SIZE_512B:
+ size = 0x200;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_2KB:
+ size = 0x800;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_16KB:
+ size = 0x4000;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_64KB:
+ size = 0x10000;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_512KB:
+ size = 0x80000;
+ break;
+ default:
+ chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
+{
+ /* Clear DMA ring configurations */
+ xgene_dma_clear_ring(ring);
+
+ /* De-allocate DMA ring descriptor */
+ if (ring->desc_vaddr) {
+ dma_free_coherent(ring->pdma->dev, ring->size,
+ ring->desc_vaddr, ring->desc_paddr);
+ ring->desc_vaddr = NULL;
+ }
+}
+
+static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
+{
+ xgene_dma_delete_ring_one(&chan->rx_ring);
+ xgene_dma_delete_ring_one(&chan->tx_ring);
+}
+
+static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
+ struct xgene_dma_ring *ring,
+ enum xgene_dma_ring_cfgsize cfgsize)
+{
+ int ret;
+
+ /* Setup DMA ring descriptor variables */
+ ring->pdma = chan->pdma;
+ ring->cfgsize = cfgsize;
+ ring->num = chan->pdma->ring_num++;
+ ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
+
+ ret = xgene_dma_get_ring_size(chan, cfgsize);
+ if (ret <= 0)
+ return ret;
+ ring->size = ret;
+
+ /* Allocate memory for DMA ring descriptor */
+ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
+ &ring->desc_paddr, GFP_KERNEL);
+ if (!ring->desc_vaddr) {
+ chan_err(chan, "Failed to allocate ring desc\n");
+ return -ENOMEM;
+ }
+
+ /* Configure and enable DMA ring */
+ xgene_dma_set_ring_cmd(ring);
+ xgene_dma_setup_ring(ring);
+
+ return 0;
+}
+
+static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_ring *rx_ring = &chan->rx_ring;
+ struct xgene_dma_ring *tx_ring = &chan->tx_ring;
+ int ret;
+
+ /* Create DMA Rx ring descriptor */
+ rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
+ rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
+
+ ret = xgene_dma_create_ring_one(chan, rx_ring,
+ XGENE_DMA_RING_CFG_SIZE_64KB);
+ if (ret)
+ return ret;
+
+ chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
+ rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
+
+ /* Create DMA Tx ring descriptor */
+ tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
+ tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
+
+ ret = xgene_dma_create_ring_one(chan, tx_ring,
+ XGENE_DMA_RING_CFG_SIZE_64KB);
+ if (ret) {
+ xgene_dma_delete_ring_one(rx_ring);
+ return ret;
+ }
+
+ tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
+
+ chan_dbg(chan,
+ "Tx ring id 0x%X num %d desc 0x%p\n",
+ tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
+
+ /* Set the max outstanding requests possible for this channel */
+ chan->max_outstanding = rx_ring->slots;
+
+ return ret;
+}
+
+static int xgene_dma_init_rings(struct xgene_dma *pdma)
+{
+ int ret, i, j;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ xgene_dma_delete_chan_rings(&pdma->chan[j]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void xgene_dma_enable(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ /* Configure and enable DMA engine */
+ val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
+ XGENE_DMA_CH_SETUP(val);
+ XGENE_DMA_ENABLE(val);
+ iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
+}
+
+static void xgene_dma_disable(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
+ XGENE_DMA_DISABLE(val);
+ iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
+}
+
+static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
+{
+ /*
+ * Mask DMA ring overflow, underflow and
+ * AXI write/read error interrupts
+ */
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
+
+ /* Mask DMA error interrupts */
+ iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
+}
+
+static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
+{
+ /*
+ * Unmask DMA ring overflow, underflow and
+ * AXI write/read error interrupts
+ */
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
+
+ /* Unmask DMA error interrupts */
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_INT_MASK);
+}
+
+static void xgene_dma_init_hw(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ /* Associate DMA ring to corresponding ring HW */
+ iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
+ pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
+
+ /* Configure RAID6 polynomial control setting */
+ if (is_pq_enabled(pdma))
+ iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
+ pdma->csr_dma + XGENE_DMA_RAID6_CONT);
+ else
+ dev_info(pdma->dev, "PQ is disabled in HW\n");
+
+ xgene_dma_enable(pdma);
+ xgene_dma_unmask_interrupts(pdma);
+
+ /* Get DMA id and version info */
+ val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);
+
+ /* DMA device info */
+ dev_info(pdma->dev,
+ "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
+ XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
+ XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
+}
+
+static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
+{
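+ /*
+ * Nothing to do if the ring manager clock is already enabled and the
+ * block is out of reset
+ */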
+ if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
+ (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
+ return 0;
+
+ iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
+ iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);
+
+ /* Bring up memory */
+ iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
+
+ /* Force a barrier */
+ ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
+
+ /* reset may take up to 1ms */
+ usleep_range(1000, 1100);
+
+ if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
+ != XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
+ dev_err(pdma->dev,
+ "Failed to release ring mngr memory from shutdown\n");
+ return -ENODEV;
+ }
+
+ /* program threshold set 1 and all hysteresis */
+ iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
+ pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
+ iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
+ pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
+ iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
+ pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);
+
+ /* Enable QPcore and assign error queue */
+ iowrite32(XGENE_DMA_RING_ENABLE,
+ pdma->csr_ring + XGENE_DMA_RING_CONFIG);
+
+ return 0;
+}
+
+static int xgene_dma_init_mem(struct xgene_dma *pdma)
+{
+ int ret;
+
+ ret = xgene_dma_init_ring_mngr(pdma);
+ if (ret)
+ return ret;
+
+ /* Bring up memory */
+ iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
+
+ /* Force a barrier */
+ ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
+
+ /* reset may take up to 1ms */
+ usleep_range(1000, 1100);
+
+ if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
+ != XGENE_DMA_BLK_MEM_RDY_VAL) {
+ dev_err(pdma->dev,
+ "Failed to release DMA memory from shutdown\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int xgene_dma_request_irqs(struct xgene_dma *pdma)
+{
+ struct xgene_dma_chan *chan;
+ int ret, i, j;
+
+ /* Register DMA error irq */
+ ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
+ 0, "dma_error", pdma);
+ if (ret) {
+ dev_err(pdma->dev,
+ "Failed to register error IRQ %d\n", pdma->err_irq);
+ return ret;
+ }
+
+ /* Register DMA channel rx irq */
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ ret = devm_request_irq(chan->dev, chan->rx_irq,
+ xgene_dma_chan_ring_isr,
+ 0, chan->name, chan);
+ if (ret) {
+ chan_err(chan, "Failed to register Rx IRQ %d\n",
+ chan->rx_irq);
+ devm_free_irq(pdma->dev, pdma->err_irq, pdma);
+
+ for (j = 0; j < i; j++) {
+ chan = &pdma->chan[j];
+ devm_free_irq(chan->dev, chan->rx_irq, chan);
+ }
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void xgene_dma_free_irqs(struct xgene_dma *pdma)
+{
+ struct xgene_dma_chan *chan;
+ int i;
+
+ /* Free DMA device error irq */
+ devm_free_irq(pdma->dev, pdma->err_irq, pdma);
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ devm_free_irq(chan->dev, chan->rx_irq, chan);
+ }
+}
+
+static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
+ struct dma_device *dma_dev)
+{
+ /* Initialize DMA device capability mask */
+ dma_cap_zero(dma_dev->cap_mask);
+
+ /* Set DMA device capability */
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+
+ /* The X-Gene SoC DMA engine channel 0 supports XOR, and channel 1
+ * supports both XOR and PQ. The hw provides a mechanism (readable via
+ * the SoC efuse register) to disable PQ/XOR support on channel 1.
+ * There is also a hw errata: if channel 0 and channel 1 execute XOR
+ * and PQ requests simultaneously, the DMA engine can hang. So we
+ * advertise XOR on channel 0 only if XOR/PQ support on channel 1 is
+ * disabled.
+ */
+ if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
+ is_pq_enabled(chan->pdma)) {
+ dma_cap_set(DMA_PQ, dma_dev->cap_mask);
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
+ !is_pq_enabled(chan->pdma)) {
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ }
+
+ /* Set base and prep routines */
+ dma_dev->dev = chan->dev;
+ dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
+ dma_dev->device_issue_pending = xgene_dma_issue_pending;
+ dma_dev->device_tx_status = xgene_dma_tx_status;
+ dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
+ dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
+
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
+ dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
+ dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+ }
+
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
+ dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
+ dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+ }
+}
+
+static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
+{
+ struct xgene_dma_chan *chan = &pdma->chan[id];
+ struct dma_device *dma_dev = &pdma->dma_dev[id];
+ int ret;
+
+ chan->dma_chan.device = dma_dev;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->ld_pending);
+ INIT_LIST_HEAD(&chan->ld_running);
+ INIT_LIST_HEAD(&chan->ld_completed);
+ tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
+ (unsigned long)chan);
+
+ chan->pending = 0;
+ chan->desc_pool = NULL;
+ dma_cookie_init(&chan->dma_chan);
+
+ /* Setup dma device capabilities and prep routines */
+ xgene_dma_set_caps(chan, dma_dev);
+
+ /* Initialize DMA device list head */
+ INIT_LIST_HEAD(&dma_dev->channels);
+ list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);
+
+ /* Register with Linux async DMA framework */
+ ret = dma_async_device_register(dma_dev);
+ if (ret) {
+ chan_err(chan, "Failed to register async device %d", ret);
+ tasklet_kill(&chan->tasklet);
+
+ return ret;
+ }
+
+ /* DMA capability info */
+ dev_info(pdma->dev,
+ "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
+ dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
+
+ return 0;
+}
+
+static int xgene_dma_init_async(struct xgene_dma *pdma)
+{
+ int ret, i, j;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ ret = xgene_dma_async_register(pdma, i);
+ if (ret) {
+ for (j = 0; j < i; j++) {
+ dma_async_device_unregister(&pdma->dma_dev[j]);
+ tasklet_kill(&pdma->chan[j].tasklet);
+ }
+
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void xgene_dma_async_unregister(struct xgene_dma *pdma)
+{
+ int i;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
+ dma_async_device_unregister(&pdma->dma_dev[i]);
+}
+
+static void xgene_dma_init_channels(struct xgene_dma *pdma)
+{
+ struct xgene_dma_chan *chan;
+ int i;
+
+ pdma->ring_num = XGENE_DMA_RING_NUM;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ chan->dev = pdma->dev;
+ chan->pdma = pdma;
+ chan->id = i;
+ snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
+ }
+}
+
+static int xgene_dma_get_resources(struct platform_device *pdev,
+ struct xgene_dma *pdma)
+{
+ struct resource *res;
+ int irq, i;
+
+ /* Get DMA csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_dma) {
+ dev_err(&pdev->dev, "Failed to ioremap csr region");
+ return -ENOMEM;
+ }
+
+ /* Get DMA ring csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get ring csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_ring) {
+ dev_err(&pdev->dev, "Failed to ioremap ring csr region");
+ return -ENOMEM;
+ }
+
+ /* Get DMA ring cmd csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_ring_cmd) {
+ dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
+ return -ENOMEM;
+ }
+
+ /* Get efuse csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get efuse csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_efuse) {
+ dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
+ return -ENOMEM;
+ }
+
+ /* Get DMA error interrupt */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Failed to get Error IRQ\n");
+ return -ENXIO;
+ }
+
+ pdma->err_irq = irq;
+
+ /* Get DMA Rx ring descriptor interrupts for all DMA channels */
+ for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
+ return -ENXIO;
+ }
+
+ pdma->chan[i - 1].rx_irq = irq;
+ }
+
+ return 0;
+}
+
+static int xgene_dma_probe(struct platform_device *pdev)
+{
+ struct xgene_dma *pdma;
+ int ret, i;
+
+ pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
+ if (!pdma)
+ return -ENOMEM;
+
+ pdma->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pdma);
+
+ ret = xgene_dma_get_resources(pdev, pdma);
+ if (ret)
+ return ret;
+
+ pdma->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdma->clk)) {
+ dev_err(&pdev->dev, "Failed to get clk\n");
+ return PTR_ERR(pdma->clk);
+ }
+
+ /* Enable clk before accessing registers */
+ ret = clk_prepare_enable(pdma->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+ return ret;
+ }
+
+ /* Take DMA RAM out of shutdown */
+ ret = xgene_dma_init_mem(pdma);
+ if (ret)
+ goto err_clk_enable;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto err_dma_mask;
+ }
+
+ /* Initialize DMA channels software state */
+ xgene_dma_init_channels(pdma);
+
+ /* Configure DMA rings */
+ ret = xgene_dma_init_rings(pdma);
+ if (ret)
+ goto err_clk_enable;
+
+ ret = xgene_dma_request_irqs(pdma);
+ if (ret)
+ goto err_request_irq;
+
+ /* Configure and enable DMA engine */
+ xgene_dma_init_hw(pdma);
+
+ /* Register DMA device with linux async framework */
+ ret = xgene_dma_init_async(pdma);
+ if (ret)
+ goto err_async_init;
+
+ return 0;
+
+err_async_init:
+ xgene_dma_free_irqs(pdma);
+
+err_request_irq:
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
+ xgene_dma_delete_chan_rings(&pdma->chan[i]);
+
+err_dma_mask:
+err_clk_enable:
+ clk_disable_unprepare(pdma->clk);
+
+ return ret;
+}
+
+static int xgene_dma_remove(struct platform_device *pdev)
+{
+ struct xgene_dma *pdma = platform_get_drvdata(pdev);
+ struct xgene_dma_chan *chan;
+ int i;
+
+ xgene_dma_async_unregister(pdma);
+
+ /* Mask interrupts and disable DMA engine */
+ xgene_dma_mask_interrupts(pdma);
+ xgene_dma_disable(pdma);
+ xgene_dma_free_irqs(pdma);
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ tasklet_kill(&chan->tasklet);
+ xgene_dma_delete_chan_rings(chan);
+ }
+
+ clk_disable_unprepare(pdma->clk);
+
+ return 0;
+}
+
+static const struct of_device_id xgene_dma_of_match_ptr[] = {
+ {.compatible = "apm,xgene-storm-dma",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
+
+static struct platform_driver xgene_dma_driver = {
+ .probe = xgene_dma_probe,
+ .remove = xgene_dma_remove,
+ .driver = {
+ .name = "X-Gene-DMA",
+ .of_match_table = xgene_dma_of_match_ptr,
+ },
+};
+
+module_platform_driver(xgene_dma_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
+MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index bdd2a5dd7220..d8434d465885 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -22,9 +22,9 @@
* (at your option) any later version.
*/
-#include <linux/amba/xilinx_dma.h>
#include <linux/bitops.h>
#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 6a1f7de6fa54..fdc0bf0543ce 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -55,6 +55,16 @@ config EXTCON_MAX77693
Maxim MAX77693 PMIC. The MAX77693 MUIC is a USB port accessory
detector and switch.
+config EXTCON_MAX77843
+ tristate "MAX77843 EXTCON Support"
+ depends on MFD_MAX77843
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the MUIC device of
+ Maxim MAX77843. The MAX77843 MUIC is a USB port accessory
+ detector and switch.
+
config EXTCON_MAX8997
tristate "MAX8997 EXTCON Support"
depends on MFD_MAX8997 && IRQ_DOMAIN
@@ -93,4 +103,11 @@ config EXTCON_SM5502
Silicon Mitus SM5502. The SM5502 is a USB port accessory
detector and switch.
+config EXTCON_USB_GPIO
+ tristate "USB GPIO extcon support"
+ depends on GPIOLIB
+ help
+ Say Y here to enable GPIO based USB cable detection extcon support.
+ Typically used if a GPIO is used for USB ID pin detection.
+
endif # MULTISTATE_SWITCH
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0370b42e5a27..9204114791a3 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,13 +2,15 @@
# Makefile for external connector class (extcon) devices
#
-obj-$(CONFIG_EXTCON) += extcon-class.o
+obj-$(CONFIG_EXTCON) += extcon.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
+obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
+obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 63f01c42aed4..a0ed35b336e4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -136,18 +136,35 @@ static const char *arizona_cable[] = {
static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info);
-static void arizona_extcon_do_magic(struct arizona_extcon_info *info,
- unsigned int magic)
+static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
+ bool clamp)
{
struct arizona *arizona = info->arizona;
+ unsigned int mask = 0, val = 0;
int ret;
+ switch (arizona->type) {
+ case WM5110:
+ mask = ARIZONA_HP1L_SHRTO | ARIZONA_HP1L_FLWR |
+ ARIZONA_HP1L_SHRTI;
+ if (clamp)
+ val = ARIZONA_HP1L_SHRTO;
+ else
+ val = ARIZONA_HP1L_FLWR | ARIZONA_HP1L_SHRTI;
+ break;
+ default:
+ mask = ARIZONA_RMV_SHRT_HP1L;
+ if (clamp)
+ val = ARIZONA_RMV_SHRT_HP1L;
+ break;
+ }
+
mutex_lock(&arizona->dapm->card->dapm_mutex);
- arizona->hpdet_magic = magic;
+ arizona->hpdet_clamp = clamp;
- /* Keep the HP output stages disabled while doing the magic */
- if (magic) {
+ /* Keep the HP output stages disabled while doing the clamp */
+ if (clamp) {
ret = regmap_update_bits(arizona->regmap,
ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT1L_ENA |
@@ -158,20 +175,20 @@ static void arizona_extcon_do_magic(struct arizona_extcon_info *info,
ret);
}
- ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000,
- magic);
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1L,
+ mask, val);
if (ret != 0)
- dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ dev_warn(arizona->dev, "Failed to do clamp: %d\n",
ret);
- ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000,
- magic);
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1R,
+ mask, val);
if (ret != 0)
- dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ dev_warn(arizona->dev, "Failed to do clamp: %d\n",
ret);
- /* Restore the desired state while not doing the magic */
- if (!magic) {
+ /* Restore the desired state while not doing the clamp */
+ if (!clamp) {
ret = regmap_update_bits(arizona->regmap,
ARIZONA_OUTPUT_ENABLES_1,
ARIZONA_OUT1L_ENA |
@@ -603,7 +620,7 @@ done:
ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
0);
- arizona_extcon_do_magic(info, 0);
+ arizona_extcon_hp_clamp(info, false);
if (id_gpio)
gpio_set_value_cansleep(id_gpio, 0);
@@ -648,7 +665,7 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
if (info->mic)
arizona_stop_mic(info);
- arizona_extcon_do_magic(info, 0x4000);
+ arizona_extcon_hp_clamp(info, true);
ret = regmap_update_bits(arizona->regmap,
ARIZONA_ACCESSORY_DETECT_MODE_1,
@@ -699,7 +716,7 @@ static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
info->hpdet_active = true;
- arizona_extcon_do_magic(info, 0x4000);
+ arizona_extcon_hp_clamp(info, true);
ret = regmap_update_bits(arizona->regmap,
ARIZONA_ACCESSORY_DETECT_MODE_1,
@@ -1149,6 +1166,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
}
break;
case WM5110:
+ case WM8280:
switch (arizona->rev) {
case 0 ... 2:
break;
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index c1bf0cf747b0..3823aa4a3a80 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -539,8 +539,6 @@ static void max14577_muic_irq_work(struct work_struct *work)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
-
- return;
}
/*
@@ -730,8 +728,7 @@ static int max14577_muic_probe(struct platform_device *pdev)
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
- "failed: irq request (IRQ: %d,"
- " error :%d)\n",
+ "failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
return ret;
}
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index af165fd0c6f5..a66bec8f6252 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -190,8 +190,8 @@ enum max77693_muic_acc_type {
/* The below accessories have same ADC value so ADCLow and
ADC1K bit is used to separate specific accessory */
/* ADC|VBVolt|ADCLow|ADC1K| */
- MAX77693_MUIC_GND_USB_OTG = 0x100, /* 0x0| 0| 0| 0| */
- MAX77693_MUIC_GND_USB_OTG_VB = 0x104, /* 0x0| 1| 0| 0| */
+ MAX77693_MUIC_GND_USB_HOST = 0x100, /* 0x0| 0| 0| 0| */
+ MAX77693_MUIC_GND_USB_HOST_VB = 0x104, /* 0x0| 1| 0| 0| */
MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* 0x0| 0| 1| 0| */
MAX77693_MUIC_GND_MHL = 0x103, /* 0x0| 0| 1| 1| */
MAX77693_MUIC_GND_MHL_VB = 0x107, /* 0x0| 1| 1| 1| */
@@ -228,7 +228,7 @@ static const char *max77693_extcon_cable[] = {
[EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
[EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
[EXTCON_CABLE_MHL] = "MHL",
- [EXTCON_CABLE_MHL_TA] = "MHL_TA",
+ [EXTCON_CABLE_MHL_TA] = "MHL-TA",
[EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
[EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
[EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
@@ -403,8 +403,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
/**
* [0x1|VBVolt|ADCLow|ADC1K]
- * [0x1| 0| 0| 0] USB_OTG
- * [0x1| 1| 0| 0] USB_OTG_VB
+ * [0x1| 0| 0| 0] USB_HOST
+ * [0x1| 1| 0| 0] USB_HOST_VB
* [0x1| 0| 1| 0] Audio Video cable with load
* [0x1| 0| 1| 1] MHL without charging cable
* [0x1| 1| 1| 1] MHL with charging cable
@@ -523,7 +523,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
* - Support charging and data connection through micro-usb port
* if USB cable is connected between target and host
* device.
- * - Support OTG device (Mouse/Keyboard)
+ * - Support OTG (On-The-Go) devices (e.g. mouse, keyboard)
*/
ret = max77693_muic_set_path(info, info->path_usb, attached);
if (ret < 0)
@@ -609,9 +609,9 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
MAX77693_CABLE_GROUP_ADC_GND, &attached);
switch (cable_type_gnd) {
- case MAX77693_MUIC_GND_USB_OTG:
- case MAX77693_MUIC_GND_USB_OTG_VB:
- /* USB_OTG, PATH: AP_USB */
+ case MAX77693_MUIC_GND_USB_HOST:
+ case MAX77693_MUIC_GND_USB_HOST_VB:
+ /* USB_HOST, PATH: AP_USB */
ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
if (ret < 0)
return ret;
@@ -704,7 +704,7 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info)
switch (cable_type) {
case MAX77693_MUIC_ADC_GROUND:
- /* USB_OTG/MHL/Audio */
+ /* USB_HOST/MHL/Audio */
max77693_muic_adc_ground_handler(info);
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
@@ -823,19 +823,19 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
/*
- * MHL cable with MHL_TA(USB/TA) cable
+ * MHL cable with MHL-TA(USB/TA) cable
* - The MHL cable includes two ports (an HDMI line and a separate
* micro-usb port). When the target connects an MHL cable, the
- * extcon driver check whether MHL_TA(USB/TA) cable is
- * connected. If MHL_TA cable is connected, extcon
+ * extcon driver checks whether an MHL-TA (USB/TA) cable is
+ * connected. If an MHL-TA cable is connected, the extcon
* driver notifies the notifiee so that it can charge the battery.
*
- * Features of 'MHL_TA(USB/TA) with MHL cable'
+ * Features of 'MHL-TA(USB/TA) with MHL cable'
* - Support MHL
* - Support charging through micro-usb port without
* data connection
*/
- extcon_set_cable_state(info->edev, "MHL_TA", attached);
+ extcon_set_cable_state(info->edev, "MHL-TA", attached);
if (!cable_attached)
extcon_set_cable_state(info->edev,
"MHL", cable_attached);
@@ -886,7 +886,7 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
* - Support charging and data connection through micro-
* usb port if USB cable is connected between target
* and host device
- * - Support OTG device (Mouse/Keyboard)
+ * - Support OTG (On-The-Go) devices (e.g. mouse, keyboard)
*/
ret = max77693_muic_set_path(info, info->path_usb,
attached);
@@ -1019,8 +1019,6 @@ static void max77693_muic_irq_work(struct work_struct *work)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
-
- return;
}
static irqreturn_t max77693_muic_irq_handler(int irq, void *data)
@@ -1171,8 +1169,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
- "failed: irq request (IRQ: %d,"
- " error :%d)\n",
+ "failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
return ret;
}
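The comments above describe how the MUIC driver reports "MHL-TA" attach and detach so that another driver (for example a charger driver) can react. A hedged sketch of such a notifiee, built on extcon_register_interest() whose signature appears in the extcon core changes later in this patch, might look as follows; the extcon device name "max77693-muic" and all example_* names are assumptions, not taken from this patch.

	/* Hypothetical notifiee: listen for "MHL-TA" state changes. */
	#include <linux/extcon.h>
	#include <linux/notifier.h>
	#include <linux/printk.h>

	static struct extcon_specific_cable_nb example_mhl_ta_cable;

	static int example_mhl_ta_event(struct notifier_block *nb,
					unsigned long state, void *data)
	{
		/* 'state' reflects the new cable state: non-zero on attach */
		pr_info("MHL-TA %s\n", state ? "attached" : "detached");
		return NOTIFY_OK;
	}

	static struct notifier_block example_mhl_ta_nb = {
		.notifier_call = example_mhl_ta_event,
	};

	static int example_charger_init(void)
	{
		return extcon_register_interest(&example_mhl_ta_cable,
						"max77693-muic", "MHL-TA",
						&example_mhl_ta_nb);
	}

On teardown the interest would be dropped again with extcon_unregister_interest(&example_mhl_ta_cable).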
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
new file mode 100644
index 000000000000..8db6a926ea07
--- /dev/null
+++ b/drivers/extcon/extcon-max77843.c
@@ -0,0 +1,881 @@
+/*
+ * extcon-max77843.c - Maxim MAX77843 extcon driver to support
+ * MUIC(Micro USB Interface Controller)
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/extcon.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/max77843-private.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define DELAY_MS_DEFAULT 15000 /* unit: millisecond */
+
+enum max77843_muic_status {
+ MAX77843_MUIC_STATUS1 = 0,
+ MAX77843_MUIC_STATUS2,
+ MAX77843_MUIC_STATUS3,
+
+ MAX77843_MUIC_STATUS_NUM,
+};
+
+struct max77843_muic_info {
+ struct device *dev;
+ struct max77843 *max77843;
+ struct extcon_dev *edev;
+
+ struct mutex mutex;
+ struct work_struct irq_work;
+ struct delayed_work wq_detcable;
+
+ u8 status[MAX77843_MUIC_STATUS_NUM];
+ int prev_cable_type;
+ int prev_chg_type;
+ int prev_gnd_type;
+
+ bool irq_adc;
+ bool irq_chg;
+};
+
+enum max77843_muic_cable_group {
+ MAX77843_CABLE_GROUP_ADC = 0,
+ MAX77843_CABLE_GROUP_ADC_GND,
+ MAX77843_CABLE_GROUP_CHG,
+};
+
+enum max77843_muic_adc_debounce_time {
+ MAX77843_DEBOUNCE_TIME_5MS = 0,
+ MAX77843_DEBOUNCE_TIME_10MS,
+ MAX77843_DEBOUNCE_TIME_25MS,
+ MAX77843_DEBOUNCE_TIME_38_62MS,
+};
+
+/* Define accessory cable type */
+enum max77843_muic_accessory_type {
+ MAX77843_MUIC_ADC_GROUND = 0,
+ MAX77843_MUIC_ADC_SEND_END_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S1_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S2_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S3_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S4_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S5_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S6_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S7_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S8_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S9_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S10_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S11_BUTTON,
+ MAX77843_MUIC_ADC_REMOTE_S12_BUTTON,
+ MAX77843_MUIC_ADC_RESERVED_ACC_1,
+ MAX77843_MUIC_ADC_RESERVED_ACC_2,
+ MAX77843_MUIC_ADC_RESERVED_ACC_3,
+ MAX77843_MUIC_ADC_RESERVED_ACC_4,
+ MAX77843_MUIC_ADC_RESERVED_ACC_5,
+ MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2,
+ MAX77843_MUIC_ADC_PHONE_POWERED_DEV,
+ MAX77843_MUIC_ADC_TTY_CONVERTER,
+ MAX77843_MUIC_ADC_UART_CABLE,
+ MAX77843_MUIC_ADC_CEA936A_TYPE1_CHG,
+ MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF,
+ MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON,
+ MAX77843_MUIC_ADC_AV_CABLE_NOLOAD,
+ MAX77843_MUIC_ADC_CEA936A_TYPE2_CHG,
+ MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF,
+ MAX77843_MUIC_ADC_FACTORY_MODE_UART_ON,
+ MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1,
+ MAX77843_MUIC_ADC_OPEN,
+
+ /* The accessories below should be identified using not only the
+ * ADC value but also the ADC1K and VBVolt bits. */
+ /* Offset|ADC1K|VBVolt| */
+ MAX77843_MUIC_GND_USB_HOST = 0x100, /* 0x1| 0| 0| */
+ MAX77843_MUIC_GND_USB_HOST_VB = 0x101, /* 0x1| 0| 1| */
+ MAX77843_MUIC_GND_MHL = 0x102, /* 0x1| 1| 0| */
+ MAX77843_MUIC_GND_MHL_VB = 0x103, /* 0x1| 1| 1| */
+};
+
+/* Define charger cable type */
+enum max77843_muic_charger_type {
+ MAX77843_MUIC_CHG_NONE = 0,
+ MAX77843_MUIC_CHG_USB,
+ MAX77843_MUIC_CHG_DOWNSTREAM,
+ MAX77843_MUIC_CHG_DEDICATED,
+ MAX77843_MUIC_CHG_SPECIAL_500MA,
+ MAX77843_MUIC_CHG_SPECIAL_1A,
+ MAX77843_MUIC_CHG_SPECIAL_BIAS,
+ MAX77843_MUIC_CHG_RESERVED,
+ MAX77843_MUIC_CHG_GND,
+};
+
+enum {
+ MAX77843_CABLE_USB = 0,
+ MAX77843_CABLE_USB_HOST,
+ MAX77843_CABLE_TA,
+ MAX77843_CABLE_CHARGE_DOWNSTREAM,
+ MAX77843_CABLE_FAST_CHARGER,
+ MAX77843_CABLE_SLOW_CHARGER,
+ MAX77843_CABLE_MHL,
+ MAX77843_CABLE_MHL_TA,
+ MAX77843_CABLE_JIG_USB_ON,
+ MAX77843_CABLE_JIG_USB_OFF,
+ MAX77843_CABLE_JIG_UART_ON,
+ MAX77843_CABLE_JIG_UART_OFF,
+
+ MAX77843_CABLE_NUM,
+};
+
+static const char *max77843_extcon_cable[] = {
+ [MAX77843_CABLE_USB] = "USB",
+ [MAX77843_CABLE_USB_HOST] = "USB-HOST",
+ [MAX77843_CABLE_TA] = "TA",
+ [MAX77843_CABLE_CHARGE_DOWNSTREAM] = "CHARGER-DOWNSTREAM",
+ [MAX77843_CABLE_FAST_CHARGER] = "FAST-CHARGER",
+ [MAX77843_CABLE_SLOW_CHARGER] = "SLOW-CHARGER",
+ [MAX77843_CABLE_MHL] = "MHL",
+ [MAX77843_CABLE_MHL_TA] = "MHL-TA",
+ [MAX77843_CABLE_JIG_USB_ON] = "JIG-USB-ON",
+ [MAX77843_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
+ [MAX77843_CABLE_JIG_UART_ON] = "JIG-UART-ON",
+ [MAX77843_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
+};
+
+struct max77843_muic_irq {
+ unsigned int irq;
+ const char *name;
+ unsigned int virq;
+};
+
+static struct max77843_muic_irq max77843_muic_irqs[] = {
+ { MAX77843_MUIC_IRQ_INT1_ADC, "MUIC-ADC" },
+ { MAX77843_MUIC_IRQ_INT1_ADCERROR, "MUIC-ADC_ERROR" },
+ { MAX77843_MUIC_IRQ_INT1_ADC1K, "MUIC-ADC1K" },
+ { MAX77843_MUIC_IRQ_INT2_CHGTYP, "MUIC-CHGTYP" },
+ { MAX77843_MUIC_IRQ_INT2_CHGDETRUN, "MUIC-CHGDETRUN" },
+ { MAX77843_MUIC_IRQ_INT2_DCDTMR, "MUIC-DCDTMR" },
+ { MAX77843_MUIC_IRQ_INT2_DXOVP, "MUIC-DXOVP" },
+ { MAX77843_MUIC_IRQ_INT2_VBVOLT, "MUIC-VBVOLT" },
+ { MAX77843_MUIC_IRQ_INT3_VBADC, "MUIC-VBADC" },
+ { MAX77843_MUIC_IRQ_INT3_VDNMON, "MUIC-VDNMON" },
+ { MAX77843_MUIC_IRQ_INT3_DNRES, "MUIC-DNRES" },
+ { MAX77843_MUIC_IRQ_INT3_MPNACK, "MUIC-MPNACK"},
+ { MAX77843_MUIC_IRQ_INT3_MRXBUFOW, "MUIC-MRXBUFOW"},
+ { MAX77843_MUIC_IRQ_INT3_MRXTRF, "MUIC-MRXTRF"},
+ { MAX77843_MUIC_IRQ_INT3_MRXPERR, "MUIC-MRXPERR"},
+ { MAX77843_MUIC_IRQ_INT3_MRXRDY, "MUIC-MRXRDY"},
+};
+
+static const struct regmap_config max77843_muic_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77843_MUIC_REG_END,
+};
+
+static const struct regmap_irq max77843_muic_irq[] = {
+ /* INT1 interrupt */
+ { .reg_offset = 0, .mask = MAX77843_MUIC_ADC, },
+ { .reg_offset = 0, .mask = MAX77843_MUIC_ADCERROR, },
+ { .reg_offset = 0, .mask = MAX77843_MUIC_ADC1K, },
+
+ /* INT2 interrupt */
+ { .reg_offset = 1, .mask = MAX77843_MUIC_CHGTYP, },
+ { .reg_offset = 1, .mask = MAX77843_MUIC_CHGDETRUN, },
+ { .reg_offset = 1, .mask = MAX77843_MUIC_DCDTMR, },
+ { .reg_offset = 1, .mask = MAX77843_MUIC_DXOVP, },
+ { .reg_offset = 1, .mask = MAX77843_MUIC_VBVOLT, },
+
+ /* INT3 interrupt */
+ { .reg_offset = 2, .mask = MAX77843_MUIC_VBADC, },
+ { .reg_offset = 2, .mask = MAX77843_MUIC_VDNMON, },
+ { .reg_offset = 2, .mask = MAX77843_MUIC_DNRES, },
+ { .reg_offset = 2, .mask = MAX77843_MUIC_MPNACK, },
+ { .reg_offset = 2, .mask = MAX77843_MUIC_MRXBUFOW, },
+ { .reg_offset = 2, .mask = MAX77843_MUIC_MRXTRF, },
+ { .reg_offset = 2, .mask = MAX77843_MUIC_MRXPERR, },
+ { .reg_offset = 2, .mask = MAX77843_MUIC_MRXRDY, },
+};
+
+static const struct regmap_irq_chip max77843_muic_irq_chip = {
+ .name = "max77843-muic",
+ .status_base = MAX77843_MUIC_REG_INT1,
+ .mask_base = MAX77843_MUIC_REG_INTMASK1,
+ .mask_invert = true,
+ .num_regs = 3,
+ .irqs = max77843_muic_irq,
+ .num_irqs = ARRAY_SIZE(max77843_muic_irq),
+};
+
+static int max77843_muic_set_path(struct max77843_muic_info *info,
+ u8 val, bool attached)
+{
+ struct max77843 *max77843 = info->max77843;
+ int ret = 0;
+ unsigned int ctrl1, ctrl2;
+
+ if (attached)
+ ctrl1 = val;
+ else
+ ctrl1 = CONTROL1_SW_OPEN;
+
+ ret = regmap_update_bits(max77843->regmap_muic,
+ MAX77843_MUIC_REG_CONTROL1,
+ CONTROL1_COM_SW, ctrl1);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot switch MUIC port\n");
+ return ret;
+ }
+
+ if (attached)
+ ctrl2 = MAX77843_MUIC_CONTROL2_CPEN_MASK;
+ else
+ ctrl2 = MAX77843_MUIC_CONTROL2_LOWPWR_MASK;
+
+ ret = regmap_update_bits(max77843->regmap_muic,
+ MAX77843_MUIC_REG_CONTROL2,
+ MAX77843_MUIC_CONTROL2_LOWPWR_MASK |
+ MAX77843_MUIC_CONTROL2_CPEN_MASK, ctrl2);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot update lowpower mode\n");
+ return ret;
+ }
+
+ dev_dbg(info->dev,
+ "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
+ ctrl1, ctrl2, attached ? "attached" : "detached");
+
+ return 0;
+}
+
+static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
+ enum max77843_muic_cable_group group, bool *attached)
+{
+ int adc, chg_type, cable_type, gnd_type;
+
+ adc = info->status[MAX77843_MUIC_STATUS1] &
+ MAX77843_MUIC_STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ switch (group) {
+ case MAX77843_CABLE_GROUP_ADC:
+ if (adc == MAX77843_MUIC_ADC_OPEN) {
+ *attached = false;
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX77843_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX77843_CABLE_GROUP_CHG:
+ chg_type = info->status[MAX77843_MUIC_STATUS2] &
+ MAX77843_MUIC_STATUS2_CHGTYP_MASK;
+
+ /* Check GROUND accessory with charger cable */
+ if (adc == MAX77843_MUIC_ADC_GROUND) {
+ if (chg_type == MAX77843_MUIC_CHG_NONE) {
+ /* State when the charger cable is disconnected but
+ * the GROUND accessory is still connected */
+ *attached = false;
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
+ } else {
+ /* State when a charger cable is connected
+ * to the GROUND accessory */
+ *attached = true;
+ cable_type = MAX77843_MUIC_CHG_GND;
+ info->prev_chg_type = MAX77843_MUIC_CHG_GND;
+ }
+ break;
+ }
+
+ if (chg_type == MAX77843_MUIC_CHG_NONE) {
+ *attached = false;
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
+ } else {
+ *attached = true;
+ cable_type = info->prev_chg_type = chg_type;
+ }
+ break;
+ case MAX77843_CABLE_GROUP_ADC_GND:
+ if (adc == MAX77843_MUIC_ADC_OPEN) {
+ *attached = false;
+ cable_type = info->prev_gnd_type;
+ info->prev_gnd_type = MAX77843_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ /* Offset|ADC1K|VBVolt|
+ * 0x1| 0| 0| USB-HOST
+ * 0x1| 0| 1| USB-HOST with VB
+ * 0x1| 1| 0| MHL
+ * 0x1| 1| 1| MHL with VB */
+ /* Get ADC1K register bit */
+ gnd_type = (info->status[MAX77843_MUIC_STATUS1] &
+ MAX77843_MUIC_STATUS1_ADC1K_MASK);
+
+ /* Get VBVolt register bit */
+ gnd_type |= (info->status[MAX77843_MUIC_STATUS2] &
+ MAX77843_MUIC_STATUS2_VBVOLT_MASK);
+ gnd_type >>= STATUS2_VBVOLT_SHIFT;
+
+ /* Offset of GND cable */
+ gnd_type |= MAX77843_MUIC_GND_USB_HOST;
+ cable_type = info->prev_gnd_type = gnd_type;
+ }
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
+
+static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
+{
+ int ret, gnd_cable_type;
+ bool attached;
+
+ gnd_cable_type = max77843_muic_get_cable_type(info,
+ MAX77843_CABLE_GROUP_ADC_GND, &attached);
+ dev_dbg(info->dev, "external connector is %s (gnd:0x%02x)\n",
+ attached ? "attached" : "detached", gnd_cable_type);
+
+ switch (gnd_cable_type) {
+ case MAX77843_MUIC_GND_USB_HOST:
+ case MAX77843_MUIC_GND_USB_HOST_VB:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "USB-HOST", attached);
+ break;
+ case MAX77843_MUIC_GND_MHL_VB:
+ case MAX77843_MUIC_GND_MHL:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "MHL", attached);
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n",
+ attached ? "attached" : "detached", gnd_cable_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max77843_muic_jig_handler(struct max77843_muic_info *info,
+ int cable_type, bool attached)
+{
+ int ret;
+
+ dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+ if (ret < 0)
+ return ret;
+ extcon_set_cable_state(info->edev, "JIG-USB-OFF", attached);
+ break;
+ case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+ if (ret < 0)
+ return ret;
+ extcon_set_cable_state(info->edev, "JIG-USB-ON", attached);
+ break;
+ case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_UART, attached);
+ if (ret < 0)
+ return ret;
+ extcon_set_cable_state(info->edev, "JIG-UART-OFF", attached);
+ break;
+ default:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ }
+
+ return 0;
+}
+
+static int max77843_muic_adc_handler(struct max77843_muic_info *info)
+{
+ int ret, cable_type;
+ bool attached;
+
+ cable_type = max77843_muic_get_cable_type(info,
+ MAX77843_CABLE_GROUP_ADC, &attached);
+
+ dev_dbg(info->dev,
+ "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type,
+ info->prev_cable_type);
+
+ switch (cable_type) {
+ case MAX77843_MUIC_ADC_GROUND:
+ ret = max77843_muic_adc_gnd_handler(info);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
+ case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
+ case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ ret = max77843_muic_jig_handler(info, cable_type, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX77843_MUIC_ADC_SEND_END_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S1_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S2_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S3_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S4_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S5_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S6_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S7_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S8_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S9_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S10_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S11_BUTTON:
+ case MAX77843_MUIC_ADC_REMOTE_S12_BUTTON:
+ case MAX77843_MUIC_ADC_RESERVED_ACC_1:
+ case MAX77843_MUIC_ADC_RESERVED_ACC_2:
+ case MAX77843_MUIC_ADC_RESERVED_ACC_3:
+ case MAX77843_MUIC_ADC_RESERVED_ACC_4:
+ case MAX77843_MUIC_ADC_RESERVED_ACC_5:
+ case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2:
+ case MAX77843_MUIC_ADC_PHONE_POWERED_DEV:
+ case MAX77843_MUIC_ADC_TTY_CONVERTER:
+ case MAX77843_MUIC_ADC_UART_CABLE:
+ case MAX77843_MUIC_ADC_CEA936A_TYPE1_CHG:
+ case MAX77843_MUIC_ADC_AV_CABLE_NOLOAD:
+ case MAX77843_MUIC_ADC_CEA936A_TYPE2_CHG:
+ case MAX77843_MUIC_ADC_FACTORY_MODE_UART_ON:
+ case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1:
+ case MAX77843_MUIC_ADC_OPEN:
+ dev_err(info->dev,
+ "accessory is %s but it isn't used (adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s accessory (adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max77843_muic_chg_handler(struct max77843_muic_info *info)
+{
+ int ret, chg_type, gnd_type;
+ bool attached;
+
+ chg_type = max77843_muic_get_cable_type(info,
+ MAX77843_CABLE_GROUP_CHG, &attached);
+
+ dev_dbg(info->dev,
+ "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
+ attached ? "attached" : "detached",
+ chg_type, info->prev_chg_type);
+
+ switch (chg_type) {
+ case MAX77843_MUIC_CHG_USB:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "USB", attached);
+ break;
+ case MAX77843_MUIC_CHG_DOWNSTREAM:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev,
+ "CHARGER-DOWNSTREAM", attached);
+ break;
+ case MAX77843_MUIC_CHG_DEDICATED:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "TA", attached);
+ break;
+ case MAX77843_MUIC_CHG_SPECIAL_500MA:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "SLOW-CHARGER", attached);
+ break;
+ case MAX77843_MUIC_CHG_SPECIAL_1A:
+ ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "FAST-CHARGER", attached);
+ break;
+ case MAX77843_MUIC_CHG_GND:
+ gnd_type = max77843_muic_get_cable_type(info,
+ MAX77843_CABLE_GROUP_ADC_GND, &attached);
+
+ /* A charger cable on the MHL accessory was attached or detached */
+ if (gnd_type == MAX77843_MUIC_GND_MHL_VB)
+ extcon_set_cable_state(info->edev, "MHL-TA", true);
+ else if (gnd_type == MAX77843_MUIC_GND_MHL)
+ extcon_set_cable_state(info->edev, "MHL-TA", false);
+ break;
+ case MAX77843_MUIC_CHG_NONE:
+ break;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s accessory (chg_type:0x%x)\n",
+ attached ? "attached" : "detached", chg_type);
+
+ max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void max77843_muic_irq_work(struct work_struct *work)
+{
+ struct max77843_muic_info *info = container_of(work,
+ struct max77843_muic_info, irq_work);
+ struct max77843 *max77843 = info->max77843;
+ int ret = 0;
+
+ mutex_lock(&info->mutex);
+
+ ret = regmap_bulk_read(max77843->regmap_muic,
+ MAX77843_MUIC_REG_STATUS1, info->status,
+ MAX77843_MUIC_STATUS_NUM);
+ if (ret) {
+ dev_err(info->dev, "Cannot read STATUS registers\n");
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ if (info->irq_adc) {
+ ret = max77843_muic_adc_handler(info);
+ if (ret)
+ dev_err(info->dev, "Unknown cable type\n");
+ info->irq_adc = false;
+ }
+
+ if (info->irq_chg) {
+ ret = max77843_muic_chg_handler(info);
+ if (ret)
+ dev_err(info->dev, "Unknown charger type\n");
+ info->irq_chg = false;
+ }
+
+ mutex_unlock(&info->mutex);
+}
+
+static irqreturn_t max77843_muic_irq_handler(int irq, void *data)
+{
+ struct max77843_muic_info *info = data;
+ int i, irq_type = -1;
+
+ for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++)
+ if (irq == max77843_muic_irqs[i].virq)
+ irq_type = max77843_muic_irqs[i].irq;
+
+ switch (irq_type) {
+ case MAX77843_MUIC_IRQ_INT1_ADC:
+ case MAX77843_MUIC_IRQ_INT1_ADCERROR:
+ case MAX77843_MUIC_IRQ_INT1_ADC1K:
+ info->irq_adc = true;
+ break;
+ case MAX77843_MUIC_IRQ_INT2_CHGTYP:
+ case MAX77843_MUIC_IRQ_INT2_CHGDETRUN:
+ case MAX77843_MUIC_IRQ_INT2_DCDTMR:
+ case MAX77843_MUIC_IRQ_INT2_DXOVP:
+ case MAX77843_MUIC_IRQ_INT2_VBVOLT:
+ info->irq_chg = true;
+ break;
+ case MAX77843_MUIC_IRQ_INT3_VBADC:
+ case MAX77843_MUIC_IRQ_INT3_VDNMON:
+ case MAX77843_MUIC_IRQ_INT3_DNRES:
+ case MAX77843_MUIC_IRQ_INT3_MPNACK:
+ case MAX77843_MUIC_IRQ_INT3_MRXBUFOW:
+ case MAX77843_MUIC_IRQ_INT3_MRXTRF:
+ case MAX77843_MUIC_IRQ_INT3_MRXPERR:
+ case MAX77843_MUIC_IRQ_INT3_MRXRDY:
+ break;
+ default:
+ dev_err(info->dev, "Cannot recognize IRQ(%d)\n", irq_type);
+ break;
+ }
+
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void max77843_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max77843_muic_info *info = container_of(to_delayed_work(work),
+ struct max77843_muic_info, wq_detcable);
+ struct max77843 *max77843 = info->max77843;
+ int chg_type, adc, ret;
+ bool attached;
+
+ mutex_lock(&info->mutex);
+
+ ret = regmap_bulk_read(max77843->regmap_muic,
+ MAX77843_MUIC_REG_STATUS1, info->status,
+ MAX77843_MUIC_STATUS_NUM);
+ if (ret) {
+ dev_err(info->dev, "Cannot read STATUS registers\n");
+ goto err_cable_wq;
+ }
+
+ adc = max77843_muic_get_cable_type(info,
+ MAX77843_CABLE_GROUP_ADC, &attached);
+ if (attached && adc != MAX77843_MUIC_ADC_OPEN) {
+ ret = max77843_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect accessory\n");
+ goto err_cable_wq;
+ }
+ }
+
+ chg_type = max77843_muic_get_cable_type(info,
+ MAX77843_CABLE_GROUP_CHG, &attached);
+ if (attached && chg_type != MAX77843_MUIC_CHG_NONE) {
+ ret = max77843_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger accessory\n");
+ goto err_cable_wq;
+ }
+ }
+
+err_cable_wq:
+ mutex_unlock(&info->mutex);
+}
+
+static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
+ enum max77843_muic_adc_debounce_time time)
+{
+ struct max77843 *max77843 = info->max77843;
+ int ret;
+
+ switch (time) {
+ case MAX77843_DEBOUNCE_TIME_5MS:
+ case MAX77843_DEBOUNCE_TIME_10MS:
+ case MAX77843_DEBOUNCE_TIME_25MS:
+ case MAX77843_DEBOUNCE_TIME_38_62MS:
+ ret = regmap_update_bits(max77843->regmap_muic,
+ MAX77843_MUIC_REG_CONTROL4,
+ MAX77843_MUIC_CONTROL4_ADCDBSET_MASK,
+ time << CONTROL4_ADCDBSET_SHIFT);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot write MUIC regmap\n");
+ return ret;
+ }
+ break;
+ default:
+ dev_err(info->dev, "Invalid ADC debounce time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max77843_init_muic_regmap(struct max77843 *max77843)
+{
+ int ret;
+
+ max77843->i2c_muic = i2c_new_dummy(max77843->i2c->adapter,
+ I2C_ADDR_MUIC);
+ if (!max77843->i2c_muic) {
+ dev_err(&max77843->i2c->dev,
+ "Cannot allocate I2C device for MUIC\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(max77843->i2c_muic, max77843);
+
+ max77843->regmap_muic = devm_regmap_init_i2c(max77843->i2c_muic,
+ &max77843_muic_regmap_config);
+ if (IS_ERR(max77843->regmap_muic)) {
+ ret = PTR_ERR(max77843->regmap_muic);
+ goto err_muic_i2c;
+ }
+
+ ret = regmap_add_irq_chip(max77843->regmap_muic, max77843->irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+ 0, &max77843_muic_irq_chip, &max77843->irq_data_muic);
+ if (ret < 0) {
+ dev_err(&max77843->i2c->dev, "Cannot add MUIC IRQ chip\n");
+ goto err_muic_i2c;
+ }
+
+ return 0;
+
+err_muic_i2c:
+ i2c_unregister_device(max77843->i2c_muic);
+
+ return ret;
+}
+
+static int max77843_muic_probe(struct platform_device *pdev)
+{
+ struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
+ struct max77843_muic_info *info;
+ unsigned int id;
+ int i, ret;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+ info->max77843 = max77843;
+
+ platform_set_drvdata(pdev, info);
+ mutex_init(&info->mutex);
+
+ /* Initialize i2c and regmap */
+ ret = max77843_init_muic_regmap(max77843);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init MUIC regmap\n");
+ return ret;
+ }
+
+ /* Turn off auto detection configuration */
+ ret = regmap_update_bits(max77843->regmap_muic,
+ MAX77843_MUIC_REG_CONTROL4,
+ MAX77843_MUIC_CONTROL4_USBAUTO_MASK |
+ MAX77843_MUIC_CONTROL4_FCTAUTO_MASK,
+ CONTROL4_AUTO_DISABLE);
+
+ /* Initialize extcon device */
+ info->edev = devm_extcon_dev_allocate(&pdev->dev,
+ max77843_extcon_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(&pdev->dev, "Failed to allocate memory for extcon\n");
+ ret = -ENODEV;
+ goto err_muic_irq;
+ }
+
+ ret = devm_extcon_dev_register(&pdev->dev, info->edev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register extcon device\n");
+ goto err_muic_irq;
+ }
+
+ /* Set ADC debounce time */
+ max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
+
+ /* Set initial path for UART */
+ max77843_muic_set_path(info, CONTROL1_SW_UART, true);
+
+ /* Check revision number of MUIC device */
+ ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read revision number\n");
+ goto err_muic_irq;
+ }
+ dev_info(info->dev, "MUIC device ID : 0x%x\n", id);
+
+ /* Support virtual irq domain for max77843 MUIC device */
+ INIT_WORK(&info->irq_work, max77843_muic_irq_work);
+
+ for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) {
+ struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i];
+ unsigned int virq = 0;
+
+ virq = regmap_irq_get_virq(max77843->irq_data_muic,
+ muic_irq->irq);
+ if (virq <= 0) {
+ ret = -EINVAL;
+ goto err_muic_irq;
+ }
+ muic_irq->virq = virq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
+ max77843_muic_irq_handler, IRQF_NO_SUSPEND,
+ muic_irq->name, info);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request irq (IRQ: %d, error: %d)\n",
+ muic_irq->irq, ret);
+ goto err_muic_irq;
+ }
+ }
+
+ /* Detect accessory after completing the initialization of platform */
+ INIT_DELAYED_WORK(&info->wq_detcable, max77843_muic_detect_cable_wq);
+ queue_delayed_work(system_power_efficient_wq,
+ &info->wq_detcable, msecs_to_jiffies(DELAY_MS_DEFAULT));
+
+ return 0;
+
+err_muic_irq:
+ regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
+ i2c_unregister_device(max77843->i2c_muic);
+
+ return ret;
+}
+
+static int max77843_muic_remove(struct platform_device *pdev)
+{
+ struct max77843_muic_info *info = platform_get_drvdata(pdev);
+ struct max77843 *max77843 = info->max77843;
+
+ cancel_work_sync(&info->irq_work);
+ regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
+ i2c_unregister_device(max77843->i2c_muic);
+
+ return 0;
+}
+
+static const struct platform_device_id max77843_muic_id[] = {
+ { "max77843-muic", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, max77843_muic_id);
+
+static struct platform_driver max77843_muic_driver = {
+ .driver = {
+ .name = "max77843-muic",
+ },
+ .probe = max77843_muic_probe,
+ .remove = max77843_muic_remove,
+ .id_table = max77843_muic_id,
+};
+
+static int __init max77843_muic_init(void)
+{
+ return platform_driver_register(&max77843_muic_driver);
+}
+subsys_initcall(max77843_muic_init);
+
+MODULE_DESCRIPTION("Maxim MAX77843 Extcon driver");
+MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
+MODULE_LICENSE("GPL");
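In max77843_muic_get_cable_type() above, the ground-accessory value is built from a fixed offset plus the ADC1K and VBVolt status bits, matching the Offset|ADC1K|VBVolt table in the accessory-type enum (0x100 | ADC1K<<1 | VBVolt). The small sketch below restates that composition with plain boolean inputs instead of the raw STATUS register masks; it is illustrative only and not part of the driver.

	/* Illustrative only: compose a MAX77843_MUIC_GND_* value from the
	 * ADC1K and VBVolt flags, per the table in the enum above.
	 */
	#include <linux/types.h>

	static int example_gnd_cable_type(bool adc1k, bool vbvolt)
	{
		int gnd_type = 0x100;		/* MAX77843_MUIC_GND_USB_HOST */

		if (adc1k)
			gnd_type |= 0x2;	/* MHL rather than USB host */
		if (vbvolt)
			gnd_type |= 0x1;	/* VBUS present (_VB variants) */

		return gnd_type;	/* e.g. both set -> 0x103, GND_MHL_VB */
	}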
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index fc1678fa95c4..5774e56c6422 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -579,8 +579,6 @@ static void max8997_muic_irq_work(struct work_struct *work)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
-
- return;
}
static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
@@ -689,8 +687,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
- "failed: irq request (IRQ: %d,"
- " error :%d)\n",
+ "failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
goto err_irq;
}
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index a784b2d5ee72..9ccd5af89d1c 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -582,10 +582,8 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c,
return -EINVAL;
info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
- if (!info) {
- dev_err(&i2c->dev, "failed to allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, info);
info->dev = &i2c->dev;
@@ -681,7 +679,7 @@ static int rt8973a_muic_i2c_remove(struct i2c_client *i2c)
return 0;
}
-static struct of_device_id rt8973a_dt_match[] = {
+static const struct of_device_id rt8973a_dt_match[] = {
{ .compatible = "richtek,rt8973a-muic" },
{ },
};
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index b0f7bd82af90..2f93cf307852 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -359,8 +359,8 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
break;
default:
dev_dbg(info->dev,
- "cannot identify the cable type: adc(0x%x) "
- "dev_type1(0x%x)\n", adc, dev_type1);
+ "cannot identify the cable type: adc(0x%x)\n",
+ adc);
return -EINVAL;
};
break;
@@ -659,7 +659,7 @@ static int sm5502_muic_i2c_remove(struct i2c_client *i2c)
return 0;
}
-static struct of_device_id sm5502_dt_match[] = {
+static const struct of_device_id sm5502_dt_match[] = {
{ .compatible = "siliconmitus,sm5502-muic" },
{ },
};
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
new file mode 100644
index 000000000000..e45d1f13f445
--- /dev/null
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -0,0 +1,237 @@
+/**
+ * drivers/extcon/extcon-usb-gpio.c - USB GPIO extcon driver
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Roger Quadros <rogerq@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
+
+struct usb_extcon_info {
+ struct device *dev;
+ struct extcon_dev *edev;
+
+ struct gpio_desc *id_gpiod;
+ int id_irq;
+
+ unsigned long debounce_jiffies;
+ struct delayed_work wq_detcable;
+};
+
+/* List of detectable cables */
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_USB_HOST,
+
+ EXTCON_CABLE_END,
+};
+
+static const char *usb_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_USB_HOST] = "USB-HOST",
+ NULL,
+};
+
+static void usb_extcon_detect_cable(struct work_struct *work)
+{
+ int id;
+ struct usb_extcon_info *info = container_of(to_delayed_work(work),
+ struct usb_extcon_info,
+ wq_detcable);
+
+ /* check ID and update cable state */
+ id = gpiod_get_value_cansleep(info->id_gpiod);
+ if (id) {
+ /*
+ * ID = 1 means the USB HOST cable is detached.
+ * As we don't have an event for USB peripheral cable attach,
+ * we simulate a USB peripheral attach here.
+ */
+ extcon_set_cable_state(info->edev,
+ usb_extcon_cable[EXTCON_CABLE_USB_HOST],
+ false);
+ extcon_set_cable_state(info->edev,
+ usb_extcon_cable[EXTCON_CABLE_USB],
+ true);
+ } else {
+ /*
+ * ID = 0 means the USB HOST cable is attached.
+ * As we don't have an event for USB peripheral cable detach,
+ * we simulate a USB peripheral detach here.
+ */
+ extcon_set_cable_state(info->edev,
+ usb_extcon_cable[EXTCON_CABLE_USB],
+ false);
+ extcon_set_cable_state(info->edev,
+ usb_extcon_cable[EXTCON_CABLE_USB_HOST],
+ true);
+ }
+}
+
+static irqreturn_t usb_irq_handler(int irq, void *dev_id)
+{
+ struct usb_extcon_info *info = dev_id;
+
+ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ info->debounce_jiffies);
+
+ return IRQ_HANDLED;
+}
+
+static int usb_extcon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct usb_extcon_info *info;
+ int ret;
+
+ if (!np)
+ return -EINVAL;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->id_gpiod = devm_gpiod_get(&pdev->dev, "id");
+ if (IS_ERR(info->id_gpiod)) {
+ dev_err(dev, "failed to get ID GPIO\n");
+ return PTR_ERR(info->id_gpiod);
+ }
+
+ info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_extcon_dev_register(dev, info->edev);
+ if (ret < 0) {
+ dev_err(dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ ret = gpiod_set_debounce(info->id_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+ if (ret < 0)
+ info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
+
+ INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
+
+ info->id_irq = gpiod_to_irq(info->id_gpiod);
+ if (info->id_irq < 0) {
+ dev_err(dev, "failed to get ID IRQ\n");
+ return info->id_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for ID IRQ\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, info);
+ device_init_wakeup(dev, 1);
+
+ /* Perform initial detection */
+ usb_extcon_detect_cable(&info->wq_detcable.work);
+
+ return 0;
+}
+
+static int usb_extcon_remove(struct platform_device *pdev)
+{
+ struct usb_extcon_info *info = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&info->wq_detcable);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int usb_extcon_suspend(struct device *dev)
+{
+ struct usb_extcon_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev)) {
+ ret = enable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * We don't want to process any IRQs after this point,
+ * as GPIOs behind the I2C subsystem might not be
+ * accessible until resume completes, so disable the IRQ.
+ */
+ disable_irq(info->id_irq);
+
+ return ret;
+}
+
+static int usb_extcon_resume(struct device *dev)
+{
+ struct usb_extcon_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev)) {
+ ret = disable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+
+ enable_irq(info->id_irq);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(usb_extcon_pm_ops,
+ usb_extcon_suspend, usb_extcon_resume);
+
+static const struct of_device_id usb_extcon_dt_match[] = {
+ { .compatible = "linux,extcon-usb-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, usb_extcon_dt_match);
+
+static struct platform_driver usb_extcon_driver = {
+ .probe = usb_extcon_probe,
+ .remove = usb_extcon_remove,
+ .driver = {
+ .name = "extcon-usb-gpio",
+ .pm = &usb_extcon_pm_ops,
+ .of_match_table = usb_extcon_dt_match,
+ },
+};
+
+module_platform_driver(usb_extcon_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_DESCRIPTION("USB GPIO extcon driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon.c
index 8319f25b7145..4c9f165e4a04 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon.c
@@ -158,6 +158,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
/* Optional callback given by the user */
if (edev->print_name) {
int ret = edev->print_name(edev, buf);
+
if (ret >= 0)
return ret;
}
@@ -444,6 +445,9 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
const char *extcon_name, const char *cable_name,
struct notifier_block *nb)
{
+ unsigned long flags;
+ int ret;
+
if (!obj || !cable_name || !nb)
return -EINVAL;
@@ -461,8 +465,11 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
obj->internal_nb.notifier_call = _call_per_cable;
- return raw_notifier_chain_register(&obj->edev->nh,
+ spin_lock_irqsave(&obj->edev->lock, flags);
+ ret = raw_notifier_chain_register(&obj->edev->nh,
&obj->internal_nb);
+ spin_unlock_irqrestore(&obj->edev->lock, flags);
+ return ret;
} else {
struct class_dev_iter iter;
struct extcon_dev *extd;
@@ -495,10 +502,17 @@ EXPORT_SYMBOL_GPL(extcon_register_interest);
*/
int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
{
+ unsigned long flags;
+ int ret;
+
if (!obj)
return -EINVAL;
- return raw_notifier_chain_unregister(&obj->edev->nh, &obj->internal_nb);
+ spin_lock_irqsave(&obj->edev->lock, flags);
+ ret = raw_notifier_chain_unregister(&obj->edev->nh, &obj->internal_nb);
+ spin_unlock_irqrestore(&obj->edev->lock, flags);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(extcon_unregister_interest);
@@ -515,7 +529,14 @@ EXPORT_SYMBOL_GPL(extcon_unregister_interest);
int extcon_register_notifier(struct extcon_dev *edev,
struct notifier_block *nb)
{
- return raw_notifier_chain_register(&edev->nh, nb);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&edev->lock, flags);
+ ret = raw_notifier_chain_register(&edev->nh, nb);
+ spin_unlock_irqrestore(&edev->lock, flags);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(extcon_register_notifier);
@@ -527,7 +548,14 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier);
int extcon_unregister_notifier(struct extcon_dev *edev,
struct notifier_block *nb)
{
- return raw_notifier_chain_unregister(&edev->nh, nb);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&edev->lock, flags);
+ ret = raw_notifier_chain_unregister(&edev->nh, nb);
+ spin_unlock_irqrestore(&edev->lock, flags);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 2c68da1ceeee..f4ea80d602f7 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -237,18 +237,6 @@ static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
return -net->hard_header_len;
}
-static int fwnet_header_rebuild(struct sk_buff *skb)
-{
- struct fwnet_header *h = (struct fwnet_header *)skb->data;
-
- if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
- return arp_find((unsigned char *)&h->h_dest, skb);
-
- dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n",
- be16_to_cpu(h->h_proto));
- return 0;
-}
-
static int fwnet_header_cache(const struct neighbour *neigh,
struct hh_cache *hh, __be16 type)
{
@@ -282,7 +270,6 @@ static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
static const struct header_ops fwnet_header_ops = {
.create = fwnet_header_create,
- .rebuild = fwnet_header_rebuild,
.cache = fwnet_header_cache,
.cache_update = fwnet_header_cache_update,
.parse = fwnet_header_parse,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 41983883cef4..6517132e5d8b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -132,6 +132,10 @@ config ISCSI_IBFT
detect iSCSI boot parameters dynamically during system boot, say Y.
Otherwise, say N.
+config QCOM_SCM
+ bool
+ depends on ARM || ARM64
+
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5373dc5b6011..3fdd3912709a 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
+obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
+CFLAGS_qcom_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 6e45a43ffe84..97b1616aa391 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -499,19 +499,19 @@ static int __init dmi_present(const u8 *buf)
buf += 16;
if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
+ if (smbios_ver)
+ dmi_ver = smbios_ver;
+ else
+ dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
dmi_num = get_unaligned_le16(buf + 12);
dmi_len = get_unaligned_le16(buf + 6);
dmi_base = get_unaligned_le32(buf + 8);
if (dmi_walk_early(dmi_decode) == 0) {
if (smbios_ver) {
- dmi_ver = smbios_ver;
- pr_info("SMBIOS %d.%d%s present.\n",
- dmi_ver >> 8, dmi_ver & 0xFF,
- (dmi_ver < 0x0300) ? "" : ".x");
+ pr_info("SMBIOS %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
} else {
- dmi_ver = (buf[14] & 0xF0) << 4 |
- (buf[14] & 0x0F);
pr_info("Legacy DMI %d.%d present.\n",
dmi_ver >> 8, dmi_ver & 0xFF);
}
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 87b8e3b900d2..5c55227a34c8 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
kset_unregister(map_kset);
- return entry;
+ map_kset = NULL;
+ return ERR_PTR(-ENOMEM);
}
memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
@@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
if (ret) {
kobject_put(&entry->kobj);
kset_unregister(map_kset);
+ map_kset = NULL;
return ERR_PTR(ret);
}
@@ -195,8 +197,6 @@ out_add_entry:
entry = *(map_entries + j);
kobject_put(&entry->kobj);
}
- if (map_kset)
- kset_unregister(map_kset);
out:
return ret;
}
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index a330492e06f9..75273a251603 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -15,7 +15,7 @@
#include <linux/console.h>
#include <linux/efi.h>
#include <linux/serial.h>
-#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
#include <asm/vga.h>
#include "pcdp.h"
@@ -43,7 +43,7 @@ setup_serial_console(struct pcdp_uart *uart)
}
add_preferred_console("uart", 8250, &options[9]);
- return setup_early_serial8250_console(options);
+ return setup_earlycon(options);
#else
return -ENODEV;
#endif
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
new file mode 100644
index 000000000000..994b50fd997c
--- /dev/null
+++ b/drivers/firmware/qcom_scm.c
@@ -0,0 +1,494 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/outercache.h>
+#include <asm/cacheflush.h>
+
+
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+
+#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
+#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
+#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
+#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
+
+#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
+#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
+#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
+#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
+
+struct qcom_scm_entry {
+ int flag;
+ void *entry;
+};
+
+static struct qcom_scm_entry qcom_scm_wb[] = {
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+/**
+ * struct qcom_scm_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from qcom_scm_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ * ------------------- <--- struct qcom_scm_command
+ * | command header |
+ * ------------------- <--- qcom_scm_get_command_buffer()
+ * | command buffer |
+ * ------------------- <--- struct qcom_scm_response and
+ * | response header | qcom_scm_command_to_response()
+ * ------------------- <--- qcom_scm_get_response_buffer()
+ * | response buffer |
+ * -------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate qcom_scm_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct qcom_scm_command {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 resp_hdr_offset;
+ __le32 id;
+ __le32 buf[0];
+};
+
+/**
+ * struct qcom_scm_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of qcom_scm_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct qcom_scm_response {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 is_complete;
+};
+
+/**
+ * alloc_qcom_scm_command() - Allocate an SCM command
+ * @cmd_size: size of the command buffer
+ * @resp_size: size of the response buffer
+ *
+ * Allocate an SCM command, including enough room for the command
+ * and response headers as well as the command and response buffers.
+ *
+ * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails.
+ */
+static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size)
+{
+ struct qcom_scm_command *cmd;
+ size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size +
+ resp_size;
+ u32 offset;
+
+ cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
+ if (cmd) {
+ cmd->len = cpu_to_le32(len);
+ offset = offsetof(struct qcom_scm_command, buf);
+ cmd->buf_offset = cpu_to_le32(offset);
+ cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
+ }
+ return cmd;
+}
+
+/**
+ * free_qcom_scm_command() - Free an SCM command
+ * @cmd: command to free
+ *
+ * Free an SCM command.
+ */
+static inline void free_qcom_scm_command(struct qcom_scm_command *cmd)
+{
+ kfree(cmd);
+}
+
+/**
+ * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct qcom_scm_response *qcom_scm_command_to_response(
+ const struct qcom_scm_command *cmd)
+{
+ return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
+}
+
+/**
+ * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
+{
+ return (void *)cmd->buf;
+}
+
+/**
+ * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
+{
+ return (void *)rsp + le32_to_cpu(rsp->buf_offset);
+}
+
+static int qcom_scm_remap_error(int err)
+{
+ pr_err("qcom_scm_call failed with error code %d\n", err);
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ }
+ return -EINVAL;
+}
+
+static u32 smc(u32 cmd_addr)
+{
+ int context_id;
+ register u32 r0 asm("r0") = 1;
+ register u32 r1 asm("r1") = (u32)&context_id;
+ register u32 r2 asm("r2") = cmd_addr;
+ do {
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r0")
+ __asmeq("%2", "r1")
+ __asmeq("%3", "r2")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "r3");
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ return r0;
+}
+
+static int __qcom_scm_call(const struct qcom_scm_command *cmd)
+{
+ int ret;
+ u32 cmd_addr = virt_to_phys(cmd);
+
+ /*
+ * Flush the command buffer so that the secure world sees
+ * the correct data.
+ */
+ __cpuc_flush_dcache_area((void *)cmd, cmd->len);
+ outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+
+ ret = smc(cmd_addr);
+ if (ret < 0)
+ ret = qcom_scm_remap_error(ret);
+
+ return ret;
+}
+
+static void qcom_scm_inv_range(unsigned long start, unsigned long end)
+{
+ u32 cacheline_size, ctr;
+
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
+ start = round_down(start, cacheline_size);
+ end = round_up(end, cacheline_size);
+ outer_inv_range(start, end);
+ while (start < end) {
+ asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
+ : "memory");
+ start += cacheline_size;
+ }
+ dsb();
+ isb();
+}
+
+/**
+ * qcom_scm_call() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. Cache maintenance on the command
+ * and response buffers is taken care of by qcom_scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+ size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+ int ret;
+ struct qcom_scm_command *cmd;
+ struct qcom_scm_response *rsp;
+ unsigned long start, end;
+
+ cmd = alloc_qcom_scm_command(cmd_len, resp_len);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
+ if (cmd_buf)
+ memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
+
+ mutex_lock(&qcom_scm_lock);
+ ret = __qcom_scm_call(cmd);
+ mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ goto out;
+
+ rsp = qcom_scm_command_to_response(cmd);
+ start = (unsigned long)rsp;
+
+ do {
+ qcom_scm_inv_range(start, start + sizeof(*rsp));
+ } while (!rsp->is_complete);
+
+ end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
+ qcom_scm_inv_range(start, end);
+
+ if (resp_buf)
+ memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
+out:
+ free_qcom_scm_command(cmd);
+ return ret;
+}
+
+#define SCM_CLASS_REGISTER (0x2 << 8)
+#define SCM_MASK_IRQS BIT(5)
+#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
+ SCM_CLASS_REGISTER | \
+ SCM_MASK_IRQS | \
+ (n & 0xf))
+
+/**
+ * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+ int context_id;
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+ register u32 r1 asm("r1") = (u32)&context_id;
+ register u32 r2 asm("r2") = arg1;
+
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r0")
+ __asmeq("%2", "r1")
+ __asmeq("%3", "r2")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "r3");
+ return r0;
+}
+
+u32 qcom_scm_get_version(void)
+{
+ int context_id;
+ static u32 version = -1;
+ register u32 r0 asm("r0");
+ register u32 r1 asm("r1");
+
+ if (version != -1)
+ return version;
+
+ mutex_lock(&qcom_scm_lock);
+
+ r0 = 0x1 << 8;
+ r1 = (u32)&context_id;
+ do {
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r0")
+ __asmeq("%3", "r1")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0), "=r" (r1)
+ : "r" (r0), "r" (r1)
+ : "r2", "r3");
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ version = r1;
+ mutex_unlock(&qcom_scm_lock);
+
+ return version;
+}
+EXPORT_SYMBOL(qcom_scm_get_version);
+
+#define QCOM_SCM_SVC_BOOT 0x1
+#define QCOM_SCM_BOOT_ADDR 0x1
+/*
+ * Set the cold/warm boot address for one of the CPU cores.
+ */
+static int qcom_scm_set_boot_addr(u32 addr, int flags)
+{
+ struct {
+ __le32 flags;
+ __le32 addr;
+ } cmd;
+
+ cmd.addr = cpu_to_le32(addr);
+ cmd.flags = cpu_to_le32(flags);
+ return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+ &cmd, sizeof(cmd), NULL, 0);
+}
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = 0;
+ int cpu;
+ int scm_cb_flags[] = {
+ QCOM_SCM_FLAG_COLDBOOT_CPU0,
+ QCOM_SCM_FLAG_COLDBOOT_CPU1,
+ QCOM_SCM_FLAG_COLDBOOT_CPU2,
+ QCOM_SCM_FLAG_COLDBOOT_CPU3,
+ };
+
+ if (!cpus || cpumask_empty(cpus))
+ return -EINVAL;
+
+ for_each_cpu(cpu, cpus) {
+ if (cpu < ARRAY_SIZE(scm_cb_flags))
+ flags |= scm_cb_flags[cpu];
+ else
+ set_cpu_present(cpu, false);
+ }
+
+ return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+}
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
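An illustrative caller, not part of this patch: platform SMP bring-up code would normally point the cold-boot vector of all present CPUs at its secondary entry before releasing them. example_secondary_entry() and example_prepare_cpus() are placeholder names.

extern void example_secondary_entry(void);

static void example_prepare_cpus(unsigned int max_cpus)
{
	if (qcom_scm_set_cold_boot_addr(example_secondary_entry,
					cpu_present_mask))
		pr_warn("qcom_scm: failed to set cold boot address\n");
}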
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be triggered by cpuidle or hotplug.
+ */
+int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int ret;
+ int flags = 0;
+ int cpu;
+
+ /*
+ * Reassign only if we are switching from hotplug entry point
+ * to cpuidle entry point or vice versa.
+ */
+ for_each_cpu(cpu, cpus) {
+ if (entry == qcom_scm_wb[cpu].entry)
+ continue;
+ flags |= qcom_scm_wb[cpu].flag;
+ }
+
+ /* No change in entry function */
+ if (!flags)
+ return 0;
+
+ ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+ if (!ret) {
+ for_each_cpu(cpu, cpus)
+ qcom_scm_wb[cpu].entry = entry;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
+
+#define QCOM_SCM_CMD_TERMINATE_PC 0x2
+#define QCOM_SCM_FLUSH_FLAG_MASK 0x3
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending
+ * interrupt, control returns from this function; otherwise the cpu jumps
+ * to the warm boot entry point set for this cpu upon reset.
+ */
+void qcom_scm_cpu_power_down(u32 flags)
+{
+ qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
+ flags & QCOM_SCM_FLUSH_FLAG_MASK);
+}
+EXPORT_SYMBOL(qcom_scm_cpu_power_down);
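A hedged sketch, not part of this patch, of how a hotplug or cpuidle path might combine the two calls above. example_cpu_die() is a placeholder, and the flags value 0 merely stands in for whatever cache-flush flags the platform firmware expects.

extern void cpu_resume(void);	/* kernel resume entry used as warm boot target */

static void example_cpu_die(unsigned int cpu)
{
	if (qcom_scm_set_warm_boot_addr(cpu_resume, cpumask_of(cpu)))
		return;

	/* Only returns if a pending interrupt aborted the power down. */
	qcom_scm_cpu_power_down(0);
}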
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c1e2ca3d9a51..caefe806db5e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -90,27 +90,11 @@ config GPIO_GENERIC
# put drivers in the right section, in alphabetical order
-config GPIO_DA9052
- tristate "Dialog DA9052 GPIO"
- depends on PMIC_DA9052
- help
- Say yes here to enable the GPIO driver for the DA9052 chip.
-
-config GPIO_DA9055
- tristate "Dialog Semiconductor DA9055 GPIO"
- depends on MFD_DA9055
- help
- Say yes here to enable the GPIO driver for the DA9055 chip.
-
- The Dialog DA9055 PMIC chip has 3 GPIO pins that can be
- be controller by this driver.
-
- If driver is built as a module it will be called gpio-da9055.
-
+# This symbol is selected by both I2C and SPI expanders
config GPIO_MAX730X
tristate
-comment "Memory mapped GPIO drivers:"
+menu "Memory mapped GPIO drivers"
config GPIO_74XX_MMIO
tristate "GPIO driver for 74xx-ICs with MMIO access"
@@ -126,6 +110,22 @@ config GPIO_74XX_MMIO
8 bits: 74244 (Input), 74273 (Output)
16 bits: 741624 (Input), 7416374 (Output)
+config GPIO_ALTERA
+ tristate "Altera GPIO"
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to build support for the Altera PIO device.
+
+ If driver is built as a module it will be called gpio-altera.
+
+config GPIO_BCM_KONA
+ bool "Broadcom Kona GPIO"
+ depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
+ help
+ Turn on GPIO support for Broadcom "Kona" chips.
+
config GPIO_CLPS711X
tristate "CLPS711X GPIO support"
depends on ARCH_CLPS711X || COMPILE_TEST
@@ -140,28 +140,14 @@ config GPIO_DAVINCI
help
Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
-config GPIO_GENERIC_PLATFORM
- tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
- select GPIO_GENERIC
- help
- Say yes here to support basic platform_device memory-mapped GPIO controllers.
-
config GPIO_DWAPB
tristate "Synopsys DesignWare APB GPIO driver"
- depends on ARM
- depends on OF_GPIO
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
help
Say Y or M here to build support for the Synopsys DesignWare APB
GPIO block.
-config GPIO_IT8761E
- tristate "IT8761E GPIO support"
- depends on X86 # unconditional access to IO space.
- help
- Say yes here to support GPIO functionality of IT8761E super I/O chip.
-
config GPIO_EM
tristate "Emma Mobile GPIO"
depends on ARM && OF_GPIO
@@ -173,36 +159,99 @@ config GPIO_EP93XX
depends on ARCH_EP93XX
select GPIO_GENERIC
-config GPIO_ZEVIO
- bool "LSI ZEVIO SoC memory mapped GPIOs"
- depends on ARM && OF_GPIO
- help
- Say yes here to support the GPIO controller in LSI ZEVIO SoCs.
-
-config GPIO_MM_LANTIQ
- bool "Lantiq Memory mapped GPIOs"
- depends on LANTIQ && SOC_XWAY
- help
- This enables support for memory mapped GPIOs on the External Bus Unit
- (EBU) found on Lantiq SoCs. The gpios are output only as they are
- created by attaching a 16bit latch to the bus.
-
config GPIO_F7188X
- tristate "F71882FG and F71889F GPIO support"
+ tristate "F71869, F71869A, F71882FG and F71889F GPIO support"
depends on X86
help
This option enables support for GPIOs found on Fintek Super-I/O
- chips F71882FG and F71889F.
+ chips F71869, F71869A, F71882FG and F71889F.
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
+config GPIO_GE_FPGA
+ bool "GE FPGA based GPIO"
+ depends on GE_FPGA
+ select GPIO_GENERIC
+ help
+ Support for common GPIO functionality provided on some GE Single Board
+ Computers.
+
+ This driver provides basic support (configure as input or output, read
+ and write pin state) for GPIO implemented in a number of GE single
+ board computers.
+
+config GPIO_GENERIC_PLATFORM
+ tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
+ select GPIO_GENERIC
+ help
+ Say yes here to support basic platform_device memory-mapped GPIO controllers.
+
+config GPIO_GRGPIO
+ tristate "Aeroflex Gaisler GRGPIO support"
+ depends on OF
+ select GPIO_GENERIC
+ select IRQ_DOMAIN
+ help
+ Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
+ VHDL IP core library.
+
+config GPIO_ICH
+ tristate "Intel ICH GPIO"
+ depends on PCI && X86
+ select MFD_CORE
+ select LPC_ICH
+ help
+ Say yes here to support the GPIO functionality of a number of Intel
+ ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
+ ICH9, ICH10, Series 5/3400 (eg Ibex Peak), Series 6/C200 (eg
+ Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
+
+ If unsure, say N.
+
+config GPIO_IOP
+ tristate "Intel IOP GPIO"
+ depends on ARM && (ARCH_IOP32X || ARCH_IOP33X)
+ help
+ Say yes here to support the GPIO functionality of a number of Intel
+ IOP32X or IOP33X.
+
+ If unsure, say N.
+
+config GPIO_IT8761E
+ tristate "IT8761E GPIO support"
+ depends on X86 # unconditional access to IO space.
+ help
+ Say yes here to support GPIO functionality of IT8761E super I/O chip.
+
+config GPIO_LOONGSON
+ bool "Loongson-2/3 GPIO support"
+ depends on CPU_LOONGSON2 || CPU_LOONGSON3
+ help
+ Driver for GPIO functionality on Loongson-2F/3A/3B processors.
+
+config GPIO_LYNXPOINT
+ tristate "Intel Lynxpoint GPIO support"
+ depends on ACPI && X86
+ select GPIOLIB_IRQCHIP
+ help
+ Driver for GPIO functionality on the Intel Lynxpoint PCH chipset.
+ Requires ACPI device enumeration code to set up a platform device.
+
config GPIO_MB86S7X
bool "GPIO support for Fujitsu MB86S7x Platforms"
depends on ARCH_MB86S7X
help
Say yes here to support the GPIO controller in Fujitsu MB86S70 SoCs.
+config GPIO_MM_LANTIQ
+ bool "Lantiq Memory mapped GPIOs"
+ depends on LANTIQ && SOC_XWAY
+ help
+ This enables support for memory mapped GPIOs on the External Bus Unit
+ (EBU) found on Lantiq SoCs. The gpios are output only as they are
+ created by attaching a 16bit latch to the bus.
+
config GPIO_MOXART
bool "MOXART GPIO support"
depends on ARCH_MOXART
@@ -223,14 +272,6 @@ config GPIO_MPC8XXX
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610 GPIOs.
-config GPIO_MSM_V1
- tristate "Qualcomm MSM GPIO v1"
- depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
- help
- Say yes here to support the GPIO interface on ARM v6 based
- Qualcomm MSM chips. Most of the pins on the MSM can be
- selected for GPIO, and are controlled by this driver.
-
config GPIO_MSM_V2
tristate "Qualcomm MSM GPIO v2"
depends on GPIOLIB && OF && ARCH_QCOM
@@ -303,6 +344,33 @@ config GPIO_SAMSUNG
Legacy GPIO support. Use only for platforms without support for
pinctrl.
+config GPIO_SCH
+ tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
+ depends on PCI && X86
+ select MFD_CORE
+ select LPC_SCH
+ help
+ Say yes here to support GPIO interface on Intel Poulsbo SCH,
+ Intel Tunnel Creek processor, Intel Centerton processor or
+ Intel Quark X1000 SoC.
+
+ The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
+ powered by the core power rail and are turned off during sleep
+ modes (S3 and higher). The remaining four GPIOs are powered by
+ the Intel SCH suspend power supply. These GPIOs remain
+ active during S3. The suspend powered GPIOs can be used to wake the
+ system from the Suspend-to-RAM state.
+
+ The Intel Tunnel Creek processor has 5 GPIOs powered by the
+ core power rail and 9 from suspend power supply.
+
+ The Intel Centerton processor has a total of 30 GPIO pins.
+ Twenty-one are powered by the core power rail and 9 from the
+ suspend power supply.
+
+ The Intel Quark X1000 SoC has 2 GPIOs powered by the core
+ power well and 6 from the suspend power well.
+
config GPIO_SCH311X
tristate "SMSC SCH311x SuperI/O GPIO"
help
@@ -327,12 +395,27 @@ config GPIO_STA2X11
Say yes here to support the STA2x11/ConneXt GPIO device.
The GPIO module has 128 GPIO pins with alternate functions.
+config GPIO_STP_XWAY
+ bool "XWAY STP GPIOs"
+ depends on SOC_XWAY
+ help
+ This enables support for the Serial To Parallel (STP) unit found on
+ XWAY SoCs. The STP allows the SoC to drive a shift register cascade
+ of up to 24 bits. This peripheral is aimed at driving LEDs. Some of
+ the GPIOs/LEDs can be updated automatically by the SoC with DSL and
+ PHY status.
+
config GPIO_SYSCON
tristate "GPIO based on SYSCON"
depends on MFD_SYSCON && OF
help
Say yes here to support GPIO functionality though SYSCON driver.
+config GPIO_TB10X
+ bool
+ select GENERIC_IRQ_CHIP
+ select OF_GPIO
+
config GPIO_TS5500
tristate "TS-5500 DIO blocks and compatibles"
depends on TS5500 || COMPILE_TEST
@@ -364,6 +447,24 @@ config GPIO_VF610
help
Say yes here to support Vybrid vf610 GPIOs.
+config GPIO_VR41XX
+ tristate "NEC VR4100 series General-purpose I/O Uint support"
+ depends on CPU_VR41XX
+ help
+ Say yes here to support the NEC VR4100 series General-purpose I/O Uint
+
+config GPIO_VX855
+ tristate "VIA VX855/VX875 GPIO"
+ depends on PCI
+ select MFD_CORE
+ select MFD_VX855
+ help
+ Support access to the VX855/VX875 GPIO lines through the gpio library.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config GPIO_XGENE
bool "APM X-Gene GPIO controller support"
depends on ARM64 && OF_GPIO
@@ -387,13 +488,6 @@ config GPIO_XILINX
help
Say yes here to support the Xilinx FPGA GPIO device
-config GPIO_ZYNQ
- tristate "Xilinx Zynq GPIO support"
- depends on ARCH_ZYNQ
- select GPIOLIB_IRQCHIP
- help
- Say yes here to support Xilinx Zynq GPIO controller.
-
config GPIO_XTENSA
bool "Xtensa GPIO32 support"
depends on XTENSA
@@ -403,135 +497,49 @@ config GPIO_XTENSA
Say yes here to support the Xtensa internal GPIO32 IMPWIRE (input)
and EXPSTATE (output) ports
-config GPIO_VR41XX
- tristate "NEC VR4100 series General-purpose I/O Uint support"
- depends on CPU_VR41XX
- help
- Say yes here to support the NEC VR4100 series General-purpose I/O Uint
-
-config GPIO_SCH
- tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
- depends on PCI && X86
- select MFD_CORE
- select LPC_SCH
- help
- Say yes here to support GPIO interface on Intel Poulsbo SCH,
- Intel Tunnel Creek processor, Intel Centerton processor or
- Intel Quark X1000 SoC.
-
- The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
- powered by the core power rail and are turned off during sleep
- modes (S3 and higher). The remaining four GPIOs are powered by
- the Intel SCH suspend power supply. These GPIOs remain
- active during S3. The suspend powered GPIOs can be used to wake the
- system from the Suspend-to-RAM state.
-
- The Intel Tunnel Creek processor has 5 GPIOs powered by the
- core power rail and 9 from suspend power supply.
-
- The Intel Centerton processor has a total of 30 GPIO pins.
- Twenty-one are powered by the core power rail and 9 from the
- suspend power supply.
-
- The Intel Quark X1000 SoC has 2 GPIOs powered by the core
- power well and 6 from the suspend power well.
-
-config GPIO_ICH
- tristate "Intel ICH GPIO"
- depends on PCI && X86
- select MFD_CORE
- select LPC_ICH
- help
- Say yes here to support the GPIO functionality of a number of Intel
- ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
- ICH9, ICH10, Series 5/3400 (eg Ibex Peak), Series 6/C200 (eg
- Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
-
- If unsure, say N.
-
-config GPIO_IOP
- tristate "Intel IOP GPIO"
- depends on ARM && (ARCH_IOP32X || ARCH_IOP33X)
- help
- Say yes here to support the GPIO functionality of a number of Intel
- IOP32X or IOP33X.
-
- If unsure, say N.
-
-config GPIO_VX855
- tristate "VIA VX855/VX875 GPIO"
- depends on PCI
- select MFD_CORE
- select MFD_VX855
- help
- Support access to the VX855/VX875 GPIO lines through the gpio library.
-
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the
- functionality of the device.
-
-config GPIO_GE_FPGA
- bool "GE FPGA based GPIO"
- depends on GE_FPGA
- select GPIO_GENERIC
+config GPIO_ZEVIO
+ bool "LSI ZEVIO SoC memory mapped GPIOs"
+ depends on ARM && OF_GPIO
help
- Support for common GPIO functionality provided on some GE Single Board
- Computers.
-
- This driver provides basic support (configure as input or output, read
- and write pin state) for GPIO implemented in a number of GE single
- board computers.
+ Say yes here to support the GPIO controller in LSI ZEVIO SoCs.
-config GPIO_LYNXPOINT
- tristate "Intel Lynxpoint GPIO support"
- depends on ACPI && X86
+config GPIO_ZYNQ
+ tristate "Xilinx Zynq GPIO support"
+ depends on ARCH_ZYNQ
select GPIOLIB_IRQCHIP
help
- driver for GPIO functionality on Intel Lynxpoint PCH chipset
- Requires ACPI device enumeration code to set up a platform device.
-
-config GPIO_GRGPIO
- tristate "Aeroflex Gaisler GRGPIO support"
- depends on OF
- select GPIO_GENERIC
- select IRQ_DOMAIN
- help
- Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
- VHDL IP core library.
+ Say yes here to support Xilinx Zynq GPIO controller.
-config GPIO_TB10X
- bool
- select GENERIC_IRQ_CHIP
- select OF_GPIO
+endmenu
-comment "I2C GPIO expanders:"
+menu "I2C GPIO expanders"
+ depends on I2C
-config GPIO_ARIZONA
- tristate "Wolfson Microelectronics Arizona class devices"
- depends on MFD_ARIZONA
+config GPIO_ADP5588
+ tristate "ADP5588 I2C GPIO expander"
+ depends on I2C
help
- Support for GPIOs on Wolfson Arizona class devices.
+ This option enables support for 18 GPIOs found
+ on Analog Devices ADP5588 GPIO Expanders.
-config GPIO_CRYSTAL_COVE
- tristate "GPIO support for Crystal Cove PMIC"
- depends on INTEL_SOC_PMIC
- select GPIOLIB_IRQCHIP
+config GPIO_ADP5588_IRQ
+ bool "Interrupt controller support for ADP5588"
+ depends on GPIO_ADP5588=y
help
- Support for GPIO pins on Crystal Cove PMIC.
-
- Say Yes if you have a Intel SoC based tablet with Crystal Cove PMIC
- inside.
-
- This driver can also be built as a module. If so, the module will be
- called gpio-crystalcove.
+ Say yes here to enable the adp5588 to be used as an interrupt
+ controller. It requires the driver to be built in the kernel.
-config GPIO_LP3943
- tristate "TI/National Semiconductor LP3943 GPIO expander"
- depends on MFD_LP3943
+config GPIO_ADNP
+ tristate "Avionic Design N-bit GPIO expander"
+ depends on I2C && OF_GPIO
+ select GPIOLIB_IRQCHIP
help
- GPIO driver for LP3943 MFD.
- LP3943 can be used as a GPIO expander which provides up to 16 GPIOs.
- Open drain outputs are required for this usage.
+ This option enables support for N GPIOs found on Avionic Design
+ I2C GPIO expanders. The register space will be extended by powers
+ of two, so the controller will need to accommodate for that. For
+ example: if a controller provides 48 pins, 6 registers will be
+ enough to represent all pins, but the driver will assume a
+ register layout for 64 pins (8 registers).
config GPIO_MAX7300
tristate "Maxim MAX7300 GPIO expander"
@@ -543,7 +551,6 @@ config GPIO_MAX7300
config GPIO_MAX732X
tristate "MAX7319, MAX7320-7327 I2C Port Expanders"
depends on I2C
- select IRQ_DOMAIN
help
Say yes here to support the MAX7319, MAX7320-7327 series of I2C
Port Expanders. Each IO port on these chips has a fixed role of
@@ -563,6 +570,7 @@ config GPIO_MAX732X
config GPIO_MAX732X_IRQ
bool "Interrupt controller support for MAX732x"
depends on GPIO_MAX732X=y
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable the max732x to be used as an interrupt
controller. It requires the driver to be built in the kernel.
@@ -604,6 +612,7 @@ config GPIO_PCA953X_IRQ
config GPIO_PCF857X
tristate "PCF857x, PCA{85,96}7x, and MAX732[89] I2C GPIO expanders"
depends on I2C
+ select GPIOLIB_IRQCHIP
select IRQ_DOMAIN
help
Say yes here to provide access to most "quasi-bidirectional" I2C
@@ -626,15 +635,6 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
-config GPIO_RC5T583
- bool "RICOH RC5T583 GPIO"
- depends on MFD_RC5T583
- help
- Select this option to enable GPIO driver for the Ricoh RC5T583
- chip family.
- This driver provides the support for driving/reading the gpio pins
- of RC5T583 device through standard gpio library.
-
config GPIO_SX150X
bool "Semtech SX150x I2C GPIO expander"
depends on I2C=y
@@ -647,6 +647,124 @@ config GPIO_SX150X
8 bits: sx1508q
16 bits: sx1509q
+endmenu
+
+menu "MFD GPIO expanders"
+
+config GPIO_ADP5520
+ tristate "GPIO Support for ADP5520 PMIC"
+ depends on PMIC_ADP5520
+ help
+ This option enables support for on-chip GPIO found
+ on Analog Devices ADP5520 PMICs.
+
+config GPIO_ARIZONA
+ tristate "Wolfson Microelectronics Arizona class devices"
+ depends on MFD_ARIZONA
+ help
+ Support for GPIOs on Wolfson Arizona class devices.
+
+config GPIO_CRYSTAL_COVE
+ tristate "GPIO support for Crystal Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ select GPIOLIB_IRQCHIP
+ help
+ Support for GPIO pins on Crystal Cove PMIC.
+
+ Say Yes if you have an Intel SoC based tablet with Crystal Cove PMIC
+ inside.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-crystalcove.
+
+config GPIO_CS5535
+ tristate "AMD CS5535/CS5536 GPIO support"
+ depends on MFD_CS5535
+ help
+ The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
+ can be used for quite a number of things. The CS5535/6 is found on
+ AMD Geode and Lemote Yeeloong devices.
+
+ If unsure, say N.
+
+config GPIO_DA9052
+ tristate "Dialog DA9052 GPIO"
+ depends on PMIC_DA9052
+ help
+ Say yes here to enable the GPIO driver for the DA9052 chip.
+
+config GPIO_DA9055
+ tristate "Dialog Semiconductor DA9055 GPIO"
+ depends on MFD_DA9055
+ help
+ Say yes here to enable the GPIO driver for the DA9055 chip.
+
+ The Dialog DA9055 PMIC chip has 3 GPIO pins that can be
+ controlled by this driver.
+
+ If the driver is built as a module it will be called gpio-da9055.
+
+config GPIO_DLN2
+ tristate "Diolan DLN2 GPIO support"
+ depends on MFD_DLN2
+ select GPIOLIB_IRQCHIP
+ help
+ Select this option to enable GPIO driver for the Diolan DLN2
+ board.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-dln2.
+
+config GPIO_JANZ_TTL
+ tristate "Janz VMOD-TTL Digital IO Module"
+ depends on MFD_JANZ_CMODIO
+ help
+ This enables support for the Janz VMOD-TTL Digital IO module.
+ This driver provides support for driving the pins in output
+ mode only. Input mode is not supported.
+
+config GPIO_KEMPLD
+ tristate "Kontron ETX / COMexpress GPIO"
+ depends on MFD_KEMPLD
+ help
+ This enables support for the PLD GPIO interface on some Kontron ETX
+ and COMexpress (ETXexpress) modules.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-kempld.
+
+config GPIO_LP3943
+ tristate "TI/National Semiconductor LP3943 GPIO expander"
+ depends on MFD_LP3943
+ help
+ GPIO driver for LP3943 MFD.
+ LP3943 can be used as a GPIO expander which provides up to 16 GPIOs.
+ Open drain outputs are required for this usage.
+
+config GPIO_MSIC
+ bool "Intel MSIC mixed signal gpio support"
+ depends on MFD_INTEL_MSIC
+ help
+ Enable support for GPIO on Intel MSIC controllers found in
+ Intel MID devices.
+
+config GPIO_PALMAS
+ bool "TI PALMAS series PMICs GPIO"
+ depends on MFD_PALMAS
+ help
+ Select this option to enable GPIO driver for the TI PALMAS
+ series chip family.
+
+config GPIO_RC5T583
+ bool "RICOH RC5T583 GPIO"
+ depends on MFD_RC5T583
+ help
+ Select this option to enable GPIO driver for the Ricoh RC5T583
+ chip family.
+ This driver provides the support for driving/reading the gpio pins
+ of RC5T583 device through standard gpio library.
+
config GPIO_STMPE
bool "STMPE GPIOs"
depends on MFD_STMPE
@@ -656,24 +774,35 @@ config GPIO_STMPE
This enables support for the GPIOs found on the STMPE I/O
Expanders.
-config GPIO_STP_XWAY
- bool "XWAY STP GPIOs"
- depends on SOC_XWAY
- help
- This enables support for the Serial To Parallel (STP) unit found on
- XWAY SoC. The STP allows the SoC to drive a shift registers cascade,
- that can be up to 24 bit. This peripheral is aimed at driving leds.
- Some of the gpios/leds can be auto updated by the soc with dsl and
- phy status.
-
config GPIO_TC3589X
bool "TC3589X GPIOs"
depends on MFD_TC3589X
+ depends on OF_GPIO
select GPIOLIB_IRQCHIP
help
This enables support for the GPIOs found on the TC3589X
I/O Expander.
+config GPIO_TIMBERDALE
+ bool "Support for timberdale GPIO IP"
+ depends on MFD_TIMBERDALE
+ ---help---
+ Add support for the GPIO IP in the timberdale FPGA.
+
+config GPIO_TPS6586X
+ bool "TPS6586X GPIO"
+ depends on MFD_TPS6586X
+ help
+ Select this option to enable GPIO driver for the TPS6586X
+ chip family.
+
+config GPIO_TPS65910
+ bool "TPS65910 GPIO"
+ depends on MFD_TPS65910
+ help
+ Select this option to enable GPIO driver for the TPS65910
+ chip family.
+
config GPIO_TPS65912
tristate "TI TPS65912 GPIO"
depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
@@ -694,6 +823,13 @@ config GPIO_TWL6040
Say yes here to access the GPO signals of twl6040
audio chip from Texas Instruments.
+config GPIO_UCB1400
+ tristate "Philips UCB1400 GPIO"
+ depends on UCB1400_CORE
+ help
+ This enables support for the Philips UCB1400 GPIO pins.
+ The UCB1400 is an AC97 audio codec.
+
config GPIO_WM831X
tristate "WM831x GPIOs"
depends on MFD_WM831X
@@ -715,50 +851,22 @@ config GPIO_WM8994
Say yes here to access the GPIO signals of WM8994 audio hub
CODECs from Wolfson Microelectronics.
-config GPIO_ADP5520
- tristate "GPIO Support for ADP5520 PMIC"
- depends on PMIC_ADP5520
- help
- This option enables support for on-chip GPIO found
- on Analog Devices ADP5520 PMICs.
+endmenu
-config GPIO_ADP5588
- tristate "ADP5588 I2C GPIO expander"
- depends on I2C
- help
- This option enables support for 18 GPIOs found
- on Analog Devices ADP5588 GPIO Expanders.
-
-config GPIO_ADP5588_IRQ
- bool "Interrupt controller support for ADP5588"
- depends on GPIO_ADP5588=y
- help
- Say yes here to enable the adp5588 to be used as an interrupt
- controller. It requires the driver to be built in the kernel.
+menu "PCI GPIO expanders"
+ depends on PCI
-config GPIO_ADNP
- tristate "Avionic Design N-bit GPIO expander"
- depends on I2C && OF_GPIO
- select GPIOLIB_IRQCHIP
+config GPIO_AMD8111
+ tristate "AMD 8111 GPIO driver"
+ depends on PCI
help
- This option enables support for N GPIOs found on Avionic Design
- I2C GPIO expanders. The register space will be extended by powers
- of two, so the controller will need to accommodate for that. For
- example: if a controller provides 48 pins, 6 registers will be
- enough to represent all pins, but the driver will assume a
- register layout for 64 pins (8 registers).
-
-comment "PCI GPIO expanders:"
+ The AMD 8111 south bridge contains 32 GPIO pins which can be used.
-config GPIO_CS5535
- tristate "AMD CS5535/CS5536 GPIO support"
- depends on MFD_CS5535
- help
- The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
- can be used for quite a number of things. The CS5535/6 is found on
- AMD Geode and Lemote Yeeloong devices.
+ Note that usually system firmware/ACPI handles GPIO pins on their
+ own and users might easily break their systems with careless usage
+ of this driver!
- If unsure, say N.
+ If unsure, say N.
config GPIO_BT8XX
tristate "BT8XX GPIO abuser"
@@ -776,18 +884,6 @@ config GPIO_BT8XX
If unsure, say N.
-config GPIO_AMD8111
- tristate "AMD 8111 GPIO driver"
- depends on PCI
- help
- The AMD 8111 south bridge contains 32 GPIO pins which can be used.
-
- Note, that usually system firmware/ACPI handles GPIO pins on their
- own and users might easily break their systems with uncarefull usage
- of this driver!
-
- If unsure, say N
-
config GPIO_INTEL_MID
bool "Intel Mid GPIO support"
depends on PCI && X86
@@ -795,6 +891,16 @@ config GPIO_INTEL_MID
help
Say Y here to support Intel Mid GPIO.
+config GPIO_ML_IOH
+ tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+ depends on PCI
+ select GENERIC_IRQ_CHIP
+ help
+ ML7213 is companion chip for Intel Atom E6xx series.
+ This driver can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/Output
+ Hub) which is for IVI(In-Vehicle Infotainment) use.
+ This driver can access the IOH's GPIO device.
+
config GPIO_PCH
tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
depends on PCI && (X86_32 || COMPILE_TEST)
@@ -811,15 +917,14 @@ config GPIO_PCH
ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-config GPIO_ML_IOH
- tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+config GPIO_RDC321X
+ tristate "RDC R-321x GPIO support"
depends on PCI
- select GENERIC_IRQ_CHIP
+ select MFD_CORE
+ select MFD_RDC321X
help
- ML7213 is companion chip for Intel Atom E6xx series.
- This driver can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/Output
- Hub) which is for IVI(In-Vehicle Infotainment) use.
- This driver can access the IOH's GPIO device.
+ Support for the RDC R321x SoC GPIOs over southbridge
+ PCI configuration space.
config GPIO_SODAVILLE
bool "Intel Sodaville GPIO support"
@@ -829,22 +934,18 @@ config GPIO_SODAVILLE
help
Say Y here to support Intel Sodaville GPIO.
-config GPIO_TIMBERDALE
- bool "Support for timberdale GPIO IP"
- depends on MFD_TIMBERDALE
- ---help---
- Add support for the GPIO IP in the timberdale FPGA.
+endmenu
-config GPIO_RDC321X
- tristate "RDC R-321x GPIO support"
- depends on PCI
- select MFD_CORE
- select MFD_RDC321X
- help
- Support for the RDC R321x SoC GPIOs over southbridge
- PCI configuration space.
+menu "SPI GPIO expanders"
+ depends on SPI_MASTER
-comment "SPI GPIO expanders:"
+config GPIO_74X164
+ tristate "74x164 serial-in/parallel-out 8-bits shift register"
+ depends on SPI_MASTER && OF
+ help
+ Driver for 74x164 compatible serial-in/parallel-out 8-outputs
+ shift registers. This driver can be used to provide access
+ to more gpio outputs.
config GPIO_MAX7301
tristate "Maxim MAX7301 GPIO expander"
@@ -869,80 +970,10 @@ config GPIO_MC33880
SPI driver for Freescale MC33880 high-side/low-side switch.
This provides GPIO interface supporting inputs and outputs.
-config GPIO_74X164
- tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on SPI_MASTER && OF
- help
- Driver for 74x164 compatible serial-in/parallel-out 8-outputs
- shift registers. This driver can be used to provide access
- to more gpio outputs.
-
-comment "AC97 GPIO expanders:"
-
-config GPIO_UCB1400
- tristate "Philips UCB1400 GPIO"
- depends on UCB1400_CORE
- help
- This enables support for the Philips UCB1400 GPIO pins.
- The UCB1400 is an AC97 audio codec.
-
-comment "LPC GPIO expanders:"
-
-config GPIO_KEMPLD
- tristate "Kontron ETX / COMexpress GPIO"
- depends on MFD_KEMPLD
- help
- This enables support for the PLD GPIO interface on some Kontron ETX
- and COMexpress (ETXexpress) modules.
-
- This driver can also be built as a module. If so, the module will be
- called gpio-kempld.
-
-comment "MODULbus GPIO expanders:"
-
-config GPIO_JANZ_TTL
- tristate "Janz VMOD-TTL Digital IO Module"
- depends on MFD_JANZ_CMODIO
- help
- This enables support for the Janz VMOD-TTL Digital IO module.
- This driver provides support for driving the pins in output
- mode only. Input mode is not supported.
-
-config GPIO_PALMAS
- bool "TI PALMAS series PMICs GPIO"
- depends on MFD_PALMAS
- help
- Select this option to enable GPIO driver for the TI PALMAS
- series chip family.
-
-config GPIO_TPS6586X
- bool "TPS6586X GPIO"
- depends on MFD_TPS6586X
- help
- Select this option to enable GPIO driver for the TPS6586X
- chip family.
+endmenu
-config GPIO_TPS65910
- bool "TPS65910 GPIO"
- depends on MFD_TPS65910
- help
- Select this option to enable GPIO driver for the TPS65910
- chip family.
-
-config GPIO_MSIC
- bool "Intel MSIC mixed signal gpio support"
- depends on MFD_INTEL_MSIC
- help
- Enable support for GPIO on intel MSIC controllers found in
- intel MID devices
-
-config GPIO_BCM_KONA
- bool "Broadcom Kona GPIO"
- depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
- help
- Turn on GPIO support for Broadcom "Kona" chips.
-
-comment "USB GPIO expanders:"
+menu "USB GPIO expanders"
+ depends on USB
config GPIO_VIPERBOARD
tristate "Viperboard GPIO a & b support"
@@ -955,16 +986,6 @@ config GPIO_VIPERBOARD
River Tech's viperboard.h for detailed meaning
of the module parameters.
-config GPIO_DLN2
- tristate "Diolan DLN2 GPIO support"
- depends on MFD_DLN2
- select GPIOLIB_IRQCHIP
-
- help
- Select this option to enable GPIO driver for the Diolan DLN2
- board.
-
- This driver can also be built as a module. If so, the module
- will be called gpio-dln2.
+endmenu
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index bdda6a94d2cd..f71bb971329c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
+obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
@@ -58,7 +60,6 @@ obj-$(CONFIG_GPIO_MOXART) += gpio-moxart.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
-obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 13dbd3dfc33a..07ba82317ece 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -35,6 +35,20 @@ static int devm_gpiod_match(struct device *dev, void *res, void *data)
return *this == *gpio;
}
+static void devm_gpiod_release_array(struct device *dev, void *res)
+{
+ struct gpio_descs **descs = res;
+
+ gpiod_put_array(*descs);
+}
+
+static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
+{
+ struct gpio_descs **this = res, **gpios = data;
+
+ return *this == *gpios;
+}
+
/**
* devm_gpiod_get - Resource-managed gpiod_get()
* @dev: GPIO consumer
@@ -111,23 +125,39 @@ EXPORT_SYMBOL(__devm_gpiod_get_index);
/**
* devm_get_gpiod_from_child - get a GPIO descriptor from a device's child node
* @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
* @child: firmware node (child of @dev)
*
* GPIO descriptors returned from this function are automatically disposed on
* driver detach.
*/
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
+ const char *con_id,
struct fwnode_handle *child)
{
+ static const char * const suffixes[] = { "gpios", "gpio" };
+ char prop_name[32]; /* 32 is max size of property name */
struct gpio_desc **dr;
struct gpio_desc *desc;
+ unsigned int i;
dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
- desc = fwnode_get_named_gpiod(child, "gpios");
+ for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
+ if (con_id)
+ snprintf(prop_name, sizeof(prop_name), "%s-%s",
+ con_id, suffixes[i]);
+ else
+ snprintf(prop_name, sizeof(prop_name), "%s",
+ suffixes[i]);
+
+ desc = fwnode_get_named_gpiod(child, prop_name);
+ if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ break;
+ }
if (IS_ERR(desc)) {
devres_free(dr);
return desc;
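In practice the loop above means a con_id of "reset" is looked up as "reset-gpios" and then "reset-gpio", while a NULL con_id falls back to the bare "gpios"/"gpio" properties; the first name that resolves, or that defers probing, ends the search.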
@@ -170,6 +200,66 @@ struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *de
EXPORT_SYMBOL(__devm_gpiod_get_index_optional);
/**
+ * devm_gpiod_get_array - Resource-managed gpiod_get_array()
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
+ *
+ * Managed gpiod_get_array(). GPIO descriptors returned from this function are
+ * automatically disposed on driver detach. See gpiod_get_array() for detailed
+ * information about behavior and return values.
+ */
+struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+{
+ struct gpio_descs **dr;
+ struct gpio_descs *descs;
+
+ dr = devres_alloc(devm_gpiod_release_array,
+ sizeof(struct gpio_descs *), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ descs = gpiod_get_array(dev, con_id, flags);
+ if (IS_ERR(descs)) {
+ devres_free(dr);
+ return descs;
+ }
+
+ *dr = descs;
+ devres_add(dev, dr);
+
+ return descs;
+}
+EXPORT_SYMBOL(devm_gpiod_get_array);
+
+/**
+ * devm_gpiod_get_array_optional - Resource-managed gpiod_get_array_optional()
+ * @dev: GPIO consumer
+ * @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
+ *
+ * Managed gpiod_get_array_optional(). GPIO descriptors returned from this
+ * function are automatically disposed on driver detach.
+ * See gpiod_get_array_optional() for detailed information about behavior and
+ * return values.
+ */
+struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ struct gpio_descs *descs;
+
+ descs = devm_gpiod_get_array(dev, con_id, flags);
+ if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+ return NULL;
+
+ return descs;
+}
+EXPORT_SYMBOL(devm_gpiod_get_array_optional);
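A minimal consumer sketch, assuming a hypothetical driver with a group of "mux" selector lines; the "mux" con_id and example_probe() are illustrative, not part of this patch.

static int example_probe(struct platform_device *pdev)
{
	struct gpio_descs *mux;
	unsigned int i;

	/* Grabs every "mux-gpios" line at once, configured as output-low. */
	mux = devm_gpiod_get_array(&pdev->dev, "mux", GPIOD_OUT_LOW);
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	for (i = 0; i < mux->ndescs; i++)
		gpiod_set_value_cansleep(mux->desc[i], 0);

	/* No explicit put: devres releases the whole array on detach. */
	return 0;
}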
+
+/**
* devm_gpiod_put - Resource-managed gpiod_put()
* @desc: GPIO descriptor to dispose of
*
@@ -184,6 +274,21 @@ void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
}
EXPORT_SYMBOL(devm_gpiod_put);
+/**
+ * devm_gpiod_put_array - Resource-managed gpiod_put_array()
+ * @descs: GPIO descriptor array to dispose of
+ *
+ * Dispose of an array of GPIO descriptors obtained with devm_gpiod_get_array().
+ * Normally this function will not be called as the GPIOs will be disposed of
+ * by the resource management code.
+ */
+void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
+{
+ WARN_ON(devres_release(dev, devm_gpiod_release_array,
+ devm_gpiod_match_array, &descs));
+}
+EXPORT_SYMBOL(devm_gpiod_put_array);
+
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3beed6ea8c65..d3fe6a6776da 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -367,7 +367,7 @@ static int adp5588_gpio_probe(struct i2c_client *client,
struct gpio_chip *gc;
int ret, i, revid;
- if (pdata == NULL) {
+ if (!pdata) {
dev_err(&client->dev, "missing platform data\n");
return -ENODEV;
}
@@ -378,8 +378,8 @@ static int adp5588_gpio_probe(struct i2c_client *client,
return -EIO;
}
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL)
+ dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
dev->client = client;
@@ -446,7 +446,6 @@ static int adp5588_gpio_probe(struct i2c_client *client,
err_irq:
adp5588_irq_teardown(dev);
err:
- kfree(dev);
return ret;
}
@@ -472,7 +471,6 @@ static int adp5588_gpio_remove(struct i2c_client *client)
gpiochip_remove(&dev->gpio_chip);
- kfree(dev);
return 0;
}
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
new file mode 100644
index 000000000000..449fb46cb8a0
--- /dev/null
+++ b/drivers/gpio/gpio-altera.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Based on gpio-mpc8xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+
+#define ALTERA_GPIO_MAX_NGPIO 32
+#define ALTERA_GPIO_DATA 0x0
+#define ALTERA_GPIO_DIR 0x4
+#define ALTERA_GPIO_IRQ_MASK 0x8
+#define ALTERA_GPIO_EDGE_CAP 0xc
+
+/**
+ * struct altera_gpio_chip
+ * @mmchip: memory mapped chip structure.
+ * @gpio_lock: synchronization lock so that new irq/set/get requests
+ * will be blocked until the current one completes.
+ * @interrupt_trigger: specifies the hardware configured IRQ trigger type
+ * (rising, falling, both, high)
+ * @mapped_irq: kernel mapped irq number.
+ */
+struct altera_gpio_chip {
+ struct of_mm_gpio_chip mmchip;
+ spinlock_t gpio_lock;
+ int interrupt_trigger;
+ int mapped_irq;
+};
+
+static void altera_gpio_irq_unmask(struct irq_data *d)
+{
+ struct altera_gpio_chip *altera_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ unsigned long flags;
+ u32 intmask;
+
+ altera_gc = irq_data_get_irq_chip_data(d);
+ mm_gc = &altera_gc->mmchip;
+
+ spin_lock_irqsave(&altera_gc->gpio_lock, flags);
+ intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ /* Set ALTERA_GPIO_IRQ_MASK bit to unmask */
+ intmask |= BIT(irqd_to_hwirq(d));
+ writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+}
+
+static void altera_gpio_irq_mask(struct irq_data *d)
+{
+ struct altera_gpio_chip *altera_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ unsigned long flags;
+ u32 intmask;
+
+ altera_gc = irq_data_get_irq_chip_data(d);
+ mm_gc = &altera_gc->mmchip;
+
+ spin_lock_irqsave(&altera_gc->gpio_lock, flags);
+ intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ /* Clear ALTERA_GPIO_IRQ_MASK bit to mask */
+ intmask &= ~BIT(irqd_to_hwirq(d));
+ writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+}
+
+/*
+ * This controller's IRQ type is synthesized in hardware, so this function
+ * just checks if the requested set_type matches the synthesized IRQ type.
+ */
+static int altera_gpio_irq_set_type(struct irq_data *d,
+ unsigned int type)
+{
+ struct altera_gpio_chip *altera_gc;
+
+ altera_gc = irq_data_get_irq_chip_data(d);
+
+ if (type == IRQ_TYPE_NONE)
+ return 0;
+ if (type == IRQ_TYPE_LEVEL_HIGH &&
+ altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
+ return 0;
+ if (type == IRQ_TYPE_EDGE_RISING &&
+ altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
+ return 0;
+ if (type == IRQ_TYPE_EDGE_FALLING &&
+ altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
+ return 0;
+ if (type == IRQ_TYPE_EDGE_BOTH &&
+ altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
+ return 0;
+
+ return -EINVAL;
+}
+
+static unsigned int altera_gpio_irq_startup(struct irq_data *d)
+{
+ altera_gpio_irq_unmask(d);
+
+ return 0;
+}
+
+static struct irq_chip altera_irq_chip = {
+ .name = "altera-gpio",
+ .irq_mask = altera_gpio_irq_mask,
+ .irq_unmask = altera_gpio_irq_unmask,
+ .irq_set_type = altera_gpio_irq_set_type,
+ .irq_startup = altera_gpio_irq_startup,
+ .irq_shutdown = altera_gpio_irq_mask,
+};
+
+static int altera_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct of_mm_gpio_chip *mm_gc;
+
+ mm_gc = to_of_mm_gpio_chip(gc);
+
+ return !!(readl(mm_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
+}
+
+static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mm_gc;
+ struct altera_gpio_chip *chip;
+ unsigned long flags;
+ unsigned int data_reg;
+
+ mm_gc = to_of_mm_gpio_chip(gc);
+ chip = container_of(mm_gc, struct altera_gpio_chip, mmchip);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+ data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
+ if (value)
+ data_reg |= BIT(offset);
+ else
+ data_reg &= ~BIT(offset);
+ writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct of_mm_gpio_chip *mm_gc;
+ struct altera_gpio_chip *chip;
+ unsigned long flags;
+ unsigned int gpio_ddr;
+
+ mm_gc = to_of_mm_gpio_chip(gc);
+ chip = container_of(mm_gc, struct altera_gpio_chip, mmchip);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+ /* Set pin as input, assumes software controlled IP */
+ gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
+ gpio_ddr &= ~BIT(offset);
+ writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
+}
+
+static int altera_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mm_gc;
+ struct altera_gpio_chip *chip;
+ unsigned long flags;
+ unsigned int data_reg, gpio_ddr;
+
+ mm_gc = to_of_mm_gpio_chip(gc);
+ chip = container_of(mm_gc, struct altera_gpio_chip, mmchip);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+ /* Sets the GPIO value */
+ data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
+ if (value)
+ data_reg |= BIT(offset);
+ else
+ data_reg &= ~BIT(offset);
+ writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
+
+ /* Set pin as output, assumes software controlled IP */
+ gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
+ gpio_ddr |= BIT(offset);
+ writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
+}
+
+static void altera_gpio_irq_edge_handler(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct altera_gpio_chip *altera_gc;
+ struct irq_chip *chip;
+ struct of_mm_gpio_chip *mm_gc;
+ struct irq_domain *irqdomain;
+ unsigned long status;
+ int i;
+
+ altera_gc = irq_desc_get_handler_data(desc);
+ chip = irq_desc_get_chip(desc);
+ mm_gc = &altera_gc->mmchip;
+ irqdomain = altera_gc->mmchip.gc.irqdomain;
+
+ chained_irq_enter(chip, desc);
+
+ while ((status =
+ (readl(mm_gc->regs + ALTERA_GPIO_EDGE_CAP) &
+ readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
+ writel(status, mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
+ for_each_set_bit(i, &status, mm_gc->gc.ngpio) {
+ generic_handle_irq(irq_find_mapping(irqdomain, i));
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+
+static void altera_gpio_irq_leveL_high_handler(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct altera_gpio_chip *altera_gc;
+ struct irq_chip *chip;
+ struct of_mm_gpio_chip *mm_gc;
+ struct irq_domain *irqdomain;
+ unsigned long status;
+ int i;
+
+ altera_gc = irq_desc_get_handler_data(desc);
+ chip = irq_desc_get_chip(desc);
+ mm_gc = &altera_gc->mmchip;
+ irqdomain = altera_gc->mmchip.gc.irqdomain;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl(mm_gc->regs + ALTERA_GPIO_DATA);
+ status &= readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+
+ for_each_set_bit(i, &status, mm_gc->gc.ngpio) {
+ generic_handle_irq(irq_find_mapping(irqdomain, i));
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int altera_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int reg, ret;
+ struct altera_gpio_chip *altera_gc;
+
+ altera_gc = devm_kzalloc(&pdev->dev, sizeof(*altera_gc), GFP_KERNEL);
+ if (!altera_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&altera_gc->gpio_lock);
+
+ if (of_property_read_u32(node, "altr,ngpio", &reg))
+ /* By default assume maximum ngpio */
+ altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
+ else
+ altera_gc->mmchip.gc.ngpio = reg;
+
+ if (altera_gc->mmchip.gc.ngpio > ALTERA_GPIO_MAX_NGPIO) {
+ dev_warn(&pdev->dev,
+ "ngpio is greater than %d, defaulting to %d\n",
+ ALTERA_GPIO_MAX_NGPIO, ALTERA_GPIO_MAX_NGPIO);
+ altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
+ }
+
+ altera_gc->mmchip.gc.direction_input = altera_gpio_direction_input;
+ altera_gc->mmchip.gc.direction_output = altera_gpio_direction_output;
+ altera_gc->mmchip.gc.get = altera_gpio_get;
+ altera_gc->mmchip.gc.set = altera_gpio_set;
+ altera_gc->mmchip.gc.owner = THIS_MODULE;
+ altera_gc->mmchip.gc.dev = &pdev->dev;
+
+ ret = of_mm_gpiochip_add(node, &altera_gc->mmchip);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, altera_gc);
+
+ altera_gc->mapped_irq = platform_get_irq(pdev, 0);
+
+ if (altera_gc->mapped_irq < 0)
+ goto skip_irq;
+
+ if (of_property_read_u32(node, "altr,interrupt-type", &reg)) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev,
+ "altr,interrupt-type value not set in device tree\n");
+ goto teardown;
+ }
+ altera_gc->interrupt_trigger = reg;
+
+ ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+
+ if (ret) {
+ dev_err(&pdev->dev, "could not add irqchip\n");
+ goto teardown;
+ }
+
+ gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
+ &altera_irq_chip,
+ altera_gc->mapped_irq,
+ altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH ?
+ altera_gpio_irq_leveL_high_handler :
+ altera_gpio_irq_edge_handler);
+
+skip_irq:
+ return 0;
+teardown:
+ gpiochip_remove(&altera_gc->mmchip.gc);
+ pr_err("%s: registration failed with status %d\n",
+ node->full_name, ret);
+
+ return ret;
+}
+
+static int altera_gpio_remove(struct platform_device *pdev)
+{
+ struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&altera_gc->mmchip.gc);
+
+ return 0;
+}
+
+static const struct of_device_id altera_gpio_of_match[] = {
+ { .compatible = "altr,pio-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_gpio_of_match);
+
+static struct platform_driver altera_gpio_driver = {
+ .driver = {
+ .name = "altera_gpio",
+ .of_match_table = of_match_ptr(altera_gpio_of_match),
+ },
+ .probe = altera_gpio_probe,
+ .remove = altera_gpio_remove,
+};
+
+static int __init altera_gpio_init(void)
+{
+ return platform_driver_register(&altera_gpio_driver);
+}
+subsys_initcall(altera_gpio_init);
+
+static void __exit altera_gpio_exit(void)
+{
+ platform_driver_unregister(&altera_gpio_driver);
+}
+module_exit(altera_gpio_exit);
+
+MODULE_AUTHOR("Tien Hock Loh <thloh@altera.com>");
+MODULE_DESCRIPTION("Altera GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index fe369f5c7fa6..052fbc8fdaaa 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -103,7 +103,7 @@ static int arizona_gpio_probe(struct platform_device *pdev)
arizona_gpio = devm_kzalloc(&pdev->dev, sizeof(*arizona_gpio),
GFP_KERNEL);
- if (arizona_gpio == NULL)
+ if (!arizona_gpio)
return -ENOMEM;
arizona_gpio->arizona = arizona;
@@ -116,6 +116,7 @@ static int arizona_gpio_probe(struct platform_device *pdev)
switch (arizona->type) {
case WM5102:
case WM5110:
+ case WM8280:
case WM8997:
arizona_gpio->gpio_chip.ngpio = 5;
break;
@@ -155,7 +156,6 @@ static int arizona_gpio_remove(struct platform_device *pdev)
static struct platform_driver arizona_gpio_driver = {
.driver.name = "arizona-gpio",
- .driver.owner = THIS_MODULE,
.probe = arizona_gpio_probe,
.remove = arizona_gpio_remove,
};
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 3d9e08f7e823..91a7ffe83135 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -24,7 +24,7 @@
#include <linux/mfd/intel_soc_pmic.h>
#define CRYSTALCOVE_GPIO_NUM 16
-#define CRYSTALCOVE_VGPIO_NUM 94
+#define CRYSTALCOVE_VGPIO_NUM 95
#define UPDATE_IRQ_TYPE BIT(0)
#define UPDATE_IRQ_MASK BIT(1)
@@ -39,6 +39,7 @@
#define GPIO0P0CTLI 0x33
#define GPIO1P0CTLO 0x3b
#define GPIO1P0CTLI 0x43
+#define GPIOPANELCTL 0x52
#define CTLI_INTCNT_DIS (0)
#define CTLI_INTCNT_NE (1 << 1)
@@ -93,6 +94,10 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
{
int reg;
+ if (gpio == 94)
+ return GPIOPANELCTL;
+
if (reg_type == CTRL_IN) {
if (gpio < 8)
reg = GPIO0P0CTLI;
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 389a4d2a4926..2e9578ec0ca1 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -212,7 +212,7 @@ static int da9052_gpio_probe(struct platform_device *pdev)
int ret;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (gpio == NULL)
+ if (!gpio)
return -ENOMEM;
gpio->da9052 = dev_get_drvdata(pdev->dev.parent);
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index b8d757036887..7227e6ed3cb9 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -146,7 +146,7 @@ static int da9055_gpio_probe(struct platform_device *pdev)
int ret;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (gpio == NULL)
+ if (!gpio)
return -ENOMEM;
gpio->da9055 = dev_get_drvdata(pdev->dev.parent);
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 1be291ac6319..dbda8433c4f7 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -1,5 +1,5 @@
/*
- * GPIO driver for Fintek Super-I/O F71882 and F71889
+ * GPIO driver for Fintek Super-I/O F71869, F71869A, F71882 and F71889
*
* Copyright (C) 2010-2013 LaCie
*
@@ -32,12 +32,16 @@
#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
#define SIO_FINTEK_ID 0x1934 /* Manufacturer ID */
+#define SIO_F71869_ID 0x0814 /* F71869 chipset ID */
+#define SIO_F71869A_ID 0x1007 /* F71869A chipset ID */
#define SIO_F71882_ID 0x0541 /* F71882 chipset ID */
#define SIO_F71889_ID 0x0909 /* F71889 chipset ID */
-enum chips { f71882fg, f71889f };
+enum chips { f71869, f71869a, f71882fg, f71889f };
static const char * const f7188x_names[] = {
+ "f71869",
+ "f71869a",
"f71882fg",
"f71889f",
};
@@ -146,6 +150,27 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
/* Output mode register (0:open drain 1:push-pull). */
#define gpio_out_mode(base) (base + 3)
+static struct f7188x_gpio_bank f71869_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 6, 0xF0),
+ F7188X_GPIO_BANK(10, 8, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 5, 0xA0),
+ F7188X_GPIO_BANK(60, 6, 0x90),
+};
+
+static struct f7188x_gpio_bank f71869a_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 6, 0xF0),
+ F7188X_GPIO_BANK(10, 8, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 5, 0xA0),
+ F7188X_GPIO_BANK(60, 8, 0x90),
+ F7188X_GPIO_BANK(70, 8, 0x80),
+};
+
static struct f7188x_gpio_bank f71882_gpio_bank[] = {
F7188X_GPIO_BANK(0 , 8, 0xF0),
F7188X_GPIO_BANK(10, 8, 0xE0),
@@ -281,6 +306,14 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
switch (sio->type) {
+ case f71869:
+ data->nr_bank = ARRAY_SIZE(f71869_gpio_bank);
+ data->bank = f71869_gpio_bank;
+ break;
+ case f71869a:
+ data->nr_bank = ARRAY_SIZE(f71869a_gpio_bank);
+ data->bank = f71869a_gpio_bank;
+ break;
case f71882fg:
data->nr_bank = ARRAY_SIZE(f71882_gpio_bank);
data->bank = f71882_gpio_bank;
@@ -354,6 +387,12 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
devid = superio_inw(addr, SIO_DEVID);
switch (devid) {
+ case SIO_F71869_ID:
+ sio->type = f71869;
+ break;
+ case SIO_F71869A_ID:
+ sio->type = f71869a;
+ break;
case SIO_F71882_ID:
sio->type = f71882fg;
break;
@@ -410,7 +449,7 @@ err:
}
/*
- * Try to match a supported Fintech device by reading the (hard-wired)
+ * Try to match a supported Fintek device by reading the (hard-wired)
* configuration I/O ports. If available, then register both the platform
* device and driver to support the GPIOs.
*/
@@ -450,6 +489,6 @@ static void __exit f7188x_gpio_exit(void)
}
module_exit(f7188x_gpio_exit);
-MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71882FG and F71889F");
+MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG and F71889F");
MODULE_AUTHOR("Simon Guinot <simon.guinot@sequanux.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 7818cd1453ae..4ba7ed502131 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -173,6 +173,11 @@ static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
return !!(ichx_priv.use_gpio & (1 << (nr / 32)));
}
+static int ichx_gpio_get_direction(struct gpio_chip *gpio, unsigned nr)
+{
+ return ichx_read_bit(GPIO_IO_SEL, nr) ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+}
+
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
/*
@@ -286,6 +291,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
ichx_priv.desc->get : ichx_gpio_get;
chip->set = ichx_gpio_set;
+ chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
chip->base = modparam_gpiobase;
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 443518f63f15..6b8115f34208 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -156,7 +156,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
}
gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
- if (gpio == NULL)
+ if (!gpio)
return -ENOMEM;
gpio->pld = pld;
diff --git a/arch/mips/loongson/common/gpio.c b/drivers/gpio/gpio-loongson.c
index 29dbaa253061..ccc65a1aea88 100644
--- a/arch/mips/loongson/common/gpio.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -1,8 +1,10 @@
/*
- * STLS2F GPIO Support
+ * Loongson-2F/3A/3B GPIO Support
*
* Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
* Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
+ * Copyright (c) 2013 Hongbing Hu <huhb@lemote.com>
+ * Copyright (c) 2014 Huacai Chen <chenhc@lemote.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,67 +22,23 @@
#include <linux/gpio.h>
#define STLS2F_N_GPIO 4
-#define STLS2F_GPIO_IN_OFFSET 16
+#define STLS3A_N_GPIO 16
-static DEFINE_SPINLOCK(gpio_lock);
-
-int gpio_get_value(unsigned gpio)
-{
- u32 val;
- u32 mask;
-
- if (gpio >= STLS2F_N_GPIO)
- return __gpio_get_value(gpio);
-
- mask = 1 << (gpio + STLS2F_GPIO_IN_OFFSET);
- spin_lock(&gpio_lock);
- val = LOONGSON_GPIODATA;
- spin_unlock(&gpio_lock);
+#ifdef CONFIG_CPU_LOONGSON3
+#define LOONGSON_N_GPIO STLS3A_N_GPIO
+#else
+#define LOONGSON_N_GPIO STLS2F_N_GPIO
+#endif
- return (val & mask) != 0;
-}
-EXPORT_SYMBOL(gpio_get_value);
-
-void gpio_set_value(unsigned gpio, int state)
-{
- u32 val;
- u32 mask;
-
- if (gpio >= STLS2F_N_GPIO) {
- __gpio_set_value(gpio, state);
- return ;
- }
-
- mask = 1 << gpio;
-
- spin_lock(&gpio_lock);
- val = LOONGSON_GPIODATA;
- if (state)
- val |= mask;
- else
- val &= (~mask);
- LOONGSON_GPIODATA = val;
- spin_unlock(&gpio_lock);
-}
-EXPORT_SYMBOL(gpio_set_value);
+#define LOONGSON_GPIO_IN_OFFSET 16
-int gpio_cansleep(unsigned gpio)
-{
- if (gpio < STLS2F_N_GPIO)
- return 0;
- else
- return __gpio_cansleep(gpio);
-}
-EXPORT_SYMBOL(gpio_cansleep);
+static DEFINE_SPINLOCK(gpio_lock);
-static int ls2f_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
u32 temp;
u32 mask;
- if (gpio >= STLS2F_N_GPIO)
- return -EINVAL;
-
spin_lock(&gpio_lock);
mask = 1 << gpio;
temp = LOONGSON_GPIOIE;
@@ -91,15 +49,12 @@ static int ls2f_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
return 0;
}
-static int ls2f_gpio_direction_output(struct gpio_chip *chip,
+static int loongson_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int level)
{
u32 temp;
u32 mask;
- if (gpio >= STLS2F_N_GPIO)
- return -EINVAL;
-
gpio_set_value(gpio, level);
spin_lock(&gpio_lock);
mask = 1 << gpio;
@@ -111,29 +66,50 @@ static int ls2f_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static int ls2f_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
- return gpio_get_value(gpio);
+ u32 val;
+ u32 mask;
+
+ mask = 1 << (gpio + LOONGSON_GPIO_IN_OFFSET);
+ spin_lock(&gpio_lock);
+ val = LOONGSON_GPIODATA;
+ spin_unlock(&gpio_lock);
+
+ return (val & mask) != 0;
}
-static void ls2f_gpio_set_value(struct gpio_chip *chip,
+static void loongson_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
- gpio_set_value(gpio, value);
+ u32 val;
+ u32 mask;
+
+ mask = 1 << gpio;
+
+ spin_lock(&gpio_lock);
+ val = LOONGSON_GPIODATA;
+ if (value)
+ val |= mask;
+ else
+ val &= (~mask);
+ LOONGSON_GPIODATA = val;
+ spin_unlock(&gpio_lock);
}
-static struct gpio_chip ls2f_chip = {
- .label = "ls2f",
- .direction_input = ls2f_gpio_direction_input,
- .get = ls2f_gpio_get_value,
- .direction_output = ls2f_gpio_direction_output,
- .set = ls2f_gpio_set_value,
+static struct gpio_chip loongson_chip = {
+ .label = "Loongson-gpio-chip",
+ .direction_input = loongson_gpio_direction_input,
+ .get = loongson_gpio_get_value,
+ .direction_output = loongson_gpio_direction_output,
+ .set = loongson_gpio_set_value,
.base = 0,
- .ngpio = STLS2F_N_GPIO,
+ .ngpio = LOONGSON_N_GPIO,
+ .can_sleep = false,
};
-static int __init ls2f_gpio_setup(void)
+static int __init loongson_gpio_setup(void)
{
- return gpiochip_add(&ls2f_chip);
+ return gpiochip_add(&loongson_chip);
}
-arch_initcall(ls2f_gpio_setup);
+postcore_initcall(loongson_gpio_setup);
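With the Loongson code moved under drivers/gpio and converted to register a plain gpio_chip, the old exported gpio_get_value()/gpio_set_value() wrappers disappear and consumers go through gpiolib like on any other platform. A hedged usage sketch, where the GPIO offset and label are arbitrary:

#include <linux/gpio.h>

static int example_toggle_loongson_pin(void)
{
	unsigned gpio = 3;		/* arbitrary offset; chip base is 0 */
	int err;

	err = gpio_request(gpio, "example");
	if (err)
		return err;

	gpio_direction_output(gpio, 1);
	gpio_set_value(gpio, 0);
	gpio_free(gpio);
	return 0;
}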
diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c
index 40ab6dfb6021..0cc2c279ab5c 100644
--- a/drivers/gpio/gpio-max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -35,7 +35,6 @@ static int max7300_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max7301 *ts;
- int ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
@@ -49,8 +48,7 @@ static int max7300_probe(struct i2c_client *client,
ts->write = max7300_i2c_write;
ts->dev = &client->dev;
- ret = __max730x_probe(ts);
- return ret;
+ return __max730x_probe(ts);
}
static int max7300_remove(struct i2c_client *client)
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index a095b2393fe9..0fa4543c5e02 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -4,6 +4,7 @@
* Copyright (C) 2007 Marvell International Ltd.
* Copyright (C) 2008 Jack Ren <jack.ren@marvell.com>
* Copyright (C) 2008 Eric Miao <eric.miao@marvell.com>
+ * Copyright (C) 2015 Linus Walleij <linus.walleij@linaro.org>
*
* Derived from drivers/gpio/pca953x.c
*
@@ -16,10 +17,8 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/i2c.h>
#include <linux/i2c/max732x.h>
#include <linux/of.h>
@@ -150,9 +149,7 @@ struct max732x_chip {
uint8_t reg_out[2];
#ifdef CONFIG_GPIO_MAX732X_IRQ
- struct irq_domain *irq_domain;
struct mutex irq_lock;
- int irq_base;
uint8_t irq_mask;
uint8_t irq_mask_cur;
uint8_t irq_trig_raise;
@@ -356,35 +353,26 @@ static void max732x_irq_update_mask(struct max732x_chip *chip)
mutex_unlock(&chip->lock);
}
-static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
-{
- struct max732x_chip *chip = to_max732x(gc);
-
- if (chip->irq_domain) {
- return irq_create_mapping(chip->irq_domain,
- chip->irq_base + off);
- } else {
- return -ENXIO;
- }
-}
-
static void max732x_irq_mask(struct irq_data *d)
{
- struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max732x_chip *chip = to_max732x(gc);
chip->irq_mask_cur &= ~(1 << d->hwirq);
}
static void max732x_irq_unmask(struct irq_data *d)
{
- struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max732x_chip *chip = to_max732x(gc);
chip->irq_mask_cur |= 1 << d->hwirq;
}
static void max732x_irq_bus_lock(struct irq_data *d)
{
- struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max732x_chip *chip = to_max732x(gc);
mutex_lock(&chip->irq_lock);
chip->irq_mask_cur = chip->irq_mask;
@@ -392,7 +380,8 @@ static void max732x_irq_bus_lock(struct irq_data *d)
static void max732x_irq_bus_sync_unlock(struct irq_data *d)
{
- struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max732x_chip *chip = to_max732x(gc);
uint16_t new_irqs;
uint16_t level;
@@ -410,7 +399,8 @@ static void max732x_irq_bus_sync_unlock(struct irq_data *d)
static int max732x_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max732x_chip *chip = to_max732x(gc);
uint16_t off = d->hwirq;
uint16_t mask = 1 << off;
@@ -492,7 +482,8 @@ static irqreturn_t max732x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- handle_nested_irq(irq_find_mapping(chip->irq_domain, level));
+ handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain,
+ level));
pending &= ~(1 << level);
} while (pending);
@@ -500,86 +491,50 @@ static irqreturn_t max732x_irq_handler(int irq, void *devid)
return IRQ_HANDLED;
}
-static int max732x_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct max732x_chip *chip = h->host_data;
-
- if (!(chip->dir_input & (1 << hw))) {
- dev_err(&chip->client->dev,
- "Attempt to map output line as IRQ line: %lu\n",
- hw);
- return -EPERM;
- }
-
- irq_set_chip_data(virq, chip);
- irq_set_chip_and_handler(virq, &max732x_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
- set_irq_flags(virq, IRQF_VALID);
-#else
- irq_set_noprobe(virq);
-#endif
-
- return 0;
-}
-
-static struct irq_domain_ops max732x_irq_domain_ops = {
- .map = max732x_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static void max732x_irq_teardown(struct max732x_chip *chip)
-{
- if (chip->client->irq && chip->irq_domain)
- irq_domain_remove(chip->irq_domain);
-}
-
static int max732x_irq_setup(struct max732x_chip *chip,
const struct i2c_device_id *id)
{
struct i2c_client *client = chip->client;
struct max732x_platform_data *pdata = dev_get_platdata(&client->dev);
int has_irq = max732x_features[id->driver_data] >> 32;
+ int irq_base = 0;
int ret;
if (((pdata && pdata->irq_base) || client->irq)
&& has_irq != INT_NONE) {
if (pdata)
- chip->irq_base = pdata->irq_base;
+ irq_base = pdata->irq_base;
chip->irq_features = has_irq;
mutex_init(&chip->irq_lock);
- chip->irq_domain = irq_domain_add_simple(client->dev.of_node,
- chip->gpio_chip.ngpio, chip->irq_base,
- &max732x_irq_domain_ops, chip);
- if (!chip->irq_domain) {
- dev_err(&client->dev, "Failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- ret = request_threaded_irq(client->irq,
- NULL,
- max732x_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(&client->dev), chip);
+ ret = devm_request_threaded_irq(&client->dev,
+ client->irq,
+ NULL,
+ max732x_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
client->irq);
- goto out_failed;
+ return ret;
}
-
- chip->gpio_chip.to_irq = max732x_gpio_to_irq;
+ ret = gpiochip_irqchip_add(&chip->gpio_chip,
+ &max732x_irq_chip,
+ irq_base,
+ handle_edge_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&client->dev,
+ "could not connect irqchip to gpiochip\n");
+ return ret;
+ }
+ gpiochip_set_chained_irqchip(&chip->gpio_chip,
+ &max732x_irq_chip,
+ client->irq,
+ NULL);
}
return 0;
-
-out_failed:
- max732x_irq_teardown(chip);
- return ret;
}
#else /* CONFIG_GPIO_MAX732X_IRQ */
@@ -595,10 +550,6 @@ static int max732x_irq_setup(struct max732x_chip *chip,
return 0;
}
-
-static void max732x_irq_teardown(struct max732x_chip *chip)
-{
-}
#endif
static int max732x_setup_gpio(struct max732x_chip *chip,
@@ -730,13 +681,15 @@ static int max732x_probe(struct i2c_client *client,
if (nr_port > 8)
max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
- ret = max732x_irq_setup(chip, id);
+ ret = gpiochip_add(&chip->gpio_chip);
if (ret)
goto out_failed;
- ret = gpiochip_add(&chip->gpio_chip);
- if (ret)
+ ret = max732x_irq_setup(chip, id);
+ if (ret) {
+ gpiochip_remove(&chip->gpio_chip);
goto out_failed;
+ }
if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@@ -751,7 +704,6 @@ static int max732x_probe(struct i2c_client *client,
out_failed:
if (chip->client_dummy)
i2c_unregister_device(chip->client_dummy);
- max732x_irq_teardown(chip);
return ret;
}
@@ -774,8 +726,6 @@ static int max732x_remove(struct i2c_client *client)
gpiochip_remove(&chip->gpio_chip);
- max732x_irq_teardown(chip);
-
/* unregister any dummy i2c_client */
if (chip->client_dummy)
i2c_unregister_device(chip->client_dummy);
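The max732x conversion above drops the driver's private irq_domain and to_irq() callback in favour of gpiochip_irqchip_add() plus gpiochip_set_chained_irqchip(), so gpiolib now owns the GPIO-to-IRQ mapping. From a consumer's point of view nothing changes; a sketch of requesting an interrupt on one of the expander's lines, where the handler, name and GPIO number are made up:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_line_irq(unsigned gpio)
{
	int irq = gpio_to_irq(gpio);	/* resolved via the gpiochip irqdomain */

	if (irq < 0)
		return irq;

	/* Nested irqchip on an I2C expander, so use a threaded handler. */
	return request_threaded_irq(irq, NULL, example_isr,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "example-line", NULL);
}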
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 21b1ce5abdfe..ee93c0ab0a59 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -58,6 +58,11 @@ static int mb86s70_gpio_request(struct gpio_chip *gc, unsigned gpio)
spin_lock_irqsave(&gchip->lock, flags);
val = readl(gchip->base + PFR(gpio));
+ if (!(val & OFFSET(gpio))) {
+ spin_unlock_irqrestore(&gchip->lock, flags);
+ return -EINVAL;
+ }
+
val &= ~OFFSET(gpio);
writel(val, gchip->base + PFR(gpio));
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 4e3e160e5db2..a431604c9e67 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -151,7 +151,7 @@ static int mc33880_remove(struct spi_device *spi)
struct mc33880 *mc;
mc = spi_get_drvdata(spi);
- if (mc == NULL)
+ if (!mc)
return -ENODEV;
gpiochip_remove(&mc->chip);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index eea5d7e578c9..2fc7ff852d16 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -949,10 +949,12 @@ static int mcp23s08_probe(struct spi_device *spi)
if (!chips)
return -ENODEV;
- data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
- GFP_KERNEL);
+ data = devm_kzalloc(&spi->dev,
+ sizeof(*data) + chips * sizeof(struct mcp23s08),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
+
spi_set_drvdata(spi, data);
spi->irq = irq_of_parse_and_map(spi->dev.of_node, 0);
@@ -989,7 +991,6 @@ fail:
continue;
gpiochip_remove(&data->mcp[addr]->chip);
}
- kfree(data);
return status;
}
@@ -1007,7 +1008,7 @@ static int mcp23s08_remove(struct spi_device *spi)
mcp23s08_irq_teardown(data->mcp[addr]);
gpiochip_remove(&data->mcp[addr]->chip);
}
- kfree(data);
+
return 0;
}
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
deleted file mode 100644
index edf285e26667..000000000000
--- a/drivers/gpio/gpio-msm-v1.c
+++ /dev/null
@@ -1,714 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/bitops.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-
-#include <mach/msm_gpiomux.h>
-
-/* see 80-VA736-2 Rev C pp 695-751
-**
-** These are actually the *shadow* gpio registers, since the
-** real ones (which allow full access) are only available to the
-** ARM9 side of the world.
-**
-** Since the _BASE need to be page-aligned when we're mapping them
-** to virtual addresses, adjust for the additional offset in these
-** macros.
-*/
-
-#define MSM_GPIO1_REG(off) (off)
-#define MSM_GPIO2_REG(off) (off)
-#define MSM_GPIO1_SHADOW_REG(off) (off)
-#define MSM_GPIO2_SHADOW_REG(off) (off)
-
-/*
- * MSM7X00 registers
- */
-/* output value */
-#define MSM7X00_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
-#define MSM7X00_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
-#define MSM7X00_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
-#define MSM7X00_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
-#define MSM7X00_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 106-95 */
-#define MSM7X00_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x50) /* gpio 107-121 */
-
-/* same pin map as above, output enable */
-#define MSM7X00_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x10)
-#define MSM7X00_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
-#define MSM7X00_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x14)
-#define MSM7X00_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x18)
-#define MSM7X00_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x1C)
-#define MSM7X00_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x54)
-
-/* same pin map as above, input read */
-#define MSM7X00_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x34)
-#define MSM7X00_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
-#define MSM7X00_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x38)
-#define MSM7X00_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x3C)
-#define MSM7X00_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x40)
-#define MSM7X00_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x44)
-
-/* same pin map as above, 1=edge 0=level interrupt */
-#define MSM7X00_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x60)
-#define MSM7X00_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
-#define MSM7X00_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x64)
-#define MSM7X00_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x68)
-#define MSM7X00_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x6C)
-#define MSM7X00_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0xC0)
-
-/* same pin map as above, 1=positive 0=negative */
-#define MSM7X00_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x70)
-#define MSM7X00_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
-#define MSM7X00_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x74)
-#define MSM7X00_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x78)
-#define MSM7X00_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x7C)
-#define MSM7X00_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xBC)
-
-/* same pin map as above, interrupt enable */
-#define MSM7X00_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0x80)
-#define MSM7X00_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
-#define MSM7X00_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0x84)
-#define MSM7X00_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0x88)
-#define MSM7X00_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0x8C)
-#define MSM7X00_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xB8)
-
-/* same pin map as above, write 1 to clear interrupt */
-#define MSM7X00_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0x90)
-#define MSM7X00_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
-#define MSM7X00_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0x94)
-#define MSM7X00_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0x98)
-#define MSM7X00_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0x9C)
-#define MSM7X00_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xB4)
-
-/* same pin map as above, 1=interrupt pending */
-#define MSM7X00_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xA0)
-#define MSM7X00_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
-#define MSM7X00_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xA4)
-#define MSM7X00_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xA8)
-#define MSM7X00_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xAC)
-#define MSM7X00_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0xB0)
-
-/*
- * QSD8X50 registers
- */
-/* output value */
-#define QSD8X50_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
-#define QSD8X50_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
-#define QSD8X50_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
-#define QSD8X50_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
-#define QSD8X50_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 103-95 */
-#define QSD8X50_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x10) /* gpio 121-104 */
-#define QSD8X50_GPIO_OUT_6 MSM_GPIO1_SHADOW_REG(0x14) /* gpio 152-122 */
-#define QSD8X50_GPIO_OUT_7 MSM_GPIO1_SHADOW_REG(0x18) /* gpio 164-153 */
-
-/* same pin map as above, output enable */
-#define QSD8X50_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x20)
-#define QSD8X50_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
-#define QSD8X50_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x24)
-#define QSD8X50_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x28)
-#define QSD8X50_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x2C)
-#define QSD8X50_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x30)
-#define QSD8X50_GPIO_OE_6 MSM_GPIO1_SHADOW_REG(0x34)
-#define QSD8X50_GPIO_OE_7 MSM_GPIO1_SHADOW_REG(0x38)
-
-/* same pin map as above, input read */
-#define QSD8X50_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x50)
-#define QSD8X50_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
-#define QSD8X50_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x54)
-#define QSD8X50_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x58)
-#define QSD8X50_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x5C)
-#define QSD8X50_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x60)
-#define QSD8X50_GPIO_IN_6 MSM_GPIO1_SHADOW_REG(0x64)
-#define QSD8X50_GPIO_IN_7 MSM_GPIO1_SHADOW_REG(0x68)
-
-/* same pin map as above, 1=edge 0=level interrupt */
-#define QSD8X50_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x70)
-#define QSD8X50_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
-#define QSD8X50_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x74)
-#define QSD8X50_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x78)
-#define QSD8X50_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x7C)
-#define QSD8X50_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0x80)
-#define QSD8X50_GPIO_INT_EDGE_6 MSM_GPIO1_SHADOW_REG(0x84)
-#define QSD8X50_GPIO_INT_EDGE_7 MSM_GPIO1_SHADOW_REG(0x88)
-
-/* same pin map as above, 1=positive 0=negative */
-#define QSD8X50_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x90)
-#define QSD8X50_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
-#define QSD8X50_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x94)
-#define QSD8X50_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x98)
-#define QSD8X50_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x9C)
-#define QSD8X50_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xA0)
-#define QSD8X50_GPIO_INT_POS_6 MSM_GPIO1_SHADOW_REG(0xA4)
-#define QSD8X50_GPIO_INT_POS_7 MSM_GPIO1_SHADOW_REG(0xA8)
-
-/* same pin map as above, interrupt enable */
-#define QSD8X50_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0xB0)
-#define QSD8X50_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
-#define QSD8X50_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0xB4)
-#define QSD8X50_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0xB8)
-#define QSD8X50_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0xBC)
-#define QSD8X50_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xC0)
-#define QSD8X50_GPIO_INT_EN_6 MSM_GPIO1_SHADOW_REG(0xC4)
-#define QSD8X50_GPIO_INT_EN_7 MSM_GPIO1_SHADOW_REG(0xC8)
-
-/* same pin map as above, write 1 to clear interrupt */
-#define QSD8X50_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0xD0)
-#define QSD8X50_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
-#define QSD8X50_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0xD4)
-#define QSD8X50_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0xD8)
-#define QSD8X50_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0xDC)
-#define QSD8X50_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xE0)
-#define QSD8X50_GPIO_INT_CLEAR_6 MSM_GPIO1_SHADOW_REG(0xE4)
-#define QSD8X50_GPIO_INT_CLEAR_7 MSM_GPIO1_SHADOW_REG(0xE8)
-
-/* same pin map as above, 1=interrupt pending */
-#define QSD8X50_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xF0)
-#define QSD8X50_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
-#define QSD8X50_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xF4)
-#define QSD8X50_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xF8)
-#define QSD8X50_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xFC)
-#define QSD8X50_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0x100)
-#define QSD8X50_GPIO_INT_STATUS_6 MSM_GPIO1_SHADOW_REG(0x104)
-#define QSD8X50_GPIO_INT_STATUS_7 MSM_GPIO1_SHADOW_REG(0x108)
-
-/*
- * MSM7X30 registers
- */
-/* output value */
-#define MSM7X30_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
-#define MSM7X30_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */
-#define MSM7X30_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */
-#define MSM7X30_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
-#define MSM7X30_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
-#define MSM7X30_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */
-#define MSM7X30_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */
-#define MSM7X30_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */
-
-/* same pin map as above, output enable */
-#define MSM7X30_GPIO_OE_0 MSM_GPIO1_REG(0x10)
-#define MSM7X30_GPIO_OE_1 MSM_GPIO2_REG(0x08)
-#define MSM7X30_GPIO_OE_2 MSM_GPIO1_REG(0x14)
-#define MSM7X30_GPIO_OE_3 MSM_GPIO1_REG(0x18)
-#define MSM7X30_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
-#define MSM7X30_GPIO_OE_5 MSM_GPIO1_REG(0x54)
-#define MSM7X30_GPIO_OE_6 MSM_GPIO1_REG(0xC8)
-#define MSM7X30_GPIO_OE_7 MSM_GPIO1_REG(0x218)
-
-/* same pin map as above, input read */
-#define MSM7X30_GPIO_IN_0 MSM_GPIO1_REG(0x34)
-#define MSM7X30_GPIO_IN_1 MSM_GPIO2_REG(0x20)
-#define MSM7X30_GPIO_IN_2 MSM_GPIO1_REG(0x38)
-#define MSM7X30_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
-#define MSM7X30_GPIO_IN_4 MSM_GPIO1_REG(0x40)
-#define MSM7X30_GPIO_IN_5 MSM_GPIO1_REG(0x44)
-#define MSM7X30_GPIO_IN_6 MSM_GPIO1_REG(0xCC)
-#define MSM7X30_GPIO_IN_7 MSM_GPIO1_REG(0x21C)
-
-/* same pin map as above, 1=edge 0=level interrupt */
-#define MSM7X30_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
-#define MSM7X30_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
-#define MSM7X30_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
-#define MSM7X30_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
-#define MSM7X30_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
-#define MSM7X30_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
-#define MSM7X30_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0)
-#define MSM7X30_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240)
-
-/* same pin map as above, 1=positive 0=negative */
-#define MSM7X30_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
-#define MSM7X30_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
-#define MSM7X30_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
-#define MSM7X30_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
-#define MSM7X30_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
-#define MSM7X30_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
-#define MSM7X30_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4)
-#define MSM7X30_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228)
-
-/* same pin map as above, interrupt enable */
-#define MSM7X30_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
-#define MSM7X30_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
-#define MSM7X30_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
-#define MSM7X30_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
-#define MSM7X30_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
-#define MSM7X30_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
-#define MSM7X30_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8)
-#define MSM7X30_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C)
-
-/* same pin map as above, write 1 to clear interrupt */
-#define MSM7X30_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
-#define MSM7X30_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
-#define MSM7X30_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
-#define MSM7X30_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
-#define MSM7X30_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
-#define MSM7X30_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
-#define MSM7X30_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC)
-#define MSM7X30_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230)
-
-/* same pin map as above, 1=interrupt pending */
-#define MSM7X30_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
-#define MSM7X30_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
-#define MSM7X30_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
-#define MSM7X30_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
-#define MSM7X30_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
-#define MSM7X30_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
-#define MSM7X30_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0)
-#define MSM7X30_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234)
-
-#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0)
-
-#define MSM_GPIO_BANK(soc, bank, first, last) \
- { \
- .regs[MSM_GPIO_OUT] = soc##_GPIO_OUT_##bank, \
- .regs[MSM_GPIO_IN] = soc##_GPIO_IN_##bank, \
- .regs[MSM_GPIO_INT_STATUS] = soc##_GPIO_INT_STATUS_##bank, \
- .regs[MSM_GPIO_INT_CLEAR] = soc##_GPIO_INT_CLEAR_##bank, \
- .regs[MSM_GPIO_INT_EN] = soc##_GPIO_INT_EN_##bank, \
- .regs[MSM_GPIO_INT_EDGE] = soc##_GPIO_INT_EDGE_##bank, \
- .regs[MSM_GPIO_INT_POS] = soc##_GPIO_INT_POS_##bank, \
- .regs[MSM_GPIO_OE] = soc##_GPIO_OE_##bank, \
- .chip = { \
- .base = (first), \
- .ngpio = (last) - (first) + 1, \
- .get = msm_gpio_get, \
- .set = msm_gpio_set, \
- .direction_input = msm_gpio_direction_input, \
- .direction_output = msm_gpio_direction_output, \
- .to_irq = msm_gpio_to_irq, \
- .request = msm_gpio_request, \
- .free = msm_gpio_free, \
- } \
- }
-
-#define MSM_GPIO_BROKEN_INT_CLEAR 1
-
-enum msm_gpio_reg {
- MSM_GPIO_IN,
- MSM_GPIO_OUT,
- MSM_GPIO_INT_STATUS,
- MSM_GPIO_INT_CLEAR,
- MSM_GPIO_INT_EN,
- MSM_GPIO_INT_EDGE,
- MSM_GPIO_INT_POS,
- MSM_GPIO_OE,
- MSM_GPIO_REG_NR
-};
-
-struct msm_gpio_chip {
- spinlock_t lock;
- struct gpio_chip chip;
- unsigned long regs[MSM_GPIO_REG_NR];
-#if MSM_GPIO_BROKEN_INT_CLEAR
- unsigned int_status_copy;
-#endif
- unsigned int both_edge_detect;
- unsigned int int_enable[2]; /* 0: awake, 1: sleep */
- void __iomem *base;
-};
-
-struct msm_gpio_initdata {
- struct msm_gpio_chip *chips;
- int count;
-};
-
-static void msm_gpio_writel(struct msm_gpio_chip *chip, u32 val,
- enum msm_gpio_reg reg)
-{
- writel(val, chip->base + chip->regs[reg]);
-}
-
-static u32 msm_gpio_readl(struct msm_gpio_chip *chip, enum msm_gpio_reg reg)
-{
- return readl(chip->base + chip->regs[reg]);
-}
-
-static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
- unsigned offset, unsigned on)
-{
- unsigned mask = BIT(offset);
- unsigned val;
-
- val = msm_gpio_readl(msm_chip, MSM_GPIO_OUT);
- if (on)
- msm_gpio_writel(msm_chip, val | mask, MSM_GPIO_OUT);
- else
- msm_gpio_writel(msm_chip, val & ~mask, MSM_GPIO_OUT);
- return 0;
-}
-
-static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
-{
- int loop_limit = 100;
- unsigned pol, val, val2, intstat;
- do {
- val = msm_gpio_readl(msm_chip, MSM_GPIO_IN);
- pol = msm_gpio_readl(msm_chip, MSM_GPIO_INT_POS);
- pol = (pol & ~msm_chip->both_edge_detect) |
- (~val & msm_chip->both_edge_detect);
- msm_gpio_writel(msm_chip, pol, MSM_GPIO_INT_POS);
- intstat = msm_gpio_readl(msm_chip, MSM_GPIO_INT_STATUS);
- val2 = msm_gpio_readl(msm_chip, MSM_GPIO_IN);
- if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
- return;
- } while (loop_limit-- > 0);
- printk(KERN_ERR "msm_gpio_update_both_edge_detect, "
- "failed to reach stable state %x != %x\n", val, val2);
-}
-
-static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
- unsigned offset)
-{
- unsigned bit = BIT(offset);
-
-#if MSM_GPIO_BROKEN_INT_CLEAR
- /* Save interrupts that already triggered before we lose them. */
- /* Any interrupt that triggers between the read of int_status */
- /* and the write to int_clear will still be lost though. */
- msm_chip->int_status_copy |=
- msm_gpio_readl(msm_chip, MSM_GPIO_INT_STATUS);
- msm_chip->int_status_copy &= ~bit;
-#endif
- msm_gpio_writel(msm_chip, bit, MSM_GPIO_INT_CLEAR);
- msm_gpio_update_both_edge_detect(msm_chip);
- return 0;
-}
-
-static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct msm_gpio_chip *msm_chip;
- unsigned long irq_flags;
- u32 val;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- val = msm_gpio_readl(msm_chip, MSM_GPIO_OE) & ~BIT(offset);
- msm_gpio_writel(msm_chip, val, MSM_GPIO_OE);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static int
-msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct msm_gpio_chip *msm_chip;
- unsigned long irq_flags;
- u32 val;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- msm_gpio_write(msm_chip, offset, value);
- val = msm_gpio_readl(msm_chip, MSM_GPIO_OE) | BIT(offset);
- msm_gpio_writel(msm_chip, val, MSM_GPIO_OE);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct msm_gpio_chip *msm_chip;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- return (msm_gpio_readl(msm_chip, MSM_GPIO_IN) & (1U << offset)) ? 1 : 0;
-}
-
-static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct msm_gpio_chip *msm_chip;
- unsigned long irq_flags;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- msm_gpio_write(msm_chip, offset, value);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- return MSM_GPIO_TO_INT(chip->base + offset);
-}
-
-#ifdef CONFIG_MSM_GPIOMUX
-static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- return msm_gpiomux_get(chip->base + offset);
-}
-
-static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- msm_gpiomux_put(chip->base + offset);
-}
-#else
-#define msm_gpio_request NULL
-#define msm_gpio_free NULL
-#endif
-
-static struct msm_gpio_chip *msm_gpio_chips;
-static int msm_gpio_count;
-
-static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = {
- MSM_GPIO_BANK(MSM7X00, 0, 0, 15),
- MSM_GPIO_BANK(MSM7X00, 1, 16, 42),
- MSM_GPIO_BANK(MSM7X00, 2, 43, 67),
- MSM_GPIO_BANK(MSM7X00, 3, 68, 94),
- MSM_GPIO_BANK(MSM7X00, 4, 95, 106),
- MSM_GPIO_BANK(MSM7X00, 5, 107, 121),
-};
-
-static struct msm_gpio_initdata msm_gpio_7x01_init = {
- .chips = msm_gpio_chips_msm7x01,
- .count = ARRAY_SIZE(msm_gpio_chips_msm7x01),
-};
-
-static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = {
- MSM_GPIO_BANK(MSM7X30, 0, 0, 15),
- MSM_GPIO_BANK(MSM7X30, 1, 16, 43),
- MSM_GPIO_BANK(MSM7X30, 2, 44, 67),
- MSM_GPIO_BANK(MSM7X30, 3, 68, 94),
- MSM_GPIO_BANK(MSM7X30, 4, 95, 106),
- MSM_GPIO_BANK(MSM7X30, 5, 107, 133),
- MSM_GPIO_BANK(MSM7X30, 6, 134, 150),
- MSM_GPIO_BANK(MSM7X30, 7, 151, 181),
-};
-
-static struct msm_gpio_initdata msm_gpio_7x30_init = {
- .chips = msm_gpio_chips_msm7x30,
- .count = ARRAY_SIZE(msm_gpio_chips_msm7x30),
-};
-
-static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = {
- MSM_GPIO_BANK(QSD8X50, 0, 0, 15),
- MSM_GPIO_BANK(QSD8X50, 1, 16, 42),
- MSM_GPIO_BANK(QSD8X50, 2, 43, 67),
- MSM_GPIO_BANK(QSD8X50, 3, 68, 94),
- MSM_GPIO_BANK(QSD8X50, 4, 95, 103),
- MSM_GPIO_BANK(QSD8X50, 5, 104, 121),
- MSM_GPIO_BANK(QSD8X50, 6, 122, 152),
- MSM_GPIO_BANK(QSD8X50, 7, 153, 164),
-};
-
-static struct msm_gpio_initdata msm_gpio_8x50_init = {
- .chips = msm_gpio_chips_qsd8x50,
- .count = ARRAY_SIZE(msm_gpio_chips_qsd8x50),
-};
-
-static void msm_gpio_irq_ack(struct irq_data *d)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- msm_gpio_clear_detect_status(msm_chip,
- d->irq - gpio_to_irq(msm_chip->chip.base));
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static void msm_gpio_irq_mask(struct irq_data *d)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- /* level triggered interrupts are also latched */
- if (!(msm_gpio_readl(msm_chip, MSM_GPIO_INT_EDGE) & BIT(offset)))
- msm_gpio_clear_detect_status(msm_chip, offset);
- msm_chip->int_enable[0] &= ~BIT(offset);
- msm_gpio_writel(msm_chip, msm_chip->int_enable[0], MSM_GPIO_INT_EN);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- /* level triggered interrupts are also latched */
- if (!(msm_gpio_readl(msm_chip, MSM_GPIO_INT_EDGE) & BIT(offset)))
- msm_gpio_clear_detect_status(msm_chip, offset);
- msm_chip->int_enable[0] |= BIT(offset);
- msm_gpio_writel(msm_chip, msm_chip->int_enable[0], MSM_GPIO_INT_EN);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
-
- if (on)
- msm_chip->int_enable[1] |= BIT(offset);
- else
- msm_chip->int_enable[1] &= ~BIT(offset);
-
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
- unsigned val, mask = BIT(offset);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- val = msm_gpio_readl(msm_chip, MSM_GPIO_INT_EDGE);
- if (flow_type & IRQ_TYPE_EDGE_BOTH) {
- msm_gpio_writel(msm_chip, val | mask, MSM_GPIO_INT_EDGE);
- __irq_set_handler_locked(d->irq, handle_edge_irq);
- } else {
- msm_gpio_writel(msm_chip, val & ~mask, MSM_GPIO_INT_EDGE);
- __irq_set_handler_locked(d->irq, handle_level_irq);
- }
- if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
- msm_chip->both_edge_detect |= mask;
- msm_gpio_update_both_edge_detect(msm_chip);
- } else {
- msm_chip->both_edge_detect &= ~mask;
- val = msm_gpio_readl(msm_chip, MSM_GPIO_INT_POS);
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
- val |= mask;
- else
- val &= ~mask;
- msm_gpio_writel(msm_chip, val, MSM_GPIO_INT_POS);
- }
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- int i, j, mask;
- unsigned val;
-
- for (i = 0; i < msm_gpio_count; i++) {
- struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
- val = msm_gpio_readl(msm_chip, MSM_GPIO_INT_STATUS);
- val &= msm_chip->int_enable[0];
- while (val) {
- mask = val & -val;
- j = fls(mask) - 1;
- /* printk("%s %08x %08x bit %d gpio %d irq %d\n",
- __func__, v, m, j, msm_chip->chip.start + j,
- FIRST_GPIO_IRQ + msm_chip->chip.start + j); */
- val &= ~mask;
- generic_handle_irq(FIRST_GPIO_IRQ +
- msm_chip->chip.base + j);
- }
- }
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-}
-
-static struct irq_chip msm_gpio_irq_chip = {
- .name = "msmgpio",
- .irq_ack = msm_gpio_irq_ack,
- .irq_mask = msm_gpio_irq_mask,
- .irq_unmask = msm_gpio_irq_unmask,
- .irq_set_wake = msm_gpio_irq_set_wake,
- .irq_set_type = msm_gpio_irq_set_type,
-};
-
-static int gpio_msm_v1_probe(struct platform_device *pdev)
-{
- int i, j = 0;
- const struct platform_device_id *dev_id = platform_get_device_id(pdev);
- struct msm_gpio_initdata *data;
- int irq1, irq2;
- struct resource *res;
- void __iomem *base1, __iomem *base2;
-
- data = (struct msm_gpio_initdata *)dev_id->driver_data;
- msm_gpio_chips = data->chips;
- msm_gpio_count = data->count;
-
- irq1 = platform_get_irq(pdev, 0);
- if (irq1 < 0)
- return irq1;
-
- irq2 = platform_get_irq(pdev, 1);
- if (irq2 < 0)
- return irq2;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base1 = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base1))
- return PTR_ERR(base1);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base2 = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base2))
- return PTR_ERR(base2);
-
- for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
- if (i - FIRST_GPIO_IRQ >=
- msm_gpio_chips[j].chip.base +
- msm_gpio_chips[j].chip.ngpio)
- j++;
- irq_set_chip_data(i, &msm_gpio_chips[j]);
- irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
- handle_edge_irq);
- set_irq_flags(i, IRQF_VALID);
- }
-
- for (i = 0; i < msm_gpio_count; i++) {
- if (i == 1)
- msm_gpio_chips[i].base = base2;
- else
- msm_gpio_chips[i].base = base1;
- spin_lock_init(&msm_gpio_chips[i].lock);
- msm_gpio_writel(&msm_gpio_chips[i], 0, MSM_GPIO_INT_EN);
- gpiochip_add(&msm_gpio_chips[i].chip);
- }
-
- irq_set_chained_handler(irq1, msm_gpio_irq_handler);
- irq_set_chained_handler(irq2, msm_gpio_irq_handler);
- irq_set_irq_wake(irq1, 1);
- irq_set_irq_wake(irq2, 1);
- return 0;
-}
-
-static struct platform_device_id gpio_msm_v1_device_ids[] = {
- { "gpio-msm-7201", (unsigned long)&msm_gpio_7x01_init },
- { "gpio-msm-7x30", (unsigned long)&msm_gpio_7x30_init },
- { "gpio-msm-8x50", (unsigned long)&msm_gpio_8x50_init },
- { }
-};
-MODULE_DEVICE_TABLE(platform, gpio_msm_v1_device_ids);
-
-static struct platform_driver gpio_msm_v1_driver = {
- .driver = {
- .name = "gpio-msm-v1",
- },
- .probe = gpio_msm_v1_probe,
- .id_table = gpio_msm_v1_device_ids,
-};
-
-static int __init gpio_msm_v1_init(void)
-{
- return platform_driver_register(&gpio_msm_v1_driver);
-}
-postcore_initcall(gpio_msm_v1_init);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d0bc123c7975..1a54205860f5 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -320,11 +320,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
- gc->mask_cache &= ~mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
+ ct->mask_cache_priv &= ~mask;
+
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
irq_gc_unlock(gc);
}
@@ -332,11 +334,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
- gc->mask_cache |= mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
+ ct->mask_cache_priv |= mask;
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
irq_gc_unlock(gc);
}
@@ -344,11 +348,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
- gc->mask_cache &= ~mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
+ ct->mask_cache_priv &= ~mask;
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
irq_gc_unlock(gc);
}
@@ -356,11 +362,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
- gc->mask_cache |= mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
+ ct->mask_cache_priv |= mask;
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
irq_gc_unlock(gc);
}
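The mvebu hunks above stop sharing gc->mask_cache between the edge and level irq_chip_types and use each type's ct->mask_cache_priv instead, since the two kinds of interrupt sit behind different mask registers. That per-type cache is the one the generic irqchip core maintains when the chip is set up with IRQ_GC_MASK_CACHE_PER_TYPE; a rough, hedged sketch of such a setup, where the names and register layout are assumptions:

#include <linux/irq.h>

static int example_setup_two_type_gc(void __iomem *reg_base, int irq_base,
				     int ngpio)
{
	struct irq_chip_generic *gc;

	/* Two chip types (edge, level), each keeping its own cached mask. */
	gc = irq_alloc_generic_chip("example-gpio", 2, irq_base, reg_base,
				    handle_level_irq);
	if (!gc)
		return -ENOMEM;

	irq_setup_generic_chip(gc, IRQ_MSK(ngpio),
			       IRQ_GC_INIT_MASK_CACHE |
			       IRQ_GC_MASK_CACHE_PER_TYPE,
			       IRQ_NOREQUEST, IRQ_LEVEL);
	return 0;
}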
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f476ae2eb0b3..b232397ad7ec 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -75,14 +75,12 @@ struct gpio_bank {
int power_mode;
bool workaround_enabled;
- void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
+ void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
int (*get_context_loss_count)(struct device *dev);
struct omap_gpio_reg_offs *regs;
};
-#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
-#define GPIO_BIT(bank, gpio) (BIT(GPIO_INDEX(bank, gpio)))
#define GPIO_MOD_CTRL_BIT BIT(0)
#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
@@ -90,11 +88,6 @@ struct gpio_bank {
static void omap_gpio_unmask_irq(struct irq_data *d);
-static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
-{
- return bank->chip.base + gpio_irq;
-}
-
static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
@@ -119,11 +112,11 @@ static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
/* set data out value using dedicated set/clear register */
-static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, int gpio,
+static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
int enable)
{
void __iomem *reg = bank->base;
- u32 l = GPIO_BIT(bank, gpio);
+ u32 l = BIT(offset);
if (enable) {
reg += bank->regs->set_dataout;
@@ -137,11 +130,11 @@ static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, int gpio,
}
/* set data out value using mask register */
-static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, int gpio,
+static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
int enable)
{
void __iomem *reg = bank->base + bank->regs->dataout;
- u32 gpio_bit = GPIO_BIT(bank, gpio);
+ u32 gpio_bit = BIT(offset);
u32 l;
l = readl_relaxed(reg);
@@ -208,13 +201,13 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
/**
* omap2_set_gpio_debounce - low level gpio debounce time
* @bank: the gpio bank we're acting upon
- * @gpio: the gpio number on this @gpio
+ * @offset: the gpio number on this @bank
* @debounce: debounce time to use
*
* OMAP's debounce time is in 31us steps so we need
* to convert and round up to the closest unit.
*/
-static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
unsigned debounce)
{
void __iomem *reg;
@@ -231,7 +224,7 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
else
debounce = (debounce / 0x1f) - 1;
- l = GPIO_BIT(bank, gpio);
+ l = BIT(offset);
clk_prepare_enable(bank->dbck);
reg = bank->base + bank->regs->debounce;
@@ -266,16 +259,16 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
/**
* omap_clear_gpio_debounce - clear debounce settings for a gpio
* @bank: the gpio bank we're acting upon
- * @gpio: the gpio number on this @gpio
+ * @offset: the gpio number on this @bank
*
* If a gpio is using debounce, then clear the debounce enable bit and if
* this is the only gpio in this bank using debounce, then clear the debounce
* time too. The debounce clock will also be disabled when calling this function
* if this is the only gpio in the bank using debounce.
*/
-static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
+static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
- u32 gpio_bit = GPIO_BIT(bank, gpio);
+ u32 gpio_bit = BIT(offset);
if (!bank->dbck_flag)
return;
@@ -472,42 +465,32 @@ static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
}
}
-static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
+static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
void __iomem *reg = bank->base + bank->regs->direction;
- return readl_relaxed(reg) & mask;
+ return readl_relaxed(reg) & BIT(offset);
}
-static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
- unsigned offset)
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
if (!LINE_USED(bank->mod_usage, offset)) {
omap_enable_gpio_module(bank, offset);
omap_set_gpio_direction(bank, offset, 1);
}
- bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+ bank->irq_usage |= BIT(offset);
}
static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned gpio = 0;
int retval;
unsigned long flags;
- unsigned offset;
+ unsigned offset = d->hwirq;
if (!BANK_USED(bank))
pm_runtime_get_sync(bank->dev);
-#ifdef CONFIG_ARCH_OMAP1
- if (d->irq > IH_MPUIO_BASE)
- gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
-#endif
-
- if (!gpio)
- gpio = omap_irq_to_gpio(bank, d->hwirq);
-
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
@@ -516,10 +499,9 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
return -EINVAL;
spin_lock_irqsave(&bank->lock, flags);
- offset = GPIO_INDEX(bank, gpio);
retval = omap_set_gpio_triggering(bank, offset, type);
- omap_gpio_init_irq(bank, gpio, offset);
- if (!omap_gpio_is_input(bank, BIT(offset))) {
+ omap_gpio_init_irq(bank, offset);
+ if (!omap_gpio_is_input(bank, offset)) {
spin_unlock_irqrestore(&bank->lock, flags);
return -EINVAL;
}
@@ -550,9 +532,10 @@ static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
readl_relaxed(reg);
}
-static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
+static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
+ unsigned offset)
{
- omap_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
+ omap_clear_gpio_irqbank(bank, BIT(offset));
}
static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
@@ -613,13 +596,13 @@ static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
writel_relaxed(l, reg);
}
-static inline void omap_set_gpio_irqenable(struct gpio_bank *bank, int gpio,
- int enable)
+static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
+ unsigned offset, int enable)
{
if (enable)
- omap_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
+ omap_enable_gpio_irqbank(bank, BIT(offset));
else
- omap_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
+ omap_disable_gpio_irqbank(bank, BIT(offset));
}
/*
@@ -630,14 +613,16 @@ static inline void omap_set_gpio_irqenable(struct gpio_bank *bank, int gpio,
* enabled. When system is suspended, only selected GPIO interrupts need
* to have wake-up enabled.
*/
-static int omap_set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
+static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
+ int enable)
{
- u32 gpio_bit = GPIO_BIT(bank, gpio);
+ u32 gpio_bit = BIT(offset);
unsigned long flags;
if (bank->non_wakeup_gpios & gpio_bit) {
dev_err(bank->dev,
- "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
+ "Unable to modify wakeup on non-wakeup GPIO%d\n",
+ offset);
return -EINVAL;
}
@@ -653,22 +638,22 @@ static int omap_set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
return 0;
}
-static void omap_reset_gpio(struct gpio_bank *bank, int gpio)
+static void omap_reset_gpio(struct gpio_bank *bank, unsigned offset)
{
- omap_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
- omap_set_gpio_irqenable(bank, gpio, 0);
- omap_clear_gpio_irqstatus(bank, gpio);
- omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
- omap_clear_gpio_debounce(bank, gpio);
+ omap_set_gpio_direction(bank, offset, 1);
+ omap_set_gpio_irqenable(bank, offset, 0);
+ omap_clear_gpio_irqstatus(bank, offset);
+ omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_clear_gpio_debounce(bank, offset);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+ unsigned offset = d->hwirq;
- return omap_set_gpio_wakeup(bank, gpio, enable);
+ return omap_set_gpio_wakeup(bank, offset, enable);
}
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -706,7 +691,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&bank->lock, flags);
bank->mod_usage &= ~(BIT(offset));
omap_disable_gpio_module(bank, offset);
- omap_reset_gpio(bank, bank->chip.base + offset);
+ omap_reset_gpio(bank, offset);
spin_unlock_irqrestore(&bank->lock, flags);
/*
@@ -803,15 +788,14 @@ exit:
static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
unsigned long flags;
- unsigned offset = GPIO_INDEX(bank, gpio);
+ unsigned offset = d->hwirq;
if (!BANK_USED(bank))
pm_runtime_get_sync(bank->dev);
spin_lock_irqsave(&bank->lock, flags);
- omap_gpio_init_irq(bank, gpio, offset);
+ omap_gpio_init_irq(bank, offset);
spin_unlock_irqrestore(&bank->lock, flags);
omap_gpio_unmask_irq(d);
@@ -821,15 +805,13 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
static void omap_gpio_irq_shutdown(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
unsigned long flags;
- unsigned offset = GPIO_INDEX(bank, gpio);
+ unsigned offset = d->hwirq;
spin_lock_irqsave(&bank->lock, flags);
- gpiochip_unlock_as_irq(&bank->chip, offset);
bank->irq_usage &= ~(BIT(offset));
omap_disable_gpio_module(bank, offset);
- omap_reset_gpio(bank, gpio);
+ omap_reset_gpio(bank, offset);
spin_unlock_irqrestore(&bank->lock, flags);
/*
@@ -843,43 +825,42 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
static void omap_gpio_ack_irq(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+ unsigned offset = d->hwirq;
- omap_clear_gpio_irqstatus(bank, gpio);
+ omap_clear_gpio_irqstatus(bank, offset);
}
static void omap_gpio_mask_irq(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+ unsigned offset = d->hwirq;
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_irqenable(bank, gpio, 0);
- omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
+ omap_set_gpio_irqenable(bank, offset, 0);
+ omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
spin_unlock_irqrestore(&bank->lock, flags);
}
static void omap_gpio_unmask_irq(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
- unsigned int irq_mask = GPIO_BIT(bank, gpio);
+ unsigned offset = d->hwirq;
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
if (trigger)
- omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
+ omap_set_gpio_triggering(bank, offset, trigger);
/* For level-triggered GPIOs, the clearing must be done after
* the HW source is cleared, thus after the handler has run */
- if (bank->level_mask & irq_mask) {
- omap_set_gpio_irqenable(bank, gpio, 0);
- omap_clear_gpio_irqstatus(bank, gpio);
+ if (bank->level_mask & BIT(offset)) {
+ omap_set_gpio_irqenable(bank, offset, 0);
+ omap_clear_gpio_irqstatus(bank, offset);
}
- omap_set_gpio_irqenable(bank, gpio, 1);
+ omap_set_gpio_irqenable(bank, offset, 1);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -977,12 +958,10 @@ static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank;
- u32 mask;
bank = container_of(chip, struct gpio_bank, chip);
- mask = (BIT(offset));
- if (omap_gpio_is_input(bank, mask))
+ if (omap_gpio_is_input(bank, offset))
return omap_get_gpio_datain(bank, offset);
else
return omap_get_gpio_dataout(bank, offset);
@@ -1075,38 +1054,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
dev_err(bank->dev, "Could not get gpio dbck\n");
}
-static void
-omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
- unsigned int num)
-{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
-
- gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
- handle_simple_irq);
- if (!gc) {
- dev_err(bank->dev, "Memory alloc failed for gc\n");
- return;
- }
-
- ct = gc->chip_types;
-
- /* NOTE: No ack required, reading IRQ status clears it. */
- ct->chip.irq_mask = irq_gc_mask_set_bit;
- ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_set_type = omap_gpio_irq_type;
-
- if (bank->regs->wkup_en)
- ct->chip.irq_set_wake = omap_gpio_wake_enable;
-
- ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-}
-
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
- int j;
static int gpio;
int irq_base = 0;
int ret;
@@ -1153,6 +1102,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
}
#endif
+ /* MPUIO is a bit different, reading IRQ status clears it */
+ if (bank->is_mpuio) {
+ irqc->irq_ack = dummy_irq_chip.irq_ack;
+ irqc->irq_mask = irq_gc_mask_set_bit;
+ irqc->irq_unmask = irq_gc_mask_clr_bit;
+ if (!bank->regs->wkup_en)
+ irqc->irq_set_wake = NULL;
+ }
+
ret = gpiochip_irqchip_add(&bank->chip, irqc,
irq_base, omap_gpio_irq_handler,
IRQ_TYPE_NONE);
@@ -1166,15 +1124,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
gpiochip_set_chained_irqchip(&bank->chip, irqc,
bank->irq, omap_gpio_irq_handler);
- for (j = 0; j < bank->width; j++) {
- int irq = irq_find_mapping(bank->chip.irqdomain, j);
- if (bank->is_mpuio) {
- omap_mpuio_alloc_gc(bank, irq, bank->width);
- irq_set_chip_and_handler(irq, NULL, NULL);
- set_irq_flags(irq, 0);
- }
- }
-
return 0;
}
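The common thread in the omap rework above is that, once the interrupts are provided through gpiochip_irqchip_add(), irq_data->hwirq already is the offset within the bank, so the GPIO_INDEX()/omap_irq_to_gpio() round trips can go. A minimal hedged sketch of an irqchip callback written in that style, where struct mygpio, the status register and its write-1-to-clear behaviour are assumptions:

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irq.h>

#define MYGPIO_IRQ_STATUS	0x18		/* assumed: write 1 to clear */

struct mygpio {
	struct gpio_chip chip;
	void __iomem *base;
};

static void mygpio_irq_ack(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	struct mygpio *priv = container_of(chip, struct mygpio, chip);
	unsigned offset = d->hwirq;	/* offset within this bank/chip */

	writel(BIT(offset), priv->base + MYGPIO_IRQ_STATUS);
}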
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 236708ad0a5b..945f0cda8529 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -88,11 +88,9 @@ struct pcf857x {
struct gpio_chip chip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
- struct irq_domain *irq_domain; /* for irq demux */
spinlock_t slock; /* protect irq demux */
unsigned out; /* software latch */
unsigned status; /* current status */
- unsigned irq_mapped; /* mapped gpio irqs */
int (*write)(struct i2c_client *client, unsigned data);
int (*read)(struct i2c_client *client);
@@ -182,18 +180,6 @@ static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
/*-------------------------------------------------------------------------*/
-static int pcf857x_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
- int ret;
-
- ret = irq_create_mapping(gpio->irq_domain, offset);
- if (ret > 0)
- gpio->irq_mapped |= (1 << offset);
-
- return ret;
-}
-
static irqreturn_t pcf857x_irq(int irq, void *data)
{
struct pcf857x *gpio = data;
@@ -208,9 +194,9 @@ static irqreturn_t pcf857x_irq(int irq, void *data)
* interrupt source, just to avoid bad irqs
*/
- change = ((gpio->status ^ status) & gpio->irq_mapped);
+ change = (gpio->status ^ status);
for_each_set_bit(i, &change, gpio->chip.ngpio)
- generic_handle_irq(irq_find_mapping(gpio->irq_domain, i));
+ handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
gpio->status = status;
spin_unlock_irqrestore(&gpio->slock, flags);
@@ -218,66 +204,36 @@ static irqreturn_t pcf857x_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int pcf857x_irq_domain_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hw)
-{
- struct pcf857x *gpio = domain->host_data;
-
- irq_set_chip_and_handler(irq,
- &dummy_irq_chip,
- handle_level_irq);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
- gpio->irq_mapped |= (1 << hw);
-
- return 0;
-}
-
-static struct irq_domain_ops pcf857x_irq_domain_ops = {
- .map = pcf857x_irq_domain_map,
-};
+/*
+ * NOP functions
+ */
+static void noop(struct irq_data *data) { }
-static void pcf857x_irq_domain_cleanup(struct pcf857x *gpio)
+static unsigned int noop_ret(struct irq_data *data)
{
- if (gpio->irq_domain)
- irq_domain_remove(gpio->irq_domain);
-
+ return 0;
}
-static int pcf857x_irq_domain_init(struct pcf857x *gpio,
- struct i2c_client *client)
+static int pcf857x_irq_set_wake(struct irq_data *data, unsigned int on)
{
- int status;
-
- gpio->irq_domain = irq_domain_add_linear(client->dev.of_node,
- gpio->chip.ngpio,
- &pcf857x_irq_domain_ops,
- gpio);
- if (!gpio->irq_domain)
- goto fail;
-
- /* enable real irq */
- status = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, pcf857x_irq, IRQF_ONESHOT |
- IRQF_TRIGGER_FALLING | IRQF_SHARED,
- dev_name(&client->dev), gpio);
-
- if (status)
- goto fail;
-
- /* enable gpio_to_irq() */
- gpio->chip.to_irq = pcf857x_to_irq;
+ struct pcf857x *gpio = irq_data_get_irq_chip_data(data);
+ irq_set_irq_wake(gpio->client->irq, on);
return 0;
-
-fail:
- pcf857x_irq_domain_cleanup(gpio);
- return -EINVAL;
}
+static struct irq_chip pcf857x_irq_chip = {
+ .name = "pcf857x",
+ .irq_startup = noop_ret,
+ .irq_shutdown = noop,
+ .irq_enable = noop,
+ .irq_disable = noop,
+ .irq_ack = noop,
+ .irq_mask = noop,
+ .irq_unmask = noop,
+ .irq_set_wake = pcf857x_irq_set_wake,
+};
+
/*-------------------------------------------------------------------------*/
static int pcf857x_probe(struct i2c_client *client,
@@ -314,15 +270,6 @@ static int pcf857x_probe(struct i2c_client *client,
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = id->driver_data;
- /* enable gpio_to_irq() if platform has settings */
- if (client->irq) {
- status = pcf857x_irq_domain_init(gpio, client);
- if (status < 0) {
- dev_err(&client->dev, "irq_domain init failed\n");
- goto fail_irq_domain;
- }
- }
-
/* NOTE: the OnSemi jlc1562b is also largely compatible with
* these parts, notably for output. It has a low-resolution
* DAC instead of pin change IRQs; and its inputs can be the
@@ -398,6 +345,27 @@ static int pcf857x_probe(struct i2c_client *client,
if (status < 0)
goto fail;
+ /* Enable irqchip if we have an interrupt */
+ if (client->irq) {
+ status = gpiochip_irqchip_add(&gpio->chip, &pcf857x_irq_chip,
+ 0, handle_level_irq,
+ IRQ_TYPE_NONE);
+ if (status) {
+ dev_err(&client->dev, "cannot add irqchip\n");
+ goto fail_irq;
+ }
+
+ status = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf857x_irq, IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_SHARED,
+ dev_name(&client->dev), gpio);
+ if (status)
+ goto fail_irq;
+
+ gpiochip_set_chained_irqchip(&gpio->chip, &pcf857x_irq_chip,
+ client->irq, NULL);
+ }
+
/* Let platform code set up the GPIOs and their users.
* Now is the first time anyone could use them.
*/
@@ -413,13 +381,12 @@ static int pcf857x_probe(struct i2c_client *client,
return 0;
-fail:
- if (client->irq)
- pcf857x_irq_domain_cleanup(gpio);
+fail_irq:
+ gpiochip_remove(&gpio->chip);
-fail_irq_domain:
- dev_dbg(&client->dev, "probe error %d for '%s'\n",
- status, client->name);
+fail:
+ dev_dbg(&client->dev, "probe error %d for '%s'\n", status,
+ client->name);
return status;
}
@@ -441,9 +408,6 @@ static int pcf857x_remove(struct i2c_client *client)
}
}
- if (client->irq)
- pcf857x_irq_domain_cleanup(gpio);
-
gpiochip_remove(&gpio->chip);
return status;
}
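
With the conversion above, the pcf857x pins get their interrupts through gpiolib's own irqdomain instead of a private one. A minimal consumer-side sketch of the resulting flow, assuming a hypothetical "button" line on the expander (device, names and helper function are illustrative, not part of this patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t button_isr(int irq, void *data)
{
	/* the pin change was already demultiplexed by pcf857x_irq() */
	return IRQ_HANDLED;
}

static int example_request_button_irq(struct device *dev)
{
	struct gpio_desc *btn;
	int irq;

	btn = devm_gpiod_get(dev, "button", GPIOD_IN);
	if (IS_ERR(btn))
		return PTR_ERR(btn);

	/* resolved via the irqdomain set up by gpiochip_irqchip_add() */
	irq = gpiod_to_irq(btn);
	if (irq < 0)
		return irq;

	/* nested handling: no primary handler, so IRQF_ONESHOT is required */
	return devm_request_threaded_irq(dev, irq, NULL, button_isr,
					 IRQF_ONESHOT, "button", NULL);
}
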
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 2fdb04b6f101..cdbbcf0faf9d 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -59,8 +59,7 @@
#define GAFR_OFFSET 0x54
#define ED_MASK_OFFSET 0x9C /* GPIO edge detection for AP side */
-#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : ((n) > 5 ? 0x200 : 0x100) \
- + (((n) % 3) << 2))
+#define BANK_OFF(n) (((n) / 3) << 8) + (((n) % 3) << 2)
int pxa_last_gpio;
static int irq_base;
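
The simplified BANK_OFF() in the hunk above leaves the addition outside any enclosing parentheses, so an expansion such as BANK_OFF(n) * 4 would scale only the second term. A fully parenthesized sketch of the same mapping (a defensive variant, not the committed form):

/*
 * Banks 0-2 map to 0x000/0x004/0x008, banks 3-5 to 0x100/0x104/0x108,
 * and so on in 0x100 steps for each further group of three banks.
 */
#define BANK_OFF(n)	((((n) / 3) << 8) + (((n) % 3) << 2))
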
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index c49522efa7b3..fd3977465948 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -14,6 +14,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
@@ -37,20 +38,22 @@ struct gpio_rcar_priv {
struct platform_device *pdev;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
+ unsigned int irq_parent;
+ struct clk *clk;
};
-#define IOINTSEL 0x00
-#define INOUTSEL 0x04
-#define OUTDT 0x08
-#define INDT 0x0c
-#define INTDT 0x10
-#define INTCLR 0x14
-#define INTMSK 0x18
-#define MSKCLR 0x1c
-#define POSNEG 0x20
-#define EDGLEVEL 0x24
-#define FILONOFF 0x28
-#define BOTHEDGE 0x4c
+#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */
+#define INOUTSEL 0x04 /* General Input/Output Switching Register */
+#define OUTDT 0x08 /* General Output Register */
+#define INDT 0x0c /* General Input Register */
+#define INTDT 0x10 /* Interrupt Display Register */
+#define INTCLR 0x14 /* Interrupt Clear Register */
+#define INTMSK 0x18 /* Interrupt Mask Register */
+#define MSKCLR 0x1c /* Interrupt Mask Clear Register */
+#define POSNEG 0x20 /* Positive/Negative Logic Select Register */
+#define EDGLEVEL 0x24 /* Edge/level Select Register */
+#define FILONOFF 0x28 /* Chattering Prevention On/Off Register */
+#define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */
#define RCAR_MAX_GPIO_PER_BANK 32
@@ -169,6 +172,25 @@ static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+
+ irq_set_irq_wake(p->irq_parent, on);
+
+ if (!p->clk)
+ return 0;
+
+ if (on)
+ clk_enable(p->clk);
+ else
+ clk_disable(p->clk);
+
+ return 0;
+}
+
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -367,6 +389,12 @@ static int gpio_rcar_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
+ p->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(p->clk)) {
+ dev_warn(dev, "unable to get clock\n");
+ p->clk = NULL;
+ }
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -404,8 +432,8 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_mask = gpio_rcar_irq_disable;
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
- irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED
- | IRQCHIP_MASK_ON_SUSPEND;
+ irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
+ irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add(gpio_chip);
if (ret) {
@@ -413,13 +441,14 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err0;
}
- ret = gpiochip_irqchip_add(&p->gpio_chip, irq_chip, p->config.irq_base,
+ ret = gpiochip_irqchip_add(gpio_chip, irq_chip, p->config.irq_base,
handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "cannot add irqchip\n");
goto err1;
}
+ p->irq_parent = irq->start;
if (devm_request_irq(dev, irq->start, gpio_rcar_irq_handler,
IRQF_SHARED, name, p)) {
dev_err(dev, "failed to request IRQ\n");
@@ -431,7 +460,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
/* warn in case of mismatch if irq base is specified */
if (p->config.irq_base) {
- ret = irq_find_mapping(p->gpio_chip.irqdomain, 0);
+ ret = irq_find_mapping(gpio_chip->irqdomain, 0);
if (p->config.irq_base != ret)
dev_warn(dev, "irq base mismatch (%u/%u)\n",
p->config.irq_base, ret);
@@ -447,7 +476,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return 0;
err1:
- gpiochip_remove(&p->gpio_chip);
+ gpiochip_remove(gpio_chip);
err0:
pm_runtime_put(dev);
pm_runtime_disable(dev);
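
One thing gpio_rcar_irq_set_wake() above does not do is propagate failures: the return value of irq_set_irq_wake() on the parent interrupt is dropped, so a parent controller without wake support fails silently. A slightly more defensive variant, reusing the driver's existing types and includes, might look like this sketch (an illustration, not the committed code):

static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
						gpio_chip);
	int error;

	if (p->irq_parent) {
		error = irq_set_irq_wake(p->irq_parent, on);
		if (error) {
			dev_dbg(&p->pdev->dev,
				"parent irq %u does not support wake\n",
				p->irq_parent);
			p->irq_parent = 0;
		}
	}

	/* keep the module clock running while the wake source is armed */
	if (p->clk) {
		if (on)
			clk_enable(p->clk);
		else
			clk_disable(p->clk);
	}

	return 0;
}
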
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 62ab9f4b2cd3..46b89614aa91 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -283,7 +283,7 @@ fail_ioremap:
return ret;
}
-static int __exit tb10x_gpio_remove(struct platform_device *pdev)
+static int tb10x_gpio_remove(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 11aed2671065..31b244cffabb 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -260,10 +260,7 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
tc3589x_gpio->chip.ngpio = tc3589x->num_gpio;
tc3589x_gpio->chip.dev = &pdev->dev;
tc3589x_gpio->chip.base = -1;
-
-#ifdef CONFIG_OF_GPIO
tc3589x_gpio->chip.of_node = np;
-#endif
/* Bring the GPIO module out of reset */
ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL,
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 971c73964ef1..7bd9f209ffa8 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -244,16 +244,16 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc = &port->gc;
gc->of_node = np;
gc->dev = dev;
- gc->label = "vf610-gpio",
- gc->ngpio = VF610_GPIO_PER_PORT,
+ gc->label = "vf610-gpio";
+ gc->ngpio = VF610_GPIO_PER_PORT;
gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
- gc->request = vf610_gpio_request,
- gc->free = vf610_gpio_free,
- gc->direction_input = vf610_gpio_direction_input,
- gc->get = vf610_gpio_get,
- gc->direction_output = vf610_gpio_direction_output,
- gc->set = vf610_gpio_set,
+ gc->request = vf610_gpio_request;
+ gc->free = vf610_gpio_free;
+ gc->direction_input = vf610_gpio_direction_input;
+ gc->get = vf610_gpio_get;
+ gc->direction_output = vf610_gpio_direction_output;
+ gc->set = vf610_gpio_set;
ret = gpiochip_add(gc);
if (ret < 0)
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index b6a15c39293e..fb9d29a5d584 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -93,7 +93,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
- if (!regs)
+ if (IS_ERR(regs))
return PTR_ERR(regs);
ret = bgpio_init(&priv->bgc, &pdev->dev, 4,
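
The one-line fix above matters because devm_ioremap_resource() never returns NULL on failure; it returns an ERR_PTR(), so the old !regs test would let an error pointer through. A minimal probe sketch of the canonical pattern (function name illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))		/* ERR_PTR(), never NULL */
		return PTR_ERR(regs);

	/* ...register-dependent setup would go here... */
	return 0;
}
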
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index df990f29757a..725d16138b74 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -304,7 +304,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
return;
INIT_LIST_HEAD(&acpi_gpio->events);
- acpi_walk_resources(ACPI_HANDLE(chip->dev), "_AEI",
+ acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_request_interrupt, acpi_gpio);
}
@@ -550,7 +550,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
length = min(agpio->pin_table_length, (u16)(pin_index + bits));
for (i = pin_index; i < length; ++i) {
- unsigned pin = agpio->pin_table[i];
+ int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
bool found;
@@ -722,3 +722,87 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
acpi_detach_data(handle, acpi_gpio_chip_dh);
kfree(acpi_gpio);
}
+
+static unsigned int acpi_gpio_package_count(const union acpi_object *obj)
+{
+ const union acpi_object *element = obj->package.elements;
+ const union acpi_object *end = element + obj->package.count;
+ unsigned int count = 0;
+
+ while (element < end) {
+ if (element->type == ACPI_TYPE_LOCAL_REFERENCE)
+ count++;
+
+ element++;
+ }
+ return count;
+}
+
+static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
+{
+ unsigned int *count = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_GPIO)
+ *count += ares->data.gpio.pin_table_length;
+
+ return 1;
+}
+
+/**
+ * acpi_gpio_count - return the number of GPIOs associated with a
+ * device / function or -ENOENT if no GPIO has been
+ * assigned to the requested function.
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * @con_id: function within the GPIO consumer
+ */
+int acpi_gpio_count(struct device *dev, const char *con_id)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ const union acpi_object *obj;
+ const struct acpi_gpio_mapping *gm;
+ int count = -ENOENT;
+ int ret;
+ char propname[32];
+ unsigned int i;
+
+ /* Try first from _DSD */
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ if (con_id && strcmp(con_id, "gpios"))
+ snprintf(propname, sizeof(propname), "%s-%s",
+ con_id, gpio_suffixes[i]);
+ else
+ snprintf(propname, sizeof(propname), "%s",
+ gpio_suffixes[i]);
+
+ ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY,
+ &obj);
+ if (ret == 0) {
+ if (obj->type == ACPI_TYPE_LOCAL_REFERENCE)
+ count = 1;
+ else if (obj->type == ACPI_TYPE_PACKAGE)
+ count = acpi_gpio_package_count(obj);
+ } else if (adev->driver_gpios) {
+ for (gm = adev->driver_gpios; gm->name; gm++)
+ if (strcmp(propname, gm->name) == 0) {
+ count = gm->size;
+ break;
+ }
+ }
+ if (count >= 0)
+ break;
+ }
+
+ /* Then from plain _CRS GPIOs */
+ if (count < 0) {
+ struct list_head resource_list;
+ unsigned int crs_count = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(adev, &resource_list,
+ acpi_find_gpio_count, &crs_count);
+ acpi_dev_free_resource_list(&resource_list);
+ if (crs_count > 0)
+ count = crs_count;
+ }
+ return count;
+}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 4650bf830d6b..a6c67c6b4680 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -22,6 +22,7 @@
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>
+#include <linux/gpio/machine.h>
#include "gpiolib.h"
@@ -118,6 +119,114 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
EXPORT_SYMBOL(of_get_named_gpio_flags);
/**
+ * of_get_gpio_hog() - Get a GPIO hog descriptor, names and flags for GPIO API
+ * @np: device node to get GPIO from
+ * @name: GPIO line name
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ * of_get_gpio_hog()
+ * @dflags: gpiod_flags - optional GPIO initialization flags
+ *
+ * Returns a GPIO descriptor to use with the Linux GPIO API, or an
+ * ERR_PTR()-encoded errno on error.
+ */
+static struct gpio_desc *of_get_gpio_hog(struct device_node *np,
+ const char **name,
+ enum gpio_lookup_flags *lflags,
+ enum gpiod_flags *dflags)
+{
+ struct device_node *chip_np;
+ enum of_gpio_flags xlate_flags;
+ struct gpio_desc *desc;
+ struct gg_data gg_data = {
+ .flags = &xlate_flags,
+ };
+ u32 tmp;
+ int i, ret;
+
+ chip_np = np->parent;
+ if (!chip_np)
+ return ERR_PTR(-EINVAL);
+
+ xlate_flags = 0;
+ *lflags = 0;
+ *dflags = 0;
+
+ ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (tmp > MAX_PHANDLE_ARGS)
+ return ERR_PTR(-EINVAL);
+
+ gg_data.gpiospec.args_count = tmp;
+ gg_data.gpiospec.np = chip_np;
+ for (i = 0; i < tmp; i++) {
+ ret = of_property_read_u32_index(np, "gpios", i,
+ &gg_data.gpiospec.args[i]);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
+ if (!gg_data.out_gpio) {
+ if (np->parent == np)
+ return ERR_PTR(-ENXIO);
+ else
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (xlate_flags & OF_GPIO_ACTIVE_LOW)
+ *lflags |= GPIO_ACTIVE_LOW;
+
+ if (of_property_read_bool(np, "input"))
+ *dflags |= GPIOD_IN;
+ else if (of_property_read_bool(np, "output-low"))
+ *dflags |= GPIOD_OUT_LOW;
+ else if (of_property_read_bool(np, "output-high"))
+ *dflags |= GPIOD_OUT_HIGH;
+ else {
+ pr_warn("GPIO line %d (%s): no hogging state specified, bailing out\n",
+ desc_to_gpio(gg_data.out_gpio), np->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (name && of_property_read_string(np, "line-name", name))
+ *name = np->name;
+
+ desc = gg_data.out_gpio;
+
+ return desc;
+}
+
+/**
+ * of_gpiochip_scan_hogs - Scan gpio-controller and apply GPIO hog as requested
+ * @chip: gpio chip to act on
+ *
+ * This is only used by of_gpiochip_add to request/set GPIO initial
+ * configuration.
+ */
+static void of_gpiochip_scan_hogs(struct gpio_chip *chip)
+{
+ struct gpio_desc *desc = NULL;
+ struct device_node *np;
+ const char *name;
+ enum gpio_lookup_flags lflags;
+ enum gpiod_flags dflags;
+
+ for_each_child_of_node(chip->of_node, np) {
+ if (!of_property_read_bool(np, "gpio-hog"))
+ continue;
+
+ desc = of_get_gpio_hog(np, &name, &lflags, &dflags);
+ if (IS_ERR(desc))
+ continue;
+
+ if (gpiod_hog(desc, name, lflags, dflags))
+ continue;
+ }
+}
+
+/**
* of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
* @np: device node of the GPIO chip
@@ -326,6 +435,8 @@ void of_gpiochip_add(struct gpio_chip *chip)
of_gpiochip_add_pin_range(chip);
of_node_get(chip->of_node);
+
+ of_gpiochip_scan_hogs(chip);
}
void of_gpiochip_remove(struct gpio_chip *chip)
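
Mechanically, a hog amounts to the GPIO chip claiming one of its own descriptors and pinning its direction and level before any consumer can get at the line. Stripped of the device-tree parsing above, the core of the operation is roughly the sketch below (helper name and error handling are illustrative; the real path is gpiod_hog() in gpiolib.c):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

static int example_hog_line(struct gpio_chip *chip, unsigned int hwnum,
			    const char *name, enum gpiod_flags dflags)
{
	struct gpio_desc *desc;

	/* the chip requests its own line, bypassing consumer lookup */
	desc = gpiochip_request_own_desc(chip, hwnum, name);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
		return gpiod_direction_output(desc,
					dflags & GPIOD_FLAGS_BIT_DIR_VAL);

	return gpiod_direction_input(desc);
}
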
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 7722ed53bd65..af3bc7a8033b 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -551,6 +551,7 @@ static struct class gpio_class = {
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
+ struct gpio_chip *chip;
unsigned long flags;
int status;
const char *ioname = NULL;
@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
return -EINVAL;
}
+ chip = desc->chip;
+
mutex_lock(&sysfs_lock);
+ /* check if chip is being removed */
+ if (!chip || !chip->exported) {
+ status = -ENODEV;
+ goto fail_unlock;
+ }
+
spin_lock_irqsave(&gpio_lock, flags);
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags)) {
@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip)
{
int status;
struct device *dev;
+ struct gpio_desc *desc;
+ unsigned int i;
mutex_lock(&sysfs_lock);
dev = class_find_device(&gpio_class, NULL, chip, match_export);
if (dev) {
put_device(dev);
device_unregister(dev);
+ /* prevent further gpiod exports */
chip->exported = false;
status = 0;
} else
@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip)
if (status)
chip_dbg(chip, "%s: status %d\n", __func__, status);
+
+ /* unregister gpiod class devices owned by sysfs */
+ for (i = 0; i < chip->ngpio; i++) {
+ desc = &chip->desc[i];
+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
+ gpiod_free(desc);
+ }
}
static int __init gpiolib_sysfs_init(void)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1ca9295b2c10..59eaa23767d8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -315,6 +315,7 @@ EXPORT_SYMBOL_GPL(gpiochip_add);
/* Forward-declaration */
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static void gpiochip_free_hogs(struct gpio_chip *chip);
/**
* gpiochip_remove() - unregister a gpio_chip
@@ -333,6 +334,7 @@ void gpiochip_remove(struct gpio_chip *chip)
acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
+ gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
spin_lock_irqsave(&gpio_lock, flags);
@@ -866,6 +868,7 @@ static bool __gpiod_free(struct gpio_desc *desc)
clear_bit(FLAG_REQUESTED, &desc->flags);
clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ clear_bit(FLAG_IS_HOGGED, &desc->flags);
ret = true;
}
@@ -1659,19 +1662,18 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
{
- static const char * const suffixes[] = { "gpios", "gpio" };
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
struct gpio_desc *desc;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id)
snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
- suffixes[i]);
+ gpio_suffixes[i]);
else
snprintf(prop_name, sizeof(prop_name), "%s",
- suffixes[i]);
+ gpio_suffixes[i]);
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
@@ -1692,7 +1694,6 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
{
- static const char * const suffixes[] = { "gpios", "gpio" };
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
struct gpio_desc *desc;
@@ -1700,13 +1701,13 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
int i;
/* Try first from _DSD */
- for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id && strcmp(con_id, "gpios")) {
snprintf(propname, sizeof(propname), "%s-%s",
- con_id, suffixes[i]);
+ con_id, gpio_suffixes[i]);
} else {
snprintf(propname, sizeof(propname), "%s",
- suffixes[i]);
+ gpio_suffixes[i]);
}
desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
@@ -1805,6 +1806,70 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return desc;
}
+static int dt_gpio_count(struct device *dev, const char *con_id)
+{
+ int ret;
+ char propname[32];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ if (con_id)
+ snprintf(propname, sizeof(propname), "%s-%s",
+ con_id, gpio_suffixes[i]);
+ else
+ snprintf(propname, sizeof(propname), "%s",
+ gpio_suffixes[i]);
+
+ ret = of_gpio_named_count(dev->of_node, propname);
+ if (ret >= 0)
+ break;
+ }
+ return ret;
+}
+
+static int platform_gpio_count(struct device *dev, const char *con_id)
+{
+ struct gpiod_lookup_table *table;
+ struct gpiod_lookup *p;
+ unsigned int count = 0;
+
+ table = gpiod_find_lookup_table(dev);
+ if (!table)
+ return -ENOENT;
+
+ for (p = &table->table[0]; p->chip_label; p++) {
+ if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
+ (!con_id && !p->con_id))
+ count++;
+ }
+ if (!count)
+ return -ENOENT;
+
+ return count;
+}
+
+/**
+ * gpiod_count - return the number of GPIOs associated with a device / function
+ * or -ENOENT if no GPIO has been assigned to the requested function
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * @con_id: function within the GPIO consumer
+ */
+int gpiod_count(struct device *dev, const char *con_id)
+{
+ int count = -ENOENT;
+
+ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
+ count = dt_gpio_count(dev, con_id);
+ else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev))
+ count = acpi_gpio_count(dev, con_id);
+
+ if (count < 0)
+ count = platform_gpio_count(dev, con_id);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(gpiod_count);
+
/**
* gpiod_get - obtain a GPIO for a given GPIO function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
@@ -1840,6 +1905,47 @@ struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
}
EXPORT_SYMBOL_GPL(__gpiod_get_optional);
+
+/**
+ * gpiod_configure_flags - helper function to configure a given GPIO
+ * @desc: gpio whose value will be assigned
+ * @con_id: function within the GPIO consumer
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ * of_get_gpio_hog()
+ * @dflags: gpiod_flags - optional GPIO initialization flags
+ *
+ * Return 0 on success, -ENOENT if no GPIO has been assigned to the
+ * requested function and/or index, or another IS_ERR() code if an error
+ * occurred while trying to acquire the GPIO.
+ */
+static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
+ unsigned long lflags, enum gpiod_flags dflags)
+{
+ int status;
+
+ if (lflags & GPIO_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIO_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (lflags & GPIO_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
+ /* No particular flag request, return here... */
+ if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
+ pr_debug("no flags found for %s\n", con_id);
+ return 0;
+ }
+
+ /* Process flags */
+ if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
+ status = gpiod_direction_output(desc,
+ dflags & GPIOD_FLAGS_BIT_DIR_VAL);
+ else
+ status = gpiod_direction_input(desc);
+
+ return status;
+}
+
/**
* gpiod_get_index - obtain a GPIO from a multi-index GPIO function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
@@ -1865,13 +1971,15 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
- /* Using device tree? */
- if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) {
- dev_dbg(dev, "using device tree for GPIO lookup\n");
- desc = of_find_gpio(dev, con_id, idx, &lookupflags);
- } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) {
- dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+ if (dev) {
+ /* Using device tree? */
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ dev_dbg(dev, "using device tree for GPIO lookup\n");
+ desc = of_find_gpio(dev, con_id, idx, &lookupflags);
+ } else if (ACPI_COMPANION(dev)) {
+ dev_dbg(dev, "using ACPI for GPIO lookup\n");
+ desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+ }
}
/*
@@ -1889,28 +1997,10 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
}
status = gpiod_request(desc, con_id);
-
if (status < 0)
return ERR_PTR(status);
- if (lookupflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lookupflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lookupflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-
- /* No particular flag request, return here... */
- if (!(flags & GPIOD_FLAGS_BIT_DIR_SET))
- return desc;
-
- /* Process flags */
- if (flags & GPIOD_FLAGS_BIT_DIR_OUT)
- status = gpiod_direction_output(desc,
- flags & GPIOD_FLAGS_BIT_DIR_VAL);
- else
- status = gpiod_direction_input(desc);
-
+ status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (status < 0) {
dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
@@ -2006,6 +2096,132 @@ struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
EXPORT_SYMBOL_GPL(__gpiod_get_index_optional);
/**
+ * gpiod_hog - Hog the specified GPIO desc given the provided flags
+ * @desc: gpio whose value will be assigned
+ * @name: gpio line name
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ * of_get_gpio_hog()
+ * @dflags: gpiod_flags - optional GPIO initialization flags
+ */
+int gpiod_hog(struct gpio_desc *desc, const char *name,
+ unsigned long lflags, enum gpiod_flags dflags)
+{
+ struct gpio_chip *chip;
+ struct gpio_desc *local_desc;
+ int hwnum;
+ int status;
+
+ chip = gpiod_to_chip(desc);
+ hwnum = gpio_chip_hwgpio(desc);
+
+ local_desc = gpiochip_request_own_desc(chip, hwnum, name);
+ if (IS_ERR(local_desc)) {
+ pr_debug("requesting own GPIO %s failed\n", name);
+ return PTR_ERR(local_desc);
+ }
+
+ status = gpiod_configure_flags(desc, name, lflags, dflags);
+ if (status < 0) {
+ pr_debug("setup of GPIO %s failed\n", name);
+ gpiochip_free_own_desc(desc);
+ return status;
+ }
+
+ /* Mark GPIO as hogged so it can be identified and removed later */
+ set_bit(FLAG_IS_HOGGED, &desc->flags);
+
+ pr_info("GPIO line %d (%s) hogged as %s%s\n",
+ desc_to_gpio(desc), name,
+ (dflags&GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
+ (dflags&GPIOD_FLAGS_BIT_DIR_OUT) ?
+ (dflags&GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low":"");
+
+ return 0;
+}
+
+/**
+ * gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hog
+ * @chip: gpio chip to act on
+ *
+ * This is only used by gpiochip_remove() to free hogged gpios.
+ */
+static void gpiochip_free_hogs(struct gpio_chip *chip)
+{
+ int id;
+
+ for (id = 0; id < chip->ngpio; id++) {
+ if (test_bit(FLAG_IS_HOGGED, &chip->desc[id].flags))
+ gpiochip_free_own_desc(&chip->desc[id]);
+ }
+}
+
+/**
+ * gpiod_get_array - obtain multiple GPIOs from a multi-index GPIO function
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
+ *
+ * This function acquires all the GPIOs defined under a given function.
+ *
+ * Return a struct gpio_descs containing an array of descriptors, -ENOENT if
+ * no GPIO has been assigned to the requested function, or another IS_ERR()
+ * code if an error occurred while trying to acquire the GPIOs.
+ */
+struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+{
+ struct gpio_desc *desc;
+ struct gpio_descs *descs;
+ int count;
+
+ count = gpiod_count(dev, con_id);
+ if (count < 0)
+ return ERR_PTR(count);
+
+ descs = kzalloc(sizeof(*descs) + sizeof(descs->desc[0]) * count,
+ GFP_KERNEL);
+ if (!descs)
+ return ERR_PTR(-ENOMEM);
+
+ for (descs->ndescs = 0; descs->ndescs < count; ) {
+ desc = gpiod_get_index(dev, con_id, descs->ndescs, flags);
+ if (IS_ERR(desc)) {
+ gpiod_put_array(descs);
+ return ERR_CAST(desc);
+ }
+ descs->desc[descs->ndescs] = desc;
+ descs->ndescs++;
+ }
+ return descs;
+}
+EXPORT_SYMBOL_GPL(gpiod_get_array);
+
+/**
+ * gpiod_get_array_optional - obtain multiple GPIOs from a multi-index GPIO
+ * function
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
+ *
+ * This is equivalent to gpiod_get_array(), except that when no GPIO was
+ * assigned to the requested function it will return NULL.
+ */
+struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+{
+ struct gpio_descs *descs;
+
+ descs = gpiod_get_array(dev, con_id, flags);
+ if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+ return NULL;
+
+ return descs;
+}
+EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
+
+/**
* gpiod_put - dispose of a GPIO descriptor
* @desc: GPIO descriptor to dispose of
*
@@ -2017,6 +2233,21 @@ void gpiod_put(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_put);
+/**
+ * gpiod_put_array - dispose of multiple GPIO descriptors
+ * @descs: struct gpio_descs containing an array of descriptors
+ */
+void gpiod_put_array(struct gpio_descs *descs)
+{
+ unsigned int i;
+
+ for (i = 0; i < descs->ndescs; i++)
+ gpiod_put(descs->desc[i]);
+
+ kfree(descs);
+}
+EXPORT_SYMBOL_GPL(gpiod_put_array);
+
#ifdef CONFIG_DEBUG_FS
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
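
For completeness, a consumer-side sketch of the new array API added above, assuming a hypothetical device exposing several "led" GPIOs (connection ID and values are illustrative):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_drive_leds(struct device *dev)
{
	struct gpio_descs *leds;
	unsigned int i;

	/* optional: check how many lines the "led" function provides */
	if (gpiod_count(dev, "led") < 1)
		return -ENOENT;

	leds = gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(leds))
		return PTR_ERR(leds);

	for (i = 0; i < leds->ndescs; i++)
		gpiod_set_value(leds->desc[i], 1);	/* switch each line on */

	gpiod_put_array(leds);
	return 0;
}
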
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 550a5eafbd38..594b1798c0e7 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -17,6 +17,8 @@
enum of_gpio_flags;
+struct acpi_device;
+
/**
* struct acpi_gpio_info - ACPI GPIO specific information
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
@@ -27,6 +29,9 @@ struct acpi_gpio_info {
bool active_low;
};
+/* gpio suffixes used for ACPI and device tree lookup */
+static const char * const gpio_suffixes[] = { "gpios", "gpio" };
+
#ifdef CONFIG_ACPI
void acpi_gpiochip_add(struct gpio_chip *chip);
void acpi_gpiochip_remove(struct gpio_chip *chip);
@@ -37,6 +42,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
const char *propname, int index,
struct acpi_gpio_info *info);
+
+int acpi_gpio_count(struct device *dev, const char *con_id);
#else
static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
@@ -53,6 +60,11 @@ acpi_get_gpiod_by_index(struct acpi_device *adev, const char *propname,
{
return ERR_PTR(-ENOSYS);
}
+
+static inline int acpi_gpio_count(struct device *dev, const char *con_id)
+{
+ return -ENODEV;
+}
#endif
struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
@@ -78,6 +90,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */
+#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
#define ID_SHIFT 16 /* add new flags before this one */
@@ -89,6 +102,8 @@ struct gpio_desc {
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
+int gpiod_hog(struct gpio_desc *desc, const char *name,
+ unsigned long lflags, enum gpiod_flags dflags);
/*
* Return the GPIO number of the passed descriptor relative to its chip
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 151a050129e7..47f2ce81b412 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -165,6 +165,15 @@ config DRM_SAVAGE
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
+config DRM_VGEM
+ tristate "Virtual GEM provider"
+ depends on DRM
+ help
+ Choose this option to get a virtual graphics memory manager,
+ as used by Mesa's software renderer for enhanced performance.
+ If M is selected the module will be called vgem.
+
+
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2c239b99de64..7d4944e1a60c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
+obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5c50aa8a8908..19a4fba46e4e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -435,21 +435,22 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
{
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
- struct timespec time;
+ struct timespec64 time;
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
/* Reading GPU clock counter from KGD */
- args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter =
+ dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
/* No access to rdtsc. Using raw monotonic time */
- getrawmonotonic(&time);
- args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+ getrawmonotonic64(&time);
+ args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
- get_monotonic_boottime(&time);
- args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
+ get_monotonic_boottime64(&time);
+ args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 5bc32c26b989..ca7f2d3af2ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -94,7 +94,8 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
return NULL;
}
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
struct kfd_dev *kfd;
@@ -112,6 +113,11 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
kfd->device_info = device_info;
kfd->pdev = pdev;
kfd->init_complete = false;
+ kfd->kfd2kgd = f2g;
+
+ mutex_init(&kfd->doorbell_mutex);
+ memset(&kfd->doorbell_available_index, 0,
+ sizeof(kfd->doorbell_available_index));
return kfd;
}
@@ -200,8 +206,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
/* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
- if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
+ if (kfd->kfd2kgd->init_gtt_mem_allocation(
+ kfd->kgd, size, &kfd->gtt_mem,
+ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
dev_err(kfd_device,
"Could not allocate %d bytes for device (%x:%x)\n",
size, kfd->pdev->vendor, kfd->pdev->device);
@@ -270,7 +277,7 @@ device_iommu_pasid_error:
kfd_topology_add_device_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device (%x:%x) NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -285,7 +292,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
amd_iommu_free_device(kfd->pdev);
kfd_topology_remove_device(kfd);
kfd_gtt_sa_fini(kfd);
- kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d8135adb2238..596ee5cd3b84 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -82,7 +82,8 @@ static inline unsigned int get_pipes_num_cpsch(void)
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
+ return dqm->dev->kfd2kgd->program_sh_mem_settings(
+ dqm->dev->kgd, qpd->vmid,
qpd->sh_mem_config,
qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit,
@@ -429,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
BUG_ON(!dqm || !qpd);
- BUG_ON(!list_empty(&qpd->queues_list));
+ pr_debug("In func %s\n", __func__);
- pr_debug("kfd: In func %s\n", __func__);
+ pr_debug("qpd->queues_list is %s\n",
+ list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0;
mutex_lock(&dqm->lock);
@@ -457,9 +459,12 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
{
uint32_t pasid_mapping;
- pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
- ATC_VMID_PASID_MAPPING_VALID;
- return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
+ pasid_mapping = (pasid == 0) ? 0 :
+ (uint32_t)pasid |
+ ATC_VMID_PASID_MAPPING_VALID;
+
+ return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
+ dqm->dev->kgd, pasid_mapping,
vmid);
}
@@ -511,7 +516,7 @@ int init_pipelines(struct device_queue_manager *dqm,
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
- kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
+ dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
@@ -878,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
return -ENOMEM;
}
+ init_sdma_vm(dqm, q, qpd);
+
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0)
@@ -905,7 +912,7 @@ out:
return retval;
}
-static int fence_wait_timeout(unsigned int *fence_addr,
+static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned long timeout)
{
@@ -961,7 +968,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
- fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+ amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 1a9b355dd114..17e56dcc8540 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -32,9 +32,6 @@
* and that's assures that any user process won't get access to the
* kernel doorbells page
*/
-static DEFINE_MUTEX(doorbell_mutex);
-static unsigned long doorbell_available_index[
- DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
#define KERNEL_DOORBELL_PASID 1
#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
@@ -170,12 +167,12 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
BUG_ON(!kfd || !doorbell_off);
- mutex_lock(&doorbell_mutex);
- inx = find_first_zero_bit(doorbell_available_index,
+ mutex_lock(&kfd->doorbell_mutex);
+ inx = find_first_zero_bit(kfd->doorbell_available_index,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
- __set_bit(inx, doorbell_available_index);
- mutex_unlock(&doorbell_mutex);
+ __set_bit(inx, kfd->doorbell_available_index);
+ mutex_unlock(&kfd->doorbell_mutex);
if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return NULL;
@@ -203,9 +200,9 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
- mutex_lock(&doorbell_mutex);
- __clear_bit(inx, doorbell_available_index);
- mutex_unlock(&doorbell_mutex);
+ mutex_lock(&kfd->doorbell_mutex);
+ __clear_bit(inx, kfd->doorbell_available_index);
+ mutex_unlock(&kfd->doorbell_mutex);
}
inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 3f34ae16f075..4e0a68f13a77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -34,7 +34,6 @@
#define KFD_DRIVER_MINOR 7
#define KFD_DRIVER_PATCHLEVEL 1
-const struct kfd2kgd_calls *kfd2kgd;
static const struct kgd2kfd_calls kgd2kfd = {
.exit = kgd2kfd_exit,
.probe = kgd2kfd_probe,
@@ -55,9 +54,7 @@ module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
-bool kgd2kfd_init(unsigned interface_version,
- const struct kfd2kgd_calls *f2g,
- const struct kgd2kfd_calls **g2f)
+bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
{
/*
* Only one interface version is supported,
@@ -66,11 +63,6 @@ bool kgd2kfd_init(unsigned interface_version,
if (interface_version != KFD_INTERFACE_VERSION)
return false;
- /* Protection against multiple amd kgd loads */
- if (kfd2kgd)
- return true;
-
- kfd2kgd = f2g;
*g2f = &kgd2kfd;
return true;
@@ -85,8 +77,6 @@ static int __init kfd_module_init(void)
{
int err;
- kfd2kgd = NULL;
-
/* Verify module parameters */
if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index a09e18a339f3..434979428fc0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -151,14 +151,15 @@ static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr)
{
- return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ return mm->dev->kfd2kgd->hqd_load
+ (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr)
{
- return kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
}
static int update_mqd(struct mqd_manager *mm, void *mqd,
@@ -245,7 +246,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
pipe_id, queue_id);
}
@@ -258,7 +259,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -266,7 +267,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
+ return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
@@ -275,7 +276,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 5a44f2fecf38..f21fccebd75b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -148,6 +148,11 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
+ const struct kfd2kgd_calls *kfd2kgd;
+ struct mutex doorbell_mutex;
+ unsigned long doorbell_available_index[DIV_ROUND_UP(
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+
void *gtt_mem;
uint64_t gtt_start_gpu_addr;
void *gtt_start_cpu_ptr;
@@ -164,13 +169,12 @@ struct kfd_dev {
/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
- const struct kgd2kfd_shared_resources *gpu_resources);
+ const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-extern const struct kfd2kgd_calls *kfd2kgd;
-
enum kfd_mempool {
KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -378,8 +382,6 @@ struct qcm_process_device {
/* The Device Queue Manager that owns this data */
struct device_queue_manager *dqm;
struct process_queue_manager *pqm;
- /* Device Queue Manager lock */
- struct mutex *lock;
/* Queues list */
struct list_head queues_list;
struct list_head priv_queue_list;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a369c149d172..945d6226dc51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -162,10 +162,16 @@ static void kfd_process_wq_release(struct work_struct *work)
p = my_work->p;
+ pr_debug("Releasing process (pasid %d) in workqueue\n",
+ p->pasid);
+
mutex_lock(&p->mutex);
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
+ pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
+ pdd->dev->id, p->pasid);
+
amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
list_del(&pdd->per_device_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 498399323a8c..e469c4b2e8cc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -726,13 +726,14 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
}
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
- kfd2kgd->get_max_engine_clock_in_mhz(
+ dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
dev->gpu->kgd));
+
sysfs_show_64bit_prop(buffer, "local_mem_size",
- kfd2kgd->get_vmem_size(dev->gpu->kgd));
+ (unsigned long long int) 0);
sysfs_show_32bit_prop(buffer, "fw_version",
- kfd2kgd->get_fw_version(
+ dev->gpu->kfd2kgd->get_fw_version(
dev->gpu->kgd,
KGD_ENGINE_MEC1));
}
@@ -1099,8 +1100,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
buf[2] = gpu->pdev->subsystem_device;
buf[3] = gpu->pdev->device;
buf[4] = gpu->pdev->bus->number;
- buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
- buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+ buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
+ & 0xffffffff);
+ buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
for (i = 0, hashout = 0; i < 7; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 239bc16a1ddd..dabd94446b7b 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -77,37 +77,6 @@ struct kgd2kfd_shared_resources {
};
/**
- * struct kgd2kfd_calls
- *
- * @exit: Notifies amdkfd that kgd module is unloaded
- *
- * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
- *
- * @device_init: Initialize the newly probed device (if it is a device that
- * amdkfd supports)
- *
- * @device_exit: Notifies amdkfd about a removal of a kgd device
- *
- * @suspend: Notifies amdkfd about a suspend action done to a kgd device
- *
- * @resume: Notifies amdkfd about a resume action done to a kgd device
- *
- * This structure contains function callback pointers so the kgd driver
- * will notify to the amdkfd about certain status changes.
- *
- */
-struct kgd2kfd_calls {
- void (*exit)(void);
- struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
- bool (*device_init)(struct kfd_dev *kfd,
- const struct kgd2kfd_shared_resources *gpu_resources);
- void (*device_exit)(struct kfd_dev *kfd);
- void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
- void (*suspend)(struct kfd_dev *kfd);
- int (*resume)(struct kfd_dev *kfd);
-};
-
-/**
* struct kfd2kgd_calls
*
* @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
@@ -196,8 +165,39 @@ struct kfd2kgd_calls {
enum kgd_engine_type type);
};
+/**
+ * struct kgd2kfd_calls
+ *
+ * @exit: Notifies amdkfd that kgd module is unloaded
+ *
+ * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
+ *
+ * @device_init: Initialize the newly probed device (if it is a device that
+ * amdkfd supports)
+ *
+ * @device_exit: Notifies amdkfd about a removal of a kgd device
+ *
+ * @suspend: Notifies amdkfd about a suspend action done to a kgd device
+ *
+ * @resume: Notifies amdkfd about a resume action done to a kgd device
+ *
+ * This structure contains function callback pointers so the kgd driver
+ * will notify to the amdkfd about certain status changes.
+ *
+ */
+struct kgd2kfd_calls {
+ void (*exit)(void);
+ struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
+ const struct kfd2kgd_calls *f2g);
+ bool (*device_init)(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+ void (*device_exit)(struct kfd_dev *kfd);
+ void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+ void (*suspend)(struct kfd_dev *kfd);
+ int (*resume)(struct kfd_dev *kfd);
+};
+
bool kgd2kfd_init(unsigned interface_version,
- const struct kfd2kgd_calls *f2g,
const struct kgd2kfd_calls **g2f);
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
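
The net effect of the interface change above is that the kfd2kgd function table no longer lives in a single module-wide pointer but is handed over per device at probe time. A schematic of the kgd (graphics driver) side under the new layout; the table contents and function names are placeholders, not copied from radeon or amdgpu:

#include "kgd_kfd_interface.h"

/* placeholder table; a real driver fills in every kfd2kgd_calls member */
static const struct kfd2kgd_calls example_f2g = {
	/* .init_gtt_mem_allocation = ..., .hqd_load = ..., etc. */
};

static struct kfd_dev *example_attach_kfd(const struct kgd2kfd_calls *g2f,
					  struct kgd_dev *kgd,
					  struct pci_dev *pdev)
{
	/* each probed device now carries its own callback table */
	return g2f->probe(kgd, pdev, &example_f2g);
}
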
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index ef5feeecec84..580e10acaa3a 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -538,8 +538,14 @@ struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
int flags)
{
- return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
- O_RDWR, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &armada_gem_prime_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
index 4126d43b5057..3c4023e142d0 100644
--- a/drivers/gpu/drm/armada/armada_output.h
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -9,7 +9,7 @@
#define ARMADA_CONNETOR_H
#define encoder_helper_funcs(encoder) \
- ((struct drm_encoder_helper_funcs *)encoder->helper_private)
+ ((const struct drm_encoder_helper_funcs *)encoder->helper_private)
struct armada_output_type {
int connector_type;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index b3e3068c6ec0..f69b92535505 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -21,6 +21,7 @@
#include <linux/clk.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -37,14 +38,14 @@
* @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
* @event: pointer to the current page flip event
* @id: CRTC id (returned by drm_crtc_index)
- * @dpms: DPMS mode
+ * @enabled: CRTC state
*/
struct atmel_hlcdc_crtc {
struct drm_crtc base;
struct atmel_hlcdc_dc *dc;
struct drm_pending_vblank_event *event;
int id;
- int dpms;
+ bool enabled;
};
static inline struct atmel_hlcdc_crtc *
@@ -53,86 +54,17 @@ drm_crtc_to_atmel_hlcdc_crtc(struct drm_crtc *crtc)
return container_of(crtc, struct atmel_hlcdc_crtc, base);
}
-static void atmel_hlcdc_crtc_dpms(struct drm_crtc *c, int mode)
-{
- struct drm_device *dev = c->dev;
- struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- struct regmap *regmap = crtc->dc->hlcdc->regmap;
- unsigned int status;
-
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (crtc->dpms == mode)
- return;
-
- pm_runtime_get_sync(dev->dev);
-
- if (mode != DRM_MODE_DPMS_ON) {
- regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- (status & ATMEL_HLCDC_DISP))
- cpu_relax();
-
- regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- (status & ATMEL_HLCDC_SYNC))
- cpu_relax();
-
- regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- (status & ATMEL_HLCDC_PIXEL_CLK))
- cpu_relax();
-
- clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
-
- pm_runtime_allow(dev->dev);
- } else {
- pm_runtime_forbid(dev->dev);
-
- clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
-
- regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- !(status & ATMEL_HLCDC_PIXEL_CLK))
- cpu_relax();
-
-
- regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- !(status & ATMEL_HLCDC_SYNC))
- cpu_relax();
-
- regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
- while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
- !(status & ATMEL_HLCDC_DISP))
- cpu_relax();
- }
-
- pm_runtime_put_sync(dev->dev);
-
- crtc->dpms = mode;
-}
-
-static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
- struct drm_plane *plane = c->primary;
- struct drm_framebuffer *fb;
+ struct drm_display_mode *adj = &c->state->adjusted_mode;
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
unsigned int cfg;
int div;
- if (atmel_hlcdc_dc_mode_valid(crtc->dc, adj) != MODE_OK)
- return -EINVAL;
-
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start;
@@ -156,7 +88,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
cfg = 0;
prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
- mode_rate = mode->crtc_clock * 1000;
+ mode_rate = adj->crtc_clock * 1000;
if ((prate / 2) < mode_rate) {
prate *= 2;
cfg |= ATMEL_HLCDC_CLKSEL;
@@ -174,10 +106,10 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
cfg = 0;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (adj->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= ATMEL_HLCDC_VSPOL;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (adj->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= ATMEL_HLCDC_HSPOL;
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
@@ -187,77 +119,155 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK,
cfg);
+}
+
+static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
+{
+ struct drm_device *dev = c->dev;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct regmap *regmap = crtc->dc->hlcdc->regmap;
+ unsigned int status;
+
+ if (!crtc->enabled)
+ return;
+
+ drm_crtc_vblank_off(c);
+
+ pm_runtime_get_sync(dev->dev);
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_DISP))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_SYNC))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ (status & ATMEL_HLCDC_PIXEL_CLK))
+ cpu_relax();
+
+ clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
+ pinctrl_pm_select_sleep_state(dev->dev);
+
+ pm_runtime_allow(dev->dev);
- fb = plane->fb;
- plane->fb = old_fb;
+ pm_runtime_put_sync(dev->dev);
- return atmel_hlcdc_plane_update_with_mode(plane, c, fb, 0, 0,
- adj->hdisplay, adj->vdisplay,
- x << 16, y << 16,
- adj->hdisplay << 16,
- adj->vdisplay << 16,
- adj);
+ crtc->enabled = false;
}
-int atmel_hlcdc_crtc_mode_set_base(struct drm_crtc *c, int x, int y,
- struct drm_framebuffer *old_fb)
+static void atmel_hlcdc_crtc_enable(struct drm_crtc *c)
{
- struct drm_plane *plane = c->primary;
- struct drm_framebuffer *fb = plane->fb;
- struct drm_display_mode *mode = &c->hwmode;
-
- plane->fb = old_fb;
-
- return plane->funcs->update_plane(plane, c, fb,
- 0, 0,
- mode->hdisplay,
- mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16,
- mode->vdisplay << 16);
+ struct drm_device *dev = c->dev;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ struct regmap *regmap = crtc->dc->hlcdc->regmap;
+ unsigned int status;
+
+ if (crtc->enabled)
+ return;
+
+ pm_runtime_get_sync(dev->dev);
+
+ pm_runtime_forbid(dev->dev);
+
+ pinctrl_pm_select_default_state(dev->dev);
+ clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_PIXEL_CLK))
+ cpu_relax();
+
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_SYNC))
+ cpu_relax();
+
+ regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
+ while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+ !(status & ATMEL_HLCDC_DISP))
+ cpu_relax();
+
+ pm_runtime_put_sync(dev->dev);
+
+ drm_crtc_vblank_on(c);
+
+ crtc->enabled = true;
}
-static void atmel_hlcdc_crtc_prepare(struct drm_crtc *crtc)
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *c)
{
- atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+ if (crtc->enabled) {
+ atmel_hlcdc_crtc_disable(c);
+ /* save enable state for resume */
+ crtc->enabled = true;
+ }
}
-static void atmel_hlcdc_crtc_commit(struct drm_crtc *crtc)
+void atmel_hlcdc_crtc_resume(struct drm_crtc *c)
{
- atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+ if (crtc->enabled) {
+ crtc->enabled = false;
+ atmel_hlcdc_crtc_enable(c);
+ }
}
-static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
+ struct drm_crtc_state *s)
{
- return true;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+ if (atmel_hlcdc_dc_mode_valid(crtc->dc, &s->adjusted_mode) != MODE_OK)
+ return -EINVAL;
+
+ return atmel_hlcdc_plane_prepare_disc_area(s);
}
-static void atmel_hlcdc_crtc_disable(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
{
- struct drm_plane *plane;
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- crtc->primary->funcs->disable_plane(crtc->primary);
+ if (c->state->event) {
+ c->state->event->pipe = drm_crtc_index(c);
- drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
- if (plane->crtc != crtc)
- continue;
+ WARN_ON(drm_crtc_vblank_get(c) != 0);
- plane->funcs->disable_plane(crtc->primary);
- plane->crtc = NULL;
+ crtc->event = c->state->event;
+ c->state->event = NULL;
}
}
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+{
+ /* TODO: write common plane control register if available */
+}
+
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
.mode_fixup = atmel_hlcdc_crtc_mode_fixup,
- .dpms = atmel_hlcdc_crtc_dpms,
- .mode_set = atmel_hlcdc_crtc_mode_set,
- .mode_set_base = atmel_hlcdc_crtc_mode_set_base,
- .prepare = atmel_hlcdc_crtc_prepare,
- .commit = atmel_hlcdc_crtc_commit,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
.disable = atmel_hlcdc_crtc_disable,
+ .enable = atmel_hlcdc_crtc_enable,
+ .atomic_check = atmel_hlcdc_crtc_atomic_check,
+ .atomic_begin = atmel_hlcdc_crtc_atomic_begin,
+ .atomic_flush = atmel_hlcdc_crtc_atomic_flush,
};
static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
@@ -306,61 +316,13 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
-static int atmel_hlcdc_crtc_page_flip(struct drm_crtc *c,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- struct atmel_hlcdc_plane_update_req req;
- struct drm_plane *plane = c->primary;
- struct drm_device *dev = c->dev;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (crtc->event)
- ret = -EBUSY;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- if (ret)
- return ret;
-
- memset(&req, 0, sizeof(req));
- req.crtc_x = 0;
- req.crtc_y = 0;
- req.crtc_h = c->mode.crtc_vdisplay;
- req.crtc_w = c->mode.crtc_hdisplay;
- req.src_x = c->x << 16;
- req.src_y = c->y << 16;
- req.src_w = req.crtc_w << 16;
- req.src_h = req.crtc_h << 16;
- req.fb = fb;
-
- ret = atmel_hlcdc_plane_prepare_update_req(plane, &req, &c->hwmode);
- if (ret)
- return ret;
-
- if (event) {
- drm_vblank_get(c->dev, crtc->id);
- spin_lock_irqsave(&dev->event_lock, flags);
- crtc->event = event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- ret = atmel_hlcdc_plane_apply_update_req(plane, &req);
- if (ret)
- crtc->event = NULL;
- else
- plane->fb = fb;
-
- return ret;
-}
-
static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
- .page_flip = atmel_hlcdc_crtc_page_flip,
- .set_config = drm_crtc_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_config = drm_atomic_helper_set_config,
.destroy = atmel_hlcdc_crtc_destroy,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
@@ -375,7 +337,6 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
if (!crtc)
return -ENOMEM;
- crtc->dpms = DRM_MODE_DPMS_OFF;
crtc->dc = dc;
ret = drm_crtc_init_with_planes(dev, &crtc->base,
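
The hunks above replace the legacy .dpms/.prepare/.commit CRTC hooks with atomic .enable/.disable callbacks that only track a single bool. For reference, a minimal sketch of that pattern follows; the foo_* names and the power-up/power-down comments are placeholders, not code from this driver.

struct foo_crtc {
	struct drm_crtc base;
	bool enabled;
};

static inline struct foo_crtc *to_foo_crtc(struct drm_crtc *c)
{
	return container_of(c, struct foo_crtc, base);
}

static void foo_crtc_enable(struct drm_crtc *c)
{
	struct foo_crtc *crtc = to_foo_crtc(c);

	if (crtc->enabled)
		return;

	/* power up the controller and start the pixel clock here */
	drm_crtc_vblank_on(c);
	crtc->enabled = true;
}

static void foo_crtc_disable(struct drm_crtc *c)
{
	struct foo_crtc *crtc = to_foo_crtc(c);

	if (!crtc->enabled)
		return;

	drm_crtc_vblank_off(c);
	/* stop scanout and gate the clocks here */
	crtc->enabled = false;
}
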
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c1cb17493e0d..60b0c13d7ff5 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -222,6 +222,8 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
.output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
@@ -317,6 +319,8 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
goto err_periph_clk_disable;
}
+ drm_mode_config_reset(dev);
+
ret = drm_vblank_init(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
@@ -555,6 +559,41 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct drm_crtc *crtc;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ drm_modeset_lock_all(drm_dev);
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+ atmel_hlcdc_crtc_suspend(crtc);
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+
+static int atmel_hlcdc_dc_drm_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct drm_crtc *crtc;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ drm_modeset_lock_all(drm_dev);
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+ atmel_hlcdc_crtc_resume(crtc);
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
+ atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume);
+
static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
{ .compatible = "atmel,hlcdc-display-controller" },
{ },
@@ -565,6 +604,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.remove = atmel_hlcdc_dc_drm_remove,
.driver = {
.name = "atmel-hlcdc-display-controller",
+ .pm = &atmel_hlcdc_dc_drm_pm_ops,
.of_match_table = atmel_hlcdc_dc_of_match,
},
};
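
The new system-sleep hooks above walk the CRTC list under the modeset locks and reuse the atomic enable/disable paths. A condensed sketch of the same wiring is shown below, with hypothetical foo_* names and assuming a foo_crtc_suspend()/foo_crtc_resume() pair like the ones added to the CRTC code; the resulting dev_pm_ops is then plugged into the platform driver's .driver.pm field exactly as in the hunk above.

#ifdef CONFIG_PM
static int foo_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct drm_crtc *crtc;

	drm_modeset_lock_all(drm);
	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
		foo_crtc_suspend(crtc);	/* disables, remembers "was enabled" */
	drm_modeset_unlock_all(drm);
	return 0;
}

static int foo_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct drm_crtc *crtc;

	drm_modeset_lock_all(drm);
	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
		foo_crtc_resume(crtc);	/* re-enables what was enabled */
	drm_modeset_unlock_all(drm);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_drm_pm_ops, foo_drm_suspend, foo_drm_resume);
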
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 7bc96af3397a..cf6b375bc38d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -26,11 +26,14 @@
#include <linux/irqdomain.h>
#include <linux/pwm.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
#include "atmel_hlcdc_layer.h"
@@ -69,7 +72,6 @@ struct atmel_hlcdc_dc_desc {
*/
struct atmel_hlcdc_plane_properties {
struct drm_property *alpha;
- struct drm_property *rotation;
};
/**
@@ -84,7 +86,6 @@ struct atmel_hlcdc_plane {
struct drm_plane base;
struct atmel_hlcdc_layer layer;
struct atmel_hlcdc_plane_properties *properties;
- unsigned int rotation;
};
static inline struct atmel_hlcdc_plane *
@@ -100,43 +101,6 @@ atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *l)
}
/**
- * Atmel HLCDC Plane update request structure.
- *
- * @crtc_x: x position of the plane relative to the CRTC
- * @crtc_y: y position of the plane relative to the CRTC
- * @crtc_w: visible width of the plane
- * @crtc_h: visible height of the plane
- * @src_x: x buffer position
- * @src_y: y buffer position
- * @src_w: buffer width
- * @src_h: buffer height
- * @fb: framebuffer object object
- * @bpp: bytes per pixel deduced from pixel_format
- * @offsets: offsets to apply to the GEM buffers
- * @xstride: value to add to the pixel pointer between each line
- * @pstride: value to add to the pixel pointer between each pixel
- * @nplanes: number of planes (deduced from pixel_format)
- */
-struct atmel_hlcdc_plane_update_req {
- int crtc_x;
- int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
- uint32_t src_x;
- uint32_t src_y;
- uint32_t src_w;
- uint32_t src_h;
- struct drm_framebuffer *fb;
-
- /* These fields are private and should not be touched */
- int bpp[ATMEL_HLCDC_MAX_PLANES];
- unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
- int xstride[ATMEL_HLCDC_MAX_PLANES];
- int pstride[ATMEL_HLCDC_MAX_PLANES];
- int nplanes;
-};
-
-/**
* Atmel HLCDC Planes.
*
* This structure stores the instantiated HLCDC Planes and can be accessed by
@@ -184,28 +148,16 @@ int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
struct atmel_hlcdc_planes *
atmel_hlcdc_create_planes(struct drm_device *dev);
-int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req,
- const struct drm_display_mode *mode);
-
-int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req);
-
-int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- const struct drm_display_mode *mode);
+int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state);
void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
struct drm_file *file);
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc);
+void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc);
+
int atmel_hlcdc_crtc_create(struct drm_device *dev);
int atmel_hlcdc_create_outputs(struct drm_device *dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index e79bd9ba474b..377e43cea9dd 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -298,7 +298,7 @@ void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
spin_unlock_irqrestore(&layer->lock, flags);
}
-int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
+void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
struct atmel_hlcdc_layer_update *upd = &layer->update;
@@ -341,8 +341,6 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
dma->status = ATMEL_HLCDC_LAYER_DISABLED;
spin_unlock_irqrestore(&layer->lock, flags);
-
- return 0;
}
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
index 27e56c0862ec..9beabc940bce 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
@@ -120,6 +120,7 @@
#define ATMEL_HLCDC_LAYER_DISCEN BIT(11)
#define ATMEL_HLCDC_LAYER_GA_SHIFT 16
#define ATMEL_HLCDC_LAYER_GA_MASK GENMASK(23, ATMEL_HLCDC_LAYER_GA_SHIFT)
+#define ATMEL_HLCDC_LAYER_GA(x) ((x) << ATMEL_HLCDC_LAYER_GA_SHIFT)
#define ATMEL_HLCDC_LAYER_CSC_CFG(p, o) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.csc + o)
@@ -376,7 +377,7 @@ int atmel_hlcdc_layer_init(struct drm_device *dev,
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
struct atmel_hlcdc_layer *layer);
-int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
+void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index c402192362c5..9c4513005310 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -86,25 +86,22 @@ atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output)
return container_of(output, struct atmel_hlcdc_panel, base);
}
-static void atmel_hlcdc_panel_encoder_dpms(struct drm_encoder *encoder,
- int mode)
+static void atmel_hlcdc_panel_encoder_enable(struct drm_encoder *encoder)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == rgb->dpms)
- return;
+ drm_panel_enable(panel->panel);
+}
- if (mode != DRM_MODE_DPMS_ON)
- drm_panel_disable(panel->panel);
- else
- drm_panel_enable(panel->panel);
+static void atmel_hlcdc_panel_encoder_disable(struct drm_encoder *encoder)
+{
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
+ struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- rgb->dpms = mode;
+ drm_panel_disable(panel->panel);
}
static bool
@@ -115,16 +112,6 @@ atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void atmel_hlcdc_panel_encoder_prepare(struct drm_encoder *encoder)
-{
- atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void atmel_hlcdc_panel_encoder_commit(struct drm_encoder *encoder)
-{
- atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
static void
atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -156,11 +143,10 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
}
static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
- .dpms = atmel_hlcdc_panel_encoder_dpms,
.mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
- .prepare = atmel_hlcdc_panel_encoder_prepare,
- .commit = atmel_hlcdc_panel_encoder_commit,
.mode_set = atmel_hlcdc_rgb_encoder_mode_set,
+ .disable = atmel_hlcdc_panel_encoder_disable,
+ .enable = atmel_hlcdc_panel_encoder_enable,
};
static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
@@ -226,10 +212,13 @@ atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = atmel_hlcdc_panel_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = atmel_hlcdc_panel_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
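
Because the diff above is interleaved, the resulting connector callback table is easier to read in condensed form. This is a sketch of the same wiring with hypothetical foo_* names; the drm_atomic_helper_* and drm_helper_probe_single_connector_modes symbols are the stock helpers referenced in the hunk.

static const struct drm_connector_funcs foo_panel_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = foo_panel_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = foo_panel_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
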
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index c5892dcfd745..be9fa8220499 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -19,6 +19,59 @@
#include "atmel_hlcdc_dc.h"
+/**
+ * Atmel HLCDC Plane state structure.
+ *
+ * @base: DRM plane state
+ * @crtc_x: x position of the plane relative to the CRTC
+ * @crtc_y: y position of the plane relative to the CRTC
+ * @crtc_w: visible width of the plane
+ * @crtc_h: visible height of the plane
+ * @src_x: x buffer position
+ * @src_y: y buffer position
+ * @src_w: buffer width
+ * @src_h: buffer height
+ * @alpha: alpha blending of the plane
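+ * @disc_updated: true if the discard area changed and must be reprogrammed
+ * @disc_x: x position of the discard area (primary plane region hidden by an opaque overlay)
+ * @disc_y: y position of the discard area
+ * @disc_w: width of the discard area
+ * @disc_h: height of the discard area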
+ * @bpp: bytes per pixel deduced from pixel_format
+ * @offsets: offsets to apply to the GEM buffers
+ * @xstride: value to add to the pixel pointer between each line
+ * @pstride: value to add to the pixel pointer between each pixel
+ * @nplanes: number of planes (deduced from pixel_format)
+ */
+struct atmel_hlcdc_plane_state {
+ struct drm_plane_state base;
+ int crtc_x;
+ int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+ uint32_t src_x;
+ uint32_t src_y;
+ uint32_t src_w;
+ uint32_t src_h;
+
+ u8 alpha;
+
+ bool disc_updated;
+
+ int disc_x;
+ int disc_y;
+ int disc_w;
+ int disc_h;
+
+ /* These fields are private and should not be touched */
+ int bpp[ATMEL_HLCDC_MAX_PLANES];
+ unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
+ int xstride[ATMEL_HLCDC_MAX_PLANES];
+ int pstride[ATMEL_HLCDC_MAX_PLANES];
+ int nplanes;
+};
+
+static inline struct atmel_hlcdc_plane_state *
+drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s)
+{
+ return container_of(s, struct atmel_hlcdc_plane_state, base);
+}
+
#define SUBPIXEL_MASK 0xffff
static uint32_t rgb_formats[] = {
@@ -128,7 +181,7 @@ static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
return 0;
}
-static bool atmel_hlcdc_format_embedds_alpha(u32 format)
+static bool atmel_hlcdc_format_embeds_alpha(u32 format)
{
int i;
@@ -204,7 +257,7 @@ static u32 heo_upscaling_ycoef[] = {
static void
atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_cfg_layout *layout =
&plane->layer.desc->layout;
@@ -213,69 +266,69 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->size,
0xffffffff,
- (req->crtc_w - 1) |
- ((req->crtc_h - 1) << 16));
+ (state->crtc_w - 1) |
+ ((state->crtc_h - 1) << 16));
if (layout->memsize)
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->memsize,
0xffffffff,
- (req->src_w - 1) |
- ((req->src_h - 1) << 16));
+ (state->src_w - 1) |
+ ((state->src_h - 1) << 16));
if (layout->pos)
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->pos,
0xffffffff,
- req->crtc_x |
- (req->crtc_y << 16));
+ state->crtc_x |
+ (state->crtc_y << 16));
/* TODO: rework the rescaling part */
- if (req->crtc_w != req->src_w || req->crtc_h != req->src_h) {
+ if (state->crtc_w != state->src_w || state->crtc_h != state->src_h) {
u32 factor_reg = 0;
- if (req->crtc_w != req->src_w) {
+ if (state->crtc_w != state->src_w) {
int i;
u32 factor;
u32 *coeff_tab = heo_upscaling_xcoef;
u32 max_memsize;
- if (req->crtc_w < req->src_w)
+ if (state->crtc_w < state->src_w)
coeff_tab = heo_downscaling_xcoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_xcoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
17 + i,
0xffffffff,
coeff_tab[i]);
- factor = ((8 * 256 * req->src_w) - (256 * 4)) /
- req->crtc_w;
+ factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+ state->crtc_w;
factor++;
- max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
+ max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
2048;
- if (max_memsize > req->src_w)
+ if (max_memsize > state->src_w)
factor--;
factor_reg |= factor | 0x80000000;
}
- if (req->crtc_h != req->src_h) {
+ if (state->crtc_h != state->src_h) {
int i;
u32 factor;
u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize;
- if (req->crtc_w < req->src_w)
+ if (state->crtc_w < state->src_w)
coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i,
0xffffffff,
coeff_tab[i]);
- factor = ((8 * 256 * req->src_w) - (256 * 4)) /
- req->crtc_w;
+ factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+ state->crtc_w;
factor++;
- max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
+ max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
2048;
- if (max_memsize > req->src_w)
+ if (max_memsize > state->src_w)
factor--;
factor_reg |= (factor << 16) | 0x80000000;
}
@@ -287,7 +340,7 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
static void
atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_cfg_layout *layout =
&plane->layer.desc->layout;
@@ -297,10 +350,11 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER;
- if (atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format))
+ if (atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format))
cfg |= ATMEL_HLCDC_LAYER_LAEN;
else
- cfg |= ATMEL_HLCDC_LAYER_GAEN;
+ cfg |= ATMEL_HLCDC_LAYER_GAEN |
+ ATMEL_HLCDC_LAYER_GA(state->alpha);
}
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -312,24 +366,26 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER |
ATMEL_HLCDC_LAYER_GAEN |
+ ATMEL_HLCDC_LAYER_GA_MASK |
ATMEL_HLCDC_LAYER_LAEN |
ATMEL_HLCDC_LAYER_OVR |
ATMEL_HLCDC_LAYER_DMA, cfg);
}
static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
u32 cfg;
int ret;
- ret = atmel_hlcdc_format_to_plane_mode(req->fb->pixel_format, &cfg);
+ ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->pixel_format,
+ &cfg);
if (ret)
return;
- if ((req->fb->pixel_format == DRM_FORMAT_YUV422 ||
- req->fb->pixel_format == DRM_FORMAT_NV61) &&
- (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
+ if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
+ state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
+ (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -341,7 +397,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
* Rotation optimization is not working on RGB888 (rotation is still
* working but without any optimization).
*/
- if (req->fb->pixel_format == DRM_FORMAT_RGB888)
+ if (state->base.fb->pixel_format == DRM_FORMAT_RGB888)
cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS;
else
cfg = 0;
@@ -352,73 +408,142 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
}
static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
- struct atmel_hlcdc_plane_update_req *req)
+ struct atmel_hlcdc_plane_state *state)
{
struct atmel_hlcdc_layer *layer = &plane->layer;
const struct atmel_hlcdc_layer_cfg_layout *layout =
&layer->desc->layout;
int i;
- atmel_hlcdc_layer_update_set_fb(&plane->layer, req->fb, req->offsets);
+ atmel_hlcdc_layer_update_set_fb(&plane->layer, state->base.fb,
+ state->offsets);
- for (i = 0; i < req->nplanes; i++) {
+ for (i = 0; i < state->nplanes; i++) {
if (layout->xstride[i]) {
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->xstride[i],
0xffffffff,
- req->xstride[i]);
+ state->xstride[i]);
}
if (layout->pstride[i]) {
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->pstride[i],
0xffffffff,
- req->pstride[i]);
+ state->pstride[i]);
}
}
}
-static int atmel_hlcdc_plane_check_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req,
- const struct drm_display_mode *mode)
+int
+atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- const struct atmel_hlcdc_layer_cfg_layout *layout =
- &plane->layer.desc->layout;
+ int disc_x = 0, disc_y = 0, disc_w = 0, disc_h = 0;
+ const struct atmel_hlcdc_layer_cfg_layout *layout;
+ struct atmel_hlcdc_plane_state *primary_state;
+ struct drm_plane_state *primary_s;
+ struct atmel_hlcdc_plane *primary;
+ struct drm_plane *ovl;
+
+ primary = drm_plane_to_atmel_hlcdc_plane(c_state->crtc->primary);
+ layout = &primary->layer.desc->layout;
+ if (!layout->disc_pos || !layout->disc_size)
+ return 0;
+
+ primary_s = drm_atomic_get_plane_state(c_state->state,
+ &primary->base);
+ if (IS_ERR(primary_s))
+ return PTR_ERR(primary_s);
+
+ primary_state = drm_plane_state_to_atmel_hlcdc_plane_state(primary_s);
+
+ drm_atomic_crtc_state_for_each_plane(ovl, c_state) {
+ struct atmel_hlcdc_plane_state *ovl_state;
+ struct drm_plane_state *ovl_s;
+
+ if (ovl == c_state->crtc->primary)
+ continue;
- if (!layout->size &&
- (mode->hdisplay != req->crtc_w ||
- mode->vdisplay != req->crtc_h))
- return -EINVAL;
+ ovl_s = drm_atomic_get_plane_state(c_state->state, ovl);
+ if (IS_ERR(ovl_s))
+ return PTR_ERR(ovl_s);
- if (plane->layer.desc->max_height &&
- req->crtc_h > plane->layer.desc->max_height)
- return -EINVAL;
+ ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s);
- if (plane->layer.desc->max_width &&
- req->crtc_w > plane->layer.desc->max_width)
- return -EINVAL;
+ if (!ovl_s->fb ||
+ atmel_hlcdc_format_embeds_alpha(ovl_s->fb->pixel_format) ||
+ ovl_state->alpha != 255)
+ continue;
- if ((req->crtc_h != req->src_h || req->crtc_w != req->src_w) &&
- (!layout->memsize ||
- atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format)))
- return -EINVAL;
+ /* TODO: implement a smarter hidden area detection */
+ if (ovl_state->crtc_h * ovl_state->crtc_w < disc_h * disc_w)
+ continue;
- if (req->crtc_x < 0 || req->crtc_y < 0)
- return -EINVAL;
+ disc_x = ovl_state->crtc_x;
+ disc_y = ovl_state->crtc_y;
+ disc_h = ovl_state->crtc_h;
+ disc_w = ovl_state->crtc_w;
+ }
- if (req->crtc_w + req->crtc_x > mode->hdisplay ||
- req->crtc_h + req->crtc_y > mode->vdisplay)
- return -EINVAL;
+ if (disc_x == primary_state->disc_x &&
+ disc_y == primary_state->disc_y &&
+ disc_w == primary_state->disc_w &&
+ disc_h == primary_state->disc_h)
+ return 0;
+
+
+ primary_state->disc_x = disc_x;
+ primary_state->disc_y = disc_y;
+ primary_state->disc_w = disc_w;
+ primary_state->disc_h = disc_h;
+ primary_state->disc_updated = true;
return 0;
}
-int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req,
- const struct drm_display_mode *mode)
+static void
+atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_state *state)
+{
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &plane->layer.desc->layout;
+ int disc_surface = 0;
+
+ if (!state->disc_updated)
+ return;
+
+ disc_surface = state->disc_h * state->disc_w;
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config,
+ ATMEL_HLCDC_LAYER_DISCEN,
+ disc_surface ? ATMEL_HLCDC_LAYER_DISCEN : 0);
+
+ if (!disc_surface)
+ return;
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->disc_pos,
+ 0xffffffff,
+ state->disc_x | (state->disc_y << 16));
+
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ layout->disc_size,
+ 0xffffffff,
+ (state->disc_w - 1) |
+ ((state->disc_h - 1) << 16));
+}
+
+static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
+ struct drm_plane_state *s)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
+ const struct atmel_hlcdc_layer_cfg_layout *layout =
+ &plane->layer.desc->layout;
+ struct drm_framebuffer *fb = state->base.fb;
+ const struct drm_display_mode *mode;
+ struct drm_crtc_state *crtc_state;
unsigned int patched_crtc_w;
unsigned int patched_crtc_h;
unsigned int patched_src_w;
@@ -430,196 +555,196 @@ int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
int vsub = 1;
int i;
- if ((req->src_x | req->src_y | req->src_w | req->src_h) &
+ if (!state->base.crtc || !fb)
+ return 0;
+
+ crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
+ mode = &crtc_state->adjusted_mode;
+
+ state->src_x = s->src_x;
+ state->src_y = s->src_y;
+ state->src_h = s->src_h;
+ state->src_w = s->src_w;
+ state->crtc_x = s->crtc_x;
+ state->crtc_y = s->crtc_y;
+ state->crtc_h = s->crtc_h;
+ state->crtc_w = s->crtc_w;
+ if ((state->src_x | state->src_y | state->src_w | state->src_h) &
SUBPIXEL_MASK)
return -EINVAL;
- req->src_x >>= 16;
- req->src_y >>= 16;
- req->src_w >>= 16;
- req->src_h >>= 16;
+ state->src_x >>= 16;
+ state->src_y >>= 16;
+ state->src_w >>= 16;
+ state->src_h >>= 16;
- req->nplanes = drm_format_num_planes(req->fb->pixel_format);
- if (req->nplanes > ATMEL_HLCDC_MAX_PLANES)
+ state->nplanes = drm_format_num_planes(fb->pixel_format);
+ if (state->nplanes > ATMEL_HLCDC_MAX_PLANES)
return -EINVAL;
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
- tmp = req->crtc_w;
- req->crtc_w = req->crtc_h;
- req->crtc_h = tmp;
- tmp = req->src_w;
- req->src_w = req->src_h;
- req->src_h = tmp;
+ if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+ tmp = state->crtc_w;
+ state->crtc_w = state->crtc_h;
+ state->crtc_h = tmp;
+ tmp = state->src_w;
+ state->src_w = state->src_h;
+ state->src_h = tmp;
}
- if (req->crtc_x + req->crtc_w > mode->hdisplay)
- patched_crtc_w = mode->hdisplay - req->crtc_x;
+ if (state->crtc_x + state->crtc_w > mode->hdisplay)
+ patched_crtc_w = mode->hdisplay - state->crtc_x;
else
- patched_crtc_w = req->crtc_w;
+ patched_crtc_w = state->crtc_w;
- if (req->crtc_x < 0) {
- patched_crtc_w += req->crtc_x;
- x_offset = -req->crtc_x;
- req->crtc_x = 0;
+ if (state->crtc_x < 0) {
+ patched_crtc_w += state->crtc_x;
+ x_offset = -state->crtc_x;
+ state->crtc_x = 0;
}
- if (req->crtc_y + req->crtc_h > mode->vdisplay)
- patched_crtc_h = mode->vdisplay - req->crtc_y;
+ if (state->crtc_y + state->crtc_h > mode->vdisplay)
+ patched_crtc_h = mode->vdisplay - state->crtc_y;
else
- patched_crtc_h = req->crtc_h;
+ patched_crtc_h = state->crtc_h;
- if (req->crtc_y < 0) {
- patched_crtc_h += req->crtc_y;
- y_offset = -req->crtc_y;
- req->crtc_y = 0;
+ if (state->crtc_y < 0) {
+ patched_crtc_h += state->crtc_y;
+ y_offset = -state->crtc_y;
+ state->crtc_y = 0;
}
- patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * req->src_w,
- req->crtc_w);
- patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * req->src_h,
- req->crtc_h);
+ patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * state->src_w,
+ state->crtc_w);
+ patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * state->src_h,
+ state->crtc_h);
- hsub = drm_format_horz_chroma_subsampling(req->fb->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(req->fb->pixel_format);
+ hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
- for (i = 0; i < req->nplanes; i++) {
+ for (i = 0; i < state->nplanes; i++) {
unsigned int offset = 0;
int xdiv = i ? hsub : 1;
int ydiv = i ? vsub : 1;
- req->bpp[i] = drm_format_plane_cpp(req->fb->pixel_format, i);
- if (!req->bpp[i])
+ state->bpp[i] = drm_format_plane_cpp(fb->pixel_format, i);
+ if (!state->bpp[i])
return -EINVAL;
- switch (plane->rotation & 0xf) {
+ switch (state->base.rotation & 0xf) {
case BIT(DRM_ROTATE_90):
- offset = ((y_offset + req->src_y + patched_src_w - 1) /
- ydiv) * req->fb->pitches[i];
- offset += ((x_offset + req->src_x) / xdiv) *
- req->bpp[i];
- req->xstride[i] = ((patched_src_w - 1) / ydiv) *
- req->fb->pitches[i];
- req->pstride[i] = -req->fb->pitches[i] - req->bpp[i];
+ offset = ((y_offset + state->src_y + patched_src_w - 1) /
+ ydiv) * fb->pitches[i];
+ offset += ((x_offset + state->src_x) / xdiv) *
+ state->bpp[i];
+ state->xstride[i] = ((patched_src_w - 1) / ydiv) *
+ fb->pitches[i];
+ state->pstride[i] = -fb->pitches[i] - state->bpp[i];
break;
case BIT(DRM_ROTATE_180):
- offset = ((y_offset + req->src_y + patched_src_h - 1) /
- ydiv) * req->fb->pitches[i];
- offset += ((x_offset + req->src_x + patched_src_w - 1) /
- xdiv) * req->bpp[i];
- req->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
- req->bpp[i]) - req->fb->pitches[i];
- req->pstride[i] = -2 * req->bpp[i];
+ offset = ((y_offset + state->src_y + patched_src_h - 1) /
+ ydiv) * fb->pitches[i];
+ offset += ((x_offset + state->src_x + patched_src_w - 1) /
+ xdiv) * state->bpp[i];
+ state->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
+ state->bpp[i]) - fb->pitches[i];
+ state->pstride[i] = -2 * state->bpp[i];
break;
case BIT(DRM_ROTATE_270):
- offset = ((y_offset + req->src_y) / ydiv) *
- req->fb->pitches[i];
- offset += ((x_offset + req->src_x + patched_src_h - 1) /
- xdiv) * req->bpp[i];
- req->xstride[i] = -(((patched_src_w - 1) / ydiv) *
- req->fb->pitches[i]) -
- (2 * req->bpp[i]);
- req->pstride[i] = req->fb->pitches[i] - req->bpp[i];
+ offset = ((y_offset + state->src_y) / ydiv) *
+ fb->pitches[i];
+ offset += ((x_offset + state->src_x + patched_src_h - 1) /
+ xdiv) * state->bpp[i];
+ state->xstride[i] = -(((patched_src_w - 1) / ydiv) *
+ fb->pitches[i]) -
+ (2 * state->bpp[i]);
+ state->pstride[i] = fb->pitches[i] - state->bpp[i];
break;
case BIT(DRM_ROTATE_0):
default:
- offset = ((y_offset + req->src_y) / ydiv) *
- req->fb->pitches[i];
- offset += ((x_offset + req->src_x) / xdiv) *
- req->bpp[i];
- req->xstride[i] = req->fb->pitches[i] -
+ offset = ((y_offset + state->src_y) / ydiv) *
+ fb->pitches[i];
+ offset += ((x_offset + state->src_x) / xdiv) *
+ state->bpp[i];
+ state->xstride[i] = fb->pitches[i] -
((patched_src_w / xdiv) *
- req->bpp[i]);
- req->pstride[i] = 0;
+ state->bpp[i]);
+ state->pstride[i] = 0;
break;
}
- req->offsets[i] = offset + req->fb->offsets[i];
+ state->offsets[i] = offset + fb->offsets[i];
}
- req->src_w = patched_src_w;
- req->src_h = patched_src_h;
- req->crtc_w = patched_crtc_w;
- req->crtc_h = patched_crtc_h;
+ state->src_w = patched_src_w;
+ state->src_h = patched_src_h;
+ state->crtc_w = patched_crtc_w;
+ state->crtc_h = patched_crtc_h;
- return atmel_hlcdc_plane_check_update_req(p, req, mode);
-}
+ if (!layout->size &&
+ (mode->hdisplay != state->crtc_w ||
+ mode->vdisplay != state->crtc_h))
+ return -EINVAL;
-int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
- struct atmel_hlcdc_plane_update_req *req)
-{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- int ret;
+ if (plane->layer.desc->max_height &&
+ state->crtc_h > plane->layer.desc->max_height)
+ return -EINVAL;
- ret = atmel_hlcdc_layer_update_start(&plane->layer);
- if (ret)
- return ret;
+ if (plane->layer.desc->max_width &&
+ state->crtc_w > plane->layer.desc->max_width)
+ return -EINVAL;
- atmel_hlcdc_plane_update_pos_and_size(plane, req);
- atmel_hlcdc_plane_update_general_settings(plane, req);
- atmel_hlcdc_plane_update_format(plane, req);
- atmel_hlcdc_plane_update_buffers(plane, req);
+ if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
+ (!layout->memsize ||
+ atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format)))
+ return -EINVAL;
- atmel_hlcdc_layer_update_commit(&plane->layer);
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_w + state->crtc_x > mode->hdisplay ||
+ state->crtc_h + state->crtc_y > mode->vdisplay)
+ return -EINVAL;
return 0;
}
-int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- const struct drm_display_mode *mode)
+static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- struct atmel_hlcdc_plane_update_req req;
- int ret = 0;
-
- memset(&req, 0, sizeof(req));
- req.crtc_x = crtc_x;
- req.crtc_y = crtc_y;
- req.crtc_w = crtc_w;
- req.crtc_h = crtc_h;
- req.src_x = src_x;
- req.src_y = src_y;
- req.src_w = src_w;
- req.src_h = src_h;
- req.fb = fb;
-
- ret = atmel_hlcdc_plane_prepare_update_req(&plane->base, &req, mode);
- if (ret)
- return ret;
- if (!req.crtc_h || !req.crtc_w)
- return atmel_hlcdc_layer_disable(&plane->layer);
-
- return atmel_hlcdc_plane_apply_update_req(&plane->base, &req);
+ return atmel_hlcdc_layer_update_start(&plane->layer);
}
-static int atmel_hlcdc_plane_update(struct drm_plane *p,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
+ struct drm_plane_state *old_s)
{
- return atmel_hlcdc_plane_update_with_mode(p, crtc, fb, crtc_x, crtc_y,
- crtc_w, crtc_h, src_x, src_y,
- src_w, src_h, &crtc->hwmode);
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
+
+ if (!p->state->crtc || !p->state->fb)
+ return;
+
+ atmel_hlcdc_plane_update_pos_and_size(plane, state);
+ atmel_hlcdc_plane_update_general_settings(plane, state);
+ atmel_hlcdc_plane_update_format(plane, state);
+ atmel_hlcdc_plane_update_buffers(plane, state);
+ atmel_hlcdc_plane_update_disc_area(plane, state);
+
+ atmel_hlcdc_layer_update_commit(&plane->layer);
}
-static int atmel_hlcdc_plane_disable(struct drm_plane *p)
+static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
+ struct drm_plane_state *old_state)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- return atmel_hlcdc_layer_disable(&plane->layer);
+ atmel_hlcdc_layer_disable(&plane->layer);
}
static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
@@ -635,38 +760,36 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
devm_kfree(p->dev->dev, plane);
}
-static int atmel_hlcdc_plane_set_alpha(struct atmel_hlcdc_plane *plane,
- u8 alpha)
+static int atmel_hlcdc_plane_atomic_set_property(struct drm_plane *p,
+ struct drm_plane_state *s,
+ struct drm_property *property,
+ uint64_t val)
{
- atmel_hlcdc_layer_update_start(&plane->layer);
- atmel_hlcdc_layer_update_cfg(&plane->layer,
- plane->layer.desc->layout.general_config,
- ATMEL_HLCDC_LAYER_GA_MASK,
- alpha << ATMEL_HLCDC_LAYER_GA_SHIFT);
- atmel_hlcdc_layer_update_commit(&plane->layer);
-
- return 0;
-}
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_properties *props = plane->properties;
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
-static int atmel_hlcdc_plane_set_rotation(struct atmel_hlcdc_plane *plane,
- unsigned int rotation)
-{
- plane->rotation = rotation;
+ if (property == props->alpha)
+ state->alpha = val;
+ else
+ return -EINVAL;
return 0;
}
-static int atmel_hlcdc_plane_set_property(struct drm_plane *p,
- struct drm_property *property,
- uint64_t value)
+static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
+ const struct drm_plane_state *s,
+ struct drm_property *property,
+ uint64_t *val)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_properties *props = plane->properties;
+ const struct atmel_hlcdc_plane_state *state =
+ container_of(s, const struct atmel_hlcdc_plane_state, base);
if (property == props->alpha)
- atmel_hlcdc_plane_set_alpha(plane, value);
- else if (property == props->rotation)
- atmel_hlcdc_plane_set_rotation(plane, value);
+ *val = state->alpha;
else
return -EINVAL;
@@ -694,8 +817,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
if (desc->layout.xstride && desc->layout.pstride)
drm_object_attach_property(&plane->base.base,
- props->rotation,
- BIT(DRM_ROTATE_0));
+ plane->base.dev->mode_config.rotation_property,
+ BIT(DRM_ROTATE_0));
if (desc->layout.csc) {
/*
@@ -717,11 +840,76 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
}
}
+static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
+ .prepare_fb = atmel_hlcdc_plane_prepare_fb,
+ .atomic_check = atmel_hlcdc_plane_atomic_check,
+ .atomic_update = atmel_hlcdc_plane_atomic_update,
+ .atomic_disable = atmel_hlcdc_plane_atomic_disable,
+};
+
+static void atmel_hlcdc_plane_reset(struct drm_plane *p)
+{
+ struct atmel_hlcdc_plane_state *state;
+
+ if (p->state) {
+ state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
+
+ if (state->base.fb)
+ drm_framebuffer_unreference(state->base.fb);
+
+ kfree(state);
+ p->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->alpha = 255;
+ p->state = &state->base;
+ p->state->plane = p;
+ }
+}
+
+static struct drm_plane_state *
+atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
+{
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
+ struct atmel_hlcdc_plane_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ copy->disc_updated = false;
+
+ if (copy->base.fb)
+ drm_framebuffer_reference(copy->base.fb);
+
+ return &copy->base;
+}
+
+static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *s)
+{
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
+
+ if (s->fb)
+ drm_framebuffer_unreference(s->fb);
+
+ kfree(state);
+}
+
static struct drm_plane_funcs layer_plane_funcs = {
- .update_plane = atmel_hlcdc_plane_update,
- .disable_plane = atmel_hlcdc_plane_disable,
- .set_property = atmel_hlcdc_plane_set_property,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = atmel_hlcdc_plane_destroy,
+ .reset = atmel_hlcdc_plane_reset,
+ .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
+ .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
+ .atomic_set_property = atmel_hlcdc_plane_atomic_set_property,
+ .atomic_get_property = atmel_hlcdc_plane_atomic_get_property,
};
static struct atmel_hlcdc_plane *
@@ -755,6 +943,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
+ drm_plane_helper_add(&plane->base,
+ &atmel_hlcdc_layer_plane_helper_funcs);
+
/* Set default property values*/
atmel_hlcdc_plane_init_properties(plane, desc, props);
@@ -774,12 +965,13 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
if (!props->alpha)
return ERR_PTR(-ENOMEM);
- props->rotation = drm_mode_create_rotation_property(dev,
- BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_90) |
- BIT(DRM_ROTATE_180) |
- BIT(DRM_ROTATE_270));
- if (!props->rotation)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270));
+ if (!dev->mode_config.rotation_property)
return ERR_PTR(-ENOMEM);
return props;
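
The driver-specific plane state introduced above relies on three drm_plane_funcs callbacks (reset, duplicate, destroy) to keep framebuffer references balanced across atomic commits. A minimal sketch of that subclassing pattern, assuming a hypothetical foo_plane_state with a single private field:

struct foo_plane_state {
	struct drm_plane_state base;
	u8 alpha;	/* driver-private example field */
};

static inline struct foo_plane_state *to_foo_plane_state(struct drm_plane_state *s)
{
	return container_of(s, struct foo_plane_state, base);
}

static void foo_plane_reset(struct drm_plane *p)
{
	struct foo_plane_state *state;

	if (p->state) {
		if (p->state->fb)
			drm_framebuffer_unreference(p->state->fb);
		kfree(to_foo_plane_state(p->state));
		p->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		state->alpha = 255;
		p->state = &state->base;
		p->state->plane = p;
	}
}

static struct drm_plane_state *foo_plane_duplicate_state(struct drm_plane *p)
{
	struct foo_plane_state *copy;

	copy = kmemdup(to_foo_plane_state(p->state), sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	if (copy->base.fb)
		drm_framebuffer_reference(copy->base.fb);

	return &copy->base;
}

static void foo_plane_destroy_state(struct drm_plane *p, struct drm_plane_state *s)
{
	if (s->fb)
		drm_framebuffer_unreference(s->fb);
	kfree(to_foo_plane_state(s));
}
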
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index 460389702d31..a39b0343c197 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -164,6 +164,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
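
The single added line above clears the VBE enable register before the geometry registers are reprogrammed, presumably so the virtual hardware never scans out a half-programmed mode. The resulting sequence, sketched from the calls visible in this hunk (the final enable write is outside the excerpt):

	bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);	/* stop scanout */
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP,  bochs->bpp);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
	/* ... remaining VBE registers, then the write that re-enables scanout */
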
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index f38bbcdf929b..acef3223772c 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -11,3 +11,14 @@ config DRM_PTN3460
select DRM_PANEL
---help---
ptn3460 eDP-LVDS bridge chip driver.
+
+config DRM_PS8622
+ tristate "Parade eDP/LVDS bridge"
+ depends on DRM
+ depends on OF
+ select DRM_PANEL
+ select DRM_KMS_HELPER
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ Parade eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d8a8cfd12fbb..8dfebd984370 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Iinclude/drm
+obj-$(CONFIG_DRM_PS8622) += ps8622.o
obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index cd6a70647e32..49cafb61d290 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/hdmi.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <drm/drm_of.h>
@@ -126,6 +127,7 @@ struct dw_hdmi {
struct i2c_adapter *ddc;
void __iomem *regs;
+ struct mutex audio_mutex;
unsigned int sample_rate;
int ratio;
@@ -177,26 +179,23 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
hdmi_modb(hdmi, data << shift, mask, reg);
}
-static void hdmi_set_clock_regenerator_n(struct dw_hdmi *hdmi,
- unsigned int value)
+static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
+ unsigned int n)
{
- hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1);
- hdmi_writeb(hdmi, (value >> 8) & 0xff, HDMI_AUD_N2);
- hdmi_writeb(hdmi, (value >> 16) & 0x0f, HDMI_AUD_N3);
+ /* Must be set/cleared first */
+ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
/* nshift factor = 0 */
hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3);
-}
-
-static void hdmi_regenerate_cts(struct dw_hdmi *hdmi, unsigned int cts)
-{
- /* Must be set/cleared first */
- hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
- hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
- hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
+ hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
+ hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
+
+ hdmi_writeb(hdmi, (n >> 16) & 0x0f, HDMI_AUD_N3);
+ hdmi_writeb(hdmi, (n >> 8) & 0xff, HDMI_AUD_N2);
+ hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1);
}
static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
@@ -355,18 +354,21 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
__func__, hdmi->sample_rate, hdmi->ratio,
pixel_clk, clk_n, clk_cts);
- hdmi_set_clock_regenerator_n(hdmi, clk_n);
- hdmi_regenerate_cts(hdmi, clk_cts);
+ hdmi_set_cts_n(hdmi, clk_cts, clk_n);
}
static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
{
+ mutex_lock(&hdmi->audio_mutex);
hdmi_set_clk_regenerator(hdmi, 74250000);
+ mutex_unlock(&hdmi->audio_mutex);
}
static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
{
+ mutex_lock(&hdmi->audio_mutex);
hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
+ mutex_unlock(&hdmi->audio_mutex);
}
/*
@@ -753,10 +755,10 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
{
unsigned res_idx, i;
u8 val, msec;
- const struct dw_hdmi_mpll_config *mpll_config =
- hdmi->plat_data->mpll_cfg;
- const struct dw_hdmi_curr_ctrl *curr_ctrl = hdmi->plat_data->cur_ctr;
- const struct dw_hdmi_sym_term *sym_term = hdmi->plat_data->sym_term;
+ const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
+ const struct dw_hdmi_mpll_config *mpll_config = plat_data->mpll_cfg;
+ const struct dw_hdmi_curr_ctrl *curr_ctrl = plat_data->cur_ctr;
+ const struct dw_hdmi_phy_config *phy_config = plat_data->phy_config;
if (prep)
return -EINVAL;
@@ -827,18 +829,18 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
- for (i = 0; sym_term[i].mpixelclock != (~0UL); i++)
+ for (i = 0; phy_config[i].mpixelclock != (~0UL); i++)
if (hdmi->hdmi_data.video_mode.mpixelclock <=
- sym_term[i].mpixelclock)
+ phy_config[i].mpixelclock)
break;
/* RESISTANCE TERM 133Ohm Cfg */
- hdmi_phy_i2c_write(hdmi, sym_term[i].term, 0x19); /* TXTERM */
+ hdmi_phy_i2c_write(hdmi, phy_config[i].term, 0x19); /* TXTERM */
/* PREEMP Cgf 0.00 */
- hdmi_phy_i2c_write(hdmi, sym_term[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
-
+ hdmi_phy_i2c_write(hdmi, phy_config[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
/* TX/CK LVL 10 */
- hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
+ hdmi_phy_i2c_write(hdmi, phy_config[i].vlev_ctr, 0x0E); /* VLEVCTRL */
+
/* REMOVE CLK TERM */
hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
@@ -1569,6 +1571,8 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->ratio = 100;
hdmi->encoder = encoder;
+ mutex_init(&hdmi->audio_mutex);
+
of_property_read_u32(np, "reg-io-width", &val);
switch (val) {
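
The audio_mutex added above serialises the N/CTS clock-regenerator programming between the mode-set path and future audio users. As a sketch of the pattern (the wrapper name is hypothetical; struct dw_hdmi and hdmi_set_clk_regenerator() are the symbols from this file):

static void foo_hdmi_update_clk_regenerator(struct dw_hdmi *hdmi,
					    unsigned long pixel_clk)
{
	/* hold audio_mutex across the CTS/N register writes */
	mutex_lock(&hdmi->audio_mutex);
	hdmi_set_clk_regenerator(hdmi, pixel_clk);
	mutex_unlock(&hdmi->audio_mutex);
}
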
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/ps8622.c
new file mode 100644
index 000000000000..e895aa7ea353
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ps8622.c
@@ -0,0 +1,684 @@
+/*
+ * Parade PS8622 eDP/LVDS bridge driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_panel.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/* Brightness scale on the Parade chip */
+#define PS8622_MAX_BRIGHTNESS 0xff
+
+/* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */
+#define PS8622_POWER_RISE_T1_MIN_US 10
+#define PS8622_POWER_RISE_T1_MAX_US 10000
+#define PS8622_RST_HIGH_T2_MIN_US 3000
+#define PS8622_RST_HIGH_T2_MAX_US 30000
+#define PS8622_PWMO_END_T12_MS 200
+#define PS8622_POWER_FALL_T16_MAX_US 10000
+#define PS8622_POWER_OFF_T17_MS 500
+
+#if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \
+ (PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US))
+#error "T2.min + T1.max must be less than T2.max + T1.min"
+#endif
+
+struct ps8622_bridge {
+ struct drm_connector connector;
+ struct i2c_client *client;
+ struct drm_bridge bridge;
+ struct drm_panel *panel;
+ struct regulator *v12;
+ struct backlight_device *bl;
+
+ struct gpio_desc *gpio_slp;
+ struct gpio_desc *gpio_rst;
+
+ u32 max_lane_count;
+ u32 lane_count;
+
+ bool enabled;
+};
+
+static inline struct ps8622_bridge *
+ bridge_to_ps8622(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ps8622_bridge, bridge);
+}
+
+static inline struct ps8622_bridge *
+ connector_to_ps8622(struct drm_connector *connector)
+{
+ return container_of(connector, struct ps8622_bridge, connector);
+}
+
+static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val)
+{
+ int ret;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ u8 data[] = {reg, val};
+
+ msg.addr = client->addr + page;
+ msg.flags = 0;
+ msg.len = sizeof(data);
+ msg.buf = data;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ if (ret != 1)
+ pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n",
+ client->addr + page, reg, val, ret);
+ return !(ret == 1);
+}
+
+static int ps8622_send_config(struct ps8622_bridge *ps8622)
+{
+ struct i2c_client *cl = ps8622->client;
+ int err = 0;
+
+ /* HPD low */
+ err = ps8622_set(cl, 0x02, 0xa1, 0x01);
+ if (err)
+ goto error;
+
+ /* SW setting: [1:0] SW output 1.2V voltage is lowered to 96% */
+ err = ps8622_set(cl, 0x04, 0x14, 0x01);
+ if (err)
+ goto error;
+
+ /* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */
+ err = ps8622_set(cl, 0x04, 0xe3, 0x20);
+ if (err)
+ goto error;
+
+ /* [7] RCO SS enable */
+ err = ps8622_set(cl, 0x04, 0xe2, 0x80);
+ if (err)
+ goto error;
+
+ /* RPHY Setting
+ * [3:2] CDR tune wait cycle before measure for fine tune
+ * b00: 1us, b01: 0.5us, b10: 2us, b11: 4us
+ */
+ err = ps8622_set(cl, 0x04, 0x8a, 0x0c);
+ if (err)
+ goto error;
+
+ /* [3] RFD always on */
+ err = ps8622_set(cl, 0x04, 0x89, 0x08);
+ if (err)
+ goto error;
+
+ /* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. */
+ err = ps8622_set(cl, 0x04, 0x71, 0x2d);
+ if (err)
+ goto error;
+
+ /* 2.7G CDR settings: NOF=40LSB for HBR CDR setting */
+ err = ps8622_set(cl, 0x04, 0x7d, 0x07);
+ if (err)
+ goto error;
+
+ /* [1:0] Fmin=+4bands */
+ err = ps8622_set(cl, 0x04, 0x7b, 0x00);
+ if (err)
+ goto error;
+
+ /* [7:5] DCO_FTRNG=+-40% */
+ err = ps8622_set(cl, 0x04, 0x7a, 0xfd);
+ if (err)
+ goto error;
+
+ /* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */
+ err = ps8622_set(cl, 0x04, 0xc0, 0x12);
+ if (err)
+ goto error;
+
+ /* Gitune=-37% */
+ err = ps8622_set(cl, 0x04, 0xc1, 0x92);
+ if (err)
+ goto error;
+
+ /* Fbstep=100% */
+ err = ps8622_set(cl, 0x04, 0xc2, 0x1c);
+ if (err)
+ goto error;
+
+ /* [7] LOS signal disable */
+ err = ps8622_set(cl, 0x04, 0x32, 0x80);
+ if (err)
+ goto error;
+
+ /* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */
+ err = ps8622_set(cl, 0x04, 0x00, 0xb0);
+ if (err)
+ goto error;
+
+ /* [7:6] Right-bar GPIO output strength is 8mA */
+ err = ps8622_set(cl, 0x04, 0x15, 0x40);
+ if (err)
+ goto error;
+
+ /* EQ Training State Machine Setting, RCO calibration start */
+ err = ps8622_set(cl, 0x04, 0x54, 0x10);
+ if (err)
+ goto error;
+
+ /* Logic, needs more than 10 I2C commands */
+ /* [4:0] MAX_LANE_COUNT set to max supported lanes */
+ err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count);
+ if (err)
+ goto error;
+
+ /* [4:0] LANE_COUNT_SET set to chosen lane count */
+ err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count);
+ if (err)
+ goto error;
+
+ err = ps8622_set(cl, 0x00, 0x52, 0x20);
+ if (err)
+ goto error;
+
+ /* HPD CP toggle enable */
+ err = ps8622_set(cl, 0x00, 0xf1, 0x03);
+ if (err)
+ goto error;
+
+ err = ps8622_set(cl, 0x00, 0x62, 0x41);
+ if (err)
+ goto error;
+
+ /* Counter number, add 1ms counter delay */
+ err = ps8622_set(cl, 0x00, 0xf6, 0x01);
+ if (err)
+ goto error;
+
+ /* [6]PWM function control by DPCD0040f[7], default is PWM block */
+ err = ps8622_set(cl, 0x00, 0x77, 0x06);
+ if (err)
+ goto error;
+
+ /* 04h Adjust VTotal tolerance to fix the 30Hz no display issue */
+ err = ps8622_set(cl, 0x00, 0x4c, 0x04);
+ if (err)
+ goto error;
+
+ /* DPCD00400='h00, Parade OUI ='h001cf8 */
+ err = ps8622_set(cl, 0x01, 0xc0, 0x00);
+ if (err)
+ goto error;
+
+ /* DPCD00401='h1c */
+ err = ps8622_set(cl, 0x01, 0xc1, 0x1c);
+ if (err)
+ goto error;
+
+ /* DPCD00402='hf8 */
+ err = ps8622_set(cl, 0x01, 0xc2, 0xf8);
+ if (err)
+ goto error;
+
+ /* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */
+ err = ps8622_set(cl, 0x01, 0xc3, 0x44);
+ if (err)
+ goto error;
+
+ /* DPCD404 */
+ err = ps8622_set(cl, 0x01, 0xc4, 0x32);
+ if (err)
+ goto error;
+
+ /* DPCD405 */
+ err = ps8622_set(cl, 0x01, 0xc5, 0x53);
+ if (err)
+ goto error;
+
+ /* DPCD406 */
+ err = ps8622_set(cl, 0x01, 0xc6, 0x4c);
+ if (err)
+ goto error;
+
+ /* DPCD407 */
+ err = ps8622_set(cl, 0x01, 0xc7, 0x56);
+ if (err)
+ goto error;
+
+ /* DPCD408 */
+ err = ps8622_set(cl, 0x01, 0xc8, 0x35);
+ if (err)
+ goto error;
+
+ /* DPCD40A, Initial Code major revision '01' */
+ err = ps8622_set(cl, 0x01, 0xca, 0x01);
+ if (err)
+ goto error;
+
+ /* DPCD40B, Initial Code minor revision '05' */
+ err = ps8622_set(cl, 0x01, 0xcb, 0x05);
+ if (err)
+ goto error;
+
+ if (ps8622->bl) {
+ /* DPCD720, internal PWM */
+ err = ps8622_set(cl, 0x01, 0xa5, 0xa0);
+ if (err)
+ goto error;
+
+ /* FFh for 100% brightness, 0h for 0% brightness */
+ err = ps8622_set(cl, 0x01, 0xa7,
+ ps8622->bl->props.brightness);
+ if (err)
+ goto error;
+ } else {
+ /* DPCD720, external PWM */
+ err = ps8622_set(cl, 0x01, 0xa5, 0x80);
+ if (err)
+ goto error;
+ }
+
+ /* Set LVDS output as 6bit-VESA mapping, single LVDS channel */
+ err = ps8622_set(cl, 0x01, 0xcc, 0x13);
+ if (err)
+ goto error;
+
+ /* Enable SSC set by register */
+ err = ps8622_set(cl, 0x02, 0xb1, 0x20);
+ if (err)
+ goto error;
+
+ /* Set SSC enabled and +/-1% central spreading */
+ err = ps8622_set(cl, 0x04, 0x10, 0x16);
+ if (err)
+ goto error;
+
+ /* Logic end */
+ /* MPU Clock source: LC => RCO */
+ err = ps8622_set(cl, 0x04, 0x59, 0x60);
+ if (err)
+ goto error;
+
+ /* LC -> RCO */
+ err = ps8622_set(cl, 0x04, 0x54, 0x14);
+ if (err)
+ goto error;
+
+ /* HPD high */
+ err = ps8622_set(cl, 0x02, 0xa1, 0x91);
+
+error:
+ return err ? -EIO : 0;
+}
+
+static int ps8622_backlight_update(struct backlight_device *bl)
+{
+ struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev);
+ int ret, brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ if (!ps8622->enabled)
+ return -EINVAL;
+
+ ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness);
+
+ return ret;
+}
+
+static const struct backlight_ops ps8622_backlight_ops = {
+ .update_status = ps8622_backlight_update,
+};
+
+static void ps8622_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+ int ret;
+
+ if (ps8622->enabled)
+ return;
+
+ gpiod_set_value(ps8622->gpio_rst, 0);
+
+ if (ps8622->v12) {
+ ret = regulator_enable(ps8622->v12);
+ if (ret)
+ DRM_ERROR("fails to enable ps8622->v12");
+ }
+
+ if (drm_panel_prepare(ps8622->panel)) {
+ DRM_ERROR("failed to prepare panel\n");
+ return;
+ }
+
+ gpiod_set_value(ps8622->gpio_slp, 1);
+
+ /*
+ * T1 is the range of time that it takes for the power to rise after we
+ * enable the lcd/ps8622 fet. T2 is the range of time in which the
+ * data sheet specifies we should deassert the reset pin.
+ *
+ * If it takes T1.max for the power to rise, we need to wait at least
+ * T2.min before deasserting the reset pin. If it takes T1.min for the
+ * power to rise, we need to wait at most T2.max before deasserting the
+ * reset pin.
+ */
+ usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US,
+ PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US);
+
+ gpiod_set_value(ps8622->gpio_rst, 1);
+
+ /* wait 20ms after RST high */
+ usleep_range(20000, 30000);
+
+ ret = ps8622_send_config(ps8622);
+ if (ret) {
+ DRM_ERROR("Failed to send config to bridge (%d)\n", ret);
+ return;
+ }
+
+ ps8622->enabled = true;
+}
+
+static void ps8622_enable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (drm_panel_enable(ps8622->panel)) {
+ DRM_ERROR("failed to enable panel\n");
+ return;
+ }
+}
+
+static void ps8622_disable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (drm_panel_disable(ps8622->panel)) {
+ DRM_ERROR("failed to disable panel\n");
+ return;
+ }
+ msleep(PS8622_PWMO_END_T12_MS);
+}
+
+static void ps8622_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (!ps8622->enabled)
+ return;
+
+ ps8622->enabled = false;
+
+ /*
+ * This doesn't matter if the regulators are turned off, but something
+ * else might keep them on. In that case, we want to assert the slp gpio
+ * to lower power.
+ */
+ gpiod_set_value(ps8622->gpio_slp, 0);
+
+ if (drm_panel_unprepare(ps8622->panel)) {
+ DRM_ERROR("failed to unprepare panel\n");
+ return;
+ }
+
+ if (ps8622->v12)
+ regulator_disable(ps8622->v12);
+
+ /*
+ * Sleep for at least the time it takes the power rail to fall, so that
+ * asserting the rst gpio afterwards has no effect.
+ */
+ usleep_range(PS8622_POWER_FALL_T16_MAX_US,
+ 2 * PS8622_POWER_FALL_T16_MAX_US);
+ gpiod_set_value(ps8622->gpio_rst, 0);
+
+ msleep(PS8622_POWER_OFF_T17_MS);
+}
+
+static int ps8622_get_modes(struct drm_connector *connector)
+{
+ struct ps8622_bridge *ps8622;
+
+ ps8622 = connector_to_ps8622(connector);
+
+ return drm_panel_get_modes(ps8622->panel);
+}
+
+static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
+{
+ struct ps8622_bridge *ps8622;
+
+ ps8622 = connector_to_ps8622(connector);
+
+ return ps8622->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
+ .get_modes = ps8622_get_modes,
+ .best_encoder = ps8622_best_encoder,
+};
+
+static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+static void ps8622_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs ps8622_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ps8622_detect,
+ .destroy = ps8622_connector_destroy,
+};
+
+static int ps8622_attach(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(bridge->dev, &ps8622->connector,
+ &ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&ps8622->connector,
+ &ps8622_connector_helper_funcs);
+ drm_connector_register(&ps8622->connector);
+ drm_mode_connector_attach_encoder(&ps8622->connector,
+ bridge->encoder);
+
+ if (ps8622->panel)
+ drm_panel_attach(ps8622->panel, &ps8622->connector);
+
+ drm_helper_hpd_irq_event(ps8622->connector.dev);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8622_bridge_funcs = {
+ .pre_enable = ps8622_pre_enable,
+ .enable = ps8622_enable,
+ .disable = ps8622_disable,
+ .post_disable = ps8622_post_disable,
+ .attach = ps8622_attach,
+};
+
+static const struct of_device_id ps8622_devices[] = {
+ {.compatible = "parade,ps8622",},
+ {.compatible = "parade,ps8625",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ps8622_devices);
+
+static int ps8622_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *endpoint, *panel_node;
+ struct ps8622_bridge *ps8622;
+ int ret;
+
+ ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
+ if (!ps8622)
+ return -ENOMEM;
+
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (panel_node) {
+ ps8622->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!ps8622->panel)
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ps8622->client = client;
+
+ ps8622->v12 = devm_regulator_get(dev, "vdd12");
+ if (IS_ERR(ps8622->v12)) {
+ dev_info(dev, "no 1.2v regulator found for PS8622\n");
+ ps8622->v12 = NULL;
+ }
+
+ ps8622->gpio_slp = devm_gpiod_get(dev, "sleep");
+ if (IS_ERR(ps8622->gpio_slp)) {
+ ret = PTR_ERR(ps8622->gpio_slp);
+ dev_err(dev, "cannot get gpio_slp %d\n", ret);
+ return ret;
+ }
+ ret = gpiod_direction_output(ps8622->gpio_slp, 1);
+ if (ret) {
+ dev_err(dev, "cannot configure gpio_slp\n");
+ return ret;
+ }
+
+ ps8622->gpio_rst = devm_gpiod_get(dev, "reset");
+ if (IS_ERR(ps8622->gpio_rst)) {
+ ret = PTR_ERR(ps8622->gpio_rst);
+ dev_err(dev, "cannot get gpio_rst %d\n", ret);
+ return ret;
+ }
+ /*
+ * Assert the reset pin high to avoid the bridge being
+ * initialized prematurely
+ */
+ ret = gpiod_direction_output(ps8622->gpio_rst, 1);
+ if (ret) {
+ dev_err(dev, "cannot configure gpio_rst\n");
+ return ret;
+ }
+
+ ps8622->max_lane_count = id->driver_data;
+
+ if (of_property_read_u32(dev->of_node, "lane-count",
+ &ps8622->lane_count)) {
+ ps8622->lane_count = ps8622->max_lane_count;
+ } else if (ps8622->lane_count > ps8622->max_lane_count) {
+ dev_info(dev, "lane-count property is too high,"
+ "using max_lane_count\n");
+ ps8622->lane_count = ps8622->max_lane_count;
+ }
+
+ if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) {
+ ps8622->bl = backlight_device_register("ps8622-backlight",
+ dev, ps8622, &ps8622_backlight_ops,
+ NULL);
+ if (IS_ERR(ps8622->bl)) {
+ DRM_ERROR("failed to register backlight\n");
+ ret = PTR_ERR(ps8622->bl);
+ ps8622->bl = NULL;
+ return ret;
+ }
+ ps8622->bl->props.max_brightness = PS8622_MAX_BRIGHTNESS;
+ ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
+ }
+
+ ps8622->bridge.funcs = &ps8622_bridge_funcs;
+ ps8622->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&ps8622->bridge);
+ if (ret) {
+ DRM_ERROR("Failed to add bridge\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, ps8622);
+
+ return 0;
+}
+
+static int ps8622_remove(struct i2c_client *client)
+{
+ struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
+
+ if (ps8622->bl)
+ backlight_device_unregister(ps8622->bl);
+
+ drm_bridge_remove(&ps8622->bridge);
+
+ return 0;
+}
+
+static const struct i2c_device_id ps8622_i2c_table[] = {
+ /* Device type, max_lane_count */
+ {"ps8622", 1},
+ {"ps8625", 2},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
+
+static struct i2c_driver ps8622_driver = {
+ .id_table = ps8622_i2c_table,
+ .probe = ps8622_probe,
+ .remove = ps8622_remove,
+ .driver = {
+ .name = "ps8622",
+ .owner = THIS_MODULE,
+ .of_match_table = ps8622_devices,
+ },
+};
+module_i2c_driver(ps8622_driver);
+
+MODULE_AUTHOR("Vincent Palatin <vpalatin@chromium.org>");
+MODULE_DESCRIPTION("Parade ps8622/ps8625 eDP-LVDS converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 826833e396f0..9d2f053382e1 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -265,7 +265,7 @@ static struct drm_connector_funcs ptn3460_connector_funcs = {
.destroy = ptn3460_connector_destroy,
};
-int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c2e9c5283136..6e3b78ee7d16 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -92,7 +92,7 @@ drm_atomic_state_alloc(struct drm_device *dev)
state->dev = dev;
- DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
+ DRM_DEBUG_ATOMIC("Allocate atomic state %p\n", state);
return state;
fail:
@@ -122,7 +122,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
struct drm_mode_config *config = &dev->mode_config;
int i;
- DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
+ DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
@@ -134,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
connector->funcs->atomic_destroy_state(connector,
state->connector_states[i]);
+ state->connectors[i] = NULL;
state->connector_states[i] = NULL;
}
@@ -145,6 +146,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
crtc->funcs->atomic_destroy_state(crtc,
state->crtc_states[i]);
+ state->crtcs[i] = NULL;
state->crtc_states[i] = NULL;
}
@@ -156,6 +158,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
plane->funcs->atomic_destroy_state(plane,
state->plane_states[i]);
+ state->planes[i] = NULL;
state->plane_states[i] = NULL;
}
}
@@ -170,9 +173,12 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
*/
void drm_atomic_state_free(struct drm_atomic_state *state)
{
+ if (!state)
+ return;
+
drm_atomic_state_clear(state);
- DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
+ DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
kfree_state(state);
}
@@ -217,8 +223,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
state->crtcs[index] = crtc;
crtc_state->state = state;
- DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
- crtc->base.id, crtc_state, state);
+ DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
+ crtc->base.id, crtc_state, state);
return crtc_state;
}
@@ -248,11 +254,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_mode_config *config = &dev->mode_config;
/* FIXME: Mode prop is missing, which also controls ->enable. */
- if (property == config->prop_active) {
+ if (property == config->prop_active)
state->active = val;
- } else if (crtc->funcs->atomic_set_property)
+ else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
- return -EINVAL;
+ else
+ return -EINVAL;
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
@@ -266,9 +275,17 @@ int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property, uint64_t *val)
{
- if (crtc->funcs->atomic_get_property)
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_active)
+ *val = state->active;
+ else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
- return -EINVAL;
+ else
+ return -EINVAL;
+
+ return 0;
}
/**
@@ -293,8 +310,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
*/
if (state->active && !state->enable) {
- DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
+ crtc->base.id);
return -EINVAL;
}
@@ -340,8 +357,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
state->planes[index] = plane;
plane_state->state = state;
- DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
- plane->base.id, plane_state, state);
+ DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
+ plane->base.id, plane_state, state);
if (plane_state->crtc) {
struct drm_crtc_state *crtc_state;
@@ -450,6 +467,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
+ } else if (property == config->rotation_property) {
+ *val = state->rotation;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -473,14 +492,14 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
unsigned int fb_width, fb_height;
- unsigned int i;
+ int ret;
/* either *both* CRTC and FB must be set, or neither */
if (WARN_ON(state->crtc && !state->fb)) {
- DRM_DEBUG_KMS("CRTC set but no FB\n");
+ DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
return -EINVAL;
} else if (WARN_ON(state->fb && !state->crtc)) {
- DRM_DEBUG_KMS("FB set but no CRTC\n");
+ DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
return -EINVAL;
}
@@ -490,18 +509,16 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
- DRM_DEBUG_KMS("Invalid crtc for plane\n");
+ DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
- for (i = 0; i < plane->format_count; i++)
- if (state->fb->pixel_format == plane->format_types[i])
- break;
- if (i == plane->format_count) {
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(state->fb->pixel_format));
- return -EINVAL;
+ ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format));
+ return ret;
}
/* Give drivers some help against integer overflows */
@@ -509,9 +526,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- state->crtc_w, state->crtc_h,
- state->crtc_x, state->crtc_y);
+ DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ state->crtc_w, state->crtc_h,
+ state->crtc_x, state->crtc_y);
return -ERANGE;
}
@@ -523,12 +540,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
- DRM_DEBUG_KMS("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
- state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
- state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
- state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
- state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
+ DRM_DEBUG_ATOMIC("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
+ state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
+ state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
+ state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
return -ENOSPC;
}
@@ -575,7 +592,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
* at most the array is a bit too large.
*/
if (index >= state->num_connector) {
- DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
+ DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
return ERR_PTR(-EAGAIN);
}
@@ -590,8 +607,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
state->connectors[index] = connector;
connector_state->state = state;
- DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
- connector->base.id, connector_state, state);
+ DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
+ connector->base.id, connector_state, state);
if (connector_state->crtc) {
struct drm_crtc_state *crtc_state;
@@ -752,17 +769,18 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
}
if (crtc)
- DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
- plane_state, crtc->base.id);
+ DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
+ plane_state, crtc->base.id);
else
- DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
+ DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
+ plane_state);
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
/**
- * drm_atomic_set_fb_for_plane - set crtc for plane
+ * drm_atomic_set_fb_for_plane - set framebuffer for plane
* @plane_state: atomic state object for the plane
* @fb: fb to use for the plane
*
@@ -782,10 +800,11 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
plane_state->fb = fb;
if (fb)
- DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
- fb->base.id, plane_state);
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
+ fb->base.id, plane_state);
else
- DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
+ DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+ plane_state);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
@@ -818,11 +837,11 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
conn_state->crtc = crtc;
if (crtc)
- DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
- conn_state, crtc->base.id);
+ DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
+ conn_state, crtc->base.id);
else
- DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
- conn_state);
+ DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+ conn_state);
return 0;
}
@@ -858,8 +877,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
if (ret)
return ret;
- DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
- crtc->base.id, state);
+ DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
+ crtc->base.id, state);
/*
* Changed connectors are already in @state, so only need to look at the
@@ -890,19 +909,18 @@ int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- int i, num_connected_connectors = 0;
-
- for (i = 0; i < state->num_connector; i++) {
- struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
- conn_state = state->connector_states[i];
+ int i, num_connected_connectors = 0;
- if (conn_state && conn_state->crtc == crtc)
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->crtc == crtc)
num_connected_connectors++;
}
- DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
- state, num_connected_connectors, crtc->base.id);
+ DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
+ state, num_connected_connectors, crtc->base.id);
return num_connected_connectors;
}
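/*
 * Editorial sketch, not part of the patch: the conversion above (and
 * throughout this file) replaces open-coded "index into state->connectors[]
 * / crtcs[] / planes[] and skip NULL entries" loops with the new iterator
 * macros, which hand back the object and its duplicated state together.
 * Typical use:
 */
static int example_count_active_crtcs(struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int i, active = 0;

        for_each_crtc_in_state(state, crtc, crtc_state, i)
                if (crtc_state->active)
                        active++;

        return active;
}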
@@ -914,7 +932,7 @@ EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
*
* This function should be used by legacy entry points which don't understand
* -EDEADLK semantics. For simplicity this one will grab all modeset locks after
- * the slowpath completed.
+ * the slowpath completed.
*/
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
@@ -949,36 +967,28 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
- int nplanes = config->num_total_plane;
- int ncrtcs = config->num_crtc;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
int i, ret = 0;
- DRM_DEBUG_KMS("checking %p\n", state);
-
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
+ DRM_DEBUG_ATOMIC("checking %p\n", state);
- if (!plane)
- continue;
-
- ret = drm_atomic_plane_check(plane, state->plane_states[i]);
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ ret = drm_atomic_plane_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n",
- plane->base.id);
+ DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
+ plane->base.id);
return ret;
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
-
- ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ ret = drm_atomic_crtc_check(crtc, crtc_state);
if (ret) {
- DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
+ crtc->base.id);
return ret;
}
}
@@ -987,17 +997,11 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
ret = config->funcs->atomic_check(state->dev, state);
if (!state->allow_modeset) {
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (crtc_state->mode_changed ||
crtc_state->active_changed) {
- DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
+ crtc->base.id);
return -EINVAL;
}
}
@@ -1032,7 +1036,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
if (ret)
return ret;
- DRM_DEBUG_KMS("commiting %p\n", state);
+ DRM_DEBUG_ATOMIC("commiting %p\n", state);
return config->funcs->atomic_commit(state->dev, state, false);
}
@@ -1063,7 +1067,7 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
if (ret)
return ret;
- DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
+ DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
return config->funcs->atomic_commit(state->dev, state, true);
}
@@ -1191,6 +1195,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
unsigned plane_mask = 0;
int ret = 0;
unsigned int i, j;
@@ -1294,15 +1300,9 @@ retry:
}
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- int ncrtcs = dev->mode_config.num_crtc;
-
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_pending_vblank_event *e;
- if (!crtc_state)
- continue;
-
e = create_vblank_event(dev, file_priv, arg->user_data);
if (!e) {
ret = -ENOMEM;
@@ -1354,14 +1354,7 @@ fail:
goto backoff;
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- int ncrtcs = dev->mode_config.num_crtc;
-
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
- if (!crtc_state)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
destroy_vblank_event(dev, file_priv, crtc_state->event);
crtc_state->event = NULL;
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7e3a52b97c7d..1d2ca52530d5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -116,9 +116,9 @@ steal_encoder(struct drm_atomic_state *state,
*/
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
- encoder->base.id, encoder->name,
- encoder_crtc->base.id);
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+ encoder->base.id, encoder->name,
+ encoder_crtc->base.id);
crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
if (IS_ERR(crtc_state))
@@ -130,9 +130,9 @@ steal_encoder(struct drm_atomic_state *state,
if (connector->state->best_encoder != encoder)
continue;
- DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
connector_state = drm_atomic_get_connector_state(state,
connector);
@@ -151,7 +151,7 @@ steal_encoder(struct drm_atomic_state *state,
static int
update_connector_routing(struct drm_atomic_state *state, int conn_idx)
{
- struct drm_connector_helper_funcs *funcs;
+ const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
struct drm_crtc *encoder_crtc;
struct drm_connector *connector;
@@ -165,9 +165,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
if (!connector)
return 0;
- DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
if (connector->state->crtc != connector_state->crtc) {
if (connector->state->crtc) {
@@ -186,7 +186,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
if (!connector_state->crtc) {
- DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
+ DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
@@ -199,19 +199,19 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
new_encoder = funcs->best_encoder(connector);
if (!new_encoder) {
- DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
return -EINVAL;
}
if (new_encoder == connector_state->best_encoder) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
- connector->base.id,
- connector->name,
- new_encoder->base.id,
- new_encoder->name,
- connector_state->crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
return 0;
}
@@ -222,9 +222,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
if (encoder_crtc) {
ret = steal_encoder(state, new_encoder, encoder_crtc);
if (ret) {
- DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
return ret;
}
}
@@ -235,12 +235,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
crtc_state = state->crtc_states[idx];
crtc_state->mode_changed = true;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
- connector->base.id,
- connector->name,
- new_encoder->base.id,
- new_encoder->name,
- connector_state->crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
return 0;
}
@@ -248,30 +248,24 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
static int
mode_fixup(struct drm_atomic_state *state)
{
- int ncrtcs = state->dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
bool ret;
- for (i = 0; i < ncrtcs; i++) {
- crtc_state = state->crtc_states[i];
-
- if (!crtc_state || !crtc_state->mode_changed)
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->mode_changed)
continue;
drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
}
- for (i = 0; i < state->num_connector; i++) {
- struct drm_encoder_helper_funcs *funcs;
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
- conn_state = state->connector_states[i];
-
- if (!conn_state)
- continue;
-
WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
if (!conn_state->crtc || !conn_state->best_encoder)
@@ -292,7 +286,7 @@ mode_fixup(struct drm_atomic_state *state)
encoder->bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_KMS("Bridge fixup failed\n");
+ DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
return -EINVAL;
}
}
@@ -301,37 +295,33 @@ mode_fixup(struct drm_atomic_state *state)
ret = funcs->atomic_check(encoder, crtc_state,
conn_state);
if (ret) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
+ encoder->base.id, encoder->name);
return ret;
}
} else {
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
+ encoder->base.id, encoder->name);
return -EINVAL;
}
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
- crtc_state = state->crtc_states[i];
- crtc = state->crtcs[i];
-
- if (!crtc_state || !crtc_state->mode_changed)
+ if (!crtc_state->mode_changed)
continue;
funcs = crtc->helper_private;
ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
+ crtc->base.id);
return -EINVAL;
}
}
@@ -346,7 +336,7 @@ needs_modeset(struct drm_crtc_state *state)
}
/**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
* @state: the driver state object
*
@@ -371,32 +361,27 @@ int
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int ncrtcs = dev->mode_config.num_crtc;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
int i, ret;
- for (i = 0; i < ncrtcs; i++) {
- crtc = state->crtcs[i];
- crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
- DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
+ crtc->base.id);
crtc_state->mode_changed = true;
}
if (crtc->state->enable != crtc_state->enable) {
- DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
+ crtc->base.id);
crtc_state->mode_changed = true;
}
}
- for (i = 0; i < state->num_connector; i++) {
+ for_each_connector_in_state(state, connector, connector_state, i) {
/*
* This only sets crtc->mode_changed for routing changes,
* drivers must set crtc->mode_changed themselves when connector
@@ -413,32 +398,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
* configuration. This must be done before calling mode_fixup in case a
* crtc only changed its mode but has the same set of connectors.
*/
- for (i = 0; i < ncrtcs; i++) {
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
int num_connectors;
- crtc = state->crtcs[i];
- crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
/*
* We must set ->active_changed after walking connectors for
* otherwise an update that only changes active would result in
* a full modeset because update_connector_routing force that.
*/
if (crtc->state->active != crtc_state->active) {
- DRM_DEBUG_KMS("[CRTC:%d] active changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
+ crtc->base.id);
crtc_state->active_changed = true;
}
if (!needs_modeset(crtc_state))
continue;
- DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
- crtc->base.id,
- crtc_state->enable ? 'y' : 'n',
+ DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
+ crtc->base.id,
+ crtc_state->enable ? 'y' : 'n',
crtc_state->active ? 'y' : 'n');
ret = drm_atomic_add_affected_connectors(state, crtc);
@@ -449,8 +428,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
crtc);
if (crtc_state->enable != !!num_connectors) {
- DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
+ crtc->base.id);
return -EINVAL;
}
@@ -461,7 +440,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_planes - validate state object for planes changes
* @dev: DRM device
* @state: the driver state object
*
@@ -476,17 +455,14 @@ int
drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i, ret = 0;
- for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs;
funcs = plane->helper_private;
@@ -497,18 +473,14 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n",
- plane->base.id);
+ DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
+ plane->base.id);
return ret;
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -517,8 +489,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
- DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
+ crtc->base.id);
return ret;
}
}
@@ -567,27 +539,26 @@ EXPORT_SYMBOL(drm_atomic_helper_check);
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int i;
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector_state *old_conn_state;
- struct drm_connector *connector;
- struct drm_encoder_helper_funcs *funcs;
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_crtc_state *old_crtc_state;
- old_conn_state = old_state->connector_states[i];
- connector = old_state->connectors[i];
-
/* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state. */
- if (!old_conn_state || !old_conn_state->crtc)
+ if (!old_conn_state->crtc)
continue;
old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
- if (!old_crtc_state->active)
+ if (!old_crtc_state->active ||
+ !needs_modeset(old_conn_state->crtc->state))
continue;
encoder = old_conn_state->best_encoder;
@@ -600,12 +571,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = encoder->helper_private;
- DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
- * it away), so we won't call call disable hooks twice.
+ * it away), so we won't call disable hooks twice.
*/
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
@@ -622,16 +593,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
encoder->bridge->funcs->post_disable(encoder->bridge);
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state;
-
- crtc = old_state->crtcs[i];
- old_crtc_state = old_state->crtc_states[i];
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
/* Shut down everything that needs a full modeset. */
- if (!crtc || !needs_modeset(crtc->state))
+ if (!needs_modeset(crtc->state))
continue;
if (!old_crtc_state->active)
@@ -639,8 +605,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
- DRM_DEBUG_KMS("disabling [CRTC:%d]\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+ crtc->base.id);
/* Right function depends upon target state. */
@@ -656,16 +622,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
static void
set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int i;
/* clear out existing links */
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
-
- connector = old_state->connectors[i];
-
- if (!connector || !connector->encoder)
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ if (!connector->encoder)
continue;
WARN_ON(!connector->encoder->crtc);
@@ -675,12 +640,8 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
}
/* set new links */
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
-
- connector = old_state->connectors[i];
-
- if (!connector || !connector->state->crtc)
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ if (!connector->state->crtc)
continue;
if (WARN_ON(!connector->state->best_encoder))
@@ -691,14 +652,7 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
}
/* set legacy state in the crtc structure */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc;
-
- crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
crtc->mode = crtc->state->mode;
crtc->enabled = crtc->state->enable;
crtc->x = crtc->primary->state->src_x >> 16;
@@ -709,38 +663,35 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
int i;
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
-
- crtc = old_state->crtcs[i];
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
- if (!crtc || !crtc->state->mode_changed)
+ if (!crtc->state->mode_changed)
continue;
funcs = crtc->helper_private;
- if (crtc->state->enable) {
- DRM_DEBUG_KMS("modeset on [CRTC:%d]\n",
- crtc->base.id);
+ if (crtc->state->enable && funcs->mode_set_nofb) {
+ DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
+ crtc->base.id);
funcs->mode_set_nofb(crtc);
}
}
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
- struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
- connector = old_state->connectors[i];
-
- if (!connector || !connector->state->best_encoder)
+ if (!connector->state->best_encoder)
continue;
encoder = connector->state->best_encoder;
@@ -752,14 +703,15 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!new_crtc_state->mode_changed)
continue;
- DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
- * it away), so we won't call call mode_set hooks twice.
+ * it away), so we won't call mode_set hooks twice.
*/
- funcs->mode_set(encoder, mode, adjusted_mode);
+ if (funcs->mode_set)
+ funcs->mode_set(encoder, mode, adjusted_mode);
if (encoder->bridge && encoder->bridge->funcs->mode_set)
encoder->bridge->funcs->mode_set(encoder->bridge,
@@ -768,46 +720,56 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
}
/**
- * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
+ * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
- * @state: atomic state
+ * @old_state: atomic state object with old state structures
*
- * This function commits the modeset changes that need to be committed before
- * updating planes. It shuts down all the outputs that need to be shut down and
+ * This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
+ *
+ * For compatibility with legacy crtc helpers this should be called before
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
*/
-void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
- struct drm_atomic_state *state)
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
- disable_outputs(dev, state);
- set_routing_links(dev, state);
- crtc_set_mode(dev, state);
+ disable_outputs(dev, old_state);
+ set_routing_links(dev, old_state);
+ crtc_set_mode(dev, old_state);
}
-EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
+EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
/**
- * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
- * This function commits the modeset changes that need to be committed after
- * updating planes: It enables all the outputs with the new configuration which
- * had to be turned off for the update.
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy crtc helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
*/
-void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
int i;
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc;
-
- crtc = old_state->crtcs[i];
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
- if (!crtc || !needs_modeset(crtc->state))
+ if (!needs_modeset(crtc->state))
continue;
if (!crtc->state->active)
@@ -816,8 +778,8 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
funcs = crtc->helper_private;
if (crtc->state->enable) {
- DRM_DEBUG_KMS("enabling [CRTC:%d]\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+ crtc->base.id);
if (funcs->enable)
funcs->enable(crtc);
@@ -826,28 +788,26 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
}
}
- for (i = 0; i < old_state->num_connector; i++) {
- struct drm_connector *connector;
- struct drm_encoder_helper_funcs *funcs;
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
- connector = old_state->connectors[i];
-
- if (!connector || !connector->state->best_encoder)
+ if (!connector->state->best_encoder)
continue;
- if (!connector->state->crtc->state->active)
+ if (!connector->state->crtc->state->active ||
+ !needs_modeset(connector->state->crtc->state))
continue;
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
- DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
- * it away), so we won't call call enable hooks twice.
+ * it away), so we won't call enable hooks twice.
*/
if (encoder->bridge)
encoder->bridge->funcs->pre_enable(encoder->bridge);
@@ -861,18 +821,17 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
encoder->bridge->funcs->enable(encoder->bridge);
}
}
-EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
+EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
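/*
 * Editorial sketch, not part of the patch: the kernel-doc above allows two
 * orderings. drm_atomic_helper_commit() (further down in this diff) uses
 * disables -> planes -> enables; a runtime-PM oriented driver may instead
 * group the modeset phases and commit planes last:
 */
static void example_rpm_commit_tail(struct drm_device *dev,
                                    struct drm_atomic_state *old_state)
{
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);

        /* planes last, so updates only happen on powered, active CRTCs */
        drm_atomic_helper_commit_planes(dev, old_state);
}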
static void wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int nplanes = dev->mode_config.num_total_plane;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i;
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane || !plane->state->fence)
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ if (!plane->state->fence)
continue;
WARN_ON(!plane->state->fb);
@@ -889,16 +848,9 @@ static bool framebuffer_changed(struct drm_device *dev,
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
- int nplanes = old_state->dev->mode_config.num_total_plane;
int i;
- for (i = 0; i < nplanes; i++) {
- plane = old_state->planes[i];
- old_plane_state = old_state->plane_states[i];
-
- if (!plane)
- continue;
-
+ for_each_plane_in_state(old_state, plane, old_plane_state, i) {
if (plane->state->crtc != crtc &&
old_plane_state->crtc != crtc)
continue;
@@ -927,16 +879,9 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int ncrtcs = old_state->dev->mode_config.num_crtc;
int i, ret;
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
- old_crtc_state = old_state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
/* No one cares about the old state, so abuse it for tracking
* and store whether we hold a vblank reference (and should do a
* vblank wait) in the ->enable boolean. */
@@ -961,11 +906,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
}
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
- old_crtc_state = old_state->crtc_states[i];
-
- if (!crtc || !old_crtc_state->enable)
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ if (!old_crtc_state->enable)
continue;
ret = wait_event_timeout(dev->vblank[i].queue,
@@ -1014,7 +956,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
/*
* Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one conditions: It must be guaranteed
+ * any modeset locks at all under one condition: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
@@ -1030,11 +972,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
wait_for_fences(dev, state);
- drm_atomic_helper_commit_pre_planes(dev, state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state);
- drm_atomic_helper_commit_post_planes(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
@@ -1085,9 +1027,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
*/
/**
- * drm_atomic_helper_prepare_planes - prepare plane resources after commit
+ * drm_atomic_helper_prepare_planes - prepare plane resources before commit
* @dev: DRM device
- * @state: atomic state object with old state structures
+ * @state: atomic state object with new state structures
*
* This function prepares plane state, specifically framebuffers, for the new
* configuration. If any failure is encountered this function will call
@@ -1103,8 +1045,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
int ret, i;
for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
struct drm_framebuffer *fb;
if (!plane)
@@ -1112,10 +1055,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
funcs = plane->helper_private;
- fb = state->plane_states[i]->fb;
+ fb = plane_state->fb;
if (fb && funcs->prepare_fb) {
- ret = funcs->prepare_fb(plane, fb);
+ ret = funcs->prepare_fb(plane, fb, plane_state);
if (ret)
goto fail;
}
@@ -1125,8 +1068,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
fail:
for (i--; i >= 0; i--) {
- struct drm_plane_helper_funcs *funcs;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
struct drm_framebuffer *fb;
if (!plane)
@@ -1137,7 +1081,7 @@ fail:
fb = state->plane_states[i]->fb;
if (fb && funcs->cleanup_fb)
- funcs->cleanup_fb(plane, fb);
+ funcs->cleanup_fb(plane, fb, plane_state);
}
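/*
 * Editorial sketch, not part of the patch: with the change above, driver
 * prepare_fb()/cleanup_fb() hooks receive the plane state as a third
 * argument in addition to the framebuffer. The exact prototype (including
 * const qualifiers) is an assumption here.
 */
static int example_prepare_fb(struct drm_plane *plane,
                              struct drm_framebuffer *fb,
                              const struct drm_plane_state *new_state)
{
        /* pin/map the fb backing storage; new_state->crtc names the target */
        return 0;
}

static void example_cleanup_fb(struct drm_plane *plane,
                               struct drm_framebuffer *fb,
                               const struct drm_plane_state *old_state)
{
        /* release whatever example_prepare_fb() acquired */
}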
@@ -1161,16 +1105,14 @@ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
int i;
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -1180,13 +1122,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_begin(crtc);
}
- for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = old_state->planes[i];
- struct drm_plane_state *old_plane_state;
-
- if (!plane)
- continue;
+ for_each_plane_in_state(old_state, plane, old_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs;
funcs = plane->helper_private;
@@ -1205,12 +1142,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_update(plane, old_plane_state);
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc_helper_funcs *funcs;
- struct drm_crtc *crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -1237,23 +1170,20 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- int nplanes = dev->mode_config.num_total_plane;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i;
- for (i = 0; i < nplanes; i++) {
- struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = old_state->planes[i];
+ for_each_plane_in_state(old_state, plane, plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs;
struct drm_framebuffer *old_fb;
- if (!plane)
- continue;
-
funcs = plane->helper_private;
- old_fb = old_state->plane_states[i]->fb;
+ old_fb = plane_state->fb;
if (old_fb && funcs->cleanup_fb)
- funcs->cleanup_fb(plane, old_fb);
+ funcs->cleanup_fb(plane, old_fb, plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
@@ -1496,8 +1426,10 @@ static int update_output_state(struct drm_atomic_state *state,
struct drm_mode_set *set)
{
struct drm_device *dev = set->crtc->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
struct drm_connector_state *conn_state;
- int ncrtcs = state->dev->mode_config.num_crtc;
int ret, i, j;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
@@ -1513,27 +1445,14 @@ static int update_output_state(struct drm_atomic_state *state,
return PTR_ERR(conn_state);
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ret;
}
/* Then recompute connector->crtc links and crtc enabling state. */
- for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector;
-
- connector = state->connectors[i];
- conn_state = state->connector_states[i];
-
- if (!connector)
- continue;
-
+ for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == set->crtc) {
ret = drm_atomic_set_crtc_for_connector(conn_state,
NULL);
@@ -1552,13 +1471,7 @@ static int update_output_state(struct drm_atomic_state *state,
}
}
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
/* Don't update ->enable for the CRTC in the set_config request,
* since a mismatch would indicate a bug in the upper layers.
* The actual modeset code later on will catch any
@@ -1678,12 +1591,13 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_set_config);
/**
- * drm_atomic_helper_crtc_set_property - helper for crtc prorties
+ * drm_atomic_helper_crtc_set_property - helper for crtc properties
* @crtc: DRM crtc
* @property: DRM property
* @val: value of property
*
- * Provides a default plane disablle handler using the atomic driver interface.
+ * Provides a default crtc set_property handler using the atomic driver
+ * interface.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1737,12 +1651,13 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
/**
- * drm_atomic_helper_plane_set_property - helper for plane prorties
+ * drm_atomic_helper_plane_set_property - helper for plane properties
* @plane: DRM plane
* @property: DRM property
* @val: value of property
*
- * Provides a default plane disable handler using the atomic driver interface.
+ * Provides a default plane set_property handler using the atomic driver
+ * interface.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1796,12 +1711,13 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
/**
- * drm_atomic_helper_connector_set_property - helper for connector prorties
+ * drm_atomic_helper_connector_set_property - helper for connector properties
* @connector: DRM connector
* @property: DRM property
* @val: value of property
*
- * Provides a default plane disablle handler using the atomic driver interface.
+ * Provides a default connector set_property handler using the atomic driver
+ * interface.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1984,10 +1900,10 @@ retry:
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
list_for_each_entry(tmp_connector, &config->connector_list, head) {
- if (connector->state->crtc != crtc)
+ if (tmp_connector->state->crtc != crtc)
continue;
- if (connector->dpms == DRM_MODE_DPMS_ON) {
+ if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
active = true;
break;
}
@@ -2050,6 +1966,26 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ memcpy(state, crtc->state, sizeof(*state));
+
+ state->mode_changed = false;
+ state->active_changed = false;
+ state->planes_changed = false;
+ state->event = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
+/**
* drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
* @crtc: drm CRTC
*
@@ -2064,20 +2000,35 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
if (WARN_ON(!crtc->state))
return NULL;
- state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
-
- if (state) {
- state->mode_changed = false;
- state->active_changed = false;
- state->planes_changed = false;
- state->event = NULL;
- }
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_crtc_duplicate_state(crtc, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @crtc: CRTC object
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ /*
+ * This is currently a placeholder so that drivers that subclass the
+ * state will automatically do the right thing if code is ever added
+ * to this function.
+ */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
+/**
* drm_atomic_helper_crtc_destroy_state - default state destroy hook
* @crtc: drm CRTC
* @state: CRTC state object to release
@@ -2088,6 +2039,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ __drm_atomic_helper_crtc_destroy_state(crtc, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
@@ -2113,6 +2065,24 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ memcpy(state, plane->state, sizeof(*state));
+
+ if (state->fb)
+ drm_framebuffer_reference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
+/**
* drm_atomic_helper_plane_duplicate_state - default state duplicate hook
* @plane: drm plane
*
@@ -2127,16 +2097,32 @@ drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
if (WARN_ON(!plane->state))
return NULL;
- state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
-
- if (state && state->fb)
- drm_framebuffer_reference(state->fb);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @plane: plane object
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
+/**
* drm_atomic_helper_plane_destroy_state - default state destroy hook
* @plane: drm plane
* @state: plane state object to release
@@ -2147,9 +2133,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- if (state->fb)
- drm_framebuffer_unreference(state->fb);
-
+ __drm_atomic_helper_plane_destroy_state(plane, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
@@ -2173,6 +2157,22 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ memcpy(state, connector->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
* drm_atomic_helper_connector_duplicate_state - default state duplicate hook
* @connector: drm connector
*
@@ -2182,14 +2182,41 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
{
+ struct drm_connector_state *state;
+
if (WARN_ON(!connector->state))
return NULL;
- return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_connector_duplicate_state(connector, state);
+
+ return state;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @connector: connector object
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ /*
+ * This is currently a placeholder so that drivers that subclass the
+ * state will automatically do the right thing if code is ever added
+ * to this function.
+ */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
* drm_atomic_helper_connector_destroy_state - default state destroy hook
* @connector: drm connector
* @state: connector state object to release
@@ -2200,6 +2227,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
+ __drm_atomic_helper_connector_destroy_state(connector, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
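The newly exported __drm_atomic_helper_*_duplicate_state() and __drm_atomic_helper_*_destroy_state() variants exist so that drivers can subclass the core state structures while reusing the common copy/release logic. A hedged sketch of that pattern for a CRTC; struct foo_crtc_state and its hooks are invented for illustration and assume the usual kernel headers:

struct foo_crtc_state {
	struct drm_crtc_state base;	/* core state embedded; driver data follows */
	u32 custom_dither_mode;		/* driver-private member (example only) */
};

static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *old = container_of(crtc->state,
						  struct foo_crtc_state, base);
	struct foo_crtc_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Copy the core state and reset inferred values, then carry over
	 * the driver-private member by hand. */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	state->custom_dither_mode = old->custom_dither_mode;

	return &state->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(crtc, state);
	kfree(container_of(state, struct foo_crtc_state, base));
}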
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index d1187e571c6d..eaa5790c2a6f 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -49,7 +49,7 @@ void drm_bridge_remove(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_bridge_remove);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
{
if (!dev || !bridge)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b6f076b213bc..3007b44e6bf4 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -660,6 +660,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_mode_config *config = &dev->mode_config;
int ret;
+ WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
+ WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
+
crtc->dev = dev;
crtc->funcs = funcs;
crtc->invert_dimensions = false;
@@ -1999,21 +2002,32 @@ int drm_mode_getcrtc(struct drm_device *dev,
return -ENOENT;
drm_modeset_lock_crtc(crtc, crtc->primary);
- crtc_resp->x = crtc->x;
- crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
if (crtc->primary->fb)
crtc_resp->fb_id = crtc->primary->fb->base.id;
else
crtc_resp->fb_id = 0;
- if (crtc->enabled) {
-
- drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
- crtc_resp->mode_valid = 1;
+ if (crtc->state) {
+ crtc_resp->x = crtc->primary->state->src_x >> 16;
+ crtc_resp->y = crtc->primary->state->src_y >> 16;
+ if (crtc->state->enable) {
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
+ crtc_resp->mode_valid = 1;
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
} else {
- crtc_resp->mode_valid = 0;
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ if (crtc->enabled) {
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+ crtc_resp->mode_valid = 1;
+
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
}
drm_modeset_unlock_crtc(crtc);
@@ -2266,8 +2280,6 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
crtc = drm_encoder_get_crtc(encoder);
if (crtc)
enc_resp->crtc_id = crtc->base.id;
- else if (encoder->crtc)
- enc_resp->crtc_id = encoder->crtc->base.id;
else
enc_resp->crtc_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -2402,6 +2414,27 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
return 0;
}
+/**
+ * drm_plane_check_pixel_format - Check if the plane supports the pixel format
+ * @plane: plane to check for format support
+ * @format: the pixel format
+ *
+ * Returns:
+ * Zero if @plane has @format in its list of supported pixel formats, -EINVAL
+ * otherwise.
+ */
+int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+{
+ unsigned int i;
+
+ for (i = 0; i < plane->format_count; i++) {
+ if (format == plane->format_types[i])
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/*
* setplane_internal - setplane handler for internal callers
*
@@ -2422,7 +2455,6 @@ static int __setplane_internal(struct drm_plane *plane,
{
int ret = 0;
unsigned int fb_width, fb_height;
- unsigned int i;
/* No fb means shut it down */
if (!fb) {
@@ -2445,16 +2477,24 @@ static int __setplane_internal(struct drm_plane *plane,
}
/* Check whether this plane supports the fb pixel format. */
- for (i = 0; i < plane->format_count; i++)
- if (fb->pixel_format == plane->format_types[i])
- break;
- if (i == plane->format_count) {
+ ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+ if (ret) {
DRM_DEBUG_KMS("Invalid pixel format %s\n",
drm_get_format_name(fb->pixel_format));
- ret = -EINVAL;
goto out;
}
+ /* Give drivers some help against integer overflows */
+ if (crtc_w > INT_MAX ||
+ crtc_x > INT_MAX - (int32_t) crtc_w ||
+ crtc_h > INT_MAX ||
+ crtc_y > INT_MAX - (int32_t) crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ crtc_w, crtc_h, crtc_x, crtc_y);
+ return -ERANGE;
+ }
+
fb_width = fb->width << 16;
fb_height = fb->height << 16;
@@ -2539,17 +2579,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- /* Give drivers some help against integer overflows */
- if (plane_req->crtc_w > INT_MAX ||
- plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
- plane_req->crtc_h > INT_MAX ||
- plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->crtc_x, plane_req->crtc_y);
- return -ERANGE;
- }
-
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
@@ -2775,6 +2804,23 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ /*
+ * Check whether the primary plane supports the fb pixel format.
+ * Drivers not implementing the universal planes API use a
+ * default formats list provided by the DRM core which doesn't
+ * match real hardware capabilities. Skip the check in that
+ * case.
+ */
+ if (!crtc->primary->format_default) {
+ ret = drm_plane_check_pixel_format(crtc->primary,
+ fb->pixel_format);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format));
+ goto out;
+ }
+ }
+
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
mode, fb);
if (ret)
@@ -3252,6 +3298,12 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
+
+ if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+ DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
+ return -EINVAL;
+ }
}
return 0;
@@ -3266,7 +3318,7 @@ internal_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret;
- if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
@@ -3282,6 +3334,12 @@ internal_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
+ if (r->flags & DRM_MODE_FB_MODIFIERS &&
+ !dev->mode_config.allow_fb_modifiers) {
+ DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+ return ERR_PTR(-EINVAL);
+ }
+
ret = framebuffer_check(r);
if (ret)
return ERR_PTR(ret);
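For context on the DRM_MODE_FB_MODIFIERS checks added above, here is a hedged userspace sketch of an ADDFB2 call that passes a format modifier. It assumes libdrm/uapi headers updated for this series (DRM_CAP_ADDFB2_MODIFIERS and DRM_MODE_FB_MODIFIERS are introduced here), and the GEM handle, pitch and modifier values are placeholders:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm_fourcc.h>

/* fd is an open DRM device; handle/pitch describe an already-allocated buffer. */
static int example_addfb2_with_modifier(int fd, uint32_t handle,
					uint32_t width, uint32_t height,
					uint32_t pitch, uint64_t modifier)
{
	struct drm_mode_fb_cmd2 cmd;
	uint64_t cap = 0;

	/* New capability exposed by this series; bail out if unsupported. */
	if (drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &cap) || !cap)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.width = width;
	cmd.height = height;
	cmd.pixel_format = DRM_FORMAT_XRGB8888;
	cmd.flags = DRM_MODE_FB_MODIFIERS;
	cmd.handles[0] = handle;
	cmd.pitches[0] = pitch;
	cmd.modifier[0] = modifier;	/* e.g. a tiling modifier reported by the driver */

	return drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd);
}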
@@ -5543,6 +5601,7 @@ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
return NULL;
}
+EXPORT_SYMBOL(drm_mode_get_tile_group);
/**
* drm_mode_create_tile_group - create a tile group from a displayid description
@@ -5581,3 +5640,4 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index b1979e7bdc88..ab00286aec93 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -161,7 +161,7 @@ EXPORT_SYMBOL(drm_helper_crtc_in_use);
static void
drm_encoder_disable(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
@@ -191,7 +191,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
if (crtc_funcs->disable)
@@ -229,7 +229,7 @@ EXPORT_SYMBOL(drm_helper_disable_unused_functions);
static void
drm_crtc_prepare_encoders(struct drm_device *dev)
{
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -270,9 +270,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
bool saved_enabled;
struct drm_encoder *encoder;
@@ -292,6 +292,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
saved_mode = crtc->mode;
+ saved_hwmode = crtc->hwmode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -334,6 +335,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ crtc->hwmode = *adjusted_mode;
+
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -396,9 +399,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
encoder->bridge->funcs->enable(encoder->bridge);
}
- /* Store real post-adjustment hardware mode. */
- crtc->hwmode = *adjusted_mode;
-
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
@@ -411,6 +411,7 @@ done:
if (!ret) {
crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
+ crtc->hwmode = saved_hwmode;
crtc->x = saved_x;
crtc->y = saved_y;
}
@@ -472,7 +473,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
- struct drm_crtc_helper_funcs *crtc_funcs;
+ const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
int ret;
int i;
@@ -572,7 +573,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* a) traverse passed in connector list and get encoders for them */
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_connector_helper_funcs *connector_funcs =
+ const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
@@ -732,7 +733,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_bridge *bridge = encoder->bridge;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
if (bridge) {
if (mode == DRM_MODE_DPMS_ON)
@@ -794,7 +795,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
@@ -808,7 +809,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
@@ -837,6 +838,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
+ fb->modifier[i] = mode_cmd->modifier[i];
}
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
@@ -869,7 +871,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
- struct drm_crtc_helper_funcs *crtc_funcs;
+ const struct drm_crtc_helper_funcs *crtc_funcs;
int encoder_dpms;
bool ret;
@@ -934,7 +936,7 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
struct drm_framebuffer *old_fb)
{
struct drm_crtc_state *crtc_state;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int ret;
if (crtc->funcs->atomic_duplicate_state)
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index f1283878ff6d..71dcbc64ae98 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -427,11 +427,13 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
* retrying the transaction as appropriate. It is assumed that the
* aux->transfer function does not modify anything in the msg other than the
* reply field.
+ *
+ * Returns bytes transferred on success, or a negative error code on failure.
*/
static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
unsigned int retry;
- int err;
+ int ret;
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
@@ -440,14 +442,14 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
*/
for (retry = 0; retry < 7; retry++) {
mutex_lock(&aux->hw_mutex);
- err = aux->transfer(aux, msg);
+ ret = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
- if (err < 0) {
- if (err == -EBUSY)
+ if (ret < 0) {
+ if (ret == -EBUSY)
continue;
- DRM_DEBUG_KMS("transaction failed: %d\n", err);
- return err;
+ DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+ return ret;
}
@@ -460,7 +462,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
break;
case DP_AUX_NATIVE_REPLY_NACK:
- DRM_DEBUG_KMS("native nack\n");
+ DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size);
return -EREMOTEIO;
case DP_AUX_NATIVE_REPLY_DEFER:
@@ -488,12 +490,10 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
* Both native ACK and I2C ACK replies received. We
* can assume the transfer was successful.
*/
- if (err < msg->size)
- return -EPROTO;
- return 0;
+ return ret;
case DP_AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("I2C nack\n");
+ DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n", ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
@@ -513,14 +513,55 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return -EREMOTEIO;
}
+/*
+ * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
+ *
+ * Returns an error code on failure, or a recommended transfer size on success.
+ */
+static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg)
+{
+ int err, ret = orig_msg->size;
+ struct drm_dp_aux_msg msg = *orig_msg;
+
+ while (msg.size > 0) {
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ if (err <= 0)
+ return err == 0 ? -EPROTO : err;
+
+ if (err < msg.size && err < ret) {
+ DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n",
+ msg.size, err);
+ ret = err;
+ }
+
+ msg.size -= err;
+ msg.buffer += err;
+ }
+
+ return ret;
+}
+
+/*
+ * Bizlink designed DP->DVI-D Dual Link adapters require the I2C over AUX
+ * packets to be as large as possible. If not, the I2C transactions never
+ * succeed. Hence the default is maximum.
+ */
+static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES;
+module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644);
+MODULE_PARM_DESC(dp_aux_i2c_transfer_size,
+ "Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)");
+
static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
int num)
{
struct drm_dp_aux *aux = adapter->algo_data;
unsigned int i, j;
+ unsigned transfer_size;
struct drm_dp_aux_msg msg;
int err = 0;
+ dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
+
memset(&msg, 0, sizeof(msg));
for (i = 0; i < num; i++) {
@@ -538,20 +579,19 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
err = drm_dp_i2c_do_msg(aux, &msg);
if (err < 0)
break;
- /*
- * Many hardware implementations support FIFOs larger than a
- * single byte, but it has been empirically determined that
- * transferring data in larger chunks can actually lead to
- * decreased performance. Therefore each message is simply
- * transferred byte-by-byte.
+ /* We want each transaction to be as large as possible, but
+ * we'll go to smaller sizes if the hardware gives us a
+ * short reply.
*/
- for (j = 0; j < msgs[i].len; j++) {
+ transfer_size = dp_aux_i2c_transfer_size;
+ for (j = 0; j < msgs[i].len; j += msg.size) {
msg.buffer = msgs[i].buf + j;
- msg.size = 1;
+ msg.size = min(transfer_size, msgs[i].len - j);
- err = drm_dp_i2c_do_msg(aux, &msg);
+ err = drm_dp_i2c_drain_msg(aux, &msg);
if (err < 0)
break;
+ transfer_size = err;
}
if (err < 0)
break;
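As a worked example of the adaptive sizing above (illustrative numbers only): with the default dp_aux_i2c_transfer_size of 16, a 20-byte I2C-over-AUX read is first attempted as a 16-byte message. If the sink only returns 4 bytes per AUX reply, drm_dp_i2c_drain_msg() keeps issuing 4-byte transactions until those 16 bytes are drained and returns 4 as the recommended size, so the outer loop in drm_dp_i2c_xfer() lowers transfer_size to 4 for the remaining 4 bytes instead of repeatedly over-asking.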
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 379ab4555756..132581ca4ad8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2324,6 +2324,19 @@ out:
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ int slots = 0;
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return slots;
+
+ slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
+ return slots;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
+
/**
* drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
* @mgr: manager for this port
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index d51213464672..48f7359e2a6b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -70,7 +70,7 @@ void drm_err(const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
+ printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
__builtin_return_address(0), &vaf);
va_end(args);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index cc0ae047ed3b..5c1aca443e54 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -304,7 +304,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
}
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1e6a0c760c5d..cac422916c7a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -238,7 +238,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
- struct drm_crtc_helper_funcs *funcs;
+ const struct drm_crtc_helper_funcs *funcs;
int i;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
@@ -285,7 +285,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_crtc *crtc;
- struct drm_crtc_helper_funcs *funcs;
+ const struct drm_crtc_helper_funcs *funcs;
struct drm_framebuffer *fb;
int i;
@@ -765,7 +765,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
- struct drm_crtc_helper_funcs *crtc_funcs;
+ const struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, j, rc = 0;
@@ -1034,23 +1034,45 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
- int x, y;
+ struct drm_mode_set *mode_set;
+ int x, y, j;
+ /* in case of tile group, are we the last tile vert or horiz?
+ * If there is no tile group, you are always the last one both vertically
+ * and horizontally
+ */
+ bool lastv = true, lasth = true;
+
desired_mode = fb_helper->crtc_info[i].desired_mode;
+ mode_set = &fb_helper->crtc_info[i].mode_set;
+
+ if (!desired_mode)
+ continue;
+
+ crtc_count++;
+
x = fb_helper->crtc_info[i].x;
y = fb_helper->crtc_info[i].y;
- if (desired_mode) {
- if (gamma_size == 0)
- gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
- if (desired_mode->hdisplay + x < sizes.fb_width)
- sizes.fb_width = desired_mode->hdisplay + x;
- if (desired_mode->vdisplay + y < sizes.fb_height)
- sizes.fb_height = desired_mode->vdisplay + y;
- if (desired_mode->hdisplay + x > sizes.surface_width)
- sizes.surface_width = desired_mode->hdisplay + x;
- if (desired_mode->vdisplay + y > sizes.surface_height)
- sizes.surface_height = desired_mode->vdisplay + y;
- crtc_count++;
+
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+
+ sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
+ sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
+
+ for (j = 0; j < mode_set->num_connectors; j++) {
+ struct drm_connector *connector = mode_set->connectors[j];
+ if (connector->has_tile) {
+ lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
+ lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
+ /* cloning to multiple tiles is just crazy-talk, so: */
+ break;
+ }
}
+
+ if (lasth)
+ sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+ if (lastv)
+ sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
}
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
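A brief worked example of the tile handling above (assumed geometry, not from this patch): two 1920x2160 tiles side by side forming a 3840x2160 panel. Both CRTCs grow surface_width/surface_height to 3840x2160, but only the right-hand tile is "lasth", so sizes.fb_width is clamped using its hdisplay + x = 3840; both tiles are "lastv", so sizes.fb_height ends up 2160. The left tile therefore no longer shrinks the fbdev framebuffer to half the panel width.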
@@ -1261,12 +1283,12 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
int width, int height)
{
struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *mode;
bool prefer_non_interlace;
cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->specified == false)
- return mode;
+ return NULL;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
@@ -1275,7 +1297,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
goto create_mode;
prefer_non_interlace = !cmdline_mode->interlace;
- again:
+again:
list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
@@ -1529,7 +1551,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int c, o;
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
+ const struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index f1b32f91d941..cbb4fc0fc969 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -37,6 +37,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include "drm_internal.h"
#include "drm_legacy.h"
/**
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 2f4c4343dfa3..aa8bbb460c57 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
-drm_ioctl_compat_t *drm_compat_ioctls[] = {
+static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 3785d66721f2..266dcd6cdf3b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -321,6 +321,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
else
req->value = 64;
break;
+ case DRM_CAP_ADDFB2_MODIFIERS:
+ req->value = dev->mode_config.allow_fb_modifiers;
+ break;
default:
return -EINVAL;
}
@@ -521,8 +524,13 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = { \
+ .cmd = ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -660,39 +668,29 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
- unsigned int usize, asize;
+ unsigned int usize, asize, drv_size;
dev = file_priv->minor->dev;
if (drm_device_is_unplugged(dev))
return -ENODEV;
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
+ if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) {
+ /* driver ioctl */
+ if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ goto err_i1;
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- cmd = ioctl->cmd_drv;
- }
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- u32 drv_size;
-
+ } else {
+ /* core ioctl */
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ goto err_i1;
ioctl = &drm_ioctls[nr];
+ }
- drv_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
-
- cmd = ioctl->cmd;
- } else
- goto err_i1;
+ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = _IOC_SIZE(cmd);
+ asize = max(usize, drv_size);
+ cmd = ioctl->cmd;
DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
task_pid_nr(current),
@@ -773,12 +771,13 @@ EXPORT_SYMBOL(drm_ioctl);
*/
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
- if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
- (nr < DRM_COMMAND_BASE)) {
- *flags = drm_ioctls[nr].flags;
- return true;
- }
+ if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END)
+ return false;
+
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ return false;
- return false;
+ *flags = drm_ioctls[nr].flags;
+ return true;
}
EXPORT_SYMBOL(drm_ioctl_flags);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 10574a0c3a55..af9662e58272 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
/* Reinitialize corresponding vblank timestamp if high-precision query
* available. Skip this step if query unsupported or failed. Will
- * reinitialize delayed at next vblank interrupt in that case.
+ * reinitialize delayed at next vblank interrupt in that case and
+ * assign 0 for now, to mark the vblanktimestamp as invalid.
*/
- if (rc) {
- tslot = atomic_read(&vblank->count) + diff;
- vblanktimestamp(dev, crtc, tslot) = t_vblank;
- }
+ tslot = atomic_read(&vblank->count) + diff;
+ vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
smp_mb__before_atomic();
atomic_add(diff, &vblank->count);
@@ -276,7 +275,6 @@ static void vblank_disable_fn(unsigned long arg)
void drm_vblank_cleanup(struct drm_device *dev)
{
int crtc;
- unsigned long irqflags;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
@@ -285,11 +283,10 @@ void drm_vblank_cleanup(struct drm_device *dev)
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
- del_timer_sync(&vblank->disable_timer);
+ WARN_ON(vblank->enabled &&
+ drm_core_check_feature(dev, DRIVER_MODESET));
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- vblank_disable_and_save(dev, crtc);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ del_timer_sync(&vblank->disable_timer);
}
kfree(dev->vblank);
@@ -475,17 +472,23 @@ int drm_irq_uninstall(struct drm_device *dev)
dev->irq_enabled = false;
/*
- * Wake up any waiters so they don't hang.
+ * Wake up any waiters so they don't hang. This is just to paper over
+ * issues for UMS drivers which aren't in full control of their
+ * vblank/irq handling. KMS drivers must ensure that vblanks are all
+ * disabled when uninstalling the irq handler.
*/
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
+ if (!vblank->enabled)
+ continue;
+
+ WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
+
+ vblank_disable_and_save(dev, i);
wake_up(&vblank->queue);
- vblank->enabled = false;
- vblank->last =
- dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1052,7 +1055,7 @@ EXPORT_SYMBOL(drm_vblank_get);
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
- * This is the native kms version of drm_vblank_off().
+ * This is the native kms version of drm_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
@@ -1233,6 +1236,38 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
+ * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
+ * @drm_crtc: CRTC in question
+ *
+ * Drivers can use this function to reset the vblank state to off at load time.
+ * Drivers should use this together with the drm_crtc_vblank_off() and
+ * drm_crtc_vblank_on() functions. The difference compared to
+ * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
+ * and hence doesn't need to call any driver hooks.
+ */
+void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
+{
+ struct drm_device *dev = drm_crtc->dev;
+ unsigned long irqflags;
+ int crtc = drm_crtc_index(drm_crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /*
+ * Prevent subsequent drm_vblank_get() from enabling the vblank
+ * interrupt by bumping the refcount.
+ */
+ if (!vblank->inmodeset) {
+ atomic_inc(&vblank->refcount);
+ vblank->inmodeset = 1;
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ WARN_ON(!list_empty(&dev->vblank_event_list));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_reset);
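A hedged sketch of the intended call sequence at driver load time; foo_load() and its error handling are invented for illustration and assume the usual kernel headers:

static int foo_load(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	int ret;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		return ret;

	/*
	 * Hardware comes up with vblank interrupts disabled; mark the
	 * software state accordingly without calling any driver hooks.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		drm_crtc_vblank_reset(crtc);

	/* Modeset code later pairs drm_crtc_vblank_on()/_off() as usual. */
	return 0;
}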
+
+/**
* drm_vblank_on - enable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
@@ -1653,7 +1688,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
struct timeval tvblank;
unsigned long irqflags;
- if (!dev->num_crtcs)
+ if (WARN_ON_ONCE(!dev->num_crtcs))
return false;
if (WARN_ON(crtc >= dev->num_crtcs))
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 487d0e35c134..213b11ea69b5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -278,7 +278,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
hblank = drm_mode->hdisplay * hblank_percentage /
(100 * HV_FACTOR - hblank_percentage);
hblank -= hblank % (2 * CVT_H_GRANULARITY);
- /* 14. find the total pixes per line */
+ /* 14. find the total pixels per line */
drm_mode->htotal = drm_mode->hdisplay + hblank;
drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
drm_mode->hsync_start = drm_mode->hsync_end -
@@ -903,6 +903,12 @@ EXPORT_SYMBOL(drm_mode_duplicate);
*/
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
+ if (!mode1 && !mode2)
+ return true;
+
+ if (!mode1 || !mode2)
+ return false;
+
/* do clock check convert to PICOS so fb modes get matched
* the same */
if (mode1->clock && mode2->clock) {
@@ -1148,7 +1154,7 @@ EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
- * @merge_type_bits: whether to merge or overright type bits.
+ * @merge_type_bits: whether to merge or overwrite type bits
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
@@ -1209,7 +1215,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update);
* <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
* The intermediate drm_cmdline_mode structure is required to store additional
- * options from the command line modline like the force-enabel/disable flag.
+ * options from the command line modeline like the force-enable/disable flag.
*
* Returns:
* True if a valid modeline has been parsed, false otherwise.
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 16150a00c237..aaa130736bf8 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -43,14 +43,10 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
- struct device_node *remote_port, *ep = NULL;
+ struct device_node *remote_port, *ep;
uint32_t possible_crtcs = 0;
- do {
- ep = of_graph_get_next_endpoint(port, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(port, ep) {
remote_port = of_graph_get_remote_port(ep);
if (!remote_port) {
of_node_put(ep);
@@ -60,7 +56,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
of_node_put(remote_port);
- } while (1);
+ }
return possible_crtcs;
}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index fd29f03645b8..1b1bd42b0368 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -27,6 +27,7 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_internal.h"
#include "drm_legacy.h"
/**
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5ba5792bfdba..40c1db9ad7c3 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -344,20 +344,7 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
};
EXPORT_SYMBOL(drm_primary_helper_funcs);
-/**
- * drm_primary_helper_create_plane() - Create a generic primary plane
- * @dev: drm device
- * @formats: pixel formats supported, or NULL for a default safe list
- * @num_formats: size of @formats; ignored if @formats is NULL
- *
- * Allocates and initializes a primary plane that can be used with the primary
- * plane helpers. Drivers that wish to use driver-specific plane structures or
- * provide custom handler functions may perform their own allocation and
- * initialization rather than calling this function.
- */
-struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
- const uint32_t *formats,
- int num_formats)
+static struct drm_plane *create_primary_plane(struct drm_device *dev)
{
struct drm_plane *primary;
int ret;
@@ -368,15 +355,17 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
return NULL;
}
- if (formats == NULL) {
- formats = safe_modeset_formats;
- num_formats = ARRAY_SIZE(safe_modeset_formats);
- }
+ /*
+ * Remove the format_default field from drm_plane when dropping
+ * this helper.
+ */
+ primary->format_default = true;
/* possible_crtc's will be filled in later by crtc_init */
ret = drm_universal_plane_init(dev, primary, 0,
&drm_primary_helper_funcs,
- formats, num_formats,
+ safe_modeset_formats,
+ ARRAY_SIZE(safe_modeset_formats),
DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
@@ -385,7 +374,6 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
return primary;
}
-EXPORT_SYMBOL(drm_primary_helper_create_plane);
/**
* drm_crtc_init - Legacy CRTC initialization function
@@ -404,7 +392,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
{
struct drm_plane *primary;
- primary = drm_primary_helper_create_plane(dev, NULL, 0);
+ primary = create_primary_plane(dev);
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}
EXPORT_SYMBOL(drm_crtc_init);
@@ -413,9 +401,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
struct drm_plane_state *plane_state,
struct drm_framebuffer *old_fb)
{
- struct drm_plane_helper_funcs *plane_funcs;
+ const struct drm_plane_helper_funcs *plane_funcs;
struct drm_crtc *crtc[2];
- struct drm_crtc_helper_funcs *crtc_funcs[2];
+ const struct drm_crtc_helper_funcs *crtc_funcs[2];
int i, ret = 0;
plane_funcs = plane->helper_private;
@@ -437,7 +425,8 @@ int drm_plane_helper_commit(struct drm_plane *plane,
if (plane_funcs->prepare_fb && plane_state->fb &&
plane_state->fb != old_fb) {
- ret = plane_funcs->prepare_fb(plane, plane_state->fb);
+ ret = plane_funcs->prepare_fb(plane, plane_state->fb,
+ plane_state);
if (ret)
goto out;
}
@@ -487,7 +476,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
}
if (plane_funcs->cleanup_fb && old_fb)
- plane_funcs->cleanup_fb(plane, old_fb);
+ plane_funcs->cleanup_fb(plane, old_fb, plane_state);
out:
if (plane_state) {
if (plane->funcs->atomic_destroy_state)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7482b06cd08f..7fec191b45f7 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -339,13 +339,17 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- struct reservation_object *robj = NULL;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &drm_gem_prime_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
if (dev->driver->gem_prime_res_obj)
- robj = dev->driver->gem_prime_res_obj(obj);
+ exp_info.resv = dev->driver->gem_prime_res_obj(obj);
- return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
- flags, robj);
+ return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 3fee587bc284..63503879a676 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -98,7 +98,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- struct drm_connector_helper_funcs *connector_funcs =
+ const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
int mode_flags = 0;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 5c99d3773212..ffc305fc2076 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -166,23 +166,68 @@ void drm_sysfs_destroy(void)
/*
* Connector properties
*/
-static ssize_t status_show(struct device *device,
+static ssize_t status_store(struct device *device,
struct device_attribute *attr,
- char *buf)
+ const char *buf, size_t count)
{
struct drm_connector *connector = to_drm_connector(device);
- enum drm_connector_status status;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
int ret;
- ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
return ret;
- status = connector->funcs->detect(connector, true);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ old_status = connector->status;
+
+ if (sysfs_streq(buf, "detect")) {
+ connector->force = 0;
+ connector->status = connector->funcs->detect(connector, true);
+ } else if (sysfs_streq(buf, "on")) {
+ connector->force = DRM_FORCE_ON;
+ } else if (sysfs_streq(buf, "on-digital")) {
+ connector->force = DRM_FORCE_ON_DIGITAL;
+ } else if (sysfs_streq(buf, "off")) {
+ connector->force = DRM_FORCE_OFF;
+ } else
+ ret = -EINVAL;
+
+ if (ret == 0 && connector->force) {
+ if (connector->force == DRM_FORCE_ON ||
+ connector->force == DRM_FORCE_ON_DIGITAL)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+ if (connector->funcs->force)
+ connector->funcs->force(connector);
+ }
+
+ if (old_status != connector->status) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ connector->name,
+ old_status, connector->status);
+
+ dev->mode_config.delayed_event = true;
+ if (dev->mode_config.poll_enabled)
+ schedule_delayed_work(&dev->mode_config.output_poll_work,
+ 0);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+static ssize_t status_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_connector_status_name(status));
+ drm_get_connector_status_name(connector->status));
}
static ssize_t dpms_show(struct device *device,
@@ -339,7 +384,7 @@ static ssize_t select_subconnector_show(struct device *device,
drm_get_dvi_i_select_name((int)subconnector));
}
-static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RW(status);
static DEVICE_ATTR_RO(enabled);
static DEVICE_ATTR_RO(dpms);
static DEVICE_ATTR_RO(modes);
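To illustrate the attribute made writable above, a hedged userspace sketch that forces a connector on; the connector path is an example and will differ per system, and the accepted values per this patch are "detect", "on", "on-digital" and "off":

#include <stdio.h>

int main(void)
{
	/* Example path; the actual connector name depends on the system. */
	FILE *f = fopen("/sys/class/drm/card0-HDMI-A-1/status", "w");

	if (!f)
		return 1;

	fputs("on", f);	/* force the connector to connected */
	fclose(f);
	return 0;
}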
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 27cc95f36381..ce3c42813fbb 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -7,7 +7,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM drm
-#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE drm_trace
TRACE_EVENT(drm_vblank_event,
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 4a2c328959e5..aab49ee4ed40 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
+#include "drm_internal.h"
#include "drm_legacy.h"
struct drm_vma_entry {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 970046199608..1f7e33f59de6 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -28,6 +28,7 @@
#include <video/exynos7_decon.h>
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
@@ -41,32 +42,16 @@
#define WINDOWS_NR 2
-struct decon_win_data {
- unsigned int ovl_x;
- unsigned int ovl_y;
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- unsigned int pixel_format;
- dma_addr_t dma_addr;
- bool enabled;
- bool resume;
-};
-
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct clk *pclk;
struct clk *aclk;
struct clk *eclk;
struct clk *vclk;
void __iomem *regs;
- struct decon_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
bool i80_if;
@@ -296,59 +281,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void decon_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct decon_context *ctx = crtc->ctx;
- struct decon_win_data *win_data;
- int win, padding;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
-
- win_data = &ctx->win_data[win];
-
- padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
- win_data->offset_x = plane->fb_x;
- win_data->offset_y = plane->fb_y;
- win_data->fb_width = plane->fb_width + padding;
- win_data->fb_height = plane->fb_height;
- win_data->ovl_x = plane->crtc_x;
- win_data->ovl_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->dma_addr = plane->dma_addr[0];
- win_data->bpp = plane->bpp;
- win_data->pixel_format = plane->pixel_format;
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
{
- struct decon_win_data *win_data = &ctx->win_data[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
+ int padding;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
@@ -397,7 +339,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -407,7 +349,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+ if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
@@ -435,7 +378,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
* @protect: 1 to protect (disable updates)
*/
static void decon_shadow_protect_win(struct decon_context *ctx,
- int win, bool protect)
+ unsigned int win, bool protect)
{
u32 bits, val;
@@ -449,12 +392,12 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
writel(val, ctx->regs + SHADOWCON);
}
-static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.mode;
- struct decon_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
+ int padding;
unsigned long val, alpha;
unsigned int last_x;
unsigned int last_y;
@@ -462,17 +405,14 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
/* If suspended, enable this on resume */
if (ctx->suspended) {
- win_data->resume = true;
+ plane->resume = true;
return;
}
@@ -490,39 +430,41 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
decon_shadow_protect_win(ctx, win, true);
/* buffer start address */
- val = (unsigned long)win_data->dma_addr;
+ val = (unsigned long)plane->dma_addr[0];
writel(val, ctx->regs + VIDW_BUF_START(win));
+ padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+
/* buffer size */
- writel(win_data->fb_width, ctx->regs + VIDW_WHOLE_X(win));
- writel(win_data->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+ writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
+ writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
- writel(win_data->offset_x, ctx->regs + VIDW_OFFSET_X(win));
- writel(win_data->offset_y, ctx->regs + VIDW_OFFSET_Y(win));
+ writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
+ writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win));
DRM_DEBUG_KMS("start addr = 0x%lx\n",
- (unsigned long)win_data->dma_addr);
+ (unsigned long)val);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
+ plane->crtc_width, plane->crtc_height);
/*
* OSD position.
* In case the window layout goes out of the LCD layout, DECON fails.
*/
- if ((win_data->ovl_x + win_data->ovl_width) > mode->hdisplay)
- win_data->ovl_x = mode->hdisplay - win_data->ovl_width;
- if ((win_data->ovl_y + win_data->ovl_height) > mode->vdisplay)
- win_data->ovl_y = mode->vdisplay - win_data->ovl_height;
+ if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
+ plane->crtc_x = mode->hdisplay - plane->crtc_width;
+ if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
+ plane->crtc_y = mode->vdisplay - plane->crtc_height;
- val = VIDOSDxA_TOPLEFT_X(win_data->ovl_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->ovl_y);
+ val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = win_data->ovl_x + win_data->ovl_width;
+ last_x = plane->crtc_x + plane->crtc_width;
if (last_x)
last_x--;
- last_y = win_data->ovl_y + win_data->ovl_height;
+ last_y = plane->crtc_y + plane->crtc_height;
if (last_y)
last_y--;
@@ -531,7 +473,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->ovl_x, win_data->ovl_y, last_x, last_y);
+ plane->crtc_x, plane->crtc_y, last_x, last_y);
/* OSD alpha */
alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
@@ -565,27 +507,23 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
- win_data->enabled = true;
+ plane->enabled = true;
}
-static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct decon_context *ctx = crtc->ctx;
- struct decon_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
u32 val;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
if (ctx->suspended) {
/* do not resume this window */
- win_data->resume = false;
+ plane->resume = false;
return;
}
@@ -604,42 +542,42 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
- win_data->enabled = false;
+ plane->enabled = false;
}
static void decon_window_suspend(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
+ if (plane->enabled)
decon_win_disable(ctx->crtc, i);
}
}
static void decon_window_resume(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
}
}
static void decon_apply(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ if (plane->enabled)
decon_win_commit(ctx->crtc, i);
else
decon_win_disable(ctx->crtc, i);
@@ -779,7 +717,6 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.wait_for_vblank = decon_wait_for_vblank,
- .win_mode_set = decon_win_mode_set,
.win_commit = decon_win_commit,
.win_disable = decon_win_disable,
};
@@ -818,6 +755,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ret = decon_ctx_initialize(ctx, drm_dev);
@@ -826,8 +766,18 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_LCD,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
&decon_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
decon_ctx_remove(ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index bf17a60b40ed..1dbfba58f909 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,10 +32,16 @@
#include <drm/bridge/ptn3460.h>
#include "exynos_dp_core.h"
+#include "exynos_drm_fimd.h"
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)
+static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
+{
+ return to_exynos_crtc(dp->encoder->crtc);
+}
+
static inline struct exynos_dp_device *
display_to_dp(struct exynos_drm_display *d)
{
@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
}
}
+ fimd_dp_clock_enable(dp_to_crtc(dp), true);
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
+ fimd_dp_clock_enable(dp_to_crtc(dp), false);
+
if (dp->panel) {
if (drm_panel_unprepare(dp->panel))
DRM_ERROR("failed to turnoff the panel\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 48ccab7fdf63..eb49195cec5c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -34,9 +34,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
if (mode > DRM_MODE_DPMS_ON) {
/* wait for the completion of page flip. */
if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
- !atomic_read(&exynos_crtc->pending_flip),
- HZ/20))
- atomic_set(&exynos_crtc->pending_flip, 0);
+ (exynos_crtc->event == NULL), HZ/20))
+ exynos_crtc->event = NULL;
drm_crtc_vblank_off(crtc);
}
@@ -164,11 +163,10 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_framebuffer *old_fb = crtc->primary->fb;
unsigned int crtc_w, crtc_h;
- int ret = -EINVAL;
+ int ret;
/* when the page flip is requested, crtc's dpms should be on */
if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
@@ -176,48 +174,49 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
+ if (!event)
+ return -EINVAL;
- if (event) {
- /*
- * the pipe from user always is 0 so we can set pipe number
- * of current owner to event.
- */
- event->pipe = exynos_crtc->pipe;
+ spin_lock_irq(&dev->event_lock);
+ if (exynos_crtc->event) {
+ ret = -EBUSY;
+ goto out;
+ }
- ret = drm_vblank_get(dev, exynos_crtc->pipe);
- if (ret) {
- DRM_DEBUG("failed to acquire vblank counter\n");
+ ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter\n");
+ goto out;
+ }
- goto out;
- }
+ exynos_crtc->event = event;
+ spin_unlock_irq(&dev->event_lock);
+ /*
+ * the pipe from user always is 0 so we can set pipe number
+ * of current owner to event.
+ */
+ event->pipe = exynos_crtc->pipe;
+
+ crtc->primary->fb = fb;
+ crtc_w = fb->width - crtc->x;
+ crtc_h = fb->height - crtc->y;
+ ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
+ crtc_w, crtc_h, crtc->x, crtc->y,
+ crtc_w, crtc_h);
+ if (ret) {
+ crtc->primary->fb = old_fb;
spin_lock_irq(&dev->event_lock);
- list_add_tail(&event->base.link,
- &dev_priv->pageflip_event_list);
- atomic_set(&exynos_crtc->pending_flip, 1);
+ exynos_crtc->event = NULL;
+ drm_vblank_put(dev, exynos_crtc->pipe);
spin_unlock_irq(&dev->event_lock);
-
- crtc->primary->fb = fb;
- crtc_w = fb->width - crtc->x;
- crtc_h = fb->height - crtc->y;
- ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
- crtc_w, crtc_h, crtc->x, crtc->y,
- crtc_w, crtc_h);
- if (ret) {
- crtc->primary->fb = old_fb;
-
- spin_lock_irq(&dev->event_lock);
- drm_vblank_put(dev, exynos_crtc->pipe);
- list_del(&event->base.link);
- atomic_set(&exynos_crtc->pending_flip, 0);
- spin_unlock_irq(&dev->event_lock);
-
- goto out;
- }
+ return ret;
}
+
+ return 0;
+
out:
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock_irq(&dev->event_lock);
return ret;
}
@@ -239,13 +238,13 @@ static struct drm_crtc_funcs exynos_crtc_funcs = {
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ struct drm_plane *plane,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
void *ctx)
{
struct exynos_drm_crtc *exynos_crtc;
- struct drm_plane *plane;
struct exynos_drm_private *private = drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
@@ -255,19 +254,12 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&exynos_crtc->pending_flip_queue);
- atomic_set(&exynos_crtc->pending_flip, 0);
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
exynos_crtc->pipe = pipe;
exynos_crtc->type = type;
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
- plane = exynos_plane_init(drm_dev, 1 << pipe,
- DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- goto err_plane;
- }
crtc = &exynos_crtc->base;
@@ -284,7 +276,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
err_crtc:
plane->funcs->destroy(plane);
-err_plane:
kfree(exynos_crtc);
return ERR_PTR(ret);
}
@@ -320,26 +311,20 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
{
struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
+ if (exynos_crtc->event) {
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (pipe != e->pipe)
- continue;
-
- list_del(&e->base.link);
- drm_send_vblank_event(dev, -1, e);
+ drm_send_vblank_event(dev, -1, exynos_crtc->event);
drm_vblank_put(dev, pipe);
- atomic_set(&exynos_crtc->pending_flip, 0);
wake_up(&exynos_crtc->pending_flip_queue);
+
}
+ exynos_crtc->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6258b800aab8..0ecd8fc45cff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -18,6 +18,7 @@
#include "exynos_drm_drv.h"
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ struct drm_plane *plane,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
@@ -27,12 +28,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
-void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
- struct exynos_drm_plane *plane);
-void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
-void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
-void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
-
/* This function gets pipe value to crtc device matched with out_type. */
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 3833bf8ca025..cd485c091b30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -185,9 +185,14 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct drm_gem_object *obj, int flags)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- return dma_buf_export(obj, &exynos_dmabuf_ops,
- exynos_gem_obj->base.size, flags, NULL);
+ exp_info.ops = &exynos_dmabuf_ops;
+ exp_info.size = exynos_gem_obj->base.size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
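
The hunk above moves the exporter over to the dma_buf_export_info based dma_buf_export(). A minimal sketch of the same pattern for a generic GEM exporter, assuming the export-info API shown in the hunk; the function name and the my_dmabuf_ops table are hypothetical:

static struct dma_buf *my_gem_prime_export(struct drm_device *drm_dev,
					   struct drm_gem_object *obj,
					   int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* fills in owner/exp_name */

	exp_info.ops = &my_dmabuf_ops;		/* hypothetical dma_buf_ops */
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}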
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 90168d7cf66a..8ac465208eae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -55,13 +55,11 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
int ret;
- int nr;
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
if (!private)
return -ENOMEM;
- INIT_LIST_HEAD(&private->pageflip_event_list);
dev_set_drvdata(dev->dev, dev);
dev->dev_private = (void *)private;
@@ -81,19 +79,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_mode_config_init(dev);
- for (nr = 0; nr < MAX_PLANE; nr++) {
- struct drm_plane *plane;
- unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
-
- plane = exynos_plane_init(dev, possible_crtcs,
- DRM_PLANE_TYPE_OVERLAY);
- if (!IS_ERR(plane))
- continue;
-
- ret = PTR_ERR(plane);
- goto err_mode_config_cleanup;
- }
-
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
@@ -237,25 +222,13 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
if (!file->driver_priv)
return;
- /* Release all events not unhandled by page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
- list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
- base.link) {
- if (v->base.file_priv == file) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
- }
-
/* Release all events handled by page flip handler but not freed. */
list_for_each_entry_safe(e, et, &file->event_list, link) {
list_del(&e->link);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 9afd390d4674..e12ecb5d5d9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -21,7 +21,6 @@
#define MAX_CRTC 3
#define MAX_PLANE 5
#define MAX_FB_BUFFER 4
-#define DEFAULT_ZPOS -1
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc, base)
#define to_exynos_plane(x) container_of(x, struct exynos_drm_plane, base)
@@ -48,20 +47,22 @@ enum exynos_drm_output_type {
* Exynos drm common overlay structure.
*
* @base: plane object
- * @fb_x: offset x on a framebuffer to be displayed.
+ * @src_x: offset x on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed.
+ * @src_y: offset y on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
* @src_width: width of a partial image to be displayed from framebuffer.
* @src_height: height of a partial image to be displayed from framebuffer.
+ * @fb_width: width of a framebuffer.
+ * @fb_height: height of a framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_width: window width to be displayed (hardware screen).
* @crtc_height: window height to be displayed (hardware screen).
* @mode_width: width of screen mode.
* @mode_height: height of screen mode.
+ * @h_ratio: horizontal scaling ratio, 16.16 fixed point
+ * @v_ratio: vertical scaling ratio, 16.16 fixed point
* @refresh: refresh rate.
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
@@ -78,6 +79,7 @@ enum exynos_drm_output_type {
* @transparency: transparency on or off.
* @activated: activated or not.
* @enabled: enabled or not.
+ * @resume: to resume or not.
*
* this structure is common to exynos SoC and its contents would be copied
* to hardware specific overlay info.
@@ -85,25 +87,27 @@ enum exynos_drm_output_type {
struct exynos_drm_plane {
struct drm_plane base;
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int fb_width;
- unsigned int fb_height;
+ unsigned int src_x;
+ unsigned int src_y;
unsigned int src_width;
unsigned int src_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_width;
unsigned int crtc_height;
unsigned int mode_width;
unsigned int mode_height;
+ unsigned int h_ratio;
+ unsigned int v_ratio;
unsigned int refresh;
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- int zpos;
+ unsigned int zpos;
unsigned int index_color;
bool default_win:1;
@@ -112,6 +116,7 @@ struct exynos_drm_plane {
bool transparency:1;
bool activated:1;
bool enabled:1;
+ bool resume:1;
};
/*
@@ -172,9 +177,7 @@ struct exynos_drm_display {
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @wait_for_vblank: wait for vblank interrupt to make sure that
* hardware overlay is updated.
- * @win_mode_set: copy drm overlay info to hw specific overlay info.
* @win_commit: apply hardware specific overlay data to registers.
- * @win_enable: enable hardware specific overlay.
* @win_disable: disable hardware specific overlay.
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
@@ -189,11 +192,8 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
- void (*win_mode_set)(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane);
- void (*win_commit)(struct exynos_drm_crtc *crtc, int zpos);
- void (*win_enable)(struct exynos_drm_crtc *crtc, int zpos);
- void (*win_disable)(struct exynos_drm_crtc *crtc, int zpos);
+ void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+ void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
void (*te_handler)(struct exynos_drm_crtc *crtc);
};
@@ -210,6 +210,7 @@ struct exynos_drm_crtc_ops {
* we can look up the crtc on which the current hardware interrupt occurred
* through this pipe value.
* @dpms: store the crtc dpms value
+ * @event: vblank event that is currently queued for flip
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the crtc's implementation specific context
*/
@@ -219,7 +220,7 @@ struct exynos_drm_crtc {
unsigned int pipe;
unsigned int dpms;
wait_queue_head_t pending_flip_queue;
- atomic_t pending_flip;
+ struct drm_pending_vblank_event *event;
struct exynos_drm_crtc_ops *ops;
void *ctx;
};
@@ -249,9 +250,6 @@ struct drm_exynos_file_private {
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
- /* list head for new event to be added. */
- struct list_head pageflip_event_list;
-
/*
* created crtc object would be contained at this array and
* this array is used to be aware of which crtc did it request vblank.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 05fe93dc57a8..04927153bf38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1473,12 +1473,6 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
return 0;
}
-static int exynos_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static struct drm_encoder *
exynos_dsi_best_encoder(struct drm_connector *connector)
{
@@ -1489,7 +1483,6 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.get_modes = exynos_dsi_get_modes,
- .mode_valid = exynos_dsi_mode_valid,
.best_encoder = exynos_dsi_best_encoder,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d346d1e6eda0..929cb03a8eab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -151,10 +151,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
- return ERR_PTR(-EINVAL);
- }
+ if (ret < 0)
+ return ERR_PTR(ret);
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb)
@@ -250,10 +248,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
+ if (ret < 0)
goto err_unreference;
- }
}
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 84f8dfe1c5ec..e71e331f0188 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -76,6 +76,7 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes,
struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
@@ -85,7 +86,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
unsigned long offset;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
/* RGB formats use only one buffer */
buffer = exynos_drm_fb_buffer(fb, 0);
@@ -189,7 +190,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto err_destroy_framebuffer;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb);
+ ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
if (ret < 0)
goto err_dealloc_cmap;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33a10ce967ea..9819fa6a9e2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,9 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
+#include "exynos_drm_fimd.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -54,6 +56,9 @@
/* size control register for hardware windows 1 ~ 2. */
#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
+#define VIDWnALPHA0(win) (VIDW_ALPHA + 0x00 + (win) * 8)
+#define VIDWnALPHA1(win) (VIDW_ALPHA + 0x04 + (win) * 8)
+
#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
@@ -140,32 +145,15 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
.has_vtsel = 1,
};
-struct fimd_win_data {
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int fb_pitch;
- unsigned int bpp;
- unsigned int pixel_format;
- dma_addr_t dma_addr;
- unsigned int buf_offsize;
- unsigned int line_size; /* bytes */
- bool enabled;
- bool resume;
-};
-
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
struct regmap *sysreg;
- struct fimd_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
u32 vidcon0;
@@ -502,59 +490,9 @@ static void fimd_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win;
- unsigned long offset;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- offset = plane->fb_x * (plane->bpp >> 3);
- offset += plane->fb_y * plane->pitch;
-
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
-
- win_data = &ctx->win_data[win];
-
- win_data->offset_x = plane->crtc_x;
- win_data->offset_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->fb_pitch = plane->pitch;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->dma_addr = plane->dma_addr[0] + offset;
- win_data->bpp = plane->bpp;
- win_data->pixel_format = plane->pixel_format;
- win_data->buf_offsize =
- plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
- win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
{
- struct fimd_win_data *win_data = &ctx->win_data[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
val = WINCONx_ENWIN;
@@ -564,11 +502,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* So the request format is ARGB8888 then change it to XRGB8888.
*/
if (ctx->driver_data->has_limited_fmt && !win) {
- if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
- win_data->pixel_format = DRM_FORMAT_XRGB8888;
+ if (plane->pixel_format == DRM_FORMAT_ARGB8888)
+ plane->pixel_format = DRM_FORMAT_XRGB8888;
}
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
@@ -604,7 +542,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -614,12 +552,30 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* movement causes unstable DMA which results in an iommu crash/tear.
*/
- if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
writel(val, ctx->regs + WINCON(win));
+
+ /* hardware window 0 doesn't support alpha channel. */
+ if (win != 0) {
+ /* OSD alpha */
+ val = VIDISD14C_ALPHA0_R(0xf) |
+ VIDISD14C_ALPHA0_G(0xf) |
+ VIDISD14C_ALPHA0_B(0xf) |
+ VIDISD14C_ALPHA1_R(0xf) |
+ VIDISD14C_ALPHA1_G(0xf) |
+ VIDISD14C_ALPHA1_B(0xf);
+
+ writel(val, ctx->regs + VIDOSD_C(win));
+
+ val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) |
+ VIDW_ALPHA_B(0xf);
+ writel(val, ctx->regs + VIDWnALPHA0(win));
+ writel(val, ctx->regs + VIDWnALPHA1(win));
+ }
}
static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
@@ -642,7 +598,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
* @protect: 1 to protect (disable updates)
*/
static void fimd_shadow_protect_win(struct fimd_context *ctx,
- int win, bool protect)
+ unsigned int win, bool protect)
{
u32 reg, bits, val;
@@ -662,29 +618,25 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
writel(val, ctx->regs + reg);
}
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win = zpos;
- unsigned long val, alpha, size;
- unsigned int last_x;
- unsigned int last_y;
+ struct exynos_drm_plane *plane;
+ dma_addr_t dma_addr;
+ unsigned long val, size, offset;
+ unsigned int last_x, last_y, buf_offsize, line_size;
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
/* If suspended, enable this on resume */
if (ctx->suspended) {
- win_data->resume = true;
+ plane->resume = true;
return;
}
@@ -701,38 +653,45 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
/* protect windows */
fimd_shadow_protect_win(ctx, win, true);
+
+ offset = plane->src_x * (plane->bpp >> 3);
+ offset += plane->src_y * plane->pitch;
+
/* buffer start address */
- val = (unsigned long)win_data->dma_addr;
+ dma_addr = plane->dma_addr[0] + offset;
+ val = (unsigned long)dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
- size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
- val = (unsigned long)(win_data->dma_addr + size);
+ size = plane->pitch * plane->crtc_height;
+ val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->dma_addr, val, size);
+ (unsigned long)dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
+ plane->crtc_width, plane->crtc_height);
/* buffer size */
- val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
- VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
+ buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
+ line_size = plane->crtc_width * (plane->bpp >> 3);
+ val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
- val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
- VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
+ val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y(plane->crtc_y) |
+ VIDOSDxA_TOPLEFT_X_E(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = win_data->offset_x + win_data->ovl_width;
+ last_x = plane->crtc_x + plane->crtc_width;
if (last_x)
last_x--;
- last_y = win_data->offset_y + win_data->ovl_height;
+ last_y = plane->crtc_y + plane->crtc_height;
if (last_y)
last_y--;
@@ -742,24 +701,14 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y, last_x, last_y);
-
- /* hardware window 0 doesn't support alpha channel. */
- if (win != 0) {
- /* OSD alpha */
- alpha = VIDISD14C_ALPHA1_R(0xf) |
- VIDISD14C_ALPHA1_G(0xf) |
- VIDISD14C_ALPHA1_B(0xf);
-
- writel(alpha, ctx->regs + VIDOSD_C(win));
- }
+ plane->crtc_x, plane->crtc_y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C(win);
- val = win_data->ovl_width * win_data->ovl_height;
+ val = plane->crtc_width * plane->crtc_height;
writel(val, ctx->regs + offset);
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
@@ -779,29 +728,25 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
/* Enable DMA channel and unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- win_data->enabled = true;
+ plane->enabled = true;
if (ctx->i80_if)
atomic_set(&ctx->win_updated, 1);
}
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win = zpos;
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
+ struct exynos_drm_plane *plane;
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
if (ctx->suspended) {
/* do not resume this window */
- win_data->resume = false;
+ plane->resume = false;
return;
}
@@ -816,42 +761,42 @@ static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos)
/* unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- win_data->enabled = false;
+ plane->enabled = false;
}
static void fimd_window_suspend(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
+ if (plane->enabled)
fimd_win_disable(ctx->crtc, i);
}
}
static void fimd_window_resume(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
}
}
static void fimd_apply(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ if (plane->enabled)
fimd_win_commit(ctx->crtc, i);
else
fimd_win_disable(ctx->crtc, i);
@@ -1008,7 +953,6 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.wait_for_vblank = fimd_wait_for_vblank,
- .win_mode_set = fimd_win_mode_set,
.win_commit = fimd_win_commit,
.win_disable = fimd_win_disable,
.te_handler = fimd_te_handler,
@@ -1054,14 +998,29 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ctx->drm_dev = drm_dev;
ctx->pipe = priv->pipe++;
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_LCD,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
&fimd_crtc_ops, ctx);
+ if (IS_ERR(ctx->crtc))
+ return PTR_ERR(ctx->crtc);
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
@@ -1233,6 +1192,24 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}
+void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+{
+ struct fimd_context *ctx = crtc->ctx;
+ u32 val;
+
+ /*
+ * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
+ * clock. On these SoCs the bootloader may enable it, but any
+ * power domain off/on will reset it to the disabled state.
+ */
+ if (ctx->driver_data != &exynos5_fimd_driver_data)
+ return;
+
+ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+ writel(val, ctx->regs + DP_MIE_CLKCON);
+}
+EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
+
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
new file mode 100644
index 000000000000..b4fcaa568456
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMD_H_
+#define _EXYNOS_DRM_FIMD_H_
+
+extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
+
+#endif /* _EXYNOS_DRM_FIMD_H_ */
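
The new header exports fimd_dp_clock_enable() so the eDP driver can gate the FIMD DP/MIE clock across power transitions, as the exynos_dp_core.c hunks earlier in this diff do. A condensed sketch of that call ordering; the wrapper name is hypothetical and only illustrates the sequencing used by the patch:

static void dp_power_sketch(struct exynos_dp_device *dp, bool on)
{
	struct exynos_drm_crtc *crtc = to_exynos_crtc(dp->encoder->crtc);

	if (on) {
		fimd_dp_clock_enable(crtc, true);	/* before clock/phy setup */
		clk_prepare_enable(dp->clock);
	} else {
		clk_disable_unprepare(dp->clock);
		fimd_dp_clock_enable(crtc, false);	/* after the link is down */
	}
}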
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d5ad17dfc24d..b7f1cbc46cc2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -476,6 +476,45 @@ err_clear:
return ret;
}
+static int ipp_validate_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_config *ipp_cfg;
+ unsigned int num_plane;
+ unsigned long min_size, size;
+ unsigned int bpp;
+ int i;
+
+ /* The property id should already be verified */
+ ipp_cfg = &c_node->property.config[m_node->prop_id];
+ num_plane = drm_format_num_planes(ipp_cfg->fmt);
+
+ /*
+ * This is a rather simplified validation of a memory node.
+ * It verifies the provided GEM object handles and checks the
+ * buffer sizes against the current configuration. It is not
+ * exhaustive, but it is enough to reject obviously invalid buffers.
+ */
+ for (i = 0; i < num_plane; ++i) {
+ if (!m_node->buf_info.handles[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
+ min_size = (ipp_cfg->sz.hsize * ipp_cfg->sz.vsize * bpp) >> 3;
+ size = exynos_drm_gem_get_size(drm_dev,
+ m_node->buf_info.handles[i],
+ c_node->filp);
+ if (min_size > size) {
+ DRM_ERROR("invalid size for plane %d\n", i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int ipp_put_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
@@ -552,6 +591,11 @@ static struct drm_exynos_ipp_mem_node
}
mutex_lock(&c_node->mem_lock);
+ if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+ }
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 8ad5b7294eb4..13ea3349363b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -92,7 +92,6 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
unsigned int actual_w;
unsigned int actual_h;
@@ -111,13 +110,17 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_y = 0;
}
+ /* set ratio */
+ exynos_plane->h_ratio = (src_w << 16) / crtc_w;
+ exynos_plane->v_ratio = (src_h << 16) / crtc_h;
+
/* set drm framebuffer data. */
- exynos_plane->fb_x = src_x;
- exynos_plane->fb_y = src_y;
+ exynos_plane->src_x = src_x;
+ exynos_plane->src_y = src_y;
+ exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
+ exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
exynos_plane->fb_width = fb->width;
exynos_plane->fb_height = fb->height;
- exynos_plane->src_width = src_w;
- exynos_plane->src_height = src_h;
exynos_plane->bpp = fb->bits_per_pixel;
exynos_plane->pitch = fb->pitches[0];
exynos_plane->pixel_format = fb->pixel_format;
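
The h_ratio/v_ratio values set above are 16.16 fixed-point scale factors (source size over CRTC size), later consumed via VP_H_RATIO/VP_V_RATIO in the mixer hunk below. A small standalone sketch of the arithmetic with hypothetical numbers; the helper name is illustrative only:

/* E.g. src_w = 1920 scaled onto crtc_w = 1280: h_ratio = (1920 << 16) / 1280
 * = 0x18000 (1.5), and a clipped width of 1280 maps back to a source span of
 * (1280 * 0x18000) >> 16 = 1920 pixels.
 */
static unsigned int scaled_src_width(unsigned int src_w, unsigned int crtc_w,
				     unsigned int actual_w)
{
	unsigned int h_ratio = (src_w << 16) / crtc_w;

	return (actual_w * h_ratio) >> 16;
}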
@@ -139,9 +142,6 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
exynos_plane->crtc_width, exynos_plane->crtc_height);
plane->crtc = crtc;
-
- if (exynos_crtc->ops->win_mode_set)
- exynos_crtc->ops->win_mode_set(exynos_crtc, exynos_plane);
}
int
@@ -182,39 +182,14 @@ static int exynos_disable_plane(struct drm_plane *plane)
return 0;
}
-static void exynos_plane_destroy(struct drm_plane *plane)
-{
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
-
- exynos_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(exynos_plane);
-}
-
-static int exynos_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_private *dev_priv = dev->dev_private;
-
- if (property == dev_priv->plane_zpos_property) {
- exynos_plane->zpos = val;
- return 0;
- }
-
- return -EINVAL;
-}
-
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = exynos_update_plane,
.disable_plane = exynos_disable_plane,
- .destroy = exynos_plane_destroy,
- .set_property = exynos_plane_set_property,
+ .destroy = drm_plane_cleanup,
};
-static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
+static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_device *dev = plane->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
@@ -222,41 +197,36 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
prop = dev_priv->plane_zpos_property;
if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos", 0,
- MAX_PLANE - 1);
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
+ "zpos", 0, MAX_PLANE - 1);
if (!prop)
return;
dev_priv->plane_zpos_property = prop;
}
- drm_object_attach_property(&plane->base, prop, 0);
+ drm_object_attach_property(&plane->base, prop, zpos);
}
-struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned long possible_crtcs,
- enum drm_plane_type type)
+int exynos_plane_init(struct drm_device *dev,
+ struct exynos_drm_plane *exynos_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos)
{
- struct exynos_drm_plane *exynos_plane;
int err;
- exynos_plane = kzalloc(sizeof(struct exynos_drm_plane), GFP_KERNEL);
- if (!exynos_plane)
- return ERR_PTR(-ENOMEM);
-
err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats,
ARRAY_SIZE(formats), type);
if (err) {
DRM_ERROR("failed to initialize plane\n");
- kfree(exynos_plane);
- return ERR_PTR(err);
+ return err;
}
- if (type == DRM_PLANE_TYPE_PRIMARY)
- exynos_plane->zpos = DEFAULT_ZPOS;
- else
- exynos_plane_attach_zpos_property(&exynos_plane->base);
+ exynos_plane->zpos = zpos;
- return &exynos_plane->base;
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ exynos_plane_attach_zpos_property(&exynos_plane->base, zpos);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 9d3c374e7b3e..f360590d1412 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -20,6 +20,7 @@ int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
-struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned long possible_crtcs,
- enum drm_plane_type type);
+int exynos_plane_init(struct drm_device *dev,
+ struct exynos_drm_plane *exynos_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b886972b5888..27e84ec21694 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -23,6 +23,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_vidi.h"
@@ -32,20 +33,6 @@
#define ctx_from_connector(c) container_of(c, struct vidi_context, \
connector)
-struct vidi_win_data {
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- dma_addr_t dma_addr;
- unsigned int buf_offsize;
- unsigned int line_size; /* bytes */
- bool enabled;
-};
-
struct vidi_context {
struct exynos_drm_display display;
struct platform_device *pdev;
@@ -53,7 +40,7 @@ struct vidi_context {
struct exynos_drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
- struct vidi_win_data win_data[WINDOWS_NR];
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
unsigned int default_win;
@@ -97,19 +84,6 @@ static const char fake_edid_info[] = {
0x00, 0x00, 0x00, 0x06
};
-static void vidi_apply(struct vidi_context *ctx)
-{
- struct exynos_drm_crtc_ops *crtc_ops = ctx->crtc->ops;
- struct vidi_win_data *win_data;
- int i;
-
- for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled && (crtc_ops && crtc_ops->win_commit))
- crtc_ops->win_commit(ctx->crtc, i);
- }
-}
-
static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -143,104 +117,46 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
ctx->vblank_on = false;
}
-static void vidi_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win;
- unsigned long offset;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- offset = plane->fb_x * (plane->bpp >> 3);
- offset += plane->fb_y * plane->pitch;
-
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
-
- win_data = &ctx->win_data[win];
-
- win_data->offset_x = plane->crtc_x;
- win_data->offset_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->dma_addr = plane->dma_addr[0] + offset;
- win_data->bpp = plane->bpp;
- win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
- (plane->bpp >> 3);
- win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
-
- /*
- * some parts of win_data should be transferred to user side
- * through specific ioctl.
- */
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
- win_data->enabled = true;
+ plane->enabled = true;
- DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
if (ctx->vblank_on)
schedule_work(&ctx->work);
}
-static void vidi_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void vidi_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win = zpos;
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
+ struct exynos_drm_plane *plane;
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
- win_data->enabled = false;
+ plane = &ctx->planes[win];
+ plane->enabled = false;
/* TODO. */
}
static int vidi_power_on(struct vidi_context *ctx, bool enable)
{
+ struct exynos_drm_plane *plane;
+ int i;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
if (enable != false && enable != true)
@@ -253,7 +169,11 @@ static int vidi_power_on(struct vidi_context *ctx, bool enable)
if (test_and_clear_bit(0, &ctx->irq_flags))
vidi_enable_vblank(ctx->crtc);
- vidi_apply(ctx);
+ for (i = 0; i < WINDOWS_NR; i++) {
+ plane = &ctx->planes[i];
+ if (plane->enabled)
+ vidi_win_commit(ctx->crtc, i);
+ }
} else {
ctx->suspended = true;
}
@@ -301,7 +221,6 @@ static struct exynos_drm_crtc_ops vidi_crtc_ops = {
.dpms = vidi_dpms,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
- .win_mode_set = vidi_win_mode_set,
.win_commit = vidi_win_commit,
.win_disable = vidi_win_disable,
};
@@ -543,12 +462,25 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
vidi_ctx_initialize(ctx, drm_dev);
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_VIDI,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI,
&vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
DRM_ERROR("failed to create crtc.\n");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 229b3613c60b..5eba971f394a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2007,7 +2007,7 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
m->hdisplay, m->vdisplay,
m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
- "INTERLACED" : "PROGERESSIVE");
+ "INTERLACED" : "PROGRESSIVE");
/* preserve mode information for later use. */
drm_mode_copy(&hdata->current_mode, mode);
@@ -2101,7 +2101,7 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_encoder *encoder = hdata->encoder;
struct drm_crtc *crtc = encoder->crtc;
- struct drm_crtc_helper_funcs *funcs = NULL;
+ const struct drm_crtc_helper_funcs *funcs = NULL;
DRM_DEBUG_KMS("mode %d\n", mode);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 2e3bc57ea50e..fbec750574e6 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,35 +37,13 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
#include "exynos_mixer.h"
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
-struct hdmi_win_data {
- dma_addr_t dma_addr;
- dma_addr_t chroma_dma_addr;
- uint32_t pixel_format;
- unsigned int bpp;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_width;
- unsigned int crtc_height;
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int fb_width;
- unsigned int fb_pitch;
- unsigned int fb_height;
- unsigned int src_width;
- unsigned int src_height;
- unsigned int mode_width;
- unsigned int mode_height;
- unsigned int scan_flags;
- bool enabled;
- bool resume;
-};
-
struct mixer_resources {
int irq;
void __iomem *mixer_regs;
@@ -90,6 +68,7 @@ struct mixer_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[MIXER_WIN_NR];
int pipe;
bool interlace;
bool powered;
@@ -99,7 +78,6 @@ struct mixer_context {
struct mutex mixer_mutex;
struct mixer_resources mixer_res;
- struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -289,7 +267,7 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
/* choosing between interlace and progressive mode */
val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
- MXR_CFG_SCAN_PROGRASSIVE);
+ MXR_CFG_SCAN_PROGRESSIVE);
if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
/* choosing between proper HD and SD mode */
@@ -403,17 +381,16 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- struct hdmi_win_data *win_data;
- unsigned int x_ratio, y_ratio;
+ struct exynos_drm_plane *plane;
unsigned int buf_num = 1;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
bool crcb_mode = false;
u32 val;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_NV12:
crcb_mode = false;
buf_num = 2;
@@ -421,35 +398,31 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
/* TODO: single buffer format NV12, NV21 */
default:
/* ignore pixel format at disable time */
- if (!win_data->dma_addr)
+ if (!plane->dma_addr[0])
break;
DRM_ERROR("pixel format for vp is wrong [%d].\n",
- win_data->pixel_format);
+ plane->pixel_format);
return;
}
- /* scaling feature: (src << 16) / dst */
- x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
- y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
-
if (buf_num == 2) {
- luma_addr[0] = win_data->dma_addr;
- chroma_addr[0] = win_data->chroma_dma_addr;
+ luma_addr[0] = plane->dma_addr[0];
+ chroma_addr[0] = plane->dma_addr[1];
} else {
- luma_addr[0] = win_data->dma_addr;
- chroma_addr[0] = win_data->dma_addr
- + (win_data->fb_pitch * win_data->fb_height);
+ luma_addr[0] = plane->dma_addr[0];
+ chroma_addr[0] = plane->dma_addr[0]
+ + (plane->pitch * plane->fb_height);
}
- if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+ if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
ctx->interlace = true;
if (tiled_mode) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
- chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
+ luma_addr[1] = luma_addr[0] + plane->pitch;
+ chroma_addr[1] = chroma_addr[0] + plane->pitch;
}
} else {
ctx->interlace = false;
@@ -470,30 +443,30 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
- VP_IMG_VSIZE(win_data->fb_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
+ VP_IMG_VSIZE(plane->fb_height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
- VP_IMG_VSIZE(win_data->fb_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
+ VP_IMG_VSIZE(plane->fb_height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
- vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
+ vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
+ vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
vp_reg_write(res, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(win_data->fb_x));
- vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
+ VP_SRC_H_POSITION_VAL(plane->src_x));
+ vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
- vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
- vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
+ vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+ vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
- vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
- vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+ vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
}
- vp_reg_write(res, VP_H_RATIO, x_ratio);
- vp_reg_write(res, VP_V_RATIO, y_ratio);
+ vp_reg_write(res, VP_H_RATIO, plane->h_ratio);
+ vp_reg_write(res, VP_V_RATIO, plane->v_ratio);
vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
@@ -503,8 +476,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, win_data->mode_height);
- mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_scan(ctx, plane->mode_height);
+ mixer_cfg_rgb_fmt(ctx, plane->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
@@ -521,25 +494,49 @@ static void mixer_layer_update(struct mixer_context *ctx)
mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
+static int mixer_setup_scale(const struct exynos_drm_plane *plane,
+ unsigned int *x_ratio, unsigned int *y_ratio)
+{
+ if (plane->crtc_width != plane->src_width) {
+ if (plane->crtc_width == 2 * plane->src_width)
+ *x_ratio = 1;
+ else
+ goto fail;
+ }
+
+ if (plane->crtc_height != plane->src_height) {
+ if (plane->crtc_height == 2 * plane->src_height)
+ *y_ratio = 1;
+ else
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ DRM_DEBUG_KMS("only 2x width/height scaling of plane supported\n");
+ return -ENOTSUPP;
+}
+
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- struct hdmi_win_data *win_data;
- unsigned int x_ratio, y_ratio;
+ struct exynos_drm_plane *plane;
+ unsigned int x_ratio = 0, y_ratio = 0;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
#define RGB565 4
#define ARGB1555 5
#define ARGB4444 6
#define ARGB8888 7
- switch (win_data->bpp) {
+ switch (plane->bpp) {
case 16:
fmt = ARGB4444;
break;
@@ -550,21 +547,21 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
fmt = ARGB8888;
}
- /* 2x scaling feature */
- x_ratio = 0;
- y_ratio = 0;
+ /* check if mixer supports requested scaling setup */
+ if (mixer_setup_scale(plane, &x_ratio, &y_ratio))
+ return;
- dst_x_offset = win_data->crtc_x;
- dst_y_offset = win_data->crtc_y;
+ dst_x_offset = plane->crtc_x;
+ dst_y_offset = plane->crtc_y;
/* converting dma address base and source offset */
- dma_addr = win_data->dma_addr
- + (win_data->fb_x * win_data->bpp >> 3)
- + (win_data->fb_y * win_data->fb_pitch);
+ dma_addr = plane->dma_addr[0]
+ + (plane->src_x * plane->bpp >> 3)
+ + (plane->src_y * plane->pitch);
src_x_offset = 0;
src_y_offset = 0;
- if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+ if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
ctx->interlace = true;
else
ctx->interlace = false;
@@ -578,18 +575,18 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
- win_data->fb_pitch / (win_data->bpp >> 3));
+ plane->pitch / (plane->bpp >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
win == MIXER_DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(win_data->mode_height);
- val |= MXR_MXR_RES_WIDTH(win_data->mode_width);
+ val = MXR_MXR_RES_HEIGHT(plane->mode_height);
+ val |= MXR_MXR_RES_WIDTH(plane->mode_width);
mixer_reg_write(res, MXR_RESOLUTION, val);
}
- val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
- val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
+ val = MXR_GRP_WH_WIDTH(plane->src_width);
+ val |= MXR_GRP_WH_HEIGHT(plane->src_height);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -607,8 +604,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, win_data->mode_height);
- mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_scan(ctx, plane->mode_height);
+ mixer_cfg_rgb_fmt(ctx, plane->mode_height);
mixer_cfg_layer(ctx, win, true);
/* layer update mandatory for mixer 16.0.33.0 */
@@ -920,63 +917,9 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct mixer_context *mixer_ctx = crtc->ctx;
- struct hdmi_win_data *win_data;
- int win;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
- plane->fb_width, plane->fb_height,
- plane->fb_x, plane->fb_y,
- plane->crtc_width, plane->crtc_height,
- plane->crtc_x, plane->crtc_y);
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win >= MIXER_WIN_NR) {
- DRM_ERROR("mixer window[%d] is wrong\n", win);
- return;
- }
-
- win_data = &mixer_ctx->win_data[win];
-
- win_data->dma_addr = plane->dma_addr[0];
- win_data->chroma_dma_addr = plane->dma_addr[1];
- win_data->pixel_format = plane->pixel_format;
- win_data->bpp = plane->bpp;
-
- win_data->crtc_x = plane->crtc_x;
- win_data->crtc_y = plane->crtc_y;
- win_data->crtc_width = plane->crtc_width;
- win_data->crtc_height = plane->crtc_height;
-
- win_data->fb_x = plane->fb_x;
- win_data->fb_y = plane->fb_y;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->fb_pitch = plane->pitch;
- win_data->src_width = plane->src_width;
- win_data->src_height = plane->src_height;
-
- win_data->mode_width = plane->mode_width;
- win_data->mode_height = plane->mode_height;
-
- win_data->scan_flags = plane->scan_flag;
-}
-
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -992,14 +935,13 @@ static void mixer_win_commit(struct exynos_drm_crtc *crtc, int zpos)
else
mixer_graph_buffer(mixer_ctx, win);
- mixer_ctx->win_data[win].enabled = true;
+ mixer_ctx->planes[win].enabled = true;
}
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
- int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
unsigned long flags;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -1007,7 +949,7 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
mutex_unlock(&mixer_ctx->mixer_mutex);
- mixer_ctx->win_data[win].resume = false;
+ mixer_ctx->planes[win].resume = false;
return;
}
mutex_unlock(&mixer_ctx->mixer_mutex);
@@ -1020,7 +962,7 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
- mixer_ctx->win_data[win].enabled = false;
+ mixer_ctx->planes[win].enabled = false;
}
static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
@@ -1057,12 +999,12 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
static void mixer_window_suspend(struct mixer_context *ctx)
{
- struct hdmi_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
mixer_win_disable(ctx->crtc, i);
}
mixer_wait_for_vblank(ctx->crtc);
@@ -1070,14 +1012,14 @@ static void mixer_window_suspend(struct mixer_context *ctx)
static void mixer_window_resume(struct mixer_context *ctx)
{
- struct hdmi_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
+ if (plane->enabled)
mixer_win_commit(ctx->crtc, i);
}
}
@@ -1189,7 +1131,6 @@ static struct exynos_drm_crtc_ops mixer_crtc_ops = {
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.wait_for_vblank = mixer_wait_for_vblank,
- .win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
};
@@ -1253,15 +1194,28 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ret = mixer_initialize(ctx, drm_dev);
if (ret)
return ret;
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_HDMI,
- &mixer_crtc_ops, ctx);
+ for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+ type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[MIXER_DEFAULT_WIN];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI,
+ &mixer_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
mixer_ctx_remove(ctx);
ret = PTR_ERR(ctx->crtc);
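For reference, a minimal standalone sketch of the 2x-only scaling rule that the new mixer_setup_scale() above encodes: the MXR_GRP_WH_H_SCALE/V_SCALE fields only express "no scaling" (ratio 0) or "exactly 2x" (ratio 1), so any other source/destination combination is rejected. The helper and the sample sizes below are illustrative only, not driver code:

#include <stdio.h>

/* Illustrative only: mirrors the rule in mixer_setup_scale() above.
 * Ratio 0 means 1:1, ratio 1 means 2x up-scaling; anything else is
 * unsupported by the mixer hardware. */
static int scale_ratio(unsigned int src, unsigned int dst, unsigned int *ratio)
{
	if (dst == src)
		*ratio = 0;
	else if (dst == 2 * src)
		*ratio = 1;
	else
		return -1;
	return 0;
}

int main(void)
{
	unsigned int r;

	printf("960 -> 1920: %s\n", scale_ratio(960, 1920, &r) ? "unsupported" : "ok (ratio 1)");
	printf("960 -> 1280: %s\n", scale_ratio(960, 1280, &r) ? "unsupported" : "ok");
	return 0;
}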
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 5f32e1a29411..ac60260c2389 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -101,7 +101,7 @@
#define MXR_CFG_GRP0_ENABLE (1 << 4)
#define MXR_CFG_VP_ENABLE (1 << 3)
#define MXR_CFG_SCAN_INTERLACE (0 << 2)
-#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_PROGRESSIVE (1 << 2)
#define MXR_CFG_SCAN_NTSC (0 << 1)
#define MXR_CFG_SCAN_PAL (1 << 1)
#define MXR_CFG_SCAN_SD (0 << 0)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 66727328832d..7d47b3d5cc0d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -823,7 +823,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs =
+ const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 4268bf210034..6b1d3340ba14 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -195,7 +195,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
encoder->crtc->x, encoder->crtc->y, encoder->crtc->primary->fb))
return -1;
} else {
- struct drm_encoder_helper_funcs *helpers
+ const struct drm_encoder_helper_funcs *helpers
= encoder->helper_private;
helpers->mode_set(encoder, &crtc->saved_mode,
&crtc->saved_adjusted_mode);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 0b770396548c..211069b2b951 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -505,7 +505,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
else
gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS") && encoder) {
- struct drm_encoder_helper_funcs *helpers =
+ const struct drm_encoder_helper_funcs *helpers =
encoder->helper_private;
helpers->dpms(encoder, value);
}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 9bb9bddd881a..001b450b27b3 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -501,20 +501,20 @@ bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
void gma_crtc_prepare(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
void gma_crtc_commit(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
void gma_crtc_disable(struct drm_crtc *crtc)
{
struct gtt_range *gt;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -656,7 +656,7 @@ void gma_crtc_restore(struct drm_crtc *crtc)
void gma_encoder_prepare(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *encoder_funcs =
+ const struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
/* lvds has its own version of prepare see psb_intel_lvds_prepare */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -664,7 +664,7 @@ void gma_encoder_prepare(struct drm_encoder *encoder)
void gma_encoder_commit(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *encoder_funcs =
+ const struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
/* lvds has its own version of commit see psb_intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index abf2248da61e..89f705c3a5eb 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -290,7 +290,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
encoder->crtc->primary->fb))
goto set_prop_error;
} else {
- struct drm_encoder_helper_funcs *funcs =
+ const struct drm_encoder_helper_funcs *funcs =
encoder->helper_private;
funcs->mode_set(encoder,
&gma_crtc->saved_mode,
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 8cc8a5abbc7b..acd38344b302 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -849,7 +849,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs =
+ const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 2de216c2374f..1048f0c7c6ce 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -483,7 +483,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs =
+ const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 54f73f50571a..2310d879cdc2 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -347,7 +347,7 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index b21a09451d1d..6659da88fe5b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -108,7 +108,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 88aad95bde09..ce0645d0c1e5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -625,7 +625,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
else
gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS")) {
- struct drm_encoder_helper_funcs *hfuncs
+ const struct drm_encoder_helper_funcs *hfuncs
= encoder->helper_private;
hfuncs->dpms(encoder, value);
}
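The gma500 hunks above all apply one pattern: the helper vtables reached through helper_private are now treated as const, so every local pointer that reads them back must be const-qualified as well. A simplified sketch of why, with made-up struct names standing in for the DRM types:

/* Simplified sketch (names are illustrative, not the DRM API): once the
 * registered ops table is const, reading it back into a non-const pointer
 * would discard the qualifier and trigger a compiler warning. */
struct helper_funcs {
	void (*dpms)(int mode);
};

struct crtc {
	const void *helper_private;	/* set once at init, only read afterwards */
};

static void crtc_prepare(struct crtc *crtc)
{
	const struct helper_funcs *funcs = crtc->helper_private;

	funcs->dpms(0);			/* read-only use of the table */
}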
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index fa140e04d5fa..b728523e194f 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -27,12 +27,13 @@ struct adv7511 {
struct regmap *regmap;
struct regmap *packet_memory_regmap;
enum drm_connector_status status;
- int dpms_mode;
+ bool powered;
unsigned int f_tmds;
unsigned int current_edid_segment;
uint8_t edid_buf[256];
+ bool edid_read;
wait_queue_head_t wq;
struct drm_encoder *encoder;
@@ -357,6 +358,48 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ adv7511->current_edid_segment = -1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+ ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+
+ /*
+ * Per spec it is allowed to pulse the HDP signal to indicate that the
+ * EDID information has changed. Some monitors do this when they wake up
+ * from standby or are enabled. When the HDP goes low, the adv7511 is
+ * reset and the outputs are disabled, which might cause the monitor to
+ * go to standby again. To avoid this, we ignore the HDP pin for the
+ * first few seconds after enabling the output.
+ */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_NONE);
+
+ /*
+ * Most of the registers are reset during power down or when HPD is low.
+ */
+ regcache_sync(adv7511->regmap);
+
+ adv7511->powered = true;
+}
+
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ /* TODO: setup additional power down modes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+ regcache_mark_dirty(adv7511->regmap);
+
+ adv7511->powered = false;
+}
+
/* -----------------------------------------------------------------------------
* Interrupt and hotplug detection
*/
@@ -379,69 +422,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
-{
- struct adv7511 *adv7511 = devid;
-
- if (adv7511_hpd(adv7511))
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
-
- wake_up_all(&adv7511->wq);
-
- return IRQ_HANDLED;
-}
-
-static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
- unsigned int irq)
+static int adv7511_irq_process(struct adv7511 *adv7511)
{
unsigned int irq0, irq1;
- unsigned int pending;
int ret;
ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
if (ret < 0)
- return 0;
+ return ret;
+
ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
if (ret < 0)
- return 0;
+ return ret;
- pending = (irq1 << 8) | irq0;
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- return pending & irq;
+ if (irq0 & ADV7511_INT0_HDP)
+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+ adv7511->edid_read = true;
+
+ if (adv7511->i2c_main->irq)
+ wake_up_all(&adv7511->wq);
+ }
+
+ return 0;
}
-static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
- int timeout)
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+ int ret;
+
+ ret = adv7511_irq_process(adv7511);
+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
{
- unsigned int pending;
int ret;
if (adv7511->i2c_main->irq) {
ret = wait_event_interruptible_timeout(adv7511->wq,
- adv7511_is_interrupt_pending(adv7511, irq),
- msecs_to_jiffies(timeout));
- if (ret <= 0)
- return 0;
- pending = adv7511_is_interrupt_pending(adv7511, irq);
+ adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
- if (timeout < 25)
- timeout = 25;
- do {
- pending = adv7511_is_interrupt_pending(adv7511, irq);
- if (pending)
+ for (; timeout > 0; timeout -= 25) {
+ ret = adv7511_irq_process(adv7511);
+ if (ret < 0)
break;
+
+ if (adv7511->edid_read)
+ break;
+
msleep(25);
- timeout -= 25;
- } while (timeout >= 25);
+ }
}
- return pending;
+ return adv7511->edid_read ? 0 : -EIO;
}
-/* -----------------------------------------------------------------------------
- * EDID retrieval
- */
-
static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
size_t len)
{
@@ -463,19 +508,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
return ret;
if (status != 2) {
+ adv7511->edid_read = false;
regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
block);
- ret = adv7511_wait_for_interrupt(adv7511,
- ADV7511_INT0_EDID_READY |
- ADV7511_INT1_DDC_ERROR, 200);
-
- if (!(ret & ADV7511_INT0_EDID_READY))
- return -EIO;
+ ret = adv7511_wait_for_edid(adv7511, 200);
+ if (ret < 0)
+ return ret;
}
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
-
/* Break this apart, hopefully more I2C controllers will
* support 64 byte transfers than 256 byte transfers
*/
@@ -526,9 +566,11 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
unsigned int count;
/* Reading the EDID only works if the device is powered */
- if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+ if (!adv7511->powered) {
regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+ ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
adv7511->current_edid_segment = -1;
@@ -536,7 +578,7 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
- if (adv7511->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!adv7511->powered)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
@@ -558,41 +600,10 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- adv7511->current_edid_segment = -1;
-
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- /*
- * Per spec it is allowed to pulse the HDP signal to indicate
- * that the EDID information has changed. Some monitors do this
- * when they wakeup from standby or are enabled. When the HDP
- * goes low the adv7511 is reset and the outputs are disabled
- * which might cause the monitor to go to standby again. To
- * avoid this we ignore the HDP pin for the first few seconds
- * after enabeling the output.
- */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HDP_SRC_MASK,
- ADV7511_REG_POWER2_HDP_SRC_NONE);
- /* Most of the registers are reset during power down or
- * when HPD is low
- */
- regcache_sync(adv7511->regmap);
- break;
- default:
- /* TODO: setup additional power down modes */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN,
- ADV7511_POWER_POWER_DOWN);
- regcache_mark_dirty(adv7511->regmap);
- break;
- }
-
- adv7511->dpms_mode = mode;
+ if (mode == DRM_MODE_DPMS_ON)
+ adv7511_power_on(adv7511);
+ else
+ adv7511_power_off(adv7511);
}
static enum drm_connector_status
@@ -620,10 +631,9 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
* there is a pending HPD interrupt and the cable is connected there was
* at least one transition from disconnected to connected and the chip
* has to be reinitialized. */
- if (status == connector_status_connected && hpd &&
- adv7511->dpms_mode == DRM_MODE_DPMS_ON) {
+ if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
- adv7511_encoder_dpms(encoder, adv7511->dpms_mode);
+ adv7511_power_on(adv7511);
adv7511_get_modes(encoder, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
@@ -858,7 +868,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511)
return -ENOMEM;
- adv7511->dpms_mode = DRM_MODE_DPMS_OFF;
+ adv7511->powered = false;
adv7511->status = connector_status_disconnected;
ret = adv7511_parse_dt(dev->of_node, &link_config);
@@ -918,10 +928,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN);
-
- adv7511->current_edid_segment = -1;
+ adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
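The refactored EDID path above boils down to one waiting strategy: block on the interrupt-driven edid_read flag when an IRQ line is wired up, otherwise poll the interrupt status every 25 ms until the flag is set or the timeout runs out. A condensed sketch of that shape; the context struct and poll helper are placeholders, only the control flow mirrors the patch:

/* Placeholder context; wait_event_interruptible_timeout(), msleep() and
 * msecs_to_jiffies() are the kernel APIs used by the patch itself. */
struct my_ctx {
	bool have_irq;
	bool edid_read;			/* set by the IRQ/poll path */
	wait_queue_head_t wq;
};

static int wait_for_edid(struct my_ctx *ctx, int timeout_ms)
{
	if (ctx->have_irq) {
		/* the IRQ handler sets ctx->edid_read and wakes the queue */
		wait_event_interruptible_timeout(ctx->wq, ctx->edid_read,
						 msecs_to_jiffies(timeout_ms));
	} else {
		for (; timeout_ms > 0; timeout_ms -= 25) {
			if (poll_irq_status(ctx) < 0)	/* placeholder helper */
				break;
			if (ctx->edid_read)
				break;
			msleep(25);
		}
	}

	return ctx->edid_read ? 0 : -EIO;
}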
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a9041d1a8ff0..5febffdb027d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/i2c/tda998x.h>
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
@@ -387,7 +388,7 @@ set_page(struct tda998x_priv *priv, uint16_t reg)
};
int ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0) {
- dev_err(&client->dev, "setpage %04x err %d\n",
+ dev_err(&client->dev, "%s %04x err %d\n", __func__,
reg, ret);
return ret;
}
@@ -1035,8 +1036,9 @@ tda998x_encoder_detect(struct tda998x_priv *priv)
connector_status_disconnected;
}
-static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
+static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
{
+ struct tda998x_priv *priv = data;
uint8_t offset, segptr;
int ret, i;
@@ -1080,8 +1082,8 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
return -ETIMEDOUT;
}
- ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH);
- if (ret != EDID_LENGTH) {
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
+ if (ret != length) {
dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
blk, ret);
return ret;
@@ -1090,82 +1092,31 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
return 0;
}
-static uint8_t *do_get_edid(struct tda998x_priv *priv)
+static int
+tda998x_encoder_get_modes(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- int j, valid_extensions = 0;
- uint8_t *block, *new;
- bool print_bad_edid = drm_debug & DRM_UT_KMS;
-
- if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
- return NULL;
+ struct edid *edid;
+ int n;
if (priv->rev == TDA19988)
reg_clear(priv, REG_TX4, TX4_PD_RAM);
- /* base block fetch */
- if (read_edid_block(priv, block, 0))
- goto fail;
-
- if (!drm_edid_block_valid(block, 0, print_bad_edid))
- goto fail;
-
- /* if there's no extensions, we're done */
- if (block[0x7e] == 0)
- goto done;
-
- new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
- if (!new)
- goto fail;
- block = new;
-
- for (j = 1; j <= block[0x7e]; j++) {
- uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
- if (read_edid_block(priv, ext_block, j))
- goto fail;
-
- if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
- goto fail;
-
- valid_extensions++;
- }
-
- if (valid_extensions != block[0x7e]) {
- block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
- block[0x7e] = valid_extensions;
- new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
- if (!new)
- goto fail;
- block = new;
- }
+ edid = drm_do_get_edid(connector, read_edid_block, priv);
-done:
if (priv->rev == TDA19988)
reg_set(priv, REG_TX4, TX4_PD_RAM);
- return block;
-
-fail:
- if (priv->rev == TDA19988)
- reg_set(priv, REG_TX4, TX4_PD_RAM);
- dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
- kfree(block);
- return NULL;
-}
-
-static int
-tda998x_encoder_get_modes(struct tda998x_priv *priv,
- struct drm_connector *connector)
-{
- struct edid *edid = (struct edid *)do_get_edid(priv);
- int n = 0;
-
- if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
- priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
- kfree(edid);
+ if (!edid) {
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
+ return 0;
}
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ kfree(edid);
+
return n;
}
@@ -1547,6 +1498,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
struct i2c_client *client = to_i2c_client(dev);
struct drm_device *drm = data;
struct tda998x_priv2 *priv;
+ uint32_t crtcs = 0;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -1555,9 +1507,18 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
dev_set_drvdata(dev, priv);
+ if (dev->of_node)
+ crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /* If no CRTCs were found, fall back to our old behaviour */
+ if (crtcs == 0) {
+ dev_warn(dev, "Falling back to first CRTC\n");
+ crtcs = 1 << 0;
+ }
+
priv->base.encoder = &priv->encoder;
priv->connector.interlace_allowed = 1;
- priv->encoder.possible_crtcs = 1 << 0;
+ priv->encoder.possible_crtcs = crtcs;
ret = tda998x_create(client, &priv->base);
if (ret)
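Both converters above now hand EDID block fetching to drm_do_get_edid(), which takes a callback of the form int (*)(void *data, u8 *buf, unsigned int block, size_t len) and performs the base/extension assembly and validation itself. A minimal skeleton of such a callback; the private struct and transfer helper are placeholders:

/* Skeleton only: fill 'len' bytes of EDID block 'blk' into 'buf',
 * return 0 on success or a negative error code. */
static int my_get_edid_block(void *data, u8 *buf, unsigned int blk, size_t len)
{
	struct my_priv *priv = data;		/* placeholder context */

	return my_read_hw(priv, blk, buf, len);	/* placeholder transfer */
}

/* The core then builds and validates the full EDID:
 *	edid = drm_do_get_edid(connector, my_get_edid_block, priv);
 */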
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f01922591679..a69002e2257d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem.o \
+ i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
@@ -83,9 +84,11 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o
+# virtual gpu code
+i915-y += i915_vgpu.o
+
# legacy horrors
-i915-y += i915_dma.o \
- i915_ums.o
+i915-y += i915_dma.o
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 806e812340d0..61ae8ff4eaed 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -818,23 +818,28 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
return false;
}
-static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+ unsigned start, unsigned len)
{
int i;
void *addr = NULL;
struct sg_page_iter sg_iter;
+ int first_page = start >> PAGE_SHIFT;
+ int last_page = (len + start + 4095) >> PAGE_SHIFT;
+ int npages = last_page - first_page;
struct page **pages;
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ pages = drm_malloc_ab(npages, sizeof(*pages));
if (pages == NULL) {
DRM_DEBUG_DRIVER("Failed to get space for pages\n");
goto finish;
}
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- pages[i] = sg_page_iter_page(&sg_iter);
- i++;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
+ pages[i++] = sg_page_iter_page(&sg_iter);
+ if (i == npages)
+ break;
}
addr = vmap(pages, i, 0, PAGE_KERNEL);
@@ -855,61 +860,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
u32 batch_start_offset,
u32 batch_len)
{
- int ret = 0;
int needs_clflush = 0;
- u32 *src_base, *dest_base = NULL;
- u32 *src_addr, *dest_addr;
- u32 offset = batch_start_offset / sizeof(*dest_addr);
- u32 end = batch_start_offset + batch_len;
+ void *src_base, *src;
+ void *dst = NULL;
+ int ret;
- if (end > dest_obj->base.size || end > src_obj->base.size)
+ if (batch_len > dest_obj->base.size ||
+ batch_len + batch_start_offset > src_obj->base.size)
return ERR_PTR(-E2BIG);
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
return ERR_PTR(ret);
}
- src_base = vmap_batch(src_obj);
+ src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
if (!src_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
ret = -ENOMEM;
goto unpin_src;
}
- src_addr = src_base + offset;
-
- if (needs_clflush)
- drm_clflush_virt_range((char *)src_addr, batch_len);
+ ret = i915_gem_object_get_pages(dest_obj);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
+ goto unmap_src;
+ }
+ i915_gem_object_pin_pages(dest_obj);
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+ DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
goto unmap_src;
}
- dest_base = vmap_batch(dest_obj);
- if (!dest_base) {
+ dst = vmap_batch(dest_obj, 0, batch_len);
+ if (!dst) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+ i915_gem_object_unpin_pages(dest_obj);
ret = -ENOMEM;
goto unmap_src;
}
- dest_addr = dest_base + offset;
-
- if (batch_start_offset != 0)
- memset((u8 *)dest_base, 0, batch_start_offset);
+ src = src_base + offset_in_page(batch_start_offset);
+ if (needs_clflush)
+ drm_clflush_virt_range(src, batch_len);
- memcpy(dest_addr, src_addr, batch_len);
- memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+ memcpy(dst, src, batch_len);
unmap_src:
vunmap(src_base);
unpin_src:
i915_gem_object_unpin_pages(src_obj);
- return ret ? ERR_PTR(ret) : dest_base;
+ return ret ? ERR_PTR(ret) : dst;
}
/**
@@ -1046,34 +1051,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
u32 batch_len,
bool is_master)
{
- int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
-
- ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
- return -1;
- }
+ int ret = 0;
batch_base = copy_batch(shadow_batch_obj, batch_obj,
batch_start_offset, batch_len);
if (IS_ERR(batch_base)) {
DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
- i915_gem_object_ggtt_unpin(shadow_batch_obj);
return PTR_ERR(batch_base);
}
- cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
- batch_end = cmd + (batch_len / sizeof(*batch_end));
+ batch_end = batch_base + (batch_len / sizeof(*batch_end));
+ cmd = batch_base;
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
u32 length;
@@ -1132,7 +1129,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
- i915_gem_object_ggtt_unpin(shadow_batch_obj);
+ i915_gem_object_unpin_pages(shadow_batch_obj);
return ret;
}
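The reworked vmap_batch() above maps only the pages covering [start, start + len) instead of the whole object. A quick worked example of the page arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

	start = 0x1800, len = 0x2000
	first_page = 0x1800 >> 12                    = 1
	last_page  = (0x2000 + 0x1800 + 4095) >> 12  = 4
	npages     = last_page - first_page          = 3

So only pages 1..3 are collected and vmap'd, and the caller offsets into the mapping with offset_in_page(batch_start_offset), here 0x800.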
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e8b18e542da4..007c7d7d8295 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -139,10 +139,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (vma->pin_count > 0)
pin_count++;
- seq_printf(m, " (pinned x %d)", pin_count);
+ }
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_display)
seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank,
work->flip_ready_vblank,
- drm_vblank_count(dev, crtc->pipe));
+ drm_crtc_vblank_count(&crtc->base));
if (work->enable_stall_check)
seq_puts(m, "Stall check enabled, ");
else
@@ -1089,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
- IS_BROADWELL(dev)) {
+ IS_BROADWELL(dev) || IS_GEN9(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1108,11 +1109,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- reqf >>= 24;
- else
- reqf >>= 25;
+ if (IS_GEN9(dev))
+ reqf >>= 23;
+ else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
reqf = intel_gpu_freq(dev_priv, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
@@ -1126,7 +1131,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1152,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & 0xff00) >> 8);
+ (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1177,19 +1184,25 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = rp_state_cap & 0xff;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+ seq_printf(m, "Idle freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1204,6 +1217,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "min GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ seq_printf(m, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
@@ -1778,11 +1794,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
+ fb->base.modifier[0],
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1793,11 +1810,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
+ fb->base.modifier[0],
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1828,18 +1846,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->ips.pwrctx) {
- seq_puts(m, "power context ");
- describe_obj(m, dev_priv->ips.pwrctx);
- seq_putc(m, '\n');
- }
-
- if (dev_priv->ips.renderctx) {
- seq_puts(m, "render context ");
- describe_obj(m, dev_priv->ips.renderctx);
- seq_putc(m, '\n');
- }
-
list_for_each_entry(ctx, &dev_priv->context_list, link) {
if (!i915.enable_execlists &&
ctx->legacy_hw_ctx.rcs_state == NULL)
@@ -2183,7 +2189,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
ppgtt->debug_dump(ppgtt, m);
}
@@ -2243,6 +2249,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enum pipe pipe;
bool enabled = false;
+ if (!HAS_PSR(dev)) {
+ seq_puts(m, "PSR not supported\n");
+ return 0;
+ }
+
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
@@ -2255,17 +2266,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_PSR(dev)) {
- if (HAS_DDI(dev))
- enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- else {
- for_each_pipe(dev_priv, pipe) {
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
- }
+ if (HAS_DDI(dev))
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ else {
+ for_each_pipe(dev_priv, pipe) {
+ stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ enabled = true;
}
}
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
@@ -2282,7 +2291,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
yesno((bool)dev_priv->psr.link_standby));
/* CHV PSR has no kind of performance counter */
- if (HAS_PSR(dev) && HAS_DDI(dev)) {
+ if (HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
@@ -2305,8 +2314,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
u8 crc[6];
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.dpms != DRM_MODE_DPMS_ON)
continue;
@@ -2674,7 +2682,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
active = cursor_position(dev, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_width, crtc->cursor_height,
+ x, y, crtc->base.cursor->state->crtc_w,
+ crtc->base.cursor->state->crtc_h,
crtc->cursor_addr, yesno(active));
}
@@ -2850,7 +2859,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -2867,6 +2876,115 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
return 0;
}
+static void drrs_status_per_crtc(struct seq_file *m,
+ struct drm_device *dev, struct intel_crtc *intel_crtc)
+{
+ struct intel_encoder *intel_encoder;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_drrs *drrs = &dev_priv->drrs;
+ int vrefresh = 0;
+
+ for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
+ /* Encoder connected on this CRTC */
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ seq_puts(m, "eDP:\n");
+ break;
+ case INTEL_OUTPUT_DSI:
+ seq_puts(m, "DSI:\n");
+ break;
+ case INTEL_OUTPUT_HDMI:
+ seq_puts(m, "HDMI:\n");
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ seq_puts(m, "DP:\n");
+ break;
+ default:
+ seq_printf(m, "Other encoder (id=%d).\n",
+ intel_encoder->type);
+ return;
+ }
+ }
+
+ if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Static");
+ else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Seamless");
+ else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
+ seq_puts(m, "\tVBT: DRRS_type: None");
+ else
+ seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
+
+ seq_puts(m, "\n\n");
+
+ if (intel_crtc->config->has_drrs) {
+ struct intel_panel *panel;
+
+ mutex_lock(&drrs->mutex);
+ /* DRRS Supported */
+ seq_puts(m, "\tDRRS Supported: Yes\n");
+
+ /* disable_drrs() will make drrs->dp NULL */
+ if (!drrs->dp) {
+ seq_puts(m, "Idleness DRRS: Disabled");
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+
+ panel = &drrs->dp->attached_connector->panel;
+ seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+ drrs->busy_frontbuffer_bits);
+
+ seq_puts(m, "\n\t\t");
+ if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+ seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+ vrefresh = panel->fixed_mode->vrefresh;
+ } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+ seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+ vrefresh = panel->downclock_mode->vrefresh;
+ } else {
+ seq_printf(m, "DRRS_State: Unknown(%d)\n",
+ drrs->refresh_rate_type);
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+ seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+ seq_puts(m, "\n\t\t");
+ mutex_unlock(&drrs->mutex);
+ } else {
+ /* DRRS not supported. Print the VBT parameter */
+ seq_puts(m, "\tDRRS Supported : No");
+ }
+ seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_crtc *intel_crtc;
+ int active_crtc_cnt = 0;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ drm_modeset_lock(&intel_crtc->base.mutex, NULL);
+
+ if (intel_crtc->active) {
+ active_crtc_cnt++;
+ seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+ drrs_status_per_crtc(m, dev, intel_crtc);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+ }
+
+ if (!active_crtc_cnt)
+ seq_puts(m, "No active crtc found\n");
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -4189,7 +4307,7 @@ i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rp_state_cap, hw_max, hw_min;
+ u32 hw_max, hw_min;
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4206,18 +4324,10 @@ i915_max_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- if (IS_VALLEYVIEW(dev)) {
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
- } else {
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.max_freq;
- hw_min = (rp_state_cap >> 16) & 0xff;
- }
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4226,10 +4336,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4267,7 +4374,7 @@ i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rp_state_cap, hw_max, hw_min;
+ u32 hw_max, hw_min;
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4284,18 +4391,10 @@ i915_min_freq_set(void *data, u64 val)
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- if (IS_VALLEYVIEW(dev)) {
- val = intel_freq_opcode(dev_priv, val);
-
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
- } else {
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(dev_priv, val);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- hw_max = dev_priv->rps.max_freq;
- hw_min = (rp_state_cap >> 16) & 0xff;
- }
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4304,10 +4403,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4374,6 +4470,112 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+
+ if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
+ return -ENODEV;
+
+ seq_puts(m, "SSEU Device Info\n");
+ seq_printf(m, " Available Slice Total: %u\n",
+ INTEL_INFO(dev)->slice_total);
+ seq_printf(m, " Available Subslice Total: %u\n",
+ INTEL_INFO(dev)->subslice_total);
+ seq_printf(m, " Available Subslice Per Slice: %u\n",
+ INTEL_INFO(dev)->subslice_per_slice);
+ seq_printf(m, " Available EU Total: %u\n",
+ INTEL_INFO(dev)->eu_total);
+ seq_printf(m, " Available EU Per Subslice: %u\n",
+ INTEL_INFO(dev)->eu_per_subslice);
+ seq_printf(m, " Has Slice Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_slice_pg));
+ seq_printf(m, " Has Subslice Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_subslice_pg));
+ seq_printf(m, " Has EU Power Gating: %s\n",
+ yesno(INTEL_INFO(dev)->has_eu_pg));
+
+ seq_puts(m, "SSEU Device Status\n");
+ if (IS_CHERRYVIEW(dev)) {
+ const int ss_max = 2;
+ int ss;
+ u32 sig1[ss_max], sig2[ss_max];
+
+ sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+ sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+ sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+ sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ s_tot = 1;
+ ss_per++;
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ eu_tot += eu_cnt;
+ eu_per = max(eu_per, eu_cnt);
+ }
+ ss_tot = ss_per;
+ } else if (IS_SKYLAKE(dev)) {
+ const int s_max = 3, ss_max = 4;
+ int s, ss;
+ u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+ s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
+ s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
+ s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
+ eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
+ eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
+ eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
+ eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
+ eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
+ eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < s_max; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ s_tot++;
+ ss_per = INTEL_INFO(dev)->subslice_per_slice;
+ ss_tot += ss_per;
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+ eu_mask[ss%2]);
+ eu_tot += eu_cnt;
+ eu_per = max(eu_per, eu_cnt);
+ }
+ }
+ }
+ seq_printf(m, " Enabled Slice Total: %u\n", s_tot);
+ seq_printf(m, " Enabled Subslice Total: %u\n", ss_tot);
+ seq_printf(m, " Enabled Subslice Per Slice: %u\n", ss_per);
+ seq_printf(m, " Enabled EU Total: %u\n", eu_tot);
+ seq_printf(m, " Enabled EU Per Subslice: %u\n", eu_per);
+
+ return 0;
+}
+
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
@@ -4487,6 +4689,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_sseu_status", i915_sseu_status, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
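The new i915_sseu_status() above derives per-subslice EU counts as 2 * hweight32(ack_reg & eu_mask): every set bit left after masking the power-gating ack register stands for a pair of execution units. A standalone illustration with made-up register values; popcount32() stands in for the kernel's hweight32():

#include <stdio.h>

/* Portable popcount, equivalent to the kernel's hweight32(). */
static unsigned int popcount32(unsigned int v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned int ack_reg = 0x000000f3;	/* example register value */
	unsigned int eu_mask = 0x000000ff;	/* example per-subslice mask */

	/* 0xf3 has 6 bits set, so 12 EUs are reported as enabled. */
	printf("enabled EUs: %u\n", 2 * popcount32(ack_reg & eu_mask));
	return 0;
}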
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a46787129e7..68e0c85a17cf 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,6 +36,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
@@ -67,6 +68,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
+ case I915_PARAM_REVISION:
+ value = dev->pdev->revision;
+ break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
@@ -149,6 +153,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_MMAP_VERSION:
value = 1;
break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = INTEL_INFO(dev)->subslice_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = INTEL_INFO(dev)->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
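A minimal userspace sketch of how the new parameters could be queried, assuming libdrm's drmIoctl() and the i915 uapi header; the render-node path below is an assumption, not something defined by this patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>        /* drmIoctl(), provided by libdrm */
#include <drm/i915_drm.h>   /* DRM_IOCTL_I915_GETPARAM, I915_PARAM_* */

static int query(int fd, int param, int *value)
{
	drm_i915_getparam_t gp = { .param = param, .value = value };

	return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed device path */
	int eu_total = 0, subslice_total = 0;

	if (fd < 0)
		return 1;
	if (query(fd, I915_PARAM_EU_TOTAL, &eu_total) == 0)
		printf("EU total: %d\n", eu_total);
	if (query(fd, I915_PARAM_SUBSLICE_TOTAL, &subslice_total) == 0)
		printf("subslice total: %d\n", subslice_total);
	close(fd);
	return 0;
}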
@@ -605,16 +619,128 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
}
}
+ /* Initialize slice/subslice/EU info */
if (IS_CHERRYVIEW(dev)) {
- u32 fuse, mask_eu;
+ u32 fuse, eu_dis;
fuse = I915_READ(CHV_FUSE_GT);
- mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK |
- CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- info->eu_total = 16 - hweight32(mask_eu);
+
+ info->slice_total = 1;
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ info->subslice_total = info->subslice_per_slice;
+ /*
+ * CHV is expected to always have a uniform distribution of EU
+ * across subslices.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ info->eu_total / info->subslice_total :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = 0;
+ info->has_subslice_pg = (info->subslice_total > 1);
+ info->has_eu_pg = (info->eu_per_subslice > 2);
+ } else if (IS_SKYLAKE(dev)) {
+ const int s_max = 3, ss_max = 4, eu_max = 8;
+ int s, ss;
+ u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+ GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT;
+
+ eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
+ eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
+ eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
+
+ info->slice_total = hweight32(s_enable);
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total *
+ info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & (0x1 << s)))
+ /* skip disabled slice */
+ continue;
+
+ for (ss = 0; ss < ss_max; ss++) {
+ u32 n_disabled;
+
+ if (ss_disable & (0x1 << ss))
+ /* skip disabled subslice */
+ continue;
+
+ n_disabled = hweight8(eu_disable[s] >>
+ (ss * eu_max));
+
+ /*
+ * Record which subslice(s) has(have) 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_max - n_disabled == 7)
+ info->subslice_7eu[s] |= 1 << ss;
+
+ info->eu_total += eu_max - n_disabled;
+ }
+ }
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total,
+ info->subslice_total) : 0;
+ /*
+ * SKL supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
+ info->has_subslice_pg = 0;
+ info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
}
+ DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+ DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+ DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+ DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+ DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+ DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+ info->has_slice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+ info->has_subslice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+ info->has_eu_pg ? "y" : "n");
}
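As a worked example of the arithmetic above (a standalone sketch; the fuse values are invented for illustration): with all three slices enabled, one of four subslices fused off, and a single EU lost to die recovery, the rounded-up per-subslice figure comes out at 8 rather than 7:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int popcount(unsigned int v)
{
	return (unsigned int)__builtin_popcount(v);
}

int main(void)
{
	const unsigned int ss_max = 4;
	/* invented fuse fields: 3 slices enabled, subslice 3 disabled */
	unsigned int s_enable = 0x7, ss_disable = 0x8;

	unsigned int slice_total = popcount(s_enable);                    /* 3 */
	unsigned int subslice_per_slice = ss_max - popcount(ss_disable);  /* 3 */
	unsigned int subslice_total = slice_total * subslice_per_slice;   /* 9 */
	unsigned int eu_total = 9 * 8 - 1;  /* one EU fused off for die recovery */

	printf("slice total: %u\n", slice_total);
	printf("subslice total: %u\n", subslice_total);
	printf("EU per subslice: %u\n", DIV_ROUND_UP(eu_total, subslice_total)); /* 8 */
	return 0;
}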
/**
@@ -637,17 +763,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
info = (struct intel_device_info *) flags;
- /* Refuse to load on gen6+ without kms enabled. */
- if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
- DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
- DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
- return -ENODEV;
- }
-
- /* UMS needs agp support. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
- return -EINVAL;
-
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@@ -717,20 +832,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_regs;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over. */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_gtt;
- }
+ /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over. */
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_gtt;
+ }
- ret = i915_kick_out_vgacon(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_gtt;
- }
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_gtt;
}
pci_set_master(dev->pdev);
@@ -834,14 +947,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_power_domains_init(dev_priv);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev);
- if (ret < 0) {
- DRM_ERROR("failed to init modeset\n");
- goto out_power_well;
- }
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto out_power_well;
}
+ /*
+ * Notify the host that we have a valid display surface after
+ * modesetting, when running inside a VM.
+ */
+ if (intel_vgpu_active(dev))
+ I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
i915_setup_sysfs(dev);
if (INTEL_INFO(dev)->num_pipes) {
@@ -921,28 +1039,25 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister();
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_fbdev_fini(dev);
+ intel_fbdev_fini(dev);
drm_vblank_cleanup(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
+ intel_modeset_cleanup(dev);
- vga_switcheroo_unregister_client(dev->pdev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
}
+ vga_switcheroo_unregister_client(dev->pdev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
@@ -952,17 +1067,15 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_fini(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Flush any outstanding unpin_work. */
- flush_workqueue(dev_priv->wq);
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
- i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_stolen(dev);
- }
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_stolen(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1023,8 +1136,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_modeset_preclose(dev, file);
+ intel_modeset_preclose(dev, file);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1087,7 +1199,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c66b568bb81..a19d2c71e205 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,7 +346,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
};
static const struct intel_device_info intel_cherryview_info = {
- .is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -369,6 +368,19 @@ static const struct intel_device_info intel_skylake_info = {
IVB_CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_skylake_gt3_info = {
+ .is_preliminary = 1,
+ .is_skylake = 1,
+ .gen = 9, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -406,7 +418,9 @@ static const struct intel_device_info intel_skylake_info = {
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info), \
- INTEL_SKL_IDS(&intel_skylake_info)
+ INTEL_SKL_GT1_IDS(&intel_skylake_info), \
+ INTEL_SKL_GT2_IDS(&intel_skylake_info), \
+ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -553,6 +567,7 @@ static int i915_drm_suspend(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
pci_power_t opregion_target_state;
+ int error;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -567,37 +582,32 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(dev->pdev);
- /* If KMS is active, we do the leavevt stuff here */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int error;
-
- error = i915_gem_suspend(dev);
- if (error) {
- dev_err(&dev->pdev->dev,
- "GEM idle failed, resume might fail\n");
- return error;
- }
+ error = i915_gem_suspend(dev);
+ if (error) {
+ dev_err(&dev->pdev->dev,
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
- intel_suspend_gt_powersave(dev);
+ intel_suspend_gt_powersave(dev);
- /*
- * Disable CRTCs directly since we want to preserve sw state
- * for _thaw. Also, power gate the CRTC power wells.
- */
- drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc)
- intel_crtc_control(crtc, false);
- drm_modeset_unlock_all(dev);
+ /*
+ * Disable CRTCs directly since we want to preserve sw state
+ * for _thaw. Also, power gate the CRTC power wells.
+ */
+ drm_modeset_lock_all(dev);
+ for_each_crtc(dev, crtc)
+ intel_crtc_control(crtc, false);
+ drm_modeset_unlock_all(dev);
- intel_dp_mst_suspend(dev);
+ intel_dp_mst_suspend(dev);
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_hpd_cancel_work(dev_priv);
- intel_suspend_encoders(dev_priv);
+ intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
- }
+ intel_suspend_hw(dev);
i915_gem_suspend_gtt_mappings(dev);
@@ -679,53 +689,55 @@ static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
i915_restore_state(dev);
intel_opregion_setup(dev);
- /* KMS EnterVT equivalent */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_init_pch_refclk(dev);
- drm_mode_config_reset(dev);
+ intel_init_pch_refclk(dev);
+ drm_mode_config_reset(dev);
- mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
- }
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * Interrupts have to be enabled before any batches are run. If not, the
+ * GPU will hang. i915_gem_init_hw() will initiate batches to
+ * update/restore the context.
+ *
+ * Modeset enabling in intel_modeset_init_hw() also needs working
+ * interrupts.
+ */
+ intel_runtime_pm_enable_interrupts(dev_priv);
- /* We need working interrupts for modeset enabling ... */
- intel_runtime_pm_enable_interrupts(dev_priv);
+ mutex_lock(&dev->struct_mutex);
+ if (i915_gem_init_hw(dev)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ }
+ mutex_unlock(&dev->struct_mutex);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
- drm_modeset_unlock_all(dev);
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
- intel_dp_mst_resume(dev);
+ intel_dp_mst_resume(dev);
- /*
- * ... but also need to make sure that hotplug processing
- * doesn't cause havoc. Like in the driver load code we don't
- * bother with the tiny race here where we might loose hotplug
- * notifications.
- * */
- intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
- }
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+ * bother with the tiny race here where we might lose hotplug
+ * notifications.
+ */
+ intel_hpd_init(dev_priv);
+ /* Config may have changed between suspend and resume */
+ drm_helper_hpd_irq_event(dev);
intel_opregion_init(dev);
@@ -861,38 +873,29 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- dev_priv->gpu_error.reload_in_reset = true;
- ret = i915_gem_init_hw(dev);
+ /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+ dev_priv->gpu_error.reload_in_reset = true;
- dev_priv->gpu_error.reload_in_reset = false;
+ ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("Failed hw init on reset %d\n", ret);
- return ret;
- }
-
- /*
- * FIXME: This races pretty badly against concurrent holders of
- * ring interrupts. This is possible since we've started to drop
- * dev->struct_mutex in select places when waiting for the gpu.
- */
+ dev_priv->gpu_error.reload_in_reset = false;
- /*
- * rps/rc6 re-init is necessary to restore state lost after the
- * reset and the re-install of gt irqs. Skip for ironlake per
- * previous concerns that it doesn't respond well to some forms
- * of re-init after reset.
- */
- if (INTEL_INFO(dev)->gen > 5)
- intel_enable_gt_powersave(dev);
- } else {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("Failed hw init on reset %d\n", ret);
+ return ret;
}
+ /*
+ * rps/rc6 re-init is necessary to restore state lost after the
+ * reset and the re-install of gt irqs. Skip for ironlake per
+ * previous concerns that it doesn't respond well to some forms
+ * of re-init after reset.
+ */
+ if (INTEL_INFO(dev)->gen > 5)
+ intel_enable_gt_powersave(dev);
+
return 0;
}
@@ -1042,7 +1045,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
- s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
s->ecochk = I915_READ(GAM_ECOCHK);
@@ -1124,7 +1127,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
+ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
I915_WRITE(GAM_ECOCHK, s->ecochk);
@@ -1640,11 +1643,9 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET)) {
driver.get_vblank_timestamp = NULL;
-#ifndef CONFIG_DRM_I915_UMS
/* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
return 0;
-#endif
}
/*
@@ -1660,10 +1661,8 @@ static int __init i915_init(void)
static void __exit i915_exit(void)
{
-#ifndef CONFIG_DRM_I915_UMS
if (!(driver.driver_features & DRIVER_MODESET))
return; /* Never loaded a driver. */
-#endif
drm_pci_exit(&driver, &i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b4faa2df9d3d..8ae6f7f06b3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
#define _I915_DRV_H_
#include <uapi/drm/i915_drm.h>
+#include <uapi/drm/drm_fourcc.h>
#include "i915_reg.h"
#include "intel_bios.h"
@@ -55,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150130"
+#define DRIVER_DATE "20150327"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -69,6 +70,9 @@
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
@@ -222,9 +226,14 @@ enum hpd_pin {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_plane(pipe, p) \
- for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
-#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+#define for_each_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -237,6 +246,12 @@ enum hpd_pin {
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_connector(dev, intel_connector) \
+ list_for_each_entry(intel_connector, \
+ &dev->mode_config.connector_list, \
+ base.head)
+
+
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -412,6 +427,8 @@ struct drm_i915_error_state {
u32 forcewake;
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
+ u32 fault_data0; /* gen8, gen9 */
+ u32 fault_data1; /* gen8, gen9 */
u32 done_reg;
u32 gac_eco;
u32 gam_ecochk;
@@ -529,7 +546,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
- struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
@@ -538,7 +555,7 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
- void (*modeset_global_resources)(struct drm_device *dev);
+ void (*modeset_global_resources)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -692,7 +709,18 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
- unsigned int eu_total;
+
+ /* Slice/subslice/EU info */
+ u8 slice_total;
+ u8 subslice_total;
+ u8 subslice_per_slice;
+ u8 eu_total;
+ u8 eu_per_subslice;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
};
#undef DEFINE_FLAG
@@ -771,11 +799,20 @@ struct intel_context {
struct list_head link;
};
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+};
+
struct i915_fbc {
- unsigned long size;
+ unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
- enum plane plane;
+ unsigned int possible_framebuffer_bits;
+ unsigned int busy_bits;
+ struct intel_crtc *crtc;
int y;
struct drm_mm_node compressed_fb;
@@ -787,14 +824,6 @@ struct i915_fbc {
* possible. */
bool enabled;
- /* On gen8 some rings cannont perform fbc clean operation so for now
- * we are doing this on SW with mmio.
- * This variable works in the opposite information direction
- * of ring->fbc_dirty telling software on frontbuffer tracking
- * to perform the cache clean on sw side.
- */
- bool need_sw_cache_clean;
-
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -888,150 +917,21 @@ struct intel_gmbus {
};
struct i915_suspend_saved_registers {
- u8 saveLBB;
- u32 saveDSPACNTR;
- u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 savePIPEACONF;
- u32 savePIPEBCONF;
- u32 savePIPEASRC;
- u32 savePIPEBSRC;
- u32 saveFPA0;
- u32 saveFPA1;
- u32 saveDPLL_A;
- u32 saveDPLL_A_MD;
- u32 saveHTOTAL_A;
- u32 saveHBLANK_A;
- u32 saveHSYNC_A;
- u32 saveVTOTAL_A;
- u32 saveVBLANK_A;
- u32 saveVSYNC_A;
- u32 saveBCLRPAT_A;
- u32 saveTRANSACONF;
- u32 saveTRANS_HTOTAL_A;
- u32 saveTRANS_HBLANK_A;
- u32 saveTRANS_HSYNC_A;
- u32 saveTRANS_VTOTAL_A;
- u32 saveTRANS_VBLANK_A;
- u32 saveTRANS_VSYNC_A;
- u32 savePIPEASTAT;
- u32 saveDSPASTRIDE;
- u32 saveDSPASIZE;
- u32 saveDSPAPOS;
- u32 saveDSPAADDR;
- u32 saveDSPASURF;
- u32 saveDSPATILEOFF;
- u32 savePFIT_PGM_RATIOS;
- u32 saveBLC_HIST_CTL;
- u32 saveBLC_PWM_CTL;
- u32 saveBLC_PWM_CTL2;
- u32 saveBLC_CPU_PWM_CTL;
- u32 saveBLC_CPU_PWM_CTL2;
- u32 saveFPB0;
- u32 saveFPB1;
- u32 saveDPLL_B;
- u32 saveDPLL_B_MD;
- u32 saveHTOTAL_B;
- u32 saveHBLANK_B;
- u32 saveHSYNC_B;
- u32 saveVTOTAL_B;
- u32 saveVBLANK_B;
- u32 saveVSYNC_B;
- u32 saveBCLRPAT_B;
- u32 saveTRANSBCONF;
- u32 saveTRANS_HTOTAL_B;
- u32 saveTRANS_HBLANK_B;
- u32 saveTRANS_HSYNC_B;
- u32 saveTRANS_VTOTAL_B;
- u32 saveTRANS_VBLANK_B;
- u32 saveTRANS_VSYNC_B;
- u32 savePIPEBSTAT;
- u32 saveDSPBSTRIDE;
- u32 saveDSPBSIZE;
- u32 saveDSPBPOS;
- u32 saveDSPBADDR;
- u32 saveDSPBSURF;
- u32 saveDSPBTILEOFF;
- u32 saveVGA0;
- u32 saveVGA1;
- u32 saveVGA_PD;
- u32 saveVGACNTRL;
- u32 saveADPA;
u32 saveLVDS;
u32 savePP_ON_DELAYS;
u32 savePP_OFF_DELAYS;
- u32 saveDVOA;
- u32 saveDVOB;
- u32 saveDVOC;
u32 savePP_ON;
u32 savePP_OFF;
u32 savePP_CONTROL;
u32 savePP_DIVISOR;
- u32 savePFIT_CONTROL;
- u32 save_palette_a[256];
- u32 save_palette_b[256];
u32 saveFBC_CONTROL;
- u32 saveIER;
- u32 saveIIR;
- u32 saveIMR;
- u32 saveDEIER;
- u32 saveDEIMR;
- u32 saveGTIER;
- u32 saveGTIMR;
- u32 saveFDI_RXA_IMR;
- u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF2[3];
- u8 saveMSR;
- u8 saveSR[8];
- u8 saveGR[25];
- u8 saveAR_INDEX;
- u8 saveAR[21];
- u8 saveDACMASK;
- u8 saveCR[37];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
- u32 saveCURACNTR;
- u32 saveCURAPOS;
- u32 saveCURABASE;
- u32 saveCURBCNTR;
- u32 saveCURBPOS;
- u32 saveCURBBASE;
- u32 saveCURSIZE;
- u32 saveDP_B;
- u32 saveDP_C;
- u32 saveDP_D;
- u32 savePIPEA_GMCH_DATA_M;
- u32 savePIPEB_GMCH_DATA_M;
- u32 savePIPEA_GMCH_DATA_N;
- u32 savePIPEB_GMCH_DATA_N;
- u32 savePIPEA_DP_LINK_M;
- u32 savePIPEB_DP_LINK_M;
- u32 savePIPEA_DP_LINK_N;
- u32 savePIPEB_DP_LINK_N;
- u32 saveFDI_RXA_CTL;
- u32 saveFDI_TXA_CTL;
- u32 saveFDI_RXB_CTL;
- u32 saveFDI_TXB_CTL;
- u32 savePFA_CTL_1;
- u32 savePFB_CTL_1;
- u32 savePFA_WIN_SZ;
- u32 savePFB_WIN_SZ;
- u32 savePFA_WIN_POS;
- u32 savePFB_WIN_POS;
- u32 savePCH_DREF_CONTROL;
- u32 saveDISP_ARB_CTL;
- u32 savePIPEA_DATA_M1;
- u32 savePIPEA_DATA_N1;
- u32 savePIPEA_LINK_M1;
- u32 savePIPEA_LINK_N1;
- u32 savePIPEB_DATA_M1;
- u32 savePIPEB_DATA_N1;
- u32 savePIPEB_LINK_M1;
- u32 savePIPEB_LINK_N1;
- u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -1128,13 +1028,12 @@ struct intel_gen6_power_mgmt {
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u32 cz_freq;
- u32 ei_interrupt_count;
-
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -1171,9 +1070,6 @@ struct intel_ilk_power_mgmt {
int c_m;
int r_t;
-
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
};
struct drm_i915_private;
@@ -1455,6 +1351,7 @@ struct intel_vbt_data {
bool edp_initialized;
bool edp_support;
int edp_bpp;
+ bool edp_low_vswing;
struct edp_power_seq edp_pps;
struct {
@@ -1515,6 +1412,25 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
+struct vlv_wm_values {
+ struct {
+ uint16_t primary;
+ uint16_t sprite[2];
+ uint8_t cursor;
+ } pipe[3];
+
+ struct {
+ uint16_t plane;
+ uint8_t cursor;
+ } sr;
+
+ struct {
+ uint8_t cursor;
+ uint8_t sprite[2];
+ uint8_t primary;
+ } ddl[3];
+};
+
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
};
@@ -1641,6 +1557,10 @@ struct i915_workarounds {
u32 count;
};
+struct i915_virtual_gpu {
+ bool active;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1653,6 +1573,8 @@ struct drm_i915_private {
struct intel_uncore uncore;
+ struct i915_virtual_gpu vgpu;
+
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
@@ -1871,6 +1793,7 @@ struct drm_i915_private {
union {
struct ilk_wm_values hw;
struct skl_wm_values skl_hw;
+ struct vlv_wm_values vlv;
};
} wm;
@@ -2142,7 +2065,7 @@ struct drm_i915_gem_request {
u32 tail;
/**
- * Context related to this request
+ * Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
* context, we must increment the context's refcount, to guarantee that
* it persists while any request is linked to it. Requests themselves
@@ -2152,6 +2075,7 @@ struct drm_i915_gem_request {
* context.
*/
struct intel_context *ctx;
+ struct intel_ringbuffer *ringbuf;
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
@@ -2166,6 +2090,9 @@ struct drm_i915_gem_request {
/** file_priv list entry for this request */
struct list_head client_list;
+ /** process identifier submitting this request */
+ struct pid *pid;
+
uint32_t uniq;
/**
@@ -2352,6 +2279,7 @@ struct drm_i915_cmd_table {
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
@@ -2374,9 +2302,6 @@ struct drm_i915_cmd_table {
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a)
-#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
- INTEL_DEVID(dev) == 0x0106 || \
- INTEL_DEVID(dev) == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
@@ -2400,6 +2325,12 @@ struct drm_i915_cmd_table {
INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define SKL_REVID_A0 (0x0)
+#define SKL_REVID_B0 (0x1)
+#define SKL_REVID_C0 (0x2)
+#define SKL_REVID_D0 (0x3)
+#define SKL_REVID_E0 (0x4)
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2499,6 +2430,7 @@ struct drm_i915_cmd_table {
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
#include "i915_trace.h"
@@ -2507,14 +2439,11 @@ extern int i915_max_ioctl;
extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
extern int i915_resume_legacy(struct drm_device *dev);
-extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
-extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
- unsigned int powersave;
int semaphores;
unsigned int lvds_downclock;
int lvds_channel_mode;
@@ -2534,11 +2463,12 @@ struct i915_params {
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
+ bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
int use_mmio_flip;
- bool mmio_debug;
+ int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
};
@@ -2591,6 +2521,10 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+static inline bool intel_vgpu_active(struct drm_device *dev)
+{
+ return to_i915(dev)->vgpu.active;
+}
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2669,12 +2603,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target,
- unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2691,20 +2619,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
-int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view);
-static inline
-int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
-{
- return i915_gem_object_pin_view(obj, vm, alignment, flags,
- &i915_ggtt_view_normal);
-}
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags);
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
@@ -2844,8 +2768,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2868,60 +2794,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
- return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+ return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}
+
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
-}
+ struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-static inline
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
-}
-
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
- &i915_ggtt_view_normal);
-}
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->pin_count > 0)
- return true;
- return false;
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
@@ -2944,13 +2856,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
+ return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
static inline unsigned long
@@ -2974,7 +2880,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}
-void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
+static inline void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+}
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3046,6 +2958,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target,
+ unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
+
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
@@ -3121,10 +3044,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* i915_ums.c */
-void i915_save_display_reg(struct drm_device *dev);
-void i915_restore_display_reg(struct drm_device *dev);
-
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
@@ -3196,8 +3115,7 @@ extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev);
@@ -3210,8 +3128,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void intel_notify_mmio_flip(struct intel_engine_cs *ring);
-
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 27ea6bdebce7..53394f998a1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1,5 +1,5 @@
/*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,9 +29,9 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -52,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
- struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
- struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
- unsigned long event,
- void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
@@ -350,7 +341,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
- int ret;
+ int ret = 0;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
@@ -359,6 +350,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -369,13 +361,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
- if (unwritten)
- return -EFAULT;
+ if (unwritten) {
+ ret = -EFAULT;
+ goto out;
+ }
}
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
- return 0;
+
+out:
+ intel_fb_obj_flush(obj, false);
+ return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
@@ -809,6 +806,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+
while (remain > 0) {
/* Operation in this page
*
@@ -829,7 +828,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
- goto out_unpin;
+ goto out_flush;
}
remain -= page_length;
@@ -837,6 +836,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset += page_length;
}
+out_flush:
+ intel_fb_obj_flush(obj, false);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
@@ -951,6 +952,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+
i915_gem_object_pin_pages(obj);
offset = args->offset;
@@ -1029,6 +1032,7 @@ out:
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
+ intel_fb_obj_flush(obj, false);
return ret;
}
@@ -1922,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2033,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target, unsigned flags)
-{
- const struct {
- struct list_head *list;
- unsigned int bit;
- } phases[] = {
- { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
- { NULL, 0 },
- }, *phase;
- unsigned long count = 0;
-
- /*
- * As we may completely rewrite the (un)bound list whilst unbinding
- * (due to retiring requests) we have to strictly process only
- * one element of the list at the time, and recheck the list
- * on every iteration.
- *
- * In particular, we must hold a reference whilst removing the
- * object as we may end up waiting for and/or retiring the objects.
- * This might release the final reference (held by the active list)
- * and result in the object being freed from under us. This is
- * similar to the precautions the eviction code must take whilst
- * removing objects.
- *
- * Also note that although these lists do not hold a reference to
- * the object we can safely grab one here: The final object
- * unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
- * object on the bound_list with a reference count equals 0.
- */
- for (phase = phases; phase->list; phase++) {
- struct list_head still_in_list;
-
- if ((flags & phase->bit) == 0)
- continue;
-
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(phase->list)) {
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma, *v;
-
- obj = list_first_entry(phase->list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (flags & I915_SHRINK_PURGEABLE &&
- !i915_gem_object_is_purgeable(obj))
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- /* For the unbound phase, this should be a no-op! */
- list_for_each_entry_safe(vma, v,
- &obj->vma_list, vma_link)
- if (i915_vma_unbind(vma))
- break;
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, phase->list);
- }
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
- i915_gem_evict_everything(dev_priv->dev);
- return i915_gem_shrink(dev_priv, LONG_MAX,
- I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@@ -2458,10 +2377,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
ret = ring->add_request(ring);
if (ret)
return ret;
+
+ request->tail = intel_ring_get_tail(ringbuf);
}
request->head = request_start;
- request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
@@ -2492,6 +2412,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
list_add_tail(&request->client_list,
&file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
+
+ request->pid = get_pid(task_pid(current));
}
trace_i915_gem_request_add(request);
@@ -2572,6 +2494,8 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
list_del(&request->list);
i915_gem_request_remove_from_client(request);
+ put_pid(request->pid);
+
i915_gem_request_unreference(request);
}
@@ -2744,7 +2668,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
*/
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- struct intel_ringbuffer *ringbuf;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
@@ -2755,23 +2678,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
trace_i915_gem_request_retire(request);
- /* This is one of the few common intersection points
- * between legacy ringbuffer submission and execlists:
- * we need to tell them apart in order to find the correct
- * ringbuffer to which the request belongs to.
- */
- if (i915.enable_execlists) {
- struct intel_context *ctx = request->ctx;
- ringbuf = ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
-
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
*/
- ringbuf->last_retired_head = request->postfix;
+ request->ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request);
}
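The retire path above boils down to remembering, per ring buffer, how far the GPU head must have advanced once a request is known to be complete, now taken directly from request->ringbuf. A standalone sketch of that bookkeeping (types and names are illustrative, not the driver's):

#include <stdio.h>

struct ringbuf {
	unsigned int size;
	unsigned int tail;              /* where the CPU writes next */
	int last_retired_head;          /* last position the GPU is known to be past */
};

struct request {
	struct ringbuf *ringbuf;        /* each request remembers its ring buffer */
	unsigned int postfix;           /* ring offset just after its commands */
};

static void retire(struct request *req)
{
	/* the GPU signalled completion, so its head is at least at postfix */
	req->ringbuf->last_retired_head = (int)req->postfix;
}

int main(void)
{
	struct ringbuf rb = { .size = 4096, .tail = 256, .last_retired_head = -1 };
	struct request req = { .ringbuf = &rb, .postfix = 192 };

	retire(&req);
	printf("last retired head: %d\n", rb.last_retired_head);
	return 0;
}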
@@ -3516,9 +3428,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
unsigned alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+ uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3530,6 +3442,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3568,7 +3483,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+ vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+ i915_gem_obj_lookup_or_create_vma(obj, vm);
+
if (IS_ERR(vma))
goto err_unpin;
@@ -3598,6 +3515,17 @@ search_free:
if (ret)
goto err_remove_node;
+ /* allocate before insert / bind */
+ if (vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+ VM_TO_TRACE_NAME(vma->vm));
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ goto err_remove_node;
+ }
+
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level,
flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
@@ -3768,7 +3696,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -3950,7 +3878,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined)
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
bool was_pin_display;
@@ -3986,7 +3915,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+ ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+ view->type == I915_GGTT_VIEW_NORMAL ?
+ PIN_MAPPABLE : 0);
if (ret)
goto err_unpin_display;
@@ -4014,9 +3945,11 @@ err_unpin_display:
}
void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- i915_gem_object_ggtt_unpin(obj);
+ i915_gem_object_ggtt_unpin_view(obj, view);
+
obj->pin_display = is_pin_display(obj);
}
@@ -4083,7 +4016,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -4165,12 +4098,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
return false;
}
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
+ uint32_t alignment,
+ uint64_t flags)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
@@ -4186,17 +4119,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return -EINVAL;
+
+ vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+ i915_gem_obj_to_vma(obj, vm);
+
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (i915_vma_misplaced(vma, alignment, flags)) {
+ unsigned long offset;
+ offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+ i915_gem_obj_offset(obj, vm);
WARN(vma->pin_count,
- "bo is already pinned with incorrect alignment:"
+ "bo is already pinned in %s with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset_view(obj, vm, view->type),
+ ggtt_view ? "ggtt" : "ppgtt",
+ offset,
alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
@@ -4210,8 +4155,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
- flags, view);
+ /* In true PPGTT, bind has possibly changed PDEs, which
+ * means we must do a context switch before the GPU can
+ * accurately read some of the VMAs.
+ */
+ vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+ flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
@@ -4237,7 +4186,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
- mappable = (vma->node.start + obj->base.size <=
+ mappable = (vma->node.start + fence_size <=
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
@@ -4252,16 +4201,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
return 0;
}
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ return i915_gem_object_do_pin(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+ alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ if (WARN_ONCE(!view, "no view specified"))
+ return -EINVAL;
+
+ return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ alignment, flags | PIN_GLOBAL);
+}
+
void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
BUG_ON(!vma);
- BUG_ON(vma->pin_count == 0);
- BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+ WARN_ON(vma->pin_count == 0);
+ WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
- if (--vma->pin_count == 0)
+ if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
obj->pin_mappable = false;
}
@@ -4382,7 +4356,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+ if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -4557,15 +4531,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->ggtt_view.type == view->type)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
return vma;
+ }
+ return NULL;
+}
+
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct i915_vma *vma;
+
+ if (WARN_ONCE(!view, "no view specified"))
+ return ERR_PTR(-EINVAL);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma;
return NULL;
}
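
A small sketch of how the two lookup paths divide the work (illustrative only, not part of the patch; i915_ggtt_view_equal() is assumed to compare the view type plus any view-specific parameters and is defined outside this hunk):

static struct i915_vma *example_lookup_vma(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	/* The normal GGTT view goes through the plain vm lookup... */
	if (view->type == I915_GGTT_VIEW_NORMAL)
		return i915_gem_obj_to_vma(obj, i915_obj_to_ggtt(obj));

	/* ...anything else must name its view explicitly. */
	return i915_gem_obj_to_ggtt_view(obj, view);
}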
@@ -4612,10 +4604,6 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_retire_requests(dev);
- /* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_gem_evict_everything(dev);
-
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
@@ -4986,18 +4974,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
- I915_WRITE(MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- }
-
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- /* Old X drivers will take 0-2 for front, back, depth buffers */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- dev_priv->fence_reg_start = 3;
-
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
@@ -5005,6 +4983,10 @@ i915_gem_load(struct drm_device *dev)
else
dev_priv->num_fence_regs = 8;
+ if (intel_vgpu_active(dev))
+ dev_priv->num_fence_regs =
+ I915_READ(vgtif_reg(avail_rs.fence_num));
+
/* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
i915_gem_restore_fences(dev);
@@ -5014,13 +4996,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
- dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.shrinker);
-
- dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- register_oom_notifier(&dev_priv->mm.oom_notifier);
+ i915_gem_shrinker_init(dev_priv);
i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
@@ -5112,106 +5088,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
- if (!mutex_is_locked(mutex))
- return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+/* All the new VM stuff */
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
- if (to_i915(dev)->mm.shrinker_no_lock_stealing)
- return false;
+ WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
- *unlock = false;
- } else
- *unlock = true;
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
+ return vma->node.start;
+ }
- return true;
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
+ return -1;
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- int count = 0;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- count++;
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long count;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return 0;
-
- count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
- if (obj->pages_pin_count == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!i915_gem_obj_is_pinned(obj) &&
- obj->pages_pin_count == num_vma_bound(obj))
- count += obj->base.size >> PAGE_SHIFT;
- }
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma->node.start;
- return count;
+ WARN(1, "global vma for this object not found.\n");
+ return -1;
}
-/* All the new VM stuff */
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm && vma->ggtt_view.type == view)
- return vma->node.start;
-
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
}
- WARN(1, "%s vma for this object not found.\n",
- i915_is_ggtt(vm) ? "global" : "ppgtt");
- return -1;
+
+ return false;
}
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm &&
- vma->ggtt_view.type == view &&
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5239,118 +5179,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
BUG_ON(list_empty(&o->vma_list));
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
if (vma->vm == vm)
return vma->node.size;
-
+ }
return 0;
}
-static unsigned long
-i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- unsigned long freed;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return SHRINK_STOP;
-
- freed = i915_gem_shrink(dev_priv,
- sc->nr_to_scan,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- return freed;
-}
-
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed_pages;
- bool was_interruptible;
- bool unlock;
-
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
- schedule_timeout_killable(1);
- if (fatal_signal_pending(current))
- return NOTIFY_DONE;
- }
- if (timeout == 0) {
- pr_err("Unable to purge GPU memory due lock contention.\n");
- return NOTIFY_DONE;
- }
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- freed_pages = i915_gem_shrink_all(dev_priv);
-
- dev_priv->mm.interruptible = was_interruptible;
-
- /* Because we may be allocating inside our own driver, we cannot
- * assert that there are no objects with pinned pages that are not
- * being pointed to by hardware.
- */
- unbound = bound = pinned = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- if (!obj->base.filp) /* not backed by a freeable object */
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- unbound += obj->base.size;
- }
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->base.filp)
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- bound += obj->base.size;
+ if (vma->pin_count > 0)
+ return true;
}
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- if (freed_pages || unbound || bound)
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed_pages << PAGE_SHIFT, pinned);
- if (unbound || bound)
- pr_err("%lu and %lu bytes still available in the "
- "bound and unbound GPU page lists.\n",
- bound, unbound);
-
- *(unsigned long *)ptr += freed_pages;
- return NOTIFY_DONE;
+ return false;
}
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == ggtt &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- return vma;
-
- return NULL;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8603bf48d3ee..f3e84c44d009 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- /* In execlists mode we will unreference the context when the execlist
- * queue is cleared and the requests destroyed.
- */
- if (i915.enable_execlists)
+ if (i915.enable_execlists) {
+ struct intel_context *ctx;
+
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ intel_lr_context_reset(dev, ctx);
+ }
+
return;
+ }
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
@@ -565,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring,
return ret;
}
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+ struct intel_context *from,
+ struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (to->remap_slice)
+ return false;
+
+ if (to->ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &to->ppgtt->pd_dirty_rings))
+ return true;
+ } else if (dev_priv->mm.aliasing_ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (INTEL_INFO(ring->dev)->gen < 8)
+ return true;
+
+ if (ring != &dev_priv->ring[RCS])
+ return true;
+
+ return false;
+}
+
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+ u32 hw_flags)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (!IS_GEN8(ring->dev))
+ return false;
+
+ if (ring != &dev_priv->ring[RCS])
+ return false;
+
+ if (hw_flags & MI_RESTORE_INHIBIT)
+ return true;
+
+ return false;
+}
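
To make the split easier to follow, a condensed summary of when a switch_mm ends up being emitted (illustrative only, not part of the patch; it simply combines the two predicates above the way do_switch() does):

/* Pre-gen8, or a non-render ring: PP_DIR_BASE is loaded via LRI before
 * MI_SET_CONTEXT. On gen8 RCS the load is only needed after an inhibited
 * restore. do_switch() warns if both would ever fire for a single switch.
 */
static bool example_emits_switch_mm(struct intel_engine_cs *ring,
				    struct intel_context *to, u32 hw_flags)
{
	return needs_pd_load_pre(ring, to) ||
	       needs_pd_load_post(ring, to, hw_flags);
}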
+
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
{
@@ -580,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring,
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && !to->remap_slice)
+ if (should_skip_switch(ring, from, to))
return 0;
/* Trying to pin first makes error handling easier. */
@@ -598,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring,
*/
from = ring->last_context;
- if (to->ppgtt) {
+ if (needs_pd_load_pre(ring, to)) {
+ /* Older GENs and non-render rings still want the load first,
+ * "PP_DCLV followed by PP_DIR_BASE register through Load
+ * Register Immediate commands in Ring Buffer before submitting
+ * a context."*/
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
+
+ /* Doing a PD load always reloads the page dirs */
+ clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
}
if (ring != &dev_priv->ring[RCS]) {
@@ -633,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
}
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
+ /* NB: If we inhibit the restore, the context is not allowed to
+ * die because future work may end up depending on valid address
+ * space. This means we must enforce that a page table load
+ * occurs whenever the restore is inhibited. */
+ } else if (to->ppgtt &&
+ test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+ hw_flags |= MI_FORCE_RESTORE;
+
+ /* We should never emit switch_mm more than once */
+ WARN_ON(needs_pd_load_pre(ring, to) &&
+ needs_pd_load_post(ring, to, hw_flags));
ret = mi_set_context(ring, to, hw_flags);
if (ret)
goto unpin_out;
+ /* GEN8 does *not* require an explicit reload if the PDPs have been
+ * set up, and we do not wish to move them.
+ */
+ if (needs_pd_load_post(ring, to, hw_flags)) {
+ trace_switch_mm(ring, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ /* The hardware context switch is emitted, but we haven't
+ * actually changed the state - so it's probably safe to bail
+ * here. Still, let the user know something dangerous has
+ * happened.
+ */
+ if (ret) {
+ DRM_ERROR("Failed to change address space on context switch\n");
+ goto unpin_out;
+ }
+ }
+
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
@@ -677,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
i915_gem_context_unreference(from);
}
- uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ uninitialized = !to->legacy_hw_ctx.initialized;
to->legacy_hw_ctx.initialized = true;
done:
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82a1f4b57778..7998da27c500 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -230,6 +230,13 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &i915_dmabuf_ops;
+ exp_info.size = gem_obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gem_obj;
+
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -237,8 +244,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
- NULL);
+ return dma_buf_export(&exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e3a49d94da3a..d09e35ed9c9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
*
* This function is used by the object/vma binding code.
*
+ * Since this function is only used to free up virtual address space, it only
+ * ignores pinned vmas, and not objects whose backing storage itself is
+ * pinned. Hence obj->pages_pin_count does not protect against eviction.
+ *
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 38a742532c4f..a3190e793ed4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -251,7 +251,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
- !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -337,6 +336,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
return 0;
}
+static void
+clflush_write32(void *addr, uint32_t value)
+{
+ /* This is not a fast path, so KISS. */
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+ *(uint32_t *)addr = value;
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+}
+
+static int
+relocate_entry_clflush(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t page_offset = offset_in_page(reloc->offset);
+ uint64_t delta = (int)reloc->delta + target_offset;
+ char *vaddr;
+ int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
+ clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+ if (page_offset == 0) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ }
+
+ clflush_write32(vaddr + page_offset, upper_32_bits(delta));
+ }
+
+ kunmap_atomic(vaddr);
+
+ return 0;
+}
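
Condensed, the relocation write path now has three tiers; the sketch below mirrors the selection made in i915_gem_execbuffer_relocate_entry() further down in this diff (illustrative only, not part of the patch):

static int example_apply_reloc(struct drm_i915_gem_object *obj,
			       struct drm_i915_gem_relocation_entry *reloc,
			       uint64_t target_offset)
{
	if (use_cpu_reloc(obj))		/* LLC, CPU write domain or cacheable */
		return relocate_entry_cpu(obj, reloc, target_offset);
	if (obj->map_and_fenceable)	/* inside the mappable aperture */
		return relocate_entry_gtt(obj, reloc, target_offset);
	if (cpu_has_clflush)		/* uncached, outside the aperture */
		return relocate_entry_clflush(obj, reloc, target_offset);
	return -ENODEV;			/* mirrored by the WARN_ONCE below */
}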
+
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
@@ -426,8 +470,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(obj))
ret = relocate_entry_cpu(obj, reloc, target_offset);
- else
+ else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
+ else if (cpu_has_clflush)
+ ret = relocate_entry_clflush(obj, reloc, target_offset);
+ else {
+ WARN_ONCE(1, "Impossible case in relocation handling\n");
+ ret = -ENODEV;
+ }
if (ret)
return ret;
@@ -525,6 +575,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
return ret;
}
+static bool only_mappable_for_reloc(unsigned int flags)
+{
+ return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
+ __EXEC_OBJECT_NEEDS_MAP;
+}
+
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
@@ -536,14 +592,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
int ret;
flags = 0;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_GLOBAL | PIN_MAPPABLE;
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
- flags |= PIN_GLOBAL;
- if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
- flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ if (!drm_mm_node_allocated(&vma->node)) {
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+ flags |= PIN_GLOBAL | PIN_MAPPABLE;
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+ flags |= PIN_GLOBAL;
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+ flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ }
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+ if ((ret == -ENOSPC || ret == -E2BIG) &&
+ only_mappable_for_reloc(entry->flags))
+ ret = i915_gem_object_pin(obj, vma->vm,
+ entry->alignment,
+ flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
if (ret)
return ret;
@@ -605,13 +668,14 @@ eb_vma_misplaced(struct i915_vma *vma)
vma->node.start & (entry->alignment - 1))
return true;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
- return true;
-
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
+ /* avoid costly ping-pong once a batch bo has ended up non-mappable */
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+ return !only_mappable_for_reloc(entry->flags);
+
return false;
}
@@ -971,7 +1035,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring);
+ intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1076,16 +1140,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
u32 batch_len,
- bool is_master,
- u32 *flags)
+ bool is_master)
{
struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
struct drm_i915_gem_object *shadow_batch_obj;
- bool need_reloc = false;
+ struct i915_vma *vma;
int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
- batch_obj->base.size);
+ PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
@@ -1095,40 +1158,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
batch_start_offset,
batch_len,
is_master);
- if (ret) {
- if (ret == -EACCES)
- return batch_obj;
- } else {
- struct i915_vma *vma;
+ if (ret)
+ goto err;
- memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+ ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+ if (ret)
+ goto err;
- vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
- vma->exec_entry = shadow_exec_entry;
- vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
- drm_gem_object_reference(&shadow_batch_obj->base);
- i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
- list_add_tail(&vma->exec_list, &eb->vmas);
+ memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
- shadow_batch_obj->base.pending_read_domains =
- batch_obj->base.pending_read_domains;
+ vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+ vma->exec_entry = shadow_exec_entry;
+ vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+ drm_gem_object_reference(&shadow_batch_obj->base);
+ list_add_tail(&vma->exec_list, &eb->vmas);
- /*
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically
- * don't want that set when the command parser is
- * enabled.
- *
- * FIXME: with aliasing ppgtt, buffers that should only
- * be in ggtt still end up in the aliasing ppgtt. remove
- * this check when that is fixed.
- */
- if (USES_FULL_PPGTT(dev))
- *flags |= I915_DISPATCH_SECURE;
- }
+ shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+
+ return shadow_batch_obj;
- return ret ? ERR_PTR(ret) : shadow_batch_obj;
+err:
+ if (ret == -EACCES) /* unhandled chained batch */
+ return batch_obj;
+ else
+ return ERR_PTR(ret);
}
int
@@ -1138,7 +1191,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags)
+ u64 exec_start, u32 dispatch_flags)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1198,6 +1251,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ret)
goto error;
+ if (ctx->ppgtt)
+ WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+ "%s didn't clear reload\n", ring->name);
+ else if (dev_priv->mm.aliasing_ppgtt)
+ WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
+ (1<<ring->id), "%s didn't clear reload\n", ring->name);
+
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
@@ -1266,19 +1326,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
- flags);
+ dispatch_flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
- flags);
+ dispatch_flags);
if (ret)
return ret;
}
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1353,7 +1413,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset;
- u32 flags;
+ u32 dispatch_flags;
int ret;
bool need_relocs;
@@ -1364,15 +1424,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
- flags = 0;
+ dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (!file->is_master || !capable(CAP_SYS_ADMIN))
return -EPERM;
- flags |= I915_DISPATCH_SECURE;
+ dispatch_flags |= I915_DISPATCH_SECURE;
}
if (args->flags & I915_EXEC_IS_PINNED)
- flags |= I915_DISPATCH_PINNED;
+ dispatch_flags |= I915_DISPATCH_PINNED;
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1494,12 +1554,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
args->batch_len,
- file->is_master,
- &flags);
+ file->is_master);
if (IS_ERR(batch_obj)) {
ret = PTR_ERR(batch_obj);
goto err;
}
+
+ /*
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically
+ * don't want that set when the command parser is
+ * enabled.
+ *
+ * FIXME: with aliasing ppgtt, buffers that should only
+ * be in ggtt still end up in the aliasing ppgtt. remove
+ * this check when that is fixed.
+ */
+ if (USES_FULL_PPGTT(dev))
+ dispatch_flags |= I915_DISPATCH_SECURE;
+
+ exec_start = 0;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1507,14 +1582,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
- if (flags & I915_DISPATCH_SECURE) {
+ if (dispatch_flags & I915_DISPATCH_SECURE) {
/*
* So on first glance it looks freaky that we pin the batch here
* outside of the reservation loop. But:
* - The batch is already pinned into the relevant ppgtt, so we
* already have the backing storage fully allocated.
* - No other BO uses the global gtt (well contexts, but meh),
- * so we don't really have issues with mutliple objects not
+ * so we don't really have issues with multiple objects not
* fitting due to fragmentation.
* So this is actually safe.
*/
@@ -1527,7 +1602,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
exec_start += i915_gem_obj_offset(batch_obj, vm);
ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
- &eb->vmas, batch_obj, exec_start, flags);
+ &eb->vmas, batch_obj, exec_start,
+ dispatch_flags);
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1535,7 +1611,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* needs to be adjusted to also track the ggtt batch vma properly as
* active.
*/
- if (flags & I915_DISPATCH_SECURE)
+ if (dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
err:
/* the request owns the ref now */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dccdc8aad2e2..0239fbff7bf7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -27,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -66,8 +67,9 @@
* i915_ggtt_view_type and struct i915_ggtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _view suffix. They take the struct i915_ggtt_view parameter
- * encapsulating all metadata required to implement a view.
+ * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
* globally const i915_ggtt_view_normal singleton instance exists. All old core
@@ -91,6 +93,9 @@
*/
const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_rotated = {
+ .type = I915_GGTT_VIEW_ROTATED
+};
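
A minimal sketch of what the second view is for (illustrative only, not part of the patch; it only shows the intended API shape, using the helpers added to i915_gem.c earlier in this diff):

static int example_bind_both_views(struct drm_i915_gem_object *obj)
{
	int ret;

	/* One object, two GGTT mappings: linear and rotated. */
	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 0, 0);
	if (ret)
		return ret;

	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_rotated, 0, 0);
	if (ret)
		i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);

	return ret;
}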
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -103,6 +108,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+ if (intel_vgpu_active(dev))
+ has_full_ppgtt = false; /* emulation is too hard */
+
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
@@ -138,17 +146,16 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return has_aliasing_ppgtt ? 1 : 0;
}
-
static void ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
-static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
{
- gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
switch (level) {
@@ -166,11 +173,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
- gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
@@ -179,11 +186,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
return pde;
}
-static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -201,11 +208,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -225,11 +232,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags)
+static gen6_pte_t byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -241,11 +248,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -254,11 +261,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -275,6 +282,162 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+#define i915_dma_unmap_single(px, dev) \
+ __i915_dma_unmap_single((px)->daddr, dev)
+
+static inline void __i915_dma_unmap_single(dma_addr_t daddr,
+ struct drm_device *dev)
+{
+ struct device *device = &dev->pdev->dev;
+
+ dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+}
+
+/**
+ * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
+ * @px: Page table/dir/etc to get a DMA map for
+ * @dev: drm device
+ *
+ * Page table allocations are unified across all gens. They always require a
+ * single 4k allocation, as well as a DMA mapping. If we keep the structs
+ * symmetric here, the simple macro covers us for every page table type.
+ *
+ * Return: 0 if success.
+ */
+#define i915_dma_map_single(px, dev) \
+ i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+
+static inline int i915_dma_map_page_single(struct page *page,
+ struct drm_device *dev,
+ dma_addr_t *daddr)
+{
+ struct device *device = &dev->pdev->dev;
+
+ *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device, *daddr))
+ return -ENOMEM;
+
+ return 0;
+}
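
A short usage sketch of the mapping pair (illustrative only, not part of the patch; any structure with a page pointer and a daddr field works, which is the point of keeping the structs symmetric):

static int example_map_pt(struct i915_page_table_entry *pt,
			  struct drm_device *dev)
{
	int ret;

	ret = i915_dma_map_single(pt, dev);	/* fills in pt->daddr */
	if (ret)
		return ret;

	/* ... pt->daddr can now be written into a PDE ... */

	i915_dma_unmap_single(pt, dev);
	return 0;
}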
+
+static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+ struct drm_device *dev)
+{
+ if (WARN_ON(!pt->page))
+ return;
+
+ i915_dma_unmap_single(pt, dev);
+ __free_page(pt->page);
+ kfree(pt->used_ptes);
+ kfree(pt);
+}
+
+static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+{
+ struct i915_page_table_entry *pt;
+ const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+ GEN8_PTES : GEN6_PTES;
+ int ret = -ENOMEM;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ return ERR_PTR(-ENOMEM);
+
+ pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+ GFP_KERNEL);
+
+ if (!pt->used_ptes)
+ goto fail_bitmap;
+
+ pt->page = alloc_page(GFP_KERNEL);
+ if (!pt->page)
+ goto fail_page;
+
+ ret = i915_dma_map_single(pt, dev);
+ if (ret)
+ goto fail_dma;
+
+ return pt;
+
+fail_dma:
+ __free_page(pt->page);
+fail_page:
+ kfree(pt->used_ptes);
+fail_bitmap:
+ kfree(pt);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * alloc_pt_range() - Allocate multiple page tables
+ * @pd: The page directory which will have at least @count entries
+ * available to point to the allocated page tables.
+ * @pde: First page directory entry for which we are allocating.
+ * @count: Number of pages to allocate.
+ * @dev: DRM device.
+ *
+ * Allocates multiple page table pages and sets the appropriate entries in the
+ * page table structure within the page directory. Function cleans up after
+ * itself on any failures.
+ *
+ * Return: 0 if allocation succeeded.
+ */
+static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
+ struct drm_device *dev)
+{
+ int i, ret;
+
+ /* 512 is the maximum number of page tables per page_directory on any platform. */
+ if (WARN_ON(pde + count > I915_PDES))
+ return -EINVAL;
+
+ for (i = pde; i < pde + count; i++) {
+ struct i915_page_table_entry *pt = alloc_pt_single(dev);
+
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto err_out;
+ }
+ WARN(pd->page_table[i],
+ "Leaking page directory entry %d (%p)\n",
+ i, pd->page_table[i]);
+ pd->page_table[i] = pt;
+ }
+
+ return 0;
+
+err_out:
+ while (i-- > pde)
+ unmap_and_free_pt(pd->page_table[i], dev);
+ return ret;
+}
+
+static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+{
+ if (pd->page) {
+ __free_page(pd->page);
+ kfree(pd);
+ }
+}
+
+static struct i915_page_directory_entry *alloc_pd_single(void)
+{
+ struct i915_page_directory_entry *pd;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pd->page) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
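
Put together, a directory is typically built like this (illustrative only, not part of the patch; the real callers below spread these steps across the alloc/setup phases):

static struct i915_page_directory_entry *example_build_pd(struct drm_device *dev)
{
	struct i915_page_directory_entry *pd;
	int ret;

	pd = alloc_pd_single();
	if (IS_ERR(pd))
		return pd;

	/* Fill every PDE slot; alloc_pt_range() cleans up after itself on
	 * failure, so only the directory needs freeing here. */
	ret = alloc_pt_range(pd, 0, I915_PDES, dev);
	if (ret) {
		unmap_and_free_pd(pd);
		return ERR_PTR(ret);
	}

	return pd;
}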
+
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val)
@@ -304,10 +467,10 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int i, ret;
/* bit of a hack to find the actual last used pd */
- int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+ int used_pd = ppgtt->num_pd_entries / I915_PDES;
for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pd_dma_addr[i];
+ dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
@@ -323,7 +486,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen8_pte_t *pt_vaddr, scratch_pte;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -334,11 +497,28 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+ struct i915_page_directory_entry *pd;
+ struct i915_page_table_entry *pt;
+ struct page *page_table;
+
+ if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+ continue;
+
+ pd = ppgtt->pdp.page_directory[pdpe];
+
+ if (WARN_ON(!pd->page_table[pde]))
+ continue;
+
+ pt = pd->page_table[pde];
+
+ if (WARN_ON(!pt->page))
+ continue;
+
+ page_table = pt->page;
last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES_PER_PAGE)
- last_pte = GEN8_PTES_PER_PAGE;
+ if (last_pte > GEN8_PTES)
+ last_pte = GEN8_PTES;
pt_vaddr = kmap_atomic(page_table);
@@ -352,7 +532,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
pte = 0;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
@@ -366,7 +546,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr;
+ gen8_pte_t *pt_vaddr;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -375,21 +555,26 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+ if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
break;
- if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+ if (pt_vaddr == NULL) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+ struct i915_page_table_entry *pt = pd->page_table[pde];
+ struct page *page_table = pt->page;
+
+ pt_vaddr = kmap_atomic(page_table);
+ }
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
- if (++pte == GEN8_PTES_PER_PAGE) {
+ if (++pte == GEN8_PTES) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
@@ -403,29 +588,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct page **pt_pages)
+static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
{
int i;
- if (pt_pages == NULL)
+ if (!pd->page)
return;
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
- if (pt_pages[i])
- __free_pages(pt_pages[i], 0);
+ for (i = 0; i < I915_PDES; i++) {
+ if (WARN_ON(!pd->page_table[i]))
+ continue;
+
+ unmap_and_free_pt(pd->page_table[i], dev);
+ pd->page_table[i] = NULL;
+ }
}
-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
- kfree(ppgtt->gen8_pt_pages[i]);
- kfree(ppgtt->gen8_pt_dma_addr[i]);
- }
+ if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+ continue;
- __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+ gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+ unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+ }
}
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -436,14 +625,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
/* TODO: In the future we'll support sparse mappings, so this
* will have to change. */
- if (!ppgtt->pd_dma_addr[i])
+ if (!ppgtt->pdp.page_directory[i]->daddr)
continue;
- pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+ pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ for (j = 0; j < I915_PDES; j++) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+ struct i915_page_table_entry *pt;
+ dma_addr_t addr;
+
+ if (WARN_ON(!pd->page_table[j]))
+ continue;
+
+ pt = pd->page_table[j];
+ addr = pt->daddr;
+
if (addr)
pci_unmap_page(hwdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -460,86 +658,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_free(ppgtt);
}
-static struct page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
- struct page **pt_pages;
- int i;
-
- pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
- if (!pt_pages)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
- pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!pt_pages[i])
- goto bail;
- }
-
- return pt_pages;
-
-bail:
- gen8_free_page_tables(pt_pages);
- kfree(pt_pages);
- return ERR_PTR(-ENOMEM);
-}
-
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
-{
- struct page **pt_pages[GEN8_LEGACY_PDPS];
int i, ret;
- for (i = 0; i < max_pdp; i++) {
- pt_pages[i] = __gen8_alloc_page_tables();
- if (IS_ERR(pt_pages[i])) {
- ret = PTR_ERR(pt_pages[i]);
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
+ 0, I915_PDES, ppgtt->base.dev);
+ if (ret)
goto unwind_out;
- }
}
- /* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
- * "atomic" - for cleanup purposes.
- */
- for (i = 0; i < max_pdp; i++)
- ppgtt->gen8_pt_pages[i] = pt_pages[i];
-
return 0;
unwind_out:
- while (i--) {
- gen8_free_page_tables(pt_pages[i]);
- kfree(pt_pages[i]);
- }
+ while (i--)
+ gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
- return ret;
+ return -ENOMEM;
}
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
{
int i;
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->gen8_pt_dma_addr[i])
- return -ENOMEM;
+ for (i = 0; i < max_pdp; i++) {
+ ppgtt->pdp.page_directory[i] = alloc_pd_single();
+ if (IS_ERR(ppgtt->pdp.page_directory[i]))
+ goto unwind_out;
}
- return 0;
-}
+ ppgtt->num_pd_pages = max_pdp;
+ BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
-{
- ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
- if (!ppgtt->pd_pages)
- return -ENOMEM;
+ return 0;
- ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
- BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+unwind_out:
+ while (i--)
+ unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
- return 0;
+ return -ENOMEM;
}
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -551,18 +710,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
- if (ret) {
- __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
- return ret;
- }
+ ret = gen8_ppgtt_allocate_page_tables(ppgtt);
+ if (ret)
+ goto err_out;
- ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+ ppgtt->num_pd_entries = max_pdp * I915_PDES;
- ret = gen8_ppgtt_allocate_dma(ppgtt);
- if (ret)
- gen8_ppgtt_free(ppgtt);
+ return 0;
+err_out:
+ gen8_ppgtt_free(ppgtt);
return ret;
}
@@ -573,14 +730,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
int ret;
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
- &ppgtt->pd_pages[pd], 0,
+ ppgtt->pdp.page_directory[pd]->page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
if (ret)
return ret;
- ppgtt->pd_dma_addr[pd] = pd_addr;
+ ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
return 0;
}
@@ -590,22 +747,23 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
const int pt)
{
dma_addr_t pt_addr;
- struct page *p;
+ struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
+ struct i915_page_table_entry *ptab = pdir->page_table[pt];
+ struct page *p = ptab->page;
int ret;
- p = ppgtt->gen8_pt_pages[pd][pt];
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
if (ret)
return ret;
- ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+ ptab->daddr = pt_addr;
return 0;
}
-/**
+/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
* PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
@@ -618,26 +776,30 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ const int min_pt_pages = I915_PDES * max_pdp;
int i, j, ret;
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
- /* 1. Do all our allocations for page directories and page tables. */
- ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ /* 1. Do all our allocations for page directories and page tables.
+ * We allocate more than was asked so that we can point the unused parts
+ * to valid entries that point to the scratch page. Dynamic page tables
+ * will fix this eventually.
+ */
+ ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
if (ret)
return ret;
/*
* 2. Create DMA mappings for the page directories and page tables.
*/
- for (i = 0; i < max_pdp; i++) {
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
if (ret)
goto bail;
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ for (j = 0; j < I915_PDES; j++) {
ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
if (ret)
goto bail;
@@ -652,11 +814,13 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again.
*/
- for (i = 0; i < max_pdp; i++) {
- gen8_ppgtt_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
- dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+ struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+ gen8_pde_t *pd_vaddr;
+ pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
+ for (j = 0; j < I915_PDES; j++) {
+ struct i915_page_table_entry *pt = pd->page_table[j];
+ dma_addr_t addr = pt->daddr;
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
@@ -670,9 +834,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ /* This is the area that we advertise as usable for the caller */
+ ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
+
+ /* Point every PTE at the valid scratch page, including the space above what was requested */
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
+ true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
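
As a quick sanity check on the new bookkeeping, with a full-size request (max_pdp == GEN8_LEGACY_PDPES == 4) and the constants used above (I915_PDES == 512, GEN8_PTES == 512, PAGE_SIZE == 4096):

	ppgtt->base.total = 4 * 512 * 512 * 4096 bytes = 4 GiB

which matches the "each PDP represents 1GB" comment above gen8_ppgtt_init(), while the clear_range() call covers everything that was actually allocated (ppgtt->num_pd_pages directories), which may be more than was requested.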
@@ -691,22 +860,23 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
- gen6_gtt_pte_t __iomem *pd_addr;
- gen6_gtt_pte_t scratch_pte;
+ gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
uint32_t pd_entry;
int pte, pde;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
- pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+ pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
- ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+ ppgtt->pd.pd_offset,
+ ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
u32 expected;
- gen6_gtt_pte_t *pt_vaddr;
- dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+ gen6_pte_t *pt_vaddr;
+ dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
pd_entry = readl(pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -717,10 +887,10 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
- for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+ for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
- (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+ (pde * PAGE_SIZE * GEN6_PTES) +
(pte * PAGE_SIZE);
int i;
bool found = false;
@@ -743,33 +913,43 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
}
-static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
+/* Write pde (index) from the page directory @pd to the page table @pt */
+static void gen6_write_pde(struct i915_page_directory_entry *pd,
+ const int pde, struct i915_page_table_entry *pt)
{
- struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
- gen6_gtt_pte_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
+ /* Caller needs to make sure the write completes if necessary */
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(pd, struct i915_hw_ppgtt, pd);
+ u32 pd_entry;
- WARN_ON(ppgtt->pd_offset & 0x3f);
- pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
- ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+ pd_entry |= GEN6_PDE_VALID;
- pt_addr = ppgtt->pt_dma_addr[i];
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
+ writel(pd_entry, ppgtt->pd_addr + pde);
+}
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
+/* Write all the page tables found in the ppgtt structure to incrementing page
+ * directories. */
+static void gen6_write_page_range(struct drm_i915_private *dev_priv,
+ struct i915_page_directory_entry *pd,
+ uint32_t start, uint32_t length)
+{
+ struct i915_page_table_entry *pt;
+ uint32_t pde, temp;
+
+ gen6_for_each_pde(pt, pd, start, length, temp, pde)
+ gen6_write_pde(pd, pde, pt);
+
+ /* Make sure the write is complete before other code can use this page
+ * table. This is also required for WC-mapped PTEs */
+ readl(dev_priv->gtt.gsm);
}
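
For completeness, a sketch of how a caller publishes a freshly wired-up range (illustrative only, not part of the patch; gen6_alloc_va_range() below performs the same per-PDE writes itself so it can also track which PDEs are new):

static void example_publish_range(struct drm_i915_private *dev_priv,
				  struct i915_hw_ppgtt *ppgtt,
				  uint32_t start, uint32_t length)
{
	/* Writes one PDE per page table covering [start, start + length)
	 * and posts a read on the GSM so WC writes land before use. */
	gen6_write_page_range(dev_priv, &ppgtt->pd, start, length);
}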
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- BUG_ON(ppgtt->pd_offset & 0x3f);
+ BUG_ON(ppgtt->pd.pd_offset & 0x3f);
- return (ppgtt->pd_offset / 64) << 16;
+ return (ppgtt->pd.pd_offset / 64) << 16;
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -797,6 +977,16 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
+static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ return 0;
+}
+
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring)
{
@@ -908,21 +1098,21 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen6_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
- if (last_pte > I915_PPGTT_PT_ENTRIES)
- last_pte = I915_PPGTT_PT_ENTRIES;
+ if (last_pte > GEN6_PTES)
+ last_pte = GEN6_PTES;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
@@ -942,22 +1132,22 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr;
+ gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned act_pte = first_entry % GEN6_PTES;
struct sg_page_iter sg_iter;
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
- if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+ if (++act_pte == GEN6_PTES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
act_pt++;
@@ -968,26 +1158,134 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+/* PDE TLBs are a pain to invalidate pre GEN8. It requires a context reload. If we
+ * are switching between contexts with the same LRCA, we also must do a force
+ * restore.
+ */
+static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ /* Force every ring to reload its page directories on the next switch */
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table_entry *pt)
{
+ gen6_pte_t *pt_vaddr, scratch_pte;
int i;
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
+ WARN_ON(vm->scratch.addr == 0);
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr,
+ I915_CACHE_LLC, true, 0);
+
+ pt_vaddr = kmap_atomic(pt->page);
+
+ for (i = 0; i < GEN6_PTES; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+}
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ DECLARE_BITMAP(new_page_tables, I915_PDES);
+ struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_page_table_entry *pt;
+ const uint32_t start_save = start, length_save = length;
+ uint32_t pde, temp;
+ int ret;
+
+ WARN_ON(upper_32_bits(start));
+
+ bitmap_zero(new_page_tables, I915_PDES);
+
+ /* The allocation is done in two stages so that we can bail out with
+ * a minimal amount of pain. The first stage finds new page tables that
+ * need allocation. The second stage marks the PTEs in use within the
+ * page tables.
+ */
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ if (pt != ppgtt->scratch_pt) {
+ WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
+ continue;
+ }
+
+ /* We've already allocated a page table */
+ WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
+
+ pt = alloc_pt_single(dev);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto unwind_out;
+ }
+
+ gen6_initialize_pt(vm, pt);
+
+ ppgtt->pd.page_table[pde] = pt;
+ set_bit(pde, new_page_tables);
+ trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
}
+
+ start = start_save;
+ length = length_save;
+
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
+
+ bitmap_zero(tmp_bitmap, GEN6_PTES);
+ bitmap_set(tmp_bitmap, gen6_pte_index(start),
+ gen6_pte_count(start, length));
+
+ if (test_and_clear_bit(pde, new_page_tables))
+ gen6_write_pde(&ppgtt->pd, pde, pt);
+
+ trace_i915_page_table_entry_map(vm, pde, pt,
+ gen6_pte_index(start),
+ gen6_pte_count(start, length),
+ GEN6_PTES);
+ bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
+ GEN6_PTES);
+ }
+
+ WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
+
+ /* Make sure write is complete before other code can use this page
+ * table. Also required for WC mapped PTEs. */
+ readl(dev_priv->gtt.gsm);
+
+ mark_tlbs_dirty(ppgtt);
+ return 0;
+
+unwind_out:
+ for_each_set_bit(pde, new_page_tables, I915_PDES) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
+
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+ unmap_and_free_pt(pt, vm->dev);
+ }
+
+ mark_tlbs_dirty(ppgtt);
+ return ret;
}
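The unwind_out path above relies on the new_page_tables bitmap so that a failed
allocation only frees the page tables created by this call and leaves tables
that already existed untouched. A minimal, self-contained userspace sketch of
that allocate-then-roll-back pattern, using a plain bool array in place of the
kernel bitmap helpers (the table-of-pointers layout and sizes are invented for
illustration only):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NPDES 8

/* Stand-ins for the driver's page tables: NULL means "not yet backed". */
static void *tables[NPDES];

static int alloc_range(int first, int count)
{
	bool newly_allocated[NPDES] = { false };
	int i;

	/* Stage 1: allocate anything missing, remembering what we created. */
	for (i = first; i < first + count; i++) {
		if (tables[i])
			continue;	/* already backed, nothing to undo later */
		tables[i] = malloc(64);
		if (!tables[i])
			goto unwind;
		newly_allocated[i] = true;
	}

	/* Stage 2 would mark the PTEs in use; elided in this sketch. */
	return 0;

unwind:
	/* Free only the tables this call created, like the unwind_out path. */
	for (i = first; i < first + count; i++) {
		if (newly_allocated[i]) {
			free(tables[i]);
			tables[i] = NULL;
		}
	}
	return -1;
}

int main(void)
{
	printf("alloc_range(2, 4) -> %d\n", alloc_range(2, 4));
	return 0;
}

Tracking only what this call created keeps the error path safe with respect to
page tables that earlier callers already own.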
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
+
+ if (pt != ppgtt->scratch_pt)
+ unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+ }
+
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ unmap_and_free_pd(&ppgtt->pd);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -997,7 +1295,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_unmap_pages(ppgtt);
gen6_ppgtt_free(ppgtt);
}
@@ -1013,6 +1310,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->scratch_pt))
+ return PTR_ERR(ppgtt->scratch_pt);
+
+ gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
@@ -1026,88 +1329,43 @@ alloc:
0, dev_priv->gtt.base.total,
0);
if (ret)
- return ret;
+ goto err_out;
retried = true;
goto alloc;
}
- if (ppgtt->node.start < dev_priv->gtt.mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
-
- ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
- return ret;
-}
-
-static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
- int i;
-
- ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
- GFP_KERNEL);
+ if (ret)
+ goto err_out;
- if (!ppgtt->pt_pages)
- return -ENOMEM;
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!ppgtt->pt_pages[i]) {
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
- }
+ if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->num_pd_entries = I915_PDES;
return 0;
+
+err_out:
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ return ret;
}
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
- int ret;
-
- ret = gen6_ppgtt_allocate_page_directories(ppgtt);
- if (ret)
- return ret;
-
- ret = gen6_ppgtt_allocate_page_tables(ppgtt);
- if (ret) {
- drm_mm_remove_node(&ppgtt->node);
- return ret;
- }
-
- ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr) {
- drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_free(ppgtt);
- return -ENOMEM;
- }
-
- return 0;
+ return gen6_ppgtt_allocate_page_directories(ppgtt);
}
-static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
+ uint64_t start, uint64_t length)
{
- struct drm_device *dev = ppgtt->base.dev;
- int i;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
- gen6_ppgtt_unmap_pages(ppgtt);
- return -EIO;
- }
-
- ppgtt->pt_dma_addr[i] = pt_addr;
- }
+ struct i915_page_table_entry *unused;
+ uint32_t pde, temp;
- return 0;
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1123,40 +1381,57 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
} else
BUG();
+ if (intel_vgpu_active(dev))
+ ppgtt->switch_mm = vgpu_mm_switch;
+
ret = gen6_ppgtt_alloc(ppgtt);
if (ret)
return ret;
- ret = gen6_ppgtt_setup_page_tables(ppgtt);
- if (ret) {
- gen6_ppgtt_free(ppgtt);
- return ret;
+ if (aliasing) {
+ /* preallocate all pts */
+ ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+ ppgtt->base.dev);
+
+ if (ret) {
+ gen6_ppgtt_cleanup(&ppgtt->base);
+ return ret;
+ }
}
+ ppgtt->base.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
+ ppgtt->pd.pd_offset =
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+
+ if (aliasing)
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ else
+ gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
- gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd_offset << 10);
+ ppgtt->pd.pd_offset << 10);
return 0;
}
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
+ bool aliasing)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1164,7 +1439,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
- return gen6_ppgtt_init(ppgtt);
+ return gen6_ppgtt_init(ppgtt, aliasing);
else
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
@@ -1173,7 +1448,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- ret = __hw_ppgtt_init(dev, ppgtt);
+ ret = __hw_ppgtt_init(dev, ppgtt, false);
if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
@@ -1420,15 +1695,20 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- /* TODO: Perhaps it shouldn't be gen6 specific */
- if (i915_is_ggtt(vm)) {
- if (dev_priv->mm.aliasing_ppgtt)
- gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
- continue;
- }
+ if (USES_PPGTT(dev)) {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
+
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt,
+ base);
+
+ if (i915_is_ggtt(vm))
+ ppgtt = dev_priv->mm.aliasing_ppgtt;
- gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
+ gen6_write_page_range(dev_priv, &ppgtt->pd,
+ 0, ppgtt->base.total);
+ }
}
i915_ggtt_flush(dev_priv);
@@ -1447,7 +1727,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
-static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
@@ -1464,8 +1744,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen8_gtt_pte_t __iomem *gtt_entries =
- (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t __iomem *gtt_entries =
+ (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -1510,8 +1790,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen6_gtt_pte_t __iomem *gtt_entries =
- (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t __iomem *gtt_entries =
+ (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -1549,8 +1829,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t scratch_pte, __iomem *gtt_base =
+ (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1575,8 +1855,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1633,11 +1913,15 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ struct sg_table *pages = obj->pages;
/* Currently applicable only to VLV */
if (obj->gt_ro)
flags |= PTE_READ_ONLY;
+ if (i915_is_ggtt(vma->vm))
+ pages = vma->ggtt_view.pages;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1652,7 +1936,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
+ vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@@ -1663,8 +1947,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base,
- vma->ggtt_view.pages,
+ appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@@ -1753,6 +2036,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
/* Subtract the guard page ... */
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+
+ dev_priv->gtt.base.start = start;
+ dev_priv->gtt.base.total = end - start;
+
+ if (intel_vgpu_active(dev)) {
+ ret = intel_vgt_balloon(dev);
+ if (ret)
+ return ret;
+ }
+
if (!HAS_LLC(dev))
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
@@ -1772,9 +2065,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
}
- dev_priv->gtt.base.start = start;
- dev_priv->gtt.base.total = end - start;
-
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -1793,9 +2083,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
if (!ppgtt)
return -ENOMEM;
- ret = __hw_ppgtt_init(dev, ppgtt);
- if (ret != 0)
+ ret = __hw_ppgtt_init(dev, ppgtt, true);
+ if (ret) {
+ kfree(ppgtt);
return ret;
+ }
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
@@ -1826,6 +2118,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
}
if (drm_mm_initialized(&vm->mm)) {
+ if (intel_vgpu_active(dev))
+ intel_vgt_deballoon();
+
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
@@ -2078,7 +2373,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
@@ -2123,7 +2418,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
ret = ggtt_probe_common(dev, gtt_size);
@@ -2228,11 +2523,16 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+static struct i915_vma *
+__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view)
{
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct i915_vma *vma;
+
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -2241,10 +2541,11 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
- vma->ggtt_view = *view;
if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
+ vma->ggtt_view = *ggtt_view;
+
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
} else {
@@ -2253,6 +2554,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
} else {
BUG_ON(!i915_is_ggtt(vm));
+ vma->ggtt_view = *ggtt_view;
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
}
@@ -2265,38 +2567,170 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(!view))
+ return ERR_PTR(-EINVAL);
+
+ vma = i915_gem_obj_to_ggtt_view(obj, view);
+
+ if (IS_ERR(vma))
+ return vma;
+
if (!vma)
- vma = __i915_gem_vma_create(obj, vm, view);
+ vma = __i915_gem_vma_create(obj, ggtt, view);
return vma;
+
+}
+
+static void
+rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
+ struct sg_table *st)
+{
+ unsigned int column, row;
+ unsigned int src_idx;
+ struct scatterlist *sg = st->sgl;
+
+ st->nents = 0;
+
+ for (column = 0; column < width; column++) {
+ src_idx = width * (height - 1) + column;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /* We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * All we need are the DMA addresses.
+ */
+ sg_set_page(sg, NULL, PAGE_SIZE, 0);
+ sg_dma_address(sg) = in[src_idx];
+ sg_dma_len(sg) = PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= width;
+ }
+ }
+}
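rotate_pages() walks the source pages column by column, starting from the
bottom row, so the destination scatterlist describes the framebuffer rotated by
90 degrees. A self-contained sketch of the same index arithmetic, printing which
source page lands in each destination slot (the 4x3 grid size is arbitrary):

#include <stdio.h>

int main(void)
{
	const unsigned int width = 4, height = 3;
	unsigned int column, row, src_idx;

	/* Same walk as rotate_pages(): columns outer, rows inner, bottom-up. */
	for (column = 0; column < width; column++) {
		src_idx = width * (height - 1) + column;
		for (row = 0; row < height; row++) {
			printf("dst %2u <- src %2u\n",
			       column * height + row, src_idx);
			src_idx -= width;
		}
	}
	return 0;
}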
+
+static struct sg_table *
+intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+ unsigned long size, pages, rot_pages;
+ struct sg_page_iter sg_iter;
+ unsigned long i;
+ dma_addr_t *page_addr_list;
+ struct sg_table *st;
+ unsigned int tile_pitch, tile_height;
+ unsigned int width_pages, height_pages;
+ int ret = -ENOMEM;
+
+ pages = obj->base.size / PAGE_SIZE;
+
+ /* Calculate tiling geometry. */
+ tile_height = intel_tile_height(dev, rot_info->pixel_format,
+ rot_info->fb_modifier);
+ tile_pitch = PAGE_SIZE / tile_height;
+ width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
+ height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
+ rot_pages = width_pages * height_pages;
+ size = rot_pages * PAGE_SIZE;
+
+ /* Allocate a temporary list of source pages for random access. */
+ page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+ if (!page_addr_list)
+ return ERR_PTR(ret);
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ /* Populate source page list from the object. */
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
+ i++;
+ }
+
+ /* Rotate the pages. */
+ rotate_pages(page_addr_list, width_pages, height_pages, st);
+
+ DRM_DEBUG_KMS(
+ "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
+ size, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+
+ drm_free_large(page_addr_list);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ drm_free_large(page_addr_list);
+
+ DRM_DEBUG_KMS(
+ "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
+ size, ret, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+ return ERR_PTR(ret);
}
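The geometry above sizes the rotated mapping from the tile shape: tile_pitch is
PAGE_SIZE / tile_height, and the page counts are round-ups of the framebuffer
pitch and height. A small sketch of that arithmetic; the pitch, height, and
8-row tile used here are made-up example values, not taken from the patch:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical framebuffer: 3000-byte pitch, 1080 rows, 8-row tiles. */
	unsigned int pitch = 3000, fb_height = 1080, tile_height = 8;
	unsigned int tile_pitch = PAGE_SIZE / tile_height; /* bytes per tile row */
	unsigned int width_pages = DIV_ROUND_UP(pitch, tile_pitch);
	unsigned int height_pages = DIV_ROUND_UP(fb_height, tile_height);
	unsigned int rot_pages = width_pages * height_pages;

	printf("tile_pitch=%u width_pages=%u height_pages=%u rot_pages=%u (%u bytes)\n",
	       tile_pitch, width_pages, height_pages, rot_pages,
	       rot_pages * PAGE_SIZE);
	return 0;
}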
-static inline
-int i915_get_vma_pages(struct i915_vma *vma)
+static inline int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
+ int ret = 0;
+
if (vma->ggtt_view.pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->ggtt_view.pages = vma->obj->pages;
+ else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ vma->ggtt_view.pages =
+ intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
if (!vma->ggtt_view.pages) {
- DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+ DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
vma->ggtt_view.type);
- return -EINVAL;
+ ret = -EINVAL;
+ } else if (IS_ERR(vma->ggtt_view.pages)) {
+ ret = PTR_ERR(vma->ggtt_view.pages);
+ vma->ggtt_view.pages = NULL;
+ DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
}
- return 0;
+ return ret;
}
/**
@@ -2312,10 +2746,12 @@ int i915_get_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
- int ret = i915_get_vma_pages(vma);
+ if (i915_is_ggtt(vma->vm)) {
+ int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
vma->bind_vma(vma, cache_level, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e377c7d27bd4..fc03c99317c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,13 +36,13 @@
struct drm_i915_file_private;
-typedef uint32_t gen6_gtt_pte_t;
-typedef uint64_t gen8_gtt_pte_t;
-typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+typedef uint32_t gen6_pte_t;
+typedef uint64_t gen8_pte_t;
+typedef uint64_t gen8_pde_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
@@ -51,9 +51,16 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PPGTT_PD_ENTRIES 512
-#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
+#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID (1 << 0)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
@@ -88,9 +95,8 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
-#define GEN8_LEGACY_PDPS 4
-#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
-#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_LEGACY_PDPES 4
+#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -111,15 +117,28 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED
+};
+
+struct intel_rotation_info {
+ unsigned int height;
+ unsigned int pitch;
+ uint32_t pixel_format;
+ uint64_t fb_modifier;
};
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
struct sg_table *pages;
+
+ union {
+ struct intel_rotation_info rotation_info;
+ };
};
extern const struct i915_ggtt_view i915_ggtt_view_normal;
+extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
@@ -187,6 +206,28 @@ struct i915_vma {
u32 flags);
};
+struct i915_page_table_entry {
+ struct page *page;
+ dma_addr_t daddr;
+
+ unsigned long *used_ptes;
+};
+
+struct i915_page_directory_entry {
+ struct page *page; /* NULL for GEN6-GEN7 */
+ union {
+ uint32_t pd_offset;
+ dma_addr_t daddr;
+ };
+
+ struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
+};
+
+struct i915_page_directory_pointer_entry {
+ /* struct page *page; */
+ struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+};
+
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
@@ -223,9 +264,12 @@ struct i915_address_space {
struct list_head inactive_list;
/* FIXME: Need a more generic return type */
- gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ gen6_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags); /* Create a valid PTE */
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -269,30 +313,90 @@ struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
+ unsigned long pd_dirty_rings;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
- struct page **pt_pages;
- struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
- };
- struct page *pd_pages;
- union {
- uint32_t pd_offset;
- dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
- };
- union {
- dma_addr_t *pt_dma_addr;
- dma_addr_t *gen8_pt_dma_addr[4];
+ struct i915_page_directory_pointer_entry pdp;
+ struct i915_page_directory_entry pd;
};
+ struct i915_page_table_entry *scratch_pt;
+
struct drm_i915_file_private *file_priv;
+ gen6_pte_t __iomem *pd_addr;
+
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
+/* gen6_for_each_pde() iterates over every PDE from start until start + length.
+ * If start and start + length are not PDE-aligned, the macro rounds down and
+ * up as needed. The macro modifies pde, start, and length. Dev is only used
+ * to differentiate shift values. Temp is a scratch variable. On gen6/7,
+ * start = 0 and length = 2G effectively iterates over every PDE in the system.
+ *
+ * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+ */
+#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
+ for (iter = gen6_pde_index(start); \
+ pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
+ temp = min_t(unsigned, temp, length), \
+ start += temp, length -= temp)
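To illustrate the walk, the macro advances start in chunks that end either at
the next 4 MiB PDE boundary (GEN6_PDE_SHIFT = 22) or at start + length,
whichever comes first. A self-contained sketch of the same arithmetic, with
local helpers that mirror the gen6_pde_index()/gen6_pte_index() inlines defined
further down:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define GEN6_PDE_SHIFT 22
#define I915_PDE_MASK  511
#define ALIGN(x, a)    (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define MIN(a, b)      ((a) < (b) ? (a) : (b))

static uint32_t pde_index(uint64_t addr)
{
	return (addr >> GEN6_PDE_SHIFT) & I915_PDE_MASK;
}

static uint32_t pte_index(uint64_t addr)
{
	return (addr >> PAGE_SHIFT) & ((1 << (GEN6_PDE_SHIFT - PAGE_SHIFT)) - 1);
}

int main(void)
{
	uint64_t start = 3ull << 20, length = 6ull << 20; /* 6 MiB at 3 MiB */

	while (length > 0) {
		/* Distance to the next 4 MiB boundary, clamped to what is left. */
		uint64_t temp = ALIGN(start + 1, 1ull << GEN6_PDE_SHIFT) - start;

		temp = MIN(temp, length);
		printf("pde %u: first pte %u, %llu ptes\n",
		       pde_index(start), pte_index(start),
		       (unsigned long long)(temp >> PAGE_SHIFT));
		start += temp;
		length -= temp;
	}
	return 0;
}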
+
+static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
+{
+ const uint32_t mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/* Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+*/
+static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
+ uint32_t pde_shift)
+{
+ const uint64_t mask = ~((1 << pde_shift) - 1);
+ uint64_t end;
+
+ WARN_ON(length == 0);
+ WARN_ON(offset_in_page(addr|length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
+
+static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline uint32_t gen6_pte_index(uint32_t addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline uint32_t gen6_pde_index(uint32_t addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -321,4 +425,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+static inline bool
+i915_ggtt_view_equal(const struct i915_ggtt_view *a,
+ const struct i915_ggtt_view *b)
+{
+ if (WARN_ON(!a || !b))
+ return false;
+
+ return a->type == b->type;
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
new file mode 100644
index 000000000000..f7929e769250
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/oom.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
+/**
+ * i915_gem_shrink - Shrink buffer object caches
+ * @dev_priv: i915 device
+ * @target: amount of memory to make available, in pages
+ * @flags: control flags for selecting cache types
+ *
+ * This function is the main interface to the shrinker. It will try to release
+ * up to @target pages of main memory backing storage from buffer objects.
+ * Selection of the specific caches can be done with @flags. This is e.g. useful
+ * when purgeable objects should be removed from caches preferentially.
+ *
+ * Note that it's not guaranteed that the released amount is actually available
+ * as free system memory - the pages might still be in use due to other reasons
+ * (like cpu mmaps) or the mm core has reused them before we could grab them.
+ * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
+ * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
+ *
+ * Also note that any kind of pinning (both per-vma address space pins and
+ * backing storage pins at the buffer object level) result in the shrinker code
+ * having to skip the object.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target, unsigned flags)
+{
+ const struct {
+ struct list_head *list;
+ unsigned int bit;
+ } phases[] = {
+ { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { NULL, 0 },
+ }, *phase;
+ unsigned long count = 0;
+
+ /*
+ * As we may completely rewrite the (un)bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+ * one element of the list at a time, and recheck the list
+ * on every iteration.
+ *
+ * In particular, we must hold a reference whilst removing the
+ * object as we may end up waiting for and/or retiring the objects.
+ * This might release the final reference (held by the active list)
+ * and result in the object being freed from under us. This is
+ * similar to the precautions the eviction code must take whilst
+ * removing objects.
+ *
+ * Also note that although these lists do not hold a reference to
+ * the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count equal to 0.
+ */
+ for (phase = phases; phase->list; phase++) {
+ struct list_head still_in_list;
+
+ if ((flags & phase->bit) == 0)
+ continue;
+
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(phase->list)) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma, *v;
+
+ obj = list_first_entry(phase->list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (flags & I915_SHRINK_PURGEABLE &&
+ obj->madv != I915_MADV_DONTNEED)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ /* For the unbound phase, this should be a no-op! */
+ list_for_each_entry_safe(vma, v,
+ &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, phase->list);
+ }
+
+ return count;
+}
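The comment at the top of the loop explains the still_in_list trick: because
unbinding may rewrite the (un)bound lists, the code takes exactly one element
off the head per iteration, parks it on a private list, and splices the
survivors back at the end. A minimal userspace sketch of that pattern on a
plain singly linked list; the node type and the "even ids are reclaimable"
rule are invented purely for illustration:

#include <stdio.h>

struct node { int id; struct node *next; };

static struct node *pop_head(struct node **list)
{
	struct node *n = *list;

	if (n)
		*list = n->next;
	return n;
}

static void push_tail(struct node **list, struct node *n)
{
	n->next = NULL;
	while (*list)
		list = &(*list)->next;
	*list = n;
}

int main(void)
{
	struct node nodes[5], *work = NULL, *still = NULL, *n;
	int i, freed = 0;

	for (i = 4; i >= 0; i--) {
		nodes[i].id = i;
		nodes[i].next = work;
		work = &nodes[i];
	}

	/* Process one element at a time and recheck the head every iteration,
	 * since "processing" could in principle reshuffle the list. Items we
	 * looked at but kept go onto a private list so we never revisit them. */
	while ((n = pop_head(&work))) {
		if (n->id % 2 == 0) {	/* pretend even ids can be reclaimed */
			freed++;
			continue;	/* dropped from both lists */
		}
		push_tail(&still, n);	/* keep, but park it out of the way */
	}

	/* Splice the survivors back, like list_splice(&still_in_list, ...). */
	work = still;

	printf("reclaimed %d, kept:", freed);
	for (n = work; n; n = n->next)
		printf(" %d", n->id);
	printf("\n");
	return 0;
}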
+
+/**
+ * i915_gem_shrink_all - Shrink buffer object caches completely
+ * @dev_priv: i915 device
+ *
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
+ * caches completely. It also first waits for and retires all outstanding
+ * requests to also be able to release backing storage for active objects.
+ *
+ * This should only be used in code to intentionally quiesce the GPU or as a
+ * last-ditch effort when memory seems to have run out.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ i915_gem_evict_everything(dev_priv->dev);
+ return i915_gem_shrink(dev_priv, LONG_MAX,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
+}
+
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
+
+ *unlock = false;
+ } else
+ *unlock = true;
+
+ return true;
+}
+
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+
+ return count;
+}
+
+static unsigned long
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long count;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
+
+ count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ if (obj->pages_pin_count == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static unsigned long
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long freed;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ freed = i915_gem_shrink(dev_priv,
+ sc->nr_to_scan,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
+ if (freed < sc->nr_to_scan)
+ freed += i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return freed;
+}
+
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed_pages;
+ bool was_interruptible;
+ bool unlock;
+
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
+ schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due lock contention.\n");
+ return NOTIFY_DONE;
+ }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ freed_pages = i915_gem_shrink_all(dev_priv);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ if (freed_pages || unbound || bound)
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed_pages << PAGE_SHIFT, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
+
+ *(unsigned long *)ptr += freed_pages;
+ return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_shrinker_init - Initialize i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function registers and sets up the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
+{
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
+
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9c6f93ec886b..f8da71682c96 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -231,7 +231,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.size = size / dev_priv->fbc.threshold;
+ dev_priv->fbc.uncompressed_size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -253,7 +253,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->fbc.size)
+ if (size <= dev_priv->fbc.uncompressed_size)
return 0;
/* Release any current block */
@@ -266,7 +266,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->fbc.size == 0)
+ if (dev_priv->fbc.uncompressed_size == 0)
return;
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
@@ -276,7 +276,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
kfree(dev_priv->fbc.compressed_llb);
}
- dev_priv->fbc.size = 0;
+ dev_priv->fbc.uncompressed_size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 48ddbf44c862..1d4e60df8883 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -386,6 +386,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ error->fault_data1, error->fault_data0);
+
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
@@ -555,7 +560,14 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore_obj);
+
+ for (i = 0; i < error->vm_count; i++)
+ kfree(error->active_bo[i]);
+
kfree(error->active_bo);
+ kfree(error->active_bo_count);
+ kfree(error->pinned_bo);
+ kfree(error->pinned_bo_count);
kfree(error->overlay);
kfree(error->display);
kfree(error);
@@ -994,12 +1006,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
- if (request->file_priv) {
+ if (request->pid) {
struct task_struct *task;
rcu_read_lock();
- task = pid_task(request->file_priv->file->pid,
- PIDTYPE_PID);
+ task = pid_task(request->pid, PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
@@ -1165,6 +1176,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_GEN7(dev))
error->err_int = I915_READ(GEN7_ERR_INT);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ }
+
if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ede5bbbd8a08..6d494432b19f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,6 +277,7 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
+ dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -330,12 +331,10 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
-
- dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
+
+ synchronize_irq(dev->irq);
}
/**
@@ -492,31 +491,6 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-/**
- * i915_pipe_enabled - check if a pipe is enabled
- * @dev: DRM device
- * @pipe: pipe to check
- *
- * Reading certain registers when the pipe is disabled can hang the chip.
- * Use this routine to make sure the PLL is running and the pipe is active
- * before reading such registers if unsure.
- */
-static int
-i915_pipe_enabled(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Locking is horribly broken here, but whatever. */
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return intel_crtc->active;
- } else {
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
- }
-}
-
/*
* This timing diagram depicts the video signal in and
* around the vertical blanking period.
@@ -582,34 +556,16 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ const struct drm_display_mode *mode =
+ &intel_crtc->config->base.adjusted_mode;
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %c\n", pipe_name(pipe));
- return 0;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- const struct drm_display_mode *mode =
- &intel_crtc->config->base.adjusted_mode;
-
- htotal = mode->crtc_htotal;
- hsync_start = mode->crtc_hsync_start;
- vbl_start = mode->crtc_vblank_start;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- } else {
- enum transcoder cpu_transcoder = (enum transcoder) pipe;
-
- htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
- hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
- vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
- if ((I915_READ(PIPECONF(cpu_transcoder)) &
- PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- }
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
/* Convert to pixel count */
vbl_start *= htotal;
@@ -648,12 +604,6 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %c\n", pipe_name(pipe));
- return 0;
- }
-
return I915_READ(reg);
}
@@ -840,7 +790,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return -EINVAL;
}
- if (!crtc->enabled) {
+ if (!crtc->state->enable) {
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
return -EBUSY;
}
@@ -1046,129 +996,73 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
}
-static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *rps_ei)
+static void vlv_c0_read(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *ei)
{
- u32 cz_ts, cz_freq_khz;
- u32 render_count, media_count;
- u32 elapsed_render, elapsed_media, elapsed_time;
- u32 residency = 0;
-
- cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
- cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
-
- render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
- media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
-
- if (rps_ei->cz_clock == 0) {
- rps_ei->cz_clock = cz_ts;
- rps_ei->render_c0 = render_count;
- rps_ei->media_c0 = media_count;
-
- return dev_priv->rps.cur_freq;
- }
-
- elapsed_time = cz_ts - rps_ei->cz_clock;
- rps_ei->cz_clock = cz_ts;
+ ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
+ ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+}
- elapsed_render = render_count - rps_ei->render_c0;
- rps_ei->render_c0 = render_count;
+static bool vlv_c0_above(struct drm_i915_private *dev_priv,
+ const struct intel_rps_ei *old,
+ const struct intel_rps_ei *now,
+ int threshold)
+{
+ u64 time, c0;
- elapsed_media = media_count - rps_ei->media_c0;
- rps_ei->media_c0 = media_count;
+ if (old->cz_clock == 0)
+ return false;
- /* Convert all the counters into common unit of milli sec */
- elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
- elapsed_render /= cz_freq_khz;
- elapsed_media /= cz_freq_khz;
+ time = now->cz_clock - old->cz_clock;
+ time *= threshold * dev_priv->mem_freq;
- /*
- * Calculate overall C0 residency percentage
- * only if elapsed time is non zero
+ /* Workload can be split between render + media, e.g. SwapBuffers
+ * being blitted in X after being rendered in mesa. To account for
+ * this we need to combine both engines into our activity counter.
*/
- if (elapsed_time) {
- residency =
- ((max(elapsed_render, elapsed_media) * 100)
- / elapsed_time);
- }
+ c0 = now->render_c0 - old->render_c0;
+ c0 += now->media_c0 - old->media_c0;
+ c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
- return residency;
+ return c0 >= time;
}
-/**
- * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
- * busy-ness calculated from C0 counters of render & media power wells
- * @dev_priv: DRM device private
- *
- */
-static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- u32 residency_C0_up = 0, residency_C0_down = 0;
- int new_delay, adj;
-
- dev_priv->rps.ei_interrupt_count++;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-
- if (dev_priv->rps.up_ei.cz_clock == 0) {
- vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
- vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
- return dev_priv->rps.cur_freq;
- }
+ vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
+ dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+}
+static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ struct intel_rps_ei now;
+ u32 events = 0;
- /*
- * To down throttle, C0 residency should be less than down threshold
- * for continous EI intervals. So calculate down EI counters
- * once in VLV_INT_COUNT_FOR_DOWN_EI
- */
- if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+ if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ return 0;
- dev_priv->rps.ei_interrupt_count = 0;
+ vlv_c0_read(dev_priv, &now);
+ if (now.cz_clock == 0)
+ return 0;
- residency_C0_down = vlv_c0_residency(dev_priv,
- &dev_priv->rps.down_ei);
- } else {
- residency_C0_up = vlv_c0_residency(dev_priv,
- &dev_priv->rps.up_ei);
+ if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
+ if (!vlv_c0_above(dev_priv,
+ &dev_priv->rps.down_ei, &now,
+ VLV_RP_DOWN_EI_THRESHOLD))
+ events |= GEN6_PM_RP_DOWN_THRESHOLD;
+ dev_priv->rps.down_ei = now;
}
- new_delay = dev_priv->rps.cur_freq;
-
- adj = dev_priv->rps.last_adj;
- /* C0 residency is greater than UP threshold. Increase Frequency */
- if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else
- adj = 1;
-
- if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
-
- /*
- * For better performance, jump directly
- * to RPe if we're below it.
- */
- if (new_delay < dev_priv->rps.efficient_freq)
- new_delay = dev_priv->rps.efficient_freq;
-
- } else if (!dev_priv->rps.ei_interrupt_count &&
- (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
- if (adj < 0)
- adj *= 2;
- else
- adj = -1;
- /*
- * This means, C0 residency is less than down threshold over
- * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
- */
- if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
+ if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ if (vlv_c0_above(dev_priv,
+ &dev_priv->rps.up_ei, &now,
+ VLV_RP_UP_EI_THRESHOLD))
+ events |= GEN6_PM_RP_UP_THRESHOLD;
+ dev_priv->rps.up_ei = now;
}
- return new_delay;
+ return events;
}
static void gen6_pm_rps_work(struct work_struct *work)
@@ -1198,6 +1092,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+
adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
@@ -1220,8 +1116,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
@@ -1243,10 +1137,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, new_delay);
- else
- gen6_set_rps(dev_priv->dev, new_delay);
+ intel_set_rps(dev_priv->dev, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -1748,11 +1639,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- /* TODO: RPS on GEN9+ is not supported yet. */
- if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
- "GEN9+: unexpected RPS IRQ\n"))
- return;
-
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
@@ -2662,9 +2548,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
@@ -2684,9 +2567,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2699,9 +2579,6 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2715,9 +2592,6 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -2769,9 +2643,6 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -3236,15 +3107,24 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
spin_lock_irq(&dev_priv->irq_lock);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
- ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
- ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+ if (pipe_mask & 1 << PIPE_A)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
+ dev_priv->de_irq_mask[PIPE_A],
+ ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
+ if (pipe_mask & 1 << PIPE_B)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
+ dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
+ if (pipe_mask & 1 << PIPE_C)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
+ dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3718,14 +3598,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
I915_WRITE16(IMR, dev_priv->irq_mask);
I915_WRITE16(IER,
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT);
POSTING_READ16(IER);
@@ -3887,14 +3765,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) {
@@ -4362,7 +4238,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4392,10 +4268,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- }
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 44f2262a5553..bb64415a1c3e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -27,7 +27,6 @@
struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
- .powersave = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
@@ -44,6 +43,7 @@ struct i915_params i915 __read_mostly = {
.enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0,
+ .load_detect_test = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
@@ -65,10 +65,6 @@ MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-module_param_named(powersave, i915.powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
- "Enable powersavings, fbc, downclocking, etc. (default: true)");
-
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
@@ -144,11 +140,16 @@ module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
-module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
+module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
+MODULE_PARM_DESC(load_detect_test,
+ "Force-enable the VGA load detect code for testing (default:false). "
+ "For developers only.");
+
module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
@@ -171,10 +172,10 @@ module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
-module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
MODULE_PARM_DESC(mmio_debug,
- "Enable the MMIO debug code (default: false). This may negatively "
- "affect performance.");
+ "Enable the MMIO debug code for the first N failures (default: off). "
+ "This may negatively affect performance.");
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 33b3d0a24071..773d1d24e604 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,7 +139,21 @@
#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
+#define GEN8_R_PWR_CLK_STATE 0x20C8
+#define GEN8_RPCS_ENABLE (1 << 31)
+#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
+#define GEN8_RPCS_S_CNT_SHIFT 15
+#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11)
+#define GEN8_RPCS_SS_CNT_SHIFT 8
+#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define GEN8_RPCS_EU_MAX_SHIFT 4
+#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define GEN8_RPCS_EU_MIN_SHIFT 0
+#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
#define GAM_ECOCHK 0x4090
+#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
@@ -552,6 +566,9 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */
+#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */
+#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */
#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
@@ -586,6 +603,19 @@ enum punit_power_well {
PUNIT_POWER_WELL_NUM,
};
+enum skl_disp_power_wells {
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_DDI_A_E,
+ SKL_DISP_PW_DDI_B,
+ SKL_DISP_PW_DDI_C,
+ SKL_DISP_PW_DDI_D,
+ SKL_DISP_PW_1 = 14,
+ SKL_DISP_PW_2,
+};
+
+#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
+#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
+
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
@@ -614,6 +644,11 @@ enum punit_power_well {
#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
+#define PUNIT_REG_DDR_SETUP2 0x139
+#define FORCE_DDR_FREQ_REQ_ACK (1 << 8)
+#define FORCE_DDR_LOW_FREQ (1 << 1)
+#define FORCE_DDR_HIGH_FREQ (1 << 0)
+
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
@@ -638,7 +673,6 @@ enum punit_power_well {
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
-#define VLV_INT_COUNT_FOR_DOWN_EI 5
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1002,6 +1036,7 @@ enum punit_power_well {
#define DPIO_CHV_FIRST_MOD (0 << 8)
#define DPIO_CHV_SECOND_MOD (1 << 8)
#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
+#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
#define _CHV_PLL_DW6_CH0 0x8018
@@ -1011,6 +1046,19 @@ enum punit_power_well {
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+#define _CHV_PLL_DW8_CH0 0x8020
+#define _CHV_PLL_DW8_CH1 0x81A0
+#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
+#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
+#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
+
+#define _CHV_PLL_DW9_CH0 0x8024
+#define _CHV_PLL_DW9_CH1 0x81A4
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
+#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
+#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
+
#define _CHV_CMN_DW5_CH0 0x8114
#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
@@ -1258,6 +1306,9 @@ enum punit_power_well {
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
+#define GEN8_FAULT_TLB_DATA0 0x04b10
+#define GEN8_FAULT_TLB_DATA1 0x04b14
+
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -1314,6 +1365,8 @@ enum punit_power_well {
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
+#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2))
+#define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2))
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
@@ -1470,6 +1523,7 @@ enum punit_power_well {
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_LOCK_SHIFT 16
@@ -1482,6 +1536,8 @@ enum punit_power_well {
/* Fuse readout registers for GT */
#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_DISABLE_SS0 (1 << 10)
+#define CHV_FGT_DISABLE_SS1 (1 << 11)
#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
@@ -1491,6 +1547,17 @@ enum punit_power_well {
#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+#define GEN8_FUSE2 0x9120
+#define GEN8_F2_S_ENA_SHIFT 25
+#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
+
+#define GEN9_F2_SS_DIS_SHIFT 20
+#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0 0x9134
+#define GEN8_EU_DISABLE1 0x9138
+#define GEN8_EU_DISABLE2 0x913c
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -1740,6 +1807,7 @@ enum punit_power_well {
#define GMBUS_CYCLE_INDEX (2<<25)
#define GMBUS_CYCLE_STOP (4<<25)
#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
#define GMBUS_SLAVE_READ (1<<0)
@@ -2048,6 +2116,14 @@ enum punit_power_well {
#define CDCLK_FREQ_SHIFT 4
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
/*
@@ -2376,6 +2452,12 @@ enum punit_power_well {
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
+#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+ INTERVAL_1_33_US(us) : \
+ INTERVAL_1_28_US(us))
+
/*
* Logical Context regs
*/
@@ -2968,7 +3050,7 @@ enum punit_power_well {
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
-/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
@@ -3865,6 +3947,7 @@ enum punit_power_well {
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0<<5)
@@ -4013,7 +4096,7 @@ enum punit_power_well {
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
-#define DSPARB 0x70030
+#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
@@ -4021,6 +4104,9 @@ enum punit_power_well {
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
+#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
@@ -4044,8 +4130,8 @@ enum punit_power_well {
#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_CURSORA_MASK (0x3f<<8)
-#define DSPFW_PLANEC_SHIFT_OLD 0
-#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
@@ -4084,25 +4170,25 @@ enum punit_power_well {
#define DSPFW_SPRITED_WM1_SHIFT 24
#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
#define DSPFW_CURSORC_WM1_SHIFT 8
#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
#define DSPFW_CURSORC_SHIFT 0
@@ -4111,7 +4197,7 @@ enum punit_power_well {
/* vlv/chv high order bits */
#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (1<<24)
+#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
#define DSPFW_SPRITEF_HI_MASK (1<<23)
#define DSPFW_SPRITEE_HI_SHIFT 22
@@ -4132,7 +4218,7 @@ enum punit_power_well {
#define DSPFW_PLANEA_HI_MASK (1<<0)
#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (1<<24)
+#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
@@ -4153,21 +4239,17 @@ enum punit_power_well {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values*/
-#define DRAIN_LATENCY_PRECISION_16 16
-#define DRAIN_LATENCY_PRECISION_32 32
-#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_HIGH (1<<31)
-#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
-#define DDL_PLANE_PRECISION_HIGH (1<<7)
-#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1<<7)
+#define DDL_PRECISION_LOW (0<<7)
#define DRAIN_LATENCY_MASK 0x7f
+#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1<<31)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
@@ -5221,14 +5303,22 @@ enum punit_power_well {
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define FF_SLICE_CS_CHICKEN2 0x02e4
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
+
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
-#define HIZ_CHICKEN 0x7018
-# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+#define HIZ_CHICKEN 0x7018
+# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308
+#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -5245,11 +5335,16 @@ enum punit_power_well {
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+#define GEN8_L3SQCREG4 0xb118
+#define GEN8_LQSC_RO_PERF_DIS (1<<27)
+
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
-#define HDC_FORCE_NON_COHERENT (1<<4)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
+#define HDC_FORCE_NON_COHERENT (1<<4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5258,6 +5353,9 @@ enum punit_power_well {
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+#define BDW_SCRATCH1 0xb11c
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
+
/* PCH */
/* south display engine interrupt: IBX */
@@ -5976,10 +6074,13 @@ enum punit_power_well {
#define GTFIFOCTL 0x120008
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
+#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
#define HSW_IDICR 0x9008
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT 0x120010
+#define EDRAM_ENABLED 0x1
#define GEN6_UCGCTL1 0x9400
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
@@ -6003,6 +6104,7 @@ enum punit_power_well {
#define GEN6_RSTCTL 0x9420
#define GEN8_UCGCTL6 0x9430
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_GFXPAUSE 0xA000
@@ -6010,6 +6112,7 @@ enum punit_power_well {
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
#define HSW_FREQUENCY(x) ((x)<<24)
+#define GEN9_FREQUENCY(x) ((x)<<23)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ 0xA00C
@@ -6028,8 +6131,10 @@ enum punit_power_well {
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
+#define GEN9_CAGF_SHIFT 23
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
+#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
@@ -6120,8 +6225,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
-#define VLV_RENDER_C0_COUNT_REG 0x138118
-#define VLV_MEDIA_C0_COUNT_REG 0x13811C
+#define VLV_RENDER_C0_COUNT 0x138118
+#define VLV_MEDIA_C0_COUNT 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@@ -6155,6 +6260,37 @@ enum punit_power_well {
#define GEN6_RC6 3
#define GEN6_RC7 4
+#define CHV_POWER_SS0_SIG1 0xa720
+#define CHV_POWER_SS1_SIG1 0xa728
+#define CHV_SS_PG_ENABLE (1<<1)
+#define CHV_EU08_PG_ENABLE (1<<9)
+#define CHV_EU19_PG_ENABLE (1<<17)
+#define CHV_EU210_PG_ENABLE (1<<25)
+
+#define CHV_POWER_SS0_SIG2 0xa724
+#define CHV_POWER_SS1_SIG2 0xa72c
+#define CHV_EU311_PG_ENABLE (1<<1)
+
+#define GEN9_SLICE0_PGCTL_ACK 0x804c
+#define GEN9_SLICE1_PGCTL_ACK 0x8050
+#define GEN9_SLICE2_PGCTL_ACK 0x8054
+#define GEN9_PGCTL_SLICE_ACK (1 << 0)
+
+#define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c
+#define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060
+#define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064
+#define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068
+#define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c
+#define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070
+#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
+#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
+#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
+#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
+#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
+#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
+#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
+#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
@@ -6185,6 +6321,7 @@ enum punit_power_well {
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
@@ -6200,8 +6337,12 @@ enum punit_power_well {
#define HALF_SLICE_CHICKEN3 0xe184
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+#define GEN9_HALF_SLICE_CHICKEN7 0xe194
+#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
+
/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
@@ -6351,6 +6492,13 @@ enum punit_power_well {
#define HSW_PWR_WELL_FORCE_ON (1<<19)
#define HSW_PWR_WELL_CTL6 0x45414
+/* SKL Fuse Status */
+#define SKL_FUSE_STATUS 0x42000
+#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
+#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
+#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
+#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+
/* Per-pipe DDI Function Control */
#define TRANS_DDI_FUNC_CTL_A 0x60400
#define TRANS_DDI_FUNC_CTL_B 0x61400
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 9f19ed38cdc3..cf67f82f7b7f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,166 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- return I915_READ8(data_port);
-}
-
-static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- return I915_READ8(VGA_AR_DATA_READ);
-}
-
-static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
- I915_WRITE8(VGA_AR_DATA_WRITE, val);
-}
-
-static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- I915_WRITE8(data_port, val);
-}
-
-static void i915_save_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA state */
- dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
- dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
- dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
- dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
-
- /* VGA color palette registers */
- dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
-
- /* MSR bits */
- dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* CRT controller regs */
- i915_write_indexed(dev, cr_index, cr_data, 0x11,
- i915_read_indexed(dev, cr_index, cr_data, 0x11) &
- (~0x80));
- for (i = 0; i <= 0x24; i++)
- dev_priv->regfile.saveCR[i] =
- i915_read_indexed(dev, cr_index, cr_data, i);
- /* Make sure we don't turn off CR group 0 writes */
- dev_priv->regfile.saveCR[0x11] &= ~0x80;
-
- /* Attribute controller registers */
- I915_READ8(st01);
- dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
- for (i = 0; i <= 0x14; i++)
- dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
- I915_READ8(st01);
-
- /* Graphics controller registers */
- for (i = 0; i < 9; i++)
- dev_priv->regfile.saveGR[i] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
-
- dev_priv->regfile.saveGR[0x10] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->regfile.saveGR[0x11] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->regfile.saveGR[0x18] =
- i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
- /* Sequencer registers */
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveSR[i] =
- i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-static void i915_restore_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA state */
- I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
-
- I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
- I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
-
- /* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* Sequencer registers, don't write SR07 */
- for (i = 0; i < 7; i++)
- i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->regfile.saveSR[i]);
-
- /* CRT controller regs */
- /* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
- for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
-
- /* Graphics controller regs */
- for (i = 0; i < 9; i++)
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->regfile.saveGR[i]);
-
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->regfile.saveGR[0x10]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->regfile.saveGR[0x11]);
- i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->regfile.saveGR[0x18]);
-
- /* Attribute controller registers */
- I915_READ8(st01); /* switch back to index mode */
- for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
- I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
- I915_READ8(st01);
-
- /* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
-}
-
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -197,11 +37,6 @@ static void i915_save_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
- /* This is only meaningful in non-KMS mode */
- /* Don't regfile.save them in KMS mode */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_save_display_reg(dev);
-
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
@@ -224,9 +59,6 @@ static void i915_save_display(struct drm_device *dev)
/* save FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_save_vga(dev);
}
static void i915_restore_display(struct drm_device *dev)
@@ -238,11 +70,7 @@ static void i915_restore_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_restore_display_reg(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- mask = ~LVDS_PORT_EN;
+ mask = ~LVDS_PORT_EN;
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -270,10 +98,7 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_restore_vga(dev);
- else
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
@@ -285,24 +110,6 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveDEIER = I915_READ(DEIER);
- dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
- dev_priv->regfile.saveGTIER = I915_READ(GTIER);
- dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
- dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->regfile.saveIER = I915_READ(IER);
- dev_priv->regfile.saveIMR = I915_READ(IMR);
- }
- }
-
if (IS_GEN4(dev))
pci_read_config_word(dev->pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -341,24 +148,6 @@ int i915_restore_state(struct drm_device *dev)
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
- I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
- I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
- I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
- I915_WRITE(RSTDBYCTL,
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
- } else {
- I915_WRITE(IER, dev_priv->regfile.saveIER);
- I915_WRITE(IMR, dev_priv->regfile.saveIMR);
- }
- }
-
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 49f5ade0edb7..247626885f49 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -127,10 +127,19 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
+static ssize_t
+show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = dev_get_drvdata(kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
@@ -153,6 +162,16 @@ static struct attribute_group rc6p_attr_group = {
.name = power_group_name,
.attrs = rc6p_attrs
};
+
+static struct attribute *media_rc6_attrs[] = {
+ &dev_attr_media_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group media_rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = media_rc6_attrs
+};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -300,7 +319,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_GEN9(dev_priv))
+ ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -402,10 +423,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -464,10 +482,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev, val);
- else
- gen6_set_rps(dev, val);
+ intel_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -493,38 +508,17 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, rp_state_cap;
- ssize_t ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ u32 val;
- if (attr == &dev_attr_gt_RP0_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0x0000ff) >> 0));
- } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0x00ff00) >> 8));
- } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
- if (IS_VALLEYVIEW(dev))
- val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
- else
- val = intel_gpu_freq(dev_priv,
- ((rp_state_cap & 0xff0000) >> 16));
- } else {
+ if (attr == &dev_attr_gt_RP0_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ else if (attr == &dev_attr_gt_RP1_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ else if (attr == &dev_attr_gt_RPn_freq_mhz)
+ val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ else
BUG();
- }
+
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
@@ -633,6 +627,12 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("RC6p residency sysfs setup failed\n");
}
+ if (IS_VALLEYVIEW(dev)) {
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ &media_rc6_attr_group);
+ if (ret)
+ DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+ }
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6058a01b4443..5fda6c70b423 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -12,7 +12,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
-#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace
/* pipe updates */
@@ -115,7 +114,7 @@ TRACE_EVENT(i915_vma_bind,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
- __field(u32, offset)
+ __field(u64, offset)
__field(u32, size)
__field(unsigned, flags)
),
@@ -128,7 +127,7 @@ TRACE_EVENT(i915_vma_bind,
__entry->flags = flags;
),
- TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
+ TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
@@ -141,7 +140,7 @@ TRACE_EVENT(i915_vma_unbind,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
- __field(u32, offset)
+ __field(u64, offset)
__field(u32, size)
),
@@ -152,10 +151,109 @@ TRACE_EVENT(i915_vma_unbind,
__entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+ TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
+#define VM_TO_TRACE_NAME(vm) \
+ (i915_is_ggtt(vm) ? "G" : \
+ "P")
+
+DECLARE_EVENT_CLASS(i915_va,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u64, start)
+ __field(u64, end)
+ __string(name, name)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->start = start;
+ __entry->end = start + length - 1;
+ __assign_str(name, name);
+ ),
+
+ TP_printk("vm=%p (%s), 0x%llx-0x%llx",
+ __entry->vm, __get_str(name), __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_va, i915_va_alloc,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name)
+);
+
+DECLARE_EVENT_CLASS(i915_page_table_entry,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u64, start)
+ __field(u64, end)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->start = start;
+ __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
+ ),
+
+ TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift)
+);
+
+/* Avoid extra math because we only support two sizes. The format is defined by
+ * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
+#define TRACE_PT_SIZE(bits) \
+ ((((bits) == 1024) ? 288 : 144) + 1)
+
+DECLARE_EVENT_CLASS(i915_page_table_entry_update,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u32, first)
+ __field(u32, last)
+ __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->first = first;
+ __entry->last = first + count - 1;
+ scnprintf(__get_str(cur_ptes),
+ TRACE_PT_SIZE(bits),
+ "%*pb",
+ bits,
+ pt->used_ptes);
+ ),
+
+ TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
+ __entry->vm, __entry->pde, __entry->last, __entry->first,
+ __get_str(cur_ptes))
+);
+
+DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits)
+);
+
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
deleted file mode 100644
index d10fe3e9c49f..000000000000
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- * Copyright 2013 (c) Intel Corporation
- * Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "intel_drv.h"
-#include "i915_reg.h"
-
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpll_reg;
-
- /* On IVB, 3rd pipe shares PLL with another one */
- if (pipe > 1)
- return false;
-
- if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
- else
- dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
-void i915_save_display_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Cursor state */
- dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
- dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
- } else {
- dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
- dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
- dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
- dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
- dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
- dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
- dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
- }
-
- dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
- } else {
- dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
- dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
- dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
- dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
- dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
- dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
- dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
- }
-
- dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
- }
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
- else
- dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->regfile.saveDP_B = I915_READ(DP_B);
- dev_priv->regfile.saveDP_C = I915_READ(DP_C);
- dev_priv->regfile.saveDP_D = I915_READ(DP_D);
- dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
- dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
- dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
- dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
- dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
- dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
- dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
- dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
- }
- /* FIXME: regfile.save TV & SDVO state */
-
- /* Panel fitter */
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- }
-
- /* Backlight */
- if (INTEL_INFO(dev)->gen <= 4)
- pci_read_config_byte(dev->pdev, PCI_LBPC,
- &dev_priv->regfile.saveLBB);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- } else {
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
- dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- }
-
- return;
-}
-
-void i915_restore_display_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
- int i;
-
- /* Backlight */
- if (INTEL_INFO(dev)->gen <= 4)
- pci_write_config_byte(dev->pdev, PCI_LBPC,
- dev_priv->regfile.saveLBB);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
- * otherwise we get blank eDP screen after S3 on some machines
- */
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
- } else {
- if (INTEL_INFO(dev)->gen >= 4)
- I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
- }
-
- /* Panel fitter */
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
- }
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
- }
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
- break;
- }
-
-
- if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = _PCH_DPLL_A;
- dpll_b_reg = _PCH_DPLL_B;
- fpa0_reg = _PCH_FPA0;
- fpb0_reg = _PCH_FPB0;
- fpa1_reg = _PCH_FPA1;
- fpb1_reg = _PCH_FPB1;
- } else {
- dpll_a_reg = _DPLL_A;
- dpll_b_reg = _DPLL_B;
- fpa0_reg = _FPA0;
- fpb0_reg = _FPB0;
- fpa1_reg = _FPA1;
- fpb1_reg = _FPB1;
- }
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
- POSTING_READ(_DPLL_A_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
-
- I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
-
- I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
-
- I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
- I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
- I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
- I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
- I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
- I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
- I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
- }
-
- I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
- I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
- POSTING_READ(_DPLL_B_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
-
- I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
-
- I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
-
- I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
- I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
- I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
- I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
- I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
- I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
- I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
- }
-
- I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
- I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
- /* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
- I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
- I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
-
- return;
-}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
new file mode 100644
index 000000000000..5eee75bff170
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_drv.h"
+#include "i915_vgpu.h"
+
+/**
+ * DOC: Intel GVT-g guest support
+ *
+ * Intel GVT-g is a graphics virtualization technology which shares the
+ * GPU among multiple virtual machines on a time-sharing basis. Each
+ * virtual machine is presented with a virtual GPU (vGPU) that has features
+ * equivalent to the underlying physical GPU (pGPU), so the i915 driver can
+ * run seamlessly in a virtual machine. This file provides vGPU-specific
+ * optimizations when running in a virtual machine, to reduce the complexity
+ * of vGPU emulation and to improve overall performance.
+ *
+ * A primary technique introduced here is the so-called "address space
+ * ballooning". Intel GVT-g partitions global graphics memory among multiple
+ * VMs, so each VM can directly access a portion of the memory without the
+ * hypervisor's intervention, e.g. for filling textures or queuing commands.
+ * However, with this partitioning an unmodified i915 driver would assume a
+ * smaller graphics memory starting at address ZERO, which would require the
+ * vGPU emulation module to translate graphics addresses between the 'guest
+ * view' and the 'host view' for every register and command opcode that
+ * contains a graphics memory address. To reduce this complexity, Intel GVT-g
+ * introduces "address space ballooning": each guest i915 driver is told the
+ * exact partitioning and then reserves the portions it does not own, so the
+ * vGPU emulation module only needs to scan and validate graphics addresses
+ * without the complexity of address translation.
+ *
+ */
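To make the last point concrete, a minimal sketch (not part of this patch; the helper name and parameters are hypothetical) of the range check that ballooning leaves on the host side once address translation is no longer needed:

static inline bool vgpu_gfx_addr_is_valid(u64 addr,
					  u64 mappable_base, u64 mappable_end,
					  u64 unmappable_base, u64 unmappable_end)
{
	/* Guest and host graphics addresses are identical under ballooning,
	 * so validation reduces to a range check against the advertised
	 * partition, with no translation step.
	 */
	return (addr >= mappable_base && addr < mappable_end) ||
	       (addr >= unmappable_base && addr < unmappable_end);
}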
+
+/**
+ * i915_check_vgpu - detect virtual GPU
+ * @dev: drm device *
+ *
+ * This function is called at the initialization stage to detect whether the
+ * driver is running on a vGPU.
+ */
+void i915_check_vgpu(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ uint64_t magic;
+ uint32_t version;
+
+ BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ magic = readq(dev_priv->regs + vgtif_reg(magic));
+ if (magic != VGT_MAGIC)
+ return;
+
+ version = INTEL_VGT_IF_VERSION_ENCODE(
+ readw(dev_priv->regs + vgtif_reg(version_major)),
+ readw(dev_priv->regs + vgtif_reg(version_minor)));
+ if (version != INTEL_VGT_IF_VERSION) {
+ DRM_INFO("VGT interface version mismatch!\n");
+ return;
+ }
+
+ dev_priv->vgpu.active = true;
+ DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+}
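A minimal sketch, assuming callers simply branch on the flag set above, of how vGPU-only setup might be gated (the wrapper name is hypothetical; the real call sites live outside this file):

static int i915_vgpu_setup_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Nothing to do on bare metal; balloon the GGTT only on a vGPU. */
	if (!dev_priv->vgpu.active)
		return 0;

	return intel_vgt_balloon(dev);
}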
+
+struct _balloon_info_ {
+ /*
+ * There are up to 2 ballooned regions for each of the mappable and
+ * unmappable graphics memory ranges. Indexes 0/1 are for mappable
+ * graphics memory, 2/3 for unmappable graphics memory.
+ */
+ struct drm_mm_node space[4];
+};
+
+static struct _balloon_info_ bl_info;
+
+/**
+ * intel_vgt_deballoon - deballoon reserved graphics address chunks
+ *
+ * This function is called to deallocate the ballooned-out graphics memory,
+ * either when the driver is unloaded or when ballooning fails.
+ */
+void intel_vgt_deballoon(void)
+{
+ int i;
+
+ DRM_DEBUG("VGT deballoon.\n");
+
+ for (i = 0; i < 4; i++) {
+ if (bl_info.space[i].allocated)
+ drm_mm_remove_node(&bl_info.space[i]);
+ }
+
+ memset(&bl_info, 0, sizeof(bl_info));
+}
+
+static int vgt_balloon_space(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long start, unsigned long end)
+{
+ unsigned long size = end - start;
+
+ if (start == end)
+ return -EINVAL;
+
+ DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+ start, end, size / 1024);
+
+ node->start = start;
+ node->size = size;
+
+ return drm_mm_reserve_node(mm, node);
+}
+
+/**
+ * intel_vgt_balloon - balloon out reserved graphics address chunks
+ * @dev: drm device
+ *
+ * This function is called at the initialization stage to balloon out the
+ * graphics address space allocated to other vGPUs, by marking these spaces
+ * as reserved. The ballooning-related knowledge (starting address and size
+ * of the mappable/unmappable graphics memory) is described in the vgt_if
+ * structure, which lives in a reserved MMIO range.
+ *
+ * To give an example, the drawing below depicts one typical scenario after
+ * ballooning. Here vGPU1 has two pieces of graphics address space ballooned
+ * out, one each for the mappable and the unmappable part. From vGPU1's point
+ * of view, the total size is the same as the physical one, with the start
+ * address of its graphics space being zero. Yet some portions are ballooned
+ * out (the shaded parts, which are marked as reserved by the DRM allocator).
+ * From the host's point of view, the graphics address space is partitioned
+ * among multiple vGPUs in different VMs.
+ *
+ * vGPU1 view Host view
+ * 0 ------> +-----------+ +-----------+
+ * ^ |///////////| | vGPU3 |
+ * | |///////////| +-----------+
+ * | |///////////| | vGPU2 |
+ * | +-----------+ +-----------+
+ * mappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |///////////| | |
+ * v |///////////| | Host |
+ * +=======+===========+ +===========+
+ * ^ |///////////| | vGPU3 |
+ * | |///////////| +-----------+
+ * | |///////////| | vGPU2 |
+ * | +-----------+ +-----------+
+ * unmappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |///////////| | |
+ * | |///////////| | Host |
+ * v |///////////| | |
+ * total GM size ------> +-----------+ +-----------+
+ *
+ * Returns:
+ * zero on success, non-zero if configuration invalid or ballooning failed
+ */
+int intel_vgt_balloon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+
+ unsigned long mappable_base, mappable_size, mappable_end;
+ unsigned long unmappable_base, unmappable_size, unmappable_end;
+ int ret;
+
+ mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
+ mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
+ unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
+ unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+
+ mappable_end = mappable_base + mappable_size;
+ unmappable_end = unmappable_base + unmappable_size;
+
+ DRM_INFO("VGT ballooning configuration:\n");
+ DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+ mappable_base, mappable_size / 1024);
+ DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+ unmappable_base, unmappable_size / 1024);
+
+ if (mappable_base < ggtt_vm->start ||
+ mappable_end > dev_priv->gtt.mappable_end ||
+ unmappable_base < dev_priv->gtt.mappable_end ||
+ unmappable_end > ggtt_vm_end) {
+ DRM_ERROR("Invalid ballooning configuration!\n");
+ return -EINVAL;
+ }
+
+ /* Unmappable graphic memory ballooning */
+ if (unmappable_base > dev_priv->gtt.mappable_end) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[2],
+ dev_priv->gtt.mappable_end,
+ unmappable_base);
+
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * No need to partition out the last physical page,
+ * because it is reserved to the guard page.
+ */
+ if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[3],
+ unmappable_end,
+ ggtt_vm_end - PAGE_SIZE);
+ if (ret)
+ goto err;
+ }
+
+ /* Mappable graphic memory ballooning */
+ if (mappable_base > ggtt_vm->start) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[0],
+ ggtt_vm->start, mappable_base);
+
+ if (ret)
+ goto err;
+ }
+
+ if (mappable_end < dev_priv->gtt.mappable_end) {
+ ret = vgt_balloon_space(&ggtt_vm->mm,
+ &bl_info.space[1],
+ mappable_end,
+ dev_priv->gtt.mappable_end);
+
+ if (ret)
+ goto err;
+ }
+
+ DRM_INFO("VGT balloon successfully\n");
+ return 0;
+
+err:
+ DRM_ERROR("VGT balloon fail\n");
+ intel_vgt_deballoon();
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
new file mode 100644
index 000000000000..97a88b5f6a26
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _I915_VGPU_H_
+#define _I915_VGPU_H_
+
+/* The MMIO offset of the shared info between guest and host emulator */
+#define VGT_PVINFO_PAGE 0x78000
+#define VGT_PVINFO_SIZE 0x1000
+
+/*
+ * The following structure pages are defined in GEN MMIO space
+ * for virtualization (one page for now).
+ */
+#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
+#define VGT_VERSION_MAJOR 1
+#define VGT_VERSION_MINOR 0
+
+#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
+#define INTEL_VGT_IF_VERSION \
+ INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
+
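For reference, a worked example of the version encoding (illustrative only):

/* INTEL_VGT_IF_VERSION_ENCODE(1, 0) == (1 << 16) | 0 == 0x00010000,
 * which is the value i915_check_vgpu() compares against the version
 * assembled from the two readw() reads of version_major/version_minor.
 */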
+struct vgt_if {
+ uint64_t magic; /* VGT_MAGIC */
+ uint16_t version_major;
+ uint16_t version_minor;
+ uint32_t vgt_id; /* ID of vGT instance */
+ uint32_t rsv1[12]; /* pad to offset 0x40 */
+ /*
+ * Data structure to describe the ballooning info of resources.
+ * Each VM can only have one contiguous area for now.
+ * (May support scattered resources in the future.)
+ * (Starts at offset 0x40.)
+ */
+ struct {
+ /* Aperture register ballooning */
+ struct {
+ uint32_t base;
+ uint32_t size;
+ } mappable_gmadr; /* aperture */
+ /* GMADR register ballooning */
+ struct {
+ uint32_t base;
+ uint32_t size;
+ } nonmappable_gmadr; /* non aperture */
+ /* allowed fence registers */
+ uint32_t fence_num;
+ uint32_t rsv2[3];
+ } avail_rs; /* available/assigned resource */
+ uint32_t rsv3[0x200 - 24]; /* pad to half page */
+ /*
+ * The bottom half page is for responses from the gfx driver to the
+ * hypervisor. These are reserved fields for now.
+ */
+ uint32_t rsv4;
+ uint32_t display_ready; /* ready for display owner switch */
+ uint32_t rsv5[0x200 - 2]; /* pad to one page */
+} __packed;
+
+#define vgtif_reg(x) \
+ (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
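A brief note on the macro above (an offsetof()-style expansion, shown for illustration): vgtif_reg() maps a struct vgt_if field to its MMIO offset inside the PVINFO page, e.g.

/* vgtif_reg(magic)                         == 0x78000 + 0x00
 * vgtif_reg(avail_rs.mappable_gmadr.base)  == 0x78000 + 0x40
 * matching the "pad to offset 0x40" layout comment in struct vgt_if, and
 * used as readq(dev_priv->regs + vgtif_reg(...)) and I915_READ(vgtif_reg(...))
 * in i915_vgpu.c above.
 */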
+
+/* vGPU display status to be used by the host side */
+#define VGT_DRV_DISPLAY_NOT_READY 0
+#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
+
+extern void i915_check_vgpu(struct drm_device *dev);
+extern int intel_vgt_balloon(struct drm_device *dev);
+extern void intel_vgt_deballoon(void);
+
+#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 19a9dd5408f3..3903b90fb64e 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -134,9 +134,9 @@ int intel_atomic_commit(struct drm_device *dev,
* FIXME: The proper sequence here will eventually be:
*
* drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_pre_planes(dev, state);
+ * drm_atomic_helper_commit_modeset_disables(dev, state);
* drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_post_planes(dev, state);
+ * drm_atomic_helper_commit_modeset_enables(dev, state);
* drm_atomic_helper_wait_for_vblanks(dev, state);
* drm_atomic_helper_cleanup_planes(dev, state);
* drm_atomic_state_free(state);
@@ -214,12 +214,18 @@ struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *crtc_state;
if (WARN_ON(!intel_crtc->config))
- return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL);
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ else
+ crtc_state = kmemdup(intel_crtc->config,
+ sizeof(*intel_crtc->config), GFP_KERNEL);
- return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config),
- GFP_KERNEL);
+ if (crtc_state)
+ crtc_state->base.crtc = crtc;
+
+ return &crtc_state->base;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 9e6f727dfd19..976b89156570 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -203,16 +203,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t *val)
{
- struct drm_mode_config *config = &plane->dev->mode_config;
-
- if (property == config->rotation_property) {
- *val = state->rotation;
- } else {
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
- return -EINVAL;
- }
-
- return 0;
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
}
/**
@@ -233,14 +225,6 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
- struct drm_mode_config *config = &plane->dev->mode_config;
-
- if (property == config->rotation_property) {
- state->rotation = val;
- } else {
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
- return -EINVAL;
- }
-
- return 0;
+ DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 3f178258d9f9..c684085cb56a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -662,6 +662,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp_link_params->vswing);
break;
}
+
+ if (bdb->version >= 173) {
+ uint8_t vswing;
+
+ vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+ dev_priv->vbt.edp_low_vswing = vswing == 0;
+ }
}
static void
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index a6a8710f665f..6afd5be33367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -554,6 +554,7 @@ struct bdb_edp {
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
+ u64 edp_vswing_preemph; /* v173 */
} __packed;
struct psr_table {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e66e17af0a56..515d7123785d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -690,7 +690,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -706,9 +706,11 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else
+ else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(connector, &tmp);
+ else
+ status = connector_status_unknown;
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
} else
status = connector_status_unknown;
@@ -794,6 +796,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_get_property = intel_connector_atomic_get_property,
};
@@ -848,7 +851,7 @@ void intel_crt_init(struct drm_device *dev)
if (!crt)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(crt);
return;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f14e8a2a022d..3eb0efc2dd0d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -139,18 +139,24 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00004014, 0x00000087 },
};
+/* eDP 1.4 low vswing translation parameters */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000a8 },
+ { 0x00002016, 0x000000ab },
+ { 0x00006012, 0x000000a2 },
+ { 0x00008010, 0x00000088 },
+ { 0x00000018, 0x000000ab },
+ { 0x00004014, 0x000000a2 },
+ { 0x00006012, 0x000000a6 },
+ { 0x00000018, 0x000000a2 },
+ { 0x00005013, 0x0000009c },
+ { 0x00000018, 0x00000088 },
+};
+
+
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
/* Idx NT mV T mV db */
- { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
- { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
- { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
- { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
- { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
- { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
- { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
- { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
- { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
- { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
+ { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */
};
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
@@ -187,7 +193,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- int i, n_hdmi_entries, hdmi_800mV_0dB;
+ int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+ size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
@@ -198,60 +205,85 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp;
- ddi_translations_edp = skl_ddi_translations_dp;
+ n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ if (dev_priv->vbt.edp_low_vswing) {
+ ddi_translations_edp = skl_ddi_translations_edp;
+ n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+ } else {
+ ddi_translations_edp = skl_ddi_translations_dp;
+ n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ }
+
+ /*
+ * On SKL, the recommendation from the hw team is to always use
+ * a certain type of level shifter (and thus the corresponding
+ * 800mV+2dB entry). Given that's the only validated entry, we
+ * override what is in the VBT, at least until further notice.
+ */
+ hdmi_level = 0;
ddi_translations_hdmi = skl_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 0;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+ n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 6;
+ hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
}
switch (port) {
case PORT_A:
ddi_translations = ddi_translations_edp;
+ size = n_edp_entries;
break;
case PORT_B:
case PORT_C:
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
break;
case PORT_D:
- if (intel_dp_is_edp(dev, PORT_D))
+ if (intel_dp_is_edp(dev, PORT_D)) {
ddi_translations = ddi_translations_edp;
- else
+ size = n_edp_entries;
+ } else {
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
+ }
break;
case PORT_E:
if (ddi_translations_fdi)
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
+ size = n_dp_entries;
break;
default:
BUG();
}
- for (i = 0, reg = DDI_BUF_TRANS(port);
- i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
I915_WRITE(reg, ddi_translations[i].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
@@ -261,7 +293,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
/* Choose a good default if VBT is badly populated */
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_800mV_0dB;
+ hdmi_level = hdmi_default_entry;
/* Entry 9 is for HDMI: */
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
@@ -460,17 +492,23 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
}
static struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *intel_encoder, *ret = NULL;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_encoder *ret = NULL;
+ struct drm_atomic_state *state;
int num_encoders = 0;
+ int i;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc == crtc) {
- ret = intel_encoder;
- num_encoders++;
- }
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i] ||
+ state->connector_states[i]->crtc != crtc_state->base.crtc)
+ continue;
+
+ ret = to_intel_encoder(state->connector_states[i]->best_encoder);
+ num_encoders++;
}
WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
@@ -752,9 +790,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
case DPLL_CRTL1_LINK_RATE_810:
link_clock = 81000;
break;
+ case DPLL_CRTL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
case DPLL_CRTL1_LINK_RATE_1350:
link_clock = 135000;
break;
+ case DPLL_CRTL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
case DPLL_CRTL1_LINK_RATE_2700:
link_clock = 270000;
break;
@@ -1175,7 +1222,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
- intel_ddi_get_crtc_new_encoder(intel_crtc);
+ intel_ddi_get_crtc_new_encoder(crtc_state);
int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
@@ -2153,7 +2200,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
struct intel_connector *connector;
enum port port = intel_dig_port->port;
- connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ connector = intel_connector_alloc();
if (!connector)
return NULL;
@@ -2172,7 +2219,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
struct intel_connector *connector;
enum port port = intel_dig_port->port;
- connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ connector = intel_connector_alloc();
if (!connector)
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f75173c20f47..d0f3cbc87474 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -83,7 +83,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
+ int x, int y, struct drm_framebuffer *old_fb,
+ struct drm_atomic_state *state);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -391,7 +392,7 @@ static const intel_limit_t intel_limits_chv = {
* them would make no difference.
*/
.dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4860000, .max = 6700000 },
+ .vco = { .min = 4800000, .max = 6480000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
.m2 = { .min = 24 << 22, .max = 175 << 22 },
@@ -430,25 +431,41 @@ bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
* intel_pipe_has_type() but looking at encoder->new_crtc instead of
* encoder->crtc.
*/
-static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
+ int type)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
+ int i, num_connectors = 0;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
- for_each_intel_encoder(dev, encoder)
- if (encoder->new_crtc == crtc && encoder->type == type)
+ num_connectors++;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ if (encoder->type == type)
return true;
+ }
+
+ WARN_ON(num_connectors == 0);
return false;
}
-static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
- int refclk)
+static const intel_limit_t *
+intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -466,20 +483,21 @@ static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
return limit;
}
-static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
+static const intel_limit_t *
+intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -487,17 +505,18 @@ static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
+static const intel_limit_t *
+intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc, refclk);
+ limit = intel_ironlake_limit(crtc_state, refclk);
else if (IS_G4X(dev)) {
- limit = intel_g4x_limit(crtc);
+ limit = intel_g4x_limit(crtc_state);
} else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
@@ -506,14 +525,14 @@ static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
@@ -600,15 +619,17 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -661,15 +682,17 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -720,10 +743,12 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
@@ -732,7 +757,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int err_most = (target >> 8) + (target >> 9);
found = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
@@ -776,11 +801,53 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
return found;
}
+/*
+ * Check whether the calculated PLL configuration is better than the best one
+ * found so far. The calculated error is returned via *error_ppm.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const intel_clock_t *calculated_clock,
+ const intel_clock_t *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(dev)) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (WARN_ON_ONCE(!target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
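A quick worked example of the error metric above (numbers chosen for illustration): with a target of 270000 and a calculated dot clock of 270100, error_ppm = 1000000 * 100 / 270000 ≈ 370, so this candidate only wins if 370 + 10 is still below the best error recorded so far, unless it qualifies for the small-error/bigger-P preference (error below 100 ppm and a larger P).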
+
static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
@@ -800,7 +867,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm, diff;
+ unsigned int ppm;
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
@@ -811,20 +878,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
&clock))
continue;
- diff = abs(clock.dot - target);
- ppm = div_u64(1000000ULL * diff, target);
-
- if (ppm < 100 && clock.p > best_clock->p) {
- bestppm = 0;
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
- if (bestppm >= 10 && ppm < bestppm - 10) {
- bestppm = ppm;
- *best_clock = clock;
- found = true;
- }
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
}
}
}
@@ -834,16 +896,20 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
intel_clock_t clock;
uint64_t m2;
int found = false;
memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
/*
* Based on hardware doc, the n always set to 1, and m1 always
@@ -857,6 +923,7 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
for (clock.p2 = limit->p2.p2_fast;
clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
clock.p = clock.p1 * clock.p2;
@@ -873,12 +940,13 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
- /* based on hardware requirement, prefer bigger p
- */
- if (clock.p > best_clock->p) {
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
}
}
@@ -897,8 +965,12 @@ bool intel_crtc_active(struct drm_crtc *crtc)
*
* We can ditch the crtc->primary->fb check as soon as we can
* properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
*/
- return intel_crtc->active && crtc->primary->fb &&
+ return intel_crtc->active && crtc->primary->state->fb &&
intel_crtc->config->base.adjusted_mode.crtc_clock;
}
@@ -1301,14 +1373,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
u32 val;
if (INTEL_INFO(dev)->gen >= 9) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
I915_STATE_WARN(val & SP_ENABLE,
@@ -2190,30 +2262,109 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-int
-intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier)
+{
+ unsigned int tile_height;
+ uint32_t pixel_bytes;
+
+ switch (fb_format_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ tile_height = 1;
+ break;
+ case I915_FORMAT_MOD_X_TILED:
+ tile_height = IS_GEN2(dev) ? 16 : 8;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ tile_height = 32;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
+ switch (pixel_bytes) {
+ default:
+ case 1:
+ tile_height = 64;
+ break;
+ case 2:
+ case 4:
+ tile_height = 32;
+ break;
+ case 8:
+ tile_height = 16;
+ break;
+ case 16:
+ WARN_ONCE(1,
+ "128-bit pixels are not supported for display!");
+ tile_height = 16;
+ break;
+ }
+ break;
+ default:
+ MISSING_CASE(fb_format_modifier);
+ tile_height = 1;
+ break;
+ }
+
+ return tile_height;
+}
+
+unsigned int
+intel_fb_align_height(struct drm_device *dev, unsigned int height,
+ uint32_t pixel_format, uint64_t fb_format_modifier)
+{
+ return ALIGN(height, intel_tile_height(dev, pixel_format,
+ fb_format_modifier));
+}
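As a worked example of the two helpers above: a Y-tiled framebuffer has a tile height of 32 lines, so intel_fb_align_height() rounds a 1080-line surface up to ALIGN(1080, 32) = 1088, while a linear framebuffer (tile height 1) keeps its height unchanged.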
+
+static int
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
- int tile_height;
+ struct intel_rotation_info *info = &view->rotation_info;
- tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
- return ALIGN(height, tile_height);
+ *view = i915_ggtt_view_normal;
+
+ if (!plane_state)
+ return 0;
+
+ if (!intel_rotation_90_or_270(plane_state->rotation))
+ return 0;
+
+ *view = i915_ggtt_view_rotated;
+
+ info->height = fb->height;
+ info->pixel_format = fb->pixel_format;
+ info->pitch = fb->pitches[0];
+ info->fb_modifier = fb->modifier[0];
+
+ if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
+ info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
+ DRM_DEBUG_KMS(
+ "Y or Yf tiling is needed for 90/270 rotation!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
u32 alignment;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2223,7 +2374,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
else
alignment = 64 * 1024;
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else {
@@ -2231,13 +2382,22 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
alignment = 0;
}
break;
- case I915_TILING_Y:
- WARN(1, "Y tiled bo slipped through, driver bug!\n");
- return -EINVAL;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
+ "Y tiling bo slipped through, driver bug!\n"))
+ return -EINVAL;
+ alignment = 1 * 1024 * 1024;
+ break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
+ return -EINVAL;
}
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ if (ret)
+ return ret;
+
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
@@ -2256,7 +2416,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
intel_runtime_pm_get(dev_priv);
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
+ &view);
if (ret)
goto err_interruptible;
@@ -2276,19 +2437,27 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
return 0;
err_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return ret;
}
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
+ int ret;
+
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ WARN_ONCE(ret, "Couldn't get view from plane state!");
+
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
}
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2366,12 +2535,13 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
static bool
-intel_alloc_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
@@ -2390,25 +2560,24 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
obj->tiling_mode = plane_config->tiling;
if (obj->tiling_mode == I915_TILING_X)
- obj->stride = crtc->base.primary->fb->pitches[0];
+ obj->stride = fb->pitches[0];
- mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
- mode_cmd.width = crtc->base.primary->fb->width;
- mode_cmd.height = crtc->base.primary->fb->height;
- mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
+ mode_cmd.pixel_format = fb->pixel_format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier[0];
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
mutex_lock(&dev->struct_mutex);
-
- if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
+ if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
-
- obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+ DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
return true;
out_unref_obj:
@@ -2421,35 +2590,37 @@ out_unref_obj:
static void
update_state_fb(struct drm_plane *plane)
{
- if (plane->fb != plane->state->fb)
- drm_atomic_set_fb_for_plane(plane->state, plane->fb);
+ if (plane->fb == plane->state->fb)
+ return;
+
+ if (plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+ plane->state->fb = plane->fb;
+ if (plane->state->fb)
+ drm_framebuffer_reference(plane->state->fb);
}
static void
-intel_find_plane_obj(struct intel_crtc *intel_crtc,
- struct intel_initial_plane_config *plane_config)
+intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
+ struct drm_plane *primary = intel_crtc->base.primary;
+ struct drm_framebuffer *fb;
- if (!intel_crtc->base.primary->fb)
+ if (!plane_config->fb)
return;
- if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- update_state_fb(primary);
-
- return;
+ if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ goto valid_fb;
}
- kfree(intel_crtc->base.primary->fb);
- intel_crtc->base.primary->fb = NULL;
+ kfree(plane_config->fb);
/*
* Failed to alloc the obj, check to see if we should share
@@ -2464,26 +2635,29 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (!i->active)
continue;
- obj = intel_fb_obj(c->primary->fb);
- if (obj == NULL)
+ fb = c->primary->fb;
+ if (!fb)
continue;
+ obj = intel_fb_obj(fb);
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- if (obj->tiling_mode != I915_TILING_NONE)
- dev_priv->preserve_bios_swizzle = true;
-
- drm_framebuffer_reference(c->primary->fb);
- primary->fb = c->primary->fb;
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
- break;
+ drm_framebuffer_reference(fb);
+ goto valid_fb;
}
}
- update_state_fb(intel_crtc->base.primary);
+ return;
+
+valid_fb:
+ obj = intel_fb_obj(fb);
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dev_priv->preserve_bios_swizzle = true;
+
+ primary->fb = fb;
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
+ update_state_fb(primary);
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
}
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2604,9 +2778,6 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane),
@@ -2708,9 +2879,6 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -2723,6 +2891,51 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format)
+{
+ u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+
+ /*
+ * The stride is expressed either in 64-byte chunks for linear
+ * buffers or in number of tiles for tiled buffers.
+ */
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ return 64;
+ case I915_FORMAT_MOD_X_TILED:
+ if (INTEL_INFO(dev)->gen == 2)
+ return 128;
+ return 512;
+ case I915_FORMAT_MOD_Y_TILED:
+ /* No need to check for old gens and Y tiling since this is
+ * about the display engine and those will be blocked before
+ * we get here.
+ */
+ return 128;
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (bits_per_pixel == 8)
+ return 64;
+ else
+ return 128;
+ default:
+ MISSING_CASE(fb_modifier);
+ return 64;
+ }
+}
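A short worked example of the stride units above (illustrative numbers): an X-tiled framebuffer on a gen9 part has a stride unit of 512 bytes, so a 16384-byte pitch is programmed by skylake_update_primary_plane() as PLANE_STRIDE = 16384 / 512 = 32 tiles, while the same pitch on a linear framebuffer would be 16384 / 64 = 256 chunks of 64 bytes.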
+
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj)
+{
+ const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
+
+ if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
+ view = &i915_ggtt_view_rotated;
+
+ return i915_gem_obj_ggtt_offset_view(obj, view);
+}
+
static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
@@ -2730,10 +2943,10 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int pipe = intel_crtc->pipe;
- u32 plane_ctl, stride;
+ u32 plane_ctl, stride_div;
+ unsigned long surf_addr;
if (!intel_crtc->primary_enabled) {
I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2777,43 +2990,39 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
BUG();
}
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- /*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X;
- stride = fb->pitches[0] >> 9;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ plane_ctl |= PLANE_CTL_TILED_Y;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ plane_ctl |= PLANE_CTL_TILED_YF;
break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
}
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
- I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-
- DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
- i915_gem_obj_ggtt_offset(obj),
- x, y, fb->width, fb->height,
- fb->pitches[0]);
+ obj = intel_fb_obj(fb);
+ stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
+ surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
+ I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
(intel_crtc->config->pipe_src_h - 1) << 16 |
(intel_crtc->config->pipe_src_w - 1));
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
- I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
+ I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -3064,38 +3273,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
-{
- return crtc->base.enabled && crtc->active &&
- crtc->config->has_pch_encoder;
-}
-
-static void ivb_modeset_global_resources(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
- struct intel_crtc *pipe_C_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
- uint32_t temp;
-
- /*
- * When everything is off disable fdi C so that we could enable fdi B
- * with all lanes. Note that we don't care about enabled pipes without
- * an enabled pch encoder.
- */
- if (!pipe_has_enabled_pch(pipe_B_crtc) &&
- !pipe_has_enabled_pch(pipe_C_crtc)) {
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
- temp = I915_READ(SOUTH_CHICKEN1);
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("disabling fdi C rx\n");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- }
-}
-
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -3751,20 +3928,23 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
- if (temp & FDI_BC_BIFURCATION_SELECT)
+ if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
- temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("enabling fdi C rx\n");
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ if (enable)
+ temp |= FDI_BC_BIFURCATION_SELECT;
+
+ DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
@@ -3772,20 +3952,19 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
switch (intel_crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (intel_crtc->config->fdi_lanes > 2)
- WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ cpt_set_fdi_bc_bifurcation(dev, false);
else
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
case PIPE_C:
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
default:
@@ -4120,6 +4299,24 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc)
}
}
+/*
+ * Disable a plane internally without actually modifying the plane's state.
+ * This will allow us to easily restore the plane later by just reprogramming
+ * its state.
+ */
+static void disable_plane_internal(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_plane_state *state =
+ plane->funcs->atomic_duplicate_state(plane);
+ struct intel_plane_state *intel_state = to_intel_plane_state(state);
+
+ intel_state->visible = false;
+ intel_plane->commit_plane(plane, intel_state);
+
+ intel_plane_destroy_state(plane, state);
+}
+
static void intel_disable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4129,8 +4326,8 @@ static void intel_disable_sprite_planes(struct drm_crtc *crtc)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
- plane->funcs->disable_plane(plane);
+ if (plane->fb && intel_plane->pipe == pipe)
+ disable_plane_internal(plane);
}
}
@@ -4204,7 +4401,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
bool reenable_ips = false;
/* The clocks have to be on to load the palette. */
- if (!crtc->enabled || !intel_crtc->active)
+ if (!crtc->state->enable || !intel_crtc->active)
return;
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -4288,11 +4485,10 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- if (dev_priv->fbc.plane == plane)
+ if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
hsw_disable_ips(intel_crtc);
@@ -4318,7 +4514,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -4327,7 +4523,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_prepare_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4426,7 +4622,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -4435,7 +4631,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_enable_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4760,8 +4956,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
-static void modeset_update_crtc_power_domains(struct drm_device *dev)
+static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
@@ -4773,7 +4970,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
- if (!crtc->base.enabled)
+ if (!crtc->base.state->enable)
continue;
pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
@@ -4783,7 +4980,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
}
if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
+ dev_priv->display.modeset_global_resources(state);
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
@@ -4900,24 +5097,23 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
switch (cdclk) {
- case 400000:
- cmd = 3;
- break;
case 333333:
case 320000:
- cmd = 2;
- break;
case 266667:
- cmd = 1;
- break;
case 200000:
- cmd = 0;
break;
default:
MISSING_CASE(cdclk);
return;
}
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
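Rather than mapping each cdclk to a hard-coded cmd value, the Punit is now handed the CCK divider directly. A standalone sketch of that arithmetic, using a hypothetical HPLL frequency (all values in kHz):

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_CLOSEST() for positive values. */
static unsigned int div_round_closest(unsigned int x, unsigned int d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	const unsigned int hpll_freq = 800000;	/* hypothetical HPLL, kHz */
	const unsigned int cdclk[] = { 333333, 320000, 266667, 200000 };

	for (unsigned int i = 0; i < sizeof(cdclk) / sizeof(cdclk[0]); i++)
		printf("cdclk %6u kHz -> cmd %u\n", cdclk[i],
		       div_round_closest(hpll_freq << 1, cdclk[i]) - 1);
	return 0;
}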
@@ -4937,27 +5133,25 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
-
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev_priv->dev))
- return 400000;
+ int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320/333MHz (depends on HPLL freq)
- * 400MHz
- * So we check to see whether we're above 90% of the lower bin and
- * adjust if needed.
+ * 400MHz (VLV only)
+ * So we check to see whether we're above 90% (VLV) or 95% (CHV)
+ * of the lower bin and adjust if needed.
*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (max_pixclk > freq_320*9/10)
+ if (!IS_CHERRYVIEW(dev_priv) &&
+ max_pixclk > freq_320*limit/100)
return 400000;
- else if (max_pixclk > 266667*9/10)
+ else if (max_pixclk > 266667*limit/100)
return freq_320;
else if (max_pixclk > 0)
return 266667;
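A standalone model of the bin selection above: VLV keeps the 400 MHz bin behind a 90% guard band, CHV drops it and uses 95%; the 200 MHz fall-through applies only when no pipe is active, per the comment. The pixel clock below is hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors valleyview_calc_cdclk() above; all clocks are in kHz. */
static int calc_cdclk(bool is_chv, int freq_320, int max_pixclk)
{
	int limit = is_chv ? 95 : 90;

	if (!is_chv && max_pixclk > freq_320 * limit / 100)
		return 400000;	/* 400 MHz bin exists on VLV only */
	else if (max_pixclk > 266667 * limit / 100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;	/* all pipes off */
}

int main(void)
{
	int max_pixclk = 300000;	/* hypothetical */

	printf("VLV: %d kHz, CHV: %d kHz\n",
	       calc_cdclk(false, 320000, max_pixclk),
	       calc_cdclk(true, 320000, max_pixclk));
	return 0;
}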
@@ -4994,12 +5188,49 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
/* disable/enable all currently active pipes while we change cdclk */
for_each_intel_crtc(dev, intel_crtc)
- if (intel_crtc->base.enabled)
+ if (intel_crtc->base.state->enable)
*prepare_pipes |= (1 << intel_crtc->pipe);
}
-static void valleyview_modeset_global_resources(struct drm_device *dev)
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+ unsigned int credits, default_credits;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ default_credits = PFI_CREDIT(12);
+ else
+ default_credits = PFI_CREDIT(8);
+
+ if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
+ /* CHV suggested value is 31 or 63 */
+ if (IS_CHERRYVIEW(dev_priv))
+ credits = PFI_CREDIT_31;
+ else
+ credits = PFI_CREDIT(15);
+ } else {
+ credits = default_credits;
+ }
+
+ /*
+ * WA - write default credits before re-programming
+ * FIXME: should we also set the resend bit here?
+ */
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ default_credits);
+
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ credits | PFI_CREDIT_RESEND);
+
+ /*
+ * FIXME is this guaranteed to clear
+ * immediately or should we poll for it?
+ */
+ WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
+static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5021,6 +5252,8 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
else
valleyview_set_cdclk(dev, req_cdclk);
+ vlv_program_pfi_credits(dev_priv);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
}
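vlv_program_pfi_credits() above picks between the platform default credits (12 on CHV, 8 on VLV) and the high values (31 or 15) depending on whether cdclk is at least czclk. A standalone sketch of that decision; the clock values are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the credit selection in vlv_program_pfi_credits() above:
 * cdclk in kHz, czclk in MHz, as implied by the comparison there. */
static int pfi_credits(bool is_chv, int cdclk_khz, int czclk_mhz)
{
	if ((cdclk_khz + 500) / 1000 >= czclk_mhz)	/* DIV_ROUND_CLOSEST */
		return is_chv ? 31 : 15;		/* high credits */
	return is_chv ? 12 : 8;				/* platform defaults */
}

int main(void)
{
	/* hypothetical clock pairs */
	printf("CHV, cdclk 320000 kHz, czclk 320 MHz: %d credits\n",
	       pfi_credits(true, 320000, 320));
	printf("VLV, cdclk 266667 kHz, czclk 320 MHz: %d credits\n",
	       pfi_credits(false, 266667, 320));
	return 0;
}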
@@ -5034,7 +5267,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
bool is_dsi;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -5049,7 +5282,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
}
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -5117,7 +5350,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
if (intel_crtc->active)
return;
@@ -5125,7 +5358,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
+ intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -5316,7 +5549,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
/* crtc should still be enabled when we disable it. */
- WARN_ON(!crtc->enabled);
+ WARN_ON(!crtc->state->enable);
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
@@ -5394,7 +5627,8 @@ static void intel_connector_check_state(struct intel_connector *connector)
crtc = encoder->base.crtc;
- I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+ I915_STATE_WARN(!crtc->state->enable,
+ "crtc not enabled\n");
I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
@@ -5402,6 +5636,34 @@ static void intel_connector_check_state(struct intel_connector *connector)
}
}
+int intel_connector_init(struct intel_connector *connector)
+{
+ struct drm_connector_state *connector_state;
+
+ connector_state = kzalloc(sizeof(*connector_state), GFP_KERNEL);

+ if (!connector_state)
+ return -ENOMEM;
+
+ connector->base.state = connector_state;
+ return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+ struct intel_connector *connector;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ if (intel_connector_init(connector) < 0) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
/* Even simpler default implementation, if there's really no special case to
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
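A hypothetical caller of the new helpers above, sketching how a connector constructor can switch from a bare kzalloc() to intel_connector_alloc() so that connector->base.state is always populated; the function name is made up and is not taken from this patch:

/* Hypothetical caller, not from the patch. */
static int intel_foo_connector_create(struct drm_device *dev)
{
	struct intel_connector *connector;

	connector = intel_connector_alloc();	/* kzalloc + state alloc */
	if (!connector)
		return -ENOMEM;

	/* ... drm_connector_init(), attach encoder, add properties ... */

	return 0;
}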
@@ -5433,13 +5695,21 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
return encoder->get_hw_state(encoder, &pipe);
}
+static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+
+ if (crtc->base.state->enable &&
+ crtc->config->has_pch_encoder)
+ return crtc->config->fdi_lanes;
+
+ return 0;
+}
+
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
-
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
@@ -5466,22 +5736,20 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
case PIPE_A:
return true;
case PIPE_B:
- if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
- pipe_config->fdi_lanes > 2) {
+ if (pipe_config->fdi_lanes > 2 &&
+ pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
return true;
case PIPE_C:
- if (!pipe_has_enabled_pch(pipe_B_crtc) ||
- pipe_B_crtc->config->fdi_lanes <= 2) {
- if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return false;
- }
- } else {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
@@ -5581,7 +5849,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -5615,10 +5883,6 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
u32 val;
int divider;
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev))
- return 400000;
-
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
@@ -5764,15 +6028,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+ int num_connectors)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
+ WARN_ON(!crtc_state->base.state);
+
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5815,8 +6082,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
- reduced_clock && i915.powersave) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
@@ -5884,7 +6151,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
@@ -5900,13 +6167,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
}
}
-void intel_dp_set_m_n(struct intel_crtc *crtc)
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
+ struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+
+ if (m_n == M1_N1) {
+ dp_m_n = &crtc->config->dp_m_n;
+ dp_m2_n2 = &crtc->config->dp_m2_n2;
+ } else if (m_n == M2_N2) {
+ /*
+ * M2_N2 registers are not supported. Hence the m2_n2 divider
+ * value needs to be programmed into M1_N1.
+ */
+ dp_m_n = &crtc->config->dp_m2_n2;
+ } else {
+ DRM_ERROR("Unsupported divider value\n");
+ return;
+ }
+
if (crtc->config->has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
- &crtc->config->dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
static void vlv_update_pll(struct intel_crtc *crtc,
@@ -6044,9 +6327,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 loopfilter, intcoeff;
+ u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
- int refclk;
+ u32 dpio_val;
+ int vco;
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
@@ -6054,6 +6338,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
bestm2 = pipe_config->dpll.m2 >> 22;
bestp1 = pipe_config->dpll.p1;
bestp2 = pipe_config->dpll.p2;
+ vco = pipe_config->dpll.vco;
+ dpio_val = 0;
+ loopfilter = 0;
/*
* Enable Refclk and SSC
@@ -6079,26 +6366,56 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ if (bestm2_frac)
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
- DPIO_CHV_FRAC_DIV_EN |
- (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+ if (bestm2_frac)
+ dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+ /* Program digital lock detect threshold */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+ if (!bestm2_frac)
+ dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
- refclk = i9xx_get_refclk(crtc, 0);
- loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
- 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
- if (refclk == 100000)
- intcoeff = 11;
- else if (refclk == 38400)
- intcoeff = 10;
- else
- intcoeff = 9;
- loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
+ if (vco == 5400000) {
+ loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6200000) {
+ loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6480000) {
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x8;
+ } else {
+ /* Not supported. Apply the same limits as in the max case */
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0;
+ }
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
/* AFC Recal */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
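The loop filter coefficients and the tribuf calibration count above are selected purely by VCO range. A standalone table of those values (same units as pipe_config->dpll.vco); the example VCO is hypothetical:

#include <stdio.h>

struct chv_lf {
	unsigned int prop, intcoeff, gain, tribuf;
};

/* Mirrors the VCO-range selection in chv_prepare_pll() above. */
static struct chv_lf chv_loopfilter(unsigned int vco)
{
	if (vco == 5400000)
		return (struct chv_lf){ 0x3, 0x8, 0x1, 0x9 };
	if (vco <= 6200000)
		return (struct chv_lf){ 0x5, 0xB, 0x3, 0x9 };
	if (vco <= 6480000)
		return (struct chv_lf){ 0x4, 0x9, 0x3, 0x8 };
	/* out of range: same coefficients as the max case, no tribuf */
	return (struct chv_lf){ 0x4, 0x9, 0x3, 0x0 };
}

int main(void)
{
	struct chv_lf lf = chv_loopfilter(6000000);	/* hypothetical VCO */

	printf("prop=%#x int=%#x gain=%#x tribuf=%#x\n",
	       lf.prop, lf.intcoeff, lf.gain, lf.tribuf);
	return 0;
}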
@@ -6123,6 +6440,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc_state pipe_config = {
+ .base.crtc = &crtc->base,
.pixel_multiplier = 1,
.dpll = *dpll,
};
@@ -6167,12 +6485,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
- is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -6215,7 +6533,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6245,7 +6563,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -6256,10 +6574,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6473,11 +6791,20 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != &crtc->base)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -6496,7 +6823,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
if (!crtc_state->clock_set) {
- refclk = i9xx_get_refclk(crtc, num_connectors);
+ refclk = i9xx_get_refclk(crtc_state, num_connectors);
/*
* Returns a set of divisors for the desired target clock with
@@ -6504,8 +6831,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
* the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
* 2) / p1 / p2.
*/
- limit = intel_limit(crtc, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
@@ -6521,7 +6848,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
* we will disable the LVDS downclock feature.
*/
has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, &clock,
&reduced_clock);
@@ -6620,7 +6947,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -6636,9 +6963,12 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4)
- if (val & DISPPLANE_TILED)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ }
+ }
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -6664,7 +6994,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -6673,7 +7004,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7147,18 +7478,26 @@ void intel_init_pch_refclk(struct drm_device *dev)
lpt_init_pch_refclk(dev);
}
-static int ironlake_get_refclk(struct drm_crtc *crtc)
+static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- int num_connectors = 0;
+ int num_connectors = 0, i;
bool is_lvds = false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -7345,22 +7684,21 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
- is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
+ is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
- refclk = ironlake_get_refclk(crtc);
+ refclk = ironlake_get_refclk(crtc_state);
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(intel_crtc, refclk);
- ret = dev_priv->display.find_dpll(limit, intel_crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ret = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
@@ -7374,7 +7712,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* downclock feature.
*/
*has_reduced_clock =
- dev_priv->display.find_dpll(limit, intel_crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
@@ -7407,16 +7745,24 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ struct intel_encoder *encoder;
uint32_t dpll;
- int factor, num_connectors = 0;
+ int factor, num_connectors = 0, i;
bool is_lvds = false, is_sdvo = false;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
- switch (intel_encoder->type) {
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -7545,7 +7891,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
}
}
- if (is_lvds && has_reduced_clock && i915.powersave)
+ if (is_lvds && has_reduced_clock)
crtc->lowfreq_avail = true;
else
crtc->lowfreq_avail = false;
@@ -7651,10 +7997,10 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val, base, offset, stride_mult;
+ u32 val, base, offset, stride_mult, tiling;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7670,9 +8016,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
if (!(val & PLANE_CTL_ENABLE))
goto error;
- if (val & PLANE_CTL_TILED_MASK)
- plane_config->tiling = I915_TILING_X;
-
pixel_format = val & PLANE_CTL_FORMAT_MASK;
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
@@ -7680,6 +8023,26 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ tiling = val & PLANE_CTL_TILED_MASK;
+ switch (tiling) {
+ case PLANE_CTL_TILED_LINEAR:
+ fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+ break;
+ case PLANE_CTL_TILED_X:
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF:
+ fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+ break;
+ default:
+ MISSING_CASE(tiling);
+ goto error;
+ }
+
base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
plane_config->base = base;
@@ -7690,21 +8053,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- stride_mult = 64;
- break;
- case I915_TILING_X:
- stride_mult = 512;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto error;
- }
+ stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -7713,7 +8068,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
return;
error:
@@ -7753,7 +8108,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7769,9 +8124,12 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4)
- if (val & DISPPLANE_TILED)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
+ fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ }
+ }
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -7797,7 +8155,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- plane_config->tiling);
+ fb->pixel_format,
+ fb->modifier[0]);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -7806,7 +8165,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
- crtc->base.primary->fb = fb;
+ plane_config->fb = intel_fb;
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@@ -8292,8 +8651,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
uint32_t cntl = 0, size = 0;
if (base) {
- unsigned int width = intel_crtc->cursor_width;
- unsigned int height = intel_crtc->cursor_height;
+ unsigned int width = intel_crtc->base.cursor->state->crtc_w;
+ unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
switch (stride) {
@@ -8357,7 +8716,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl = 0;
if (base) {
cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->cursor_width) {
+ switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
@@ -8368,7 +8727,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(intel_crtc->cursor_width);
+ MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
@@ -8415,7 +8774,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
base = 0;
if (x < 0) {
- if (x + intel_crtc->cursor_width <= 0)
+ if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -8424,7 +8783,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= x << CURSOR_X_SHIFT;
if (y < 0) {
- if (y + intel_crtc->cursor_height <= 0)
+ if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -8440,8 +8799,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
- base += (intel_crtc->cursor_height *
- intel_crtc->cursor_width - 1) * 4;
+ base += (intel_crtc->base.cursor->state->crtc_h *
+ intel_crtc->base.cursor->state->crtc_w - 1) * 4;
}
if (IS_845G(dev) || IS_I865G(dev))
@@ -8633,6 +8992,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state = NULL;
+ struct drm_connector_state *connector_state;
int ret, i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -8680,7 +9041,7 @@ retry:
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (possible_crtc->enabled)
+ if (possible_crtc->state->enable)
continue;
/* This can occur when applying the pipe A quirk on resume. */
if (to_intel_crtc(possible_crtc)->new_enabled)
@@ -8714,6 +9075,21 @@ retry:
old->load_detect_temp = true;
old->release_fb = NULL;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return false;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &intel_encoder->base;
+
if (!mode)
mode = &load_detect_mode;
@@ -8736,7 +9112,7 @@ retry:
goto fail;
}
- if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+ if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -8749,12 +9125,17 @@ retry:
return true;
fail:
- intel_crtc->new_enabled = crtc->enabled;
+ intel_crtc->new_enabled = crtc->state->enable;
if (intel_crtc->new_enabled)
intel_crtc->new_config = intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
+ if (state) {
+ drm_atomic_state_free(state);
+ state = NULL;
+ }
+
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
@@ -8764,24 +9145,44 @@ fail_unlock:
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old)
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_atomic_state *state;
+ struct drm_connector_state *connector_state;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
if (old->load_detect_temp) {
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ goto fail;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ goto fail;
+
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
intel_crtc->new_config = NULL;
- intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+ connector_state->best_encoder = NULL;
+ connector_state->crtc = NULL;
+
+ intel_set_mode(crtc, NULL, 0, 0, NULL, state);
+
+ drm_atomic_state_free(state);
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
@@ -8794,6 +9195,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
+
+ return;
+fail:
+ DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
+ drm_atomic_state_free(state);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -9032,6 +9438,8 @@ void intel_mark_busy(struct drm_device *dev)
intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
+ if (INTEL_INFO(dev)->gen >= 6)
+ gen6_rps_busy(dev_priv);
dev_priv->mm.busy = true;
}
@@ -9045,9 +9453,6 @@ void intel_mark_idle(struct drm_device *dev)
dev_priv->mm.busy = false;
- if (!i915.powersave)
- goto out;
-
for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -9058,7 +9463,6 @@ void intel_mark_idle(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
-out:
intel_runtime_pm_put(dev_priv);
}
@@ -9100,9 +9504,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb_obj);
+ intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
drm_gem_object_unreference(&work->pending_flip_obj->base);
- drm_gem_object_unreference(&work->old_fb_obj->base);
intel_fbc_update(dev);
@@ -9111,6 +9514,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ drm_framebuffer_unreference(work->old_fb);
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9627,69 +10031,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
return 0;
}
-static int intel_gen9_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
- uint32_t flags)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t plane = 0, stride;
- int ret;
-
- switch(intel_crtc->pipe) {
- case PIPE_A:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
- break;
- case PIPE_B:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
- break;
- case PIPE_C:
- plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
- break;
- default:
- WARN_ONCE(1, "unknown plane in flip command\n");
- return -ENODEV;
- }
-
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
- break;
- case I915_TILING_X:
- stride = fb->pitches[0] >> 9;
- break;
- default:
- WARN_ONCE(1, "unknown tiling in flip command\n");
- return -ENODEV;
- }
-
- ret = intel_ring_begin(ring, 10);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit(ring, DERRMR);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
- intel_ring_emit(ring, 0);
-
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
- intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-
- intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
-
- return 0;
-}
-
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -9719,10 +10060,10 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
!i915_gem_request_completed(work->flip_queued_req, true))
return false;
- work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+ work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
}
- if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
+ if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
return false;
/* Potential stall - if we see that the flip has happened,
@@ -9753,7 +10094,8 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
spin_lock(&dev->event_lock);
if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
- intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+ intel_crtc->unpin_work->flip_queued_vblank,
+ drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
}
spin_unlock(&dev->event_lock);
@@ -9805,7 +10147,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- work->old_fb_obj = intel_fb_obj(old_fb);
+ work->old_fb = old_fb;
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
@@ -9836,12 +10178,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto cleanup;
-
/* Reference the objects for the scheduled work. */
- drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_framebuffer_reference(work->old_fb);
drm_gem_object_reference(&obj->base);
crtc->primary->fb = fb;
@@ -9849,6 +10187,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->pending_flip_obj = obj;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
+
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -9857,7 +10199,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
- if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+ if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
@@ -9870,12 +10212,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ring = &dev_priv->ring[RCS];
}
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+ crtc->primary->state, ring);
if (ret)
goto cleanup_pending;
- work->gtt_offset =
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+ work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
+ + intel_crtc->dspaddr_offset;
if (use_mmio_flip(ring, obj)) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@@ -9895,10 +10238,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_ring_get_request(ring));
}
- work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+ work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
- i915_gem_track_fb(work->old_fb_obj, obj,
+ i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
intel_fbc_disable(dev);
@@ -9910,16 +10253,17 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_unpin:
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+ mutex_unlock(&dev->struct_mutex);
+cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- drm_gem_object_unreference(&work->old_fb_obj->base);
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&dev->struct_mutex);
-cleanup:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ drm_framebuffer_unreference(work->old_fb);
+
spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
spin_unlock_irq(&dev->event_lock);
@@ -9959,8 +10303,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(connector->base.encoder);
}
@@ -9971,7 +10314,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = crtc->base.enabled;
+ crtc->new_enabled = crtc->base.state->enable;
if (crtc->new_enabled)
crtc->new_config = crtc->config;
@@ -9980,6 +10323,27 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
}
}
+/* Transitional helper to copy current connector/encoder state to
+ * connector->state. This is needed so that code that is partially
+ * converted to atomic does the right thing.
+ */
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->base.encoder) {
+ connector->base.state->best_encoder =
+ connector->base.encoder;
+ connector->base.state->crtc =
+ connector->base.encoder->crtc;
+ } else {
+ connector->base.state->best_encoder = NULL;
+ connector->base.state->crtc = NULL;
+ }
+ }
+}
+
/**
* intel_modeset_commit_output_state
*
@@ -9991,8 +10355,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->base.encoder = &connector->new_encoder->base;
}
@@ -10001,8 +10364,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ crtc->base.state->enable = crtc->new_enabled;
crtc->base.enabled = crtc->new_enabled;
}
+
+ intel_modeset_update_connector_atomic_state(dev);
}
static void
@@ -10037,8 +10403,9 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state;
struct intel_connector *connector;
- int bpp;
+ int bpp, i;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
@@ -10078,11 +10445,15 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
pipe_config->pipe_bpp = bpp;
+ state = pipe_config->base.state;
+
/* Clamp display bpp to EDID value */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
- if (!connector->new_encoder ||
- connector->new_encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector = to_intel_connector(state->connectors[i]);
+ if (state->connector_states[i]->crtc != &crtc->base)
continue;
connected_sink_compute_bpp(connector, pipe_config);
@@ -10207,8 +10578,7 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
struct intel_encoder *encoder = connector->new_encoder;
if (!encoder)
@@ -10239,15 +10609,30 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
return true;
}
+static void
+clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+{
+ struct drm_crtc_state tmp_state;
+
+ /* Clear only the intel specific part of the crtc state */
+ tmp_state = crtc_state->base;
+ memset(crtc_state, 0, sizeof(*crtc_state));
+ crtc_state->base = tmp_state;
+}
+
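clear_intel_crtc_state() above wipes only the driver-private part of the crtc state by saving and restoring the embedded drm_crtc_state. A standalone illustration of that pattern with made-up struct names:

#include <stdio.h>
#include <string.h>

struct base_state { int id; };
struct driver_state {
	struct base_state base;	/* must survive the wipe */
	int private_a;
	int private_b;
};

static void clear_driver_state(struct driver_state *s)
{
	struct base_state tmp = s->base;	/* save the embedded base */

	memset(s, 0, sizeof(*s));		/* wipe driver-private fields */
	s->base = tmp;				/* restore the base */
}

int main(void)
{
	struct driver_state s = { .base = { .id = 7 }, .private_a = 1, .private_b = 2 };

	clear_driver_state(&s);
	printf("id=%d a=%d b=%d\n", s.base.id, s.private_a, s.private_b);
	return 0;	/* prints id=7 a=0 b=0 */
}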
static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode,
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_crtc_state *pipe_config;
int plane_bpp, ret = -EINVAL;
+ int i;
bool retry = true;
if (!check_encoder_cloning(to_intel_crtc(crtc))) {
@@ -10260,10 +10645,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return ERR_PTR(-EINVAL);
}
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
- if (!pipe_config)
- return ERR_PTR(-ENOMEM);
+ pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+ clear_intel_crtc_state(pipe_config);
+
+ pipe_config->base.crtc = crtc;
drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
drm_mode_copy(&pipe_config->base.mode, mode);
@@ -10318,11 +10706,17 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- for_each_intel_encoder(dev, encoder) {
+ for (i = 0; i < state->num_connector; i++) {
+ connector = to_intel_connector(state->connectors[i]);
+ if (!connector)
+ continue;
- if (&encoder->new_crtc->base != crtc)
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
@@ -10358,7 +10752,6 @@ encoder_retry:
return pipe_config;
fail:
- kfree(pipe_config);
return ERR_PTR(ret);
}
@@ -10380,8 +10773,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
* to be part of the prepare_pipes mask. We don't (yet) support global
* modeset across multiple crtcs, so modeset_pipes will only have one
* bit set at most. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder == &connector->new_encoder->base)
continue;
@@ -10412,7 +10804,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
/* Check for pipes that will be enabled/disabled ... */
for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.enabled == intel_crtc->new_enabled)
+ if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
continue;
if (!intel_crtc->new_enabled)
@@ -10487,10 +10879,10 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
/* Double check state. */
for_each_intel_crtc(dev, intel_crtc) {
- WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+ WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
intel_crtc->new_config != intel_crtc->config);
- WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
+ WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -10750,7 +11142,7 @@ static void check_wm_state(struct drm_device *dev)
continue;
/* planes */
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
hw_entry = &hw_ddb.plane[pipe][plane];
sw_entry = &sw_ddb->plane[pipe][plane];
@@ -10784,8 +11176,7 @@ check_connector_state(struct drm_device *dev)
{
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* This also checks the encoder/connector hw state with the
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
@@ -10815,8 +11206,7 @@ check_encoder_state(struct drm_device *dev)
I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder != &encoder->base)
continue;
enabled = true;
@@ -10877,7 +11267,7 @@ check_crtc_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
- I915_STATE_WARN(crtc->active && !crtc->base.enabled,
+ I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
"active crtc, but not enabled in sw tracking\n");
for_each_intel_encoder(dev, encoder) {
@@ -10891,9 +11281,10 @@ check_crtc_state(struct drm_device *dev)
I915_STATE_WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
- I915_STATE_WARN(enabled != crtc->base.enabled,
+ I915_STATE_WARN(enabled != crtc->base.state->enable,
"crtc's computed enabled state doesn't match tracked enabled state "
- "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+ "(expected %i, found %i)\n", enabled,
+ crtc->base.state->enable);
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
@@ -10957,7 +11348,7 @@ check_shared_dpll_state(struct drm_device *dev)
pll->on, active);
for_each_intel_crtc(dev, crtc) {
- if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
+ if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
@@ -11039,17 +11430,30 @@ static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
+ struct drm_atomic_state *state,
unsigned *modeset_pipes,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
+ struct drm_device *dev = crtc->dev;
struct intel_crtc_state *pipe_config = NULL;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ERR_PTR(ret);
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
- if ((*modeset_pipes) == 0)
- goto out;
+ for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
+ pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ pipe_config->base.enable = false;
+ }
/*
* Note this needs changes when we start tracking multiple modes
@@ -11057,15 +11461,21 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
- if (IS_ERR(pipe_config)) {
- goto out;
+ for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
+ /* FIXME: For now we still expect modeset_pipes to have at most
+ * one bit set. */
+ if (WARN_ON(&intel_crtc->base != crtc))
+ continue;
+
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
}
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
-out:
- return pipe_config;
+ return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
}
static int __intel_set_mode_setup_plls(struct drm_device *dev,
@@ -11109,6 +11519,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
+ struct intel_crtc_state *crtc_state_copy = NULL;
struct intel_crtc *intel_crtc;
int ret = 0;
@@ -11116,6 +11527,12 @@ static int __intel_set_mode(struct drm_crtc *crtc,
if (!saved_mode)
return -ENOMEM;
+ crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
+ if (!crtc_state_copy) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
*saved_mode = crtc->mode;
if (modeset_pipes)
@@ -11143,7 +11560,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
intel_crtc_disable(&intel_crtc->base);
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
- if (intel_crtc->base.enabled)
+ if (intel_crtc->base.state->enable)
dev_priv->display.crtc_disable(&intel_crtc->base);
}
@@ -11173,7 +11590,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- modeset_update_crtc_power_domains(dev);
+ modeset_update_crtc_power_domains(pipe_config->base.state);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
@@ -11199,9 +11616,25 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* FIXME: add subpixel order */
done:
- if (ret && crtc->enabled)
+ if (ret && crtc->state->enable)
crtc->mode = *saved_mode;
+ if (ret == 0 && pipe_config) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* The pipe_config will be freed with the atomic state, so
+ * make a copy. */
+ memcpy(crtc_state_copy, intel_crtc->config,
+ sizeof(*crtc_state_copy));
+ intel_crtc->config = crtc_state_copy;
+ intel_crtc->base.state = &crtc_state_copy->base;
+
+ if (modeset_pipes)
+ intel_crtc->new_config = intel_crtc->config;
+ } else {
+ kfree(crtc_state_copy);
+ }
+
kfree(saved_mode);
return ret;
}
@@ -11227,27 +11660,81 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc,
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+ int x, int y, struct drm_framebuffer *fb,
+ struct drm_atomic_state *state)
{
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
+ int ret = 0;
- pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+ pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
- if (IS_ERR(pipe_config))
- return PTR_ERR(pipe_config);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto out;
+ }
- return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
+ ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
+ if (ret)
+ goto out;
+
+out:
+ return ret;
}
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
+ crtc->base.id);
+ return;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ /* The force restore path in the HW readout code relies on the staged
+ * config still keeping the user requested config while the actual
+ * state has been overwritten by the configuration read from HW. We
+ * need to copy the staged config to the atomic state, otherwise the
+ * mode set will just reapply the state the HW is already in. */
+ for_each_intel_encoder(dev, encoder) {
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->new_encoder != encoder)
+ continue;
+
+ connector_state = drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state)) {
+ DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
+ connector->base.base.id,
+ connector->base.name,
+ PTR_ERR(connector_state));
+ continue;
+ }
+
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &encoder->base;
+ }
+ }
+
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
+ state);
+
+ drm_atomic_state_free(state);
}
#undef for_each_intel_crtc_masked
@@ -11295,7 +11782,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
*/
count = 0;
for_each_crtc(dev, crtc) {
- config->save_crtc_enabled[count++] = crtc->enabled;
+ config->save_crtc_enabled[count++] = crtc->state->enable;
}
count = 0;
@@ -11336,7 +11823,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(config->save_connector_encoders[count++]);
}
@@ -11416,9 +11903,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
- struct intel_set_config *config)
+ struct intel_set_config *config,
+ struct drm_atomic_state *state)
{
struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
int ro;
@@ -11428,8 +11917,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* Otherwise traverse passed in connector list and get encoders
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
@@ -11454,15 +11942,16 @@ intel_modeset_stage_output_state(struct drm_device *dev,
if (&connector->new_encoder->base != connector->base.encoder) {
- DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
+ connector->base.base.id,
+ connector->base.name);
config->mode_changed = true;
}
}
/* connector->new_encoder is now updated for all connectors. */
/* Update crtc of enabled connectors. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
struct drm_crtc *new_crtc;
if (!connector->new_encoder)
@@ -11482,6 +11971,14 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ connector_state->crtc = new_crtc;
+ connector_state->best_encoder = &connector->new_encoder->base;
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
connector->base.name,
@@ -11491,9 +11988,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Check for any encoders that needs to be disabled. */
for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
num_connectors++;
@@ -11508,16 +12003,25 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
- DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n",
+ encoder->base.base.id,
+ encoder->base.name);
config->mode_changed = true;
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
- if (connector->new_encoder)
+ for_each_intel_connector(dev, connector) {
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ if (connector->new_encoder) {
if (connector->new_encoder != connector->encoder)
connector->encoder = connector->new_encoder;
+ } else {
+ connector_state->crtc = NULL;
+ }
}
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -11529,8 +12033,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
}
- if (crtc->new_enabled != crtc->base.enabled) {
- DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+ if (crtc->new_enabled != crtc->base.state->enable) {
+ DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n",
+ crtc->base.base.id,
crtc->new_enabled ? "en" : "dis");
config->mode_changed = true;
}
@@ -11553,7 +12058,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc)
DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
pipe_name(crtc->pipe));
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder &&
connector->new_encoder->new_crtc == crtc)
connector->new_encoder = NULL;
@@ -11572,6 +12077,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_mode_set save_set;
+ struct drm_atomic_state *state = NULL;
struct intel_set_config *config;
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
@@ -11616,12 +12122,20 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
* such cases. */
intel_set_config_compute_mode_changes(set, config);
- ret = intel_modeset_stage_output_state(dev, set, config);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_config;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ ret = intel_modeset_stage_output_state(dev, set, config, state);
if (ret)
goto fail;
pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
- set->fb,
+ set->fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
@@ -11641,10 +12155,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
*/
}
- /* set_mode will free it in the mode_changed case */
- if (!config->mode_changed)
- kfree(pipe_config);
-
intel_update_pipe_size(to_intel_crtc(set->crtc));
if (config->mode_changed) {
@@ -11690,6 +12200,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
fail:
intel_set_config_restore_state(dev, config);
+ drm_atomic_state_clear(state);
+
/*
* HACK: if the pipe was on, but we didn't have a framebuffer,
* force the pipe off to avoid oopsing in the modeset code
@@ -11702,11 +12214,15 @@ fail:
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
+ save_set.x, save_set.y, save_set.fb,
+ state))
DRM_ERROR("failed to restore config after modeset failure\n");
}
out_config:
+ if (state)
+ drm_atomic_state_free(state);
+
intel_set_config_free(config);
return ret;
}
@@ -11821,6 +12337,28 @@ static void intel_shared_dpll_init(struct drm_device *dev)
}
/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true or false.
+ */
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* Update watermarks on tiling changes. */
+ if (!plane->state->fb || !state->fb ||
+ plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+ plane->state->rotation != state->rotation)
+ return true;
+
+ return false;
+}
+
+/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
* @fb: framebuffer to prepare for presentation
@@ -11834,7 +12372,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -11868,7 +12407,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
}
if (ret == 0)
@@ -11888,7 +12427,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -11899,7 +12439,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, old_state);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -11944,7 +12484,7 @@ intel_check_primary_plane(struct drm_plane *plane,
*/
if (intel_crtc->primary_enabled &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.crtc == intel_crtc &&
state->base.rotation != BIT(DRM_ROTATE_0)) {
intel_crtc->atomic.disable_fbc = true;
}
@@ -11963,6 +12503,9 @@ intel_check_primary_plane(struct drm_plane *plane,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
intel_crtc->atomic.update_fbc = true;
+
+ if (intel_wm_need_update(plane, &state->base))
+ intel_crtc->atomic.update_wm = true;
}
return 0;
@@ -11977,8 +12520,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
crtc = crtc ? crtc : plane->crtc;
@@ -11988,8 +12529,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- intel_plane->obj = obj;
-
if (intel_crtc->active) {
if (state->visible) {
/* FIXME: kill this fastboot hack */
@@ -12229,17 +12768,14 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- /* we only need to pin inside GTT if cursor is non-phy */
- mutex_lock(&dev->struct_mutex);
- if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+ if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
finish:
if (intel_crtc->active) {
- if (intel_crtc->cursor_width != state->base.crtc_w)
+ if (plane->state->crtc_w != state->base.crtc_w)
intel_crtc->atomic.update_wm = true;
intel_crtc->atomic.fb_bits |=
@@ -12256,7 +12792,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc = state->base.crtc;
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
@@ -12267,8 +12802,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc->cursor_x = state->base.crtc_x;
crtc->cursor_y = state->base.crtc_y;
- intel_plane->obj = obj;
-
if (intel_crtc->cursor_bo == obj)
goto update;
@@ -12282,8 +12815,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
update:
- intel_crtc->cursor_width = state->base.crtc_w;
- intel_crtc->cursor_height = state->base.crtc_h;
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, state->visible);
@@ -12353,6 +12884,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (!crtc_state)
goto fail;
intel_crtc_set_state(intel_crtc, crtc_state);
+ crtc_state->base.crtc = &intel_crtc->base;
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
@@ -12430,9 +12962,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
if (!drmmode_crtc) {
@@ -12502,7 +13031,6 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_connector *connector;
bool dpd_is_edp = false;
intel_lvds_init(dev);
@@ -12513,10 +13041,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (HAS_DDI(dev)) {
int found;
- /* Haswell uses DDI functions to detect digital outputs */
+ /*
+ * Haswell uses DDI functions to detect digital outputs.
+ * On SKL pre-D0 the strap isn't connected, so we assume
+ * it's there.
+ */
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
- /* DDI A only supports eDP */
- if (found)
+ /* WaIgnoreDDIAStrap: skl */
+ if (found ||
+ (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -12633,37 +13166,6 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- /*
- * FIXME: We don't have full atomic support yet, but we want to be
- * able to enable/test plane updates via the atomic interface in the
- * meantime. However as soon as we flip DRIVER_ATOMIC on, the DRM core
- * will take some atomic codepaths to lookup properties during
- * drmModeGetConnector() that unconditionally dereference
- * connector->state.
- *
- * We create a dummy connector state here for each connector to ensure
- * the DRM core doesn't try to dereference a NULL connector->state.
- * The actual connector properties will never be updated or contain
- * useful information, but since we're doing this specifically for
- * testing/debug of the plane operations (and only when a specific
- * kernel module option is given), that shouldn't really matter.
- *
- * Once atomic support for crtc's + connectors lands, this loop should
- * be removed since we'll be setting up real connector state, which
- * will contain Intel-specific properties.
- */
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (!WARN_ON(connector->state)) {
- connector->state =
- kzalloc(sizeof(*connector->state),
- GFP_KERNEL);
- }
- }
- }
-
intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
@@ -12705,52 +13207,100 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
+static
+u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format)
+{
+ u32 gen = INTEL_INFO(dev)->gen;
+
+ if (gen >= 9) {
+ /* "The stride in bytes must not exceed the of the size of 8K
+ * pixels and 32K bytes."
+ */
+ return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
+ } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
+ return 32*1024;
+ } else if (gen >= 4) {
+ if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+ return 16*1024;
+ else
+ return 32*1024;
+ } else if (gen >= 3) {
+ if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ /* XXX DSPC is limited to 4k tiled */
+ return 8*1024;
+ }
+}
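/*
 * Illustrative arithmetic for the gen9 branch above: with a 32bpp format
 * drm_format_plane_cpp() returns 4, so the limit is
 * min(8192 * 4, 32768) = 32768 bytes, while an 8bpp format gives
 * min(8192 * 1, 32768) = 8192 bytes.
 */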
+
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
- int aligned_height;
- int pitch_limit;
+ unsigned int aligned_height;
int ret;
+ u32 pitch_limit, stride_alignment;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->tiling_mode == I915_TILING_Y) {
- DRM_DEBUG("hardware does not support tiling Y\n");
- return -EINVAL;
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /* Enforce that fb modifier and tiling mode match, but only for
+ * X-tiled. This is needed for FBC. */
+ if (!!(obj->tiling_mode == I915_TILING_X) !=
+ !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+ DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (obj->tiling_mode == I915_TILING_X)
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ else if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
}
- if (mode_cmd->pitches[0] & 63) {
- DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
- mode_cmd->pitches[0]);
+ /* Passed in modifier sanity checking. */
+ switch (mode_cmd->modifier[0]) {
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (INTEL_INFO(dev)->gen < 9) {
+ DRM_DEBUG("Unsupported tiling 0x%llx!\n",
+ mode_cmd->modifier[0]);
+ return -EINVAL;
+ }
+ case DRM_FORMAT_MOD_NONE:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
+ mode_cmd->modifier[0]);
return -EINVAL;
}
- if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
- pitch_limit = 32*1024;
- } else if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode)
- pitch_limit = 16*1024;
- else
- pitch_limit = 32*1024;
- } else if (INTEL_INFO(dev)->gen >= 3) {
- if (obj->tiling_mode)
- pitch_limit = 8*1024;
- else
- pitch_limit = 16*1024;
- } else
- /* XXX DSPC is limited to 4k tiled */
- pitch_limit = 8*1024;
+ stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+ mode_cmd->pixel_format);
+ if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
+ DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
+ mode_cmd->pitches[0], stride_alignment);
+ return -EINVAL;
+ }
+ pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+ mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
- DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
- obj->tiling_mode ? "tiled" : "linear",
+ DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+ "tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
return -EINVAL;
}
- if (obj->tiling_mode != I915_TILING_NONE &&
+ if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
mode_cmd->pitches[0] != obj->stride) {
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
mode_cmd->pitches[0], obj->stride);
@@ -12805,7 +13355,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
aligned_height = intel_fb_align_height(dev, mode_cmd->height,
- obj->tiling_mode);
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
@@ -12958,8 +13509,6 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.modeset_global_resources =
- ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
} else if (IS_VALLEYVIEW(dev)) {
@@ -12967,9 +13516,6 @@ static void intel_init_display(struct drm_device *dev)
valleyview_modeset_global_resources;
}
- /* Default just returns -ENODEV to indicate unsupported */
- dev_priv->display.queue_flip = intel_default_queue_flip;
-
switch (INTEL_INFO(dev)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -12992,8 +13538,10 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
case 9:
- dev_priv->display.queue_flip = intel_gen9_queue_flip;
- break;
+ /* Drop through - unsupported since execlist only. */
+ default:
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
}
intel_panel_init_backlight_funcs(dev);
@@ -13087,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
};
static struct intel_quirk intel_quirks[] = {
- /* HP Mini needs pipe A force quirk (LP: #322104) */
- { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
-
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
@@ -13212,6 +13757,8 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.allow_fb_modifiers = true;
+
dev->mode_config.funcs = &intel_mode_funcs;
intel_init_quirks(dev);
@@ -13254,7 +13801,7 @@ void intel_modeset_init(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) {
intel_crtc_init(dev, pipe);
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
@@ -13295,7 +13842,7 @@ void intel_modeset_init(struct drm_device *dev)
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
- intel_find_plane_obj(crtc, &crtc->plane_config);
+ intel_find_initial_plane_obj(crtc, &crtc->plane_config);
}
}
}
@@ -13310,9 +13857,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
@@ -13323,7 +13868,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
return;
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
- intel_release_load_detect_pipe(crt, &load_detect_temp);
+ intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
static bool
@@ -13357,11 +13902,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* restore vblank interrupts to correct state */
+ drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) {
update_scanline_offset(crtc);
- drm_vblank_on(dev, crtc->pipe);
- } else
- drm_vblank_off(dev, crtc->pipe);
+ drm_crtc_vblank_on(&crtc->base);
+ }
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
@@ -13383,8 +13928,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
crtc->plane = plane;
/* ... and break all links. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->base.crtc != &crtc->base)
continue;
@@ -13393,14 +13937,14 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
}
/* multiple connectors may have the same encoder:
* handle them and break crtc link separately */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head)
+ for_each_intel_connector(dev, connector)
if (connector->encoder->base.crtc == &crtc->base) {
connector->encoder->base.crtc = NULL;
connector->encoder->connectors_active = false;
}
WARN_ON(crtc->active);
+ crtc->base.state->enable = false;
crtc->base.enabled = false;
}
@@ -13417,7 +13961,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* have active connectors/encoders. */
intel_crtc_update_dpms(&crtc->base);
- if (crtc->active != crtc->base.enabled) {
+ if (crtc->active != crtc->base.state->enable) {
struct intel_encoder *encoder;
/* This can happen either due to bugs in the get_hw_state
@@ -13425,9 +13969,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* pipe A quirk. */
DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
crtc->base.base.id,
- crtc->base.enabled ? "enabled" : "disabled",
+ crtc->base.state->enable ? "enabled" : "disabled",
crtc->active ? "enabled" : "disabled");
+ crtc->base.state->enable = crtc->active;
crtc->base.enabled = crtc->active;
/* Because we only establish the connector -> encoder ->
@@ -13496,9 +14041,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder != encoder)
continue;
connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -13564,6 +14107,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = dev_priv->display.get_pipe_config(crtc,
crtc->config);
+ crtc->base.state->enable = crtc->active;
crtc->base.enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
@@ -13612,8 +14156,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe_name(pipe));
}
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->encoder->connectors_active = true;
@@ -13669,6 +14212,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
"[setup_hw_state]");
}
+ intel_modeset_update_connector_atomic_state(dev);
+
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -13697,8 +14242,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->primary->fb);
+ intel_crtc_restore_mode(crtc);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13712,6 +14256,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
+ int ret;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
@@ -13736,15 +14281,18 @@ void intel_modeset_gem_init(struct drm_device *dev)
* pinned & fenced. When we do the allocation it's too early
* for this.
*/
- mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
- if (intel_pin_and_fence_fb_obj(c->primary,
- c->primary->fb,
- NULL)) {
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(c->primary,
+ c->primary->fb,
+ c->primary->state,
+ NULL);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -13752,7 +14300,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
update_state_fb(c->primary);
}
}
- mutex_unlock(&dev->struct_mutex);
intel_backlight_register(dev);
}
@@ -13793,8 +14340,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_fbc_disable(dev);
- ironlake_teardown_rc6(dev);
-
mutex_unlock(&dev->struct_mutex);
/* flush any delayed tasks or pending work */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a74aaf9242b9..f27346e907b1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -84,6 +84,13 @@ static const struct dp_link_dpll chv_dpll[] = {
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
+/* Skylake supports the following rates */
+static const int gen9_rates[] = { 162000, 216000, 270000,
+ 324000, 432000, 540000 };
+static const int chv_rates[] = { 162000, 202500, 210000, 216000,
+ 243000, 270000, 324000, 405000,
+ 420000, 432000, 540000 };
+static const int default_rates[] = { 162000, 270000, 540000 };
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -118,23 +125,15 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
-int
-intel_dp_max_link_bw(struct intel_dp *intel_dp)
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
- struct drm_device *dev = intel_dp->attached_connector->base.dev;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
- break;
- case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
- INTEL_INFO(dev)->gen >= 8) &&
- intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
- max_link_bw = DP_LINK_BW_5_4;
- else
- max_link_bw = DP_LINK_BW_2_7;
+ case DP_LINK_BW_5_4:
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -210,7 +209,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
target_clock = fixed_mode->clock;
}
- max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
@@ -240,7 +239,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
return v;
}
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@@ -943,8 +942,9 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
size_t txsize, rxsize;
int ret;
- txbuf[0] = msg->request << 4;
- txbuf[1] = msg->address >> 8;
+ txbuf[0] = (msg->request << 4) |
+ ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
txbuf[2] = msg->address & 0xff;
txbuf[3] = msg->size - 1;
@@ -952,7 +952,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 1;
+ rxsize = 2; /* 0 or 1 data bytes */
if (WARN_ON(txsize > 20))
return -E2BIG;
@@ -963,8 +963,13 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
- /* Return payload size. */
- ret = msg->size;
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
}
break;
@@ -1075,7 +1080,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
u32 ctrl1;
@@ -1084,19 +1089,35 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
pipe_config->dpll_hw_state.cfgcr2 = 0;
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- switch (link_bw) {
- case DP_LINK_BW_1_62:
+ switch (link_clock / 2) {
+ case 81000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
SKL_DPLL0);
break;
- case DP_LINK_BW_2_7:
+ case 135000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
SKL_DPLL0);
break;
- case DP_LINK_BW_5_4:
+ case 270000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
SKL_DPLL0);
break;
+ case 162000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
+ SKL_DPLL0);
+ break;
+ /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
+ * results in a CDCLK change. Need to handle the change of CDCLK by
+ * disabling pipes and re-enabling them.
+ */
+ case 108000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
+ SKL_DPLL0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
+ SKL_DPLL0);
+ break;
+
}
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
@@ -1117,6 +1138,42 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
}
}
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
+
+ *sink_rates = default_rates;
+
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
+
+static int
+intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+{
+ if (INTEL_INFO(dev)->gen >= 9) {
+ *source_rates = gen9_rates;
+ return ARRAY_SIZE(gen9_rates);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *source_rates = chv_rates;
+ return ARRAY_SIZE(chv_rates);
+ }
+
+ *source_rates = default_rates;
+
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ /* WaDisableHBR2:skl */
+ return (DP_LINK_BW_2_7 >> 3) + 1;
+ else if (INTEL_INFO(dev)->gen >= 8 ||
+ (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ return (DP_LINK_BW_5_4 >> 3) + 1;
+ else
+ return (DP_LINK_BW_2_7 >> 3) + 1;
+}
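/*
 * Illustrative note on the return values above: the DP_LINK_BW_* codes are
 * 0x06, 0x0a and 0x14, so (code >> 3) + 1 evaluates to 1, 2 and 3, i.e. how
 * many leading entries of default_rates[] (162000, 270000, 540000) the
 * source or sink may use.
 */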
+
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config, int link_bw)
@@ -1150,6 +1207,113 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
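/*
 * Worked example (illustrative): intersecting
 * default_rates[] = { 162000, 270000, 540000 } with chv_rates[] yields
 * common_rates[] = { 162000, 270000, 540000 } and a return value of 3.
 * Both inputs must already be sorted in ascending order for the
 * two-pointer walk above to terminate correctly.
 */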
+
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(dev, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
+
+static void snprintf_int_array(char *str, size_t len,
+ const int *array, int nelem)
+{
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < nelem; i++) {
+ int r = snprintf(str, len, "%d,", array[i]);
+ if (r >= len)
+ return;
+ str += r;
+ len -= r;
+ }
+}
+
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len, common_len;
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ char str[128]; /* FIXME: too big for stack? */
+
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ source_len = intel_dp_source_rates(dev, &source_rates);
+ snprintf_int_array(str, sizeof(str), source_rates, source_len);
+ DRM_DEBUG_KMS("source rates: %s\n", str);
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
+ DRM_DEBUG_KMS("sink rates: %s\n", str);
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+ snprintf_int_array(str, sizeof(str), common_rates, common_len);
+ DRM_DEBUG_KMS("common rates: %s\n", str);
+}
+
+static int rate_to_index(int find, const int *rates)
+{
+ int i = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
+ if (find == rates[i])
+ break;
+
+ return i;
+}
+
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+ int rates[DP_MAX_SUPPORTED_RATES] = {};
+ int len;
+
+ len = intel_dp_common_rates(intel_dp, rates);
+ if (WARN_ON(len <= 0))
+ return 162000;
+
+ return rates[rate_to_index(0, rates) - 1];
+}
+
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+ return rate_to_index(rate, intel_dp->sink_rates);
+}
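/*
 * Note on the zero sentinel used by intel_dp_max_link_rate() above: rates[]
 * is zero-initialized, so the slots past the common rates stay 0 and
 * rate_to_index(0, rates) returns the number of valid entries.  For an
 * HBR2-capable source and sink this makes rates[len - 1] == 540000.
 */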
+
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1159,24 +1323,32 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *intel_crtc = encoder->new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int min_lane_count = 1;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift...*/
int min_clock = 0;
- int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
+ int max_clock;
int bpp, mode_rate;
- static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
+ int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+ int common_len;
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+
+ /* No common link rates between source and sink */
+ WARN_ON(common_len <= 0);
+
+ max_clock = common_len - 1;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
pipe_config->has_dp_encoder = true;
pipe_config->has_drrs = false;
- pipe_config->has_audio = intel_dp->has_audio;
+ pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -1193,8 +1365,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return false;
DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock],
+ "max bw %d pixel clock %iKHz\n",
+ max_lane_count, common_rates[max_clock],
adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
@@ -1223,8 +1395,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
bpp);
for (clock = min_clock; clock <= max_clock; clock++) {
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+ for (lane_count = min_lane_count;
+ lane_count <= max_lane_count;
+ lane_count <<= 1) {
+
+ link_clock = common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1253,10 +1428,20 @@ found:
if (intel_dp->color_range)
pipe_config->limited_color_range = true;
- intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
+
+ if (intel_dp->num_sink_rates) {
+ intel_dp->link_bw = 0;
+ intel_dp->rate_select =
+ intel_dp_rate_select(intel_dp, common_rates[clock]);
+ } else {
+ intel_dp->link_bw =
+ drm_dp_link_rate_to_bw_code(common_rates[clock]);
+ intel_dp->rate_select = 0;
+ }
+
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = common_rates[clock];
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -1279,7 +1464,7 @@ found:
}
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+ skl_edp_set_pll_config(pipe_config, common_rates[clock]);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
@@ -2026,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
int dotclock;
tmp = I915_READ(intel_dp->output_reg);
- if (tmp & DP_AUDIO_OUTPUT_ENABLE)
- pipe_config->has_audio = true;
+
+ pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
if (tmp & DP_SYNC_HS_HIGH)
@@ -2557,11 +2742,6 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
/* Program Tx lane latency optimal setting*/
for (i = 0; i < 4; i++) {
- /* Set the latency optimal bit */
- data = (i == 1) ? 0x0 : 0x6;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
- data << DPIO_FRC_LATENCY_SHFIT);
-
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@ -2691,11 +2871,14 @@ static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (IS_VALLEYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2719,6 +2902,8 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
@@ -3201,6 +3386,9 @@ intel_hsw_signal_levels(uint8_t train_set)
return DDI_BUF_TRANS_SELECT(7);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(8);
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DDI_BUF_TRANS_SELECT(9);
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
@@ -3358,6 +3546,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ if (intel_dp->num_sink_rates)
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+ &intel_dp->rate_select, 1);
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
@@ -3570,6 +3761,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint8_t rev;
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
@@ -3601,6 +3793,33 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
} else
intel_dp->use_tps3 = false;
+ /* Intermediate frequency support */
+ if (is_edp(intel_dp) &&
+ (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+ (rev >= 0x03)) { /* eDP 1.4 or higher */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+ int i;
+
+ intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SUPPORTED_LINK_RATES,
+ sink_rates,
+ sizeof(sink_rates));
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ int val = le16_to_cpu(sink_rates[i]);
+
+ if (val == 0)
+ break;
+
+ /* Value read is in kHz while drm clock is saved in deca-kHz */
+ intel_dp->sink_rates[i] = (val * 200) / 10;
+ }
+ intel_dp->num_sink_rates = i;
+ }
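/*
 * Illustrative arithmetic, assuming the DPCD entries follow eDP 1.4 and are
 * expressed in 200 kHz units: a raw value of 8100 becomes
 * (8100 * 200) / 10 = 162000, i.e. 1.62 GHz in the 10 kHz units used by
 * default_rates[] and the rest of the driver.
 */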
+
+ intel_dp_print_rates(intel_dp);
+
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -3803,7 +4022,7 @@ go_again:
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
-void
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4390,6 +4609,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4736,6 +4956,18 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(pp_div_reg));
}
+/**
+ * intel_dp_set_drrs_state - program registers for RR switch to take effect
+ * @dev: DRM device
+ * @refresh_rate: RR to be programmed
+ *
+ * This function gets called when refresh rate (RR) has to be changed from
+ * one frequency to another. Switches can be between high and low RR
+ * supported by the panel or to any other RR based on media playback (in
+ * this case, RR value needs to be passed from user space).
+ *
+ * The caller of this function needs to take a lock on dev_priv->drrs.
+ */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4764,7 +4996,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
- intel_crtc = encoder->new_crtc;
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
@@ -4793,14 +5025,32 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+ switch (index) {
+ case DRRS_HIGH_RR:
+ intel_dp_set_m_n(intel_crtc, M1_N1);
+ break;
+ case DRRS_LOW_RR:
+ intel_dp_set_m_n(intel_crtc, M2_N2);
+ break;
+ case DRRS_MAX_RR:
+ default:
+ DRM_ERROR("Unsupported refreshrate type\n");
+ }
+ } else if (INTEL_INFO(dev)->gen > 6) {
reg = PIPECONF(intel_crtc->config->cpu_transcoder);
val = I915_READ(reg);
+
if (index > DRRS_HIGH_RR) {
- val |= PIPECONF_EDP_RR_MODE_SWITCH;
- intel_dp_set_m_n(intel_crtc);
+ if (IS_VALLEYVIEW(dev))
+ val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val |= PIPECONF_EDP_RR_MODE_SWITCH;
} else {
- val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+ if (IS_VALLEYVIEW(dev))
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
I915_WRITE(reg, val);
}
@@ -4810,6 +5060,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
+/**
+ * intel_edp_drrs_enable - init drrs struct if supported
+ * @intel_dp: DP struct
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4837,6 +5093,11 @@ unlock:
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ *
+ */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4892,10 +5153,20 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
downclock_mode->vrefresh);
unlock:
-
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_invalidate - Invalidate DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is a disturbance on screen (due to cursor movement/time
+ * update etc.), DRRS needs to be invalidated, i.e. we need to switch to
+ * the high RR.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
@@ -4903,15 +5174,21 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!dev_priv->drrs.dp)
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
+ cancel_delayed_work(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
- cancel_delayed_work_sync(&dev_priv->drrs.work);
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
@@ -4923,6 +5200,17 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * intel_edp_drrs_flush - Flush DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is no movement on screen, DRRS work can be scheduled.
+ * This DRRS work is responsible for setting relevant registers after a
+ * timeout of 1 second.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
void intel_edp_drrs_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
@@ -4930,16 +5218,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!dev_priv->drrs.dp)
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
+ cancel_delayed_work(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
- cancel_delayed_work_sync(&dev_priv->drrs.work);
-
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
!dev_priv->drrs.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->drrs.work,
@@ -4947,6 +5240,56 @@ void intel_edp_drrs_flush(struct drm_device *dev,
mutex_unlock(&dev_priv->drrs.mutex);
}
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching between low and high refresh rates,
+ * dynamically, based on the usage scenario. This feature is applicable
+ * for internal panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * would list multiple refresh rates for one resolution.
+ *
+ * DRRS is of 2 types - static and seamless.
+ * Static DRRS involves changing refresh rate (RR) by doing a full modeset
+ * (may appear as a blink on screen) and is used in dock-undock scenario.
+ * Seamless DRRS involves changing RR without any visual effect to the user
+ * and can be used during normal system usage. This is done by programming
+ * certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * eDP DRRS:-
+ * The implementation is based on frontbuffer tracking.
+ * When there is a disturbance on the screen triggered by user activity or a
+ * periodic system activity, DRRS is disabled (RR is changed to high RR).
+ * When there is no movement on screen, after a timeout of 1 second, a switch
+ * to low RR is made.
+ * For integration with frontbuffer tracking code,
+ * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
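/*
 * Minimal usage sketch (hypothetical callers; the real hooks live in the
 * frontbuffer tracking code): user activity invalidates DRRS so the panel
 * returns to the high RR, and a flush once the screen is idle re-arms the
 * delayed switch back to the low RR.
 */
static void example_frontbuffer_dirtied(struct drm_device *dev,
					unsigned frontbuffer_bits)
{
	/* Disturbance on screen: DRRS must switch back to the high RR. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}

static void example_frontbuffer_flushed(struct drm_device *dev,
					unsigned frontbuffer_bits)
{
	/* Screen idle again: re-arm the 1 second switch to the low RR. */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}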
+
+/**
+ * intel_dp_drrs_init - Init basic DRRS work and mutex.
+ * @intel_connector: eDP connector
+ * @fixed_mode: preferred mode of panel
+ *
+ * This function is called only once at driver load to initialize basic
+ * DRRS stuff.
+ *
+ * Returns:
+ * Downclock mode if panel supports it, else return NULL.
+ * DRRS support is determined by the presence of downclock mode (apart
+ * from VBT setting).
+ */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
struct drm_display_mode *fixed_mode)
@@ -4956,6 +5299,9 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
+ mutex_init(&dev_priv->drrs.mutex);
+
if (INTEL_INFO(dev)->gen <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
@@ -4970,14 +5316,10 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
(dev, fixed_mode, connector);
if (!downclock_mode) {
- DRM_DEBUG_KMS("DRRS not supported\n");
+ DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
}
- INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
-
- mutex_init(&dev_priv->drrs.mutex);
-
dev_priv->drrs.type = dev_priv->vbt.drrs_type;
dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
@@ -5000,8 +5342,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
-
if (!is_edp(intel_dp))
return true;
@@ -5251,7 +5591,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9f67a379a9a5..5cb47482d29f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -36,11 +36,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = encoder->base.dev;
- int bpp;
- int lane_count, slots;
+ struct drm_atomic_state *state;
+ int bpp, i;
+ int lane_count, slots, rate;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_connector *found = NULL, *intel_connector;
+ struct intel_connector *found = NULL;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
@@ -52,15 +52,30 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
* seem to suggest we should do otherwise.
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
- intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+
+ rate = intel_dp_max_link_rate(intel_dp);
+
+ if (intel_dp->num_sink_rates) {
+ intel_dp->link_bw = 0;
+ intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
+ } else {
+ intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
+ intel_dp->rate_select = 0;
+ }
+
intel_dp->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = rate;
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
- if (intel_connector->new_encoder == encoder) {
- found = intel_connector;
+ state = pipe_config->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ if (state->connector_states[i]->best_encoder == &encoder->base) {
+ found = to_intel_connector(state->connectors[i]);
break;
}
}
@@ -140,7 +155,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, intel_connector) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
@@ -317,6 +332,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -399,7 +415,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct drm_connector *connector;
int i;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index eef79ccd0b7c..897f17db08af 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,9 +35,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
-
-#define DIV_ROUND_CLOSEST_ULL(ll, d) \
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+#include <drm/drm_atomic.h>
/**
* _wait_for - magic (register) wait macro
@@ -56,8 +54,8 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
+ if ((W) && drm_can_sleep()) { \
+ usleep_range((W)*1000, (W)*2000); \
} else { \
cpu_relax(); \
} \
@@ -258,6 +256,7 @@ struct intel_plane_state {
};
struct intel_initial_plane_config {
+ struct intel_framebuffer *fb;
unsigned int tiling;
int size;
u32 base;
@@ -463,7 +462,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;
@@ -500,16 +498,20 @@ struct intel_plane_wm_parameters {
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
+ u64 tiling;
+ unsigned int rotation;
};
struct intel_plane {
struct drm_plane base;
int plane;
enum pipe pipe;
- struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
+ /* FIXME convert to properties */
+ struct drm_intel_sprite_colorkey ckey;
+
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
* as the other pieces of the struct may not reflect the values we want
@@ -526,7 +528,6 @@ struct intel_plane {
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -537,10 +538,6 @@ struct intel_plane {
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
- int (*update_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
- void (*get_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
};
struct intel_watermark_params {
@@ -563,6 +560,7 @@ struct cxsr_latency {
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
@@ -592,6 +590,26 @@ struct intel_hdmi {
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
+/*
+ * enum link_m_n_set:
+ * When the platform provides two sets of M_N registers for dp, we can
+ * program them and switch between them in case of DRRS.
+ * But when only one such register set is provided, we have to program the
+ * required divider value on that register itself based on the DRRS state.
+ *
+ * M1_N1 : Program dp_m_n on the M1_N1 registers and
+ * dp_m2_n2 on the M2_N2 registers (if supported)
+ *
+ * M2_N2 : Program dp_m2_n2 on the M1_N1 registers;
+ * the M2_N2 registers are not supported
+ */
+
+enum link_m_n_set {
+ /* Sets the m1_n1 and m2_n2 */
+ M1_N1 = 0,
+ M2_N2
+};
+
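/*
 * Illustrative sketch (not part of this patch): how a caller might choose
 * between the two divider sets described above. "has_second_m_n" and the
 * helper name are hypothetical; the real driver passes M1_N1 or M2_N2 to
 * intel_dp_set_m_n() depending on the DRRS refresh rate state.
 */
static inline enum link_m_n_set demo_pick_m_n_set(bool has_second_m_n,
						  bool want_low_rr)
{
	if (has_second_m_n)
		return M1_N1;	/* both sets get programmed; switch later */

	/* only one register set: program the value needed right now */
	return want_low_rr ? M2_N2 : M1_N1;
}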
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
@@ -601,10 +619,14 @@ struct intel_dp {
uint32_t color_range;
bool color_range_auto;
uint8_t link_bw;
+ uint8_t rate_select;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
+ uint8_t num_sink_rates;
+ int sink_rates[DP_MAX_SUPPORTED_RATES];
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -710,7 +732,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
struct drm_crtc *crtc;
- struct drm_i915_gem_object *old_fb_obj;
+ struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
atomic_t pending;
@@ -817,7 +839,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -852,7 +875,8 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
@@ -877,10 +901,14 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
-int intel_fb_align_height(struct drm_device *dev, int height,
- unsigned int tiling);
+unsigned int intel_fb_align_height(struct drm_device *dev,
+ unsigned int height,
+ uint32_t pixel_format,
+ uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+ uint32_t pixel_format);
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
@@ -899,6 +927,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
+int intel_connector_init(struct intel_connector *);
+struct intel_connector *intel_connector_alloc(void);
void intel_connector_dpms(struct drm_connector *, int mode);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_modeset_check_state(struct drm_device *dev);
@@ -928,11 +958,12 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old);
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined);
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -942,9 +973,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state);
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
@@ -954,6 +987,19 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val);
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier);
+
+static inline bool
+intel_rotation_90_or_270(unsigned int rotation)
+{
+ return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+}
+
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -993,7 +1039,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc);
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
@@ -1008,6 +1054,9 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj);
+
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1017,7 +1066,6 @@ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
@@ -1032,17 +1080,11 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
-int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
-int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int intel_disable_plane(struct drm_plane *plane);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
void intel_edp_drrs_disable(struct intel_dp *intel_dp);
@@ -1097,7 +1139,11 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_fbc_update(struct drm_device *dev);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_device *dev);
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1213,8 +1259,9 @@ void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
-void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1231,14 +1278,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-int intel_plane_set_property(struct drm_plane *plane,
- struct drm_property *prop,
- uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1261,6 +1303,17 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+static inline struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+ if (IS_ERR(crtc_state))
+ return ERR_PTR(PTR_ERR(crtc_state));
+
+ return to_intel_crtc_state(crtc_state);
+}
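/*
 * Illustrative sketch (not part of this patch): the ERR_PTR pattern the
 * helper above relies on (<linux/err.h>). Callers test the returned
 * pointer with IS_ERR() and propagate the encoded errno instead of
 * carrying a separate status variable. The demo names are hypothetical.
 */
struct demo_state { int dummy; };

static struct demo_state *demo_get_state(struct demo_state *s, bool fail)
{
	if (fail)
		return ERR_PTR(-EDEADLK);	/* encode errno in the pointer */

	return s;
}

static int demo_use_state(struct demo_state *s, bool fail)
{
	struct demo_state *state = demo_get_state(s, fail);

	if (IS_ERR(state))
		return PTR_ERR(state);		/* decode and propagate errno */

	return 0;
}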
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 10ab68457ca8..51966426addf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -854,7 +854,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
/* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(port), val);
+ I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
@@ -975,6 +975,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
void intel_dsi_init(struct drm_device *dev)
@@ -1006,7 +1007,7 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_dsi)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dsi);
return;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index d8579510beb0..770040ff486e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,6 +393,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
@@ -468,7 +469,7 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_dvo)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dvo);
return;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 624d1d92d284..4165ce0644f7 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -78,7 +78,8 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
dev_priv->fbc.enabled = true;
- cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+ /* Note: fbc.threshold == 1 for i8xx */
+ cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -173,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void snb_fbc_blit_update(struct drm_device *dev)
+static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
-
- /* Blitter is part of Media powerwell on VLV. No impact of
- * his param in other platforms for now */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
-
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ POSTING_READ(MSG_FBC_REND_STATE);
}
static void ilk_fbc_enable(struct drm_crtc *crtc)
@@ -238,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
}
+ intel_fbc_nuke(dev_priv);
+
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -319,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
+ intel_fbc_nuke(dev_priv);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -339,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->fbc.enabled;
}
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_GEN8(dev))
- return;
-
- if (!intel_fbc_enabled(dev))
- return;
-
- I915_WRITE(MSG_FBC_REND_STATE, value);
-}
-
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
@@ -368,7 +338,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
- dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
@@ -459,7 +429,7 @@ void intel_fbc_disable(struct drm_device *dev)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.plane = -1;
+ dev_priv->fbc.crtc = NULL;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
@@ -472,6 +442,43 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
return true;
}
+static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+{
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ enum pipe pipe;
+ bool pipe_a_only = false, one_pipe_only = false;
+
+ if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+ pipe_a_only = true;
+ else if (INTEL_INFO(dev_priv)->gen <= 4)
+ one_pipe_only = true;
+
+ for_each_pipe(dev_priv, pipe) {
+ tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (one_pipe_only && crtc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ return NULL;
+ }
+ crtc = tmp_crtc;
+ }
+
+ if (pipe_a_only)
+ break;
+ }
+
+ if (!crtc || crtc->primary->fb == NULL) {
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
+ return NULL;
+ }
+
+ return crtc;
+}
+
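/*
 * Illustrative sketch (not part of this patch): the pipe selection policy
 * intel_fbc_find_crtc() implements above, reduced to a predicate. The
 * generation checks mirror the ones in the function; names and the exact
 * signature are hypothetical.
 */
static bool demo_fbc_pipe_allowed(int gen, bool is_hsw, int pipe,
				  int num_active_pipes)
{
	if (is_hsw || gen >= 8)
		return pipe == 0;		/* pipe A only */

	if (gen <= 4)
		return num_active_pipes == 1;	/* exactly one active pipe */

	return true;				/* no extra restriction */
}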
/**
* intel_fbc_update - enable/disable FBC as needed
* @dev: the drm_device
@@ -494,22 +501,30 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
void intel_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
- if (!HAS_FBC(dev)) {
- set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+ if (!HAS_FBC(dev))
return;
+
+ /* disable framebuffer compression in vGPU */
+ if (intel_vgpu_active(dev))
+ i915.enable_fbc = 0;
+
+ if (i915.enable_fbc < 0) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
}
- if (!i915.powersave) {
+ if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
- return;
+ goto out_disable;
}
/*
@@ -521,39 +536,15 @@ void intel_fbc_update(struct drm_device *dev)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
- for_each_crtc(dev, tmp_crtc) {
- if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
- if (crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
+ crtc = intel_fbc_find_crtc(dev_priv);
+ if (!crtc)
goto out_disable;
- }
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
- goto out_disable;
- }
- if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- goto out_disable;
- }
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
@@ -617,7 +608,7 @@ void intel_fbc_update(struct drm_device *dev)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->fbc.plane == intel_crtc->plane &&
+ if (dev_priv->fbc.crtc == intel_crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
@@ -663,6 +654,44 @@ out_disable:
i915_gem_stolen_cleanup_compression(dev);
}
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned int fbc_bits;
+
+ if (origin == ORIGIN_GTT)
+ return;
+
+ if (dev_priv->fbc.enabled)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
+ else if (dev_priv->fbc.fbc_work)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
+ to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+ else
+ fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
+
+ dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+
+ if (dev_priv->fbc.busy_bits)
+ intel_fbc_disable(dev);
+}
+
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!dev_priv->fbc.busy_bits)
+ return;
+
+ dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+
+ if (!dev_priv->fbc.busy_bits)
+ intel_fbc_update(dev);
+}
+
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -671,11 +700,22 @@ out_disable:
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
+ enum pipe pipe;
+
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
+ dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
return;
}
+ for_each_pipe(dev_priv, pipe) {
+ dev_priv->fbc.possible_framebuffer_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(pipe);
+
+ if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+ break;
+ }
+
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = gen7_fbc_enable;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 3001a8674611..4e7e7da2e03b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -71,6 +71,31 @@ static int intel_fbdev_set_par(struct fb_info *info)
return ret;
}
+static int intel_fbdev_blank(int blank, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_blank(blank, info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -79,7 +104,7 @@ static struct fb_ops intelfb_ops = {
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
+ .fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
@@ -126,7 +151,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
}
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
@@ -594,7 +619,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
- plane_config->tiling);
+ fb->base.pixel_format,
+ fb->base.modifier[0]);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 73cb6e036445..a20cffb78c0f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -110,16 +110,11 @@ static void intel_mark_fb_busy(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- if (!i915.powersave)
- return;
-
for_each_pipe(dev_priv, pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
intel_increase_pllclock(dev, pipe);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
}
}
@@ -127,6 +122,7 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
+ * @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
@@ -135,7 +131,8 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -158,6 +155,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
+ intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
}
/**
@@ -185,16 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
-
- /*
- * FIXME: Unconditional fbc flushing here is a rather gross hack and
- * needs to be reworked into a proper frontbuffer tracking scheme like
- * psr employs.
- */
- if (dev_priv->fbc.need_sw_cache_clean) {
- dev_priv->fbc.need_sw_cache_clean = false;
- bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
- }
+ intel_fbc_flush(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 995c5b261f4f..bfbe07b6ddce 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -951,19 +951,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
+static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_atomic_state *state;
struct intel_encoder *encoder;
+ struct drm_connector_state *connector_state;
int count = 0, count_hdmi = 0;
+ int i;
if (HAS_GMCH_DISPLAY(dev))
return false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
count++;
}
@@ -1020,7 +1031,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
- hdmi_12bpc_possible(encoder->new_crtc)) {
+ hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1504,11 +1515,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Program Tx latency optimal setting */
for (i = 0; i < 4; i++) {
- /* Set the latency optimal bit */
- data = (i == 1) ? 0x0 : 0x6;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
- data << DPIO_FRC_LATENCY_SHFIT);
-
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@ -1618,6 +1624,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
@@ -1743,7 +1750,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
if (!intel_dig_port)
return;
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b31088a551f2..56e437e31580 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
}
static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- u32 gmbus1_index)
+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
- u8 *buf = msg->buf;
I915_WRITE(GMBUS1 + reg_offset,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
{
- int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
u8 *buf = msg->buf;
+ unsigned int rx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
+ buf, len, gmbus1_index);
+ if (ret)
+ return ret;
+
+ rx_size -= len;
+ buf += len;
+ } while (rx_size != 0);
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ unsigned int chunk_size = len;
u32 val, loop;
val = loop = 0;
@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT |
- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
if (ret)
return ret;
}
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+ u8 *buf = msg->buf;
+ unsigned int tx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+ if (ret)
+ return ret;
+
+ buf += len;
+ tx_size -= len;
+ } while (tx_size != 0);
+
return 0;
}
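/*
 * Illustrative sketch (not part of this patch): the chunking loop used by
 * gmbus_xfer_read()/gmbus_xfer_write() above, reduced to its core. The
 * chunk size and per-chunk routine are hypothetical stand-ins for
 * GMBUS_BYTE_COUNT_MAX and the register programming done per chunk.
 */
#define DEMO_MAX_CHUNK 256u

static int demo_transfer_chunk(unsigned char *buf, unsigned int len)
{
	(void)buf;	/* a real implementation would touch the hardware here */
	(void)len;
	return 0;
}

static int demo_transfer_all(unsigned char *buf, unsigned int total)
{
	unsigned int len;
	int ret;

	do {
		len = total < DEMO_MAX_CHUNK ? total : DEMO_MAX_CHUNK;

		ret = demo_transfer_chunk(buf, len);
		if (ret)
			return ret;	/* abort the transfer on error */

		buf += len;		/* advance past the transferred bytes */
		total -= len;
	} while (total != 0);

	return 0;
}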
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e8d3da9f3373..09df74b8e917 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -254,8 +254,10 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
return lrca >> 12;
}
-static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *ctx_obj)
{
+ struct drm_device *dev = ring->dev;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
@@ -272,6 +274,13 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
* signalling between Command Streamers */
/* desc |= GEN8_CTX_FORCE_RESTORE; */
+ /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+ if (IS_GEN9(dev) &&
+ INTEL_REVID(dev) <= SKL_REVID_B0 &&
+ (ring->id == BCS || ring->id == VCS ||
+ ring->id == VECS || ring->id == VCS2))
+ desc |= GEN8_CTX_FORCE_RESTORE;
+
return desc;
}
@@ -286,13 +295,13 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
- temp = execlists_ctx_descriptor(ctx_obj1);
+ temp = execlists_ctx_descriptor(ring, ctx_obj1);
else
temp = 0;
desc[1] = (u32)(temp >> 32);
desc[0] = (u32)temp;
- temp = execlists_ctx_descriptor(ctx_obj0);
+ temp = execlists_ctx_descriptor(ring, ctx_obj0);
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
@@ -384,6 +393,26 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
}
}
+ if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+ /*
+ * WaIdleLiteRestore: make sure we never cause a lite
+ * restore with HEAD==TAIL
+ */
+ if (req0 && req0->elsp_submitted) {
+ /*
+ * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
+ * as we resubmit the request. See gen8_emit_request()
+ * for where we prepare the padding after the end of the
+ * request.
+ */
+ struct intel_ringbuffer *ringbuf;
+
+ ringbuf = req0->ctx->engine[ring->id].ringbuf;
+ req0->tail += 8;
+ req0->tail &= ringbuf->size - 1;
+ }
+ }
+
WARN_ON(req1 && req1->elsp_submitted);
execlists_submit_contexts(ring, req0->ctx, req0->tail,
@@ -612,7 +641,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
* @vmas: list of vmas.
* @batch_obj: the batchbuffer to submit.
* @exec_start: batchbuffer start virtual address pointer.
- * @flags: translated execbuffer call flags.
+ * @dispatch_flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
@@ -625,7 +654,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags)
+ u64 exec_start, u32 dispatch_flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@@ -698,10 +727,12 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
+ ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
if (ret)
return ret;
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -776,7 +807,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-/**
+/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @ringbuf: Logical Ringbuffer to advance.
*
@@ -785,9 +816,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request)
+static void
+intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = ringbuf->ring;
@@ -876,12 +908,9 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
return ret;
}
- /* Hold a reference to the context this request belongs to
- * (we will need it when the time comes to emit/retire the
- * request).
- */
request->ctx = ctx;
i915_gem_context_reference(request->ctx);
+ request->ringbuf = ctx->engine[ring->id].ringbuf;
ring->outstanding_lazy_request = request;
return 0;
@@ -1140,11 +1169,22 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
return init_workarounds_ring(ring);
}
+static int gen9_init_render_ring(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ ret = gen8_init_common_ring(ring);
+ if (ret)
+ return ret;
+
+ return init_workarounds_ring(ring);
+}
+
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
- u64 offset, unsigned flags)
+ u64 offset, unsigned dispatch_flags)
{
- bool ppgtt = !(flags & I915_DISPATCH_SECURE);
+ bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_logical_ring_begin(ringbuf, ctx, 4);
@@ -1295,7 +1335,12 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
u32 cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
+ /*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+ ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
if (ret)
return ret;
@@ -1313,9 +1358,50 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+ /*
+ * Here we add two extra NOOPs as padding to avoid
+ * lite restore of a context with HEAD==TAIL.
+ */
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
return 0;
}
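/*
 * Illustrative sketch (not part of this patch): the WaIdleLiteRestore
 * arithmetic used above. Each request leaves two MI_NOOP dwords (8 bytes)
 * of padding after its tail; on resubmission the saved tail is advanced
 * over that padding and wrapped with the power-of-two ring size, so HEAD
 * can never equal TAIL on a lite restore. The helper name is hypothetical.
 */
static unsigned int demo_advance_tail_over_padding(unsigned int tail,
						   unsigned int ring_size)
{
	tail += 8;			/* skip the two reserved MI_NOOPs */
	tail &= ring_size - 1;		/* ring_size is a power of two */
	return tail;
}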
+static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct render_state so;
+ struct drm_i915_file_private *file_priv = ctx->file_priv;
+ struct drm_file *file = file_priv ? file_priv->file : NULL;
+ int ret;
+
+ ret = i915_gem_render_state_prepare(ring, &so);
+ if (ret)
+ return ret;
+
+ if (so.rodata == NULL)
+ return 0;
+
+ ret = ring->emit_bb_start(ringbuf,
+ ctx,
+ so.ggtt_offset,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+
+ ret = __i915_add_request(ring, file, so.obj);
+ /* intel_logical_ring_add_request moves object to inactive if it
+ * fails */
+out:
+ i915_gem_render_state_fini(&so);
+ return ret;
+}
+
static int gen8_init_rcs_context(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
@@ -1399,7 +1485,10 @@ static int logical_render_ring_init(struct drm_device *dev)
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- ring->init_hw = gen8_init_render_ring;
+ if (INTEL_INFO(dev)->gen >= 9)
+ ring->init_hw = gen9_init_render_ring;
+ else
+ ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
@@ -1581,37 +1670,47 @@ cleanup_render_ring:
return ret;
}
-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static u32
+make_rpcs(struct drm_device *dev)
{
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
- struct render_state so;
- struct drm_i915_file_private *file_priv = ctx->file_priv;
- struct drm_file *file = file_priv ? file_priv->file : NULL;
- int ret;
+ u32 rpcs = 0;
- ret = i915_gem_render_state_prepare(ring, &so);
- if (ret)
- return ret;
-
- if (so.rodata == NULL)
+ /*
+ * No explicit RPCS request is needed to ensure full
+ * slice/subslice/EU enablement prior to Gen9.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
return 0;
- ret = ring->emit_bb_start(ringbuf,
- ctx,
- so.ggtt_offset,
- I915_DISPATCH_SECURE);
- if (ret)
- goto out;
+ /*
+ * Starting in Gen9, render power gating can leave
+ * slice/subslice/EU in a partially enabled state. We
+ * must make an explicit request through RPCS for full
+ * enablement.
+ */
+ if (INTEL_INFO(dev)->has_slice_pg) {
+ rpcs |= GEN8_RPCS_S_CNT_ENABLE;
+ rpcs |= INTEL_INFO(dev)->slice_total <<
+ GEN8_RPCS_S_CNT_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+ if (INTEL_INFO(dev)->has_subslice_pg) {
+ rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
+ rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+ GEN8_RPCS_SS_CNT_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
- ret = __i915_add_request(ring, file, so.obj);
- /* intel_logical_ring_add_request moves object to inactive if it
- * fails */
-out:
- i915_gem_render_state_fini(&so);
- return ret;
+ if (INTEL_INFO(dev)->has_eu_pg) {
+ rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ GEN8_RPCS_EU_MIN_SHIFT;
+ rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ GEN8_RPCS_EU_MAX_SHIFT;
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
+
+ return rpcs;
}
static int
@@ -1659,7 +1758,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
- _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1706,18 +1806,18 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
- reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
- reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
- reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
- reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
- reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
+ reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
+ reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
}
kunmap_atomic(reg_state);
@@ -1925,3 +2025,38 @@ error_unpin_ctx:
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
+
+void intel_lr_context_reset(struct drm_device *dev,
+ struct intel_context *ctx)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[ring->id].ringbuf;
+ uint32_t *reg_state;
+ struct page *page;
+
+ if (!ctx_obj)
+ continue;
+
+ if (i915_gem_object_get_pages(ctx_obj)) {
+ WARN(1, "Failed get_pages for context obj\n");
+ continue;
+ }
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ reg_state = kmap_atomic(page);
+
+ reg_state[CTX_RING_HEAD+1] = 0;
+ reg_state[CTX_RING_TAIL+1] = 0;
+
+ kunmap_atomic(reg_state);
+
+ ringbuf->head = 0;
+ ringbuf->tail = 0;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 6f2d7da594f6..adb731e49c57 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -30,6 +30,8 @@
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
@@ -40,10 +42,6 @@ int intel_logical_rings_init(struct drm_device *dev);
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx);
-void intel_logical_ring_advance_and_submit(
- struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -70,13 +68,13 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
int num_dwords);
/* Logical Ring Contexts */
-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx);
+void intel_lr_context_reset(struct drm_device *dev,
+ struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -86,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ u64 exec_start, u32 dispatch_flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 071b96d6e146..fbcc7dff0d63 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -286,7 +286,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
/* Should never happen!! */
@@ -509,7 +509,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
intel_connector->panel.fitting_mode = value;
crtc = intel_attached_encoder(connector)->base.crtc;
- if (crtc && crtc->enabled) {
+ if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
@@ -535,6 +535,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -812,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
static const struct dmi_system_id intel_dual_link_lvds[] = {
{
.callback = intel_dual_link_lvds_callback,
- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .ident = "Apple MacBook Pro 15\" (2010)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
+ },
+ },
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro 15\" (2011)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
},
},
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro 15\" (2012)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
+ },
+ },
{ } /* terminating entry */
};
@@ -847,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
if (i915.lvds_channel_mode > 0)
return i915.lvds_channel_mode == 2;
+ /* single channel LVDS is limited to 112 MHz */
+ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
+ > 112999)
+ return true;
+
if (dmi_check_system(intel_dual_link_lvds))
return true;
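/*
 * Illustrative sketch (not part of this patch): the dual-link decision
 * order used above -- explicit module parameter first, then the 112 MHz
 * single-channel limit, then the DMI quirk table. Names and the exact
 * signature are hypothetical.
 */
static bool demo_is_dual_link_lvds(int forced_channel_mode,
				   int fixed_mode_clock_khz,
				   bool dmi_quirk_matched)
{
	if (forced_channel_mode > 0)		/* user override */
		return forced_channel_mode == 2;

	if (fixed_mode_clock_khz > 112999)	/* too fast for a single channel */
		return true;

	return dmi_quirk_matched;		/* known dual-link machines */
}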
@@ -945,6 +967,12 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
+ if (intel_connector_init(&lvds_connector->base) < 0) {
+ kfree(lvds_connector);
+ kfree(lvds_encoder);
+ return;
+ }
+
lvds_encoder->attached_connector = lvds_connector;
intel_encoder = &lvds_encoder->base;
@@ -1104,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev)
out:
mutex_unlock(&dev->mode_config.mutex);
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
@@ -1118,7 +1148,6 @@ out:
}
drm_connector_register(connector);
- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d8de1d5140a7..71e87abdcae7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -744,10 +744,8 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_didl_outputs(dev);
- intel_setup_cadls(dev);
- }
+ intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index f93dfc174495..dd92122ed95c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -720,7 +720,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+ &i915_ggtt_view_normal);
if (ret != 0)
return ret;
@@ -1065,7 +1066,6 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
- /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1261,7 +1261,6 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct overlay_registers __iomem *regs;
int ret;
- /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index d8686ce89160..08532d4ffe0a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 24d77ddcc5f4..fa4ccb346389 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -56,24 +56,42 @@ static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * WaDisableSDEUnitClockGating:skl
- * This seems to be a pre-production w/a.
- */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
- GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+ /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+}
- /*
- * WaDisableDgMirrorFixInHalfSliceChicken5:skl
- * This is a pre-production w/a.
- */
- I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
- I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
- ~GEN9_DG_MIRROR_FIX_ENABLE);
+static void skl_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* Wa4x4STCOptimizationDisable:skl */
- I915_WRITE(CACHE_MODE_1,
- _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+ gen9_init_clock_gating(dev);
+
+ if (INTEL_REVID(dev) == SKL_REVID_A0) {
+ /*
+ * WaDisableSDEUnitClockGating:skl
+ * WaSetGAPSunitClckGateDisable:skl
+ */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /* WaDisableHDCInvalidation:skl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ I915_READ(FF_SLICE_CS_CHICKEN2) |
+ GEN9_TSG_BARRIER_ACK_DISABLE);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_E0)
+ /* WaDisableLSQCROPERFforOCL:skl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -245,6 +263,47 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if (enable)
+ val &= ~FORCE_DDR_HIGH_FREQ;
+ else
+ val |= FORCE_DDR_HIGH_FREQ;
+ val &= ~FORCE_DDR_LOW_FREQ;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ if (enable)
+ val |= DSP_MAXFIFO_PM5_ENABLE;
+ else
+ val &= ~DSP_MAXFIFO_PM5_ENABLE;
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+#define FW_WM(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
+
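The new FW_WM() helper pastes the plane name into the corresponding SHIFT/MASK register definitions, so the watermark writes below read as FW_WM(value, PLANE). A minimal standalone sketch of the same pattern, with made-up shift/mask values standing in for the real i915_reg.h definitions:

#include <stdint.h>
#include <stdio.h>

#define EX_DSPFW_SR_SHIFT 23                    /* hypothetical */
#define EX_DSPFW_SR_MASK  (0x1ffu << 23)        /* hypothetical 9-bit field */

#define EX_FW_WM(value, plane) \
        (((value) << EX_DSPFW_ ## plane ## _SHIFT) & EX_DSPFW_ ## plane ## _MASK)

int main(void)
{
        uint32_t reg = 0;

        reg &= ~EX_DSPFW_SR_MASK;       /* clear the old SR watermark */
        reg |= EX_FW_WM(42u, SR);       /* shift+mask the new one into place */
        printf("DSPFW bits: 0x%08x\n", reg);
        return 0;
}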
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
struct drm_device *dev = dev_priv->dev;
@@ -252,6 +311,8 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ if (IS_CHERRYVIEW(dev))
+ chv_set_memory_pm5(dev_priv, enable);
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
} else if (IS_PINEVIEW(dev)) {
@@ -274,6 +335,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
enable ? "enabled" : "disabled");
}
+
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -290,6 +352,61 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
*/
static const int pessimal_latency_ns = 5000;
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
+
+static int vlv_get_fifo_size(struct drm_device *dev,
+ enum pipe pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int sprite0_start, sprite1_start, size;
+
+ switch (pipe) {
+ uint32_t dsparb, dsparb2, dsparb3;
+ case PIPE_A:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+ break;
+ case PIPE_B:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+ break;
+ case PIPE_C:
+ dsparb2 = I915_READ(DSPARB2);
+ dsparb3 = I915_READ(DSPARB3);
+ sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+ sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+ break;
+ default:
+ return 0;
+ }
+
+ switch (plane) {
+ case 0:
+ size = sprite0_start;
+ break;
+ case 1:
+ size = sprite1_start - sprite0_start;
+ break;
+ case 2:
+ size = 512 - 1 - sprite1_start;
+ break;
+ default:
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
+ pipe_name(pipe), plane == 0 ? "primary" : "sprite",
+ plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
+ size);
+
+ return size;
+}
+
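The FIFO split points are spread across DSPARB/DSPARB2/DSPARB3, eight low bits in one register and the ninth bit in the other; VLV_FIFO_START() stitches them back together. A small sketch of that decoding, with invented register values:

#include <stdint.h>
#include <stdio.h>

#define EX_VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
        ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

int main(void)
{
        uint32_t dsparb  = 0x00005040;  /* hypothetical: pipe A sprite0/sprite1 starts */
        uint32_t dsparb2 = 0x00000011;  /* hypothetical: both high bits set */
        int sprite0_start = EX_VLV_FIFO_START(dsparb, dsparb2, 0, 0);
        int sprite1_start = EX_VLV_FIFO_START(dsparb, dsparb2, 8, 4);

        /* plane 0 (primary) owns everything below sprite0's start point */
        printf("sprite0_start=%d sprite1_start=%d primary_fifo=%d\n",
               sprite0_start, sprite1_start, sprite0_start);
        return 0;
}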
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -535,7 +652,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int clock;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
@@ -547,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
+ reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
@@ -557,7 +674,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ reg |= FW_WM(wm, CURSOR_SR);
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
@@ -566,7 +683,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
+ reg |= FW_WM(wm, HPLL_SR);
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
@@ -575,7 +692,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
@@ -611,7 +728,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -626,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
+ entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -698,7 +815,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -712,7 +829,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
+ entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
@@ -721,232 +838,234 @@ static bool g4x_compute_srwm(struct drm_device *dev,
display, cursor);
}
-static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
- int pixel_size,
- int *prec_mult,
- int *drain_latency)
-{
- struct drm_device *dev = crtc->dev;
- int entries;
- int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
+#define FW_WM_VLV(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
- if (WARN(clock == 0, "Pixel clock is zero!\n"))
- return false;
+static void vlv_write_wm_values(struct intel_crtc *crtc,
+ const struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
- return false;
+ I915_WRITE(VLV_DDL(pipe),
+ (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (IS_CHERRYVIEW(dev))
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
- DRAIN_LATENCY_PRECISION_16;
- else
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
- DRAIN_LATENCY_PRECISION_32;
- *drain_latency = (64 * (*prec_mult) * 4) / entries;
+ I915_WRITE(DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
+ I915_WRITE(DSPFW2,
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
+ I915_WRITE(DSPFW3,
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ I915_WRITE(DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
+ I915_WRITE(DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ } else {
+ I915_WRITE(DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ }
- if (*drain_latency > DRAIN_LATENCY_MASK)
- *drain_latency = DRAIN_LATENCY_MASK;
+ POSTING_READ(DSPFW1);
- return true;
+ dev_priv->wm.vlv = *wm;
}
-/*
- * Update drain latency registers of memory arbiter
- *
- * Valleyview SoC has a new memory arbiter and needs drain latency registers
- * to be programmed. Each plane has a drain latency multiplier and a drain
- * latency value.
- */
+#undef FW_WM_VLV
-static void vlv_update_drain_latency(struct drm_crtc *crtc)
+static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
+ struct drm_plane *plane)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pixel_size;
- int drain_latency;
- enum pipe pipe = intel_crtc->pipe;
- int plane_prec, prec_mult, plane_dl;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ int entries, prec_mult, drain_latency, pixel_size;
+ int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
+ const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
- plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
- DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
- (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
+ /*
+ * FIXME the plane might have an fb
+ * but be invisible (e.g. due to clipping)
+ */
+ if (!intel_crtc->active || !plane->state->fb)
+ return 0;
- if (!intel_crtc_active(crtc)) {
- I915_WRITE(VLV_DDL(pipe), plane_dl);
- return;
- }
+ if (WARN(clock == 0, "Pixel clock is zero!\n"))
+ return 0;
- /* Primary plane Drain Latency */
- pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
- if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_PLANE_PRECISION_HIGH :
- DDL_PLANE_PRECISION_LOW;
- plane_dl |= plane_prec | drain_latency;
- }
+ pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
- /* Cursor Drain Latency
- * BPP is always 4 for cursor
- */
- pixel_size = 4;
+ if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
+ return 0;
- /* Program cursor DL only if it is enabled */
- if (intel_crtc->cursor_base &&
- vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_CURSOR_PRECISION_HIGH :
- DDL_CURSOR_PRECISION_LOW;
- plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+
+ prec_mult = high_precision;
+ drain_latency = 64 * prec_mult * 4 / entries;
+
+ if (drain_latency > DRAIN_LATENCY_MASK) {
+ prec_mult /= 2;
+ drain_latency = 64 * prec_mult * 4 / entries;
}
- I915_WRITE(VLV_DDL(pipe), plane_dl);
-}
+ if (drain_latency > DRAIN_LATENCY_MASK)
+ drain_latency = DRAIN_LATENCY_MASK;
-#define single_plane_enabled(mask) is_power_of_2(mask)
+ return drain_latency | (prec_mult == high_precision ?
+ DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
+}
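The rewritten helper derives the drain latency from the bytes fetched per microsecond and falls back to the lower precision multiplier when the result would overflow the register field. A standalone sketch of the same arithmetic (the 0x7f field mask and the pixel clock are assumptions for the example; 16 is the CHV high-precision multiplier used above):

#include <stdio.h>

#define EX_DRAIN_LATENCY_MASK 0x7f      /* assumed field width for the example */

static int ex_compute_drain_latency(int clock_khz, int cpp, int high_precision)
{
        /* DIV_ROUND_UP(clock, 1000) * cpp: roughly bytes fetched per usec */
        int entries = (clock_khz + 999) / 1000 * cpp;
        int prec = high_precision;
        int dl = 64 * prec * 4 / entries;

        if (dl > EX_DRAIN_LATENCY_MASK) {       /* retry with half the precision */
                prec /= 2;
                dl = 64 * prec * 4 / entries;
        }
        if (dl > EX_DRAIN_LATENCY_MASK)
                dl = EX_DRAIN_LATENCY_MASK;
        return dl;
}

int main(void)
{
        /* e.g. a 148.5 MHz pixel clock at 4 bytes per pixel on CHV (precision 16) */
        printf("drain latency = %d\n", ex_compute_drain_latency(148500, 4, 16));
        return 0;
}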
-static void valleyview_update_wm(struct drm_crtc *crtc)
+static int vlv_compute_wm(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ int fifo_size)
{
- struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
- bool cxsr_enabled;
+ int clock, entries, pixel_size;
- vlv_update_drain_latency(crtc);
+ /*
+ * FIXME the plane might have an fb
+ * but be invisible (e.g. due to clipping)
+ */
+ if (!crtc->active || !plane->base.state->fb)
+ return 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+ clock = crtc->config->base.adjusted_mode.crtc_clock;
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
+ /*
+ * Set up the watermark such that we don't start issuing memory
+ * requests until we are within PND's max deadline value (256us).
+ * Idea being to be idle as long as possible while still taking
+ * advantage of PND's deadline scheduling. The limit of 8
+ * cachelines (used when the FIFO will anyway drain in less time
+ * than 256us) should match what would be done if trickle
+ * feed were enabled.
+ */
+ return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+}
+
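Put differently, the watermark leaves exactly enough FIFO free to cover 256 us worth of fetches, counted in 64-byte cachelines and floored at 8 cachelines. A standalone sketch of the calculation with an invented pixel clock and FIFO size:

#include <stdio.h>

static int ex_clamp(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static int ex_vlv_compute_wm(int clock_khz, int cpp, int fifo_size)
{
        int entries = (clock_khz + 999) / 1000 * cpp;   /* bytes fetched per usec */
        int deadline = (256 * entries + 63) / 64;       /* cachelines fetched in 256us */

        /* leave just enough FIFO to cover the deadline, but never less than 8 lines */
        return fifo_size - ex_clamp(deadline, 0, fifo_size - 8);
}

int main(void)
{
        /* hypothetical: 148.5 MHz pixel clock, 4 bytes per pixel, 511-entry FIFO */
        printf("wm = %d\n", ex_vlv_compute_wm(148500, 4, 511));
        return 0;
}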
+static bool vlv_compute_sr_wm(struct drm_device *dev,
+ struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_crtc *crtc;
+ enum pipe pipe = INVALID_PIPE;
+ int num_planes = 0;
+ int fifo_size = 0;
+ struct intel_plane *plane;
+
+ wm->sr.cursor = wm->sr.plane = 0;
+
+ crtc = single_enabled_crtc(dev);
+ /* maxfifo not supported on pipe C */
+ if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
+ pipe = to_intel_crtc(crtc)->pipe;
+ num_planes = !!wm->pipe[pipe].primary +
+ !!wm->pipe[pipe].sprite[0] +
+ !!wm->pipe[pipe].sprite[1];
+ fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
+ if (fifo_size == 0 || num_planes > 1)
+ return false;
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
+ to_intel_plane(crtc->cursor), 0x3f);
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (plane->pipe != pipe)
+ continue;
+
+ wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
+ plane, fifo_size);
+ if (wm->sr.plane != 0)
+ break;
+ }
+
+ return true;
}
-static void cherryview_update_wm(struct drm_crtc *crtc)
+static void valleyview_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, planec_wm;
- int cursora_wm, cursorb_wm, cursorc_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- vlv_update_drain_latency(crtc);
+ wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
+ wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->primary),
+ vlv_get_fifo_size(dev, pipe, 0));
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
+ wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->cursor),
+ 0x3f);
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
- if (g4x_compute_wm0(dev, PIPE_C,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planec_wm, &cursorc_wm))
- enabled |= 1 << PIPE_C;
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
- }
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+ wm.sr.plane, wm.sr.cursor);
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
- "SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- planec_wm, cursorc_wm,
- plane_sr, cursor_sr);
+ /*
+ * FIXME DDR DVFS introduces massive memory latencies which
+ * are not known to the system agent, so any deadline specified
+ * by the display may not be respected. To support DDR DVFS
+ * the watermark code needs to be rewritten to essentially
+ * bypass deadline mechanism and rely solely on the
+ * watermarks. For now disable DDR DVFS.
+ */
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_set_memory_dvfs(dev_priv, false);
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
- I915_WRITE(DSPFW9_CHV,
- (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
- DSPFW_CURSORC_MASK)) |
- (planec_wm << DSPFW_PLANEC_SHIFT) |
- (cursorc_wm << DSPFW_CURSORC_SHIFT));
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -961,30 +1080,47 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_plane(plane)->pipe;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
int sprite = to_intel_plane(plane)->plane;
- int drain_latency;
- int plane_prec;
- int sprite_dl;
- int prec_mult;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
- (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
+ if (enabled) {
+ wm.ddl[pipe].sprite[sprite] =
+ vlv_compute_drain_latency(crtc, plane);
- if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
- &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_SPRITE_PRECISION_HIGH(sprite) :
- DDL_SPRITE_PRECISION_LOW(sprite);
- sprite_dl |= plane_prec |
- (drain_latency << DDL_SPRITE_SHIFT(sprite));
+ wm.pipe[pipe].sprite[sprite] =
+ vlv_compute_wm(intel_crtc,
+ to_intel_plane(plane),
+ vlv_get_fifo_size(dev, pipe, sprite+1));
+ } else {
+ wm.ddl[pipe].sprite[sprite] = 0;
+ wm.pipe[pipe].sprite[sprite] = 0;
}
- I915_WRITE(VLV_DDL(pipe), sprite_dl);
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ sprite_name(pipe, sprite),
+ wm.pipe[pipe].sprite[sprite],
+ wm.sr.plane, wm.sr.cursor);
+
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1027,17 +1163,17 @@ static void g4x_update_wm(struct drm_crtc *crtc)
plane_sr, cursor_sr);
I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
+ FW_WM(plane_sr, SR) |
+ FW_WM(cursorb_wm, CURSORB) |
+ FW_WM(planeb_wm, PLANEB) |
+ FW_WM(planea_wm, PLANEA));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
+ FW_WM(cursora_wm, CURSORA));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -1062,7 +1198,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1080,7 +1216,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * to_intel_crtc(crtc)->cursor_width;
+ pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1103,19 +1239,21 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << DSPFW_CURSORB_SHIFT) |
- (8 << DSPFW_PLANEB_SHIFT) |
- (8 << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
- (8 << DSPFW_PLANEC_SHIFT_OLD));
+ I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
+#undef FW_WM
+
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
@@ -1139,7 +1277,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1161,7 +1299,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1184,7 +1322,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I915GM(dev) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->state->fb);
/* self-refresh seems busted with untiled */
if (obj->tiling_mode == I915_TILING_NONE)
@@ -1208,7 +1346,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
+ int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1645,7 +1783,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return 0;
/* The WMs are computed based on how long it takes to fill a single
@@ -1711,6 +1849,8 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
GEN9_MEM_LATENCY_LEVEL_MASK;
/*
+ * WaWmMemoryReadLatency:skl
+ *
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from
* the punit.
@@ -1898,19 +2038,31 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
p->active = true;
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
- p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
- p->cur.bytes_per_pixel = 4;
+
+ if (crtc->primary->state->fb) {
+ p->pri.enabled = true;
+ p->pri.bytes_per_pixel =
+ crtc->primary->state->fb->bits_per_pixel / 8;
+ } else {
+ p->pri.enabled = false;
+ p->pri.bytes_per_pixel = 0;
+ }
+
+ if (crtc->cursor->state->fb) {
+ p->cur.enabled = true;
+ p->cur.bytes_per_pixel = 4;
+ } else {
+ p->cur.enabled = false;
+ p->cur.bytes_per_pixel = 0;
+ }
p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
- p->cur.horiz_pixels = intel_crtc->cursor_width;
- /* TODO: for now, assume primary and cursor planes are always enabled. */
- p->pri.enabled = true;
- p->cur.enabled = true;
+ p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2410,7 +2562,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
nth_active_pipe = 0;
for_each_crtc(dev, crtc) {
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
continue;
if (crtc == for_crtc)
@@ -2443,13 +2595,12 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
int plane;
u32 val;
for_each_pipe(dev_priv, pipe) {
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -2498,10 +2649,12 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
+ uint16_t minimum[I915_MAX_PLANES];
unsigned int total_data_rate;
int plane;
@@ -2520,9 +2673,21 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
alloc_size -= cursor_blocks;
alloc->end -= cursor_blocks;
+ /* 1. Allocate the minimum required blocks for each active plane */
+ for_each_plane(dev_priv, pipe, plane) {
+ const struct intel_plane_wm_parameters *p;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ minimum[plane] = 8;
+ alloc_size -= minimum[plane];
+ }
+
/*
- * Each active plane get a portion of the remaining space, in
- * proportion to the amount of data they need to fetch from memory.
+ * 2. Distribute the remaining space in proportion to the amount of
+ * data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
@@ -2544,8 +2709,9 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
* promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1
*/
- plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
- total_data_rate);
+ plane_blocks = minimum[plane];
+ plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
+ total_data_rate);
ddb->plane[pipe][plane].start = start;
ddb->plane[pipe][plane].end = start + plane_blocks;
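The allocation is now a two-pass split: reserve the 8-block minimum for every active plane, then hand out the remainder in proportion to each plane's data rate. A toy sketch of the split with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t alloc_size = 478;                   /* hypothetical pipe DDB space after the cursor */
        uint32_t data_rate[2] = { 800000, 200000 };  /* hypothetical per-plane data rates */
        uint32_t total = data_rate[0] + data_rate[1];
        uint16_t minimum = 8, start = 0;
        int i;

        alloc_size -= 2 * minimum;                   /* pass 1: reserve the minimums */

        for (i = 0; i < 2; i++) {                    /* pass 2: proportional share on top */
                uint16_t blocks = minimum +
                        (uint16_t)((uint64_t)alloc_size * data_rate[i] / total);

                printf("plane %d: blocks [%d, %d)\n", i, start, start + blocks);
                start += blocks;
        }
        return 0;
}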
@@ -2575,7 +2741,7 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
if (latency == 0)
return UINT_MAX;
- wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
+ wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
return ret;
@@ -2583,17 +2749,29 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
- uint32_t latency)
+ uint64_t tiling, uint32_t latency)
{
- uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
+ uint32_t ret;
+ uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint32_t wm_intermediate_val;
if (latency == 0)
return UINT_MAX;
plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+
+ if (tiling == I915_FORMAT_MOD_Y_TILED ||
+ tiling == I915_FORMAT_MOD_Yf_TILED) {
+ plane_bytes_per_line *= 4;
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ plane_blocks_per_line /= 4;
+ } else {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ }
+
wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
- plane_bytes_per_line;
+ plane_blocks_per_line;
return ret;
}
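skl_wm_method2() now works in DDB blocks per line rather than raw bytes, and for Y/Yf-tiled surfaces rounds over a 4-line tile before dividing back to a per-line figure. A standalone sketch of that arithmetic with invented plane parameters:

#include <stdint.h>
#include <stdio.h>

#define EX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t ex_skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
                                  uint32_t horiz_pixels, uint32_t cpp,
                                  int y_tiled, uint32_t latency_us)
{
        uint32_t bytes_per_line = horiz_pixels * cpp;
        uint32_t blocks_per_line;

        if (y_tiled)    /* round up over a 4-line tile, then back to per-line blocks */
                blocks_per_line = EX_DIV_ROUND_UP(bytes_per_line * 4, 512) / 4;
        else
                blocks_per_line = EX_DIV_ROUND_UP(bytes_per_line, 512);

        /* lines of latency (rounded up) times the blocks needed per line */
        return EX_DIV_ROUND_UP(latency_us * pixel_rate, pipe_htotal * 1000) *
               blocks_per_line;
}

int main(void)
{
        /* hypothetical 1920-wide plane, 3 bytes per pixel, ~148.5 MHz, 15 us latency */
        printf("linear/X-tiled: %u blocks\n",
               ex_skl_wm_method2(148500, 2200, 1920, 3, 0, 15));
        printf("Y-tiled:        %u blocks\n",
               ex_skl_wm_method2(148500, 2200, 1920, 3, 1, 15));
        return 0;
}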
@@ -2624,7 +2802,7 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev,
struct drm_plane *plane;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- config->num_pipes_active += intel_crtc_active(crtc);
+ config->num_pipes_active += to_intel_crtc(crtc)->active;
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2642,26 +2820,40 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
+ struct drm_framebuffer *fb;
int i = 1; /* Index for sprite planes start */
- p->active = intel_crtc_active(crtc);
+ p->active = intel_crtc->active;
if (p->active) {
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
- /*
- * For now, assume primary and cursor planes are always enabled.
- */
- p->plane[0].enabled = true;
- p->plane[0].bytes_per_pixel =
- crtc->primary->fb->bits_per_pixel / 8;
+ fb = crtc->primary->state->fb;
+ if (fb) {
+ p->plane[0].enabled = true;
+ p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->plane[0].tiling = fb->modifier[0];
+ } else {
+ p->plane[0].enabled = false;
+ p->plane[0].bytes_per_pixel = 0;
+ p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
+ }
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
-
- p->cursor.enabled = true;
- p->cursor.bytes_per_pixel = 4;
- p->cursor.horiz_pixels = intel_crtc->cursor_width ?
- intel_crtc->cursor_width : 64;
+ p->plane[0].rotation = crtc->primary->state->rotation;
+
+ fb = crtc->cursor->state->fb;
+ if (fb) {
+ p->cursor.enabled = true;
+ p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
+ p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
+ } else {
+ p->cursor.enabled = false;
+ p->cursor.bytes_per_pixel = 0;
+ p->cursor.horiz_pixels = 64;
+ p->cursor.vert_pixels = 64;
+ }
}
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2673,41 +2865,74 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
}
}
-static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
+static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params,
uint16_t ddb_allocation,
- uint32_t mem_value,
+ int level,
uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */)
{
- uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
- uint32_t result_bytes;
+ uint32_t latency = dev_priv->wm.skl_latency[level];
+ uint32_t method1, method2;
+ uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint32_t res_blocks, res_lines;
+ uint32_t selected_result;
- if (mem_value == 0 || !p->active || !p_params->enabled)
+ if (latency == 0 || !p->active || !p_params->enabled)
return false;
method1 = skl_wm_method1(p->pixel_rate,
p_params->bytes_per_pixel,
- mem_value);
+ latency);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
p_params->bytes_per_pixel,
- mem_value);
+ p_params->tiling,
+ latency);
plane_bytes_per_line = p_params->horiz_pixels *
p_params->bytes_per_pixel;
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+
+ if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+ p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
+ uint32_t min_scanlines = 4;
+ uint32_t y_tile_minimum;
+ if (intel_rotation_90_or_270(p_params->rotation)) {
+ switch (p_params->bytes_per_pixel) {
+ case 1:
+ min_scanlines = 16;
+ break;
+ case 2:
+ min_scanlines = 8;
+ break;
+ case 8:
+ WARN(1, "Unsupported pixel depth for rotation");
+ }
+ }
+ y_tile_minimum = plane_blocks_per_line * min_scanlines;
+ selected_result = max(method2, y_tile_minimum);
+ } else {
+ if ((ddb_allocation / plane_blocks_per_line) >= 1)
+ selected_result = min(method1, method2);
+ else
+ selected_result = method1;
+ }
- /* For now xtile and linear */
- if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
- result_bytes = min(method1, method2);
- else
- result_bytes = method1;
+ res_blocks = selected_result + 1;
+ res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
- res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
- res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
+ if (level >= 1 && level <= 7) {
+ if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
+ p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
+ res_lines += 4;
+ else
+ res_blocks++;
+ }
- if (res_blocks > ddb_allocation || res_lines > 31)
+ if (res_blocks >= ddb_allocation || res_lines > 31)
return false;
*out_blocks = res_blocks;
@@ -2724,30 +2949,31 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
int num_planes,
struct skl_wm_level *result)
{
- uint16_t latency = dev_priv->wm.skl_latency[level];
uint16_t ddb_blocks;
int i;
for (i = 0; i < num_planes; i++) {
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
+ result->plane_en[i] = skl_compute_plane_wm(dev_priv,
+ p, &p->plane[i],
ddb_blocks,
- latency,
+ level,
&result->plane_res_b[i],
&result->plane_res_l[i]);
}
ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
- result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
- latency, &result->cursor_res_b,
+ result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
+ ddb_blocks, level,
+ &result->cursor_res_b,
&result->cursor_res_l);
}
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
return 0;
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
@@ -2921,12 +3147,11 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
- struct drm_device *dev = dev_priv->dev;
int plane;
DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
I915_WRITE(PLANE_SURF(pipe, plane),
I915_READ(PLANE_SURF(pipe, plane)));
}
@@ -3133,12 +3358,21 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
int pixel_size, bool enabled, bool scaled)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane->state->fb;
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
+ intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
+ /*
+ * Framebuffer can be NULL on plane disable, but it does not
+ * matter for watermarks if we assume no tiling in that case.
+ */
+ if (fb)
+ intel_plane->wm.tiling = fb->modifier[0];
+ intel_plane->wm.rotation = plane->state->rotation;
skl_update_wm(crtc);
}
@@ -3287,7 +3521,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
hw->dirty[pipe] = true;
@@ -3342,7 +3576,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
- active->pipe_enabled = intel_crtc_active(crtc);
+ active->pipe_enabled = intel_crtc->active;
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
@@ -3456,41 +3690,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
pixel_size, enabled, scaled);
}
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_ggtt_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- return NULL;
-}
-
/**
* Lock protecting IPS related data structures
*/
@@ -3623,7 +3822,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
*/
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
@@ -3633,9 +3832,15 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- limits = dev_priv->rps.max_freq_softlimit << 24;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= dev_priv->rps.min_freq_softlimit << 16;
+ if (IS_GEN9(dev_priv->dev)) {
+ limits = (dev_priv->rps.max_freq_softlimit) << 23;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+ } else {
+ limits = dev_priv->rps.max_freq_softlimit << 24;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= dev_priv->rps.min_freq_softlimit << 16;
+ }
return limits;
}
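The functional change in the rename is the packing: gen9 places the interrupt-limit fields at bits 23 and 14 instead of 24 and 16. A small sketch of the packing with invented softlimit values (the generation check is assumed to be done by the caller, as above):

#include <stdint.h>
#include <stdio.h>

static uint32_t ex_rps_limits(int is_gen9, uint8_t val, uint8_t min, uint8_t max)
{
        uint32_t limits;

        if (is_gen9) {                          /* SKL: fields at bits 23 and 14 */
                limits = (uint32_t)max << 23;
                if (val <= min)
                        limits |= (uint32_t)min << 14;
        } else {                                /* gen6-8: fields at bits 24 and 16 */
                limits = (uint32_t)max << 24;
                if (val <= min)
                        limits |= (uint32_t)min << 16;
        }
        return limits;
}

int main(void)
{
        /* invented softlimits: idling at the minimum frequency */
        printf("gen6 limits: 0x%08x\n", ex_rps_limits(0, 5, 5, 22));
        printf("gen9 limits: 0x%08x\n", ex_rps_limits(1, 15, 15, 66));
        return 0;
}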
@@ -3643,6 +3848,8 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
int new_power;
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
@@ -3664,9 +3871,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
/* Max/min bins are special */
- if (val == dev_priv->rps.min_freq_softlimit)
+ if (val <= dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
- if (val == dev_priv->rps.max_freq_softlimit)
+ if (val >= dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
@@ -3675,59 +3882,53 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
- I915_WRITE(GEN6_RP_UP_EI, 12500);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+ ei_up = 16000;
+ threshold_up = 95;
/* Downclock if less than 85% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 85;
break;
case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
- I915_WRITE(GEN6_RP_UP_EI, 10250);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+ ei_up = 13000;
+ threshold_up = 90;
/* Downclock if less than 75% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 75;
break;
case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
- I915_WRITE(GEN6_RP_UP_EI, 8000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+ ei_up = 10000;
+ threshold_up = 85;
/* Downclock if less than 60% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 60;
break;
}
+ I915_WRITE(GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_up));
+ I915_WRITE(GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+
+ I915_WRITE(GEN6_RP_DOWN_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_down));
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
dev_priv->rps.power = new_power;
dev_priv->rps.last_adj = 0;
}
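The per-power-state values are no longer hard-coded register numbers; the evaluation interval is given in microseconds and the threshold as a percentage of it, then converted to hardware units by GT_INTERVAL_FROM_US(). A rough sketch of that arithmetic, assuming the pre-SKL granularity of 1.28 us per counter unit implied by the old 12500-for-16ms value (the real conversion is platform dependent):

#include <stdio.h>

/* assumption: one counter unit per 1.28 us; GT_INTERVAL_FROM_US() does
 * the real, platform-dependent conversion in the driver. */
static unsigned int ex_interval_from_us(unsigned int us)
{
        return us * 100 / 128;
}

int main(void)
{
        unsigned int ei_up = 16000, threshold_up = 95;  /* LOW_POWER: >95% busy over 16 ms */

        printf("RP_UP_EI        = %u\n", ex_interval_from_us(ei_up));
        printf("RP_UP_THRESHOLD = %u\n",
               ex_interval_from_us(ei_up * threshold_up / 100));
        return 0;
}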
@@ -3737,11 +3938,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
u32 mask = 0;
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
@@ -3750,13 +3950,13 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
/* min/max delay may still have been modified so be sure to
* write the limits value.
@@ -3764,7 +3964,10 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN9_FREQUENCY(val));
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -3777,7 +3980,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
@@ -3786,6 +3989,27 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(val * 50);
}
+static void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
+
+ if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+ "Odd GPU freq value\n"))
+ val &= ~1;
+
+ if (val != dev_priv->rps.cur_freq)
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+ dev_priv->rps.cur_freq = val;
+ trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
*
* * If Gfx is Idle, then
@@ -3798,10 +4022,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ u32 val = dev_priv->rps.idle_freq;
/* CHV and latest VLV don't need to force the gfx clock */
if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ valleyview_set_rps(dev_priv->dev, val);
return;
}
@@ -3809,7 +4034,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* When we are idle. Drop to min voltage state.
*/
- if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+ if (dev_priv->rps.cur_freq <= val)
return;
/* Mask turbo interrupt so that they will not come in between */
@@ -3818,10 +4043,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, true);
- dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
+ dev_priv->rps.cur_freq = val;
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
- dev_priv->rps.min_freq_softlimit);
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 100))
@@ -3829,8 +4053,19 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, false);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+}
+
+void gen6_rps_busy(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ gen6_rps_reset_ei(dev_priv);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3842,46 +4077,34 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ u32 val;
mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev))
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
- else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+ val = dev_priv->rps.max_freq_softlimit;
+ if (dev_priv->rps.enabled &&
+ dev_priv->mm.busy &&
+ dev_priv->rps.cur_freq < val) {
+ intel_set_rps(dev_priv->dev, val);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
-void valleyview_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_device *dev, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
- if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
- "Odd GPU freq value\n"))
- val &= ~1;
-
- if (val != dev_priv->rps.cur_freq)
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- dev_priv->rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
}
static void gen9_disable_rps(struct drm_device *dev)
@@ -3995,6 +4218,13 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ if (IS_SKYLAKE(dev)) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ the natural hardware unit for SKL */
+ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ }
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
@@ -4011,6 +4241,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4035,23 +4267,21 @@ static void gen9_enable_rps(struct drm_device *dev)
gen6_init_rps_frequencies(dev);
- I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
- I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
+ /* Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+
+ /* 1 second timeout */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(dev_priv, 1000000));
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
- I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
- I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
- I915_WRITE(GEN6_PMINTRMSK, 0x6);
- I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
- gen6_enable_rps_interrupts(dev);
+ /* Leaning on the below call to gen6_set_rps to program/setup the
+ * Up/Down EI & threshold registers, as well as the RP_CONTROL,
+ * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4179,7 +4409,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4273,7 +4503,7 @@ static void gen6_enable_rps(struct drm_device *dev)
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -4638,6 +4868,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4713,6 +4945,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4904,124 +5138,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
- drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
- dev_priv->ips.renderctx = NULL;
- }
-
- if (dev_priv->ips.pwrctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
- drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
- dev_priv->ips.pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx == NULL)
- dev_priv->ips.renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.renderctx)
- return -ENOMEM;
-
- if (dev_priv->ips.pwrctx == NULL)
- dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- bool was_interruptible;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = ironlake_setup_rc6(dev);
- if (ret)
- return;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = intel_ring_begin(ring, 6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- dev_priv->mm.interruptible = was_interruptible;
- return;
- }
-
- intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- intel_ring_emit(ring, MI_SUSPEND_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_advance(ring);
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush, combined with MI_FLUSH above, it should be
- * safe to assume that renderctx is valid
- */
- ret = intel_ring_idle(ring);
- dev_priv->mm.interruptible = was_interruptible;
- if (ret) {
- DRM_ERROR("failed to enable ironlake power savings\n");
- ironlake_teardown_rc6(dev);
- return;
- }
-
- I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-
- intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
-}
-
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5534,12 +5650,7 @@ static void gen6_suspend_rps(struct drm_device *dev)
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_disable_rps_interrupts(dev);
}
/**
@@ -5569,7 +5680,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
- ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6) {
intel_suspend_gt_powersave(dev);
@@ -5597,12 +5707,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- /*
- * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
- * added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_reset_rps_interrupts(dev);
+ gen6_reset_rps_interrupts(dev);
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
@@ -5619,10 +5724,16 @@ static void intel_gen6_powersave_work(struct work_struct *work)
gen6_enable_rps(dev);
__gen6_update_ring_freq(dev);
}
+
+ WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+
+ WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+
dev_priv->rps.enabled = true;
- if (INTEL_INFO(dev)->gen < 9)
- gen6_enable_rps_interrupts(dev);
+ gen6_enable_rps_interrupts(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5633,10 +5744,13 @@ void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(dev))
+ return;
+
if (IS_IRONLAKE_M(dev)) {
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
- ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
@@ -6169,11 +6283,22 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen6_check_mch_setup(dev);
}
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+}
+
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -6221,8 +6346,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization isn't listed for VLV.
@@ -6259,9 +6382,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -6396,7 +6517,8 @@ void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->display.init_clock_gating(dev);
+ if (dev_priv->display.init_clock_gating)
+ dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
@@ -6422,7 +6544,7 @@ void intel_init_pm(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
- dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+ dev_priv->display.init_clock_gating = skl_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -6450,7 +6572,7 @@ void intel_init_pm(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
- dev_priv->display.update_wm = cherryview_update_wm;
+ dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
@@ -6618,7 +6740,9 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_gpu_freq(dev_priv, val);
@@ -6628,7 +6752,9 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_freq_opcode(dev_priv, val);
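The Gen9 branches added to intel_gpu_freq() and intel_freq_opcode() above scale by GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER rather than the plain multiplier. A minimal sketch of that round trip, assuming GT_FREQUENCY_MULTIPLIER is 50 and GEN9_FREQ_SCALER is 3 (about 16.67 MHz per Gen9 ratio step); both constants are assumptions here:

	#define GT_FREQUENCY_MULTIPLIER	50	/* assumed for illustration */
	#define GEN9_FREQ_SCALER	3	/* assumed for illustration */

	static int gen9_ratio_to_mhz(int val)
	{
		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
	}

	static int gen9_mhz_to_ratio(int mhz)
	{
		return (mhz * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
	}

	/* e.g. gen9_ratio_to_mhz(36) == 600 and gen9_mhz_to_ratio(600) == 36 */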
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b9f40c2e0af7..a8f9348259ae 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -532,8 +532,6 @@ static void intel_psr_exit(struct drm_device *dev)
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
- dev_priv->psr.active = false;
} else {
val = I915_READ(VLV_PSRCTL(pipe));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b3c6dbd467..441e2502b889 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
return 0;
}
-static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
-{
- int ret;
-
- if (!ring->fbc_dirty)
- return 0;
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
- /* WaFbcNukeOn3DBlt:ivb/hsw */
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, value);
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
- intel_ring_advance(ring);
-
- ring->fbc_dirty = false;
- return 0;
-}
-
static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
return 0;
}
@@ -458,14 +432,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret;
}
- ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
- if (ret)
- return ret;
-
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
- return 0;
+ return gen8_emit_pipe_control(ring, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -502,6 +469,68 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
I915_WRITE(HWS_PGA, addr);
}
+static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ /*
+ * VCS2 doesn't actually exist on Gen7; this case only
+ * silences the gcc switch check warning.
+ */
+ case VCS2:
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ /* XXX: gen8 returns to sanity */
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+
+ /*
+ * Flush the TLB for this page
+ *
+ * FIXME: These two bits have disappeared on gen8, so a question
+ * arises: do we still need this and if so how should we go about
+ * invalidating the TLB?
+ */
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+
+ /* ring should be idle before issuing a sync flush */
+ WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
+}
+
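The INSTPM write in the relocated intel_ring_setup_status_page() uses the masked-bit register convention: the upper 16 bits of the value select which of the lower 16 bits the write actually changes. A small sketch of that encoding (the helper names are illustrative, mirroring what _MASKED_BIT_ENABLE does; the bits are assumed to fit in the low 16 bits):

	/* Masked register write: bit N takes effect only if bit N+16 is also set. */
	static inline unsigned int masked_bit_enable(unsigned int bits)
	{
		return (bits << 16) | bits;	/* write-enable the bits and set them */
	}

	static inline unsigned int masked_bit_disable(unsigned int bits)
	{
		return bits << 16;		/* write-enable the bits, clear them */
	}

	/* e.g. masked_bit_enable(0x0001 | 0x0020) == 0x00210021 */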
static bool stop_ring(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
@@ -788,12 +817,14 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
- /* WaForceEnableNonCoherent:bdw */
- /* WaHdcDisableFetchWhenMasked:bdw */
- /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ /* WaForceEnableNonCoherent:bdw */
HDC_FORCE_NON_COHERENT |
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaHdcDisableFetchWhenMasked:bdw */
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
@@ -870,9 +901,132 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
+ if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+ INTEL_REVID(dev) == SKL_REVID_D0)
+ /* WaBarrierPerformanceFixDisable:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE |
+ HDC_BARRIER_PERFORMANCE_DISABLE);
+
return 0;
}
+static int gen9_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* WaDisablePartialInstShootdown:skl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Syncing dependencies between camera and graphics */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+ if (INTEL_REVID(dev) == SKL_REVID_A0 ||
+ INTEL_REVID(dev) == SKL_REVID_B0) {
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_DG_MIRROR_FIX_ENABLE);
+ }
+
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
+ WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN9_RHWO_OPTIMIZATION_DISABLE);
+ WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
+ DISABLE_PIXEL_MASK_CAMMING);
+ }
+
+ if (INTEL_REVID(dev) >= SKL_REVID_C0) {
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX);
+ }
+
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /*
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+ }
+
+ /* Wa4x4STCOptimizationDisable:skl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /* WaDisablePartialResolveInVc:skl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+ /* WaCcsTlbPrefetchDisable:skl */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+ return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 vals[3] = { 0, 0, 0 };
+ unsigned int i;
+
+ for (i = 0; i < 3; i++) {
+ u8 ss;
+
+ /*
+ * Only consider slices where one, and only one, subslice has 7
+ * EUs
+ */
+ if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
+ continue;
+
+ /*
+ * subslice_7eu[i] != 0 (because of the check above) and
+ * ss_max == 4 (maximum number of subslices possible per slice)
+ *
+ * -> 0 <= ss <= 3;
+ */
+ ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+ vals[i] = 3 - ss;
+ }
+
+ if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+ return 0;
+
+ /* Tune IZ hashing. See intel_device_info_runtime_init() */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN9_IZ_HASHING_MASK(2) |
+ GEN9_IZ_HASHING_MASK(1) |
+ GEN9_IZ_HASHING_MASK(0),
+ GEN9_IZ_HASHING(2, vals[2]) |
+ GEN9_IZ_HASHING(1, vals[1]) |
+ GEN9_IZ_HASHING(0, vals[0]));
+
+ return 0;
+}
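A worked example of the selection in skl_tune_iz_hashing() above, using made-up fuse data to show how vals[] is derived (the inputs are illustrative, not real hardware values):

	#include <stdio.h>

	/* Standalone helpers standing in for hweight8()/ffs(); illustrative only. */
	static int popcount8(unsigned char v) { int n = 0; for (; v; v >>= 1) n += v & 1; return n; }
	static int lowest_bit(unsigned char v) { int i = 0; while (!(v & 1)) { v >>= 1; i++; } return i; }

	int main(void)
	{
		unsigned char subslice_7eu[3] = { 0x4, 0x0, 0x1 };	/* assumed sample fuses */
		unsigned char vals[3] = { 0, 0, 0 };
		int i;

		for (i = 0; i < 3; i++) {
			if (popcount8(subslice_7eu[i]) != 1)
				continue;	/* slice 1 skipped: not exactly one 7-EU subslice */
			vals[i] = 3 - lowest_bit(subslice_7eu[i]);	/* same as 3 - (ffs() - 1) */
		}

		printf("%d %d %d\n", vals[0], vals[1], vals[2]);	/* prints: 1 0 3 */
		return 0;
	}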
+
+static int skl_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_workarounds(ring);
+
+ /* WaDisablePowerCompilerClockGating:skl */
+ if (INTEL_REVID(dev) == SKL_REVID_B0)
+ WA_SET_BIT_MASKED(HIZ_CHICKEN,
+ BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+
+ return skl_tune_iz_hashing(ring);
+}
+
int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -888,6 +1042,11 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
if (IS_CHERRYVIEW(dev))
return chv_init_workarounds(ring);
+ if (IS_SKYLAKE(dev))
+ return skl_init_workarounds(ring);
+ else if (IS_GEN9(dev))
+ return gen9_init_workarounds(ring);
+
return 0;
}
@@ -1386,68 +1545,6 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-void intel_ring_setup_status_page(struct intel_engine_cs *ring)
-{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- u32 mmio = 0;
-
- /* The ring status page addresses are no longer next to the rest of
- * the ring registers as of gen7.
- */
- if (IS_GEN7(dev)) {
- switch (ring->id) {
- case RCS:
- mmio = RENDER_HWS_PGA_GEN7;
- break;
- case BCS:
- mmio = BLT_HWS_PGA_GEN7;
- break;
- /*
- * VCS2 actually doesn't exist on Gen7. Only shut up
- * gcc switch check warning
- */
- case VCS2:
- case VCS:
- mmio = BSD_HWS_PGA_GEN7;
- break;
- case VECS:
- mmio = VEBOX_HWS_PGA_GEN7;
- break;
- }
- } else if (IS_GEN6(ring->dev)) {
- mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
- } else {
- /* XXX: gen8 returns to sanity */
- mmio = RING_HWS_PGA(ring->mmio_base);
- }
-
- I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
- POSTING_READ(mmio);
-
- /*
- * Flush the TLB for this page
- *
- * FIXME: These two bits have disappeared on gen8, so a question
- * arises: do we still need this and if so how should we go about
- * invalidating the TLB?
- */
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
- u32 reg = RING_INSTPM(ring->mmio_base);
-
- /* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-
- I915_WRITE(reg,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
- 1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- ring->name);
- }
-}
-
static int
bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
@@ -1611,7 +1708,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 length,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -1622,7 +1719,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1635,8 +1733,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
- u64 offset, u32 len,
- unsigned flags)
+ u64 offset, u32 len,
+ unsigned dispatch_flags)
{
u32 cs_offset = ring->scratch.gtt_offset;
int ret;
@@ -1654,7 +1752,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- if ((flags & I915_DISPATCH_PINNED) == 0) {
+ if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1686,7 +1784,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1697,7 +1796,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -1706,7 +1805,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@@ -2097,6 +2197,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
kref_init(&request->ref);
request->ring = ring;
+ request->ringbuf = ring->buffer;
request->uniq = dev_private->request_uniq++;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
@@ -2273,9 +2374,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
- bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
+ bool ppgtt = USES_PPGTT(ring->dev) &&
+ !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_ring_begin(ring, 4);
@@ -2294,8 +2396,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
- u64 offset, u32 len,
- unsigned flags)
+ u64 offset, u32 len,
+ unsigned dispatch_flags)
{
int ret;
@@ -2305,7 +2407,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
- (flags & I915_DISPATCH_SECURE ?
+ (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
@@ -2317,7 +2419,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
- unsigned flags)
+ unsigned dispatch_flags)
{
int ret;
@@ -2327,7 +2429,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -2341,7 +2444,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2350,7 +2452,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(ring->dev)->gen >= 8)
+ if (INTEL_INFO(dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2370,7 +2472,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(ring->dev)->gen >= 8) {
+ if (INTEL_INFO(dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
@@ -2379,13 +2481,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
- if (!invalidate && flush) {
- if (IS_GEN7(dev))
- return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
- else if (IS_BROADWELL(dev))
- dev_priv->fbc.need_sw_cache_clean = true;
- }
-
return 0;
}
@@ -2612,19 +2707,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
/**
- * Initialize the second BSD ring for Broadwell GT3.
- * It is noted that this only exists on Broadwell GT3.
+ * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
*/
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
- if ((INTEL_INFO(dev)->gen != 8)) {
- DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
- return -EINVAL;
- }
-
ring->name = "bsd2 ring";
ring->id = VCS2;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 714f3fdd57d2..c761fe05ad6f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -164,7 +164,7 @@ struct intel_engine_cs {
u32 seqno);
int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
u64 offset, u32 length,
- unsigned flags);
+ unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
@@ -242,7 +242,7 @@ struct intel_engine_cs {
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
- u64 offset, unsigned flags);
+ u64 offset, unsigned dispatch_flags);
/**
* List of objects currently involved in rendering from the
@@ -267,7 +267,6 @@ struct intel_engine_cs {
*/
struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
- bool fbc_dirty;
wait_queue_head_t irq_queue;
@@ -373,11 +372,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
+ * 0x20-0x2f: Reserved (Gen6+)
*
- * The area from dword 0x20 to 0x3ff is available for driver usage.
+ * The area from dword 0x30 to 0x3ff is available for driver usage.
*/
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
@@ -425,7 +425,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
-void intel_ring_setup_status_page(struct intel_engine_cs *ring);
int init_workarounds_ring(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 49695d7d51e3..ce00e6994eeb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -194,8 +194,39 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
- gen8_irq_power_well_post_enable(dev_priv);
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
+static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So we touch the VGA MSR register here, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ if (power_well->data == SKL_DISP_PW_2) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+ }
+
+ if (power_well->data == SKL_DISP_PW_1) {
+ intel_prepare_ddi(dev);
+ gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
+ }
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -230,6 +261,141 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
}
}
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
+#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
+ SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
+ SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static void skl_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ uint32_t tmp, fuse_status;
+ uint32_t req_mask, state_mask;
+ bool is_enabled, enable_requested, check_fuse_status = false;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ fuse_status = I915_READ(SKL_FUSE_STATUS);
+
+ switch (power_well->data) {
+ case SKL_DISP_PW_1:
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG0_DIST_STATUS), 1)) {
+ DRM_ERROR("PG0 not enabled\n");
+ return;
+ }
+ break;
+ case SKL_DISP_PW_2:
+ if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
+ DRM_ERROR("PG1 in disabled state\n");
+ return;
+ }
+ break;
+ case SKL_DISP_PW_DDI_A_E:
+ case SKL_DISP_PW_DDI_B:
+ case SKL_DISP_PW_DDI_C:
+ case SKL_DISP_PW_DDI_D:
+ case SKL_DISP_PW_MISC_IO:
+ break;
+ default:
+ WARN(1, "Unknown power well %lu\n", power_well->data);
+ return;
+ }
+
+ req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ enable_requested = tmp & req_mask;
+ state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ is_enabled = tmp & state_mask;
+
+ if (enable) {
+ if (!enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
+ }
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ state_mask), 1))
+ DRM_ERROR("%s enable timeout\n",
+ power_well->name);
+ check_fuse_status = true;
+ }
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+ DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+ }
+ }
+
+ if (check_fuse_status) {
+ if (power_well->data == SKL_DISP_PW_1) {
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG1_DIST_STATUS), 1))
+ DRM_ERROR("PG1 distributing status timeout\n");
+ } else if (power_well->data == SKL_DISP_PW_2) {
+ if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+ SKL_FUSE_PG2_DIST_STATUS), 1))
+ DRM_ERROR("PG2 distributing status timeout\n");
+ }
+ }
+
+ if (enable && !is_enabled)
+ skl_power_well_post_enable(dev_priv, power_well);
+}
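skl_set_power_well() above keys its decisions off a per-well request bit and state bit in HSW_PWR_WELL_DRIVER. A minimal sketch of that bookkeeping, assuming the common two-bits-per-well packing (state at bit 2*id, request at bit 2*id + 1); the exact bit layout is an assumption, only the request/state pattern comes from the code:

	/* Assumed packing: each power well owns two adjacent bits of the register. */
	#define PW_STATE_BIT(id)	(1u << ((id) * 2))
	#define PW_REQUEST_BIT(id)	(1u << ((id) * 2 + 1))

	static int pw_is_enabled(unsigned int reg, unsigned int id)
	{
		unsigned int mask = PW_REQUEST_BIT(id) | PW_STATE_BIT(id);

		/* mirrors skl_power_well_enabled(): requested by us and reported on */
		return (reg & mask) == mask;
	}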
+
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -255,6 +421,36 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_set_power_well(dev_priv, power_well, false);
}
+static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
+ SKL_POWER_WELL_STATE(power_well->data);
+
+ return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
+}
+
+static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+ /* Clear any request made by BIOS as driver is taking over */
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void skl_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, true);
+}
+
+static void skl_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ skl_set_power_well(dev_priv, power_well, false);
+}
+
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -829,6 +1025,13 @@ static const struct i915_power_well_ops hsw_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
+static const struct i915_power_well_ops skl_power_well_ops = {
+ .sync_hw = skl_power_well_sync_hw,
+ .enable = skl_power_well_enable,
+ .disable = skl_power_well_disable,
+ .is_enabled = skl_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
@@ -1059,6 +1262,57 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
return NULL;
}
+static struct i915_power_well skl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "power well 1",
+ .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_1,
+ },
+ {
+ .name = "MISC IO power well",
+ .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_MISC_IO,
+ },
+ {
+ .name = "power well 2",
+ .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_2,
+ },
+ {
+ .name = "DDI A/E power well",
+ .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_A_E,
+ },
+ {
+ .name = "DDI B power well",
+ .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C power well",
+ .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_C,
+ },
+ {
+ .name = "DDI D power well",
+ .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_DDI_D,
+ },
+};
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -1085,6 +1339,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_SKYLAKE(dev_priv->dev)) {
+ set_power_wells(power_domains, skl_power_wells);
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1200,7 +1456,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_get - grab an auxilliary power domain reference
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function grabs a power domain reference for the auxiliary power domain
@@ -1217,10 +1473,10 @@ void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_put - release an auxilliary power domain reference
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
* @dev_priv: i915 device instance
*
- * This function drops the auxilliary power domain reference obtained by
+ * This function drops the auxiliary power domain reference obtained by
* intel_aux_display_runtime_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 64ad2b40179f..e87d2f418de4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1247,7 +1247,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
switch (crtc->config->pixel_multiplier) {
default:
- WARN(1, "unknown pixel mutlipler specified\n");
+ WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -2194,6 +2194,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
@@ -2425,6 +2426,22 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
}
}
+static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+{
+ struct intel_sdvo_connector *sdvo_connector;
+
+ sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!sdvo_connector)
+ return NULL;
+
+ if (intel_connector_init(&sdvo_connector->base) < 0) {
+ kfree(sdvo_connector);
+ return NULL;
+ }
+
+ return sdvo_connector;
+}
+
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
@@ -2436,7 +2453,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising DVI device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
@@ -2490,7 +2507,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
DRM_DEBUG_KMS("initialising TV type %d\n", type);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
@@ -2569,7 +2586,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9c5451c97942..a4c0a04b5044 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -98,7 +98,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
if (min <= 0 || max <= 0)
return false;
- if (WARN_ON(drm_vblank_get(dev, pipe)))
+ if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
return false;
local_irq_disable();
@@ -132,7 +132,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
finish_wait(wq, &wait);
- drm_vblank_put(dev, pipe);
+ drm_crtc_vblank_put(&crtc->base);
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
@@ -179,7 +179,7 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -187,23 +187,16 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- u32 plane_ctl, stride;
+ u32 plane_ctl, stride_div;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ unsigned long surf_addr;
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
- plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
- plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
- plane_ctl &= ~PLANE_CTL_TILED_MASK;
- plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
- plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
-
- /* Trickle feed has to be enabled */
- plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
@@ -245,39 +238,57 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
BUG();
}
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- stride = fb->pitches[0] >> 6;
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
break;
- case I915_TILING_X:
+ case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X;
- stride = fb->pitches[0] >> 9;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ plane_ctl |= PLANE_CTL_TILED_Y;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ plane_ctl |= PLANE_CTL_TILED_YF;
break;
default:
- BUG();
+ MISSING_CASE(fb->modifier[0]);
}
+
if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
- plane_ctl |= PLANE_CTL_ENABLE;
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
-
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
+ stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
+ if (key->flags) {
+ I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ surf_addr = intel_plane_obj_offset(intel_plane, obj);
+
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
- I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+ I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
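PLANE_STRIDE above is now programmed in units of the tiling-dependent alignment returned by intel_fb_stride_alignment() instead of a hard-coded shift. A small worked sketch of that division, with the alignment values treated as assumptions (64 bytes linear, 512 bytes X-tiled):

	/* Illustrative only: the per-modifier alignments are assumptions. */
	static unsigned int stride_alignment(int x_tiled)
	{
		return x_tiled ? 512 : 64;	/* bytes per stride unit (assumed) */
	}

	static unsigned int plane_stride_value(unsigned int pitch_bytes, int x_tiled)
	{
		return pitch_bytes / stride_alignment(x_tiled);
	}

	/* e.g. a 7680-byte pitch (1920 px, 4 bpp, X-tiled) programs 7680 / 512 == 15 */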
@@ -290,73 +301,15 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- I915_WRITE(PLANE_CTL(pipe, plane),
- I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+ I915_WRITE(PLANE_CTL(pipe, plane), 0);
/* Activate double buffered register update */
- I915_WRITE(PLANE_CTL(pipe, plane), 0);
- POSTING_READ(PLANE_CTL(pipe, plane));
+ I915_WRITE(PLANE_SURF(pipe, plane), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane));
intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
}
-static int
-skl_update_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
- I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
- I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
- plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
- I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
-
- POSTING_READ(PLANE_CTL(pipe, plane));
-
- return 0;
-}
-
-static void
-skl_get_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
- key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
- key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
- case PLANE_CTL_KEY_ENABLE_DESTINATION:
- key->flags = I915_SET_COLORKEY_DESTINATION;
- break;
- case PLANE_CTL_KEY_ENABLE_SOURCE:
- key->flags = I915_SET_COLORKEY_SOURCE;
- break;
- default:
- key->flags = I915_SET_COLORKEY_NONE;
- }
-}
-
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
@@ -399,7 +352,7 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -408,19 +361,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPCNTR(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SP_PIXFORMAT_MASK;
- sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SP_TILED;
- sprctl &= ~SP_ROTATE_180;
+ sprctl = SP_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_YUYV:
@@ -474,8 +423,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
- sprctl |= SP_ENABLE;
-
intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
@@ -503,6 +450,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
@@ -536,8 +492,8 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
- ~SP_ENABLE);
+ I915_WRITE(SPCNTR(pipe, plane), 0);
+
/* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
@@ -546,61 +502,11 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
-static int
-vlv_update_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
- I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
- I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- sprctl &= ~SP_SOURCE_KEY;
- if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SP_SOURCE_KEY;
- I915_WRITE(SPCNTR(pipe, plane), sprctl);
-
- POSTING_READ(SPKEYMSK(pipe, plane));
-
- return 0;
-}
-
-static void
-vlv_get_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
- key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
- key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- if (sprctl & SP_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -609,19 +515,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_plane->pipe;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPRCTL(pipe));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SPRITE_PIXFORMAT_MASK;
- sprctl &= ~SPRITE_RGB_ORDER_RGBX;
- sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SPRITE_TILED;
- sprctl &= ~SPRITE_ROTATE_180;
+ sprctl = SPRITE_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -660,8 +561,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl |= SPRITE_ENABLE;
-
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
@@ -698,6 +597,17 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPRKEYVAL(pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -739,73 +649,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
I915_WRITE(SPRSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
-}
-
-static int
-ivb_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
- sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- sprctl |= SPRITE_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SPRITE_SOURCE_KEY;
- I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
-
- POSTING_READ(SPRKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
-
- if (sprctl & SPRITE_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (sprctl & SPRITE_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -814,19 +663,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- dvscntr = I915_READ(DVSCNTR(pipe));
-
- /* Mask out pixel format bits in case we change it */
- dvscntr &= ~DVS_PIXFORMAT_MASK;
- dvscntr &= ~DVS_RGB_ORDER_XBGR;
- dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
- dvscntr &= ~DVS_TILED;
- dvscntr &= ~DVS_ROTATE_180;
+ dvscntr = DVS_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -862,7 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
- dvscntr |= DVS_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
@@ -894,6 +737,17 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(DVSKEYVAL(pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -922,20 +776,14 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
+
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
/**
@@ -993,7 +841,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
mutex_lock(&dev->struct_mutex);
- if (dev_priv->fbc.plane == intel_crtc->plane)
+ if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);
@@ -1006,67 +854,9 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
hsw_disable_ips(intel_crtc);
}
-static int
-ilk_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
- dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- dvscntr |= DVS_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- dvscntr |= DVS_SOURCE_KEY;
- I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
-
- POSTING_READ(DVSKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
-
- if (dvscntr & DVS_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (dvscntr & DVS_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
-
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
- struct drm_intel_sprite_colorkey key;
-
- intel_plane->get_colorkey(&intel_plane->base, &key);
-
- return key.flags != I915_SET_COLORKEY_NONE;
+ return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
}
static int
@@ -1076,7 +866,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1106,16 +895,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
return -EINVAL;
}
- /* Sprite planes can be linear or x-tiled surfaces */
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- case I915_TILING_X:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported tiling mode\n");
- return -EINVAL;
- }
-
/*
* FIXME the following code does a bunch of fuzzy adjustments to the
* coordinates and sizes. We probably need some way to decide whether
@@ -1259,6 +1038,19 @@ finish:
if (!intel_crtc->primary_enabled && !state->hides_primary)
intel_crtc->atomic.post_enable_primary = true;
+
+ if (intel_wm_need_update(plane, &state->base))
+ intel_crtc->atomic.update_wm = true;
+
+ if (!state->visible) {
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |=
+ (1 << drm_plane_index(plane));
+ }
}
return 0;
@@ -1272,7 +1064,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1280,8 +1071,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
- plane->fb = state->base.fb;
- intel_plane->obj = obj;
+ plane->fb = fb;
if (intel_crtc->active) {
intel_crtc->primary_enabled = !state->hides_primary;
@@ -1295,7 +1085,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
src_y = state->src.y1;
src_w = drm_rect_width(&state->src);
src_h = drm_rect_height(&state->src);
- intel_plane->update_plane(plane, crtc, fb, obj,
+ intel_plane->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
} else {
@@ -1312,13 +1102,14 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane;
int ret = 0;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
+ if (IS_VALLEYVIEW(dev) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, set->plane_id);
@@ -1328,34 +1119,15 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
}
intel_plane = to_intel_plane(plane);
- ret = intel_plane->update_colorkey(plane, set);
-
-out_unlock:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_intel_sprite_colorkey *get = data;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- drm_modeset_lock_all(dev);
-
- plane = drm_plane_find(dev, get->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ intel_plane->ckey = *set;
- intel_plane = to_intel_plane(plane);
- intel_plane->get_colorkey(plane, get);
+ /*
+ * The only way this could fail would be due to
+ * the current plane state being unsupportable already,
+	 * and we don't consider that an error for the
+ * colorkey ioctl. So just ignore any error.
+ */
+ intel_plane_restore(plane);
out_unlock:
drm_modeset_unlock_all(dev);
@@ -1364,10 +1136,10 @@ out_unlock:
int intel_plane_restore(struct drm_plane *plane)
{
- if (!plane->crtc || !plane->fb)
+ if (!plane->crtc || !plane->state->fb)
return 0;
- return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
+ return plane->funcs->update_plane(plane, plane->crtc, plane->state->fb,
plane->state->crtc_x, plane->state->crtc_y,
plane->state->crtc_w, plane->state->crtc_h,
plane->state->src_x, plane->state->src_y,
@@ -1448,8 +1220,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
- intel_plane->update_colorkey = ilk_update_colorkey;
- intel_plane->get_colorkey = ilk_get_colorkey;
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
@@ -1473,16 +1243,12 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (IS_VALLEYVIEW(dev)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
- intel_plane->update_colorkey = vlv_update_colorkey;
- intel_plane->get_colorkey = vlv_get_colorkey;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->update_colorkey = ivb_update_colorkey;
- intel_plane->get_colorkey = ivb_get_colorkey;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1497,8 +1263,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
- intel_plane->update_colorkey = skl_update_colorkey;
- intel_plane->get_colorkey = skl_get_colorkey;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
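
The intel_sprite.c changes above drop the per-generation update_colorkey/get_colorkey hooks: the requested key is now cached in intel_plane->ckey and re-applied whenever the plane is programmed, and colorkey_enabled() just inspects the cache. A minimal standalone sketch of that caching pattern, with simplified stand-in structs and values rather than the real i915 ones:

	#include <stdio.h>

	struct colorkey {
		unsigned int min_value, max_value, channel_mask, flags;
	};

	struct plane {
		struct colorkey ckey;	/* cached key, single source of truth */
	};

	/* The ioctl path only stores the key... */
	static void set_colorkey(struct plane *p, const struct colorkey *key)
	{
		p->ckey = *key;
	}

	/* ...and the normal plane-update path applies whatever is cached. */
	static void update_plane(const struct plane *p)
	{
		if (p->ckey.flags)
			printf("program key: min=%u max=%u mask=%#x\n",
			       p->ckey.min_value, p->ckey.max_value,
			       p->ckey.channel_mask);
		else
			printf("no colorkey programmed\n");
	}

	int main(void)
	{
		struct plane p = { { 0, 0, 0, 0 } };
		struct colorkey key = { 0, 16, 0x7, 1 };

		set_colorkey(&p, &key);
		update_plane(&p);
		return 0;
	}
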
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 892d23c8479d..8b9d325bda3c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1332,7 +1332,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
status = type < 0 ?
connector_status_disconnected :
connector_status_connected;
@@ -1516,6 +1516,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
@@ -1620,7 +1621,7 @@ intel_tv_init(struct drm_device *dev)
return;
}
- intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_tv);
return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4e8fb891d4ea..ff2a74651dd4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -23,6 +23,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "i915_vgpu.h"
#include <linux/pm_runtime.h>
@@ -210,6 +211,13 @@ static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
gen6_gt_check_fifodbg(dev_priv);
}
+static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+{
+ u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+
+ return count & GT_FIFO_FREE_ENTRIES_MASK;
+}
+
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
@@ -217,16 +225,15 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES everytime */
if (IS_VALLEYVIEW(dev_priv->dev))
- dev_priv->uncore.fifo_count =
- __raw_i915_read32(dev_priv, GTFIFOCTL) &
- GT_FIFO_FREE_ENTRIES_MASK;
+ dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
- u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ u32 fifo = fifo_free_entries(dev_priv);
+
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
- fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ fifo = fifo_free_entries(dev_priv);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
@@ -314,8 +321,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count =
- __raw_i915_read32(dev_priv, GTFIFOCTL) &
- GT_FIFO_FREE_ENTRIES_MASK;
+ fifo_free_entries(dev_priv);
}
if (!restore)
@@ -328,8 +334,9 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
- (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+ if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+ INTEL_INFO(dev)->gen >= 9) &&
+ (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
@@ -353,6 +360,14 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev,
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
+ /* WaDisableShadowRegForCpd:chv */
+ if (IS_CHERRYVIEW(dev)) {
+ __raw_i915_write32(dev_priv, GTFIFOCTL,
+ __raw_i915_read32(dev_priv, GTFIFOCTL) |
+ GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+ GT_FIFO_CTL_RC6_POLICY_STALL);
+ }
+
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
@@ -550,18 +565,24 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug--; /* Only report the first N failures */
}
}
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
- if (i915.mmio_debug)
+ static bool mmio_debug_once = true;
+
+ if (i915.mmio_debug || !mmio_debug_once)
return;
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
+ DRM_DEBUG("Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug = mmio_debug_once--;
}
}
@@ -640,6 +661,14 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ GEN6_READ_HEADER(x); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ GEN6_READ_FOOTER; \
+}
+
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
@@ -703,6 +732,10 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
@@ -724,6 +757,7 @@ __gen6_read(64)
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
+#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
@@ -807,6 +841,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
GEN6_WRITE_FOOTER; \
}
+#define __vgpu_write(x) \
+static void vgpu_write##x(struct drm_i915_private *dev_priv, \
+ off_t reg, u##x val, bool trace) { \
+ GEN6_WRITE_HEADER; \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
+}
+
static const u32 gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
@@ -924,12 +966,17 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+__vgpu_write(64)
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
+#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -972,6 +1019,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
d->val_set = FORCEWAKE_KERNEL;
d->val_clear = 0;
} else {
+ /* WaRsClearFWBitsAtReset:bdw,skl */
d->val_reset = _MASKED_BIT_DISABLE(0xffff);
d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
@@ -1088,6 +1136,8 @@ void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_check_vgpu(dev);
+
intel_uncore_ellc_detect(dev);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
@@ -1136,6 +1186,11 @@ void intel_uncore_init(struct drm_device *dev)
break;
}
+ if (intel_vgpu_active(dev)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
+ ASSIGN_READ_MMIO_VFUNCS(vgpu);
+ }
+
i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
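
The __gen6_gt_wait_for_fifo() changes above factor the repeated "read GTFIFOCTL and mask off GT_FIFO_FREE_ENTRIES_MASK" expression into fifo_free_entries(). A tiny standalone sketch of the same refactor; the register read and mask value here are made up:

	#include <stdio.h>
	#include <stdint.h>

	#define FIFO_FREE_ENTRIES_MASK 0x7f	/* hypothetical mask */

	static uint32_t read_fifo_reg(void)
	{
		return 0xabc0 | 42;		/* stand-in for a real MMIO read */
	}

	/* One helper instead of open-coding the mask at every call site. */
	static uint32_t fifo_free_entries(void)
	{
		return read_fifo_reg() & FIFO_FREE_ENTRIES_MASK;
	}

	int main(void)
	{
		printf("free entries: %u\n", (unsigned)fifo_free_entries());
		return 0;
	}
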
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 33cdddf26684..2b81a417cf29 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -36,6 +36,7 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
depends on DRM_IMX && MFD_SYSCON
+ select DRM_PANEL
help
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 87fe8ed92ebe..a3ecf1069b76 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -75,10 +75,10 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
},
};
-static const struct dw_hdmi_sym_term imx_sym_term[] = {
- /*pixelclk symbol term*/
- { 148500000, 0x800d, 0x0005 },
- { ~0UL, 0x0000, 0x0000 }
+static const struct dw_hdmi_phy_config imx_phy_config[] = {
+ /*pixelclk symbol term vlev */
+ { 148500000, 0x800d, 0x0005, 0x01ad},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
};
static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
@@ -123,7 +123,7 @@ static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
{
- imx_drm_panel_format(encoder, V4L2_PIX_FMT_RGB24);
+ imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
}
static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
@@ -163,7 +163,7 @@ static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
.mpll_cfg = imx_mpll_cfg,
.cur_ctr = imx_cur_ctr,
- .sym_term = imx_sym_term,
+ .phy_config = imx_phy_config,
.dev_type = IMX6Q_HDMI,
.mode_valid = imx6q_hdmi_mode_valid,
};
@@ -171,7 +171,7 @@ static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
.mpll_cfg = imx_mpll_cfg,
.cur_ctr = imx_cur_ctr,
- .sym_term = imx_sym_term,
+ .phy_config = imx_phy_config,
.dev_type = IMX6DL_HDMI,
.mode_valid = imx6dl_hdmi_mode_valid,
};
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index a002f53aab0e..74f505b0dd02 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -103,8 +103,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
return NULL;
}
-int imx_drm_panel_format_pins(struct drm_encoder *encoder,
- u32 interface_pix_fmt, int hsync_pin, int vsync_pin)
+int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
+ int hsync_pin, int vsync_pin)
{
struct imx_drm_crtc_helper_funcs *helper;
struct imx_drm_crtc *imx_crtc;
@@ -116,16 +116,16 @@ int imx_drm_panel_format_pins(struct drm_encoder *encoder,
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
return helper->set_interface_pix_fmt(encoder->crtc,
- interface_pix_fmt, hsync_pin, vsync_pin);
+ bus_format, hsync_pin, vsync_pin);
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_panel_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins);
-int imx_drm_panel_format(struct drm_encoder *encoder, u32 interface_pix_fmt)
+int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
{
- return imx_drm_panel_format_pins(encoder, interface_pix_fmt, 2, 3);
+ return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3);
}
-EXPORT_SYMBOL_GPL(imx_drm_panel_format);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
@@ -431,15 +431,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
-static struct device_node *imx_drm_of_get_next_endpoint(
- const struct device_node *parent, struct device_node *prev)
-{
- struct device_node *node = of_graph_get_next_endpoint(parent, prev);
-
- of_node_put(prev);
- return node;
-}
-
/*
* @node: device tree node containing encoder input ports
* @encoder: drm_encoder
@@ -448,7 +439,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder)
{
struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc);
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct of_endpoint endpoint;
struct device_node *port;
int ret;
@@ -456,18 +447,15 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
if (!node || !imx_crtc)
return -EINVAL;
- do {
- ep = imx_drm_of_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
port = of_graph_get_remote_port(ep);
of_node_put(port);
if (port == imx_crtc->crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
+ of_node_put(ep);
return ret ? ret : endpoint.port;
}
- } while (ep);
+ }
return -EINVAL;
}
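
The imx_drm_encoder_get_mux_id() rewrite above switches to for_each_endpoint_of_node(), which drops the reference to the previously visited node on each step, so the early-return path only has to put the endpoint it still holds. A toy refcounted walk showing that shape; the types and helpers are hypothetical, not the OF API:

	#include <stdio.h>

	struct node {
		int refcount;
		int id;
		struct node *next;
	};

	static struct node *node_get(struct node *n) { if (n) n->refcount++; return n; }
	static void node_put(struct node *n) { if (n) n->refcount--; }

	static int find_id(struct node *head, int wanted)
	{
		for (struct node *n = node_get(head); n; ) {
			if (n->id == wanted) {
				int id = n->id;
				node_put(n);	/* early exit: drop our reference */
				return id;
			}
			struct node *next = node_get(n->next);
			node_put(n);		/* iterator drops the previous node */
			n = next;
		}
		return -1;
	}

	int main(void)
	{
		struct node c = { 1, 3, NULL }, b = { 1, 2, &c }, a = { 1, 1, &b };

		printf("found: %d\n", find_id(&a, 2));
		return 0;
	}
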
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 3c559ccd6af0..28e776d8d9d2 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -18,7 +18,7 @@ struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
- u32 pix_fmt, int hsync_pin, int vsync_pin);
+ u32 bus_format, int hsync_pin, int vsync_pin);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
};
@@ -40,10 +40,10 @@ void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-int imx_drm_panel_format_pins(struct drm_encoder *encoder,
- u32 interface_pix_fmt, int hsync_pin, int vsync_pin);
-int imx_drm_panel_format(struct drm_encoder *encoder,
- u32 interface_pix_fmt);
+int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
+ u32 bus_format, int hsync_pin, int vsync_pin);
+int imx_drm_set_bus_format(struct drm_encoder *encoder,
+ u32 bus_format);
int imx_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2d6dc94e1e64..abacc8f67469 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -19,10 +19,11 @@
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <video/of_videomode.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
@@ -55,12 +56,14 @@ struct imx_ldb_channel {
struct imx_ldb *ldb;
struct drm_connector connector;
struct drm_encoder encoder;
+ struct drm_panel *panel;
struct device_node *child;
int chno;
void *edid;
int edid_len;
struct drm_display_mode mode;
int mode_valid;
+ int bus_format;
};
struct bus_mux {
@@ -75,6 +78,7 @@ struct imx_ldb {
struct imx_ldb_channel channel[2];
struct clk *clk[2]; /* our own clock */
struct clk *clk_sel[4]; /* parent of display clock */
+ struct clk *clk_parent[4]; /* original parent of clk_sel */
struct clk *clk_pll[2]; /* upstream clock we can adjust */
u32 ldb_ctrl;
const struct bus_mux *lvds_mux;
@@ -91,6 +95,17 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
int num_modes = 0;
+ if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
+ imx_ldb_ch->panel->funcs->get_modes) {
+ struct drm_display_info *di = &connector->display_info;
+
+ num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
+ if (!imx_ldb_ch->bus_format && di->num_bus_formats)
+ imx_ldb_ch->bus_format = di->bus_formats[0];
+ if (num_modes > 0)
+ return num_modes;
+ }
+
if (imx_ldb_ch->edid) {
drm_mode_connector_update_edid_property(connector,
imx_ldb_ch->edid);
@@ -163,24 +178,36 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
- u32 pixel_fmt;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ u32 bus_format;
- switch (imx_ldb_ch->chno) {
- case 0:
- pixel_fmt = (ldb->ldb_ctrl & LDB_DATA_WIDTH_CH0_24) ?
- V4L2_PIX_FMT_RGB24 : V4L2_PIX_FMT_BGR666;
+ switch (imx_ldb_ch->bus_format) {
+ default:
+ dev_warn(ldb->dev,
+ "could not determine data mapping, default to 18-bit \"spwg\"\n");
+ /* fallthrough */
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ bus_format = MEDIA_BUS_FMT_RGB666_1X18;
break;
- case 1:
- pixel_fmt = (ldb->ldb_ctrl & LDB_DATA_WIDTH_CH1_24) ?
- V4L2_PIX_FMT_RGB24 : V4L2_PIX_FMT_BGR666;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
break;
- default:
- dev_err(ldb->dev, "unable to config di%d panel format\n",
- imx_ldb_ch->chno);
- pixel_fmt = V4L2_PIX_FMT_RGB24;
}
- imx_drm_panel_format(encoder, pixel_fmt);
+ imx_drm_set_bus_format(encoder, bus_format);
}
static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
@@ -190,6 +217,8 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
+ drm_panel_prepare(imx_ldb_ch->panel);
+
if (dual) {
clk_prepare_enable(ldb->clk[0]);
clk_prepare_enable(ldb->clk[1]);
@@ -223,6 +252,8 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
}
regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+
+ drm_panel_enable(imx_ldb_ch->panel);
}
static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
@@ -274,6 +305,7 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int mux, ret;
/*
* imx_ldb_encoder_disable is called by
@@ -287,6 +319,8 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
(ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
return;
+ drm_panel_disable(imx_ldb_ch->panel);
+
if (imx_ldb_ch == &ldb->channel[0])
ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
else if (imx_ldb_ch == &ldb->channel[1])
@@ -298,6 +332,30 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
clk_disable_unprepare(ldb->clk[0]);
clk_disable_unprepare(ldb->clk[1]);
}
+
+ if (ldb->lvds_mux) {
+ const struct bus_mux *lvds_mux = NULL;
+
+ if (imx_ldb_ch == &ldb->channel[0])
+ lvds_mux = &ldb->lvds_mux[0];
+ else if (imx_ldb_ch == &ldb->channel[1])
+ lvds_mux = &ldb->lvds_mux[1];
+
+ regmap_read(ldb->regmap, lvds_mux->reg, &mux);
+ mux &= lvds_mux->mask;
+ mux >>= lvds_mux->shift;
+ } else {
+ mux = (imx_ldb_ch == &ldb->channel[0]) ? 0 : 1;
+ }
+
+ /* set display clock mux back to original input clock */
+ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk_parent[mux]);
+ if (ret)
+ dev_err(ldb->dev,
+ "unable to set di%d parent clock to original parent\n",
+ mux);
+
+ drm_panel_unprepare(imx_ldb_ch->panel);
}
static struct drm_connector_funcs imx_ldb_connector_funcs = {
@@ -371,6 +429,9 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (imx_ldb_ch->panel)
+ drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
+
drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
&imx_ldb_ch->encoder);
@@ -382,25 +443,39 @@ enum {
LVDS_BIT_MAP_JEIDA
};
-static const char * const imx_ldb_bit_mappings[] = {
- [LVDS_BIT_MAP_SPWG] = "spwg",
- [LVDS_BIT_MAP_JEIDA] = "jeida",
+struct imx_ldb_bit_mapping {
+ u32 bus_format;
+ u32 datawidth;
+ const char * const mapping;
+};
+
+static const struct imx_ldb_bit_mapping imx_ldb_bit_mappings[] = {
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, "spwg" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, "spwg" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, "jeida" },
};
-static const int of_get_data_mapping(struct device_node *np)
+static u32 of_get_bus_format(struct device *dev, struct device_node *np)
{
const char *bm;
+ u32 datawidth = 0;
int ret, i;
ret = of_property_read_string(np, "fsl,data-mapping", &bm);
if (ret < 0)
return ret;
- for (i = 0; i < ARRAY_SIZE(imx_ldb_bit_mappings); i++)
- if (!strcasecmp(bm, imx_ldb_bit_mappings[i]))
- return i;
+ of_property_read_u32(np, "fsl,data-width", &datawidth);
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(imx_ldb_bit_mappings); i++) {
+ if (!strcasecmp(bm, imx_ldb_bit_mappings[i].mapping) &&
+ datawidth == imx_ldb_bit_mappings[i].datawidth)
+ return imx_ldb_bit_mappings[i].bus_format;
+ }
+
+ dev_err(dev, "invalid data mapping: %d-bit \"%s\"\n", datawidth, bm);
+
+ return -ENOENT;
}
static struct bus_mux imx6q_lvds_mux[2] = {
@@ -437,8 +512,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
struct device_node *child;
const u8 *edidp;
struct imx_ldb *imx_ldb;
- int datawidth;
- int mapping;
int dual;
int ret;
int i;
@@ -479,12 +552,15 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
imx_ldb->clk_sel[i] = NULL;
break;
}
+
+ imx_ldb->clk_parent[i] = clk_get_parent(imx_ldb->clk_sel[i]);
}
if (i == 0)
return ret;
for_each_child_of_node(np, child) {
struct imx_ldb_channel *channel;
+ struct device_node *port;
ret = of_property_read_u32(child, "reg", &i);
if (ret || i < 0 || i > 1)
@@ -503,49 +579,53 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
channel->chno = i;
channel->child = child;
+ /*
+ * The output port is port@4 with an external 4-port mux or
+ * port@2 with the internal 2-port mux.
+ */
+ port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2);
+ if (port) {
+ struct device_node *endpoint, *remote;
+
+ endpoint = of_get_child_by_name(port, "endpoint");
+ if (endpoint) {
+ remote = of_graph_get_remote_port_parent(endpoint);
+ if (remote)
+ channel->panel = of_drm_find_panel(remote);
+ else
+ return -EPROBE_DEFER;
+ if (!channel->panel) {
+ dev_err(dev, "panel not found: %s\n",
+ remote->full_name);
+ return -EPROBE_DEFER;
+ }
+ }
+ }
+
edidp = of_get_property(child, "edid", &channel->edid_len);
if (edidp) {
channel->edid = kmemdup(edidp, channel->edid_len,
GFP_KERNEL);
- } else {
+ } else if (!channel->panel) {
ret = of_get_drm_display_mode(child, &channel->mode, 0);
if (!ret)
channel->mode_valid = 1;
}
- ret = of_property_read_u32(child, "fsl,data-width", &datawidth);
- if (ret)
- datawidth = 0;
- else if (datawidth != 18 && datawidth != 24)
- return -EINVAL;
-
- mapping = of_get_data_mapping(child);
- switch (mapping) {
- case LVDS_BIT_MAP_SPWG:
- if (datawidth == 24) {
- if (i == 0 || dual)
- imx_ldb->ldb_ctrl |=
- LDB_DATA_WIDTH_CH0_24;
- if (i == 1 || dual)
- imx_ldb->ldb_ctrl |=
- LDB_DATA_WIDTH_CH1_24;
- }
- break;
- case LVDS_BIT_MAP_JEIDA:
- if (datawidth == 18) {
- dev_err(dev, "JEIDA standard only supported in 24 bit\n");
- return -EINVAL;
- }
- if (i == 0 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
- LDB_BIT_MAP_CH0_JEIDA;
- if (i == 1 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
- LDB_BIT_MAP_CH1_JEIDA;
- break;
- default:
- dev_err(dev, "data mapping not specified or invalid\n");
- return -EINVAL;
+ channel->bus_format = of_get_bus_format(dev, child);
+ if (channel->bus_format == -EINVAL) {
+ /*
+ * If no bus format was specified in the device tree,
+ * we can still get it from the connected panel later.
+ */
+ if (channel->panel && channel->panel->funcs &&
+ channel->panel->funcs->get_modes)
+ channel->bus_format = 0;
+ }
+ if (channel->bus_format < 0) {
+ dev_err(dev, "could not determine data mapping: %d\n",
+ channel->bus_format);
+ return channel->bus_format;
}
ret = imx_ldb_register(drm, channel);
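
The of_get_bus_format() helper introduced above replaces the string-indexed mapping with a table keyed on both the "fsl,data-mapping" string and "fsl,data-width", returning a media bus format code or a negative error. A minimal userspace sketch of that lookup; the numeric format codes are placeholders, not MEDIA_BUS_FMT_* values:

	#include <stdio.h>
	#include <string.h>
	#include <strings.h>

	struct bit_mapping {
		unsigned int bus_format;	/* placeholder codes */
		unsigned int datawidth;
		const char *mapping;
	};

	static const struct bit_mapping mappings[] = {
		{ 0x1011, 18, "spwg"  },
		{ 0x1010, 24, "spwg"  },
		{ 0x1012, 24, "jeida" },
	};

	static int get_bus_format(const char *bm, unsigned int datawidth)
	{
		for (size_t i = 0; i < sizeof(mappings) / sizeof(mappings[0]); i++)
			if (!strcasecmp(bm, mappings[i].mapping) &&
			    datawidth == mappings[i].datawidth)
				return (int)mappings[i].bus_format;
		return -1;	/* not found, like -ENOENT in the driver */
	}

	int main(void)
	{
		printf("spwg/24  -> %#x\n", (unsigned)get_bus_format("spwg", 24));
		printf("jeida/18 -> %d\n", get_bus_format("jeida", 18));
		return 0;
	}
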
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 4216e479a9be..214eceefc981 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -301,11 +301,11 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_panel_format_pins(encoder, IPU_PIX_FMT_GBR24,
- tve->hsync_pin, tve->vsync_pin);
+ imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+ tve->hsync_pin, tve->vsync_pin);
break;
case TVE_MODE_TVOUT:
- imx_drm_panel_format(encoder, V4L2_PIX_FMT_YUV444);
+ imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
break;
}
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 98551e356e12..7bc8301fafff 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -45,7 +45,7 @@ struct ipu_crtc {
struct drm_pending_vblank_event *page_flip_event;
struct drm_framebuffer *newfb;
int irq;
- u32 interface_pix_fmt;
+ u32 bus_format;
int di_hsync_pin;
int di_vsync_pin;
};
@@ -145,7 +145,6 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_di_signal_cfg sig_cfg = {};
unsigned long encoder_types = 0;
- u32 out_pixel_fmt;
int ret;
dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
@@ -161,21 +160,21 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
__func__, encoder_types);
/*
- * If we have DAC, TVDAC or LDB, then we need the IPU DI clock
- * to be the same as the LDB DI clock.
+ * If we have DAC or LDB, then we need the IPU DI clock to be
+ * the same as the LDB DI clock. For TVDAC, derive the IPU DI
+ * clock from 27 MHz TVE_DI clock, but allow to divide it.
*/
if (encoder_types & (BIT(DRM_MODE_ENCODER_DAC) |
- BIT(DRM_MODE_ENCODER_TVDAC) |
BIT(DRM_MODE_ENCODER_LVDS)))
sig_cfg.clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT;
+ else if (encoder_types & BIT(DRM_MODE_ENCODER_TVDAC))
+ sig_cfg.clkflags = IPU_DI_CLKMODE_EXT;
else
sig_cfg.clkflags = 0;
- out_pixel_fmt = ipu_crtc->interface_pix_fmt;
-
sig_cfg.enable_pol = 1;
sig_cfg.clk_pol = 0;
- sig_cfg.pixel_fmt = out_pixel_fmt;
+ sig_cfg.bus_format = ipu_crtc->bus_format;
sig_cfg.v_to_h_sync = 0;
sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
@@ -184,7 +183,7 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
mode->flags & DRM_MODE_FLAG_INTERLACE,
- out_pixel_fmt, mode->hdisplay);
+ ipu_crtc->bus_format, mode->hdisplay);
if (ret) {
dev_err(ipu_crtc->dev,
"initializing display controller failed with %d\n",
@@ -202,7 +201,8 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
- x, y, mode->hdisplay, mode->vdisplay);
+ x, y, mode->hdisplay, mode->vdisplay,
+ mode->flags & DRM_MODE_FLAG_INTERLACE);
}
static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
@@ -291,11 +291,11 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
}
static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
- u32 pixfmt, int hsync_pin, int vsync_pin)
+ u32 bus_format, int hsync_pin, int vsync_pin)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- ipu_crtc->interface_pix_fmt = pixfmt;
+ ipu_crtc->bus_format = bus_format;
ipu_crtc->di_hsync_pin = hsync_pin;
ipu_crtc->di_vsync_pin = vsync_pin;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 6987e16fe99b..878a643d72e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -99,7 +99,7 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ uint32_t src_w, uint32_t src_h, bool interlaced)
{
struct device *dev = ipu_plane->base.dev->dev;
int ret;
@@ -213,6 +213,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
if (ret < 0)
return ret;
+ if (interlaced)
+ ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
ipu_plane->w = src_w;
ipu_plane->h = src_h;
@@ -312,7 +314,8 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
- src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16);
+ src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
+ false);
if (ret < 0) {
ipu_plane_put_resources(ipu_plane);
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index af125fb40ef5..9b5eff18f5b8 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -42,7 +42,7 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
- uint32_t src_h);
+ uint32_t src_h, bool interlaced);
void ipu_plane_enable(struct ipu_plane *plane);
void ipu_plane_disable(struct ipu_plane *plane);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 900dda6a8e71..74a9ce40ddc4 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -33,7 +33,7 @@ struct imx_parallel_display {
struct device *dev;
void *edid;
int edid_len;
- u32 interface_pix_fmt;
+ u32 bus_format;
int mode_valid;
struct drm_display_mode mode;
struct drm_panel *panel;
@@ -118,7 +118,7 @@ static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_drm_panel_format(encoder, imxpd->interface_pix_fmt);
+ imx_drm_set_bus_format(encoder, imxpd->bus_format);
}
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -225,14 +225,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
if (!strcmp(fmt, "rgb24"))
- imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB24;
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
else if (!strcmp(fmt, "rgb565"))
- imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB565;
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
else if (!strcmp(fmt, "bgr666"))
- imxpd->interface_pix_fmt = V4L2_PIX_FMT_BGR666;
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
else if (!strcmp(fmt, "lvds666"))
- imxpd->interface_pix_fmt =
- v4l2_fourcc('L', 'V', 'D', '6');
+ imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
panel_node = of_parse_phandle(np, "fsl,panel", 0);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 9872ba9abf1a..6e84df9369a6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1222,7 +1222,7 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
u8 tmp;
if (mdev->type == G200_WB)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index bacbbb70f679..0a6f6764a37c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -35,3 +35,14 @@ config DRM_MSM_REGISTER_LOGGING
Compile in support for logging register reads/writes in a format
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+
+config DRM_MSM_DSI
+ bool "Enable DSI support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ default y
+ help
+ Choose this option if you have a need for MIPI DSI connector
+ support.
+
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 674a132fd76e..ab2086783fee 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,5 +50,10 @@ msm-y := \
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_host.o \
+ dsi/dsi_manager.o \
+ dsi/dsi_phy.o \
+ mdp/mdp5/mdp5_cmd_encoder.o
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
new file mode 100644
index 000000000000..28d1f95a90cc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
+{
+ if (!msm_dsi || !msm_dsi->panel)
+ return NULL;
+
+ return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+ msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
+ msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+}
+
+static void dsi_destroy(struct msm_dsi *msm_dsi)
+{
+ if (!msm_dsi)
+ return;
+
+ msm_dsi_manager_unregister(msm_dsi);
+ if (msm_dsi->host) {
+ msm_dsi_host_destroy(msm_dsi->host);
+ msm_dsi->host = NULL;
+ }
+
+ platform_set_drvdata(msm_dsi->pdev, NULL);
+}
+
+static struct msm_dsi *dsi_init(struct platform_device *pdev)
+{
+ struct msm_dsi *msm_dsi = NULL;
+ int ret;
+
+ if (!pdev) {
+ dev_err(&pdev->dev, "no dsi device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
+ if (!msm_dsi) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ DBG("dsi probed=%p", msm_dsi);
+
+ msm_dsi->pdev = pdev;
+ platform_set_drvdata(pdev, msm_dsi);
+
+ /* Init dsi host */
+ ret = msm_dsi_host_init(msm_dsi);
+ if (ret)
+ goto fail;
+
+ /* Register to dsi manager */
+ ret = msm_dsi_manager_register(msm_dsi);
+ if (ret)
+ goto fail;
+
+ return msm_dsi;
+
+fail:
+ if (msm_dsi)
+ dsi_destroy(msm_dsi);
+
+ return ERR_PTR(ret);
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi;
+
+ DBG("");
+ msm_dsi = dsi_init(pdev);
+ if (IS_ERR(msm_dsi))
+ return PTR_ERR(msm_dsi);
+
+ priv->dsi[msm_dsi->id] = msm_dsi;
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+ int id = msm_dsi->id;
+
+ if (priv->dsi[id]) {
+ dsi_destroy(msm_dsi);
+ priv->dsi[id] = NULL;
+ }
+}
+
+static const struct component_ops dsi_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_del(&pdev->dev, &dsi_ops);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss-dsi-ctrl" },
+ {}
+};
+
+static struct platform_driver dsi_driver = {
+ .probe = dsi_dev_probe,
+ .remove = dsi_dev_remove,
+ .driver = {
+ .name = "msm_dsi",
+ .of_match_table = dt_match,
+ },
+};
+
+void __init msm_dsi_register(void)
+{
+ DBG("");
+ platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret, i;
+
+ if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
+ !encoders[MSM_DSI_CMD_ENCODER_ID]))
+ return -EINVAL;
+
+ msm_dsi->dev = dev;
+
+ ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+ if (ret) {
+ dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+ goto fail;
+ }
+
+ msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
+ if (IS_ERR(msm_dsi->bridge)) {
+ ret = PTR_ERR(msm_dsi->bridge);
+ dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ msm_dsi->bridge = NULL;
+ goto fail;
+ }
+
+ msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+ if (IS_ERR(msm_dsi->connector)) {
+ ret = PTR_ERR(msm_dsi->connector);
+ dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+ msm_dsi->connector = NULL;
+ goto fail;
+ }
+
+ for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+ encoders[i]->bridge = msm_dsi->bridge;
+ msm_dsi->encoders[i] = encoders[i];
+ }
+
+ priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
+ priv->connectors[priv->num_connectors++] = msm_dsi->connector;
+
+ return 0;
+fail:
+ if (msm_dsi) {
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
+ if (msm_dsi->connector) {
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ msm_dsi->connector = NULL;
+ }
+ }
+
+ return ret;
+}
+
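
The new dsi_init()/dsi_destroy() pair above follows the usual allocate, init, register flow in which every failure jumps to one cleanup label and the destroy helper copes with partially initialized (or NULL) state. A compact standalone sketch of that error-handling shape, with hypothetical fields and error codes:

	#include <stdio.h>
	#include <stdlib.h>

	struct dev {
		int host_ready;
		int registered;
	};

	static void destroy(struct dev *d)
	{
		if (!d)
			return;		/* safe on NULL, like dsi_destroy() */
		d->registered = 0;	/* undo steps in reverse order */
		d->host_ready = 0;
		free(d);
	}

	static struct dev *init(int fail_at)
	{
		struct dev *d = calloc(1, sizeof(*d));
		int ret;

		if (!d) {
			ret = -12;	/* -ENOMEM */
			goto fail;
		}

		d->host_ready = 1;
		if (fail_at == 1) { ret = -5; goto fail; }

		d->registered = 1;
		if (fail_at == 2) { ret = -5; goto fail; }

		return d;

	fail:
		destroy(d);		/* handles partial setup */
		printf("init failed: %d\n", ret);
		return NULL;
	}

	int main(void)
	{
		struct dev *d = init(0);

		if (d)
			destroy(d);
		return 0;
	}
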
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 000000000000..10f54d4e379a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_CONNECTOR_H__
+#define __DSI_CONNECTOR_H__
+
+#include <linux/platform_device.h>
+
+#include "drm_crtc.h"
+#include "drm_mipi_dsi.h"
+#include "drm_panel.h"
+
+#include "msm_drv.h"
+
+#define DSI_0 0
+#define DSI_1 1
+#define DSI_MAX 2
+
+#define DSI_CLOCK_MASTER DSI_0
+#define DSI_CLOCK_SLAVE DSI_1
+
+#define DSI_LEFT DSI_0
+#define DSI_RIGHT DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as master encoder
+ */
+#define DSI_ENCODER_MASTER DSI_1
+#define DSI_ENCODER_SLAVE DSI_0
+
+struct msm_dsi {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ struct mipi_dsi_host *host;
+ struct msm_dsi_phy *phy;
+ struct drm_panel *panel;
+ unsigned long panel_flags;
+ bool phy_enabled;
+
+ /* the encoders we are hooked to (outside of dsi block) */
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+
+ int id;
+};
+
+/* dsi manager */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+int msm_dsi_manager_phy_enable(int id,
+ const unsigned long bit_rate, const unsigned long esc_rate,
+ u32 *clk_pre, u32 *clk_post);
+void msm_dsi_manager_phy_disable(int id);
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+
+/* msm dsi */
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
+
+/* dsi host */
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
+ u32 iova, u32 len);
+int msm_dsi_host_enable(struct mipi_dsi_host *host);
+int msm_dsi_host_disable(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_off(struct mipi_dsi_host *host);
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ struct drm_display_mode *mode);
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+ unsigned long *panel_flags);
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
+void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_destroy(struct mipi_dsi_host *host);
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev);
+int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+
+/* dsi phy */
+struct msm_dsi_phy;
+enum msm_dsi_phy_type {
+ MSM_DSI_PHY_UNKNOWN,
+ MSM_DSI_PHY_28NM,
+ MSM_DSI_PHY_MAX
+};
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id);
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+ u32 *clk_pre, u32 *clk_post);
+#endif /* __DSI_CONNECTOR_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index abf1bba520bf..1dcfae265e98 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
-
-Copyright (C) 2013 by the following authors:
+- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml ( 18681 bytes, from 2015-03-04 23:08:31)
+- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-01-28 21:43:22)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -51,11 +42,11 @@ enum dsi_traffic_mode {
BURST_MODE = 2,
};
-enum dsi_dst_format {
- DST_FORMAT_RGB565 = 0,
- DST_FORMAT_RGB666 = 1,
- DST_FORMAT_RGB666_LOOSE = 2,
- DST_FORMAT_RGB888 = 3,
+enum dsi_vid_dst_format {
+ VID_DST_FORMAT_RGB565 = 0,
+ VID_DST_FORMAT_RGB666 = 1,
+ VID_DST_FORMAT_RGB666_LOOSE = 2,
+ VID_DST_FORMAT_RGB888 = 3,
};
enum dsi_rgb_swap {
@@ -69,20 +60,63 @@ enum dsi_rgb_swap {
enum dsi_cmd_trigger {
TRIGGER_NONE = 0,
+ TRIGGER_SEOF = 1,
TRIGGER_TE = 2,
TRIGGER_SW = 4,
TRIGGER_SW_SEOF = 5,
TRIGGER_SW_TE = 6,
};
+enum dsi_cmd_dst_format {
+ CMD_DST_FORMAT_RGB111 = 0,
+ CMD_DST_FORMAT_RGB332 = 3,
+ CMD_DST_FORMAT_RGB444 = 4,
+ CMD_DST_FORMAT_RGB565 = 6,
+ CMD_DST_FORMAT_RGB666 = 7,
+ CMD_DST_FORMAT_RGB888 = 8,
+};
+
+enum dsi_lane_swap {
+ LANE_SWAP_0123 = 0,
+ LANE_SWAP_3012 = 1,
+ LANE_SWAP_2301 = 2,
+ LANE_SWAP_1230 = 3,
+ LANE_SWAP_0321 = 4,
+ LANE_SWAP_1032 = 5,
+ LANE_SWAP_2103 = 6,
+ LANE_SWAP_3210 = 7,
+};
+
#define DSI_IRQ_CMD_DMA_DONE 0x00000001
#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
#define DSI_IRQ_CMD_MDP_DONE 0x00000100
#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
#define DSI_IRQ_VIDEO_DONE 0x00010000
#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
+#define DSI_IRQ_BTA_DONE 0x00100000
+#define DSI_IRQ_MASK_BTA_DONE 0x00200000
#define DSI_IRQ_ERROR 0x01000000
#define DSI_IRQ_MASK_ERROR 0x02000000
+#define REG_DSI_6G_HW_VERSION 0x00000000
+#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000
+#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
+}
+#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000
+#define DSI_6G_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
+}
+#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff
+#define DSI_6G_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
+}
+
#define REG_DSI_CTRL 0x00000000
#define DSI_CTRL_ENABLE 0x00000001
#define DSI_CTRL_VID_MODE_EN 0x00000002
@@ -96,11 +130,15 @@ enum dsi_cmd_trigger {
#define DSI_CTRL_CRC_CHECK 0x01000000
#define REG_DSI_STATUS0 0x00000004
+#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001
#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
+#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004
#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
#define DSI_STATUS0_DSI_BUSY 0x00000010
+#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000
#define REG_DSI_FIFO_STATUS 0x00000008
+#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080
#define REG_DSI_VID_CFG0 0x0000000c
#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
@@ -111,7 +149,7 @@ static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
}
#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
-static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
{
return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
}
@@ -129,21 +167,15 @@ static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
#define REG_DSI_VID_CFG1 0x0000001c
-#define DSI_VID_CFG1_R_SEL 0x00000010
-#define DSI_VID_CFG1_G_SEL 0x00000100
-#define DSI_VID_CFG1_B_SEL 0x00001000
-#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
-#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
+#define DSI_VID_CFG1_R_SEL 0x00000001
+#define DSI_VID_CFG1_G_SEL 0x00000010
+#define DSI_VID_CFG1_B_SEL 0x00000100
+#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12
static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
{
return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
}
-#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
-#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
-static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
-{
- return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
-}
#define REG_DSI_ACTIVE_H 0x00000020
#define DSI_ACTIVE_H_START__MASK 0x00000fff
@@ -201,32 +233,115 @@ static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
}
-#define REG_DSI_ACTIVE_VSYNC 0x00000034
-#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
-#define DSI_ACTIVE_VSYNC_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030
+#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
{
- return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
}
-#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
-#define DSI_ACTIVE_VSYNC_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
{
- return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034
+#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
}
#define REG_DSI_CMD_DMA_CTRL 0x00000038
+#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000
#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
#define REG_DSI_CMD_CFG0 0x0000003c
+#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f
+#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0
+static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
+{
+ return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_CMD_CFG0_R_SEL 0x00000010
+#define DSI_CMD_CFG0_G_SEL 0x00000100
+#define DSI_CMD_CFG0_B_SEL 0x00001000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20
+static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
+}
+#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000
+#define DSI_CMD_CFG0_RGB_SWAP__SHIFT 16
+static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
+}
#define REG_DSI_CMD_CFG1 0x00000040
+#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff
+#define DSI_CMD_CFG1_WR_MEM_START__SHIFT 0
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
+}
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
+}
+#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000
#define REG_DSI_DMA_BASE 0x00000044
#define REG_DSI_DMA_LEN 0x00000048
+#define REG_DSI_CMD_MDP_STREAM_CTRL 0x00000054
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM_TOTAL 0x00000058
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+}
+
#define REG_DSI_ACK_ERR_STATUS 0x00000064
static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
@@ -234,19 +349,25 @@ static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
#define REG_DSI_TRIG_CTRL 0x00000080
-#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007
#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
{
return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
}
-#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070
#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
{
return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
}
-#define DSI_TRIG_CTRL_STREAM 0x00000100
+#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300
+#define DSI_TRIG_CTRL_STREAM__SHIFT 8
+static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
+{
+ return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
+}
+#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000
#define DSI_TRIG_CTRL_TE 0x80000000
#define REG_DSI_TRIG_DMA 0x0000008c
@@ -274,6 +395,12 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0
+static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
+{
+ return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
+}
#define REG_DSI_ERR_INT_MASK0 0x00000108
@@ -282,8 +409,36 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define REG_DSI_RESET 0x00000114
#define REG_DSI_CLK_CTRL 0x00000118
+#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001
+#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002
+#define DSI_CLK_CTRL_PCLK_ON 0x00000004
+#define DSI_CLK_CTRL_DSICLK_ON 0x00000008
+#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010
+#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020
+#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200
+
+#define REG_DSI_CLK_STATUS 0x0000011c
+#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000
#define REG_DSI_PHY_RESET 0x00000128
+#define DSI_PHY_RESET_RESET 0x00000001
+
+#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
+#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
+#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
+static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
+{
+ return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
+}
+#define DSI_RDBK_DATA_CTRL_CLR 0x00000001
+
+#define REG_DSI_VERSION 0x000001f0
+#define DSI_VERSION_MAJOR__MASK 0xff000000
+#define DSI_VERSION_MAJOR__SHIFT 24
+static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
+}
#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
@@ -501,5 +656,184 @@ static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x000003
#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
#endif /* DSI_XML */
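All of the register definitions above follow the same __MASK/__SHIFT convention: the generated inline helper packs a value into its field, and a field is read back by masking and then shifting down (the driver's FIELD() helper does exactly that). A minimal sketch using the 6G hardware-version fields, with a hypothetical decode helper that is not part of the patch:

	/* Hypothetical helper, shown only to illustrate the convention. */
	static inline void dsi_6g_decode_hw_version(uint32_t raw,
			uint32_t *minor, uint32_t *step)
	{
		/* reverse of DSI_6G_HW_VERSION_MINOR()/_STEP(): mask, then shift down */
		*minor = (raw & DSI_6G_HW_VERSION_MINOR__MASK) >>
				DSI_6G_HW_VERSION_MINOR__SHIFT;
		*step = (raw & DSI_6G_HW_VERSION_STEP__MASK) >>
				DSI_6G_HW_VERSION_STEP__SHIFT;
	}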
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 000000000000..956b22492c9a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,1993 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <video/mipi_display.h>
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define MSM_DSI_VER_MAJOR_V2 0x02
+#define MSM_DSI_VER_MAJOR_6G 0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+
+#define DSI_6G_REG_SHIFT 4
+
+#define DSI_REGULATOR_MAX 8
+struct dsi_reg_entry {
+ char name[32];
+ int min_voltage;
+ int max_voltage;
+ int enable_load;
+ int disable_load;
+};
+
+struct dsi_reg_config {
+ int num;
+ struct dsi_reg_entry regs[DSI_REGULATOR_MAX];
+};
+
+struct dsi_config {
+ u32 major;
+ u32 minor;
+ u32 io_offset;
+ enum msm_dsi_phy_type phy_type;
+ struct dsi_reg_config reg_cfg;
+};
+
+static const struct dsi_config dsi_cfgs[] = {
+ {MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN},
+ { /* 8974 v1 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_0,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8974 v2 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8974 v3 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8084 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_2,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8916 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 2850000, 2850000, 100000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+};
+
+static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+{
+ u32 ver;
+ u32 ver_6g;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+ /* Starting with DSI6G(v3), a 6G_HW_VERSION register was added at
+ * offset 0, which shifts all other registers down by 4 bytes.
+ */
+ ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
+ if (ver_6g == 0) {
+ ver = msm_readl(base + REG_DSI_VERSION);
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver <= MSM_DSI_VER_MAJOR_V2) {
+ /* old versions */
+ *major = ver;
+ *minor = 0;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver == MSM_DSI_VER_MAJOR_6G) {
+ /* 6G version */
+ *major = ver;
+ *minor = ver_6g;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+}
+
+#define DSI_ERR_STATE_ACK 0x0000
+#define DSI_ERR_STATE_TIMEOUT 0x0001
+#define DSI_ERR_STATE_DLN0_PHY 0x0002
+#define DSI_ERR_STATE_FIFO 0x0004
+#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
+#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
+#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
+
+#define DSI_CLK_CTRL_ENABLE_CLKS \
+ (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
+ DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
+ DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
+ DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
+
+struct msm_dsi_host {
+ struct mipi_dsi_host base;
+
+ struct platform_device *pdev;
+ struct drm_device *dev;
+
+ int id;
+
+ void __iomem *ctrl_base;
+ struct regulator_bulk_data supplies[DSI_REGULATOR_MAX];
+ struct clk *mdp_core_clk;
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *mmss_misc_ahb_clk;
+ struct clk *byte_clk;
+ struct clk *esc_clk;
+ struct clk *pixel_clk;
+ u32 byte_clk_rate;
+
+ struct gpio_desc *disp_en_gpio;
+ struct gpio_desc *te_gpio;
+
+ const struct dsi_config *cfg;
+
+ struct completion dma_comp;
+ struct completion video_comp;
+ struct mutex dev_mutex;
+ struct mutex cmd_mutex;
+ struct mutex clk_mutex;
+ spinlock_t intr_lock; /* Protect interrupt ctrl register */
+
+ u32 err_work_state;
+ struct work_struct err_work;
+ struct workqueue_struct *workqueue;
+
+ struct drm_gem_object *tx_gem_obj;
+ u8 *rx_buf;
+
+ struct drm_display_mode *mode;
+
+ /* Panel info */
+ struct device_node *panel_node;
+ unsigned int channel;
+ unsigned int lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+
+ u32 dma_cmd_ctrl_restore;
+
+ bool registered;
+ bool power_on;
+ int irq;
+};
+
+static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB565: return 16;
+ case MIPI_DSI_FMT_RGB666_PACKED: return 18;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default: return 24;
+ }
+}
+
+static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
+{
+ return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
+static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
+{
+ msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
+
+static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+{
+ const struct dsi_config *cfg;
+ struct regulator *gdsc_reg;
+ int i, ret;
+ u32 major = 0, minor = 0;
+
+ gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
+ if (IS_ERR_OR_NULL(gdsc_reg)) {
+ pr_err("%s: cannot get gdsc\n", __func__);
+ goto fail;
+ }
+ ret = regulator_enable(gdsc_reg);
+ if (ret) {
+ pr_err("%s: unable to enable gdsc\n", __func__);
+ regulator_put(gdsc_reg);
+ goto fail;
+ }
+ ret = clk_prepare_enable(msm_host->ahb_clk);
+ if (ret) {
+ pr_err("%s: unable to enable ahb_clk\n", __func__);
+ regulator_disable(gdsc_reg);
+ regulator_put(gdsc_reg);
+ goto fail;
+ }
+
+ ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
+
+ clk_disable_unprepare(msm_host->ahb_clk);
+ regulator_disable(gdsc_reg);
+ regulator_put(gdsc_reg);
+ if (ret) {
+ pr_err("%s: Invalid version\n", __func__);
+ goto fail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
+ cfg = dsi_cfgs + i;
+ if ((cfg->major == major) && (cfg->minor == minor))
+ return cfg;
+ }
+ pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+
+fail:
+ return NULL;
+}
+
+static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct msm_dsi_host, base);
+}
+
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int i;
+
+ DBG("");
+ for (i = num - 1; i >= 0; i--)
+ if (regs[i].disable_load >= 0)
+ regulator_set_load(s[i].consumer,
+ regs[i].disable_load);
+
+ regulator_bulk_disable(num, s);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int ret, i;
+
+ DBG("");
+ for (i = 0; i < num; i++) {
+ if (regs[i].enable_load >= 0) {
+ ret = regulator_set_load(s[i].consumer,
+ regs[i].enable_load);
+ if (ret < 0) {
+ pr_err("regulator %d set op mode failed, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+ }
+
+ ret = regulator_bulk_enable(num, s);
+ if (ret < 0) {
+ pr_err("regulator enable failed, %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ regulator_set_load(s[i].consumer, regs[i].disable_load);
+ return ret;
+}
+
+static int dsi_regulator_init(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int i, ret;
+
+ for (i = 0; i < num; i++)
+ s[i].supply = regs[i].name;
+
+ ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
+ if (ret < 0) {
+ pr_err("%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ for (i = 0; i < num; i++) {
+ if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+ ret = regulator_set_voltage(s[i].consumer,
+ regs[i].min_voltage, regs[i].max_voltage);
+ if (ret < 0) {
+ pr_err("regulator %d set voltage failed, %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int dsi_clk_init(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret = 0;
+
+ msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+ if (IS_ERR(msm_host->mdp_core_clk)) {
+ ret = PTR_ERR(msm_host->mdp_core_clk);
+ pr_err("%s: Unable to get mdp core clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(msm_host->ahb_clk)) {
+ ret = PTR_ERR(msm_host->ahb_clk);
+ pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
+ if (IS_ERR(msm_host->axi_clk)) {
+ ret = PTR_ERR(msm_host->axi_clk);
+ pr_err("%s: Unable to get axi bus clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
+ if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
+ ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
+ pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
+ if (IS_ERR(msm_host->byte_clk)) {
+ ret = PTR_ERR(msm_host->byte_clk);
+ pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->byte_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ if (IS_ERR(msm_host->pixel_clk)) {
+ ret = PTR_ERR(msm_host->pixel_clk);
+ pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->pixel_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->esc_clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(msm_host->esc_clk)) {
+ ret = PTR_ERR(msm_host->esc_clk);
+ pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->esc_clk = NULL;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ DBG("id=%d", msm_host->id);
+
+ ret = clk_prepare_enable(msm_host->mdp_core_clk);
+ if (ret) {
+ pr_err("%s: failed to enable mdp_core_clock, %d\n",
+ __func__, ret);
+ goto core_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->ahb_clk);
+ if (ret) {
+ pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+ goto ahb_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->axi_clk);
+ if (ret) {
+ pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+ goto axi_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
+ if (ret) {
+ pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
+ __func__, ret);
+ goto misc_ahb_clk_err;
+ }
+
+ return 0;
+
+misc_ahb_clk_err:
+ clk_disable_unprepare(msm_host->axi_clk);
+axi_clk_err:
+ clk_disable_unprepare(msm_host->ahb_clk);
+ahb_clk_err:
+ clk_disable_unprepare(msm_host->mdp_core_clk);
+core_clk_err:
+ return ret;
+}
+
+static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
+{
+ DBG("");
+ clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
+ clk_disable_unprepare(msm_host->axi_clk);
+ clk_disable_unprepare(msm_host->ahb_clk);
+ clk_disable_unprepare(msm_host->mdp_core_clk);
+}
+
+static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ DBG("Set clk rates: pclk=%d, byteclk=%d",
+ msm_host->mode->clock, msm_host->byte_clk_rate);
+
+ ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ if (ret) {
+ pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->esc_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->byte_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return 0;
+
+pixel_clk_err:
+ clk_disable_unprepare(msm_host->byte_clk);
+byte_clk_err:
+ clk_disable_unprepare(msm_host->esc_clk);
+error:
+ return ret;
+}
+
+static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
+
+static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
+{
+ int ret = 0;
+
+ mutex_lock(&msm_host->clk_mutex);
+ if (enable) {
+ ret = dsi_bus_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: Can not enable bus clk, %d\n",
+ __func__, ret);
+ goto unlock_ret;
+ }
+ ret = dsi_link_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: Can not enable link clk, %d\n",
+ __func__, ret);
+ dsi_bus_clk_disable(msm_host);
+ goto unlock_ret;
+ }
+ } else {
+ dsi_link_clk_disable(msm_host);
+ dsi_bus_clk_disable(msm_host);
+ }
+
+unlock_ret:
+ mutex_unlock(&msm_host->clk_mutex);
+ return ret;
+}
+
+static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ u8 lanes = msm_host->lanes;
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u32 pclk_rate;
+
+ if (!mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ pclk_rate = mode->clock * 1000;
+ if (lanes > 0) {
+ msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
+ } else {
+ pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+ msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ }
+
+ DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+
+ return 0;
+}
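A quick sanity check of the byte-clock formula above, with purely illustrative numbers (a 1080p RGB888 panel on four lanes is assumed):

	/*
	 * mode->clock = 148500 (kHz), bpp = 24, lanes = 4:
	 *
	 *   pclk_rate     = 148500 * 1000             = 148500000 Hz
	 *   byte_clk_rate = 148500000 * 24 / (8 * 4)  = 111375000 Hz
	 */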
+
+static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
+{
+ DBG("");
+ dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+ /* Make sure the reset is fully applied */
+ wmb();
+ udelay(1000);
+ dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+ udelay(100);
+}
+
+static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
+{
+ u32 intr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+
+ if (enable)
+ intr |= mask;
+ else
+ intr &= ~mask;
+
+ DBG("intr=%x enable=%d", intr, enable);
+
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+}
+
+static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
+{
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ return BURST_MODE;
+ else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ return NON_BURST_SYNCH_PULSE;
+
+ return NON_BURST_SYNCH_EVENT;
+}
+
+static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
+ case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
+ default: return VID_DST_FORMAT_RGB888;
+ }
+}
+
+static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
+ default: return CMD_DST_FORMAT_RGB888;
+ }
+}
+
+static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+ u32 clk_pre, u32 clk_post)
+{
+ u32 flags = msm_host->mode_flags;
+ enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+ u32 data = 0;
+
+ if (!enable) {
+ dsi_write(msm_host, REG_DSI_CTRL, 0);
+ return;
+ }
+
+ if (flags & MIPI_DSI_MODE_VIDEO) {
+ if (flags & MIPI_DSI_MODE_VIDEO_HSE)
+ data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
+ if (flags & MIPI_DSI_MODE_VIDEO_HFP)
+ data |= DSI_VID_CFG0_HFP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_HBP)
+ data |= DSI_VID_CFG0_HBP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_HSA)
+ data |= DSI_VID_CFG0_HSA_POWER_STOP;
+ /* Always set low power stop mode for BLLP
+ * to let command engine send packets
+ */
+ data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
+ DSI_VID_CFG0_BLLP_POWER_STOP;
+ data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
+ data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
+ data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
+ dsi_write(msm_host, REG_DSI_VID_CFG0, data);
+
+ /* Do not swap RGB colors */
+ data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
+ dsi_write(msm_host, REG_DSI_VID_CFG1, data);
+ } else {
+ /* Do not swap RGB colors */
+ data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
+ data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
+ dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
+
+ data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
+ DSI_CMD_CFG1_WR_MEM_CONTINUE(
+ MIPI_DCS_WRITE_MEMORY_CONTINUE);
+ /* Always insert DCS command */
+ data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
+ dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
+ }
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
+ DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
+ DSI_CMD_DMA_CTRL_LOW_POWER);
+
+ data = 0;
+ /* Always assume dedicated TE pin */
+ data |= DSI_TRIG_CTRL_TE;
+ data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
+ data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
+ data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
+ if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+ (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+ data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
+ dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
+
+ data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
+ DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
+ dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
+
+ data = 0;
+ if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
+ data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
+ dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
+
+ /* allow only ack-err-status to generate interrupt */
+ dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+
+ data = DSI_CTRL_CLK_EN;
+
+ DBG("lane number=%d", msm_host->lanes);
+ if (msm_host->lanes == 2) {
+ data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
+ /* swap lanes for 2-lane panel for better performance */
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
+ } else {
+ /* Take 4 lanes as default */
+ data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
+ DSI_CTRL_LANE3;
+ /* Do not swap lanes for 4-lane panel */
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
+ }
+ data |= DSI_CTRL_ENABLE;
+
+ dsi_write(msm_host, REG_DSI_CTRL, data);
+}
+
+static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
+ u32 h_total = mode->htotal;
+ u32 v_total = mode->vtotal;
+ u32 hs_end = mode->hsync_end - mode->hsync_start;
+ u32 vs_end = mode->vsync_end - mode->vsync_start;
+ u32 ha_start = h_total - mode->hsync_start;
+ u32 ha_end = ha_start + mode->hdisplay;
+ u32 va_start = v_total - mode->vsync_start;
+ u32 va_end = va_start + mode->vdisplay;
+ u32 wc;
+
+ DBG("");
+
+ if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi_write(msm_host, REG_DSI_ACTIVE_H,
+ DSI_ACTIVE_H_START(ha_start) |
+ DSI_ACTIVE_H_END(ha_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_V,
+ DSI_ACTIVE_V_START(va_start) |
+ DSI_ACTIVE_V_END(va_end));
+ dsi_write(msm_host, REG_DSI_TOTAL,
+ DSI_TOTAL_H_TOTAL(h_total - 1) |
+ DSI_TOTAL_V_TOTAL(v_total - 1));
+
+ dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
+ DSI_ACTIVE_HSYNC_START(hs_start) |
+ DSI_ACTIVE_HSYNC_END(hs_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
+ DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
+ DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+ } else { /* command mode */
+ /* image data and 1 byte write_memory_start cmd */
+ wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
+ DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
+ DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
+ msm_host->channel) |
+ DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
+ MIPI_DSI_DCS_LONG_WRITE));
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
+ }
+}
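To make the porch arithmetic above concrete, here is a worked example with an assumed (not taken from the patch) 1080p video-mode timing, where the sync start is treated as time 0:

	/*
	 *   hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200
	 *   vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125
	 *
	 *   hs_end   = 2052 - 2008 = 44    (HSYNC pulse width)
	 *   vs_end   = 1089 - 1084 = 5     (VSYNC pulse width)
	 *   ha_start = 2200 - 2008 = 192   (HSYNC width + horizontal back porch)
	 *   ha_end   = 192 + 1920  = 2112
	 *   va_start = 1125 - 1084 = 41
	 *   va_end   = 41 + 1080   = 1121
	 */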
+
+static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+{
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+ wmb(); /* clocks need to be enabled before reset */
+
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+ wmb(); /* make sure reset happens */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+}
+
+static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
+ bool video_mode, bool enable)
+{
+ u32 dsi_ctrl;
+
+ dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+ if (!enable) {
+ dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
+ DSI_CTRL_CMD_MODE_EN);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
+ DSI_IRQ_MASK_VIDEO_DONE, 0);
+ } else {
+ if (video_mode) {
+ dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
+ } else { /* command mode */
+ dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
+ }
+ dsi_ctrl |= DSI_CTRL_ENABLE;
+ }
+
+ dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
+}
+
+static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
+{
+ u32 data;
+
+ data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
+
+ if (mode == 0)
+ data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
+ else
+ data |= DSI_CMD_DMA_CTRL_LOW_POWER;
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
+}
+
+static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+{
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
+
+ reinit_completion(&msm_host->video_comp);
+
+ wait_for_completion_timeout(&msm_host->video_comp,
+ msecs_to_jiffies(70));
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
+}
+
+static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+{
+ if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+ return;
+
+ if (msm_host->power_on) {
+ dsi_wait4video_done(msm_host);
+ /* delay 2 to 4 ms to skip BLLP */
+ usleep_range(2000, 4000);
+ }
+}
+
+/* dsi_cmd */
+static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+ int ret;
+ u32 iova;
+
+ mutex_lock(&dev->struct_mutex);
+ msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
+ if (IS_ERR(msm_host->tx_gem_obj)) {
+ ret = PTR_ERR(msm_host->tx_gem_obj);
+ pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
+ msm_host->tx_gem_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+ if (ret) {
+ pr_err("%s: failed to get iova, %d\n", __func__, ret);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ if (iova & 0x07) {
+ pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ if (msm_host->tx_gem_obj) {
+ msm_gem_put_iova(msm_host->tx_gem_obj, 0);
+ mutex_lock(&dev->struct_mutex);
+ msm_gem_free_object(msm_host->tx_gem_obj);
+ msm_host->tx_gem_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mipi_dsi_packet packet;
+ int len;
+ int ret;
+ u8 *data;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ pr_err("%s: create packet failed, %d\n", __func__, ret);
+ return ret;
+ }
+ len = (packet.size + 3) & (~0x3);
+
+ if (len > tx_gem->size) {
+ pr_err("%s: packet size is too big\n", __func__);
+ return -EINVAL;
+ }
+
+ data = msm_gem_vaddr(tx_gem);
+
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* MSM specific command format in memory */
+ data[0] = packet.header[1];
+ data[1] = packet.header[2];
+ data[2] = packet.header[0];
+ data[3] = BIT(7); /* Last packet */
+ if (mipi_dsi_packet_format_is_long(msg->type))
+ data[3] |= BIT(6);
+ if (msg->rx_buf && msg->rx_len)
+ data[3] |= BIT(5);
+
+ /* Long packet */
+ if (packet.payload && packet.payload_length)
+ memcpy(data + 4, packet.payload, packet.payload_length);
+
+ /* Append 0xff to the end */
+ if (packet.size < len)
+ memset(data + packet.size, 0xff, len - packet.size);
+
+ return len;
+}
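For reference, a sketch of the buffer layout the assignments above produce, assuming the standard mipi_dsi_create_packet() ordering (header[0] carries the data type and virtual channel, header[1] and header[2] the word count or short-packet parameters):

	/*
	 *   data[0] = packet.header[1]    word count low byte / parameter 0
	 *   data[1] = packet.header[2]    word count high byte / parameter 1
	 *   data[2] = packet.header[0]    data type | virtual channel
	 *   data[3] = flags               BIT(7) last packet, BIT(6) long packet,
	 *                                 BIT(5) response expected
	 *   data[4..]                     payload (long packets only), padded
	 *                                 with 0xff up to a 4-byte boundary
	 */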
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 1)) {
+ *data = buf[1]; /* strip out dcs type */
+ return 1;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %d\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameter
+ */
+static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 2)) {
+ data[0] = buf[1]; /* strip out dcs type */
+ data[1] = buf[2];
+ return 2;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %d\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ /* strip out 4 byte dcs header */
+ if (msg->rx_buf && msg->rx_len)
+ memcpy(msg->rx_buf, buf + 4, msg->rx_len);
+
+ return msg->rx_len;
+}
+
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ int ret;
+ u32 iova;
+ bool triggered;
+
+ ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
+ }
+
+ reinit_completion(&msm_host->dma_comp);
+
+ dsi_wait4video_eng_busy(msm_host);
+
+ triggered = msm_dsi_manager_cmd_xfer_trigger(
+ msm_host->id, iova, len);
+ if (triggered) {
+ ret = wait_for_completion_timeout(&msm_host->dma_comp,
+ msecs_to_jiffies(200));
+ DBG("ret=%d", ret);
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else
+ ret = len;
+ } else
+ ret = len;
+
+ return ret;
+}
+
+static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
+ u8 *buf, int rx_byte, int pkt_size)
+{
+ u32 *lp, *temp, data;
+ int i, j = 0, cnt;
+ bool ack_error = false;
+ u32 read_cnt;
+ u8 reg[16];
+ int repeated_bytes = 0;
+ int buf_offset = buf - msm_host->rx_buf;
+
+ lp = (u32 *)buf;
+ temp = (u32 *)reg;
+ cnt = (rx_byte + 3) >> 2;
+ if (cnt > 4)
+ cnt = 4; /* 4 x 32 bits registers only */
+
+ /* Calculate real read data count */
+ read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
+
+ ack_error = (rx_byte == 4) ?
+ (read_cnt == 8) : /* short pkt + 4-byte error pkt */
+ (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
+
+ if (ack_error)
+ read_cnt -= 4; /* Remove 4 byte error pkt */
+
+ /*
+ * In case of multiple reads from the panel, after the first read there
+ * is a possibility that some payload bytes repeat in the RDBK_DATA
+ * registers, because every pass reads all the parameters from the
+ * panel starting from the first byte. We need to skip the repeated
+ * bytes and append only the new parameters to the rx buffer.
+ */
+ if (read_cnt > 16) {
+ int bytes_shifted;
+ /* Any data beyond 16 bytes will have been shifted out of the
+ * RDBK registers; the rx buffer should already contain those
+ * bytes from the previous pass. The leading bytes still held in
+ * the registers are the repeated bytes.
+ */
+ bytes_shifted = read_cnt - 16;
+ repeated_bytes = buf_offset - bytes_shifted;
+ }
+
+ for (i = cnt - 1; i >= 0; i--) {
+ data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+ *temp++ = ntohl(data); /* to host byte order */
+ DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+ }
+
+ for (i = repeated_bytes; i < 16; i++)
+ buf[j++] = reg[i];
+
+ return j;
+}
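A worked example of the repeated-bytes bookkeeping, with hypothetical numbers:

	/*
	 * Assume a later pass for which 10 payload bytes are already in the
	 * rx buffer (buf_offset = 10) and the controller reports
	 * read_cnt = 20. Only the last 16 bytes are still held in the
	 * RDBK_DATA registers, so:
	 *
	 *   bytes_shifted  = 20 - 16 = 4   (pushed out of the registers)
	 *   repeated_bytes = 10 - 4  = 6   (already copied on an earlier pass)
	 *
	 * and only reg[6..15] get appended to the rx buffer.
	 */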
+
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+ const struct mipi_dsi_msg *msg)
+{
+ int len, ret;
+ int bllp_len = msm_host->mode->hdisplay *
+ dsi_get_bpp(msm_host->format) / 8;
+
+ len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
+ if (len <= 0) {
+ pr_err("%s: failed to add cmd type = 0x%x\n",
+ __func__, msg->type);
+ return -EINVAL;
+ }
+
+ /* For video mode, do not send commands longer than
+ * one pixel line, since they can only be transmitted
+ * during the BLLP.
+ */
+ /* TODO: if the command is sent in LP mode, the bit rate is only
+ * half of the esc clk rate. In this case, if the video is already
+ * actively streaming, we need to check more carefully whether the
+ * command can fit into one BLLP.
+ */
+ if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
+ pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ ret = dsi_cmd_dma_tx(msm_host, len);
+ if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
+ return -ECOMM;
+ }
+
+ return len;
+}
+
+static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
+{
+ u32 data0, data1;
+
+ data0 = dsi_read(msm_host, REG_DSI_CTRL);
+ data1 = data0;
+ data1 &= ~DSI_CTRL_ENABLE;
+ dsi_write(msm_host, REG_DSI_CTRL, data1);
+ /*
+ * The DSI controller needs to be disabled before
+ * the clocks are turned on.
+ */
+ wmb();
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+ wmb(); /* make sure clocks enabled */
+
+ /* dsi controller can only be reset while clocks are running */
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+ wmb(); /* make sure reset happens */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb(); /* controller out of reset */
+ dsi_write(msm_host, REG_DSI_CTRL, data0);
+ wmb(); /* make sure dsi controller enabled again */
+}
+
+static void dsi_err_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, err_work);
+ u32 status = msm_host->err_work_state;
+
+ pr_err("%s: status=%x\n", __func__, status);
+ if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
+ dsi_sw_reset_restore(msm_host);
+
+ /* It is safe to clear here because error irq is disabled. */
+ msm_host->err_work_state = 0;
+
+ /* enable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+}
+
+static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
+ /* An extra write of 0 is needed to clear the error bits */
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
+ msm_host->err_work_state |= DSI_ERR_STATE_ACK;
+ }
+}
+
+static void dsi_timeout_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
+ }
+}
+
+static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
+ }
+}
+
+static void dsi_fifo_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
+
+ /* fifo underflow, overflow */
+ if (status) {
+ dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
+ if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
+ }
+}
+
+static void dsi_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
+ dsi_write(msm_host, REG_DSI_STATUS0, status);
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
+ }
+}
+
+static void dsi_clk_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
+
+ if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
+ dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
+ }
+}
+
+static void dsi_error(struct msm_dsi_host *msm_host)
+{
+ /* disable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
+
+ dsi_clk_status(msm_host);
+ dsi_fifo_status(msm_host);
+ dsi_ack_err_status(msm_host);
+ dsi_timeout_status(msm_host);
+ dsi_status(msm_host);
+ dsi_dln0_phy_err(msm_host);
+
+ queue_work(msm_host->workqueue, &msm_host->err_work);
+}
+
+static irqreturn_t dsi_host_irq(int irq, void *ptr)
+{
+ struct msm_dsi_host *msm_host = ptr;
+ u32 isr;
+ unsigned long flags;
+
+ if (!msm_host->ctrl_base)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+
+ DBG("isr=0x%x, id=%d", isr, msm_host->id);
+
+ if (isr & DSI_IRQ_ERROR)
+ dsi_error(msm_host);
+
+ if (isr & DSI_IRQ_VIDEO_DONE)
+ complete(&msm_host->video_comp);
+
+ if (isr & DSI_IRQ_CMD_DMA_DONE)
+ complete(&msm_host->dma_comp);
+
+ return IRQ_HANDLED;
+}
+
+static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
+ struct device *panel_device)
+{
+ int ret;
+
+ msm_host->disp_en_gpio = devm_gpiod_get(panel_device,
+ "disp-enable");
+ if (IS_ERR(msm_host->disp_en_gpio)) {
+ DBG("cannot get disp-enable-gpios %ld",
+ PTR_ERR(msm_host->disp_en_gpio));
+ msm_host->disp_en_gpio = NULL;
+ }
+ if (msm_host->disp_en_gpio) {
+ ret = gpiod_direction_output(msm_host->disp_en_gpio, 0);
+ if (ret) {
+ pr_err("cannot set dir to disp-en-gpios %d\n", ret);
+ return ret;
+ }
+ }
+
+ msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te");
+ if (IS_ERR(msm_host->te_gpio)) {
+ DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
+ msm_host->te_gpio = NULL;
+ }
+
+ if (msm_host->te_gpio) {
+ ret = gpiod_direction_input(msm_host->te_gpio);
+ if (ret) {
+ pr_err("%s: cannot set dir to disp-te-gpios, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ msm_host->channel = dsi->channel;
+ msm_host->lanes = dsi->lanes;
+ msm_host->format = dsi->format;
+ msm_host->mode_flags = dsi->mode_flags;
+
+ msm_host->panel_node = dsi->dev.of_node;
+
+ /* Some gpios defined in panel DT need to be controlled by host */
+ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
+ if (ret)
+ return ret;
+
+ DBG("id=%d", msm_host->id);
+ if (msm_host->dev)
+ drm_helper_hpd_irq_event(msm_host->dev);
+
+ return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ msm_host->panel_node = NULL;
+
+ DBG("id=%d", msm_host->id);
+ if (msm_host->dev)
+ drm_helper_hpd_irq_event(msm_host->dev);
+
+ return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ if (!msg || !msm_host->power_on)
+ return -EINVAL;
+
+ mutex_lock(&msm_host->cmd_mutex);
+ ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
+ mutex_unlock(&msm_host->cmd_mutex);
+
+ return ret;
+}
+
+static struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+ .transfer = dsi_host_transfer,
+};
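For context, the host ops above are reached through the standard mipi_dsi framework; a hypothetical panel driver (names and values are illustrative, not from this patch) would populate its mipi_dsi_device and attach like this:

	/* Hypothetical panel probe, to show which fields dsi_host_attach() consumes. */
	static int example_panel_probe(struct mipi_dsi_device *dsi)
	{
		dsi->lanes = 4;
		dsi->format = MIPI_DSI_FMT_RGB888;
		dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_HSE;

		/* ends up in host->ops->attach(), i.e. dsi_host_attach() above */
		return mipi_dsi_attach(dsi);
	}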
+
+int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_host *msm_host = NULL;
+ struct platform_device *pdev = msm_dsi->pdev;
+ int ret;
+
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+ if (!msm_host) {
+ pr_err("%s: FAILED: cannot alloc dsi host\n",
+ __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,dsi-host-index", &msm_host->id);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: host index not specified, ret=%d\n",
+ __func__, ret);
+ goto fail;
+ }
+ msm_host->pdev = pdev;
+
+ ret = dsi_clk_init(msm_host);
+ if (ret) {
+ pr_err("%s: unable to initialize dsi clks\n", __func__);
+ goto fail;
+ }
+
+ msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
+ if (IS_ERR(msm_host->ctrl_base)) {
+ pr_err("%s: unable to map Dsi ctrl base\n", __func__);
+ ret = PTR_ERR(msm_host->ctrl_base);
+ goto fail;
+ }
+
+ msm_host->cfg = dsi_get_config(msm_host);
+ if (!msm_host->cfg) {
+ ret = -EINVAL;
+ pr_err("%s: get config failed\n", __func__);
+ goto fail;
+ }
+
+ ret = dsi_regulator_init(msm_host);
+ if (ret) {
+ pr_err("%s: regulator init failed\n", __func__);
+ goto fail;
+ }
+
+ msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
+ if (!msm_host->rx_buf) {
+ pr_err("%s: alloc rx temp buf failed\n", __func__);
+ goto fail;
+ }
+
+ init_completion(&msm_host->dma_comp);
+ init_completion(&msm_host->video_comp);
+ mutex_init(&msm_host->dev_mutex);
+ mutex_init(&msm_host->cmd_mutex);
+ mutex_init(&msm_host->clk_mutex);
+ spin_lock_init(&msm_host->intr_lock);
+
+ /* setup workqueue */
+ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+ INIT_WORK(&msm_host->err_work, dsi_err_worker);
+
+ msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
+ msm_host->id);
+ if (!msm_dsi->phy) {
+ ret = -EINVAL;
+ pr_err("%s: phy init failed\n", __func__);
+ goto fail;
+ }
+ msm_dsi->host = &msm_host->base;
+ msm_dsi->id = msm_host->id;
+
+ DBG("Dsi Host %d initialized", msm_host->id);
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+ dsi_tx_buf_free(msm_host);
+ if (msm_host->workqueue) {
+ flush_workqueue(msm_host->workqueue);
+ destroy_workqueue(msm_host->workqueue);
+ msm_host->workqueue = NULL;
+ }
+
+ mutex_destroy(&msm_host->clk_mutex);
+ mutex_destroy(&msm_host->cmd_mutex);
+ mutex_destroy(&msm_host->dev_mutex);
+}
+
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct platform_device *pdev = msm_host->pdev;
+ int ret;
+
+ msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!msm_host->irq) {
+ dev_err(dev->dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, msm_host->irq,
+ dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "dsi_isr", msm_host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ msm_host->irq, ret);
+ return ret;
+ }
+
+ msm_host->dev = dev;
+ ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ if (ret) {
+ pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct device_node *node;
+ int ret;
+
+ /* Register mipi dsi host */
+ if (!msm_host->registered) {
+ host->dev = &msm_host->pdev->dev;
+ host->ops = &dsi_host_ops;
+ ret = mipi_dsi_host_register(host);
+ if (ret)
+ return ret;
+
+ msm_host->registered = true;
+
+ /* If the panel driver has not been probed after the host is
+ * registered, we should defer the host's probe.
+ * This makes sure the panel is connected when fbcon detects the
+ * connector status and gets the proper display mode to
+ * create the framebuffer.
+ */
+ if (check_defer) {
+ node = of_get_child_by_name(msm_host->pdev->dev.of_node,
+ "panel");
+ if (node) {
+ if (!of_drm_find_panel(node))
+ return -EPROBE_DEFER;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void msm_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->registered) {
+ mipi_dsi_host_unregister(host);
+ host->dev = NULL;
+ host->ops = NULL;
+ msm_host->registered = false;
+ }
+}
+
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ /* TODO: make sure dsi_cmd_mdp is idle.
+ * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
+ * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
+ * How to handle the old versions? Wait for mdp cmd done?
+ */
+
+ /*
+ * The mdss interrupt is generated in the mdp core clock domain,
+ * so the mdp clock needs to be enabled to receive the dsi interrupt.
+ */
+ dsi_clk_ctrl(msm_host, 1);
+
+ /* TODO: vote for bus bandwidth */
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(0, msm_host);
+
+ msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
+ dsi_write(msm_host, REG_DSI_CTRL,
+ msm_host->dma_cmd_ctrl_restore |
+ DSI_CTRL_CMD_MODE_EN |
+ DSI_CTRL_ENABLE);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
+
+ return 0;
+}
+
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
+ dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(1, msm_host);
+
+ /* TODO: unvote for bus bandwidth */
+
+ dsi_clk_ctrl(msm_host, 0);
+}
+
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return dsi_cmds2buf_tx(msm_host, msg);
+}
+
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int data_byte, rx_byte, dlen, end;
+ int short_response, diff, pkt_size, ret = 0;
+ char cmd;
+ int rlen = msg->rx_len;
+ u8 *buf;
+
+ if (rlen <= 2) {
+ short_response = 1;
+ pkt_size = rlen;
+ rx_byte = 4;
+ } else {
+ short_response = 0;
+ data_byte = 10; /* first read */
+ if (rlen < data_byte)
+ pkt_size = rlen;
+ else
+ pkt_size = data_byte;
+ rx_byte = data_byte + 6; /* 4 header + 2 crc */
+ }
+
+ buf = msm_host->rx_buf;
+ end = 0;
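+ /*
+ * Read loop: first tell the peripheral the maximum return packet
+ * size, then send the read command and drain up to rx_byte bytes
+ * from the RDBK registers. Long reads repeat this until all rlen
+ * bytes have been collected.
+ */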
+ while (!end) {
+ u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
+ struct mipi_dsi_msg max_pkt_size_msg = {
+ .channel = msg->channel,
+ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+ .tx_len = 2,
+ .tx_buf = tx,
+ };
+
+ DBG("rlen=%d pkt_size=%d rx_byte=%d",
+ rlen, pkt_size, rx_byte);
+
+ ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
+ if (ret < 2) {
+ pr_err("%s: Set max pkt size failed, %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+ (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+ /* Clear the RDBK_DATA registers */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
+ DSI_RDBK_DATA_CTRL_CLR);
+ wmb(); /* make sure the RDBK registers are cleared */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+ wmb(); /* release cleared status before transfer */
+ }
+
+ ret = dsi_cmds2buf_tx(msm_host, msg);
+ if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Once the cmd_dma_done interrupt is received, the return data
+ * from the client is ready and already stored in the RDBK_DATA
+ * registers.
+ * Since the rx fifo is 16 bytes, the dcs header is kept only on
+ * the first pass; after that the header is lost while the data is
+ * shifted into the registers.
+ */
+ dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+ if (dlen <= 0)
+ return 0;
+
+ if (short_response)
+ break;
+
+ if (rlen <= data_byte) {
+ diff = data_byte - rlen;
+ end = 1;
+ } else {
+ diff = 0;
+ rlen -= data_byte;
+ }
+
+ if (!end) {
+ dlen -= 2; /* 2 crc */
+ dlen -= diff;
+ buf += dlen; /* next start position */
+ data_byte = 14; /* NOT first read */
+ if (rlen < data_byte)
+ pkt_size += rlen;
+ else
+ pkt_size += data_byte;
+ DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+ }
+ }
+
+ /*
+ * For a single long read, if the requested rlen < 10, we need to
+ * shift the start position of the rx data buffer to skip the
+ * bytes that were not updated.
+ */
+ if (pkt_size < 10 && !short_response)
+ buf = msm_host->rx_buf + (10 - rlen);
+ else
+ buf = msm_host->rx_buf;
+
+ cmd = buf[0];
+ switch (cmd) {
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
+ ret = 0;
+ break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ ret = dsi_short_read1_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ ret = dsi_short_read2_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ ret = dsi_long_read_resp(buf, msg);
+ break;
+ default:
+ pr_warn("%s:Invalid response cmd\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
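+ /* Program the command DMA buffer address and length, then kick the
+ * DMA trigger.
+ */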
+ dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+ dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+ /* Make sure trigger happens */
+ wmb();
+}
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+ /* TODO: the clock should be turned off for command mode,
+ * and only turned on before MDP START.
+ * This part of the code should be enabled once the mdp driver
+ * supports it.
+ */
+ /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
+ dsi_clk_ctrl(msm_host, 0); */
+
+ return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+ /* Even though the INTF is disabled, the video engine does not stop,
+ * which would leave the cmd engine blocked.
+ * Reset to disable the video engine so that we can still send off
+ * commands.
+ */
+ dsi_sw_reset(msm_host);
+
+ return 0;
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ u32 clk_pre = 0, clk_post = 0;
+ int ret = 0;
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (msm_host->power_on) {
+ DBG("dsi host already on");
+ goto unlock_ret;
+ }
+
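+ /* Power-up sequence: calculate the clock rates, enable regulators
+ * and clocks, reset and enable the phy, then program the timings
+ * and the controller before asserting the panel enable gpio.
+ */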
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ goto unlock_ret;
+ }
+
+ ret = dsi_host_regulator_enable(msm_host);
+ if (ret) {
+ pr_err("%s:Failed to enable vregs.ret=%d\n",
+ __func__, ret);
+ goto unlock_ret;
+ }
+
+ ret = dsi_bus_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ dsi_phy_sw_reset(msm_host);
+ ret = msm_dsi_manager_phy_enable(msm_host->id,
+ msm_host->byte_clk_rate * 8,
+ clk_get_rate(msm_host->esc_clk),
+ &clk_pre, &clk_post);
+ dsi_bus_clk_disable(msm_host);
+ if (ret) {
+ pr_err("%s: failed to enable phy, %d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ ret = dsi_clk_ctrl(msm_host, 1);
+ if (ret) {
+ pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ dsi_timing_setup(msm_host);
+ dsi_sw_reset(msm_host);
+ dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 1);
+
+ msm_host->power_on = true;
+ mutex_unlock(&msm_host->dev_mutex);
+
+ return 0;
+
+fail_disable_reg:
+ dsi_host_regulator_disable(msm_host);
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (!msm_host->power_on) {
+ DBG("dsi host already off");
+ goto unlock_ret;
+ }
+
+ dsi_ctrl_config(msm_host, false, 0, 0);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 0);
+
+ msm_dsi_manager_phy_disable(msm_host->id);
+
+ dsi_clk_ctrl(msm_host, 0);
+
+ dsi_host_regulator_disable(msm_host);
+
+ DBG("-");
+
+ msm_host->power_on = false;
+
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ struct drm_display_mode *mode)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->mode) {
+ drm_mode_destroy(msm_host->dev, msm_host->mode);
+ msm_host->mode = NULL;
+ }
+
+ msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+ if (!msm_host->mode) {
+ pr_err("%s: cannot duplicate mode\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+ unsigned long *panel_flags)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_panel *panel;
+
+ panel = of_drm_find_panel(msm_host->panel_node);
+ if (panel_flags)
+ *panel_flags = msm_host->mode_flags;
+
+ return panel;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 000000000000..ee3ebcaa33f5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_kms.h"
+#include "dsi.h"
+
+struct msm_dsi_manager {
+ struct msm_dsi *dsi[DSI_MAX];
+
+ bool is_dual_panel;
+ bool is_sync_needed;
+ int master_panel_id;
+};
+
+static struct msm_dsi_manager msm_dsim_glb;
+
+#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel)
+#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
+#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id)
+
+static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
+{
+ return msm_dsim_glb.dsi[id];
+}
+
+static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
+{
+ return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
+}
+
+static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+ /* We assume both dsi nodes carry the same dual-panel and sync-mode
+ * information, and that only one node specifies the master in dual
+ * mode.
+ */
+ if (!msm_dsim->is_dual_panel)
+ msm_dsim->is_dual_panel = of_property_read_bool(
+ np, "qcom,dual-panel-mode");
+
+ if (msm_dsim->is_dual_panel) {
+ if (of_property_read_bool(np, "qcom,master-panel"))
+ msm_dsim->master_panel_id = id;
+ if (!msm_dsim->is_sync_needed)
+ msm_dsim->is_sync_needed = of_property_read_bool(
+ np, "qcom,sync-dual-panel");
+ }
+
+ return 0;
+}
+
+struct dsi_connector {
+ struct drm_connector base;
+ int id;
+};
+
+struct dsi_bridge {
+ struct drm_bridge base;
+ int id;
+};
+
+#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
+#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
+
+static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
+{
+ struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+ return dsi_connector->id;
+}
+
+static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
+{
+ struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
+ return dsi_bridge->id;
+}
+
+static enum drm_connector_status dsi_mgr_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ DBG("id=%d", id);
+ if (!msm_dsi->panel) {
+ msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
+ &msm_dsi->panel_flags);
+
+ /* In dual panel mode there is only 1 panel in the global panel
+ * list. The slave dsi should therefore get the drm_panel instance
+ * from the master dsi, while keeping the panel flags obtained from
+ * its own DSI link.
+ */
+ if (!msm_dsi->panel && IS_DUAL_PANEL() &&
+ !IS_MASTER_PANEL(id) && other_dsi)
+ msm_dsi->panel = msm_dsi_host_get_panel(
+ other_dsi->host, NULL);
+
+ if (msm_dsi->panel && IS_DUAL_PANEL())
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.tile_property, 0);
+
+ /* Pass the split display info to kms once the dual panel is
+ * connected to both hosts.
+ */
+ if (msm_dsi->panel && IS_DUAL_PANEL() &&
+ other_dsi && other_dsi->panel) {
+ bool cmd_mode = !(msm_dsi->panel_flags &
+ MIPI_DSI_MODE_VIDEO);
+ struct drm_encoder *encoder = msm_dsi_get_encoder(
+ dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
+ struct drm_encoder *slave_enc = msm_dsi_get_encoder(
+ dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));
+
+ if (kms->funcs->set_split_display)
+ kms->funcs->set_split_display(kms, encoder,
+ slave_enc, cmd_mode);
+ else
+ pr_err("mdp does not support dual panel\n");
+ }
+ }
+
+ return msm_dsi->panel ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void dsi_mgr_connector_destroy(struct drm_connector *connector)
+{
+ DBG("");
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode, *m;
+
+ /* Only support left-right mode */
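+ /* Each DSI link drives half of the width, so halve the pixel clock
+ * and horizontal timings of every probed mode.
+ */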
+ list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
+ mode->clock >>= 1;
+ mode->hdisplay >>= 1;
+ mode->hsync_start >>= 1;
+ mode->hsync_end >>= 1;
+ mode->htotal >>= 1;
+ drm_mode_set_name(mode);
+ }
+}
+
+static int dsi_dual_connector_tile_init(
+ struct drm_connector *connector, int id)
+{
+ struct drm_display_mode *mode;
+ /* Fake topology id */
+ char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
+
+ if (connector->tile_group) {
+ DBG("Tile property has been initialized");
+ return 0;
+ }
+
+ /* Use the first mode only for now */
+ mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode,
+ head);
+ if (!mode)
+ return -EINVAL;
+
+ connector->tile_group = drm_mode_get_tile_group(
+ connector->dev, topo_id);
+ if (!connector->tile_group)
+ connector->tile_group = drm_mode_create_tile_group(
+ connector->dev, topo_id);
+ if (!connector->tile_group) {
+ pr_err("%s: failed to create tile group\n", __func__);
+ return -ENOMEM;
+ }
+
+ connector->has_tile = true;
+ connector->tile_is_single_monitor = true;
+
+ /* mode has been fixed */
+ connector->tile_h_size = mode->hdisplay;
+ connector->tile_v_size = mode->vdisplay;
+
+ /* Only support left-right mode */
+ connector->num_h_tile = 2;
+ connector->num_v_tile = 1;
+
+ connector->tile_v_loc = 0;
+ connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
+
+ return 0;
+}
+
+static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ int ret, num;
+
+ if (!panel)
+ return 0;
+
+ /* Since we have 2 connectors but only 1 drm_panel in dual DSI mode,
+ * the panel should not stay attached to any connector.
+ * Attach the panel to the current connector only temporarily here,
+ * to let the panel set its modes on this connector.
+ */
+ drm_panel_attach(panel, connector);
+ num = drm_panel_get_modes(panel);
+ drm_panel_detach(panel);
+ if (!num)
+ return 0;
+
+ if (IS_DUAL_PANEL()) {
+ /* report half resolution to user */
+ dsi_dual_connector_fix_modes(connector);
+ ret = dsi_dual_connector_tile_init(connector, id);
+ if (ret)
+ return ret;
+ ret = drm_mode_connector_set_tile_property(connector);
+ if (ret) {
+ pr_err("%s: set tile property failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return num;
+}
+
+static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ DBG("");
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms, requested, encoder);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+dsi_mgr_connector_best_encoder(struct drm_connector *connector)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+ DBG("");
+ return msm_dsi_get_encoder(msm_dsi);
+}
+
+static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_panel = IS_DUAL_PANEL();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!panel || (is_dual_panel && (DSI_1 == id)))
+ return;
+
+ ret = msm_dsi_host_power_on(host);
+ if (ret) {
+ pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
+ goto host_on_fail;
+ }
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_power_on(msm_dsi1->host);
+ if (ret) {
+ pr_err("%s: power on host1 failed, %d\n",
+ __func__, ret);
+ goto host1_on_fail;
+ }
+ }
+
+ /* Always call panel functions once, because even for dual panels,
+ * there is only one drm_panel instance.
+ */
+ ret = drm_panel_prepare(panel);
+ if (ret) {
+ pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
+ goto panel_prep_fail;
+ }
+
+ ret = msm_dsi_host_enable(host);
+ if (ret) {
+ pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
+ goto host_en_fail;
+ }
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_enable(msm_dsi1->host);
+ if (ret) {
+ pr_err("%s: enable host1 failed, %d\n", __func__, ret);
+ goto host1_en_fail;
+ }
+ }
+
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
+ goto panel_en_fail;
+ }
+
+ return;
+
+panel_en_fail:
+ if (is_dual_panel && msm_dsi1)
+ msm_dsi_host_disable(msm_dsi1->host);
+host1_en_fail:
+ msm_dsi_host_disable(host);
+host_en_fail:
+ drm_panel_unprepare(panel);
+panel_prep_fail:
+ if (is_dual_panel && msm_dsi1)
+ msm_dsi_host_power_off(msm_dsi1->host);
+host1_on_fail:
+ msm_dsi_host_power_off(host);
+host_on_fail:
+ return;
+}
+
+static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_panel = IS_DUAL_PANEL();
+ int ret;
+
+ DBG("id=%d", id);
+
+ if (!panel || (is_dual_panel && (DSI_1 == id)))
+ return;
+
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+
+ ret = msm_dsi_host_disable(host);
+ if (ret)
+ pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_disable(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 disable failed, %d\n", __func__, ret);
+ }
+
+ ret = drm_panel_unprepare(panel);
+ if (ret)
+ pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+
+ ret = msm_dsi_host_power_off(host);
+ if (ret)
+ pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_power_off(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 power off failed, %d\n",
+ __func__, ret);
+ }
+}
+
+static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_dual_panel = IS_DUAL_PANEL();
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ if (is_dual_panel && (DSI_1 == id))
+ return;
+
+ msm_dsi_host_set_display_mode(host, adjusted_mode);
+ if (is_dual_panel && other_dsi)
+ msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
+}
+
+static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = dsi_mgr_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = dsi_mgr_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
+ .get_modes = dsi_mgr_connector_get_modes,
+ .mode_valid = dsi_mgr_connector_mode_valid,
+ .best_encoder = dsi_mgr_connector_best_encoder,
+};
+
+static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+ .pre_enable = dsi_mgr_bridge_pre_enable,
+ .enable = dsi_mgr_bridge_enable,
+ .disable = dsi_mgr_bridge_disable,
+ .post_disable = dsi_mgr_bridge_post_disable,
+ .mode_set = dsi_mgr_bridge_mode_set,
+};
+
+/* initialize connector */
+struct drm_connector *msm_dsi_manager_connector_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_connector *connector = NULL;
+ struct dsi_connector *dsi_connector;
+ int ret;
+
+ dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
+ sizeof(*dsi_connector), GFP_KERNEL);
+ if (!dsi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dsi_connector->id = id;
+
+ connector = &dsi_connector->base;
+
+ ret = drm_connector_init(msm_dsi->dev, connector,
+ &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (ret)
+ goto fail;
+
+ drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
+
+ /* Enable HPD so that the hpd event is handled when the panel is
+ * attached to the host.
+ */
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ /* Display driver doesn't support interlace now. */
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_connector_register(connector);
+ if (ret)
+ goto fail;
+
+ return connector;
+
+fail:
+ if (connector)
+ dsi_mgr_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
+
+/* initialize bridge */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_bridge *bridge = NULL;
+ struct dsi_bridge *dsi_bridge;
+ int ret;
+
+ dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
+ sizeof(*dsi_bridge), GFP_KERNEL);
+ if (!dsi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dsi_bridge->id = id;
+
+ bridge = &dsi_bridge->base;
+ bridge->funcs = &dsi_mgr_bridge_funcs;
+
+ ret = drm_bridge_attach(msm_dsi->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ msm_dsi_manager_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
+
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+int msm_dsi_manager_phy_enable(int id,
+ const unsigned long bit_rate, const unsigned long esc_rate,
+ u32 *clk_pre, u32 *clk_post)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi_phy *phy = msm_dsi->phy;
+ int ret;
+
+ ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+ if (ret)
+ return ret;
+
+ msm_dsi->phy_enabled = true;
+ msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
+
+ return 0;
+}
+
+void msm_dsi_manager_phy_disable(int id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+ struct msm_dsi_phy *phy = msm_dsi->phy;
+
+ /* Disable the DSI phy.
+ * In a dual-dsi configuration, the phy of the first controller is
+ * only disabled once the second controller has been disabled as
+ * well.
+ */
+ msm_dsi->phy_enabled = false;
+ if (IS_DUAL_PANEL() && mdsi && sdsi) {
+ if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+ msm_dsi_phy_disable(sdsi->phy);
+ msm_dsi_phy_disable(mdsi->phy);
+ }
+ } else {
+ msm_dsi_phy_disable(phy);
+ }
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_read = (msg->rx_buf && msg->rx_len);
+ bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+ int ret;
+
+ if (!msg->tx_buf || !msg->tx_len)
+ return 0;
+
+ /* In the dual master case, the panel requires the same commands to
+ * be sent to both DSI links. The host issues the command trigger to
+ * both links when DSI_1 calls the cmd transfer function, regardless
+ * of whether that happens before or after the DSI_0 cmd transfer.
+ */
+ if (need_sync && (id == DSI_0))
+ return is_read ? msg->rx_len : msg->tx_len;
+
+ if (need_sync && msm_dsi0) {
+ ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare non-trigger host, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ ret = msm_dsi_host_xfer_prepare(host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+ goto restore_host0;
+ }
+
+ ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
+ msm_dsi_host_cmd_tx(host, msg);
+
+ msm_dsi_host_xfer_restore(host, msg);
+
+restore_host0:
+ if (need_sync && msm_dsi0)
+ msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
+
+ return ret;
+}
+
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+
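+ /* In sync mode the trigger is deferred for DSI_0; DSI_1 commits the
+ * DMA transfer on both hosts so both links are triggered together.
+ */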
+ if (IS_SYNC_NEEDED() && (id == DSI_0))
+ return false;
+
+ if (IS_SYNC_NEEDED() && msm_dsi0)
+ msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
+
+ msm_dsi_host_cmd_xfer_commit(host, iova, len);
+
+ return true;
+}
+
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+ int id = msm_dsi->id;
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ int ret;
+
+ if (id >= DSI_MAX) {
+ pr_err("%s: invalid id %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (msm_dsim->dsi[id]) {
+ pr_err("%s: dsi%d already registered\n", __func__, id);
+ return -EBUSY;
+ }
+
+ msm_dsim->dsi[id] = msm_dsi;
+
+ ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+ if (ret) {
+ pr_err("%s: failed to parse dual panel info\n", __func__);
+ return ret;
+ }
+
+ if (!IS_DUAL_PANEL()) {
+ ret = msm_dsi_host_register(msm_dsi->host, true);
+ } else if (!other_dsi) {
+ return 0;
+ } else {
+ struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+ msm_dsi : other_dsi;
+ struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+ other_dsi : msm_dsi;
+ /* Register the slave host first, so that the slave DSI device
+ * has a chance to probe without blocking the master DSI device's
+ * probe.
+ * Also, do not check defer for the slave host, because only the
+ * master DSI device adds the panel to the global panel list; the
+ * panel's device is the master DSI device.
+ */
+ ret = msm_dsi_host_register(sdsi->host, false);
+ if (ret)
+ return ret;
+ ret = msm_dsi_host_register(mdsi->host, true);
+ }
+
+ return ret;
+}
+
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+ if (msm_dsi->host)
+ msm_dsi_host_unregister(msm_dsi->host);
+ msm_dsim->dsi[msm_dsi->id] = NULL;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
new file mode 100644
index 000000000000..f0cea8927388
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct dsi_dphy_timing {
+ u32 clk_pre;
+ u32 clk_post;
+ u32 clk_zero;
+ u32 clk_trail;
+ u32 clk_prepare;
+ u32 hs_exit;
+ u32 hs_zero;
+ u32 hs_prepare;
+ u32 hs_trail;
+ u32 hs_rqst;
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+};
+
+struct msm_dsi_phy {
+ void __iomem *base;
+ void __iomem *reg_base;
+ int id;
+ struct dsi_dphy_timing timing;
+ int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+ int (*disable)(struct msm_dsi_phy *phy);
+};
+
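+/* DIV_ROUND_UP for signed values: rounds away from zero when n is negative */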
+#define S_DIV_ROUND_UP(n, d) \
+ (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
+
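+/*
+ * Interpolate 'percent' percent of the way from tmin to tmax (rounded
+ * up); if 'even' is set, an odd result is first decremented, and the
+ * final value is clamped to at least min_result.
+ */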
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+ s32 min_result, bool even)
+{
+ s32 v;
+ v = (tmax - tmin) * percent;
+ v = S_DIV_ROUND_UP(v, 100) + tmin;
+ if (even && (v & 0x1))
+ return max_t(s32, min_result, v - 1);
+ else
+ return max_t(s32, min_result, v);
+}
+
+static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
+ s32 ui, s32 coeff, s32 pcnt)
+{
+ s32 tmax, tmin, clk_z;
+ s32 temp;
+
+ /* reset */
+ temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ if (tmin > 255) {
+ tmax = 511;
+ clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+ } else {
+ tmax = 255;
+ clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+ }
+
+ /* adjust */
+ temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+ timing->clk_zero = clk_z + 8 - temp;
+}
+
+static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ s32 ui, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 10;
+ s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
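+ /* ui and lpx are the bit period and the escape clock period in ns,
+ * both scaled by 'coeff' for integer precision.
+ */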
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+ tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+ tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+ temp = lpx / ui;
+ if (temp & 0x1)
+ timing->hs_rqst = temp;
+ else
+ timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+ /* Calculate clk_zero after clk_prepare and hs_rqst */
+ dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ temp = 85 * coeff + 6 * ui;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 40 * coeff + 4 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+ tmax = 255;
+ temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+ temp = 145 * coeff + 10 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 60 * coeff + 4 * ui;
+ tmin = DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ tmax = 255;
+ tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+ tmax = 63;
+ temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+ temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ tmax = 63;
+ temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+ temp += 8 * ui + lpx;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ if (tmin > tmax) {
+ temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
+ timing->clk_pre = temp >> 1;
+ temp = (2 * tmax - tmin) * pcnt2;
+ } else {
+ timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->clk_pre, timing->clk_post, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ pr_err("%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+ dsi_28nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ }
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
+ else
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
+
+ return 0;
+}
+
+static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+ dsi_28nm_phy_regulator_ctrl(phy, false);
+
+ /*
+ * Wait for the register writes to complete in order to ensure
+ * that the phy is completely disabled.
+ */
+ wmb();
+
+ return 0;
+}
+
+#define dsi_phy_func_init(name) \
+ do { \
+ phy->enable = dsi_##name##_phy_enable; \
+ phy->disable = dsi_##name##_phy_disable; \
+ } while (0)
+
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
+{
+ struct msm_dsi_phy *phy;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(phy->base)) {
+ pr_err("%s: failed to map phy base\n", __func__);
+ return NULL;
+ }
+ phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+ if (IS_ERR_OR_NULL(phy->reg_base)) {
+ pr_err("%s: failed to map phy regulator base\n", __func__);
+ return NULL;
+ }
+
+ switch (type) {
+ case MSM_DSI_PHY_28NM:
+ dsi_phy_func_init(28nm);
+ break;
+ default:
+ pr_err("%s: unsupported type, %d\n", __func__, type);
+ return NULL;
+ }
+
+ phy->id = id;
+
+ return phy;
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ if (!phy || !phy->enable)
+ return -EINVAL;
+ return phy->enable(phy, is_dual_panel, bit_rate, esc_rate);
+}
+
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+ if (!phy || !phy->disable)
+ return -EINVAL;
+ return phy->disable(phy);
+}
+
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+ u32 *clk_pre, u32 *clk_post)
+{
+ if (!phy)
+ return;
+ if (clk_pre)
+ *clk_pre = phy->timing.clk_pre;
+ if (clk_post)
+ *clk_post = phy->timing.clk_post;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index eeed006eed13..6997ec636c6d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -53,6 +53,23 @@ struct pll_rate {
/* NOTE: keep sorted highest freq to lowest: */
static const struct pll_rate freqtbl[] = {
+ { 154000000, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
/* 1080p60/1080p50 case */
{ 148500000, {
{ 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
@@ -112,6 +129,23 @@ static const struct pll_rate freqtbl[] = {
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0, 0 } }
},
+ { 74176000, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
{ 65000000, {
{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index cde25009203a..dbc068988377 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -83,7 +83,8 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
};
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
@@ -93,7 +94,8 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c276624290af..b9a4ded6e400 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,9 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 29312 bytes, from 2015-03-23 21:18:48)
- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-03-23 20:38:49)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -37,11 +37,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum mdp5_intf {
+enum mdp5_intf_type {
+ INTF_DISABLED = 0,
INTF_DSI = 1,
INTF_HDMI = 3,
INTF_LCDC = 5,
INTF_eDP = 9,
+ INTF_VIRTUAL = 100,
+ INTF_WB = 101,
};
enum mdp5_intfnum {
@@ -67,11 +70,11 @@ enum mdp5_pipe {
enum mdp5_ctl_mode {
MODE_NONE = 0,
- MODE_ROT0 = 1,
- MODE_ROT1 = 2,
- MODE_WB0 = 3,
- MODE_WB1 = 4,
- MODE_WFD = 5,
+ MODE_WB_0_BLOCK = 1,
+ MODE_WB_1_BLOCK = 2,
+ MODE_WB_0_LINE = 3,
+ MODE_WB_1_LINE = 4,
+ MODE_WB_2_LINE = 5,
};
enum mdp5_pack_3d {
@@ -94,33 +97,6 @@ enum mdp5_pipe_bwc {
BWC_Q_MED = 2,
};
-enum mdp5_client_id {
- CID_UNUSED = 0,
- CID_VIG0_Y = 1,
- CID_VIG0_CR = 2,
- CID_VIG0_CB = 3,
- CID_VIG1_Y = 4,
- CID_VIG1_CR = 5,
- CID_VIG1_CB = 6,
- CID_VIG2_Y = 7,
- CID_VIG2_CR = 8,
- CID_VIG2_CB = 9,
- CID_DMA0_Y = 10,
- CID_DMA0_CR = 11,
- CID_DMA0_CB = 12,
- CID_DMA1_Y = 13,
- CID_DMA1_CR = 14,
- CID_DMA1_CB = 15,
- CID_RGB0 = 16,
- CID_RGB1 = 17,
- CID_RGB2 = 18,
- CID_VIG3_Y = 19,
- CID_VIG3_CR = 20,
- CID_VIG3_CB = 21,
- CID_RGB3 = 22,
- CID_MAX = 23,
-};
-
enum mdp5_cursor_format {
CURSOR_FMT_ARGB8888 = 0,
CURSOR_FMT_ARGB1555 = 2,
@@ -144,30 +120,25 @@ enum mdp5_data_format {
DATA_FORMAT_YUV = 1,
};
-#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
-#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
-#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
-#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008
-#define MDP5_IRQ_INTF0_WB_WFD 0x00000010
-#define MDP5_IRQ_INTF1_WB_WFD 0x00000020
-#define MDP5_IRQ_INTF2_WB_WFD 0x00000040
-#define MDP5_IRQ_INTF3_WB_WFD 0x00000080
-#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100
-#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200
-#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400
-#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800
-#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000
-#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000
-#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000
-#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000
-#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000
-#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000
-#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000
-#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
-#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
-#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
-#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
-#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
+#define MDP5_IRQ_WB_0_DONE 0x00000001
+#define MDP5_IRQ_WB_1_DONE 0x00000002
+#define MDP5_IRQ_WB_2_DONE 0x00000010
+#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100
+#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200
+#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400
+#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800
+#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000
+#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000
+#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000
+#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000
+#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000
+#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000
+#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000
+#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000
+#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000
+#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000
+#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000
+#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000
#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
#define MDP5_IRQ_INTF0_VSYNC 0x02000000
#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
@@ -176,136 +147,186 @@ enum mdp5_data_format {
#define MDP5_IRQ_INTF2_VSYNC 0x20000000
#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
#define MDP5_IRQ_INTF3_VSYNC 0x80000000
-#define REG_MDP5_HW_VERSION 0x00000000
+#define REG_MDSS_HW_VERSION 0x00000000
+#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDSS_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
+}
+#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDSS_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
+}
+#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDSS_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDSS_HW_INTR_STATUS 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
-#define REG_MDP5_HW_INTR_STATUS 0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001
-#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020
-#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100
-#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000
+static inline uint32_t __offset_MDP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->mdp.base[0]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-#define REG_MDP5_MDP_VERSION 0x00000100
-#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000
-#define MDP5_MDP_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
+#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
{
- return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+ return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
}
-#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_MDP_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
{
- return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+ return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
}
-#define REG_MDP5_DISP_INTF_SEL 0x00000104
-#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
}
-#define REG_MDP5_INTR_EN 0x00000110
+static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
-#define REG_MDP5_INTR_STATUS 0x00000114
+static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
-#define REG_MDP5_INTR_CLEAR 0x00000118
+static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_EN 0x0000011c
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_STATUS 0x00000120
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_CLEAR 0x00000124
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
-static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
+#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
-static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
}
-static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
}
static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
{
switch (idx) {
- case IGC_VIG: return 0x00000300;
- case IGC_RGB: return 0x00000310;
- case IGC_DMA: return 0x00000320;
- case IGC_DSPP: return 0x00000400;
+ case IGC_VIG: return 0x00000200;
+ case IGC_RGB: return 0x00000210;
+ case IGC_DMA: return 0x00000220;
+ case IGC_DSPP: return 0x00000300;
default: return INVALID_IDX(idx);
}
}
-static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
-static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
-static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
-#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
-static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
{
- return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+ return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
}
-#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+#define REG_MDP5_SPLIT_DPL_EN 0x000003f4
+
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000003f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000004f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
static inline uint32_t __offset_CTL(uint32_t idx)
{
@@ -437,11 +458,19 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __o
#define MDP5_CTL_FLUSH_DSPP0 0x00002000
#define MDP5_CTL_FLUSH_DSPP1 0x00004000
#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_WB 0x00010000
#define MDP5_CTL_FLUSH_CTL 0x00020000
#define MDP5_CTL_FLUSH_VIG3 0x00040000
#define MDP5_CTL_FLUSH_RGB3 0x00080000
#define MDP5_CTL_FLUSH_LM5 0x00100000
#define MDP5_CTL_FLUSH_DSPP3 0x00200000
+#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
+#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
+#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
+#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
+#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
+#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
@@ -1117,6 +1146,94 @@ static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc
static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
+static inline uint32_t __offset_PP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->pp.base[0]);
+ case 1: return (mdp5_cfg->pp.base[1]);
+ case 2: return (mdp5_cfg->pp.base[2]);
+ case 3: return (mdp5_cfg->pp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
+#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
+}
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
+#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
+}
+#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
+#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
+
static inline uint32_t __offset_INTF(uint32_t idx)
{
switch (idx) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index b0a44310cf2a..e001e6b2296a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,23 @@ const struct mdp5_cfg_hw *mdp5_cfg = NULL;
const struct mdp5_cfg_hw msm8x74_config = {
.name = "msm8x74",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
.smp = {
.mmb_count = 22,
.mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
@@ -57,27 +67,49 @@ const struct mdp5_cfg_hw msm8x74_config = {
.count = 2,
.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
},
+ .pp = {
+ .count = 3,
+ .base = { 0x12d00, 0x12e00, 0x12f00 },
+ },
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
+ .intfs = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
.max_clk = 200000000,
};
const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
.smp = {
.mmb_count = 44,
.mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
.reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
- .reserved[CID_RGB0] = 2,
- .reserved[CID_RGB1] = 2,
- .reserved[CID_RGB2] = 2,
- .reserved[CID_RGB3] = 2,
+ .reserved = {
+ /* Two SMP blocks are statically tied to RGB pipes: */
+ [16] = 2, [17] = 2, [18] = 2, [22] = 2,
+ },
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x003fffff,
},
.pipe_vig = {
.count = 4,
@@ -105,10 +137,69 @@ const struct mdp5_cfg_hw apq8084_config = {
.count = 3,
.base = { 0x13500, 0x13700, 0x13900 },
},
+ .pp = {
+ .count = 4,
+ .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+ },
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
+ .intfs = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ .max_clk = 320000000,
+};
+
+const struct mdp5_cfg_hw msm8x16_config = {
+ .name = "msm8x16",
+ .mdp = {
+ .count = 1,
+ .base = { 0x01000 },
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x05000 },
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x15000, 0x17000 },
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x25000 },
+ },
+ .lm = {
+ .count = 2, /* LM0 and LM3 */
+ .base = { 0x45000, 0x48000 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x55000 },
+
+ },
+ .intf = {
+ .count = 1, /* INTF_1 */
+ .base = { 0x6B800 },
+ },
+ /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
.max_clk = 320000000,
};
@@ -116,6 +207,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74_config } },
{ .revision = 2, .config = { .hw = &msm8x74_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
+ { .revision = 6, .config = { .hw = &msm8x16_config } },
};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index dba4d52cceeb..3a551b0892d8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -44,26 +44,38 @@ struct mdp5_lm_block {
uint32_t nb_stages; /* number of stages per blender */
};
+struct mdp5_ctl_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t flush_hw_mask; /* FLUSH register's hardware mask */
+};
+
struct mdp5_smp_block {
int mmb_count; /* number of SMP MMBs */
int mmb_size; /* MMB: size in bytes */
+ uint32_t clients[MAX_CLIENTS]; /* SMP port allocation per pipe */
mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
};
+#define MDP5_INTF_NUM_MAX 5
+
struct mdp5_cfg_hw {
char *name;
+ struct mdp5_sub_block mdp;
struct mdp5_smp_block smp;
- struct mdp5_sub_block ctl;
+ struct mdp5_ctl_block ctl;
struct mdp5_sub_block pipe_vig;
struct mdp5_sub_block pipe_rgb;
struct mdp5_sub_block pipe_dma;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
+ struct mdp5_sub_block pp;
struct mdp5_sub_block intf;
+ u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+
uint32_t max_clk;
};
@@ -84,6 +96,10 @@ const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hn
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+#define mdp5_cfg_intf_is_virtual(intf_type) ({ \
+ typeof(intf_type) __val = (intf_type); \
+ (__val) >= INTF_VIRTUAL ? true : false; })
+
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
new file mode 100644
index 000000000000..e4e89567f51d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_cmd_encoder {
+ struct drm_encoder base;
+ struct mdp5_interface intf;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[0],
+}, {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
+{
+ mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
+ &mdp_bus_scale_table);
+ DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
+}
+
+static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
+{
+ if (mdp5_cmd_enc->bsc) {
+ msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
+ mdp5_cmd_enc->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
+{
+ if (mdp5_cmd_enc->bsc) {
+ DBG("set bus scaling: %d", idx);
+ /* HACK: scaling down, and then immediately back up
+ * seems to leave things broken (underflow).. so
+ * never disable:
+ */
+ idx = 1;
+ msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
+static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
+static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {}
+#endif
+
+#define VSYNC_CLK_RATE 19200000
+static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct device *dev = encoder->dev->dev;
+ u32 total_lines_x100, vclks_line, cfg;
+ long vsync_clk_speed;
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+
+ if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
+ dev_err(dev, "vsync_clk is not initialized\n");
+ return -EINVAL;
+ }
+
+ total_lines_x100 = mode->vtotal * mode->vrefresh;
+ if (!total_lines_x100) {
+ dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ __func__, mode->vtotal, mode->vrefresh);
+ return -EINVAL;
+ }
+
+ vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
+ if (vsync_clk_speed <= 0) {
+ dev_err(dev, "vsync_clk round rate failed %ld\n",
+ vsync_clk_speed);
+ return -EINVAL;
+ }
+ vclks_line = vsync_clk_speed * 100 / total_lines_x100;
+
+ cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
+ | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
+ cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
+ MDP5_PP_SYNC_THRESH_START(4) |
+ MDP5_PP_SYNC_THRESH_CONTINUE(4));
+
+ return 0;
+}
+
+static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ int ret;
+
+ ret = clk_set_rate(mdp5_kms->vsync_clk,
+ clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
+ if (ret) {
+ dev_err(encoder->dev->dev,
+ "vsync_clk clk_set_rate failed, %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(mdp5_kms->vsync_clk);
+ if (ret) {
+ dev_err(encoder->dev->dev,
+ "vsync_clk clk_prepare_enable failed, %d\n", ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1);
+
+ return 0;
+}
+
+static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
+ clk_disable_unprepare(mdp5_kms->vsync_clk);
+}
+
+static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ bs_fini(mdp5_cmd_enc);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_cmd_enc);
+}
+
+static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
+ .destroy = mdp5_cmd_encoder_destroy,
+};
+
+static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+ pingpong_tearcheck_setup(encoder, mode);
+ mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+}
+
+static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+ int lm = mdp5_crtc_get_lm(encoder->crtc);
+
+ if (WARN_ON(!mdp5_cmd_enc->enabled))
+ return;
+
+ /* Wait for the last frame done */
+ mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm));
+ pingpong_tearcheck_disable(encoder);
+
+ mdp5_ctl_set_encoder_state(ctl, false);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ bs_set(mdp5_cmd_enc, 0);
+
+ mdp5_cmd_enc->enabled = false;
+}
+
+static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+
+ if (WARN_ON(mdp5_cmd_enc->enabled))
+ return;
+
+ bs_set(mdp5_cmd_enc, 1);
+ if (pingpong_tearcheck_enable(encoder))
+ return;
+
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ mdp5_ctl_set_encoder_state(ctl, true);
+
+ mdp5_cmd_enc->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
+ .mode_fixup = mdp5_cmd_encoder_mode_fixup,
+ .mode_set = mdp5_cmd_encoder_mode_set,
+ .disable = mdp5_cmd_encoder_disable,
+ .enable = mdp5_cmd_encoder_enable,
+};
+
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_cmd_enc->intf.num;
+
+ /* Switch slave encoder's trigger MUX, to use the master's
+ * start signal for the slave encoder
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ else
+ return -EINVAL;
+
+ /* Smart Panel, Sync mode */
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+
+ /* Make sure clocks are on when connectors call this function. */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
+
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ mdp5_disable(mdp5_kms);
+
+ return 0;
+}
+
+/* initialize command mode encoder */
+struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_cmd_encoder *mdp5_cmd_enc;
+ int ret;
+
+ if (WARN_ON((intf->type != INTF_DSI) ||
+ (intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL);
+ if (!mdp5_cmd_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
+ encoder = &mdp5_cmd_enc->base;
+
+ drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
+ DRM_MODE_ENCODER_DSI);
+
+ drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
+
+ bs_init(mdp5_cmd_enc);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_cmd_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 2f2863cf8b45..c1530772187d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -82,8 +82,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
-#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
-
static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -110,8 +108,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
drm_atomic_crtc_for_each_plane(plane, crtc) {
flush_mask |= mdp5_plane_get_flush(plane);
}
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
- flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
+
+ flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
crtc_flush(crtc, flush_mask);
}
@@ -298,8 +296,6 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
- crtc_flush_all(crtc);
-
mdp5_crtc->enabled = true;
}
@@ -444,13 +440,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- struct drm_gem_object *cursor_bo, *old_bo;
+ struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h;
+ bool cursor_enable = true;
unsigned long flags;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -463,7 +460,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
- return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
+ cursor_enable = false;
+ goto set_cursor;
}
cursor_bo = drm_gem_object_lookup(dev, file, handle);
@@ -504,11 +502,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
- ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
- if (ret)
+set_cursor:
+ ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+ if (ret) {
+ dev_err(dev->dev, "failed to %sable cursor: %d\n",
+ cursor_enable ? "en" : "dis", ret);
goto end;
+ }
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
crtc_flush(crtc, flush_mask);
end:
@@ -613,64 +614,39 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
}
/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
- enum mdp5_intf intf_id)
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- uint32_t flush_mask = 0;
- uint32_t intf_sel;
- unsigned long flags;
+ int lm = mdp5_crtc_get_lm(crtc);
/* now that we know what irq's we want: */
- mdp5_crtc->err.irqmask = intf2err(intf);
- mdp5_crtc->vblank.irqmask = intf2vblank(intf);
- mdp_irq_update(&mdp5_kms->base);
-
- spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
-
- switch (intf) {
- case 0:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
- break;
- case 1:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
- break;
- case 2:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
- break;
- case 3:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
- break;
- default:
- BUG();
- break;
- }
+ mdp5_crtc->err.irqmask = intf2err(intf->num);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
- spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+ /* Register command mode Pingpong done as vblank for now,
+ * so that atomic commit waits for it to finish.
+ * Ideally, in the future, we should take rd_ptr done as vblank,
+ * and let atomic commit wait for pingpong done for command mode.
+ */
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
+ else
+ mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
+ mdp_irq_update(&mdp5_kms->base);
- DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
- flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
-
- crtc_flush(crtc, flush_mask);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+}
- if (WARN_ON(!crtc))
- return -EINVAL;
-
- return mdp5_crtc->lm;
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
}
/* initialize crtc */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 151129032d16..5488b687c8d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,23 +33,31 @@
* requested by the client (in mdp5_crtc_mode_set()).
*/
+struct op_mode {
+ struct mdp5_interface intf;
+
+ bool encoder_enabled;
+ uint32_t start_mask;
+};
+
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
+ int lm;
/* whether this CTL has been allocated or not: */
bool busy;
- /* memory output connection (@see mdp5_ctl_mode): */
- u32 mode;
+ /* Operation Mode Configuration for the Pipeline */
+ struct op_mode pipeline;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
u32 reg_offset;
- /* flush mask used to commit CTL registers */
- u32 flush_mask;
+ /* when do CTL registers need to be flushed? (mask of trigger bits) */
+ u32 pending_ctl_trigger;
bool cursor_on;
@@ -63,6 +71,9 @@ struct mdp5_ctl_manager {
u32 nlm;
u32 nctl;
+ /* to filter out non-present bits in the current hardware config */
+ u32 flush_hw_mask;
+
/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
@@ -94,31 +105,172 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
return mdp5_read(mdp5_kms, reg);
}
+static void set_display_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
+{
+ unsigned long flags;
+ u32 intf_sel;
+
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
+
+ switch (intf->num) {
+ case 0:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+}
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf)
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
unsigned long flags;
- static const enum mdp5_intfnum intfnum[] = {
- INTF0, INTF1, INTF2, INTF3,
- };
+ u32 ctl_op = 0;
+
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
+
+ switch (intf->type) {
+ case INTF_DSI:
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ ctl_op |= MDP5_CTL_OP_CMD_MODE;
+ break;
+
+ case INTF_WB:
+ if (intf->mode == MDP5_INTF_WB_MODE_LINE)
+ ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
+ break;
+
+ default:
+ break;
+ }
spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
- MDP5_CTL_OP_MODE(ctl->mode) |
- MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+ memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
+
+ ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
+ mdp_ctl_flush_mask_encoder(intf);
+
+ /* Virtual interfaces need not set a display intf (e.g.: Writeback) */
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ set_display_intf(mdp5_kms, intf);
+
+ set_ctl_op(ctl, intf);
return 0;
}
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+static bool start_signal_needed(struct mdp5_ctl *ctl)
+{
+ struct op_mode *pipeline = &ctl->pipeline;
+
+ if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+ return false;
+
+ switch (pipeline->intf.type) {
+ case INTF_WB:
+ return true;
+ case INTF_DSI:
+ return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+ default:
+ return false;
+ }
+}
+
+/*
+ * send_start_signal() - Overlay Processor Start Signal
+ *
+ * For a given control operation (display pipeline), a START signal needs to be
+ * issued in order to kick off operation and activate all layers.
+ * e.g.: DSI command mode, Writeback
+ */
+static void send_start_signal(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+static void refill_start_mask(struct mdp5_ctl *ctl)
+{
+ struct op_mode *pipeline = &ctl->pipeline;
+ struct mdp5_interface *intf = &ctl->pipeline.intf;
+
+ pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+
+ /*
+ * Writeback encoder needs to program & flush
+ * address registers for each page flip..
+ */
+ if (intf->type == INTF_WB)
+ pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+}
+
+/**
+ * mdp5_ctl_set_encoder_state() - set the encoder state
+ *
+ * @enabled: true when the encoder is ready for data streaming; false otherwise.
+ *
+ * Note:
+ * This encoder state is needed to trigger START signal (data path kickoff).
+ */
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+{
+ if (WARN_ON(!ctl))
+ return -EINVAL;
+
+ ctl->pipeline.encoder_enabled = enabled;
+ DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+
+ if (start_signal_needed(ctl)) {
+ send_start_signal(ctl);
+ refill_start_mask(ctl);
+ }
+
+ return 0;
+}
+
+/*
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
- int lm;
+ int lm = ctl->lm;
- lm = mdp5_crtc_get_lm(ctl->crtc);
if (unlikely(WARN_ON(lm < 0))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
ctl->id, lm);
@@ -138,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
ctl->cursor_on = enable;
return 0;
}
-
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
unsigned long flags;
@@ -157,37 +309,122 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+
return 0;
}
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
+{
+ if (intf->type == INTF_WB)
+ return MDP5_CTL_FLUSH_WB;
+
+ switch (intf->num) {
+ case 0: return MDP5_CTL_FLUSH_TIMING_0;
+ case 1: return MDP5_CTL_FLUSH_TIMING_1;
+ case 2: return MDP5_CTL_FLUSH_TIMING_2;
+ case 3: return MDP5_CTL_FLUSH_TIMING_3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+ switch (cursor_id) {
+ case 0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case 1: return MDP5_CTL_FLUSH_CURSOR_1;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_lm(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ case 5: return MDP5_CTL_FLUSH_LM5;
+ default: return 0;
+ }
+}
+
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ u32 sw_mask = 0;
+#define BIT_NEEDS_SW_FIX(bit) \
+ (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
+
+ /* for some targets, cursor bit is the same as LM bit */
+ if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
+ sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+
+ return sw_mask;
+}
+
+/**
+ * mdp5_ctl_commit() - Register Flush
+ *
+ * The flush register is used to indicate several registers are all
+ * programmed, and are safe to update to the back copy of the double
+ * buffered registers.
+ *
+ * Some registers' FLUSH bits are shared when the hardware does not have
+ * dedicated bits for them; handling these is the job of fix_sw_flush().
+ *
+ * CTL registers need to be flushed in some circumstances; if that is the
+ * case, some trigger bits will be present in both flush mask and
+ * ctl->pending_ctl_trigger.
+ */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ struct op_mode *pipeline = &ctl->pipeline;
unsigned long flags;
- if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
- int lm = mdp5_crtc_get_lm(ctl->crtc);
+ pipeline->start_mask &= ~flush_mask;
- if (unlikely(WARN_ON(lm < 0))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
- ctl->id, lm);
- return -EINVAL;
- }
+ VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
+ pipeline->start_mask, ctl->pending_ctl_trigger);
- /* for current targets, cursor bit is the same as LM bit */
- flush_mask |= mdp_ctl_flush_mask_lm(lm);
+ if (ctl->pending_ctl_trigger & flush_mask) {
+ flush_mask |= MDP5_CTL_FLUSH_CTL;
+ ctl->pending_ctl_trigger = 0;
}
- spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
- spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ flush_mask |= fix_sw_flush(ctl, flush_mask);
- return 0;
-}
+ flush_mask &= ctl_mgr->flush_hw_mask;
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
-{
- return ctl->flush_mask;
+ if (flush_mask) {
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+
+ if (start_signal_needed(ctl)) {
+ send_start_signal(ctl);
+ refill_start_mask(ctl);
+ }
+
+ return 0;
}
void mdp5_ctl_release(struct mdp5_ctl *ctl)
@@ -208,6 +445,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl)
DBG("CTL %d released", ctl->id);
}
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
+{
+ return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
+
/*
* mdp5_ctl_request() - CTL dynamic allocation
*
@@ -235,8 +477,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
ctl = &ctl_mgr->ctls[c];
+ ctl->lm = mdp5_crtc_get_lm(crtc);
ctl->crtc = crtc;
ctl->busy = true;
+ ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
unlock:
@@ -267,7 +511,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
struct mdp5_ctl_manager *ctl_mgr;
- const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+ const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -289,6 +533,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
ctl_mgr->dev = dev;
ctl_mgr->nlm = hw_cfg->lm.count;
ctl_mgr->nctl = ctl_cfg->count;
+ ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
spin_lock_init(&ctl_mgr->pool_lock);
/* initialize each CTL of the pool: */
@@ -303,9 +548,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
- ctl->mode = MODE_NONE;
ctl->reg_offset = ctl_cfg->base[c];
- ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
ctl->busy = false;
spin_lock_init(&ctl->hw_lock);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index ad48788efeea..7a62000994a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -33,19 +33,13 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf);
+struct mdp5_interface;
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
-
-/* @blend_cfg: see LM blender config definition below */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
-
-/* @flush_mask: see CTL flush masks definitions below */
-int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
-
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
/*
* blend_cfg (LM blender config):
@@ -72,51 +66,32 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
}
/*
- * flush_mask (CTL flush masks):
+ * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
+ *
+ * @blend_cfg: see LM blender config definition below
*
- * The following functions allow each DRM entity to get and store
- * their own flush mask.
- * Once stored, these masks will then be accessed through each DRM's
- * interface and used by the caller of mdp5_ctl_commit() to specify
- * which block(s) need to be flushed through @flush_mask parameter.
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
-#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
+/**
+ * mdp_ctl_flush_mask...() - Register FLUSH masks
+ *
+ * These masks are used to specify which block(s) need to be flushed
+ * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask).
+ */
+u32 mdp_ctl_flush_mask_lm(int lm);
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
+u32 mdp_ctl_flush_mask_cursor(int cursor_id);
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
-static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
-{
- /* TODO: use id once multiple cursor support is present */
- (void)cursor_id;
+/* @flush_mask: see CTL flush masks definitions below */
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
- return MDP5_CTL_FLUSH_CURSOR_DUMMY;
-}
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
-static inline u32 mdp_ctl_flush_mask_lm(int lm)
-{
- switch (lm) {
- case 0: return MDP5_CTL_FLUSH_LM0;
- case 1: return MDP5_CTL_FLUSH_LM1;
- case 2: return MDP5_CTL_FLUSH_LM2;
- case 5: return MDP5_CTL_FLUSH_LM5;
- default: return 0;
- }
-}
-static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
-{
- switch (pipe) {
- case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
- case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
- case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
- case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
- case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
- case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
- case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
- case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
- case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
- case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
- default: return 0;
- }
-}
#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index af0e02fa4f48..1188f4bf1e60 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -23,8 +23,7 @@
struct mdp5_encoder {
struct drm_encoder base;
- int intf;
- enum mdp5_intf intf_id;
+ struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
@@ -126,7 +125,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- int intf = mdp5_encoder->intf;
+ int intf = mdp5_encoder->intf.num;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
@@ -188,7 +187,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
- if (mdp5_encoder->intf_id == INTF_eDP) {
+ if (mdp5_encoder->intf.type == INTF_eDP) {
display_v_start += mode->htotal - mode->hsync_start;
display_v_end -= mode->hsync_start - mode->hdisplay;
}
@@ -218,21 +217,29 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
}
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ int lm = mdp5_crtc_get_lm(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf.num;
unsigned long flags;
if (WARN_ON(!mdp5_encoder->enabled))
return;
+ mdp5_ctl_set_encoder_state(ctl, false);
+
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -242,7 +249,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
bs_set(mdp5_encoder, 0);
@@ -253,19 +260,21 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf.num;
unsigned long flags;
if (WARN_ON(mdp5_encoder->enabled))
return;
- mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
- mdp5_encoder->intf_id);
-
bs_set(mdp5_encoder, 1);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ mdp5_ctl_set_encoder_state(ctl, true);
mdp5_encoder->enabled = true;
}
@@ -277,12 +286,51 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.enable = mdp5_encoder_enable,
};
+int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_encoder->intf.num;
+
+ /* Switch slave encoder's TimingGen Sync mode,
+ * to use the master's enable signal for the slave encoder.
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ else
+ return -EINVAL;
+
+ /* Make sure clocks are on when connectors call this function. */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+ MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+ /* Dumb Panel, Sync mode */
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ mdp5_disable(mdp5_kms);
+
+ return 0;
+}
+
/* initialize encoder */
-struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
- enum mdp5_intf intf_id)
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
+ int enc_type = (intf->type == INTF_DSI) ?
+ DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
int ret;
mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
@@ -291,14 +339,13 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
goto fail;
}
- mdp5_encoder->intf = intf;
- mdp5_encoder->intf_id = intf_id;
+ memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
spin_lock_init(&mdp5_encoder->intf_lock);
- drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type);
+
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
bs_init(mdp5_encoder);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index a9407105b9b7..33bd4c6160dd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -23,7 +23,7 @@
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -35,8 +35,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -61,7 +61,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -73,8 +73,8 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
unsigned int id;
uint32_t status;
- status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+ status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
VERB("status=%08x", status);
@@ -91,13 +91,13 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
uint32_t intr;
- intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+ intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
- if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
+ if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms);
- intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
+ intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
}
while (intr) {
@@ -128,10 +128,10 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
* can register to get their irq's delivered
*/
-#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
- MDP5_HW_INTR_STATUS_INTR_DSI1 | \
- MDP5_HW_INTR_STATUS_INTR_HDMI | \
- MDP5_HW_INTR_STATUS_INTR_EDP)
+#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
+ MDSS_HW_INTR_STATUS_INTR_DSI1 | \
+ MDSS_HW_INTR_STATUS_INTR_HDMI | \
+ MDSS_HW_INTR_STATUS_INTR_EDP)
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 92b61db5754c..dfa8beb9343a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -58,7 +58,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
@@ -86,6 +86,18 @@ static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
return rate;
}
+static int mdp5_set_split_display(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode)
+{
+ if (is_cmd_mode)
+ return mdp5_cmd_encoder_set_split_display(encoder,
+ slave_encoder);
+ else
+ return mdp5_encoder_set_split_display(encoder, slave_encoder);
+}
+
static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -131,6 +143,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.complete_commit = mdp5_complete_commit,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
+ .set_split_display = mdp5_set_split_display,
.preclose = mdp5_preclose,
.destroy = mdp5_destroy,
},
@@ -161,6 +174,134 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
return 0;
}
+static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
+ enum mdp5_intf_type intf_type, int intf_num,
+ enum mdp5_intf_mode intf_mode)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct mdp5_interface intf = {
+ .num = intf_num,
+ .type = intf_type,
+ .mode = intf_mode,
+ };
+
+ if ((intf_type == INTF_DSI) &&
+ (intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
+ encoder = mdp5_cmd_encoder_init(dev, &intf);
+ else
+ encoder = mdp5_encoder_init(dev, &intf);
+
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct encoder\n");
+ return encoder;
+ }
+
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ return encoder;
+}
+
+static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
+{
+ const int intf_cnt = hw_cfg->intf.count;
+ const u32 *intfs = hw_cfg->intfs;
+ int id = 0, i;
+
+ for (i = 0; i < intf_cnt; i++) {
+ if (intfs[i] == INTF_DSI) {
+ if (intf_num == i)
+ return id;
+
+ id++;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ const struct mdp5_cfg_hw *hw_cfg =
+ mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num];
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ switch (intf_type) {
+ case INTF_DISABLED:
+ break;
+ case INTF_eDP:
+ if (!priv->edp)
+ break;
+
+ encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
+ MDP5_INTF_MODE_NONE);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_edp_modeset_init(priv->edp, dev, encoder);
+ break;
+ case INTF_HDMI:
+ if (!priv->hdmi)
+ break;
+
+ encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
+ MDP5_INTF_MODE_NONE);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ break;
+ case INTF_DSI:
+ {
+ int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
+ struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
+ enum mdp5_intf_mode mode;
+ int i;
+
+ if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ dev_err(dev->dev, "failed to find dsi from intf %d\n",
+ intf_num);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!priv->dsi[dsi_id])
+ break;
+
+ for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+ mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
+ MDP5_INTF_DSI_MODE_COMMAND :
+ MDP5_INTF_DSI_MODE_VIDEO;
+ dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
+ intf_num, mode);
+ if (IS_ERR(dsi_encs[i])) {
+ ret = PTR_ERR(dsi_encs[i]);
+ break;
+ }
+ }
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+ break;
+ }
+ default:
+ dev_err(dev->dev, "unknown intf: %d\n", intf_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe crtcs[] = {
@@ -171,7 +312,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- struct drm_encoder *encoder;
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
@@ -222,44 +362,13 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
}
}
- if (priv->hdmi) {
- /* Construct encoder for HDMI: */
- encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
- priv->encoders[priv->num_encoders++] = encoder;
-
- ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
- goto fail;
- }
- }
-
- if (priv->edp) {
- /* Construct encoder for eDP: */
- encoder = mdp5_encoder_init(dev, 0, INTF_eDP);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct eDP encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
- priv->encoders[priv->num_encoders++] = encoder;
-
- /* Construct bridge/connector for eDP: */
- ret = msm_edp_modeset_init(priv->edp, dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize eDP: %d\n",
- ret);
+ /* Construct an encoder and perform the modeset initialization of
+ * its connector device for each external display interface.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) {
+ ret = modeset_init_intf(mdp5_kms, i);
+ if (ret)
goto fail;
- }
}
return 0;
@@ -274,11 +383,11 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms,
uint32_t version;
mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
mdp5_disable(mdp5_kms);
- *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
- *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+ *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
@@ -321,6 +430,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->dev = dev;
+ /* mdp5_kms->mmio actually represents the MDSS base address */
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
@@ -403,8 +513,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- for (i = 0; i < config->hw->intf.count; i++)
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (!config->hw->intf.base[i] ||
+ mdp5_cfg_intf_is_virtual(config->hw->intfs[i]))
+ continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+ }
mdp5_disable(mdp5_kms);
mdelay(16);
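Note: the modeset_init_intf() hunk above derives a DSI instance id from the raw interface index by counting INTF_DSI entries in the hardware config. A minimal standalone sketch of that mapping follows; the enum values and the intfs[] layout are hypothetical (the real ones come from mdp5_cfg.c), it only illustrates the walk performed by get_dsi_id_from_intf().

#include <stdio.h>

enum intf_type { INTF_DISABLED, INTF_DSI, INTF_HDMI, INTF_eDP };

/* same walk as get_dsi_id_from_intf(): the Nth INTF_DSI entry is DSI N */
static int dsi_id_from_intf(const enum intf_type *intfs, int cnt, int intf_num)
{
	int id = 0, i;

	for (i = 0; i < cnt; i++) {
		if (intfs[i] == INTF_DSI) {
			if (i == intf_num)
				return id;
			id++;
		}
	}
	return -1; /* stands in for -EINVAL */
}

int main(void)
{
	/* hypothetical config: INTF0 disabled, INTF1/INTF2 DSI, INTF3 HDMI */
	enum intf_type intfs[] = { INTF_DISABLED, INTF_DSI, INTF_DSI, INTF_HDMI };

	printf("intf 1 -> dsi %d\n", dsi_id_from_intf(intfs, 4, 1)); /* dsi 0 */
	printf("intf 2 -> dsi %d\n", dsi_id_from_intf(intfs, 4, 2)); /* dsi 1 */
	return 0;
}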
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 49d011e8835b..2c0de174cc09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -54,7 +54,7 @@ struct mdp5_kms {
/*
* lock to protect access to global resources: ie., following register:
- * - REG_MDP5_DISP_INTF_SEL
+ * - REG_MDP5_MDP_DISP_INTF_SEL
*/
spinlock_t resource_lock;
@@ -94,6 +94,24 @@ struct mdp5_plane_state {
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
+enum mdp5_intf_mode {
+ MDP5_INTF_MODE_NONE = 0,
+
+ /* Modes used for DSI interface (INTF_DSI type): */
+ MDP5_INTF_DSI_MODE_VIDEO,
+ MDP5_INTF_DSI_MODE_COMMAND,
+
+ /* Modes used for WB interface (INTF_WB type): */
+ MDP5_INTF_WB_MODE_BLOCK,
+ MDP5_INTF_WB_MODE_LINE,
+};
+
+struct mdp5_interface {
+ int num; /* display interface number */
+ enum mdp5_intf_type type;
+ enum mdp5_intf_mode mode;
+};
+
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
msm_writel(data, mdp5_kms->mmio + reg);
@@ -130,9 +148,9 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
}
}
-static inline uint32_t intf2err(int intf)
+static inline uint32_t intf2err(int intf_num)
{
- switch (intf) {
+ switch (intf_num) {
case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
@@ -141,9 +159,23 @@ static inline uint32_t intf2err(int intf)
}
}
-static inline uint32_t intf2vblank(int intf)
+#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer)
+static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
{
- switch (intf) {
+ /*
+ * In DSI command mode, the ping-pong block's read-pointer IRQ
+ * acts as the vblank signal. The ping-pong buffer used is bound
+ * to the layer mixer.
+ */
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
+ return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
+
+ if (intf->type == INTF_WB)
+ return MDP5_IRQ_WB_2_DONE;
+
+ switch (intf->num) {
case 0: return MDP5_IRQ_INTF0_VSYNC;
case 1: return MDP5_IRQ_INTF1_VSYNC;
case 2: return MDP5_IRQ_INTF2_VSYNC;
@@ -152,6 +184,11 @@ static inline uint32_t intf2vblank(int intf)
}
}
+static inline uint32_t lm2ppdone(int lm)
+{
+ return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
+}
+
int mdp5_disable(struct mdp5_kms *mdp5_kms);
int mdp5_enable(struct mdp5_kms *mdp5_kms);
@@ -197,13 +234,33 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
- enum mdp5_intf intf_id);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
-struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
- enum mdp5_intf intf_id);
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf);
+int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf);
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+#else
+static inline struct drm_encoder *mdp5_cmd_encoder_init(
+ struct drm_device *dev, struct mdp5_interface *intf)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int mdp5_cmd_encoder_set_split_display(
+ struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
+{
+ return -EINVAL;
+}
+#endif
#endif /* __MDP5_KMS_H__ */
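Note: the new intf2vblank()/lm2ppdone() helpers above shift a base ping-pong IRQ bit by the ping-pong id that GET_PING_PONG_ID() derives from the layer mixer. A small worked example, with made-up bit positions standing in for the generated MDP5_IRQ_* constants from mdp5.xml.h:

#include <stdio.h>

/* placeholder bit positions; the real values come from the generated headers */
#define IRQ_PING_PONG_0_RD_PTR (1u << 10)
#define IRQ_PING_PONG_0_DONE   (1u << 8)

/* layer mixer 5 shares ping-pong 3; all others map 1:1 */
#define GET_PING_PONG_ID(lm) (((lm) == 5) ? 3 : (lm))

int main(void)
{
	int lm;

	for (lm = 0; lm <= 5; lm++)
		printf("lm %d -> pp %d, rd_ptr irq 0x%08x, done irq 0x%08x\n",
		       lm, GET_PING_PONG_ID(lm),
		       IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm),
		       IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm));
	return 0;
}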
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 05cf9ab2a876..18a3d203b174 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -156,7 +156,8 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -166,7 +167,8 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -505,8 +507,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 1f795af89680..16702aecf0df 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -43,7 +43,7 @@
* set.
*
* 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
*
* 3) mdp5_smp_commit():
@@ -74,7 +74,7 @@ struct mdp5_smp {
spinlock_t state_lock;
mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
- struct mdp5_client_smp_state client_state[CID_MAX];
+ struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
static inline
@@ -85,27 +85,31 @@ struct mdp5_kms *get_kms(struct mdp5_smp *smp)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
- WARN_ON(plane >= pipe2nclients(pipe));
- switch (pipe) {
- case SSPP_VIG0: return CID_VIG0_Y + plane;
- case SSPP_VIG1: return CID_VIG1_Y + plane;
- case SSPP_VIG2: return CID_VIG2_Y + plane;
- case SSPP_RGB0: return CID_RGB0;
- case SSPP_RGB1: return CID_RGB1;
- case SSPP_RGB2: return CID_RGB2;
- case SSPP_DMA0: return CID_DMA0_Y + plane;
- case SSPP_DMA1: return CID_DMA1_Y + plane;
- case SSPP_VIG3: return CID_VIG3_Y + plane;
- case SSPP_RGB3: return CID_RGB3;
- default: return CID_UNUSED;
- }
+#define CID_UNUSED 0
+
+ if (WARN_ON(plane >= pipe2nclients(pipe)))
+ return CID_UNUSED;
+
+ /*
+ * Note on SMP clients:
+ * For ViG pipes, the fetch clients for the Y/Cr/Cb components are
+ * always consecutive, and in that order.
+ *
+ * e.g.:
+ * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
+ * Y plane's client ID is N
+ * Cr plane's client ID is N + 1
+ * Cb plane's client ID is N + 2
+ */
+
+ return mdp5_cfg->smp.clients[pipe] + plane;
}
/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
- enum mdp5_client_id cid, int nblks)
+ u32 cid, int nblks)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
const struct mdp5_cfg_hw *hw_cfg;
@@ -227,7 +231,7 @@ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
}
static void update_smp_state(struct mdp5_smp *smp,
- enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+ u32 cid, mdp5_smp_state_t *assigned)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
@@ -237,25 +241,25 @@ static void update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+ val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
switch (fld) {
case 0:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
}
}
@@ -267,7 +271,7 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
- enum mdp5_client_id cid = pipe2client(pipe, i);
+ u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
bitmap_or(assigned, ps->inuse, ps->pending, cnt);
@@ -283,7 +287,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
- enum mdp5_client_id cid = pipe2client(pipe, i);
+ u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
/*
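Note: the pipe2client() change above relies on the ViG fetch clients being consecutive, and update_smp_state() packs three client fields into each SMP_ALLOC register (idx = blk / 3, fld = blk % 3). A short sketch of both mappings, using a hypothetical client-id base in place of mdp5_cfg->smp.clients[SSPP_VIG0]:

#include <stdio.h>

/* hypothetical base, standing in for mdp5_cfg->smp.clients[SSPP_VIG0] */
#define VIG0_CLIENT_BASE 1

int main(void)
{
	const char *planes[] = { "Y", "Cr", "Cb" };
	int plane, blk;

	/* ViG fetch clients are consecutive: base, base + 1, base + 2 */
	for (plane = 0; plane < 3; plane++)
		printf("VIG0 %s plane -> SMP client %d\n",
		       planes[plane], VIG0_CLIENT_BASE + plane);

	/* each SMP_ALLOC register carries three client fields */
	for (blk = 0; blk < 6; blk++)
		printf("smp block %d -> ALLOC reg idx %d, field %d\n",
		       blk, blk / 3, blk % 3);
	return 0;
}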
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 18fd643b6e69..5b192128cda2 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -96,11 +96,11 @@ static void complete_commit(struct msm_commit *c)
kms->funcs->prepare_commit(kms, state);
- drm_atomic_helper_commit_pre_planes(dev, state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state);
- drm_atomic_helper_commit_post_planes(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
/* NOTE: _wait_for_vblanks() only waits for vblank on
* enabled CRTCs. So we end up faulting when disabling
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a4269119f9ea..47f4dd407671 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -182,41 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev)
return 4;
}
-static int msm_load(struct drm_device *dev, unsigned long flags)
-{
- struct platform_device *pdev = dev->platformdev;
- struct msm_drm_private *priv;
- struct msm_kms *kms;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
- return -ENOMEM;
- }
+#include <linux/of_address.h>
- dev->dev_private = priv;
-
- priv->wq = alloc_ordered_workqueue("msm", 0);
- init_waitqueue_head(&priv->fence_event);
- init_waitqueue_head(&priv->pending_crtcs_event);
-
- INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->fence_cbs);
+static int msm_init_vram(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned long size = 0;
+ int ret = 0;
- drm_mode_config_init(dev);
+#ifdef CONFIG_OF
+ /* In the device-tree world, we could have a 'memory-region'
+ * phandle, which gives us a link to our "vram". Allocation is
+ * nicely abstracted behind the DMA API, but we need to know the
+ * entire size up front to allocate it all in one go. There
+ * are two cases:
+ * 1) device with no IOMMU, in which case we need exclusive
+ * access to a VRAM carveout big enough for all gpu
+ * buffers
+ * 2) device with IOMMU, but where the bootloader puts up
+ * a splash screen. In this case, the VRAM carveout
+ * need only be large enough for fbdev fb. But we need
+ * exclusive access to the buffer to avoid the kernel
+ * using those pages for other purposes (which appears
+ * as corruption on screen before we have a chance to
+ * load and do initial modeset)
+ */
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
+ if (node) {
+ struct resource r;
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+ size = r.end - r.start;
+ DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+ } else
+#endif
/* if we have no IOMMU, then we need to use carveout allocator.
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
if (!iommu_present(&platform_bus_type)) {
+ DRM_INFO("using %s VRAM carveout\n", vram);
+ size = memparse(vram, NULL);
+ }
+
+ if (size) {
DEFINE_DMA_ATTRS(attrs);
- unsigned long size;
void *p;
- DBG("using %s VRAM carveout", vram);
- size = memparse(vram, NULL);
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
@@ -232,8 +248,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (!p) {
dev_err(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
- ret = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
dev_info(dev->dev, "VRAM: %08x->%08x\n",
@@ -241,6 +256,37 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
(uint32_t)(priv->vram.paddr + size));
}
+ return ret;
+}
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
+
+ INIT_LIST_HEAD(&priv->inactive_list);
+ INIT_LIST_HEAD(&priv->fence_cbs);
+
+ drm_mode_config_init(dev);
+
+ ret = msm_init_vram(dev);
+ if (ret)
+ goto fail;
+
platform_set_drvdata(pdev, dev);
/* Bind all our sub-components: */
@@ -1030,6 +1076,7 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_dsi_register();
msm_edp_register();
hdmi_register();
adreno_register();
@@ -1043,6 +1090,7 @@ static void __exit msm_drm_unregister(void)
hdmi_unregister();
adreno_unregister();
msm_edp_unregister();
+ msm_dsi_unregister();
}
module_init(msm_drm_register);
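Note: when no IOMMU is present, msm_init_vram() above sizes the carveout from the vram= module parameter via memparse(), the kernel helper that accepts the usual K/M/G suffixes. A rough user-space illustration of that suffix handling (not the kernel helper itself, and with a hypothetical parse_size() name):

#include <stdio.h>
#include <stdlib.h>

/* crude stand-in for the kernel's memparse(): number plus optional K/M/G suffix */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long val = strtoull(s, &end, 0);

	switch (*end) {
	case 'g': case 'G': val <<= 10; /* fall through */
	case 'm': case 'M': val <<= 10; /* fall through */
	case 'k': case 'K': val <<= 10;
	}
	return val;
}

int main(void)
{
	/* e.g. booting with msm.vram=16m reserves a 16 MiB carveout */
	printf("16m -> %llu bytes\n", parse_size("16m"));
	printf("1g  -> %llu bytes\n", parse_size("1g"));
	return 0;
}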
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9e8d441b61c3..04db4bd1b5b6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -82,6 +82,9 @@ struct msm_drm_private {
*/
struct msm_edp *edp;
+ /* DSI is shared by mdp4 and mdp5 */
+ struct msm_dsi *dsi[2];
+
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
@@ -236,6 +239,32 @@ void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
+struct msm_dsi;
+enum msm_dsi_encoder_id {
+ MSM_DSI_VIDEO_ENCODER_ID = 0,
+ MSM_DSI_CMD_ENCODER_ID = 1,
+ MSM_DSI_ENCODER_NUM = 2
+};
+#ifdef CONFIG_DRM_MSM_DSI
+void __init msm_dsi_register(void);
+void __exit msm_dsi_unregister(void);
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
+#else
+static inline void __init msm_dsi_register(void)
+{
+}
+static inline void __exit msm_dsi_unregister(void)
+{
+}
+static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
+ struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+ return -EINVAL;
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index df60f65728ff..95f6532df02d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
mutex_lock(&dev->struct_mutex);
- fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
+ MSM_BO_WC | MSM_BO_STOLEN);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(fbdev->bo)) {
ret = PTR_ERR(fbdev->bo);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 49dea4fb55ac..479d8af72bcb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj)
priv->vram.paddr;
}
+static bool use_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ return !msm_obj->vram_node;
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
int npages)
@@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
- if (iommu_present(&platform_bus_type))
+ if (use_pages(obj))
p = drm_gem_get_pages(obj);
else
p = get_pages_vram(obj, npages);
@@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj)
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
- if (iommu_present(&platform_bus_type))
+ if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else {
drm_mm_remove_node(msm_obj->vram_node);
@@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
unsigned sz;
+ bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
return -EINVAL;
}
- sz = sizeof(*msm_obj);
if (!iommu_present(&platform_bus_type))
+ use_vram = true;
+ else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+ use_vram = true;
+
+ if (WARN_ON(use_vram && !priv->vram.size))
+ return -EINVAL;
+
+ sz = sizeof(*msm_obj);
+ if (use_vram)
sz += sizeof(struct drm_mm_node);
msm_obj = kzalloc(sz, GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
- if (!iommu_present(&platform_bus_type))
+ if (use_vram)
msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags;
@@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
if (ret)
goto fail;
- if (iommu_present(&platform_bus_type)) {
+ if (use_pages(obj)) {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
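Note: the msm_gem_new_impl() change above decides between shmem-backed pages and the VRAM carveout: no IOMMU forces the carveout, and MSM_BO_STOLEN opts into it when a carveout exists (e.g. the fbdev buffer, so scanout can keep using the bootloader splash region). A compact sketch of that decision, with the flag value copied from msm_gem.h and the helper name chosen for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MSM_BO_STOLEN 0x10000000 /* internal flag from msm_gem.h */

static bool use_vram(bool iommu_present, unsigned flags, unsigned long vram_size)
{
	if (!iommu_present)
		return true;            /* no IOMMU: everything lives in the carveout */
	if ((flags & MSM_BO_STOLEN) && vram_size)
		return true;            /* stolen/splash buffer, carveout available */
	return false;                   /* normal case: shmem pages + IOMMU */
}

int main(void)
{
	printf("%d %d %d\n",
	       use_vram(false, 0, 1 << 20),
	       use_vram(true, MSM_BO_STOLEN, 1 << 20),
	       use_vram(true, 0, 1 << 20)); /* prints: 1 1 0 */
	return 0;
}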
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8fbbd0594c46..85d481e29276 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -21,6 +21,9 @@
#include <linux/reservation.h>
#include "msm_drv.h"
+/* Additional internal-use only BO flags: */
+#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -59,7 +62,7 @@ struct msm_gem_object {
struct reservation_object _resv;
/* For physically contiguous buffers. Used when we don't have
- * an IOMMU.
+ * an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
};
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 3a78cb48662b..a9f17bdb4530 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -47,6 +47,10 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
+ int (*set_split_display)(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 542bb266a0ab..3d96b49fe662 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -703,7 +703,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
@@ -724,7 +724,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
static void nv_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index d7b495a5f30c..af7249ca0f4b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -358,7 +358,7 @@ static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
static void nv04_dac_prepare(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
@@ -409,7 +409,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index f6ca343fd34a..7cfb0cbc9b6e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -244,7 +244,7 @@ static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
static void nv04_dfp_prepare(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
@@ -445,7 +445,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct dcb_output *dcbe = nv_encoder->dcb;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index f96237ef2a6b..4131be5507ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -109,7 +109,7 @@ nv04_display_create(struct drm_device *dev)
crtc->funcs->save(crtc);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *func = encoder->helper_private;
func->save(encoder);
}
@@ -138,7 +138,7 @@ nv04_display_destroy(struct drm_device *dev)
/* Restore state */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *func = encoder->helper_private;
func->restore(encoder);
}
@@ -169,7 +169,7 @@ nv04_display_init(struct drm_device *dev)
* on suspend too.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *func = encoder->helper_private;
func->restore(encoder);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index d9664b37def1..70e95cf6fd19 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -122,7 +122,7 @@ static void nv04_tv_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -164,7 +164,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 731d74efc1e5..d9720dda8385 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -405,7 +405,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
int head = nouveau_crtc(encoder->crtc)->index;
uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[
@@ -583,7 +583,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
nv17_tv_update_rescaler(encoder);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 5ad17fc36ae3..0b5af0fe8659 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -12,6 +12,13 @@
#define NV_DMA_TO_MEMORY 0x00000003
#define NV_DMA_IN_MEMORY 0x0000003d
+#define FERMI_TWOD_A 0x0000902d
+
+#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x0000903d
+
+#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
+#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+
#define NV04_DISP 0x00000046
#define NV03_CHANNEL_DMA 0x0000006b
@@ -25,6 +32,7 @@
#define G82_CHANNEL_GPFIFO 0x0000826f
#define FERMI_CHANNEL_GPFIFO 0x0000906f
#define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f
+#define MAXWELL_CHANNEL_GPFIFO_A 0x0000b06f
#define NV50_DISP 0x00005070
#define G82_DISP 0x00008270
@@ -84,6 +92,7 @@
#define KEPLER_C 0x0000a297
#define MAXWELL_A 0x0000b097
+#define MAXWELL_B 0x0000b197
#define FERMI_COMPUTE_A 0x000090c0
#define FERMI_COMPUTE_B 0x000091c0
@@ -92,6 +101,7 @@
#define KEPLER_COMPUTE_B 0x0000a1c0
#define MAXWELL_COMPUTE_A 0x0000b0c0
+#define MAXWELL_COMPUTE_B 0x0000b1c0
/*******************************************************************************
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 7e29c52617ea..e832f729e1b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -10,4 +10,7 @@ extern struct nvkm_oclass gf100_ce1_oclass;
extern struct nvkm_oclass gk104_ce0_oclass;
extern struct nvkm_oclass gk104_ce1_oclass;
extern struct nvkm_oclass gk104_ce2_oclass;
+extern struct nvkm_oclass gm204_ce0_oclass;
+extern struct nvkm_oclass gm204_ce1_oclass;
+extern struct nvkm_oclass gm204_ce2_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 05321ce7ab15..97cdeab8e44c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -116,6 +116,7 @@ extern struct nvkm_oclass *gf100_fifo_oclass;
extern struct nvkm_oclass *gk104_fifo_oclass;
extern struct nvkm_oclass *gk20a_fifo_oclass;
extern struct nvkm_oclass *gk208_fifo_oclass;
+extern struct nvkm_oclass *gm204_fifo_oclass;
int nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
struct nvkm_notify *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 93ef1f2bfac4..7cbe20280760 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -38,7 +38,7 @@ nvkm_gr(void *obj)
}
#define nvkm_gr_create(p,e,c,y,d) \
- nvkm_engine_create((p), (e), (c), (y), "PGR", "graphics", (d))
+ nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
#define nvkm_gr_destroy(d) \
nvkm_engine_destroy(&(d)->base)
#define nvkm_gr_init(d) \
@@ -72,6 +72,8 @@ extern struct nvkm_oclass *gk110_gr_oclass;
extern struct nvkm_oclass *gk110b_gr_oclass;
extern struct nvkm_oclass *gk208_gr_oclass;
extern struct nvkm_oclass *gm107_gr_oclass;
+extern struct nvkm_oclass *gm204_gr_oclass;
+extern struct nvkm_oclass *gm206_gr_oclass;
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index d104c1aac807..1bcb763cfca0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -45,4 +45,5 @@ nvkm_instmem(void *obj)
extern struct nvkm_oclass *nv04_instmem_oclass;
extern struct nvkm_oclass *nv40_instmem_oclass;
extern struct nvkm_oclass *nv50_instmem_oclass;
+extern struct nvkm_oclass *gk20a_instmem_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 7b86acc634a0..755942352557 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -35,6 +35,7 @@ extern struct nvkm_oclass *gt215_pmu_oclass;
extern struct nvkm_oclass *gf100_pmu_oclass;
extern struct nvkm_oclass *gf110_pmu_oclass;
extern struct nvkm_oclass *gk104_pmu_oclass;
+extern struct nvkm_oclass *gk110_pmu_oclass;
extern struct nvkm_oclass *gk208_pmu_oclass;
extern struct nvkm_oclass *gk20a_pmu_oclass;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 77326e344dad..6edcce1658b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1110,6 +1110,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e581f63cbf25..0589babc506e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -184,7 +184,8 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 handle, u32 engine, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+ KEPLER_CHANNEL_GPFIFO_A,
FERMI_CHANNEL_GPFIFO,
G82_CHANNEL_GPFIFO,
NV50_CHANNEL_GPFIFO,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index db7095ae4ebb..3162040bc314 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -309,7 +309,7 @@ detect_analog:
nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- struct drm_encoder_helper_funcs *helper =
+ const struct drm_encoder_helper_funcs *helper =
encoder->helper_private;
if (helper->detect(encoder, connector) ==
@@ -592,7 +592,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
static struct drm_display_mode *
nouveau_connector_native_mode(struct drm_connector *connector)
{
- struct drm_connector_helper_funcs *helper = connector->helper_private;
+ const struct drm_connector_helper_funcs *helper = connector->helper_private;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 860b0e2d4181..8670d90cdc11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -869,13 +869,20 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct nouveau_bo *bo;
+ uint32_t domain;
int ret;
args->pitch = roundup(args->width * (args->bpp / 8), 256);
args->size = args->pitch * args->height;
args->size = roundup(args->size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
+ /* Use VRAM if there is any; otherwise fall back to system memory */
+ if (nouveau_drm(dev)->device.info.ram_size != 0)
+ domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ domain = NOUVEAU_GEM_DOMAIN_GART;
+
+ ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8763deb5188b..89049335b738 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -181,6 +181,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
break;
case FERMI_CHANNEL_GPFIFO:
case KEPLER_CHANNEL_GPFIFO_A:
+ case MAXWELL_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index fc68f0973f9e..dd726523ca99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,7 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_PATCHLEVEL 2
/*
* 1.1.1:
@@ -28,6 +28,8 @@
* - fermi,kepler,maxwell zbc
* 1.2.1:
* - allow concurrent access to bo's mapped read/write.
+ * 1.2.2:
+ * - add NOUVEAU_GEM_DOMAIN_COHERENT flag
*/
#include <nvif/client.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7c077fced1d1..0e690bf19fc9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -189,6 +189,9 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
+ if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
+ flags |= TTM_PL_FLAG_UNCACHED;
+
ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
tile_flags, NULL, NULL, pnvbo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index dc5900bf54ff..775277f1edb0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
+#include <linux/iommu.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
@@ -91,6 +92,72 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
return 0;
}
+static void nouveau_platform_probe_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+ int err;
+ unsigned long pgsize_bitmap;
+
+ mutex_init(&gpu->iommu.mutex);
+
+ if (iommu_present(&platform_bus_type)) {
+ gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+ if (!gpu->iommu.domain)
+ goto error;
+
+ /*
+ * An IOMMU is only usable if it supports page sizes smaller
+ * than or equal to the system's PAGE_SIZE, preferably when
+ * both are equal.
+ */
+ pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
+ if (pgsize_bitmap & PAGE_SIZE) {
+ gpu->iommu.pgshift = PAGE_SHIFT;
+ } else {
+ gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+ if (gpu->iommu.pgshift == 0) {
+ dev_warn(dev, "unsupported IOMMU page size\n");
+ goto free_domain;
+ }
+ gpu->iommu.pgshift -= 1;
+ }
+
+ err = iommu_attach_device(gpu->iommu.domain, dev);
+ if (err)
+ goto free_domain;
+
+ err = nvkm_mm_init(&gpu->iommu._mm, 0,
+ (1ULL << 40) >> gpu->iommu.pgshift, 1);
+ if (err)
+ goto detach_device;
+
+ gpu->iommu.mm = &gpu->iommu._mm;
+ }
+
+ return;
+
+detach_device:
+ iommu_detach_device(gpu->iommu.domain, dev);
+
+free_domain:
+ iommu_domain_free(gpu->iommu.domain);
+
+error:
+ gpu->iommu.domain = NULL;
+ gpu->iommu.pgshift = 0;
+ dev_err(dev, "cannot initialize IOMMU MM\n");
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+ struct nouveau_platform_gpu *gpu)
+{
+ if (gpu->iommu.domain) {
+ nvkm_mm_fini(&gpu->iommu._mm);
+ iommu_detach_device(gpu->iommu.domain, dev);
+ iommu_domain_free(gpu->iommu.domain);
+ }
+}
+
static int nouveau_platform_probe(struct platform_device *pdev)
{
struct nouveau_platform_gpu *gpu;
@@ -118,6 +185,8 @@ static int nouveau_platform_probe(struct platform_device *pdev)
if (IS_ERR(gpu->clk_pwr))
return PTR_ERR(gpu->clk_pwr);
+ nouveau_platform_probe_iommu(&pdev->dev, gpu);
+
err = nouveau_platform_power_up(gpu);
if (err)
return err;
@@ -140,10 +209,9 @@ static int nouveau_platform_probe(struct platform_device *pdev)
err_unref:
drm_dev_unref(drm);
- return 0;
-
power_down:
nouveau_platform_power_down(gpu);
+ nouveau_platform_remove_iommu(&pdev->dev, gpu);
return err;
}
@@ -154,10 +222,15 @@ static int nouveau_platform_remove(struct platform_device *pdev)
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvkm_device *device = nvxx_device(&drm->device);
struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
+ int err;
nouveau_drm_device_remove(drm_dev);
- return nouveau_platform_power_down(gpu);
+ err = nouveau_platform_power_down(gpu);
+
+ nouveau_platform_remove_iommu(&pdev->dev, gpu);
+
+ return err;
}
#if IS_ENABLED(CONFIG_OF)
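Note: nouveau_platform_probe_iommu() above picks an IOMMU page shift: PAGE_SHIFT when the IOMMU supports the CPU page size, otherwise the largest supported size below it, via fls() on the masked pgsize_bitmap. A worked example with a 64 KiB CPU page and an IOMMU that only offers 4 KiB and 2 MiB pages (values chosen purely for illustration):

#include <stdio.h>

/* portable stand-in for the kernel's fls(): 1-based index of the highest set bit */
static int fls_ul(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	const unsigned long page_size = 64 * 1024;                     /* 64 KiB CPU pages */
	const unsigned long page_mask = ~(page_size - 1);
	const unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21); /* 4K | 2M */
	int pgshift;

	if (pgsize_bitmap & page_size)
		pgshift = 16;                                  /* PAGE_SHIFT for 64 KiB */
	else
		pgshift = fls_ul(pgsize_bitmap & ~page_mask) - 1;

	printf("chosen IOMMU page shift: %d (4 KiB)\n", pgshift);      /* prints 12 */
	return 0;
}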
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 268bb7213681..392874cf4725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -24,10 +24,12 @@
#define __NOUVEAU_PLATFORM_H__
#include "core/device.h"
+#include "core/mm.h"
struct reset_control;
struct clk;
struct regulator;
+struct iommu_domain;
struct platform_driver;
struct nouveau_platform_gpu {
@@ -36,6 +38,22 @@ struct nouveau_platform_gpu {
struct clk *clk_pwr;
struct regulator *vdd;
+
+ struct {
+ /*
+ * Protects accesses to mm from subsystems
+ */
+ struct mutex mutex;
+
+ struct nvkm_mm _mm;
+ /*
+ * Just points to _mm. We need this to avoid embedding
+ * struct nvkm_mm in os.h
+ */
+ struct nvkm_mm *mm;
+ struct iommu_domain *domain;
+ unsigned long pgshift;
+ } iommu;
};
struct nouveau_platform_device {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 273e50110ec3..18f449715788 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -82,6 +82,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
u32 size_nc = 0;
int ret;
+ if (drm->device.info.ram_size == 0)
+ return -ENOMEM;
+
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
size_nc = 1 << nvbo->page_shift;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index bf429cabbaa8..a03db4368696 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -215,6 +215,7 @@ nv84_fence_create(struct nouveau_drm *drm)
{
struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device);
struct nv84_fence_priv *priv;
+ u32 domain;
int ret;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -231,10 +232,17 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
- ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
+ /* Use VRAM if there is any; otherwise fall back to system memory */
+ domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
+ /*
+ * fences created in sysmem must be non-cached or we
+ * will lose CPU/GPU coherency!
+ */
+ TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, domain, 0,
+ 0, NULL, NULL, &priv->bo);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, domain, false);
if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 858797453e0b..fa8cda7058cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -1,3 +1,4 @@
nvkm-y += nvkm/engine/ce/gt215.o
nvkm-y += nvkm/engine/ce/gf100.o
nvkm-y += nvkm/engine/ce/gk104.o
+nvkm-y += nvkm/engine/ce/gm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
new file mode 100644
index 000000000000..577eb2eead05
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <engine/ce.h>
+
+#include <core/engctx.h>
+
+struct gm204_ce_priv {
+ struct nvkm_engine base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nvkm_oclass
+gm204_ce_sclass[] = {
+ { 0xb0b5, &nvkm_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCE context
+ ******************************************************************************/
+
+static struct nvkm_ofuncs
+gm204_ce_context_ofuncs = {
+ .ctor = _nvkm_engctx_ctor,
+ .dtor = _nvkm_engctx_dtor,
+ .init = _nvkm_engctx_init,
+ .fini = _nvkm_engctx_fini,
+ .rd32 = _nvkm_engctx_rd32,
+ .wr32 = _nvkm_engctx_wr32,
+};
+
+static struct nvkm_oclass
+gm204_ce_cclass = {
+ .handle = NV_ENGCTX(CE0, 0x24),
+ .ofuncs = &gm204_ce_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCE engine/subdev functions
+ ******************************************************************************/
+
+static void
+gm204_ce_intr(struct nvkm_subdev *subdev)
+{
+ const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
+ struct gm204_ce_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
+
+ if (stat) {
+ nv_warn(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+ }
+}
+
+static int
+gm204_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gm204_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE0", "ce0", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = gm204_ce_intr;
+ nv_engine(priv)->cclass = &gm204_ce_cclass;
+ nv_engine(priv)->sclass = gm204_ce_sclass;
+ return 0;
+}
+
+static int
+gm204_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gm204_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE1", "ce1", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = gm204_ce_intr;
+ nv_engine(priv)->cclass = &gm204_ce_cclass;
+ nv_engine(priv)->sclass = gm204_ce_sclass;
+ return 0;
+}
+
+static int
+gm204_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gm204_ce_priv *priv;
+ int ret;
+
+ ret = nvkm_engine_create(parent, engine, oclass, true,
+ "PCE2", "ce2", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00200000;
+ nv_subdev(priv)->intr = gm204_ce_intr;
+ nv_engine(priv)->cclass = &gm204_ce_cclass;
+ nv_engine(priv)->sclass = gm204_ce_sclass;
+ return 0;
+}
+
+struct nvkm_oclass
+gm204_ce0_oclass = {
+ .handle = NV_ENGINE(CE0, 0x24),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_ce0_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
+
+struct nvkm_oclass
+gm204_ce1_oclass = {
+ .handle = NV_ENGINE(CE1, 0x24),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_ce1_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
+
+struct nvkm_oclass
+gm204_ce2_oclass = {
+ .handle = NV_ENGINE(CE2, 0x24),
+ .ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_ce2_ctor,
+ .dtor = _nvkm_engine_dtor,
+ .init = _nvkm_engine_init,
+ .fini = _nvkm_engine_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 6efa8f38ff54..63d8e52f4b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -139,9 +139,13 @@ nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
args->v0.chipset = device->chipset;
args->v0.revision = device->chiprev;
- if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
- else args->v0.ram_size = args->v0.ram_user = 0;
- if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
+ if (pfb && pfb->ram)
+ args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
+ else
+ args->v0.ram_size = args->v0.ram_user = 0;
+ if (imem && args->v0.ram_size > 0)
+ args->v0.ram_user = args->v0.ram_user - imem->reserved;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
index bf5893458a47..6a9483f65d83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
@@ -171,7 +171,7 @@ gk104_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = gk20a_instmem_oclass;
device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
@@ -202,7 +202,7 @@ gk104_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
@@ -236,7 +236,7 @@ gk104_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 108d048da764..70abf1ec7c98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -127,16 +127,14 @@ gm100_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
-#endif
+ device->oclass[NVDEV_ENGINE_GR ] = gm204_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
+#if 0
device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
@@ -170,16 +168,14 @@ gm100_identify(struct nvkm_device *device)
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
-#endif
+ device->oclass[NVDEV_ENGINE_GR ] = gm206_gr_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
+#if 0
device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 0ebf466e9ef3..9ef6728c528d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -413,8 +413,8 @@ gf110_disp_base_mthd_base = {
static const struct nv50_disp_mthd_list
gf110_disp_base_mthd_image = {
- .mthd = 0x0400,
- .addr = 0x000400,
+ .mthd = 0x0020,
+ .addr = 0x000020,
.data = {
{ 0x0400, 0x661400 },
{ 0x0404, 0x661404 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 84ade810e27c..8ba808df24ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -229,7 +229,7 @@ nv50_disp_dmac_create_(struct nvkm_object *parent,
switch (dmac->pushdma->target) {
case NV_MEM_TARGET_VRAM:
- dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+ dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index c5a2d8718c5b..42891cb71ea3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -9,3 +9,4 @@ nvkm-y += nvkm/engine/fifo/gf100.o
nvkm-y += nvkm/engine/fifo/gk104.o
nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 9585539e59f2..e10f9644140f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -323,8 +323,8 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
return nvkm_fifo_channel_fini(&chan->base, suspend);
}
-static struct nvkm_ofuncs
-gk104_fifo_ofuncs = {
+struct nvkm_ofuncs
+gk104_fifo_chan_ofuncs = {
.ctor = gk104_fifo_chan_ctor,
.dtor = _nvkm_fifo_channel_dtor,
.init = gk104_fifo_chan_init,
@@ -337,7 +337,7 @@ gk104_fifo_ofuncs = {
static struct nvkm_oclass
gk104_fifo_sclass[] = {
- { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_ofuncs },
+ { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
{}
};
@@ -774,6 +774,7 @@ gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
while (object) {
switch (nv_mclass(object)) {
case KEPLER_CHANNEL_GPFIFO_A:
+ case MAXWELL_CHANNEL_GPFIFO_A:
gk104_fifo_recover(priv, engine, (void *)object);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 3046e00ed6ba..318d30d6ee1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -13,4 +13,6 @@ struct gk104_fifo_impl {
struct nvkm_oclass base;
u32 channels;
};
+
+extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
new file mode 100644
index 000000000000..749d525dd8e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+
+#include <nvif/class.h>
+
+static struct nvkm_oclass
+gm204_fifo_sclass[] = {
+ { MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
+ {}
+};
+
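+/* constructed via gk104_fifo_ctor, then the gm204 channel classes are
+ * swapped in so MAXWELL_CHANNEL_GPFIFO_A objects can be created
+ */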
+static int
+gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
+ if (ret == 0) {
+ struct gk104_fifo_priv *priv = (void *)*pobject;
+ nv_engine(priv)->sclass = gm204_fifo_sclass;
+ }
+ return ret;
+}
+
+struct nvkm_oclass *
+gm204_fifo_oclass = &(struct gk104_fifo_impl) {
+ .base.handle = NV_ENGINE(FIFO, 0x24),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gm204_fifo_ctor,
+ .dtor = gk104_fifo_dtor,
+ .init = gk104_fifo_init,
+ .fini = _nvkm_fifo_fini,
+ },
+ .channels = 4096,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 1771d944591b..2e1b92f71d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -12,6 +12,8 @@ nvkm-y += nvkm/engine/gr/ctxgk110.o
nvkm-y += nvkm/engine/gr/ctxgk110b.o
nvkm-y += nvkm/engine/gr/ctxgk208.o
nvkm-y += nvkm/engine/gr/ctxgm107.o
+nvkm-y += nvkm/engine/gr/ctxgm204.o
+nvkm-y += nvkm/engine/gr/ctxgm206.o
nvkm-y += nvkm/engine/gr/nv04.o
nvkm-y += nvkm/engine/gr/nv10.o
nvkm-y += nvkm/engine/gr/nv20.o
@@ -34,3 +36,5 @@ nvkm-y += nvkm/engine/gr/gk110.o
nvkm-y += nvkm/engine/gr/gk110b.o
nvkm-y += nvkm/engine/gr/gk208.o
nvkm-y += nvkm/engine/gr/gm107.o
+nvkm-y += nvkm/engine/gr/gm204.o
+nvkm-y += nvkm/engine/gr/gm206.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 1166b1aa1525..3676a3342bc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -88,11 +88,22 @@ void gk104_grctx_generate_bundle(struct gf100_grctx *);
void gk104_grctx_generate_pagepool(struct gf100_grctx *);
void gk104_grctx_generate_unkn(struct gf100_gr_priv *);
void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *);
+void gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *);
+
extern struct nvkm_oclass *gk110_grctx_oclass;
extern struct nvkm_oclass *gk110b_grctx_oclass;
extern struct nvkm_oclass *gk208_grctx_oclass;
+
extern struct nvkm_oclass *gm107_grctx_oclass;
+void gm107_grctx_generate_bundle(struct gf100_grctx *);
+void gm107_grctx_generate_pagepool(struct gf100_grctx *);
+void gm107_grctx_generate_attrib(struct gf100_grctx *);
+
+extern struct nvkm_oclass *gm204_grctx_oclass;
+void gm204_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+
+extern struct nvkm_oclass *gm206_grctx_oclass;
/* context init value lists */
@@ -196,4 +207,22 @@ extern const struct gf100_gr_init gk208_grctx_init_rstr2d_0[];
extern const struct gf100_gr_init gk208_grctx_init_prop_0[];
extern const struct gf100_gr_init gk208_grctx_init_crstr_0[];
+
+extern const struct gf100_gr_init gm107_grctx_init_gpc_unk_0[];
+extern const struct gf100_gr_init gm107_grctx_init_wwdx_0[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_icmd[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_mthd[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_hub[];
+
+extern const struct gf100_gr_init gm204_grctx_init_prop_0[];
+extern const struct gf100_gr_init gm204_grctx_init_setup_0[];
+extern const struct gf100_gr_init gm204_grctx_init_gpm_0[];
+extern const struct gf100_gr_init gm204_grctx_init_gpc_unk_2[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_tpc[];
+
+extern const struct gf100_gr_pack gm204_grctx_pack_ppc[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 5e9454ba158f..b12f6a9fd926 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -941,6 +941,14 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
}
void
+gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+{
+ const u32 fbp_count = nv_rd32(priv, 0x120074);
+ nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+void
gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
{
struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
@@ -970,13 +978,7 @@ gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
- if (priv->gpc_nr == 1) {
- nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
- nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
- } else {
- nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
- nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
- }
+ gk104_grctx_generate_rop_active_fbps(priv);
nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
gf100_gr_icmd(priv, oclass->icmd);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index b2fae6e389e2..fbeaae3ae6ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -699,7 +699,7 @@ gm107_grctx_pack_hub[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_grctx_init_gpc_unk_0[] = {
{ 0x418380, 1, 0x04, 0x00000056 },
{}
@@ -834,7 +834,7 @@ gm107_grctx_init_cbm_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_grctx_init_wwdx_0[] = {
{ 0x41bf00, 1, 0x04, 0x0a418820 },
{ 0x41bf04, 1, 0x04, 0x062080e6 },
@@ -860,7 +860,7 @@ gm107_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
+void
gm107_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
@@ -877,7 +877,7 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
-static void
+void
gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
@@ -892,7 +892,7 @@ gm107_grctx_generate_pagepool(struct gf100_grctx *info)
mmio_wr32(info, 0x418e30, 0x80000000); /* guess at it being related */
}
-static void
+void
gm107_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr_priv *priv = info->priv;
@@ -926,7 +926,7 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, o + 0xe4, as);
mmio_wr32(info, o + 0xf8, ao);
ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
- mmio_wr32(info, u, (0x715 /*XXX*/ << 16) | bs);
+ mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
}
}
}
@@ -982,13 +982,7 @@ gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
- if (priv->gpc_nr == 1) {
- nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
- nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
- } else {
- nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
- nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
- }
+ gk104_grctx_generate_rop_active_fbps(priv);
gf100_gr_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
new file mode 100644
index 000000000000..ea8e66151aa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
@@ -0,0 +1,1054 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct gf100_gr_init
+gm204_grctx_init_icmd_0[] = {
+ { 0x001000, 1, 0x01, 0x00000002 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000008 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000374, 1, 0x01, 0x00000100 },
+ { 0x000818, 8, 0x01, 0x00000000 },
+ { 0x000848, 16, 0x01, 0x00000000 },
+ { 0x000738, 1, 0x01, 0x00000000 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000001 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000004 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x0000a9, 1, 0x01, 0x0000ffff },
+ { 0x000038, 1, 0x01, 0x0fac6881 },
+ { 0x00003d, 1, 0x01, 0x00000001 },
+ { 0x0000e8, 8, 0x01, 0x00000400 },
+ { 0x000078, 8, 0x01, 0x00000300 },
+ { 0x000050, 1, 0x01, 0x00000011 },
+ { 0x000058, 8, 0x01, 0x00000008 },
+ { 0x000208, 8, 0x01, 0x00000001 },
+ { 0x000081, 1, 0x01, 0x00000001 },
+ { 0x000085, 1, 0x01, 0x00000004 },
+ { 0x000088, 1, 0x01, 0x00000400 },
+ { 0x000090, 1, 0x01, 0x00000300 },
+ { 0x000098, 1, 0x01, 0x00001001 },
+ { 0x0000e3, 1, 0x01, 0x00000001 },
+ { 0x0000da, 1, 0x01, 0x00000001 },
+ { 0x0000b4, 4, 0x01, 0x88888888 },
+ { 0x0000f8, 1, 0x01, 0x00000003 },
+ { 0x0000fa, 1, 0x01, 0x00000001 },
+ { 0x0000b1, 2, 0x01, 0x00000001 },
+ { 0x00009f, 4, 0x01, 0x0000ffff },
+ { 0x0000a8, 1, 0x01, 0x0000ffff },
+ { 0x0000ad, 1, 0x01, 0x0000013e },
+ { 0x0000e1, 1, 0x01, 0x00000010 },
+ { 0x000290, 16, 0x01, 0x00000000 },
+ { 0x0003b0, 16, 0x01, 0x00000000 },
+ { 0x0002a0, 16, 0x01, 0x00000000 },
+ { 0x000420, 16, 0x01, 0x00000000 },
+ { 0x0002b0, 16, 0x01, 0x00000000 },
+ { 0x000430, 16, 0x01, 0x00000000 },
+ { 0x0002c0, 16, 0x01, 0x00000000 },
+ { 0x0004d0, 16, 0x01, 0x00000000 },
+ { 0x000720, 16, 0x01, 0x00000000 },
+ { 0x0008c0, 16, 0x01, 0x00000000 },
+ { 0x000890, 16, 0x01, 0x00000000 },
+ { 0x0008e0, 16, 0x01, 0x00000000 },
+ { 0x0008a0, 16, 0x01, 0x00000000 },
+ { 0x0008f0, 16, 0x01, 0x00000000 },
+ { 0x00094c, 1, 0x01, 0x000000ff },
+ { 0x00094d, 1, 0x01, 0xffffffff },
+ { 0x00094e, 1, 0x01, 0x00000002 },
+ { 0x0002f2, 2, 0x01, 0x00000001 },
+ { 0x0002f5, 1, 0x01, 0x00000001 },
+ { 0x0002f7, 1, 0x01, 0x00000001 },
+ { 0x000303, 1, 0x01, 0x00000001 },
+ { 0x0002e6, 1, 0x01, 0x00000001 },
+ { 0x000466, 1, 0x01, 0x00000052 },
+ { 0x000301, 1, 0x01, 0x3f800000 },
+ { 0x000304, 1, 0x01, 0x30201000 },
+ { 0x000305, 1, 0x01, 0x70605040 },
+ { 0x000306, 1, 0x01, 0xb8a89888 },
+ { 0x000307, 1, 0x01, 0xf8e8d8c8 },
+ { 0x00030a, 1, 0x01, 0x00ffff00 },
+ { 0x00030b, 1, 0x01, 0x0000001a },
+ { 0x00030c, 1, 0x01, 0x00000001 },
+ { 0x000318, 1, 0x01, 0x00000001 },
+ { 0x000340, 1, 0x01, 0x00000000 },
+ { 0x00037d, 1, 0x01, 0x00000006 },
+ { 0x0003a0, 1, 0x01, 0x00000002 },
+ { 0x0003aa, 1, 0x01, 0x00000001 },
+ { 0x0003a9, 1, 0x01, 0x00000001 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000383, 1, 0x01, 0x00000011 },
+ { 0x000360, 1, 0x01, 0x00000040 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000374, 1, 0x01, 0x00000100 },
+ { 0x00037a, 1, 0x01, 0x00000012 },
+ { 0x000619, 1, 0x01, 0x00000003 },
+ { 0x000811, 1, 0x01, 0x00000003 },
+ { 0x000812, 1, 0x01, 0x00000004 },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000815, 1, 0x01, 0x0000000b },
+ { 0x000800, 6, 0x01, 0x00000001 },
+ { 0x000632, 1, 0x01, 0x00000001 },
+ { 0x000633, 1, 0x01, 0x00000002 },
+ { 0x000634, 1, 0x01, 0x00000003 },
+ { 0x000635, 1, 0x01, 0x00000004 },
+ { 0x000654, 1, 0x01, 0x3f800000 },
+ { 0x000657, 1, 0x01, 0x3f800000 },
+ { 0x000655, 2, 0x01, 0x3f800000 },
+ { 0x0006cd, 1, 0x01, 0x3f800000 },
+ { 0x0007f5, 1, 0x01, 0x3f800000 },
+ { 0x0007dc, 1, 0x01, 0x39291909 },
+ { 0x0007dd, 1, 0x01, 0x79695949 },
+ { 0x0007de, 1, 0x01, 0xb9a99989 },
+ { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007e8, 1, 0x01, 0x00003210 },
+ { 0x0007e9, 1, 0x01, 0x00007654 },
+ { 0x0007ea, 1, 0x01, 0x00000098 },
+ { 0x0007ec, 1, 0x01, 0x39291909 },
+ { 0x0007ed, 1, 0x01, 0x79695949 },
+ { 0x0007ee, 1, 0x01, 0xb9a99989 },
+ { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007f0, 1, 0x01, 0x00003210 },
+ { 0x0007f1, 1, 0x01, 0x00007654 },
+ { 0x0007f2, 1, 0x01, 0x00000098 },
+ { 0x0005a5, 1, 0x01, 0x00000001 },
+ { 0x0005aa, 1, 0x01, 0x00000002 },
+ { 0x0005cb, 1, 0x01, 0x00000004 },
+ { 0x0005d0, 1, 0x01, 0x20181008 },
+ { 0x0005d1, 1, 0x01, 0x40383028 },
+ { 0x0005d2, 1, 0x01, 0x60585048 },
+ { 0x0005d3, 1, 0x01, 0x80787068 },
+ { 0x000980, 128, 0x01, 0x00000000 },
+ { 0x000468, 1, 0x01, 0x00000004 },
+ { 0x00046c, 1, 0x01, 0x00000001 },
+ { 0x000470, 96, 0x01, 0x00000000 },
+ { 0x0005e0, 16, 0x01, 0x00000d10 },
+ { 0x000510, 16, 0x01, 0x3f800000 },
+ { 0x000520, 1, 0x01, 0x000002b6 },
+ { 0x000529, 1, 0x01, 0x00000001 },
+ { 0x000530, 16, 0x01, 0xffff0000 },
+ { 0x000550, 32, 0x01, 0xffff0000 },
+ { 0x000585, 1, 0x01, 0x0000003f },
+ { 0x000576, 1, 0x01, 0x00000003 },
+ { 0x00057b, 1, 0x01, 0x00000059 },
+ { 0x000586, 1, 0x01, 0x00000040 },
+ { 0x000582, 2, 0x01, 0x00000080 },
+ { 0x000595, 1, 0x01, 0x00400040 },
+ { 0x000596, 1, 0x01, 0x00000492 },
+ { 0x000597, 1, 0x01, 0x08080203 },
+ { 0x0005ad, 1, 0x01, 0x00000008 },
+ { 0x000598, 1, 0x01, 0x00020001 },
+ { 0x0005d4, 1, 0x01, 0x00000001 },
+ { 0x0005c2, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
+ { 0x00063a, 1, 0x01, 0x00000002 },
+ { 0x00063b, 2, 0x01, 0x00000001 },
+ { 0x00063d, 1, 0x01, 0x00000002 },
+ { 0x00063e, 1, 0x01, 0x00000001 },
+ { 0x0008b8, 8, 0x01, 0x00000001 },
+ { 0x000900, 8, 0x01, 0x00000001 },
+ { 0x000908, 8, 0x01, 0x00000002 },
+ { 0x000910, 16, 0x01, 0x00000001 },
+ { 0x000920, 8, 0x01, 0x00000002 },
+ { 0x000928, 8, 0x01, 0x00000001 },
+ { 0x000662, 1, 0x01, 0x00000001 },
+ { 0x000648, 9, 0x01, 0x00000001 },
+ { 0x000674, 1, 0x01, 0x00000001 },
+ { 0x000658, 1, 0x01, 0x0000000f },
+ { 0x0007ff, 1, 0x01, 0x0000000a },
+ { 0x00066a, 1, 0x01, 0x40000000 },
+ { 0x00066b, 1, 0x01, 0x10000000 },
+ { 0x00066c, 2, 0x01, 0xffff0000 },
+ { 0x0007af, 2, 0x01, 0x00000008 },
+ { 0x0007f6, 1, 0x01, 0x00000001 },
+ { 0x0006b2, 1, 0x01, 0x00000055 },
+ { 0x0007ad, 1, 0x01, 0x00000003 },
+ { 0x000971, 1, 0x01, 0x00000008 },
+ { 0x000972, 1, 0x01, 0x00000040 },
+ { 0x000973, 1, 0x01, 0x0000012c },
+ { 0x00097c, 1, 0x01, 0x00000040 },
+ { 0x000975, 1, 0x01, 0x00000020 },
+ { 0x000976, 1, 0x01, 0x00000001 },
+ { 0x000977, 1, 0x01, 0x00000020 },
+ { 0x000978, 1, 0x01, 0x00000001 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x00095e, 1, 0x01, 0x20164010 },
+ { 0x00095f, 1, 0x01, 0x00000020 },
+ { 0x000a0d, 1, 0x01, 0x00000006 },
+ { 0x00097d, 1, 0x01, 0x0000000c },
+ { 0x000683, 1, 0x01, 0x00000006 },
+ { 0x000687, 1, 0x01, 0x003fffff },
+ { 0x0006a0, 1, 0x01, 0x00000005 },
+ { 0x000840, 1, 0x01, 0x00400008 },
+ { 0x000841, 1, 0x01, 0x08000080 },
+ { 0x000842, 1, 0x01, 0x00400008 },
+ { 0x000843, 1, 0x01, 0x08000080 },
+ { 0x000818, 8, 0x01, 0x00000000 },
+ { 0x000848, 16, 0x01, 0x00000000 },
+ { 0x000738, 1, 0x01, 0x00000000 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ab, 1, 0x01, 0x00000002 },
+ { 0x0006ac, 1, 0x01, 0x00000080 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x0006bb, 1, 0x01, 0x000000cf },
+ { 0x0006ce, 1, 0x01, 0x2a712488 },
+ { 0x000739, 1, 0x01, 0x4085c000 },
+ { 0x00073a, 1, 0x01, 0x00000080 },
+ { 0x000786, 1, 0x01, 0x80000100 },
+ { 0x00073c, 1, 0x01, 0x00010100 },
+ { 0x00073d, 1, 0x01, 0x02800000 },
+ { 0x000787, 1, 0x01, 0x000000cf },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x000836, 1, 0x01, 0x00000001 },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x000833, 1, 0x01, 0x04444480 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c1b0, 8, 0x01, 0x0000000f },
+ { 0x00c1b8, 1, 0x01, 0x0fac6881 },
+ { 0x00c1b9, 1, 0x01, 0x00fac688 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c413, 4, 0x01, 0x88888888 },
+ { 0x00c423, 1, 0x01, 0x0000ff00 },
+ { 0x00c420, 1, 0x01, 0x00880101 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_icmd[] = {
+ { gm204_grctx_init_icmd_0 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_b197_0[] = {
+ { 0x000800, 8, 0x40, 0x00000000 },
+ { 0x000804, 8, 0x40, 0x00000000 },
+ { 0x000808, 8, 0x40, 0x00000400 },
+ { 0x00080c, 8, 0x40, 0x00000300 },
+ { 0x000810, 1, 0x04, 0x000000cf },
+ { 0x000850, 7, 0x40, 0x00000000 },
+ { 0x000814, 8, 0x40, 0x00000040 },
+ { 0x000818, 8, 0x40, 0x00000001 },
+ { 0x00081c, 8, 0x40, 0x00000000 },
+ { 0x000820, 8, 0x40, 0x00000000 },
+ { 0x001c00, 16, 0x10, 0x00000000 },
+ { 0x001c04, 16, 0x10, 0x00000000 },
+ { 0x001c08, 16, 0x10, 0x00000000 },
+ { 0x001c0c, 16, 0x10, 0x00000000 },
+ { 0x001d00, 16, 0x10, 0x00000000 },
+ { 0x001d04, 16, 0x10, 0x00000000 },
+ { 0x001d08, 16, 0x10, 0x00000000 },
+ { 0x001d0c, 16, 0x10, 0x00000000 },
+ { 0x001f00, 16, 0x08, 0x00000000 },
+ { 0x001f04, 16, 0x08, 0x00000000 },
+ { 0x001f80, 16, 0x08, 0x00000000 },
+ { 0x001f84, 16, 0x08, 0x00000000 },
+ { 0x002000, 1, 0x04, 0x00000000 },
+ { 0x002040, 1, 0x04, 0x00000011 },
+ { 0x002080, 1, 0x04, 0x00000020 },
+ { 0x0020c0, 1, 0x04, 0x00000030 },
+ { 0x002100, 1, 0x04, 0x00000040 },
+ { 0x002140, 1, 0x04, 0x00000051 },
+ { 0x00200c, 6, 0x40, 0x00000001 },
+ { 0x002010, 1, 0x04, 0x00000000 },
+ { 0x002050, 1, 0x04, 0x00000000 },
+ { 0x002090, 1, 0x04, 0x00000001 },
+ { 0x0020d0, 1, 0x04, 0x00000002 },
+ { 0x002110, 1, 0x04, 0x00000003 },
+ { 0x002150, 1, 0x04, 0x00000004 },
+ { 0x000380, 4, 0x20, 0x00000000 },
+ { 0x000384, 4, 0x20, 0x00000000 },
+ { 0x000388, 4, 0x20, 0x00000000 },
+ { 0x00038c, 4, 0x20, 0x00000000 },
+ { 0x000700, 4, 0x10, 0x00000000 },
+ { 0x000704, 4, 0x10, 0x00000000 },
+ { 0x000708, 4, 0x10, 0x00000000 },
+ { 0x002800, 128, 0x04, 0x00000000 },
+ { 0x000a00, 16, 0x20, 0x00000000 },
+ { 0x000a04, 16, 0x20, 0x00000000 },
+ { 0x000a08, 16, 0x20, 0x00000000 },
+ { 0x000a0c, 16, 0x20, 0x00000000 },
+ { 0x000a10, 16, 0x20, 0x00000000 },
+ { 0x000a14, 16, 0x20, 0x00000000 },
+ { 0x000a18, 16, 0x20, 0x00006420 },
+ { 0x000a1c, 16, 0x20, 0x00000000 },
+ { 0x000c00, 16, 0x10, 0x00000000 },
+ { 0x000c04, 16, 0x10, 0x00000000 },
+ { 0x000c08, 16, 0x10, 0x00000000 },
+ { 0x000c0c, 16, 0x10, 0x3f800000 },
+ { 0x000d00, 8, 0x08, 0xffff0000 },
+ { 0x000d04, 8, 0x08, 0xffff0000 },
+ { 0x000e00, 16, 0x10, 0x00000000 },
+ { 0x000e04, 16, 0x10, 0xffff0000 },
+ { 0x000e08, 16, 0x10, 0xffff0000 },
+ { 0x000d40, 4, 0x08, 0x00000000 },
+ { 0x000d44, 4, 0x08, 0x00000000 },
+ { 0x001e00, 8, 0x20, 0x00000001 },
+ { 0x001e04, 8, 0x20, 0x00000001 },
+ { 0x001e08, 8, 0x20, 0x00000002 },
+ { 0x001e0c, 8, 0x20, 0x00000001 },
+ { 0x001e10, 8, 0x20, 0x00000001 },
+ { 0x001e14, 8, 0x20, 0x00000002 },
+ { 0x001e18, 8, 0x20, 0x00000001 },
+ { 0x001480, 8, 0x10, 0x00000000 },
+ { 0x001484, 8, 0x10, 0x00000000 },
+ { 0x001488, 8, 0x10, 0x00000000 },
+ { 0x003400, 128, 0x04, 0x00000000 },
+ { 0x00030c, 1, 0x04, 0x00000001 },
+ { 0x001944, 1, 0x04, 0x00000000 },
+ { 0x001514, 1, 0x04, 0x00000000 },
+ { 0x000d68, 1, 0x04, 0x0000ffff },
+ { 0x00121c, 1, 0x04, 0x0fac6881 },
+ { 0x000fac, 1, 0x04, 0x00000001 },
+ { 0x001538, 1, 0x04, 0x00000001 },
+ { 0x000fe0, 2, 0x04, 0x00000000 },
+ { 0x000fe8, 1, 0x04, 0x00000014 },
+ { 0x000fec, 1, 0x04, 0x00000040 },
+ { 0x000ff0, 1, 0x04, 0x00000000 },
+ { 0x00179c, 1, 0x04, 0x00000000 },
+ { 0x001228, 1, 0x04, 0x00000400 },
+ { 0x00122c, 1, 0x04, 0x00000300 },
+ { 0x001230, 1, 0x04, 0x00010001 },
+ { 0x0007f8, 1, 0x04, 0x00000000 },
+ { 0x001208, 1, 0x04, 0x00000000 },
+ { 0x0015b4, 1, 0x04, 0x00000001 },
+ { 0x0015cc, 1, 0x04, 0x00000000 },
+ { 0x001534, 1, 0x04, 0x00000000 },
+ { 0x000754, 1, 0x04, 0x00000001 },
+ { 0x000fb0, 1, 0x04, 0x00000000 },
+ { 0x0015d0, 1, 0x04, 0x00000000 },
+ { 0x0011e0, 4, 0x04, 0x88888888 },
+ { 0x00153c, 1, 0x04, 0x00000000 },
+ { 0x0016b4, 1, 0x04, 0x00000003 },
+ { 0x000fa4, 1, 0x04, 0x00000001 },
+ { 0x000fbc, 4, 0x04, 0x0000ffff },
+ { 0x000fa8, 1, 0x04, 0x0000ffff },
+ { 0x000df8, 2, 0x04, 0x00000000 },
+ { 0x001948, 1, 0x04, 0x00000000 },
+ { 0x001970, 1, 0x04, 0x00000001 },
+ { 0x00161c, 1, 0x04, 0x000009f0 },
+ { 0x000dcc, 1, 0x04, 0x00000010 },
+ { 0x0015e4, 1, 0x04, 0x00000000 },
+ { 0x001160, 32, 0x04, 0x25e00040 },
+ { 0x001880, 32, 0x04, 0x00000000 },
+ { 0x000f84, 2, 0x04, 0x00000000 },
+ { 0x0017c8, 2, 0x04, 0x00000000 },
+ { 0x0017d0, 1, 0x04, 0x000000ff },
+ { 0x0017d4, 1, 0x04, 0xffffffff },
+ { 0x0017d8, 1, 0x04, 0x00000002 },
+ { 0x0017dc, 1, 0x04, 0x00000000 },
+ { 0x0015f4, 2, 0x04, 0x00000000 },
+ { 0x001434, 2, 0x04, 0x00000000 },
+ { 0x000d74, 1, 0x04, 0x00000000 },
+ { 0x0013a4, 1, 0x04, 0x00000000 },
+ { 0x001318, 1, 0x04, 0x00000001 },
+ { 0x001080, 2, 0x04, 0x00000000 },
+ { 0x001088, 2, 0x04, 0x00000001 },
+ { 0x001090, 1, 0x04, 0x00000000 },
+ { 0x001094, 1, 0x04, 0x00000001 },
+ { 0x001098, 1, 0x04, 0x00000000 },
+ { 0x00109c, 1, 0x04, 0x00000001 },
+ { 0x0010a0, 2, 0x04, 0x00000000 },
+ { 0x001644, 1, 0x04, 0x00000000 },
+ { 0x000748, 1, 0x04, 0x00000000 },
+ { 0x000de8, 1, 0x04, 0x00000000 },
+ { 0x001648, 1, 0x04, 0x00000000 },
+ { 0x0012a4, 1, 0x04, 0x00000000 },
+ { 0x001120, 4, 0x04, 0x00000000 },
+ { 0x001118, 1, 0x04, 0x00000000 },
+ { 0x00164c, 1, 0x04, 0x00000000 },
+ { 0x001658, 1, 0x04, 0x00000000 },
+ { 0x001910, 1, 0x04, 0x00000290 },
+ { 0x001518, 1, 0x04, 0x00000000 },
+ { 0x00165c, 1, 0x04, 0x00000001 },
+ { 0x001520, 1, 0x04, 0x00000000 },
+ { 0x001604, 1, 0x04, 0x00000000 },
+ { 0x001570, 1, 0x04, 0x00000000 },
+ { 0x0013b0, 2, 0x04, 0x3f800000 },
+ { 0x00020c, 1, 0x04, 0x00000000 },
+ { 0x001670, 1, 0x04, 0x30201000 },
+ { 0x001674, 1, 0x04, 0x70605040 },
+ { 0x001678, 1, 0x04, 0xb8a89888 },
+ { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
+ { 0x00166c, 1, 0x04, 0x00000000 },
+ { 0x001680, 1, 0x04, 0x00ffff00 },
+ { 0x0012d0, 1, 0x04, 0x00000003 },
+ { 0x00113c, 1, 0x04, 0x00000000 },
+ { 0x0012d4, 1, 0x04, 0x00000002 },
+ { 0x001684, 2, 0x04, 0x00000000 },
+ { 0x000dac, 2, 0x04, 0x00001b02 },
+ { 0x000db4, 1, 0x04, 0x00000000 },
+ { 0x00168c, 1, 0x04, 0x00000000 },
+ { 0x0015bc, 1, 0x04, 0x00000000 },
+ { 0x00156c, 1, 0x04, 0x00000000 },
+ { 0x00187c, 1, 0x04, 0x00000000 },
+ { 0x001110, 1, 0x04, 0x00000001 },
+ { 0x000dc0, 3, 0x04, 0x00000000 },
+ { 0x000f40, 5, 0x04, 0x00000000 },
+ { 0x001234, 1, 0x04, 0x00000000 },
+ { 0x001690, 1, 0x04, 0x00000000 },
+ { 0x000790, 5, 0x04, 0x00000000 },
+ { 0x00077c, 1, 0x04, 0x00000000 },
+ { 0x001000, 1, 0x04, 0x00000010 },
+ { 0x0010fc, 1, 0x04, 0x00000000 },
+ { 0x001290, 1, 0x04, 0x00000000 },
+ { 0x000218, 1, 0x04, 0x00000010 },
+ { 0x0012d8, 1, 0x04, 0x00000000 },
+ { 0x0012dc, 1, 0x04, 0x00000010 },
+ { 0x000d94, 1, 0x04, 0x00000001 },
+ { 0x00155c, 2, 0x04, 0x00000000 },
+ { 0x001564, 1, 0x04, 0x00000fff },
+ { 0x001574, 2, 0x04, 0x00000000 },
+ { 0x00157c, 1, 0x04, 0x000fffff },
+ { 0x001354, 1, 0x04, 0x00000000 },
+ { 0x001610, 1, 0x04, 0x00000012 },
+ { 0x001608, 2, 0x04, 0x00000000 },
+ { 0x00260c, 1, 0x04, 0x00000000 },
+ { 0x0007ac, 1, 0x04, 0x00000000 },
+ { 0x00162c, 1, 0x04, 0x00000003 },
+ { 0x000210, 1, 0x04, 0x00000000 },
+ { 0x000320, 1, 0x04, 0x00000000 },
+ { 0x000324, 6, 0x04, 0x3f800000 },
+ { 0x000750, 1, 0x04, 0x00000000 },
+ { 0x000760, 1, 0x04, 0x39291909 },
+ { 0x000764, 1, 0x04, 0x79695949 },
+ { 0x000768, 1, 0x04, 0xb9a99989 },
+ { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x000770, 1, 0x04, 0x30201000 },
+ { 0x000774, 1, 0x04, 0x70605040 },
+ { 0x000778, 1, 0x04, 0x00009080 },
+ { 0x000780, 1, 0x04, 0x39291909 },
+ { 0x000784, 1, 0x04, 0x79695949 },
+ { 0x000788, 1, 0x04, 0xb9a99989 },
+ { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x0007d0, 1, 0x04, 0x30201000 },
+ { 0x0007d4, 1, 0x04, 0x70605040 },
+ { 0x0007d8, 1, 0x04, 0x00009080 },
+ { 0x001004, 1, 0x04, 0x00000000 },
+ { 0x001240, 8, 0x04, 0x00000000 },
+ { 0x00037c, 1, 0x04, 0x00000001 },
+ { 0x000740, 1, 0x04, 0x00000000 },
+ { 0x001148, 1, 0x04, 0x00000000 },
+ { 0x000fb4, 1, 0x04, 0x00000000 },
+ { 0x000fb8, 1, 0x04, 0x00000002 },
+ { 0x001130, 1, 0x04, 0x00000002 },
+ { 0x000fd4, 2, 0x04, 0x00000000 },
+ { 0x001030, 1, 0x04, 0x20181008 },
+ { 0x001034, 1, 0x04, 0x40383028 },
+ { 0x001038, 1, 0x04, 0x60585048 },
+ { 0x00103c, 1, 0x04, 0x80787068 },
+ { 0x000744, 1, 0x04, 0x00000000 },
+ { 0x002600, 1, 0x04, 0x00000000 },
+ { 0x001918, 1, 0x04, 0x00000000 },
+ { 0x00191c, 1, 0x04, 0x00000900 },
+ { 0x001920, 1, 0x04, 0x00000405 },
+ { 0x001308, 1, 0x04, 0x00000001 },
+ { 0x001924, 1, 0x04, 0x00000000 },
+ { 0x0013ac, 1, 0x04, 0x00000000 },
+ { 0x00192c, 1, 0x04, 0x00000001 },
+ { 0x00193c, 1, 0x04, 0x00002c1c },
+ { 0x000d7c, 1, 0x04, 0x00000000 },
+ { 0x000f8c, 1, 0x04, 0x00000000 },
+ { 0x0002c0, 1, 0x04, 0x00000001 },
+ { 0x001510, 1, 0x04, 0x00000000 },
+ { 0x001940, 1, 0x04, 0x00000000 },
+ { 0x000ff4, 2, 0x04, 0x00000000 },
+ { 0x00194c, 2, 0x04, 0x00000000 },
+ { 0x001968, 1, 0x04, 0x00000000 },
+ { 0x001590, 1, 0x04, 0x0000003f },
+ { 0x0007e8, 4, 0x04, 0x00000000 },
+ { 0x00196c, 1, 0x04, 0x00000011 },
+ { 0x0002e4, 1, 0x04, 0x0000b001 },
+ { 0x00036c, 2, 0x04, 0x00000000 },
+ { 0x00197c, 1, 0x04, 0x00000000 },
+ { 0x000fcc, 2, 0x04, 0x00000000 },
+ { 0x0002d8, 1, 0x04, 0x00000040 },
+ { 0x001980, 1, 0x04, 0x00000080 },
+ { 0x001504, 1, 0x04, 0x00000080 },
+ { 0x001984, 1, 0x04, 0x00000000 },
+ { 0x000f60, 1, 0x04, 0x00000000 },
+ { 0x000f64, 1, 0x04, 0x00400040 },
+ { 0x000f68, 1, 0x04, 0x00002212 },
+ { 0x000f6c, 1, 0x04, 0x08080203 },
+ { 0x001108, 1, 0x04, 0x00000008 },
+ { 0x000f70, 1, 0x04, 0x00080001 },
+ { 0x000ffc, 1, 0x04, 0x00000000 },
+ { 0x001134, 1, 0x04, 0x00000000 },
+ { 0x000f1c, 1, 0x04, 0x00000000 },
+ { 0x0011f8, 1, 0x04, 0x00000000 },
+ { 0x001138, 1, 0x04, 0x00000001 },
+ { 0x000300, 1, 0x04, 0x00000001 },
+ { 0x0013a8, 1, 0x04, 0x00000000 },
+ { 0x001224, 1, 0x04, 0x00000000 },
+ { 0x0012ec, 1, 0x04, 0x00000000 },
+ { 0x001310, 1, 0x04, 0x00000000 },
+ { 0x001314, 1, 0x04, 0x00000001 },
+ { 0x001380, 1, 0x04, 0x00000000 },
+ { 0x001384, 4, 0x04, 0x00000001 },
+ { 0x001394, 1, 0x04, 0x00000000 },
+ { 0x00139c, 1, 0x04, 0x00000000 },
+ { 0x001398, 1, 0x04, 0x00000000 },
+ { 0x001594, 1, 0x04, 0x00000000 },
+ { 0x001598, 4, 0x04, 0x00000001 },
+ { 0x000f54, 3, 0x04, 0x00000000 },
+ { 0x0019bc, 1, 0x04, 0x00000000 },
+ { 0x000f9c, 2, 0x04, 0x00000000 },
+ { 0x0012cc, 1, 0x04, 0x00000000 },
+ { 0x0012e8, 1, 0x04, 0x00000000 },
+ { 0x00130c, 1, 0x04, 0x00000001 },
+ { 0x001360, 8, 0x04, 0x00000000 },
+ { 0x00133c, 2, 0x04, 0x00000001 },
+ { 0x001344, 1, 0x04, 0x00000002 },
+ { 0x001348, 2, 0x04, 0x00000001 },
+ { 0x001350, 1, 0x04, 0x00000002 },
+ { 0x001358, 1, 0x04, 0x00000001 },
+ { 0x0012e4, 1, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
+ { 0x0019c0, 1, 0x04, 0x00000000 },
+ { 0x001140, 1, 0x04, 0x00000000 },
+ { 0x000dd0, 1, 0x04, 0x00000000 },
+ { 0x000dd4, 1, 0x04, 0x00000001 },
+ { 0x0002f4, 1, 0x04, 0x00000000 },
+ { 0x0019c4, 1, 0x04, 0x00000000 },
+ { 0x0019c8, 1, 0x04, 0x00001500 },
+ { 0x00135c, 1, 0x04, 0x00000000 },
+ { 0x000f90, 1, 0x04, 0x00000000 },
+ { 0x0019e0, 8, 0x04, 0x00000001 },
+ { 0x0019cc, 1, 0x04, 0x00000001 },
+ { 0x00111c, 1, 0x04, 0x00000001 },
+ { 0x0015b8, 1, 0x04, 0x00000000 },
+ { 0x001a00, 1, 0x04, 0x00001111 },
+ { 0x001a04, 7, 0x04, 0x00000000 },
+ { 0x000d6c, 2, 0x04, 0xffff0000 },
+ { 0x0010f8, 1, 0x04, 0x00001010 },
+ { 0x000d80, 5, 0x04, 0x00000000 },
+ { 0x000da0, 1, 0x04, 0x00000000 },
+ { 0x0007a4, 2, 0x04, 0x00000000 },
+ { 0x001508, 1, 0x04, 0x80000000 },
+ { 0x00150c, 1, 0x04, 0x40000000 },
+ { 0x001668, 1, 0x04, 0x00000000 },
+ { 0x000318, 2, 0x04, 0x00000008 },
+ { 0x000d9c, 1, 0x04, 0x00000001 },
+ { 0x000f14, 1, 0x04, 0x00000000 },
+ { 0x000374, 1, 0x04, 0x00000000 },
+ { 0x000378, 1, 0x04, 0x0000000c },
+ { 0x0007dc, 1, 0x04, 0x00000000 },
+ { 0x00074c, 1, 0x04, 0x00000055 },
+ { 0x001420, 1, 0x04, 0x00000003 },
+ { 0x001008, 1, 0x04, 0x00000008 },
+ { 0x00100c, 1, 0x04, 0x00000040 },
+ { 0x001010, 1, 0x04, 0x0000012c },
+ { 0x000d60, 1, 0x04, 0x00000040 },
+ { 0x001018, 1, 0x04, 0x00000020 },
+ { 0x00101c, 1, 0x04, 0x00000001 },
+ { 0x001020, 1, 0x04, 0x00000020 },
+ { 0x001024, 1, 0x04, 0x00000001 },
+ { 0x001444, 3, 0x04, 0x00000000 },
+ { 0x000360, 1, 0x04, 0x20164010 },
+ { 0x000364, 1, 0x04, 0x00000020 },
+ { 0x000368, 1, 0x04, 0x00000000 },
+ { 0x000da8, 1, 0x04, 0x00000030 },
+ { 0x000de4, 1, 0x04, 0x00000000 },
+ { 0x000204, 1, 0x04, 0x00000006 },
+ { 0x0002d0, 1, 0x04, 0x003fffff },
+ { 0x001220, 1, 0x04, 0x00000005 },
+ { 0x000fdc, 1, 0x04, 0x00000000 },
+ { 0x000f98, 1, 0x04, 0x00400008 },
+ { 0x001284, 1, 0x04, 0x08000080 },
+ { 0x001450, 1, 0x04, 0x00400008 },
+ { 0x001454, 1, 0x04, 0x08000080 },
+ { 0x000214, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_mthd[] = {
+ { gm204_grctx_init_b197_0, 0xb197 },
+ { gf100_grctx_init_902d_0, 0x902d },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_fe_0[] = {
+ { 0x404004, 8, 0x04, 0x00000000 },
+ { 0x404024, 1, 0x04, 0x0000e000 },
+ { 0x404028, 8, 0x04, 0x00000000 },
+ { 0x4040a8, 8, 0x04, 0x00000000 },
+ { 0x4040c8, 1, 0x04, 0xf801008f },
+ { 0x4040d0, 6, 0x04, 0x00000000 },
+ { 0x4040f8, 1, 0x04, 0x00000000 },
+ { 0x404100, 10, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
+ { 0x404150, 1, 0x04, 0x0000002e },
+ { 0x404154, 2, 0x04, 0x00000800 },
+ { 0x404164, 1, 0x04, 0x00000045 },
+ { 0x40417c, 2, 0x04, 0x00000000 },
+ { 0x404194, 1, 0x04, 0x33000700 },
+ { 0x4041a0, 4, 0x04, 0x00000000 },
+ { 0x4041c4, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_ds_0[] = {
+ { 0x405800, 1, 0x04, 0x8f8001bf },
+ { 0x405830, 1, 0x04, 0x04001000 },
+ { 0x405834, 1, 0x04, 0x08000000 },
+ { 0x405838, 1, 0x04, 0x00010000 },
+ { 0x405854, 1, 0x04, 0x00000000 },
+ { 0x405870, 4, 0x04, 0x00000001 },
+ { 0x405a00, 2, 0x04, 0x00000000 },
+ { 0x405a18, 1, 0x04, 0x00000000 },
+ { 0x405a1c, 1, 0x04, 0x000000ff },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_cwd_0[] = {
+ { 0x405b00, 1, 0x04, 0x00000000 },
+ { 0x405b10, 1, 0x04, 0x00001000 },
+ { 0x405b20, 1, 0x04, 0x04000000 },
+ { 0x405b60, 6, 0x04, 0x00000000 },
+ { 0x405ba0, 6, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_pd_0[] = {
+ { 0x406020, 1, 0x04, 0x17410001 },
+ { 0x406028, 4, 0x04, 0x00000001 },
+ { 0x4064a8, 1, 0x04, 0x00000000 },
+ { 0x4064ac, 1, 0x04, 0x00003fff },
+ { 0x4064b0, 3, 0x04, 0x00000000 },
+ { 0x4064c0, 1, 0x04, 0x80400280 },
+ { 0x4064c4, 1, 0x04, 0x0400ffff },
+ { 0x4064c8, 1, 0x04, 0x01800780 },
+ { 0x4064cc, 9, 0x04, 0x00000000 },
+ { 0x4064fc, 1, 0x04, 0x0000022a },
+ { 0x406500, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_be_0[] = {
+ { 0x408800, 1, 0x04, 0x32882a3c },
+ { 0x408804, 1, 0x04, 0x00000040 },
+ { 0x408808, 1, 0x04, 0x1003e005 },
+ { 0x408840, 1, 0x04, 0x00000e0b },
+ { 0x408900, 1, 0x04, 0xb080b801 },
+ { 0x408904, 1, 0x04, 0x63038001 },
+ { 0x408908, 1, 0x04, 0x12c8502f },
+ { 0x408980, 1, 0x04, 0x0000011d },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_hub[] = {
+ { gf100_grctx_init_main_0 },
+ { gm204_grctx_init_fe_0 },
+ { gk110_grctx_init_pri_0 },
+ { gk104_grctx_init_memfmt_0 },
+ { gm204_grctx_init_ds_0 },
+ { gm204_grctx_init_cwd_0 },
+ { gm204_grctx_init_pd_0 },
+ { gk208_grctx_init_rstr2d_0 },
+ { gk104_grctx_init_scc_0 },
+ { gm204_grctx_init_be_0 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_prop_0[] = {
+ { 0x418400, 1, 0x04, 0x38e01e00 },
+ { 0x418404, 1, 0x04, 0x70001fff },
+ { 0x41840c, 1, 0x04, 0x20001008 },
+ { 0x418410, 2, 0x04, 0x0fff0fff },
+ { 0x418418, 1, 0x04, 0x07ff07ff },
+ { 0x41841c, 1, 0x04, 0x3feffbff },
+ { 0x418450, 6, 0x04, 0x00000000 },
+ { 0x418468, 1, 0x04, 0x00000001 },
+ { 0x41846c, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_gpc_unk_1[] = {
+ { 0x418600, 1, 0x04, 0x0000007f },
+ { 0x418684, 1, 0x04, 0x0000001f },
+ { 0x418700, 1, 0x04, 0x00000002 },
+ { 0x418704, 1, 0x04, 0x00000080 },
+ { 0x418708, 1, 0x04, 0x40000000 },
+ { 0x41870c, 2, 0x04, 0x00000000 },
+ { 0x418728, 1, 0x04, 0x00010000 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_setup_0[] = {
+ { 0x418800, 1, 0x04, 0x7006863a },
+ { 0x418808, 1, 0x04, 0x00000000 },
+ { 0x418810, 1, 0x04, 0x00000000 },
+ { 0x418828, 1, 0x04, 0x00000044 },
+ { 0x418830, 1, 0x04, 0x10000001 },
+ { 0x4188d8, 1, 0x04, 0x00000008 },
+ { 0x4188e0, 1, 0x04, 0x01000000 },
+ { 0x4188e8, 5, 0x04, 0x00000000 },
+ { 0x4188fc, 1, 0x04, 0x20100058 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_gpm_0[] = {
+ { 0x418c10, 8, 0x04, 0x00000000 },
+ { 0x418c40, 1, 0x04, 0xffffffff },
+ { 0x418c6c, 1, 0x04, 0x00000001 },
+ { 0x418c80, 1, 0x04, 0x20200000 },
+ {}
+};
+
+const struct gf100_gr_init
+gm204_grctx_init_gpc_unk_2[] = {
+ { 0x418e00, 1, 0x04, 0x90040000 },
+ { 0x418e24, 1, 0x04, 0x00000000 },
+ { 0x418e28, 1, 0x04, 0x00000030 },
+ { 0x418e2c, 1, 0x04, 0x00000100 },
+ { 0x418e30, 3, 0x04, 0x00000000 },
+ { 0x418e40, 22, 0x04, 0x00000000 },
+ { 0x418ea0, 12, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_pack
+gm204_grctx_pack_gpc[] = {
+ { gm107_grctx_init_gpc_unk_0 },
+ { gm204_grctx_init_prop_0 },
+ { gm204_grctx_init_gpc_unk_1 },
+ { gm204_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gk208_grctx_init_crstr_0 },
+ { gm204_grctx_init_gpm_0 },
+ { gm204_grctx_init_gpc_unk_2 },
+ { gf100_grctx_init_gcc_0 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_pe_0[] = {
+ { 0x419848, 1, 0x04, 0x00000000 },
+ { 0x419864, 1, 0x04, 0x00000029 },
+ { 0x419888, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_tex_0[] = {
+ { 0x419a00, 1, 0x04, 0x000100f0 },
+ { 0x419a04, 1, 0x04, 0x00000005 },
+ { 0x419a08, 1, 0x04, 0x00000621 },
+ { 0x419a0c, 1, 0x04, 0x00320000 },
+ { 0x419a10, 1, 0x04, 0x00000000 },
+ { 0x419a14, 1, 0x04, 0x00000200 },
+ { 0x419a1c, 1, 0x04, 0x0010c000 },
+ { 0x419a20, 1, 0x04, 0x20008a00 },
+ { 0x419a30, 1, 0x04, 0x00000001 },
+ { 0x419a3c, 1, 0x04, 0x0000181e },
+ { 0x419ac4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_mpc_0[] = {
+ { 0x419c00, 1, 0x04, 0x0000009a },
+ { 0x419c04, 1, 0x04, 0x80000bd6 },
+ { 0x419c08, 1, 0x04, 0x00000002 },
+ { 0x419c20, 1, 0x04, 0x00000000 },
+ { 0x419c24, 1, 0x04, 0x00084210 },
+ { 0x419c28, 1, 0x04, 0x3efbefbe },
+ { 0x419c2c, 1, 0x04, 0x00000000 },
+ { 0x419c34, 1, 0x04, 0x71ff1ff3 },
+ { 0x419c3c, 1, 0x04, 0x00001919 },
+ { 0x419c50, 1, 0x04, 0x00000005 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_l1c_0[] = {
+ { 0x419c84, 1, 0x04, 0x0000003e },
+ { 0x419c90, 1, 0x04, 0x0000000a },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_sm_0[] = {
+ { 0x419e04, 3, 0x04, 0x00000000 },
+ { 0x419e10, 1, 0x04, 0x00001c02 },
+ { 0x419e44, 1, 0x04, 0x00d3eff2 },
+ { 0x419e48, 1, 0x04, 0x00000000 },
+ { 0x419e4c, 1, 0x04, 0x0000007f },
+ { 0x419e50, 1, 0x04, 0x00000000 },
+ { 0x419e58, 6, 0x04, 0x00000000 },
+ { 0x419e74, 10, 0x04, 0x00000000 },
+ { 0x419eac, 1, 0x04, 0x0001cf8b },
+ { 0x419eb0, 1, 0x04, 0x00030300 },
+ { 0x419eb8, 1, 0x04, 0x40000000 },
+ { 0x419ef0, 24, 0x04, 0x00000000 },
+ { 0x419f68, 2, 0x04, 0x00000000 },
+ { 0x419f70, 1, 0x04, 0x00000020 },
+ { 0x419f78, 1, 0x04, 0x00010beb },
+ { 0x419f7c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_tpc[] = {
+ { gm204_grctx_init_pe_0 },
+ { gm204_grctx_init_tex_0 },
+ { gm204_grctx_init_mpc_0 },
+ { gm204_grctx_init_l1c_0 },
+ { gm204_grctx_init_sm_0 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_pes_0[] = {
+ { 0x41be24, 1, 0x04, 0x0000000e },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_grctx_init_cbm_0[] = {
+ { 0x41bec0, 1, 0x04, 0x00000000 },
+ { 0x41bec4, 1, 0x04, 0x01030000 },
+ { 0x41bee4, 1, 0x04, 0x00000000 },
+ { 0x41bef0, 1, 0x04, 0x000003ff },
+ { 0x41bef4, 2, 0x04, 0x00000000 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_grctx_pack_ppc[] = {
+ { gm204_grctx_init_pes_0 },
+ { gm204_grctx_init_cbm_0 },
+ { gm107_grctx_init_wwdx_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
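+/* assign each present TPC a sequential ID, iterating TPC index first so
+ * the IDs are spread across GPCs rather than packed per-GPC
+ */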
+static void
+gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+{
+ int gpc, tpc, id;
+
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+ id++;
+ }
+ }
+ }
+}
+
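+/* as per gk104_grctx_generate_rop_active_fbps(), but reading the FBP count
+ * from 0x12006c instead of 0x120074
+ */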
+static void
+gm204_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+{
+ const u32 fbp_count = nv_rd32(priv, 0x12006c);
+ nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
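+/* build the global-TPC -> (gpc,tpc) table written to 0x405b60, and the
+ * per-GPC table of global TPC indices written to 0x405ba0
+ */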
+static void
+gm204_grctx_generate_405b60(struct gf100_gr_priv *priv)
+{
+ const u32 dist_nr = DIV_ROUND_UP(priv->tpc_total, 4);
+ u32 dist[TPC_MAX] = {};
+ u32 gpcs[GPC_MAX] = {};
+ u8 tpcnr[GPC_MAX];
+ int tpc, gpc, i;
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ /* this won't match the binary driver's distribution on parts where
+ * some GPCs have more TPCs than others, but it will do for now.
+ * the code for earlier GPUs has the same issue.
+ */
+ for (gpc = -1, i = 0; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+ gpcs[gpc] |= i << (tpc * 8);
+ }
+
+ for (i = 0; i < dist_nr; i++)
+ nv_wr32(priv, 0x405b60 + (i * 4), dist[i]);
+ for (i = 0; i < priv->gpc_nr; i++)
+ nv_wr32(priv, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
+void
+gm204_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+{
+ struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ u32 tmp;
+ int i;
+
+ gf100_gr_mmio(priv, oclass->hub);
+ gf100_gr_mmio(priv, oclass->gpc);
+ gf100_gr_mmio(priv, oclass->zcull);
+ gf100_gr_mmio(priv, oclass->tpc);
+ gf100_gr_mmio(priv, oclass->ppc);
+
+ nv_wr32(priv, 0x404154, 0x00000000);
+
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
+ oclass->unkn(priv);
+
+ gm204_grctx_generate_tpcid(priv);
+ gf100_grctx_generate_r406028(priv);
+ gk104_grctx_generate_r418bb8(priv);
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+ nv_wr32(priv, 0x406500, 0x00000000);
+
+ nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+
+ gm204_grctx_generate_rop_active_fbps(priv);
+
+ for (tmp = 0, i = 0; i < priv->gpc_nr; i++)
+ tmp |= ((1 << priv->tpc_nr[i]) - 1) << (i * 4);
+ nv_wr32(priv, 0x4041c4, tmp);
+
+ gm204_grctx_generate_405b60(priv);
+
+ gf100_gr_icmd(priv, oclass->icmd);
+ nv_wr32(priv, 0x404154, 0x00000800);
+ gf100_gr_mthd(priv, oclass->mthd);
+
+ nv_mask(priv, 0x418e94, 0xffffffff, 0xc4230000);
+ nv_mask(priv, 0x418e4c, 0xffffffff, 0x70000000);
+}
+
+struct nvkm_oclass *
+gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0x24),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
+ },
+ .main = gm204_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gm204_grctx_pack_hub,
+ .gpc = gm204_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gm204_grctx_pack_tpc,
+ .ppc = gm204_grctx_pack_ppc,
+ .icmd = gm204_grctx_pack_icmd,
+ .mthd = gm204_grctx_pack_mthd,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x780,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0x600,
+ .attrib_nr = 0x400,
+ .alpha_nr_max = 0x1800,
+ .alpha_nr = 0x1000,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
new file mode 100644
index 000000000000..91ec41617943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+static const struct gf100_gr_init
+gm206_grctx_init_gpc_unk_1[] = {
+ { 0x418600, 1, 0x04, 0x0000007f },
+ { 0x418684, 1, 0x04, 0x0000001f },
+ { 0x418700, 1, 0x04, 0x00000002 },
+ { 0x418704, 1, 0x04, 0x00000080 },
+ { 0x418708, 1, 0x04, 0x40000000 },
+ { 0x41870c, 2, 0x04, 0x00000000 },
+ { 0x418728, 1, 0x04, 0x00300020 },
+ {}
+};
+
+static const struct gf100_gr_pack
+gm206_grctx_pack_gpc[] = {
+ { gm107_grctx_init_gpc_unk_0 },
+ { gm204_grctx_init_prop_0 },
+ { gm206_grctx_init_gpc_unk_1 },
+ { gm204_grctx_init_setup_0 },
+ { gf100_grctx_init_zcull_0 },
+ { gk208_grctx_init_crstr_0 },
+ { gm204_grctx_init_gpm_0 },
+ { gm204_grctx_init_gpc_unk_2 },
+ { gf100_grctx_init_gcc_0 },
+ {}
+};
+
+struct nvkm_oclass *
+gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0x26),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_context_ctor,
+ .dtor = gf100_gr_context_dtor,
+ .init = _nvkm_gr_context_init,
+ .fini = _nvkm_gr_context_fini,
+ .rd32 = _nvkm_gr_context_rd32,
+ .wr32 = _nvkm_gr_context_wr32,
+ },
+ .main = gm204_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .hub = gm204_grctx_pack_hub,
+ .gpc = gm206_grctx_pack_gpc,
+ .zcull = gf100_grctx_pack_zcull,
+ .tpc = gm204_grctx_pack_tpc,
+ .ppc = gm204_grctx_pack_ppc,
+ .icmd = gm204_grctx_pack_icmd,
+ .mthd = gm204_grctx_pack_mthd,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x780,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0x600,
+ .attrib_nr = 0x400,
+ .alpha_nr_max = 0x1800,
+ .alpha_nr = 0x1000,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index eaed1599b90f..194afe910d21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -52,6 +52,12 @@ mmio_list_base:
#endif
#ifdef INCLUDE_CODE
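+// write a value to a GPC-relative mmio address, routed through the mmio
+// window selected by NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE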
+#define gpc_wr32(addr,reg) /*
+*/ mov b32 $r15 reg /*
+*/ imm32($r14, addr) /*
+*/ or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /*
+*/ call(nv_wr32)
+
// reports an exception to the host
//
// In: $r15 error code (see os.h)
@@ -64,6 +70,43 @@ error:
pop $r14
ret
+#if CHIPSET >= GM107
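+// spin until NV_PGRAPH_GPCX_GPCCS_TPC_STATUS reads back as zero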
+tpc_strand_wait:
+ push $r9
+ trace_set(T_STRTPC)
+ tpc_strand_busy:
+ nv_iord($r9, NV_PGRAPH_GPCX_GPCCS_TPC_STATUS, 0)
+ bra b32 $r9 0x0 ne #tpc_strand_busy
+ trace_clr(T_STRTPC)
+ pop $r9
+ ret
+
+#define tpc_strand_wait() call(tpc_strand_wait)
+#define tpc_strand_enable() /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_ENABLE /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ tpc_strand_wait()
+#define tpc_strand_disable() /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_DISABLE /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ tpc_strand_wait()
+#define tpc_strand_seek(p) /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_INDEX, $r15) /*
+*/ mov $r15 p /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_SELECT, $r15) /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SEEK /*
+*/ tpc_strand_wait()
+#define tpc_strand_info(m) /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ mov $r15 m /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_DATA, $r15) /*
+*/ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_GET_INFO /*
+*/ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15) /*
+*/ tpc_strand_wait()
+#endif
+
+
// GPC fuc initialisation, executed by triggering ucode start, will
// fall through to main loop after completion.
//
@@ -101,7 +144,7 @@ init:
// enable interrupts
bset $flags ie0
- // figure out which GPC we are, and how many TPCs we have
+ // how many TPCs do we have?
nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_UNITS, 0)
mov $r3 1
and $r2 0x1f
@@ -109,8 +152,12 @@ init:
sub b32 $r3 1
st b32 D[$r0 + #tpc_count] $r2
st b32 D[$r0 + #tpc_mask] $r3
+
+ // determine which GPC we are, and set up the (optional) mmio access offset
nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_MYINDEX, 0)
st b32 D[$r0 + #gpc_id] $r2
+ shl b32 $r2 15
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMIO_BASE, 0, $r2)
#if NV_PGRAPH_GPCX_UNK__SIZE > 0
// figure out which, and how many, UNKs are actually present
@@ -186,8 +233,56 @@ init:
// calculate size of strand context data
mov b32 $r15 $r2
call(strand_ctx_init)
+ add b32 $r2 $r15
add b32 $r3 $r15
+#if CHIPSET >= GM107
+ // calculate size of tpc strand context data
+ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_INDEX, $r15)
+ tpc_strand_enable();
+ tpc_strand_seek(0);
+ tpc_strand_info(-1);
+
+ ld b32 $r4 D[$r0 + #tpc_count]
+ mov $r5 NV_PGRAPH_GPC0_TPC0
+ ld b32 $r6 D[$r0 + #gpc_id]
+ shl b32 $r6 15
+ add b32 $r5 $r6
+ tpc_strand_init_tpc_loop:
+ add b32 $r14 $r5 NV_TPC_STRAND_CNT
+ call(nv_rd32)
+ mov b32 $r6 $r15
+ clear b32 $r7
+ tpc_strand_init_idx_loop:
+ add b32 $r14 $r5 NV_TPC_STRAND_INDEX
+ mov b32 $r15 $r7
+ call(nv_wr32)
+ add b32 $r14 $r5 NV_TPC_STRAND_SAVE_SWBASE
+ shr b32 $r15 $r2 8
+ call(nv_wr32)
+ add b32 $r14 $r5 NV_TPC_STRAND_LOAD_SWBASE
+ shr b32 $r15 $r2 8
+ call(nv_wr32)
+ add b32 $r14 $r5 NV_TPC_STRAND_WORDS
+ call(nv_rd32)
+ shr b32 $r15 6
+ add b32 $r15 1
+ shl b32 $r15 8
+ add b32 $r2 $r15
+ add b32 $r3 $r15
+ add b32 $r7 1
+ sub b32 $r6 1
+ bra nz #tpc_strand_init_idx_loop
+ add b32 $r5 NV_PGRAPH_GPC0_TPC0__SIZE
+ sub b32 $r4 1
+ bra nz #tpc_strand_init_tpc_loop
+
+ mov $r15 NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_INDEX, $r15)
+ tpc_strand_disable();
+#endif
+
// save context size, and tell HUB we're done
nv_iowr(NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(1), 0, $r3)
clear b32 $r2
@@ -306,6 +401,9 @@ ctx_redswitch:
ctx_xfer:
// set context base address
nv_iowr(NV_PGRAPH_GPCX_GPCCS_MEM_BASE, 0, $r15)
+#if CHIPSET >= GM107
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_MEM_BASE, $r15)
+#endif
bra not $p1 #ctx_xfer_not_load
call(ctx_redswitch)
ctx_xfer_not_load:
@@ -318,6 +416,14 @@ ctx_xfer:
add b32 $r2 NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE
nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_CMD, 0x3f, $r2)
+#if CHIPSET >= GM107
+ tpc_strand_enable();
+ tpc_strand_seek(0);
+ xbit $r15 $flags $p1 // SAVE/LOAD
+ add b32 $r15 NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SAVE
+ gpc_wr32(NV_PGRAPH_GPC0_TPCX_STRAND_CMD, $r15)
+#endif
+
// mmio context
xbit $r10 $flags $p1 // direction
or $r10 2 // first
@@ -362,6 +468,9 @@ ctx_xfer:
// wait for strands to finish
call(strand_wait)
+#if CHIPSET >= GM107
+ tpc_strand_wait()
+#endif
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
@@ -370,6 +479,9 @@ ctx_xfer:
bra not $p2 #ctx_xfer_done
ctx_xfer_post:
call(strand_post)
+#if CHIPSET >= GM107
+ tpc_strand_disable()
+#endif
// mark completion in HUB's barrier
ctx_xfer_done:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index ea32f56c0a92..231f696d1e0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -310,7 +310,7 @@ uint32_t gf100_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe04e6,
+ 0x10fe04f8,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -329,157 +329,157 @@ uint32_t gf100_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x0235b600,
- 0xd30007f1,
- 0xd00103f0,
- 0x04bd0003,
- 0xb60825b6,
- 0x20b60635,
- 0x0130b601,
- 0xb60824b6,
- 0x2fb90834,
- 0xd321f502,
- 0x003fbb02,
- 0x010007f1,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0xf10235b6,
+ 0xf0d30007,
+ 0x03d00103,
+ 0xb604bd00,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x02d321f5,
+ 0xbb002fbb,
+ 0x07f1003f,
+ 0x03f00100,
+ 0x0003d002,
+ 0x24bd04bd,
+ 0xf11f29f0,
+ 0xf0080007,
+ 0x02d00203,
+/* 0x04bb: main */
+ 0xf404bd00,
+ 0x28f40031,
+ 0x1cd7f000,
+ 0xf43921f4,
+ 0xe4b0f401,
+ 0x1e18f404,
+ 0xf00181fe,
+ 0x20bd0627,
+ 0xb60412fd,
+ 0x1efd01e4,
+ 0x0018fe05,
+ 0x05b021f5,
+/* 0x04eb: main_not_ctx_xfer */
+ 0x94d30ef4,
+ 0xf5f010ef,
+ 0x7e21f501,
+ 0xc60ef403,
+/* 0x04f8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x1cd7f02c,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
+ 0xf00421f4,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x0546: ih_no_fifo */
+ 0x07f104bd,
+ 0x03f00100,
+ 0x000ad000,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x056a: hub_barrier_done */
+ 0xf7f001f8,
+ 0x040e9801,
+ 0xb904febb,
+ 0xe7f102ff,
+ 0xe3f09418,
+ 0x9d21f440,
+/* 0x0582: ctx_redswitch */
+ 0xf7f000f8,
+ 0x0007f120,
+ 0x0103f085,
+ 0xbd000fd0,
+ 0x08e7f004,
+/* 0x0594: ctx_redswitch_delay */
+ 0xf401e2b6,
+ 0xf5f1fd1b,
+ 0xf5f10800,
+ 0x07f10200,
+ 0x03f08500,
+ 0x000fd001,
+ 0x00f804bd,
+/* 0x05b0: ctx_xfer */
+ 0x810007f1,
0xd00203f0,
- 0x04bd0003,
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
-/* 0x04a9: main */
- 0x0031f404,
- 0xf00028f4,
- 0x21f41cd7,
- 0xf401f439,
- 0xf404e4b0,
- 0x81fe1e18,
- 0x0627f001,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x21f50018,
- 0x0ef4059e,
-/* 0x04d9: main_not_ctx_xfer */
- 0x10ef94d3,
- 0xf501f5f0,
- 0xf4037e21,
-/* 0x04e6: ih */
- 0x80f9c60e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0xf104bdf0,
- 0xf00200a7,
- 0xaacf00a3,
- 0x04abc400,
- 0xf02c0bf4,
- 0xe7f11cd7,
- 0xe3f01a00,
- 0x00eecf00,
- 0x1900f7f1,
- 0xcf00f3f0,
- 0x21f400ff,
- 0x01e7f004,
- 0x1d0007f1,
- 0xd00003f0,
- 0x04bd000e,
-/* 0x0534: ih_no_fifo */
- 0x010007f1,
- 0xd00003f0,
- 0x04bd000a,
- 0xe0fcf0fc,
- 0xb0fcd0fc,
- 0x90fca0fc,
- 0x88fe80fc,
- 0xf480fc00,
- 0x01f80032,
-/* 0x0558: hub_barrier_done */
- 0x9801f7f0,
- 0xfebb040e,
- 0x02ffb904,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f89d21,
-/* 0x0570: ctx_redswitch */
- 0xf120f7f0,
- 0xf0850007,
- 0x0fd00103,
- 0xf004bd00,
-/* 0x0582: ctx_redswitch_delay */
- 0xe2b608e7,
- 0xfd1bf401,
- 0x0800f5f1,
- 0x0200f5f1,
- 0x850007f1,
- 0xd00103f0,
0x04bd000f,
-/* 0x059e: ctx_xfer */
- 0x07f100f8,
- 0x03f08100,
- 0x000fd002,
- 0x11f404bd,
- 0x7021f507,
-/* 0x05b1: ctx_xfer_not_load */
- 0x6a21f505,
- 0xf124bd02,
- 0xf047fc07,
+ 0xf50711f4,
+/* 0x05c3: ctx_xfer_not_load */
+ 0xf5058221,
+ 0xbd026a21,
+ 0xfc07f124,
+ 0x0203f047,
+ 0xbd0002d0,
+ 0x012cf004,
+ 0xf10320b6,
+ 0xf04afc07,
0x02d00203,
0xf004bd00,
- 0x20b6012c,
- 0xfc07f103,
- 0x0203f04a,
- 0xbd0002d0,
- 0x01acf004,
- 0xf102a5f0,
- 0xf00000b7,
- 0x0c9850b3,
- 0x0fc4b604,
- 0x9800bcbb,
- 0x0d98000c,
- 0x00e7f001,
- 0x016f21f5,
- 0xf001acf0,
- 0xb7f104a5,
- 0xb3f04000,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x020d9801,
- 0xf1060f98,
- 0xf50800e7,
- 0xf5016f21,
- 0xf4025e21,
- 0x12f40601,
-/* 0x0629: ctx_xfer_post */
- 0x7f21f507,
-/* 0x062d: ctx_xfer_done */
- 0x5821f502,
- 0x0000f805,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xa5f001ac,
+ 0x00b7f102,
+ 0x50b3f000,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xacf0016f,
+ 0x04a5f001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x5e21f501,
+ 0x0601f402,
+/* 0x063b: ctx_xfer_post */
+ 0xf50712f4,
+/* 0x063f: ctx_xfer_done */
+ 0xf5027f21,
+ 0xf8056a21,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 9a36d9cbb8a5..64d07df4b8b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe0530,
+ 0x10fe0542,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -333,188 +333,188 @@ uint32_t gf117_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0421: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0436: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0442: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x020e9800,
- 0xf5030f98,
- 0x98015021,
- 0xeffd070e,
- 0x002ebb00,
- 0xb6003ebb,
- 0x07f10235,
- 0x03f0d300,
- 0x0003d001,
- 0x25b604bd,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb02d321,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x04f3: main */
- 0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x05e821f5,
-/* 0x0523: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0x7e21f501,
- 0xc60ef403,
-/* 0x0530: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x24d7f02c,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xf00421f4,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x057e: ih_no_fifo */
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0430: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0445: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0451: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
0x07f104bd,
- 0x03f00100,
- 0x000ad000,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x05a2: hub_barrier_done */
- 0xf7f001f8,
- 0x040e9801,
- 0xb904febb,
- 0xe7f102ff,
- 0xe3f09418,
- 0x9d21f440,
-/* 0x05ba: ctx_redswitch */
- 0xf7f000f8,
- 0x0007f120,
- 0x0103f085,
- 0xbd000fd0,
- 0x08e7f004,
-/* 0x05cc: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x07f10200,
- 0x03f08500,
- 0x000fd001,
- 0x00f804bd,
-/* 0x05e8: ctx_xfer */
- 0x810007f1,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x2fbb02d3,
+ 0x003fbb00,
+ 0x010007f1,
0xd00203f0,
- 0x04bd000f,
- 0xf50711f4,
-/* 0x05fb: ctx_xfer_not_load */
- 0xf505ba21,
- 0xbd026a21,
- 0xfc07f124,
- 0x0203f047,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x0007f11f,
+ 0x0203f008,
0xbd0002d0,
- 0x012cf004,
- 0xf10320b6,
- 0xf04afc07,
+/* 0x0505: main */
+ 0x0031f404,
+ 0xf00028f4,
+ 0x21f424d7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef405fa,
+/* 0x0535: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf4037e21,
+/* 0x0542: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f124d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0590: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x05b4: hub_barrier_done */
+ 0x9801f7f0,
+ 0xfebb040e,
+ 0x02ffb904,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f89d21,
+/* 0x05cc: ctx_redswitch */
+ 0xf120f7f0,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf004bd00,
+/* 0x05de: ctx_redswitch_delay */
+ 0xe2b608e7,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05fa: ctx_xfer */
+ 0x07f100f8,
+ 0x03f08100,
+ 0x000fd002,
+ 0x11f404bd,
+ 0xcc21f507,
+/* 0x060d: ctx_xfer_not_load */
+ 0x6a21f505,
+ 0xf124bd02,
+ 0xf047fc07,
0x02d00203,
0xf004bd00,
- 0xa5f001ac,
- 0x00b7f102,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf0016f,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf0016f,
- 0x04a5f001,
- 0x3000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6f21f502,
- 0x5e21f501,
- 0x0601f402,
-/* 0x0697: ctx_xfer_post */
- 0xf50712f4,
-/* 0x069b: ctx_xfer_done */
- 0xf5027f21,
- 0xf805a221,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x20b6012c,
+ 0xfc07f103,
+ 0x0203f04a,
+ 0xbd0002d0,
+ 0x01acf004,
+ 0xf102a5f0,
+ 0xf00000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf101acf0,
+ 0xf04000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x0800e7f1,
+ 0x016f21f5,
+ 0xf001acf0,
+ 0xb7f104a5,
+ 0xb3f03000,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x030d9802,
+ 0xf1080f98,
+ 0xf50200e7,
+ 0xf5016f21,
+ 0xf4025e21,
+ 0x12f40601,
+/* 0x06a9: ctx_xfer_post */
+ 0x7f21f507,
+/* 0x06ad: ctx_xfer_done */
+ 0xb421f502,
+ 0x0000f805,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 49020fff4317..2f596433c222 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe0530,
+ 0x10fe0542,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -333,188 +333,188 @@ uint32_t gk104_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0421: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0436: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0442: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x020e9800,
- 0xf5030f98,
- 0x98015021,
- 0xeffd070e,
- 0x002ebb00,
- 0xb6003ebb,
- 0x07f10235,
- 0x03f0d300,
- 0x0003d001,
- 0x25b604bd,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb02d321,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x04f3: main */
- 0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x05e821f5,
-/* 0x0523: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0x7e21f501,
- 0xc60ef403,
-/* 0x0530: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x24d7f02c,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xf00421f4,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x057e: ih_no_fifo */
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0430: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0445: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0451: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
0x07f104bd,
- 0x03f00100,
- 0x000ad000,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x05a2: hub_barrier_done */
- 0xf7f001f8,
- 0x040e9801,
- 0xb904febb,
- 0xe7f102ff,
- 0xe3f09418,
- 0x9d21f440,
-/* 0x05ba: ctx_redswitch */
- 0xf7f000f8,
- 0x0007f120,
- 0x0103f085,
- 0xbd000fd0,
- 0x08e7f004,
-/* 0x05cc: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x07f10200,
- 0x03f08500,
- 0x000fd001,
- 0x00f804bd,
-/* 0x05e8: ctx_xfer */
- 0x810007f1,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x2fbb02d3,
+ 0x003fbb00,
+ 0x010007f1,
0xd00203f0,
- 0x04bd000f,
- 0xf50711f4,
-/* 0x05fb: ctx_xfer_not_load */
- 0xf505ba21,
- 0xbd026a21,
- 0xfc07f124,
- 0x0203f047,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x0007f11f,
+ 0x0203f008,
0xbd0002d0,
- 0x012cf004,
- 0xf10320b6,
- 0xf04afc07,
+/* 0x0505: main */
+ 0x0031f404,
+ 0xf00028f4,
+ 0x21f424d7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef405fa,
+/* 0x0535: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf4037e21,
+/* 0x0542: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f124d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0590: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x05b4: hub_barrier_done */
+ 0x9801f7f0,
+ 0xfebb040e,
+ 0x02ffb904,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f89d21,
+/* 0x05cc: ctx_redswitch */
+ 0xf120f7f0,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf004bd00,
+/* 0x05de: ctx_redswitch_delay */
+ 0xe2b608e7,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05fa: ctx_xfer */
+ 0x07f100f8,
+ 0x03f08100,
+ 0x000fd002,
+ 0x11f404bd,
+ 0xcc21f507,
+/* 0x060d: ctx_xfer_not_load */
+ 0x6a21f505,
+ 0xf124bd02,
+ 0xf047fc07,
0x02d00203,
0xf004bd00,
- 0xa5f001ac,
- 0x00b7f102,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf0016f,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf0016f,
- 0x04a5f001,
- 0x3000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6f21f502,
- 0x5e21f501,
- 0x0601f402,
-/* 0x0697: ctx_xfer_post */
- 0xf50712f4,
-/* 0x069b: ctx_xfer_done */
- 0xf5027f21,
- 0xf805a221,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x20b6012c,
+ 0xfc07f103,
+ 0x0203f04a,
+ 0xbd0002d0,
+ 0x01acf004,
+ 0xf102a5f0,
+ 0xf00000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf101acf0,
+ 0xf04000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x0800e7f1,
+ 0x016f21f5,
+ 0xf001acf0,
+ 0xb7f104a5,
+ 0xb3f03000,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x030d9802,
+ 0xf1080f98,
+ 0xf50200e7,
+ 0xf5016f21,
+ 0xf4025e21,
+ 0x12f40601,
+/* 0x06a9: ctx_xfer_post */
+ 0x7f21f507,
+/* 0x06ad: ctx_xfer_done */
+ 0xb421f502,
+ 0x0000f805,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index c95b07e3bce5..ee8e54db8fc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = {
0x03f01200,
0x0002d000,
0x17f104bd,
- 0x10fe0530,
+ 0x10fe0542,
0x0007f100,
0x0003f007,
0xbd0000d0,
@@ -333,188 +333,188 @@ uint32_t gk110_grgpc_code[] = {
0xf0860027,
0x22cf0123,
0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0421: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0436: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40226b0,
-/* 0x0442: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0xf1082595,
- 0xf0c00007,
- 0x05d00103,
+ 0xf10f24b6,
+ 0xf0c90007,
+ 0x02d00103,
0xf104bd00,
- 0xf0c10007,
- 0x05d00103,
- 0x9804bd00,
- 0x0f98000e,
- 0x5021f501,
- 0x002fbb01,
- 0x98003fbb,
- 0x0f98010e,
- 0x5021f502,
- 0x050e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x020e9800,
- 0xf5030f98,
- 0x98015021,
- 0xeffd070e,
- 0x002ebb00,
- 0xb6003ebb,
- 0x07f10235,
- 0x03f0d300,
- 0x0003d001,
- 0x25b604bd,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb02d321,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0300007,
- 0x02d00203,
-/* 0x04f3: main */
- 0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x05e821f5,
-/* 0x0523: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0x7e21f501,
- 0xc60ef403,
-/* 0x0530: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x24d7f02c,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xf00421f4,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x057e: ih_no_fifo */
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0430: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0445: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40226,
+/* 0x0451: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
0x07f104bd,
- 0x03f00100,
- 0x000ad000,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x05a2: hub_barrier_done */
- 0xf7f001f8,
- 0x040e9801,
- 0xb904febb,
- 0xe7f102ff,
- 0xe3f09418,
- 0x9d21f440,
-/* 0x05ba: ctx_redswitch */
- 0xf7f000f8,
- 0x0007f120,
- 0x0103f085,
- 0xbd000fd0,
- 0x08e7f004,
-/* 0x05cc: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x07f10200,
- 0x03f08500,
- 0x000fd001,
- 0x00f804bd,
-/* 0x05e8: ctx_xfer */
- 0x810007f1,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x2fbb02d3,
+ 0x003fbb00,
+ 0x010007f1,
0xd00203f0,
- 0x04bd000f,
- 0xf50711f4,
-/* 0x05fb: ctx_xfer_not_load */
- 0xf505ba21,
- 0xbd026a21,
- 0xfc07f124,
- 0x0203f047,
+ 0x04bd0003,
+ 0x29f024bd,
+ 0x0007f11f,
+ 0x0203f030,
0xbd0002d0,
- 0x012cf004,
- 0xf10320b6,
- 0xf04afc07,
+/* 0x0505: main */
+ 0x0031f404,
+ 0xf00028f4,
+ 0x21f424d7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef405fa,
+/* 0x0535: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf4037e21,
+/* 0x0542: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f124d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0590: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x05b4: hub_barrier_done */
+ 0x9801f7f0,
+ 0xfebb040e,
+ 0x02ffb904,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f89d21,
+/* 0x05cc: ctx_redswitch */
+ 0xf120f7f0,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf004bd00,
+/* 0x05de: ctx_redswitch_delay */
+ 0xe2b608e7,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05fa: ctx_xfer */
+ 0x07f100f8,
+ 0x03f08100,
+ 0x000fd002,
+ 0x11f404bd,
+ 0xcc21f507,
+/* 0x060d: ctx_xfer_not_load */
+ 0x6a21f505,
+ 0xf124bd02,
+ 0xf047fc07,
0x02d00203,
0xf004bd00,
- 0xa5f001ac,
- 0x00b7f102,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf0016f,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf0016f,
- 0x04a5f001,
- 0x3000b7f1,
- 0x9850b3f0,
- 0xc4b6040c,
- 0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6f21f502,
- 0x5e21f501,
- 0x0601f402,
-/* 0x0697: ctx_xfer_post */
- 0xf50712f4,
-/* 0x069b: ctx_xfer_done */
- 0xf5027f21,
- 0xf805a221,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x20b6012c,
+ 0xfc07f103,
+ 0x0203f04a,
+ 0xbd0002d0,
+ 0x01acf004,
+ 0xf102a5f0,
+ 0xf00000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf101acf0,
+ 0xf04000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x0800e7f1,
+ 0x016f21f5,
+ 0xf001acf0,
+ 0xb7f104a5,
+ 0xb3f03000,
+ 0x040c9850,
+ 0xbb0fc4b6,
+ 0x0c9800bc,
+ 0x030d9802,
+ 0xf1080f98,
+ 0xf50200e7,
+ 0xf5016f21,
+ 0xf4025e21,
+ 0x12f40601,
+/* 0x06a9: ctx_xfer_post */
+ 0x7f21f507,
+/* 0x06ad: ctx_xfer_done */
+ 0xb421f502,
+ 0x0000f805,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 7e1c28ee7591..fbcc342f896f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = {
0x02020014,
0xf6120040,
0x04bd0002,
- 0xfe047241,
+ 0xfe048141,
0x00400010,
0x0000f607,
0x040204bd,
@@ -291,20 +291,23 @@ uint32_t gk208_grgpc_code[] = {
0x820603b5,
0xcf018600,
0x02b50022,
+ 0x0f24b604,
+ 0x01c90080,
+ 0xbd0002f6,
0x0c308e04,
0xbd24bd50,
-/* 0x0377: init_unk_loop */
+/* 0x0383: init_unk_loop */
0x7e44bd34,
0xb0000065,
0x0bf400f6,
0xbb010f0e,
0x4ffd04f2,
0x0130b605,
-/* 0x038c: init_unk_next */
+/* 0x0398: init_unk_next */
0xb60120b6,
0x26b004e0,
0xe21bf401,
-/* 0x0398: init_unk_done */
+/* 0x03a4: init_unk_done */
0xb50703b5,
0x00820804,
0x22cf0201,
@@ -338,121 +341,118 @@ uint32_t gk208_grgpc_code[] = {
0xb60824b6,
0x2fb20834,
0x0002687e,
- 0x80003fbb,
- 0xf6020100,
- 0x04bd0003,
- 0x29f024bd,
- 0x3000801f,
- 0x0002f602,
-/* 0x0436: main */
- 0x31f404bd,
- 0x0028f400,
- 0x377e240d,
- 0x01f40000,
- 0x04e4b0f4,
- 0xfe1d18f4,
- 0x06020181,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x097e0018,
- 0x0ef40005,
-/* 0x0465: main_not_ctx_xfer */
- 0x10ef94d4,
- 0x7e01f5f0,
- 0xf40002f8,
-/* 0x0472: ih */
- 0x80f9c70e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0x4a04bdf0,
- 0xaacf0200,
- 0x04abc400,
- 0x0d1f0bf4,
- 0x1a004e24,
- 0x4f00eecf,
- 0xffcf1900,
- 0x00047e00,
- 0x40010e00,
- 0x0ef61d00,
-/* 0x04af: ih_no_fifo */
- 0x4004bd00,
- 0x0af60100,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04cf: hub_barrier_done */
- 0x0f01f800,
- 0x040e9801,
- 0xb204febb,
- 0x94188eff,
- 0x008f7e40,
-/* 0x04e3: ctx_redswitch */
- 0x0f00f800,
- 0x85008020,
+ 0xbb002fbb,
+ 0x0080003f,
+ 0x03f60201,
+ 0xbd04bd00,
+ 0x1f29f024,
+ 0x02300080,
+ 0xbd0002f6,
+/* 0x0445: main */
+ 0x0031f404,
+ 0x0d0028f4,
+ 0x00377e24,
+ 0xf401f400,
+ 0xf404e4b0,
+ 0x81fe1d18,
+ 0xbd060201,
+ 0x0412fd20,
+ 0xfd01e4b6,
+ 0x18fe051e,
+ 0x05187e00,
+ 0xd40ef400,
+/* 0x0474: main_not_ctx_xfer */
+ 0xf010ef94,
+ 0xf87e01f5,
+ 0x0ef40002,
+/* 0x0481: ih */
+ 0xfe80f9c7,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0x004a04bd,
+ 0x00aacf02,
+ 0xf404abc4,
+ 0x240d1f0b,
+ 0xcf1a004e,
+ 0x004f00ee,
+ 0x00ffcf19,
+ 0x0000047e,
+ 0x0040010e,
+ 0x000ef61d,
+/* 0x04be: ih_no_fifo */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x04de: hub_barrier_done */
+ 0x010f01f8,
+ 0xbb040e98,
+ 0xffb204fe,
+ 0x4094188e,
+ 0x00008f7e,
+/* 0x04f2: ctx_redswitch */
+ 0x200f00f8,
+ 0x01850080,
+ 0xbd000ff6,
+/* 0x04ff: ctx_redswitch_delay */
+ 0xb6080e04,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x85008002,
0x000ff601,
- 0x080e04bd,
-/* 0x04f0: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x00800200,
- 0x0ff60185,
- 0xf804bd00,
-/* 0x0509: ctx_xfer */
- 0x81008000,
- 0x000ff602,
- 0x11f404bd,
- 0x04e37e07,
-/* 0x0519: ctx_xfer_not_load */
- 0x02167e00,
- 0x8024bd00,
- 0xf60247fc,
- 0x04bd0002,
- 0xb6012cf0,
- 0xfc800320,
- 0x02f6024a,
+ 0x00f804bd,
+/* 0x0518: ctx_xfer */
+ 0x02810080,
+ 0xbd000ff6,
+ 0x0711f404,
+ 0x0004f27e,
+/* 0x0528: ctx_xfer_not_load */
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
0xf004bd00,
- 0xa5f001ac,
- 0x00008b02,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x010d9800,
- 0x3d7e000e,
- 0xacf00001,
- 0x40008b01,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x020d9801,
- 0x4e060f98,
- 0x3d7e0800,
- 0xacf00001,
- 0x04a5f001,
- 0x5030008b,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x5000008b,
0xb6040c98,
0xbcbb0fc4,
- 0x020c9800,
- 0x98030d98,
- 0x004e080f,
- 0x013d7e02,
- 0x020a7e00,
- 0x0601f400,
-/* 0x05a3: ctx_xfer_post */
- 0x7e0712f4,
-/* 0x05a7: ctx_xfer_done */
- 0x7e000227,
- 0xf80004cf,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x01acf000,
+ 0x5040008b,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0x004e060f,
+ 0x013d7e08,
+ 0x01acf000,
+ 0x8b04a5f0,
+ 0x98503000,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98020c98,
+ 0x0f98030d,
+ 0x02004e08,
+ 0x00013d7e,
+ 0x00020a7e,
+ 0xf40601f4,
+/* 0x05b2: ctx_xfer_post */
+ 0x277e0712,
+/* 0x05b6: ctx_xfer_done */
+ 0xde7e0002,
+ 0x00f80004,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5 b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
index e730603891d7..47802c7ecca1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5
@@ -24,7 +24,7 @@
#define NV_PGRAPH_GPCX_UNK__SIZE 0x00000002
-#define CHIPSET GK208
+#define CHIPSET GM107
#include "macros.fuc"
.section #gm107_grgpc_data
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 6d53b67dd3c4..51f5c3c6e966 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -41,7 +41,7 @@ uint32_t gm107_grgpc_data[] = {
};
uint32_t gm107_grgpc_code[] = {
- 0x03140ef5,
+ 0x03410ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
@@ -268,187 +268,319 @@ uint32_t gm107_grgpc_code[] = {
0x409c1c8e,
0x00008f7e,
0x00f8e0fc,
-/* 0x0314: init */
- 0x004104bd,
- 0x0011cf42,
- 0x010911e7,
- 0xfe0814b6,
- 0x02020014,
- 0xf6120040,
- 0x04bd0002,
- 0xfe047241,
- 0x00400010,
- 0x0000f607,
- 0x040204bd,
- 0xf6040040,
- 0x04bd0002,
- 0x821031f4,
- 0xcf018200,
- 0x01030022,
- 0xbb1f24f0,
- 0x32b60432,
- 0x0502b501,
- 0x820603b5,
- 0xcf018600,
- 0x02b50022,
- 0x0c308e04,
- 0xbd24bd50,
-/* 0x0377: init_unk_loop */
- 0x7e44bd34,
- 0xb0000065,
- 0x0bf400f6,
- 0xbb010f0e,
- 0x4ffd04f2,
- 0x0130b605,
-/* 0x038c: init_unk_next */
- 0xb60120b6,
- 0x26b004e0,
- 0xe21bf402,
-/* 0x0398: init_unk_done */
- 0xb50703b5,
- 0x00820804,
- 0x22cf0201,
- 0x9534bd00,
- 0x00800825,
- 0x05f601c0,
- 0x8004bd00,
- 0xf601c100,
+/* 0x0314: tpc_strand_wait */
+ 0x94bd90f9,
+ 0x800a99f0,
+ 0xf6023700,
+ 0x04bd0009,
+/* 0x0324: tpc_strand_busy */
+ 0x033f0089,
+ 0xb30099cf,
+ 0xbdf90094,
+ 0x0a99f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf890fc04,
+/* 0x0341: init */
+ 0x4104bd00,
+ 0x11cf4200,
+ 0x0911e700,
+ 0x0814b601,
+ 0x020014fe,
+ 0x12004002,
+ 0xbd0002f6,
+ 0x05b04104,
+ 0x400010fe,
+ 0x00f60700,
+ 0x0204bd00,
+ 0x04004004,
+ 0xbd0002f6,
+ 0x1031f404,
+ 0x01820082,
+ 0x030022cf,
+ 0x1f24f001,
+ 0xb60432bb,
+ 0x02b50132,
+ 0x0603b505,
+ 0x01860082,
+ 0xb50022cf,
+ 0x24b60402,
+ 0xc900800f,
+ 0x0002f601,
+ 0x308e04bd,
+ 0x24bd500c,
+ 0x44bd34bd,
+/* 0x03b0: init_unk_loop */
+ 0x0000657e,
+ 0xf400f6b0,
+ 0x010f0e0b,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x03c5: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40226,
+/* 0x03d1: init_unk_done */
+ 0x0703b5e2,
+ 0x820804b5,
+ 0xcf020100,
+ 0x34bd0022,
+ 0x80082595,
+ 0xf601c000,
0x04bd0005,
- 0x98000e98,
- 0x207e010f,
- 0x2fbb0001,
+ 0x01c10080,
+ 0xbd0005f6,
+ 0x000e9804,
+ 0x7e010f98,
+ 0xbb000120,
+ 0x3fbb002f,
+ 0x010e9800,
+ 0x7e020f98,
+ 0x98000120,
+ 0xeffd050e,
+ 0x002ebb00,
+ 0x98003ebb,
+ 0x0f98020e,
+ 0x01207e03,
+ 0x070e9800,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x0235b600,
+ 0x01d30080,
+ 0xbd0003f6,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb20834b6,
+ 0x02687e2f,
+ 0x002fbb00,
+ 0x0f003fbb,
+ 0x8effb23f,
+ 0xf0501d60,
+ 0x8f7e01e5,
+ 0x0c0f0000,
+ 0xa88effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x03147e00,
+ 0xb23f0f00,
+ 0x1d608eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0xffb2000f,
+ 0x501d9c8e,
+ 0x7e01e5f0,
+ 0x0f00008f,
+ 0x03147e01,
+ 0x8effb200,
+ 0xf0501da8,
+ 0x8f7e01e5,
+ 0xff0f0000,
+ 0x988effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0xb2020f00,
+ 0x1da88eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0x0003147e,
+ 0x85050498,
+ 0x98504000,
+ 0x64b60406,
+ 0x0056bb0f,
+/* 0x04e0: tpc_strand_init_tpc_loop */
+ 0x05705eb8,
+ 0x00657e00,
+ 0xbdf6b200,
+/* 0x04ed: tpc_strand_init_idx_loop */
+ 0x605eb874,
+ 0x7fb20005,
+ 0x00008f7e,
+ 0x05885eb8,
+ 0x082f9500,
+ 0x00008f7e,
+ 0x058c5eb8,
+ 0x082f9500,
+ 0x00008f7e,
+ 0x05905eb8,
+ 0x00657e00,
+ 0x06f5b600,
+ 0xb601f0b6,
+ 0x2fbb08f4,
0x003fbb00,
- 0x98010e98,
- 0x207e020f,
- 0x0e980001,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x0001207e,
- 0xfd070e98,
- 0x2ebb00ef,
- 0x003ebb00,
- 0x800235b6,
- 0xf601d300,
- 0x04bd0003,
- 0xb60825b6,
- 0x20b60635,
- 0x0130b601,
- 0xb60824b6,
- 0x2fb20834,
- 0x0002687e,
- 0x80003fbb,
- 0xf6020100,
- 0x04bd0003,
- 0x29f024bd,
- 0x3000801f,
- 0x0002f602,
-/* 0x0436: main */
- 0x31f404bd,
- 0x0028f400,
- 0x377e240d,
- 0x01f40000,
- 0x04e4b0f4,
- 0xfe1d18f4,
- 0x06020181,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x097e0018,
- 0x0ef40005,
-/* 0x0465: main_not_ctx_xfer */
- 0x10ef94d4,
- 0x7e01f5f0,
- 0xf40002f8,
-/* 0x0472: ih */
- 0x80f9c70e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0x4a04bdf0,
- 0xaacf0200,
- 0x04abc400,
- 0x0d1f0bf4,
- 0x1a004e24,
- 0x4f00eecf,
- 0xffcf1900,
- 0x00047e00,
- 0x40010e00,
- 0x0ef61d00,
-/* 0x04af: ih_no_fifo */
- 0x4004bd00,
- 0x0af60100,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04cf: hub_barrier_done */
- 0x0f01f800,
- 0x040e9801,
- 0xb204febb,
- 0x94188eff,
- 0x008f7e40,
-/* 0x04e3: ctx_redswitch */
- 0x0f00f800,
- 0x85008020,
- 0x000ff601,
- 0x080e04bd,
-/* 0x04f0: ctx_redswitch_delay */
- 0xf401e2b6,
- 0xf5f1fd1b,
- 0xf5f10800,
- 0x00800200,
- 0x0ff60185,
- 0xf804bd00,
-/* 0x0509: ctx_xfer */
- 0x81008000,
- 0x000ff602,
- 0x11f404bd,
- 0x04e37e07,
-/* 0x0519: ctx_xfer_not_load */
- 0x02167e00,
- 0x8024bd00,
- 0xf60247fc,
+ 0xb60170b6,
+ 0x1bf40162,
+ 0x0050b7bf,
+ 0x0142b608,
+ 0x0fa81bf4,
+ 0x8effb23f,
+ 0xf0501d60,
+ 0x8f7e01e5,
+ 0x0d0f0000,
+ 0xa88effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x03147e00,
+ 0x01008000,
+ 0x0003f602,
+ 0x24bd04bd,
+ 0x801f29f0,
+ 0xf6023000,
0x04bd0002,
- 0xb6012cf0,
- 0xfc800320,
- 0x02f6024a,
+/* 0x0574: main */
+ 0xf40031f4,
+ 0x240d0028,
+ 0x0000377e,
+ 0xb0f401f4,
+ 0x18f404e4,
+ 0x0181fe1d,
+ 0x20bd0602,
+ 0xb60412fd,
+ 0x1efd01e4,
+ 0x0018fe05,
+ 0x0006477e,
+/* 0x05a3: main_not_ctx_xfer */
+ 0x94d40ef4,
+ 0xf5f010ef,
+ 0x02f87e01,
+ 0xc70ef400,
+/* 0x05b0: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e240d1f,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0x0e000004,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x05ed: ih_no_fifo */
+ 0x01004004,
+ 0xbd000af6,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x060d: hub_barrier_done */
+ 0x98010f01,
+ 0xfebb040e,
+ 0x8effb204,
+ 0x7e409418,
+ 0xf800008f,
+/* 0x0621: ctx_redswitch */
+ 0x80200f00,
+ 0xf6018500,
+ 0x04bd000f,
+/* 0x062e: ctx_redswitch_delay */
+ 0xe2b6080e,
+ 0xfd1bf401,
+ 0x0800f5f1,
+ 0x0200f5f1,
+ 0x01850080,
+ 0xbd000ff6,
+/* 0x0647: ctx_xfer */
+ 0x8000f804,
+ 0xf6028100,
+ 0x04bd000f,
+ 0xc48effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x0711f400,
+ 0x0006217e,
+/* 0x0664: ctx_xfer_not_load */
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
0xf004bd00,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0x0c0f04bd,
+ 0xa88effb2,
+ 0xe5f0501d,
+ 0x008f7e01,
+ 0x03147e00,
+ 0xb23f0f00,
+ 0x1d608eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0xffb2000f,
+ 0x501d9c8e,
+ 0x7e01e5f0,
+ 0x0f00008f,
+ 0x03147e01,
+ 0x01fcf000,
+ 0xb203f0b6,
+ 0x1da88eff,
+ 0x01e5f050,
+ 0x00008f7e,
+ 0xf001acf0,
+ 0x008b02a5,
+ 0x0c985000,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98000c,
+ 0x7e000e01,
+ 0xf000013d,
+ 0x008b01ac,
+ 0x0c985040,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98010c,
+ 0x060f9802,
+ 0x7e08004e,
+ 0xf000013d,
0xa5f001ac,
- 0x00008b02,
+ 0x30008b04,
0x040c9850,
0xbb0fc4b6,
0x0c9800bc,
- 0x010d9800,
- 0x3d7e000e,
- 0xacf00001,
- 0x40008b01,
- 0x040c9850,
- 0xbb0fc4b6,
- 0x0c9800bc,
- 0x020d9801,
- 0x4e060f98,
- 0x3d7e0800,
- 0xacf00001,
- 0x04a5f001,
- 0x5030008b,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x020c9800,
- 0x98030d98,
- 0x004e080f,
- 0x013d7e02,
- 0x020a7e00,
- 0x0601f400,
-/* 0x05a3: ctx_xfer_post */
- 0x7e0712f4,
-/* 0x05a7: ctx_xfer_done */
- 0x7e000227,
- 0xf80004cf,
+ 0x030d9802,
+ 0x4e080f98,
+ 0x3d7e0200,
+ 0x0a7e0001,
+ 0x147e0002,
+ 0x01f40003,
+ 0x1a12f406,
+/* 0x073c: ctx_xfer_post */
+ 0x0002277e,
+ 0xffb20d0f,
+ 0x501da88e,
+ 0x7e01e5f0,
+ 0x7e00008f,
+/* 0x0753: ctx_xfer_done */
+ 0x7e000314,
+ 0xf800060d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
index 2a0b0f844299..fa618066441a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/macros.fuc
@@ -29,6 +29,7 @@
#define GK100 0xe0
#define GK110 0xf0
#define GK208 0x108
+#define GM107 0x117
#define NV_PGRAPH_TRAPPED_ADDR 0x400704
#define NV_PGRAPH_TRAPPED_DATA_LO 0x400708
@@ -79,7 +80,9 @@
#define NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE 0x409718
#define NV_PGRAPH_FECS_MMCTX_MULTI_MASK 0x40971c
#define NV_PGRAPH_FECS_MMCTX_QUEUE 0x409720
+#define NV_PGRAPH_FECS_MMIO_BASE 0x409724
#define NV_PGRAPH_FECS_MMIO_CTRL 0x409728
+#define NV_PGRAPH_FECS_MMIO_CTRL_BASE_ENABLE 0x00000001
#define NV_PGRAPH_FECS_MMIO_RDVAL 0x40972c
#define NV_PGRAPH_FECS_MMIO_WRVAL 0x409730
#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c
@@ -147,6 +150,11 @@
#define NV_PGRAPH_GPCX_GPCCS_MYINDEX 0x41a618
#define NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE 0x41a700
#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE 0x41a704
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_BASE 0x41a724
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL 0x41a728
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE 0x00000001
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_RDVAL 0x41a72c
+#define NV_PGRAPH_GPCX_GPCCS_MMIO_WRVAL 0x41a730
#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT 0x41a74c
#if CHIPSET < GK110
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800)
@@ -164,6 +172,29 @@
#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE 0x00000003
#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_LOAD 0x00000004
#define NV_PGRAPH_GPCX_GPCCS_MEM_BASE 0x41aa04
+#define NV_PGRAPH_GPCX_GPCCS_TPC_STATUS 0x41acfc
+
+#define NV_PGRAPH_GPC0_TPC0 0x504000
+#define NV_PGRAPH_GPC0_TPC0__SIZE 0x000800
+
+#define NV_PGRAPH_GPC0_TPCX_STRAND_INDEX 0x501d60
+#define NV_PGRAPH_GPC0_TPCX_STRAND_INDEX_ALL 0x0000003f
+#define NV_PGRAPH_GPC0_TPCX_STRAND_DATA 0x501d98
+#define NV_PGRAPH_GPC0_TPCX_STRAND_SELECT 0x501d9c
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD 0x501da8
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SEEK 0x00000001
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_GET_INFO 0x00000002
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_SAVE 0x00000003
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_LOAD 0x00000004
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_ENABLE 0x0000000c
+#define NV_PGRAPH_GPC0_TPCX_STRAND_CMD_DISABLE 0x0000000d
+#define NV_PGRAPH_GPC0_TPCX_STRAND_MEM_BASE 0x501dc4
+
+#define NV_TPC_STRAND_INDEX 0x560
+#define NV_TPC_STRAND_CNT 0x570
+#define NV_TPC_STRAND_SAVE_SWBASE 0x588
+#define NV_TPC_STRAND_LOAD_SWBASE 0x58c
+#define NV_TPC_STRAND_WORDS 0x590
#define mmctx_data(r,c) .b32 (((c - 1) << 26) | r)
#define queue_init .skip 72 // (2 * 4) + ((8 * 4) * 2)
@@ -178,6 +209,7 @@
#define T_SAVE 7
#define T_LCHAN 8
#define T_LCTXH 9
+#define T_STRTPC 10
#if CHIPSET < GK208
#define imm32(reg,val) /*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 1dd482e9da77..5606c25e5d02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -236,7 +236,7 @@ static int
gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
void *pdata, u32 size)
{
- struct gf100_gr_priv *priv = (void *)nv_engine(object);
+ struct gf100_gr_priv *priv = (void *)object->engine;
if (size >= sizeof(u32)) {
u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
nv_wr32(priv, 0x419e44, data);
@@ -260,8 +260,8 @@ gf100_gr_90c0_omthds[] = {
struct nvkm_oclass
gf100_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -1097,12 +1097,26 @@ gf100_gr_intr(struct nvkm_subdev *subdev)
u32 subc = (addr & 0x00070000) >> 16;
u32 data = nv_rd32(priv, 0x400708);
u32 code = nv_rd32(priv, 0x400110);
- u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+ u32 class;
int chid;
+ if (nv_device(priv)->card_type < NV_E0 || subc < 4)
+ class = nv_rd32(priv, 0x404200 + (subc * 4));
+ else
+ class = 0x0000;
+
engctx = nvkm_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
+ if (stat & 0x00000001) {
+ /*
+ * notifier interrupt, only needed for cyclestats
+ * can be safely ignored
+ */
+ nv_wr32(priv, 0x400100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
if (stat & 0x00000010) {
handle = nvkm_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index aeeca1be9cf0..8af1a89eda84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -124,10 +124,12 @@ void gf100_gr_dtor(struct nvkm_object *);
int gf100_gr_init(struct nvkm_object *);
void gf100_gr_zbc_init(struct gf100_gr_priv *);
-int gk104_gr_fini(struct nvkm_object *, bool);
+int gk104_gr_ctor(struct nvkm_object *, struct nvkm_object *,
+ struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
int gk104_gr_init(struct nvkm_object *);
-int gk110_gr_fini(struct nvkm_object *, bool);
+int gm204_gr_init(struct nvkm_object *);
extern struct nvkm_ofuncs gf100_fermi_ofuncs;
@@ -136,6 +138,7 @@ extern struct nvkm_omthds gf100_gr_9097_omthds[];
extern struct nvkm_omthds gf100_gr_90c0_omthds[];
extern struct nvkm_oclass gf110_gr_sclass[];
extern struct nvkm_oclass gk110_gr_sclass[];
+extern struct nvkm_oclass gm204_gr_sclass[];
struct gf100_gr_init {
u32 addr;
@@ -247,4 +250,17 @@ extern const struct gf100_gr_init gk110_gr_init_tex_0[];
extern const struct gf100_gr_init gk110_gr_init_sm_0[];
extern const struct gf100_gr_init gk208_gr_init_gpc_unk_0[];
+
+extern const struct gf100_gr_init gm107_gr_init_scc_0[];
+extern const struct gf100_gr_init gm107_gr_init_prop_0[];
+extern const struct gf100_gr_init gm107_gr_init_setup_1[];
+extern const struct gf100_gr_init gm107_gr_init_zcull_0[];
+extern const struct gf100_gr_init gm107_gr_init_gpc_unk_1[];
+extern const struct gf100_gr_init gm107_gr_init_tex_0[];
+extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
+extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
+extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
+void gm107_gr_init_bios(struct gf100_gr_priv *);
+
+extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 5362c8176e64..8df73421c78c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -32,8 +32,8 @@
static struct nvkm_oclass
gf108_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 88beb491b7b8..ef76e2dd1d31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -32,8 +32,8 @@
struct nvkm_oclass
gf110_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0x9039, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 489fdd94b885..46f7844eca70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -34,8 +34,8 @@
static struct nvkm_oclass
gk104_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa040, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -310,6 +310,17 @@ gk104_gr_init(struct nvkm_object *object)
return gf100_gr_init_ctxctl(priv);
}
+int
+gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(parent);
+ if (pmu)
+ pmu->pgob(pmu, false);
+ return gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
+}
+
#include "fuc/hubgk104.fuc3.h"
static struct gf100_gr_ucode
@@ -334,7 +345,7 @@ struct nvkm_oclass *
gk104_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xe4),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 78e03ab1608e..f4cd8e5546af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -34,8 +34,8 @@
struct nvkm_oclass
gk110_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa140, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -173,43 +173,6 @@ gk110_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-int
-gk110_gr_fini(struct nvkm_object *object, bool suspend)
-{
- struct gf100_gr_priv *priv = (void *)object;
- static const struct {
- u32 addr;
- u32 data;
- } magic[] = {
- { 0x020520, 0xfffffffc },
- { 0x020524, 0xfffffffe },
- { 0x020524, 0xfffffffc },
- { 0x020524, 0xfffffff8 },
- { 0x020524, 0xffffffe0 },
- { 0x020530, 0xfffffffe },
- { 0x02052c, 0xfffffffa },
- { 0x02052c, 0xfffffff0 },
- { 0x02052c, 0xffffffc0 },
- { 0x02052c, 0xffffff00 },
- { 0x02052c, 0xfffffc00 },
- { 0x02052c, 0xfffcfc00 },
- { 0x02052c, 0xfff0fc00 },
- { 0x02052c, 0xff80fc00 },
- { 0x020528, 0xfffffffe },
- { 0x020528, 0xfffffffc },
- };
- int i;
-
- nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
- nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(magic); i++) {
- nv_wr32(priv, magic[i].addr, magic[i].data);
- nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
- }
-
- return nvkm_gr_fini(&priv->base, suspend);
-}
-
#include "fuc/hubgk110.fuc3.h"
struct gf100_gr_ucode
@@ -234,10 +197,10 @@ struct nvkm_oclass *
gk110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf0),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
- .fini = gk110_gr_fini,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gk110_grctx_oclass,
.sclass = gk110_gr_sclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 5292c5a9a38c..9ff9eab0ccaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -102,10 +102,10 @@ struct nvkm_oclass *
gk110b_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf1),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
- .fini = gk110_gr_fini,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gk110b_grctx_oclass,
.sclass = gk110_gr_sclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index ae6b853173b6..85f44a3d5d11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -34,10 +34,10 @@
static struct nvkm_oclass
gk208_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa140, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs },
- { 0xa1c0, &nvkm_object_ofuncs },
+ { KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
@@ -152,43 +152,6 @@ gk208_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
-gk208_gr_fini(struct nvkm_object *object, bool suspend)
-{
- struct gf100_gr_priv *priv = (void *)object;
- static const struct {
- u32 addr;
- u32 data;
- } magic[] = {
- { 0x020520, 0xfffffffc },
- { 0x020524, 0xfffffffe },
- { 0x020524, 0xfffffffc },
- { 0x020524, 0xfffffff8 },
- { 0x020524, 0xffffffe0 },
- { 0x020530, 0xfffffffe },
- { 0x02052c, 0xfffffffa },
- { 0x02052c, 0xfffffff0 },
- { 0x02052c, 0xffffffc0 },
- { 0x02052c, 0xffffff00 },
- { 0x02052c, 0xfffffc00 },
- { 0x02052c, 0xfffcfc00 },
- { 0x02052c, 0xfff0fc00 },
- { 0x02052c, 0xff80fc00 },
- { 0x020528, 0xfffffffe },
- { 0x020528, 0xfffffffc },
- };
- int i;
-
- nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
- nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(magic); i++) {
- nv_wr32(priv, magic[i].addr, magic[i].data);
- nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
- }
-
- return nvkm_gr_fini(&priv->base, suspend);
-}
-
#include "fuc/hubgk208.fuc5.h"
static struct gf100_gr_ucode
@@ -213,10 +176,10 @@ struct nvkm_oclass *
gk208_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x08),
.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
+ .ctor = gk104_gr_ctor,
.dtor = gf100_gr_dtor,
.init = gk104_gr_init,
- .fini = gk208_gr_fini,
+ .fini = _nvkm_gr_fini,
},
.cclass = &gk208_grctx_oclass,
.sclass = gk208_gr_sclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 213755534084..40ff5eb9180c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -26,8 +26,8 @@
static struct nvkm_oclass
gk20a_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa040, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 124492b8a2d6..a5ebd459bc24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -35,8 +35,8 @@
static struct nvkm_oclass
gm107_gr_sclass[] = {
- { 0x902d, &nvkm_object_ofuncs },
- { 0xa140, &nvkm_object_ofuncs },
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
{ MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
{}
@@ -71,7 +71,7 @@ gm107_gr_init_ds_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_scc_0[] = {
{ 0x40803c, 1, 0x04, 0x00000010 },
{}
@@ -85,14 +85,14 @@ gm107_gr_init_sked_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_prop_0[] = {
{ 0x418408, 1, 0x04, 0x00000000 },
{ 0x4184a0, 1, 0x04, 0x00000000 },
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_setup_1[] = {
{ 0x4188c8, 2, 0x04, 0x00000000 },
{ 0x4188d0, 1, 0x04, 0x00010000 },
@@ -100,7 +100,7 @@ gm107_gr_init_setup_1[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_zcull_0[] = {
{ 0x418910, 1, 0x04, 0x00010001 },
{ 0x418914, 1, 0x04, 0x00000301 },
@@ -111,7 +111,7 @@ gm107_gr_init_zcull_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_gpc_unk_1[] = {
{ 0x418d00, 1, 0x04, 0x00000000 },
{ 0x418f00, 1, 0x04, 0x00000400 },
@@ -134,7 +134,7 @@ gm107_gr_init_tpccs_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -160,7 +160,7 @@ gm107_gr_init_pe_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_l1c_0[] = {
{ 0x419c98, 1, 0x04, 0x00000000 },
{ 0x419cc0, 2, 0x04, 0x00000000 },
@@ -206,14 +206,14 @@ gm107_gr_init_pes_0[] = {
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_wwdx_0[] = {
{ 0x41bfd4, 1, 0x04, 0x00800000 },
{ 0x41bfdc, 1, 0x04, 0x00000000 },
{}
};
-static const struct gf100_gr_init
+const struct gf100_gr_init
gm107_gr_init_cbm_0[] = {
{ 0x41becc, 1, 0x04, 0x00000000 },
{}
@@ -291,7 +291,7 @@ gm107_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static void
+void
gm107_gr_init_bios(struct gf100_gr_priv *priv)
{
static const struct {
@@ -464,7 +464,7 @@ gm107_gr_oclass = &(struct gf100_gr_oclass) {
.cclass = &gm107_grctx_oclass,
.sclass = gm107_gr_sclass,
.mmio = gm107_gr_pack_mmio,
- .fecs.ucode = 0 ? &gm107_gr_fecs_ucode : NULL,
+ .fecs.ucode = &gm107_gr_fecs_ucode,
.gpccs.ucode = &gm107_gr_gpccs_ucode,
.ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
new file mode 100644
index 000000000000..2f5eadd12a9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+struct nvkm_oclass
+gm204_gr_sclass[] = {
+ { FERMI_TWOD_A, &nvkm_object_ofuncs },
+ { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
+ { MAXWELL_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
+ { MAXWELL_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH register lists
+ ******************************************************************************/
+
+static const struct gf100_gr_init
+gm204_gr_init_main_0[] = {
+ { 0x400080, 1, 0x04, 0x003003e2 },
+ { 0x400088, 1, 0x04, 0xe007bfe7 },
+ { 0x40008c, 1, 0x04, 0x00060000 },
+ { 0x400090, 1, 0x04, 0x00000030 },
+ { 0x40013c, 1, 0x04, 0x003901f3 },
+ { 0x400140, 1, 0x04, 0x00000100 },
+ { 0x400144, 1, 0x04, 0x00000000 },
+ { 0x400148, 1, 0x04, 0x00000110 },
+ { 0x400138, 1, 0x04, 0x00000000 },
+ { 0x400130, 2, 0x04, 0x00000000 },
+ { 0x400124, 1, 0x04, 0x00000002 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_fe_0[] = {
+ { 0x40415c, 1, 0x04, 0x00000000 },
+ { 0x404170, 1, 0x04, 0x00000000 },
+ { 0x4041b4, 1, 0x04, 0x00000000 },
+ { 0x4041b8, 1, 0x04, 0x00000010 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_ds_0[] = {
+ { 0x40583c, 1, 0x04, 0x00000000 },
+ { 0x405844, 1, 0x04, 0x00ffffff },
+ { 0x40584c, 1, 0x04, 0x00000001 },
+ { 0x405850, 1, 0x04, 0x00000000 },
+ { 0x405900, 1, 0x04, 0x00000000 },
+ { 0x405908, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_sked_0[] = {
+ { 0x407010, 1, 0x04, 0x00000000 },
+ { 0x407040, 1, 0x04, 0x80440434 },
+ { 0x407048, 1, 0x04, 0x00000008 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_tpccs_0[] = {
+ { 0x419d60, 1, 0x04, 0x0000003f },
+ { 0x419d88, 3, 0x04, 0x00000000 },
+ { 0x419dc4, 1, 0x04, 0x00000000 },
+ { 0x419dc8, 1, 0x04, 0x00000501 },
+ { 0x419dd0, 1, 0x04, 0x00000000 },
+ { 0x419dd4, 1, 0x04, 0x00000100 },
+ { 0x419dd8, 1, 0x04, 0x00000001 },
+ { 0x419ddc, 1, 0x04, 0x00000002 },
+ { 0x419de0, 1, 0x04, 0x00000001 },
+ { 0x419de8, 1, 0x04, 0x000000cc },
+ { 0x419dec, 1, 0x04, 0x00000000 },
+ { 0x419df0, 1, 0x04, 0x000000cc },
+ { 0x419df4, 1, 0x04, 0x00000000 },
+ { 0x419d0c, 1, 0x04, 0x00000000 },
+ { 0x419d10, 1, 0x04, 0x00000014 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_pe_0[] = {
+ { 0x419900, 1, 0x04, 0x000000ff },
+ { 0x419810, 1, 0x04, 0x00000000 },
+ { 0x41980c, 1, 0x04, 0x00000010 },
+ { 0x419844, 1, 0x04, 0x00000000 },
+ { 0x419838, 1, 0x04, 0x000000ff },
+ { 0x419850, 1, 0x04, 0x00000004 },
+ { 0x419854, 2, 0x04, 0x00000000 },
+ { 0x419894, 3, 0x04, 0x00100401 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_sm_0[] = {
+ { 0x419e30, 1, 0x04, 0x000000ff },
+ { 0x419e00, 1, 0x04, 0x00000000 },
+ { 0x419ea0, 1, 0x04, 0x00000000 },
+ { 0x419ee4, 1, 0x04, 0x00000000 },
+ { 0x419ea4, 1, 0x04, 0x00000100 },
+ { 0x419ea8, 1, 0x04, 0x00000000 },
+ { 0x419ee8, 1, 0x04, 0x00000091 },
+ { 0x419eb4, 1, 0x04, 0x00000000 },
+ { 0x419ebc, 2, 0x04, 0x00000000 },
+ { 0x419edc, 1, 0x04, 0x000c1810 },
+ { 0x419ed8, 1, 0x04, 0x00000000 },
+ { 0x419ee0, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_l1c_1[] = {
+ { 0x419cf8, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_sm_1[] = {
+ { 0x419f74, 1, 0x04, 0x00055155 },
+ { 0x419f80, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_l1c_2[] = {
+ { 0x419ccc, 2, 0x04, 0x00000000 },
+ { 0x419c80, 1, 0x04, 0x3f006022 },
+ { 0x419c88, 1, 0x04, 0x00210000 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_pes_0[] = {
+ { 0x41be50, 1, 0x04, 0x000000ff },
+ { 0x41be04, 1, 0x04, 0x00000000 },
+ { 0x41be08, 1, 0x04, 0x00000004 },
+ { 0x41be0c, 1, 0x04, 0x00000008 },
+ { 0x41be10, 1, 0x04, 0x2e3b8bc7 },
+ { 0x41be14, 2, 0x04, 0x00000000 },
+ { 0x41be3c, 5, 0x04, 0x00100401 },
+ {}
+};
+
+static const struct gf100_gr_init
+gm204_gr_init_be_0[] = {
+ { 0x408890, 1, 0x04, 0x000000ff },
+ { 0x40880c, 1, 0x04, 0x00000000 },
+ { 0x408850, 1, 0x04, 0x00000004 },
+ { 0x408878, 1, 0x04, 0x01b4201c },
+ { 0x40887c, 1, 0x04, 0x80004c55 },
+ { 0x408880, 1, 0x04, 0x0018c258 },
+ { 0x408884, 1, 0x04, 0x0000160f },
+ { 0x408974, 1, 0x04, 0x000000ff },
+ { 0x408910, 9, 0x04, 0x00000000 },
+ { 0x408950, 1, 0x04, 0x00000000 },
+ { 0x408954, 1, 0x04, 0x0000ffff },
+ { 0x408958, 1, 0x04, 0x00000034 },
+ { 0x40895c, 1, 0x04, 0x84b17403 },
+ { 0x408960, 1, 0x04, 0x04c1884f },
+ { 0x408964, 1, 0x04, 0x04714445 },
+ { 0x408968, 1, 0x04, 0x0280802f },
+ { 0x40896c, 1, 0x04, 0x04304856 },
+ { 0x408970, 1, 0x04, 0x00012800 },
+ { 0x408984, 1, 0x04, 0x00000000 },
+ { 0x408988, 1, 0x04, 0x08040201 },
+ { 0x40898c, 1, 0x04, 0x80402010 },
+ {}
+};
+
+const struct gf100_gr_pack
+gm204_gr_pack_mmio[] = {
+ { gm204_gr_init_main_0 },
+ { gm204_gr_init_fe_0 },
+ { gf100_gr_init_pri_0 },
+ { gf100_gr_init_rstr2d_0 },
+ { gf100_gr_init_pd_0 },
+ { gm204_gr_init_ds_0 },
+ { gm107_gr_init_scc_0 },
+ { gm204_gr_init_sked_0 },
+ { gk110_gr_init_cwd_0 },
+ { gm107_gr_init_prop_0 },
+ { gk208_gr_init_gpc_unk_0 },
+ { gf100_gr_init_setup_0 },
+ { gf100_gr_init_crstr_0 },
+ { gm107_gr_init_setup_1 },
+ { gm107_gr_init_zcull_0 },
+ { gf100_gr_init_gpm_0 },
+ { gm107_gr_init_gpc_unk_1 },
+ { gf100_gr_init_gcc_0 },
+ { gm204_gr_init_tpccs_0 },
+ { gm107_gr_init_tex_0 },
+ { gm204_gr_init_pe_0 },
+ { gm107_gr_init_l1c_0 },
+ { gf100_gr_init_mpc_0 },
+ { gm204_gr_init_sm_0 },
+ { gm204_gr_init_l1c_1 },
+ { gm204_gr_init_sm_1 },
+ { gm204_gr_init_l1c_2 },
+ { gm204_gr_init_pes_0 },
+ { gm107_gr_init_wwdx_0 },
+ { gm107_gr_init_cbm_0 },
+ { gm204_gr_init_be_0 },
+ {}
+};
+
+const struct gf100_gr_pack *
+gm204_gr_data[] = {
+ gm204_gr_pack_mmio,
+ NULL
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+gm204_gr_init_ctxctl(struct gf100_gr_priv *priv)
+{
+ return 0;
+}
+
+int
+gm204_gr_init(struct nvkm_object *object)
+{
+ struct gf100_gr_oclass *oclass = (void *)object->oclass;
+ struct gf100_gr_priv *priv = (void *)object;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, ppc, rop;
+ int ret, i;
+ u32 tmp;
+
+ ret = nvkm_gr_init(&priv->base);
+ if (ret)
+ return ret;
+
+ tmp = nv_rd32(priv, 0x100c80); /*XXX: mask? */
+ nv_wr32(priv, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+ nv_wr32(priv, 0x418890, 0x00000000);
+ nv_wr32(priv, 0x418894, 0x00000000);
+ nv_wr32(priv, 0x4188b4, priv->unk4188b4->addr >> 8);
+ nv_wr32(priv, 0x4188b8, priv->unk4188b8->addr >> 8);
+ nv_mask(priv, 0x4188b0, 0x00040000, 0x00040000);
+
+ /*XXX: belongs in fb */
+ nv_wr32(priv, 0x100cc8, priv->unk4188b4->addr >> 8);
+ nv_wr32(priv, 0x100ccc, priv->unk4188b8->addr >> 8);
+ nv_mask(priv, 0x100cc4, 0x00040000, 0x00040000);
+
+ gf100_gr_mmio(priv, oclass->mmio);
+
+ gm107_gr_init_bios(priv);
+
+ nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
+ priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ priv->tpc_total);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
+ nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+ nv_wr32(priv, GPC_BCAST(0x033c), nv_rd32(priv, 0x100804));
+
+ nv_wr32(priv, 0x400500, 0x00010001);
+ nv_wr32(priv, 0x400100, 0xffffffff);
+ nv_wr32(priv, 0x40013c, 0xffffffff);
+ nv_wr32(priv, 0x400124, 0x00000002);
+ nv_wr32(priv, 0x409c24, 0x000e0000);
+ nv_wr32(priv, 0x405848, 0xc0000000);
+ nv_wr32(priv, 0x40584c, 0x00000001);
+ nv_wr32(priv, 0x404000, 0xc0000000);
+ nv_wr32(priv, 0x404600, 0xc0000000);
+ nv_wr32(priv, 0x408030, 0xc0000000);
+ nv_wr32(priv, 0x404490, 0xc0000000);
+ nv_wr32(priv, 0x406018, 0xc0000000);
+ nv_wr32(priv, 0x407020, 0x40000000);
+ nv_wr32(priv, 0x405840, 0xc0000000);
+ nv_wr32(priv, 0x405844, 0x00ffffff);
+ nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_debug(priv, "ppc %d %d\n", gpc, priv->ppc_nr[gpc]);
+ for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
+ nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+ }
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+
+ nv_wr32(priv, 0x400108, 0xffffffff);
+ nv_wr32(priv, 0x400138, 0xffffffff);
+ nv_wr32(priv, 0x400118, 0xffffffff);
+ nv_wr32(priv, 0x400130, 0xffffffff);
+ nv_wr32(priv, 0x40011c, 0xffffffff);
+ nv_wr32(priv, 0x400134, 0xffffffff);
+
+ nv_wr32(priv, 0x400054, 0x2c350f63);
+
+ gf100_gr_zbc_init(priv);
+
+ return gm204_gr_init_ctxctl(priv);
+}
+
+struct nvkm_oclass *
+gm204_gr_oclass = &(struct gf100_gr_oclass) {
+ .base.handle = NV_ENGINE(GR, 0x24),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gm204_gr_init,
+ .fini = _nvkm_gr_fini,
+ },
+ .cclass = &gm204_grctx_oclass,
+ .sclass = gm204_gr_sclass,
+ .mmio = gm204_gr_pack_mmio,
+ .ppc_nr = 2,
+}.base;
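The register tables above all use the gf100_gr_init layout of { addr, count, pitch, data }: each entry stands for count writes of data, starting at addr and stepping by pitch bytes, an all-zero entry terminates the table, and a gf100_gr_pack simply strings such tables together. A minimal userspace sketch of that expansion, assuming gf100_gr_mmio() (defined elsewhere in the driver) walks the entries this way, with printf() standing in for the MMIO write:

    #include <stdio.h>

    /* Same field layout as struct gf100_gr_init: addr, count, pitch, data. */
    struct init_entry {
            unsigned int addr;
            unsigned int count;
            unsigned int pitch;
            unsigned int data;
    };

    /* Expand one init table: count writes of data from addr, stepping by
     * pitch bytes per write; a zeroed entry ends the table. */
    static void walk_init_table(const struct init_entry *init)
    {
            for (; init->count; init++) {
                    unsigned int addr = init->addr;
                    unsigned int i;

                    for (i = 0; i < init->count; i++, addr += init->pitch)
                            printf("wr32 0x%06x <- 0x%08x\n", addr, init->data);
            }
    }

    int main(void)
    {
            /* gm204_gr_init_l1c_1 from the table above */
            static const struct init_entry l1c_1[] = {
                    { 0x419cf8, 2, 0x04, 0x00000000 },
                    {}
            };

            walk_init_table(l1c_1);
            return 0;
    }

Run against gm204_gr_init_l1c_1, this prints two writes, to 0x419cf8 and 0x419cfc.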
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
new file mode 100644
index 000000000000..04b9733d146a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+struct nvkm_oclass *
+gm206_gr_oclass = &(struct gf100_gr_oclass) {
+ .base.handle = NV_ENGINE(GR, 0x26),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gf100_gr_ctor,
+ .dtor = gf100_gr_dtor,
+ .init = gm204_gr_init,
+ .fini = _nvkm_gr_fini,
+ },
+ .cclass = &gm206_grctx_oclass,
+ .sclass = gm204_gr_sclass,
+ .mmio = gm204_gr_pack_mmio,
+ .ppc_nr = 2,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 1fbd93bbb561..f9d0eb5647fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -52,7 +52,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 start = offset & ~0x00000fff;
u32 fetch = limit - start;
- if (nvbios_extend(bios, limit) > 0) {
+ if (nvbios_extend(bios, limit) >= 0) {
int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
if (ret == fetch)
return fetch;
@@ -73,7 +73,7 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 start = offset & ~0xfff;
u32 fetch = 0;
- if (nvbios_extend(bios, limit) > 0) {
+ if (nvbios_extend(bios, limit) >= 0) {
while (start + fetch < limit) {
int ret = nouveau_acpi_get_bios_chunk(bios->data,
start + fetch,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index b8853bf16b23..7622b41619a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -29,7 +29,7 @@ struct nvkm_hwsq {
u32 data;
struct {
u8 data[512];
- u8 size;
+ u16 size;
} c;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 3394a5ea8a9f..ebf709c27e3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -11,17 +11,34 @@ struct hwsq {
struct hwsq_reg {
int sequence;
bool force;
- u32 addr[2];
+ u32 addr;
+ u32 stride; /* in bytes */
+ u32 mask;
u32 data;
};
static inline struct hwsq_reg
+hwsq_stride(u32 addr, u32 stride, u32 mask)
+{
+ return (struct hwsq_reg) {
+ .sequence = 0,
+ .force = 0,
+ .addr = addr,
+ .stride = stride,
+ .mask = mask,
+ .data = 0xdeadbeef,
+ };
+}
+
+static inline struct hwsq_reg
hwsq_reg2(u32 addr1, u32 addr2)
{
return (struct hwsq_reg) {
.sequence = 0,
.force = 0,
- .addr = { addr1, addr2 },
+ .addr = addr1,
+ .stride = addr2 - addr1,
+ .mask = 0x3,
.data = 0xdeadbeef,
};
}
@@ -29,7 +46,14 @@ hwsq_reg2(u32 addr1, u32 addr2)
static inline struct hwsq_reg
hwsq_reg(u32 addr)
{
- return hwsq_reg2(addr, addr);
+ return (struct hwsq_reg) {
+ .sequence = 0,
+ .force = 0,
+ .addr = addr,
+ .stride = 0,
+ .mask = 0x1,
+ .data = 0xdeadbeef,
+ };
}
static inline int
@@ -62,18 +86,24 @@ static inline u32
hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
{
if (reg->sequence != ram->sequence)
- reg->data = nv_rd32(ram->subdev, reg->addr[0]);
+ reg->data = nv_rd32(ram->subdev, reg->addr);
return reg->data;
}
static inline void
hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
{
+ u32 mask, off = 0;
+
reg->sequence = ram->sequence;
reg->data = data;
- if (reg->addr[0] != reg->addr[1])
- nvkm_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
- nvkm_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
+
+ for (mask = reg->mask; mask > 0; mask = (mask & ~1) >> 1) {
+ if (mask & 1)
+ nvkm_hwsq_wr32(ram->hwsq, reg->addr+off, reg->data);
+
+ off += reg->stride;
+ }
}
static inline void
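The hwsq_reg rework above replaces the fixed two-address pair with an addr/stride/mask triple: bit N of mask selects a write at addr + N * stride, so hwsq_reg(a) becomes mask 0x1, hwsq_reg2(a, b) becomes { .addr = a, .stride = b - a, .mask = 0x3 }, and hwsq_stride() can describe longer, even sparse, banks of registers. A minimal userspace model of the hwsq_wr32() expansion (the addresses in main() are illustrative only, not taken from the driver):

    #include <stdio.h>

    /* Mirror of the loop in hwsq_wr32(): one write per set mask bit, the
     * offset advancing by stride for every bit position.  The driver's
     * "mask = (mask & ~1) >> 1" step is equivalent to a plain right shift. */
    static void expand_writes(unsigned int addr, unsigned int stride,
                              unsigned int mask, unsigned int data)
    {
            unsigned int off = 0;

            for (; mask; mask >>= 1) {
                    if (mask & 1)
                            printf("hwsq wr32 0x%06x <- 0x%08x\n",
                                   addr + off, data);
                    off += stride;
            }
    }

    int main(void)
    {
            expand_writes(0x100200, 0, 0x1, 0x00000000); /* single register    */
            expand_writes(0x100200, 4, 0x3, 0xdeadbeef); /* old hwsq_reg2 pair */
            expand_writes(0x100210, 4, 0x5, 0x00000001); /* sparse: +0 and +8  */
            return 0;
    }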
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index b24a9cc04b73..39a83d82e0cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -184,7 +184,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nv_debug(clk, "setting performance state %d\n", pstatei);
clk->pstate = pstatei;
- if (pfb->ram->calc) {
+ if (pfb->ram && pfb->ram->calc) {
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = pfb->ram->calc(pfb, khz);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 14a51a9ff7d0..7c63abf11e22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -5,7 +5,7 @@ struct nvkm_pll_vals;
struct nv04_devinit_priv {
struct nvkm_devinit base;
- u8 owner;
+ int owner;
};
int nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 904d601e8a50..d6be4c6c5408 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -37,7 +37,6 @@ nvkm-y += nvkm/subdev/fb/ramgt215.o
nvkm-y += nvkm/subdev/fb/rammcp77.o
nvkm-y += nvkm/subdev/fb/ramgf100.o
nvkm-y += nvkm/subdev/fb/ramgk104.o
-nvkm-y += nvkm/subdev/fb/ramgk20a.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 16589fa613cd..61fde43dab71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -55,9 +55,11 @@ _nvkm_fb_fini(struct nvkm_object *object, bool suspend)
struct nvkm_fb *pfb = (void *)object;
int ret;
- ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
- if (ret && suspend)
- return ret;
+ if (pfb->ram) {
+ ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
+ if (ret && suspend)
+ return ret;
+ }
return nvkm_subdev_fini(&pfb->base, suspend);
}
@@ -72,9 +74,11 @@ _nvkm_fb_init(struct nvkm_object *object)
if (ret)
return ret;
- ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
- if (ret)
- return ret;
+ if (pfb->ram) {
+ ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
+ if (ret)
+ return ret;
+ }
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
@@ -91,9 +95,12 @@ _nvkm_fb_dtor(struct nvkm_object *object)
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
nvkm_mm_fini(&pfb->tags);
- nvkm_mm_fini(&pfb->vram);
- nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
+ if (pfb->ram) {
+ nvkm_mm_fini(&pfb->vram);
+ nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
+ }
+
nvkm_subdev_destroy(&pfb->base);
}
@@ -127,6 +134,9 @@ nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
pfb->memtype_valid = impl->memtype;
+ if (!impl->ram)
+ return 0;
+
ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
if (ret) {
nv_fatal(pfb, "error detecting memory configuration!!\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 6762847c05e8..a5d7857d3898 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -65,5 +65,4 @@ gk20a_fb_oclass = &(struct nvkm_fb_impl) {
.fini = _nvkm_fb_fini,
},
.memtype = gf100_fb_memtype_valid,
- .ram = &gk20a_ram_oclass,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index d82da02daa1f..485c4b64819a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -32,7 +32,6 @@ extern struct nvkm_oclass gt215_ram_oclass;
extern struct nvkm_oclass mcp77_ram_oclass;
extern struct nvkm_oclass gf100_ram_oclass;
extern struct nvkm_oclass gk104_ram_oclass;
-extern struct nvkm_oclass gk20a_ram_oclass;
extern struct nvkm_oclass gm107_ram_oclass;
int nvkm_sddr2_calc(struct nvkm_ram *ram);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c
deleted file mode 100644
index 5f30db140b47..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk20a.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/device.h>
-
-struct gk20a_mem {
- struct nvkm_mem base;
- void *cpuaddr;
- dma_addr_t handle;
-};
-#define to_gk20a_mem(m) container_of(m, struct gk20a_mem, base)
-
-static void
-gk20a_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
-{
- struct device *dev = nv_device_base(nv_device(pfb));
- struct gk20a_mem *mem = to_gk20a_mem(*pmem);
-
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- if (likely(mem->cpuaddr))
- dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
- mem->cpuaddr, mem->handle);
-
- kfree(mem->base.pages);
- kfree(mem);
-}
-
-static int
-gk20a_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nvkm_mem **pmem)
-{
- struct device *dev = nv_device_base(nv_device(pfb));
- struct gk20a_mem *mem;
- u32 type = memtype & 0xff;
- u32 npages, order;
- int i;
-
- nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
- align, ncmin);
-
- npages = size >> PAGE_SHIFT;
- if (npages == 0)
- npages = 1;
-
- if (align == 0)
- align = PAGE_SIZE;
- align >>= PAGE_SHIFT;
-
- /* round alignment to the next power of 2, if needed */
- order = fls(align);
- if ((align & (align - 1)) == 0)
- order--;
- align = BIT(order);
-
- /* ensure returned address is correctly aligned */
- npages = max(align, npages);
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- mem->base.size = npages;
- mem->base.memtype = type;
-
- mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
- if (!mem->base.pages) {
- kfree(mem);
- return -ENOMEM;
- }
-
- *pmem = &mem->base;
-
- mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
- &mem->handle, GFP_KERNEL);
- if (!mem->cpuaddr) {
- nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
- gk20a_ram_put(pfb, pmem);
- return -ENOMEM;
- }
-
- align <<= PAGE_SHIFT;
-
- /* alignment check */
- if (unlikely(mem->handle & (align - 1)))
- nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
- &mem->handle, align);
-
- nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
- npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);
-
- for (i = 0; i < npages; i++)
- mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);
-
- mem->base.offset = (u64)mem->base.pages[0];
- return 0;
-}
-
-static int
-gk20a_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 datasize,
- struct nvkm_object **pobject)
-{
- struct nvkm_ram *ram;
- int ret;
-
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->size = get_num_physpages() << PAGE_SHIFT;
-
- ram->get = gk20a_ram_get;
- ram->put = gk20a_ram_put;
- return 0;
-}
-
-struct nvkm_oclass
-gk20a_ram_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_ram_ctor,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- },
-};
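The allocator removed above rounded the requested alignment up to the next power of two before sizing the DMA allocation: order = fls(align), decremented when align is already a power of two, then align = BIT(order). The same arithmetic modelled standalone (fls() is re-implemented here since the sketch runs outside the kernel):

    #include <stdio.h>

    /* Userspace stand-in for the kernel's fls(): position of the highest
     * set bit, counting from 1, or 0 for an input of 0. */
    static unsigned int fls_u32(unsigned int v)
    {
            unsigned int n = 0;

            while (v) {
                    v >>= 1;
                    n++;
            }
            return n;
    }

    /* Same rounding as the removed gk20a_ram_get(): alignment (in pages)
     * is rounded up to the next power of two. */
    static unsigned int round_align(unsigned int align_pages)
    {
            unsigned int order = fls_u32(align_pages);

            if ((align_pages & (align_pages - 1)) == 0)
                    order--; /* already a power of two, keep it */
            return 1u << order;
    }

    int main(void)
    {
            unsigned int v;

            for (v = 1; v <= 6; v++)
                    printf("align %u pages -> %u pages\n", v, round_align(v));
            return 0;
    }

So 3 pages round up to 4, and 5 or 6 round up to 8, while exact powers of two are left unchanged.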
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index ba19158a5912..0b256aa4960f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -45,10 +45,8 @@ gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ret = nvkm_fuse_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- if (ret)
- return ret;
- return 0;
+ return ret;
}
struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index e6f35abe7879..13bb7fc0a569 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -2,3 +2,4 @@ nvkm-y += nvkm/subdev/instmem/base.o
nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
nvkm-y += nvkm/subdev/instmem/nv50.o
+nvkm-y += nvkm/subdev/instmem/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
new file mode 100644
index 000000000000..dd0994d9ebfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * GK20A does not have dedicated video memory, and to accurately represent this
+ * fact Nouveau will not create a RAM device for it. Therefore its instmem
+ * implementation must be done directly on top of system memory, while providing
+ * coherent read and write operations.
+ *
+ * Instmem can be allocated through two means:
+ * 1) If an IOMMU mapping has been probed, the IOMMU API is used to make memory
+ * pages appear contiguous to the GPU. This is the preferred way.
+ * 2) If no IOMMU mapping is probed, the DMA API is used to allocate physically
+ * contiguous memory.
+ *
+ * In both cases CPU reads and writes are performed using PRAMIN (i.e. using the
+ * GPU path) to ensure these operations are coherent for the GPU. This allows us
+ * to use more "relaxed" allocation parameters when using the DMA API, since we
+ * never need a kernel mapping.
+ */
+
+#include <subdev/fb.h>
+#include <core/mm.h>
+#include <core/device.h>
+
+#ifdef __KERNEL__
+#include <linux/dma-attrs.h>
+#include <linux/iommu.h>
+#include <nouveau_platform.h>
+#endif
+
+#include "priv.h"
+
+struct gk20a_instobj_priv {
+ struct nvkm_instobj base;
+ /* Must be second member here - see nouveau_gpuobj_map_vm() */
+ struct nvkm_mem *mem;
+ /* Pointed by mem */
+ struct nvkm_mem _mem;
+};
+
+/*
+ * Used for objects allocated using the DMA API
+ */
+struct gk20a_instobj_dma {
+ struct gk20a_instobj_priv base;
+
+ void *cpuaddr;
+ dma_addr_t handle;
+ struct nvkm_mm_node r;
+};
+
+/*
+ * Used for objects flattened using the IOMMU API
+ */
+struct gk20a_instobj_iommu {
+ struct gk20a_instobj_priv base;
+
+ /* array of base.mem->size pages */
+ struct page *pages[];
+};
+
+struct gk20a_instmem_priv {
+ struct nvkm_instmem base;
+ spinlock_t lock;
+ u64 addr;
+
+ /* Only used if IOMMU is present */
+ struct mutex *mm_mutex;
+ struct nvkm_mm *mm;
+ struct iommu_domain *domain;
+ unsigned long iommu_pgshift;
+
+ /* Only used by DMA API */
+ struct dma_attrs attrs;
+};
+
+/*
+ * Use PRAMIN to read/write data and avoid coherency issues.
+ * PRAMIN uses the GPU path and ensures data will always be coherent.
+ *
+ * A dynamic mapping based solution would be desirable in the future, but
+ * the issue remains of how to maintain coherency efficiently. On ARM it is
+ * not easy (if possible at all?) to create uncached temporary mappings.
+ */
+
+static u32
+gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+{
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
+ struct gk20a_instobj_priv *node = (void *)object;
+ unsigned long flags;
+ u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+ u32 data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (unlikely(priv->addr != base)) {
+ nv_wr32(priv, 0x001700, base >> 16);
+ priv->addr = base;
+ }
+ data = nv_rd32(priv, 0x700000 + addr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return data;
+}
+
+static void
+gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+{
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
+ struct gk20a_instobj_priv *node = (void *)object;
+ unsigned long flags;
+ u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (unlikely(priv->addr != base)) {
+ nv_wr32(priv, 0x001700, base >> 16);
+ priv->addr = base;
+ }
+ nv_wr32(priv, 0x700000 + addr, data);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+{
+ struct gk20a_instobj_dma *node = (void *)_node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct device *dev = nv_device_base(nv_device(priv));
+
+ if (unlikely(!node->cpuaddr))
+ return;
+
+ dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
+ node->handle, &priv->attrs);
+}
+
+static void
+gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+{
+ struct gk20a_instobj_iommu *node = (void *)_node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct nvkm_mm_node *r;
+ int i;
+
+ if (unlikely(list_empty(&_node->mem->regions)))
+ return;
+
+ r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+ rl_entry);
+
+ /* clear bit 34 to unmap pages */
+ r->offset &= ~BIT(34 - priv->iommu_pgshift);
+
+ /* Unmap pages from GPU address space and free them */
+ for (i = 0; i < _node->mem->size; i++) {
+ iommu_unmap(priv->domain,
+ (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+ __free_page(node->pages[i]);
+ }
+
+ /* Release area from GPU address space */
+ mutex_lock(priv->mm_mutex);
+ nvkm_mm_free(priv->mm, &r);
+ mutex_unlock(priv->mm_mutex);
+}
+
+static void
+gk20a_instobj_dtor(struct nvkm_object *object)
+{
+ struct gk20a_instobj_priv *node = (void *)object;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+
+ if (priv->domain)
+ gk20a_instobj_dtor_iommu(node);
+ else
+ gk20a_instobj_dtor_dma(node);
+
+ nvkm_instobj_destroy(&node->base);
+}
+
+static int
+gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 npages, u32 align,
+ struct gk20a_instobj_priv **_node)
+{
+ struct gk20a_instobj_dma *node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct device *dev = nv_device_base(nv_device(parent));
+ int ret;
+
+ ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
+ (void **)&node);
+ *_node = &node->base;
+ if (ret)
+ return ret;
+
+ node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
+ &node->handle, GFP_KERNEL,
+ &priv->attrs);
+ if (!node->cpuaddr) {
+ nv_error(priv, "cannot allocate DMA memory\n");
+ return -ENOMEM;
+ }
+
+ /* alignment check */
+ if (unlikely(node->handle & (align - 1)))
+ nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
+ &node->handle, align);
+
+ /* present memory for being mapped using small pages */
+ node->r.type = 12;
+ node->r.offset = node->handle >> 12;
+ node->r.length = (npages << PAGE_SHIFT) >> 12;
+
+ node->base._mem.offset = node->handle;
+
+ INIT_LIST_HEAD(&node->base._mem.regions);
+ list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+
+ return 0;
+}
+
+static int
+gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, u32 npages, u32 align,
+ struct gk20a_instobj_priv **_node)
+{
+ struct gk20a_instobj_iommu *node;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct nvkm_mm_node *r;
+ int ret;
+ int i;
+
+ ret = nvkm_instobj_create_(parent, engine, oclass,
+ sizeof(*node) + sizeof(node->pages[0]) * npages,
+ (void **)&node);
+ *_node = &node->base;
+ if (ret)
+ return ret;
+
+ /* Allocate backing memory */
+ for (i = 0; i < npages; i++) {
+ struct page *p = alloc_page(GFP_KERNEL);
+
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+ node->pages[i] = p;
+ }
+
+ mutex_lock(priv->mm_mutex);
+ /* Reserve area from GPU address space */
+ ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
+ align >> priv->iommu_pgshift, &r);
+ mutex_unlock(priv->mm_mutex);
+ if (ret) {
+ nv_error(priv, "virtual space is full!\n");
+ goto free_pages;
+ }
+
+ /* Map into GPU address space */
+ for (i = 0; i < npages; i++) {
+ struct page *p = node->pages[i];
+ u32 offset = (r->offset + i) << priv->iommu_pgshift;
+
+ ret = iommu_map(priv->domain, offset, page_to_phys(p),
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+
+ while (i-- > 0) {
+ offset -= PAGE_SIZE;
+ iommu_unmap(priv->domain, offset, PAGE_SIZE);
+ }
+ goto release_area;
+ }
+ }
+
+ /* Bit 34 tells that an address is to be resolved through the IOMMU */
+ r->offset |= BIT(34 - priv->iommu_pgshift);
+
+ node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+
+ INIT_LIST_HEAD(&node->base._mem.regions);
+ list_add_tail(&r->rl_entry, &node->base._mem.regions);
+
+ return 0;
+
+release_area:
+ mutex_lock(priv->mm_mutex);
+ nvkm_mm_free(priv->mm, &r);
+ mutex_unlock(priv->mm_mutex);
+
+free_pages:
+ for (i = 0; i < npages && node->pages[i] != NULL; i++)
+ __free_page(node->pages[i]);
+
+ return ret;
+}
+
+static int
+gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 _size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_instobj_args *args = data;
+ struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct gk20a_instobj_priv *node;
+ u32 size, align;
+ int ret;
+
+ nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
+ priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+
+ /* Round size and align to page bounds */
+ size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
+ align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+
+ if (priv->domain)
+ ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
+ size >> PAGE_SHIFT, align, &node);
+ else
+ ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
+ size >> PAGE_SHIFT, align, &node);
+ *pobject = nv_object(node);
+ if (ret)
+ return ret;
+
+ node->mem = &node->_mem;
+
+ /* present memory for being mapped using small pages */
+ node->mem->size = size >> 12;
+ node->mem->memtype = 0;
+ node->mem->page_shift = 12;
+
+ node->base.addr = node->mem->offset;
+ node->base.size = size;
+
+ nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+ size, align, node->mem->offset);
+
+ return 0;
+}
+
+static struct nvkm_instobj_impl
+gk20a_instobj_oclass = {
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk20a_instobj_ctor,
+ .dtor = gk20a_instobj_dtor,
+ .init = _nvkm_instobj_init,
+ .fini = _nvkm_instobj_fini,
+ .rd32 = gk20a_instobj_rd32,
+ .wr32 = gk20a_instobj_wr32,
+ },
+};
+
+
+static int
+gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
+{
+ struct gk20a_instmem_priv *priv = (void *)object;
+ priv->addr = ~0ULL;
+ return nvkm_instmem_fini(&priv->base, suspend);
+}
+
+static int
+gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct gk20a_instmem_priv *priv;
+ struct nouveau_platform_device *plat;
+ int ret;
+
+ ret = nvkm_instmem_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&priv->lock);
+
+ plat = nv_device_to_platform(nv_device(parent));
+ if (plat->gpu->iommu.domain) {
+ priv->domain = plat->gpu->iommu.domain;
+ priv->mm = plat->gpu->iommu.mm;
+ priv->iommu_pgshift = plat->gpu->iommu.pgshift;
+ priv->mm_mutex = &plat->gpu->iommu.mutex;
+
+ nv_info(priv, "using IOMMU\n");
+ } else {
+ init_dma_attrs(&priv->attrs);
+ /*
+ * We will access instmem through PRAMIN and thus do not need a
+ * consistent CPU pointer or kernel mapping
+ */
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
+ dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+
+ nv_info(priv, "using DMA API\n");
+ }
+
+ return 0;
+}
+
+struct nvkm_oclass *
+gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0xea),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = gk20a_instmem_ctor,
+ .dtor = _nvkm_instmem_dtor,
+ .init = _nvkm_instmem_init,
+ .fini = gk20a_instmem_fini,
+ },
+ .instobj = &gk20a_instobj_oclass.base,
+}.base;
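gk20a_instobj_rd32()/_wr32() above reach instance memory through the PRAMIN aperture: the upper bits of the absolute address select a 1 MiB window, programmed through register 0x001700 as base >> 16, while the low 20 bits index into that window at BAR0 offset 0x700000. A small userspace model of the address split (the sample address is hypothetical; the real accessors also hold priv->lock and skip reprogramming the window when the base is unchanged):

    #include <stdio.h>
    #include <stdint.h>

    /* Same masks as gk20a_instobj_rd32()/_wr32() above. */
    static void pramin_split(uint64_t gpu_addr)
    {
            uint64_t base = gpu_addr & 0xffffff00000ULL; /* 1 MiB window base */
            uint64_t off  = gpu_addr & 0x000000fffffULL; /* offset inside it  */

            printf("addr 0x%011llx -> wr32(0x001700, 0x%05llx), access 0x%08llx\n",
                   (unsigned long long)gpu_addr,
                   (unsigned long long)(base >> 16),
                   (unsigned long long)(0x700000 + off));
    }

    int main(void)
    {
            pramin_split(0x00012345678ULL); /* hypothetical instobj offset */
            return 0;
    }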
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 8e7cc6200d60..7fb5ea0314cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -136,7 +136,8 @@ gf100_ltc_dtor(struct nvkm_object *object)
struct nvkm_ltc_priv *priv = (void *)object;
nvkm_mm_fini(&priv->tags);
- nvkm_mm_free(&pfb->vram, &priv->tag_ram);
+ if (pfb->ram)
+ nvkm_mm_free(&pfb->vram, &priv->tag_ram);
nvkm_ltc_destroy(priv);
}
@@ -149,6 +150,12 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
u32 tag_size, tag_margin, tag_align;
int ret;
+ /* No VRAM, no tags for now. */
+ if (!pfb->ram) {
+ priv->num_tags = 0;
+ goto mm_init;
+ }
+
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
priv->num_tags = (pfb->ram->size >> 17) / 4;
if (priv->num_tags > (1 << 17))
@@ -183,6 +190,7 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
priv->tag_base = tag_base;
}
+mm_init:
ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 42cac13ca629..f20e4ca87e17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -182,7 +182,7 @@ mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
{
u64 desc = *(u64 *)data;
if ((desc & 0xf0) != 0xf0)
- nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+ nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 9a150d520225..7081d6a9b95f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -4,5 +4,6 @@ nvkm-y += nvkm/subdev/pmu/gt215.o
nvkm-y += nvkm/subdev/pmu/gf100.o
nvkm-y += nvkm/subdev/pmu/gf110.o
nvkm-y += nvkm/subdev/pmu/gk104.o
+nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
new file mode 100644
index 000000000000..89bb94b0af8b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf110_pmu_code gk110_pmu_code
+#define gf110_pmu_data gk110_pmu_data
+#include "priv.h"
+#include "fuc/gf110.fuc4.h"
+
+#include <subdev/timer.h>
+
+void
+gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
+{
+ static const struct {
+ u32 addr;
+ u32 data;
+ } magic[] = {
+ { 0x020520, 0xfffffffc },
+ { 0x020524, 0xfffffffe },
+ { 0x020524, 0xfffffffc },
+ { 0x020524, 0xfffffff8 },
+ { 0x020524, 0xffffffe0 },
+ { 0x020530, 0xfffffffe },
+ { 0x02052c, 0xfffffffa },
+ { 0x02052c, 0xfffffff0 },
+ { 0x02052c, 0xffffffc0 },
+ { 0x02052c, 0xffffff00 },
+ { 0x02052c, 0xfffffc00 },
+ { 0x02052c, 0xfffcfc00 },
+ { 0x02052c, 0xfff0fc00 },
+ { 0x02052c, 0xff80fc00 },
+ { 0x020528, 0xfffffffe },
+ { 0x020528, 0xfffffffc },
+ };
+ int i;
+
+ nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
+ nv_rd32(pmu, 0x000200);
+ nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+ msleep(50);
+
+ nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(magic); i++) {
+ nv_wr32(pmu, magic[i].addr, magic[i].data);
+ nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000);
+ }
+
+ nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
+ nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
+ nv_rd32(pmu, 0x000200);
+}
+
+struct nvkm_oclass *
+gk110_pmu_oclass = &(struct nvkm_pmu_impl) {
+ .base.handle = NV_SUBDEV(PMU, 0xf0),
+ .base.ofuncs = &(struct nvkm_ofuncs) {
+ .ctor = _nvkm_pmu_ctor,
+ .dtor = _nvkm_pmu_dtor,
+ .init = _nvkm_pmu_init,
+ .fini = _nvkm_pmu_fini,
+ },
+ .code.data = gk110_pmu_code,
+ .code.size = sizeof(gk110_pmu_code),
+ .data.data = gk110_pmu_data,
+ .data.size = sizeof(gk110_pmu_data),
+ .pgob = gk110_pmu_pgob,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index 6f9c09af1a49..b14134ef9ea5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -37,4 +37,5 @@ gk208_pmu_oclass = &(struct nvkm_pmu_impl) {
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
+ .pgob = gk110_pmu_pgob,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index a49934bbe637..594f746e68f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -159,7 +159,7 @@ resched:
nvkm_timer_alarm(priv, 100000000, alarm);
}
-int
+static int
gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_pmu *pmu = (void *)object;
@@ -170,7 +170,7 @@ gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
return nvkm_subdev_fini(&pmu->base, suspend);
}
-int
+static int
gk20a_pmu_init(struct nvkm_object *object)
{
struct nvkm_pmu *pmu = (void *)object;
@@ -192,7 +192,8 @@ gk20a_pmu_init(struct nvkm_object *object)
return ret;
}
-struct gk20a_pmu_dvfs_data gk20a_dvfs_data= {
+static struct gk20a_pmu_dvfs_data
+gk20a_dvfs_data = {
.p_load_target = 70,
.p_load_max = 90,
.p_smooth = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 998410563bfd..799e7c8b88f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -40,4 +40,6 @@ struct nvkm_pmu_impl {
void (*pgob)(struct nvkm_pmu *, bool);
};
+
+void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index a94b11f7859d..17739737dcf6 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -102,7 +102,7 @@ void copy_timings_drm_to_omap(struct omap_video_timings *timings,
timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
}
static enum drm_connector_status omap_connector_detect(
@@ -271,18 +271,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.best_encoder = omap_connector_attached_encoder,
};
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-void omap_connector_flush(struct drm_connector *connector,
- int x, int y, int w, int h)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- /* TODO: enable when supported in dss */
- VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
-}
-
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
int connector_type, struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index b0566a1ca28f..f456544bf300 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -28,7 +28,6 @@
struct omap_crtc {
struct drm_crtc base;
- struct drm_plane *plane;
const char *name;
int pipe;
@@ -46,7 +45,6 @@ struct omap_crtc {
struct omap_video_timings timings;
bool enabled;
- bool full_update;
struct omap_drm_apply apply;
@@ -74,8 +72,14 @@ struct omap_crtc {
* XXX maybe fold into apply_work??
*/
struct work_struct page_flip_work;
+
+ bool ignore_digit_sync_lost;
};
+/* -----------------------------------------------------------------------------
+ * Helper Functions
+ */
+
uint32_t pipe2vbl(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -83,6 +87,22 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->channel;
+}
+
+/* -----------------------------------------------------------------------------
+ * DSS Manager Functions
+ */
+
/*
* Manager-ops, callbacks from output when they need to configure
* the upstream part of the video pipe.
@@ -122,7 +142,63 @@ static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
{
}
-static void set_enabled(struct drm_crtc *crtc, bool enable);
+/* Called only from CRTC pre_apply and suspend/resume handlers. */
+static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ enum omap_channel channel = omap_crtc->channel;
+ struct omap_irq_wait *wait;
+ u32 framedone_irq, vsync_irq;
+ int ret;
+
+ if (dispc_mgr_is_enabled(channel) == enable)
+ return;
+
+ if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
+ /*
+ * Digit output produces some sync lost interrupts during the
+ * first frame when enabling, so we need to ignore those.
+ */
+ omap_crtc->ignore_digit_sync_lost = true;
+ }
+
+ framedone_irq = dispc_mgr_get_framedone_irq(channel);
+ vsync_irq = dispc_mgr_get_vsync_irq(channel);
+
+ if (enable) {
+ wait = omap_irq_wait_init(dev, vsync_irq, 1);
+ } else {
+ /*
+ * When we disable the digit output, we need to wait for
+ * FRAMEDONE to know that DISPC has finished with the output.
+ *
+ * OMAP2/3 does not have FRAMEDONE irq for digit output, and in
+ * that case we need to use vsync interrupt, and wait for both
+ * even and odd frames.
+ */
+
+ if (framedone_irq)
+ wait = omap_irq_wait_init(dev, framedone_irq, 1);
+ else
+ wait = omap_irq_wait_init(dev, vsync_irq, 2);
+ }
+
+ dispc_mgr_enable(channel, enable);
+
+ ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+ if (ret) {
+ dev_err(dev->dev, "%s: timeout waiting for %s\n",
+ omap_crtc->name, enable ? "enable" : "disable");
+ }
+
+ if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
+ omap_crtc->ignore_digit_sync_lost = false;
+ /* make sure the irq handler sees the value above */
+ mb();
+ }
+}
+
static int omap_crtc_enable(struct omap_overlay_manager *mgr)
{
@@ -131,7 +207,7 @@ static int omap_crtc_enable(struct omap_overlay_manager *mgr)
dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
dispc_mgr_set_timings(omap_crtc->channel,
&omap_crtc->timings);
- set_enabled(&omap_crtc->base, true);
+ omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
@@ -140,7 +216,7 @@ static void omap_crtc_disable(struct omap_overlay_manager *mgr)
{
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
- set_enabled(&omap_crtc->base, false);
+ omap_crtc_set_enabled(&omap_crtc->base, false);
}
static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
@@ -149,7 +225,6 @@ static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
DBG("%s", omap_crtc->name);
omap_crtc->timings = *timings;
- omap_crtc->full_update = true;
}
static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
@@ -174,19 +249,201 @@ static void omap_crtc_unregister_framedone_handler(
}
static const struct dss_mgr_ops mgr_ops = {
- .connect = omap_crtc_connect,
- .disconnect = omap_crtc_disconnect,
- .start_update = omap_crtc_start_update,
- .enable = omap_crtc_enable,
- .disable = omap_crtc_disable,
- .set_timings = omap_crtc_set_timings,
- .set_lcd_config = omap_crtc_set_lcd_config,
- .register_framedone_handler = omap_crtc_register_framedone_handler,
- .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
+ .connect = omap_crtc_connect,
+ .disconnect = omap_crtc_disconnect,
+ .start_update = omap_crtc_start_update,
+ .enable = omap_crtc_enable,
+ .disable = omap_crtc_disable,
+ .set_timings = omap_crtc_set_timings,
+ .set_lcd_config = omap_crtc_set_lcd_config,
+ .register_framedone_handler = omap_crtc_register_framedone_handler,
+ .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
};
-/*
- * CRTC funcs:
+/* -----------------------------------------------------------------------------
+ * Apply Logic
+ */
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, error_irq);
+
+ if (omap_crtc->ignore_digit_sync_lost) {
+ irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+ if (!irqstatus)
+ return;
+ }
+
+ DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, apply_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+
+ if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+ struct omap_drm_private *priv =
+ crtc->dev->dev_private;
+ DBG("%s: apply done", omap_crtc->name);
+ __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, apply_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct omap_drm_apply *apply, *n;
+ bool need_apply;
+
+ /*
+ * Synchronize everything on mode_config.mutex, to keep
+ * the callbacks and list modification all serialized
+ * with respect to modesetting ioctls from userspace.
+ */
+ drm_modeset_lock(&crtc->mutex, NULL);
+ dispc_runtime_get();
+
+ /*
+ * If we are still pending a previous update, wait.. when the
+ * pending update completes, we get kicked again.
+ */
+ if (omap_crtc->apply_irq.registered)
+ goto out;
+
+ /* finish up previous apply's: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->pending_applies, pending_node) {
+ apply->post_apply(apply);
+ list_del(&apply->pending_node);
+ }
+
+ need_apply = !list_empty(&omap_crtc->queued_applies);
+
+ /* then handle the next round of queued apply's: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->queued_applies, queued_node) {
+ apply->pre_apply(apply);
+ list_del(&apply->queued_node);
+ apply->queued = false;
+ list_add_tail(&apply->pending_node,
+ &omap_crtc->pending_applies);
+ }
+
+ if (need_apply) {
+ enum omap_channel channel = omap_crtc->channel;
+
+ DBG("%s: GO", omap_crtc->name);
+
+ if (dispc_mgr_is_enabled(channel)) {
+ dispc_mgr_go(channel);
+ omap_irq_register(dev, &omap_crtc->apply_irq);
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+ }
+
+out:
+ dispc_runtime_put();
+ drm_modeset_unlock(&crtc->mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* no need to queue it again if it is already queued: */
+ if (apply->queued)
+ return 0;
+
+ apply->queued = true;
+ list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+ /*
+ * If there are no currently pending updates, then go ahead and
+ * kick the worker immediately, otherwise it will run again when
+ * the current update finishes.
+ */
+ if (list_empty(&omap_crtc->pending_applies)) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+
+ return 0;
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(apply, struct omap_crtc, apply);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ unsigned int i;
+
+ DBG("%s: enabled=%d", omap_crtc->name, omap_crtc->enabled);
+
+ for (i = 0; i < priv->num_encoders; i++) {
+ if (priv->encoders[i]->crtc == crtc) {
+ encoder = priv->encoders[i];
+ break;
+ }
+ }
+
+ if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
+ omap_encoder_set_enabled(omap_crtc->current_encoder, false);
+
+ omap_crtc->current_encoder = encoder;
+
+ if (!omap_crtc->enabled) {
+ if (encoder)
+ omap_encoder_set_enabled(encoder, false);
+ } else {
+ if (encoder) {
+ omap_encoder_set_enabled(encoder, false);
+ omap_encoder_update(encoder, omap_crtc->mgr,
+ &omap_crtc->timings);
+ omap_encoder_set_enabled(encoder, true);
+ }
+ }
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+ /* nothing needed for post-apply */
+}
+
+void omap_crtc_flush(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ int loops = 0;
+
+ while (!list_empty(&omap_crtc->pending_applies) ||
+ !list_empty(&omap_crtc->queued_applies) ||
+ omap_crtc->event || omap_crtc->old_fb) {
+
+ if (++loops > 10) {
+ dev_err(crtc->dev->dev,
+ "omap_crtc_flush() timeout\n");
+ break;
+ }
+
+ schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC Functions
*/
static void omap_crtc_destroy(struct drm_crtc *crtc)
@@ -214,17 +471,13 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
if (enabled != omap_crtc->enabled) {
omap_crtc->enabled = enabled;
- omap_crtc->full_update = true;
omap_crtc_apply(crtc, &omap_crtc->apply);
- /* also enable our private plane: */
- WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
-
- /* and any attached overlay planes: */
+ /* Enable/disable all planes associated with the CRTC. */
for (i = 0; i < priv->num_planes; i++) {
struct drm_plane *plane = priv->planes[i];
if (plane->crtc == crtc)
- WARN_ON(omap_plane_dpms(plane, mode));
+ WARN_ON(omap_plane_set_enable(plane, enabled));
}
}
}
@@ -256,13 +509,17 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
mode->type, mode->flags);
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
- omap_crtc->full_update = true;
- return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- NULL, NULL);
+ /*
+ * The primary plane CRTC can be reset if the plane is disabled directly
+ * through the universal plane API. Set it again here.
+ */
+ crtc->primary->crtc = crtc;
+
+ return omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x, y, mode->hdisplay, mode->vdisplay,
+ NULL, NULL);
}
static void omap_crtc_prepare(struct drm_crtc *crtc)
@@ -282,15 +539,13 @@ static void omap_crtc_commit(struct drm_crtc *crtc)
static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_plane *plane = omap_crtc->plane;
+ struct drm_plane *plane = crtc->primary;
struct drm_display_mode *mode = &crtc->mode;
return omap_plane_mode_set(plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- NULL, NULL);
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x, y, mode->hdisplay, mode->vdisplay,
+ NULL, NULL);
}
static void vblank_cb(void *arg)
@@ -299,6 +554,7 @@ static void vblank_cb(void *arg)
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
unsigned long flags;
+ struct drm_framebuffer *fb;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -306,10 +562,15 @@ static void vblank_cb(void *arg)
if (omap_crtc->event)
drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
+ fb = omap_crtc->old_fb;
+
omap_crtc->event = NULL;
omap_crtc->old_fb = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (fb)
+ drm_framebuffer_unreference(fb);
}
static void page_flip_worker(struct work_struct *work)
@@ -321,11 +582,10 @@ static void page_flip_worker(struct work_struct *work)
struct drm_gem_object *bo;
drm_modeset_lock(&crtc->mutex, NULL);
- omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- crtc->x << 16, crtc->y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- vblank_cb, crtc);
+ omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ crtc->x, crtc->y, mode->hdisplay, mode->vdisplay,
+ vblank_cb, crtc);
drm_modeset_unlock(&crtc->mutex);
bo = omap_framebuffer_bo(crtc->primary->fb, 0);
@@ -361,11 +621,12 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
if (omap_crtc->old_fb) {
spin_unlock_irqrestore(&dev->event_lock, flags);
dev_err(dev->dev, "already a pending flip\n");
- return -EINVAL;
+ return -EBUSY;
}
omap_crtc->event = event;
omap_crtc->old_fb = primary->fb = fb;
+ drm_framebuffer_reference(omap_crtc->old_fb);
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -385,7 +646,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
static int omap_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val)
{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_drm_private *priv = crtc->dev->dev_private;
if (property == priv->rotation_prop) {
@@ -393,7 +653,7 @@ static int omap_crtc_set_property(struct drm_crtc *crtc,
!!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
}
- return omap_plane_set_property(omap_crtc->plane, property, val);
+ return omap_plane_set_property(crtc->primary, property, val);
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
@@ -412,256 +672,15 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.mode_set_base = omap_crtc_mode_set_base,
};
-const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
-}
-
-enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return omap_crtc->channel;
-}
-
-static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, error_irq);
- struct drm_crtc *crtc = &omap_crtc->base;
- DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
- /* avoid getting in a flood, unregister the irq until next vblank */
- __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-}
-
-static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, apply_irq);
- struct drm_crtc *crtc = &omap_crtc->base;
-
- if (!omap_crtc->error_irq.registered)
- __omap_irq_register(crtc->dev, &omap_crtc->error_irq);
-
- if (!dispc_mgr_go_busy(omap_crtc->channel)) {
- struct omap_drm_private *priv =
- crtc->dev->dev_private;
- DBG("%s: apply done", omap_crtc->name);
- __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
-}
-
-static void apply_worker(struct work_struct *work)
-{
- struct omap_crtc *omap_crtc =
- container_of(work, struct omap_crtc, apply_work);
- struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct omap_drm_apply *apply, *n;
- bool need_apply;
-
- /*
- * Synchronize everything on mode_config.mutex, to keep
- * the callbacks and list modification all serialized
- * with respect to modesetting ioctls from userspace.
- */
- drm_modeset_lock(&crtc->mutex, NULL);
- dispc_runtime_get();
-
- /*
- * If we are still pending a previous update, wait.. when the
- * pending update completes, we get kicked again.
- */
- if (omap_crtc->apply_irq.registered)
- goto out;
-
- /* finish up previous apply's: */
- list_for_each_entry_safe(apply, n,
- &omap_crtc->pending_applies, pending_node) {
- apply->post_apply(apply);
- list_del(&apply->pending_node);
- }
-
- need_apply = !list_empty(&omap_crtc->queued_applies);
-
- /* then handle the next round of of queued apply's: */
- list_for_each_entry_safe(apply, n,
- &omap_crtc->queued_applies, queued_node) {
- apply->pre_apply(apply);
- list_del(&apply->queued_node);
- apply->queued = false;
- list_add_tail(&apply->pending_node,
- &omap_crtc->pending_applies);
- }
-
- if (need_apply) {
- enum omap_channel channel = omap_crtc->channel;
-
- DBG("%s: GO", omap_crtc->name);
-
- if (dispc_mgr_is_enabled(channel)) {
- omap_irq_register(dev, &omap_crtc->apply_irq);
- dispc_mgr_go(channel);
- } else {
- struct omap_drm_private *priv = dev->dev_private;
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
- }
-
-out:
- dispc_runtime_put();
- drm_modeset_unlock(&crtc->mutex);
-}
-
-int omap_crtc_apply(struct drm_crtc *crtc,
- struct omap_drm_apply *apply)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- /* no need to queue it again if it is already queued: */
- if (apply->queued)
- return 0;
-
- apply->queued = true;
- list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
-
- /*
- * If there are no currently pending updates, then go ahead and
- * kick the worker immediately, otherwise it will run again when
- * the current update finishes.
- */
- if (list_empty(&omap_crtc->pending_applies)) {
- struct omap_drm_private *priv = crtc->dev->dev_private;
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
-
- return 0;
-}
-
-/* called only from apply */
-static void set_enabled(struct drm_crtc *crtc, bool enable)
-{
- struct drm_device *dev = crtc->dev;
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- enum omap_channel channel = omap_crtc->channel;
- struct omap_irq_wait *wait;
- u32 framedone_irq, vsync_irq;
- int ret;
-
- if (dispc_mgr_is_enabled(channel) == enable)
- return;
-
- /*
- * Digit output produces some sync lost interrupts during the first
- * frame when enabling, so we need to ignore those.
- */
- omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-
- framedone_irq = dispc_mgr_get_framedone_irq(channel);
- vsync_irq = dispc_mgr_get_vsync_irq(channel);
-
- if (enable) {
- wait = omap_irq_wait_init(dev, vsync_irq, 1);
- } else {
- /*
- * When we disable the digit output, we need to wait for
- * FRAMEDONE to know that DISPC has finished with the output.
- *
- * OMAP2/3 does not have FRAMEDONE irq for digit output, and in
- * that case we need to use vsync interrupt, and wait for both
- * even and odd frames.
- */
-
- if (framedone_irq)
- wait = omap_irq_wait_init(dev, framedone_irq, 1);
- else
- wait = omap_irq_wait_init(dev, vsync_irq, 2);
- }
-
- dispc_mgr_enable(channel, enable);
-
- ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
- if (ret) {
- dev_err(dev->dev, "%s: timeout waiting for %s\n",
- omap_crtc->name, enable ? "enable" : "disable");
- }
-
- omap_irq_register(crtc->dev, &omap_crtc->error_irq);
-}
-
-static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
-{
- struct omap_crtc *omap_crtc =
- container_of(apply, struct omap_crtc, apply);
- struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_encoder *encoder = NULL;
-
- DBG("%s: enabled=%d, full=%d", omap_crtc->name,
- omap_crtc->enabled, omap_crtc->full_update);
-
- if (omap_crtc->full_update) {
- struct omap_drm_private *priv = crtc->dev->dev_private;
- int i;
- for (i = 0; i < priv->num_encoders; i++) {
- if (priv->encoders[i]->crtc == crtc) {
- encoder = priv->encoders[i];
- break;
- }
- }
- }
-
- if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
- omap_encoder_set_enabled(omap_crtc->current_encoder, false);
-
- omap_crtc->current_encoder = encoder;
-
- if (!omap_crtc->enabled) {
- if (encoder)
- omap_encoder_set_enabled(encoder, false);
- } else {
- if (encoder) {
- omap_encoder_set_enabled(encoder, false);
- omap_encoder_update(encoder, omap_crtc->mgr,
- &omap_crtc->timings);
- omap_encoder_set_enabled(encoder, true);
- }
- }
-
- omap_crtc->full_update = false;
-}
-
-static void omap_crtc_post_apply(struct omap_drm_apply *apply)
-{
- /* nothing needed for post-apply */
-}
-
-void omap_crtc_flush(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- int loops = 0;
-
- while (!list_empty(&omap_crtc->pending_applies) ||
- !list_empty(&omap_crtc->queued_applies) ||
- omap_crtc->event || omap_crtc->old_fb) {
-
- if (++loops > 10) {
- dev_err(crtc->dev->dev,
- "omap_crtc_flush() timeout\n");
- break;
- }
-
- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
- }
-}
+/* -----------------------------------------------------------------------------
+ * Init and Cleanup
+ */
static const char *channel_names[] = {
- [OMAP_DSS_CHANNEL_LCD] = "lcd",
- [OMAP_DSS_CHANNEL_DIGIT] = "tv",
- [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
- [OMAP_DSS_CHANNEL_LCD3] = "lcd3",
+ [OMAP_DSS_CHANNEL_LCD] = "lcd",
+ [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+ [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+ [OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
void omap_crtc_pre_init(void)
@@ -681,12 +700,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc = NULL;
struct omap_crtc *omap_crtc;
struct omap_overlay_manager_info *info;
+ int ret;
DBG("%s", channel_names[channel]);
omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
if (!omap_crtc)
- goto fail;
+ return NULL;
crtc = &omap_crtc->base;
@@ -700,8 +720,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->apply.post_apply = omap_crtc_post_apply;
omap_crtc->channel = channel;
- omap_crtc->plane = plane;
- omap_crtc->plane->crtc = crtc;
omap_crtc->name = channel_names[channel];
omap_crtc->pipe = id;
@@ -723,18 +741,18 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
info->trans_enabled = false;
- drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &omap_crtc_funcs);
+ if (ret < 0) {
+ kfree(omap_crtc);
+ return NULL;
+ }
+
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
- omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+ omap_plane_install_properties(crtc->primary, &crtc->base);
omap_crtcs[channel] = omap_crtc;
return crtc;
-
-fail:
- if (crtc)
- omap_crtc_destroy(crtc);
-
- return NULL;
}
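The hunks above move the CRTC over to the universal plane API: instead of keeping a private omap_crtc->plane pointer, the CRTC is registered together with its primary plane through drm_crtc_init_with_planes() and is reached afterwards as crtc->primary. A minimal sketch of that registration pattern, assuming the ops tables are defined elsewhere; the foo_* names are placeholders, not omapdrm identifiers:

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

/* Placeholder ops tables; a real driver fills these in. */
static const struct drm_crtc_funcs foo_crtc_funcs;
static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs;

struct drm_crtc *foo_crtc_init(struct drm_device *dev, struct drm_plane *primary)
{
        struct drm_crtc *crtc;
        int ret;

        crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
        if (!crtc)
                return NULL;

        /* Register the CRTC and its primary plane together; no cursor plane. */
        ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL,
                                        &foo_crtc_funcs);
        if (ret < 0) {
                kfree(crtc);
                return NULL;
        }

        drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);

        /* The DRM core now records the plane in crtc->primary. */
        return crtc;
}

Because the core tracks the primary plane itself, the old fail: label and the hand-rolled plane bookkeeping in omap_crtc_init() can be dropped, which is what the hunk does.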
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58bcd6ae0255..9f32a83ca507 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -148,11 +148,15 @@ struct refill_engine {
bool async;
- wait_queue_head_t wait_for_refill;
+ struct completion compl;
struct list_head idle_node;
};
+struct dmm_platform_data {
+ uint32_t cpu_cache_flags;
+};
+
struct dmm {
struct device *dev;
void __iomem *base;
@@ -183,6 +187,8 @@ struct dmm {
/* allocation list and lock */
struct list_head alloc_head;
+
+ const struct dmm_platform_data *plat_data;
};
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 56c60552abba..042038e8a662 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/list.h>
+#include <linux/completion.h>
#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
@@ -39,6 +40,10 @@
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;
+#if defined(CONFIG_OF)
+static const struct of_device_id dmm_of_match[];
+#endif
+
/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);
@@ -58,19 +63,19 @@ static const struct {
uint32_t slot_w; /* width of each slot (in pixels) */
uint32_t slot_h; /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
- [TILFMT_8BIT] = GEOM(0, 0, 1),
- [TILFMT_16BIT] = GEOM(0, 1, 2),
- [TILFMT_32BIT] = GEOM(1, 1, 4),
- [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+ [TILFMT_8BIT] = GEOM(0, 0, 1),
+ [TILFMT_16BIT] = GEOM(0, 1, 2),
+ [TILFMT_32BIT] = GEOM(1, 1, 4),
+ [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
- [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
- DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
- [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
- DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+ [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+ DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+ [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+ DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
/* simple allocator to grab next 16 byte aligned memory from txn */
@@ -142,10 +147,10 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
for (i = 0; i < dmm->num_engines; i++) {
if (status & DMM_IRQSTAT_LST) {
- wake_up_interruptible(&dmm->engines[i].wait_for_refill);
-
if (dmm->engines[i].async)
release_engine(&dmm->engines[i]);
+
+ complete(&dmm->engines[i].compl);
}
status >>= 8;
@@ -269,15 +274,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
/* mark whether it is async to denote list management in IRQ handler */
engine->async = wait ? false : true;
+ reinit_completion(&engine->compl);
+ /* verify that the irq handler sees the 'async' and completion value */
+ smp_mb();
/* kick reload */
writel(engine->refill_pa,
dmm->base + reg[PAT_DESCR][engine->id]);
if (wait) {
- if (wait_event_interruptible_timeout(engine->wait_for_refill,
- wait_status(engine, DMM_PATSTATUS_READY) == 0,
- msecs_to_jiffies(1)) <= 0) {
+ if (!wait_for_completion_timeout(&engine->compl,
+ msecs_to_jiffies(1))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
}
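The refill wait above is switched from a wait queue that polled the PAT status register to a struct completion signalled by the IRQ handler. A stripped-down sketch of that idiom; the foo_* names are illustrative and the hardware kick is elided:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct foo_engine {
        struct completion compl;        /* init_completion() once at probe time */
};

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
        struct foo_engine *engine = arg;

        /* Wake whoever sleeps in foo_kick_and_wait(). */
        complete(&engine->compl);
        return IRQ_HANDLED;
}

static int foo_kick_and_wait(struct foo_engine *engine)
{
        /* Re-arm the completion before starting the hardware... */
        reinit_completion(&engine->compl);
        /* ...and make sure the IRQ handler sees the re-armed state. */
        smp_mb();

        /* <write the descriptor address / kick the hardware here> */

        /* wait_for_completion_timeout() returns 0 on timeout. */
        if (!wait_for_completion_timeout(&engine->compl,
                                         msecs_to_jiffies(1)))
                return -ETIMEDOUT;

        return 0;
}

Compared with the old wait_event_interruptible_timeout() on the status bit, the completion-based wait is not cut short by signals and does not need to re-poll the status register.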
@@ -529,6 +536,11 @@ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
+uint32_t tiler_get_cpu_cache_flags(void)
+{
+ return omap_dmm->plat_data->cpu_cache_flags;
+}
+
bool dmm_is_available(void)
{
return omap_dmm ? true : false;
@@ -592,6 +604,18 @@ static int omap_dmm_probe(struct platform_device *dev)
init_waitqueue_head(&omap_dmm->engine_queue);
+ if (dev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(dmm_of_match, dev->dev.of_node);
+ if (!match) {
+ dev_err(&dev->dev, "failed to find matching device node\n");
+ return -ENODEV;
+ }
+
+ omap_dmm->plat_data = match->data;
+ }
+
/* lookup hwmod data - base address and irq */
mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -696,7 +720,7 @@ static int omap_dmm_probe(struct platform_device *dev)
(REFILL_BUFFER_SIZE * i);
omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
(REFILL_BUFFER_SIZE * i);
- init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+ init_completion(&omap_dmm->engines[i].compl);
list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
}
@@ -941,7 +965,7 @@ error:
}
#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
struct tcm_area area;
@@ -965,16 +989,28 @@ static int omap_dmm_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops omap_dmm_pm_ops = {
- .resume = omap_dmm_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);
+
#if defined(CONFIG_OF)
+static const struct dmm_platform_data dmm_omap4_platform_data = {
+ .cpu_cache_flags = OMAP_BO_WC,
+};
+
+static const struct dmm_platform_data dmm_omap5_platform_data = {
+ .cpu_cache_flags = OMAP_BO_UNCACHED,
+};
+
static const struct of_device_id dmm_of_match[] = {
- { .compatible = "ti,omap4-dmm", },
- { .compatible = "ti,omap5-dmm", },
+ {
+ .compatible = "ti,omap4-dmm",
+ .data = &dmm_omap4_platform_data,
+ },
+ {
+ .compatible = "ti,omap5-dmm",
+ .data = &dmm_omap5_platform_data,
+ },
{},
};
#endif
@@ -986,9 +1022,7 @@ struct platform_driver omap_dmm_driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
.of_match_table = of_match_ptr(dmm_of_match),
-#ifdef CONFIG_PM
.pm = &omap_dmm_pm_ops,
-#endif
},
};
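The probe change above replaces the hard-coded OMAP_BO_WC default with per-SoC data attached to the OF match table: each compatible entry carries a struct dmm_platform_data through .data, and of_match_node() returns the matching entry at probe time. The same pattern in generic form (vendor strings and foo_* names are made up for illustration):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct foo_platform_data {
        u32 cache_flags;
};

static const struct foo_platform_data foo_v1_data = { .cache_flags = 0x1 };
static const struct foo_platform_data foo_v2_data = { .cache_flags = 0x2 };

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo-v1", .data = &foo_v1_data },
        { .compatible = "vendor,foo-v2", .data = &foo_v2_data },
        { /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;
        const struct foo_platform_data *pdata;

        match = of_match_node(foo_of_match, pdev->dev.of_node);
        if (!match)
                return -ENODEV;

        pdata = match->data;
        /* pdata->cache_flags now drives the per-SoC behaviour. */
        return 0;
}

This is what lets tiler_get_cpu_cache_flags() hand OMAP_BO_WC to OMAP4 and OMAP_BO_UNCACHED to OMAP5 without a runtime SoC check in the GEM code.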
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 4fdd61e54bd2..e83c78372db8 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -106,6 +106,7 @@ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+uint32_t tiler_get_cpu_cache_flags(void);
bool dmm_is_available(void);
extern struct platform_driver omap_dmm_driver;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 8241ed9b353c..94920d47e3b6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -128,6 +128,29 @@ cleanup:
return r;
}
+static int omap_modeset_create_crtc(struct drm_device *dev, int id,
+ enum omap_channel channel)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+
+ crtc = omap_crtc_init(dev, plane, channel, id);
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
+
+ priv->planes[id] = plane;
+ priv->num_planes++;
+
+ return 0;
+}
+
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
@@ -136,6 +159,7 @@ static int omap_modeset_init(struct drm_device *dev)
int num_mgrs = dss_feat_get_num_mgrs();
int num_crtcs;
int i, id = 0;
+ int ret;
drm_mode_config_init(dev);
@@ -209,18 +233,13 @@ static int omap_modeset_init(struct drm_device *dev)
* allocated crtc, we create a new crtc for it
*/
if (!channel_used(dev, channel)) {
- struct drm_plane *plane;
- struct drm_crtc *crtc;
-
- plane = omap_plane_init(dev, id, true);
- crtc = omap_crtc_init(dev, plane, channel, id);
-
- BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
- priv->crtcs[id] = crtc;
- priv->num_crtcs++;
-
- priv->planes[id] = plane;
- priv->num_planes++;
+ ret = omap_modeset_create_crtc(dev, id, channel);
+ if (ret < 0) {
+ dev_err(dev->dev,
+ "could not create CRTC (channel %u)\n",
+ channel);
+ return ret;
+ }
id++;
}
@@ -234,26 +253,8 @@ static int omap_modeset_init(struct drm_device *dev)
/* find a free manager for this crtc */
for (i = 0; i < num_mgrs; i++) {
- if (!channel_used(dev, i)) {
- struct drm_plane *plane;
- struct drm_crtc *crtc;
-
- plane = omap_plane_init(dev, id, true);
- crtc = omap_crtc_init(dev, plane, i, id);
-
- BUG_ON(priv->num_crtcs >=
- ARRAY_SIZE(priv->crtcs));
-
- priv->crtcs[id] = crtc;
- priv->num_crtcs++;
-
- priv->planes[id] = plane;
- priv->num_planes++;
-
+ if (!channel_used(dev, i))
break;
- } else {
- continue;
- }
}
if (i == num_mgrs) {
@@ -261,13 +262,24 @@ static int omap_modeset_init(struct drm_device *dev)
dev_err(dev->dev, "no managers left for crtc\n");
return -ENOMEM;
}
+
+ ret = omap_modeset_create_crtc(dev, id, i);
+ if (ret < 0) {
+ dev_err(dev->dev,
+ "could not create CRTC (channel %u)\n", i);
+ return ret;
+ }
}
/*
* Create normal planes for the remaining overlays:
*/
for (; id < num_ovls; id++) {
- struct drm_plane *plane = omap_plane_init(dev, id, false);
+ struct drm_plane *plane;
+
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
priv->planes[priv->num_planes++] = plane;
@@ -286,14 +298,13 @@ static int omap_modeset_init(struct drm_device *dev)
for (id = 0; id < priv->num_crtcs; id++) {
struct drm_crtc *crtc = priv->crtcs[id];
enum omap_channel crtc_channel;
- enum omap_dss_output_id supported_outputs;
crtc_channel = omap_crtc_channel(crtc);
- supported_outputs =
- dss_feat_get_supported_outputs(crtc_channel);
- if (supported_outputs & output->id)
+ if (output->dispc_channel == crtc_channel) {
encoder->possible_crtcs |= (1 << id);
+ break;
+ }
}
omap_dss_put_device(output);
@@ -480,6 +491,7 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+ spin_lock_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
omap_gem_init(dev);
@@ -519,7 +531,8 @@ static int dev_unload(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
- omap_fbdev_free(dev);
+ if (priv->fbdev)
+ omap_fbdev_free(dev);
/* flush crtcs so the fbs get released */
for (i = 0; i < priv->num_crtcs; i++)
@@ -588,9 +601,11 @@ static void dev_lastclose(struct drm_device *dev)
}
}
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
- if (ret)
- DBG("failed to restore crtc mode");
+ if (priv->fbdev) {
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+ if (ret)
+ DBG("failed to restore crtc mode");
+ }
}
static void dev_preclose(struct drm_device *dev, struct drm_file *file)
@@ -610,74 +625,57 @@ static const struct vm_operations_struct omap_gem_vm_ops = {
};
static const struct file_operations omapdriver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .mmap = omap_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = noop_llseek,
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = omap_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
};
static struct drm_driver omap_drm_driver = {
- .driver_features =
- DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
- .load = dev_load,
- .unload = dev_unload,
- .open = dev_open,
- .lastclose = dev_lastclose,
- .preclose = dev_preclose,
- .postclose = dev_postclose,
- .set_busid = drm_platform_set_busid,
- .get_vblank_counter = drm_vblank_count,
- .enable_vblank = omap_irq_enable_vblank,
- .disable_vblank = omap_irq_disable_vblank,
- .irq_preinstall = omap_irq_preinstall,
- .irq_postinstall = omap_irq_postinstall,
- .irq_uninstall = omap_irq_uninstall,
- .irq_handler = omap_irq_handler,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM
+ | DRIVER_PRIME,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .set_busid = drm_platform_set_busid,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = omap_irq_enable_vblank,
+ .disable_vblank = omap_irq_disable_vblank,
+ .irq_preinstall = omap_irq_preinstall,
+ .irq_postinstall = omap_irq_postinstall,
+ .irq_uninstall = omap_irq_uninstall,
+ .irq_handler = omap_irq_handler,
#ifdef CONFIG_DEBUG_FS
- .debugfs_init = omap_debugfs_init,
- .debugfs_cleanup = omap_debugfs_cleanup,
+ .debugfs_init = omap_debugfs_init,
+ .debugfs_cleanup = omap_debugfs_cleanup,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = omap_gem_prime_export,
- .gem_prime_import = omap_gem_prime_import,
- .gem_free_object = omap_gem_free_object,
- .gem_vm_ops = &omap_gem_vm_ops,
- .dumb_create = omap_gem_dumb_create,
- .dumb_map_offset = omap_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
- .ioctls = ioctls,
- .num_ioctls = DRM_OMAP_NUM_IOCTLS,
- .fops = &omapdriver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = omap_gem_prime_export,
+ .gem_prime_import = omap_gem_prime_import,
+ .gem_free_object = omap_gem_free_object,
+ .gem_vm_ops = &omap_gem_vm_ops,
+ .dumb_create = omap_gem_dumb_create,
+ .dumb_map_offset = omap_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .ioctls = ioctls,
+ .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+ .fops = &omapdriver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
-static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
-{
- DBG("");
- return 0;
-}
-
-static int pdev_resume(struct platform_device *device)
-{
- DBG("");
- return 0;
-}
-
-static void pdev_shutdown(struct platform_device *device)
-{
- DBG("");
-}
-
static int pdev_probe(struct platform_device *device)
{
int r;
@@ -709,24 +707,35 @@ static int pdev_remove(struct platform_device *device)
return 0;
}
-#ifdef CONFIG_PM
-static const struct dev_pm_ops omapdrm_pm_ops = {
- .resume = omap_gem_resume,
-};
+#ifdef CONFIG_PM_SLEEP
+static int omap_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_disable(drm_dev);
+
+ return 0;
+}
+
+static int omap_drm_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_enable(drm_dev);
+
+ return omap_gem_resume(dev);
+}
#endif
+static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);
+
static struct platform_driver pdev = {
- .driver = {
- .name = DRIVER_NAME,
-#ifdef CONFIG_PM
- .pm = &omapdrm_pm_ops,
-#endif
- },
- .probe = pdev_probe,
- .remove = pdev_remove,
- .suspend = pdev_suspend,
- .resume = pdev_resume,
- .shutdown = pdev_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &omapdrm_pm_ops,
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
};
static int __init omap_drm_init(void)
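The legacy platform_device suspend/resume/shutdown stubs are dropped above in favour of dev_pm_ops. SIMPLE_DEV_PM_OPS() can be referenced unconditionally because it only wires up the callbacks when CONFIG_PM_SLEEP is set, which is why the #ifdef around the .pm assignment goes away. A sketch of the usage and, as an approximation rather than a copy of the <linux/pm.h> macros, what it amounts to; the foo_* names are placeholders:

#include <linux/pm.h>
#include <linux/device.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/*
 * Approximately:
 *
 *   static const struct dev_pm_ops foo_pm_ops = {
 *   #ifdef CONFIG_PM_SLEEP
 *           .suspend  = foo_suspend,  .resume  = foo_resume,
 *           .freeze   = foo_suspend,  .thaw    = foo_resume,
 *           .poweroff = foo_suspend,  .restore = foo_resume,
 *   #endif
 *   };
 */

The driver-specific part is then just the two callbacks; here they pause and restart connector polling around omap_gem_resume().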
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 60e47b33c801..b31c79f15aed 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -105,6 +105,9 @@ struct omap_drm_private {
struct workqueue_struct *wq;
+ /* lock for obj_list below */
+ spinlock_t list_lock;
+
/* list of GEM objects: */
struct list_head obj_list;
@@ -160,15 +163,15 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
void omap_crtc_flush(struct drm_crtc *crtc);
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int plane_id, bool private_plane);
-int omap_plane_dpms(struct drm_plane *plane, int mode);
+ int id, enum drm_plane_type type);
+int omap_plane_set_enable(struct drm_plane *plane, bool enable);
int omap_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- void (*fxn)(void *), void *arg);
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h,
+ void (*fxn)(void *), void *arg);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
int omap_plane_set_property(struct drm_plane *plane,
@@ -186,8 +189,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
-void omap_connector_flush(struct drm_connector *connector,
- int x, int y, int w, int h);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
@@ -208,8 +209,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from);
-void omap_framebuffer_flush(struct drm_framebuffer *fb,
- int x, int y, int w, int h);
void omap_gem_init(struct drm_device *dev);
void omap_gem_deinit(struct drm_device *dev);
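The omap_plane_mode_set() prototype above drops the Q16 source coordinates: callers now pass plain pixels, and the 16.16 fixed-point values that the DRM core hands to .update_plane are converted once at that entry point (the src_x >> 16 shift in omap_plane.c further down). A minimal illustration of the convention; the helper name and the example value are made up:

#include <linux/types.h>

/*
 * DRM plane source coordinates (src_* in .update_plane) are unsigned
 * 16.16 fixed point; the upper 16 bits hold the integer pixel count.
 */
static inline unsigned int q16_to_pixels(uint32_t coord)
{
        return coord >> 16;     /* 0x00140000 (20.0 in Q16.16) -> 20 pixels */
}

CRTC destination coordinates, by contrast, are already whole pixels and keep their types.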
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 2a5cacdc344b..b2c1a29cc12b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -86,6 +86,7 @@ struct plane {
struct omap_framebuffer {
struct drm_framebuffer base;
+ int pin_count;
const struct format *format;
struct plane planes[4];
};
@@ -121,18 +122,6 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned flags, unsigned color,
struct drm_clip_rect *clips, unsigned num_clips)
{
- int i;
-
- drm_modeset_lock_all(fb->dev);
-
- for (i = 0; i < num_clips; i++) {
- omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
- clips[i].x2 - clips[i].x1,
- clips[i].y2 - clips[i].y1);
- }
-
- drm_modeset_unlock_all(fb->dev);
-
return 0;
}
@@ -261,6 +250,11 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ if (omap_fb->pin_count > 0) {
+ omap_fb->pin_count++;
+ return 0;
+ }
+
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
@@ -269,6 +263,8 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
}
+ omap_fb->pin_count++;
+
return 0;
fail:
@@ -287,6 +283,11 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ omap_fb->pin_count--;
+
+ if (omap_fb->pin_count > 0)
+ return 0;
+
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_put_paddr(plane->bo);
@@ -336,34 +337,6 @@ struct drm_connector *omap_framebuffer_get_next_connector(
return NULL;
}
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-void omap_framebuffer_flush(struct drm_framebuffer *fb,
- int x, int y, int w, int h)
-{
- struct drm_connector *connector = NULL;
-
- VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
-
- /* FIXME: This is racy - no protection against modeset config changes. */
- while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
- /* only consider connectors that are part of a chain */
- if (connector->encoder && connector->encoder->crtc) {
- /* TODO: maybe this should propagate thru the crtc who
- * could do the coordinate translation..
- */
- struct drm_crtc *crtc = connector->encoder->crtc;
- int cx = max(0, x - crtc->x);
- int cy = max(0, y - crtc->y);
- int cw = w + (x - crtc->x) - cx;
- int ch = h + (y - crtc->y) - cy;
-
- omap_connector_flush(connector, cx, cy, cw, ch);
- }
- }
-}
-
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
@@ -407,7 +380,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- struct omap_framebuffer *omap_fb;
+ struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
const struct format *format = NULL;
int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
@@ -450,6 +423,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
+ if (pitch % format->planes[i].stride_bpp != 0) {
+ dev_err(dev->dev,
+ "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n",
+ pitch, format->planes[i].stride_bpp);
+ ret = -EINVAL;
+ goto fail;
+ }
+
size = pitch * mode_cmd->height / format->planes[i].sub_y;
if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
@@ -478,8 +459,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
return fb;
fail:
- if (fb)
- omap_framebuffer_destroy(fb);
+ kfree(omap_fb);
return ERR_PTR(ret);
}
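omap_framebuffer_pin()/unpin() above become refcounted so that a framebuffer used by more than one consumer is mapped once and released only when the last user lets go. The counting idiom reduced to its core; the foo_* names are placeholders, and the real code also walks the per-plane buffers and unwinds on error:

struct foo_fb {
        int pin_count;
};

static int foo_fb_pin(struct foo_fb *fb)
{
        if (fb->pin_count > 0) {
                fb->pin_count++;        /* already mapped, just count */
                return 0;
        }

        /* <map / pin the backing buffers here> */

        fb->pin_count++;
        return 0;
}

static void foo_fb_unpin(struct foo_fb *fb)
{
        if (--fb->pin_count > 0)
                return;                 /* other users still hold the pin */

        /* <unmap / unpin the backing buffers here> */
}

The count is not atomic; the comment added to omap_plane_unpin_worker() relies on pin/unpin always running from the driver's single ordered workqueue, so the callers serialize themselves.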
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index d292d24b3a6e..950cd3389092 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -42,42 +42,8 @@ struct omap_fbdev {
struct work_struct work;
};
-static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
static struct drm_fb_helper *get_fb(struct fb_info *fbi);
-static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t res;
-
- res = fb_sys_write(fbi, buf, count, ppos);
- omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
-
- return res;
-}
-
-static void omap_fbdev_fillrect(struct fb_info *fbi,
- const struct fb_fillrect *rect)
-{
- sys_fillrect(fbi, rect);
- omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void omap_fbdev_copyarea(struct fb_info *fbi,
- const struct fb_copyarea *area)
-{
- sys_copyarea(fbi, area);
- omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
-}
-
-static void omap_fbdev_imageblit(struct fb_info *fbi,
- const struct fb_image *image)
-{
- sys_imageblit(fbi, image);
- omap_fbdev_flush(fbi, image->dx, image->dy,
- image->width, image->height);
-}
-
static void pan_worker(struct work_struct *work)
{
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
@@ -121,10 +87,10 @@ static struct fb_ops omap_fb_ops = {
* basic fbdev ops which write to the framebuffer
*/
.fb_read = fb_sys_read,
- .fb_write = omap_fbdev_write,
- .fb_fillrect = omap_fbdev_fillrect,
- .fb_copyarea = omap_fbdev_copyarea,
- .fb_imageblit = omap_fbdev_imageblit,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -294,21 +260,6 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
return fbi->par;
}
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
-{
- struct drm_fb_helper *helper = get_fb(fbi);
-
- if (!helper)
- return;
-
- VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
-
- omap_framebuffer_flush(helper->fb, x, y, w, h);
-}
-
/* initialize fbdev helper */
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index aeb91ed653c9..e9718b99a8a9 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -828,6 +828,7 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
dev_err(obj->dev->dev,
"could not release unmap: %d\n", ret);
}
+ omap_obj->paddr = 0;
omap_obj->block = NULL;
}
}
@@ -1272,13 +1273,16 @@ unlock:
void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
evict(obj);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ spin_lock(&priv->list_lock);
list_del(&omap_obj->mm_list);
+ spin_unlock(&priv->list_lock);
drm_gem_free_mmap_offset(obj);
@@ -1358,8 +1362,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
/* currently don't allow cached buffers.. there is some caching
* stuff that needs to be handled better
*/
- flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
- flags |= OMAP_BO_WC;
+ flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
+ flags |= tiler_get_cpu_cache_flags();
/* align dimensions to slot boundaries... */
tiler_align(gem2fmt(flags),
@@ -1376,7 +1380,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
if (!omap_obj)
goto fail;
+ spin_lock(&priv->list_lock);
list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
obj = &omap_obj->base;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index a2dbfb1737b4..344fd789170d 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -156,22 +156,29 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
}
static struct dma_buf_ops omap_dmabuf_ops = {
- .map_dma_buf = omap_gem_map_dma_buf,
- .unmap_dma_buf = omap_gem_unmap_dma_buf,
- .release = omap_gem_dmabuf_release,
- .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
- .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
- .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
- .kmap = omap_gem_dmabuf_kmap,
- .kunmap = omap_gem_dmabuf_kunmap,
- .mmap = omap_gem_dmabuf_mmap,
+ .map_dma_buf = omap_gem_map_dma_buf,
+ .unmap_dma_buf = omap_gem_unmap_dma_buf,
+ .release = omap_gem_dmabuf_release,
+ .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
+ .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
+ .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
+ .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+ .kmap = omap_gem_dmabuf_kmap,
+ .kunmap = omap_gem_dmabuf_kunmap,
+ .mmap = omap_gem_dmabuf_mmap,
};
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &omap_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
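dma_buf_export() above now takes a single struct dma_buf_export_info instead of a growing argument list; DEFINE_DMA_BUF_EXPORT_INFO() declares it with the exporter name pre-filled and the driver sets the rest. The exporter side in sketch form; foo_dmabuf_ops stands in for a real ops table:

#include <linux/dma-buf.h>
#include <drm/drmP.h>

static const struct dma_buf_ops foo_dmabuf_ops;         /* placeholder */

struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);   /* pre-fills the exporter name */

        exp_info.ops = &foo_dmabuf_ops;
        exp_info.size = obj->size;      /* size of the exported object */
        exp_info.flags = flags;         /* O_CLOEXEC etc. for the new fd */
        exp_info.priv = obj;            /* handed back through the dma_buf */

        return dma_buf_export(&exp_info);
}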
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index f035d2bceae7..3eb097efc488 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -34,7 +34,7 @@ static void omap_irq_update(struct drm_device *dev)
struct omap_drm_irq *irq;
uint32_t irqmask = priv->vblank_mask;
- BUG_ON(!spin_is_locked(&list_lock));
+ assert_spin_locked(&list_lock);
list_for_each_entry(irq, &priv->irq_list, node)
irqmask |= irq->irqmask;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index ee8e2b3a117e..1c6b63f39474 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -65,12 +65,16 @@ struct omap_plane {
struct callback apply_done_cb;
};
-static void unpin_worker(struct drm_flip_work *work, void *val)
+static void omap_plane_unpin_worker(struct drm_flip_work *work, void *val)
{
struct omap_plane *omap_plane =
container_of(work, struct omap_plane, unpin_work);
struct drm_device *dev = omap_plane->base.dev;
+ /*
+ * omap_framebuffer_pin/unpin are always called from priv->wq,
+ * so there's no need for locking here.
+ */
omap_framebuffer_unpin(val);
mutex_lock(&dev->mode_config.mutex);
drm_framebuffer_unreference(val);
@@ -78,7 +82,8 @@ static void unpin_worker(struct drm_flip_work *work, void *val)
}
/* update which fb (if any) is pinned for scanout */
-static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
+static int omap_plane_update_pin(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
@@ -121,13 +126,12 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
struct drm_crtc *crtc = plane->crtc;
enum omap_channel channel;
bool enabled = omap_plane->enabled && crtc;
- bool ilace, replication;
int ret;
DBG("%s, enabled=%d", omap_plane->name, enabled);
/* if fb has changed, pin new fb: */
- update_pin(plane, enabled ? plane->fb : NULL);
+ omap_plane_update_pin(plane, enabled ? plane->fb : NULL);
if (!enabled) {
dispc_ovl_enable(omap_plane->id, false);
@@ -145,20 +149,17 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
&info->paddr, &info->p_uv_addr);
- /* TODO: */
- ilace = false;
- replication = false;
+ dispc_ovl_set_channel_out(omap_plane->id, channel);
/* and finally, update omapdss: */
- ret = dispc_ovl_setup(omap_plane->id, info,
- replication, omap_crtc_timings(crtc), false);
+ ret = dispc_ovl_setup(omap_plane->id, info, false,
+ omap_crtc_timings(crtc), false);
if (ret) {
dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
return;
}
dispc_ovl_enable(omap_plane->id, true);
- dispc_ovl_set_channel_out(omap_plane->id, channel);
}
static void omap_plane_post_apply(struct omap_drm_apply *apply)
@@ -167,7 +168,6 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
container_of(apply, struct omap_plane, apply);
struct drm_plane *plane = &omap_plane->base;
struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_overlay_info *info = &omap_plane->info;
struct callback cb;
cb = omap_plane->apply_done_cb;
@@ -177,14 +177,9 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
if (cb.fxn)
cb.fxn(cb.arg);
-
- if (omap_plane->enabled) {
- omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
- info->out_width, info->out_height);
- }
}
-static int apply(struct drm_plane *plane)
+static int omap_plane_apply(struct drm_plane *plane)
{
if (plane->crtc) {
struct omap_plane *omap_plane = to_omap_plane(plane);
@@ -194,12 +189,12 @@ static int apply(struct drm_plane *plane)
}
int omap_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- void (*fxn)(void *), void *arg)
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h,
+ void (*fxn)(void *), void *arg)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_window *win = &omap_plane->win;
@@ -209,11 +204,10 @@ int omap_plane_mode_set(struct drm_plane *plane,
win->crtc_w = crtc_w;
win->crtc_h = crtc_h;
- /* src values are in Q16 fixed point, convert to integer: */
- win->src_x = src_x >> 16;
- win->src_y = src_y >> 16;
- win->src_w = src_w >> 16;
- win->src_h = src_h >> 16;
+ win->src_x = src_x;
+ win->src_y = src_y;
+ win->src_w = src_w;
+ win->src_h = src_h;
if (fxn) {
/* omap_crtc should ensure that a new page flip
@@ -225,15 +219,7 @@ int omap_plane_mode_set(struct drm_plane *plane,
omap_plane->apply_done_cb.arg = arg;
}
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
- plane->fb = fb;
- plane->crtc = crtc;
-
- return apply(plane);
+ return omap_plane_apply(plane);
}
static int omap_plane_update(struct drm_plane *plane,
@@ -254,17 +240,29 @@ static int omap_plane_update(struct drm_plane *plane,
break;
}
+ /*
+ * We don't need to take a reference to the framebuffer as the DRM core
+ * has already done so for the purpose of setting plane->fb.
+ */
+ plane->fb = fb;
+ plane->crtc = crtc;
+
+ /* src values are in Q16 fixed point, convert to integer: */
return omap_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h,
+ src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
NULL, NULL);
}
static int omap_plane_disable(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
+
omap_plane->win.rotation = BIT(DRM_ROTATE_0);
- return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ omap_plane->info.zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
+ ? 0 : omap_plane->id;
+
+ return omap_plane_set_enable(plane, false);
}
static void omap_plane_destroy(struct drm_plane *plane)
@@ -275,7 +273,6 @@ static void omap_plane_destroy(struct drm_plane *plane)
omap_irq_unregister(plane->dev, &omap_plane->error_irq);
- omap_plane_disable(plane);
drm_plane_cleanup(plane);
drm_flip_work_cleanup(&omap_plane->unpin_work);
@@ -283,18 +280,15 @@ static void omap_plane_destroy(struct drm_plane *plane)
kfree(omap_plane);
}
-int omap_plane_dpms(struct drm_plane *plane, int mode)
+int omap_plane_set_enable(struct drm_plane *plane, bool enable)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- bool enabled = (mode == DRM_MODE_DPMS_ON);
- int ret = 0;
- if (enabled != omap_plane->enabled) {
- omap_plane->enabled = enabled;
- ret = apply(plane);
- }
+ if (enable == omap_plane->enabled)
+ return 0;
- return ret;
+ omap_plane->enabled = enable;
+ return omap_plane_apply(plane);
}
/* helper to install properties which are common to planes and crtcs */
@@ -342,61 +336,63 @@ int omap_plane_set_property(struct drm_plane *plane,
if (property == priv->rotation_prop) {
DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
omap_plane->win.rotation = val;
- ret = apply(plane);
+ ret = omap_plane_apply(plane);
} else if (property == priv->zorder_prop) {
DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
omap_plane->info.zorder = val;
- ret = apply(plane);
+ ret = omap_plane_apply(plane);
}
return ret;
}
static const struct drm_plane_funcs omap_plane_funcs = {
- .update_plane = omap_plane_update,
- .disable_plane = omap_plane_disable,
- .destroy = omap_plane_destroy,
- .set_property = omap_plane_set_property,
+ .update_plane = omap_plane_update,
+ .disable_plane = omap_plane_disable,
+ .destroy = omap_plane_destroy,
+ .set_property = omap_plane_set_property,
};
static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
{
struct omap_plane *omap_plane =
container_of(irq, struct omap_plane, error_irq);
- DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+ DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name,
+ irqstatus);
}
static const char *plane_names[] = {
- [OMAP_DSS_GFX] = "gfx",
- [OMAP_DSS_VIDEO1] = "vid1",
- [OMAP_DSS_VIDEO2] = "vid2",
- [OMAP_DSS_VIDEO3] = "vid3",
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
};
static const uint32_t error_irqs[] = {
- [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int id, bool private_plane)
+ int id, enum drm_plane_type type)
{
struct omap_drm_private *priv = dev->dev_private;
- struct drm_plane *plane = NULL;
+ struct drm_plane *plane;
struct omap_plane *omap_plane;
struct omap_overlay_info *info;
+ int ret;
- DBG("%s: priv=%d", plane_names[id], private_plane);
+ DBG("%s: type=%d", plane_names[id], type);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
- return NULL;
+ return ERR_PTR(-ENOMEM);
drm_flip_work_init(&omap_plane->unpin_work,
- "unpin", unpin_worker);
+ "unpin", omap_plane_unpin_worker);
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
@@ -413,8 +409,11 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane->error_irq.irq = omap_plane_error_irq;
omap_irq_register(dev, &omap_plane->error_irq);
- drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
- omap_plane->formats, omap_plane->nformats, private_plane);
+ ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
+ &omap_plane_funcs, omap_plane->formats,
+ omap_plane->nformats, type);
+ if (ret < 0)
+ goto error;
omap_plane_install_properties(plane, &plane->base);
@@ -432,10 +431,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
* TODO add ioctl to give userspace an API to change this.. this
* will come in a subsequent patch.
*/
- if (private_plane)
+ if (type == DRM_PLANE_TYPE_PRIMARY)
omap_plane->info.zorder = 0;
else
omap_plane->info.zorder = id;
return plane;
+
+error:
+ omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+ kfree(omap_plane);
+ return NULL;
}
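omap_plane_init() above switches to drm_universal_plane_init(), which takes an explicit plane type (primary/overlay/cursor) and can fail, so the function now reports errors with ERR_PTR()/IS_ERR() instead of returning NULL. A condensed sketch of the registration plus the caller-side check; the foo_* identifiers and the single format are illustrative, not the driver's:

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static const struct drm_plane_funcs foo_plane_funcs;    /* placeholder */
static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

struct drm_plane *foo_plane_init(struct drm_device *dev,
                                 unsigned long possible_crtcs,
                                 enum drm_plane_type type)
{
        struct drm_plane *plane;
        int ret;

        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
        if (!plane)
                return ERR_PTR(-ENOMEM);

        ret = drm_universal_plane_init(dev, plane, possible_crtcs,
                                       &foo_plane_funcs, foo_formats,
                                       ARRAY_SIZE(foo_formats), type);
        if (ret < 0) {
                kfree(plane);
                return ERR_PTR(ret);
        }

        return plane;
}

/*
 * Caller:
 *      plane = foo_plane_init(dev, 1, DRM_PLANE_TYPE_OVERLAY);
 *      if (IS_ERR(plane))
 *              return PTR_ERR(plane);
 */

The type argument is what makes the old bool private_plane parameter redundant and lets the zorder default key off DRM_PLANE_TYPE_PRIMARY.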
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d84583776d50..6d64c7bb908b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -11,6 +11,7 @@ config DRM_PANEL_SIMPLE
tristate "support for simple panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
help
DRM panel driver for dumb panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 39806c335339..30904a9b2a4c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -33,9 +33,14 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
+ const struct display_timing *timings;
+ unsigned int num_timings;
unsigned int bpc;
@@ -94,6 +99,25 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
if (!panel->desc)
return 0;
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(drm);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
@@ -226,12 +250,30 @@ static int panel_simple_get_modes(struct drm_panel *panel)
return num;
}
+static int panel_simple_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
.unprepare = panel_simple_unprepare,
.prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
+ .get_timings = panel_simple_get_timings,
};
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
@@ -327,6 +369,31 @@ static void panel_simple_shutdown(struct device *dev)
panel_simple_disable(&panel->base);
}
+static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 0,
+ .hsync_end = 800 + 0 + 255,
+ .htotal = 800 + 0 + 255 + 0,
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 45,
+ .vtotal = 480 + 2 + 45 + 0,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ampire_am800480r3tmqwa1h = {
+ .modes = &ampire_am800480r3tmqwa1h_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode auo_b101aw03_mode = {
.clock = 51450,
.hdisplay = 1024,
@@ -350,6 +417,29 @@ static const struct panel_desc auo_b101aw03 = {
},
};
+static const struct drm_display_mode auo_b101ean01_mode = {
+ .clock = 72500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 119,
+ .hsync_end = 1280 + 119 + 32,
+ .htotal = 1280 + 119 + 32 + 21,
+ .vdisplay = 800,
+ .vsync_start = 800 + 4,
+ .vsync_end = 800 + 4 + 20,
+ .vtotal = 800 + 4 + 20 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101ean01 = {
+ .modes = &auo_b101ean01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+};
+
static const struct drm_display_mode auo_b101xtn01_mode = {
.clock = 72000,
.hdisplay = 1366,
@@ -615,24 +705,25 @@ static const struct panel_desc giantplus_gpg482739qs5 = {
.width = 95,
.height = 54,
},
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
-static const struct drm_display_mode hannstar_hsd070pww1_mode = {
- .clock = 71100,
- .hdisplay = 1280,
- .hsync_start = 1280 + 1,
- .hsync_end = 1280 + 1 + 158,
- .htotal = 1280 + 1 + 158 + 1,
- .vdisplay = 800,
- .vsync_start = 800 + 1,
- .vsync_end = 800 + 1 + 21,
- .vtotal = 800 + 1 + 21 + 1,
- .vrefresh = 60,
+static const struct display_timing hannstar_hsd070pww1_timing = {
+ .pixelclock = { 64300000, 71100000, 82000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 1, 1, 10 },
+ .hback_porch = { 1, 1, 10 },
+ .hsync_len = { 52, 158, 661 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 1, 10 },
+ .vback_porch = { 1, 1, 10 },
+ .vsync_len = { 1, 21, 203 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc hannstar_hsd070pww1 = {
- .modes = &hannstar_hsd070pww1_mode,
- .num_modes = 1,
+ .timings = &hannstar_hsd070pww1_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 151,
@@ -663,6 +754,31 @@ static const struct panel_desc hitachi_tx23d38vm0caa = {
},
};
+static const struct drm_display_mode innolux_at043tn24_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 11,
+ .vtotal = 272 + 2 + 11 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_at043tn24 = {
+ .modes = &innolux_at043tn24_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode innolux_g121i1_l01_mode = {
.clock = 71000,
.hdisplay = 1280,
@@ -733,6 +849,29 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
+static const struct drm_display_mode innolux_zj070na_01p_mode = {
+ .clock = 51501,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 128,
+ .hsync_end = 1024 + 128 + 64,
+ .htotal = 1024 + 128 + 64 + 128,
+ .vdisplay = 600,
+ .vsync_start = 600 + 16,
+ .vsync_end = 600 + 16 + 4,
+ .vtotal = 600 + 16 + 4 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_zj070na_01p = {
+ .modes = &innolux_zj070na_01p_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 1024,
+ .height = 600,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -756,6 +895,30 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
+ .clock = 25000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 10,
+ .htotal = 480 + 10 + 10 + 15,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 3,
+ .vtotal = 800 + 3 + 3 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc ortustech_com43h4m85ulc = {
+ .modes = &ortustech_com43h4m85ulc_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 56,
+ .height = 93,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -779,11 +942,63 @@ static const struct panel_desc samsung_ltn101nt05 = {
},
};
+static const struct drm_display_mode samsung_ltn140at29_301_mode = {
+ .clock = 76300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 48,
+ .htotal = 1366 + 64 + 48 + 128,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 5,
+ .vtotal = 768 + 2 + 5 + 17,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_ltn140at29_301 = {
+ .modes = &samsung_ltn140at29_301_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 320,
+ .height = 187,
+ },
+};
+
+static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
+ .clock = 33300,
+ .hdisplay = 800,
+ .hsync_start = 800 + 1,
+ .hsync_end = 800 + 1 + 64,
+ .htotal = 800 + 1 + 64 + 64,
+ .vdisplay = 480,
+ .vsync_start = 480 + 1,
+ .vsync_end = 480 + 1 + 23,
+ .vtotal = 480 + 1 + 23 + 22,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc shelly_sca07010_bfn_lnn = {
+ .modes = &shelly_sca07010_bfn_lnn_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct of_device_id platform_of_match[] = {
{
+ .compatible = "ampire,am800480r3tmqwa1h",
+ .data = &ampire_am800480r3tmqwa1h,
+ }, {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b101ean01",
+ .data = &auo_b101ean01,
+ }, {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
@@ -826,6 +1041,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "hit,tx23d38vm0caa",
.data = &hitachi_tx23d38vm0caa
}, {
+ .compatible = "innolux,at043tn24",
+ .data = &innolux_at043tn24,
+ }, {
.compatible ="innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
@@ -835,12 +1053,24 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
+ .compatible = "innolux,zj070na-01p",
+ .data = &innolux_zj070na_01p,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "ortustech,com43h4m85ulc",
+ .data = &ortustech_com43h4m85ulc,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
+ .compatible = "samsung,ltn140at29-301",
+ .data = &samsung_ltn140at29_301,
+ }, {
+ .compatible = "shelly,sca07010-bfn-lnn",
+ .data = &shelly_sca07010_bfn_lnn,
+ }, {
/* sentinel */
}
};
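The hannstar panel above moves from a single fixed drm_display_mode to a struct display_timing, which gives every parameter a min/typ/max range; panel_simple_get_fixed_modes() then builds a probed mode from the typical values through the videomode helpers (hence the new select VIDEOMODE_HELPERS). The conversion step in isolation, as a sketch; foo_add_mode_from_timing() and its arguments are illustrative:

#include <linux/errno.h>
#include <video/display_timing.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>

static int foo_add_mode_from_timing(struct drm_device *drm,
                                    struct drm_connector *connector,
                                    const struct display_timing *dt)
{
        struct videomode vm;
        struct drm_display_mode *mode;

        /* Collapse the min/typ/max ranges to their typical values. */
        videomode_from_timing(dt, &vm);

        mode = drm_mode_create(drm);
        if (!mode)
                return -ENOMEM;

        drm_display_mode_from_videomode(&vm, mode);
        drm_mode_set_name(mode);
        drm_mode_probed_add(connector, mode);

        return 0;
}

Keeping the full ranges in the panel_desc is what allows the new .get_timings callback to pass them on to display controllers that can tune the pixel clock within the panel's limits.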
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1d9b80c91a15..e2d07085b6a5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -102,7 +102,7 @@ static int qxl_drm_freeze(struct drm_device *dev)
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc->enabled)
(*crtc_funcs->disable)(crtc);
}
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 970f8e92dbb7..421ae130809b 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,3 +1,11 @@
+config DRM_RADEON_USERPTR
+ bool "Always enable userptr support"
+ depends on DRM_RADEON
+ select MMU_NOTIFIER
+ help
+ This option selects CONFIG_MMU_NOTIFIER if it isn't already
+ selected to enable full userptr support.
+
config DRM_RADEON_UMS
bool "Enable userspace modesetting on radeon (DEPRECATED)"
depends on DRM_RADEON
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 4605633e253b..dea53e36a2ef 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -81,7 +81,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o \
- radeon_sync.o radeon_audio.o
+ radeon_sync.o radeon_audio.o radeon_dp_auxch.o radeon_dp_mst.o
radeon-$(CONFIG_MMU_NOTIFIER) += radeon_mn.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 86807ee91bd1..42b2ea3fdcf3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -576,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ /* if there is no audio, set MINM_OVER_MAXP */
+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
@@ -606,6 +613,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
+ if (radeon_encoder->is_mst_encoder) {
+ struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ }
+
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (radeon_crtc->ss_enabled) {
@@ -952,7 +966,9 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_crtc->bpc = 8;
radeon_crtc->ss_enabled = false;
- if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ if (radeon_encoder->is_mst_encoder) {
+ radeon_dp_mst_prepare_pll(crtc, mode);
+ } else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
@@ -2069,6 +2085,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
radeon_crtc->connector = NULL;
return false;
}
+ if (radeon_crtc->encoder) {
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+
+ radeon_crtc->output_csc = radeon_encoder->output_csc;
+ }
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8d74de82456e..3e3290c203c6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -158,7 +158,7 @@ done:
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
static ssize_t
-radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct radeon_i2c_chan *chan =
container_of(aux, struct radeon_i2c_chan, aux);
@@ -226,11 +226,20 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
int ret;
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
- radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
+ if (ASIC_IS_DCE5(rdev)) {
+ if (radeon_auxch)
+ radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native;
+ else
+ radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
+ } else {
+ radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
+ }
ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
if (!ret)
@@ -301,8 +310,8 @@ static int dp_get_max_dp_pix_clock(int link_rate,
/***** radeon specific DP functions *****/
-static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- u8 dpcd[DP_DPCD_SIZE])
+int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+ u8 dpcd[DP_DPCD_SIZE])
{
int max_link_rate;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c39c1d0d9d4e..dd39f434b4a7 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -671,7 +671,15 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
+ struct radeon_encoder_atom_dig *dig_enc;
+ if (radeon_encoder_is_digital(encoder)) {
+ dig_enc = radeon_encoder->enc_priv;
+ if (dig_enc->active_mst_links)
+ return ATOM_ENCODER_MODE_DP_MST;
+ }
+ if (radeon_encoder->is_mst_encoder || radeon_encoder->offset)
+ return ATOM_ENCODER_MODE_DP_MST;
/* dp bridges are always DP */
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
return ATOM_ENCODER_MODE_DP;
@@ -823,7 +831,7 @@ union dig_encoder_control {
};
void
-atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -920,7 +928,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
- args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ if (enc_override != -1)
+ args.v3.acConfig.ucDigSel = enc_override;
+ else
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
case 4:
@@ -948,7 +959,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
}
- args.v4.acConfig.ucDigSel = dig->dig_encoder;
+
+ if (enc_override != -1)
+ args.v4.acConfig.ucDigSel = enc_override;
+ else
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
@@ -969,6 +984,12 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
}
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+ atombios_dig_encoder_setup2(encoder, action, panel_mode, -1);
+}
+
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
@@ -978,7 +999,7 @@ union dig_transmitter_control {
};
void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set, int fe)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1328,7 +1349,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v5.asConfig.ucHPDSel = 0;
else
args.v5.asConfig.ucHPDSel = hpd_id + 1;
- args.v5.ucDigEncoderSel = 1 << dig_encoder;
+ args.v5.ucDigEncoderSel = (fe != -1) ? (1 << fe) : (1 << dig_encoder);
args.v5.ucDPLaneSet = lane_set;
break;
default:
@@ -1344,6 +1365,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+ atombios_dig_transmitter_setup2(encoder, action, lane_num, lane_set, -1);
+}
+
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
@@ -1687,6 +1714,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+
+ /* don't power off encoders with active MST links */
+ if (dig->active_mst_links)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1729,17 +1761,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int encoder_mode = atombios_get_encoder_mode(encoder);
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
- if (connector && (radeon_audio != 0) &&
+ if ((radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
- (ENCODER_MODE_IS_DP(encoder_mode) &&
- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+ ENCODER_MODE_IS_DP(encoder_mode)))
radeon_audio_dpms(encoder, mode);
switch (radeon_encoder->encoder_id) {
@@ -1955,6 +1985,53 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
+void
+atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+ uint8_t frev, crev;
+ union crtc_source_param args;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ if (frev != 1 && crev != 2)
+ DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev);
+
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST;
+
+ switch (fe) {
+ case 0:
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ case 6:
+ args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
+ break;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@@ -2003,7 +2080,14 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
}
-static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx)
+{
+ if (enc_idx < 0)
+ return;
+ rdev->mode_info.active_encoders &= ~(1 << enc_idx);
+}
+
+int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -2012,71 +2096,79 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t dig_enc_in_use = 0;
+ int enc_idx = -1;
+ if (fe_idx >= 0) {
+ enc_idx = fe_idx;
+ goto assigned;
+ }
if (ASIC_IS_DCE6(rdev)) {
/* DCE6 */
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
- return 1;
+ enc_idx = 1;
else
- return 0;
+ enc_idx = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
- return 3;
+ enc_idx = 3;
else
- return 2;
+ enc_idx = 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
- return 5;
+ enc_idx = 5;
else
- return 4;
+ enc_idx = 4;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
+ enc_idx = 6;
break;
}
+ goto assigned;
} else if (ASIC_IS_DCE4(rdev)) {
/* DCE4/5 */
if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
/* ontario follows DCE4 */
if (rdev->family == CHIP_PALM) {
if (dig->linkb)
- return 1;
+ enc_idx = 1;
else
- return 0;
+ enc_idx = 0;
} else
/* llano follows DCE3.2 */
- return radeon_crtc->crtc_id;
+ enc_idx = radeon_crtc->crtc_id;
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
- return 1;
+ enc_idx = 1;
else
- return 0;
+ enc_idx = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
- return 3;
+ enc_idx = 3;
else
- return 2;
+ enc_idx = 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
- return 5;
+ enc_idx = 5;
else
- return 4;
+ enc_idx = 4;
break;
}
}
+ goto assigned;
}
/* on DCE32 an encoder can drive any block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
- return radeon_crtc->crtc_id;
+ enc_idx = radeon_crtc->crtc_id;
+ goto assigned;
}
/* on DCE3 - LVTMA can only be driven by DIGB */
@@ -2104,6 +2196,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
if (!(dig_enc_in_use & 1))
return 0;
return 1;
+
+assigned:
+ if (enc_idx == -1) {
+ DRM_ERROR("Got encoder index incorrect - returning 0\n");
+ return 0;
+ }
+ if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
+ DRM_ERROR("chosen encoder in use %d\n", enc_idx);
+ }
+ rdev->mode_info.active_encoders |= (1 << enc_idx);
+ return enc_idx;
}
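
/*
 * A minimal sketch (not from the patch itself): the pick/release pair above
 * treats rdev->mode_info.active_encoders as a bitmask of front-end encoders
 * currently in use.  The function name below is made up; it mirrors the
 * assumed usage seen in radeon_atom_encoder_prepare() later in this diff.
 */
static void example_reassign_dig_encoder(struct radeon_device *rdev,
					 struct drm_encoder *encoder,
					 struct radeon_encoder_atom_dig *dig)
{
	/* drop the previously held front end, if any (-1 means none held) */
	radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
	/* pass -1 to let the helper choose by the per-ASIC rules above;
	 * a value >= 0 forces that specific front end (as the MST path does) */
	dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder, -1);
}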
/* This only needs to be called once at startup */
@@ -2362,7 +2465,9 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig) {
- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ if (dig->dig_encoder >= 0)
+ radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder, -1);
if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
if (rdev->family >= CHIP_R600)
dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
@@ -2464,10 +2569,18 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
disable_done:
if (radeon_encoder_is_digital(encoder)) {
- dig = radeon_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- radeon_encoder->active_device = 0;
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, false);
+ }
+ if (atombios_get_encoder_mode(encoder) != ATOM_ENCODER_MODE_DP_MST) {
+ dig = radeon_encoder->enc_priv;
+ radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
+ dig->dig_encoder = -1;
+ radeon_encoder->active_device = 0;
+ }
+ } else
+ radeon_encoder->active_device = 0;
}
/* these are handled by the primary encoders */
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index db08f17be76b..69556f5e247e 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2751,13 +2751,54 @@ void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
else /* current_index == 2 */
pl = &ps->high;
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- if (rdev->family >= CHIP_CEDAR) {
- seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
- current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
- } else {
- seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
- current_index, pl->sclk, pl->mclk, pl->vddc);
- }
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ }
+}
+
+u32 btc_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->sclk;
+ }
+}
+
+u32 btc_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->mclk;
}
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index bcd2f1fe803f..8730562323a8 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5922,6 +5922,20 @@ void ci_dpm_print_power_state(struct radeon_device *rdev,
r600_dpm_print_ps_status(rdev, rps);
}
+u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ u32 sclk = ci_get_average_sclk_freq(rdev);
+
+ return sclk;
+}
+
+u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ u32 mclk = ci_get_average_mclk_freq(rdev);
+
+ return mclk;
+}
+
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct ci_power_info *pi = ci_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 3e670d344a20..a0c35bbc8546 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -141,6 +141,39 @@ static void cik_fini_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
+/**
+ * cik_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cik_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS2:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case GRBM_STATUS_SE2:
+ case GRBM_STATUS_SE3:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case (SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET):
+ case (SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET):
+ case UVD_STATUS:
+ /* TODO VCE */
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
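/*
 * A minimal sketch (not from the patch itself): this per-ASIC whitelist is
 * reached through the radeon_get_allowed_info_register() wrapper added to
 * radeon.h later in this diff.  The helper name below is hypothetical.
 */
static int example_query_register(struct radeon_device *rdev, u32 reg, u32 *val)
{
	/* only whitelisted status registers are readable; anything else
	 * is rejected with -EINVAL by the ASIC-specific hook */
	return radeon_get_allowed_info_register(rdev, reg, val);
}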
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
@@ -5789,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
L2_CACHE_BIGK_FRAGMENT_SIZE(4));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
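
/*
 * A minimal sketch (not from the patch itself): the END_ADDR registers are
 * programmed here as an inclusive last page-frame number, hence the "- 1"
 * applied to the exclusive end (gtt_end >> 12, or max_pfn).  A small
 * illustrative helper under that assumption; the name is made up.
 */
static inline u32 example_last_pfn(u64 end_exclusive)
{
	/* 4 KiB pages: exclusive byte end -> inclusive last PFN */
	return (u32)((end_exclusive >> 12) - 1);
}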
@@ -5804,7 +5837,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* restore context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
@@ -7394,12 +7427,12 @@ int cik_irq_set(struct radeon_device *rdev)
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -7486,27 +7519,27 @@ int cik_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("cik_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("cik_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("cik_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("cik_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("cik_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("cik_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -7678,6 +7711,36 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+ if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
}
/**
@@ -7803,6 +7866,7 @@ int cik_irq_process(struct radeon_device *rdev)
u8 me_id, pipe_id, queue_id;
u32 ring_index;
bool queue_hotplug = false;
+ bool queue_dp = false;
bool queue_reset = false;
u32 addr, status, mc_client;
bool queue_thermal = false;
@@ -8048,6 +8112,48 @@ restart_ih:
DRM_DEBUG("IH: HPD6\n");
}
break;
+ case 6:
+ if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+ }
+ break;
+ case 7:
+ if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+ }
+ break;
+ case 8:
+ if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+ }
+ break;
+ case 9:
+ if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+ }
+ break;
+ case 10:
+ if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+ }
+ break;
+ case 11:
+ if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -8256,6 +8362,8 @@ restart_ih:
rptr &= rdev->ih.ptr_mask;
WREG32(IH_RB_RPTR, rptr);
}
+ if (queue_dp)
+ schedule_work(&rdev->dp_work);
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_reset) {
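
/*
 * A minimal sketch (not from the patch itself): the HPD_RX cases above only
 * set queue_dp in interrupt context; the actual DP/MST handling runs later
 * from the rdev->dp_work item added to struct radeon_device in this diff.
 * The handler below is a hypothetical stand-in showing the deferral shape.
 */
static void example_dp_work_func(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, dp_work);

	/* process short-pulse / sink events outside the IRQ path */
	(void)rdev;
}
/* wired up once at init, e.g. INIT_WORK(&rdev->dp_work, example_dp_work_func),
 * and kicked from the interrupt handler via schedule_work(&rdev->dp_work). */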
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 243a36c93b8f..0089d837a8e3 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2088,6 +2088,8 @@
# define CLK_OD(x) ((x) << 6)
# define CLK_OD_MASK (0x1f << 6)
+#define UVD_STATUS 0xf6bc
+
/* UVD clocks */
#define CG_DCLK_CNTL 0xC050009C
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 3adc2afe32aa..68fd9fc677e3 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
}
-
-void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- if (enable) {
- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
- EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
- EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
- EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
- EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
- } else {
- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
- }
-
- dig->afmt->enabled = enable;
-}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 973df064c14f..05e6d6ef5963 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1006,6 +1006,34 @@ static void evergreen_init_golden_registers(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case DMA_STATUS_REG:
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split)
@@ -2457,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -4392,12 +4420,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
if (rdev->family == CHIP_ARUBA)
thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
@@ -4486,27 +4514,27 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("evergreen_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("evergreen_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("evergreen_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("evergreen_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("evergreen_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.afmt[0]) {
DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
@@ -4700,6 +4728,38 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
@@ -4780,6 +4840,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 ring_index;
bool queue_hotplug = false;
bool queue_hdmi = false;
+ bool queue_dp = false;
bool queue_thermal = false;
u32 status, addr;
@@ -5019,6 +5080,48 @@ restart_ih:
DRM_DEBUG("IH: HPD6\n");
}
break;
+ case 6:
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+ }
+ break;
+ case 7:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+ }
+ break;
+ case 8:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+ }
+ break;
+ case 9:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+ }
+ break;
+ case 10:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+ }
+ break;
+ case 11:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -5151,6 +5254,8 @@ restart_ih:
rptr &= rdev->ih.ptr_mask;
WREG32(IH_RB_RPTR, rptr);
}
+ if (queue_dp)
+ schedule_work(&rdev->dp_work);
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index c18d4ecbd95d..0926739c9fa7 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
WREG32(AFMT_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
- WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
-
WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
- ~HDMI_AVI_INFO_LINE_MASK);
+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+ ~HDMI_AVI_INFO_LINE_MASK);
}
void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
/* allow 60958 channel status and send audio packets fields to be updated */
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
}
@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
return;
if (enable) {
- WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
- HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SAMPLE_SEND);
+ } else {
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ ~AFMT_AUDIO_SAMPLE_SEND);
+ }
} else {
+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ ~AFMT_AUDIO_SAMPLE_SEND);
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
}
@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (!dig || !dig->afmt)
return;
- if (enable) {
+ if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
uint32_t val;
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SAMPLE_SEND);
+
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
- if (radeon_connector->con_priv) {
+ if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
dig_connector = radeon_connector->con_priv;
val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
} else {
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ ~AFMT_AUDIO_SAMPLE_SEND);
}
dig->afmt->enabled = enable;
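
/*
 * A minimal sketch (not from the patch itself): both enable paths above now
 * gate audio packet transmission on EDID-reported audio support.  The shape
 * of that check, with a hypothetical helper name:
 */
static bool example_sink_has_audio(struct drm_encoder *encoder)
{
	struct drm_connector *connector =
		radeon_get_connector_for_encoder(encoder);

	return connector &&
	       drm_detect_monitor_audio(radeon_connector_edid(connector));
}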
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a8d1d5240fcb..4aa5f755572b 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1520,6 +1520,7 @@
#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_STATUS 0xf6bc
/*
* PM4
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 0e236d067d66..2d71da448487 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2820,6 +2820,29 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
}
}
+u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 current_index =
+ (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
+ CURR_SCLK_INDEX_SHIFT;
+ u32 sclk;
+
+ if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+ return 0;
+ } else {
+ sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+ return sclk;
+ }
+}
+
+u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
void kv_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *rps)
{
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index dab00812abaa..aba2f428c0a8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -828,6 +828,35 @@ out:
return err;
}
+/**
+ * cayman_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+ case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
int tn_get_temp(struct radeon_device *rdev)
{
u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
@@ -1253,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
@@ -1272,7 +1301,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
*/
for (i = 1; i < 8; i++) {
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
+ rdev->vm_manager.max_pfn - 1);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->vm_manager.saved_table_addr[i]);
}
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 7bc9f8d9804a..c3d531a1114b 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -4319,6 +4319,42 @@ void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
}
}
+u32 ni_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 ni_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->mclk;
+ }
+}
+
u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index 5db7b7d6feb0..da310a70c0f0 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -83,4 +83,48 @@
# define NI_REGAMMA_PROG_B 4
# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+#define NI_DP_MSE_LINK_TIMING 0x73a0
+# define NI_DP_MSE_LINK_FRAME(x) (((x) & 0x3ff) << 0)
+# define NI_DP_MSE_LINK_LINE(x) (((x) & 0x3) << 16)
+
+#define NI_DP_MSE_MISC_CNTL 0x736c
+# define NI_DP_MSE_BLANK_CODE(x) (((x) & 0x1) << 0)
+# define NI_DP_MSE_TIMESTAMP_MODE(x) (((x) & 0x1) << 4)
+# define NI_DP_MSE_ZERO_ENCODER(x) (((x) & 0x1) << 8)
+
+#define NI_DP_MSE_RATE_CNTL 0x7384
+# define NI_DP_MSE_RATE_Y(x) (((x) & 0x3ffffff) << 0)
+# define NI_DP_MSE_RATE_X(x) (((x) & 0x3f) << 26)
+
+#define NI_DP_MSE_RATE_UPDATE 0x738c
+
+#define NI_DP_MSE_SAT0 0x7390
+# define NI_DP_MSE_SAT_SRC0(x) (((x) & 0x7) << 0)
+# define NI_DP_MSE_SAT_SLOT_COUNT0(x) (((x) & 0x3f) << 8)
+# define NI_DP_MSE_SAT_SRC1(x) (((x) & 0x7) << 16)
+# define NI_DP_MSE_SAT_SLOT_COUNT1(x) (((x) & 0x3f) << 24)
+
+#define NI_DP_MSE_SAT1 0x7394
+
+#define NI_DP_MSE_SAT2 0x7398
+
+#define NI_DP_MSE_SAT_UPDATE 0x739c
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
+# define NI_DIG_FE_DIG_MODE(x) (((x) & 0x7) << 16)
+# define NI_DIG_MODE_DP_SST 0
+# define NI_DIG_MODE_LVDS 1
+# define NI_DIG_MODE_TMDS_DVI 2
+# define NI_DIG_MODE_TMDS_HDMI 3
+# define NI_DIG_MODE_DP_MST 5
+# define NI_DIG_HPD_SELECT(x) (((x) & 0x7) << 28)
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_SOURCE_SELECT(x) (((x) & 0x3) << 0)
+# define NI_DIG_STEREOSYNC_SELECT(x) (((x) & 0x3) << 4)
+# define NI_DIG_STEREOSYNC_GATE_EN(x) (((x) & 0x1) << 8)
+# define NI_DIG_DUAL_LINK_ENABLE(x) (((x) & 0x1) << 16)
+# define NI_DIG_SWAP(x) (((x) & 0x1) << 18)
+# define NI_DIG_SYMCLK_FE_ON (0x1 << 24)
#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 6b44580440d0..3b290838918c 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -816,6 +816,52 @@
#define MC_PMG_CMD_MRS2 0x2b5c
#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
+#define AUX_CONTROL 0x6200
+#define AUX_EN (1 << 0)
+#define AUX_LS_READ_EN (1 << 8)
+#define AUX_LS_UPDATE_DISABLE(x) (((x) & 0x1) << 12)
+#define AUX_HPD_DISCON(x) (((x) & 0x1) << 16)
+#define AUX_DET_EN (1 << 18)
+#define AUX_HPD_SEL(x) (((x) & 0x7) << 20)
+#define AUX_IMPCAL_REQ_EN (1 << 24)
+#define AUX_TEST_MODE (1 << 28)
+#define AUX_DEGLITCH_EN (1 << 29)
+#define AUX_SW_CONTROL 0x6204
+#define AUX_SW_GO (1 << 0)
+#define AUX_LS_READ_TRIG (1 << 2)
+#define AUX_SW_START_DELAY(x) (((x) & 0xf) << 4)
+#define AUX_SW_WR_BYTES(x) (((x) & 0x1f) << 16)
+
+#define AUX_SW_INTERRUPT_CONTROL 0x620c
+#define AUX_SW_DONE_INT (1 << 0)
+#define AUX_SW_DONE_ACK (1 << 1)
+#define AUX_SW_DONE_MASK (1 << 2)
+#define AUX_SW_LS_DONE_INT (1 << 4)
+#define AUX_SW_LS_DONE_MASK (1 << 6)
+#define AUX_SW_STATUS 0x6210
+#define AUX_SW_DONE (1 << 0)
+#define AUX_SW_REQ (1 << 1)
+#define AUX_SW_RX_TIMEOUT_STATE(x) (((x) & 0x7) << 4)
+#define AUX_SW_RX_TIMEOUT (1 << 7)
+#define AUX_SW_RX_OVERFLOW (1 << 8)
+#define AUX_SW_RX_HPD_DISCON (1 << 9)
+#define AUX_SW_RX_PARTIAL_BYTE (1 << 10)
+#define AUX_SW_NON_AUX_MODE (1 << 11)
+#define AUX_SW_RX_MIN_COUNT_VIOL (1 << 12)
+#define AUX_SW_RX_INVALID_STOP (1 << 14)
+#define AUX_SW_RX_SYNC_INVALID_L (1 << 17)
+#define AUX_SW_RX_SYNC_INVALID_H (1 << 18)
+#define AUX_SW_RX_INVALID_START (1 << 19)
+#define AUX_SW_RX_RECV_NO_DET (1 << 20)
+#define AUX_SW_RX_RECV_INVALID_H (1 << 22)
+#define AUX_SW_RX_RECV_INVALID_V (1 << 23)
+
+#define AUX_SW_DATA 0x6218
+#define AUX_SW_DATA_RW (1 << 0)
+#define AUX_SW_DATA_MASK(x) (((x) & 0xff) << 8)
+#define AUX_SW_DATA_INDEX(x) (((x) & 0x1f) << 16)
+#define AUX_SW_AUTOINCREMENT_DISABLE (1 << 31)
+
#define LB_SYNC_RESET_SEL 0x6b28
#define LB_SYNC_RESET_SEL_MASK (3 << 0)
#define LB_SYNC_RESET_SEL_SHIFT 0
@@ -1086,6 +1132,7 @@
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
#define UVD_RBC_RB_RPTR 0xF690
#define UVD_RBC_RB_WPTR 0xF694
+#define UVD_STATUS 0xf6bc
/*
* PM4
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2fcad344492f..25b4ac967742 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -109,6 +109,32 @@ extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
/**
+ * r600_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int r600_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS2:
+ case R_000E50_SRBM_STATUS:
+ case DMA_STATUS_REG:
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
* r600_get_xclk - get the xclk
*
* @rdev: radeon_device pointer
@@ -1086,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index dd6606b8e23c..e85894ade95c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
WREG32(HDMI0_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
+
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
- WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 33d5a4f4eebd..46eb0fa75a61 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -111,6 +111,8 @@ extern int radeon_deep_color;
extern int radeon_use_pflipirq;
extern int radeon_bapm;
extern int radeon_backlight;
+extern int radeon_auxch;
+extern int radeon_mst;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -505,7 +507,7 @@ struct radeon_bo {
pid_t pid;
struct radeon_mn *mn;
- struct interval_tree_node mn_it;
+ struct list_head mn_list;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -1671,7 +1673,6 @@ struct radeon_uvd {
struct radeon_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
- void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
unsigned img_size[RADEON_MAX_UVD_HANDLES];
@@ -1857,6 +1858,8 @@ struct radeon_asic {
u32 (*get_xclk)(struct radeon_device *rdev);
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
+ /* get register for info ioctl */
+ int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
@@ -1985,6 +1988,8 @@ struct radeon_asic {
u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
+ u32 (*get_current_sclk)(struct radeon_device *rdev);
+ u32 (*get_current_mclk)(struct radeon_device *rdev);
} dpm;
/* pageflipping */
struct {
@@ -2408,6 +2413,7 @@ struct radeon_device {
struct radeon_rlc rlc;
struct radeon_mec mec;
struct work_struct hotplug_work;
+ struct work_struct dp_work;
struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -2932,6 +2938,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
+#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
@@ -2950,6 +2957,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
+#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev))
+#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev))
/* Common functions */
/* AGP */
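
/*
 * A minimal sketch (not from the patch itself): the new dpm accessors are
 * reached through the radeon_dpm_get_current_sclk/mclk() wrappers above
 * wherever the current engine and memory clocks are reported.  The helper
 * name is hypothetical.
 */
static void example_report_clocks(struct radeon_device *rdev)
{
	u32 sclk = radeon_dpm_get_current_sclk(rdev);	/* current engine clock */
	u32 mclk = radeon_dpm_get_current_mclk(rdev);	/* current memory clock */

	DRM_INFO("sclk: %u mclk: %u\n", sclk, mclk);
}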
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index c0ecd128b14b..8dbf5083c4ff 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -136,6 +136,11 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
}
}
+static int radeon_invalid_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ return -EINVAL;
+}
/* helper to disable agp */
/**
@@ -199,6 +204,7 @@ static struct radeon_asic r100_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
@@ -266,6 +272,7 @@ static struct radeon_asic r200_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
@@ -361,6 +368,7 @@ static struct radeon_asic r300_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
@@ -428,6 +436,7 @@ static struct radeon_asic r300_asic_pcie = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -495,6 +504,7 @@ static struct radeon_asic r420_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -562,6 +572,7 @@ static struct radeon_asic rs400_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
@@ -629,6 +640,7 @@ static struct radeon_asic rs600_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -696,6 +708,7 @@ static struct radeon_asic rs690_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
@@ -763,6 +776,7 @@ static struct radeon_asic rv515_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -830,6 +844,7 @@ static struct radeon_asic r520_asic = {
.mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
+ .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
@@ -925,6 +940,7 @@ static struct radeon_asic r600_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1009,6 +1025,7 @@ static struct radeon_asic rv6xx_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1080,6 +1097,8 @@ static struct radeon_asic rv6xx_asic = {
.print_power_state = &rv6xx_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv6xx_dpm_force_performance_level,
+ .get_current_sclk = &rv6xx_dpm_get_current_sclk,
+ .get_current_mclk = &rv6xx_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &rs600_page_flip,
@@ -1099,6 +1118,7 @@ static struct radeon_asic rs780_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1170,6 +1190,8 @@ static struct radeon_asic rs780_asic = {
.print_power_state = &rs780_dpm_print_power_state,
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rs780_dpm_force_performance_level,
+ .get_current_sclk = &rs780_dpm_get_current_sclk,
+ .get_current_mclk = &rs780_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &rs600_page_flip,
@@ -1180,7 +1202,7 @@ static struct radeon_asic rs780_asic = {
static struct radeon_asic_ring rv770_uvd_ring = {
.ib_execute = &uvd_v1_0_ib_execute,
.emit_fence = &uvd_v2_2_fence_emit,
- .emit_semaphore = &uvd_v1_0_semaphore_emit,
+ .emit_semaphore = &uvd_v2_2_semaphore_emit,
.cs_parse = &radeon_uvd_cs_parse,
.ring_test = &uvd_v1_0_ring_test,
.ib_test = &uvd_v1_0_ib_test,
@@ -1202,6 +1224,7 @@ static struct radeon_asic rv770_asic = {
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1274,6 +1297,8 @@ static struct radeon_asic rv770_asic = {
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &rv770_dpm_vblank_too_short,
+ .get_current_sclk = &rv770_dpm_get_current_sclk,
+ .get_current_mclk = &rv770_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &rv770_page_flip,
@@ -1319,6 +1344,7 @@ static struct radeon_asic evergreen_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1391,6 +1417,8 @@ static struct radeon_asic evergreen_asic = {
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &cypress_dpm_vblank_too_short,
+ .get_current_sclk = &rv770_dpm_get_current_sclk,
+ .get_current_mclk = &rv770_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1410,6 +1438,7 @@ static struct radeon_asic sumo_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1481,6 +1510,8 @@ static struct radeon_asic sumo_asic = {
.print_power_state = &sumo_dpm_print_power_state,
.debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
.force_performance_level = &sumo_dpm_force_performance_level,
+ .get_current_sclk = &sumo_dpm_get_current_sclk,
+ .get_current_mclk = &sumo_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1500,6 +1531,7 @@ static struct radeon_asic btc_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1572,6 +1604,8 @@ static struct radeon_asic btc_asic = {
.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
+ .get_current_sclk = &btc_dpm_get_current_sclk,
+ .get_current_mclk = &btc_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1634,6 +1668,7 @@ static struct radeon_asic cayman_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = cayman_get_allowed_info_register,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1717,6 +1752,8 @@ static struct radeon_asic cayman_asic = {
.debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ni_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
+ .get_current_sclk = &ni_dpm_get_current_sclk,
+ .get_current_mclk = &ni_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1736,6 +1773,7 @@ static struct radeon_asic trinity_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = cayman_get_allowed_info_register,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1819,6 +1857,8 @@ static struct radeon_asic trinity_asic = {
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level,
.enable_bapm = &trinity_dpm_enable_bapm,
+ .get_current_sclk = &trinity_dpm_get_current_sclk,
+ .get_current_mclk = &trinity_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -1868,6 +1908,7 @@ static struct radeon_asic si_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &si_get_xclk,
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
+ .get_allowed_info_register = si_get_allowed_info_register,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -1955,6 +1996,8 @@ static struct radeon_asic si_asic = {
.fan_ctrl_get_mode = &si_fan_ctrl_get_mode,
.get_fan_speed_percent = &si_fan_ctrl_get_fan_speed_percent,
.set_fan_speed_percent = &si_fan_ctrl_set_fan_speed_percent,
+ .get_current_sclk = &si_dpm_get_current_sclk,
+ .get_current_mclk = &si_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -2032,6 +2075,7 @@ static struct radeon_asic ci_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+ .get_allowed_info_register = cik_get_allowed_info_register,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -2123,6 +2167,8 @@ static struct radeon_asic ci_asic = {
.fan_ctrl_get_mode = &ci_fan_ctrl_get_mode,
.get_fan_speed_percent = &ci_fan_ctrl_get_fan_speed_percent,
.set_fan_speed_percent = &ci_fan_ctrl_set_fan_speed_percent,
+ .get_current_sclk = &ci_dpm_get_current_sclk,
+ .get_current_mclk = &ci_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
@@ -2142,6 +2188,7 @@ static struct radeon_asic kv_asic = {
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+ .get_allowed_info_register = cik_get_allowed_info_register,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
@@ -2229,6 +2276,8 @@ static struct radeon_asic kv_asic = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
+ .get_current_sclk = &kv_dpm_get_current_sclk,
+ .get_current_mclk = &kv_dpm_get_current_mclk,
},
.pflip = {
.page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 72bdd3bf0d8e..a3ca8cd305c5 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -384,6 +384,8 @@ u32 r600_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r600_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int r600_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_init(struct radeon_device *rdev);
@@ -433,6 +435,8 @@ void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
struct seq_file *m);
int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev);
/* rs780 dpm */
int rs780_dpm_init(struct radeon_device *rdev);
int rs780_dpm_enable(struct radeon_device *rdev);
@@ -449,6 +453,8 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
struct seq_file *m);
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -488,6 +494,8 @@ void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
int rv770_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev);
+u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev);
/*
* evergreen
@@ -540,6 +548,8 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv);
int evergreen_get_temp(struct radeon_device *rdev);
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int sumo_get_temp(struct radeon_device *rdev);
int tn_get_temp(struct radeon_device *rdev);
int cypress_dpm_init(struct radeon_device *rdev);
@@ -563,6 +573,8 @@ u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+u32 btc_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 btc_dpm_get_current_mclk(struct radeon_device *rdev);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
int sumo_dpm_late_enable(struct radeon_device *rdev);
@@ -581,6 +593,8 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m);
int sumo_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev);
/*
* cayman
@@ -637,6 +651,8 @@ uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cayman_dma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int cayman_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int ni_dpm_init(struct radeon_device *rdev);
void ni_dpm_setup_asic(struct radeon_device *rdev);
@@ -655,6 +671,8 @@ void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
+u32 ni_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 ni_dpm_get_current_mclk(struct radeon_device *rdev);
int trinity_dpm_init(struct radeon_device *rdev);
int trinity_dpm_enable(struct radeon_device *rdev);
int trinity_dpm_late_enable(struct radeon_device *rdev);
@@ -674,6 +692,8 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
+u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -726,6 +746,8 @@ u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int si_get_temp(struct radeon_device *rdev);
+int si_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int si_dpm_init(struct radeon_device *rdev);
void si_dpm_setup_asic(struct radeon_device *rdev);
int si_dpm_enable(struct radeon_device *rdev);
@@ -746,6 +768,8 @@ int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
u32 speed);
u32 si_fan_ctrl_get_mode(struct radeon_device *rdev);
void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
+u32 si_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 si_dpm_get_current_mclk(struct radeon_device *rdev);
/* DCE8 - CIK */
void dce8_bandwidth_update(struct radeon_device *rdev);
@@ -841,6 +865,8 @@ void cik_sdma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
int ci_get_temp(struct radeon_device *rdev);
int kv_get_temp(struct radeon_device *rdev);
+int cik_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val);
int ci_dpm_init(struct radeon_device *rdev);
int ci_dpm_enable(struct radeon_device *rdev);
@@ -862,6 +888,8 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+u32 ci_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 ci_dpm_get_current_mclk(struct radeon_device *rdev);
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
u32 *speed);
@@ -890,6 +918,8 @@ int kv_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
+u32 kv_dpm_get_current_sclk(struct radeon_device *rdev);
+u32 kv_dpm_get_current_mclk(struct radeon_device *rdev);
/* uvd v1.0 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
@@ -919,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int uvd_v2_2_resume(struct radeon_device *rdev);
void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
/* uvd v3.1 */
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fc1b3f34cf18..8f285244c839 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -845,6 +845,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
radeon_link_encoder_connector(dev);
+ radeon_setup_mst_connector(dev);
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index b21ef69a34ac..dcb779647c57 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
-void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
static const u32 pin_offsets[7] =
{
@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
.set_avi_packet = evergreen_set_avi_packet,
.set_audio_packet = dce4_set_audio_packet,
.mode_set = radeon_audio_dp_mode_set,
- .dpms = dce6_dp_enable,
+ .dpms = evergreen_dp_enable,
};
static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -461,30 +460,37 @@ void radeon_audio_detect(struct drm_connector *connector,
if (!connector || !connector->encoder)
return;
+ if (!radeon_encoder_is_digital(connector->encoder))
+ return;
+
rdev = connector->encoder->dev->dev_private;
+
+ if (!radeon_audio_chipset_supported(rdev))
+ return;
+
radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv;
- if (status == connector_status_connected) {
- struct radeon_connector *radeon_connector;
- int sink_type;
-
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- radeon_encoder->audio = NULL;
- return;
- }
+ if (!dig->afmt)
+ return;
- radeon_connector = to_radeon_connector(connector);
- sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (status == connector_status_connected) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
- sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ radeon_dp_getsinktype(radeon_connector) ==
+ CONNECTOR_OBJECT_ID_DISPLAYPORT)
radeon_encoder->audio = rdev->audio.dp_funcs;
else
radeon_encoder->audio = rdev->audio.hdmi_funcs;
dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ dig->afmt->pin = NULL;
+ }
} else {
radeon_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
@@ -520,16 +526,40 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
int err;
+ list_for_each_entry(connector,
+ &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return -ENOENT;
+ }
+
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %d\n", err);
return err;
}
+ if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+ if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ }
+
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
DRM_ERROR("failed to pack AVI infoframe: %d\n", err);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 27def67cb6be..d17d251dbd4f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -27,6 +27,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_audio.h"
@@ -34,12 +35,33 @@
#include <linux/pm_runtime.h>
+static int radeon_dp_handle_hpd(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_dp_mst_check_status(radeon_connector);
+ if (ret == -EINVAL)
+ return 1;
+ return 0;
+}
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ if (radeon_connector->is_mst_connector)
+ return;
+ if (dig_connector->is_mst) {
+ radeon_dp_handle_hpd(connector);
+ return;
+ }
+ }
/* bail if the connector does not have hpd pin, e.g.,
* VGA, TV, etc.
*/
@@ -135,7 +157,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- struct drm_connector_helper_funcs *connector_funcs =
+ const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -225,7 +247,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
- struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
@@ -702,7 +724,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
if (connector->encoder)
radeon_encoder = to_radeon_encoder(connector->encoder);
else {
- struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
}
@@ -725,6 +747,30 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_property_change_mode(&radeon_encoder->base);
}
+ if (property == rdev->mode_info.output_csc_property) {
+ if (connector->encoder)
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ else {
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+ }
+
+ if (radeon_encoder->output_csc == val)
+ return 0;
+
+ radeon_encoder->output_csc = val;
+
+ if (connector->encoder->crtc) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ radeon_crtc->output_csc = radeon_encoder->output_csc;
+
+ (*crtc_funcs->load_lut)(crtc);
+ }
+ }
+
return 0;
}
@@ -896,7 +942,7 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
if (connector->encoder)
radeon_encoder = to_radeon_encoder(connector->encoder);
else {
- struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
}
@@ -964,7 +1010,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
int r;
@@ -1094,7 +1140,7 @@ static enum drm_connector_status
radeon_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
int r;
@@ -1174,7 +1220,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
int i, r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1333,8 +1379,10 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
+ radeon_connector_get_edid(connector);
radeon_audio_detect(connector, ret);
+ }
exit:
pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1585,6 +1633,9 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int r;
+ if (radeon_dig_connector->is_mst)
+ return connector_status_disconnected;
+
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0)
return connector_status_disconnected;
@@ -1635,7 +1686,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
@@ -1643,12 +1694,21 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
radeon_dp_getdpcd(radeon_connector);
+ r = radeon_dp_mst_probe(radeon_connector);
+ if (r == 1)
+ ret = connector_status_disconnected;
+ }
} else {
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- if (radeon_dp_getdpcd(radeon_connector))
- ret = connector_status_connected;
+ if (radeon_dp_getdpcd(radeon_connector)) {
+ r = radeon_dp_mst_probe(radeon_connector);
+ if (r == 1)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ }
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
@@ -1659,8 +1719,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
+ radeon_connector_get_edid(connector);
radeon_audio_detect(connector, ret);
+ }
out:
pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1872,6 +1934,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1904,6 +1970,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1950,6 +2020,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -1972,6 +2046,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
@@ -2023,6 +2101,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.load_detect_property,
1);
}
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_DVII)
connector->doublescan_allowed = true;
@@ -2068,6 +2150,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -2116,6 +2202,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+ RADEON_OUTPUT_CSC_BYPASS);
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -2352,3 +2442,27 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
+
+void radeon_setup_mst_connector(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ if (!ASIC_IS_DCE5(rdev))
+ return;
+
+ if (radeon_mst == 0)
+ return;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int ret;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ ret = radeon_dp_mst_init(radeon_connector);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 4d0f96cc3da4..ab39b85e0f76 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->dma_reloc_idx = 0;
/* FIXME: we assume that each relocs use 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
+ p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
}
}
kfree(parser->track);
- kfree(parser->relocs);
+ drm_free_large(parser->relocs);
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bd7519fdd3f4..b7ca4c514621 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1442,6 +1442,11 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
}
+ r = radeon_mst_debugfs_init(rdev);
+ if (r) {
+ DRM_ERROR("registering mst debugfs failed (%d).\n", r);
+ }
+
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 913fafa597ad..d2e9e9efc159 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -154,7 +154,7 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
(NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
- (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+ (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) |
NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
@@ -1382,6 +1382,13 @@ static struct drm_prop_enum_list radeon_dither_enum_list[] =
{ RADEON_FMT_DITHER_ENABLE, "on" },
};
+static struct drm_prop_enum_list radeon_output_csc_enum_list[] =
+{ { RADEON_OUTPUT_CSC_BYPASS, "bypass" },
+ { RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
+ { RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
+ { RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1444,6 +1451,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
"dither",
radeon_dither_enum_list, sz);
+ sz = ARRAY_SIZE(radeon_output_csc_enum_list);
+ rdev->mode_info.output_csc_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "output_csc",
+ radeon_output_csc_enum_list, sz);
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
new file mode 100644
index 000000000000..bf1fecc6cceb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "nid.h"
+
+#define AUX_RX_ERROR_FLAGS (AUX_SW_RX_OVERFLOW | \
+ AUX_SW_RX_HPD_DISCON | \
+ AUX_SW_RX_PARTIAL_BYTE | \
+ AUX_SW_NON_AUX_MODE | \
+ AUX_SW_RX_MIN_COUNT_VIOL | \
+ AUX_SW_RX_INVALID_STOP | \
+ AUX_SW_RX_SYNC_INVALID_L | \
+ AUX_SW_RX_SYNC_INVALID_H | \
+ AUX_SW_RX_INVALID_START | \
+ AUX_SW_RX_RECV_NO_DET | \
+ AUX_SW_RX_RECV_INVALID_H | \
+ AUX_SW_RX_RECV_INVALID_V)
+
+#define AUX_SW_REPLY_GET_BYTE_COUNT(x) (((x) >> 24) & 0x1f)
+
+#define BARE_ADDRESS_SIZE 3
+
+static const u32 aux_offset[] =
+{
+ 0x6200 - 0x6200,
+ 0x6250 - 0x6200,
+ 0x62a0 - 0x6200,
+ 0x6300 - 0x6200,
+ 0x6350 - 0x6200,
+ 0x63a0 - 0x6200,
+};
+
+ssize_t
+radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct radeon_i2c_chan *chan =
+ container_of(aux, struct radeon_i2c_chan, aux);
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ret = 0, i;
+ uint32_t tmp, ack = 0;
+ int instance = chan->rec.i2c_id & 0xf;
+ u8 byte;
+ u8 *buf = msg->buffer;
+ int retry_count = 0;
+ int bytes;
+ int msize;
+ bool is_write = false;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ is_write = true;
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* work out two sizes required */
+ msize = 0;
+ bytes = BARE_ADDRESS_SIZE;
+ if (msg->size) {
+ msize = msg->size - 1;
+ bytes++;
+ if (is_write)
+ bytes += msg->size;
+ }
+
+ mutex_lock(&chan->mutex);
+
+ /* switch the pad to aux mode */
+ tmp = RREG32(chan->rec.mask_clk_reg);
+ tmp |= (1 << 16);
+ WREG32(chan->rec.mask_clk_reg, tmp);
+
+ /* setup AUX control register with correct HPD pin */
+ tmp = RREG32(AUX_CONTROL + aux_offset[instance]);
+
+ tmp &= AUX_HPD_SEL(0x7);
+ tmp |= AUX_HPD_SEL(chan->rec.hpd);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
+
+ WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+
+ /* atombios appears to write this twice, let's copy it */
+ WREG32(AUX_SW_CONTROL + aux_offset[instance],
+ AUX_SW_WR_BYTES(bytes));
+ WREG32(AUX_SW_CONTROL + aux_offset[instance],
+ AUX_SW_WR_BYTES(bytes));
+
+ /* write the data header into the registers */
+ /* request, address, msg size */
+ byte = (msg->request << 4);
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
+
+ byte = (msg->address >> 8) & 0xff;
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte));
+
+ byte = msg->address & 0xff;
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte));
+
+ byte = msize;
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte));
+
+ /* if we are writing - write the msg buffer */
+ if (is_write) {
+ for (i = 0; i < msg->size; i++) {
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(buf[i]));
+ }
+ }
+
+ /* clear the ACK */
+ WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK);
+
+ /* write the size and GO bits */
+ WREG32(AUX_SW_CONTROL + aux_offset[instance],
+ AUX_SW_WR_BYTES(bytes) | AUX_SW_GO);
+
+ /* poll the status registers - TODO irq support */
+ do {
+ tmp = RREG32(AUX_SW_STATUS + aux_offset[instance]);
+ if (tmp & AUX_SW_DONE) {
+ break;
+ }
+ usleep_range(100, 200);
+ } while (retry_count++ < 1000);
+
+ if (retry_count >= 1000) {
+ DRM_ERROR("auxch hw never signalled completion, error %08x\n", tmp);
+ ret = -EIO;
+ goto done;
+ }
+
+ if (tmp & AUX_SW_RX_TIMEOUT) {
+ DRM_DEBUG_KMS("dp_aux_ch timed out\n");
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+ if (tmp & AUX_RX_ERROR_FLAGS) {
+ DRM_DEBUG_KMS("dp_aux_ch flags not zero: %08x\n", tmp);
+ ret = -EIO;
+ goto done;
+ }
+
+ bytes = AUX_SW_REPLY_GET_BYTE_COUNT(tmp);
+ if (bytes) {
+ WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_RW | AUX_SW_AUTOINCREMENT_DISABLE);
+
+ tmp = RREG32(AUX_SW_DATA + aux_offset[instance]);
+ ack = (tmp >> 8) & 0xff;
+
+ for (i = 0; i < bytes - 1; i++) {
+ tmp = RREG32(AUX_SW_DATA + aux_offset[instance]);
+ if (buf)
+ buf[i] = (tmp >> 8) & 0xff;
+ }
+ if (buf)
+ ret = bytes - 1;
+ }
+
+ WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK);
+
+ if (is_write)
+ ret = msg->size;
+done:
+ mutex_unlock(&chan->mutex);
+
+ if (ret >= 0)
+ msg->reply = ack >> 4;
+ return ret;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
new file mode 100644
index 000000000000..2b98ed3e684d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -0,0 +1,785 @@
+
+#include <drm/drmP.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "radeon.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+static struct radeon_encoder *radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector);
+
+static int radeon_atom_set_enc_offset(int id)
+{
+ static const int offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ 0x13830 - 0x7030 };
+
+ return offsets[id];
+}
+
+static int radeon_dp_mst_set_be_cntl(struct radeon_encoder *primary,
+ struct radeon_encoder_mst *mst_enc,
+ enum radeon_hpd_id hpd, bool enable)
+{
+ struct drm_device *dev = primary->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t reg;
+ int retries = 0;
+ uint32_t temp;
+
+ reg = RREG32(NI_DIG_BE_CNTL + primary->offset);
+
+ /* set MST mode */
+ reg &= ~NI_DIG_FE_DIG_MODE(7);
+ reg |= NI_DIG_FE_DIG_MODE(NI_DIG_MODE_DP_MST);
+
+ if (enable)
+ reg |= NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
+ else
+ reg &= ~NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
+
+ reg |= NI_DIG_HPD_SELECT(hpd);
+ DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DIG_BE_CNTL + primary->offset, reg);
+ WREG32(NI_DIG_BE_CNTL + primary->offset, reg);
+
+ if (enable) {
+ uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
+
+ do {
+ temp = RREG32(NI_DIG_FE_CNTL + offset);
+ } while ((temp & NI_DIG_SYMCLK_FE_ON) && retries++ < 10000);
+ if (retries == 10000)
+ DRM_ERROR("timed out waiting for FE %d %d\n", primary->offset, mst_enc->fe);
+ }
+ return 0;
+}
+
+static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
+ int stream_number,
+ int fe,
+ int slots)
+{
+ struct drm_device *dev = primary->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u32 temp, val;
+ int retries = 0;
+ int satreg, satidx;
+
+ satreg = stream_number >> 1;
+ satidx = stream_number & 1;
+
+ temp = RREG32(NI_DP_MSE_SAT0 + satreg + primary->offset);
+
+ val = NI_DP_MSE_SAT_SLOT_COUNT0(slots) | NI_DP_MSE_SAT_SRC0(fe);
+
+ val <<= (16 * satidx);
+
+ temp &= ~(0xffff << (16 * satidx));
+
+ temp |= val;
+
+ DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
+ WREG32(NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
+
+ WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
+
+ do {
+ temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
+ } while ((temp & 0x1) && retries++ < 10000);
+
+ if (retries == 10000)
+ DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
+
+ /* MTP 16 ? */
+ return 0;
+}
+
+static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn,
+ struct radeon_encoder *primary)
+{
+ struct drm_device *dev = mst_conn->base.dev;
+ struct stream_attribs new_attribs[6];
+ int i;
+ int idx = 0;
+ struct radeon_connector *radeon_connector;
+ struct drm_connector *connector;
+
+ memset(new_attribs, 0, sizeof(new_attribs));
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_encoder *subenc;
+ struct radeon_encoder_mst *mst_enc;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->is_mst_connector)
+ continue;
+
+ if (radeon_connector->mst_port != mst_conn)
+ continue;
+
+ subenc = radeon_connector->mst_encoder;
+ mst_enc = subenc->enc_priv;
+
+ if (!mst_enc->enc_active)
+ continue;
+
+ new_attribs[idx].fe = mst_enc->fe;
+ new_attribs[idx].slots = drm_dp_mst_get_vcpi_slots(&mst_conn->mst_mgr, mst_enc->port);
+ idx++;
+ }
+
+ for (i = 0; i < idx; i++) {
+ if (new_attribs[i].fe != mst_conn->cur_stream_attribs[i].fe ||
+ new_attribs[i].slots != mst_conn->cur_stream_attribs[i].slots) {
+ radeon_dp_mst_set_stream_attrib(primary, i, new_attribs[i].fe, new_attribs[i].slots);
+ mst_conn->cur_stream_attribs[i].fe = new_attribs[i].fe;
+ mst_conn->cur_stream_attribs[i].slots = new_attribs[i].slots;
+ }
+ }
+
+ for (i = idx; i < mst_conn->enabled_attribs; i++) {
+ radeon_dp_mst_set_stream_attrib(primary, i, 0, 0);
+ mst_conn->cur_stream_attribs[i].fe = 0;
+ mst_conn->cur_stream_attribs[i].slots = 0;
+ }
+ mst_conn->enabled_attribs = idx;
+ return 0;
+}
+
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+{
+ struct drm_device *dev = mst->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_mst *mst_enc = mst->enc_priv;
+ uint32_t val, temp;
+ uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
+ int retries = 0;
+
+ val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
+
+ WREG32(NI_DP_MSE_RATE_CNTL + offset, val);
+
+ do {
+ temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+ } while ((temp & 0x1) && (retries++ < 10000));
+
+ if (retries >= 10000)
+ DRM_ERROR("timed out wait for rate cntl %d\n", mst_enc->fe);
+ return 0;
+}
+
+static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, radeon_connector->port);
+ radeon_connector->edid = edid;
+ DRM_DEBUG_KMS("edid retrieved %p\n", edid);
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+
+ return ret;
+}
+
+static int radeon_dp_mst_get_modes(struct drm_connector *connector)
+{
+ return radeon_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+radeon_dp_mst_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO - validate mode against available PBN for link */
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
+
+struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ return &radeon_connector->mst_encoder->base;
+}
+
+static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
+ .get_modes = radeon_dp_mst_get_modes,
+ .mode_valid = radeon_dp_mst_mode_valid,
+ .best_encoder = radeon_mst_best_encoder,
+};
+
+static enum drm_connector_status
+radeon_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
+}
+
+static void
+radeon_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_encoder *radeon_encoder = radeon_connector->mst_encoder;
+
+ drm_encoder_cleanup(&radeon_encoder->base);
+ kfree(radeon_encoder);
+ drm_connector_cleanup(connector);
+ kfree(radeon_connector);
+}
+
+static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
+ .dpms = radeon_connector_dpms,
+ .detect = radeon_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = radeon_dp_mst_connector_destroy,
+};
+
+static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
+{
+ struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector;
+ struct drm_connector *connector;
+
+ radeon_connector = kzalloc(sizeof(*radeon_connector), GFP_KERNEL);
+ if (!radeon_connector)
+ return NULL;
+
+ radeon_connector->is_mst_connector = true;
+ connector = &radeon_connector->base;
+ radeon_connector->port = port;
+ radeon_connector->mst_port = master;
+ DRM_DEBUG_KMS("\n");
+
+ drm_connector_init(dev, connector, &radeon_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &radeon_dp_mst_connector_helper_funcs);
+ radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+
+ mutex_lock(&dev->mode_config.mutex);
+ radeon_fb_add_connector(rdev, connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_connector_register(connector);
+ return connector;
+}
+
+static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ drm_connector_unregister(connector);
+ /* need to nuke the connector */
+ mutex_lock(&dev->mode_config.mutex);
+ /* dpms off */
+ radeon_fb_remove_connector(rdev, connector);
+
+ drm_connector_cleanup(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_reinit_primary_mode_group(dev);
+
+
+ kfree(connector);
+ DRM_DEBUG_KMS("\n");
+}
+
+static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+struct drm_dp_mst_topology_cbs mst_cbs = {
+ .add_connector = radeon_dp_add_mst_connector,
+ .destroy_connector = radeon_dp_destroy_mst_connector,
+ .hotplug = radeon_dp_mst_hotplug,
+};
+
+struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (!connector->encoder)
+ continue;
+ if (!radeon_connector->is_mst_connector)
+ continue;
+
+ DRM_DEBUG_KMS("checking %p vs %p\n", connector->encoder, encoder);
+ if (connector->encoder == encoder)
+ return radeon_connector;
+ }
+ return NULL;
+}
+
+void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder);
+ struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
+ struct radeon_connector *radeon_connector = radeon_mst_find_connector(&radeon_encoder->base);
+ int dp_clock;
+ struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
+
+ if (radeon_connector) {
+ radeon_connector->pixelclock_for_modeset = mode->clock;
+ if (radeon_connector->base.display_info.bpc)
+ radeon_crtc->bpc = radeon_connector->base.display_info.bpc;
+ else
+ radeon_crtc->bpc = 8;
+ }
+
+ DRM_DEBUG_KMS("dp_clock %p %d\n", dig_connector, dig_connector->dp_clock);
+ dp_clock = dig_connector->dp_clock;
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
+ ASIC_INTERNAL_SS_ON_DP,
+ dp_clock);
+}
+
+static void
+radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder, *primary;
+ struct radeon_encoder_mst *mst_enc;
+ struct radeon_encoder_atom_dig *dig_enc;
+ struct radeon_connector *radeon_connector;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ int ret, slots;
+
+ if (!ASIC_IS_DCE5(rdev)) {
+ DRM_ERROR("got mst dpms on non-DCE5\n");
+ return;
+ }
+
+ radeon_connector = radeon_mst_find_connector(encoder);
+ if (!radeon_connector)
+ return;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ mst_enc = radeon_encoder->enc_priv;
+
+ primary = mst_enc->primary;
+
+ dig_enc = primary->enc_priv;
+
+ crtc = encoder->crtc;
+ DRM_DEBUG_KMS("got connector %d\n", dig_enc->active_mst_links);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ dig_enc->active_mst_links++;
+
+ radeon_crtc = to_radeon_crtc(crtc);
+
+ if (dig_enc->active_mst_links == 1) {
+ mst_enc->fe = dig_enc->dig_encoder;
+ mst_enc->fe_from_be = true;
+ atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
+
+ atombios_dig_encoder_setup(&primary->base, ATOM_ENCODER_CMD_SETUP, 0);
+ atombios_dig_transmitter_setup2(&primary->base, ATOM_TRANSMITTER_ACTION_ENABLE,
+ 0, 0, dig_enc->dig_encoder);
+
+ if (radeon_dp_needs_link_train(mst_enc->connector) ||
+ dig_enc->active_mst_links == 1) {
+ radeon_dp_link_train(&primary->base, &mst_enc->connector->base);
+ }
+
+ } else {
+ mst_enc->fe = radeon_atom_pick_dig_encoder(encoder, radeon_crtc->crtc_id);
+ if (mst_enc->fe == -1)
+ DRM_ERROR("failed to get frontend for dig encoder\n");
+ mst_enc->fe_from_be = false;
+ atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
+ }
+
+ DRM_DEBUG_KMS("dig encoder is %d %d %d\n", dig_enc->dig_encoder,
+ dig_enc->linkb, radeon_crtc->crtc_id);
+
+ ret = drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
+ radeon_connector->port,
+ mst_enc->pbn, &slots);
+ ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+
+ radeon_dp_mst_set_be_cntl(primary, mst_enc,
+ radeon_connector->mst_port->hpd.hpd, true);
+
+ mst_enc->enc_active = true;
+ radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
+ radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+ atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
+ mst_enc->fe);
+ ret = drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
+
+ ret = drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
+
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ DRM_ERROR("DPMS OFF %d\n", dig_enc->active_mst_links);
+
+ if (!mst_enc->enc_active)
+ return;
+
+ drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
+ ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+
+ drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
+ /* and this can also fail */
+ drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
+
+ drm_dp_mst_deallocate_vcpi(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
+
+ mst_enc->enc_active = false;
+ radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
+
+ radeon_dp_mst_set_be_cntl(primary, mst_enc,
+ radeon_connector->mst_port->hpd.hpd, false);
+ atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0,
+ mst_enc->fe);
+
+ if (!mst_enc->fe_from_be)
+ radeon_atom_release_dig_encoder(rdev, mst_enc->fe);
+
+ mst_enc->fe_from_be = false;
+ dig_enc->active_mst_links--;
+ if (dig_enc->active_mst_links == 0) {
+ /* drop link */
+ }
+
+ break;
+ }
+
+}
+
+static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder_mst *mst_enc;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int bpp = 24;
+
+ mst_enc = radeon_encoder->enc_priv;
+
+ mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+ mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
+ DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+ mst_enc->primary->active_device, mst_enc->primary->devices,
+ mst_enc->connector->devices, mst_enc->primary->base.encoder_type);
+
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ {
+ struct radeon_connector_atom_dig *dig_connector;
+
+ dig_connector = mst_enc->connector->con_priv;
+ dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+ dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
+ dig_connector->dpcd);
+ DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ dig_connector->dp_lane_count, dig_connector->dp_clock);
+ }
+ return true;
+}
+
+static void radeon_mst_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_connector *radeon_connector;
+ struct radeon_encoder *radeon_encoder, *primary;
+ struct radeon_encoder_mst *mst_enc;
+ struct radeon_encoder_atom_dig *dig_enc;
+
+ radeon_connector = radeon_mst_find_connector(encoder);
+ if (!radeon_connector) {
+ DRM_DEBUG_KMS("failed to find connector %p\n", encoder);
+ return;
+ }
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ mst_enc = radeon_encoder->enc_priv;
+
+ primary = mst_enc->primary;
+
+ dig_enc = primary->enc_priv;
+
+ mst_enc->port = radeon_connector->port;
+
+ if (dig_enc->dig_encoder == -1) {
+ dig_enc->dig_encoder = radeon_atom_pick_dig_encoder(&primary->base, -1);
+ primary->offset = radeon_atom_set_enc_offset(dig_enc->dig_encoder);
+ atombios_set_mst_encoder_crtc_source(encoder, dig_enc->dig_encoder);
+
+
+ }
+ DRM_DEBUG_KMS("%d %d\n", dig_enc->dig_encoder, primary->offset);
+}
+
+static void
+radeon_mst_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static void radeon_mst_encoder_commit(struct drm_encoder *encoder)
+{
+ radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ DRM_DEBUG_KMS("\n");
+}
+
+static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
+ .dpms = radeon_mst_encoder_dpms,
+ .mode_fixup = radeon_mst_mode_fixup,
+ .prepare = radeon_mst_encoder_prepare,
+ .mode_set = radeon_mst_encoder_mode_set,
+ .commit = radeon_mst_encoder_commit,
+};
+
+void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs radeon_dp_mst_enc_funcs = {
+ .destroy = radeon_dp_mst_encoder_destroy,
+};
+
+static struct radeon_encoder *
+radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_mst *mst_enc;
+ struct drm_encoder *encoder;
+ const struct drm_connector_helper_funcs *connector_funcs = connector->base.helper_private;
+ struct drm_encoder *enc_master = connector_funcs->best_encoder(&connector->base);
+
+ DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+ radeon_encoder = kzalloc(sizeof(*radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return NULL;
+
+ radeon_encoder->enc_priv = kzalloc(sizeof(*mst_enc), GFP_KERNEL);
+ if (!radeon_encoder->enc_priv) {
+ kfree(radeon_encoder);
+ return NULL;
+ }
+ encoder = &radeon_encoder->base;
+ switch (rdev->num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
+
+ drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
+ DRM_MODE_ENCODER_DPMST);
+ drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);
+
+ mst_enc = radeon_encoder->enc_priv;
+ mst_enc->connector = connector;
+ mst_enc->primary = to_radeon_encoder(enc_master);
+ radeon_encoder->is_mst_encoder = true;
+ return radeon_encoder;
+}
+
+int
+radeon_dp_mst_init(struct radeon_connector *radeon_connector)
+{
+ struct drm_device *dev = radeon_connector->base.dev;
+
+ if (!radeon_connector->ddc_bus->has_aux)
+ return 0;
+
+ radeon_connector->mst_mgr.cbs = &mst_cbs;
+ return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev->dev,
+ &radeon_connector->ddc_bus->aux, 16, 6,
+ radeon_connector->base.base.id);
+}
+
+int
+radeon_dp_mst_probe(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int ret;
+ u8 msg[1];
+
+ if (!radeon_mst)
+ return 0;
+
+ if (dig_connector->dpcd[DP_DPCD_REV] < 0x12)
+ return 0;
+
+ ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_MSTM_CAP, msg,
+ 1);
+ if (ret) {
+ if (msg[0] & DP_MST_CAP) {
+ DRM_DEBUG_KMS("Sink is MST capable\n");
+ dig_connector->is_mst = true;
+ } else {
+ DRM_DEBUG_KMS("Sink is not MST capable\n");
+ dig_connector->is_mst = false;
+ }
+
+ }
+ drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
+ dig_connector->is_mst);
+ return dig_connector->is_mst;
+}
+
+int
+radeon_dp_mst_check_status(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int retry;
+
+ if (dig_connector->is_mst) {
+ u8 esi[16] = { 0 };
+ int dret;
+ int ret = 0;
+ bool handled;
+
+ dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
+ DP_SINK_COUNT_ESI, esi, 8);
+go_again:
+ if (dret == 8) {
+ DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ ret = drm_dp_mst_hpd_irq(&radeon_connector->mst_mgr, esi, &handled);
+
+ if (handled) {
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+ wret = drm_dp_dpcd_write(&radeon_connector->ddc_bus->aux,
+ DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ if (wret == 3)
+ break;
+ }
+
+ dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
+ DP_SINK_COUNT_ESI, esi, 8);
+ if (dret == 8) {
+ DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ goto go_again;
+ }
+ } else
+ ret = 0;
+
+ return ret;
+ } else {
+ DRM_DEBUG_KMS("failed to get ESI - device may have failed %d\n", ret);
+ dig_connector->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
+ dig_connector->is_mst);
+ /* send a hotplug event */
+ }
+ }
+ return -EINVAL;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_mst_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+ int i;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ radeon_connector = to_radeon_connector(connector);
+ dig_connector = radeon_connector->con_priv;
+ if (radeon_connector->is_mst_connector)
+ continue;
+ if (!dig_connector->is_mst)
+ continue;
+ drm_dp_mst_dump_topology(m, &radeon_connector->mst_mgr);
+
+ for (i = 0; i < radeon_connector->enabled_attribs; i++)
+ seq_printf(m, "attrib %d: %d %d\n", i,
+ radeon_connector->cur_stream_attribs[i].fe,
+ radeon_connector->cur_stream_attribs[i].slots);
+ }
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_mst_list[] = {
+ {"radeon_mst_info", &radeon_debugfs_mst_info, 0, NULL},
+};
+#endif
+
+int radeon_mst_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_mst_list, 1);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 5d684beb48d3..7d620d4b3f31 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -89,9 +89,10 @@
* 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
* CS to GPU on >= r600
* 2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support
+ * 2.42.0 - Add VCE/VUI (Video Usability Information) support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 41
+#define KMS_DRIVER_MINOR 42
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -190,6 +191,8 @@ int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
int radeon_backlight = -1;
+int radeon_auxch = -1;
+int radeon_mst = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -239,7 +242,7 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, radeon_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
@@ -275,6 +278,12 @@ module_param_named(bapm, radeon_bapm, int, 0444);
MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(backlight, radeon_backlight, int, 0444);
+MODULE_PARM_DESC(auxch, "Use native auxch experimental support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(auxch, radeon_auxch, int, 0444);
+
+MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)");
+module_param_named(mst, radeon_mst, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3a297037cc17..ef99917f000d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -247,7 +247,16 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->active_device & radeon_connector->devices)
+ if (radeon_encoder->is_mst_encoder) {
+ struct radeon_encoder_mst *mst_enc;
+
+ if (!radeon_connector->is_mst_connector)
+ continue;
+
+ mst_enc = radeon_encoder->enc_priv;
+ if (mst_enc->connector == radeon_connector->mst_port)
+ return connector;
+ } else if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -393,6 +402,9 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_DisplayPort:
+ if (radeon_connector->is_mst_connector)
+ return false;
+
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ea276ff6d174..aeb676708e60 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
}
info->par = rfbdev;
+ info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
@@ -434,3 +435,13 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
return true;
return false;
}
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
+
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 00fc59762e0d..7162c935371c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -87,6 +87,20 @@ static void radeon_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
+static void radeon_dp_work_func(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ dp_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ /* this should take a mutex */
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ }
+}
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
@@ -276,6 +290,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
}
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
rdev->irq.installed = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 122eb5693ba1..3db23007cdf4 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -103,15 +103,14 @@ static const struct kgd2kfd_calls *kgd2kfd;
bool radeon_kfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
- bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
- const struct kgd2kfd_calls**);
+ bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init);
if (kgd2kfd_init_p == NULL)
return false;
- if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
symbol_put(kgd2kfd_init);
kgd2kfd = NULL;
@@ -120,7 +119,7 @@ bool radeon_kfd_init(void)
return true;
#elif defined(CONFIG_HSA_AMD)
- if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
kgd2kfd = NULL;
return false;
@@ -143,7 +142,8 @@ void radeon_kfd_fini(void)
void radeon_kfd_device_probe(struct radeon_device *rdev)
{
if (kgd2kfd)
- rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, rdev->pdev);
+ rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+ rdev->pdev, &kfd2kgd);
}
void radeon_kfd_device_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 686411e4e4f6..7b2a7335cc5d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -547,6 +547,35 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
else
*value = 1;
break;
+ case RADEON_INFO_CURRENT_GPU_TEMP:
+ /* get temperature in millidegrees C */
+ if (rdev->asic->pm.get_temperature)
+ *value = radeon_get_temperature(rdev);
+ else
+ *value = 0;
+ break;
+ case RADEON_INFO_CURRENT_GPU_SCLK:
+	/* get sclk in MHz */
+ if (rdev->pm.dpm_enabled)
+ *value = radeon_dpm_get_current_sclk(rdev) / 100;
+ else
+ *value = rdev->pm.current_sclk / 100;
+ break;
+ case RADEON_INFO_CURRENT_GPU_MCLK:
+	/* get mclk in MHz */
+ if (rdev->pm.dpm_enabled)
+ *value = radeon_dpm_get_current_mclk(rdev) / 100;
+ else
+ *value = rdev->pm.current_mclk / 100;
+ break;
+ case RADEON_INFO_READ_REG:
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (radeon_get_allowed_info_register(rdev, *value, value))
+ return -EINVAL;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index c89971d904c3..45715307db71 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -36,7 +36,7 @@
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder_helper_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 572b4dbec186..eef006c48584 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -53,6 +53,11 @@ struct radeon_mn {
struct rb_root objects;
};
+struct radeon_mn_node {
+ struct interval_tree_node it;
+ struct list_head bos;
+};
+
/**
* radeon_mn_destroy - destroy the rmn
*
@@ -64,14 +69,21 @@ static void radeon_mn_destroy(struct work_struct *work)
{
struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
struct radeon_device *rdev = rmn->rdev;
- struct radeon_bo *bo, *next;
+ struct radeon_mn_node *node, *next_node;
+ struct radeon_bo *bo, *next_bo;
mutex_lock(&rdev->mn_lock);
mutex_lock(&rmn->lock);
hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
- interval_tree_remove(&bo->mn_it, &rmn->objects);
- bo->mn = NULL;
+ rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
+ it.rb) {
+
+ interval_tree_remove(&node->it, &rmn->objects);
+ list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+ bo->mn = NULL;
+ list_del_init(&bo->mn_list);
+ }
+ kfree(node);
}
mutex_unlock(&rmn->lock);
mutex_unlock(&rdev->mn_lock);
@@ -121,29 +133,36 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
+ struct radeon_mn_node *node;
struct radeon_bo *bo;
- int r;
+ long r;
- bo = container_of(it, struct radeon_bo, mn_it);
+ node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, start, end);
- r = radeon_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%d) failed to reserve user bo\n", r);
- continue;
- }
+ list_for_each_entry(bo, &node->bos, mn_list) {
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
- false, MAX_SCHEDULE_TIMEOUT);
- if (r)
- DRM_ERROR("(%d) failed to wait for user bo\n", r);
+ if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ continue;
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%d) failed to validate user bo\n", r);
+ r = radeon_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+ continue;
+ }
- radeon_bo_unreserve(bo);
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false, MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (r)
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+ radeon_bo_unreserve(bo);
+ }
}
mutex_unlock(&rmn->lock);
@@ -220,24 +239,44 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
unsigned long end = addr + radeon_bo_size(bo) - 1;
struct radeon_device *rdev = bo->rdev;
struct radeon_mn *rmn;
+ struct radeon_mn_node *node = NULL;
+ struct list_head bos;
struct interval_tree_node *it;
rmn = radeon_mn_get(rdev);
if (IS_ERR(rmn))
return PTR_ERR(rmn);
+ INIT_LIST_HEAD(&bos);
+
mutex_lock(&rmn->lock);
- it = interval_tree_iter_first(&rmn->objects, addr, end);
- if (it) {
- mutex_unlock(&rmn->lock);
- return -EEXIST;
+ while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+ kfree(node);
+ node = container_of(it, struct radeon_mn_node, it);
+ interval_tree_remove(&node->it, &rmn->objects);
+ addr = min(it->start, addr);
+ end = max(it->last, end);
+ list_splice(&node->bos, &bos);
+ }
+
+ if (!node) {
+ node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
+ if (!node) {
+ mutex_unlock(&rmn->lock);
+ return -ENOMEM;
+ }
}
bo->mn = rmn;
- bo->mn_it.start = addr;
- bo->mn_it.last = end;
- interval_tree_insert(&bo->mn_it, &rmn->objects);
+
+ node->it.start = addr;
+ node->it.last = end;
+ INIT_LIST_HEAD(&node->bos);
+ list_splice(&bos, &node->bos);
+ list_add(&bo->mn_list, &node->bos);
+
+ interval_tree_insert(&node->it, &rmn->objects);
mutex_unlock(&rmn->lock);
@@ -255,6 +294,7 @@ void radeon_mn_unregister(struct radeon_bo *bo)
{
struct radeon_device *rdev = bo->rdev;
struct radeon_mn *rmn;
+ struct list_head *head;
mutex_lock(&rdev->mn_lock);
rmn = bo->mn;
@@ -264,8 +304,19 @@ void radeon_mn_unregister(struct radeon_bo *bo)
}
mutex_lock(&rmn->lock);
- interval_tree_remove(&bo->mn_it, &rmn->objects);
+ /* save the next list entry for later */
+ head = bo->mn_list.next;
+
bo->mn = NULL;
+ list_del(&bo->mn_list);
+
+ if (list_empty(head)) {
+ struct radeon_mn_node *node;
+ node = container_of(head, struct radeon_mn_node, bos);
+ interval_tree_remove(&node->it, &rmn->objects);
+ kfree(node);
+ }
+
mutex_unlock(&rmn->lock);
mutex_unlock(&rdev->mn_lock);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 920a8be8abad..fa91a17b81b6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <linux/i2c.h>
@@ -85,6 +86,13 @@ enum radeon_hpd_id {
RADEON_HPD_NONE = 0xff,
};
+enum radeon_output_csc {
+ RADEON_OUTPUT_CSC_BYPASS = 0,
+ RADEON_OUTPUT_CSC_TVRGB = 1,
+ RADEON_OUTPUT_CSC_YCBCR601 = 2,
+ RADEON_OUTPUT_CSC_YCBCR709 = 3,
+};
+
#define RADEON_MAX_I2C_BUS 16
/* radeon gpio-based i2c
@@ -255,6 +263,8 @@ struct radeon_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* Output CSC */
+ struct drm_property *output_csc_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -265,6 +275,9 @@ struct radeon_mode_info {
u16 firmware_flags;
/* pointer to backlight encoder */
struct radeon_encoder *bl_encoder;
+
+ /* bitmask for active encoder frontends */
+ uint32_t active_encoders;
};
#define RADEON_MAX_BL_LEVEL 0xFF
@@ -357,6 +370,7 @@ struct radeon_crtc {
u32 wm_low;
u32 wm_high;
struct drm_display_mode hw_mode;
+ enum radeon_output_csc output_csc;
};
struct radeon_encoder_primary_dac {
@@ -426,12 +440,24 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
+ int active_mst_links;
};
struct radeon_encoder_atom_dac {
enum radeon_tv_std tv_std;
};
+struct radeon_encoder_mst {
+ int crtc;
+ struct radeon_encoder *primary;
+ struct radeon_connector *connector;
+ struct drm_dp_mst_port *port;
+ int pbn;
+ int fe;
+ bool fe_from_be;
+ bool enc_active;
+};
+
struct radeon_encoder {
struct drm_encoder base;
uint32_t encoder_enum;
@@ -450,6 +476,11 @@ struct radeon_encoder {
bool is_ext_encoder;
u16 caps;
struct radeon_audio_funcs *audio;
+ enum radeon_output_csc output_csc;
+ bool can_mst;
+ uint32_t offset;
+ bool is_mst_encoder;
+ /* front end for this mst encoder */
};
struct radeon_connector_atom_dig {
@@ -460,6 +491,7 @@ struct radeon_connector_atom_dig {
int dp_clock;
int dp_lane_count;
bool edp_on;
+ bool is_mst;
};
struct radeon_gpio_rec {
@@ -503,6 +535,11 @@ enum radeon_connector_dither {
RADEON_FMT_DITHER_ENABLE = 1,
};
+struct stream_attribs {
+ uint16_t fe;
+ uint16_t slots;
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -524,6 +561,14 @@ struct radeon_connector {
enum radeon_connector_audio audio;
enum radeon_connector_dither dither;
int pixelclock_for_modeset;
+ bool is_mst_connector;
+ struct radeon_connector *mst_port;
+ struct drm_dp_mst_port *port;
+ struct drm_dp_mst_topology_mgr mst_mgr;
+
+ struct radeon_encoder *mst_encoder;
+ struct stream_attribs cur_stream_attribs[6];
+ int enabled_attribs;
};
struct radeon_framebuffer {
@@ -708,15 +753,26 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
+int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+ u8 *dpcd);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
+extern ssize_t
+radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg);
+
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+extern void atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
+extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder,
+ int action, uint8_t lane_num,
+ uint8_t lane_set, int fe);
+extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder,
+ int fe);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
@@ -929,7 +985,23 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
+
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
+
+/* mst */
+int radeon_dp_mst_init(struct radeon_connector *radeon_connector);
+int radeon_dp_mst_probe(struct radeon_connector *radeon_connector);
+int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector);
+int radeon_mst_debugfs_init(struct radeon_device *rdev);
+void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode);
+
+void radeon_setup_mst_connector(struct drm_device *dev);
+
+int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx);
+void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index ce075cb08cb2..fdce4062901f 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -9,7 +9,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM radeon
-#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE radeon_trace
TRACE_EVENT(radeon_bo_create,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b292aca0f342..edafd3c2b170 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
- struct scatterlist *sg;
- int i;
+ struct sg_page_iter sg_iter;
int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
- struct page *page = sg_page(sg);
-
+ for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
set_page_dirty(page);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index c10b2aec6450..6edcb5485092 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
int radeon_uvd_suspend(struct radeon_device *rdev)
{
- unsigned size;
- void *ptr;
- int i;
+ int i, r;
if (rdev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
- if (atomic_read(&rdev->uvd.handles[i]))
- break;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0) {
+ struct radeon_fence *fence;
- if (i == RADEON_MAX_UVD_HANDLES)
- return 0;
+ radeon_uvd_note_usage(rdev);
- size = radeon_bo_size(rdev->uvd.vcpu_bo);
- size -= rdev->uvd_fw->size;
+ r = radeon_uvd_get_destroy_msg(rdev,
+ R600_RING_TYPE_UVD_INDEX, handle, &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD (%d)!\n", r);
+ continue;
+ }
- ptr = rdev->uvd.cpu_addr;
- ptr += rdev->uvd_fw->size;
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
- rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
- memcpy(rdev->uvd.saved_bo, ptr, size);
+ rdev->uvd.filp[i] = NULL;
+ atomic_set(&rdev->uvd.handles[i], 0);
+ }
+ }
return 0;
}
@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size;
- if (rdev->uvd.saved_bo != NULL) {
- memcpy(ptr, rdev->uvd.saved_bo, size);
- kfree(rdev->uvd.saved_bo);
- rdev->uvd.saved_bo = NULL;
- } else
- memset(ptr, 0, size);
+ memset(ptr, 0, size);
return 0;
}
@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
return 0;
}
+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
+ unsigned stream_type)
+{
+ switch (stream_type) {
+ case 0: /* H264 */
+ case 1: /* VC1 */
+ /* always supported */
+ return 0;
+
+ case 3: /* MPEG2 */
+ case 4: /* MPEG4 */
+ /* only since UVD 3 */
+ if (p->rdev->family >= CHIP_PALM)
+ return 0;
+
+ /* fall through */
+ default:
+ DRM_ERROR("UVD codec not supported by hardware %d!\n",
+ stream_type);
+ return -EINVAL;
+ }
+}
+
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
unsigned offset, unsigned buf_sizes[])
{
@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- if (msg_type == 1) {
- /* it's a decode msg, calc buffer sizes */
- r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
- /* calc image size (width * height) */
- img_size = msg[6] * msg[7];
+ switch (msg_type) {
+ case 0:
+ /* it's a create msg, calc image size (width * height) */
+ img_size = msg[7] * msg[8];
+
+ r = radeon_uvd_validate_codec(p, msg[4]);
radeon_bo_kunmap(bo);
if (r)
return r;
- } else if (msg_type == 2) {
+ /* try to alloc a new handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
+ return -EINVAL;
+ }
+
+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+ p->rdev->uvd.filp[i] = p->filp;
+ p->rdev->uvd.img_size[i] = img_size;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free UVD handles!\n");
+ return -EINVAL;
+
+ case 1:
+ /* it's a decode msg, validate codec and calc buffer sizes */
+ r = radeon_uvd_validate_codec(p, msg[4]);
+ if (!r)
+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ radeon_bo_kunmap(bo);
+ if (r)
+ return r;
+
+ /* validate the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+ if (p->rdev->uvd.filp[i] != p->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+ return -ENOENT;
+
+ case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
radeon_bo_kunmap(bo);
return 0;
- } else {
- /* it's a create msg, calc image size (width * height) */
- img_size = msg[7] * msg[8];
- radeon_bo_kunmap(bo);
- if (msg_type != 0) {
- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
- return -EINVAL;
- }
-
- /* it's a create msg, no special handling needed */
- }
-
- /* create or decode, validate the handle */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
- return 0;
- }
+ default:
- /* handle not found try to alloc a new one */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
- p->rdev->uvd.filp[i] = p->filp;
- p->rdev->uvd.img_size[i] = img_size;
- return 0;
- }
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
}
- DRM_ERROR("No more free UVD handles!\n");
+ BUG();
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 976fe432f4e2..0de5711ac508 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
*
* @p: parser context
* @handle: handle to validate
+ * @allocated: allocated a new handle?
*
* Validates the handle and returns the found session index, or -EINVAL
* if we don't have another free session index.
*/
-int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
+ uint32_t handle, bool *allocated)
{
unsigned i;
+ *allocated = false;
+
/* validate the handle */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+ if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
+ if (p->rdev->vce.filp[i] != p->filp) {
+ DRM_ERROR("VCE handle collision detected!\n");
+ return -EINVAL;
+ }
return i;
+ }
}
/* handle not found try to alloc a new one */
@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
p->rdev->vce.filp[i] = p->filp;
p->rdev->vce.img_size[i] = 0;
+ *allocated = true;
return i;
}
}
@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
- bool destroyed = false;
+ bool destroyed = false, created = false, allocated = false;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r;
+ int i, r = 0;
while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx);
@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
if (destroyed) {
DRM_ERROR("No other command allowed after destroy!\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
switch (cmd) {
case 0x00000001: // session
handle = radeon_get_ib_value(p, p->idx + 2);
- session_idx = radeon_vce_validate_handle(p, handle);
+ session_idx = radeon_vce_validate_handle(p, handle,
+ &allocated);
if (session_idx < 0)
return session_idx;
size = &p->rdev->vce.img_size[session_idx];
@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
break;
case 0x01000001: // create
+ created = true;
+ if (!allocated) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
*size = radeon_get_ib_value(p, p->idx + 8) *
radeon_get_ib_value(p, p->idx + 10) *
8 * 3 / 2;
@@ -571,18 +591,19 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
case 0x04000005: // rate control
case 0x04000007: // motion estimation
case 0x04000008: // rdo
+ case 0x04000009: // vui
break;
case 0x03000001: // encode
r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
*size);
if (r)
- return r;
+ goto out;
r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
*size / 3);
if (r)
- return r;
+ goto out;
break;
case 0x02000001: // destroy
@@ -593,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
*size * 2);
if (r)
- return r;
+ goto out;
break;
case 0x05000004: // video bitstream buffer
@@ -601,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
tmp);
if (r)
- return r;
+ goto out;
break;
case 0x05000005: // feedback buffer
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
4096);
if (r)
- return r;
+ goto out;
break;
default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
if (session_idx == -1) {
DRM_ERROR("no session command at start of IB\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
p->idx += len / 4;
}
- if (destroyed) {
- /* IB contains a destroy msg, free the handle */
+ if (allocated && !created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
+ }
+
+out:
+ if ((!r && destroyed) || (r && allocated)) {
+ /*
+	 * IB contains a destroy msg or we allocated a handle and
+	 * got an error; either way, free the handle
+ */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
}
- return 0;
+ return r;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2a5a4a9e772d..de42fc4a22b8 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_lock(&vm->mutex);
+ soffset /= RADEON_GPU_PAGE_SIZE;
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ if (it && it != &bo_va->it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ }
+
if (bo_va->it.start || bo_va->it.last) {
if (bo_va->addr) {
/* add a clone of the bo_va to clear the old address */
@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
spin_lock(&vm->status_lock);
list_add(&tmp->vm_status, &vm->freed);
spin_unlock(&vm->status_lock);
+
+ bo_va->addr = 0;
}
interval_tree_remove(&bo_va->it, &vm->va);
@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
bo_va->it.last = 0;
}
- soffset /= RADEON_GPU_PAGE_SIZE;
- eoffset /= RADEON_GPU_PAGE_SIZE;
if (soffset || eoffset) {
- struct interval_tree_node *it;
- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
- if (it) {
- struct radeon_bo_va *tmp;
- tmp = container_of(it, struct radeon_bo_va, it);
- /* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
- "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
- soffset, tmp->bo, tmp->it.start, tmp->it.last);
- mutex_unlock(&vm->mutex);
- return -EINVAL;
- }
bo_va->it.start = soffset;
bo_va->it.last = eoffset - 1;
interval_tree_insert(&bo_va->it, &vm->va);
@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
list_del(&bo_va->bo_list);
mutex_lock(&vm->mutex);
- interval_tree_remove(&bo_va->it, &vm->va);
+ if (bo_va->it.start || bo_va->it.last)
+ interval_tree_remove(&bo_va->it, &vm->va);
spin_lock(&vm->status_lock);
list_del(&bo_va->vm_status);
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 9031f4b69824..cb0afe78abed 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -1001,6 +1001,28 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
ps->sclk_high, ps->max_voltage);
}
+/* get the current sclk in 10 kHz units */
+u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK;
+ u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+ u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1;
+ u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 +
+ ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1;
+ u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) /
+ (post_div * ref_div);
+
+ return sclk;
+}
+
+/* get the current mclk in 10 kHz units */
+u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct igp_power_info *pi = rs780_get_pi(rdev);
+
+ return pi->bootup_uma_clk;
+}
+
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 6a5c233361e9..97e5a6f1ce58 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -2050,6 +2050,52 @@ void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
}
}
+/* get the current sclk in 10 kHz units */
+u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+ struct rv6xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->sclk;
+ }
+}
+
+/* get the current mclk in 10 kHz units */
+u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv6xx_ps *ps = rv6xx_get_ps(rps);
+ struct rv6xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->mclk;
+ }
+}
+
void rv6xx_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 01ee96acb398..c54d6313a46d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 306732641b23..b9c770745a7a 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2492,6 +2492,50 @@ void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
}
}
+u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->sclk;
+ }
+}
+
+u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ return 0;
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ return pl->mclk;
+ }
+}
+
void rv770_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 3cf1e2921545..9ef2064b1c9c 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -989,6 +989,9 @@
((n) & 0x3FFF) << 16)
/* UVD */
+#define UVD_SEMA_ADDR_LOW 0xef00
+#define UVD_SEMA_ADDR_HIGH 0xef04
+#define UVD_SEMA_CMD 0xef08
#define UVD_GPCOM_VCPU_CMD 0xef0c
#define UVD_GPCOM_VCPU_DATA0 0xef10
#define UVD_GPCOM_VCPU_DATA1 0xef14
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a7fb2735d4a9..5326f753e107 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1264,6 +1264,36 @@ static void si_init_golden_registers(struct radeon_device *rdev)
}
}
+/**
+ * si_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int si_get_allowed_info_register(struct radeon_device *rdev,
+ u32 reg, u32 *val)
+{
+ switch (reg) {
+ case GRBM_STATUS:
+ case GRBM_STATUS2:
+ case GRBM_STATUS_SE0:
+ case GRBM_STATUS_SE1:
+ case SRBM_STATUS:
+ case SRBM_STATUS2:
+ case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
+ case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
+ case UVD_STATUS:
+ *val = RREG32(reg);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
@@ -4273,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
L2_CACHE_BIGK_FRAGMENT_SIZE(4));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
@@ -4288,7 +4318,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
/* empty context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and setup and assigned
* on the fly in the vm part of radeon_gart.c
@@ -6055,12 +6085,12 @@ int si_irq_set(struct radeon_device *rdev)
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
if (!ASIC_IS_NODCE(rdev)) {
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
}
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -6123,27 +6153,27 @@ int si_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("si_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("si_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("si_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("si_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("si_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("si_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -6306,6 +6336,37 @@ static inline void si_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_RX_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
}
static void si_irq_disable(struct radeon_device *rdev)
@@ -6371,6 +6432,7 @@ int si_irq_process(struct radeon_device *rdev)
u32 src_id, src_data, ring_id;
u32 ring_index;
bool queue_hotplug = false;
+ bool queue_dp = false;
bool queue_thermal = false;
u32 status, addr;
@@ -6611,6 +6673,48 @@ restart_ih:
DRM_DEBUG("IH: HPD6\n");
}
break;
+ case 6:
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+ }
+ break;
+ case 7:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+ }
+ break;
+ case 8:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+ }
+ break;
+ case 9:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+ }
+ break;
+ case 10:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+ }
+ break;
+ case 11:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -6693,6 +6797,8 @@ restart_ih:
rptr &= rdev->ih.ptr_mask;
WREG32(IH_RB_RPTR, rptr);
}
+ if (queue_dp)
+ schedule_work(&rdev->dp_work);
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 7be11651b7e6..ff8b83f5e929 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2924,6 +2924,7 @@ struct si_dpm_quirk {
static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ 0, 0, 0, 0 },
};
@@ -6993,3 +6994,39 @@ void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
}
+
+u32 si_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 si_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct ni_ps *ps = ni_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ return 0;
+ } else {
+ pl = &ps->performance_levels[current_index];
+ return pl->mclk;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 99a9835c9f61..3afac3013983 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1556,6 +1556,7 @@
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
#define UVD_RBC_RB_RPTR 0xF690
#define UVD_RBC_RB_WPTR 0xF694
+#define UVD_STATUS 0xf6bc
#define UVD_CGC_CTRL 0xF4B0
# define DCM (1 << 0)
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 25fd4ced36c8..cd0862809adf 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1837,6 +1837,34 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
}
}
+u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
+ struct sumo_ps *ps = sumo_get_ps(rps);
+ struct sumo_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
+ CURR_INDEX_SHIFT;
+
+ if (current_index == BOOST_DPM_LEVEL) {
+ pl = &pi->boost_pl;
+ return pl->sclk;
+ } else if (current_index >= ps->num_levels) {
+ return 0;
+ } else {
+ pl = &ps->levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
void sumo_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 38dacb7a3689..a5b02c575d77 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1964,6 +1964,31 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
}
}
+u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev)
+{
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
+ struct trinity_ps *ps = trinity_get_ps(rps);
+ struct trinity_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) >>
+ CURRENT_STATE_SHIFT;
+
+ if (current_index >= ps->num_levels) {
+ return 0;
+ } else {
+ pl = &ps->levels[current_index];
+ return pl->sclk;
+ }
+}
+
+u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev)
+{
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
void trinity_dpm_fini(struct radeon_device *rdev)
{
int i;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index e72b3cb59358..c6b1cbca47fc 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, emit_wait ? 1 : 0);
-
- return true;
+ /* disable semaphores for UVD V1 hardware */
+ return false;
}
/**
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 89193519f8a1..7ed778cec7c6 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
}
/**
+ * uvd_v2_2_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+ return true;
+}
+
+/**
* uvd_v2_2_resume - memory controller programming
*
* @rdev: radeon_device pointer
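A note on the uvd_v2_2_semaphore_emit() hunk above: the 64-bit semaphore GPU address is split across two 20-bit register fields. The helpers below are an editorial illustration of that encoding, not part of the patch:

/* Editorial illustration of the address split used by uvd_v2_2_semaphore_emit(). */
static inline u32 uvd_sema_addr_low(u64 addr)
{
	return (addr >> 3) & 0x000FFFFF;	/* address bits 3..22 */
}

static inline u32 uvd_sema_addr_high(u64 addr)
{
	return (addr >> 23) & 0x000FFFFF;	/* address bits 23..42 */
}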
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 25c7a998fc2c..7d0b8ef9bea2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -15,6 +15,8 @@
#include <linux/mutex.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -99,9 +101,13 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
clk_disable_unprepare(rcrtc->clock);
}
+/* -----------------------------------------------------------------------------
+ * Hardware Setup
+ */
+
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
- const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+ const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
unsigned long mode_clock = mode->clock * 1000;
unsigned long clk;
u32 value;
@@ -187,9 +193,19 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
rcdu->dpad0_source = rcrtc->index;
}
-void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
+static unsigned int plane_zpos(struct rcar_du_plane *plane)
+{
+ return to_rcar_du_plane_state(plane->plane.state)->zpos;
+}
+
+static const struct rcar_du_format_info *
+plane_format(struct rcar_du_plane *plane)
+{
+ return to_rcar_du_plane_state(plane->plane.state)->format;
+}
+
+static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
unsigned int num_planes = 0;
unsigned int prio = 0;
@@ -201,29 +217,30 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
unsigned int j;
- if (plane->crtc != &rcrtc->crtc || !plane->enabled)
+ if (plane->plane.state->crtc != &rcrtc->crtc)
continue;
/* Insert the plane in the sorted planes array. */
for (j = num_planes++; j > 0; --j) {
- if (planes[j-1]->zpos <= plane->zpos)
+ if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
break;
planes[j] = planes[j-1];
}
planes[j] = plane;
- prio += plane->format->planes * 4;
+ prio += plane_format(plane)->planes * 4;
}
for (i = 0; i < num_planes; ++i) {
struct rcar_du_plane *plane = planes[i];
- unsigned int index = plane->hwindex;
+ struct drm_plane_state *state = plane->plane.state;
+ unsigned int index = to_rcar_du_plane_state(state)->hwindex;
prio -= 4;
dspr |= (index + 1) << prio;
dptsr |= DPTSR_PnDK(index) | DPTSR_PnTS(index);
- if (plane->format->planes == 2) {
+ if (plane_format(plane)->planes == 2) {
index = (index + 1) % 8;
prio -= 4;
@@ -236,8 +253,6 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
* with superposition controller 2.
*/
if (rcrtc->index % 2) {
- u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
-
/* The DPTSR register is updated when the display controller is
* stopped. We thus need to restart the DU. Once again, sorry
* for the flicker. One way to mitigate the issue would be to
@@ -245,29 +260,104 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
* split, or through a module parameter). Flicker would then
* occur only if we need to break the pre-association.
*/
- if (value != dptsr) {
+ mutex_lock(&rcrtc->group->lock);
+ if (rcar_du_group_read(rcrtc->group, DPTSR) != dptsr) {
rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
if (rcrtc->group->used_crtcs)
rcar_du_group_restart(rcrtc->group);
}
+ mutex_unlock(&rcrtc->group->lock);
}
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
dspr);
}
+/* -----------------------------------------------------------------------------
+ * Page Flip
+ */
+
+void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
+ struct drm_file *file)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ /* Destroy the pending vertical blanking event associated with the
+ * pending page flip, if any, and disable vertical blanking interrupts.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = rcrtc->event;
+ if (event && event->base.file_priv == file) {
+ rcrtc->event = NULL;
+ event->base.destroy(&event->base);
+ drm_crtc_vblank_put(&rcrtc->crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = rcrtc->event;
+ rcrtc->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (event == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_send_vblank_event(dev, rcrtc->index, event);
+ wake_up(&rcrtc->flip_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ drm_crtc_vblank_put(&rcrtc->crtc);
+}
+
+static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
+{
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+ bool pending;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = rcrtc->event != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
+}
+
+static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
+{
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+
+ if (wait_event_timeout(rcrtc->flip_wait,
+ !rcar_du_crtc_page_flip_pending(rcrtc),
+ msecs_to_jiffies(50)))
+ return;
+
+ dev_warn(rcdu->dev, "page flip timeout\n");
+
+ rcar_du_crtc_finish_page_flip(rcrtc);
+}
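/*
 * Editorial note (not part of the patch): .atomic_begin(), added further down,
 * takes a vblank reference and stores the pending event in rcrtc->event. The
 * vblank interrupt path then calls rcar_du_crtc_finish_page_flip(), which
 * sends the event, wakes flip_wait and drops the reference.
 * rcar_du_crtc_wait_page_flip() is used when stopping the CRTC; after 50 ms it
 * warns and completes the flip itself so the stop path cannot block
 * indefinitely.
 */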
+
+/* -----------------------------------------------------------------------------
+ * Start/Stop and Suspend/Resume
+ */
+
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
bool interlaced;
- unsigned int i;
if (rcrtc->started)
return;
- if (WARN_ON(rcrtc->plane->format == NULL))
- return;
-
/* Set display off and background to black */
rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
@@ -276,20 +366,8 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
rcar_du_crtc_set_display_timing(rcrtc);
rcar_du_group_set_routing(rcrtc->group);
- mutex_lock(&rcrtc->group->planes.lock);
- rcrtc->plane->enabled = true;
- rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcrtc->group->planes.lock);
-
- /* Setup planes. */
- for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
-
- if (plane->crtc != crtc || !plane->enabled)
- continue;
-
- rcar_du_plane_setup(plane);
- }
+ /* Start with all planes disabled. */
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
/* Select master sync mode. This enables display operation in master
* sync mode (with the HSYNC and VSYNC signals configured as outputs and
@@ -302,6 +380,9 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
rcar_du_group_start_stop(rcrtc->group, true);
+ /* Turn vertical blanking interrupt reporting back on. */
+ drm_crtc_vblank_on(crtc);
+
rcrtc->started = true;
}
@@ -312,10 +393,12 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
if (!rcrtc->started)
return;
- mutex_lock(&rcrtc->group->planes.lock);
- rcrtc->plane->enabled = false;
- rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcrtc->group->planes.lock);
+ /* Disable vertical blanking interrupt reporting. We first need to wait
+ * for page flip completion before stopping the CRTC as userspace
+ * expects page flips to eventually complete.
+ */
+ rcar_du_crtc_wait_page_flip(rcrtc);
+ drm_crtc_vblank_off(crtc);
/* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
@@ -335,196 +418,109 @@ void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
{
- if (rcrtc->dpms != DRM_MODE_DPMS_ON)
+ unsigned int i;
+
+ if (!rcrtc->enabled)
return;
rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
-}
-
-static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
-{
- struct drm_crtc *crtc = &rcrtc->crtc;
-
- rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
- rcar_du_plane_update_base(rcrtc->plane);
-}
-
-static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ /* Commit the planes state. */
+ for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+ struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
- if (rcrtc->dpms == mode)
- return;
+ if (plane->plane.state->crtc != &rcrtc->crtc)
+ continue;
- if (mode == DRM_MODE_DPMS_ON) {
- rcar_du_crtc_get(rcrtc);
- rcar_du_crtc_start(rcrtc);
- } else {
- rcar_du_crtc_stop(rcrtc);
- rcar_du_crtc_put(rcrtc);
+ rcar_du_plane_setup(plane);
}
- rcrtc->dpms = mode;
+ rcar_du_crtc_update_planes(rcrtc);
}
-static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* TODO Fixup modes */
- return true;
-}
+/* -----------------------------------------------------------------------------
+ * CRTC Functions
+ */
-static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
+static void rcar_du_crtc_enable(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- /* We need to access the hardware during mode set, acquire a reference
- * to the CRTC.
- */
- rcar_du_crtc_get(rcrtc);
+ if (rcrtc->enabled)
+ return;
- /* Stop the CRTC and release the plane. Force the DPMS mode to off as a
- * result.
- */
- rcar_du_crtc_stop(rcrtc);
- rcar_du_plane_release(rcrtc->plane);
+ rcar_du_crtc_get(rcrtc);
+ rcar_du_crtc_start(rcrtc);
- rcrtc->dpms = DRM_MODE_DPMS_OFF;
+ rcrtc->enabled = true;
}
-static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void rcar_du_crtc_disable(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
- const struct rcar_du_format_info *format;
- int ret;
-
- format = rcar_du_format_info(crtc->primary->fb->pixel_format);
- if (format == NULL) {
- dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n",
- crtc->primary->fb->pixel_format);
- ret = -EINVAL;
- goto error;
- }
- ret = rcar_du_plane_reserve(rcrtc->plane, format);
- if (ret < 0)
- goto error;
-
- rcrtc->plane->format = format;
-
- rcrtc->plane->src_x = x;
- rcrtc->plane->src_y = y;
- rcrtc->plane->width = mode->hdisplay;
- rcrtc->plane->height = mode->vdisplay;
+ if (!rcrtc->enabled)
+ return;
- rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
+ rcar_du_crtc_stop(rcrtc);
+ rcar_du_crtc_put(rcrtc);
+ rcrtc->enabled = false;
rcrtc->outputs = 0;
-
- return 0;
-
-error:
- /* There's no rollback/abort operation to clean up in case of error. We
- * thus need to release the reference to the CRTC acquired in prepare()
- * here.
- */
- rcar_du_crtc_put(rcrtc);
- return ret;
}
-static void rcar_du_crtc_mode_commit(struct drm_crtc *crtc)
+static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
-
- /* We're done, restart the CRTC and set the DPMS mode to on. The
- * reference to the DU acquired at prepare() time will thus be released
- * by the DPMS handler (possibly called by the disable() handler).
- */
- rcar_du_crtc_start(rcrtc);
- rcrtc->dpms = DRM_MODE_DPMS_ON;
+ /* TODO Fixup modes */
+ return true;
}
-static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
{
+ struct drm_pending_vblank_event *event = crtc->state->event;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
- rcrtc->plane->src_x = x;
- rcrtc->plane->src_y = y;
-
- rcar_du_crtc_update_base(rcrtc);
+ if (event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- return 0;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ rcrtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
-static void rcar_du_crtc_disable(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- rcar_du_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- rcar_du_plane_release(rcrtc->plane);
+ rcar_du_crtc_update_planes(rcrtc);
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
- .dpms = rcar_du_crtc_dpms,
.mode_fixup = rcar_du_crtc_mode_fixup,
- .prepare = rcar_du_crtc_mode_prepare,
- .commit = rcar_du_crtc_mode_commit,
- .mode_set = rcar_du_crtc_mode_set,
- .mode_set_base = rcar_du_crtc_mode_set_base,
.disable = rcar_du_crtc_disable,
+ .enable = rcar_du_crtc_enable,
+ .atomic_begin = rcar_du_crtc_atomic_begin,
+ .atomic_flush = rcar_du_crtc_atomic_flush,
};
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- /* Destroy the pending vertical blanking event associated with the
- * pending page flip, if any, and disable vertical blanking interrupts.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- event = rcrtc->event;
- if (event && event->base.file_priv == file) {
- rcrtc->event = NULL;
- event->base.destroy(&event->base);
- drm_vblank_put(dev, rcrtc->index);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- event = rcrtc->event;
- rcrtc->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- if (event == NULL)
- return;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, rcrtc->index, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+static const struct drm_crtc_funcs crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
- drm_vblank_put(dev, rcrtc->index);
-}
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
@@ -544,41 +540,9 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
return ret;
}
-static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (rcrtc->event != NULL) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return -EBUSY;
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- crtc->primary->fb = fb;
- rcar_du_crtc_update_base(rcrtc);
-
- if (event) {
- event->pipe = rcrtc->index;
- drm_vblank_get(dev, rcrtc->index);
- spin_lock_irqsave(&dev->event_lock, flags);
- rcrtc->event = event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- return 0;
-}
-
-static const struct drm_crtc_funcs crtc_funcs = {
- .destroy = drm_crtc_cleanup,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = rcar_du_crtc_page_flip,
-};
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
{
@@ -620,20 +584,24 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
return -EPROBE_DEFER;
}
+ init_waitqueue_head(&rcrtc->flip_wait);
+
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
- rcrtc->dpms = DRM_MODE_DPMS_OFF;
- rcrtc->plane = &rgrp->planes.planes[index % 2];
-
- rcrtc->plane->crtc = crtc;
+ rcrtc->enabled = false;
- ret = drm_crtc_init(rcdu->ddev, crtc, &crtc_funcs);
+ ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
+ &rgrp->planes.planes[index % 2].plane,
+ NULL, &crtc_funcs);
if (ret < 0)
return ret;
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+ /* Start with vertical blanking interrupt reporting disabled. */
+ drm_crtc_vblank_off(crtc);
+
/* Register the interrupt handler. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
irq = platform_get_irq(pdev, index);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index d2f89f7d2e5e..5d9aa9b33769 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,12 +15,12 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
+#include <linux/wait.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
struct rcar_du_group;
-struct rcar_du_plane;
struct rcar_du_crtc {
struct drm_crtc crtc;
@@ -32,11 +32,12 @@ struct rcar_du_crtc {
bool started;
struct drm_pending_vblank_event *event;
+ wait_queue_head_t flip_wait;
+
unsigned int outputs;
- int dpms;
+ bool enabled;
struct rcar_du_group *group;
- struct rcar_du_plane *plane;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
@@ -59,6 +60,5 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_route_output(struct drm_crtc *crtc,
enum rcar_du_output output);
-void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index e0d74f821416..da1216a73969 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/wait.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -163,6 +164,8 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
return -ENOMEM;
}
+ init_waitqueue_head(&rcdu->commit.wait);
+
rcdu->dev = &pdev->dev;
rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
: (void *)platform_get_device_id(pdev)->driver_data;
@@ -175,17 +178,19 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
if (IS_ERR(rcdu->mmio))
return PTR_ERR(rcdu->mmio);
- /* DRM/KMS objects */
- ret = rcar_du_modeset_init(rcdu);
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
goto done;
}
- /* vblank handling */
- ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+ /* DRM/KMS objects */
+ ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize vblank\n");
+ dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
goto done;
}
@@ -247,7 +252,8 @@ static const struct file_operations rcar_du_fops = {
};
static struct drm_driver rcar_du_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
+ | DRIVER_ATOMIC,
.load = rcar_du_load,
.unload = rcar_du_unload,
.preclose = rcar_du_preclose,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index c5b9ea6a7eaa..c7c538dd2e68 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,6 +15,7 @@
#define __RCAR_DU_DRV_H__
#include <linux/kernel.h>
+#include <linux/wait.h>
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
@@ -64,6 +65,10 @@ struct rcar_du_device_info {
unsigned int num_lvds;
};
+#define RCAR_DU_MAX_CRTCS 3
+#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
+#define RCAR_DU_MAX_LVDS 2
+
struct rcar_du_device {
struct device *dev;
const struct rcar_du_device_info *info;
@@ -73,13 +78,18 @@ struct rcar_du_device {
struct drm_device *ddev;
struct drm_fbdev_cma *fbdev;
- struct rcar_du_crtc crtcs[3];
+ struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
- struct rcar_du_group groups[2];
+ struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
unsigned int dpad0_source;
- struct rcar_du_lvdsenc *lvds[2];
+ struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
+
+ struct {
+ wait_queue_head_t wait;
+ u32 pending;
+ } commit;
};
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 279167f783f6..d0ae1e8009c6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -42,46 +42,40 @@ rcar_du_connector_best_encoder(struct drm_connector *connector)
* Encoder
*/
-static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void rcar_du_encoder_disable(struct drm_encoder *encoder)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ if (renc->lvds)
+ rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false);
+}
+
+static void rcar_du_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
if (renc->lvds)
- rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
+ rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true);
}
-static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ const struct drm_display_mode *mode = &crtc_state->mode;
const struct drm_display_mode *panel_mode;
+ struct drm_connector *connector = conn_state->connector;
struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
- bool found = false;
/* DAC encoders have currently no restriction on the mode. */
if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
- return true;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_dbg(dev->dev, "mode_fixup: no connector found\n");
- return false;
- }
+ return 0;
if (list_empty(&connector->modes)) {
- dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
- return false;
+ dev_dbg(dev->dev, "encoder: empty modes list\n");
+ return -EINVAL;
}
panel_mode = list_first_entry(&connector->modes,
@@ -90,7 +84,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
/* We're not allowed to modify the resolution. */
if (mode->hdisplay != panel_mode->hdisplay ||
mode->vdisplay != panel_mode->vdisplay)
- return false;
+ return -EINVAL;
/* The flat panel mode is fixed, just copy it to the adjusted mode. */
drm_mode_copy(adjusted_mode, panel_mode);
@@ -102,25 +96,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->clock = clamp(adjusted_mode->clock,
30000, 150000);
- return true;
-}
-
-static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- if (renc->lvds)
- rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
- DRM_MODE_DPMS_OFF);
-}
-
-static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- if (renc->lvds)
- rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
- DRM_MODE_DPMS_ON);
+ return 0;
}
static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
@@ -133,11 +109,10 @@ static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_encoder_dpms,
- .mode_fixup = rcar_du_encoder_mode_fixup,
- .prepare = rcar_du_encoder_mode_prepare,
- .commit = rcar_du_encoder_mode_commit,
.mode_set = rcar_du_encoder_mode_set,
+ .disable = rcar_du_encoder_disable,
+ .enable = rcar_du_encoder_enable,
+ .atomic_check = rcar_du_encoder_atomic_check,
};
static const struct drm_encoder_funcs encoder_funcs = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 0c38cdcda4ca..ed36433fbe84 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -14,6 +14,8 @@
#ifndef __RCAR_DU_GROUP_H__
#define __RCAR_DU_GROUP_H__
+#include <linux/mutex.h>
+
#include "rcar_du_plane.h"
struct rcar_du_device;
@@ -25,6 +27,7 @@ struct rcar_du_device;
* @index: group index
* @use_count: number of users of the group (rcar_du_group_(get|put))
* @used_crtcs: number of CRTCs currently in use
+ * @lock: protects the DPTSR register
* @planes: planes handled by the group
*/
struct rcar_du_group {
@@ -35,6 +38,8 @@ struct rcar_du_group {
unsigned int use_count;
unsigned int used_crtcs;
+ struct mutex lock;
+
struct rcar_du_planes planes;
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index ca94b029ac80..96f2eb43713c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -12,6 +12,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
@@ -74,10 +75,13 @@ rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = rcar_du_hdmi_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
@@ -108,7 +112,7 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
@@ -116,7 +120,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- connector->encoder = encoder;
rcon->encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 221f0a17fd6a..81da8419282b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -26,41 +26,50 @@
struct rcar_du_hdmienc {
struct rcar_du_encoder *renc;
struct device *dev;
- int dpms;
+ bool enabled;
};
#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
-static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
+static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ if (sfuncs->dpms)
+ sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
- if (hdmienc->dpms == mode)
- return;
+ if (hdmienc->renc->lvds)
+ rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
+ false);
- if (mode == DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
- rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+ hdmienc->enabled = false;
+}
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, mode);
+static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (hdmienc->renc->lvds)
+ rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
+ true);
- if (mode != DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
- rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+ if (sfuncs->dpms)
+ sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
- hdmienc->dpms = mode;
+ hdmienc->enabled = true;
}
-static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ const struct drm_display_mode *mode = &crtc_state->mode;
/* The internal LVDS encoder has a clock frequency operating range of
* 30MHz to 150MHz. Clamp the clock accordingly.
@@ -70,19 +79,9 @@ static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
30000, 150000);
if (sfuncs->mode_fixup == NULL)
- return true;
-
- return sfuncs->mode_fixup(encoder, mode, adjusted_mode);
-}
+ return 0;
-static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder)
-{
- rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder)
-{
- rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON);
+ return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
}
static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
@@ -99,18 +98,18 @@ static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_hdmienc_dpms,
- .mode_fixup = rcar_du_hdmienc_mode_fixup,
- .prepare = rcar_du_hdmienc_mode_prepare,
- .commit = rcar_du_hdmienc_mode_commit,
.mode_set = rcar_du_hdmienc_mode_set,
+ .disable = rcar_du_hdmienc_disable,
+ .enable = rcar_du_hdmienc_enable,
+ .atomic_check = rcar_du_hdmienc_atomic_check,
};
static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+ if (hdmienc->enabled)
+ rcar_du_hdmienc_disable(encoder);
drm_encoder_cleanup(encoder);
put_device(hdmienc->dev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index cc9136e8ee9c..93117f159a3b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -12,12 +12,15 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <linux/of_graph.h>
+#include <linux/wait.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
@@ -185,9 +188,309 @@ static void rcar_du_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(rcdu->fbdev);
}
+/* -----------------------------------------------------------------------------
+ * Atomic Check and Update
+ */
+
+/*
+ * Atomic hardware plane allocator
+ *
+ * The hardware plane allocator is solely based on the atomic plane states
+ * without keeping any external state to avoid races between .atomic_check()
+ * and .atomic_commit().
+ *
+ * The core idea is to avoid using a free planes bitmask that would need to be
+ * shared between check and commit handlers with a collective knowledge based on
+ * the allocated hardware plane(s) for each KMS plane. The allocator then loops
+ * over all plane states to compute the free planes bitmask, allocates hardware
+ * planes based on that bitmask, and stores the result back in the plane states.
+ *
+ * For this to work we need to access the current state of planes not touched by
+ * the atomic update. To ensure that it won't be modified, we need to lock all
+ * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
+ * updates from .atomic_check() up to completion (when swapping the states if
+ * the check step has succeeded) or rollback (when freeing the states if the
+ * check step has failed).
+ *
+ * Allocation is performed in the .atomic_check() handler and applied
+ * automatically when the core swaps the old and new states.
+ */
+
+static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
+ struct rcar_du_plane_state *state)
+{
+ const struct rcar_du_format_info *cur_format;
+
+ cur_format = to_rcar_du_plane_state(plane->plane.state)->format;
+
+ /* Lowering the number of planes doesn't strictly require reallocation
+ * as the extra hardware plane will be freed when committing, but doing
+ * so could lead to more fragmentation.
+ */
+ return !cur_format || cur_format->planes != state->format->planes;
+}
+
+static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
+{
+ unsigned int mask;
+
+ if (state->hwindex == -1)
+ return 0;
+
+ mask = 1 << state->hwindex;
+ if (state->format->planes == 2)
+ mask |= 1 << ((state->hwindex + 1) % 8);
+
+ return mask;
+}
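/*
 * Editorial note (not part of the patch): the (hwindex + 1) % 8 above makes
 * the pairing wrap around, so a two-plane format assigned to hardware plane 7
 * occupies planes 7 and 0.
 */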
+
+static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
+{
+ unsigned int i;
+
+ for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
+ if (!(free & (1 << i)))
+ continue;
+
+ if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
+ break;
+ }
+
+ return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
+}
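/*
 * Worked example (editorial, not part of the patch): with free = 0x2c, i.e.
 * hardware planes 2, 3 and 5 available, rcar_du_plane_hwalloc(2, 0x2c)
 * returns 2 because planes 2 and 3 are both free; a single-plane format would
 * likewise get plane 2, the lowest free one. When no suitable plane (or pair)
 * exists the function returns -EBUSY and the atomic check fails.
 */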
+
+static int rcar_du_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct rcar_du_device *rcdu = dev->dev_private;
+ unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+ unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+ bool needs_realloc = false;
+ unsigned int groups = 0;
+ unsigned int i;
+ int ret;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret < 0)
+ return ret;
+
+ /* Check if hardware planes need to be reallocated. */
+ for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ struct rcar_du_plane_state *plane_state;
+ struct rcar_du_plane *plane;
+ unsigned int index;
+
+ if (!state->planes[i])
+ continue;
+
+ plane = to_rcar_plane(state->planes[i]);
+ plane_state = to_rcar_du_plane_state(state->plane_states[i]);
+
+ /* If the plane is being disabled we don't need to go through
+ * the full reallocation procedure. Just mark the hardware
+ * plane(s) as freed.
+ */
+ if (!plane_state->format) {
+ index = plane - plane->group->planes.planes;
+ group_freed_planes[plane->group->index] |= 1 << index;
+ plane_state->hwindex = -1;
+ continue;
+ }
+
+ /* If the plane needs to be reallocated mark it as such, and
+ * mark the hardware plane(s) as free.
+ */
+ if (rcar_du_plane_needs_realloc(plane, plane_state)) {
+ groups |= 1 << plane->group->index;
+ needs_realloc = true;
+
+ index = plane - plane->group->planes.planes;
+ group_freed_planes[plane->group->index] |= 1 << index;
+ plane_state->hwindex = -1;
+ }
+ }
+
+ if (!needs_realloc)
+ return 0;
+
+ /* Grab all plane states for the groups that need reallocation to ensure
+ * locking and avoid racy updates. This serializes the update operation,
+ * but there's not much we can do about it as that's the hardware
+ * design.
+ *
+ * Compute the used planes mask for each group at the same time to avoid
+ * looping over the planes separately later.
+ */
+ while (groups) {
+ unsigned int index = ffs(groups) - 1;
+ struct rcar_du_group *group = &rcdu->groups[index];
+ unsigned int used_planes = 0;
+
+ for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
+ struct rcar_du_plane *plane = &group->planes.planes[i];
+ struct rcar_du_plane_state *plane_state;
+ struct drm_plane_state *s;
+
+ s = drm_atomic_get_plane_state(state, &plane->plane);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ /* If the plane has been freed in the above loop its
+ * hardware planes must not be added to the used planes
+ * bitmask. However, the current state doesn't reflect
+ * the free state yet, as we've modified the new state
+ * above. Use the local freed planes list to check for
+ * that condition instead.
+ */
+ if (group_freed_planes[index] & (1 << i))
+ continue;
+
+ plane_state = to_rcar_du_plane_state(plane->plane.state);
+ used_planes |= rcar_du_plane_hwmask(plane_state);
+ }
+
+ group_free_planes[index] = 0xff & ~used_planes;
+ groups &= ~(1 << index);
+ }
+
+ /* Reallocate hardware planes for each plane that needs it. */
+ for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ struct rcar_du_plane_state *plane_state;
+ struct rcar_du_plane *plane;
+ int idx;
+
+ if (!state->planes[i])
+ continue;
+
+ plane = to_rcar_plane(state->planes[i]);
+ plane_state = to_rcar_du_plane_state(state->plane_states[i]);
+
+ /* Skip planes that are being disabled or don't need to be
+ * reallocated.
+ */
+ if (!plane_state->format ||
+ !rcar_du_plane_needs_realloc(plane, plane_state))
+ continue;
+
+ idx = rcar_du_plane_hwalloc(plane_state->format->planes,
+ group_free_planes[plane->group->index]);
+ if (idx < 0) {
+ dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
+ __func__);
+ return idx;
+ }
+
+ plane_state->hwindex = idx;
+
+ group_free_planes[plane->group->index] &=
+ ~rcar_du_plane_hwmask(plane_state);
+ }
+
+ return 0;
+}
+
+struct rcar_du_commit {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_atomic_state *state;
+ u32 crtcs;
+};
+
+static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
+{
+ struct drm_device *dev = commit->dev;
+ struct rcar_du_device *rcdu = dev->dev_private;
+ struct drm_atomic_state *old_state = commit->state;
+
+ /* Apply the atomic update. */
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_planes(dev, old_state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+
+ drm_atomic_state_free(old_state);
+
+ /* Complete the commit, wake up any waiter. */
+ spin_lock(&rcdu->commit.wait.lock);
+ rcdu->commit.pending &= ~commit->crtcs;
+ wake_up_all_locked(&rcdu->commit.wait);
+ spin_unlock(&rcdu->commit.wait.lock);
+
+ kfree(commit);
+}
+
+static void rcar_du_atomic_work(struct work_struct *work)
+{
+ struct rcar_du_commit *commit =
+ container_of(work, struct rcar_du_commit, work);
+
+ rcar_du_atomic_complete(commit);
+}
+
+static int rcar_du_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async)
+{
+ struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_commit *commit;
+ unsigned int i;
+ int ret;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /* Allocate the commit object. */
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (commit == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&commit->work, rcar_du_atomic_work);
+ commit->dev = dev;
+ commit->state = state;
+
+ /* Wait until all affected CRTCs have completed previous commits and
+ * mark them as pending.
+ */
+ for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+ if (state->crtcs[i])
+ commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+ }
+
+ spin_lock(&rcdu->commit.wait.lock);
+ ret = wait_event_interruptible_locked(rcdu->commit.wait,
+ !(rcdu->commit.pending & commit->crtcs));
+ if (ret == 0)
+ rcdu->commit.pending |= commit->crtcs;
+ spin_unlock(&rcdu->commit.wait.lock);
+
+ if (ret) {
+ kfree(commit);
+ return ret;
+ }
+
+ /* Swap the state, this is the point of no return. */
+ drm_atomic_helper_swap_state(dev, state);
+
+ if (async)
+ schedule_work(&commit->work);
+ else
+ rcar_du_atomic_complete(commit);
+
+ return 0;
+}
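/*
 * Worked example (editorial, not part of the patch): an update touching CRTCs
 * 0 and 2 yields commit->crtcs = 0x5. rcar_du_atomic_commit() sleeps until
 * rcdu->commit.pending has neither bit set, marks both bits pending and swaps
 * the state; rcar_du_atomic_complete() later clears the bits and wakes any
 * waiter. Commits touching disjoint CRTC sets can therefore proceed
 * concurrently.
 */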
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
.output_poll_changed = rcar_du_output_poll_changed,
+ .atomic_check = rcar_du_atomic_check,
+ .atomic_commit = rcar_du_atomic_commit,
};
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
@@ -206,7 +509,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
struct device_node *connector = NULL;
struct device_node *encoder = NULL;
- struct device_node *prev = NULL;
+ struct device_node *ep_node = NULL;
struct device_node *entity_ep_node;
struct device_node *entity;
int ret;
@@ -224,16 +527,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
- while (1) {
- struct device_node *ep_node;
-
- ep_node = of_graph_get_next_endpoint(entity, prev);
- of_node_put(prev);
- prev = ep_node;
-
- if (!ep_node)
- break;
-
+ for_each_endpoint_of_node(entity, ep_node) {
if (ep_node == entity_ep_node)
continue;
@@ -300,27 +594,19 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
struct device_node *np = rcdu->dev->of_node;
- struct device_node *prev = NULL;
+ struct device_node *ep_node;
unsigned int num_encoders = 0;
/*
* Iterate over the endpoints and create one encoder for each output
* pipeline.
*/
- while (1) {
- struct device_node *ep_node;
+ for_each_endpoint_of_node(np, ep_node) {
enum rcar_du_output output;
struct of_endpoint ep;
unsigned int i;
int ret;
- ep_node = of_graph_get_next_endpoint(np, prev);
- of_node_put(prev);
- prev = ep_node;
-
- if (ep_node == NULL)
- break;
-
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret < 0) {
of_node_put(ep_node);
@@ -392,6 +678,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
for (i = 0; i < num_groups; ++i) {
struct rcar_du_group *rgrp = &rcdu->groups[i];
+ mutex_init(&rgrp->lock);
+
rgrp->dev = rcdu;
rgrp->mmio_offset = mmio_offsets[i];
rgrp->index = i;
@@ -439,27 +727,21 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
encoder->possible_clones = (1 << num_encoders) - 1;
}
- /* Now that the CRTCs have been initialized register the planes. */
- for (i = 0; i < num_groups; ++i) {
- ret = rcar_du_planes_register(&rcdu->groups[i]);
- if (ret < 0)
- return ret;
- }
+ drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- drm_helper_disable_unused_functions(dev);
+ if (dev->mode_config.num_connector) {
+ fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
- fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
-#ifndef CONFIG_FRAMEBUFFER_CONSOLE
- drm_fbdev_cma_restore_mode(fbdev);
-#endif
-
- rcdu->fbdev = fbdev;
+ rcdu->fbdev = fbdev;
+ } else {
+ dev_info(rcdu->dev,
+ "no connector found, disabling fbdev emulation\n");
+ }
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 6d9811c052c4..0c43032fc693 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -12,6 +12,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -74,10 +75,13 @@ rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = rcar_du_lvds_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
@@ -117,7 +121,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
@@ -125,7 +129,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- connector->encoder = encoder;
lvdscon->connector.encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index 7cfb48ce1791..85043c5bad03 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -28,7 +28,7 @@ struct rcar_du_lvdsenc {
unsigned int index;
void __iomem *mmio;
struct clk *clock;
- int dpms;
+ bool enabled;
enum rcar_lvds_input input;
};
@@ -48,7 +48,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
u32 pllcr;
int ret;
- if (lvds->dpms == DRM_MODE_DPMS_ON)
+ if (lvds->enabled)
return 0;
ret = clk_prepare_enable(lvds->clock);
@@ -110,13 +110,13 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
lvdcr0 |= LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
- lvds->dpms = DRM_MODE_DPMS_ON;
+ lvds->enabled = true;
return 0;
}
static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
{
- if (lvds->dpms == DRM_MODE_DPMS_OFF)
+ if (!lvds->enabled)
return;
rcar_lvds_write(lvds, LVDCR0, 0);
@@ -124,13 +124,13 @@ static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
clk_disable_unprepare(lvds->clock);
- lvds->dpms = DRM_MODE_DPMS_OFF;
+ lvds->enabled = false;
}
-int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, int mode)
+int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc,
+ bool enable)
{
- if (mode == DRM_MODE_DPMS_OFF) {
+ if (!enable) {
rcar_du_lvdsenc_stop(lvds);
return 0;
} else if (crtc) {
@@ -179,7 +179,7 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
lvds->dev = rcdu;
lvds->index = i;
lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
- lvds->dpms = DRM_MODE_DPMS_OFF;
+ lvds->enabled = false;
ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
if (ret < 0)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index f65aabda0796..9a6001c07303 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -28,15 +28,15 @@ enum rcar_lvds_input {
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
-int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, int mode);
+int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, bool enable);
#else
static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
{
return 0;
}
-static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, int mode)
+static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, bool enable)
{
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 50f2f2b20d39..210e5c3fd982 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -12,10 +12,12 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
@@ -26,16 +28,6 @@
#define RCAR_DU_COLORKEY_SOURCE (1 << 24)
#define RCAR_DU_COLORKEY_MASK (1 << 24)
-struct rcar_du_kms_plane {
- struct drm_plane plane;
- struct rcar_du_plane *hwplane;
-};
-
-static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
-{
- return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
-}
-
static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
unsigned int index, u32 reg)
{
@@ -50,74 +42,31 @@ static void rcar_du_plane_write(struct rcar_du_group *rgrp,
data);
}
-int rcar_du_plane_reserve(struct rcar_du_plane *plane,
- const struct rcar_du_format_info *format)
-{
- struct rcar_du_group *rgrp = plane->group;
- unsigned int i;
- int ret = -EBUSY;
-
- mutex_lock(&rgrp->planes.lock);
-
- for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
- if (!(rgrp->planes.free & (1 << i)))
- continue;
-
- if (format->planes == 1 ||
- rgrp->planes.free & (1 << ((i + 1) % 8)))
- break;
- }
-
- if (i == ARRAY_SIZE(rgrp->planes.planes))
- goto done;
-
- rgrp->planes.free &= ~(1 << i);
- if (format->planes == 2)
- rgrp->planes.free &= ~(1 << ((i + 1) % 8));
-
- plane->hwindex = i;
-
- ret = 0;
-
-done:
- mutex_unlock(&rgrp->planes.lock);
- return ret;
-}
-
-void rcar_du_plane_release(struct rcar_du_plane *plane)
-{
- struct rcar_du_group *rgrp = plane->group;
-
- if (plane->hwindex == -1)
- return;
-
- mutex_lock(&rgrp->planes.lock);
- rgrp->planes.free |= 1 << plane->hwindex;
- if (plane->format->planes == 2)
- rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
- mutex_unlock(&rgrp->planes.lock);
-
- plane->hwindex = -1;
-}
-
-void rcar_du_plane_update_base(struct rcar_du_plane *plane)
+static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane)
{
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
+ struct drm_framebuffer *fb = plane->plane.state->fb;
struct rcar_du_group *rgrp = plane->group;
- unsigned int index = plane->hwindex;
+ unsigned int src_x = state->state.src_x >> 16;
+ unsigned int src_y = state->state.src_y >> 16;
+ unsigned int index = state->hwindex;
+ struct drm_gem_cma_object *gem;
bool interlaced;
u32 mwr;
- interlaced = plane->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+ interlaced = state->state.crtc->state->adjusted_mode.flags
+ & DRM_MODE_FLAG_INTERLACE;
/* Memory pitch (expressed in pixels). Must be doubled for interlaced
* operation with 32bpp formats.
*/
- if (plane->format->planes == 2)
- mwr = plane->pitch;
+ if (state->format->planes == 2)
+ mwr = fb->pitches[0];
else
- mwr = plane->pitch * 8 / plane->format->bpp;
+ mwr = fb->pitches[0] * 8 / state->format->bpp;
- if (interlaced && plane->format->bpp == 32)
+ if (interlaced && state->format->bpp == 32)
mwr *= 2;
rcar_du_plane_write(rgrp, index, PnMWR, mwr);
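/*
 * Worked example (editorial, not part of the patch): a 1920x1080 XRGB8888
 * framebuffer with fb->pitches[0] = 7680 bytes gives
 * mwr = 7680 * 8 / 32 = 1920 pixels, doubled to 3840 when the adjusted mode
 * is interlaced. Two-plane formats program the pitch in bytes as-is.
 */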
@@ -134,42 +83,33 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
* require a halved Y position value, in both progressive and interlaced
* modes.
*/
- rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
- (!interlaced && plane->format->bpp == 32 ? 2 : 1));
- rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
+ rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
+ (!interlaced && state->format->bpp == 32 ? 2 : 1));
- if (plane->format->planes == 2) {
- index = (index + 1) % 8;
-
- rcar_du_plane_write(rgrp, index, PnMWR, plane->pitch);
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, gem->paddr + fb->offsets[0]);
- rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
- (plane->format->bpp == 16 ? 2 : 1) / 2);
- rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
- }
-}
+ if (state->format->planes == 2) {
+ index = (index + 1) % 8;
-void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
- struct drm_framebuffer *fb)
-{
- struct drm_gem_cma_object *gem;
+ rcar_du_plane_write(rgrp, index, PnMWR, fb->pitches[0]);
- plane->pitch = fb->pitches[0];
+ rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
+ (state->format->bpp == 16 ? 2 : 1) / 2);
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- plane->dma[0] = gem->paddr + fb->offsets[0];
-
- if (plane->format->planes == 2) {
gem = drm_fb_cma_get_gem_obj(fb, 1);
- plane->dma[1] = gem->paddr + fb->offsets[1];
+ rcar_du_plane_write(rgrp, index, PnDSA0R,
+ gem->paddr + fb->offsets[1]);
}
}
static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
unsigned int index)
{
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
struct rcar_du_group *rgrp = plane->group;
u32 colorkey;
u32 pnmr;
@@ -183,47 +123,47 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
* For XRGB, set the alpha value to the plane-wide alpha value and
* enable alpha-blending regardless of the X bit value.
*/
- if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
+ if (state->format->fourcc != DRM_FORMAT_XRGB1555)
rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
else
rcar_du_plane_write(rgrp, index, PnALPHAR,
- PnALPHAR_ABIT_X | plane->alpha);
+ PnALPHAR_ABIT_X | state->alpha);
- pnmr = PnMR_BM_MD | plane->format->pnmr;
+ pnmr = PnMR_BM_MD | state->format->pnmr;
/* Disable color keying when requested. YUV formats have the
* PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
* automatically.
*/
- if ((plane->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
+ if ((state->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
pnmr |= PnMR_SPIM_TP_OFF;
/* For packed YUV formats we need to select the U/V order. */
- if (plane->format->fourcc == DRM_FORMAT_YUYV)
+ if (state->format->fourcc == DRM_FORMAT_YUYV)
pnmr |= PnMR_YCDF_YUYV;
rcar_du_plane_write(rgrp, index, PnMR, pnmr);
- switch (plane->format->fourcc) {
+ switch (state->format->fourcc) {
case DRM_FORMAT_RGB565:
- colorkey = ((plane->colorkey & 0xf80000) >> 8)
- | ((plane->colorkey & 0x00fc00) >> 5)
- | ((plane->colorkey & 0x0000f8) >> 3);
+ colorkey = ((state->colorkey & 0xf80000) >> 8)
+ | ((state->colorkey & 0x00fc00) >> 5)
+ | ((state->colorkey & 0x0000f8) >> 3);
rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- colorkey = ((plane->colorkey & 0xf80000) >> 9)
- | ((plane->colorkey & 0x00f800) >> 6)
- | ((plane->colorkey & 0x0000f8) >> 3);
+ colorkey = ((state->colorkey & 0xf80000) >> 9)
+ | ((state->colorkey & 0x00f800) >> 6)
+ | ((state->colorkey & 0x0000f8) >> 3);
rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
rcar_du_plane_write(rgrp, index, PnTC3R,
- PnTC3R_CODE | (plane->colorkey & 0xffffff));
+ PnTC3R_CODE | (state->colorkey & 0xffffff));
break;
}
}
@@ -231,6 +171,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
unsigned int index)
{
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
struct rcar_du_group *rgrp = plane->group;
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
@@ -242,17 +184,17 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
*/
ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
ddcr4 &= ~PnDDCR4_EDF_MASK;
- ddcr4 |= plane->format->edf | PnDDCR4_CODE;
+ ddcr4 |= state->format->edf | PnDDCR4_CODE;
rcar_du_plane_setup_mode(plane, index);
- if (plane->format->planes == 2) {
- if (plane->hwindex != index) {
- if (plane->format->fourcc == DRM_FORMAT_NV12 ||
- plane->format->fourcc == DRM_FORMAT_NV21)
+ if (state->format->planes == 2) {
+ if (state->hwindex != index) {
+ if (state->format->fourcc == DRM_FORMAT_NV12 ||
+ state->format->fourcc == DRM_FORMAT_NV21)
ddcr2 |= PnDDCR2_Y420;
- if (plane->format->fourcc == DRM_FORMAT_NV21)
+ if (state->format->fourcc == DRM_FORMAT_NV21)
ddcr2 |= PnDDCR2_NV21;
ddcr2 |= PnDDCR2_DIVU;
@@ -265,10 +207,10 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
/* Destination position and size */
- rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
- rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
- rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
- rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
+ rcar_du_plane_write(rgrp, index, PnDSXR, plane->plane.state->crtc_w);
+ rcar_du_plane_write(rgrp, index, PnDSYR, plane->plane.state->crtc_h);
+ rcar_du_plane_write(rgrp, index, PnDPXR, plane->plane.state->crtc_x);
+ rcar_du_plane_write(rgrp, index, PnDPYR, plane->plane.state->crtc_y);
/* Wrap-around and blinking, disabled */
rcar_du_plane_write(rgrp, index, PnWASPR, 0);
@@ -279,150 +221,143 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
void rcar_du_plane_setup(struct rcar_du_plane *plane)
{
- __rcar_du_plane_setup(plane, plane->hwindex);
- if (plane->format->planes == 2)
- __rcar_du_plane_setup(plane, (plane->hwindex + 1) % 8);
+ struct rcar_du_plane_state *state =
+ to_rcar_du_plane_state(plane->plane.state);
+
+ __rcar_du_plane_setup(plane, state->hwindex);
+ if (state->format->planes == 2)
+ __rcar_du_plane_setup(plane, (state->hwindex + 1) % 8);
- rcar_du_plane_update_base(plane);
+ rcar_du_plane_setup_fb(plane);
}
-static int
-rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static int rcar_du_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
+ struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state);
struct rcar_du_plane *rplane = to_rcar_plane(plane);
struct rcar_du_device *rcdu = rplane->group->dev;
- const struct rcar_du_format_info *format;
- unsigned int nplanes;
- int ret;
- format = rcar_du_format_info(fb->pixel_format);
- if (format == NULL) {
- dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
- fb->pixel_format);
- return -EINVAL;
+ if (!state->fb || !state->crtc) {
+ rstate->format = NULL;
+ return 0;
}
- if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+ if (state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h) {
dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
return -EINVAL;
}
- nplanes = rplane->format ? rplane->format->planes : 0;
-
- /* Reallocate hardware planes if the number of required planes has
- * changed.
- */
- if (format->planes != nplanes) {
- rcar_du_plane_release(rplane);
- ret = rcar_du_plane_reserve(rplane, format);
- if (ret < 0)
- return ret;
+ rstate->format = rcar_du_format_info(state->fb->pixel_format);
+ if (rstate->format == NULL) {
+ dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
+ state->fb->pixel_format);
+ return -EINVAL;
}
- rplane->crtc = crtc;
- rplane->format = format;
-
- rplane->src_x = src_x >> 16;
- rplane->src_y = src_y >> 16;
- rplane->dst_x = crtc_x;
- rplane->dst_y = crtc_y;
- rplane->width = crtc_w;
- rplane->height = crtc_h;
-
- rcar_du_plane_compute_base(rplane, fb);
- rcar_du_plane_setup(rplane);
-
- mutex_lock(&rplane->group->planes.lock);
- rplane->enabled = true;
- rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rplane->group->planes.lock);
-
return 0;
}
-static int rcar_du_plane_disable(struct drm_plane *plane)
+static void rcar_du_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct rcar_du_plane *rplane = to_rcar_plane(plane);
- if (!rplane->enabled)
- return 0;
+ if (plane->state->crtc)
+ rcar_du_plane_setup(rplane);
+}
- mutex_lock(&rplane->group->planes.lock);
- rplane->enabled = false;
- rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rplane->group->planes.lock);
+static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
+ .atomic_check = rcar_du_plane_atomic_check,
+ .atomic_update = rcar_du_plane_atomic_update,
+};
- rcar_du_plane_release(rplane);
+static void rcar_du_plane_reset(struct drm_plane *plane)
+{
+ struct rcar_du_plane_state *state;
- rplane->crtc = NULL;
- rplane->format = NULL;
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
- return 0;
-}
+ kfree(plane->state);
+ plane->state = NULL;
-/* Both the .set_property and the .update_plane operations are called with the
- * mode_config lock held. There is thus no need to explicitly protect access to
- * the alpha and colorkey fields and the mode register.
- */
-static void rcar_du_plane_set_alpha(struct rcar_du_plane *plane, u32 alpha)
-{
- if (plane->alpha == alpha)
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
return;
- plane->alpha = alpha;
- if (!plane->enabled || plane->format->fourcc != DRM_FORMAT_XRGB1555)
- return;
+ state->hwindex = -1;
+ state->alpha = 255;
+ state->colorkey = RCAR_DU_COLORKEY_NONE;
+ state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
- rcar_du_plane_setup_mode(plane, plane->hwindex);
+ plane->state = &state->state;
+ plane->state->plane = plane;
}
-static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
- u32 colorkey)
+static struct drm_plane_state *
+rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
{
- if (plane->colorkey == colorkey)
- return;
+ struct rcar_du_plane_state *state;
+ struct rcar_du_plane_state *copy;
- plane->colorkey = colorkey;
- if (!plane->enabled)
- return;
+ state = to_rcar_du_plane_state(plane->state);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (copy == NULL)
+ return NULL;
+
+ if (copy->state.fb)
+ drm_framebuffer_reference(copy->state.fb);
- rcar_du_plane_setup_mode(plane, plane->hwindex);
+ return &copy->state;
}
-static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
- unsigned int zpos)
+static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- mutex_lock(&plane->group->planes.lock);
- if (plane->zpos == zpos)
- goto done;
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
- plane->zpos = zpos;
- if (!plane->enabled)
- goto done;
+ kfree(to_rcar_du_plane_state(state));
+}
+
+static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state);
+ struct rcar_du_plane *rplane = to_rcar_plane(plane);
+ struct rcar_du_group *rgrp = rplane->group;
- rcar_du_crtc_update_planes(plane->crtc);
+ if (property == rgrp->planes.alpha)
+ rstate->alpha = val;
+ else if (property == rgrp->planes.colorkey)
+ rstate->colorkey = val;
+ else if (property == rgrp->planes.zpos)
+ rstate->zpos = val;
+ else
+ return -EINVAL;
-done:
- mutex_unlock(&plane->group->planes.lock);
+ return 0;
}
-static int rcar_du_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t value)
+static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state, struct drm_property *property,
+ uint64_t *val)
{
+ const struct rcar_du_plane_state *rstate =
+ container_of(state, const struct rcar_du_plane_state, state);
struct rcar_du_plane *rplane = to_rcar_plane(plane);
struct rcar_du_group *rgrp = rplane->group;
if (property == rgrp->planes.alpha)
- rcar_du_plane_set_alpha(rplane, value);
+ *val = rstate->alpha;
else if (property == rgrp->planes.colorkey)
- rcar_du_plane_set_colorkey(rplane, value);
+ *val = rstate->colorkey;
else if (property == rgrp->planes.zpos)
- rcar_du_plane_set_zpos(rplane, value);
+ *val = rstate->zpos;
else
return -EINVAL;
@@ -430,10 +365,15 @@ static int rcar_du_plane_set_property(struct drm_plane *plane,
}
static const struct drm_plane_funcs rcar_du_plane_funcs = {
- .update_plane = rcar_du_plane_update,
- .disable_plane = rcar_du_plane_disable,
- .set_property = rcar_du_plane_set_property,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = rcar_du_plane_reset,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
+ .atomic_duplicate_state = rcar_du_plane_atomic_duplicate_state,
+ .atomic_destroy_state = rcar_du_plane_atomic_destroy_state,
+ .atomic_set_property = rcar_du_plane_atomic_set_property,
+ .atomic_get_property = rcar_du_plane_atomic_get_property,
};
static const uint32_t formats[] = {
@@ -453,10 +393,11 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
{
struct rcar_du_planes *planes = &rgrp->planes;
struct rcar_du_device *rcdu = rgrp->dev;
+ unsigned int num_planes;
+ unsigned int num_crtcs;
+ unsigned int crtcs;
unsigned int i;
-
- mutex_init(&planes->lock);
- planes->free = 0xff;
+ int ret;
planes->alpha =
drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
@@ -478,45 +419,34 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
if (planes->zpos == NULL)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
- struct rcar_du_plane *plane = &planes->planes[i];
-
- plane->group = rgrp;
- plane->hwindex = -1;
- plane->alpha = 255;
- plane->colorkey = RCAR_DU_COLORKEY_NONE;
- plane->zpos = 0;
- }
-
- return 0;
-}
-
-int rcar_du_planes_register(struct rcar_du_group *rgrp)
-{
- struct rcar_du_planes *planes = &rgrp->planes;
- struct rcar_du_device *rcdu = rgrp->dev;
- unsigned int crtcs;
- unsigned int i;
- int ret;
+ /* Create one primary plane per CRTC in this group and seven overlay
+ * planes.
+ */
+ num_crtcs = min(rcdu->num_crtcs - 2 * rgrp->index, 2U);
+ num_planes = num_crtcs + 7;
crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
- for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
- struct rcar_du_kms_plane *plane;
-
- plane = devm_kzalloc(rcdu->dev, sizeof(*plane), GFP_KERNEL);
- if (plane == NULL)
- return -ENOMEM;
+ for (i = 0; i < num_planes; ++i) {
+ enum drm_plane_type type = i < num_crtcs
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
+ struct rcar_du_plane *plane = &planes->planes[i];
- plane->hwplane = &planes->planes[i + 2];
- plane->hwplane->zpos = 1;
+ plane->group = rgrp;
- ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_plane_funcs, formats,
- ARRAY_SIZE(formats), false);
+ ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
+ &rcar_du_plane_funcs, formats,
+ ARRAY_SIZE(formats), type);
if (ret < 0)
return ret;
+ drm_plane_helper_add(&plane->plane,
+ &rcar_du_plane_helper_funcs);
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
drm_object_attach_property(&plane->plane.base,
planes->alpha, 255);
drm_object_attach_property(&plane->plane.base,
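The possible-CRTC mask computed in rcar_du_planes_init() above is simply the intersection of "CRTCs that exist" with "the two CRTCs belonging to this group". As a worked example (the numbers are illustrative, not taken from a specific board): with rcdu->num_crtcs = 3 and rgrp->index = 1, ((1 << 3) - 1) = 0b111 and (3 << 2) = 0b1100, so crtcs = 0b111 & 0b1100 = 0b100, and the planes of this group can only be bound to CRTC 2.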
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 3021288b1a89..abff0ebeb195 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,68 +14,57 @@
#ifndef __RCAR_DU_PLANE_H__
#define __RCAR_DU_PLANE_H__
-#include <linux/mutex.h>
-
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
struct rcar_du_format_info;
struct rcar_du_group;
-/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
- * using KMS planes requires at least one of the CRTCs being enabled, no more
- * than 7 KMS planes can be available. We thus create 7 KMS planes and
- * 9 software planes (one for each KMS planes and one for each CRTC).
+/* The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+ * As using overlay planes requires at least one of the CRTCs being enabled, no
+ * more than 7 overlay planes can be available. We thus create 1 primary plane
+ * per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
*/
-
-#define RCAR_DU_NUM_KMS_PLANES 7
+#define RCAR_DU_NUM_KMS_PLANES 9
#define RCAR_DU_NUM_HW_PLANES 8
-#define RCAR_DU_NUM_SW_PLANES 9
struct rcar_du_plane {
+ struct drm_plane plane;
struct rcar_du_group *group;
- struct drm_crtc *crtc;
-
- bool enabled;
-
- int hwindex; /* 0-based, -1 means unused */
- unsigned int alpha;
- unsigned int colorkey;
- unsigned int zpos;
-
- const struct rcar_du_format_info *format;
-
- unsigned long dma[2];
- unsigned int pitch;
-
- unsigned int width;
- unsigned int height;
-
- unsigned int src_x;
- unsigned int src_y;
- unsigned int dst_x;
- unsigned int dst_y;
};
+static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct rcar_du_plane, plane);
+}
+
struct rcar_du_planes {
- struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
- unsigned int free;
- struct mutex lock;
+ struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES];
struct drm_property *alpha;
struct drm_property *colorkey;
struct drm_property *zpos;
};
+struct rcar_du_plane_state {
+ struct drm_plane_state state;
+
+ const struct rcar_du_format_info *format;
+ int hwindex; /* 0-based, -1 means unused */
+
+ unsigned int alpha;
+ unsigned int colorkey;
+ unsigned int zpos;
+};
+
+static inline struct rcar_du_plane_state *
+to_rcar_du_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct rcar_du_plane_state, state);
+}
+
int rcar_du_planes_init(struct rcar_du_group *rgrp);
-int rcar_du_planes_register(struct rcar_du_group *rgrp);
void rcar_du_plane_setup(struct rcar_du_plane *plane);
-void rcar_du_plane_update_base(struct rcar_du_plane *plane);
-void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
- struct drm_framebuffer *fb);
-int rcar_du_plane_reserve(struct rcar_du_plane *plane,
- const struct rcar_du_format_info *format);
-void rcar_du_plane_release(struct rcar_du_plane *plane);
#endif /* __RCAR_DU_PLANE_H__ */
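For reference, the rcar_du_plane_state subclassing above follows the usual atomic pattern: embed struct drm_plane_state, convert back with container_of(), and provide duplicate/destroy hooks that manage the framebuffer reference held by the state. A minimal sketch of that pattern, using hypothetical foo_* names (only the drm_* and kernel calls are real API):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo_plane_state {
	struct drm_plane_state state;	/* embedded base state */
	unsigned int alpha;		/* driver-private value */
};

static inline struct foo_plane_state *
to_foo_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct foo_plane_state, state);
}

static struct drm_plane_state *
foo_plane_atomic_duplicate_state(struct drm_plane *plane)
{
	struct foo_plane_state *copy;

	copy = kmemdup(to_foo_plane_state(plane->state), sizeof(*copy),
		       GFP_KERNEL);
	if (copy == NULL)
		return NULL;

	/* the duplicated state holds its own framebuffer reference */
	if (copy->state.fb)
		drm_framebuffer_reference(copy->state.fb);

	return &copy->state;
}

static void foo_plane_atomic_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	if (state->fb)
		drm_framebuffer_unreference(state->fb);
	kfree(to_foo_plane_state(state));
}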
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d4879921cc7..e0a5d8f93963 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -12,6 +12,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -43,10 +44,13 @@ rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_vga_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = rcar_du_vga_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
@@ -76,7 +80,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
@@ -84,7 +88,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- connector->encoder = encoder;
rcon->encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index d236faa05b19..80d6fc8a5cee 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -133,12 +133,12 @@ static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
}
};
-static const struct dw_hdmi_sym_term rockchip_sym_term[] = {
- /*pixelclk symbol term*/
- { 74250000, 0x8009, 0x0004 },
- { 148500000, 0x8029, 0x0004 },
- { 297000000, 0x8039, 0x0005 },
- { ~0UL, 0x0000, 0x0000 }
+static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
+ /*pixelclk symbol term vlev*/
+ { 74250000, 0x8009, 0x0004, 0x0272},
+ { 148500000, 0x802b, 0x0004, 0x028d},
+ { 297000000, 0x8039, 0x0005, 0x028d},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
};
static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
@@ -230,7 +230,7 @@ static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = {
.mode_valid = dw_hdmi_rockchip_mode_valid,
.mpll_cfg = rockchip_mpll_cfg,
.cur_ctr = rockchip_cur_ctr,
- .sym_term = rockchip_sym_term,
+ .phy_config = rockchip_phy_config,
.dev_type = RK3288_HDMI,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 21a481b224eb..3962176ee713 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -129,6 +129,7 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
struct rockchip_drm_private *private;
struct dma_iommu_mapping *mapping;
struct device *dev = drm_dev->dev;
+ struct drm_connector *connector;
int ret;
private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
@@ -171,6 +172,23 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_detach_device;
+ /*
+ * All components are now added, we can publish the connector sysfs
+ * entries to userspace. This will generate hotplug events and so
+ * userspace will expect to be able to access DRM at this point.
+ */
+ list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
+ head) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ dev_err(drm_dev->dev,
+ "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
+ connector->base.id,
+ connector->name, ret);
+ goto err_unbind;
+ }
+ }
+
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -200,6 +218,7 @@ err_vblank_cleanup:
drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
+err_unbind:
component_unbind_all(dev, drm_dev);
err_detach_device:
arm_iommu_detach_device(dev);
@@ -366,7 +385,7 @@ static const struct dev_pm_ops rockchip_drm_pm_ops = {
int rockchip_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder)
{
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct drm_crtc *crtc = encoder->crtc;
struct of_endpoint endpoint;
struct device_node *port;
@@ -375,18 +394,15 @@ int rockchip_drm_encoder_get_mux_id(struct device_node *node,
if (!node || !crtc)
return -EINVAL;
- do {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
port = of_graph_get_remote_port(ep);
of_node_put(port);
if (port == crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
+ of_node_put(ep);
return ret ?: endpoint.id;
}
- } while (ep);
+ }
return -EINVAL;
}
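The for_each_endpoint_of_node() conversion above leans on the iterator's reference handling: each returned endpoint is refcounted and the previous one is dropped automatically, so only an early exit needs an explicit of_node_put() (which is why the hunk adds one before the return). A small sketch of that pattern, with a hypothetical wanted() predicate:

#include <linux/of.h>
#include <linux/of_graph.h>

/* Return the first endpoint of @node accepted by wanted(), or NULL.
 * The reference taken by the iterator is handed to the caller, who
 * must drop it with of_node_put() when done.
 */
static struct device_node *
first_matching_endpoint(struct device_node *node,
			bool (*wanted)(struct device_node *ep))
{
	struct device_node *ep;

	for_each_endpoint_of_node(node, ep) {
		if (wanted(ep))
			return ep;	/* still holds the iterator's reference */
	}

	return NULL;
}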
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index a5d889a8716b..5b0dc0f6fd94 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -71,7 +71,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
- rk_obj = rockchip_gem_create_object(dev, size);
+ rk_obj = rockchip_gem_create_object(dev, size, true);
if (IS_ERR(rk_obj))
return -ENOMEM;
@@ -106,7 +106,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fb = helper->fb;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
@@ -119,6 +119,9 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
fb->width, fb->height, fb->depth, rk_obj->kvaddr,
offset, size);
+
+ fbi->skip_vt_switch = true;
+
return 0;
err_drm_framebuffer_unref:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7ca8799ef784..eb2282cc4a56 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -22,7 +22,8 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
-static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
+ bool alloc_kmap)
{
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;
@@ -30,7 +31,9 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
init_dma_attrs(&rk_obj->dma_attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
- /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
+ if (!alloc_kmap)
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);
+
rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
&rk_obj->dma_addr, GFP_KERNEL,
&rk_obj->dma_attrs);
@@ -103,7 +106,8 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}
struct rockchip_gem_object *
- rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
+ bool alloc_kmap)
{
struct rockchip_gem_object *rk_obj;
struct drm_gem_object *obj;
@@ -119,7 +123,7 @@ struct rockchip_gem_object *
drm_gem_private_object_init(drm, obj, size);
- ret = rockchip_gem_alloc_buf(rk_obj);
+ ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
if (ret)
goto err_free_rk_obj;
@@ -163,7 +167,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv,
struct drm_gem_object *obj;
int ret;
- rk_obj = rockchip_gem_create_object(drm, size);
+ rk_obj = rockchip_gem_create_object(drm, size, false);
if (IS_ERR(rk_obj))
return ERR_CAST(rk_obj);
@@ -282,6 +286,9 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
+ return NULL;
+
return rk_obj->kvaddr;
}
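The alloc_kmap flag threaded through rockchip_gem_create_object() above reduces to one decision at allocation time: whether to keep a kernel virtual mapping at all. A condensed sketch of that allocation path, using the dma_attrs interface exactly as in the code above (the helper name is illustrative):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void *alloc_scanout_buffer(struct device *dev, size_t size,
				  dma_addr_t *dma_addr, bool alloc_kmap,
				  struct dma_attrs *attrs)
{
	init_dma_attrs(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);

	/* fbdev needs kvaddr; GEM objects handed to userspace do not */
	if (!alloc_kmap)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	/* with DMA_ATTR_NO_KERNEL_MAPPING the return value is an opaque
	 * cookie rather than a usable kernel address, which is why
	 * rockchip_gem_prime_vmap() above then returns NULL */
	return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
}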
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 67bcebe90003..ad22618473a4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -41,7 +41,8 @@ int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma);
struct rockchip_gem_object *
- rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
+ bool alloc_kmap);
void rockchip_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 9a5c571b95fc..4557f335a8a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -81,7 +81,7 @@ struct vop {
struct drm_crtc crtc;
struct device *dev;
struct drm_device *drm_dev;
- unsigned int dpms;
+ bool is_enabled;
int connector_type;
int connector_out_mode;
@@ -89,6 +89,7 @@ struct vop {
/* mutex vsync_ work */
struct mutex vsync_mutex;
bool vsync_work_pending;
+ struct completion dsp_hold_completion;
const struct vop_data *data;
@@ -382,11 +383,50 @@ static bool is_alpha_support(uint32_t format)
}
}
+static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!vop->is_enabled))
+ return;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
+ DSP_HOLD_VALID_INTR_EN(1));
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!vop->is_enabled))
+ return;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
+ DSP_HOLD_VALID_INTR_EN(0));
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
static void vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
int ret;
+ if (vop->is_enabled)
+ return;
+
+ ret = pm_runtime_get_sync(vop->dev);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ return;
+ }
+
ret = clk_enable(vop->hclk);
if (ret < 0) {
dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
@@ -417,6 +457,11 @@ static void vop_enable(struct drm_crtc *crtc)
goto err_disable_aclk;
}
+ /*
+ * At this point the vop clock and iommu are enabled, so reading and
+ * writing the vop registers is safe.
+ */
+ vop->is_enabled = true;
+
spin_lock(&vop->reg_lock);
VOP_CTRL_SET(vop, standby, 0);
@@ -441,28 +486,44 @@ static void vop_disable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
- drm_vblank_off(crtc->dev, vop->pipe);
+ if (!vop->is_enabled)
+ return;
- disable_irq(vop->irq);
+ drm_vblank_off(crtc->dev, vop->pipe);
/*
- * TODO: Since standby doesn't take effect until the next vblank,
- * when we turn off dclk below, the vop is probably still active.
+ * Vop standby takes effect at the end of the current frame; the
+ * dsp hold valid irq signals that standby has completed.
+ *
+ * We must wait for standby to complete before disabling aclk,
+ * otherwise the memory bus may hang.
*/
+ reinit_completion(&vop->dsp_hold_completion);
+ vop_dsp_hold_valid_irq_enable(vop);
+
spin_lock(&vop->reg_lock);
VOP_CTRL_SET(vop, standby, 1);
spin_unlock(&vop->reg_lock);
+
+ wait_for_completion(&vop->dsp_hold_completion);
+
+ vop_dsp_hold_valid_irq_disable(vop);
+
+ disable_irq(vop->irq);
+
+ vop->is_enabled = false;
+
/*
- * disable dclk to stop frame scan, so we can safely detach iommu,
+ * Vop standby is complete, so it is safe to detach the iommu.
*/
- clk_disable(vop->dclk);
-
rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
+ clk_disable(vop->dclk);
clk_disable(vop->aclk);
clk_disable(vop->hclk);
+ pm_runtime_put(vop->dev);
}
/*
@@ -742,7 +803,7 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
unsigned long flags;
- if (vop->dpms != DRM_MODE_DPMS_ON)
+ if (!vop->is_enabled)
return -EPERM;
spin_lock_irqsave(&vop->irq_lock, flags);
@@ -759,8 +820,9 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
unsigned long flags;
- if (vop->dpms != DRM_MODE_DPMS_ON)
+ if (!vop->is_enabled)
return;
+
spin_lock_irqsave(&vop->irq_lock, flags);
vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -773,15 +835,8 @@ static const struct rockchip_crtc_funcs private_crtc_funcs = {
static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- struct vop *vop = to_vop(crtc);
-
DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
- if (vop->dpms == mode) {
- DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
- return;
- }
-
switch (mode) {
case DRM_MODE_DPMS_ON:
vop_enable(crtc);
@@ -795,8 +850,6 @@ static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
DRM_DEBUG_KMS("unspecified mode %d\n", mode);
break;
}
-
- vop->dpms = mode;
}
static void vop_crtc_prepare(struct drm_crtc *crtc)
@@ -847,7 +900,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
u16 vact_end = vact_st + vdisplay;
- int ret;
+ int ret, ret_clk;
uint32_t val;
/*
@@ -869,13 +922,14 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
default:
DRM_ERROR("unsupport connector_type[%d]\n",
vop->connector_type);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
};
VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
val = 0x8;
- val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
- val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
VOP_CTRL_SET(vop, pin_pol, val);
VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
@@ -892,7 +946,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
ret = vop_crtc_mode_set_base(crtc, x, y, fb);
if (ret)
- return ret;
+ goto out;
/*
* reset dclk, take all mode config affect, so the clk would run in
@@ -903,13 +957,14 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
reset_control_deassert(vop->dclk_rst);
clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
- ret = clk_enable(vop->dclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
- return ret;
+out:
+ ret_clk = clk_enable(vop->dclk);
+ if (ret_clk < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret_clk);
+ return ret_clk;
}
- return 0;
+ return ret;
}
static void vop_crtc_commit(struct drm_crtc *crtc)
@@ -934,9 +989,9 @@ static int vop_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb = crtc->primary->fb;
int ret;
- /* when the page flip is requested, crtc's dpms should be on */
- if (vop->dpms > DRM_MODE_DPMS_ON) {
- DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
+ /* when the page flip is requested, crtc should be on */
+ if (!vop->is_enabled) {
+ DRM_DEBUG("page flip request rejected because crtc is off.\n");
return 0;
}
@@ -1081,6 +1136,7 @@ static irqreturn_t vop_isr(int irq, void *data)
struct vop *vop = data;
uint32_t intr0_reg, active_irqs;
unsigned long flags;
+ int ret = IRQ_NONE;
/*
* INTR_CTRL0 register has interrupt status, enable and clear bits, we
@@ -1099,15 +1155,23 @@ static irqreturn_t vop_isr(int irq, void *data)
if (!active_irqs)
return IRQ_NONE;
- /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
- if (!(active_irqs & FS_INTR)) {
- DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
- return IRQ_NONE;
+ if (active_irqs & DSP_HOLD_VALID_INTR) {
+ complete(&vop->dsp_hold_completion);
+ active_irqs &= ~DSP_HOLD_VALID_INTR;
+ ret = IRQ_HANDLED;
+ }
+
+ if (active_irqs & FS_INTR) {
+ drm_handle_vblank(vop->drm_dev, vop->pipe);
+ active_irqs &= ~FS_INTR;
+ ret = (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
- drm_handle_vblank(vop->drm_dev, vop->pipe);
+ /* Unhandled irqs are spurious. */
+ if (active_irqs)
+ DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
- return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+ return ret;
}
static int vop_create_crtc(struct vop *vop)
@@ -1189,6 +1253,7 @@ static int vop_create_crtc(struct vop *vop)
goto err_cleanup_crtc;
}
+ init_completion(&vop->dsp_hold_completion);
crtc->port = port;
vop->pipe = drm_crtc_index(crtc);
rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
@@ -1302,7 +1367,7 @@ static int vop_initial(struct vop *vop)
clk_disable(vop->hclk);
- vop->dpms = DRM_MODE_DPMS_OFF;
+ vop->is_enabled = false;
return 0;
@@ -1344,7 +1409,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
struct vop *vop;
struct resource *res;
size_t alloc_size;
- int ret;
+ int ret, irq;
of_id = of_match_device(vop_driver_dt_match, dev);
vop_data = of_id->data;
@@ -1380,11 +1445,12 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- vop->irq = platform_get_irq(pdev, 0);
- if (vop->irq < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
dev_err(dev, "cannot find irq for vop\n");
- return vop->irq;
+ return irq;
}
+ vop->irq = (unsigned int)irq;
spin_lock_init(&vop->reg_lock);
spin_lock_init(&vop->irq_lock);
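The reworked vop_disable() above is a "kick, then wait for the interrupt" handshake built on a struct completion: arm the completion, enable the dsp-hold-valid interrupt, request standby, then sleep until the ISR reports that the current frame has finished scanning out. Reduced to the synchronization skeleton (register accesses replaced by comments, names illustrative), the pattern is:

#include <linux/completion.h>
#include <linux/interrupt.h>

static DECLARE_COMPLETION(dsp_hold_completion);

static void standby_and_wait(void)
{
	reinit_completion(&dsp_hold_completion);
	/* enable the DSP_HOLD_VALID interrupt */
	/* set the standby bit; it takes effect at the end of the frame */
	wait_for_completion(&dsp_hold_completion);
	/* disable the DSP_HOLD_VALID interrupt again */
}

static irqreturn_t dsp_hold_isr(int irq, void *data)
{
	/* called with DSP_HOLD_VALID_INTR pending and already cleared */
	complete(&dsp_hold_completion);
	return IRQ_HANDLED;
}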
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index e6f6ef7c4866..6b641c5a2ec7 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -9,6 +9,8 @@
#include <linux/clk.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
@@ -77,22 +79,18 @@ static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
}
static int
-sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb)
+sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- struct sti_layer *layer;
struct clk *clk;
int rate = mode->clock * 1000;
int res;
- unsigned int w, h;
- DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n",
+ DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
- crtc->primary->fb->base.id, mode->base.id, mode->name);
+ mode->base.id, mode->name);
DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
mode->vrefresh, mode->clock,
@@ -122,72 +120,13 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &crtc->mode);
- /* a GDP is reserved to the CRTC FB */
- layer = to_sti_layer(crtc->primary);
- if (!layer) {
- DRM_ERROR("Can not find GDP0)\n");
- return -EINVAL;
- }
-
- /* copy the mode data adjusted by mode_fixup() into crtc->mode
- * so that hardware can be set to proper mode
- */
- memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
-
- res = sti_mixer_set_layer_depth(mixer, layer);
- if (res) {
- DRM_ERROR("Can not set layer depth\n");
- return -EINVAL;
- }
res = sti_mixer_active_video_area(mixer, &crtc->mode);
if (res) {
DRM_ERROR("Can not set active video area\n");
return -EINVAL;
}
- w = crtc->primary->fb->width - x;
- h = crtc->primary->fb->height - y;
-
- return sti_layer_prepare(layer, crtc,
- crtc->primary->fb, &crtc->mode,
- mixer->id, 0, 0, w, h, x, y, w, h);
-}
-
-static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct sti_mixer *mixer = to_sti_mixer(crtc);
- struct sti_layer *layer;
- unsigned int w, h;
- int ret;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- crtc->primary->fb->base.id, x, y);
-
- /* GDP is reserved to the CRTC FB */
- layer = to_sti_layer(crtc->primary);
- if (!layer) {
- DRM_ERROR("Can not find GDP0)\n");
- ret = -EINVAL;
- goto out;
- }
-
- w = crtc->primary->fb->width - crtc->x;
- h = crtc->primary->fb->height - crtc->y;
-
- ret = sti_layer_prepare(layer, crtc,
- crtc->primary->fb, &crtc->mode,
- mixer->id, 0, 0, w, h,
- crtc->x, crtc->y, w, h);
- if (ret) {
- DRM_ERROR("Can not prepare layer\n");
- goto out;
- }
-
- sti_drm_crtc_commit(crtc);
-out:
- return ret;
+ return res;
}
static void sti_drm_crtc_disable(struct drm_crtc *crtc)
@@ -195,7 +134,6 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- struct sti_layer *layer;
if (!mixer->enabled)
return;
@@ -205,24 +143,6 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
/* Disable Background */
sti_mixer_set_background_status(mixer, false);
- /* Disable GDP */
- layer = to_sti_layer(crtc->primary);
- if (!layer) {
- DRM_ERROR("Cannot find GDP0\n");
- return;
- }
-
- /* Disable layer at mixer level */
- if (sti_mixer_set_layer_status(mixer, layer, false))
- DRM_ERROR("Can not disable %s layer at mixer\n",
- sti_layer_to_str(layer));
-
- /* Wait a while to be sure that a Vsync event is received */
- msleep(WAIT_NEXT_VSYNC_MS);
-
- /* Then disable layer itself */
- sti_layer_disable(layer);
-
drm_crtc_vblank_off(crtc);
/* Disable pixel clock and compo IP clocks */
@@ -237,64 +157,44 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
mixer->enabled = false;
}
-static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
- .dpms = sti_drm_crtc_dpms,
- .prepare = sti_drm_crtc_prepare,
- .commit = sti_drm_crtc_commit,
- .mode_fixup = sti_drm_crtc_mode_fixup,
- .mode_set = sti_drm_crtc_mode_set,
- .mode_set_base = sti_drm_crtc_mode_set_base,
- .disable = sti_drm_crtc_disable,
-};
+static void
+sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ sti_drm_crtc_prepare(crtc);
+ sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+}
-static int sti_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static void sti_drm_atomic_begin(struct drm_crtc *crtc)
{
- struct drm_device *drm_dev = crtc->dev;
- struct drm_framebuffer *old_fb;
struct sti_mixer *mixer = to_sti_mixer(crtc);
- unsigned long flags;
- int ret;
- DRM_DEBUG_KMS("fb %d --> fb %d\n",
- crtc->primary->fb->base.id, fb->base.id);
+ if (crtc->state->event) {
+ crtc->state->event->pipe = drm_crtc_index(crtc);
- mutex_lock(&drm_dev->struct_mutex);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- old_fb = crtc->primary->fb;
- crtc->primary->fb = fb;
- ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
- if (ret) {
- DRM_ERROR("failed\n");
- crtc->primary->fb = old_fb;
- goto out;
+ mixer->pending_event = crtc->state->event;
+ crtc->state->event = NULL;
}
+}
- if (event) {
- event->pipe = mixer->id;
-
- ret = drm_vblank_get(drm_dev, event->pipe);
- if (ret) {
- DRM_ERROR("Cannot get vblank\n");
- goto out;
- }
-
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- if (mixer->pending_event) {
- drm_vblank_put(drm_dev, event->pipe);
- ret = -EBUSY;
- } else {
- mixer->pending_event = event;
- }
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
- }
-out:
- mutex_unlock(&drm_dev->struct_mutex);
- return ret;
+static void sti_drm_atomic_flush(struct drm_crtc *crtc)
+{
}
+static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
+ .dpms = sti_drm_crtc_dpms,
+ .prepare = sti_drm_crtc_prepare,
+ .commit = sti_drm_crtc_commit,
+ .mode_fixup = sti_drm_crtc_mode_fixup,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_nofb = sti_drm_crtc_mode_set_nofb,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
+ .disable = sti_drm_crtc_disable,
+ .atomic_begin = sti_drm_atomic_begin,
+ .atomic_flush = sti_drm_atomic_flush,
+};
+
static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
{
DRM_DEBUG_KMS("\n");
@@ -380,10 +280,13 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
static struct drm_crtc_funcs sti_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
- .page_flip = sti_drm_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
.destroy = sti_drm_crtc_destroy,
.set_property = sti_drm_crtc_set_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
index 5239fa121726..59d558b400b3 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -28,8 +30,87 @@
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
+static void sti_drm_atomic_schedule(struct sti_drm_private *private,
+ struct drm_atomic_state *state)
+{
+ private->commit.state = state;
+ schedule_work(&private->commit.work);
+}
+
+static void sti_drm_atomic_complete(struct sti_drm_private *private,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = private->drm_dev;
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: It must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ drm_atomic_helper_commit_modeset_disables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
+
+ drm_atomic_helper_wait_for_vblanks(drm, state);
+
+ drm_atomic_helper_cleanup_planes(drm, state);
+ drm_atomic_state_free(state);
+}
+
+static void sti_drm_atomic_work(struct work_struct *work)
+{
+ struct sti_drm_private *private = container_of(work,
+ struct sti_drm_private, commit.work);
+
+ sti_drm_atomic_complete(private, private->commit.state);
+}
+
+static int sti_drm_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state, bool async)
+{
+ struct sti_drm_private *private = drm->dev_private;
+ int err;
+
+ err = drm_atomic_helper_prepare_planes(drm, state);
+ if (err)
+ return err;
+
+ /* serialize outstanding asynchronous commits */
+ mutex_lock(&private->commit.lock);
+ flush_work(&private->commit.work);
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(drm, state);
+
+ if (async)
+ sti_drm_atomic_schedule(private, state);
+ else
+ sti_drm_atomic_complete(private, state);
+
+ mutex_unlock(&private->commit.lock);
+ return 0;
+}
+
static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = sti_drm_atomic_commit,
};
static void sti_drm_mode_config_init(struct drm_device *dev)
@@ -61,6 +142,9 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)private;
private->drm_dev = dev;
+ mutex_init(&private->commit.lock);
+ INIT_WORK(&private->commit.work, sti_drm_atomic_work);
+
drm_mode_config_init(dev);
drm_kms_helper_poll_init(dev);
@@ -74,7 +158,7 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
return ret;
}
- drm_helper_disable_unused_functions(dev);
+ drm_mode_config_reset(dev);
#ifdef CONFIG_DRM_STI_FBDEV
drm_fbdev_cma_init(dev, 32,
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
index ec5e2eb8dff9..c413aa3ff402 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ b/drivers/gpu/drm/sti/sti_drm_drv.h
@@ -24,6 +24,12 @@ struct sti_drm_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
+
+ struct {
+ struct drm_atomic_state *state;
+ struct work_struct work;
+ struct mutex lock;
+ } commit;
};
#endif
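The commit.{lock,work,state} trio added above implements the usual single-outstanding-nonblocking-commit rule: a new commit first flushes the worker that may still be applying the previous state, then swaps its own state in under the mutex and either applies it directly or hands it to the worker. Stripped of the DRM specifics (all names here are illustrative), the serialization reduces to:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct commit_queue {
	struct mutex lock;
	struct work_struct work;
};

static void commit_work_fn(struct work_struct *work)
{
	/* apply the most recently swapped-in state to the hardware */
}

static void commit_queue_init(struct commit_queue *q)
{
	mutex_init(&q->lock);
	INIT_WORK(&q->work, commit_work_fn);
}

static void commit_queue_submit(struct commit_queue *q, bool async)
{
	mutex_lock(&q->lock);
	flush_work(&q->work);		/* wait for the previous async commit */

	/* ...swap the new software state in here... */

	if (async)
		schedule_work(&q->work);
	else
		commit_work_fn(&q->work);

	mutex_unlock(&q->lock);
}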
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
index bb6a29339e10..64d4ed43dda3 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -6,6 +6,10 @@
* License terms: GNU General Public License (GPL), version 2
*/
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
#include "sti_compositor.h"
#include "sti_drm_drv.h"
#include "sti_drm_plane.h"
@@ -33,9 +37,9 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct sti_mixer *mixer = to_sti_mixer(crtc);
int res;
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n",
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
- plane->base.id, sti_layer_to_str(layer), fb->base.id);
+ plane->base.id, sti_layer_to_str(layer));
DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
res = sti_mixer_set_layer_depth(mixer, layer);
@@ -110,7 +114,7 @@ static void sti_drm_plane_destroy(struct drm_plane *plane)
{
DRM_DEBUG_DRIVER("\n");
- sti_drm_disable_plane(plane);
+ drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
}
@@ -133,10 +137,58 @@ static int sti_drm_plane_set_property(struct drm_plane *plane,
}
static struct drm_plane_funcs sti_drm_plane_funcs = {
- .update_plane = sti_drm_update_plane,
- .disable_plane = sti_drm_disable_plane,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_drm_plane_destroy,
.set_property = sti_drm_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
+{
+ return 0;
+}
+
+static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_fb)
+{
+}
+
+static int sti_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void sti_drm_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = plane->state;
+
+ sti_drm_update_plane(plane, state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+}
+
+static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *oldstate)
+{
+ sti_drm_disable_plane(plane);
+}
+
+static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
+ .prepare_fb = sti_drm_plane_prepare_fb,
+ .cleanup_fb = sti_drm_plane_cleanup_fb,
+ .atomic_check = sti_drm_plane_atomic_check,
+ .atomic_update = sti_drm_plane_atomic_update,
+ .atomic_disable = sti_drm_plane_atomic_disable,
};
static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
@@ -178,11 +230,13 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
return NULL;
}
+ drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
+
for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
if (sti_layer_default_zorder[i] == layer->desc)
break;
- default_zorder = i;
+ default_zorder = i + 1;
if (type == DRM_PLANE_TYPE_OVERLAY)
sti_drm_plane_attach_zorder_property(&layer->plane,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index aeb5070c8363..a9b678af85a6 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_panel.h>
@@ -364,10 +365,13 @@ static void sti_dvo_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs sti_dvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_dvo_connector_detect,
.destroy = sti_dvo_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index a9bbb081ecad..598cd78b0b16 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
/* HDformatter registers */
@@ -611,10 +612,13 @@ static void sti_hda_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs sti_hda_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hda_connector_detect,
.destroy = sti_hda_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 1485ade98710..ae5424bd6b4c 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -13,6 +13,7 @@
#include <linux/reset.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@@ -663,10 +664,13 @@ static void sti_hdmi_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs sti_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
.destroy = sti_hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 1a52522f5da7..a287e4fec865 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -425,8 +425,8 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane_state *state;
- if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane, plane->state);
kfree(plane->state);
plane->state = NULL;
@@ -443,12 +443,14 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla
struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
struct tegra_plane_state *copy;
- copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
- if (copy->base.fb)
- drm_framebuffer_reference(copy->base.fb);
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+ copy->tiling = state->tiling;
+ copy->format = state->format;
+ copy->swap = state->swap;
return &copy->base;
}
@@ -456,9 +458,7 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- if (state->fb)
- drm_framebuffer_unreference(state->fb);
-
+ __drm_atomic_helper_plane_destroy_state(plane, state);
kfree(state);
}
@@ -472,13 +472,15 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = {
};
static int tegra_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
{
return 0;
}
static void tegra_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_fb)
{
}
@@ -906,6 +908,15 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
return 0;
}
+u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc)
+{
+ if (dc->syncpt)
+ return host1x_syncpt_read(dc->syncpt);
+
+ /* fallback to software emulated VBLANK counter */
+ return drm_crtc_vblank_count(&dc->base);
+}
+
void tegra_dc_enable_vblank(struct tegra_dc *dc)
{
unsigned long value, flags;
@@ -993,6 +1004,9 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
{
struct tegra_dc_state *state;
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc, crtc->state);
+
kfree(crtc->state);
crtc->state = NULL;
@@ -1009,14 +1023,15 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc_state *copy;
- copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
- copy->base.mode_changed = false;
- copy->base.active_changed = false;
- copy->base.planes_changed = false;
- copy->base.event = NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
+ copy->clk = state->clk;
+ copy->pclk = state->pclk;
+ copy->div = state->div;
+ copy->planes = state->planes;
return &copy->base;
}
@@ -1024,6 +1039,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ __drm_atomic_helper_crtc_destroy_state(crtc, state);
kfree(state);
}
@@ -1150,26 +1166,18 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
return 0;
}
-int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
- unsigned long pclk, unsigned int div)
-{
- u32 value;
- int err;
-
- err = clk_set_parent(dc->clk, parent);
- if (err < 0) {
- dev_err(dc->dev, "failed to set parent clock: %d\n", err);
- return err;
- }
-
- DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), div);
-
- value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
- tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
-
- return 0;
-}
-
+/**
+ * tegra_dc_state_setup_clock - check clock settings and store them in atomic
+ * state
+ * @dc: display controller
+ * @crtc_state: CRTC atomic state
+ * @clk: parent clock for display controller
+ * @pclk: pixel clock
+ * @div: shift clock divider
+ *
+ * Returns:
+ * 0 on success or a negative error-code on failure.
+ */
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
@@ -1177,6 +1185,9 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
{
struct tegra_dc_state *state = to_dc_state(crtc_state);
+ if (!clk_has_parent(dc->clk, clk))
+ return -EINVAL;
+
state->clk = clk;
state->pclk = pclk;
state->div = div;
@@ -1292,9 +1303,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = tegra_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
.atomic_check = tegra_crtc_atomic_check,
@@ -1629,7 +1638,6 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- unsigned int syncpt;
u32 value;
int err;
@@ -1698,13 +1706,15 @@ static int tegra_dc_init(struct host1x_client *client)
}
/* initialize display controller */
- if (dc->pipe)
- syncpt = SYNCPT_VBLANK1;
- else
- syncpt = SYNCPT_VBLANK0;
+ if (dc->syncpt) {
+ u32 syncpt = host1x_syncpt_id(dc->syncpt);
- tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
- tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+ value = SYNCPT_CNTRL_NO_STALL;
+ tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+ value = SYNCPT_VSYNC_ENABLE | syncpt;
+ tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+ }
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
@@ -1872,6 +1882,7 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
const struct of_device_id *id;
struct resource *regs;
struct tegra_dc *dc;
@@ -1963,6 +1974,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
+ dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+ if (!dc->syncpt)
+ dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
platform_set_drvdata(pdev, dc);
return 0;
@@ -1973,6 +1988,8 @@ static int tegra_dc_remove(struct platform_device *pdev)
struct tegra_dc *dc = platform_get_drvdata(pdev);
int err;
+ host1x_syncpt_free(dc->syncpt);
+
err = host1x_client_unregister(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 705c93b00794..55792daabbb5 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -12,6 +12,8 @@
#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define SYNCPT_CNTRL_NO_STALL (1 << 8)
+#define SYNCPT_CNTRL_SOFT_RESET (1 << 0)
#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
@@ -23,6 +25,7 @@
#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define SYNCPT_VSYNC_ENABLE (1 << 8)
#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
#define DC_CMD_DISPLAY_COMMAND 0x032
#define DISP_CTRL_MODE_STOP (0 << 5)
@@ -438,8 +441,4 @@
#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
-/* synchronization points */
-#define SYNCPT_VBLANK0 26
-#define SYNCPT_VBLANK1 27
-
#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 7dd328d77996..bfad15a913a0 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -55,9 +55,9 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
* current layout.
*/
- drm_atomic_helper_commit_pre_planes(drm, state);
+ drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_planes(drm, state);
- drm_atomic_helper_commit_post_planes(drm, state);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -172,6 +172,9 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
*/
drm->irq_enabled = true;
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->max_vblank_count = 0xffffffff;
+
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto device;
@@ -813,12 +816,12 @@ static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
if (!crtc)
return 0;
- /* TODO: implement real hardware counter using syncpoints */
- return drm_crtc_vblank_count(crtc);
+ return tegra_dc_get_vblank_counter(dc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
@@ -879,8 +882,18 @@ static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
return 0;
}
+static int tegra_debugfs_iova(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct tegra_drm *tegra = drm->dev_private;
+
+ return drm_mm_dump_table(s, &tegra->mm);
+}
+
static struct drm_info_list tegra_debugfs_list[] = {
{ "framebuffers", tegra_debugfs_framebuffers, 0 },
+ { "iova", tegra_debugfs_iova, 0 },
};
static int tegra_debugfs_init(struct drm_minor *minor)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 8cb2dfeaa957..659b2fcc986d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -106,6 +106,7 @@ struct tegra_output;
struct tegra_dc {
struct host1x_client client;
+ struct host1x_syncpt *syncpt;
struct device *dev;
spinlock_t lock;
@@ -180,12 +181,11 @@ struct tegra_dc_window {
};
/* from dc.c */
+u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc);
void tegra_dc_enable_vblank(struct tegra_dc *dc);
void tegra_dc_disable_vblank(struct tegra_dc *dc);
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
void tegra_dc_commit(struct tegra_dc *dc);
-int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
- unsigned long pclk, unsigned int div);
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index cfb481943b6b..1217272a51f2 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -627,8 +627,14 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
struct drm_gem_object *gem,
int flags)
{
- return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
- flags, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tegra_gem_prime_dmabuf_ops;
+ exp_info.size = gem->size;
+ exp_info.flags = flags;
+ exp_info.priv = gem;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7eaaee74a039..06ab1783bba1 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -952,7 +952,7 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
}
tegra_hdmi_writel(hdmi,
- SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC(0) |
SOR_SEQ_PU_PC_ALT(0) |
SOR_SEQ_PD_PC(8) |
SOR_SEQ_PD_PC_ALT(8),
@@ -1394,8 +1394,8 @@ static int tegra_hdmi_exit(struct host1x_client *client)
tegra_output_exit(&hdmi->output);
- clk_disable_unprepare(hdmi->clk);
reset_control_assert(hdmi->rst);
+ clk_disable_unprepare(hdmi->clk);
regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 919a19df4e1b..a882514389cd 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -201,7 +201,7 @@
#define HDMI_NV_PDISP_SOR_CRCB 0x5d
#define HDMI_NV_PDISP_SOR_BLANK 0x5e
#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
-#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC(x) (((x) & 0xf) << 0)
#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 2afe478ded3b..7591d8901f9a 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -41,6 +41,8 @@ struct tegra_sor {
struct mutex lock;
bool enabled;
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
struct dentry *debugfs;
};
@@ -68,13 +70,12 @@ static inline struct tegra_sor *to_sor(struct tegra_output *output)
return container_of(output, struct tegra_sor, output);
}
-static inline unsigned long tegra_sor_readl(struct tegra_sor *sor,
- unsigned long offset)
+static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned long offset)
{
return readl(sor->regs + (offset << 2));
}
-static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
+static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
unsigned long offset)
{
writel(value, sor->regs + (offset << 2));
@@ -83,9 +84,9 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
struct drm_dp_link *link)
{
- unsigned long value;
unsigned int i;
u8 pattern;
+ u32 value;
int err;
/* setup lane parameters */
@@ -202,7 +203,7 @@ static void tegra_sor_update(struct tegra_sor *sor)
static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
value = tegra_sor_readl(sor, SOR_PWM_DIV);
value &= ~SOR_PWM_DIV_MASK;
@@ -281,7 +282,7 @@ static int tegra_sor_wakeup(struct tegra_sor *sor)
static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
value = tegra_sor_readl(sor, SOR_PWR);
value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU;
@@ -674,38 +675,195 @@ static const struct file_operations tegra_sor_crc_fops = {
.release = tegra_sor_crc_release,
};
+static int tegra_sor_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_sor *sor = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-38s %#05x %08x\n", #name, name, \
+ tegra_sor_readl(sor, name))
+
+ DUMP_REG(SOR_CTXSW);
+ DUMP_REG(SOR_SUPER_STATE_0);
+ DUMP_REG(SOR_SUPER_STATE_1);
+ DUMP_REG(SOR_STATE_0);
+ DUMP_REG(SOR_STATE_1);
+ DUMP_REG(SOR_HEAD_STATE_0(0));
+ DUMP_REG(SOR_HEAD_STATE_0(1));
+ DUMP_REG(SOR_HEAD_STATE_1(0));
+ DUMP_REG(SOR_HEAD_STATE_1(1));
+ DUMP_REG(SOR_HEAD_STATE_2(0));
+ DUMP_REG(SOR_HEAD_STATE_2(1));
+ DUMP_REG(SOR_HEAD_STATE_3(0));
+ DUMP_REG(SOR_HEAD_STATE_3(1));
+ DUMP_REG(SOR_HEAD_STATE_4(0));
+ DUMP_REG(SOR_HEAD_STATE_4(1));
+ DUMP_REG(SOR_HEAD_STATE_5(0));
+ DUMP_REG(SOR_HEAD_STATE_5(1));
+ DUMP_REG(SOR_CRC_CNTRL);
+ DUMP_REG(SOR_DP_DEBUG_MVID);
+ DUMP_REG(SOR_CLK_CNTRL);
+ DUMP_REG(SOR_CAP);
+ DUMP_REG(SOR_PWR);
+ DUMP_REG(SOR_TEST);
+ DUMP_REG(SOR_PLL_0);
+ DUMP_REG(SOR_PLL_1);
+ DUMP_REG(SOR_PLL_2);
+ DUMP_REG(SOR_PLL_3);
+ DUMP_REG(SOR_CSTM);
+ DUMP_REG(SOR_LVDS);
+ DUMP_REG(SOR_CRC_A);
+ DUMP_REG(SOR_CRC_B);
+ DUMP_REG(SOR_BLANK);
+ DUMP_REG(SOR_SEQ_CTL);
+ DUMP_REG(SOR_LANE_SEQ_CTL);
+ DUMP_REG(SOR_SEQ_INST(0));
+ DUMP_REG(SOR_SEQ_INST(1));
+ DUMP_REG(SOR_SEQ_INST(2));
+ DUMP_REG(SOR_SEQ_INST(3));
+ DUMP_REG(SOR_SEQ_INST(4));
+ DUMP_REG(SOR_SEQ_INST(5));
+ DUMP_REG(SOR_SEQ_INST(6));
+ DUMP_REG(SOR_SEQ_INST(7));
+ DUMP_REG(SOR_SEQ_INST(8));
+ DUMP_REG(SOR_SEQ_INST(9));
+ DUMP_REG(SOR_SEQ_INST(10));
+ DUMP_REG(SOR_SEQ_INST(11));
+ DUMP_REG(SOR_SEQ_INST(12));
+ DUMP_REG(SOR_SEQ_INST(13));
+ DUMP_REG(SOR_SEQ_INST(14));
+ DUMP_REG(SOR_SEQ_INST(15));
+ DUMP_REG(SOR_PWM_DIV);
+ DUMP_REG(SOR_PWM_CTL);
+ DUMP_REG(SOR_VCRC_A_0);
+ DUMP_REG(SOR_VCRC_A_1);
+ DUMP_REG(SOR_VCRC_B_0);
+ DUMP_REG(SOR_VCRC_B_1);
+ DUMP_REG(SOR_CCRC_A_0);
+ DUMP_REG(SOR_CCRC_A_1);
+ DUMP_REG(SOR_CCRC_B_0);
+ DUMP_REG(SOR_CCRC_B_1);
+ DUMP_REG(SOR_EDATA_A_0);
+ DUMP_REG(SOR_EDATA_A_1);
+ DUMP_REG(SOR_EDATA_B_0);
+ DUMP_REG(SOR_EDATA_B_1);
+ DUMP_REG(SOR_COUNT_A_0);
+ DUMP_REG(SOR_COUNT_A_1);
+ DUMP_REG(SOR_COUNT_B_0);
+ DUMP_REG(SOR_COUNT_B_1);
+ DUMP_REG(SOR_DEBUG_A_0);
+ DUMP_REG(SOR_DEBUG_A_1);
+ DUMP_REG(SOR_DEBUG_B_0);
+ DUMP_REG(SOR_DEBUG_B_1);
+ DUMP_REG(SOR_TRIG);
+ DUMP_REG(SOR_MSCHECK);
+ DUMP_REG(SOR_XBAR_CTRL);
+ DUMP_REG(SOR_XBAR_POL);
+ DUMP_REG(SOR_DP_LINKCTL_0);
+ DUMP_REG(SOR_DP_LINKCTL_1);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
+ DUMP_REG(SOR_LANE_PREEMPHASIS_0);
+ DUMP_REG(SOR_LANE_PREEMPHASIS_1);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
+ DUMP_REG(SOR_LANE_POST_CURSOR_0);
+ DUMP_REG(SOR_LANE_POST_CURSOR_1);
+ DUMP_REG(SOR_DP_CONFIG_0);
+ DUMP_REG(SOR_DP_CONFIG_1);
+ DUMP_REG(SOR_DP_MN_0);
+ DUMP_REG(SOR_DP_MN_1);
+ DUMP_REG(SOR_DP_PADCTL_0);
+ DUMP_REG(SOR_DP_PADCTL_1);
+ DUMP_REG(SOR_DP_DEBUG_0);
+ DUMP_REG(SOR_DP_DEBUG_1);
+ DUMP_REG(SOR_DP_SPARE_0);
+ DUMP_REG(SOR_DP_SPARE_1);
+ DUMP_REG(SOR_DP_AUDIO_CTRL);
+ DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+ DUMP_REG(SOR_DP_TPG);
+ DUMP_REG(SOR_DP_TPG_CONFIG);
+ DUMP_REG(SOR_DP_LQ_CSTM_0);
+ DUMP_REG(SOR_DP_LQ_CSTM_1);
+ DUMP_REG(SOR_DP_LQ_CSTM_2);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_sor_show_regs, 0, NULL },
+};
+
static int tegra_sor_debugfs_init(struct tegra_sor *sor,
struct drm_minor *minor)
{
struct dentry *entry;
+ unsigned int i;
int err = 0;
sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
if (!sor->debugfs)
return -ENOMEM;
+ sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!sor->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ sor->debugfs_files[i].data = sor;
+
+ err = drm_debugfs_create_files(sor->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ sor->debugfs, minor);
+ if (err < 0)
+ goto free;
+
entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
&tegra_sor_crc_fops);
if (!entry) {
- dev_err(sor->dev,
- "cannot create /sys/kernel/debug/dri/%s/sor/crc\n",
- minor->debugfs_root->d_name.name);
err = -ENOMEM;
- goto remove;
+ goto free;
}
return err;
+free:
+ kfree(sor->debugfs_files);
+ sor->debugfs_files = NULL;
remove:
- debugfs_remove(sor->debugfs);
+ debugfs_remove_recursive(sor->debugfs);
sor->debugfs = NULL;
return err;
}
static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
{
- debugfs_remove_recursive(sor->debugfs);
+ drm_debugfs_remove_files(sor->debugfs_files, ARRAY_SIZE(debugfs_files),
+ sor->minor);
+ sor->minor = NULL;
+
+ kfree(sor->debugfs_files);
sor->debugfs = NULL;
+
+ debugfs_remove_recursive(sor->debugfs);
+ sor->debugfs_files = NULL;
}
static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
@@ -791,8 +949,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
struct tegra_sor_config config;
struct drm_dp_link link;
struct drm_dp_aux *aux;
- unsigned long value;
int err = 0;
+ u32 value;
mutex_lock(&sor->lock);
@@ -1354,12 +1512,30 @@ static int tegra_sor_init(struct host1x_client *client)
}
}
+ /*
+ * XXX: Remove this reset once proper hand-over from firmware to
+ * kernel is possible.
+ */
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
+ return err;
+ }
+
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
return err;
}
+ usleep_range(1000, 3000);
+
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
+ return err;
+ }
+
err = clk_prepare_enable(sor->clk_safe);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 12c87110db3a..4f5fa8d65fe9 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -683,6 +683,12 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tdev->ops;
+ exp_info.size = prime->size;
+ exp_info.flags = flags;
+ exp_info.priv = prime;
/*
* Need to create a new dma_buf, with memory accounting.
@@ -694,8 +700,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
goto out_unref;
}
- dma_buf = dma_buf_export(prime, &tdev->ops,
- prime->size, flags, NULL);
+ dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
ttm_mem_global_free(tdev->mem_glob,
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index ac8a66b4dfc2..e2243edd1ce3 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -202,7 +202,14 @@ static struct dma_buf_ops udl_dmabuf_ops = {
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &udl_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
static int udl_prime_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
new file mode 100644
index 000000000000..1055cb79096c
--- /dev/null
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+vgem-y := vgem_drv.o vgem_dma_buf.o
+
+obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c
new file mode 100644
index 000000000000..0254438ad1a6
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_dma_buf.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ * Copyright © 2014 The Chromium OS Authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include "vgem_drv.h"
+
+struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ BUG_ON(obj->pages == NULL);
+
+ return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE);
+}
+
+int vgem_gem_prime_pin(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ return vgem_gem_get_pages(obj);
+}
+
+void vgem_gem_prime_unpin(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ vgem_gem_put_pages(obj);
+}
+
+void *vgem_gem_prime_vmap(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ BUG_ON(obj->pages == NULL);
+
+ return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+}
+
+void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ vunmap(vaddr);
+}
+
+struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_vgem_gem_object *obj = NULL;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_gem_object_init(dev, &obj->base, dma_buf->size);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ get_dma_buf(dma_buf);
+
+ obj->base.dma_buf = dma_buf;
+ obj->use_dma_buf = true;
+
+ return &obj->base;
+
+fail_free:
+ kfree(obj);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
new file mode 100644
index 000000000000..cb3b43525b2d
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ * Copyright © 2014 The Chromium OS Authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Adam Jackson <ajax@redhat.com>
+ * Ben Widawsky <ben@bwidawsk.net>
+ */
+
+/**
+ * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
+ * software renderer and the X server for efficient buffer sharing.
+ */
+
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include "vgem_drv.h"
+
+#define DRIVER_NAME "vgem"
+#define DRIVER_DESC "Virtual GEM provider"
+#define DRIVER_DATE "20120112"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
+{
+ drm_gem_put_pages(&obj->base, obj->pages, false, false);
+ obj->pages = NULL;
+}
+
+static void vgem_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
+
+ drm_gem_free_mmap_offset(obj);
+
+ if (vgem_obj->use_dma_buf && obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+
+ drm_gem_object_release(obj);
+
+ if (vgem_obj->pages)
+ vgem_gem_put_pages(vgem_obj);
+
+ vgem_obj->pages = NULL;
+
+ kfree(vgem_obj);
+}
+
+int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
+{
+ struct page **pages;
+
+ if (obj->pages || obj->use_dma_buf)
+ return 0;
+
+ pages = drm_gem_get_pages(&obj->base);
+ if (IS_ERR(pages)) {
+ return PTR_ERR(pages);
+ }
+
+ obj->pages = pages;
+
+ return 0;
+}
+
+static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_vgem_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->base.dev;
+ loff_t num_pages;
+ pgoff_t page_offset;
+ int ret;
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
+
+ if (page_offset > num_pages)
+ return VM_FAULT_SIGBUS;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ obj->pages[page_offset]);
+
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EBUSY:
+ return VM_FAULT_RETRY;
+ case -EFAULT:
+ case -EINVAL:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ON(1);
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static struct vm_operations_struct vgem_gem_vm_ops = {
+ .fault = vgem_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+/* ioctls */
+
+static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ unsigned int *handle,
+ unsigned long size)
+{
+ struct drm_vgem_gem_object *obj;
+ struct drm_gem_object *gem_object;
+ int err;
+
+ size = roundup(size, PAGE_SIZE);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_object = &obj->base;
+
+ err = drm_gem_object_init(dev, gem_object, size);
+ if (err)
+ goto out;
+
+ err = drm_gem_handle_create(file, gem_object, handle);
+ if (err)
+ goto handle_out;
+
+ drm_gem_object_unreference_unlocked(gem_object);
+
+ return gem_object;
+
+handle_out:
+ drm_gem_object_release(gem_object);
+out:
+ kfree(obj);
+ return ERR_PTR(err);
+}
+
+static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gem_object;
+ uint64_t size;
+ uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+
+ size = args->height * pitch;
+ if (size == 0)
+ return -EINVAL;
+
+ gem_object = vgem_gem_create(dev, file, &args->handle, size);
+
+ if (IS_ERR(gem_object)) {
+ DRM_DEBUG_DRIVER("object creation failed\n");
+ return PTR_ERR(gem_object);
+ }
+
+ args->size = gem_object->size;
+ args->pitch = pitch;
+
+ DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+ return 0;
+}
+
+int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ int ret = 0;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (!drm_vma_node_has_offset(&obj->vma_node)) {
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto unref;
+ }
+
+ BUG_ON(!obj->filp);
+
+ obj->filp->private_data = obj;
+
+ ret = vgem_gem_get_pages(to_vgem_bo(obj));
+ if (ret)
+ goto fail_get_pages;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ goto unref;
+
+fail_get_pages:
+ drm_gem_free_mmap_offset(obj);
+unref:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_vma_offset_node *node;
+ struct drm_gem_object *obj;
+ struct drm_vgem_gem_object *vgem_obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+
+ vgem_obj = to_vgem_bo(obj);
+
+ if (obj->dma_buf && vgem_obj->use_dma_buf) {
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+ goto out_unlock;
+ }
+
+ if (!obj->dev->driver->gem_vm_ops) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = obj->dev->driver->gem_vm_ops;
+ vma->vm_private_data = vgem_obj;
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_gem_vm_open(vma);
+ return ret;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+
+static struct drm_ioctl_desc vgem_ioctls[] = {
+};
+
+static const struct file_operations vgem_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = vgem_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+};
+
+static struct drm_driver vgem_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .gem_free_object = vgem_gem_free_object,
+ .gem_vm_ops = &vgem_gem_vm_ops,
+ .ioctls = vgem_ioctls,
+ .fops = &vgem_driver_fops,
+ .dumb_create = vgem_gem_dumb_create,
+ .dumb_map_offset = vgem_gem_dumb_map,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = vgem_gem_prime_import,
+ .gem_prime_pin = vgem_gem_prime_pin,
+ .gem_prime_unpin = vgem_gem_prime_unpin,
+ .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table,
+ .gem_prime_vmap = vgem_gem_prime_vmap,
+ .gem_prime_vunmap = vgem_gem_prime_vunmap,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+struct drm_device *vgem_device;
+
+static int __init vgem_init(void)
+{
+ int ret;
+
+ vgem_device = drm_dev_alloc(&vgem_driver, NULL);
+ if (!vgem_device) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_dev_register(vgem_device, 0);
+
+ if (ret)
+ goto out_unref;
+
+ return 0;
+
+out_unref:
+ drm_dev_unref(vgem_device);
+out:
+ return ret;
+}
+
+static void __exit vgem_exit(void)
+{
+ drm_dev_unregister(vgem_device);
+ drm_dev_unref(vgem_device);
+}
+
+module_init(vgem_init);
+module_exit(vgem_exit);
+
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 886779030f1a..57ab4d8f41f9 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -1,5 +1,6 @@
/*
- * Copyright © 2013 Intel Corporation
+ * Copyright © 2012 Intel Corporation
+ * Copyright © 2014 The Chromium OS Authors
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -17,23 +18,40 @@
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
*
- * Author: Jani Nikula <jani.nikula@intel.com>
*/
-#ifndef _INTEL_DSI_DSI_H
-#define _INTEL_DSI_DSI_H
+#ifndef _VGEM_DRV_H_
+#define _VGEM_DRV_H_
#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_dsi.h"
+#include <drm/drm_gem.h>
+
+#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
+struct drm_vgem_gem_object {
+ struct drm_gem_object base;
+ struct page **pages;
+ bool use_dma_buf;
+};
+
+/* vgem_drv.c */
+extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
+extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
+
+/* vgem_dma_buf.c */
+extern struct sg_table *vgem_gem_prime_get_sg_table(
+ struct drm_gem_object *gobj);
+extern int vgem_gem_prime_pin(struct drm_gem_object *gobj);
+extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj);
+extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj);
+extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
- enum port port);
-#endif /* _INTEL_DSI_DSI_H */
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b9cbc304e..620bb5cf617c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -134,7 +134,7 @@
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
/**
* Ioctl definitions.
@@ -1044,7 +1044,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd_drv != cmd)) {
+ if (unlikely(ioctl->cmd != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index b10550ee1d89..6b7fdc1e2ed0 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -425,6 +425,12 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
+u32 host1x_syncpt_read(struct host1x_syncpt *sp)
+{
+ return host1x_syncpt_load(sp);
+}
+EXPORT_SYMBOL(host1x_syncpt_read);
+
int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 4864f8300797..9ef2e1f54ca4 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -147,20 +147,20 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
writel(reg2, priv->dc_tmpl_reg + word * 8 + 4);
}
-static int ipu_pixfmt_to_map(u32 fmt)
+static int ipu_bus_format_to_map(u32 fmt)
{
switch (fmt) {
- case V4L2_PIX_FMT_RGB24:
+ case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
- case V4L2_PIX_FMT_RGB565:
+ case MEDIA_BUS_FMT_RGB565_1X16:
return IPU_DC_MAP_RGB565;
- case IPU_PIX_FMT_GBR24:
+ case MEDIA_BUS_FMT_GBR888_1X24:
return IPU_DC_MAP_GBR24;
- case V4L2_PIX_FMT_BGR666:
+ case MEDIA_BUS_FMT_RGB666_1X18:
return IPU_DC_MAP_BGR666;
- case v4l2_fourcc('L', 'V', 'D', '6'):
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
return IPU_DC_MAP_LVDS666;
- case V4L2_PIX_FMT_BGR24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
return IPU_DC_MAP_BGR24;
default:
return -EINVAL;
@@ -168,7 +168,7 @@ static int ipu_pixfmt_to_map(u32 fmt)
}
int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
- u32 pixel_fmt, u32 width)
+ u32 bus_format, u32 width)
{
struct ipu_dc_priv *priv = dc->priv;
u32 reg = 0;
@@ -176,7 +176,7 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
dc->di = ipu_di_get_num(di);
- map = ipu_pixfmt_to_map(pixel_fmt);
+ map = ipu_bus_format_to_map(bus_format);
if (map < 0) {
dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
return map;
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 3ddfb3d0b64d..2970c6bb668c 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -441,8 +441,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
@@ -459,8 +458,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
clkrate = clk_get_rate(di->clk_ipu);
div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
rate = clkrate / div;
error = rate / (sig->mode.pixelclock / 1000);
@@ -483,8 +481,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index ad75588e1629..1dcb96ccda66 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -297,8 +297,8 @@ static int calc_resize_coeffs(struct ipu_ic *ic,
return -EINVAL;
}
- /* Cannot downsize more than 8:1 */
- if ((out_size << 3) < in_size) {
+ /* Cannot downsize more than 4:1 */
+ if ((out_size << 2) < in_size) {
dev_err(ipu->dev, "Unsupported downsize\n");
return -EINVAL;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 15338afdf7f9..cc4c6649d195 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -634,7 +634,12 @@ config HID_PLANTRONICS
tristate "Plantronics USB HID Driver"
depends on HID
---help---
- Provides HID support for Plantronics telephony devices.
+ Provides HID support for Plantronics USB audio devices.
+ Correctly maps vendor unique volume up/down HID usages to
+ KEY_VOLUMEUP and KEY_VOLUMEDOWN events and prevents core mapping
+ of other vendor unique HID usages to random mouse events.
+
+ Say M here if you may ever plug in a Plantronics USB audio device.
config HID_PRIMAX
tristate "Primax non-fully HID-compliant devices"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index e4a21dfd7ef3..2f8a41dc3cc8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
-obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
+obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
@@ -46,12 +46,12 @@ obj-$(CONFIG_HID_ICADE) += hid-icade.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
-obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
+obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
-obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
+obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index a64f8626bdf6..157c62775053 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -706,7 +706,8 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
(hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP) &&
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1061,13 +1062,13 @@ static u32 s32ton(__s32 value, unsigned n)
* Search linux-kernel and linux-usb-devel archives for "hid-core extract".
*/
-static __u32 extract(const struct hid_device *hid, __u8 *report,
+__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n)
{
u64 x;
if (n > 32)
- hid_warn(hid, "extract() called with n (%d) > 32! (%s)\n",
+ hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
n, current->comm);
report += offset >> 3; /* adjust byte index */
@@ -1076,6 +1077,7 @@ static __u32 extract(const struct hid_device *hid, __u8 *report,
x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */
return (u32) x;
}
+EXPORT_SYMBOL_GPL(hid_field_extract);
/*
* "implement" : set bits in a little endian bit stream.
@@ -1221,9 +1223,9 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
for (n = 0; n < count; n++) {
value[n] = min < 0 ?
- snto32(extract(hid, data, offset + n * size, size),
- size) :
- extract(hid, data, offset + n * size, size);
+ snto32(hid_field_extract(hid, data, offset + n * size,
+ size), size) :
+ hid_field_extract(hid, data, offset + n * size, size);
/* Ignore report if ErrorRollOver */
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
@@ -1851,6 +1853,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
@@ -1901,6 +1904,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
@@ -2000,6 +2004,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@@ -2268,14 +2273,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_0_4_IF_KIT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_16_16_IF_KIT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_8_8_8_IF_KIT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_7_IF_KIT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_8_IF_KIT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_PHIDGET_MOTORCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
@@ -2402,14 +2399,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
#endif
- { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
{ }
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index c4ef3bc726e3..1b764d1745f3 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -41,13 +41,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
for (i = 0; i < *rsize - 4; i++)
if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
- __u8 tmp;
-
rdesc[i] = 0x19;
rdesc[i + 2] = 0x29;
- tmp = rdesc[i + 3];
- rdesc[i + 3] = rdesc[i + 1];
- rdesc[i + 1] = tmp;
+ swap(rdesc[i + 3], rdesc[i + 1]);
}
return rdesc;
}
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index c785095f9f2c..2886b645ced7 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -815,7 +815,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
[KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
[KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
- [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
+ [KEY_COFFEE] = "Coffee", [KEY_ROTATE_DISPLAY] = "RotateDisplay",
[KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
[KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
[KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 14fcb1103fb3..b04b0820d816 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -164,6 +164,7 @@
#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+#define USB_DEVICE_ID_ATEN_CS682 0x2213
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
@@ -226,6 +227,7 @@
#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
+#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
@@ -362,16 +364,6 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
-#define USB_VENDOR_ID_GLAB 0x06c2
-#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
-#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039
-#define USB_DEVICE_ID_0_0_4_IF_KIT 0x0040
-#define USB_DEVICE_ID_0_16_16_IF_KIT 0x0044
-#define USB_DEVICE_ID_8_8_8_IF_KIT 0x0045
-#define USB_DEVICE_ID_0_8_7_IF_KIT 0x0051
-#define USB_DEVICE_ID_0_8_8_IF_KIT 0x0053
-#define USB_DEVICE_ID_PHIDGET_MOTORCONTROL 0x0058
-
#define USB_VENDOR_ID_GOODTOUCH 0x1aad
#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
@@ -585,6 +577,7 @@
#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
#define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
+#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
@@ -672,6 +665,7 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc
#define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -958,13 +952,6 @@
#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
-#define USB_VENDOR_ID_VERNIER 0x08f7
-#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
-#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
-#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
-#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
-#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
-
#define USB_VENDOR_ID_VTL 0x0306
#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f
@@ -983,9 +970,6 @@
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
-#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101
-#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104
-#define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201
#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
#define USB_DEVICE_ID_QUAD_USB_JOYPAD 0x8800
#define USB_DEVICE_ID_DUAL_USB_JOYPAD 0x8866
@@ -1039,4 +1023,11 @@
#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
#define USB_DEVICE_ID_RI_KA_WEBMAIL 0x1320 /* Webmail Notifier */
+#define USB_VENDOR_ID_MULTIPLE_1781 0x1781
+#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD 0x0a8d
+
+#define USB_VENDOR_ID_DRACAL_RAPHNET 0x289b
+#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
+#define USB_DEVICE_ID_RAPHNET_4NES4SNES 0x0003
+
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 008e89bf6f3c..3511bbaba505 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1157,7 +1157,8 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
/* report the usage code as scancode if the key status has changed */
- if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
+ if (usage->type == EV_KEY &&
+ (!test_bit(usage->code, input->key)) == value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
input_event(input, usage->type, usage->code, value);
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index c4c3f0952521..4f59bffd0205 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -43,6 +43,35 @@ struct lenovo_drvdata_cptkbd {
#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+static const __u8 lenovo_pro_dock_need_fixup_collection[] = {
+ 0x05, 0x88, /* Usage Page (Vendor Usage Page 0x88) */
+ 0x09, 0x01, /* Usage (Vendor Usage 0x01) */
+ 0xa1, 0x01, /* Collection (Application) */
+ 0x85, 0x04, /* Report ID (4) */
+ 0x19, 0x00, /* Usage Minimum (0) */
+ 0x2a, 0xff, 0xff, /* Usage Maximum (65535) */
+};
+
+static __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LENOVO_TPPRODOCK:
+ /* the fixups that need to be done:
+ * - get a reasonable usage max for the vendor collection
+ * 0x8801 from the report ID 4
+ */
+ if (*rsize >= 153 &&
+ memcmp(&rdesc[140], lenovo_pro_dock_need_fixup_collection,
+ sizeof(lenovo_pro_dock_need_fixup_collection)) == 0) {
+ rdesc[151] = 0x01;
+ rdesc[152] = 0x00;
+ }
+ break;
+ }
+ return rdesc;
+}
+
static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
@@ -599,7 +628,8 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
GFP_KERNEL);
if (data_pointer == NULL) {
hid_err(hdev, "Could not allocate memory for driver data\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
// set same default values as windows driver
@@ -610,7 +640,8 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
if (name_mute == NULL || name_micmute == NULL) {
hid_err(hdev, "Could not allocate memory for led data\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
@@ -634,6 +665,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
lenovo_features_set_tpkbd(hdev);
return 0;
+err:
+ sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tpkbd);
+ return ret;
}
static int lenovo_probe_cptkbd(struct hid_device *hdev)
@@ -762,10 +796,29 @@ static void lenovo_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static void lenovo_input_configured(struct hid_device *hdev,
+ struct hid_input *hi)
+{
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LENOVO_TPKBD:
+ case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ case USB_DEVICE_ID_LENOVO_CBTKBD:
+ if (test_bit(EV_REL, hi->input->evbit)) {
+ /* set only for trackpoint device */
+ __set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+ __set_bit(INPUT_PROP_POINTING_STICK,
+ hi->input->propbit);
+ }
+ break;
+ }
+}
+
+
static const struct hid_device_id lenovo_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
{ }
};
@@ -774,10 +827,12 @@ MODULE_DEVICE_TABLE(hid, lenovo_devices);
static struct hid_driver lenovo_driver = {
.name = "lenovo",
.id_table = lenovo_devices,
+ .input_configured = lenovo_input_configured,
.input_mapping = lenovo_input_mapping,
.probe = lenovo_probe,
.remove = lenovo_remove,
.raw_event = lenovo_raw_event,
+ .report_fixup = lenovo_report_fixup,
};
module_hid_driver(lenovo_driver);
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index b86c18e651ed..429340d809b5 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -700,7 +700,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
/* insert a little delay of 10 jiffies ~ 40ms */
wait_queue_head_t wait;
init_waitqueue_head (&wait);
- wait_event_interruptible_timeout(wait, 0, 10);
+ wait_event_interruptible_timeout(wait, 0,
+ msecs_to_jiffies(40));
/* Select random Address */
buf[1] = 0xB2;
@@ -712,13 +713,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
if (drv_data->quirks & LG_FF)
- lgff_init(hdev);
- if (drv_data->quirks & LG_FF2)
- lg2ff_init(hdev);
- if (drv_data->quirks & LG_FF3)
- lg3ff_init(hdev);
- if (drv_data->quirks & LG_FF4)
- lg4ff_init(hdev);
+ ret = lgff_init(hdev);
+ else if (drv_data->quirks & LG_FF2)
+ ret = lg2ff_init(hdev);
+ else if (drv_data->quirks & LG_FF3)
+ ret = lg3ff_init(hdev);
+ else if (drv_data->quirks & LG_FF4)
+ ret = lg4ff_init(hdev);
+
+ if (ret)
+ goto err_free;
return 0;
err_free:
@@ -731,8 +735,8 @@ static void lg_remove(struct hid_device *hdev)
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
if (drv_data->quirks & LG_FF4)
lg4ff_deinit(hdev);
-
- hid_hw_stop(hdev);
+ else
+ hid_hw_stop(hdev);
kfree(drv_data);
}
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 1232210b1cc5..02cec83caac3 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -68,26 +68,32 @@
#define LG4FF_FFEX_REV_MAJ 0x21
#define LG4FF_FFEX_REV_MIN 0x00
-static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
-static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
-
-struct lg4ff_device_entry {
- __u32 product_id;
- __u16 range;
- __u16 min_range;
- __u16 max_range;
+static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
+static void lg4ff_set_range_g25(struct hid_device *hid, u16 range);
+
+struct lg4ff_wheel_data {
+ const u32 product_id;
+ u16 range;
+ const u16 min_range;
+ const u16 max_range;
#ifdef CONFIG_LEDS_CLASS
- __u8 led_state;
+ u8 led_state;
struct led_classdev *led[5];
#endif
- u32 alternate_modes;
- const char *real_tag;
- const char *real_name;
- u16 real_product_id;
- struct list_head list;
+ const u32 alternate_modes;
+ const char * const real_tag;
+ const char * const real_name;
+ const u16 real_product_id;
+
void (*set_range)(struct hid_device *hid, u16 range);
};
+struct lg4ff_device_entry {
+ spinlock_t report_lock; /* Protect output HID report */
+ struct hid_report *report;
+ struct lg4ff_wheel_data wdata;
+};
+
static const signed short lg4ff_wheel_effects[] = {
FF_CONSTANT,
FF_AUTOCENTER,
@@ -95,16 +101,16 @@ static const signed short lg4ff_wheel_effects[] = {
};
struct lg4ff_wheel {
- const __u32 product_id;
+ const u32 product_id;
const signed short *ff_effects;
- const __u16 min_range;
- const __u16 max_range;
+ const u16 min_range;
+ const u16 max_range;
void (*set_range)(struct hid_device *hid, u16 range);
};
struct lg4ff_compat_mode_switch {
- const __u8 cmd_count; /* Number of commands to send */
- const __u8 cmd[];
+ const u8 cmd_count; /* Number of commands to send */
+ const u8 cmd[];
};
struct lg4ff_wheel_ident_info {
@@ -134,10 +140,10 @@ struct lg4ff_alternate_mode {
static const struct lg4ff_wheel lg4ff_devices[] = {
{USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
- {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp},
- {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
- {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
- {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_dfp},
+ {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
{USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}
};
@@ -245,10 +251,10 @@ static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext16_g25 = {
};
/* Recalculates X axis value accordingly to currently selected range */
-static __s32 lg4ff_adjust_dfp_x_axis(__s32 value, __u16 range)
+static s32 lg4ff_adjust_dfp_x_axis(s32 value, u16 range)
{
- __u16 max_range;
- __s32 new_value;
+ u16 max_range;
+ s32 new_value;
if (range == 900)
return value;
@@ -269,21 +275,21 @@ static __s32 lg4ff_adjust_dfp_x_axis(__s32 value, __u16 range)
}
int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data)
+ struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data)
{
struct lg4ff_device_entry *entry = drv_data->device_props;
- __s32 new_value = 0;
+ s32 new_value = 0;
if (!entry) {
hid_err(hid, "Device properties not found");
return 0;
}
- switch (entry->product_id) {
+ switch (entry->wdata.product_id) {
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
switch (usage->code) {
case ABS_X:
- new_value = lg4ff_adjust_dfp_x_axis(value, entry->range);
+ new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range);
input_event(field->hidinput->input, usage->type, usage->code, new_value);
return 1;
default:
@@ -294,14 +300,56 @@ int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
}
}
-static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+static void lg4ff_init_wheel_data(struct lg4ff_wheel_data * const wdata, const struct lg4ff_wheel *wheel,
+ const struct lg4ff_multimode_wheel *mmode_wheel,
+ const u16 real_product_id)
+{
+ u32 alternate_modes = 0;
+ const char *real_tag = NULL;
+ const char *real_name = NULL;
+
+ if (mmode_wheel) {
+ alternate_modes = mmode_wheel->alternate_modes;
+ real_tag = mmode_wheel->real_tag;
+ real_name = mmode_wheel->real_name;
+ }
+
+ {
+ struct lg4ff_wheel_data t_wdata = { .product_id = wheel->product_id,
+ .real_product_id = real_product_id,
+ .min_range = wheel->min_range,
+ .max_range = wheel->max_range,
+ .set_range = wheel->set_range,
+ .alternate_modes = alternate_modes,
+ .real_tag = real_tag,
+ .real_name = real_name };
+
+ memcpy(wdata, &t_wdata, sizeof(t_wdata));
+ }
+}
+
+static int lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct hid_device *hid = input_get_drvdata(dev);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ unsigned long flags;
+ s32 *value;
int x;
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return -EINVAL;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return -EINVAL;
+ }
+ value = entry->report->field[0]->value;
+
#define CLAMP(x) do { if (x < 0) x = 0; else if (x > 0xff) x = 0xff; } while (0)
switch (effect->type) {
@@ -309,6 +357,7 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e
x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */
CLAMP(x);
+ spin_lock_irqsave(&entry->report_lock, flags);
if (x == 0x80) {
/* De-activate force in slot-1*/
value[0] = 0x13;
@@ -319,7 +368,8 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
return 0;
}
@@ -331,7 +381,8 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
break;
}
return 0;
@@ -339,15 +390,16 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e
/* Sends default autocentering command compatible with
* all wheels except Formula Force EX */
-static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
+static void lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
- __u32 expand_a, expand_b;
+ s32 *value = report->field[0]->value;
+ u32 expand_a, expand_b;
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
+ unsigned long flags;
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
@@ -360,8 +412,10 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud
hid_err(hid, "Device properties not found!\n");
return;
}
+ value = entry->report->field[0]->value;
/* De-activate Auto-Center */
+ spin_lock_irqsave(&entry->report_lock, flags);
if (magnitude == 0) {
value[0] = 0xf5;
value[1] = 0x00;
@@ -371,7 +425,8 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
return;
}
@@ -384,7 +439,7 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud
}
/* Adjust for non-MOMO wheels */
- switch (entry->product_id) {
+ switch (entry->wdata.product_id) {
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
break;
@@ -401,7 +456,7 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
/* Activate Auto-Center */
value[0] = 0x14;
@@ -412,18 +467,34 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
}
/* Sends autocentering command compatible with Formula Force EX */
-static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
+static void lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ unsigned long flags;
+ s32 *value;
magnitude = magnitude * 90 / 65535;
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return;
+ }
+ value = entry->report->field[0]->value;
+
+ spin_lock_irqsave(&entry->report_lock, flags);
value[0] = 0xfe;
value[1] = 0x03;
value[2] = magnitude >> 14;
@@ -432,18 +503,33 @@ static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
}
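
The hunks above keep repeating the same lookup: driver data -> device entry -> cached output report, with the report writes guarded by entry->report_lock. As an editorial sketch (not part of the patch), the shared pattern boils down to a helper like the hypothetical one below; the patch simply open-codes it in each function:

    /* Hypothetical helper, not in the patch: resolve the cached report values. */
    static s32 *lg4ff_get_report_values(struct hid_device *hid,
                                        struct lg4ff_device_entry **entry_ret)
    {
    	struct lg_drv_data *drv_data = hid_get_drvdata(hid);
    	struct lg4ff_device_entry *entry;

    	if (!drv_data || !drv_data->device_props)
    		return NULL;

    	entry = drv_data->device_props;
    	*entry_ret = entry;
    	return entry->report->field[0]->value;
    }

    /* Callers then fill value[0..6] and submit the report under the lock:
     *
     *	spin_lock_irqsave(&entry->report_lock, flags);
     *	value[0] = ...;  ...  value[6] = ...;
     *	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
     *	spin_unlock_irqrestore(&entry->report_lock, flags);
     */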
/* Sends command to set range compatible with G25/G27/Driving Force GT */
-static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range)
+static void lg4ff_set_range_g25(struct hid_device *hid, u16 range)
{
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ unsigned long flags;
+ s32 *value;
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return;
+ }
+ value = entry->report->field[0]->value;
dbg_hid("G25/G27/DFGT: setting range to %u\n", range);
+ spin_lock_irqsave(&entry->report_lock, flags);
value[0] = 0xf8;
value[1] = 0x81;
value[2] = range & 0x00ff;
@@ -452,20 +538,35 @@ static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range)
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
}
/* Sends commands to set range compatible with Driving Force Pro wheel */
-static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
+static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range)
{
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ unsigned long flags;
int start_left, start_right, full_range;
- __s32 *value = report->field[0]->value;
+ s32 *value;
+
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return;
+ }
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return;
+ }
+ value = entry->report->field[0]->value;
dbg_hid("Driving Force Pro: setting range to %u\n", range);
/* Prepare "coarse" limit command */
+ spin_lock_irqsave(&entry->report_lock, flags);
value[0] = 0xf8;
value[1] = 0x00; /* Set later */
value[2] = 0x00;
@@ -475,13 +576,13 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
value[6] = 0x00;
if (range > 200) {
- report->field[0]->value[1] = 0x03;
+ value[1] = 0x03;
full_range = 900;
} else {
- report->field[0]->value[1] = 0x02;
+ value[1] = 0x02;
full_range = 200;
}
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
/* Prepare "fine" limit command */
value[0] = 0x81;
@@ -493,7 +594,8 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
value[6] = 0x00;
if (range == 200 || range == 900) { /* Do not apply any fine limit */
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
return;
}
@@ -507,7 +609,8 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
value[6] = 0xff;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
}
static const struct lg4ff_compat_mode_switch *lg4ff_get_mode_switch_command(const u16 real_product_id, const u16 target_product_id)
@@ -569,19 +672,35 @@ static const struct lg4ff_compat_mode_switch *lg4ff_get_mode_switch_command(cons
static int lg4ff_switch_compatibility_mode(struct hid_device *hid, const struct lg4ff_compat_mode_switch *s)
{
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ unsigned long flags;
+ s32 *value;
u8 i;
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return -EINVAL;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return -EINVAL;
+ }
+ value = entry->report->field[0]->value;
+
+ spin_lock_irqsave(&entry->report_lock, flags);
for (i = 0; i < s->cmd_count; i++) {
u8 j;
for (j = 0; j < 7; j++)
value[j] = s->cmd[j + (7*i)];
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
}
+ spin_unlock_irqrestore(&entry->report_lock, flags);
hid_hw_wait(hid);
return 0;
}
@@ -606,23 +725,23 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
return 0;
}
- if (!entry->real_name) {
+ if (!entry->wdata.real_name) {
hid_err(hid, "NULL pointer to string\n");
return 0;
}
for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
- if (entry->alternate_modes & BIT(i)) {
+ if (entry->wdata.alternate_modes & BIT(i)) {
/* Print tag and full name */
count += scnprintf(buf + count, PAGE_SIZE - count, "%s: %s",
lg4ff_alternate_modes[i].tag,
- !lg4ff_alternate_modes[i].product_id ? entry->real_name : lg4ff_alternate_modes[i].name);
+ !lg4ff_alternate_modes[i].product_id ? entry->wdata.real_name : lg4ff_alternate_modes[i].name);
if (count >= PAGE_SIZE - 1)
return count;
/* Mark the currently active mode with an asterisk */
- if (lg4ff_alternate_modes[i].product_id == entry->product_id ||
- (lg4ff_alternate_modes[i].product_id == 0 && entry->product_id == entry->real_product_id))
+ if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id ||
+ (lg4ff_alternate_modes[i].product_id == 0 && entry->wdata.product_id == entry->wdata.real_product_id))
count += scnprintf(buf + count, PAGE_SIZE - count, " *\n");
else
count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
@@ -675,10 +794,10 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
const u16 mode_product_id = lg4ff_alternate_modes[i].product_id;
const char *tag = lg4ff_alternate_modes[i].tag;
- if (entry->alternate_modes & BIT(i)) {
+ if (entry->wdata.alternate_modes & BIT(i)) {
if (!strcmp(tag, lbuf)) {
if (!mode_product_id)
- target_product_id = entry->real_product_id;
+ target_product_id = entry->wdata.real_product_id;
else
target_product_id = mode_product_id;
break;
@@ -693,24 +812,24 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
}
kfree(lbuf); /* Not needed anymore */
- if (target_product_id == entry->product_id) /* Nothing to do */
+ if (target_product_id == entry->wdata.product_id) /* Nothing to do */
return count;
/* Automatic switching has to be disabled for the switch to DF-EX mode to work correctly */
if (target_product_id == USB_DEVICE_ID_LOGITECH_WHEEL && !lg4ff_no_autoswitch) {
hid_info(hid, "\"%s\" cannot be switched to \"DF-EX\" mode. Load the \"hid_logitech\" module with \"lg4ff_no_autoswitch=1\" parameter set and try again\n",
- entry->real_name);
+ entry->wdata.real_name);
return -EINVAL;
}
/* Take care of hardware limitations */
- if ((entry->real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL || entry->real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) &&
- entry->product_id > target_product_id) {
- hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n", entry->real_name, lg4ff_alternate_modes[i].name);
+ if ((entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL || entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) &&
+ entry->wdata.product_id > target_product_id) {
+ hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n", entry->wdata.real_name, lg4ff_alternate_modes[i].name);
return -EINVAL;
}
- s = lg4ff_get_mode_switch_command(entry->real_product_id, target_product_id);
+ s = lg4ff_get_mode_switch_command(entry->wdata.real_product_id, target_product_id);
if (!s) {
hid_err(hid, "Invalid target product ID %X\n", target_product_id);
return -EINVAL;
@@ -721,9 +840,9 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
}
static DEVICE_ATTR(alternate_modes, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_alternate_modes_show, lg4ff_alternate_modes_store);
-/* Read current range and display it in terminal */
-static ssize_t range_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+/* Export the currently set range of the wheel */
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct hid_device *hid = to_hid_device(dev);
struct lg4ff_device_entry *entry;
@@ -742,19 +861,19 @@ static ssize_t range_show(struct device *dev, struct device_attribute *attr,
return 0;
}
- count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range);
+ count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->wdata.range);
return count;
}
/* Set range to user specified value, call appropriate function
* according to the type of the wheel */
-static ssize_t range_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hid_device *hid = to_hid_device(dev);
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
- __u16 range = simple_strtoul(buf, NULL, 10);
+ u16 range = simple_strtoul(buf, NULL, 10);
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
@@ -769,18 +888,18 @@ static ssize_t range_store(struct device *dev, struct device_attribute *attr,
}
if (range == 0)
- range = entry->max_range;
+ range = entry->wdata.max_range;
/* Check if the wheel supports range setting
* and that the range is within limits for the wheel */
- if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) {
- entry->set_range(hid, range);
- entry->range = range;
+ if (entry->wdata.set_range && range >= entry->wdata.min_range && range <= entry->wdata.max_range) {
+ entry->wdata.set_range(hid, range);
+ entry->wdata.range = range;
}
return count;
}
-static DEVICE_ATTR_RW(range);
+static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_range_show, lg4ff_range_store);
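
Dropping DEVICE_ATTR_RW() here follows from the handler rename; roughly, the convenience macro hard-codes the <name>_show/<name>_store naming (sketch below is an approximation of the expansion, not taken from the patch):

    /* DEVICE_ATTR_RW(range) expands to roughly
     *   struct device_attribute dev_attr_range =
     *           __ATTR(range, 0644, range_show, range_store);
     * so once the handlers are renamed to lg4ff_range_show()/lg4ff_range_store(),
     * the attribute has to be spelled out with DEVICE_ATTR() instead.
     */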
static ssize_t lg4ff_real_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -801,12 +920,12 @@ static ssize_t lg4ff_real_id_show(struct device *dev, struct device_attribute *a
return 0;
}
- if (!entry->real_tag || !entry->real_name) {
+ if (!entry->wdata.real_tag || !entry->wdata.real_name) {
hid_err(hid, "NULL pointer to string\n");
return 0;
}
- count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->real_tag, entry->real_name);
+ count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->wdata.real_tag, entry->wdata.real_name);
return count;
}
@@ -818,12 +937,27 @@ static ssize_t lg4ff_real_id_store(struct device *dev, struct device_attribute *
static DEVICE_ATTR(real_id, S_IRUGO, lg4ff_real_id_show, lg4ff_real_id_store);
#ifdef CONFIG_LEDS_CLASS
-static void lg4ff_set_leds(struct hid_device *hid, __u8 leds)
+static void lg4ff_set_leds(struct hid_device *hid, u8 leds)
{
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
+ struct lg_drv_data *drv_data;
+ struct lg4ff_device_entry *entry;
+ unsigned long flags;
+ s32 *value;
+
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return;
+ }
+ value = entry->report->field[0]->value;
+ spin_lock_irqsave(&entry->report_lock, flags);
value[0] = 0xf8;
value[1] = 0x12;
value[2] = leds;
@@ -831,7 +965,8 @@ static void lg4ff_set_leds(struct hid_device *hid, __u8 leds)
value[4] = 0x00;
value[5] = 0x00;
value[6] = 0x00;
- hid_hw_request(hid, report, HID_REQ_SET_REPORT);
+ hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
+ spin_unlock_irqrestore(&entry->report_lock, flags);
}
static void lg4ff_led_set_brightness(struct led_classdev *led_cdev,
@@ -848,7 +983,7 @@ static void lg4ff_led_set_brightness(struct led_classdev *led_cdev,
return;
}
- entry = (struct lg4ff_device_entry *)drv_data->device_props;
+ entry = drv_data->device_props;
if (!entry) {
hid_err(hid, "Device properties not found.");
@@ -856,15 +991,15 @@ static void lg4ff_led_set_brightness(struct led_classdev *led_cdev,
}
for (i = 0; i < 5; i++) {
- if (led_cdev != entry->led[i])
+ if (led_cdev != entry->wdata.led[i])
continue;
- state = (entry->led_state >> i) & 1;
+ state = (entry->wdata.led_state >> i) & 1;
if (value == LED_OFF && state) {
- entry->led_state &= ~(1 << i);
- lg4ff_set_leds(hid, entry->led_state);
+ entry->wdata.led_state &= ~(1 << i);
+ lg4ff_set_leds(hid, entry->wdata.led_state);
} else if (value != LED_OFF && !state) {
- entry->led_state |= 1 << i;
- lg4ff_set_leds(hid, entry->led_state);
+ entry->wdata.led_state |= 1 << i;
+ lg4ff_set_leds(hid, entry->wdata.led_state);
}
break;
}
@@ -883,7 +1018,7 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
return LED_OFF;
}
- entry = (struct lg4ff_device_entry *)drv_data->device_props;
+ entry = drv_data->device_props;
if (!entry) {
hid_err(hid, "Device properties not found.");
@@ -891,8 +1026,8 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
}
for (i = 0; i < 5; i++)
- if (led_cdev == entry->led[i]) {
- value = (entry->led_state >> i) & 1;
+ if (led_cdev == entry->wdata.led[i]) {
+ value = (entry->wdata.led_state >> i) & 1;
break;
}
@@ -991,8 +1126,11 @@ int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ const struct lg4ff_multimode_wheel *mmode_wheel = NULL;
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
int error, i, j;
@@ -1003,6 +1141,18 @@ int lg4ff_init(struct hid_device *hid)
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Cannot add device, private driver data not allocated\n");
+ return -1;
+ }
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ spin_lock_init(&entry->report_lock);
+ entry->report = report;
+ drv_data->device_props = entry;
+
/* Check if a multimode wheel has been connected and
* handle it appropriately */
mmode_ret = lg4ff_handle_multimode_wheel(hid, &real_product_id, bcdDevice);
@@ -1012,6 +1162,11 @@ int lg4ff_init(struct hid_device *hid)
*/
if (mmode_ret == LG4FF_MMODE_SWITCHED)
return 0;
+ else if (mmode_ret < 0) {
+ hid_err(hid, "Unable to switch device mode during initialization, errno %d\n", mmode_ret);
+ error = mmode_ret;
+ goto err_init;
+ }
/* Check what wheel has been connected */
for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
@@ -1022,9 +1177,11 @@ int lg4ff_init(struct hid_device *hid)
}
if (i == ARRAY_SIZE(lg4ff_devices)) {
- hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to"
- "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n");
- return -1;
+ hid_err(hid, "This device is flagged to be handled by the lg4ff module but this module does not know how to handle it. "
+ "Please report this as a bug to LKML, Simon Wood <simon@mungewell.org> or "
+ "Michal Maly <madcatxster@devoid-pointer.net>\n");
+ error = -1;
+ goto err_init;
}
if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) {
@@ -1035,7 +1192,8 @@ int lg4ff_init(struct hid_device *hid)
if (mmode_idx == ARRAY_SIZE(lg4ff_multimode_wheels)) {
hid_err(hid, "Device product ID %X is not listed as a multimode wheel", real_product_id);
- return -1;
+ error = -1;
+ goto err_init;
}
}
@@ -1043,37 +1201,17 @@ int lg4ff_init(struct hid_device *hid)
for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++)
set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);
- error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);
+ error = input_ff_create_memless(dev, NULL, lg4ff_play);
if (error)
- return error;
-
- /* Get private driver data */
- drv_data = hid_get_drvdata(hid);
- if (!drv_data) {
- hid_err(hid, "Cannot add device, private driver data not allocated\n");
- return -1;
- }
+ goto err_init;
/* Initialize device properties */
- entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
- if (!entry) {
- hid_err(hid, "Cannot add device, insufficient memory to allocate device properties.\n");
- return -ENOMEM;
- }
- drv_data->device_props = entry;
-
- entry->product_id = lg4ff_devices[i].product_id;
- entry->real_product_id = real_product_id;
- entry->min_range = lg4ff_devices[i].min_range;
- entry->max_range = lg4ff_devices[i].max_range;
- entry->set_range = lg4ff_devices[i].set_range;
if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) {
BUG_ON(mmode_idx == -1);
- entry->alternate_modes = lg4ff_multimode_wheels[mmode_idx].alternate_modes;
- entry->real_tag = lg4ff_multimode_wheels[mmode_idx].real_tag;
- entry->real_name = lg4ff_multimode_wheels[mmode_idx].real_name;
+ mmode_wheel = &lg4ff_multimode_wheels[mmode_idx];
}
+ lg4ff_init_wheel_data(&entry->wdata, &lg4ff_devices[i], mmode_wheel, real_product_id);
/* Check if autocentering is available and
* set the centering force to zero by default */
@@ -1081,9 +1219,9 @@ int lg4ff_init(struct hid_device *hid)
/* Formula Force EX expects different autocentering command */
if ((bcdDevice >> 8) == LG4FF_FFEX_REV_MAJ &&
(bcdDevice & 0xff) == LG4FF_FFEX_REV_MIN)
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+ dev->ff->set_autocenter = lg4ff_set_autocenter_ffex;
else
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+ dev->ff->set_autocenter = lg4ff_set_autocenter_default;
dev->ff->set_autocenter(dev, 0);
}
@@ -1091,27 +1229,27 @@ int lg4ff_init(struct hid_device *hid)
/* Create sysfs interface */
error = device_create_file(&hid->dev, &dev_attr_range);
if (error)
- return error;
+ hid_warn(hid, "Unable to create sysfs interface for \"range\", errno %d\n", error);
if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) {
error = device_create_file(&hid->dev, &dev_attr_real_id);
if (error)
- return error;
+ hid_warn(hid, "Unable to create sysfs interface for \"real_id\", errno %d\n", error);
error = device_create_file(&hid->dev, &dev_attr_alternate_modes);
if (error)
- return error;
+ hid_warn(hid, "Unable to create sysfs interface for \"alternate_modes\", errno %d\n", error);
}
dbg_hid("sysfs interface created\n");
/* Set the maximum range to start with */
- entry->range = entry->max_range;
- if (entry->set_range != NULL)
- entry->set_range(hid, entry->range);
+ entry->wdata.range = entry->wdata.max_range;
+ if (entry->wdata.set_range)
+ entry->wdata.set_range(hid, entry->wdata.range);
#ifdef CONFIG_LEDS_CLASS
/* register led subsystem - G27 only */
- entry->led_state = 0;
+ entry->wdata.led_state = 0;
for (j = 0; j < 5; j++)
- entry->led[j] = NULL;
+ entry->wdata.led[j] = NULL;
if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_G27_WHEEL) {
struct led_classdev *led;
@@ -1126,7 +1264,7 @@ int lg4ff_init(struct hid_device *hid)
led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hid, "can't allocate memory for LED %d\n", j);
- goto err;
+ goto err_leds;
}
name = (void *)(&led[1]);
@@ -1137,16 +1275,16 @@ int lg4ff_init(struct hid_device *hid)
led->brightness_get = lg4ff_led_get_brightness;
led->brightness_set = lg4ff_led_set_brightness;
- entry->led[j] = led;
+ entry->wdata.led[j] = led;
error = led_classdev_register(&hid->dev, led);
if (error) {
hid_err(hid, "failed to register LED %d. Aborting.\n", j);
-err:
+err_leds:
/* Deregister LEDs (if any) */
for (j = 0; j < 5; j++) {
- led = entry->led[j];
- entry->led[j] = NULL;
+ led = entry->wdata.led[j];
+ entry->wdata.led[j] = NULL;
if (!led)
continue;
led_classdev_unregister(led);
@@ -1160,6 +1298,11 @@ out:
#endif
hid_info(hid, "Force feedback support for Logitech Gaming Wheels\n");
return 0;
+
+err_init:
+ drv_data->device_props = NULL;
+ kfree(entry);
+ return error;
}
int lg4ff_deinit(struct hid_device *hid)
@@ -1176,14 +1319,13 @@ int lg4ff_deinit(struct hid_device *hid)
if (!entry)
goto out; /* Nothing more to do */
- device_remove_file(&hid->dev, &dev_attr_range);
-
/* Multimode devices will have at least the "MODE_NATIVE" bit set */
- if (entry->alternate_modes) {
+ if (entry->wdata.alternate_modes) {
device_remove_file(&hid->dev, &dev_attr_real_id);
device_remove_file(&hid->dev, &dev_attr_alternate_modes);
}
+ device_remove_file(&hid->dev, &dev_attr_range);
#ifdef CONFIG_LEDS_CLASS
{
int j;
@@ -1192,8 +1334,8 @@ int lg4ff_deinit(struct hid_device *hid)
/* Deregister LEDs (if any) */
for (j = 0; j < 5; j++) {
- led = entry->led[j];
- entry->led[j] = NULL;
+ led = entry->wdata.led[j];
+ entry->wdata.led[j] = NULL;
if (!led)
continue;
led_classdev_unregister(led);
@@ -1201,10 +1343,10 @@ int lg4ff_deinit(struct hid_device *hid)
}
}
#endif
+ hid_hw_stop(hid);
+ drv_data->device_props = NULL;
- /* Deallocate memory */
kfree(entry);
-
out:
dbg_hid("Device successfully unregistered\n");
return 0;
diff --git a/drivers/hid/hid-lg4ff.h b/drivers/hid/hid-lg4ff.h
index 5b6a5086c47f..66201af44da3 100644
--- a/drivers/hid/hid-lg4ff.h
+++ b/drivers/hid/hid-lg4ff.h
@@ -5,12 +5,12 @@
extern int lg4ff_no_autoswitch; /* From hid-lg.c */
int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data);
+ struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data);
int lg4ff_init(struct hid_device *hdev);
int lg4ff_deinit(struct hid_device *hdev);
#else
static inline int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data) { return 0; }
+ struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data) { return 0; }
static inline int lg4ff_init(struct hid_device *hdev) { return -1; }
static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; }
#endif
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index b3cf6fd4be96..484196459305 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -40,11 +40,11 @@ MODULE_PARM_DESC(disable_raw_mode,
#define HIDPP_REPORT_LONG_LENGTH 20
#define HIDPP_QUIRK_CLASS_WTP BIT(0)
+#define HIDPP_QUIRK_CLASS_M560 BIT(1)
-/* bits 1..20 are reserved for classes */
+/* bits 2..20 are reserved for classes */
#define HIDPP_QUIRK_DELAYED_INIT BIT(21)
#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
-#define HIDPP_QUIRK_MULTI_INPUT BIT(23)
/*
* There are two hidpp protocols in use, the first version hidpp10 is known
@@ -706,12 +706,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
-
- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
- (field->application == HID_GD_KEYBOARD))
- return 0;
-
return -1;
}
@@ -720,10 +714,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
{
struct wtp_data *wd = hidpp->private_data;
- if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
- /* this is the generic hid-input call */
- return;
-
__set_bit(EV_ABS, input_dev->evbit);
__set_bit(EV_KEY, input_dev->evbit);
__clear_bit(EV_REL, input_dev->evbit);
@@ -941,6 +931,207 @@ static int wtp_connect(struct hid_device *hdev, bool connected)
true, true);
}
+/* ------------------------------------------------------------------------- */
+/* Logitech M560 devices */
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Logitech M560 protocol overview
+ *
+ * The Logitech M560 mouse is designed for Windows 8. When the middle and/or
+ * the side buttons are pressed, it sends keyboard key events
+ * instead of button ones.
+ * To complicate things further, the key sequence sent for the middle button
+ * differs between odd and even presses.
+ *
+ * forward button -> Super_R
+ * backward button -> Super_L+'d' (press only)
+ * middle button -> 1st time: Alt_L+SuperL+XF86TouchpadOff (press only)
+ * 2nd time: left-click (press only)
+ * NB: press-only means that when the button is pressed, the
+ * KeyPress/ButtonPress and KeyRelease/ButtonRelease events are generated
+ * together sequentially; when the button is released, no event is
+ * generated!
+ *
+ * With the command
+ * 10<xx>0a 3500af03 (where <xx> is the mouse id),
+ * the mouse reacts differently:
+ * - it never sends a keyboard key event
+ * - for the three mouse button it sends:
+ * middle button press 11<xx>0a 3500af00...
+ * side 1 button (forward) press 11<xx>0a 3500b000...
+ * side 2 button (backward) press 11<xx>0a 3500ae00...
+ * middle/side1/side2 button release 11<xx>0a 35000000...
+ */
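
As a reading aid for the overview above, the configuration write it describes maps directly onto the constants defined just below. The byte breakdown here is an editorial sketch, not part of the patch; the device index <xx> is filled in at runtime by the HID++ core:

    /* 10<xx>0a 3500af03 as a HID++ 1.0 short report:
     *   0x10        REPORT_ID_HIDPP_SHORT
     *   <xx>        device index on the receiver
     *   0x0a        M560_SUB_ID
     *   0x35        M560_BUTTON_MODE_REGISTER
     *   00 af 03    m560_config_parameter[]
     */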
+
+static const u8 m560_config_parameter[] = {0x00, 0xaf, 0x03};
+
+struct m560_private_data {
+ struct input_dev *input;
+};
+
+/* how buttons are mapped in the report */
+#define M560_MOUSE_BTN_LEFT 0x01
+#define M560_MOUSE_BTN_RIGHT 0x02
+#define M560_MOUSE_BTN_WHEEL_LEFT 0x08
+#define M560_MOUSE_BTN_WHEEL_RIGHT 0x10
+
+#define M560_SUB_ID 0x0a
+#define M560_BUTTON_MODE_REGISTER 0x35
+
+static int m560_send_config_command(struct hid_device *hdev, bool connected)
+{
+ struct hidpp_report response;
+ struct hidpp_device *hidpp_dev;
+
+ hidpp_dev = hid_get_drvdata(hdev);
+
+ if (!connected)
+ return -ENODEV;
+
+ return hidpp_send_rap_command_sync(
+ hidpp_dev,
+ REPORT_ID_HIDPP_SHORT,
+ M560_SUB_ID,
+ M560_BUTTON_MODE_REGISTER,
+ (u8 *)m560_config_parameter,
+ sizeof(m560_config_parameter),
+ &response
+ );
+}
+
+static int m560_allocate(struct hid_device *hdev)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct m560_private_data *d;
+
+ d = devm_kzalloc(&hdev->dev, sizeof(struct m560_private_data),
+ GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ hidpp->private_data = d;
+
+ return 0;
+};
+
+static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct m560_private_data *mydata = hidpp->private_data;
+
+ /* sanity check */
+ if (!mydata || !mydata->input) {
+ hid_err(hdev, "error in parameter\n");
+ return -EINVAL;
+ }
+
+ if (size < 7) {
+ hid_err(hdev, "error in report\n");
+ return 0;
+ }
+
+ if (data[0] == REPORT_ID_HIDPP_LONG &&
+ data[2] == M560_SUB_ID && data[6] == 0x00) {
+ /*
+ * m560 mouse report for middle, forward and backward button
+ *
+ * data[0] = 0x11
+ * data[1] = device-id
+ * data[2] = 0x0a
+ * data[5] = 0xaf -> middle
+ * 0xb0 -> forward
+ * 0xae -> backward
+ * 0x00 -> release all
+ * data[6] = 0x00
+ */
+
+ switch (data[5]) {
+ case 0xaf:
+ input_report_key(mydata->input, BTN_MIDDLE, 1);
+ break;
+ case 0xb0:
+ input_report_key(mydata->input, BTN_FORWARD, 1);
+ break;
+ case 0xae:
+ input_report_key(mydata->input, BTN_BACK, 1);
+ break;
+ case 0x00:
+ input_report_key(mydata->input, BTN_BACK, 0);
+ input_report_key(mydata->input, BTN_FORWARD, 0);
+ input_report_key(mydata->input, BTN_MIDDLE, 0);
+ break;
+ default:
+ hid_err(hdev, "error in report\n");
+ return 0;
+ }
+ input_sync(mydata->input);
+
+ } else if (data[0] == 0x02) {
+ /*
+ * Logitech M560 mouse report
+ *
+ * data[0] = type (0x02)
+ * data[1..2] = buttons
+ * data[3..5] = xy
+ * data[6] = wheel
+ */
+
+ int v;
+
+ input_report_key(mydata->input, BTN_LEFT,
+ !!(data[1] & M560_MOUSE_BTN_LEFT));
+ input_report_key(mydata->input, BTN_RIGHT,
+ !!(data[1] & M560_MOUSE_BTN_RIGHT));
+
+ if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT)
+ input_report_rel(mydata->input, REL_HWHEEL, -1);
+ else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT)
+ input_report_rel(mydata->input, REL_HWHEEL, 1);
+
+ v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
+ input_report_rel(mydata->input, REL_X, v);
+
+ v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12);
+ input_report_rel(mydata->input, REL_Y, v);
+
+ v = hid_snto32(data[6], 8);
+ input_report_rel(mydata->input, REL_WHEEL, v);
+
+ input_sync(mydata->input);
+ }
+
+ return 1;
+}
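
To make the 12-bit packing in the mouse report concrete, here is a worked decode with made-up bytes (hypothetical values, not from a real capture):

    /* Suppose data[3..5] = 0x34 0xf2 0xff:
     *   bits  0..11 of data+3 -> 0x234 -> hid_snto32(0x234, 12) = +564  (REL_X)
     *   bits 12..23 of data+3 -> 0xfff -> hid_snto32(0xfff, 12) =   -1  (REL_Y)
     * hid_field_extract() pulls the bit field out of the report buffer in
     * little-endian bit order and hid_snto32() sign-extends the 12-bit value.
     */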
+
+static void m560_populate_input(struct hidpp_device *hidpp,
+ struct input_dev *input_dev, bool origin_is_hid_core)
+{
+ struct m560_private_data *mydata = hidpp->private_data;
+
+ mydata->input = input_dev;
+
+ __set_bit(EV_KEY, mydata->input->evbit);
+ __set_bit(BTN_MIDDLE, mydata->input->keybit);
+ __set_bit(BTN_RIGHT, mydata->input->keybit);
+ __set_bit(BTN_LEFT, mydata->input->keybit);
+ __set_bit(BTN_BACK, mydata->input->keybit);
+ __set_bit(BTN_FORWARD, mydata->input->keybit);
+
+ __set_bit(EV_REL, mydata->input->evbit);
+ __set_bit(REL_X, mydata->input->relbit);
+ __set_bit(REL_Y, mydata->input->relbit);
+ __set_bit(REL_WHEEL, mydata->input->relbit);
+ __set_bit(REL_HWHEEL, mydata->input->relbit);
+}
+
+static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ return -1;
+}
+
/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -953,6 +1144,9 @@ static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
return wtp_input_mapping(hdev, hi, field, usage, bit, max);
+ else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560 &&
+ field->application != HID_GD_MOUSE)
+ return m560_input_mapping(hdev, hi, field, usage, bit, max);
return 0;
}
@@ -962,6 +1156,8 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
{
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
wtp_populate_input(hidpp, input, origin_is_hid_core);
+ else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
+ m560_populate_input(hidpp, input, origin_is_hid_core);
}
static void hidpp_input_configured(struct hid_device *hdev,
@@ -1049,6 +1245,8 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
return wtp_raw_event(hdev, data, size);
+ else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
+ return m560_raw_event(hdev, data, size);
return 0;
}
@@ -1126,6 +1324,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
ret = wtp_connect(hdev, connected);
if (ret)
return;
+ } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) {
+ ret = m560_send_config_command(hdev, connected);
+ if (ret)
+ return;
}
if (!connected || hidpp->delayed_input)
@@ -1201,7 +1403,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
ret = wtp_allocate(hdev, id);
if (ret)
- goto wtp_allocate_fail;
+ goto allocate_fail;
+ } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) {
+ ret = m560_allocate(hdev);
+ if (ret)
+ goto allocate_fail;
}
INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -1245,10 +1451,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
connect_mask &= ~HID_CONNECT_HIDINPUT;
- /* Re-enable hidinput for multi-input devices */
- if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
- connect_mask |= HID_CONNECT_HIDINPUT;
-
ret = hid_hw_start(hdev, connect_mask);
if (ret) {
hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
@@ -1268,7 +1470,7 @@ hid_hw_start_fail:
hid_parse_fail:
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
-wtp_allocate_fail:
+allocate_fail:
hid_set_drvdata(hdev, NULL);
return ret;
}
@@ -1296,11 +1498,10 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
- { /* Keyboard TK820 */
+ { /* Mouse logitech M560 */
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4102),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
- HIDPP_QUIRK_CLASS_WTP },
+ USB_VENDOR_ID_LOGITECH, 0x402d),
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
{ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index af935eb198c9..32a596f554af 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -280,6 +280,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP),
.driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
+ .driver_data = MS_HIDINPUT },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index 2180e0789b76..febb21ee190e 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -2,7 +2,7 @@
* Plantronics USB HID Driver
*
* Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
- * Copyright (c) 2014 Terry Junge <terry.junge@plantronics.com>
+ * Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com>
*/
/*
@@ -17,23 +17,138 @@
#include <linux/hid.h>
#include <linux/module.h>
+#define PLT_HID_1_0_PAGE 0xffa00000
+#define PLT_HID_2_0_PAGE 0xffa20000
+
+#define PLT_BASIC_TELEPHONY 0x0003
+#define PLT_BASIC_EXCEPTION 0x0005
+
+#define PLT_VOL_UP 0x00b1
+#define PLT_VOL_DOWN 0x00b2
+
+#define PLT1_VOL_UP (PLT_HID_1_0_PAGE | PLT_VOL_UP)
+#define PLT1_VOL_DOWN (PLT_HID_1_0_PAGE | PLT_VOL_DOWN)
+#define PLT2_VOL_UP (PLT_HID_2_0_PAGE | PLT_VOL_UP)
+#define PLT2_VOL_DOWN (PLT_HID_2_0_PAGE | PLT_VOL_DOWN)
+
+#define PLT_DA60 0xda60
+#define PLT_BT300_MIN 0x0413
+#define PLT_BT300_MAX 0x0418
+
+
+#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+
static int plantronics_input_mapping(struct hid_device *hdev,
struct hid_input *hi,
struct hid_field *field,
struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if (field->application == HID_CP_CONSUMERCONTROL
- && (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) {
- hid_dbg(hdev, "usage: %08x (appl: %08x) - defaulted\n",
- usage->hid, field->application);
- return 0;
+ unsigned short mapped_key;
+ unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
+
+ /* handle volume up/down mapping */
+ /* non-standard types or multi-HID interfaces - plt_type is PID */
+ if (!(plt_type & HID_USAGE_PAGE)) {
+ switch (plt_type) {
+ case PLT_DA60:
+ if (PLT_ALLOW_CONSUMER)
+ goto defaulted;
+ goto ignored;
+ default:
+ if (PLT_ALLOW_CONSUMER)
+ goto defaulted;
+ }
+ }
+ /* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
+ /* 'basic telephony compliant' - allow default consumer page map */
+ else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
+ (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) {
+ if (PLT_ALLOW_CONSUMER)
+ goto defaulted;
+ }
+ /* not 'basic telephony' - apply legacy mapping */
+ /* only map if the field is in the device's primary vendor page */
+ else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) {
+ switch (usage->hid) {
+ case PLT1_VOL_UP:
+ case PLT2_VOL_UP:
+ mapped_key = KEY_VOLUMEUP;
+ goto mapped;
+ case PLT1_VOL_DOWN:
+ case PLT2_VOL_DOWN:
+ mapped_key = KEY_VOLUMEDOWN;
+ goto mapped;
+ }
}
- hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n",
- usage->hid, field->application);
+/*
+ * Future mapping of call control or other usages, if and when keys are
+ * defined, would go here; otherwise, ignore everything else that was
+ * not mapped.
+ */
+ignored:
return -1;
+
+defaulted:
+ hid_dbg(hdev, "usage: %08x (appl: %08x) - defaulted\n",
+ usage->hid, field->application);
+ return 0;
+
+mapped:
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, mapped_key);
+ hid_dbg(hdev, "usage: %08x (appl: %08x) - mapped to key %d\n",
+ usage->hid, field->application, mapped_key);
+ return 1;
+}
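
For reference, the composite usage values tested in the switch above are plain ORs of the page and usage constants defined earlier in this file:

    /* PLT1_VOL_UP   = 0xffa00000 | 0x00b1 = 0xffa000b1
     * PLT1_VOL_DOWN = 0xffa00000 | 0x00b2 = 0xffa000b2
     * PLT2_VOL_UP   = 0xffa20000 | 0x00b1 = 0xffa200b1
     * PLT2_VOL_DOWN = 0xffa20000 | 0x00b2 = 0xffa200b2
     */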
+
+static unsigned long plantronics_device_type(struct hid_device *hdev)
+{
+ unsigned i, col_page;
+ unsigned long plt_type = hdev->product;
+
+ /* multi-HID interfaces? - plt_type is PID */
+ if (plt_type >= PLT_BT300_MIN && plt_type <= PLT_BT300_MAX)
+ goto exit;
+
+ /* determine primary vendor page */
+ for (i = 0; i < hdev->maxcollection; i++) {
+ col_page = hdev->collection[i].usage & HID_USAGE_PAGE;
+ if (col_page == PLT_HID_2_0_PAGE) {
+ plt_type = hdev->collection[i].usage;
+ break;
+ }
+ if (col_page == PLT_HID_1_0_PAGE)
+ plt_type = hdev->collection[i].usage;
+ }
+
+exit:
+ hid_dbg(hdev, "plt_type decoded as: %08lx\n", plt_type);
+ return plt_type;
+}
+
+static int plantronics_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err;
+ }
+
+ hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
+ HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
+ if (ret)
+ hid_err(hdev, "hw start failed\n");
+
+err:
+ return ret;
}
static const struct hid_device_id plantronics_devices[] = {
@@ -46,6 +161,7 @@ static struct hid_driver plantronics_driver = {
.name = "plantronics",
.id_table = plantronics_devices,
.input_mapping = plantronics_input_mapping,
+ .probe = plantronics_probe,
};
module_hid_driver(plantronics_driver);
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 91fab975063c..e3e98ccf137b 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -395,11 +395,10 @@ static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
/* break keys */
for (bit_index = 0; bit_index < 24; bit_index++) {
- key = pm->last_key[bit_index];
if (!((0x01 << bit_index) & bit_mask)) {
input_event(pm->input_ep82, EV_KEY,
pm->last_key[bit_index], 0);
- pm->last_key[bit_index] = 0;
+ pm->last_key[bit_index] = 0;
}
}
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 368ffdf2c0a3..4cf80bb276dc 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -29,9 +29,9 @@
#define RMI_SET_RMI_MODE_REPORT_ID 0x0f /* Feature Report */
/* flags */
-#define RMI_READ_REQUEST_PENDING BIT(0)
-#define RMI_READ_DATA_PENDING BIT(1)
-#define RMI_STARTED BIT(2)
+#define RMI_READ_REQUEST_PENDING 0
+#define RMI_READ_DATA_PENDING 1
+#define RMI_STARTED 2
/* device flags */
#define RMI_DEVICE BIT(0)
@@ -1013,6 +1013,7 @@ static int rmi_populate_f30(struct hid_device *hdev)
static int rmi_populate(struct hid_device *hdev)
{
+ struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
ret = rmi_scan_pdt(hdev);
@@ -1033,9 +1034,11 @@ static int rmi_populate(struct hid_device *hdev)
return ret;
}
- ret = rmi_populate_f30(hdev);
- if (ret)
- hid_warn(hdev, "Error while initializing F30 (%d).\n", ret);
+ if (!(data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)) {
+ ret = rmi_populate_f30(hdev);
+ if (ret)
+ hid_warn(hdev, "Error while initializing F30 (%d).\n", ret);
+ }
return 0;
}
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index c3f6f1e311ea..090a1ba0abb6 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -294,7 +294,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
if (!report)
return -EINVAL;
- mutex_lock(&hsdev->mutex);
+ mutex_lock(hsdev->mutex_ptr);
if (flag == SENSOR_HUB_SYNC) {
memset(&hsdev->pending, 0, sizeof(hsdev->pending));
init_completion(&hsdev->pending.ready);
@@ -328,7 +328,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
kfree(hsdev->pending.raw_data);
hsdev->pending.status = false;
}
- mutex_unlock(&hsdev->mutex);
+ mutex_unlock(hsdev->mutex_ptr);
return ret_val;
}
@@ -667,7 +667,14 @@ static int sensor_hub_probe(struct hid_device *hdev,
hsdev->vendor_id = hdev->vendor;
hsdev->product_id = hdev->product;
hsdev->usage = collection->usage;
- mutex_init(&hsdev->mutex);
+ hsdev->mutex_ptr = devm_kzalloc(&hdev->dev,
+ sizeof(struct mutex),
+ GFP_KERNEL);
+ if (!hsdev->mutex_ptr) {
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+ mutex_init(hsdev->mutex_ptr);
hsdev->start_collection_index = i;
if (last_hsdev)
last_hsdev->end_collection_index = i;
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 37845eccddb5..36b6470af947 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -166,6 +166,9 @@ static const struct hid_device_id sjoy_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD),
.driver_data = HID_QUIRK_MULTI_INPUT |
HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII),
+ .driver_data = HID_QUIRK_MULTI_INPUT |
+ HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ }
};
MODULE_DEVICE_TABLE(hid, sjoy_devices);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index ab4dd952b6ba..f77469d4edfb 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -42,9 +42,9 @@
#include <linux/i2c/i2c-hid.h>
/* flags */
-#define I2C_HID_STARTED (1 << 0)
-#define I2C_HID_RESET_PENDING (1 << 1)
-#define I2C_HID_READ_PENDING (1 << 2)
+#define I2C_HID_STARTED 0
+#define I2C_HID_RESET_PENDING 1
+#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@@ -862,6 +862,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
union acpi_object *obj;
struct acpi_device *adev;
acpi_handle handle;
+ int ret;
handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))
@@ -877,7 +878,9 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
pdata->hid_descriptor_address = obj->integer.value;
ACPI_FREE(obj);
- return acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
+ /* GPIOs are optional */
+ ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
+ return ret < 0 && ret != -ENXIO ? ret : 0;
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
@@ -1016,7 +1019,6 @@ static int i2c_hid_probe(struct i2c_client *client,
hid->driver_data = client;
hid->ll_driver = &i2c_hid_ll_driver;
hid->dev.parent = &client->dev;
- ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev));
hid->bus = BUS_I2C;
hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a775143e6265..53e7de7cb9e2 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -52,7 +52,6 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
- { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
@@ -61,6 +60,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
@@ -69,6 +69,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
@@ -88,6 +89,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
@@ -140,6 +142,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_INPUT_REPORTS },
+ { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
{ 0, 0 }
};
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 024f4d89d579..a533787a6d85 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -134,8 +134,10 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
extern const struct hid_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
-void wacom_setup_device_quirks(struct wacom_features *features);
-int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
+void wacom_setup_device_quirks(struct wacom *wacom);
+int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac);
+int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index e8607d096138..4c0ffca97bef 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -35,7 +35,11 @@ static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf,
do {
retval = hid_hw_raw_request(hdev, buf[0], buf, size, type,
HID_REQ_GET_REPORT);
- } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
+ } while ((retval == -ETIMEDOUT || retval == -EAGAIN) && --retries);
+
+ if (retval < 0)
+ hid_err(hdev, "wacom_get_report: ran out of retries "
+ "(last error = %d)\n", retval);
return retval;
}
@@ -48,7 +52,11 @@ static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf,
do {
retval = hid_hw_raw_request(hdev, buf[0], buf, size, type,
HID_REQ_SET_REPORT);
- } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
+ } while ((retval == -ETIMEDOUT || retval == -EAGAIN) && --retries);
+
+ if (retval < 0)
+ hid_err(hdev, "wacom_set_report: ran out of retries "
+ "(last error = %d)\n", retval);
return retval;
}
@@ -117,9 +125,16 @@ static void wacom_feature_mapping(struct hid_device *hdev,
break;
data[0] = field->report->id;
ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
- data, 2, 0);
- if (ret == 2)
+ data, 2, WAC_CMD_RETRIES);
+ if (ret == 2) {
features->touch_max = data[1];
+ } else {
+ features->touch_max = 16;
+ hid_warn(hdev, "wacom_feature_mapping: "
+ "could not get HID_DG_CONTACTMAX, "
+ "defaulting to %d\n",
+ features->touch_max);
+ }
kfree(data);
}
break;
@@ -181,7 +196,11 @@ static void wacom_usage_mapping(struct hid_device *hdev,
* X/Y values and some cases of invalid Digitizer X/Y
* values commonly reported.
*/
- if (!pen && !finger)
+ if (pen)
+ features->device_type |= WACOM_DEVICETYPE_PEN;
+ else if (finger)
+ features->device_type |= WACOM_DEVICETYPE_TOUCH;
+ else
return;
/*
@@ -198,14 +217,11 @@ static void wacom_usage_mapping(struct hid_device *hdev,
case HID_GD_X:
features->x_max = field->logical_maximum;
if (finger) {
- features->device_type = BTN_TOOL_FINGER;
features->x_phy = field->physical_maximum;
if (features->type != BAMBOO_PT) {
features->unit = field->unit;
features->unitExpo = field->unit_exponent;
}
- } else {
- features->device_type = BTN_TOOL_PEN;
}
break;
case HID_GD_Y:
@@ -237,7 +253,7 @@ static void wacom_post_parse_hid(struct hid_device *hdev,
if (features->type == HID_GENERIC) {
/* Any last-minute generic device setup */
if (features->touch_max > 1) {
- input_mt_init_slots(wacom_wac->input, wacom_wac->features.touch_max,
+ input_mt_init_slots(wacom_wac->touch_input, wacom_wac->features.touch_max,
INPUT_MT_DIRECT);
}
}
@@ -395,7 +411,7 @@ static int wacom_query_tablet_data(struct hid_device *hdev,
if (features->type == HID_GENERIC)
return wacom_hid_set_device_mode(hdev);
- if (features->device_type == BTN_TOOL_FINGER) {
+ if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
if (features->type > TABLETPC) {
/* MT Tablet PC touch */
return wacom_set_device_mode(hdev, 3, 4, 4);
@@ -409,7 +425,7 @@ static int wacom_query_tablet_data(struct hid_device *hdev,
else if (features->type == BAMBOO_PAD) {
return wacom_set_device_mode(hdev, 2, 2, 2);
}
- } else if (features->device_type == BTN_TOOL_PEN) {
+ } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
if (features->type <= BAMBOO_PT && features->type != WIRELESS) {
return wacom_set_device_mode(hdev, 2, 2, 2);
}
@@ -425,7 +441,6 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
struct usb_interface *intf = wacom->intf;
/* default features */
- features->device_type = BTN_TOOL_PEN;
features->x_fuzz = 4;
features->y_fuzz = 4;
features->pressure_fuzz = 0;
@@ -439,17 +454,13 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
*/
if (features->type == WIRELESS) {
if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
- features->device_type = 0;
+ features->device_type = WACOM_DEVICETYPE_NONE;
} else if (intf->cur_altsetting->desc.bInterfaceNumber == 2) {
- features->device_type = BTN_TOOL_FINGER;
+ features->device_type |= WACOM_DEVICETYPE_TOUCH;
features->pktlen = WACOM_PKGLEN_BBTOUCH3;
}
}
- /* only devices that support touch need to retrieve the info */
- if (features->type < BAMBOO_PT)
- return;
-
wacom_parse_hid(hdev, features);
}
@@ -527,9 +538,9 @@ static int wacom_add_shared_data(struct hid_device *hdev)
wacom_wac->shared = &data->shared;
- if (wacom_wac->features.device_type == BTN_TOOL_FINGER)
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type == BTN_TOOL_PEN)
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
wacom_wac->shared->pen = hdev;
out:
@@ -848,6 +859,9 @@ static int wacom_initialize_leds(struct wacom *wacom)
{
int error;
+ if (!(wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD))
+ return 0;
+
/* Initialize default values */
switch (wacom->wacom_wac.features.type) {
case INTUOS4S:
@@ -881,17 +895,14 @@ static int wacom_initialize_leds(struct wacom *wacom)
case INTUOSPS:
case INTUOSPM:
case INTUOSPL:
- if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) {
- wacom->led.select[0] = 0;
- wacom->led.select[1] = 0;
- wacom->led.llv = 32;
- wacom->led.hlv = 0;
- wacom->led.img_lum = 0;
-
- error = sysfs_create_group(&wacom->hdev->dev.kobj,
- &intuos5_led_attr_group);
- } else
- return 0;
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 32;
+ wacom->led.hlv = 0;
+ wacom->led.img_lum = 0;
+
+ error = sysfs_create_group(&wacom->hdev->dev.kobj,
+ &intuos5_led_attr_group);
break;
default:
@@ -914,6 +925,9 @@ static void wacom_destroy_leds(struct wacom *wacom)
if (!wacom->led_initialized)
return;
+ if (!(wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD))
+ return;
+
wacom->led_initialized = false;
switch (wacom->wacom_wac.features.type) {
@@ -937,9 +951,8 @@ static void wacom_destroy_leds(struct wacom *wacom)
case INTUOSPS:
case INTUOSPM:
case INTUOSPL:
- if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN)
- sysfs_remove_group(&wacom->hdev->dev.kobj,
- &intuos5_led_attr_group);
+ sysfs_remove_group(&wacom->hdev->dev.kobj,
+ &intuos5_led_attr_group);
break;
}
}
@@ -1117,7 +1130,7 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom)
if (!input_dev)
return NULL;
- input_dev->name = wacom_wac->name;
+ input_dev->name = wacom_wac->pen_name;
input_dev->phys = hdev->phys;
input_dev->dev.parent = &hdev->dev;
input_dev->open = wacom_open;
@@ -1136,27 +1149,33 @@ static void wacom_free_inputs(struct wacom *wacom)
{
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
- if (wacom_wac->input)
- input_free_device(wacom_wac->input);
+ if (wacom_wac->pen_input)
+ input_free_device(wacom_wac->pen_input);
+ if (wacom_wac->touch_input)
+ input_free_device(wacom_wac->touch_input);
if (wacom_wac->pad_input)
input_free_device(wacom_wac->pad_input);
- wacom_wac->input = NULL;
+ wacom_wac->pen_input = NULL;
+ wacom_wac->touch_input = NULL;
wacom_wac->pad_input = NULL;
}
static int wacom_allocate_inputs(struct wacom *wacom)
{
- struct input_dev *input_dev, *pad_input_dev;
+ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
- input_dev = wacom_allocate_input(wacom);
+ pen_input_dev = wacom_allocate_input(wacom);
+ touch_input_dev = wacom_allocate_input(wacom);
pad_input_dev = wacom_allocate_input(wacom);
- if (!input_dev || !pad_input_dev) {
+ if (!pen_input_dev || !touch_input_dev || !pad_input_dev) {
wacom_free_inputs(wacom);
return -ENOMEM;
}
- wacom_wac->input = input_dev;
+ wacom_wac->pen_input = pen_input_dev;
+ wacom_wac->touch_input = touch_input_dev;
+ wacom_wac->touch_input->name = wacom_wac->touch_name;
wacom_wac->pad_input = pad_input_dev;
wacom_wac->pad_input->name = wacom_wac->pad_name;
@@ -1165,11 +1184,17 @@ static int wacom_allocate_inputs(struct wacom *wacom)
static void wacom_clean_inputs(struct wacom *wacom)
{
- if (wacom->wacom_wac.input) {
- if (wacom->wacom_wac.input_registered)
- input_unregister_device(wacom->wacom_wac.input);
+ if (wacom->wacom_wac.pen_input) {
+ if (wacom->wacom_wac.pen_registered)
+ input_unregister_device(wacom->wacom_wac.pen_input);
else
- input_free_device(wacom->wacom_wac.input);
+ input_free_device(wacom->wacom_wac.pen_input);
+ }
+ if (wacom->wacom_wac.touch_input) {
+ if (wacom->wacom_wac.touch_registered)
+ input_unregister_device(wacom->wacom_wac.touch_input);
+ else
+ input_free_device(wacom->wacom_wac.touch_input);
}
if (wacom->wacom_wac.pad_input) {
if (wacom->wacom_wac.pad_registered)
@@ -1177,29 +1202,49 @@ static void wacom_clean_inputs(struct wacom *wacom)
else
input_free_device(wacom->wacom_wac.pad_input);
}
- wacom->wacom_wac.input = NULL;
+ wacom->wacom_wac.pen_input = NULL;
+ wacom->wacom_wac.touch_input = NULL;
wacom->wacom_wac.pad_input = NULL;
wacom_destroy_leds(wacom);
}
static int wacom_register_inputs(struct wacom *wacom)
{
- struct input_dev *input_dev, *pad_input_dev;
+ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
- int error;
+ int error = 0;
- input_dev = wacom_wac->input;
+ pen_input_dev = wacom_wac->pen_input;
+ touch_input_dev = wacom_wac->touch_input;
pad_input_dev = wacom_wac->pad_input;
- if (!input_dev || !pad_input_dev)
+ if (!pen_input_dev || !touch_input_dev || !pad_input_dev)
return -EINVAL;
- error = wacom_setup_pentouch_input_capabilities(input_dev, wacom_wac);
- if (!error) {
- error = input_register_device(input_dev);
+ error = wacom_setup_pen_input_capabilities(pen_input_dev, wacom_wac);
+ if (error) {
+ /* no pen in use on this interface */
+ input_free_device(pen_input_dev);
+ wacom_wac->pen_input = NULL;
+ pen_input_dev = NULL;
+ } else {
+ error = input_register_device(pen_input_dev);
+ if (error)
+ goto fail_register_pen_input;
+ wacom_wac->pen_registered = true;
+ }
+
+ error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
+ if (error) {
+ /* no touch in use on this interface */
+ input_free_device(touch_input_dev);
+ wacom_wac->touch_input = NULL;
+ touch_input_dev = NULL;
+ } else {
+ error = input_register_device(touch_input_dev);
if (error)
- return error;
- wacom_wac->input_registered = true;
+ goto fail_register_touch_input;
+ wacom_wac->touch_registered = true;
}
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
@@ -1226,9 +1271,14 @@ fail_leds:
pad_input_dev = NULL;
wacom_wac->pad_registered = false;
fail_register_pad_input:
- input_unregister_device(input_dev);
- wacom_wac->input = NULL;
- wacom_wac->input_registered = false;
+ input_unregister_device(touch_input_dev);
+ wacom_wac->touch_input = NULL;
+ wacom_wac->touch_registered = false;
+fail_register_touch_input:
+ input_unregister_device(pen_input_dev);
+ wacom_wac->pen_input = NULL;
+ wacom_wac->pen_registered = false;
+fail_register_pen_input:
return error;
}
@@ -1285,8 +1335,11 @@ static void wacom_wireless_work(struct work_struct *work)
/* Stylus interface */
wacom_wac1->features =
*((struct wacom_features *)id->driver_data);
- wacom_wac1->features.device_type = BTN_TOOL_PEN;
- snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
+ wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PEN;
+ if (wacom_wac1->features.type != INTUOSHT &&
+ wacom_wac1->features.type != BAMBOO_PT)
+ wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+ snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
wacom_wac1->features.name);
@@ -1304,16 +1357,16 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac2->features =
*((struct wacom_features *)id->driver_data);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
- wacom_wac2->features.device_type = BTN_TOOL_FINGER;
wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
- if (wacom_wac2->features.touch_max)
- snprintf(wacom_wac2->name, WACOM_NAME_MAX,
- "%s (WL) Finger",wacom_wac2->features.name);
- else
- snprintf(wacom_wac2->name, WACOM_NAME_MAX,
- "%s (WL) Pad",wacom_wac2->features.name);
+ snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
+ "%s (WL) Finger",wacom_wac2->features.name);
snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
- "%s (WL) Pad", wacom_wac2->features.name);
+ "%s (WL) Pad",wacom_wac2->features.name);
+ if (wacom_wac1->features.touch_max)
+ wacom_wac2->features.device_type |= WACOM_DEVICETYPE_TOUCH;
+ if (wacom_wac1->features.type == INTUOSHT ||
+ wacom_wac1->features.type == BAMBOO_PT)
+ wacom_wac2->features.device_type |= WACOM_DEVICETYPE_PAD;
wacom_wac2->pid = wacom_wac->pid;
error = wacom_allocate_inputs(wacom2) ||
wacom_register_inputs(wacom2);
@@ -1322,7 +1375,7 @@ static void wacom_wireless_work(struct work_struct *work)
if (wacom_wac1->features.type == INTUOSHT &&
wacom_wac1->features.touch_max)
- wacom_wac->shared->touch_input = wacom_wac2->input;
+ wacom_wac->shared->touch_input = wacom_wac2->touch_input;
}
error = wacom_initialize_battery(wacom);
@@ -1369,6 +1422,12 @@ static void wacom_set_default_phy(struct wacom_features *features)
static void wacom_calculate_res(struct wacom_features *features)
{
+ /* set unit to "100th of a mm" for devices not reported by HID */
+ if (!features->unit) {
+ features->unit = 0x11;
+ features->unitExpo = -3;
+ }
+
features->x_resolution = wacom_calc_hid_res(features->x_max,
features->x_phy,
features->unit,
@@ -1396,6 +1455,49 @@ static size_t wacom_compute_pktlen(struct hid_device *hdev)
return size;
}
+static void wacom_update_name(struct wacom *wacom)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ char name[WACOM_NAME_MAX];
+
+ /* Generic devices leave the name unspecified */
+ if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
+ if (strstr(wacom->hdev->name, "Wacom") ||
+ strstr(wacom->hdev->name, "wacom") ||
+ strstr(wacom->hdev->name, "WACOM")) {
+ /* name is in HID descriptor, use it */
+ strlcpy(name, wacom->hdev->name, sizeof(name));
+
+ /* strip out excess whitespaces */
+ while (1) {
+ char *gap = strstr(name, " ");
+ if (gap == NULL)
+ break;
+ /* shift everything including the terminator */
+ memmove(gap, gap+1, strlen(gap));
+ }
+ /* get rid of trailing whitespace */
+ if (name[strlen(name)-1] == ' ')
+ name[strlen(name)-1] = '\0';
+ } else {
+ /* no meaningful name retrieved. use product ID */
+ snprintf(name, sizeof(name),
+ "%s %X", features->name, wacom->hdev->product);
+ }
+ } else {
+ strlcpy(name, features->name, sizeof(name));
+ }
+
+ /* Append the device type to the name */
+ snprintf(wacom_wac->pen_name, sizeof(wacom_wac->pen_name),
+ "%s Pen", name);
+ snprintf(wacom_wac->touch_name, sizeof(wacom_wac->touch_name),
+ "%s Finger", name);
+ snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name),
+ "%s Pad", name);
+}
+
static int wacom_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -1474,64 +1576,25 @@ static int wacom_probe(struct hid_device *hdev,
/* Retrieve the physical and logical size for touch devices */
wacom_retrieve_hid_descriptor(hdev, features);
+ wacom_setup_device_quirks(wacom);
- /*
- * Intuos5 has no useful data about its touch interface in its
- * HID descriptor. If this is the touch interface (PacketSize
- * of WACOM_PKGLEN_BBTOUCH3), override the table values.
- */
- if (features->type >= INTUOS5S && features->type <= INTUOSHT) {
- if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
- features->device_type = BTN_TOOL_FINGER;
+ if (features->device_type == WACOM_DEVICETYPE_NONE &&
+ features->type != WIRELESS) {
+ error = features->type == HID_GENERIC ? -ENODEV : 0;
- features->x_max = 4096;
- features->y_max = 4096;
- } else {
- features->device_type = BTN_TOOL_PEN;
- }
- }
+ dev_warn(&hdev->dev, "Unknown device_type for '%s'. %s.",
+ hdev->name,
+ error ? "Ignoring" : "Assuming pen");
- /*
- * Same thing for Bamboo 3rd gen.
- */
- if ((features->type == BAMBOO_PT) &&
- (features->pktlen == WACOM_PKGLEN_BBTOUCH3) &&
- (features->device_type == BTN_TOOL_PEN)) {
- features->device_type = BTN_TOOL_FINGER;
+ if (error)
+ goto fail_shared_data;
- features->x_max = 4096;
- features->y_max = 4096;
+ features->device_type |= WACOM_DEVICETYPE_PEN;
}
- /*
- * Same thing for Bamboo PAD
- */
- if (features->type == BAMBOO_PAD)
- features->device_type = BTN_TOOL_FINGER;
-
- if (hdev->bus == BUS_BLUETOOTH)
- features->quirks |= WACOM_QUIRK_BATTERY;
-
- wacom_setup_device_quirks(features);
-
- /* set unit to "100th of a mm" for devices not reported by HID */
- if (!features->unit) {
- features->unit = 0x11;
- features->unitExpo = -3;
- }
wacom_calculate_res(features);
- strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
- snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name),
- "%s Pad", features->name);
-
- /* Append the device type to the name */
- if (features->device_type != BTN_TOOL_FINGER)
- strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX);
- else if (features->touch_max)
- strlcat(wacom_wac->name, " Finger", WACOM_NAME_MAX);
- else
- strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX);
+ wacom_update_name(wacom);
error = wacom_add_shared_data(hdev);
if (error)
@@ -1574,9 +1637,9 @@ static int wacom_probe(struct hid_device *hdev,
if (features->quirks & WACOM_QUIRK_MONITOR)
error = hid_hw_open(hdev);
- if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) {
- if (wacom_wac->features.device_type == BTN_TOOL_FINGER)
- wacom_wac->shared->touch_input = wacom_wac->input;
+ if (wacom_wac->features.type == INTUOSHT &&
+ wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) {
+ wacom_wac->shared->touch_input = wacom_wac->touch_input;
}
return 0;
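/*
 * Illustrative sketch only, not part of the patch: with device_type now a
 * bitmask built from the WACOM_DEVICETYPE_* flags (defined in wacom_wac.h
 * further below), a single interface can advertise pen, touch and pad
 * support at once, which the old BTN_TOOL_PEN/BTN_TOOL_FINGER equality
 * tests could not express. The helpers below are hypothetical and exist
 * only to show the intended checks.
 */
static inline bool wacom_is_pen(const struct wacom_features *features)
{
	return features->device_type & WACOM_DEVICETYPE_PEN;
}

static inline bool wacom_is_pen_and_pad(const struct wacom_features *features)
{
	unsigned int mask = WACOM_DEVICETYPE_PEN | WACOM_DEVICETYPE_PAD;

	/* both bits can be set on a shared pen/pad interface */
	return (features->device_type & mask) == mask;
}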
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index fa54d3290659..232da89f4e88 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -69,7 +69,7 @@ static void wacom_notify_battery(struct wacom_wac *wacom_wac,
static int wacom_penpartner_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
switch (data[0]) {
case 1:
@@ -114,7 +114,7 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
int prox, pressure;
if (data[0] != WACOM_REPORT_PENABLED) {
@@ -186,7 +186,7 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
static int wacom_ptu_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
if (data[0] != WACOM_REPORT_PENABLED) {
dev_dbg(input->dev.parent,
@@ -215,7 +215,7 @@ static int wacom_ptu_irq(struct wacom_wac *wacom)
static int wacom_dtu_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
int prox = data[1] & 0x20;
dev_dbg(input->dev.parent,
@@ -245,7 +245,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
static int wacom_dtus_irq(struct wacom_wac *wacom)
{
char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
unsigned short prox, pressure = 0;
if (data[0] != WACOM_REPORT_DTUS && data[0] != WACOM_REPORT_DTUSPAD) {
@@ -297,7 +297,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
struct input_dev *pad_input = wacom->pad_input;
int battery_capacity, ps_connected;
int prox;
@@ -464,7 +464,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
int idx = 0;
/* tool number */
@@ -649,7 +649,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
unsigned int t;
/* general pen packet */
@@ -681,7 +681,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
unsigned int t;
int idx = 0, result;
@@ -1025,7 +1025,7 @@ static void wacom_intuos_bt_process_data(struct wacom_wac *wacom,
memcpy(wacom->data, data, 10);
wacom_intuos_irq(wacom);
- input_sync(wacom->input);
+ input_sync(wacom->pen_input);
if (wacom->pad_input)
input_sync(wacom->pad_input);
}
@@ -1057,7 +1057,7 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
ps_connected);
break;
default:
- dev_dbg(wacom->input->dev.parent,
+ dev_dbg(wacom->pen_input->dev.parent,
"Unknown report: %d,%d size:%zu\n",
data[0], data[1], len);
return 0;
@@ -1067,14 +1067,16 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
{
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
unsigned touch_max = wacom->features.touch_max;
int count = 0;
int i;
- /* non-HID_GENERIC single touch input doesn't call this routine */
- if ((touch_max == 1) && (wacom->features.type == HID_GENERIC))
- return wacom->hid_data.tipswitch &&
+ if (!touch_max)
+ return 0;
+
+ if (touch_max == 1)
+ return test_bit(BTN_TOUCH, input->key) &&
!wacom->shared->stylus_in_proximity;
for (i = 0; i < input->mt->num_slots; i++) {
@@ -1089,7 +1091,7 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
static int wacom_24hdt_irq(struct wacom_wac *wacom)
{
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
unsigned char *data = wacom->data;
int i;
int current_num_contacts = data[61];
@@ -1157,7 +1159,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
static int wacom_mt_touch(struct wacom_wac *wacom)
{
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
unsigned char *data = wacom->data;
int i;
int current_num_contacts = data[2];
@@ -1208,7 +1210,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
{
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
unsigned char *data = wacom->data;
int i;
@@ -1237,7 +1239,7 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
{
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
bool prox = !wacom->shared->stylus_in_proximity;
int x = 0, y = 0;
@@ -1273,7 +1275,7 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
static int wacom_tpc_pen(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
bool prox = data[1] & 0x20;
if (!wacom->shared->stylus_in_proximity) /* first in prox */
@@ -1302,8 +1304,12 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
unsigned char *data = wacom->data;
- dev_dbg(wacom->input->dev.parent,
- "%s: received report #%d\n", __func__, data[0]);
+ if (wacom->pen_input)
+ dev_dbg(wacom->pen_input->dev.parent,
+ "%s: received report #%d\n", __func__, data[0]);
+ else if (wacom->touch_input)
+ dev_dbg(wacom->touch_input->dev.parent,
+ "%s: received report #%d\n", __func__, data[0]);
switch (len) {
case WACOM_PKGLEN_TPC1FG:
@@ -1335,11 +1341,9 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
-static void wacom_map_usage(struct wacom *wacom, struct hid_usage *usage,
+static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
struct hid_field *field, __u8 type, __u16 code, int fuzz)
{
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct input_dev *input = wacom_wac->input;
int fmin = field->logical_minimum;
int fmax = field->logical_maximum;
@@ -1367,36 +1371,38 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->pen_input;
switch (usage->hid) {
case HID_GD_X:
- wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
break;
case HID_GD_Y:
- wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
break;
case HID_DG_TIPPRESSURE:
- wacom_map_usage(wacom, usage, field, EV_ABS, ABS_PRESSURE, 0);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0);
break;
case HID_DG_INRANGE:
- wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
case HID_DG_INVERT:
- wacom_map_usage(wacom, usage, field, EV_KEY,
+ wacom_map_usage(input, usage, field, EV_KEY,
BTN_TOOL_RUBBER, 0);
break;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
- wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0);
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
break;
case HID_DG_BARRELSWITCH:
- wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS, 0);
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS, 0);
break;
case HID_DG_BARRELSWITCH2:
- wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS2, 0);
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0);
break;
case HID_DG_TOOLSERIALNUMBER:
- wacom_map_usage(wacom, usage, field, EV_MSC, MSC_SERIAL, 0);
+ wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
break;
}
}
@@ -1406,7 +1412,7 @@ static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct input_dev *input = wacom_wac->input;
+ struct input_dev *input = wacom_wac->pen_input;
/* checking which Tool / tip switch to send */
switch (usage->hid) {
@@ -1436,7 +1442,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct input_dev *input = wacom_wac->input;
+ struct input_dev *input = wacom_wac->pen_input;
bool prox = wacom_wac->hid_data.inrange_state;
if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
@@ -1465,23 +1471,24 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
switch (usage->hid) {
case HID_GD_X:
features->last_slot_field = usage->hid;
if (touch_max == 1)
- wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
else
- wacom_map_usage(wacom, usage, field, EV_ABS,
+ wacom_map_usage(input, usage, field, EV_ABS,
ABS_MT_POSITION_X, 4);
break;
case HID_GD_Y:
features->last_slot_field = usage->hid;
if (touch_max == 1)
- wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
else
- wacom_map_usage(wacom, usage, field, EV_ABS,
+ wacom_map_usage(input, usage, field, EV_ABS,
ABS_MT_POSITION_Y, 4);
break;
case HID_DG_CONTACTID:
@@ -1495,7 +1502,7 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
break;
case HID_DG_TIPSWITCH:
features->last_slot_field = usage->hid;
- wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0);
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
break;
}
}
@@ -1551,7 +1558,7 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
if (usage->usage_index + 1 == field->report_count) {
if (usage->hid == wacom_wac->features.last_slot_field)
- wacom_wac_finger_slot(wacom_wac, wacom_wac->input);
+ wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
return 0;
@@ -1562,7 +1569,7 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct input_dev *input = wacom_wac->input;
+ struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
if (touch_max > 1)
@@ -1579,10 +1586,10 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct input_dev *input = wacom_wac->input;
/* currently, only direct devices have proper hid report descriptors */
- __set_bit(INPUT_PROP_DIRECT, input->propbit);
+ __set_bit(INPUT_PROP_DIRECT, wacom_wac->pen_input->propbit);
+ __set_bit(INPUT_PROP_DIRECT, wacom_wac->touch_input->propbit);
if (WACOM_PEN_FIELD(field))
return wacom_wac_pen_usage_mapping(hdev, field, usage);
@@ -1627,7 +1634,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
static int wacom_bpt_touch(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
struct input_dev *pad_input = wacom->pad_input;
unsigned char *data = wacom->data;
int i;
@@ -1675,7 +1682,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
{
struct wacom_features *features = &wacom->features;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
bool touch = data[1] & 0x80;
int slot = input_mt_get_slot_by_key(input, data[0]);
@@ -1733,7 +1740,6 @@ static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
static int wacom_bpt3_touch(struct wacom_wac *wacom)
{
- struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
int count = data[1] & 0x07;
int i;
@@ -1752,8 +1758,12 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
wacom_bpt3_button_msg(wacom, data + offset);
}
- input_mt_sync_frame(input);
- wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+
+ /* only update the touch if we actually have a touchpad */
+ if (wacom->touch_registered) {
+ input_mt_sync_frame(wacom->touch_input);
+ wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+ }
return 1;
}
@@ -1761,7 +1771,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
static int wacom_bpt_pen(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pen_input;
unsigned char *data = wacom->data;
int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
@@ -1870,7 +1880,7 @@ static void wacom_bamboo_pad_pen_event(struct wacom_wac *wacom,
static int wacom_bamboo_pad_touch_event(struct wacom_wac *wacom,
unsigned char *data)
{
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->touch_input;
unsigned char *finger_data, prefix;
unsigned id;
int x, y;
@@ -2114,7 +2124,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
}
if (sync) {
- input_sync(wacom_wac->input);
+ if (wacom_wac->pen_input)
+ input_sync(wacom_wac->pen_input);
+ if (wacom_wac->touch_input)
+ input_sync(wacom_wac->touch_input);
if (wacom_wac->pad_input)
input_sync(wacom_wac->pad_input);
}
@@ -2122,7 +2135,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
{
- struct input_dev *input_dev = wacom_wac->input;
+ struct input_dev *input_dev = wacom_wac->pen_input;
input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
@@ -2145,7 +2158,7 @@ static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
{
- struct input_dev *input_dev = wacom_wac->input;
+ struct input_dev *input_dev = wacom_wac->pen_input;
input_set_capability(input_dev, EV_REL, REL_WHEEL);
@@ -2164,15 +2177,57 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
}
-void wacom_setup_device_quirks(struct wacom_features *features)
+void wacom_setup_device_quirks(struct wacom *wacom)
{
+ struct wacom_features *features = &wacom->wacom_wac.features;
+
+ /* The pen and pad share the same interface on most devices */
+ if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
+ features->type == DTUS || features->type == WACOM_MO ||
+ (features->type >= INTUOS3S && features->type <= WACOM_13HD &&
+ features->type != INTUOSHT)) {
+ if (features->device_type & WACOM_DEVICETYPE_PEN)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ }
/* touch device found but size is not defined. use default */
- if (features->device_type == BTN_TOOL_FINGER && !features->x_max) {
+ if (features->device_type & WACOM_DEVICETYPE_TOUCH && !features->x_max) {
features->x_max = 1023;
features->y_max = 1023;
}
+ /*
+ * Intuos5/Pro and Bamboo 3rd gen have no useful data about their
+ * touch interface in their HID descriptors. If this is the touch
+ * interface (PacketSize of WACOM_PKGLEN_BBTOUCH3), override the
+ * tablet values.
+ */
+ if ((features->type >= INTUOS5S && features->type <= INTUOSHT) ||
+ (features->type == BAMBOO_PT)) {
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+ if (features->touch_max)
+ features->device_type |= WACOM_DEVICETYPE_TOUCH;
+ if (features->type == BAMBOO_PT || features->type == INTUOSHT)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ features->x_max = 4096;
+ features->y_max = 4096;
+ }
+ }
+
+ /*
+ * Raw Wacom-mode pen and touch events both come from interface
+ * 0, whose HID descriptor has an application usage of 0xFF0D
+ * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+ * out through the HID_GENERIC device created for interface 1,
+ * so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PAD)
+ features->device_type |= WACOM_DEVICETYPE_TOUCH;
+
+ if (wacom->hdev->bus == BUS_BLUETOOTH)
+ features->quirks |= WACOM_QUIRK_BATTERY;
+
/* quirk for bamboo touch with 2 low res touches */
if (features->type == BAMBOO_PT &&
features->pktlen == WACOM_PKGLEN_BBTOUCH) {
@@ -2189,61 +2244,23 @@ void wacom_setup_device_quirks(struct wacom_features *features)
features->quirks |= WACOM_QUIRK_NO_INPUT;
/* must be monitor interface if no device_type set */
- if (!features->device_type) {
+ if (features->device_type == WACOM_DEVICETYPE_NONE) {
features->quirks |= WACOM_QUIRK_MONITOR;
features->quirks |= WACOM_QUIRK_BATTERY;
}
}
}
-static void wacom_abs_set_axis(struct input_dev *input_dev,
- struct wacom_wac *wacom_wac)
-{
- struct wacom_features *features = &wacom_wac->features;
-
- if (features->device_type == BTN_TOOL_PEN) {
- input_set_abs_params(input_dev, ABS_X, features->x_min,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, features->y_min,
- features->y_max, features->y_fuzz, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0,
- features->pressure_max, features->pressure_fuzz, 0);
-
- /* penabled devices have fixed resolution for each model */
- input_abs_set_res(input_dev, ABS_X, features->x_resolution);
- input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
- } else {
- if (features->touch_max == 1) {
- input_set_abs_params(input_dev, ABS_X, 0,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, 0,
- features->y_max, features->y_fuzz, 0);
- input_abs_set_res(input_dev, ABS_X,
- features->x_resolution);
- input_abs_set_res(input_dev, ABS_Y,
- features->y_resolution);
- }
-
- if (features->touch_max > 1) {
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
- features->y_max, features->y_fuzz, 0);
- input_abs_set_res(input_dev, ABS_MT_POSITION_X,
- features->x_resolution);
- input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
- features->y_resolution);
- }
- }
-}
-
-int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
+int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ if (!(features->device_type & WACOM_DEVICETYPE_PEN))
+ return -ENODEV;
+
if (features->type == HID_GENERIC)
/* setup has already been done */
return 0;
@@ -2251,7 +2268,17 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
- wacom_abs_set_axis(input_dev, wacom_wac);
+ input_set_abs_params(input_dev, ABS_X, features->x_min,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, features->y_min,
+ features->y_max, features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0,
+ features->pressure_max, features->pressure_fuzz, 0);
+
+ /* penabled devices have fixed resolution for each model */
+ input_abs_set_res(input_dev, ABS_X, features->x_resolution);
+ input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
+
switch (features->type) {
case GRAPHIRE_BT:
@@ -2320,53 +2347,25 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
case INTUOSPS:
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- if (features->device_type == BTN_TOOL_PEN) {
- input_set_abs_params(input_dev, ABS_DISTANCE, 0,
- features->distance_max,
- 0, 0);
-
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
- input_abs_set_res(input_dev, ABS_Z, 287);
+ input_set_abs_params(input_dev, ABS_DISTANCE, 0,
+ features->distance_max,
+ 0, 0);
- wacom_setup_intuos(wacom_wac);
- } else if (features->device_type == BTN_TOOL_FINGER) {
- __clear_bit(ABS_MISC, input_dev->absbit);
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, features->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
- 0, features->y_max, 0, 0);
- input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
- }
+ wacom_setup_intuos(wacom_wac);
break;
case WACOM_24HDT:
- if (features->device_type == BTN_TOOL_FINGER) {
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- }
- /* fall through */
-
case WACOM_27QHDT:
case MTSCREEN:
case MTTPC:
case MTTPC_B:
case TABLETPC2FG:
- if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
- input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
- /* fall through */
-
case TABLETPC:
case TABLETPCE:
__clear_bit(ABS_MISC, input_dev->absbit);
-
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
-
- if (features->device_type != BTN_TOOL_PEN)
- break; /* no need to process stylus stuff */
-
/* fall through */
case DTUS:
@@ -2394,50 +2393,114 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
break;
case INTUOSHT:
- if (features->touch_max &&
- features->device_type == BTN_TOOL_FINGER) {
- input_dev->evbit[0] |= BIT_MASK(EV_SW);
- __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
- }
- /* fall through */
-
case BAMBOO_PT:
__clear_bit(ABS_MISC, input_dev->absbit);
- if (features->device_type == BTN_TOOL_FINGER) {
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ input_set_abs_params(input_dev, ABS_DISTANCE, 0,
+ features->distance_max,
+ 0, 0);
+ break;
+ case BAMBOO_PAD:
+ __clear_bit(ABS_MISC, input_dev->absbit);
+ break;
+ }
+ return 0;
+}
- if (features->touch_max) {
- if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
- input_set_abs_params(input_dev,
- ABS_MT_TOUCH_MAJOR,
- 0, features->x_max, 0, 0);
- input_set_abs_params(input_dev,
- ABS_MT_TOUCH_MINOR,
- 0, features->y_max, 0, 0);
- }
- input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
- } else {
- /* buttons/keys only interface */
- __clear_bit(ABS_X, input_dev->absbit);
- __clear_bit(ABS_Y, input_dev->absbit);
- __clear_bit(BTN_TOUCH, input_dev->keybit);
+int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
+{
+ struct wacom_features *features = &wacom_wac->features;
- /* PAD is setup by wacom_setup_pad_input_capabilities later */
- return 1;
- }
- } else if (features->device_type == BTN_TOOL_PEN) {
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
- __set_bit(BTN_TOOL_PEN, input_dev->keybit);
- __set_bit(BTN_STYLUS, input_dev->keybit);
- __set_bit(BTN_STYLUS2, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_DISTANCE, 0,
- features->distance_max,
- 0, 0);
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
+ return -ENODEV;
+
+ if (features->type == HID_GENERIC)
+ /* setup has already been done */
+ return 0;
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ if (features->touch_max == 1) {
+ input_set_abs_params(input_dev, ABS_X, 0,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0,
+ features->y_max, features->y_fuzz, 0);
+ input_abs_set_res(input_dev, ABS_X,
+ features->x_resolution);
+ input_abs_set_res(input_dev, ABS_Y,
+ features->y_resolution);
+ }
+ else if (features->touch_max > 1) {
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ features->y_max, features->y_fuzz, 0);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X,
+ features->x_resolution);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
+ features->y_resolution);
+ }
+
+ switch (features->type) {
+ case INTUOS5:
+ case INTUOS5L:
+ case INTUOSPM:
+ case INTUOSPL:
+ case INTUOS5S:
+ case INTUOSPS:
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0);
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
+ break;
+
+ case WACOM_24HDT:
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ /* fall through */
+
+ case WACOM_27QHDT:
+ case MTSCREEN:
+ case MTTPC:
+ case MTTPC_B:
+ case TABLETPC2FG:
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
+ /* fall through */
+
+ case TABLETPC:
+ case TABLETPCE:
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ break;
+
+ case INTUOSHT:
+ input_dev->evbit[0] |= BIT_MASK(EV_SW);
+ __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
+ /* fall through */
+
+ case BAMBOO_PT:
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+ input_set_abs_params(input_dev,
+ ABS_MT_TOUCH_MAJOR,
+ 0, features->x_max, 0, 0);
+ input_set_abs_params(input_dev,
+ ABS_MT_TOUCH_MINOR,
+ 0, features->y_max, 0, 0);
}
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
break;
+
case BAMBOO_PAD:
- __clear_bit(ABS_MISC, input_dev->absbit);
input_mt_init_slots(input_dev, features->touch_max,
INPUT_MT_POINTER);
__set_bit(BTN_LEFT, input_dev->keybit);
@@ -2453,6 +2516,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_features *features = &wacom_wac->features;
int i;
+ if (!(features->device_type & WACOM_DEVICETYPE_PAD))
+ return -ENODEV;
+
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
/* kept for making legacy xf86-input-wacom working with the wheels */
@@ -2589,10 +2655,6 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOS5S:
case INTUOSPS:
- /* touch interface does not have the pad device */
- if (features->device_type != BTN_TOOL_PEN)
- return -ENODEV;
-
for (i = 0; i < 7; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
@@ -2634,12 +2696,6 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOSHT:
case BAMBOO_PT:
- /* pad device is on the touch interface */
- if ((features->device_type != BTN_TOOL_FINGER) ||
- /* Bamboo Pen only tablet does not have pad */
- ((features->type == BAMBOO_PT) && !features->touch_max))
- return -ENODEV;
-
__clear_bit(ABS_MISC, input_dev->absbit);
__set_bit(BTN_LEFT, input_dev->keybit);
@@ -2919,6 +2975,9 @@ static const struct wacom_features wacom_features_0x32F =
{ "Wacom DTU1031X", 22472, 12728, 511, 0,
DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+static const struct wacom_features wacom_features_0x336 =
+ { "Wacom DTU1141", 23472, 13203, 1023, 0,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x57 =
{ "Wacom DTK2241", 95640, 54060, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
@@ -3272,6 +3331,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x32F) },
{ USB_DEVICE_WACOM(0x333) },
{ USB_DEVICE_WACOM(0x335) },
+ { USB_DEVICE_WACOM(0x336) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
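/*
 * Illustrative sketch only, not part of the patch: the touch handlers above
 * post contacts to the new touch_input device but still consult the shared
 * stylus_in_proximity flag, so touches are suppressed while a pen is in
 * range. "report_touch_contact" is a hypothetical helper shown purely to
 * summarize that pattern.
 */
static void report_touch_contact(struct wacom_wac *wacom, int slot,
				 bool tip, int x, int y)
{
	struct input_dev *input = wacom->touch_input;
	bool prox = tip && !wacom->shared->stylus_in_proximity;

	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
	if (prox) {
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);
	}
}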
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 4700ac994a3b..2978c303909d 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -18,10 +18,7 @@
#define WACOM_NAME_MAX 64
/* packet length for individual models */
-#define WACOM_PKGLEN_PENPRTN 7
-#define WACOM_PKGLEN_GRAPHIRE 8
#define WACOM_PKGLEN_BBFUN 9
-#define WACOM_PKGLEN_INTUOS 10
#define WACOM_PKGLEN_TPC1FG 5
#define WACOM_PKGLEN_TPC1FG_B 10
#define WACOM_PKGLEN_TPC2FG 14
@@ -29,9 +26,6 @@
#define WACOM_PKGLEN_BBTOUCH3 64
#define WACOM_PKGLEN_BBPEN 10
#define WACOM_PKGLEN_WIRELESS 32
-#define WACOM_PKGLEN_MTOUCH 62
-#define WACOM_PKGLEN_MTTPC 40
-#define WACOM_PKGLEN_DTUS 68
#define WACOM_PKGLEN_PENABLED 8
#define WACOM_PKGLEN_BPAD_TOUCH 32
#define WACOM_PKGLEN_BPAD_TOUCH_USB 64
@@ -78,10 +72,20 @@
#define WACOM_QUIRK_MONITOR 0x0004
#define WACOM_QUIRK_BATTERY 0x0008
+/* device types */
+#define WACOM_DEVICETYPE_NONE 0x0000
+#define WACOM_DEVICETYPE_PEN 0x0001
+#define WACOM_DEVICETYPE_TOUCH 0x0002
+#define WACOM_DEVICETYPE_PAD 0x0004
+
+#define WACOM_VENDORDEFINED_PEN 0xff0d0001
+
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_PEN) || \
- ((f)->application == HID_DG_PEN))
+ ((f)->application == HID_DG_PEN) || \
+ ((f)->application == HID_DG_DIGITIZER) || \
+ ((f)->application == WACOM_VENDORDEFINED_PEN))
#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
((f)->physical == HID_DG_FINGER) || \
((f)->application == HID_DG_TOUCHSCREEN))
@@ -192,7 +196,8 @@ struct hid_data {
};
struct wacom_wac {
- char name[WACOM_NAME_MAX];
+ char pen_name[WACOM_NAME_MAX];
+ char touch_name[WACOM_NAME_MAX];
char pad_name[WACOM_NAME_MAX];
char bat_name[WACOM_NAME_MAX];
char ac_name[WACOM_NAME_MAX];
@@ -203,9 +208,11 @@ struct wacom_wac {
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
- struct input_dev *input;
+ struct input_dev *pen_input;
+ struct input_dev *touch_input;
struct input_dev *pad_input;
- bool input_registered;
+ bool pen_registered;
+ bool touch_registered;
bool pad_registered;
int pid;
int battery_capacity;
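/*
 * Illustrative sketch only, not part of the patch: with the pen field test
 * extended to HID_DG_DIGITIZER and the vendor-defined usage 0xff0d0001
 * (WACOM_VENDORDEFINED_PEN), incoming HID fields can be routed to the
 * separate pen_input/touch_input devices by field type. The helper name
 * below is hypothetical.
 */
static struct input_dev *wacom_input_for_field(struct wacom_wac *wacom,
					       struct hid_field *field)
{
	if (WACOM_PEN_FIELD(field))
		return wacom->pen_input;
	if (WACOM_FINGER_FIELD(field))
		return wacom->touch_input;
	return NULL;	/* pad or unrecognized field */
}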
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 2978f5ee8d2a..54da66dc7d16 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -71,7 +71,8 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
struct vmbus_channel_msginfo *open_info = NULL;
void *in, *out;
unsigned long flags;
- int ret, t, err = 0;
+ int ret, err = 0;
+ unsigned long t;
spin_lock_irqsave(&newchannel->lock, flags);
if (newchannel->state == CHANNEL_OPEN_STATE) {
@@ -89,9 +90,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
get_order(send_ringbuffer_size + recv_ringbuffer_size));
- if (!out)
- return -ENOMEM;
-
+ if (!out) {
+ err = -ENOMEM;
+ goto error0;
+ }
in = (void *)((unsigned long)out + send_ringbuffer_size);
@@ -135,7 +137,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
GFP_KERNEL);
if (!open_info) {
err = -ENOMEM;
- goto error0;
+ goto error_gpadl;
}
init_completion(&open_info->waitevent);
@@ -151,7 +153,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
- goto error0;
+ goto error_gpadl;
}
if (userdatalen)
@@ -195,10 +197,14 @@ error1:
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+error_gpadl:
+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
+
error0:
free_pages((unsigned long)out,
get_order(send_ringbuffer_size + recv_ringbuffer_size));
kfree(open_info);
+ newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
@@ -534,6 +540,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
free_pages((unsigned long)channel->ringbuffer_pages,
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+ /*
+ * If the channel has been rescinded, process device removal.
+ */
+ if (channel->rescind)
+ hv_process_channel_removal(channel,
+ channel->offermsg.child_relid);
return ret;
}
@@ -569,23 +581,9 @@ void vmbus_close(struct vmbus_channel *channel)
}
EXPORT_SYMBOL_GPL(vmbus_close);
-/**
- * vmbus_sendpacket() - Send the specified buffer on the given channel
- * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
- * @requestid: Identifier of the request
- * @type: Type of packet that is being send e.g. negotiate, time
- * packet etc.
- *
- * Sends data in @buffer directly to hyper-v via the vmbus
- * This will send the data unparsed to hyper-v.
- *
- * Mainly used by Hyper-V drivers.
- */
-int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
+int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u64 requestid,
- enum vmbus_packet_type type, u32 flags)
+ enum vmbus_packet_type type, u32 flags, bool kick_q)
{
struct vmpacket_descriptor desc;
u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
@@ -613,21 +611,61 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && signal)
+ /*
+ * Signalling the host is conditional on many factors:
+ * 1. The ring state changed from being empty to non-empty.
+ * This is tracked by the variable "signal".
+ * 2. The variable kick_q tracks if more data will be placed
+ * on the ring. We will not signal if more data is
+ * to be placed.
+ *
+ * If we cannot write to the ring-buffer, signal the host
+ * even if we may not have written anything. This is a rare
+ * enough condition that it should not matter.
+ */
+ if (((ret == 0) && kick_q && signal) || (ret))
vmbus_setevent(channel);
return ret;
}
+EXPORT_SYMBOL(vmbus_sendpacket_ctl);
+
+/**
+ * vmbus_sendpacket() - Send the specified buffer on the given channel
+ * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer containing the data to send.
- * @bufferlen: Maximum size of what the buffer will hold
+ * @requestid: Identifier of the request
- * @type: Type of packet that is being sent, e.g. negotiate, time
+ * packet etc.
+ *
+ * Sends data in @buffer directly to hyper-v via the vmbus
+ * This will send the data unparsed to hyper-v.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u64 requestid,
+ enum vmbus_packet_type type, u32 flags)
+{
+ return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
+ type, flags, true);
+}
EXPORT_SYMBOL(vmbus_sendpacket);
/*
- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
- * packets using a GPADL Direct packet type.
+ * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
+ * packets using a GPADL Direct packet type. This interface allows you
+ * to control notifying the host. This will be useful for sending
+ * batched data. Also the sender can control the send flags
+ * explicitly.
*/
-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid)
+ u64 requestid,
+ u32 flags,
+ bool kick_q)
{
int ret;
int i;
@@ -655,7 +693,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
/* Setup the descriptor */
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+ desc.flags = flags;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
@@ -676,11 +714,40 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && signal)
+ /*
+ * Signalling the host is conditional on many factors:
+ * 1. The ring state changed from being empty to non-empty.
+ * This is tracked by the variable "signal".
+ * 2. The variable kick_q tracks if more data will be placed
+ * on the ring. We will not signal if more data is
+ * to be placed.
+ *
+ * If we cannot write to the ring-buffer, signal the host
+ * even if we may not have written anything. This is a rare
+ * enough condition that it should not matter.
+ */
+ if (((ret == 0) && kick_q && signal) || (ret))
vmbus_setevent(channel);
return ret;
}
+EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
+
+/*
+ * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
+ * packets using a GPADL Direct packet type.
+ */
+int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount, void *buffer, u32 bufferlen,
+ u64 requestid)
+{
+ u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+ return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
+ buffer, bufferlen, requestid,
+ flags, true);
+
+}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
/*
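/*
 * Illustrative sketch only, not part of the patch: one way a Hyper-V driver
 * might use the new vmbus_sendpacket_ctl() to batch sends, passing
 * kick_q = false for every packet except the last so the host is signalled
 * only once per batch. Buffer contents, lengths and request IDs here are
 * placeholders.
 */
static int send_batch(struct vmbus_channel *chan, void **bufs, u32 *lens,
		      int count)
{
	int i, ret = 0;

	for (i = 0; i < count && ret == 0; i++) {
		bool last = (i == count - 1);

		ret = vmbus_sendpacket_ctl(chan, bufs[i], lens[i],
					   (u64)(unsigned long)bufs[i],
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   last /* kick_q */);
	}
	return ret;
}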
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 3736f71bdec5..0eeb1b3bc048 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -32,12 +32,6 @@
#include "hyperv_vmbus.h"
-struct vmbus_channel_message_table_entry {
- enum vmbus_channel_message_type message_type;
- void (*message_handler)(struct vmbus_channel_message_header *msg);
-};
-
-
/**
* vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
* @icmsghdrp: Pointer to msg header structure
@@ -139,54 +133,29 @@ EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
*/
static struct vmbus_channel *alloc_channel(void)
{
+ static atomic_t chan_num = ATOMIC_INIT(0);
struct vmbus_channel *channel;
channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
if (!channel)
return NULL;
+ channel->id = atomic_inc_return(&chan_num);
spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->sc_list);
INIT_LIST_HEAD(&channel->percpu_list);
- channel->controlwq = create_workqueue("hv_vmbus_ctl");
- if (!channel->controlwq) {
- kfree(channel);
- return NULL;
- }
-
return channel;
}
/*
- * release_hannel - Release the vmbus channel object itself
- */
-static void release_channel(struct work_struct *work)
-{
- struct vmbus_channel *channel = container_of(work,
- struct vmbus_channel,
- work);
-
- destroy_workqueue(channel->controlwq);
-
- kfree(channel);
-}
-
-/*
* free_channel - Release the resources used by the vmbus channel object
*/
static void free_channel(struct vmbus_channel *channel)
{
-
- /*
- * We have to release the channel's workqueue/thread in the vmbus's
- * workqueue/thread context
- * ie we can't destroy ourselves.
- */
- INIT_WORK(&channel->work, release_channel);
- queue_work(vmbus_connection.work_queue, &channel->work);
+ kfree(channel);
}
static void percpu_channel_enq(void *arg)
@@ -204,33 +173,21 @@ static void percpu_channel_deq(void *arg)
list_del(&channel->percpu_list);
}
-/*
- * vmbus_process_rescind_offer -
- * Rescind the offer by initiating a device removal
- */
-static void vmbus_process_rescind_offer(struct work_struct *work)
+
+void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
- struct vmbus_channel *channel = container_of(work,
- struct vmbus_channel,
- work);
+ struct vmbus_channel_relid_released msg;
unsigned long flags;
struct vmbus_channel *primary_channel;
- struct vmbus_channel_relid_released msg;
- struct device *dev;
-
- if (channel->device_obj) {
- dev = get_device(&channel->device_obj->device);
- if (dev) {
- vmbus_device_unregister(channel->device_obj);
- put_device(dev);
- }
- }
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
- msg.child_relid = channel->offermsg.child_relid;
+ msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
+ if (channel == NULL)
+ return;
+
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -259,7 +216,6 @@ void vmbus_free_channels(void)
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
vmbus_device_unregister(channel->device_obj);
- kfree(channel->device_obj);
free_channel(channel);
}
}
@@ -268,15 +224,11 @@ void vmbus_free_channels(void)
* vmbus_process_offer - Process the offer by creating a channel/device
* associated with this offer
*/
-static void vmbus_process_offer(struct work_struct *work)
+static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
- struct vmbus_channel *newchannel = container_of(work,
- struct vmbus_channel,
- work);
struct vmbus_channel *channel;
bool fnew = true;
bool enq = false;
- int ret;
unsigned long flags;
/* Make sure this is a new offer */
@@ -335,10 +287,11 @@ static void vmbus_process_offer(struct work_struct *work)
}
newchannel->state = CHANNEL_OPEN_STATE;
+ channel->num_sc++;
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
- goto done_init_rescind;
+ return;
}
goto err_free_chan;
@@ -361,33 +314,35 @@ static void vmbus_process_offer(struct work_struct *work)
&newchannel->offermsg.offer.if_instance,
newchannel);
if (!newchannel->device_obj)
- goto err_free_chan;
+ goto err_deq_chan;
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
*/
- ret = vmbus_device_register(newchannel->device_obj);
- if (ret != 0) {
+ if (vmbus_device_register(newchannel->device_obj) != 0) {
pr_err("unable to add child device object (relid %d)\n",
- newchannel->offermsg.child_relid);
-
- spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
- list_del(&newchannel->listentry);
- spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+ newchannel->offermsg.child_relid);
kfree(newchannel->device_obj);
- goto err_free_chan;
+ goto err_deq_chan;
}
-done_init_rescind:
- spin_lock_irqsave(&newchannel->lock, flags);
- /* The next possible work is rescind handling */
- INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
- /* Check if rescind offer was already received */
- if (newchannel->rescind)
- queue_work(newchannel->controlwq, &newchannel->work);
- spin_unlock_irqrestore(&newchannel->lock, flags);
return;
+
+err_deq_chan:
+ spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
+ list_del(&newchannel->listentry);
+ spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+
+ if (newchannel->target_cpu != get_cpu()) {
+ put_cpu();
+ smp_call_function_single(newchannel->target_cpu,
+ percpu_channel_deq, newchannel, true);
+ } else {
+ percpu_channel_deq(newchannel);
+ put_cpu();
+ }
+
err_free_chan:
free_channel(newchannel);
}
@@ -411,6 +366,8 @@ static const struct hv_vmbus_device_id hp_devs[] = {
{ HV_SCSI_GUID, },
/* Network */
{ HV_NIC_GUID, },
+ /* NetworkDirect Guest RDMA */
+ { HV_ND_GUID, },
};
@@ -511,8 +468,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
newchannel->monitor_grp = (u8)offer->monitorid / 32;
newchannel->monitor_bit = (u8)offer->monitorid % 32;
- INIT_WORK(&newchannel->work, vmbus_process_offer);
- queue_work(newchannel->controlwq, &newchannel->work);
+ vmbus_process_offer(newchannel);
}
/*
@@ -525,28 +481,34 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
unsigned long flags;
+ struct device *dev;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
channel = relid2channel(rescind->child_relid);
- if (channel == NULL)
- /* Just return here, no channel found */
+ if (channel == NULL) {
+ hv_process_channel_removal(NULL, rescind->child_relid);
return;
+ }
spin_lock_irqsave(&channel->lock, flags);
channel->rescind = true;
- /*
- * channel->work.func != vmbus_process_rescind_offer means we are still
- * processing offer request and the rescind offer processing should be
- * postponed. It will be done at the very end of vmbus_process_offer()
- * as rescind flag is being checked there.
- */
- if (channel->work.func == vmbus_process_rescind_offer)
- /* work is initialized for vmbus_process_rescind_offer() from
- * vmbus_process_offer() where the channel got created */
- queue_work(channel->controlwq, &channel->work);
-
spin_unlock_irqrestore(&channel->lock, flags);
+
+ if (channel->device_obj) {
+ /*
+ * We will have to unregister this device from the
+ * driver core.
+ */
+ dev = get_device(&channel->device_obj->device);
+ if (dev) {
+ vmbus_device_unregister(channel->device_obj);
+ put_device(dev);
+ }
+ } else {
+ hv_process_channel_removal(channel,
+ channel->offermsg.child_relid);
+ }
}
/*
@@ -731,25 +693,25 @@ static void vmbus_onversion_response(
}
/* Channel message dispatch table */
-static struct vmbus_channel_message_table_entry
+struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
- {CHANNELMSG_INVALID, NULL},
- {CHANNELMSG_OFFERCHANNEL, vmbus_onoffer},
- {CHANNELMSG_RESCIND_CHANNELOFFER, vmbus_onoffer_rescind},
- {CHANNELMSG_REQUESTOFFERS, NULL},
- {CHANNELMSG_ALLOFFERS_DELIVERED, vmbus_onoffers_delivered},
- {CHANNELMSG_OPENCHANNEL, NULL},
- {CHANNELMSG_OPENCHANNEL_RESULT, vmbus_onopen_result},
- {CHANNELMSG_CLOSECHANNEL, NULL},
- {CHANNELMSG_GPADL_HEADER, NULL},
- {CHANNELMSG_GPADL_BODY, NULL},
- {CHANNELMSG_GPADL_CREATED, vmbus_ongpadl_created},
- {CHANNELMSG_GPADL_TEARDOWN, NULL},
- {CHANNELMSG_GPADL_TORNDOWN, vmbus_ongpadl_torndown},
- {CHANNELMSG_RELID_RELEASED, NULL},
- {CHANNELMSG_INITIATE_CONTACT, NULL},
- {CHANNELMSG_VERSION_RESPONSE, vmbus_onversion_response},
- {CHANNELMSG_UNLOAD, NULL},
+ {CHANNELMSG_INVALID, 0, NULL},
+ {CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer},
+ {CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind},
+ {CHANNELMSG_REQUESTOFFERS, 0, NULL},
+ {CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered},
+ {CHANNELMSG_OPENCHANNEL, 0, NULL},
+ {CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result},
+ {CHANNELMSG_CLOSECHANNEL, 0, NULL},
+ {CHANNELMSG_GPADL_HEADER, 0, NULL},
+ {CHANNELMSG_GPADL_BODY, 0, NULL},
+ {CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created},
+ {CHANNELMSG_GPADL_TEARDOWN, 0, NULL},
+ {CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown},
+ {CHANNELMSG_RELID_RELEASED, 0, NULL},
+ {CHANNELMSG_INITIATE_CONTACT, 0, NULL},
+ {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
+ {CHANNELMSG_UNLOAD, 0, NULL},
};
/*
@@ -787,7 +749,7 @@ int vmbus_request_offers(void)
{
struct vmbus_channel_message_header *msg;
struct vmbus_channel_msginfo *msginfo;
- int ret, t;
+ int ret;
msginfo = kmalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_message_header),
@@ -795,8 +757,6 @@ int vmbus_request_offers(void)
if (!msginfo)
return -ENOMEM;
- init_completion(&msginfo->waitevent);
-
msg = (struct vmbus_channel_message_header *)msginfo->msg;
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
@@ -810,14 +770,6 @@ int vmbus_request_offers(void)
goto cleanup;
}
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
-
-
cleanup:
kfree(msginfo);
@@ -826,9 +778,8 @@ cleanup:
/*
* Retrieve the (sub) channel on which to send an outgoing request.
- * When a primary channel has multiple sub-channels, we choose a
- * channel whose VCPU binding is closest to the VCPU on which
- * this call is being made.
+ * When a primary channel has multiple sub-channels, we try to
+ * distribute the load equally amongst all available channels.
*/
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
@@ -836,11 +787,19 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
int cur_cpu;
struct vmbus_channel *cur_channel;
struct vmbus_channel *outgoing_channel = primary;
- int cpu_distance, new_cpu_distance;
+ int next_channel;
+ int i = 1;
if (list_empty(&primary->sc_list))
return outgoing_channel;
+ next_channel = primary->next_oc++;
+
+ if (next_channel > (primary->num_sc)) {
+ primary->next_oc = 0;
+ return outgoing_channel;
+ }
+
cur_cpu = hv_context.vp_index[get_cpu()];
put_cpu();
list_for_each_safe(cur, tmp, &primary->sc_list) {
@@ -851,18 +810,10 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
if (cur_channel->target_vp == cur_cpu)
return cur_channel;
- cpu_distance = ((outgoing_channel->target_vp > cur_cpu) ?
- (outgoing_channel->target_vp - cur_cpu) :
- (cur_cpu - outgoing_channel->target_vp));
-
- new_cpu_distance = ((cur_channel->target_vp > cur_cpu) ?
- (cur_channel->target_vp - cur_cpu) :
- (cur_cpu - cur_channel->target_vp));
-
- if (cpu_distance < new_cpu_distance)
- continue;
+ if (i == next_channel)
+ return cur_channel;
- outgoing_channel = cur_channel;
+ i++;
}
return outgoing_channel;
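For reference, the round-robin policy the rewritten vmbus_get_outgoing_channel() implements can be sketched in a few lines of plain C. Everything below is an illustrative stand-in (struct chan, pick_outgoing(), subs[]); only the next_oc/num_sc bookkeeping mirrors the kernel fields, and the target_vp fast path of the real function is omitted:

/* Illustrative sketch, not kernel code: round-robin over primary + sub-channels. */
struct chan { int id; };

static struct chan *pick_outgoing(struct chan *primary, struct chan **subs,
                                  unsigned int num_sc, unsigned int *next_oc)
{
        unsigned int next = (*next_oc)++;
        unsigned int i;

        if (next > num_sc) {            /* wrapped past the last sub-channel */
                *next_oc = 0;           /* restart the rotation */
                return primary;
        }
        for (i = 1; i <= num_sc; i++)   /* sub-channels are counted from 1 */
                if (i == next)
                        return subs[i - 1];
        return primary;                 /* next == 0 also lands on the primary */
}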
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index a63a795300b9..b27220a425f4 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -216,10 +216,21 @@ int vmbus_connect(void)
cleanup:
pr_err("Unable to connect to host\n");
+
vmbus_connection.conn_state = DISCONNECTED;
+ vmbus_disconnect();
+
+ kfree(msginfo);
- if (vmbus_connection.work_queue)
+ return ret;
+}
+
+void vmbus_disconnect(void)
+{
+ if (vmbus_connection.work_queue) {
+ drain_workqueue(vmbus_connection.work_queue);
destroy_workqueue(vmbus_connection.work_queue);
+ }
if (vmbus_connection.int_page) {
free_pages((unsigned long)vmbus_connection.int_page, 0);
@@ -230,10 +241,6 @@ cleanup:
free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages[1] = NULL;
-
- kfree(msginfo);
-
- return ret;
}
/*
@@ -311,10 +318,8 @@ static void process_chn_event(u32 relid)
*/
channel = pcpu_relid2channel(relid);
- if (!channel) {
- pr_err("channel not found for relid - %u\n", relid);
+ if (!channel)
return;
- }
/*
* A channel once created is persistent even when there
@@ -349,10 +354,7 @@ static void process_chn_event(u32 relid)
else
bytes_to_read = 0;
} while (read_state && (bytes_to_read != 0));
- } else {
- pr_err("no channel callback for relid - %u\n", relid);
}
-
}
/*
@@ -420,6 +422,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
union hv_connection_id conn_id;
int ret = 0;
int retries = 0;
+ u32 msec = 1;
conn_id.asu32 = 0;
conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
@@ -429,13 +432,20 @@ int vmbus_post_msg(void *buffer, size_t buflen)
* insufficient resources. Retry the operation a couple of
* times before giving up.
*/
- while (retries < 10) {
+ while (retries < 20) {
ret = hv_post_message(conn_id, 1, buffer, buflen);
switch (ret) {
+ case HV_STATUS_INVALID_CONNECTION_ID:
+ /*
+ * We could get this if we send messages too
+ * frequently.
+ */
+ ret = -EAGAIN;
+ break;
+ case HV_STATUS_INSUFFICIENT_MEMORY:
case HV_STATUS_INSUFFICIENT_BUFFERS:
ret = -ENOMEM;
- case -ENOMEM:
break;
case HV_STATUS_SUCCESS:
return ret;
@@ -445,7 +455,9 @@ int vmbus_post_msg(void *buffer, size_t buflen)
}
retries++;
- msleep(100);
+ msleep(msec);
+ if (msec < 2048)
+ msec *= 2;
}
return ret;
}
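Pieced together, the hunk above replaces the flat 100 ms retry delay with a capped exponential backoff over up to 20 attempts. A hedged, generic sketch of that pattern (the post() callback stands in for hv_post_message(); msleep() is the kernel helper used above):

/* Retry up to 20 times, sleeping 1, 2, 4, ... capped at 2048 ms between attempts. */
static int retry_with_backoff(int (*post)(void *arg), void *arg)
{
        unsigned int msec = 1;
        int retries, ret = -EAGAIN;

        for (retries = 0; retries < 20; retries++) {
                ret = post(arg);
                if (ret == 0)           /* HV_STATUS_SUCCESS in the driver */
                        return 0;
                msleep(msec);           /* back off before the next attempt */
                if (msec < 2048)
                        msec *= 2;
        }
        return ret;
}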
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 50e51a51ff8b..d3943bceecc3 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -312,7 +312,11 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
dev->features = CLOCK_EVT_FEAT_ONESHOT;
dev->cpumask = cpumask_of(cpu);
dev->rating = 1000;
- dev->owner = THIS_MODULE;
+ /*
+	 * Deliberately avoid setting dev->owner = THIS_MODULE, as doing so will
+	 * result in clockevents_config_and_register() taking additional
+	 * references to the hv_vmbus module, making it impossible to unload.
+ */
dev->set_mode = hv_ce_setmode;
dev->set_next_event = hv_ce_set_next_event;
@@ -470,6 +474,20 @@ void hv_synic_init(void *arg)
}
/*
+ * hv_synic_clockevents_cleanup - Cleanup clockevent devices
+ */
+void hv_synic_clockevents_cleanup(void)
+{
+ int cpu;
+
+ if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
+ return;
+
+ for_each_online_cpu(cpu)
+ clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
+}
+
+/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
void hv_synic_cleanup(void *arg)
@@ -477,11 +495,17 @@ void hv_synic_cleanup(void *arg)
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
+ union hv_synic_scontrol sctrl;
int cpu = smp_processor_id();
if (!hv_context.synic_initialized)
return;
+ /* Turn off clockevent device */
+ if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ hv_ce_setmode(CLOCK_EVT_MODE_SHUTDOWN,
+ hv_context.clk_evt[cpu]);
+
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.masked = 1;
@@ -502,6 +526,10 @@ void hv_synic_cleanup(void *arg)
wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
- free_page((unsigned long)hv_context.synic_message_page[cpu]);
- free_page((unsigned long)hv_context.synic_event_page[cpu]);
+ /* Disable the global synic bit */
+ rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+ sctrl.enable = 0;
+ wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+
+ hv_synic_free_cpu(cpu);
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index ff169386b2c7..cb5b7dc9797f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -428,14 +428,13 @@ struct dm_info_msg {
* currently hot added. We hot add in multiples of 128M
* chunks; it is possible that we may not be able to bring
* online all the pages in the region. The range
- * covered_start_pfn : covered_end_pfn defines the pages that can
+ * covered_end_pfn defines the pages that can
 * be brought online.
*/
struct hv_hotadd_state {
struct list_head list;
unsigned long start_pfn;
- unsigned long covered_start_pfn;
unsigned long covered_end_pfn;
unsigned long ha_end_pfn;
unsigned long end_pfn;
@@ -503,6 +502,8 @@ struct hv_dynmem_device {
* Number of pages we have currently ballooned out.
*/
unsigned int num_pages_ballooned;
+ unsigned int num_pages_onlined;
+ unsigned int num_pages_added;
/*
* State to manage the ballooning (up) operation.
@@ -534,7 +535,6 @@ struct hv_dynmem_device {
struct task_struct *thread;
struct mutex ha_region_mutex;
- struct completion waiter_event;
/*
* A list of hot-add regions.
@@ -554,46 +554,32 @@ static struct hv_dynmem_device dm_device;
static void post_status(struct hv_dynmem_device *dm);
#ifdef CONFIG_MEMORY_HOTPLUG
-static void acquire_region_mutex(bool trylock)
-{
- if (trylock) {
- reinit_completion(&dm_device.waiter_event);
- while (!mutex_trylock(&dm_device.ha_region_mutex))
- wait_for_completion(&dm_device.waiter_event);
- } else {
- mutex_lock(&dm_device.ha_region_mutex);
- }
-}
-
-static void release_region_mutex(bool trylock)
-{
- if (trylock) {
- mutex_unlock(&dm_device.ha_region_mutex);
- } else {
- mutex_unlock(&dm_device.ha_region_mutex);
- complete(&dm_device.waiter_event);
- }
-}
-
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
void *v)
{
+ struct memory_notify *mem = (struct memory_notify *)v;
+
switch (val) {
case MEM_GOING_ONLINE:
- acquire_region_mutex(true);
+ mutex_lock(&dm_device.ha_region_mutex);
break;
case MEM_ONLINE:
+ dm_device.num_pages_onlined += mem->nr_pages;
case MEM_CANCEL_ONLINE:
- release_region_mutex(true);
+ mutex_unlock(&dm_device.ha_region_mutex);
if (dm_device.ha_waiting) {
dm_device.ha_waiting = false;
complete(&dm_device.ol_waitevent);
}
break;
- case MEM_GOING_OFFLINE:
case MEM_OFFLINE:
+ mutex_lock(&dm_device.ha_region_mutex);
+ dm_device.num_pages_onlined -= mem->nr_pages;
+ mutex_unlock(&dm_device.ha_region_mutex);
+ break;
+ case MEM_GOING_OFFLINE:
case MEM_CANCEL_OFFLINE:
break;
}
@@ -646,7 +632,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
init_completion(&dm_device.ol_waitevent);
dm_device.ha_waiting = true;
- release_region_mutex(false);
+ mutex_unlock(&dm_device.ha_region_mutex);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
(HA_CHUNK << PAGE_SHIFT));
@@ -665,6 +651,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}
has->ha_end_pfn -= HA_CHUNK;
has->covered_end_pfn -= processed_pfn;
+ mutex_lock(&dm_device.ha_region_mutex);
break;
}
@@ -675,7 +662,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
* have not been "onlined" within the allowed time.
*/
wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
- acquire_region_mutex(false);
+ mutex_lock(&dm_device.ha_region_mutex);
post_status(&dm_device);
}
@@ -691,8 +678,7 @@ static void hv_online_page(struct page *pg)
list_for_each(cur, &dm_device.ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
- cur_start_pgp = (unsigned long)
- pfn_to_page(has->covered_start_pfn);
+ cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
if (((unsigned long)pg >= cur_start_pgp) &&
@@ -704,7 +690,6 @@ static void hv_online_page(struct page *pg)
__online_page_set_limits(pg);
__online_page_increment_counters(pg);
__online_page_free(pg);
- has->covered_start_pfn++;
}
}
}
@@ -748,10 +733,9 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* is, update it.
*/
- if (has->covered_end_pfn != start_pfn) {
+ if (has->covered_end_pfn != start_pfn)
has->covered_end_pfn = start_pfn;
- has->covered_start_pfn = start_pfn;
- }
+
return true;
}
@@ -794,9 +778,18 @@ static unsigned long handle_pg_range(unsigned long pg_start,
pgs_ol = has->ha_end_pfn - start_pfn;
if (pgs_ol > pfn_cnt)
pgs_ol = pfn_cnt;
- hv_bring_pgs_online(start_pfn, pgs_ol);
+
+ /*
+ * Check if the corresponding memory block is already
+ * online by checking its last previously backed page.
+		 * If it is, we need to bring the rest (which was not
+		 * backed previously) online too.
+ */
+ if (start_pfn > has->start_pfn &&
+ !PageReserved(pfn_to_page(start_pfn - 1)))
+ hv_bring_pgs_online(start_pfn, pgs_ol);
+
has->covered_end_pfn += pgs_ol;
- has->covered_start_pfn += pgs_ol;
pfn_cnt -= pgs_ol;
}
@@ -857,7 +850,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
list_add_tail(&ha_region->list, &dm_device.ha_region_list);
ha_region->start_pfn = rg_start;
ha_region->ha_end_pfn = rg_start;
- ha_region->covered_start_pfn = pg_start;
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
}
@@ -886,7 +878,7 @@ static void hot_add_req(struct work_struct *dummy)
resp.hdr.size = sizeof(struct dm_hot_add_response);
#ifdef CONFIG_MEMORY_HOTPLUG
- acquire_region_mutex(false);
+ mutex_lock(&dm_device.ha_region_mutex);
pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
@@ -918,7 +910,9 @@ static void hot_add_req(struct work_struct *dummy)
if (do_hot_add)
resp.page_count = process_hot_add(pg_start, pfn_cnt,
rg_start, rg_sz);
- release_region_mutex(false);
+
+ dm->num_pages_added += resp.page_count;
+ mutex_unlock(&dm_device.ha_region_mutex);
#endif
/*
* The result field of the response structure has the
@@ -982,8 +976,8 @@ static unsigned long compute_balloon_floor(void)
* 128 72 (1/2)
* 512 168 (1/4)
* 2048 360 (1/8)
- * 8192 768 (1/16)
- * 32768 1536 (1/32)
+ * 8192 744 (1/16)
+ * 32768 1512 (1/32)
*/
if (totalram_pages < MB2PAGES(128))
min_pages = MB2PAGES(8) + (totalram_pages >> 1);
@@ -992,9 +986,9 @@ static unsigned long compute_balloon_floor(void)
else if (totalram_pages < MB2PAGES(2048))
min_pages = MB2PAGES(104) + (totalram_pages >> 3);
else if (totalram_pages < MB2PAGES(8192))
- min_pages = MB2PAGES(256) + (totalram_pages >> 4);
+ min_pages = MB2PAGES(232) + (totalram_pages >> 4);
else
- min_pages = MB2PAGES(512) + (totalram_pages >> 5);
+ min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
return min_pages;
}
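Quick arithmetic check of the revised constants: at 8192 MB the floor is MB2PAGES(232) + totalram_pages/16, i.e. 232 MB + 8192/16 MB = 744 MB, and at 32768 MB it is 488 MB + 32768/32 MB = 1512 MB, matching the 744 and 1512 entries in the updated table above.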
@@ -1031,17 +1025,21 @@ static void post_status(struct hv_dynmem_device *dm)
status.hdr.trans_id = atomic_inc_return(&trans_id);
/*
- * The host expects the guest to report free memory.
- * Further, the host expects the pressure information to
- * include the ballooned out pages.
- * For a given amount of memory that we are managing, we
- * need to compute a floor below which we should not balloon.
- * Compute this and add it to the pressure report.
+ * The host expects the guest to report free and committed memory.
+ * Furthermore, the host expects the pressure information to include
+ * the ballooned out pages. For a given amount of memory that we are
+	 * managing, we need to compute a floor below which we should not
+ * balloon. Compute this and add it to the pressure report.
+ * We also need to report all offline pages (num_pages_added -
+ * num_pages_onlined) as committed to the host, otherwise it can try
+ * asking us to balloon them out.
*/
status.num_avail = val.freeram;
status.num_committed = vm_memory_committed() +
- dm->num_pages_ballooned +
- compute_balloon_floor();
+ dm->num_pages_ballooned +
+ (dm->num_pages_added > dm->num_pages_onlined ?
+ dm->num_pages_added - dm->num_pages_onlined : 0) +
+ compute_balloon_floor();
/*
* If our transaction ID is no longer current, just don't
@@ -1083,11 +1081,12 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
-static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
- struct dm_balloon_response *bl_resp, int alloc_unit,
- bool *alloc_error)
+static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
+ unsigned int num_pages,
+ struct dm_balloon_response *bl_resp,
+ int alloc_unit)
{
- int i = 0;
+ unsigned int i = 0;
struct page *pg;
if (num_pages < alloc_unit)
@@ -1106,11 +1105,8 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
__GFP_NOMEMALLOC | __GFP_NOWARN,
get_order(alloc_unit << PAGE_SHIFT));
- if (!pg) {
- *alloc_error = true;
+ if (!pg)
return i * alloc_unit;
- }
-
dm->num_pages_ballooned += alloc_unit;
@@ -1137,14 +1133,15 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
static void balloon_up(struct work_struct *dummy)
{
- int num_pages = dm_device.balloon_wrk.num_pages;
- int num_ballooned = 0;
+ unsigned int num_pages = dm_device.balloon_wrk.num_pages;
+ unsigned int num_ballooned = 0;
struct dm_balloon_response *bl_resp;
int alloc_unit;
int ret;
- bool alloc_error;
bool done = false;
int i;
+ struct sysinfo val;
+ unsigned long floor;
/* The host balloons pages in 2M granularity. */
WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
@@ -1155,6 +1152,15 @@ static void balloon_up(struct work_struct *dummy)
*/
alloc_unit = 512;
+ si_meminfo(&val);
+ floor = compute_balloon_floor();
+
+ /* Refuse to balloon below the floor, keep the 2M granularity. */
+ if (val.freeram < num_pages || val.freeram - num_pages < floor) {
+ num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
+ num_pages -= num_pages % PAGES_IN_2M;
+ }
+
while (!done) {
bl_resp = (struct dm_balloon_response *)send_buffer;
memset(send_buffer, 0, PAGE_SIZE);
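As a worked example of the new floor check (figures in MB for readability): with 1000 MB free, a floor of 744 MB and a host request of 400 MB, freeram - num_pages = 600 MB would dip below the floor, so the request is clamped to 1000 - 744 = 256 MB and then rounded down to a 2 MB multiple before the allocation loop starts.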
@@ -1164,18 +1170,15 @@ static void balloon_up(struct work_struct *dummy)
num_pages -= num_ballooned;
- alloc_error = false;
num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
- bl_resp, alloc_unit,
- &alloc_error);
+ bl_resp, alloc_unit);
if (alloc_unit != 1 && num_ballooned == 0) {
alloc_unit = 1;
continue;
}
- if ((alloc_unit == 1 && alloc_error) ||
- (num_ballooned == num_pages)) {
+ if (num_ballooned == 0 || num_ballooned == num_pages) {
bl_resp->more_pages = 0;
done = true;
dm_device.state = DM_INITIALIZED;
@@ -1414,7 +1417,8 @@ static void balloon_onchannelcallback(void *context)
static int balloon_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
- int ret, t;
+ int ret;
+ unsigned long t;
struct dm_version_request version_req;
struct dm_capabilities cap_msg;
@@ -1439,7 +1443,6 @@ static int balloon_probe(struct hv_device *dev,
dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
init_completion(&dm_device.host_event);
init_completion(&dm_device.config_event);
- init_completion(&dm_device.waiter_event);
INIT_LIST_HEAD(&dm_device.ha_region_list);
mutex_init(&dm_device.ha_region_mutex);
INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 3b9c9ef0deb8..7994ec2e4151 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -340,12 +340,8 @@ static int util_probe(struct hv_device *dev,
set_channel_read_state(dev->channel, false);
- ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
- srv->util_cb, dev->channel);
- if (ret)
- goto error;
-
hv_set_drvdata(dev, srv);
+
/*
* Based on the host; initialize the framework and
* service version numbers we will negotiate.
@@ -365,6 +361,11 @@ static int util_probe(struct hv_device *dev,
hb_srv_version = HB_VERSION;
}
+ ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
+ srv->util_cb, dev->channel);
+ if (ret)
+ goto error;
+
return 0;
error:
@@ -379,9 +380,9 @@ static int util_remove(struct hv_device *dev)
{
struct hv_util_service *srv = hv_get_drvdata(dev);
- vmbus_close(dev->channel);
if (srv->util_deinit)
srv->util_deinit();
+ vmbus_close(dev->channel);
kfree(srv->recv_buffer);
return 0;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 44b1c9424712..887287ad411f 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -49,6 +49,17 @@ enum hv_cpuid_function {
HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005,
};
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE 0x400
+
+#define HV_X64_MSR_CRASH_P0 0x40000100
+#define HV_X64_MSR_CRASH_P1 0x40000101
+#define HV_X64_MSR_CRASH_P2 0x40000102
+#define HV_X64_MSR_CRASH_P3 0x40000103
+#define HV_X64_MSR_CRASH_P4 0x40000104
+#define HV_X64_MSR_CRASH_CTL 0x40000105
+
+#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
+
/* Define version of the synthetic interrupt controller. */
#define HV_SYNIC_VERSION (1)
@@ -572,6 +583,8 @@ extern void hv_synic_init(void *irqarg);
extern void hv_synic_cleanup(void *arg);
+extern void hv_synic_clockevents_cleanup(void);
+
/*
* Host version information.
*/
@@ -672,6 +685,23 @@ struct vmbus_msginfo {
extern struct vmbus_connection vmbus_connection;
+enum vmbus_message_handler_type {
+ /* The related handler can sleep. */
+ VMHT_BLOCKING = 0,
+
+ /* The related handler must NOT sleep. */
+ VMHT_NON_BLOCKING = 1,
+};
+
+struct vmbus_channel_message_table_entry {
+ enum vmbus_channel_message_type message_type;
+ enum vmbus_message_handler_type handler_type;
+ void (*message_handler)(struct vmbus_channel_message_header *msg);
+};
+
+extern struct vmbus_channel_message_table_entry
+ channel_message_table[CHANNELMSG_COUNT];
+
/* General vmbus interface */
struct hv_device *vmbus_device_create(const uuid_le *type,
@@ -692,6 +722,7 @@ void vmbus_free_channels(void);
/* Connection interface */
int vmbus_connect(void);
+void vmbus_disconnect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index f518b8d7a5b5..c85235e9f245 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,9 +33,12 @@
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
+#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
+#include <linux/notifier.h>
+#include <linux/ptrace.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;
@@ -44,6 +47,31 @@ static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;
+
+static int hyperv_panic_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct pt_regs *regs;
+
+ regs = current_pt_regs();
+
+ wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
+ wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
+ wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
+ wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
+ wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
+
+ /*
+ * Let Hyper-V know there is crash data available
+ */
+ wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hyperv_panic_block = {
+ .notifier_call = hyperv_panic_event,
+};
+
struct resource hyperv_mmio = {
.name = "hyperv mmio",
.flags = IORESOURCE_MEM,
@@ -507,14 +535,26 @@ static int vmbus_probe(struct device *child_device)
*/
static int vmbus_remove(struct device *child_device)
{
- struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
+ struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
-
- if (drv->remove)
- drv->remove(dev);
- else
- pr_err("remove not set for driver %s\n",
- dev_name(child_device));
+ u32 relid = dev->channel->offermsg.child_relid;
+
+ if (child_device->driver) {
+ drv = drv_to_hv_drv(child_device->driver);
+ if (drv->remove)
+ drv->remove(dev);
+ else {
+ hv_process_channel_removal(dev->channel, relid);
+ pr_err("remove not set for driver %s\n",
+ dev_name(child_device));
+ }
+ } else {
+ /*
+ * We don't have a driver for this device; deal with the
+ * rescind message by removing the channel.
+ */
+ hv_process_channel_removal(dev->channel, relid);
+ }
return 0;
}
@@ -573,6 +613,10 @@ static void vmbus_onmessage_work(struct work_struct *work)
{
struct onmessage_work_context *ctx;
+ /* Do not process messages if we're in DISCONNECTED state */
+ if (vmbus_connection.conn_state == DISCONNECTED)
+ return;
+
ctx = container_of(work, struct onmessage_work_context,
work);
vmbus_onmessage(&ctx->msg);
@@ -613,21 +657,36 @@ static void vmbus_on_msg_dpc(unsigned long data)
void *page_addr = hv_context.synic_message_page[cpu];
struct hv_message *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
+ struct vmbus_channel_message_header *hdr;
+ struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
while (1) {
- if (msg->header.message_type == HVMSG_NONE) {
+ if (msg->header.message_type == HVMSG_NONE)
/* no msg */
break;
- } else {
+
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+
+ if (hdr->msgtype >= CHANNELMSG_COUNT) {
+ WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
+ goto msg_handled;
+ }
+
+ entry = &channel_message_table[hdr->msgtype];
+ if (entry->handler_type == VMHT_BLOCKING) {
ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
if (ctx == NULL)
continue;
+
INIT_WORK(&ctx->work, vmbus_onmessage_work);
memcpy(&ctx->msg, msg, sizeof(*msg));
+
queue_work(vmbus_connection.work_queue, &ctx->work);
- }
+ } else
+ entry->message_handler(hdr);
+msg_handled:
msg->header.message_type = HVMSG_NONE;
/*
@@ -704,6 +763,39 @@ static void vmbus_isr(void)
}
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int hyperv_cpu_disable(void)
+{
+ return -ENOSYS;
+}
+
+static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
+{
+ static void *previous_cpu_disable;
+
+ /*
+ * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
+ * ...) is not supported at this moment as channel interrupts are
+ * distributed across all of them.
+ */
+
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7))
+ return;
+
+ if (vmbus_loaded) {
+ previous_cpu_disable = smp_ops.cpu_disable;
+ smp_ops.cpu_disable = hyperv_cpu_disable;
+ pr_notice("CPU offlining is not supported by hypervisor\n");
+ } else if (previous_cpu_disable)
+ smp_ops.cpu_disable = previous_cpu_disable;
+}
+#else
+static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
+{
+}
+#endif
+
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
@@ -744,6 +836,16 @@ static int vmbus_bus_init(int irq)
if (ret)
goto err_alloc;
+ hv_cpu_hotplug_quirk(true);
+
+ /*
+ * Only register if the crash MSRs are available
+ */
+ if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &hyperv_panic_block);
+ }
+
vmbus_request_offers();
return 0;
@@ -840,10 +942,8 @@ int vmbus_device_register(struct hv_device *child_device_obj)
{
int ret = 0;
- static atomic_t device_num = ATOMIC_INIT(0);
-
- dev_set_name(&child_device_obj->device, "vmbus_0_%d",
- atomic_inc_return(&device_num));
+ dev_set_name(&child_device_obj->device, "vmbus_%d",
+ child_device_obj->channel->id);
child_device_obj->device.bus = &hv_bus;
child_device_obj->device.parent = &hv_acpi_dev->dev;
@@ -992,11 +1092,19 @@ cleanup:
static void __exit vmbus_exit(void)
{
+ int cpu;
+
+ vmbus_connection.conn_state = DISCONNECTED;
+ hv_synic_clockevents_cleanup();
hv_remove_vmbus_irq();
vmbus_free_channels();
bus_unregister(&hv_bus);
hv_cleanup();
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
acpi_bus_unregister_driver(&vmbus_acpi_driver);
+ hv_cpu_hotplug_quirk(false);
+ vmbus_disconnect();
}
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index d1542b7d4bc3..4d2815079fc2 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -36,6 +36,7 @@
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/delay.h>
+#include <linux/util_macros.h>
#include <linux/platform_data/ina2xx.h>
@@ -141,19 +142,6 @@ static const struct ina2xx_config ina2xx_config[] = {
*/
static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
-static int ina226_avg_bits(int avg)
-{
- int i;
-
- /* Get the closest average from the tab. */
- for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
- if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
- break;
- }
-
- return i; /* Return 0b0111 for values greater than 1024. */
-}
-
static int ina226_reg_to_interval(u16 config)
{
int avg = ina226_avg_tab[INA226_READ_AVG(config)];
@@ -171,7 +159,8 @@ static u16 ina226_interval_to_reg(int interval, u16 config)
avg = DIV_ROUND_CLOSEST(interval * 1000,
INA226_TOTAL_CONV_TIME_DEFAULT);
- avg_bits = ina226_avg_bits(avg);
+ avg_bits = find_closest(avg, ina226_avg_tab,
+ ARRAY_SIZE(ina226_avg_tab));
return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
}
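find_closest() (and the descending-array variant used later in w83795) comes from the new <linux/util_macros.h> and returns the index of the table entry nearest to the input. A plain-C sketch of the same midpoint search the removed ina226_avg_bits() open-coded (illustrative, not the kernel macro itself):

/* Nearest index in an ascending table, rounding at the midpoints. */
static int closest_index(int x, const int *tab, int len)
{
        int i;

        for (i = 0; i < len - 1; i++)
                if (x <= (tab[i] + tab[i + 1]) / 2)
                        break;
        return i;       /* len - 1 when x is beyond every midpoint */
}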
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 2b4b419273fe..6ff773fcaefb 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -34,6 +34,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/util_macros.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
@@ -190,15 +191,7 @@ static const int lm85_range_map[] = {
static int RANGE_TO_REG(long range)
{
- int i;
-
- /* Find the closest match */
- for (i = 0; i < 15; ++i) {
- if (range <= (lm85_range_map[i] + lm85_range_map[i + 1]) / 2)
- break;
- }
-
- return i;
+ return find_closest(range, lm85_range_map, ARRAY_SIZE(lm85_range_map));
}
#define RANGE_FROM_REG(val) lm85_range_map[(val) & 0x0f]
@@ -209,16 +202,12 @@ static const int lm85_freq_map[8] = { /* 1 Hz */
static const int adm1027_freq_map[8] = { /* 1 Hz */
11, 15, 22, 29, 35, 44, 59, 88
};
+#define FREQ_MAP_LEN 8
-static int FREQ_TO_REG(const int *map, unsigned long freq)
+static int FREQ_TO_REG(const int *map,
+ unsigned int map_size, unsigned long freq)
{
- int i;
-
- /* Find the closest match */
- for (i = 0; i < 7; ++i)
- if (freq <= (map[i] + map[i + 1]) / 2)
- break;
- return i;
+ return find_closest(freq, map, map_size);
}
static int FREQ_FROM_REG(const int *map, u8 reg)
@@ -828,7 +817,8 @@ static ssize_t set_pwm_freq(struct device *dev,
data->cfg5 &= ~ADT7468_HFPWM;
lm85_write_value(client, ADT7468_REG_CFG5, data->cfg5);
} else { /* Low freq. mode */
- data->pwm_freq[nr] = FREQ_TO_REG(data->freq_map, val);
+ data->pwm_freq[nr] = FREQ_TO_REG(data->freq_map,
+ FREQ_MAP_LEN, val);
lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
(data->zone[nr].range << 4)
| data->pwm_freq[nr]);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 21894131190f..49276bbdac3d 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -35,6 +35,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
+#include <linux/util_macros.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = {
@@ -308,11 +309,8 @@ static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
unsigned long best0, best1;
/* Best fit for cksel = 0 */
- for (reg0 = 0; reg0 < ARRAY_SIZE(pwm_freq_cksel0) - 1; reg0++) {
- if (val > (pwm_freq_cksel0[reg0] +
- pwm_freq_cksel0[reg0 + 1]) / 2)
- break;
- }
+ reg0 = find_closest_descending(val, pwm_freq_cksel0,
+ ARRAY_SIZE(pwm_freq_cksel0));
if (val < 375) /* cksel = 1 can't beat this */
return reg0;
best0 = pwm_freq_cksel0[reg0];
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
new file mode 100644
index 000000000000..fc1f1ae7a49d
--- /dev/null
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -0,0 +1,61 @@
+#
+# Coresight configuration
+#
+menuconfig CORESIGHT
+ bool "CoreSight Tracing Support"
+ select ARM_AMBA
+ help
+ This framework provides a kernel interface for the CoreSight debug
+ and trace drivers to register themselves with. It's intended to build
+ a topological view of the CoreSight components based on a DT
+	  specification and configure the right series of components when a
+ trace source gets enabled.
+
+if CORESIGHT
+config CORESIGHT_LINKS_AND_SINKS
+ bool "CoreSight Link and Sink drivers"
+ help
+ This enables support for CoreSight link and sink drivers that are
+ responsible for transporting and collecting the trace data
+	  respectively. Links and sinks are dynamically aggregated with a trace
+ entity at run time to form a complete trace path.
+
+config CORESIGHT_LINK_AND_SINK_TMC
+ bool "Coresight generic TMC driver"
+ depends on CORESIGHT_LINKS_AND_SINKS
+ help
+ This enables support for the Trace Memory Controller driver.
+ Depending on its configuration the device can act as a link (embedded
+ trace router - ETR) or sink (embedded trace FIFO). The driver
+ complies with the generic implementation of the component without
+ special enhancement or added features.
+
+config CORESIGHT_SINK_TPIU
+ bool "Coresight generic TPIU driver"
+ depends on CORESIGHT_LINKS_AND_SINKS
+ help
+ This enables support for the Trace Port Interface Unit driver,
+	  responsible for bridging the gap between the on-chip coresight
+	  components and a trace port collection engine, typically connected
+	  to an external host for use cases capturing more traces than the
+	  on-board coresight memory can handle.
+
+config CORESIGHT_SINK_ETBV10
+ bool "Coresight ETBv1.0 driver"
+ depends on CORESIGHT_LINKS_AND_SINKS
+ help
+ This enables support for the Embedded Trace Buffer version 1.0 driver
+ that complies with the generic implementation of the component without
+ special enhancement or added features.
+
+config CORESIGHT_SOURCE_ETM3X
+ bool "CoreSight Embedded Trace Macrocell 3.x driver"
+ depends on !ARM64
+ select CORESIGHT_LINKS_AND_SINKS
+ help
+ This driver provides support for processor ETM3.x and PTM1.x modules,
+	  which allow tracing the instructions that a processor is executing.
+	  This is primarily useful for instruction-level tracing. Depending on
+	  the ETM version, data tracing may also be available.
+endif
diff --git a/drivers/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 4b4bec890ef5..4b4bec890ef5 100644
--- a/drivers/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
diff --git a/drivers/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index c9acd406f0d0..40049869aecd 100644
--- a/drivers/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -313,8 +313,8 @@ static ssize_t etb_read(struct file *file, char __user *data,
*ppos += len;
- dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
- __func__, len, (int) (depth * 4 - *ppos));
+ dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
+ __func__, len, (int)(depth * 4 - *ppos));
return len;
}
diff --git a/drivers/coresight/coresight-etm-cp14.c b/drivers/hwtracing/coresight/coresight-etm-cp14.c
index 12a220682117..12a220682117 100644
--- a/drivers/coresight/coresight-etm-cp14.c
+++ b/drivers/hwtracing/coresight/coresight-etm-cp14.c
diff --git a/drivers/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 501c5fac8a45..501c5fac8a45 100644
--- a/drivers/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
diff --git a/drivers/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index c965f5724abd..c965f5724abd 100644
--- a/drivers/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 3db36f70b666..3db36f70b666 100644
--- a/drivers/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
diff --git a/drivers/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 62fcd98cc7cf..62fcd98cc7cf 100644
--- a/drivers/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
diff --git a/drivers/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index cdf05537d574..75b9abd804e6 100644
--- a/drivers/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -107,7 +107,7 @@ static int replicator_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id replicator_match[] = {
+static const struct of_device_id replicator_match[] = {
{.compatible = "arm,coresight-replicator"},
{}
};
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 3ff232f9ddf7..7147f3dd363c 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -533,8 +533,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
*ppos += len;
- dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
- __func__, len, (int) (drvdata->size - *ppos));
+ dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
+ __func__, len, (int)(drvdata->size - *ppos));
return len;
}
@@ -565,6 +565,59 @@ static const struct file_operations tmc_fops = {
.llseek = no_llseek,
};
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ unsigned long flags;
+ u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
+ u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
+ u32 devid;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto out;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ CS_UNLOCK(drvdata->base);
+
+ tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
+ tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
+ tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
+ tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
+ tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
+ tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
+ tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
+ tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
+ tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
+ tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
+ devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+
+ CS_LOCK(drvdata->base);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ return sprintf(buf,
+ "Depth:\t\t0x%x\n"
+ "Status:\t\t0x%x\n"
+ "RAM read ptr:\t0x%x\n"
+ "RAM wrt ptr:\t0x%x\n"
+ "Trigger cnt:\t0x%x\n"
+ "Control:\t0x%x\n"
+ "Flush status:\t0x%x\n"
+ "Flush ctrl:\t0x%x\n"
+ "Mode:\t\t0x%x\n"
+ "PSRC:\t\t0x%x\n"
+ "DEVID:\t\t0x%x\n",
+ tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
+ tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
+out:
+ return -EINVAL;
+}
+static DEVICE_ATTR_RO(status);
+
static ssize_t trigger_cntr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -593,18 +646,21 @@ static DEVICE_ATTR_RW(trigger_cntr);
static struct attribute *coresight_etb_attrs[] = {
&dev_attr_trigger_cntr.attr,
+ &dev_attr_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(coresight_etb);
static struct attribute *coresight_etr_attrs[] = {
&dev_attr_trigger_cntr.attr,
+ &dev_attr_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(coresight_etr);
static struct attribute *coresight_etf_attrs[] = {
&dev_attr_trigger_cntr.attr,
+ &dev_attr_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(coresight_etf);
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3b33af2416bb..3b33af2416bb 100644
--- a/drivers/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
diff --git a/drivers/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index c5def9382357..894531d315b8 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -305,7 +305,9 @@ static int coresight_build_paths(struct coresight_device *csdev,
list_add(&csdev->path_link, path);
- if (csdev->type == CORESIGHT_DEV_TYPE_SINK && csdev->activated) {
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ csdev->activated) {
if (enable)
ret = coresight_enable_path(path);
else
diff --git a/drivers/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index c3efa418a86d..35e51ce93a5c 100644
--- a/drivers/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/coresight.h>
+#include <linux/cpumask.h>
#include <asm/smp_plat.h>
@@ -52,15 +53,6 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
endpoint, of_dev_node_match);
}
-static struct device_node *of_get_coresight_endpoint(
- const struct device_node *parent, struct device_node *prev)
-{
- struct device_node *node = of_graph_get_next_endpoint(parent, prev);
-
- of_node_put(prev);
- return node;
-}
-
static void of_coresight_get_ports(struct device_node *node,
int *nr_inport, int *nr_outport)
{
@@ -68,7 +60,7 @@ static void of_coresight_get_ports(struct device_node *node,
int in = 0, out = 0;
do {
- ep = of_get_coresight_endpoint(node, ep);
+ ep = of_graph_get_next_endpoint(node, ep);
if (!ep)
break;
@@ -113,7 +105,7 @@ static int of_coresight_alloc_memory(struct device *dev,
struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node)
{
- int i = 0, ret = 0;
+ int i = 0, ret = 0, cpu;
struct coresight_platform_data *pdata;
struct of_endpoint endpoint, rendpoint;
struct device *rdev;
@@ -140,7 +132,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
/* Iterate through each port to discover topology */
do {
/* Get a handle on a port */
- ep = of_get_coresight_endpoint(node, ep);
+ ep = of_graph_get_next_endpoint(node, ep);
if (!ep)
break;
@@ -187,17 +179,10 @@ struct coresight_platform_data *of_get_coresight_platform_data(
/* Affinity defaults to CPU0 */
pdata->cpu = 0;
dn = of_parse_phandle(node, "cpu", 0);
- if (dn) {
- const u32 *cell;
- int len, index;
- u64 hwid;
-
- cell = of_get_property(dn, "reg", &len);
- if (cell) {
- hwid = of_read_number(cell, of_n_addr_cells(dn));
- index = get_logical_index(hwid);
- if (index != -EINVAL)
- pdata->cpu = index;
+ for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) {
+ if (dn == of_get_cpu_node(cpu, NULL)) {
+ pdata->cpu = cpu;
+ break;
}
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 22da9c2ffa22..2255af23b9c7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -485,6 +485,15 @@ config I2C_DESIGNWARE_BAYTRAIL
the platform firmware controlling it. You should say Y if running on
a BayTrail system using the AXP288.
+config I2C_DIGICOLOR
+ tristate "Conexant Digicolor I2C driver"
+ depends on ARCH_DIGICOLOR
+ help
+	  Support for the I2C controller on Conexant Digicolor SoCs (CX92755).
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-digicolor.
+
config I2C_EFM32
tristate "EFM32 I2C controller"
depends on ARCH_EFM32 || COMPILE_TEST
@@ -574,6 +583,15 @@ config I2C_IOP3XX
This driver can also be built as a module. If so, the module
will be called i2c-iop3xx.
+config I2C_JZ4780
+ tristate "JZ4780 I2C controller interface support"
+ depends on MACH_JZ4780 || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ Ingenic JZ4780 I2C controller.
+
+ If you don't know what to do here, say N.
+
config I2C_KEMPLD
tristate "Kontron COM I2C Controller"
depends on MFD_KEMPLD
@@ -898,6 +916,16 @@ config I2C_XLR
This driver can also be built as a module. If so, the module
will be called i2c-xlr.
+config I2C_XLP9XX
+ tristate "XLP9XX I2C support"
+ depends on CPU_XLP || COMPILE_TEST
+ help
+ This driver enables support for the on-chip I2C interface of
+ the Broadcom XLP9xx/XLP5xx MIPS processors.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-xlp9xx.
+
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3638feb6677e..cdf941da91c6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -45,6 +45,7 @@ i2c-designware-platform-objs := i2c-designware-platdrv.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-objs := i2c-designware-pcidrv.o
+obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMG) += i2c-img-scb.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
+obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
obj-$(CONFIG_I2C_MESON) += i2c-meson.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
@@ -87,6 +89,7 @@ obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
+obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
# External I2C/SMBus adapter drivers
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 636fd2efad88..ff23d1bdd230 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -381,6 +381,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
int ret;
+ unsigned long time_left;
bool has_unre_flag = dev->pdata->has_unre_flag;
dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
@@ -436,9 +437,9 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
}
}
- ret = wait_for_completion_timeout(&dev->cmd_complete,
- dev->adapter.timeout);
- if (ret == 0) {
+ time_left = wait_for_completion_timeout(&dev->cmd_complete,
+ dev->adapter.timeout);
+ if (time_left == 0) {
dev_err(dev->dev, "controller timed out\n");
at91_init_twi_bus(dev);
ret = -ETIMEDOUT;
@@ -487,30 +488,10 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
if (ret < 0)
goto out;
- /*
- * The hardware can handle at most two messages concatenated by a
- * repeated start via it's internal address feature.
- */
- if (num > 2) {
- dev_err(dev->dev,
- "cannot handle more than two concatenated messages.\n");
- ret = 0;
- goto out;
- } else if (num == 2) {
+ if (num == 2) {
int internal_address = 0;
int i;
- if (msg->flags & I2C_M_RD) {
- dev_err(dev->dev, "first transfer must be write.\n");
- ret = -EINVAL;
- goto out;
- }
- if (msg->len > 3) {
- dev_err(dev->dev, "first message size must be <= 3.\n");
- ret = -EINVAL;
- goto out;
- }
-
/* 1st msg is put into the internal address, start with 2nd */
m_start = &msg[1];
for (i = 0; i < msg->len; ++i) {
@@ -540,6 +521,15 @@ out:
return ret;
}
+/*
+ * The hardware can handle at most two messages concatenated by a
+ * repeated start via its internal address feature.
+ */
+static struct i2c_adapter_quirks at91_twi_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
+ .max_comb_1st_msg_len = 3,
+};
+
static u32 at91_twi_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
@@ -777,6 +767,7 @@ static int at91_twi_probe(struct platform_device *pdev)
dev->adapter.owner = THIS_MODULE;
dev->adapter.class = I2C_CLASS_DEPRECATED;
dev->adapter.algo = &at91_twi_algorithm;
+ dev->adapter.quirks = &at91_twi_quirks;
dev->adapter.dev.parent = dev->dev;
dev->adapter.nr = pdev->id;
dev->adapter.timeout = AT91_I2C_TIMEOUT;
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 768a598d8d03..32d883490863 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -334,12 +334,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS;
u32 rx_xfer, tx_xfer;
u32 addr_1, addr_2;
- int ret;
-
- if (msg->len > 255) {
- dev_warn(idev->dev, "unsupported length %u\n", msg->len);
- return -EINVAL;
- }
+ unsigned long time_left;
idev->msg = msg;
idev->msg_xfrd = 0;
@@ -388,15 +383,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
i2c_int_enable(idev, int_mask);
- ret = wait_for_completion_timeout(&idev->msg_complete,
- I2C_XFER_TIMEOUT);
+ time_left = wait_for_completion_timeout(&idev->msg_complete,
+ I2C_XFER_TIMEOUT);
i2c_int_disable(idev, int_mask);
if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
dev_warn(idev->dev, "busy after xfer\n");
- if (ret == 0)
+ if (time_left == 0)
idev->msg_err = -ETIMEDOUT;
if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
@@ -408,17 +403,17 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
{
u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
- int ret;
+ unsigned long time_left;
reinit_completion(&idev->msg_complete);
/* Issue stop */
writel(0xb, idev->base + MST_COMMAND);
i2c_int_enable(idev, int_mask);
- ret = wait_for_completion_timeout(&idev->msg_complete,
- I2C_STOP_TIMEOUT);
+ time_left = wait_for_completion_timeout(&idev->msg_complete,
+ I2C_STOP_TIMEOUT);
i2c_int_disable(idev, int_mask);
- if (ret == 0)
+ if (time_left == 0)
return -ETIMEDOUT;
if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
@@ -454,6 +449,11 @@ static const struct i2c_algorithm axxia_i2c_algo = {
.functionality = axxia_i2c_func,
};
+static struct i2c_adapter_quirks axxia_i2c_quirks = {
+ .max_read_len = 255,
+ .max_write_len = 255,
+};
+
static int axxia_i2c_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -511,6 +511,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &axxia_i2c_algo;
+ idev->adapter.quirks = &axxia_i2c_quirks;
idev->adapter.dev.parent = &pdev->dev;
idev->adapter.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index d3c89157b337..f9f2c2082151 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -160,14 +160,6 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 val;
unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
- /* need to reserve one byte in the FIFO for the slave address */
- if (msg->len > M_TX_RX_FIFO_SIZE - 1) {
- dev_err(iproc_i2c->device,
- "only support data length up to %u bytes\n",
- M_TX_RX_FIFO_SIZE - 1);
- return -EOPNOTSUPP;
- }
-
/* check if bus is busy */
if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
BIT(M_CMD_START_BUSY_SHIFT))) {
@@ -287,6 +279,12 @@ static const struct i2c_algorithm bcm_iproc_algo = {
.functionality = bcm_iproc_i2c_functionality,
};
+static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+ /* need to reserve one byte in the FIFO for the slave address */
+ .max_read_len = M_TX_RX_FIFO_SIZE - 1,
+ .max_write_len = M_TX_RX_FIFO_SIZE - 1,
+};
+
static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
{
unsigned int bus_speed;
@@ -413,6 +411,7 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, iproc_i2c);
strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
adap->algo = &bcm_iproc_algo;
+ adap->quirks = &bcm_iproc_i2c_quirks;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 5d6feb937b9d..c9336a3202d5 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -147,7 +147,7 @@ static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
struct i2c_msg *msg)
{
u32 c;
- int time_left;
+ unsigned long time_left;
i2c_dev->msg_buf = msg->buf;
i2c_dev->msg_buf_remaining = msg->len;
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 7d7a14cdadfb..2ee78e099d30 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -475,7 +475,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
struct i2c_adapter *adap)
{
- int ret;
+ unsigned long time_left;
u32 reg;
id->p_msg = msg;
@@ -501,8 +501,8 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
cdns_i2c_msend(id);
/* Wait for the signal of completion */
- ret = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
- if (!ret) {
+ time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
+ if (time_left == 0) {
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
"timeout waiting on completion\n");
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 2d466538b2e2..714bdc837769 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -308,22 +308,12 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct i2c_reg __iomem *i2c_reg = cpm->i2c_reg;
struct i2c_ram __iomem *i2c_ram = cpm->i2c_ram;
struct i2c_msg *pmsg;
- int ret, i;
+ int ret;
int tptr;
int rptr;
cbd_t __iomem *tbdf;
cbd_t __iomem *rbdf;
- if (num > CPM_MAXBD)
- return -EINVAL;
-
- /* Check if we have any oversized READ requests */
- for (i = 0; i < num; i++) {
- pmsg = &msgs[i];
- if (pmsg->len >= CPM_MAX_READ)
- return -EINVAL;
- }
-
/* Reset to use first buffer */
out_be16(&i2c_ram->rbptr, in_be16(&i2c_ram->rbase));
out_be16(&i2c_ram->tbptr, in_be16(&i2c_ram->tbase));
@@ -424,10 +414,18 @@ static const struct i2c_algorithm cpm_i2c_algo = {
.functionality = cpm_i2c_func,
};
+/* CPM_MAX_READ also limits writes, according to the code! */
+static struct i2c_adapter_quirks cpm_i2c_quirks = {
+ .max_num_msgs = CPM_MAXBD,
+ .max_read_len = CPM_MAX_READ,
+ .max_write_len = CPM_MAX_READ,
+};
+
static const struct i2c_adapter cpm_ops = {
.owner = THIS_MODULE,
.name = "i2c-cpm",
.algo = &cpm_i2c_algo,
+ .quirks = &cpm_i2c_quirks,
};
static int cpm_i2c_setup(struct cpm_i2c *cpm)
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 875c22ae5400..fa8dedd8c3a2 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -182,72 +182,41 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
const u16 bus_num = bus->remote_bus;
int request_len;
int response_len;
- u8 *request = NULL;
- u8 *response = NULL;
int result;
- struct cros_ec_command msg;
+ struct cros_ec_command msg = { };
request_len = ec_i2c_count_message(i2c_msgs, num);
if (request_len < 0) {
dev_warn(dev, "Error constructing message %d\n", request_len);
- result = request_len;
- goto exit;
+ return request_len;
}
+
response_len = ec_i2c_count_response(i2c_msgs, num);
if (response_len < 0) {
/* Unexpected; no errors should come when NULL response */
dev_warn(dev, "Error preparing response %d\n", response_len);
- result = response_len;
- goto exit;
- }
-
- if (request_len <= ARRAY_SIZE(bus->request_buf)) {
- request = bus->request_buf;
- } else {
- request = kzalloc(request_len, GFP_KERNEL);
- if (request == NULL) {
- result = -ENOMEM;
- goto exit;
- }
- }
- if (response_len <= ARRAY_SIZE(bus->response_buf)) {
- response = bus->response_buf;
- } else {
- response = kzalloc(response_len, GFP_KERNEL);
- if (response == NULL) {
- result = -ENOMEM;
- goto exit;
- }
+ return response_len;
}
- result = ec_i2c_construct_message(request, i2c_msgs, num, bus_num);
+ result = ec_i2c_construct_message(msg.outdata, i2c_msgs, num, bus_num);
if (result)
- goto exit;
+ return result;
msg.version = 0;
msg.command = EC_CMD_I2C_PASSTHRU;
- msg.outdata = request;
msg.outsize = request_len;
- msg.indata = response;
msg.insize = response_len;
result = cros_ec_cmd_xfer(bus->ec, &msg);
if (result < 0)
- goto exit;
+ return result;
- result = ec_i2c_parse_response(response, i2c_msgs, &num);
+ result = ec_i2c_parse_response(msg.indata, i2c_msgs, &num);
if (result < 0)
- goto exit;
+ return result;
/* Indicate success by saying how many messages were sent */
- result = num;
-exit:
- if (request != bus->request_buf)
- kfree(request);
- if (response != bus->response_buf)
- kfree(response);
-
- return result;
+ return num;
}
static u32 ec_i2c_functionality(struct i2c_adapter *adap)
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6dc7ff5d3d9a..4788a32afb86 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -60,6 +60,12 @@
#define DAVINCI_I2C_IVR_REG 0x28
#define DAVINCI_I2C_EMDR_REG 0x2c
#define DAVINCI_I2C_PSC_REG 0x30
+#define DAVINCI_I2C_FUNC_REG 0x48
+#define DAVINCI_I2C_DIR_REG 0x4c
+#define DAVINCI_I2C_DIN_REG 0x50
+#define DAVINCI_I2C_DOUT_REG 0x54
+#define DAVINCI_I2C_DSET_REG 0x58
+#define DAVINCI_I2C_DCLR_REG 0x5c
#define DAVINCI_I2C_IVR_AAS 0x07
#define DAVINCI_I2C_IVR_SCD 0x06
@@ -93,6 +99,29 @@
#define DAVINCI_I2C_IMR_NACK BIT(1)
#define DAVINCI_I2C_IMR_AL BIT(0)
+/* set SDA and SCL as GPIO */
+#define DAVINCI_I2C_FUNC_PFUNC0 BIT(0)
+
+/* set SCL as output when used as GPIO */
+#define DAVINCI_I2C_DIR_PDIR0	BIT(0)
+/* set SDA as output when used as GPIO */
+#define DAVINCI_I2C_DIR_PDIR1	BIT(1)
+
+/* read SCL GPIO level */
+#define DAVINCI_I2C_DIN_PDIN0 BIT(0)
+/* read SDA GPIO level */
+#define DAVINCI_I2C_DIN_PDIN1 BIT(1)
+
+/* set the SCL GPIO high */
+#define DAVINCI_I2C_DSET_PDSET0	BIT(0)
+/* set the SDA GPIO high */
+#define DAVINCI_I2C_DSET_PDSET1	BIT(1)
+
+/* set the SCL GPIO low */
+#define DAVINCI_I2C_DCLR_PDCLR0 BIT(0)
+/* set the SDA GPIO low */
+#define DAVINCI_I2C_DCLR_PDCLR1 BIT(1)
+
struct davinci_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -129,43 +158,6 @@ static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
return readw_relaxed(i2c_dev->base + reg);
}
-/* Generate a pulse on the i2c clock pin. */
-static void davinci_i2c_clock_pulse(unsigned int scl_pin)
-{
- u16 i;
-
- if (scl_pin) {
- /* Send high and low on the SCL line */
- for (i = 0; i < 9; i++) {
- gpio_set_value(scl_pin, 0);
- udelay(20);
- gpio_set_value(scl_pin, 1);
- udelay(20);
- }
- }
-}
-
-/* This routine does i2c bus recovery as specified in the
- * i2c protocol Rev. 03 section 3.16 titled "Bus clear"
- */
-static void davinci_i2c_recover_bus(struct davinci_i2c_dev *dev)
-{
- u32 flag = 0;
- struct davinci_i2c_platform_data *pdata = dev->pdata;
-
- dev_err(dev->dev, "initiating i2c bus recovery\n");
- /* Send NACK to the slave */
- flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
- flag |= DAVINCI_I2C_MDR_NACK;
- /* write the data into mode register */
- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
- davinci_i2c_clock_pulse(pdata->scl_pin);
- /* Send STOP */
- flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
- flag |= DAVINCI_I2C_MDR_STP;
- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
-}
-
static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev,
int val)
{
@@ -263,6 +255,99 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
}
/*
+ * This routine does I2C bus recovery using i2c_generic_gpio_recovery,
+ * which is provided by the I2C bus recovery infrastructure.
+ */
+static void davinci_i2c_prepare_recovery(struct i2c_adapter *adap)
+{
+ struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
+
+ /* Disable interrupts */
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, 0);
+
+ /* put I2C into reset */
+ davinci_i2c_reset_ctrl(dev, 0);
+}
+
+static void davinci_i2c_unprepare_recovery(struct i2c_adapter *adap)
+{
+ struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
+
+ i2c_davinci_init(dev);
+}
+
+static struct i2c_bus_recovery_info davinci_i2c_gpio_recovery_info = {
+ .recover_bus = i2c_generic_gpio_recovery,
+ .prepare_recovery = davinci_i2c_prepare_recovery,
+ .unprepare_recovery = davinci_i2c_unprepare_recovery,
+};
+
+static void davinci_i2c_set_scl(struct i2c_adapter *adap, int val)
+{
+ struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
+
+ if (val)
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_DSET_REG,
+ DAVINCI_I2C_DSET_PDSET0);
+ else
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_DCLR_REG,
+ DAVINCI_I2C_DCLR_PDCLR0);
+}
+
+static int davinci_i2c_get_scl(struct i2c_adapter *adap)
+{
+ struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
+ int val;
+
+ /* read the state of SCL */
+ val = davinci_i2c_read_reg(dev, DAVINCI_I2C_DIN_REG);
+ return val & DAVINCI_I2C_DIN_PDIN0;
+}
+
+static int davinci_i2c_get_sda(struct i2c_adapter *adap)
+{
+ struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
+ int val;
+
+ /* read the state of SDA */
+ val = davinci_i2c_read_reg(dev, DAVINCI_I2C_DIN_REG);
+ return val & DAVINCI_I2C_DIN_PDIN1;
+}
+
+static void davinci_i2c_scl_prepare_recovery(struct i2c_adapter *adap)
+{
+ struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
+
+ davinci_i2c_prepare_recovery(adap);
+
+ /* SCL output, SDA input */
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_DIR_REG, DAVINCI_I2C_DIR_PDIR0);
+
+ /* change to GPIO mode */
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_FUNC_REG,
+ DAVINCI_I2C_FUNC_PFUNC0);
+}
+
+static void davinci_i2c_scl_unprepare_recovery(struct i2c_adapter *adap)
+{
+ struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
+
+ /* change back to I2C mode */
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_FUNC_REG, 0);
+
+ davinci_i2c_unprepare_recovery(adap);
+}
+
+static struct i2c_bus_recovery_info davinci_i2c_scl_recovery_info = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .set_scl = davinci_i2c_set_scl,
+ .get_scl = davinci_i2c_get_scl,
+ .get_sda = davinci_i2c_get_sda,
+ .prepare_recovery = davinci_i2c_scl_prepare_recovery,
+ .unprepare_recovery = davinci_i2c_scl_unprepare_recovery,
+};
+
+/*
* Waiting for bus not busy
*/
static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev,
@@ -282,8 +367,7 @@ static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev,
return -ETIMEDOUT;
} else {
to_cnt = 0;
- davinci_i2c_recover_bus(dev);
- i2c_davinci_init(dev);
+ i2c_recover_bus(&dev->adapter);
}
}
if (allow_sleep)
@@ -304,7 +388,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
struct davinci_i2c_platform_data *pdata = dev->pdata;
u32 flag;
u16 w;
- int r;
+ unsigned long time_left;
/* Introduce a delay, required for some boards (e.g Davinci EVM) */
if (pdata->bus_delay)
@@ -368,11 +452,11 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
flag |= DAVINCI_I2C_MDR_STP;
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
- r = wait_for_completion_timeout(&dev->cmd_complete, dev->adapter.timeout);
- if (r == 0) {
+ time_left = wait_for_completion_timeout(&dev->cmd_complete,
+ dev->adapter.timeout);
+ if (!time_left) {
dev_err(dev->dev, "controller timed out\n");
- davinci_i2c_recover_bus(dev);
- i2c_davinci_init(dev);
+ i2c_recover_bus(adap);
dev->buf_len = 0;
return -ETIMEDOUT;
}
@@ -380,17 +464,13 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
/* This should be 0 if all bytes were transferred
* or dev->cmd_err denotes an error.
*/
- if (r >= 0) {
- dev_err(dev->dev, "abnormal termination buf_len=%i\n",
- dev->buf_len);
- r = -EREMOTEIO;
- }
+ dev_err(dev->dev, "abnormal termination buf_len=%i\n",
+ dev->buf_len);
dev->terminate = 1;
wmb();
dev->buf_len = 0;
+ return -EREMOTEIO;
}
- if (r < 0)
- return r;
/* no error */
if (likely(!dev->cmd_err))
@@ -674,6 +754,10 @@ static int davinci_i2c_probe(struct platform_device *pdev)
if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&prop))
dev->pdata->bus_freq = prop / 1000;
+
+ dev->pdata->has_pfunc =
+ of_property_read_bool(pdev->dev.of_node,
+ "ti,has-pfunc");
} else if (!dev->pdata) {
dev->pdata = &davinci_i2c_platform_data_default;
}
@@ -715,6 +799,14 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap->timeout = DAVINCI_I2C_TIMEOUT;
adap->dev.of_node = pdev->dev.of_node;
+ if (dev->pdata->has_pfunc)
+ adap->bus_recovery_info = &davinci_i2c_scl_recovery_info;
+ else if (dev->pdata->scl_pin) {
+ adap->bus_recovery_info = &davinci_i2c_gpio_recovery_info;
+ adap->bus_recovery_info->scl_gpio = dev->pdata->scl_pin;
+ adap->bus_recovery_info->sda_gpio = dev->pdata->sda_pin;
+ }
+
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
if (r) {
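
The recovery plumbing added above follows the generic I2C bus recovery infrastructure: fill in a struct i2c_bus_recovery_info, attach it to the adapter before registration, and call i2c_recover_bus() when a transfer times out. A hedged sketch with stubbed callbacks; the bar_* names are placeholders, not DaVinci code:

#include <linux/i2c.h>

static void bar_set_scl(struct i2c_adapter *adap, int val)
{
	/* drive SCL high or low via controller registers or a GPIO */
}

static int bar_get_scl(struct i2c_adapter *adap)
{
	return 1;	/* sample the SCL line */
}

static int bar_get_sda(struct i2c_adapter *adap)
{
	return 1;	/* sample the SDA line */
}

static struct i2c_bus_recovery_info bar_recovery_info = {
	.recover_bus = i2c_generic_scl_recovery,	/* core pulses SCL, checks SDA */
	.set_scl     = bar_set_scl,
	.get_scl     = bar_get_scl,
	.get_sda     = bar_get_sda,
};

static void bar_setup_recovery(struct i2c_adapter *adap)
{
	adap->bus_recovery_info = &bar_recovery_info;
	/* on a transfer timeout the driver then calls i2c_recover_bus(adap) */
}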
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 6e25c010e690..6f19a33773fe 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -656,8 +656,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
i2c_dw_xfer_init(dev);
/* wait for tx to complete */
- ret = wait_for_completion_timeout(&dev->cmd_complete, HZ);
- if (ret == 0) {
+ if (!wait_for_completion_timeout(&dev->cmd_complete, HZ)) {
dev_err(dev->dev, "controller timed out\n");
/* i2c_dw_init implicitly disables the adapter */
i2c_dw_init(dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index c270f5f9a8f9..0a80e4aabaed 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -143,10 +143,8 @@ static int dw_i2c_probe(struct platform_device *pdev)
u32 clk_freq, ht = 0;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
- return irq; /* -ENXIO */
- }
+ if (irq < 0)
+ return irq;
dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
if (!dev)
@@ -166,7 +164,7 @@ static int dw_i2c_probe(struct platform_device *pdev)
/* fast mode by default because of legacy reasons */
clk_freq = 400000;
- if (ACPI_COMPANION(&pdev->dev)) {
+ if (has_acpi_companion(&pdev->dev)) {
dw_i2c_acpi_configure(pdev);
} else if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
@@ -286,7 +284,7 @@ static int dw_i2c_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (ACPI_COMPANION(&pdev->dev))
+ if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_unconfigure(pdev);
return 0;
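
has_acpi_companion() is the boolean helper for "does this device have an ACPI companion?", whereas ACPI_COMPANION() hands back the companion object itself, so the substitution above reads more naturally in a condition. A small illustrative sketch, not driver-specific:

#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static void baz_pick_config_path(struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev))
		dev_dbg(&pdev->dev, "configuring from ACPI\n");
	else if (pdev->dev.of_node)
		dev_dbg(&pdev->dev, "configuring from devicetree\n");
}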
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
new file mode 100644
index 000000000000..9604024e0eb0
--- /dev/null
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -0,0 +1,384 @@
+/*
+ * I2C bus driver for Conexant Digicolor SoCs
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define DEFAULT_FREQ 100000
+#define TIMEOUT_MS 100
+
+#define II_CONTROL 0x0
+#define II_CONTROL_LOCAL_RESET BIT(0)
+
+#define II_CLOCKTIME 0x1
+
+#define II_COMMAND 0x2
+#define II_CMD_START 1
+#define II_CMD_RESTART 2
+#define II_CMD_SEND_ACK 3
+#define II_CMD_GET_ACK 6
+#define II_CMD_GET_NOACK 7
+#define II_CMD_STOP 10
+#define II_COMMAND_GO BIT(7)
+#define II_COMMAND_COMPLETION_STATUS(r) (((r) >> 5) & 3)
+#define II_CMD_STATUS_NORMAL 0
+#define II_CMD_STATUS_ACK_GOOD 1
+#define II_CMD_STATUS_ACK_BAD 2
+#define II_CMD_STATUS_ABORT 3
+
+#define II_DATA 0x3
+#define II_INTFLAG_CLEAR 0x8
+#define II_INTENABLE 0xa
+
+struct dc_i2c {
+ struct i2c_adapter adap;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned int frequency;
+
+ struct i2c_msg *msg;
+ unsigned int msgbuf_ptr;
+ int last;
+ spinlock_t lock;
+ struct completion done;
+ int state;
+ int error;
+};
+
+enum {
+ STATE_IDLE,
+ STATE_START,
+ STATE_ADDR,
+ STATE_WRITE,
+ STATE_READ,
+ STATE_STOP,
+};
+
+static void dc_i2c_cmd(struct dc_i2c *i2c, u8 cmd)
+{
+ writeb_relaxed(cmd | II_COMMAND_GO, i2c->regs + II_COMMAND);
+}
+
+static u8 dc_i2c_addr_cmd(struct i2c_msg *msg)
+{
+ u8 addr = (msg->addr & 0x7f) << 1;
+
+ if (msg->flags & I2C_M_RD)
+ addr |= 1;
+
+ return addr;
+}
+
+static void dc_i2c_data(struct dc_i2c *i2c, u8 data)
+{
+ writeb_relaxed(data, i2c->regs + II_DATA);
+}
+
+static void dc_i2c_write_byte(struct dc_i2c *i2c, u8 byte)
+{
+ dc_i2c_data(i2c, byte);
+ dc_i2c_cmd(i2c, II_CMD_SEND_ACK);
+}
+
+static void dc_i2c_write_buf(struct dc_i2c *i2c)
+{
+ dc_i2c_write_byte(i2c, i2c->msg->buf[i2c->msgbuf_ptr++]);
+}
+
+static void dc_i2c_next_read(struct dc_i2c *i2c)
+{
+ bool last = (i2c->msgbuf_ptr + 1 == i2c->msg->len);
+
+ dc_i2c_cmd(i2c, last ? II_CMD_GET_NOACK : II_CMD_GET_ACK);
+}
+
+static void dc_i2c_stop(struct dc_i2c *i2c)
+{
+ i2c->state = STATE_STOP;
+ if (i2c->last)
+ dc_i2c_cmd(i2c, II_CMD_STOP);
+ else
+ complete(&i2c->done);
+}
+
+static u8 dc_i2c_read_byte(struct dc_i2c *i2c)
+{
+ return readb_relaxed(i2c->regs + II_DATA);
+}
+
+static void dc_i2c_read_buf(struct dc_i2c *i2c)
+{
+ i2c->msg->buf[i2c->msgbuf_ptr++] = dc_i2c_read_byte(i2c);
+ dc_i2c_next_read(i2c);
+}
+
+static void dc_i2c_set_irq(struct dc_i2c *i2c, int enable)
+{
+ if (enable)
+ writeb_relaxed(1, i2c->regs + II_INTFLAG_CLEAR);
+ writeb_relaxed(!!enable, i2c->regs + II_INTENABLE);
+}
+
+static int dc_i2c_cmd_status(struct dc_i2c *i2c)
+{
+ u8 cmd = readb_relaxed(i2c->regs + II_COMMAND);
+
+ return II_COMMAND_COMPLETION_STATUS(cmd);
+}
+
+static void dc_i2c_start_msg(struct dc_i2c *i2c, int first)
+{
+ struct i2c_msg *msg = i2c->msg;
+
+ if (!(msg->flags & I2C_M_NOSTART)) {
+ i2c->state = STATE_START;
+ dc_i2c_cmd(i2c, first ? II_CMD_START : II_CMD_RESTART);
+ } else if (msg->flags & I2C_M_RD) {
+ i2c->state = STATE_READ;
+ dc_i2c_next_read(i2c);
+ } else {
+ i2c->state = STATE_WRITE;
+ dc_i2c_write_buf(i2c);
+ }
+}
+
+static irqreturn_t dc_i2c_irq(int irq, void *dev_id)
+{
+ struct dc_i2c *i2c = dev_id;
+ int cmd_status = dc_i2c_cmd_status(i2c);
+ unsigned long flags;
+ u8 addr_cmd;
+
+ writeb_relaxed(1, i2c->regs + II_INTFLAG_CLEAR);
+
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ if (cmd_status == II_CMD_STATUS_ACK_BAD
+ || cmd_status == II_CMD_STATUS_ABORT) {
+ i2c->error = -EIO;
+ complete(&i2c->done);
+ goto out;
+ }
+
+ switch (i2c->state) {
+ case STATE_START:
+ addr_cmd = dc_i2c_addr_cmd(i2c->msg);
+ dc_i2c_write_byte(i2c, addr_cmd);
+ i2c->state = STATE_ADDR;
+ break;
+ case STATE_ADDR:
+ if (i2c->msg->flags & I2C_M_RD) {
+ dc_i2c_next_read(i2c);
+ i2c->state = STATE_READ;
+ break;
+ }
+ i2c->state = STATE_WRITE;
+ /* fall through */
+ case STATE_WRITE:
+ if (i2c->msgbuf_ptr < i2c->msg->len)
+ dc_i2c_write_buf(i2c);
+ else
+ dc_i2c_stop(i2c);
+ break;
+ case STATE_READ:
+ if (i2c->msgbuf_ptr < i2c->msg->len)
+ dc_i2c_read_buf(i2c);
+ else
+ dc_i2c_stop(i2c);
+ break;
+ case STATE_STOP:
+ i2c->state = STATE_IDLE;
+ complete(&i2c->done);
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&i2c->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int dc_i2c_xfer_msg(struct dc_i2c *i2c, struct i2c_msg *msg, int first,
+ int last)
+{
+ unsigned long timeout = msecs_to_jiffies(TIMEOUT_MS);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+ i2c->msg = msg;
+ i2c->msgbuf_ptr = 0;
+ i2c->last = last;
+ i2c->error = 0;
+
+ reinit_completion(&i2c->done);
+ dc_i2c_set_irq(i2c, 1);
+ dc_i2c_start_msg(i2c, first);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ timeout = wait_for_completion_timeout(&i2c->done, timeout);
+ dc_i2c_set_irq(i2c, 0);
+ if (timeout == 0) {
+ i2c->state = STATE_IDLE;
+ return -ETIMEDOUT;
+ }
+
+ if (i2c->error)
+ return i2c->error;
+
+ return 0;
+}
+
+static int dc_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct dc_i2c *i2c = adap->algo_data;
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ ret = dc_i2c_xfer_msg(i2c, &msgs[i], i == 0, i == num - 1);
+ if (ret)
+ return ret;
+ }
+
+ return num;
+}
+
+static int dc_i2c_init_hw(struct dc_i2c *i2c)
+{
+ unsigned long clk_rate = clk_get_rate(i2c->clk);
+ unsigned int clocktime;
+
+ writeb_relaxed(II_CONTROL_LOCAL_RESET, i2c->regs + II_CONTROL);
+ udelay(100);
+ writeb_relaxed(0, i2c->regs + II_CONTROL);
+ udelay(100);
+
+ clocktime = DIV_ROUND_UP(clk_rate, 64 * i2c->frequency);
+ if (clocktime < 1 || clocktime > 0xff) {
+ dev_err(i2c->dev, "can't set bus speed of %u Hz\n",
+ i2c->frequency);
+ return -EINVAL;
+ }
+ writeb_relaxed(clocktime - 1, i2c->regs + II_CLOCKTIME);
+
+ return 0;
+}
+
+static u32 dc_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART;
+}
+
+static const struct i2c_algorithm dc_i2c_algorithm = {
+ .master_xfer = dc_i2c_xfer,
+ .functionality = dc_i2c_func,
+};
+
+static int dc_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct dc_i2c *i2c;
+ struct resource *r;
+ int ret = 0, irq;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(struct dc_i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &i2c->frequency))
+ i2c->frequency = DEFAULT_FREQ;
+
+ i2c->dev = &pdev->dev;
+ platform_set_drvdata(pdev, i2c);
+
+ spin_lock_init(&i2c->lock);
+ init_completion(&i2c->done);
+
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk))
+ return PTR_ERR(i2c->clk);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(i2c->regs))
+ return PTR_ERR(i2c->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, dc_i2c_irq, 0,
+ dev_name(&pdev->dev), i2c);
+ if (ret < 0)
+ return ret;
+
+ strlcpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
+ sizeof(i2c->adap.name));
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &dc_i2c_algorithm;
+ i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = np;
+ i2c->adap.algo_data = i2c;
+
+ ret = dc_i2c_init_hw(i2c);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ clk_unprepare(i2c->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dc_i2c_remove(struct platform_device *pdev)
+{
+ struct dc_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+ clk_disable_unprepare(i2c->clk);
+
+ return 0;
+}
+
+static const struct of_device_id dc_i2c_match[] = {
+ { .compatible = "cnxt,cx92755-i2c" },
+ { },
+};
+
+static struct platform_driver dc_i2c_driver = {
+ .probe = dc_i2c_probe,
+ .remove = dc_i2c_remove,
+ .driver = {
+ .name = "digicolor-i2c",
+ .of_match_table = dc_i2c_match,
+ },
+};
+module_platform_driver(dc_i2c_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Conexant Digicolor I2C master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index b3fb86af4cbb..1600edd57ce9 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -144,7 +144,6 @@ static int dln2_i2c_xfer(struct i2c_adapter *adapter,
{
struct dln2_i2c *dln2 = i2c_get_adapdata(adapter);
struct i2c_msg *pmsg;
- struct device *dev = &dln2->adapter.dev;
int i;
for (i = 0; i < num; i++) {
@@ -152,11 +151,6 @@ static int dln2_i2c_xfer(struct i2c_adapter *adapter,
pmsg = &msgs[i];
- if (pmsg->len > DLN2_I2C_MAX_XFER_SIZE) {
- dev_warn(dev, "maximum transfer size exceeded\n");
- return -EOPNOTSUPP;
- }
-
if (pmsg->flags & I2C_M_RD) {
ret = dln2_i2c_read(dln2, pmsg->addr, pmsg->buf,
pmsg->len);
@@ -187,6 +181,11 @@ static const struct i2c_algorithm dln2_i2c_usb_algorithm = {
.functionality = dln2_i2c_func,
};
+static struct i2c_adapter_quirks dln2_i2c_quirks = {
+ .max_read_len = DLN2_I2C_MAX_XFER_SIZE,
+ .max_write_len = DLN2_I2C_MAX_XFER_SIZE,
+};
+
static int dln2_i2c_probe(struct platform_device *pdev)
{
int ret;
@@ -209,7 +208,9 @@ static int dln2_i2c_probe(struct platform_device *pdev)
dln2->adapter.owner = THIS_MODULE;
dln2->adapter.class = I2C_CLASS_HWMON;
dln2->adapter.algo = &dln2_i2c_usb_algorithm;
+ dln2->adapter.quirks = &dln2_i2c_quirks;
dln2->adapter.dev.parent = dev;
+ dln2->adapter.dev.of_node = dev->of_node;
i2c_set_adapdata(&dln2->adapter, dln2);
snprintf(dln2->adapter.name, sizeof(dln2->adapter.name), "%s-%s-%d",
"dln2-i2c", dev_name(pdev->dev.parent), dln2->port);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 8fafb254e42a..5ecbb3fdc27e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -223,8 +223,6 @@ struct i801_priv {
#endif
};
-static struct pci_driver i801_driver;
-
#define FEATURE_SMBUS_PEC (1 << 0)
#define FEATURE_BLOCK_BUFFER (1 << 1)
#define FEATURE_BLOCK_PROC (1 << 2)
@@ -1140,7 +1138,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
int err, i;
struct i801_priv *priv;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1182,34 +1180,35 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
priv->features &= ~disable_features;
- err = pci_enable_device(dev);
+ err = pcim_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
err);
- goto exit;
+ return err;
}
+ pcim_pin_device(dev);
/* Determine the address of the SMBus area */
priv->smba = pci_resource_start(dev, SMBBAR);
if (!priv->smba) {
- dev_err(&dev->dev, "SMBus base address uninitialized, "
- "upgrade BIOS\n");
- err = -ENODEV;
- goto exit;
+ dev_err(&dev->dev,
+ "SMBus base address uninitialized, upgrade BIOS\n");
+ return -ENODEV;
}
err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
if (err) {
- err = -ENODEV;
- goto exit;
+ return -ENODEV;
}
- err = pci_request_region(dev, SMBBAR, i801_driver.name);
+ err = pcim_iomap_regions(dev, 1 << SMBBAR,
+ dev_driver_string(&dev->dev));
if (err) {
- dev_err(&dev->dev, "Failed to request SMBus region "
- "0x%lx-0x%Lx\n", priv->smba,
+ dev_err(&dev->dev,
+ "Failed to request SMBus region 0x%lx-0x%Lx\n",
+ priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
- goto exit;
+ return err;
}
pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
@@ -1254,8 +1253,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (priv->features & FEATURE_IRQ) {
init_waitqueue_head(&priv->waitq);
- err = request_irq(dev->irq, i801_isr, IRQF_SHARED,
- i801_driver.name, priv);
+ err = devm_request_irq(&dev->dev, dev->irq, i801_isr,
+ IRQF_SHARED,
+ dev_driver_string(&dev->dev), priv);
if (err) {
dev_err(&dev->dev, "Failed to allocate irq %d: %d\n",
dev->irq, err);
@@ -1276,7 +1276,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
- goto exit_free_irq;
+ return err;
}
i801_probe_optional_slaves(priv);
@@ -1286,14 +1286,6 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_drvdata(dev, priv);
return 0;
-
-exit_free_irq:
- if (priv->features & FEATURE_IRQ)
- free_irq(dev->irq, priv);
- pci_release_region(dev, SMBBAR);
-exit:
- kfree(priv);
- return err;
}
static void i801_remove(struct pci_dev *dev)
@@ -1304,11 +1296,6 @@ static void i801_remove(struct pci_dev *dev)
i2c_del_adapter(&priv->adapter);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
- if (priv->features & FEATURE_IRQ)
- free_irq(dev->irq, priv);
- pci_release_region(dev, SMBBAR);
-
- kfree(priv);
/*
* do not call pci_disable_device(dev) since it can cause hard hangs on
* some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
@@ -1330,7 +1317,7 @@ static int i801_resume(struct pci_dev *dev)
{
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
- return pci_enable_device(dev);
+ return 0;
}
#else
#define i801_suspend NULL
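
The i801 conversion above leans on managed resources: whatever probe allocates through devm_*/pcim_* helpers is released automatically on probe failure or unbind, which is why the exit labels and the free_irq()/pci_release_region()/kfree() calls in remove() disappear. A hedged sketch of that shape, with placeholder qux_* names:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

struct qux_priv {
	unsigned long smba;
};

static irqreturn_t qux_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* stub for the sketch */
}

static int qux_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct qux_priv *priv;
	int err;

	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	err = pcim_enable_device(dev);		/* undone automatically */
	if (err)
		return err;

	/* map BAR 0 here as an example; i801 maps its SMBus BAR instead */
	err = pcim_iomap_regions(dev, 1 << 0, dev_driver_string(&dev->dev));
	if (err)
		return err;			/* nothing to unwind by hand */

	/* freed automatically when the device goes away */
	return devm_request_irq(&dev->dev, dev->irq, qux_isr, IRQF_SHARED,
				dev_driver_string(&dev->dev), priv);
}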
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 0fcc1694c607..00ffd6613680 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -988,15 +988,16 @@ out:
static int img_i2c_reset_bus(struct img_i2c *i2c)
{
unsigned long flags;
- int ret;
+ unsigned long time_left;
spin_lock_irqsave(&i2c->lock, flags);
reinit_completion(&i2c->msg_complete);
img_i2c_reset_start(i2c);
spin_unlock_irqrestore(&i2c->lock, flags);
- ret = wait_for_completion_timeout(&i2c->msg_complete, IMG_I2C_TIMEOUT);
- if (ret == 0)
+ time_left = wait_for_completion_timeout(&i2c->msg_complete,
+ IMG_I2C_TIMEOUT);
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
}
@@ -1007,6 +1008,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
struct img_i2c *i2c = i2c_get_adapdata(adap);
bool atomic = false;
int i, ret;
+ unsigned long time_left;
if (i2c->mode == MODE_SUSPEND) {
WARN(1, "refusing to service transaction in suspended state\n");
@@ -1068,11 +1070,11 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
img_i2c_write(i2c);
spin_unlock_irqrestore(&i2c->lock, flags);
- ret = wait_for_completion_timeout(&i2c->msg_complete,
- IMG_I2C_TIMEOUT);
+ time_left = wait_for_completion_timeout(&i2c->msg_complete,
+ IMG_I2C_TIMEOUT);
del_timer_sync(&i2c->check_timer);
- if (ret == 0) {
+ if (time_left == 0) {
dev_err(adap->dev.parent, "i2c transfer timed out\n");
i2c->msg_status = -ETIMEDOUT;
break;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d7b26fc6f432..a53a7dd66945 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -601,6 +601,7 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
struct i2c_msg *msgs)
{
int result;
+ unsigned long time_left;
unsigned int temp = 0;
unsigned long orig_jiffies = jiffies;
struct imx_i2c_dma *dma = i2c_imx->dma;
@@ -624,10 +625,10 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
*/
imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
reinit_completion(&i2c_imx->dma->cmd_complete);
- result = wait_for_completion_timeout(
+ time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
- if (result == 0) {
+ if (time_left == 0) {
dmaengine_terminate_all(dma->chan_using);
return -ETIMEDOUT;
}
@@ -663,6 +664,7 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
struct i2c_msg *msgs, bool is_lastmsg)
{
int result;
+ unsigned long time_left;
unsigned int temp;
unsigned long orig_jiffies = jiffies;
struct imx_i2c_dma *dma = i2c_imx->dma;
@@ -682,10 +684,10 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
return result;
reinit_completion(&i2c_imx->dma->cmd_complete);
- result = wait_for_completion_timeout(
+ time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
- if (result == 0) {
+ if (time_left == 0) {
dmaengine_terminate_all(dma->chan_using);
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f2b0ff011631..f994712d0904 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -380,6 +380,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
int size, union i2c_smbus_data *data)
{
int ret;
+ unsigned long time_left;
dma_addr_t dma_addr = 0; /* address of the data buffer */
u8 dma_size = 0;
enum dma_data_direction dma_direction = 0;
@@ -578,13 +579,13 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
ismt_submit_desc(priv);
/* Now we wait for interrupt completion, 1s */
- ret = wait_for_completion_timeout(&priv->cmp, HZ*1);
+ time_left = wait_for_completion_timeout(&priv->cmp, HZ*1);
/* unmap the data buffer */
if (dma_size != 0)
dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
- if (unlikely(!ret)) {
+ if (unlikely(!time_left)) {
dev_err(dev, "completion wait timed out\n");
ret = -ETIMEDOUT;
goto out;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
new file mode 100644
index 000000000000..19b2d689a5ef
--- /dev/null
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -0,0 +1,833 @@
+/*
+ * Ingenic JZ4780 I2C bus driver
+ *
+ * Copyright (C) 2006 - 2009 Ingenic Semiconductor Inc.
+ * Copyright (C) 2015 Imagination Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+
+#define JZ4780_I2C_CTRL 0x00
+#define JZ4780_I2C_TAR 0x04
+#define JZ4780_I2C_SAR 0x08
+#define JZ4780_I2C_DC 0x10
+#define JZ4780_I2C_SHCNT 0x14
+#define JZ4780_I2C_SLCNT 0x18
+#define JZ4780_I2C_FHCNT 0x1C
+#define JZ4780_I2C_FLCNT 0x20
+#define JZ4780_I2C_INTST 0x2C
+#define JZ4780_I2C_INTM 0x30
+#define JZ4780_I2C_RXTL 0x38
+#define JZ4780_I2C_TXTL 0x3C
+#define JZ4780_I2C_CINTR 0x40
+#define JZ4780_I2C_CRXUF 0x44
+#define JZ4780_I2C_CRXOF 0x48
+#define JZ4780_I2C_CTXOF 0x4C
+#define JZ4780_I2C_CRXREQ 0x50
+#define JZ4780_I2C_CTXABRT 0x54
+#define JZ4780_I2C_CRXDONE 0x58
+#define JZ4780_I2C_CACT 0x5C
+#define JZ4780_I2C_CSTP 0x60
+#define JZ4780_I2C_CSTT 0x64
+#define JZ4780_I2C_CGC 0x68
+#define JZ4780_I2C_ENB 0x6C
+#define JZ4780_I2C_STA 0x70
+#define JZ4780_I2C_TXABRT 0x80
+#define JZ4780_I2C_DMACR 0x88
+#define JZ4780_I2C_DMATDLR 0x8C
+#define JZ4780_I2C_DMARDLR 0x90
+#define JZ4780_I2C_SDASU 0x94
+#define JZ4780_I2C_ACKGC 0x98
+#define JZ4780_I2C_ENSTA 0x9C
+#define JZ4780_I2C_SDAHD 0xD0
+
+#define JZ4780_I2C_CTRL_STPHLD BIT(7)
+#define JZ4780_I2C_CTRL_SLVDIS BIT(6)
+#define JZ4780_I2C_CTRL_REST BIT(5)
+#define JZ4780_I2C_CTRL_MATP BIT(4)
+#define JZ4780_I2C_CTRL_SATP BIT(3)
+#define JZ4780_I2C_CTRL_SPDF BIT(2)
+#define JZ4780_I2C_CTRL_SPDS BIT(1)
+#define JZ4780_I2C_CTRL_MD BIT(0)
+
+#define JZ4780_I2C_STA_SLVACT BIT(6)
+#define JZ4780_I2C_STA_MSTACT BIT(5)
+#define JZ4780_I2C_STA_RFF BIT(4)
+#define JZ4780_I2C_STA_RFNE BIT(3)
+#define JZ4780_I2C_STA_TFE BIT(2)
+#define JZ4780_I2C_STA_TFNF BIT(1)
+#define JZ4780_I2C_STA_ACT BIT(0)
+
+static const char * const jz4780_i2c_abrt_src[] = {
+ "ABRT_7B_ADDR_NOACK",
+ "ABRT_10ADDR1_NOACK",
+ "ABRT_10ADDR2_NOACK",
+ "ABRT_XDATA_NOACK",
+ "ABRT_GCALL_NOACK",
+ "ABRT_GCALL_READ",
+ "ABRT_HS_ACKD",
+ "SBYTE_ACKDET",
+ "ABRT_HS_NORSTRT",
+ "SBYTE_NORSTRT",
+ "ABRT_10B_RD_NORSTRT",
+ "ABRT_MASTER_DIS",
+ "ARB_LOST",
+ "SLVFLUSH_TXFIFO",
+ "SLV_ARBLOST",
+ "SLVRD_INTX",
+};
+
+#define JZ4780_I2C_INTST_IGC BIT(11)
+#define JZ4780_I2C_INTST_ISTT BIT(10)
+#define JZ4780_I2C_INTST_ISTP BIT(9)
+#define JZ4780_I2C_INTST_IACT BIT(8)
+#define JZ4780_I2C_INTST_RXDN BIT(7)
+#define JZ4780_I2C_INTST_TXABT BIT(6)
+#define JZ4780_I2C_INTST_RDREQ BIT(5)
+#define JZ4780_I2C_INTST_TXEMP BIT(4)
+#define JZ4780_I2C_INTST_TXOF BIT(3)
+#define JZ4780_I2C_INTST_RXFL BIT(2)
+#define JZ4780_I2C_INTST_RXOF BIT(1)
+#define JZ4780_I2C_INTST_RXUF BIT(0)
+
+#define JZ4780_I2C_INTM_MIGC BIT(11)
+#define JZ4780_I2C_INTM_MISTT BIT(10)
+#define JZ4780_I2C_INTM_MISTP BIT(9)
+#define JZ4780_I2C_INTM_MIACT BIT(8)
+#define JZ4780_I2C_INTM_MRXDN BIT(7)
+#define JZ4780_I2C_INTM_MTXABT BIT(6)
+#define JZ4780_I2C_INTM_MRDREQ BIT(5)
+#define JZ4780_I2C_INTM_MTXEMP BIT(4)
+#define JZ4780_I2C_INTM_MTXOF BIT(3)
+#define JZ4780_I2C_INTM_MRXFL BIT(2)
+#define JZ4780_I2C_INTM_MRXOF BIT(1)
+#define JZ4780_I2C_INTM_MRXUF BIT(0)
+
+#define JZ4780_I2C_DC_READ BIT(8)
+
+#define JZ4780_I2C_SDAHD_HDENB BIT(8)
+
+#define JZ4780_I2C_ENB_I2C BIT(0)
+
+#define JZ4780_I2CSHCNT_ADJUST(n) (((n) - 8) < 6 ? 6 : ((n) - 8))
+#define JZ4780_I2CSLCNT_ADJUST(n) (((n) - 1) < 8 ? 8 : ((n) - 1))
+#define JZ4780_I2CFHCNT_ADJUST(n) (((n) - 8) < 6 ? 6 : ((n) - 8))
+#define JZ4780_I2CFLCNT_ADJUST(n) (((n) - 1) < 8 ? 8 : ((n) - 1))
+
+#define JZ4780_I2C_FIFO_LEN 16
+#define TX_LEVEL 3
+#define RX_LEVEL (JZ4780_I2C_FIFO_LEN - TX_LEVEL - 1)
+
+#define JZ4780_I2C_TIMEOUT 300
+
+#define BUFSIZE 200
+
+struct jz4780_i2c {
+ void __iomem *iomem;
+ int irq;
+ struct clk *clk;
+ struct i2c_adapter adap;
+
+ /* lock to protect rbuf and wbuf between xfer_rd/wr and irq handler */
+ spinlock_t lock;
+
+ /* beginning of lock scope */
+ unsigned char *rbuf;
+ int rd_total_len;
+ int rd_data_xfered;
+ int rd_cmd_xfered;
+
+ unsigned char *wbuf;
+ int wt_len;
+
+ int is_write;
+ int stop_hold;
+ int speed;
+
+ int data_buf[BUFSIZE];
+ int cmd_buf[BUFSIZE];
+ int cmd;
+
+ /* end of lock scope */
+ struct completion trans_waitq;
+};
+
+static inline unsigned short jz4780_i2c_readw(struct jz4780_i2c *i2c,
+ unsigned long offset)
+{
+ return readw(i2c->iomem + offset);
+}
+
+static inline void jz4780_i2c_writew(struct jz4780_i2c *i2c,
+ unsigned long offset, unsigned short val)
+{
+ writew(val, i2c->iomem + offset);
+}
+
+static int jz4780_i2c_disable(struct jz4780_i2c *i2c)
+{
+ unsigned short regval;
+ unsigned long loops = 5;
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_ENB, 0);
+
+ do {
+ regval = jz4780_i2c_readw(i2c, JZ4780_I2C_ENSTA);
+ if (!(regval & JZ4780_I2C_ENB_I2C))
+ return 0;
+
+ usleep_range(5000, 15000);
+ } while (--loops);
+
+ dev_err(&i2c->adap.dev, "disable failed: ENSTA=0x%04x\n", regval);
+ return -ETIMEDOUT;
+}
+
+static int jz4780_i2c_enable(struct jz4780_i2c *i2c)
+{
+ unsigned short regval;
+ unsigned long loops = 5;
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_ENB, 1);
+
+ do {
+ regval = jz4780_i2c_readw(i2c, JZ4780_I2C_ENSTA);
+ if (regval & JZ4780_I2C_ENB_I2C)
+ return 0;
+
+ usleep_range(5000, 15000);
+ } while (--loops);
+
+ dev_err(&i2c->adap.dev, "enable failed: ENSTA=0x%04x\n", regval);
+ return -ETIMEDOUT;
+}
+
+static int jz4780_i2c_set_target(struct jz4780_i2c *i2c, unsigned char address)
+{
+ unsigned short regval;
+ unsigned long loops = 5;
+
+ do {
+ regval = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
+ if ((regval & JZ4780_I2C_STA_TFE) &&
+ !(regval & JZ4780_I2C_STA_MSTACT))
+ break;
+
+ usleep_range(5000, 15000);
+ } while (--loops);
+
+ if (loops) {
+ jz4780_i2c_writew(i2c, JZ4780_I2C_TAR, address);
+ return 0;
+ }
+
+ dev_err(&i2c->adap.dev,
+ "set device to address 0x%02x failed, STA=0x%04x\n",
+ address, regval);
+
+ return -ENXIO;
+}
+
+static int jz4780_i2c_set_speed(struct jz4780_i2c *i2c)
+{
+ int dev_clk_khz = clk_get_rate(i2c->clk) / 1000;
+ int cnt_high = 0; /* HIGH period count of the SCL clock */
+ int cnt_low = 0; /* LOW period count of the SCL clock */
+ int cnt_period = 0; /* period count of the SCL clock */
+ int setup_time = 0;
+ int hold_time = 0;
+ unsigned short tmp = 0;
+ int i2c_clk = i2c->speed;
+
+ if (jz4780_i2c_disable(i2c))
+ dev_dbg(&i2c->adap.dev, "i2c not disabled\n");
+
+ /*
+	 * One JZ4780_I2C cycle equals cnt_period PCLK (i2c_clk) cycles.
+	 * In standard mode, the minimum LOW and HIGH periods are 4700 ns and 4000 ns.
+	 * In fast mode, the minimum LOW and HIGH periods are 1300 ns and 600 ns.
+ */
+ cnt_period = dev_clk_khz / i2c_clk;
+
+ if (i2c_clk <= 100)
+ cnt_high = (cnt_period * 4000) / (4700 + 4000);
+ else
+ cnt_high = (cnt_period * 600) / (1300 + 600);
+
+ cnt_low = cnt_period - cnt_high;
+
+ /*
+	 * NOTE: JZ4780_I2C_CTRL_REST cannot be set while the controller is
+	 * enabled. A normal read consists of two messages and we cannot
+	 * disable the controller between them, so JZ4780_I2C_CTRL_REST must
+	 * always be set when initializing JZ4780_I2C_CTRL.
+ */
+ if (i2c_clk <= 100) {
+ tmp = JZ4780_I2C_CTRL_SPDS | JZ4780_I2C_CTRL_REST
+ | JZ4780_I2C_CTRL_SLVDIS | JZ4780_I2C_CTRL_MD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_SHCNT,
+ JZ4780_I2CSHCNT_ADJUST(cnt_high));
+ jz4780_i2c_writew(i2c, JZ4780_I2C_SLCNT,
+ JZ4780_I2CSLCNT_ADJUST(cnt_low));
+ } else {
+ tmp = JZ4780_I2C_CTRL_SPDF | JZ4780_I2C_CTRL_REST
+ | JZ4780_I2C_CTRL_SLVDIS | JZ4780_I2C_CTRL_MD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_FHCNT,
+ JZ4780_I2CFHCNT_ADJUST(cnt_high));
+ jz4780_i2c_writew(i2c, JZ4780_I2C_FLCNT,
+ JZ4780_I2CFLCNT_ADJUST(cnt_low));
+ }
+
+ /*
+	 * An I2C device must internally provide a hold time of at least 300 ns.
+	 * tHD:DAT
+	 *	Standard Mode: min=300ns, max=3450ns
+	 *	Fast Mode: min=0ns, max=900ns
+	 * tSU:DAT
+	 *	Standard Mode: min=250ns, max=infinite
+	 *	Fast Mode: min=100ns (250ns is recommended), max=infinite
+ *
+ * 1i2c_clk = 10^6 / dev_clk_khz
+ * on FPGA, dev_clk_khz = 12000, so 1i2c_clk = 1000/12 = 83ns
+ * on Pisces(1008M), dev_clk_khz=126000, so 1i2c_clk = 1000 / 126 = 8ns
+ *
+ * The actual hold time is (SDAHD + 1) * (i2c_clk period).
+ *
+ * Length of setup time calculated using (SDASU - 1) * (ic_clk_period)
+ *
+ */
+ if (i2c_clk <= 100) { /* standard mode */
+ setup_time = 300;
+ hold_time = 400;
+ } else {
+ setup_time = 450;
+ hold_time = 450;
+ }
+
+ hold_time = ((hold_time * dev_clk_khz) / 1000000) - 1;
+ setup_time = ((setup_time * dev_clk_khz) / 1000000) + 1;
+
+ if (setup_time > 255)
+ setup_time = 255;
+
+ if (setup_time <= 0)
+ setup_time = 1;
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_SDASU, setup_time);
+
+ if (hold_time > 255)
+ hold_time = 255;
+
+ if (hold_time >= 0) {
+ /*i2c hold time enable */
+ hold_time |= JZ4780_I2C_SDAHD_HDENB;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time);
+ } else {
+ /* disable hold time */
+ jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0);
+ }
+
+ return 0;
+}
+
+static int jz4780_i2c_cleanup(struct jz4780_i2c *i2c)
+{
+ int ret;
+ unsigned long flags;
+ unsigned short tmp;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ /* can send stop now if need */
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+
+ /* disable all interrupts first */
+ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0);
+
+ /* then clear all interrupts */
+ jz4780_i2c_readw(i2c, JZ4780_I2C_CTXABRT);
+ jz4780_i2c_readw(i2c, JZ4780_I2C_CINTR);
+
+ /* then disable the controller */
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_ENB_I2C;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ udelay(10);
+ tmp |= JZ4780_I2C_ENB_I2C;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ ret = jz4780_i2c_disable(i2c);
+ if (ret)
+ dev_err(&i2c->adap.dev,
+ "unable to disable device during cleanup!\n");
+
+ if (unlikely(jz4780_i2c_readw(i2c, JZ4780_I2C_INTM)
+ & jz4780_i2c_readw(i2c, JZ4780_I2C_INTST)))
+ dev_err(&i2c->adap.dev,
+ "device has interrupts after a complete cleanup!\n");
+
+ return ret;
+}
+
+static int jz4780_i2c_prepare(struct jz4780_i2c *i2c)
+{
+ jz4780_i2c_set_speed(i2c);
+ return jz4780_i2c_enable(i2c);
+}
+
+static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c, int cmd_count)
+{
+ int i;
+
+ for (i = 0; i < cmd_count; i++)
+ jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ);
+}
+
+static void jz4780_i2c_trans_done(struct jz4780_i2c *i2c)
+{
+ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0);
+ complete(&i2c->trans_waitq);
+}
+
+static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
+{
+ unsigned short tmp;
+ unsigned short intst;
+ unsigned short intmsk;
+ struct jz4780_i2c *i2c = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+ intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM);
+ intst = jz4780_i2c_readw(i2c, JZ4780_I2C_INTST);
+
+ intst &= intmsk;
+
+ if (intst & JZ4780_I2C_INTST_TXABT) {
+ jz4780_i2c_trans_done(i2c);
+ goto done;
+ }
+
+ if (intst & JZ4780_I2C_INTST_RXOF) {
+ dev_dbg(&i2c->adap.dev, "received fifo overflow!\n");
+ jz4780_i2c_trans_done(i2c);
+ goto done;
+ }
+
+ /*
+ * When reading, always drain RX FIFO before we send more Read
+ * Commands to avoid fifo overrun
+ */
+ if (i2c->is_write == 0) {
+ int rd_left;
+
+ while ((jz4780_i2c_readw(i2c, JZ4780_I2C_STA)
+ & JZ4780_I2C_STA_RFNE)) {
+ *(i2c->rbuf++) = jz4780_i2c_readw(i2c, JZ4780_I2C_DC)
+ & 0xff;
+ i2c->rd_data_xfered++;
+ if (i2c->rd_data_xfered == i2c->rd_total_len) {
+ jz4780_i2c_trans_done(i2c);
+ goto done;
+ }
+ }
+
+ rd_left = i2c->rd_total_len - i2c->rd_data_xfered;
+
+ if (rd_left <= JZ4780_I2C_FIFO_LEN)
+ jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, rd_left - 1);
+ }
+
+ if (intst & JZ4780_I2C_INTST_TXEMP) {
+ if (i2c->is_write == 0) {
+ int cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered;
+ int max_send = (JZ4780_I2C_FIFO_LEN - 1)
+ - (i2c->rd_cmd_xfered
+ - i2c->rd_data_xfered);
+ int cmd_to_send = min(cmd_left, max_send);
+
+ if (i2c->rd_cmd_xfered != 0)
+ cmd_to_send = min(cmd_to_send,
+ JZ4780_I2C_FIFO_LEN
+ - TX_LEVEL - 1);
+
+ if (cmd_to_send) {
+ jz4780_i2c_send_rcmd(i2c, cmd_to_send);
+ i2c->rd_cmd_xfered += cmd_to_send;
+ }
+
+ cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered;
+ if (cmd_left == 0) {
+ intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM);
+ intmsk &= ~JZ4780_I2C_INTM_MTXEMP;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, intmsk);
+
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ }
+ } else {
+ unsigned short data;
+ unsigned short i2c_sta;
+
+ i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
+
+ while ((i2c_sta & JZ4780_I2C_STA_TFNF) &&
+ (i2c->wt_len > 0)) {
+ i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
+ data = *i2c->wbuf;
+ data &= ~JZ4780_I2C_DC_READ;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_DC,
+ data);
+ i2c->wbuf++;
+ i2c->wt_len--;
+ }
+
+ if (i2c->wt_len == 0) {
+ if (!i2c->stop_hold) {
+ tmp = jz4780_i2c_readw(i2c,
+ JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL,
+ tmp);
+ }
+
+ jz4780_i2c_trans_done(i2c);
+ goto done;
+ }
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&i2c->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
+{
+ int i;
+
+ dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
+ dev_err(&i2c->adap.dev, "device addr=%x\n",
+ jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
+ dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
+ i2c->cmd, i2c->cmd_buf[i2c->cmd]);
+ dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
+ i2c->cmd, i2c->data_buf[i2c->cmd]);
+
+ for (i = 0; i < 16; i++) {
+ if (src & BIT(i))
+ dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
+ i, jz4780_i2c_abrt_src[i]);
+ }
+}
+
+static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
+ unsigned char *buf, int len, int cnt,
+ int idx)
+{
+ int ret = 0;
+ long timeout;
+ int wait_time = JZ4780_I2C_TIMEOUT * (len + 5);
+ unsigned short tmp;
+ unsigned long flags;
+
+ memset(buf, 0, len);
+
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ i2c->stop_hold = 0;
+ i2c->is_write = 0;
+ i2c->rbuf = buf;
+ i2c->rd_total_len = len;
+ i2c->rd_data_xfered = 0;
+ i2c->rd_cmd_xfered = 0;
+
+ if (len <= JZ4780_I2C_FIFO_LEN)
+ jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, len - 1);
+ else
+ jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, RX_LEVEL);
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL);
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM,
+ JZ4780_I2C_INTM_MRXFL | JZ4780_I2C_INTM_MTXEMP
+ | JZ4780_I2C_INTM_MTXABT | JZ4780_I2C_INTM_MRXOF);
+
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp |= JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ timeout = wait_for_completion_timeout(&i2c->trans_waitq,
+ msecs_to_jiffies(wait_time));
+
+ if (!timeout) {
+ dev_err(&i2c->adap.dev, "irq read timeout\n");
+ dev_dbg(&i2c->adap.dev, "send cmd count:%d %d\n",
+ i2c->cmd, i2c->cmd_buf[i2c->cmd]);
+ dev_dbg(&i2c->adap.dev, "receive data count:%d %d\n",
+ i2c->cmd, i2c->data_buf[i2c->cmd]);
+ ret = -EIO;
+ }
+
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_TXABRT);
+ if (tmp) {
+ jz4780_i2c_txabrt(i2c, tmp);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c,
+ unsigned char *buf, int len,
+ int cnt, int idx)
+{
+ int ret = 0;
+ int wait_time = JZ4780_I2C_TIMEOUT * (len + 5);
+ long timeout;
+ unsigned short tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ if (idx < (cnt - 1))
+ i2c->stop_hold = 1;
+ else
+ i2c->stop_hold = 0;
+
+ i2c->is_write = 1;
+ i2c->wbuf = buf;
+ i2c->wt_len = len;
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL);
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, JZ4780_I2C_INTM_MTXEMP
+ | JZ4780_I2C_INTM_MTXABT);
+
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp |= JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ timeout = wait_for_completion_timeout(&i2c->trans_waitq,
+ msecs_to_jiffies(wait_time));
+ if (timeout && !i2c->stop_hold) {
+ unsigned short i2c_sta;
+ int write_in_process;
+
+ timeout = JZ4780_I2C_TIMEOUT * 100;
+ for (; timeout > 0; timeout--) {
+ i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
+
+ write_in_process = (i2c_sta & JZ4780_I2C_STA_MSTACT) ||
+ !(i2c_sta & JZ4780_I2C_STA_TFE);
+ if (!write_in_process)
+ break;
+ udelay(10);
+ }
+ }
+
+ if (!timeout) {
+ dev_err(&i2c->adap.dev, "write wait timeout\n");
+ ret = -EIO;
+ }
+
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_TXABRT);
+ if (tmp) {
+ jz4780_i2c_txabrt(i2c, tmp);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int jz4780_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int count)
+{
+ int i = -EIO;
+ int ret = 0;
+ struct jz4780_i2c *i2c = adap->algo_data;
+
+ ret = jz4780_i2c_prepare(i2c);
+ if (ret) {
+ dev_err(&i2c->adap.dev, "I2C prepare failed\n");
+ goto out;
+ }
+
+ if (msg->addr != jz4780_i2c_readw(i2c, JZ4780_I2C_TAR)) {
+ ret = jz4780_i2c_set_target(i2c, msg->addr);
+ if (ret)
+ goto out;
+ }
+ for (i = 0; i < count; i++, msg++) {
+ if (msg->flags & I2C_M_RD)
+ ret = jz4780_i2c_xfer_read(i2c, msg->buf, msg->len,
+ count, i);
+ else
+ ret = jz4780_i2c_xfer_write(i2c, msg->buf, msg->len,
+ count, i);
+
+ if (ret)
+ goto out;
+ }
+
+ ret = i;
+
+out:
+ jz4780_i2c_cleanup(i2c);
+ return ret;
+}
+
+static u32 jz4780_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm jz4780_i2c_algorithm = {
+ .master_xfer = jz4780_i2c_xfer,
+ .functionality = jz4780_i2c_functionality,
+};
+
+static const struct of_device_id jz4780_i2c_of_matches[] = {
+ { .compatible = "ingenic,jz4780-i2c", },
+ { /* sentinel */ }
+};
+
+static int jz4780_i2c_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ unsigned int clk_freq = 0;
+ unsigned short tmp;
+ struct resource *r;
+ struct jz4780_i2c *i2c;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(struct jz4780_i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &jz4780_i2c_algorithm;
+ i2c->adap.algo_data = i2c;
+ i2c->adap.retries = 5;
+ i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
+ sprintf(i2c->adap.name, "%s", pdev->name);
+
+ init_completion(&i2c->trans_waitq);
+ spin_lock_init(&i2c->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->iomem = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(i2c->iomem))
+ return PTR_ERR(i2c->iomem);
+
+ platform_set_drvdata(pdev, i2c);
+
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk))
+ return PTR_ERR(i2c->clk);
+
+ clk_prepare_enable(i2c->clk);
+
+ if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &clk_freq)) {
+ dev_err(&pdev->dev, "clock-frequency not specified in DT");
+ return clk_freq;
+ }
+
+ i2c->speed = clk_freq / 1000;
+ jz4780_i2c_set_speed(i2c);
+
+ dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
+
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+
+ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
+
+ i2c->cmd = 0;
+ memset(i2c->cmd_buf, 0, BUFSIZE);
+ memset(i2c->data_buf, 0, BUFSIZE);
+
+ i2c->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
+ dev_name(&pdev->dev), i2c);
+ if (ret) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add bus\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+}
+
+static int jz4780_i2c_remove(struct platform_device *pdev)
+{
+ struct jz4780_i2c *i2c = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(i2c->clk);
+ i2c_del_adapter(&i2c->adap);
+ return 0;
+}
+
+static struct platform_driver jz4780_i2c_driver = {
+ .probe = jz4780_i2c_probe,
+ .remove = jz4780_i2c_remove,
+ .driver = {
+ .name = "jz4780-i2c",
+ .of_match_table = of_match_ptr(jz4780_i2c_of_matches),
+ },
+};
+
+module_platform_driver(jz4780_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ztyan<ztyan@ingenic.cn>");
+MODULE_DESCRIPTION("i2c driver for JZ4780 SoCs");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index c74cc2be613b..48ecffecc0ed 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <asm/mpc52xx.h>
+#include <asm/mpc85xx.h>
#include <sysdev/fsl_soc.h>
#define DRV_NAME "mpc-i2c"
@@ -95,8 +96,9 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
i2c->interrupt = readb(i2c->base + MPC_I2C_SR);
writeb(0, i2c->base + MPC_I2C_SR);
wake_up(&i2c->queue);
+ return IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
/* Sometimes 9th clock pulse isn't generated, and slave doesn't release
@@ -346,6 +348,33 @@ static u32 mpc_i2c_get_sec_cfg_8xxx(void)
return val;
}
+static u32 mpc_i2c_get_prescaler_8xxx(void)
+{
+ /* mpc83xx and mpc82xx all have prescaler 1 */
+ u32 prescaler = 1;
+
+ /* mpc85xx */
+ if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)
+ || pvr_version_is(PVR_VER_E500MC)
+ || pvr_version_is(PVR_VER_E5500)
+ || pvr_version_is(PVR_VER_E6500)) {
+ unsigned int svr = mfspr(SPRN_SVR);
+
+ if ((SVR_SOC_VER(svr) == SVR_8540)
+ || (SVR_SOC_VER(svr) == SVR_8541)
+ || (SVR_SOC_VER(svr) == SVR_8560)
+ || (SVR_SOC_VER(svr) == SVR_8555)
+ || (SVR_SOC_VER(svr) == SVR_8610))
+ /* the above 85xx SoCs have prescaler 1 */
+ prescaler = 1;
+ else
+ /* all the other 85xx have prescaler 2 */
+ prescaler = 2;
+ }
+
+ return prescaler;
+}
+
static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
u32 prescaler, u32 *real_clk)
{
@@ -363,7 +392,7 @@ static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
if (of_device_is_compatible(node, "fsl,mpc8544-i2c"))
prescaler = mpc_i2c_get_sec_cfg_8xxx() ? 3 : 2;
if (!prescaler)
- prescaler = 1;
+ prescaler = mpc_i2c_get_prescaler_8xxx();
divider = fsl_get_sys_freq() / clock / prescaler;
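
The mpc_i2c_isr() change above follows the usual convention for handlers on potentially shared interrupt lines: claim the interrupt (IRQ_HANDLED) only when the device's own status register shows it is pending, and return IRQ_NONE otherwise so the core can account for spurious or unhandled interrupts. A hedged sketch with placeholder example_* names and a made-up status bit:

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_CSR		0x0
#define EXAMPLE_CSR_PENDING	BIT(0)

struct example_dev {
	void __iomem *regs;
	struct completion done;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	u32 status = readl(dev->regs + EXAMPLE_CSR);

	if (!(status & EXAMPLE_CSR_PENDING))
		return IRQ_NONE;			/* not our interrupt */

	writel(status, dev->regs + EXAMPLE_CSR);	/* acknowledge it */
	complete(&dev->done);				/* wake the waiter */
	return IRQ_HANDLED;
}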
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index ff8b12c8d25f..3e84f6c090a5 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -568,6 +568,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
int ret;
int flags;
int use_pio = 0;
+ unsigned long time_left;
flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
@@ -599,9 +600,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&i2c->cmd_complete,
+ time_left = wait_for_completion_timeout(&i2c->cmd_complete,
msecs_to_jiffies(1000));
- if (ret == 0)
+ if (!time_left)
goto timeout;
ret = i2c->cmd_err;
@@ -912,7 +913,7 @@ static void __exit mxs_i2c_exit(void)
module_exit(mxs_i2c_exit);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("MXS I2C Bus Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 97998946c4f6..bcd17e8cbcb4 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -446,9 +446,9 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
*/
static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
- u32 status = 0;
+ int status = 0;
u32 mcr, irq_mask;
- int timeout;
+ unsigned long timeout;
mcr = load_i2c_mcr_reg(dev, flags);
writel(mcr, dev->virtbase + I2C_MCR);
@@ -517,7 +517,7 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
u32 status = 0;
u32 mcr, irq_mask;
- int timeout;
+ unsigned long timeout;
mcr = load_i2c_mcr_reg(dev, flags);
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 16f90b1a7508..75dd6d041241 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -104,18 +104,8 @@ static int i2c_opal_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf));
break;
case 2:
- /* For two messages, we basically support only simple
- * smbus transactions of a write plus a read. We might
- * want to allow also two writes but we'd have to bounce
- * the data into a single buffer.
- */
- if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD))
- return -EOPNOTSUPP;
- if (msgs[0].len > 4)
- return -EOPNOTSUPP;
- if (msgs[0].addr != msgs[1].addr)
- return -EOPNOTSUPP;
- req.type = OPAL_I2C_SM_READ;
+ req.type = (msgs[1].flags & I2C_M_RD) ?
+ OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
req.addr = cpu_to_be16(msgs[0].addr);
req.subaddr_sz = msgs[0].len;
for (i = 0; i < msgs[0].len; i++)
@@ -210,6 +200,15 @@ static const struct i2c_algorithm i2c_opal_algo = {
.functionality = i2c_opal_func,
};
+/*
+ * For two messages, we basically support simple SMBus transactions of a
+ * write-then-anything.
+ */
+static struct i2c_adapter_quirks i2c_opal_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
+ .max_comb_1st_msg_len = 4,
+};
+
static int i2c_opal_probe(struct platform_device *pdev)
{
struct i2c_adapter *adapter;
@@ -232,6 +231,7 @@ static int i2c_opal_probe(struct platform_device *pdev)
adapter->algo = &i2c_opal_algo;
adapter->algo_data = (void *)(unsigned long)opal_id;
+ adapter->quirks = &i2c_opal_quirks;
adapter->dev.parent = &pdev->dev;
adapter->dev.of_node = of_node_get(pdev->dev.of_node);
pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
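
The quirk flags attached above describe the only two-message shape OPAL can handle, letting the I2C core reject everything else before the driver sees it. A hedged sketch of the flag combination, with illustrative values:

static const struct i2c_adapter_quirks example_comb_quirks = {
	.flags = I2C_AQ_COMB |			/* only combined message pairs */
		 I2C_AQ_COMB_WRITE_FIRST |	/* first message must be a write */
		 I2C_AQ_COMB_SAME_ADDR,		/* both to the same target address */
	.max_comb_1st_msg_len = 4,		/* e.g. a register/sub-address */
};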
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 6336f02ec566..3bd2e7d06e4b 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -285,6 +285,6 @@ static struct platform_driver i2c_pca_pf_driver = {
module_platform_driver(i2c_pca_pf_driver);
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("I2C-PCA9564/PCA9665 platform driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index d37d9db6681e..2c40edbf6224 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -456,14 +456,6 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
return -EINVAL;
}
- if (cmd->read_len > MSP_MAX_BYTES_PER_RW ||
- cmd->write_len > MSP_MAX_BYTES_PER_RW) {
- dev_err(&pmcmsptwi_adapter.dev,
- "%s: Cannot transfer more than %d bytes\n",
- __func__, MSP_MAX_BYTES_PER_RW);
- return -EINVAL;
- }
-
mutex_lock(&data->lock);
dev_dbg(&pmcmsptwi_adapter.dev,
"Setting address to 0x%04x\n", cmd->addr);
@@ -520,25 +512,14 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
struct pmcmsptwi_cfg oldcfg, newcfg;
int ret;
- if (num > 2) {
- dev_dbg(&adap->dev, "%d messages unsupported\n", num);
- return -EINVAL;
- } else if (num == 2) {
- /* Check for a dual write-then-read command */
+ if (num == 2) {
struct i2c_msg *nextmsg = msg + 1;
- if (!(msg->flags & I2C_M_RD) &&
- (nextmsg->flags & I2C_M_RD) &&
- msg->addr == nextmsg->addr) {
- cmd.type = MSP_TWI_CMD_WRITE_READ;
- cmd.write_len = msg->len;
- cmd.write_data = msg->buf;
- cmd.read_len = nextmsg->len;
- cmd.read_data = nextmsg->buf;
- } else {
- dev_dbg(&adap->dev,
- "Non write-read dual messages unsupported\n");
- return -EINVAL;
- }
+
+ cmd.type = MSP_TWI_CMD_WRITE_READ;
+ cmd.write_len = msg->len;
+ cmd.write_data = msg->buf;
+ cmd.read_len = nextmsg->len;
+ cmd.read_data = nextmsg->buf;
} else if (msg->flags & I2C_M_RD) {
cmd.type = MSP_TWI_CMD_READ;
cmd.read_len = msg->len;
@@ -598,6 +579,14 @@ static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL;
}
+static struct i2c_adapter_quirks pmcmsptwi_i2c_quirks = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_write_len = MSP_MAX_BYTES_PER_RW,
+ .max_read_len = MSP_MAX_BYTES_PER_RW,
+ .max_comb_1st_msg_len = MSP_MAX_BYTES_PER_RW,
+ .max_comb_2nd_msg_len = MSP_MAX_BYTES_PER_RW,
+};
+
/* -- Initialization -- */
static struct i2c_algorithm pmcmsptwi_algo = {
@@ -609,6 +598,7 @@ static struct i2c_adapter pmcmsptwi_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
.algo = &pmcmsptwi_algo,
+ .quirks = &pmcmsptwi_i2c_quirks,
.name = DRV_NAME,
};
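With the removed checks expressed as I2C_AQ_COMB_WRITE_THEN_READ plus the MSP_MAX_BYTES_PER_RW caps, the core now rejects anything that is not a short write followed by a read to the same address. A client-side sketch of a transfer that satisfies those quirks (the function name, addr, reg and lengths are illustrative):

static int example_write_then_read(struct i2c_adapter *adap, u16 addr,
                                   u8 reg, u8 *val)
{
        struct i2c_msg msgs[2] = {
                { .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
        };
        int ret;

        /* quirk violations come back as -EOPNOTSUPP before master_xfer runs */
        ret = i2c_transfer(adap, msgs, 2);
        if (ret < 0)
                return ret;
        return ret == 2 ? 0 : -EIO;
}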
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 60a53c169ed2..6abcf696e359 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -153,12 +153,6 @@ static int i2c_powermac_master_xfer( struct i2c_adapter *adap,
int read;
int addrdir;
- if (num != 1) {
- dev_err(&adap->dev,
- "Multi-message I2C transactions not supported\n");
- return -EOPNOTSUPP;
- }
-
if (msgs->flags & I2C_M_TEN)
return -EINVAL;
read = (msgs->flags & I2C_M_RD) != 0;
@@ -205,6 +199,9 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
.functionality = i2c_powermac_func,
};
+static struct i2c_adapter_quirks i2c_powermac_quirks = {
+ .max_num_msgs = 1,
+};
static int i2c_powermac_remove(struct platform_device *dev)
{
@@ -434,6 +431,7 @@ static int i2c_powermac_probe(struct platform_device *dev)
platform_set_drvdata(dev, adapter);
adapter->algo = &i2c_powermac_algorithm;
+ adapter->quirks = &i2c_powermac_quirks;
i2c_set_adapdata(adapter, bus);
adapter->dev.parent = &dev->dev;
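Here the whole removed multi-message check collapses into .max_num_msgs = 1: the core refuses longer transfers on the driver's behalf. Roughly what a caller then sees (message setup elided, values hypothetical):

struct i2c_msg msgs[2] = { /* ... */ };
int ret;

ret = i2c_transfer(adapter, msgs, 2);   /* -EOPNOTSUPP: exceeds max_num_msgs */
ret = i2c_transfer(adapter, msgs, 1);   /* handled normally */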
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 4dad23bdffbe..fdcbdab808e9 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -412,17 +412,6 @@ static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
unsigned long left;
int ret;
- /*
- * The QUP block will issue a NACK and STOP on the bus when reaching
- * the end of the read, the length of the read is specified as one byte
- * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
- */
- if (msg->len > QUP_READ_LIMIT) {
- dev_err(qup->dev, "HW not capable of reads over %d bytes\n",
- QUP_READ_LIMIT);
- return -EINVAL;
- }
-
qup->msg = msg;
qup->pos = 0;
@@ -534,6 +523,15 @@ static const struct i2c_algorithm qup_i2c_algo = {
.functionality = qup_i2c_func,
};
+/*
+ * The QUP block will issue a NACK and STOP on the bus when reaching
+ * the end of the read, the length of the read is specified as one byte
+ * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
+ */
+static struct i2c_adapter_quirks qup_i2c_quirks = {
+ .max_read_len = QUP_READ_LIMIT,
+};
+
static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup)
{
clk_prepare_enable(qup->clk);
@@ -670,6 +668,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&qup->adap, qup);
qup->adap.algo = &qup_i2c_algo;
+ qup->adap.quirks = &qup_i2c_quirks;
qup->adap.dev.parent = qup->dev;
qup->adap.dev.of_node = pdev->dev.of_node;
strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
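The 256-byte hardware limit explained in the comment above is now advertised as .max_read_len = QUP_READ_LIMIT, so oversized reads never reach qup_i2c_read_one(). A sketch of the caller-visible effect (address and buffer are hypothetical):

u8 buf[300];
struct i2c_msg big_read = {
        .addr = 0x1c, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf,
};
int ret;

ret = i2c_transfer(&qup->adap, &big_read, 1);   /* -EOPNOTSUPP: len > 256 */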
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 71a6e07eb7ab..5a84bea5b845 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -382,11 +382,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
if (ssr_filtered & SAR) {
/* read or write request */
if (ssr_raw & STM) {
- i2c_slave_event(priv->slave, I2C_SLAVE_REQ_READ_START, &value);
+ i2c_slave_event(priv->slave, I2C_SLAVE_READ_REQUESTED, &value);
rcar_i2c_write(priv, ICRXTX, value);
rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
} else {
- i2c_slave_event(priv->slave, I2C_SLAVE_REQ_WRITE_START, &value);
+ i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
rcar_i2c_read(priv, ICRXTX); /* dummy read */
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
@@ -406,17 +406,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
int ret;
value = rcar_i2c_read(priv, ICRXTX);
- ret = i2c_slave_event(priv->slave, I2C_SLAVE_REQ_WRITE_END, &value);
+ ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
/* Send NACK in case of error */
rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
- i2c_slave_event(priv->slave, I2C_SLAVE_REQ_WRITE_START, &value);
rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
}
/* master wants to read from us */
if (ssr_filtered & SDE) {
- i2c_slave_event(priv->slave, I2C_SLAVE_REQ_READ_END, &value);
- i2c_slave_event(priv->slave, I2C_SLAVE_REQ_READ_START, &value);
+ i2c_slave_event(priv->slave, I2C_SLAVE_READ_PROCESSED, &value);
rcar_i2c_write(priv, ICRXTX, value);
rcar_i2c_write(priv, ICSSR, ~SDE & 0xff);
}
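The hunks above move i2c-rcar to the renamed slave-event set (READ/WRITE REQUESTED, WRITE_RECEIVED, READ_PROCESSED). For orientation, a minimal backend callback using those names, assuming the i2c_slave_cb_t signature; the payload handling is purely illustrative:

static int example_slave_cb(struct i2c_client *client,
                            enum i2c_slave_event event, u8 *val)
{
        switch (event) {
        case I2C_SLAVE_READ_PROCESSED:  /* previous byte made it onto the bus */
        case I2C_SLAVE_READ_REQUESTED:  /* master starts reading from us */
                *val = 0x42;            /* hypothetical data to transmit */
                break;
        case I2C_SLAVE_WRITE_REQUESTED: /* master is about to write to us */
                break;
        case I2C_SLAVE_WRITE_RECEIVED:  /* one byte arrived in *val */
                break;
        case I2C_SLAVE_STOP:            /* transfer finished */
                break;
        default:
                break;
        }
        return 0;   /* a negative return here would make the adapter NACK,
                     * cf. the WRITE_RECEIVED handling in the hunk above */
}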
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 5f96b1b3e3a5..019d5426fe52 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
clk_disable(i2c->clk);
spin_unlock_irqrestore(&i2c->lock, flags);
- return ret;
+ return ret < 0 ? ret : num;
}
static u32 rk3x_i2c_func(struct i2c_adapter *adap)
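The one-liner above fixes the return convention: master_xfer is expected to return the number of messages it completed, or a negative errno, and i2c_transfer() hands that count back to the caller. A skeleton of that convention, with example_xfer_one() being a hypothetical per-message helper:

static int example_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
        int i, ret;

        for (i = 0; i < num; i++) {
                ret = example_xfer_one(adap, &msgs[i]);
                if (ret < 0)
                        return ret;     /* negative errno on failure */
        }

        return num;                     /* success: number of messages handled */
}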
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 88057fad9dfe..ea72dca32fdf 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -10,17 +10,18 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
#include <linux/clk.h>
-#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
#include <linux/err.h>
-#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
/* SSC registers */
#define SSC_BRG 0x000
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 29f14331dd9d..1bcd75ea0b4c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -532,7 +532,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
{
u32 packet_header;
u32 int_mask;
- int ret;
+ unsigned long time_left;
tegra_i2c_flush_fifos(i2c_dev);
@@ -585,18 +585,20 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
i2c_readl(i2c_dev, I2C_INT_MASK));
- ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+ time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
+ TEGRA_I2C_TIMEOUT);
tegra_i2c_mask_irq(i2c_dev, int_mask);
- if (ret == 0) {
+ if (time_left == 0) {
dev_err(i2c_dev->dev, "i2c transfer timed out\n");
tegra_i2c_init(i2c_dev);
return -ETIMEDOUT;
}
- dev_dbg(i2c_dev->dev, "transfer complete: %d %d %d\n",
- ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err);
+ dev_dbg(i2c_dev->dev, "transfer complete: %lu %d %d\n",
+ time_left, completion_done(&i2c_dev->msg_complete),
+ i2c_dev->msg_err);
if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
return 0;
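The retyping above matches wait_for_completion_timeout(), which returns an unsigned long: 0 when the wait timed out, otherwise the number of jiffies that were still left. The usual pattern, in isolation (the completion and timeout value are placeholders):

struct completion done;
unsigned long time_left;

init_completion(&done);
/* ... start the transfer whose IRQ handler will call complete(&done) ... */

time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(100));
if (time_left == 0)
        return -ETIMEDOUT;      /* the completion never fired in time */
/* else: finished with time_left jiffies to spare */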
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 7533fa34d737..47e88adf2011 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -288,10 +288,6 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
i, pmsg->flags & I2C_M_RD ? "read" : "write",
pmsg->flags, pmsg->len, pmsg->addr);
- /* msgs longer than 2048 bytes are not supported by adapter */
- if (pmsg->len > 2048)
- return -EINVAL;
-
mutex_lock(&vb->lock);
/* directly send the message */
if (pmsg->flags & I2C_M_RD) {
@@ -358,6 +354,11 @@ static const struct i2c_algorithm vprbrd_algorithm = {
.functionality = vprbrd_i2c_func,
};
+static struct i2c_adapter_quirks vprbrd_quirks = {
+ .max_read_len = 2048,
+ .max_write_len = 2048,
+};
+
static int vprbrd_i2c_probe(struct platform_device *pdev)
{
struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
@@ -373,6 +374,7 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
vb_i2c->i2c.owner = THIS_MODULE;
vb_i2c->i2c.class = I2C_CLASS_HWMON;
vb_i2c->i2c.algo = &vprbrd_algorithm;
+ vb_i2c->i2c.quirks = &vprbrd_quirks;
vb_i2c->i2c.algo_data = vb;
/* save the param in usb capabable memory */
vb_i2c->bus_freq_param = i2c_bus_param;
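The per-message 2048-byte check moves into vprbrd_quirks, so a client that needs to move more data must split it up itself. A hedged sketch of client-side chunking against such a cap (the function and chunk-size macro are illustrative, not part of this driver):

#define EXAMPLE_CHUNK 2048

static int example_write_large(struct i2c_client *client,
                               const char *buf, size_t len)
{
        size_t off = 0;
        int ret;

        while (off < len) {
                size_t n = min_t(size_t, EXAMPLE_CHUNK, len - off);

                ret = i2c_master_send(client, buf + off, n);
                if (ret < 0)
                        return ret;
                off += n;
        }

        return 0;
}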
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 82ea34925489..e1e3a85596c5 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -128,7 +128,8 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
{
struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
u16 val, tcr_val;
- int ret, wait_result;
+ int ret;
+ unsigned long wait_result;
int xfer_len = 0;
if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -177,7 +178,7 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
while (xfer_len < pmsg->len) {
wait_result = wait_for_completion_timeout(&i2c_dev->complete,
- 500 * HZ / 1000);
+ msecs_to_jiffies(500));
if (wait_result == 0)
return -ETIMEDOUT;
@@ -218,7 +219,8 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
{
struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
u16 val, tcr_val;
- int ret, wait_result;
+ int ret;
+ unsigned long wait_result;
u32 xfer_len = 0;
if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -266,7 +268,7 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
while (xfer_len < pmsg->len) {
wait_result = wait_for_completion_timeout(&i2c_dev->complete,
- 500 * HZ / 1000);
+ msecs_to_jiffies(500));
if (!wait_result)
return -ETIMEDOUT;
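Replacing the hand-rolled `500 * HZ / 1000` with msecs_to_jiffies(500) keeps the same 500 ms timeout while making the unit explicit and letting the helper handle rounding for any HZ. Two sample evaluations (worked arithmetic, not driver code):

unsigned long t500 = msecs_to_jiffies(500); /* 125 jiffies at HZ == 250, 500 at HZ == 1000 */
unsigned long t3   = msecs_to_jiffies(3);   /* rounds up to 1 jiffy at HZ == 250;
                                             * open-coded 3 * HZ / 1000 would truncate to 0 */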
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
new file mode 100644
index 000000000000..c941418f06f5
--- /dev/null
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2003-2015 Broadcom Corporation
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define XLP9XX_I2C_DIV 0x0
+#define XLP9XX_I2C_CTRL 0x1
+#define XLP9XX_I2C_CMD 0x2
+#define XLP9XX_I2C_STATUS 0x3
+#define XLP9XX_I2C_MTXFIFO 0x4
+#define XLP9XX_I2C_MRXFIFO 0x5
+#define XLP9XX_I2C_MFIFOCTRL 0x6
+#define XLP9XX_I2C_STXFIFO 0x7
+#define XLP9XX_I2C_SRXFIFO 0x8
+#define XLP9XX_I2C_SFIFOCTRL 0x9
+#define XLP9XX_I2C_SLAVEADDR 0xA
+#define XLP9XX_I2C_OWNADDR 0xB
+#define XLP9XX_I2C_FIFOWCNT 0xC
+#define XLP9XX_I2C_INTEN 0xD
+#define XLP9XX_I2C_INTST 0xE
+#define XLP9XX_I2C_WAITCNT 0xF
+#define XLP9XX_I2C_TIMEOUT 0X10
+#define XLP9XX_I2C_GENCALLADDR 0x11
+
+#define XLP9XX_I2C_CMD_START BIT(7)
+#define XLP9XX_I2C_CMD_STOP BIT(6)
+#define XLP9XX_I2C_CMD_READ BIT(5)
+#define XLP9XX_I2C_CMD_WRITE BIT(4)
+#define XLP9XX_I2C_CMD_ACK BIT(3)
+
+#define XLP9XX_I2C_CTRL_MCTLEN_SHIFT 16
+#define XLP9XX_I2C_CTRL_MCTLEN_MASK 0xffff0000
+#define XLP9XX_I2C_CTRL_RST BIT(8)
+#define XLP9XX_I2C_CTRL_EN BIT(6)
+#define XLP9XX_I2C_CTRL_MASTER BIT(4)
+#define XLP9XX_I2C_CTRL_FIFORD BIT(1)
+#define XLP9XX_I2C_CTRL_ADDMODE BIT(0)
+
+#define XLP9XX_I2C_INTEN_NACKADDR BIT(25)
+#define XLP9XX_I2C_INTEN_SADDR BIT(13)
+#define XLP9XX_I2C_INTEN_DATADONE BIT(12)
+#define XLP9XX_I2C_INTEN_ARLOST BIT(11)
+#define XLP9XX_I2C_INTEN_MFIFOFULL BIT(4)
+#define XLP9XX_I2C_INTEN_MFIFOEMTY BIT(3)
+#define XLP9XX_I2C_INTEN_MFIFOHI BIT(2)
+#define XLP9XX_I2C_INTEN_BUSERR BIT(0)
+
+#define XLP9XX_I2C_MFIFOCTRL_HITH_SHIFT 8
+#define XLP9XX_I2C_MFIFOCTRL_LOTH_SHIFT 0
+#define XLP9XX_I2C_MFIFOCTRL_RST BIT(16)
+
+#define XLP9XX_I2C_SLAVEADDR_RW BIT(0)
+#define XLP9XX_I2C_SLAVEADDR_ADDR_SHIFT 1
+
+#define XLP9XX_I2C_IP_CLK_FREQ 133000000UL
+#define XLP9XX_I2C_DEFAULT_FREQ 100000
+#define XLP9XX_I2C_HIGH_FREQ 400000
+#define XLP9XX_I2C_FIFO_SIZE 0x80U
+#define XLP9XX_I2C_TIMEOUT_MS 1000
+
+#define XLP9XX_I2C_FIFO_WCNT_MASK 0xff
+#define XLP9XX_I2C_STATUS_ERRMASK (XLP9XX_I2C_INTEN_ARLOST | \
+ XLP9XX_I2C_INTEN_NACKADDR | XLP9XX_I2C_INTEN_BUSERR)
+
+struct xlp9xx_i2c_dev {
+ struct device *dev;
+ struct i2c_adapter adapter;
+ struct completion msg_complete;
+ int irq;
+ bool msg_read;
+ u32 __iomem *base;
+ u32 msg_buf_remaining;
+ u32 msg_len;
+ u32 clk_hz;
+ u32 msg_err;
+ u8 *msg_buf;
+};
+
+static inline void xlp9xx_write_i2c_reg(struct xlp9xx_i2c_dev *priv,
+ unsigned long reg, u32 val)
+{
+ writel(val, priv->base + reg);
+}
+
+static inline u32 xlp9xx_read_i2c_reg(struct xlp9xx_i2c_dev *priv,
+ unsigned long reg)
+{
+ return readl(priv->base + reg);
+}
+
+static void xlp9xx_i2c_mask_irq(struct xlp9xx_i2c_dev *priv, u32 mask)
+{
+ u32 inten;
+
+ inten = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_INTEN) & ~mask;
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, inten);
+}
+
+static void xlp9xx_i2c_unmask_irq(struct xlp9xx_i2c_dev *priv, u32 mask)
+{
+ u32 inten;
+
+ inten = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_INTEN) | mask;
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, inten);
+}
+
+static void xlp9xx_i2c_update_rx_fifo_thres(struct xlp9xx_i2c_dev *priv)
+{
+ u32 thres;
+
+ thres = min(priv->msg_buf_remaining, XLP9XX_I2C_FIFO_SIZE);
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_MFIFOCTRL,
+ thres << XLP9XX_I2C_MFIFOCTRL_HITH_SHIFT);
+}
+
+static void xlp9xx_i2c_fill_tx_fifo(struct xlp9xx_i2c_dev *priv)
+{
+ u32 len, i;
+ u8 *buf = priv->msg_buf;
+
+ len = min(priv->msg_buf_remaining, XLP9XX_I2C_FIFO_SIZE);
+ for (i = 0; i < len; i++)
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_MTXFIFO, buf[i]);
+ priv->msg_buf_remaining -= len;
+ priv->msg_buf += len;
+}
+
+static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
+{
+ u32 len, i;
+ u8 *buf = priv->msg_buf;
+
+ len = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_FIFOWCNT) &
+ XLP9XX_I2C_FIFO_WCNT_MASK;
+ len = min(priv->msg_buf_remaining, len);
+ for (i = 0; i < len; i++, buf++)
+ *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+ priv->msg_buf_remaining -= len;
+ priv->msg_buf = buf;
+
+ if (priv->msg_buf_remaining)
+ xlp9xx_i2c_update_rx_fifo_thres(priv);
+}
+
+static irqreturn_t xlp9xx_i2c_isr(int irq, void *dev_id)
+{
+ struct xlp9xx_i2c_dev *priv = dev_id;
+ u32 status;
+
+ status = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_INTST);
+ if (status == 0)
+ return IRQ_NONE;
+
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTST, status);
+ if (status & XLP9XX_I2C_STATUS_ERRMASK) {
+ priv->msg_err = status;
+ goto xfer_done;
+ }
+
+ /* SADDR ACK for SMBUS_QUICK */
+ if ((status & XLP9XX_I2C_INTEN_SADDR) && (priv->msg_len == 0))
+ goto xfer_done;
+
+ if (!priv->msg_read) {
+ if (status & XLP9XX_I2C_INTEN_MFIFOEMTY) {
+ /* TX FIFO got empty, fill it up again */
+ if (priv->msg_buf_remaining)
+ xlp9xx_i2c_fill_tx_fifo(priv);
+ else
+ xlp9xx_i2c_mask_irq(priv,
+ XLP9XX_I2C_INTEN_MFIFOEMTY);
+ }
+ } else {
+ if (status & (XLP9XX_I2C_INTEN_DATADONE |
+ XLP9XX_I2C_INTEN_MFIFOHI)) {
+ /* data is in FIFO, read it */
+ if (priv->msg_buf_remaining)
+ xlp9xx_i2c_drain_rx_fifo(priv);
+ }
+ }
+
+ /* Transfer complete */
+ if (status & XLP9XX_I2C_INTEN_DATADONE)
+ goto xfer_done;
+
+ return IRQ_HANDLED;
+
+xfer_done:
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, 0);
+ complete(&priv->msg_complete);
+ return IRQ_HANDLED;
+}
+
+static int xlp9xx_i2c_init(struct xlp9xx_i2c_dev *priv)
+{
+ u32 prescale;
+
+ /*
+ * The controller uses 5 * SCL clock internally.
+ * So prescale value should be divided by 5.
+ */
+ prescale = DIV_ROUND_UP(XLP9XX_I2C_IP_CLK_FREQ, priv->clk_hz);
+ prescale = ((prescale - 8) / 5) - 1;
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_RST);
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_EN |
+ XLP9XX_I2C_CTRL_MASTER);
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_DIV, prescale);
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, 0);
+
+ return 0;
+}
+
+static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
+ int last_msg)
+{
+ unsigned long timeleft;
+ u32 intr_mask, cmd, val;
+
+ priv->msg_buf = msg->buf;
+ priv->msg_buf_remaining = priv->msg_len = msg->len;
+ priv->msg_err = 0;
+ priv->msg_read = (msg->flags & I2C_M_RD);
+ reinit_completion(&priv->msg_complete);
+
+ /* Reset FIFO */
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_MFIFOCTRL,
+ XLP9XX_I2C_MFIFOCTRL_RST);
+
+ /* set FIFO threshold if reading */
+ if (priv->msg_read)
+ xlp9xx_i2c_update_rx_fifo_thres(priv);
+
+ /* set slave addr */
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_SLAVEADDR,
+ (msg->addr << XLP9XX_I2C_SLAVEADDR_ADDR_SHIFT) |
+ (priv->msg_read ? XLP9XX_I2C_SLAVEADDR_RW : 0));
+
+ /* Build control word for transfer */
+ val = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_CTRL);
+ if (!priv->msg_read)
+ val &= ~XLP9XX_I2C_CTRL_FIFORD;
+ else
+ val |= XLP9XX_I2C_CTRL_FIFORD; /* read */
+
+ if (msg->flags & I2C_M_TEN)
+ val |= XLP9XX_I2C_CTRL_ADDMODE; /* 10-bit address mode*/
+ else
+ val &= ~XLP9XX_I2C_CTRL_ADDMODE;
+
+ /* set data length to be transferred */
+ val = (val & ~XLP9XX_I2C_CTRL_MCTLEN_MASK) |
+ (msg->len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT);
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, val);
+
+ /* fill fifo during tx */
+ if (!priv->msg_read)
+ xlp9xx_i2c_fill_tx_fifo(priv);
+
+ /* set interrupt mask */
+ intr_mask = (XLP9XX_I2C_INTEN_ARLOST | XLP9XX_I2C_INTEN_BUSERR |
+ XLP9XX_I2C_INTEN_NACKADDR | XLP9XX_I2C_INTEN_DATADONE);
+
+ if (priv->msg_read) {
+ intr_mask |= XLP9XX_I2C_INTEN_MFIFOHI;
+ if (msg->len == 0)
+ intr_mask |= XLP9XX_I2C_INTEN_SADDR;
+ } else {
+ if (msg->len == 0)
+ intr_mask |= XLP9XX_I2C_INTEN_SADDR;
+ else
+ intr_mask |= XLP9XX_I2C_INTEN_MFIFOEMTY;
+ }
+ xlp9xx_i2c_unmask_irq(priv, intr_mask);
+
+ /* set cmd reg */
+ cmd = XLP9XX_I2C_CMD_START;
+ cmd |= (priv->msg_read ? XLP9XX_I2C_CMD_READ : XLP9XX_I2C_CMD_WRITE);
+ if (last_msg)
+ cmd |= XLP9XX_I2C_CMD_STOP;
+
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CMD, cmd);
+
+ timeleft = msecs_to_jiffies(XLP9XX_I2C_TIMEOUT_MS);
+ timeleft = wait_for_completion_timeout(&priv->msg_complete, timeleft);
+
+ if (priv->msg_err) {
+ dev_dbg(priv->dev, "transfer error %x!\n", priv->msg_err);
+ if (priv->msg_err & XLP9XX_I2C_INTEN_BUSERR)
+ xlp9xx_i2c_init(priv);
+ return -EIO;
+ }
+
+ if (timeleft == 0) {
+ dev_dbg(priv->dev, "i2c transfer timed out!\n");
+ xlp9xx_i2c_init(priv);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int xlp9xx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ int i, ret;
+ struct xlp9xx_i2c_dev *priv = i2c_get_adapdata(adap);
+
+ for (i = 0; i < num; i++) {
+ ret = xlp9xx_i2c_xfer_msg(priv, &msgs[i], i == num - 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ return num;
+}
+
+static u32 xlp9xx_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static struct i2c_algorithm xlp9xx_i2c_algo = {
+ .master_xfer = xlp9xx_i2c_xfer,
+ .functionality = xlp9xx_i2c_functionality,
+};
+
+static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
+ struct xlp9xx_i2c_dev *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 freq;
+ int err;
+
+ err = of_property_read_u32(np, "clock-frequency", &freq);
+ if (err) {
+ freq = XLP9XX_I2C_DEFAULT_FREQ;
+ dev_dbg(&pdev->dev, "using default frequency %u\n", freq);
+ } else if (freq == 0 || freq > XLP9XX_I2C_HIGH_FREQ) {
+ dev_warn(&pdev->dev, "invalid frequency %u, using default\n",
+ freq);
+ freq = XLP9XX_I2C_DEFAULT_FREQ;
+ }
+ priv->clk_hz = freq;
+
+ return 0;
+}
+
+static int xlp9xx_i2c_probe(struct platform_device *pdev)
+{
+ struct xlp9xx_i2c_dev *priv;
+ struct resource *res;
+ int err = 0;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0) {
+ dev_err(&pdev->dev, "invalid irq!\n");
+ return priv->irq;
+ }
+
+ xlp9xx_i2c_get_frequency(pdev, priv);
+ xlp9xx_i2c_init(priv);
+
+ err = devm_request_irq(&pdev->dev, priv->irq, xlp9xx_i2c_isr, 0,
+ pdev->name, priv);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ request failed!\n");
+ return err;
+ }
+
+ init_completion(&priv->msg_complete);
+ priv->adapter.dev.parent = &pdev->dev;
+ priv->adapter.algo = &xlp9xx_i2c_algo;
+ priv->adapter.dev.of_node = pdev->dev.of_node;
+ priv->dev = &pdev->dev;
+
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name), "xlp9xx-i2c");
+ i2c_set_adapdata(&priv->adapter, priv);
+
+ err = i2c_add_adapter(&priv->adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add I2C adapter!\n");
+ return err;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ dev_dbg(&pdev->dev, "I2C bus:%d added\n", priv->adapter.nr);
+
+ return 0;
+}
+
+static int xlp9xx_i2c_remove(struct platform_device *pdev)
+{
+ struct xlp9xx_i2c_dev *priv;
+
+ priv = platform_get_drvdata(pdev);
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, 0);
+ synchronize_irq(priv->irq);
+ i2c_del_adapter(&priv->adapter);
+ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, 0);
+
+ return 0;
+}
+
+static const struct of_device_id xlp9xx_i2c_of_match[] = {
+ { .compatible = "netlogic,xlp980-i2c", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver xlp9xx_i2c_driver = {
+ .probe = xlp9xx_i2c_probe,
+ .remove = xlp9xx_i2c_remove,
+ .driver = {
+ .name = "xlp9xx-i2c",
+ .of_match_table = xlp9xx_i2c_of_match,
+ },
+};
+
+module_platform_driver(xlp9xx_i2c_driver);
+
+MODULE_AUTHOR("Subhendu Sekhar Behera <sbehera@broadcom.com>");
+MODULE_DESCRIPTION("XLP9XX/5XX I2C Bus Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index edf274cabe81..987c124432c5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -133,7 +133,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
return AE_OK;
memset(&info, 0, sizeof(info));
- info.acpi_node.companion = adev;
+ info.fwnode = acpi_fwnode_handle(adev);
info.irq = -1;
INIT_LIST_HEAD(&resource_list);
@@ -561,7 +561,7 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
int i = 0, val = 1, ret = 0;
if (bri->prepare_recovery)
- bri->prepare_recovery(bri);
+ bri->prepare_recovery(adap);
/*
* By this time SCL is high, as we need to give 9 falling-rising edges
@@ -586,7 +586,7 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
}
if (bri->unprepare_recovery)
- bri->unprepare_recovery(bri);
+ bri->unprepare_recovery(adap);
return ret;
}
@@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
adap->bus_recovery_info->set_scl(adap, 1);
return i2c_generic_recovery(adap);
}
+EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
{
@@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
return ret;
}
+EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
int i2c_recover_bus(struct i2c_adapter *adap)
{
@@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
return adap->bus_recovery_info->recover_bus(adap);
}
+EXPORT_SYMBOL_GPL(i2c_recover_bus);
static int i2c_device_probe(struct device *dev)
{
@@ -971,7 +974,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
client->dev.of_node = info->of_node;
- ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion);
+ client->dev.fwnode = info->fwnode;
i2c_dev_set_name(adap, client);
status = device_register(&client->dev);
@@ -1410,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
+ pm_runtime_no_callbacks(&adap->dev);
+
#ifdef CONFIG_I2C_COMPAT
res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
adap->dev.parent);
@@ -1875,6 +1880,13 @@ static int __init i2c_init(void)
{
int retval;
+ retval = of_alias_get_highest_id("i2c");
+
+ down_write(&__i2c_board_lock);
+ if (retval >= __i2c_first_dynamic_bus_num)
+ __i2c_first_dynamic_bus_num = retval + 1;
+ up_write(&__i2c_board_lock);
+
retval = bus_register(&i2c_bus_type);
if (retval)
return retval;
@@ -1926,6 +1938,65 @@ module_exit(i2c_exit);
* ----------------------------------------------------
*/
+/* Check if val is exceeding the quirk IFF quirk is non 0 */
+#define i2c_quirk_exceeded(val, quirk) ((quirk) && ((val) > (quirk)))
+
+static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char *err_msg)
+{
+ dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n",
+ err_msg, msg->addr, msg->len,
+ msg->flags & I2C_M_RD ? "read" : "write");
+ return -EOPNOTSUPP;
+}
+
+static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ const struct i2c_adapter_quirks *q = adap->quirks;
+ int max_num = q->max_num_msgs, i;
+ bool do_len_check = true;
+
+ if (q->flags & I2C_AQ_COMB) {
+ max_num = 2;
+
+ /* special checks for combined messages */
+ if (num == 2) {
+ if (q->flags & I2C_AQ_COMB_WRITE_FIRST && msgs[0].flags & I2C_M_RD)
+ return i2c_quirk_error(adap, &msgs[0], "1st comb msg must be write");
+
+ if (q->flags & I2C_AQ_COMB_READ_SECOND && !(msgs[1].flags & I2C_M_RD))
+ return i2c_quirk_error(adap, &msgs[1], "2nd comb msg must be read");
+
+ if (q->flags & I2C_AQ_COMB_SAME_ADDR && msgs[0].addr != msgs[1].addr)
+ return i2c_quirk_error(adap, &msgs[0], "comb msg only to same addr");
+
+ if (i2c_quirk_exceeded(msgs[0].len, q->max_comb_1st_msg_len))
+ return i2c_quirk_error(adap, &msgs[0], "msg too long");
+
+ if (i2c_quirk_exceeded(msgs[1].len, q->max_comb_2nd_msg_len))
+ return i2c_quirk_error(adap, &msgs[1], "msg too long");
+
+ do_len_check = false;
+ }
+ }
+
+ if (i2c_quirk_exceeded(num, max_num))
+ return i2c_quirk_error(adap, &msgs[0], "too many messages");
+
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ if (do_len_check && i2c_quirk_exceeded(len, q->max_read_len))
+ return i2c_quirk_error(adap, &msgs[i], "msg too long");
+ } else {
+ if (do_len_check && i2c_quirk_exceeded(len, q->max_write_len))
+ return i2c_quirk_error(adap, &msgs[i], "msg too long");
+ }
+ }
+
+ return 0;
+}
+
/**
* __i2c_transfer - unlocked flavor of i2c_transfer
* @adap: Handle to I2C bus
@@ -1943,6 +2014,9 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
unsigned long orig_jiffies;
int ret, try;
+ if (adap->quirks && i2c_check_for_quirks(adap, msgs, num))
+ return -EOPNOTSUPP;
+
/* i2c_trace_msg gets enabled when tracepoint i2c_transfer gets
* enabled. This is an efficient way of keeping the for-loop from
* being executed when not needed.
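The i2c_quirk_exceeded() macro above is deliberately an "only if non-zero" test, so any quirk field left at 0 imposes no limit, and an adapter without a quirks pointer skips the check entirely. Spelled out as a plain helper with a couple of sample evaluations (names hypothetical):

static bool example_quirk_exceeded(u16 val, u16 quirk)
{
        return quirk != 0 && val > quirk;
}

/* example_quirk_exceeded(300, 256) -> true  (300-byte msg vs. a 256-byte cap)
 * example_quirk_exceeded(300, 0)   -> false (0 means "no limit configured") */

When a check does fire, __i2c_transfer() returns -EOPNOTSUPP and i2c_quirk_error() logs a rate-limited diagnostic naming the offending message.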
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 593f7ca9adc7..06cc1ff088f1 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -32,8 +32,9 @@ struct i2c_mux_priv {
struct i2c_algorithm algo;
struct i2c_adapter *parent;
- void *mux_priv; /* the mux chip/device */
- u32 chan_id; /* the channel id */
+ struct device *mux_dev;
+ void *mux_priv;
+ u32 chan_id;
int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
/* Set up private adapter data */
priv->parent = parent;
+ priv->mux_dev = mux_dev;
priv->mux_priv = mux_priv;
priv->chan_id = chan_id;
priv->select = select;
@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
char symlink_name[20];
snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
- sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
+ sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
i2c_del_adapter(adap);
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index cf9b09db092f..822374654609 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -36,7 +36,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
struct eeprom_data *eeprom = i2c_get_clientdata(client);
switch (event) {
- case I2C_SLAVE_REQ_WRITE_END:
+ case I2C_SLAVE_WRITE_RECEIVED:
if (eeprom->first_write) {
eeprom->buffer_idx = *val;
eeprom->first_write = false;
@@ -47,17 +47,23 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
}
break;
- case I2C_SLAVE_REQ_READ_START:
+ case I2C_SLAVE_READ_PROCESSED:
+ /* The previous byte made it to the bus, get next one */
+ eeprom->buffer_idx++;
+ /* fallthrough */
+ case I2C_SLAVE_READ_REQUESTED:
spin_lock(&eeprom->buffer_lock);
*val = eeprom->buffer[eeprom->buffer_idx];
spin_unlock(&eeprom->buffer_lock);
- break;
-
- case I2C_SLAVE_REQ_READ_END:
- eeprom->buffer_idx++;
+ /*
+ * Do not increment buffer_idx here, because we don't know if
+ * this byte will be actually used. Read Linux I2C slave docs
+ * for details.
+ */
break;
case I2C_SLAVE_STOP:
+ case I2C_SLAVE_WRITE_REQUESTED:
eeprom->first_write = true;
break;
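The reshuffled eeprom backend shows the intended bookkeeping for the renamed events: the index advances only on I2C_SLAVE_READ_PROCESSED, because only then is the previously supplied byte known to have reached the bus, while I2C_SLAVE_READ_REQUESTED hands out the current byte without advancing. Hooking such a backend up is a registration call; a rough sketch assuming a callback like the one shown after the i2c-rcar hunk (probe/remove wiring abbreviated, names hypothetical):

static int example_backend_probe(struct i2c_client *client,
                                 const struct i2c_device_id *id)
{
        /* events are generated by the adapter's slave support, e.g. i2c-rcar */
        return i2c_slave_register(client, example_slave_cb);
}

static int example_backend_remove(struct i2c_client *client)
{
        return i2c_slave_unregister(client);
}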
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index f5798eb4076b..70db99264339 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -76,10 +76,9 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
return -ENODEV;
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
- if (!adapter) {
- dev_err(&pdev->dev, "Cannot find parent bus\n");
+ if (!adapter)
return -EPROBE_DEFER;
- }
+
mux->data.parent = i2c_adapter_id(adapter);
put_device(&adapter->dev);
@@ -177,11 +176,8 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
}
parent = i2c_get_adapter(mux->data.parent);
- if (!parent) {
- dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
- mux->data.parent);
+ if (!parent)
return -EPROBE_DEFER;
- }
mux->parent = parent;
mux->gpio_base = gpio_base;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 3d8f4fe2e47e..bea0d2de2993 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -204,9 +204,9 @@ static int pca954x_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
/* Get the mux out of reset if a reset GPIO is specified. */
- gpio = devm_gpiod_get(&client->dev, "reset");
- if (!IS_ERR(gpio))
- gpiod_direction_output(gpio, 0);
+ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
/* Write the mux register at addr to verify
* that the mux is in fact present. This also
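devm_gpiod_get_optional() folds the "reset line may or may not be described" case into the return value: NULL when absent, a descriptor when present, and an ERR_PTR (including -EPROBE_DEFER) only on genuine errors, which the probe now propagates. The general shape of that pattern (dev is a placeholder):

struct gpio_desc *reset;

reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset))
        return PTR_ERR(reset);  /* real error, e.g. -EPROBE_DEFER */
/* reset == NULL just means no reset GPIO was described; nothing to do */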
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index a04c49f2a011..39ea67f9b066 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -643,15 +643,6 @@ config BLK_DEV_TC86C001
help
This driver adds support for Toshiba TC86C001 GOKU-S chip.
-config BLK_DEV_CELLEB
- tristate "Toshiba's Cell Reference Set IDE support"
- depends on PPC_CELLEB
- select BLK_DEV_IDEDMA_PCI
- help
- This driver provides support for the on-board IDE controller on
- Toshiba Cell Reference Board.
- If unsure, say Y.
-
endif
# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index a04ee82f1c8f..2a8c417d4081 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
-obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 6250aee30503..89a4ff100b7a 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -123,7 +123,7 @@ static int cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
pci_set_master(dev);
- if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "%s: No suitable DMA available.\n",
d->name);
return -ENODEV;
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index e5f3db831373..f5f2b62471da 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -96,17 +96,5 @@ static struct pnp_driver idepnp_driver = {
.remove = idepnp_remove,
};
-static int __init pnpide_init(void)
-{
- return pnp_register_driver(&idepnp_driver);
-}
-
-static void __exit pnpide_exit(void)
-{
- pnp_unregister_driver(&idepnp_driver);
-}
-
-module_init(pnpide_init);
-module_exit(pnpide_exit);
-
+module_pnp_driver(idepnp_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 2db803cd095c..96a345248224 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1497,9 +1497,9 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
drive->name);
return 0;
}
- st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE);
- st_le16(&table->req_count, tc);
- st_le32(&table->phy_addr, cur_addr);
+ table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE);
+ table->req_count = cpu_to_le16(tc);
+ table->phy_addr = cpu_to_le32(cur_addr);
table->cmd_dep = 0;
table->xfer_status = 0;
table->res_count = 0;
@@ -1513,10 +1513,10 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
/* convert the last command to an input/output last command */
if (count) {
- st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST);
+ table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST);
/* add the stop command to the end of the list */
memset(table, 0, sizeof(struct dbdma_cmd));
- st_le16(&table->command, DBDMA_STOP);
+ table->command = cpu_to_le16(DBDMA_STOP);
mb();
writel(hwif->dmatable_dma, &dma->cmdptr);
return 1;
@@ -1689,10 +1689,9 @@ static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
* The +2 is +1 for the stop command and +1 to allow for
* aligning the start address to a multiple of 16 bytes.
*/
- pmif->dma_table_cpu = pci_alloc_consistent(
- dev,
+ pmif->dma_table_cpu = dma_alloc_coherent(&dev->dev,
(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
- &hwif->dmatable_dma);
+ &hwif->dmatable_dma, GFP_KERNEL);
if (pmif->dma_table_cpu == NULL) {
printk(KERN_ERR "%s: unable to allocate DMA command list\n",
hwif->name);
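The pmac hunks swap two PowerPC-only idioms for their generic counterparts: st_le16()/st_le32() become explicit cpu_to_le16()/cpu_to_le32() stores into the descriptor fields, and pci_alloc_consistent() becomes dma_alloc_coherent() on the embedded struct device. The generic allocation pattern, in isolation (pdev and size are placeholders):

void *cpu_addr;
dma_addr_t dma_handle;

cpu_addr = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
if (!cpu_addr)
        return -ENOMEM;
/* ... the hardware uses dma_handle, the CPU uses cpu_addr ... */
dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);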
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
deleted file mode 100644
index 2a2d188b5d5b..000000000000
--- a/drivers/ide/scc_pata.c
+++ /dev/null
@@ -1,887 +0,0 @@
-/*
- * Support for IDE interfaces on Celleb platform
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This code is based on drivers/ide/pci/siimage.c:
- * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
-
-#define SCC_PATA_NAME "scc IDE"
-
-#define TDVHSEL_MASTER 0x00000001
-#define TDVHSEL_SLAVE 0x00000004
-
-#define MODE_JCUSFEN 0x00000080
-
-#define CCKCTRL_ATARESET 0x00040000
-#define CCKCTRL_BUFCNT 0x00020000
-#define CCKCTRL_CRST 0x00010000
-#define CCKCTRL_OCLKEN 0x00000100
-#define CCKCTRL_ATACLKOEN 0x00000002
-#define CCKCTRL_LCLKEN 0x00000001
-
-#define QCHCD_IOS_SS 0x00000001
-
-#define QCHSD_STPDIAG 0x00020000
-
-#define INTMASK_MSK 0xD1000012
-#define INTSTS_SERROR 0x80000000
-#define INTSTS_PRERR 0x40000000
-#define INTSTS_RERR 0x10000000
-#define INTSTS_ICERR 0x01000000
-#define INTSTS_BMSINT 0x00000010
-#define INTSTS_BMHE 0x00000008
-#define INTSTS_IOIRQS 0x00000004
-#define INTSTS_INTRQ 0x00000002
-#define INTSTS_ACTEINT 0x00000001
-
-#define ECMODE_VALUE 0x01
-
-static struct scc_ports {
- unsigned long ctl, dma;
- struct ide_host *host; /* for removing port from system */
-} scc_ports[MAX_HWIFS];
-
-/* PIO transfer mode table */
-/* JCHST */
-static unsigned long JCHSTtbl[2][7] = {
- {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
- {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
-};
-
-/* JCHHT */
-static unsigned long JCHHTtbl[2][7] = {
- {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
- {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
-};
-
-/* JCHCT */
-static unsigned long JCHCTtbl[2][7] = {
- {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
- {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
-};
-
-
-/* DMA transfer mode table */
-/* JCHDCTM/JCHDCTS */
-static unsigned long JCHDCTxtbl[2][7] = {
- {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
- {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
-};
-
-/* JCSTWTM/JCSTWTS */
-static unsigned long JCSTWTxtbl[2][7] = {
- {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
- {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
-};
-
-/* JCTSS */
-static unsigned long JCTSStbl[2][7] = {
- {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
- {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
-};
-
-/* JCENVT */
-static unsigned long JCENVTtbl[2][7] = {
- {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
- {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
-};
-
-/* JCACTSELS/JCACTSELM */
-static unsigned long JCACTSELtbl[2][7] = {
- {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
-};
-
-
-static u8 scc_ide_inb(unsigned long port)
-{
- u32 data = in_be32((void*)port);
- return (u8)data;
-}
-
-static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
-{
- out_be32((void *)hwif->io_ports.command_addr, cmd);
- eieio();
- in_be32((void *)(hwif->dma_base + 0x01c));
- eieio();
-}
-
-static u8 scc_read_status(ide_hwif_t *hwif)
-{
- return (u8)in_be32((void *)hwif->io_ports.status_addr);
-}
-
-static u8 scc_read_altstatus(ide_hwif_t *hwif)
-{
- return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
-}
-
-static u8 scc_dma_sff_read_status(ide_hwif_t *hwif)
-{
- return (u8)in_be32((void *)(hwif->dma_base + 4));
-}
-
-static void scc_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
- out_be32((void *)hwif->io_ports.ctl_addr, ctl);
- eieio();
- in_be32((void *)(hwif->dma_base + 0x01c));
- eieio();
-}
-
-static void scc_ide_insw(unsigned long port, void *addr, u32 count)
-{
- u16 *ptr = (u16 *)addr;
- while (count--) {
- *ptr++ = le16_to_cpu(in_be32((void*)port));
- }
-}
-
-static void scc_ide_insl(unsigned long port, void *addr, u32 count)
-{
- u16 *ptr = (u16 *)addr;
- while (count--) {
- *ptr++ = le16_to_cpu(in_be32((void*)port));
- *ptr++ = le16_to_cpu(in_be32((void*)port));
- }
-}
-
-static void scc_ide_outb(u8 addr, unsigned long port)
-{
- out_be32((void*)port, addr);
-}
-
-static void
-scc_ide_outsw(unsigned long port, void *addr, u32 count)
-{
- u16 *ptr = (u16 *)addr;
- while (count--) {
- out_be32((void*)port, cpu_to_le16(*ptr++));
- }
-}
-
-static void
-scc_ide_outsl(unsigned long port, void *addr, u32 count)
-{
- u16 *ptr = (u16 *)addr;
- while (count--) {
- out_be32((void*)port, cpu_to_le16(*ptr++));
- out_be32((void*)port, cpu_to_le16(*ptr++));
- }
-}
-
-/**
- * scc_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Load the timing settings for this device mode into the
- * controller.
- */
-
-static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct scc_ports *ports = ide_get_hwifdata(hwif);
- unsigned long ctl_base = ports->ctl;
- unsigned long cckctrl_port = ctl_base + 0xff0;
- unsigned long piosht_port = ctl_base + 0x000;
- unsigned long pioct_port = ctl_base + 0x004;
- unsigned long reg;
- int offset;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- reg = in_be32((void __iomem *)cckctrl_port);
- if (reg & CCKCTRL_ATACLKOEN) {
- offset = 1; /* 133MHz */
- } else {
- offset = 0; /* 100MHz */
- }
- reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
- out_be32((void __iomem *)piosht_port, reg);
- reg = JCHCTtbl[offset][pio];
- out_be32((void __iomem *)pioct_port, reg);
-}
-
-/**
- * scc_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Load the timing settings for this device mode into the
- * controller.
- */
-
-static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct scc_ports *ports = ide_get_hwifdata(hwif);
- unsigned long ctl_base = ports->ctl;
- unsigned long cckctrl_port = ctl_base + 0xff0;
- unsigned long mdmact_port = ctl_base + 0x008;
- unsigned long mcrcst_port = ctl_base + 0x00c;
- unsigned long sdmact_port = ctl_base + 0x010;
- unsigned long scrcst_port = ctl_base + 0x014;
- unsigned long udenvt_port = ctl_base + 0x018;
- unsigned long tdvhsel_port = ctl_base + 0x020;
- int is_slave = drive->dn & 1;
- int offset, idx;
- unsigned long reg;
- unsigned long jcactsel;
- const u8 speed = drive->dma_mode;
-
- reg = in_be32((void __iomem *)cckctrl_port);
- if (reg & CCKCTRL_ATACLKOEN) {
- offset = 1; /* 133MHz */
- } else {
- offset = 0; /* 100MHz */
- }
-
- idx = speed - XFER_UDMA_0;
-
- jcactsel = JCACTSELtbl[offset][idx];
- if (is_slave) {
- out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
- out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
- jcactsel = jcactsel << 2;
- out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
- } else {
- out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
- out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
- out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
- }
- reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
- out_be32((void __iomem *)udenvt_port, reg);
-}
-
-static void scc_dma_host_set(ide_drive_t *drive, int on)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 unit = drive->dn & 1;
- u8 dma_stat = scc_dma_sff_read_status(hwif);
-
- if (on)
- dma_stat |= (1 << (5 + unit));
- else
- dma_stat &= ~(1 << (5 + unit));
-
- scc_ide_outb(dma_stat, hwif->dma_base + 4);
-}
-
-/**
- * scc_dma_setup - begin a DMA phase
- * @drive: target device
- * @cmd: command
- *
- * Build an IDE DMA PRD (IDE speak for scatter gather table)
- * and then set up the DMA transfer registers.
- *
- * Returns 0 on success. If a PIO fallback is required then 1
- * is returned.
- */
-
-static int scc_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- u32 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
- u8 dma_stat;
-
- /* fall back to pio! */
- if (ide_build_dmatable(drive, cmd) == 0)
- return 1;
-
- /* PRD table */
- out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
-
- /* specify r/w */
- out_be32((void __iomem *)hwif->dma_base, rw);
-
- /* read DMA status for INTR & ERROR flags */
- dma_stat = scc_dma_sff_read_status(hwif);
-
- /* clear INTR & ERROR flags */
- out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
-
- return 0;
-}
-
-static void scc_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_cmd = scc_ide_inb(hwif->dma_base);
-
- /* start DMA */
- scc_ide_outb(dma_cmd | 1, hwif->dma_base);
-}
-
-static int __scc_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat, dma_cmd;
-
- /* get DMA command mode */
- dma_cmd = scc_ide_inb(hwif->dma_base);
- /* stop DMA */
- scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
- /* get DMA status */
- dma_stat = scc_dma_sff_read_status(hwif);
- /* clear the INTR & ERROR bits */
- scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
- /* verify good DMA status */
- return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
-}
-
-/**
- * scc_dma_end - Stop DMA
- * @drive: IDE drive
- *
- * Check and clear INT Status register.
- * Then call __scc_dma_end().
- */
-
-static int scc_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *dma_base = (void __iomem *)hwif->dma_base;
- unsigned long intsts_port = hwif->dma_base + 0x014;
- u32 reg;
- int dma_stat, data_loss = 0;
- static int retry = 0;
-
- /* errata A308 workaround: Step5 (check data loss) */
- /* We don't check non ide_disk because it is limited to UDMA4 */
- if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
- & ATA_ERR) &&
- drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
- reg = in_be32((void __iomem *)intsts_port);
- if (!(reg & INTSTS_ACTEINT)) {
- printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
- drive->name);
- data_loss = 1;
- if (retry++) {
- struct request *rq = hwif->rq;
- ide_drive_t *drive;
- int i;
-
- /* ERROR_RESET and drive->crc_count are needed
- * to reduce DMA transfer mode in retry process.
- */
- if (rq)
- rq->errors |= ERROR_RESET;
-
- ide_port_for_each_dev(i, drive, hwif)
- drive->crc_count++;
- }
- }
- }
-
- while (1) {
- reg = in_be32((void __iomem *)intsts_port);
-
- if (reg & INTSTS_SERROR) {
- printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
- out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
-
- out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
- continue;
- }
-
- if (reg & INTSTS_PRERR) {
- u32 maea0, maec0;
- unsigned long ctl_base = hwif->config_data;
-
- maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
- maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));
-
- printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);
-
- out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
-
- out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
- continue;
- }
-
- if (reg & INTSTS_RERR) {
- printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
- out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
-
- out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
- continue;
- }
-
- if (reg & INTSTS_ICERR) {
- out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
-
- printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
- out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
- continue;
- }
-
- if (reg & INTSTS_BMSINT) {
- printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
- out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);
-
- ide_do_reset(drive);
- continue;
- }
-
- if (reg & INTSTS_BMHE) {
- out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
- continue;
- }
-
- if (reg & INTSTS_ACTEINT) {
- out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
- continue;
- }
-
- if (reg & INTSTS_IOIRQS) {
- out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
- continue;
- }
- break;
- }
-
- dma_stat = __scc_dma_end(drive);
- if (data_loss)
- dma_stat |= 2; /* emulate DMA error (to retry command) */
- return dma_stat;
-}
-
-/* returns 1 if dma irq issued, 0 otherwise */
-static int scc_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
-
- /* SCC errata A252,A308 workaround: Step4 */
- if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
- & ATA_ERR) &&
- (int_stat & INTSTS_INTRQ))
- return 1;
-
- /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
- if (int_stat & INTSTS_IOIRQS)
- return 1;
-
- return 0;
-}
-
-static u8 scc_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 mask = hwif->ultra_mask;
-
- /* errata A308 workaround: limit non ide_disk drive to UDMA4 */
- if ((drive->media != ide_disk) && (mask & 0xE0)) {
- printk(KERN_INFO "%s: limit %s to UDMA4\n",
- SCC_PATA_NAME, drive->name);
- mask = ATA_UDMA4;
- }
-
- return mask;
-}
-
-/**
- * setup_mmio_scc - map CTRL/BMID region
- * @dev: PCI device we are configuring
- * @name: device name
- *
- */
-
-static int setup_mmio_scc (struct pci_dev *dev, const char *name)
-{
- void __iomem *ctl_addr;
- void __iomem *dma_addr;
- int i, ret;
-
- for (i = 0; i < MAX_HWIFS; i++) {
- if (scc_ports[i].ctl == 0)
- break;
- }
- if (i >= MAX_HWIFS)
- return -ENOMEM;
-
- ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
- if (ret < 0) {
- printk(KERN_ERR "%s: can't reserve resources\n", name);
- return ret;
- }
-
- ctl_addr = pci_ioremap_bar(dev, 0);
- if (!ctl_addr)
- goto fail_0;
-
- dma_addr = pci_ioremap_bar(dev, 1);
- if (!dma_addr)
- goto fail_1;
-
- pci_set_master(dev);
- scc_ports[i].ctl = (unsigned long)ctl_addr;
- scc_ports[i].dma = (unsigned long)dma_addr;
- pci_set_drvdata(dev, (void *) &scc_ports[i]);
-
- return 1;
-
- fail_1:
- iounmap(ctl_addr);
- fail_0:
- return -ENOMEM;
-}
-
-static int scc_ide_setup_pci_device(struct pci_dev *dev,
- const struct ide_port_info *d)
-{
- struct scc_ports *ports = pci_get_drvdata(dev);
- struct ide_host *host;
- struct ide_hw hw, *hws[] = { &hw };
- int i, rc;
-
- memset(&hw, 0, sizeof(hw));
- for (i = 0; i <= 8; i++)
- hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
- hw.irq = dev->irq;
- hw.dev = &dev->dev;
-
- rc = ide_host_add(d, hws, 1, &host);
- if (rc)
- return rc;
-
- ports->host = host;
-
- return 0;
-}
-
-/**
- * init_setup_scc - set up an SCC PATA Controller
- * @dev: PCI device
- * @d: IDE port info
- *
- * Perform the initial set up for this device.
- */
-
-static int init_setup_scc(struct pci_dev *dev, const struct ide_port_info *d)
-{
- unsigned long ctl_base;
- unsigned long dma_base;
- unsigned long cckctrl_port;
- unsigned long intmask_port;
- unsigned long mode_port;
- unsigned long ecmode_port;
- u32 reg = 0;
- struct scc_ports *ports;
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc)
- goto end;
-
- rc = setup_mmio_scc(dev, d->name);
- if (rc < 0)
- goto end;
-
- ports = pci_get_drvdata(dev);
- ctl_base = ports->ctl;
- dma_base = ports->dma;
- cckctrl_port = ctl_base + 0xff0;
- intmask_port = dma_base + 0x010;
- mode_port = ctl_base + 0x024;
- ecmode_port = ctl_base + 0xf00;
-
- /* controller initialization */
- reg = 0;
- out_be32((void*)cckctrl_port, reg);
- reg |= CCKCTRL_ATACLKOEN;
- out_be32((void*)cckctrl_port, reg);
- reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
- out_be32((void*)cckctrl_port, reg);
- reg |= CCKCTRL_CRST;
- out_be32((void*)cckctrl_port, reg);
-
- for (;;) {
- reg = in_be32((void*)cckctrl_port);
- if (reg & CCKCTRL_CRST)
- break;
- udelay(5000);
- }
-
- reg |= CCKCTRL_ATARESET;
- out_be32((void*)cckctrl_port, reg);
-
- out_be32((void*)ecmode_port, ECMODE_VALUE);
- out_be32((void*)mode_port, MODE_JCUSFEN);
- out_be32((void*)intmask_port, INTMASK_MSK);
-
- rc = scc_ide_setup_pci_device(dev, d);
-
- end:
- return rc;
-}
-
-static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
- struct ide_io_ports *io_ports = &drive->hwif->io_ports;
-
- if (valid & IDE_VALID_FEATURE)
- scc_ide_outb(tf->feature, io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- scc_ide_outb(tf->nsect, io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- scc_ide_outb(tf->lbal, io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- scc_ide_outb(tf->lbam, io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- scc_ide_outb(tf->lbah, io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- scc_ide_outb(tf->device, io_ports->device_addr);
-}
-
-static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
- struct ide_io_ports *io_ports = &drive->hwif->io_ports;
-
- if (valid & IDE_VALID_ERROR)
- tf->error = scc_ide_inb(io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- tf->nsect = scc_ide_inb(io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- tf->lbal = scc_ide_inb(io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- tf->lbam = scc_ide_inb(io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- tf->lbah = scc_ide_inb(io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- tf->device = scc_ide_inb(io_ports->device_addr);
-}
-
-static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- len++;
-
- if (drive->io_32bit) {
- scc_ide_insl(data_addr, buf, len / 4);
-
- if ((len & 3) >= 2)
- scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
- } else
- scc_ide_insw(data_addr, buf, len / 2);
-}
-
-static void scc_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- len++;
-
- if (drive->io_32bit) {
- scc_ide_outsl(data_addr, buf, len / 4);
-
- if ((len & 3) >= 2)
- scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
- } else
- scc_ide_outsw(data_addr, buf, len / 2);
-}
-
-/**
- * init_mmio_iops_scc - set up the iops for MMIO
- * @hwif: interface to set up
- *
- */
-
-static void init_mmio_iops_scc(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct scc_ports *ports = pci_get_drvdata(dev);
- unsigned long dma_base = ports->dma;
-
- ide_set_hwifdata(hwif, ports);
-
- hwif->dma_base = dma_base;
- hwif->config_data = ports->ctl;
-}
-
-/**
- * init_iops_scc - set up iops
- * @hwif: interface to set up
- *
- * Do the basic setup for the SCC hardware interface
- * and then do the MMIO setup.
- */
-
-static void init_iops_scc(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- hwif->hwif_data = NULL;
- if (pci_get_drvdata(dev) == NULL)
- return;
- init_mmio_iops_scc(hwif);
-}
-
-static int scc_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- return ide_allocate_dma_engine(hwif);
-}
-
-static u8 scc_cable_detect(ide_hwif_t *hwif)
-{
- return ATA_CBL_PATA80;
-}
-
-/**
- * init_hwif_scc - set up hwif
- * @hwif: interface to set up
- *
- * We do the basic set up of the interface structure. The SCC
- * requires several custom handlers so we override the default
- * ide DMA handlers appropriately.
- */
-
-static void init_hwif_scc(ide_hwif_t *hwif)
-{
- /* PTERADD */
- out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
-
- if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
- hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
- else
- hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
-}
-
-static const struct ide_tp_ops scc_tp_ops = {
- .exec_command = scc_exec_command,
- .read_status = scc_read_status,
- .read_altstatus = scc_read_altstatus,
- .write_devctl = scc_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = scc_tf_load,
- .tf_read = scc_tf_read,
-
- .input_data = scc_input_data,
- .output_data = scc_output_data,
-};
-
-static const struct ide_port_ops scc_port_ops = {
- .set_pio_mode = scc_set_pio_mode,
- .set_dma_mode = scc_set_dma_mode,
- .udma_filter = scc_udma_filter,
- .cable_detect = scc_cable_detect,
-};
-
-static const struct ide_dma_ops scc_dma_ops = {
- .dma_host_set = scc_dma_host_set,
- .dma_setup = scc_dma_setup,
- .dma_start = scc_dma_start,
- .dma_end = scc_dma_end,
- .dma_test_irq = scc_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = scc_dma_sff_read_status,
-};
-
-static const struct ide_port_info scc_chipset = {
- .name = "sccIDE",
- .init_iops = init_iops_scc,
- .init_dma = scc_init_dma,
- .init_hwif = init_hwif_scc,
- .tp_ops = &scc_tp_ops,
- .port_ops = &scc_port_ops,
- .dma_ops = &scc_dma_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .irq_flags = IRQF_SHARED,
- .pio_mask = ATA_PIO4,
- .chipset = ide_pci,
-};
-
-/**
- * scc_init_one - pci layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds an SCC PATA controller.
- * We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return init_setup_scc(dev, &scc_chipset);
-}
-
-/**
- * scc_remove - pci layer remove entry
- * @dev: PCI device
- *
- * Called by the PCI code when it removes an SCC PATA controller.
- */
-
-static void scc_remove(struct pci_dev *dev)
-{
- struct scc_ports *ports = pci_get_drvdata(dev);
- struct ide_host *host = ports->host;
-
- ide_host_remove(host);
-
- iounmap((void*)ports->dma);
- iounmap((void*)ports->ctl);
- pci_release_selected_regions(dev, (1 << 2) - 1);
- memset(ports, 0, sizeof(*ports));
-}
-
-static const struct pci_device_id scc_pci_tbl[] = {
- { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
-
-static struct pci_driver scc_pci_driver = {
- .name = "SCC IDE",
- .id_table = scc_pci_tbl,
- .probe = scc_init_one,
- .remove = scc_remove,
-};
-
-static int __init scc_ide_init(void)
-{
- return ide_pci_register_driver(&scc_pci_driver);
-}
-
-static void __exit scc_ide_exit(void)
-{
- pci_unregister_driver(&scc_pci_driver);
-}
-
-module_init(scc_ide_init);
-module_exit(scc_ide_exit);
-
-MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 34a5e5223d50..112d2fe1bcdb 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -209,7 +209,7 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
* a DMA mask field to the struct ide_port_info if we need it
* (or let lower level driver set the DMA mask)
*/
- ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (ret < 0) {
printk(KERN_ERR "%s %s: can't set DMA mask\n",
d->name, pci_name(dev));
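
The hunk above replaces the PCI-specific pci_set_dma_mask() wrapper with the generic DMA API, which takes a struct device rather than a struct pci_dev. A minimal sketch of the same pattern in a PCI probe path (foo_probe() and its naming are illustrative only, not part of this patch):

        #include <linux/pci.h>
        #include <linux/dma-mapping.h>

        static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                int ret;

                ret = pcim_enable_device(pdev);
                if (ret)
                        return ret;

                /* Generic helper: pass &pdev->dev instead of pdev itself. */
                ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (ret)
                        dev_err(&pdev->dev, "32-bit DMA mask not usable\n");

                /* dma_set_mask_and_coherent() would set the coherent mask too. */
                return ret;
        }
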
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index 63761db61384..2d35e9f7516f 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -334,8 +334,8 @@ static int ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
if (ide_allocate_dma_engine(hwif))
goto dma_pci_alloc_failure;
- pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
- (dma_addr_t *)&hwif->extra_base);
+ pad = dma_alloc_coherent(&dev->dev, IOC4_IDE_CACHELINE_SIZE,
+ (dma_addr_t *)&hwif->extra_base, GFP_KERNEL);
if (pad) {
ide_set_hwifdata(hwif, pad);
return 0;
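
In the same spirit, pci_alloc_consistent() above becomes dma_alloc_coherent(), which wants a struct device and an explicit gfp flag. A rough sketch of the allocate/free pairing, with a made-up buffer size (FOO_PAD_SIZE and the foo_* helpers are not from this patch):

        #include <linux/dma-mapping.h>
        #include <linux/gfp.h>

        #define FOO_PAD_SIZE 128        /* arbitrary size, for the sketch only */

        static void *foo_alloc_pad(struct device *dev, dma_addr_t *dma_handle)
        {
                /* pci_alloc_consistent() implied GFP_ATOMIC; here it is explicit. */
                return dma_alloc_coherent(dev, FOO_PAD_SIZE, dma_handle, GFP_KERNEL);
        }

        static void foo_free_pad(struct device *dev, void *pad, dma_addr_t dma_handle)
        {
                dma_free_coherent(dev, FOO_PAD_SIZE, pad, dma_handle);
        }
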
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5c979d0667a2..2a36a95d95cf 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -218,18 +218,10 @@ static struct cpuidle_state byt_cstates[] = {
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
- .name = "C1E-BYT",
- .desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 15,
- .target_residency = 30,
- .enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
- {
.name = "C6N-BYT",
.desc = "MWAIT 0x58",
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 40,
+ .exit_latency = 300,
.target_residency = 275,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
@@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = {
.name = "C6S-BYT",
.desc = "MWAIT 0x52",
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 140,
+ .exit_latency = 500,
.target_residency = 560,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
@@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
.desc = "MWAIT 0x60",
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
- .target_residency = 1500,
+ .target_residency = 4000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
@@ -261,6 +253,51 @@ static struct cpuidle_state byt_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state cht_cstates[] = {
+ {
+ .name = "C1-CHT",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6N-CHT",
+ .desc = "MWAIT 0x58",
+ .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 80,
+ .target_residency = 275,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6S-CHT",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 200,
+ .target_residency = 560,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7-CHT",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7S-CHT",
+ .desc = "MWAIT 0x64",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state ivb_cstates[] = {
{
.name = "C1-IVB",
@@ -747,6 +784,12 @@ static const struct idle_cpu idle_cpu_byt = {
.byt_auto_demotion_disable_flag = true,
};
+static const struct idle_cpu idle_cpu_cht = {
+ .state_table = cht_cstates,
+ .disable_promotion_to_c1e = true,
+ .byt_auto_demotion_disable_flag = true,
+};
+
static const struct idle_cpu idle_cpu_ivb = {
.state_table = ivb_cstates,
.disable_promotion_to_c1e = true,
@@ -775,7 +818,7 @@ static const struct idle_cpu idle_cpu_avn = {
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
-static const struct x86_cpu_id intel_idle_ids[] = {
+static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x1a, idle_cpu_nehalem),
ICPU(0x1e, idle_cpu_nehalem),
ICPU(0x1f, idle_cpu_nehalem),
@@ -789,6 +832,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x2d, idle_cpu_snb),
ICPU(0x36, idle_cpu_atom),
ICPU(0x37, idle_cpu_byt),
+ ICPU(0x4c, idle_cpu_cht),
ICPU(0x3a, idle_cpu_ivb),
ICPU(0x3e, idle_cpu_ivt),
ICPU(0x3c, idle_cpu_hsw),
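
Marking intel_idle_ids __initconst is safe because the match table is only consulted from the driver's init path; roughly, the lookup works like the sketch below (a simplification, not the driver's exact code, and foo_idle_probe() is a placeholder name):

        #include <linux/init.h>
        #include <linux/errno.h>
        #include <asm/cpu_device_id.h>

        static int __init foo_idle_probe(const struct x86_cpu_id *ids)
        {
                const struct x86_cpu_id *id = x86_match_cpu(ids);

                if (!id)
                        return -ENODEV;

                /* id->driver_data carries the matching struct idle_cpu, e.g. idle_cpu_cht. */
                return 0;
        }
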
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 7f55a6d7cd03..c6d5a3a40b60 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -389,7 +389,12 @@ int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
{
int ret, i;
int len_words = len / sizeof(u16);
- __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+ __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
+
+ if (len_words > ARRAY_SIZE(be_buf)) {
+ dev_err(&client->dev, "Invalid buffer size %d\n", len);
+ return -EINVAL;
+ }
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
reg, NULL, 0, (u8 *) be_buf, len);
@@ -424,7 +429,12 @@ int mma9551_read_status_words(struct i2c_client *client, u8 app_id,
{
int ret, i;
int len_words = len / sizeof(u16);
- __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+ __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
+
+ if (len_words > ARRAY_SIZE(be_buf)) {
+ dev_err(&client->dev, "Invalid buffer size %d\n", len);
+ return -EINVAL;
+ }
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
reg, NULL, 0, (u8 *) be_buf, len);
@@ -459,7 +469,12 @@ int mma9551_write_config_words(struct i2c_client *client, u8 app_id,
{
int i;
int len_words = len / sizeof(u16);
- __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+ __be16 be_buf[(MMA9551_MAX_MAILBOX_DATA_REGS - 1) / 2];
+
+ if (len_words > ARRAY_SIZE(be_buf)) {
+ dev_err(&client->dev, "Invalid buffer size %d\n", len);
+ return -EINVAL;
+ }
for (i = 0; i < len_words; i++)
be_buf[i] = cpu_to_be16(buf[i]);
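
The added checks above all follow one pattern: size the on-stack __be16 buffer from the mailbox limit, then reject any request that would overrun it before touching the hardware. A condensed, self-contained version of that pattern (all foo_* names and the register limit are invented for illustration):

        #include <linux/kernel.h>       /* ARRAY_SIZE() */
        #include <linux/types.h>
        #include <linux/errno.h>
        #include <asm/byteorder.h>

        #define FOO_MAX_MAILBOX_DATA_REGS       10      /* made-up limit */

        static int foo_read_words(u16 *buf, size_t len)
        {
                __be16 be_buf[FOO_MAX_MAILBOX_DATA_REGS / 2];
                size_t len_words = len / sizeof(u16), i;

                /* Refuse requests that would overflow the on-stack buffer. */
                if (len_words > ARRAY_SIZE(be_buf))
                        return -EINVAL;

                /* ... transfer len bytes from the device into be_buf here ... */

                for (i = 0; i < len_words; i++)
                        buf[i] = be16_to_cpu(be_buf[i]);
                return 0;
        }
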
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 2df1af7d43fc..365a109aaaef 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -54,6 +54,7 @@
#define MMA9553_MASK_CONF_STEPCOALESCE GENMASK(7, 0)
#define MMA9553_REG_CONF_ACTTHD 0x0E
+#define MMA9553_MAX_ACTTHD GENMASK(15, 0)
/* Pedometer status registers (R-only) */
#define MMA9553_REG_STATUS 0x00
@@ -316,22 +317,19 @@ static int mma9553_set_config(struct mma9553_data *data, u16 reg,
static int mma9553_read_activity_stepcnt(struct mma9553_data *data,
u8 *activity, u16 *stepcnt)
{
- u32 status_stepcnt;
- u16 status;
+ u16 buf[2];
int ret;
ret = mma9551_read_status_words(data->client, MMA9551_APPID_PEDOMETER,
- MMA9553_REG_STATUS, sizeof(u32),
- (u16 *) &status_stepcnt);
+ MMA9553_REG_STATUS, sizeof(u32), buf);
if (ret < 0) {
dev_err(&data->client->dev,
"error reading status and stepcnt\n");
return ret;
}
- status = status_stepcnt & MMA9553_MASK_CONF_WORD;
- *activity = mma9553_get_bits(status, MMA9553_MASK_STATUS_ACTIVITY);
- *stepcnt = status_stepcnt >> 16;
+ *activity = mma9553_get_bits(buf[0], MMA9553_MASK_STATUS_ACTIVITY);
+ *stepcnt = buf[1];
return 0;
}
@@ -872,6 +870,9 @@ static int mma9553_write_event_value(struct iio_dev *indio_dev,
case IIO_EV_INFO_PERIOD:
switch (chan->type) {
case IIO_ACTIVITY:
+ if (val < 0 || val > MMA9553_ACTIVITY_THD_TO_SEC(
+ MMA9553_MAX_ACTTHD))
+ return -EINVAL;
mutex_lock(&data->mutex);
ret = mma9553_set_config(data, MMA9553_REG_CONF_ACTTHD,
&data->conf.actthd,
@@ -971,7 +972,8 @@ static const struct iio_chan_spec_ext_info mma9553_ext_info[] = {
.modified = 1, \
.channel2 = _chan2, \
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT) | \
+ BIT(IIO_CHAN_INFO_ENABLE), \
.event_spec = mma9553_activity_events, \
.num_event_specs = ARRAY_SIZE(mma9553_activity_events), \
.ext_info = mma9553_ext_info, \
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 58d1d13d552a..211b13271c61 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -546,6 +546,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &accel_info;
+ mutex_init(&adata->tb.buf_lock);
st_sensors_power_enable(indio_dev);
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 08bcfb061ca5..56008a86b78f 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -53,39 +53,42 @@ static const struct iio_chan_spec const axp288_adc_channels[] = {
.channel = 0,
.address = AXP288_TS_ADC_H,
.datasheet_name = "TS_PIN",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
}, {
.indexed = 1,
.type = IIO_TEMP,
.channel = 1,
.address = AXP288_PMIC_ADC_H,
.datasheet_name = "PMIC_TEMP",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
}, {
.indexed = 1,
.type = IIO_TEMP,
.channel = 2,
.address = AXP288_GP_ADC_H,
.datasheet_name = "GPADC",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
}, {
.indexed = 1,
.type = IIO_CURRENT,
.channel = 3,
.address = AXP20X_BATT_CHRG_I_H,
.datasheet_name = "BATT_CHG_I",
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
}, {
.indexed = 1,
.type = IIO_CURRENT,
.channel = 4,
.address = AXP20X_BATT_DISCHRG_I_H,
.datasheet_name = "BATT_DISCHRG_I",
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
}, {
.indexed = 1,
.type = IIO_VOLTAGE,
.channel = 5,
.address = AXP20X_BATT_V_H,
.datasheet_name = "BATT_V",
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
},
};
@@ -151,9 +154,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
chan->address))
dev_err(&indio_dev->dev, "TS pin restore\n");
break;
- case IIO_CHAN_INFO_PROCESSED:
- ret = axp288_adc_read_channel(val, chan->address, info->regmap);
- break;
default:
ret = -EINVAL;
}
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 51e2a83c9404..115f6e99a7fa 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -35,8 +35,9 @@
#define CC10001_ADC_EOC_SET BIT(0)
#define CC10001_ADC_CHSEL_SAMPLED 0x0c
-#define CC10001_ADC_POWER_UP 0x10
-#define CC10001_ADC_POWER_UP_SET BIT(0)
+#define CC10001_ADC_POWER_DOWN 0x10
+#define CC10001_ADC_POWER_DOWN_SET BIT(0)
+
#define CC10001_ADC_DEBUG 0x14
#define CC10001_ADC_DATA_COUNT 0x20
@@ -62,7 +63,6 @@ struct cc10001_adc_device {
u16 *buf;
struct mutex lock;
- unsigned long channel_map;
unsigned int start_delay_ns;
unsigned int eoc_delay_ns;
};
@@ -79,6 +79,18 @@ static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev,
return readl(adc_dev->reg_base + reg);
}
+static void cc10001_adc_power_up(struct cc10001_adc_device *adc_dev)
+{
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, 0);
+ ndelay(adc_dev->start_delay_ns);
+}
+
+static void cc10001_adc_power_down(struct cc10001_adc_device *adc_dev)
+{
+ cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN,
+ CC10001_ADC_POWER_DOWN_SET);
+}
+
static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
unsigned int channel)
{
@@ -88,6 +100,7 @@ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV;
cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
+ udelay(1);
val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG);
val = val | CC10001_ADC_START_CONV;
cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
@@ -129,6 +142,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
struct iio_dev *indio_dev;
unsigned int delay_ns;
unsigned int channel;
+ unsigned int scan_idx;
bool sample_invalid;
u16 *data;
int i;
@@ -139,20 +153,17 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
mutex_lock(&adc_dev->lock);
- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
- CC10001_ADC_POWER_UP_SET);
-
- /* Wait for 8 (6+2) clock cycles before activating START */
- ndelay(adc_dev->start_delay_ns);
+ cc10001_adc_power_up(adc_dev);
/* Calculate delay step for eoc and sampled data */
delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
i = 0;
sample_invalid = false;
- for_each_set_bit(channel, indio_dev->active_scan_mask,
+ for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
indio_dev->masklength) {
+ channel = indio_dev->channels[scan_idx].channel;
cc10001_adc_start(adc_dev, channel);
data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns);
@@ -166,7 +177,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
}
done:
- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
+ cc10001_adc_power_down(adc_dev);
mutex_unlock(&adc_dev->lock);
@@ -185,11 +196,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
unsigned int delay_ns;
u16 val;
- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
- CC10001_ADC_POWER_UP_SET);
-
- /* Wait for 8 (6+2) clock cycles before activating START */
- ndelay(adc_dev->start_delay_ns);
+ cc10001_adc_power_up(adc_dev);
/* Calculate delay step for eoc and sampled data */
delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
@@ -198,7 +205,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
- cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
+ cc10001_adc_power_down(adc_dev);
return val;
}
@@ -224,7 +231,7 @@ static int cc10001_adc_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
ret = regulator_get_voltage(adc_dev->reg);
- if (ret)
+ if (ret < 0)
return ret;
*val = ret / 1000;
@@ -255,22 +262,22 @@ static const struct iio_info cc10001_adc_info = {
.update_scan_mode = &cc10001_update_scan_mode,
};
-static int cc10001_adc_channel_init(struct iio_dev *indio_dev)
+static int cc10001_adc_channel_init(struct iio_dev *indio_dev,
+ unsigned long channel_map)
{
- struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
struct iio_chan_spec *chan_array, *timestamp;
unsigned int bit, idx = 0;
- indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map,
- CC10001_ADC_NUM_CHANNELS);
+ indio_dev->num_channels = bitmap_weight(&channel_map,
+ CC10001_ADC_NUM_CHANNELS) + 1;
- chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1,
+ chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels,
sizeof(struct iio_chan_spec),
GFP_KERNEL);
if (!chan_array)
return -ENOMEM;
- for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) {
+ for_each_set_bit(bit, &channel_map, CC10001_ADC_NUM_CHANNELS) {
struct iio_chan_spec *chan = &chan_array[idx];
chan->type = IIO_VOLTAGE;
@@ -305,6 +312,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
unsigned long adc_clk_rate;
struct resource *res;
struct iio_dev *indio_dev;
+ unsigned long channel_map;
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
@@ -313,9 +321,9 @@ static int cc10001_adc_probe(struct platform_device *pdev)
adc_dev = iio_priv(indio_dev);
- adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
+ channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
- adc_dev->channel_map &= ~ret;
+ channel_map &= ~ret;
adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(adc_dev->reg))
@@ -361,7 +369,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
/* Setup the ADC channels available on the device */
- ret = cc10001_adc_channel_init(indio_dev);
+ ret = cc10001_adc_channel_init(indio_dev, channel_map);
if (ret < 0)
goto err_disable_clk;
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 87ee1c7d0b54..44bf815adb6c 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -436,7 +436,7 @@ static int max1027_probe(struct spi_device *spi)
indio_dev->num_channels * 2,
GFP_KERNEL);
if (st->buffer == NULL) {
- dev_err(&indio_dev->dev, "Can't allocate bufffer\n");
+ dev_err(&indio_dev->dev, "Can't allocate buffer\n");
return -ENOMEM;
}
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index efbfd12a4bfd..8d9c9b9215dd 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -60,12 +60,12 @@ struct mcp320x {
struct spi_message msg;
struct spi_transfer transfer[2];
- u8 tx_buf;
- u8 rx_buf[2];
-
struct regulator *reg;
struct mutex lock;
const struct mcp320x_chip_info *chip_info;
+
+ u8 tx_buf ____cacheline_aligned;
+ u8 rx_buf[2];
};
static int mcp320x_channel_to_tx_data(int device_index,
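
Moving the SPI transfer buffers to the end of the state structure and marking them ____cacheline_aligned is the usual way to keep DMA'd data from sharing a cache line with unrelated fields. Schematically (foo_adc is a hypothetical driver struct, not mcp320x itself):

        #include <linux/cache.h>
        #include <linux/mutex.h>
        #include <linux/types.h>
        #include <linux/spi/spi.h>

        struct foo_adc {
                struct spi_device       *spi;
                struct mutex            lock;
                /*
                 * DMA-safe area: placed last and cacheline aligned so cache
                 * maintenance on the transfer buffers cannot clobber the
                 * fields above on non-coherent architectures.
                 */
                u8                      tx_buf ____cacheline_aligned;
                u8                      rx_buf[2];
        };
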
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 3211729bcb0b..0c4618b4d515 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -18,6 +18,7 @@
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -471,11 +472,11 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
const struct vadc_channel_prop *prop, u16 adc_code)
{
const struct vadc_prescale_ratio *prescale;
- s32 voltage;
+ s64 voltage;
voltage = adc_code - vadc->graph[prop->calibration].gnd;
voltage *= vadc->graph[prop->calibration].dx;
- voltage = voltage / vadc->graph[prop->calibration].dy;
+ voltage = div64_s64(voltage, vadc->graph[prop->calibration].dy);
if (prop->calibration == VADC_CALIB_ABSOLUTE)
voltage += vadc->graph[prop->calibration].dx;
@@ -487,7 +488,7 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
voltage = voltage * prescale->den;
- return voltage / prescale->num;
+ return div64_s64(voltage, prescale->num);
}
static int vadc_decimation_from_dt(u32 value)
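
Widening the intermediate value to s64 means the divisions must go through div64_s64(): a plain 64-bit '/' would pull in __divdi3, which 32-bit kernels do not provide. A toy version of the same scaling step (foo_scale() is an invented name):

        #include <linux/math64.h>

        /* Scale (code - gnd) by dx/dy without truncating to 32 bits. */
        static s64 foo_scale(s32 code, s32 gnd, s32 dx, s32 dy)
        {
                s64 v = (s64)(code - gnd) * dx;

                return div64_s64(v, dy);
        }
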
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index a221f7329b79..ce93bd8e3f68 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -856,6 +856,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
switch (chan->address) {
case XADC_REG_VCCINT:
case XADC_REG_VCCAUX:
+ case XADC_REG_VREFP:
case XADC_REG_VCCBRAM:
case XADC_REG_VCCPINT:
case XADC_REG_VCCPAUX:
@@ -996,7 +997,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
.num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
.scan_index = (_scan_index), \
.scan_type = { \
- .sign = 'u', \
+ .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
.realbits = 12, \
.storagebits = 16, \
.shift = 4, \
@@ -1008,7 +1009,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
static const struct iio_chan_spec xadc_channels[] = {
XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
- XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCINT, "vccaux", true),
+ XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index c7487e8d7f80..54adc5087210 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -145,9 +145,9 @@ static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
#define XADC_REG_MAX_VCCPINT 0x28
#define XADC_REG_MAX_VCCPAUX 0x29
#define XADC_REG_MAX_VCCO_DDR 0x2a
-#define XADC_REG_MIN_VCCPINT 0x2b
-#define XADC_REG_MIN_VCCPAUX 0x2c
-#define XADC_REG_MIN_VCCO_DDR 0x2d
+#define XADC_REG_MIN_VCCPINT 0x2c
+#define XADC_REG_MIN_VCCPAUX 0x2d
+#define XADC_REG_MIN_VCCO_DDR 0x2e
#define XADC_REG_CONF0 0x40
#define XADC_REG_CONF1 0x41
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index edd13d2b4121..8dd0477e201c 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -304,8 +304,6 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *of_pdata;
int err = 0;
- mutex_init(&sdata->tb.buf_lock);
-
/* If OF/DT pdata exists, it will take precedence of anything else */
of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
if (of_pdata)
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 21395f26d227..ffe96642b6d0 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -400,6 +400,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &gyro_info;
+ mutex_init(&gdata->tb.buf_lock);
st_sensors_power_enable(indio_dev);
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index 847ca561afe0..55c267bbfd2f 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -38,7 +38,8 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
kfifo_free(&buf->kf);
ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
buf->buffer.length);
- buf->update_needed = false;
+ if (ret >= 0)
+ buf->update_needed = false;
} else {
kfifo_reset_out(&buf->kf);
}
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 91ecc46ffeaa..ef60bae738e3 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -43,8 +43,6 @@ struct prox_state {
static const struct iio_chan_spec prox_channels[] = {
{
.type = IIO_PROXIMITY,
- .modified = 1,
- .channel2 = IIO_NO_MOD,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
@@ -253,7 +251,6 @@ static int hid_prox_probe(struct platform_device *pdev)
struct iio_dev *indio_dev;
struct prox_state *prox_state;
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
- struct iio_chan_spec *channels;
indio_dev = devm_iio_device_alloc(&pdev->dev,
sizeof(struct prox_state));
@@ -272,20 +269,21 @@ static int hid_prox_probe(struct platform_device *pdev)
return ret;
}
- channels = kmemdup(prox_channels, sizeof(prox_channels), GFP_KERNEL);
- if (!channels) {
+ indio_dev->channels = kmemdup(prox_channels, sizeof(prox_channels),
+ GFP_KERNEL);
+ if (!indio_dev->channels) {
dev_err(&pdev->dev, "failed to duplicate channels\n");
return -ENOMEM;
}
- ret = prox_parse_report(pdev, hsdev, channels,
+ ret = prox_parse_report(pdev, hsdev,
+ (struct iio_chan_spec *)indio_dev->channels,
HID_USAGE_SENSOR_PROX, prox_state);
if (ret) {
dev_err(&pdev->dev, "failed to setup attributes\n");
goto error_free_dev_mem;
}
- indio_dev->channels = channels;
indio_dev->num_channels =
ARRAY_SIZE(prox_channels);
indio_dev->dev.parent = &pdev->dev;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8ade473f99fe..2e56f812a644 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -369,6 +369,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &magn_info;
+ mutex_init(&mdata->tb.buf_lock);
st_sensors_power_enable(indio_dev);
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
index 7c623e2bd633..a2602d8dd6d5 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280.c
@@ -172,6 +172,7 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
var2 = (((((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1]))) *
((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1])))) >> 12) *
((s32)(s16)le16_to_cpu(buf[T3]))) >> 14;
+ data->t_fine = var1 + var2;
return (data->t_fine * 5 + 128) >> 8;
}
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 7bb8d4c1f7df..3cf0bd67d24c 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -47,8 +47,6 @@ struct press_state {
static const struct iio_chan_spec press_channels[] = {
{
.type = IIO_PRESSURE,
- .modified = 1,
- .channel2 = IIO_NO_MOD,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 97baf40d424b..e881fa6291e9 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -417,6 +417,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &press_info;
+ mutex_init(&press_data->tb.buf_lock);
st_sensors_power_enable(indio_dev);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f80da50d84a5..38339d220d7f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
} sgid_addr, dgid_addr;
- ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
- if (ret)
- return ret;
-
- ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
- if (ret)
- return ret;
+ rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
struct sockaddr_in6 _sockaddr_in6;
} gid_addr;
- ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+ rdma_gid2ip(&gid_addr._sockaddr, sgid);
- if (ret)
- return ret;
memset(&dev_addr, 0, sizeof(dev_addr));
ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e28a494e2a3a..0c1419105ff0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
return cm_id_priv;
}
-static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
+static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
{
int i;
- for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
- ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
- ((unsigned long *) mask)[i];
+ for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
+ dst[i] = src[i] & mask[i];
}
static int cm_compare_data(struct ib_cm_compare_data *src_data,
struct ib_cm_compare_data *dst_data)
{
- u8 src[IB_CM_COMPARE_SIZE];
- u8 dst[IB_CM_COMPARE_SIZE];
+ u32 src[IB_CM_COMPARE_SIZE];
+ u32 dst[IB_CM_COMPARE_SIZE];
if (!src_data || !dst_data)
return 0;
cm_mask_copy(src, src_data->data, dst_data->mask);
cm_mask_copy(dst, dst_data->data, src_data->mask);
- return memcmp(src, dst, IB_CM_COMPARE_SIZE);
+ return memcmp(src, dst, sizeof(src));
}
-static int cm_compare_private_data(u8 *private_data,
+static int cm_compare_private_data(u32 *private_data,
struct ib_cm_compare_data *dst_data)
{
- u8 src[IB_CM_COMPARE_SIZE];
+ u32 src[IB_CM_COMPARE_SIZE];
if (!dst_data)
return 0;
cm_mask_copy(src, private_data, dst_data->mask);
- return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
+ return memcmp(src, dst_data->data, sizeof(src));
}
/*
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
static struct cm_id_private * cm_find_listen(struct ib_device *device,
__be64 service_id,
- u8 *private_data)
+ u32 *private_data)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
cm_mask_copy(cm_id_priv->compare_data->data,
compare_data->data, compare_data->mask);
memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
- IB_CM_COMPARE_SIZE);
+ sizeof(compare_data->mask));
}
cm_id->state = IB_CM_LISTEN;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index be068f47e47e..8b76f0ef965e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -103,7 +103,7 @@ struct cm_req_msg {
/* local ACK timeout:5, rsvd:3 */
u8 alt_offset139;
- u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+ u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg {
__be16 rsvd;
__be64 service_id;
- u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
+ u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));
struct cm_sidr_rep_msg {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d570030d899c..06441a43c3aa 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
memcpy(&ib->sib_addr, &path->dgid, 16);
}
+static __be16 ss_get_port(const struct sockaddr_storage *ss)
+{
+ if (ss->ss_family == AF_INET)
+ return ((struct sockaddr_in *)ss)->sin_port;
+ else if (ss->ss_family == AF_INET6)
+ return ((struct sockaddr_in6 *)ss)->sin6_port;
+ BUG();
+}
+
static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
struct cma_hdr *hdr)
{
- struct sockaddr_in *listen4, *ip4;
+ struct sockaddr_in *ip4;
- listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
- ip4->sin_family = listen4->sin_family;
+ ip4->sin_family = AF_INET;
ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = listen4->sin_port;
+ ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
- ip4->sin_family = listen4->sin_family;
+ ip4->sin_family = AF_INET;
ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
ip4->sin_port = hdr->port;
}
@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
struct cma_hdr *hdr)
{
- struct sockaddr_in6 *listen6, *ip6;
+ struct sockaddr_in6 *ip6;
- listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
- ip6->sin6_family = listen6->sin6_family;
+ ip6->sin6_family = AF_INET6;
ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = listen6->sin6_port;
+ ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
- ip6->sin6_family = listen6->sin6_family;
+ ip6->sin6_family = AF_INET6;
ip6->sin6_addr = hdr->src_addr.ip6;
ip6->sin6_port = hdr->port;
}
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index b85ddbc979e0..e6ffa2e66c1a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -33,7 +33,7 @@
#include "iwpm_util.h"
-static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
+static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
static int iwpm_ulib_version = 3;
static int iwpm_user_pid = IWPM_PID_UNDEFINED;
static atomic_t echo_nlmsg_seq;
@@ -468,7 +468,8 @@ add_mapping_response_exit:
}
EXPORT_SYMBOL(iwpm_add_mapping_cb);
-/* netlink attribute policy for the response to add and query mapping request */
+/* netlink attribute policy for the response to add and query mapping request
+ * and response with remote address info */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
[IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },
[IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },
@@ -559,6 +560,76 @@ query_mapping_response_exit:
}
EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
+/*
+ * iwpm_remote_info_cb - Process a port mapper message, containing
+ * the remote connecting peer address info
+ */
+int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
+ struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
+ struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
+ struct iwpm_remote_info *rem_info;
+ const char *msg_type;
+ u8 nl_client;
+ int ret = -EINVAL;
+
+ msg_type = "Remote Mapping info";
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
+ resp_query_policy, nltb, msg_type))
+ return ret;
+
+ nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+ if (!iwpm_valid_client(nl_client)) {
+ pr_info("%s: Invalid port mapper client = %d\n",
+ __func__, nl_client);
+ return ret;
+ }
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+ local_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+ remote_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+ mapped_loc_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
+ mapped_rem_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
+
+ if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
+ mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
+ pr_info("%s: Sockaddr family doesn't match the requested one\n",
+ __func__);
+ return ret;
+ }
+ rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
+ if (!rem_info) {
+ pr_err("%s: Unable to allocate a remote info\n", __func__);
+ ret = -ENOMEM;
+ return ret;
+ }
+ memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&rem_info->remote_sockaddr, remote_sockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr,
+ sizeof(struct sockaddr_storage));
+ rem_info->nl_client = nl_client;
+
+ iwpm_add_remote_info(rem_info);
+
+ iwpm_print_sockaddr(local_sockaddr,
+ "remote_info: Local sockaddr:");
+ iwpm_print_sockaddr(mapped_loc_sockaddr,
+ "remote_info: Mapped local sockaddr:");
+ iwpm_print_sockaddr(remote_sockaddr,
+ "remote_info: Remote sockaddr:");
+ iwpm_print_sockaddr(mapped_rem_sockaddr,
+ "remote_info: Mapped remote sockaddr:");
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_remote_info_cb);
+
/* netlink attribute policy for the received request for mapping info */
static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
[IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 69e9f84c1605..a626795bf9c7 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -33,8 +33,10 @@
#include "iwpm_util.h"
-#define IWPM_HASH_BUCKET_SIZE 512
-#define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1)
+#define IWPM_MAPINFO_HASH_SIZE 512
+#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
+#define IWPM_REMINFO_HASH_SIZE 64
+#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
static struct hlist_head *iwpm_hash_bucket;
static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
+static struct hlist_head *iwpm_reminfo_bucket;
+static DEFINE_SPINLOCK(iwpm_reminfo_lock);
+
static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;
int iwpm_init(u8 nl_client)
{
+ int ret = 0;
if (iwpm_valid_client(nl_client))
return -EINVAL;
mutex_lock(&iwpm_admin_lock);
if (atomic_read(&iwpm_admin.refcount) == 0) {
- iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
+ iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
sizeof(struct hlist_head), GFP_KERNEL);
if (!iwpm_hash_bucket) {
- mutex_unlock(&iwpm_admin_lock);
+ ret = -ENOMEM;
pr_err("%s Unable to create mapinfo hash table\n", __func__);
- return -ENOMEM;
+ goto init_exit;
+ }
+ iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!iwpm_reminfo_bucket) {
+ kfree(iwpm_hash_bucket);
+ ret = -ENOMEM;
+ pr_err("%s Unable to create reminfo hash table\n", __func__);
+ goto init_exit;
}
}
atomic_inc(&iwpm_admin.refcount);
+init_exit:
mutex_unlock(&iwpm_admin_lock);
- iwpm_set_valid(nl_client, 1);
- return 0;
+ if (!ret) {
+ iwpm_set_valid(nl_client, 1);
+ pr_debug("%s: Mapinfo and reminfo tables are created\n",
+ __func__);
+ }
+ return ret;
}
EXPORT_SYMBOL(iwpm_init);
static void free_hash_bucket(void);
+static void free_reminfo_bucket(void);
int iwpm_exit(u8 nl_client)
{
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client)
}
if (atomic_dec_and_test(&iwpm_admin.refcount)) {
free_hash_bucket();
- pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
+ free_reminfo_bucket();
+ pr_debug("%s: Resources are destroyed\n", __func__);
}
mutex_unlock(&iwpm_admin_lock);
iwpm_set_valid(nl_client, 0);
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client)
}
EXPORT_SYMBOL(iwpm_exit);
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
struct sockaddr_storage *);
int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
struct hlist_head *hash_bucket_head;
struct iwpm_mapping_info *map_info;
unsigned long flags;
+ int ret = -EINVAL;
if (!iwpm_valid_client(nl_client))
- return -EINVAL;
+ return ret;
map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
if (!map_info) {
pr_err("%s: Unable to allocate a mapping info\n", __func__);
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
- hash_bucket_head = get_hash_bucket_head(
+ hash_bucket_head = get_mapinfo_hash_bucket(
&map_info->local_sockaddr,
&map_info->mapped_sockaddr);
- hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+ if (hash_bucket_head) {
+ hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+ ret = 0;
+ }
}
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(iwpm_create_mapinfo);
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
- hash_bucket_head = get_hash_bucket_head(
+ hash_bucket_head = get_mapinfo_hash_bucket(
local_sockaddr,
mapped_local_addr);
+ if (!hash_bucket_head)
+ goto remove_mapinfo_exit;
+
hlist_for_each_entry_safe(map_info, tmp_hlist_node,
hash_bucket_head, hlist_node) {
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
}
}
}
+remove_mapinfo_exit:
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
return ret;
}
@@ -166,7 +195,7 @@ static void free_hash_bucket(void)
/* remove all the mapinfo data from the list */
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
- for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry_safe(map_info, tmp_hlist_node,
&iwpm_hash_bucket[i], hlist_node) {
@@ -180,6 +209,96 @@ static void free_hash_bucket(void)
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
}
+static void free_reminfo_bucket(void)
+{
+ struct hlist_node *tmp_hlist_node;
+ struct iwpm_remote_info *rem_info;
+ unsigned long flags;
+ int i;
+
+ /* remove all the remote info from the list */
+ spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+ for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+ &iwpm_reminfo_bucket[i], hlist_node) {
+
+ hlist_del_init(&rem_info->hlist_node);
+ kfree(rem_info);
+ }
+ }
+ /* free the hash list */
+ kfree(iwpm_reminfo_bucket);
+ iwpm_reminfo_bucket = NULL;
+ spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *,
+ struct sockaddr_storage *);
+
+void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
+{
+ struct hlist_head *hash_bucket_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+ if (iwpm_reminfo_bucket) {
+ hash_bucket_head = get_reminfo_hash_bucket(
+ &rem_info->mapped_loc_sockaddr,
+ &rem_info->mapped_rem_sockaddr);
+ if (hash_bucket_head)
+ hlist_add_head(&rem_info->hlist_node, hash_bucket_head);
+ }
+ spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr,
+ u8 nl_client)
+{
+ struct hlist_node *tmp_hlist_node;
+ struct hlist_head *hash_bucket_head;
+ struct iwpm_remote_info *rem_info = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!iwpm_valid_client(nl_client)) {
+ pr_info("%s: Invalid client = %d\n", __func__, nl_client);
+ return ret;
+ }
+ spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+ if (iwpm_reminfo_bucket) {
+ hash_bucket_head = get_reminfo_hash_bucket(
+ mapped_loc_addr,
+ mapped_rem_addr);
+ if (!hash_bucket_head)
+ goto get_remote_info_exit;
+ hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+ hash_bucket_head, hlist_node) {
+
+ if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr,
+ mapped_loc_addr) &&
+ !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr,
+ mapped_rem_addr)) {
+
+ memcpy(remote_addr, &rem_info->remote_sockaddr,
+ sizeof(struct sockaddr_storage));
+ iwpm_print_sockaddr(remote_addr,
+ "get_remote_info: Remote sockaddr:");
+
+ hlist_del_init(&rem_info->hlist_node);
+ kfree(rem_info);
+ ret = 0;
+ break;
+ }
+ }
+ }
+get_remote_info_exit:
+ spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_get_remote_info);
+
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
u8 nl_client, gfp_t gfp)
{
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
return hash;
}
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
- *local_sockaddr,
- struct sockaddr_storage
- *mapped_sockaddr)
+static int get_hash_bucket(struct sockaddr_storage *a_sockaddr,
+ struct sockaddr_storage *b_sockaddr, u32 *hash)
{
- u32 local_hash, mapped_hash, hash;
+ u32 a_hash, b_hash;
- if (local_sockaddr->ss_family == AF_INET) {
- local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
- mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);
+ if (a_sockaddr->ss_family == AF_INET) {
+ a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr);
+ b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr);
- } else if (local_sockaddr->ss_family == AF_INET6) {
- local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
- mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
+ } else if (a_sockaddr->ss_family == AF_INET6) {
+ a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr);
+ b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr);
} else {
pr_err("%s: Invalid sockaddr family\n", __func__);
- return NULL;
+ return -EINVAL;
}
- if (local_hash == mapped_hash) /* if port mapper isn't available */
- hash = local_hash;
+ if (a_hash == b_hash) /* if port mapper isn't available */
+ *hash = a_hash;
else
- hash = jhash_2words(local_hash, mapped_hash, 0);
+ *hash = jhash_2words(a_hash, b_hash, 0);
+ return 0;
+}
+
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage
+ *local_sockaddr, struct sockaddr_storage
+ *mapped_sockaddr)
+{
+ u32 hash;
+ int ret;
- return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
+ ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash);
+ if (ret)
+ return NULL;
+ return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK];
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage
+ *mapped_loc_sockaddr, struct sockaddr_storage
+ *mapped_rem_sockaddr)
+{
+ u32 hash;
+ int ret;
+
+ ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash);
+ if (ret)
+ return NULL;
+ return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK];
}
static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
- for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
if (map_info->nl_client != nl_client)
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void)
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
- for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
if (!hlist_empty(&iwpm_hash_bucket[i])) {
full_bucket = 1;
break;
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 9777c869a140..ee2d9ff095be 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -76,6 +76,14 @@ struct iwpm_mapping_info {
u8 nl_client;
};
+struct iwpm_remote_info {
+ struct hlist_node hlist_node;
+ struct sockaddr_storage remote_sockaddr;
+ struct sockaddr_storage mapped_loc_sockaddr;
+ struct sockaddr_storage mapped_rem_sockaddr;
+ u8 nl_client;
+};
+
struct iwpm_admin_data {
atomic_t refcount;
atomic_t nlmsg_seq;
@@ -128,6 +136,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
int iwpm_get_nlmsg_seq(void);
/**
+ * iwpm_add_reminfo - Add remote address info of the connecting peer
+ * to the remote info hash table
+ * @reminfo: The remote info to be added
+ */
+void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
+
+/**
* iwpm_valid_client - Check if the port mapper client is valid
* @nl_client: The index of the netlink client
*
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 8c014b5dab4c..38acb3cfc545 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
*/
- if ((PAGE_ALIGN(addr + size) <= size) ||
- (PAGE_ALIGN(addr + size) <= addr))
+ if (((addr + size) < addr) ||
+ PAGE_ALIGN(addr + size) < (addr + size))
return ERR_PTR(-EINVAL);
if (!can_do_mlock())
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8b8cc6fa0ab0..40becdb3196e 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
int remove_existing_mapping = 0;
int ret = 0;
- mutex_lock(&umem->odp_data->umem_mutex);
/*
* Note: we avoid writing if seq is different from the initial seq, to
* handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
}
out:
- mutex_unlock(&umem->odp_data->umem_mutex);
-
/* On Demand Paging - avoid pinning the page */
if (umem->context->invalidate_range || !stored_page)
put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
user_virt += npages << PAGE_SHIFT;
+ mutex_lock(&umem->odp_data->umem_mutex);
for (j = 0; j < npages; ++j) {
ret = ib_umem_odp_map_dma_single_page(
umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
break;
k++;
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
if (ret < 0) {
/* Release left over pages when handling errors. */
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
+ mutex_lock(&umem->odp_data->umem_mutex);
for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
- mutex_lock(&umem->odp_data->umem_mutex);
if (umem->odp_data->page_list[idx]) {
struct page *page = umem->odp_data->page_list[idx];
- struct page *head_page = compound_head(page);
dma_addr_t dma = umem->odp_data->dma_list[idx];
dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT)
+ if (dma & ODP_WRITE_ALLOWED_BIT) {
+ struct page *head_page = compound_head(page);
/*
* set_page_dirty prefers being called with
* the page lock. However, MMU notifiers are
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* be removed.
*/
set_page_dirty(head_page);
+ }
/* on demand pinning support */
if (!umem->context->invalidate_range)
put_page(page);
umem->odp_data->page_list[idx] = NULL;
umem->odp_data->dma_list[idx] = 0;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 259dcc7779f5..88cce9bb72fe 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -246,6 +246,17 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uqp);
}
+ list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
+ struct ib_srq *srq = uobj->object;
+ struct ib_uevent_object *uevent =
+ container_of(uobj, struct ib_uevent_object, uobject);
+
+ idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
+ ib_destroy_srq(srq);
+ ib_uverbs_release_uevent(file, uevent);
+ kfree(uevent);
+ }
+
list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
struct ib_cq *cq = uobj->object;
struct ib_uverbs_event_file *ev_file = cq->cq_context;
@@ -258,17 +269,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(ucq);
}
- list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
- struct ib_srq *srq = uobj->object;
- struct ib_uevent_object *uevent =
- container_of(uobj, struct ib_uevent_object, uobject);
-
- idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
- ib_destroy_srq(srq);
- ib_uverbs_release_uevent(file, uevent);
- kfree(uevent);
- }
-
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
struct ib_mr *mr = uobj->object;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 57176ddd4c50..3ad8dc798f52 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
sizeof(ep->com.mapped_remote_addr));
}
+static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
+{
+ int ret;
+
+ print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
+ print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
+
+ ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
+ &child_ep->com.mapped_remote_addr,
+ &child_ep->com.remote_addr, RDMA_NL_C4IW);
+ if (ret)
+ PDBG("Unable to find remote peer addr info - err %d\n", ret);
+
+ return ret;
+}
+
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx, int use_ts, int ipv6)
{
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
- opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+ opt2 |= T5_ISS_F;
}
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
status, status2errno(status));
if (is_neg_adv(status)) {
- dev_warn(&dev->rdev.lldi.pdev->dev,
- "Connection problems for atid %u status %u (%s)\n",
- atid, status, neg_adv_str(status));
+ PDBG("%s Connection problems for atid %u status %u (%s)\n",
+ __func__, atid, status, neg_adv_str(status));
+ ep->stats.connect_neg_adv++;
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.neg_adv++;
+ mutex_unlock(&dev->rdev.stats.lock);
return 0;
}
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
- opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+ opt2 |= T5_ISS_F;
rpl5 = (void *)rpl;
memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
+
+ /*
+ * The mapped_local and mapped_remote addresses get setup with
+ * the actual 4-tuple. The local address will be based on the
+ * actual local address of the connection, but on the port number
+ * of the parent listening endpoint. The remote address is
+ * setup based on a query to the IWPM since we don't know what it
+ * originally was before mapping. If no mapping was done, then
+ * mapped_remote == remote, and mapped_local == local.
+ */
if (iptype == 4) {
struct sockaddr_in *sin = (struct sockaddr_in *)
- &child_ep->com.local_addr;
+ &child_ep->com.mapped_local_addr;
+
sin->sin_family = PF_INET;
sin->sin_port = local_port;
sin->sin_addr.s_addr = *(__be32 *)local_ip;
- sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+
+ sin = (struct sockaddr_in *)&child_ep->com.local_addr;
+ sin->sin_family = PF_INET;
+ sin->sin_port = ((struct sockaddr_in *)
+ &parent_ep->com.local_addr)->sin_port;
+ sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+ sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
sin->sin_family = PF_INET;
sin->sin_port = peer_port;
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
} else {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
- &child_ep->com.local_addr;
+ &child_ep->com.mapped_local_addr;
+
sin6->sin6_family = PF_INET6;
sin6->sin6_port = local_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
- sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = ((struct sockaddr_in6 *)
+ &parent_ep->com.local_addr)->sin6_port;
+ memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = peer_port;
memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
}
+ memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
+ sizeof(child_ep->com.remote_addr));
+ get_remote_addr(parent_ep, child_ep);
+
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
if (is_neg_adv(req->status)) {
- dev_warn(&dev->rdev.lldi.pdev->dev,
- "Negative advice on abort - tid %u status %d (%s)\n",
- ep->hwtid, req->status, neg_adv_str(req->status));
+ PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
+ ep->stats.abort_neg_adv++;
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.neg_adv++;
+ mutex_unlock(&dev->rdev.stats.lock);
return 0;
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* TP will ignore any value > 0 for MSS index.
*/
req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
- req->cookie = (unsigned long)skb;
+ req->cookie = (uintptr_t)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (is_neg_adv(req->status)) {
- dev_warn(&dev->rdev.lldi.pdev->dev,
- "Negative advice on abort - tid %u status %d (%s)\n",
- ep->hwtid, req->status, neg_adv_str(req->status));
+ PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
+ ep->stats.abort_neg_adv++;
+ dev->rdev.stats.neg_adv++;
kfree_skb(skb);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ab7692ac2044..68ddb3710215 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (unsigned long) &wr_wait;
+ res_wr->cookie = (uintptr_t)&wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (unsigned long) &wr_wait;
+ res_wr->cookie = (uintptr_t)&wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
goto err4;
cq->gen = 1;
- cq->gts = rdev->lldi.gts_reg;
cq->rdev = rdev;
if (user) {
- cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
- (cq->cqid << rdev->cqshift);
- cq->ugts &= PAGE_MASK;
+ u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
+
+ cq->ugts = (u64)rdev->bar2_pa + off;
+ } else if (is_t4(rdev->lldi.adapter_type)) {
+ cq->gts = rdev->lldi.gts_reg;
+ cq->qid_mask = -1U;
+ } else {
+ u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
+
+ cq->gts = rdev->bar2_kva + off;
+ cq->qid_mask = rdev->qpmask;
}
return 0;
err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
}
PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
__func__, chp->cq.cqid, chp, chp->cq.size,
- chp->cq.memsize,
- (unsigned long long) chp->cq.dma_addr);
+ chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
return &chp->ibcq;
err5:
kfree(mm2);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8fb295e4a9ab..7e895d714b19 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+ [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
int prev_ts_set = 0;
int idx, end;
-#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
idx = atomic_read(&dev->rdev.wr_log_idx) &
(dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
dev->rdev.stats.act_ofld_conn_fails);
seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
dev->rdev.stats.pas_ofld_conn_fails);
+ seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
return 0;
}
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
+ "conn_na %u abort_na %u "
"%pI4:%d/%d <-> %pI4:%d/%d\n",
ep, ep->com.cm_id, ep->com.qp,
(int)ep->com.state, ep->com.flags,
ep->com.history, ep->hwtid, ep->atid,
+ ep->stats.connect_neg_adv,
+ ep->stats.abort_neg_adv,
&lsin->sin_addr, ntohs(lsin->sin_port),
ntohs(mapped_lsin->sin_port),
&rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
+ "conn_na %u abort_na %u "
"%pI6:%d/%d <-> %pI6:%d/%d\n",
ep, ep->com.cm_id, ep->com.qp,
(int)ep->com.state, ep->com.flags,
ep->com.history, ep->hwtid, ep->atid,
+ ep->stats.connect_neg_adv,
+ ep->stats.abort_neg_adv,
&lsin6->sin6_addr, ntohs(lsin6->sin6_port),
ntohs(mapped_lsin6->sin6_port),
&rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
c4iw_init_dev_ucontext(rdev, &rdev->uctx);
/*
+ * This implementation assumes udb_density == ucq_density! Eventually
+ * we might need to support differing densities, but for now fail the
+ * open. Also the cqid and qpid ranges must match for now.
+ */
+ if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
+ pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+ pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
+ rdev->lldi.ucq_density);
+ err = -EINVAL;
+ goto err1;
+ }
+ if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
+ rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
+ pr_err(MOD "%s: unsupported qp and cq id ranges "
+ "qp start %u size %u cq start %u size %u\n",
+ pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
+ rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
+ rdev->lldi.vr->cq.size);
+ err = -EINVAL;
+ goto err1;
+ }
+
+ /*
* qpshift is the number of bits to shift the qpid left in order
* to get the correct address of the doorbell for that qp.
*/
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
- PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
+ PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
"qpmask 0x%x cqshift %lu cqmask 0x%x\n",
(unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (u64)pci_resource_start(rdev->lldi.pdev, 2),
+ (void *)pci_resource_start(rdev->lldi.pdev, 2),
rdev->lldi.db_reg,
rdev->lldi.gts_reg,
rdev->qpshift, rdev->qpmask,
@@ -1355,7 +1386,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_sq_host_wq_pidx(&qp->wq),
t4_sq_wq_size(&qp->wq));
if (ret) {
- pr_err(KERN_ERR MOD "%s: Fatal error - "
+ pr_err(MOD "%s: Fatal error - "
"DB overflow recovery failed - "
"error syncing SQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
@@ -1371,7 +1402,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_rq_wq_size(&qp->wq));
if (ret) {
- pr_err(KERN_ERR MOD "%s: Fatal error - "
+ pr_err(MOD "%s: Fatal error - "
"DB overflow recovery failed - "
"error syncing RQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d87e1650f643..97bb5550a6cf 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -137,6 +137,7 @@ struct c4iw_stats {
u64 tcam_full;
u64 act_ofld_conn_fails;
u64 pas_ofld_conn_fails;
+ u64 neg_adv;
};
struct c4iw_hw_queue {
@@ -814,6 +815,11 @@ struct c4iw_listen_ep {
int backlog;
};
+struct c4iw_ep_stats {
+ unsigned connect_neg_adv;
+ unsigned abort_neg_adv;
+};
+
struct c4iw_ep {
struct c4iw_ep_common com;
struct c4iw_ep *parent_ep;
@@ -846,6 +852,7 @@ struct c4iw_ep {
unsigned int retry_count;
int snd_win;
int rcv_win;
+ struct c4iw_ep_stats stats;
};
static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 6791fd16272c..cff815b91707 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -73,7 +73,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
c4iw_init_wr_wait(&wr_wait);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
- skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
FW_WR_COMPL_F);
- req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
+ req->wr.wr_lo = (__force __be64)&wr_wait;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
mhp->attr.zbva = 0;
mhp->attr.va_fbo = 0;
mhp->attr.page_size = 0;
- mhp->attr.len = ~0UL;
+ mhp->attr.len = ~0ULL;
mhp->attr.pbl_size = 0;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
- mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+ mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
if (ret)
goto err1;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 15cae5a31018..389ced335bc5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_NRES_V(2) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (unsigned long) &wr_wait;
+ res_wr->cookie = (uintptr_t)&wr_wait;
res = res_wr->res;
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (unsigned long) &ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)&ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
ret = c4iw_ofld_send(&rhp->rdev, skb);
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_FLOWID_V(qhp->ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
insert_mmap(ucontext, mm2);
mm3->key = uresp.sq_db_gts_key;
- mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
+ mm3->addr = (__force unsigned long)qhp->wq.sq.udb;
mm3->len = PAGE_SIZE;
insert_mmap(ucontext, mm3);
mm4->key = uresp.rq_db_gts_key;
- mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
+ mm4->addr = (__force unsigned long)qhp->wq.rq.udb;
mm4->len = PAGE_SIZE;
insert_mmap(ucontext, mm4);
if (mm5) {
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 871cdcac7be2..7f2a6c244d25 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -539,6 +539,7 @@ struct t4_cq {
size_t memsize;
__be64 bits_type_ts;
u32 cqid;
+ u32 qid_mask;
int vector;
u16 size; /* including status page */
u16 cidx;
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_M) {
val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
- INGRESSQID_V(cq->cqid);
+ INGRESSQID_V(cq->cqid & cq->qid_mask);
writel(val, cq->gts);
cq->cidx_inc -= CIDXINC_M;
}
val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
- INGRESSQID_V(cq->cqid);
+ INGRESSQID_V(cq->cqid & cq->qid_mask);
writel(val, cq->gts);
cq->cidx_inc = 0;
return 0;
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq)
u32 val;
val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
- INGRESSQID_V(cq->cqid);
+ INGRESSQID_V(cq->cqid & cq->qid_mask);
writel(val, cq->gts);
cq->cidx_inc = 0;
}
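
The new qid_mask field makes the INGRESSQID value written to the GTS register a masked copy of the cqid: on T4 the mask is -1U, so the write is unchanged, while on later chips it is rdev->qpmask, so only the low bits reach the register. A minimal stand-alone sketch of that masking, not part of the patch, with illustrative values (0x3f merely stands in for rdev->qpmask):

/* Stand-alone sketch of the cqid & qid_mask convention used above. */
#include <stdio.h>

int main(void)
{
	unsigned int cqid = 0x1234;
	unsigned int t4_qid_mask = -1U;		/* T4: masking is a no-op */
	unsigned int t5_qid_mask = 0x3f;	/* T5: rdev->qpmask (example value) */

	printf("T4 writes INGRESSQID 0x%x\n", cqid & t4_qid_mask);	/* 0x1234 */
	printf("T5 writes INGRESSQID 0x%x\n", cqid & t5_qid_mask);	/* 0x34   */
	return 0;
}
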
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5e53327fc647..343e8daf2270 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */
#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
-#define CONG_CNTRL_VALID (1 << 18)
+#define T5_ISS_S 18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F T5_ISS_V(1U)
#endif /* _T4FW_RI_API_H_ */
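
The hunk above swaps the open-coded CONG_CNTRL_VALID bit for the shift/value/flag macro triple used throughout the cxgb4 headers. A minimal stand-alone sketch of the equivalence, not part of the patch; the T5_ISS_* names are copied from the hunk, while OLD_CONG_CNTRL_VALID is only a stand-in for the removed definition:

/* Stand-alone sketch: the new flag macro produces the same bit. */
#include <assert.h>
#include <stdio.h>

#define T5_ISS_S	18
#define T5_ISS_V(x)	((x) << T5_ISS_S)
#define T5_ISS_F	T5_ISS_V(1U)

#define OLD_CONG_CNTRL_VALID	(1 << 18)	/* the removed definition */

int main(void)
{
	unsigned int opt2 = 0;

	opt2 |= T5_ISS_F;	/* as done in send_connect() and accept_cr() */
	assert(opt2 == OLD_CONG_CNTRL_VALID);
	printf("T5_ISS_F = 0x%x\n", T5_ISS_F);
	return 0;
}
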
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
index 120aedf9f989..cec181532924 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -77,7 +77,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
return -EINVAL;
}
- memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+ memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
interface_id = be64_to_cpu(my_gid.global.interface_id);
@@ -114,7 +114,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
return -EINVAL;
}
- memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+ memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
interface_id = be64_to_cpu(my_gid.global.interface_id);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 6d7f453b4d05..450d15965005 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,9 +40,9 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/io.h>
-#include <linux/aio.h>
#include <linux/jiffies.h>
#include <linux/cpu.h>
+#include <linux/uio.h>
#include <asm/pgtable.h>
#include "ipath_kernel.h"
@@ -53,15 +53,19 @@ static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
loff_t *);
-static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
- unsigned long , loff_t);
+static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);
+/*
+ * This is really, really weird shit - write() and writev() here
+ * have completely unrelated semantics. Sucky userland ABI,
+ * film at 11.
+ */
static const struct file_operations ipath_file_ops = {
.owner = THIS_MODULE,
.write = ipath_write,
- .aio_write = ipath_writev,
+ .write_iter = ipath_write_iter,
.open = ipath_open,
.release = ipath_close,
.poll = ipath_poll,
@@ -2414,18 +2418,17 @@ bail:
return ret;
}
-static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
- unsigned long dim, loff_t off)
+static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct ipath_filedata *fp = filp->private_data;
struct ipath_portdata *pd = port_fp(filp);
struct ipath_user_sdma_queue *pq = fp->pq;
- if (!dim)
+ if (!iter_is_iovec(from) || !from->nr_segs)
return -EINVAL;
- return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
+ return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
}
static struct class *ipath_class;
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 33c45dfcbd88..1ca8e32a9592 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -82,14 +82,14 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
- error = ipathfs_mknod(parent->d_inode, *dentry,
+ error = ipathfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
else
error = PTR_ERR(*dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return error;
}
@@ -277,11 +277,11 @@ static int remove_file(struct dentry *parent, char *name)
}
spin_lock(&tmp->d_lock);
- if (!d_unhashed(tmp) && tmp->d_inode) {
+ if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
dget_dlock(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
- simple_unlink(parent->d_inode, tmp);
+ simple_unlink(d_inode(parent), tmp);
} else
spin_unlock(&tmp->d_lock);
@@ -302,7 +302,7 @@ static int remove_device_files(struct super_block *sb,
int ret;
root = dget(sb->s_root);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
dir = lookup_one_len(unit, root, strlen(unit));
@@ -315,10 +315,10 @@ static int remove_device_files(struct super_block *sb,
remove_file(dir, "flash");
remove_file(dir, "atomic_counters");
d_delete(dir);
- ret = simple_rmdir(root->d_inode, dir);
+ ret = simple_rmdir(d_inode(root), dir);
bail:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(root);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index a31e031afd87..0f00204d2ece 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context {
int query_id;
struct list_head list;
int block_num;
+ ib_sa_comp_mask guid_indexes;
+ u8 method;
};
struct mlx4_next_alias_guid_work {
u8 port;
u8 block_num;
+ u8 method;
struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+ int *resched_delay_sec);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
u8 port_num, u8 *p_data)
@@ -118,6 +123,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
return IB_SA_COMP_MASK(4 + index);
}
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+ int port, int slave_init)
+{
+ __be64 curr_guid, required_guid;
+ int record_num = slave / 8;
+ int index = slave % 8;
+ int port_index = port - 1;
+ unsigned long flags;
+ int do_work = 0;
+
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+ if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
+ GUID_STATE_NEED_PORT_INIT)
+ goto unlock;
+ if (!slave_init) {
+ curr_guid = *(__be64 *)&dev->sriov.
+ alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].
+ all_recs[GUID_REC_SIZE * index];
+ if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
+ !curr_guid)
+ goto unlock;
+ required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+ } else {
+ required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
+ if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ goto unlock;
+ }
+ *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].
+ all_recs[GUID_REC_SIZE * index] = required_guid;
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].guid_indexes
+ |= mlx4_ib_get_aguid_comp_mask_from_ix(index);
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].status
+ = MLX4_GUID_INFO_STATUS_IDLE;
+ /* set to run immediately */
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].time_to_run = 0;
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].
+ guids_retry_schedule[index] = 0;
+ do_work = 1;
+unlock:
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+
+ if (do_work)
+ mlx4_ib_init_alias_guid_work(dev, port_index);
+}
+
/*
* Whenever new GUID is set/unset (guid table change) create event and
* notify the relevant slave (master also should be notified).
@@ -138,10 +194,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
enum slave_port_state prev_state;
__be64 tmp_cur_ag, form_cache_ag;
enum slave_port_gen_event gen_event;
+ struct mlx4_sriov_alias_guid_info_rec_det *rec;
+ unsigned long flags;
+ __be64 required_value;
if (!mlx4_is_master(dev->dev))
return;
+ rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+ all_rec_per_port[block_num];
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
@@ -166,8 +227,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
*/
if (tmp_cur_ag != form_cache_ag)
continue;
- mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+ required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+ if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ required_value = 0;
+
+ if (tmp_cur_ag == required_value) {
+ rec->guid_indexes = rec->guid_indexes &
+ ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+ } else {
+ /* may notify port down if value is 0 */
+ if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+ spin_unlock_irqrestore(&dev->sriov.
+ alias_guid.ag_work_lock, flags);
+ continue;
+ }
+ }
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+ flags);
+ mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
/*2 cases: Valid GUID, and Invalid Guid*/
if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
@@ -188,10 +268,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
&gen_event);
- pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
- slave_id, port_num);
- mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
- MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+ if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+ pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+ slave_id, port_num);
+ mlx4_gen_port_state_change_eqe(dev->dev,
+ slave_id,
+ port_num,
+ MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+ }
}
}
}
@@ -206,6 +290,9 @@ static void aliasguid_query_handler(int status,
int i;
struct mlx4_sriov_alias_guid_info_rec_det *rec;
unsigned long flags, flags1;
+ ib_sa_comp_mask declined_guid_indexes = 0;
+ ib_sa_comp_mask applied_guid_indexes = 0;
+ unsigned int resched_delay_sec = 0;
if (!context)
return;
@@ -216,9 +303,9 @@ static void aliasguid_query_handler(int status,
all_rec_per_port[cb_ctx->block_num];
if (status) {
- rec->status = MLX4_GUID_INFO_STATUS_IDLE;
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
+ rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -235,57 +322,101 @@ static void aliasguid_query_handler(int status,
rec = &dev->sriov.alias_guid.ports_guid[port_index].
all_rec_per_port[guid_rec->block_num];
- rec->status = MLX4_GUID_INFO_STATUS_SET;
- rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
- __be64 tmp_cur_ag;
- tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+ __be64 sm_response, required_val;
+
+ if (!(cb_ctx->guid_indexes &
+ mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+ continue;
+ sm_response = *(__be64 *)&guid_rec->guid_info_list
+ [i * GUID_REC_SIZE];
+ required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+ if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+ if (required_val ==
+ cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ goto next_entry;
+
+ /* A new value was set before we got the response */
+ pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+ be64_to_cpu(required_val),
+ i, guid_rec->block_num);
+ goto entry_declined;
+ }
+
/* check if the SM didn't assign one of the records.
- * if it didn't, if it was not sysadmin request:
- * ask the SM to give a new GUID, (instead of the driver request).
+ * if it didn't, ask for it again.
*/
- if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
- mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
- "block_num: %d was declined by SM, "
- "ownership by %d (0 = driver, 1=sysAdmin,"
- " 2=None)\n", __func__, i,
- guid_rec->block_num, rec->ownership);
- if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
- /* if it is driver assign, asks for new GUID from SM*/
- *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
- MLX4_NOT_SET_GUID;
-
- /* Mark the record as not assigned, and let it
- * be sent again in the next work sched.*/
- rec->status = MLX4_GUID_INFO_STATUS_IDLE;
- rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
- }
+ if (sm_response == MLX4_NOT_SET_GUID) {
+ if (rec->guids_retry_schedule[i] == 0)
+ mlx4_ib_warn(&dev->ib_dev,
+ "%s:Record num %d in block_num: %d was declined by SM\n",
+ __func__, i,
+ guid_rec->block_num);
+ goto entry_declined;
} else {
/* properly assigned record. */
/* We save the GUID we just got from the SM in the
* admin_guid in order to be persistent, and in the
* request from the sm the process will ask for the same GUID */
- if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
- tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
- /* the sysadmin assignment failed.*/
- mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
- " admin guid after SysAdmin "
- "configuration. "
- "Record num %d in block_num:%d "
- "was declined by SM, "
- "new val(0x%llx) was kept\n",
- __func__, i,
- guid_rec->block_num,
- be64_to_cpu(*(__be64 *) &
- rec->all_recs[i * GUID_REC_SIZE]));
+ if (required_val &&
+ sm_response != required_val) {
+ /* Warn only on first retry */
+ if (rec->guids_retry_schedule[i] == 0)
+ mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+ " admin guid after SysAdmin "
+ "configuration. "
+ "Record num %d in block_num:%d "
+ "was declined by SM, "
+ "new val(0x%llx) was kept, SM returned (0x%llx)\n",
+ __func__, i,
+ guid_rec->block_num,
+ be64_to_cpu(required_val),
+ be64_to_cpu(sm_response));
+ goto entry_declined;
} else {
- memcpy(&rec->all_recs[i * GUID_REC_SIZE],
- &guid_rec->guid_info_list[i * GUID_REC_SIZE],
- GUID_REC_SIZE);
+ *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+ sm_response;
+ if (required_val == 0)
+ mlx4_set_admin_guid(dev->dev,
+ sm_response,
+ (guid_rec->block_num
+ * NUM_ALIAS_GUID_IN_REC) + i,
+ cb_ctx->port);
+ goto next_entry;
}
}
+entry_declined:
+ declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+ rec->guids_retry_schedule[i] =
+ (rec->guids_retry_schedule[i] == 0) ? 1 :
+ min((unsigned int)60,
+ rec->guids_retry_schedule[i] * 2);
+ /* using the minimum value among all entries in that record */
+ resched_delay_sec = (resched_delay_sec == 0) ?
+ rec->guids_retry_schedule[i] :
+ min(resched_delay_sec,
+ rec->guids_retry_schedule[i]);
+ continue;
+
+next_entry:
+ rec->guids_retry_schedule[i] = 0;
}
+
+ applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
+ if (declined_guid_indexes ||
+ rec->guid_indexes & ~(applied_guid_indexes)) {
+ pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
+ guid_rec->block_num,
+ be64_to_cpu((__force __be64)rec->guid_indexes),
+ be64_to_cpu((__force __be64)applied_guid_indexes),
+ be64_to_cpu((__force __be64)declined_guid_indexes));
+ rec->time_to_run = ktime_get_real_ns() +
+ resched_delay_sec * NSEC_PER_SEC;
+ } else {
+ rec->status = MLX4_GUID_INFO_STATUS_SET;
+ }
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
/*
The function is called here to cover the cases where the
SM doesn't send an SMP, so in the SA response the driver
@@ -297,10 +428,13 @@ static void aliasguid_query_handler(int status,
out:
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
- if (!dev->sriov.is_going_down)
+ if (!dev->sriov.is_going_down) {
+ get_low_record_time_index(dev, port_index, &resched_delay_sec);
queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
&dev->sriov.alias_guid.ports_guid[port_index].
- alias_guid_work, 0);
+ alias_guid_work,
+ msecs_to_jiffies(resched_delay_sec * 1000));
+ }
if (cb_ctx->sa_query) {
list_del(&cb_ctx->list);
kfree(cb_ctx);
@@ -317,9 +451,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
ib_sa_comp_mask comp_mask = 0;
dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
- = MLX4_GUID_INFO_STATUS_IDLE;
- dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
- = MLX4_GUID_INFO_RECORD_SET;
+ = MLX4_GUID_INFO_STATUS_SET;
/* calculate the comp_mask for that record.*/
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -333,19 +465,21 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
need to assign GUIDs, then don't put it up for assignment.
*/
if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
- (!index && !i) ||
- MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
- ports_guid[port - 1].all_rec_per_port[index].ownership)
+ (!index && !i))
continue;
comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
}
dev->sriov.alias_guid.ports_guid[port - 1].
- all_rec_per_port[index].guid_indexes = comp_mask;
+ all_rec_per_port[index].guid_indexes |= comp_mask;
+ if (dev->sriov.alias_guid.ports_guid[port - 1].
+ all_rec_per_port[index].guid_indexes)
+ dev->sriov.alias_guid.ports_guid[port - 1].
+ all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
+
}
static int set_guid_rec(struct ib_device *ibdev,
- u8 port, int index,
- struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+ struct mlx4_next_alias_guid_work *rec)
{
int err;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -354,6 +488,9 @@ static int set_guid_rec(struct ib_device *ibdev,
struct ib_port_attr attr;
struct mlx4_alias_guid_work_context *callback_context;
unsigned long resched_delay, flags, flags1;
+ u8 port = rec->port + 1;
+ int index = rec->block_num;
+ struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
struct list_head *head =
&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
@@ -380,6 +517,8 @@ static int set_guid_rec(struct ib_device *ibdev,
callback_context->port = port;
callback_context->dev = dev;
callback_context->block_num = index;
+ callback_context->guid_indexes = rec_det->guid_indexes;
+ callback_context->method = rec->method;
memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
@@ -399,7 +538,7 @@ static int set_guid_rec(struct ib_device *ibdev,
callback_context->query_id =
ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
ibdev, port, &guid_info_rec,
- comp_mask, rec_det->method, 1000,
+ comp_mask, rec->method, 1000,
GFP_KERNEL, aliasguid_query_handler,
callback_context,
&callback_context->sa_query);
@@ -434,6 +573,30 @@ out:
return err;
}
+static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
+{
+ int j, k, entry;
+ __be64 guid;
+
+ /*Check if the SM doesn't need to assign the GUIDs*/
+ for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+ for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+ entry = j * NUM_ALIAS_GUID_IN_REC + k;
+ /* no request for the 0 entry (hw guid) */
+ if (!entry || entry > dev->dev->persist->num_vfs ||
+ !mlx4_is_slave_active(dev->dev, entry))
+ continue;
+ guid = mlx4_get_admin_guid(dev->dev, entry, port);
+ *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+ all_rec_per_port[j].all_recs
+ [GUID_REC_SIZE * k] = guid;
+ pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
+ entry,
+ be64_to_cpu(guid),
+ port);
+ }
+ }
+}
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
int i;
@@ -443,6 +606,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+ if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
+ GUID_STATE_NEED_PORT_INIT) {
+ mlx4_ib_guid_port_init(dev, port);
+ dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
+ (~GUID_STATE_NEED_PORT_INIT);
+ }
for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
invalidate_guid_record(dev, port, i);
@@ -462,60 +632,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
-/* The function returns the next record that was
- * not configured (or failed to be configured) */
-static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
- struct mlx4_next_alias_guid_work *rec)
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+ struct mlx4_next_alias_guid_work *next_rec,
+ int record_index)
{
- int j;
- unsigned long flags;
+ int i;
+ int lowest_time_entry = -1;
+ int lowest_time = 0;
+ ib_sa_comp_mask delete_guid_indexes = 0;
+ ib_sa_comp_mask set_guid_indexes = 0;
+ struct mlx4_sriov_alias_guid_info_rec_det *rec =
+ &dev->sriov.alias_guid.ports_guid[port].
+ all_rec_per_port[record_index];
- for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
- spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
- if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
- MLX4_GUID_INFO_STATUS_IDLE) {
- memcpy(&rec->rec_det,
- &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
- sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
- rec->port = port;
- rec->block_num = j;
- dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
- MLX4_GUID_INFO_STATUS_PENDING;
- spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
- return 0;
+ for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+ if (!(rec->guid_indexes &
+ mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+ continue;
+
+ if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+ cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ delete_guid_indexes |=
+ mlx4_ib_get_aguid_comp_mask_from_ix(i);
+ else
+ set_guid_indexes |=
+ mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+ if (lowest_time_entry == -1 || rec->guids_retry_schedule[i] <=
+ lowest_time) {
+ lowest_time_entry = i;
+ lowest_time = rec->guids_retry_schedule[i];
}
- spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
}
- return -ENOENT;
+
+ memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+ next_rec->port = port;
+ next_rec->block_num = record_index;
+
+ if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+ cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+ next_rec->rec_det.guid_indexes = delete_guid_indexes;
+ next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+ } else {
+ next_rec->rec_det.guid_indexes = set_guid_indexes;
+ next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+ }
}
-static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
- int rec_index,
- struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+/* return the index of the record that should be updated, based on the
+ * lowest rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+ int *resched_delay_sec)
{
- dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
- rec_det->guid_indexes;
- memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
- rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
- dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
- rec_det->status;
+ int record_index = -1;
+ u64 low_record_time = 0;
+ struct mlx4_sriov_alias_guid_info_rec_det rec;
+ int j;
+
+ for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+ rec = dev->sriov.alias_guid.ports_guid[port].
+ all_rec_per_port[j];
+ if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+ rec.guid_indexes) {
+ if (record_index == -1 ||
+ rec.time_to_run < low_record_time) {
+ record_index = j;
+ low_record_time = rec.time_to_run;
+ }
+ }
+ }
+ if (resched_delay_sec) {
+ u64 curr_time = ktime_get_real_ns();
+
+ *resched_delay_sec = (low_record_time < curr_time) ? 0 :
+ div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+ }
+
+ return record_index;
}
-static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
+/* The function returns the next record that was
+ * not configured (or failed to be configured) */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+ struct mlx4_next_alias_guid_work *rec)
{
- int j;
- struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
-
- for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
- memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
- rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
- IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
- IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
- IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
- IB_SA_GUIDINFO_REC_GID7;
- rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
- set_administratively_guid_record(dev, port, j, &rec_det);
+ unsigned long flags;
+ int record_index;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+ record_index = get_low_record_time_index(dev, port, NULL);
+
+ if (record_index < 0) {
+ ret = -ENOENT;
+ goto out;
}
+
+ set_required_record(dev, port, rec, record_index);
+out:
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+ return ret;
}
static void alias_guid_work(struct work_struct *work)
@@ -545,9 +762,7 @@ static void alias_guid_work(struct work_struct *work)
goto out;
}
- set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
- &rec->rec_det);
-
+ set_guid_rec(&dev->ib_dev, rec);
out:
kfree(rec);
}
@@ -562,6 +777,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
if (!dev->sriov.is_going_down) {
+ /* If there is a pending work item, cancel it and then queue the new
+ * one; otherwise the new request won't run until the previous one
+ * has finished, since the same work struct is used.
+ */
+ cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+ alias_guid_work);
queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
&dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
}
@@ -609,7 +830,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
char alias_wq_name[15];
int ret = 0;
- int i, j, k;
+ int i, j;
union ib_gid gid;
if (!mlx4_is_master(dev->dev))
@@ -633,33 +854,25 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
for (i = 0 ; i < dev->num_ports; i++) {
memset(&dev->sriov.alias_guid.ports_guid[i], 0,
sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
- /*Check if the SM doesn't need to assign the GUIDs*/
+ dev->sriov.alias_guid.ports_guid[i].state_flags |=
+ GUID_STATE_NEED_PORT_INIT;
for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
- if (mlx4_ib_sm_guid_assign) {
- dev->sriov.alias_guid.ports_guid[i].
- all_rec_per_port[j].
- ownership = MLX4_GUID_DRIVER_ASSIGN;
- continue;
- }
- dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
- ownership = MLX4_GUID_NONE_ASSIGN;
- /*mark each val as it was deleted,
- till the sysAdmin will give it valid val*/
- for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
- *(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
- all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
- cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
- }
+ /* mark each val as it was deleted */
+ memset(dev->sriov.alias_guid.ports_guid[i].
+ all_rec_per_port[j].all_recs, 0xFF,
+ sizeof(dev->sriov.alias_guid.ports_guid[i].
+ all_rec_per_port[j].all_recs));
}
INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
/*prepare the records, set them to be allocated by sm*/
+ if (mlx4_ib_sm_guid_assign)
+ for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
+ mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
invalidate_guid_record(dev, i + 1, j);
dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
dev->sriov.alias_guid.ports_guid[i].port = i;
- if (mlx4_ib_sm_guid_assign)
- set_all_slaves_guids(dev, i);
snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
dev->sriov.alias_guid.ports_guid[i].wq =
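
The retry handling added to aliasguid_query_handler() gives each declined entry a per-entry backoff: the delay starts at one second, doubles on every further decline, is capped at 60 seconds, and the record is rescheduled using the smallest delay among its declined entries. A minimal stand-alone sketch of that backoff progression, not part of the patch:

/* Stand-alone sketch of the per-entry retry backoff used above. */
#include <stdio.h>

static unsigned int next_retry(unsigned int cur)
{
	unsigned int next = cur ? cur * 2 : 1;	/* first decline -> 1s, then double */

	return next > 60 ? 60 : next;		/* capped at 60 seconds */
}

int main(void)
{
	unsigned int delay = 0;
	int i;

	for (i = 0; i < 8; i++) {
		delay = next_retry(delay);
		printf("decline %d -> retry in %us\n", i + 1, delay);
	}
	return 0;	/* prints 1, 2, 4, 8, 16, 32, 60, 60 */
}
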
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 59040265e361..9cd2b002d7ae 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1430,6 +1430,10 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->ring[i].addr,
rx_buf_size,
DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
+ kfree(tun_qp->ring[i].addr);
+ goto err;
+ }
}
for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
@@ -1442,6 +1446,11 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->tx_ring[i].buf.addr,
tx_buf_size,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ctx->ib_dev,
+ tun_qp->tx_ring[i].buf.map)) {
+ kfree(tun_qp->tx_ring[i].buf.addr);
+ goto tx_err;
+ }
tun_qp->tx_ring[i].ah = NULL;
}
spin_lock_init(&tun_qp->tx_lock);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b972c0b41799..cc64400d41ac 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -66,9 +66,9 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-int mlx4_ib_sm_guid_assign = 1;
+int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
-MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
@@ -587,8 +587,9 @@ static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_vio
((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
}
- err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
return err;
@@ -1525,8 +1526,8 @@ static void update_gids_task(struct work_struct *work)
memcpy(gids, gw->gids, sizeof gw->gids);
err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
+ MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
if (err)
pr_warn("set port command failed\n");
else
@@ -1564,12 +1565,11 @@ static void reset_gids_task(struct work_struct *work)
IB_LINK_LAYER_ETHERNET) {
err = mlx4_cmd(dev, mailbox->dma,
MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- 1, MLX4_CMD_SET_PORT,
+ MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err)
- pr_warn(KERN_WARNING
- "set port %d command failed\n", gw->port);
+ pr_warn("set port %d command failed\n", gw->port);
}
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2790,9 +2790,31 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_SLAVE_INIT:
/* here, p is the slave id */
do_slave_init(ibdev, p, 1);
+ if (mlx4_is_master(dev)) {
+ int i;
+
+ for (i = 1; i <= ibdev->num_ports; i++) {
+ if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+ == IB_LINK_LAYER_INFINIBAND)
+ mlx4_ib_slave_alias_guid_event(ibdev,
+ p, i,
+ 1);
+ }
+ }
return;
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+ if (mlx4_is_master(dev)) {
+ int i;
+
+ for (i = 1; i <= ibdev->num_ports; i++) {
+ if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+ == IB_LINK_LAYER_INFINIBAND)
+ mlx4_ib_slave_alias_guid_event(ibdev,
+ p, i,
+ 0);
+ }
+ }
/* here, p is the slave id */
do_slave_init(ibdev, p, 0);
return;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f829fd935b79..fce3934372a1 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -342,14 +342,9 @@ struct mlx4_ib_ah {
enum mlx4_guid_alias_rec_status {
MLX4_GUID_INFO_STATUS_IDLE,
MLX4_GUID_INFO_STATUS_SET,
- MLX4_GUID_INFO_STATUS_PENDING,
};
-enum mlx4_guid_alias_rec_ownership {
- MLX4_GUID_DRIVER_ASSIGN,
- MLX4_GUID_SYSADMIN_ASSIGN,
- MLX4_GUID_NONE_ASSIGN, /*init state of each record*/
-};
+#define GUID_STATE_NEED_PORT_INIT 0x01
enum mlx4_guid_alias_rec_method {
MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET,
@@ -360,8 +355,8 @@ struct mlx4_sriov_alias_guid_info_rec_det {
u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
ib_sa_comp_mask guid_indexes; /*indicates which of the 8 records are valid*/
enum mlx4_guid_alias_rec_status status; /*indicates the administrative status of the record.*/
- u8 method; /*set or delete*/
- enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/
+ unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
+ u64 time_to_run;
};
struct mlx4_sriov_alias_guid_port_rec_det {
@@ -369,6 +364,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
struct workqueue_struct *wq;
struct delayed_work alias_guid_work;
u8 port;
+ u32 state_flags;
struct mlx4_sriov_alias_guid *parent;
struct list_head cb_list;
};
@@ -802,6 +798,8 @@ int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+ int port, int slave_init);
int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ed2bd6701f9b..02fc91c68027 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -566,6 +566,10 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
sizeof (struct mlx4_ib_proxy_sqp_hdr),
DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
+ kfree(qp->sqp_proxy_rcv[i].addr);
+ goto err;
+ }
}
return 0;
@@ -2605,8 +2609,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
- wr->wr.ud.hlen);
+ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
*lso_seg_len = halign;
return 0;
}
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index d10c2b8a5dad..6797108ce873 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -46,21 +46,17 @@
static ssize_t show_admin_alias_guid(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int record_num;/*0-15*/
- int guid_index_in_rec; /*0 - 7*/
struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
+ __be64 sysadmin_ag_val;
- record_num = mlx4_ib_iov_dentry->entry_num / 8 ;
- guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ;
+ sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
+ mlx4_ib_iov_dentry->entry_num,
+ port->num);
- return sprintf(buf, "%llx\n",
- be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid.
- ports_guid[port->num - 1].
- all_rec_per_port[record_num].
- all_recs[8 * guid_index_in_rec]));
+ return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
}
/* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -80,6 +76,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
u64 sysadmin_ag_val;
+ unsigned long flags;
record_num = mlx4_ib_iov_dentry->entry_num / 8;
guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
@@ -87,6 +84,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
pr_err("GUID 0 block 0 is RO\n");
return count;
}
+ spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
sscanf(buf, "%llx", &sysadmin_ag_val);
*(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
all_rec_per_port[record_num].
@@ -96,33 +94,15 @@ static ssize_t store_admin_alias_guid(struct device *dev,
/* Change the state to be pending for update */
mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
= MLX4_GUID_INFO_STATUS_IDLE ;
-
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
- = MLX4_GUID_INFO_RECORD_SET;
-
- switch (sysadmin_ag_val) {
- case MLX4_GUID_FOR_DELETE_VAL:
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
- = MLX4_GUID_INFO_RECORD_DELETE;
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
- = MLX4_GUID_SYSADMIN_ASSIGN;
- break;
- /* The sysadmin requests the SM to re-assign */
- case MLX4_NOT_SET_GUID:
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
- = MLX4_GUID_DRIVER_ASSIGN;
- break;
- /* The sysadmin requests a specific value.*/
- default:
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
- = MLX4_GUID_SYSADMIN_ASSIGN;
- break;
- }
+ mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
+ mlx4_ib_iov_dentry->entry_num,
+ port->num);
/* set the record index */
mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
- = mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+ |= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+ spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
return count;
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 39ab0caefdf9..66080580e24d 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c463e7bba5f4..2ee6b1051975 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -572,11 +572,15 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
+ struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+ void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+
mlx5_cq_arm(&to_mcq(ibcq)->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
- to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
- MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
+ uar_page,
+ MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
+ to_mcq(ibcq)->mcq.cons_index);
return 0;
}
@@ -697,8 +701,6 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
cq->mcq.set_ci_db = cq->db.db;
cq->mcq.arm_db = cq->db.db + 1;
- *cq->mcq.set_ci_db = 0;
- *cq->mcq.arm_db = 0;
cq->mcq.cqe_sz = cqe_size;
err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
@@ -782,7 +784,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
cq->cqe_size = cqe_size;
cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
- err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
+ err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
if (err)
goto err_cqb;
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index ece028fc47d6..a0e4e6ddb71a 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 657af9a1167c..9cf9a37bb5ff 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index cc4ac1e583b2..57c9809e8b87 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -62,95 +62,6 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
-int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
-{
- struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
- struct mlx5_eq *eq, *n;
- int err = -ENOENT;
-
- spin_lock(&table->lock);
- list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
- if (eq->index == vector) {
- *eqn = eq->eqn;
- *irqn = eq->irqn;
- err = 0;
- break;
- }
- }
- spin_unlock(&table->lock);
-
- return err;
-}
-
-static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
-{
- struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
- char name[MLX5_MAX_EQ_NAME];
- struct mlx5_eq *eq, *n;
- int ncomp_vec;
- int nent;
- int err;
- int i;
-
- INIT_LIST_HEAD(&dev->eqs_list);
- ncomp_vec = table->num_comp_vectors;
- nent = MLX5_COMP_EQ_SIZE;
- for (i = 0; i < ncomp_vec; i++) {
- eq = kzalloc(sizeof(*eq), GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto clean;
- }
-
- snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
- err = mlx5_create_map_eq(dev->mdev, eq,
- i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
- name, &dev->mdev->priv.uuari.uars[0]);
- if (err) {
- kfree(eq);
- goto clean;
- }
- mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
- eq->index = i;
- spin_lock(&table->lock);
- list_add_tail(&eq->list, &dev->eqs_list);
- spin_unlock(&table->lock);
- }
-
- dev->num_comp_vectors = ncomp_vec;
- return 0;
-
-clean:
- spin_lock(&table->lock);
- list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
- list_del(&eq->list);
- spin_unlock(&table->lock);
- if (mlx5_destroy_unmap_eq(dev->mdev, eq))
- mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
- kfree(eq);
- spin_lock(&table->lock);
- }
- spin_unlock(&table->lock);
- return err;
-}
-
-static void free_comp_eqs(struct mlx5_ib_dev *dev)
-{
- struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
- struct mlx5_eq *eq, *n;
-
- spin_lock(&table->lock);
- list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
- list_del(&eq->list);
- spin_unlock(&table->lock);
- if (mlx5_destroy_unmap_eq(dev->mdev, eq))
- mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
- kfree(eq);
- spin_lock(&table->lock);
- }
- spin_unlock(&table->lock);
-}
-
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
@@ -1291,10 +1202,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
get_ext_port_caps(dev);
- err = alloc_comp_eqs(dev);
- if (err)
- goto err_dealloc;
-
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
@@ -1303,7 +1210,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey;
dev->num_ports = mdev->caps.gen.num_ports;
dev->ib_dev.phys_port_cnt = dev->num_ports;
- dev->ib_dev.num_comp_vectors = dev->num_comp_vectors;
+ dev->ib_dev.num_comp_vectors =
+ dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dma_device = &mdev->pdev->dev;
dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
@@ -1390,13 +1298,13 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
err = init_node_data(dev);
if (err)
- goto err_eqs;
+ goto err_dealloc;
mutex_init(&dev->cap_mask_mutex);
err = create_dev_resources(&dev->devr);
if (err)
- goto err_eqs;
+ goto err_dealloc;
err = mlx5_ib_odp_init_one(dev);
if (err)
@@ -1433,9 +1341,6 @@ err_odp:
err_rsrc:
destroy_dev_resources(&dev->devr);
-err_eqs:
- free_comp_eqs(dev);
-
err_dealloc:
ib_dealloc_device((struct ib_device *)dev);
@@ -1450,7 +1355,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
- free_comp_eqs(dev);
ib_dealloc_device(&dev->ib_dev);
}
@@ -1458,6 +1362,7 @@ static struct mlx5_interface mlx5_ib_interface = {
.add = mlx5_ib_add,
.remove = mlx5_ib_remove,
.event = mlx5_ib_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_IB,
};
static int __init mlx5_ib_init(void)
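For readers following the mlx5 hunks above: the EQ bookkeeping that used to live in mlx5_ib (mlx5_vector2eqn(), alloc_comp_eqs(), free_comp_eqs()) now stays in mlx5_core, and the IB driver only reports the core's completion-vector count and tags its mlx5_interface with a protocol. Below is a minimal sketch of that registration pattern; it assumes the mlx5_register_interface()/mlx5_unregister_interface() entry points from include/linux/mlx5/driver.h, and every "my_ulp_*" name is illustrative rather than part of the patch.

#include <linux/module.h>
#include <linux/mlx5/driver.h>

/* Sketch only: a ULP hooking into mlx5_core, as mlx5_ib does above. */
static void *my_ulp_add(struct mlx5_core_dev *mdev)
{
	/*
	 * Allocate the ULP context here; completion vectors are read from
	 * mdev->priv.eq_table.num_comp_vectors instead of being allocated
	 * by the ULP itself.
	 */
	return NULL;
}

static void my_ulp_remove(struct mlx5_core_dev *mdev, void *context)
{
	/* Free whatever ->add() allocated. */
}

static struct mlx5_interface my_ulp_interface = {
	.add      = my_ulp_add,
	.remove   = my_ulp_remove,
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,	/* new field used above */
};

static int __init my_ulp_init(void)
{
	return mlx5_register_interface(&my_ulp_interface);
}

static void __exit my_ulp_exit(void)
{
	mlx5_unregister_interface(&my_ulp_interface);
}

module_init(my_ulp_init);
module_exit(my_ulp_exit);
MODULE_LICENSE("GPL");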
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 611a9fdf2f38..40df2cca0609 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 83f22fe297c8..dff1cfcdf476 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -421,9 +421,7 @@ struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
- struct list_head eqs_list;
int num_ports;
- int num_comp_vectors;
/* serialize update of capability mask
*/
struct mutex cap_mask_mutex;
@@ -594,7 +592,6 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
-int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index cd9822eeacae..71c593583864 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index a2c541c4809a..5099db08afd2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index be0cd358b080..d35f62d4f4c5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -796,9 +796,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
goto err_free;
}
- qp->db.db[0] = 0;
- qp->db.db[1] = 0;
-
qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
@@ -1162,10 +1159,11 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return;
+
if (qp->state != IB_QPS_RESET) {
mlx5_ib_qp_disable_pagefaults(qp);
if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
- MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
+ MLX5_QP_STATE_RST, in, 0, &qp->mqp))
mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
qp->mqp.qpn);
}
@@ -1394,7 +1392,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
- pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+ pr_err("sgid_index (%u) too large. max is %d\n",
ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 41fec66217dd..02d77a29764d 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -165,8 +165,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return err;
}
- *srq->db.db = 0;
-
if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
mlx5_ib_dbg(dev, "buf alloc failed\n");
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index d0ba264ac1ed..76fb7b927d37 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b2a6dc8ea99..9f9d5c563a61 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+ [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6f09a72e78d7..72b43417cbe3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic,
memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
}
+static void record_sockaddr_info(struct sockaddr_storage *addr_info,
+ nes_addr_t *ip_addr, u16 *port_num)
+{
+ struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
+
+ if (in_addr->sin_family == AF_INET) {
+ *ip_addr = ntohl(in_addr->sin_addr.s_addr);
+ *port_num = ntohs(in_addr->sin_port);
+ }
+}
+
/*
* nes_record_pm_msg - Save the received mapping info
*/
static void nes_record_pm_msg(struct nes_cm_info *cm_info,
struct iwpm_sa_data *pm_msg)
{
- struct sockaddr_in *mapped_loc_addr =
- (struct sockaddr_in *)&pm_msg->mapped_loc_addr;
- struct sockaddr_in *mapped_rem_addr =
- (struct sockaddr_in *)&pm_msg->mapped_rem_addr;
-
- if (mapped_loc_addr->sin_family == AF_INET) {
- cm_info->mapped_loc_addr =
- ntohl(mapped_loc_addr->sin_addr.s_addr);
- cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port);
- }
- if (mapped_rem_addr->sin_family == AF_INET) {
- cm_info->mapped_rem_addr =
- ntohl(mapped_rem_addr->sin_addr.s_addr);
- cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port);
- }
+ record_sockaddr_info(&pm_msg->mapped_loc_addr,
+ &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
+
+ record_sockaddr_info(&pm_msg->mapped_rem_addr,
+ &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
+}
+
+/*
+ * nes_get_remote_addr - Get the address info of the remote connecting peer
+ */
+static int nes_get_remote_addr(struct nes_cm_node *cm_node)
+{
+ struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
+ struct sockaddr_storage remote_addr;
+ int ret;
+
+ nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
+ htons(cm_node->mapped_loc_port), &mapped_loc_addr);
+ nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
+ htons(cm_node->mapped_rem_port), &mapped_rem_addr);
+
+ ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
+ &remote_addr, RDMA_NL_NES);
+ if (ret)
+ nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
+ else
+ record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
+ &cm_node->rem_port);
+ return ret;
}
/**
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
return NULL;
/* set our node specific transport info */
- cm_node->loc_addr = cm_info->loc_addr;
+ if (listener) {
+ cm_node->loc_addr = listener->loc_addr;
+ cm_node->loc_port = listener->loc_port;
+ } else {
+ cm_node->loc_addr = cm_info->loc_addr;
+ cm_node->loc_port = cm_info->loc_port;
+ }
cm_node->rem_addr = cm_info->rem_addr;
- cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
cm_node->state = NES_CM_STATE_ESTABLISHED;
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ nes_get_remote_addr(cm_node);
handle_rcv_mpa(cm_node, skb);
} else { /* rcvd ACK only */
dev_kfree_skb_any(skb);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index ffd48bfc4923..7df16f74bb45 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -903,7 +903,7 @@ struct qib_devdata {
/* PCI Device ID (here for NodeInfo) */
u16 deviceid;
/* for write combining settings */
- unsigned long wc_cookie;
+ int wc_cookie;
unsigned long wc_base;
unsigned long wc_len;
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
-extern unsigned qib_wc_pat;
extern unsigned qib_cc_table_size;
int qib_init(struct qib_devdata *, int);
int init_chip_wc_pat(struct qib_devdata *dd, u32);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 41937c6f888a..725881890c4a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -39,11 +39,11 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
-#include <linux/aio.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/uio.h>
#include "qib.h"
#include "qib_common.h"
@@ -55,15 +55,19 @@
static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
-static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
- unsigned long, loff_t);
+static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);
+/*
+ * This is really, really weird shit - write() and writev() here
+ * have completely unrelated semantics. Sucky userland ABI,
+ * film at 11.
+ */
static const struct file_operations qib_file_ops = {
.owner = THIS_MODULE,
.write = qib_write,
- .aio_write = qib_aio_write,
+ .write_iter = qib_write_iter,
.open = qib_open,
.release = qib_close,
.poll = qib_poll,
@@ -831,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
vma->vm_flags &= ~VM_MAYREAD;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- if (qib_wc_pat)
+ /* We used PAT if wc_cookie == 0 */
+ if (!dd->wc_cookie)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
@@ -2249,17 +2254,16 @@ bail:
return ret;
}
-static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long dim, loff_t off)
+static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct qib_filedata *fp = iocb->ki_filp->private_data;
struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
struct qib_user_sdma_queue *pq = fp->pq;
- if (!dim || !pq)
+ if (!iter_is_iovec(from) || !from->nr_segs || !pq)
return -EINVAL;
-
- return qib_user_sdma_writev(rcd, pq, iov, dim);
+
+ return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
}
static struct class *qib_class;
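The qib hunk above is the common ->aio_write to ->write_iter conversion: the handler now receives a struct iov_iter, rejects anything that is not a plain iovec-backed iterator, and hands the segments to the existing writev-style helper. A minimal sketch of that shape follows; everything named "my_*" is hypothetical (qib's real helper is qib_user_sdma_writev()), and the ->iov/->nr_segs fields match the iov_iter layout of this kernel generation.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uio.h>

struct my_ctx { int dummy; };	/* stand-in for the driver's per-file state */

static ssize_t my_sdma_writev(struct my_ctx *ctx, const struct iovec *iov,
			      unsigned long nr_segs)
{
	return 0;	/* stub: the real driver consumes the iovecs here */
}

static ssize_t my_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct my_ctx *ctx = iocb->ki_filp->private_data;

	/* This ABI only makes sense for plain user iovecs */
	if (!iter_is_iovec(from) || !from->nr_segs || !ctx)
		return -EINVAL;

	return my_sdma_writev(ctx, from->iov, from->nr_segs);
}

static const struct file_operations my_fops = {
	.owner      = THIS_MODULE,
	.write_iter = my_write_iter,	/* replaces the old .aio_write */
};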
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 650897a8591e..bdd5d3857203 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -89,14 +89,14 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
- error = qibfs_mknod(parent->d_inode, *dentry,
+ error = qibfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
else
error = PTR_ERR(*dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return error;
}
@@ -455,10 +455,10 @@ static int remove_file(struct dentry *parent, char *name)
}
spin_lock(&tmp->d_lock);
- if (!d_unhashed(tmp) && tmp->d_inode) {
+ if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
- simple_unlink(parent->d_inode, tmp);
+ simple_unlink(d_inode(parent), tmp);
} else {
spin_unlock(&tmp->d_lock);
}
@@ -481,7 +481,7 @@ static int remove_device_files(struct super_block *sb,
int ret, i;
root = dget(sb->s_root);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
snprintf(unit, sizeof(unit), "%u", dd->unit);
dir = lookup_one_len(unit, root, strlen(unit));
@@ -491,7 +491,7 @@ static int remove_device_files(struct super_block *sb,
goto bail;
}
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock(&d_inode(dir)->i_mutex);
remove_file(dir, "counters");
remove_file(dir, "counter_names");
remove_file(dir, "portcounter_names");
@@ -506,13 +506,13 @@ static int remove_device_files(struct super_block *sb,
}
}
remove_file(dir, "flash");
- mutex_unlock(&dir->d_inode->i_mutex);
- ret = simple_rmdir(root->d_inode, dir);
+ mutex_unlock(&d_inode(dir)->i_mutex);
+ ret = simple_rmdir(d_inode(root), dir);
d_delete(dir);
dput(dir);
bail:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(root);
return ret;
}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 0d2ba59af30a..4b927809d1a1 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd)
qib_6120_config_ctxts(dd);
qib_set_ctxtcnt(dd);
- if (qib_wc_pat) {
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- }
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
set_6120_baseaddrs(dd); /* set chip access pointers now */
ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 22affda8af88..00b2af211157 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
qib_7220_config_ctxts(dd);
qib_set_ctxtcnt(dd); /* needed for PAT setup */
- if (qib_wc_pat) {
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- }
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
set_7220_baseaddrs(dd); /* set chip access pointers now */
ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ef97b71c8f7d..f32b4628e991 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
unsigned features, pidx, sbufcnt;
int ret, mtu;
u32 sbufs, updthresh;
+ resource_size_t vl15off;
/* pport structs are contiguous, allocated after devdata */
ppd = (struct qib_pportdata *)(dd + 1);
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
qib_7322_config_ctxts(dd);
qib_set_ctxtcnt(dd);
- if (qib_wc_pat) {
- resource_size_t vl15off;
- /*
- * We do not set WC on the VL15 buffers to avoid
- * a rare problem with unaligned writes from
- * interrupt-flushed store buffers, so we need
- * to map those separately here. We can't solve
- * this for the rarely used mtrr case.
- */
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
+ /*
+ * We do not set WC on the VL15 buffers to avoid
+ * a rare problem with unaligned writes from
+ * interrupt-flushed store buffers, so we need
+ * to map those separately here. We can't solve
+ * this for the rarely used mtrr case.
+ */
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
- /* vl15 buffers start just after the 4k buffers */
- vl15off = dd->physaddr + (dd->piobufbase >> 32) +
- dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap_nocache(vl15off,
- NUM_VL15_BUFS * dd->align4k);
- if (!dd->piovl15base) {
- ret = -ENOMEM;
- goto bail;
- }
+ /* vl15 buffers start just after the 4k buffers */
+ vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+ dd->piobcnt4k * dd->align4k;
+ dd->piovl15base = ioremap_nocache(vl15off,
+ NUM_VL15_BUFS * dd->align4k);
+ if (!dd->piovl15base) {
+ ret = -ENOMEM;
+ goto bail;
}
+
qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 2ee36953e234..7e00470adc30 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-/*
- * qib_wc_pat parameter:
- * 0 is WC via MTRR
- * 1 is WC via PAT
- * If PAT initialization fails, code reverts back to MTRR
- */
-unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
-module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
-MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
static void verify_interrupt(unsigned long);
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
spin_unlock(&dd->pport[pidx].cc_shadow_lock);
}
- if (!qib_wc_pat)
- qib_disable_wc(dd);
+ qib_disable_wc(dd);
if (dd->pioavailregs_dma) {
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto bail;
}
- if (!qib_wc_pat) {
- ret = qib_enable_wc(dd);
- if (ret) {
- qib_dev_err(dd,
- "Write combining not enabled (err %d): performance may be poor\n",
- -ret);
- ret = 0;
- }
+ ret = qib_enable_wc(dd);
+ if (ret) {
+ qib_dev_err(dd,
+ "Write combining not enabled (err %d): performance may be poor\n",
+ -ret);
+ ret = 0;
}
qib_verify_pioperf(dd);
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 81b225f2300a..edd0ddbd4481 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -116,21 +116,10 @@ int qib_enable_wc(struct qib_devdata *dd)
}
if (!ret) {
- int cookie;
-
- cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
- if (cookie < 0) {
- {
- qib_devinfo(dd->pcidev,
- "mtrr_add() WC for PIO bufs failed (%d)\n",
- cookie);
- ret = -EINVAL;
- }
- } else {
- dd->wc_cookie = cookie;
- dd->wc_base = (unsigned long) pioaddr;
- dd->wc_len = (unsigned long) piolen;
- }
+ dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
+ if (dd->wc_cookie < 0)
+ /* use error from routine */
+ ret = dd->wc_cookie;
}
return ret;
@@ -142,18 +131,7 @@ int qib_enable_wc(struct qib_devdata *dd)
*/
void qib_disable_wc(struct qib_devdata *dd)
{
- if (dd->wc_cookie) {
- int r;
-
- r = mtrr_del(dd->wc_cookie, dd->wc_base,
- dd->wc_len);
- if (r < 0)
- qib_devinfo(dd->pcidev,
- "mtrr_del(%lx, %lx, %lx) failed: %d\n",
- dd->wc_cookie, dd->wc_base,
- dd->wc_len, r);
- dd->wc_cookie = 0; /* even on failure */
- }
+ arch_phys_wc_del(dd->wc_cookie);
}
/**
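The qib_wc_x86_64.c hunks above replace open-coded mtrr_add()/mtrr_del() with the arch_phys_wc_add()/arch_phys_wc_del() helpers. The contract the new qib code relies on: the returned cookie is negative on error, zero when PAT already provides write-combining (so no MTRR is consumed), and positive when an MTRR slot was taken; arch_phys_wc_del() accepts any of those values. A small sketch of the pairing, with the "my_*" wrappers being illustrative only:

#include <linux/io.h>

static int my_wc_cookie;

static int my_enable_wc(unsigned long base, unsigned long len)
{
	my_wc_cookie = arch_phys_wc_add(base, len);
	if (my_wc_cookie < 0)
		return my_wc_cookie;	/* propagate the error, as qib does */

	/*
	 * cookie == 0 means PAT is in use; map the region with
	 * pgprot_writecombine()/ioremap_wc() as usual.
	 */
	return 0;
}

static void my_disable_wc(void)
{
	/* Safe for negative, zero, or positive cookies. */
	arch_phys_wc_del(my_wc_cookie);
	my_wc_cookie = 0;
}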
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index d7562beb5423..bd94b0a6e9e5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -87,7 +87,6 @@ enum {
IPOIB_FLAG_ADMIN_UP = 2,
IPOIB_PKEY_ASSIGNED = 3,
IPOIB_FLAG_SUBINTERFACE = 5,
- IPOIB_MCAST_RUN = 6,
IPOIB_STOP_REAPER = 7,
IPOIB_FLAG_ADMIN_CM = 9,
IPOIB_FLAG_UMCAST = 10,
@@ -98,9 +97,15 @@ enum {
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
IPOIB_MCAST_FLAG_SENDONLY = 1,
- IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
+ /*
+ * For IPOIB_MCAST_FLAG_BUSY:
+ * When set, a join is in flight and mcast->mc is unreliable.
+ * When clear and mcast->mc is IS_ERR_OR_NULL, the join needs to be
+ * restarted or hasn't been started yet.
+ * When clear and mcast->mc is a valid pointer, the join was successful.
+ */
+ IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -148,6 +153,7 @@ struct ipoib_mcast {
unsigned long created;
unsigned long backoff;
+ unsigned long delay_until;
unsigned long flags;
unsigned char logcount;
@@ -292,6 +298,11 @@ struct ipoib_neigh_table {
struct completion deleted;
};
+struct ipoib_qp_state_validate {
+ struct work_struct work;
+ struct ipoib_dev_priv *priv;
+};
+
/*
* Device private locking: network stack tx_lock protects members used
* in TX fast path, lock protects everything else. lock nests inside
@@ -317,6 +328,7 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
+ struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
@@ -426,11 +438,6 @@ struct ipoib_neigh {
#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
-static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
-{
- return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
-}
-
void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
{
@@ -477,10 +484,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
-int ipoib_ib_dev_open(struct net_device *dev, int flush);
+int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev, int flush);
+int ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -492,7 +499,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
+int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 933efcea0d03..cf32a778e7d0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
rx->rx_ring[i].mapping,
GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
- ret = -ENOMEM;
- goto err_count;
+ ret = -ENOMEM;
+ goto err_count;
}
ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
if (ret) {
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
}
spin_lock_irq(&priv->lock);
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
}
return;
}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
- queue_work(ipoib_workqueue, &priv->cm.start_task);
+ queue_work(priv->wq, &priv->cm.start_task);
return tx;
}
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
- queue_work(ipoib_workqueue, &priv->cm.skb_task);
+ queue_work(priv->wq, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
}
if (!list_empty(&priv->cm.passive_ids))
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 72626c348174..63b92cbb29ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -94,39 +94,9 @@ void ipoib_free_ah(struct kref *kref)
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
u64 mapping[IPOIB_UD_RX_SG])
{
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
- DMA_FROM_DEVICE);
- ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
- DMA_FROM_DEVICE);
- } else
- ib_dma_unmap_single(priv->ca, mapping[0],
- IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
- DMA_FROM_DEVICE);
-}
-
-static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
- struct sk_buff *skb,
- unsigned int length)
-{
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
- unsigned int size;
- /*
- * There is only two buffers needed for max_payload = 4K,
- * first buf size is IPOIB_UD_HEAD_SIZE
- */
- skb->tail += IPOIB_UD_HEAD_SIZE;
- skb->len += length;
-
- size = length - IPOIB_UD_HEAD_SIZE;
-
- skb_frag_size_set(frag, size);
- skb->data_len += size;
- skb->truesize += PAGE_SIZE;
- } else
- skb_put(skb, length);
-
+ ib_dma_unmap_single(priv->ca, mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
}
static int ipoib_ib_post_receive(struct net_device *dev, int id)
@@ -156,18 +126,11 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int buf_size;
- int tailroom;
u64 *mapping;
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- buf_size = IPOIB_UD_HEAD_SIZE;
- tailroom = 128; /* reserve some tailroom for IP/TCP headers */
- } else {
- buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- tailroom = 0;
- }
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + tailroom + 4);
+ skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
if (unlikely(!skb))
return NULL;
@@ -184,23 +147,8 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
goto error;
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- struct page *page = alloc_page(GFP_ATOMIC);
- if (!page)
- goto partial_error;
- skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
- mapping[1] =
- ib_dma_map_page(priv->ca, page,
- 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
- goto partial_error;
- }
-
priv->rx_ring[id].skb = skb;
return skb;
-
-partial_error:
- ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
dev_kfree_skb_any(skb);
return NULL;
@@ -278,7 +226,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
wc->byte_len, wc->slid);
ipoib_ud_dma_unmap_rx(priv, mapping);
- ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
+
+ skb_put(skb, wc->byte_len);
/* First byte of dgid signals multicast when 0xff */
dgid = &((struct ib_grh *)skb->data)->dgid;
@@ -296,6 +245,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb->truesize = SKB_TRUESIZE(skb->len);
+
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
@@ -376,6 +327,51 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
}
}
+/*
+ * As a result of a completion error, the QP can be transitioned to the SQE state.
+ * This function checks whether the (send) QP is in the SQE state and, if so,
+ * moves it back to the RTS state so that it is functional again.
+ */
+static void ipoib_qp_state_validate_work(struct work_struct *work)
+{
+ struct ipoib_qp_state_validate *qp_work =
+ container_of(work, struct ipoib_qp_state_validate, work);
+
+ struct ipoib_dev_priv *priv = qp_work->priv;
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
+ __func__, ret);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x is in state: %d\n",
+ __func__, priv->qp->qp_num, qp_attr.qp_state);
+
+ /* currently only the SQE->RTS transition is supported */
+ if (qp_attr.qp_state == IB_QPS_SQE) {
+ qp_attr.qp_state = IB_QPS_RTS;
+
+ ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
+ if (ret) {
+ pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
+ ret, priv->qp->qp_num);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
+ __func__, priv->qp->qp_num);
+ } else {
+ pr_warn("QP (%d) will stay in state: %d\n",
+ priv->qp->qp_num, qp_attr.qp_state);
+ }
+
+free_res:
+ kfree(qp_work);
+}
+
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -407,10 +403,22 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR)
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct ipoib_qp_state_validate *qp_work;
ipoib_warn(priv, "failed send event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
+ qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
+ if (!qp_work) {
+ ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
+ __func__, priv->qp->qp_num);
+ return;
+ }
+
+ INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
+ qp_work->priv = priv;
+ queue_work(priv->wq, &qp_work->work);
+ }
}
static int poll_tx(struct ipoib_dev_priv *priv)
@@ -655,16 +663,33 @@ void ipoib_reap_ah(struct work_struct *work)
__ipoib_reap_ah(dev);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
+static void ipoib_flush_ah(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ cancel_delayed_work(&priv->ah_reap_task);
+ flush_workqueue(priv->wq);
+ ipoib_reap_ah(&priv->ah_reap_task.work);
+}
+
+static void ipoib_stop_ah(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ set_bit(IPOIB_STOP_REAPER, &priv->flags);
+ ipoib_flush_ah(dev);
+}
+
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
drain_tx_cq((struct net_device *)ctx);
}
-int ipoib_ib_dev_open(struct net_device *dev, int flush)
+int ipoib_ib_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
@@ -696,7 +721,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +731,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev, flush);
+ ipoib_ib_dev_stop(dev);
return -1;
}
@@ -738,7 +763,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev, int flush)
+int ipoib_ib_dev_down(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -747,7 +772,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev, flush);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -807,7 +832,7 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev, int flush)
+int ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
@@ -877,24 +902,7 @@ timeout:
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
- /* Wait for all AHs to be reaped */
- set_bit(IPOIB_STOP_REAPER, &priv->flags);
- cancel_delayed_work(&priv->ah_reap_task);
- if (flush)
- flush_workqueue(ipoib_workqueue);
-
- begin = jiffies;
-
- while (!list_empty(&priv->dead_ahs)) {
- __ipoib_reap_ah(dev);
-
- if (time_after(jiffies, begin + HZ)) {
- ipoib_warn(priv, "timing out; will leak address handles\n");
- break;
- }
-
- msleep(1);
- }
+ ipoib_flush_ah(dev);
ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
@@ -918,7 +926,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
@@ -1037,15 +1045,16 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (level == IPOIB_FLUSH_LIGHT) {
ipoib_mark_paths_invalid(dev);
ipoib_mcast_dev_flush(dev);
+ ipoib_flush_ah(dev);
}
if (level >= IPOIB_FLUSH_NORMAL)
- ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- ipoib_ib_dev_stop(dev, 0);
- if (ipoib_ib_dev_open(dev, 0) != 0)
+ ipoib_ib_dev_stop(dev);
+ if (ipoib_ib_dev_open(dev) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1097,9 +1106,17 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev, 1);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
+ /*
+ * Our ah references are not all freed until after
+ * ipoib_mcast_dev_flush(), ipoib_flush_paths(), and
+ * the neighbor garbage collection have run and been reaped.
+ * That should all be done now, so make a final ah flush.
+ */
+ ipoib_stop_ah(dev);
+
ipoib_transport_dev_cleanup(dev);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 58b5aa3b6f2d..9e1b203d756d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
return 0;
err_stop:
- ipoib_ib_dev_stop(dev, 1);
+ ipoib_ib_dev_stop(dev);
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev, 1);
- ipoib_ib_dev_stop(dev, 0);
+ ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_stop(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -640,8 +640,10 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
if (!path->query && path_rec_start(dev, path))
goto err_path;
-
- __skb_queue_tail(&neigh->queue, skb);
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ __skb_queue_tail(&neigh->queue, skb);
+ else
+ goto err_drop;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -676,7 +678,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
new_path = 1;
}
if (path) {
- __skb_queue_tail(&path->queue, skb);
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
if (!path->query && path_rec_start(dev, path)) {
spin_unlock_irqrestore(&priv->lock, flags);
@@ -839,7 +846,19 @@ static void ipoib_set_mcast_list(struct net_device *dev)
return;
}
- queue_work(ipoib_workqueue, &priv->restart_task);
+ queue_work(priv->wq, &priv->restart_task);
+}
+
+static int ipoib_get_iflink(const struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ /* parent interface */
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ return dev->ifindex;
+
+ /* child/vlan interface */
+ return priv->parent->ifindex;
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +973,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
@@ -1133,7 +1152,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
/* start garbage collection */
clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
@@ -1262,15 +1281,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- if (ipoib_neigh_hash_init(priv) < 0)
- goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out_neigh_hash_cleanup;
+ goto out;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1285,16 +1302,24 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_init(dev, ca, port))
goto out_tx_ring_cleanup;
+ /*
+ * Must be after ipoib_ib_dev_init so we can allocate a per
+ * device wq there and use it here
+ */
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out_dev_uninit;
+
return 0;
+out_dev_uninit:
+ ipoib_ib_dev_cleanup(dev);
+
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
-out_neigh_hash_cleanup:
- ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -1317,6 +1342,12 @@ void ipoib_dev_cleanup(struct net_device *dev)
}
unregister_netdevice_many(&head);
+ /*
+ * Must be before ipoib_ib_dev_cleanup or we delete an in use
+ * work queue
+ */
+ ipoib_neigh_hash_uninit(dev);
+
ipoib_ib_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1324,8 +1355,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
-
- ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -1341,6 +1370,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
.ndo_start_xmit = ipoib_start_xmit,
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
};
void ipoib_setup(struct net_device *dev)
@@ -1633,10 +1663,11 @@ sysfs_failed:
register_failed:
ib_unregister_event_handler(&priv->event_handler);
+ flush_workqueue(ipoib_workqueue);
/* Stop GC if started before flush */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1699,6 +1730,7 @@ static void ipoib_remove_one(struct ib_device *device)
list_for_each_entry_safe(priv, tmp, dev_list, list) {
ib_unregister_event_handler(&priv->event_handler);
+ flush_workqueue(ipoib_workqueue);
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
@@ -1707,7 +1739,7 @@ static void ipoib_remove_one(struct ib_device *device)
/* Stop GC */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
free_netdev(priv->dev);
@@ -1742,14 +1774,16 @@ static int __init ipoib_init_module(void)
return ret;
/*
- * We create our own workqueue mainly because we want to be
- * able to flush it when devices are being removed. We can't
- * use schedule_work()/flush_scheduled_work() because both
- * unregister_netdev() and linkwatch_event take the rtnl lock,
- * so flush_scheduled_work() can deadlock during device
- * removal.
+ * We create a global workqueue here that is used for all flush
+ * operations. However, if you attempt to flush a workqueue
+ * from a task on that same workqueue, it deadlocks the system.
+ * We want to be able to flush the tasks associated with a
+ * specific net device, so we also create a workqueue for each
+ * netdevice. We queue up the tasks for that device only on
+ * its private workqueue, and we only queue up flush events
+ * on our global flush workqueue. This avoids the deadlocks.
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib");
+ ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
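The comment above describes the new split between the single global flush workqueue ("ipoib_flush") and a per-device priv->wq: device work is only ever queued on its own workqueue, and flushes of that workqueue are only ever run from the global one, so no task ends up flushing the queue it is running on. A compact sketch of that arrangement; all "my_*" names are illustrative, not ipoib's.

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *flush_wq;	/* global, like ipoib_workqueue */

struct my_dev_priv {
	struct workqueue_struct *wq;		/* per-device, like priv->wq */
	struct work_struct dev_task;		/* device work, queued on ->wq */
	struct work_struct flush_task;		/* queued on flush_wq only */
};

static void my_dev_work(struct work_struct *work)
{
	/* ordinary per-device work runs here, on priv->wq */
}

static void my_flush_work(struct work_struct *work)
{
	struct my_dev_priv *priv =
		container_of(work, struct my_dev_priv, flush_task);

	/* Safe: this runs on flush_wq, so it never flushes its own queue. */
	flush_workqueue(priv->wq);
}

static int my_dev_init(struct my_dev_priv *priv)
{
	priv->wq = create_singlethread_workqueue("my_dev_wq");
	if (!priv->wq)
		return -ENOMEM;
	INIT_WORK(&priv->dev_task, my_dev_work);
	INIT_WORK(&priv->flush_task, my_flush_work);
	return 0;
}

static int __init my_init(void)
{
	flush_wq = create_singlethread_workqueue("my_flush");
	return flush_wq ? 0 : -ENOMEM;
}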
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ffb83b5f7e80..0d23e0568deb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -55,8 +55,6 @@ MODULE_PARM_DESC(mcast_debug_level,
"Enable multicast debug tracing if > 0");
#endif
-static DEFINE_MUTEX(mcast_mutex);
-
struct ipoib_mcast_iter {
struct net_device *dev;
union ib_gid mgid;
@@ -66,6 +64,48 @@ struct ipoib_mcast_iter {
unsigned int send_only;
};
+/*
+ * This should be called with the priv->lock held
+ */
+static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
+ struct ipoib_mcast *mcast,
+ bool delay)
+{
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+
+ /*
+ * We will be scheduling *something*, so cancel whatever is
+ * currently scheduled first
+ */
+ cancel_delayed_work(&priv->mcast_task);
+ if (mcast && delay) {
+ /*
+ * We had a failure and want to schedule a retry later
+ */
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ mcast->delay_until = jiffies + (mcast->backoff * HZ);
+ /*
+ * Mark this mcast for its delay, but restart the
+ * task immediately. The join task will make sure to
+ * clear out all entries without delays, and then
+ * schedule itself to run again when the earliest
+ * delay expires
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ } else if (delay) {
+ /*
+ * Special case of retrying after a failure to
+ * allocate the broadcast multicast group, wait
+ * 1 second and try again
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
+ } else
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+}
+
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
struct net_device *dev = mcast->dev;
@@ -103,6 +143,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
mcast->dev = dev;
mcast->created = jiffies;
+ mcast->delay_until = jiffies;
mcast->backoff = 1;
INIT_LIST_HEAD(&mcast->list);
@@ -185,17 +226,27 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
spin_unlock_irq(&priv->lock);
return -EAGAIN;
}
- priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ /* update priv members according to the new mcast */
+ priv->broadcast->mcmember.qkey = mcmember->qkey;
+ priv->broadcast->mcmember.mtu = mcmember->mtu;
+ priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
+ priv->broadcast->mcmember.rate = mcmember->rate;
+ priv->broadcast->mcmember.sl = mcmember->sl;
+ priv->broadcast->mcmember.flow_label = mcmember->flow_label;
+ priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
+ /* assume that if the admin and mcast MTUs are equal, both can be changed */
+ if (priv->mcast_mtu == priv->admin_mtu)
+ priv->admin_mtu =
+ priv->mcast_mtu =
+ IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ else
+ priv->mcast_mtu =
+ IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
set_qkey = 1;
-
- if (!ipoib_cm_admin_enabled(dev)) {
- rtnl_lock();
- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
- rtnl_unlock();
- }
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -270,107 +321,35 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
return 0;
}
-static int
-ipoib_mcast_sendonly_join_complete(int status,
- struct ib_sa_multicast *multicast)
-{
- struct ipoib_mcast *mcast = multicast->context;
- struct net_device *dev = mcast->dev;
-
- /* We trap for port events ourselves. */
- if (status == -ENETRESET)
- return 0;
-
- if (!status)
- status = ipoib_mcast_join_finish(mcast, &multicast->rec);
-
- if (status) {
- if (mcast->logcount++ < 20)
- ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
-
- /* Flush out any queued packets */
- netif_tx_lock_bh(dev);
- while (!skb_queue_empty(&mcast->pkt_queue)) {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
- }
- netif_tx_unlock_bh(dev);
-
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
- &mcast->flags);
- }
- return status;
-}
-
-static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
-{
- struct net_device *dev = mcast->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_sa_mcmember_rec rec = {
-#if 0 /* Some SMs don't support send-only yet */
- .join_state = 4
-#else
- .join_state = 1
-#endif
- };
- int ret = 0;
-
- if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
- return -ENODEV;
- }
-
- if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
- ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
- return -EBUSY;
- }
-
- rec.mgid = mcast->mcmember.mgid;
- rec.port_gid = priv->local_gid;
- rec.pkey = cpu_to_be16(priv->pkey);
-
- mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
- priv->port, &rec,
- IB_SA_MCMEMBER_REC_MGID |
- IB_SA_MCMEMBER_REC_PORT_GID |
- IB_SA_MCMEMBER_REC_PKEY |
- IB_SA_MCMEMBER_REC_JOIN_STATE,
- GFP_ATOMIC,
- ipoib_mcast_sendonly_join_complete,
- mcast);
- if (IS_ERR(mcast->mc)) {
- ret = PTR_ERR(mcast->mc);
- clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
- ret);
- } else {
- ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
- mcast->mcmember.mgid.raw);
- }
-
- return ret;
-}
-
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
carrier_on_task);
struct ib_port_attr attr;
- /*
- * Take rtnl_lock to avoid racing with ipoib_stop() and
- * turning the carrier back on while a device is being
- * removed.
- */
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
- rtnl_lock();
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed. However, ipoib_stop() will attempt to flush
+ * the workqueue while holding the rtnl lock, so loop
+ * on trylock until either we get the lock or we see
+ * FLAG_OPER_UP go away as that signals that we are bailing
+ * and can safely ignore the carrier on work.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
+ if (!ipoib_cm_admin_enabled(priv->dev))
+ dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
netif_carrier_on(priv->dev);
rtnl_unlock();
}
@@ -382,7 +361,9 @@ static int ipoib_mcast_join_complete(int status,
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
- ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
+ ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
+ "sendonly " : "",
mcast->mcmember.mgid.raw, status);
/* We trap for port events ourselves. */
@@ -396,49 +377,74 @@ static int ipoib_mcast_join_complete(int status,
if (!status) {
mcast->backoff = 1;
- mutex_lock(&mcast_mutex);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, 0);
- mutex_unlock(&mcast_mutex);
+ mcast->delay_until = jiffies;
/*
- * Defer carrier on work to ipoib_workqueue to avoid a
- * deadlock on rtnl_lock here.
+ * Defer carrier on work to priv->wq to avoid a
+ * deadlock on rtnl_lock here. Requeue our multicast
+ * work too, which will end up happening right after
+ * our carrier on task work and will allow us to
+ * send out all of the non-broadcast joins
*/
- if (mcast == priv->broadcast)
- queue_work(ipoib_workqueue, &priv->carrier_on_task);
-
- status = 0;
- goto out;
- }
+ if (mcast == priv->broadcast) {
+ spin_lock_irq(&priv->lock);
+ queue_work(priv->wq, &priv->carrier_on_task);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ goto out_locked;
+ }
+ } else {
+ if (mcast->logcount++ < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN) {
+ ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ }
+ }
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
- ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ mcast->backoff >= 2) {
+ /*
+ * We only retry sendonly joins once before we drop
+ * the packet and quit trying to deal with the
+ * group. However, we leave the group in the
+ * mcast list as an unjoined group. If we want to
+ * try joining again, we simply queue up a packet
+ * and restart the join thread. The empty queue
+ * is why the join thread ignores this group.
+ */
+ mcast->backoff = 1;
+ netif_tx_lock_bh(dev);
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+ }
+ netif_tx_unlock_bh(dev);
} else {
- ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
+ spin_lock_irq(&priv->lock);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ goto out_locked;
}
}
-
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
-
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-
- mutex_lock(&mcast_mutex);
+out:
spin_lock_irq(&priv->lock);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
- mcast->backoff * HZ);
+out_locked:
+ /*
+ * Make sure to set mcast->mc before we clear the busy flag to avoid
+ * racing with code that checks for BUSY before checking mcast->mc
+ */
+ if (status)
+ mcast->mc = NULL;
+ else
+ mcast->mc = multicast;
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
- mutex_unlock(&mcast_mutex);
-out:
complete(&mcast->done);
+
return status;
}
@@ -446,6 +452,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
int create)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ib_sa_multicast *multicast;
struct ib_sa_mcmember_rec rec = {
.join_state = 1
};
@@ -487,29 +494,18 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
-
- mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
+ multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
- if (IS_ERR(mcast->mc)) {
+ if (IS_ERR(multicast)) {
+ ret = PTR_ERR(multicast);
+ ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
+ spin_lock_irq(&priv->lock);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ spin_unlock_irq(&priv->lock);
complete(&mcast->done);
- ret = PTR_ERR(mcast->mc);
- ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
-
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
-
- mutex_lock(&mcast_mutex);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task,
- mcast->backoff * HZ);
- mutex_unlock(&mcast_mutex);
}
}
@@ -519,8 +515,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;
struct ib_port_attr port_attr;
+ unsigned long delay_until = 0;
+ struct ipoib_mcast *mcast = NULL;
+ int create = 1;
- if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
if (ib_query_port(priv->ca, priv->port, &port_attr) ||
@@ -536,93 +535,118 @@ void ipoib_mcast_join_task(struct work_struct *work)
else
memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ spin_lock_irq(&priv->lock);
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ goto out;
+
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
- if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- return;
-
- broadcast = ipoib_mcast_alloc(dev, 1);
+ broadcast = ipoib_mcast_alloc(dev, 0);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
- mutex_lock(&mcast_mutex);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, HZ);
- mutex_unlock(&mcast_mutex);
- return;
+ /*
+ * Restart us after a 1 second delay to retry
+ * creating our broadcast group and attaching to
+ * it. Until this succeeds, this ipoib dev is
+ * completely stalled (multicast-wise).
+ */
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 1);
+ goto out;
}
- spin_lock_irq(&priv->lock);
memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
sizeof (union ib_gid));
priv->broadcast = broadcast;
__ipoib_mcast_add(dev, priv->broadcast);
- spin_unlock_irq(&priv->lock);
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
- ipoib_mcast_join(dev, priv->broadcast, 0);
- return;
- }
-
- while (1) {
- struct ipoib_mcast *mcast = NULL;
-
- spin_lock_irq(&priv->lock);
- list_for_each_entry(mcast, &priv->multicast_list, list) {
- if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
- /* Found the next unjoined group */
- break;
+ if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
+ mcast = priv->broadcast;
+ create = 0;
+ if (mcast->backoff > 1 &&
+ time_before(jiffies, mcast->delay_until)) {
+ delay_until = mcast->delay_until;
+ mcast = NULL;
}
}
- spin_unlock_irq(&priv->lock);
+ goto out;
+ }
- if (&mcast->list == &priv->multicast_list) {
- /* All done */
- break;
+ /*
+ * We'll never get here until the broadcast group is both allocated
+ * and attached
+ */
+ list_for_each_entry(mcast, &priv->multicast_list, list) {
+ if (IS_ERR_OR_NULL(mcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
+ (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
+ !skb_queue_empty(&mcast->pkt_queue))) {
+ if (mcast->backoff == 1 ||
+ time_after_eq(jiffies, mcast->delay_until)) {
+ /* Found the next unjoined group */
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ create = 0;
+ else
+ create = 1;
+ spin_unlock_irq(&priv->lock);
+ ipoib_mcast_join(dev, mcast, create);
+ spin_lock_irq(&priv->lock);
+ } else if (!delay_until ||
+ time_before(mcast->delay_until, delay_until))
+ delay_until = mcast->delay_until;
}
-
- ipoib_mcast_join(dev, mcast, 1);
- return;
}
- ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
+ mcast = NULL;
+ ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");
- clear_bit(IPOIB_MCAST_RUN, &priv->flags);
+out:
+ if (delay_until) {
+ cancel_delayed_work(&priv->mcast_task);
+ queue_delayed_work(priv->wq, &priv->mcast_task,
+ delay_until - jiffies);
+ }
+ if (mcast) {
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ }
+ spin_unlock_irq(&priv->lock);
+ if (mcast)
+ ipoib_mcast_join(dev, mcast, create);
}
int ipoib_mcast_start_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
ipoib_dbg_mcast(priv, "starting multicast thread\n");
- mutex_lock(&mcast_mutex);
- if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
- mutex_unlock(&mcast_mutex);
+ spin_lock_irqsave(&priv->lock, flags);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
+int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
- mutex_lock(&mcast_mutex);
- clear_bit(IPOIB_MCAST_RUN, &priv->flags);
+ spin_lock_irqsave(&priv->lock, flags);
cancel_delayed_work(&priv->mcast_task);
- mutex_unlock(&mcast_mutex);
+ spin_unlock_irqrestore(&priv->lock, flags);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
return 0;
}
@@ -633,6 +657,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
+
+ if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -644,7 +671,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
be16_to_cpu(mcast->mcmember.mlid));
if (ret)
ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
- }
+ } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_dbg(priv, "leaving with no mcmember but not a "
+ "SENDONLY join\n");
return 0;
}
@@ -667,49 +696,37 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
}
mcast = __ipoib_mcast_find(dev, mgid);
- if (!mcast) {
- /* Let's create a new send only group now */
- ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
- mgid);
-
- mcast = ipoib_mcast_alloc(dev, 0);
+ if (!mcast || !mcast->ah) {
if (!mcast) {
- ipoib_warn(priv, "unable to allocate memory for "
- "multicast structure\n");
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- goto out;
- }
-
- set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
- memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
- __ipoib_mcast_add(dev, mcast);
- list_add_tail(&mcast->list, &priv->multicast_list);
- }
+ /* Let's create a new send only group now */
+ ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
+ mgid);
+
+ mcast = ipoib_mcast_alloc(dev, 0);
+ if (!mcast) {
+ ipoib_warn(priv, "unable to allocate memory "
+ "for multicast structure\n");
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
- if (!mcast->ah) {
+ set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
+ memcpy(mcast->mcmember.mgid.raw, mgid,
+ sizeof (union ib_gid));
+ __ipoib_mcast_add(dev, mcast);
+ list_add_tail(&mcast->list, &priv->multicast_list);
+ }
if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
skb_queue_tail(&mcast->pkt_queue, skb);
else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
-
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- ipoib_dbg_mcast(priv, "no address vector, "
- "but multicast join already started\n");
- else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- ipoib_mcast_sendonly_join(mcast);
-
- /*
- * If lookup completes between here and out:, don't
- * want to send packet twice.
- */
- mcast = NULL;
- }
-
-out:
- if (mcast && mcast->ah) {
+ if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ }
+ } else {
struct ipoib_neigh *neigh;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -759,9 +776,12 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* seperate between the wait to the leave*/
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -792,9 +812,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
unsigned long flags;
struct ib_sa_mcmember_rec rec;
- ipoib_dbg_mcast(priv, "restarting multicast task\n");
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ /*
+ * shortcut...on shutdown flush is called next, just
+ * let it do all the work
+ */
+ return;
- ipoib_mcast_stop_thread(dev, 0);
+ ipoib_dbg_mcast(priv, "restarting multicast task\n");
local_irq_save(flags);
netif_addr_lock(dev);
@@ -880,14 +905,27 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /* We have to cancel outside of the spinlock */
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
- if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- ipoib_mcast_start_thread(dev);
+ /*
+ * Double check that we are still up
+ */
+ if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c56d5d44c53b..e5cc43074196 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -157,6 +157,16 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
goto out_free_pd;
}
+ /*
+ * the various IPoIB tasks assume they will never race against
+ * themselves, so always use a single thread workqueue
+ */
+ priv->wq = create_singlethread_workqueue("ipoib_wq");
+ if (!priv->wq) {
+ printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
+ goto out_free_mr;
+ }
+
size = ipoib_recvq_size + 1;
ret = ipoib_cm_dev_init(dev);
if (!ret) {
@@ -165,12 +175,13 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
- }
+ } else
+ goto out_free_wq;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
if (IS_ERR(priv->recv_cq)) {
printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
- goto out_free_mr;
+ goto out_cm_dev_cleanup;
}
priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
@@ -216,15 +227,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
priv->rx_sge[0].lkey = priv->mr->lkey;
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
- priv->rx_sge[1].length = PAGE_SIZE;
- priv->rx_sge[1].lkey = priv->mr->lkey;
- priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
- } else {
- priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- priv->rx_wr.num_sge = 1;
- }
+
+ priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr.num_sge = 1;
+
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
@@ -236,12 +242,19 @@ out_free_send_cq:
out_free_recv_cq:
ib_destroy_cq(priv->recv_cq);
+out_cm_dev_cleanup:
+ ipoib_cm_dev_cleanup(dev);
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
out_free_mr:
ib_dereg_mr(priv->mr);
- ipoib_cm_dev_cleanup(dev);
out_free_pd:
ib_dealloc_pd(priv->pd);
+
return -ENODEV;
}
@@ -265,11 +278,18 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
ipoib_cm_dev_cleanup(dev);
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
+
if (ib_dereg_mr(priv->mr))
ipoib_warn(priv, "ib_dereg_mr failed\n");
if (ib_dealloc_pd(priv->pd))
ipoib_warn(priv, "ib_dealloc_pd failed\n");
+
}
void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9fad7b5ac8b9..fca1a882de27 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -58,6 +58,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+ priv->parent = ppriv->dev;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
result = ipoib_set_dev_features(priv, ppriv->ca);
@@ -84,8 +85,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto register_failed;
}
- priv->parent = ppriv->dev;
-
ipoib_create_debug_files(priv->dev);
/* RTNL childs don't need proprietary sysfs entries */
@@ -102,7 +101,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
}
priv->child_type = type;
- priv->dev->iflink = ppriv->dev->ifindex;
list_add_tail(&priv->list, &ppriv->child_intfs);
return 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index b47aea1094b2..262ba1f8ee50 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,7 +69,7 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "1.5"
+#define DRV_VER "1.6"
#define iser_dbg(fmt, arg...) \
do { \
@@ -218,22 +218,21 @@ enum iser_data_dir {
/**
* struct iser_data_buf - iSER data buffer
*
- * @buf: pointer to the sg list
+ * @sg: pointer to the sg list
* @size: num entries of this sg
* @data_len: total beffer byte len
* @dma_nents: returned by dma_map_sg
- * @copy_buf: allocated copy buf for SGs unaligned
- * for rdma which are copied
- * @sg_single: SG-ified clone of a non SG SC or
- * unaligned SG
+ * @orig_sg: pointer to the original sg list (in case
+ * we used a copy)
+ * @orig_size: num entries of orig sg list
*/
struct iser_data_buf {
- void *buf;
+ struct scatterlist *sg;
unsigned int size;
unsigned long data_len;
unsigned int dma_nents;
- char *copy_buf;
- struct scatterlist sg_single;
+ struct scatterlist *orig_sg;
+ unsigned int orig_size;
};
/* fwd declarations */
@@ -244,35 +243,14 @@ struct iscsi_endpoint;
/**
* struct iser_mem_reg - iSER memory registration info
*
- * @lkey: MR local key
- * @rkey: MR remote key
- * @va: MR start address (buffer va)
- * @len: MR length
+ * @sge: memory region sg element
+ * @rkey: memory region remote key
* @mem_h: pointer to registration context (FMR/Fastreg)
*/
struct iser_mem_reg {
- u32 lkey;
- u32 rkey;
- u64 va;
- u64 len;
- void *mem_h;
-};
-
-/**
- * struct iser_regd_buf - iSER buffer registration desc
- *
- * @reg: memory registration info
- * @virt_addr: virtual address of buffer
- * @device: reference to iser device
- * @direction: dma direction (for dma_unmap)
- * @data_size: data buffer size in bytes
- */
-struct iser_regd_buf {
- struct iser_mem_reg reg;
- void *virt_addr;
- struct iser_device *device;
- enum dma_data_direction direction;
- unsigned int data_size;
+ struct ib_sge sge;
+ u32 rkey;
+ void *mem_h;
};
enum iser_desc_type {
@@ -534,11 +512,9 @@ struct iser_conn {
* @sc: link to scsi command
* @command_sent: indicate if command was sent
* @dir: iser data direction
- * @rdma_regd: task rdma registration desc
+ * @rdma_reg: task rdma registration desc
* @data: iser data buffer desc
- * @data_copy: iser data copy buffer desc (bounce buffer)
* @prot: iser protection buffer desc
- * @prot_copy: iser protection copy buffer desc (bounce buffer)
*/
struct iscsi_iser_task {
struct iser_tx_desc desc;
@@ -547,11 +523,9 @@ struct iscsi_iser_task {
struct scsi_cmnd *sc;
int command_sent;
int dir[ISER_DIRS_NUM];
- struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];
+ struct iser_mem_reg rdma_reg[ISER_DIRS_NUM];
struct iser_data_buf data[ISER_DIRS_NUM];
- struct iser_data_buf data_copy[ISER_DIRS_NUM];
struct iser_data_buf prot[ISER_DIRS_NUM];
- struct iser_data_buf prot_copy[ISER_DIRS_NUM];
};
struct iser_page_vec {
@@ -621,7 +595,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
- struct iser_data_buf *mem_copy,
enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
@@ -634,10 +607,6 @@ int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *dst_addr,
int non_blocking);
-int iser_reg_page_vec(struct ib_conn *ib_conn,
- struct iser_page_vec *page_vec,
- struct iser_mem_reg *mem_reg);
-
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
@@ -667,4 +636,9 @@ int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
+struct fast_reg_descriptor *
+iser_reg_desc_get(struct ib_conn *ib_conn);
+void
+iser_reg_desc_put(struct ib_conn *ib_conn,
+ struct fast_reg_descriptor *desc);
#endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 20e859a6f1a6..3e2118e8ed87 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -50,7 +50,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
@@ -78,15 +78,15 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
iser_err("Failed to set up Data-IN RDMA\n");
return err;
}
- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
hdr->flags |= ISER_RSV;
- hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
- hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+ hdr->read_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->read_va = cpu_to_be64(mem_reg->sge.addr);
iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
- task->itt, regd_buf->reg.rkey,
- (unsigned long long)regd_buf->reg.va);
+ task->itt, mem_reg->rkey,
+ (unsigned long long)mem_reg->sge.addr);
return 0;
}
@@ -104,7 +104,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
@@ -134,25 +134,25 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
- hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
- hdr->write_va = cpu_to_be64(regd_buf->reg.va + unsol_sz);
+ hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
- task->itt, regd_buf->reg.rkey,
- (unsigned long long)regd_buf->reg.va, unsol_sz);
+ task->itt, mem_reg->rkey,
+ (unsigned long long)mem_reg->sge.addr, unsol_sz);
}
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
- tx_dsg->addr = regd_buf->reg.va;
+ tx_dsg->addr = mem_reg->sge.addr;
tx_dsg->length = imm_sz;
- tx_dsg->lkey = regd_buf->reg.lkey;
+ tx_dsg->lkey = mem_reg->sge.lkey;
iser_task->desc.num_sge = 2;
}
@@ -401,16 +401,16 @@ int iser_send_command(struct iscsi_conn *conn,
}
if (scsi_sg_count(sc)) { /* using a scatter list */
- data_buf->buf = scsi_sglist(sc);
+ data_buf->sg = scsi_sglist(sc);
data_buf->size = scsi_sg_count(sc);
}
data_buf->data_len = scsi_bufflen(sc);
if (scsi_prot_sg_count(sc)) {
- prot_buf->buf = scsi_prot_sglist(sc);
+ prot_buf->sg = scsi_prot_sglist(sc);
prot_buf->size = scsi_prot_sg_count(sc);
- prot_buf->data_len = data_buf->data_len >>
- ilog2(sc->device->sector_size) * 8;
+ prot_buf->data_len = (data_buf->data_len >>
+ ilog2(sc->device->sector_size)) * 8;
}
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
@@ -450,7 +450,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = NULL;
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
@@ -477,11 +477,11 @@ int iser_send_data_out(struct iscsi_conn *conn,
/* build the tx desc */
iser_initialize_task_headers(task, tx_desc);
- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
tx_dsg = &tx_desc->tx_sg[1];
- tx_dsg->addr = regd_buf->reg.va + buf_offset;
- tx_dsg->length = data_seg_len;
- tx_dsg->lkey = regd_buf->reg.lkey;
+ tx_dsg->addr = mem_reg->sge.addr + buf_offset;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = mem_reg->sge.lkey;
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
@@ -658,10 +658,10 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
iser_task->prot[ISER_DIR_IN].data_len = 0;
iser_task->prot[ISER_DIR_OUT].data_len = 0;
- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
- sizeof(struct iser_regd_buf));
- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
- sizeof(struct iser_regd_buf));
+ memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
+ sizeof(struct iser_mem_reg));
+ memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
+ sizeof(struct iser_mem_reg));
}
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
@@ -674,35 +674,31 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
*/
- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ if (iser_task->data[ISER_DIR_IN].orig_sg) {
is_rdma_data_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->data[ISER_DIR_IN],
- &iser_task->data_copy[ISER_DIR_IN],
ISER_DIR_IN);
}
- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ if (iser_task->data[ISER_DIR_OUT].orig_sg) {
is_rdma_data_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->data[ISER_DIR_OUT],
- &iser_task->data_copy[ISER_DIR_OUT],
ISER_DIR_OUT);
}
- if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
+ if (iser_task->prot[ISER_DIR_IN].orig_sg) {
is_rdma_prot_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->prot[ISER_DIR_IN],
- &iser_task->prot_copy[ISER_DIR_IN],
ISER_DIR_IN);
}
- if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
is_rdma_prot_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->prot[ISER_DIR_OUT],
- &iser_task->prot_copy[ISER_DIR_OUT],
ISER_DIR_OUT);
}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 341040bf0984..f0cdc961eb11 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -39,68 +39,173 @@
#include "iscsi_iser.h"
-#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+static void
+iser_free_bounce_sg(struct iser_data_buf *data)
+{
+ struct scatterlist *sg;
+ int count;
-/**
- * iser_start_rdma_unaligned_sg
- */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- struct iser_data_buf *data_copy,
- enum iser_data_dir cmd_dir)
+ for_each_sg(data->sg, sg, data->size, count)
+ __free_page(sg_page(sg));
+
+ kfree(data->sg);
+
+ data->sg = data->orig_sg;
+ data->size = data->orig_size;
+ data->orig_sg = NULL;
+ data->orig_size = 0;
+}
+
+static int
+iser_alloc_bounce_sg(struct iser_data_buf *data)
{
- struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg;
- char *mem = NULL;
- unsigned long cmd_data_len = 0;
- int dma_nents, i;
+ struct page *page;
+ unsigned long length = data->data_len;
+ int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);
- for_each_sg(sgl, sg, data->size, i)
- cmd_data_len += ib_sg_dma_len(dev, sg);
+ sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
+ if (!sg)
+ goto err;
- if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- mem = (void *)__get_free_pages(GFP_ATOMIC,
- ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
- else
- mem = kmalloc(cmd_data_len, GFP_ATOMIC);
+ sg_init_table(sg, nents);
+ while (length) {
+ u32 page_len = min_t(u32, length, PAGE_SIZE);
- if (mem == NULL) {
- iser_err("Failed to allocate mem size %d %d for copying sglist\n",
- data->size, (int)cmd_data_len);
- return -ENOMEM;
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ goto err;
+
+ sg_set_page(&sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
}
- if (cmd_dir == ISER_DIR_OUT) {
- /* copy the unaligned sg the buffer which is used for RDMA */
- char *p, *from;
-
- sgl = (struct scatterlist *)data->buf;
- p = mem;
- for_each_sg(sgl, sg, data->size, i) {
- from = kmap_atomic(sg_page(sg));
- memcpy(p,
- from + sg->offset,
- sg->length);
- kunmap_atomic(from);
- p += sg->length;
+ data->orig_sg = data->sg;
+ data->orig_size = data->size;
+ data->sg = sg;
+ data->size = nents;
+
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __free_page(sg_page(&sg[i - 1]));
+ kfree(sg);
+
+ return -ENOMEM;
+}
+
+static void
+iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
+{
+ struct scatterlist *osg, *bsg = data->sg;
+ void *oaddr, *baddr;
+ unsigned int left = data->data_len;
+ unsigned int bsg_off = 0;
+ int i;
+
+ for_each_sg(data->orig_sg, osg, data->orig_size, i) {
+ unsigned int copy_len, osg_off = 0;
+
+ oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
+ copy_len = min(left, osg->length);
+ while (copy_len) {
+ unsigned int len = min(copy_len, bsg->length - bsg_off);
+
+ baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
+ if (to_buffer)
+ memcpy(baddr + bsg_off, oaddr + osg_off, len);
+ else
+ memcpy(oaddr + osg_off, baddr + bsg_off, len);
+
+ kunmap_atomic(baddr - bsg->offset);
+ osg_off += len;
+ bsg_off += len;
+ copy_len -= len;
+
+ if (bsg_off >= bsg->length) {
+ bsg = sg_next(bsg);
+ bsg_off = 0;
+ }
}
+ kunmap_atomic(oaddr - osg->offset);
+ left -= osg_off;
}
+}
+
+static inline void
+iser_copy_from_bounce(struct iser_data_buf *data)
+{
+ iser_copy_bounce(data, false);
+}
+
+static inline void
+iser_copy_to_bounce(struct iser_data_buf *data)
+{
+ iser_copy_bounce(data, true);
+}
+
+struct fast_reg_descriptor *
+iser_reg_desc_get(struct ib_conn *ib_conn)
+{
+ struct fast_reg_descriptor *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ desc = list_first_entry(&ib_conn->fastreg.pool,
+ struct fast_reg_descriptor, list);
+ list_del(&desc->list);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+
+ return desc;
+}
+
+void
+iser_reg_desc_put(struct ib_conn *ib_conn,
+ struct fast_reg_descriptor *desc)
+{
+ unsigned long flags;
- sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
- data_copy->buf = &data_copy->sg_single;
- data_copy->size = 1;
- data_copy->copy_buf = mem;
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ list_add(&desc->list, &ib_conn->fastreg.pool);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+}
- dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
- (cmd_dir == ISER_DIR_OUT) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- BUG_ON(dma_nents == 0);
+/**
+ * iser_start_rdma_unaligned_sg
+ */
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data,
+ enum iser_data_dir cmd_dir)
+{
+ struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
+ int rc;
+
+ rc = iser_alloc_bounce_sg(data);
+ if (rc) {
+ iser_err("Failed to allocate bounce for data len %lu\n",
+ data->data_len);
+ return rc;
+ }
+
+ if (cmd_dir == ISER_DIR_OUT)
+ iser_copy_to_bounce(data);
- data_copy->dma_nents = dma_nents;
- data_copy->data_len = cmd_data_len;
+ data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!data->dma_nents) {
+ iser_err("Got dma_nents %d, something went wrong...\n",
+ data->dma_nents);
+ rc = -ENOMEM;
+ goto err;
+ }
return 0;
+err:
+ iser_free_bounce_sg(data);
+ return rc;
}
/**
@@ -109,51 +214,18 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
- struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir)
{
- struct ib_device *dev;
- unsigned long cmd_data_len;
-
- dev = iser_task->iser_conn->ib_conn.device->ib_device;
+ struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
- ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
+ ib_dma_unmap_sg(dev, data->sg, data->size,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (cmd_dir == ISER_DIR_IN) {
- char *mem;
- struct scatterlist *sgl, *sg;
- unsigned char *p, *to;
- unsigned int sg_size;
- int i;
-
- /* copy back read RDMA to unaligned sg */
- mem = data_copy->copy_buf;
-
- sgl = (struct scatterlist *)data->buf;
- sg_size = data->size;
-
- p = mem;
- for_each_sg(sgl, sg, sg_size, i) {
- to = kmap_atomic(sg_page(sg));
- memcpy(to + sg->offset,
- p,
- sg->length);
- kunmap_atomic(to);
- p += sg->length;
- }
- }
+ if (cmd_dir == ISER_DIR_IN)
+ iser_copy_from_bounce(data);
- cmd_data_len = data->data_len;
-
- if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- free_pages((unsigned long)data_copy->copy_buf,
- ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
- else
- kfree(data_copy->copy_buf);
-
- data_copy->copy_buf = NULL;
+ iser_free_bounce_sg(data);
}
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
@@ -175,7 +247,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
struct ib_device *ibdev, u64 *pages,
int *offset, int *data_size)
{
- struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+ struct scatterlist *sg, *sgl = data->sg;
u64 start_addr, end_addr, page, chunk_start = 0;
unsigned long total_sz = 0;
unsigned int dma_len;
@@ -227,14 +299,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
struct ib_device *ibdev)
{
- struct scatterlist *sgl, *sg, *next_sg = NULL;
+ struct scatterlist *sg, *sgl, *next_sg = NULL;
u64 start_addr, end_addr;
int i, ret_len, start_check = 0;
if (data->dma_nents == 1)
return 1;
- sgl = (struct scatterlist *)data->buf;
+ sgl = data->sg;
start_addr = ib_sg_dma_address(ibdev, sgl);
for_each_sg(sgl, sg, data->dma_nents, i) {
@@ -266,11 +338,10 @@ static int iser_data_buf_aligned_len(struct iser_data_buf *data,
static void iser_data_buf_dump(struct iser_data_buf *data,
struct ib_device *ibdev)
{
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, data->dma_nents, i)
+ for_each_sg(data->sg, sg, data->dma_nents, i)
iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
@@ -288,31 +359,6 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
}
-static void iser_page_vec_build(struct iser_data_buf *data,
- struct iser_page_vec *page_vec,
- struct ib_device *ibdev)
-{
- int page_vec_len = 0;
-
- page_vec->length = 0;
- page_vec->offset = 0;
-
- iser_dbg("Translating sg sz: %d\n", data->dma_nents);
- page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
- &page_vec->offset,
- &page_vec->data_size);
- iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
-
- page_vec->length = page_vec_len;
-
- if (page_vec_len * SIZE_4K < page_vec->data_size) {
- iser_err("page_vec too short to hold this SG\n");
- iser_data_buf_dump(data, ibdev);
- iser_dump_page_vec(page_vec);
- BUG();
- }
-}
-
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
@@ -323,7 +369,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
iser_task->dir[iser_dir] = 1;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
- data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
if (data->dma_nents == 0) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
@@ -338,24 +384,41 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct ib_device *dev;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
- ib_dma_unmap_sg(dev, data->buf, data->size, dir);
+ ib_dma_unmap_sg(dev, data->sg, data->size, dir);
+}
+
+static int
+iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+ struct iser_mem_reg *reg)
+{
+ struct scatterlist *sg = mem->sg;
+
+ reg->sge.lkey = device->mr->lkey;
+ reg->rkey = device->mr->rkey;
+ reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
+ reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+
+ iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+ " length=0x%x\n", reg->sge.lkey, reg->rkey,
+ reg->sge.addr, reg->sge.length);
+
+ return 0;
}
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
- struct ib_device *ibdev,
struct iser_data_buf *mem,
- struct iser_data_buf *mem_copy,
enum iser_data_dir cmd_dir,
int aligned_len)
{
- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+ struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+ struct iser_device *device = iser_task->iser_conn->ib_conn.device;
iscsi_conn->fmr_unalign_cnt++;
iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
aligned_len, mem->size);
if (iser_debug_level > 0)
- iser_data_buf_dump(mem, ibdev);
+ iser_data_buf_dump(mem, device->ib_device);
/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_task, mem,
@@ -364,13 +427,95 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
/* allocate copy buf, if we are writing, copy the */
/* unaligned scatterlist, dma map the copy */
- if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
+ if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
return -ENOMEM;
return 0;
}
/**
+ * iser_reg_page_vec - Register physical memory
+ *
+ * returns: 0 on success, errno code on failure
+ */
+static
+int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_page_vec *page_vec,
+ struct iser_mem_reg *mem_reg)
+{
+ struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_pool_fmr *fmr;
+ int ret, plen;
+
+ plen = iser_sg_to_page_vec(mem, device->ib_device,
+ page_vec->pages,
+ &page_vec->offset,
+ &page_vec->data_size);
+ page_vec->length = plen;
+ if (plen * SIZE_4K < page_vec->data_size) {
+ iser_err("page vec too short to hold this SG\n");
+ iser_data_buf_dump(mem, device->ib_device);
+ iser_dump_page_vec(page_vec);
+ return -EINVAL;
+ }
+
+ fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
+ page_vec->pages,
+ page_vec->length,
+ page_vec->pages[0]);
+ if (IS_ERR(fmr)) {
+ ret = PTR_ERR(fmr);
+ iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
+ return ret;
+ }
+
+ mem_reg->sge.lkey = fmr->fmr->lkey;
+ mem_reg->rkey = fmr->fmr->rkey;
+ mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
+ mem_reg->sge.length = page_vec->data_size;
+ mem_reg->mem_h = fmr;
+
+ return 0;
+}
+
+/**
+ * Unregister (previously registered using FMR) memory.
+ * If memory is non-FMR does nothing.
+ */
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+ int ret;
+
+ if (!reg->mem_h)
+ return;
+
+ iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
+
+ ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
+ if (ret)
+ iser_err("ib_fmr_pool_unmap failed %d\n", ret);
+
+ reg->mem_h = NULL;
+}
+
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+
+ if (!reg->mem_h)
+ return;
+
+ iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
+ reg->mem_h);
+ reg->mem_h = NULL;
+}
+
+/**
* iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
* using FMR (if possible) obtaining rkey and va
*
@@ -383,45 +528,29 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
int aligned_len;
int err;
int i;
- struct scatterlist *sg;
- regd_buf = &iser_task->rdma_regd[cmd_dir];
+ mem_reg = &iser_task->rdma_reg[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev, mem,
- &iser_task->data_copy[cmd_dir],
+ err = fall_to_bounce_buf(iser_task, mem,
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
- mem = &iser_task->data_copy[cmd_dir];
}
/* if there a single dma entry, FMR is not needed */
if (mem->dma_nents == 1) {
- sg = (struct scatterlist *)mem->buf;
-
- regd_buf->reg.lkey = device->mr->lkey;
- regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
- regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
-
- iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
- "va: 0x%08lX sz: %ld]\n",
- (unsigned int)regd_buf->reg.lkey,
- (unsigned int)regd_buf->reg.rkey,
- (unsigned long)regd_buf->reg.va,
- (unsigned long)regd_buf->reg.len);
+ return iser_reg_dma(device, mem, mem_reg);
} else { /* use FMR for multiple dma entries */
- iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
- err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
- &regd_buf->reg);
+ err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
+ mem_reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
@@ -519,8 +648,10 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
- struct ib_sge *prot_sge, struct ib_sge *sig_sge)
+ struct fast_reg_descriptor *desc,
+ struct iser_mem_reg *data_reg,
+ struct iser_mem_reg *prot_reg,
+ struct iser_mem_reg *sig_reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_pi_context *pi_ctx = desc->pi_ctx;
@@ -544,12 +675,12 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.opcode = IB_WR_REG_SIG_MR;
sig_wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.sg_list = data_sge;
+ sig_wr.sg_list = &data_reg->sge;
sig_wr.num_sge = 1;
sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
if (scsi_prot_sg_count(iser_task->sc))
- sig_wr.wr.sig_handover.prot = prot_sge;
+ sig_wr.wr.sig_handover.prot = &prot_reg->sge;
sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
@@ -566,27 +697,26 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
}
desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
- sig_sge->lkey = pi_ctx->sig_mr->lkey;
- sig_sge->addr = 0;
- sig_sge->length = scsi_transfer_length(iser_task->sc);
+ sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
+ sig_reg->rkey = pi_ctx->sig_mr->rkey;
+ sig_reg->sge.addr = 0;
+ sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
- iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
- sig_sge->addr, sig_sge->length,
- sig_sge->lkey);
+ iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
+ sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
+ sig_reg->sge.length);
err:
return ret;
}
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
- struct iser_regd_buf *regd_buf,
struct iser_data_buf *mem,
+ struct fast_reg_descriptor *desc,
enum iser_reg_indicator ind,
- struct ib_sge *sge)
+ struct iser_mem_reg *reg)
{
- struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- struct ib_device *ibdev = device->ib_device;
struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fastreg_wr, inv_wr;
@@ -594,17 +724,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
int ret, offset, size, plen;
/* if there a single dma entry, dma mr suffices */
- if (mem->dma_nents == 1) {
- struct scatterlist *sg = (struct scatterlist *)mem->buf;
-
- sge->lkey = device->mr->lkey;
- sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
- sge->length = ib_sg_dma_len(ibdev, &sg[0]);
-
- iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
- sge->lkey, sge->addr, sge->length);
- return 0;
- }
+ if (mem->dma_nents == 1)
+ return iser_reg_dma(device, mem, reg);
if (ind == ISER_DATA_KEY_VALID) {
mr = desc->data_mr;
@@ -652,9 +773,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
}
desc->reg_indicators &= ~ind;
- sge->lkey = mr->lkey;
- sge->addr = frpl->page_list[0] + offset;
- sge->length = size;
+ reg->sge.lkey = mr->lkey;
+ reg->rkey = mr->rkey;
+ reg->sge.addr = frpl->page_list[0] + offset;
+ reg->sge.length = size;
return ret;
}
@@ -672,93 +794,66 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
- struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
+ struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
struct fast_reg_descriptor *desc = NULL;
- struct ib_sge data_sge;
int err, aligned_len;
- unsigned long flags;
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev, mem,
- &iser_task->data_copy[cmd_dir],
+ err = fall_to_bounce_buf(iser_task, mem,
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
- mem = &iser_task->data_copy[cmd_dir];
}
if (mem->dma_nents != 1 ||
scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
- spin_lock_irqsave(&ib_conn->lock, flags);
- desc = list_first_entry(&ib_conn->fastreg.pool,
- struct fast_reg_descriptor, list);
- list_del(&desc->list);
- spin_unlock_irqrestore(&ib_conn->lock, flags);
- regd_buf->reg.mem_h = desc;
+ desc = iser_reg_desc_get(ib_conn);
+ mem_reg->mem_h = desc;
}
- err = iser_fast_reg_mr(iser_task, regd_buf, mem,
- ISER_DATA_KEY_VALID, &data_sge);
+ err = iser_fast_reg_mr(iser_task, mem, desc,
+ ISER_DATA_KEY_VALID, mem_reg);
if (err)
goto err_reg;
if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
- struct ib_sge prot_sge, sig_sge;
+ struct iser_mem_reg prot_reg;
- memset(&prot_sge, 0, sizeof(prot_sge));
+ memset(&prot_reg, 0, sizeof(prot_reg));
if (scsi_prot_sg_count(iser_task->sc)) {
mem = &iser_task->prot[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev, mem,
- &iser_task->prot_copy[cmd_dir],
+ err = fall_to_bounce_buf(iser_task, mem,
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
- mem = &iser_task->prot_copy[cmd_dir];
}
- err = iser_fast_reg_mr(iser_task, regd_buf, mem,
- ISER_PROT_KEY_VALID, &prot_sge);
+ err = iser_fast_reg_mr(iser_task, mem, desc,
+ ISER_PROT_KEY_VALID, &prot_reg);
if (err)
goto err_reg;
}
- err = iser_reg_sig_mr(iser_task, desc, &data_sge,
- &prot_sge, &sig_sge);
+ err = iser_reg_sig_mr(iser_task, desc, mem_reg,
+ &prot_reg, mem_reg);
if (err) {
iser_err("Failed to register signature mr\n");
return err;
}
desc->reg_indicators |= ISER_FASTREG_PROTECTED;
-
- regd_buf->reg.lkey = sig_sge.lkey;
- regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
- regd_buf->reg.va = sig_sge.addr;
- regd_buf->reg.len = sig_sge.length;
- } else {
- if (desc)
- regd_buf->reg.rkey = desc->data_mr->rkey;
- else
- regd_buf->reg.rkey = device->mr->rkey;
-
- regd_buf->reg.lkey = data_sge.lkey;
- regd_buf->reg.va = data_sge.addr;
- regd_buf->reg.len = data_sge.length;
}
return 0;
err_reg:
- if (desc) {
- spin_lock_irqsave(&ib_conn->lock, flags);
- list_add_tail(&desc->list, &ib_conn->fastreg.pool);
- spin_unlock_irqrestore(&ib_conn->lock, flags);
- }
+ if (desc)
+ iser_reg_desc_put(ib_conn, desc);
return err;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4065abe28829..cc2dd35ffbc0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -274,6 +274,65 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
}
static int
+iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
+ struct fast_reg_descriptor *desc)
+{
+ struct iser_pi_context *pi_ctx = NULL;
+ struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2,
+ .flags = IB_MR_SIGNATURE_EN};
+ int ret = 0;
+
+ desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+ if (!desc->pi_ctx)
+ return -ENOMEM;
+
+ pi_ctx = desc->pi_ctx;
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ goto prot_frpl_failure;
+ }
+
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ goto prot_mr_failure;
+ }
+ desc->reg_indicators |= ISER_PROT_KEY_VALID;
+
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ goto sig_mr_failure;
+ }
+ desc->reg_indicators |= ISER_SIG_KEY_VALID;
+ desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+
+ return 0;
+
+sig_mr_failure:
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+prot_mr_failure:
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+prot_frpl_failure:
+ kfree(desc->pi_ctx);
+
+ return ret;
+}
+
+static void
+iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
+{
+ ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
+ ib_dereg_mr(pi_ctx->prot_mr);
+ ib_destroy_mr(pi_ctx->sig_mr);
+ kfree(pi_ctx);
+}
+
+static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
bool pi_enable, struct fast_reg_descriptor *desc)
{
@@ -297,59 +356,12 @@ iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
desc->reg_indicators |= ISER_DATA_KEY_VALID;
if (pi_enable) {
- struct ib_mr_init_attr mr_init_attr = {0};
- struct iser_pi_context *pi_ctx = NULL;
-
- desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
- if (!desc->pi_ctx) {
- iser_err("Failed to allocate pi context\n");
- ret = -ENOMEM;
+ ret = iser_alloc_pi_ctx(ib_device, pd, desc);
+ if (ret)
goto pi_ctx_alloc_failure;
- }
- pi_ctx = desc->pi_ctx;
-
- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_frpl)) {
- ret = PTR_ERR(pi_ctx->prot_frpl);
- iser_err("Failed to allocate prot frpl ret=%d\n",
- ret);
- goto prot_frpl_failure;
- }
-
- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
- ISCSI_ISER_SG_TABLESIZE + 1);
- if (IS_ERR(pi_ctx->prot_mr)) {
- ret = PTR_ERR(pi_ctx->prot_mr);
- iser_err("Failed to allocate prot frmr ret=%d\n",
- ret);
- goto prot_mr_failure;
- }
- desc->reg_indicators |= ISER_PROT_KEY_VALID;
-
- mr_init_attr.max_reg_descriptors = 2;
- mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
- if (IS_ERR(pi_ctx->sig_mr)) {
- ret = PTR_ERR(pi_ctx->sig_mr);
- iser_err("Failed to allocate signature enabled mr err=%d\n",
- ret);
- goto sig_mr_failure;
- }
- desc->reg_indicators |= ISER_SIG_KEY_VALID;
}
- desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
-
- iser_dbg("Create fr_desc %p page_list %p\n",
- desc, desc->data_frpl->page_list);
return 0;
-sig_mr_failure:
- ib_dereg_mr(desc->pi_ctx->prot_mr);
-prot_mr_failure:
- ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
-prot_frpl_failure:
- kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
@@ -416,12 +428,8 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
list_del(&desc->list);
ib_free_fast_reg_page_list(desc->data_frpl);
ib_dereg_mr(desc->data_mr);
- if (desc->pi_ctx) {
- ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
- ib_dereg_mr(desc->pi_ctx->prot_mr);
- ib_destroy_mr(desc->pi_ctx->sig_mr);
- kfree(desc->pi_ctx);
- }
+ if (desc->pi_ctx)
+ iser_free_pi_ctx(desc->pi_ctx);
kfree(desc);
++i;
}
@@ -721,7 +729,7 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
struct iser_conn *iser_conn;
iser_conn = (struct iser_conn *)cma_id->context;
- iser_conn->state = ISER_CONN_DOWN;
+ iser_conn->state = ISER_CONN_TERMINATING;
}
/**
@@ -992,93 +1000,6 @@ connect_failure:
return err;
}
-/**
- * iser_reg_page_vec - Register physical memory
- *
- * returns: 0 on success, errno code on failure
- */
-int iser_reg_page_vec(struct ib_conn *ib_conn,
- struct iser_page_vec *page_vec,
- struct iser_mem_reg *mem_reg)
-{
- struct ib_pool_fmr *mem;
- u64 io_addr;
- u64 *page_list;
- int status;
-
- page_list = page_vec->pages;
- io_addr = page_list[0];
-
- mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
- page_list,
- page_vec->length,
- io_addr);
-
- if (IS_ERR(mem)) {
- status = (int)PTR_ERR(mem);
- iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
- return status;
- }
-
- mem_reg->lkey = mem->fmr->lkey;
- mem_reg->rkey = mem->fmr->rkey;
- mem_reg->len = page_vec->length * SIZE_4K;
- mem_reg->va = io_addr;
- mem_reg->mem_h = (void *)mem;
-
- mem_reg->va += page_vec->offset;
- mem_reg->len = page_vec->data_size;
-
- iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
- "entry[0]: (0x%08lx,%ld)] -> "
- "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
- page_vec, page_vec->length,
- (unsigned long)page_vec->pages[0],
- (unsigned long)page_vec->data_size,
- (unsigned int)mem_reg->lkey, mem_reg->mem_h,
- (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
- return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
- int ret;
-
- if (!reg->mem_h)
- return;
-
- iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
-
- ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
- if (ret)
- iser_err("ib_fmr_pool_unmap failed %d\n", ret);
-
- reg->mem_h = NULL;
-}
-
-void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
- struct iser_conn *iser_conn = iser_task->iser_conn;
- struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct fast_reg_descriptor *desc = reg->mem_h;
-
- if (!desc)
- return;
-
- reg->mem_h = NULL;
- spin_lock_bh(&ib_conn->lock);
- list_add_tail(&desc->list, &ib_conn->fastreg.pool);
- spin_unlock_bh(&ib_conn->lock);
-}
-
int iser_post_recvl(struct iser_conn *iser_conn)
{
struct ib_recv_wr rx_wr, *rx_wr_failed;
@@ -1210,6 +1131,9 @@ iser_handle_comp_error(struct ib_conn *ib_conn,
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
+ if (wc->wr_id == ISER_FASTREG_LI_WRID)
+ return;
+
if (is_iser_tx_desc(iser_conn, wr_id)) {
struct iser_tx_desc *desc = wr_id;
@@ -1254,13 +1178,11 @@ static void iser_handle_wc(struct ib_wc *wc)
else
iser_dbg("flush error: wr id %llx\n", wc->wr_id);
- if (wc->wr_id != ISER_FASTREG_LI_WRID &&
- wc->wr_id != ISER_BEACON_WRID)
- iser_handle_comp_error(ib_conn, wc);
-
- /* complete in case all flush errors were consumed */
if (wc->wr_id == ISER_BEACON_WRID)
+ /* all flush errors were consumed */
complete(&ib_conn->flush_comp);
+ else
+ iser_handle_comp_error(ib_conn, wc);
}
}
@@ -1306,7 +1228,7 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector)
{
- struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct fast_reg_descriptor *desc = reg->mem_h;
unsigned long sector_size = iser_task->sc->device->sector_size;
struct ib_mr_status mr_status;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 075b19cc78e8..327529ee85eb 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -76,12 +76,12 @@ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
- struct isert_conn *isert_conn = (struct isert_conn *)context;
+ struct isert_conn *isert_conn = context;
isert_err("conn %p event: %d\n", isert_conn, e->event);
switch (e->event) {
case IB_EVENT_COMM_EST:
- rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
+ rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
@@ -107,13 +107,12 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
return 0;
}
-static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+static struct isert_comp *
+isert_comp_get(struct isert_conn *isert_conn)
{
- struct isert_device *device = isert_conn->conn_device;
- struct ib_qp_init_attr attr;
+ struct isert_device *device = isert_conn->device;
struct isert_comp *comp;
- int ret, i, min = 0;
+ int i, min = 0;
mutex_lock(&device_list_mutex);
for (i = 0; i < device->comps_used; i++)
@@ -122,9 +121,30 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
min = i;
comp = &device->comps[min];
comp->active_qps++;
+ mutex_unlock(&device_list_mutex);
+
isert_info("conn %p, using comp %p min_index: %d\n",
isert_conn, comp, min);
+
+ return comp;
+}
+
+static void
+isert_comp_put(struct isert_comp *comp)
+{
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
mutex_unlock(&device_list_mutex);
+}
+
+static struct ib_qp *
+isert_create_qp(struct isert_conn *isert_conn,
+ struct isert_comp *comp,
+ struct rdma_cm_id *cma_id)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_qp_init_attr attr;
+ int ret;
memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
@@ -149,19 +169,31 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
if (device->pi_capable)
attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
- ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+ ret = rdma_create_qp(cma_id, device->pd, &attr);
if (ret) {
isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return cma_id->qp;
+}
+
+static int
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+{
+ struct isert_comp *comp;
+ int ret;
+
+ comp = isert_comp_get(isert_conn);
+ isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
+ if (IS_ERR(isert_conn->qp)) {
+ ret = PTR_ERR(isert_conn->qp);
goto err;
}
- isert_conn->conn_qp = cma_id->qp;
return 0;
err:
- mutex_lock(&device_list_mutex);
- comp->active_qps--;
- mutex_unlock(&device_list_mutex);
-
+ isert_comp_put(comp);
return ret;
}
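
Editor's note: the hunk above splits QP setup into an isert_comp_get()/isert_comp_put() pair around a shared array of completion contexts. Below is a stand-alone user-space model of that pattern (hypothetical names, a pthread mutex standing in for the kernel mutex) to show the load-balancing idea in isolation:

/* Pick the completion context with the fewest active QPs under one lock,
 * and release it symmetrically on teardown or on a setup error. */
#include <pthread.h>
#include <stdio.h>

#define NCOMPS 4

struct comp { int active_qps; };

static struct comp comps[NCOMPS];
static pthread_mutex_t comps_lock = PTHREAD_MUTEX_INITIALIZER;

static struct comp *comp_get(void)
{
	int i, min = 0;

	pthread_mutex_lock(&comps_lock);
	for (i = 0; i < NCOMPS; i++)
		if (comps[i].active_qps < comps[min].active_qps)
			min = i;
	comps[min].active_qps++;
	pthread_mutex_unlock(&comps_lock);
	return &comps[min];
}

static void comp_put(struct comp *comp)
{
	pthread_mutex_lock(&comps_lock);
	comp->active_qps--;
	pthread_mutex_unlock(&comps_lock);
}

int main(void)
{
	struct comp *c = comp_get();

	printf("using comp %ld (active_qps=%d)\n", (long)(c - comps),
	       c->active_qps);
	comp_put(c);
	return 0;
}
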
@@ -174,18 +206,19 @@ isert_cq_event_callback(struct ib_event *e, void *context)
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg;
u64 dma_addr;
int i, j;
- isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
+ isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
sizeof(struct iser_rx_desc), GFP_KERNEL);
- if (!isert_conn->conn_rx_descs)
+ if (!isert_conn->rx_descs)
goto fail;
- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
@@ -198,21 +231,21 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
- rx_sg->lkey = isert_conn->conn_mr->lkey;
+ rx_sg->lkey = device->mr->lkey;
}
- isert_conn->conn_rx_desc_head = 0;
+ isert_conn->rx_desc_head = 0;
return 0;
dma_map_fail:
- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
- kfree(isert_conn->conn_rx_descs);
- isert_conn->conn_rx_descs = NULL;
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
fail:
isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
@@ -222,59 +255,51 @@ fail:
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
struct iser_rx_desc *rx_desc;
int i;
- if (!isert_conn->conn_rx_descs)
+ if (!isert_conn->rx_descs)
return;
- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
- kfree(isert_conn->conn_rx_descs);
- isert_conn->conn_rx_descs = NULL;
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
}
static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);
-static int
-isert_create_device_ib_res(struct isert_device *device)
+static void
+isert_free_comps(struct isert_device *device)
{
- struct ib_device *ib_dev = device->ib_device;
- struct ib_device_attr *dev_attr;
- int ret = 0, i;
- int max_cqe;
-
- dev_attr = &device->dev_attr;
- ret = isert_query_device(ib_dev, dev_attr);
- if (ret)
- return ret;
+ int i;
- max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- /* asign function handlers */
- if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
- dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
- device->use_fastreg = 1;
- device->reg_rdma_mem = isert_reg_rdma;
- device->unreg_rdma_mem = isert_unreg_rdma;
- } else {
- device->use_fastreg = 0;
- device->reg_rdma_mem = isert_map_rdma;
- device->unreg_rdma_mem = isert_unmap_cmd;
+ if (comp->cq) {
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
+ }
}
+ kfree(device->comps);
+}
- /* Check signature cap */
- device->pi_capable = dev_attr->device_cap_flags &
- IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+static int
+isert_alloc_comps(struct isert_device *device,
+ struct ib_device_attr *attr)
+{
+ int i, max_cqe, ret = 0;
device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors));
+ device->ib_device->num_comp_vectors));
+
isert_info("Using %d CQs, %s supports %d vectors support "
"Fast registration %d pi_capable %d\n",
device->comps_used, device->ib_device->name,
@@ -288,6 +313,8 @@ isert_create_device_ib_res(struct isert_device *device)
return -ENOMEM;
}
+ max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
+
for (i = 0; i < device->comps_used; i++) {
struct isert_comp *comp = &device->comps[i];
@@ -299,6 +326,7 @@ isert_create_device_ib_res(struct isert_device *device)
(void *)comp,
max_cqe, i);
if (IS_ERR(comp->cq)) {
+ isert_err("Unable to allocate cq\n");
ret = PTR_ERR(comp->cq);
comp->cq = NULL;
goto out_cq;
@@ -310,40 +338,79 @@ isert_create_device_ib_res(struct isert_device *device)
}
return 0;
-
out_cq:
- for (i = 0; i < device->comps_used; i++) {
- struct isert_comp *comp = &device->comps[i];
+ isert_free_comps(device);
+ return ret;
+}
- if (comp->cq) {
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- }
+static int
+isert_create_device_ib_res(struct isert_device *device)
+{
+ struct ib_device_attr *dev_attr;
+ int ret;
+
+ dev_attr = &device->dev_attr;
+ ret = isert_query_device(device->ib_device, dev_attr);
+ if (ret)
+ return ret;
+
+	/* assign function handlers */
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+ dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
+ device->use_fastreg = 1;
+ device->reg_rdma_mem = isert_reg_rdma;
+ device->unreg_rdma_mem = isert_unreg_rdma;
+ } else {
+ device->use_fastreg = 0;
+ device->reg_rdma_mem = isert_map_rdma;
+ device->unreg_rdma_mem = isert_unmap_cmd;
}
- kfree(device->comps);
+ ret = isert_alloc_comps(device, dev_attr);
+ if (ret)
+ return ret;
+
+ device->pd = ib_alloc_pd(device->ib_device);
+ if (IS_ERR(device->pd)) {
+ ret = PTR_ERR(device->pd);
+ isert_err("failed to allocate pd, device %p, ret=%d\n",
+ device, ret);
+ goto out_cq;
+ }
+
+ device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(device->mr)) {
+ ret = PTR_ERR(device->mr);
+ isert_err("failed to create dma mr, device %p, ret=%d\n",
+ device, ret);
+ goto out_mr;
+ }
+
+ /* Check signature cap */
+ device->pi_capable = dev_attr->device_cap_flags &
+ IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
+ return 0;
+
+out_mr:
+ ib_dealloc_pd(device->pd);
+out_cq:
+ isert_free_comps(device);
return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
- int i;
-
isert_info("device %p\n", device);
- for (i = 0; i < device->comps_used; i++) {
- struct isert_comp *comp = &device->comps[i];
-
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- comp->cq = NULL;
- }
- kfree(device->comps);
+ ib_dereg_mr(device->mr);
+ ib_dealloc_pd(device->pd);
+ isert_free_comps(device);
}
static void
-isert_device_try_release(struct isert_device *device)
+isert_device_put(struct isert_device *device)
{
mutex_lock(&device_list_mutex);
device->refcount--;
@@ -357,7 +424,7 @@ isert_device_try_release(struct isert_device *device)
}
static struct isert_device *
-isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
+isert_device_get(struct rdma_cm_id *cma_id)
{
struct isert_device *device;
int ret;
@@ -404,13 +471,13 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
struct fast_reg_descriptor *fr_desc, *tmp;
int i = 0;
- if (list_empty(&isert_conn->conn_fr_pool))
+ if (list_empty(&isert_conn->fr_pool))
return;
isert_info("Freeing conn %p fastreg pool", isert_conn);
list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->conn_fr_pool, list) {
+ &isert_conn->fr_pool, list) {
list_del(&fr_desc->list);
ib_free_fast_reg_page_list(fr_desc->data_frpl);
ib_dereg_mr(fr_desc->data_mr);
@@ -424,9 +491,9 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
++i;
}
- if (i < isert_conn->conn_fr_pool_size)
+ if (i < isert_conn->fr_pool_size)
isert_warn("Pool still has %d regions registered\n",
- isert_conn->conn_fr_pool_size - i);
+ isert_conn->fr_pool_size - i);
}
static int
@@ -526,7 +593,7 @@ static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct se_session *se_sess = isert_conn->conn->sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
int i, ret, tag_num;
@@ -537,7 +604,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
- isert_conn->conn_fr_pool_size = 0;
+ isert_conn->fr_pool_size = 0;
for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
@@ -547,7 +614,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
}
ret = isert_create_fr_desc(device->ib_device,
- isert_conn->conn_pd, fr_desc);
+ device->pd, fr_desc);
if (ret) {
isert_err("Failed to create fastreg descriptor err=%d\n",
ret);
@@ -555,12 +622,12 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
goto err;
}
- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
- isert_conn->conn_fr_pool_size++;
+ list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
+ isert_conn->fr_pool_size++;
}
isert_dbg("Creating conn %p fastreg pool size=%d",
- isert_conn, isert_conn->conn_fr_pool_size);
+ isert_conn, isert_conn->fr_pool_size);
return 0;
@@ -569,55 +636,50 @@ err:
return ret;
}
-static int
-isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+static void
+isert_init_conn(struct isert_conn *isert_conn)
{
- struct isert_np *isert_np = cma_id->context;
- struct iscsi_np *np = isert_np->np;
- struct isert_conn *isert_conn;
- struct isert_device *device;
- struct ib_device *ib_dev = cma_id->device;
- int ret = 0;
-
- spin_lock_bh(&np->np_thread_lock);
- if (!np->enabled) {
- spin_unlock_bh(&np->np_thread_lock);
- isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
- }
- spin_unlock_bh(&np->np_thread_lock);
-
- isert_dbg("cma_id: %p, portal: %p\n",
- cma_id, cma_id->context);
-
- isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
- if (!isert_conn) {
- isert_err("Unable to allocate isert_conn\n");
- return -ENOMEM;
- }
isert_conn->state = ISER_CONN_INIT;
- INIT_LIST_HEAD(&isert_conn->conn_accept_node);
- init_completion(&isert_conn->conn_login_comp);
+ INIT_LIST_HEAD(&isert_conn->accept_node);
+ init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
- init_completion(&isert_conn->conn_wait);
- kref_init(&isert_conn->conn_kref);
- mutex_init(&isert_conn->conn_mutex);
- spin_lock_init(&isert_conn->conn_lock);
- INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+ init_completion(&isert_conn->wait);
+ kref_init(&isert_conn->kref);
+ mutex_init(&isert_conn->mutex);
+ spin_lock_init(&isert_conn->pool_lock);
+ INIT_LIST_HEAD(&isert_conn->fr_pool);
+}
+
+static void
+isert_free_login_buf(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
- isert_conn->conn_cm_id = cma_id;
+ ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ DMA_FROM_DEVICE);
+ kfree(isert_conn->login_buf);
+}
+
+static int
+isert_alloc_login_buf(struct isert_conn *isert_conn,
+ struct ib_device *ib_dev)
+{
+ int ret;
isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!isert_conn->login_buf) {
isert_err("Unable to allocate isert_conn->login_buf\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
isert_conn->login_req_buf = isert_conn->login_buf;
isert_conn->login_rsp_buf = isert_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN;
+
isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
isert_conn->login_buf, isert_conn->login_req_buf,
isert_conn->login_rsp_buf);
@@ -628,8 +690,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
- isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
- ret);
+ isert_err("login_req_dma mapping error: %d\n", ret);
isert_conn->login_req_dma = 0;
goto out_login_buf;
}
@@ -640,17 +701,58 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
- isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
- ret);
+ isert_err("login_rsp_dma mapping error: %d\n", ret);
isert_conn->login_rsp_dma = 0;
goto out_req_dma_map;
}
- device = isert_device_find_by_ib_dev(cma_id);
+ return 0;
+
+out_req_dma_map:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
+out_login_buf:
+ kfree(isert_conn->login_buf);
+ return ret;
+}
+
+static int
+isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+ struct isert_np *isert_np = cma_id->context;
+ struct iscsi_np *np = isert_np->np;
+ struct isert_conn *isert_conn;
+ struct isert_device *device;
+ int ret = 0;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (!np->enabled) {
+ spin_unlock_bh(&np->np_thread_lock);
+ isert_dbg("iscsi_np is not enabled, reject connect request\n");
+ return rdma_reject(cma_id, NULL, 0);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ isert_dbg("cma_id: %p, portal: %p\n",
+ cma_id, cma_id->context);
+
+ isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
+ if (!isert_conn)
+ return -ENOMEM;
+
+ isert_init_conn(isert_conn);
+ isert_conn->cm_id = cma_id;
+
+ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+ if (ret)
+ goto out;
+
+ device = isert_device_get(cma_id);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
goto out_rsp_dma_map;
}
+ isert_conn->device = device;
/* Set max inflight RDMA READ requests */
isert_conn->initiator_depth = min_t(u8,
@@ -658,24 +760,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
device->dev_attr.max_qp_init_rd_atom);
isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
- isert_conn->conn_device = device;
- isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
- if (IS_ERR(isert_conn->conn_pd)) {
- ret = PTR_ERR(isert_conn->conn_pd);
- isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
- isert_conn, ret);
- goto out_pd;
- }
-
- isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(isert_conn->conn_mr)) {
- ret = PTR_ERR(isert_conn->conn_mr);
- isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
- isert_conn, ret);
- goto out_mr;
- }
-
ret = isert_conn_setup_qp(isert_conn, cma_id);
if (ret)
goto out_conn_dev;
@@ -689,7 +773,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_conn_dev;
mutex_lock(&isert_np->np_accept_mutex);
- list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+ list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
isert_info("np %p: Allow accept_np to continue\n", np);
@@ -697,19 +781,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;
out_conn_dev:
- ib_dereg_mr(isert_conn->conn_mr);
-out_mr:
- ib_dealloc_pd(isert_conn->conn_pd);
-out_pd:
- isert_device_try_release(device);
+ isert_device_put(device);
out_rsp_dma_map:
- ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
-out_req_dma_map:
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
- kfree(isert_conn->login_buf);
+ isert_free_login_buf(isert_conn);
out:
kfree(isert_conn);
rdma_reject(cma_id, NULL, 0);
@@ -719,43 +793,32 @@ out:
static void
isert_connect_release(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
isert_dbg("conn %p\n", isert_conn);
- if (device && device->use_fastreg)
+ BUG_ON(!device);
+
+ if (device->use_fastreg)
isert_conn_free_fastreg_pool(isert_conn);
isert_free_rx_descriptors(isert_conn);
- rdma_destroy_id(isert_conn->conn_cm_id);
+ if (isert_conn->cm_id)
+ rdma_destroy_id(isert_conn->cm_id);
- if (isert_conn->conn_qp) {
- struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
+ if (isert_conn->qp) {
+ struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
- isert_dbg("dec completion context %p active_qps\n", comp);
- mutex_lock(&device_list_mutex);
- comp->active_qps--;
- mutex_unlock(&device_list_mutex);
-
- ib_destroy_qp(isert_conn->conn_qp);
+ isert_comp_put(comp);
+ ib_destroy_qp(isert_conn->qp);
}
- ib_dereg_mr(isert_conn->conn_mr);
- ib_dealloc_pd(isert_conn->conn_pd);
+ if (isert_conn->login_buf)
+ isert_free_login_buf(isert_conn);
- if (isert_conn->login_buf) {
- ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN,
- DMA_FROM_DEVICE);
- kfree(isert_conn->login_buf);
- }
- kfree(isert_conn);
+ isert_device_put(device);
- if (device)
- isert_device_try_release(device);
+ kfree(isert_conn);
}
static void
@@ -765,22 +828,22 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
isert_info("conn %p\n", isert_conn);
- if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+ if (!kref_get_unless_zero(&isert_conn->kref)) {
isert_warn("conn %p connect_release is running\n", isert_conn);
return;
}
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
if (isert_conn->state != ISER_CONN_FULL_FEATURE)
isert_conn->state = ISER_CONN_UP;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
}
static void
-isert_release_conn_kref(struct kref *kref)
+isert_release_kref(struct kref *kref)
{
struct isert_conn *isert_conn = container_of(kref,
- struct isert_conn, conn_kref);
+ struct isert_conn, kref);
isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
current->pid);
@@ -791,7 +854,7 @@ isert_release_conn_kref(struct kref *kref)
static void
isert_put_conn(struct isert_conn *isert_conn)
{
- kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ kref_put(&isert_conn->kref, isert_release_kref);
}
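
Editor's note: the rename from conn_kref to kref does not change the lifetime rules the surrounding code relies on: the connection starts with one reference, event handlers may only take a reference while the object is still live (get_unless_zero), and the final put runs the release callback. A self-contained C11 model of that protocol (illustrative only, not the kernel kref implementation):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int kref;
};

static struct conn *conn_create(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (c)
		atomic_store(&c->kref, 1);	/* like kref_init() */
	return c;
}

static int conn_get_unless_zero(struct conn *c)
{
	int old = atomic_load(&c->kref);

	while (old != 0)
		if (atomic_compare_exchange_weak(&c->kref, &old, old + 1))
			return 1;
	return 0;	/* release already in progress, do not touch c */
}

static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->kref, 1) == 1) {
		puts("final put: releasing conn");	/* release callback */
		free(c);
	}
}

int main(void)
{
	struct conn *c = conn_create();

	if (!c)
		return 1;
	if (conn_get_unless_zero(c))
		conn_put(c);		/* drop the handler's reference */
	conn_put(c);			/* initial reference: frees */
	return 0;
}
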
/**
@@ -803,7 +866,7 @@ isert_put_conn(struct isert_conn *isert_conn)
* to TERMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
- * This routine must be called with conn_mutex held. Thus it is
+ * This routine must be called with mutex held. Thus it is
* safe to call multiple times.
*/
static void
@@ -819,7 +882,7 @@ isert_conn_terminate(struct isert_conn *isert_conn)
isert_info("Terminating conn %p state %d\n",
isert_conn, isert_conn->state);
isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->conn_cm_id);
+ err = rdma_disconnect(isert_conn->cm_id);
if (err)
isert_warn("Failed rdma_disconnect isert_conn %p\n",
isert_conn);
@@ -868,22 +931,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
isert_conn = cma_id->qp->qp_context;
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
- isert_info("conn %p completing conn_wait\n", isert_conn);
- complete(&isert_conn->conn_wait);
+ isert_info("conn %p completing wait\n", isert_conn);
+ complete(&isert_conn->wait);
return 0;
}
-static void
+static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
+
+ return -1;
}
static int
@@ -912,7 +978,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
- isert_connect_error(cma_id);
+ ret = isert_connect_error(cma_id);
break;
default:
isert_err("Unhandled RDMA CMA event: %d\n", event->event);
@@ -927,11 +993,11 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
struct ib_recv_wr *rx_wr, *rx_wr_failed;
int i, ret;
- unsigned int rx_head = isert_conn->conn_rx_desc_head;
+ unsigned int rx_head = isert_conn->rx_desc_head;
struct iser_rx_desc *rx_desc;
- for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
- rx_desc = &isert_conn->conn_rx_descs[rx_head];
+ for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &isert_conn->rx_descs[rx_head];
rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
@@ -943,14 +1009,14 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
rx_wr->next = NULL; /* mark end of work requests list */
isert_conn->post_recv_buf_count += count;
- ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
+ ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
if (ret) {
isert_err("ib_post_recv() failed with ret: %d\n", ret);
isert_conn->post_recv_buf_count -= count;
} else {
isert_dbg("Posted %d RX buffers\n", count);
- isert_conn->conn_rx_desc_head = rx_head;
+ isert_conn->rx_desc_head = rx_head;
}
return ret;
}
@@ -958,7 +1024,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct ib_send_wr send_wr, *send_wr_failed;
int ret;
@@ -972,7 +1038,7 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
+ ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
if (ret)
isert_err("ib_post_send() failed, ret: %d\n", ret);
@@ -984,7 +1050,8 @@ isert_create_send_desc(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -995,8 +1062,8 @@ isert_create_send_desc(struct isert_conn *isert_conn,
tx_desc->num_sge = 1;
tx_desc->isert_cmd = isert_cmd;
- if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
- tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+ if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
@@ -1005,7 +1072,8 @@ static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
u64 dma_addr;
dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
@@ -1018,7 +1086,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
- tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
@@ -1051,7 +1119,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_req_dma;
sge.length = ISER_RX_LOGIN_SIZE;
- sge.lkey = isert_conn->conn_mr->lkey;
+ sge.lkey = isert_conn->device->mr->lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
@@ -1062,7 +1130,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
rx_wr.num_sge = 1;
isert_conn->post_recv_buf_count++;
- ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
if (ret) {
isert_err("ib_post_recv() failed: %d\n", ret);
isert_conn->post_recv_buf_count--;
@@ -1076,8 +1144,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
u32 length)
{
struct isert_conn *isert_conn = conn->context;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;
isert_create_send_desc(isert_conn, NULL, tx_desc);
@@ -1100,13 +1169,13 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
tx_dsg->addr = isert_conn->login_rsp_dma;
tx_dsg->length = length;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = isert_conn->device->mr->lkey;
tx_desc->num_sge = 2;
}
if (!login->login_failed) {
if (login->login_complete) {
if (!conn->sess->sess_ops->SessionType &&
- isert_conn->conn_device->use_fastreg) {
+ isert_conn->device->use_fastreg) {
ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
isert_err("Conn: %p failed to create"
@@ -1124,9 +1193,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
return ret;
/* Now we are in FULL_FEATURE phase */
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_FULL_FEATURE;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
goto post_send;
}
@@ -1185,7 +1254,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
memcpy(login->req_buf, &rx_desc->data[0], size);
if (login->first_request) {
- complete(&isert_conn->conn_login_comp);
+ complete(&isert_conn->login_comp);
return;
}
schedule_delayed_work(&conn->login_work, 0);
@@ -1194,7 +1263,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct isert_cmd *isert_cmd;
struct iscsi_cmd *cmd;
@@ -1379,13 +1448,12 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
{
struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iscsi_conn *conn = isert_conn->conn;
- struct iscsi_session *sess = conn->sess;
struct iscsi_cmd *cmd;
struct isert_cmd *isert_cmd;
int ret = -EINVAL;
u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
- if (sess->sess_ops->SessionType &&
+ if (conn->sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
" ignoring\n", opcode);
@@ -1497,10 +1565,11 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
}
static void
-isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
- u32 xfer_len)
+isert_rcv_completion(struct iser_rx_desc *desc,
+ struct isert_conn *isert_conn,
+ u32 xfer_len)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen, outstanding;
@@ -1532,9 +1601,9 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
if (login && !login->first_request)
isert_rx_login_req(isert_conn);
}
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
complete(&isert_conn->login_req_comp);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
} else {
isert_rx_do_work(desc, isert_conn);
}
@@ -1566,7 +1635,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct scatterlist *sg, u32 nents, u32 length, u32 offset,
enum iser_ib_op_code op, struct isert_data_buf *data)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
data->dma_dir = op == ISER_IB_RDMA_WRITE ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
@@ -1597,7 +1666,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
memset(data, 0, sizeof(*data));
@@ -1634,7 +1703,6 @@ static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- LIST_HEAD(unmap_list);
isert_dbg("Cmd %p\n", isert_cmd);
@@ -1644,9 +1712,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
isert_unmap_data_buf(isert_conn, &wr->prot);
wr->fr_desc->ind &= ~ISERT_PROTECTED;
}
- spin_lock_bh(&isert_conn->conn_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
- spin_unlock_bh(&isert_conn->conn_lock);
+ spin_lock_bh(&isert_conn->pool_lock);
+ list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
+ spin_unlock_bh(&isert_conn->pool_lock);
wr->fr_desc = NULL;
}
@@ -1665,7 +1733,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsi_conn *conn = isert_conn->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct iscsi_text_rsp *hdr;
isert_dbg("Cmd %p\n", isert_cmd);
@@ -1815,7 +1883,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
int ret = 0;
if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
@@ -1841,7 +1909,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
int ret = 0;
if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
@@ -1861,11 +1929,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
- if (ret)
+ if (ret) {
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
transport_send_check_condition_and_sense(se_cmd,
se_cmd->pi_err, 0);
- else
+ } else {
target_execute_cmd(se_cmd);
+ }
}
static void
@@ -1874,7 +1944,7 @@ isert_do_control_comp(struct work_struct *work)
struct isert_cmd *isert_cmd = container_of(work,
struct isert_cmd, comp_work);
struct isert_conn *isert_conn = isert_cmd->conn;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
@@ -1922,10 +1992,10 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
}
static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
+isert_snd_completion(struct iser_tx_desc *tx_desc,
struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
struct isert_rdma_wr *wr;
@@ -1938,10 +2008,6 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
switch (wr->iser_ib_op) {
- case ISER_IB_RECV:
- isert_err("Got ISER_IB_RECV\n");
- dump_stack();
- break;
case ISER_IB_SEND:
isert_response_completion(tx_desc, isert_cmd,
isert_conn, ib_dev);
@@ -1973,8 +2039,8 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
- void *start = isert_conn->conn_rx_descs;
- int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
+ void *start = isert_conn->rx_descs;
+ int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
if (wr_id >= start && wr_id < start + len)
return false;
@@ -1986,11 +2052,11 @@ static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
if (wc->wr_id == ISER_BEACON_WRID) {
- isert_info("conn %p completing conn_wait_comp_err\n",
+ isert_info("conn %p completing wait_comp_err\n",
isert_conn);
- complete(&isert_conn->conn_wait_comp_err);
+ complete(&isert_conn->wait_comp_err);
} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct isert_cmd *isert_cmd;
struct iser_tx_desc *desc;
@@ -2018,10 +2084,10 @@ isert_handle_wc(struct ib_wc *wc)
if (likely(wc->status == IB_WC_SUCCESS)) {
if (wc->opcode == IB_WC_RECV) {
rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
- isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
+ isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
} else {
tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_send_completion(tx_desc, isert_conn);
+ isert_snd_completion(tx_desc, isert_conn);
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
@@ -2070,7 +2136,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
struct ib_send_wr *wr_failed;
int ret;
- ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
+ ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
&wr_failed);
if (ret) {
isert_err("ib_post_send failed with %d\n", ret);
@@ -2083,7 +2149,7 @@ static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header;
@@ -2097,7 +2163,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
if (cmd->se_cmd.sense_buffer &&
((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
u32 padding, pdu_len;
@@ -2116,7 +2183,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = pdu_len;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
@@ -2131,8 +2198,8 @@ static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
@@ -2148,8 +2215,8 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
if (conn->tpg->tpg_attrib.t10_pi) {
if (device->pi_capable) {
@@ -2170,7 +2237,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
bool nopout_response)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2189,7 +2256,7 @@ static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2207,7 +2274,7 @@ static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2225,9 +2292,10 @@ static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
struct iscsi_reject *hdr =
(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
@@ -2243,7 +2311,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2257,7 +2325,7 @@ static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_text_rsp *hdr =
(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
@@ -2273,7 +2341,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
if (txt_rsp_len) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
void *txt_rsp_buf = cmd->buf_ptr;
@@ -2283,7 +2352,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = txt_rsp_len;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2300,7 +2369,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct scatterlist *sg_start, *tmp_sg;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
u32 sg_off, page_off;
int i = 0, sg_nents;
@@ -2324,7 +2394,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
ib_sge->length = min_t(u32, data_left,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
- ib_sge->lkey = isert_conn->conn_mr->lkey;
+ ib_sge->lkey = device->mr->lkey;
isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
ib_sge->addr, ib_sge->length, ib_sge->lkey);
@@ -2346,7 +2416,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct isert_data_buf *data = &wr->data;
struct ib_send_wr *send_wr;
struct ib_sge *ib_sge;
@@ -2485,7 +2555,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
enum isert_indicator ind,
struct ib_sge *sge)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fr_wr, inv_wr;
@@ -2494,7 +2565,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
u32 page_off;
if (mem->dma_nents == 1) {
- sge->lkey = isert_conn->conn_mr->lkey;
+ sge->lkey = device->mr->lkey;
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
@@ -2542,7 +2613,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
else
wr->next = &fr_wr;
- ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
if (ret) {
isert_err("fast registration failed, ret:%d\n", ret);
return ret;
@@ -2655,7 +2726,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
else
wr->next = &sig_wr;
- ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
if (ret) {
isert_err("fast registration failed, ret:%d\n", ret);
goto err;
@@ -2685,14 +2756,14 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct isert_rdma_wr *wr)
{
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
int ret;
if (!wr->fr_desc->pi_ctx) {
ret = isert_create_pi_ctx(wr->fr_desc,
device->ib_device,
- isert_conn->conn_pd);
+ device->pd);
if (ret) {
isert_err("conn %p failed to allocate pi_ctx\n",
isert_conn);
@@ -2763,11 +2834,11 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
return ret;
if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+ spin_lock_irqsave(&isert_conn->pool_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->fr_pool,
struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
wr->fr_desc = fr_desc;
}
@@ -2814,9 +2885,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
unmap_cmd:
if (fr_desc) {
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ spin_lock_irqsave(&isert_conn->pool_lock, flags);
+ list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
+ spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
}
isert_unmap_data_buf(isert_conn, &wr->data);
@@ -2829,8 +2900,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
int rc;
@@ -2859,7 +2930,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
wr->send_wr_num += 1;
}
- rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
@@ -2879,8 +2950,8 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
int rc;
@@ -2893,7 +2964,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
return rc;
}
- rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
@@ -2987,7 +3058,7 @@ isert_setup_id(struct isert_np *isert_np)
goto out_id;
}
- ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+ ret = rdma_listen(id, 0);
if (ret) {
isert_err("rdma_listen() failed: %d\n", ret);
goto out_id;
@@ -3046,7 +3117,7 @@ out:
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
- struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_conn_param cp;
int ret;
@@ -3067,7 +3138,7 @@ isert_rdma_accept(struct isert_conn *isert_conn)
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
int ret;
isert_info("before login_req comp conn: %p\n", isert_conn);
@@ -3090,8 +3161,8 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
isert_rx_login_req(isert_conn);
- isert_info("before conn_login_comp conn: %p\n", conn);
- ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+ isert_info("before login_comp conn: %p\n", conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_comp);
if (ret)
return ret;
@@ -3104,7 +3175,7 @@ static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
struct isert_conn *isert_conn)
{
- struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_route *cm_route = &cm_id->route;
struct sockaddr_in *sock_in;
struct sockaddr_in6 *sock_in6;
@@ -3137,13 +3208,13 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
- struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn;
- int max_accept = 0, ret;
+ int ret;
accept_wait:
ret = down_interruptible(&isert_np->np_sem);
- if (ret || max_accept > 5)
+ if (ret)
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
@@ -3162,17 +3233,15 @@ accept_wait:
mutex_lock(&isert_np->np_accept_mutex);
if (list_empty(&isert_np->np_accept_list)) {
mutex_unlock(&isert_np->np_accept_mutex);
- max_accept++;
goto accept_wait;
}
isert_conn = list_first_entry(&isert_np->np_accept_list,
- struct isert_conn, conn_accept_node);
- list_del_init(&isert_conn->conn_accept_node);
+ struct isert_conn, accept_node);
+ list_del_init(&isert_conn->accept_node);
mutex_unlock(&isert_np->np_accept_mutex);
conn->context = isert_conn;
isert_conn->conn = conn;
- max_accept = 0;
isert_set_conn_info(np, conn, isert_conn);
@@ -3184,7 +3253,7 @@ accept_wait:
static void
isert_free_np(struct iscsi_np *np)
{
- struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;
if (isert_np->np_cm_id)
@@ -3202,7 +3271,7 @@ isert_free_np(struct iscsi_np *np)
isert_info("Still have isert connections, cleaning up...\n");
list_for_each_entry_safe(isert_conn, n,
&isert_np->np_accept_list,
- conn_accept_node) {
+ accept_node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
isert_connect_release(isert_conn);
@@ -3222,11 +3291,11 @@ static void isert_release_work(struct work_struct *work)
isert_info("Starting release conn %p\n", isert_conn);
- wait_for_completion(&isert_conn->conn_wait);
+ wait_for_completion(&isert_conn->wait);
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_DOWN;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
isert_info("Destroying conn %p\n", isert_conn);
isert_put_conn(isert_conn);
@@ -3264,15 +3333,15 @@ isert_wait4flush(struct isert_conn *isert_conn)
isert_info("conn %p\n", isert_conn);
- init_completion(&isert_conn->conn_wait_comp_err);
+ init_completion(&isert_conn->wait_comp_err);
isert_conn->beacon.wr_id = ISER_BEACON_WRID;
/* post an indication that all flush errors were consumed */
- if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+ if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
isert_err("conn %p failed to post beacon", isert_conn);
return;
}
- wait_for_completion(&isert_conn->conn_wait_comp_err);
+ wait_for_completion(&isert_conn->wait_comp_err);
}
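
Editor's note: isert_wait4flush() above uses a "beacon" receive WR to drain the QP. Because the sentinel is posted last, its flush error is observed after the flush errors of everything posted before it, so once the sentinel is seen the earlier flush errors have already been consumed. A condensed fragment of that pattern, using the fields shown in the hunk:

/* Fragment only; variables are the renamed isert_conn fields from above. */
struct ib_recv_wr *bad_wr;

init_completion(&isert_conn->wait_comp_err);
isert_conn->beacon.wr_id = ISER_BEACON_WRID;
/* post an indication that all flush errors were consumed */
if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr))
	return;			/* QP already torn down */
wait_for_completion(&isert_conn->wait_comp_err);
/* the CQ handler completes wait_comp_err when it sees ISER_BEACON_WRID */
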
static void isert_wait_conn(struct iscsi_conn *conn)
@@ -3281,17 +3350,17 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_info("Starting conn %p\n", isert_conn);
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
/*
- * Only wait for conn_wait_comp_err if the isert_conn made it
+ * Only wait for wait_comp_err if the isert_conn made it
* into full feature phase..
*/
if (isert_conn->state == ISER_CONN_INIT) {
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
return;
}
isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
isert_wait4cmds(conn);
isert_wait4flush(isert_conn);
@@ -3370,7 +3439,7 @@ static void __exit isert_exit(void)
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
-MODULE_VERSION("0.1");
+MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 8dc8415d152d..9ec23a786c02 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -31,7 +31,6 @@
#define isert_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
#define ISER_BEACON_WRID 0xfffffffffffffffeULL
@@ -160,27 +159,25 @@ struct isert_conn {
u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
- unsigned int conn_rx_desc_head;
- struct iser_rx_desc *conn_rx_descs;
- struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX];
+ unsigned int rx_desc_head;
+ struct iser_rx_desc *rx_descs;
+ struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX];
struct iscsi_conn *conn;
- struct list_head conn_accept_node;
- struct completion conn_login_comp;
+ struct list_head accept_node;
+ struct completion login_comp;
struct completion login_req_comp;
- struct iser_tx_desc conn_login_tx_desc;
- struct rdma_cm_id *conn_cm_id;
- struct ib_pd *conn_pd;
- struct ib_mr *conn_mr;
- struct ib_qp *conn_qp;
- struct isert_device *conn_device;
- struct mutex conn_mutex;
- struct completion conn_wait;
- struct completion conn_wait_comp_err;
- struct kref conn_kref;
- struct list_head conn_fr_pool;
- int conn_fr_pool_size;
+ struct iser_tx_desc login_tx_desc;
+ struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
+ struct isert_device *device;
+ struct mutex mutex;
+ struct completion wait;
+ struct completion wait_comp_err;
+ struct kref kref;
+ struct list_head fr_pool;
+ int fr_pool_size;
/* lock to protect fastreg pool */
- spinlock_t conn_lock;
+ spinlock_t pool_lock;
struct work_struct release_work;
struct ib_recv_wr beacon;
bool logout_posted;
@@ -211,6 +208,8 @@ struct isert_device {
bool pi_capable;
int refcount;
struct ib_device *ib_device;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
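
Editor's note: after the header changes above, the protection domain and the local DMA MR are device-scoped and shared by every connection, while the QP, CM id, login buffers and fastreg pool remain per-connection. A condensed view of the resulting ownership (most fields omitted, comments added):

/* Condensed from the header hunks above. */
struct isert_device {			/* one per HCA, refcounted */
	struct ib_device	*ib_device;
	struct ib_pd		*pd;	/* shared protection domain */
	struct ib_mr		*mr;	/* shared local DMA lkey */
	struct isert_comp	*comps;
	int			comps_used;
};

struct isert_conn {			/* one per iSER connection */
	struct rdma_cm_id	*cm_id;
	struct ib_qp		*qp;
	struct isert_device	*device;	/* holds a device reference */
	struct kref		kref;
	struct mutex		mutex;
	/* rx descriptors, fastreg pool, login buffers, ... */
};
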
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0747c0595a9d..918814cd0f80 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -40,6 +40,7 @@
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
+#include <rdma/ib_cache.h>
#include <linux/atomic.h>
@@ -265,10 +266,10 @@ static int srp_init_qp(struct srp_target_port *target,
if (!attr)
return -ENOMEM;
- ret = ib_find_pkey(target->srp_host->srp_dev->dev,
- target->srp_host->port,
- be16_to_cpu(target->pkey),
- &attr->pkey_index);
+ ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
+ target->srp_host->port,
+ be16_to_cpu(target->pkey),
+ &attr->pkey_index);
if (ret)
goto out;
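
Editor's note: the one functional change in the ib_srp hunk is switching the P_Key lookup to the cached variant from &lt;rdma/ib_cache.h&gt; (hence the new include), which resolves the pkey index from the kernel's cached copy of the port's P_Key table rather than walking the table via ib_query_pkey() on every QP initialisation. Usage is otherwise identical; a comparison fragment with placeholder variables (ib_dev, port_num, pkey) and error handling elided:

u16 pkey_index;
int ret;

/* Old: walks the port's P_Key table through ib_query_pkey(). */
ret = ib_find_pkey(ib_dev, port_num, be16_to_cpu(pkey), &pkey_index);

/* New: resolves the index from the cached table, avoiding the per-connect
 * device query. */
ret = ib_find_cached_pkey(ib_dev, port_num, be16_to_cpu(pkey), &pkey_index);
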
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 6e0a477681e9..9b84b4c0a000 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -93,7 +93,7 @@ MODULE_PARM_DESC(srpt_service_guid,
" instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
-static struct target_fabric_configfs *srpt_target;
+static const struct target_core_fabric_ops srpt_template;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);
@@ -207,7 +207,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
}
break;
default:
- printk(KERN_ERR "received unrecognized IB event %d\n",
+ pr_err("received unrecognized IB event %d\n",
event->event);
break;
}
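
Editor's note: the remainder of the ib_srpt diff is a mechanical printk(KERN_*) to pr_*() conversion. The practical benefit is that pr_err() and friends expand to printk(KERN_ERR pr_fmt(fmt), ...), so severity and a module-wide prefix no longer need repeating at each call site, provided the file defines pr_fmt() before its includes (whether this particular patch adds such a definition is not shown in these hunks). Illustrative sketch with an assumed prefix:

/* Assumed prefix for illustration only. */
#define pr_fmt(fmt) "ib_srpt: " fmt

#include <linux/printk.h>

static void example(int event)
{
	pr_err("received unrecognized IB event %d\n", event);
	/* logs "ib_srpt: received unrecognized IB event ..." at KERN_ERR */
}
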
@@ -218,7 +218,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
*/
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
- printk(KERN_INFO "SRQ event %d\n", event->event);
+ pr_info("SRQ event %d\n", event->event);
}
/**
@@ -242,8 +242,7 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
ch->sess_name, srpt_get_ch_state(ch));
break;
default:
- printk(KERN_ERR "received unrecognized IB QP event %d\n",
- event->event);
+ pr_err("received unrecognized IB QP event %d\n", event->event);
break;
}
}
@@ -602,7 +601,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
- printk(KERN_ERR "disabling MAD processing failed.\n");
+ pr_err("disabling MAD processing failed.\n");
if (sport->mad_agent) {
ib_unregister_mad_agent(sport->mad_agent);
sport->mad_agent = NULL;
@@ -810,7 +809,7 @@ static int srpt_post_send(struct srpt_rdma_ch *ch,
ret = -ENOMEM;
if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
- printk(KERN_WARNING "IB send queue full (needed 1)\n");
+ pr_warn("IB send queue full (needed 1)\n");
goto out;
}
@@ -912,7 +911,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
if (ioctx->n_rbuf >
(srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
- printk(KERN_ERR "received unsupported SRP_CMD request"
+ pr_err("received unsupported SRP_CMD request"
" type (%u out + %u in != %u / %zu)\n",
srp_cmd->data_out_desc_cnt,
srp_cmd->data_in_desc_cnt,
@@ -1432,7 +1431,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
srpt_unmap_sg_to_ib_sge(ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
} else {
- printk(KERN_ERR "IB completion has been received too late for"
+ pr_err("IB completion has been received too late for"
" wr_id = %u.\n", ioctx->ioctx.index);
}
}
@@ -1457,7 +1456,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
SRPT_STATE_DATA_IN))
target_execute_cmd(&ioctx->cmd);
else
- printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
+ pr_err("%s[%d]: wrong state = %d\n", __func__,
__LINE__, srpt_get_cmd_state(ioctx));
} else if (opcode == SRPT_RDMA_ABORT) {
ioctx->rdma_aborted = true;
@@ -1481,7 +1480,7 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
switch (opcode) {
case SRPT_RDMA_READ_LAST:
if (ioctx->n_rdma <= 0) {
- printk(KERN_ERR "Received invalid RDMA read"
+ pr_err("Received invalid RDMA read"
" error completion with idx %d\n",
ioctx->ioctx.index);
break;
@@ -1490,14 +1489,13 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
if (state == SRPT_STATE_NEED_DATA)
srpt_abort_cmd(ioctx);
else
- printk(KERN_ERR "%s[%d]: wrong state = %d\n",
+ pr_err("%s[%d]: wrong state = %d\n",
__func__, __LINE__, state);
break;
case SRPT_RDMA_WRITE_LAST:
break;
default:
- printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
- __LINE__, opcode);
+ pr_err("%s[%d]: opcode = %u\n", __func__, __LINE__, opcode);
break;
}
}
@@ -1549,8 +1547,8 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
if (sense_data_len > max_sense_len) {
- printk(KERN_WARNING "truncated sense data from %d to %d"
- " bytes\n", sense_data_len, max_sense_len);
+ pr_warn("truncated sense data from %d to %d"
+ " bytes\n", sense_data_len, max_sense_len);
sense_data_len = max_sense_len;
}
@@ -1628,8 +1626,8 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
int addressing_method;
if (unlikely(len < 2)) {
- printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
- "more", len);
+ pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
+ len);
goto out;
}
@@ -1663,7 +1661,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
default:
- printk(KERN_ERR "Unimplemented LUN addressing method %u",
+ pr_err("Unimplemented LUN addressing method %u\n",
addressing_method);
break;
}
@@ -1672,8 +1670,7 @@ out:
return res;
out_err:
- printk(KERN_ERR "Support for multi-level LUNs has not yet been"
- " implemented");
+ pr_err("Support for multi-level LUNs has not yet been implemented\n");
goto out;
}
@@ -1723,7 +1720,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
}
if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
- printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+ pr_err("0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
ret = TCM_INVALID_CDB_FIELD;
goto send_sense;
@@ -1912,7 +1909,7 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
break;
case SRP_I_LOGOUT:
- printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
+ pr_err("Not yet implemented: SRP_I_LOGOUT\n");
break;
case SRP_CRED_RSP:
pr_debug("received SRP_CRED_RSP\n");
@@ -1921,10 +1918,10 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
pr_debug("received SRP_AER_RSP\n");
break;
case SRP_RSP:
- printk(KERN_ERR "Received SRP_RSP\n");
+ pr_err("Received SRP_RSP\n");
break;
default:
- printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
+ pr_err("received IU with unknown opcode 0x%x\n",
srp_cmd->opcode);
break;
}
@@ -1948,12 +1945,12 @@ static void srpt_process_rcv_completion(struct ib_cq *cq,
req_lim = atomic_dec_return(&ch->req_lim);
if (unlikely(req_lim < 0))
- printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
+ pr_err("req_lim = %d < 0\n", req_lim);
ioctx = sdev->ioctx_ring[index];
srpt_handle_new_iu(ch, ioctx, NULL);
} else {
- printk(KERN_INFO "receiving failed for idx %u with status %d\n",
- index, wc->status);
+ pr_info("receiving failed for idx %u with status %d\n",
+ index, wc->status);
}
}
@@ -1993,12 +1990,12 @@ static void srpt_process_send_completion(struct ib_cq *cq,
}
} else {
if (opcode == SRPT_SEND) {
- printk(KERN_INFO "sending response for idx %u failed"
- " with status %d\n", index, wc->status);
+ pr_info("sending response for idx %u failed"
+ " with status %d\n", index, wc->status);
srpt_handle_send_err_comp(ch, wc->wr_id);
} else if (opcode != SRPT_RDMA_MID) {
- printk(KERN_INFO "RDMA t %d for idx %u failed with"
- " status %d", opcode, index, wc->status);
+ pr_info("RDMA t %d for idx %u failed with"
+ " status %d\n", opcode, index, wc->status);
srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
}
}
@@ -2062,15 +2059,15 @@ static int srpt_compl_thread(void *arg)
ch = arg;
BUG_ON(!ch);
- printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
- ch->sess_name, ch->thread->comm, current->pid);
+ pr_info("Session %s: kernel thread %s (PID %d) started\n",
+ ch->sess_name, ch->thread->comm, current->pid);
while (!kthread_should_stop()) {
wait_event_interruptible(ch->wait_queue,
(srpt_process_completion(ch->cq, ch),
kthread_should_stop()));
}
- printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
- ch->sess_name, ch->thread->comm, current->pid);
+ pr_info("Session %s: kernel thread %s (PID %d) stopped\n",
+ ch->sess_name, ch->thread->comm, current->pid);
return 0;
}
@@ -2097,7 +2094,7 @@ retry:
ch->rq_size + srp_sq_size, 0);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
- printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
+ pr_err("failed to create CQ cqe= %d ret= %d\n",
ch->rq_size + srp_sq_size, ret);
goto out;
}
@@ -2123,7 +2120,7 @@ retry:
goto retry;
}
}
- printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+ pr_err("failed to create_qp ret= %d\n", ret);
goto err_destroy_cq;
}
@@ -2143,7 +2140,7 @@ retry:
ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
if (IS_ERR(ch->thread)) {
- printk(KERN_ERR "failed to create kernel thread %ld\n",
+ pr_err("failed to create kernel thread %ld\n",
PTR_ERR(ch->thread));
ch->thread = NULL;
goto err_destroy_qp;
@@ -2204,7 +2201,7 @@ static void __srpt_close_ch(struct srpt_rdma_ch *ch)
/* fall through */
case CH_LIVE:
if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
- printk(KERN_ERR "sending CM DREQ failed.\n");
+ pr_err("sending CM DREQ failed.\n");
break;
case CH_DISCONNECTING:
break;
@@ -2291,7 +2288,7 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id)
ret = srpt_ch_qp_err(ch);
if (ret < 0)
- printk(KERN_ERR "Setting queue pair in error state"
+ pr_err("Setting queue pair in error state"
" failed: %d\n", ret);
}
}
@@ -2435,17 +2432,17 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
it_iu_len = be32_to_cpu(req->req_it_iu_len);
- printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
- " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
- " (guid=0x%llx:0x%llx)\n",
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
- it_iu_len,
- param->port,
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+ pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
+ " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
+ " (guid=0x%llx:0x%llx)\n",
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
+ it_iu_len,
+ param->port,
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
rej = kzalloc(sizeof *rej, GFP_KERNEL);
@@ -2460,7 +2457,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
ret = -EINVAL;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
+ pr_err("rejected SRP_LOGIN_REQ because its"
" length (%d bytes) is out of range (%d .. %d)\n",
it_iu_len, 64, srp_max_req_size);
goto reject;
@@ -2470,7 +2467,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
ret = -EINVAL;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
+ pr_err("rejected SRP_LOGIN_REQ because the target port"
" has not yet been enabled\n");
goto reject;
}
@@ -2516,7 +2513,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
ret = -ENOMEM;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
+ pr_err("rejected SRP_LOGIN_REQ because it"
" has an invalid target port identifier.\n");
goto reject;
}
@@ -2525,7 +2522,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (!ch) {
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
+ pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
ret = -ENOMEM;
goto reject;
}
@@ -2562,7 +2559,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (ret) {
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
+ pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
goto free_ring;
}
@@ -2571,7 +2568,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (ret) {
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
+ pr_err("rejected SRP_LOGIN_REQ because enabling"
" RTR failed (error code = %d)\n", ret);
goto destroy_ib;
}
@@ -2586,8 +2583,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
nacl = srpt_lookup_acl(sport, ch->i_port_id);
if (!nacl) {
- printk(KERN_INFO "Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", ch->sess_name);
+ pr_info("Rejected login because no ACL has been"
+ " configured yet for initiator %s.\n", ch->sess_name);
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
goto destroy_ib;
@@ -2631,7 +2628,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ret = ib_send_cm_rep(cm_id, rep_param);
if (ret) {
- printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
+ pr_err("sending SRP_LOGIN_REQ response failed"
" (error code = %d)\n", ret);
goto release_channel;
}
@@ -2679,7 +2676,7 @@ out:
static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
+ pr_info("Received IB REJ for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
@@ -2714,13 +2711,13 @@ static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
+ pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
+ pr_info("Received IB REP error for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
@@ -2755,9 +2752,9 @@ static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
if (send_drep) {
if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
- printk(KERN_ERR "Sending IB DREP failed.\n");
- printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
- ch->sess_name);
+ pr_err("Sending IB DREP failed.\n");
+ pr_info("Received DREQ and sent DREP for session %s.\n",
+ ch->sess_name);
}
}
@@ -2766,8 +2763,7 @@ static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
*/
static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
- cm_id);
+ pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
@@ -2811,14 +2807,13 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
srpt_cm_rep_error(cm_id);
break;
case IB_CM_DREQ_ERROR:
- printk(KERN_INFO "Received IB DREQ ERROR event.\n");
+ pr_info("Received IB DREQ ERROR event.\n");
break;
case IB_CM_MRA_RECEIVED:
- printk(KERN_INFO "Received IB MRA event\n");
+ pr_info("Received IB MRA event\n");
break;
default:
- printk(KERN_ERR "received unrecognized IB CM event %d\n",
- event->event);
+ pr_err("received unrecognized IB CM event %d\n", event->event);
break;
}
@@ -2848,8 +2843,8 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
ret = -ENOMEM;
sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
if (sq_wr_avail < 0) {
- printk(KERN_WARNING "IB send queue full (needed %d)\n",
- n_rdma);
+ pr_warn("IB send queue full (needed %d)\n",
+ n_rdma);
goto out;
}
}
@@ -2889,7 +2884,7 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
}
if (ret)
- printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
+ pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
__func__, __LINE__, ret, i, n_rdma);
if (ret && i > 0) {
wr.num_sge = 0;
@@ -2897,12 +2892,12 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
wr.send_flags = IB_SEND_SIGNALED;
while (ch->state == CH_LIVE &&
ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
- printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
+ pr_info("Trying to abort failed RDMA transfer [%d]\n",
ioctx->ioctx.index);
msleep(1000);
}
while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
- printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
+ pr_info("Waiting until RDMA abort finished [%d]\n",
ioctx->ioctx.index);
msleep(1000);
}
@@ -2923,17 +2918,17 @@ static int srpt_xfer_data(struct srpt_rdma_ch *ch,
ret = srpt_map_sg_to_ib_sge(ch, ioctx);
if (ret) {
- printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
+ pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
goto out;
}
ret = srpt_perform_rdmas(ch, ioctx);
if (ret) {
if (ret == -EAGAIN || ret == -ENOMEM)
- printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
- __func__, __LINE__, ret);
+ pr_info("%s[%d] queue full -- ret=%d\n",
+ __func__, __LINE__, ret);
else
- printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
+ pr_err("%s[%d] fatal error -- ret=%d\n",
__func__, __LINE__, ret);
goto out_unmap;
}
@@ -3058,7 +3053,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
!ioctx->queue_status_only) {
ret = srpt_xfer_data(ch, ioctx);
if (ret) {
- printk(KERN_ERR "xfer_data failed for tag %llu\n",
+ pr_err("xfer_data failed for tag %llu\n",
ioctx->tag);
return;
}
@@ -3075,7 +3070,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
}
ret = srpt_post_send(ch, ioctx, resp_len);
if (ret) {
- printk(KERN_ERR "sending cmd response failed for tag %llu\n",
+ pr_err("sending cmd response failed for tag %llu\n",
ioctx->tag);
srpt_unmap_sg_to_ib_sge(ch, ioctx);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
@@ -3154,7 +3149,7 @@ static int srpt_release_sdev(struct srpt_device *sdev)
res = wait_event_interruptible(sdev->ch_releaseQ,
srpt_ch_list_empty(sdev));
if (res)
- printk(KERN_ERR "%s: interrupted.\n", __func__);
+ pr_err("%s: interrupted.\n", __func__);
return 0;
}
@@ -3293,7 +3288,7 @@ static void srpt_add_one(struct ib_device *device)
spin_lock_init(&sport->port_acl_lock);
if (srpt_refresh_port(sport)) {
- printk(KERN_ERR "MAD registration failed for %s-%d.\n",
+ pr_err("MAD registration failed for %s-%d.\n",
srpt_sdev_name(sdev), i);
goto err_ring;
}
@@ -3330,7 +3325,7 @@ free_dev:
kfree(sdev);
err:
sdev = NULL;
- printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
+ pr_info("%s(%s) failed.\n", __func__, device->name);
goto out;
}
@@ -3344,8 +3339,7 @@ static void srpt_remove_one(struct ib_device *device)
sdev = ib_get_client_data(device, &srpt_client);
if (!sdev) {
- printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
- device->name);
+ pr_info("%s(%s): nothing to do.\n", __func__, device->name);
return;
}
@@ -3464,7 +3458,7 @@ static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
if (!nacl) {
- printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
+ pr_err("Unable to allocate struct srpt_node_acl\n");
return NULL;
}
@@ -3615,7 +3609,7 @@ static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
u8 i_port_id[16];
if (srpt_parse_i_port_id(i_port_id, name) < 0) {
- printk(KERN_ERR "invalid initiator port ID %s\n", name);
+ pr_err("invalid initiator port ID %s\n", name);
ret = -EINVAL;
goto err;
}
@@ -3816,12 +3810,12 @@ static ssize_t srpt_tpg_store_enable(
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
+ pr_err("Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
+ pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
return -EINVAL;
}
if (tmp == 1)
@@ -3851,7 +3845,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
int res;
/* Initialize sport->port_wwn and sport->port_tpg_1 */
- res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
+ res = core_tpg_register(&srpt_template, &sport->port_wwn,
&sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
if (res)
return ERR_PTR(res);
@@ -3919,7 +3913,9 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops srpt_template = {
+static const struct target_core_fabric_ops srpt_template = {
+ .module = THIS_MODULE,
+ .name = "srpt",
.get_fabric_name = srpt_get_fabric_name,
.get_fabric_proto_ident = srpt_get_fabric_proto_ident,
.tpg_get_wwn = srpt_get_fabric_wwn,
@@ -3964,6 +3960,10 @@ static struct target_core_fabric_ops srpt_template = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = srpt_make_nodeacl,
.fabric_drop_nodeacl = srpt_drop_nodeacl,
+
+ .tfc_wwn_attrs = srpt_wwn_attrs,
+ .tfc_tpg_base_attrs = srpt_tpg_attrs,
+ .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
};
/**
@@ -3980,7 +3980,7 @@ static int __init srpt_init_module(void)
ret = -EINVAL;
if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
- printk(KERN_ERR "invalid value %d for kernel module parameter"
+ pr_err("invalid value %d for kernel module parameter"
" srp_max_req_size -- must be at least %d.\n",
srp_max_req_size, MIN_MAX_REQ_SIZE);
goto out;
@@ -3988,54 +3988,26 @@ static int __init srpt_init_module(void)
if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
|| srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
- printk(KERN_ERR "invalid value %d for kernel module parameter"
+ pr_err("invalid value %d for kernel module parameter"
" srpt_srq_size -- must be in the range [%d..%d].\n",
srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
goto out;
}
- srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
- if (IS_ERR(srpt_target)) {
- printk(KERN_ERR "couldn't register\n");
- ret = PTR_ERR(srpt_target);
+ ret = target_register_template(&srpt_template);
+ if (ret)
goto out;
- }
-
- srpt_target->tf_ops = srpt_template;
-
- /*
- * Set up default attribute lists.
- */
- srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
-
- ret = target_fabric_configfs_register(srpt_target);
- if (ret < 0) {
- printk(KERN_ERR "couldn't register\n");
- goto out_free_target;
- }
ret = ib_register_client(&srpt_client);
if (ret) {
- printk(KERN_ERR "couldn't register IB client\n");
+ pr_err("couldn't register IB client\n");
goto out_unregister_target;
}
return 0;
out_unregister_target:
- target_fabric_configfs_deregister(srpt_target);
- srpt_target = NULL;
-out_free_target:
- if (srpt_target)
- target_fabric_configfs_free(srpt_target);
+ target_unregister_template(&srpt_template);
out:
return ret;
}
@@ -4043,8 +4015,7 @@ out:
static void __exit srpt_cleanup_module(void)
{
ib_unregister_client(&srpt_client);
- target_fabric_configfs_deregister(srpt_target);
- srpt_target = NULL;
+ target_unregister_template(&srpt_template);
}
module_init(srpt_init_module);
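
The pr_err()/pr_warn()/pr_info() conversion above is mechanical: each helper expands to printk() with the matching log level and routes the format string through pr_fmt(). A minimal standalone module sketch of that pattern is shown below; the "example:" prefix is only illustrative and is not what ib_srpt itself uses.

#define pr_fmt(fmt) "example: " fmt	/* hypothetical per-file prefix */

#include <linux/module.h>
#include <linux/printk.h>

static int __init example_init(void)
{
	pr_err("fatal condition, error %d\n", -22);	/* was printk(KERN_ERR ...) */
	pr_warn("recoverable condition\n");		/* was printk(KERN_WARNING ...) */
	pr_info("informational message\n");		/* was printk(KERN_INFO ...) */
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");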
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index f50f6dd92274..b81c88c43452 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -23,8 +23,6 @@
/* #define DEBUG */
-#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
-
#include <linux/input.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -116,7 +114,7 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX ||
!test_bit(effect->type, dev->ffbit)) {
- pr_debug("invalid or not supported effect type in upload\n");
+ dev_dbg(&dev->dev, "invalid or not supported effect type in upload\n");
return -EINVAL;
}
@@ -124,7 +122,7 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
(effect->u.periodic.waveform < FF_WAVEFORM_MIN ||
effect->u.periodic.waveform > FF_WAVEFORM_MAX ||
!test_bit(effect->u.periodic.waveform, dev->ffbit))) {
- pr_debug("invalid or not supported wave form in upload\n");
+ dev_dbg(&dev->dev, "invalid or not supported wave form in upload\n");
return -EINVAL;
}
@@ -246,7 +244,7 @@ static int flush_effects(struct input_dev *dev, struct file *file)
struct ff_device *ff = dev->ff;
int i;
- pr_debug("flushing now\n");
+ dev_dbg(&dev->dev, "flushing now\n");
mutex_lock(&ff->mutex);
@@ -316,7 +314,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
int i;
if (!max_effects) {
- pr_err("cannot allocate device without any effects\n");
+ dev_err(&dev->dev, "cannot allocate device without any effects\n");
return -EINVAL;
}
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 74c0d8c6002a..fcc6c3368182 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -237,6 +237,18 @@ static u16 ml_calculate_direction(u16 direction, u16 force,
(force + new_force)) << 1;
}
+#define FRAC_N 8
+static inline s16 fixp_new16(s16 a)
+{
+ return ((s32)a) >> (16 - FRAC_N);
+}
+
+static inline s16 fixp_mult(s16 a, s16 b)
+{
+ a = ((s32)a * 0x100) / 0x7fff;
+ return ((s32)(a * b)) >> FRAC_N;
+}
+
/*
* Combine two effects and apply gain.
*/
@@ -247,7 +259,7 @@ static void ml_combine_effects(struct ff_effect *effect,
struct ff_effect *new = state->effect;
unsigned int strong, weak, i;
int x, y;
- fixp_t level;
+ s16 level;
switch (new->type) {
case FF_CONSTANT:
@@ -255,8 +267,8 @@ static void ml_combine_effects(struct ff_effect *effect,
level = fixp_new16(apply_envelope(state,
new->u.constant.level,
&new->u.constant.envelope));
- x = fixp_mult(fixp_sin(i), level) * gain / 0xffff;
- y = fixp_mult(-fixp_cos(i), level) * gain / 0xffff;
+ x = fixp_mult(fixp_sin16(i), level) * gain / 0xffff;
+ y = fixp_mult(-fixp_cos16(i), level) * gain / 0xffff;
/*
* here we abuse ff_ramp to hold x and y of constant force
* If in future any driver wants something else than x and y
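
The FRAC_N block added above replaces the old fixp_t arithmetic with a local 8-fractional-bit representation that feeds the new fixp_sin16()/fixp_cos16() helpers. The user-space sketch below (not kernel code) mirrors those two helpers to show how a full-scale 16-bit level collapses to the 0..127 range before the multiply:

#include <stdint.h>
#include <stdio.h>

#define FRAC_N 8

static int16_t fixp_new16(int16_t a)
{
	return ((int32_t)a) >> (16 - FRAC_N);	/* keep 8 fractional bits */
}

static int16_t fixp_mult(int16_t a, int16_t b)
{
	a = ((int32_t)a * 0x100) / 0x7fff;	/* rescale a into -0x100..0x100 */
	return ((int32_t)(a * b)) >> FRAC_N;
}

int main(void)
{
	int16_t level = fixp_new16(0x7fff);	/* full-scale level -> 127 */
	int16_t sin90 = 0x7fff;			/* stand-in for fixp_sin16(90) */

	printf("level=%d scaled=%d\n", level, fixp_mult(sin90, level));
	return 0;
}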
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 34feb3e1127b..54fce56c8023 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -368,27 +368,35 @@ static void input_mt_set_slots(struct input_mt *mt,
int *slots, int num_pos)
{
struct input_mt_slot *s;
- int *w = mt->red, *p;
+ int *w = mt->red, j;
- for (p = slots; p != slots + num_pos; p++)
- *p = -1;
+ for (j = 0; j != num_pos; j++)
+ slots[j] = -1;
for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
if (!input_mt_is_active(s))
continue;
- for (p = slots; p != slots + num_pos; p++)
- if (*w++ < 0)
- *p = s - mt->slots;
+
+ for (j = 0; j != num_pos; j++) {
+ if (w[j] < 0) {
+ slots[j] = s - mt->slots;
+ break;
+ }
+ }
+
+ w += num_pos;
}
for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
if (input_mt_is_active(s))
continue;
- for (p = slots; p != slots + num_pos; p++)
- if (*p < 0) {
- *p = s - mt->slots;
+
+ for (j = 0; j != num_pos; j++) {
+ if (slots[j] < 0) {
+ slots[j] = s - mt->slots;
break;
}
+ }
}
}
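
The rewrite of input_mt_set_slots() above treats mt->red as a row-major weight matrix with one num_pos-wide row per slot and advances the row pointer by num_pos for each active slot, instead of sharing one running pointer across the loops. A small user-space sketch of that first pass follows (matrix contents made up; a negative weight marks the position the tracker matched to that slot):

#include <stdio.h>

int main(void)
{
	enum { NUM_SLOTS = 3, NUM_POS = 2 };
	/* w[s][j] < 0 means position j was matched to slot s */
	int w[NUM_SLOTS * NUM_POS] = {
		 4, -1,		/* slot 0 matched position 1 */
		-2,  5,		/* slot 1 matched position 0 */
		 7,  8,		/* slot 2 matched nothing */
	};
	int slots[NUM_POS] = { -1, -1 };
	const int *row = w;
	int s, j;

	for (s = 0; s < NUM_SLOTS; s++) {
		for (j = 0; j < NUM_POS; j++) {
			if (row[j] < 0) {
				slots[j] = s;
				break;
			}
		}
		row += NUM_POS;		/* next slot's row of weights */
	}

	for (j = 0; j < NUM_POS; j++)
		printf("position %d -> slot %d\n", j, slots[j]);
	return 0;
}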
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 3aa2f3f3da5b..61c761156371 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -31,12 +31,14 @@
* - the iForce driver drivers/char/joystick/iforce.c
* - the skeleton-driver drivers/usb/usb-skeleton.c
* - Xbox 360 information http://www.free60.org/wiki/Gamepad
+ * - Xbox One information https://github.com/quantus/xbox-one-controller-protocol
*
* Thanks to:
* - ITO Takayuki for providing essential xpad information on his website
* - Vojtech Pavlik - iforce driver / input subsystem
* - Greg Kroah-Hartman - usb-skeleton driver
* - XBOX Linux project - extra USB id's
+ * - Pekka Pöyry (quantus) - Xbox One controller reverse engineering
*
* TODO:
* - fine tune axes (especially trigger axes)
@@ -828,6 +830,23 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ case XTYPE_XBOXONE:
+ xpad->odata[0] = 0x09; /* activate rumble */
+ xpad->odata[1] = 0x08;
+ xpad->odata[2] = 0x00;
+ xpad->odata[3] = 0x08; /* continuous effect */
+ xpad->odata[4] = 0x00; /* simple rumble mode */
+ xpad->odata[5] = 0x03; /* L and R actuator only */
+ xpad->odata[6] = 0x00; /* TODO: LT actuator */
+ xpad->odata[7] = 0x00; /* TODO: RT actuator */
+ xpad->odata[8] = strong / 256; /* left actuator */
+ xpad->odata[9] = weak / 256; /* right actuator */
+ xpad->odata[10] = 0x80; /* length of pulse */
+ xpad->odata[11] = 0x00; /* stop period of pulse */
+ xpad->irq_out->transfer_buffer_length = 12;
+
+ return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+
default:
dev_dbg(&xpad->dev->dev,
"%s - rumble command sent to unsupported xpad type: %d\n",
@@ -841,7 +860,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
static int xpad_init_ff(struct usb_xpad *xpad)
{
- if (xpad->xtype == XTYPE_UNKNOWN || xpad->xtype == XTYPE_XBOXONE)
+ if (xpad->xtype == XTYPE_UNKNOWN)
return 0;
input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
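
The new XTYPE_XBOXONE case above fills in a fixed 12-byte rumble packet; the byte meanings are taken from its in-line comments. A standalone sketch that assembles the same packet might look like this (illustrative only; just the strong/weak magnitudes vary at run time):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_xboxone_rumble(uint8_t pkt[12], uint16_t strong, uint16_t weak)
{
	memset(pkt, 0, 12);		/* simple rumble mode, no LT/RT, zero stop period */
	pkt[0] = 0x09;			/* activate rumble */
	pkt[1] = 0x08;
	pkt[3] = 0x08;			/* continuous effect */
	pkt[5] = 0x03;			/* left and right actuators only */
	pkt[8] = strong / 256;		/* left (strong) actuator */
	pkt[9] = weak / 256;		/* right (weak) actuator */
	pkt[10] = 0x80;			/* length of pulse */
}

int main(void)
{
	uint8_t pkt[12];
	int i;

	build_xboxone_rumble(pkt, 0xffff, 0x8000);
	for (i = 0; i < 12; i++)
		printf("%02x ", pkt[i]);
	printf("\n");
	return 0;
}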
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a89ba7cb96f1..106fbac7f8c5 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -588,6 +588,16 @@ config KEYBOARD_DAVINCI
To compile this driver as a module, choose M here: the
module will be called davinci_keyscan.
+config KEYBOARD_IPAQ_MICRO
+ tristate "Buttons on Micro SoC (iPaq h3100,h3600,h3700)"
+ depends on MFD_IPAQ_MICRO
+ help
+ Say Y to enable support for the buttons attached to the
+ Micro peripheral controller on iPAQ h3100/h3600/h3700.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ipaq-micro-keys.
+
config KEYBOARD_OMAP
tristate "TI OMAP keypad support"
depends on ARCH_OMAP1
@@ -686,4 +696,15 @@ config KEYBOARD_CAP11XX
To compile this driver as a module, choose M here: the
module will be called cap11xx.
+config KEYBOARD_BCM
+ tristate "Broadcom keypad driver"
+ depends on OF && HAVE_CLK
+ select INPUT_MATRIXKMAP
+ default ARCH_BCM_CYGNUS
+ help
+ Say Y here if you want to use the Broadcom keypad.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm-keypad.
+
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 470767884bd8..df28d5553c05 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_KEYBOARD_ADP5589) += adp5589-keys.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
+obj-$(CONFIG_KEYBOARD_BCM) += bcm-keypad.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
+obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 387c51f4b4e4..ec876b5b1382 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1653,6 +1653,12 @@ static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
return 1;
}
+/*
+ * NOTE: do not add any more "force release" quirks to this table. The
+ * task of adjusting list of keys that should be "released" automatically
+ * by the driver is now delegated to userspace tools, such as udev, so
+ * submit such quirks there.
+ */
static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
{
.matches = {
diff --git a/drivers/input/keyboard/bcm-keypad.c b/drivers/input/keyboard/bcm-keypad.c
new file mode 100644
index 000000000000..86a8b723ae15
--- /dev/null
+++ b/drivers/input/keyboard/bcm-keypad.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#define DEFAULT_CLK_HZ 31250
+#define MAX_ROWS 8
+#define MAX_COLS 8
+
+/* Register/field definitions */
+#define KPCR_OFFSET 0x00000080
+#define KPCR_MODE 0x00000002
+#define KPCR_MODE_SHIFT 1
+#define KPCR_MODE_MASK 1
+#define KPCR_ENABLE 0x00000001
+#define KPCR_STATUSFILTERENABLE 0x00008000
+#define KPCR_STATUSFILTERTYPE_SHIFT 12
+#define KPCR_COLFILTERENABLE 0x00000800
+#define KPCR_COLFILTERTYPE_SHIFT 8
+#define KPCR_ROWWIDTH_SHIFT 20
+#define KPCR_COLUMNWIDTH_SHIFT 16
+
+#define KPIOR_OFFSET 0x00000084
+#define KPIOR_ROWOCONTRL_SHIFT 24
+#define KPIOR_ROWOCONTRL_MASK 0xFF000000
+#define KPIOR_COLUMNOCONTRL_SHIFT 16
+#define KPIOR_COLUMNOCONTRL_MASK 0x00FF0000
+#define KPIOR_COLUMN_IO_DATA_SHIFT 0
+
+#define KPEMR0_OFFSET 0x00000090
+#define KPEMR1_OFFSET 0x00000094
+#define KPEMR2_OFFSET 0x00000098
+#define KPEMR3_OFFSET 0x0000009C
+#define KPEMR_EDGETYPE_BOTH 3
+
+#define KPSSR0_OFFSET 0x000000A0
+#define KPSSR1_OFFSET 0x000000A4
+#define KPSSRN_OFFSET(reg_n) (KPSSR0_OFFSET + 4 * (reg_n))
+#define KPIMR0_OFFSET 0x000000B0
+#define KPIMR1_OFFSET 0x000000B4
+#define KPICR0_OFFSET 0x000000B8
+#define KPICR1_OFFSET 0x000000BC
+#define KPICRN_OFFSET(reg_n) (KPICR0_OFFSET + 4 * (reg_n))
+#define KPISR0_OFFSET 0x000000C0
+#define KPISR1_OFFSET 0x000000C4
+
+#define KPCR_STATUSFILTERTYPE_MAX 7
+#define KPCR_COLFILTERTYPE_MAX 7
+
+/* Macros to determine the row/column from a bit that is set in SSR0/1. */
+#define BIT_TO_ROW_SSRN(bit_nr, reg_n) (((bit_nr) >> 3) + 4 * (reg_n))
+#define BIT_TO_COL(bit_nr) ((bit_nr) % 8)
+
+/* Structure representing various run-time entities */
+struct bcm_kp {
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+ struct input_dev *input_dev;
+ unsigned long last_state[2];
+ unsigned int n_rows;
+ unsigned int n_cols;
+ u32 kpcr;
+ u32 kpior;
+ u32 kpemr;
+ u32 imr0_val;
+ u32 imr1_val;
+};
+
+/*
+ * Returns the keycode from the input device keymap given the row and
+ * column.
+ */
+static int bcm_kp_get_keycode(struct bcm_kp *kp, int row, int col)
+{
+ unsigned int row_shift = get_count_order(kp->n_cols);
+ unsigned short *keymap = kp->input_dev->keycode;
+
+ return keymap[MATRIX_SCAN_CODE(row, col, row_shift)];
+}
+
+static void bcm_kp_report_keys(struct bcm_kp *kp, int reg_num, int pull_mode)
+{
+ unsigned long state, change;
+ int bit_nr;
+ int key_press;
+ int row, col;
+ unsigned int keycode;
+
+ /* Clear interrupts */
+ writel(0xFFFFFFFF, kp->base + KPICRN_OFFSET(reg_num));
+
+ state = readl(kp->base + KPSSRN_OFFSET(reg_num));
+ change = kp->last_state[reg_num] ^ state;
+ kp->last_state[reg_num] = state;
+
+ for_each_set_bit(bit_nr, &change, BITS_PER_LONG) {
+ key_press = state & BIT(bit_nr);
+ /* The meaning of SSR register depends on pull mode. */
+ key_press = pull_mode ? !key_press : key_press;
+ row = BIT_TO_ROW_SSRN(bit_nr, reg_num);
+ col = BIT_TO_COL(bit_nr);
+ keycode = bcm_kp_get_keycode(kp, row, col);
+ input_report_key(kp->input_dev, keycode, key_press);
+ }
+}
+
+static irqreturn_t bcm_kp_isr_thread(int irq, void *dev_id)
+{
+ struct bcm_kp *kp = dev_id;
+ int pull_mode = (kp->kpcr >> KPCR_MODE_SHIFT) & KPCR_MODE_MASK;
+ int reg_num;
+
+ for (reg_num = 0; reg_num <= 1; reg_num++)
+ bcm_kp_report_keys(kp, reg_num, pull_mode);
+
+ input_sync(kp->input_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm_kp_start(struct bcm_kp *kp)
+{
+ int error;
+
+ if (kp->clk) {
+ error = clk_prepare_enable(kp->clk);
+ if (error)
+ return error;
+ }
+
+ writel(kp->kpior, kp->base + KPIOR_OFFSET);
+
+ writel(kp->imr0_val, kp->base + KPIMR0_OFFSET);
+ writel(kp->imr1_val, kp->base + KPIMR1_OFFSET);
+
+ writel(kp->kpemr, kp->base + KPEMR0_OFFSET);
+ writel(kp->kpemr, kp->base + KPEMR1_OFFSET);
+ writel(kp->kpemr, kp->base + KPEMR2_OFFSET);
+ writel(kp->kpemr, kp->base + KPEMR3_OFFSET);
+
+ writel(0xFFFFFFFF, kp->base + KPICR0_OFFSET);
+ writel(0xFFFFFFFF, kp->base + KPICR1_OFFSET);
+
+ kp->last_state[0] = readl(kp->base + KPSSR0_OFFSET);
+ kp->last_state[1] = readl(kp->base + KPSSR1_OFFSET);
+
+ writel(kp->kpcr | KPCR_ENABLE, kp->base + KPCR_OFFSET);
+
+ return 0;
+}
+
+static void bcm_kp_stop(const struct bcm_kp *kp)
+{
+ u32 val;
+
+ val = readl(kp->base + KPCR_OFFSET);
+ val &= ~KPCR_ENABLE;
+ writel(0, kp->base + KPCR_OFFSET);
+ writel(0, kp->base + KPIMR0_OFFSET);
+ writel(0, kp->base + KPIMR1_OFFSET);
+ writel(0xFFFFFFFF, kp->base + KPICR0_OFFSET);
+ writel(0xFFFFFFFF, kp->base + KPICR1_OFFSET);
+
+ if (kp->clk)
+ clk_disable_unprepare(kp->clk);
+}
+
+static int bcm_kp_open(struct input_dev *dev)
+{
+ struct bcm_kp *kp = input_get_drvdata(dev);
+
+ return bcm_kp_start(kp);
+}
+
+static void bcm_kp_close(struct input_dev *dev)
+{
+ struct bcm_kp *kp = input_get_drvdata(dev);
+
+ bcm_kp_stop(kp);
+}
+
+static int bcm_kp_matrix_key_parse_dt(struct bcm_kp *kp)
+{
+ struct device *dev = kp->input_dev->dev.parent;
+ struct device_node *np = dev->of_node;
+ int error;
+ unsigned int dt_val;
+ unsigned int i;
+ unsigned int num_rows, col_mask, rows_set;
+
+ /* Initialize the KPCR Keypad Configuration Register */
+ kp->kpcr = KPCR_STATUSFILTERENABLE | KPCR_COLFILTERENABLE;
+
+ error = matrix_keypad_parse_of_params(dev, &kp->n_rows, &kp->n_cols);
+ if (error) {
+ dev_err(dev, "failed to parse kp params\n");
+ return error;
+ }
+
+ /* Set row width for the ASIC block. */
+ kp->kpcr |= (kp->n_rows - 1) << KPCR_ROWWIDTH_SHIFT;
+
+ /* Set column width for the ASIC block. */
+ kp->kpcr |= (kp->n_cols - 1) << KPCR_COLUMNWIDTH_SHIFT;
+
+ /* Configure the IMR registers */
+
+ /*
+ * IMR registers contain interrupt enable bits for 8x8 matrix
+ * IMR0 register format: <row3> <row2> <row1> <row0>
+ * IMR1 register format: <row7> <row6> <row5> <row4>
+ */
+ col_mask = (1 << (kp->n_cols)) - 1;
+ num_rows = kp->n_rows;
+
+ /* Set column bits in rows 0 to 3 in IMR0 */
+ kp->imr0_val = col_mask;
+
+ rows_set = 1;
+ while (--num_rows && rows_set++ < 4)
+ kp->imr0_val |= kp->imr0_val << MAX_COLS;
+
+ /* Set column bits in rows 4 to 7 in IMR1 */
+ kp->imr1_val = 0;
+ if (num_rows) {
+ kp->imr1_val = col_mask;
+ while (--num_rows)
+ kp->imr1_val |= kp->imr1_val << MAX_COLS;
+ }
+
+ /* Initialize the KPEMR Keypress Edge Mode Registers */
+ /* Trigger on both edges */
+ kp->kpemr = 0;
+ for (i = 0; i <= 30; i += 2)
+ kp->kpemr |= (KPEMR_EDGETYPE_BOTH << i);
+
+ /*
+ * Obtain the Status filter debounce value and verify against the
+ * possible values specified in the DT binding.
+ */
+ of_property_read_u32(np, "status-debounce-filter-period", &dt_val);
+
+ if (dt_val > KPCR_STATUSFILTERTYPE_MAX) {
+ dev_err(dev, "Invalid Status filter debounce value %d\n",
+ dt_val);
+ return -EINVAL;
+ }
+
+ kp->kpcr |= dt_val << KPCR_STATUSFILTERTYPE_SHIFT;
+
+ /*
+ * Obtain the Column filter debounce value and verify against the
+ * possible values specified in the DT binding.
+ */
+ of_property_read_u32(np, "col-debounce-filter-period", &dt_val);
+
+ if (dt_val > KPCR_COLFILTERTYPE_MAX) {
+ dev_err(dev, "Invalid Column filter debounce value %d\n",
+ dt_val);
+ return -EINVAL;
+ }
+
+ kp->kpcr |= dt_val << KPCR_COLFILTERTYPE_SHIFT;
+
+ /*
+ * Determine whether the rows or the columns
+ * should be configured as outputs.
+ */
+ if (of_property_read_bool(np, "row-output-enabled")) {
+ /*
+ * Set RowOContrl or ColumnOContrl in KPIOR
+ * to the number of pins to drive as outputs
+ */
+ kp->kpior = ((1 << kp->n_rows) - 1) <<
+ KPIOR_ROWOCONTRL_SHIFT;
+ } else {
+ kp->kpior = ((1 << kp->n_cols) - 1) <<
+ KPIOR_COLUMNOCONTRL_SHIFT;
+ }
+
+ /*
+ * Determine if the scan pull up needs to be enabled
+ */
+ if (of_property_read_bool(np, "pull-up-enabled"))
+ kp->kpcr |= KPCR_MODE;
+
+ dev_dbg(dev, "n_rows=%d n_col=%d kpcr=%x kpior=%x kpemr=%x\n",
+ kp->n_rows, kp->n_cols,
+ kp->kpcr, kp->kpior, kp->kpemr);
+
+ return 0;
+}
+
+
+static int bcm_kp_probe(struct platform_device *pdev)
+{
+ struct bcm_kp *kp;
+ struct input_dev *input_dev;
+ struct resource *res;
+ int error;
+
+ kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
+ if (!kp)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
+ dev_err(&pdev->dev, "failed to allocate the input device\n");
+ return -ENOMEM;
+ }
+
+ __set_bit(EV_KEY, input_dev->evbit);
+
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (of_property_read_bool(pdev->dev.of_node, "autorepeat"))
+ __set_bit(EV_REP, input_dev->evbit);
+
+ input_dev->name = pdev->name;
+ input_dev->phys = "keypad/input0";
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->open = bcm_kp_open;
+ input_dev->close = bcm_kp_close;
+
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0x0001;
+ input_dev->id.product = 0x0001;
+ input_dev->id.version = 0x0100;
+
+ input_set_drvdata(input_dev, kp);
+
+ kp->input_dev = input_dev;
+
+ platform_set_drvdata(pdev, kp);
+
+ error = bcm_kp_matrix_key_parse_dt(kp);
+ if (error)
+ return error;
+
+ error = matrix_keypad_build_keymap(NULL, NULL,
+ kp->n_rows, kp->n_cols,
+ NULL, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ return error;
+ }
+
+ /* Get the KEYPAD base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Missing keypad base address resource\n");
+ return -ENODEV;
+ }
+
+ kp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kp->base))
+ return PTR_ERR(kp->base);
+
+ /* Enable clock */
+ kp->clk = devm_clk_get(&pdev->dev, "peri_clk");
+ if (IS_ERR(kp->clk)) {
+ error = PTR_ERR(kp->clk);
+ if (error != -ENOENT) {
+ if (error != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return error;
+ }
+ dev_dbg(&pdev->dev,
+ "No clock specified. Assuming it's enabled\n");
+ kp->clk = NULL;
+ } else {
+ unsigned int desired_rate;
+ long actual_rate;
+
+ error = of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &desired_rate);
+ if (error < 0)
+ desired_rate = DEFAULT_CLK_HZ;
+
+ actual_rate = clk_round_rate(kp->clk, desired_rate);
+ if (actual_rate <= 0)
+ return -EINVAL;
+
+ error = clk_set_rate(kp->clk, actual_rate);
+ if (error)
+ return error;
+
+ error = clk_prepare_enable(kp->clk);
+ if (error)
+ return error;
+ }
+
+ /* Put the kp into a known sane state */
+ bcm_kp_stop(kp);
+
+ kp->irq = platform_get_irq(pdev, 0);
+ if (kp->irq < 0) {
+ dev_err(&pdev->dev, "no IRQ specified\n");
+ return -EINVAL;
+ }
+
+ error = devm_request_threaded_irq(&pdev->dev, kp->irq,
+ NULL, bcm_kp_isr_thread,
+ IRQF_ONESHOT, pdev->name, kp);
+ if (error) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return error;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm_kp_of_match[] = {
+ { .compatible = "brcm,bcm-keypad" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_kp_of_match);
+
+static struct platform_driver bcm_kp_device_driver = {
+ .probe = bcm_kp_probe,
+ .driver = {
+ .name = "bcm-keypad",
+ .of_match_table = of_match_ptr(bcm_kp_of_match),
+ }
+};
+
+module_platform_driver(bcm_kp_device_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("BCM Keypad Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index ffa989f2c785..b50c5b8b8a4d 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -148,16 +148,19 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
static int cros_ec_keyb_get_state(struct cros_ec_keyb *ckdev, uint8_t *kb_state)
{
+ int ret;
struct cros_ec_command msg = {
- .version = 0,
.command = EC_CMD_MKBP_STATE,
- .outdata = NULL,
- .outsize = 0,
- .indata = kb_state,
.insize = ckdev->cols,
};
- return cros_ec_cmd_xfer(ckdev->ec, &msg);
+ ret = cros_ec_cmd_xfer(ckdev->ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ memcpy(kb_state, msg.indata, ckdev->cols);
+
+ return 0;
}
static irqreturn_t cros_ec_keyb_irq(int irq, void *data)
@@ -338,7 +341,7 @@ static int cros_ec_keyb_resume(struct device *dev)
* wake source (e.g. the lid is open and the user might press a key to
* wake) then the key scan buffer should be preserved.
*/
- if (ckdev->ec->was_wake_device)
+ if (!ckdev->ec->was_wake_device)
cros_ec_keyb_clear_keyboard(ckdev);
return 0;
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 90df4df58b07..097d7216d98e 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -125,7 +125,7 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
device_for_each_child_node(dev, child) {
struct gpio_desc *desc;
- desc = devm_get_gpiod_from_child(dev, child);
+ desc = devm_get_gpiod_from_child(dev, NULL, child);
if (IS_ERR(desc)) {
error = PTR_ERR(desc);
if (error != -EPROBE_DEFER)
diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c
new file mode 100644
index 000000000000..602900d1f937
--- /dev/null
+++ b/drivers/input/keyboard/ipaq-micro-keys.c
@@ -0,0 +1,168 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * h3600 atmel micro companion support, key subdevice
+ * based on previous kernel 2.4 version
+ * Author : Alessandro Gardich <gremlin@gremlin.it>
+ * Author : Linus Walleij <linus.walleij@linaro.org>
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/pm.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ipaq-micro.h>
+
+struct ipaq_micro_keys {
+ struct ipaq_micro *micro;
+ struct input_dev *input;
+ u16 *codes;
+};
+
+static const u16 micro_keycodes[] = {
+ KEY_RECORD, /* 1: Record button */
+ KEY_CALENDAR, /* 2: Calendar */
+ KEY_ADDRESSBOOK, /* 3: Contacts (looks like Outlook) */
+ KEY_MAIL, /* 4: Envelope (Q on older iPAQs) */
+ KEY_HOMEPAGE, /* 5: Start (looks like swoopy arrow) */
+ KEY_UP, /* 6: Up */
+ KEY_RIGHT, /* 7: Right */
+ KEY_LEFT, /* 8: Left */
+ KEY_DOWN, /* 9: Down */
+};
+
+static void micro_key_receive(void *data, int len, unsigned char *msg)
+{
+ struct ipaq_micro_keys *keys = data;
+ int key, down;
+
+ down = 0x80 & msg[0];
+ key = 0x7f & msg[0];
+
+ if (key < ARRAY_SIZE(micro_keycodes)) {
+ input_report_key(keys->input, keys->codes[key], down);
+ input_sync(keys->input);
+ }
+}
+
+static void micro_key_start(struct ipaq_micro_keys *keys)
+{
+ spin_lock(&keys->micro->lock);
+ keys->micro->key = micro_key_receive;
+ keys->micro->key_data = keys;
+ spin_unlock(&keys->micro->lock);
+}
+
+static void micro_key_stop(struct ipaq_micro_keys *keys)
+{
+ spin_lock(&keys->micro->lock);
+ keys->micro->key = NULL;
+ keys->micro->key_data = NULL;
+ spin_unlock(&keys->micro->lock);
+}
+
+static int micro_key_open(struct input_dev *input)
+{
+ struct ipaq_micro_keys *keys = input_get_drvdata(input);
+
+ micro_key_start(keys);
+
+ return 0;
+}
+
+static void micro_key_close(struct input_dev *input)
+{
+ struct ipaq_micro_keys *keys = input_get_drvdata(input);
+
+ micro_key_stop(keys);
+}
+
+static int micro_key_probe(struct platform_device *pdev)
+{
+ struct ipaq_micro_keys *keys;
+ int error;
+ int i;
+
+ keys = devm_kzalloc(&pdev->dev, sizeof(*keys), GFP_KERNEL);
+ if (!keys)
+ return -ENOMEM;
+
+ keys->micro = dev_get_drvdata(pdev->dev.parent);
+
+ keys->input = devm_input_allocate_device(&pdev->dev);
+ if (!keys->input)
+ return -ENOMEM;
+
+ keys->input->keycodesize = sizeof(micro_keycodes[0]);
+ keys->input->keycodemax = ARRAY_SIZE(micro_keycodes);
+ keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
+ keys->input->keycodesize * keys->input->keycodemax,
+ GFP_KERNEL);
+ keys->input->keycode = keys->codes;
+
+ __set_bit(EV_KEY, keys->input->evbit);
+ for (i = 0; i < ARRAY_SIZE(micro_keycodes); i++)
+ __set_bit(micro_keycodes[i], keys->input->keybit);
+
+ keys->input->name = "h3600 micro keys";
+ keys->input->open = micro_key_open;
+ keys->input->close = micro_key_close;
+ input_set_drvdata(keys->input, keys);
+
+ error = input_register_device(keys->input);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, keys);
+ return 0;
+}
+
+static int __maybe_unused micro_key_suspend(struct device *dev)
+{
+ struct ipaq_micro_keys *keys = dev_get_drvdata(dev);
+
+ micro_key_stop(keys);
+
+ return 0;
+}
+
+static int __maybe_unused micro_key_resume(struct device *dev)
+{
+ struct ipaq_micro_keys *keys = dev_get_drvdata(dev);
+ struct input_dev *input = keys->input;
+
+ mutex_lock(&input->mutex);
+
+ if (input->users)
+ micro_key_start(keys);
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(micro_key_dev_pm_ops,
+ micro_key_suspend, micro_key_resume);
+
+static struct platform_driver micro_key_device_driver = {
+ .driver = {
+ .name = "ipaq-micro-keys",
+ .pm = &micro_key_dev_pm_ops,
+ },
+ .probe = micro_key_probe,
+};
+module_platform_driver(micro_key_device_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("driver for iPAQ Atmel micro keys");
+MODULE_ALIAS("platform:ipaq-micro-keys");
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 9081cbef11ea..0ad422b8a260 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -1,6 +1,6 @@
/*
* LM8333 keypad driver
- * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -231,6 +231,6 @@ static struct i2c_driver lm8333_driver = {
};
module_i2c_driver(lm8333_driver);
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("LM8333 keyboard driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 563932500ff1..31c606a4dd31 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -70,6 +70,28 @@
#define TC3589x_KBD_INT_CLR 0x1
/**
+ * struct tc3589x_keypad_platform_data - platform specific keypad data
+ * @keymap_data: matrix scan code table for keycodes
+ * @krow: mask for available rows, value is 0xFF
+ * @kcol: mask for available columns, value is 0xFF
+ * @debounce_period: platform specific debounce time
+ * @settle_time: platform specific settle down time
+ * @irqtype: type of interrupt, falling or rising edge
+ * @enable_wakeup: specifies if keypad event can wake up system from sleep
+ * @no_autorepeat: flag for auto repetition
+ */
+struct tc3589x_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+ u8 krow;
+ u8 kcol;
+ u8 debounce_period;
+ u8 settle_time;
+ unsigned long irqtype;
+ bool enable_wakeup;
+ bool no_autorepeat;
+};
+
+/**
* struct tc_keypad - data structure used by keypad driver
* @tc3589x: pointer to tc35893
* @input: pointer to input device object
@@ -296,7 +318,6 @@ static void tc3589x_keypad_close(struct input_dev *input)
tc3589x_keypad_disable(keypad);
}
-#ifdef CONFIG_OF
static const struct tc3589x_keypad_platform_data *
tc3589x_keypad_of_probe(struct device *dev)
{
@@ -346,14 +367,6 @@ tc3589x_keypad_of_probe(struct device *dev)
return plat;
}
-#else
-static inline const struct tc3589x_keypad_platform_data *
-tc3589x_keypad_of_probe(struct device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
-
static int tc3589x_keypad_probe(struct platform_device *pdev)
{
@@ -363,13 +376,10 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
const struct tc3589x_keypad_platform_data *plat;
int error, irq;
- plat = tc3589x->pdata->keypad;
- if (!plat) {
- plat = tc3589x_keypad_of_probe(&pdev->dev);
- if (IS_ERR(plat)) {
- dev_err(&pdev->dev, "invalid keypad platform data\n");
- return PTR_ERR(plat);
- }
+ plat = tc3589x_keypad_of_probe(&pdev->dev);
+ if (IS_ERR(plat)) {
+ dev_err(&pdev->dev, "invalid keypad platform data\n");
+ return PTR_ERR(plat);
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 6deb8dae3205..4436ab1b9735 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -115,6 +115,18 @@ config INPUT_PCSPKR
To compile this driver as a module, choose M here: the
module will be called pcspkr.
+config INPUT_PM8941_PWRKEY
+ tristate "Qualcomm PM8941 power key support"
+ depends on MFD_SPMI_PMIC
+ help
+ Say Y here if you want support for the power key usually found
+ on boards using a Qualcomm PM8941 compatible PMIC.
+
+ If unsure, say Y.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pm8941-pwrkey.
+
config INPUT_PM8XXX_VIBRATOR
tristate "Qualcomm PM8XXX vibrator support"
depends on MFD_PM8XXX
@@ -165,6 +177,18 @@ config INPUT_MAX77693_HAPTIC
To compile this driver as module, choose M here: the
module will be called max77693-haptic.
+config INPUT_MAX77843_HAPTIC
+ tristate "MAXIM MAX77843 haptic controller support"
+ depends on MFD_MAX77843 && REGULATOR
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for the haptic controller on the
+ MAXIM MAX77843 chip. The driver supports the ff-memless interface
+ of the input framework.
+
+ To compile this driver as module, choose M here: the
+ module will be called max77843-haptic.
+
config INPUT_MAX8925_ONKEY
tristate "MAX8925 ONKEY support"
depends on MFD_MAX8925
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 403a1a54a76c..78ba4c1b8532 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
+obj-$(CONFIG_INPUT_MAX77843_HAPTIC) += max77843-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
+obj-$(CONFIG_INPUT_PM8941_PWRKEY) += pm8941-pwrkey.o
obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o
obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
diff --git a/drivers/input/misc/max77843-haptic.c b/drivers/input/misc/max77843-haptic.c
new file mode 100644
index 000000000000..dccbb465a055
--- /dev/null
+++ b/drivers/input/misc/max77843-haptic.c
@@ -0,0 +1,358 @@
+/*
+ * MAXIM MAX77843 Haptic device driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/mfd/max77843-private.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define MAX_MAGNITUDE_SHIFT 16
+
+enum max77843_haptic_motor_type {
+ MAX77843_HAPTIC_ERM = 0,
+ MAX77843_HAPTIC_LRA,
+};
+
+enum max77843_haptic_pwm_divisor {
+ MAX77843_HAPTIC_PWM_DIVISOR_32 = 0,
+ MAX77843_HAPTIC_PWM_DIVISOR_64,
+ MAX77843_HAPTIC_PWM_DIVISOR_128,
+ MAX77843_HAPTIC_PWM_DIVISOR_256,
+};
+
+struct max77843_haptic {
+ struct regmap *regmap_haptic;
+ struct device *dev;
+ struct input_dev *input_dev;
+ struct pwm_device *pwm_dev;
+ struct regulator *motor_reg;
+ struct work_struct work;
+ struct mutex mutex;
+
+ unsigned int magnitude;
+ unsigned int pwm_duty;
+
+ bool active;
+ bool suspended;
+
+ enum max77843_haptic_motor_type type;
+ enum max77843_haptic_pwm_divisor pwm_divisor;
+};
+
+static int max77843_haptic_set_duty_cycle(struct max77843_haptic *haptic)
+{
+ int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2;
+ int error;
+
+ error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period);
+ if (error) {
+ dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int max77843_haptic_bias(struct max77843_haptic *haptic, bool on)
+{
+ int error;
+
+ error = regmap_update_bits(haptic->regmap_haptic,
+ MAX77843_SYS_REG_MAINCTRL1,
+ MAX77843_MAINCTRL1_BIASEN_MASK,
+ on << MAINCTRL1_BIASEN_SHIFT);
+ if (error) {
+ dev_err(haptic->dev, "failed to %s bias: %d\n",
+ on ? "enable" : "disable", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int max77843_haptic_config(struct max77843_haptic *haptic, bool enable)
+{
+ unsigned int value;
+ int error;
+
+ value = (haptic->type << MCONFIG_MODE_SHIFT) |
+ (enable << MCONFIG_MEN_SHIFT) |
+ (haptic->pwm_divisor << MCONFIG_PDIV_SHIFT);
+
+ error = regmap_write(haptic->regmap_haptic,
+ MAX77843_HAP_REG_MCONFIG, value);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to update haptic config: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int max77843_haptic_enable(struct max77843_haptic *haptic)
+{
+ int error;
+
+ if (haptic->active)
+ return 0;
+
+ error = pwm_enable(haptic->pwm_dev);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to enable pwm device: %d\n", error);
+ return error;
+ }
+
+ error = max77843_haptic_config(haptic, true);
+ if (error)
+ goto err_config;
+
+ haptic->active = true;
+
+ return 0;
+
+err_config:
+ pwm_disable(haptic->pwm_dev);
+
+ return error;
+}
+
+static int max77843_haptic_disable(struct max77843_haptic *haptic)
+{
+ int error;
+
+ if (!haptic->active)
+ return 0;
+
+ error = max77843_haptic_config(haptic, false);
+ if (error)
+ return error;
+
+ pwm_disable(haptic->pwm_dev);
+
+ haptic->active = false;
+
+ return 0;
+}
+
+static void max77843_haptic_play_work(struct work_struct *work)
+{
+ struct max77843_haptic *haptic =
+ container_of(work, struct max77843_haptic, work);
+ int error;
+
+ mutex_lock(&haptic->mutex);
+
+ if (haptic->suspended)
+ goto out_unlock;
+
+ if (haptic->magnitude) {
+ error = max77843_haptic_set_duty_cycle(haptic);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to set duty cycle: %d\n", error);
+ goto out_unlock;
+ }
+
+ error = max77843_haptic_enable(haptic);
+ if (error)
+ dev_err(haptic->dev,
+ "cannot enable haptic: %d\n", error);
+ } else {
+ error = max77843_haptic_disable(haptic);
+ if (error)
+ dev_err(haptic->dev,
+ "cannot disable haptic: %d\n", error);
+ }
+
+out_unlock:
+ mutex_unlock(&haptic->mutex);
+}
+
+static int max77843_haptic_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct max77843_haptic *haptic = input_get_drvdata(dev);
+ u64 period_mag_multi;
+
+ haptic->magnitude = effect->u.rumble.strong_magnitude;
+ if (!haptic->magnitude)
+ haptic->magnitude = effect->u.rumble.weak_magnitude;
+
+ period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
+ haptic->pwm_duty = (unsigned int)(period_mag_multi >>
+ MAX_MAGNITUDE_SHIFT);
+
+ schedule_work(&haptic->work);
+
+ return 0;
+}
+
+static int max77843_haptic_open(struct input_dev *dev)
+{
+ struct max77843_haptic *haptic = input_get_drvdata(dev);
+ int error;
+
+ error = max77843_haptic_bias(haptic, true);
+ if (error)
+ return error;
+
+ error = regulator_enable(haptic->motor_reg);
+ if (error) {
+ dev_err(haptic->dev,
+ "failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void max77843_haptic_close(struct input_dev *dev)
+{
+ struct max77843_haptic *haptic = input_get_drvdata(dev);
+ int error;
+
+ cancel_work_sync(&haptic->work);
+ max77843_haptic_disable(haptic);
+
+ error = regulator_disable(haptic->motor_reg);
+ if (error)
+ dev_err(haptic->dev,
+ "failed to disable regulator: %d\n", error);
+
+ max77843_haptic_bias(haptic, false);
+}
+
+static int max77843_haptic_probe(struct platform_device *pdev)
+{
+ struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
+ struct max77843_haptic *haptic;
+ int error;
+
+ haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
+ if (!haptic)
+ return -ENOMEM;
+
+ haptic->regmap_haptic = max77843->regmap;
+ haptic->dev = &pdev->dev;
+ haptic->type = MAX77843_HAPTIC_LRA;
+ haptic->pwm_divisor = MAX77843_HAPTIC_PWM_DIVISOR_128;
+
+ INIT_WORK(&haptic->work, max77843_haptic_play_work);
+ mutex_init(&haptic->mutex);
+
+ haptic->pwm_dev = devm_pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(haptic->pwm_dev)) {
+ dev_err(&pdev->dev, "failed to get pwm device\n");
+ return PTR_ERR(haptic->pwm_dev);
+ }
+
+ haptic->motor_reg = devm_regulator_get_exclusive(&pdev->dev, "haptic");
+ if (IS_ERR(haptic->motor_reg)) {
+ dev_err(&pdev->dev, "failed to get regulator\n");
+ return PTR_ERR(haptic->motor_reg);
+ }
+
+ haptic->input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!haptic->input_dev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ haptic->input_dev->name = "max77843-haptic";
+ haptic->input_dev->id.version = 1;
+ haptic->input_dev->dev.parent = &pdev->dev;
+ haptic->input_dev->open = max77843_haptic_open;
+ haptic->input_dev->close = max77843_haptic_close;
+ input_set_drvdata(haptic->input_dev, haptic);
+ input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);
+
+ error = input_ff_create_memless(haptic->input_dev, NULL,
+ max77843_haptic_play_effect);
+ if (error) {
+ dev_err(&pdev->dev, "failed to create force-feedback\n");
+ return error;
+ }
+
+ error = input_register_device(haptic->input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ platform_set_drvdata(pdev, haptic);
+
+ return 0;
+}
+
+static int __maybe_unused max77843_haptic_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max77843_haptic *haptic = platform_get_drvdata(pdev);
+ int error;
+
+ error = mutex_lock_interruptible(&haptic->mutex);
+ if (error)
+ return error;
+
+ max77843_haptic_disable(haptic);
+
+ haptic->suspended = true;
+
+ mutex_unlock(&haptic->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused max77843_haptic_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max77843_haptic *haptic = platform_get_drvdata(pdev);
+ unsigned int magnitude;
+
+ mutex_lock(&haptic->mutex);
+
+ haptic->suspended = false;
+
+ magnitude = ACCESS_ONCE(haptic->magnitude);
+ if (magnitude)
+ max77843_haptic_enable(haptic);
+
+ mutex_unlock(&haptic->mutex);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(max77843_haptic_pm_ops,
+ max77843_haptic_suspend, max77843_haptic_resume);
+
+static struct platform_driver max77843_haptic_driver = {
+ .driver = {
+ .name = "max77843-haptic",
+ .pm = &max77843_haptic_pm_ops,
+ },
+ .probe = max77843_haptic_probe,
+};
+module_platform_driver(max77843_haptic_driver);
+
+MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
+MODULE_DESCRIPTION("MAXIM MAX77843 Haptic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 98228773a111..19c73574458e 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -174,12 +174,13 @@ static int mma8450_probe(struct i2c_client *c,
struct mma8450 *m;
int err;
- m = kzalloc(sizeof(struct mma8450), GFP_KERNEL);
- idev = input_allocate_polled_device();
- if (!m || !idev) {
- err = -ENOMEM;
- goto err_free_mem;
- }
+ m = devm_kzalloc(&c->dev, sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_polled_device(&c->dev);
+ if (!idev)
+ return -ENOMEM;
m->client = c;
m->idev = idev;
@@ -187,7 +188,6 @@ static int mma8450_probe(struct i2c_client *c,
idev->private = m;
idev->input->name = MMA8450_DRV_NAME;
idev->input->id.bustype = BUS_I2C;
- idev->input->dev.parent = &c->dev;
idev->poll = mma8450_poll;
idev->poll_interval = POLL_INTERVAL;
idev->poll_interval_max = POLL_INTERVAL_MAX;
@@ -202,29 +202,12 @@ static int mma8450_probe(struct i2c_client *c,
err = input_register_polled_device(idev);
if (err) {
dev_err(&c->dev, "failed to register polled input device\n");
- goto err_free_mem;
+ return err;
}
i2c_set_clientdata(c, m);
return 0;
-
-err_free_mem:
- input_free_polled_device(idev);
- kfree(m);
- return err;
-}
-
-static int mma8450_remove(struct i2c_client *c)
-{
- struct mma8450 *m = i2c_get_clientdata(c);
- struct input_polled_dev *idev = m->idev;
-
- input_unregister_polled_device(idev);
- input_free_polled_device(idev);
- kfree(m);
-
- return 0;
}
static const struct i2c_device_id mma8450_id[] = {
@@ -242,11 +225,9 @@ MODULE_DEVICE_TABLE(of, mma8450_dt_ids);
static struct i2c_driver mma8450_driver = {
.driver = {
.name = MMA8450_DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = mma8450_dt_ids,
},
.probe = mma8450_probe,
- .remove = mma8450_remove,
.id_table = mma8450_id,
};
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index 1f9b5ee92746..1e1baed63929 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -304,7 +304,7 @@ static SIMPLE_DEV_PM_OPS(palmas_pwron_pm,
palmas_pwron_suspend, palmas_pwron_resume);
#ifdef CONFIG_OF
-static struct of_device_id of_palmas_pwr_match[] = {
+static const struct of_device_id of_palmas_pwr_match[] = {
{ .compatible = "ti,palmas-pwrbutton" },
{ },
};
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
new file mode 100644
index 000000000000..867db8a91372
--- /dev/null
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2014, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+#define PON_REV2 0x01
+
+#define PON_RT_STS 0x10
+#define PON_KPDPWR_N_SET BIT(0)
+
+#define PON_PS_HOLD_RST_CTL 0x5a
+#define PON_PS_HOLD_RST_CTL2 0x5b
+#define PON_PS_HOLD_ENABLE BIT(7)
+#define PON_PS_HOLD_TYPE_MASK 0x0f
+#define PON_PS_HOLD_TYPE_SHUTDOWN 4
+#define PON_PS_HOLD_TYPE_HARD_RESET 7
+
+#define PON_PULL_CTL 0x70
+#define PON_KPDPWR_PULL_UP BIT(1)
+
+#define PON_DBC_CTL 0x71
+#define PON_DBC_DELAY_MASK 0x7
+
+
+struct pm8941_pwrkey {
+ struct device *dev;
+ int irq;
+ u32 baseaddr;
+ struct regmap *regmap;
+ struct input_dev *input;
+
+ unsigned int revision;
+ struct notifier_block reboot_notifier;
+};
+
+static int pm8941_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct pm8941_pwrkey *pwrkey = container_of(nb, struct pm8941_pwrkey,
+ reboot_notifier);
+ unsigned int enable_reg;
+ unsigned int reset_type;
+ int error;
+
+ /* PMICs with revision 0 have the enable bit in the same register as ctrl */
+ if (pwrkey->revision == 0)
+ enable_reg = PON_PS_HOLD_RST_CTL;
+ else
+ enable_reg = PON_PS_HOLD_RST_CTL2;
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + enable_reg,
+ PON_PS_HOLD_ENABLE,
+ 0);
+ if (error)
+ dev_err(pwrkey->dev,
+ "unable to clear ps hold reset enable: %d\n",
+ error);
+
+ /*
+ * Updates of PON_PS_HOLD_ENABLE require 3 sleep cycles between
+ * writes.
+ */
+ usleep_range(100, 1000);
+
+ switch (code) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ reset_type = PON_PS_HOLD_TYPE_SHUTDOWN;
+ break;
+ case SYS_RESTART:
+ default:
+ reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
+ break;
+ }
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
+ PON_PS_HOLD_TYPE_MASK,
+ reset_type);
+ if (error)
+ dev_err(pwrkey->dev, "unable to set ps hold reset type: %d\n",
+ error);
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + enable_reg,
+ PON_PS_HOLD_ENABLE,
+ PON_PS_HOLD_ENABLE);
+ if (error)
+ dev_err(pwrkey->dev, "unable to re-set enable: %d\n", error);
+
+ return NOTIFY_DONE;
+}
+
+static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data)
+{
+ struct pm8941_pwrkey *pwrkey = _data;
+ unsigned int sts;
+ int error;
+
+ error = regmap_read(pwrkey->regmap,
+ pwrkey->baseaddr + PON_RT_STS, &sts);
+ if (error)
+ return IRQ_HANDLED;
+
+ input_report_key(pwrkey->input, KEY_POWER, !!(sts & PON_KPDPWR_N_SET));
+ input_sync(pwrkey->input);
+
+ return IRQ_HANDLED;
+}
+
+static int __maybe_unused pm8941_pwrkey_suspend(struct device *dev)
+{
+ struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(pwrkey->irq);
+
+ return 0;
+}
+
+static int __maybe_unused pm8941_pwrkey_resume(struct device *dev)
+{
+ struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(pwrkey->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pm8941_pwr_key_pm_ops,
+ pm8941_pwrkey_suspend, pm8941_pwrkey_resume);
+
+static int pm8941_pwrkey_probe(struct platform_device *pdev)
+{
+ struct pm8941_pwrkey *pwrkey;
+ bool pull_up;
+ u32 req_delay;
+ int error;
+
+ if (of_property_read_u32(pdev->dev.of_node, "debounce", &req_delay))
+ req_delay = 15625;
+
+ if (req_delay > 2000000 || req_delay == 0) {
+ dev_err(&pdev->dev, "invalid debounce time: %u\n", req_delay);
+ return -EINVAL;
+ }
+
+ pull_up = of_property_read_bool(pdev->dev.of_node, "bias-pull-up");
+
+ pwrkey = devm_kzalloc(&pdev->dev, sizeof(*pwrkey), GFP_KERNEL);
+ if (!pwrkey)
+ return -ENOMEM;
+
+ pwrkey->dev = &pdev->dev;
+
+ pwrkey->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pwrkey->regmap) {
+ dev_err(&pdev->dev, "failed to locate regmap\n");
+ return -ENODEV;
+ }
+
+ pwrkey->irq = platform_get_irq(pdev, 0);
+ if (pwrkey->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return pwrkey->irq;
+ }
+
+ error = of_property_read_u32(pdev->dev.of_node, "reg",
+ &pwrkey->baseaddr);
+ if (error)
+ return error;
+
+ error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2,
+ &pwrkey->revision);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set debounce: %d\n", error);
+ return error;
+ }
+
+ pwrkey->input = devm_input_allocate_device(&pdev->dev);
+ if (!pwrkey->input) {
+ dev_dbg(&pdev->dev, "unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input_set_capability(pwrkey->input, EV_KEY, KEY_POWER);
+
+ pwrkey->input->name = "pm8941_pwrkey";
+ pwrkey->input->phys = "pm8941_pwrkey/input0";
+
+ req_delay = (req_delay << 6) / USEC_PER_SEC;
+ req_delay = ilog2(req_delay);
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + PON_DBC_CTL,
+ PON_DBC_DELAY_MASK,
+ req_delay);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set debounce: %d\n", error);
+ return error;
+ }
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + PON_PULL_CTL,
+ PON_KPDPWR_PULL_UP,
+ pull_up ? PON_KPDPWR_PULL_UP : 0);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set pull: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&pdev->dev, pwrkey->irq,
+ NULL, pm8941_pwrkey_irq,
+ IRQF_ONESHOT,
+ "pm8941_pwrkey", pwrkey);
+ if (error) {
+ dev_err(&pdev->dev, "failed requesting IRQ: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(pwrkey->input);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device: %d\n",
+ error);
+ return error;
+ }
+
+ pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify;
+ error = register_reboot_notifier(&pwrkey->reboot_notifier);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register reboot notifier: %d\n",
+ error);
+ return error;
+ }
+
+ platform_set_drvdata(pdev, pwrkey);
+ device_init_wakeup(&pdev->dev, 1);
+
+ return 0;
+}
+
+static int pm8941_pwrkey_remove(struct platform_device *pdev)
+{
+ struct pm8941_pwrkey *pwrkey = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ unregister_reboot_notifier(&pwrkey->reboot_notifier);
+
+ return 0;
+}
+
+static const struct of_device_id pm8941_pwr_key_id_table[] = {
+ { .compatible = "qcom,pm8941-pwrkey" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
+
+static struct platform_driver pm8941_pwrkey_driver = {
+ .probe = pm8941_pwrkey_probe,
+ .remove = pm8941_pwrkey_remove,
+ .driver = {
+ .name = "pm8941-pwrkey",
+ .pm = &pm8941_pwr_key_pm_ops,
+ .of_match_table = of_match_ptr(pm8941_pwr_key_id_table),
+ },
+};
+module_platform_driver(pm8941_pwrkey_driver);
+
+MODULE_DESCRIPTION("PM8941 Power Key driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index a28ee70ff158..e82edf810d1f 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -50,7 +50,6 @@ static int pwm_beeper_event(struct input_dev *input,
}
if (value == 0) {
- pwm_config(beeper->pwm, 0, 0);
pwm_disable(beeper->pwm);
} else {
period = HZ_TO_NANOSECONDS(value);
@@ -169,12 +168,6 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
pwm_beeper_suspend, pwm_beeper_resume);
-#ifdef CONFIG_PM_SLEEP
-#define PWM_BEEPER_PM_OPS (&pwm_beeper_pm_ops)
-#else
-#define PWM_BEEPER_PM_OPS NULL
-#endif
-
#ifdef CONFIG_OF
static const struct of_device_id pwm_beeper_match[] = {
{ .compatible = "pwm-beeper", },
@@ -187,7 +180,7 @@ static struct platform_driver pwm_beeper_driver = {
.remove = pwm_beeper_remove,
.driver = {
.name = "pwm-beeper",
- .pm = PWM_BEEPER_PM_OPS,
+ .pm = &pwm_beeper_pm_ops,
.of_match_table = of_match_ptr(pwm_beeper_match),
},
};
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 132eb914ea3e..6bf3f1082f71 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -245,7 +245,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(regulator_haptic_pm_ops,
regulator_haptic_suspend, regulator_haptic_resume);
-static struct of_device_id regulator_haptic_dt_match[] = {
+static const struct of_device_id regulator_haptic_dt_match[] = {
{ .compatible = "regulator-haptic" },
{ /* sentinel */ },
};
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
index 54508dec4eb3..a39b62651a4b 100644
--- a/drivers/input/misc/tps65218-pwrbutton.c
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -106,7 +106,7 @@ static int tps65218_pwron_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id of_tps65218_pwr_match[] = {
+static const struct of_device_id of_tps65218_pwr_match[] = {
{ .compatible = "ti,tps65218-pwrbutton" },
{ },
};
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 4658b5d41dd7..7462d2fc8cfe 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -149,6 +149,18 @@ config MOUSE_PS2_FOCALTECH
If unsure, say Y.
+config MOUSE_PS2_VMMOUSE
+ bool "Virtual mouse (vmmouse)"
+ depends on MOUSE_PS2 && X86 && HYPERVISOR_GUEST
+ help
+ Say Y here if you are running under control of a VMware hypervisor
+ (ESXi, Workstation or Fusion). Also make sure that when you enable
+ this option, you remove the xf86-input-vmmouse user-space driver
+ or upgrade it to at least xf86-input-vmmouse 13.0.1, which doesn't
+ load in the presence of an in-kernel vmmouse driver.
+
+ If unsure, say N.
+
config MOUSE_SERIAL
tristate "Serial mouse"
select SERIO
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 8a9c98e76d9c..793300bfbddd 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -36,6 +36,7 @@ psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
psmouse-$(CONFIG_MOUSE_PS2_CYPRESS) += cypress_ps2.o
+psmouse-$(CONFIG_MOUSE_PS2_VMMOUSE) += vmmouse.o
elan_i2c-objs := elan_i2c_core.o
elan_i2c-$(CONFIG_MOUSE_ELAN_I2C_I2C) += elan_i2c_i2c.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 27bcdbc950c9..e6708f6efb4d 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -153,10 +153,18 @@ static const struct alps_protocol_info alps_v7_protocol_data = {
ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT
};
+static const struct alps_protocol_info alps_v8_protocol_data = {
+ ALPS_PROTO_V8, 0x18, 0x18, 0
+};
+
static void alps_set_abs_params_st(struct alps_data *priv,
struct input_dev *dev1);
static void alps_set_abs_params_mt(struct alps_data *priv,
struct input_dev *dev1);
+static void alps_set_abs_params_v7(struct alps_data *priv,
+ struct input_dev *dev1);
+static void alps_set_abs_params_ss4_v2(struct alps_data *priv,
+ struct input_dev *dev1);
/* Packet formats are described in Documentation/input/alps.txt */
@@ -243,6 +251,14 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
return;
}
+ /* Non interleaved V2 dualpoint has separate stick button bits */
+ if (priv->proto_version == ALPS_PROTO_V2 &&
+ priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+ left |= packet[0] & 1;
+ right |= packet[0] & 2;
+ middle |= packet[0] & 4;
+ }
+
alps_report_buttons(dev, dev2, left, right, middle);
/* Convert hardware tap to a reasonable Z value */
@@ -1085,6 +1101,176 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
alps_process_touchpad_packet_v7(psmouse);
}
+static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
+{
+ unsigned char pkt_id = SS4_PACKET_ID_IDLE;
+
+ if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
+ (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) {
+ pkt_id = SS4_PACKET_ID_IDLE;
+ } else if (!(byte[3] & 0x10)) {
+ pkt_id = SS4_PACKET_ID_ONE;
+ } else if (!(byte[3] & 0x20)) {
+ pkt_id = SS4_PACKET_ID_TWO;
+ } else {
+ pkt_id = SS4_PACKET_ID_MULTI;
+ }
+
+ return pkt_id;
+}
+
+static int alps_decode_ss4_v2(struct alps_fields *f,
+ unsigned char *p, struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char pkt_id;
+ unsigned int no_data_x, no_data_y;
+
+ pkt_id = alps_get_pkt_id_ss4_v2(p);
+
+ /* Current packet is 1Finger coordinate packet */
+ switch (pkt_id) {
+ case SS4_PACKET_ID_ONE:
+ f->mt[0].x = SS4_1F_X_V2(p);
+ f->mt[0].y = SS4_1F_Y_V2(p);
+ f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f;
+ f->fingers = 1;
+ f->first_mp = 0;
+ f->is_mp = 0;
+ break;
+
+ case SS4_PACKET_ID_TWO:
+ if (priv->flags & ALPS_BUTTONPAD) {
+ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
+ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
+ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
+ }
+ f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
+
+ if (SS4_IS_MF_CONTINUE(p)) {
+ f->first_mp = 1;
+ } else {
+ f->fingers = 2;
+ f->first_mp = 0;
+ }
+ f->is_mp = 0;
+
+ break;
+
+ case SS4_PACKET_ID_MULTI:
+ if (priv->flags & ALPS_BUTTONPAD) {
+ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
+ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
+ } else {
+ f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
+ f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
+ no_data_y = SS4_MFPACKET_NO_AY;
+ }
+
+ f->first_mp = 0;
+ f->is_mp = 1;
+
+ if (SS4_IS_5F_DETECTED(p)) {
+ f->fingers = 5;
+ } else if (f->mt[3].x == no_data_x &&
+ f->mt[3].y == no_data_y) {
+ f->mt[3].x = 0;
+ f->mt[3].y = 0;
+ f->fingers = 3;
+ } else {
+ f->fingers = 4;
+ }
+ break;
+
+ case SS4_PACKET_ID_IDLE:
+ default:
+ memset(f, 0, sizeof(struct alps_fields));
+ break;
+ }
+
+ f->left = !!(SS4_BTN_V2(p) & 0x01);
+ if (!(priv->flags & ALPS_BUTTONPAD)) {
+ f->right = !!(SS4_BTN_V2(p) & 0x02);
+ f->middle = !!(SS4_BTN_V2(p) & 0x04);
+ }
+
+ return 0;
+}
+
+static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ struct alps_fields *f = &priv->f;
+
+ memset(f, 0, sizeof(struct alps_fields));
+ priv->decode_fields(f, packet, psmouse);
+ if (priv->multi_packet) {
+ /*
+ * Sometimes the first packet will indicate a multi-packet
+ * sequence, but sometimes the next multi-packet would not
+ * come. Check for this, and when it happens process the
+ * position packet as usual.
+ */
+ if (f->is_mp) {
+ /* Now process the 1st packet */
+ priv->decode_fields(f, priv->multi_data, psmouse);
+ } else {
+ priv->multi_packet = 0;
+ }
+ }
+
+ /*
+ * "f.is_mp" would always be '0' after merging the 1st and 2nd packet.
+ * When it is set, it means 2nd packet comes without 1st packet come.
+ */
+ if (f->is_mp)
+ return;
+
+ /* Save the first packet */
+ if (!priv->multi_packet && f->first_mp) {
+ priv->multi_packet = 1;
+ memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
+ return;
+ }
+
+ priv->multi_packet = 0;
+
+ alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);
+
+ input_mt_report_finger_count(dev, f->fingers);
+
+ input_report_key(dev, BTN_LEFT, f->left);
+ input_report_key(dev, BTN_RIGHT, f->right);
+ input_report_key(dev, BTN_MIDDLE, f->middle);
+
+ input_report_abs(dev, ABS_PRESSURE, f->pressure);
+ input_sync(dev);
+}
+
+static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
+{
+ if (psmouse->pktcnt == 4 && ((psmouse->packet[3] & 0x08) != 0x08))
+ return false;
+ if (psmouse->pktcnt == 6 && ((psmouse->packet[5] & 0x10) != 0x0))
+ return false;
+ return true;
+}
+
static DEFINE_MUTEX(alps_mutex);
static void alps_register_bare_ps2_mouse(struct work_struct *work)
@@ -1159,13 +1345,14 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
bool report_buttons)
{
struct alps_data *priv = psmouse->private;
- struct input_dev *dev;
+ struct input_dev *dev, *dev2 = NULL;
/* Figure out which device to use to report the bare packet */
if (priv->proto_version == ALPS_PROTO_V2 &&
(priv->flags & ALPS_DUALPOINT)) {
/* On V2 devices the DualPoint Stick reports bare packets */
dev = priv->dev2;
+ dev2 = psmouse->dev;
} else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
/* Register dev3 mouse if we received PS/2 packet first time */
if (!IS_ERR(priv->dev3))
@@ -1177,7 +1364,7 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
}
if (report_buttons)
- alps_report_buttons(dev, NULL,
+ alps_report_buttons(dev, dev2,
packet[0] & 1, packet[0] & 2, packet[0] & 4);
input_report_rel(dev, REL_X,
@@ -1305,8 +1492,12 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
* a device connected to the external PS/2 port. Because bare PS/2
* protocol does not have enough constant bits to self-synchronize
* properly we only do this if the device is fully synchronized.
+ * A V8 packet's first byte is indistinguishable from a bare PS/2 packet's, so skip this check for V8.
*/
- if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
+ if (priv->proto_version != ALPS_PROTO_V8 &&
+ !psmouse->out_of_sync_cnt &&
+ (psmouse->packet[0] & 0xc8) == 0x08) {
+
if (psmouse->pktcnt == 3) {
alps_report_bare_ps2_packet(psmouse, psmouse->packet,
true);
@@ -1354,8 +1545,10 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
- if (priv->proto_version == ALPS_PROTO_V7 &&
- !alps_is_valid_package_v7(psmouse)) {
+ if ((priv->proto_version == ALPS_PROTO_V7 &&
+ !alps_is_valid_package_v7(psmouse)) ||
+ (priv->proto_version == ALPS_PROTO_V8 &&
+ !alps_is_valid_package_ss4_v2(psmouse))) {
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
psmouse->pktcnt - 1,
psmouse->packet[psmouse->pktcnt - 1]);
@@ -2130,6 +2323,88 @@ error:
return -1;
}
+static int alps_get_otp_values_ss4_v2(struct psmouse *psmouse,
+ unsigned char index, unsigned char otp[])
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ switch (index) {
+ case 0:
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
+ ps2_command(ps2dev, otp, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ break;
+
+ case 1:
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) ||
+ ps2_command(ps2dev, otp, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ break;
+ }
+
+ return 0;
+}
+
+static int alps_update_device_area_ss4_v2(unsigned char otp[][4],
+ struct alps_data *priv)
+{
+ int num_x_electrode;
+ int num_y_electrode;
+ int x_pitch, y_pitch, x_phys, y_phys;
+
+ num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
+ num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+
+ priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+ priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+
+ x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
+ y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+
+ x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
+ y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
+
+ priv->x_res = priv->x_max * 10 / x_phys; /* units / mm */
+ priv->y_res = priv->y_max * 10 / y_phys; /* units / mm */
+
+ return 0;
+}
+
+static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
+ struct alps_data *priv)
+{
+ unsigned char is_btnless;
+
+ is_btnless = (otp[1][1] >> 3) & 0x01;
+
+ if (is_btnless)
+ priv->flags |= ALPS_BUTTONPAD;
+
+ return 0;
+}
+
+static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
+ struct alps_data *priv)
+{
+ unsigned char otp[2][4];
+
+ memset(otp, 0, sizeof(otp));
+
+ if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
+ alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
+ return -1;
+
+ alps_update_device_area_ss4_v2(otp, priv);
+
+ alps_update_btn_info_ss4_v2(otp, priv);
+
+ return 0;
+}
+
static int alps_dolphin_get_device_area(struct psmouse *psmouse,
struct alps_data *priv)
{
@@ -2222,6 +2497,35 @@ error:
return ret;
}
+static int alps_hw_init_ss4_v2(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ char param[2] = {0x64, 0x28};
+ int ret = -1;
+
+ /* enter absolute mode */
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
+ ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE)) {
+ goto error;
+ }
+
+ /* T.B.D. Decrease noise packet number, delete in the future */
+ if (alps_exit_command_mode(psmouse) ||
+ alps_enter_command_mode(psmouse) ||
+ alps_command_mode_write_reg(psmouse, 0x001D, 0x20)) {
+ goto error;
+ }
+ alps_exit_command_mode(psmouse);
+
+ return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+
+error:
+ alps_exit_command_mode(psmouse);
+ return ret;
+}
+
static int alps_set_protocol(struct psmouse *psmouse,
struct alps_data *priv,
const struct alps_protocol_info *protocol)
@@ -2311,7 +2615,7 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->hw_init = alps_hw_init_v7;
priv->process_packet = alps_process_packet_v7;
priv->decode_fields = alps_decode_packet_v7;
- priv->set_abs_params = alps_set_abs_params_mt;
+ priv->set_abs_params = alps_set_abs_params_v7;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
priv->x_max = 0xfff;
@@ -2321,6 +2625,19 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->flags |= ALPS_BUTTONPAD;
break;
+
+ case ALPS_PROTO_V8:
+ priv->hw_init = alps_hw_init_ss4_v2;
+ priv->process_packet = alps_process_packet_ss4_v2;
+ priv->decode_fields = alps_decode_ss4_v2;
+ priv->set_abs_params = alps_set_abs_params_ss4_v2;
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+
+ if (alps_set_defaults_ss4_v2(psmouse, priv))
+ return -EIO;
+
+ break;
}
return 0;
@@ -2389,6 +2706,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
} else if (ec[0] == 0x88 && ec[1] == 0x07 &&
ec[2] >= 0x90 && ec[2] <= 0x9d) {
protocol = &alps_v3_protocol_data;
+ } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
+ e7[2] == 0x14 && ec[1] == 0x02) {
+ protocol = &alps_v8_protocol_data;
} else {
psmouse_dbg(psmouse,
"Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
@@ -2437,10 +2757,11 @@ static void alps_set_abs_params_st(struct alps_data *priv,
{
input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
+ input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
}
-static void alps_set_abs_params_mt(struct alps_data *priv,
- struct input_dev *dev1)
+static void alps_set_abs_params_mt_common(struct alps_data *priv,
+ struct input_dev *dev1)
{
input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0);
input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0);
@@ -2448,15 +2769,44 @@ static void alps_set_abs_params_mt(struct alps_data *priv,
input_abs_set_res(dev1, ABS_MT_POSITION_X, priv->x_res);
input_abs_set_res(dev1, ABS_MT_POSITION_Y, priv->y_res);
- input_mt_init_slots(dev1, MAX_TOUCHES, INPUT_MT_POINTER |
- INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK | INPUT_MT_SEMI_MT);
-
set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+}
+
+static void alps_set_abs_params_mt(struct alps_data *priv,
+ struct input_dev *dev1)
+{
+ alps_set_abs_params_mt_common(priv, dev1);
+ input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
+
+ input_mt_init_slots(dev1, MAX_TOUCHES,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK | INPUT_MT_SEMI_MT);
+}
+
+static void alps_set_abs_params_v7(struct alps_data *priv,
+ struct input_dev *dev1)
+{
+ alps_set_abs_params_mt_common(priv, dev1);
+ set_bit(BTN_TOOL_QUINTTAP, dev1->keybit);
+
+ input_mt_init_slots(dev1, MAX_TOUCHES,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK);
+
+ set_bit(BTN_TOOL_QUINTTAP, dev1->keybit);
+}
+
+static void alps_set_abs_params_ss4_v2(struct alps_data *priv,
+ struct input_dev *dev1)
+{
+ alps_set_abs_params_mt_common(priv, dev1);
+ input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
+ set_bit(BTN_TOOL_QUINTTAP, dev1->keybit);
- /* V7 is real multi-touch */
- if (priv->proto_version == ALPS_PROTO_V7)
- clear_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+ input_mt_init_slots(dev1, MAX_TOUCHES,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK);
}
int alps_init(struct psmouse *psmouse)
@@ -2489,9 +2839,6 @@ int alps_init(struct psmouse *psmouse)
dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
priv->set_abs_params(priv, dev1);
- /* No pressure on V7 */
- if (priv->proto_version != ALPS_PROTO_V7)
- input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
if (priv->flags & ALPS_WHEEL) {
dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL);
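For reference, alps_set_defaults_ss4_v2() above derives the SS4 coordinate range and resolution from two OTP values: an electrode count (x_max = (electrodes - 1) * 256 counts) and an electrode pitch in 0.1 mm steps, from which a resolution in counts per mm is computed. A worked sketch with invented OTP bytes, illustration only:

/* Illustration only -- not part of the patch; the OTP byte values are invented. */
#include <stdio.h>

#define SS4_COUNT_PER_ELECTRODE	256
#define SS4_NUMSENSOR_XOFFSET	7
#define SS4_MIN_PITCH_MM	50	/* in 0.1 mm units, i.e. 5.0 mm */

int main(void)
{
	unsigned char otp_1_0 = 0x99;	/* low nibble: x electrodes, high nibble: y */
	unsigned char otp_1_2 = 0x18;	/* bits 2..4: x pitch, bits 5..7: y pitch */

	int num_x = SS4_NUMSENSOR_XOFFSET + (otp_1_0 & 0x0F);		/* 16 electrodes */
	int x_max = (num_x - 1) * SS4_COUNT_PER_ELECTRODE;		/* 3840 counts */
	int x_pitch = ((otp_1_2 >> 2) & 0x07) + SS4_MIN_PITCH_MM;	/* 56 -> 5.6 mm */
	int x_phys = x_pitch * (num_x - 1);				/* 840 -> 84 mm */
	int x_res = x_max * 10 / x_phys;				/* ~45 counts/mm */

	printf("x_max=%d x_phys=%d.%d mm x_res=%d units/mm\n",
	       x_max, x_phys / 10, x_phys % 10, x_res);
	return 0;
}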
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 02513c0502fc..6dfdccc3a7c6 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -22,14 +22,90 @@
#define ALPS_PROTO_V5 0x500
#define ALPS_PROTO_V6 0x600
#define ALPS_PROTO_V7 0x700 /* t3btl t4s */
+#define ALPS_PROTO_V8 0x800 /* SS4btl SS4s */
-#define MAX_TOUCHES 2
+#define MAX_TOUCHES 4
#define DOLPHIN_COUNT_PER_ELECTRODE 64
#define DOLPHIN_PROFILE_XOFFSET 8 /* x-electrode offset */
#define DOLPHIN_PROFILE_YOFFSET 1 /* y-electrode offset */
/*
+ * enum SS4_PACKET_ID - defines the packet type for V8
+ * SS4_PACKET_ID_IDLE: There is no finger and no button activity.
+ * SS4_PACKET_ID_ONE: There is one finger on the touchpad
+ * or there is button activity.
+ * SS4_PACKET_ID_TWO: There are two or more fingers on the touchpad.
+ * SS4_PACKET_ID_MULTI: There are three or more fingers on the touchpad.
+ */
+enum SS4_PACKET_ID {
+ SS4_PACKET_ID_IDLE = 0,
+ SS4_PACKET_ID_ONE,
+ SS4_PACKET_ID_TWO,
+ SS4_PACKET_ID_MULTI,
+};
+
+#define SS4_COUNT_PER_ELECTRODE 256
+#define SS4_NUMSENSOR_XOFFSET 7
+#define SS4_NUMSENSOR_YOFFSET 7
+#define SS4_MIN_PITCH_MM 50
+
+#define SS4_MASK_NORMAL_BUTTONS 0x07
+
+#define SS4_1F_X_V2(_b) ((_b[0] & 0x0007) | \
+ ((_b[1] << 3) & 0x0078) | \
+ ((_b[1] << 2) & 0x0380) | \
+ ((_b[2] << 5) & 0x1C00) \
+ )
+
+#define SS4_1F_Y_V2(_b) (((_b[2]) & 0x000F) | \
+ ((_b[3] >> 2) & 0x0030) | \
+ ((_b[4] << 6) & 0x03C0) | \
+ ((_b[4] << 5) & 0x0C00) \
+ )
+
+#define SS4_1F_Z_V2(_b) (((_b[5]) & 0x0F) | \
+ ((_b[5] >> 1) & 0x70) | \
+ ((_b[4]) & 0x80) \
+ )
+
+#define SS4_1F_LFB_V2(_b) (((_b[2] >> 4) & 0x01) == 0x01)
+
+#define SS4_MF_LF_V2(_b, _i) ((_b[1 + (_i) * 3] & 0x0004) == 0x0004)
+
+#define SS4_BTN_V2(_b) ((_b[0] >> 5) & SS4_MASK_NORMAL_BUTTONS)
+
+#define SS4_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 5) & 0x00E0) | \
+ ((_b[1 + _i * 3] << 5) & 0x1F00) \
+ )
+
+#define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
+ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
+ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \
+ )
+
+#define SS4_BTL_MF_X_V2(_b, _i) (SS4_STD_MF_X_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 3) & 0x0010) \
+ )
+
+#define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \
+ )
+
+#define SS4_MF_Z_V2(_b, _i) (((_b[1 + (_i) * 3]) & 0x0001) | \
+ ((_b[1 + (_i) * 3] >> 1) & 0x0002) \
+ )
+
+#define SS4_IS_MF_CONTINUE(_b) ((_b[2] & 0x10) == 0x10)
+#define SS4_IS_5F_DETECTED(_b) ((_b[2] & 0x10) == 0x10)
+
+
+#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
+#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */
+#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */
+
+/*
* enum V7_PACKET_ID - defines the packet type for V7
* V7_PACKET_ID_IDLE: There's no finger and no button activity.
* V7_PACKET_ID_TWO: There's one or two non-resting fingers on touchpad
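One non-obvious detail of the SS4 multi-finger decode above: the SS4_MFPACKET_NO_AX/NO_AY constants are exactly what the standard coordinate macros produce when every coordinate bit is set, which is how alps_decode_ss4_v2() recognises that the second slot of a MULTI packet carries no data and reports three fingers. A small standalone check, illustration only (macros copied from the header above):

/* Illustration only -- not part of the patch. */
#include <assert.h>
#include <stdio.h>

#define SS4_STD_MF_X_V2(_b, _i)	(((_b[0 + (_i) * 3] << 5) & 0x00E0) |	\
				 ((_b[1 + (_i) * 3] << 5) & 0x1F00))
#define SS4_STD_MF_Y_V2(_b, _i)	(((_b[1 + (_i) * 3] << 3) & 0x0010) |	\
				 ((_b[2 + (_i) * 3] << 5) & 0x01E0) |	\
				 ((_b[2 + (_i) * 3] << 4) & 0x0E00))

int main(void)
{
	unsigned char p[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	assert(SS4_STD_MF_X_V2(p, 0) == 8160);	/* SS4_MFPACKET_NO_AX */
	assert(SS4_STD_MF_Y_V2(p, 0) == 4080);	/* SS4_MFPACKET_NO_AY */
	printf("no-data X=%d Y=%d\n", SS4_STD_MF_X_V2(p, 0), SS4_STD_MF_Y_V2(p, 0));
	return 0;
}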
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index 58f4f6fa4857..efe148474e7f 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -723,7 +723,7 @@ static ssize_t cyapa_update_suspend_scanrate(struct device *dev,
} else if (sysfs_streq(buf, OFF_MODE_NAME)) {
cyapa->suspend_power_mode = PWR_MODE_OFF;
} else if (!kstrtou16(buf, 10, &sleep_time)) {
- cyapa->suspend_sleep_time = max_t(u16, sleep_time, 1000);
+ cyapa->suspend_sleep_time = min_t(u16, sleep_time, 1000);
cyapa->suspend_power_mode =
cyapa_sleep_time_to_pwr_cmd(cyapa->suspend_sleep_time);
} else {
@@ -840,7 +840,7 @@ static ssize_t cyapa_update_rt_suspend_scanrate(struct device *dev,
if (error)
return error;
- cyapa->runtime_suspend_sleep_time = max_t(u16, time, 1000);
+ cyapa->runtime_suspend_sleep_time = min_t(u16, time, 1000);
cyapa->runtime_suspend_power_mode =
cyapa_sleep_time_to_pwr_cmd(cyapa->runtime_suspend_sleep_time);
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index e100c1b31597..6d5f8a4c1748 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -17,7 +17,7 @@
*/
#ifndef _ELAN_I2C_H
-#define _ELAN_i2C_H
+#define _ELAN_I2C_H
#include <linux/types.h>
@@ -25,6 +25,7 @@
#define ETP_ENABLE_CALIBRATE 0x0002
#define ETP_DISABLE_CALIBRATE 0x0000
#define ETP_DISABLE_POWER 0x0001
+#define ETP_PRESSURE_OFFSET 25
/* IAP Firmware handling */
#define ETP_FW_NAME "elan_i2c.bin"
@@ -79,6 +80,8 @@ struct elan_transport_ops {
struct completion *reset_done);
int (*get_report)(struct i2c_client *client, u8 *report);
+ int (*get_pressure_adjustment)(struct i2c_client *client,
+ int *adjustment);
};
extern const struct elan_transport_ops elan_smbus_ops, elan_i2c_ops;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 7ce8bfe22d7e..fd5068b2542d 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,7 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.6
+ * Version: 1.5.7
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,8 +40,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.5.6"
-#define ETP_PRESSURE_OFFSET 25
+#define ELAN_DRIVER_VERSION "1.5.7"
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
#define ETP_FINGER_WIDTH 15
@@ -53,6 +52,7 @@
#define ETP_REPORT_ID_OFFSET 2
#define ETP_TOUCH_INFO_OFFSET 3
#define ETP_FINGER_DATA_OFFSET 4
+#define ETP_HOVER_INFO_OFFSET 30
#define ETP_MAX_REPORT_LEN 34
/* The main device structure */
@@ -81,7 +81,7 @@ struct elan_tp_data {
u8 sm_version;
u8 iap_version;
u16 fw_checksum;
-
+ int pressure_adjustment;
u8 mode;
bool irq_wake;
@@ -99,7 +99,7 @@ static int elan_enable_power(struct elan_tp_data *data)
error = regulator_enable(data->vcc);
if (error) {
dev_err(&data->client->dev,
- "Failed to enable regulator: %d\n", error);
+ "failed to enable regulator: %d\n", error);
return error;
}
@@ -111,6 +111,7 @@ static int elan_enable_power(struct elan_tp_data *data)
msleep(30);
} while (--repeat > 0);
+ dev_err(&data->client->dev, "failed to enable power: %d\n", error);
return error;
}
@@ -125,7 +126,7 @@ static int elan_disable_power(struct elan_tp_data *data)
error = regulator_disable(data->vcc);
if (error) {
dev_err(&data->client->dev,
- "Failed to disable regulator: %d\n",
+ "failed to disable regulator: %d\n",
error);
/* Attempt to power the chip back up */
data->ops->power_control(data->client, true);
@@ -138,6 +139,7 @@ static int elan_disable_power(struct elan_tp_data *data)
msleep(30);
} while (--repeat > 0);
+ dev_err(&data->client->dev, "failed to disable power: %d\n", error);
return error;
}
@@ -196,7 +198,6 @@ static int elan_initialize(struct elan_tp_data *data)
if (!error)
return 0;
- repeat--;
msleep(30);
} while (--repeat > 0);
@@ -228,6 +229,11 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
+ error = data->ops->get_pressure_adjustment(data->client,
+ &data->pressure_adjustment);
+ if (error)
+ return error;
+
return 0;
}
@@ -720,13 +726,13 @@ static const struct attribute_group *elan_sysfs_groups[] = {
*/
static void elan_report_contact(struct elan_tp_data *data,
int contact_num, bool contact_valid,
- u8 *finger_data)
+ bool hover_event, u8 *finger_data)
{
struct input_dev *input = data->input;
unsigned int pos_x, pos_y;
unsigned int pressure, mk_x, mk_y;
- unsigned int area_x, area_y, major, minor, new_pressure;
-
+ unsigned int area_x, area_y, major, minor;
+ unsigned int scaled_pressure;
if (contact_valid) {
pos_x = ((finger_data[0] & 0xf0) << 4) |
@@ -755,15 +761,18 @@ static void elan_report_contact(struct elan_tp_data *data,
major = max(area_x, area_y);
minor = min(area_x, area_y);
- new_pressure = pressure + ETP_PRESSURE_OFFSET;
- if (new_pressure > ETP_MAX_PRESSURE)
- new_pressure = ETP_MAX_PRESSURE;
+ scaled_pressure = pressure + data->pressure_adjustment;
+
+ if (scaled_pressure > ETP_MAX_PRESSURE)
+ scaled_pressure = ETP_MAX_PRESSURE;
input_mt_slot(input, contact_num);
input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
input_report_abs(input, ABS_MT_POSITION_X, pos_x);
input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
- input_report_abs(input, ABS_MT_PRESSURE, new_pressure);
+ input_report_abs(input, ABS_MT_DISTANCE, hover_event);
+ input_report_abs(input, ABS_MT_PRESSURE,
+ hover_event ? 0 : scaled_pressure);
input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
@@ -779,11 +788,14 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET];
int i;
u8 tp_info = packet[ETP_TOUCH_INFO_OFFSET];
- bool contact_valid;
+ u8 hover_info = packet[ETP_HOVER_INFO_OFFSET];
+ bool contact_valid, hover_event;
+ hover_event = hover_info & 0x40;
for (i = 0; i < ETP_MAX_FINGERS; i++) {
contact_valid = tp_info & (1U << (3 + i));
- elan_report_contact(data, i, contact_valid, finger_data);
+ elan_report_contact(data, i, contact_valid, hover_event,
+ finger_data);
if (contact_valid)
finger_data += ETP_FINGER_DATA_LEN;
@@ -877,6 +889,7 @@ static int elan_setup_input_device(struct elan_tp_data *data)
ETP_FINGER_WIDTH * max_width, 0, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
ETP_FINGER_WIDTH * min_width, 0, 0);
+ input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0);
data->input = input;
@@ -1084,16 +1097,18 @@ static int __maybe_unused elan_resume(struct device *dev)
}
error = elan_enable_power(data);
- if (error)
+ if (error) {
dev_err(dev, "power up when resuming failed: %d\n", error);
+ goto err;
+ }
error = elan_initialize(data);
if (error)
dev_err(dev, "initialize when resuming failed: %d\n", error);
+err:
enable_irq(data->client->irq);
-
- return 0;
+ return error;
}
static SIMPLE_DEV_PM_OPS(elan_pm_ops, elan_suspend, elan_resume);
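For reference, the hover support added above reports ABS_MT_DISTANCE as a 0/1 hover flag and suppresses pressure while hovering; otherwise the raw pressure is offset by the per-device adjustment queried at probe time and clamped to ETP_MAX_PRESSURE. A minimal sketch of that scaling, illustration only (the helper name is mine):

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define ETP_MAX_PRESSURE	255
#define ETP_PRESSURE_OFFSET	25

static unsigned int elan_scaled_pressure(unsigned int raw, int adjustment, int hover)
{
	unsigned int p = raw + adjustment;

	if (p > ETP_MAX_PRESSURE)
		p = ETP_MAX_PRESSURE;

	return hover ? 0 : p;	/* pressure is forced to 0 while hovering */
}

int main(void)
{
	printf("%u\n", elan_scaled_pressure(240, ETP_PRESSURE_OFFSET, 0)); /* 255, clamped */
	printf("%u\n", elan_scaled_pressure(240, 0, 0));                   /* 240, no offset */
	printf("%u\n", elan_scaled_pressure(240, ETP_PRESSURE_OFFSET, 1)); /* 0, hover */
	return 0;
}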
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 029941f861af..a0acbbf83bfd 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -41,6 +41,7 @@
#define ETP_I2C_MAX_X_AXIS_CMD 0x0106
#define ETP_I2C_MAX_Y_AXIS_CMD 0x0107
#define ETP_I2C_RESOLUTION_CMD 0x0108
+#define ETP_I2C_PRESSURE_CMD 0x010A
#define ETP_I2C_IAP_VERSION_CMD 0x0110
#define ETP_I2C_SET_CMD 0x0300
#define ETP_I2C_POWER_CMD 0x0307
@@ -117,7 +118,15 @@ static int elan_i2c_write_cmd(struct i2c_client *client, u16 reg, u16 cmd)
int ret;
ret = i2c_transfer(client->adapter, &msg, 1);
- return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
+ if (ret != 1) {
+ if (ret >= 0)
+ ret = -EIO;
+ dev_err(&client->dev, "writing cmd (0x%04x) failed: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ return 0;
}
static int elan_i2c_initialize(struct i2c_client *client)
@@ -356,8 +365,29 @@ static int elan_i2c_get_num_traces(struct i2c_client *client,
return error;
}
- *x_traces = val[0] - 1;
- *y_traces = val[1] - 1;
+ *x_traces = val[0];
+ *y_traces = val[1];
+
+ return 0;
+}
+
+static int elan_i2c_get_pressure_adjustment(struct i2c_client *client,
+ int *adjustment)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_PRESSURE_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get pressure format: %d\n",
+ error);
+ return error;
+ }
+
+ if ((val[0] >> 4) & 0x1)
+ *adjustment = 0;
+ else
+ *adjustment = ETP_PRESSURE_OFFSET;
return 0;
}
@@ -594,6 +624,7 @@ const struct elan_transport_ops elan_i2c_ops = {
.get_sm_version = elan_i2c_get_sm_version,
.get_product_id = elan_i2c_get_product_id,
.get_checksum = elan_i2c_get_checksum,
+ .get_pressure_adjustment = elan_i2c_get_pressure_adjustment,
.get_max = elan_i2c_get_max,
.get_resolution = elan_i2c_get_resolution,
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 06a2bcd1cda2..30ab80dbcdd6 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -268,12 +268,19 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
return error;
}
- *x_traces = val[1] - 1;
- *y_traces = val[2] - 1;
+ *x_traces = val[1];
+ *y_traces = val[2];
return 0;
}
+static int elan_smbus_get_pressure_adjustment(struct i2c_client *client,
+ int *adjustment)
+{
+ *adjustment = ETP_PRESSURE_OFFSET;
+ return 0;
+}
+
static int elan_smbus_iap_get_mode(struct i2c_client *client,
enum tp_mode *mode)
{
@@ -497,6 +504,7 @@ const struct elan_transport_ops elan_smbus_ops = {
.get_sm_version = elan_smbus_get_sm_version,
.get_product_id = elan_smbus_get_product_id,
.get_checksum = elan_smbus_get_checksum,
+ .get_pressure_adjustment = elan_smbus_get_pressure_adjustment,
.get_max = elan_smbus_get_max,
.get_resolution = elan_smbus_get_resolution,
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 6e22682c8255..991dc6b20a58 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
}
/*
+ * This writes the reg_07 value again to the hardware at the end of every
+ * set_rate call because the register loses its value. reg_07 allows setting
+ * absolute mode on v4 hardware.
+ */
+static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
+ unsigned int rate)
+{
+ struct elantech_data *etd = psmouse->private;
+
+ etd->original_set_rate(psmouse, rate);
+ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
+ psmouse_err(psmouse, "restoring reg_07 failed\n");
+}
+
+/*
* Put the touchpad into absolute mode
*/
static int elantech_set_absolute_mode(struct psmouse *psmouse)
@@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
* Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
* Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
+ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
+ * Asus X750JN 0x381f17 10, 14, 0e clickpad
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
@@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
goto init_fail;
}
+ if (etd->fw_version == 0x381f17) {
+ etd->original_set_rate = psmouse->set_rate;
+ psmouse->set_rate = elantech_set_rate_restore_reg_07;
+ }
+
if (elantech_set_input_params(psmouse)) {
psmouse_err(psmouse, "failed to query touchpad range.\n");
goto init_fail;
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 6f3afec02f03..f965d1569cc3 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -142,6 +142,7 @@ struct elantech_data {
struct finger_pos mt[ETP_MAX_FINGERS];
unsigned char parity[256];
int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
+ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
};
#ifdef CONFIG_MOUSE_PS2_ELANTECH
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 23222dd5a66f..e5ed216824e9 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -256,8 +256,8 @@ static void lifebook_disconnect(struct psmouse *psmouse)
int lifebook_detect(struct psmouse *psmouse, bool set_properties)
{
- if (!lifebook_present)
- return -1;
+ if (!lifebook_present)
+ return -1;
if (desired_serio_phys &&
strcmp(psmouse->ps2dev.serio->phys, desired_serio_phys))
@@ -268,7 +268,7 @@ int lifebook_detect(struct psmouse *psmouse, bool set_properties)
psmouse->name = "Lifebook TouchScreen";
}
- return 0;
+ return 0;
}
static int lifebook_create_relative_device(struct psmouse *psmouse)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 8bc61237bc1b..5bb1658f60c7 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -36,6 +36,7 @@
#include "sentelic.h"
#include "cypress_ps2.h"
#include "focaltech.h"
+#include "vmmouse.h"
#define DRIVER_DESC "PS/2 mouse driver"
@@ -474,19 +475,45 @@ static int psmouse_poll(struct psmouse *psmouse)
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
}
+static bool psmouse_check_pnp_id(const char *id, const char * const ids[])
+{
+ int i;
+
+ for (i = 0; ids[i]; i++)
+ if (!strcasecmp(id, ids[i]))
+ return true;
+
+ return false;
+}
+
/*
* psmouse_matches_pnp_id - check if psmouse matches one of the passed in ids.
*/
bool psmouse_matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
{
- int i;
-
- if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
- for (i = 0; ids[i]; i++)
- if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
- return true;
+ struct serio *serio = psmouse->ps2dev.serio;
+ char *p, *fw_id_copy, *save_ptr;
+ bool found = false;
+
+ if (strncmp(serio->firmware_id, "PNP: ", 5))
+ return false;
+
+ fw_id_copy = kstrndup(&serio->firmware_id[5],
+ sizeof(serio->firmware_id) - 5,
+ GFP_KERNEL);
+ if (!fw_id_copy)
+ return false;
+
+ save_ptr = fw_id_copy;
+ while ((p = strsep(&fw_id_copy, " ")) != NULL) {
+ if (psmouse_check_pnp_id(p, ids)) {
+ found = true;
+ break;
+ }
+ }
- return false;
+ kfree(save_ptr);
+ return found;
}
/*
@@ -764,6 +791,13 @@ static int psmouse_extensions(struct psmouse *psmouse,
}
}
+ if (psmouse_do_detect(vmmouse_detect, psmouse, set_properties) == 0) {
+ if (max_proto > PSMOUSE_IMEX) {
+ if (!set_properties || vmmouse_init(psmouse) == 0)
+ return PSMOUSE_VMMOUSE;
+ }
+ }
+
/*
* Try Kensington ThinkingMouse (we try first, because synaptics probe
* upsets the thinkingmouse).
@@ -1087,6 +1121,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.init = focaltech_init,
},
#endif
+#ifdef CONFIG_MOUSE_PS2_VMMOUSE
+ {
+ .type = PSMOUSE_VMMOUSE,
+ .name = VMMOUSE_PSNAME,
+ .alias = "vmmouse",
+ .detect = vmmouse_detect,
+ .init = vmmouse_init,
+ },
+#endif
{
.type = PSMOUSE_AUTO,
.name = "auto",
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index d02e1bdc9ae4..ad5a5a1ea872 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -103,6 +103,7 @@ enum psmouse_type {
PSMOUSE_SYNAPTICS_RELATIVE,
PSMOUSE_CYPRESS,
PSMOUSE_FOCALTECH,
+ PSMOUSE_VMMOUSE,
PSMOUSE_AUTO /* This one should always be last */
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3b06c8a360b6..630af73e98c4 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -67,6 +67,9 @@
#define X_MAX_POSITIVE 8176
#define Y_MAX_POSITIVE 8176
+/* maximum ABS_MT_POSITION displacement (in mm) */
+#define DMAX 10
+
/*****************************************************************************
* Stuff we need even when we do not want native Synaptics support
****************************************************************************/
@@ -203,6 +206,13 @@ static const char * const topbuttonpad_pnp_ids[] = {
NULL
};
+/* This list has been kindly provided by Synaptics. */
+static const char * const forcepad_pnp_ids[] = {
+ "SYN300D",
+ "SYN3014",
+ NULL
+};
+
/*****************************************************************************
* Synaptics communications functions
****************************************************************************/
@@ -687,8 +697,6 @@ static void synaptics_parse_ext_buttons(const unsigned char buf[],
hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
}
-static bool is_forcepad;
-
static int synaptics_parse_hw_state(const unsigned char buf[],
struct synaptics_data *priv,
struct synaptics_hw_state *hw)
@@ -718,7 +726,7 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
hw->left = (buf[0] & 0x01) ? 1 : 0;
hw->right = (buf[0] & 0x02) ? 1 : 0;
- if (is_forcepad) {
+ if (priv->is_forcepad) {
/*
* ForcePads, like Clickpads, use middle button
* bits to report primary button clicks.
@@ -917,7 +925,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
pos[i].y = synaptics_invert_y(hw[i]->y);
}
- input_mt_assign_slots(dev, slot, pos, nsemi, 0);
+ input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res);
for (i = 0; i < nsemi; i++) {
input_mt_slot(dev, slot[i]);
@@ -1186,7 +1194,7 @@ static void set_input_params(struct psmouse *psmouse,
ABS_MT_POSITION_Y);
/* Image sensors can report per-contact pressure */
input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
- input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
+ input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
/* Image sensors can signal 4 and 5 finger clicks */
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
@@ -1418,29 +1426,11 @@ static const struct dmi_system_id __initconst cr48_dmi_table[] = {
{ }
};
-static const struct dmi_system_id forcepad_dmi_table[] __initconst = {
-#if defined(CONFIG_DMI) && defined(CONFIG_X86)
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Folio 1040 G1"),
- },
- },
-#endif
- { }
-};
-
void __init synaptics_module_init(void)
{
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
cr48_profile_sensor = dmi_check_system(cr48_dmi_table);
-
- /*
- * Unfortunately ForcePad capability is not exported over PS/2,
- * so we have to resort to checking DMI.
- */
- is_forcepad = dmi_check_system(forcepad_dmi_table);
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
@@ -1475,6 +1465,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
if (SYN_ID_DISGEST_SUPPORTED(priv->identity))
priv->disable_gesture = true;
+ /*
+ * Unfortunately ForcePad capability is not exported over PS/2,
+ * so we have to resort to checking PNP IDs.
+ */
+ priv->is_forcepad = psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids);
+
if (synaptics_set_mode(psmouse)) {
psmouse_err(psmouse, "Unable to initialize device.\n");
goto init_fail;
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index ee4bd0d12b26..56faa7ec4434 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -196,6 +196,7 @@ struct synaptics_data {
unsigned long press_start;
bool press;
bool report_press;
+ bool is_forcepad;
};
void synaptics_module_init(void);
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
new file mode 100644
index 000000000000..e272f06258ce
--- /dev/null
+++ b/drivers/input/mouse/vmmouse.c
@@ -0,0 +1,508 @@
+/*
+ * Driver for Virtual PS/2 Mouse on VMware and QEMU hypervisors.
+ *
+ * Copyright (C) 2014, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Twin device code is hugely inspired by the ALPS driver.
+ * Authors:
+ * Dmitry Torokhov <dmitry.torokhov@gmail.com>
+ * Thomas Hellstrom <thellstrom@vmware.com>
+ */
+
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/libps2.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/hypervisor.h>
+
+#include "psmouse.h"
+#include "vmmouse.h"
+
+#define VMMOUSE_PROTO_MAGIC 0x564D5868U
+#define VMMOUSE_PROTO_PORT 0x5658
+
+/*
+ * Main commands supported by the vmmouse hypervisor port.
+ */
+#define VMMOUSE_PROTO_CMD_GETVERSION 10
+#define VMMOUSE_PROTO_CMD_ABSPOINTER_DATA 39
+#define VMMOUSE_PROTO_CMD_ABSPOINTER_STATUS 40
+#define VMMOUSE_PROTO_CMD_ABSPOINTER_COMMAND 41
+#define VMMOUSE_PROTO_CMD_ABSPOINTER_RESTRICT 86
+
+/*
+ * Subcommands for VMMOUSE_PROTO_CMD_ABSPOINTER_COMMAND
+ */
+#define VMMOUSE_CMD_ENABLE 0x45414552U
+#define VMMOUSE_CMD_DISABLE 0x000000f5U
+#define VMMOUSE_CMD_REQUEST_RELATIVE 0x4c455252U
+#define VMMOUSE_CMD_REQUEST_ABSOLUTE 0x53424152U
+
+#define VMMOUSE_ERROR 0xffff0000U
+
+#define VMMOUSE_VERSION_ID 0x3442554aU
+
+#define VMMOUSE_RELATIVE_PACKET 0x00010000U
+
+#define VMMOUSE_LEFT_BUTTON 0x20
+#define VMMOUSE_RIGHT_BUTTON 0x10
+#define VMMOUSE_MIDDLE_BUTTON 0x08
+
+/*
+ * VMMouse Restrict command
+ */
+#define VMMOUSE_RESTRICT_ANY 0x00
+#define VMMOUSE_RESTRICT_CPL0 0x01
+#define VMMOUSE_RESTRICT_IOPL 0x02
+
+#define VMMOUSE_MAX_X 0xFFFF
+#define VMMOUSE_MAX_Y 0xFFFF
+
+#define VMMOUSE_VENDOR "VMware"
+#define VMMOUSE_NAME "VMMouse"
+
+/**
+ * struct vmmouse_data - private data structure for the vmmouse driver
+ *
+ * @abs_dev: "Absolute" device used to report absolute mouse movement.
+ * @phys: Physical path for the absolute device.
+ * @dev_name: Name attribute name for the absolute device.
+ */
+struct vmmouse_data {
+ struct input_dev *abs_dev;
+ char phys[32];
+ char dev_name[128];
+};
+
+/**
+ * Hypervisor-specific bi-directional communication channel
+ * implementing the vmmouse protocol. Should never execute on
+ * bare metal hardware.
+ */
+#define VMMOUSE_CMD(cmd, in1, out1, out2, out3, out4) \
+({ \
+ unsigned long __dummy1, __dummy2; \
+ __asm__ __volatile__ ("inl %%dx" : \
+ "=a"(out1), \
+ "=b"(out2), \
+ "=c"(out3), \
+ "=d"(out4), \
+ "=S"(__dummy1), \
+ "=D"(__dummy2) : \
+ "a"(VMMOUSE_PROTO_MAGIC), \
+ "b"(in1), \
+ "c"(VMMOUSE_PROTO_CMD_##cmd), \
+ "d"(VMMOUSE_PROTO_PORT) : \
+ "memory"); \
+})
+
+/**
+ * vmmouse_report_button - report button state on the correct input device
+ *
+ * @psmouse: Pointer to the psmouse struct
+ * @abs_dev: The absolute input device
+ * @rel_dev: The relative input device
+ * @pref_dev: The preferred device for reporting
+ * @code: Button code
+ * @value: Button value
+ *
+ * Report @value and @code on @pref_dev, unless the button is already
+ * pressed on the other device, in which case the state is reported on that
+ * device.
+ */
+static void vmmouse_report_button(struct psmouse *psmouse,
+ struct input_dev *abs_dev,
+ struct input_dev *rel_dev,
+ struct input_dev *pref_dev,
+ unsigned int code, int value)
+{
+ if (test_bit(code, abs_dev->key))
+ pref_dev = abs_dev;
+ else if (test_bit(code, rel_dev->key))
+ pref_dev = rel_dev;
+
+ input_report_key(pref_dev, code, value);
+}
+
+/**
+ * vmmouse_report_events - process events on the vmmouse communications channel
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * This function pulls events from the vmmouse communications channel and
+ * reports them on the correct (absolute or relative) input device. When the
+ * communications channel is drained, or if we've processed more than 255
+ * psmouse commands, the function returns PSMOUSE_FULL_PACKET. If there is a
+ * host- or synchronization error, the function returns PSMOUSE_BAD_DATA in
+ * the hope that the caller will reset the communications channel.
+ */
+static psmouse_ret_t vmmouse_report_events(struct psmouse *psmouse)
+{
+ struct input_dev *rel_dev = psmouse->dev;
+ struct vmmouse_data *priv = psmouse->private;
+ struct input_dev *abs_dev = priv->abs_dev;
+ struct input_dev *pref_dev;
+ u32 status, x, y, z;
+ u32 dummy1, dummy2, dummy3;
+ unsigned int queue_length;
+ unsigned int count = 255;
+
+ while (count--) {
+ /* See if we have motion data. */
+ VMMOUSE_CMD(ABSPOINTER_STATUS, 0,
+ status, dummy1, dummy2, dummy3);
+ if ((status & VMMOUSE_ERROR) == VMMOUSE_ERROR) {
+ psmouse_err(psmouse, "failed to fetch status data\n");
+ /*
+ * After a few attempts this will result in
+ * reconnect.
+ */
+ return PSMOUSE_BAD_DATA;
+ }
+
+ queue_length = status & 0xffff;
+ if (queue_length == 0)
+ break;
+
+ if (queue_length % 4) {
+ psmouse_err(psmouse, "invalid queue length\n");
+ return PSMOUSE_BAD_DATA;
+ }
+
+ /* Now get it */
+ VMMOUSE_CMD(ABSPOINTER_DATA, 4, status, x, y, z);
+
+ /*
+ * And report what we've got. Prefer to report button
+ * events on the same device where we report motion events.
+ * This doesn't work well with the mouse wheel, though. See
+ * below. Ideally we would want to report that on the
+ * preferred device as well.
+ */
+ if (status & VMMOUSE_RELATIVE_PACKET) {
+ pref_dev = rel_dev;
+ input_report_rel(rel_dev, REL_X, (s32)x);
+ input_report_rel(rel_dev, REL_Y, -(s32)y);
+ } else {
+ pref_dev = abs_dev;
+ input_report_abs(abs_dev, ABS_X, x);
+ input_report_abs(abs_dev, ABS_Y, y);
+ }
+
+ /* Xorg seems to ignore wheel events on absolute devices */
+ input_report_rel(rel_dev, REL_WHEEL, -(s8)((u8) z));
+
+ vmmouse_report_button(psmouse, abs_dev, rel_dev,
+ pref_dev, BTN_LEFT,
+ status & VMMOUSE_LEFT_BUTTON);
+ vmmouse_report_button(psmouse, abs_dev, rel_dev,
+ pref_dev, BTN_RIGHT,
+ status & VMMOUSE_RIGHT_BUTTON);
+ vmmouse_report_button(psmouse, abs_dev, rel_dev,
+ pref_dev, BTN_MIDDLE,
+ status & VMMOUSE_MIDDLE_BUTTON);
+ input_sync(abs_dev);
+ input_sync(rel_dev);
+ }
+
+ return PSMOUSE_FULL_PACKET;
+}
+
+/**
+ * vmmouse_process_byte - process data on the ps/2 channel
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * When the ps/2 channel indicates that there is vmmouse data available,
+ * call vmmouse channel processing. Otherwise, continue to accept bytes. If
+ * there is a synchronization or communication data error, return
+ * PSMOUSE_BAD_DATA in the hope that the caller will reset the mouse.
+ */
+static psmouse_ret_t vmmouse_process_byte(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+
+ switch (psmouse->pktcnt) {
+ case 1:
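+ /*
+ * Editorial note: bit 3 of the first byte of a standard PS/2 mouse
+ * packet is always set, so this check only validates stream sync
+ * before later bytes hand off to the vmmouse channel.
+ */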
+ return (packet[0] & 0x8) == 0x8 ?
+ PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA;
+
+ case 2:
+ return PSMOUSE_GOOD_DATA;
+
+ default:
+ return vmmouse_report_events(psmouse);
+ }
+}
+
+/**
+ * vmmouse_disable - Disable vmmouse
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * Tries to disable vmmouse mode.
+ */
+static void vmmouse_disable(struct psmouse *psmouse)
+{
+ u32 status;
+ u32 dummy1, dummy2, dummy3, dummy4;
+
+ VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_DISABLE,
+ dummy1, dummy2, dummy3, dummy4);
+
+ VMMOUSE_CMD(ABSPOINTER_STATUS, 0,
+ status, dummy1, dummy2, dummy3);
+
+ if ((status & VMMOUSE_ERROR) != VMMOUSE_ERROR)
+ psmouse_warn(psmouse, "failed to disable vmmouse device\n");
+}
+
+/**
+ * vmmouse_enable - Enable vmmouse and request absolute mode.
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * Tries to enable vmmouse mode. Performs basic checks and requests
+ * absolute vmmouse mode.
+ * Returns 0 on success, -ENXIO on failure.
+ */
+static int vmmouse_enable(struct psmouse *psmouse)
+{
+ u32 status, version;
+ u32 dummy1, dummy2, dummy3, dummy4;
+
+ /*
+ * Try enabling the device. If successful, we should be able to
+ * read valid version ID back from it.
+ */
+ VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_ENABLE,
+ dummy1, dummy2, dummy3, dummy4);
+
+ /*
+ * See if version ID can be retrieved.
+ */
+ VMMOUSE_CMD(ABSPOINTER_STATUS, 0, status, dummy1, dummy2, dummy3);
+ if ((status & 0x0000ffff) == 0) {
+ psmouse_dbg(psmouse, "empty flags - assuming no device\n");
+ return -ENXIO;
+ }
+
+ VMMOUSE_CMD(ABSPOINTER_DATA, 1 /* single item */,
+ version, dummy1, dummy2, dummy3);
+ if (version != VMMOUSE_VERSION_ID) {
+ psmouse_dbg(psmouse, "Unexpected version value: %u vs %u\n",
+ (unsigned) version, VMMOUSE_VERSION_ID);
+ vmmouse_disable(psmouse);
+ return -ENXIO;
+ }
+
+ /*
+ * Restrict ioport access, if possible.
+ */
+ VMMOUSE_CMD(ABSPOINTER_RESTRICT, VMMOUSE_RESTRICT_CPL0,
+ dummy1, dummy2, dummy3, dummy4);
+
+ VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_REQUEST_ABSOLUTE,
+ dummy1, dummy2, dummy3, dummy4);
+
+ return 0;
+}
+
+/*
+ * Array of supported hypervisors.
+ */
+static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = {
+ &x86_hyper_vmware,
+#ifdef CONFIG_KVM_GUEST
+ &x86_hyper_kvm,
+#endif
+};
+
+/**
+ * vmmouse_check_hypervisor - Check if we're running on a supported hypervisor
+ */
+static bool vmmouse_check_hypervisor(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++)
+ if (vmmouse_supported_hypervisors[i] == x86_hyper)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmmouse_detect - Probe whether vmmouse is available
+ *
+ * @psmouse: Pointer to the psmouse struct
+ * @set_properties: Whether to set psmouse name and vendor
+ *
+ * Returns 0 if vmmouse channel is available. Negative error code if not.
+ */
+int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
+{
+ u32 response, version, dummy1, dummy2;
+
+ if (!vmmouse_check_hypervisor()) {
+ psmouse_dbg(psmouse,
+ "VMMouse not running on supported hypervisor.\n");
+ return -ENXIO;
+ }
+
+ if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
+ psmouse_dbg(psmouse, "VMMouse port in use.\n");
+ return -EBUSY;
+ }
+
+ /* Check if the device is present */
+ response = ~VMMOUSE_PROTO_MAGIC;
+ VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
+ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
+ release_region(VMMOUSE_PROTO_PORT, 4);
+ return -ENXIO;
+ }
+
+ if (set_properties) {
+ psmouse->vendor = VMMOUSE_VENDOR;
+ psmouse->name = VMMOUSE_NAME;
+ psmouse->model = version;
+ }
+
+ release_region(VMMOUSE_PROTO_PORT, 4);
+
+ return 0;
+}
+
+/**
+ * vmmouse_disconnect - Take down vmmouse driver
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * Takes down vmmouse driver and frees resources set up in vmmouse_init().
+ */
+static void vmmouse_disconnect(struct psmouse *psmouse)
+{
+ struct vmmouse_data *priv = psmouse->private;
+
+ vmmouse_disable(psmouse);
+ psmouse_reset(psmouse);
+ input_unregister_device(priv->abs_dev);
+ kfree(priv);
+ release_region(VMMOUSE_PROTO_PORT, 4);
+}
+
+/**
+ * vmmouse_reconnect - Reset the ps/2 and vmmouse connections
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * Attempts to reset the mouse connections. Returns 0 on success and
+ * a negative error code on failure.
+ */
+static int vmmouse_reconnect(struct psmouse *psmouse)
+{
+ int error;
+
+ psmouse_reset(psmouse);
+ vmmouse_disable(psmouse);
+ error = vmmouse_enable(psmouse);
+ if (error) {
+ psmouse_err(psmouse,
+ "Unable to re-enable mouse when reconnecting, err: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * vmmouse_init - Initialize the vmmouse driver
+ *
+ * @psmouse: Pointer to the psmouse struct
+ *
+ * Requests the device and tries to enable vmmouse mode.
+ * If successful, sets up the input device for relative movement events.
+ * It also allocates another input device and sets it up for absolute motion
+ * events. Returns 0 on success and a negative error code on failure.
+ */
+int vmmouse_init(struct psmouse *psmouse)
+{
+ struct vmmouse_data *priv;
+ struct input_dev *rel_dev = psmouse->dev, *abs_dev;
+ int error;
+
+ if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
+ psmouse_dbg(psmouse, "VMMouse port in use.\n");
+ return -EBUSY;
+ }
+
+ psmouse_reset(psmouse);
+ error = vmmouse_enable(psmouse);
+ if (error)
+ goto release_region;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ abs_dev = input_allocate_device();
+ if (!priv || !abs_dev) {
+ error = -ENOMEM;
+ goto init_fail;
+ }
+
+ priv->abs_dev = abs_dev;
+ psmouse->private = priv;
+
+ input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
+ /* Set up and register absolute device */
+ snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
+ psmouse->ps2dev.serio->phys);
+
+ /* Mimic name setup for relative device in psmouse-base.c */
+ snprintf(priv->dev_name, sizeof(priv->dev_name), "%s %s %s",
+ VMMOUSE_PSNAME, VMMOUSE_VENDOR, VMMOUSE_NAME);
+ abs_dev->phys = priv->phys;
+ abs_dev->name = priv->dev_name;
+ abs_dev->id.bustype = BUS_I8042;
+ abs_dev->id.vendor = 0x0002;
+ abs_dev->id.product = PSMOUSE_VMMOUSE;
+ abs_dev->id.version = psmouse->model;
+ abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
+
+ /* Set absolute device capabilities before registering it */
+ input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
+ input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
+ input_set_capability(abs_dev, EV_KEY, BTN_MIDDLE);
+ input_set_capability(abs_dev, EV_ABS, ABS_X);
+ input_set_capability(abs_dev, EV_ABS, ABS_Y);
+ input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
+ input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
+
+ error = input_register_device(priv->abs_dev);
+ if (error)
+ goto init_fail;
+
+ psmouse->protocol_handler = vmmouse_process_byte;
+ psmouse->disconnect = vmmouse_disconnect;
+ psmouse->reconnect = vmmouse_reconnect;
+
+ return 0;
+
+init_fail:
+ vmmouse_disable(psmouse);
+ psmouse_reset(psmouse);
+ input_free_device(abs_dev);
+ kfree(priv);
+ psmouse->private = NULL;
+
+release_region:
+ release_region(VMMOUSE_PROTO_PORT, 4);
+
+ return error;
+}
diff --git a/drivers/input/mouse/vmmouse.h b/drivers/input/mouse/vmmouse.h
new file mode 100644
index 000000000000..6f126017a24c
--- /dev/null
+++ b/drivers/input/mouse/vmmouse.h
@@ -0,0 +1,30 @@
+/*
+ * Driver for Virtual PS/2 Mouse on VMware and QEMU hypervisors.
+ *
+ * Copyright (C) 2014, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _VMMOUSE_H
+#define _VMMOUSE_H
+
+#ifdef CONFIG_MOUSE_PS2_VMMOUSE
+#define VMMOUSE_PSNAME "VirtualPS/2"
+
+int vmmouse_detect(struct psmouse *psmouse, bool set_properties);
+int vmmouse_init(struct psmouse *psmouse);
+#else
+static inline int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
+{
+ return -ENOSYS;
+}
+static inline int vmmouse_init(struct psmouse *psmouse)
+{
+ return -ENOSYS;
+}
+#endif
+
+#endif
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 94ab494a6ade..ecba666afadb 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -31,7 +31,6 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ioport.h>
-#include <linux/pci_ids.h>
#include <asm/irq.h>
#include <asm/io.h>
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 986a71c614b0..cb5ece77fd7d 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1162,13 +1162,32 @@ static int i8042_controller_resume(bool force_reset)
static int i8042_pm_suspend(struct device *dev)
{
+ int i;
+
i8042_controller_reset(true);
+ /* Set up serio interrupts for system wakeup. */
+ for (i = 0; i < I8042_NUM_PORTS; i++) {
+ struct serio *serio = i8042_ports[i].serio;
+
+ if (serio && device_may_wakeup(&serio->dev))
+ enable_irq_wake(i8042_ports[i].irq);
+ }
+
return 0;
}
static int i8042_pm_resume(struct device *dev)
{
+ int i;
+
+ for (i = 0; i < I8042_NUM_PORTS; i++) {
+ struct serio *serio = i8042_ports[i].serio;
+
+ if (serio && device_may_wakeup(&serio->dev))
+ disable_irq_wake(i8042_ports[i].irq);
+ }
+
/*
* On resume from S2R we always try to reset the controller
* to bring it in a sane state. (In case of S2D we expect
@@ -1300,13 +1319,16 @@ static void __init i8042_register_ports(void)
int i;
for (i = 0; i < I8042_NUM_PORTS; i++) {
- if (i8042_ports[i].serio) {
+ struct serio *serio = i8042_ports[i].serio;
+
+ if (serio) {
printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
- i8042_ports[i].serio->name,
+ serio->name,
(unsigned long) I8042_DATA_REG,
(unsigned long) I8042_COMMAND_REG,
i8042_ports[i].irq);
- serio_register_port(i8042_ports[i].serio);
+ serio_register_port(serio);
+ device_set_wakeup_capable(&serio->dev, true);
}
}
}
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 6261fd6d7c3c..80f6386709bf 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -140,6 +140,19 @@ config TOUCHSCREEN_BU21013
To compile this driver as a module, choose M here: the
module will be called bu21013_ts.
+config TOUCHSCREEN_CHIPONE_ICN8318
+ tristate "chipone icn8318 touchscreen controller"
+ depends on GPIOLIB
+ depends on I2C
+ depends on OF
+ help
+ Say Y here if you have a ChipOne icn8318 based I2C touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called chipone_icn8318.
+
config TOUCHSCREEN_CY8CTMG110
tristate "cy8ctmg110 touchscreen"
depends on I2C
@@ -297,11 +310,12 @@ config TOUCHSCREEN_FUJITSU
config TOUCHSCREEN_GOODIX
tristate "Goodix I2C touchscreen"
- depends on I2C && ACPI
+ depends on I2C
help
Say Y here if you have the Goodix touchscreen (such as one
installed in Onda v975w tablets) connected to your
- system.
+ system. It also supports 5-finger chip models, which can be
+ found on ARM tablets, like Wexler TAB7200 and MSI Primo73.
If unsure, say N.
@@ -323,6 +337,18 @@ config TOUCHSCREEN_ILI210X
To compile this driver as a module, choose M here: the
module will be called ili210x.
+config TOUCHSCREEN_IPROC
+ tristate "IPROC touch panel driver support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ help
+ Say Y here if you want to add support for the IPROC touch
+ controller to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm_iproc_tsc.
+
config TOUCHSCREEN_S3C2410
tristate "Samsung S3C2410/generic touchscreen input driver"
depends on ARCH_S3C24XX || SAMSUNG_DEV_TS
@@ -954,7 +980,9 @@ config TOUCHSCREEN_SUN4I
config TOUCHSCREEN_SUR40
tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
depends on USB
+ depends on MEDIA_USB_SUPPORT
select INPUT_POLLDEV
+ select VIDEOBUF2_DMA_SG
help
Say Y here if you want support for the Samsung SUR40 touchscreen
(also known as Microsoft Surface 2.0 or Microsoft PixelSense).
@@ -962,6 +990,17 @@ config TOUCHSCREEN_SUR40
To compile this driver as a module, choose M here: the
module will be called sur40.
+config TOUCHSCREEN_SX8654
+ tristate "Semtech SX8654 touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Semtech SX8654 touchscreen controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sx8654.
+
config TOUCHSCREEN_TPS6507X
tristate "TPS6507x based touchscreens"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 0242fea2102a..44deea743d02 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_TOUCHSCREEN_AR1021_I2C) += ar1021_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
+obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
+obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
obj-$(CONFIG_TOUCHSCREEN_LPC32XX) += lpc32xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_MAX11801) += max11801_ts.o
obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
@@ -79,5 +81,6 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
+obj-$(CONFIG_TOUCHSCREEN_SX8654) += sx8654.o
obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index ba30578e296e..f0b954d46a25 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -157,7 +157,7 @@ static const struct i2c_device_id ar1021_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id);
-static struct of_device_id ar1021_i2c_of_match[] = {
+static const struct of_device_id ar1021_i2c_of_match[] = {
{ .compatible = "microchip,ar1021-i2c", },
{ }
};
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 95ee92a91bd2..40b98dda8f38 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -14,6 +14,8 @@
*
*/
+#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/completion.h>
@@ -25,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
/* Version */
#define MXT_VER_20 20
@@ -79,6 +82,7 @@
#define MXT_SPT_DIGITIZER_T43 43
#define MXT_SPT_MESSAGECOUNT_T44 44
#define MXT_SPT_CTECONFIG_T46 46
+#define MXT_TOUCH_MULTITOUCHSCREEN_T100 100
/* MXT_GEN_MESSAGE_T5 object */
#define MXT_RPTID_NOMSG 0xff
@@ -185,6 +189,36 @@ struct t9_range {
#define MXT_RESET_VALUE 0x01
#define MXT_BACKUP_VALUE 0x55
+/* T100 Multiple Touch Touchscreen */
+#define MXT_T100_CTRL 0
+#define MXT_T100_CFG1 1
+#define MXT_T100_TCHAUX 3
+#define MXT_T100_XRANGE 13
+#define MXT_T100_YRANGE 24
+
+#define MXT_T100_CFG_SWITCHXY BIT(5)
+
+#define MXT_T100_TCHAUX_VECT BIT(0)
+#define MXT_T100_TCHAUX_AMPL BIT(1)
+#define MXT_T100_TCHAUX_AREA BIT(2)
+
+#define MXT_T100_DETECT BIT(7)
+#define MXT_T100_TYPE_MASK 0x70
+
+enum t100_type {
+ MXT_T100_TYPE_FINGER = 1,
+ MXT_T100_TYPE_PASSIVE_STYLUS = 2,
+ MXT_T100_TYPE_HOVERING_FINGER = 4,
+ MXT_T100_TYPE_GLOVE = 5,
+ MXT_T100_TYPE_LARGE_TOUCH = 6,
+};
+
+#define MXT_DISTANCE_ACTIVE_TOUCH 0
+#define MXT_DISTANCE_HOVERING 1
+
+#define MXT_TOUCH_MAJOR_DEFAULT 1
+#define MXT_PRESSURE_DEFAULT 1
+
/* Delay times */
#define MXT_BACKUP_TIME 50 /* msec */
#define MXT_RESET_TIME 200 /* msec */
@@ -244,6 +278,9 @@ struct mxt_data {
unsigned int max_y;
bool in_bootloader;
u16 mem_size;
+ u8 t100_aux_ampl;
+ u8 t100_aux_area;
+ u8 t100_aux_vect;
u8 max_reportid;
u32 config_crc;
u32 info_crc;
@@ -253,6 +290,7 @@ struct mxt_data {
bool update_input;
u8 last_message_count;
u8 num_touchids;
+ u8 multitouch;
/* Cached parameters from object table */
u16 T5_address;
@@ -264,6 +302,8 @@ struct mxt_data {
u8 T9_reportid_max;
u8 T19_reportid;
u16 T44_address;
+ u8 T100_reportid_min;
+ u8 T100_reportid_max;
/* for fw update in bootloader */
struct completion bl_completion;
@@ -771,6 +811,114 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
data->update_input = true;
}
+static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
+{
+ struct device *dev = &data->client->dev;
+ struct input_dev *input_dev = data->input_dev;
+ int id;
+ u8 status;
+ u8 type = 0;
+ u16 x;
+ u16 y;
+ int distance = 0;
+ int tool = 0;
+ u8 major = 0;
+ u8 pressure = 0;
+ u8 orientation = 0;
+
+ id = message[0] - data->T100_reportid_min - 2;
+
+ /* ignore SCRSTATUS events */
+ if (id < 0)
+ return;
+
+ status = message[1];
+ x = get_unaligned_le16(&message[2]);
+ y = get_unaligned_le16(&message[4]);
+
+ if (status & MXT_T100_DETECT) {
+ type = (status & MXT_T100_TYPE_MASK) >> 4;
+
+ switch (type) {
+ case MXT_T100_TYPE_HOVERING_FINGER:
+ tool = MT_TOOL_FINGER;
+ distance = MXT_DISTANCE_HOVERING;
+
+ if (data->t100_aux_vect)
+ orientation = message[data->t100_aux_vect];
+
+ break;
+
+ case MXT_T100_TYPE_FINGER:
+ case MXT_T100_TYPE_GLOVE:
+ tool = MT_TOOL_FINGER;
+ distance = MXT_DISTANCE_ACTIVE_TOUCH;
+
+ if (data->t100_aux_area)
+ major = message[data->t100_aux_area];
+
+ if (data->t100_aux_ampl)
+ pressure = message[data->t100_aux_ampl];
+
+ if (data->t100_aux_vect)
+ orientation = message[data->t100_aux_vect];
+
+ break;
+
+ case MXT_T100_TYPE_PASSIVE_STYLUS:
+ tool = MT_TOOL_PEN;
+
+ /*
+ * Passive stylus is reported with size zero so
+ * hardcode.
+ */
+ major = MXT_TOUCH_MAJOR_DEFAULT;
+
+ if (data->t100_aux_ampl)
+ pressure = message[data->t100_aux_ampl];
+
+ break;
+
+ case MXT_T100_TYPE_LARGE_TOUCH:
+ /* Ignore suppressed touch */
+ break;
+
+ default:
+ dev_dbg(dev, "Unexpected T100 type\n");
+ return;
+ }
+ }
+
+ /*
+ * Values reported should be non-zero if tool is touching the
+ * device
+ */
+ if (!pressure && type != MXT_T100_TYPE_HOVERING_FINGER)
+ pressure = MXT_PRESSURE_DEFAULT;
+
+ input_mt_slot(input_dev, id);
+
+ if (status & MXT_T100_DETECT) {
+ dev_dbg(dev, "[%u] type:%u x:%u y:%u a:%02X p:%02X v:%02X\n",
+ id, type, x, y, major, pressure, orientation);
+
+ input_mt_report_slot_state(input_dev, tool, 1);
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, major);
+ input_report_abs(input_dev, ABS_MT_PRESSURE, pressure);
+ input_report_abs(input_dev, ABS_MT_DISTANCE, distance);
+ input_report_abs(input_dev, ABS_MT_ORIENTATION, orientation);
+ } else {
+ dev_dbg(dev, "[%u] release\n", id);
+
+ /* close out slot */
+ input_mt_report_slot_state(input_dev, 0, 0);
+ }
+
+ data->update_input = true;
+}
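+
+/*
+ * Editorial sketch of the T100 message layout assumed above, derived from
+ * this driver's own aux-byte mapping in mxt_read_t100_config() rather than
+ * from a datasheet:
+ *
+ * message[0] report ID (the first two IDs are reserved, hence the "- 2")
+ * message[1] status: MXT_T100_DETECT plus the type in MXT_T100_TYPE_MASK
+ * message[2..3] X position, little endian
+ * message[4..5] Y position, little endian
+ * message[6..] optional aux bytes in VECT, AMPL, AREA order, present only
+ * when enabled in the TCHAUX field
+ */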
+
static int mxt_proc_message(struct mxt_data *data, u8 *message)
{
u8 report_id = message[0];
@@ -786,9 +934,12 @@ static int mxt_proc_message(struct mxt_data *data, u8 *message)
* is not yet registered.
*/
mxt_dump_message(data, message);
- } else if (report_id >= data->T9_reportid_min
- && report_id <= data->T9_reportid_max) {
+ } else if (report_id >= data->T9_reportid_min &&
+ report_id <= data->T9_reportid_max) {
mxt_proc_t9_message(data, message);
+ } else if (report_id >= data->T100_reportid_min &&
+ report_id <= data->T100_reportid_max) {
+ mxt_proc_t100_message(data, message);
} else if (report_id == data->T19_reportid) {
mxt_input_button(data, message);
data->update_input = true;
@@ -1411,6 +1562,8 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T9_reportid_max = 0;
data->T19_reportid = 0;
data->T44_address = 0;
+ data->T100_reportid_min = 0;
+ data->T100_reportid_max = 0;
data->max_reportid = 0;
}
@@ -1487,6 +1640,7 @@ static int mxt_get_object_table(struct mxt_data *data)
data->T7_address = object->start_address;
break;
case MXT_TOUCH_MULTI_T9:
+ data->multitouch = MXT_TOUCH_MULTI_T9;
data->T9_reportid_min = min_id;
data->T9_reportid_max = max_id;
data->num_touchids = object->num_report_ids
@@ -1498,6 +1652,13 @@ static int mxt_get_object_table(struct mxt_data *data)
case MXT_SPT_GPIOPWM_T19:
data->T19_reportid = min_id;
break;
+ case MXT_TOUCH_MULTITOUCHSCREEN_T100:
+ data->multitouch = MXT_TOUCH_MULTITOUCHSCREEN_T100;
+ data->T100_reportid_min = min_id;
+ data->T100_reportid_max = max_id;
+ /* first two report IDs reserved */
+ data->num_touchids = object->num_report_ids - 2;
+ break;
}
end_address = object->start_address
@@ -1582,22 +1743,138 @@ static int mxt_read_t9_resolution(struct mxt_data *data)
return 0;
}
+static int mxt_read_t100_config(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ struct mxt_object *object;
+ u16 range_x, range_y;
+ u8 cfg, tchaux;
+ u8 aux;
+
+ object = mxt_get_object(data, MXT_TOUCH_MULTITOUCHSCREEN_T100);
+ if (!object)
+ return -EINVAL;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_XRANGE,
+ sizeof(range_x), &range_x);
+ if (error)
+ return error;
+
+ le16_to_cpus(&range_x);
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_YRANGE,
+ sizeof(range_y), &range_y);
+ if (error)
+ return error;
+
+ le16_to_cpus(&range_y);
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_CFG1,
+ 1, &cfg);
+ if (error)
+ return error;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_TCHAUX,
+ 1, &tchaux);
+ if (error)
+ return error;
+
+ /* Handle default values */
+ if (range_x == 0)
+ range_x = 1023;
+
+ if (range_y == 0)
+ range_y = 1023;
+
+ if (cfg & MXT_T100_CFG_SWITCHXY) {
+ data->max_x = range_y;
+ data->max_y = range_x;
+ } else {
+ data->max_x = range_x;
+ data->max_y = range_y;
+ }
+
+ /* allocate aux bytes */
+ aux = 6;
+
+ if (tchaux & MXT_T100_TCHAUX_VECT)
+ data->t100_aux_vect = aux++;
+
+ if (tchaux & MXT_T100_TCHAUX_AMPL)
+ data->t100_aux_ampl = aux++;
+
+ if (tchaux & MXT_T100_TCHAUX_AREA)
+ data->t100_aux_area = aux++;
+
+ dev_dbg(&client->dev,
+ "T100 aux mappings vect:%u ampl:%u area:%u\n",
+ data->t100_aux_vect, data->t100_aux_ampl, data->t100_aux_area);
+
+ dev_info(&client->dev,
+ "T100 Touchscreen size X%uY%u\n", data->max_x, data->max_y);
+
+ return 0;
+}
+
static int mxt_input_open(struct input_dev *dev);
static void mxt_input_close(struct input_dev *dev);
-static int mxt_initialize_t9_input_device(struct mxt_data *data)
+static void mxt_set_up_as_touchpad(struct input_dev *input_dev,
+ struct mxt_data *data)
{
- struct device *dev = &data->client->dev;
const struct mxt_platform_data *pdata = data->pdata;
+ int i;
+
+ input_dev->name = "Atmel maXTouch Touchpad";
+
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+
+ input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X,
+ MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
+ MXT_PIXELS_PER_MM);
+
+ for (i = 0; i < pdata->t19_num_keys; i++)
+ if (pdata->t19_keymap[i] != KEY_RESERVED)
+ input_set_capability(input_dev, EV_KEY,
+ pdata->t19_keymap[i]);
+}
+
+static int mxt_initialize_input_device(struct mxt_data *data)
+{
+ const struct mxt_platform_data *pdata = data->pdata;
+ struct device *dev = &data->client->dev;
struct input_dev *input_dev;
int error;
unsigned int num_mt_slots;
unsigned int mt_flags = 0;
- int i;
- error = mxt_read_t9_resolution(data);
- if (error)
- dev_warn(dev, "Failed to initialize T9 resolution\n");
+ switch (data->multitouch) {
+ case MXT_TOUCH_MULTI_T9:
+ num_mt_slots = data->T9_reportid_max - data->T9_reportid_min + 1;
+ error = mxt_read_t9_resolution(data);
+ if (error)
+ dev_warn(dev, "Failed to initialize T9 resolution\n");
+ break;
+
+ case MXT_TOUCH_MULTITOUCHSCREEN_T100:
+ num_mt_slots = data->num_touchids;
+ error = mxt_read_t100_config(data);
+ if (error)
+ dev_warn(dev, "Failed to read T100 config\n");
+ break;
+
+ default:
+ dev_err(dev, "Invalid multitouch object\n");
+ return -EINVAL;
+ }
input_dev = input_allocate_device();
if (!input_dev) {
@@ -1612,54 +1889,76 @@ static int mxt_initialize_t9_input_device(struct mxt_data *data)
input_dev->open = mxt_input_open;
input_dev->close = mxt_input_close;
- __set_bit(EV_ABS, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(BTN_TOUCH, input_dev->keybit);
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
- if (pdata->t19_num_keys) {
- __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X, 0, data->max_x, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, data->max_y, 0, 0);
- for (i = 0; i < pdata->t19_num_keys; i++)
- if (pdata->t19_keymap[i] != KEY_RESERVED)
- input_set_capability(input_dev, EV_KEY,
- pdata->t19_keymap[i]);
+ if (data->multitouch == MXT_TOUCH_MULTI_T9 ||
+ (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
+ data->t100_aux_ampl)) {
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 255, 0, 0);
+ }
+ /* If device has buttons we assume it is a touchpad */
+ if (pdata->t19_num_keys) {
+ mxt_set_up_as_touchpad(input_dev, data);
mt_flags |= INPUT_MT_POINTER;
-
- input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
- input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
- input_abs_set_res(input_dev, ABS_MT_POSITION_X,
- MXT_PIXELS_PER_MM);
- input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
- MXT_PIXELS_PER_MM);
-
- input_dev->name = "Atmel maXTouch Touchpad";
}
- /* For single touch */
- input_set_abs_params(input_dev, ABS_X,
- 0, data->max_x, 0, 0);
- input_set_abs_params(input_dev, ABS_Y,
- 0, data->max_y, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE,
- 0, 255, 0, 0);
-
/* For multi touch */
- num_mt_slots = data->T9_reportid_max - data->T9_reportid_min + 1;
error = input_mt_init_slots(input_dev, num_mt_slots, mt_flags);
if (error) {
dev_err(dev, "Error %d initialising slots\n", error);
goto err_free_mem;
}
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, MXT_MAX_AREA, 0, 0);
+ if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100) {
+ input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_DISTANCE,
+ MXT_DISTANCE_ACTIVE_TOUCH,
+ MXT_DISTANCE_HOVERING,
+ 0, 0);
+ }
+
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, data->max_x, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
0, data->max_y, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
+
+ if (data->multitouch == MXT_TOUCH_MULTI_T9 ||
+ (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
+ data->t100_aux_area)) {
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, MXT_MAX_AREA, 0, 0);
+ }
+
+ if (data->multitouch == MXT_TOUCH_MULTI_T9 ||
+ (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
+ data->t100_aux_ampl)) {
+ input_set_abs_params(input_dev, ABS_MT_PRESSURE,
+ 0, 255, 0, 0);
+ }
+
+ if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
+ data->t100_aux_vect) {
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
+ 0, 255, 0, 0);
+ }
+
input_set_drvdata(input_dev, data);
@@ -1765,9 +2064,13 @@ static int mxt_configure_objects(struct mxt_data *data,
dev_warn(dev, "Error %d updating config\n", error);
}
- error = mxt_initialize_t9_input_device(data);
- if (error)
- return error;
+ if (data->multitouch) {
+ error = mxt_initialize_input_device(data);
+ if (error)
+ return error;
+ } else {
+ dev_warn(dev, "No touch object detected\n");
+ }
dev_info(dev,
"Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
@@ -2044,15 +2347,13 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
/* Touch enable */
- mxt_write_object(data,
- MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
+ mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0x83);
}
static void mxt_stop(struct mxt_data *data)
{
/* Touch disable */
- mxt_write_object(data,
- MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
+ mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0);
}
static int mxt_input_open(struct input_dev *dev)
@@ -2072,7 +2373,7 @@ static void mxt_input_close(struct input_dev *dev)
}
#ifdef CONFIG_OF
-static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
+static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
{
struct mxt_platform_data *pdata;
u32 *keymap;
@@ -2080,7 +2381,7 @@ static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
int proplen, i, ret;
if (!client->dev.of_node)
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENOENT);
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -2111,25 +2412,132 @@ static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
return pdata;
}
#else
-static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
+static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
{
- dev_dbg(&client->dev, "No platform data specified\n");
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOENT);
+}
+#endif
+
+#ifdef CONFIG_ACPI
+
+struct mxt_acpi_platform_data {
+ const char *hid;
+ struct mxt_platform_data pdata;
+};
+
+static unsigned int samus_touchpad_buttons[] = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ BTN_LEFT
+};
+
+static struct mxt_acpi_platform_data samus_platform_data[] = {
+ {
+ /* Touchpad */
+ .hid = "ATML0000",
+ .pdata = {
+ .t19_num_keys = ARRAY_SIZE(samus_touchpad_buttons),
+ .t19_keymap = samus_touchpad_buttons,
+ },
+ },
+ {
+ /* Touchscreen */
+ .hid = "ATML0001",
+ },
+ { }
+};
+
+static const struct dmi_system_id mxt_dmi_table[] = {
+ {
+ /* 2015 Google Pixel */
+ .ident = "Chromebook Pixel 2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Samus"),
+ },
+ .driver_data = samus_platform_data,
+ },
+ { }
+};
+
+static const struct mxt_platform_data *mxt_parse_acpi(struct i2c_client *client)
+{
+ struct acpi_device *adev;
+ const struct dmi_system_id *system_id;
+ const struct mxt_acpi_platform_data *acpi_pdata;
+
+ /*
+ * Ignore ACPI devices representing bootloader mode.
+ *
+ * This is a bit of a hack: Google Chromebook BIOS creates ACPI
+ * devices for both application and bootloader modes, but we are
+ * interested in application mode only (if device is in bootloader
+ * mode we'll end up switching into application anyway). So far
+ * application mode addresses were all above 0x40, so we'll use it
+ * as a threshold.
+ */
+ if (client->addr < 0x40)
+ return ERR_PTR(-ENXIO);
+
+ adev = ACPI_COMPANION(&client->dev);
+ if (!adev)
+ return ERR_PTR(-ENOENT);
+
+ system_id = dmi_first_match(mxt_dmi_table);
+ if (!system_id)
+ return ERR_PTR(-ENOENT);
+
+ acpi_pdata = system_id->driver_data;
+ if (!acpi_pdata)
+ return ERR_PTR(-ENOENT);
+
+ while (acpi_pdata->hid) {
+ if (!strcmp(acpi_device_hid(adev), acpi_pdata->hid))
+ return &acpi_pdata->pdata;
+
+ acpi_pdata++;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+#else
+static const struct mxt_platform_data *mxt_parse_acpi(struct i2c_client *client)
+{
+ return ERR_PTR(-ENOENT);
}
#endif
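+
+/*
+ * Editorial note: platform data is looked up below in order of preference:
+ * board-supplied platform data, then devicetree, then the ACPI/DMI tables
+ * above; -ENOENT from a parser means "not described here, try the next
+ * source", anything else is treated as a hard error.
+ */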
+static const struct mxt_platform_data *
+mxt_get_platform_data(struct i2c_client *client)
+{
+ const struct mxt_platform_data *pdata;
+
+ pdata = dev_get_platdata(&client->dev);
+ if (pdata)
+ return pdata;
+
+ pdata = mxt_parse_dt(client);
+ if (!IS_ERR(pdata) || PTR_ERR(pdata) != -ENOENT)
+ return pdata;
+
+ pdata = mxt_parse_acpi(client);
+ if (!IS_ERR(pdata) || PTR_ERR(pdata) != -ENOENT)
+ return pdata;
+
+ dev_err(&client->dev, "No platform data specified\n");
+ return ERR_PTR(-EINVAL);
+}
+
static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct mxt_data *data;
const struct mxt_platform_data *pdata;
int error;
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- pdata = mxt_parse_dt(client);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
+ pdata = mxt_get_platform_data(client);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL);
if (!data) {
@@ -2237,6 +2645,15 @@ static const struct of_device_id mxt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mxt_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mxt_acpi_id[] = {
+ { "ATML0000", 0 }, /* Touchpad */
+ { "ATML0001", 0 }, /* Touchscreen */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mxt_acpi_id);
+#endif
+
static const struct i2c_device_id mxt_id[] = {
{ "qt602240_ts", 0 },
{ "atmel_mxt_ts", 0 },
@@ -2251,6 +2668,7 @@ static struct i2c_driver mxt_driver = {
.name = "atmel_mxt_ts",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(mxt_of_match),
+ .acpi_match_table = ACPI_PTR(mxt_acpi_id),
.pm = &mxt_pm_ops,
},
.probe = mxt_probe,
diff --git a/drivers/input/touchscreen/bcm_iproc_tsc.c b/drivers/input/touchscreen/bcm_iproc_tsc.c
new file mode 100644
index 000000000000..ae460a5c93d5
--- /dev/null
+++ b/drivers/input/touchscreen/bcm_iproc_tsc.c
@@ -0,0 +1,522 @@
+/*
+* Copyright (C) 2015 Broadcom Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation version 2.
+*
+* This program is distributed "as is" WITHOUT ANY WARRANTY of any
+* kind, whether express or implied; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/keyboard.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/serio.h>
+
+#define IPROC_TS_NAME "iproc-ts"
+
+#define PEN_DOWN_STATUS 1
+#define PEN_UP_STATUS 0
+
+#define X_MIN 0
+#define Y_MIN 0
+#define X_MAX 0xFFF
+#define Y_MAX 0xFFF
+
+/* Value given by controller for invalid coordinate. */
+#define INVALID_COORD 0xFFFFFFFF
+
+/* Register offsets */
+#define REGCTL1 0x00
+#define REGCTL2 0x04
+#define INTERRUPT_THRES 0x08
+#define INTERRUPT_MASK 0x0c
+
+#define INTERRUPT_STATUS 0x10
+#define CONTROLLER_STATUS 0x14
+#define FIFO_DATA 0x18
+#define FIFO_DATA_X_Y_MASK 0xFFFF
+#define ANALOG_CONTROL 0x1c
+
+#define AUX_DATA 0x20
+#define DEBOUNCE_CNTR_STAT 0x24
+#define SCAN_CNTR_STAT 0x28
+#define REM_CNTR_STAT 0x2c
+
+#define SETTLING_TIMER_STAT 0x30
+#define SPARE_REG 0x34
+#define SOFT_BYPASS_CONTROL 0x38
+#define SOFT_BYPASS_DATA 0x3c
+
+
+/* Bit values for INTERRUPT_MASK and INTERRUPT_STATUS regs */
+#define TS_PEN_INTR_MASK BIT(0)
+#define TS_FIFO_INTR_MASK BIT(2)
+
+/* Bit values for CONTROLLER_STATUS reg1 */
+#define TS_PEN_DOWN BIT(0)
+
+/* Shift values for control reg1 */
+#define SCANNING_PERIOD_SHIFT 24
+#define DEBOUNCE_TIMEOUT_SHIFT 16
+#define SETTLING_TIMEOUT_SHIFT 8
+#define TOUCH_TIMEOUT_SHIFT 0
+
+/* Shift values for coordinates from fifo */
+#define X_COORD_SHIFT 0
+#define Y_COORD_SHIFT 16
+
+/* Bit values for REGCTL2 */
+#define TS_CONTROLLER_EN_BIT BIT(16)
+#define TS_CONTROLLER_AVGDATA_SHIFT 8
+#define TS_CONTROLLER_AVGDATA_MASK (0x7 << TS_CONTROLLER_AVGDATA_SHIFT)
+#define TS_CONTROLLER_PWR_LDO BIT(5)
+#define TS_CONTROLLER_PWR_ADC BIT(4)
+#define TS_CONTROLLER_PWR_BGP BIT(3)
+#define TS_CONTROLLER_PWR_TS BIT(2)
+#define TS_WIRE_MODE_BIT BIT(1)
+
+#define dbg_reg(dev, priv, reg) \
+ dev_dbg(dev, "%20s= 0x%08x\n", #reg, readl((priv)->regs + reg))
+
+struct tsc_param {
+ /* Each step is 1024 us. Valid 1-256 */
+ u32 scanning_period;
+
+ /* Each step is 512 us. Valid 0-255 */
+ u32 debounce_timeout;
+
+ /*
+ * The settling duration (in ms) is the amount of time the tsc
+ * waits to allow the voltage to settle after turning on the
+ * drivers in detection mode. Valid values: 0-11
+ * 0 = 0.008 ms
+ * 1 = 0.01 ms
+ * 2 = 0.02 ms
+ * 3 = 0.04 ms
+ * 4 = 0.08 ms
+ * 5 = 0.16 ms
+ * 6 = 0.32 ms
+ * 7 = 0.64 ms
+ * 8 = 1.28 ms
+ * 9 = 2.56 ms
+ * 10 = 5.12 ms
+ * 11 = 10.24 ms
+ */
+ u32 settling_timeout;
+
+ /* touch timeout in sample counts */
+ u32 touch_timeout;
+
+ /*
+ * Number of data samples which are averaged before a final data point
+ * is placed into the FIFO
+ */
+ u32 average_data;
+
+ /* FIFO threshold */
+ u32 fifo_threshold;
+
+ /* Optional standard touchscreen properties. */
+ u32 max_x;
+ u32 max_y;
+ u32 fuzz_x;
+ u32 fuzz_y;
+ bool invert_x;
+ bool invert_y;
+};
+
+struct iproc_ts_priv {
+ struct platform_device *pdev;
+ struct input_dev *idev;
+
+ void __iomem *regs;
+ struct clk *tsc_clk;
+
+ int pen_status;
+ struct tsc_param cfg_params;
+};
+
+/*
+ * Set default values the same as hardware reset values
+ * except for fifo_threshold, which is set to 1.
+ */
+static const struct tsc_param iproc_default_config = {
+ .scanning_period = 0x5, /* 1 to 256 */
+ .debounce_timeout = 0x28, /* 0 to 255 */
+ .settling_timeout = 0x7, /* 0 to 11 */
+ .touch_timeout = 0xa, /* 0 to 255 */
+ .average_data = 5, /* entry 5 = 32 pts */
+ .fifo_threshold = 1, /* 0 to 31 */
+ .max_x = X_MAX,
+ .max_y = Y_MAX,
+};
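+
+/*
+ * Editorial note: with the step sizes documented in struct tsc_param above,
+ * these defaults work out to roughly
+ * scanning_period 0x05 -> 5 * 1024 us, about 5.1 ms between scans
+ * debounce_timeout 0x28 -> 40 * 512 us, about 20.5 ms of debounce
+ * settling_timeout 0x07 -> 0.64 ms settling time
+ * average_data 5 -> 32 samples averaged per FIFO entry
+ */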
+
+static void ts_reg_dump(struct iproc_ts_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ dbg_reg(dev, priv, REGCTL1);
+ dbg_reg(dev, priv, REGCTL2);
+ dbg_reg(dev, priv, INTERRUPT_THRES);
+ dbg_reg(dev, priv, INTERRUPT_MASK);
+ dbg_reg(dev, priv, INTERRUPT_STATUS);
+ dbg_reg(dev, priv, CONTROLLER_STATUS);
+ dbg_reg(dev, priv, FIFO_DATA);
+ dbg_reg(dev, priv, ANALOG_CONTROL);
+ dbg_reg(dev, priv, AUX_DATA);
+ dbg_reg(dev, priv, DEBOUNCE_CNTR_STAT);
+ dbg_reg(dev, priv, SCAN_CNTR_STAT);
+ dbg_reg(dev, priv, REM_CNTR_STAT);
+ dbg_reg(dev, priv, SETTLING_TIMER_STAT);
+ dbg_reg(dev, priv, SPARE_REG);
+ dbg_reg(dev, priv, SOFT_BYPASS_CONTROL);
+ dbg_reg(dev, priv, SOFT_BYPASS_DATA);
+}
+
+static irqreturn_t iproc_touchscreen_interrupt(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct iproc_ts_priv *priv = platform_get_drvdata(pdev);
+ u32 intr_status;
+ u32 raw_coordinate;
+ u16 x;
+ u16 y;
+ int i;
+ bool needs_sync = false;
+
+ intr_status = readl(priv->regs + INTERRUPT_STATUS);
+ intr_status &= TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
+ if (intr_status == 0)
+ return IRQ_NONE;
+
+ /* Clear all interrupt status bits, write-1-clear */
+ writel(intr_status, priv->regs + INTERRUPT_STATUS);
+
+ /* Pen up/down */
+ if (intr_status & TS_PEN_INTR_MASK) {
+ if (readl(priv->regs + CONTROLLER_STATUS) & TS_PEN_DOWN)
+ priv->pen_status = PEN_DOWN_STATUS;
+ else
+ priv->pen_status = PEN_UP_STATUS;
+
+ input_report_key(priv->idev, BTN_TOUCH, priv->pen_status);
+ needs_sync = true;
+
+ dev_dbg(&priv->pdev->dev,
+ "pen up-down (%d)\n", priv->pen_status);
+ }
+
+ /* coordinates in FIFO exceed the threshold */
+ if (intr_status & TS_FIFO_INTR_MASK) {
+ for (i = 0; i < priv->cfg_params.fifo_threshold; i++) {
+ raw_coordinate = readl(priv->regs + FIFO_DATA);
+ if (raw_coordinate == INVALID_COORD)
+ continue;
+
+ /*
+ * The x and y coordinate are 16 bits each
+ * with the x in the lower 16 bits and y in the
+ * upper 16 bits.
+ */
+ x = (raw_coordinate >> X_COORD_SHIFT) &
+ FIFO_DATA_X_Y_MASK;
+ y = (raw_coordinate >> Y_COORD_SHIFT) &
+ FIFO_DATA_X_Y_MASK;
+
+ /* We only want to retain the 12 msb of the 16 */
+ x = (x >> 4) & 0x0FFF;
+ y = (y >> 4) & 0x0FFF;
+
+ /* adjust x y according to lcd tsc mount angle */
+ if (priv->cfg_params.invert_x)
+ x = priv->cfg_params.max_x - x;
+
+ if (priv->cfg_params.invert_y)
+ y = priv->cfg_params.max_y - y;
+
+ input_report_abs(priv->idev, ABS_X, x);
+ input_report_abs(priv->idev, ABS_Y, y);
+ needs_sync = true;
+
+ dev_dbg(&priv->pdev->dev, "xy (0x%x 0x%x)\n", x, y);
+ }
+ }
+
+ if (needs_sync)
+ input_sync(priv->idev);
+
+ return IRQ_HANDLED;
+}
+
+static int iproc_ts_start(struct input_dev *idev)
+{
+ struct iproc_ts_priv *priv = input_get_drvdata(idev);
+ u32 val;
+ int error;
+
+ /* Enable clock */
+ error = clk_prepare_enable(priv->tsc_clk);
+ if (error) {
+ dev_err(&priv->pdev->dev, "%s clk_prepare_enable failed %d\n",
+ __func__, error);
+ return error;
+ }
+
+ /*
+ * Interrupt is generated when:
+ * FIFO reaches the int_th value, and pen event(up/down)
+ */
+ val = TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
+ writel(val, priv->regs + INTERRUPT_MASK);
+
+ writel(priv->cfg_params.fifo_threshold, priv->regs + INTERRUPT_THRES);
+
+ /* Initialize control reg1 */
+ val = 0;
+ val |= priv->cfg_params.scanning_period << SCANNING_PERIOD_SHIFT;
+ val |= priv->cfg_params.debounce_timeout << DEBOUNCE_TIMEOUT_SHIFT;
+ val |= priv->cfg_params.settling_timeout << SETTLING_TIMEOUT_SHIFT;
+ val |= priv->cfg_params.touch_timeout << TOUCH_TIMEOUT_SHIFT;
+ writel(val, priv->regs + REGCTL1);
+
+ /* Try to clear all interrupt status */
+ val = readl(priv->regs + INTERRUPT_STATUS);
+ val |= TS_FIFO_INTR_MASK | TS_PEN_INTR_MASK;
+ writel(val, priv->regs + INTERRUPT_STATUS);
+
+ /* Initialize control reg2 */
+ val = readl(priv->regs + REGCTL2);
+ val |= TS_CONTROLLER_EN_BIT | TS_WIRE_MODE_BIT;
+
+ val &= ~TS_CONTROLLER_AVGDATA_MASK;
+ val |= priv->cfg_params.average_data << TS_CONTROLLER_AVGDATA_SHIFT;
+
+ val &= ~(TS_CONTROLLER_PWR_LDO | /* PWR up LDO */
+ TS_CONTROLLER_PWR_ADC | /* PWR up ADC */
+ TS_CONTROLLER_PWR_BGP | /* PWR up BGP */
+ TS_CONTROLLER_PWR_TS); /* PWR up TS */
+
+ writel(val, priv->regs + REGCTL2);
+
+ ts_reg_dump(priv);
+
+ return 0;
+}
+
+static void iproc_ts_stop(struct input_dev *dev)
+{
+ u32 val;
+ struct iproc_ts_priv *priv = input_get_drvdata(dev);
+
+ writel(0, priv->regs + INTERRUPT_MASK); /* Disable all interrupts */
+
+ /* Only power down touch screen controller */
+ val = readl(priv->regs + REGCTL2);
+ val |= TS_CONTROLLER_PWR_TS;
+ writel(val, priv->regs + REGCTL2);
+
+ clk_disable(priv->tsc_clk);
+}
+
+static int iproc_get_tsc_config(struct device *dev, struct iproc_ts_priv *priv)
+{
+ struct device_node *np = dev->of_node;
+ u32 val;
+
+ priv->cfg_params = iproc_default_config;
+
+ if (!np)
+ return 0;
+
+ if (of_property_read_u32(np, "scanning_period", &val) >= 0) {
+ if (val < 1 || val > 256) {
+ dev_err(dev, "scanning_period (%u) must be [1-256]\n",
+ val);
+ return -EINVAL;
+ }
+ priv->cfg_params.scanning_period = val;
+ }
+
+ if (of_property_read_u32(np, "debounce_timeout", &val) >= 0) {
+ if (val > 255) {
+ dev_err(dev, "debounce_timeout (%u) must be [0-255]\n",
+ val);
+ return -EINVAL;
+ }
+ priv->cfg_params.debounce_timeout = val;
+ }
+
+ if (of_property_read_u32(np, "settling_timeout", &val) >= 0) {
+ if (val > 11) {
+ dev_err(dev, "settling_timeout (%u) must be [0-11]\n",
+ val);
+ return -EINVAL;
+ }
+ priv->cfg_params.settling_timeout = val;
+ }
+
+ if (of_property_read_u32(np, "touch_timeout", &val) >= 0) {
+ if (val > 255) {
+ dev_err(dev, "touch_timeout (%u) must be [0-255]\n",
+ val);
+ return -EINVAL;
+ }
+ priv->cfg_params.touch_timeout = val;
+ }
+
+ if (of_property_read_u32(np, "average_data", &val) >= 0) {
+ if (val > 8) {
+ dev_err(dev, "average_data (%u) must be [0-8]\n", val);
+ return -EINVAL;
+ }
+ priv->cfg_params.average_data = val;
+ }
+
+ if (of_property_read_u32(np, "fifo_threshold", &val) >= 0) {
+ if (val > 31) {
+ dev_err(dev, "fifo_threshold (%u)) must be [0-31]\n",
+ val);
+ return -EINVAL;
+ }
+ priv->cfg_params.fifo_threshold = val;
+ }
+
+ /* Parse optional properties. */
+ of_property_read_u32(np, "touchscreen-size-x", &priv->cfg_params.max_x);
+ of_property_read_u32(np, "touchscreen-size-y", &priv->cfg_params.max_y);
+
+ of_property_read_u32(np, "touchscreen-fuzz-x",
+ &priv->cfg_params.fuzz_x);
+ of_property_read_u32(np, "touchscreen-fuzz-y",
+ &priv->cfg_params.fuzz_y);
+
+ priv->cfg_params.invert_x =
+ of_property_read_bool(np, "touchscreen-inverted-x");
+ priv->cfg_params.invert_y =
+ of_property_read_bool(np, "touchscreen-inverted-y");
+
+ return 0;
+}
+
+static int iproc_ts_probe(struct platform_device *pdev)
+{
+ struct iproc_ts_priv *priv;
+ struct input_dev *idev;
+ struct resource *res;
+ int irq;
+ int error;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* touchscreen controller memory mapped regs */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->regs)) {
+ error = PTR_ERR(priv->regs);
+ dev_err(&pdev->dev, "unable to map I/O memory: %d\n", error);
+ return error;
+ }
+
+ priv->tsc_clk = devm_clk_get(&pdev->dev, "tsc_clk");
+ if (IS_ERR(priv->tsc_clk)) {
+ error = PTR_ERR(priv->tsc_clk);
+ dev_err(&pdev->dev,
+ "failed getting clock tsc_clk: %d\n", error);
+ return error;
+ }
+
+ priv->pdev = pdev;
+ error = iproc_get_tsc_config(&pdev->dev, priv);
+ if (error) {
+ dev_err(&pdev->dev, "get_tsc_config failed: %d\n", error);
+ return error;
+ }
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ priv->idev = idev;
+ priv->pen_status = PEN_UP_STATUS;
+
+ /* Set input device info */
+ idev->name = IPROC_TS_NAME;
+ idev->dev.parent = &pdev->dev;
+
+ idev->id.bustype = BUS_HOST;
+ idev->id.vendor = SERIO_UNKNOWN;
+ idev->id.product = 0;
+ idev->id.version = 0;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ __set_bit(BTN_TOUCH, idev->keybit);
+
+ input_set_abs_params(idev, ABS_X, X_MIN, priv->cfg_params.max_x,
+ priv->cfg_params.fuzz_x, 0);
+ input_set_abs_params(idev, ABS_Y, Y_MIN, priv->cfg_params.max_y,
+ priv->cfg_params.fuzz_y, 0);
+
+ idev->open = iproc_ts_start;
+ idev->close = iproc_ts_stop;
+
+ input_set_drvdata(idev, priv);
+ platform_set_drvdata(pdev, priv);
+
+ /* get interrupt */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed: %d\n", irq);
+ return irq;
+ }
+
+ error = devm_request_irq(&pdev->dev, irq,
+ iproc_touchscreen_interrupt,
+ IRQF_SHARED, IPROC_TS_NAME, pdev);
+ if (error)
+ return error;
+
+ error = input_register_device(priv->idev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id iproc_ts_of_match[] = {
+ {.compatible = "brcm,iproc-touchscreen", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, iproc_ts_of_match);
+
+static struct platform_driver iproc_ts_driver = {
+ .probe = iproc_ts_probe,
+ .driver = {
+ .name = IPROC_TS_NAME,
+ .of_match_table = of_match_ptr(iproc_ts_of_match),
+ },
+};
+
+module_platform_driver(iproc_ts_driver);
+
+MODULE_DESCRIPTION("IPROC Touchscreen driver");
+MODULE_AUTHOR("Broadcom");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
new file mode 100644
index 000000000000..32e9db0e04bf
--- /dev/null
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -0,0 +1,316 @@
+/*
+ * Driver for ChipOne icn8318 i2c touchscreen controller
+ *
+ * Copyright (c) 2015 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#define ICN8318_REG_POWER 4
+#define ICN8318_REG_TOUCHDATA 16
+
+#define ICN8318_POWER_ACTIVE 0
+#define ICN8318_POWER_MONITOR 1
+#define ICN8318_POWER_HIBERNATE 2
+
+#define ICN8318_MAX_TOUCHES 5
+
+struct icn8318_touch {
+ __u8 slot;
+ __be16 x;
+ __be16 y;
+ __u8 pressure; /* Seems more like finger width than pressure really */
+ __u8 event;
+/* The difference between 2 and 3 is unclear */
+#define ICN8318_EVENT_NO_DATA 1 /* No finger seen yet since wakeup */
+#define ICN8318_EVENT_UPDATE1 2 /* New or updated coordinates */
+#define ICN8318_EVENT_UPDATE2 3 /* New or updated coordinates */
+#define ICN8318_EVENT_END 4 /* Finger lifted */
+} __packed;
+
+struct icn8318_touch_data {
+ __u8 softbutton;
+ __u8 touch_count;
+ struct icn8318_touch touches[ICN8318_MAX_TOUCHES];
+} __packed;
+
+struct icn8318_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct gpio_desc *wake_gpio;
+ u32 max_x;
+ u32 max_y;
+ bool invert_x;
+ bool invert_y;
+ bool swap_x_y;
+};
+
+static int icn8318_read_touch_data(struct i2c_client *client,
+ struct icn8318_touch_data *touch_data)
+{
+ u8 reg = ICN8318_REG_TOUCHDATA;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &reg
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(struct icn8318_touch_data),
+ .buf = (u8 *)touch_data
+ }
+ };
+
+ return i2c_transfer(client->adapter, msg, 2);
+}
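+
+/*
+ * Editorial note: the transfer above is a one-byte register-address write
+ * followed by a repeated-start read of the whole icn8318_touch_data struct;
+ * i2c_transfer() returns the number of messages transferred (2) on success,
+ * which is why callers only treat negative return values as errors.
+ */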
+
+static inline bool icn8318_touch_active(u8 event)
+{
+ return (event == ICN8318_EVENT_UPDATE1) ||
+ (event == ICN8318_EVENT_UPDATE2);
+}
+
+static irqreturn_t icn8318_irq(int irq, void *dev_id)
+{
+ struct icn8318_data *data = dev_id;
+ struct device *dev = &data->client->dev;
+ struct icn8318_touch_data touch_data;
+ int i, ret, x, y;
+
+ ret = icn8318_read_touch_data(data->client, &touch_data);
+ if (ret < 0) {
+ dev_err(dev, "Error reading touch data: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ if (touch_data.softbutton) {
+ /*
+ * Other data is invalid when a softbutton is pressed.
+ * This needs some extra devicetree bindings to map the icn8318
+ * softbutton codes to evdev codes. Currently no known devices
+ * use this.
+ */
+ return IRQ_HANDLED;
+ }
+
+ if (touch_data.touch_count > ICN8318_MAX_TOUCHES) {
+ dev_warn(dev, "Too much touches %d > %d\n",
+ touch_data.touch_count, ICN8318_MAX_TOUCHES);
+ touch_data.touch_count = ICN8318_MAX_TOUCHES;
+ }
+
+ for (i = 0; i < touch_data.touch_count; i++) {
+ struct icn8318_touch *touch = &touch_data.touches[i];
+ bool act = icn8318_touch_active(touch->event);
+
+ input_mt_slot(data->input, touch->slot);
+ input_mt_report_slot_state(data->input, MT_TOOL_FINGER, act);
+ if (!act)
+ continue;
+
+ x = be16_to_cpu(touch->x);
+ y = be16_to_cpu(touch->y);
+
+ if (data->invert_x)
+ x = data->max_x - x;
+
+ if (data->invert_y)
+ y = data->max_y - y;
+
+ if (!data->swap_x_y) {
+ input_event(data->input, EV_ABS, ABS_MT_POSITION_X, x);
+ input_event(data->input, EV_ABS, ABS_MT_POSITION_Y, y);
+ } else {
+ input_event(data->input, EV_ABS, ABS_MT_POSITION_X, y);
+ input_event(data->input, EV_ABS, ABS_MT_POSITION_Y, x);
+ }
+ }
+
+ input_mt_sync_frame(data->input);
+ input_sync(data->input);
+
+ return IRQ_HANDLED;
+}
+
+static int icn8318_start(struct input_dev *dev)
+{
+ struct icn8318_data *data = input_get_drvdata(dev);
+
+ enable_irq(data->client->irq);
+ gpiod_set_value_cansleep(data->wake_gpio, 1);
+
+ return 0;
+}
+
+static void icn8318_stop(struct input_dev *dev)
+{
+ struct icn8318_data *data = input_get_drvdata(dev);
+
+ disable_irq(data->client->irq);
+ i2c_smbus_write_byte_data(data->client, ICN8318_REG_POWER,
+ ICN8318_POWER_HIBERNATE);
+ gpiod_set_value_cansleep(data->wake_gpio, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int icn8318_suspend(struct device *dev)
+{
+ struct icn8318_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ mutex_lock(&data->input->mutex);
+ if (data->input->users)
+ icn8318_stop(data->input);
+ mutex_unlock(&data->input->mutex);
+
+ return 0;
+}
+
+static int icn8318_resume(struct device *dev)
+{
+ struct icn8318_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ mutex_lock(&data->input->mutex);
+ if (data->input->users)
+ icn8318_start(data->input);
+ mutex_unlock(&data->input->mutex);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(icn8318_pm_ops, icn8318_suspend, icn8318_resume);
+
+static int icn8318_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct icn8318_data *data;
+ struct input_dev *input;
+ u32 fuzz_x = 0, fuzz_y = 0;
+ int error;
+
+ if (!client->irq) {
+ dev_err(dev, "Error no irq specified\n");
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->wake_gpio = devm_gpiod_get(dev, "wake", GPIOD_OUT_LOW);
+ if (IS_ERR(data->wake_gpio)) {
+ error = PTR_ERR(data->wake_gpio);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Error getting wake gpio: %d\n", error);
+ return error;
+ }
+
+ if (of_property_read_u32(np, "touchscreen-size-x", &data->max_x) ||
+ of_property_read_u32(np, "touchscreen-size-y", &data->max_y)) {
+ dev_err(dev, "Error touchscreen-size-x and/or -y missing\n");
+ return -EINVAL;
+ }
+
+ /* Optional */
+ of_property_read_u32(np, "touchscreen-fuzz-x", &fuzz_x);
+ of_property_read_u32(np, "touchscreen-fuzz-y", &fuzz_y);
+ data->invert_x = of_property_read_bool(np, "touchscreen-inverted-x");
+ data->invert_y = of_property_read_bool(np, "touchscreen-inverted-y");
+ data->swap_x_y = of_property_read_bool(np, "touchscreen-swapped-x-y");
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = client->name;
+ input->id.bustype = BUS_I2C;
+ input->open = icn8318_start;
+ input->close = icn8318_stop;
+ input->dev.parent = dev;
+
+ if (!data->swap_x_y) {
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0,
+ data->max_x, fuzz_x, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
+ data->max_y, fuzz_y, 0);
+ } else {
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0,
+ data->max_y, fuzz_y, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
+ data->max_x, fuzz_x, 0);
+ }
+
+ error = input_mt_init_slots(input, ICN8318_MAX_TOUCHES,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return error;
+
+ data->client = client;
+ data->input = input;
+ input_set_drvdata(input, data);
+
+ error = devm_request_threaded_irq(dev, client->irq, NULL, icn8318_irq,
+ IRQF_ONESHOT, client->name, data);
+ if (error) {
+ dev_err(dev, "Error requesting irq: %d\n", error);
+ return error;
+ }
+
+ /* Stop device till opened */
+ icn8318_stop(data->input);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ i2c_set_clientdata(client, data);
+
+ return 0;
+}
+
+static const struct of_device_id icn8318_of_match[] = {
+ { .compatible = "chipone,icn8318" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, icn8318_of_match);
+
+/* This is useless for OF-enabled devices, but it is needed by the I2C subsystem */
+static const struct i2c_device_id icn8318_i2c_id[] = {
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, icn8318_i2c_id);
+
+static struct i2c_driver icn8318_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "chipone_icn8318",
+ .pm = &icn8318_pm_ops,
+ .of_match_table = icn8318_of_match,
+ },
+ .probe = icn8318_probe,
+ .id_table = icn8318_i2c_id,
+};
+
+module_i2c_driver(icn8318_driver);
+
+MODULE_DESCRIPTION("ChipOne icn8318 I2C Touchscreen Driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index d4c24fb7704f..e6aef3e48bd9 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -37,6 +37,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/input/edt-ft5x06.h>
#define MAX_SUPPORT_POINTS 5
@@ -1034,7 +1035,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
input->id.bustype = BUS_I2C;
input->dev.parent = &client->dev;
- __set_bit(EV_SYN, input->evbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(EV_ABS, input->evbit);
__set_bit(BTN_TOUCH, input->keybit);
@@ -1044,6 +1044,10 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
0, tsdata->num_x * 64 - 1, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y,
0, tsdata->num_y * 64 - 1, 0, 0);
+
+ if (!pdata)
+ touchscreen_parse_of_params(input);
+
error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, 0);
if (error) {
dev_err(&client->dev, "Unable to init MT slots.\n");
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 926c58e540c0..0efd766a545b 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -98,7 +98,6 @@
#define MAX_FW_UPDATE_RETRIES 30
#define ELAN_FW_PAGESIZE 132
-#define ELAN_FW_FILENAME "elants_i2c.bin"
/* calibration timeout definition */
#define ELAN_CALI_TIMEOUT_MSEC 10000
@@ -697,12 +696,19 @@ static int elants_i2c_fw_update(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
const struct firmware *fw;
+ char *fw_name;
int error;
- error = request_firmware(&fw, ELAN_FW_FILENAME, &client->dev);
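+ /*
+ * The firmware name is derived from the controller's hardware
+ * version, e.g. a (hypothetical) hw_version of 0x0001 would
+ * request "elants_i2c_0001.bin".
+ */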
+ fw_name = kasprintf(GFP_KERNEL, "elants_i2c_%04x.bin", ts->hw_version);
+ if (!fw_name)
+ return -ENOMEM;
+
+ dev_info(&client->dev, "requesting fw name = %s\n", fw_name);
+ error = request_firmware(&fw, fw_name, &client->dev);
+ kfree(fw_name);
if (error) {
- dev_err(&client->dev, "failed to request firmware %s: %d\n",
- ELAN_FW_FILENAME, error);
+ dev_err(&client->dev, "failed to request firmware: %d\n",
+ error);
return error;
}
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index ca196689f025..3af16984d57c 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -23,6 +23,8 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
#include <asm/unaligned.h>
struct goodix_ts_data {
@@ -48,6 +50,7 @@ struct goodix_ts_data {
#define GOODIX_REG_VERSION 0x8140
#define RESOLUTION_LOC 1
+#define MAX_CONTACTS_LOC 5
#define TRIGGER_LOC 6
static const unsigned long goodix_irq_flags[] = {
@@ -99,7 +102,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
}
touch_num = data[0] & 0x0f;
- if (touch_num > GOODIX_MAX_CONTACTS)
+ if (touch_num > ts->max_touch_num)
return -EPROTO;
if (touch_num > 1) {
@@ -141,7 +144,7 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
*/
static void goodix_process_events(struct goodix_ts_data *ts)
{
- u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+ u8 point_data[1 + GOODIX_CONTACT_SIZE * ts->max_touch_num];
int touch_num;
int i;
@@ -202,21 +205,23 @@ static void goodix_read_config(struct goodix_ts_data *ts)
ts->abs_x_max = GOODIX_MAX_WIDTH;
ts->abs_y_max = GOODIX_MAX_HEIGHT;
ts->int_trigger_type = GOODIX_INT_TRIGGER;
+ ts->max_touch_num = GOODIX_MAX_CONTACTS;
return;
}
ts->abs_x_max = get_unaligned_le16(&config[RESOLUTION_LOC]);
ts->abs_y_max = get_unaligned_le16(&config[RESOLUTION_LOC + 2]);
- ts->int_trigger_type = (config[TRIGGER_LOC]) & 0x03;
- if (!ts->abs_x_max || !ts->abs_y_max) {
+ ts->int_trigger_type = config[TRIGGER_LOC] & 0x03;
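+ /* the low nibble of the MAX_CONTACTS_LOC config byte holds the contact count */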
+ ts->max_touch_num = config[MAX_CONTACTS_LOC] & 0x0f;
+ if (!ts->abs_x_max || !ts->abs_y_max || !ts->max_touch_num) {
dev_err(&ts->client->dev,
"Invalid config, using defaults\n");
ts->abs_x_max = GOODIX_MAX_WIDTH;
ts->abs_y_max = GOODIX_MAX_HEIGHT;
+ ts->max_touch_num = GOODIX_MAX_CONTACTS;
}
}
-
/**
* goodix_read_version - Read goodix touchscreen version
*
@@ -295,7 +300,7 @@ static int goodix_request_input_dev(struct goodix_ts_data *ts)
input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
- input_mt_init_slots(ts->input_dev, GOODIX_MAX_CONTACTS,
+ input_mt_init_slots(ts->input_dev, ts->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
ts->input_dev->name = "Goodix Capacitive TouchScreen";
@@ -372,11 +377,27 @@ static const struct i2c_device_id goodix_ts_id[] = {
{ }
};
+#ifdef CONFIG_ACPI
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id goodix_of_match[] = {
+ { .compatible = "goodix,gt911" },
+ { .compatible = "goodix,gt9110" },
+ { .compatible = "goodix,gt912" },
+ { .compatible = "goodix,gt927" },
+ { .compatible = "goodix,gt9271" },
+ { .compatible = "goodix,gt928" },
+ { .compatible = "goodix,gt967" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, goodix_of_match);
+#endif
static struct i2c_driver goodix_ts_driver = {
.probe = goodix_ts_probe,
@@ -384,7 +405,8 @@ static struct i2c_driver goodix_ts_driver = {
.driver = {
.name = "Goodix-TS",
.owner = THIS_MODULE,
- .acpi_match_table = goodix_acpi_match,
+ .acpi_match_table = ACPI_PTR(goodix_acpi_match),
+ .of_match_table = of_match_ptr(goodix_of_match),
},
};
module_i2c_driver(goodix_ts_driver);
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index f8f9b84230b1..b82b5207c78b 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -11,8 +11,41 @@
#include <linux/of.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
+static u32 of_get_optional_u32(struct device_node *np,
+ const char *property)
+{
+ u32 val = 0;
+
+ of_property_read_u32(np, property, &val);
+
+ return val;
+}
+
+static void touchscreen_set_params(struct input_dev *dev,
+ unsigned long axis,
+ int max, int fuzz)
+{
+ struct input_absinfo *absinfo;
+
+ if (!test_bit(axis, dev->absbit)) {
+ /*
+ * Emit a warning only if the axis is not a multitouch
+ * axis, which might not be set by the driver.
+ */
+ if (!input_is_mt_axis(axis))
+ dev_warn(&dev->dev,
+ "DT specifies parameters but the axis is not set up\n");
+ return;
+ }
+
+ absinfo = &dev->absinfo[axis];
+ absinfo->maximum = max;
+ absinfo->fuzz = fuzz;
+}
+
/**
* touchscreen_parse_of_params - parse common touchscreen DT properties
* @dev: device that should be parsed
@@ -24,22 +57,31 @@
void touchscreen_parse_of_params(struct input_dev *dev)
{
struct device_node *np = dev->dev.parent->of_node;
- struct input_absinfo *absinfo;
+ u32 maximum, fuzz;
input_alloc_absinfo(dev);
if (!dev->absinfo)
return;
- absinfo = &dev->absinfo[ABS_X];
- of_property_read_u32(np, "touchscreen-size-x", &absinfo->maximum);
- of_property_read_u32(np, "touchscreen-fuzz-x", &absinfo->fuzz);
+ maximum = of_get_optional_u32(np, "touchscreen-size-x");
+ fuzz = of_get_optional_u32(np, "touchscreen-fuzz-x");
+ if (maximum || fuzz) {
+ touchscreen_set_params(dev, ABS_X, maximum, fuzz);
+ touchscreen_set_params(dev, ABS_MT_POSITION_X, maximum, fuzz);
+ }
- absinfo = &dev->absinfo[ABS_Y];
- of_property_read_u32(np, "touchscreen-size-y", &absinfo->maximum);
- of_property_read_u32(np, "touchscreen-fuzz-y", &absinfo->fuzz);
+ maximum = of_get_optional_u32(np, "touchscreen-size-y");
+ fuzz = of_get_optional_u32(np, "touchscreen-fuzz-y");
+ if (maximum || fuzz) {
+ touchscreen_set_params(dev, ABS_Y, maximum, fuzz);
+ touchscreen_set_params(dev, ABS_MT_POSITION_Y, maximum, fuzz);
+ }
- absinfo = &dev->absinfo[ABS_PRESSURE];
- of_property_read_u32(np, "touchscreen-max-pressure", &absinfo->maximum);
- of_property_read_u32(np, "touchscreen-fuzz-pressure", &absinfo->fuzz);
+ maximum = of_get_optional_u32(np, "touchscreen-max-pressure");
+ fuzz = of_get_optional_u32(np, "touchscreen-fuzz-pressure");
+ if (maximum || fuzz) {
+ touchscreen_set_params(dev, ABS_PRESSURE, maximum, fuzz);
+ touchscreen_set_params(dev, ABS_MT_PRESSURE, maximum, fuzz);
+ }
}
EXPORT_SYMBOL(touchscreen_parse_of_params);
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index b93a28b955fd..c0116994067d 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -30,6 +30,10 @@
* These kinds of heuristics are just asking for trouble (and don't belong
* in the kernel). So this driver offers straight forward, reliable single
* touch functionality only.
+ *
+ * s.a. A20 User Manual "1.15 TP" (Documentation/arm/sunxi/README)
+ * (looks like the description in the A20 User Manual v1.3 is better
+ * than the one in the A10 User Manual v1.5)
*/
#include <linux/err.h>
@@ -193,7 +197,7 @@ static int sun4i_get_temp(const struct sun4i_ts_data *ts, long *temp)
if (ts->temp_data == -1)
return -EAGAIN;
- *temp = (ts->temp_data - ts->temp_offset) * ts->temp_step;
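+ /*
+ * temp_step and temp_offset are pre-scaled so that the result
+ * is reported in milli-degrees Celsius.
+ */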
+ *temp = ts->temp_data * ts->temp_step - ts->temp_offset;
return 0;
}
@@ -246,6 +250,8 @@ static int sun4i_ts_probe(struct platform_device *pdev)
int error;
u32 reg;
bool ts_attached;
+ u32 tp_sensitive_adjust = 15;
+ u32 filter_type = 1;
ts = devm_kzalloc(dev, sizeof(struct sun4i_ts_data), GFP_KERNEL);
if (!ts)
@@ -255,22 +261,31 @@ static int sun4i_ts_probe(struct platform_device *pdev)
ts->ignore_fifo_data = true;
ts->temp_data = -1;
if (of_device_is_compatible(np, "allwinner,sun6i-a31-ts")) {
- /* Allwinner SDK has temperature = -271 + (value / 6) (C) */
- ts->temp_offset = 1626;
+ /* Allwinner SDK has temperature (C) = (value / 6) - 271 */
+ ts->temp_offset = 271000;
ts->temp_step = 167;
+ } else if (of_device_is_compatible(np, "allwinner,sun4i-a10-ts")) {
+ /*
+ * The A10 temperature sensor has quite a wide spread, these
+ * parameters are based on the averaging of the calibration
+ * results of 4 completely different boards, with a spread of
+ * temp_step from 0.096 - 0.170 and temp_offset from 176 - 331.
+ */
+ ts->temp_offset = 257000;
+ ts->temp_step = 133;
} else {
/*
* The user manuals do not contain the formula for calculating
* the temperature. The formula used here is from the AXP209,
* which is designed by X-Powers, an affiliate of Allwinner:
*
- * temperature = -144.7 + (value * 0.1)
+ * temperature (C) = (value * 0.1) - 144.7
*
* Allwinner does not have any documentation whatsoever for
* this hardware. Moreover, it is claimed that the sensor
* is inaccurate and cannot work properly.
*/
- ts->temp_offset = 1447;
+ ts->temp_offset = 144700;
ts->temp_step = 100;
}
@@ -313,14 +328,20 @@ static int sun4i_ts_probe(struct platform_device *pdev)
ts->base + TP_CTRL0);
/*
- * sensitive_adjust = 15 : max, which is not all that sensitive,
+ * tp_sensitive_adjust is an optional property
* tp_mode = 0 : only x and y coordinates, as we don't use dual touch
*/
- writel(TP_SENSITIVE_ADJUST(15) | TP_MODE_SELECT(0),
+ of_property_read_u32(np, "allwinner,tp-sensitive-adjust",
+ &tp_sensitive_adjust);
+ writel(TP_SENSITIVE_ADJUST(tp_sensitive_adjust) | TP_MODE_SELECT(0),
ts->base + TP_CTRL2);
- /* Enable median filter, type 1 : 5/3 */
- writel(FILTER_EN(1) | FILTER_TYPE(1), ts->base + TP_CTRL3);
+ /*
+ * Enable the median and averaging filter; the filter type is an
+ * optional DT property.
+ */
+ of_property_read_u32(np, "allwinner,filter-type", &filter_type);
+ writel(FILTER_EN(1) | FILTER_TYPE(filter_type), ts->base + TP_CTRL3);
/* Enable temperature measurement, period 1953 (2 seconds) */
writel(TEMP_ENABLE(1) | TEMP_PERIOD(1953), ts->base + TP_TPR);
@@ -330,10 +351,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
* finally enable tp mode.
*/
reg = STYLUS_UP_DEBOUN(5) | STYLUS_UP_DEBOUN_EN(1);
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-ts"))
- reg |= TP_MODE_EN(1);
- else
+ if (of_device_is_compatible(np, "allwinner,sun6i-a31-ts"))
reg |= SUN6I_TP_MODE_EN(1);
+ else
+ reg |= TP_MODE_EN(1);
writel(reg, ts->base + TP_CTRL1);
/*
@@ -383,6 +404,7 @@ static int sun4i_ts_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_ts_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-ts", },
+ { .compatible = "allwinner,sun5i-a13-ts", },
{ .compatible = "allwinner,sun6i-a31-ts", },
{ /* sentinel */ }
};
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index f1cb05148b46..a24eba5ea843 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -1,7 +1,7 @@
/*
* Surface2.0/SUR40/PixelSense input driver
*
- * Copyright (c) 2013 by Florian 'floe' Echtler <floe@butterbrot.org>
+ * Copyright (c) 2014 by Florian 'floe' Echtler <floe@butterbrot.org>
*
* Derived from the USB Skeleton driver 1.1,
* Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com)
@@ -12,6 +12,9 @@
* and from the generic hid-multitouch driver,
* Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr>
*
+ * and from the v4l2-pci-skeleton driver,
+ * Copyright (c) 2014 Cisco Systems, Inc.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
@@ -31,6 +34,11 @@
#include <linux/input-polldev.h>
#include <linux/input/mt.h>
#include <linux/usb/input.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-sg.h>
/* read 512 bytes from endpoint 0x86 -> get header + blobs */
struct sur40_header {
@@ -82,9 +90,19 @@ struct sur40_data {
struct sur40_blob blobs[];
} __packed;
+/*
+ * read 512 bytes from endpoint 0x82 -> get header below
+ * continue reading 16k blocks until header.size bytes read
+ */
+struct sur40_image_header {
+ __le32 magic; /* "SUBF" */
+ __le32 packet_id;
+ __le32 size; /* always 0x0007e900 = 960x540 */
+ __le32 timestamp; /* milliseconds (increases by 16 or 17 each frame) */
+ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */
+} __packed;
/* version information */
#define DRIVER_SHORT "sur40"
+#define DRIVER_LONG "Samsung SUR40"
#define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>"
#define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver"
@@ -99,6 +117,13 @@ struct sur40_data {
/* touch data endpoint */
#define TOUCH_ENDPOINT 0x86
+/* video data endpoint */
+#define VIDEO_ENDPOINT 0x82
+
+/* video header fields */
+#define VIDEO_HEADER_MAGIC 0x46425553
+#define VIDEO_PACKET_SIZE 16384
+
/* polling interval (ms) */
#define POLL_INTERVAL 10
@@ -113,21 +138,23 @@ struct sur40_data {
#define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */
#define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */
-/*
- * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT
- * here by mistake which is very likely to have corrupted the firmware EEPROM
- * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug.
- * Should you ever run into a similar problem, the background story to this
- * incident and instructions on how to fix the corrupted EEPROM are available
- * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html
-*/
-
+/* master device state */
struct sur40_state {
struct usb_device *usbdev;
struct device *dev;
struct input_polled_dev *input;
+ struct v4l2_device v4l2;
+ struct video_device vdev;
+ struct mutex lock;
+
+ struct vb2_queue queue;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct list_head buf_list;
+ spinlock_t qlock;
+ int sequence;
+
struct sur40_data *bulk_in_buffer;
size_t bulk_in_size;
u8 bulk_in_epaddr;
@@ -135,6 +162,27 @@ struct sur40_state {
char phys[64];
};
+struct sur40_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+/* forward declarations */
+static const struct video_device sur40_video_device;
+static const struct v4l2_pix_format sur40_video_format;
+static const struct vb2_queue sur40_queue;
+static void sur40_process_video(struct sur40_state *sur40);
+
+/*
+ * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT
+ * here by mistake which is very likely to have corrupted the firmware EEPROM
+ * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug.
+ * Should you ever run into a similar problem, the background story to this
+ * incident and instructions on how to fix the corrupted EEPROM are available
+ * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html
+*/
+
+/* command wrapper */
static int sur40_command(struct sur40_state *dev,
u8 command, u16 index, void *buffer, u16 size)
{
@@ -247,7 +295,6 @@ static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input)
/* core function: poll for new input data */
static void sur40_poll(struct input_polled_dev *polldev)
{
-
struct sur40_state *sur40 = polldev->private;
struct input_dev *input = polldev->input;
int result, bulk_read, need_blobs, packet_blobs, i;
@@ -314,6 +361,86 @@ static void sur40_poll(struct input_polled_dev *polldev)
input_mt_sync_frame(input);
input_sync(input);
+
+ sur40_process_video(sur40);
+}
+
+/* deal with video data */
+static void sur40_process_video(struct sur40_state *sur40)
+{
+
+ struct sur40_image_header *img = (void *)(sur40->bulk_in_buffer);
+ struct sur40_buffer *new_buf;
+ struct usb_sg_request sgr;
+ struct sg_table *sgt;
+ int result, bulk_read;
+
+ if (!vb2_start_streaming_called(&sur40->queue))
+ return;
+
+ /* get a new buffer from the list */
+ spin_lock(&sur40->qlock);
+ if (list_empty(&sur40->buf_list)) {
+ dev_dbg(sur40->dev, "buffer queue empty\n");
+ spin_unlock(&sur40->qlock);
+ return;
+ }
+ new_buf = list_entry(sur40->buf_list.next, struct sur40_buffer, list);
+ list_del(&new_buf->list);
+ spin_unlock(&sur40->qlock);
+
+ /* retrieve data via bulk read */
+ result = usb_bulk_msg(sur40->usbdev,
+ usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT),
+ sur40->bulk_in_buffer, sur40->bulk_in_size,
+ &bulk_read, 1000);
+
+ if (result < 0) {
+ dev_err(sur40->dev, "error in usb_bulk_read\n");
+ goto err_poll;
+ }
+
+ if (bulk_read != sizeof(struct sur40_image_header)) {
+ dev_err(sur40->dev, "received %d bytes (%zd expected)\n",
+ bulk_read, sizeof(struct sur40_image_header));
+ goto err_poll;
+ }
+
+ if (le32_to_cpu(img->magic) != VIDEO_HEADER_MAGIC) {
+ dev_err(sur40->dev, "image magic mismatch\n");
+ goto err_poll;
+ }
+
+ if (le32_to_cpu(img->size) != sur40_video_format.sizeimage) {
+ dev_err(sur40->dev, "image size mismatch\n");
+ goto err_poll;
+ }
+
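+ /* read the image payload straight into the vb2 buffer's scatter-gather list */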
+ sgt = vb2_dma_sg_plane_desc(&new_buf->vb, 0);
+
+ result = usb_sg_init(&sgr, sur40->usbdev,
+ usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0,
+ sgt->sgl, sgt->nents, sur40_video_format.sizeimage, 0);
+ if (result < 0) {
+ dev_err(sur40->dev, "error %d in usb_sg_init\n", result);
+ goto err_poll;
+ }
+
+ usb_sg_wait(&sgr);
+ if (sgr.status < 0) {
+ dev_err(sur40->dev, "error %d in usb_sg_wait\n", sgr.status);
+ goto err_poll;
+ }
+
+ /* mark as finished */
+ v4l2_get_timestamp(&new_buf->vb.v4l2_buf.timestamp);
+ new_buf->vb.v4l2_buf.sequence = sur40->sequence++;
+ new_buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_DONE);
+ return;
+
+err_poll:
+ vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_ERROR);
}
/* Initialize input device parameters. */
@@ -377,6 +504,11 @@ static int sur40_probe(struct usb_interface *interface,
goto err_free_dev;
}
+ /* initialize locks/lists */
+ INIT_LIST_HEAD(&sur40->buf_list);
+ spin_lock_init(&sur40->qlock);
+ mutex_init(&sur40->lock);
+
/* Set up polled input device control structure */
poll_dev->private = sur40;
poll_dev->poll_interval = POLL_INTERVAL;
@@ -387,7 +519,7 @@ static int sur40_probe(struct usb_interface *interface,
/* Set up regular input device structure */
sur40_input_setup(poll_dev->input);
- poll_dev->input->name = "Samsung SUR40";
+ poll_dev->input->name = DRIVER_LONG;
usb_to_input_id(usbdev, &poll_dev->input->id);
usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys));
strlcat(sur40->phys, "/input0", sizeof(sur40->phys));
@@ -408,6 +540,7 @@ static int sur40_probe(struct usb_interface *interface,
goto err_free_polldev;
}
+ /* register the polled input device */
error = input_register_polled_device(poll_dev);
if (error) {
dev_err(&interface->dev,
@@ -415,12 +548,54 @@ static int sur40_probe(struct usb_interface *interface,
goto err_free_buffer;
}
+ /* register the video master device */
+ snprintf(sur40->v4l2.name, sizeof(sur40->v4l2.name), "%s", DRIVER_LONG);
+ error = v4l2_device_register(sur40->dev, &sur40->v4l2);
+ if (error) {
+ dev_err(&interface->dev,
+ "Unable to register video master device.");
+ goto err_unreg_v4l2;
+ }
+
+ /* initialize the lock and subdevice */
+ sur40->queue = sur40_queue;
+ sur40->queue.drv_priv = sur40;
+ sur40->queue.lock = &sur40->lock;
+
+ /* initialize the queue */
+ error = vb2_queue_init(&sur40->queue);
+ if (error)
+ goto err_unreg_v4l2;
+
+ sur40->alloc_ctx = vb2_dma_sg_init_ctx(sur40->dev);
+ if (IS_ERR(sur40->alloc_ctx)) {
+ dev_err(sur40->dev, "Can't allocate buffer context");
+ goto err_unreg_v4l2;
+ }
+
+ sur40->vdev = sur40_video_device;
+ sur40->vdev.v4l2_dev = &sur40->v4l2;
+ sur40->vdev.lock = &sur40->lock;
+ sur40->vdev.queue = &sur40->queue;
+ video_set_drvdata(&sur40->vdev, sur40);
+
+ error = video_register_device(&sur40->vdev, VFL_TYPE_GRABBER, -1);
+ if (error) {
+ dev_err(&interface->dev,
+ "Unable to register video subdevice.");
+ goto err_unreg_video;
+ }
+
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, sur40);
dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC);
return 0;
+err_unreg_video:
+ video_unregister_device(&sur40->vdev);
+err_unreg_v4l2:
+ v4l2_device_unregister(&sur40->v4l2);
err_free_buffer:
kfree(sur40->bulk_in_buffer);
err_free_polldev:
@@ -436,6 +611,10 @@ static void sur40_disconnect(struct usb_interface *interface)
{
struct sur40_state *sur40 = usb_get_intfdata(interface);
+ video_unregister_device(&sur40->vdev);
+ v4l2_device_unregister(&sur40->v4l2);
+ vb2_dma_sg_cleanup_ctx(sur40->alloc_ctx);
+
input_unregister_polled_device(sur40->input);
input_free_polled_device(sur40->input);
kfree(sur40->bulk_in_buffer);
@@ -445,12 +624,243 @@ static void sur40_disconnect(struct usb_interface *interface)
dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC);
}
+/*
+ * Setup the constraints of the queue: besides setting the number of planes
+ * per buffer and the size and allocation context of each plane, it also
+ * checks if sufficient buffers have been allocated. Usually 3 is a good
+ * minimum number: many DMA engines need a minimum of 2 buffers in the
+ * queue and you need to have another available for userspace processing.
+ */
+static int sur40_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct sur40_state *sur40 = vb2_get_drv_priv(q);
+
+ if (q->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - q->num_buffers;
+
+ if (fmt && fmt->fmt.pix.sizeimage < sur40_video_format.sizeimage)
+ return -EINVAL;
+
+ *nplanes = 1;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : sur40_video_format.sizeimage;
+ alloc_ctxs[0] = sur40->alloc_ctx;
+
+ return 0;
+}
+
+/*
+ * Prepare the buffer for queueing to the DMA engine: check and set the
+ * payload size.
+ */
+static int sur40_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = sur40_video_format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(&sur40->usbdev->dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ return 0;
+}
+
+/*
+ * Queue this buffer to the DMA engine.
+ */
+static void sur40_buffer_queue(struct vb2_buffer *vb)
+{
+ struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue);
+ struct sur40_buffer *buf = (struct sur40_buffer *)vb;
+
+ spin_lock(&sur40->qlock);
+ list_add_tail(&buf->list, &sur40->buf_list);
+ spin_unlock(&sur40->qlock);
+}
+
+static void return_all_buffers(struct sur40_state *sur40,
+ enum vb2_buffer_state state)
+{
+ struct sur40_buffer *buf, *node;
+
+ spin_lock(&sur40->qlock);
+ list_for_each_entry_safe(buf, node, &sur40->buf_list, list) {
+ vb2_buffer_done(&buf->vb, state);
+ list_del(&buf->list);
+ }
+ spin_unlock(&sur40->qlock);
+}
+
+/*
+ * Start streaming. The vb2 core calls this only once at least
+ * min_buffers_needed buffers have been queued, so all that is left
+ * to do here is resetting the frame sequence counter.
+ */
+static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct sur40_state *sur40 = vb2_get_drv_priv(vq);
+
+ sur40->sequence = 0;
+ return 0;
+}
+
+/*
+ * Stop streaming. Any buffers still on the driver's queue are returned
+ * to the vb2 framework marked as STATE_ERROR.
+ */
+static void sur40_stop_streaming(struct vb2_queue *vq)
+{
+ struct sur40_state *sur40 = vb2_get_drv_priv(vq);
+
+ /* Release all active buffers */
+ return_all_buffers(sur40, VB2_BUF_STATE_ERROR);
+}
+
+/* V4L ioctl */
+static int sur40_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sur40_state *sur40 = video_drvdata(file);
+
+ strlcpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
+ strlcpy(cap->card, DRIVER_LONG, sizeof(cap->card));
+ usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int sur40_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ i->std = V4L2_STD_UNKNOWN;
+ strlcpy(i->name, "In-Cell Sensor", sizeof(i->name));
+ i->capabilities = 0;
+ return 0;
+}
+
+static int sur40_vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return (i == 0) ? 0 : -EINVAL;
+}
+
+static int sur40_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int sur40_vidioc_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ f->fmt.pix = sur40_video_format;
+ return 0;
+}
+
+static int sur40_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+ strlcpy(f->description, "8-bit greyscale", sizeof(f->description));
+ f->pixelformat = V4L2_PIX_FMT_GREY;
+ f->flags = 0;
+ return 0;
+}
+
static const struct usb_device_id sur40_table[] = {
{ USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */
{ } /* terminating null entry */
};
MODULE_DEVICE_TABLE(usb, sur40_table);
+/* V4L2 structures */
+static const struct vb2_ops sur40_queue_ops = {
+ .queue_setup = sur40_queue_setup,
+ .buf_prepare = sur40_buffer_prepare,
+ .buf_queue = sur40_buffer_queue,
+ .start_streaming = sur40_start_streaming,
+ .stop_streaming = sur40_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static const struct vb2_queue sur40_queue = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ /*
+ * VB2_USERPTR is currently not enabled: passing a user pointer to
+ * dma-sg will result in segment sizes that are not a multiple of
+ * 512 bytes, which is required by the host controller.
+ */
+ .io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF,
+ .buf_struct_size = sizeof(struct sur40_buffer),
+ .ops = &sur40_queue_ops,
+ .mem_ops = &vb2_dma_sg_memops,
+ .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
+ .min_buffers_needed = 3,
+};
+
+static const struct v4l2_file_operations sur40_video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+static const struct v4l2_ioctl_ops sur40_video_ioctl_ops = {
+
+ .vidioc_querycap = sur40_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = sur40_vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = sur40_vidioc_fmt,
+ .vidioc_s_fmt_vid_cap = sur40_vidioc_fmt,
+ .vidioc_g_fmt_vid_cap = sur40_vidioc_fmt,
+
+ .vidioc_enum_input = sur40_vidioc_enum_input,
+ .vidioc_g_input = sur40_vidioc_g_input,
+ .vidioc_s_input = sur40_vidioc_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct video_device sur40_video_device = {
+ .name = DRIVER_LONG,
+ .fops = &sur40_video_fops,
+ .ioctl_ops = &sur40_video_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
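+/*
+ * The video stream is an 8-bit greyscale image at half the touch
+ * sensor resolution (960x540, per the image header above).
+ */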
+static const struct v4l2_pix_format sur40_video_format = {
+ .pixelformat = V4L2_PIX_FMT_GREY,
+ .width = SENSOR_RES_X / 2,
+ .height = SENSOR_RES_Y / 2,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bytesperline = SENSOR_RES_X / 2,
+ .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
+};
+
/* USB-specific object needed to register this driver with the USB subsystem. */
static struct usb_driver sur40_driver = {
.name = DRIVER_SHORT,
diff --git a/drivers/input/touchscreen/sx8654.c b/drivers/input/touchscreen/sx8654.c
new file mode 100644
index 000000000000..aecb9ad2e701
--- /dev/null
+++ b/drivers/input/touchscreen/sx8654.c
@@ -0,0 +1,286 @@
+/*
+ * Driver for Semtech SX8654 I2C touchscreen controller.
+ *
+ * Copyright (c) 2015 Armadeus Systems
+ * Sébastien Szymanski <sebastien.szymanski@armadeus.com>
+ *
+ * Using code from:
+ * - sx865x.c
+ * Copyright (c) 2013 U-MoBo Srl
+ * Pierluigi Passaro <p.passaro@u-mobo.com>
+ * - sx8650.c
+ * Copyright (c) 2009 Wayne Roberts
+ * - tsc2007.c
+ * Copyright (c) 2008 Kwangwoo Lee
+ * - ads7846.c
+ * Copyright (c) 2005 David Brownell
+ * Copyright (c) 2006 Nokia Corporation
+ * - corgi_ts.c
+ * Copyright (C) 2004-2005 Richard Purdie
+ * - omap_ts.[hc], ads7846.h, ts_osk.c
+ * Copyright (C) 2002 MontaVista Software
+ * Copyright (C) 2004 Texas Instruments
+ * Copyright (C) 2005 Dirk Behme
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+/* register addresses */
+#define I2C_REG_TOUCH0 0x00
+#define I2C_REG_TOUCH1 0x01
+#define I2C_REG_CHANMASK 0x04
+#define I2C_REG_IRQMASK 0x22
+#define I2C_REG_IRQSRC 0x23
+#define I2C_REG_SOFTRESET 0x3f
+
+/* commands */
+#define CMD_READ_REGISTER 0x40
+#define CMD_MANUAL 0xc0
+#define CMD_PENTRG 0xe0
+
+/* value for I2C_REG_SOFTRESET */
+#define SOFTRESET_VALUE 0xde
+
+/* bits for I2C_REG_IRQSRC */
+#define IRQ_PENTOUCH_TOUCHCONVDONE 0x08
+#define IRQ_PENRELEASE 0x04
+
+/* bits for RegTouch1 */
+#define CONDIRQ 0x20
+#define FILT_7SA 0x03
+
+/* bits for I2C_REG_CHANMASK */
+#define CONV_X 0x80
+#define CONV_Y 0x40
+
+/* coordinates rate: higher nibble of CTRL0 register */
+#define RATE_MANUAL 0x00
+#define RATE_5000CPS 0xf0
+
+/* power delay: lower nibble of CTRL0 register */
+#define POWDLY_1_1MS 0x0b
+
+#define MAX_12BIT ((1 << 12) - 1)
+
+struct sx8654 {
+ struct input_dev *input;
+ struct i2c_client *client;
+};
+
+static irqreturn_t sx8654_irq(int irq, void *handle)
+{
+ struct sx8654 *sx8654 = handle;
+ int irqsrc;
+ u8 data[4];
+ unsigned int x, y;
+ int retval;
+
+ irqsrc = i2c_smbus_read_byte_data(sx8654->client,
+ CMD_READ_REGISTER | I2C_REG_IRQSRC);
+ dev_dbg(&sx8654->client->dev, "irqsrc = 0x%x", irqsrc);
+
+ if (irqsrc < 0)
+ goto out;
+
+ if (irqsrc & IRQ_PENRELEASE) {
+ dev_dbg(&sx8654->client->dev, "pen release interrupt");
+
+ input_report_key(sx8654->input, BTN_TOUCH, 0);
+ input_sync(sx8654->input);
+ }
+
+ if (irqsrc & IRQ_PENTOUCH_TOUCHCONVDONE) {
+ dev_dbg(&sx8654->client->dev, "pen touch interrupt");
+
+ retval = i2c_master_recv(sx8654->client, data, sizeof(data));
+ if (retval != sizeof(data))
+ goto out;
+
+ /* invalid data */
+ if (unlikely(data[0] & 0x80 || data[2] & 0x80))
+ goto out;
+
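+ /* 12-bit coordinates: high nibble in data[0]/data[2], low byte in data[1]/data[3] */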
+ x = ((data[0] & 0xf) << 8) | (data[1]);
+ y = ((data[2] & 0xf) << 8) | (data[3]);
+
+ input_report_abs(sx8654->input, ABS_X, x);
+ input_report_abs(sx8654->input, ABS_Y, y);
+ input_report_key(sx8654->input, BTN_TOUCH, 1);
+ input_sync(sx8654->input);
+
+ dev_dbg(&sx8654->client->dev, "point(%4d,%4d)\n", x, y);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int sx8654_open(struct input_dev *dev)
+{
+ struct sx8654 *sx8654 = input_get_drvdata(dev);
+ struct i2c_client *client = sx8654->client;
+ int error;
+
+ /* enable pen trigger mode */
+ error = i2c_smbus_write_byte_data(client, I2C_REG_TOUCH0,
+ RATE_5000CPS | POWDLY_1_1MS);
+ if (error) {
+ dev_err(&client->dev, "writing to I2C_REG_TOUCH0 failed");
+ return error;
+ }
+
+ error = i2c_smbus_write_byte(client, CMD_PENTRG);
+ if (error) {
+ dev_err(&client->dev, "writing command CMD_PENTRG failed");
+ return error;
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static void sx8654_close(struct input_dev *dev)
+{
+ struct sx8654 *sx8654 = input_get_drvdata(dev);
+ struct i2c_client *client = sx8654->client;
+ int error;
+
+ disable_irq(client->irq);
+
+ /* enable manual mode */
+ error = i2c_smbus_write_byte(client, CMD_MANUAL);
+ if (error) {
+ dev_err(&client->dev, "writing command CMD_MANUAL failed");
+ return;
+ }
+
+ error = i2c_smbus_write_byte_data(client, I2C_REG_TOUCH0, 0);
+ if (error) {
+ dev_err(&client->dev, "writing to I2C_REG_TOUCH0 failed");
+ return;
+ }
+}
+
+static int sx8654_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sx8654 *sx8654;
+ struct input_dev *input;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENXIO;
+
+ sx8654 = devm_kzalloc(&client->dev, sizeof(*sx8654), GFP_KERNEL);
+ if (!sx8654)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "SX8654 I2C Touchscreen";
+ input->id.bustype = BUS_I2C;
+ input->dev.parent = &client->dev;
+ input->open = sx8654_open;
+ input->close = sx8654_close;
+
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(input, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, MAX_12BIT, 0, 0);
+
+ sx8654->client = client;
+ sx8654->input = input;
+
+ input_set_drvdata(sx8654->input, sx8654);
+
+ error = i2c_smbus_write_byte_data(client, I2C_REG_SOFTRESET,
+ SOFTRESET_VALUE);
+ if (error) {
+ dev_err(&client->dev, "writing softreset value failed");
+ return error;
+ }
+
+ error = i2c_smbus_write_byte_data(client, I2C_REG_CHANMASK,
+ CONV_X | CONV_Y);
+ if (error) {
+ dev_err(&client->dev, "writing to I2C_REG_CHANMASK failed");
+ return error;
+ }
+
+ error = i2c_smbus_write_byte_data(client, I2C_REG_IRQMASK,
+ IRQ_PENTOUCH_TOUCHCONVDONE |
+ IRQ_PENRELEASE);
+ if (error) {
+ dev_err(&client->dev, "writing to I2C_REG_IRQMASK failed");
+ return error;
+ }
+
+ error = i2c_smbus_write_byte_data(client, I2C_REG_TOUCH1,
+ CONDIRQ | FILT_7SA);
+ if (error) {
+ dev_err(&client->dev, "writing to I2C_REG_TOUCH1 failed");
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, sx8654_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, sx8654);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable IRQ %d, error: %d\n",
+ client->irq, error);
+ return error;
+ }
+
+ /* Disable the IRQ, we'll enable it in sx8654_open() */
+ disable_irq(client->irq);
+
+ error = input_register_device(sx8654->input);
+ if (error)
+ return error;
+
+ i2c_set_clientdata(client, sx8654);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sx8654_of_match[] = {
+ { .compatible = "semtech,sx8654", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sx8654_of_match);
+#endif
+
+static const struct i2c_device_id sx8654_id_table[] = {
+ { "semtech_sx8654", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sx8654_id_table);
+
+static struct i2c_driver sx8654_driver = {
+ .driver = {
+ .name = "sx8654",
+ .of_match_table = of_match_ptr(sx8654_of_match),
+ },
+ .id_table = sx8654_id_table,
+ .probe = sx8654_probe,
+};
+module_i2c_driver(sx8654_driver);
+
+MODULE_AUTHOR("Sébastien Szymanski <sebastien.szymanski@armadeus.com>");
+MODULE_DESCRIPTION("Semtech SX8654 I2C Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 1bf9906b5a3f..ccc8aa615709 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -75,7 +75,7 @@ struct tsc2007 {
u16 model;
u16 x_plate_ohms;
u16 max_rt;
- unsigned long poll_period;
+ unsigned long poll_period; /* in jiffies */
int fuzzx;
int fuzzy;
int fuzzz;
@@ -214,8 +214,7 @@ static irqreturn_t tsc2007_soft_irq(int irq, void *handle)
dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt);
}
- wait_event_timeout(ts->wait, ts->stopped,
- msecs_to_jiffies(ts->poll_period));
+ wait_event_timeout(ts->wait, ts->stopped, ts->poll_period);
}
dev_dbg(&ts->client->dev, "UP\n");
@@ -314,9 +313,9 @@ static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts)
ts->fuzzz = val32;
if (!of_property_read_u64(np, "ti,poll-period", &val64))
- ts->poll_period = val64;
+ ts->poll_period = msecs_to_jiffies(val64);
else
- ts->poll_period = 1;
+ ts->poll_period = msecs_to_jiffies(1);
if (!of_property_read_u32(np, "ti,x-plate-ohms", &val32)) {
ts->x_plate_ohms = val32;
@@ -350,7 +349,7 @@ static int tsc2007_probe_pdev(struct i2c_client *client, struct tsc2007 *ts,
ts->model = pdata->model;
ts->x_plate_ohms = pdata->x_plate_ohms;
ts->max_rt = pdata->max_rt ? : MAX_12BIT;
- ts->poll_period = pdata->poll_period ? : 1;
+ ts->poll_period = msecs_to_jiffies(pdata->poll_period ? : 1);
ts->get_pendown_state = pdata->get_pendown_state;
ts->clear_penirq = pdata->clear_penirq;
ts->fuzzx = pdata->fuzzx;
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index a0966331a89b..f2c6c352c55a 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -132,6 +132,7 @@ enum {
DEVTYPE_GUNZE,
DEVTYPE_DMC_TSC10,
DEVTYPE_IRTOUCH,
+ DEVTYPE_IRTOUCH_HIRES,
DEVTYPE_IDEALTEK,
DEVTYPE_GENERAL_TOUCH,
DEVTYPE_GOTOP,
@@ -198,6 +199,7 @@ static const struct usb_device_id usbtouch_devices[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
{USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
+ {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
#endif
#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
@@ -1177,6 +1179,15 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
.rept_size = 8,
.read_data = irtouch_read_data,
},
+
+ [DEVTYPE_IRTOUCH_HIRES] = {
+ .min_xc = 0x0,
+ .max_xc = 0x7fff,
+ .min_yc = 0x0,
+ .max_yc = 0x7fff,
+ .rept_size = 8,
+ .read_data = irtouch_read_data,
+ },
#endif
#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 48882c126245..e43d48956dea 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -33,6 +33,7 @@
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
+#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -126,6 +127,11 @@ static int __init alloc_passthrough_domain(void);
*
****************************************************************************/
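+/* Map a generic iommu_domain back to the protection_domain that embeds it */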
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct protection_domain, domain);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -1321,7 +1327,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
-static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+static u64 *fetch_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long *page_size)
{
int level;
u64 *pte;
@@ -1329,8 +1337,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
if (address > PM_LEVEL_SIZE(domain->mode))
return NULL;
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -1339,19 +1348,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
return NULL;
/* Large PTE */
- if (PM_PTE_LEVEL(*pte) == 0x07) {
- unsigned long pte_mask, __pte;
-
- /*
- * If we have a series of large PTEs, make
- * sure to return a pointer to the first one.
- */
- pte_mask = PTE_PAGE_SIZE(*pte);
- pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
- __pte = ((unsigned long)pte) & pte_mask;
-
- return (u64 *)__pte;
- }
+ if (PM_PTE_LEVEL(*pte) == 7 ||
+ PM_PTE_LEVEL(*pte) == 0)
+ break;
/* No level skipping support yet */
if (PM_PTE_LEVEL(*pte) != level)
@@ -1360,8 +1359,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
level -= 1;
/* Walk to the next level */
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[PM_LEVEL_INDEX(level, address)];
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+ }
+
+ if (PM_PTE_LEVEL(*pte) == 0x07) {
+ unsigned long pte_mask;
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ *page_size = pte_mask = PTE_PAGE_SIZE(*pte);
+ pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+ pte = (u64 *)(((unsigned long)pte) & pte_mask);
}
return pte;
@@ -1383,13 +1395,14 @@ static int iommu_map_page(struct protection_domain *dom,
u64 __pte, *pte;
int i, count;
+ BUG_ON(!IS_ALIGNED(bus_addr, page_size));
+ BUG_ON(!IS_ALIGNED(phys_addr, page_size));
+
if (!(prot & IOMMU_PROT_MASK))
return -EINVAL;
- bus_addr = PAGE_ALIGN(bus_addr);
- phys_addr = PAGE_ALIGN(phys_addr);
- count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+ count = PAGE_SIZE_PTE_COUNT(page_size);
+ pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
if (!pte)
return -ENOMEM;
@@ -1398,7 +1411,7 @@ static int iommu_map_page(struct protection_domain *dom,
if (IOMMU_PTE_PRESENT(pte[i]))
return -EBUSY;
- if (page_size > PAGE_SIZE) {
+ if (count > 1) {
__pte = PAGE_SIZE_PTE(phys_addr, page_size);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
} else
@@ -1421,7 +1434,8 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long page_size)
{
- unsigned long long unmap_size, unmapped;
+ unsigned long long unmapped;
+ unsigned long unmap_size;
u64 *pte;
BUG_ON(!is_power_of_2(page_size));
@@ -1430,28 +1444,12 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
while (unmapped < page_size) {
- pte = fetch_pte(dom, bus_addr);
-
- if (!pte) {
- /*
- * No PTE for this address
- * move forward in 4kb steps
- */
- unmap_size = PAGE_SIZE;
- } else if (PM_PTE_LEVEL(*pte) == 0) {
- /* 4kb PTE found for this address */
- unmap_size = PAGE_SIZE;
- *pte = 0ULL;
- } else {
- int count, i;
-
- /* Large PTE found which maps this address */
- unmap_size = PTE_PAGE_SIZE(*pte);
-
- /* Only unmap from the first pte in the page */
- if ((unmap_size - 1) & bus_addr)
- break;
- count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ pte = fetch_pte(dom, bus_addr, &unmap_size);
+
+ if (pte) {
+ int i, count;
+
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
}
@@ -1599,7 +1597,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
{
int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
struct amd_iommu *iommu;
- unsigned long i, old_size;
+ unsigned long i, old_size, pte_pgsize;
#ifdef CONFIG_IOMMU_STRESS
populate = false;
@@ -1672,12 +1670,13 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
*/
for (i = dma_dom->aperture[index]->offset;
i < dma_dom->aperture_size;
- i += PAGE_SIZE) {
- u64 *pte = fetch_pte(&dma_dom->domain, i);
+ i += pte_pgsize) {
+ u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
- dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
+ dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
+ pte_pgsize >> 12);
}
update_domain(&dma_dom->domain);
@@ -2422,16 +2421,6 @@ static int device_change_notifier(struct notifier_block *nb,
dev_data = get_dev_data(dev);
switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
-
- domain = domain_for_device(dev);
-
- if (!domain)
- goto out;
- if (dev_data->passthrough)
- break;
- detach_device(dev);
- break;
case BUS_NOTIFY_ADD_DEVICE:
iommu_init_device(dev);
@@ -2467,7 +2456,7 @@ static int device_change_notifier(struct notifier_block *nb,
dev->archdata.dma_ops = &amd_iommu_dma_ops;
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
iommu_uninit_device(dev);
@@ -2923,38 +2912,42 @@ static void *alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs)
{
- unsigned long flags;
- void *virt_addr;
- struct protection_domain *domain;
- phys_addr_t paddr;
u64 dma_mask = dev->coherent_dma_mask;
+ struct protection_domain *domain;
+ unsigned long flags;
+ struct page *page;
INC_STATS_COUNTER(cnt_alloc_coherent);
domain = get_domain(dev);
if (PTR_ERR(domain) == -EINVAL) {
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- *dma_addr = __pa(virt_addr);
- return virt_addr;
+ page = alloc_pages(flag, get_order(size));
+ *dma_addr = page_to_phys(page);
+ return page_address(page);
} else if (IS_ERR(domain))
return NULL;
+ size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- if (!virt_addr)
- return NULL;
+ page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
+ if (!page) {
+ if (!(flag & __GFP_WAIT))
+ return NULL;
- paddr = virt_to_phys(virt_addr);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+ }
if (!dma_mask)
dma_mask = *dev->dma_mask;
spin_lock_irqsave(&domain->lock, flags);
- *dma_addr = __map_single(dev, domain->priv, paddr,
+ *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
size, DMA_BIDIRECTIONAL, true, dma_mask);
if (*dma_addr == DMA_ERROR_CODE) {
@@ -2966,11 +2959,12 @@ static void *alloc_coherent(struct device *dev, size_t size,
spin_unlock_irqrestore(&domain->lock, flags);
- return virt_addr;
+ return page_address(page);
out_free:
- free_pages((unsigned long)virt_addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, get_order(size));
return NULL;
}
@@ -2982,11 +2976,15 @@ static void free_coherent(struct device *dev, size_t size,
void *virt_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
- unsigned long flags;
struct protection_domain *domain;
+ unsigned long flags;
+ struct page *page;
INC_STATS_COUNTER(cnt_free_coherent);
+ page = virt_to_page(virt_addr);
+ size = PAGE_ALIGN(size);
+
domain = get_domain(dev);
if (IS_ERR(domain))
goto free_mem;
@@ -3000,7 +2998,8 @@ static void free_coherent(struct device *dev, size_t size,
spin_unlock_irqrestore(&domain->lock, flags);
free_mem:
- free_pages((unsigned long)virt_addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, get_order(size));
}
/*
@@ -3236,42 +3235,45 @@ static int __init alloc_passthrough_domain(void)
return 0;
}
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
- struct protection_domain *domain;
+ struct protection_domain *pdomain;
- domain = protection_domain_alloc();
- if (!domain)
- goto out_free;
+ /* We only support unmanaged domains for now */
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
- domain->mode = PAGE_MODE_3_LEVEL;
- domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!domain->pt_root)
+ pdomain = protection_domain_alloc();
+ if (!pdomain)
goto out_free;
- domain->iommu_domain = dom;
-
- dom->priv = domain;
+ pdomain->mode = PAGE_MODE_3_LEVEL;
+ pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pdomain->pt_root)
+ goto out_free;
- dom->geometry.aperture_start = 0;
- dom->geometry.aperture_end = ~0ULL;
- dom->geometry.force_aperture = true;
+ pdomain->domain.geometry.aperture_start = 0;
+ pdomain->domain.geometry.aperture_end = ~0ULL;
+ pdomain->domain.geometry.force_aperture = true;
- return 0;
+ return &pdomain->domain;
out_free:
- protection_domain_free(domain);
+ protection_domain_free(pdomain);
- return -ENOMEM;
+ return NULL;
}
-static void amd_iommu_domain_destroy(struct iommu_domain *dom)
+static void amd_iommu_domain_free(struct iommu_domain *dom)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain;
- if (!domain)
+ if (!dom)
return;
+ domain = to_pdomain(dom);
+
if (domain->dev_cnt > 0)
cleanup_domain(domain);
@@ -3284,8 +3286,6 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
free_gcr3_table(domain);
protection_domain_free(domain);
-
- dom->priv = NULL;
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
@@ -3313,7 +3313,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int ret;
@@ -3340,7 +3340,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
phys_addr_t paddr, size_t page_size, int iommu_prot)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
int ret;
@@ -3362,7 +3362,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
size_t page_size)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
@@ -3380,28 +3380,22 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
- struct protection_domain *domain = dom->priv;
- unsigned long offset_mask;
- phys_addr_t paddr;
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long offset_mask, pte_pgsize;
u64 *pte, __pte;
if (domain->mode == PAGE_MODE_NONE)
return iova;
- pte = fetch_pte(domain, iova);
+ pte = fetch_pte(domain, iova, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
- if (PM_PTE_LEVEL(*pte) == 0)
- offset_mask = PAGE_SIZE - 1;
- else
- offset_mask = PTE_PAGE_SIZE(*pte) - 1;
-
- __pte = *pte & PM_ADDR_MASK;
- paddr = (__pte & ~offset_mask) | (iova & offset_mask);
+ offset_mask = pte_pgsize - 1;
+ __pte = *pte & PM_ADDR_MASK;
- return paddr;
+ return (__pte & ~offset_mask) | (iova & offset_mask);
}
static bool amd_iommu_capable(enum iommu_cap cap)
@@ -3420,8 +3414,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
- .domain_init = amd_iommu_domain_init,
- .domain_destroy = amd_iommu_domain_destroy,
+ .domain_alloc = amd_iommu_domain_alloc,
+ .domain_free = amd_iommu_domain_free,
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
@@ -3483,7 +3477,7 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
@@ -3504,7 +3498,7 @@ EXPORT_SYMBOL(amd_iommu_domain_direct_map);
int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int levels, ret;
@@ -3616,7 +3610,7 @@ static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
u64 address)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3638,7 +3632,7 @@ static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3718,7 +3712,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid)
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
unsigned long cr3)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3732,7 +3726,7 @@ EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3765,17 +3759,17 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
- struct protection_domain *domain;
+ struct protection_domain *pdomain;
- domain = get_domain(&pdev->dev);
- if (IS_ERR(domain))
+ pdomain = get_domain(&pdev->dev);
+ if (IS_ERR(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
- if (!(domain->flags & PD_IOMMUV2_MASK))
+ if (!(pdomain->flags & PD_IOMMUV2_MASK))
return NULL;
- return domain->iommu_domain;
+ return &pdomain->domain;
}
EXPORT_SYMBOL(amd_iommu_get_v2_domain);
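Every driver touched below follows the same conversion the AMD hunks above illustrate: the generic struct iommu_domain is embedded in the driver-private domain structure, the private structure is recovered with container_of() instead of reading domain->priv, and the domain_init/domain_destroy callbacks give way to domain_alloc/domain_free. A minimal sketch of the resulting shape, with placeholder foo_* names that are not part of this patch:

#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_domain {
	spinlock_t		lock;		/* driver-private state ...       */
	struct iommu_domain	domain;		/* embedded generic handle        */
};

static struct foo_domain *to_foo_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct foo_domain, domain);
}

static struct iommu_domain *foo_domain_alloc(unsigned type)
{
	struct foo_domain *fd;

	if (type != IOMMU_DOMAIN_UNMANAGED)	/* only unmanaged domains so far  */
		return NULL;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return NULL;

	spin_lock_init(&fd->lock);

	return &fd->domain;			/* the core only sees this handle */
}

static void foo_domain_free(struct iommu_domain *dom)
{
	kfree(to_foo_domain(dom));
}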
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c4fffb710c58..05030e523771 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -282,6 +282,12 @@
#define PTE_PAGE_SIZE(pte) \
(1ULL << (1 + ffz(((pte) | 0xfffULL))))
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level) \
+ (1ULL << (12 + (9 * (level))))
+
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
@@ -400,6 +406,8 @@ struct iommu_domain;
struct protection_domain {
struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
+ struct iommu_domain domain; /* generic domain handle used by
+ iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
@@ -411,10 +419,7 @@ struct protection_domain {
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
- void *priv; /* private data */
- struct iommu_domain *iommu_domain; /* Pointer to generic
- domain structure */
-
+ void *priv; /* private data */
};
/*
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6d5a5c44453b..3465faf1809e 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
+ atomic_dec(&pasid_state->count);
wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
@@ -417,7 +418,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
dev_state = pasid_state->device_state;
run_inv_ctx_cb = !pasid_state->invalid;
- if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
+ if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
unbind_pasid(pasid_state);
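Two smaller AMD changes ride along with the conversion. The PTE_LEVEL_PAGE_SIZE() macro added to amd_iommu_types.h above maps a page-table level to its default page size; expanding 1ULL << (12 + 9 * level) gives, for reference:

	level 0:  1ULL << 12  =   4 KiB
	level 1:  1ULL << 21  =   2 MiB
	level 2:  1ULL << 30  =   1 GiB
	level 3:  1ULL << 39  = 512 GiB

The put_pasid_state_wait() change in amd_iommu_v2.c above is an unrelated reference-count fix: the caller now drops its own reference before sleeping, so the wait condition (the count reaching zero) can be met once the remaining users release theirs.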
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a3adde6519f0..66a803b9dd3a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -224,14 +224,7 @@
#define RESUME_TERMINATE (1 << 0)
#define TTBCR2_SEP_SHIFT 15
-#define TTBCR2_SEP_MASK 0x7
-
-#define TTBCR2_ADDR_32 0
-#define TTBCR2_ADDR_36 1
-#define TTBCR2_ADDR_40 2
-#define TTBCR2_ADDR_42 3
-#define TTBCR2_ADDR_44 4
-#define TTBCR2_ADDR_48 5
+#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
#define TTBRn_HI_ASID_SHIFT 16
@@ -343,6 +336,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
+ struct iommu_domain domain;
};
static struct iommu_ops arm_smmu_ops;
@@ -360,6 +354,11 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct arm_smmu_domain, domain);
+}
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -645,7 +644,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
u32 fsr, far, fsynr, resume;
unsigned long iova;
struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *cb_base;
@@ -730,6 +729,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ if (smmu->version > ARM_SMMU_V1) {
+ /*
+ * CBA2R.
+ * *Must* be initialised before CBAR thanks to a VMID16
+ * architectural oversight affecting some implementations.
+ */
+#ifdef CONFIG_64BIT
+ reg = CBA2R_RW64_64BIT;
+#else
+ reg = CBA2R_RW64_32BIT;
+#endif
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+ }
+
/* CBAR */
reg = cfg->cbar;
if (smmu->version == ARM_SMMU_V1)
@@ -747,16 +760,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
- if (smmu->version > ARM_SMMU_V1) {
- /* CBA2R */
-#ifdef CONFIG_64BIT
- reg = CBA2R_RW64_64BIT;
-#else
- reg = CBA2R_RW64_32BIT;
-#endif
- writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
- }
-
/* TTBRs */
if (stage1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
@@ -783,26 +786,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
if (smmu->version > ARM_SMMU_V1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
- switch (smmu->va_size) {
- case 32:
- reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
- break;
- case 36:
- reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
- break;
- case 40:
- reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
- break;
- case 42:
- reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
- break;
- case 44:
- reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
- break;
- case 48:
- reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
- break;
- }
+ reg |= TTBCR2_SEP_UPSTREAM;
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
}
} else {
@@ -836,7 +820,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
enum io_pgtable_fmt fmt;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
mutex_lock(&smmu_domain->init_mutex);
@@ -958,7 +942,7 @@ out_unlock:
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *cb_base;
@@ -985,10 +969,12 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
-static int arm_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -996,17 +982,17 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
- return -ENOMEM;
+ return NULL;
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
- domain->priv = smmu_domain;
- return 0;
+
+ return &smmu_domain->domain;
}
-static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+static void arm_smmu_domain_free(struct iommu_domain *domain)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
/*
* Free the domain resources. We assume that all devices have
@@ -1143,7 +1129,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
@@ -1187,7 +1173,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_master_cfg *cfg;
cfg = find_smmu_master_cfg(dev);
@@ -1203,7 +1189,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1220,7 +1206,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
{
size_t ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1235,7 +1221,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -1281,7 +1267,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
{
phys_addr_t ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1329,61 +1315,83 @@ static void __arm_smmu_release_pci_iommudata(void *data)
kfree(data);
}
-static int arm_smmu_add_device(struct device *dev)
+static int arm_smmu_add_pci_device(struct pci_dev *pdev)
{
- struct arm_smmu_device *smmu;
- struct arm_smmu_master_cfg *cfg;
+ int i, ret;
+ u16 sid;
struct iommu_group *group;
- void (*releasefn)(void *) = NULL;
- int ret;
-
- smmu = find_smmu_for_device(dev);
- if (!smmu)
- return -ENODEV;
+ struct arm_smmu_master_cfg *cfg;
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
+ group = iommu_group_get_for_dev(&pdev->dev);
+ if (IS_ERR(group))
return PTR_ERR(group);
- }
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
+ cfg = iommu_group_get_iommudata(group);
+ if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
ret = -ENOMEM;
goto out_put_group;
}
- cfg->num_streamids = 1;
- /*
- * Assume Stream ID == Requester ID for now.
- * We need a way to describe the ID mappings in FDT.
- */
- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
- &cfg->streamids[0]);
- releasefn = __arm_smmu_release_pci_iommudata;
- } else {
- struct arm_smmu_master *master;
-
- master = find_smmu_master(smmu, dev->of_node);
- if (!master) {
- ret = -ENODEV;
- goto out_put_group;
- }
+ iommu_group_set_iommudata(group, cfg,
+ __arm_smmu_release_pci_iommudata);
+ }
- cfg = &master->cfg;
+ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
+ ret = -ENOSPC;
+ goto out_put_group;
}
- iommu_group_set_iommudata(group, cfg, releasefn);
- ret = iommu_group_add_device(group, dev);
+ /*
+ * Assume Stream ID == Requester ID for now.
+ * We need a way to describe the ID mappings in FDT.
+ */
+ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+ for (i = 0; i < cfg->num_streamids; ++i)
+ if (cfg->streamids[i] == sid)
+ break;
+ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+ if (i == cfg->num_streamids)
+ cfg->streamids[cfg->num_streamids++] = sid;
+
+ return 0;
out_put_group:
iommu_group_put(group);
return ret;
}
+static int arm_smmu_add_platform_device(struct device *dev)
+{
+ struct iommu_group *group;
+ struct arm_smmu_master *master;
+ struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+ if (!smmu)
+ return -ENODEV;
+
+ master = find_smmu_master(smmu, dev->of_node);
+ if (!master)
+ return -ENODEV;
+
+ /* No automatic group creation for platform devices */
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ iommu_group_set_iommudata(group, &master->cfg, NULL);
+ return iommu_group_add_device(group, dev);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return arm_smmu_add_pci_device(to_pci_dev(dev));
+
+ return arm_smmu_add_platform_device(dev);
+}
+
static void arm_smmu_remove_device(struct device *dev)
{
iommu_group_remove_device(dev);
@@ -1392,7 +1400,7 @@ static void arm_smmu_remove_device(struct device *dev)
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
switch (attr) {
case DOMAIN_ATTR_NESTING:
@@ -1407,7 +1415,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
int ret = 0;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
mutex_lock(&smmu_domain->init_mutex);
@@ -1435,8 +1443,8 @@ out_unlock:
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
- .domain_init = arm_smmu_domain_init,
- .domain_destroy = arm_smmu_domain_destroy,
+ .domain_alloc = arm_smmu_domain_alloc,
+ .domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
@@ -1633,6 +1641,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
smmu->pa_size = size;
+ /*
+ * What the page table walker can address actually depends on which
+ * descriptor format is in use, but since a) we don't know that yet,
+ * and b) it can vary per context bank, this will have to do...
+ */
+ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
+ dev_warn(smmu->dev,
+ "failed to set DMA mask for table walker\n");
+
if (smmu->version == ARM_SMMU_V1) {
smmu->va_size = smmu->ipa_size;
size = SZ_4K | SZ_2M | SZ_1G;
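Two points in the arm-smmu hunks above deserve a note. CBA2R is now programmed before CBAR because, per the new comment, some implementations require that ordering due to the VMID16 oversight. The TTBCR2 switch over va_size collapses into TTBCR2_SEP_UPSTREAM; on my reading of the SMMUv2 TTBCR2.SEP field (an interpretation, not stated in the patch), the value 0x7 asks the SMMU to sign-extend from the upstream bus address size, so one setting covers every supported input size:

	/* sketch of the simplified stage-1 TTBCR2 programming */
	reg  = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;	/* upper half of the LPAE TCR  */
	reg |= TTBCR2_SEP_UPSTREAM;			/* 0x7 << 15: sign-extension   */
							/* position from upstream bus  */
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);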
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index dc14fec4ede1..3e898504a7c4 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -200,6 +200,7 @@ struct exynos_iommu_domain {
short *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for this structure */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+ struct iommu_domain domain; /* generic domain data structure */
};
struct sysmmu_drvdata {
@@ -214,6 +215,11 @@ struct sysmmu_drvdata {
phys_addr_t pgtable;
};
+static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct exynos_iommu_domain, domain);
+}
+
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
/* return true if the System MMU was not active previously
@@ -696,58 +702,60 @@ static inline void pgtable_flush(void *vastart, void *vaend)
virt_to_phys(vaend));
}
-static int exynos_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
- struct exynos_iommu_domain *priv;
+ struct exynos_iommu_domain *exynos_domain;
int i;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
+ if (!exynos_domain)
+ return NULL;
- priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
- if (!priv->pgtable)
+ exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
+ if (!exynos_domain->pgtable)
goto err_pgtable;
- priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!priv->lv2entcnt)
+ exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!exynos_domain->lv2entcnt)
goto err_counter;
/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
- priv->pgtable[i + 0] = ZERO_LV2LINK;
- priv->pgtable[i + 1] = ZERO_LV2LINK;
- priv->pgtable[i + 2] = ZERO_LV2LINK;
- priv->pgtable[i + 3] = ZERO_LV2LINK;
- priv->pgtable[i + 4] = ZERO_LV2LINK;
- priv->pgtable[i + 5] = ZERO_LV2LINK;
- priv->pgtable[i + 6] = ZERO_LV2LINK;
- priv->pgtable[i + 7] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
}
- pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+ pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);
- spin_lock_init(&priv->lock);
- spin_lock_init(&priv->pgtablelock);
- INIT_LIST_HEAD(&priv->clients);
+ spin_lock_init(&exynos_domain->lock);
+ spin_lock_init(&exynos_domain->pgtablelock);
+ INIT_LIST_HEAD(&exynos_domain->clients);
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = ~0UL;
- domain->geometry.force_aperture = true;
+ exynos_domain->domain.geometry.aperture_start = 0;
+ exynos_domain->domain.geometry.aperture_end = ~0UL;
+ exynos_domain->domain.geometry.force_aperture = true;
- domain->priv = priv;
- return 0;
+ return &exynos_domain->domain;
err_counter:
- free_pages((unsigned long)priv->pgtable, 2);
+ free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
- kfree(priv);
- return -ENOMEM;
+ kfree(exynos_domain);
+ return NULL;
}
-static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
struct exynos_iommu_owner *owner;
unsigned long flags;
int i;
@@ -773,15 +781,14 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)priv->lv2entcnt, 1);
- kfree(domain->priv);
- domain->priv = NULL;
+ kfree(priv);
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
int ret;
@@ -812,7 +819,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct exynos_iommu_owner *owner;
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
@@ -988,7 +995,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_pte_t *entry;
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
unsigned long flags;
@@ -1042,7 +1049,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
unsigned long l_iova, size_t size)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
sysmmu_pte_t *ent;
size_t err_pgsize;
@@ -1119,7 +1126,7 @@ err:
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_pte_t *entry;
unsigned long flags;
phys_addr_t phys = 0;
@@ -1171,8 +1178,8 @@ static void exynos_iommu_remove_device(struct device *dev)
}
static const struct iommu_ops exynos_iommu_ops = {
- .domain_init = exynos_iommu_domain_init,
- .domain_destroy = exynos_iommu_domain_destroy,
+ .domain_alloc = exynos_iommu_domain_alloc,
+ .domain_free = exynos_iommu_domain_free,
.attach_dev = exynos_iommu_attach_device,
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index ceebd287b660..1d452930c890 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -33,6 +33,11 @@ static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);
+static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct fsl_dma_domain, iommu_domain);
+}
+
static int __init iommu_init_mempool(void)
{
fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
@@ -65,7 +70,7 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
struct dma_window *win_ptr = &dma_domain->win_arr[0];
struct iommu_domain_geometry *geom;
- geom = &dma_domain->iommu_domain->geometry;
+ geom = &dma_domain->iommu_domain.geometry;
if (!win_cnt || !dma_domain->geom_size) {
pr_debug("Number of windows/geometry not configured for the domain\n");
@@ -123,7 +128,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
int ret;
struct dma_window *wnd = &dma_domain->win_arr[0];
- phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+ phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
@@ -172,7 +177,7 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
} else {
phys_addr_t wnd_addr;
- wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+ wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
ret = pamu_config_ppaace(liodn, wnd_addr,
wnd->size,
@@ -384,7 +389,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
if (iova < domain->geometry.aperture_start ||
iova > domain->geometry.aperture_end)
@@ -398,11 +403,9 @@ static bool fsl_pamu_capable(enum iommu_cap cap)
return cap == IOMMU_CAP_CACHE_COHERENCY;
}
-static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
+static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
-
- domain->priv = NULL;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
/* remove all the devices from the device list */
detach_device(NULL, dma_domain);
@@ -413,23 +416,24 @@ static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
-static int fsl_pamu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
struct fsl_dma_domain *dma_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
dma_domain = iommu_alloc_dma_domain();
if (!dma_domain) {
pr_debug("dma_domain allocation failed\n");
- return -ENOMEM;
+ return NULL;
}
- domain->priv = dma_domain;
- dma_domain->iommu_domain = domain;
/* default geometry 64 GB i.e. maximum system address */
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = (1ULL << 36) - 1;
- domain->geometry.force_aperture = true;
+ dma_domain->iommu_domain.geometry.aperture_start = 0;
+ dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
+ dma_domain->iommu_domain.geometry.force_aperture = true;
- return 0;
+ return &dma_domain->iommu_domain;
}
/* Configure geometry settings for all LIODNs associated with domain */
@@ -499,7 +503,7 @@ static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
@@ -530,7 +534,7 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
struct dma_window *wnd;
int pamu_prot = 0;
int ret;
@@ -607,7 +611,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
int num)
{
unsigned long flags;
- struct iommu_domain *domain = dma_domain->iommu_domain;
+ struct iommu_domain *domain = &dma_domain->iommu_domain;
int ret = 0;
int i;
@@ -653,7 +657,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
static int fsl_pamu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
const u32 *liodn;
u32 liodn_cnt;
int len, ret = 0;
@@ -691,7 +695,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
static void fsl_pamu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
const u32 *prop;
int len;
struct pci_dev *pdev = NULL;
@@ -723,7 +727,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
struct iommu_domain_geometry *geom_attr = data;
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
dma_addr_t geom_size;
unsigned long flags;
@@ -813,7 +817,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
int ret = 0;
switch (attr_type) {
@@ -838,7 +842,7 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
int ret = 0;
switch (attr_type) {
@@ -999,7 +1003,7 @@ static void fsl_pamu_remove_device(struct device *dev)
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
@@ -1048,15 +1052,15 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
return dma_domain->win_cnt;
}
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
- .domain_init = fsl_pamu_domain_init,
- .domain_destroy = fsl_pamu_domain_destroy,
+ .domain_alloc = fsl_pamu_domain_alloc,
+ .domain_free = fsl_pamu_domain_free,
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
.domain_window_enable = fsl_pamu_window_enable,
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
index c90293f99709..f2b0f741d3de 100644
--- a/drivers/iommu/fsl_pamu_domain.h
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -71,7 +71,7 @@ struct fsl_dma_domain {
u32 stash_id;
struct pamu_stash_attribute dma_stash;
u32 snoop_id;
- struct iommu_domain *iommu_domain;
+ struct iommu_domain iommu_domain;
spinlock_t domain_lock;
};
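The header change above is what lets fsl_pamu_domain.c drop domain->priv: struct fsl_dma_domain now embeds the generic iommu_domain rather than pointing at one, so call sites switch from pointer to member syntax and hand the core the address of the embedded member. A small fragment against the structure above (the helper name is illustrative, not from this patch):

static struct iommu_domain *fsl_example_handle(struct fsl_dma_domain *dd)
{
	/* was: return dd->iommu_domain;   (a separately allocated object)   */
	return &dd->iommu_domain;	/* now: address of the embedded member */
}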
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d1e05bdbb53..68d43beccb7e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -50,6 +50,7 @@
#define CONTEXT_SIZE VTD_PAGE_SIZE
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -184,32 +185,11 @@ static int force_on = 0;
* 64-127: Reserved
*/
struct root_entry {
- u64 val;
- u64 rsvd1;
+ u64 lo;
+ u64 hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
- return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
- root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
- root->val &= ~VTD_PAGE_MASK;
- root->val |= value & VTD_PAGE_MASK;
-}
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
- return (struct context_entry *)
- (root_present(root)?phys_to_virt(
- root->val & VTD_PAGE_MASK) :
- NULL);
-}
/*
* low 64 bits:
@@ -339,7 +319,7 @@ struct dmar_domain {
DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses*/
- struct list_head devices; /* all devices' list */
+ struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
@@ -358,6 +338,9 @@ struct dmar_domain {
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
+
+ struct iommu_domain domain; /* generic domain data structure for
+ iommu core */
};
/* PCI domain-device relationship */
@@ -449,6 +432,12 @@ static LIST_HEAD(device_domain_list);
static const struct iommu_ops intel_iommu_ops;
+/* Convert generic 'struct iommu_domain' to private 'struct dmar_domain' */
+static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct dmar_domain, domain);
+}
+
static int __init intel_iommu_setup(char *str)
{
if (!str)
@@ -595,12 +584,13 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int i, found = 0;
+ bool found = false;
+ int i;
domain->iommu_coherency = 1;
for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
- found = 1;
+ found = true;
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
@@ -672,6 +662,40 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
+static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
+ u8 bus, u8 devfn, int alloc)
+{
+ struct root_entry *root = &iommu->root_entry[bus];
+ struct context_entry *context;
+ u64 *entry;
+
+ entry = &root->lo;
+ if (ecap_ecs(iommu->ecap)) {
+ if (devfn >= 0x80) {
+ devfn -= 0x80;
+ entry = &root->hi;
+ }
+ devfn *= 2;
+ }
+ if (*entry & 1)
+ context = phys_to_virt(*entry & VTD_PAGE_MASK);
+ else {
+ unsigned long phy_addr;
+ if (!alloc)
+ return NULL;
+
+ context = alloc_pgtable_page(iommu->node);
+ if (!context)
+ return NULL;
+
+ __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
+ phy_addr = virt_to_phys((void *)context);
+ *entry = phy_addr | 1;
+ __iommu_flush_cache(iommu, entry, sizeof(*entry));
+ }
+ return &context[devfn];
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -684,7 +708,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
segment = pci_domain_nr(pdev->bus);
- } else if (ACPI_COMPANION(dev))
+ } else if (has_acpi_companion(dev))
dev = &ACPI_COMPANION(dev)->dev;
rcu_read_lock();
@@ -731,75 +755,36 @@ static void domain_flush_cache(struct dmar_domain *domain,
clflush_cache_range(addr, size);
}
-/* Gets context entry for a given bus and devfn */
-static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
- u8 bus, u8 devfn)
-{
- struct root_entry *root;
- struct context_entry *context;
- unsigned long phy_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
- if (!context) {
- context = (struct context_entry *)
- alloc_pgtable_page(iommu->node);
- if (!context) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return NULL;
- }
- __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
- phy_addr = virt_to_phys((void *)context);
- set_root_value(root, phy_addr);
- set_root_present(root);
- __iommu_flush_cache(iommu, root, sizeof(*root));
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
- return &context[devfn];
-}
-
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
- struct root_entry *root;
struct context_entry *context;
- int ret;
+ int ret = 0;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
- if (!context) {
- ret = 0;
- goto out;
- }
- ret = context_present(&context[devfn]);
-out:
+ context = iommu_context_addr(iommu, bus, devfn, 0);
+ if (context)
+ ret = context_present(context);
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
- struct root_entry *root;
struct context_entry *context;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
+ context = iommu_context_addr(iommu, bus, devfn, 0);
if (context) {
- context_clear_entry(&context[devfn]);
- __iommu_flush_cache(iommu, &context[devfn], \
- sizeof(*context));
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
}
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
- struct root_entry *root;
int i;
unsigned long flags;
struct context_entry *context;
@@ -809,10 +794,17 @@ static void free_context_table(struct intel_iommu *iommu)
goto out;
}
for (i = 0; i < ROOT_ENTRY_NR; i++) {
- root = &iommu->root_entry[i];
- context = get_context_addr_from_root(root);
+ context = iommu_context_addr(iommu, i, 0, 0);
+ if (context)
+ free_pgtable_page(context);
+
+ if (!ecap_ecs(iommu->ecap))
+ continue;
+
+ context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
free_pgtable_page(context);
+
}
free_pgtable_page(iommu->root_entry);
iommu->root_entry = NULL;
@@ -1136,14 +1128,16 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
- void *addr;
+ u64 addr;
u32 sts;
unsigned long flag;
- addr = iommu->root_entry;
+ addr = virt_to_phys(iommu->root_entry);
+ if (ecap_ecs(iommu->ecap))
+ addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+ dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -1267,7 +1261,7 @@ static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
- int found = 0;
+ bool found = false;
unsigned long flags;
struct device_domain_info *info;
struct pci_dev *pdev;
@@ -1282,7 +1276,7 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
list_for_each_entry(info, &domain->devices, link)
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- found = 1;
+ found = true;
break;
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1790,7 +1784,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
translation != CONTEXT_TT_MULTI_LEVEL);
- context = device_to_context_entry(iommu, bus, devfn);
+ spin_lock_irqsave(&iommu->lock, flags);
+ context = iommu_context_addr(iommu, bus, devfn, 1);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (!context)
return -ENOMEM;
spin_lock_irqsave(&iommu->lock, flags);
@@ -2554,6 +2550,10 @@ static bool device_has_rmrr(struct device *dev)
* In both cases we assume that PCI USB devices with RMRRs have them largely
* for historical reasons and that the RMRR space is not actively used post
* boot. This exclusion may change if vendors begin to abuse it.
+ *
+ * The same exception is made for graphics devices, with the requirement that
+ * any use of the RMRR regions will be torn down before assigning the device
+ * to a guest.
*/
static bool device_is_rmrr_locked(struct device *dev)
{
@@ -2563,7 +2563,7 @@ static bool device_is_rmrr_locked(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
- if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
+ if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
return false;
}
@@ -4269,7 +4269,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct device_domain_info *info, *tmp;
struct intel_iommu *iommu;
unsigned long flags;
- int found = 0;
+ bool found = false;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4301,7 +4301,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
* update iommu count and coherency
*/
if (info->iommu == iommu)
- found = 1;
+ found = true;
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4339,44 +4339,45 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
return 0;
}
-static int intel_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
if (!dmar_domain) {
printk(KERN_ERR
"intel_iommu_domain_init: dmar_domain == NULL\n");
- return -ENOMEM;
+ return NULL;
}
if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
domain_exit(dmar_domain);
- return -ENOMEM;
+ return NULL;
}
domain_update_iommu_cap(dmar_domain);
- domain->priv = dmar_domain;
+ domain = &dmar_domain->domain;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
domain->geometry.force_aperture = true;
- return 0;
+ return domain;
}
-static void intel_iommu_domain_destroy(struct iommu_domain *domain)
+static void intel_iommu_domain_free(struct iommu_domain *domain)
{
- struct dmar_domain *dmar_domain = domain->priv;
-
- domain->priv = NULL;
- domain_exit(dmar_domain);
+ domain_exit(to_dmar_domain(domain));
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu;
int addr_width;
u8 bus, devfn;
@@ -4441,16 +4442,14 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct dmar_domain *dmar_domain = domain->priv;
-
- domain_remove_one_dev_info(dmar_domain, dev);
+ domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
int ret;
@@ -4487,7 +4486,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
static size_t intel_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct page *freelist = NULL;
struct intel_iommu *iommu;
unsigned long start_pfn, last_pfn;
@@ -4535,7 +4534,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct dma_pte *pte;
int level = 0;
u64 phys = 0;
@@ -4594,8 +4593,8 @@ static void intel_iommu_remove_device(struct device *dev)
static const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
- .domain_init = intel_iommu_domain_init,
- .domain_destroy = intel_iommu_domain_destroy,
+ .domain_alloc = intel_iommu_domain_alloc,
+ .domain_free = intel_iommu_domain_free,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
.map = intel_iommu_map,
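The root-table rework above is worth a worked example. With extended context support (ecap_ecs()), each struct root_entry still covers one bus, but its lo and hi halves point at separate context tables for devfn 0x00-0x7f and 0x80-0xff, and each extended entry occupies two regular-sized slots, hence the devfn *= 2. Tracing iommu_context_addr() for bus 0x03, devfn 0x85 (values chosen purely for illustration):

	struct root_entry *root = &iommu->root_entry[0x03];
	u64 *entry = &root->lo;

	if (ecap_ecs(iommu->ecap)) {		/* extended mode                     */
		/* devfn 0x85 >= 0x80: upper half of the bus lives behind root->hi  */
		entry = &root->hi;		/* devfn becomes 0x05                */
		/* devfn *= 2: extended entries are double width, so index 0x0a     */
	}
	/* the context table sits at (*entry & VTD_PAGE_MASK); the wanted entry is
	 * &((struct context_entry *)phys_to_virt(*entry & VTD_PAGE_MASK))[0x0a]   */

The DMA_RTADDR_RTT bit OR-ed into the root-table address in iommu_set_root_entry() appears to be what tells the hardware to use this extended layout.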
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 14de1ab223c8..5709ae9c3e77 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -631,16 +631,13 @@ static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int setup = 0;
+ bool setup = false;
int eim = 0;
if (x2apic_supported()) {
eim = !dmar_x2apic_optout();
if (!eim)
- printk(KERN_WARNING
- "Your BIOS is broken and requested that x2apic be disabled.\n"
- "This will slightly decrease performance.\n"
- "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
+ pr_info("x2apic is disabled because BIOS sets x2apic opt out bit. You can use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
}
for_each_iommu(iommu, drhd) {
@@ -697,7 +694,7 @@ static int __init intel_enable_irq_remapping(void)
*/
for_each_iommu(iommu, drhd) {
iommu_set_irq_remapping(iommu, eim);
- setup = 1;
+ setup = true;
}
if (!setup)
@@ -856,7 +853,7 @@ static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int ir_supported = 0;
+ bool ir_supported = false;
int ioapic_idx;
for_each_iommu(iommu, drhd)
@@ -864,7 +861,7 @@ static int __init parse_ioapics_under_ir(void)
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
- ir_supported = 1;
+ ir_supported = true;
}
if (!ir_supported)
@@ -917,7 +914,7 @@ static void disable_irq_remapping(void)
static int reenable_irq_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
- int setup = 0;
+ bool setup = false;
struct intel_iommu *iommu = NULL;
for_each_iommu(iommu, drhd)
@@ -933,7 +930,7 @@ static int reenable_irq_remapping(int eim)
/* Set up interrupt remapping for iommu.*/
iommu_set_irq_remapping(iommu, eim);
- setup = 1;
+ setup = true;
}
if (!setup)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index b610a8dee238..4e460216bd16 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -116,6 +116,8 @@
#define ARM_32_LPAE_TCR_EAE (1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+#define ARM_LPAE_TCR_EPD1 (1 << 23)
+
#define ARM_LPAE_TCR_TG0_4K (0 << 14)
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
@@ -621,6 +623,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
}
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+
+ /* Disable speculative walks through TTBR1 */
+ reg |= ARM_LPAE_TCR_EPD1;
cfg->arm_lpae_s1_cfg.tcr = reg;
/* MAIRs */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 72e683df0731..d4f527e56679 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -901,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
struct iommu_domain *domain;
- int ret;
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
if (!domain)
return NULL;
- domain->ops = bus->iommu_ops;
-
- ret = domain->ops->domain_init(domain);
- if (ret)
- goto out_free;
+ domain->ops = bus->iommu_ops;
+ domain->type = IOMMU_DOMAIN_UNMANAGED;
return domain;
-
-out_free:
- kfree(domain);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
- if (likely(domain->ops->domain_destroy != NULL))
- domain->ops->domain_destroy(domain);
-
- kfree(domain);
+ domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -1049,6 +1037,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+ return -EINVAL;
+
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
@@ -1100,6 +1091,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+ return -EINVAL;
+
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
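The core-side change above completes the picture: iommu_domain_alloc() now delegates allocation to bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED) and iommu_domain_free() to ops->domain_free(), while iommu_map()/iommu_unmap() refuse domains whose type lacks __IOMMU_DOMAIN_PAGING. A minimal caller-side sketch using only the exported API (addresses are placeholders):

#include <linux/iommu.h>
#include <linux/sizes.h>

static int example_map_one_page(struct device *dev)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(dev->bus);	/* ends up in ops->domain_alloc() */
	if (!dom)
		return -ENODEV;

	ret = iommu_attach_device(dom, dev);
	if (ret)
		goto out_free;

	ret = iommu_map(dom, 0x100000, 0x80000000, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(dom, 0x100000, SZ_4K);

	iommu_detach_device(dom, dev);
out_free:
	iommu_domain_free(dom);			/* ends up in ops->domain_free()  */
	return ret;
}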
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index bc39bdf7b99b..1a67c531a07e 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -38,7 +38,7 @@ struct ipmmu_vmsa_device {
struct ipmmu_vmsa_domain {
struct ipmmu_vmsa_device *mmu;
- struct iommu_domain *io_domain;
+ struct iommu_domain io_domain;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -56,6 +56,11 @@ struct ipmmu_vmsa_archdata {
static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);
+static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
+}
+
#define TLB_LOOP_TIMEOUT 100 /* 100us */
/* -----------------------------------------------------------------------------
@@ -428,7 +433,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
* TODO: We need to look up the faulty device based on the I/O VA. Use
* the IOMMU device for now.
*/
- if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0))
+ if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
return IRQ_HANDLED;
dev_err_ratelimited(mmu->dev,
@@ -448,7 +453,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
return IRQ_NONE;
io_domain = mmu->mapping->domain;
- domain = io_domain->priv;
+ domain = to_vmsa_domain(io_domain);
return ipmmu_domain_irq(domain);
}
@@ -457,25 +462,25 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
* IOMMU Operations
*/
-static int ipmmu_domain_init(struct iommu_domain *io_domain)
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
struct ipmmu_vmsa_domain *domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
- return -ENOMEM;
+ return NULL;
spin_lock_init(&domain->lock);
- io_domain->priv = domain;
- domain->io_domain = io_domain;
-
- return 0;
+ return &domain->io_domain;
}
-static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
+static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
/*
* Free the domain resources. We assume that all devices have already
@@ -491,7 +496,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_device *mmu = archdata->mmu;
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned long flags;
unsigned int i;
int ret = 0;
@@ -532,7 +537,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned int i;
for (i = 0; i < archdata->num_utlbs; ++i)
@@ -546,7 +551,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
if (!domain)
return -ENODEV;
@@ -557,7 +562,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
size_t size)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
return domain->iop->unmap(domain->iop, iova, size);
}
@@ -565,7 +570,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
/* TODO: Is locking needed ? */
@@ -737,8 +742,8 @@ static void ipmmu_remove_device(struct device *dev)
}
static const struct iommu_ops ipmmu_ops = {
- .domain_init = ipmmu_domain_init,
- .domain_destroy = ipmmu_domain_destroy,
+ .domain_alloc = ipmmu_domain_alloc,
+ .domain_free = ipmmu_domain_free,
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e1b05379ca0e..15a2063812fa 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -52,8 +52,14 @@ DEFINE_SPINLOCK(msm_iommu_lock);
struct msm_priv {
unsigned long *pgtable;
struct list_head list_attached;
+ struct iommu_domain domain;
};
+static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
+{
+ return container_of(dom, struct msm_priv, domain);
+}
+
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
int ret;
@@ -79,7 +85,7 @@ static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
static int __flush_iotlb(struct iommu_domain *domain)
{
- struct msm_priv *priv = domain->priv;
+ struct msm_priv *priv = to_msm_priv(domain);
struct msm_iommu_drvdata *iommu_drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int ret = 0;
@@ -209,10 +215,14 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
SET_M(base, ctx, 1);
}
-static int msm_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
- struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ struct msm_priv *priv;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail_nomem;
@@ -224,20 +234,19 @@ static int msm_iommu_domain_init(struct iommu_domain *domain)
goto fail_nomem;
memset(priv->pgtable, 0, SZ_16K);
- domain->priv = priv;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = (1ULL << 32) - 1;
- domain->geometry.force_aperture = true;
+ priv->domain.geometry.aperture_start = 0;
+ priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
+ priv->domain.geometry.force_aperture = true;
- return 0;
+ return &priv->domain;
fail_nomem:
kfree(priv);
- return -ENOMEM;
+ return NULL;
}
-static void msm_iommu_domain_destroy(struct iommu_domain *domain)
+static void msm_iommu_domain_free(struct iommu_domain *domain)
{
struct msm_priv *priv;
unsigned long flags;
@@ -245,20 +254,17 @@ static void msm_iommu_domain_destroy(struct iommu_domain *domain)
int i;
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
- domain->priv = NULL;
+ priv = to_msm_priv(domain);
- if (priv) {
- fl_table = priv->pgtable;
+ fl_table = priv->pgtable;
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
- free_page((unsigned long) __va(((fl_table[i]) &
- FL_BASE_MASK)));
+ for (i = 0; i < NUM_FL_PTE; i++)
+ if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+ free_page((unsigned long) __va(((fl_table[i]) &
+ FL_BASE_MASK)));
- free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
- priv->pgtable = NULL;
- }
+ free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
+ priv->pgtable = NULL;
kfree(priv);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
@@ -276,9 +282,9 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
+ priv = to_msm_priv(domain);
- if (!priv || !dev) {
+ if (!dev) {
ret = -EINVAL;
goto fail;
}
@@ -330,9 +336,9 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
int ret;
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
+ priv = to_msm_priv(domain);
- if (!priv || !dev)
+ if (!dev)
goto fail;
iommu_drvdata = dev_get_drvdata(dev->parent);
@@ -382,11 +388,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
goto fail;
}
- priv = domain->priv;
- if (!priv) {
- ret = -EINVAL;
- goto fail;
- }
+ priv = to_msm_priv(domain);
fl_table = priv->pgtable;
@@ -484,10 +486,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
-
- if (!priv)
- goto fail;
+ priv = to_msm_priv(domain);
fl_table = priv->pgtable;
@@ -566,7 +565,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
+ priv = to_msm_priv(domain);
if (list_empty(&priv->list_attached))
goto fail;
@@ -674,8 +673,8 @@ fail:
static const struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
- .domain_init = msm_iommu_domain_init,
- .domain_destroy = msm_iommu_domain_destroy,
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index a4ba851825c2..a22c33d6a486 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -59,6 +59,7 @@ struct omap_iommu_domain {
struct omap_iommu *iommu_dev;
struct device *dev;
spinlock_t lock;
+ struct iommu_domain domain;
};
#define MMU_LOCK_BASE_SHIFT 10
@@ -80,6 +81,15 @@ static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
+ * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
+ * @dom: generic iommu domain handle
+ **/
+static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct omap_iommu_domain, domain);
+}
+
+/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
**/
@@ -901,7 +911,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 *iopgd, *iopte;
struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
if (!omap_domain->iommu_dev)
return IRQ_NONE;
@@ -1113,7 +1123,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
struct iotlb_entry e;
@@ -1140,7 +1150,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
size_t size)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
@@ -1152,7 +1162,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu;
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
int ret = 0;
@@ -1212,17 +1222,20 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
spin_lock(&omap_domain->lock);
_omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
-static int omap_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
struct omap_iommu_domain *omap_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
if (!omap_domain) {
pr_err("kzalloc failed\n");
@@ -1244,25 +1257,21 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
spin_lock_init(&omap_domain->lock);
- domain->priv = omap_domain;
+ omap_domain->domain.geometry.aperture_start = 0;
+ omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
+ omap_domain->domain.geometry.force_aperture = true;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = (1ULL << 32) - 1;
- domain->geometry.force_aperture = true;
-
- return 0;
+ return &omap_domain->domain;
fail_nomem:
kfree(omap_domain);
out:
- return -ENOMEM;
+ return NULL;
}
-static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+static void omap_iommu_domain_free(struct iommu_domain *domain)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
-
- domain->priv = NULL;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
/*
* An iommu device is still attached
@@ -1278,7 +1287,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t da)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
u32 *pgd, *pte;
@@ -1358,8 +1367,8 @@ static void omap_iommu_remove_device(struct device *dev)
}
static const struct iommu_ops omap_iommu_ops = {
- .domain_init = omap_iommu_domain_init,
- .domain_destroy = omap_iommu_domain_destroy,
+ .domain_alloc = omap_iommu_domain_alloc,
+ .domain_free = omap_iommu_domain_free,
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9f74fddcd304..cab214544237 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -80,6 +80,8 @@ struct rk_iommu_domain {
u32 *dt; /* page directory table */
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
+
+ struct iommu_domain domain;
};
struct rk_iommu {
@@ -100,6 +102,11 @@ static inline void rk_table_flush(u32 *va, unsigned int count)
outer_flush_range(pa_start, pa_end);
}
+static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct rk_iommu_domain, domain);
+}
+
/**
* Inspired by _wait_for in intel_drv.h
* This is NOT safe for use in interrupt context.
@@ -503,7 +510,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
phys_addr_t pt_phys, phys = 0;
u32 dte, pte;
@@ -639,7 +646,7 @@ unwind:
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr;
@@ -670,7 +677,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
size_t size)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
phys_addr_t pt_phys;
@@ -726,7 +733,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
int ret;
phys_addr_t dte_addr;
@@ -778,7 +785,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
/* Allow 'virtual devices' (eg drm) to detach from domain */
@@ -804,13 +811,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
dev_info(dev, "Detached from iommu domain\n");
}
-static int rk_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
- return -ENOMEM;
+ return NULL;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -827,17 +837,16 @@ static int rk_iommu_domain_init(struct iommu_domain *domain)
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
- domain->priv = rk_domain;
+ return &rk_domain->domain;
- return 0;
err_dt:
kfree(rk_domain);
- return -ENOMEM;
+ return NULL;
}
-static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+static void rk_iommu_domain_free(struct iommu_domain *domain)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
int i;
WARN_ON(!list_empty(&rk_domain->iommus));
@@ -852,8 +861,7 @@ static void rk_iommu_domain_destroy(struct iommu_domain *domain)
}
free_page((unsigned long)rk_domain->dt);
- kfree(domain->priv);
- domain->priv = NULL;
+ kfree(rk_domain);
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -952,8 +960,8 @@ static void rk_iommu_remove_device(struct device *dev)
}
static const struct iommu_ops rk_iommu_ops = {
- .domain_init = rk_iommu_domain_init,
- .domain_destroy = rk_iommu_domain_destroy,
+ .domain_alloc = rk_iommu_domain_alloc,
+ .domain_free = rk_iommu_domain_free,
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
@@ -996,20 +1004,18 @@ static int rk_iommu_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id rk_iommu_dt_ids[] = {
{ .compatible = "rockchip,iommu" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
-#endif
static struct platform_driver rk_iommu_driver = {
.probe = rk_iommu_probe,
.remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
- .of_match_table = of_match_ptr(rk_iommu_dt_ids),
+ .of_match_table = rk_iommu_dt_ids,
},
};
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index f1b00774e4de..a0287519a1d4 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -42,11 +42,17 @@ struct shmobile_iommu_domain {
spinlock_t map_lock;
spinlock_t attached_list_lock;
struct list_head attached_list;
+ struct iommu_domain domain;
};
static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache;
+static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct shmobile_iommu_domain, domain);
+}
+
static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
struct kmem_cache *cache, size_t size)
{
@@ -82,31 +88,33 @@ static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
sizeof(val) * count, DMA_TO_DEVICE);
}
-static int shmobile_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type)
{
struct shmobile_iommu_domain *sh_domain;
int i, ret;
- sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL);
if (!sh_domain)
- return -ENOMEM;
+ return NULL;
ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
if (ret < 0) {
kfree(sh_domain);
- return ret;
+ return NULL;
}
for (i = 0; i < L1_LEN; i++)
sh_domain->l2[i].pgtable = NULL;
spin_lock_init(&sh_domain->map_lock);
spin_lock_init(&sh_domain->attached_list_lock);
INIT_LIST_HEAD(&sh_domain->attached_list);
- domain->priv = sh_domain;
- return 0;
+ return &sh_domain->domain;
}
-static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
+static void shmobile_iommu_domain_free(struct iommu_domain *domain)
{
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int i;
for (i = 0; i < L1_LEN; i++) {
@@ -115,14 +123,13 @@ static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
}
pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
kfree(sh_domain);
- domain->priv = NULL;
}
static int shmobile_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int ret = -EBUSY;
if (!archdata)
@@ -151,7 +158,7 @@ static void shmobile_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
if (!archdata)
return;
@@ -214,7 +221,7 @@ static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index;
int ret;
@@ -258,7 +265,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index;
uint32_t l2entry = 0;
size_t ret = 0;
@@ -298,7 +305,7 @@ done:
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
uint32_t l1entry = 0, l2entry = 0;
unsigned int l1index, l2index;
@@ -355,8 +362,8 @@ static int shmobile_iommu_add_device(struct device *dev)
}
static const struct iommu_ops shmobile_iommu_ops = {
- .domain_init = shmobile_iommu_domain_init,
- .domain_destroy = shmobile_iommu_domain_destroy,
+ .domain_alloc = shmobile_iommu_domain_alloc,
+ .domain_free = shmobile_iommu_domain_free,
.attach_dev = shmobile_iommu_attach_device,
.detach_dev = shmobile_iommu_detach_device,
.map = shmobile_iommu_map,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c48da057dbb1..37e708fdbb5a 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -63,11 +63,21 @@ struct gart_device {
struct device *dev;
};
+struct gart_domain {
+ struct iommu_domain domain; /* generic domain handle */
+ struct gart_device *gart; /* link to gart device */
+};
+
static struct gart_device *gart_handle; /* unique for a system */
#define GART_PTE(_pfn) \
(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
+static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct gart_domain, domain);
+}
+
/*
* Any interaction between any block on PPSB and a block on APB or AHB
* must have these read-back to ensure the APB/AHB bus transaction is
@@ -156,20 +166,11 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct gart_device *gart;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
struct gart_client *client, *c;
int err = 0;
- gart = gart_handle;
- if (!gart)
- return -EINVAL;
- domain->priv = gart;
-
- domain->geometry.aperture_start = gart->iovmm_base;
- domain->geometry.aperture_end = gart->iovmm_base +
- gart->page_count * GART_PAGE_SIZE - 1;
- domain->geometry.force_aperture = true;
-
client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
if (!client)
return -ENOMEM;
@@ -198,7 +199,8 @@ fail:
static void gart_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
struct gart_client *c;
spin_lock(&gart->client_lock);
@@ -216,33 +218,55 @@ out:
spin_unlock(&gart->client_lock);
}
-static int gart_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
- return 0;
-}
+ struct gart_domain *gart_domain;
+ struct gart_device *gart;
-static void gart_iommu_domain_destroy(struct iommu_domain *domain)
-{
- struct gart_device *gart = domain->priv;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+ gart = gart_handle;
if (!gart)
- return;
+ return NULL;
- spin_lock(&gart->client_lock);
- if (!list_empty(&gart->client)) {
- struct gart_client *c;
+ gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
+ if (!gart_domain)
+ return NULL;
+
+ gart_domain->gart = gart;
+ gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
+ gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
+ gart->page_count * GART_PAGE_SIZE - 1;
+ gart_domain->domain.geometry.force_aperture = true;
+
+ return &gart_domain->domain;
+}
+
+static void gart_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
+
+ if (gart) {
+ spin_lock(&gart->client_lock);
+ if (!list_empty(&gart->client)) {
+ struct gart_client *c;
- list_for_each_entry(c, &gart->client, list)
- gart_iommu_detach_dev(domain, c->dev);
+ list_for_each_entry(c, &gart->client, list)
+ gart_iommu_detach_dev(domain, c->dev);
+ }
+ spin_unlock(&gart->client_lock);
}
- spin_unlock(&gart->client_lock);
- domain->priv = NULL;
+
+ kfree(gart_domain);
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
unsigned long flags;
unsigned long pfn;
@@ -265,7 +289,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t bytes)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
unsigned long flags;
if (!gart_iova_range_valid(gart, iova, bytes))
@@ -281,7 +306,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
unsigned long pte;
phys_addr_t pa;
unsigned long flags;
@@ -310,8 +336,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
- .domain_init = gart_iommu_domain_init,
- .domain_destroy = gart_iommu_domain_destroy,
+ .domain_alloc = gart_iommu_domain_alloc,
+ .domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 6e134c7c227f..c845d99ecf6b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
@@ -24,6 +25,8 @@ struct tegra_smmu {
struct tegra_mc *mc;
const struct tegra_smmu_soc *soc;
+ unsigned long pfn_mask;
+
unsigned long *asids;
struct mutex lock;
@@ -31,7 +34,7 @@ struct tegra_smmu {
};
struct tegra_smmu_as {
- struct iommu_domain *domain;
+ struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
struct page *count;
@@ -40,6 +43,11 @@ struct tegra_smmu_as {
u32 attr;
};
+static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
+{
+ return container_of(dom, struct tegra_smmu_as, domain);
+}
+
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
unsigned long offset)
{
@@ -105,8 +113,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
-#define SMMU_PFN_MASK 0x000fffff
-
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
@@ -224,30 +230,32 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
return false;
}
-static int tegra_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
unsigned int i;
uint32_t *pd;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
as = kzalloc(sizeof(*as), GFP_KERNEL);
if (!as)
- return -ENOMEM;
+ return NULL;
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->domain = domain;
as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
if (!as->pd) {
kfree(as);
- return -ENOMEM;
+ return NULL;
}
as->count = alloc_page(GFP_KERNEL);
if (!as->count) {
__free_page(as->pd);
kfree(as);
- return -ENOMEM;
+ return NULL;
}
/* clear PDEs */
@@ -264,14 +272,17 @@ static int tegra_smmu_domain_init(struct iommu_domain *domain)
for (i = 0; i < SMMU_NUM_PDE; i++)
pd[i] = 0;
- domain->priv = as;
+ /* setup aperture */
+ as->domain.geometry.aperture_start = 0;
+ as->domain.geometry.aperture_end = 0xffffffff;
+ as->domain.geometry.force_aperture = true;
- return 0;
+ return &as->domain;
}
-static void tegra_smmu_domain_destroy(struct iommu_domain *domain)
+static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
/* TODO: free page directory and page tables */
ClearPageReserved(as->pd);
@@ -395,7 +406,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct tegra_smmu *smmu = dev->archdata.iommu;
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct of_phandle_args args;
unsigned int index = 0;
@@ -428,7 +439,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = as->smmu;
struct of_phandle_args args;
@@ -481,7 +492,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu);
} else {
- page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+ page = pfn_to_page(pd[pde] & smmu->pfn_mask);
pt = page_address(page);
}
@@ -503,7 +514,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
u32 *pd = page_address(as->pd), *pt;
struct page *page;
- page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+ page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
pt = page_address(page);
/*
@@ -524,7 +535,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu;
unsigned long offset;
struct page *page;
@@ -548,7 +559,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu;
unsigned long offset;
struct page *page;
@@ -572,13 +583,13 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct page *page;
unsigned long pfn;
u32 *pte;
pte = as_get_pte(as, iova, &page);
- pfn = *pte & SMMU_PFN_MASK;
+ pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn);
}
@@ -633,8 +644,8 @@ static void tegra_smmu_remove_device(struct device *dev)
static const struct iommu_ops tegra_smmu_ops = {
.capable = tegra_smmu_capable,
- .domain_init = tegra_smmu_domain_init,
- .domain_destroy = tegra_smmu_domain_destroy,
+ .domain_alloc = tegra_smmu_domain_alloc,
+ .domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
.add_device = tegra_smmu_add_device,
@@ -702,6 +713,10 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
+ smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
+ mc->soc->num_address_bits, smmu->pfn_mask);
+
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
if (soc->supports_request_limit)
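/*
 * Worked example, not part of the patch: the old fixed SMMU_PFN_MASK of
 * 0x000fffff covered 20 PFN bits, i.e. 32-bit physical addresses with 4 KiB
 * pages.  With the per-SoC computation above, a memory controller reporting,
 * say, 34 address bits would yield
 *
 *	pfn_mask = BIT_MASK(34 - PAGE_SHIFT) - 1
 *	         = (1UL << 22) - 1 = 0x3fffff	(for PAGE_SHIFT == 12)
 *
 * so page-directory and page-table entries can address the full range the
 * memory controller supports instead of being capped at 32 bits.
 */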
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c8d260e33a90..6de62a96e79c 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -60,6 +60,11 @@ config ATMEL_AIC5_IRQ
select MULTI_IRQ_HANDLER
select SPARSE_IRQ
+config BCM7038_L1_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config BCM7120_L2_IRQ
bool
select GENERIC_IRQ_CHIP
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 552a74027601..dda4927e47a6 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o
obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
new file mode 100644
index 000000000000..d3b8c8be15f6
--- /dev/null
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -0,0 +1,335 @@
+/*
+ * Broadcom BCM7038 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Author: Kevin Cernekee
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip/chained_irq.h>
+
+#include "irqchip.h"
+
+#define IRQS_PER_WORD 32
+#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
+#define MAX_WORDS 8
+
+struct bcm7038_l1_cpu;
+
+struct bcm7038_l1_chip {
+ raw_spinlock_t lock;
+ unsigned int n_words;
+ struct irq_domain *domain;
+ struct bcm7038_l1_cpu *cpus[NR_CPUS];
+ u8 affinity[MAX_WORDS * IRQS_PER_WORD];
+};
+
+struct bcm7038_l1_cpu {
+ void __iomem *map_base;
+ u32 mask_cache[0];
+};
+
+/*
+ * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
+ *
+ * 7038:
+ * 0x1000_1400: W0_STATUS
+ * 0x1000_1404: W1_STATUS
+ * 0x1000_1408: W0_MASK_STATUS
+ * 0x1000_140c: W1_MASK_STATUS
+ * 0x1000_1410: W0_MASK_SET
+ * 0x1000_1414: W1_MASK_SET
+ * 0x1000_1418: W0_MASK_CLEAR
+ * 0x1000_141c: W1_MASK_CLEAR
+ *
+ * 7445:
+ * 0xf03e_1500: W0_STATUS
+ * 0xf03e_1504: W1_STATUS
+ * 0xf03e_1508: W2_STATUS
+ * 0xf03e_150c: W3_STATUS
+ * 0xf03e_1510: W4_STATUS
+ * 0xf03e_1514: W0_MASK_STATUS
+ * 0xf03e_1518: W1_MASK_STATUS
+ * [...]
+ */
+
+static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (0 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (1 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (2 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (3 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline u32 l1_readl(void __iomem *reg)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return ioread32be(reg);
+ else
+ return readl(reg);
+}
+
+static inline void l1_writel(u32 val, void __iomem *reg)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ iowrite32be(val, reg);
+ else
+ writel(val, reg);
+}
+
+static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc)
+{
+ struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
+ struct bcm7038_l1_cpu *cpu;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+#ifdef CONFIG_SMP
+ cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+ cpu = intc->cpus[0];
+#endif
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < intc->n_words; idx++) {
+ int base = idx * IRQS_PER_WORD;
+ unsigned long pending, flags;
+ int hwirq;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
+ ~cpu->mask_cache[idx];
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+ generic_handle_irq(irq_find_mapping(intc->domain,
+ base + hwirq));
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
+ l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+ reg_mask_clr(intc, word));
+}
+
+static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ intc->cpus[cpu_idx]->mask_cache[word] |= mask;
+ l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+ reg_mask_set(intc, word));
+}
+
+static void bcm7038_l1_unmask(struct irq_data *d)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm7038_l1_mask(struct irq_data *d)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm7038_l1_set_affinity(struct irq_data *d,
+ const struct cpumask *dest,
+ bool force)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ irq_hw_number_t hw = d->hwirq;
+ u32 word = hw / IRQS_PER_WORD;
+ u32 mask = BIT(hw % IRQS_PER_WORD);
+ unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
+ bool was_disabled;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+
+ was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
+ mask);
+ __bcm7038_l1_mask(d, intc->affinity[hw]);
+ intc->affinity[hw] = first_cpu;
+ if (!was_disabled)
+ __bcm7038_l1_unmask(d, first_cpu);
+
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+ return 0;
+}
+
+static int __init bcm7038_l1_init_one(struct device_node *dn,
+ unsigned int idx,
+ struct bcm7038_l1_chip *intc)
+{
+ struct resource res;
+ resource_size_t sz;
+ struct bcm7038_l1_cpu *cpu;
+ unsigned int i, n_words, parent_irq;
+
+ if (of_address_to_resource(dn, idx, &res))
+ return -EINVAL;
+ sz = resource_size(&res);
+ n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+ if (n_words > MAX_WORDS)
+ return -EINVAL;
+ else if (!intc->n_words)
+ intc->n_words = n_words;
+ else if (intc->n_words != n_words)
+ return -EINVAL;
+
+ cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ cpu->map_base = ioremap(res.start, sz);
+ if (!cpu->map_base)
+ return -ENOMEM;
+
+ for (i = 0; i < n_words; i++) {
+ l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
+ cpu->mask_cache[i] = 0xffffffff;
+ }
+
+ parent_irq = irq_of_parse_and_map(dn, idx);
+ if (!parent_irq) {
+ pr_err("failed to map parent interrupt %d\n", parent_irq);
+ return -EINVAL;
+ }
+ irq_set_handler_data(parent_irq, intc);
+ irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle);
+
+ return 0;
+}
+
+static struct irq_chip bcm7038_l1_irq_chip = {
+ .name = "bcm7038-l1",
+ .irq_mask = bcm7038_l1_mask,
+ .irq_unmask = bcm7038_l1_unmask,
+ .irq_set_affinity = bcm7038_l1_set_affinity,
+};
+
+static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops bcm7038_l1_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = bcm7038_l1_map,
+};
+
+int __init bcm7038_l1_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct bcm7038_l1_chip *intc;
+ int idx, ret;
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&intc->lock);
+ for_each_possible_cpu(idx) {
+ ret = bcm7038_l1_init_one(dn, idx, intc);
+ if (ret < 0) {
+ if (idx)
+ break;
+ pr_err("failed to remap intc L1 registers\n");
+ goto out_free;
+ }
+ }
+
+ intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ &bcm7038_l1_domain_ops,
+ intc);
+ if (!intc->domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n",
+ intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words);
+
+ return 0;
+
+out_unmap:
+ for_each_possible_cpu(idx) {
+ struct bcm7038_l1_cpu *cpu = intc->cpus[idx];
+
+ if (cpu) {
+ if (cpu->map_base)
+ iounmap(cpu->map_base);
+ kfree(cpu);
+ }
+ }
+out_free:
+ kfree(intc);
+ return ret;
+}
+
+IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
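/*
 * Sanity check of the reg_*() helpers against the register layout comment
 * near the top of this file (illustrative only): for the two-word 7038 block
 * based at 0x1000_1400, n_words == 2, so
 *
 *	reg_status(intc, 0)      = (0*2 + 0) * 4 = 0x00 -> 0x1000_1400 W0_STATUS
 *	reg_mask_status(intc, 1) = (1*2 + 1) * 4 = 0x0c -> 0x1000_140c W1_MASK_STATUS
 *	reg_mask_set(intc, 0)    = (2*2 + 0) * 4 = 0x10 -> 0x1000_1410 W0_MASK_SET
 *	reg_mask_clr(intc, 1)    = (3*2 + 1) * 4 = 0x1c -> 0x1000_141c W1_MASK_CLEAR
 *
 * which matches the packed STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR layout.
 */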
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 8eec8e1201d9..3ba5cc780fcb 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kconfig.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -34,15 +35,21 @@
#define IRQSTAT 0x04
#define MAX_WORDS 4
+#define MAX_MAPPINGS (MAX_WORDS * 2)
#define IRQS_PER_WORD 32
struct bcm7120_l2_intc_data {
unsigned int n_words;
- void __iomem *base[MAX_WORDS];
+ void __iomem *map_base[MAX_MAPPINGS];
+ void __iomem *pair_base[MAX_WORDS];
+ int en_offset[MAX_WORDS];
+ int stat_offset[MAX_WORDS];
struct irq_domain *domain;
bool can_wake;
u32 irq_fwd_mask[MAX_WORDS];
u32 irq_map_mask[MAX_WORDS];
+ int num_parent_irqs;
+ const __be32 *map_mask_prop;
};
static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
@@ -61,7 +68,8 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
int hwirq;
irq_gc_lock(gc);
- pending = irq_reg_readl(gc, IRQSTAT) & gc->mask_cache;
+ pending = irq_reg_readl(gc, b->stat_offset[idx]) &
+ gc->mask_cache;
irq_gc_unlock(gc);
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
@@ -76,27 +84,30 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
static void bcm7120_l2_intc_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct bcm7120_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
if (b->can_wake)
- irq_reg_writel(gc, gc->mask_cache | gc->wake_active, IRQEN);
+ irq_reg_writel(gc, gc->mask_cache | gc->wake_active,
+ ct->regs.mask);
irq_gc_unlock(gc);
}
static void bcm7120_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
/* Restore the saved mask */
irq_gc_lock(gc);
- irq_reg_writel(gc, gc->mask_cache, IRQEN);
+ irq_reg_writel(gc, gc->mask_cache, ct->regs.mask);
irq_gc_unlock(gc);
}
static int bcm7120_l2_intc_init_one(struct device_node *dn,
struct bcm7120_l2_intc_data *data,
- int irq, const __be32 *map_mask)
+ int irq)
{
int parent_irq;
unsigned int idx;
@@ -110,9 +121,15 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
/* For multiple parent IRQs with multiple words, this looks like:
* <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
*/
- for (idx = 0; idx < data->n_words; idx++)
- data->irq_map_mask[idx] |=
- be32_to_cpup(map_mask + irq * data->n_words + idx);
+ for (idx = 0; idx < data->n_words; idx++) {
+ if (data->map_mask_prop) {
+ data->irq_map_mask[idx] |=
+ be32_to_cpup(data->map_mask_prop +
+ irq * data->n_words + idx);
+ } else {
+ data->irq_map_mask[idx] = 0xffffffff;
+ }
+ }
irq_set_handler_data(parent_irq, data);
irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle);
@@ -120,68 +137,107 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
return 0;
}
-int __init bcm7120_l2_intc_of_init(struct device_node *dn,
- struct device_node *parent)
+static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
+ struct bcm7120_l2_intc_data *data)
+{
+ int ret;
+
+ data->map_base[0] = of_iomap(dn, 0);
+ if (!data->map_base[0]) {
+ pr_err("unable to map registers\n");
+ return -ENOMEM;
+ }
+
+ data->pair_base[0] = data->map_base[0];
+ data->en_offset[0] = IRQEN;
+ data->stat_offset[0] = IRQSTAT;
+ data->n_words = 1;
+
+ ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
+ data->irq_fwd_mask, data->n_words);
+ if (ret != 0 && ret != -EINVAL) {
+ /* property exists but has the wrong number of words */
+ pr_err("invalid brcm,int-fwd-mask property\n");
+ return -EINVAL;
+ }
+
+ data->map_mask_prop = of_get_property(dn, "brcm,int-map-mask", &ret);
+ if (!data->map_mask_prop ||
+ (ret != (sizeof(__be32) * data->num_parent_irqs * data->n_words))) {
+ pr_err("invalid brcm,int-map-mask property\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
+ struct bcm7120_l2_intc_data *data)
+{
+ unsigned int gc_idx;
+
+ for (gc_idx = 0; gc_idx < MAX_WORDS; gc_idx++) {
+ unsigned int map_idx = gc_idx * 2;
+ void __iomem *en = of_iomap(dn, map_idx + 0);
+ void __iomem *stat = of_iomap(dn, map_idx + 1);
+ void __iomem *base = min(en, stat);
+
+ data->map_base[map_idx + 0] = en;
+ data->map_base[map_idx + 1] = stat;
+
+ if (!base)
+ break;
+
+ data->pair_base[gc_idx] = base;
+ data->en_offset[gc_idx] = en - base;
+ data->stat_offset[gc_idx] = stat - base;
+ }
+
+ if (!gc_idx) {
+ pr_err("unable to map registers\n");
+ return -EINVAL;
+ }
+
+ data->n_words = gc_idx;
+ return 0;
+}
+
+int __init bcm7120_l2_intc_probe(struct device_node *dn,
+ struct device_node *parent,
+ int (*iomap_regs_fn)(struct device_node *,
+ struct bcm7120_l2_intc_data *),
+ const char *intc_name)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct bcm7120_l2_intc_data *data;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- const __be32 *map_mask;
- int num_parent_irqs;
- int ret = 0, len;
+ int ret = 0;
unsigned int idx, irq, flags;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- for (idx = 0; idx < MAX_WORDS; idx++) {
- data->base[idx] = of_iomap(dn, idx);
- if (!data->base[idx])
- break;
- data->n_words = idx + 1;
- }
- if (!data->n_words) {
- pr_err("failed to remap intc L2 registers\n");
- ret = -ENOMEM;
- goto out_unmap;
- }
-
- /* Enable all interrupts specified in the interrupt forward mask;
- * disable all others. If the property doesn't exist (-EINVAL),
- * assume all zeroes.
- */
- ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
- data->irq_fwd_mask, data->n_words);
- if (ret == 0 || ret == -EINVAL) {
- for (idx = 0; idx < data->n_words; idx++)
- __raw_writel(data->irq_fwd_mask[idx],
- data->base[idx] + IRQEN);
- } else {
- /* property exists but has the wrong number of words */
- pr_err("invalid int-fwd-mask property\n");
- ret = -EINVAL;
- goto out_unmap;
- }
-
- num_parent_irqs = of_irq_count(dn);
- if (num_parent_irqs <= 0) {
+ data->num_parent_irqs = of_irq_count(dn);
+ if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
goto out_unmap;
}
- map_mask = of_get_property(dn, "brcm,int-map-mask", &len);
- if (!map_mask ||
- (len != (sizeof(*map_mask) * num_parent_irqs * data->n_words))) {
- pr_err("invalid brcm,int-map-mask property\n");
- ret = -EINVAL;
+ ret = iomap_regs_fn(dn, data);
+ if (ret < 0)
goto out_unmap;
+
+ for (idx = 0; idx < data->n_words; idx++) {
+ __raw_writel(data->irq_fwd_mask[idx],
+ data->pair_base[idx] +
+ data->en_offset[idx]);
}
- for (irq = 0; irq < num_parent_irqs; irq++) {
- ret = bcm7120_l2_intc_init_one(dn, data, irq, map_mask);
+ for (irq = 0; irq < data->num_parent_irqs; irq++) {
+ ret = bcm7120_l2_intc_init_one(dn, data, irq);
if (ret)
goto out_unmap;
}
@@ -215,11 +271,12 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn,
gc = irq_get_domain_generic_chip(data->domain, irq);
gc->unused = 0xffffffff & ~data->irq_map_mask[idx];
- gc->reg_base = data->base[idx];
gc->private = data;
ct = gc->chip_types;
- ct->regs.mask = IRQEN;
+ gc->reg_base = data->pair_base[idx];
+ ct->regs.mask = data->en_offset[idx];
+
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_ack = irq_gc_noop;
@@ -236,20 +293,38 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn,
}
}
- pr_info("registered BCM7120 L2 intc (mem: 0x%p, parent IRQ(s): %d)\n",
- data->base[0], num_parent_irqs);
+ pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n",
+ intc_name, data->map_base[0], data->num_parent_irqs);
return 0;
out_free_domain:
irq_domain_remove(data->domain);
out_unmap:
- for (idx = 0; idx < MAX_WORDS; idx++) {
- if (data->base[idx])
- iounmap(data->base[idx]);
+ for (idx = 0; idx < MAX_MAPPINGS; idx++) {
+ if (data->map_base[idx])
+ iounmap(data->map_base[idx]);
}
kfree(data);
return ret;
}
+
+int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
+ struct device_node *parent)
+{
+ return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
+ "BCM7120 L2");
+}
+
+int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
+ struct device_node *parent)
+{
+ return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
+ "BCM3380 L2");
+}
+
IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc",
- bcm7120_l2_intc_of_init);
+ bcm7120_l2_intc_probe_7120);
+
+IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc",
+ bcm7120_l2_intc_probe_3380);
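/*
 * Illustrative example, addresses invented: a BCM3380-style node might map
 * enable word 0 at 0xb0000000 and status word 0 at 0xb0000008.  The iomap
 * callback then records pair_base[0] = 0xb0000000, en_offset[0] = 0x0 and
 * stat_offset[0] = 0x8, the generic chip is set up with
 * gc->reg_base = pair_base[0] and ct->regs.mask = en_offset[0], and the
 * chained handler reads pending bits via stat_offset[0].  For the classic
 * 7120 layout this degenerates to a single mapping with en_offset = IRQEN
 * (0x00) and stat_offset = IRQSTAT (0x04).
 */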
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 313c2c64498a..d6bcc6be0777 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -136,7 +136,11 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* Disable all interrupts by default */
writel(0xffffffff, data->base + CPU_MASK_SET);
- writel(0xffffffff, data->base + CPU_CLEAR);
+
+ /* Wakeup interrupts may be retained from S5 (cold boot) */
+ data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
+ if (!data->can_wake)
+ writel(0xffffffff, data->base + CPU_CLEAR);
data->parent_irq = irq_of_parse_and_map(np, 0);
if (!data->parent_irq) {
@@ -188,8 +192,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
- if (of_property_read_bool(np, "brcm,irq-can-wake")) {
- data->can_wake = true;
+ if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
* in wake_enabled mask
*/
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 4f2fb62e6f37..49875adb6b44 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -567,7 +567,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
*/
smp_wmb();
- for_each_cpu_mask(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
u16 tlist;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index a6ce3476834e..01999d74bd3a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -33,12 +33,14 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/irqchip/arm-gic-acpi.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -80,19 +82,6 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
-/*
- * Supported arch specific GIC irq extension.
- * Default make them NULL.
- */
-struct irq_chip gic_arch_extn = {
- .irq_eoi = NULL,
- .irq_mask = NULL,
- .irq_unmask = NULL,
- .irq_retrigger = NULL,
- .irq_set_type = NULL,
- .irq_set_wake = NULL,
-};
-
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
@@ -165,34 +154,16 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
static void gic_mask_irq(struct irq_data *d)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
- if (gic_arch_extn.irq_mask)
- gic_arch_extn.irq_mask(d);
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_unmask_irq(struct irq_data *d)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
- if (gic_arch_extn.irq_unmask)
- gic_arch_extn.irq_unmask(d);
gic_poke_irq(d, GIC_DIST_ENABLE_SET);
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_eoi_irq(struct irq_data *d)
{
- if (gic_arch_extn.irq_eoi) {
- raw_spin_lock(&irq_controller_lock);
- gic_arch_extn.irq_eoi(d);
- raw_spin_unlock(&irq_controller_lock);
- }
-
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
@@ -249,8 +220,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
unsigned int gicirq = gic_irq(d);
- unsigned long flags;
- int ret;
/* Interrupt configuration for SGIs can't be changed */
if (gicirq < 16)
@@ -261,25 +230,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
-
- if (gic_arch_extn.irq_set_type)
- gic_arch_extn.irq_set_type(d, type);
-
- ret = gic_configure_irq(gicirq, type, base, NULL);
-
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-
- return ret;
-}
-
-static int gic_retrigger(struct irq_data *d)
-{
- if (gic_arch_extn.irq_retrigger)
- return gic_arch_extn.irq_retrigger(d);
-
- /* the genirq layer expects 0 if we can't retrigger in hardware */
- return 0;
+ return gic_configure_irq(gicirq, type, base, NULL);
}
#ifdef CONFIG_SMP
@@ -310,21 +261,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
}
#endif
-#ifdef CONFIG_PM
-static int gic_set_wake(struct irq_data *d, unsigned int on)
-{
- int ret = -ENXIO;
-
- if (gic_arch_extn.irq_set_wake)
- ret = gic_arch_extn.irq_set_wake(d, on);
-
- return ret;
-}
-
-#else
-#define gic_set_wake NULL
-#endif
-
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
@@ -383,11 +319,9 @@ static struct irq_chip gic_chip = {
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
- .irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_affinity,
#endif
- .irq_set_wake = gic_set_wake,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
};
@@ -1053,7 +987,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
set_handle_irq(gic_handle_irq);
}
- gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic);
gic_cpu_init(gic);
gic_pm_init(gic);
@@ -1107,3 +1040,105 @@ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
#endif
+
+#ifdef CONFIG_ACPI
+static phys_addr_t dist_phy_base, cpu_phy_base __initdata;
+
+static int __init
+gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+ phys_addr_t gic_cpu_base;
+ static int cpu_base_assigned;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ /*
+	 * There is no support for non-banked GICv1/2 registers in the ACPI spec.
+ * All CPU interface addresses have to be the same.
+ */
+ gic_cpu_base = processor->base_address;
+ if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
+ return -EINVAL;
+
+ cpu_phy_base = gic_cpu_base;
+ cpu_base_assigned = 1;
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_distributor *dist;
+
+ dist = (struct acpi_madt_generic_distributor *)header;
+
+ if (BAD_MADT_ENTRY(dist, end))
+ return -EINVAL;
+
+ dist_phy_base = dist->base_address;
+ return 0;
+}
+
+int __init
+gic_v2_acpi_init(struct acpi_table_header *table)
+{
+ void __iomem *cpu_base, *dist_base;
+ int count;
+
+ /* Collect CPU base addresses */
+ count = acpi_parse_entries(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt),
+ gic_acpi_parse_madt_cpu, table,
+ ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
+ if (count <= 0) {
+ pr_err("No valid GICC entries exist\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Find the distributor base address. We expect a single distributor entry,
+	 * since the ACPI 5.1 spec supports neither multi-GIC instances nor GIC cascading.
+ */
+ count = acpi_parse_entries(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt),
+ gic_acpi_parse_madt_distributor, table,
+ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
+ if (count <= 0) {
+ pr_err("No valid GICD entries exist\n");
+ return -EINVAL;
+ } else if (count > 1) {
+ pr_err("More than one GICD entry detected\n");
+ return -EINVAL;
+ }
+
+ cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+ if (!cpu_base) {
+ pr_err("Unable to map GICC registers\n");
+ return -ENOMEM;
+ }
+
+ dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
+ if (!dist_base) {
+ pr_err("Unable to map GICD registers\n");
+ iounmap(cpu_base);
+ return -ENOMEM;
+ }
+
+ /*
+	 * Initialize GIC instance zero (no multi-GIC support). Also, set GIC
+ * as default IRQ domain to allow for GSI registration and GSI to IRQ
+ * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
+ */
+ gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
+ irq_set_default_host(gic_data[0].domain);
+
+ acpi_irq_model = ACPI_IRQ_MODEL_GIC;
+ return 0;
+}
+#endif
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index f2d269bca789..57f09cb54464 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -239,7 +239,7 @@ int gic_get_c0_compare_int(void)
int gic_get_c0_perfcount_int(void)
{
if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
- /* Is the erformance counter shared with the timer? */
+ /* Is the performance counter shared with the timer? */
if (cp0_perfcount_irq < 0)
return -1;
return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
@@ -248,6 +248,29 @@ int gic_get_c0_perfcount_int(void)
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
+int gic_get_c0_fdc_int(void)
+{
+ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
+ /* Is the FDC IRQ even present? */
+ if (cp0_fdc_irq < 0)
+ return -1;
+ return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ }
+
+ /*
+ * Some cores claim the FDC is routable but it doesn't actually seem to
+ * be connected.
+ */
+ switch (current_cpu_type()) {
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ return -1;
+ }
+
+ return irq_create_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
+}
+
static void gic_handle_shared_int(void)
{
unsigned int i, intr, virq;
@@ -366,19 +389,19 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
int i;
cpumask_and(&tmp, cpumask, cpu_online_mask);
- if (cpus_empty(tmp))
+ if (cpumask_empty(&tmp))
return -EINVAL;
/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
- gic_map_to_vpe(irq, first_cpu(tmp));
+ gic_map_to_vpe(irq, cpumask_first(&tmp));
/* Update the pcpu_masks */
for (i = 0; i < NR_CPUS; i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
- set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+ set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
cpumask_copy(d->affinity, cpumask);
spin_unlock_irqrestore(&gic_lock, flags);
@@ -613,15 +636,20 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
* of the MIPS kernel code does not use the percpu IRQ API for
* the CP0 timer and performance counter interrupts.
*/
- if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
+ switch (intr) {
+ case GIC_LOCAL_INT_TIMER:
+ case GIC_LOCAL_INT_PERFCTR:
+ case GIC_LOCAL_INT_FDC:
+ irq_set_chip_and_handler(virq,
+ &gic_all_vpes_local_irq_controller,
+ handle_percpu_irq);
+ break;
+ default:
irq_set_chip_and_handler(virq,
&gic_local_irq_controller,
handle_percpu_devid_irq);
irq_set_percpu_devid(virq);
- } else {
- irq_set_chip_and_handler(virq,
- &gic_all_vpes_local_irq_controller,
- handle_percpu_irq);
+ break;
}
spin_lock_irqsave(&gic_lock, flags);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 51c485d9a877..f67bbd80433e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -264,7 +264,7 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&tegra_ictlr_chip,
- &info->base[ictlr]);
+ info->base[ictlr]);
}
parent_args = *args;
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 0fe2f718d81c..afd1af3dfe5a 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi_irq.h>
#include <linux/init.h>
#include <linux/of_irq.h>
#include <linux/irqchip.h>
@@ -26,4 +27,6 @@ extern struct of_device_id __irqchip_of_table[];
void __init irqchip_init(void)
{
of_irq_init(__irqchip_of_table);
+
+ acpi_irq_init();
}
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index c8ced12fa452..1cfcea62aed9 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -389,22 +389,49 @@ zsau_resp[] =
{NULL, ZSAU_UNKNOWN}
};
-/* retrieve CID from parsed response
- * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535
+/* check for and remove fixed string prefix
+ * If s starts with prefix terminated by a non-alphanumeric character,
+ * return pointer to the first character after that, otherwise return NULL.
*/
-static int cid_of_response(char *s)
+static char *skip_prefix(char *s, const char *prefix)
{
- int cid;
- int rc;
-
- if (s[-1] != ';')
- return 0; /* no CID separator */
- rc = kstrtoint(s, 10, &cid);
- if (rc)
- return 0; /* CID not numeric */
- if (cid < 1 || cid > 65535)
- return -1; /* CID out of range */
- return cid;
+ while (*prefix)
+ if (*s++ != *prefix++)
+ return NULL;
+ if (isalnum(*s))
+ return NULL;
+ return s;
+}
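/*
 * Usage examples, not part of the patch (strings invented for illustration):
 *
 *	skip_prefix("RING,VN=1", "RING")  -> ",VN=1"  (',' terminates the prefix)
 *	skip_prefix("RINGING",   "RING")  -> NULL     ('I' is alphanumeric, so
 *	                                               "RING" is not a whole token)
 *	skip_prefix("OK",        "RING")  -> NULL     (prefix does not match)
 */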
+
+/* queue event with CID */
+static void add_cid_event(struct cardstate *cs, int cid, int type,
+ void *ptr, int parameter)
+{
+ unsigned long flags;
+ unsigned next, tail;
+ struct event_t *event;
+
+ gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid);
+
+ spin_lock_irqsave(&cs->ev_lock, flags);
+
+ tail = cs->ev_tail;
+ next = (tail + 1) % MAX_EVENTS;
+ if (unlikely(next == cs->ev_head)) {
+ dev_err(cs->dev, "event queue full\n");
+ kfree(ptr);
+ } else {
+ event = cs->events + tail;
+ event->type = type;
+ event->cid = cid;
+ event->ptr = ptr;
+ event->arg = NULL;
+ event->parameter = parameter;
+ event->at_state = NULL;
+ cs->ev_tail = next;
+ }
+
+ spin_unlock_irqrestore(&cs->ev_lock, flags);
}
/**
@@ -417,190 +444,188 @@ static int cid_of_response(char *s)
*/
void gigaset_handle_modem_response(struct cardstate *cs)
{
- unsigned char *argv[MAX_REC_PARAMS + 1];
- int params;
- int i, j;
+ char *eoc, *psep, *ptr;
const struct resp_type_t *rt;
const struct zsau_resp_t *zr;
- int curarg;
- unsigned long flags;
- unsigned next, tail, head;
- struct event_t *event;
- int resp_code;
- int param_type;
- int abort;
- size_t len;
- int cid;
- int rawstring;
-
- len = cs->cbytes;
- if (!len) {
+ int cid, parameter;
+ u8 type, value;
+
+ if (!cs->cbytes) {
/* ignore additional LFs/CRs (M10x config mode or cx100) */
gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
return;
}
- cs->respdata[len] = 0;
- argv[0] = cs->respdata;
- params = 1;
+ cs->respdata[cs->cbytes] = 0;
+
if (cs->at_state.getstring) {
- /* getstring only allowed without cid at the moment */
+ /* state machine wants next line verbatim */
cs->at_state.getstring = 0;
- rawstring = 1;
- cid = 0;
- } else {
- /* parse line */
- for (i = 0; i < len; i++)
- switch (cs->respdata[i]) {
- case ';':
- case ',':
- case '=':
- if (params > MAX_REC_PARAMS) {
- dev_warn(cs->dev,
- "too many parameters in response\n");
- /* need last parameter (might be CID) */
- params--;
- }
- argv[params++] = cs->respdata + i + 1;
- }
-
- rawstring = 0;
- cid = params > 1 ? cid_of_response(argv[params - 1]) : 0;
- if (cid < 0) {
- gigaset_add_event(cs, &cs->at_state, RSP_INVAL,
- NULL, 0, NULL);
- return;
- }
+ ptr = kstrdup(cs->respdata, GFP_ATOMIC);
+ gig_dbg(DEBUG_EVENT, "string==%s", ptr ? ptr : "NULL");
+ add_cid_event(cs, 0, RSP_STRING, ptr, 0);
+ return;
+ }
- for (j = 1; j < params; ++j)
- argv[j][-1] = 0;
+ /* look up response type */
+ for (rt = resp_type; rt->response; ++rt) {
+ eoc = skip_prefix(cs->respdata, rt->response);
+ if (eoc)
+ break;
+ }
+ if (!rt->response) {
+ add_cid_event(cs, 0, RSP_NONE, NULL, 0);
+ gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'\n",
+ cs->respdata);
+ return;
+ }
- gig_dbg(DEBUG_EVENT, "CMD received: %s", argv[0]);
- if (cid) {
- --params;
- gig_dbg(DEBUG_EVENT, "CID: %s", argv[params]);
- }
- gig_dbg(DEBUG_EVENT, "available params: %d", params - 1);
- for (j = 1; j < params; j++)
- gig_dbg(DEBUG_EVENT, "param %d: %s", j, argv[j]);
+ /* check for CID */
+ psep = strrchr(cs->respdata, ';');
+ if (psep &&
+ !kstrtoint(psep + 1, 10, &cid) &&
+ cid >= 1 && cid <= 65535) {
+ /* valid CID: chop it off */
+ *psep = 0;
+ } else {
+ /* no valid CID: leave unchanged */
+ cid = 0;
}
- spin_lock_irqsave(&cs->ev_lock, flags);
- head = cs->ev_head;
- tail = cs->ev_tail;
+ gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata);
+ if (cid)
+ gig_dbg(DEBUG_EVENT, "CID: %d", cid);
- abort = 1;
- curarg = 0;
- while (curarg < params) {
- next = (tail + 1) % MAX_EVENTS;
- if (unlikely(next == head)) {
- dev_err(cs->dev, "event queue full\n");
- break;
- }
+ switch (rt->type) {
+ case RT_NOTHING:
+ /* check parameter separator */
+ if (*eoc)
+ goto bad_param; /* extra parameter */
- event = cs->events + tail;
- event->at_state = NULL;
- event->cid = cid;
- event->ptr = NULL;
- event->arg = NULL;
- tail = next;
+ add_cid_event(cs, cid, rt->resp_code, NULL, 0);
+ break;
- if (rawstring) {
- resp_code = RSP_STRING;
- param_type = RT_STRING;
- } else {
- for (rt = resp_type; rt->response; ++rt)
- if (!strcmp(argv[curarg], rt->response))
+ case RT_RING:
+ /* check parameter separator */
+ if (!*eoc)
+ eoc = NULL; /* no parameter */
+ else if (*eoc++ != ',')
+ goto bad_param;
+
+ add_cid_event(cs, 0, rt->resp_code, NULL, cid);
+
+ /* process parameters as individual responses */
+ while (eoc) {
+ /* look up parameter type */
+ psep = NULL;
+ for (rt = resp_type; rt->response; ++rt) {
+ psep = skip_prefix(eoc, rt->response);
+ if (psep)
break;
+ }
- if (!rt->response) {
- event->type = RSP_NONE;
- gig_dbg(DEBUG_EVENT,
- "unknown modem response: '%s'\n",
- argv[curarg]);
- break;
+ /* all legal parameters are of type RT_STRING */
+ if (!psep || rt->type != RT_STRING) {
+ dev_warn(cs->dev,
+ "illegal RING parameter: '%s'\n",
+ eoc);
+ return;
}
- resp_code = rt->resp_code;
- param_type = rt->type;
- ++curarg;
- }
+ /* skip parameter value separator */
+ if (*psep++ != '=')
+ goto bad_param;
- event->type = resp_code;
+ /* look up end of parameter */
+ eoc = strchr(psep, ',');
+ if (eoc)
+ *eoc++ = 0;
- switch (param_type) {
- case RT_NOTHING:
- break;
- case RT_RING:
- if (!cid) {
- dev_err(cs->dev,
- "received RING without CID!\n");
- event->type = RSP_INVAL;
- abort = 1;
- } else {
- event->cid = 0;
- event->parameter = cid;
- abort = 0;
- }
+ /* retrieve parameter value */
+ ptr = kstrdup(psep, GFP_ATOMIC);
+
+ /* queue event */
+ add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+ }
+ break;
+
+ case RT_ZSAU:
+ /* check parameter separator */
+ if (!*eoc) {
+ /* no parameter */
+ add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
break;
- case RT_ZSAU:
- if (curarg >= params) {
- event->parameter = ZSAU_NONE;
+ }
+ if (*eoc++ != '=')
+ goto bad_param;
+
+ /* look up parameter value */
+ for (zr = zsau_resp; zr->str; ++zr)
+ if (!strcmp(eoc, zr->str))
break;
- }
- for (zr = zsau_resp; zr->str; ++zr)
- if (!strcmp(argv[curarg], zr->str))
- break;
- event->parameter = zr->code;
- if (!zr->str)
- dev_warn(cs->dev,
- "%s: unknown parameter %s after ZSAU\n",
- __func__, argv[curarg]);
- ++curarg;
- break;
- case RT_STRING:
- if (curarg < params) {
- event->ptr = kstrdup(argv[curarg], GFP_ATOMIC);
- if (!event->ptr)
- dev_err(cs->dev, "out of memory\n");
- ++curarg;
- }
- gig_dbg(DEBUG_EVENT, "string==%s",
- event->ptr ? (char *) event->ptr : "NULL");
- break;
- case RT_ZCAU:
- event->parameter = -1;
- if (curarg + 1 < params) {
- u8 type, value;
-
- i = kstrtou8(argv[curarg++], 16, &type);
- j = kstrtou8(argv[curarg++], 16, &value);
- if (i == 0 && j == 0)
- event->parameter = (type << 8) | value;
- } else
- curarg = params - 1;
- break;
- case RT_NUMBER:
- if (curarg >= params ||
- kstrtoint(argv[curarg++], 10, &event->parameter))
- event->parameter = -1;
- gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter);
- break;
+ if (!zr->str)
+ goto bad_param;
+
+ add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
+ break;
+
+ case RT_STRING:
+ /* check parameter separator */
+ if (*eoc++ != '=')
+ goto bad_param;
+
+ /* retrieve parameter value */
+ ptr = kstrdup(eoc, GFP_ATOMIC);
+
+ /* queue event */
+ add_cid_event(cs, cid, rt->resp_code, ptr, 0);
+ break;
+
+ case RT_ZCAU:
+ /* check parameter separators */
+ if (*eoc++ != '=')
+ goto bad_param;
+ psep = strchr(eoc, ',');
+ if (!psep)
+ goto bad_param;
+ *psep++ = 0;
+
+ /* decode parameter values */
+ if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) {
+ *--psep = ',';
+ goto bad_param;
}
+ parameter = (type << 8) | value;
- if (resp_code == RSP_ZDLE)
- cs->dle = event->parameter;
+ add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+ break;
- if (abort)
- break;
- }
+ case RT_NUMBER:
+ /* check parameter separator */
+ if (*eoc++ != '=')
+ goto bad_param;
- cs->ev_tail = tail;
- spin_unlock_irqrestore(&cs->ev_lock, flags);
+ /* decode parameter value */
+ if (kstrtoint(eoc, 10, &parameter))
+ goto bad_param;
+
+ /* special case ZDLE: set flag before queueing event */
+ if (rt->resp_code == RSP_ZDLE)
+ cs->dle = parameter;
- if (curarg != params)
- gig_dbg(DEBUG_EVENT,
- "invalid number of processed parameters: %d/%d",
- curarg, params);
+ add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
+ break;
+
+bad_param:
+ /* parameter unexpected, incomplete or malformed */
+ dev_warn(cs->dev, "bad parameter in response '%s'\n",
+ cs->respdata);
+ add_cid_event(cs, cid, rt->resp_code, NULL, -1);
+ break;
+
+ default:
+ dev_err(cs->dev, "%s: internal error on '%s'\n",
+ __func__, cs->respdata);
+ }
}
EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
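
To make the new parsing flow easier to follow, here is a standalone userspace sketch of the skip_prefix() matching that the rewritten handler relies on; the sample response string is made up for illustration:

#include <ctype.h>
#include <stdio.h>

/* Return the text following "prefix" only when the prefix is terminated
 * by a non-alphanumeric character; otherwise return NULL.  This mirrors
 * the kernel helper above.
 */
static char *skip_prefix(char *s, const char *prefix)
{
	while (*prefix)
		if (*s++ != *prefix++)
			return NULL;
	if (isalnum((unsigned char)*s))
		return NULL;
	return s;
}

int main(void)
{
	char resp[] = "ZSAU=OUTGOING_CALL_PROCEEDING";

	printf("%s\n", skip_prefix(resp, "ZSAU"));	/* "=OUTGOING_CALL_PROCEEDING" */
	printf("%p\n", (void *)skip_prefix(resp, "ZS"));	/* NULL: 'A' is alphanumeric */
	return 0;
}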
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index c1493f4162fb..d5bdbaf93a1a 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -1092,7 +1092,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
card->ci = get_card_info(ent->driver_data);
if (!card->ci) {
- pr_info("mISDN: do not have informations about adapter at %s\n",
+ pr_info("mISDN: do not have information about adapter at %s\n",
pci_name(pdev));
kfree(card);
pci_disable_device(pdev);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 94affa5e6f28..546b7e81161d 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1951,38 +1951,6 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
return len;
}
-/* We don't need to send arp, because we have point-to-point connections. */
-static int
-isdn_net_rebuild_header(struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- isdn_net_local *lp = netdev_priv(dev);
- int ret = 0;
-
- if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
- struct ethhdr *eth = (struct ethhdr *) skb->data;
-
- /*
- * Only ARP/IP is currently supported
- */
-
- if (eth->h_proto != htons(ETH_P_IP)) {
- printk(KERN_WARNING
- "isdn_net: %s don't know how to resolve type %d addresses?\n",
- dev->name, (int) eth->h_proto);
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
- return 0;
- }
- /*
- * Try to get ARP to resolve the header.
- */
-#ifdef CONFIG_INET
- ret = arp_find(eth->h_dest, skb);
-#endif
- }
- return ret;
-}
-
static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type)
{
@@ -2005,7 +1973,6 @@ static void isdn_header_cache_update(struct hh_cache *hh,
static const struct header_ops isdn_header_ops = {
.create = isdn_net_header,
- .rebuild = isdn_net_rebuild_header,
.cache = isdn_header_cache,
.cache_update = isdn_header_cache_update,
};
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 87f7dff20ff6..52c43821f746 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -295,7 +295,7 @@ dsp_cmx_del_conf_member(struct dsp *dsp)
}
}
printk(KERN_WARNING
- "%s: dsp is not present in its own conf_meber list.\n",
+ "%s: dsp is not present in its own conf_member list.\n",
__func__);
return -EINVAL;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 77025f5cb57d..0222b1a35a2d 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -460,7 +460,7 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: enable mixing of "
- "tx-data with conf mebers\n", __func__);
+ "tx-data with conf members\n", __func__);
dsp->tx_mix = 1;
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
@@ -474,7 +474,7 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: disable mixing of "
- "tx-data with conf mebers\n", __func__);
+ "tx-data with conf members\n", __func__);
dsp->tx_mix = 0;
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 84b35925ee4d..8dc7290089bb 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -112,8 +112,8 @@ mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
}
static int
-mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
@@ -173,8 +173,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
static int
-mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 25b320d64e26..966b9605f5f0 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -526,6 +526,14 @@ config LEDS_VERSATILE
This option enables support for the LEDs on the ARM Versatile
and RealView boards. Say Y to enable these.
+config LEDS_PM8941_WLED
+ tristate "LED support for the Qualcomm PM8941 WLED block"
+ depends on LEDS_CLASS
+ select REGMAP
+ help
+ This option enables support for the 'White' LED block
+ on Qualcomm PM8941 PMICs.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index cbba921b6f1c..bf4609338e10 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
+obj-$(CONFIG_LEDS_PM8941_WLED) += leds-pm8941-wled.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index 4a19fd44f93f..3b2573411a37 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -216,75 +216,6 @@ static ssize_t flash_fault_show(struct device *dev,
}
static DEVICE_ATTR_RO(flash_fault);
-static ssize_t available_sync_leds_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
- char *pbuf = buf;
- int i, buf_len;
-
- buf_len = sprintf(pbuf, "[0: none] ");
- pbuf += buf_len;
-
- for (i = 0; i < fled_cdev->num_sync_leds; ++i) {
- buf_len = sprintf(pbuf, "[%d: %s] ", i + 1,
- fled_cdev->sync_leds[i]->led_cdev.name);
- pbuf += buf_len;
- }
-
- return sprintf(buf, "%s\n", buf);
-}
-static DEVICE_ATTR_RO(available_sync_leds);
-
-static ssize_t flash_sync_strobe_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
- unsigned long led_id;
- ssize_t ret;
-
- mutex_lock(&led_cdev->led_access);
-
- if (led_sysfs_is_disabled(led_cdev)) {
- ret = -EBUSY;
- goto unlock;
- }
-
- ret = kstrtoul(buf, 10, &led_id);
- if (ret)
- goto unlock;
-
- if (led_id > fled_cdev->num_sync_leds) {
- ret = -ERANGE;
- goto unlock;
- }
-
- fled_cdev->sync_led_id = led_id;
-
- ret = size;
-unlock:
- mutex_unlock(&led_cdev->led_access);
- return ret;
-}
-
-static ssize_t flash_sync_strobe_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
- int sled_id = fled_cdev->sync_led_id;
- char *sync_led_name = "none";
-
- if (fled_cdev->sync_led_id > 0)
- sync_led_name = (char *)
- fled_cdev->sync_leds[sled_id - 1]->led_cdev.name;
-
- return sprintf(buf, "[%d: %s]\n", sled_id, sync_led_name);
-}
-static DEVICE_ATTR_RW(flash_sync_strobe);
-
static struct attribute *led_flash_strobe_attrs[] = {
&dev_attr_flash_strobe.attr,
NULL,
@@ -307,12 +238,6 @@ static struct attribute *led_flash_fault_attrs[] = {
NULL,
};
-static struct attribute *led_flash_sync_strobe_attrs[] = {
- &dev_attr_available_sync_leds.attr,
- &dev_attr_flash_sync_strobe.attr,
- NULL,
-};
-
static const struct attribute_group led_flash_strobe_group = {
.attrs = led_flash_strobe_attrs,
};
@@ -329,10 +254,6 @@ static const struct attribute_group led_flash_fault_group = {
.attrs = led_flash_fault_attrs,
};
-static const struct attribute_group led_flash_sync_strobe_group = {
- .attrs = led_flash_sync_strobe_attrs,
-};
-
static void led_flash_resume(struct led_classdev *led_cdev)
{
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
@@ -361,9 +282,6 @@ static void led_flash_init_sysfs_groups(struct led_classdev_flash *fled_cdev)
if (ops->fault_get)
flash_groups[num_sysfs_groups++] = &led_flash_fault_group;
- if (led_cdev->flags & LED_DEV_CAP_SYNC_STROBE)
- flash_groups[num_sysfs_groups++] = &led_flash_sync_strobe_group;
-
led_cdev->groups = flash_groups;
}
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 795ec994c663..728681debdbe 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -212,6 +212,31 @@ static const struct dev_pm_ops leds_class_dev_pm_ops = {
.resume = led_resume,
};
+static int match_name(struct device *dev, const void *data)
+{
+ if (!dev_name(dev))
+ return 0;
+ return !strcmp(dev_name(dev), (char *)data);
+}
+
+static int led_classdev_next_name(const char *init_name, char *name,
+ size_t len)
+{
+ unsigned int i = 0;
+ int ret = 0;
+
+ strlcpy(name, init_name, len);
+
+ while (class_find_device(leds_class, NULL, name, match_name) &&
+ (ret < len))
+ ret = snprintf(name, len, "%s_%u", init_name, ++i);
+
+ if (ret >= len)
+ return -ENOMEM;
+
+ return i;
+}
+
/**
* led_classdev_register - register a new object of led_classdev class.
* @parent: The device to register.
@@ -219,12 +244,22 @@ static const struct dev_pm_ops leds_class_dev_pm_ops = {
*/
int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
+ char name[64];
+ int ret;
+
+ ret = led_classdev_next_name(led_cdev->name, name, sizeof(name));
+ if (ret < 0)
+ return ret;
+
led_cdev->dev = device_create_with_groups(leds_class, parent, 0,
- led_cdev, led_cdev->groups,
- "%s", led_cdev->name);
+ led_cdev, led_cdev->groups, "%s", name);
if (IS_ERR(led_cdev->dev))
return PTR_ERR(led_cdev->dev);
+ if (ret)
+ dev_warn(parent, "Led %s renamed to %s due to name collision",
+ led_cdev->name, dev_name(led_cdev->dev));
+
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
@@ -288,6 +323,63 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_classdev_unregister);
+static void devm_led_classdev_release(struct device *dev, void *res)
+{
+ led_classdev_unregister(*(struct led_classdev **)res);
+}
+
+/**
+ * devm_led_classdev_register - resource managed led_classdev_register()
+ * @parent: The device to register.
+ * @led_cdev: the led_classdev structure for this device.
+ */
+int devm_led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev)
+{
+ struct led_classdev **dr;
+ int rc;
+
+ dr = devres_alloc(devm_led_classdev_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = led_classdev_register(parent, led_cdev);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = led_cdev;
+ devres_add(parent, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_led_classdev_register);
+
+static int devm_led_classdev_match(struct device *dev, void *res, void *data)
+{
+ struct led_cdev **p = res;
+
+ if (WARN_ON(!p || !*p))
+ return 0;
+
+ return *p == data;
+}
+
+/**
+ * devm_led_classdev_unregister() - resource managed led_classdev_unregister()
+ * @parent: The device to unregister.
+ * @led_cdev: the led_classdev structure for this device.
+ */
+void devm_led_classdev_unregister(struct device *dev,
+ struct led_classdev *led_cdev)
+{
+ WARN_ON(devres_release(dev,
+ devm_led_classdev_release,
+ devm_led_classdev_match, led_cdev));
+}
+EXPORT_SYMBOL_GPL(devm_led_classdev_unregister);
+
static int __init leds_init(void)
{
leds_class = class_create(THIS_MODULE, "leds");
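
The new led_classdev_next_name() simply retries with numeric suffixes until the requested name is free. A userspace sketch of that loop, with a hypothetical name_in_use() standing in for class_find_device():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for class_find_device(): pretend "led0" exists. */
static bool name_in_use(const char *name)
{
	return strcmp(name, "led0") == 0;
}

/* Mirror of the collision-avoidance loop: try init_name, then
 * init_name_1, init_name_2, ... until an unused name is found. */
static int next_name(const char *init_name, char *name, size_t len)
{
	unsigned int i = 0;
	int ret = 0;

	snprintf(name, len, "%s", init_name);
	while (name_in_use(name) && (size_t)ret < len)
		ret = snprintf(name, len, "%s_%u", init_name, ++i);
	if ((size_t)ret >= len)
		return -1;
	return i;	/* 0 means no rename was needed */
}

int main(void)
{
	char buf[64];
	int n = next_name("led0", buf, sizeof(buf));

	printf("%s (suffix count %d)\n", buf, n);	/* "led0_1 (suffix count 1)" */
	return 0;
}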
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d26af0a79a90..15eb3f86f670 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -184,7 +184,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct gpio_led led = {};
const char *state = NULL;
- led.gpiod = devm_get_gpiod_from_child(dev, child);
+ led.gpiod = devm_get_gpiod_from_child(dev, NULL, child);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
ret = PTR_ERR(led.gpiod);
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index 00f068b0fa6f..d3098e395fff 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -406,6 +406,6 @@ static struct i2c_driver lp8501_driver = {
module_i2c_driver(lp8501_driver);
-MODULE_DESCRIPTION("Texas Instruments LP8501 LED drvier");
+MODULE_DESCRIPTION("Texas Instruments LP8501 LED driver");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 840e93f3ab3e..8c2b7fbe2392 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -391,11 +391,13 @@ static int lp8860_probe(struct i2c_client *client,
}
}
- led->enable_gpio = devm_gpiod_get(&client->dev, "enable");
- if (IS_ERR(led->enable_gpio))
- led->enable_gpio = NULL;
- else
- gpiod_direction_output(led->enable_gpio, 0);
+ led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(led->enable_gpio)) {
+ ret = PTR_ERR(led->enable_gpio);
+ dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ return ret;
+ }
led->regulator = devm_regulator_get(&client->dev, "vled");
if (IS_ERR(led->regulator))
@@ -486,6 +488,6 @@ static struct i2c_driver lp8860_driver = {
};
module_i2c_driver(lp8860_driver);
-MODULE_DESCRIPTION("Texas Instruments LP8860 LED drvier");
+MODULE_DESCRIPTION("Texas Instruments LP8860 LED driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index f110b4c456ba..bee3e1ab27fd 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -289,7 +289,7 @@ pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
return ERR_PTR(-ENOMEM);
for_each_child_of_node(np, child) {
- struct led_info led;
+ struct led_info led = {};
u32 reg;
int res;
diff --git a/drivers/leds/leds-pm8941-wled.c b/drivers/leds/leds-pm8941-wled.c
new file mode 100644
index 000000000000..bf64a593fbf1
--- /dev/null
+++ b/drivers/leds/leds-pm8941-wled.c
@@ -0,0 +1,435 @@
+/* Copyright (c) 2015, Sony Mobile Communications, AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define PM8941_WLED_REG_VAL_BASE 0x40
+#define PM8941_WLED_REG_VAL_MAX 0xFFF
+
+#define PM8941_WLED_REG_MOD_EN 0x46
+#define PM8941_WLED_REG_MOD_EN_BIT BIT(7)
+#define PM8941_WLED_REG_MOD_EN_MASK BIT(7)
+
+#define PM8941_WLED_REG_SYNC 0x47
+#define PM8941_WLED_REG_SYNC_MASK 0x07
+#define PM8941_WLED_REG_SYNC_LED1 BIT(0)
+#define PM8941_WLED_REG_SYNC_LED2 BIT(1)
+#define PM8941_WLED_REG_SYNC_LED3 BIT(2)
+#define PM8941_WLED_REG_SYNC_ALL 0x07
+#define PM8941_WLED_REG_SYNC_CLEAR 0x00
+
+#define PM8941_WLED_REG_FREQ 0x4c
+#define PM8941_WLED_REG_FREQ_MASK 0x0f
+
+#define PM8941_WLED_REG_OVP 0x4d
+#define PM8941_WLED_REG_OVP_MASK 0x03
+
+#define PM8941_WLED_REG_BOOST 0x4e
+#define PM8941_WLED_REG_BOOST_MASK 0x07
+
+#define PM8941_WLED_REG_SINK 0x4f
+#define PM8941_WLED_REG_SINK_MASK 0xe0
+#define PM8941_WLED_REG_SINK_SHFT 0x05
+
+/* Per-'string' registers below */
+#define PM8941_WLED_REG_STR_OFFSET 0x10
+
+#define PM8941_WLED_REG_STR_MOD_EN_BASE 0x60
+#define PM8941_WLED_REG_STR_MOD_MASK BIT(7)
+#define PM8941_WLED_REG_STR_MOD_EN BIT(7)
+
+#define PM8941_WLED_REG_STR_SCALE_BASE 0x62
+#define PM8941_WLED_REG_STR_SCALE_MASK 0x1f
+
+#define PM8941_WLED_REG_STR_MOD_SRC_BASE 0x63
+#define PM8941_WLED_REG_STR_MOD_SRC_MASK 0x01
+#define PM8941_WLED_REG_STR_MOD_SRC_INT 0x00
+#define PM8941_WLED_REG_STR_MOD_SRC_EXT 0x01
+
+#define PM8941_WLED_REG_STR_CABC_BASE 0x66
+#define PM8941_WLED_REG_STR_CABC_MASK BIT(7)
+#define PM8941_WLED_REG_STR_CABC_EN BIT(7)
+
+struct pm8941_wled_config {
+ u32 i_boost_limit;
+ u32 ovp;
+ u32 switch_freq;
+ u32 num_strings;
+ u32 i_limit;
+ bool cs_out_en;
+ bool ext_gen;
+ bool cabc_en;
+};
+
+struct pm8941_wled {
+ struct regmap *regmap;
+ u16 addr;
+
+ struct led_classdev cdev;
+
+ struct pm8941_wled_config cfg;
+};
+
+static int pm8941_wled_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct pm8941_wled *wled;
+ u8 ctrl = 0;
+ u16 val;
+ int rc;
+ int i;
+
+ wled = container_of(cdev, struct pm8941_wled, cdev);
+
+ if (value != 0)
+ ctrl = PM8941_WLED_REG_MOD_EN_BIT;
+
+ val = value * PM8941_WLED_REG_VAL_MAX / LED_FULL;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->addr + PM8941_WLED_REG_MOD_EN,
+ PM8941_WLED_REG_MOD_EN_MASK, ctrl);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
+
+ rc = regmap_bulk_write(wled->regmap,
+ wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
+ v, 2);
+ if (rc)
+ return rc;
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->addr + PM8941_WLED_REG_SYNC,
+ PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->addr + PM8941_WLED_REG_SYNC,
+ PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
+ return rc;
+}
+
+static void pm8941_wled_set_brightness(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ if (pm8941_wled_set(cdev, value)) {
+ dev_err(cdev->dev, "Unable to set brightness\n");
+ return;
+ }
+ cdev->brightness = value;
+}
+
+static int pm8941_wled_setup(struct pm8941_wled *wled)
+{
+ int rc;
+ int i;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->addr + PM8941_WLED_REG_OVP,
+ PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->addr + PM8941_WLED_REG_BOOST,
+ PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->addr + PM8941_WLED_REG_FREQ,
+ PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
+ if (rc)
+ return rc;
+
+ if (wled->cfg.cs_out_en) {
+ u8 all = (BIT(wled->cfg.num_strings) - 1)
+ << PM8941_WLED_REG_SINK_SHFT;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->addr + PM8941_WLED_REG_SINK,
+ PM8941_WLED_REG_SINK_MASK, all);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
+
+ rc = regmap_update_bits(wled->regmap,
+ addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
+ PM8941_WLED_REG_STR_MOD_MASK,
+ PM8941_WLED_REG_STR_MOD_EN);
+ if (rc)
+ return rc;
+
+ if (wled->cfg.ext_gen) {
+ rc = regmap_update_bits(wled->regmap,
+ addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
+ PM8941_WLED_REG_STR_MOD_SRC_MASK,
+ PM8941_WLED_REG_STR_MOD_SRC_EXT);
+ if (rc)
+ return rc;
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ addr + PM8941_WLED_REG_STR_SCALE_BASE,
+ PM8941_WLED_REG_STR_SCALE_MASK,
+ wled->cfg.i_limit);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ addr + PM8941_WLED_REG_STR_CABC_BASE,
+ PM8941_WLED_REG_STR_CABC_MASK,
+ wled->cfg.cabc_en ?
+ PM8941_WLED_REG_STR_CABC_EN : 0);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static const struct pm8941_wled_config pm8941_wled_config_defaults = {
+ .i_boost_limit = 3,
+ .i_limit = 20,
+ .ovp = 2,
+ .switch_freq = 5,
+ .num_strings = 0,
+ .cs_out_en = false,
+ .ext_gen = false,
+ .cabc_en = false,
+};
+
+struct pm8941_wled_var_cfg {
+ const u32 *values;
+ u32 (*fn)(u32);
+ int size;
+};
+
+static const u32 pm8941_wled_i_boost_limit_values[] = {
+ 105, 385, 525, 805, 980, 1260, 1400, 1680,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
+ .values = pm8941_wled_i_boost_limit_values,
+ .size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
+};
+
+static const u32 pm8941_wled_ovp_values[] = {
+ 35, 32, 29, 27,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
+ .values = pm8941_wled_ovp_values,
+ .size = ARRAY_SIZE(pm8941_wled_ovp_values),
+};
+
+static u32 pm8941_wled_num_strings_values_fn(u32 idx)
+{
+ return idx + 1;
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
+ .fn = pm8941_wled_num_strings_values_fn,
+ .size = 3,
+};
+
+static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
+{
+ return 19200 / (2 * (1 + idx));
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
+ .fn = pm8941_wled_switch_freq_values_fn,
+ .size = 16,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
+ .size = 26,
+};
+
+static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
+{
+ if (idx >= cfg->size)
+ return UINT_MAX;
+ if (cfg->fn)
+ return cfg->fn(idx);
+ if (cfg->values)
+ return cfg->values[idx];
+ return idx;
+}
+
+static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
+{
+ struct pm8941_wled_config *cfg = &wled->cfg;
+ u32 val;
+ int rc;
+ u32 c;
+ int i;
+ int j;
+
+ const struct {
+ const char *name;
+ u32 *val_ptr;
+ const struct pm8941_wled_var_cfg *cfg;
+ } u32_opts[] = {
+ {
+ "qcom,current-boost-limit",
+ &cfg->i_boost_limit,
+ .cfg = &pm8941_wled_i_boost_limit_cfg,
+ },
+ {
+ "qcom,current-limit",
+ &cfg->i_limit,
+ .cfg = &pm8941_wled_i_limit_cfg,
+ },
+ {
+ "qcom,ovp",
+ &cfg->ovp,
+ .cfg = &pm8941_wled_ovp_cfg,
+ },
+ {
+ "qcom,switching-freq",
+ &cfg->switch_freq,
+ .cfg = &pm8941_wled_switch_freq_cfg,
+ },
+ {
+ "qcom,num-strings",
+ &cfg->num_strings,
+ .cfg = &pm8941_wled_num_strings_cfg,
+ },
+ };
+ const struct {
+ const char *name;
+ bool *val_ptr;
+ } bool_opts[] = {
+ { "qcom,cs-out", &cfg->cs_out_en, },
+ { "qcom,ext-gen", &cfg->ext_gen, },
+ { "qcom,cabc", &cfg->cabc_en, },
+ };
+
+ rc = of_property_read_u32(dev->of_node, "reg", &val);
+ if (rc || val > 0xffff) {
+ dev_err(dev, "invalid IO resources\n");
+ return rc ? rc : -EINVAL;
+ }
+ wled->addr = val;
+
+ rc = of_property_read_string(dev->of_node, "label", &wled->cdev.name);
+ if (rc)
+ wled->cdev.name = dev->of_node->name;
+
+ wled->cdev.default_trigger = of_get_property(dev->of_node,
+ "linux,default-trigger", NULL);
+
+ *cfg = pm8941_wled_config_defaults;
+ for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
+ rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
+ if (rc == -EINVAL) {
+ continue;
+ } else if (rc) {
+ dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
+ return rc;
+ }
+
+ c = UINT_MAX;
+ for (j = 0; c != val; j++) {
+ c = pm8941_wled_values(u32_opts[i].cfg, j);
+ if (c == UINT_MAX) {
+ dev_err(dev, "invalid value for '%s'\n",
+ u32_opts[i].name);
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
+ *u32_opts[i].val_ptr = j;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
+ if (of_property_read_bool(dev->of_node, bool_opts[i].name))
+ *bool_opts[i].val_ptr = true;
+ }
+
+ cfg->num_strings = cfg->num_strings + 1;
+
+ return 0;
+}
+
+static int pm8941_wled_probe(struct platform_device *pdev)
+{
+ struct pm8941_wled *wled;
+ struct regmap *regmap;
+ int rc;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "Unable to get regmap\n");
+ return -EINVAL;
+ }
+
+ wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
+ if (!wled)
+ return -ENOMEM;
+
+ wled->regmap = regmap;
+
+ rc = pm8941_wled_configure(wled, &pdev->dev);
+ if (rc)
+ return rc;
+
+ rc = pm8941_wled_setup(wled);
+ if (rc)
+ return rc;
+
+ wled->cdev.brightness_set = pm8941_wled_set_brightness;
+
+ rc = devm_led_classdev_register(&pdev->dev, &wled->cdev);
+ if (rc)
+ return rc;
+
+ platform_set_drvdata(pdev, wled);
+
+ return 0;
+};
+
+static const struct of_device_id pm8941_wled_match_table[] = {
+ { .compatible = "qcom,pm8941-wled" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
+
+static struct platform_driver pm8941_wled_driver = {
+ .probe = pm8941_wled_probe,
+ .driver = {
+ .name = "pm8941-wled",
+ .of_match_table = pm8941_wled_match_table,
+ },
+};
+
+module_platform_driver(pm8941_wled_driver);
+
+MODULE_DESCRIPTION("pm8941 wled driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pm8941-wled");
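
The brightness path in pm8941_wled_set() scales the 0..LED_FULL value into the 12-bit register range and splits it into the two bytes written per string. A small standalone sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define WLED_VAL_MAX	0xFFF	/* 12-bit brightness register */
#define LED_FULL	255	/* maximum led_brightness value */

int main(void)
{
	unsigned int value = 128;				/* example brightness */
	uint16_t val = value * WLED_VAL_MAX / LED_FULL;		/* scale to 12 bits */
	uint8_t v[2] = { val & 0xff, (val >> 8) & 0xf };	/* low byte, high nibble */

	printf("val=0x%03x lo=0x%02x hi=0x%x\n", val, v[0], v[1]);
	return 0;
}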
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index f668500a2157..1d07e3e83d29 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -121,9 +121,6 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
return ret;
}
- if (child)
- led_data->period = pwm_get_period(led_data->pwm);
-
led_data->can_sleep = pwm_can_sleep(led_data->pwm);
if (led_data->can_sleep)
INIT_WORK(&led_data->work, led_pwm_work);
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 1219af493c0f..19a32280731d 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -211,10 +211,9 @@ static void initialize(struct lg_cpu *cpu)
/*
* The Guest tells us where we're not to deliver interrupts by putting
- * the range of addresses into "struct lguest_data".
+ * the instruction address into "struct lguest_data".
*/
- if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
- || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+ if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret))
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
/*
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 70dfcdc29f1f..5e7559be222a 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -56,21 +56,16 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
}
/*H:210
- * The set_guest_interrupt() routine actually delivers the interrupt or
- * trap. The mechanics of delivering traps and interrupts to the Guest are the
- * same, except some traps have an "error code" which gets pushed onto the
- * stack as well: the caller tells us if this is one.
- *
- * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
- * interrupt or trap. It's split into two parts for traditional reasons: gcc
- * on i386 used to be frightened by 64 bit numbers.
+ * The push_guest_interrupt_stack() routine saves Guest state on the stack for
+ * an interrupt or trap. The mechanics of delivering traps and interrupts to
+ * the Guest are the same, except some traps have an "error code" which gets
+ * pushed onto the stack as well: the caller tells us if this is one.
*
* We set up the stack just like the CPU does for a real interrupt, so it's
* identical for the Guest (and the standard "iret" instruction will undo
* it).
*/
-static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
- bool has_err)
+static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
{
unsigned long gstack, origstack;
u32 eflags, ss, irq_enable;
@@ -130,12 +125,28 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
if (has_err)
push_guest_stack(cpu, &gstack, cpu->regs->errcode);
- /*
- * Now we've pushed all the old state, we change the stack, the code
- * segment and the address to execute.
- */
+ /* Adjust the stack pointer and stack segment. */
cpu->regs->ss = ss;
cpu->regs->esp = virtstack + (gstack - origstack);
+}
+
+/*
+ * This actually makes the Guest start executing the given interrupt/trap
+ * handler.
+ *
+ * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
+ * interrupt or trap. It's split into two parts for traditional reasons: gcc
+ * on i386 used to be frightened by 64 bit numbers.
+ */
+static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
+{
+ /* If we're already in the kernel, we don't change stacks. */
+ if ((cpu->regs->ss&0x3) != GUEST_PL)
+ cpu->regs->ss = cpu->esp1;
+
+ /*
+ * Set the code segment and the address to execute.
+ */
cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
cpu->regs->eip = idt_address(lo, hi);
@@ -158,6 +169,24 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
kill_guest(cpu, "Disabling interrupts");
}
+/* This restores the eflags word which was pushed on the stack by a trap */
+static void restore_eflags(struct lg_cpu *cpu)
+{
+ /* This is the physical address of the stack. */
+ unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);
+
+ /*
+ * Stack looks like this:
+ * Address Contents
+ * esp EIP
+ * esp + 4 CS
+ * esp + 8 EFLAGS
+ */
+ cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
+ cpu->regs->eflags &=
+ ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+}
+
/*H:205
* Virtual Interrupts.
*
@@ -200,14 +229,6 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
BUG_ON(irq >= LGUEST_IRQS);
- /*
- * They may be in the middle of an iret, where they asked us never to
- * deliver interrupts.
- */
- if (cpu->regs->eip >= cpu->lg->noirq_start &&
- (cpu->regs->eip < cpu->lg->noirq_end))
- return;
-
/* If they're halted, interrupts restart them. */
if (cpu->halted) {
/* Re-enable interrupts. */
@@ -237,12 +258,34 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
if (idt_present(idt->a, idt->b)) {
/* OK, mark it no longer pending and deliver it. */
clear_bit(irq, cpu->irqs_pending);
+
/*
- * set_guest_interrupt() takes the interrupt descriptor and a
- * flag to say whether this interrupt pushes an error code onto
- * the stack as well: virtual interrupts never do.
+ * They may be about to iret, where they asked us never to
+ * deliver interrupts. In this case, we can emulate that iret
+ * then immediately deliver the interrupt. This is basically
+ * a noop: the iret would pop the interrupt frame and restore
+ * eflags, and then we'd set it up again. So just restore the
+ * eflags word and jump straight to the handler in this case.
+ *
+ * Denys Vlasenko points out that this isn't quite right: if
+ * the iret was returning to userspace, then that interrupt
+ * would reset the stack pointer (which the Guest told us
+ * about via LHCALL_SET_STACK). But unless the Guest is being
+ * *really* weird, that will be the same as the current stack
+ * anyway.
*/
- set_guest_interrupt(cpu, idt->a, idt->b, false);
+ if (cpu->regs->eip == cpu->lg->noirq_iret) {
+ restore_eflags(cpu);
+ } else {
+ /*
+ * push_guest_interrupt_stack() takes a flag to say whether
+ * this interrupt pushes an error code onto the stack
+ * as well: virtual interrupts never do.
+ */
+ push_guest_interrupt_stack(cpu, false);
+ }
+ /* Actually make Guest cpu jump to handler. */
+ guest_run_interrupt(cpu, idt->a, idt->b);
}
/*
@@ -353,8 +396,9 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*/
if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
return false;
- set_guest_interrupt(cpu, cpu->arch.idt[num].a,
- cpu->arch.idt[num].b, has_err(num));
+ push_guest_interrupt_stack(cpu, has_err(num));
+ guest_run_interrupt(cpu, cpu->arch.idt[num].a,
+ cpu->arch.idt[num].b);
return true;
}
@@ -395,8 +439,9 @@ static bool direct_trap(unsigned int num)
* The Guest has the ability to turn its interrupt gates into trap gates,
* if it is careful. The Host will let trap gates go directly to the
* Guest, but the Guest needs the interrupts atomically disabled for an
- * interrupt gate. It can do this by pointing the trap gate at instructions
- * within noirq_start and noirq_end, where it can safely disable interrupts.
+ * interrupt gate. The Host could provide a mechanism to register more
+ * "no-interrupt" regions, and the Guest could point the trap gate at
+ * instructions within that region, where it can safely disable interrupts.
*/
/*M:006
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 307e8b39e7d1..ac8ad0461e80 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -102,7 +102,7 @@ struct lguest {
struct pgdir pgdirs[4];
- unsigned long noirq_start, noirq_end;
+ unsigned long noirq_iret;
unsigned int stack_pages;
u32 tsc_khz;
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index c4c6113eb9a6..30c60687d277 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -339,6 +339,13 @@ static ssize_t write(struct file *file, const char __user *in,
}
}
+static int open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+
+ return 0;
+}
+
/*L:060
* The final piece of interface code is the close() routine. It reverses
* everything done in initialize(). This is usually called because the
@@ -409,6 +416,7 @@ static int close(struct inode *inode, struct file *file)
*/
static const struct file_operations lguest_fops = {
.owner = THIS_MODULE,
+ .open = open,
.release = close,
.write = write,
.read = read,
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 4192901cab40..048901a1111a 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -182,31 +182,31 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm)
/* Prepare 4 dbdma commands for the 2 buffers */
memset(cmd, 0, 4 * sizeof(struct dbdma_cmd));
- st_le16(&cmd->req_count, 4);
- st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
- st_le32(&cmd->phy_addr, rm->dma_buf_p +
+ cmd->req_count = cpu_to_le16(4);
+ cmd->command = cpu_to_le16(STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
+ cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, mark));
- st_le32(&cmd->cmd_dep, 0x02000000);
+ cmd->cmd_dep = cpu_to_le32(0x02000000);
cmd++;
- st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
- st_le16(&cmd->command, OUTPUT_MORE);
- st_le32(&cmd->phy_addr, rm->dma_buf_p +
+ cmd->req_count = cpu_to_le16(SAMPLE_COUNT * 4);
+ cmd->command = cpu_to_le16(OUTPUT_MORE);
+ cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, buf1));
cmd++;
- st_le16(&cmd->req_count, 4);
- st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
- st_le32(&cmd->phy_addr, rm->dma_buf_p +
+ cmd->req_count = cpu_to_le16(4);
+ cmd->command = cpu_to_le16(STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
+ cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, mark));
- st_le32(&cmd->cmd_dep, 0x01000000);
+ cmd->cmd_dep = cpu_to_le32(0x01000000);
cmd++;
- st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
- st_le16(&cmd->command, OUTPUT_MORE | BR_ALWAYS);
- st_le32(&cmd->phy_addr, rm->dma_buf_p +
+ cmd->req_count = cpu_to_le16(SAMPLE_COUNT * 4);
+ cmd->command = cpu_to_le16(OUTPUT_MORE | BR_ALWAYS);
+ cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, buf2));
- st_le32(&cmd->cmd_dep, rm->dma_buf_p);
+ cmd->cmd_dep = cpu_to_le32(rm->dma_buf_p);
rackmeter_do_pause(rm, 0);
}
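
The rack-meter hunk replaces the old st_le16()/st_le32() store helpers with explicit cpu_to_le16()/cpu_to_le32() conversions plus plain assignments. A userspace sketch of the equivalence, using glibc's htole16() as a stand-in for cpu_to_le16() and an assumed two-field descriptor for illustration:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct fake_dbdma_cmd {		/* assumed layout, for illustration only */
	uint16_t req_count;
	uint16_t command;
};

int main(void)
{
	struct fake_dbdma_cmd cmd;

	/* was: st_le16(&cmd.req_count, 4); now convert the value, then assign */
	cmd.req_count = htole16(4);
	cmd.command   = htole16(0x30);
	printf("req_count=%04x command=%04x\n", cmd.req_count, cmd.command);
	return 0;
}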
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 10ae69bcbbd2..d531f804455d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -557,8 +557,7 @@ int __init smu_init (void)
return 0;
fail_msg_node:
- if (smu->msg_node)
- of_node_put(smu->msg_node);
+ of_node_put(smu->msg_node);
fail_db_node:
of_node_put(smu->db_node);
fail_bootmem:
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index dee88e59f0d3..f9512bfa6c3c 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -329,10 +329,11 @@ int __init find_via_pmu(void)
gaddr = of_translate_address(gpiop, reg);
if (gaddr != OF_BAD_ADDR)
gpio_reg = ioremap(gaddr, 0x10);
+ of_node_put(gpiop);
}
if (gpio_reg == NULL) {
printk(KERN_ERR "via-pmu: Can't find GPIO reg !\n");
- goto fail_gpio;
+ goto fail;
}
} else
pmu_kind = PMU_UNKNOWN;
@@ -340,7 +341,7 @@ int __init find_via_pmu(void)
via = ioremap(taddr, 0x2000);
if (via == NULL) {
printk(KERN_ERR "via-pmu: Can't map address !\n");
- goto fail;
+ goto fail_via_remap;
}
out_8(&via[IER], IER_CLR | 0x7f); /* disable all intrs */
@@ -348,10 +349,8 @@ int __init find_via_pmu(void)
pmu_state = idle;
- if (!init_pmu()) {
- via = NULL;
- return 0;
- }
+ if (!init_pmu())
+ goto fail_init;
printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
@@ -359,11 +358,15 @@ int __init find_via_pmu(void)
sys_ctrler = SYS_CTRLER_PMU;
return 1;
- fail:
- of_node_put(vias);
+
+ fail_init:
+ iounmap(via);
+ via = NULL;
+ fail_via_remap:
iounmap(gpio_reg);
gpio_reg = NULL;
- fail_gpio:
+ fail:
+ of_node_put(vias);
vias = NULL;
return 0;
}
@@ -2109,7 +2112,7 @@ pmu_read(struct file *file, char __user *buf,
spin_lock_irqsave(&pp->lock, flags);
add_wait_queue(&pp->wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
for (;;) {
ret = -EAGAIN;
@@ -2138,7 +2141,7 @@ pmu_read(struct file *file, char __user *buf,
schedule();
spin_lock_irqsave(&pp->lock, flags);
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&pp->wait, &wait);
spin_unlock_irqrestore(&pp->lock, flags);
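
The via-pmu hunk replaces direct writes to current->state with the set_current_state() helpers, which include the memory barrier needed against a concurrent wake-up. A sketch of the resulting sleep/wake idiom; "waitq" and "condition" are placeholders for the driver's own wait queue and wake-up condition:

#include <linux/sched.h>
#include <linux/wait.h>

static int demo_wait_for(wait_queue_head_t *waitq, int *condition)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(waitq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);	/* barrier vs. the waker */
	while (!*condition) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);	/* no barrier needed here */
	remove_wait_queue(waitq, &wait);
	return ret;
}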
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 84325f267acf..84b0a2d74d60 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -6,6 +6,15 @@ menuconfig MAILBOX
signals. Say Y if your platform supports hardware mailboxes.
if MAILBOX
+
+config ARM_MHU
+ tristate "ARM MHU Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHU controller driver.
+ The controller has 3 mailbox channels, the last of which can be
+ used in Secure mode only.
+
config PL320_MBOX
bool "ARM PL320 Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e79231154cf..b18201e97e29 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -2,6 +2,8 @@
obj-$(CONFIG_MAILBOX) += mailbox.o
+obj-$(CONFIG_ARM_MHU) += arm_mhu.o
+
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
new file mode 100644
index 000000000000..ac693c635357
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/amba/bus.h>
+#include <linux/mailbox_controller.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
+#define MHU_CHANS 3
+
+struct mhu_link {
+ unsigned irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_startup(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, mhu_rx_interrupt,
+ IRQF_SHARED, "mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to aquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhu_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static struct mbox_chan_ops mhu_ops = {
+ .send_data = mhu_send_data,
+ .startup = mhu_startup,
+ .shutdown = mhu_shutdown,
+ .last_tx_done = mhu_last_tx_done,
+};
+
+static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int i, err;
+ struct arm_mhu *mhu;
+ struct device *dev = &adev->dev;
+ int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = adev->irq[i];
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 10;
+
+ amba_set_drvdata(adev, mhu);
+
+ err = mbox_controller_register(&mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "ARM MHU Mailbox registered\n");
+ return 0;
+}
+
+static int mhu_remove(struct amba_device *adev)
+{
+ struct arm_mhu *mhu = amba_get_drvdata(adev);
+
+ mbox_controller_unregister(&mhu->mbox);
+
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_driver = {
+ .drv = {
+ .name = "mhu",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_probe,
+ .remove = mhu_remove,
+};
+module_amba_driver(arm_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Driver");
+MODULE_AUTHOR("Jassi Brar <jassisinghbrar@gmail.com>");
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 977c814cdf6f..7e91d68a3ac3 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -20,10 +20,35 @@
* shared memory regions as defined in the PCC table entries. The PCC
* specification supports a Doorbell mechanism for the PCC clients
* to notify the platform about new data. This Doorbell information
- * is also specified in each PCC table entry. See pcc_send_data()
- * and pcc_tx_done() for basic mode of operation.
+ * is also specified in each PCC table entry.
*
- * For more details about PCC, please see the ACPI specification from
+ * Typical high level flow of operation is:
+ *
+ * PCC Reads:
+ * * Client tries to acquire a channel lock.
+ * * After it is acquired it writes READ cmd in communication region cmd
+ * address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If command completes, then client has control over channel and
+ * it can proceed with its reads.
+ * * Client releases lock.
+ *
+ * PCC Writes:
+ * * Client tries to acquire channel lock.
+ * * Client writes to its communication region after it acquires a
+ * channel lock.
+ * * Client writes WRITE cmd in communication region cmd address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If command completes, then writes have succeeded and it can release
+ * the channel lock.
+ *
+ * There is a Nominal latency defined for each channel which indicates
+ * how long to wait until a command completes. If the command is not complete,
+ * the client needs to retry or assume failure.
+ *
+ * For more details about PCC, please see the ACPI specification from
* http://www.uefi.org/ACPIv5.1 Section 14.
*
* This file implements PCC as a Mailbox controller and allows for PCC
@@ -42,8 +67,6 @@
#include "mailbox.h"
#define MAX_PCC_SUBSPACES 256
-#define PCCS_SS_SIG_MAGIC 0x50434300
-#define PCC_CMD_COMPLETE 0x1
static struct mbox_chan *pcc_mbox_channels;
@@ -71,23 +94,6 @@ static struct mbox_chan *get_pcc_channel(int id)
}
/**
- * get_subspace_id - Given a Mailbox channel, find out the
- * PCC subspace id.
- * @chan: Pointer to Mailbox Channel from which we want
- * the index.
- * Return: Errno if not found, else positive index number.
- */
-static int get_subspace_id(struct mbox_chan *chan)
-{
- unsigned int id = chan - pcc_mbox_channels;
-
- if (id < 0 || id > pcc_mbox_ctrl.num_chans)
- return -ENOENT;
-
- return id;
-}
-
-/**
* pcc_mbox_request_channel - PCC clients call this function to
* request a pointer to their PCC subspace, from which they
* can get the details of communicating with the remote.
@@ -117,7 +123,7 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
chan = get_pcc_channel(subspace_id);
if (!chan || chan->cl) {
- dev_err(dev, "%s: PCC mailbox not free\n", __func__);
+ dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
@@ -161,81 +167,30 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
/**
- * pcc_tx_done - Callback from Mailbox controller code to
- * check if PCC message transmission completed.
- * @chan: Pointer to Mailbox channel on which previous
- * transmission occurred.
- *
- * Return: TRUE if succeeded.
- */
-static bool pcc_tx_done(struct mbox_chan *chan)
-{
- struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
- u16 cmd_delay = pcct_ss->latency;
- unsigned int retries = 0;
-
- /* Try a few times while waiting for platform to consume */
- while (!(readw_relaxed(&generic_comm_base->status)
- & PCC_CMD_COMPLETE)) {
-
- if (retries++ < 5)
- udelay(cmd_delay);
- else {
- /*
- * If the remote is dead, this will cause the Mbox
- * controller to timeout after mbox client.tx_tout
- * msecs.
- */
- pr_err("PCC platform did not respond.\n");
- return false;
- }
- }
- return true;
-}
-
-/**
- * pcc_send_data - Called from Mailbox Controller code to finally
- * transmit data over channel.
+ * pcc_send_data - Called from Mailbox Controller code. Used
+ * here only to ring the channel doorbell. The PCC client
+ * specific read/write is done in the client driver in
+ * order to maintain atomicity over PCC channel once
+ * OS has control over it. See above for flow of operations.
* @chan: Pointer to Mailbox channel over which to send data.
- * @data: Actual data to be written over channel.
+ * @data: Client specific data written over channel. Used here
+ * only for debug after PCC transaction completes.
*
* Return: Err if something failed else 0 for success.
*/
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
struct acpi_generic_address doorbell;
u64 doorbell_preserve;
u64 doorbell_val;
u64 doorbell_write;
- u16 cmd = *(u16 *) data;
- u16 ss_idx = -1;
-
- ss_idx = get_subspace_id(chan);
-
- if (ss_idx < 0) {
- pr_err("Invalid Subspace ID from PCC client\n");
- return -EINVAL;
- }
doorbell = pcct_ss->doorbell_register;
doorbell_preserve = pcct_ss->preserve_mask;
doorbell_write = pcct_ss->write_mask;
- /* Write to the shared comm region. */
- writew(cmd, &generic_comm_base->command);
-
- /* Write Subspace MAGIC value so platform can identify destination. */
- writel((PCCS_SS_SIG_MAGIC | ss_idx), &generic_comm_base->signature);
-
- /* Flip CMD COMPLETE bit */
- writew(0, &generic_comm_base->status);
-
- /* Sync notification from OSPM to Platform. */
+ /* Sync notification from OS to Platform. */
acpi_read(&doorbell_val, &doorbell);
acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
&doorbell);
@@ -245,7 +200,6 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
static struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
- .last_tx_done = pcc_tx_done,
};
/**
@@ -351,8 +305,6 @@ static int pcc_mbox_probe(struct platform_device *pdev)
pcc_mbox_ctrl.chans = pcc_mbox_channels;
pcc_mbox_ctrl.ops = &pcc_chan_ops;
- pcc_mbox_ctrl.txdone_poll = true;
- pcc_mbox_ctrl.txpoll_period = 10;
pcc_mbox_ctrl.dev = &pdev->dev;
pr_info("Registering PCC driver as Mailbox controller\n");
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 0af7361e377f..de36237d7c6b 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -56,9 +56,9 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
KBUILD_MODNAME);
- if (IS_ERR(res)) {
+ if (!res) {
dev_err(&pdev->dev, "Failed to request PCI memory\n");
- ret = PTR_ERR(res);
+ ret = -EBUSY;
goto out_disable;
}
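
The mcb-pci fix reflects that request_mem_region() reports failure with NULL, not an ERR_PTR value. A minimal sketch of the corrected pattern; base, size and the region name are placeholders:

#include <linux/ioport.h>

static int demo_claim_region(resource_size_t base, resource_size_t size)
{
	struct resource *res;

	res = request_mem_region(base, size, "demo");
	if (!res)
		return -EBUSY;	/* region unavailable or already claimed */

	/* ... use the region ... */
	release_mem_region(base, size);
	return 0;
}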
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 63e05e32b462..edcf4ab66e00 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -175,6 +175,22 @@ config MD_FAULTY
If unsure, say N.
+
+config MD_CLUSTER
+ tristate "Cluster Support for MD (EXPERIMENTAL)"
+ depends on BLK_DEV_MD
+ depends on DLM
+ default n
+ ---help---
+ Clustering support for MD devices. This enables locking and
+ synchronization across multiple systems on the cluster, so all
+ nodes in the cluster can access the MD devices simultaneously.
+
+ This brings the redundancy (and uptime) of RAID levels across the
+ nodes of the cluster.
+
+ If unsure, say N.
+
source "drivers/md/bcache/Kconfig"
config BLK_DEV_DM_BUILTIN
@@ -196,6 +212,17 @@ config BLK_DEV_DM
If unsure, say N.
+config DM_MQ_DEFAULT
+ bool "request-based DM: use blk-mq I/O path by default"
+ depends on BLK_DEV_DM
+ ---help---
+ This option enables the blk-mq based I/O path for request-based
+ DM devices by default. With the option the dm_mod.use_blk_mq
+ module/boot option defaults to Y, without it to N, but it can
+ still be overridden either way.
+
+ If unsure say N.
+
config DM_DEBUG
bool "Device mapper debugging support"
depends on BLK_DEV_DM
@@ -432,4 +459,20 @@ config DM_SWITCH
If unsure, say N.
+config DM_LOG_WRITES
+ tristate "Log writes target support"
+ depends on BLK_DEV_DM
+ ---help---
+ This device-mapper target takes two devices, one device to use
+ normally, one to log all write operations done to the first device.
+ This is for use by file system developers wishing to verify that
+ their fs is writing a consistent file system at all times by allowing
+ them to replay the log in a variety of ways and to check the
+ contents.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-log-writes.
+
+ If unsure, say N.
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index a2da532b1c2b..dba4db5985fb 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_MD_RAID10) += raid10.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
+obj-$(CONFIG_MD_CLUSTER) += md-cluster.o
obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
+obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3a5767968ba0..2bc56e2a3526 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -205,6 +205,10 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+ int node_offset = 0;
+
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * store->file_pages;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
@@ -433,6 +437,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
/* This might have been changed by a reshape */
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
+ sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
bitmap_info.space);
kunmap_atomic(sb);
@@ -544,6 +549,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
unsigned long long events;
+ int nodes = 0;
unsigned long sectors_reserved = 0;
int err = -EINVAL;
struct page *sb_page;
@@ -562,6 +568,22 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return -ENOMEM;
bitmap->storage.sb_page = sb_page;
+re_read:
+ /* If cluster_slot is set, the cluster is setup */
+ if (bitmap->cluster_slot >= 0) {
+ sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
+
+ sector_div(bm_blocks,
+ bitmap->mddev->bitmap_info.chunksize >> 9);
+ /* bits to bytes */
+ bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
+ /* to 4k blocks */
+ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
+ bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+ pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+ bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+ }
+
if (bitmap->storage.file) {
loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
@@ -577,12 +599,15 @@ static int bitmap_read_sb(struct bitmap *bitmap)
if (err)
return err;
+ err = -EINVAL;
sb = kmap_atomic(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -619,7 +644,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
goto out;
}
events = le64_to_cpu(sb->events);
- if (events < bitmap->mddev->events) {
+ if (!nodes && (events < bitmap->mddev->events)) {
printk(KERN_INFO
"%s: bitmap file is out of date (%llu < %llu) "
"-- forcing full recovery\n",
@@ -634,20 +659,40 @@ static int bitmap_read_sb(struct bitmap *bitmap)
if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
err = 0;
+
out:
kunmap_atomic(sb);
+ /* Assigning chunksize is required for "re_read" */
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ if (nodes && (bitmap->cluster_slot < 0)) {
+ err = md_setup_cluster(bitmap->mddev, nodes);
+ if (err) {
+ pr_err("%s: Could not setup cluster service (%d)\n",
+ bmname(bitmap), err);
+ goto out_no_sb;
+ }
+ bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
+ goto re_read;
+ }
+
+
out_no_sb:
if (test_bit(BITMAP_STALE, &bitmap->flags))
bitmap->events_cleared = bitmap->mddev->events;
bitmap->mddev->bitmap_info.chunksize = chunksize;
bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.nodes = nodes;
if (bitmap->mddev->bitmap_info.space == 0 ||
bitmap->mddev->bitmap_info.space > sectors_reserved)
bitmap->mddev->bitmap_info.space = sectors_reserved;
- if (err)
+ if (err) {
bitmap_print_sb(bitmap);
+ if (bitmap->cluster_slot < 0)
+ md_cluster_stop(bitmap->mddev);
+ }
return err;
}
@@ -692,9 +737,10 @@ static inline struct page *filemap_get_page(struct bitmap_storage *store,
}
static int bitmap_storage_alloc(struct bitmap_storage *store,
- unsigned long chunks, int with_super)
+ unsigned long chunks, int with_super,
+ int slot_number)
{
- int pnum;
+ int pnum, offset = 0;
unsigned long num_pages;
unsigned long bytes;
@@ -703,6 +749,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
bytes += sizeof(bitmap_super_t);
num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
+ offset = slot_number * (num_pages - 1);
store->filemap = kmalloc(sizeof(struct page *)
* num_pages, GFP_KERNEL);
@@ -713,20 +760,22 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (store->sb_page == NULL)
return -ENOMEM;
- store->sb_page->index = 0;
}
+
pnum = 0;
if (store->sb_page) {
store->filemap[0] = store->sb_page;
pnum = 1;
+ store->sb_page->index = offset;
}
+
for ( ; pnum < num_pages; pnum++) {
store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!store->filemap[pnum]) {
store->file_pages = pnum;
return -ENOMEM;
}
- store->filemap[pnum]->index = pnum;
+ store->filemap[pnum]->index = pnum + offset;
}
store->file_pages = pnum;
@@ -885,6 +934,28 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
}
}
+static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
+{
+ unsigned long bit;
+ struct page *page;
+ void *paddr;
+ unsigned long chunk = block >> bitmap->counts.chunkshift;
+ int set = 0;
+
+ page = filemap_get_page(&bitmap->storage, chunk);
+ if (!page)
+ return -EINVAL;
+ bit = file_page_offset(&bitmap->storage, chunk);
+ paddr = kmap_atomic(page);
+ if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
+ set = test_bit(bit, paddr);
+ else
+ set = test_bit_le(bit, paddr);
+ kunmap_atomic(paddr);
+ return set;
+}
+
+
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
@@ -935,7 +1006,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
*/
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
- unsigned long i, chunks, index, oldindex, bit;
+ unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
struct page *page = NULL;
unsigned long bit_cnt = 0;
struct file *file;
@@ -981,6 +1052,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (!bitmap->mddev->bitmap_info.external)
offset = sizeof(bitmap_super_t);
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
+
for (i = 0; i < chunks; i++) {
int b;
index = file_page_index(&bitmap->storage, i);
@@ -1001,7 +1075,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
bitmap->mddev,
bitmap->mddev->bitmap_info.offset,
page,
- index, count);
+ index + node_offset, count);
if (ret)
goto err;
@@ -1207,7 +1281,6 @@ void bitmap_daemon_work(struct mddev *mddev)
j < bitmap->storage.file_pages
&& !test_bit(BITMAP_STALE, &bitmap->flags);
j++) {
-
if (test_page_attr(bitmap, j,
BITMAP_PAGE_DIRTY))
/* bitmap_unplug will handle the rest */
@@ -1530,11 +1603,13 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
return;
}
if (!*bmc) {
- *bmc = 2 | (needed ? NEEDED_MASK : 0);
+ *bmc = 2;
bitmap_count_page(&bitmap->counts, offset, 1);
bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
+ if (needed)
+ *bmc |= NEEDED_MASK;
spin_unlock_irq(&bitmap->counts.lock);
}
@@ -1591,6 +1666,10 @@ static void bitmap_free(struct bitmap *bitmap)
if (!bitmap) /* there was no bitmap */
return;
+ if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
+ bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
+ md_cluster_stop(bitmap->mddev);
+
/* Shouldn't be needed - but just in case.... */
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes) == 0);
@@ -1636,7 +1715,7 @@ void bitmap_destroy(struct mddev *mddev)
* initialize the bitmap structure
* if this returns an error, bitmap_destroy must be called to do clean up
*/
-int bitmap_create(struct mddev *mddev)
+struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
struct bitmap *bitmap;
sector_t blocks = mddev->resync_max_sectors;
@@ -1650,7 +1729,7 @@ int bitmap_create(struct mddev *mddev)
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&bitmap->counts.lock);
atomic_set(&bitmap->pending_writes, 0);
@@ -1659,6 +1738,7 @@ int bitmap_create(struct mddev *mddev)
init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
+ bitmap->cluster_slot = slot;
if (mddev->kobj.sd)
bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
@@ -1706,12 +1786,14 @@ int bitmap_create(struct mddev *mddev)
printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
bitmap->counts.pages, bmname(bitmap));
- mddev->bitmap = bitmap;
- return test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
+ err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
+ if (err)
+ goto error;
+ return bitmap;
error:
bitmap_free(bitmap);
- return err;
+ return ERR_PTR(err);
}
int bitmap_load(struct mddev *mddev)
@@ -1765,6 +1847,60 @@ out:
}
EXPORT_SYMBOL_GPL(bitmap_load);
+/* Loads the bitmap associated with slot and copies the resync information
+ * to our bitmap
+ */
+int bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *low, sector_t *high, bool clear_bits)
+{
+ int rv = 0, i, j;
+ sector_t block, lo = 0, hi = 0;
+ struct bitmap_counts *counts;
+ struct bitmap *bitmap = bitmap_create(mddev, slot);
+
+ if (IS_ERR(bitmap))
+ return PTR_ERR(bitmap);
+
+ rv = bitmap_read_sb(bitmap);
+ if (rv)
+ goto err;
+
+ rv = bitmap_init_from_disk(bitmap, 0);
+ if (rv)
+ goto err;
+
+ counts = &bitmap->counts;
+ for (j = 0; j < counts->chunks; j++) {
+ block = (sector_t)j << counts->chunkshift;
+ if (bitmap_file_test_bit(bitmap, block)) {
+ if (!lo)
+ lo = block;
+ hi = block;
+ bitmap_file_clear_bit(bitmap, block);
+ bitmap_set_memory_bits(mddev->bitmap, block, 1);
+ bitmap_file_set_bit(mddev->bitmap, block);
+ }
+ }
+
+ if (clear_bits) {
+ bitmap_update_sb(bitmap);
+ /* Setting this for every page should be enough.
+ * We do not require both write_all and BITMAP_PAGE_DIRTY either
+ */
+ for (i = 0; i < bitmap->storage.file_pages; i++)
+ set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
+ bitmap_write_all(bitmap);
+ bitmap_unplug(bitmap);
+ }
+ *low = lo;
+ *high = hi;
+err:
+ bitmap_free(bitmap);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
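bitmap_copy_from_slot() is exported for use by the new md-cluster code; a hypothetical caller is sketched below purely to illustrate how the returned low/high range and the clear_bits flag are meant to be used (both the function below and md_recover_range() are invented names, not part of this patch).

	/* Hypothetical caller, for illustration only. */
	static void recover_bitmap_of_failed_node(struct mddev *mddev, int failed_slot)
	{
		sector_t lo = 0, hi = 0;

		/* Fold the failed node's on-disk bitmap into our own and clear
		 * its slot so the work is not repeated after a restart. */
		if (bitmap_copy_from_slot(mddev, failed_slot, &lo, &hi, true)) {
			pr_err("md: failed to copy bitmap from slot %d\n", failed_slot);
			return;
		}

		/* [lo, hi] bounds the regions the failed node had marked dirty;
		 * resync only that range (md_recover_range() is a made-up helper). */
		if (hi > lo)
			md_recover_range(mddev, lo, hi);
	}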
+
+
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
unsigned long chunk_kb;
@@ -1849,7 +1985,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memset(&store, 0, sizeof(store));
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
ret = bitmap_storage_alloc(&store, chunks,
- !bitmap->mddev->bitmap_info.external);
+ !bitmap->mddev->bitmap_info.external,
+ bitmap->cluster_slot);
if (ret)
goto err;
@@ -2021,13 +2158,18 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
return -EINVAL;
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
+ struct bitmap *bitmap;
mddev->pers->quiesce(mddev, 1);
- rv = bitmap_create(mddev);
- if (!rv)
+ bitmap = bitmap_create(mddev, -1);
+ if (IS_ERR(bitmap))
+ rv = PTR_ERR(bitmap);
+ else {
+ mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
- if (rv) {
- bitmap_destroy(mddev);
- mddev->bitmap_info.offset = 0;
+ if (rv) {
+ bitmap_destroy(mddev);
+ mddev->bitmap_info.offset = 0;
+ }
}
mddev->pers->quiesce(mddev, 0);
if (rv)
@@ -2186,6 +2328,8 @@ __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
+ if (mddev_is_clustered(mddev))
+ return sprintf(page, "clustered\n");
return sprintf(page, "%s\n", (mddev->bitmap_info.external
? "external" : "internal"));
}
@@ -2198,7 +2342,8 @@ static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
return -EBUSY;
if (strncmp(buf, "external", 8) == 0)
mddev->bitmap_info.external = 1;
- else if (strncmp(buf, "internal", 8) == 0)
+ else if ((strncmp(buf, "internal", 8) == 0) ||
+ (strncmp(buf, "clustered", 9) == 0))
mddev->bitmap_info.external = 0;
else
return -EINVAL;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 30210b9c4ef9..f1f4dd01090d 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -130,8 +130,9 @@ typedef struct bitmap_super_s {
__le32 write_behind; /* 60 number of outstanding write-behind writes */
__le32 sectors_reserved; /* 64 number of 512-byte sectors that are
* reserved for the bitmap. */
-
- __u8 pad[256 - 68]; /* set to zero */
+ __le32 nodes; /* 68 the maximum number of nodes in cluster. */
+ __u8 cluster_name[64]; /* 72 cluster name to which this md belongs */
+ __u8 pad[256 - 136]; /* set to zero */
} bitmap_super_t;
/* notes:
@@ -226,12 +227,13 @@ struct bitmap {
wait_queue_head_t behind_wait;
struct kernfs_node *sysfs_can_clear;
+ int cluster_slot; /* Slot offset for clustered env */
};
/* the bitmap API */
/* these are used only by md/bitmap */
-int bitmap_create(struct mddev *mddev);
+struct bitmap *bitmap_create(struct mddev *mddev, int slot);
int bitmap_load(struct mddev *mddev);
void bitmap_flush(struct mddev *mddev);
void bitmap_destroy(struct mddev *mddev);
@@ -260,6 +262,8 @@ void bitmap_daemon_work(struct mddev *mddev);
int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, int init);
+int bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *lo, sector_t *hi, bool clear_bits);
#endif
#endif
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 13f547a4eeb6..3ddd1162334d 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -8,6 +8,7 @@
#include "dm.h"
#include <linux/hash.h>
+#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -124,32 +125,41 @@ static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
* sorted queue.
*/
#define NR_QUEUE_LEVELS 16u
+#define NR_SENTINELS (NR_QUEUE_LEVELS * 3)
+
+#define WRITEBACK_PERIOD HZ
struct queue {
+ unsigned nr_elts;
+ bool current_writeback_sentinels;
+ unsigned long next_writeback;
struct list_head qs[NR_QUEUE_LEVELS];
+ struct list_head sentinels[NR_SENTINELS];
};
static void queue_init(struct queue *q)
{
unsigned i;
- for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ q->nr_elts = 0;
+ q->current_writeback_sentinels = false;
+ q->next_writeback = 0;
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
INIT_LIST_HEAD(q->qs + i);
+ INIT_LIST_HEAD(q->sentinels + i);
+ INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
+ INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
+ }
}
-/*
- * Checks to see if the queue is empty.
- * FIXME: reduce cpu usage.
- */
-static bool queue_empty(struct queue *q)
+static unsigned queue_size(struct queue *q)
{
- unsigned i;
-
- for (i = 0; i < NR_QUEUE_LEVELS; i++)
- if (!list_empty(q->qs + i))
- return false;
+ return q->nr_elts;
+}
- return true;
+static bool queue_empty(struct queue *q)
+{
+ return q->nr_elts == 0;
}
/*
@@ -157,24 +167,19 @@ static bool queue_empty(struct queue *q)
*/
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
+ q->nr_elts++;
list_add_tail(elt, q->qs + level);
}
-static void queue_remove(struct list_head *elt)
+static void queue_remove(struct queue *q, struct list_head *elt)
{
+ q->nr_elts--;
list_del(elt);
}
-/*
- * Shifts all regions down one level. This has no effect on the order of
- * the queue.
- */
-static void queue_shift_down(struct queue *q)
+static bool is_sentinel(struct queue *q, struct list_head *h)
{
- unsigned level;
-
- for (level = 1; level < NR_QUEUE_LEVELS; level++)
- list_splice_init(q->qs + level, q->qs + level - 1);
+ return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
}
/*
@@ -184,10 +189,12 @@ static void queue_shift_down(struct queue *q)
static struct list_head *queue_peek(struct queue *q)
{
unsigned level;
+ struct list_head *h;
for (level = 0; level < NR_QUEUE_LEVELS; level++)
- if (!list_empty(q->qs + level))
- return q->qs[level].next;
+ list_for_each(h, q->qs + level)
+ if (!is_sentinel(q, h))
+ return h;
return NULL;
}
@@ -197,16 +204,34 @@ static struct list_head *queue_pop(struct queue *q)
struct list_head *r = queue_peek(q);
if (r) {
+ q->nr_elts--;
list_del(r);
-
- /* have we just emptied the bottom level? */
- if (list_empty(q->qs))
- queue_shift_down(q);
}
return r;
}
+/*
+ * Pops an entry from a level that is not past a sentinel.
+ */
+static struct list_head *queue_pop_old(struct queue *q)
+{
+ unsigned level;
+ struct list_head *h;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each(h, q->qs + level) {
+ if (is_sentinel(q, h))
+ break;
+
+ q->nr_elts--;
+ list_del(h);
+ return h;
+ }
+
+ return NULL;
+}
+
static struct list_head *list_pop(struct list_head *lh)
{
struct list_head *r = lh->next;
@@ -217,6 +242,62 @@ static struct list_head *list_pop(struct list_head *lh)
return r;
}
+static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
+{
+ if (q->current_writeback_sentinels)
+ return q->sentinels + NR_QUEUE_LEVELS + level;
+ else
+ return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
+}
+
+static void queue_update_writeback_sentinels(struct queue *q)
+{
+ unsigned i;
+ struct list_head *h;
+
+ if (time_after(jiffies, q->next_writeback)) {
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
+ h = writeback_sentinel(q, i);
+ list_del(h);
+ list_add_tail(h, q->qs + i);
+ }
+
+ q->next_writeback = jiffies + WRITEBACK_PERIOD;
+ q->current_writeback_sentinels = !q->current_writeback_sentinels;
+ }
+}
+
+/*
+ * Sometimes we want to iterate through entries that have been pushed since
+ * a certain event. We use sentinel entries on the queues to delimit these
+ * 'tick' events.
+ */
+static void queue_tick(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
+ list_del(q->sentinels + i);
+ list_add_tail(q->sentinels + i, q->qs + i);
+ }
+}
+
+typedef void (*iter_fn)(struct list_head *, void *);
+static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
+{
+ unsigned i;
+ struct list_head *h;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++) {
+ list_for_each_prev(h, q->qs + i) {
+ if (is_sentinel(q, h))
+ break;
+
+ fn(h, context);
+ }
+ }
+}
+
/*----------------------------------------------------------------*/
/*
@@ -232,8 +313,6 @@ struct entry {
*/
bool dirty:1;
unsigned hit_count;
- unsigned generation;
- unsigned tick;
};
/*
@@ -481,7 +560,6 @@ static bool in_cache(struct mq_policy *mq, struct entry *e)
*/
static void push(struct mq_policy *mq, struct entry *e)
{
- e->tick = mq->tick;
hash_insert(mq, e);
if (in_cache(mq, e))
@@ -496,7 +574,11 @@ static void push(struct mq_policy *mq, struct entry *e)
*/
static void del(struct mq_policy *mq, struct entry *e)
{
- queue_remove(&e->list);
+ if (in_cache(mq, e))
+ queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
+ else
+ queue_remove(&mq->pre_cache, &e->list);
+
hash_remove(e);
}
@@ -518,18 +600,24 @@ static struct entry *pop(struct mq_policy *mq, struct queue *q)
return e;
}
-static struct entry *peek(struct queue *q)
+static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
{
- struct list_head *h = queue_peek(q);
- return h ? container_of(h, struct entry, list) : NULL;
+ struct entry *e;
+ struct list_head *h = queue_pop_old(q);
+
+ if (!h)
+ return NULL;
+
+ e = container_of(h, struct entry, list);
+ hash_remove(e);
+
+ return e;
}
-/*
- * Has this entry already been updated?
- */
-static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
+static struct entry *peek(struct queue *q)
{
- return mq->tick == e->tick;
+ struct list_head *h = queue_peek(q);
+ return h ? container_of(h, struct entry, list) : NULL;
}
/*
@@ -583,20 +671,9 @@ static void check_generation(struct mq_policy *mq)
* Whenever we use an entry we bump up it's hit counter, and push it to the
* back to it's current level.
*/
-static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
+static void requeue(struct mq_policy *mq, struct entry *e)
{
- if (updated_this_tick(mq, e))
- return;
-
- e->hit_count++;
- mq->hit_count++;
check_generation(mq);
-
- /* generation adjustment, to stop the counts increasing forever. */
- /* FIXME: divide? */
- /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
- e->generation = mq->generation;
-
del(mq, e);
push(mq, e);
}
@@ -703,7 +780,7 @@ static int cache_entry_found(struct mq_policy *mq,
struct entry *e,
struct policy_result *result)
{
- requeue_and_update_tick(mq, e);
+ requeue(mq, e);
if (in_cache(mq, e)) {
result->op = POLICY_HIT;
@@ -740,8 +817,6 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
new_e->oblock = e->oblock;
new_e->dirty = false;
new_e->hit_count = e->hit_count;
- new_e->generation = e->generation;
- new_e->tick = e->tick;
del(mq, e);
free_entry(&mq->pre_cache_pool, e);
@@ -757,18 +832,16 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
int data_dir, struct policy_result *result)
{
int r = 0;
- bool updated = updated_this_tick(mq, e);
- if ((!discarded_oblock && updated) ||
- !should_promote(mq, e, discarded_oblock, data_dir)) {
- requeue_and_update_tick(mq, e);
+ if (!should_promote(mq, e, discarded_oblock, data_dir)) {
+ requeue(mq, e);
result->op = POLICY_MISS;
} else if (!can_migrate)
r = -EWOULDBLOCK;
else {
- requeue_and_update_tick(mq, e);
+ requeue(mq, e);
r = pre_cache_to_cache(mq, e, result);
}
@@ -795,7 +868,6 @@ static void insert_in_pre_cache(struct mq_policy *mq,
e->dirty = false;
e->oblock = oblock;
e->hit_count = 1;
- e->generation = mq->generation;
push(mq, e);
}
@@ -828,7 +900,6 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
e->oblock = oblock;
e->dirty = false;
e->hit_count = 1;
- e->generation = mq->generation;
push(mq, e);
result->cblock = infer_cblock(&mq->cache_pool, e);
@@ -905,12 +976,37 @@ static void mq_destroy(struct dm_cache_policy *p)
kfree(mq);
}
+static void update_pre_cache_hits(struct list_head *h, void *context)
+{
+ struct entry *e = container_of(h, struct entry, list);
+ e->hit_count++;
+}
+
+static void update_cache_hits(struct list_head *h, void *context)
+{
+ struct mq_policy *mq = context;
+ struct entry *e = container_of(h, struct entry, list);
+ e->hit_count++;
+ mq->hit_count++;
+}
+
static void copy_tick(struct mq_policy *mq)
{
- unsigned long flags;
+ unsigned long flags, tick;
spin_lock_irqsave(&mq->tick_lock, flags);
- mq->tick = mq->tick_protected;
+ tick = mq->tick_protected;
+ if (tick != mq->tick) {
+ queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
+ queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
+ queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
+ mq->tick = tick;
+ }
+
+ queue_tick(&mq->pre_cache);
+ queue_tick(&mq->cache_dirty);
+ queue_tick(&mq->cache_clean);
+ queue_update_writeback_sentinels(&mq->cache_dirty);
spin_unlock_irqrestore(&mq->tick_lock, flags);
}
@@ -1001,7 +1097,6 @@ static int mq_load_mapping(struct dm_cache_policy *p,
e->oblock = oblock;
e->dirty = false; /* this gets corrected in a minute */
e->hit_count = hint_valid ? hint : 1;
- e->generation = mq->generation;
push(mq, e);
return 0;
@@ -1012,10 +1107,15 @@ static int mq_save_hints(struct mq_policy *mq, struct queue *q,
{
int r;
unsigned level;
+ struct list_head *h;
struct entry *e;
for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each_entry(e, q->qs + level, list) {
+ list_for_each(h, q->qs + level) {
+ if (is_sentinel(q, h))
+ continue;
+
+ e = container_of(h, struct entry, list);
r = fn(context, infer_cblock(&mq->cache_pool, e),
e->oblock, e->hit_count);
if (r)
@@ -1087,10 +1187,27 @@ static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
return r;
}
+#define CLEAN_TARGET_PERCENTAGE 25
+
+static bool clean_target_met(struct mq_policy *mq)
+{
+ /*
+ * Cache entries may not be populated. So we cannot rely on the
+ * size of the clean queue.
+ */
+ unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
+ unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;
+
+ return nr_clean >= target;
+}
+
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
dm_cblock_t *cblock)
{
- struct entry *e = pop(mq, &mq->cache_dirty);
+ struct entry *e = pop_old(mq, &mq->cache_dirty);
+
+ if (!e && !clean_target_met(mq))
+ e = pop(mq, &mq->cache_dirty);
if (!e)
return -ENODATA;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 713a96237a80..5503e43e5f28 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -228,7 +228,7 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
*
* tcw: Compatible implementation of the block chaining mode used
* by the TrueCrypt device encryption system (prior to version 4.1).
- * For more info see: http://www.truecrypt.org
+ * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
* It operates on full 512 byte sectors and uses CBC
* with an IV derived from initial key and the sector number.
* In addition, whitening value is applied on every sector, whitening
@@ -1124,15 +1124,15 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->cc;
- struct bio *base_bio = io->base_bio;
struct bio *clone;
/*
- * The block layer might modify the bvec array, so always
- * copy the required bvecs because we need the original
- * one in order to decrypt the whole bio data *afterwards*.
+ * We need the original biovec array in order to decrypt
+ * the whole bio data *afterwards* -- thanks to immutable
+ * biovecs we don't need to worry about the block layer
+ * modifying the biovec array; so leverage bio_clone_fast().
*/
- clone = bio_clone_bioset(base_bio, gfp, cc->bs);
+ clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
if (!clone)
return 1;
@@ -1816,6 +1816,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
+ ret = -EINVAL;
while (opt_params--) {
opt_string = dm_shift_arg(&as);
if (!opt_string) {
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 42c3a27a14cc..57b6a1901c91 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -236,7 +236,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
delayed->context = dc;
- delayed->expires = expires = jiffies + (delay * HZ / 1000);
+ delayed->expires = expires = jiffies + msecs_to_jiffies(delay);
mutex_lock(&delayed_bios_lock);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c8a18e4ee9dc..720ceeb7fa9b 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto err_unlock_md_type;
}
- if (dm_get_md_type(md) == DM_TYPE_NONE)
+ if (dm_get_md_type(md) == DM_TYPE_NONE) {
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
- else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+ /* setup md->queue to reflect md's type (may block) */
+ r = dm_setup_md_queue(md);
+ if (r) {
+ DMWARN("unable to set up device queue for new table.");
+ goto err_unlock_md_type;
+ }
+ } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
DMWARN("can't change device type after initial table load.");
r = -EINVAL;
goto err_unlock_md_type;
}
- /* setup md->queue to reflect md's type (may block) */
- r = dm_setup_md_queue(md);
- if (r) {
- DMWARN("unable to set up device queue for new table.");
- goto err_unlock_md_type;
- }
dm_unlock_md_type(md);
/* stage inactive table */
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 03177ca0b009..058256d2eeea 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -17,7 +17,9 @@
#define DM_LOG_USERSPACE_VSN "1.3.0"
-struct flush_entry {
+#define FLUSH_ENTRY_POOL_SIZE 16
+
+struct dm_dirty_log_flush_entry {
int type;
region_t region;
struct list_head list;
@@ -34,22 +36,14 @@ struct flush_entry {
struct log_c {
struct dm_target *ti;
struct dm_dev *log_dev;
- uint32_t region_size;
- region_t region_count;
- uint64_t luid;
- char uuid[DM_UUID_LEN];
char *usr_argv_str;
uint32_t usr_argc;
- /*
- * in_sync_hint gets set when doing is_remote_recovering. It
- * represents the first region that needs recovery. IOW, the
- * first zero bit of sync_bits. This can be useful for to limit
- * traffic for calls like is_remote_recovering and get_resync_work,
- * but be take care in its use for anything else.
- */
- uint64_t in_sync_hint;
+ uint32_t region_size;
+ region_t region_count;
+ uint64_t luid;
+ char uuid[DM_UUID_LEN];
/*
* Mark and clear requests are held until a flush is issued
@@ -62,6 +56,15 @@ struct log_c {
struct list_head clear_list;
/*
+ * in_sync_hint gets set when doing is_remote_recovering. It
+ * represents the first region that needs recovery. IOW, the
+ * first zero bit of sync_bits. This can be useful to limit
+ * traffic for calls like is_remote_recovering and get_resync_work,
+ * but take care when using it for anything else.
+ */
+ uint64_t in_sync_hint;
+
+ /*
* Workqueue for flush of clear region requests.
*/
struct workqueue_struct *dmlog_wq;
@@ -72,19 +75,11 @@ struct log_c {
* Combine userspace flush and mark requests for efficiency.
*/
uint32_t integrated_flush;
-};
-
-static mempool_t *flush_entry_pool;
-static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
-{
- return kmalloc(sizeof(struct flush_entry), gfp_mask);
-}
+ mempool_t *flush_entry_pool;
+};
-static void flush_entry_free(void *element, void *pool_data)
-{
- kfree(element);
-}
+static struct kmem_cache *_flush_entry_cache;
static int userspace_do_request(struct log_c *lc, const char *uuid,
int request_type, char *data, size_t data_size,
@@ -254,6 +249,14 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
goto out;
}
+ lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE,
+ _flush_entry_cache);
+ if (!lc->flush_entry_pool) {
+ DMERR("Failed to create flush_entry_pool");
+ r = -ENOMEM;
+ goto out;
+ }
+
/*
* Send table string and get back any opened device.
*/
@@ -310,6 +313,8 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
out:
kfree(devices_rdata);
if (r) {
+ if (lc->flush_entry_pool)
+ mempool_destroy(lc->flush_entry_pool);
kfree(lc);
kfree(ctr_str);
} else {
@@ -338,6 +343,8 @@ static void userspace_dtr(struct dm_dirty_log *log)
if (lc->log_dev)
dm_put_device(lc->ti, lc->log_dev);
+ mempool_destroy(lc->flush_entry_pool);
+
kfree(lc->usr_argv_str);
kfree(lc);
@@ -461,7 +468,7 @@ static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
int r = 0;
- struct flush_entry *fe;
+ struct dm_dirty_log_flush_entry *fe;
list_for_each_entry(fe, flush_list, list) {
r = userspace_do_request(lc, lc->uuid, fe->type,
@@ -481,7 +488,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
int r = 0;
int count;
uint32_t type = 0;
- struct flush_entry *fe, *tmp_fe;
+ struct dm_dirty_log_flush_entry *fe, *tmp_fe;
LIST_HEAD(tmp_list);
uint64_t group[MAX_FLUSH_GROUP_COUNT];
@@ -563,7 +570,8 @@ static int userspace_flush(struct dm_dirty_log *log)
LIST_HEAD(clear_list);
int mark_list_is_empty;
int clear_list_is_empty;
- struct flush_entry *fe, *tmp_fe;
+ struct dm_dirty_log_flush_entry *fe, *tmp_fe;
+ mempool_t *flush_entry_pool = lc->flush_entry_pool;
spin_lock_irqsave(&lc->flush_lock, flags);
list_splice_init(&lc->mark_list, &mark_list);
@@ -643,10 +651,10 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
unsigned long flags;
struct log_c *lc = log->context;
- struct flush_entry *fe;
+ struct dm_dirty_log_flush_entry *fe;
/* Wait for an allocation, but _never_ fail */
- fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
+ fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO);
BUG_ON(!fe);
spin_lock_irqsave(&lc->flush_lock, flags);
@@ -672,7 +680,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
unsigned long flags;
struct log_c *lc = log->context;
- struct flush_entry *fe;
+ struct dm_dirty_log_flush_entry *fe;
/*
* If we fail to allocate, we skip the clearing of
@@ -680,7 +688,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
* to cause the region to be resync'ed when the
* device is activated next time.
*/
- fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
+ fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC);
if (!fe) {
DMERR("Failed to allocate memory to clear region.");
return;
@@ -733,7 +741,6 @@ static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
static void userspace_set_region_sync(struct dm_dirty_log *log,
region_t region, int in_sync)
{
- int r;
struct log_c *lc = log->context;
struct {
region_t r;
@@ -743,12 +750,12 @@ static void userspace_set_region_sync(struct dm_dirty_log *log,
pkg.r = region;
pkg.i = (int64_t)in_sync;
- r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
- (char *)&pkg, sizeof(pkg), NULL, NULL);
+ (void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
+ (char *)&pkg, sizeof(pkg), NULL, NULL);
/*
* It would be nice to be able to report failures.
- * However, it is easy emough to detect and resolve.
+ * However, it is easy enough to detect and resolve.
*/
return;
}
@@ -886,18 +893,16 @@ static int __init userspace_dirty_log_init(void)
{
int r = 0;
- flush_entry_pool = mempool_create(100, flush_entry_alloc,
- flush_entry_free, NULL);
-
- if (!flush_entry_pool) {
- DMWARN("Unable to create flush_entry_pool: No memory.");
+ _flush_entry_cache = KMEM_CACHE(dm_dirty_log_flush_entry, 0);
+ if (!_flush_entry_cache) {
+ DMWARN("Unable to create flush_entry_cache: No memory.");
return -ENOMEM;
}
r = dm_ulog_tfr_init();
if (r) {
DMWARN("Unable to initialize userspace log communications");
- mempool_destroy(flush_entry_pool);
+ kmem_cache_destroy(_flush_entry_cache);
return r;
}
@@ -905,7 +910,7 @@ static int __init userspace_dirty_log_init(void)
if (r) {
DMWARN("Couldn't register userspace dirty log type");
dm_ulog_tfr_exit();
- mempool_destroy(flush_entry_pool);
+ kmem_cache_destroy(_flush_entry_cache);
return r;
}
@@ -917,7 +922,7 @@ static void __exit userspace_dirty_log_exit(void)
{
dm_dirty_log_type_unregister(&_userspace_type);
dm_ulog_tfr_exit();
- mempool_destroy(flush_entry_pool);
+ kmem_cache_destroy(_flush_entry_cache);
DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
return;
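Taken together, the dm-log-userspace-base hunks above replace the single module-wide mempool with a per-instance mempool backed by one shared kmem_cache, presumably so that one busy log instance cannot exhaust a pool shared by all of them. A stripped-down sketch of that create/destroy pairing follows; the names are made up and it is only an illustration of the pattern, not the driver code.

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct example_instance {
		mempool_t *pool;
	};

	static struct kmem_cache *example_cache;	/* KMEM_CACHE(...) in module init */

	static int example_instance_init(struct example_instance *inst)
	{
		/* Guarantees a minimum of 16 preallocated objects per instance. */
		inst->pool = mempool_create_slab_pool(16, example_cache);
		if (!inst->pool)
			return -ENOMEM;
		return 0;
	}

	static void example_instance_exit(struct example_instance *inst)
	{
		/* Only safe once every object allocated from the pool is freed. */
		mempool_destroy(inst->pool);
	}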
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 39ad9664d397..fdf8ec304f8d 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,6 +172,7 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
char *rdata, size_t *rdata_size)
{
int r = 0;
+ unsigned long tmo;
size_t dummy = 0;
int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
struct dm_ulog_request *tfr = prealloced_ulog_tfr;
@@ -236,11 +237,11 @@ resend:
goto out;
}
- r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
+ tmo = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
spin_lock(&receiving_list_lock);
list_del_init(&(pkg.list));
spin_unlock(&receiving_list_lock);
- if (!r) {
+ if (!tmo) {
DMWARN("[%s] Request timed out: [%u/%u] - retrying",
(strlen(uuid) > 8) ?
(uuid + (strlen(uuid) - 8)) : (uuid),
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
new file mode 100644
index 000000000000..93e08446a87d
--- /dev/null
+++ b/drivers/md/dm-log-writes.c
@@ -0,0 +1,825 @@
+/*
+ * Copyright (C) 2014 Facebook. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/device-mapper.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#define DM_MSG_PREFIX "log-writes"
+
+/*
+ * This target will sequentially log all writes to the target device onto the
+ * log device. This is helpful for replaying writes to check for fs consistency
+ * at all times. This target provides a mechanism to mark specific events to
+ * check data at a later time. So for example you would:
+ *
+ * write data
+ * fsync
+ * dmsetup message /dev/whatever mark mymark
+ * unmount /mnt/test
+ *
+ * Then replay the log up to mymark and check the contents of the replay to
+ * verify it matches what was written.
+ *
+ * We log writes only after they have been flushed; this makes the log describe
+ * roughly the order in which the data hits the actual disk, not its cache. So
+ * for example the following sequence (W means write, C means complete)
+ *
+ * Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd
+ *
+ * Would result in the log looking like this:
+ *
+ * c,a,flush,fuad,b,<other writes>,<next flush>
+ *
+ * This is meant to help expose problems where file systems do not properly wait
+ * on data being written before invoking a FLUSH. FUA bypasses cache so once it
+ * completes it is added to the log as it should be on disk.
+ *
+ * We treat DISCARDs as if they don't bypass cache so that they are logged in
+ * order of completion along with the normal writes. If we didn't do it this
+ * way we would process all the discards first and then write all the data, when
+ * in fact we want to do the data and the discard in the order that they
+ * completed.
+ */
+#define LOG_FLUSH_FLAG (1 << 0)
+#define LOG_FUA_FLAG (1 << 1)
+#define LOG_DISCARD_FLAG (1 << 2)
+#define LOG_MARK_FLAG (1 << 3)
+
+#define WRITE_LOG_VERSION 1
+#define WRITE_LOG_MAGIC 0x6a736677736872
+
+/*
+ * The disk format for this is braindead simple.
+ *
+ * At byte 0 we have our super, followed by the following sequence for
+ * nr_entries:
+ *
+ * [ 1 sector ][ entry->nr_sectors ]
+ * [log_write_entry][ data written ]
+ *
+ * The log_write_entry takes up a full sector so we can have arbitrary length
+ * marks and it leaves us room for extra content in the future.
+ */
+
+/*
+ * Basic info about the log for userspace.
+ */
+struct log_write_super {
+ __le64 magic;
+ __le64 version;
+ __le64 nr_entries;
+ __le32 sectorsize;
+};
+
+/*
+ * sector - the sector we wrote.
+ * nr_sectors - the number of sectors we wrote.
+ * flags - flags for this log entry.
+ * data_len - the size of the data in this log entry, this is for private log
+ * entry stuff, the MARK data provided by userspace for example.
+ */
+struct log_write_entry {
+ __le64 sector;
+ __le64 nr_sectors;
+ __le64 flags;
+ __le64 data_len;
+};
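Given the on-disk layout above (one super block at byte 0, then for each entry a one-sector header followed by the data it logged), a userspace replay tool only needs a small loop to walk the log. A rough sketch in userspace C is below; it assumes a little-endian host and a 512-byte sector size, and it only skips over the payload rather than replaying it (a real tool would also honour the MARK data_len and write the payload back out).

	#define _XOPEN_SOURCE 500
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	struct lw_super { uint64_t magic, version, nr_entries; uint32_t sectorsize; };
	struct lw_entry { uint64_t sector, nr_sectors, flags, data_len; };

	static int walk_log(int fd)
	{
		struct lw_super super;
		struct lw_entry entry;
		char hdr[512];
		uint64_t i;
		off_t off;

		if (pread(fd, &super, sizeof(super), 0) != (ssize_t)sizeof(super))
			return -1;
		off = super.sectorsize;			/* the super occupies sector 0 */

		for (i = 0; i < super.nr_entries; i++) {
			if (pread(fd, hdr, sizeof(hdr), off) != (ssize_t)sizeof(hdr))
				return -1;
			memcpy(&entry, hdr, sizeof(entry));
			off += super.sectorsize;	/* one-sector entry header */
			printf("entry %llu: sector %llu, %llu sectors, flags 0x%llx\n",
			       (unsigned long long)i, (unsigned long long)entry.sector,
			       (unsigned long long)entry.nr_sectors,
			       (unsigned long long)entry.flags);
			if (!(entry.flags & (1 << 2)))	/* DISCARDs log no data */
				off += (off_t)entry.nr_sectors * 512;
		}
		return 0;
	}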
+
+struct log_writes_c {
+ struct dm_dev *dev;
+ struct dm_dev *logdev;
+ u64 logged_entries;
+ u32 sectorsize;
+ atomic_t io_blocks;
+ atomic_t pending_blocks;
+ sector_t next_sector;
+ sector_t end_sector;
+ bool logging_enabled;
+ bool device_supports_discard;
+ spinlock_t blocks_lock;
+ struct list_head unflushed_blocks;
+ struct list_head logging_blocks;
+ wait_queue_head_t wait;
+ struct task_struct *log_kthread;
+};
+
+struct pending_block {
+ int vec_cnt;
+ u64 flags;
+ sector_t sector;
+ sector_t nr_sectors;
+ char *data;
+ u32 datalen;
+ struct list_head list;
+ struct bio_vec vecs[0];
+};
+
+struct per_bio_data {
+ struct pending_block *block;
+};
+
+static void put_pending_block(struct log_writes_c *lc)
+{
+ if (atomic_dec_and_test(&lc->pending_blocks)) {
+ smp_mb__after_atomic();
+ if (waitqueue_active(&lc->wait))
+ wake_up(&lc->wait);
+ }
+}
+
+static void put_io_block(struct log_writes_c *lc)
+{
+ if (atomic_dec_and_test(&lc->io_blocks)) {
+ smp_mb__after_atomic();
+ if (waitqueue_active(&lc->wait))
+ wake_up(&lc->wait);
+ }
+}
+
+static void log_end_io(struct bio *bio, int err)
+{
+ struct log_writes_c *lc = bio->bi_private;
+ struct bio_vec *bvec;
+ int i;
+
+ if (err) {
+ unsigned long flags;
+
+ DMERR("Error writing log block, error=%d", err);
+ spin_lock_irqsave(&lc->blocks_lock, flags);
+ lc->logging_enabled = false;
+ spin_unlock_irqrestore(&lc->blocks_lock, flags);
+ }
+
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ put_io_block(lc);
+ bio_put(bio);
+}
+
+/*
+ * Meant to be called if there is an error, it will free all the pages
+ * associated with the block.
+ */
+static void free_pending_block(struct log_writes_c *lc,
+ struct pending_block *block)
+{
+ int i;
+
+ for (i = 0; i < block->vec_cnt; i++) {
+ if (block->vecs[i].bv_page)
+ __free_page(block->vecs[i].bv_page);
+ }
+ kfree(block->data);
+ kfree(block);
+ put_pending_block(lc);
+}
+
+static int write_metadata(struct log_writes_c *lc, void *entry,
+ size_t entrylen, void *data, size_t datalen,
+ sector_t sector)
+{
+ struct bio *bio;
+ struct page *page;
+ void *ptr;
+ size_t ret;
+
+ bio = bio_alloc(GFP_KERNEL, 1);
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+ }
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = lc->logdev->bdev;
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ DMERR("Couldn't alloc log page");
+ bio_put(bio);
+ goto error;
+ }
+
+ ptr = kmap_atomic(page);
+ memcpy(ptr, entry, entrylen);
+ if (datalen)
+ memcpy(ptr + entrylen, data, datalen);
+ memset(ptr + entrylen + datalen, 0,
+ lc->sectorsize - entrylen - datalen);
+ kunmap_atomic(ptr);
+
+ ret = bio_add_page(bio, page, lc->sectorsize, 0);
+ if (ret != lc->sectorsize) {
+ DMERR("Couldn't add page to the log block");
+ goto error_bio;
+ }
+ submit_bio(WRITE, bio);
+ return 0;
+error_bio:
+ bio_put(bio);
+ __free_page(page);
+error:
+ put_io_block(lc);
+ return -1;
+}
+
+static int log_one_block(struct log_writes_c *lc,
+ struct pending_block *block, sector_t sector)
+{
+ struct bio *bio;
+ struct log_write_entry entry;
+ size_t ret;
+ int i;
+
+ entry.sector = cpu_to_le64(block->sector);
+ entry.nr_sectors = cpu_to_le64(block->nr_sectors);
+ entry.flags = cpu_to_le64(block->flags);
+ entry.data_len = cpu_to_le64(block->datalen);
+ if (write_metadata(lc, &entry, sizeof(entry), block->data,
+ block->datalen, sector)) {
+ free_pending_block(lc, block);
+ return -1;
+ }
+
+ if (!block->vec_cnt)
+ goto out;
+ sector++;
+
+ bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+ }
+ atomic_inc(&lc->io_blocks);
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = lc->logdev->bdev;
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ for (i = 0; i < block->vec_cnt; i++) {
+ /*
+ * The page offset is always 0 because we allocate a new page
+ * for every bvec in the original bio for simplicity's sake.
+ */
+ ret = bio_add_page(bio, block->vecs[i].bv_page,
+ block->vecs[i].bv_len, 0);
+ if (ret != block->vecs[i].bv_len) {
+ atomic_inc(&lc->io_blocks);
+ submit_bio(WRITE, bio);
+ bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+ }
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = lc->logdev->bdev;
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ ret = bio_add_page(bio, block->vecs[i].bv_page,
+ block->vecs[i].bv_len, 0);
+ if (ret != block->vecs[i].bv_len) {
+ DMERR("Couldn't add page on new bio?");
+ bio_put(bio);
+ goto error;
+ }
+ }
+ sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
+ }
+ submit_bio(WRITE, bio);
+out:
+ kfree(block->data);
+ kfree(block);
+ put_pending_block(lc);
+ return 0;
+error:
+ free_pending_block(lc, block);
+ put_io_block(lc);
+ return -1;
+}
+
+static int log_super(struct log_writes_c *lc)
+{
+ struct log_write_super super;
+
+ super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
+ super.version = cpu_to_le64(WRITE_LOG_VERSION);
+ super.nr_entries = cpu_to_le64(lc->logged_entries);
+ super.sectorsize = cpu_to_le32(lc->sectorsize);
+
+ if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
+ DMERR("Couldn't write super");
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline sector_t logdev_last_sector(struct log_writes_c *lc)
+{
+ return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static int log_writes_kthread(void *arg)
+{
+ struct log_writes_c *lc = (struct log_writes_c *)arg;
+ sector_t sector = 0;
+
+ while (!kthread_should_stop()) {
+ bool super = false;
+ bool logging_enabled;
+ struct pending_block *block = NULL;
+ int ret;
+
+ spin_lock_irq(&lc->blocks_lock);
+ if (!list_empty(&lc->logging_blocks)) {
+ block = list_first_entry(&lc->logging_blocks,
+ struct pending_block, list);
+ list_del_init(&block->list);
+ if (!lc->logging_enabled)
+ goto next;
+
+ sector = lc->next_sector;
+ if (block->flags & LOG_DISCARD_FLAG)
+ lc->next_sector++;
+ else
+ lc->next_sector += block->nr_sectors + 1;
+
+ /*
+ * Apparently the size of the device may not be known
+ * right away, so handle this properly.
+ */
+ if (!lc->end_sector)
+ lc->end_sector = logdev_last_sector(lc);
+ if (lc->end_sector &&
+ lc->next_sector >= lc->end_sector) {
+ DMERR("Ran out of space on the logdev");
+ lc->logging_enabled = false;
+ goto next;
+ }
+ lc->logged_entries++;
+ atomic_inc(&lc->io_blocks);
+
+ super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
+ if (super)
+ atomic_inc(&lc->io_blocks);
+ }
+next:
+ logging_enabled = lc->logging_enabled;
+ spin_unlock_irq(&lc->blocks_lock);
+ if (block) {
+ if (logging_enabled) {
+ ret = log_one_block(lc, block, sector);
+ if (!ret && super)
+ ret = log_super(lc);
+ if (ret) {
+ spin_lock_irq(&lc->blocks_lock);
+ lc->logging_enabled = false;
+ spin_unlock_irq(&lc->blocks_lock);
+ }
+ } else
+ free_pending_block(lc, block);
+ continue;
+ }
+
+ if (!try_to_freeze()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop() &&
+ !atomic_read(&lc->pending_blocks))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Construct a log-writes mapping:
+ * log-writes <dev_path> <log_dev_path>
+ */
+static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct log_writes_c *lc;
+ struct dm_arg_set as;
+ const char *devname, *logdevname;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ if (argc < 2) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
+ if (!lc) {
+ ti->error = "Cannot allocate context";
+ return -ENOMEM;
+ }
+ spin_lock_init(&lc->blocks_lock);
+ INIT_LIST_HEAD(&lc->unflushed_blocks);
+ INIT_LIST_HEAD(&lc->logging_blocks);
+ init_waitqueue_head(&lc->wait);
+ lc->sectorsize = 1 << SECTOR_SHIFT;
+ atomic_set(&lc->io_blocks, 0);
+ atomic_set(&lc->pending_blocks, 0);
+
+ devname = dm_shift_arg(&as);
+ if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev)) {
+ ti->error = "Device lookup failed";
+ goto bad;
+ }
+
+ logdevname = dm_shift_arg(&as);
+ if (dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev)) {
+ ti->error = "Log device lookup failed";
+ dm_put_device(ti, lc->dev);
+ goto bad;
+ }
+
+ lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
+ if (!lc->log_kthread) {
+ ti->error = "Couldn't alloc kthread";
+ dm_put_device(ti, lc->dev);
+ dm_put_device(ti, lc->logdev);
+ goto bad;
+ }
+
+ /* We put the super at sector 0, start logging at sector 1 */
+ lc->next_sector = 1;
+ lc->logging_enabled = true;
+ lc->end_sector = logdev_last_sector(lc);
+ lc->device_supports_discard = true;
+
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+ ti->num_discard_bios = 1;
+ ti->discards_supported = true;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->private = lc;
+ return 0;
+
+bad:
+ kfree(lc);
+ return -EINVAL;
+}
+
+static int log_mark(struct log_writes_c *lc, char *data)
+{
+ struct pending_block *block;
+ size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);
+
+ block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
+ if (!block) {
+ DMERR("Error allocating pending block");
+ return -ENOMEM;
+ }
+
+ block->data = kstrndup(data, maxsize, GFP_KERNEL);
+ if (!block->data) {
+ DMERR("Error copying mark data");
+ kfree(block);
+ return -ENOMEM;
+ }
+ atomic_inc(&lc->pending_blocks);
+ block->datalen = strlen(block->data);
+ block->flags |= LOG_MARK_FLAG;
+ spin_lock_irq(&lc->blocks_lock);
+ list_add_tail(&block->list, &lc->logging_blocks);
+ spin_unlock_irq(&lc->blocks_lock);
+ wake_up_process(lc->log_kthread);
+ return 0;
+}
+
+static void log_writes_dtr(struct dm_target *ti)
+{
+ struct log_writes_c *lc = ti->private;
+
+ spin_lock_irq(&lc->blocks_lock);
+ list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
+ spin_unlock_irq(&lc->blocks_lock);
+
+ /*
+ * This is just nice to have since it'll update the super to include the
+ * unflushed blocks; if it fails we don't really care.
+ */
+ log_mark(lc, "dm-log-writes-end");
+ wake_up_process(lc->log_kthread);
+ wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
+ !atomic_read(&lc->pending_blocks));
+ kthread_stop(lc->log_kthread);
+
+ WARN_ON(!list_empty(&lc->logging_blocks));
+ WARN_ON(!list_empty(&lc->unflushed_blocks));
+ dm_put_device(ti, lc->dev);
+ dm_put_device(ti, lc->logdev);
+ kfree(lc);
+}
+
+static void normal_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct log_writes_c *lc = ti->private;
+
+ bio->bi_bdev = lc->dev->bdev;
+}
+
+static int log_writes_map(struct dm_target *ti, struct bio *bio)
+{
+ struct log_writes_c *lc = ti->private;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ struct pending_block *block;
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ size_t alloc_size;
+ int i = 0;
+ bool flush_bio = (bio->bi_rw & REQ_FLUSH);
+ bool fua_bio = (bio->bi_rw & REQ_FUA);
+ bool discard_bio = (bio->bi_rw & REQ_DISCARD);
+
+ pb->block = NULL;
+
+ /* Don't bother doing anything if logging has been disabled */
+ if (!lc->logging_enabled)
+ goto map_bio;
+
+ /*
+ * Map reads as normal.
+ */
+ if (bio_data_dir(bio) == READ)
+ goto map_bio;
+
+ /* No sectors and not a flush? Don't care */
+ if (!bio_sectors(bio) && !flush_bio)
+ goto map_bio;
+
+ /*
+ * Discards will have bi_size set but there's no actual data, so just
+ * allocate the size of the pending block.
+ */
+ if (discard_bio)
+ alloc_size = sizeof(struct pending_block);
+ else
+ alloc_size = sizeof(struct pending_block) + sizeof(struct bio_vec) * bio_segments(bio);
+
+ block = kzalloc(alloc_size, GFP_NOIO);
+ if (!block) {
+ DMERR("Error allocating pending block");
+ spin_lock_irq(&lc->blocks_lock);
+ lc->logging_enabled = false;
+ spin_unlock_irq(&lc->blocks_lock);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&block->list);
+ pb->block = block;
+ atomic_inc(&lc->pending_blocks);
+
+ if (flush_bio)
+ block->flags |= LOG_FLUSH_FLAG;
+ if (fua_bio)
+ block->flags |= LOG_FUA_FLAG;
+ if (discard_bio)
+ block->flags |= LOG_DISCARD_FLAG;
+
+ block->sector = bio->bi_iter.bi_sector;
+ block->nr_sectors = bio_sectors(bio);
+
+ /* We don't need the data, just submit */
+ if (discard_bio) {
+ WARN_ON(flush_bio || fua_bio);
+ if (lc->device_supports_discard)
+ goto map_bio;
+ bio_endio(bio, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /* Flush bio, splice the unflushed blocks onto this list and submit */
+ if (flush_bio && !bio_sectors(bio)) {
+ spin_lock_irq(&lc->blocks_lock);
+ list_splice_init(&lc->unflushed_blocks, &block->list);
+ spin_unlock_irq(&lc->blocks_lock);
+ goto map_bio;
+ }
+
+ /*
+ * We will write this bio somewhere else way later so we need to copy
+ * the actual contents into new pages so we know the data will always be
+ * there.
+ *
+ * We do this because this could be a bio from O_DIRECT in which case we
+ * can't just hold onto the page until some later point, we have to
+ * manually copy the contents.
+ */
+ bio_for_each_segment(bv, bio, iter) {
+ struct page *page;
+ void *src, *dst;
+
+ page = alloc_page(GFP_NOIO);
+ if (!page) {
+ DMERR("Error allocing page");
+ free_pending_block(lc, block);
+ spin_lock_irq(&lc->blocks_lock);
+ lc->logging_enabled = false;
+ spin_unlock_irq(&lc->blocks_lock);
+ return -ENOMEM;
+ }
+
+ src = kmap_atomic(bv.bv_page);
+ dst = kmap_atomic(page);
+ memcpy(dst, src + bv.bv_offset, bv.bv_len);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+ block->vecs[i].bv_page = page;
+ block->vecs[i].bv_len = bv.bv_len;
+ block->vec_cnt++;
+ i++;
+ }
+
+ /* Had a flush with data in it, weird */
+ if (flush_bio) {
+ spin_lock_irq(&lc->blocks_lock);
+ list_splice_init(&lc->unflushed_blocks, &block->list);
+ spin_unlock_irq(&lc->blocks_lock);
+ }
+map_bio:
+ normal_map_bio(ti, bio);
+ return DM_MAPIO_REMAPPED;
+}
+
+static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
+{
+ struct log_writes_c *lc = ti->private;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
+ if (bio_data_dir(bio) == WRITE && pb->block) {
+ struct pending_block *block = pb->block;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lc->blocks_lock, flags);
+ if (block->flags & LOG_FLUSH_FLAG) {
+ list_splice_tail_init(&block->list, &lc->logging_blocks);
+ list_add_tail(&block->list, &lc->logging_blocks);
+ wake_up_process(lc->log_kthread);
+ } else if (block->flags & LOG_FUA_FLAG) {
+ list_add_tail(&block->list, &lc->logging_blocks);
+ wake_up_process(lc->log_kthread);
+ } else
+ list_add_tail(&block->list, &lc->unflushed_blocks);
+ spin_unlock_irqrestore(&lc->blocks_lock, flags);
+ }
+
+ return error;
+}
+
+/*
+ * INFO format: <logged entries> <highest allocated sector>
+ */
+static void log_writes_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result,
+ unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct log_writes_c *lc = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%llu %llu", lc->logged_entries,
+ (unsigned long long)lc->next_sector - 1);
+ if (!lc->logging_enabled)
+ DMEMIT(" logging_disabled");
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
+ break;
+ }
+}
+
+static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
+ unsigned long arg)
+{
+ struct log_writes_c *lc = ti->private;
+ struct dm_dev *dev = lc->dev;
+ int r = 0;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+}
+
+static int log_writes_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct log_writes_c *lc = ti->private;
+ struct request_queue *q = bdev_get_queue(lc->dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = lc->dev->bdev;
+ bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static int log_writes_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn,
+ void *data)
+{
+ struct log_writes_c *lc = ti->private;
+
+ return fn(ti, lc->dev, 0, ti->len, data);
+}
+
+/*
+ * Messages supported:
+ * mark <mark data> - specify the marked data.
+ */
+static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r = -EINVAL;
+ struct log_writes_c *lc = ti->private;
+
+ if (argc != 2) {
+ DMWARN("Invalid log-writes message arguments, expect 2 arguments, got %d", argc);
+ return r;
+ }
+
+ if (!strcasecmp(argv[0], "mark"))
+ r = log_mark(lc, argv[1]);
+ else
+ DMWARN("Unrecognised log writes target message received: %s", argv[0]);
+
+ return r;
+}
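+
+/*
+ * For example (device name and mark text are placeholders), a mark is
+ * typically injected from userspace with:
+ *   dmsetup message <dev> 0 mark <mark data>
+ */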
+
+static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct log_writes_c *lc = ti->private;
+ struct request_queue *q = bdev_get_queue(lc->dev->bdev);
+
+ if (!q || !blk_queue_discard(q)) {
+ lc->device_supports_discard = false;
+ limits->discard_granularity = 1 << SECTOR_SHIFT;
+ limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
+ }
+}
+
+static struct target_type log_writes_target = {
+ .name = "log-writes",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = log_writes_ctr,
+ .dtr = log_writes_dtr,
+ .map = log_writes_map,
+ .end_io = normal_end_io,
+ .status = log_writes_status,
+ .ioctl = log_writes_ioctl,
+ .merge = log_writes_merge,
+ .message = log_writes_message,
+ .iterate_devices = log_writes_iterate_devices,
+ .io_hints = log_writes_io_hints,
+};
+
+static int __init dm_log_writes_init(void)
+{
+ int r = dm_register_target(&log_writes_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void __exit dm_log_writes_exit(void)
+{
+ dm_unregister_target(&log_writes_target);
+}
+
+module_init(dm_log_writes_init);
+module_exit(dm_log_writes_exit);
+
+MODULE_DESCRIPTION(DM_NAME " log writes target");
+MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d376dc87716e..63953477a07c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -428,7 +428,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
} else {
/* blk-mq request-based interface */
*__clone = blk_get_request(bdev_get_queue(bdev),
- rq_data_dir(rq), GFP_KERNEL);
+ rq_data_dir(rq), GFP_ATOMIC);
if (IS_ERR(*__clone))
/* ENOMEM, requeue */
return r;
@@ -1627,7 +1627,7 @@ static int __pgpath_busy(struct pgpath *pgpath)
{
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
- return dm_underlying_device_busy(q);
+ return blk_lld_busy(q);
}
/*
@@ -1703,7 +1703,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index c62c5ab6aed5..7e818f5f1dc4 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -11,7 +11,7 @@
struct dm_sysfs_attr {
struct attribute attr;
ssize_t (*show)(struct mapped_device *, char *);
- ssize_t (*store)(struct mapped_device *, char *);
+ ssize_t (*store)(struct mapped_device *, const char *, size_t count);
};
#define DM_ATTR_RO(_name) \
@@ -39,6 +39,31 @@ static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
return ret;
}
+#define DM_ATTR_RW(_name) \
+struct dm_sysfs_attr dm_attr_##_name = \
+ __ATTR(_name, S_IRUGO | S_IWUSR, dm_attr_##_name##_show, dm_attr_##_name##_store)
+
+static ssize_t dm_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t count)
+{
+ struct dm_sysfs_attr *dm_attr;
+ struct mapped_device *md;
+ ssize_t ret;
+
+ dm_attr = container_of(attr, struct dm_sysfs_attr, attr);
+ if (!dm_attr->store)
+ return -EIO;
+
+ md = dm_get_from_kobject(kobj);
+ if (!md)
+ return -EINVAL;
+
+ ret = dm_attr->store(md, page, count);
+ dm_put(md);
+
+ return ret;
+}
+
static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf)
{
if (dm_copy_name_and_uuid(md, buf, NULL))
@@ -64,25 +89,33 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
return strlen(buf);
}
+static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
+{
+ sprintf(buf, "%d\n", dm_use_blk_mq(md));
+
+ return strlen(buf);
+}
+
static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
static DM_ATTR_RO(suspended);
+static DM_ATTR_RO(use_blk_mq);
+static DM_ATTR_RW(rq_based_seq_io_merge_deadline);
static struct attribute *dm_attrs[] = {
&dm_attr_name.attr,
&dm_attr_uuid.attr,
&dm_attr_suspended.attr,
+ &dm_attr_use_blk_mq.attr,
+ &dm_attr_rq_based_seq_io_merge_deadline.attr,
NULL,
};
static const struct sysfs_ops dm_sysfs_ops = {
.show = dm_attr_show,
+ .store = dm_attr_store,
};
-/*
- * dm kobject is embedded in mapped_device structure
- * no need to define release function here
- */
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6554d9148927..d9b00b8565c6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,6 +18,8 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/mount.h>
#define DM_MSG_PREFIX "table"
@@ -372,23 +374,18 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
int r;
dev_t uninitialized_var(dev);
struct dm_dev_internal *dd;
- unsigned int major, minor;
struct dm_table *t = ti->table;
- char dummy;
+ struct block_device *bdev;
BUG_ON(!t);
- if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
- /* Extract the major/minor numbers */
- dev = MKDEV(major, minor);
- if (MAJOR(dev) != major || MINOR(dev) != minor)
- return -EOVERFLOW;
+ /* convert the path to a device */
+ bdev = lookup_bdev(path);
+ if (IS_ERR(bdev)) {
+ dev = name_to_dev_t(path);
+ if (!dev)
+ return -ENODEV;
} else {
- /* convert the path to a device */
- struct block_device *bdev = lookup_bdev(path);
-
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
dev = bdev->bd_dev;
bdput(bdev);
}
@@ -939,7 +936,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
}
-static int dm_table_alloc_md_mempools(struct dm_table *t)
+static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
unsigned type = dm_table_get_type(t);
unsigned per_bio_data_size = 0;
@@ -957,7 +954,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t)
per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
}
- t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
if (!t->mempools)
return -ENOMEM;
@@ -1127,7 +1124,7 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_alloc_md_mempools(t);
+ r = dm_table_alloc_md_mempools(t, t->md);
if (r)
DMERR("unable to allocate mempools");
@@ -1339,14 +1336,14 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
continue;
if (ti->flush_supported)
- return 1;
+ return true;
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_flush_capable, &flush))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static bool dm_table_discard_zeroes_data(struct dm_table *t)
@@ -1359,10 +1356,10 @@ static bool dm_table_discard_zeroes_data(struct dm_table *t)
ti = dm_table_get_target(t, i++);
if (ti->discard_zeroes_data_unsupported)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
@@ -1408,10 +1405,10 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
if (!ti->type->iterate_devices ||
!ti->type->iterate_devices(ti, func, NULL))
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1468,14 +1465,14 @@ static bool dm_table_supports_discards(struct dm_table *t)
continue;
if (ti->discards_supported)
- return 1;
+ return true;
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_discard_capable, NULL))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1677,20 +1674,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
return r;
}
-int dm_table_any_busy_target(struct dm_table *t)
-{
- unsigned i;
- struct dm_target *ti;
-
- for (i = 0; i < t->num_targets; i++) {
- ti = t->targets + i;
- if (ti->type->busy && ti->type->busy(ti))
- return 1;
- }
-
- return 0;
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
@@ -1709,9 +1692,13 @@ void dm_table_run_md_queue_async(struct dm_table *t)
md = dm_table_get_md(t);
queue = dm_get_md_queue(md);
if (queue) {
- spin_lock_irqsave(queue->queue_lock, flags);
- blk_run_queue_async(queue);
- spin_unlock_irqrestore(queue->queue_lock, flags);
+ if (queue->mq_ops)
+ blk_mq_run_hw_queues(queue, true);
+ else {
+ spin_lock_irqsave(queue->queue_lock, flags);
+ blk_run_queue_async(queue);
+ spin_unlock_irqrestore(queue->queue_lock, flags);
+ }
}
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 7a7bab8947ae..66616db33e6f 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -18,20 +18,39 @@
#include <linux/module.h>
#include <linux/device-mapper.h>
+#include <linux/reboot.h>
#include <crypto/hash.h>
#define DM_MSG_PREFIX "verity"
+#define DM_VERITY_ENV_LENGTH 42
+#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
+
#define DM_VERITY_IO_VEC_INLINE 16
#define DM_VERITY_MEMPOOL_SIZE 4
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
#define DM_VERITY_MAX_LEVELS 63
+#define DM_VERITY_MAX_CORRUPTED_ERRS 100
+
+#define DM_VERITY_OPT_LOGGING "ignore_corruption"
+#define DM_VERITY_OPT_RESTART "restart_on_corruption"
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+enum verity_mode {
+ DM_VERITY_MODE_EIO,
+ DM_VERITY_MODE_LOGGING,
+ DM_VERITY_MODE_RESTART
+};
+
+enum verity_block_type {
+ DM_VERITY_BLOCK_TYPE_DATA,
+ DM_VERITY_BLOCK_TYPE_METADATA
+};
+
struct dm_verity {
struct dm_dev *data_dev;
struct dm_dev *hash_dev;
@@ -54,6 +73,8 @@ struct dm_verity {
unsigned digest_size; /* digest size for the current hash algorithm */
unsigned shash_descsize;/* the size of temporary space for crypto */
int hash_failed; /* set to 1 if hash of any block failed */
+ enum verity_mode mode; /* mode for handling verification errors */
+ unsigned corrupted_errs;/* Number of errors for corrupted blocks */
mempool_t *vec_mempool; /* mempool of bio vector */
@@ -175,6 +196,57 @@ static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
}
/*
+ * Handle verification errors.
+ */
+static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
+ unsigned long long block)
+{
+ char verity_env[DM_VERITY_ENV_LENGTH];
+ char *envp[] = { verity_env, NULL };
+ const char *type_str = "";
+ struct mapped_device *md = dm_table_get_md(v->ti->table);
+
+ /* Corruption should be visible in device status in all modes */
+ v->hash_failed = 1;
+
+ if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
+ goto out;
+
+ v->corrupted_errs++;
+
+ switch (type) {
+ case DM_VERITY_BLOCK_TYPE_DATA:
+ type_str = "data";
+ break;
+ case DM_VERITY_BLOCK_TYPE_METADATA:
+ type_str = "metadata";
+ break;
+ default:
+ BUG();
+ }
+
+ DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
+ block);
+
+ if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
+ DMERR("%s: reached maximum errors", v->data_dev->name);
+
+ snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
+ DM_VERITY_ENV_VAR_NAME, type, block);
+
+ kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
+
+out:
+ if (v->mode == DM_VERITY_MODE_LOGGING)
+ return 0;
+
+ if (v->mode == DM_VERITY_MODE_RESTART)
+ kernel_restart("dm-verity device corrupted");
+
+ return 1;
+}
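+
+/*
+ * The uevent sent above carries a single variable of the form
+ * "DM_VERITY_ERR_BLOCK_NR=<type>,<block>", e.g. "DM_VERITY_ERR_BLOCK_NR=0,12345"
+ * for a corrupted data block (the block number here is made up).
+ */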
+
+/*
* Verify hash of a metadata block pertaining to the specified data block
* ("block" argument) at a specified level ("level" argument).
*
@@ -251,11 +323,11 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block,
goto release_ret_r;
}
if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
- DMERR_LIMIT("metadata block %llu is corrupted",
- (unsigned long long)hash_block);
- v->hash_failed = 1;
- r = -EIO;
- goto release_ret_r;
+ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block)) {
+ r = -EIO;
+ goto release_ret_r;
+ }
} else
aux->hash_verified = 1;
}
@@ -367,10 +439,9 @@ test_block_hash:
return r;
}
if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
- DMERR_LIMIT("data block %llu is corrupted",
- (unsigned long long)(io->block + b));
- v->hash_failed = 1;
- return -EIO;
+ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+ io->block + b))
+ return -EIO;
}
}
@@ -546,6 +617,19 @@ static void verity_status(struct dm_target *ti, status_type_t type,
else
for (x = 0; x < v->salt_size; x++)
DMEMIT("%02x", v->salt[x]);
+ if (v->mode != DM_VERITY_MODE_EIO) {
+ DMEMIT(" 1 ");
+ switch (v->mode) {
+ case DM_VERITY_MODE_LOGGING:
+ DMEMIT(DM_VERITY_OPT_LOGGING);
+ break;
+ case DM_VERITY_MODE_RESTART:
+ DMEMIT(DM_VERITY_OPT_RESTART);
+ break;
+ default:
+ BUG();
+ }
+ }
break;
}
}
@@ -647,13 +731,19 @@ static void verity_dtr(struct dm_target *ti)
static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
struct dm_verity *v;
- unsigned num;
+ struct dm_arg_set as;
+ const char *opt_string;
+ unsigned int num, opt_params;
unsigned long long num_ll;
int r;
int i;
sector_t hash_position;
char dummy;
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of feature args"},
+ };
+
v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
if (!v) {
ti->error = "Cannot allocate verity structure";
@@ -668,8 +758,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- if (argc != 10) {
- ti->error = "Invalid argument count: exactly 10 arguments required";
+ if (argc < 10) {
+ ti->error = "Not enough arguments";
r = -EINVAL;
goto bad;
}
@@ -790,6 +880,39 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
+ argv += 10;
+ argc -= 10;
+
+ /* Optional parameters */
+ if (argc) {
+ as.argc = argc;
+ as.argv = argv;
+
+ r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+ if (r)
+ goto bad;
+
+ while (opt_params) {
+ opt_params--;
+ opt_string = dm_shift_arg(&as);
+ if (!opt_string) {
+ ti->error = "Not enough feature arguments";
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (!strcasecmp(opt_string, DM_VERITY_OPT_LOGGING))
+ v->mode = DM_VERITY_MODE_LOGGING;
+ else if (!strcasecmp(opt_string, DM_VERITY_OPT_RESTART))
+ v->mode = DM_VERITY_MODE_RESTART;
+ else {
+ ti->error = "Invalid feature arguments";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+ }
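+ /*
+ * In the table line these modes are enabled by appending the optional
+ * argument count and name after the ten base verity arguments, e.g.
+ * "... 1 restart_on_corruption" or "... 1 ignore_corruption".
+ */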
+
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8001fe9e3434..a930b72314ac 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,9 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/elevator.h> /* for rq_end_sector() */
+#include <linux/blk-mq.h>
#include <trace/events/block.h>
@@ -216,8 +219,29 @@ struct mapped_device {
struct kthread_worker kworker;
struct task_struct *kworker_task;
+
+ /* for request-based merge heuristic in dm_request_fn() */
+ unsigned seq_rq_merge_deadline_usecs;
+ int last_rq_rw;
+ sector_t last_rq_pos;
+ ktime_t last_rq_start_time;
+
+ /* for blk-mq request-based DM support */
+ struct blk_mq_tag_set tag_set;
+ bool use_blk_mq;
};
+#ifdef CONFIG_DM_MQ_DEFAULT
+static bool use_blk_mq = true;
+#else
+static bool use_blk_mq = false;
+#endif
+
+bool dm_use_blk_mq(struct mapped_device *md)
+{
+ return md->use_blk_mq;
+}
+
/*
* For mempools pre-allocation at the table loading time.
*/
@@ -250,35 +274,35 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
*/
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
+static unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned ios = ACCESS_ONCE(*reserved_ios);
- unsigned modified_ios = 0;
+ unsigned param = ACCESS_ONCE(*module_param);
+ unsigned modified_param = 0;
- if (!ios)
- modified_ios = def;
- else if (ios > max)
- modified_ios = max;
+ if (!param)
+ modified_param = def;
+ else if (param > max)
+ modified_param = max;
- if (modified_ios) {
- (void)cmpxchg(reserved_ios, ios, modified_ios);
- ios = modified_ios;
+ if (modified_param) {
+ (void)cmpxchg(module_param, param, modified_param);
+ param = modified_param;
}
- return ios;
+ return param;
}
unsigned dm_get_reserved_bio_based_ios(void)
{
- return __dm_get_reserved_ios(&reserved_bio_based_ios,
+ return __dm_get_module_param(&reserved_bio_based_ios,
RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
unsigned dm_get_reserved_rq_based_ios(void)
{
- return __dm_get_reserved_ios(&reserved_rq_based_ios,
+ return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
@@ -1017,6 +1041,11 @@ static void end_clone_bio(struct bio *clone, int error)
blk_update_request(tio->orig, 0, nr_bytes);
}
+static struct dm_rq_target_io *tio_from_request(struct request *rq)
+{
+ return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+}
+
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
@@ -1024,10 +1053,13 @@ static void end_clone_bio(struct bio *clone, int error)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
+ int nr_requests_pending;
+
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
- if (!md_in_flight(md))
+ nr_requests_pending = md_in_flight(md);
+ if (!nr_requests_pending)
wake_up(&md->wait);
/*
@@ -1036,8 +1068,13 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue)
- blk_run_queue_async(md->queue);
+ if (run_queue) {
+ if (md->queue->mq_ops)
+ blk_mq_run_hw_queues(md->queue, true);
+ else if (!nr_requests_pending ||
+ (nr_requests_pending >= md->queue->nr_congestion_on))
+ blk_run_queue_async(md->queue);
+ }
/*
* dm_put() must be at the end of this function. See the comment above
@@ -1045,16 +1082,29 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
dm_put(md);
}
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
{
struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+
+ WARN_ON_ONCE(must_be_mapped && !clone->q);
blk_rq_unprep_clone(clone);
- if (clone->q && clone->q->mq_ops)
+
+ if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+ /* stacked on blk-mq queue(s) */
tio->ti->type->release_clone_rq(clone);
- else
- free_clone_request(tio->md, clone);
- free_rq_tio(tio);
+ else if (!md->queue->mq_ops)
+ /* request_fn queue stacked on request_fn queue(s) */
+ free_clone_request(md, clone);
+ /*
+ * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+ * no need to call free_clone_request() because we leverage blk-mq by
+ * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+ */
+
+ if (!md->queue->mq_ops)
+ free_rq_tio(tio);
}
/*
@@ -1082,38 +1132,54 @@ static void dm_end_request(struct request *clone, int error)
rq->sense_len = clone->sense_len;
}
- free_rq_clone(clone);
- blk_end_request_all(rq, error);
+ free_rq_clone(clone, true);
+ if (!rq->q->mq_ops)
+ blk_end_request_all(rq, error);
+ else
+ blk_mq_end_request(rq, error);
rq_completed(md, rw, true);
}
static void dm_unprep_request(struct request *rq)
{
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
struct request *clone = tio->clone;
- rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
+ if (!rq->q->mq_ops) {
+ rq->special = NULL;
+ rq->cmd_flags &= ~REQ_DONTPREP;
+ }
if (clone)
- free_rq_clone(clone);
+ free_rq_clone(clone, false);
}
/*
* Requeue the original request of a clone.
*/
-static void dm_requeue_unmapped_original_request(struct mapped_device *md,
- struct request *rq)
+static void old_requeue_request(struct request *rq)
{
- int rw = rq_data_dir(rq);
struct request_queue *q = rq->q;
unsigned long flags;
- dm_unprep_request(rq);
-
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_requeue_unmapped_original_request(struct mapped_device *md,
+ struct request *rq)
+{
+ int rw = rq_data_dir(rq);
+
+ dm_unprep_request(rq);
+
+ if (!rq->q->mq_ops)
+ old_requeue_request(rq);
+ else {
+ blk_mq_requeue_request(rq);
+ blk_mq_kick_requeue_list(rq->q);
+ }
rq_completed(md, rw, false);
}
@@ -1125,35 +1191,44 @@ static void dm_requeue_unmapped_request(struct request *clone)
dm_requeue_unmapped_original_request(tio->md, tio->orig);
}
-static void __stop_queue(struct request_queue *q)
-{
- blk_stop_queue(q);
-}
-
-static void stop_queue(struct request_queue *q)
+static void old_stop_queue(struct request_queue *q)
{
unsigned long flags;
+ if (blk_queue_stopped(q))
+ return;
+
spin_lock_irqsave(q->queue_lock, flags);
- __stop_queue(q);
+ blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void __start_queue(struct request_queue *q)
+static void stop_queue(struct request_queue *q)
{
- if (blk_queue_stopped(q))
- blk_start_queue(q);
+ if (!q->mq_ops)
+ old_stop_queue(q);
+ else
+ blk_mq_stop_hw_queues(q);
}
-static void start_queue(struct request_queue *q)
+static void old_start_queue(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __start_queue(q);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static void start_queue(struct request_queue *q)
+{
+ if (!q->mq_ops)
+ old_start_queue(q);
+ else
+ blk_mq_start_stopped_hw_queues(q, true);
+}
+
static void dm_done(struct request *clone, int error, bool mapped)
{
int r = error;
@@ -1192,13 +1267,20 @@ static void dm_done(struct request *clone, int error, bool mapped)
static void dm_softirq_done(struct request *rq)
{
bool mapped = true;
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
struct request *clone = tio->clone;
+ int rw;
if (!clone) {
- blk_end_request_all(rq, tio->error);
- rq_completed(tio->md, rq_data_dir(rq), false);
- free_rq_tio(tio);
+ rw = rq_data_dir(rq);
+ if (!rq->q->mq_ops) {
+ blk_end_request_all(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ free_rq_tio(tio);
+ } else {
+ blk_mq_end_request(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ }
return;
}
@@ -1214,7 +1296,7 @@ static void dm_softirq_done(struct request *rq)
*/
static void dm_complete_request(struct request *rq, int error)
{
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
blk_complete_request(rq);
@@ -1233,7 +1315,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
}
/*
- * Called with the clone's queue lock held
+ * Called with the clone's queue lock held (for non-blk-mq)
*/
static void end_clone_request(struct request *clone, int error)
{
@@ -1693,7 +1775,7 @@ out:
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static void _dm_request(struct request_queue *q, struct bio *bio)
+static void dm_make_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
@@ -1725,16 +1807,6 @@ int dm_request_based(struct mapped_device *md)
return blk_queue_stackable(md->queue);
}
-static void dm_request(struct request_queue *q, struct bio *bio)
-{
- struct mapped_device *md = q->queuedata;
-
- if (dm_request_based(md))
- blk_queue_bio(q, bio);
- else
- _dm_request(q, bio);
-}
-
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
int r;
@@ -1787,15 +1859,25 @@ static int setup_clone(struct request *clone, struct request *rq,
static struct request *clone_rq(struct request *rq, struct mapped_device *md,
struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
- struct request *clone = alloc_clone_request(md, gfp_mask);
+ /*
+ * Do not allocate a clone if tio->clone was already set
+ * (see: dm_mq_queue_rq).
+ */
+ bool alloc_clone = !tio->clone;
+ struct request *clone;
- if (!clone)
- return NULL;
+ if (alloc_clone) {
+ clone = alloc_clone_request(md, gfp_mask);
+ if (!clone)
+ return NULL;
+ } else
+ clone = tio->clone;
blk_rq_init(NULL, clone);
if (setup_clone(clone, rq, tio, gfp_mask)) {
/* -ENOMEM */
- free_clone_request(md, clone);
+ if (alloc_clone)
+ free_clone_request(md, clone);
return NULL;
}
@@ -1804,6 +1886,19 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
static void map_tio_request(struct kthread_work *work);
+static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
+ struct mapped_device *md)
+{
+ tio->md = md;
+ tio->ti = NULL;
+ tio->clone = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ memset(&tio->info, 0, sizeof(tio->info));
+ if (md->kworker_task)
+ init_kthread_work(&tio->work, map_tio_request);
+}
+
static struct dm_rq_target_io *prep_tio(struct request *rq,
struct mapped_device *md, gfp_t gfp_mask)
{
@@ -1815,13 +1910,7 @@ static struct dm_rq_target_io *prep_tio(struct request *rq,
if (!tio)
return NULL;
- tio->md = md;
- tio->ti = NULL;
- tio->clone = NULL;
- tio->orig = rq;
- tio->error = 0;
- memset(&tio->info, 0, sizeof(tio->info));
- init_kthread_work(&tio->work, map_tio_request);
+ init_tio(tio, rq, md);
table = dm_get_live_table(md, &srcu_idx);
if (!dm_table_mq_request_based(table)) {
@@ -1865,11 +1954,11 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
* DM_MAPIO_REQUEUE : the original request needs to be requeued
* < 0 : the request was completed due to failure
*/
-static int map_request(struct dm_target *ti, struct request *rq,
+static int map_request(struct dm_rq_target_io *tio, struct request *rq,
struct mapped_device *md)
{
int r;
- struct dm_rq_target_io *tio = rq->special;
+ struct dm_target *ti = tio->ti;
struct request *clone = NULL;
if (tio->clone) {
@@ -1884,7 +1973,7 @@ static int map_request(struct dm_target *ti, struct request *rq,
}
if (IS_ERR(clone))
return DM_MAPIO_REQUEUE;
- if (setup_clone(clone, rq, tio, GFP_KERNEL)) {
+ if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone);
return DM_MAPIO_REQUEUE;
@@ -1925,15 +2014,24 @@ static void map_tio_request(struct kthread_work *work)
struct request *rq = tio->orig;
struct mapped_device *md = tio->md;
- if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE)
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
dm_requeue_unmapped_original_request(md, rq);
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
- blk_start_request(orig);
+ if (!orig->q->mq_ops)
+ blk_start_request(orig);
+ else
+ blk_mq_start_request(orig);
atomic_inc(&md->pending[rq_data_dir(orig)]);
+ if (md->seq_rq_merge_deadline_usecs) {
+ md->last_rq_pos = rq_end_sector(orig);
+ md->last_rq_rw = rq_data_dir(orig);
+ md->last_rq_start_time = ktime_get();
+ }
+
/*
* Hold the md reference here for the in-flight I/O.
* We can't rely on the reference count by device opener,
@@ -1944,6 +2042,45 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md);
}
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+ return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count)
+{
+ unsigned deadline;
+
+ if (!dm_request_based(md) || md->use_blk_mq)
+ return count;
+
+ if (kstrtouint(buf, 10, &deadline))
+ return -EINVAL;
+
+ if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+ deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+ md->seq_rq_merge_deadline_usecs = deadline;
+
+ return count;
+}
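+
+/*
+ * A usage sketch for the knob above, assuming dm's existing sysfs layout
+ * under /sys/block/dm-N/dm/ (the device number is illustrative):
+ *   echo 20 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
+ * sets a 20 microsecond deadline; writing 0 disables the merge heuristic
+ * and values above 100000 are clamped to 100000.
+ */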
+
+static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+ ktime_t kt_deadline;
+
+ if (!md->seq_rq_merge_deadline_usecs)
+ return false;
+
+ kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+ kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+ return !ktime_after(ktime_get(), kt_deadline);
+}
+
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
@@ -1967,7 +2104,7 @@ static void dm_request_fn(struct request_queue *q)
while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
- goto delay_and_out;
+ goto out;
/* always use block 0 to find the target for flushes for now */
pos = 0;
@@ -1986,12 +2123,17 @@ static void dm_request_fn(struct request_queue *q)
continue;
}
+ if (dm_request_peeked_before_merge_deadline(md) &&
+ md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
+ goto delay_and_out;
+
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
dm_start_request(md, rq);
- tio = rq->special;
+ tio = tio_from_request(rq);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
@@ -2001,33 +2143,11 @@ static void dm_request_fn(struct request_queue *q)
goto out;
delay_and_out:
- blk_delay_queue(q, HZ / 10);
+ blk_delay_queue(q, HZ / 100);
out:
dm_put_live_table(md, srcu_idx);
}
-int dm_underlying_device_busy(struct request_queue *q)
-{
- return blk_lld_busy(q);
-}
-EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
-
-static int dm_lld_busy(struct request_queue *q)
-{
- int r;
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_live_table_fast(md);
-
- if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
- r = 1;
- else
- r = dm_table_any_busy_target(map);
-
- dm_put_live_table_fast(md);
-
- return r;
-}
-
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
@@ -2110,7 +2230,7 @@ static void dm_init_md_queue(struct mapped_device *md)
{
/*
* Request-based dm devices cannot be stacked on top of bio-based dm
- * devices. The type of this dm device has not been decided yet.
+ * devices. The type of this dm device may not have been decided yet.
* The type is decided at the first table loading time.
* To prevent problematic device stacking, clear the queue flag
* for request stacking support until then.
@@ -2118,13 +2238,21 @@ static void dm_init_md_queue(struct mapped_device *md)
* This queue is new, so no concurrency on the queue_flags.
*/
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+}
+static void dm_init_old_md_queue(struct mapped_device *md)
+{
+ md->use_blk_mq = false;
+ dm_init_md_queue(md);
+
+ /*
+ * Initialize aspects of queue that aren't relevant for blk-mq
+ */
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
- blk_queue_make_request(md->queue, dm_request);
+
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
- blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}
/*
@@ -2156,6 +2284,7 @@ static struct mapped_device *alloc_dev(int minor)
if (r < 0)
goto bad_io_barrier;
+ md->use_blk_mq = use_blk_mq;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
mutex_init(&md->type_lock);
@@ -2267,6 +2396,8 @@ static void free_dev(struct mapped_device *md)
del_gendisk(md->disk);
put_disk(md->disk);
blk_cleanup_queue(md->queue);
+ if (md->use_blk_mq)
+ blk_mq_free_tag_set(&md->tag_set);
bdput(md->bdev);
free_minor(minor);
@@ -2278,7 +2409,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && md->bs) {
+ if (md->bs) {
/* The md already has necessary mempools. */
if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
/*
@@ -2310,7 +2441,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
p->bs = NULL;
out:
- /* mempool bind completed, now no need any mempools in the table */
+ /* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
}
@@ -2357,7 +2488,7 @@ int dm_queue_merge_is_compulsory(struct request_queue *q)
if (!q->merge_bvec_fn)
return 0;
- if (q->make_request_fn == dm_request) {
+ if (q->make_request_fn == dm_make_request) {
dev_md = q->queuedata;
if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
return 0;
@@ -2426,7 +2557,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t) && !blk_queue_stopped(q))
+ if (dm_table_request_based(t))
stop_queue(q);
__bind_mempools(md, t);
@@ -2508,14 +2639,6 @@ unsigned dm_get_md_type(struct mapped_device *md)
return md->type;
}
-static bool dm_md_type_request_based(struct mapped_device *md)
-{
- unsigned table_type = dm_get_md_type(md);
-
- return (table_type == DM_TYPE_REQUEST_BASED ||
- table_type == DM_TYPE_MQ_REQUEST_BASED);
-}
-
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
return md->immutable_target_type;
@@ -2532,6 +2655,14 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+static void init_rq_based_worker_thread(struct mapped_device *md)
+{
+ /* Initialize the request-based DM worker thread */
+ init_kthread_worker(&md->kworker);
+ md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+ "kdmwork-%s", dm_device_name(md));
+}
+
/*
* Fully initialize a request-based queue (->elevator, ->request_fn, etc).
*/
@@ -2539,28 +2670,158 @@ static int dm_init_request_based_queue(struct mapped_device *md)
{
struct request_queue *q = NULL;
- if (md->queue->elevator)
- return 1;
-
/* Fully initialize the queue */
q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
if (!q)
- return 0;
+ return -EINVAL;
+
+ /* disable dm_request_fn's merge heuristic by default */
+ md->seq_rq_merge_deadline_usecs = 0;
md->queue = q;
- dm_init_md_queue(md);
+ dm_init_old_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
- blk_queue_lld_busy(md->queue, dm_lld_busy);
- /* Also initialize the request-based DM worker thread */
- init_kthread_worker(&md->kworker);
- md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
- "kdmwork-%s", dm_device_name(md));
+ init_rq_based_worker_thread(md);
elv_register_queue(md->queue);
- return 1;
+ return 0;
+}
+
+static int dm_mq_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct mapped_device *md = data;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+ /*
+ * Must initialize md member of tio, otherwise it won't
+ * be available in dm_mq_queue_rq.
+ */
+ tio->md = md;
+
+ return 0;
+}
+
+static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+ struct mapped_device *md = tio->md;
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+ struct dm_target *ti;
+ sector_t pos;
+
+ /* always use block 0 to find the target for flushes for now */
+ pos = 0;
+ if (!(rq->cmd_flags & REQ_FLUSH))
+ pos = blk_rq_pos(rq);
+
+ ti = dm_table_find_target(map, pos);
+ if (!dm_target_is_valid(ti)) {
+ dm_put_live_table(md, srcu_idx);
+ DMERR_LIMIT("request attempted access beyond the end of device");
+ /*
+ * Must perform the setup that rq_completed() requires
+ * before returning BLK_MQ_RQ_QUEUE_ERROR.
+ */
+ dm_start_request(md, rq);
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
+ dm_put_live_table(md, srcu_idx);
+
+ if (ti->type->busy && ti->type->busy(ti))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ dm_start_request(md, rq);
+
+ /* Init tio using md established in .init_request */
+ init_tio(tio, rq, md);
+
+ /*
+ * Establish tio->ti before queuing work (map_tio_request)
+ * or making direct call to map_request().
+ */
+ tio->ti = ti;
+
+ /* Clone the request if underlying devices aren't blk-mq */
+ if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
+ /* clone request is allocated at the end of the pdu */
+ tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
+ if (!clone_rq(rq, md, tio, GFP_ATOMIC))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ queue_kthread_work(&md->kworker, &tio->work);
+ } else {
+ /* Direct call is fine since .queue_rq allows allocations */
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
+ dm_requeue_unmapped_original_request(md, rq);
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_ops dm_mq_ops = {
+ .queue_rq = dm_mq_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .complete = dm_softirq_done,
+ .init_request = dm_mq_init_request,
+};
+
+static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
+{
+ unsigned md_type = dm_get_md_type(md);
+ struct request_queue *q;
+ int err;
+
+ memset(&md->tag_set, 0, sizeof(md->tag_set));
+ md->tag_set.ops = &dm_mq_ops;
+ md->tag_set.queue_depth = BLKDEV_MAX_RQ;
+ md->tag_set.numa_node = NUMA_NO_NODE;
+ md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ md->tag_set.nr_hw_queues = 1;
+ if (md_type == DM_TYPE_REQUEST_BASED) {
+ /* make the memory for non-blk-mq clone part of the pdu */
+ md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
+ } else
+ md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
+ md->tag_set.driver_data = md;
+
+ err = blk_mq_alloc_tag_set(&md->tag_set);
+ if (err)
+ return err;
+
+ q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto out_tag_set;
+ }
+ md->queue = q;
+ dm_init_md_queue(md);
+
+ /* backfill 'mq' sysfs registration normally done in blk_register_queue */
+ blk_mq_register_disk(md->disk);
+
+ if (md_type == DM_TYPE_REQUEST_BASED)
+ init_rq_based_worker_thread(md);
+
+ return 0;
+
+out_tag_set:
+ blk_mq_free_tag_set(&md->tag_set);
+ return err;
+}
+
+static unsigned filter_md_type(unsigned type, struct mapped_device *md)
+{
+ if (type == DM_TYPE_BIO_BASED)
+ return type;
+
+ return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}
/*
@@ -2568,9 +2829,29 @@ static int dm_init_request_based_queue(struct mapped_device *md)
*/
int dm_setup_md_queue(struct mapped_device *md)
{
- if (dm_md_type_request_based(md) && !dm_init_request_based_queue(md)) {
- DMWARN("Cannot initialize queue for request-based mapped device");
- return -EINVAL;
+ int r;
+ unsigned md_type = filter_md_type(dm_get_md_type(md), md);
+
+ switch (md_type) {
+ case DM_TYPE_REQUEST_BASED:
+ r = dm_init_request_based_queue(md);
+ if (r) {
+ DMWARN("Cannot initialize queue for request-based mapped device");
+ return r;
+ }
+ break;
+ case DM_TYPE_MQ_REQUEST_BASED:
+ r = dm_init_request_based_blk_mq_queue(md);
+ if (r) {
+ DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
+ return r;
+ }
+ break;
+ case DM_TYPE_BIO_BASED:
+ dm_init_old_md_queue(md);
+ blk_queue_make_request(md->queue, dm_make_request);
+ blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+ break;
}
return 0;
@@ -2654,7 +2935,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- if (dm_request_based(md))
+ if (dm_request_based(md) && md->kworker_task)
flush_kthread_worker(&md->kworker);
/*
@@ -2908,7 +3189,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
*/
if (dm_request_based(md)) {
stop_queue(md->queue);
- flush_kthread_worker(&md->kworker);
+ if (md->kworker_task)
+ flush_kthread_worker(&md->kworker);
}
flush_workqueue(md->wq);
@@ -3206,6 +3488,7 @@ struct gendisk *dm_disk(struct mapped_device *md)
{
return md->disk;
}
+EXPORT_SYMBOL_GPL(dm_disk);
struct kobject *dm_kobject(struct mapped_device *md)
{
@@ -3253,16 +3536,19 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+ unsigned integrity, unsigned per_bio_data_size)
{
struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
- struct kmem_cache *cachep;
+ struct kmem_cache *cachep = NULL;
unsigned int pool_size = 0;
unsigned int front_pad;
if (!pools)
return NULL;
+ type = filter_md_type(type, md);
+
switch (type) {
case DM_TYPE_BIO_BASED:
cachep = _io_cache;
@@ -3270,13 +3556,13 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
break;
case DM_TYPE_REQUEST_BASED:
+ cachep = _rq_tio_cache;
pool_size = dm_get_reserved_rq_based_ios();
pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
if (!pools->rq_pool)
goto out;
/* fall through to setup remaining rq-based pools */
case DM_TYPE_MQ_REQUEST_BASED:
- cachep = _rq_tio_cache;
if (!pool_size)
pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
@@ -3284,12 +3570,14 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
WARN_ON(per_bio_data_size != 0);
break;
default:
- goto out;
+ BUG();
}
- pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
- if (!pools->io_pool)
- goto out;
+ if (cachep) {
+ pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
+ if (!pools->io_pool)
+ goto out;
+ }
pools->bs = bioset_create_nobvec(pool_size, front_pad);
if (!pools->bs)
@@ -3346,6 +3634,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
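+
+/*
+ * Example (assuming DM is built as the usual dm_mod module): pass
+ * "dm_mod.use_blk_mq=y" on the kernel command line, or toggle
+ * /sys/module/dm_mod/parameters/use_blk_mq at runtime; the value is
+ * sampled when a mapped device is allocated.
+ */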
+
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 59f53e79db82..6123c2bf9150 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -70,7 +70,6 @@ void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-int dm_table_any_busy_target(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
@@ -212,6 +211,8 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
+bool dm_use_blk_mq(struct mapped_device *md);
+
int dm_io_init(void);
void dm_io_exit(void);
@@ -221,7 +222,8 @@ void dm_kcopyd_exit(void);
/*
* Mempool operations
*/
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+ unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
@@ -235,4 +237,8 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
return !maxlen || strlen(result) + 1 >= maxlen;
}
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count);
+
#endif
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
new file mode 100644
index 000000000000..fcfc4b9b2672
--- /dev/null
+++ b/drivers/md/md-cluster.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (C) 2015, SUSE
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/dlm.h>
+#include <linux/sched.h>
+#include <linux/raid/md_p.h>
+#include "md.h"
+#include "bitmap.h"
+#include "md-cluster.h"
+
+#define LVB_SIZE 64
+#define NEW_DEV_TIMEOUT 5000
+
+struct dlm_lock_resource {
+ dlm_lockspace_t *ls;
+ struct dlm_lksb lksb;
+ char *name; /* lock name. */
+ uint32_t flags; /* flags to pass to dlm_lock() */
+ struct completion completion; /* completion for synchronized locking */
+ void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
+ struct mddev *mddev; /* pointing back to mddev. */
+};
+
+struct suspend_info {
+ int slot;
+ sector_t lo;
+ sector_t hi;
+ struct list_head list;
+};
+
+struct resync_info {
+ __le64 lo;
+ __le64 hi;
+};
+
+/* md_cluster_info flags */
+#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
+
+
+struct md_cluster_info {
+ /* dlm lock space and resources for clustered raid. */
+ dlm_lockspace_t *lockspace;
+ int slot_number;
+ struct completion completion;
+ struct dlm_lock_resource *sb_lock;
+ struct mutex sb_mutex;
+ struct dlm_lock_resource *bitmap_lockres;
+ struct list_head suspend_list;
+ spinlock_t suspend_lock;
+ struct md_thread *recovery_thread;
+ unsigned long recovery_map;
+ /* communication loc resources */
+ struct dlm_lock_resource *ack_lockres;
+ struct dlm_lock_resource *message_lockres;
+ struct dlm_lock_resource *token_lockres;
+ struct dlm_lock_resource *no_new_dev_lockres;
+ struct md_thread *recv_thread;
+ struct completion newdisk_completion;
+ unsigned long state;
+};
+
+enum msg_type {
+ METADATA_UPDATED = 0,
+ RESYNCING,
+ NEWDISK,
+ REMOVE,
+ RE_ADD,
+};
+
+struct cluster_msg {
+ int type;
+ int slot;
+ /* TODO: Unionize this for smaller footprint */
+ sector_t low;
+ sector_t high;
+ char uuid[16];
+ int raid_slot;
+};
+
+static void sync_ast(void *arg)
+{
+ struct dlm_lock_resource *res;
+
+ res = (struct dlm_lock_resource *) arg;
+ complete(&res->completion);
+}
+
+static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
+{
+ int ret = 0;
+
+ init_completion(&res->completion);
+ ret = dlm_lock(res->ls, mode, &res->lksb,
+ res->flags, res->name, strlen(res->name),
+ 0, sync_ast, res, res->bast);
+ if (ret)
+ return ret;
+ wait_for_completion(&res->completion);
+ return res->lksb.sb_status;
+}
+
+static int dlm_unlock_sync(struct dlm_lock_resource *res)
+{
+ return dlm_lock_sync(res, DLM_LOCK_NL);
+}
+
+static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
+ char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
+{
+ struct dlm_lock_resource *res = NULL;
+ int ret, namelen;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
+ if (!res)
+ return NULL;
+ res->ls = cinfo->lockspace;
+ res->mddev = mddev;
+ namelen = strlen(name);
+ res->name = kzalloc(namelen + 1, GFP_KERNEL);
+ if (!res->name) {
+ pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name);
+ goto out_err;
+ }
+ strlcpy(res->name, name, namelen + 1);
+ if (with_lvb) {
+ res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL);
+ if (!res->lksb.sb_lvbptr) {
+ pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name);
+ goto out_err;
+ }
+ res->flags = DLM_LKF_VALBLK;
+ }
+
+ if (bastfn)
+ res->bast = bastfn;
+
+ res->flags |= DLM_LKF_EXPEDITE;
+
+ ret = dlm_lock_sync(res, DLM_LOCK_NL);
+ if (ret) {
+ pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name);
+ goto out_err;
+ }
+ res->flags &= ~DLM_LKF_EXPEDITE;
+ res->flags |= DLM_LKF_CONVERT;
+
+ return res;
+out_err:
+ kfree(res->lksb.sb_lvbptr);
+ kfree(res->name);
+ kfree(res);
+ return NULL;
+}
+
+static void lockres_free(struct dlm_lock_resource *res)
+{
+ if (!res)
+ return;
+
+ init_completion(&res->completion);
+ dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+ wait_for_completion(&res->completion);
+
+ kfree(res->name);
+ kfree(res->lksb.sb_lvbptr);
+ kfree(res);
+}
+
+static char *pretty_uuid(char *dest, char *src)
+{
+ int i, len = 0;
+
+ for (i = 0; i < 16; i++) {
+ if (i == 4 || i == 6 || i == 8 || i == 10)
+ len += sprintf(dest + len, "-");
+ len += sprintf(dest + len, "%02x", (__u8)src[i]);
+ }
+ return dest;
+}
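+
+/*
+ * e.g. 16 raw bytes become "aabbccdd-eeff-0011-2233-445566778899", the
+ * standard 8-4-4-4-12 hex grouping (the bytes shown are made up).
+ */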
+
+static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres,
+ sector_t lo, sector_t hi)
+{
+ struct resync_info *ri;
+
+ ri = (struct resync_info *)lockres->lksb.sb_lvbptr;
+ ri->lo = cpu_to_le64(lo);
+ ri->hi = cpu_to_le64(hi);
+}
+
+static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
+{
+ struct resync_info ri;
+ struct suspend_info *s = NULL;
+ sector_t hi = 0;
+
+ dlm_lock_sync(lockres, DLM_LOCK_CR);
+ memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
+ hi = le64_to_cpu(ri.hi);
+ if (ri.hi > 0) {
+ s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
+ if (!s)
+ goto out;
+ s->hi = hi;
+ s->lo = le64_to_cpu(ri.lo);
+ }
+ dlm_unlock_sync(lockres);
+out:
+ return s;
+}
+
+static void recover_bitmaps(struct md_thread *thread)
+{
+ struct mddev *mddev = thread->mddev;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct dlm_lock_resource *bm_lockres;
+ char str[64];
+ int slot, ret;
+ struct suspend_info *s, *tmp;
+ sector_t lo, hi;
+
+ while (cinfo->recovery_map) {
+ slot = fls64((u64)cinfo->recovery_map) - 1;
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
+ snprintf(str, 64, "bitmap%04d", slot);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres) {
+ pr_err("md-cluster: Cannot initialize bitmaps\n");
+ goto clear_bit;
+ }
+
+ ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (ret) {
+ pr_err("md-cluster: Could not DLM lock %s: %d\n",
+ str, ret);
+ goto clear_bit;
+ }
+ ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
+ if (ret) {
+ pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
+ goto dlm_unlock;
+ }
+ if (hi > 0) {
+ /* TODO: Wait for the current resync to finish */
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ if (lo < mddev->recovery_cp)
+ mddev->recovery_cp = lo;
+ md_check_recovery(mddev);
+ }
+dlm_unlock:
+ dlm_unlock_sync(bm_lockres);
+clear_bit:
+ clear_bit(slot, &cinfo->recovery_map);
+ }
+}
+
+static void recover_prep(void *arg)
+{
+}
+
+static void recover_slot(void *arg, struct dlm_slot *slot)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
+ mddev->bitmap_info.cluster_name,
+ slot->nodeid, slot->slot,
+ cinfo->slot_number);
+ set_bit(slot->slot - 1, &cinfo->recovery_map);
+ if (!cinfo->recovery_thread) {
+ cinfo->recovery_thread = md_register_thread(recover_bitmaps,
+ mddev, "recover");
+ if (!cinfo->recovery_thread) {
+ pr_warn("md-cluster: Could not create recovery thread\n");
+ return;
+ }
+ }
+ md_wakeup_thread(cinfo->recovery_thread);
+}
+
+static void recover_done(void *arg, struct dlm_slot *slots,
+ int num_slots, int our_slot,
+ uint32_t generation)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ cinfo->slot_number = our_slot;
+ complete(&cinfo->completion);
+}
+
+static const struct dlm_lockspace_ops md_ls_ops = {
+ .recover_prep = recover_prep,
+ .recover_slot = recover_slot,
+ .recover_done = recover_done,
+};
+
+/*
+ * The BAST function for the ack lock resource
+ * This function wakes up the receive thread in
+ * order to receive and process the message.
+ */
+static void ack_bast(void *arg, int mode)
+{
+ struct dlm_lock_resource *res = (struct dlm_lock_resource *)arg;
+ struct md_cluster_info *cinfo = res->mddev->cluster_info;
+
+ if (mode == DLM_LOCK_EX)
+ md_wakeup_thread(cinfo->recv_thread);
+}
+
+static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+{
+ struct suspend_info *s, *tmp;
+
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ pr_info("%s:%d Deleting suspend_info: %d\n",
+ __func__, __LINE__, slot);
+ list_del(&s->list);
+ kfree(s);
+ break;
+ }
+}
+
+static void remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+{
+ spin_lock_irq(&cinfo->suspend_lock);
+ __remove_suspend_info(cinfo, slot);
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
+
+static void process_suspend_info(struct md_cluster_info *cinfo,
+ int slot, sector_t lo, sector_t hi)
+{
+ struct suspend_info *s;
+
+ if (!hi) {
+ remove_suspend_info(cinfo, slot);
+ return;
+ }
+ s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
+ if (!s)
+ return;
+ s->slot = slot;
+ s->lo = lo;
+ s->hi = hi;
+ spin_lock_irq(&cinfo->suspend_lock);
+ /* Remove the existing entry (if it exists) before adding */
+ __remove_suspend_info(cinfo, slot);
+ list_add(&s->list, &cinfo->suspend_list);
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
+static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
+{
+ char disk_uuid[64];
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ char event_name[] = "EVENT=ADD_DEVICE";
+ char raid_slot[16];
+ char *envp[] = {event_name, disk_uuid, raid_slot, NULL};
+ int len;
+
+ len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
+ pretty_uuid(disk_uuid + len, cmsg->uuid);
+ snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot);
+ pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
+ init_completion(&cinfo->newdisk_completion);
+ set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
+ kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
+ wait_for_completion_timeout(&cinfo->newdisk_completion,
+ NEW_DEV_TIMEOUT);
+ clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
+}
+
+
+static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ md_reload_sb(mddev);
+ dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+}
+
+static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+
+ if (rdev)
+ md_kick_rdev_from_array(rdev);
+ else
+ pr_warn("%s: %d Could not find disk(%d) to REMOVE\n", __func__, __LINE__, msg->raid_slot);
+}
+
+static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ clear_bit(Faulty, &rdev->flags);
+ else
+ pr_warn("%s: %d Could not find disk(%d) which is faulty", __func__, __LINE__, msg->raid_slot);
+}
+
+static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
+{
+ switch (msg->type) {
+ case METADATA_UPDATED:
+ pr_info("%s: %d Received message: METADATA_UPDATE from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_metadata_update(mddev, msg);
+ break;
+ case RESYNCING:
+ pr_info("%s: %d Received message: RESYNCING from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_suspend_info(mddev->cluster_info, msg->slot,
+ msg->low, msg->high);
+ break;
+ case NEWDISK:
+ pr_info("%s: %d Received message: NEWDISK from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_add_new_disk(mddev, msg);
+ break;
+ case REMOVE:
+ pr_info("%s: %d Received REMOVE from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_remove_disk(mddev, msg);
+ break;
+ case RE_ADD:
+ pr_info("%s: %d Received RE_ADD from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_readd_disk(mddev, msg);
+ break;
+ default:
+ pr_warn("%s:%d Received unknown message from %d\n",
+ __func__, __LINE__, msg->slot);
+ }
+}
+
+/*
+ * Thread for receiving messages from other nodes
+ */
+static void recv_daemon(struct md_thread *thread)
+{
+ struct md_cluster_info *cinfo = thread->mddev->cluster_info;
+ struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
+ struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
+ struct cluster_msg msg;
+
+ /*get CR on Message*/
+ if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
+ pr_err("md/raid1:failed to get CR on MESSAGE\n");
+ return;
+ }
+
+ /* read the LVB and process the message it carries */
+ memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
+ process_recvd_msg(thread->mddev, &msg);
+
+ /*release CR on ack_lockres*/
+ dlm_unlock_sync(ack_lockres);
+ /*up-convert to EX on message_lockres*/
+ dlm_lock_sync(message_lockres, DLM_LOCK_EX);
+ /*get CR on ack_lockres again*/
+ dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+ /*release CR on message_lockres*/
+ dlm_unlock_sync(message_lockres);
+}
+
+/* lock_comm()
+ * Takes the lock on the TOKEN lock resource so no other
+ * node can communicate while the operation is underway.
+ */
+static int lock_comm(struct md_cluster_info *cinfo)
+{
+ int error;
+
+ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+ if (error)
+ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
+ __func__, __LINE__, error);
+ return error;
+}
+
+static void unlock_comm(struct md_cluster_info *cinfo)
+{
+ dlm_unlock_sync(cinfo->token_lockres);
+}
+
+/* __sendmsg()
+ * This function performs the actual sending of the message. It is usually
+ * called after the encompassing operation has been performed. The function:
+ * 1. Grabs the message lock resource in EX mode
+ * 2. Copies the message into the message LVB
+ * 3. Downconverts the message lock resource to CR
+ * 4. Upconverts the ack lock resource from CR to EX. This forces the BAST on
+ * the other nodes, which then read the message. The thread waits here until
+ * all other nodes have released the ack lock resource.
+ * 5. Downconverts the ack lock resource to CR
+ */
+static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
+{
+ int error;
+ int slot = cinfo->slot_number - 1;
+
+ cmsg->slot = cpu_to_le32(slot);
+ /*get EX on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
+ goto failed_message;
+ }
+
+ memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
+ sizeof(struct cluster_msg));
+ /*down-convert EX to CR on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CR);
+ if (error) {
+ pr_err("md-cluster: failed to convert EX to CR on MESSAGE(%d)\n",
+ error);
+ goto failed_message;
+ }
+
+ /*up-convert CR to EX on Ack*/
+ error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n",
+ error);
+ goto failed_ack;
+ }
+
+ /*down-convert EX to CR on Ack*/
+ error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
+ if (error) {
+ pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n",
+ error);
+ goto failed_ack;
+ }
+
+failed_ack:
+ dlm_unlock_sync(cinfo->message_lockres);
+failed_message:
+ return error;
+}
+
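+/* sendmsg: take the TOKEN, send the message, release the TOKEN */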
+static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
+{
+ int ret;
+
+ lock_comm(cinfo);
+ ret = __sendmsg(cinfo, cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
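+/*
+ * Called while joining: try (without queueing) to take each other slot's
+ * bitmap lock. -EAGAIN means that slot's node is active, so its in-progress
+ * resync range is read and added to the suspend list to keep local I/O away
+ * from areas that are still being resynced.
+ */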
+static int gather_all_resync_info(struct mddev *mddev, int total_slots)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int i, ret = 0;
+ struct dlm_lock_resource *bm_lockres;
+ struct suspend_info *s;
+ char str[64];
+
+ for (i = 0; i < total_slots; i++) {
+ memset(str, '\0', 64);
+ snprintf(str, 64, "bitmap%04d", i);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres)
+ return -ENOMEM;
+ if (i == (cinfo->slot_number - 1))
+ continue;
+
+ bm_lockres->flags |= DLM_LKF_NOQUEUE;
+ ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (ret == -EAGAIN) {
+ memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
+ s = read_resync_info(mddev, bm_lockres);
+ if (s) {
+ pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
+ __func__, __LINE__,
+ (unsigned long long) s->lo,
+ (unsigned long long) s->hi, i);
+ spin_lock_irq(&cinfo->suspend_lock);
+ s->slot = i;
+ list_add(&s->list, &cinfo->suspend_list);
+ spin_unlock_irq(&cinfo->suspend_lock);
+ }
+ ret = 0;
+ lockres_free(bm_lockres);
+ continue;
+ }
+ if (ret)
+ goto out;
+ /* TODO: Read the disk bitmap sb and check if it needs recovery */
+ dlm_unlock_sync(bm_lockres);
+ lockres_free(bm_lockres);
+ }
+out:
+ return ret;
+}
+
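+/*
+ * join() - create or join the DLM lockspace named after the array uuid,
+ * wait until our slot number is known, set up the communication lock
+ * resources (message/token/ack/no-new-dev), the receive thread and this
+ * node's bitmap lock, and finally collect resync state from the other nodes.
+ */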
+static int join(struct mddev *mddev, int nodes)
+{
+ struct md_cluster_info *cinfo;
+ int ret, ops_rv;
+ char str[64];
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
+ if (!cinfo) {
+ /* drop the module reference taken above */
+ module_put(THIS_MODULE);
+ return -ENOMEM;
+ }
+
+ init_completion(&cinfo->completion);
+
+ mutex_init(&cinfo->sb_mutex);
+ mddev->cluster_info = cinfo;
+
+ memset(str, 0, 64);
+ pretty_uuid(str, mddev->uuid);
+ ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
+ DLM_LSFL_FS, LVB_SIZE,
+ &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
+ if (ret)
+ goto err;
+ wait_for_completion(&cinfo->completion);
+ if (nodes < cinfo->slot_number) {
+ pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).",
+ cinfo->slot_number, nodes);
+ ret = -ERANGE;
+ goto err;
+ }
+ cinfo->sb_lock = lockres_init(mddev, "cmd-super",
+ NULL, 0);
+ if (!cinfo->sb_lock) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ /* Initiate the communication resources */
+ ret = -ENOMEM;
+ cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
+ if (!cinfo->recv_thread) {
+ pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
+ goto err;
+ }
+ cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
+ if (!cinfo->message_lockres)
+ goto err;
+ cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
+ if (!cinfo->token_lockres)
+ goto err;
+ cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
+ if (!cinfo->ack_lockres)
+ goto err;
+ cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
+ if (!cinfo->no_new_dev_lockres)
+ goto err;
+
+ /* get sync CR lock on ACK. */
+ if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
+ pr_err("md-cluster: failed to get a sync CR lock on ACK!\n");
+ /* get sync CR lock on no-new-dev. */
+ if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
+ pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!\n");
+
+ pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
+ snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
+ cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!cinfo->bitmap_lockres)
+ goto err;
+ if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
+ pr_err("Failed to get bitmap lock\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&cinfo->suspend_list);
+ spin_lock_init(&cinfo->suspend_lock);
+
+ ret = gather_all_resync_info(mddev, nodes);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ lockres_free(cinfo->message_lockres);
+ lockres_free(cinfo->token_lockres);
+ lockres_free(cinfo->ack_lockres);
+ lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->bitmap_lockres);
+ lockres_free(cinfo->sb_lock);
+ if (cinfo->lockspace)
+ dlm_release_lockspace(cinfo->lockspace, 2);
+ mddev->cluster_info = NULL;
+ kfree(cinfo);
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static int leave(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ if (!cinfo)
+ return 0;
+ md_unregister_thread(&cinfo->recovery_thread);
+ md_unregister_thread(&cinfo->recv_thread);
+ lockres_free(cinfo->message_lockres);
+ lockres_free(cinfo->token_lockres);
+ lockres_free(cinfo->ack_lockres);
+ lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->sb_lock);
+ lockres_free(cinfo->bitmap_lockres);
+ dlm_release_lockspace(cinfo->lockspace, 2);
+ return 0;
+}
+
+/* slot_number(): Returns the MD slot number to use
+ * DLM starts the slot numbers from 1, whereas cluster-md
+ * wants the numbering to start from zero, so we subtract one
+ */
+static int slot_number(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ return cinfo->slot_number - 1;
+}
+
+static void resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi);
+ /* Re-acquire the lock to refresh LVB */
+ dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
+}
+
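+/*
+ * metadata_update_start/finish/cancel bracket a superblock update: take the
+ * TOKEN, write the new metadata, broadcast METADATA_UPDATED and release the
+ * TOKEN (or just drop it again on cancel).
+ */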
+static int metadata_update_start(struct mddev *mddev)
+{
+ return lock_comm(mddev->cluster_info);
+}
+
+static int metadata_update_finish(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int ret;
+
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(METADATA_UPDATED);
+ ret = __sendmsg(cinfo, &cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
+static int metadata_update_cancel(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ return dlm_unlock_sync(cinfo->token_lockres);
+}
+
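+/*
+ * Record [lo, hi] in our bitmap lock's LVB and broadcast it in a RESYNCING
+ * message so the other nodes suspend I/O to that range; hi == 0 signals
+ * that the resync has finished.
+ */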
+static int resync_send(struct mddev *mddev, enum msg_type type,
+ sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int slot = cinfo->slot_number - 1;
+
+ pr_info("%s:%d lo: %llu hi: %llu\n", __func__, __LINE__,
+ (unsigned long long)lo,
+ (unsigned long long)hi);
+ resync_info_update(mddev, lo, hi);
+ cmsg.type = cpu_to_le32(type);
+ cmsg.slot = cpu_to_le32(slot);
+ cmsg.low = cpu_to_le64(lo);
+ cmsg.high = cpu_to_le64(hi);
+ return sendmsg(cinfo, &cmsg);
+}
+
+static int resync_start(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ return resync_send(mddev, RESYNCING, lo, hi);
+}
+
+static void resync_finish(struct mddev *mddev)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ resync_send(mddev, RESYNCING, 0, 0);
+}
+
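+/*
+ * area_resyncing() - return 1 if [lo, hi] overlaps a range that some other
+ * node has reported as being resynced, 0 otherwise.
+ */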
+static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret = 0;
+ struct suspend_info *s;
+
+ spin_lock_irq(&cinfo->suspend_lock);
+ if (list_empty(&cinfo->suspend_list))
+ goto out;
+ list_for_each_entry(s, &cinfo->suspend_list, list)
+ if (hi > s->lo && lo < s->hi) {
+ ret = 1;
+ break;
+ }
+out:
+ spin_unlock_irq(&cinfo->suspend_lock);
+ return ret;
+}
+
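+/*
+ * Broadcast NEWDISK for the device being added, then try to take EX on
+ * no-new-dev without queueing: -EAGAIN here means some node still holds CR,
+ * i.e. it has not acknowledged (does not see) the new device.
+ */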
+static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int ret = 0;
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ char *uuid = sb->device_uuid;
+
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(NEWDISK);
+ memcpy(cmsg.uuid, uuid, 16);
+ cmsg.raid_slot = rdev->desc_nr;
+ lock_comm(cinfo);
+ ret = __sendmsg(cinfo, &cmsg);
+ if (ret)
+ return ret;
+ cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
+ ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
+ cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
+ /* Some node does not "see" the device */
+ if (ret == -EAGAIN)
+ ret = -ENOENT;
+ else
+ dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+ return ret;
+}
+
+static int add_new_disk_finish(struct mddev *mddev)
+{
+ struct cluster_msg cmsg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret;
+ /* Write sb and inform others */
+ md_update_sb(mddev, 1);
+ cmsg.type = cpu_to_le32(METADATA_UPDATED);
+ ret = __sendmsg(cinfo, &cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
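+/*
+ * Userspace confirmation (or NACK) of a NEWDISK request. On ack our CR on
+ * no-new-dev is dropped so the adding node can obtain EX; either way the
+ * waiter in process_add_new_disk() is woken up.
+ */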
+static int new_disk_ack(struct mddev *mddev, bool ack)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
+ pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev));
+ return -EINVAL;
+ }
+
+ if (ack)
+ dlm_unlock_sync(cinfo->no_new_dev_lockres);
+ complete(&cinfo->newdisk_completion);
+ return 0;
+}
+
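+/* Ask the other nodes to remove the device in the given raid slot */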
+static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct cluster_msg cmsg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ cmsg.type = cpu_to_le32(REMOVE);
+ cmsg.raid_slot = rdev->desc_nr;
+ return __sendmsg(cinfo, &cmsg);
+}
+
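+/*
+ * Re-adding a device: tell the other nodes (RE_ADD) and fold every other
+ * node's bitmap into our recovery window so the writes the device missed
+ * get resynced.
+ */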
+static int gather_bitmaps(struct md_rdev *rdev)
+{
+ int sn, err;
+ sector_t lo, hi;
+ struct cluster_msg cmsg;
+ struct mddev *mddev = rdev->mddev;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ cmsg.type = cpu_to_le32(RE_ADD);
+ cmsg.raid_slot = rdev->desc_nr;
+ err = sendmsg(cinfo, &cmsg);
+ if (err)
+ goto out;
+
+ for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
+ if (sn == (cinfo->slot_number - 1))
+ continue;
+ err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
+ if (err) {
+ pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
+ goto out;
+ }
+ if ((hi > 0) && (lo < mddev->recovery_cp))
+ mddev->recovery_cp = lo;
+ }
+out:
+ return err;
+}
+
+static struct md_cluster_operations cluster_ops = {
+ .join = join,
+ .leave = leave,
+ .slot_number = slot_number,
+ .resync_info_update = resync_info_update,
+ .resync_start = resync_start,
+ .resync_finish = resync_finish,
+ .metadata_update_start = metadata_update_start,
+ .metadata_update_finish = metadata_update_finish,
+ .metadata_update_cancel = metadata_update_cancel,
+ .area_resyncing = area_resyncing,
+ .add_new_disk_start = add_new_disk_start,
+ .add_new_disk_finish = add_new_disk_finish,
+ .new_disk_ack = new_disk_ack,
+ .remove_disk = remove_disk,
+ .gather_bitmaps = gather_bitmaps,
+};
+
+static int __init cluster_init(void)
+{
+ pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
+ pr_info("Registering Cluster MD functions\n");
+ register_md_cluster_operations(&cluster_ops, THIS_MODULE);
+ return 0;
+}
+
+static void cluster_exit(void)
+{
+ unregister_md_cluster_operations();
+}
+
+module_init(cluster_init);
+module_exit(cluster_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Clustering support for MD");
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
new file mode 100644
index 000000000000..6817ee00e053
--- /dev/null
+++ b/drivers/md/md-cluster.h
@@ -0,0 +1,29 @@
+
+
+#ifndef _MD_CLUSTER_H
+#define _MD_CLUSTER_H
+
+#include "md.h"
+
+struct mddev;
+struct md_rdev;
+
+struct md_cluster_operations {
+ int (*join)(struct mddev *mddev, int nodes);
+ int (*leave)(struct mddev *mddev);
+ int (*slot_number)(struct mddev *mddev);
+ void (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*resync_start)(struct mddev *mddev, sector_t lo, sector_t hi);
+ void (*resync_finish)(struct mddev *mddev);
+ int (*metadata_update_start)(struct mddev *mddev);
+ int (*metadata_update_finish)(struct mddev *mddev);
+ int (*metadata_update_cancel)(struct mddev *mddev);
+ int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
+ int (*add_new_disk_finish)(struct mddev *mddev);
+ int (*new_disk_ack)(struct mddev *mddev, bool ack);
+ int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
+ int (*gather_bitmaps)(struct md_rdev *rdev);
+};
+
+#endif /* _MD_CLUSTER_H */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e6178787ce3d..593a02476c78 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
+#include "md-cluster.h"
#ifndef MODULE
static void autostart_arrays(int part);
@@ -66,6 +67,11 @@ static void autostart_arrays(int part);
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
+struct md_cluster_operations *md_cluster_ops;
+EXPORT_SYMBOL(md_cluster_ops);
+struct module *md_cluster_mod;
+EXPORT_SYMBOL(md_cluster_mod);
+
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
@@ -640,7 +646,7 @@ void mddev_unlock(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_unlock);
-static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
+struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
@@ -650,6 +656,7 @@ static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
return NULL;
}
+EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
@@ -2047,11 +2054,11 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
int choice = 0;
if (mddev->pers)
choice = mddev->raid_disks;
- while (find_rdev_nr_rcu(mddev, choice))
+ while (md_find_rdev_nr_rcu(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
- if (find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
+ if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
rcu_read_unlock();
return -EBUSY;
}
@@ -2166,11 +2173,12 @@ static void export_rdev(struct md_rdev *rdev)
kobject_put(&rdev->kobj);
}
-static void kick_rdev_from_array(struct md_rdev *rdev)
+void md_kick_rdev_from_array(struct md_rdev *rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
+EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
static void export_array(struct mddev *mddev)
{
@@ -2179,7 +2187,7 @@ static void export_array(struct mddev *mddev)
while (!list_empty(&mddev->disks)) {
rdev = list_first_entry(&mddev->disks, struct md_rdev,
same_set);
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
}
mddev->raid_disks = 0;
mddev->major_version = 0;
@@ -2208,7 +2216,7 @@ static void sync_sbs(struct mddev *mddev, int nospares)
}
}
-static void md_update_sb(struct mddev *mddev, int force_change)
+void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
@@ -2369,6 +2377,37 @@ repeat:
wake_up(&rdev->blocked_wait);
}
}
+EXPORT_SYMBOL(md_update_sb);
+
+static int add_bound_rdev(struct md_rdev *rdev)
+{
+ struct mddev *mddev = rdev->mddev;
+ int err = 0;
+
+ if (!mddev->pers->hot_remove_disk) {
+ /* If there is hot_add_disk but no hot_remove_disk
+ * then added disks are for geometry changes
+ * and should be added immediately.
+ */
+ super_types[mddev->major_version].
+ validate_super(mddev, rdev);
+ err = mddev->pers->hot_add_disk(mddev, rdev);
+ if (err) {
+ unbind_rdev_from_array(rdev);
+ export_rdev(rdev);
+ return err;
+ }
+ }
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
+
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (mddev->degraded)
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_new_event(mddev);
+ md_wakeup_thread(mddev->thread);
+ return 0;
+}
/* words written to sysfs files may, or may not, be \n terminated.
* We want to accept with case. For this we use cmd_match.
@@ -2471,10 +2510,16 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
- kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->remove_disk(mddev, rdev);
+ md_kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
@@ -2553,6 +2598,21 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(Replacement, &rdev->flags);
err = 0;
}
+ } else if (cmd_match(buf, "re-add")) {
+ if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
+ /* clear_bit is performed _after_ all the devices
+ * have their local Faulty bit cleared. If any writes
+ * happen in the meantime in the local node, they
+ * will land in the local bitmap, which will be synced
+ * by this node eventually
+ */
+ if (!mddev_is_clustered(rdev->mddev) ||
+ (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
+ clear_bit(Faulty, &rdev->flags);
+ err = add_bound_rdev(rdev);
+ }
+ } else
+ err = -EBUSY;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -3127,7 +3187,7 @@ static void analyze_sbs(struct mddev *mddev)
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdevname(rdev->bdev,b));
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
@@ -3142,18 +3202,27 @@ static void analyze_sbs(struct mddev *mddev)
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
continue;
}
- if (rdev != freshest)
+ if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
continue;
}
+ /* No device should have a Candidate flag
+ * when reading devices
+ */
+ if (test_bit(Candidate, &rdev->flags)) {
+ pr_info("md: kicking Cluster Candidate %s from array!\n",
+ bdevname(rdev->bdev, b));
+ md_kick_rdev_from_array(rdev);
+ }
+ }
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
@@ -4008,8 +4077,12 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
if (mddev->pers) {
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
@@ -4354,7 +4427,6 @@ min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
int err;
- int chunk;
if (kstrtoull(buf, 10, &min))
return -EINVAL;
@@ -4368,16 +4440,8 @@ min_sync_store(struct mddev *mddev, const char *buf, size_t len)
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
goto out_unlock;
- /* Must be a multiple of chunk_size */
- chunk = mddev->chunk_sectors;
- if (chunk) {
- sector_t temp = min;
-
- err = -EINVAL;
- if (sector_div(temp, chunk))
- goto out_unlock;
- }
- mddev->resync_min = min;
+ /* Round down to multiple of 4K for safety */
+ mddev->resync_min = round_down(min, 8);
err = 0;
out_unlock:
@@ -4754,12 +4818,12 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
+ if (mddev->queue)
+ blk_cleanup_queue(mddev->queue);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
- if (mddev->queue)
- blk_cleanup_queue(mddev->queue);
kfree(mddev);
}
@@ -5077,10 +5141,16 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- err = bitmap_create(mddev);
- if (err)
+ struct bitmap *bitmap;
+
+ bitmap = bitmap_create(mddev, -1);
+ if (IS_ERR(bitmap)) {
+ err = PTR_ERR(bitmap);
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
+ } else
+ mddev->bitmap = bitmap;
+
}
if (err) {
mddev_detach(mddev);
@@ -5232,6 +5302,8 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
@@ -5250,6 +5322,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
}
void md_stop_writes(struct mddev *mddev)
@@ -5636,6 +5710,8 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state |= (1<<MD_SB_BITMAP_PRESENT);
+ if (mddev_is_clustered(mddev))
+ info.state |= (1<<MD_SB_CLUSTERED);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
@@ -5691,7 +5767,7 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
return -EFAULT;
rcu_read_lock();
- rdev = find_rdev_nr_rcu(mddev, info.number);
+ rdev = md_find_rdev_nr_rcu(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
@@ -5724,6 +5800,13 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
+ if (mddev_is_clustered(mddev) &&
+ !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
+ pr_err("%s: Cannot add to clustered mddev.\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
@@ -5810,31 +5893,38 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
clear_bit(WriteMostly, &rdev->flags);
+ /*
+ * check whether the device shows up in other nodes
+ */
+ if (mddev_is_clustered(mddev)) {
+ if (info->state & (1 << MD_DISK_CANDIDATE)) {
+ /* Through --cluster-confirm */
+ set_bit(Candidate, &rdev->flags);
+ err = md_cluster_ops->new_disk_ack(mddev, true);
+ if (err) {
+ export_rdev(rdev);
+ return err;
+ }
+ } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
+ /* --add initiated by this node */
+ err = md_cluster_ops->add_new_disk_start(mddev, rdev);
+ if (err) {
+ md_cluster_ops->add_new_disk_finish(mddev);
+ export_rdev(rdev);
+ return err;
+ }
+ }
+ }
+
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
- if (!err && !mddev->pers->hot_remove_disk) {
- /* If there is hot_add_disk but no hot_remove_disk
- * then added disks for geometry changes,
- * and should be added immediately.
- */
- super_types[mddev->major_version].
- validate_super(mddev, rdev);
- err = mddev->pers->hot_add_disk(mddev, rdev);
- if (err)
- unbind_rdev_from_array(rdev);
- }
if (err)
export_rdev(rdev);
else
- sysfs_notify_dirent_safe(rdev->sysfs_state);
-
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- if (mddev->degraded)
- set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (!err)
- md_new_event(mddev);
- md_wakeup_thread(mddev->thread);
+ err = add_bound_rdev(rdev);
+ if (mddev_is_clustered(mddev) &&
+ (info->state & (1 << MD_DISK_CLUSTER_ADD)))
+ md_cluster_ops->add_new_disk_finish(mddev);
return err;
}
@@ -5895,18 +5985,29 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
if (!rdev)
return -ENXIO;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
+
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(mddev, rdev);
if (rdev->raid_disk >= 0)
goto busy;
- kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->remove_disk(mddev, rdev);
+
+ md_kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+
return 0;
busy:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
@@ -5956,12 +6057,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
err = -EINVAL;
goto abort_export;
}
+
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
- goto abort_export;
+ goto abort_clustered;
/*
* The rest should better be atomic, we can have disk failures
@@ -5972,6 +6076,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -5981,6 +6087,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_new_event(mddev);
return 0;
+abort_clustered:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
abort_export:
export_rdev(rdev);
return err;
@@ -6038,9 +6147,14 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
- err = bitmap_create(mddev);
- if (!err)
+ struct bitmap *bitmap;
+
+ bitmap = bitmap_create(mddev, -1);
+ if (!IS_ERR(bitmap)) {
+ mddev->bitmap = bitmap;
err = bitmap_load(mddev);
+ } else
+ err = PTR_ERR(bitmap);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
@@ -6293,6 +6407,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
return rv;
}
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
@@ -6300,33 +6416,49 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
- if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
- return -EINVAL;
- if (mddev->recovery || mddev->sync_thread)
- return -EBUSY;
+ if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
+ rv = -EINVAL;
+ goto err;
+ }
+ if (mddev->recovery || mddev->sync_thread) {
+ rv = -EBUSY;
+ goto err;
+ }
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
+ struct bitmap *bitmap;
/* add the bitmap */
- if (mddev->bitmap)
- return -EEXIST;
- if (mddev->bitmap_info.default_offset == 0)
- return -EINVAL;
+ if (mddev->bitmap) {
+ rv = -EEXIST;
+ goto err;
+ }
+ if (mddev->bitmap_info.default_offset == 0) {
+ rv = -EINVAL;
+ goto err;
+ }
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
mddev->pers->quiesce(mddev, 1);
- rv = bitmap_create(mddev);
- if (!rv)
+ bitmap = bitmap_create(mddev, -1);
+ if (!IS_ERR(bitmap)) {
+ mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
+ } else
+ rv = PTR_ERR(bitmap);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
- if (!mddev->bitmap)
- return -ENOENT;
- if (mddev->bitmap->storage.file)
- return -EINVAL;
+ if (!mddev->bitmap) {
+ rv = -ENOENT;
+ goto err;
+ }
+ if (mddev->bitmap->storage.file) {
+ rv = -EINVAL;
+ goto err;
+ }
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
@@ -6334,6 +6466,12 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
}
}
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+ return rv;
+err:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
return rv;
}
@@ -6393,6 +6531,7 @@ static inline bool md_ioctl_valid(unsigned int cmd)
case SET_DISK_FAULTY:
case STOP_ARRAY:
case STOP_ARRAY_RO:
+ case CLUSTERED_DISK_NACK:
return true;
default:
return false;
@@ -6665,6 +6804,13 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto unlock;
}
+ case CLUSTERED_DISK_NACK:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->new_disk_ack(mddev, false);
+ else
+ err = -EINVAL;
+ goto unlock;
+
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto unlock;
@@ -7238,6 +7384,55 @@ int unregister_md_personality(struct md_personality *p)
}
EXPORT_SYMBOL(unregister_md_personality);
+int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
+{
+ if (md_cluster_ops != NULL)
+ return -EALREADY;
+ spin_lock(&pers_lock);
+ md_cluster_ops = ops;
+ md_cluster_mod = module;
+ spin_unlock(&pers_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_md_cluster_operations);
+
+int unregister_md_cluster_operations(void)
+{
+ spin_lock(&pers_lock);
+ md_cluster_ops = NULL;
+ spin_unlock(&pers_lock);
+ return 0;
+}
+EXPORT_SYMBOL(unregister_md_cluster_operations);
+
+int md_setup_cluster(struct mddev *mddev, int nodes)
+{
+ int err;
+
+ err = request_module("md-cluster");
+ if (err) {
+ pr_err("md-cluster module not found.\n");
+ return err;
+ }
+
+ spin_lock(&pers_lock);
+ if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+ spin_unlock(&pers_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&pers_lock);
+
+ return md_cluster_ops->join(mddev, nodes);
+}
+
+void md_cluster_stop(struct mddev *mddev)
+{
+ if (!md_cluster_ops)
+ return;
+ md_cluster_ops->leave(mddev);
+ module_put(md_cluster_mod);
+}
+
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev *rdev;
@@ -7375,7 +7570,11 @@ int md_allow_write(struct mddev *mddev)
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock(&mddev->lock);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock(&mddev->lock);
@@ -7576,6 +7775,9 @@ void md_do_sync(struct md_thread *thread)
md_new_event(mddev);
update_time = jiffies;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_start(mddev, j, max_sectors);
+
blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7618,8 +7820,7 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
- sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < speed_min(mddev));
+ sectors = mddev->pers->sync_request(mddev, j, &skipped);
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
@@ -7636,6 +7837,8 @@ void md_do_sync(struct md_thread *thread)
j += sectors;
if (j > 2)
mddev->curr_resync = j;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_info_update(mddev, j, max_sectors);
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
@@ -7677,11 +7880,18 @@ void md_do_sync(struct md_thread *thread)
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
- if ((currspeed > speed_max(mddev)) ||
- !is_mddev_idle(mddev, 0)) {
+ if (currspeed > speed_max(mddev)) {
msleep(500);
goto repeat;
}
+ if (!is_mddev_idle(mddev, 0)) {
+ /*
+ * Give other IO more of a chance.
+ * The faster the devices, the less we wait.
+ */
+ wait_event(mddev->recovery_wait,
+ !atomic_read(&mddev->recovery_active));
+ }
}
}
printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
@@ -7694,7 +7904,10 @@ void md_do_sync(struct md_thread *thread)
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
- mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
+ mddev->pers->sync_request(mddev, max_sectors, &skipped);
+
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_finish(mddev);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
@@ -7925,8 +8138,13 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS) {
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+ }
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
@@ -8024,6 +8242,8 @@ void md_reap_sync_thread(struct mddev *mddev)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
@@ -8036,6 +8256,8 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8656,6 +8878,28 @@ err_wq:
return ret;
}
+void md_reload_sb(struct mddev *mddev)
+{
+ struct md_rdev *rdev, *tmp;
+
+ rdev_for_each_safe(rdev, tmp, mddev) {
+ rdev->sb_loaded = 0;
+ ClearPageUptodate(rdev->sb_page);
+ }
+ mddev->raid_disks = 0;
+ analyze_sbs(mddev);
+ rdev_for_each_safe(rdev, tmp, mddev) {
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ /* since we don't write to faulty devices, we figure out if the
+ * disk is faulty by comparing events
+ */
+ if (mddev->events > sb->events)
+ set_bit(Faulty, &rdev->flags);
+ }
+
+}
+EXPORT_SYMBOL(md_reload_sb);
+
#ifndef MODULE
/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 318ca8fd430f..4046a6c6f223 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -23,6 +23,7 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
@@ -170,6 +171,10 @@ enum flag_bits {
* a want_replacement device with same
* raid_disk number.
*/
+ Candidate, /* For clustered environments only:
+ * This device is seen locally but not
+ * by the whole cluster
+ */
};
#define BB_LEN_MASK (0x00000000000001FFULL)
@@ -202,6 +207,8 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
+struct md_cluster_info;
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -430,6 +437,8 @@ struct mddev {
unsigned long daemon_sleep; /* how many jiffies between updates? */
unsigned long max_write_behind; /* write-behind mode */
int external;
+ int nodes; /* Maximum number of nodes in the cluster */
+ char cluster_name[64]; /* Name of the cluster */
} bitmap_info;
atomic_t max_corr_read_errors; /* max read retries */
@@ -448,6 +457,7 @@ struct mddev {
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
+ struct md_cluster_info *cluster_info;
};
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -496,7 +506,7 @@ struct md_personality
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
- sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
+ sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
int (*resize) (struct mddev *mddev, sector_t sectors);
sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
int (*check_reshape) (struct mddev *mddev);
@@ -608,6 +618,11 @@ static inline void safe_put_page(struct page *p)
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
+extern int register_md_cluster_operations(struct md_cluster_operations *ops,
+ struct module *module);
+extern int unregister_md_cluster_operations(void);
+extern int md_setup_cluster(struct mddev *mddev, int nodes);
+extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
void (*run)(struct md_thread *thread),
struct mddev *mddev,
@@ -654,6 +669,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
+extern void md_reload_sb(struct mddev *mddev);
+extern void md_update_sb(struct mddev *mddev, int force);
+extern void md_kick_rdev_from_array(struct md_rdev * rdev);
+struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
static inline int mddev_check_plugged(struct mddev *mddev)
{
return !!blk_check_plugged(md_unplug, mddev,
@@ -669,4 +688,9 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
}
}
+extern struct md_cluster_operations *md_cluster_ops;
+static inline int mddev_is_clustered(struct mddev *mddev)
+{
+ return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
+}
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 3b5d7f704aa3..6a68ef5246d4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -188,8 +188,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
dev[j] = rdev1;
- disk_stack_limits(mddev->gendisk, rdev1->bdev,
- rdev1->data_offset << 9);
+ if (mddev->queue)
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
conf->has_merge_bvec = 1;
@@ -271,14 +272,16 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
goto abort;
}
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
+ if (mddev->queue) {
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
- if (!discard_supported)
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ }
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -429,9 +432,12 @@ static int raid0_run(struct mddev *mddev)
}
if (md_check_no_bitmap(mddev))
return -EINVAL;
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+ if (mddev->queue) {
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+ }
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -448,16 +454,17 @@ static int raid0_run(struct mddev *mddev)
printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
mdname(mddev),
(unsigned long long)mddev->array_sectors);
- /* calculate the max read-ahead size.
- * For read-ahead of large files to be effective, we need to
- * readahead at least twice a whole stripe. i.e. number of devices
- * multiplied by chunk size times 2.
- * If an individual device has an ra_pages greater than the
- * chunk size, then we will not drive that device as hard as it
- * wants. We consider this a configuration error: a larger
- * chunksize should be used in that case.
- */
- {
+
+ if (mddev->queue) {
+ /* calculate the max read-ahead size.
+ * For read-ahead of large files to be effective, we need to
+ * readahead at least twice a whole stripe. i.e. number of devices
+ * multiplied by chunk size times 2.
+ * If an individual device has an ra_pages greater than the
+ * chunk size, then we will not drive that device as hard as it
+ * wants. We consider this a configuration error: a larger
+ * chunksize should be used in that case.
+ */
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d34e238afa54..9157a29c8dbf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -539,7 +539,13 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
has_nonrot_disk = 0;
choose_next_idle = 0;
- choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
+ if ((conf->mddev->recovery_cp < this_sector + sectors) ||
+ (mddev_is_clustered(conf->mddev) &&
+ md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+ this_sector + sectors)))
+ choose_first = 1;
+ else
+ choose_first = 0;
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
sector_t dist;
@@ -1102,8 +1108,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
if (bio_data_dir(bio) == WRITE &&
- bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_iter.bi_sector < mddev->suspend_hi) {
+ ((bio_end_sector(bio) > mddev->suspend_lo &&
+ bio->bi_iter.bi_sector < mddev->suspend_hi) ||
+ (mddev_is_clustered(mddev) &&
+ md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1114,7 +1122,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_iter.bi_sector >= mddev->suspend_hi)
+ bio->bi_iter.bi_sector >= mddev->suspend_hi ||
+ (mddev_is_clustered(mddev) &&
+ !md_cluster_ops->area_resyncing(mddev,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
schedule();
}
@@ -1561,6 +1572,7 @@ static int raid1_spare_active(struct mddev *mddev)
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
if (repl
+ && !test_bit(Candidate, &repl->flags)
&& repl->recovery_offset == MaxSector
&& !test_bit(Faulty, &repl->flags)
&& !test_and_set_bit(In_sync, &repl->flags)) {
@@ -2468,7 +2480,7 @@ static int init_resync(struct r1conf *conf)
* that can be installed to exclude normal IO requests.
*/
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
@@ -2521,13 +2533,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*skipped = 1;
return sync_blocks;
}
- /*
- * If there is non-resync activity waiting for a turn,
- * and resync is going fast enough,
- * then let it though before starting on this new sync request.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a7196c49d15d..e793ab6b3570 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2889,7 +2889,7 @@ static int init_resync(struct r10conf *conf)
*/
static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
- int *skipped, int go_faster)
+ int *skipped)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
@@ -2994,12 +2994,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (conf->geo.near_copies < conf->geo.raid_disks &&
max_sector > (sector_nr | chunk_mask))
max_sector = (sector_nr | chunk_mask) + 1;
- /*
- * If there is non-resync activity waiting for us then
- * put in a delay to throttle resync.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cd2f96b2c572..1ba97fdc6df1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -54,6 +54,7 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
+#include <linux/flex_array.h>
#include <trace/events/block.h>
#include "md.h"
@@ -496,7 +497,7 @@ static void shrink_buffers(struct stripe_head *sh)
}
}
-static int grow_buffers(struct stripe_head *sh)
+static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
{
int i;
int num = sh->raid_conf->pool_size;
@@ -504,7 +505,7 @@ static int grow_buffers(struct stripe_head *sh)
for (i = 0; i < num; i++) {
struct page *page;
- if (!(page = alloc_page(GFP_KERNEL))) {
+ if (!(page = alloc_page(gfp))) {
return 1;
}
sh->dev[i].page = page;
@@ -525,6 +526,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
BUG_ON(stripe_operations_active(sh));
+ BUG_ON(sh->batch_head);
pr_debug("init_stripe called, stripe %llu\n",
(unsigned long long)sector);
@@ -552,8 +554,10 @@ retry:
}
if (read_seqcount_retry(&conf->gen_lock, seq))
goto retry;
+ sh->overwrite_disks = 0;
insert_hash(conf, sh);
sh->cpu = smp_processor_id();
+ set_bit(STRIPE_BATCH_READY, &sh->state);
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -668,20 +672,28 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
*(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
- if (!conf->inactive_blocked)
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
sh = get_free_stripe(conf, hash);
+ if (!sh && llist_empty(&conf->released_stripes) &&
+ !test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE,
+ &conf->cache_state);
+ }
if (noblock && sh == NULL)
break;
if (!sh) {
- conf->inactive_blocked = 1;
+ set_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state);
wait_event_lock_irq(
conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
- || !conf->inactive_blocked),
+ || !test_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state)),
*(conf->hash_locks + hash));
- conf->inactive_blocked = 0;
+ clear_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state);
} else {
init_stripe(sh, sector, previous);
atomic_inc(&sh->count);
@@ -708,6 +720,130 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
return sh;
}
+static bool is_full_stripe_write(struct stripe_head *sh)
+{
+ BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
+ return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
+}
+
+static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+{
+ local_irq_disable();
+ if (sh1 > sh2) {
+ spin_lock(&sh2->stripe_lock);
+ spin_lock_nested(&sh1->stripe_lock, 1);
+ } else {
+ spin_lock(&sh1->stripe_lock);
+ spin_lock_nested(&sh2->stripe_lock, 1);
+ }
+}
+
+static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+{
+ spin_unlock(&sh1->stripe_lock);
+ spin_unlock(&sh2->stripe_lock);
+ local_irq_enable();
+}
+
+/* Only a freshly initialized stripe that is a full-stripe normal write can be added to a batch list */
+static bool stripe_can_batch(struct stripe_head *sh)
+{
+ return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+ is_full_stripe_write(sh);
+}
+
+/* we only search backwards (the immediately preceding stripe) for a batch head */
+static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
+{
+ struct stripe_head *head;
+ sector_t head_sector, tmp_sec;
+ int hash;
+ int dd_idx;
+
+ if (!stripe_can_batch(sh))
+ return;
+ /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
+ tmp_sec = sh->sector;
+ if (!sector_div(tmp_sec, conf->chunk_sectors))
+ return;
+ head_sector = sh->sector - STRIPE_SECTORS;
+
+ hash = stripe_hash_locks_hash(head_sector);
+ spin_lock_irq(conf->hash_locks + hash);
+ head = __find_stripe(conf, head_sector, conf->generation);
+ if (head && !atomic_inc_not_zero(&head->count)) {
+ spin_lock(&conf->device_lock);
+ if (!atomic_read(&head->count)) {
+ if (!test_bit(STRIPE_HANDLE, &head->state))
+ atomic_inc(&conf->active_stripes);
+ BUG_ON(list_empty(&head->lru) &&
+ !test_bit(STRIPE_EXPANDING, &head->state));
+ list_del_init(&head->lru);
+ if (head->group) {
+ head->group->stripes_cnt--;
+ head->group = NULL;
+ }
+ }
+ atomic_inc(&head->count);
+ spin_unlock(&conf->device_lock);
+ }
+ spin_unlock_irq(conf->hash_locks + hash);
+
+ if (!head)
+ return;
+ if (!stripe_can_batch(head))
+ goto out;
+
+ lock_two_stripes(head, sh);
+ /* clear_batch_ready() may have cleared the flag */
+ if (!stripe_can_batch(head) || !stripe_can_batch(sh))
+ goto unlock_out;
+
+ if (sh->batch_head)
+ goto unlock_out;
+
+ dd_idx = 0;
+ while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ dd_idx++;
+ if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+ goto unlock_out;
+
+ if (head->batch_head) {
+ spin_lock(&head->batch_head->batch_lock);
+ /* This batch list is already running */
+ if (!stripe_can_batch(head)) {
+ spin_unlock(&head->batch_head->batch_lock);
+ goto unlock_out;
+ }
+
+ /*
+ * at this point, head's BATCH_READY could be cleared, but we
+ * can still add the stripe to batch list
+ */
+ list_add(&sh->batch_list, &head->batch_list);
+ spin_unlock(&head->batch_head->batch_lock);
+
+ sh->batch_head = head->batch_head;
+ } else {
+ head->batch_head = head;
+ sh->batch_head = head->batch_head;
+ spin_lock(&head->batch_lock);
+ list_add_tail(&sh->batch_list, &head->batch_list);
+ spin_unlock(&head->batch_lock);
+ }
+
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ if (atomic_dec_return(&conf->preread_active_stripes)
+ < IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+
+ atomic_inc(&sh->count);
+unlock_out:
+ unlock_two_stripes(head, sh);
+out:
+ release_stripe(head);
+}
+
/* Determine if 'data_offset' or 'new_data_offset' should be used
* in this stripe_head.
*/
@@ -738,6 +874,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
struct r5conf *conf = sh->raid_conf;
int i, disks = sh->disks;
+ struct stripe_head *head_sh = sh;
might_sleep();
@@ -746,6 +883,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
+
+ sh = head_sh;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
rw = WRITE_FUA;
@@ -764,6 +903,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
rw |= REQ_SYNC;
+again:
bi = &sh->dev[i].req;
rbi = &sh->dev[i].rreq; /* For writing to replacement */
@@ -782,7 +922,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
/* We raced and saw duplicates */
rrdev = NULL;
} else {
- if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
+ if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
rdev = rrdev;
rrdev = NULL;
}
@@ -853,13 +993,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
+ if (sh != head_sh)
+ atomic_inc(&head_sh->count);
if (use_new_offset(conf, sh))
bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
@@ -903,6 +1045,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
rbi->bi_rw, i);
atomic_inc(&sh->count);
+ if (sh != head_sh)
+ atomic_inc(&head_sh->count);
if (use_new_offset(conf, sh))
rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->new_data_offset);
@@ -936,6 +1080,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
}
+
+ if (!head_sh->batch_head)
+ continue;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ if (sh != head_sh)
+ goto again;
}
}
@@ -1051,6 +1202,7 @@ static void ops_run_biofill(struct stripe_head *sh)
struct async_submit_ctl submit;
int i;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1109,16 +1261,28 @@ static void ops_complete_compute(void *stripe_head_ref)
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
- struct raid5_percpu *percpu)
+ struct raid5_percpu *percpu, int i)
+{
+ void *addr;
+
+ addr = flex_array_get(percpu->scribble, i);
+ return addr + sizeof(struct page *) * (sh->disks + 2);
+}
+
+/* return a pointer to the page list region of the scribble buffer */
+static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
{
- return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
+ void *addr;
+
+ addr = flex_array_get(percpu->scribble, i);
+ return addr;
}
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
int target = sh->ops.target;
struct r5dev *tgt = &sh->dev[target];
struct page *xor_dest = tgt->page;
@@ -1127,6 +1291,8 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
struct async_submit_ctl submit;
int i;
+ BUG_ON(sh->batch_head);
+
pr_debug("%s: stripe %llu block: %d\n",
__func__, (unsigned long long)sh->sector, target);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
@@ -1138,7 +1304,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
- ops_complete_compute, sh, to_addr_conv(sh, percpu));
+ ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
@@ -1156,7 +1322,9 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
* destination buffer is recorded in srcs[count] and the Q destination
* is recorded in srcs[count+1]].
*/
-static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
+static int set_syndrome_sources(struct page **srcs,
+ struct stripe_head *sh,
+ int srctype)
{
int disks = sh->disks;
int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
@@ -1171,8 +1339,15 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
i = d0_idx;
do {
int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+ struct r5dev *dev = &sh->dev[i];
- srcs[slot] = sh->dev[i].page;
+ if (i == sh->qd_idx || i == sh->pd_idx ||
+ (srctype == SYNDROME_SRC_ALL) ||
+ (srctype == SYNDROME_SRC_WANT_DRAIN &&
+ test_bit(R5_Wantdrain, &dev->flags)) ||
+ (srctype == SYNDROME_SRC_WRITTEN &&
+ dev->written))
+ srcs[slot] = sh->dev[i].page;
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
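
With the new srctype argument, set_syndrome_sources() can feed async_gen_syndrome() three different source sets (the SYNDROME_SRC_* constants are added to raid5.h later in this patch), which is what makes RAID6 read-modify-write possible. A hedged sketch of the selection predicate, mirroring the condition added above (simplified userspace code, not the kernel function):

#include <stdbool.h>
#include <stdio.h>

enum { SYNDROME_SRC_ALL, SYNDROME_SRC_WANT_DRAIN, SYNDROME_SRC_WRITTEN };

struct dev_state {
	bool want_drain;	/* R5_Wantdrain is set on this device */
	bool written;		/* dev->written is non-NULL */
};

/* does device i contribute a source page for this syndrome operation? */
static bool include_source(int i, int pd_idx, int qd_idx,
			   const struct dev_state *dev, int srctype)
{
	if (i == pd_idx || i == qd_idx)
		return true;			/* P and Q always participate */
	switch (srctype) {
	case SYNDROME_SRC_ALL:			/* full reconstruct-write */
		return true;
	case SYNDROME_SRC_WANT_DRAIN:		/* rmw step 1: blocks about to be overwritten */
		return dev->want_drain;
	case SYNDROME_SRC_WRITTEN:		/* rmw step 2: blocks that were just written */
		return dev->written;
	}
	return false;
}

int main(void)
{
	struct dev_state d = { .want_drain = true, .written = false };

	printf("%d %d %d\n",
	       include_source(3, 0, 1, &d, SYNDROME_SRC_ALL),
	       include_source(3, 0, 1, &d, SYNDROME_SRC_WANT_DRAIN),
	       include_source(3, 0, 1, &d, SYNDROME_SRC_WRITTEN));
	return 0;
}
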
@@ -1183,7 +1358,7 @@ static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
- struct page **blocks = percpu->scribble;
+ struct page **blocks = to_addr_page(percpu, 0);
int target;
int qd_idx = sh->qd_idx;
struct dma_async_tx_descriptor *tx;
@@ -1193,6 +1368,7 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
int i;
int count;
+ BUG_ON(sh->batch_head);
if (sh->ops.target < 0)
target = sh->ops.target2;
else if (sh->ops.target2 < 0)
@@ -1211,12 +1387,12 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
atomic_inc(&sh->count);
if (target == qd_idx) {
- count = set_syndrome_sources(blocks, sh);
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
blocks[count] = NULL; /* regenerating p is not necessary */
BUG_ON(blocks[count+1] != dest); /* q should already be set */
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
} else {
/* Compute any data- or p-drive using XOR */
@@ -1229,7 +1405,7 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
}
@@ -1248,9 +1424,10 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
struct r5dev *tgt = &sh->dev[target];
struct r5dev *tgt2 = &sh->dev[target2];
struct dma_async_tx_descriptor *tx;
- struct page **blocks = percpu->scribble;
+ struct page **blocks = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu block1: %d block2: %d\n",
__func__, (unsigned long long)sh->sector, target, target2);
BUG_ON(target < 0 || target2 < 0);
@@ -1290,7 +1467,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
/* Missing P+Q, just recompute */
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, syndrome_disks+2,
STRIPE_SIZE, &submit);
} else {
@@ -1314,21 +1491,21 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit,
ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, NULL, NULL,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
&submit);
- count = set_syndrome_sources(blocks, sh);
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
init_async_submit(&submit, ASYNC_TX_FENCE, tx,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, count+2,
STRIPE_SIZE, &submit);
}
} else {
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
if (failb == syndrome_disks) {
/* We're missing D+P. */
return async_raid6_datap_recov(syndrome_disks+2,
@@ -1352,17 +1529,18 @@ static void ops_complete_prexor(void *stripe_head_ref)
}
static struct dma_async_tx_descriptor *
-ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
- struct dma_async_tx_descriptor *tx)
+ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
int count = 0, pd_idx = sh->pd_idx, i;
struct async_submit_ctl submit;
/* existing parity data subtracted */
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1374,31 +1552,56 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
}
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
- ops_complete_prexor, sh, to_addr_conv(sh, percpu));
+ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
return tx;
}
static struct dma_async_tx_descriptor *
+ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
+{
+ struct page **blocks = to_addr_page(percpu, 0);
+ int count;
+ struct async_submit_ctl submit;
+
+ pr_debug("%s: stripe %llu\n", __func__,
+ (unsigned long long)sh->sector);
+
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
+
+ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
+ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
+ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+
+ return tx;
+}
+
+static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
int i;
+ struct stripe_head *head_sh = sh;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
+ struct r5dev *dev;
struct bio *chosen;
- if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
+ sh = head_sh;
+ if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
struct bio *wbi;
+again:
+ dev = &sh->dev[i];
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
+ sh->overwrite_disks = 0;
BUG_ON(dev->written);
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
@@ -1423,6 +1626,15 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
}
wbi = r5_next_bio(wbi, dev->sector);
}
+
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+ batch_list);
+ if (sh == head_sh)
+ continue;
+ goto again;
+ }
}
}
@@ -1478,12 +1690,15 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs;
struct async_submit_ctl submit;
- int count = 0, pd_idx = sh->pd_idx, i;
+ int count, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
int prexor = 0;
unsigned long flags;
+ int j = 0;
+ struct stripe_head *head_sh = sh;
+ int last_stripe;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1500,15 +1715,18 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
ops_complete_reconstruct(sh);
return;
}
+again:
+ count = 0;
+ xor_srcs = to_addr_page(percpu, j);
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (written)
*/
- if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+ if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
prexor = 1;
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->written)
+ if (head_sh->dev[i].written)
xor_srcs[count++] = dev->page;
}
} else {
@@ -1525,17 +1743,32 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
* for the synchronous xor case
*/
- flags = ASYNC_TX_ACK |
- (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
-
- atomic_inc(&sh->count);
+ last_stripe = !head_sh->batch_head ||
+ list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list) == head_sh;
+ if (last_stripe) {
+ flags = ASYNC_TX_ACK |
+ (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
+
+ atomic_inc(&head_sh->count);
+ init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
+ to_addr_conv(sh, percpu, j));
+ } else {
+ flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
+ init_async_submit(&submit, flags, tx, NULL, NULL,
+ to_addr_conv(sh, percpu, j));
+ }
- init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
- to_addr_conv(sh, percpu));
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
+ if (!last_stripe) {
+ j++;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ goto again;
+ }
}
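
ops_run_reconstruct5() (and the RAID6 variant that follows) now walks the whole batch, chaining one xor or syndrome submission per member through tx; only the final submission carries ASYNC_TX_ACK and the ops_complete_reconstruct callback for the batch head. A toy model of that chaining discipline, with hypothetical types (illustrative only):

#include <stdio.h>

/* toy stand-in for a dma_async_tx_descriptor chain */
struct op {
	int id;
	struct op *depends_on;			/* the "tx" the next op waits on */
	void (*callback)(const char *);		/* set on the last member only */
	const char *callback_arg;
};

static void reconstruct_done(const char *who)
{
	printf("%s: reconstruct complete for the whole batch\n", who);
}

int main(void)
{
	struct op ops[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct op *tx = NULL;
	int j, nmembers = 3;

	for (j = 0; j < nmembers; j++) {
		int last_stripe = (j == nmembers - 1);

		ops[j].depends_on = tx;		/* chain onto the previous submission */
		if (last_stripe) {		/* the head gets the completion callback */
			ops[j].callback = reconstruct_done;
			ops[j].callback_arg = "head_sh";
		}
		tx = &ops[j];
	}

	for (tx = &ops[nmembers - 1]; tx; tx = tx->depends_on)
		printf("op %d waits on %d\n", tx->id,
		       tx->depends_on ? tx->depends_on->id : -1);
	if (ops[nmembers - 1].callback)
		ops[nmembers - 1].callback(ops[nmembers - 1].callback_arg);
	return 0;
}
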
static void
@@ -1543,8 +1776,12 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
struct async_submit_ctl submit;
- struct page **blocks = percpu->scribble;
- int count, i;
+ struct page **blocks;
+ int count, i, j = 0;
+ struct stripe_head *head_sh = sh;
+ int last_stripe;
+ int synflags;
+ unsigned long txflags;
pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
@@ -1562,13 +1799,36 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
return;
}
- count = set_syndrome_sources(blocks, sh);
+again:
+ blocks = to_addr_page(percpu, j);
- atomic_inc(&sh->count);
+ if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+ synflags = SYNDROME_SRC_WRITTEN;
+ txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
+ } else {
+ synflags = SYNDROME_SRC_ALL;
+ txflags = ASYNC_TX_ACK;
+ }
+
+ count = set_syndrome_sources(blocks, sh, synflags);
+ last_stripe = !head_sh->batch_head ||
+ list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list) == head_sh;
- init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
- sh, to_addr_conv(sh, percpu));
+ if (last_stripe) {
+ atomic_inc(&head_sh->count);
+ init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
+ head_sh, to_addr_conv(sh, percpu, j));
+ } else
+ init_async_submit(&submit, 0, tx, NULL, NULL,
+ to_addr_conv(sh, percpu, j));
async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+ if (!last_stripe) {
+ j++;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ goto again;
+ }
}
static void ops_complete_check(void *stripe_head_ref)
@@ -1589,7 +1849,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
struct page *xor_dest;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
int count;
@@ -1598,6 +1858,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+ BUG_ON(sh->batch_head);
count = 0;
xor_dest = sh->dev[pd_idx].page;
xor_srcs[count++] = xor_dest;
@@ -1608,7 +1869,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
}
init_async_submit(&submit, 0, NULL, NULL, NULL,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
&sh->ops.zero_sum_result, &submit);
@@ -1619,20 +1880,21 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
- struct page **srcs = percpu->scribble;
+ struct page **srcs = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
int count;
pr_debug("%s: stripe %llu checkp: %d\n", __func__,
(unsigned long long)sh->sector, checkp);
- count = set_syndrome_sources(srcs, sh);
+ BUG_ON(sh->batch_head);
+ count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
if (!checkp)
srcs[count] = NULL;
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
- sh, to_addr_conv(sh, percpu));
+ sh, to_addr_conv(sh, percpu, 0));
async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
@@ -1667,8 +1929,12 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
async_tx_ack(tx);
}
- if (test_bit(STRIPE_OP_PREXOR, &ops_request))
- tx = ops_run_prexor(sh, percpu, tx);
+ if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
+ if (level < 6)
+ tx = ops_run_prexor5(sh, percpu, tx);
+ else
+ tx = ops_run_prexor6(sh, percpu, tx);
+ }
if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
tx = ops_run_biodrain(sh, tx);
@@ -1693,7 +1959,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
BUG();
}
- if (overlap_clear)
+ if (overlap_clear && !sh->batch_head)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_Overlap, &dev->flags))
@@ -1702,28 +1968,42 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static int grow_one_stripe(struct r5conf *conf, int hash)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+{
+ struct stripe_head *sh;
+
+ sh = kmem_cache_zalloc(sc, gfp);
+ if (sh) {
+ spin_lock_init(&sh->stripe_lock);
+ spin_lock_init(&sh->batch_lock);
+ INIT_LIST_HEAD(&sh->batch_list);
+ INIT_LIST_HEAD(&sh->lru);
+ atomic_set(&sh->count, 1);
+ }
+ return sh;
+}
+static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
{
struct stripe_head *sh;
- sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
+
+ sh = alloc_stripe(conf->slab_cache, gfp);
if (!sh)
return 0;
sh->raid_conf = conf;
- spin_lock_init(&sh->stripe_lock);
-
- if (grow_buffers(sh)) {
+ if (grow_buffers(sh, gfp)) {
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
- sh->hash_lock_index = hash;
+ sh->hash_lock_index =
+ conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
/* we just created an active stripe so... */
- atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
- INIT_LIST_HEAD(&sh->lru);
+
release_stripe(sh);
+ conf->max_nr_stripes++;
return 1;
}
@@ -1731,7 +2011,6 @@ static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
- int hash;
if (conf->mddev->gendisk)
sprintf(conf->cache_name[0],
@@ -1749,13 +2028,10 @@ static int grow_stripes(struct r5conf *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
- while (num--) {
- if (!grow_one_stripe(conf, hash))
+ while (num--)
+ if (!grow_one_stripe(conf, GFP_KERNEL))
return 1;
- conf->max_nr_stripes++;
- hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
- }
+
return 0;
}
@@ -1772,13 +2048,50 @@ static int grow_stripes(struct r5conf *conf, int num)
* calculate over all devices (not just the data blocks), using zeros in place
* of the P and Q blocks.
*/
-static size_t scribble_len(int num)
+static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
{
+ struct flex_array *ret;
size_t len;
len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
+ ret = flex_array_alloc(len, cnt, flags);
+ if (!ret)
+ return NULL;
+ /* always prealloc all elements, so no locking is required */
+ if (flex_array_prealloc(ret, 0, cnt, flags)) {
+ flex_array_free(ret);
+ return NULL;
+ }
+ return ret;
+}
- return len;
+static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+{
+ unsigned long cpu;
+ int err = 0;
+
+ mddev_suspend(conf->mddev);
+ get_online_cpus();
+ for_each_present_cpu(cpu) {
+ struct raid5_percpu *percpu;
+ struct flex_array *scribble;
+
+ percpu = per_cpu_ptr(conf->percpu, cpu);
+ scribble = scribble_alloc(new_disks,
+ new_sectors / STRIPE_SECTORS,
+ GFP_NOIO);
+
+ if (scribble) {
+ flex_array_free(percpu->scribble);
+ percpu->scribble = scribble;
+ } else {
+ err = -ENOMEM;
+ break;
+ }
+ }
+ put_online_cpus();
+ mddev_resume(conf->mddev);
+ return err;
}
static int resize_stripes(struct r5conf *conf, int newsize)

@@ -1809,7 +2122,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
struct stripe_head *osh, *nsh;
LIST_HEAD(newstripes);
struct disk_info *ndisks;
- unsigned long cpu;
int err;
struct kmem_cache *sc;
int i;
@@ -1830,13 +2142,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
return -ENOMEM;
for (i = conf->max_nr_stripes; i; i--) {
- nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
+ nsh = alloc_stripe(sc, GFP_KERNEL);
if (!nsh)
break;
nsh->raid_conf = conf;
- spin_lock_init(&nsh->stripe_lock);
-
list_add(&nsh->lru, &newstripes);
}
if (i) {
@@ -1863,13 +2173,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
lock_device_hash_lock(conf, hash));
osh = get_free_stripe(conf, hash);
unlock_device_hash_lock(conf, hash);
- atomic_set(&nsh->count, 1);
+
for(i=0; i<conf->pool_size; i++) {
nsh->dev[i].page = osh->dev[i].page;
nsh->dev[i].orig_page = osh->dev[i].page;
}
- for( ; i<newsize; i++)
- nsh->dev[i].page = NULL;
nsh->hash_lock_index = hash;
kmem_cache_free(conf->slab_cache, osh);
cnt++;
@@ -1895,25 +2203,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
} else
err = -ENOMEM;
- get_online_cpus();
- conf->scribble_len = scribble_len(newsize);
- for_each_present_cpu(cpu) {
- struct raid5_percpu *percpu;
- void *scribble;
-
- percpu = per_cpu_ptr(conf->percpu, cpu);
- scribble = kmalloc(conf->scribble_len, GFP_NOIO);
-
- if (scribble) {
- kfree(percpu->scribble);
- percpu->scribble = scribble;
- } else {
- err = -ENOMEM;
- break;
- }
- }
- put_online_cpus();
-
/* Step 4, return new stripes to service */
while(!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -1933,13 +2222,15 @@ static int resize_stripes(struct r5conf *conf, int newsize)
conf->slab_cache = sc;
conf->active_name = 1-conf->active_name;
- conf->pool_size = newsize;
+ if (!err)
+ conf->pool_size = newsize;
return err;
}
-static int drop_one_stripe(struct r5conf *conf, int hash)
+static int drop_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
+ int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
spin_lock_irq(conf->hash_locks + hash);
sh = get_free_stripe(conf, hash);
@@ -1950,15 +2241,15 @@ static int drop_one_stripe(struct r5conf *conf, int hash)
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
+ conf->max_nr_stripes--;
return 1;
}
static void shrink_stripes(struct r5conf *conf)
{
- int hash;
- for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
- while (drop_one_stripe(conf, hash))
- ;
+ while (conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+ ;
if (conf->slab_cache)
kmem_cache_destroy(conf->slab_cache);
@@ -2154,10 +2445,16 @@ static void raid5_end_write_request(struct bio *bi, int error)
}
rdev_dec_pending(rdev, conf->mddev);
+ if (sh->batch_head && !uptodate && !replacement)
+ set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
+
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
+
+ if (sh->batch_head && sh != sh->batch_head)
+ release_stripe(sh->batch_head);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
@@ -2535,7 +2832,7 @@ static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
{
- int i, pd_idx = sh->pd_idx, disks = sh->disks;
+ int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
struct r5conf *conf = sh->raid_conf;
int level = conf->level;
@@ -2571,13 +2868,15 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
atomic_inc(&conf->pending_full_writes);
} else {
- BUG_ON(level == 6);
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
+ BUG_ON(level == 6 &&
+ (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
+ test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (i == pd_idx)
+ if (i == pd_idx || i == qd_idx)
continue;
if (dev->towrite &&
@@ -2624,7 +2923,8 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
* toread/towrite point to the first in a chain.
* The bi_next chain must be in order.
*/
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
+static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
+ int forwrite, int previous)
{
struct bio **bip;
struct r5conf *conf = sh->raid_conf;
@@ -2643,6 +2943,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
* protect it.
*/
spin_lock_irq(&sh->stripe_lock);
+ /* Don't allow new IO to be added to stripes in the batch list */
+ if (sh->batch_head)
+ goto overlap;
if (forwrite) {
bip = &sh->dev[dd_idx].towrite;
if (*bip == NULL)
@@ -2657,6 +2960,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap;
+ if (!forwrite || previous)
+ clear_bit(STRIPE_BATCH_READY, &sh->state);
+
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
if (*bip)
bi->bi_next = *bip;
@@ -2674,7 +2980,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector = bio_end_sector(bi);
}
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
- set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
+ if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
+ sh->overwrite_disks++;
}
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
@@ -2688,6 +2995,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sh->bm_seq = conf->seq_flush+1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
+
+ if (stripe_can_batch(sh))
+ stripe_add_to_batch_list(conf, sh);
return 1;
overlap:
@@ -2720,6 +3030,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio **return_bi)
{
int i;
+ BUG_ON(sh->batch_head);
for (i = disks; i--; ) {
struct bio *bi;
int bitmap_end = 0;
@@ -2746,6 +3057,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
/* fail all writes first */
bi = sh->dev[i].towrite;
sh->dev[i].towrite = NULL;
+ sh->overwrite_disks = 0;
spin_unlock_irq(&sh->stripe_lock);
if (bi)
bitmap_end = 1;
@@ -2834,6 +3146,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
int abort = 0;
int i;
+ BUG_ON(sh->batch_head);
clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
wake_up(&conf->wait_for_overlap);
@@ -2976,7 +3289,9 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
/* reconstruct-write isn't being forced */
return 0;
for (i = 0; i < s->failed; i++) {
- if (!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
+ if (s->failed_num[i] != sh->pd_idx &&
+ s->failed_num[i] != sh->qd_idx &&
+ !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
!test_bit(R5_OVERWRITE, &fdev[i]->flags))
return 1;
}
@@ -2996,6 +3311,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
*/
BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
BUG_ON(test_bit(R5_Wantread, &dev->flags));
+ BUG_ON(sh->batch_head);
if ((s->uptodate == disks - 1) &&
(s->failed && (disk_idx == s->failed_num[0] ||
disk_idx == s->failed_num[1]))) {
@@ -3087,6 +3403,9 @@ static void handle_stripe_clean_event(struct r5conf *conf,
int i;
struct r5dev *dev;
int discard_pending = 0;
+ struct stripe_head *head_sh = sh;
+ bool do_endio = false;
+ int wakeup_nr = 0;
for (i = disks; i--; )
if (sh->dev[i].written) {
@@ -3102,8 +3421,11 @@ static void handle_stripe_clean_event(struct r5conf *conf,
clear_bit(R5_UPTODATE, &dev->flags);
if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
- dev->page = dev->orig_page;
}
+ do_endio = true;
+
+returnbi:
+ dev->page = dev->orig_page;
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_iter.bi_sector <
@@ -3120,6 +3442,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+ batch_list);
+ if (sh != head_sh) {
+ dev = &sh->dev[i];
+ goto returnbi;
+ }
+ }
+ sh = head_sh;
+ dev = &sh->dev[i];
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
@@ -3141,8 +3474,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
* will be reinitialized
*/
spin_lock_irq(&conf->device_lock);
+unhash:
remove_hash(sh);
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list);
+ if (sh != head_sh)
+ goto unhash;
+ }
spin_unlock_irq(&conf->device_lock);
+ sh = head_sh;
+
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3151,6 +3493,45 @@ static void handle_stripe_clean_event(struct r5conf *conf,
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
md_wakeup_thread(conf->mddev->thread);
+
+ if (!head_sh->batch_head || !do_endio)
+ return;
+ for (i = 0; i < head_sh->disks; i++) {
+ if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
+ wakeup_nr++;
+ }
+ while (!list_empty(&head_sh->batch_list)) {
+ int i;
+ sh = list_first_entry(&head_sh->batch_list,
+ struct stripe_head, batch_list);
+ list_del_init(&sh->batch_list);
+
+ set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
+ head_sh->state & ~((1 << STRIPE_ACTIVE) |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+ STRIPE_EXPAND_SYNC_FLAG));
+ sh->check_state = head_sh->check_state;
+ sh->reconstruct_state = head_sh->reconstruct_state;
+ for (i = 0; i < sh->disks; i++) {
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wakeup_nr++;
+ sh->dev[i].flags = head_sh->dev[i].flags;
+ }
+
+ spin_lock_irq(&sh->stripe_lock);
+ sh->batch_head = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
+ if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+ }
+
+ spin_lock_irq(&head_sh->stripe_lock);
+ head_sh->batch_head = NULL;
+ spin_unlock_irq(&head_sh->stripe_lock);
+ wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
+ if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
+ set_bit(STRIPE_HANDLE, &head_sh->state);
}
static void handle_stripe_dirtying(struct r5conf *conf,
@@ -3161,28 +3542,27 @@ static void handle_stripe_dirtying(struct r5conf *conf,
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
- /* RAID6 requires 'rcw' in current implementation.
- * Otherwise, check whether resync is now happening or should start.
+ /* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
* initial creation), so parity in some stripes might be inconsistent.
* In this case, we need to always do reconstruct-write, to ensure
* that in case of drive failure or read-error correction, we
* generate correct data from the parity.
*/
- if (conf->max_degraded == 2 ||
+ if (conf->rmw_level == PARITY_DISABLE_RMW ||
(recovery_cp < MaxSector && sh->sector >= recovery_cp &&
s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
rcw = 1; rmw = 2;
- pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
- conf->max_degraded, (unsigned long long)recovery_cp,
+ pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
+ conf->rmw_level, (unsigned long long)recovery_cp,
(unsigned long long)sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
@@ -3192,7 +3572,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
rmw += 2*disks; /* cannot read it */
}
/* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
+ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
+ i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
@@ -3205,7 +3586,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
- if (rmw < rcw && rmw > 0) {
+ if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
if (conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue,
@@ -3213,7 +3594,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags)) &&
@@ -3232,7 +3613,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
- if (rcw <= rmw && rcw > 0) {
+ if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
/* want reconstruct write, but need to get some data */
int qread =0;
rcw = 0;
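
Together, the handle_stripe_dirtying() hunks above replace the old max_degraded test with the rmw_level knob: RAID6 may now take the read-modify-write path when the raid6 code provides xor_syndrome (set up later in the patch), and ties between rmw and rcw cost are broken by the configured preference. A condensed userspace model of the decision, simplified from the conditions shown above (the real function also schedules the reads it decides on):

#include <stdio.h>

enum { PARITY_DISABLE_RMW = 0, PARITY_ENABLE_RMW, PARITY_PREFER_RMW };

static const char *choose(int rmw, int rcw, int rmw_level)
{
	if (rmw_level == PARITY_DISABLE_RMW)
		return "rcw";		/* e.g. RAID6 without xor_syndrome support */
	if ((rmw < rcw || (rmw == rcw && rmw_level == PARITY_ENABLE_RMW)) &&
	    rmw > 0)
		return "rmw";		/* read-modify-write is cheaper */
	if ((rcw < rmw || (rcw == rmw && rmw_level != PARITY_ENABLE_RMW)) &&
	    rcw > 0)
		return "rcw";		/* reconstruct-write is cheaper */
	return "none";			/* nothing needs to be read */
}

int main(void)
{
	printf("%s\n", choose(2, 4, PARITY_ENABLE_RMW));	/* rmw */
	printf("%s\n", choose(3, 3, PARITY_PREFER_RMW));	/* rcw: tie, not ENABLE */
	printf("%s\n", choose(3, 3, PARITY_ENABLE_RMW));	/* rmw: tie with ENABLE */
	return 0;
}
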
@@ -3290,6 +3671,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
{
struct r5dev *dev = NULL;
+ BUG_ON(sh->batch_head);
set_bit(STRIPE_HANDLE, &sh->state);
switch (sh->check_state) {
@@ -3380,6 +3762,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
int qd_idx = sh->qd_idx;
struct r5dev *dev;
+ BUG_ON(sh->batch_head);
set_bit(STRIPE_HANDLE, &sh->state);
BUG_ON(s->failed > 2);
@@ -3543,6 +3926,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
* copy some of them into a target stripe for expand.
*/
struct dma_async_tx_descriptor *tx = NULL;
+ BUG_ON(sh->batch_head);
clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
for (i = 0; i < sh->disks; i++)
if (i != sh->pd_idx && i != sh->qd_idx) {
@@ -3615,8 +3999,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
memset(s, 0, sizeof(*s));
- s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
+ s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
s->failed_num[0] = -1;
s->failed_num[1] = -1;
@@ -3786,6 +4170,72 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
rcu_read_unlock();
}
+static int clear_batch_ready(struct stripe_head *sh)
+{
+ struct stripe_head *tmp;
+ if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
+ return 0;
+ spin_lock(&sh->stripe_lock);
+ if (!sh->batch_head) {
+ spin_unlock(&sh->stripe_lock);
+ return 0;
+ }
+
+ /*
+ * this stripe could be added to a batch list before we check
+ * BATCH_READY; skip it
+ */
+ if (sh->batch_head != sh) {
+ spin_unlock(&sh->stripe_lock);
+ return 1;
+ }
+ spin_lock(&sh->batch_lock);
+ list_for_each_entry(tmp, &sh->batch_list, batch_list)
+ clear_bit(STRIPE_BATCH_READY, &tmp->state);
+ spin_unlock(&sh->batch_lock);
+ spin_unlock(&sh->stripe_lock);
+
+ /*
+ * BATCH_READY is cleared, no new stripes can be added.
+ * batch_list can be accessed without lock
+ */
+ return 0;
+}
+
+static void check_break_stripe_batch_list(struct stripe_head *sh)
+{
+ struct stripe_head *head_sh, *next;
+ int i;
+
+ if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
+ return;
+
+ head_sh = sh;
+
+ list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
+
+ list_del_init(&sh->batch_list);
+
+ set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
+ head_sh->state & ~((1 << STRIPE_ACTIVE) |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+ (1 << STRIPE_DEGRADED) |
+ STRIPE_EXPAND_SYNC_FLAG));
+ sh->check_state = head_sh->check_state;
+ sh->reconstruct_state = head_sh->reconstruct_state;
+ for (i = 0; i < sh->disks; i++)
+ sh->dev[i].flags = head_sh->dev[i].flags &
+ (~((1 << R5_WriteError) | (1 << R5_Overlap)));
+
+ spin_lock_irq(&sh->stripe_lock);
+ sh->batch_head = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+ }
+}
+
static void handle_stripe(struct stripe_head *sh)
{
struct stripe_head_state s;
@@ -3803,7 +4253,14 @@ static void handle_stripe(struct stripe_head *sh)
return;
}
- if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ if (clear_batch_ready(sh) ) {
+ clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
+ return;
+ }
+
+ check_break_stripe_batch_list(sh);
+
+ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
spin_lock(&sh->stripe_lock);
/* Cannot process 'sync' concurrently with 'discard' */
if (!test_bit(STRIPE_DISCARD, &sh->state) &&
@@ -4158,7 +4615,7 @@ static int raid5_congested(struct mddev *mddev, int bits)
* how busy the stripe_cache is
*/
- if (conf->inactive_blocked)
+ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return 1;
if (conf->quiesce)
return 1;
@@ -4180,8 +4637,12 @@ static int raid5_mergeable_bvec(struct mddev *mddev,
unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
- if ((bvm->bi_rw & 1) == WRITE)
- return biovec->bv_len; /* always allow writes to be mergeable */
+ /*
+ * always allow writes to be mergeable; reads as well if the array
+ * is degraded, as we'll go through the stripe cache anyway.
+ */
+ if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
+ return biovec->bv_len;
if (mddev->new_chunk_sectors < mddev->chunk_sectors)
chunk_sectors = mddev->new_chunk_sectors;
@@ -4603,12 +5064,14 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
set_bit(STRIPE_DISCARD, &sh->state);
finish_wait(&conf->wait_for_overlap, &w);
+ sh->overwrite_disks = 0;
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)
continue;
sh->dev[d].towrite = bi;
set_bit(R5_OVERWRITE, &sh->dev[d].flags);
raid5_inc_bi_active_stripes(bi);
+ sh->overwrite_disks++;
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
@@ -4656,7 +5119,12 @@ static void make_request(struct mddev *mddev, struct bio * bi)
md_write_start(mddev, bi);
- if (rw == READ &&
+ /*
+ * If the array is degraded, better not do a chunk-aligned read, because
+ * we might later have to read it again in order to reconstruct
+ * data on failed drives.
+ */
+ if (rw == READ && mddev->degraded == 0 &&
mddev->reshape_position == MaxSector &&
chunk_aligned_read(mddev,bi))
return;
@@ -4772,7 +5240,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, rw)) {
+ !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
/* Stripe is busy expanding or
* add failed due to overlap. Flush everything
* and wait a while
@@ -4785,7 +5253,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
- if ((bi->bi_rw & REQ_SYNC) &&
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_rw & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
release_stripe_plug(mddev, sh);
@@ -5050,8 +5519,7 @@ ret:
return reshape_sectors;
}
-/* FIXME go_faster isn't used */
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
@@ -5186,7 +5654,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
- if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
+ if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
release_stripe(sh);
raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
@@ -5312,6 +5780,8 @@ static void raid5d(struct md_thread *thread)
int batch_size, released;
released = release_stripe_list(conf, conf->temp_inactive_list);
+ if (released)
+ clear_bit(R5_DID_ALLOC, &conf->cache_state);
if (
!list_empty(&conf->bitmap_list)) {
@@ -5350,6 +5820,13 @@ static void raid5d(struct md_thread *thread)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+ grow_one_stripe(conf, __GFP_NOWARN);
+ /* Set flag even if allocation failed. This helps
+ * slow down allocation requests when memory is short
+ */
+ set_bit(R5_DID_ALLOC, &conf->cache_state);
+ }
async_tx_issue_pending_all();
blk_finish_plug(&plug);
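
raid5d() now grows the stripe cache on demand: R5_ALLOC_MORE (set elsewhere in the series when a handler cannot get a free stripe) requests one extra allocation, and R5_DID_ALLOC stays set until stripes are actually released, so allocation cannot run ahead of reclaim. A toy model of that throttle using the flag names from the hunks above; the gate in request_more() is an assumption here, since the code that sets R5_ALLOC_MORE is not in these hunks:

#include <stdio.h>

#define R5_ALLOC_MORE	(1u << 0)	/* a handler wants one more stripe */
#define R5_DID_ALLOC	(1u << 1)	/* grew once; wait for a release first */

static unsigned int cache_state;
static int nr_stripes = 4;

/* called when stripes come back to the free lists */
static void stripes_released(void)
{
	cache_state &= ~R5_DID_ALLOC;
}

/* called when a handler fails to get a free stripe (assumed gating) */
static void request_more(void)
{
	if (!(cache_state & R5_DID_ALLOC))
		cache_state |= R5_ALLOC_MORE;
}

/* one pass of the daemon loop */
static void raid5d_pass(void)
{
	if (cache_state & R5_ALLOC_MORE) {
		cache_state &= ~R5_ALLOC_MORE;
		nr_stripes++;			/* grow_one_stripe(conf, __GFP_NOWARN) */
		/* set even if the allocation failed, to slow further requests */
		cache_state |= R5_DID_ALLOC;
	}
}

int main(void)
{
	request_more();
	raid5d_pass();
	request_more();			/* ignored: R5_DID_ALLOC still set */
	raid5d_pass();
	printf("stripes now: %d\n", nr_stripes);	/* 5, not 6 */
	stripes_released();
	request_more();
	raid5d_pass();
	printf("stripes now: %d\n", nr_stripes);	/* 6 */
	return 0;
}
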
@@ -5365,7 +5842,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
spin_lock(&mddev->lock);
conf = mddev->private;
if (conf)
- ret = sprintf(page, "%d\n", conf->max_nr_stripes);
+ ret = sprintf(page, "%d\n", conf->min_nr_stripes);
spin_unlock(&mddev->lock);
return ret;
}
@@ -5375,30 +5852,24 @@ raid5_set_cache_size(struct mddev *mddev, int size)
{
struct r5conf *conf = mddev->private;
int err;
- int hash;
if (size <= 16 || size > 32768)
return -EINVAL;
- hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
- while (size < conf->max_nr_stripes) {
- if (drop_one_stripe(conf, hash))
- conf->max_nr_stripes--;
- else
- break;
- hash--;
- if (hash < 0)
- hash = NR_STRIPE_HASH_LOCKS - 1;
- }
+
+ conf->min_nr_stripes = size;
+ while (size < conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+ ;
+
+
err = md_allow_write(mddev);
if (err)
return err;
- hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
- while (size > conf->max_nr_stripes) {
- if (grow_one_stripe(conf, hash))
- conf->max_nr_stripes++;
- else break;
- hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
- }
+
+ while (size > conf->max_nr_stripes)
+ if (!grow_one_stripe(conf, GFP_KERNEL))
+ break;
+
return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);
@@ -5433,6 +5904,49 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
raid5_store_stripe_cache_size);
static ssize_t
+raid5_show_rmw_level(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ if (conf)
+ return sprintf(page, "%d\n", conf->rmw_level);
+ else
+ return 0;
+}
+
+static ssize_t
+raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+
+ if (!conf)
+ return -ENODEV;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+
+ if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+ return -EINVAL;
+
+ if (new != PARITY_DISABLE_RMW &&
+ new != PARITY_ENABLE_RMW &&
+ new != PARITY_PREFER_RMW)
+ return -EINVAL;
+
+ conf->rmw_level = new;
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
+ raid5_show_rmw_level,
+ raid5_store_rmw_level);
+
+
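
The new rmw_level attribute exposes the parity preference to user space; writes are rejected unless the value is one of the three PARITY_* constants, and anything other than PARITY_DISABLE_RMW additionally requires raid6_call.xor_syndrome. A small usage sketch, assuming an array named md0 and the usual per-array md sysfs directory:

#include <stdio.h>

int main(void)
{
	/* 0 = PARITY_DISABLE_RMW, 1 = PARITY_ENABLE_RMW, 2 = PARITY_PREFER_RMW */
	FILE *f = fopen("/sys/block/md0/md/rmw_level", "w");

	if (!f) {
		perror("rmw_level");
		return 1;
	}
	if (fprintf(f, "1\n") < 0 || fclose(f) != 0) {
		perror("rmw_level");
		return 1;
	}
	return 0;
}
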
+static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
struct r5conf *conf;
@@ -5463,7 +5977,7 @@ raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
conf = mddev->private;
if (!conf)
err = -ENODEV;
- else if (new > conf->max_nr_stripes)
+ else if (new > conf->min_nr_stripes)
err = -EINVAL;
else
conf->bypass_threshold = new;
@@ -5618,6 +6132,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_preread_bypass_threshold.attr,
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
+ &raid5_rmw_level.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -5699,7 +6214,8 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (percpu->scribble)
+ flex_array_free(percpu->scribble);
percpu->spare_page = NULL;
percpu->scribble = NULL;
}
@@ -5709,7 +6225,12 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
if (conf->level == 6 && !percpu->spare_page)
percpu->spare_page = alloc_page(GFP_KERNEL);
if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+ percpu->scribble = scribble_alloc(max(conf->raid_disks,
+ conf->previous_raid_disks),
+ max(conf->chunk_sectors,
+ conf->prev_chunk_sectors)
+ / STRIPE_SECTORS,
+ GFP_KERNEL);
if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
free_scratch_buffer(conf, percpu);
@@ -5740,6 +6261,8 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ if (conf->shrinker.seeks)
+ unregister_shrinker(&conf->shrinker);
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
@@ -5807,6 +6330,30 @@ static int raid5_alloc_percpu(struct r5conf *conf)
return err;
}
+static unsigned long raid5_cache_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+ int ret = 0;
+ while (ret < sc->nr_to_scan) {
+ if (drop_one_stripe(conf) == 0)
+ return SHRINK_STOP;
+ ret++;
+ }
+ return ret;
+}
+
+static unsigned long raid5_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+
+ if (conf->max_nr_stripes < conf->min_nr_stripes)
+ /* unlikely, but not impossible */
+ return 0;
+ return conf->max_nr_stripes - conf->min_nr_stripes;
+}
+
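
raid5_cache_scan() and raid5_cache_count(), together with the register_shrinker() call added to setup_conf() further down, let the VM reclaim stripe cache memory: count advertises how much the cache may shrink above min_nr_stripes, and scan drops stripes until it reaches nr_to_scan or a drop fails, returning SHRINK_STOP in the latter case. A simplified userspace model of that contract (not the kernel API):

#include <stdio.h>

#define SHRINK_STOP	(~0UL)

static int max_nr_stripes = 512;	/* current cache size */
static int min_nr_stripes = 256;	/* floor set via stripe_cache_size */

static unsigned long cache_count(void)
{
	if (max_nr_stripes < min_nr_stripes)
		return 0;			/* nothing worth reclaiming */
	return (unsigned long)(max_nr_stripes - min_nr_stripes);
}

static unsigned long cache_scan(unsigned long nr_to_scan)
{
	unsigned long freed = 0;

	while (freed < nr_to_scan) {
		if (max_nr_stripes == 0)	/* drop_one_stripe() failed */
			return SHRINK_STOP;
		max_nr_stripes--;		/* drop_one_stripe() succeeded */
		freed++;
	}
	return freed;
}

int main(void)
{
	unsigned long freed;

	printf("reclaimable: %lu\n", cache_count());
	freed = cache_scan(128);
	printf("freed: %lu, cache now %d\n", freed, max_nr_stripes);
	return 0;
}
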
static struct r5conf *setup_conf(struct mddev *mddev)
{
struct r5conf *conf;
@@ -5879,7 +6426,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
else
conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
max_disks = max(conf->raid_disks, conf->previous_raid_disks);
- conf->scribble_len = scribble_len(max_disks);
conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
GFP_KERNEL);
@@ -5907,6 +6453,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(conf->temp_inactive_list + i);
conf->level = mddev->new_level;
+ conf->chunk_sectors = mddev->new_chunk_sectors;
if (raid5_alloc_percpu(conf) != 0)
goto abort;
@@ -5939,12 +6486,17 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->fullsync = 1;
}
- conf->chunk_sectors = mddev->new_chunk_sectors;
conf->level = mddev->new_level;
- if (conf->level == 6)
+ if (conf->level == 6) {
conf->max_degraded = 2;
- else
+ if (raid6_call.xor_syndrome)
+ conf->rmw_level = PARITY_ENABLE_RMW;
+ else
+ conf->rmw_level = PARITY_DISABLE_RMW;
+ } else {
conf->max_degraded = 1;
+ conf->rmw_level = PARITY_ENABLE_RMW;
+ }
conf->algorithm = mddev->new_layout;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
@@ -5952,10 +6504,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->prev_algo = mddev->layout;
}
- memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
+ conf->min_nr_stripes = NR_STRIPES;
+ memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
- if (grow_stripes(conf, NR_STRIPES)) {
+ if (grow_stripes(conf, conf->min_nr_stripes)) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
@@ -5963,6 +6516,17 @@ static struct r5conf *setup_conf(struct mddev *mddev)
} else
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
mdname(mddev), memory);
+ /*
+ * Losing a stripe head costs more than the time to refill it;
+ * it reduces the queue depth and so can hurt throughput.
+ * So set it rather large, scaled by number of devices.
+ */
+ conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
+ conf->shrinker.scan_objects = raid5_cache_scan;
+ conf->shrinker.count_objects = raid5_cache_count;
+ conf->shrinker.batch = 128;
+ conf->shrinker.flags = 0;
+ register_shrinker(&conf->shrinker);
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
@@ -6604,9 +7168,9 @@ static int check_stripe_cache(struct mddev *mddev)
*/
struct r5conf *conf = mddev->private;
if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
- > conf->max_nr_stripes ||
+ > conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
- > conf->max_nr_stripes) {
+ > conf->min_nr_stripes) {
printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
@@ -6642,6 +7206,15 @@ static int check_reshape(struct mddev *mddev)
if (!check_stripe_cache(mddev))
return -ENOSPC;
+ if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
+ mddev->delta_disks > 0)
+ if (resize_chunks(conf,
+ conf->previous_raid_disks
+ + max(0, mddev->delta_disks),
+ max(mddev->new_chunk_sectors,
+ mddev->chunk_sectors)
+ ) < 0)
+ return -ENOMEM;
return resize_stripes(conf, (conf->previous_raid_disks
+ mddev->delta_disks));
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 983e18a83db1..7dc0dd86074b 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -210,11 +210,19 @@ struct stripe_head {
atomic_t count; /* nr of active thread/requests */
int bm_seq; /* sequence number for bitmap flushes */
int disks; /* disks in stripe */
+ int overwrite_disks; /* total overwrite disks in stripe,
+ * this is only checked when stripe
+ * has STRIPE_BATCH_READY
+ */
enum check_states check_state;
enum reconstruct_states reconstruct_state;
spinlock_t stripe_lock;
int cpu;
struct r5worker_group *group;
+
+ struct stripe_head *batch_head; /* protected by stripe lock */
+ spinlock_t batch_lock; /* only the batch head's lock is useful */
+ struct list_head batch_list; /* protected by head's batch lock*/
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -327,8 +335,15 @@ enum {
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
STRIPE_ON_RELEASE_LIST,
+ STRIPE_BATCH_READY,
+ STRIPE_BATCH_ERR,
};
+#define STRIPE_EXPAND_SYNC_FLAG \
+ ((1 << STRIPE_EXPAND_SOURCE) |\
+ (1 << STRIPE_EXPAND_READY) |\
+ (1 << STRIPE_EXPANDING) |\
+ (1 << STRIPE_SYNC_REQUESTED))
/*
* Operation request flags
*/
@@ -340,6 +355,24 @@ enum {
STRIPE_OP_RECONSTRUCT,
STRIPE_OP_CHECK,
};
+
+/*
+ * RAID parity calculation preferences
+ */
+enum {
+ PARITY_DISABLE_RMW = 0,
+ PARITY_ENABLE_RMW,
+ PARITY_PREFER_RMW,
+};
+
+/*
+ * Pages requested from set_syndrome_sources()
+ */
+enum {
+ SYNDROME_SRC_ALL,
+ SYNDROME_SRC_WANT_DRAIN,
+ SYNDROME_SRC_WRITTEN,
+};
/*
* Plugging:
*
@@ -396,10 +429,11 @@ struct r5conf {
spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
struct mddev *mddev;
int chunk_sectors;
- int level, algorithm;
+ int level, algorithm, rmw_level;
int max_degraded;
int raid_disks;
int max_nr_stripes;
+ int min_nr_stripes;
/* reshape_progress is the leading edge of a 'reshape'
* It has value MaxSector when no reshape is happening
@@ -458,15 +492,11 @@ struct r5conf {
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
- void *scribble; /* space for constructing buffer
+ struct flex_array *scribble; /* space for constructing buffer
* lists and performing address
* conversions
*/
} __percpu *percpu;
- size_t scribble_len; /* size of scribble region must be
- * associated with conf to handle
- * cpu hotplug while reshaping
- */
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -480,9 +510,19 @@ struct r5conf {
struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
- int inactive_blocked; /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
+ unsigned long cache_state;
+#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
+ * waiting for 25% to be free
+ */
+#define R5_ALLOC_MORE 2 /* It might help to allocate another
+ * stripe.
+ */
+#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
+ * more until at least one has been
+ * released. This avoids flooding
+ * the cache.
+ */
+ struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
struct disk_info *disks;
@@ -497,6 +537,7 @@ struct r5conf {
int worker_cnt_per_group;
};
+
/*
* Our supported algorithms
*/
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 49cd30870e0d..3ef0f90b128f 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -87,13 +87,21 @@ config MEDIA_RC_SUPPORT
config MEDIA_CONTROLLER
bool "Media Controller API"
- depends on MEDIA_CAMERA_SUPPORT
+ depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT
---help---
Enable the media controller API used to query media devices internal
topology and configure it dynamically.
This API is mostly used by camera interfaces in embedded platforms.
+config MEDIA_CONTROLLER_DVB
+ bool "Enable Media controller for DVB"
+ depends on MEDIA_CONTROLLER
+ ---help---
+ Enable the media controller API support for DVB.
+
+ This is currently experimental.
+
#
# Video4Linux support
# Only enables if one of the V4L2 types (ATV, webcam, radio) is selected
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index b7d63933dae6..df1e8c975cd8 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -587,26 +587,20 @@ int saa7146_vv_release(struct saa7146_dev* dev)
}
EXPORT_SYMBOL_GPL(saa7146_vv_release);
-int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
+int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev,
char *name, int type)
{
- struct video_device *vfd;
int err;
int i;
DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type);
- // released by vfd->release
- vfd = video_device_alloc();
- if (vfd == NULL)
- return -ENOMEM;
-
vfd->fops = &video_fops;
if (type == VFL_TYPE_GRABBER)
vfd->ioctl_ops = &dev->ext_vv_data->vid_ops;
else
vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops;
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
vfd->lock = &dev->v4l2_lock;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->tvnorms = 0;
@@ -618,25 +612,20 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
err = video_register_device(vfd, type, -1);
if (err < 0) {
ERR("cannot register v4l2 device. skipping.\n");
- video_device_release(vfd);
return err;
}
pr_info("%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(vfd));
-
- *vid = vfd;
return 0;
}
EXPORT_SYMBOL_GPL(saa7146_register_device);
-int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev)
+int saa7146_unregister_device(struct video_device *vfd, struct saa7146_dev *dev)
{
DEB_EE("dev:%p\n", dev);
- video_unregister_device(*vid);
- *vid = NULL;
-
+ video_unregister_device(vfd);
return 0;
}
EXPORT_SYMBOL_GPL(saa7146_unregister_device);
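
The saa7146 change follows the V4L2 move away from video_device_alloc(): the video_device is now embedded in driver state, registration takes the embedded object directly, and release becomes video_device_release_empty because there is no separate allocation to free. A generic sketch of the embed-instead-of-allocate pattern, with stand-in types (the real ones come from the V4L2 core; this is not the driver code):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for the V4L2 core's video_device */
struct video_device {
	const char *name;
	void (*release)(struct video_device *);
};

static void release_empty(struct video_device *vdev)
{
	/* nothing to free: the device is embedded in its driver struct */
	(void)vdev;
}

/* driver state now embeds the video_device instead of holding a pointer */
struct saa7146_like_dev {
	struct video_device vfd;
};

int main(void)
{
	struct saa7146_like_dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->vfd.name = "saa7146 video";
	dev->vfd.release = release_empty;	/* was video_device_release() */
	printf("set up %s with embedded video_device\n", dev->vfd.name);
	free(dev);				/* one allocation to manage */
	return 0;
}
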
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index 1e71e374bbfe..2da995758778 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -95,7 +95,7 @@ static int vbi_workaround(struct saa7146_dev *dev)
/* prepare to wait to be woken up by the irq-handler */
add_wait_queue(&vv->vbi_wq, &wait);
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
/* start rps1 to enable workaround */
saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
@@ -106,7 +106,7 @@ static int vbi_workaround(struct saa7146_dev *dev)
DEB_VBI("brs bug workaround %d/1\n", i);
remove_wait_queue(&vv->vbi_wq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
/* disable rps1 irqs */
SAA7146_IER_DISABLE(dev,MASK_28);
diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c
index 82c7a1289f05..ca2f80c7740c 100644
--- a/drivers/media/common/siano/sms-cards.c
+++ b/drivers/media/common/siano/sms-cards.c
@@ -21,10 +21,6 @@
#include "smsir.h"
#include <linux/module.h>
-static int sms_dbg;
-module_param_named(cards_dbg, sms_dbg, int, 0644);
-MODULE_PARM_DESC(cards_dbg, "set debug level (info=1, adv=2 (or-able))");
-
static struct sms_board sms_boards[] = {
[SMS_BOARD_UNKNOWN] = {
.name = "Unknown board",
@@ -232,7 +228,7 @@ int sms_board_event(struct smscore_device_t *coredev,
break; /* BOARD_EVENT_MULTIPLEX_ERRORS */
default:
- sms_err("Unknown SMS board event");
+ pr_err("Unknown SMS board event\n");
break;
}
return 0;
@@ -342,7 +338,7 @@ int sms_board_lna_control(struct smscore_device_t *coredev, int onoff)
int board_id = smscore_get_board_id(coredev);
struct sms_board *board = sms_get_board(board_id);
- sms_debug("%s: LNA %s", __func__, onoff ? "enabled" : "disabled");
+ pr_debug("%s: LNA %s\n", __func__, onoff ? "enabled" : "disabled");
switch (board_id) {
case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
diff --git a/drivers/media/common/siano/sms-cards.h b/drivers/media/common/siano/sms-cards.h
index 4c4caddf9869..bb3d733f092b 100644
--- a/drivers/media/common/siano/sms-cards.h
+++ b/drivers/media/common/siano/sms-cards.h
@@ -20,8 +20,9 @@
#ifndef __SMS_CARDS_H__
#define __SMS_CARDS_H__
-#include <linux/usb.h>
#include "smscoreapi.h"
+
+#include <linux/usb.h>
#include "smsir.h"
#define SMS_BOARD_UNKNOWN 0
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index a3677438205e..2a8d9a36d6f0 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -21,6 +21,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "smscoreapi.h"
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -34,14 +36,9 @@
#include <linux/wait.h>
#include <asm/byteorder.h>
-#include "smscoreapi.h"
#include "sms-cards.h"
#include "smsir.h"
-static int sms_dbg;
-module_param_named(debug, sms_dbg, int, 0644);
-MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
-
struct smscore_device_notifyee_t {
struct list_head entry;
hotplug_t hotplug;
@@ -460,7 +457,7 @@ static struct smscore_registry_entry_t *smscore_find_registry(char *devpath)
strcpy(entry->devpath, devpath);
list_add(&entry->entry, &g_smscore_registry);
} else
- sms_err("failed to create smscore_registry.");
+ pr_err("failed to create smscore_registry.\n");
kmutex_unlock(&g_smscore_registrylock);
return entry;
}
@@ -473,7 +470,7 @@ int smscore_registry_getmode(char *devpath)
if (entry)
return entry->mode;
else
- sms_err("No registry found.");
+ pr_err("No registry found.\n");
return default_mode;
}
@@ -487,7 +484,7 @@ static enum sms_device_type_st smscore_registry_gettype(char *devpath)
if (entry)
return entry->type;
else
- sms_err("No registry found.");
+ pr_err("No registry found.\n");
return -EINVAL;
}
@@ -500,7 +497,7 @@ static void smscore_registry_setmode(char *devpath, int mode)
if (entry)
entry->mode = mode;
else
- sms_err("No registry found.");
+ pr_err("No registry found.\n");
}
static void smscore_registry_settype(char *devpath,
@@ -512,7 +509,7 @@ static void smscore_registry_settype(char *devpath,
if (entry)
entry->type = type;
else
- sms_err("No registry found.");
+ pr_err("No registry found.\n");
}
@@ -635,10 +632,8 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
struct smscore_buffer_t *cb;
cb = kzalloc(sizeof(struct smscore_buffer_t), GFP_KERNEL);
- if (!cb) {
- sms_info("kzalloc(...) failed");
+ if (!cb)
return NULL;
- }
cb->p = buffer;
cb->offset_in_common = buffer - (u8 *) common_buffer;
@@ -658,16 +653,19 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
* @return 0 on success, <0 on error.
*/
int smscore_register_device(struct smsdevice_params_t *params,
- struct smscore_device_t **coredev)
+ struct smscore_device_t **coredev,
+ void *mdev)
{
struct smscore_device_t *dev;
u8 *buffer;
dev = kzalloc(sizeof(struct smscore_device_t), GFP_KERNEL);
- if (!dev) {
- sms_info("kzalloc(...) failed");
+ if (!dev)
return -ENOMEM;
- }
+
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ dev->media_dev = mdev;
+#endif
/* init list entry so it could be safe in smscore_unregister_device */
INIT_LIST_HEAD(&dev->entry);
@@ -722,7 +720,7 @@ int smscore_register_device(struct smsdevice_params_t *params,
smscore_putbuffer(dev, cb);
}
- sms_info("allocated %d buffers", dev->num_buffers);
+ pr_debug("allocated %d buffers\n", dev->num_buffers);
dev->mode = DEVICE_MODE_NONE;
dev->board_id = SMS_BOARD_UNKNOWN;
@@ -746,7 +744,7 @@ int smscore_register_device(struct smsdevice_params_t *params,
*coredev = dev;
- sms_info("device %p created", dev);
+ pr_debug("device %p created\n", dev);
return 0;
}
@@ -763,7 +761,7 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
rc = coredev->sendrequest_handler(coredev->context, buffer, size);
if (rc < 0) {
- sms_info("sendrequest returned error %d", rc);
+ pr_info("sendrequest returned error %d\n", rc);
return rc;
}
@@ -786,11 +784,11 @@ static int smscore_init_ir(struct smscore_device_t *coredev)
coredev->ir.dev = NULL;
ir_io = sms_get_board(smscore_get_board_id(coredev))->board_cfg.ir;
if (ir_io) {/* only if IR port exist we use IR sub-module */
- sms_info("IR loading");
+ pr_debug("IR loading\n");
rc = sms_ir_init(coredev);
if (rc != 0)
- sms_err("Error initialization DTV IR sub-module");
+ pr_err("Error initializing DTV IR sub-module\n");
else {
buffer = kmalloc(sizeof(struct sms_msg_data2) +
SMS_DMA_ALIGNMENT,
@@ -812,11 +810,10 @@ static int smscore_init_ir(struct smscore_device_t *coredev)
kfree(buffer);
} else
- sms_err
- ("Sending IR initialization message failed");
+ pr_err("Sending IR initialization message failed\n");
}
} else
- sms_info("IR port has not been detected");
+ pr_info("IR port has not been detected\n");
return 0;
}
@@ -835,13 +832,13 @@ static int smscore_configure_board(struct smscore_device_t *coredev)
board = sms_get_board(coredev->board_id);
if (!board) {
- sms_err("no board configuration exist.");
+ pr_err("no board configuration exists.\n");
return -EINVAL;
}
if (board->mtu) {
struct sms_msg_data mtu_msg;
- sms_debug("set max transmit unit %d", board->mtu);
+ pr_debug("set max transmit unit %d\n", board->mtu);
mtu_msg.x_msg_header.msg_src_id = 0;
mtu_msg.x_msg_header.msg_dst_id = HIF_TASK;
@@ -856,7 +853,7 @@ static int smscore_configure_board(struct smscore_device_t *coredev)
if (board->crystal) {
struct sms_msg_data crys_msg;
- sms_debug("set crystal value %d", board->crystal);
+ pr_debug("set crystal value %d\n", board->crystal);
SMS_INIT_MSG(&crys_msg.x_msg_header,
MSG_SMS_NEW_CRYSTAL_REQ,
@@ -890,12 +887,12 @@ int smscore_start_device(struct smscore_device_t *coredev)
rc = smscore_set_device_mode(coredev, mode);
if (rc < 0) {
- sms_info("set device mode faile , rc %d", rc);
+ pr_info("set device mode failed, rc %d\n", rc);
return rc;
}
rc = smscore_configure_board(coredev);
if (rc < 0) {
- sms_info("configure board failed , rc %d", rc);
+ pr_info("configure board failed, rc %d\n", rc);
return rc;
}
@@ -904,7 +901,7 @@ int smscore_start_device(struct smscore_device_t *coredev)
rc = smscore_notify_callbacks(coredev, coredev->device, 1);
smscore_init_ir(coredev);
- sms_info("device %p started, rc %d", coredev, rc);
+ pr_debug("device %p started, rc %d\n", coredev, rc);
kmutex_unlock(&g_smscore_deviceslock);
@@ -927,7 +924,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
mem_address = firmware->start_address;
- sms_info("loading FW to addr 0x%x size %d",
+ pr_debug("loading FW to addr 0x%x size %d\n",
mem_address, firmware->length);
if (coredev->preload_handler) {
rc = coredev->preload_handler(coredev->context);
@@ -941,14 +938,14 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
return -ENOMEM;
if (coredev->mode != DEVICE_MODE_NONE) {
- sms_debug("sending reload command.");
+ pr_debug("sending reload command.\n");
SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_START_REQ,
sizeof(struct sms_msg_hdr));
rc = smscore_sendrequest_and_wait(coredev, msg,
msg->x_msg_header.msg_length,
&coredev->reload_start_done);
if (rc < 0) {
- sms_err("device reload failed, rc %d", rc);
+ pr_err("device reload failed, rc %d\n", rc);
goto exit_fw_download;
}
mem_address = *(u32 *) &payload[20];
@@ -982,7 +979,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
if (rc < 0)
goto exit_fw_download;
- sms_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x",
+ pr_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x\n",
calc_checksum);
SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_VALIDITY_REQ,
sizeof(msg->x_msg_header) +
@@ -1001,7 +998,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
struct sms_msg_data *trigger_msg =
(struct sms_msg_data *) msg;
- sms_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ");
+ pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n");
SMS_INIT_MSG(&msg->x_msg_header,
MSG_SMS_SWDOWNLOAD_TRIGGER_REQ,
sizeof(struct sms_msg_hdr) +
@@ -1037,12 +1034,13 @@ exit_fw_download:
kfree(msg);
if (coredev->postload_handler) {
- sms_debug("rc=%d, postload=0x%p", rc, coredev->postload_handler);
+ pr_debug("rc=%d, postload=0x%p\n",
+ rc, coredev->postload_handler);
if (rc >= 0)
return coredev->postload_handler(coredev->context);
}
- sms_debug("rc=%d", rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
@@ -1121,11 +1119,11 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX)
return NULL;
- sms_debug("trying to get fw name from sms_boards board_id %d mode %d",
+ pr_debug("trying to get fw name from sms_boards board_id %d mode %d\n",
board_id, mode);
fw = sms_get_board(board_id)->fw;
if (!fw || !fw[mode]) {
- sms_debug("cannot find fw name in sms_boards, getting from lookup table mode %d type %d",
+ pr_debug("cannot find fw name in sms_boards, getting from lookup table mode %d type %d\n",
mode, type);
return smscore_fw_lkup[type][mode];
}
@@ -1154,10 +1152,10 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
char *fw_filename = smscore_get_fw_filename(coredev, mode);
if (!fw_filename) {
- sms_err("mode %d not supported on this device", mode);
+ pr_err("mode %d not supported on this device\n", mode);
return -ENOENT;
}
- sms_debug("Firmware name: %s", fw_filename);
+ pr_debug("Firmware name: %s\n", fw_filename);
if (loadfirmware_handler == NULL && !(coredev->device_flags
& SMS_DEVICE_FAMILY2))
@@ -1165,14 +1163,14 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
rc = request_firmware(&fw, fw_filename, coredev->device);
if (rc < 0) {
- sms_err("failed to open firmware file \"%s\"", fw_filename);
+ pr_err("failed to open firmware file '%s'\n", fw_filename);
return rc;
}
- sms_info("read fw %s, buffer size=0x%zx", fw_filename, fw->size);
+ pr_debug("read fw %s, buffer size=0x%zx\n", fw_filename, fw->size);
fw_buf = kmalloc(ALIGN(fw->size, SMS_ALLOC_ALIGNMENT),
GFP_KERNEL | GFP_DMA);
if (!fw_buf) {
- sms_err("failed to allocate firmware buffer");
+ pr_err("failed to allocate firmware buffer\n");
rc = -ENOMEM;
} else {
memcpy(fw_buf, fw->data, fw->size);
@@ -1226,18 +1224,18 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
if (num_buffers == coredev->num_buffers)
break;
if (++retry > 10) {
- sms_info("exiting although not all buffers released.");
+ pr_info("exiting although not all buffers released.\n");
break;
}
- sms_info("waiting for %d buffer(s)",
+ pr_debug("waiting for %d buffer(s)\n",
coredev->num_buffers - num_buffers);
kmutex_unlock(&g_smscore_deviceslock);
msleep(100);
kmutex_lock(&g_smscore_deviceslock);
}
- sms_info("freed %d buffers", num_buffers);
+ pr_debug("freed %d buffers\n", num_buffers);
if (coredev->common_buffer)
dma_free_coherent(NULL, coredev->common_buffer_size,
@@ -1250,7 +1248,7 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
kmutex_unlock(&g_smscore_deviceslock);
- sms_info("device %p destroyed", coredev);
+ pr_debug("device %p destroyed\n", coredev);
}
EXPORT_SYMBOL_GPL(smscore_unregister_device);
@@ -1271,7 +1269,7 @@ static int smscore_detect_mode(struct smscore_device_t *coredev)
rc = smscore_sendrequest_and_wait(coredev, msg, msg->msg_length,
&coredev->version_ex_done);
if (rc == -ETIME) {
- sms_err("MSG_SMS_GET_VERSION_EX_REQ failed first try");
+ pr_err("MSG_SMS_GET_VERSION_EX_REQ failed first try\n");
if (wait_for_completion_timeout(&coredev->resume_done,
msecs_to_jiffies(5000))) {
@@ -1279,7 +1277,7 @@ static int smscore_detect_mode(struct smscore_device_t *coredev)
coredev, msg, msg->msg_length,
&coredev->version_ex_done);
if (rc < 0)
- sms_err("MSG_SMS_GET_VERSION_EX_REQ failed second try, rc %d",
+ pr_err("MSG_SMS_GET_VERSION_EX_REQ failed second try, rc %d\n",
rc);
} else
rc = -ETIME;
@@ -1308,7 +1306,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
buffer = kmalloc(sizeof(struct sms_msg_data) +
SMS_DMA_ALIGNMENT, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- sms_err("Could not allocate buffer for init device message.");
+ pr_err("Could not allocate buffer for init device message.\n");
return -ENOMEM;
}
@@ -1339,10 +1337,10 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
{
int rc = 0;
- sms_debug("set device mode to %d", mode);
+ pr_debug("set device mode to %d\n", mode);
if (coredev->device_flags & SMS_DEVICE_FAMILY2) {
if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX) {
- sms_err("invalid mode specified %d", mode);
+ pr_err("invalid mode specified %d\n", mode);
return -EINVAL;
}
@@ -1351,13 +1349,13 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
if (!(coredev->device_flags & SMS_DEVICE_NOT_READY)) {
rc = smscore_detect_mode(coredev);
if (rc < 0) {
- sms_err("mode detect failed %d", rc);
+ pr_err("mode detect failed %d\n", rc);
return rc;
}
}
if (coredev->mode == mode) {
- sms_info("device mode %d already set", mode);
+ pr_debug("device mode %d already set\n", mode);
return 0;
}
@@ -1365,19 +1363,19 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
rc = smscore_load_firmware_from_file(coredev,
mode, NULL);
if (rc >= 0)
- sms_info("firmware download success");
+ pr_debug("firmware download success\n");
} else {
- sms_info("mode %d is already supported by running firmware",
+ pr_debug("mode %d is already supported by running firmware\n",
mode);
}
if (coredev->fw_version >= 0x800) {
rc = smscore_init_device(coredev, mode);
if (rc < 0)
- sms_err("device init failed, rc %d.", rc);
+ pr_err("device init failed, rc %d.\n", rc);
}
} else {
if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX) {
- sms_err("invalid mode specified %d", mode);
+ pr_err("invalid mode specified %d\n", mode);
return -EINVAL;
}
@@ -1414,9 +1412,9 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
}
if (rc < 0)
- sms_err("return error code %d.", rc);
+ pr_err("return error code %d.\n", rc);
else
- sms_debug("Success setting device mode.");
+ pr_debug("Success setting device mode.\n");
return rc;
}
@@ -1495,7 +1493,7 @@ void smscore_onresponse(struct smscore_device_t *coredev,
last_sample_time = time_now;
if (time_now - last_sample_time > 10000) {
- sms_debug("data rate %d bytes/secs",
+ pr_debug("data rate %d bytes/secs\n",
(int)((data_total * 1000) /
(time_now - last_sample_time)));
@@ -1539,7 +1537,7 @@ void smscore_onresponse(struct smscore_device_t *coredev,
{
struct sms_version_res *ver =
(struct sms_version_res *) phdr;
- sms_debug("Firmware id %d prots 0x%x ver %d.%d",
+ pr_debug("Firmware id %d prots 0x%x ver %d.%d\n",
ver->firmware_id, ver->supported_protocols,
ver->rom_ver_major, ver->rom_ver_minor);
@@ -1562,7 +1560,7 @@ void smscore_onresponse(struct smscore_device_t *coredev,
{
struct sms_msg_data *validity = (struct sms_msg_data *) phdr;
- sms_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x",
+ pr_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x\n",
validity->msg_data[0]);
complete(&coredev->data_validity_done);
break;
@@ -1588,7 +1586,7 @@ void smscore_onresponse(struct smscore_device_t *coredev,
{
u32 *msgdata = (u32 *) phdr;
coredev->gpio_get_res = msgdata[1];
- sms_debug("gpio level %d",
+ pr_debug("gpio level %d\n",
coredev->gpio_get_res);
complete(&coredev->gpio_get_level_done);
break;
@@ -1615,7 +1613,7 @@ void smscore_onresponse(struct smscore_device_t *coredev,
break;
default:
- sms_debug("message %s(%d) not handled.",
+ pr_debug("message %s(%d) not handled.\n",
smscore_translate_msg(phdr->msg_type),
phdr->msg_type);
break;
@@ -1681,7 +1679,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
struct smscore_client_t *registered_client;
if (!client) {
- sms_err("bad parameter.");
+ pr_err("bad parameter.\n");
return -EINVAL;
}
registered_client = smscore_find_client(coredev, data_type, id);
@@ -1689,12 +1687,12 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
return 0;
if (registered_client) {
- sms_err("The msg ID already registered to another client.");
+ pr_err("The msg ID already registered to another client.\n");
return -EEXIST;
}
listentry = kzalloc(sizeof(struct smscore_idlist_t), GFP_KERNEL);
if (!listentry) {
- sms_err("Can't allocate memory for client id.");
+ pr_err("Can't allocate memory for client id.\n");
return -ENOMEM;
}
listentry->id = id;
@@ -1726,13 +1724,13 @@ int smscore_register_client(struct smscore_device_t *coredev,
/* check that no other channel with same parameters exists */
if (smscore_find_client(coredev, params->data_type,
params->initial_id)) {
- sms_err("Client already exist.");
+ pr_err("Client already exists.\n");
return -EEXIST;
}
newclient = kzalloc(sizeof(struct smscore_client_t), GFP_KERNEL);
if (!newclient) {
- sms_err("Failed to allocate memory for client.");
+ pr_err("Failed to allocate memory for client.\n");
return -ENOMEM;
}
@@ -1746,7 +1744,7 @@ int smscore_register_client(struct smscore_device_t *coredev,
smscore_validate_client(coredev, newclient, params->data_type,
params->initial_id);
*client = newclient;
- sms_debug("%p %d %d", params->context, params->data_type,
+ pr_debug("%p %d %d\n", params->context, params->data_type,
params->initial_id);
return 0;
@@ -1775,7 +1773,7 @@ void smscore_unregister_client(struct smscore_client_t *client)
kfree(identry);
}
- sms_info("%p", client->context);
+ pr_debug("%p\n", client->context);
list_del(&client->entry);
kfree(client);
@@ -1803,7 +1801,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
int rc;
if (client == NULL) {
- sms_err("Got NULL client");
+ pr_err("Got NULL client\n");
return -EINVAL;
}
@@ -1811,7 +1809,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
/* check that no other channel with same id exists */
if (coredev == NULL) {
- sms_err("Got NULL coredev");
+ pr_err("Got NULL coredev\n");
return -EINVAL;
}
@@ -2016,9 +2014,9 @@ int smscore_gpio_configure(struct smscore_device_t *coredev, u8 pin_num,
if (rc != 0) {
if (rc == -ETIME)
- sms_err("smscore_gpio_configure timeout");
+ pr_err("smscore_gpio_configure timeout\n");
else
- sms_err("smscore_gpio_configure error");
+ pr_err("smscore_gpio_configure error\n");
}
free:
kfree(buffer);
@@ -2065,9 +2063,9 @@ int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 pin_num,
if (rc != 0) {
if (rc == -ETIME)
- sms_err("smscore_gpio_set_level timeout");
+ pr_err("smscore_gpio_set_level timeout\n");
else
- sms_err("smscore_gpio_set_level error");
+ pr_err("smscore_gpio_set_level error\n");
}
kfree(buffer);
@@ -2113,9 +2111,9 @@ int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 pin_num,
if (rc != 0) {
if (rc == -ETIME)
- sms_err("smscore_gpio_get_level timeout");
+ pr_err("smscore_gpio_get_level timeout\n");
else
- sms_err("smscore_gpio_get_level error");
+ pr_err("smscore_gpio_get_level error\n");
}
kfree(buffer);
@@ -2163,7 +2161,7 @@ static void __exit smscore_module_exit(void)
}
kmutex_unlock(&g_smscore_registrylock);
- sms_debug("");
+ pr_debug("\n");
}
module_init(smscore_module_init);
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index 9c9063cd3208..eb8bd689b936 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -22,6 +22,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#ifndef __SMS_CORE_API_H__
#define __SMS_CORE_API_H__
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
@@ -31,6 +33,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include <linux/wait.h>
#include <linux/timer.h>
+#include <media/media-device.h>
+
#include <asm/page.h>
#include "smsir.h"
@@ -215,6 +219,10 @@ struct smscore_device_t {
bool is_usb_device;
int led_state;
+
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ struct media_device *media_dev;
+#endif
};
/* GPIO definitions for antenna frequency domain control (SMS8021) */
@@ -1115,7 +1123,8 @@ extern int smscore_register_hotplug(hotplug_t hotplug);
extern void smscore_unregister_hotplug(hotplug_t hotplug);
extern int smscore_register_device(struct smsdevice_params_t *params,
- struct smscore_device_t **coredev);
+ struct smscore_device_t **coredev,
+ void *mdev);
extern void smscore_unregister_device(struct smscore_device_t *coredev);
extern int smscore_start_device(struct smscore_device_t *coredev);
@@ -1168,25 +1177,4 @@ int smscore_led_state(struct smscore_device_t *core, int led);
/* ------------------------------------------------------------------------ */
-#define DBG_INFO 1
-#define DBG_ADV 2
-
-#define sms_printk(kern, fmt, arg...) \
- printk(kern "%s: " fmt "\n", __func__, ##arg)
-
-#define dprintk(kern, lvl, fmt, arg...) do {\
- if (sms_dbg & lvl) \
- sms_printk(kern, fmt, ##arg); \
-} while (0)
-
-#define sms_log(fmt, arg...) sms_printk(KERN_INFO, fmt, ##arg)
-#define sms_err(fmt, arg...) \
- sms_printk(KERN_ERR, "line: %d: " fmt, __LINE__, ##arg)
-#define sms_warn(fmt, arg...) sms_printk(KERN_WARNING, fmt, ##arg)
-#define sms_info(fmt, arg...) \
- dprintk(KERN_INFO, DBG_INFO, fmt, ##arg)
-#define sms_debug(fmt, arg...) \
- dprintk(KERN_DEBUG, DBG_ADV, fmt, ##arg)
-
-
#endif /* __SMS_CORE_API_H__ */
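[Editor's note] The header now defines pr_fmt before any include, removes the sms_printk()/dprintk() macro family (debug output is controlled by the usual pr_debug mechanisms such as CONFIG_DYNAMIC_DEBUG instead of the deleted sms_dbg parameter), and gains a media_dev pointer plus a third smscore_register_device() argument. A hedged sketch, not part of this hunk, of how a bridge driver might feed that new argument; 'parent_dev' and the model string are placeholders and error unwinding is elided:

static int example_bridge_register(struct smsdevice_params_t *params,
				   struct smscore_device_t **coredev,
				   struct device *parent_dev)
{
	struct media_device *mdev = NULL;

#ifdef CONFIG_MEDIA_CONTROLLER_DVB
	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;
	mdev->dev = parent_dev;
	strlcpy(mdev->model, "example bridge", sizeof(mdev->model));
	if (media_device_register(mdev) < 0) {
		kfree(mdev);
		mdev = NULL;	/* run without a media graph */
	}
#endif
	/* coredev->media_dev will carry this pointer from now on */
	return smscore_register_device(params, coredev, mdev);
}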
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 2408d7e9451e..1a8677ade391 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -17,7 +17,7 @@
*
***********************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include "smscoreapi.h"
#include <linux/module.h>
#include <linux/slab.h>
@@ -31,8 +31,6 @@
#include "dvb_demux.h"
#include "dvb_frontend.h"
-#include "smscoreapi.h"
-
#include "smsdvb.h"
static struct dentry *smsdvb_debugfs_usb_root;
@@ -536,7 +534,7 @@ int smsdvb_debugfs_register(void)
*/
d = debugfs_create_dir("smsdvb", usb_debug_root);
if (IS_ERR_OR_NULL(d)) {
- sms_err("Couldn't create sysfs node for smsdvb");
+ pr_err("Couldn't create sysfs node for smsdvb\n");
return PTR_ERR(d);
} else {
smsdvb_debugfs_usb_root = d;
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 85151efdd94c..367b8e77feb8 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -19,6 +19,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
****************************************************************/
+#include "smscoreapi.h"
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -29,7 +31,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "dvb_demux.h"
#include "dvb_frontend.h"
-#include "smscoreapi.h"
#include "sms-cards.h"
#include "smsdvb.h"
@@ -39,11 +40,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static struct list_head g_smsdvb_clients;
static struct mutex g_smsdvb_clientslock;
-static int sms_dbg;
-module_param_named(debug, sms_dbg, int, 0644);
-MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
-
-
static u32 sms_to_guard_interval_table[] = {
[0] = GUARD_INTERVAL_1_32,
[1] = GUARD_INTERVAL_1_16,
@@ -82,48 +78,48 @@ static void sms_board_dvb3_event(struct smsdvb_client_t *client,
struct smscore_device_t *coredev = client->coredev;
switch (event) {
case DVB3_EVENT_INIT:
- sms_debug("DVB3_EVENT_INIT");
+ pr_debug("DVB3_EVENT_INIT\n");
sms_board_event(coredev, BOARD_EVENT_BIND);
break;
case DVB3_EVENT_SLEEP:
- sms_debug("DVB3_EVENT_SLEEP");
+ pr_debug("DVB3_EVENT_SLEEP\n");
sms_board_event(coredev, BOARD_EVENT_POWER_SUSPEND);
break;
case DVB3_EVENT_HOTPLUG:
- sms_debug("DVB3_EVENT_HOTPLUG");
+ pr_debug("DVB3_EVENT_HOTPLUG\n");
sms_board_event(coredev, BOARD_EVENT_POWER_INIT);
break;
case DVB3_EVENT_FE_LOCK:
if (client->event_fe_state != DVB3_EVENT_FE_LOCK) {
client->event_fe_state = DVB3_EVENT_FE_LOCK;
- sms_debug("DVB3_EVENT_FE_LOCK");
+ pr_debug("DVB3_EVENT_FE_LOCK\n");
sms_board_event(coredev, BOARD_EVENT_FE_LOCK);
}
break;
case DVB3_EVENT_FE_UNLOCK:
if (client->event_fe_state != DVB3_EVENT_FE_UNLOCK) {
client->event_fe_state = DVB3_EVENT_FE_UNLOCK;
- sms_debug("DVB3_EVENT_FE_UNLOCK");
+ pr_debug("DVB3_EVENT_FE_UNLOCK\n");
sms_board_event(coredev, BOARD_EVENT_FE_UNLOCK);
}
break;
case DVB3_EVENT_UNC_OK:
if (client->event_unc_state != DVB3_EVENT_UNC_OK) {
client->event_unc_state = DVB3_EVENT_UNC_OK;
- sms_debug("DVB3_EVENT_UNC_OK");
+ pr_debug("DVB3_EVENT_UNC_OK\n");
sms_board_event(coredev, BOARD_EVENT_MULTIPLEX_OK);
}
break;
case DVB3_EVENT_UNC_ERR:
if (client->event_unc_state != DVB3_EVENT_UNC_ERR) {
client->event_unc_state = DVB3_EVENT_UNC_ERR;
- sms_debug("DVB3_EVENT_UNC_ERR");
+ pr_debug("DVB3_EVENT_UNC_ERR\n");
sms_board_event(coredev, BOARD_EVENT_MULTIPLEX_ERRORS);
}
break;
default:
- sms_err("Unknown dvb3 api event");
+ pr_err("Unknown dvb3 api event\n");
break;
}
}
@@ -590,7 +586,7 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
is_status_update = true;
break;
default:
- sms_info("message not handled");
+ pr_debug("message not handled\n");
}
smscore_putbuffer(client->coredev, cb);
@@ -613,6 +609,19 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
return 0;
}
+static void smsdvb_media_device_unregister(struct smsdvb_client_t *client)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ struct smscore_device_t *coredev = client->coredev;
+
+ if (!coredev->media_dev)
+ return;
+ media_device_unregister(coredev->media_dev);
+ kfree(coredev->media_dev);
+ coredev->media_dev = NULL;
+#endif
+}
+
static void smsdvb_unregister_client(struct smsdvb_client_t *client)
{
/* must be called under clientslock */
@@ -624,6 +633,7 @@ static void smsdvb_unregister_client(struct smsdvb_client_t *client)
dvb_unregister_frontend(&client->frontend);
dvb_dmxdev_release(&client->dmxdev);
dvb_dmx_release(&client->demux);
+ smsdvb_media_device_unregister(client);
dvb_unregister_adapter(&client->adapter);
kfree(client);
}
@@ -643,7 +653,7 @@ static int smsdvb_start_feed(struct dvb_demux_feed *feed)
container_of(feed->demux, struct smsdvb_client_t, demux);
struct sms_msg_data pid_msg;
- sms_debug("add pid %d(%x)",
+ pr_debug("add pid %d(%x)\n",
feed->pid, feed->pid);
client->feed_users++;
@@ -665,7 +675,7 @@ static int smsdvb_stop_feed(struct dvb_demux_feed *feed)
container_of(feed->demux, struct smsdvb_client_t, demux);
struct sms_msg_data pid_msg;
- sms_debug("remove pid %d(%x)",
+ pr_debug("remove pid %d(%x)\n",
feed->pid, feed->pid);
client->feed_users--;
@@ -835,7 +845,7 @@ static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int smsdvb_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *tune)
{
- sms_debug("");
+ pr_debug("\n");
tune->min_delay_ms = 400;
tune->step_size = 250000;
@@ -869,7 +879,7 @@ static int smsdvb_dvbt_set_frontend(struct dvb_frontend *fe)
msg.Data[0] = c->frequency;
msg.Data[2] = 12000000;
- sms_info("%s: freq %d band %d", __func__, c->frequency,
+ pr_debug("%s: freq %d band %d\n", __func__, c->frequency,
c->bandwidth_hz);
switch (c->bandwidth_hz / 1000000) {
@@ -954,7 +964,7 @@ static int smsdvb_isdbt_set_frontend(struct dvb_frontend *fe)
c->bandwidth_hz = 6000000;
- sms_info("%s: freq %d segwidth %d segindex %d", __func__,
+ pr_debug("freq %d segwidth %d segindex %d\n",
c->frequency, c->isdbt_sb_segment_count,
c->isdbt_sb_segment_idx);
@@ -1082,10 +1092,8 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
if (!arrival)
return 0;
client = kzalloc(sizeof(struct smsdvb_client_t), GFP_KERNEL);
- if (!client) {
- sms_err("kmalloc() failed");
+ if (!client)
return -ENOMEM;
- }
/* register dvb adapter */
rc = dvb_register_adapter(&client->adapter,
@@ -1093,9 +1101,10 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
smscore_get_board_id(coredev))->name,
THIS_MODULE, device, adapter_nr);
if (rc < 0) {
- sms_err("dvb_register_adapter() failed %d", rc);
+ pr_err("dvb_register_adapter() failed %d\n", rc);
goto adapter_error;
}
+ dvb_register_media_controller(&client->adapter, coredev->media_dev);
/* init dvb demux */
client->demux.dmx.capabilities = DMX_TS_FILTERING;
@@ -1106,7 +1115,7 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
rc = dvb_dmx_init(&client->demux);
if (rc < 0) {
- sms_err("dvb_dmx_init failed %d", rc);
+ pr_err("dvb_dmx_init failed %d\n", rc);
goto dvbdmx_error;
}
@@ -1117,7 +1126,7 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
rc = dvb_dmxdev_init(&client->dmxdev, &client->adapter);
if (rc < 0) {
- sms_err("dvb_dmxdev_init failed %d", rc);
+ pr_err("dvb_dmxdev_init failed %d\n", rc);
goto dmxdev_error;
}
@@ -1138,7 +1147,7 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
rc = dvb_register_frontend(&client->adapter, &client->frontend);
if (rc < 0) {
- sms_err("frontend registration failed %d", rc);
+ pr_err("frontend registration failed %d\n", rc);
goto frontend_error;
}
@@ -1150,7 +1159,7 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
rc = smscore_register_client(coredev, &params, &client->smsclient);
if (rc < 0) {
- sms_err("smscore_register_client() failed %d", rc);
+ pr_err("smscore_register_client() failed %d\n", rc);
goto client_error;
}
@@ -1169,12 +1178,14 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
client->event_unc_state = -1;
sms_board_dvb3_event(client, DVB3_EVENT_HOTPLUG);
- sms_info("success");
sms_board_setup(coredev);
if (smsdvb_debugfs_create(client) < 0)
- sms_info("failed to create debugfs node");
+ pr_info("failed to create debugfs node\n");
+
+ dvb_create_media_graph(&client->adapter);
+ pr_info("DVB interface registered.\n");
return 0;
client_error:
@@ -1187,6 +1198,7 @@ dmxdev_error:
dvb_dmx_release(&client->demux);
dvbdmx_error:
+ smsdvb_media_device_unregister(client);
dvb_unregister_adapter(&client->adapter);
adapter_error:
@@ -1205,7 +1217,7 @@ static int __init smsdvb_module_init(void)
rc = smscore_register_hotplug(smsdvb_hotplug);
- sms_debug("");
+ pr_debug("\n");
return rc;
}
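[Editor's note] Besides the log conversion, the hotplug path now attaches the media controller right after the adapter is registered and builds the graph once every node exists; the new smsdvb_media_device_unregister() mirrors that on both the error path and normal teardown. A hedged outline of the ordering introduced above; identifiers other than the dvb_* calls are placeholders and error unwinding is elided:

static int example_hotplug_order(struct smsdvb_client_t *client,
				 struct smscore_device_t *coredev,
				 struct device *device, short *adapter_nr)
{
	int rc;

	rc = dvb_register_adapter(&client->adapter, "siano example",
				  THIS_MODULE, device, adapter_nr);
	if (rc < 0)
		return rc;

	/* attach the bridge's media_device before any devnode is created */
	dvb_register_media_controller(&client->adapter, coredev->media_dev);

	/* ... dvb_dmx_init(), dvb_dmxdev_init(), dvb_register_frontend(),
	 *     smscore_register_client() happen here, as in the real code ... */

	/* once every devnode entity exists, wire tuner->fe->demux->dvr/ca */
	dvb_create_media_graph(&client->adapter);
	return 0;
}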
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 35d0e887bd65..1d60d200d9ab 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -25,10 +25,11 @@
****************************************************************/
+#include "smscoreapi.h"
+
#include <linux/types.h>
#include <linux/input.h>
-#include "smscoreapi.h"
#include "smsir.h"
#include "sms-cards.h"
@@ -56,16 +57,14 @@ int sms_ir_init(struct smscore_device_t *coredev)
int board_id = smscore_get_board_id(coredev);
struct rc_dev *dev;
- sms_log("Allocating rc device");
+ pr_debug("Allocating rc device\n");
dev = rc_allocate_device();
- if (!dev) {
- sms_err("Not enough memory");
+ if (!dev)
return -ENOMEM;
- }
coredev->ir.controller = 0; /* Todo: vega/nova SPI number */
coredev->ir.timeout = IR_DEFAULT_TIMEOUT;
- sms_log("IR port %d, timeout %d ms",
+ pr_debug("IR port %d, timeout %d ms\n",
coredev->ir.controller, coredev->ir.timeout);
snprintf(coredev->ir.name, sizeof(coredev->ir.name),
@@ -92,11 +91,12 @@ int sms_ir_init(struct smscore_device_t *coredev)
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
- sms_log("Input device (IR) %s is set for key events", dev->input_name);
+ pr_debug("Input device (IR) %s is set for key events\n",
+ dev->input_name);
err = rc_register_device(dev);
if (err < 0) {
- sms_err("Failed to register device");
+ pr_err("Failed to register device\n");
rc_free_device(dev);
return err;
}
@@ -109,5 +109,5 @@ void sms_ir_exit(struct smscore_device_t *coredev)
{
rc_unregister_device(coredev->ir.dev);
- sms_log("");
+ pr_debug("\n");
}
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index abff803ad69a..d0e3f9d85f34 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1136,10 +1136,13 @@ static const struct file_operations dvb_demux_fops = {
.llseek = default_llseek,
};
-static struct dvb_device dvbdev_demux = {
+static const struct dvb_device dvbdev_demux = {
.priv = NULL,
.users = 1,
.writers = 1,
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ .name = "dvb-demux",
+#endif
.fops = &dvb_demux_fops
};
@@ -1209,13 +1212,15 @@ static const struct file_operations dvb_dvr_fops = {
.llseek = default_llseek,
};
-static struct dvb_device dvbdev_dvr = {
+static const struct dvb_device dvbdev_dvr = {
.priv = NULL,
.readers = 1,
.users = 1,
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ .name = "dvb-dvr",
+#endif
.fops = &dvb_dvr_fops
};
-
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
int i;
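[Editor's note] The demux and DVR templates become const and gain a .name that dvbdev.c uses to label the media entities. Making the templates const is safe because dvb_register_device() copies the template into a freshly allocated struct dvb_device rather than mutating it. A hedged sketch of the pattern; dvb_example_fops is a placeholder:

static const struct dvb_device dvbdev_example = {
	.users   = 1,
	.writers = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name    = "dvb-example",	/* becomes the media entity name */
#endif
	.fops    = &dvb_example_fops,	/* placeholder fops */
};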
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 80ab8d0ff6e0..c117fb3b4aff 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -245,6 +245,7 @@
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
+#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
@@ -318,6 +319,7 @@
#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6
#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
#define USB_PID_WINFAST_DTV2000DS 0x6a04
+#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12
#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
@@ -385,4 +387,5 @@
#define USB_PID_PCTV_2002E 0x025c
#define USB_PID_PCTV_2002E_SE 0x025d
#define USB_PID_SVEON_STV27 0xd3af
+#define USB_PID_TURBOX_DTT_2000 0xd3a4
#endif
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 0aac3096728e..72937756f60c 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1638,15 +1638,17 @@ static const struct file_operations dvb_ca_fops = {
.llseek = noop_llseek,
};
-static struct dvb_device dvbdev_ca = {
+static const struct dvb_device dvbdev_ca = {
.priv = NULL,
.users = 1,
.readers = 1,
.writers = 1,
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ .name = "dvb-ca-en50221",
+#endif
.fops = &dvb_ca_fops,
};
-
/* ******************************************************************************** */
/* Initialisation/shutdown functions */
@@ -1676,14 +1678,14 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
/* initialise the system data */
if ((ca = kzalloc(sizeof(struct dvb_ca_private), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
- goto error;
+ goto exit;
}
ca->pub = pubca;
ca->flags = flags;
ca->slot_count = slot_count;
if ((ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
- goto error;
+ goto free_ca;
}
init_waitqueue_head(&ca->wait_queue);
ca->open = 0;
@@ -1694,7 +1696,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
/* register the DVB device */
ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca, DVB_DEVICE_CA);
if (ret)
- goto error;
+ goto free_slot_info;
/* now initialise each slot */
for (i = 0; i < slot_count; i++) {
@@ -1709,7 +1711,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
if (signal_pending(current)) {
ret = -EINTR;
- goto error;
+ goto unregister_device;
}
mb();
@@ -1720,17 +1722,17 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
ret = PTR_ERR(ca->thread);
printk("dvb_ca_init: failed to start kernel_thread (%d)\n",
ret);
- goto error;
+ goto unregister_device;
}
return 0;
-error:
- if (ca != NULL) {
- if (ca->dvbdev != NULL)
- dvb_unregister_device(ca->dvbdev);
- kfree(ca->slot_info);
- kfree(ca);
- }
+unregister_device:
+ dvb_unregister_device(ca->dvbdev);
+free_slot_info:
+ kfree(ca->slot_info);
+free_ca:
+ kfree(ca);
+exit:
pubca->private = NULL;
return ret;
}
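[Editor's note] The single catch-all "error:" label, which had to test every pointer before freeing, is replaced by one label per allocation so each failure path releases exactly what has been set up so far. A hedged illustration of the unwind-ladder idiom adopted above; the do_step/undo_step helpers are placeholders, not driver functions:

static int do_step_a(void);
static int do_step_b(void);
static void undo_step_a(void);

static int example_init(void)
{
	int ret;

	ret = do_step_a();
	if (ret)
		goto exit;
	ret = do_step_b();
	if (ret)
		goto undo_a;	/* frees only what step A set up */
	return 0;

undo_a:
	undo_step_a();
exit:
	return ret;
}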
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2cf30576bf39..882ca417f328 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -131,6 +131,11 @@ struct dvb_frontend_private {
int quality;
unsigned int check_wrapped;
enum dvbfe_search algo_status;
+
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ struct media_pipeline pipe;
+ struct media_entity *pipe_start_entity;
+#endif
};
static void dvb_frontend_wakeup(struct dvb_frontend *fe);
@@ -590,12 +595,106 @@ static void dvb_frontend_wakeup(struct dvb_frontend *fe)
wake_up_interruptible(&fepriv->wait_queue);
}
+/**
+ * dvb_enable_media_tuner() - tries to enable the DVB tuner
+ *
+ * @fe: struct dvb_frontend pointer
+ *
+ * This function ensures that just one media tuner is enabled for a given
+ * frontend. It has two different behaviors:
+ * - For trivial devices with just one tuner:
+ * it just enables the existing tuner->fe link
+ * - For devices with more than one tuner:
+ * It is up to the driver to implement the logic that will enable one tuner
+ * and disable the other ones. However, if more than one tuner is enabled for
+ * the same frontend, it will print an error message and return -EINVAL.
+ *
+ * Return: 0 if everything is OK, if no tuner is linked to the frontend or
+ * if mdev is NULL; otherwise, the error code returned by
+ * media_entity_setup_link().
+ */
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+static int dvb_enable_media_tuner(struct dvb_frontend *fe)
+{
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dvb_adapter *adapter = fe->dvb;
+ struct media_device *mdev = adapter->mdev;
+ struct media_entity *entity, *source;
+ struct media_link *link, *found_link = NULL;
+ int i, ret, n_links = 0, active_links = 0;
+
+ fepriv->pipe_start_entity = NULL;
+
+ if (!mdev)
+ return 0;
+
+ entity = fepriv->dvbdev->entity;
+ fepriv->pipe_start_entity = entity;
+
+ for (i = 0; i < entity->num_links; i++) {
+ link = &entity->links[i];
+ if (link->sink->entity == entity) {
+ found_link = link;
+ n_links++;
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ active_links++;
+ }
+ }
+
+ if (!n_links || active_links == 1 || !found_link)
+ return 0;
+
+ /*
+ * If a frontend has more than one tuner linked, it is up to the driver
+ * to select which one will be the active one, as the frontend core can't
+ * guess. If the driver doesn't do that, it is a bug.
+ */
+ if (n_links > 1 && active_links != 1) {
+ dev_err(fe->dvb->device,
+ "WARNING: there are %d active links among %d tuners. This is a driver's bug!\n",
+ active_links, n_links);
+ return -EINVAL;
+ }
+
+ source = found_link->source->entity;
+ fepriv->pipe_start_entity = source;
+ for (i = 0; i < source->num_links; i++) {
+ struct media_entity *sink;
+ int flags = 0;
+
+ link = &source->links[i];
+ sink = link->sink->entity;
+
+ if (sink == entity)
+ flags = MEDIA_LNK_FL_ENABLED;
+
+ ret = media_entity_setup_link(link, flags);
+ if (ret) {
+ dev_err(fe->dvb->device,
+ "Couldn't change link %s->%s to %s. Error %d\n",
+ source->name, sink->name,
+ flags ? "enabled" : "disabled",
+ ret);
+ return ret;
+ } else
+ dev_dbg(fe->dvb->device,
+ "link %s->%s was %s\n",
+ source->name, sink->name,
+ flags ? "ENABLED" : "disabled");
+ }
+ return 0;
+}
+#endif
+
static int dvb_frontend_thread(void *data)
{
struct dvb_frontend *fe = data;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
fe_status_t s;
enum dvbfe_algo algo;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ int ret;
+#endif
bool re_tune = false;
bool semheld = false;
@@ -609,6 +708,20 @@ static int dvb_frontend_thread(void *data)
fepriv->wakeup = 0;
fepriv->reinitialise = 0;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ ret = dvb_enable_media_tuner(fe);
+ if (ret) {
+ /* FIXME: return an error if it fails */
+ dev_info(fe->dvb->device,
+ "proceeding with FE task\n");
+ } else if (fepriv->pipe_start_entity) {
+ ret = media_entity_pipeline_start(fepriv->pipe_start_entity,
+ &fepriv->pipe);
+ if (ret)
+ return ret;
+ }
+#endif
+
dvb_frontend_init(fe);
set_freezable();
@@ -718,6 +831,12 @@ restart:
}
}
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (fepriv->pipe_start_entity)
+ media_entity_pipeline_stop(fepriv->pipe_start_entity);
+ fepriv->pipe_start_entity = NULL;
+#endif
+
if (dvb_powerdown_on_sleep) {
if (fe->ops.set_voltage)
fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF);
@@ -2612,11 +2731,14 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
struct dvb_frontend* fe)
{
struct dvb_frontend_private *fepriv;
- static const struct dvb_device dvbdev_template = {
+ const struct dvb_device dvbdev_template = {
.users = ~0,
.writers = 1,
.readers = (~0)-1,
.fops = &dvb_frontend_fops,
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ .name = fe->ops.info.name,
+#endif
.kernel_ioctl = dvb_frontend_ioctl
};
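[Editor's note] dvb_enable_media_tuner() walks the frontend entity's sink links: with a single tuner it simply enables that link, with several it requires that the driver has already left exactly one enabled, and the frontend thread then brackets its run with media_entity_pipeline_start()/stop(). A hedged sketch, not from this patch, of what a two-tuner bridge is expected to do (e.g. at probe time) so the "n_links > 1 && active_links != 1" check never trips; the entity names and pad indexes are placeholders:

static int example_select_tuner(struct media_entity *active_tuner,
				struct media_entity *fe_entity)
{
	struct media_link *link;

	/* tuner source pad assumed at index 1, frontend sink pad at 0 */
	link = media_entity_find_link(&active_tuner->pads[1],
				      &fe_entity->pads[0]);
	if (!link)
		return -ENOENT;
	return media_entity_setup_link(link, MEDIA_LNK_FL_ENABLED);
}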
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 686d3277dad1..a694fb1ea228 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1190,7 +1190,6 @@ static int dvb_net_stop(struct net_device *dev)
static const struct header_ops dvb_header_ops = {
.create = eth_header,
.parse = eth_header_parse,
- .rebuild = eth_rebuild_header,
};
@@ -1462,14 +1461,16 @@ static const struct file_operations dvb_net_fops = {
.llseek = noop_llseek,
};
-static struct dvb_device dvbdev_net = {
+static const struct dvb_device dvbdev_net = {
.priv = NULL,
.users = 1,
.writers = 1,
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ .name = "dvb-net",
+#endif
.fops = &dvb_net_fops,
};
-
void dvb_net_release (struct dvb_net *dvbnet)
{
int i;
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 983db75de350..13bb57f0457f 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -180,6 +180,93 @@ skip:
return -ENFILE;
}
+static void dvb_register_media_device(struct dvb_device *dvbdev,
+ int type, int minor)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ int ret = 0, npads;
+
+ if (!dvbdev->adapter->mdev)
+ return;
+
+ dvbdev->entity = kzalloc(sizeof(*dvbdev->entity), GFP_KERNEL);
+ if (!dvbdev->entity)
+ return;
+
+ dvbdev->entity->info.dev.major = DVB_MAJOR;
+ dvbdev->entity->info.dev.minor = minor;
+ dvbdev->entity->name = dvbdev->name;
+
+ switch (type) {
+ case DVB_DEVICE_CA:
+ case DVB_DEVICE_DEMUX:
+ case DVB_DEVICE_FRONTEND:
+ npads = 2;
+ break;
+ case DVB_DEVICE_NET:
+ npads = 0;
+ break;
+ default:
+ npads = 1;
+ }
+
+ if (npads) {
+ dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
+ GFP_KERNEL);
+ if (!dvbdev->pads) {
+ kfree(dvbdev->entity);
+ return;
+ }
+ }
+
+ switch (type) {
+ case DVB_DEVICE_FRONTEND:
+ dvbdev->entity->type = MEDIA_ENT_T_DEVNODE_DVB_FE;
+ dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
+ dvbdev->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ break;
+ case DVB_DEVICE_DEMUX:
+ dvbdev->entity->type = MEDIA_ENT_T_DEVNODE_DVB_DEMUX;
+ dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
+ dvbdev->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ break;
+ case DVB_DEVICE_DVR:
+ dvbdev->entity->type = MEDIA_ENT_T_DEVNODE_DVB_DVR;
+ dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
+ break;
+ case DVB_DEVICE_CA:
+ dvbdev->entity->type = MEDIA_ENT_T_DEVNODE_DVB_CA;
+ dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
+ dvbdev->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ break;
+ case DVB_DEVICE_NET:
+ dvbdev->entity->type = MEDIA_ENT_T_DEVNODE_DVB_NET;
+ break;
+ default:
+ kfree(dvbdev->entity);
+ dvbdev->entity = NULL;
+ return;
+ }
+
+ if (npads)
+ ret = media_entity_init(dvbdev->entity, npads, dvbdev->pads, 0);
+ if (!ret)
+ ret = media_device_register_entity(dvbdev->adapter->mdev,
+ dvbdev->entity);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "%s: media_device_register_entity failed for %s\n",
+ __func__, dvbdev->entity->name);
+ kfree(dvbdev->pads);
+ kfree(dvbdev->entity);
+ dvbdev->entity = NULL;
+ return;
+ }
+
+ printk(KERN_DEBUG "%s: media device '%s' registered.\n",
+ __func__, dvbdev->entity->name);
+#endif
+}
int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
const struct dvb_device *template, void *priv, int type)
@@ -258,10 +345,11 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
return PTR_ERR(clsdev);
}
-
dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
adap->num, dnames[type], id, minor, minor);
+ dvb_register_media_device(dvbdev, type, minor);
+
return 0;
}
EXPORT_SYMBOL(dvb_register_device);
@@ -278,12 +366,66 @@ void dvb_unregister_device(struct dvb_device *dvbdev)
device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ if (dvbdev->entity) {
+ media_device_unregister_entity(dvbdev->entity);
+ kfree(dvbdev->entity);
+ kfree(dvbdev->pads);
+ }
+#endif
+
list_del (&dvbdev->list_head);
kfree (dvbdev->fops);
kfree (dvbdev);
}
EXPORT_SYMBOL(dvb_unregister_device);
+
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+void dvb_create_media_graph(struct dvb_adapter *adap)
+{
+ struct media_device *mdev = adap->mdev;
+ struct media_entity *entity, *tuner = NULL, *fe = NULL;
+ struct media_entity *demux = NULL, *dvr = NULL, *ca = NULL;
+
+ if (!mdev)
+ return;
+
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->type) {
+ case MEDIA_ENT_T_V4L2_SUBDEV_TUNER:
+ tuner = entity;
+ break;
+ case MEDIA_ENT_T_DEVNODE_DVB_FE:
+ fe = entity;
+ break;
+ case MEDIA_ENT_T_DEVNODE_DVB_DEMUX:
+ demux = entity;
+ break;
+ case MEDIA_ENT_T_DEVNODE_DVB_DVR:
+ dvr = entity;
+ break;
+ case MEDIA_ENT_T_DEVNODE_DVB_CA:
+ ca = entity;
+ break;
+ }
+ }
+
+ if (tuner && fe)
+ media_entity_create_link(tuner, 0, fe, 0, 0);
+
+ if (fe && demux)
+ media_entity_create_link(fe, 1, demux, 0, MEDIA_LNK_FL_ENABLED);
+
+ if (demux && dvr)
+ media_entity_create_link(demux, 1, dvr, 0, MEDIA_LNK_FL_ENABLED);
+
+ if (demux && ca)
+ media_entity_create_link(demux, 1, ca, 0, MEDIA_LNK_FL_ENABLED);
+}
+EXPORT_SYMBOL_GPL(dvb_create_media_graph);
+#endif
+
static int dvbdev_check_free_adapter_num(int num)
{
struct list_head *entry;
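[Editor's note] dvb_register_media_device() gives frontend, demux and CA nodes one sink and one source pad, the DVR a single sink pad and the net device no pads; dvb_create_media_graph() then connects whichever entities exist. A hedged sketch of the resulting topology (pad numbers in brackets):

/*
 *   tuner [0]-->[0] dvb-frontend [1]-->[0] dvb-demux [1]-->[0] dvb-dvr
 *                                                    [1]-->[0] dvb-ca-en50221
 *
 * Only the tuner->frontend link is created disabled; the other links
 * carry MEDIA_LNK_FL_ENABLED from the start, and dvb_enable_media_tuner()
 * enables the tuner link on demand from the frontend thread.
 */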
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index f96b28e7fc95..12629b8ecb0c 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -27,6 +27,7 @@
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/list.h>
+#include <media/media-device.h>
#define DVB_MAJOR 212
@@ -71,6 +72,10 @@ struct dvb_adapter {
int mfe_shared; /* indicates mutually exclusive frontends */
struct dvb_device *mfe_dvbdev; /* frontend device in use */
struct mutex mfe_lock; /* access lock for thread creation */
+
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ struct media_device *mdev;
+#endif
};
@@ -92,6 +97,15 @@ struct dvb_device {
/* don't really need those !? -- FIXME: use video_usercopy */
int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
+ /* Needed for media controller register/unregister */
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ const char *name;
+
+ /* Allocated and filled inside dvbdev.c */
+ struct media_entity *entity;
+ struct media_pad *pads;
+#endif
+
void *priv;
};
@@ -109,6 +123,19 @@ extern int dvb_register_device (struct dvb_adapter *adap,
extern void dvb_unregister_device (struct dvb_device *dvbdev);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+void dvb_create_media_graph(struct dvb_adapter *adap);
+static inline void dvb_register_media_controller(struct dvb_adapter *adap,
+ struct media_device *mdev)
+{
+ adap->mdev = mdev;
+}
+
+#else
+static inline void dvb_create_media_graph(struct dvb_adapter *adap) {}
+#define dvb_register_media_controller(a, b) {}
+#endif
+
extern int dvb_generic_open (struct inode *inode, struct file *file);
extern int dvb_generic_release (struct inode *inode, struct file *file);
extern long dvb_generic_ioctl (struct file *file,
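[Editor's note] The disabled-media-controller stub is deliberately a macro rather than a static inline: the macro expands to "{}", so its arguments are discarded by the preprocessor and never reach the compiler. A hedged one-line illustration of the caller this enables; 'client' and 'coredev' are placeholders taken from the siano hunks earlier in this patch:

	/* builds even when media_dev does not exist in the structure,
	 * because the !CONFIG_MEDIA_CONTROLLER_DVB macro drops both args */
	dvb_register_media_controller(&client->adapter, coredev->media_dev);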
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index bb76727d924e..97c151d5b2e1 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -577,6 +577,14 @@ config DVB_LGDT3305
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
+config DVB_LGDT3306A
+ tristate "LG Electronics LGDT3306A based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ An ATSC 8VSB and QAM-B 64/256 demodulator module. Say Y when you want
+ to support this frontend.
+
config DVB_LG2160
tristate "LG Electronics LG216x based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index ba59df63d050..23d399bec804 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
+obj-$(CONFIG_DVB_LGDT3306A) += lgdt3306a.o
obj-$(CONFIG_DVB_LG2160) += lg2160.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
diff --git a/drivers/media/dvb-frontends/a8293.h b/drivers/media/dvb-frontends/a8293.h
index b6ef6427cfa5..5f0411939ffc 100644
--- a/drivers/media/dvb-frontends/a8293.h
+++ b/drivers/media/dvb-frontends/a8293.h
@@ -27,7 +27,7 @@ struct a8293_config {
u8 i2c_addr;
};
-#if IS_ENABLED(CONFIG_DVB_A8293)
+#if IS_REACHABLE(CONFIG_DVB_A8293)
extern struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, const struct a8293_config *cfg);
#else
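[Editor's note] From here on, the per-demod attach() wrappers switch from IS_ENABLED to IS_REACHABLE. IS_ENABLED(CONFIG_FOO) is true whenever FOO is y or m; IS_REACHABLE(CONFIG_FOO) is additionally false when FOO is modular but the calling code is built-in, i.e. when the call could never be linked, so such callers now get the warning stub instead of an unresolved symbol. A hedged sketch of the wrapper shape with a hypothetical Kconfig symbol:

#if IS_REACHABLE(CONFIG_DVB_EXAMPLE_DEMOD)	/* hypothetical symbol */
extern struct dvb_frontend *example_demod_attach(struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *example_demod_attach(struct i2c_adapter *i2c)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif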
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index 09273b2cd310..1dcc936e1661 100644
--- a/drivers/media/dvb-frontends/af9013.h
+++ b/drivers/media/dvb-frontends/af9013.h
@@ -103,7 +103,7 @@ struct af9013_config {
u8 gpio[4];
};
-#if IS_ENABLED(CONFIG_DVB_AF9013)
+#if IS_REACHABLE(CONFIG_DVB_AF9013)
extern struct dvb_frontend *af9013_attach(const struct af9013_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/atbm8830.h b/drivers/media/dvb-frontends/atbm8830.h
index 8e0ac98f8d08..5446d13fdfe8 100644
--- a/drivers/media/dvb-frontends/atbm8830.h
+++ b/drivers/media/dvb-frontends/atbm8830.h
@@ -61,7 +61,7 @@ struct atbm8830_config {
u8 agc_hold_loop;
};
-#if IS_ENABLED(CONFIG_DVB_ATBM8830)
+#if IS_REACHABLE(CONFIG_DVB_ATBM8830)
extern struct dvb_frontend *atbm8830_attach(const struct atbm8830_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h
index 612251958855..dde61582c158 100644
--- a/drivers/media/dvb-frontends/au8522.h
+++ b/drivers/media/dvb-frontends/au8522.h
@@ -61,7 +61,7 @@ struct au8522_config {
enum au8522_if_freq qam_if;
};
-#if IS_ENABLED(CONFIG_DVB_AU8522_DTV)
+#if IS_REACHABLE(CONFIG_DVB_AU8522_DTV)
extern struct dvb_frontend *au8522_attach(const struct au8522_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/bcm3510.h b/drivers/media/dvb-frontends/bcm3510.h
index 5bd56b1623bf..ff66492fb940 100644
--- a/drivers/media/dvb-frontends/bcm3510.h
+++ b/drivers/media/dvb-frontends/bcm3510.h
@@ -34,7 +34,7 @@ struct bcm3510_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if IS_ENABLED(CONFIG_DVB_BCM3510)
+#if IS_REACHABLE(CONFIG_DVB_BCM3510)
extern struct dvb_frontend* bcm3510_attach(const struct bcm3510_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx22700.h b/drivers/media/dvb-frontends/cx22700.h
index 382a7b1f3618..e0a764868e6f 100644
--- a/drivers/media/dvb-frontends/cx22700.h
+++ b/drivers/media/dvb-frontends/cx22700.h
@@ -31,7 +31,7 @@ struct cx22700_config
u8 demod_address;
};
-#if IS_ENABLED(CONFIG_DVB_CX22700)
+#if IS_REACHABLE(CONFIG_DVB_CX22700)
extern struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx22702.h b/drivers/media/dvb-frontends/cx22702.h
index 0b1a6c2f9d5f..68b69a7660d2 100644
--- a/drivers/media/dvb-frontends/cx22702.h
+++ b/drivers/media/dvb-frontends/cx22702.h
@@ -41,7 +41,7 @@ struct cx22702_config {
u8 output_mode;
};
-#if IS_ENABLED(CONFIG_DVB_CX22702)
+#if IS_REACHABLE(CONFIG_DVB_CX22702)
extern struct dvb_frontend *cx22702_attach(
const struct cx22702_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/cx24110.h b/drivers/media/dvb-frontends/cx24110.h
index 527aff1f2723..d5453ed20b28 100644
--- a/drivers/media/dvb-frontends/cx24110.h
+++ b/drivers/media/dvb-frontends/cx24110.h
@@ -46,7 +46,7 @@ static inline int cx24110_pll_write(struct dvb_frontend *fe, u32 val)
return 0;
}
-#if IS_ENABLED(CONFIG_DVB_CX24110)
+#if IS_REACHABLE(CONFIG_DVB_CX24110)
extern struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx24113.h b/drivers/media/dvb-frontends/cx24113.h
index 782711ba1a32..962919b9b6e6 100644
--- a/drivers/media/dvb-frontends/cx24113.h
+++ b/drivers/media/dvb-frontends/cx24113.h
@@ -32,7 +32,7 @@ struct cx24113_config {
u32 xtal_khz;
};
-#if IS_ENABLED(CONFIG_DVB_TUNER_CX24113)
+#if IS_REACHABLE(CONFIG_DVB_TUNER_CX24113)
extern struct dvb_frontend *cx24113_attach(struct dvb_frontend *,
const struct cx24113_config *config, struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/cx24116.h b/drivers/media/dvb-frontends/cx24116.h
index 2ec84fae3f9f..f6dbabc1d62b 100644
--- a/drivers/media/dvb-frontends/cx24116.h
+++ b/drivers/media/dvb-frontends/cx24116.h
@@ -41,7 +41,7 @@ struct cx24116_config {
u16 i2c_wr_max;
};
-#if IS_ENABLED(CONFIG_DVB_CX24116)
+#if IS_REACHABLE(CONFIG_DVB_CX24116)
extern struct dvb_frontend *cx24116_attach(
const struct cx24116_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h
index 4e59e9574fa7..1648ab432168 100644
--- a/drivers/media/dvb-frontends/cx24117.h
+++ b/drivers/media/dvb-frontends/cx24117.h
@@ -30,7 +30,7 @@ struct cx24117_config {
u8 demod_address;
};
-#if IS_ENABLED(CONFIG_DVB_CX24117)
+#if IS_REACHABLE(CONFIG_DVB_CX24117)
extern struct dvb_frontend *cx24117_attach(
const struct cx24117_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/cx24123.h b/drivers/media/dvb-frontends/cx24123.h
index 102e70d17c43..758aee5a072f 100644
--- a/drivers/media/dvb-frontends/cx24123.h
+++ b/drivers/media/dvb-frontends/cx24123.h
@@ -39,7 +39,7 @@ struct cx24123_config {
void (*agc_callback) (struct dvb_frontend *);
};
-#if IS_ENABLED(CONFIG_DVB_CX24123)
+#if IS_REACHABLE(CONFIG_DVB_CX24123)
extern struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
struct i2c_adapter *i2c);
extern struct i2c_adapter *cx24123_get_tuner_i2c_adapter(struct dvb_frontend *);
diff --git a/drivers/media/dvb-frontends/cxd2820r.h b/drivers/media/dvb-frontends/cxd2820r.h
index 6095dbcf7850..56d42760263d 100644
--- a/drivers/media/dvb-frontends/cxd2820r.h
+++ b/drivers/media/dvb-frontends/cxd2820r.h
@@ -72,7 +72,7 @@ struct cxd2820r_config {
};
-#if IS_ENABLED(CONFIG_DVB_CXD2820R)
+#if IS_REACHABLE(CONFIG_DVB_CXD2820R)
extern struct dvb_frontend *cxd2820r_attach(
const struct cxd2820r_config *config,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 149fdca3fb44..72b0e2db3aab 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -79,7 +79,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x4000;
- if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = 0x4000 - DIV_ROUND_CLOSEST_ULL(num, 41000);
buf[0] = (if_ctl >> 8) & 0x3f;
buf[1] = (if_ctl >> 0) & 0xff;
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 422e84bbb008..490e090048ef 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -244,12 +244,6 @@ error:
return ret;
}
-/* 64 bit div with round closest, like DIV_ROUND_CLOSEST but 64 bit */
-u32 cxd2820r_div_u64_round_closest(u64 dividend, u32 divisor)
-{
- return div_u64(dividend + (divisor / 2), divisor);
-}
-
static int cxd2820r_set_frontend(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index 7ff5f60c83e1..4b428959b16e 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -64,8 +64,6 @@ int cxd2820r_wr_reg_mask(struct cxd2820r_priv *priv, u32 reg, u8 val,
int cxd2820r_wr_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
int len);
-u32 cxd2820r_div_u64_round_closest(u64 dividend, u32 divisor);
-
int cxd2820r_wr_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
int len);
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index 51401d036530..008cb2ac8480 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -103,7 +103,7 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x1000000;
- if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
buf[0] = ((if_ctl >> 16) & 0xff);
buf[1] = ((if_ctl >> 8) & 0xff);
buf[2] = ((if_ctl >> 0) & 0xff);
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index 9c0c4f42175c..35fe364c7182 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -120,7 +120,7 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x1000000;
- if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
buf[0] = ((if_ctl >> 16) & 0xff);
buf[1] = ((if_ctl >> 8) & 0xff);
buf[2] = ((if_ctl >> 0) & 0xff);
diff --git a/drivers/media/dvb-frontends/dib0070.h b/drivers/media/dvb-frontends/dib0070.h
index 0c6befcc9143..6c0b6672b1d9 100644
--- a/drivers/media/dvb-frontends/dib0070.h
+++ b/drivers/media/dvb-frontends/dib0070.h
@@ -48,7 +48,7 @@ struct dib0070_config {
u8 vga_filter;
};
-#if IS_ENABLED(CONFIG_DVB_TUNER_DIB0070)
+#if IS_REACHABLE(CONFIG_DVB_TUNER_DIB0070)
extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg);
extern u16 dib0070_wbd_offset(struct dvb_frontend *);
extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open);
diff --git a/drivers/media/dvb-frontends/dib0090.h b/drivers/media/dvb-frontends/dib0090.h
index 6a090954fa10..ad74bc823f08 100644
--- a/drivers/media/dvb-frontends/dib0090.h
+++ b/drivers/media/dvb-frontends/dib0090.h
@@ -75,7 +75,7 @@ struct dib0090_config {
u8 force_crystal_mode;
};
-#if IS_ENABLED(CONFIG_DVB_TUNER_DIB0090)
+#if IS_REACHABLE(CONFIG_DVB_TUNER_DIB0090)
extern struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast);
diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
index 9b6c3bbc983a..6ae9899b5b45 100644
--- a/drivers/media/dvb-frontends/dib3000.h
+++ b/drivers/media/dvb-frontends/dib3000.h
@@ -41,7 +41,7 @@ struct dib_fe_xfer_ops
int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
};
-#if IS_ENABLED(CONFIG_DVB_DIB3000MB)
+#if IS_REACHABLE(CONFIG_DVB_DIB3000MB)
extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
#else
diff --git a/drivers/media/dvb-frontends/dib3000mc.h b/drivers/media/dvb-frontends/dib3000mc.h
index 129d1425516a..74816f793611 100644
--- a/drivers/media/dvb-frontends/dib3000mc.h
+++ b/drivers/media/dvb-frontends/dib3000mc.h
@@ -41,7 +41,7 @@ struct dib3000mc_config {
#define DEFAULT_DIB3000MC_I2C_ADDRESS 16
#define DEFAULT_DIB3000P_I2C_ADDRESS 24
-#if IS_ENABLED(CONFIG_DVB_DIB3000MC)
+#if IS_REACHABLE(CONFIG_DVB_DIB3000MC)
extern struct dvb_frontend *dib3000mc_attach(struct i2c_adapter *i2c_adap,
u8 i2c_addr,
struct dib3000mc_config *cfg);
diff --git a/drivers/media/dvb-frontends/dib7000m.h b/drivers/media/dvb-frontends/dib7000m.h
index b585413f9a29..6468c278cc4d 100644
--- a/drivers/media/dvb-frontends/dib7000m.h
+++ b/drivers/media/dvb-frontends/dib7000m.h
@@ -40,7 +40,7 @@ struct dib7000m_config {
#define DEFAULT_DIB7000M_I2C_ADDRESS 18
-#if IS_ENABLED(CONFIG_DVB_DIB7000M)
+#if IS_REACHABLE(CONFIG_DVB_DIB7000M)
extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
u8 i2c_addr,
struct dib7000m_config *cfg);
diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
index 1fea0e972654..baa278928cf3 100644
--- a/drivers/media/dvb-frontends/dib7000p.h
+++ b/drivers/media/dvb-frontends/dib7000p.h
@@ -66,7 +66,7 @@ struct dib7000p_ops {
struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
};
-#if IS_ENABLED(CONFIG_DVB_DIB7000P)
+#if IS_REACHABLE(CONFIG_DVB_DIB7000P)
void *dib7000p_attach(struct dib7000p_ops *ops);
#else
static inline void *dib7000p_attach(struct dib7000p_ops *ops)
diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
index 84cc10383dcd..780c37bdcb72 100644
--- a/drivers/media/dvb-frontends/dib8000.h
+++ b/drivers/media/dvb-frontends/dib8000.h
@@ -63,7 +63,7 @@ struct dib8000_ops {
struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
};
-#if IS_ENABLED(CONFIG_DVB_DIB8000)
+#if IS_REACHABLE(CONFIG_DVB_DIB8000)
void *dib8000_attach(struct dib8000_ops *ops);
#else
static inline int dib8000_attach(struct dib8000_ops *ops)
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index f3639f045ff0..b10a70aa7c9f 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -27,7 +27,7 @@ struct dib9000_config {
#define DEFAULT_DIB9000_I2C_ADDRESS 18
-#if IS_ENABLED(CONFIG_DVB_DIB9000)
+#if IS_REACHABLE(CONFIG_DVB_DIB9000)
extern struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg);
extern int dib9000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr);
extern struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h b/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
index cfd0b96b6939..8188062953af 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
@@ -34,7 +34,7 @@ struct drx39xxj_state {
const struct firmware *fw;
};
-#if IS_ENABLED(CONFIG_DVB_DRX39XYJ)
+#if IS_REACHABLE(CONFIG_DVB_DRX39XYJ)
struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c) {
diff --git a/drivers/media/dvb-frontends/drxd.h b/drivers/media/dvb-frontends/drxd.h
index d998e4d5a7fc..a47c22d6667e 100644
--- a/drivers/media/dvb-frontends/drxd.h
+++ b/drivers/media/dvb-frontends/drxd.h
@@ -52,7 +52,7 @@ struct drxd_config {
s16(*osc_deviation) (void *priv, s16 dev, int flag);
};
-#if IS_ENABLED(CONFIG_DVB_DRXD)
+#if IS_REACHABLE(CONFIG_DVB_DRXD)
extern
struct dvb_frontend *drxd_attach(const struct drxd_config *config,
void *priv, struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index f6cb34660327..8f0b9eec528f 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -51,7 +51,7 @@ struct drxk_config {
int qam_demod_parameter_count;
};
-#if IS_ENABLED(CONFIG_DVB_DRXK)
+#if IS_REACHABLE(CONFIG_DVB_DRXK)
extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/ds3000.h b/drivers/media/dvb-frontends/ds3000.h
index f9c21fb7af13..153169da9017 100644
--- a/drivers/media/dvb-frontends/ds3000.h
+++ b/drivers/media/dvb-frontends/ds3000.h
@@ -35,7 +35,7 @@ struct ds3000_config {
void (*set_lock_led)(struct dvb_frontend *fe, int offon);
};
-#if IS_ENABLED(CONFIG_DVB_DS3000)
+#if IS_REACHABLE(CONFIG_DVB_DS3000)
extern struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/dvb-pll.h b/drivers/media/dvb-frontends/dvb-pll.h
index f4b5a0601c3a..bf9602a88b6c 100644
--- a/drivers/media/dvb-frontends/dvb-pll.h
+++ b/drivers/media/dvb-frontends/dvb-pll.h
@@ -38,7 +38,7 @@
* @param pll_desc_id dvb_pll_desc to use.
* @return Frontend pointer on success, NULL on failure
*/
-#if IS_ENABLED(CONFIG_DVB_PLL)
+#if IS_REACHABLE(CONFIG_DVB_PLL)
extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
int pll_addr,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.h b/drivers/media/dvb-frontends/dvb_dummy_fe.h
index 0cbf96105631..15e4ceab869a 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.h
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.h
@@ -26,7 +26,7 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
-#if IS_ENABLED(CONFIG_DVB_DUMMY_FE)
+#if IS_REACHABLE(CONFIG_DVB_DUMMY_FE)
extern struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void);
extern struct dvb_frontend* dvb_dummy_fe_qpsk_attach(void);
extern struct dvb_frontend* dvb_dummy_fe_qam_attach(void);
diff --git a/drivers/media/dvb-frontends/ec100.h b/drivers/media/dvb-frontends/ec100.h
index 37558403068d..9544bab5cd1d 100644
--- a/drivers/media/dvb-frontends/ec100.h
+++ b/drivers/media/dvb-frontends/ec100.h
@@ -31,7 +31,7 @@ struct ec100_config {
};
-#if IS_ENABLED(CONFIG_DVB_EC100)
+#if IS_REACHABLE(CONFIG_DVB_EC100)
extern struct dvb_frontend *ec100_attach(const struct ec100_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/hd29l2.h b/drivers/media/dvb-frontends/hd29l2.h
index 05cd13028a91..48e9ab74c883 100644
--- a/drivers/media/dvb-frontends/hd29l2.h
+++ b/drivers/media/dvb-frontends/hd29l2.h
@@ -51,7 +51,7 @@ struct hd29l2_config {
};
-#if IS_ENABLED(CONFIG_DVB_HD29L2)
+#if IS_REACHABLE(CONFIG_DVB_HD29L2)
extern struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/isl6405.h b/drivers/media/dvb-frontends/isl6405.h
index 8abb70c26fd9..3c148b830bd1 100644
--- a/drivers/media/dvb-frontends/isl6405.h
+++ b/drivers/media/dvb-frontends/isl6405.h
@@ -55,7 +55,7 @@
#define ISL6405_ENT2 0x20
#define ISL6405_ISEL2 0x40
-#if IS_ENABLED(CONFIG_DVB_ISL6405)
+#if IS_REACHABLE(CONFIG_DVB_ISL6405)
/* override_set and override_clear control which system register bits (above)
* to always set & clear
*/
diff --git a/drivers/media/dvb-frontends/isl6421.h b/drivers/media/dvb-frontends/isl6421.h
index 630e7f8a150e..3273597833fd 100644
--- a/drivers/media/dvb-frontends/isl6421.h
+++ b/drivers/media/dvb-frontends/isl6421.h
@@ -39,7 +39,7 @@
#define ISL6421_ISEL1 0x20
#define ISL6421_DCL 0x40
-#if IS_ENABLED(CONFIG_DVB_ISL6421)
+#if IS_REACHABLE(CONFIG_DVB_ISL6421)
/* override_set and override_clear control which system register bits (above) to always set & clear */
extern struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 i2c_addr,
u8 override_set, u8 override_clear, bool override_tone);
diff --git a/drivers/media/dvb-frontends/isl6423.h b/drivers/media/dvb-frontends/isl6423.h
index 80dfd9cc4f41..a64df0ee256b 100644
--- a/drivers/media/dvb-frontends/isl6423.h
+++ b/drivers/media/dvb-frontends/isl6423.h
@@ -42,7 +42,7 @@ struct isl6423_config {
u8 mod_extern;
};
-#if IS_ENABLED(CONFIG_DVB_ISL6423)
+#if IS_REACHABLE(CONFIG_DVB_ISL6423)
extern struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/itd1000.h b/drivers/media/dvb-frontends/itd1000.h
index edae0902f4fd..a691bb6f26de 100644
--- a/drivers/media/dvb-frontends/itd1000.h
+++ b/drivers/media/dvb-frontends/itd1000.h
@@ -29,7 +29,7 @@ struct itd1000_config {
u8 i2c_address;
};
-#if IS_ENABLED(CONFIG_DVB_TUNER_ITD1000)
+#if IS_REACHABLE(CONFIG_DVB_TUNER_ITD1000)
extern struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg);
#else
static inline struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg)
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h
index 1a735a75aa98..af107a2dd357 100644
--- a/drivers/media/dvb-frontends/ix2505v.h
+++ b/drivers/media/dvb-frontends/ix2505v.h
@@ -49,7 +49,7 @@ struct ix2505v_config {
};
-#if IS_ENABLED(CONFIG_DVB_IX2505V)
+#if IS_REACHABLE(CONFIG_DVB_IX2505V)
extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
const struct ix2505v_config *config, struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/l64781.h b/drivers/media/dvb-frontends/l64781.h
index 6813b08a774d..8697e2c2ba36 100644
--- a/drivers/media/dvb-frontends/l64781.h
+++ b/drivers/media/dvb-frontends/l64781.h
@@ -31,7 +31,7 @@ struct l64781_config
u8 demod_address;
};
-#if IS_ENABLED(CONFIG_DVB_L64781)
+#if IS_REACHABLE(CONFIG_DVB_L64781)
extern struct dvb_frontend* l64781_attach(const struct l64781_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/lg2160.h b/drivers/media/dvb-frontends/lg2160.h
index 194a07a78dc1..d20bd909de39 100644
--- a/drivers/media/dvb-frontends/lg2160.h
+++ b/drivers/media/dvb-frontends/lg2160.h
@@ -67,7 +67,7 @@ struct lg2160_config {
enum lg_chip_type lg_chip;
};
-#if IS_ENABLED(CONFIG_DVB_LG2160)
+#if IS_REACHABLE(CONFIG_DVB_LG2160)
extern
struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
struct i2c_adapter *i2c_adap);
diff --git a/drivers/media/dvb-frontends/lgdt3305.h b/drivers/media/dvb-frontends/lgdt3305.h
index 9c03e530e01b..f91a1b49ce2f 100644
--- a/drivers/media/dvb-frontends/lgdt3305.h
+++ b/drivers/media/dvb-frontends/lgdt3305.h
@@ -80,7 +80,7 @@ struct lgdt3305_config {
enum lgdt_demod_chip_type demod_chip;
};
-#if IS_ENABLED(CONFIG_DVB_LGDT3305)
+#if IS_REACHABLE(CONFIG_DVB_LGDT3305)
extern
struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
struct i2c_adapter *i2c_adap);
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
new file mode 100644
index 000000000000..d9a2b0e768e0
--- /dev/null
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -0,0 +1,2144 @@
+/*
+ * Support for LGDT3306A - 8VSB/QAM-B
+ *
+ * Copyright (C) 2013 Fred Richter <frichter@hauppauge.com>
+ * - driver structure based on lgdt3305.[ch] by Michael Krufky
+ * - code based on LG3306_V0.35 API by LG Electronics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/div64.h>
+#include <linux/dvb/frontend.h>
+#include "dvb_math.h"
+#include "lgdt3306a.h"
+
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debug level (info=1, reg=2 (or-able))");
+
+#define DBG_INFO 1
+#define DBG_REG 2
+#define DBG_DUMP 4 /* FGR - comment out to remove dump code */
+
+#define lg_debug(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt(fmt), ## arg)
+
+#define dbg_info(fmt, arg...) \
+ do { \
+ if (debug & DBG_INFO) \
+ lg_debug(fmt, ## arg); \
+ } while (0)
+
+#define dbg_reg(fmt, arg...) \
+ do { \
+ if (debug & DBG_REG) \
+ lg_debug(fmt, ## arg); \
+ } while (0)
+
+#define lg_chkerr(ret) \
+({ \
+ int __ret; \
+ __ret = (ret < 0); \
+ if (__ret) \
+ pr_err("error %d on line %d\n", ret, __LINE__); \
+ __ret; \
+})
+
+struct lgdt3306a_state {
+ struct i2c_adapter *i2c_adap;
+ const struct lgdt3306a_config *cfg;
+
+ struct dvb_frontend frontend;
+
+ fe_modulation_t current_modulation;
+ u32 current_frequency;
+ u32 snr;
+};
+
+/*
+ * LG3306A Register Usage
+ * (LG does not really name the registers, so this code does not either)
+ *
+ * 0000 -> 00FF Common control and status
+ * 1000 -> 10FF Synchronizer control and status
+ * 1F00 -> 1FFF Smart Antenna control and status
+ * 2100 -> 21FF VSB Equalizer control and status
+ * 2800 -> 28FF QAM Equalizer control and status
+ * 3000 -> 30FF FEC control and status
+ */
+
+enum lgdt3306a_lock_status {
+ LG3306_UNLOCK = 0x00,
+ LG3306_LOCK = 0x01,
+ LG3306_UNKNOWN_LOCK = 0xff
+};
+
+enum lgdt3306a_neverlock_status {
+ LG3306_NL_INIT = 0x00,
+ LG3306_NL_PROCESS = 0x01,
+ LG3306_NL_LOCK = 0x02,
+ LG3306_NL_FAIL = 0x03,
+ LG3306_NL_UNKNOWN = 0xff
+};
+
+enum lgdt3306a_modulation {
+ LG3306_VSB = 0x00,
+ LG3306_QAM64 = 0x01,
+ LG3306_QAM256 = 0x02,
+ LG3306_UNKNOWN_MODE = 0xff
+};
+
+enum lgdt3306a_lock_check {
+ LG3306_SYNC_LOCK,
+ LG3306_FEC_LOCK,
+ LG3306_TR_LOCK,
+ LG3306_AGC_LOCK,
+};
+
+
+#ifdef DBG_DUMP
+static void lgdt3306a_DumpAllRegs(struct lgdt3306a_state *state);
+static void lgdt3306a_DumpRegs(struct lgdt3306a_state *state);
+#endif
+
+
+static int lgdt3306a_write_reg(struct lgdt3306a_state *state, u16 reg, u8 val)
+{
+ int ret;
+ u8 buf[] = { reg >> 8, reg & 0xff, val };
+ struct i2c_msg msg = {
+ .addr = state->cfg->i2c_addr, .flags = 0,
+ .buf = buf, .len = 3,
+ };
+
+ dbg_reg("reg: 0x%04x, val: 0x%02x\n", reg, val);
+
+ ret = i2c_transfer(state->i2c_adap, &msg, 1);
+
+ if (ret != 1) {
+ pr_err("error (addr %02x %02x <- %02x, err = %i)\n",
+ msg.buf[0], msg.buf[1], msg.buf[2], ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int lgdt3306a_read_reg(struct lgdt3306a_state *state, u16 reg, u8 *val)
+{
+ int ret;
+ u8 reg_buf[] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msg[] = {
+ { .addr = state->cfg->i2c_addr,
+ .flags = 0, .buf = reg_buf, .len = 2 },
+ { .addr = state->cfg->i2c_addr,
+ .flags = I2C_M_RD, .buf = val, .len = 1 },
+ };
+
+ ret = i2c_transfer(state->i2c_adap, msg, 2);
+
+ if (ret != 2) {
+ pr_err("error (addr %02x reg %04x, err = %i)\n",
+ state->cfg->i2c_addr, reg, ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EREMOTEIO;
+ }
+ dbg_reg("reg: 0x%04x, val: 0x%02x\n", reg, *val);
+
+ return 0;
+}
+
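+/*
+ * read_reg() hides the error path: on an i2c failure it logs through
+ * lg_chkerr() and evaluates to 0, so callers cannot tell a failed read
+ * from a register that really reads back as zero.
+ */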
+#define read_reg(state, reg) \
+({ \
+ u8 __val; \
+ int ret = lgdt3306a_read_reg(state, reg, &__val); \
+ if (lg_chkerr(ret)) \
+ __val = 0; \
+ __val; \
+})
+
+static int lgdt3306a_set_reg_bit(struct lgdt3306a_state *state,
+ u16 reg, int bit, int onoff)
+{
+ u8 val;
+ int ret;
+
+ dbg_reg("reg: 0x%04x, bit: %d, level: %d\n", reg, bit, onoff);
+
+ ret = lgdt3306a_read_reg(state, reg, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ val &= ~(1 << bit);
+ val |= (onoff & 1) << bit;
+
+ ret = lgdt3306a_write_reg(state, reg, val);
+ lg_chkerr(ret);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_soft_reset(struct lgdt3306a_state *state)
+{
+ int ret;
+
+ dbg_info("\n");
+
+ ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ msleep(20);
+ ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 1);
+ lg_chkerr(ret);
+
+fail:
+ return ret;
+}
+
+static int lgdt3306a_mpeg_mode(struct lgdt3306a_state *state,
+ enum lgdt3306a_mpeg_mode mode)
+{
+ u8 val;
+ int ret;
+
+ dbg_info("(%d)\n", mode);
+ /* transport packet format - TPSENB=0x80 */
+ ret = lgdt3306a_set_reg_bit(state, 0x0071, 7,
+ mode == LGDT3306A_MPEG_PARALLEL ? 1 : 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /*
+ * start of packet signal duration
+ * TPSSOPBITEN=0x40; 0=byte duration, 1=bit duration
+ */
+ ret = lgdt3306a_set_reg_bit(state, 0x0071, 6, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_read_reg(state, 0x0070, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ val |= 0x10; /* TPCLKSUPB=0x10 */
+
+ if (mode == LGDT3306A_MPEG_PARALLEL)
+ val &= ~0x10;
+
+ ret = lgdt3306a_write_reg(state, 0x0070, val);
+ lg_chkerr(ret);
+
+fail:
+ return ret;
+}
+
+static int lgdt3306a_mpeg_mode_polarity(struct lgdt3306a_state *state,
+ enum lgdt3306a_tp_clock_edge edge,
+ enum lgdt3306a_tp_valid_polarity valid)
+{
+ u8 val;
+ int ret;
+
+ dbg_info("edge=%d, valid=%d\n", edge, valid);
+
+ ret = lgdt3306a_read_reg(state, 0x0070, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ val &= ~0x06; /* TPCLKPOL=0x04, TPVALPOL=0x02 */
+
+ if (edge == LGDT3306A_TPCLK_RISING_EDGE)
+ val |= 0x04;
+ if (valid == LGDT3306A_TP_VALID_HIGH)
+ val |= 0x02;
+
+ ret = lgdt3306a_write_reg(state, 0x0070, val);
+ lg_chkerr(ret);
+
+fail:
+ return ret;
+}
+
+static int lgdt3306a_mpeg_tristate(struct lgdt3306a_state *state,
+ int mode)
+{
+ u8 val;
+ int ret;
+
+ dbg_info("(%d)\n", mode);
+
+ if (mode) {
+ ret = lgdt3306a_read_reg(state, 0x0070, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ /*
+ * Tristate bus; TPOUTEN=0x80, TPCLKOUTEN=0x20,
+ * TPDATAOUTEN=0x08
+ */
+ val &= ~0xa8;
+ ret = lgdt3306a_write_reg(state, 0x0070, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* AGCIFOUTENB=0x40; 1=Disable IFAGC pin */
+ ret = lgdt3306a_set_reg_bit(state, 0x0003, 6, 1);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ } else {
+ /* enable IFAGC pin */
+ ret = lgdt3306a_set_reg_bit(state, 0x0003, 6, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_read_reg(state, 0x0070, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ val |= 0xa8; /* enable bus */
+ ret = lgdt3306a_write_reg(state, 0x0070, val);
+ if (lg_chkerr(ret))
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+
+static int lgdt3306a_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ dbg_info("acquire=%d\n", acquire);
+
+ return lgdt3306a_mpeg_tristate(state, acquire ? 0 : 1);
+
+}
+
+static int lgdt3306a_power(struct lgdt3306a_state *state,
+ int mode)
+{
+ int ret;
+
+ dbg_info("(%d)\n", mode);
+
+ if (mode == 0) {
+ /* into reset */
+ ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* power down */
+ ret = lgdt3306a_set_reg_bit(state, 0x0000, 0, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ } else {
+ /* out of reset */
+ ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 1);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* power up */
+ ret = lgdt3306a_set_reg_bit(state, 0x0000, 0, 1);
+ if (lg_chkerr(ret))
+ goto fail;
+ }
+
+#ifdef DBG_DUMP
+ lgdt3306a_DumpAllRegs(state);
+#endif
+fail:
+ return ret;
+}
+
+
+static int lgdt3306a_set_vsb(struct lgdt3306a_state *state)
+{
+ u8 val;
+ int ret;
+
+ dbg_info("\n");
+
+ /* 0. Spectrum inversion detection manual; spectrum inverted */
+ ret = lgdt3306a_read_reg(state, 0x0002, &val);
+ val &= 0xf7; /* SPECINVAUTO Off */
+ val |= 0x04; /* SPECINV On */
+ ret = lgdt3306a_write_reg(state, 0x0002, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 1. Selection of standard mode(0x08=QAM, 0x80=VSB) */
+ ret = lgdt3306a_write_reg(state, 0x0008, 0x80);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 2. Bandwidth mode for VSB(6MHz) */
+ ret = lgdt3306a_read_reg(state, 0x0009, &val);
+ val &= 0xe3;
+ val |= 0x0c; /* STDOPDETTMODE[2:0]=3 */
+ ret = lgdt3306a_write_reg(state, 0x0009, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 3. QAM mode detection mode(None) */
+ ret = lgdt3306a_read_reg(state, 0x0009, &val);
+ val &= 0xfc; /* STDOPDETCMODE[1:0]=0 */
+ ret = lgdt3306a_write_reg(state, 0x0009, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 4. ADC sampling frequency rate(2x sampling) */
+ ret = lgdt3306a_read_reg(state, 0x000d, &val);
+ val &= 0xbf; /* SAMPLING4XFEN=0 */
+ ret = lgdt3306a_write_reg(state, 0x000d, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+#if 0
+ /* FGR - disable any AICC filtering, testing only */
+
+ ret = lgdt3306a_write_reg(state, 0x0024, 0x00);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* AICCFIXFREQ0 NT N-1(Video rejection) */
+ ret = lgdt3306a_write_reg(state, 0x002e, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x002f, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x0030, 0x00);
+
+ /* AICCFIXFREQ1 NT N-1(Audio rejection) */
+ ret = lgdt3306a_write_reg(state, 0x002b, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x002c, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x002d, 0x00);
+
+ /* AICCFIXFREQ2 NT Co-Channel(Video rejection) */
+ ret = lgdt3306a_write_reg(state, 0x0028, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x0029, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x002a, 0x00);
+
+ /* AICCFIXFREQ3 NT Co-Channel(Audio rejection) */
+ ret = lgdt3306a_write_reg(state, 0x0025, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x0026, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x0027, 0x00);
+
+#else
+ /* FGR - this works well for HVR-1955,1975 */
+
+ /* 5. AICCOPMODE NT N-1 Adj. */
+ ret = lgdt3306a_write_reg(state, 0x0024, 0x5A);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* AICCFIXFREQ0 NT N-1(Video rejection) */
+ ret = lgdt3306a_write_reg(state, 0x002e, 0x5A);
+ ret = lgdt3306a_write_reg(state, 0x002f, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x0030, 0x00);
+
+ /* AICCFIXFREQ1 NT N-1(Audio rejection) */
+ ret = lgdt3306a_write_reg(state, 0x002b, 0x36);
+ ret = lgdt3306a_write_reg(state, 0x002c, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x002d, 0x00);
+
+ /* AICCFIXFREQ2 NT Co-Channel(Video rejection) */
+ ret = lgdt3306a_write_reg(state, 0x0028, 0x2A);
+ ret = lgdt3306a_write_reg(state, 0x0029, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x002a, 0x00);
+
+ /* AICCFIXFREQ3 NT Co-Channel(Audio rejection) */
+ ret = lgdt3306a_write_reg(state, 0x0025, 0x06);
+ ret = lgdt3306a_write_reg(state, 0x0026, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x0027, 0x00);
+#endif
+
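+ /* The remaining writes are undocumented demod/equalizer tweaks,
+ * presumably carried over from the LG reference code. */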
+ ret = lgdt3306a_read_reg(state, 0x001e, &val);
+ val &= 0x0f;
+ val |= 0xa0;
+ ret = lgdt3306a_write_reg(state, 0x001e, val);
+
+ ret = lgdt3306a_write_reg(state, 0x0022, 0x08);
+
+ ret = lgdt3306a_write_reg(state, 0x0023, 0xFF);
+
+ ret = lgdt3306a_read_reg(state, 0x211f, &val);
+ val &= 0xef;
+ ret = lgdt3306a_write_reg(state, 0x211f, val);
+
+ ret = lgdt3306a_write_reg(state, 0x2173, 0x01);
+
+ ret = lgdt3306a_read_reg(state, 0x1061, &val);
+ val &= 0xf8;
+ val |= 0x04;
+ ret = lgdt3306a_write_reg(state, 0x1061, val);
+
+ ret = lgdt3306a_read_reg(state, 0x103d, &val);
+ val &= 0xcf;
+ ret = lgdt3306a_write_reg(state, 0x103d, val);
+
+ ret = lgdt3306a_write_reg(state, 0x2122, 0x40);
+
+ ret = lgdt3306a_read_reg(state, 0x2141, &val);
+ val &= 0x3f;
+ ret = lgdt3306a_write_reg(state, 0x2141, val);
+
+ ret = lgdt3306a_read_reg(state, 0x2135, &val);
+ val &= 0x0f;
+ val |= 0x70;
+ ret = lgdt3306a_write_reg(state, 0x2135, val);
+
+ ret = lgdt3306a_read_reg(state, 0x0003, &val);
+ val &= 0xf7;
+ ret = lgdt3306a_write_reg(state, 0x0003, val);
+
+ ret = lgdt3306a_read_reg(state, 0x001c, &val);
+ val &= 0x7f;
+ ret = lgdt3306a_write_reg(state, 0x001c, val);
+
+ /* 6. EQ step size */
+ ret = lgdt3306a_read_reg(state, 0x2179, &val);
+ val &= 0xf8;
+ ret = lgdt3306a_write_reg(state, 0x2179, val);
+
+ ret = lgdt3306a_read_reg(state, 0x217a, &val);
+ val &= 0xf8;
+ ret = lgdt3306a_write_reg(state, 0x217a, val);
+
+ /* 7. Reset */
+ ret = lgdt3306a_soft_reset(state);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ dbg_info("complete\n");
+fail:
+ return ret;
+}
+
+static int lgdt3306a_set_qam(struct lgdt3306a_state *state, int modulation)
+{
+ u8 val;
+ int ret;
+
+ dbg_info("modulation=%d\n", modulation);
+
+ /* 1. Selection of standard mode(0x08=QAM, 0x80=VSB) */
+ ret = lgdt3306a_write_reg(state, 0x0008, 0x08);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 1a. Spectrum inversion detection to Auto */
+ ret = lgdt3306a_read_reg(state, 0x0002, &val);
+ val &= 0xfb; /* SPECINV Off */
+ val |= 0x08; /* SPECINVAUTO On */
+ ret = lgdt3306a_write_reg(state, 0x0002, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 2. Bandwidth mode for QAM */
+ ret = lgdt3306a_read_reg(state, 0x0009, &val);
+ val &= 0xe3; /* STDOPDETTMODE[2:0]=0 VSB Off */
+ ret = lgdt3306a_write_reg(state, 0x0009, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 3. : 64QAM/256QAM detection(manual, auto) */
+ ret = lgdt3306a_read_reg(state, 0x0009, &val);
+ val &= 0xfc;
+ val |= 0x02; /* STDOPDETCMODE[1:0]=1=Manual 2=Auto */
+ ret = lgdt3306a_write_reg(state, 0x0009, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 3a. : 64QAM/256QAM selection for manual */
+ ret = lgdt3306a_read_reg(state, 0x101a, &val);
+ val &= 0xf8;
+ if (modulation == QAM_64)
+ val |= 0x02; /* QMDQMODE[2:0]=2=QAM64 */
+ else
+ val |= 0x04; /* QMDQMODE[2:0]=4=QAM256 */
+
+ ret = lgdt3306a_write_reg(state, 0x101a, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 4. ADC sampling frequency rate(4x sampling) */
+ ret = lgdt3306a_read_reg(state, 0x000d, &val);
+ val &= 0xbf;
+ val |= 0x40; /* SAMPLING4XFEN=1 */
+ ret = lgdt3306a_write_reg(state, 0x000d, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 5. No AICC operation in QAM mode */
+ ret = lgdt3306a_read_reg(state, 0x0024, &val);
+ val &= 0x00;
+ ret = lgdt3306a_write_reg(state, 0x0024, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 6. Reset */
+ ret = lgdt3306a_soft_reset(state);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ dbg_info("complete\n");
+fail:
+ return ret;
+}
+
+static int lgdt3306a_set_modulation(struct lgdt3306a_state *state,
+ struct dtv_frontend_properties *p)
+{
+ int ret;
+
+ dbg_info("\n");
+
+ switch (p->modulation) {
+ case VSB_8:
+ ret = lgdt3306a_set_vsb(state);
+ break;
+ case QAM_64:
+ ret = lgdt3306a_set_qam(state, QAM_64);
+ break;
+ case QAM_256:
+ ret = lgdt3306a_set_qam(state, QAM_256);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (lg_chkerr(ret))
+ goto fail;
+
+ state->current_modulation = p->modulation;
+
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_agc_setup(struct lgdt3306a_state *state,
+ struct dtv_frontend_properties *p)
+{
+ /* TODO: anything we want to do here??? */
+ dbg_info("\n");
+
+ switch (p->modulation) {
+ case VSB_8:
+ break;
+ case QAM_64:
+ case QAM_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_set_inversion(struct lgdt3306a_state *state,
+ int inversion)
+{
+ int ret;
+
+ dbg_info("(%d)\n", inversion);
+
+ ret = lgdt3306a_set_reg_bit(state, 0x0002, 2, inversion ? 1 : 0);
+ return ret;
+}
+
+static int lgdt3306a_set_inversion_auto(struct lgdt3306a_state *state,
+ int enabled)
+{
+ int ret;
+
+ dbg_info("(%d)\n", enabled);
+
+ /* 0=Manual 1=Auto(QAM only) - SPECINVAUTO=0x04 */
+ ret = lgdt3306a_set_reg_bit(state, 0x0002, 3, enabled);
+ return ret;
+}
+
+static int lgdt3306a_spectral_inversion(struct lgdt3306a_state *state,
+ struct dtv_frontend_properties *p,
+ int inversion)
+{
+ int ret = 0;
+
+ dbg_info("(%d)\n", inversion);
+#if 0
+ /*
+ * FGR - spectral_inversion defaults already set for VSB and QAM;
+ * can enable later if desired
+ */
+
+ ret = lgdt3306a_set_inversion(state, inversion);
+
+ switch (p->modulation) {
+ case VSB_8:
+ /* Manual only for VSB */
+ ret = lgdt3306a_set_inversion_auto(state, 0);
+ break;
+ case QAM_64:
+ case QAM_256:
+ /* Auto ok for QAM */
+ ret = lgdt3306a_set_inversion_auto(state, 1);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+#endif
+ return ret;
+}
+
+static int lgdt3306a_set_if(struct lgdt3306a_state *state,
+ struct dtv_frontend_properties *p)
+{
+ int ret;
+ u16 if_freq_khz;
+ u8 nco1, nco2;
+
+ switch (p->modulation) {
+ case VSB_8:
+ if_freq_khz = state->cfg->vsb_if_khz;
+ break;
+ case QAM_64:
+ case QAM_256:
+ if_freq_khz = state->cfg->qam_if_khz;
+ break;
+ default:
+ return -EINVAL;
+ }
+
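+ /*
+ * The NCO values below appear to follow
+ * nco = if_freq_khz * 4096 / 1000 (e.g. 3250 kHz -> 0x3400,
+ * 5380 kHz -> 0x5614); unsupported IFs fall back to 3.25 MHz.
+ */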
+ switch (if_freq_khz) {
+ default:
+ pr_warn("IF=%d KHz is not supported, 3250 assumed\n",
+ if_freq_khz);
+ /* fallthrough */
+ case 3250: /* 3.25Mhz */
+ nco1 = 0x34;
+ nco2 = 0x00;
+ break;
+ case 3500: /* 3.50Mhz */
+ nco1 = 0x38;
+ nco2 = 0x00;
+ break;
+ case 4000: /* 4.00Mhz */
+ nco1 = 0x40;
+ nco2 = 0x00;
+ break;
+ case 5000: /* 5.00Mhz */
+ nco1 = 0x50;
+ nco2 = 0x00;
+ break;
+ case 5380: /* 5.38Mhz */
+ nco1 = 0x56;
+ nco2 = 0x14;
+ break;
+ }
+ ret = lgdt3306a_write_reg(state, 0x0010, nco1);
+ if (ret)
+ return ret;
+ ret = lgdt3306a_write_reg(state, 0x0011, nco2);
+ if (ret)
+ return ret;
+
+ dbg_info("if_freq=%d KHz->[%04x]\n", if_freq_khz, nco1<<8 | nco2);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ if (state->cfg->deny_i2c_rptr) {
+ dbg_info("deny_i2c_rptr=%d\n", state->cfg->deny_i2c_rptr);
+ return 0;
+ }
+ dbg_info("(%d)\n", enable);
+
+ /* NI2CRPTEN=0x80 */
+ return lgdt3306a_set_reg_bit(state, 0x0002, 7, enable ? 0 : 1);
+}
+
+static int lgdt3306a_sleep(struct lgdt3306a_state *state)
+{
+ int ret;
+
+ dbg_info("\n");
+ state->current_frequency = -1; /* force re-tune, when we wake */
+
+ ret = lgdt3306a_mpeg_tristate(state, 1); /* disable data bus */
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_power(state, 0); /* power down */
+ lg_chkerr(ret);
+
+fail:
+ return 0;
+}
+
+static int lgdt3306a_fe_sleep(struct dvb_frontend *fe)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ return lgdt3306a_sleep(state);
+}
+
+static int lgdt3306a_init(struct dvb_frontend *fe)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+ u8 val;
+ int ret;
+
+ dbg_info("\n");
+
+ /* 1. Normal operation mode */
+ ret = lgdt3306a_set_reg_bit(state, 0x0001, 0, 1); /* SIMFASTENB=0x01 */
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 2. Spectrum inversion auto detection (Not valid for VSB) */
+ ret = lgdt3306a_set_inversion_auto(state, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 3. Spectrum inversion(According to the tuner configuration) */
+ ret = lgdt3306a_set_inversion(state, 1);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 4. Peak-to-peak voltage of ADC input signal */
+
+ /* ADCSEL1V=0x80=1Vpp; 0x00=2Vpp */
+ ret = lgdt3306a_set_reg_bit(state, 0x0004, 7, 1);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 5. ADC output data capture clock phase */
+
+ /* 0=same phase as ADC clock */
+ ret = lgdt3306a_set_reg_bit(state, 0x0004, 2, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 5a. ADC sampling clock source */
+
+ /* ADCCLKPLLSEL=0x08; 0=use ext clock, not PLL */
+ ret = lgdt3306a_set_reg_bit(state, 0x0004, 3, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 6. Automatic PLL set */
+
+ /* PLLSETAUTO=0x40; 0=off */
+ ret = lgdt3306a_set_reg_bit(state, 0x0005, 6, 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ if (state->cfg->xtalMHz == 24) { /* 24MHz */
+ /* 7. Frequency for PLL output(0x2564 for 192MHz for 24MHz) */
+ ret = lgdt3306a_read_reg(state, 0x0005, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ val &= 0xc0;
+ val |= 0x25;
+ ret = lgdt3306a_write_reg(state, 0x0005, val);
+ if (lg_chkerr(ret))
+ goto fail;
+ ret = lgdt3306a_write_reg(state, 0x0006, 0x64);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 8. ADC sampling frequency(0x180000 for 24MHz sampling) */
+ ret = lgdt3306a_read_reg(state, 0x000d, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ val &= 0xc0;
+ val |= 0x18;
+ ret = lgdt3306a_write_reg(state, 0x000d, val);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ } else if (state->cfg->xtalMHz == 25) { /* 25MHz */
+ /* 7. Frequency for PLL output */
+ ret = lgdt3306a_read_reg(state, 0x0005, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ val &= 0xc0;
+ val |= 0x25;
+ ret = lgdt3306a_write_reg(state, 0x0005, val);
+ if (lg_chkerr(ret))
+ goto fail;
+ ret = lgdt3306a_write_reg(state, 0x0006, 0x64);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ /* 8. ADC sampling frequency(0x190000 for 25MHz sampling) */
+ ret = lgdt3306a_read_reg(state, 0x000d, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ val &= 0xc0;
+ val |= 0x19;
+ ret = lgdt3306a_write_reg(state, 0x000d, val);
+ if (lg_chkerr(ret))
+ goto fail;
+ } else {
+ pr_err("Bad xtalMHz=%d\n", state->cfg->xtalMHz);
+ }
+#if 0
+ ret = lgdt3306a_write_reg(state, 0x000e, 0x00);
+ ret = lgdt3306a_write_reg(state, 0x000f, 0x00);
+#endif
+
+ /* 9. Center frequency of input signal of ADC */
+ ret = lgdt3306a_write_reg(state, 0x0010, 0x34); /* 3.25MHz */
+ ret = lgdt3306a_write_reg(state, 0x0011, 0x00);
+
+ /* 10. Fixed gain error value */
+ ret = lgdt3306a_write_reg(state, 0x0014, 0); /* gain error=0 */
+
+ /* 10a. VSB TR BW gear shift initial step */
+ ret = lgdt3306a_read_reg(state, 0x103c, &val);
+ val &= 0x0f;
+ val |= 0x20; /* SAMGSAUTOSTL_V[3:0] = 2 */
+ ret = lgdt3306a_write_reg(state, 0x103c, val);
+
+ /* 10b. Timing offset calibration in low temperature for VSB */
+ ret = lgdt3306a_read_reg(state, 0x103d, &val);
+ val &= 0xfc;
+ val |= 0x03;
+ ret = lgdt3306a_write_reg(state, 0x103d, val);
+
+ /* 10c. Timing offset calibration in low temperature for QAM */
+ ret = lgdt3306a_read_reg(state, 0x1036, &val);
+ val &= 0xf0;
+ val |= 0x0c;
+ ret = lgdt3306a_write_reg(state, 0x1036, val);
+
+ /* 11. Using the imaginary part of CIR in CIR loading */
+ ret = lgdt3306a_read_reg(state, 0x211f, &val);
+ val &= 0xef; /* do not use imaginary of CIR */
+ ret = lgdt3306a_write_reg(state, 0x211f, val);
+
+ /* 12. Control of no signal detector function */
+ ret = lgdt3306a_read_reg(state, 0x2849, &val);
+ val &= 0xef; /* NOUSENOSIGDET=0, enable no signal detector */
+ ret = lgdt3306a_write_reg(state, 0x2849, val);
+
+ /* FGR - put demod in some known mode */
+ ret = lgdt3306a_set_vsb(state);
+
+ /* 13. TP stream format */
+ ret = lgdt3306a_mpeg_mode(state, state->cfg->mpeg_mode);
+
+ /* 14. disable output buses */
+ ret = lgdt3306a_mpeg_tristate(state, 1);
+
+ /* 15. Sleep (in reset) */
+ ret = lgdt3306a_sleep(state);
+ lg_chkerr(ret);
+
+fail:
+ return ret;
+}
+
+static int lgdt3306a_set_parameters(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+ int ret;
+
+ dbg_info("(%d, %d)\n", p->frequency, p->modulation);
+
+ if (state->current_frequency == p->frequency &&
+ state->current_modulation == p->modulation) {
+ dbg_info(" (already set, skipping ...)\n");
+ return 0;
+ }
+ state->current_frequency = -1;
+ state->current_modulation = -1;
+
+ ret = lgdt3306a_power(state, 1); /* power up */
+ if (lg_chkerr(ret))
+ goto fail;
+
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+#if 0
+ if (lg_chkerr(ret))
+ goto fail;
+ state->current_frequency = p->frequency;
+#endif
+ }
+
+ ret = lgdt3306a_set_modulation(state, p);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_agc_setup(state, p);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_set_if(state, p);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_spectral_inversion(state, p,
+ state->cfg->spectral_inversion ? 1 : 0);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_mpeg_mode(state, state->cfg->mpeg_mode);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_mpeg_mode_polarity(state,
+ state->cfg->tpclk_edge,
+ state->cfg->tpvalid_polarity);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_mpeg_tristate(state, 0); /* enable data bus */
+ if (lg_chkerr(ret))
+ goto fail;
+
+ ret = lgdt3306a_soft_reset(state);
+ if (lg_chkerr(ret))
+ goto fail;
+
+#ifdef DBG_DUMP
+ lgdt3306a_DumpAllRegs(state);
+#endif
+ state->current_frequency = p->frequency;
+fail:
+ return ret;
+}
+
+static int lgdt3306a_get_frontend(struct dvb_frontend *fe)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ dbg_info("(%u, %d)\n",
+ state->current_frequency, state->current_modulation);
+
+ p->modulation = state->current_modulation;
+ p->frequency = state->current_frequency;
+ return 0;
+}
+
+static enum dvbfe_algo lgdt3306a_get_frontend_algo(struct dvb_frontend *fe)
+{
+#if 1
+ return DVBFE_ALGO_CUSTOM;
+#else
+ return DVBFE_ALGO_HW;
+#endif
+}
+
+/* ------------------------------------------------------------------------ */
+static int lgdt3306a_monitor_vsb(struct lgdt3306a_state *state)
+{
+ u8 val;
+ int ret;
+ u8 snrRef, maxPowerMan, nCombDet;
+ u16 fbDlyCir;
+
+ ret = lgdt3306a_read_reg(state, 0x21a1, &val);
+ if (ret)
+ return ret;
+ snrRef = val & 0x3f;
+
+ ret = lgdt3306a_read_reg(state, 0x2185, &maxPowerMan);
+ if (ret)
+ return ret;
+
+ ret = lgdt3306a_read_reg(state, 0x2191, &val);
+ if (ret)
+ return ret;
+ nCombDet = (val & 0x80) >> 7;
+
+ ret = lgdt3306a_read_reg(state, 0x2180, &val);
+ if (ret)
+ return ret;
+ fbDlyCir = (val & 0x03) << 8;
+
+ ret = lgdt3306a_read_reg(state, 0x2181, &val);
+ if (ret)
+ return ret;
+ fbDlyCir |= val;
+
+ dbg_info("snrRef=%d maxPowerMan=0x%x nCombDet=%d fbDlyCir=0x%x\n",
+ snrRef, maxPowerMan, nCombDet, fbDlyCir);
+
+ /* Carrier offset sub loop bandwidth */
+ ret = lgdt3306a_read_reg(state, 0x1061, &val);
+ if (ret)
+ return ret;
+ val &= 0xf8;
+ if ((snrRef > 18) && (maxPowerMan > 0x68)
+ && (nCombDet == 0x01)
+ && ((fbDlyCir == 0x03FF) || (fbDlyCir < 0x6C))) {
+ /* SNR is over 18dB and no ghosting */
+ val |= 0x00; /* final bandwidth = 0 */
+ } else {
+ val |= 0x04; /* final bandwidth = 4 */
+ }
+ ret = lgdt3306a_write_reg(state, 0x1061, val);
+ if (ret)
+ return ret;
+
+ /* Adjust Notch Filter */
+ ret = lgdt3306a_read_reg(state, 0x0024, &val);
+ if (ret)
+ return ret;
+ val &= 0x0f;
+ if (nCombDet == 0) { /* Turn on the Notch Filter */
+ val |= 0x50;
+ }
+ ret = lgdt3306a_write_reg(state, 0x0024, val);
+ if (ret)
+ return ret;
+
+ /* VSB Timing Recovery output normalization */
+ ret = lgdt3306a_read_reg(state, 0x103d, &val);
+ if (ret)
+ return ret;
+ val &= 0xcf;
+ val |= 0x20;
+ ret = lgdt3306a_write_reg(state, 0x103d, val);
+
+ return ret;
+}
+
+static enum lgdt3306a_modulation
+lgdt3306a_check_oper_mode(struct lgdt3306a_state *state)
+{
+ u8 val = 0;
+ int ret;
+
+ ret = lgdt3306a_read_reg(state, 0x0081, &val);
+ if (ret)
+ goto err;
+
+ if (val & 0x80) {
+ dbg_info("VSB\n");
+ return LG3306_VSB;
+ }
+ if (val & 0x08) {
+ ret = lgdt3306a_read_reg(state, 0x00a6, &val);
+ if (ret)
+ goto err;
+ val = val >> 2;
+ if (val & 0x01) {
+ dbg_info("QAM256\n");
+ return LG3306_QAM256;
+ }
+ dbg_info("QAM64\n");
+ return LG3306_QAM64;
+ }
+err:
+ pr_warn("UNKNOWN\n");
+ return LG3306_UNKNOWN_MODE;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_check_lock_status(struct lgdt3306a_state *state,
+ enum lgdt3306a_lock_check whatLock)
+{
+ u8 val = 0;
+ int ret;
+ enum lgdt3306a_modulation modeOper;
+ enum lgdt3306a_lock_status lockStatus;
+
+ modeOper = LG3306_UNKNOWN_MODE;
+
+ switch (whatLock) {
+ case LG3306_SYNC_LOCK:
+ {
+ ret = lgdt3306a_read_reg(state, 0x00a6, &val);
+ if (ret)
+ return ret;
+
+ if ((val & 0x80) == 0x80)
+ lockStatus = LG3306_LOCK;
+ else
+ lockStatus = LG3306_UNLOCK;
+
+ dbg_info("SYNC_LOCK=%x\n", lockStatus);
+ break;
+ }
+ case LG3306_AGC_LOCK:
+ {
+ ret = lgdt3306a_read_reg(state, 0x0080, &val);
+ if (ret)
+ return ret;
+
+ if ((val & 0x40) == 0x40)
+ lockStatus = LG3306_LOCK;
+ else
+ lockStatus = LG3306_UNLOCK;
+
+ dbg_info("AGC_LOCK=%x\n", lockStatus);
+ break;
+ }
+ case LG3306_TR_LOCK:
+ {
+ modeOper = lgdt3306a_check_oper_mode(state);
+ if ((modeOper == LG3306_QAM64) || (modeOper == LG3306_QAM256)) {
+ ret = lgdt3306a_read_reg(state, 0x1094, &val);
+ if (ret)
+ return ret;
+
+ if ((val & 0x80) == 0x80)
+ lockStatus = LG3306_LOCK;
+ else
+ lockStatus = LG3306_UNLOCK;
+ } else
+ lockStatus = LG3306_UNKNOWN_LOCK;
+
+ dbg_info("TR_LOCK=%x\n", lockStatus);
+ break;
+ }
+ case LG3306_FEC_LOCK:
+ {
+ modeOper = lgdt3306a_check_oper_mode(state);
+ if ((modeOper == LG3306_QAM64) || (modeOper == LG3306_QAM256)) {
+ ret = lgdt3306a_read_reg(state, 0x0080, &val);
+ if (ret)
+ return ret;
+
+ if ((val & 0x10) == 0x10)
+ lockStatus = LG3306_LOCK;
+ else
+ lockStatus = LG3306_UNLOCK;
+ } else
+ lockStatus = LG3306_UNKNOWN_LOCK;
+
+ dbg_info("FEC_LOCK=%x\n", lockStatus);
+ break;
+ }
+
+ default:
+ lockStatus = LG3306_UNKNOWN_LOCK;
+ pr_warn("UNKNOWN whatLock=%d\n", whatLock);
+ break;
+ }
+
+ return lockStatus;
+}
+
+static enum lgdt3306a_neverlock_status
+lgdt3306a_check_neverlock_status(struct lgdt3306a_state *state)
+{
+ u8 val = 0;
+ int ret;
+ enum lgdt3306a_neverlock_status lockStatus;
+
+ ret = lgdt3306a_read_reg(state, 0x0080, &val);
+ if (ret)
+ return ret;
+ lockStatus = (enum lgdt3306a_neverlock_status)(val & 0x03);
+
+ dbg_info("NeverLock=%d\n", lockStatus);
+
+ return lockStatus;
+}
+
+static int lgdt3306a_pre_monitoring(struct lgdt3306a_state *state)
+{
+ u8 val = 0;
+ int ret;
+ u8 currChDiffACQ, snrRef, mainStrong, aiccrejStatus;
+
+ /* Channel variation */
+ ret = lgdt3306a_read_reg(state, 0x21bc, &currChDiffACQ);
+ if (ret)
+ return ret;
+
+ /* SNR of Frame sync */
+ ret = lgdt3306a_read_reg(state, 0x21a1, &val);
+ if (ret)
+ return ret;
+ snrRef = val & 0x3f;
+
+ /* Strong Main CIR */
+ ret = lgdt3306a_read_reg(state, 0x2199, &val);
+ if (ret)
+ return ret;
+ mainStrong = (val & 0x40) >> 6;
+
+ ret = lgdt3306a_read_reg(state, 0x0090, &val);
+ if (ret)
+ return ret;
+ aiccrejStatus = (val & 0xf0) >> 4;
+
+ dbg_info("snrRef=%d mainStrong=%d aiccrejStatus=%d currChDiffACQ=0x%x\n",
+ snrRef, mainStrong, aiccrejStatus, currChDiffACQ);
+
+#if 0
+ /* Dynamic ghost exists */
+ if ((mainStrong == 0) && (currChDiffACQ > 0x70))
+#endif
+ if (mainStrong == 0) {
+ ret = lgdt3306a_read_reg(state, 0x2135, &val);
+ if (ret)
+ return ret;
+ val &= 0x0f;
+ val |= 0xa0;
+ ret = lgdt3306a_write_reg(state, 0x2135, val);
+ if (ret)
+ return ret;
+
+ ret = lgdt3306a_read_reg(state, 0x2141, &val);
+ if (ret)
+ return ret;
+ val &= 0x3f;
+ val |= 0x80;
+ ret = lgdt3306a_write_reg(state, 0x2141, val);
+ if (ret)
+ return ret;
+
+ ret = lgdt3306a_write_reg(state, 0x2122, 0x70);
+ if (ret)
+ return ret;
+ } else { /* Weak ghost or static channel */
+ ret = lgdt3306a_read_reg(state, 0x2135, &val);
+ if (ret)
+ return ret;
+ val &= 0x0f;
+ val |= 0x70;
+ ret = lgdt3306a_write_reg(state, 0x2135, val);
+ if (ret)
+ return ret;
+
+ ret = lgdt3306a_read_reg(state, 0x2141, &val);
+ if (ret)
+ return ret;
+ val &= 0x3f;
+ val |= 0x40;
+ ret = lgdt3306a_write_reg(state, 0x2141, val);
+ if (ret)
+ return ret;
+
+ ret = lgdt3306a_write_reg(state, 0x2122, 0x40);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_sync_lock_poll(struct lgdt3306a_state *state)
+{
+ enum lgdt3306a_lock_status syncLockStatus = LG3306_UNLOCK;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ msleep(30);
+
+ syncLockStatus = lgdt3306a_check_lock_status(state,
+ LG3306_SYNC_LOCK);
+
+ if (syncLockStatus == LG3306_LOCK) {
+ dbg_info("locked(%d)\n", i);
+ return LG3306_LOCK;
+ }
+ }
+ dbg_info("not locked\n");
+ return LG3306_UNLOCK;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_fec_lock_poll(struct lgdt3306a_state *state)
+{
+ enum lgdt3306a_lock_status FECLockStatus = LG3306_UNLOCK;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ msleep(30);
+
+ FECLockStatus = lgdt3306a_check_lock_status(state,
+ LG3306_FEC_LOCK);
+
+ if (FECLockStatus == LG3306_LOCK) {
+ dbg_info("locked(%d)\n", i);
+ return FECLockStatus;
+ }
+ }
+ dbg_info("not locked\n");
+ return FECLockStatus;
+}
+
+static enum lgdt3306a_neverlock_status
+lgdt3306a_neverlock_poll(struct lgdt3306a_state *state)
+{
+ enum lgdt3306a_neverlock_status NLLockStatus = LG3306_NL_FAIL;
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ msleep(30);
+
+ NLLockStatus = lgdt3306a_check_neverlock_status(state);
+
+ if (NLLockStatus == LG3306_NL_LOCK) {
+ dbg_info("NL_LOCK(%d)\n", i);
+ return NLLockStatus;
+ }
+ }
+ dbg_info("NLLockStatus=%d\n", NLLockStatus);
+ return NLLockStatus;
+}
+
+static u8 lgdt3306a_get_packet_error(struct lgdt3306a_state *state)
+{
+ u8 val;
+ int ret;
+
+ ret = lgdt3306a_read_reg(state, 0x00fa, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static const u32 valx_x10[] = {
+ 10, 11, 13, 15, 17, 20, 25, 33, 41, 50, 59, 73, 87, 100
+};
+static const u32 log10x_x1000[] = {
+ 0, 41, 114, 176, 230, 301, 398, 518, 613, 699, 771, 863, 939, 1000
+};
+
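+/*
+ * Fixed-point log10: the argument is the value scaled by 10, the result
+ * is log10() scaled by 1000, linearly interpolated between the table
+ * entries above; e.g. log10_x1000(20) = 301, i.e. log10(2.0) ~= 0.301.
+ */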
+static u32 log10_x1000(u32 x)
+{
+ u32 diff_val, step_val, step_log10;
+ u32 log_val = 0;
+ u32 i;
+
+ if (x <= 0)
+ return -1000000; /* signal error */
+
+ if (x == 10)
+ return 0; /* log(1)=0 */
+
+ if (x < 10) {
+ while (x < 10) {
+ x = x * 10;
+ log_val--;
+ }
+ } else { /* x > 10 */
+ while (x >= 100) {
+ x = x / 10;
+ log_val++;
+ }
+ }
+ log_val *= 1000;
+
+ if (x == 10) /* was our input an exact power of 10 */
+ return log_val; /* don't need to interpolate */
+
+ /* find our place on the log curve */
+ for (i = 1; i < ARRAY_SIZE(valx_x10); i++) {
+ if (valx_x10[i] >= x)
+ break;
+ }
+ if (i == ARRAY_SIZE(valx_x10))
+ return log_val + log10x_x1000[i - 1];
+
+ diff_val = x - valx_x10[i-1];
+ step_val = valx_x10[i] - valx_x10[i - 1];
+ step_log10 = log10x_x1000[i] - log10x_x1000[i - 1];
+
+ /* do a linear interpolation to get in-between values */
+ return log_val + log10x_x1000[i - 1] +
+ ((diff_val*step_log10) / step_val);
+}
+
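+/*
+ * SNR in dB is 10*log10(pwr/mse); scaling pwr by 10000 before the
+ * divide and subtracting 3000 from the x1000 logarithm leaves
+ * 1000*log10(pwr/mse), i.e. the SNR in dB times 100.
+ */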
+static u32 lgdt3306a_calculate_snr_x100(struct lgdt3306a_state *state)
+{
+ u32 mse; /* Mean-Square Error */
+ u32 pwr; /* Constellation power */
+ u32 snr_x100;
+
+ mse = (read_reg(state, 0x00ec) << 8) |
+ (read_reg(state, 0x00ed));
+ pwr = (read_reg(state, 0x00e8) << 8) |
+ (read_reg(state, 0x00e9));
+
+ if (mse == 0) /* no signal */
+ return 0;
+
+ snr_x100 = log10_x1000((pwr * 10000) / mse) - 3000;
+ dbg_info("mse=%u, pwr=%u, snr_x100=%d\n", mse, pwr, snr_x100);
+
+ return snr_x100;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_vsb_lock_poll(struct lgdt3306a_state *state)
+{
+ int ret;
+ u8 cnt = 0;
+ u8 packet_error;
+ u32 snr;
+
+ for (cnt = 0; cnt < 10; cnt++) {
+ if (lgdt3306a_sync_lock_poll(state) == LG3306_UNLOCK) {
+ dbg_info("no sync lock!\n");
+ return LG3306_UNLOCK;
+ }
+
+ msleep(20);
+ ret = lgdt3306a_pre_monitoring(state);
+ if (ret)
+ break;
+
+ packet_error = lgdt3306a_get_packet_error(state);
+ snr = lgdt3306a_calculate_snr_x100(state);
+ dbg_info("cnt=%d errors=%d snr=%d\n", cnt, packet_error, snr);
+
+ if ((snr >= 1500) && (packet_error < 0xff))
+ return LG3306_LOCK;
+ }
+
+ dbg_info("not locked!\n");
+ return LG3306_UNLOCK;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_qam_lock_poll(struct lgdt3306a_state *state)
+{
+ u8 cnt;
+ u8 packet_error;
+ u32 snr;
+
+ for (cnt = 0; cnt < 10; cnt++) {
+ if (lgdt3306a_fec_lock_poll(state) == LG3306_UNLOCK) {
+ dbg_info("no fec lock!\n");
+ return LG3306_UNLOCK;
+ }
+
+ msleep(20);
+
+ packet_error = lgdt3306a_get_packet_error(state);
+ snr = lgdt3306a_calculate_snr_x100(state);
+ dbg_info("cnt=%d errors=%d snr=%d\n", cnt, packet_error, snr);
+
+ if ((snr >= 1500) && (packet_error < 0xff))
+ return LG3306_LOCK;
+ }
+
+ dbg_info("not locked!\n");
+ return LG3306_UNLOCK;
+}
+
+static int lgdt3306a_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+ u16 strength = 0;
+ int ret = 0;
+
+ if (fe->ops.tuner_ops.get_rf_strength) {
+ ret = fe->ops.tuner_ops.get_rf_strength(fe, &strength);
+ if (ret == 0)
+ dbg_info("strength=%d\n", strength);
+ else
+ dbg_info("fe->ops.tuner_ops.get_rf_strength() failed\n");
+ }
+
+ *status = 0;
+ if (lgdt3306a_neverlock_poll(state) == LG3306_NL_LOCK) {
+ *status |= FE_HAS_SIGNAL;
+ *status |= FE_HAS_CARRIER;
+
+ switch (state->current_modulation) {
+ case QAM_256:
+ case QAM_64:
+ if (lgdt3306a_qam_lock_poll(state) == LG3306_LOCK) {
+ *status |= FE_HAS_VITERBI;
+ *status |= FE_HAS_SYNC;
+
+ *status |= FE_HAS_LOCK;
+ }
+ break;
+ case VSB_8:
+ if (lgdt3306a_vsb_lock_poll(state) == LG3306_LOCK) {
+ *status |= FE_HAS_VITERBI;
+ *status |= FE_HAS_SYNC;
+
+ *status |= FE_HAS_LOCK;
+
+ ret = lgdt3306a_monitor_vsb(state);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+ return ret;
+}
+
+
+static int lgdt3306a_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ state->snr = lgdt3306a_calculate_snr_x100(state);
+ /* report SNR in dB * 10 */
+ *snr = state->snr/10;
+
+ return 0;
+}
+
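+/*
+ * Map SNR to a 0..0xffff "strength": below (ref_snr - 1 dB) report 0,
+ * up to ref_snr report 65%, above it start at 78% and add 1% for each
+ * extra 0.5 dB, capped at 100%.  E.g. for VSB (ref 16 dB) an SNR of
+ * 20 dB gives (2000 - 1600) / 50 + 78 = 86% of 0xffff.
+ */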
+static int lgdt3306a_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ /*
+ * Calculate some sort of "strength" from SNR
+ */
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+ u16 snr; /* snr_x10 */
+ int ret;
+ u32 ref_snr; /* snr*100 */
+ u32 str;
+
+ *strength = 0;
+
+ switch (state->current_modulation) {
+ case VSB_8:
+ ref_snr = 1600; /* 16dB */
+ break;
+ case QAM_64:
+ ref_snr = 2200; /* 22dB */
+ break;
+ case QAM_256:
+ ref_snr = 2800; /* 28dB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = fe->ops.read_snr(fe, &snr);
+ if (lg_chkerr(ret))
+ goto fail;
+
+ if (state->snr <= (ref_snr - 100))
+ str = 0;
+ else if (state->snr <= ref_snr)
+ str = (0xffff * 65) / 100; /* 65% */
+ else {
+ str = state->snr - ref_snr;
+ str /= 50;
+ str += 78; /* 78%-100% */
+ if (str > 100)
+ str = 100;
+ str = (0xffff * str) / 100;
+ }
+ *strength = (u16)str;
+ dbg_info("strength=%u\n", *strength);
+
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+ u32 tmp;
+
+ *ber = 0;
+#if 1
+ /* FGR - FIXME - I don't know what value is expected by dvb_core
+ * what is the scale of the value?? */
+ tmp = read_reg(state, 0x00fc); /* NBERVALUE[24-31] */
+ tmp = (tmp << 8) | read_reg(state, 0x00fd); /* NBERVALUE[16-23] */
+ tmp = (tmp << 8) | read_reg(state, 0x00fe); /* NBERVALUE[8-15] */
+ tmp = (tmp << 8) | read_reg(state, 0x00ff); /* NBERVALUE[0-7] */
+ *ber = tmp;
+ dbg_info("ber=%u\n", tmp);
+#endif
+ return 0;
+}
+
+static int lgdt3306a_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ *ucblocks = 0;
+#if 1
+ /* FGR - FIXME - I don't know what value is expected by dvb_core
+ * what happens when value wraps? */
+ *ucblocks = read_reg(state, 0x00f4); /* TPIFTPERRCNT[0-7] */
+ dbg_info("ucblocks=%u\n", *ucblocks);
+#endif
+
+ return 0;
+}
+
+static int lgdt3306a_tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags, unsigned int *delay,
+ fe_status_t *status)
+{
+ int ret = 0;
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ dbg_info("re_tune=%u\n", re_tune);
+
+ if (re_tune) {
+ state->current_frequency = -1; /* force re-tune */
+ ret = lgdt3306a_set_parameters(fe);
+ if (ret != 0)
+ return ret;
+ }
+ *delay = 125;
+ ret = lgdt3306a_read_status(fe, status);
+
+ return ret;
+}
+
+static int lgdt3306a_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings
+ *fe_tune_settings)
+{
+ fe_tune_settings->min_delay_ms = 100;
+ dbg_info("\n");
+ return 0;
+}
+
+static int lgdt3306a_search(struct dvb_frontend *fe)
+{
+ fe_status_t status = 0;
+ int i, ret;
+
+ /* set frontend */
+ ret = lgdt3306a_set_parameters(fe);
+ if (ret)
+ goto error;
+
+ /* wait frontend lock */
+ for (i = 20; i > 0; i--) {
+ dbg_info(": loop=%d\n", i);
+ msleep(50);
+ ret = lgdt3306a_read_status(fe, &status);
+ if (ret)
+ goto error;
+
+ if (status & FE_HAS_LOCK)
+ break;
+ }
+
+ /* check if we have a valid signal */
+ if (status & FE_HAS_LOCK)
+ return DVBFE_ALGO_SEARCH_SUCCESS;
+ else
+ return DVBFE_ALGO_SEARCH_AGAIN;
+
+error:
+ dbg_info("failed (%d)\n", ret);
+ return DVBFE_ALGO_SEARCH_ERROR;
+}
+
+static void lgdt3306a_release(struct dvb_frontend *fe)
+{
+ struct lgdt3306a_state *state = fe->demodulator_priv;
+
+ dbg_info("\n");
+ kfree(state);
+}
+
+static struct dvb_frontend_ops lgdt3306a_ops;
+
+struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ struct lgdt3306a_state *state = NULL;
+ int ret;
+ u8 val;
+
+ dbg_info("(%d-%04x)\n",
+ i2c_adap ? i2c_adapter_id(i2c_adap) : 0,
+ config ? config->i2c_addr : 0);
+
+ state = kzalloc(sizeof(struct lgdt3306a_state), GFP_KERNEL);
+ if (state == NULL)
+ goto fail;
+
+ state->cfg = config;
+ state->i2c_adap = i2c_adap;
+
+ memcpy(&state->frontend.ops, &lgdt3306a_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
+
+ /* verify that we're talking to a lg3306a */
+ /* FGR - NOTE - there is no obvious ChipId to check; we check
+ * some "known" bits after reset, but it's still just a guess */
+ ret = lgdt3306a_read_reg(state, 0x0000, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ if ((val & 0x74) != 0x74) {
+ pr_warn("expected 0x74, got 0x%x\n", (val & 0x74));
+#if 0
+ /* FIXME - re-enable when we know this is right */
+ goto fail;
+#endif
+ }
+ ret = lgdt3306a_read_reg(state, 0x0001, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ if ((val & 0xf6) != 0xc6) {
+ pr_warn("expected 0xc6, got 0x%x\n", (val & 0xf6));
+#if 0
+ /* FIXME - re-enable when we know this is right */
+ goto fail;
+#endif
+ }
+ ret = lgdt3306a_read_reg(state, 0x0002, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+ if ((val & 0x73) != 0x03) {
+ pr_warn("expected 0x03, got 0x%x\n", (val & 0x73));
+#if 0
+ /* FIXME - re-enable when we know this is right */
+ goto fail;
+#endif
+ }
+
+ state->current_frequency = -1;
+ state->current_modulation = -1;
+
+ lgdt3306a_sleep(state);
+
+ return &state->frontend;
+
+fail:
+ pr_warn("unable to detect LGDT3306A hardware\n");
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(lgdt3306a_attach);
+
+#ifdef DBG_DUMP
+
+static const short regtab[] = {
+ 0x0000, /* SOFTRSTB 1'b1 1'b1 1'b1 ADCPDB 1'b1 PLLPDB GBBPDB 11111111 */
+ 0x0001, /* 1'b1 1'b1 1'b0 1'b0 AUTORPTRS */
+ 0x0002, /* NI2CRPTEN 1'b0 1'b0 1'b0 SPECINVAUT */
+ 0x0003, /* AGCRFOUT */
+ 0x0004, /* ADCSEL1V ADCCNT ADCCNF ADCCNS ADCCLKPLL */
+ 0x0005, /* PLLINDIVSE */
+ 0x0006, /* PLLCTRL[7:0] 11100001 */
+ 0x0007, /* SYSINITWAITTIME[7:0] (msec) 00001000 */
+ 0x0008, /* STDOPMODE[7:0] 10000000 */
+ 0x0009, /* 1'b0 1'b0 1'b0 STDOPDETTMODE[2:0] STDOPDETCMODE[1:0] 00011110 */
+ 0x000a, /* DAFTEN 1'b1 x x SCSYSLOCK */
+ 0x000b, /* SCSYSLOCKCHKTIME[7:0] (10msec) 01100100 */
+ 0x000d, /* x SAMPLING4 */
+ 0x000e, /* SAMFREQ[15:8] 00000000 */
+ 0x000f, /* SAMFREQ[7:0] 00000000 */
+ 0x0010, /* IFFREQ[15:8] 01100000 */
+ 0x0011, /* IFFREQ[7:0] 00000000 */
+ 0x0012, /* AGCEN AGCREFMO */
+ 0x0013, /* AGCRFFIXB AGCIFFIXB AGCLOCKDETRNGSEL[1:0] 1'b1 1'b0 1'b0 1'b0 11101000 */
+ 0x0014, /* AGCFIXVALUE[7:0] 01111111 */
+ 0x0015, /* AGCREF[15:8] 00001010 */
+ 0x0016, /* AGCREF[7:0] 11100100 */
+ 0x0017, /* AGCDELAY[7:0] 00100000 */
+ 0x0018, /* AGCRFBW[3:0] AGCIFBW[3:0] 10001000 */
+ 0x0019, /* AGCUDOUTMODE[1:0] AGCUDCTRLLEN[1:0] AGCUDCTRL */
+ 0x001c, /* 1'b1 PFEN MFEN AICCVSYNC */
+ 0x001d, /* 1'b0 1'b1 1'b0 1'b1 AICCVSYNC */
+ 0x001e, /* AICCALPHA[3:0] 1'b1 1'b0 1'b1 1'b0 01111010 */
+ 0x001f, /* AICCDETTH[19:16] AICCOFFTH[19:16] 00000000 */
+ 0x0020, /* AICCDETTH[15:8] 01111100 */
+ 0x0021, /* AICCDETTH[7:0] 00000000 */
+ 0x0022, /* AICCOFFTH[15:8] 00000101 */
+ 0x0023, /* AICCOFFTH[7:0] 11100000 */
+ 0x0024, /* AICCOPMODE3[1:0] AICCOPMODE2[1:0] AICCOPMODE1[1:0] AICCOPMODE0[1:0] 00000000 */
+ 0x0025, /* AICCFIXFREQ3[23:16] 00000000 */
+ 0x0026, /* AICCFIXFREQ3[15:8] 00000000 */
+ 0x0027, /* AICCFIXFREQ3[7:0] 00000000 */
+ 0x0028, /* AICCFIXFREQ2[23:16] 00000000 */
+ 0x0029, /* AICCFIXFREQ2[15:8] 00000000 */
+ 0x002a, /* AICCFIXFREQ2[7:0] 00000000 */
+ 0x002b, /* AICCFIXFREQ1[23:16] 00000000 */
+ 0x002c, /* AICCFIXFREQ1[15:8] 00000000 */
+ 0x002d, /* AICCFIXFREQ1[7:0] 00000000 */
+ 0x002e, /* AICCFIXFREQ0[23:16] 00000000 */
+ 0x002f, /* AICCFIXFREQ0[15:8] 00000000 */
+ 0x0030, /* AICCFIXFREQ0[7:0] 00000000 */
+ 0x0031, /* 1'b0 1'b1 1'b0 1'b0 x DAGC1STER */
+ 0x0032, /* DAGC1STEN DAGC1STER */
+ 0x0033, /* DAGC1STREF[15:8] 00001010 */
+ 0x0034, /* DAGC1STREF[7:0] 11100100 */
+ 0x0035, /* DAGC2NDE */
+ 0x0036, /* DAGC2NDREF[15:8] 00001010 */
+ 0x0037, /* DAGC2NDREF[7:0] 10000000 */
+ 0x0038, /* DAGC2NDLOCKDETRNGSEL[1:0] */
+ 0x003d, /* 1'b1 SAMGEARS */
+ 0x0040, /* SAMLFGMA */
+ 0x0041, /* SAMLFBWM */
+ 0x0044, /* 1'b1 CRGEARSHE */
+ 0x0045, /* CRLFGMAN */
+ 0x0046, /* CFLFBWMA */
+ 0x0047, /* CRLFGMAN */
+ 0x0048, /* x x x x CRLFGSTEP_VS[3:0] xxxx1001 */
+ 0x0049, /* CRLFBWMA */
+ 0x004a, /* CRLFBWMA */
+ 0x0050, /* 1'b0 1'b1 1'b1 1'b0 MSECALCDA */
+ 0x0070, /* TPOUTEN TPIFEN TPCLKOUTE */
+ 0x0071, /* TPSENB TPSSOPBITE */
+ 0x0073, /* TP47HINS x x CHBERINT PERMODE[1:0] PERINT[1:0] 1xx11100 */
+ 0x0075, /* x x x x x IQSWAPCTRL[2:0] xxxxx000 */
+ 0x0076, /* NBERCON NBERST NBERPOL NBERWSYN */
+ 0x0077, /* x NBERLOSTTH[2:0] NBERACQTH[3:0] x0000000 */
+ 0x0078, /* NBERPOLY[31:24] 00000000 */
+ 0x0079, /* NBERPOLY[23:16] 00000000 */
+ 0x007a, /* NBERPOLY[15:8] 00000000 */
+ 0x007b, /* NBERPOLY[7:0] 00000000 */
+ 0x007c, /* NBERPED[31:24] 00000000 */
+ 0x007d, /* NBERPED[23:16] 00000000 */
+ 0x007e, /* NBERPED[15:8] 00000000 */
+ 0x007f, /* NBERPED[7:0] 00000000 */
+ 0x0080, /* x AGCLOCK DAGCLOCK SYSLOCK x x NEVERLOCK[1:0] */
+ 0x0085, /* SPECINVST */
+ 0x0088, /* SYSLOCKTIME[15:8] */
+ 0x0089, /* SYSLOCKTIME[7:0] */
+ 0x008c, /* FECLOCKTIME[15:8] */
+ 0x008d, /* FECLOCKTIME[7:0] */
+ 0x008e, /* AGCACCOUT[15:8] */
+ 0x008f, /* AGCACCOUT[7:0] */
+ 0x0090, /* AICCREJSTATUS[3:0] AICCREJBUSY[3:0] */
+ 0x0091, /* AICCVSYNC */
+ 0x009c, /* CARRFREQOFFSET[15:8] */
+ 0x009d, /* CARRFREQOFFSET[7:0] */
+ 0x00a1, /* SAMFREQOFFSET[23:16] */
+ 0x00a2, /* SAMFREQOFFSET[15:8] */
+ 0x00a3, /* SAMFREQOFFSET[7:0] */
+ 0x00a6, /* SYNCLOCK SYNCLOCKH */
+#if 0 /* covered elsewhere */
+ 0x00e8, /* CONSTPWR[15:8] */
+ 0x00e9, /* CONSTPWR[7:0] */
+ 0x00ea, /* BMSE[15:8] */
+ 0x00eb, /* BMSE[7:0] */
+ 0x00ec, /* MSE[15:8] */
+ 0x00ed, /* MSE[7:0] */
+ 0x00ee, /* CONSTI[7:0] */
+ 0x00ef, /* CONSTQ[7:0] */
+#endif
+ 0x00f4, /* TPIFTPERRCNT[7:0] */
+ 0x00f5, /* TPCORREC */
+ 0x00f6, /* VBBER[15:8] */
+ 0x00f7, /* VBBER[7:0] */
+ 0x00f8, /* VABER[15:8] */
+ 0x00f9, /* VABER[7:0] */
+ 0x00fa, /* TPERRCNT[7:0] */
+ 0x00fb, /* NBERLOCK x x x x x x x */
+ 0x00fc, /* NBERVALUE[31:24] */
+ 0x00fd, /* NBERVALUE[23:16] */
+ 0x00fe, /* NBERVALUE[15:8] */
+ 0x00ff, /* NBERVALUE[7:0] */
+ 0x1000, /* 1'b0 WODAGCOU */
+ 0x1005, /* x x 1'b1 1'b1 x SRD_Q_QM */
+ 0x1009, /* SRDWAITTIME[7:0] (10msec) 00100011 */
+ 0x100a, /* SRDWAITTIME_CQS[7:0] (msec) 01100100 */
+ 0x101a, /* x 1'b1 1'b0 1'b0 x QMDQAMMODE[2:0] x100x010 */
+ 0x1036, /* 1'b0 1'b1 1'b0 1'b0 SAMGSEND_CQS[3:0] 01001110 */
+ 0x103c, /* SAMGSAUTOSTL_V[3:0] SAMGSAUTOEDL_V[3:0] 01000110 */
+ 0x103d, /* 1'b1 1'b1 SAMCNORMBP_V[1:0] 1'b0 1'b0 SAMMODESEL_V[1:0] 11100001 */
+ 0x103f, /* SAMZTEDSE */
+ 0x105d, /* EQSTATUSE */
+ 0x105f, /* x PMAPG2_V[2:0] x DMAPG2_V[2:0] x001x011 */
+ 0x1060, /* 1'b1 EQSTATUSE */
+ 0x1061, /* CRMAPBWSTL_V[3:0] CRMAPBWEDL_V[3:0] 00000100 */
+ 0x1065, /* 1'b0 x CRMODE_V[1:0] 1'b1 x 1'b1 x 0x111x1x */
+ 0x1066, /* 1'b0 1'b0 1'b1 1'b0 1'b1 PNBOOSTSE */
+ 0x1068, /* CREPHNGAIN2_V[3:0] CREPHNPBW_V[3:0] 10010001 */
+ 0x106e, /* x x x x x CREPHNEN_ */
+ 0x106f, /* CREPHNTH_V[7:0] 00010101 */
+ 0x1072, /* CRSWEEPN */
+ 0x1073, /* CRPGAIN_V[3:0] x x 1'b1 1'b1 1001xx11 */
+ 0x1074, /* CRPBW_V[3:0] x x 1'b1 1'b1 0001xx11 */
+ 0x1080, /* DAFTSTATUS[1:0] x x x x x x */
+ 0x1081, /* SRDSTATUS[1:0] x x x x x SRDLOCK */
+ 0x10a9, /* EQSTATUS_CQS[1:0] x x x x x x */
+ 0x10b7, /* EQSTATUS_V[1:0] x x x x x x */
+#if 0 /* SMART_ANT */
+ 0x1f00, /* MODEDETE */
+ 0x1f01, /* x x x x x x x SFNRST xxxxxxx0 */
+ 0x1f03, /* NUMOFANT[7:0] 10000000 */
+ 0x1f04, /* x SELMASK[6:0] x0000000 */
+ 0x1f05, /* x SETMASK[6:0] x0000000 */
+ 0x1f06, /* x TXDATA[6:0] x0000000 */
+ 0x1f07, /* x CHNUMBER[6:0] x0000000 */
+ 0x1f09, /* AGCTIME[23:16] 10011000 */
+ 0x1f0a, /* AGCTIME[15:8] 10010110 */
+ 0x1f0b, /* AGCTIME[7:0] 10000000 */
+ 0x1f0c, /* ANTTIME[31:24] 00000000 */
+ 0x1f0d, /* ANTTIME[23:16] 00000011 */
+ 0x1f0e, /* ANTTIME[15:8] 10010000 */
+ 0x1f0f, /* ANTTIME[7:0] 10010000 */
+ 0x1f11, /* SYNCTIME[23:16] 10011000 */
+ 0x1f12, /* SYNCTIME[15:8] 10010110 */
+ 0x1f13, /* SYNCTIME[7:0] 10000000 */
+ 0x1f14, /* SNRTIME[31:24] 00000001 */
+ 0x1f15, /* SNRTIME[23:16] 01111101 */
+ 0x1f16, /* SNRTIME[15:8] 01111000 */
+ 0x1f17, /* SNRTIME[7:0] 01000000 */
+ 0x1f19, /* FECTIME[23:16] 00000000 */
+ 0x1f1a, /* FECTIME[15:8] 01110010 */
+ 0x1f1b, /* FECTIME[7:0] 01110000 */
+ 0x1f1d, /* FECTHD[7:0] 00000011 */
+ 0x1f1f, /* SNRTHD[23:16] 00001000 */
+ 0x1f20, /* SNRTHD[15:8] 01111111 */
+ 0x1f21, /* SNRTHD[7:0] 10000101 */
+ 0x1f80, /* IRQFLG x x SFSDRFLG MODEBFLG SAVEFLG SCANFLG TRACKFLG */
+ 0x1f81, /* x SYNCCON SNRCON FECCON x STDBUSY SYNCRST AGCFZCO */
+ 0x1f82, /* x x x SCANOPCD[4:0] */
+ 0x1f83, /* x x x x MAINOPCD[3:0] */
+ 0x1f84, /* x x RXDATA[13:8] */
+ 0x1f85, /* RXDATA[7:0] */
+ 0x1f86, /* x x SDTDATA[13:8] */
+ 0x1f87, /* SDTDATA[7:0] */
+ 0x1f89, /* ANTSNR[23:16] */
+ 0x1f8a, /* ANTSNR[15:8] */
+ 0x1f8b, /* ANTSNR[7:0] */
+ 0x1f8c, /* x x x x ANTFEC[13:8] */
+ 0x1f8d, /* ANTFEC[7:0] */
+ 0x1f8e, /* MAXCNT[7:0] */
+ 0x1f8f, /* SCANCNT[7:0] */
+ 0x1f91, /* MAXPW[23:16] */
+ 0x1f92, /* MAXPW[15:8] */
+ 0x1f93, /* MAXPW[7:0] */
+ 0x1f95, /* CURPWMSE[23:16] */
+ 0x1f96, /* CURPWMSE[15:8] */
+ 0x1f97, /* CURPWMSE[7:0] */
+#endif /* SMART_ANT */
+ 0x211f, /* 1'b1 1'b1 1'b1 CIRQEN x x 1'b0 1'b0 1111xx00 */
+ 0x212a, /* EQAUTOST */
+ 0x2122, /* CHFAST[7:0] 01100000 */
+ 0x212b, /* FFFSTEP_V[3:0] x FBFSTEP_V[2:0] 0001x001 */
+ 0x212c, /* PHDEROTBWSEL[3:0] 1'b1 1'b1 1'b1 1'b0 10001110 */
+ 0x212d, /* 1'b1 1'b1 1'b1 1'b1 x x TPIFLOCKS */
+ 0x2135, /* DYNTRACKFDEQ[3:0] x 1'b0 1'b0 1'b0 1010x000 */
+ 0x2141, /* TRMODE[1:0] 1'b1 1'b1 1'b0 1'b1 1'b1 1'b1 01110111 */
+ 0x2162, /* AICCCTRLE */
+ 0x2173, /* PHNCNFCNT[7:0] 00000100 */
+ 0x2179, /* 1'b0 1'b0 1'b0 1'b1 x BADSINGLEDYNTRACKFBF[2:0] 0001x001 */
+ 0x217a, /* 1'b0 1'b0 1'b0 1'b1 x BADSLOWSINGLEDYNTRACKFBF[2:0] 0001x001 */
+ 0x217e, /* CNFCNTTPIF[7:0] 00001000 */
+ 0x217f, /* TPERRCNTTPIF[7:0] 00000001 */
+ 0x2180, /* x x x x x x FBDLYCIR[9:8] */
+ 0x2181, /* FBDLYCIR[7:0] */
+ 0x2185, /* MAXPWRMAIN[7:0] */
+ 0x2191, /* NCOMBDET x x x x x x x */
+ 0x2199, /* x MAINSTRON */
+ 0x219a, /* FFFEQSTEPOUT_V[3:0] FBFSTEPOUT_V[2:0] */
+ 0x21a1, /* x x SNRREF[5:0] */
+ 0x2845, /* 1'b0 1'b1 x x FFFSTEP_CQS[1:0] FFFCENTERTAP[1:0] 01xx1110 */
+ 0x2846, /* 1'b0 x 1'b0 1'b1 FBFSTEP_CQS[1:0] 1'b1 1'b0 0x011110 */
+ 0x2847, /* ENNOSIGDE */
+ 0x2849, /* 1'b1 1'b1 NOUSENOSI */
+ 0x284a, /* EQINITWAITTIME[7:0] 01100100 */
+ 0x3000, /* 1'b1 1'b1 1'b1 x x x 1'b0 RPTRSTM */
+ 0x3001, /* RPTRSTWAITTIME[7:0] (100msec) 00110010 */
+ 0x3031, /* FRAMELOC */
+ 0x3032, /* 1'b1 1'b0 1'b0 1'b0 x x FRAMELOCKMODE_CQS[1:0] 1000xx11 */
+ 0x30a9, /* VDLOCK_Q FRAMELOCK */
+ 0x30aa, /* MPEGLOCK */
+};
+
+#define numDumpRegs (sizeof(regtab)/sizeof(regtab[0]))
+static u8 regval1[numDumpRegs] = {0, };
+static u8 regval2[numDumpRegs] = {0, };
+
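+/*
+ * Pre-filling the shadow copy regval2[] with 0xff effectively forces a
+ * full dump: lgdt3306a_DumpRegs() below otherwise prints only the
+ * registers whose value differs from the previous dump.
+ */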
+static void lgdt3306a_DumpAllRegs(struct lgdt3306a_state *state)
+{
+ memset(regval2, 0xff, sizeof(regval2));
+ lgdt3306a_DumpRegs(state);
+}
+
+static void lgdt3306a_DumpRegs(struct lgdt3306a_state *state)
+{
+ int i;
+ int sav_debug = debug;
+
+ if ((debug & DBG_DUMP) == 0)
+ return;
+ debug &= ~DBG_REG; /* suppress DBG_REG during reg dump */
+
+ lg_debug("\n");
+
+ for (i = 0; i < numDumpRegs; i++) {
+ lgdt3306a_read_reg(state, regtab[i], &regval1[i]);
+ if (regval1[i] != regval2[i]) {
+ lg_debug(" %04X = %02X\n", regtab[i], regval1[i]);
+ regval2[i] = regval1[i];
+ }
+ }
+ debug = sav_debug;
+}
+#endif /* DBG_DUMP */
+
+
+
+static struct dvb_frontend_ops lgdt3306a_ops = {
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+ .info = {
+ .name = "LG Electronics LGDT3306A VSB/QAM Frontend",
+ .frequency_min = 54000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 62500,
+ .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
+ },
+ .i2c_gate_ctrl = lgdt3306a_i2c_gate_ctrl,
+ .init = lgdt3306a_init,
+ .sleep = lgdt3306a_fe_sleep,
+ /* if this is set, it overrides the default swzigzag */
+ .tune = lgdt3306a_tune,
+ .set_frontend = lgdt3306a_set_parameters,
+ .get_frontend = lgdt3306a_get_frontend,
+ .get_frontend_algo = lgdt3306a_get_frontend_algo,
+ .get_tune_settings = lgdt3306a_get_tune_settings,
+ .read_status = lgdt3306a_read_status,
+ .read_ber = lgdt3306a_read_ber,
+ .read_signal_strength = lgdt3306a_read_signal_strength,
+ .read_snr = lgdt3306a_read_snr,
+ .read_ucblocks = lgdt3306a_read_ucblocks,
+ .release = lgdt3306a_release,
+ .ts_bus_ctrl = lgdt3306a_ts_bus_ctrl,
+ .search = lgdt3306a_search,
+};
+
+MODULE_DESCRIPTION("LG Electronics LGDT3306A ATSC/QAM-B Demodulator Driver");
+MODULE_AUTHOR("Fred Richter <frichter@hauppauge.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.2");
diff --git a/drivers/media/dvb-frontends/lgdt3306a.h b/drivers/media/dvb-frontends/lgdt3306a.h
new file mode 100644
index 000000000000..9dbb2dced1fe
--- /dev/null
+++ b/drivers/media/dvb-frontends/lgdt3306a.h
@@ -0,0 +1,74 @@
+/*
+ * Support for LGDT3306A - 8VSB/QAM-B
+ *
+ * Copyright (C) 2013,2014 Fred Richter <frichter@hauppauge.com>
+ * based on lgdt3305.[ch] by Michael Krufky
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LGDT3306A_H_
+#define _LGDT3306A_H_
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+
+enum lgdt3306a_mpeg_mode {
+ LGDT3306A_MPEG_PARALLEL = 0,
+ LGDT3306A_MPEG_SERIAL = 1,
+};
+
+enum lgdt3306a_tp_clock_edge {
+ LGDT3306A_TPCLK_RISING_EDGE = 0,
+ LGDT3306A_TPCLK_FALLING_EDGE = 1,
+};
+
+enum lgdt3306a_tp_valid_polarity {
+ LGDT3306A_TP_VALID_LOW = 0,
+ LGDT3306A_TP_VALID_HIGH = 1,
+};
+
+struct lgdt3306a_config {
+ u8 i2c_addr;
+
+ /* user-defined IF frequency in kHz */
+ u16 qam_if_khz;
+ u16 vsb_if_khz;
+
+ /* disable i2c repeater - 0:repeater enabled 1:repeater disabled */
+ unsigned int deny_i2c_rptr:1;
+
+ /* spectral inversion - 0:disabled 1:enabled */
+ unsigned int spectral_inversion:1;
+
+ enum lgdt3306a_mpeg_mode mpeg_mode;
+ enum lgdt3306a_tp_clock_edge tpclk_edge;
+ enum lgdt3306a_tp_valid_polarity tpvalid_polarity;
+
+ /* demod clock freq in MHz; 24 or 25 supported */
+ int xtalMHz;
+};
+
+#if IS_REACHABLE(CONFIG_DVB_LGDT3306A)
+struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
+ struct i2c_adapter *i2c_adap);
+#else
+static inline
+struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_LGDT3306A */
+
+#endif /* _LGDT3306A_H_ */
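Since the header above only exposes lgdt3306a_attach() and the config struct, a hypothetical bridge-side attach could look like the sketch below; the values, the hypothetical_ names and the dev->i2c_adap field are illustrative assumptions rather than a real board profile, and dvb_attach() is the usual dvb-core refcounted attach helper.

	/* minimal sketch, assuming a bridge with an i2c_adapter at dev->i2c_adap */
	static struct lgdt3306a_config hypothetical_lgdt3306a_cfg = {
		.i2c_addr           = 0x59,
		.qam_if_khz         = 4000,
		.vsb_if_khz         = 3250,
		.deny_i2c_rptr      = 1,
		.spectral_inversion = 1,
		.mpeg_mode          = LGDT3306A_MPEG_SERIAL,
		.tpclk_edge         = LGDT3306A_TPCLK_RISING_EDGE,
		.tpvalid_polarity   = LGDT3306A_TP_VALID_HIGH,
		.xtalMHz            = 25,
	};

	/* in the bridge's frontend-init path: */
	dev->fe = dvb_attach(lgdt3306a_attach, &hypothetical_lgdt3306a_cfg,
			     &dev->i2c_adap);
	if (!dev->fe)
		return -ENODEV;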
diff --git a/drivers/media/dvb-frontends/lgdt330x.h b/drivers/media/dvb-frontends/lgdt330x.h
index 8bb332219fc4..c73eeb45e330 100644
--- a/drivers/media/dvb-frontends/lgdt330x.h
+++ b/drivers/media/dvb-frontends/lgdt330x.h
@@ -52,7 +52,7 @@ struct lgdt330x_config
int clock_polarity_flip;
};
-#if IS_ENABLED(CONFIG_DVB_LGDT330X)
+#if IS_REACHABLE(CONFIG_DVB_LGDT330X)
extern struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
struct i2c_adapter* i2c);
#else
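For context, the long run of IS_ENABLED() to IS_REACHABLE() conversions in these frontend and tuner headers (here and below) addresses a link-time problem rather than a behavioural one; the comment below paraphrases the <linux/kconfig.h> semantics and is not the macro's verbatim definition.

	/*
	 * IS_ENABLED(CONFIG_FOO)    true when FOO=y or FOO=m
	 * IS_REACHABLE(CONFIG_FOO)  true when FOO=y, or when FOO=m and the
	 *                           code doing the test is itself built as a
	 *                           module, i.e. the symbol is actually
	 *                           linkable from the caller
	 *
	 * With IS_ENABLED(), a built-in caller compiled against a frontend
	 * configured as a module selects the real extern prototype and then
	 * fails to link; IS_REACHABLE() makes such a caller fall back to the
	 * inline "driver disabled" stub instead.
	 */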
diff --git a/drivers/media/dvb-frontends/lgs8gl5.h b/drivers/media/dvb-frontends/lgs8gl5.h
index c2da59614727..a5b3faf121f0 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.h
+++ b/drivers/media/dvb-frontends/lgs8gl5.h
@@ -31,7 +31,7 @@ struct lgs8gl5_config {
u8 demod_address;
};
-#if IS_ENABLED(CONFIG_DVB_LGS8GL5)
+#if IS_REACHABLE(CONFIG_DVB_LGS8GL5)
extern struct dvb_frontend *lgs8gl5_attach(
const struct lgs8gl5_config *config, struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/lgs8gxx.h b/drivers/media/dvb-frontends/lgs8gxx.h
index dadb78bf61a9..368c9928ef7f 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.h
+++ b/drivers/media/dvb-frontends/lgs8gxx.h
@@ -80,7 +80,7 @@ struct lgs8gxx_config {
u8 tuner_address;
};
-#if IS_ENABLED(CONFIG_DVB_LGS8GXX)
+#if IS_REACHABLE(CONFIG_DVB_LGS8GXX)
extern struct dvb_frontend *lgs8gxx_attach(const struct lgs8gxx_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/lnbh24.h b/drivers/media/dvb-frontends/lnbh24.h
index b327a4f31d16..a088b8ec1e53 100644
--- a/drivers/media/dvb-frontends/lnbh24.h
+++ b/drivers/media/dvb-frontends/lnbh24.h
@@ -37,7 +37,7 @@
#include <linux/dvb/frontend.h>
-#if IS_ENABLED(CONFIG_DVB_LNBP21)
+#if IS_REACHABLE(CONFIG_DVB_LNBP21)
/* override_set and override_clear control which
system register bits (above) to always set & clear */
extern struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/lnbp21.h b/drivers/media/dvb-frontends/lnbp21.h
index dbcbcc2f20a3..a9b530de62a6 100644
--- a/drivers/media/dvb-frontends/lnbp21.h
+++ b/drivers/media/dvb-frontends/lnbp21.h
@@ -57,7 +57,7 @@
#include <linux/dvb/frontend.h>
-#if IS_ENABLED(CONFIG_DVB_LNBP21)
+#if IS_REACHABLE(CONFIG_DVB_LNBP21)
/* override_set and override_clear control which
system register bits (above) to always set & clear */
extern struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/lnbp22.h b/drivers/media/dvb-frontends/lnbp22.h
index 63861b311dd8..628148385182 100644
--- a/drivers/media/dvb-frontends/lnbp22.h
+++ b/drivers/media/dvb-frontends/lnbp22.h
@@ -39,7 +39,7 @@
#include <linux/dvb/frontend.h>
-#if IS_ENABLED(CONFIG_DVB_LNBP22)
+#if IS_REACHABLE(CONFIG_DVB_LNBP22)
/*
* override_set and override_clear control which system register bits (above)
* to always set & clear
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index ba4ee0b48834..d3d928e1c0ce 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -630,7 +630,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
if (ret) {
- dev_err(&priv->i2c->dev, "%s: firmare file '%s' not found\n",
+ dev_err(&priv->i2c->dev, "%s: firmware file '%s' not found\n",
KBUILD_MODNAME, fw_file);
goto err;
}
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index 0a50ea90736b..de7430178e9e 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -41,7 +41,7 @@ enum {
CALL_IS_READ,
};
-#if IS_ENABLED(CONFIG_DVB_M88RS2000)
+#if IS_REACHABLE(CONFIG_DVB_M88RS2000)
extern struct dvb_frontend *m88rs2000_attach(
const struct m88rs2000_config *config, struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/mb86a16.h b/drivers/media/dvb-frontends/mb86a16.h
index 277ce061acf9..e486dc0d8e60 100644
--- a/drivers/media/dvb-frontends/mb86a16.h
+++ b/drivers/media/dvb-frontends/mb86a16.h
@@ -33,7 +33,7 @@ struct mb86a16_config {
-#if IS_ENABLED(CONFIG_DVB_MB86A16)
+#if IS_REACHABLE(CONFIG_DVB_MB86A16)
extern struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
struct i2c_adapter *i2c_adap);
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h
index cbeb941fba7c..f749c8ac5f39 100644
--- a/drivers/media/dvb-frontends/mb86a20s.h
+++ b/drivers/media/dvb-frontends/mb86a20s.h
@@ -34,7 +34,7 @@ struct mb86a20s_config {
bool is_serial;
};
-#if IS_ENABLED(CONFIG_DVB_MB86A20S)
+#if IS_REACHABLE(CONFIG_DVB_MB86A20S)
extern struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
struct i2c_adapter *i2c);
extern struct i2c_adapter *mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *);
diff --git a/drivers/media/dvb-frontends/mn88472.h b/drivers/media/dvb-frontends/mn88472.h
index e4e0b80d3091..095294d292f3 100644
--- a/drivers/media/dvb-frontends/mn88472.h
+++ b/drivers/media/dvb-frontends/mn88472.h
@@ -19,6 +19,16 @@
#include <linux/dvb/frontend.h>
+enum ts_clock {
+ VARIABLE_TS_CLOCK,
+ FIXED_TS_CLOCK,
+};
+
+enum ts_mode {
+ SERIAL_TS_MODE,
+ PARALLEL_TS_MODE,
+};
+
struct mn88472_config {
/*
* Max num of bytes given I2C adapter could write at once.
@@ -39,6 +49,8 @@ struct mn88472_config {
* Hz
*/
u32 xtal;
+ int ts_mode;
+ int ts_clock;
};
#endif
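A minimal sketch of a bridge-side configuration using the two new TS fields; the crystal value is a board-specific assumption, and the remaining mandatory mn88472_config members (I2C write limit, frontend pointer) are omitted here and filled as before.

	/* illustrative only - values depend on the board design */
	static const struct mn88472_config hypothetical_mn88472_cfg = {
		.xtal     = 20500000,
		.ts_mode  = SERIAL_TS_MODE,
		.ts_clock = VARIABLE_TS_CLOCK,
	};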
diff --git a/drivers/media/dvb-frontends/mn88473.h b/drivers/media/dvb-frontends/mn88473.h
index a373ec93cbe0..c717ebed0e03 100644
--- a/drivers/media/dvb-frontends/mn88473.h
+++ b/drivers/media/dvb-frontends/mn88473.h
@@ -33,6 +33,12 @@ struct mn88473_config {
* DVB frontend.
*/
struct dvb_frontend **fe;
+
+ /*
+ * Xtal frequency.
+ * Hz
+ */
+ u32 xtal;
};
#endif
diff --git a/drivers/media/dvb-frontends/mt312.h b/drivers/media/dvb-frontends/mt312.h
index 5706621ad79d..386939a90555 100644
--- a/drivers/media/dvb-frontends/mt312.h
+++ b/drivers/media/dvb-frontends/mt312.h
@@ -36,7 +36,7 @@ struct mt312_config {
unsigned int voltage_inverted:1;
};
-#if IS_ENABLED(CONFIG_DVB_MT312)
+#if IS_REACHABLE(CONFIG_DVB_MT312)
struct dvb_frontend *mt312_attach(const struct mt312_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/mt352.h b/drivers/media/dvb-frontends/mt352.h
index 451d904e1500..5873263bd1af 100644
--- a/drivers/media/dvb-frontends/mt352.h
+++ b/drivers/media/dvb-frontends/mt352.h
@@ -51,7 +51,7 @@ struct mt352_config
int (*demod_init)(struct dvb_frontend* fe);
};
-#if IS_ENABLED(CONFIG_DVB_MT352)
+#if IS_REACHABLE(CONFIG_DVB_MT352)
extern struct dvb_frontend* mt352_attach(const struct mt352_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/nxt200x.h b/drivers/media/dvb-frontends/nxt200x.h
index e38d01fb6c2b..825b928ef542 100644
--- a/drivers/media/dvb-frontends/nxt200x.h
+++ b/drivers/media/dvb-frontends/nxt200x.h
@@ -42,7 +42,7 @@ struct nxt200x_config
int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
};
-#if IS_ENABLED(CONFIG_DVB_NXT200X)
+#if IS_REACHABLE(CONFIG_DVB_NXT200X)
extern struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/nxt6000.h b/drivers/media/dvb-frontends/nxt6000.h
index b5867c2ae681..a94cefcc6dfd 100644
--- a/drivers/media/dvb-frontends/nxt6000.h
+++ b/drivers/media/dvb-frontends/nxt6000.h
@@ -33,7 +33,7 @@ struct nxt6000_config
u8 clock_inversion:1;
};
-#if IS_ENABLED(CONFIG_DVB_NXT6000)
+#if IS_REACHABLE(CONFIG_DVB_NXT6000)
extern struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/or51132.h b/drivers/media/dvb-frontends/or51132.h
index cdb5be3c65d6..9acf8dc87413 100644
--- a/drivers/media/dvb-frontends/or51132.h
+++ b/drivers/media/dvb-frontends/or51132.h
@@ -34,7 +34,7 @@ struct or51132_config
int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
};
-#if IS_ENABLED(CONFIG_DVB_OR51132)
+#if IS_REACHABLE(CONFIG_DVB_OR51132)
extern struct dvb_frontend* or51132_attach(const struct or51132_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/or51211.h b/drivers/media/dvb-frontends/or51211.h
index 9a8ae936b62d..cc6adab63249 100644
--- a/drivers/media/dvb-frontends/or51211.h
+++ b/drivers/media/dvb-frontends/or51211.h
@@ -37,7 +37,7 @@ struct or51211_config
void (*sleep)(struct dvb_frontend * fe);
};
-#if IS_ENABLED(CONFIG_DVB_OR51211)
+#if IS_REACHABLE(CONFIG_DVB_OR51211)
extern struct dvb_frontend* or51211_attach(const struct or51211_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 67faa8d6950e..b400f7b3c2e7 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -685,7 +685,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, fe_status_t *status)
struct rtl2832_dev *dev = fe->demodulator_priv;
struct i2c_client *client = dev->client;
int ret;
- u32 tmp;
+ u32 uninitialized_var(tmp);
dev_dbg(&client->dev, "\n");
diff --git a/drivers/media/dvb-frontends/s5h1409.h b/drivers/media/dvb-frontends/s5h1409.h
index 9e143f5c8107..f58b9ca5557a 100644
--- a/drivers/media/dvb-frontends/s5h1409.h
+++ b/drivers/media/dvb-frontends/s5h1409.h
@@ -67,7 +67,7 @@ struct s5h1409_config {
u8 hvr1600_opt;
};
-#if IS_ENABLED(CONFIG_DVB_S5H1409)
+#if IS_REACHABLE(CONFIG_DVB_S5H1409)
extern struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/s5h1411.h b/drivers/media/dvb-frontends/s5h1411.h
index 1d7deb615674..f3a87f7ec360 100644
--- a/drivers/media/dvb-frontends/s5h1411.h
+++ b/drivers/media/dvb-frontends/s5h1411.h
@@ -69,7 +69,7 @@ struct s5h1411_config {
u8 status_mode;
};
-#if IS_ENABLED(CONFIG_DVB_S5H1411)
+#if IS_REACHABLE(CONFIG_DVB_S5H1411)
extern struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/s5h1420.h b/drivers/media/dvb-frontends/s5h1420.h
index 210049b5cf30..142d93e7d02b 100644
--- a/drivers/media/dvb-frontends/s5h1420.h
+++ b/drivers/media/dvb-frontends/s5h1420.h
@@ -40,7 +40,7 @@ struct s5h1420_config
u8 serial_mpeg:1;
};
-#if IS_ENABLED(CONFIG_DVB_S5H1420)
+#if IS_REACHABLE(CONFIG_DVB_S5H1420)
extern struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
struct i2c_adapter *i2c);
extern struct i2c_adapter *s5h1420_get_tuner_i2c_adapter(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/s5h1432.h b/drivers/media/dvb-frontends/s5h1432.h
index 70917dd2533a..f490c5ee5801 100644
--- a/drivers/media/dvb-frontends/s5h1432.h
+++ b/drivers/media/dvb-frontends/s5h1432.h
@@ -75,7 +75,7 @@ struct s5h1432_config {
u8 status_mode;
};
-#if IS_ENABLED(CONFIG_DVB_S5H1432)
+#if IS_REACHABLE(CONFIG_DVB_S5H1432)
extern struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/s921.h b/drivers/media/dvb-frontends/s921.h
index 9b20c9e0eb88..7d3999a4e974 100644
--- a/drivers/media/dvb-frontends/s921.h
+++ b/drivers/media/dvb-frontends/s921.h
@@ -25,7 +25,7 @@ struct s921_config {
u8 demod_address;
};
-#if IS_ENABLED(CONFIG_DVB_S921)
+#if IS_REACHABLE(CONFIG_DVB_S921)
extern struct dvb_frontend *s921_attach(const struct s921_config *config,
struct i2c_adapter *i2c);
extern struct i2c_adapter *s921_get_tuner_i2c_adapter(struct dvb_frontend *);
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 98ddb49ad52b..4cc5d10ed0d4 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -505,7 +505,7 @@ static int si2165_upload_firmware(struct si2165_state *state)
/* reset crc */
ret = si2165_writereg8(state, 0x0379, 0x01);
if (ret)
- return ret;
+ goto error;
ret = si2165_upload_firmware_block(state, data, len,
&offset, block_count);
diff --git a/drivers/media/dvb-frontends/si2165.h b/drivers/media/dvb-frontends/si2165.h
index efaa08123b92..8a15d6a9c552 100644
--- a/drivers/media/dvb-frontends/si2165.h
+++ b/drivers/media/dvb-frontends/si2165.h
@@ -45,7 +45,7 @@ struct si2165_config {
bool inversion;
};
-#if IS_ENABLED(CONFIG_DVB_SI2165)
+#if IS_REACHABLE(CONFIG_DVB_SI2165)
struct dvb_frontend *si2165_attach(
const struct si2165_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index aadd1367673f..d7efce8043ed 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -40,7 +40,7 @@ struct si2168_dev {
bool ts_clock_inv;
};
-/* firmare command struct */
+/* firmware command struct */
#define SI2168_ARGLEN 30
struct si2168_cmd {
u8 args[SI2168_ARGLEN];
diff --git a/drivers/media/dvb-frontends/si21xx.h b/drivers/media/dvb-frontends/si21xx.h
index 1509fed44a3a..ef5f351ca68e 100644
--- a/drivers/media/dvb-frontends/si21xx.h
+++ b/drivers/media/dvb-frontends/si21xx.h
@@ -13,7 +13,7 @@ struct si21xx_config {
int min_delay_ms;
};
-#if IS_ENABLED(CONFIG_DVB_SI21XX)
+#if IS_REACHABLE(CONFIG_DVB_SI21XX)
extern struct dvb_frontend *si21xx_attach(const struct si21xx_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index cc1ef966f99f..8fd42767e263 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -413,11 +413,8 @@ static int sp2_remove(struct i2c_client *client)
struct sp2 *s = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
-
sp2_exit(client);
- if (s != NULL)
- kfree(s);
-
+ kfree(s);
return 0;
}
diff --git a/drivers/media/dvb-frontends/sp8870.h b/drivers/media/dvb-frontends/sp8870.h
index 065ec67d4e30..f507b9fd707b 100644
--- a/drivers/media/dvb-frontends/sp8870.h
+++ b/drivers/media/dvb-frontends/sp8870.h
@@ -35,7 +35,7 @@ struct sp8870_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if IS_ENABLED(CONFIG_DVB_SP8870)
+#if IS_REACHABLE(CONFIG_DVB_SP8870)
extern struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/sp887x.h b/drivers/media/dvb-frontends/sp887x.h
index 2cdc4e8bc9cd..412f011e6dfd 100644
--- a/drivers/media/dvb-frontends/sp887x.h
+++ b/drivers/media/dvb-frontends/sp887x.h
@@ -17,7 +17,7 @@ struct sp887x_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if IS_ENABLED(CONFIG_DVB_SP887X)
+#if IS_REACHABLE(CONFIG_DVB_SP887X)
extern struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/stb0899_drv.h b/drivers/media/dvb-frontends/stb0899_drv.h
index 139264d19263..0a72131a57db 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.h
+++ b/drivers/media/dvb-frontends/stb0899_drv.h
@@ -141,7 +141,7 @@ struct stb0899_config {
int (*tuner_set_rfsiggain)(struct dvb_frontend *fe, u32 rf_gain);
};
-#if IS_ENABLED(CONFIG_DVB_STB0899)
+#if IS_REACHABLE(CONFIG_DVB_STB0899)
extern struct dvb_frontend *stb0899_attach(struct stb0899_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/stb6000.h b/drivers/media/dvb-frontends/stb6000.h
index a768189bfaad..da581b652cb9 100644
--- a/drivers/media/dvb-frontends/stb6000.h
+++ b/drivers/media/dvb-frontends/stb6000.h
@@ -35,7 +35,7 @@
* @param i2c i2c adapter to use.
* @return FE pointer on success, NULL on failure.
*/
-#if IS_ENABLED(CONFIG_DVB_STB6000)
+#if IS_REACHABLE(CONFIG_DVB_STB6000)
extern struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/stb6100.h b/drivers/media/dvb-frontends/stb6100.h
index 3a1e40f3b8be..218c8188865d 100644
--- a/drivers/media/dvb-frontends/stb6100.h
+++ b/drivers/media/dvb-frontends/stb6100.h
@@ -94,7 +94,7 @@ struct stb6100_state {
u32 reference;
};
-#if IS_ENABLED(CONFIG_DVB_STB6100)
+#if IS_REACHABLE(CONFIG_DVB_STB6100)
extern struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
const struct stb6100_config *config,
diff --git a/drivers/media/dvb-frontends/stv0288.h b/drivers/media/dvb-frontends/stv0288.h
index a0bd93107154..b58603c00c80 100644
--- a/drivers/media/dvb-frontends/stv0288.h
+++ b/drivers/media/dvb-frontends/stv0288.h
@@ -43,7 +43,7 @@ struct stv0288_config {
int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
};
-#if IS_ENABLED(CONFIG_DVB_STV0288)
+#if IS_REACHABLE(CONFIG_DVB_STV0288)
extern struct dvb_frontend *stv0288_attach(const struct stv0288_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0297.h b/drivers/media/dvb-frontends/stv0297.h
index c8ff3639ce00..b30632a67333 100644
--- a/drivers/media/dvb-frontends/stv0297.h
+++ b/drivers/media/dvb-frontends/stv0297.h
@@ -42,7 +42,7 @@ struct stv0297_config
u8 stop_during_read:1;
};
-#if IS_ENABLED(CONFIG_DVB_STV0297)
+#if IS_REACHABLE(CONFIG_DVB_STV0297)
extern struct dvb_frontend* stv0297_attach(const struct stv0297_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0299.h b/drivers/media/dvb-frontends/stv0299.h
index 06f70fc8327b..0aca30a8ec25 100644
--- a/drivers/media/dvb-frontends/stv0299.h
+++ b/drivers/media/dvb-frontends/stv0299.h
@@ -95,7 +95,7 @@ struct stv0299_config
int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
};
-#if IS_ENABLED(CONFIG_DVB_STV0299)
+#if IS_REACHABLE(CONFIG_DVB_STV0299)
extern struct dvb_frontend *stv0299_attach(const struct stv0299_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0367.h b/drivers/media/dvb-frontends/stv0367.h
index ea80b341f094..92b3e85fb818 100644
--- a/drivers/media/dvb-frontends/stv0367.h
+++ b/drivers/media/dvb-frontends/stv0367.h
@@ -39,7 +39,7 @@ struct stv0367_config {
int clk_pol;
};
-#if IS_ENABLED(CONFIG_DVB_STV0367)
+#if IS_REACHABLE(CONFIG_DVB_STV0367)
extern struct
dvb_frontend *stv0367ter_attach(const struct stv0367_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/stv0900.h b/drivers/media/dvb-frontends/stv0900.h
index e2a6dc69ecb4..c90bf00ea9ce 100644
--- a/drivers/media/dvb-frontends/stv0900.h
+++ b/drivers/media/dvb-frontends/stv0900.h
@@ -58,7 +58,7 @@ struct stv0900_config {
void (*set_lock_led)(struct dvb_frontend *fe, int offon);
};
-#if IS_ENABLED(CONFIG_DVB_STV0900)
+#if IS_REACHABLE(CONFIG_DVB_STV0900)
extern struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
struct i2c_adapter *i2c, int demod);
#else
diff --git a/drivers/media/dvb-frontends/stv090x.h b/drivers/media/dvb-frontends/stv090x.h
index 742eeda99000..012e55e5032e 100644
--- a/drivers/media/dvb-frontends/stv090x.h
+++ b/drivers/media/dvb-frontends/stv090x.h
@@ -107,7 +107,7 @@ struct stv090x_config {
u8 xor_value);
};
-#if IS_ENABLED(CONFIG_DVB_STV090x)
+#if IS_REACHABLE(CONFIG_DVB_STV090x)
struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/stv6110.h b/drivers/media/dvb-frontends/stv6110.h
index 8fa07e6a6745..f3c8a5c6b77d 100644
--- a/drivers/media/dvb-frontends/stv6110.h
+++ b/drivers/media/dvb-frontends/stv6110.h
@@ -46,7 +46,7 @@ struct stv6110_config {
u8 clk_div; /* divisor value for the output clock */
};
-#if IS_ENABLED(CONFIG_DVB_STV6110)
+#if IS_REACHABLE(CONFIG_DVB_STV6110)
extern struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe,
const struct stv6110_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/stv6110x.h b/drivers/media/dvb-frontends/stv6110x.h
index bc4766db29c5..9f7eb251aec3 100644
--- a/drivers/media/dvb-frontends/stv6110x.h
+++ b/drivers/media/dvb-frontends/stv6110x.h
@@ -53,7 +53,7 @@ struct stv6110x_devctl {
};
-#if IS_ENABLED(CONFIG_DVB_STV6110x)
+#if IS_REACHABLE(CONFIG_DVB_STV6110x)
extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
diff --git a/drivers/media/dvb-frontends/tda1002x.h b/drivers/media/dvb-frontends/tda1002x.h
index e404b6e44802..0d334613de1b 100644
--- a/drivers/media/dvb-frontends/tda1002x.h
+++ b/drivers/media/dvb-frontends/tda1002x.h
@@ -57,7 +57,7 @@ struct tda10023_config {
u16 deltaf;
};
-#if IS_ENABLED(CONFIG_DVB_TDA10021)
+#if IS_REACHABLE(CONFIG_DVB_TDA10021)
extern struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
struct i2c_adapter* i2c, u8 pwm);
#else
@@ -69,7 +69,7 @@ static inline struct dvb_frontend* tda10021_attach(const struct tda1002x_config*
}
#endif // CONFIG_DVB_TDA10021
-#if IS_ENABLED(CONFIG_DVB_TDA10023)
+#if IS_REACHABLE(CONFIG_DVB_TDA10023)
extern struct dvb_frontend *tda10023_attach(
const struct tda10023_config *config,
struct i2c_adapter *i2c, u8 pwm);
diff --git a/drivers/media/dvb-frontends/tda10048.h b/drivers/media/dvb-frontends/tda10048.h
index 5e7bf4e47cb3..bc77a7311de1 100644
--- a/drivers/media/dvb-frontends/tda10048.h
+++ b/drivers/media/dvb-frontends/tda10048.h
@@ -73,7 +73,7 @@ struct tda10048_config {
u8 pll_n;
};
-#if IS_ENABLED(CONFIG_DVB_TDA10048)
+#if IS_REACHABLE(CONFIG_DVB_TDA10048)
extern struct dvb_frontend *tda10048_attach(
const struct tda10048_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/tda1004x.h b/drivers/media/dvb-frontends/tda1004x.h
index dd283fbb61c0..efd7659dace9 100644
--- a/drivers/media/dvb-frontends/tda1004x.h
+++ b/drivers/media/dvb-frontends/tda1004x.h
@@ -117,7 +117,7 @@ struct tda1004x_state {
enum tda1004x_demod demod_type;
};
-#if IS_ENABLED(CONFIG_DVB_TDA1004X)
+#if IS_REACHABLE(CONFIG_DVB_TDA1004X)
extern struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
struct i2c_adapter* i2c);
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index 331b5a819383..da89f4249846 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -72,7 +72,7 @@ struct tda10071_config {
};
-#if IS_ENABLED(CONFIG_DVB_TDA10071)
+#if IS_REACHABLE(CONFIG_DVB_TDA10071)
extern struct dvb_frontend *tda10071_attach(
const struct tda10071_config *config, struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 420486192736..03f839c431e9 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -99,7 +99,7 @@ struct tda10071_reg_val_mask {
#define CMD_BER_CONTROL 0x3e
#define CMD_BER_UPDATE_COUNTERS 0x3f
-/* firmare command struct */
+/* firmware command struct */
#define TDA10071_ARGLEN 30
struct tda10071_cmd {
u8 args[TDA10071_ARGLEN];
diff --git a/drivers/media/dvb-frontends/tda10086.h b/drivers/media/dvb-frontends/tda10086.h
index 458fe91c1b88..690e469995b6 100644
--- a/drivers/media/dvb-frontends/tda10086.h
+++ b/drivers/media/dvb-frontends/tda10086.h
@@ -46,7 +46,7 @@ struct tda10086_config
enum tda10086_xtal xtal_freq;
};
-#if IS_ENABLED(CONFIG_DVB_TDA10086)
+#if IS_REACHABLE(CONFIG_DVB_TDA10086)
extern struct dvb_frontend* tda10086_attach(const struct tda10086_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.h b/drivers/media/dvb-frontends/tda18271c2dd.h
index dd84f7b69bec..7ebd8eaff4eb 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.h
+++ b/drivers/media/dvb-frontends/tda18271c2dd.h
@@ -3,7 +3,7 @@
#include <linux/kconfig.h>
-#if IS_ENABLED(CONFIG_DVB_TDA18271C2DD)
+#if IS_REACHABLE(CONFIG_DVB_TDA18271C2DD)
struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 adr);
#else
diff --git a/drivers/media/dvb-frontends/tda665x.h b/drivers/media/dvb-frontends/tda665x.h
index 03a0da6d5cf2..baf520baa42e 100644
--- a/drivers/media/dvb-frontends/tda665x.h
+++ b/drivers/media/dvb-frontends/tda665x.h
@@ -31,7 +31,7 @@ struct tda665x_config {
u32 ref_divider;
};
-#if IS_ENABLED(CONFIG_DVB_TDA665x)
+#if IS_REACHABLE(CONFIG_DVB_TDA665x)
extern struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
const struct tda665x_config *config,
diff --git a/drivers/media/dvb-frontends/tda8083.h b/drivers/media/dvb-frontends/tda8083.h
index de6b1860dfdd..46be06fa7e0d 100644
--- a/drivers/media/dvb-frontends/tda8083.h
+++ b/drivers/media/dvb-frontends/tda8083.h
@@ -35,7 +35,7 @@ struct tda8083_config
u8 demod_address;
};
-#if IS_ENABLED(CONFIG_DVB_TDA8083)
+#if IS_REACHABLE(CONFIG_DVB_TDA8083)
extern struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/tda8261.h b/drivers/media/dvb-frontends/tda8261.h
index 55cf4ffcbfdf..9fa5b3076d5b 100644
--- a/drivers/media/dvb-frontends/tda8261.h
+++ b/drivers/media/dvb-frontends/tda8261.h
@@ -34,7 +34,7 @@ struct tda8261_config {
enum tda8261_step step_size;
};
-#if IS_ENABLED(CONFIG_DVB_TDA8261)
+#if IS_REACHABLE(CONFIG_DVB_TDA8261)
extern struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
const struct tda8261_config *config,
diff --git a/drivers/media/dvb-frontends/tda826x.h b/drivers/media/dvb-frontends/tda826x.h
index 5f0f20e7e4f8..81abe1aebe9f 100644
--- a/drivers/media/dvb-frontends/tda826x.h
+++ b/drivers/media/dvb-frontends/tda826x.h
@@ -35,7 +35,7 @@
* @param has_loopthrough Set to 1 if the card has a loopthrough RF connector.
* @return FE pointer on success, NULL on failure.
*/
-#if IS_ENABLED(CONFIG_DVB_TDA826X)
+#if IS_REACHABLE(CONFIG_DVB_TDA826X)
extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c,
int has_loopthrough);
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 9aba044dabed..90164a38cd36 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -26,12 +26,23 @@
#define FREQ_OFFSET_LOW_SYM_RATE 3000
struct ts2020_priv {
+ struct dvb_frontend *fe;
/* i2c details */
int i2c_address;
struct i2c_adapter *i2c;
- u8 clk_out_div;
+ u8 clk_out:2;
+ u8 clk_out_div:5;
u32 frequency;
u32 frequency_div;
+#define TS2020_M88TS2020 0
+#define TS2020_M88TS2022 1
+ u8 tuner;
+ u8 loop_through:1;
+};
+
+struct ts2020_reg_val {
+ u8 reg;
+ u8 val;
};
static int ts2020_release(struct dvb_frontend *fe)
@@ -112,40 +123,77 @@ static int ts2020_readreg(struct dvb_frontend *fe, u8 reg)
static int ts2020_sleep(struct dvb_frontend *fe)
{
struct ts2020_priv *priv = fe->tuner_priv;
- int ret;
- u8 buf[] = { 10, 0 };
- struct i2c_msg msg = {
- .addr = priv->i2c_address,
- .flags = 0,
- .buf = buf,
- .len = 2
- };
+ u8 u8tmp;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
-
- ret = i2c_transfer(priv->i2c, &msg, 1);
- if (ret != 1)
- printk(KERN_ERR "%s: i2c error\n", __func__);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ if (priv->tuner == TS2020_M88TS2020)
+ u8tmp = 0x0a; /* XXX: probably wrong */
+ else
+ u8tmp = 0x00;
- return (ret == 1) ? 0 : ret;
+ return ts2020_writereg(fe, u8tmp, 0x00);
}
static int ts2020_init(struct dvb_frontend *fe)
{
struct ts2020_priv *priv = fe->tuner_priv;
+ int i;
+ u8 u8tmp;
+
+ if (priv->tuner == TS2020_M88TS2020) {
+ ts2020_writereg(fe, 0x42, 0x73);
+ ts2020_writereg(fe, 0x05, priv->clk_out_div);
+ ts2020_writereg(fe, 0x20, 0x27);
+ ts2020_writereg(fe, 0x07, 0x02);
+ ts2020_writereg(fe, 0x11, 0xff);
+ ts2020_writereg(fe, 0x60, 0xf9);
+ ts2020_writereg(fe, 0x08, 0x01);
+ ts2020_writereg(fe, 0x00, 0x41);
+ } else {
+ static const struct ts2020_reg_val reg_vals[] = {
+ {0x7d, 0x9d},
+ {0x7c, 0x9a},
+ {0x7a, 0x76},
+ {0x3b, 0x01},
+ {0x63, 0x88},
+ {0x61, 0x85},
+ {0x22, 0x30},
+ {0x30, 0x40},
+ {0x20, 0x23},
+ {0x24, 0x02},
+ {0x12, 0xa0},
+ };
+
+ ts2020_writereg(fe, 0x00, 0x01);
+ ts2020_writereg(fe, 0x00, 0x03);
+
+ switch (priv->clk_out) {
+ case TS2020_CLK_OUT_DISABLED:
+ u8tmp = 0x60;
+ break;
+ case TS2020_CLK_OUT_ENABLED:
+ u8tmp = 0x70;
+ ts2020_writereg(fe, 0x05, priv->clk_out_div);
+ break;
+ case TS2020_CLK_OUT_ENABLED_XTALOUT:
+ u8tmp = 0x6c;
+ break;
+ default:
+ u8tmp = 0x60;
+ break;
+ }
- ts2020_writereg(fe, 0x42, 0x73);
- ts2020_writereg(fe, 0x05, priv->clk_out_div);
- ts2020_writereg(fe, 0x20, 0x27);
- ts2020_writereg(fe, 0x07, 0x02);
- ts2020_writereg(fe, 0x11, 0xff);
- ts2020_writereg(fe, 0x60, 0xf9);
- ts2020_writereg(fe, 0x08, 0x01);
- ts2020_writereg(fe, 0x00, 0x41);
+ ts2020_writereg(fe, 0x42, u8tmp);
+
+ if (priv->loop_through)
+ u8tmp = 0xec;
+ else
+ u8tmp = 0x6c;
+
+ ts2020_writereg(fe, 0x62, u8tmp);
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++)
+ ts2020_writereg(fe, reg_vals[i].reg, reg_vals[i].val);
+ }
return 0;
}
@@ -203,7 +251,14 @@ static int ts2020_set_params(struct dvb_frontend *fe)
ndiv = ndiv + ndiv % 2;
ndiv = ndiv - 1024;
- ret = ts2020_writereg(fe, 0x10, 0x80 | lo);
+ if (priv->tuner == TS2020_M88TS2020) {
+ lpf_coeff = 2766;
+ ret = ts2020_writereg(fe, 0x10, 0x80 | lo);
+ } else {
+ lpf_coeff = 3200;
+ ret = ts2020_writereg(fe, 0x10, 0x0b);
+ ret |= ts2020_writereg(fe, 0x11, 0x40);
+ }
/* Set frequency divider */
ret |= ts2020_writereg(fe, 0x01, (ndiv >> 8) & 0xf);
@@ -220,7 +275,8 @@ static int ts2020_set_params(struct dvb_frontend *fe)
ret |= ts2020_tuner_gate_ctrl(fe, 0x08);
/* Tuner RF */
- ret |= ts2020_set_tuner_rf(fe);
+ if (priv->tuner == TS2020_M88TS2020)
+ ret |= ts2020_set_tuner_rf(fe);
gdiv28 = (TS2020_XTAL_FREQ / 1000 * 1694 + 500) / 1000;
ret |= ts2020_writereg(fe, 0x04, gdiv28 & 0xff);
@@ -228,6 +284,15 @@ static int ts2020_set_params(struct dvb_frontend *fe)
if (ret < 0)
return -ENODEV;
+ if (priv->tuner == TS2020_M88TS2022) {
+ ret = ts2020_writereg(fe, 0x25, 0x00);
+ ret |= ts2020_writereg(fe, 0x27, 0x70);
+ ret |= ts2020_writereg(fe, 0x41, 0x09);
+ ret |= ts2020_writereg(fe, 0x08, 0x0b);
+ if (ret < 0)
+ return -ENODEV;
+ }
+
value = ts2020_readreg(fe, 0x26);
f3db = (symbol_rate * 135) / 200 + 2000;
@@ -243,8 +308,6 @@ static int ts2020_set_params(struct dvb_frontend *fe)
if (mlpf_max > 63)
mlpf_max = 63;
- lpf_coeff = 2766;
-
nlpf = (f3db * gdiv28 * 2 / lpf_coeff /
(TS2020_XTAL_FREQ / 1000) + 1) / 2;
if (nlpf > 23)
@@ -285,6 +348,13 @@ static int ts2020_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct ts2020_priv *priv = fe->tuner_priv;
*frequency = priv->frequency;
+
+ return 0;
+}
+
+static int ts2020_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ *frequency = 0; /* Zero-IF */
return 0;
}
@@ -324,6 +394,7 @@ static struct dvb_tuner_ops ts2020_tuner_ops = {
.sleep = ts2020_sleep,
.set_params = ts2020_set_params,
.get_frequency = ts2020_get_frequency,
+ .get_if_frequency = ts2020_get_if_frequency,
.get_rf_strength = ts2020_read_signal_strength,
};
@@ -340,8 +411,10 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
priv->i2c_address = config->tuner_address;
priv->i2c = i2c;
+ priv->clk_out = config->clk_out;
priv->clk_out_div = config->clk_out_div;
priv->frequency_div = config->frequency_div;
+ priv->fe = fe;
fe->tuner_priv = priv;
if (!priv->frequency_div)
@@ -358,9 +431,13 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
/* Check the tuner version */
buf = ts2020_readreg(fe, 0x00);
- if ((buf == 0x01) || (buf == 0x41) || (buf == 0x81))
+ if ((buf == 0x01) || (buf == 0x41) || (buf == 0x81)) {
printk(KERN_INFO "%s: Find tuner TS2020!\n", __func__);
- else {
+ priv->tuner = TS2020_M88TS2020;
+ } else if ((buf == 0x83) || (buf == 0xc3)) {
+ printk(KERN_INFO "%s: Find tuner TS2022!\n", __func__);
+ priv->tuner = TS2020_M88TS2022;
+ } else {
printk(KERN_ERR "%s: Read tuner reg[0] = %d\n", __func__, buf);
kfree(priv);
return NULL;
@@ -373,6 +450,165 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
}
EXPORT_SYMBOL(ts2020_attach);
+static int ts2020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ts2020_config *pdata = client->dev.platform_data;
+ struct dvb_frontend *fe = pdata->fe;
+ struct ts2020_priv *dev;
+ int ret;
+ u8 u8tmp;
+ unsigned int utmp;
+ char *chip_str;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->i2c = client->adapter;
+ dev->i2c_address = client->addr;
+ dev->clk_out = pdata->clk_out;
+ dev->clk_out_div = pdata->clk_out_div;
+ dev->frequency_div = pdata->frequency_div;
+ dev->fe = fe;
+ fe->tuner_priv = dev;
+
+ /* check if the tuner is there */
+ ret = ts2020_readreg(fe, 0x00);
+ if (ret < 0)
+ goto err;
+ utmp = ret;
+
+ if ((utmp & 0x03) == 0x00) {
+ ret = ts2020_writereg(fe, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ usleep_range(2000, 50000);
+ }
+
+ ret = ts2020_writereg(fe, 0x00, 0x03);
+ if (ret)
+ goto err;
+
+ usleep_range(2000, 50000);
+
+ ret = ts2020_readreg(fe, 0x00);
+ if (ret < 0)
+ goto err;
+ utmp = ret;
+
+ dev_dbg(&client->dev, "chip_id=%02x\n", utmp);
+
+ switch (utmp) {
+ case 0x01:
+ case 0x41:
+ case 0x81:
+ dev->tuner = TS2020_M88TS2020;
+ chip_str = "TS2020";
+ if (!dev->frequency_div)
+ dev->frequency_div = 1060000;
+ break;
+ case 0xc3:
+ case 0x83:
+ dev->tuner = TS2020_M88TS2022;
+ chip_str = "TS2022";
+ if (!dev->frequency_div)
+ dev->frequency_div = 1103000;
+ break;
+ default:
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (dev->tuner == TS2020_M88TS2022) {
+ switch (dev->clk_out) {
+ case TS2020_CLK_OUT_DISABLED:
+ u8tmp = 0x60;
+ break;
+ case TS2020_CLK_OUT_ENABLED:
+ u8tmp = 0x70;
+ ret = ts2020_writereg(fe, 0x05, dev->clk_out_div);
+ if (ret)
+ goto err;
+ break;
+ case TS2020_CLK_OUT_ENABLED_XTALOUT:
+ u8tmp = 0x6c;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ts2020_writereg(fe, 0x42, u8tmp);
+ if (ret)
+ goto err;
+
+ if (dev->loop_through)
+ u8tmp = 0xec;
+ else
+ u8tmp = 0x6c;
+
+ ret = ts2020_writereg(fe, 0x62, u8tmp);
+ if (ret)
+ goto err;
+ }
+
+ /* sleep */
+ ret = ts2020_writereg(fe, 0x00, 0x00);
+ if (ret)
+ goto err;
+
+ dev_info(&client->dev,
+ "Montage Technology %s successfully identified\n", chip_str);
+
+ memcpy(&fe->ops.tuner_ops, &ts2020_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ fe->ops.tuner_ops.release = NULL;
+
+ i2c_set_clientdata(client, dev);
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ kfree(dev);
+ return ret;
+}
+
+static int ts2020_remove(struct i2c_client *client)
+{
+ struct ts2020_priv *dev = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = dev->fe;
+
+ dev_dbg(&client->dev, "\n");
+
+ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = NULL;
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ts2020_id_table[] = {
+ {"ts2020", 0},
+ {"ts2022", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ts2020_id_table);
+
+static struct i2c_driver ts2020_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ts2020",
+ },
+ .probe = ts2020_probe,
+ .remove = ts2020_remove,
+ .id_table = ts2020_id_table,
+};
+
+module_i2c_driver(ts2020_driver);
+
MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
MODULE_DESCRIPTION("Montage Technology TS2020 - Silicon tuner driver module");
MODULE_LICENSE("GPL");
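With the I2C driver model added above, a bridge can skip ts2020_attach() and instead hand the already-attached demod frontend and the clock settings over as platform data when instantiating the client. A hypothetical sketch; the adapter, address and dev-> fields are placeholders, not taken from any specific bridge driver.

	struct ts2020_config pdata = {
		.fe          = dev->fe,	/* frontend returned by the demod attach */
		.clk_out     = TS2020_CLK_OUT_ENABLED,
		.clk_out_div = 5,
	};
	struct i2c_board_info board_info = {
		I2C_BOARD_INFO("ts2020", 0x60),
		.platform_data = &pdata,
	};
	struct i2c_client *client;

	client = i2c_new_device(&dev->i2c_adap, &board_info);
	if (!client || !client->dev.driver)
		return -ENODEV;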
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h
index b2fe6bb3a38b..1714af94eca2 100644
--- a/drivers/media/dvb-frontends/ts2020.h
+++ b/drivers/media/dvb-frontends/ts2020.h
@@ -27,11 +27,34 @@
struct ts2020_config {
u8 tuner_address;
- u8 clk_out_div;
u32 frequency_div;
+
+ /*
+ * RF loop-through
+ */
+ u8 loop_through:1;
+
+ /*
+ * clock output
+ */
+#define TS2020_CLK_OUT_DISABLED 0
+#define TS2020_CLK_OUT_ENABLED 1
+#define TS2020_CLK_OUT_ENABLED_XTALOUT 2
+ u8 clk_out:2;
+
+ /*
+ * clock output divider
+ * 1 - 31
+ */
+ u8 clk_out_div:5;
+
+ /*
+ * pointer to DVB frontend
+ */
+ struct dvb_frontend *fe;
};
-#if IS_ENABLED(CONFIG_DVB_TS2020)
+#if IS_REACHABLE(CONFIG_DVB_TS2020)
extern struct dvb_frontend *ts2020_attach(
struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/tua6100.h b/drivers/media/dvb-frontends/tua6100.h
index 83a9c30e67ca..52919e04e258 100644
--- a/drivers/media/dvb-frontends/tua6100.h
+++ b/drivers/media/dvb-frontends/tua6100.h
@@ -34,7 +34,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if IS_ENABLED(CONFIG_DVB_TUA6100)
+#if IS_REACHABLE(CONFIG_DVB_TUA6100)
extern struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend* tua6100_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c)
diff --git a/drivers/media/dvb-frontends/ves1820.h b/drivers/media/dvb-frontends/ves1820.h
index c073f353ac38..ece46fdcd714 100644
--- a/drivers/media/dvb-frontends/ves1820.h
+++ b/drivers/media/dvb-frontends/ves1820.h
@@ -41,7 +41,7 @@ struct ves1820_config
u8 selagc:1;
};
-#if IS_ENABLED(CONFIG_DVB_VES1820)
+#if IS_REACHABLE(CONFIG_DVB_VES1820)
extern struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
struct i2c_adapter* i2c, u8 pwm);
#else
diff --git a/drivers/media/dvb-frontends/ves1x93.h b/drivers/media/dvb-frontends/ves1x93.h
index 2307caea6aec..4510fe2f6676 100644
--- a/drivers/media/dvb-frontends/ves1x93.h
+++ b/drivers/media/dvb-frontends/ves1x93.h
@@ -40,7 +40,7 @@ struct ves1x93_config
u8 invert_pwm:1;
};
-#if IS_ENABLED(CONFIG_DVB_VES1X93)
+#if IS_REACHABLE(CONFIG_DVB_VES1X93)
extern struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index 5f1e8217eeb6..670e76a654ee 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -38,7 +38,7 @@ struct zl10036_config {
int rf_loop_enable;
};
-#if IS_ENABLED(CONFIG_DVB_ZL10036)
+#if IS_REACHABLE(CONFIG_DVB_ZL10036)
extern struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe,
const struct zl10036_config *config, struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/zl10039.h b/drivers/media/dvb-frontends/zl10039.h
index 750b9bca9d02..070929444e71 100644
--- a/drivers/media/dvb-frontends/zl10039.h
+++ b/drivers/media/dvb-frontends/zl10039.h
@@ -24,7 +24,7 @@
#include <linux/kconfig.h>
-#if IS_ENABLED(CONFIG_DVB_ZL10039)
+#if IS_REACHABLE(CONFIG_DVB_ZL10039)
struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe,
u8 i2c_addr,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/zl10353.h b/drivers/media/dvb-frontends/zl10353.h
index 50c1004aef36..37aa6e8f454a 100644
--- a/drivers/media/dvb-frontends/zl10353.h
+++ b/drivers/media/dvb-frontends/zl10353.h
@@ -47,7 +47,7 @@ struct zl10353_config
u8 pll_0; /* default: 0x15 */
};
-#if IS_ENABLED(CONFIG_DVB_ZL10353)
+#if IS_REACHABLE(CONFIG_DVB_ZL10353)
extern struct dvb_frontend* zl10353_attach(const struct zl10353_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index da58c9bb67c2..6f30ea76151a 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -466,6 +466,17 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+config VIDEO_OV2659
+ tristate "OmniVision OV2659 sensor support"
+ depends on VIDEO_V4L2 && I2C
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV2659 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2659.
+
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 9858900168bf..f165faea5b3f 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -77,3 +77,4 @@ obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
+obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index fada17566205..69094ab047b1 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -239,8 +239,8 @@ static void ad9389b_set_IT_content_AVI_InfoFrame(struct v4l2_subdev *sd)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
- if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* CEA format, not IT */
+ if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
+ /* CE format, not IT */
ad9389b_wr_and_or(sd, 0xcd, 0xbf, 0x00);
} else {
/* IT format */
@@ -255,11 +255,11 @@ static int ad9389b_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2
switch (ctrl->val) {
case V4L2_DV_RGB_RANGE_AUTO:
/* automatic */
- if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* cea format, RGB limited range (16-235) */
+ if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
+ /* CE format, RGB limited range (16-235) */
ad9389b_csc_rgb_full2limit(sd, true);
} else {
- /* not cea format, RGB full range (0-255) */
+ /* not CE format, RGB full range (0-255) */
ad9389b_csc_rgb_full2limit(sd, false);
}
break;
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index b75878c27c2a..a493c0b0b5fe 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -582,7 +582,7 @@ static void adv7180_exit_controls(struct adv7180_state *state)
}
static int adv7180_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -645,13 +645,13 @@ static int adv7180_set_field_mode(struct adv7180_state *state)
}
static int adv7180_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct adv7180_state *state = to_state(sd);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(fh, 0);
+ format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
} else {
adv7180_mbus_fmt(sd, &format->format);
format->format.field = state->field;
@@ -661,7 +661,7 @@ static int adv7180_get_pad_format(struct v4l2_subdev *sd,
}
static int adv7180_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct adv7180_state *state = to_state(sd);
@@ -686,7 +686,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
adv7180_set_power(state, true);
}
} else {
- framefmt = v4l2_subdev_get_try_format(fh, 0);
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, 0);
*framefmt = format->format;
}
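These hunks (and the matching ones in the other i2c subdev drivers below) track the subdev pad-ops API change visible in the signatures: try formats are now stored in a caller-supplied struct v4l2_subdev_pad_config rather than in the subdev file handle, so the helper takes the subdev and that config explicitly. In short, as the -/+ lines already show:

	/* old */ fmt = v4l2_subdev_get_try_format(fh, pad);
	/* new */ fmt = v4l2_subdev_get_try_format(sd, cfg, pad);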
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 9d38f7b36cd1..7c50833e7d17 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -506,7 +506,6 @@ static int adv7343_remove(struct i2c_client *client)
struct adv7343_state *state = to_state(sd);
v4l2_async_unregister_subdev(&state->sd);
- v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
return 0;
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 81736aaf0f31..12d93203d405 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -312,8 +312,8 @@ static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
static void adv7511_set_IT_content_AVI_InfoFrame(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
- if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* CEA format, not IT */
+ if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
+ /* CE format, not IT */
adv7511_wr_and_or(sd, 0x57, 0x7f, 0x00);
} else {
/* IT format */
@@ -331,11 +331,11 @@ static int adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2
/* automatic */
struct adv7511_state *state = get_adv7511_state(sd);
- if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* cea format, RGB limited range (16-235) */
+ if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
+ /* CE format, RGB limited range (16-235) */
adv7511_csc_rgb_full2limit(sd, true);
} else {
- /* not cea format, RGB full range (0-255) */
+ /* not CE format, RGB full range (0-255) */
adv7511_csc_rgb_full2limit(sd, false);
}
}
@@ -810,7 +810,7 @@ static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
}
static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad != 0)
@@ -842,8 +842,9 @@ static void adv7511_fill_format(struct adv7511_state *state,
format->field = V4L2_FIELD_NONE;
}
-static int adv7511_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_format *format)
+static int adv7511_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
{
struct adv7511_state *state = get_adv7511_state(sd);
@@ -855,7 +856,7 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(fh, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
format->format.code = fmt->code;
format->format.colorspace = fmt->colorspace;
format->format.ycbcr_enc = fmt->ycbcr_enc;
@@ -870,8 +871,9 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
-static int adv7511_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_format *format)
+static int adv7511_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
{
struct adv7511_state *state = get_adv7511_state(sd);
/*
@@ -905,7 +907,7 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(fh, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
fmt->code = format->format.code;
fmt->colorspace = format->format.colorspace;
fmt->ycbcr_enc = format->format.ycbcr_enc;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index d228b7c82310..60ffcf098bef 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -53,41 +53,41 @@ MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
MODULE_LICENSE("GPL");
/* ADV7604 system clock frequency */
-#define ADV7604_fsc (28636360)
+#define ADV76XX_FSC (28636360)
-#define ADV7604_RGB_OUT (1 << 1)
+#define ADV76XX_RGB_OUT (1 << 1)
-#define ADV7604_OP_FORMAT_SEL_8BIT (0 << 0)
+#define ADV76XX_OP_FORMAT_SEL_8BIT (0 << 0)
#define ADV7604_OP_FORMAT_SEL_10BIT (1 << 0)
-#define ADV7604_OP_FORMAT_SEL_12BIT (2 << 0)
+#define ADV76XX_OP_FORMAT_SEL_12BIT (2 << 0)
-#define ADV7604_OP_MODE_SEL_SDR_422 (0 << 5)
+#define ADV76XX_OP_MODE_SEL_SDR_422 (0 << 5)
#define ADV7604_OP_MODE_SEL_DDR_422 (1 << 5)
-#define ADV7604_OP_MODE_SEL_SDR_444 (2 << 5)
+#define ADV76XX_OP_MODE_SEL_SDR_444 (2 << 5)
#define ADV7604_OP_MODE_SEL_DDR_444 (3 << 5)
-#define ADV7604_OP_MODE_SEL_SDR_422_2X (4 << 5)
+#define ADV76XX_OP_MODE_SEL_SDR_422_2X (4 << 5)
#define ADV7604_OP_MODE_SEL_ADI_CM (5 << 5)
-#define ADV7604_OP_CH_SEL_GBR (0 << 5)
-#define ADV7604_OP_CH_SEL_GRB (1 << 5)
-#define ADV7604_OP_CH_SEL_BGR (2 << 5)
-#define ADV7604_OP_CH_SEL_RGB (3 << 5)
-#define ADV7604_OP_CH_SEL_BRG (4 << 5)
-#define ADV7604_OP_CH_SEL_RBG (5 << 5)
+#define ADV76XX_OP_CH_SEL_GBR (0 << 5)
+#define ADV76XX_OP_CH_SEL_GRB (1 << 5)
+#define ADV76XX_OP_CH_SEL_BGR (2 << 5)
+#define ADV76XX_OP_CH_SEL_RGB (3 << 5)
+#define ADV76XX_OP_CH_SEL_BRG (4 << 5)
+#define ADV76XX_OP_CH_SEL_RBG (5 << 5)
-#define ADV7604_OP_SWAP_CB_CR (1 << 0)
+#define ADV76XX_OP_SWAP_CB_CR (1 << 0)
-enum adv7604_type {
+enum adv76xx_type {
ADV7604,
ADV7611,
};
-struct adv7604_reg_seq {
+struct adv76xx_reg_seq {
unsigned int reg;
u8 val;
};
-struct adv7604_format_info {
+struct adv76xx_format_info {
u32 code;
u8 op_ch_sel;
bool rgb_out;
@@ -95,8 +95,8 @@ struct adv7604_format_info {
u8 op_format_sel;
};
-struct adv7604_chip_info {
- enum adv7604_type type;
+struct adv76xx_chip_info {
+ enum adv76xx_type type;
bool has_afe;
unsigned int max_port;
@@ -109,8 +109,9 @@ struct adv7604_chip_info {
unsigned int cable_det_mask;
unsigned int tdms_lock_mask;
unsigned int fmt_change_digital_mask;
+ unsigned int cp_csc;
- const struct adv7604_format_info *formats;
+ const struct adv76xx_format_info *formats;
unsigned int nformats;
void (*set_termination)(struct v4l2_subdev *sd, bool enable);
@@ -119,7 +120,7 @@ struct adv7604_chip_info {
unsigned int (*read_cable_det)(struct v4l2_subdev *sd);
/* 0 = AFE, 1 = HDMI */
- const struct adv7604_reg_seq *recommended_settings[2];
+ const struct adv76xx_reg_seq *recommended_settings[2];
unsigned int num_recommended_settings[2];
unsigned long page_mask;
@@ -133,22 +134,22 @@ struct adv7604_chip_info {
**********************************************************************
*/
-struct adv7604_state {
- const struct adv7604_chip_info *info;
- struct adv7604_platform_data pdata;
+struct adv76xx_state {
+ const struct adv76xx_chip_info *info;
+ struct adv76xx_platform_data pdata;
struct gpio_desc *hpd_gpio[4];
struct v4l2_subdev sd;
- struct media_pad pads[ADV7604_PAD_MAX];
+ struct media_pad pads[ADV76XX_PAD_MAX];
unsigned int source_pad;
struct v4l2_ctrl_handler hdl;
- enum adv7604_pad selected_input;
+ enum adv76xx_pad selected_input;
struct v4l2_dv_timings timings;
- const struct adv7604_format_info *format;
+ const struct adv76xx_format_info *format;
struct {
u8 edid[256];
@@ -163,7 +164,7 @@ struct adv7604_state {
bool restart_stdi_once;
/* i2c clients */
- struct i2c_client *i2c_clients[ADV7604_PAGE_MAX];
+ struct i2c_client *i2c_clients[ADV76XX_PAGE_MAX];
/* controls */
struct v4l2_ctrl *detect_tx_5v_ctrl;
@@ -173,13 +174,13 @@ struct adv7604_state {
struct v4l2_ctrl *rgb_quantization_range_ctrl;
};
-static bool adv7604_has_afe(struct adv7604_state *state)
+static bool adv76xx_has_afe(struct adv76xx_state *state)
{
return state->info->has_afe;
}
/* Supported CEA and DMT timings */
-static const struct v4l2_dv_timings adv7604_timings[] = {
+static const struct v4l2_dv_timings adv76xx_timings[] = {
V4L2_DV_BT_CEA_720X480P59_94,
V4L2_DV_BT_CEA_720X576P50,
V4L2_DV_BT_CEA_1280X720P24,
@@ -243,14 +244,14 @@ static const struct v4l2_dv_timings adv7604_timings[] = {
{ },
};
-struct adv7604_video_standards {
+struct adv76xx_video_standards {
struct v4l2_dv_timings timings;
u8 vid_std;
u8 v_freq;
};
/* sorted by number of lines */
-static const struct adv7604_video_standards adv7604_prim_mode_comp[] = {
+static const struct adv76xx_video_standards adv7604_prim_mode_comp[] = {
/* { V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 }, TODO flickering */
{ V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
{ V4L2_DV_BT_CEA_1280X720P50, 0x19, 0x01 },
@@ -265,7 +266,7 @@ static const struct adv7604_video_standards adv7604_prim_mode_comp[] = {
};
/* sorted by number of lines */
-static const struct adv7604_video_standards adv7604_prim_mode_gr[] = {
+static const struct adv76xx_video_standards adv7604_prim_mode_gr[] = {
{ V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
{ V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
{ V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
@@ -293,7 +294,7 @@ static const struct adv7604_video_standards adv7604_prim_mode_gr[] = {
};
/* sorted by number of lines */
-static const struct adv7604_video_standards adv7604_prim_mode_hdmi_comp[] = {
+static const struct adv76xx_video_standards adv76xx_prim_mode_hdmi_comp[] = {
{ V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 },
{ V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
{ V4L2_DV_BT_CEA_1280X720P50, 0x13, 0x01 },
@@ -307,7 +308,7 @@ static const struct adv7604_video_standards adv7604_prim_mode_hdmi_comp[] = {
};
/* sorted by number of lines */
-static const struct adv7604_video_standards adv7604_prim_mode_hdmi_gr[] = {
+static const struct adv76xx_video_standards adv76xx_prim_mode_hdmi_gr[] = {
{ V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
{ V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
{ V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
@@ -328,9 +329,9 @@ static const struct adv7604_video_standards adv7604_prim_mode_hdmi_gr[] = {
/* ----------------------------------------------------------------------- */
-static inline struct adv7604_state *to_state(struct v4l2_subdev *sd)
+static inline struct adv76xx_state *to_state(struct v4l2_subdev *sd)
{
- return container_of(sd, struct adv7604_state, sd);
+ return container_of(sd, struct adv76xx_state, sd);
}
static inline unsigned htotal(const struct v4l2_bt_timings *t)
@@ -360,15 +361,15 @@ static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
return -EIO;
}
-static s32 adv_smbus_read_byte_data(struct adv7604_state *state,
- enum adv7604_page page, u8 command)
+static s32 adv_smbus_read_byte_data(struct adv76xx_state *state,
+ enum adv76xx_page page, u8 command)
{
return adv_smbus_read_byte_data_check(state->i2c_clients[page],
command, true);
}
-static s32 adv_smbus_write_byte_data(struct adv7604_state *state,
- enum adv7604_page page, u8 command,
+static s32 adv_smbus_write_byte_data(struct adv76xx_state *state,
+ enum adv76xx_page page, u8 command,
u8 value)
{
struct i2c_client *client = state->i2c_clients[page];
@@ -391,8 +392,8 @@ static s32 adv_smbus_write_byte_data(struct adv7604_state *state,
return err;
}
-static s32 adv_smbus_write_i2c_block_data(struct adv7604_state *state,
- enum adv7604_page page, u8 command,
+static s32 adv_smbus_write_i2c_block_data(struct adv76xx_state *state,
+ enum adv76xx_page page, u8 command,
unsigned length, const u8 *values)
{
struct i2c_client *client = state->i2c_clients[page];
@@ -411,16 +412,16 @@ static s32 adv_smbus_write_i2c_block_data(struct adv7604_state *state,
static inline int io_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_IO, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_IO, reg);
}
static inline int io_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_IO, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_IO, reg, val);
}
static inline int io_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -430,73 +431,73 @@ static inline int io_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 v
static inline int avlink_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
return adv_smbus_read_byte_data(state, ADV7604_PAGE_AVLINK, reg);
}
static inline int avlink_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
return adv_smbus_write_byte_data(state, ADV7604_PAGE_AVLINK, reg, val);
}
static inline int cec_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_CEC, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_CEC, reg);
}
static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_CEC, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_CEC, reg, val);
}
static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_INFOFRAME, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_INFOFRAME, reg);
}
static inline int infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_INFOFRAME,
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_INFOFRAME,
reg, val);
}
static inline int afe_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_AFE, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_AFE, reg);
}
static inline int afe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_AFE, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_AFE, reg, val);
}
static inline int rep_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_REP, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_REP, reg);
}
static inline int rep_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_REP, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_REP, reg, val);
}
static inline int rep_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -506,64 +507,60 @@ static inline int rep_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8
static inline int edid_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_EDID, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_EDID, reg);
}
static inline int edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_EDID, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_EDID, reg, val);
}
static inline int edid_write_block(struct v4l2_subdev *sd,
unsigned len, const u8 *val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
int err = 0;
int i;
v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
- err = adv_smbus_write_i2c_block_data(state, ADV7604_PAGE_EDID,
+ err = adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_EDID,
i, I2C_SMBUS_BLOCK_MAX, val + i);
return err;
}
-static void adv7604_set_hpd(struct adv7604_state *state, unsigned int hpd)
+static void adv76xx_set_hpd(struct adv76xx_state *state, unsigned int hpd)
{
unsigned int i;
- for (i = 0; i < state->info->num_dv_ports; ++i) {
- if (IS_ERR(state->hpd_gpio[i]))
- continue;
-
+ for (i = 0; i < state->info->num_dv_ports; ++i)
gpiod_set_value_cansleep(state->hpd_gpio[i], hpd & BIT(i));
- }
- v4l2_subdev_notify(&state->sd, ADV7604_HOTPLUG, &hpd);
+ v4l2_subdev_notify(&state->sd, ADV76XX_HOTPLUG, &hpd);
}
-static void adv7604_delayed_work_enable_hotplug(struct work_struct *work)
+static void adv76xx_delayed_work_enable_hotplug(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct adv7604_state *state = container_of(dwork, struct adv7604_state,
+ struct adv76xx_state *state = container_of(dwork, struct adv76xx_state,
delayed_work_enable_hotplug);
struct v4l2_subdev *sd = &state->sd;
v4l2_dbg(2, debug, sd, "%s: enable hotplug\n", __func__);
- adv7604_set_hpd(state, state->edid.present);
+ adv76xx_set_hpd(state, state->edid.present);
}
static inline int hdmi_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_HDMI, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_HDMI, reg);
}
static u16 hdmi_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
@@ -573,9 +570,9 @@ static u16 hdmi_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_HDMI, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_HDMI, reg, val);
}
static inline int hdmi_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -585,16 +582,16 @@ static inline int hdmi_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8
static inline int test_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_TEST, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_TEST, reg, val);
}
static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_CP, reg);
+ return adv_smbus_read_byte_data(state, ADV76XX_PAGE_CP, reg);
}
static u16 cp_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
@@ -604,9 +601,9 @@ static u16 cp_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
static inline int cp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_CP, reg, val);
+ return adv_smbus_write_byte_data(state, ADV76XX_PAGE_CP, reg, val);
}
static inline int cp_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -616,25 +613,25 @@ static inline int cp_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 v
static inline int vdp_read(struct v4l2_subdev *sd, u8 reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
return adv_smbus_read_byte_data(state, ADV7604_PAGE_VDP, reg);
}
static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
return adv_smbus_write_byte_data(state, ADV7604_PAGE_VDP, reg, val);
}
-#define ADV7604_REG(page, offset) (((page) << 8) | (offset))
-#define ADV7604_REG_SEQ_TERM 0xffff
+#define ADV76XX_REG(page, offset) (((page) << 8) | (offset))
+#define ADV76XX_REG_SEQ_TERM 0xffff
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int adv7604_read_reg(struct v4l2_subdev *sd, unsigned int reg)
+static int adv76xx_read_reg(struct v4l2_subdev *sd, unsigned int reg)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
unsigned int page = reg >> 8;
if (!(BIT(page) & state->info->page_mask))
@@ -646,9 +643,9 @@ static int adv7604_read_reg(struct v4l2_subdev *sd, unsigned int reg)
}
#endif
-static int adv7604_write_reg(struct v4l2_subdev *sd, unsigned int reg, u8 val)
+static int adv76xx_write_reg(struct v4l2_subdev *sd, unsigned int reg, u8 val)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
unsigned int page = reg >> 8;
if (!(BIT(page) & state->info->page_mask))
@@ -659,91 +656,91 @@ static int adv7604_write_reg(struct v4l2_subdev *sd, unsigned int reg, u8 val)
return adv_smbus_write_byte_data(state, page, reg, val);
}
-static void adv7604_write_reg_seq(struct v4l2_subdev *sd,
- const struct adv7604_reg_seq *reg_seq)
+static void adv76xx_write_reg_seq(struct v4l2_subdev *sd,
+ const struct adv76xx_reg_seq *reg_seq)
{
unsigned int i;
- for (i = 0; reg_seq[i].reg != ADV7604_REG_SEQ_TERM; i++)
- adv7604_write_reg(sd, reg_seq[i].reg, reg_seq[i].val);
+ for (i = 0; reg_seq[i].reg != ADV76XX_REG_SEQ_TERM; i++)
+ adv76xx_write_reg(sd, reg_seq[i].reg, reg_seq[i].val);
}
/* -----------------------------------------------------------------------------
* Format helpers
*/
-static const struct adv7604_format_info adv7604_formats[] = {
- { MEDIA_BUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
- ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YUYV10_2X10, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
- { MEDIA_BUS_FMT_YVYU10_2X10, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
- { MEDIA_BUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_UYVY10_1X20, ADV7604_OP_CH_SEL_RBG, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { MEDIA_BUS_FMT_VYUY10_1X20, ADV7604_OP_CH_SEL_RBG, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { MEDIA_BUS_FMT_YUYV10_1X20, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { MEDIA_BUS_FMT_YVYU10_1X20, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { MEDIA_BUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
+static const struct adv76xx_format_info adv7604_formats[] = {
+ { MEDIA_BUS_FMT_RGB888_1X24, ADV76XX_OP_CH_SEL_RGB, true, false,
+ ADV76XX_OP_MODE_SEL_SDR_444 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV8_2X8, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YVYU8_2X8, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV10_2X10, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
+ { MEDIA_BUS_FMT_YVYU10_2X10, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
+ { MEDIA_BUS_FMT_YUYV12_2X12, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_YVYU12_2X12, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_UYVY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_VYUY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV8_1X16, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YVYU8_1X16, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_UYVY10_1X20, ADV76XX_OP_CH_SEL_RBG, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
+ { MEDIA_BUS_FMT_VYUY10_1X20, ADV76XX_OP_CH_SEL_RBG, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
+ { MEDIA_BUS_FMT_YUYV10_1X20, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
+ { MEDIA_BUS_FMT_YVYU10_1X20, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
+ { MEDIA_BUS_FMT_UYVY12_1X24, ADV76XX_OP_CH_SEL_RBG, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_VYUY12_1X24, ADV76XX_OP_CH_SEL_RBG, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_YUYV12_1X24, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_YVYU12_1X24, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
};
-static const struct adv7604_format_info adv7611_formats[] = {
- { MEDIA_BUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
- ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { MEDIA_BUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { MEDIA_BUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
- ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
+static const struct adv76xx_format_info adv7611_formats[] = {
+ { MEDIA_BUS_FMT_RGB888_1X24, ADV76XX_OP_CH_SEL_RGB, true, false,
+ ADV76XX_OP_MODE_SEL_SDR_444 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV8_2X8, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YVYU8_2X8, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV12_2X12, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_YVYU12_2X12, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_UYVY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_VYUY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV8_1X16, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YVYU8_1X16, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_UYVY12_1X24, ADV76XX_OP_CH_SEL_RBG, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_VYUY12_1X24, ADV76XX_OP_CH_SEL_RBG, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_YUYV12_1X24, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
+ { MEDIA_BUS_FMT_YVYU12_1X24, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
};
-static const struct adv7604_format_info *
-adv7604_format_info(struct adv7604_state *state, u32 code)
+static const struct adv76xx_format_info *
+adv76xx_format_info(struct adv76xx_state *state, u32 code)
{
unsigned int i;
@@ -759,7 +756,7 @@ adv7604_format_info(struct adv7604_state *state, u32 code)
static inline bool is_analog_input(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
return state->selected_input == ADV7604_PAD_VGA_RGB ||
state->selected_input == ADV7604_PAD_VGA_COMP;
@@ -767,9 +764,9 @@ static inline bool is_analog_input(struct v4l2_subdev *sd)
static inline bool is_digital_input(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- return state->selected_input == ADV7604_PAD_HDMI_PORT_A ||
+ return state->selected_input == ADV76XX_PAD_HDMI_PORT_A ||
state->selected_input == ADV7604_PAD_HDMI_PORT_B ||
state->selected_input == ADV7604_PAD_HDMI_PORT_C ||
state->selected_input == ADV7604_PAD_HDMI_PORT_D;
@@ -778,7 +775,7 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
/* ----------------------------------------------------------------------- */
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static void adv7604_inv_register(struct v4l2_subdev *sd)
+static void adv76xx_inv_register(struct v4l2_subdev *sd)
{
v4l2_info(sd, "0x000-0x0ff: IO Map\n");
v4l2_info(sd, "0x100-0x1ff: AVLink Map\n");
@@ -795,15 +792,15 @@ static void adv7604_inv_register(struct v4l2_subdev *sd)
v4l2_info(sd, "0xc00-0xcff: VDP Map\n");
}
-static int adv7604_g_register(struct v4l2_subdev *sd,
+static int adv76xx_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
int ret;
- ret = adv7604_read_reg(sd, reg->reg);
+ ret = adv76xx_read_reg(sd, reg->reg);
if (ret < 0) {
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
- adv7604_inv_register(sd);
+ adv76xx_inv_register(sd);
return ret;
}
@@ -813,15 +810,15 @@ static int adv7604_g_register(struct v4l2_subdev *sd,
return 0;
}
-static int adv7604_s_register(struct v4l2_subdev *sd,
+static int adv76xx_s_register(struct v4l2_subdev *sd,
const struct v4l2_dbg_register *reg)
{
int ret;
- ret = adv7604_write_reg(sd, reg->reg, reg->val);
+ ret = adv76xx_write_reg(sd, reg->reg, reg->val);
if (ret < 0) {
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
- adv7604_inv_register(sd);
+ adv76xx_inv_register(sd);
return ret;
}
@@ -846,10 +843,10 @@ static unsigned int adv7611_read_cable_det(struct v4l2_subdev *sd)
return value & 1;
}
-static int adv7604_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
+static int adv76xx_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
info->read_cable_det(sd));
@@ -857,7 +854,7 @@ static int adv7604_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
u8 prim_mode,
- const struct adv7604_video_standards *predef_vid_timings,
+ const struct adv76xx_video_standards *predef_vid_timings,
const struct v4l2_dv_timings *timings)
{
int i;
@@ -878,12 +875,12 @@ static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
static int configure_predefined_video_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
int err;
v4l2_dbg(1, debug, sd, "%s", __func__);
- if (adv7604_has_afe(state)) {
+ if (adv76xx_has_afe(state)) {
/* reset to default values */
io_write(sd, 0x16, 0x43);
io_write(sd, 0x17, 0x5a);
@@ -909,10 +906,10 @@ static int configure_predefined_video_timings(struct v4l2_subdev *sd,
0x02, adv7604_prim_mode_gr, timings);
} else if (is_digital_input(sd)) {
err = find_and_set_predefined_video_timings(sd,
- 0x05, adv7604_prim_mode_hdmi_comp, timings);
+ 0x05, adv76xx_prim_mode_hdmi_comp, timings);
if (err)
err = find_and_set_predefined_video_timings(sd,
- 0x06, adv7604_prim_mode_hdmi_gr, timings);
+ 0x06, adv76xx_prim_mode_hdmi_gr, timings);
} else {
v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
__func__, state->selected_input);
@@ -926,7 +923,7 @@ static int configure_predefined_video_timings(struct v4l2_subdev *sd,
static void configure_custom_video_timings(struct v4l2_subdev *sd,
const struct v4l2_bt_timings *bt)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
u32 width = htotal(bt);
u32 height = vtotal(bt);
u16 cp_start_sav = bt->hsync + bt->hbackporch - 4;
@@ -934,7 +931,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
u16 cp_start_vbi = height - bt->vfrontporch;
u16 cp_end_vbi = bt->vsync + bt->vbackporch;
u16 ch1_fr_ll = (((u32)bt->pixelclock / 100) > 0) ?
- ((width * (ADV7604_fsc / 100)) / ((u32)bt->pixelclock / 100)) : 0;
+ ((width * (ADV76XX_FSC / 100)) / ((u32)bt->pixelclock / 100)) : 0;
const u8 pll[2] = {
0xc0 | ((width >> 8) & 0x1f),
width & 0xff
@@ -952,7 +949,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
/* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
/* IO-map reg. 0x16 and 0x17 should be written in sequence */
- if (adv_smbus_write_i2c_block_data(state, ADV7604_PAGE_IO,
+ if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_IO,
0x16, 2, pll))
v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
@@ -983,9 +980,9 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0xac, (height & 0x0f) << 4);
}
-static void adv7604_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 offset_a, u16 offset_b, u16 offset_c)
+static void adv76xx_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 offset_a, u16 offset_b, u16 offset_c)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
u8 offset_buf[4];
if (auto_offset) {
@@ -1004,14 +1001,14 @@ static void adv7604_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 off
offset_buf[3] = offset_c & 0x0ff;
/* Registers must be written in this order with no i2c access in between */
- if (adv_smbus_write_i2c_block_data(state, ADV7604_PAGE_CP,
+ if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_CP,
0x77, 4, offset_buf))
v4l2_err(sd, "%s: i2c error writing to CP reg 0x77, 0x78, 0x79, 0x7a\n", __func__);
}
-static void adv7604_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a, u16 gain_b, u16 gain_c)
+static void adv76xx_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a, u16 gain_b, u16 gain_c)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
u8 gain_buf[4];
u8 gain_man = 1;
u8 agc_mode_man = 1;
@@ -1034,14 +1031,14 @@ static void adv7604_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a,
gain_buf[3] = ((gain_c & 0x0ff));
/* Registers must be written in this order with no i2c access in between */
- if (adv_smbus_write_i2c_block_data(state, ADV7604_PAGE_CP,
+ if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_CP,
0x73, 4, gain_buf))
v4l2_err(sd, "%s: i2c error writing to CP reg 0x73, 0x74, 0x75, 0x76\n", __func__);
}
static void set_rgb_quantization_range(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
bool rgb_output = io_read(sd, 0x02) & 0x02;
bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
@@ -1049,8 +1046,8 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
__func__, state->rgb_quantization_range,
rgb_output, hdmi_signal);
- adv7604_set_gain(sd, true, 0x0, 0x0, 0x0);
- adv7604_set_offset(sd, true, 0x0, 0x0, 0x0);
+ adv76xx_set_gain(sd, true, 0x0, 0x0, 0x0);
+ adv76xx_set_offset(sd, true, 0x0, 0x0, 0x0);
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
@@ -1078,7 +1075,7 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
/* Receiving DVI-D signal
* ADV7604 selects RGB limited range regardless of
* input format (CE/IT) in automatic mode */
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ if (state->timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
/* RGB limited range (16-235) */
io_write_clr_set(sd, 0x02, 0xf0, 0x00);
} else {
@@ -1086,10 +1083,10 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
io_write_clr_set(sd, 0x02, 0xf0, 0x10);
if (is_digital_input(sd) && rgb_output) {
- adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
+ adv76xx_set_offset(sd, false, 0x40, 0x40, 0x40);
} else {
- adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
- adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
+ adv76xx_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv76xx_set_offset(sd, false, 0x70, 0x70, 0x70);
}
}
break;
@@ -1119,21 +1116,21 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
/* Adjust gain/offset for DVI-D signals only */
if (rgb_output) {
- adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
+ adv76xx_set_offset(sd, false, 0x40, 0x40, 0x40);
} else {
- adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
- adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
+ adv76xx_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv76xx_set_offset(sd, false, 0x70, 0x70, 0x70);
}
break;
}
}
-static int adv7604_s_ctrl(struct v4l2_ctrl *ctrl)
+static int adv76xx_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd =
- &container_of(ctrl->handler, struct adv7604_state, hdl)->sd;
+ &container_of(ctrl->handler, struct adv76xx_state, hdl)->sd;
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -1153,7 +1150,7 @@ static int adv7604_s_ctrl(struct v4l2_ctrl *ctrl)
set_rgb_quantization_range(sd);
return 0;
case V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE:
- if (!adv7604_has_afe(state))
+ if (!adv76xx_has_afe(state))
return -EINVAL;
/* Set the analog sampling phase. This is needed to find the
best sampling phase for analog video: an application or
@@ -1185,15 +1182,15 @@ static inline bool no_power(struct v4l2_subdev *sd)
static inline bool no_signal_tmds(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
return !(io_read(sd, 0x6a) & (0x10 >> state->selected_input));
}
static inline bool no_lock_tmds(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
return (io_read(sd, 0x6a) & info->tdms_lock_mask) != info->tdms_lock_mask;
}
@@ -1205,13 +1202,13 @@ static inline bool is_hdmi(struct v4l2_subdev *sd)
static inline bool no_lock_sspd(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
/*
* Chips without a AFE don't expose registers for the SSPD, so just assume
* that we have a lock.
*/
- if (adv7604_has_afe(state))
+ if (adv76xx_has_afe(state))
return false;
/* TODO channel 2 */
@@ -1243,9 +1240,9 @@ static inline bool no_signal(struct v4l2_subdev *sd)
static inline bool no_lock_cp(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- if (!adv7604_has_afe(state))
+ if (!adv76xx_has_afe(state))
return false;
/* CP has detected a non standard number of lines on the incoming
@@ -1253,13 +1250,19 @@ static inline bool no_lock_cp(struct v4l2_subdev *sd)
return io_read(sd, 0x12) & 0x01;
}
-static int adv7604_g_input_status(struct v4l2_subdev *sd, u32 *status)
+static inline bool in_free_run(struct v4l2_subdev *sd)
+{
+ return cp_read(sd, 0xff) & 0x10;
+}
+
+static int adv76xx_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
*status = 0;
*status |= no_power(sd) ? V4L2_IN_ST_NO_POWER : 0;
*status |= no_signal(sd) ? V4L2_IN_ST_NO_SIGNAL : 0;
- if (no_lock_cp(sd))
- *status |= is_digital_input(sd) ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
+ if (!in_free_run(sd) && no_lock_cp(sd))
+ *status |= is_digital_input(sd) ?
+ V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
v4l2_dbg(1, debug, sd, "%s: status = 0x%x\n", __func__, *status);
@@ -1278,22 +1281,22 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
struct stdi_readback *stdi,
struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
- u32 hfreq = (ADV7604_fsc * 8) / stdi->bl;
+ struct adv76xx_state *state = to_state(sd);
+ u32 hfreq = (ADV76XX_FSC * 8) / stdi->bl;
u32 pix_clk;
int i;
- for (i = 0; adv7604_timings[i].bt.height; i++) {
- if (vtotal(&adv7604_timings[i].bt) != stdi->lcf + 1)
+ for (i = 0; adv76xx_timings[i].bt.height; i++) {
+ if (vtotal(&adv76xx_timings[i].bt) != stdi->lcf + 1)
continue;
- if (adv7604_timings[i].bt.vsync != stdi->lcvs)
+ if (adv76xx_timings[i].bt.vsync != stdi->lcvs)
continue;
- pix_clk = hfreq * htotal(&adv7604_timings[i].bt);
+ pix_clk = hfreq * htotal(&adv76xx_timings[i].bt);
- if ((pix_clk < adv7604_timings[i].bt.pixelclock + 1000000) &&
- (pix_clk > adv7604_timings[i].bt.pixelclock - 1000000)) {
- *timings = adv7604_timings[i];
+ if ((pix_clk < adv76xx_timings[i].bt.pixelclock + 1000000) &&
+ (pix_clk > adv76xx_timings[i].bt.pixelclock - 1000000)) {
+ *timings = adv76xx_timings[i];
return 0;
}
}
@@ -1319,8 +1322,8 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
static int read_stdi(struct v4l2_subdev *sd, struct stdi_readback *stdi)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
u8 polarity;
if (no_lock_stdi(sd) || no_lock_sspd(sd)) {
@@ -1334,7 +1337,7 @@ static int read_stdi(struct v4l2_subdev *sd, struct stdi_readback *stdi)
stdi->lcvs = cp_read(sd, 0xb3) >> 3;
stdi->interlaced = io_read(sd, 0x12) & 0x10;
- if (adv7604_has_afe(state)) {
+ if (adv76xx_has_afe(state)) {
/* read SSPD */
polarity = cp_read(sd, 0xb5);
if ((polarity & 0x03) == 0x01) {
@@ -1373,26 +1376,26 @@ static int read_stdi(struct v4l2_subdev *sd, struct stdi_readback *stdi)
return 0;
}
-static int adv7604_enum_dv_timings(struct v4l2_subdev *sd,
+static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd,
struct v4l2_enum_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
- if (timings->index >= ARRAY_SIZE(adv7604_timings) - 1)
+ if (timings->index >= ARRAY_SIZE(adv76xx_timings) - 1)
return -EINVAL;
if (timings->pad >= state->source_pad)
return -EINVAL;
memset(timings->reserved, 0, sizeof(timings->reserved));
- timings->timings = adv7604_timings[timings->index];
+ timings->timings = adv76xx_timings[timings->index];
return 0;
}
-static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
+static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
if (cap->pad >= state->source_pad)
return -EINVAL;
@@ -1403,7 +1406,7 @@ static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
cap->bt.min_pixelclock = 25000000;
switch (cap->pad) {
- case ADV7604_PAD_HDMI_PORT_A:
+ case ADV76XX_PAD_HDMI_PORT_A:
case ADV7604_PAD_HDMI_PORT_B:
case ADV7604_PAD_HDMI_PORT_C:
case ADV7604_PAD_HDMI_PORT_D:
@@ -1424,16 +1427,16 @@ static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
}
/* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
- if the format is listed in adv7604_timings[] */
-static void adv7604_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
+ if the format is listed in adv76xx_timings[] */
+static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
int i;
- for (i = 0; adv7604_timings[i].bt.width; i++) {
- if (v4l2_match_dv_timings(timings, &adv7604_timings[i],
+ for (i = 0; adv76xx_timings[i].bt.width; i++) {
+ if (v4l2_match_dv_timings(timings, &adv76xx_timings[i],
is_digital_input(sd) ? 250000 : 1000000)) {
- *timings = adv7604_timings[i];
+ *timings = adv76xx_timings[i];
break;
}
}
@@ -1471,11 +1474,11 @@ static unsigned int adv7611_read_hdmi_pixelclock(struct v4l2_subdev *sd)
return ((a << 1) | (b >> 7)) * 1000000 + (b & 0x7f) * 1000000 / 128;
}
-static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
+static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
struct v4l2_bt_timings *bt = &timings->bt;
struct stdi_readback stdi;
@@ -1519,7 +1522,7 @@ static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
bt->il_vsync = hdmi_read16(sd, 0x30, 0x1fff) / 2;
bt->il_vbackporch = hdmi_read16(sd, 0x34, 0x1fff) / 2;
}
- adv7604_fill_optional_dv_timings_fields(sd, timings);
+ adv76xx_fill_optional_dv_timings_fields(sd, timings);
} else {
/* find format
* Since LCVS values are inaccurate [REF_03, p. 275-276],
@@ -1576,16 +1579,16 @@ found:
}
if (debug > 1)
- v4l2_print_dv_timings(sd->name, "adv7604_query_dv_timings: ",
+ v4l2_print_dv_timings(sd->name, "adv76xx_query_dv_timings: ",
timings, true);
return 0;
}
-static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
+static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
struct v4l2_bt_timings *bt;
int err;
@@ -1606,7 +1609,7 @@ static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
return -ERANGE;
}
- adv7604_fill_optional_dv_timings_fields(sd, timings);
+ adv76xx_fill_optional_dv_timings_fields(sd, timings);
state->timings = *timings;
@@ -1623,15 +1626,15 @@ static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
set_rgb_quantization_range(sd);
if (debug > 1)
- v4l2_print_dv_timings(sd->name, "adv7604_s_dv_timings: ",
+ v4l2_print_dv_timings(sd->name, "adv76xx_s_dv_timings: ",
timings, true);
return 0;
}
-static int adv7604_g_dv_timings(struct v4l2_subdev *sd,
+static int adv76xx_g_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
*timings = state->timings;
return 0;
@@ -1649,7 +1652,7 @@ static void adv7611_set_termination(struct v4l2_subdev *sd, bool enable)
static void enable_input(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
if (is_analog_input(sd)) {
io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
@@ -1666,7 +1669,7 @@ static void enable_input(struct v4l2_subdev *sd)
static void disable_input(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
hdmi_write_clr_set(sd, 0x1a, 0x10, 0x10); /* Mute audio */
msleep(16); /* 512 samples with >= 32 kHz sample rate [REF_03, c. 7.16.10] */
@@ -1676,11 +1679,11 @@ static void disable_input(struct v4l2_subdev *sd)
static void select_input(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
if (is_analog_input(sd)) {
- adv7604_write_reg_seq(sd, info->recommended_settings[0]);
+ adv76xx_write_reg_seq(sd, info->recommended_settings[0]);
afe_write(sd, 0x00, 0x08); /* power up ADC */
afe_write(sd, 0x01, 0x06); /* power up Analog Front End */
@@ -1688,9 +1691,9 @@ static void select_input(struct v4l2_subdev *sd)
} else if (is_digital_input(sd)) {
hdmi_write(sd, 0x00, state->selected_input & 0x03);
- adv7604_write_reg_seq(sd, info->recommended_settings[1]);
+ adv76xx_write_reg_seq(sd, info->recommended_settings[1]);
- if (adv7604_has_afe(state)) {
+ if (adv76xx_has_afe(state)) {
afe_write(sd, 0x00, 0xff); /* power down ADC */
afe_write(sd, 0x01, 0xfe); /* power down Analog Front End */
afe_write(sd, 0xc8, 0x40); /* phase control */
@@ -1705,10 +1708,10 @@ static void select_input(struct v4l2_subdev *sd)
}
}
-static int adv7604_s_routing(struct v4l2_subdev *sd,
+static int adv76xx_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
v4l2_dbg(2, debug, sd, "%s: input %d, selected input %d",
__func__, input, state->selected_input);
@@ -1730,11 +1733,11 @@ static int adv7604_s_routing(struct v4l2_subdev *sd,
return 0;
}
-static int adv7604_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+static int adv76xx_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
if (code->index >= state->info->nformats)
return -EINVAL;
@@ -1744,7 +1747,7 @@ static int adv7604_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static void adv7604_fill_format(struct adv7604_state *state,
+static void adv76xx_fill_format(struct adv76xx_state *state,
struct v4l2_mbus_framefmt *format)
{
memset(format, 0, sizeof(*format));
@@ -1752,8 +1755,9 @@ static void adv7604_fill_format(struct adv7604_state *state,
format->width = state->timings.bt.width;
format->height = state->timings.bt.height;
format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861)
+ if (state->timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO)
format->colorspace = (state->timings.bt.height <= 576) ?
V4L2_COLORSPACE_SMPTE170M : V4L2_COLORSPACE_REC709;
}
@@ -1765,7 +1769,7 @@ static void adv7604_fill_format(struct adv7604_state *state,
*
* The following table gives the op_ch_value from the format component order
* (expressed as op_ch_sel value in column) and the bus reordering (expressed as
- * adv7604_bus_order value in row).
+ * adv76xx_bus_order value in row).
*
* | GBR(0) GRB(1) BGR(2) RGB(3) BRG(4) RBG(5)
* ----------+-------------------------------------------------
@@ -1776,11 +1780,11 @@ static void adv7604_fill_format(struct adv7604_state *state,
* BRG (ROR) | BRG RBG GRB GBR RGB BGR
* GBR (ROL) | RGB BGR RBG BRG GBR GRB
*/
-static unsigned int adv7604_op_ch_sel(struct adv7604_state *state)
+static unsigned int adv76xx_op_ch_sel(struct adv76xx_state *state)
{
#define _SEL(a,b,c,d,e,f) { \
- ADV7604_OP_CH_SEL_##a, ADV7604_OP_CH_SEL_##b, ADV7604_OP_CH_SEL_##c, \
- ADV7604_OP_CH_SEL_##d, ADV7604_OP_CH_SEL_##e, ADV7604_OP_CH_SEL_##f }
+ ADV76XX_OP_CH_SEL_##a, ADV76XX_OP_CH_SEL_##b, ADV76XX_OP_CH_SEL_##c, \
+ ADV76XX_OP_CH_SEL_##d, ADV76XX_OP_CH_SEL_##e, ADV76XX_OP_CH_SEL_##f }
#define _BUS(x) [ADV7604_BUS_ORDER_##x]
static const unsigned int op_ch_sel[6][6] = {
@@ -1795,33 +1799,34 @@ static unsigned int adv7604_op_ch_sel(struct adv7604_state *state)
return op_ch_sel[state->pdata.bus_order][state->format->op_ch_sel >> 5];
}
-static void adv7604_setup_format(struct adv7604_state *state)
+static void adv76xx_setup_format(struct adv76xx_state *state)
{
struct v4l2_subdev *sd = &state->sd;
io_write_clr_set(sd, 0x02, 0x02,
- state->format->rgb_out ? ADV7604_RGB_OUT : 0);
+ state->format->rgb_out ? ADV76XX_RGB_OUT : 0);
io_write(sd, 0x03, state->format->op_format_sel |
state->pdata.op_format_mode_sel);
- io_write_clr_set(sd, 0x04, 0xe0, adv7604_op_ch_sel(state));
+ io_write_clr_set(sd, 0x04, 0xe0, adv76xx_op_ch_sel(state));
io_write_clr_set(sd, 0x05, 0x01,
- state->format->swap_cb_cr ? ADV7604_OP_SWAP_CB_CR : 0);
+ state->format->swap_cb_cr ? ADV76XX_OP_SWAP_CB_CR : 0);
}
-static int adv7604_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int adv76xx_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
if (format->pad != state->source_pad)
return -EINVAL;
- adv7604_fill_format(state, &format->format);
+ adv76xx_fill_format(state, &format->format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(fh, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
format->format.code = fmt->code;
} else {
format->format.code = state->format->code;
@@ -1830,39 +1835,40 @@ static int adv7604_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
-static int adv7604_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int adv76xx_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_format_info *info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_format_info *info;
if (format->pad != state->source_pad)
return -EINVAL;
- info = adv7604_format_info(state, format->format.code);
+ info = adv76xx_format_info(state, format->format.code);
if (info == NULL)
- info = adv7604_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
+ info = adv76xx_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
- adv7604_fill_format(state, &format->format);
+ adv76xx_fill_format(state, &format->format);
format->format.code = info->code;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(fh, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
fmt->code = format->format.code;
} else {
state->format = info;
- adv7604_setup_format(state);
+ adv76xx_setup_format(state);
}
return 0;
}
-static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
const u8 irq_reg_0x43 = io_read(sd, 0x43);
const u8 irq_reg_0x6b = io_read(sd, 0x6b);
const u8 irq_reg_0x70 = io_read(sd, 0x70);
@@ -1890,7 +1896,7 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
"%s: fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
__func__, fmt_change, fmt_change_digital);
- v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
+ v4l2_subdev_notify(sd, ADV76XX_FMT_CHANGE, NULL);
if (handled)
*handled = true;
@@ -1909,22 +1915,22 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
if (tx_5v) {
v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
io_write(sd, 0x71, tx_5v);
- adv7604_s_detect_tx_5v_ctrl(sd);
+ adv76xx_s_detect_tx_5v_ctrl(sd);
if (handled)
*handled = true;
}
return 0;
}
-static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
+static int adv76xx_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
u8 *data = NULL;
memset(edid->reserved, 0, sizeof(edid->reserved));
switch (edid->pad) {
- case ADV7604_PAD_HDMI_PORT_A:
+ case ADV76XX_PAD_HDMI_PORT_A:
case ADV7604_PAD_HDMI_PORT_B:
case ADV7604_PAD_HDMI_PORT_C:
case ADV7604_PAD_HDMI_PORT_D:
@@ -1982,10 +1988,10 @@ static int get_edid_spa_location(const u8 *edid)
return -1;
}
-static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
+static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
int spa_loc;
int err;
int i;
@@ -1999,7 +2005,7 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
if (edid->blocks == 0) {
/* Disable hotplug and I2C access to EDID RAM from DDC port */
state->edid.present &= ~(1 << edid->pad);
- adv7604_set_hpd(state, state->edid.present);
+ adv76xx_set_hpd(state, state->edid.present);
rep_write_clr_set(sd, info->edid_enable_reg, 0x0f, state->edid.present);
/* Fall back to a 16:9 aspect ratio */
@@ -2023,7 +2029,7 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
/* Disable hotplug and I2C access to EDID RAM from DDC port */
cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
- adv7604_set_hpd(state, 0);
+ adv76xx_set_hpd(state, 0);
rep_write_clr_set(sd, info->edid_enable_reg, 0x0f, 0x00);
spa_loc = get_edid_spa_location(edid->edid);
@@ -2031,7 +2037,7 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
spa_loc = 0xc0; /* Default value [REF_02, p. 116] */
switch (edid->pad) {
- case ADV7604_PAD_HDMI_PORT_A:
+ case ADV76XX_PAD_HDMI_PORT_A:
state->spa_port_a[0] = edid->edid[spa_loc];
state->spa_port_a[1] = edid->edid[spa_loc + 1];
break;
@@ -2074,7 +2080,7 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
return err;
}
- /* adv7604 calculates the checksums and enables I2C access to internal
+ /* adv76xx calculates the checksums and enables I2C access to internal
EDID RAM from DDC port. */
rep_write_clr_set(sd, info->edid_enable_reg, 0x0f, state->edid.present);
@@ -2138,10 +2144,10 @@ static void print_avi_infoframe(struct v4l2_subdev *sd)
buf[8], buf[9], buf[10], buf[11], buf[12], buf[13]);
}
-static int adv7604_log_status(struct v4l2_subdev *sd)
+static int adv76xx_log_status(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
struct v4l2_dv_timings timings;
struct stdi_readback stdi;
u8 reg_io_0x02 = io_read(sd, 0x02);
@@ -2200,7 +2206,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "STDI locked: %s\n", no_lock_stdi(sd) ? "false" : "true");
v4l2_info(sd, "CP locked: %s\n", no_lock_cp(sd) ? "false" : "true");
v4l2_info(sd, "CP free run: %s\n",
- (!!(cp_read(sd, 0xff) & 0x10) ? "on" : "off"));
+ (in_free_run(sd)) ? "on" : "off");
v4l2_info(sd, "Prim-mode = 0x%x, video std = 0x%x, v_freq = 0x%x\n",
io_read(sd, 0x01) & 0x0f, io_read(sd, 0x00) & 0x3f,
(io_read(sd, 0x01) & 0x70) >> 4);
@@ -2213,7 +2219,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
stdi.lcf, stdi.bl, stdi.lcvs,
stdi.interlaced ? "interlaced" : "progressive",
stdi.hs_pol, stdi.vs_pol);
- if (adv7604_query_dv_timings(sd, &timings))
+ if (adv76xx_query_dv_timings(sd, &timings))
v4l2_info(sd, "No video detected\n");
else
v4l2_print_dv_timings(sd->name, "Detected format: ",
@@ -2235,7 +2241,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
((reg_io_0x02 & 0x04) ^ (reg_io_0x02 & 0x01)) ?
"enabled" : "disabled");
v4l2_info(sd, "Color space conversion: %s\n",
- csc_coeff_sel_rb[cp_read(sd, 0xfc) >> 4]);
+ csc_coeff_sel_rb[cp_read(sd, info->cp_csc) >> 4]);
if (!is_digital_input(sd))
return 0;
@@ -2279,47 +2285,47 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
/* ----------------------------------------------------------------------- */
-static const struct v4l2_ctrl_ops adv7604_ctrl_ops = {
- .s_ctrl = adv7604_s_ctrl,
+static const struct v4l2_ctrl_ops adv76xx_ctrl_ops = {
+ .s_ctrl = adv76xx_s_ctrl,
};
-static const struct v4l2_subdev_core_ops adv7604_core_ops = {
- .log_status = adv7604_log_status,
- .interrupt_service_routine = adv7604_isr,
+static const struct v4l2_subdev_core_ops adv76xx_core_ops = {
+ .log_status = adv76xx_log_status,
+ .interrupt_service_routine = adv76xx_isr,
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = adv7604_g_register,
- .s_register = adv7604_s_register,
+ .g_register = adv76xx_g_register,
+ .s_register = adv76xx_s_register,
#endif
};
-static const struct v4l2_subdev_video_ops adv7604_video_ops = {
- .s_routing = adv7604_s_routing,
- .g_input_status = adv7604_g_input_status,
- .s_dv_timings = adv7604_s_dv_timings,
- .g_dv_timings = adv7604_g_dv_timings,
- .query_dv_timings = adv7604_query_dv_timings,
+static const struct v4l2_subdev_video_ops adv76xx_video_ops = {
+ .s_routing = adv76xx_s_routing,
+ .g_input_status = adv76xx_g_input_status,
+ .s_dv_timings = adv76xx_s_dv_timings,
+ .g_dv_timings = adv76xx_g_dv_timings,
+ .query_dv_timings = adv76xx_query_dv_timings,
};
-static const struct v4l2_subdev_pad_ops adv7604_pad_ops = {
- .enum_mbus_code = adv7604_enum_mbus_code,
- .get_fmt = adv7604_get_format,
- .set_fmt = adv7604_set_format,
- .get_edid = adv7604_get_edid,
- .set_edid = adv7604_set_edid,
- .dv_timings_cap = adv7604_dv_timings_cap,
- .enum_dv_timings = adv7604_enum_dv_timings,
+static const struct v4l2_subdev_pad_ops adv76xx_pad_ops = {
+ .enum_mbus_code = adv76xx_enum_mbus_code,
+ .get_fmt = adv76xx_get_format,
+ .set_fmt = adv76xx_set_format,
+ .get_edid = adv76xx_get_edid,
+ .set_edid = adv76xx_set_edid,
+ .dv_timings_cap = adv76xx_dv_timings_cap,
+ .enum_dv_timings = adv76xx_enum_dv_timings,
};
-static const struct v4l2_subdev_ops adv7604_ops = {
- .core = &adv7604_core_ops,
- .video = &adv7604_video_ops,
- .pad = &adv7604_pad_ops,
+static const struct v4l2_subdev_ops adv76xx_ops = {
+ .core = &adv76xx_core_ops,
+ .video = &adv76xx_video_ops,
+ .pad = &adv76xx_pad_ops,
};
/* -------------------------- custom ctrls ---------------------------------- */
static const struct v4l2_ctrl_config adv7604_ctrl_analog_sampling_phase = {
- .ops = &adv7604_ctrl_ops,
+ .ops = &adv76xx_ctrl_ops,
.id = V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE,
.name = "Analog Sampling Phase",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -2329,8 +2335,8 @@ static const struct v4l2_ctrl_config adv7604_ctrl_analog_sampling_phase = {
.def = 0,
};
-static const struct v4l2_ctrl_config adv7604_ctrl_free_run_color_manual = {
- .ops = &adv7604_ctrl_ops,
+static const struct v4l2_ctrl_config adv76xx_ctrl_free_run_color_manual = {
+ .ops = &adv76xx_ctrl_ops,
.id = V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL,
.name = "Free Running Color, Manual",
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -2340,8 +2346,8 @@ static const struct v4l2_ctrl_config adv7604_ctrl_free_run_color_manual = {
.def = false,
};
-static const struct v4l2_ctrl_config adv7604_ctrl_free_run_color = {
- .ops = &adv7604_ctrl_ops,
+static const struct v4l2_ctrl_config adv76xx_ctrl_free_run_color = {
+ .ops = &adv76xx_ctrl_ops,
.id = V4L2_CID_ADV_RX_FREE_RUN_COLOR,
.name = "Free Running Color",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -2353,11 +2359,11 @@ static const struct v4l2_ctrl_config adv7604_ctrl_free_run_color = {
/* ----------------------------------------------------------------------- */
-static int adv7604_core_init(struct v4l2_subdev *sd)
+static int adv76xx_core_init(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
- const struct adv7604_chip_info *info = state->info;
- struct adv7604_platform_data *pdata = &state->pdata;
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
+ struct adv76xx_platform_data *pdata = &state->pdata;
hdmi_write(sd, 0x48,
(pdata->disable_pwrdnb ? 0x80 : 0) |
@@ -2385,7 +2391,7 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
io_write_clr_set(sd, 0x05, 0x0e, pdata->blank_data << 3 |
pdata->insert_av_codes << 2 |
pdata->replicate_av_codes << 1);
- adv7604_setup_format(state);
+ adv76xx_setup_format(state);
cp_write(sd, 0x69, 0x30); /* Enable CP CSC */
@@ -2415,7 +2421,7 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
/* TODO from platform data */
afe_write(sd, 0xb5, 0x01); /* Setting MCLK to 256Fs */
- if (adv7604_has_afe(state)) {
+ if (adv76xx_has_afe(state)) {
afe_write(sd, 0x02, pdata->ain_sel); /* Select analog input muxing mode */
io_write_clr_set(sd, 0x30, 1 << 4, pdata->output_bus_lsb_to_msb << 4);
}
@@ -2440,7 +2446,7 @@ static void adv7611_setup_irqs(struct v4l2_subdev *sd)
io_write(sd, 0x41, 0xd0); /* STDI irq for any change, disable INT2 */
}
-static void adv7604_unregister_clients(struct adv7604_state *state)
+static void adv76xx_unregister_clients(struct adv76xx_state *state)
{
unsigned int i;
@@ -2450,7 +2456,7 @@ static void adv7604_unregister_clients(struct adv7604_state *state)
}
}
-static struct i2c_client *adv7604_dummy_client(struct v4l2_subdev *sd,
+static struct i2c_client *adv76xx_dummy_client(struct v4l2_subdev *sd,
u8 addr, u8 io_reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -2460,74 +2466,74 @@ static struct i2c_client *adv7604_dummy_client(struct v4l2_subdev *sd,
return i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
}
-static const struct adv7604_reg_seq adv7604_recommended_settings_afe[] = {
+static const struct adv76xx_reg_seq adv7604_recommended_settings_afe[] = {
/* reset ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x0d), 0x04 }, /* HDMI filter optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x0d), 0x04 }, /* HDMI filter optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x3d), 0x00 }, /* DDC bus active pull-up control */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x3e), 0x74 }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x4e), 0x3b }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x57), 0x74 }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x58), 0x63 }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x8d), 0x18 }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x8e), 0x34 }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x93), 0x88 }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x94), 0x2e }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x96), 0x00 }, /* enable automatic EQ changing */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x0d), 0x04 }, /* HDMI filter optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x0d), 0x04 }, /* HDMI filter optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x3d), 0x00 }, /* DDC bus active pull-up control */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x3e), 0x74 }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x4e), 0x3b }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x57), 0x74 }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x58), 0x63 }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x8d), 0x18 }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x8e), 0x34 }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x93), 0x88 }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x94), 0x2e }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x96), 0x00 }, /* enable automatic EQ changing */
/* set ADI recommended settings for digitizer */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 17. */
- { ADV7604_REG(ADV7604_PAGE_AFE, 0x12), 0x7b }, /* ADC noise shaping filter controls */
- { ADV7604_REG(ADV7604_PAGE_AFE, 0x0c), 0x1f }, /* CP core gain controls */
- { ADV7604_REG(ADV7604_PAGE_CP, 0x3e), 0x04 }, /* CP core pre-gain control */
- { ADV7604_REG(ADV7604_PAGE_CP, 0xc3), 0x39 }, /* CP coast control. Graphics mode */
- { ADV7604_REG(ADV7604_PAGE_CP, 0x40), 0x5c }, /* CP core pre-gain control. Graphics mode */
+ { ADV76XX_REG(ADV76XX_PAGE_AFE, 0x12), 0x7b }, /* ADC noise shaping filter controls */
+ { ADV76XX_REG(ADV76XX_PAGE_AFE, 0x0c), 0x1f }, /* CP core gain controls */
+ { ADV76XX_REG(ADV76XX_PAGE_CP, 0x3e), 0x04 }, /* CP core pre-gain control */
+ { ADV76XX_REG(ADV76XX_PAGE_CP, 0xc3), 0x39 }, /* CP coast control. Graphics mode */
+ { ADV76XX_REG(ADV76XX_PAGE_CP, 0x40), 0x5c }, /* CP core pre-gain control. Graphics mode */
- { ADV7604_REG_SEQ_TERM, 0 },
+ { ADV76XX_REG_SEQ_TERM, 0 },
};
-static const struct adv7604_reg_seq adv7604_recommended_settings_hdmi[] = {
+static const struct adv76xx_reg_seq adv7604_recommended_settings_hdmi[] = {
/* set ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x0d), 0x84 }, /* HDMI filter optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x3d), 0x10 }, /* DDC bus active pull-up control */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x3e), 0x39 }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x4e), 0x3b }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x57), 0xb6 }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x58), 0x03 }, /* TMDS PLL optimization */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x8d), 0x18 }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x8e), 0x34 }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x93), 0x8b }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x94), 0x2d }, /* equaliser */
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x96), 0x01 }, /* enable automatic EQ changing */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x0d), 0x84 }, /* HDMI filter optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x3d), 0x10 }, /* DDC bus active pull-up control */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x3e), 0x39 }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x4e), 0x3b }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x57), 0xb6 }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x58), 0x03 }, /* TMDS PLL optimization */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x8d), 0x18 }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x8e), 0x34 }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x93), 0x8b }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x94), 0x2d }, /* equaliser */
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x96), 0x01 }, /* enable automatic EQ changing */
/* reset ADI recommended settings for digitizer */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 17. */
- { ADV7604_REG(ADV7604_PAGE_AFE, 0x12), 0xfb }, /* ADC noise shaping filter controls */
- { ADV7604_REG(ADV7604_PAGE_AFE, 0x0c), 0x0d }, /* CP core gain controls */
+ { ADV76XX_REG(ADV76XX_PAGE_AFE, 0x12), 0xfb }, /* ADC noise shaping filter controls */
+ { ADV76XX_REG(ADV76XX_PAGE_AFE, 0x0c), 0x0d }, /* CP core gain controls */
- { ADV7604_REG_SEQ_TERM, 0 },
+ { ADV76XX_REG_SEQ_TERM, 0 },
};
-static const struct adv7604_reg_seq adv7611_recommended_settings_hdmi[] = {
+static const struct adv76xx_reg_seq adv7611_recommended_settings_hdmi[] = {
/* ADV7611 Register Settings Recommendations Rev 1.5, May 2014 */
- { ADV7604_REG(ADV7604_PAGE_CP, 0x6c), 0x00 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x9b), 0x03 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x6f), 0x08 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x85), 0x1f },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x87), 0x70 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x57), 0xda },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x58), 0x01 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x03), 0x98 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x4c), 0x44 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x8d), 0x04 },
- { ADV7604_REG(ADV7604_PAGE_HDMI, 0x8e), 0x1e },
-
- { ADV7604_REG_SEQ_TERM, 0 },
+ { ADV76XX_REG(ADV76XX_PAGE_CP, 0x6c), 0x00 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x9b), 0x03 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x6f), 0x08 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x85), 0x1f },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x87), 0x70 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x57), 0xda },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x58), 0x01 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x03), 0x98 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x4c), 0x44 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x8d), 0x04 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x8e), 0x1e },
+
+ { ADV76XX_REG_SEQ_TERM, 0 },
};
-static const struct adv7604_chip_info adv7604_chip_info[] = {
+static const struct adv76xx_chip_info adv76xx_chip_info[] = {
[ADV7604] = {
.type = ADV7604,
.has_afe = true,
@@ -2539,6 +2545,7 @@ static const struct adv7604_chip_info adv7604_chip_info[] = {
.tdms_lock_mask = 0xe0,
.cable_det_mask = 0x1e,
.fmt_change_digital_mask = 0xc1,
+ .cp_csc = 0xfc,
.formats = adv7604_formats,
.nformats = ARRAY_SIZE(adv7604_formats),
.set_termination = adv7604_set_termination,
@@ -2553,18 +2560,18 @@ static const struct adv7604_chip_info adv7604_chip_info[] = {
[0] = ARRAY_SIZE(adv7604_recommended_settings_afe),
[1] = ARRAY_SIZE(adv7604_recommended_settings_hdmi),
},
- .page_mask = BIT(ADV7604_PAGE_IO) | BIT(ADV7604_PAGE_AVLINK) |
- BIT(ADV7604_PAGE_CEC) | BIT(ADV7604_PAGE_INFOFRAME) |
+ .page_mask = BIT(ADV76XX_PAGE_IO) | BIT(ADV7604_PAGE_AVLINK) |
+ BIT(ADV76XX_PAGE_CEC) | BIT(ADV76XX_PAGE_INFOFRAME) |
BIT(ADV7604_PAGE_ESDP) | BIT(ADV7604_PAGE_DPP) |
- BIT(ADV7604_PAGE_AFE) | BIT(ADV7604_PAGE_REP) |
- BIT(ADV7604_PAGE_EDID) | BIT(ADV7604_PAGE_HDMI) |
- BIT(ADV7604_PAGE_TEST) | BIT(ADV7604_PAGE_CP) |
+ BIT(ADV76XX_PAGE_AFE) | BIT(ADV76XX_PAGE_REP) |
+ BIT(ADV76XX_PAGE_EDID) | BIT(ADV76XX_PAGE_HDMI) |
+ BIT(ADV76XX_PAGE_TEST) | BIT(ADV76XX_PAGE_CP) |
BIT(ADV7604_PAGE_VDP),
},
[ADV7611] = {
.type = ADV7611,
.has_afe = false,
- .max_port = ADV7604_PAD_HDMI_PORT_A,
+ .max_port = ADV76XX_PAD_HDMI_PORT_A,
.num_dv_ports = 1,
.edid_enable_reg = 0x74,
.edid_status_reg = 0x76,
@@ -2572,6 +2579,7 @@ static const struct adv7604_chip_info adv7604_chip_info[] = {
.tdms_lock_mask = 0x43,
.cable_det_mask = 0x01,
.fmt_change_digital_mask = 0x03,
+ .cp_csc = 0xf4,
.formats = adv7611_formats,
.nformats = ARRAY_SIZE(adv7611_formats),
.set_termination = adv7611_set_termination,
@@ -2584,34 +2592,34 @@ static const struct adv7604_chip_info adv7604_chip_info[] = {
.num_recommended_settings = {
[1] = ARRAY_SIZE(adv7611_recommended_settings_hdmi),
},
- .page_mask = BIT(ADV7604_PAGE_IO) | BIT(ADV7604_PAGE_CEC) |
- BIT(ADV7604_PAGE_INFOFRAME) | BIT(ADV7604_PAGE_AFE) |
- BIT(ADV7604_PAGE_REP) | BIT(ADV7604_PAGE_EDID) |
- BIT(ADV7604_PAGE_HDMI) | BIT(ADV7604_PAGE_CP),
+ .page_mask = BIT(ADV76XX_PAGE_IO) | BIT(ADV76XX_PAGE_CEC) |
+ BIT(ADV76XX_PAGE_INFOFRAME) | BIT(ADV76XX_PAGE_AFE) |
+ BIT(ADV76XX_PAGE_REP) | BIT(ADV76XX_PAGE_EDID) |
+ BIT(ADV76XX_PAGE_HDMI) | BIT(ADV76XX_PAGE_CP),
},
};
-static struct i2c_device_id adv7604_i2c_id[] = {
- { "adv7604", (kernel_ulong_t)&adv7604_chip_info[ADV7604] },
- { "adv7611", (kernel_ulong_t)&adv7604_chip_info[ADV7611] },
+static struct i2c_device_id adv76xx_i2c_id[] = {
+ { "adv7604", (kernel_ulong_t)&adv76xx_chip_info[ADV7604] },
+ { "adv7611", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
{ }
};
-MODULE_DEVICE_TABLE(i2c, adv7604_i2c_id);
+MODULE_DEVICE_TABLE(i2c, adv76xx_i2c_id);
-static struct of_device_id adv7604_of_id[] __maybe_unused = {
- { .compatible = "adi,adv7611", .data = &adv7604_chip_info[ADV7611] },
+static struct of_device_id adv76xx_of_id[] __maybe_unused = {
+ { .compatible = "adi,adv7611", .data = &adv76xx_chip_info[ADV7611] },
{ }
};
-MODULE_DEVICE_TABLE(of, adv7604_of_id);
+MODULE_DEVICE_TABLE(of, adv76xx_of_id);
-static int adv7604_parse_dt(struct adv7604_state *state)
+static int adv76xx_parse_dt(struct adv76xx_state *state)
{
struct v4l2_of_endpoint bus_cfg;
struct device_node *endpoint;
struct device_node *np;
unsigned int flags;
- np = state->i2c_clients[ADV7604_PAGE_IO]->dev.of_node;
+ np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node;
/* Parse the endpoint. */
endpoint = of_graph_get_next_endpoint(np, NULL);
@@ -2638,20 +2646,20 @@ static int adv7604_parse_dt(struct adv7604_state *state)
}
/* Disable the interrupt for now as no DT-based board uses it. */
- state->pdata.int1_config = ADV7604_INT1_CONFIG_DISABLED;
+ state->pdata.int1_config = ADV76XX_INT1_CONFIG_DISABLED;
/* Use the default I2C addresses. */
state->pdata.i2c_addresses[ADV7604_PAGE_AVLINK] = 0x42;
- state->pdata.i2c_addresses[ADV7604_PAGE_CEC] = 0x40;
- state->pdata.i2c_addresses[ADV7604_PAGE_INFOFRAME] = 0x3e;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_CEC] = 0x40;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_INFOFRAME] = 0x3e;
state->pdata.i2c_addresses[ADV7604_PAGE_ESDP] = 0x38;
state->pdata.i2c_addresses[ADV7604_PAGE_DPP] = 0x3c;
- state->pdata.i2c_addresses[ADV7604_PAGE_AFE] = 0x26;
- state->pdata.i2c_addresses[ADV7604_PAGE_REP] = 0x32;
- state->pdata.i2c_addresses[ADV7604_PAGE_EDID] = 0x36;
- state->pdata.i2c_addresses[ADV7604_PAGE_HDMI] = 0x34;
- state->pdata.i2c_addresses[ADV7604_PAGE_TEST] = 0x30;
- state->pdata.i2c_addresses[ADV7604_PAGE_CP] = 0x22;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_AFE] = 0x26;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_REP] = 0x32;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_EDID] = 0x36;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_HDMI] = 0x34;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_TEST] = 0x30;
+ state->pdata.i2c_addresses[ADV76XX_PAGE_CP] = 0x22;
state->pdata.i2c_addresses[ADV7604_PAGE_VDP] = 0x24;
/* Hardcode the remaining platform data fields. */
@@ -2666,12 +2674,12 @@ static int adv7604_parse_dt(struct adv7604_state *state)
return 0;
}
-static int adv7604_probe(struct i2c_client *client,
+static int adv76xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
static const struct v4l2_dv_timings cea640x480 =
V4L2_DV_BT_CEA_640X480P59_94;
- struct adv7604_state *state;
+ struct adv76xx_state *state;
struct v4l2_ctrl_handler *hdl;
struct v4l2_subdev *sd;
unsigned int i;
@@ -2681,16 +2689,16 @@ static int adv7604_probe(struct i2c_client *client,
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- v4l_dbg(1, debug, client, "detecting adv7604 client on address 0x%x\n",
+ v4l_dbg(1, debug, client, "detecting adv76xx client on address 0x%x\n",
client->addr << 1);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (!state) {
- v4l_err(client, "Could not allocate adv7604_state memory!\n");
+ v4l_err(client, "Could not allocate adv76xx_state memory!\n");
return -ENOMEM;
}
- state->i2c_clients[ADV7604_PAGE_IO] = client;
+ state->i2c_clients[ADV76XX_PAGE_IO] = client;
/* initialize variables */
state->restart_stdi_once = true;
@@ -2699,18 +2707,18 @@ static int adv7604_probe(struct i2c_client *client,
if (IS_ENABLED(CONFIG_OF) && client->dev.of_node) {
const struct of_device_id *oid;
- oid = of_match_node(adv7604_of_id, client->dev.of_node);
+ oid = of_match_node(adv76xx_of_id, client->dev.of_node);
state->info = oid->data;
- err = adv7604_parse_dt(state);
+ err = adv76xx_parse_dt(state);
if (err < 0) {
v4l_err(client, "DT parsing error\n");
return err;
}
} else if (client->dev.platform_data) {
- struct adv7604_platform_data *pdata = client->dev.platform_data;
+ struct adv76xx_platform_data *pdata = client->dev.platform_data;
- state->info = (const struct adv7604_chip_info *)id->driver_data;
+ state->info = (const struct adv76xx_chip_info *)id->driver_data;
state->pdata = *pdata;
} else {
v4l_err(client, "No platform data!\n");
@@ -2720,20 +2728,20 @@ static int adv7604_probe(struct i2c_client *client,
/* Request GPIOs. */
for (i = 0; i < state->info->num_dv_ports; ++i) {
state->hpd_gpio[i] =
- devm_gpiod_get_index(&client->dev, "hpd", i);
+ devm_gpiod_get_index_optional(&client->dev, "hpd", i,
+ GPIOD_OUT_LOW);
if (IS_ERR(state->hpd_gpio[i]))
- continue;
-
- gpiod_direction_output(state->hpd_gpio[i], 0);
+ return PTR_ERR(state->hpd_gpio[i]);
- v4l_info(client, "Handling HPD %u GPIO\n", i);
+ if (state->hpd_gpio[i])
+ v4l_info(client, "Handling HPD %u GPIO\n", i);
}
state->timings = cea640x480;
- state->format = adv7604_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
+ state->format = adv76xx_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
sd = &state->sd;
- v4l2_i2c_subdev_init(sd, client, &adv7604_ops);
+ v4l2_i2c_subdev_init(sd, client, &adv76xx_ops);
snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
id->name, i2c_adapter_id(client->adapter),
client->addr);
@@ -2763,15 +2771,15 @@ static int adv7604_probe(struct i2c_client *client,
/* control handlers */
hdl = &state->hdl;
- v4l2_ctrl_handler_init(hdl, adv7604_has_afe(state) ? 9 : 8);
+ v4l2_ctrl_handler_init(hdl, adv76xx_has_afe(state) ? 9 : 8);
- v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+ v4l2_ctrl_new_std(hdl, &adv76xx_ctrl_ops,
V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
- v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+ v4l2_ctrl_new_std(hdl, &adv76xx_ctrl_ops,
V4L2_CID_CONTRAST, 0, 255, 1, 128);
- v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+ v4l2_ctrl_new_std(hdl, &adv76xx_ctrl_ops,
V4L2_CID_SATURATION, 0, 255, 1, 128);
- v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+ v4l2_ctrl_new_std(hdl, &adv76xx_ctrl_ops,
V4L2_CID_HUE, 0, 128, 1, 0);
/* private controls */
@@ -2779,18 +2787,18 @@ static int adv7604_probe(struct i2c_client *client,
V4L2_CID_DV_RX_POWER_PRESENT, 0,
(1 << state->info->num_dv_ports) - 1, 0, 0);
state->rgb_quantization_range_ctrl =
- v4l2_ctrl_new_std_menu(hdl, &adv7604_ctrl_ops,
+ v4l2_ctrl_new_std_menu(hdl, &adv76xx_ctrl_ops,
V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
0, V4L2_DV_RGB_RANGE_AUTO);
/* custom controls */
- if (adv7604_has_afe(state))
+ if (adv76xx_has_afe(state))
state->analog_sampling_phase_ctrl =
v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_analog_sampling_phase, NULL);
state->free_run_color_manual_ctrl =
- v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_free_run_color_manual, NULL);
+ v4l2_ctrl_new_custom(hdl, &adv76xx_ctrl_free_run_color_manual, NULL);
state->free_run_color_ctrl =
- v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_free_run_color, NULL);
+ v4l2_ctrl_new_custom(hdl, &adv76xx_ctrl_free_run_color, NULL);
sd->ctrl_handler = hdl;
if (hdl->error) {
@@ -2799,22 +2807,22 @@ static int adv7604_probe(struct i2c_client *client,
}
state->detect_tx_5v_ctrl->is_private = true;
state->rgb_quantization_range_ctrl->is_private = true;
- if (adv7604_has_afe(state))
+ if (adv76xx_has_afe(state))
state->analog_sampling_phase_ctrl->is_private = true;
state->free_run_color_manual_ctrl->is_private = true;
state->free_run_color_ctrl->is_private = true;
- if (adv7604_s_detect_tx_5v_ctrl(sd)) {
+ if (adv76xx_s_detect_tx_5v_ctrl(sd)) {
err = -ENODEV;
goto err_hdl;
}
- for (i = 1; i < ADV7604_PAGE_MAX; ++i) {
+ for (i = 1; i < ADV76XX_PAGE_MAX; ++i) {
if (!(BIT(i) & state->info->page_mask))
continue;
state->i2c_clients[i] =
- adv7604_dummy_client(sd, state->pdata.i2c_addresses[i],
+ adv76xx_dummy_client(sd, state->pdata.i2c_addresses[i],
0xf2 + i);
if (state->i2c_clients[i] == NULL) {
err = -ENOMEM;
@@ -2832,7 +2840,7 @@ static int adv7604_probe(struct i2c_client *client,
}
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
- adv7604_delayed_work_enable_hotplug);
+ adv76xx_delayed_work_enable_hotplug);
state->source_pad = state->info->num_dv_ports
+ (state->info->has_afe ? 2 : 0);
@@ -2845,7 +2853,7 @@ static int adv7604_probe(struct i2c_client *client,
if (err)
goto err_work_queues;
- err = adv7604_core_init(sd);
+ err = adv76xx_core_init(sd);
if (err)
goto err_entity;
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
@@ -2863,7 +2871,7 @@ err_work_queues:
cancel_delayed_work(&state->delayed_work_enable_hotplug);
destroy_workqueue(state->work_queues);
err_i2c:
- adv7604_unregister_clients(state);
+ adv76xx_unregister_clients(state);
err_hdl:
v4l2_ctrl_handler_free(hdl);
return err;
@@ -2871,32 +2879,31 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int adv7604_remove(struct i2c_client *client)
+static int adv76xx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct adv7604_state *state = to_state(sd);
+ struct adv76xx_state *state = to_state(sd);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
destroy_workqueue(state->work_queues);
v4l2_async_unregister_subdev(sd);
- v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
- adv7604_unregister_clients(to_state(sd));
+ adv76xx_unregister_clients(to_state(sd));
v4l2_ctrl_handler_free(sd->ctrl_handler);
return 0;
}
/* ----------------------------------------------------------------------- */
-static struct i2c_driver adv7604_driver = {
+static struct i2c_driver adv76xx_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "adv7604",
- .of_match_table = of_match_ptr(adv7604_of_id),
+ .of_match_table = of_match_ptr(adv76xx_of_id),
},
- .probe = adv7604_probe,
- .remove = adv7604_remove,
- .id_table = adv7604_i2c_id,
+ .probe = adv76xx_probe,
+ .remove = adv76xx_remove,
+ .id_table = adv76xx_i2c_id,
};
-module_i2c_driver(adv7604_driver);
+module_i2c_driver(adv76xx_driver);
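The adv76xx hunk above switches the HPD lines from devm_gpiod_get_index() plus a manual gpiod_direction_output() to devm_gpiod_get_index_optional(), so a missing GPIO is reported as NULL instead of an error and only real failures abort probe. A minimal sketch of that pattern, with illustrative names that are not part of the patch:

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>

	static int example_request_hpd(struct device *dev, struct gpio_desc **hpd,
				       unsigned int num_ports)
	{
		unsigned int i;

		for (i = 0; i < num_ports; i++) {
			/* NULL means "no hpd GPIO described": not an error */
			hpd[i] = devm_gpiod_get_index_optional(dev, "hpd", i,
							       GPIOD_OUT_LOW);
			if (IS_ERR(hpd[i]))
				return PTR_ERR(hpd[i]); /* real failure, abort probe */
			if (hpd[i])
				dev_info(dev, "driving HPD %u from GPIO\n", i);
		}

		return 0;
	}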
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 7c215ee142c4..b5a37fe10a6a 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1119,7 +1119,7 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
/* Receiving DVI-D signal
* ADV7842 selects RGB limited range regardless of
* input format (CE/IT) in automatic mode */
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ if (state->timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
/* RGB limited range (16-235) */
io_write_and_or(sd, 0x02, 0x0f, 0x00);
} else {
@@ -1901,7 +1901,8 @@ static int adv7842_g_mbus_fmt(struct v4l2_subdev *sd,
return 0;
}
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ if (state->timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
fmt->colorspace = (state->timings.bt.height <= 576) ?
V4L2_COLORSPACE_SMPTE170M : V4L2_COLORSPACE_REC709;
}
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 573e08826b9b..bd496447749a 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -5137,6 +5137,9 @@ static int cx25840_probe(struct i2c_client *client,
int default_volume;
u32 id;
u16 device_id;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ int ret;
+#endif
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -5178,6 +5181,33 @@ static int cx25840_probe(struct i2c_client *client,
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &cx25840_ops);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ /*
+ * TODO: add media controller support for analog video inputs like
+ * composite, svideo, etc.
+ * A real input pad for this analog demod would be like:
+ * ___________
+ * TUNER --------> | |
+ * | |
+ * SVIDEO .......> | cx25840 |
+ * | |
+ * COMPOSITE1 ...> |_________|
+ *
+ * However, at least for now, there's not much gain in modelling
+ * those extra inputs, so let's add them only when needed.
+ */
+ state->pads[CX25840_PAD_INPUT].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CX25840_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[CX25840_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_DECODER;
+
+ ret = media_entity_init(&sd->entity, ARRAY_SIZE(state->pads),
+ state->pads, 0);
+ if (ret < 0) {
+ v4l_info(client, "failed to initialize media entity!\n");
+ return ret;
+ }
+#endif
switch (id) {
case CX23885_AV:
diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
index 37bc04217c44..fdea48ce0c03 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.h
+++ b/drivers/media/i2c/cx25840/cx25840-core.h
@@ -41,6 +41,14 @@ enum cx25840_model {
CX25837,
};
+enum cx25840_media_pads {
+ CX25840_PAD_INPUT,
+ CX25840_PAD_VID_OUT,
+ CX25840_PAD_VBI_OUT,
+
+ CX25840_NUM_PADS
+};
+
struct cx25840_state {
struct i2c_client *c;
struct v4l2_subdev sd;
@@ -64,6 +72,9 @@ struct cx25840_state {
wait_queue_head_t fw_wait; /* wake up when the fw load is finished */
struct work_struct fw_work; /* work entry for fw load */
struct cx25840_ir_state *ir_state;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pads[CX25840_NUM_PADS];
+#endif
};
static inline struct cx25840_state *to_state(struct v4l2_subdev *sd)
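The cx25840 hunks above register the decoder as a media entity with the sink/source pads sketched in the TODO diagram. A hedged bridge-side sketch of how such pads could later be wired up; the tuner pad index, the function name and the link flags chosen here are assumptions for illustration, not part of this patch:

	#include <media/media-entity.h>
	#include "cx25840-core.h"

	#define EXAMPLE_TUNER_SRC_PAD	0	/* assumed tuner output pad index */

	static int example_link_tuner_to_decoder(struct media_entity *tuner,
						 struct media_entity *decoder)
	{
		/* Always-enabled, immutable link into the decoder's sink pad */
		return media_entity_create_link(tuner, EXAMPLE_TUNER_SRC_PAD,
						decoder, CX25840_PAD_INPUT,
						MEDIA_LNK_FL_ENABLED |
						MEDIA_LNK_FL_IMMUTABLE);
	}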
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 6ed16e569bbf..6404c0d93e7a 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -531,17 +531,17 @@ static int __find_resolution(struct v4l2_subdev *sd,
}
static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which,
enum m5mols_restype type)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
+ return cfg ? v4l2_subdev_get_try_format(&info->sd, cfg, 0) : NULL;
return &info->ffmt[type];
}
-static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct m5mols_info *info = to_m5mols(sd);
@@ -550,7 +550,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
mutex_lock(&info->lock);
- format = __find_format(info, fh, fmt->which, info->res_type);
+ format = __find_format(info, cfg, fmt->which, info->res_type);
if (format)
fmt->format = *format;
else
@@ -560,7 +560,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return ret;
}
-static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct m5mols_info *info = to_m5mols(sd);
@@ -574,7 +574,7 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (ret < 0)
return ret;
- sfmt = __find_format(info, fh, fmt->which, type);
+ sfmt = __find_format(info, cfg, fmt->which, type);
if (!sfmt)
return 0;
@@ -640,7 +640,7 @@ static int m5mols_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code || code->index >= SIZE_DEFAULT_FFMT)
@@ -895,7 +895,7 @@ static const struct v4l2_subdev_core_ops m5mols_core_ops = {
*/
static int m5mols_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
*format = m5mols_default_ffmt[0];
return 0;
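The m5mols conversion above (repeated for the mt9* sensors below) moves the pad operations from struct v4l2_subdev_fh to struct v4l2_subdev_pad_config, and v4l2_subdev_get_try_format() now takes the subdev plus the pad config. A minimal sketch of the new calling convention; "example_sensor" is a placeholder type, not a driver touched by this patch:

	#include <linux/kernel.h>
	#include <media/v4l2-subdev.h>

	struct example_sensor {
		struct v4l2_subdev sd;
		struct v4l2_mbus_framefmt active_fmt;	/* ACTIVE format */
	};

	static int example_get_fmt(struct v4l2_subdev *sd,
				   struct v4l2_subdev_pad_config *cfg,
				   struct v4l2_subdev_format *fmt)
	{
		struct example_sensor *sensor =
			container_of(sd, struct example_sensor, sd);

		if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
			/* TRY formats now live in the caller-provided pad config */
			fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
		else
			fmt->format = sensor->active_fmt;

		return 0;
	}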
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 76431223f0ff..c7747bd0cabb 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -317,7 +317,7 @@ static int mt9m032_setup_pll(struct mt9m032 *sensor)
*/
static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -328,7 +328,7 @@ static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index != 0 || fse->code != MEDIA_BUS_FMT_Y8_1X8)
@@ -345,18 +345,18 @@ static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
/**
* __mt9m032_get_pad_crop() - get crop rect
* @sensor: pointer to the sensor struct
- * @fh: file handle for getting the try crop rect from
+ * @cfg: v4l2_subdev_pad_config for getting the try crop rect from
* @which: select try or active crop rect
*
* Returns a pointer to the current active or cfg-relative try crop rect
*/
static struct v4l2_rect *
-__mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_fh *fh,
+__mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(fh, 0);
+ return v4l2_subdev_get_try_crop(&sensor->subdev, cfg, 0);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->crop;
default:
@@ -367,18 +367,18 @@ __mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_fh *fh,
/**
* __mt9m032_get_pad_format() - get format
* @sensor: pointer to the sensor struct
- * @fh: file handle for getting the try format from
+ * @cfg: v4l2_subdev_pad_config for getting the try format from
* @which: select try or active format
*
* Returns a pointer to the current active or cfg-relative try format
*/
static struct v4l2_mbus_framefmt *
-__mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_fh *fh,
+__mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(fh, 0);
+ return v4l2_subdev_get_try_format(&sensor->subdev, cfg, 0);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->format;
default:
@@ -387,20 +387,20 @@ __mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_fh *fh,
}
static int mt9m032_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
mutex_lock(&sensor->lock);
- fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which);
+ fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
mutex_unlock(&sensor->lock);
return 0;
}
static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -414,7 +414,7 @@ static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
}
/* Scaling is not supported, the format is thus fixed. */
- fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which);
+ fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
ret = 0;
done:
@@ -423,7 +423,7 @@ done:
}
static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -432,14 +432,14 @@ static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
return -EINVAL;
mutex_lock(&sensor->lock);
- sel->r = *__mt9m032_get_pad_crop(sensor, fh, sel->which);
+ sel->r = *__mt9m032_get_pad_crop(sensor, cfg, sel->which);
mutex_unlock(&sensor->lock);
return 0;
}
static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -475,13 +475,13 @@ static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9m032_get_pad_crop(sensor, fh, sel->which);
+ __crop = __mt9m032_get_pad_crop(sensor, cfg, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- format = __mt9m032_get_pad_format(sensor, fh, sel->which);
+ format = __mt9m032_get_pad_format(sensor, cfg, sel->which);
format->width = rect.width;
format->height = rect.height;
}
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index e3acae9a2ec3..0db15f528ac1 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -15,12 +15,11 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
@@ -28,6 +27,7 @@
#include <linux/videodev2.h>
#include <media/mt9p031.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -135,7 +135,7 @@ struct mt9p031 {
struct aptina_pll pll;
unsigned int clk_div;
bool use_pll;
- int reset;
+ struct gpio_desc *reset;
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *blc_auto;
@@ -251,7 +251,7 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
div = DIV_ROUND_UP(pdata->ext_freq, pdata->target_freq);
div = roundup_pow_of_two(div) / 2;
- mt9p031->clk_div = max_t(unsigned int, div, 64);
+ mt9p031->clk_div = min_t(unsigned int, div, 64);
mt9p031->use_pll = false;
return 0;
@@ -308,9 +308,9 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
{
int ret;
- /* Ensure RESET_BAR is low */
- if (gpio_is_valid(mt9p031->reset)) {
- gpio_set_value(mt9p031->reset, 0);
+ /* Ensure RESET_BAR is active */
+ if (mt9p031->reset) {
+ gpiod_set_value(mt9p031->reset, 1);
usleep_range(1000, 2000);
}
@@ -331,8 +331,8 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
}
/* Now RESET_BAR must be high */
- if (gpio_is_valid(mt9p031->reset)) {
- gpio_set_value(mt9p031->reset, 1);
+ if (mt9p031->reset) {
+ gpiod_set_value(mt9p031->reset, 0);
usleep_range(1000, 2000);
}
@@ -341,8 +341,8 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
static void mt9p031_power_off(struct mt9p031 *mt9p031)
{
- if (gpio_is_valid(mt9p031->reset)) {
- gpio_set_value(mt9p031->reset, 0);
+ if (mt9p031->reset) {
+ gpiod_set_value(mt9p031->reset, 1);
usleep_range(1000, 2000);
}
@@ -474,7 +474,7 @@ static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -487,7 +487,7 @@ static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -505,12 +505,12 @@ static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
}
static struct v4l2_mbus_framefmt *
-__mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
+__mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&mt9p031->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->format;
default:
@@ -519,12 +519,12 @@ __mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
}
static struct v4l2_rect *
-__mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
+__mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(fh, pad);
+ return v4l2_subdev_get_try_crop(&mt9p031->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->crop;
default:
@@ -533,18 +533,18 @@ __mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
}
static int mt9p031_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
- fmt->format = *__mt9p031_get_pad_format(mt9p031, fh, fmt->pad,
+ fmt->format = *__mt9p031_get_pad_format(mt9p031, cfg, fmt->pad,
fmt->which);
return 0;
}
static int mt9p031_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -555,7 +555,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9p031_get_pad_crop(mt9p031, fh, format->pad,
+ __crop = __mt9p031_get_pad_crop(mt9p031, cfg, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -571,7 +571,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
- __format = __mt9p031_get_pad_format(mt9p031, fh, format->pad,
+ __format = __mt9p031_get_pad_format(mt9p031, cfg, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -582,7 +582,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
}
static int mt9p031_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -590,12 +590,12 @@ static int mt9p031_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9p031_get_pad_crop(mt9p031, fh, sel->pad, sel->which);
+ sel->r = *__mt9p031_get_pad_crop(mt9p031, cfg, sel->pad, sel->which);
return 0;
}
static int mt9p031_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -625,13 +625,13 @@ static int mt9p031_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9p031_get_pad_crop(mt9p031, fh, sel->pad, sel->which);
+ __crop = __mt9p031_get_pad_crop(mt9p031, cfg, sel->pad, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9p031_get_pad_format(mt9p031, fh, sel->pad,
+ __format = __mt9p031_get_pad_format(mt9p031, cfg, sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -946,13 +946,13 @@ static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(fh, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
crop->left = MT9P031_COLUMN_START_DEF;
crop->top = MT9P031_ROW_START_DEF;
crop->width = MT9P031_WINDOW_WIDTH_DEF;
crop->height = MT9P031_WINDOW_HEIGHT_DEF;
- format = v4l2_subdev_get_try_format(fh, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
format->code = MEDIA_BUS_FMT_Y12_1X12;
@@ -1022,7 +1022,6 @@ mt9p031_get_pdata(struct i2c_client *client)
if (!pdata)
goto done;
- pdata->reset = of_get_named_gpio(client->dev.of_node, "reset-gpios", 0);
of_property_read_u32(np, "input-clock-frequency", &pdata->ext_freq);
of_property_read_u32(np, "pixel-clock-frequency", &pdata->target_freq);
@@ -1059,7 +1058,6 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
mt9p031->model = did->driver_data;
- mt9p031->reset = -1;
mt9p031->regulators[0].supply = "vdd";
mt9p031->regulators[1].supply = "vdd_io";
@@ -1071,6 +1069,8 @@ static int mt9p031_probe(struct i2c_client *client,
return ret;
}
+ mutex_init(&mt9p031->power_lock);
+
v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 6);
v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
@@ -1108,7 +1108,6 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->blc_offset = v4l2_ctrl_find(&mt9p031->ctrls,
V4L2_CID_BLC_DIGITAL_OFFSET);
- mutex_init(&mt9p031->power_lock);
v4l2_i2c_subdev_init(&mt9p031->subdev, client, &mt9p031_subdev_ops);
mt9p031->subdev.internal_ops = &mt9p031_subdev_internal_ops;
@@ -1134,21 +1133,20 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->format.field = V4L2_FIELD_NONE;
mt9p031->format.colorspace = V4L2_COLORSPACE_SRGB;
- if (gpio_is_valid(pdata->reset)) {
- ret = devm_gpio_request_one(&client->dev, pdata->reset,
- GPIOF_OUT_INIT_LOW, "mt9p031_rst");
- if (ret < 0)
- goto done;
-
- mt9p031->reset = pdata->reset;
- }
+ mt9p031->reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
ret = mt9p031_clk_setup(mt9p031);
+ if (ret)
+ goto done;
+
+ ret = v4l2_async_register_subdev(&mt9p031->subdev);
done:
if (ret < 0) {
v4l2_ctrl_handler_free(&mt9p031->ctrls);
media_entity_cleanup(&mt9p031->subdev.entity);
+ mutex_destroy(&mt9p031->power_lock);
}
return ret;
@@ -1160,8 +1158,9 @@ static int mt9p031_remove(struct i2c_client *client)
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
v4l2_ctrl_handler_free(&mt9p031->ctrls);
- v4l2_device_unregister_subdev(subdev);
+ v4l2_async_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&mt9p031->power_lock);
return 0;
}
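The mt9p031 hunks above replace the legacy GPIO number with a descriptor requested via devm_gpiod_get_optional(..., GPIOD_OUT_HIGH), so the logical value 1 always means "reset asserted" and the active-low inversion is described by the GPIO flags in DT or board data rather than hard-coded in the driver. A short sketch of that convention, with illustrative names only:

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/gpio/consumer.h>

	static struct gpio_desc *example_get_reset(struct device *dev)
	{
		/* Requested asserted; gpiolib applies the line's active-low flag */
		return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	}

	static void example_release_reset(struct gpio_desc *reset)
	{
		if (reset) {
			gpiod_set_value(reset, 0);	/* deassert: sensor out of reset */
			usleep_range(1000, 2000);	/* settle time after release */
		}
	}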
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index f6ca636b538d..8ae99f7f254c 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -244,12 +244,12 @@ static int __mt9t001_set_power(struct mt9t001 *mt9t001, bool on)
*/
static struct v4l2_mbus_framefmt *
-__mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_fh *fh,
+__mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&mt9t001->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9t001->format;
default:
@@ -258,12 +258,12 @@ __mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_fh *fh,
}
static struct v4l2_rect *
-__mt9t001_get_pad_crop(struct mt9t001 *mt9t001, struct v4l2_subdev_fh *fh,
+__mt9t001_get_pad_crop(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(fh, pad);
+ return v4l2_subdev_get_try_crop(&mt9t001->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9t001->crop;
default:
@@ -327,7 +327,7 @@ static int mt9t001_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -338,7 +338,7 @@ static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -353,18 +353,18 @@ static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9t001_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
- format->format = *__mt9t001_get_pad_format(mt9t001, fh, format->pad,
+ format->format = *__mt9t001_get_pad_format(mt9t001, cfg, format->pad,
format->which);
return 0;
}
static int mt9t001_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -375,7 +375,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9t001_get_pad_crop(mt9t001, fh, format->pad,
+ __crop = __mt9t001_get_pad_crop(mt9t001, cfg, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -391,7 +391,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
- __format = __mt9t001_get_pad_format(mt9t001, fh, format->pad,
+ __format = __mt9t001_get_pad_format(mt9t001, cfg, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -402,7 +402,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
}
static int mt9t001_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -410,12 +410,12 @@ static int mt9t001_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9t001_get_pad_crop(mt9t001, fh, sel->pad, sel->which);
+ sel->r = *__mt9t001_get_pad_crop(mt9t001, cfg, sel->pad, sel->which);
return 0;
}
static int mt9t001_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -447,13 +447,13 @@ static int mt9t001_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9t001_get_pad_crop(mt9t001, fh, sel->pad, sel->which);
+ __crop = __mt9t001_get_pad_crop(mt9t001, cfg, sel->pad, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9t001_get_pad_format(mt9t001, fh, sel->pad,
+ __format = __mt9t001_get_pad_format(mt9t001, cfg, sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -790,13 +790,13 @@ static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(fh, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
crop->left = MT9T001_COLUMN_START_DEF;
crop->top = MT9T001_ROW_START_DEF;
crop->width = MT9T001_WINDOW_WIDTH_DEF + 1;
crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
- format = v4l2_subdev_get_try_format(fh, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
format->width = MT9T001_WINDOW_WIDTH_DEF + 1;
format->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index bd3f979a4d49..977f4006edbd 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -17,6 +17,8 @@
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -26,6 +28,7 @@
#include <media/mt9v032.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-of.h>
#include <media/v4l2-subdev.h>
/* The first four rows are black rows. The active area spans 753x481 pixels. */
@@ -371,12 +374,12 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
*/
static struct v4l2_mbus_framefmt *
-__mt9v032_get_pad_format(struct mt9v032 *mt9v032, struct v4l2_subdev_fh *fh,
+__mt9v032_get_pad_format(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&mt9v032->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->format;
default:
@@ -385,12 +388,12 @@ __mt9v032_get_pad_format(struct mt9v032 *mt9v032, struct v4l2_subdev_fh *fh,
}
static struct v4l2_rect *
-__mt9v032_get_pad_crop(struct mt9v032 *mt9v032, struct v4l2_subdev_fh *fh,
+__mt9v032_get_pad_crop(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(fh, pad);
+ return v4l2_subdev_get_try_crop(&mt9v032->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->crop;
default:
@@ -448,7 +451,7 @@ static int mt9v032_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -459,7 +462,7 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -474,12 +477,12 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9v032_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- format->format = *__mt9v032_get_pad_format(mt9v032, fh, format->pad,
+ format->format = *__mt9v032_get_pad_format(mt9v032, cfg, format->pad,
format->which);
return 0;
}
@@ -509,7 +512,7 @@ static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
}
static int mt9v032_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -520,7 +523,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9v032_get_pad_crop(mt9v032, fh, format->pad,
+ __crop = __mt9v032_get_pad_crop(mt9v032, cfg, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -536,7 +539,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
hratio = mt9v032_calc_ratio(__crop->width, width);
vratio = mt9v032_calc_ratio(__crop->height, height);
- __format = __mt9v032_get_pad_format(mt9v032, fh, format->pad,
+ __format = __mt9v032_get_pad_format(mt9v032, cfg, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -553,7 +556,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
}
static int mt9v032_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -561,12 +564,12 @@ static int mt9v032_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9v032_get_pad_crop(mt9v032, fh, sel->pad, sel->which);
+ sel->r = *__mt9v032_get_pad_crop(mt9v032, cfg, sel->pad, sel->which);
return 0;
}
static int mt9v032_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -598,13 +601,13 @@ static int mt9v032_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int,
rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9v032_get_pad_crop(mt9v032, fh, sel->pad, sel->which);
+ __crop = __mt9v032_get_pad_crop(mt9v032, cfg, sel->pad, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9v032_get_pad_format(mt9v032, fh, sel->pad,
+ __format = __mt9v032_get_pad_format(mt9v032, cfg, sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -810,13 +813,13 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(fh, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
crop->left = MT9V032_COLUMN_START_DEF;
crop->top = MT9V032_ROW_START_DEF;
crop->width = MT9V032_WINDOW_WIDTH_DEF;
crop->height = MT9V032_WINDOW_HEIGHT_DEF;
- format = v4l2_subdev_get_try_format(fh, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
if (mt9v032->model->color)
format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
@@ -876,10 +879,58 @@ static const struct regmap_config mt9v032_regmap_config = {
* Driver initialization and probing
*/
+static struct mt9v032_platform_data *
+mt9v032_get_pdata(struct i2c_client *client)
+{
+ struct mt9v032_platform_data *pdata;
+ struct v4l2_of_endpoint endpoint;
+ struct device_node *np;
+ struct property *prop;
+
+ if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
+ return client->dev.platform_data;
+
+ np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ if (!np)
+ return NULL;
+
+ if (v4l2_of_parse_endpoint(np, &endpoint) < 0)
+ goto done;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto done;
+
+ prop = of_find_property(np, "link-frequencies", NULL);
+ if (prop) {
+ u64 *link_freqs;
+ size_t size = prop->length / sizeof(*link_freqs);
+
+ link_freqs = devm_kcalloc(&client->dev, size,
+ sizeof(*link_freqs), GFP_KERNEL);
+ if (!link_freqs)
+ goto done;
+
+ if (of_property_read_u64_array(np, "link-frequencies",
+ link_freqs, size) < 0)
+ goto done;
+
+ pdata->link_freqs = link_freqs;
+ pdata->link_def_freq = link_freqs[0];
+ }
+
+ pdata->clk_pol = !!(endpoint.bus.parallel.flags &
+ V4L2_MBUS_PCLK_SAMPLE_RISING);
+
+done:
+ of_node_put(np);
+ return pdata;
+}
+
static int mt9v032_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
- struct mt9v032_platform_data *pdata = client->dev.platform_data;
+ struct mt9v032_platform_data *pdata = mt9v032_get_pdata(client);
struct mt9v032 *mt9v032;
unsigned int i;
int ret;
@@ -961,9 +1012,12 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->subdev.ctrl_handler = &mt9v032->ctrls;
- if (mt9v032->ctrls.error)
- printk(KERN_INFO "%s: control initialization error %d\n",
- __func__, mt9v032->ctrls.error);
+ if (mt9v032->ctrls.error) {
+ dev_err(&client->dev, "control initialization error %d\n",
+ mt9v032->ctrls.error);
+ ret = mt9v032->ctrls.error;
+ goto err;
+ }
mt9v032->crop.left = MT9V032_COLUMN_START_DEF;
mt9v032->crop.top = MT9V032_ROW_START_DEF;
@@ -1016,7 +1070,6 @@ static int mt9v032_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
v4l2_ctrl_handler_free(&mt9v032->ctrls);
- v4l2_device_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
return 0;
@@ -1035,9 +1088,25 @@ static const struct i2c_device_id mt9v032_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mt9v032_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id mt9v032_of_match[] = {
+ { .compatible = "aptina,mt9v022" },
+ { .compatible = "aptina,mt9v022m" },
+ { .compatible = "aptina,mt9v024" },
+ { .compatible = "aptina,mt9v024m" },
+ { .compatible = "aptina,mt9v032" },
+ { .compatible = "aptina,mt9v032m" },
+ { .compatible = "aptina,mt9v034" },
+ { .compatible = "aptina,mt9v034m" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mt9v032_of_match);
+#endif
+
static struct i2c_driver mt9v032_driver = {
.driver = {
.name = "mt9v032",
+ .of_match_table = of_match_ptr(mt9v032_of_match),
},
.probe = mt9v032_probe,
.remove = mt9v032_remove,
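The mt9v032 hunks above, like most of the sensor drivers converted in this section, follow the same pattern: subdev pad operations take a struct v4l2_subdev_pad_config * instead of a struct v4l2_subdev_fh *, and the try-format/try-crop accessors now take the subdev plus that pad config explicitly. A minimal sketch of the resulting shape, with placeholder names (example_get_fmt, struct example_sensor and to_example_sensor are illustrative, not from the patch):

static int example_get_fmt(struct v4l2_subdev *sd,
			   struct v4l2_subdev_pad_config *cfg,
			   struct v4l2_subdev_format *fmt)
{
	struct example_sensor *sensor = to_example_sensor(sd);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		/* TRY formats live in the per-file-handle pad config. */
		fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
		return 0;
	}

	/* ACTIVE formats are still kept in the driver's private state. */
	fmt->format = sensor->format;
	return 0;
}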
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 00c7b26f4823..f197b6cbd407 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -492,7 +492,7 @@ unlock:
}
static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(noon010_formats))
@@ -502,15 +502,16 @@ static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int noon010_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int noon010_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (fh) {
- mf = v4l2_subdev_get_try_format(fh, 0);
+ if (cfg) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
fmt->format = *mf;
}
return 0;
@@ -542,7 +543,7 @@ static const struct noon010_format *noon010_try_fmt(struct v4l2_subdev *sd,
return &noon010_formats[i];
}
-static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
@@ -557,8 +558,8 @@ static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (fh) {
- mf = v4l2_subdev_get_try_format(fh, 0);
+ if (cfg) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
*mf = fmt->format;
}
return 0;
@@ -640,7 +641,7 @@ static int noon010_log_status(struct v4l2_subdev *sd)
static int noon010_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(fh, 0);
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
mf->width = noon010_sizes[0].width;
mf->height = noon010_sizes[0].height;
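Note the "if (cfg)" guard that noon010pc30 keeps (s5k4ecgx and ov9650 below do the same): a TRY request arriving without a pad configuration has nowhere to store or read the trial format, so it is treated as a no-op rather than an error, mirroring the old "if (fh)" checks these hunks replace. Reduced to its shape, with placeholder names:

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		if (cfg) {
			/* Only touch the try state when a pad config exists. */
			mf = v4l2_subdev_get_try_format(sd, cfg, 0);
			*mf = fmt->format;
		}
		return 0;
	}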
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
new file mode 100644
index 000000000000..edebd114279d
--- /dev/null
+++ b/drivers/media/i2c/ov2659.c
@@ -0,0 +1,1509 @@
+/*
+ * Omnivision OV2659 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2015 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/ov2659.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-subdev.h>
+
+#define DRIVER_NAME "ov2659"
+
+/*
+ * OV2659 register definitions
+ */
+#define REG_SOFTWARE_STANDBY 0x0100
+#define REG_SOFTWARE_RESET 0x0103
+#define REG_IO_CTRL00 0x3000
+#define REG_IO_CTRL01 0x3001
+#define REG_IO_CTRL02 0x3002
+#define REG_OUTPUT_VALUE00 0x3008
+#define REG_OUTPUT_VALUE01 0x3009
+#define REG_OUTPUT_VALUE02 0x300d
+#define REG_OUTPUT_SELECT00 0x300e
+#define REG_OUTPUT_SELECT01 0x300f
+#define REG_OUTPUT_SELECT02 0x3010
+#define REG_OUTPUT_DRIVE 0x3011
+#define REG_INPUT_READOUT00 0x302d
+#define REG_INPUT_READOUT01 0x302e
+#define REG_INPUT_READOUT02 0x302f
+
+#define REG_SC_PLL_CTRL0 0x3003
+#define REG_SC_PLL_CTRL1 0x3004
+#define REG_SC_PLL_CTRL2 0x3005
+#define REG_SC_PLL_CTRL3 0x3006
+#define REG_SC_CHIP_ID_H 0x300a
+#define REG_SC_CHIP_ID_L 0x300b
+#define REG_SC_PWC 0x3014
+#define REG_SC_CLKRST0 0x301a
+#define REG_SC_CLKRST1 0x301b
+#define REG_SC_CLKRST2 0x301c
+#define REG_SC_CLKRST3 0x301d
+#define REG_SC_SUB_ID 0x302a
+#define REG_SC_SCCB_ID 0x302b
+
+#define REG_GROUP_ADDRESS_00 0x3200
+#define REG_GROUP_ADDRESS_01 0x3201
+#define REG_GROUP_ADDRESS_02 0x3202
+#define REG_GROUP_ADDRESS_03 0x3203
+#define REG_GROUP_ACCESS 0x3208
+
+#define REG_AWB_R_GAIN_H 0x3400
+#define REG_AWB_R_GAIN_L 0x3401
+#define REG_AWB_G_GAIN_H 0x3402
+#define REG_AWB_G_GAIN_L 0x3403
+#define REG_AWB_B_GAIN_H 0x3404
+#define REG_AWB_B_GAIN_L 0x3405
+#define REG_AWB_MANUAL_CONTROL 0x3406
+
+#define REG_TIMING_HS_H 0x3800
+#define REG_TIMING_HS_L 0x3801
+#define REG_TIMING_VS_H 0x3802
+#define REG_TIMING_VS_L 0x3803
+#define REG_TIMING_HW_H 0x3804
+#define REG_TIMING_HW_L 0x3805
+#define REG_TIMING_VH_H 0x3806
+#define REG_TIMING_VH_L 0x3807
+#define REG_TIMING_DVPHO_H 0x3808
+#define REG_TIMING_DVPHO_L 0x3809
+#define REG_TIMING_DVPVO_H 0x380a
+#define REG_TIMING_DVPVO_L 0x380b
+#define REG_TIMING_HTS_H 0x380c
+#define REG_TIMING_HTS_L 0x380d
+#define REG_TIMING_VTS_H 0x380e
+#define REG_TIMING_VTS_L 0x380f
+#define REG_TIMING_HOFFS_H 0x3810
+#define REG_TIMING_HOFFS_L 0x3811
+#define REG_TIMING_VOFFS_H 0x3812
+#define REG_TIMING_VOFFS_L 0x3813
+#define REG_TIMING_XINC 0x3814
+#define REG_TIMING_YINC 0x3815
+#define REG_TIMING_VERT_FORMAT 0x3820
+#define REG_TIMING_HORIZ_FORMAT 0x3821
+
+#define REG_FORMAT_CTRL00 0x4300
+
+#define REG_VFIFO_READ_START_H 0x4608
+#define REG_VFIFO_READ_START_L 0x4609
+
+#define REG_DVP_CTRL02 0x4708
+
+#define REG_ISP_CTRL00 0x5000
+#define REG_ISP_CTRL01 0x5001
+#define REG_ISP_CTRL02 0x5002
+
+#define REG_LENC_RED_X0_H 0x500c
+#define REG_LENC_RED_X0_L 0x500d
+#define REG_LENC_RED_Y0_H 0x500e
+#define REG_LENC_RED_Y0_L 0x500f
+#define REG_LENC_RED_A1 0x5010
+#define REG_LENC_RED_B1 0x5011
+#define REG_LENC_RED_A2_B2 0x5012
+#define REG_LENC_GREEN_X0_H 0x5013
+#define REG_LENC_GREEN_X0_L 0x5014
+#define REG_LENC_GREEN_Y0_H 0x5015
+#define REG_LENC_GREEN_Y0_L 0x5016
+#define REG_LENC_GREEN_A1 0x5017
+#define REG_LENC_GREEN_B1 0x5018
+#define REG_LENC_GREEN_A2_B2 0x5019
+#define REG_LENC_BLUE_X0_H 0x501a
+#define REG_LENC_BLUE_X0_L 0x501b
+#define REG_LENC_BLUE_Y0_H 0x501c
+#define REG_LENC_BLUE_Y0_L 0x501d
+#define REG_LENC_BLUE_A1 0x501e
+#define REG_LENC_BLUE_B1 0x501f
+#define REG_LENC_BLUE_A2_B2 0x5020
+
+#define REG_AWB_CTRL00 0x5035
+#define REG_AWB_CTRL01 0x5036
+#define REG_AWB_CTRL02 0x5037
+#define REG_AWB_CTRL03 0x5038
+#define REG_AWB_CTRL04 0x5039
+#define REG_AWB_LOCAL_LIMIT 0x503a
+#define REG_AWB_CTRL12 0x5049
+#define REG_AWB_CTRL13 0x504a
+#define REG_AWB_CTRL14 0x504b
+
+#define REG_SHARPENMT_THRESH1 0x5064
+#define REG_SHARPENMT_THRESH2 0x5065
+#define REG_SHARPENMT_OFFSET1 0x5066
+#define REG_SHARPENMT_OFFSET2 0x5067
+#define REG_DENOISE_THRESH1 0x5068
+#define REG_DENOISE_THRESH2 0x5069
+#define REG_DENOISE_OFFSET1 0x506a
+#define REG_DENOISE_OFFSET2 0x506b
+#define REG_SHARPEN_THRESH1 0x506c
+#define REG_SHARPEN_THRESH2 0x506d
+#define REG_CIP_CTRL00 0x506e
+#define REG_CIP_CTRL01 0x506f
+
+#define REG_CMX_SIGN 0x5079
+#define REG_CMX_MISC_CTRL 0x507a
+
+#define REG_PRE_ISP_CTRL00 0x50a0
+#define TEST_PATTERN_ENABLE BIT(7)
+#define VERTICAL_COLOR_BAR_MASK 0x53
+
+#define REG_NULL 0x0000 /* Array end token */
+
+#define OV265X_ID(_msb, _lsb) ((_msb) << 8 | (_lsb))
+#define OV2659_ID 0x2656
+
+struct sensor_register {
+ u16 addr;
+ u8 value;
+};
+
+struct ov2659_framesize {
+ u16 width;
+ u16 height;
+ u16 max_exp_lines;
+ const struct sensor_register *regs;
+};
+
+struct ov2659_pll_ctrl {
+ u8 ctrl1;
+ u8 ctrl2;
+ u8 ctrl3;
+};
+
+struct ov2659_pixfmt {
+ u32 code;
+ /* Output format Register Value (REG_FORMAT_CTRL00) */
+ struct sensor_register *format_ctrl_regs;
+};
+
+struct pll_ctrl_reg {
+ unsigned int div;
+ unsigned char reg;
+};
+
+struct ov2659 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ unsigned int xvclk_frequency;
+ const struct ov2659_platform_data *pdata;
+ struct mutex lock;
+ struct i2c_client *client;
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *link_frequency;
+ const struct ov2659_framesize *frame_size;
+ struct sensor_register *format_ctrl_regs;
+ struct ov2659_pll_ctrl pll;
+ int streaming;
+};
+
+static const struct sensor_register ov2659_init_regs[] = {
+ { REG_IO_CTRL00, 0x03 },
+ { REG_IO_CTRL01, 0xff },
+ { REG_IO_CTRL02, 0xe0 },
+ { 0x3633, 0x3d },
+ { 0x3620, 0x02 },
+ { 0x3631, 0x11 },
+ { 0x3612, 0x04 },
+ { 0x3630, 0x20 },
+ { 0x4702, 0x02 },
+ { 0x370c, 0x34 },
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0x00 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0x00 },
+ { REG_TIMING_HW_H, 0x06 },
+ { REG_TIMING_HW_L, 0x5f },
+ { REG_TIMING_VH_H, 0x04 },
+ { REG_TIMING_VH_L, 0xb7 },
+ { REG_TIMING_DVPHO_H, 0x03 },
+ { REG_TIMING_DVPHO_L, 0x20 },
+ { REG_TIMING_DVPVO_H, 0x02 },
+ { REG_TIMING_DVPVO_L, 0x58 },
+ { REG_TIMING_HTS_H, 0x05 },
+ { REG_TIMING_HTS_L, 0x14 },
+ { REG_TIMING_VTS_H, 0x02 },
+ { REG_TIMING_VTS_L, 0x68 },
+ { REG_TIMING_HOFFS_L, 0x08 },
+ { REG_TIMING_VOFFS_L, 0x02 },
+ { REG_TIMING_XINC, 0x31 },
+ { REG_TIMING_YINC, 0x31 },
+ { 0x3a02, 0x02 },
+ { 0x3a03, 0x68 },
+ { 0x3a08, 0x00 },
+ { 0x3a09, 0x5c },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0x4d },
+ { 0x3a0d, 0x08 },
+ { 0x3a0e, 0x06 },
+ { 0x3a14, 0x02 },
+ { 0x3a15, 0x28 },
+ { REG_DVP_CTRL02, 0x01 },
+ { 0x3623, 0x00 },
+ { 0x3634, 0x76 },
+ { 0x3701, 0x44 },
+ { 0x3702, 0x18 },
+ { 0x3703, 0x24 },
+ { 0x3704, 0x24 },
+ { 0x3705, 0x0c },
+ { REG_TIMING_VERT_FORMAT, 0x81 },
+ { REG_TIMING_HORIZ_FORMAT, 0x01 },
+ { 0x370a, 0x52 },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_FORMAT_CTRL00, 0x30 },
+ { 0x5086, 0x02 },
+ { REG_ISP_CTRL00, 0xfb },
+ { REG_ISP_CTRL01, 0x1f },
+ { REG_ISP_CTRL02, 0x00 },
+ { 0x5025, 0x0e },
+ { 0x5026, 0x18 },
+ { 0x5027, 0x34 },
+ { 0x5028, 0x4c },
+ { 0x5029, 0x62 },
+ { 0x502a, 0x74 },
+ { 0x502b, 0x85 },
+ { 0x502c, 0x92 },
+ { 0x502d, 0x9e },
+ { 0x502e, 0xb2 },
+ { 0x502f, 0xc0 },
+ { 0x5030, 0xcc },
+ { 0x5031, 0xe0 },
+ { 0x5032, 0xee },
+ { 0x5033, 0xf6 },
+ { 0x5034, 0x11 },
+ { 0x5070, 0x1c },
+ { 0x5071, 0x5b },
+ { 0x5072, 0x05 },
+ { 0x5073, 0x20 },
+ { 0x5074, 0x94 },
+ { 0x5075, 0xb4 },
+ { 0x5076, 0xb4 },
+ { 0x5077, 0xaf },
+ { 0x5078, 0x05 },
+ { REG_CMX_SIGN, 0x98 },
+ { REG_CMX_MISC_CTRL, 0x21 },
+ { REG_AWB_CTRL00, 0x6a },
+ { REG_AWB_CTRL01, 0x11 },
+ { REG_AWB_CTRL02, 0x92 },
+ { REG_AWB_CTRL03, 0x21 },
+ { REG_AWB_CTRL04, 0xe1 },
+ { REG_AWB_LOCAL_LIMIT, 0x01 },
+ { 0x503c, 0x05 },
+ { 0x503d, 0x08 },
+ { 0x503e, 0x08 },
+ { 0x503f, 0x64 },
+ { 0x5040, 0x58 },
+ { 0x5041, 0x2a },
+ { 0x5042, 0xc5 },
+ { 0x5043, 0x2e },
+ { 0x5044, 0x3a },
+ { 0x5045, 0x3c },
+ { 0x5046, 0x44 },
+ { 0x5047, 0xf8 },
+ { 0x5048, 0x08 },
+ { REG_AWB_CTRL12, 0x70 },
+ { REG_AWB_CTRL13, 0xf0 },
+ { REG_AWB_CTRL14, 0xf0 },
+ { REG_LENC_RED_X0_H, 0x03 },
+ { REG_LENC_RED_X0_L, 0x20 },
+ { REG_LENC_RED_Y0_H, 0x02 },
+ { REG_LENC_RED_Y0_L, 0x5c },
+ { REG_LENC_RED_A1, 0x48 },
+ { REG_LENC_RED_B1, 0x00 },
+ { REG_LENC_RED_A2_B2, 0x66 },
+ { REG_LENC_GREEN_X0_H, 0x03 },
+ { REG_LENC_GREEN_X0_L, 0x30 },
+ { REG_LENC_GREEN_Y0_H, 0x02 },
+ { REG_LENC_GREEN_Y0_L, 0x7c },
+ { REG_LENC_GREEN_A1, 0x40 },
+ { REG_LENC_GREEN_B1, 0x00 },
+ { REG_LENC_GREEN_A2_B2, 0x66 },
+ { REG_LENC_BLUE_X0_H, 0x03 },
+ { REG_LENC_BLUE_X0_L, 0x10 },
+ { REG_LENC_BLUE_Y0_H, 0x02 },
+ { REG_LENC_BLUE_Y0_L, 0x7c },
+ { REG_LENC_BLUE_A1, 0x3a },
+ { REG_LENC_BLUE_B1, 0x00 },
+ { REG_LENC_BLUE_A2_B2, 0x66 },
+ { REG_CIP_CTRL00, 0x44 },
+ { REG_SHARPENMT_THRESH1, 0x08 },
+ { REG_SHARPENMT_THRESH2, 0x10 },
+ { REG_SHARPENMT_OFFSET1, 0x12 },
+ { REG_SHARPENMT_OFFSET2, 0x02 },
+ { REG_SHARPEN_THRESH1, 0x08 },
+ { REG_SHARPEN_THRESH2, 0x10 },
+ { REG_CIP_CTRL01, 0xa6 },
+ { REG_DENOISE_THRESH1, 0x08 },
+ { REG_DENOISE_THRESH2, 0x10 },
+ { REG_DENOISE_OFFSET1, 0x04 },
+ { REG_DENOISE_OFFSET2, 0x12 },
+ { 0x507e, 0x40 },
+ { 0x507f, 0x20 },
+ { 0x507b, 0x02 },
+ { REG_CMX_MISC_CTRL, 0x01 },
+ { 0x5084, 0x0c },
+ { 0x5085, 0x3e },
+ { 0x5005, 0x80 },
+ { 0x3a0f, 0x30 },
+ { 0x3a10, 0x28 },
+ { 0x3a1b, 0x32 },
+ { 0x3a1e, 0x26 },
+ { 0x3a11, 0x60 },
+ { 0x3a1f, 0x14 },
+ { 0x5060, 0x69 },
+ { 0x5061, 0x7d },
+ { 0x5062, 0x7d },
+ { 0x5063, 0x69 },
+ { REG_NULL, 0x00 },
+};
+
+/* 1280X720 720p */
+static struct sensor_register ov2659_720p[] = {
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0xa0 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0xf0 },
+ { REG_TIMING_HW_H, 0x05 },
+ { REG_TIMING_HW_L, 0xbf },
+ { REG_TIMING_VH_H, 0x03 },
+ { REG_TIMING_VH_L, 0xcb },
+ { REG_TIMING_DVPHO_H, 0x05 },
+ { REG_TIMING_DVPHO_L, 0x00 },
+ { REG_TIMING_DVPVO_H, 0x02 },
+ { REG_TIMING_DVPVO_L, 0xd0 },
+ { REG_TIMING_HTS_H, 0x06 },
+ { REG_TIMING_HTS_L, 0x4c },
+ { REG_TIMING_VTS_H, 0x02 },
+ { REG_TIMING_VTS_L, 0xe8 },
+ { REG_TIMING_HOFFS_L, 0x10 },
+ { REG_TIMING_VOFFS_L, 0x06 },
+ { REG_TIMING_XINC, 0x11 },
+ { REG_TIMING_YINC, 0x11 },
+ { REG_TIMING_VERT_FORMAT, 0x80 },
+ { REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x3a03, 0xe8 },
+ { 0x3a09, 0x6f },
+ { 0x3a0b, 0x5d },
+ { 0x3a15, 0x9a },
+ { REG_NULL, 0x00 },
+};
+
+/* 1600X1200 UXGA */
+static struct sensor_register ov2659_uxga[] = {
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0x00 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0x00 },
+ { REG_TIMING_HW_H, 0x06 },
+ { REG_TIMING_HW_L, 0x5f },
+ { REG_TIMING_VH_H, 0x04 },
+ { REG_TIMING_VH_L, 0xbb },
+ { REG_TIMING_DVPHO_H, 0x06 },
+ { REG_TIMING_DVPHO_L, 0x40 },
+ { REG_TIMING_DVPVO_H, 0x04 },
+ { REG_TIMING_DVPVO_L, 0xb0 },
+ { REG_TIMING_HTS_H, 0x07 },
+ { REG_TIMING_HTS_L, 0x9f },
+ { REG_TIMING_VTS_H, 0x04 },
+ { REG_TIMING_VTS_L, 0xd0 },
+ { REG_TIMING_HOFFS_L, 0x10 },
+ { REG_TIMING_VOFFS_L, 0x06 },
+ { REG_TIMING_XINC, 0x11 },
+ { REG_TIMING_YINC, 0x11 },
+ { 0x3a02, 0x04 },
+ { 0x3a03, 0xd0 },
+ { 0x3a08, 0x00 },
+ { 0x3a09, 0xb8 },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0x9a },
+ { 0x3a0d, 0x08 },
+ { 0x3a0e, 0x06 },
+ { 0x3a14, 0x04 },
+ { 0x3a15, 0x50 },
+ { 0x3623, 0x00 },
+ { 0x3634, 0x44 },
+ { 0x3701, 0x44 },
+ { 0x3702, 0x30 },
+ { 0x3703, 0x48 },
+ { 0x3704, 0x48 },
+ { 0x3705, 0x18 },
+ { REG_TIMING_VERT_FORMAT, 0x80 },
+ { REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x12 },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
+ { REG_NULL, 0x00 },
+};
+
+/* 1280X1024 SXGA */
+static struct sensor_register ov2659_sxga[] = {
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0x00 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0x00 },
+ { REG_TIMING_HW_H, 0x06 },
+ { REG_TIMING_HW_L, 0x5f },
+ { REG_TIMING_VH_H, 0x04 },
+ { REG_TIMING_VH_L, 0xb7 },
+ { REG_TIMING_DVPHO_H, 0x05 },
+ { REG_TIMING_DVPHO_L, 0x00 },
+ { REG_TIMING_DVPVO_H, 0x04 },
+ { REG_TIMING_DVPVO_L, 0x00 },
+ { REG_TIMING_HTS_H, 0x07 },
+ { REG_TIMING_HTS_L, 0x9c },
+ { REG_TIMING_VTS_H, 0x04 },
+ { REG_TIMING_VTS_L, 0xd0 },
+ { REG_TIMING_HOFFS_L, 0x10 },
+ { REG_TIMING_VOFFS_L, 0x06 },
+ { REG_TIMING_XINC, 0x11 },
+ { REG_TIMING_YINC, 0x11 },
+ { 0x3a02, 0x02 },
+ { 0x3a03, 0x68 },
+ { 0x3a08, 0x00 },
+ { 0x3a09, 0x5c },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0x4d },
+ { 0x3a0d, 0x08 },
+ { 0x3a0e, 0x06 },
+ { 0x3a14, 0x02 },
+ { 0x3a15, 0x28 },
+ { 0x3623, 0x00 },
+ { 0x3634, 0x76 },
+ { 0x3701, 0x44 },
+ { 0x3702, 0x18 },
+ { 0x3703, 0x24 },
+ { 0x3704, 0x24 },
+ { 0x3705, 0x0c },
+ { REG_TIMING_VERT_FORMAT, 0x80 },
+ { REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x52 },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
+ { REG_NULL, 0x00 },
+};
+
+/* 1024X768 XGA */
+static struct sensor_register ov2659_xga[] = {
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0x00 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0x00 },
+ { REG_TIMING_HW_H, 0x06 },
+ { REG_TIMING_HW_L, 0x5f },
+ { REG_TIMING_VH_H, 0x04 },
+ { REG_TIMING_VH_L, 0xb7 },
+ { REG_TIMING_DVPHO_H, 0x04 },
+ { REG_TIMING_DVPHO_L, 0x00 },
+ { REG_TIMING_DVPVO_H, 0x03 },
+ { REG_TIMING_DVPVO_L, 0x00 },
+ { REG_TIMING_HTS_H, 0x07 },
+ { REG_TIMING_HTS_L, 0x9c },
+ { REG_TIMING_VTS_H, 0x04 },
+ { REG_TIMING_VTS_L, 0xd0 },
+ { REG_TIMING_HOFFS_L, 0x10 },
+ { REG_TIMING_VOFFS_L, 0x06 },
+ { REG_TIMING_XINC, 0x11 },
+ { REG_TIMING_YINC, 0x11 },
+ { 0x3a02, 0x02 },
+ { 0x3a03, 0x68 },
+ { 0x3a08, 0x00 },
+ { 0x3a09, 0x5c },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0x4d },
+ { 0x3a0d, 0x08 },
+ { 0x3a0e, 0x06 },
+ { 0x3a14, 0x02 },
+ { 0x3a15, 0x28 },
+ { 0x3623, 0x00 },
+ { 0x3634, 0x76 },
+ { 0x3701, 0x44 },
+ { 0x3702, 0x18 },
+ { 0x3703, 0x24 },
+ { 0x3704, 0x24 },
+ { 0x3705, 0x0c },
+ { REG_TIMING_VERT_FORMAT, 0x80 },
+ { REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x52 },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
+ { REG_NULL, 0x00 },
+};
+
+/* 800X600 SVGA */
+static struct sensor_register ov2659_svga[] = {
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0x00 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0x00 },
+ { REG_TIMING_HW_H, 0x06 },
+ { REG_TIMING_HW_L, 0x5f },
+ { REG_TIMING_VH_H, 0x04 },
+ { REG_TIMING_VH_L, 0xb7 },
+ { REG_TIMING_DVPHO_H, 0x03 },
+ { REG_TIMING_DVPHO_L, 0x20 },
+ { REG_TIMING_DVPVO_H, 0x02 },
+ { REG_TIMING_DVPVO_L, 0x58 },
+ { REG_TIMING_HTS_H, 0x05 },
+ { REG_TIMING_HTS_L, 0x14 },
+ { REG_TIMING_VTS_H, 0x02 },
+ { REG_TIMING_VTS_L, 0x68 },
+ { REG_TIMING_HOFFS_L, 0x08 },
+ { REG_TIMING_VOFFS_L, 0x02 },
+ { REG_TIMING_XINC, 0x31 },
+ { REG_TIMING_YINC, 0x31 },
+ { 0x3a02, 0x02 },
+ { 0x3a03, 0x68 },
+ { 0x3a08, 0x00 },
+ { 0x3a09, 0x5c },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0x4d },
+ { 0x3a0d, 0x08 },
+ { 0x3a0e, 0x06 },
+ { 0x3a14, 0x02 },
+ { 0x3a15, 0x28 },
+ { 0x3623, 0x00 },
+ { 0x3634, 0x76 },
+ { 0x3701, 0x44 },
+ { 0x3702, 0x18 },
+ { 0x3703, 0x24 },
+ { 0x3704, 0x24 },
+ { 0x3705, 0x0c },
+ { REG_TIMING_VERT_FORMAT, 0x81 },
+ { REG_TIMING_HORIZ_FORMAT, 0x01 },
+ { 0x370a, 0x52 },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
+ { REG_NULL, 0x00 },
+};
+
+/* 640X480 VGA */
+static struct sensor_register ov2659_vga[] = {
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0x00 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0x00 },
+ { REG_TIMING_HW_H, 0x06 },
+ { REG_TIMING_HW_L, 0x5f },
+ { REG_TIMING_VH_H, 0x04 },
+ { REG_TIMING_VH_L, 0xb7 },
+ { REG_TIMING_DVPHO_H, 0x02 },
+ { REG_TIMING_DVPHO_L, 0x80 },
+ { REG_TIMING_DVPVO_H, 0x01 },
+ { REG_TIMING_DVPVO_L, 0xe0 },
+ { REG_TIMING_HTS_H, 0x05 },
+ { REG_TIMING_HTS_L, 0x14 },
+ { REG_TIMING_VTS_H, 0x02 },
+ { REG_TIMING_VTS_L, 0x68 },
+ { REG_TIMING_HOFFS_L, 0x08 },
+ { REG_TIMING_VOFFS_L, 0x02 },
+ { REG_TIMING_XINC, 0x31 },
+ { REG_TIMING_YINC, 0x31 },
+ { 0x3a02, 0x02 },
+ { 0x3a03, 0x68 },
+ { 0x3a08, 0x00 },
+ { 0x3a09, 0x5c },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0x4d },
+ { 0x3a0d, 0x08 },
+ { 0x3a0e, 0x06 },
+ { 0x3a14, 0x02 },
+ { 0x3a15, 0x28 },
+ { 0x3623, 0x00 },
+ { 0x3634, 0x76 },
+ { 0x3701, 0x44 },
+ { 0x3702, 0x18 },
+ { 0x3703, 0x24 },
+ { 0x3704, 0x24 },
+ { 0x3705, 0x0c },
+ { REG_TIMING_VERT_FORMAT, 0x81 },
+ { REG_TIMING_HORIZ_FORMAT, 0x01 },
+ { 0x370a, 0x52 },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x10 },
+ { REG_NULL, 0x00 },
+};
+
+/* 320X240 QVGA */
+static struct sensor_register ov2659_qvga[] = {
+ { REG_TIMING_HS_H, 0x00 },
+ { REG_TIMING_HS_L, 0x00 },
+ { REG_TIMING_VS_H, 0x00 },
+ { REG_TIMING_VS_L, 0x00 },
+ { REG_TIMING_HW_H, 0x06 },
+ { REG_TIMING_HW_L, 0x5f },
+ { REG_TIMING_VH_H, 0x04 },
+ { REG_TIMING_VH_L, 0xb7 },
+ { REG_TIMING_DVPHO_H, 0x01 },
+ { REG_TIMING_DVPHO_L, 0x40 },
+ { REG_TIMING_DVPVO_H, 0x00 },
+ { REG_TIMING_DVPVO_L, 0xf0 },
+ { REG_TIMING_HTS_H, 0x05 },
+ { REG_TIMING_HTS_L, 0x14 },
+ { REG_TIMING_VTS_H, 0x02 },
+ { REG_TIMING_VTS_L, 0x68 },
+ { REG_TIMING_HOFFS_L, 0x08 },
+ { REG_TIMING_VOFFS_L, 0x02 },
+ { REG_TIMING_XINC, 0x31 },
+ { REG_TIMING_YINC, 0x31 },
+ { 0x3a02, 0x02 },
+ { 0x3a03, 0x68 },
+ { 0x3a08, 0x00 },
+ { 0x3a09, 0x5c },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0x4d },
+ { 0x3a0d, 0x08 },
+ { 0x3a0e, 0x06 },
+ { 0x3a14, 0x02 },
+ { 0x3a15, 0x28 },
+ { 0x3623, 0x00 },
+ { 0x3634, 0x76 },
+ { 0x3701, 0x44 },
+ { 0x3702, 0x18 },
+ { 0x3703, 0x24 },
+ { 0x3704, 0x24 },
+ { 0x3705, 0x0c },
+ { REG_TIMING_VERT_FORMAT, 0x81 },
+ { REG_TIMING_HORIZ_FORMAT, 0x01 },
+ { 0x370a, 0x52 },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x10 },
+ { REG_NULL, 0x00 },
+};
+
+static const struct pll_ctrl_reg ctrl3[] = {
+ { 1, 0x00 },
+ { 2, 0x02 },
+ { 3, 0x03 },
+ { 4, 0x06 },
+ { 6, 0x0d },
+ { 8, 0x0e },
+ { 12, 0x0f },
+ { 16, 0x12 },
+ { 24, 0x13 },
+ { 32, 0x16 },
+ { 48, 0x1b },
+ { 64, 0x1e },
+ { 96, 0x1f },
+ { 0, 0x00 },
+};
+
+static const struct pll_ctrl_reg ctrl1[] = {
+ { 2, 0x10 },
+ { 4, 0x20 },
+ { 6, 0x30 },
+ { 8, 0x40 },
+ { 10, 0x50 },
+ { 12, 0x60 },
+ { 14, 0x70 },
+ { 16, 0x80 },
+ { 18, 0x90 },
+ { 20, 0xa0 },
+ { 22, 0xb0 },
+ { 24, 0xc0 },
+ { 26, 0xd0 },
+ { 28, 0xe0 },
+ { 30, 0xf0 },
+ { 0, 0x00 },
+};
+
+static const struct ov2659_framesize ov2659_framesizes[] = {
+ { /* QVGA */
+ .width = 320,
+ .height = 240,
+ .regs = ov2659_qvga,
+ .max_exp_lines = 248,
+ }, { /* VGA */
+ .width = 640,
+ .height = 480,
+ .regs = ov2659_vga,
+ .max_exp_lines = 498,
+ }, { /* SVGA */
+ .width = 800,
+ .height = 600,
+ .regs = ov2659_svga,
+ .max_exp_lines = 498,
+ }, { /* XGA */
+ .width = 1024,
+ .height = 768,
+ .regs = ov2659_xga,
+ .max_exp_lines = 498,
+ }, { /* 720P */
+ .width = 1280,
+ .height = 720,
+ .regs = ov2659_720p,
+ .max_exp_lines = 498,
+ }, { /* SXGA */
+ .width = 1280,
+ .height = 1024,
+ .regs = ov2659_sxga,
+ .max_exp_lines = 1048,
+ }, { /* UXGA */
+ .width = 1600,
+ .height = 1200,
+ .regs = ov2659_uxga,
+ .max_exp_lines = 498,
+ },
+};
+
+/* YUV422 YUYV */
+static struct sensor_register ov2659_format_yuyv[] = {
+ { REG_FORMAT_CTRL00, 0x30 },
+ { REG_NULL, 0x0 },
+};
+
+/* YUV422 UYVY */
+static struct sensor_register ov2659_format_uyvy[] = {
+ { REG_FORMAT_CTRL00, 0x32 },
+ { REG_NULL, 0x0 },
+};
+
+/* Raw Bayer BGGR */
+static struct sensor_register ov2659_format_bggr[] = {
+ { REG_FORMAT_CTRL00, 0x00 },
+ { REG_NULL, 0x0 },
+};
+
+/* RGB565 */
+static struct sensor_register ov2659_format_rgb565[] = {
+ { REG_FORMAT_CTRL00, 0x60 },
+ { REG_NULL, 0x0 },
+};
+
+static const struct ov2659_pixfmt ov2659_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .format_ctrl_regs = ov2659_format_yuyv,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .format_ctrl_regs = ov2659_format_uyvy,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .format_ctrl_regs = ov2659_format_rgb565,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .format_ctrl_regs = ov2659_format_bggr,
+ },
+};
+
+static inline struct ov2659 *to_ov2659(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov2659, sd);
+}
+
+/* sensor register write */
+static int ov2659_write(struct i2c_client *client, u16 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3];
+ int ret;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xFF;
+ buf[2] = val;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.buf = buf;
+ msg.len = sizeof(buf);
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret >= 0)
+ return 0;
+
+ dev_dbg(&client->dev,
+ "ov2659 write reg(0x%x val:0x%x) failed!\n", reg, val);
+
+ return ret;
+}
+
+/* sensor register read */
+static int ov2659_read(struct i2c_client *client, u16 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+ int ret;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xFF;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags;
+ msg[0].buf = buf;
+ msg[0].len = sizeof(buf);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags | I2C_M_RD;
+ msg[1].buf = buf;
+ msg[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret >= 0) {
+ *val = buf[0];
+ return 0;
+ }
+
+ dev_dbg(&client->dev,
+ "ov2659 read reg(0x%x) failed!\n", reg);
+
+ return ret;
+}
+
+static int ov2659_write_array(struct i2c_client *client,
+ const struct sensor_register *regs)
+{
+ int i, ret = 0;
+
+ for (i = 0; ret == 0 && regs[i].addr; i++)
+ ret = ov2659_write(client, regs[i].addr, regs[i].value);
+
+ return ret;
+}
+
+static void ov2659_pll_calc_params(struct ov2659 *ov2659)
+{
+ const struct ov2659_platform_data *pdata = ov2659->pdata;
+ u8 ctrl1_reg = 0, ctrl2_reg = 0, ctrl3_reg = 0;
+ struct i2c_client *client = ov2659->client;
+ unsigned int desired = pdata->link_frequency;
+ u32 s_prediv = 1, s_postdiv = 1, s_mult = 1;
+ u32 prediv, postdiv, mult;
+ u32 bestdelta = -1;
+ u32 delta, actual;
+ int i, j;
+
+ for (i = 0; ctrl1[i].div != 0; i++) {
+ postdiv = ctrl1[i].div;
+ for (j = 0; ctrl3[j].div != 0; j++) {
+ prediv = ctrl3[j].div;
+ for (mult = 1; mult <= 63; mult++) {
+ actual = ov2659->xvclk_frequency;
+ actual *= mult;
+ actual /= prediv;
+ actual /= postdiv;
+ delta = actual - desired;
+ delta = abs(delta);
+
+ if ((delta < bestdelta) || (bestdelta == -1)) {
+ bestdelta = delta;
+ s_mult = mult;
+ s_prediv = prediv;
+ s_postdiv = postdiv;
+ ctrl1_reg = ctrl1[i].reg;
+ ctrl2_reg = mult;
+ ctrl3_reg = ctrl3[j].reg;
+ }
+ }
+ }
+ }
+
+ ov2659->pll.ctrl1 = ctrl1_reg;
+ ov2659->pll.ctrl2 = ctrl2_reg;
+ ov2659->pll.ctrl3 = ctrl3_reg;
+
+ dev_dbg(&client->dev,
+ "Actual reg config: ctrl1_reg: %02x ctrl2_reg: %02x ctrl3_reg: %02x\n",
+ ctrl1_reg, ctrl2_reg, ctrl3_reg);
+}
+
+static int ov2659_set_pixel_clock(struct ov2659 *ov2659)
+{
+ struct i2c_client *client = ov2659->client;
+ struct sensor_register pll_regs[] = {
+ {REG_SC_PLL_CTRL1, ov2659->pll.ctrl1},
+ {REG_SC_PLL_CTRL2, ov2659->pll.ctrl2},
+ {REG_SC_PLL_CTRL3, ov2659->pll.ctrl3},
+ {REG_NULL, 0x00},
+ };
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ return ov2659_write_array(client, pll_regs);
+};
+
+static void ov2659_get_default_format(struct v4l2_mbus_framefmt *format)
+{
+ format->width = ov2659_framesizes[2].width;
+ format->height = ov2659_framesizes[2].height;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+ format->code = ov2659_formats[0].code;
+ format->field = V4L2_FIELD_NONE;
+}
+
+static void ov2659_set_streaming(struct ov2659 *ov2659, int on)
+{
+ struct i2c_client *client = ov2659->client;
+ int ret;
+
+ on = !!on;
+
+ dev_dbg(&client->dev, "%s: on: %d\n", __func__, on);
+
+ ret = ov2659_write(client, REG_SOFTWARE_STANDBY, on);
+ if (ret)
+ dev_err(&client->dev, "ov2659 soft standby failed\n");
+}
+
+static int ov2659_init(struct v4l2_subdev *sd, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return ov2659_write_array(client, ov2659_init_regs);
+}
+
+/*
+ * V4L2 subdev video and pad level operations
+ */
+
+static int ov2659_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ if (code->index >= ARRAY_SIZE(ov2659_formats))
+ return -EINVAL;
+
+ code->code = ov2659_formats[code->index].code;
+
+ return 0;
+}
+
+static int ov2659_enum_frame_sizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int i = ARRAY_SIZE(ov2659_formats);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ if (fse->index >= ARRAY_SIZE(ov2659_framesizes))
+ return -EINVAL;
+
+ while (--i)
+ if (fse->code == ov2659_formats[i].code)
+ break;
+
+ fse->code = ov2659_formats[i].code;
+
+ fse->min_width = ov2659_framesizes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = ov2659_framesizes[fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int ov2659_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ dev_dbg(&client->dev, "ov2659_get_fmt\n");
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mutex_lock(&ov2659->lock);
+ fmt->format = *mf;
+ mutex_unlock(&ov2659->lock);
+ return 0;
+ }
+
+ mutex_lock(&ov2659->lock);
+ fmt->format = ov2659->format;
+ mutex_unlock(&ov2659->lock);
+
+ dev_dbg(&client->dev, "ov2659_get_fmt: %x %dx%d\n",
+ ov2659->format.code, ov2659->format.width,
+ ov2659->format.height);
+
+ return 0;
+}
+
+static void __ov2659_try_frame_size(struct v4l2_mbus_framefmt *mf,
+ const struct ov2659_framesize **size)
+{
+ const struct ov2659_framesize *fsize = &ov2659_framesizes[0];
+ const struct ov2659_framesize *match = NULL;
+ int i = ARRAY_SIZE(ov2659_framesizes);
+ unsigned int min_err = UINT_MAX;
+
+ while (i--) {
+ int err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+ if ((err < min_err) && (fsize->regs[0].addr)) {
+ min_err = err;
+ match = fsize;
+ }
+ fsize++;
+ }
+
+ if (!match)
+ match = &ov2659_framesizes[2];
+
+ mf->width = match->width;
+ mf->height = match->height;
+
+ if (size)
+ *size = match;
+}
+
+static int ov2659_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int index = ARRAY_SIZE(ov2659_formats);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ const struct ov2659_framesize *size = NULL;
+ struct ov2659 *ov2659 = to_ov2659(sd);
+ int ret = 0;
+
+ dev_dbg(&client->dev, "ov2659_set_fmt\n");
+
+ __ov2659_try_frame_size(mf, &size);
+
+ while (--index >= 0)
+ if (ov2659_formats[index].code == mf->code)
+ break;
+
+ if (index < 0)
+ return -EINVAL;
+
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->code = ov2659_formats[index].code;
+ mf->field = V4L2_FIELD_NONE;
+
+ mutex_lock(&ov2659->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *mf = fmt->format;
+ } else {
+ s64 val;
+
+ if (ov2659->streaming) {
+ mutex_unlock(&ov2659->lock);
+ return -EBUSY;
+ }
+
+ ov2659->frame_size = size;
+ ov2659->format = fmt->format;
+ ov2659->format_ctrl_regs =
+ ov2659_formats[index].format_ctrl_regs;
+
+ if (ov2659->format.code != MEDIA_BUS_FMT_SBGGR8_1X8)
+ val = ov2659->pdata->link_frequency / 2;
+ else
+ val = ov2659->pdata->link_frequency;
+
+ ret = v4l2_ctrl_s_ctrl_int64(ov2659->link_frequency, val);
+ if (ret < 0)
+ dev_warn(&client->dev,
+ "failed to set link_frequency rate (%d)\n",
+ ret);
+ }
+
+ mutex_unlock(&ov2659->lock);
+ return ret;
+}
+
+static int ov2659_set_frame_size(struct ov2659 *ov2659)
+{
+ struct i2c_client *client = ov2659->client;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ return ov2659_write_array(ov2659->client, ov2659->frame_size->regs);
+}
+
+static int ov2659_set_format(struct ov2659 *ov2659)
+{
+ struct i2c_client *client = ov2659->client;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ return ov2659_write_array(ov2659->client, ov2659->format_ctrl_regs);
+}
+
+static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+ int ret = 0;
+
+ dev_dbg(&client->dev, "%s: on: %d\n", __func__, on);
+
+ mutex_lock(&ov2659->lock);
+
+ on = !!on;
+
+ if (ov2659->streaming == on)
+ goto unlock;
+
+ if (!on) {
+ /* Stop Streaming Sequence */
+ ov2659_set_streaming(ov2659, 0);
+ ov2659->streaming = on;
+ goto unlock;
+ }
+
+ ov2659_set_pixel_clock(ov2659);
+ ov2659_set_frame_size(ov2659);
+ ov2659_set_format(ov2659);
+ ov2659_set_streaming(ov2659, 1);
+ ov2659->streaming = on;
+
+unlock:
+ mutex_unlock(&ov2659->lock);
+ return ret;
+}
+
+static int ov2659_set_test_pattern(struct ov2659 *ov2659, int value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2659->sd);
+ int ret;
+ u8 val;
+
+ ret = ov2659_read(client, REG_PRE_ISP_CTRL00, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (value) {
+ case 0:
+ val &= ~TEST_PATTERN_ENABLE;
+ break;
+ case 1:
+ val &= VERTICAL_COLOR_BAR_MASK;
+ val |= TEST_PATTERN_ENABLE;
+ break;
+ }
+
+ return ov2659_write(client, REG_PRE_ISP_CTRL00, val);
+}
+
+static int ov2659_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2659 *ov2659 =
+ container_of(ctrl->handler, struct ov2659, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ return ov2659_set_test_pattern(ov2659, ctrl->val);
+ }
+
+ return 0;
+}
+
+static struct v4l2_ctrl_ops ov2659_ctrl_ops = {
+ .s_ctrl = ov2659_s_ctrl,
+};
+
+static const char * const ov2659_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bars",
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev internal operations
+ */
+
+static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ ov2659_get_default_format(format);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops ov2659_subdev_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops ov2659_subdev_video_ops = {
+ .s_stream = ov2659_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov2659_subdev_pad_ops = {
+ .enum_mbus_code = ov2659_enum_mbus_code,
+ .enum_frame_size = ov2659_enum_frame_sizes,
+ .get_fmt = ov2659_get_fmt,
+ .set_fmt = ov2659_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov2659_subdev_ops = {
+ .core = &ov2659_subdev_core_ops,
+ .video = &ov2659_subdev_video_ops,
+ .pad = &ov2659_subdev_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov2659_subdev_internal_ops = {
+ .open = ov2659_open,
+};
+
+static int ov2659_detect(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 pid, ver;
+ int ret;
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ ret = ov2659_write(client, REG_SOFTWARE_RESET, 0x01);
+ if (ret != 0) {
+ dev_err(&client->dev, "Sensor soft reset failed\n");
+ return -ENODEV;
+ }
+ usleep_range(1000, 2000);
+
+ ret = ov2659_init(sd, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Check sensor revision */
+ ret = ov2659_read(client, REG_SC_CHIP_ID_H, &pid);
+ if (!ret)
+ ret = ov2659_read(client, REG_SC_CHIP_ID_L, &ver);
+
+ if (!ret) {
+ unsigned short id;
+
+ id = OV265X_ID(pid, ver);
+ if (id != OV2659_ID)
+ dev_err(&client->dev,
+ "Sensor detection failed (%04X, %d)\n",
+ id, ret);
+ else
+ dev_info(&client->dev, "Found OV%04X sensor\n", id);
+ }
+
+ return ret;
+}
+
+static struct ov2659_platform_data *
+ov2659_get_pdata(struct i2c_client *client)
+{
+ struct ov2659_platform_data *pdata;
+ struct device_node *endpoint;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
+ return client->dev.platform_data;
+
+ endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
+ if (!endpoint)
+ return NULL;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto done;
+
+ ret = of_property_read_u64(endpoint, "link-frequencies",
+ &pdata->link_frequency);
+ if (ret) {
+ dev_err(&client->dev, "link-frequencies property not found\n");
+ pdata = NULL;
+ }
+
+done:
+ of_node_put(endpoint);
+ return pdata;
+}
+
+static int ov2659_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct ov2659_platform_data *pdata = ov2659_get_pdata(client);
+ struct v4l2_subdev *sd;
+ struct ov2659 *ov2659;
+ struct clk *clk;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&client->dev, "platform data not specified\n");
+ return -EINVAL;
+ }
+
+ ov2659 = devm_kzalloc(&client->dev, sizeof(*ov2659), GFP_KERNEL);
+ if (!ov2659)
+ return -ENOMEM;
+
+ ov2659->pdata = pdata;
+ ov2659->client = client;
+
+ clk = devm_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ov2659->xvclk_frequency = clk_get_rate(clk);
+ if (ov2659->xvclk_frequency < 6000000 ||
+ ov2659->xvclk_frequency > 27000000)
+ return -EINVAL;
+
+ v4l2_ctrl_handler_init(&ov2659->ctrls, 2);
+ ov2659->link_frequency =
+ v4l2_ctrl_new_std(&ov2659->ctrls, &ov2659_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ pdata->link_frequency / 2,
+ pdata->link_frequency, 1,
+ pdata->link_frequency);
+ v4l2_ctrl_new_std_menu_items(&ov2659->ctrls, &ov2659_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov2659_test_pattern_menu) - 1,
+ 0, 0, ov2659_test_pattern_menu);
+ ov2659->sd.ctrl_handler = &ov2659->ctrls;
+
+ if (ov2659->ctrls.error) {
+ dev_err(&client->dev, "%s: control initialization error %d\n",
+ __func__, ov2659->ctrls.error);
+ return ov2659->ctrls.error;
+ }
+
+ sd = &ov2659->sd;
+ client->flags |= I2C_CLIENT_SCCB;
+ v4l2_i2c_subdev_init(sd, client, &ov2659_subdev_ops);
+
+ sd->internal_ops = &ov2659_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ ov2659->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, 1, &ov2659->pad, 0);
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(&ov2659->ctrls);
+ return ret;
+ }
+#endif
+
+ mutex_init(&ov2659->lock);
+
+ ov2659_get_default_format(&ov2659->format);
+ ov2659->frame_size = &ov2659_framesizes[2];
+ ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
+
+ ret = ov2659_detect(sd);
+ if (ret < 0)
+ goto error;
+
+ /* Calculate the PLL register value needed */
+ ov2659_pll_calc_params(ov2659);
+
+ ret = v4l2_async_register_subdev(&ov2659->sd);
+ if (ret)
+ goto error;
+
+ dev_info(&client->dev, "%s sensor driver registered\n", sd->name);
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&ov2659->ctrls);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&sd->entity);
+#endif
+ mutex_destroy(&ov2659->lock);
+ return ret;
+}
+
+static int ov2659_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ v4l2_ctrl_handler_free(&ov2659->ctrls);
+ v4l2_async_unregister_subdev(sd);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&sd->entity);
+#endif
+ mutex_destroy(&ov2659->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov2659_id[] = {
+ { "ov2659", 0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(i2c, ov2659_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ov2659_of_match[] = {
+ { .compatible = "ovti,ov2659", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov2659_of_match);
+#endif
+
+static struct i2c_driver ov2659_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(ov2659_of_match),
+ },
+ .probe = ov2659_probe,
+ .remove = ov2659_remove,
+ .id_table = ov2659_id,
+};
+
+module_i2c_driver(ov2659_i2c_driver);
+
+MODULE_AUTHOR("Benoit Parrot <bparrot@ti.com>");
+MODULE_DESCRIPTION("OV2659 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL v2");
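The PLL setup in the new ov2659 driver is a plain brute-force search: ov2659_pll_calc_params() walks every postdiv from the ctrl1 table, every prediv from the ctrl3 table and every multiplier from 1 to 63, computes actual = xvclk * mult / prediv / postdiv, and keeps the triple whose result is closest to the requested link frequency. A stand-alone sketch of that search, with the register tables reduced to bare divider values (the function name and parameters are illustrative, not part of the patch):

static unsigned int pll_search(unsigned int xvclk, unsigned int desired,
			       const unsigned int *postdivs, int n_post,
			       const unsigned int *predivs, int n_pre,
			       unsigned int *best_mult,
			       unsigned int *best_prediv,
			       unsigned int *best_postdiv)
{
	unsigned int best_delta = ~0u;
	unsigned int mult;
	int i, j;

	for (i = 0; i < n_post; i++) {
		for (j = 0; j < n_pre; j++) {
			for (mult = 1; mult <= 63; mult++) {
				unsigned int actual = xvclk * mult
						    / predivs[j]
						    / postdivs[i];
				unsigned int delta = actual > desired ?
						     actual - desired :
						     desired - actual;

				if (delta < best_delta) {
					best_delta = delta;
					*best_mult = mult;
					*best_prediv = predivs[j];
					*best_postdiv = postdivs[i];
				}
			}
		}
	}

	return best_delta;
}

For example, feeding it a 24 MHz xvclk and a 70 MHz target (example numbers only) returns the smallest achievable error together with the winning multiplier and dividers; the driver then maps those back to the SC_PLL_CTRL1/2/3 register values.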
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 957927f7a353..b9847527eb5a 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1069,29 +1069,35 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
static int ov7670_frame_rates[] = { 30, 15, 10, 5, 1 };
-static int ov7670_enum_frameintervals(struct v4l2_subdev *sd,
- struct v4l2_frmivalenum *interval)
+static int ov7670_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
{
- if (interval->index >= ARRAY_SIZE(ov7670_frame_rates))
+ if (fie->pad)
return -EINVAL;
- interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
- interval->discrete.numerator = 1;
- interval->discrete.denominator = ov7670_frame_rates[interval->index];
+ if (fie->index >= ARRAY_SIZE(ov7670_frame_rates))
+ return -EINVAL;
+ fie->interval.numerator = 1;
+ fie->interval.denominator = ov7670_frame_rates[fie->index];
return 0;
}
/*
* Frame size enumeration
*/
-static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
- struct v4l2_frmsizeenum *fsize)
+static int ov7670_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
{
struct ov7670_info *info = to_state(sd);
int i;
int num_valid = -1;
- __u32 index = fsize->index;
+ __u32 index = fse->index;
unsigned int n_win_sizes = info->devtype->n_win_sizes;
+ if (fse->pad)
+ return -EINVAL;
+
/*
* If a minimum width/height was requested, filter out the capture
* windows that fall outside that.
@@ -1103,9 +1109,8 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
if (info->min_height && win->height < info->min_height)
continue;
if (index == ++num_valid) {
- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = win->width;
- fsize->discrete.height = win->height;
+ fse->min_width = fse->max_width = win->width;
+ fse->min_height = fse->max_height = win->height;
return 0;
}
}
@@ -1485,13 +1490,17 @@ static const struct v4l2_subdev_video_ops ov7670_video_ops = {
.s_mbus_fmt = ov7670_s_mbus_fmt,
.s_parm = ov7670_s_parm,
.g_parm = ov7670_g_parm,
- .enum_frameintervals = ov7670_enum_frameintervals,
- .enum_framesizes = ov7670_enum_framesizes,
+};
+
+static const struct v4l2_subdev_pad_ops ov7670_pad_ops = {
+ .enum_frame_interval = ov7670_enum_frame_interval,
+ .enum_frame_size = ov7670_enum_frame_size,
};
static const struct v4l2_subdev_ops ov7670_ops = {
.core = &ov7670_core_ops,
.video = &ov7670_video_ops,
+ .pad = &ov7670_pad_ops,
};
/* ----------------------------------------------------------------------- */
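The ov7670 change is slightly different from the rest of this section: its frame-size and frame-interval enumeration move from v4l2_subdev_video_ops to v4l2_subdev_pad_ops, so the old v4l2_frmivalenum/v4l2_frmsizeenum structures are replaced by their subdev pad counterparts. A hedged sketch of how the converted enum_frame_interval op would be driven (example_list_rates is illustrative; passing a NULL pad config is acceptable here only because ov7670 ignores it):

static void example_list_rates(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_frame_interval_enum fie = { .pad = 0 };

	/* Walk indices until the op reports -EINVAL, as the core does. */
	for (fie.index = 0;
	     !v4l2_subdev_call(sd, pad, enum_frame_interval, NULL, &fie);
	     fie.index++)
		pr_info("%u/%u s per frame\n",
			fie.interval.numerator, fie.interval.denominator);
}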
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 2246bd5436ad..2bc473385c91 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1067,7 +1067,7 @@ static void ov965x_get_default_format(struct v4l2_mbus_framefmt *mf)
}
static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(ov965x_formats))
@@ -1078,7 +1078,7 @@ static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov965x_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
int i = ARRAY_SIZE(ov965x_formats);
@@ -1164,14 +1164,14 @@ static int ov965x_s_frame_interval(struct v4l2_subdev *sd,
return ret;
}
-static int ov965x_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ov965x_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct ov965x *ov965x = to_ov965x(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, 0);
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
fmt->format = *mf;
return 0;
}
@@ -1208,7 +1208,7 @@ static void __ov965x_try_frame_size(struct v4l2_mbus_framefmt *mf,
*size = match;
}
-static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
unsigned int index = ARRAY_SIZE(ov965x_formats);
@@ -1230,8 +1230,8 @@ static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
mutex_lock(&ov965x->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (fh != NULL) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ if (cfg != NULL) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
}
} else {
@@ -1361,7 +1361,7 @@ static int ov965x_s_stream(struct v4l2_subdev *sd, int on)
*/
static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(fh, 0);
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
ov965x_get_default_format(mf);
return 0;
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index ee0f57e01b56..08b234bd2962 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -824,10 +824,11 @@ static const struct s5c73m3_frame_size *s5c73m3_find_frame_size(
}
static void s5c73m3_oif_try_format(struct s5c73m3 *state,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt,
const struct s5c73m3_frame_size **fs)
{
+ struct v4l2_subdev *sd = &state->sensor_sd;
u32 code;
switch (fmt->pad) {
@@ -850,7 +851,7 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
*fs = state->oif_pix_size[RES_ISP];
else
*fs = s5c73m3_find_frame_size(
- v4l2_subdev_get_try_format(fh,
+ v4l2_subdev_get_try_format(sd, cfg,
OIF_ISP_PAD),
RES_ISP);
break;
@@ -860,7 +861,7 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
}
static void s5c73m3_try_format(struct s5c73m3 *state,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt,
const struct s5c73m3_frame_size **fs)
{
@@ -952,7 +953,7 @@ static int s5c73m3_oif_s_frame_interval(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -990,7 +991,7 @@ static int s5c73m3_oif_get_pad_code(int pad, int index)
}
static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
@@ -998,7 +999,7 @@ static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
return 0;
}
@@ -1024,7 +1025,7 @@ static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -1032,7 +1033,7 @@ static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
return 0;
}
@@ -1062,7 +1063,7 @@ static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
}
static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
const struct s5c73m3_frame_size *frame_size = NULL;
@@ -1072,10 +1073,10 @@ static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&state->lock);
- s5c73m3_try_format(state, fh, fmt, &frame_size);
+ s5c73m3_try_format(state, cfg, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
} else {
switch (fmt->pad) {
@@ -1101,7 +1102,7 @@ static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
const struct s5c73m3_frame_size *frame_size = NULL;
@@ -1111,13 +1112,13 @@ static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&state->lock);
- s5c73m3_oif_try_format(state, fh, fmt, &frame_size);
+ s5c73m3_oif_try_format(state, cfg, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
if (fmt->pad == OIF_ISP_PAD) {
- mf = v4l2_subdev_get_try_format(fh, OIF_SOURCE_PAD);
+ mf = v4l2_subdev_get_try_format(sd, cfg, OIF_SOURCE_PAD);
mf->width = fmt->format.width;
mf->height = fmt->format.height;
}
@@ -1189,7 +1190,7 @@ static int s5c73m3_oif_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
}
static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
static const int codes[] = {
@@ -1205,7 +1206,7 @@ static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
int ret;
@@ -1220,7 +1221,7 @@ static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
int idx;
@@ -1247,9 +1248,10 @@ static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
int idx;
if (fse->pad == OIF_SOURCE_PAD) {
@@ -1259,11 +1261,25 @@ static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
switch (fse->code) {
case S5C73M3_JPEG_FMT:
case S5C73M3_ISP_FMT: {
- struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_get_try_format(fh, OIF_ISP_PAD);
+ unsigned w, h;
+
+ if (fse->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(sd, cfg,
+ OIF_ISP_PAD);
+
+ w = mf->width;
+ h = mf->height;
+ } else {
+ const struct s5c73m3_frame_size *fs;
- fse->max_width = fse->min_width = mf->width;
- fse->max_height = fse->min_height = mf->height;
+ fs = state->oif_pix_size[RES_ISP];
+ w = fs->width;
+ h = fs->height;
+ }
+ fse->max_width = fse->min_width = w;
+ fse->max_height = fse->min_height = h;
return 0;
}
default:
@@ -1306,11 +1322,11 @@ static int s5c73m3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(fh, S5C73M3_ISP_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->pad, S5C73M3_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(fh, S5C73M3_JPEG_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->pad, S5C73M3_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
@@ -1321,15 +1337,15 @@ static int s5c73m3_oif_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(fh, OIF_ISP_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(fh, OIF_JPEG_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
- mf = v4l2_subdev_get_try_format(fh, OIF_SOURCE_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_SOURCE_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
return 0;
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
index f60b265b4da1..63eb19093381 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -52,7 +52,7 @@ static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len,
xfer.rx_buf = addr;
if (spi_dev == NULL) {
- dev_err(&spi_dev->dev, "SPI device is uninitialized\n");
+ pr_err("SPI device is uninitialized\n");
return -ENODEV;
}
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 70071314789e..97084237275d 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -531,7 +531,7 @@ static int s5k4ecgx_try_frame_size(struct v4l2_mbus_framefmt *mf,
}
static int s5k4ecgx_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k4ecgx_formats))
@@ -541,15 +541,15 @@ static int s5k4ecgx_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int s5k4ecgx_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5k4ecgx_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (fh) {
- mf = v4l2_subdev_get_try_format(fh, 0);
+ if (cfg) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
fmt->format = *mf;
}
return 0;
@@ -581,7 +581,7 @@ static const struct s5k4ecgx_pixfmt *s5k4ecgx_try_fmt(struct v4l2_subdev *sd,
return &s5k4ecgx_formats[i];
}
-static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
@@ -596,8 +596,8 @@ static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (fh) {
- mf = v4l2_subdev_get_try_format(fh, 0);
+ if (cfg) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
*mf = fmt->format;
}
return 0;
@@ -692,7 +692,7 @@ static int s5k4ecgx_registered(struct v4l2_subdev *sd)
*/
static int s5k4ecgx_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(fh, 0);
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
mf->width = s5k4ecgx_prev_sizes[0].size.width;
mf->height = s5k4ecgx_prev_sizes[0].size.height;
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index a3d7d0391302..297ef04e146a 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -374,6 +374,8 @@ static int s5k5baf_fw_parse(struct device *dev, struct s5k5baf_fw **fw,
count -= S5K5BAG_FW_TAG_LEN;
d = devm_kzalloc(dev, count * sizeof(u16), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
for (i = 0; i < count; ++i)
d[i] = le16_to_cpu(data[i]);
@@ -1182,7 +1184,7 @@ static int s5k5baf_s_frame_interval(struct v4l2_subdev *sd,
* V4L2 subdev pad level and video operations
*/
static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (fie->index > S5K5BAF_MAX_FR_TIME - S5K5BAF_MIN_FR_TIME ||
@@ -1201,7 +1203,7 @@ static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
}
static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad == PAD_CIS) {
@@ -1219,7 +1221,7 @@ static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
int i;
@@ -1276,7 +1278,7 @@ static int s5k5baf_try_isp_format(struct v4l2_mbus_framefmt *mf)
return pixfmt;
}
-static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5k5baf *state = to_s5k5baf(sd);
@@ -1284,7 +1286,7 @@ static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1306,7 +1308,7 @@ static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
-static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *mf = &fmt->format;
@@ -1317,7 +1319,7 @@ static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
mf->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(fh, fmt->pad) = *mf;
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = *mf;
return 0;
}
@@ -1369,7 +1371,7 @@ static int s5k5baf_is_bound_target(u32 target)
}
static int s5k5baf_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
static enum selection_rect rtype;
@@ -1389,9 +1391,9 @@ static int s5k5baf_get_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
if (rtype == R_COMPOSE)
- sel->r = *v4l2_subdev_get_try_compose(fh, sel->pad);
+ sel->r = *v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
else
- sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
return 0;
}
@@ -1460,7 +1462,7 @@ static bool s5k5baf_cmp_rect(const struct v4l2_rect *r1,
}
static int s5k5baf_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
static enum selection_rect rtype;
@@ -1481,9 +1483,9 @@ static int s5k5baf_set_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
rects = (struct v4l2_rect * []) {
&s5k5baf_cis_rect,
- v4l2_subdev_get_try_crop(fh, PAD_CIS),
- v4l2_subdev_get_try_compose(fh, PAD_CIS),
- v4l2_subdev_get_try_crop(fh, PAD_OUT)
+ v4l2_subdev_get_try_crop(sd, cfg, PAD_CIS),
+ v4l2_subdev_get_try_compose(sd, cfg, PAD_CIS),
+ v4l2_subdev_get_try_crop(sd, cfg, PAD_OUT)
};
s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
return 0;
@@ -1701,22 +1703,22 @@ static int s5k5baf_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(fh, PAD_CIS);
+ mf = v4l2_subdev_get_try_format(sd, fh->pad, PAD_CIS);
s5k5baf_try_cis_format(mf);
if (s5k5baf_is_cis_subdev(sd))
return 0;
- mf = v4l2_subdev_get_try_format(fh, PAD_OUT);
+ mf = v4l2_subdev_get_try_format(sd, fh->pad, PAD_OUT);
mf->colorspace = s5k5baf_formats[0].colorspace;
mf->code = s5k5baf_formats[0].code;
mf->width = s5k5baf_cis_rect.width;
mf->height = s5k5baf_cis_rect.height;
mf->field = V4L2_FIELD_NONE;
- *v4l2_subdev_get_try_crop(fh, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_compose(fh, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_crop(fh, PAD_OUT) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(sd, fh->pad, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_compose(sd, fh->pad, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(sd, fh->pad, PAD_OUT) = s5k5baf_cis_rect;
return 0;
}
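
One thing the conversion does not touch is the .open() internal op: it still receives a struct v4l2_subdev_fh, and the per-file-handle pad configuration is reached through fh->pad before being handed to the same helpers. A minimal sketch, with MY_PAD and the default format/rectangle as hypothetical placeholders:

static int mydrv_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
        /* fh->pad is this file handle's array of per-pad try state */
        struct v4l2_mbus_framefmt *mf =
                v4l2_subdev_get_try_format(sd, fh->pad, MY_PAD);

        *mf = mydrv_default_fmt;                                /* hypothetical */
        *v4l2_subdev_get_try_crop(sd, fh->pad, MY_PAD) = mydrv_default_rect;
        return 0;
}
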
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 91b841a1b850..bc389d5e42ae 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -99,7 +99,7 @@ static const struct v4l2_mbus_framefmt *find_sensor_format(
}
static int s5k6a3_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k6a3_formats))
@@ -123,17 +123,17 @@ static void s5k6a3_try_format(struct v4l2_mbus_framefmt *mf)
}
static struct v4l2_mbus_framefmt *__s5k6a3_get_format(
- struct s5k6a3 *sensor, struct v4l2_subdev_fh *fh,
+ struct s5k6a3 *sensor, struct v4l2_subdev_pad_config *cfg,
u32 pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return fh ? v4l2_subdev_get_try_format(fh, pad) : NULL;
+ return cfg ? v4l2_subdev_get_try_format(&sensor->subdev, cfg, pad) : NULL;
return &sensor->format;
}
static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
@@ -141,7 +141,7 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
s5k6a3_try_format(&fmt->format);
- mf = __s5k6a3_get_format(sensor, fh, fmt->pad, fmt->which);
+ mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
if (mf) {
mutex_lock(&sensor->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -152,13 +152,13 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
}
static int s5k6a3_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
struct v4l2_mbus_framefmt *mf;
- mf = __s5k6a3_get_format(sensor, fh, fmt->pad, fmt->which);
+ mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
mutex_lock(&sensor->lock);
fmt->format = *mf;
@@ -174,7 +174,7 @@ static struct v4l2_subdev_pad_ops s5k6a3_pad_ops = {
static int s5k6a3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
*format = s5k6a3_formats[0];
format->width = S5K6A3_DEFAULT_WIDTH;
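
The __s5k6a3_get_format() helper above shows the usual dispatch idiom: one function hands back either the try format stored in the pad config or the active format kept in driver state, depending on the 'which' flag. Roughly, under the new API (sketch, names hypothetical):

static struct v4l2_mbus_framefmt *
__mydrv_get_pad_format(struct mydrv_sensor *sensor,
                       struct v4l2_subdev_pad_config *cfg,
                       unsigned int pad,
                       enum v4l2_subdev_format_whence which)
{
        if (which == V4L2_SUBDEV_FORMAT_TRY)
                /* cfg can be NULL when no pad config was supplied */
                return cfg ? v4l2_subdev_get_try_format(&sensor->sd, cfg, pad)
                           : NULL;

        return &sensor->format;         /* active format in driver state */
}

Both get_fmt and set_fmt then funnel through this helper and differ only in the direction of the copy.
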
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index b1c583239dab..de803a11efb4 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -996,7 +996,7 @@ static int s5k6aa_s_frame_interval(struct v4l2_subdev *sd,
* V4L2 subdev pad level and video operations
*/
static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1023,7 +1023,7 @@ static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
}
static int s5k6aa_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k6aa_formats))
@@ -1034,7 +1034,7 @@ static int s5k6aa_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5k6aa_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
int i = ARRAY_SIZE(s5k6aa_formats);
@@ -1056,14 +1056,14 @@ static int s5k6aa_enum_frame_size(struct v4l2_subdev *sd,
}
static struct v4l2_rect *
-__s5k6aa_get_crop_rect(struct s5k6aa *s5k6aa, struct v4l2_subdev_fh *fh,
+__s5k6aa_get_crop_rect(struct s5k6aa *s5k6aa, struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
return &s5k6aa->ccd_rect;
WARN_ON(which != V4L2_SUBDEV_FORMAT_TRY);
- return v4l2_subdev_get_try_crop(fh, 0);
+ return v4l2_subdev_get_try_crop(&s5k6aa->sd, cfg, 0);
}
static void s5k6aa_try_format(struct s5k6aa *s5k6aa,
@@ -1087,7 +1087,7 @@ static void s5k6aa_try_format(struct s5k6aa *s5k6aa,
mf->field = V4L2_FIELD_NONE;
}
-static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1096,7 +1096,7 @@ static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
memset(fmt->reserved, 0, sizeof(fmt->reserved));
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, 0);
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
fmt->format = *mf;
return 0;
}
@@ -1108,7 +1108,7 @@ static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
-static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1121,8 +1121,8 @@ static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
s5k6aa_try_format(s5k6aa, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
- crop = v4l2_subdev_get_try_crop(fh, 0);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ crop = v4l2_subdev_get_try_crop(sd, cfg, 0);
} else {
if (s5k6aa->streaming) {
ret = -EBUSY;
@@ -1162,7 +1162,7 @@ static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
static int s5k6aa_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1174,7 +1174,7 @@ static int s5k6aa_get_selection(struct v4l2_subdev *sd,
memset(sel->reserved, 0, sizeof(sel->reserved));
mutex_lock(&s5k6aa->lock);
- rect = __s5k6aa_get_crop_rect(s5k6aa, fh, sel->which);
+ rect = __s5k6aa_get_crop_rect(s5k6aa, cfg, sel->which);
sel->r = *rect;
mutex_unlock(&s5k6aa->lock);
@@ -1185,7 +1185,7 @@ static int s5k6aa_get_selection(struct v4l2_subdev *sd,
}
static int s5k6aa_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1197,13 +1197,13 @@ static int s5k6aa_set_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&s5k6aa->lock);
- crop_r = __s5k6aa_get_crop_rect(s5k6aa, fh, sel->which);
+ crop_r = __s5k6aa_get_crop_rect(s5k6aa, cfg, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
mf = &s5k6aa->preset->mbus_fmt;
s5k6aa->apply_crop = 1;
} else {
- mf = v4l2_subdev_get_try_format(fh, 0);
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
}
v4l_bound_align_image(&sel->r.width, mf->width,
S5K6AA_WIN_WIDTH_MAX, 1,
@@ -1424,8 +1424,8 @@ static int s5k6aa_initialize_ctrls(struct s5k6aa *s5k6aa)
*/
static int s5k6aa_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
- struct v4l2_rect *crop = v4l2_subdev_get_try_crop(fh, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
format->colorspace = s5k6aa_formats[0].colorspace;
format->code = s5k6aa_formats[0].code;
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index d47eff5d3101..557f25def3a0 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -344,7 +344,7 @@ static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = {
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GBRG, },
};
-const char *pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" };
+static const char *pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" };
#define to_csi_format_idx(fmt) (((unsigned long)(fmt) \
- (unsigned long)smiapp_csi_data_formats) \
@@ -1557,7 +1557,7 @@ static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
}
static int smiapp_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(subdev);
@@ -1611,13 +1611,13 @@ static u32 __smiapp_get_mbus_code(struct v4l2_subdev *subdev,
}
static int __smiapp_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
} else {
struct v4l2_rect *r;
@@ -1636,21 +1636,21 @@ static int __smiapp_get_format(struct v4l2_subdev *subdev,
}
static int smiapp_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
int rval;
mutex_lock(&sensor->mutex);
- rval = __smiapp_get_format(subdev, fh, fmt);
+ rval = __smiapp_get_format(subdev, cfg, fmt);
mutex_unlock(&sensor->mutex);
return rval;
}
static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_rect **crops,
struct v4l2_rect **comps, int which)
{
@@ -1666,12 +1666,12 @@ static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
} else {
if (crops) {
for (i = 0; i < subdev->entity.num_pads; i++) {
- crops[i] = v4l2_subdev_get_try_crop(fh, i);
+ crops[i] = v4l2_subdev_get_try_crop(subdev, cfg, i);
BUG_ON(!crops[i]);
}
}
if (comps) {
- *comps = v4l2_subdev_get_try_compose(fh,
+ *comps = v4l2_subdev_get_try_compose(subdev, cfg,
SMIAPP_PAD_SINK);
BUG_ON(!*comps);
}
@@ -1680,14 +1680,14 @@ static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
/* Changes require propagation only on sink pad. */
static void smiapp_propagate(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh, int which,
+ struct v4l2_subdev_pad_config *cfg, int which,
int target)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
struct v4l2_rect *comp, *crops[SMIAPP_PADS];
- smiapp_get_crop_compose(subdev, fh, crops, &comp, which);
+ smiapp_get_crop_compose(subdev, cfg, crops, &comp, which);
switch (target) {
case V4L2_SEL_TGT_CROP:
@@ -1730,7 +1730,7 @@ static const struct smiapp_csi_data_format
}
static int smiapp_set_format_source(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -1741,7 +1741,7 @@ static int smiapp_set_format_source(struct v4l2_subdev *subdev,
unsigned int i;
int rval;
- rval = __smiapp_get_format(subdev, fh, fmt);
+ rval = __smiapp_get_format(subdev, cfg, fmt);
if (rval)
return rval;
@@ -1783,7 +1783,7 @@ static int smiapp_set_format_source(struct v4l2_subdev *subdev,
}
static int smiapp_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -1795,7 +1795,7 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
if (fmt->pad == ssd->source_pad) {
int rval;
- rval = smiapp_set_format_source(subdev, fh, fmt);
+ rval = smiapp_set_format_source(subdev, cfg, fmt);
mutex_unlock(&sensor->mutex);
@@ -1817,7 +1817,7 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE],
sensor->limits[SMIAPP_LIMIT_MAX_Y_OUTPUT_SIZE]);
- smiapp_get_crop_compose(subdev, fh, crops, NULL, fmt->which);
+ smiapp_get_crop_compose(subdev, cfg, crops, NULL, fmt->which);
crops[ssd->sink_pad]->left = 0;
crops[ssd->sink_pad]->top = 0;
@@ -1825,7 +1825,7 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
crops[ssd->sink_pad]->height = fmt->format.height;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
ssd->sink_fmt = *crops[ssd->sink_pad];
- smiapp_propagate(subdev, fh, fmt->which,
+ smiapp_propagate(subdev, cfg, fmt->which,
V4L2_SEL_TGT_CROP);
mutex_unlock(&sensor->mutex);
@@ -1878,7 +1878,7 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
}
static void smiapp_set_compose_binner(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel,
struct v4l2_rect **crops,
struct v4l2_rect *comp)
@@ -1926,7 +1926,7 @@ static void smiapp_set_compose_binner(struct v4l2_subdev *subdev,
* result.
*/
static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel,
struct v4l2_rect **crops,
struct v4l2_rect *comp)
@@ -2042,25 +2042,25 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
}
/* We're only called on source pads. This function sets scaling. */
static int smiapp_set_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
struct v4l2_rect *comp, *crops[SMIAPP_PADS];
- smiapp_get_crop_compose(subdev, fh, crops, &comp, sel->which);
+ smiapp_get_crop_compose(subdev, cfg, crops, &comp, sel->which);
sel->r.top = 0;
sel->r.left = 0;
if (ssd == sensor->binner)
- smiapp_set_compose_binner(subdev, fh, sel, crops, comp);
+ smiapp_set_compose_binner(subdev, cfg, sel, crops, comp);
else
- smiapp_set_compose_scaler(subdev, fh, sel, crops, comp);
+ smiapp_set_compose_scaler(subdev, cfg, sel, crops, comp);
*comp = sel->r;
- smiapp_propagate(subdev, fh, sel->which,
+ smiapp_propagate(subdev, cfg, sel->which,
V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -2113,7 +2113,7 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
}
static int smiapp_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -2121,7 +2121,7 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
struct v4l2_rect *src_size, *crops[SMIAPP_PADS];
struct v4l2_rect _r;
- smiapp_get_crop_compose(subdev, fh, crops, NULL, sel->which);
+ smiapp_get_crop_compose(subdev, cfg, crops, NULL, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
if (sel->pad == ssd->sink_pad)
@@ -2132,15 +2132,15 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
if (sel->pad == ssd->sink_pad) {
_r.left = 0;
_r.top = 0;
- _r.width = v4l2_subdev_get_try_format(fh, sel->pad)
+ _r.width = v4l2_subdev_get_try_format(subdev, cfg, sel->pad)
->width;
- _r.height = v4l2_subdev_get_try_format(fh, sel->pad)
+ _r.height = v4l2_subdev_get_try_format(subdev, cfg, sel->pad)
->height;
src_size = &_r;
} else {
src_size =
v4l2_subdev_get_try_compose(
- fh, ssd->sink_pad);
+ subdev, cfg, ssd->sink_pad);
}
}
@@ -2158,14 +2158,14 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
*crops[sel->pad] = sel->r;
if (ssd != sensor->pixel_array && sel->pad == SMIAPP_PAD_SINK)
- smiapp_propagate(subdev, fh, sel->which,
+ smiapp_propagate(subdev, cfg, sel->which,
V4L2_SEL_TGT_CROP);
return 0;
}
static int __smiapp_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -2178,13 +2178,13 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
if (ret)
return ret;
- smiapp_get_crop_compose(subdev, fh, crops, &comp, sel->which);
+ smiapp_get_crop_compose(subdev, cfg, crops, &comp, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
sink_fmt = ssd->sink_fmt;
} else {
struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(fh, ssd->sink_pad);
+ v4l2_subdev_get_try_format(subdev, cfg, ssd->sink_pad);
sink_fmt.left = 0;
sink_fmt.top = 0;
@@ -2220,20 +2220,20 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
}
static int smiapp_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
int rval;
mutex_lock(&sensor->mutex);
- rval = __smiapp_get_selection(subdev, fh, sel);
+ rval = __smiapp_get_selection(subdev, cfg, sel);
mutex_unlock(&sensor->mutex);
return rval;
}
static int smiapp_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -2259,10 +2259,10 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- ret = smiapp_set_crop(subdev, fh, sel);
+ ret = smiapp_set_crop(subdev, cfg, sel);
break;
case V4L2_SEL_TGT_COMPOSE:
- ret = smiapp_set_compose(subdev, fh, sel);
+ ret = smiapp_set_compose(subdev, cfg, sel);
break;
default:
ret = -EINVAL;
@@ -2841,8 +2841,8 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
for (i = 0; i < ssd->npads; i++) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(fh, i);
- struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(fh, i);
+ v4l2_subdev_get_try_format(sd, fh->pad, i);
+ struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, i);
struct v4l2_rect *try_comp;
try_fmt->width = sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
@@ -2858,7 +2858,7 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
if (ssd != sensor->pixel_array)
continue;
- try_comp = v4l2_subdev_get_try_compose(fh, i);
+ try_comp = v4l2_subdev_get_try_compose(sd, fh->pad, i);
*try_comp = *try_crop;
}
@@ -2977,12 +2977,7 @@ static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
struct smiapp_platform_data *pdata;
struct v4l2_of_endpoint bus_cfg;
struct device_node *ep;
- struct property *prop;
- __be32 *val;
uint32_t asize;
-#ifdef CONFIG_OF
- unsigned int i;
-#endif
int rval;
if (!dev->of_node)
@@ -2993,10 +2988,8 @@ static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
return NULL;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- rval = -ENOMEM;
+ if (!pdata)
goto out_err;
- }
v4l2_of_parse_endpoint(ep, &bus_cfg);
@@ -3006,7 +2999,6 @@ static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
break;
/* FIXME: add CCP2 support. */
default:
- rval = -EINVAL;
goto out_err;
}
@@ -3030,8 +3022,7 @@ static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
dev_dbg(dev, "reset %d, nvm %d, clk %d, csi %d\n", pdata->xshutdown,
pdata->nvm_size, pdata->ext_clk, pdata->csi_signalling_mode);
- rval = of_get_property(
- dev->of_node, "link-frequencies", &asize) ? 0 : -ENOENT;
+ rval = of_get_property(ep, "link-frequencies", &asize) ? 0 : -ENOENT;
if (rval) {
dev_warn(dev, "can't get link-frequencies array size\n");
goto out_err;
@@ -3044,25 +3035,12 @@ static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
}
asize /= sizeof(*pdata->op_sys_clock);
- /*
- * Read a 64-bit array --- this will be replaced with a
- * of_property_read_u64_array() once it's merged.
- */
- prop = of_find_property(dev->of_node, "link-frequencies", NULL);
- if (!prop)
- goto out_err;
- if (!prop->value)
- goto out_err;
- if (asize * sizeof(*pdata->op_sys_clock) > prop->length)
- goto out_err;
- val = prop->value;
- if (IS_ERR(val))
+ rval = of_property_read_u64_array(
+ ep, "link-frequencies", pdata->op_sys_clock, asize);
+ if (rval) {
+ dev_warn(dev, "can't get link-frequencies\n");
goto out_err;
-
-#ifdef CONFIG_OF
- for (i = 0; i < asize; i++)
- pdata->op_sys_clock[i] = of_read_number(val + i * 2, 2);
-#endif
+ }
for (; asize > 0; asize--)
dev_dbg(dev, "freq %d: %lld\n", asize - 1,
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index 5992ea93257a..441e0fda24fe 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -1016,7 +1016,6 @@ static int mt9m111_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&mt9m111->subdev);
v4l2_clk_put(mt9m111->clk);
- v4l2_device_unregister_subdev(&mt9m111->subdev);
v4l2_ctrl_handler_free(&mt9m111->hdl);
return 0;
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 1fdce2f6f880..e3c907a97765 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -18,6 +18,9 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
@@ -283,6 +286,10 @@ struct ov2640_priv {
u32 cfmt_code;
struct v4l2_clk *clk;
const struct ov2640_win_size *win;
+
+ struct soc_camera_subdev_desc ssdd_dt;
+ struct gpio_desc *resetb_gpio;
+ struct gpio_desc *pwdn_gpio;
};
/*
@@ -1038,6 +1045,63 @@ static struct v4l2_subdev_ops ov2640_subdev_ops = {
.video = &ov2640_subdev_video_ops,
};
+/* OF probe functions */
+static int ov2640_hw_power(struct device *dev, int on)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ov2640_priv *priv = to_ov2640(client);
+
+ dev_dbg(&client->dev, "%s: %s the camera\n",
+ __func__, on ? "ENABLE" : "DISABLE");
+
+ if (priv->pwdn_gpio)
+ gpiod_direction_output(priv->pwdn_gpio, !on);
+
+ return 0;
+}
+
+static int ov2640_hw_reset(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ov2640_priv *priv = to_ov2640(client);
+
+ if (priv->resetb_gpio) {
+ /* Activate the resetb pin to perform a reset pulse */
+ gpiod_direction_output(priv->resetb_gpio, 1);
+ usleep_range(3000, 5000);
+ gpiod_direction_output(priv->resetb_gpio, 0);
+ }
+
+ return 0;
+}
+
+static int ov2640_probe_dt(struct i2c_client *client,
+ struct ov2640_priv *priv)
+{
+ /* Request the reset GPIO deasserted */
+ priv->resetb_gpio = devm_gpiod_get_optional(&client->dev, "resetb",
+ GPIOD_OUT_LOW);
+ if (!priv->resetb_gpio)
+ dev_dbg(&client->dev, "resetb gpio is not assigned!\n");
+ else if (IS_ERR(priv->resetb_gpio))
+ return PTR_ERR(priv->resetb_gpio);
+
+ /* Request the power down GPIO asserted */
+ priv->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "pwdn",
+ GPIOD_OUT_HIGH);
+ if (!priv->pwdn_gpio)
+ dev_dbg(&client->dev, "pwdn gpio is not assigned!\n");
+ else if (IS_ERR(priv->pwdn_gpio))
+ return PTR_ERR(priv->pwdn_gpio);
+
+ /* Initialize the soc_camera_subdev_desc */
+ priv->ssdd_dt.power = ov2640_hw_power;
+ priv->ssdd_dt.reset = ov2640_hw_reset;
+ client->dev.platform_data = &priv->ssdd_dt;
+
+ return 0;
+}
+
/*
* i2c_driver functions
*/
@@ -1049,12 +1113,6 @@ static int ov2640_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (!ssdd) {
- dev_err(&adapter->dev,
- "OV2640: Missing platform_data for driver\n");
- return -EINVAL;
- }
-
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
"OV2640: I2C-Adapter doesn't support SMBUS\n");
@@ -1068,6 +1126,22 @@ static int ov2640_probe(struct i2c_client *client,
return -ENOMEM;
}
+ priv->clk = v4l2_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(priv->clk))
+ return -EPROBE_DEFER;
+
+ if (!ssdd && !client->dev.of_node) {
+ dev_err(&client->dev, "Missing platform_data for driver\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
+ if (!ssdd) {
+ ret = ov2640_probe_dt(client, priv);
+ if (ret)
+ goto err_clk;
+ }
+
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 2);
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
@@ -1075,24 +1149,27 @@ static int ov2640_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
-
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto eclkget;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto err_clk;
}
ret = ov2640_video_probe(client);
- if (ret) {
- v4l2_clk_put(priv->clk);
-eclkget:
- v4l2_ctrl_handler_free(&priv->hdl);
- } else {
- dev_info(&adapter->dev, "OV2640 Probed\n");
- }
+ if (ret < 0)
+ goto err_videoprobe;
+
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (ret < 0)
+ goto err_videoprobe;
+
+ dev_info(&adapter->dev, "OV2640 Probed\n");
+ return 0;
+
+err_videoprobe:
+ v4l2_ctrl_handler_free(&priv->hdl);
+err_clk:
+ v4l2_clk_put(priv->clk);
return ret;
}
@@ -1100,6 +1177,7 @@ static int ov2640_remove(struct i2c_client *client)
{
struct ov2640_priv *priv = to_ov2640(client);
+ v4l2_async_unregister_subdev(&priv->subdev);
v4l2_clk_put(priv->clk);
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
@@ -1112,9 +1190,16 @@ static const struct i2c_device_id ov2640_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ov2640_id);
+static const struct of_device_id ov2640_of_match[] = {
+ {.compatible = "ovti,ov2640", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ov2640_of_match);
+
static struct i2c_driver ov2640_i2c_driver = {
.driver = {
.name = "ov2640",
+ .of_match_table = of_match_ptr(ov2640_of_match),
},
.probe = ov2640_probe,
.remove = ov2640_remove,
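
The ov2640 DT path added above leans on devm_gpiod_get_optional(), which returns NULL when the line simply isn't described and an ERR_PTR() on real failures (including probe deferral), so the two cases need separate handling. A reduced sketch of that request pattern (the "example-reset" consumer name is hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int mydrv_get_reset_gpio(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *gpio;

        /* Request the line deasserted; polarity comes from the DT flags. */
        gpio = devm_gpiod_get_optional(dev, "example-reset", GPIOD_OUT_LOW);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);   /* real error, e.g. -EPROBE_DEFER */
        if (!gpio)
                dev_dbg(dev, "no reset gpio described, continuing without\n");

        *out = gpio;
        return 0;
}
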
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index ed9ae8875348..9f7fdb6b61ca 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -52,10 +52,6 @@ MODULE_DESCRIPTION("TI THS7303 video amplifier driver");
MODULE_AUTHOR("Chaithrika U S");
MODULE_LICENSE("GPL");
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level 0-1");
-
static inline struct ths7303_state *to_state(struct v4l2_subdev *sd)
{
return container_of(sd, struct ths7303_state, sd);
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 4ebd329d7b42..73fc42bc2de6 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -479,7 +479,6 @@ static int ths8200_remove(struct i2c_client *client)
ths8200_s_power(sd, false);
v4l2_async_unregister_subdev(&decoder->sd);
- v4l2_device_unregister_subdev(sd);
return 0;
}
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 204204259ac6..1c6bc306ecdc 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -923,13 +923,13 @@ static const struct v4l2_ctrl_ops tvp514x_ctrl_ops = {
/**
* tvp514x_enum_mbus_code() - V4L2 decoder interface handler for enum_mbus_code
* @sd: pointer to standard V4L2 sub-device structure
- * @fh: file handle
+ * @cfg: pad configuration
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*
 * Enumerates mbus codes supported
*/
static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
u32 pad = code->pad;
@@ -950,13 +950,13 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
/**
* tvp514x_get_pad_format() - V4L2 decoder interface handler for get pad format
* @sd: pointer to standard V4L2 sub-device structure
- * @fh: file handle
+ * @cfg: pad configuration
* @format: pointer to v4l2_subdev_format structure
*
* Retrieves pad format which is active or tried based on requirement
*/
static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
@@ -979,13 +979,13 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
/**
* tvp514x_set_pad_format() - V4L2 decoder interface handler for set pad format
* @sd: pointer to standard V4L2 sub-device structure
- * @fh: file handle
+ * @cfg: pad configuration
* @format: pointer to v4l2_subdev_format structure
*
* Set pad format for the output pad
*/
static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
@@ -1209,7 +1209,6 @@ static int tvp514x_remove(struct i2c_client *client)
struct tvp514x_decoder *decoder = to_decoder(sd);
v4l2_async_unregister_subdev(&decoder->sd);
- v4l2_device_unregister_subdev(sd);
#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&decoder->sd.entity);
#endif
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index fe4870e22cfe..787cdfb08749 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -846,13 +846,13 @@ static const struct v4l2_ctrl_ops tvp7002_ctrl_ops = {
/*
* tvp7002_enum_mbus_code() - Enum supported digital video format on pad
* @sd: pointer to standard V4L2 sub-device structure
- * @fh: file handle for the subdev
+ * @cfg: pad configuration
* @code: pointer to subdev enum mbus code struct
*
* Enumerate supported digital video formats for pad.
*/
static int
-tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Check requested format index is within range */
@@ -867,13 +867,13 @@ tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* tvp7002_get_pad_format() - get video format on pad
* @sd: pointer to standard V4L2 sub-device structure
- * @fh: file handle for the subdev
+ * @cfg: pad configuration
* @fmt: pointer to subdev format struct
*
* get video format for pad.
*/
static int
-tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct tvp7002 *tvp7002 = to_tvp7002(sd);
@@ -890,16 +890,16 @@ tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* tvp7002_set_pad_format() - set video format on pad
* @sd: pointer to standard V4L2 sub-device structure
- * @fh: file handle for the subdev
+ * @cfg: pad configuration
* @fmt: pointer to subdev format struct
*
* set video format for pad.
*/
static int
-tvp7002_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+tvp7002_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
- return tvp7002_get_pad_format(sd, fh, fmt);
+ return tvp7002_get_pad_format(sd, cfg, fmt);
}
/* V4L2 core operation handlers */
@@ -1116,7 +1116,6 @@ static int tvp7002_remove(struct i2c_client *c)
#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&device->sd.entity);
#endif
- v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&device->hdl);
return 0;
}
diff --git a/drivers/media/mmc/siano/smssdio.c b/drivers/media/mmc/siano/smssdio.c
index 912c2814c6cf..fee2d710bbf8 100644
--- a/drivers/media/mmc/siano/smssdio.c
+++ b/drivers/media/mmc/siano/smssdio.c
@@ -32,6 +32,8 @@
* Fix stop command
*/
+#include "smscoreapi.h"
+
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/firmware.h>
@@ -41,7 +43,6 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/module.h>
-#include "smscoreapi.h"
#include "sms-cards.h"
#include "smsendian.h"
@@ -141,14 +142,14 @@ static void smssdio_interrupt(struct sdio_func *func)
*/
(void)sdio_readb(func, SMSSDIO_INT, &ret);
if (ret) {
- sms_err("Unable to read interrupt register!\n");
+ pr_err("Unable to read interrupt register!\n");
return;
}
if (smsdev->split_cb == NULL) {
cb = smscore_getbuffer(smsdev->coredev);
if (!cb) {
- sms_err("Unable to allocate data buffer!\n");
+ pr_err("Unable to allocate data buffer!\n");
return;
}
@@ -157,7 +158,7 @@ static void smssdio_interrupt(struct sdio_func *func)
SMSSDIO_DATA,
SMSSDIO_BLOCK_SIZE);
if (ret) {
- sms_err("Error %d reading initial block!\n", ret);
+ pr_err("Error %d reading initial block!\n", ret);
return;
}
@@ -198,7 +199,7 @@ static void smssdio_interrupt(struct sdio_func *func)
size);
if (ret && ret != -EINVAL) {
smscore_putbuffer(smsdev->coredev, cb);
- sms_err("Error %d reading data from card!\n", ret);
+ pr_err("Error %d reading data from card!\n", ret);
return;
}
@@ -216,8 +217,8 @@ static void smssdio_interrupt(struct sdio_func *func)
smsdev->func->cur_blksize);
if (ret) {
smscore_putbuffer(smsdev->coredev, cb);
- sms_err("Error %d reading "
- "data from card!\n", ret);
+ pr_err("Error %d reading data from card!\n",
+ ret);
return;
}
@@ -278,7 +279,7 @@ static int smssdio_probe(struct sdio_func *func,
goto free;
}
- ret = smscore_register_device(&params, &smsdev->coredev);
+ ret = smscore_register_device(&params, &smsdev->coredev, NULL);
if (ret < 0)
goto free;
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 0939d399b774..8aa726651630 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -416,9 +416,6 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
int result = 0;
unsigned char lat;
struct bt878 *bt;
-#if defined(__powerpc__)
- unsigned int cmd;
-#endif
unsigned int cardid;
printk(KERN_INFO "bt878: Bt878 AUDIO function found (%d).\n",
@@ -461,15 +458,6 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
printk("irq: %d, latency: %d, memory: 0x%lx\n",
bt->irq, lat, bt->bt878_adr);
-
-#if defined(__powerpc__)
- /* on OpenFirmware machines (PowerMac at least), PCI memory cycle */
- /* response on cards with no firmware is not enabled by OF */
- pci_read_config_dword(dev, PCI_COMMAND, &cmd);
- cmd = (cmd | PCI_COMMAND_MEMORY);
- pci_write_config_dword(dev, PCI_COMMAND, cmd);
-#endif
-
#ifdef __sparc__
bt->bt878_mem = (unsigned char *) bt->bt878_adr;
#else
diff --git a/drivers/media/pci/bt8xx/bt878.h b/drivers/media/pci/bt8xx/bt878.h
index d19b59299d78..49af240b5894 100644
--- a/drivers/media/pci/bt8xx/bt878.h
+++ b/drivers/media/pci/bt8xx/bt878.h
@@ -142,18 +142,7 @@ void bt878_start(struct bt878 *bt, u32 controlreg, u32 op_sync_orin,
u32 irq_err_ignore);
void bt878_stop(struct bt878 *bt);
-#if defined(__powerpc__) /* big-endian */
-static inline void io_st_le32(volatile unsigned __iomem *addr, unsigned val)
-{
- st_le32(addr, val);
- eieio();
-}
-
-#define bmtwrite(dat,adr) io_st_le32((adr),(dat))
-#define bmtread(adr) ld_le32((adr))
-#else
#define bmtwrite(dat,adr) writel((dat), (adr))
#define bmtread(adr) readl(adr)
-#endif
#endif
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 4ec2a3c3f23c..bc12060e0882 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2474,7 +2474,7 @@ static int bttv_querycap(struct file *file, void *priv,
return -EINVAL;
strlcpy(cap->driver, "bttv", sizeof(cap->driver));
- strlcpy(cap->card, btv->video_dev->name, sizeof(cap->card));
+ strlcpy(cap->card, btv->video_dev.name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"PCI:%s", pci_name(btv->c.pci));
cap->capabilities =
@@ -2484,9 +2484,9 @@ static int bttv_querycap(struct file *file, void *priv,
V4L2_CAP_DEVICE_CAPS;
if (no_overlay <= 0)
cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY;
- if (btv->vbi_dev)
+ if (video_is_registered(&btv->vbi_dev))
cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
- if (btv->radio_dev)
+ if (video_is_registered(&btv->radio_dev))
cap->capabilities |= V4L2_CAP_RADIO;
/*
@@ -3905,18 +3905,14 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
/* ----------------------------------------------------------------------- */
/* initialization */
-static struct video_device *vdev_init(struct bttv *btv,
- const struct video_device *template,
- const char *type_name)
+static void vdev_init(struct bttv *btv,
+ struct video_device *vfd,
+ const struct video_device *template,
+ const char *type_name)
{
- struct video_device *vfd;
-
- vfd = video_device_alloc();
- if (NULL == vfd)
- return NULL;
*vfd = *template;
vfd->v4l2_dev = &btv->c.v4l2_dev;
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
video_set_drvdata(vfd, btv);
snprintf(vfd->name, sizeof(vfd->name), "BT%d%s %s (%s)",
btv->id, (btv->id==848 && btv->revision==0x12) ? "A" : "",
@@ -3927,32 +3923,13 @@ static struct video_device *vdev_init(struct bttv *btv,
v4l2_disable_ioctl(vfd, VIDIOC_G_TUNER);
v4l2_disable_ioctl(vfd, VIDIOC_S_TUNER);
}
- return vfd;
}
static void bttv_unregister_video(struct bttv *btv)
{
- if (btv->video_dev) {
- if (video_is_registered(btv->video_dev))
- video_unregister_device(btv->video_dev);
- else
- video_device_release(btv->video_dev);
- btv->video_dev = NULL;
- }
- if (btv->vbi_dev) {
- if (video_is_registered(btv->vbi_dev))
- video_unregister_device(btv->vbi_dev);
- else
- video_device_release(btv->vbi_dev);
- btv->vbi_dev = NULL;
- }
- if (btv->radio_dev) {
- if (video_is_registered(btv->radio_dev))
- video_unregister_device(btv->radio_dev);
- else
- video_device_release(btv->radio_dev);
- btv->radio_dev = NULL;
- }
+ video_unregister_device(&btv->video_dev);
+ video_unregister_device(&btv->vbi_dev);
+ video_unregister_device(&btv->radio_dev);
}
/* register video4linux devices */
@@ -3962,44 +3939,38 @@ static int bttv_register_video(struct bttv *btv)
pr_notice("Overlay support disabled\n");
/* video */
- btv->video_dev = vdev_init(btv, &bttv_video_template, "video");
+ vdev_init(btv, &btv->video_dev, &bttv_video_template, "video");
- if (NULL == btv->video_dev)
- goto err;
- if (video_register_device(btv->video_dev, VFL_TYPE_GRABBER,
+ if (video_register_device(&btv->video_dev, VFL_TYPE_GRABBER,
video_nr[btv->c.nr]) < 0)
goto err;
pr_info("%d: registered device %s\n",
- btv->c.nr, video_device_node_name(btv->video_dev));
- if (device_create_file(&btv->video_dev->dev,
+ btv->c.nr, video_device_node_name(&btv->video_dev));
+ if (device_create_file(&btv->video_dev.dev,
&dev_attr_card)<0) {
pr_err("%d: device_create_file 'card' failed\n", btv->c.nr);
goto err;
}
/* vbi */
- btv->vbi_dev = vdev_init(btv, &bttv_video_template, "vbi");
+ vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi");
- if (NULL == btv->vbi_dev)
- goto err;
- if (video_register_device(btv->vbi_dev, VFL_TYPE_VBI,
+ if (video_register_device(&btv->vbi_dev, VFL_TYPE_VBI,
vbi_nr[btv->c.nr]) < 0)
goto err;
pr_info("%d: registered device %s\n",
- btv->c.nr, video_device_node_name(btv->vbi_dev));
+ btv->c.nr, video_device_node_name(&btv->vbi_dev));
if (!btv->has_radio)
return 0;
/* radio */
- btv->radio_dev = vdev_init(btv, &radio_template, "radio");
- if (NULL == btv->radio_dev)
- goto err;
- btv->radio_dev->ctrl_handler = &btv->radio_ctrl_handler;
- if (video_register_device(btv->radio_dev, VFL_TYPE_RADIO,
+ vdev_init(btv, &btv->radio_dev, &radio_template, "radio");
+ btv->radio_dev.ctrl_handler = &btv->radio_ctrl_handler;
+ if (video_register_device(&btv->radio_dev, VFL_TYPE_RADIO,
radio_nr[btv->c.nr]) < 0)
goto err;
pr_info("%d: registered device %s\n",
- btv->c.nr, video_device_node_name(btv->radio_dev));
+ btv->c.nr, video_device_node_name(&btv->radio_dev));
/* all done */
return 0;
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index bc048c586b1f..a444cfb35c0b 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -404,9 +404,9 @@ struct bttv {
struct v4l2_subdev *sd_tda7432;
/* video4linux (1) */
- struct video_device *video_dev;
- struct video_device *radio_dev;
- struct video_device *vbi_dev;
+ struct video_device video_dev;
+ struct video_device radio_dev;
+ struct video_device vbi_dev;
/* controls */
struct v4l2_ctrl_handler ctrl_handler;
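
The bttv rework above embeds the video_device structures in struct bttv instead of allocating them separately: allocation failures go away, .release becomes video_device_release_empty because the memory belongs to the parent structure, and teardown collapses to plain video_unregister_device() calls (which do nothing for devices that never got registered). The registration side of that pattern, as a sketch with hypothetical names:

#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct mydrv {
        struct v4l2_device v4l2_dev;
        struct video_device vdev;       /* embedded, not a pointer */
};

static int mydrv_register_video(struct mydrv *dev,
                                const struct video_device *tmpl)
{
        dev->vdev = *tmpl;
        dev->vdev.v4l2_dev = &dev->v4l2_dev;
        /* Nothing to kfree() on release: the parent owns the memory. */
        dev->vdev.release = video_device_release_empty;
        video_set_drvdata(&dev->vdev, dev);

        return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
}
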
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index ea272bcb38df..0b0e8015ad34 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -216,7 +216,7 @@ static int cx18_alsa_load(struct cx18 *cx)
}
s = &cx->streams[CX18_ENC_STREAM_TYPE_PCM];
- if (s->video_dev == NULL) {
+ if (s->video_dev.v4l2_dev == NULL) {
CX18_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
"skipping\n", __func__);
return 0;
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 207d6e82403b..b15beed2dc14 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -373,7 +373,7 @@ struct cx18_in_work_order {
struct cx18_stream {
/* These first five fields are always set, even if the stream
is not actually created. */
- struct video_device *video_dev; /* NULL when stream not created */
+ struct video_device video_dev; /* v4l2_dev is NULL when stream not created */
struct cx18_dvb *dvb; /* DVB / Digital Transport */
struct cx18 *cx; /* for ease of use */
const char *name; /* name of the stream */
@@ -409,6 +409,7 @@ struct cx18_stream {
/* Videobuf for YUV video */
u32 pixelformat;
u32 vb_bytes_per_frame;
+ u32 vb_bytes_per_line;
struct list_head vb_capture; /* video capture queue */
spinlock_t vb_lock;
struct timer_list vb_timeout;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 76a3b4ac541e..df837408efd5 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -34,6 +34,7 @@
#include "cx18-controls.h"
#include "cx18-ioctl.h"
#include "cx18-cards.h"
+#include <media/v4l2-event.h>
/* This function tries to claim the stream for a specific file descriptor.
If no one else is using this stream then the stream is claimed and
@@ -609,13 +610,16 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct cx18_open_id *id = file2id(filp);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags);
+ unsigned res = 0;
/* Start a capture if there is none */
- if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
+ if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) &&
+ (req_events & (POLLIN | POLLRDNORM))) {
int rc;
mutex_lock(&cx->serialize_lock);
@@ -632,21 +636,26 @@ unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(id->type == CX18_ENC_STREAM_TYPE_YUV)) {
int videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
+
+ if (v4l2_event_pending(&id->fh))
+ res |= POLLPRI;
if (eof && videobuf_poll == POLLERR)
- return POLLHUP;
- else
- return videobuf_poll;
+ return res | POLLHUP;
+ return res | videobuf_poll;
}
/* add stream's waitq to the poll list */
CX18_DEBUG_HI_FILE("Encoder poll\n");
- poll_wait(filp, &s->waitq, wait);
+ if (v4l2_event_pending(&id->fh))
+ res |= POLLPRI;
+ else
+ poll_wait(filp, &s->waitq, wait);
if (atomic_read(&s->q_full.depth))
- return POLLIN | POLLRDNORM;
+ return res | POLLIN | POLLRDNORM;
if (eof)
- return POLLHUP;
- return 0;
+ return res | POLLHUP;
+ return res;
}
int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
@@ -797,7 +806,7 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
CX18_DEBUG_WARN("nomem on v4l2 open\n");
return -ENOMEM;
}
- v4l2_fh_init(&item->fh, s->video_dev);
+ v4l2_fh_init(&item->fh, &s->video_dev);
item->cx = cx;
item->type = s->type;
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index b8e4b68a9196..79aee30d5fd8 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -39,6 +39,7 @@
#include "cx18-cards.h"
#include "cx18-av-core.h"
#include <media/tveeprom.h>
+#include <media/v4l2-event.h>
u16 cx18_service2vbi(int type)
{
@@ -159,7 +160,7 @@ static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
if (id->type == CX18_ENC_STREAM_TYPE_YUV) {
pixfmt->pixelformat = s->pixelformat;
pixfmt->sizeimage = s->vb_bytes_per_frame;
- pixfmt->bytesperline = 720;
+ pixfmt->bytesperline = s->vb_bytes_per_line;
} else {
pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
pixfmt->sizeimage = 128 * 1024;
@@ -287,10 +288,13 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
s->pixelformat = fmt->fmt.pix.pixelformat;
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_HM12) {
s->vb_bytes_per_frame = h * 720 * 3 / 2;
- else
+ s->vb_bytes_per_line = 720; /* First plane */
+ } else {
s->vb_bytes_per_frame = h * 720 * 2;
+ s->vb_bytes_per_line = 1440; /* Packed */
+ }
mbus_fmt.width = cx->cxhdl.width = w;
mbus_fmt.height = cx->cxhdl.height = h;
@@ -447,34 +451,29 @@ static int cx18_cropcap(struct file *file, void *fh,
if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- cropcap->bounds.top = cropcap->bounds.left = 0;
- cropcap->bounds.width = 720;
- cropcap->bounds.height = cx->is_50hz ? 576 : 480;
cropcap->pixelaspect.numerator = cx->is_50hz ? 59 : 10;
cropcap->pixelaspect.denominator = cx->is_50hz ? 54 : 11;
- cropcap->defrect = cropcap->bounds;
return 0;
}
-static int cx18_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
-{
- struct cx18_open_id *id = fh2id(fh);
- struct cx18 *cx = id->cx;
-
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- CX18_DEBUG_WARN("VIDIOC_S_CROP not implemented\n");
- return -EINVAL;
-}
-
-static int cx18_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+static int cx18_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
{
struct cx18 *cx = fh2id(fh)->cx;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- CX18_DEBUG_WARN("VIDIOC_G_CROP not implemented\n");
- return -EINVAL;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = sel->r.left = 0;
+ sel->r.width = 720;
+ sel->r.height = cx->is_50hz ? 576 : 480;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
@@ -510,6 +509,9 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
{
struct cx18_open_id *id = fh2id(fh);
struct cx18 *cx = id->cx;
+ v4l2_std_id std = V4L2_STD_ALL;
+ const struct cx18_card_video_input *card_input =
+ cx->card->video_inputs + inp;
if (inp >= cx->nof_inputs)
return -EINVAL;
@@ -525,6 +527,11 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
cx->active_input = inp;
/* Set the audio input to whatever is appropriate for the input type. */
cx->audio_input = cx->card->video_inputs[inp].audio_index;
+ if (card_input->video_type == V4L2_INPUT_TYPE_TUNER)
+ std = cx->tuner_std;
+ cx->streams[CX18_ENC_STREAM_TYPE_MPG].video_dev.tvnorms = std;
+ cx->streams[CX18_ENC_STREAM_TYPE_YUV].video_dev.tvnorms = std;
+ cx->streams[CX18_ENC_STREAM_TYPE_VBI].video_dev.tvnorms = std;
/* prevent others from messing with the streams until
we're finished changing inputs. */
@@ -1036,7 +1043,7 @@ static int cx18_log_status(struct file *file, void *fh)
for (i = 0; i < CX18_MAX_STREAMS; i++) {
struct cx18_stream *s = &cx->streams[i];
- if (s->video_dev == NULL || s->buffers == 0)
+ if (s->video_dev.v4l2_dev == NULL || s->buffers == 0)
continue;
CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n",
s->name, s->s_flags,
@@ -1078,8 +1085,7 @@ static const struct v4l2_ioctl_ops cx18_ioctl_ops = {
.vidioc_enumaudio = cx18_enumaudio,
.vidioc_enum_input = cx18_enum_input,
.vidioc_cropcap = cx18_cropcap,
- .vidioc_s_crop = cx18_s_crop,
- .vidioc_g_crop = cx18_g_crop,
+ .vidioc_g_selection = cx18_g_selection,
.vidioc_g_input = cx18_g_input,
.vidioc_s_input = cx18_s_input,
.vidioc_g_frequency = cx18_g_frequency,
@@ -1114,6 +1120,8 @@ static const struct v4l2_ioctl_ops cx18_ioctl_ops = {
.vidioc_querybuf = cx18_querybuf,
.vidioc_qbuf = cx18_qbuf,
.vidioc_dqbuf = cx18_dqbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
void cx18_set_funcs(struct video_device *vdev)
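
The ioctl table change above is the standard cropcap-to-selection conversion: the bounds and default rectangles that used to be filled in by VIDIOC_CROPCAP move into a vidioc_g_selection handler keyed on the crop targets, and cropcap is left with only the pixel aspect ratio. Trimmed to a sketch (MY_WIDTH/MY_HEIGHT stand in for the device limits):

static int mydrv_g_selection(struct file *file, void *fh,
                             struct v4l2_selection *sel)
{
        if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;

        switch (sel->target) {
        case V4L2_SEL_TGT_CROP_BOUNDS:
        case V4L2_SEL_TGT_CROP_DEFAULT:
                sel->r.left = 0;
                sel->r.top = 0;
                sel->r.width = MY_WIDTH;        /* e.g. 720 */
                sel->r.height = MY_HEIGHT;      /* e.g. 576 or 480 */
                return 0;
        default:
                return -EINVAL;
        }
}
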
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 369445fcf3e5..c82d25d53341 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -254,11 +254,8 @@ static struct videobuf_queue_ops cx18_videobuf_qops = {
static void cx18_stream_init(struct cx18 *cx, int type)
{
struct cx18_stream *s = &cx->streams[type];
- struct video_device *video_dev = s->video_dev;
- /* we need to keep video_dev, so restore it afterwards */
memset(s, 0, sizeof(*s));
- s->video_dev = video_dev;
/* initialize cx18_stream fields */
s->dvb = NULL;
@@ -307,6 +304,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
/* Assume the previous pixel default */
s->pixelformat = V4L2_PIX_FMT_HM12;
s->vb_bytes_per_frame = cx->cxhdl.height * 720 * 3 / 2;
+ s->vb_bytes_per_line = 720;
}
}
@@ -319,12 +317,12 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
/*
* These five fields are always initialized.
- * For analog capture related streams, if video_dev == NULL then the
+ * For analog capture related streams, if video_dev.v4l2_dev == NULL then the
* stream is not in use.
* For the TS stream, if dvb == NULL then the stream is not in use.
* In those cases no other fields but these four can be used.
*/
- s->video_dev = NULL;
+ s->video_dev.v4l2_dev = NULL;
s->dvb = NULL;
s->cx = cx;
s->type = type;
@@ -367,24 +365,20 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
if (num_offset == -1)
return 0;
- /* allocate and initialize the v4l2 video device structure */
- s->video_dev = video_device_alloc();
- if (s->video_dev == NULL) {
- CX18_ERR("Couldn't allocate v4l2 video_device for %s\n",
- s->name);
- return -ENOMEM;
- }
-
- snprintf(s->video_dev->name, sizeof(s->video_dev->name), "%s %s",
+ /* initialize the v4l2 video device structure */
+ snprintf(s->video_dev.name, sizeof(s->video_dev.name), "%s %s",
cx->v4l2_dev.name, s->name);
- s->video_dev->num = num;
- s->video_dev->v4l2_dev = &cx->v4l2_dev;
- s->video_dev->fops = &cx18_v4l2_enc_fops;
- s->video_dev->release = video_device_release;
- s->video_dev->tvnorms = V4L2_STD_ALL;
- s->video_dev->lock = &cx->serialize_lock;
- cx18_set_funcs(s->video_dev);
+ s->video_dev.num = num;
+ s->video_dev.v4l2_dev = &cx->v4l2_dev;
+ s->video_dev.fops = &cx18_v4l2_enc_fops;
+ s->video_dev.release = video_device_release_empty;
+ if (cx->card->video_inputs->video_type == CX18_CARD_INPUT_VID_TUNER)
+ s->video_dev.tvnorms = cx->tuner_std;
+ else
+ s->video_dev.tvnorms = V4L2_STD_ALL;
+ s->video_dev.lock = &cx->serialize_lock;
+ cx18_set_funcs(&s->video_dev);
return 0;
}
@@ -428,31 +422,30 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
}
}
- if (s->video_dev == NULL)
+ if (s->video_dev.v4l2_dev == NULL)
return 0;
- num = s->video_dev->num;
+ num = s->video_dev.num;
/* card number + user defined offset + device offset */
if (type != CX18_ENC_STREAM_TYPE_MPG) {
struct cx18_stream *s_mpg = &cx->streams[CX18_ENC_STREAM_TYPE_MPG];
- if (s_mpg->video_dev)
- num = s_mpg->video_dev->num
+ if (s_mpg->video_dev.v4l2_dev)
+ num = s_mpg->video_dev.num
+ cx18_stream_info[type].num_offset;
}
- video_set_drvdata(s->video_dev, s);
+ video_set_drvdata(&s->video_dev, s);
/* Register device. First try the desired minor, then any free one. */
- ret = video_register_device_no_warn(s->video_dev, vfl_type, num);
+ ret = video_register_device_no_warn(&s->video_dev, vfl_type, num);
if (ret < 0) {
CX18_ERR("Couldn't register v4l2 device for %s (device node number %d)\n",
s->name, num);
- video_device_release(s->video_dev);
- s->video_dev = NULL;
+ s->video_dev.v4l2_dev = NULL;
return ret;
}
- name = video_device_node_name(s->video_dev);
+ name = video_device_node_name(&s->video_dev);
switch (vfl_type) {
case VFL_TYPE_GRABBER:
@@ -542,10 +535,9 @@ void cx18_streams_cleanup(struct cx18 *cx, int unregister)
}
/* If struct video_device exists, can have buffers allocated */
- vdev = cx->streams[type].video_dev;
+ vdev = &cx->streams[type].video_dev;
- cx->streams[type].video_dev = NULL;
- if (vdev == NULL)
+ if (vdev->v4l2_dev == NULL)
continue;
if (type == CX18_ENC_STREAM_TYPE_YUV)
@@ -553,11 +545,7 @@ void cx18_streams_cleanup(struct cx18 *cx, int unregister)
cx18_stream_free(&cx->streams[type]);
- /* Unregister or release device */
- if (unregister)
- video_unregister_device(vdev);
- else
- video_device_release(vdev);
+ video_unregister_device(vdev);
}
}
@@ -1042,7 +1030,7 @@ u32 cx18_find_handle(struct cx18 *cx)
for (i = 0; i < CX18_MAX_STREAMS; i++) {
struct cx18_stream *s = &cx->streams[i];
- if (s->video_dev && (s->handle != CX18_INVALID_TASK_HANDLE))
+ if (s->video_dev.v4l2_dev && (s->handle != CX18_INVALID_TASK_HANDLE))
return s->handle;
}
return CX18_INVALID_TASK_HANDLE;
diff --git a/drivers/media/pci/cx18/cx18-streams.h b/drivers/media/pci/cx18/cx18-streams.h
index 713b0e61536d..27f8af9b11cd 100644
--- a/drivers/media/pci/cx18/cx18-streams.h
+++ b/drivers/media/pci/cx18/cx18-streams.h
@@ -33,7 +33,7 @@ void cx18_stream_rotate_idx_mdls(struct cx18 *cx);
static inline bool cx18_stream_enabled(struct cx18_stream *s)
{
- return s->video_dev ||
+ return s->video_dev.v4l2_dev ||
(s->dvb && s->dvb->enabled) ||
(s->type == CX18_ENC_STREAM_TYPE_IDX &&
s->cx->stream_buffers[CX18_ENC_STREAM_TYPE_IDX] != 0);
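Several conversions in this series follow the same shape: struct video_device is embedded in the per-stream or per-device state, .release is set to video_device_release_empty() because there is no separate allocation left to free, and vdev.v4l2_dev == NULL takes over as the "stream not in use" marker. A generic sketch of that pattern, with my_stream, my_stream_register() and sketch_fops as placeholder names rather than identifiers from any driver above:

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct my_stream {
	struct video_device vdev;	/* embedded, no separate allocation */
	struct v4l2_device *v4l2_dev;
};

static const struct v4l2_file_operations sketch_fops = {
	.owner = THIS_MODULE,		/* real drivers wire up open/release/ioctl here */
};

static int my_stream_register(struct my_stream *s)
{
	/* vdev.v4l2_dev doubles as the "stream is in use" marker. */
	s->vdev.v4l2_dev = s->v4l2_dev;
	s->vdev.fops = &sketch_fops;
	s->vdev.release = video_device_release_empty;	/* nothing left to kfree() */
	video_set_drvdata(&s->vdev, s);
	return video_register_device(&s->vdev, VFL_TYPE_GRABBER, -1);
}

static void my_stream_unregister(struct my_stream *s)
{
	if (s->vdev.v4l2_dev)		/* skip streams that were never set up */
		video_unregister_device(&s->vdev);
}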
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index 74d774e5227b..2e1b88ccdbf2 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -40,7 +40,6 @@ config VIDEO_CX23885
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
---help---
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index 2bbbf545b042..0a91df2c9f08 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -483,7 +483,6 @@ static void altera_hw_filt_release(void *main_dev, int filt_nr)
}
}
-EXPORT_SYMBOL(altera_hw_filt_release);
void altera_ci_release(void *dev, int ci_nr)
{
@@ -598,7 +597,6 @@ static int altera_pid_feed_control(void *demux_dev, int filt_nr,
return 0;
}
-EXPORT_SYMBOL(altera_pid_feed_control);
static int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
{
@@ -699,7 +697,6 @@ err:
return ret;
}
-EXPORT_SYMBOL(altera_hw_filt_init);
int altera_ci_init(struct altera_ci_config *config, int ci_nr)
{
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h
index 5028f0cf83f4..6c511723fd1b 100644
--- a/drivers/media/pci/cx23885/altera-ci.h
+++ b/drivers/media/pci/cx23885/altera-ci.h
@@ -39,7 +39,7 @@ struct altera_ci_config {
int (*fpga_rw) (void *dev, int ad_rg, int val, int rw);
};
-#if IS_ENABLED(CONFIG_MEDIA_ALTERA_CI)
+#if IS_REACHABLE(CONFIG_MEDIA_ALTERA_CI)
extern int altera_ci_init(struct altera_ci_config *config, int ci_nr);
extern void altera_ci_release(void *dev, int ci_nr);
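The altera-ci.h hunk switches the guard from IS_ENABLED() to IS_REACHABLE(): IS_ENABLED(CONFIG_X) is true for y or m, while IS_REACHABLE(CONFIG_X) is only true when the symbol can actually be linked from the current object, that is CONFIG_X=y, or CONFIG_X=m with the caller built as a module as well. The header pattern this protects looks roughly like the sketch below; the stub body is an approximation of the fallback, not a quote of the header:

#include <linux/kconfig.h>
#include <linux/printk.h>

struct altera_ci_config;	/* declared earlier in altera-ci.h */

#if IS_REACHABLE(CONFIG_MEDIA_ALTERA_CI)
extern int altera_ci_init(struct altera_ci_config *config, int ci_nr);
#else
static inline int altera_ci_init(struct altera_ci_config *config, int ci_nr)
{
	/* CI driver is a module but this caller is built-in: the symbol is unreachable. */
	pr_warn("altera-ci: driver not reachable, CI disabled\n");
	return 0;
}
#endif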
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 1ad49946d7fa..7aee76af7a85 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -825,6 +825,7 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
int i;
spin_lock_init(&dev->pci_irqmask_lock);
+ spin_lock_init(&dev->slock);
mutex_init(&dev->lock);
mutex_init(&dev->gpio_lock);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 45fbe1e4d2d0..745caabe3397 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -73,7 +73,6 @@
#include "si2157.h"
#include "sp2.h"
#include "m88ds3103.h"
-#include "m88ts2022.h"
#include "m88rs6000t.h"
static unsigned int debug;
@@ -1187,7 +1186,7 @@ static int dvb_register(struct cx23885_tsport *port)
struct vb2_dvb_frontend *fe0, *fe1 = NULL;
struct si2168_config si2168_config;
struct si2157_config si2157_config;
- struct m88ts2022_config m88ts2022_config;
+ struct ts2020_config ts2020_config;
struct i2c_board_info info;
struct i2c_adapter *adapter;
struct i2c_client *client_demod = NULL, *client_tuner = NULL;
@@ -1856,13 +1855,12 @@ static int dvb_register(struct cx23885_tsport *port)
break;
/* attach tuner */
- memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
- m88ts2022_config.fe = fe0->dvb.frontend;
- m88ts2022_config.clock = 27000000;
+ memset(&ts2020_config, 0, sizeof(ts2020_config));
+ ts2020_config.fe = fe0->dvb.frontend;
memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ strlcpy(info.type, "ts2020", I2C_NAME_SIZE);
info.addr = 0x60;
- info.platform_data = &m88ts2022_config;
+ info.platform_data = &ts2020_config;
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
if (client_tuner == NULL ||
@@ -1986,13 +1984,12 @@ static int dvb_register(struct cx23885_tsport *port)
break;
/* attach tuner */
- memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
- m88ts2022_config.fe = fe0->dvb.frontend;
- m88ts2022_config.clock = 27000000;
+ memset(&ts2020_config, 0, sizeof(ts2020_config));
+ ts2020_config.fe = fe0->dvb.frontend;
memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ strlcpy(info.type, "ts2020", I2C_NAME_SIZE);
info.addr = 0x60;
- info.platform_data = &m88ts2022_config;
+ info.platform_data = &ts2020_config;
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
if (client_tuner == NULL || client_tuner->dev.driver == NULL)
@@ -2032,13 +2029,12 @@ static int dvb_register(struct cx23885_tsport *port)
break;
/* attach tuner */
- memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
- m88ts2022_config.fe = fe0->dvb.frontend;
- m88ts2022_config.clock = 27000000;
+ memset(&ts2020_config, 0, sizeof(ts2020_config));
+ ts2020_config.fe = fe0->dvb.frontend;
memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ strlcpy(info.type, "ts2020", I2C_NAME_SIZE);
info.addr = 0x60;
- info.platform_data = &m88ts2022_config;
+ info.platform_data = &ts2020_config;
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
if (client_tuner == NULL || client_tuner->dev.driver == NULL)
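In the cx23885 hunks the ts2020 tuner is bound as an ordinary I2C client, with its configuration handed over through i2c_board_info.platform_data. Condensed into a helper for illustration; attach_ts2020() is an assumed name, the error handling is trimmed, and the 0x60 address matches the code above:

#include <linux/i2c.h>
#include <linux/kmod.h>
#include "ts2020.h"

static struct i2c_client *attach_ts2020(struct i2c_adapter *adapter,
					struct dvb_frontend *fe)
{
	struct ts2020_config cfg = { .fe = fe };
	struct i2c_board_info info = { .addr = 0x60 };
	struct i2c_client *client;

	strlcpy(info.type, "ts2020", I2C_NAME_SIZE);
	info.platform_data = &cfg;	/* read by the tuner driver during probe */
	request_module(info.type);	/* load ts2020.ko if it is not built in */
	client = i2c_new_device(adapter, &info);
	if (!client || !client->dev.driver) {
		if (client)
			i2c_unregister_device(client);	/* device created but no driver bound */
		return NULL;
	}
	return client;
}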
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 5e93c682a3f5..2232b389c441 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -1137,7 +1137,6 @@ int cx23885_video_register(struct cx23885_dev *dev)
int err;
dprintk(1, "%s()\n", __func__);
- spin_lock_init(&dev->slock);
/* Initialize VBI template */
cx23885_vbi_template = cx23885_video_template;
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index b6be46e94289..24216efa56e7 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1102,32 +1102,26 @@ static int cx8802_blackbird_advise_release(struct cx8802_driver *drv)
static void blackbird_unregister_video(struct cx8802_dev *dev)
{
- if (dev->mpeg_dev) {
- if (video_is_registered(dev->mpeg_dev))
- video_unregister_device(dev->mpeg_dev);
- else
- video_device_release(dev->mpeg_dev);
- dev->mpeg_dev = NULL;
- }
+ video_unregister_device(&dev->mpeg_dev);
}
static int blackbird_register_video(struct cx8802_dev *dev)
{
int err;
- dev->mpeg_dev = cx88_vdev_init(dev->core, dev->pci,
- &cx8802_mpeg_template, "mpeg");
- dev->mpeg_dev->ctrl_handler = &dev->cxhdl.hdl;
- video_set_drvdata(dev->mpeg_dev, dev);
- dev->mpeg_dev->queue = &dev->vb2_mpegq;
- err = video_register_device(dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
+ cx88_vdev_init(dev->core, dev->pci, &dev->mpeg_dev,
+ &cx8802_mpeg_template, "mpeg");
+ dev->mpeg_dev.ctrl_handler = &dev->cxhdl.hdl;
+ video_set_drvdata(&dev->mpeg_dev, dev);
+ dev->mpeg_dev.queue = &dev->vb2_mpegq;
+ err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
if (err < 0) {
printk(KERN_INFO "%s/2: can't register mpeg device\n",
dev->core->name);
return err;
}
printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
- dev->core->name, video_device_node_name(dev->mpeg_dev));
+ dev->core->name, video_device_node_name(&dev->mpeg_dev));
return 0;
}
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index c38d5a12e277..3501be9f19d8 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -985,17 +985,14 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
/* ------------------------------------------------------------------ */
-struct video_device *cx88_vdev_init(struct cx88_core *core,
- struct pci_dev *pci,
- const struct video_device *template_,
- const char *type)
+void cx88_vdev_init(struct cx88_core *core,
+ struct pci_dev *pci,
+ struct video_device *vfd,
+ const struct video_device *template_,
+ const char *type)
{
- struct video_device *vfd;
-
- vfd = video_device_alloc();
- if (NULL == vfd)
- return NULL;
*vfd = *template_;
+
/*
* The dev pointer of v4l2_device is NULL, instead we set the
* video_device dev_parent pointer to the correct PCI bus device.
@@ -1004,11 +1001,10 @@ struct video_device *cx88_vdev_init(struct cx88_core *core,
*/
vfd->v4l2_dev = &core->v4l2_dev;
vfd->dev_parent = &pci->dev;
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
vfd->lock = &core->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
core->name, type, core->board.name);
- return vfd;
}
struct cx88_core* cx88_core_get(struct pci_dev *pci)
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index a369b0840acf..98344540c51f 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -732,7 +732,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
if (IS_ERR(dev->alloc_ctx)) {
err = PTR_ERR(dev->alloc_ctx);
- goto fail_core;
+ goto fail_dev;
}
dev->core = core;
@@ -754,6 +754,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
fail_free:
vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
+ fail_dev:
kfree(dev);
fail_core:
core->dvbdev = NULL;
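The cx88-mpeg.c change is a plain error-path fix: dev is allocated before vb2_dma_sg_init_ctx(), so a failure there has to free dev instead of jumping past it to fail_core. The unwind shape it restores, reduced to a self-contained sketch in which sketch_dev and probe_sketch() are placeholder names:

#include <linux/slab.h>

struct sketch_dev {
	void *ctx;
};

static int probe_sketch(struct sketch_dev **out)
{
	struct sketch_dev *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ctx = kzalloc(64, GFP_KERNEL);	/* stands in for vb2_dma_sg_init_ctx() */
	if (!dev->ctx) {
		err = -ENOMEM;
		goto fail_dev;		/* undo exactly what has succeeded so far */
	}

	*out = dev;
	return 0;

fail_dev:
	kfree(dev);
	return err;
}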
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 860c98fc72c7..c9decd80bf61 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1274,27 +1274,9 @@ static const struct v4l2_ctrl_ops cx8800_ctrl_aud_ops = {
static void cx8800_unregister_video(struct cx8800_dev *dev)
{
- if (dev->radio_dev) {
- if (video_is_registered(dev->radio_dev))
- video_unregister_device(dev->radio_dev);
- else
- video_device_release(dev->radio_dev);
- dev->radio_dev = NULL;
- }
- if (dev->vbi_dev) {
- if (video_is_registered(dev->vbi_dev))
- video_unregister_device(dev->vbi_dev);
- else
- video_device_release(dev->vbi_dev);
- dev->vbi_dev = NULL;
- }
- if (dev->video_dev) {
- if (video_is_registered(dev->video_dev))
- video_unregister_device(dev->video_dev);
- else
- video_device_release(dev->video_dev);
- dev->video_dev = NULL;
- }
+ video_unregister_device(&dev->radio_dev);
+ video_unregister_device(&dev->vbi_dev);
+ video_unregister_device(&dev->video_dev);
}
static int cx8800_initdev(struct pci_dev *pci_dev,
@@ -1485,12 +1467,12 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
goto fail_unreg;
/* register v4l devices */
- dev->video_dev = cx88_vdev_init(core,dev->pci,
- &cx8800_video_template,"video");
- video_set_drvdata(dev->video_dev, dev);
- dev->video_dev->ctrl_handler = &core->video_hdl;
- dev->video_dev->queue = &dev->vb2_vidq;
- err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
+ cx88_vdev_init(core, dev->pci, &dev->video_dev,
+ &cx8800_video_template, "video");
+ video_set_drvdata(&dev->video_dev, dev);
+ dev->video_dev.ctrl_handler = &core->video_hdl;
+ dev->video_dev.queue = &dev->vb2_vidq;
+ err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
printk(KERN_ERR "%s/0: can't register video device\n",
@@ -1498,12 +1480,13 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
- core->name, video_device_node_name(dev->video_dev));
+ core->name, video_device_node_name(&dev->video_dev));
- dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
- video_set_drvdata(dev->vbi_dev, dev);
- dev->vbi_dev->queue = &dev->vb2_vbiq;
- err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
+ cx88_vdev_init(core, dev->pci, &dev->vbi_dev,
+ &cx8800_vbi_template, "vbi");
+ video_set_drvdata(&dev->vbi_dev, dev);
+ dev->vbi_dev.queue = &dev->vb2_vbiq;
+ err = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[core->nr]);
if (err < 0) {
printk(KERN_ERR "%s/0: can't register vbi device\n",
@@ -1511,14 +1494,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device %s\n",
- core->name, video_device_node_name(dev->vbi_dev));
+ core->name, video_device_node_name(&dev->vbi_dev));
if (core->board.radio.type == CX88_RADIO) {
- dev->radio_dev = cx88_vdev_init(core,dev->pci,
- &cx8800_radio_template,"radio");
- video_set_drvdata(dev->radio_dev, dev);
- dev->radio_dev->ctrl_handler = &core->audio_hdl;
- err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
+ cx88_vdev_init(core, dev->pci, &dev->radio_dev,
+ &cx8800_radio_template, "radio");
+ video_set_drvdata(&dev->radio_dev, dev);
+ dev->radio_dev.ctrl_handler = &core->audio_hdl;
+ err = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[core->nr]);
if (err < 0) {
printk(KERN_ERR "%s/0: can't register radio device\n",
@@ -1526,7 +1509,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device %s\n",
- core->name, video_device_node_name(dev->radio_dev));
+ core->name, video_device_node_name(&dev->radio_dev));
}
/* start tvaudio thread */
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 7748ca9abb09..b9fe1ac24803 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -478,9 +478,9 @@ struct cx8800_dev {
/* various device info */
unsigned int resources;
- struct video_device *video_dev;
- struct video_device *vbi_dev;
- struct video_device *radio_dev;
+ struct video_device video_dev;
+ struct video_device vbi_dev;
+ struct video_device radio_dev;
/* pci i/o */
struct pci_dev *pci;
@@ -563,7 +563,7 @@ struct cx8802_dev {
/* for blackbird only */
struct list_head devlist;
#if IS_ENABLED(CONFIG_VIDEO_CX88_BLACKBIRD)
- struct video_device *mpeg_dev;
+ struct video_device mpeg_dev;
u32 mailbox;
/* mpeg params */
@@ -647,10 +647,11 @@ extern int cx88_set_scale(struct cx88_core *core, unsigned int width,
unsigned int height, enum v4l2_field field);
extern int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
-extern struct video_device *cx88_vdev_init(struct cx88_core *core,
- struct pci_dev *pci,
- const struct video_device *template_,
- const char *type);
+extern void cx88_vdev_init(struct cx88_core *core,
+ struct pci_dev *pci,
+ struct video_device *vfd,
+ const struct video_device *template_,
+ const char *type);
extern struct cx88_core *cx88_core_get(struct pci_dev *pci);
extern void cx88_core_put(struct cx88_core *core,
struct pci_dev *pci);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 39b52929755a..41fa21534edf 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -224,7 +224,7 @@ static int ivtv_alsa_load(struct ivtv *itv)
}
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
- if (s->vdev == NULL) {
+ if (s->vdev.v4l2_dev == NULL) {
IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
"skipping\n", __func__);
return 0;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 7bf9cbca4fa6..f198b9826ed8 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -167,7 +167,7 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
- v4l2_fh_init(&item.fh, s->vdev);
+ v4l2_fh_init(&item.fh, &s->vdev);
item.itv = itv;
item.type = s->type;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 802642d26643..c2e60b4f292d 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1284,7 +1284,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
return 0;
free_streams:
- ivtv_streams_cleanup(itv, 1);
+ ivtv_streams_cleanup(itv);
free_irq:
free_irq(itv->pdev->irq, (void *)itv);
free_i2c:
@@ -1444,7 +1444,7 @@ static void ivtv_remove(struct pci_dev *pdev)
flush_kthread_worker(&itv->irq_worker);
kthread_stop(itv->irq_worker_task);
- ivtv_streams_cleanup(itv, 1);
+ ivtv_streams_cleanup(itv);
ivtv_udma_free(itv);
v4l2_ctrl_handler_free(&itv->cxhdl.hdl);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index bc309f42c8ed..e8b6c7ad2ba9 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -327,7 +327,7 @@ struct ivtv; /* forward reference */
struct ivtv_stream {
/* These first four fields are always set, even if the stream
is not actually created. */
- struct video_device *vdev; /* NULL when stream not created */
+ struct video_device vdev; /* vdev.v4l2_dev is NULL if there is no device */
struct ivtv *itv; /* for ease of use */
const char *name; /* name of the stream */
int type; /* stream type */
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index e5ff6277ca85..605d280d8a5f 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -995,7 +995,7 @@ static int ivtv_open(struct file *filp)
IVTV_DEBUG_WARN("nomem on v4l2 open\n");
return -ENOMEM;
}
- v4l2_fh_init(&item->fh, s->vdev);
+ v4l2_fh_init(&item->fh, &s->vdev);
item->itv = itv;
item->type = s->type;
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 4d8ee18c3feb..6fe6c4a0e858 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -448,9 +448,12 @@ static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct v4l2_window *winfmt = &fmt->fmt.win;
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ return -EINVAL;
+ if (!itv->osd_video_pbase)
return -EINVAL;
winfmt->chromakey = itv->osd_chroma_key;
winfmt->global_alpha = itv->osd_global_alpha;
@@ -555,10 +558,13 @@ static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format
static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
u32 chromakey = fmt->fmt.win.chromakey;
u8 global_alpha = fmt->fmt.win.global_alpha;
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ return -EINVAL;
+ if (!itv->osd_video_pbase)
return -EINVAL;
ivtv_g_fmt_vid_out_overlay(file, fh, fmt);
fmt->fmt.win.chromakey = chromakey;
@@ -741,6 +747,11 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev));
vcap->capabilities = itv->v4l2_cap | V4L2_CAP_DEVICE_CAPS;
vcap->device_caps = s->caps;
+ if ((s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY) &&
+ !itv->osd_video_pbase) {
+ vcap->capabilities &= ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ vcap->device_caps &= ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ }
return 0;
}
@@ -816,80 +827,103 @@ static int ivtv_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropca
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
- struct yuv_playback_info *yi = &itv->yuv_info;
- int streamtype;
-
- streamtype = id->type;
- if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return -EINVAL;
- cropcap->bounds.top = cropcap->bounds.left = 0;
- cropcap->bounds.width = 720;
if (cropcap->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- cropcap->bounds.height = itv->is_50hz ? 576 : 480;
cropcap->pixelaspect.numerator = itv->is_50hz ? 59 : 10;
cropcap->pixelaspect.denominator = itv->is_50hz ? 54 : 11;
- } else if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
- if (yi->track_osd) {
- cropcap->bounds.width = yi->osd_full_w;
- cropcap->bounds.height = yi->osd_full_h;
- } else {
- cropcap->bounds.width = 720;
- cropcap->bounds.height =
- itv->is_out_50hz ? 576 : 480;
- }
+ } else if (cropcap->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10;
cropcap->pixelaspect.denominator = itv->is_out_50hz ? 54 : 11;
} else {
- cropcap->bounds.height = itv->is_out_50hz ? 576 : 480;
- cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10;
- cropcap->pixelaspect.denominator = itv->is_out_50hz ? 54 : 11;
+ return -EINVAL;
}
- cropcap->defrect = cropcap->bounds;
return 0;
}
-static int ivtv_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+static int ivtv_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
- int streamtype;
+ struct v4l2_rect r = { 0, 0, 720, 0 };
+ int streamtype = id->type;
- streamtype = id->type;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ return -EINVAL;
- if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
- if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
- yi->main_rect = crop->c;
- return 0;
- } else {
- if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
- crop->c.width, crop->c.height, crop->c.left, crop->c.top)) {
- itv->main_rect = crop->c;
- return 0;
- }
- }
+ if (sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
+
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ r.height = itv->is_out_50hz ? 576 : 480;
+ if (streamtype == IVTV_DEC_STREAM_TYPE_YUV && yi->track_osd) {
+ r.width = yi->osd_full_w;
+ r.height = yi->osd_full_h;
+ }
+ sel->r.width = clamp(sel->r.width, 16U, r.width);
+ sel->r.height = clamp(sel->r.height, 16U, r.height);
+ sel->r.left = clamp_t(unsigned, sel->r.left, 0, r.width - sel->r.width);
+ sel->r.top = clamp_t(unsigned, sel->r.top, 0, r.height - sel->r.height);
+
+ if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
+ yi->main_rect = sel->r;
+ return 0;
+ }
+ if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
+ sel->r.width, sel->r.height, sel->r.left, sel->r.top)) {
+ itv->main_rect = sel->r;
+ return 0;
}
return -EINVAL;
}
-static int ivtv_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+static int ivtv_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
- int streamtype;
+ struct v4l2_rect r = { 0, 0, 720, 0 };
+ int streamtype = id->type;
+
+ if (sel->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = sel->r.left = 0;
+ sel->r.width = 720;
+ sel->r.height = itv->is_50hz ? 576 : 480;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
- streamtype = id->type;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ return -EINVAL;
- if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
if (streamtype == IVTV_DEC_STREAM_TYPE_YUV)
- crop->c = yi->main_rect;
+ sel->r = yi->main_rect;
else
- crop->c = itv->main_rect;
+ sel->r = itv->main_rect;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ r.height = itv->is_out_50hz ? 576 : 480;
+ if (streamtype == IVTV_DEC_STREAM_TYPE_YUV && yi->track_osd) {
+ r.width = yi->osd_full_w;
+ r.height = yi->osd_full_h;
+ }
+ sel->r = r;
return 0;
}
return -EINVAL;
@@ -987,7 +1021,7 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
else
std = V4L2_STD_ALL;
for (i = 0; i <= IVTV_ENC_STREAM_TYPE_VBI; i++)
- itv->streams[i].vdev->tvnorms = std;
+ itv->streams[i].vdev.tvnorms = std;
/* prevent others from messing with the streams until
we're finished changing inputs. */
@@ -1038,7 +1072,7 @@ static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
- if (s->vdev->vfl_dir)
+ if (s->vdev.vfl_dir)
return -ENOTTY;
if (vf->tuner != 0)
return -EINVAL;
@@ -1052,7 +1086,7 @@ int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
- if (s->vdev->vfl_dir)
+ if (s->vdev.vfl_dir)
return -ENOTTY;
if (vf->tuner != 0)
return -EINVAL;
@@ -1340,6 +1374,7 @@ static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder
static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
{
struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
u32 data[CX2341X_MBOX_MAX_DATA];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -1363,10 +1398,10 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
0,
};
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
- return -EINVAL;
+ if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ return -ENOTTY;
if (!itv->osd_video_pbase)
- return -EINVAL;
+ return -ENOTTY;
fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY |
V4L2_FBUF_CAP_GLOBAL_ALPHA;
@@ -1427,12 +1462,13 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
+ struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct yuv_playback_info *yi = &itv->yuv_info;
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
- return -EINVAL;
+ if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ return -ENOTTY;
if (!itv->osd_video_pbase)
- return -EINVAL;
+ return -ENOTTY;
itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0;
itv->osd_local_alpha_state =
@@ -1447,9 +1483,12 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
+ struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
- return -EINVAL;
+ if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ return -ENOTTY;
+ if (!itv->osd_video_pbase)
+ return -ENOTTY;
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, on != 0);
@@ -1547,7 +1586,7 @@ static int ivtv_log_status(struct file *file, void *fh)
for (i = 0; i < IVTV_MAX_STREAMS; i++) {
struct ivtv_stream *s = &itv->streams[i];
- if (s->vdev == NULL || s->buffers == 0)
+ if (s->vdev.v4l2_dev == NULL || s->buffers == 0)
continue;
IVTV_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags,
(s->buffers - s->q_free.buffers) * 100 / s->buffers,
@@ -1837,8 +1876,8 @@ static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_enum_output = ivtv_enum_output,
.vidioc_enumaudout = ivtv_enumaudout,
.vidioc_cropcap = ivtv_cropcap,
- .vidioc_s_crop = ivtv_s_crop,
- .vidioc_g_crop = ivtv_g_crop,
+ .vidioc_s_selection = ivtv_s_selection,
+ .vidioc_g_selection = ivtv_g_selection,
.vidioc_g_input = ivtv_g_input,
.vidioc_s_input = ivtv_s_input,
.vidioc_g_output = ivtv_g_output,
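ivtv_s_selection() above no longer rejects out-of-range compose rectangles; it clamps them into the bounds rectangle, size first and position second, with a 16x16 minimum as in the driver. The arithmetic in isolation, where clamp_compose() is an illustrative helper rather than a function added by this patch:

#include <linux/kernel.h>	/* clamp(), clamp_t() */
#include <linux/videodev2.h>

static void clamp_compose(struct v4l2_rect *want, const struct v4l2_rect *bounds)
{
	/* Clamp the size first, then the position, so left/top always keep the
	 * rectangle inside the bounds. */
	want->width  = clamp(want->width, 16U, bounds->width);
	want->height = clamp(want->height, 16U, bounds->height);
	want->left = clamp_t(unsigned, want->left, 0, bounds->width - want->width);
	want->top  = clamp_t(unsigned, want->top, 0, bounds->height - want->height);
}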
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index e7d701777e53..36ca2d67c812 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -75,7 +75,7 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
- s->vdev == NULL || !ivtv_use_pio(s)) {
+ s->vdev.v4l2_dev == NULL || !ivtv_use_pio(s)) {
itv->cur_pio_stream = -1;
/* trigger PIO complete user interrupt */
write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
@@ -132,7 +132,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
int rc;
/* sanity checks */
- if (s->vdev == NULL) {
+ if (s->vdev.v4l2_dev == NULL) {
IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
return -1;
}
@@ -890,8 +890,8 @@ static void ivtv_irq_vsync(struct ivtv *itv)
if (s)
wake_up(&s->waitq);
}
- if (s && s->vdev)
- v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
+ if (s && s->vdev.v4l2_dev)
+ v4l2_event_queue(&s->vdev, frame ? &evtop : &evbottom);
wake_up(&itv->vsync_waitq);
/* Send VBI to saa7127 */
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index f0a1cc472313..d27c6df97566 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -130,7 +130,8 @@ static struct {
"decoder MPG",
VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET,
PCI_DMA_TODEVICE, 0,
- V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VBI */
@@ -151,7 +152,8 @@ static struct {
"decoder YUV",
VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET,
PCI_DMA_TODEVICE, 0,
- V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY,
&ivtv_v4l2_dec_fops
}
};
@@ -159,11 +161,9 @@ static struct {
static void ivtv_stream_init(struct ivtv *itv, int type)
{
struct ivtv_stream *s = &itv->streams[type];
- struct video_device *vdev = s->vdev;
/* we need to keep vdev, so restore it afterwards */
memset(s, 0, sizeof(*s));
- s->vdev = vdev;
/* initialize ivtv_stream fields */
s->itv = itv;
@@ -194,10 +194,10 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
int num_offset = ivtv_stream_info[type].num_offset;
int num = itv->instance + ivtv_first_minor + num_offset;
- /* These four fields are always initialized. If vdev == NULL, then
+ /* These four fields are always initialized. If vdev.v4l2_dev == NULL, then
this stream is not in use. In that case no other fields but these
four can be used. */
- s->vdev = NULL;
+ s->vdev.v4l2_dev = NULL;
s->itv = itv;
s->type = type;
s->name = ivtv_stream_info[type].name;
@@ -218,40 +218,33 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
ivtv_stream_init(itv, type);
- /* allocate and initialize the v4l2 video device structure */
- s->vdev = video_device_alloc();
- if (s->vdev == NULL) {
- IVTV_ERR("Couldn't allocate v4l2 video_device for %s\n", s->name);
- return -ENOMEM;
- }
-
- snprintf(s->vdev->name, sizeof(s->vdev->name), "%s %s",
+ snprintf(s->vdev.name, sizeof(s->vdev.name), "%s %s",
itv->v4l2_dev.name, s->name);
- s->vdev->num = num;
- s->vdev->v4l2_dev = &itv->v4l2_dev;
+ s->vdev.num = num;
+ s->vdev.v4l2_dev = &itv->v4l2_dev;
if (ivtv_stream_info[type].v4l2_caps &
(V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_SLICED_VBI_OUTPUT))
- s->vdev->vfl_dir = VFL_DIR_TX;
- s->vdev->fops = ivtv_stream_info[type].fops;
- s->vdev->ctrl_handler = itv->v4l2_dev.ctrl_handler;
- s->vdev->release = video_device_release;
- s->vdev->tvnorms = V4L2_STD_ALL;
- s->vdev->lock = &itv->serialize_lock;
+ s->vdev.vfl_dir = VFL_DIR_TX;
+ s->vdev.fops = ivtv_stream_info[type].fops;
+ s->vdev.ctrl_handler = itv->v4l2_dev.ctrl_handler;
+ s->vdev.release = video_device_release_empty;
+ s->vdev.tvnorms = V4L2_STD_ALL;
+ s->vdev.lock = &itv->serialize_lock;
if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
- v4l2_disable_ioctl(s->vdev, VIDIOC_S_AUDIO);
- v4l2_disable_ioctl(s->vdev, VIDIOC_G_AUDIO);
- v4l2_disable_ioctl(s->vdev, VIDIOC_ENUMAUDIO);
- v4l2_disable_ioctl(s->vdev, VIDIOC_ENUMINPUT);
- v4l2_disable_ioctl(s->vdev, VIDIOC_S_INPUT);
- v4l2_disable_ioctl(s->vdev, VIDIOC_G_INPUT);
- v4l2_disable_ioctl(s->vdev, VIDIOC_S_FREQUENCY);
- v4l2_disable_ioctl(s->vdev, VIDIOC_G_FREQUENCY);
- v4l2_disable_ioctl(s->vdev, VIDIOC_S_TUNER);
- v4l2_disable_ioctl(s->vdev, VIDIOC_G_TUNER);
- v4l2_disable_ioctl(s->vdev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_ENUMAUDIO);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_ENUMINPUT);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_S_INPUT);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_G_INPUT);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&s->vdev, VIDIOC_S_STD);
}
- ivtv_set_funcs(s->vdev);
+ ivtv_set_funcs(&s->vdev);
return 0;
}
@@ -266,7 +259,7 @@ int ivtv_streams_setup(struct ivtv *itv)
if (ivtv_prep_dev(itv, type))
break;
- if (itv->streams[type].vdev == NULL)
+ if (itv->streams[type].vdev.v4l2_dev == NULL)
continue;
/* Allocate Stream */
@@ -277,7 +270,7 @@ int ivtv_streams_setup(struct ivtv *itv)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
- ivtv_streams_cleanup(itv, 0);
+ ivtv_streams_cleanup(itv);
return -ENOMEM;
}
@@ -288,28 +281,26 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
const char *name;
int num;
- if (s->vdev == NULL)
+ if (s->vdev.v4l2_dev == NULL)
return 0;
- num = s->vdev->num;
+ num = s->vdev.num;
/* card number + user defined offset + device offset */
if (type != IVTV_ENC_STREAM_TYPE_MPG) {
struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
- if (s_mpg->vdev)
- num = s_mpg->vdev->num + ivtv_stream_info[type].num_offset;
+ if (s_mpg->vdev.v4l2_dev)
+ num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset;
}
- video_set_drvdata(s->vdev, s);
+ video_set_drvdata(&s->vdev, s);
/* Register device. First try the desired minor, then any free one. */
- if (video_register_device_no_warn(s->vdev, vfl_type, num)) {
+ if (video_register_device_no_warn(&s->vdev, vfl_type, num)) {
IVTV_ERR("Couldn't register v4l2 device for %s (device node number %d)\n",
s->name, num);
- video_device_release(s->vdev);
- s->vdev = NULL;
return -ENOMEM;
}
- name = video_device_node_name(s->vdev);
+ name = video_device_node_name(&s->vdev);
switch (vfl_type) {
case VFL_TYPE_GRABBER:
@@ -346,29 +337,25 @@ int ivtv_streams_register(struct ivtv *itv)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
- ivtv_streams_cleanup(itv, 1);
+ ivtv_streams_cleanup(itv);
return -ENOMEM;
}
/* Unregister v4l2 devices */
-void ivtv_streams_cleanup(struct ivtv *itv, int unregister)
+void ivtv_streams_cleanup(struct ivtv *itv)
{
int type;
/* Teardown all streams */
for (type = 0; type < IVTV_MAX_STREAMS; type++) {
- struct video_device *vdev = itv->streams[type].vdev;
+ struct video_device *vdev = &itv->streams[type].vdev;
- itv->streams[type].vdev = NULL;
- if (vdev == NULL)
+ if (vdev->v4l2_dev == NULL)
continue;
+ video_unregister_device(vdev);
ivtv_stream_free(&itv->streams[type]);
- /* Unregister or release device */
- if (unregister)
- video_unregister_device(vdev);
- else
- video_device_release(vdev);
+ itv->streams[type].vdev.v4l2_dev = NULL;
}
}
@@ -492,7 +479,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
int captype = 0, subtype = 0;
int enable_passthrough = 0;
- if (s->vdev == NULL)
+ if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);
@@ -661,7 +648,7 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
u16 width;
u16 height;
- if (s->vdev == NULL)
+ if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
IVTV_DEBUG_INFO("Setting some initial decoder settings\n");
@@ -723,7 +710,7 @@ int ivtv_start_v4l2_decode_stream(struct ivtv_stream *s, int gop_offset)
struct ivtv *itv = s->itv;
int rc;
- if (s->vdev == NULL)
+ if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
if (test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags))
@@ -778,7 +765,7 @@ void ivtv_stop_all_captures(struct ivtv *itv)
for (i = IVTV_MAX_STREAMS - 1; i >= 0; i--) {
struct ivtv_stream *s = &itv->streams[i];
- if (s->vdev == NULL)
+ if (s->vdev.v4l2_dev == NULL)
continue;
if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
ivtv_stop_v4l2_encode_stream(s, 0);
@@ -793,7 +780,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
int cap_type;
int stopmode;
- if (s->vdev == NULL)
+ if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
/* This function assumes that you are allowed to stop the capture
@@ -917,7 +904,7 @@ int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
};
struct ivtv *itv = s->itv;
- if (s->vdev == NULL)
+ if (s->vdev.v4l2_dev == NULL)
return -EINVAL;
if (s->type != IVTV_DEC_STREAM_TYPE_YUV && s->type != IVTV_DEC_STREAM_TYPE_MPG)
@@ -969,7 +956,7 @@ int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags);
wake_up(&itv->event_waitq);
- v4l2_event_queue(s->vdev, &ev);
+ v4l2_event_queue(&s->vdev, &ev);
/* wake up wait queues */
wake_up(&s->waitq);
@@ -982,7 +969,7 @@ int ivtv_passthrough_mode(struct ivtv *itv, int enable)
struct ivtv_stream *yuv_stream = &itv->streams[IVTV_ENC_STREAM_TYPE_YUV];
struct ivtv_stream *dec_stream = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
- if (yuv_stream->vdev == NULL || dec_stream->vdev == NULL)
+ if (yuv_stream->vdev.v4l2_dev == NULL || dec_stream->vdev.v4l2_dev == NULL)
return -EINVAL;
IVTV_DEBUG_INFO("ivtv ioctl: Select passthrough mode\n");
diff --git a/drivers/media/pci/ivtv/ivtv-streams.h b/drivers/media/pci/ivtv/ivtv-streams.h
index a653a5136417..3d76a415fbd8 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.h
+++ b/drivers/media/pci/ivtv/ivtv-streams.h
@@ -23,7 +23,7 @@
int ivtv_streams_setup(struct ivtv *itv);
int ivtv_streams_register(struct ivtv *itv);
-void ivtv_streams_cleanup(struct ivtv *itv, int unregister);
+void ivtv_streams_cleanup(struct ivtv *itv);
/* Capture related */
int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 9d9f90cb7740..ba887e8e1b17 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1546,7 +1546,7 @@ static struct video_device meye_template = {
.name = "meye",
.fops = &meye_fops,
.ioctl_ops = &meye_ioctl_ops,
- .release = video_device_release,
+ .release = video_device_release_empty,
};
static const struct v4l2_ctrl_ops meye_ctrl_ops = {
@@ -1623,7 +1623,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
if (meye.mchip_dev != NULL) {
printk(KERN_ERR "meye: only one device allowed!\n");
- goto outnotdev;
+ return ret;
}
ret = v4l2_device_register(&pcidev->dev, v4l2_dev);
@@ -1633,11 +1633,6 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
}
ret = -ENOMEM;
meye.mchip_dev = pcidev;
- meye.vdev = video_device_alloc();
- if (!meye.vdev) {
- v4l2_err(v4l2_dev, "video_device_alloc() failed!\n");
- goto outnotdev;
- }
meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
if (!meye.grab_temp) {
@@ -1658,8 +1653,8 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto outkfifoalloc2;
}
- memcpy(meye.vdev, &meye_template, sizeof(meye_template));
- meye.vdev->v4l2_dev = &meye.v4l2_dev;
+ meye.vdev = meye_template;
+ meye.vdev.v4l2_dev = &meye.v4l2_dev;
ret = -EIO;
if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
@@ -1743,9 +1738,9 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
}
v4l2_ctrl_handler_setup(&meye.hdl);
- meye.vdev->ctrl_handler = &meye.hdl;
+ meye.vdev.ctrl_handler = &meye.hdl;
- if (video_register_device(meye.vdev, VFL_TYPE_GRABBER,
+ if (video_register_device(&meye.vdev, VFL_TYPE_GRABBER,
video_nr) < 0) {
v4l2_err(v4l2_dev, "video_register_device failed\n");
goto outvideoreg;
@@ -1777,14 +1772,12 @@ outkfifoalloc2:
outkfifoalloc1:
vfree(meye.grab_temp);
outvmalloc:
- video_device_release(meye.vdev);
-outnotdev:
return ret;
}
static void meye_remove(struct pci_dev *pcidev)
{
- video_unregister_device(meye.vdev);
+ video_unregister_device(&meye.vdev);
mchip_hic_stop();
diff --git a/drivers/media/pci/meye/meye.h b/drivers/media/pci/meye/meye.h
index 6fed9274cfa5..751be5e533c7 100644
--- a/drivers/media/pci/meye/meye.h
+++ b/drivers/media/pci/meye/meye.h
@@ -311,7 +311,7 @@ struct meye {
struct kfifo doneq; /* queue for grabbed buffers */
spinlock_t doneq_lock; /* lock protecting the queue */
wait_queue_head_t proc_list; /* wait queue */
- struct video_device *vdev; /* video device parameters */
+ struct video_device vdev; /* video device parameters */
u16 brightness;
u16 hue;
u16 contrast;
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index 366434f5647e..03cbcd2095c6 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -66,7 +66,7 @@ struct hexium
{
int type;
- struct video_device *video_dev;
+ struct video_device video_dev;
struct i2c_adapter i2c_adapter;
int cur_input; /* current input */
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index a1eb26d11070..15f0d66ff78a 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -63,7 +63,7 @@ struct hexium_data
struct hexium
{
int type;
- struct video_device *video_dev;
+ struct video_device video_dev;
struct i2c_adapter i2c_adapter;
int cur_input; /* current input */
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
index c4c8fce8f2b4..0ca1e07ae783 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/media/pci/saa7146/mxb.c
@@ -151,8 +151,8 @@ static struct mxb_routing TEA6420_line[MXB_AUDIOS + 1][2] = {
struct mxb
{
- struct video_device *video_dev;
- struct video_device *vbi_dev;
+ struct video_device video_dev;
+ struct video_device vbi_dev;
struct i2c_adapter i2c_adapter;
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 4b0bec3766ed..9cf3c6cba498 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1436,11 +1436,11 @@ static void saa7164_finidev(struct pci_dev *pci_dev)
saa7164_i2c_unregister(&dev->i2c_bus[1]);
saa7164_i2c_unregister(&dev->i2c_bus[2]);
- pci_disable_device(pci_dev);
-
/* unregister stuff */
free_irq(pci_dev->irq, dev);
+ pci_disable_device(pci_dev);
+
mutex_lock(&devlist);
list_del(&dev->devlist);
mutex_unlock(&devlist);
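The saa7164 hunk only reorders teardown: the interrupt handler is freed before the PCI device is disabled, so a late (possibly shared) interrupt cannot land on an already disabled device. As a sketch of the resulting .remove() ordering; sketch_pci_remove() is a placeholder, not the saa7164 function:

#include <linux/pci.h>
#include <linux/interrupt.h>

static void sketch_pci_remove(struct pci_dev *pci_dev)
{
	void *dev = pci_get_drvdata(pci_dev);

	/* 1. Detach the interrupt handler while the device is still enabled... */
	free_irq(pci_dev->irq, dev);
	/* 2. ...then it is safe to disable the device itself. */
	pci_disable_device(pci_dev);
}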
diff --git a/drivers/media/pci/smipcie/Kconfig b/drivers/media/pci/smipcie/Kconfig
index c8de53f5ea28..21a1583dbd8f 100644
--- a/drivers/media/pci/smipcie/Kconfig
+++ b/drivers/media/pci/smipcie/Kconfig
@@ -4,7 +4,7 @@ config DVB_SMIPCIE
select I2C_ALGOBIT
select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/media/pci/smipcie/smipcie.c b/drivers/media/pci/smipcie/smipcie.c
index 36c8ed77309c..411592524c63 100644
--- a/drivers/media/pci/smipcie/smipcie.c
+++ b/drivers/media/pci/smipcie/smipcie.c
@@ -16,7 +16,7 @@
#include "smipcie.h"
#include "m88ds3103.h"
-#include "m88ts2022.h"
+#include "ts2020.h"
#include "m88rs6000t.h"
#include "si2168.h"
#include "si2157.h"
@@ -532,9 +532,7 @@ static int smi_dvbsky_m88ds3103_fe_attach(struct smi_port *port)
struct i2c_adapter *tuner_i2c_adapter;
struct i2c_client *tuner_client;
struct i2c_board_info tuner_info;
- struct m88ts2022_config m88ts2022_config = {
- .clock = 27000000,
- };
+ struct ts2020_config ts2020_config = {};
memset(&tuner_info, 0, sizeof(struct i2c_board_info));
i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
@@ -546,10 +544,10 @@ static int smi_dvbsky_m88ds3103_fe_attach(struct smi_port *port)
return ret;
}
/* attach tuner */
- m88ts2022_config.fe = port->fe;
- strlcpy(tuner_info.type, "m88ts2022", I2C_NAME_SIZE);
+ ts2020_config.fe = port->fe;
+ strlcpy(tuner_info.type, "ts2020", I2C_NAME_SIZE);
tuner_info.addr = 0x60;
- tuner_info.platform_data = &m88ts2022_config;
+ tuner_info.platform_data = &ts2020_config;
tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
if (!tuner_client) {
ret = -ENODEV;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 22450f583da1..d384a6b0b09f 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -127,7 +127,7 @@ static inline struct vip_buffer *to_vip_buffer(struct vb2_buffer *vb2)
*/
struct sta2x11_vip {
struct v4l2_device v4l2_dev;
- struct video_device *video_dev;
+ struct video_device video_dev;
struct pci_dev *pdev;
struct i2c_adapter *adapter;
unsigned int register_save_area[IRQ_COUNT + SAVE_COUNT + AUX_COUNT];
@@ -763,7 +763,7 @@ static const struct v4l2_ioctl_ops vip_ioctl_ops = {
static struct video_device video_dev_template = {
.name = KBUILD_MODNAME,
- .release = video_device_release,
+ .release = video_device_release_empty,
.fops = &vip_fops,
.ioctl_ops = &vip_ioctl_ops,
.tvnorms = V4L2_STD_ALL,
@@ -1082,19 +1082,13 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
goto release_buf;
}
- /* Alloc, initialize and register video device */
- vip->video_dev = video_device_alloc();
- if (!vip->video_dev) {
- ret = -ENOMEM;
- goto release_irq;
- }
+ /* Initialize and register video device */
+ vip->video_dev = video_dev_template;
+ vip->video_dev.v4l2_dev = &vip->v4l2_dev;
+ vip->video_dev.queue = &vip->vb_vidq;
+ video_set_drvdata(&vip->video_dev, vip);
- vip->video_dev = &video_dev_template;
- vip->video_dev->v4l2_dev = &vip->v4l2_dev;
- vip->video_dev->queue = &vip->vb_vidq;
- video_set_drvdata(vip->video_dev, vip);
-
- ret = video_register_device(vip->video_dev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&vip->video_dev, VFL_TYPE_GRABBER, -1);
if (ret)
goto vrelease;
@@ -1124,13 +1118,9 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
return 0;
vunreg:
- video_set_drvdata(vip->video_dev, NULL);
+ video_set_drvdata(&vip->video_dev, NULL);
vrelease:
- if (video_is_registered(vip->video_dev))
- video_unregister_device(vip->video_dev);
- else
- video_device_release(vip->video_dev);
-release_irq:
+ video_unregister_device(&vip->video_dev);
free_irq(pdev->irq, vip);
release_buf:
sta2x11_vip_release_buffer(vip);
@@ -1175,9 +1165,8 @@ static void sta2x11_vip_remove_one(struct pci_dev *pdev)
sta2x11_vip_clear_register(vip);
- video_set_drvdata(vip->video_dev, NULL);
- video_unregister_device(vip->video_dev);
- /*do not call video_device_release() here, is already done */
+ video_set_drvdata(&vip->video_dev, NULL);
+ video_unregister_device(&vip->video_dev);
free_irq(pdev->irq, vip);
pci_disable_msi(pdev);
vb2_queue_release(&vip->vb_vidq);
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index ef3d9606b269..835635b0c712 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -102,8 +102,8 @@ struct av7110 {
struct dvb_device dvb_dev;
struct dvb_net dvb_net;
- struct video_device *v4l_dev;
- struct video_device *vbi_dev;
+ struct video_device v4l_dev;
+ struct video_device vbi_dev;
struct saa7146_dev *dev;
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 0ba3875af22e..54c9910256f8 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -68,7 +68,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_av {
struct budget budget;
- struct video_device *vd;
+ struct video_device vd;
int cur_input;
int has_saa7113;
struct tasklet_struct ciintf_irq_tasklet;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index d9b872b9285a..421f53188c6c 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -56,7 +56,7 @@ config VIDEO_VIU
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && HAS_DMA
depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST
select VIDEO_ADV7180
select VIDEOBUF_DMA_CONTIG
@@ -90,6 +90,7 @@ config VIDEO_OMAP3
select ARM_DMA_USE_IOMMU
select OMAP_IOMMU
select VIDEOBUF2_DMA_CONTIG
+ select MFD_SYSCON
---help---
Driver for an OMAP 3 camera controller.
@@ -117,6 +118,7 @@ source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/exynos4-is/Kconfig"
source "drivers/media/platform/s5p-tv/Kconfig"
source "drivers/media/platform/am437x/Kconfig"
+source "drivers/media/platform/xilinx/Kconfig"
endif # V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 3ec154742083..8f855616c237 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -48,4 +48,6 @@ obj-y += omap/
obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
+obj-$(CONFIG_VIDEO_XILINX) += xilinx/
+
ccflags-y += -I$(srctree)/drivers/media/i2c
diff --git a/drivers/media/platform/am437x/Kconfig b/drivers/media/platform/am437x/Kconfig
index 7b023a76e32e..42d9c186710a 100644
--- a/drivers/media/platform/am437x/Kconfig
+++ b/drivers/media/platform/am437x/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_AM437X_VPFE
tristate "TI AM437x VPFE video capture driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
depends on SOC_AM43XX || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
help
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 56a5cb0d2152..a30cc2f7e4f1 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1645,6 +1645,7 @@ static int vpfe_enum_size(struct file *file, void *priv,
fse.index = fsize->index;
fse.pad = 0;
fse.code = mbus.code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
if (ret)
return -EINVAL;
@@ -1700,11 +1701,16 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
{
struct vpfe_config *cfg = vpfe->cfg;
struct vpfe_subdev_info *sdinfo;
+ struct i2c_client *client;
+ struct i2c_client *curr_client;
int i, j = 0;
+ curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
sdinfo = &cfg->sub_devs[i];
- if (!strcmp(sdinfo->name, vpfe->current_subdev->name)) {
+ client = v4l2_get_subdevdata(sdinfo->sd);
+ if (client->addr == curr_client->addr &&
+ client->adapter->nr == client->adapter->nr) {
if (vpfe->current_input >= 1)
return -1;
*app_input_index = j + vpfe->current_input;
@@ -2296,20 +2302,10 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
- sdinfo = &vpfe->cfg->sub_devs[i];
-
- if (!strcmp(sdinfo->name, subdev->name)) {
+ if (vpfe->cfg->asd[i]->match.of.node == asd[i].match.of.node) {
+ sdinfo = &vpfe->cfg->sub_devs[i];
vpfe->sd[i] = subdev;
- vpfe_info(vpfe,
- "v4l2 sub device %s registered\n",
- subdev->name);
- vpfe->sd[i]->grp_id =
- sdinfo->grp_id;
- /* update tvnorms from the sub devices */
- for (j = 0; j < 1; j++)
- vpfe->video_dev->tvnorms |=
- sdinfo->inputs[j].std;
-
+ vpfe->sd[i]->grp_id = sdinfo->grp_id;
found = true;
break;
}
@@ -2320,6 +2316,8 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
return -EINVAL;
}
+ vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
+
/* setup the supported formats & indexes */
for (j = 0, i = 0; ; ++j) {
struct vpfe_fmt *fmt;
@@ -2327,6 +2325,7 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
memset(&mbus_code, 0, sizeof(mbus_code));
mbus_code.index = j;
+ mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &mbus_code);
if (ret)
@@ -2390,9 +2389,9 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe)
INIT_LIST_HEAD(&vpfe->dma_queue);
- vdev = vpfe->video_dev;
+ vdev = &vpfe->video_dev;
strlcpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
- vdev->release = video_device_release;
+ vdev->release = video_device_release_empty;
vdev->fops = &vpfe_fops;
vdev->ioctl_ops = &vpfe_ioctl_ops;
vdev->v4l2_dev = &vpfe->v4l2_dev;
@@ -2400,7 +2399,7 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe)
vdev->queue = q;
vdev->lock = &vpfe->lock;
video_set_drvdata(vdev, vpfe);
- err = video_register_device(vpfe->video_dev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
if (err) {
vpfe_err(vpfe,
"Unable to register video device.\n");
@@ -2425,7 +2424,7 @@ static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
- struct device_node *endpoint = NULL, *rem = NULL;
+ struct device_node *endpoint = NULL;
struct v4l2_of_endpoint bus_cfg;
struct vpfe_subdev_info *sdinfo;
struct vpfe_config *pdata;
@@ -2443,6 +2442,8 @@ vpfe_get_pdata(struct platform_device *pdev)
return NULL;
for (i = 0; ; i++) {
+ struct device_node *rem;
+
endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
endpoint);
if (!endpoint)
@@ -2497,14 +2498,17 @@ vpfe_get_pdata(struct platform_device *pdev)
goto done;
}
- strncpy(sdinfo->name, rem->name, sizeof(sdinfo->name));
-
pdata->asd[i] = devm_kzalloc(&pdev->dev,
sizeof(struct v4l2_async_subdev),
GFP_KERNEL);
+ if (!pdata->asd[i]) {
+ of_node_put(rem);
+ pdata = NULL;
+ goto done;
+ }
+
pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_OF;
pdata->asd[i]->match.of.node = rem;
- of_node_put(endpoint);
of_node_put(rem);
}
@@ -2513,7 +2517,6 @@ vpfe_get_pdata(struct platform_device *pdev)
done:
of_node_put(endpoint);
- of_node_put(rem);
return NULL;
}
@@ -2561,17 +2564,11 @@ static int vpfe_probe(struct platform_device *pdev)
return -EINVAL;
}
- vpfe->video_dev = video_device_alloc();
- if (!vpfe->video_dev) {
- dev_err(&pdev->dev, "Unable to allocate video device\n");
- return -ENOMEM;
- }
-
ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
if (ret) {
vpfe_err(vpfe,
"Unable to register v4l2 device.\n");
- goto probe_out_video_release;
+ return ret;
}
/* set the driver data in platform device */
@@ -2609,9 +2606,6 @@ static int vpfe_probe(struct platform_device *pdev)
probe_out_v4l2_unregister:
v4l2_device_unregister(&vpfe->v4l2_dev);
-probe_out_video_release:
- if (!video_is_registered(vpfe->video_dev))
- video_device_release(vpfe->video_dev);
return ret;
}
@@ -2628,7 +2622,7 @@ static int vpfe_remove(struct platform_device *pdev)
v4l2_async_notifier_unregister(&vpfe->notifier);
v4l2_device_unregister(&vpfe->v4l2_dev);
- video_unregister_device(vpfe->video_dev);
+ video_unregister_device(&vpfe->video_dev);
return 0;
}
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 0f557352313d..5bfb35649a39 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -83,7 +83,6 @@ struct vpfe_route {
};
struct vpfe_subdev_info {
- char name[32];
/* Sub device group id */
int grp_id;
/* inputs available at the sub device */
@@ -223,7 +222,7 @@ struct vpfe_ccdc {
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
- struct video_device *video_dev;
+ struct video_device video_dev;
/* sub devices */
struct v4l2_subdev **sd;
/* vpfe cfg */
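
The change above embeds struct video_device directly in the driver state instead of keeping a pointer to a separately allocated one. A minimal sketch of that pattern, using only helpers already visible in this patch and a hypothetical my_dev structure, could look like this:

/* Hedged sketch of the embedded-video_device pattern; "my_dev" and
 * "my-capture" are hypothetical placeholders, not part of the patch. */
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>

struct my_dev {
	struct v4l2_device v4l2_dev;
	struct video_device video_dev;	/* embedded, no video_device_alloc() */
};

static int my_register_vdev(struct my_dev *dev)
{
	struct video_device *vdev = &dev->video_dev;

	strlcpy(vdev->name, "my-capture", sizeof(vdev->name));
	vdev->release = video_device_release_empty;	/* nothing separate to free */
	vdev->v4l2_dev = &dev->v4l2_dev;
	video_set_drvdata(vdev, dev);

	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}

On teardown the driver only calls video_unregister_device(&dev->video_dev); the memory goes away together with the containing structure, which is why the release callback becomes video_device_release_empty.
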
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 8f6698668ecf..6a437f86dcdc 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -44,7 +44,6 @@
#include <media/blackfin/ppi.h>
#define CAPTURE_DRV_NAME "bfin_capture"
-#define BCAP_MIN_NUM_BUF 2
struct bcap_format {
char *desc;
@@ -65,7 +64,7 @@ struct bcap_device {
/* v4l2 control handler */
struct v4l2_ctrl_handler ctrl_handler;
/* device node data */
- struct video_device *video_dev;
+ struct video_device video_dev;
/* sub device instance */
struct v4l2_subdev *sd;
/* capture config */
@@ -104,12 +103,8 @@ struct bcap_device {
struct completion comp;
/* prepare to stop */
bool stop;
-};
-
-struct bcap_fh {
- struct v4l2_fh fh;
- /* indicates whether this file handle is doing IO */
- bool io_allowed;
+ /* vb2 buffer sequence counter */
+ unsigned sequence;
};
static const struct bcap_format bcap_formats[] = {
@@ -201,90 +196,6 @@ static void bcap_free_sensor_formats(struct bcap_device *bcap_dev)
bcap_dev->sensor_formats = NULL;
}
-static int bcap_open(struct file *file)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- struct video_device *vfd = bcap_dev->video_dev;
- struct bcap_fh *bcap_fh;
-
- if (!bcap_dev->sd) {
- v4l2_err(&bcap_dev->v4l2_dev, "No sub device registered\n");
- return -ENODEV;
- }
-
- bcap_fh = kzalloc(sizeof(*bcap_fh), GFP_KERNEL);
- if (!bcap_fh) {
- v4l2_err(&bcap_dev->v4l2_dev,
- "unable to allocate memory for file handle object\n");
- return -ENOMEM;
- }
-
- v4l2_fh_init(&bcap_fh->fh, vfd);
-
- /* store pointer to v4l2_fh in private_data member of file */
- file->private_data = &bcap_fh->fh;
- v4l2_fh_add(&bcap_fh->fh);
- bcap_fh->io_allowed = false;
- return 0;
-}
-
-static int bcap_release(struct file *file)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- struct v4l2_fh *fh = file->private_data;
- struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
-
- /* if this instance is doing IO */
- if (bcap_fh->io_allowed)
- vb2_queue_release(&bcap_dev->buffer_queue);
-
- file->private_data = NULL;
- v4l2_fh_del(&bcap_fh->fh);
- v4l2_fh_exit(&bcap_fh->fh);
- kfree(bcap_fh);
- return 0;
-}
-
-static int bcap_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&bcap_dev->mutex))
- return -ERESTARTSYS;
- ret = vb2_mmap(&bcap_dev->buffer_queue, vma);
- mutex_unlock(&bcap_dev->mutex);
- return ret;
-}
-
-#ifndef CONFIG_MMU
-static unsigned long bcap_get_unmapped_area(struct file *file,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
-
- return vb2_get_unmapped_area(&bcap_dev->buffer_queue,
- addr,
- len,
- pgoff,
- flags);
-}
-#endif
-
-static unsigned int bcap_poll(struct file *file, poll_table *wait)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- unsigned int res;
-
- mutex_lock(&bcap_dev->mutex);
- res = vb2_poll(&bcap_dev->buffer_queue, file, wait);
- mutex_unlock(&bcap_dev->mutex);
- return res;
-}
-
static int bcap_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *fmt,
unsigned int *nbuffers, unsigned int *nplanes,
@@ -292,37 +203,32 @@ static int bcap_queue_setup(struct vb2_queue *vq,
{
struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
- if (*nbuffers < BCAP_MIN_NUM_BUF)
- *nbuffers = BCAP_MIN_NUM_BUF;
+ if (fmt && fmt->fmt.pix.sizeimage < bcap_dev->fmt.sizeimage)
+ return -EINVAL;
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2;
*nplanes = 1;
- sizes[0] = bcap_dev->fmt.sizeimage;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : bcap_dev->fmt.sizeimage;
alloc_ctxs[0] = bcap_dev->alloc_ctx;
return 0;
}
-static int bcap_buffer_init(struct vb2_buffer *vb)
-{
- struct bcap_buffer *buf = to_bcap_vb(vb);
-
- INIT_LIST_HEAD(&buf->list);
- return 0;
-}
-
static int bcap_buffer_prepare(struct vb2_buffer *vb)
{
struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
- struct bcap_buffer *buf = to_bcap_vb(vb);
- unsigned long size;
+ unsigned long size = bcap_dev->fmt.sizeimage;
- size = bcap_dev->fmt.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
v4l2_err(&bcap_dev->v4l2_dev, "buffer too small (%lu < %lu)\n",
vb2_plane_size(vb, 0), size);
return -EINVAL;
}
- vb2_set_plane_payload(&buf->vb, 0, size);
+ vb2_set_plane_payload(vb, 0, size);
+
+ vb->v4l2_buf.field = bcap_dev->fmt.field;
return 0;
}
@@ -353,14 +259,16 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
struct ppi_if *ppi = bcap_dev->ppi;
+ struct bcap_buffer *buf, *tmp;
struct ppi_params params;
+ dma_addr_t addr;
int ret;
/* enable streamon on the sub device */
ret = v4l2_subdev_call(bcap_dev->sd, video, s_stream, 1);
if (ret && (ret != -ENOIOCTLCMD)) {
v4l2_err(&bcap_dev->v4l2_dev, "stream on failed in subdev\n");
- return ret;
+ goto err;
}
/* set ppi params */
@@ -399,7 +307,7 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret < 0) {
v4l2_err(&bcap_dev->v4l2_dev,
"Error in setting ppi params\n");
- return ret;
+ goto err;
}
/* attach ppi DMA irq handler */
@@ -407,12 +315,34 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret < 0) {
v4l2_err(&bcap_dev->v4l2_dev,
"Error in attaching interrupt handler\n");
- return ret;
+ goto err;
}
+ bcap_dev->sequence = 0;
+
reinit_completion(&bcap_dev->comp);
bcap_dev->stop = false;
+
+ /* get the next frame from the dma queue */
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
+ struct bcap_buffer, list);
+ /* remove buffer from the dma queue */
+ list_del_init(&bcap_dev->cur_frm->list);
+ addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+ /* update DMA address */
+ ppi->ops->update_addr(ppi, (unsigned long)addr);
+ /* enable ppi */
+ ppi->ops->start(ppi);
+
return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &bcap_dev->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
}
static void bcap_stop_streaming(struct vb2_queue *vq)
@@ -431,6 +361,9 @@ static void bcap_stop_streaming(struct vb2_queue *vq)
"stream off failed in subdev\n");
/* release all active buffers */
+ if (bcap_dev->cur_frm)
+ vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
+
while (!list_empty(&bcap_dev->dma_queue)) {
bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
struct bcap_buffer, list);
@@ -441,7 +374,6 @@ static void bcap_stop_streaming(struct vb2_queue *vq)
static struct vb2_ops bcap_video_qops = {
.queue_setup = bcap_queue_setup,
- .buf_init = bcap_buffer_init,
.buf_prepare = bcap_buffer_prepare,
.buf_cleanup = bcap_buffer_cleanup,
.buf_queue = bcap_buffer_queue,
@@ -451,57 +383,6 @@ static struct vb2_ops bcap_video_qops = {
.stop_streaming = bcap_stop_streaming,
};
-static int bcap_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *req_buf)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- struct vb2_queue *vq = &bcap_dev->buffer_queue;
- struct v4l2_fh *fh = file->private_data;
- struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
-
- if (vb2_is_busy(vq))
- return -EBUSY;
-
- bcap_fh->io_allowed = true;
-
- return vb2_reqbufs(vq, req_buf);
-}
-
-static int bcap_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
-
- return vb2_querybuf(&bcap_dev->buffer_queue, buf);
-}
-
-static int bcap_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- struct v4l2_fh *fh = file->private_data;
- struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
-
- if (!bcap_fh->io_allowed)
- return -EBUSY;
-
- return vb2_qbuf(&bcap_dev->buffer_queue, buf);
-}
-
-static int bcap_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- struct v4l2_fh *fh = file->private_data;
- struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
-
- if (!bcap_fh->io_allowed)
- return -EBUSY;
-
- return vb2_dqbuf(&bcap_dev->buffer_queue,
- buf, file->f_flags & O_NONBLOCK);
-}
-
static irqreturn_t bcap_isr(int irq, void *dev_id)
{
struct ppi_if *ppi = dev_id;
@@ -517,6 +398,7 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
ppi->err = false;
} else {
+ vb->v4l2_buf.sequence = bcap_dev->sequence++;
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
@@ -543,62 +425,14 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int bcap_streamon(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- struct bcap_fh *fh = file->private_data;
- struct ppi_if *ppi = bcap_dev->ppi;
- dma_addr_t addr;
- int ret;
-
- if (!fh->io_allowed)
- return -EBUSY;
-
- /* call streamon to start streaming in videobuf */
- ret = vb2_streamon(&bcap_dev->buffer_queue, buf_type);
- if (ret)
- return ret;
-
- /* if dma queue is empty, return error */
- if (list_empty(&bcap_dev->dma_queue)) {
- v4l2_err(&bcap_dev->v4l2_dev, "dma queue is empty\n");
- ret = -EINVAL;
- goto err;
- }
-
- /* get the next frame from the dma queue */
- bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
- struct bcap_buffer, list);
- /* remove buffer from the dma queue */
- list_del_init(&bcap_dev->cur_frm->list);
- addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
- /* update DMA address */
- ppi->ops->update_addr(ppi, (unsigned long)addr);
- /* enable ppi */
- ppi->ops->start(ppi);
-
- return 0;
-err:
- vb2_streamoff(&bcap_dev->buffer_queue, buf_type);
- return ret;
-}
-
-static int bcap_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct bcap_device *bcap_dev = video_drvdata(file);
- struct bcap_fh *fh = file->private_data;
-
- if (!fh->io_allowed)
- return -EBUSY;
-
- return vb2_streamoff(&bcap_dev->buffer_queue, buf_type);
-}
-
static int bcap_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_input input;
+
+ input = bcap_dev->cfg->inputs[bcap_dev->cur_input];
+ if (!(input.capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
return v4l2_subdev_call(bcap_dev->sd, video, querystd, std);
}
@@ -606,6 +440,11 @@ static int bcap_querystd(struct file *file, void *priv, v4l2_std_id *std)
static int bcap_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_input input;
+
+ input = bcap_dev->cfg->inputs[bcap_dev->cur_input];
+ if (!(input.capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
*std = bcap_dev->std;
return 0;
@@ -614,8 +453,13 @@ static int bcap_g_std(struct file *file, void *priv, v4l2_std_id *std)
static int bcap_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_input input;
int ret;
+ input = bcap_dev->cfg->inputs[bcap_dev->cur_input];
+ if (!(input.capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
if (vb2_is_busy(&bcap_dev->buffer_queue))
return -EBUSY;
@@ -631,6 +475,11 @@ static int bcap_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_input input;
+
+ input = bcap_dev->cfg->inputs[bcap_dev->cur_input];
+ if (!(input.capabilities & V4L2_IN_CAP_DV_TIMINGS))
+ return -ENODATA;
timings->pad = 0;
@@ -642,6 +491,11 @@ static int bcap_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_input input;
+
+ input = bcap_dev->cfg->inputs[bcap_dev->cur_input];
+ if (!(input.capabilities & V4L2_IN_CAP_DV_TIMINGS))
+ return -ENODATA;
return v4l2_subdev_call(bcap_dev->sd, video,
query_dv_timings, timings);
@@ -651,6 +505,11 @@ static int bcap_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_input input;
+
+ input = bcap_dev->cfg->inputs[bcap_dev->cur_input];
+ if (!(input.capabilities & V4L2_IN_CAP_DV_TIMINGS))
+ return -ENODATA;
*timings = bcap_dev->dv_timings;
return 0;
@@ -660,7 +519,13 @@ static int bcap_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct bcap_device *bcap_dev = video_drvdata(file);
+ struct v4l2_input input;
int ret;
+
+ input = bcap_dev->cfg->inputs[bcap_dev->cur_input];
+ if (!(input.capabilities & V4L2_IN_CAP_DV_TIMINGS))
+ return -ENODATA;
+
if (vb2_is_busy(&bcap_dev->buffer_queue))
return -EBUSY;
@@ -881,12 +746,14 @@ static const struct v4l2_ioctl_ops bcap_ioctl_ops = {
.vidioc_g_dv_timings = bcap_g_dv_timings,
.vidioc_query_dv_timings = bcap_query_dv_timings,
.vidioc_enum_dv_timings = bcap_enum_dv_timings,
- .vidioc_reqbufs = bcap_reqbufs,
- .vidioc_querybuf = bcap_querybuf,
- .vidioc_qbuf = bcap_qbuf,
- .vidioc_dqbuf = bcap_dqbuf,
- .vidioc_streamon = bcap_streamon,
- .vidioc_streamoff = bcap_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_parm = bcap_g_parm,
.vidioc_s_parm = bcap_s_parm,
.vidioc_log_status = bcap_log_status,
@@ -894,14 +761,14 @@ static const struct v4l2_ioctl_ops bcap_ioctl_ops = {
static struct v4l2_file_operations bcap_fops = {
.owner = THIS_MODULE,
- .open = bcap_open,
- .release = bcap_release,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
.unlocked_ioctl = video_ioctl2,
- .mmap = bcap_mmap,
+ .mmap = vb2_fop_mmap,
#ifndef CONFIG_MMU
- .get_unmapped_area = bcap_get_unmapped_area,
+ .get_unmapped_area = vb2_fop_get_unmapped_area,
#endif
- .poll = bcap_poll
+ .poll = vb2_fop_poll
};
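
The fops table above now delegates everything to videobuf2. For those helpers to work, the probe path also has to wire the queue and lock into the video_device, which the hunks below do; a condensed, hedged sketch of the required wiring (my_dev, my_fops and my_ioctl_ops are hypothetical stand-ins) is:

/* Hedged sketch: wiring needed by the vb2_fop_*() / vb2_ioctl_*() helpers. */
static int my_init_vdev(struct my_dev *dev, struct vb2_queue *q)
{
	struct video_device *vfd = &dev->video_dev;

	q->lock = &dev->mutex;		/* vb2 fop helpers serialize on this */
	vfd->queue = q;			/* vb2_ioctl_*() look up the queue here */
	vfd->lock = &dev->mutex;	/* serializes the remaining ioctls */
	vfd->fops = &my_fops;		/* .open = v4l2_fh_open, .mmap = vb2_fop_mmap, ... */
	vfd->ioctl_ops = &my_ioctl_ops;	/* .vidioc_reqbufs = vb2_ioctl_reqbufs, ... */
	video_set_drvdata(vfd, dev);

	return video_register_device(vfd, VFL_TYPE_GRABBER, -1);
}
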
static int bcap_probe(struct platform_device *pdev)
@@ -942,27 +809,20 @@ static int bcap_probe(struct platform_device *pdev)
goto err_free_ppi;
}
- vfd = video_device_alloc();
- if (!vfd) {
- ret = -ENOMEM;
- v4l2_err(pdev->dev.driver, "Unable to alloc video device\n");
- goto err_cleanup_ctx;
- }
-
+ vfd = &bcap_dev->video_dev;
/* initialize field of video device */
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
vfd->fops = &bcap_fops;
vfd->ioctl_ops = &bcap_ioctl_ops;
vfd->tvnorms = 0;
vfd->v4l2_dev = &bcap_dev->v4l2_dev;
strncpy(vfd->name, CAPTURE_DRV_NAME, sizeof(vfd->name));
- bcap_dev->video_dev = vfd;
ret = v4l2_device_register(&pdev->dev, &bcap_dev->v4l2_dev);
if (ret) {
v4l2_err(pdev->dev.driver,
"Unable to register v4l2 device\n");
- goto err_release_vdev;
+ goto err_cleanup_ctx;
}
v4l2_info(&bcap_dev->v4l2_dev, "v4l2 device registered\n");
@@ -978,13 +838,14 @@ static int bcap_probe(struct platform_device *pdev)
/* initialize queue */
q = &bcap_dev->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
q->drv_priv = bcap_dev;
q->buf_struct_size = sizeof(struct bcap_buffer);
q->ops = &bcap_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &bcap_dev->mutex;
+ q->min_buffers_needed = 1;
ret = vb2_queue_init(q);
if (ret)
@@ -997,15 +858,16 @@ static int bcap_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&bcap_dev->dma_queue);
vfd->lock = &bcap_dev->mutex;
+ vfd->queue = q;
/* register video device */
- ret = video_register_device(bcap_dev->video_dev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&bcap_dev->video_dev, VFL_TYPE_GRABBER, -1);
if (ret) {
v4l2_err(&bcap_dev->v4l2_dev,
"Unable to register video device\n");
goto err_free_handler;
}
- video_set_drvdata(bcap_dev->video_dev, bcap_dev);
+ video_set_drvdata(&bcap_dev->video_dev, bcap_dev);
v4l2_info(&bcap_dev->v4l2_dev, "video device registered as: %s\n",
video_device_node_name(vfd));
@@ -1083,15 +945,11 @@ static int bcap_probe(struct platform_device *pdev)
}
return 0;
err_unreg_vdev:
- video_unregister_device(bcap_dev->video_dev);
- bcap_dev->video_dev = NULL;
+ video_unregister_device(&bcap_dev->video_dev);
err_free_handler:
v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
err_unreg_v4l2:
v4l2_device_unregister(&bcap_dev->v4l2_dev);
-err_release_vdev:
- if (bcap_dev->video_dev)
- video_device_release(bcap_dev->video_dev);
err_cleanup_ctx:
vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
err_free_ppi:
@@ -1108,7 +966,7 @@ static int bcap_remove(struct platform_device *pdev)
struct bcap_device, v4l2_dev);
bcap_free_sensor_formats(bcap_dev);
- video_unregister_device(bcap_dev->video_dev);
+ video_unregister_device(&bcap_dev->video_dev);
v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
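
The *_std and *_dv_timings handlers above now return -ENODATA when the selected input does not advertise the corresponding capability. A hedged user-space sketch of how an application checks this before calling VIDIOC_QUERYSTD (the /dev/video0 path is only an example):

/* Hedged user-space sketch, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_input input;
	v4l2_std_id std;
	int fd = open("/dev/video0", O_RDWR);
	int index;

	if (fd < 0 || ioctl(fd, VIDIOC_G_INPUT, &index) < 0)
		return 1;

	memset(&input, 0, sizeof(input));
	input.index = index;
	if (ioctl(fd, VIDIOC_ENUMINPUT, &input) < 0)
		return 1;

	/* Only query the standard if the current input advertises it. */
	if ((input.capabilities & V4L2_IN_CAP_STD) &&
	    ioctl(fd, VIDIOC_QUERYSTD, &std) == 0)
		printf("detected standard: 0x%llx\n", (unsigned long long)std);
	else
		printf("input %u has no analog standard\n", input.index);

	return 0;
}
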
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
index 25ce15561695..834e504bf085 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/coda/Makefile
@@ -1,3 +1,5 @@
+ccflags-y += -I$(src)
+
coda-objs := coda-common.o coda-bit.o coda-h264.o coda-jpeg.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 856b542b35b9..d0430071d2ee 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -29,13 +30,18 @@
#include <media/videobuf2-vmalloc.h>
#include "coda.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#define CODA_PARA_BUF_SIZE (10 * 1024)
#define CODA7_PS_BUF_SIZE 0x28000
#define CODA9_PS_SAVE_SIZE (512 * 1024)
#define CODA_DEFAULT_GAMMA 4096
#define CODA9_DEFAULT_GAMMA 24576 /* 0.75 * 32768 */
+static void coda_free_bitstream_buffer(struct coda_ctx *ctx);
+
static inline int coda_is_initialized(struct coda_dev *dev)
{
return coda_read(dev, CODA_REG_BIT_CUR_PC) != 0;
@@ -84,15 +90,21 @@ static void coda_command_async(struct coda_ctx *ctx, int cmd)
coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
+ trace_coda_bit_run(ctx, cmd);
+
coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
}
static int coda_command_sync(struct coda_ctx *ctx, int cmd)
{
struct coda_dev *dev = ctx->dev;
+ int ret;
coda_command_async(ctx, cmd);
- return coda_wait_timeout(dev);
+ ret = coda_wait_timeout(dev);
+ trace_coda_bit_done(ctx);
+
+ return ret;
}
int coda_hw_reset(struct coda_ctx *ctx)
@@ -177,10 +189,6 @@ static int coda_bitstream_queue(struct coda_ctx *ctx,
if (n < src_size)
return -ENOSPC;
- dma_sync_single_for_device(&ctx->dev->plat_dev->dev,
- ctx->bitstream.paddr, ctx->bitstream.size,
- DMA_TO_DEVICE);
-
src_buf->v4l2_buf.sequence = ctx->qsequence++;
return 0;
@@ -214,7 +222,7 @@ static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
return true;
}
-void coda_fill_bitstream(struct coda_ctx *ctx)
+void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
{
struct vb2_buffer *src_buf;
struct coda_buffer_meta *meta;
@@ -235,9 +243,12 @@ void coda_fill_bitstream(struct coda_ctx *ctx)
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
!coda_jpeg_check_buffer(ctx, src_buf)) {
v4l2_err(&ctx->dev->v4l2_dev,
- "dropping invalid JPEG frame\n");
+ "dropping invalid JPEG frame %d\n",
+ ctx->qsequence);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(src_buf, streaming ?
+ VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
continue;
}
@@ -262,6 +273,8 @@ void coda_fill_bitstream(struct coda_ctx *ctx)
ctx->bitstream_fifo.kfifo.mask;
list_add_tail(&meta->list,
&ctx->buffer_meta_list);
+
+ trace_coda_bit_queue(ctx, src_buf, meta);
}
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
@@ -297,6 +310,14 @@ static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
p[index ^ 1] = value;
}
+static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
+ struct coda_aux_buf *buf, size_t size,
+ const char *name)
+{
+ return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry);
+}
+
+
static void coda_free_framebuffers(struct coda_ctx *ctx)
{
int i;
@@ -377,6 +398,7 @@ static void coda_free_context_buffers(struct coda_ctx *ctx)
coda_free_aux_buf(dev, &ctx->psbuf);
if (dev->devtype->product != CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
+ coda_free_aux_buf(dev, &ctx->parabuf);
}
static int coda_alloc_context_buffers(struct coda_ctx *ctx,
@@ -386,57 +408,42 @@ static int coda_alloc_context_buffers(struct coda_ctx *ctx,
size_t size;
int ret;
+ if (!ctx->parabuf.vaddr) {
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
+ CODA_PARA_BUF_SIZE, "parabuf");
+ if (ret < 0)
+ return ret;
+ }
+
if (dev->devtype->product == CODA_DX6)
return 0;
- if (ctx->psbuf.vaddr) {
- v4l2_err(&dev->v4l2_dev, "psmembuf still allocated\n");
- return -EBUSY;
- }
- if (ctx->slicebuf.vaddr) {
- v4l2_err(&dev->v4l2_dev, "slicebuf still allocated\n");
- return -EBUSY;
- }
- if (ctx->workbuf.vaddr) {
- v4l2_err(&dev->v4l2_dev, "context buffer still allocated\n");
- ret = -EBUSY;
- return -ENOMEM;
- }
-
- if (q_data->fourcc == V4L2_PIX_FMT_H264) {
+ if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
/* worst case slice size */
size = (DIV_ROUND_UP(q_data->width, 16) *
DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
"slicebuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev,
- "failed to allocate %d byte slice buffer",
- ctx->slicebuf.size);
- return ret;
- }
+ if (ret < 0)
+ goto err;
}
- if (dev->devtype->product == CODA_7541) {
+ if (!ctx->psbuf.vaddr && dev->devtype->product == CODA_7541) {
ret = coda_alloc_context_buf(ctx, &ctx->psbuf,
CODA7_PS_BUF_SIZE, "psbuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev,
- "failed to allocate psmem buffer");
+ if (ret < 0)
goto err;
- }
}
- size = dev->devtype->workbuf_size;
- if (dev->devtype->product == CODA_960 &&
- q_data->fourcc == V4L2_PIX_FMT_H264)
- size += CODA9_PS_SAVE_SIZE;
- ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size, "workbuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev,
- "failed to allocate %d byte context buffer",
- ctx->workbuf.size);
- goto err;
+ if (!ctx->workbuf.vaddr) {
+ size = dev->devtype->workbuf_size;
+ if (dev->devtype->product == CODA_960 &&
+ q_data->fourcc == V4L2_PIX_FMT_H264)
+ size += CODA9_PS_SAVE_SIZE;
+ ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size,
+ "workbuf");
+ if (ret < 0)
+ goto err;
}
return 0;
@@ -709,6 +716,27 @@ err_clk_per:
* Encoder context operations
*/
+static int coda_encoder_reqbufs(struct coda_ctx *ctx,
+ struct v4l2_requestbuffers *rb)
+{
+ struct coda_q_data *q_data_src;
+ int ret;
+
+ if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return 0;
+
+ if (rb->count) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+ } else {
+ coda_free_context_buffers(ctx);
+ }
+
+ return 0;
+}
+
static int coda_start_encoding(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
@@ -725,11 +753,6 @@ static int coda_start_encoding(struct coda_ctx *ctx)
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_fourcc = q_data_dst->fourcc;
- /* Allocate per-instance buffers */
- ret = coda_alloc_context_buffers(ctx, q_data_src);
- if (ret < 0)
- return ret;
-
buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
bitstream_size = q_data_dst->sizeimage;
@@ -1227,6 +1250,8 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
+ trace_coda_enc_pic_run(ctx, src_buf);
+
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
return 0;
@@ -1241,6 +1266,8 @@ static void coda_finish_encode(struct coda_ctx *ctx)
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ trace_coda_enc_pic_done(ctx, dst_buf);
+
/* Get results from the coda */
start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
@@ -1311,7 +1338,6 @@ static void coda_seq_end_work(struct work_struct *work)
ctx->bitstream.vaddr, ctx->bitstream.size);
coda_free_framebuffers(ctx);
- coda_free_context_buffers(ctx);
mutex_unlock(&dev->coda_mutex);
mutex_unlock(&ctx->buffer_mutex);
@@ -1322,11 +1348,13 @@ static void coda_bit_release(struct coda_ctx *ctx)
mutex_lock(&ctx->buffer_mutex);
coda_free_framebuffers(ctx);
coda_free_context_buffers(ctx);
+ coda_free_bitstream_buffer(ctx);
mutex_unlock(&ctx->buffer_mutex);
}
const struct coda_context_ops coda_bit_encode_ops = {
.queue_init = coda_encoder_queue_init,
+ .reqbufs = coda_encoder_reqbufs,
.start_streaming = coda_start_encoding,
.prepare_run = coda_prepare_encode,
.finish_run = coda_finish_encode,
@@ -1338,6 +1366,65 @@ const struct coda_context_ops coda_bit_encode_ops = {
* Decoder context operations
*/
+static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
+ struct coda_q_data *q_data)
+{
+ if (ctx->bitstream.vaddr)
+ return 0;
+
+ ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
+ ctx->bitstream.vaddr = dma_alloc_writecombine(
+ &ctx->dev->plat_dev->dev, ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
+ if (!ctx->bitstream.vaddr) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "failed to allocate bitstream ringbuffer");
+ return -ENOMEM;
+ }
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+
+ return 0;
+}
+
+static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
+{
+ if (ctx->bitstream.vaddr == NULL)
+ return;
+
+ dma_free_writecombine(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ ctx->bitstream.vaddr = NULL;
+ kfifo_init(&ctx->bitstream_fifo, NULL, 0);
+}
+
+static int coda_decoder_reqbufs(struct coda_ctx *ctx,
+ struct v4l2_requestbuffers *rb)
+{
+ struct coda_q_data *q_data_src;
+ int ret;
+
+ if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return 0;
+
+ if (rb->count) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+ ret = coda_alloc_bitstream_buffer(ctx, q_data_src);
+ if (ret < 0) {
+ coda_free_context_buffers(ctx);
+ return ret;
+ }
+ } else {
+ coda_free_bitstream_buffer(ctx);
+ coda_free_context_buffers(ctx);
+ }
+
+ return 0;
+}
+
static int __coda_start_decoding(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src, *q_data_dst;
@@ -1356,11 +1443,6 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
src_fourcc = q_data_src->fourcc;
dst_fourcc = q_data_dst->fourcc;
- /* Allocate per-instance buffers */
- ret = coda_alloc_context_buffers(ctx, q_data_src);
- if (ret < 0)
- return ret;
-
coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
/* Update coda bitstream read and write pointers from kfifo */
@@ -1579,7 +1661,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
/* Try to copy source buffer contents into the bitstream ringbuffer */
mutex_lock(&ctx->bitstream_mutex);
- coda_fill_bitstream(ctx);
+ coda_fill_bitstream(ctx, true);
mutex_unlock(&ctx->bitstream_mutex);
if (coda_get_bitstream_payload(ctx) < 512 &&
@@ -1675,6 +1757,8 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
/* Clear decode success flag */
coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
+ trace_coda_dec_pic_run(ctx, meta);
+
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
return 0;
@@ -1704,7 +1788,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
* by up to 512 bytes
*/
if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
- if (coda_get_bitstream_payload(ctx) >= CODA_MAX_FRAME_SIZE - 512)
+ if (coda_get_bitstream_payload(ctx) >= ctx->bitstream.size - 512)
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
}
@@ -1835,6 +1919,8 @@ static void coda_finish_decode(struct coda_ctx *ctx)
}
mutex_unlock(&ctx->bitstream_mutex);
+ trace_coda_dec_pic_done(ctx, &ctx->frame_metas[decoded_idx]);
+
val = coda_read(dev, CODA_RET_DEC_PIC_TYPE) & 0x7;
if (val == 0)
ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_KEYFRAME;
@@ -1874,6 +1960,8 @@ static void coda_finish_decode(struct coda_ctx *ctx)
dst_buf->v4l2_buf.timecode = meta->timecode;
dst_buf->v4l2_buf.timestamp = meta->timestamp;
+ trace_coda_dec_rot_done(ctx, meta, dst_buf);
+
switch (q_data_dst->fourcc) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
@@ -1906,6 +1994,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
const struct coda_context_ops coda_bit_decode_ops = {
.queue_init = coda_decoder_queue_init,
+ .reqbufs = coda_decoder_reqbufs,
.start_streaming = coda_start_decoding,
.prepare_run = coda_prepare_decode,
.finish_run = coda_finish_decode,
@@ -1931,6 +2020,8 @@ irqreturn_t coda_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+ trace_coda_bit_done(ctx);
+
if (ctx->aborting) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"task has been aborted\n");
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 6f32e6d6b156..8e6fe0200117 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -46,7 +46,6 @@
#define CODADX6_MAX_INSTANCES 4
#define CODA_MAX_FORMATS 4
-#define CODA_PARA_BUF_SIZE (10 * 1024)
#define CODA_ISRAM_SIZE (2048 * 2)
#define MIN_W 176
@@ -696,6 +695,26 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
return coda_s_fmt(ctx, &f_cap);
}
+static int coda_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow allocating instance-specific per-context buffers, such as the
+ * bitstream ringbuffer, slice buffer, work buffer, etc., if needed.
+ */
+ if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs)
+ return ctx->ops->reqbufs(ctx, rb);
+
+ return 0;
+}
+
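
With the hook above, the large per-context buffers are created when user space requests OUTPUT buffers and released again on a zero-count request. From the application side this is just the ordinary REQBUFS call; a hedged sketch:

/* Hedged user-space sketch, not part of the patch:
 * REQBUFS now also drives the per-context buffer allocation. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int request_output_buffers(int fd, unsigned int count)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	req.memory = V4L2_MEMORY_MMAP;
	req.count = count;	/* count > 0 allocates, count == 0 frees */

	return ioctl(fd, VIDIOC_REQBUFS, &req);
}

A zero-count request after streaming stops releases the buffers again, matching the else branches of coda_encoder_reqbufs() and coda_decoder_reqbufs() below.
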
static int coda_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
@@ -841,7 +860,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
- .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_reqbufs = coda_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = coda_qbuf,
@@ -1173,7 +1192,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
mutex_lock(&ctx->bitstream_mutex);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
if (vb2_is_streaming(vb->vb2_queue))
- coda_fill_bitstream(ctx);
+ coda_fill_bitstream(ctx, true);
mutex_unlock(&ctx->bitstream_mutex);
} else {
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
@@ -1215,8 +1234,9 @@ void coda_free_aux_buf(struct coda_dev *dev,
buf->vaddr, buf->paddr);
buf->vaddr = NULL;
buf->size = 0;
+ debugfs_remove(buf->dentry);
+ buf->dentry = NULL;
}
- debugfs_remove(buf->dentry);
}
static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -1232,9 +1252,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
if (q_data_src->fourcc == V4L2_PIX_FMT_H264 ||
(q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
ctx->dev->devtype->product == CODA_7541)) {
- /* copy the buffers that where queued before streamon */
+ /* copy the buffers that were queued before streamon */
mutex_lock(&ctx->bitstream_mutex);
- coda_fill_bitstream(ctx);
+ coda_fill_bitstream(ctx, false);
mutex_unlock(&ctx->bitstream_mutex);
if (coda_get_bitstream_payload(ctx) < 512) {
@@ -1262,12 +1282,23 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
if (!(ctx->streamon_out & ctx->streamon_cap))
return 0;
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if ((q_data_src->width != q_data_dst->width &&
+ round_up(q_data_src->width, 16) != q_data_dst->width) ||
+ (q_data_src->height != q_data_dst->height &&
+ round_up(q_data_src->height, 16) != q_data_dst->height)) {
+ v4l2_err(v4l2_dev, "can't convert %dx%d to %dx%d\n",
+ q_data_src->width, q_data_src->height,
+ q_data_dst->width, q_data_dst->height);
+ ret = -EINVAL;
+ goto err;
+ }
+
/* Allow BIT decoder device_run with no new buffers queued */
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
ctx->gopcounter = ctx->params.gop_size - 1;
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
q_data_dst->fourcc);
@@ -1308,6 +1339,9 @@ static void coda_stop_streaming(struct vb2_queue *q)
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct coda_dev *dev = ctx->dev;
struct vb2_buffer *buf;
+ bool stop;
+
+ stop = ctx->streamon_out && ctx->streamon_cap;
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
@@ -1332,7 +1366,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
}
- if (!ctx->streamon_out && !ctx->streamon_cap) {
+ if (stop) {
struct coda_buffer_meta *meta;
if (ctx->ops->seq_end_work) {
@@ -1457,7 +1491,7 @@ static const struct v4l2_ctrl_ops coda_ctrl_ops = {
static void coda_encode_ctrls(struct coda_ctx *ctx)
{
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0);
+ V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1000, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
@@ -1541,6 +1575,13 @@ static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq)
vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
vq->lock = &ctx->dev->dev_mutex;
+ /* One way to indicate end-of-stream for coda is to set the
+ * bytesused == 0. However, by default videobuf2 handles a bytesused
+ * value of 0 as a special case and changes it to the size of the
+ * buffer. Set the allow_zero_bytesused flag so that videobuf2 will
+ * keep the value of bytesused intact.
+ */
+ vq->allow_zero_bytesused = 1;
return vb2_queue_init(vq);
}
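
The allow_zero_bytesused comment above describes the driver's end-of-stream convention: an OUTPUT buffer queued with bytesused == 0 is now passed through unmodified. A hedged user-space sketch of signalling EOS this way (MMAP I/O and a free buffer index are assumed):

/* Hedged user-space sketch, not part of the patch:
 * queue an empty OUTPUT buffer to signal end-of-stream. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_eos_buffer(int fd, unsigned int index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.bytesused = 0;	/* kept intact thanks to allow_zero_bytesused */

	return ioctl(fd, VIDIOC_QBUF, &buf);
}
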
@@ -1621,6 +1662,11 @@ static int coda_open(struct file *file)
set_bit(idx, &dev->instance_mask);
name = kasprintf(GFP_KERNEL, "context%d", idx);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_coda_name_init;
+ }
+
ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root);
kfree(name);
@@ -1682,28 +1728,6 @@ static int coda_open(struct file *file)
ctx->fh.ctrl_handler = &ctx->ctrls;
- if (ctx->use_bit) {
- ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
- CODA_PARA_BUF_SIZE, "parabuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
- goto err_dma_alloc;
- }
- }
- if (ctx->use_bit && ctx->inst_type == CODA_INST_DECODER) {
- ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
- ctx->bitstream.vaddr = dma_alloc_writecombine(
- &dev->plat_dev->dev, ctx->bitstream.size,
- &ctx->bitstream.paddr, GFP_KERNEL);
- if (!ctx->bitstream.vaddr) {
- v4l2_err(&dev->v4l2_dev,
- "failed to allocate bitstream ringbuffer");
- ret = -ENOMEM;
- goto err_dma_writecombine;
- }
- }
- kfifo_init(&ctx->bitstream_fifo,
- ctx->bitstream.vaddr, ctx->bitstream.size);
mutex_init(&ctx->bitstream_mutex);
mutex_init(&ctx->buffer_mutex);
INIT_LIST_HEAD(&ctx->buffer_meta_list);
@@ -1717,12 +1741,6 @@ static int coda_open(struct file *file)
return 0;
-err_dma_writecombine:
- if (ctx->dev->devtype->product == CODA_DX6)
- coda_free_aux_buf(dev, &ctx->workbuf);
- coda_free_aux_buf(dev, &ctx->parabuf);
-err_dma_alloc:
- v4l2_ctrl_handler_free(&ctx->ctrls);
err_ctrls_setup:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_ctx_init:
@@ -1735,6 +1753,7 @@ err_pm_get:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
clear_bit(ctx->idx, &dev->instance_mask);
+err_coda_name_init:
err_coda_max:
kfree(ctx);
return ret;
@@ -1764,14 +1783,9 @@ static int coda_release(struct file *file)
list_del(&ctx->list);
coda_unlock(ctx);
- if (ctx->bitstream.vaddr) {
- dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
- ctx->bitstream.vaddr, ctx->bitstream.paddr);
- }
if (ctx->dev->devtype->product == CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
- coda_free_aux_buf(dev, &ctx->parabuf);
v4l2_ctrl_handler_free(&ctx->ctrls);
clk_disable_unprepare(dev->clk_ahb);
clk_disable_unprepare(dev->clk_per);
@@ -1901,8 +1915,7 @@ static int coda_register_device(struct coda_dev *dev, int i)
if (i >= dev->devtype->num_vdevs)
return -EINVAL;
- snprintf(vfd->name, sizeof(vfd->name), "%s",
- dev->devtype->vdevs[i]->name);
+ strlcpy(vfd->name, dev->devtype->vdevs[i]->name, sizeof(vfd->name));
vfd->fops = &coda_fops;
vfd->ioctl_ops = &coda_ioctl_ops;
vfd->release = video_device_release_empty,
@@ -1933,10 +1946,8 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
/* allocate auxiliary per-device code buffer for the BIT processor */
ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf",
dev->debugfs_root);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to allocate code buffer\n");
+ if (ret < 0)
goto put_pm;
- }
/* Copy the whole firmware image to the code buffer */
memcpy(dev->codebuf.vaddr, fw->data, fw->size);
@@ -2174,20 +2185,16 @@ static int coda_probe(struct platform_device *pdev)
ret = coda_alloc_aux_buf(dev, &dev->workbuf,
dev->devtype->workbuf_size, "workbuf",
dev->debugfs_root);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to allocate work buffer\n");
+ if (ret < 0)
goto err_v4l2_register;
- }
}
if (dev->devtype->tempbuf_size) {
ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
dev->devtype->tempbuf_size, "tempbuf",
dev->debugfs_root);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to allocate temp buffer\n");
+ if (ret < 0)
goto err_v4l2_register;
- }
}
dev->iram.size = dev->devtype->iram_size;
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index 8fa3e353f9e2..11e734bc2cbd 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -13,6 +13,7 @@
#include <linux/swab.h>
#include "coda.h"
+#include "trace.h"
#define SOI_MARKER 0xffd8
#define EOI_MARKER 0xffd9
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 0c35cd5032ff..6a5c8f6c688e 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -12,6 +12,9 @@
* (at your option) any later version.
*/
+#ifndef __CODA_H__
+#define __CODA_H__
+
#include <linux/debugfs.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
@@ -26,7 +29,6 @@
#include "coda_regs.h"
#define CODA_MAX_FRAMEBUFFERS 8
-#define CODA_MAX_FRAME_SIZE 0x100000
#define FMO_SLICE_SAVE_BUF_SIZE (32)
enum {
@@ -178,6 +180,7 @@ struct coda_ctx;
struct coda_context_ops {
int (*queue_init)(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
+ int (*reqbufs)(struct coda_ctx *ctx, struct v4l2_requestbuffers *rb);
int (*start_streaming)(struct coda_ctx *ctx);
int (*prepare_run)(struct coda_ctx *ctx);
void (*finish_run)(struct coda_ctx *ctx);
@@ -249,13 +252,6 @@ int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
size_t size, const char *name, struct dentry *parent);
void coda_free_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf);
-static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
- struct coda_aux_buf *buf, size_t size,
- const char *name)
-{
- return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry);
-}
-
int coda_encoder_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
@@ -263,7 +259,7 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
int coda_hw_reset(struct coda_ctx *ctx);
-void coda_fill_bitstream(struct coda_ctx *ctx);
+void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming);
void coda_set_gdi_regs(struct coda_ctx *ctx);
@@ -284,7 +280,7 @@ const char *coda_product_name(int product);
int coda_check_firmware(struct coda_dev *dev);
-static inline int coda_get_bitstream_payload(struct coda_ctx *ctx)
+static inline unsigned int coda_get_bitstream_payload(struct coda_ctx *ctx)
{
return kfifo_len(&ctx->bitstream_fifo);
}
@@ -301,3 +297,5 @@ extern const struct coda_context_ops coda_bit_encode_ops;
extern const struct coda_context_ops coda_bit_decode_ops;
irqreturn_t coda_irq_handler(int irq, void *data);
+
+#endif /* __CODA_H__ */
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
new file mode 100644
index 000000000000..d1d06cbd1f6a
--- /dev/null
+++ b/drivers/media/platform/coda/trace.h
@@ -0,0 +1,203 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM coda
+
+#if !defined(__CODA_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __CODA_TRACE_H__
+
+#include <linux/tracepoint.h>
+#include <media/videobuf2-core.h>
+
+#include "coda.h"
+
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+
+TRACE_EVENT(coda_bit_run,
+ TP_PROTO(struct coda_ctx *ctx, int cmd),
+
+ TP_ARGS(ctx, cmd),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, ctx)
+ __field(int, cmd)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->ctx = ctx->idx;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("minor = %d, ctx = %d, cmd = %d",
+ __entry->minor, __entry->ctx, __entry->cmd)
+);
+
+TRACE_EVENT(coda_bit_done,
+ TP_PROTO(struct coda_ctx *ctx),
+
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, ctx = %d", __entry->minor, __entry->ctx)
+);
+
+TRACE_EVENT(coda_enc_pic_run,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+
+ TP_ARGS(ctx, buf),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, index)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->index = buf->v4l2_buf.index;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, index = %d, ctx = %d",
+ __entry->minor, __entry->index, __entry->ctx)
+);
+
+TRACE_EVENT(coda_enc_pic_done,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+
+ TP_ARGS(ctx, buf),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, index)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->index = buf->v4l2_buf.index;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, index = %d, ctx = %d",
+ __entry->minor, __entry->index, __entry->ctx)
+);
+
+TRACE_EVENT(coda_bit_queue,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ struct coda_buffer_meta *meta),
+
+ TP_ARGS(ctx, buf, meta),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, index)
+ __field(int, start)
+ __field(int, end)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->index = buf->v4l2_buf.index;
+ __entry->start = meta->start;
+ __entry->end = meta->end;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, index = %d, start = 0x%x, end = 0x%x, ctx = %d",
+ __entry->minor, __entry->index, __entry->start, __entry->end,
+ __entry->ctx)
+);
+
+TRACE_EVENT(coda_dec_pic_run,
+ TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
+
+ TP_ARGS(ctx, meta),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, start)
+ __field(int, end)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->start = meta ? meta->start : 0;
+ __entry->end = meta ? meta->end : 0;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, start = 0x%x, end = 0x%x, ctx = %d",
+ __entry->minor, __entry->start, __entry->end, __entry->ctx)
+);
+
+TRACE_EVENT(coda_dec_pic_done,
+ TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
+
+ TP_ARGS(ctx, meta),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, start)
+ __field(int, end)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->start = meta->start;
+ __entry->end = meta->end;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, start = 0x%x, end = 0x%x, ctx = %d",
+ __entry->minor, __entry->start, __entry->end, __entry->ctx)
+);
+
+TRACE_EVENT(coda_dec_rot_done,
+ TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta,
+ struct vb2_buffer *buf),
+
+ TP_ARGS(ctx, meta, buf),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(int, start)
+ __field(int, end)
+ __field(int, index)
+ __field(int, ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = ctx->fh.vdev->minor;
+ __entry->start = meta->start;
+ __entry->end = meta->end;
+ __entry->index = buf->v4l2_buf.index;
+ __entry->ctx = ctx->idx;
+ ),
+
+ TP_printk("minor = %d, start = 0x%x, end = 0x%x, index = %d, ctx = %d",
+ __entry->minor, __entry->start, __entry->end, __entry->index,
+ __entry->ctx)
+);
+
+#endif /* __CODA_TRACE_H__ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index b41bf7e822c8..ccfcf3f528d3 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1871,16 +1871,9 @@ static int vpfe_probe(struct platform_device *pdev)
goto probe_free_ccdc_cfg_mem;
}
- /* Allocate memory for video device */
- vfd = video_device_alloc();
- if (NULL == vfd) {
- ret = -ENOMEM;
- v4l2_err(pdev->dev.driver, "Unable to alloc video device\n");
- goto probe_out_release_irq;
- }
-
+ vfd = &vpfe_dev->video_dev;
/* Initialize field of video device */
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
vfd->fops = &vpfe_fops;
vfd->ioctl_ops = &vpfe_ioctl_ops;
vfd->tvnorms = 0;
@@ -1891,14 +1884,12 @@ static int vpfe_probe(struct platform_device *pdev)
(VPFE_CAPTURE_VERSION_CODE >> 16) & 0xff,
(VPFE_CAPTURE_VERSION_CODE >> 8) & 0xff,
(VPFE_CAPTURE_VERSION_CODE) & 0xff);
- /* Set video_dev to the video device */
- vpfe_dev->video_dev = vfd;
ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
if (ret) {
v4l2_err(pdev->dev.driver,
"Unable to register v4l2 device.\n");
- goto probe_out_video_release;
+ goto probe_out_release_irq;
}
v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
spin_lock_init(&vpfe_dev->irqlock);
@@ -1914,7 +1905,7 @@ static int vpfe_probe(struct platform_device *pdev)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
"video_dev=%p\n", &vpfe_dev->video_dev);
vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ret = video_register_device(vpfe_dev->video_dev,
+ ret = video_register_device(&vpfe_dev->video_dev,
VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -1927,7 +1918,7 @@ static int vpfe_probe(struct platform_device *pdev)
/* set the driver data in platform device */
platform_set_drvdata(pdev, vpfe_dev);
/* set driver private data */
- video_set_drvdata(vpfe_dev->video_dev, vpfe_dev);
+ video_set_drvdata(&vpfe_dev->video_dev, vpfe_dev);
i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
num_subdevs = vpfe_cfg->num_subdevs;
vpfe_dev->sd = kmalloc(sizeof(struct v4l2_subdev *) * num_subdevs,
@@ -1979,12 +1970,9 @@ static int vpfe_probe(struct platform_device *pdev)
probe_sd_out:
kfree(vpfe_dev->sd);
probe_out_video_unregister:
- video_unregister_device(vpfe_dev->video_dev);
+ video_unregister_device(&vpfe_dev->video_dev);
probe_out_v4l2_unregister:
v4l2_device_unregister(&vpfe_dev->v4l2_dev);
-probe_out_video_release:
- if (!video_is_registered(vpfe_dev->video_dev))
- video_device_release(vpfe_dev->video_dev);
probe_out_release_irq:
free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
probe_free_ccdc_cfg_mem:
@@ -2007,7 +1995,7 @@ static int vpfe_remove(struct platform_device *pdev)
free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
kfree(vpfe_dev->sd);
v4l2_device_unregister(&vpfe_dev->v4l2_dev);
- video_unregister_device(vpfe_dev->video_dev);
+ video_unregister_device(&vpfe_dev->video_dev);
kfree(vpfe_dev);
kfree(ccdc_cfg);
return 0;
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index fa0a51521772..a5f548138b91 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -712,7 +712,7 @@ static int vpif_set_input(
ch->vpifparams.iface = chan_cfg->vpif_if;
/* update tvnorms from the sub device input info */
- ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
+ ch->video_dev.tvnorms = chan_cfg->inputs[index].input.std;
return 0;
}
@@ -1337,7 +1337,7 @@ static int vpif_probe_complete(void)
struct video_device *vdev;
struct channel_obj *ch;
struct vb2_queue *q;
- int i, j, err, k;
+ int j, err, k;
for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
ch = vpif_obj.dev[j];
@@ -1384,16 +1384,16 @@ static int vpif_probe_complete(void)
INIT_LIST_HEAD(&common->dma_queue);
/* Initialize the video_device structure */
- vdev = ch->video_dev;
+ vdev = &ch->video_dev;
strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
- vdev->release = video_device_release;
+ vdev->release = video_device_release_empty;
vdev->fops = &vpif_fops;
vdev->ioctl_ops = &vpif_ioctl_ops;
vdev->v4l2_dev = &vpif_obj.v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
vdev->queue = q;
vdev->lock = &common->lock;
- video_set_drvdata(ch->video_dev, ch);
+ video_set_drvdata(&ch->video_dev, ch);
err = video_register_device(vdev,
VFL_TYPE_GRABBER, (j ? 1 : 0));
if (err)
@@ -1410,14 +1410,9 @@ probe_out:
common = &ch->common[k];
vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
/* Unregister video device */
- video_unregister_device(ch->video_dev);
+ video_unregister_device(&ch->video_dev);
}
kfree(vpif_obj.sd);
- for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
- ch = vpif_obj.dev[i];
- /* Note: does nothing if ch->video_dev == NULL */
- video_device_release(ch->video_dev);
- }
v4l2_device_unregister(&vpif_obj.v4l2_dev);
return err;
@@ -1438,13 +1433,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
static __init int vpif_probe(struct platform_device *pdev)
{
struct vpif_subdev_info *subdevdata;
- int i, j, err;
- int res_idx = 0;
struct i2c_adapter *i2c_adap;
- struct channel_obj *ch;
- struct video_device *vfd;
struct resource *res;
int subdev_count;
+ int res_idx = 0;
+ int i, err;
vpif_dev = &pdev->dev;
@@ -1472,24 +1465,6 @@ static __init int vpif_probe(struct platform_device *pdev)
res_idx++;
}
- for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
- /* Get the pointer to the channel object */
- ch = vpif_obj.dev[i];
- /* Allocate memory for video device */
- vfd = video_device_alloc();
- if (NULL == vfd) {
- for (j = 0; j < i; j++) {
- ch = vpif_obj.dev[j];
- video_device_release(ch->video_dev);
- }
- err = -ENOMEM;
- goto vpif_unregister;
- }
-
- /* Set video_dev to the video device */
- ch->video_dev = vfd;
- }
-
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
@@ -1498,7 +1473,7 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd == NULL) {
vpif_err("unable to allocate memory for subdevice pointers\n");
err = -ENOMEM;
- goto vpif_sd_error;
+ goto vpif_unregister;
}
if (!vpif_obj.config->asd_sizes) {
@@ -1541,13 +1516,6 @@ static __init int vpif_probe(struct platform_device *pdev)
probe_subdev_out:
/* free sub devices memory */
kfree(vpif_obj.sd);
-
-vpif_sd_error:
- for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
- ch = vpif_obj.dev[i];
- /* Note: does nothing if ch->video_dev == NULL */
- video_device_release(ch->video_dev);
- }
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
@@ -1576,7 +1544,7 @@ static int vpif_remove(struct platform_device *device)
common = &ch->common[VPIF_VIDEO_INDEX];
vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
/* Unregister video device */
- video_unregister_device(ch->video_dev);
+ video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
}
return 0;
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
index f65d28d38e66..8b8a663f6b22 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -92,7 +92,7 @@ struct common_obj {
struct channel_obj {
/* Identifies video device for this channel */
- struct video_device *video_dev;
+ struct video_device video_dev;
/* Indicates id of the field which is being displayed */
u32 field_id;
/* flag to indicate whether decoder is initialized */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 839c24de1fd8..682e5d578bf7 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -829,7 +829,7 @@ static int vpif_set_output(struct vpif_display_config *vpif_cfg,
ch->sd = sd;
if (chan_cfg->outputs != NULL)
/* update tvnorms from the sub device output info */
- ch->video_dev->tvnorms = chan_cfg->outputs[index].output.std;
+ ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std;
return 0;
}
@@ -1204,16 +1204,16 @@ static int vpif_probe_complete(void)
ch, &ch->video_dev);
/* Initialize the video_device structure */
- vdev = ch->video_dev;
+ vdev = &ch->video_dev;
strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
- vdev->release = video_device_release;
+ vdev->release = video_device_release_empty;
vdev->fops = &vpif_fops;
vdev->ioctl_ops = &vpif_ioctl_ops;
vdev->v4l2_dev = &vpif_obj.v4l2_dev;
vdev->vfl_dir = VFL_DIR_TX;
vdev->queue = q;
vdev->lock = &common->lock;
- video_set_drvdata(ch->video_dev, ch);
+ video_set_drvdata(&ch->video_dev, ch);
err = video_register_device(vdev, VFL_TYPE_GRABBER,
(j ? 3 : 2));
if (err < 0)
@@ -1227,9 +1227,7 @@ probe_out:
ch = vpif_obj.dev[k];
common = &ch->common[k];
vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
- video_unregister_device(ch->video_dev);
- video_device_release(ch->video_dev);
- ch->video_dev = NULL;
+ video_unregister_device(&ch->video_dev);
}
return err;
}
@@ -1246,13 +1244,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
static __init int vpif_probe(struct platform_device *pdev)
{
struct vpif_subdev_info *subdevdata;
- int i, j = 0, err = 0;
- int res_idx = 0;
struct i2c_adapter *i2c_adap;
- struct channel_obj *ch;
- struct video_device *vfd;
struct resource *res;
int subdev_count;
+ int res_idx = 0;
+ int i, err;
vpif_dev = &pdev->dev;
err = initialize_vpif();
@@ -1281,25 +1277,6 @@ static __init int vpif_probe(struct platform_device *pdev)
res_idx++;
}
- for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
- /* Get the pointer to the channel object */
- ch = vpif_obj.dev[i];
-
- /* Allocate memory for video device */
- vfd = video_device_alloc();
- if (vfd == NULL) {
- for (j = 0; j < i; j++) {
- ch = vpif_obj.dev[j];
- video_device_release(ch->video_dev);
- }
- err = -ENOMEM;
- goto vpif_unregister;
- }
-
- /* Set video_dev to the video device */
- ch->video_dev = vfd;
- }
-
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
subdevdata = vpif_obj.config->subdevinfo;
@@ -1308,7 +1285,7 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd == NULL) {
vpif_err("unable to allocate memory for subdevice pointers\n");
err = -ENOMEM;
- goto vpif_sd_error;
+ goto vpif_unregister;
}
if (!vpif_obj.config->asd_sizes) {
@@ -1348,12 +1325,6 @@ static __init int vpif_probe(struct platform_device *pdev)
probe_subdev_out:
kfree(vpif_obj.sd);
-vpif_sd_error:
- for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
- ch = vpif_obj.dev[i];
- /* Note: does nothing if ch->video_dev == NULL */
- video_device_release(ch->video_dev);
- }
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
@@ -1379,9 +1350,7 @@ static int vpif_remove(struct platform_device *device)
common = &ch->common[VPIF_VIDEO_INDEX];
vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
/* Unregister video device */
- video_unregister_device(ch->video_dev);
-
- ch->video_dev = NULL;
+ video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
}
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
index 7b21a7607674..849e0e385f18 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -100,7 +100,7 @@ struct common_obj {
struct channel_obj {
/* V4l2 specific parameters */
- struct video_device *video_dev; /* Identifies video device for
+ struct video_device video_dev; /* Identifies video device for
* this channel */
u32 field_id; /* Indicates id of the field
* which is being displayed */
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 8a2fd8c33d42..cfebf292e15a 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -1482,7 +1482,7 @@ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
}
static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct fimc_fmt *fmt;
@@ -1495,7 +1495,7 @@ static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1504,7 +1504,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1536,7 +1536,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1559,7 +1559,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mf->colorspace = V4L2_COLORSPACE_JPEG;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
return 0;
}
@@ -1602,7 +1602,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1628,10 +1628,10 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
return 0;
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
f = &ctx->d_frame;
break;
default:
@@ -1657,7 +1657,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
}
static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1675,10 +1675,10 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
f = &ctx->d_frame;
break;
default:
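
[annotation] The exynos4-is conversion above (and the similar fimc-lite, mipi-csis, omap3isp and ispccdc hunks further down) replaces struct v4l2_subdev_fh with struct v4l2_subdev_pad_config in every pad operation, and the v4l2_subdev_get_try_*() helpers gain the subdev as their first argument. A bare-bones get_fmt handler with the new signature, using a hypothetical my_state for the ACTIVE case, would look roughly like:

#include <media/v4l2-subdev.h>

struct my_state {                               /* hypothetical driver state */
        struct v4l2_subdev sd;
        struct v4l2_mbus_framefmt active_fmt;   /* ACTIVE format, single pad */
};

static int my_get_fmt(struct v4l2_subdev *sd,
                      struct v4l2_subdev_pad_config *cfg,
                      struct v4l2_subdev_format *fmt)
{
        struct my_state *st = container_of(sd, struct my_state, sd);

        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
                /* helpers now take (sd, cfg, pad) instead of (fh, pad) */
                fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
                return 0;
        }

        fmt->format = st->active_fmt;
        return 0;
}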
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index 60c744915549..5d78f5716f3b 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -112,7 +112,7 @@ static const struct media_entity_operations fimc_is_subdev_media_ops = {
};
static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct fimc_fmt *fmt;
@@ -125,14 +125,14 @@ static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct fimc_isp *isp = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *mf = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
return 0;
}
@@ -162,7 +162,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
}
static void __isp_subdev_try_format(struct fimc_isp *isp,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *mf = &fmt->format;
@@ -178,7 +178,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
mf->code = MEDIA_BUS_FMT_SGRBG10_1X10;
} else {
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- format = v4l2_subdev_get_try_format(fh,
+ format = v4l2_subdev_get_try_format(&isp->subdev, cfg,
FIMC_ISP_SD_PAD_SINK);
else
format = &isp->sink_fmt;
@@ -197,7 +197,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
}
static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct fimc_isp *isp = v4l2_get_subdevdata(sd);
@@ -209,10 +209,10 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
__func__, fmt->pad, mf->code, mf->width, mf->height);
mutex_lock(&isp->subdev_lock);
- __isp_subdev_try_format(isp, fh, fmt);
+ __isp_subdev_try_format(isp, cfg, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
/* Propagate format to the source pads */
@@ -223,8 +223,8 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
for (pad = FIMC_ISP_SD_PAD_SRC_FIFO;
pad < FIMC_ISP_SD_PADS_NUM; pad++) {
format.pad = pad;
- __isp_subdev_try_format(isp, fh, &format);
- mf = v4l2_subdev_get_try_format(fh, pad);
+ __isp_subdev_try_format(isp, cfg, &format);
+ mf = v4l2_subdev_get_try_format(sd, cfg, pad);
*mf = format.format;
}
}
@@ -236,7 +236,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
isp->sink_fmt = *mf;
format.pad = FIMC_ISP_SD_PAD_SRC_DMA;
- __isp_subdev_try_format(isp, fh, &format);
+ __isp_subdev_try_format(isp, cfg, &format);
isp->src_fmt = format.format;
__is_set_frame_size(is, &isp->src_fmt);
@@ -369,7 +369,7 @@ static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt fmt;
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SINK);
+ format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SINK);
fmt.colorspace = V4L2_COLORSPACE_SRGB;
fmt.code = fimc_isp_formats[0].mbus_code;
@@ -378,12 +378,12 @@ static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
fmt.field = V4L2_FIELD_NONE;
*format = fmt;
- format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SRC_FIFO);
+ format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_FIFO);
fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
*format = fmt;
- format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SRC_DMA);
+ format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_DMA);
*format = fmt;
return 0;
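
[annotation] Note that the .open internal op above keeps its v4l2_subdev_fh argument; it simply hands fh->pad (the file handle's pad config array) to the same helpers. A sketch of such an .open seeding one try format, with illustrative constants:

#include <media/v4l2-subdev.h>

static int my_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
        /* fh->pad is the per-open v4l2_subdev_pad_config array */
        struct v4l2_mbus_framefmt *fmt =
                v4l2_subdev_get_try_format(sd, fh->pad, 0);

        fmt->width = 640;
        fmt->height = 480;
        fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
        fmt->field = V4L2_FIELD_NONE;
        fmt->colorspace = V4L2_COLORSPACE_SRGB;

        return 0;
}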
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 2510f189e242..ca6261a86a5f 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -568,7 +568,7 @@ static const struct v4l2_file_operations fimc_lite_fops = {
*/
static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
struct flite_drvdata *dd = fimc->dd;
@@ -592,13 +592,13 @@ static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
struct v4l2_rect *rect;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sink_fmt = v4l2_subdev_get_try_format(fh,
+ sink_fmt = v4l2_subdev_get_try_format(&fimc->subdev, cfg,
FLITE_SD_PAD_SINK);
mf->code = sink_fmt->code;
mf->colorspace = sink_fmt->colorspace;
- rect = v4l2_subdev_get_try_crop(fh,
+ rect = v4l2_subdev_get_try_crop(&fimc->subdev, cfg,
FLITE_SD_PAD_SINK);
} else {
mf->code = sink->fmt->mbus_code;
@@ -1047,7 +1047,7 @@ static const struct media_entity_operations fimc_lite_subdev_media_ops = {
};
static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct fimc_fmt *fmt;
@@ -1060,16 +1060,17 @@ static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static struct v4l2_mbus_framefmt *__fimc_lite_subdev_get_try_fmt(
- struct v4l2_subdev_fh *fh, unsigned int pad)
+ struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad)
{
if (pad != FLITE_SD_PAD_SINK)
pad = FLITE_SD_PAD_SOURCE_DMA;
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
}
static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1077,7 +1078,7 @@ static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
struct flite_frame *f = &fimc->inp_frame;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = __fimc_lite_subdev_get_try_fmt(fh, fmt->pad);
+ mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1100,7 +1101,7 @@ static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1122,17 +1123,17 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
return -EBUSY;
}
- ffmt = fimc_lite_subdev_try_fmt(fimc, fh, fmt);
+ ffmt = fimc_lite_subdev_try_fmt(fimc, cfg, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *src_fmt;
- mf = __fimc_lite_subdev_get_try_fmt(fh, fmt->pad);
+ mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
*mf = fmt->format;
if (fmt->pad == FLITE_SD_PAD_SINK) {
unsigned int pad = FLITE_SD_PAD_SOURCE_DMA;
- src_fmt = __fimc_lite_subdev_get_try_fmt(fh, pad);
+ src_fmt = __fimc_lite_subdev_get_try_fmt(sd, cfg, pad);
*src_fmt = *mf;
}
@@ -1160,7 +1161,7 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1172,7 +1173,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
return 0;
}
@@ -1195,7 +1196,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1209,7 +1210,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
fimc_lite_try_crop(fimc, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
} else {
unsigned long flags;
spin_lock_irqsave(&fimc->slock, flags);
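
[annotation] fimc-lite's selection handlers follow the same rule for crop rectangles: v4l2_subdev_get_try_crop() also takes (sd, cfg, pad) now. A stripped-down get_selection for a TRY crop, with a hypothetical my_fimc_state holding the ACTIVE rectangle:

#include <media/v4l2-subdev.h>

struct my_fimc_state {                  /* hypothetical driver state */
        struct v4l2_rect crop;          /* ACTIVE crop rectangle */
};

static int my_get_selection(struct v4l2_subdev *sd,
                            struct v4l2_subdev_pad_config *cfg,
                            struct v4l2_subdev_selection *sel)
{
        struct my_fimc_state *st = v4l2_get_subdevdata(sd);

        if (sel->target != V4L2_SEL_TGT_CROP)
                return -EINVAL;

        if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
                /* try rectangles also live in the pad config now */
                sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
        else
                sel->r = st->crop;

        return 0;
}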
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 2504aa89a6f4..d74e1bec3d86 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -540,7 +540,7 @@ unlock:
}
static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5pcsis_formats))
@@ -568,23 +568,23 @@ static struct csis_pix_format const *s5pcsis_try_format(
}
static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
- struct csis_state *state, struct v4l2_subdev_fh *fh,
+ struct csis_state *state, struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
+ return cfg ? v4l2_subdev_get_try_format(&state->sd, cfg, 0) : NULL;
return &state->format;
}
-static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct csis_pix_format const *csis_fmt;
struct v4l2_mbus_framefmt *mf;
- mf = __s5pcsis_get_format(state, fh, fmt->which);
+ mf = __s5pcsis_get_format(state, cfg, fmt->which);
if (fmt->pad == CSIS_PAD_SOURCE) {
if (mf) {
@@ -605,13 +605,13 @@ static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
-static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct v4l2_mbus_framefmt *mf;
- mf = __s5pcsis_get_format(state, fh, fmt->which);
+ mf = __s5pcsis_get_format(state, cfg, fmt->which);
if (!mf)
return -EINVAL;
@@ -651,7 +651,7 @@ static int s5pcsis_log_status(struct v4l2_subdev *sd)
static int s5pcsis_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
format->colorspace = V4L2_COLORSPACE_JPEG;
format->code = s5pcsis_formats[0].code;
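
[annotation] The mipi-csis change keeps the driver's small format-lookup helper but switches its guard from fh to cfg: a NULL pad config (a TRY request without an open file handle) returns NULL instead of being dereferenced. Roughly, with hypothetical names:

struct my_csis_state {
        struct v4l2_subdev sd;
        struct v4l2_mbus_framefmt format;       /* single ACTIVE format */
};

static struct v4l2_mbus_framefmt *
my_get_format(struct my_csis_state *st, struct v4l2_subdev_pad_config *cfg,
              enum v4l2_subdev_format_whence which)
{
        if (which == V4L2_SUBDEV_FORMAT_TRY)
                /* cfg can be NULL here; don't dereference it */
                return cfg ? v4l2_subdev_get_try_format(&st->sd, cfg, 0) : NULL;

        return &st->format;
}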
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index b70c1aecca37..92d954973ccf 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -127,7 +127,7 @@ static struct deinterlace_fmt *find_format(struct v4l2_format *f)
struct deinterlace_dev {
struct v4l2_device v4l2_dev;
- struct video_device *vfd;
+ struct video_device vfd;
atomic_t busy;
struct mutex dev_mutex;
@@ -983,7 +983,7 @@ static struct video_device deinterlace_videodev = {
.fops = &deinterlace_fops,
.ioctl_ops = &deinterlace_ioctl_ops,
.minor = -1,
- .release = video_device_release,
+ .release = video_device_release_empty,
.vfl_dir = VFL_DIR_M2M,
};
@@ -1026,13 +1026,7 @@ static int deinterlace_probe(struct platform_device *pdev)
atomic_set(&pcdev->busy, 0);
mutex_init(&pcdev->dev_mutex);
- vfd = video_device_alloc();
- if (!vfd) {
- v4l2_err(&pcdev->v4l2_dev, "Failed to allocate video device\n");
- ret = -ENOMEM;
- goto unreg_dev;
- }
-
+ vfd = &pcdev->vfd;
*vfd = deinterlace_videodev;
vfd->lock = &pcdev->dev_mutex;
vfd->v4l2_dev = &pcdev->v4l2_dev;
@@ -1040,12 +1034,11 @@ static int deinterlace_probe(struct platform_device *pdev)
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
- goto rel_vdev;
+ goto unreg_dev;
}
video_set_drvdata(vfd, pcdev);
snprintf(vfd->name, sizeof(vfd->name), "%s", deinterlace_videodev.name);
- pcdev->vfd = vfd;
v4l2_info(&pcdev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
" Device registered as /dev/video%d\n", vfd->num);
@@ -1069,11 +1062,9 @@ static int deinterlace_probe(struct platform_device *pdev)
v4l2_m2m_release(pcdev->m2m_dev);
err_m2m:
- video_unregister_device(pcdev->vfd);
+ video_unregister_device(&pcdev->vfd);
err_ctx:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
-rel_vdev:
- video_device_release(vfd);
unreg_dev:
v4l2_device_unregister(&pcdev->v4l2_dev);
rel_dma:
@@ -1088,7 +1079,7 @@ static int deinterlace_remove(struct platform_device *pdev)
v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
v4l2_m2m_release(pcdev->m2m_dev);
- video_unregister_device(pcdev->vfd);
+ video_unregister_device(&pcdev->vfd);
v4l2_device_unregister(&pcdev->v4l2_dev);
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
dma_release_channel(pcdev->dma_chan);
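
[annotation] m2m-deinterlace uses the same embedded video_device, but initialises it by copying a file-scope template (the *vfd = deinterlace_videodev assignment above) rather than filling the fields one by one. A condensed sketch of that approach, with made-up names and a deliberately minimal fops:

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct my_dev {                         /* hypothetical m2m device state */
        struct v4l2_device v4l2_dev;
        struct video_device vfd;        /* embedded output video node */
        struct mutex dev_mutex;
};

static const struct v4l2_file_operations my_fops = {
        .owner = THIS_MODULE,
};

static const struct video_device my_videodev_template = {
        .name    = "my-m2m",
        .fops    = &my_fops,
        .minor   = -1,
        .release = video_device_release_empty,
        .vfl_dir = VFL_DIR_M2M,
};

static int my_register_video(struct my_dev *pcdev)
{
        struct video_device *vfd = &pcdev->vfd;

        *vfd = my_videodev_template;    /* copy the template into place */
        vfd->lock = &pcdev->dev_mutex;
        vfd->v4l2_dev = &pcdev->v4l2_dev;
        video_set_drvdata(vfd, pcdev);

        return video_register_device(vfd, VFL_TYPE_GRABBER, 0);
}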
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index dd5b1415f974..110fd70c7326 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -116,8 +116,8 @@ static struct mcam_format_struct {
.planar = false,
},
{
- .desc = "UYVY 4:2:2",
- .pixelformat = V4L2_PIX_FMT_UYVY,
+ .desc = "YVYU 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YVYU,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
switch (fmt->pixelformat) {
case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
widthy = fmt->width * 2;
widthuv = 0;
break;
@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
+ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
break;
case V4L2_PIX_FMT_YUYV:
mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
break;
- case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
break;
case V4L2_PIX_FMT_JPEG:
mcam_reg_write_mask(cam, REG_CTRL0,
@@ -1568,24 +1568,64 @@ static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
struct v4l2_frmsizeenum *sizes)
{
struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = sizes->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
int ret;
+ f = mcam_find_format(sizes->pixel_format);
+ if (f->pixelformat != sizes->pixel_format)
+ return -EINVAL;
+ fse.code = f->mbus_code;
mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_framesizes, sizes);
+ ret = sensor_call(cam, pad, enum_frame_size, NULL, &fse);
mutex_unlock(&cam->s_mutex);
- return ret;
+ if (ret)
+ return ret;
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ sizes->discrete.width = fse.min_width;
+ sizes->discrete.height = fse.min_height;
+ return 0;
+ }
+ sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ sizes->stepwise.min_width = fse.min_width;
+ sizes->stepwise.max_width = fse.max_width;
+ sizes->stepwise.min_height = fse.min_height;
+ sizes->stepwise.max_height = fse.max_height;
+ sizes->stepwise.step_width = 1;
+ sizes->stepwise.step_height = 1;
+ return 0;
}
static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
struct v4l2_frmivalenum *interval)
{
struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = interval->index,
+ .width = interval->width,
+ .height = interval->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
int ret;
+ f = mcam_find_format(interval->pixel_format);
+ if (f->pixelformat != interval->pixel_format)
+ return -EINVAL;
+ fie.code = f->mbus_code;
mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_frameintervals, interval);
+ ret = sensor_call(cam, pad, enum_frame_interval, NULL, &fie);
mutex_unlock(&cam->s_mutex);
- return ret;
+ if (ret)
+ return ret;
+ interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ interval->discrete = fie.interval;
+ return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
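
[annotation] The mcam-core hunk above re-implements VIDIOC_ENUM_FRAMESIZES and VIDIOC_ENUM_FRAMEINTERVALS on top of the sensor's pad operations: it fills a v4l2_subdev_frame_size_enum, calls enum_frame_size, and maps a degenerate min==max answer onto a DISCRETE frame size. A reduced sketch of that mapping, assuming the caller already knows the sensor subdev and the media bus code for the pixel format:

#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>

static int my_enum_framesizes(struct v4l2_subdev *sensor, u32 mbus_code,
                              struct v4l2_frmsizeenum *sizes)
{
        struct v4l2_subdev_frame_size_enum fse = {
                .index = sizes->index,
                .code  = mbus_code,
                .which = V4L2_SUBDEV_FORMAT_ACTIVE,
        };
        int ret;

        /* pad op takes a pad config argument; NULL is fine for ACTIVE */
        ret = v4l2_subdev_call(sensor, pad, enum_frame_size, NULL, &fse);
        if (ret)
                return ret;

        if (fse.min_width == fse.max_width &&
            fse.min_height == fse.max_height) {
                sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
                sizes->discrete.width = fse.min_width;
                sizes->discrete.height = fse.min_height;
        } else {
                sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
                sizes->stepwise.min_width = fse.min_width;
                sizes->stepwise.max_width = fse.max_width;
                sizes->stepwise.min_height = fse.min_height;
                sizes->stepwise.max_height = fse.max_height;
                sizes->stepwise.step_width = 1;
                sizes->stepwise.step_height = 1;
        }
        return 0;
}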
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index aa0c6eac254a..7ffdf4dbaf8c 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam);
#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
-#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
-#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
-#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
-#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
+#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */
+#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
+#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
+#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
/* Bayer bits 18,19 if needed */
#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index ba2d8f973d58..17b189a81ec5 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1978,7 +1978,7 @@ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
vout->cropped_offset = 0;
if (ovid->rotation_type == VOUT_ROT_VRFB) {
- int static_vrfb_allocation = (vid_num == 0) ?
+ bool static_vrfb_allocation = (vid_num == 0) ?
vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
static_vrfb_allocation);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index aa39306afc73..c6e252760c62 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -21,6 +21,7 @@
#include "omap_voutdef.h"
#include "omap_voutlib.h"
+#include "omap_vout_vrfb.h"
#define OMAP_DMA_NO_DEVICE 0
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.h b/drivers/media/platform/omap/omap_vout_vrfb.h
index 4c2314839b48..c976975024df 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.h
+++ b/drivers/media/platform/omap/omap_vout_vrfb.h
@@ -15,7 +15,7 @@
#ifdef CONFIG_VIDEO_OMAP2_VOUT_VRFB
void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout);
int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
- u32 static_vrfb_allocation);
+ bool static_vrfb_allocation);
void omap_vout_release_vrfb(struct omap_vout_device *vout);
int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
unsigned int *count, unsigned int startindex);
@@ -25,7 +25,7 @@ void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
#else
static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { };
static inline int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
- u32 static_vrfb_allocation)
+ bool static_vrfb_allocation)
{ return 0; };
static inline void omap_vout_release_vrfb(struct omap_vout_device *vout) { };
static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index deca80903c3a..18d0a871747f 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -51,6 +51,7 @@
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/omap-iommu.h>
#include <linux/platform_device.h>
@@ -63,6 +64,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-of.h>
#include "isp.h"
#include "ispreg.h"
@@ -85,35 +87,45 @@ static void isp_restore_ctx(struct isp_device *isp);
static const struct isp_res_mapping isp_res_maps[] = {
{
.isp_rev = ISP_REVISION_2_0,
- .map = 1 << OMAP3_ISP_IOMEM_MAIN |
- 1 << OMAP3_ISP_IOMEM_CCP2 |
- 1 << OMAP3_ISP_IOMEM_CCDC |
- 1 << OMAP3_ISP_IOMEM_HIST |
- 1 << OMAP3_ISP_IOMEM_H3A |
- 1 << OMAP3_ISP_IOMEM_PREV |
- 1 << OMAP3_ISP_IOMEM_RESZ |
- 1 << OMAP3_ISP_IOMEM_SBL |
- 1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
- 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
- 1 << OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
+ .offset = {
+ /* first MMIO area */
+ 0x0000, /* base, len 0x0070 */
+ 0x0400, /* ccp2, len 0x01f0 */
+ 0x0600, /* ccdc, len 0x00a8 */
+ 0x0a00, /* hist, len 0x0048 */
+ 0x0c00, /* h3a, len 0x0060 */
+ 0x0e00, /* preview, len 0x00a0 */
+ 0x1000, /* resizer, len 0x00ac */
+ 0x1200, /* sbl, len 0x00fc */
+ /* second MMIO area */
+ 0x0000, /* csi2a, len 0x0170 */
+ 0x0170, /* csiphy2, len 0x000c */
+ },
+ .syscon_offset = 0xdc,
+ .phy_type = ISP_PHY_TYPE_3430,
},
{
.isp_rev = ISP_REVISION_15_0,
- .map = 1 << OMAP3_ISP_IOMEM_MAIN |
- 1 << OMAP3_ISP_IOMEM_CCP2 |
- 1 << OMAP3_ISP_IOMEM_CCDC |
- 1 << OMAP3_ISP_IOMEM_HIST |
- 1 << OMAP3_ISP_IOMEM_H3A |
- 1 << OMAP3_ISP_IOMEM_PREV |
- 1 << OMAP3_ISP_IOMEM_RESZ |
- 1 << OMAP3_ISP_IOMEM_SBL |
- 1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
- 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
- 1 << OMAP3_ISP_IOMEM_CSI2A_REGS2 |
- 1 << OMAP3_ISP_IOMEM_CSI2C_REGS1 |
- 1 << OMAP3_ISP_IOMEM_CSIPHY1 |
- 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2 |
- 1 << OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
+ .offset = {
+ /* first MMIO area */
+ 0x0000, /* base, len 0x0070 */
+ 0x0400, /* ccp2, len 0x01f0 */
+ 0x0600, /* ccdc, len 0x00a8 */
+ 0x0a00, /* hist, len 0x0048 */
+ 0x0c00, /* h3a, len 0x0060 */
+ 0x0e00, /* preview, len 0x00a0 */
+ 0x1000, /* resizer, len 0x00ac */
+ 0x1200, /* sbl, len 0x00fc */
+ /* second MMIO area */
+ 0x0000, /* csi2a, len 0x0170 (1st area) */
+ 0x0170, /* csiphy2, len 0x000c */
+ 0x01c0, /* csi2a, len 0x0040 (2nd area) */
+ 0x0400, /* csi2c, len 0x0170 (1st area) */
+ 0x0570, /* csiphy1, len 0x000c */
+ 0x05c0, /* csi2c, len 0x0040 (2nd area) */
+ },
+ .syscon_offset = 0x2f0,
+ .phy_type = ISP_PHY_TYPE_3630,
},
};
@@ -279,9 +291,20 @@ static const struct clk_init_data isp_xclk_init_data = {
.num_parents = 1,
};
+static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int idx = clkspec->args[0];
+ struct isp_device *isp = data;
+
+ if (idx >= ARRAY_SIZE(isp->xclks))
+ return ERR_PTR(-ENOENT);
+
+ return isp->xclks[idx].clk;
+}
+
static int isp_xclk_init(struct isp_device *isp)
{
- struct isp_platform_data *pdata = isp->pdata;
+ struct device_node *np = isp->dev->of_node;
struct clk_init_data init;
unsigned int i;
@@ -311,37 +334,27 @@ static int isp_xclk_init(struct isp_device *isp)
xclk->clk = clk_register(NULL, &xclk->hw);
if (IS_ERR(xclk->clk))
return PTR_ERR(xclk->clk);
-
- if (pdata->xclks[i].con_id == NULL &&
- pdata->xclks[i].dev_id == NULL)
- continue;
-
- xclk->lookup = kzalloc(sizeof(*xclk->lookup), GFP_KERNEL);
- if (xclk->lookup == NULL)
- return -ENOMEM;
-
- xclk->lookup->con_id = pdata->xclks[i].con_id;
- xclk->lookup->dev_id = pdata->xclks[i].dev_id;
- xclk->lookup->clk = xclk->clk;
-
- clkdev_add(xclk->lookup);
}
+ if (np)
+ of_clk_add_provider(np, isp_xclk_src_get, isp);
+
return 0;
}
static void isp_xclk_cleanup(struct isp_device *isp)
{
+ struct device_node *np = isp->dev->of_node;
unsigned int i;
+ if (np)
+ of_clk_del_provider(np);
+
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
struct isp_xclk *xclk = &isp->xclks[i];
if (!IS_ERR(xclk->clk))
clk_unregister(xclk->clk);
-
- if (xclk->lookup)
- clkdev_drop(xclk->lookup);
}
}
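
[annotation] With the clkdev lookups removed above, external sensors no longer find the ISP's xclks by con_id/dev_id; the ISP node becomes a DT clock provider (of_clk_add_provider with a one-cell index) and the sensor consumes the clock through its own clocks phandle. On the consumer side that reduces to the ordinary clk API. A sketch, assuming an I2C sensor whose node carries something like clocks = <&isp 0>; (names and the rate are illustrative, and a real driver would keep the clk pointer to disable it later):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/i2c.h>

static int my_sensor_get_xclk(struct i2c_client *client)
{
        struct clk *xclk;
        int ret;

        xclk = devm_clk_get(&client->dev, NULL);
        if (IS_ERR(xclk))
                return PTR_ERR(xclk);

        ret = clk_set_rate(xclk, 24000000);     /* illustrative rate */
        if (ret < 0)
                return ret;

        return clk_prepare_enable(xclk);
}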
@@ -422,7 +435,7 @@ static void isp_core_init(struct isp_device *isp, int idle)
*/
void omap3isp_configure_bridge(struct isp_device *isp,
enum ccdc_input_entity input,
- const struct isp_parallel_platform_data *pdata,
+ const struct isp_parallel_cfg *parcfg,
unsigned int shift, unsigned int bridge)
{
u32 ispctrl_val;
@@ -437,8 +450,8 @@ void omap3isp_configure_bridge(struct isp_device *isp,
switch (input) {
case CCDC_INPUT_PARALLEL:
ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
- ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
- shift += pdata->data_lane_shift * 2;
+ ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
+ shift += parcfg->data_lane_shift * 2;
break;
case CCDC_INPUT_CSI2A:
@@ -1784,58 +1797,121 @@ static void isp_unregister_entities(struct isp_device *isp)
}
/*
- * isp_register_subdev_group - Register a group of subdevices
+ * isp_register_subdev - Register a sub-device
* @isp: OMAP3 ISP device
- * @board_info: I2C subdevs board information array
+ * @isp_subdev: platform data related to a sub-device
*
- * Register all I2C subdevices in the board_info array. The array must be
- * terminated by a NULL entry, and the first entry must be the sensor.
+ * Register an I2C sub-device which has not been registered by other
+ * means (such as the Device Tree).
*
- * Return a pointer to the sensor media entity if it has been successfully
+ * Return a pointer to the sub-device if it has been successfully
* registered, or NULL otherwise.
*/
static struct v4l2_subdev *
-isp_register_subdev_group(struct isp_device *isp,
- struct isp_subdev_i2c_board_info *board_info)
+isp_register_subdev(struct isp_device *isp,
+ struct isp_platform_subdev *isp_subdev)
{
- struct v4l2_subdev *sensor = NULL;
- unsigned int first;
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev *sd;
- if (board_info->board_info == NULL)
+ if (isp_subdev->board_info == NULL)
return NULL;
- for (first = 1; board_info->board_info; ++board_info, first = 0) {
- struct v4l2_subdev *subdev;
- struct i2c_adapter *adapter;
+ adapter = i2c_get_adapter(isp_subdev->i2c_adapter_id);
+ if (adapter == NULL) {
+ dev_err(isp->dev,
+ "%s: Unable to get I2C adapter %d for device %s\n",
+ __func__, isp_subdev->i2c_adapter_id,
+ isp_subdev->board_info->type);
+ return NULL;
+ }
- adapter = i2c_get_adapter(board_info->i2c_adapter_id);
- if (adapter == NULL) {
- dev_err(isp->dev, "%s: Unable to get I2C adapter %d for "
- "device %s\n", __func__,
- board_info->i2c_adapter_id,
- board_info->board_info->type);
- continue;
- }
+ sd = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
+ isp_subdev->board_info, NULL);
+ if (sd == NULL) {
+ dev_err(isp->dev, "%s: Unable to register subdev %s\n",
+ __func__, isp_subdev->board_info->type);
+ return NULL;
+ }
- subdev = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
- board_info->board_info, NULL);
- if (subdev == NULL) {
- dev_err(isp->dev, "%s: Unable to register subdev %s\n",
- __func__, board_info->board_info->type);
- continue;
- }
+ return sd;
+}
+
+static int isp_link_entity(
+ struct isp_device *isp, struct media_entity *entity,
+ enum isp_interface_type interface)
+{
+ struct media_entity *input;
+ unsigned int flags;
+ unsigned int pad;
+ unsigned int i;
+
+ /* Connect the sensor to the correct interface module.
+ * Parallel sensors are connected directly to the CCDC, while
+ * serial sensors are connected to the CSI2a, CCP2b or CSI2c
+ * receiver through CSIPHY1 or CSIPHY2.
+ */
+ switch (interface) {
+ case ISP_INTERFACE_PARALLEL:
+ input = &isp->isp_ccdc.subdev.entity;
+ pad = CCDC_PAD_SINK;
+ flags = 0;
+ break;
+
+ case ISP_INTERFACE_CSI2A_PHY2:
+ input = &isp->isp_csi2a.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ case ISP_INTERFACE_CCP2B_PHY1:
+ case ISP_INTERFACE_CCP2B_PHY2:
+ input = &isp->isp_ccp2.subdev.entity;
+ pad = CCP2_PAD_SINK;
+ flags = 0;
+ break;
+
+ case ISP_INTERFACE_CSI2C_PHY1:
+ input = &isp->isp_csi2c.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+ break;
- if (first)
- sensor = subdev;
+ default:
+ dev_err(isp->dev, "%s: invalid interface type %u\n", __func__,
+ interface);
+ return -EINVAL;
+ }
+
+ /*
+ * Not all interfaces are available on all revisions of the
+ * ISP. The sub-devices of those interfaces aren't initialised
+ * in such a case. Check this by ensuring the num_pads is
+ * non-zero.
+ */
+ if (!input->num_pads) {
+ dev_err(isp->dev, "%s: invalid input %u\n", entity->name,
+ interface);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < entity->num_pads; i++) {
+ if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == entity->num_pads) {
+ dev_err(isp->dev, "%s: no source pad in external entity\n",
+ __func__);
+ return -EINVAL;
}
- return sensor;
+ return media_entity_create_link(entity, i, input, pad, flags);
}
static int isp_register_entities(struct isp_device *isp)
{
struct isp_platform_data *pdata = isp->pdata;
- struct isp_v4l2_subdevs_group *subdevs;
+ struct isp_platform_subdev *isp_subdev;
int ret;
isp->media_dev.dev = isp->dev;
@@ -1892,74 +1968,31 @@ static int isp_register_entities(struct isp_device *isp)
if (ret < 0)
goto done;
+ /*
+ * Device Tree --- the external sub-devices will be registered
+ * later. The same goes for the sub-device node registration.
+ */
+ if (isp->dev->of_node)
+ return 0;
+
/* Register external entities */
- for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
- struct v4l2_subdev *sensor;
- struct media_entity *input;
- unsigned int flags;
- unsigned int pad;
- unsigned int i;
-
- sensor = isp_register_subdev_group(isp, subdevs->subdevs);
- if (sensor == NULL)
- continue;
+ for (isp_subdev = pdata ? pdata->subdevs : NULL;
+ isp_subdev && isp_subdev->board_info; isp_subdev++) {
+ struct v4l2_subdev *sd;
- sensor->host_priv = subdevs;
+ sd = isp_register_subdev(isp, isp_subdev);
- /* Connect the sensor to the correct interface module. Parallel
- * sensors are connected directly to the CCDC, while serial
- * sensors are connected to the CSI2a, CCP2b or CSI2c receiver
- * through CSIPHY1 or CSIPHY2.
+ /*
+ * No bus information --- this is either a flash or a
+ * lens subdev.
*/
- switch (subdevs->interface) {
- case ISP_INTERFACE_PARALLEL:
- input = &isp->isp_ccdc.subdev.entity;
- pad = CCDC_PAD_SINK;
- flags = 0;
- break;
-
- case ISP_INTERFACE_CSI2A_PHY2:
- input = &isp->isp_csi2a.subdev.entity;
- pad = CSI2_PAD_SINK;
- flags = MEDIA_LNK_FL_IMMUTABLE
- | MEDIA_LNK_FL_ENABLED;
- break;
-
- case ISP_INTERFACE_CCP2B_PHY1:
- case ISP_INTERFACE_CCP2B_PHY2:
- input = &isp->isp_ccp2.subdev.entity;
- pad = CCP2_PAD_SINK;
- flags = 0;
- break;
-
- case ISP_INTERFACE_CSI2C_PHY1:
- input = &isp->isp_csi2c.subdev.entity;
- pad = CSI2_PAD_SINK;
- flags = MEDIA_LNK_FL_IMMUTABLE
- | MEDIA_LNK_FL_ENABLED;
- break;
-
- default:
- dev_err(isp->dev, "%s: invalid interface type %u\n",
- __func__, subdevs->interface);
- ret = -EINVAL;
- goto done;
- }
+ if (!sd || !isp_subdev->bus)
+ continue;
- for (i = 0; i < sensor->entity.num_pads; i++) {
- if (sensor->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
- break;
- }
- if (i == sensor->entity.num_pads) {
- dev_err(isp->dev,
- "%s: no source pad in external entity\n",
- __func__);
- ret = -EINVAL;
- goto done;
- }
+ sd->host_priv = isp_subdev->bus;
- ret = media_entity_create_link(&sensor->entity, i, input, pad,
- flags);
+ ret = isp_link_entity(isp, &sd->entity,
+ isp_subdev->bus->interface);
if (ret < 0)
goto done;
}
@@ -1967,8 +2000,10 @@ static int isp_register_entities(struct isp_device *isp)
ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
done:
- if (ret < 0)
+ if (ret < 0) {
isp_unregister_entities(isp);
+ v4l2_async_notifier_unregister(&isp->notifier);
+ }
return ret;
}
@@ -2183,6 +2218,7 @@ static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
+ v4l2_async_notifier_unregister(&isp->notifier);
isp_unregister_entities(isp);
isp_cleanup_modules(isp);
isp_xclk_cleanup(isp);
@@ -2194,26 +2230,156 @@ static int isp_remove(struct platform_device *pdev)
return 0;
}
-static int isp_map_mem_resource(struct platform_device *pdev,
- struct isp_device *isp,
- enum isp_mem_resources res)
+enum isp_of_phy {
+ ISP_OF_PHY_PARALLEL = 0,
+ ISP_OF_PHY_CSIPHY1,
+ ISP_OF_PHY_CSIPHY2,
+};
+
+static int isp_of_parse_node(struct device *dev, struct device_node *node,
+ struct isp_async_subdev *isd)
{
- struct resource *mem;
+ struct isp_bus_cfg *buscfg = &isd->bus;
+ struct v4l2_of_endpoint vep;
+ unsigned int i;
- /* request the mem region for the camera registers */
+ v4l2_of_parse_endpoint(node, &vep);
+
+ dev_dbg(dev, "parsing endpoint %s, interface %u\n", node->full_name,
+ vep.base.port);
+
+ switch (vep.base.port) {
+ case ISP_OF_PHY_PARALLEL:
+ buscfg->interface = ISP_INTERFACE_PARALLEL;
+ buscfg->bus.parallel.data_lane_shift =
+ vep.bus.parallel.data_shift;
+ buscfg->bus.parallel.clk_pol =
+ !!(vep.bus.parallel.flags
+ & V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ buscfg->bus.parallel.hs_pol =
+ !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ buscfg->bus.parallel.vs_pol =
+ !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ buscfg->bus.parallel.fld_pol =
+ !!(vep.bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
+ buscfg->bus.parallel.data_pol =
+ !!(vep.bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
+ break;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
+ case ISP_OF_PHY_CSIPHY1:
+ case ISP_OF_PHY_CSIPHY2:
+ /* FIXME: always assume CSI-2 for now. */
+ switch (vep.base.port) {
+ case ISP_OF_PHY_CSIPHY1:
+ buscfg->interface = ISP_INTERFACE_CSI2C_PHY1;
+ break;
+ case ISP_OF_PHY_CSIPHY2:
+ buscfg->interface = ISP_INTERFACE_CSI2A_PHY2;
+ break;
+ }
+ buscfg->bus.csi2.lanecfg.clk.pos = vep.bus.mipi_csi2.clock_lane;
+ buscfg->bus.csi2.lanecfg.clk.pol =
+ vep.bus.mipi_csi2.lane_polarities[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ buscfg->bus.csi2.lanecfg.clk.pol,
+ buscfg->bus.csi2.lanecfg.clk.pos);
+
+ for (i = 0; i < ISP_CSIPHY2_NUM_DATA_LANES; i++) {
+ buscfg->bus.csi2.lanecfg.data[i].pos =
+ vep.bus.mipi_csi2.data_lanes[i];
+ buscfg->bus.csi2.lanecfg.data[i].pol =
+ vep.bus.mipi_csi2.lane_polarities[i + 1];
+ dev_dbg(dev, "data lane %u polarity %u, pos %u\n", i,
+ buscfg->bus.csi2.lanecfg.data[i].pol,
+ buscfg->bus.csi2.lanecfg.data[i].pos);
+ }
- /* map the region */
- isp->mmio_base[res] = devm_ioremap_resource(isp->dev, mem);
- if (IS_ERR(isp->mmio_base[res]))
- return PTR_ERR(isp->mmio_base[res]);
+ /*
+ * FIXME: now we assume the CRC is always there.
+ * Implement a way to obtain this information from the
+ * sensor. Frame descriptors, perhaps?
+ */
+ buscfg->bus.csi2.crc = 1;
+ break;
- isp->mmio_base_phys[res] = mem->start;
+ default:
+ dev_warn(dev, "%s: invalid interface %u\n", node->full_name,
+ vep.base.port);
+ break;
+ }
return 0;
}
+static int isp_of_parse_nodes(struct device *dev,
+ struct v4l2_async_notifier *notifier)
+{
+ struct device_node *node = NULL;
+
+ notifier->subdevs = devm_kcalloc(
+ dev, ISP_MAX_SUBDEVS, sizeof(*notifier->subdevs), GFP_KERNEL);
+ if (!notifier->subdevs)
+ return -ENOMEM;
+
+ while (notifier->num_subdevs < ISP_MAX_SUBDEVS &&
+ (node = of_graph_get_next_endpoint(dev->of_node, node))) {
+ struct isp_async_subdev *isd;
+
+ isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
+ if (!isd) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+
+ notifier->subdevs[notifier->num_subdevs] = &isd->asd;
+
+ if (isp_of_parse_node(dev, node, isd)) {
+ of_node_put(node);
+ return -EINVAL;
+ }
+
+ isd->asd.match.of.node = of_graph_get_remote_port_parent(node);
+ of_node_put(node);
+ if (!isd->asd.match.of.node) {
+ dev_warn(dev, "bad remote port parent\n");
+ return -EINVAL;
+ }
+
+ isd->asd.match_type = V4L2_ASYNC_MATCH_OF;
+ notifier->num_subdevs++;
+ }
+
+ return notifier->num_subdevs;
+}
+
+static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct isp_device *isp = container_of(async, struct isp_device,
+ notifier);
+ struct isp_async_subdev *isd =
+ container_of(asd, struct isp_async_subdev, asd);
+ int ret;
+
+ ret = isp_link_entity(isp, &subdev->entity, isd->bus.interface);
+ if (ret < 0)
+ return ret;
+
+ isd->sd = subdev;
+ isd->sd->host_priv = &isd->bus;
+
+ return ret;
+}
+
+static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
+{
+ struct isp_device *isp = container_of(async, struct isp_device,
+ notifier);
+
+ return v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+}
+
/*
* isp_probe - Probe ISP platform device
* @pdev: Pointer to ISP platform device
@@ -2227,47 +2393,86 @@ static int isp_map_mem_resource(struct platform_device *pdev,
*/
static int isp_probe(struct platform_device *pdev)
{
- struct isp_platform_data *pdata = pdev->dev.platform_data;
struct isp_device *isp;
+ struct resource *mem;
int ret;
int i, m;
- if (pdata == NULL)
- return -EINVAL;
-
isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
if (!isp) {
dev_err(&pdev->dev, "could not allocate memory\n");
return -ENOMEM;
}
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+ ret = of_property_read_u32(pdev->dev.of_node, "ti,phy-type",
+ &isp->phy_type);
+ if (ret)
+ return ret;
+
+ isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(isp->syscon))
+ return PTR_ERR(isp->syscon);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1,
+ &isp->syscon_offset);
+ if (ret)
+ return ret;
+
+ ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
+ if (ret < 0)
+ return ret;
+ ret = v4l2_async_notifier_register(&isp->v4l2_dev,
+ &isp->notifier);
+ if (ret)
+ return ret;
+ } else {
+ isp->pdata = pdev->dev.platform_data;
+ isp->syscon = syscon_regmap_lookup_by_pdevname("syscon.0");
+ if (IS_ERR(isp->syscon))
+ return PTR_ERR(isp->syscon);
+ dev_warn(&pdev->dev,
+ "Platform data support is deprecated! Please move to DT now!\n");
+ }
+
isp->autoidle = autoidle;
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
isp->dev = &pdev->dev;
- isp->pdata = pdata;
isp->ref_count = 0;
ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
if (ret)
- return ret;
+ goto error;
platform_set_drvdata(pdev, isp);
/* Regulators */
- isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY1");
- isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY2");
+ isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
+ isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
/* Clocks
*
* The ISP clock tree is revision-dependent. We thus need to enable ICLK
* manually to read the revision before calling __omap3isp_get().
+ *
+ * Start by mapping the ISP MMIO area, which is in two pieces.
+ * The ISP IOMMU is in between. Map both now, and fill in the
+ * ISP revision specific portions a little later in the
+ * function.
*/
- ret = isp_map_mem_resource(pdev, isp, OMAP3_ISP_IOMEM_MAIN);
- if (ret < 0)
- goto error;
+ for (i = 0; i < 2; i++) {
+ unsigned int map_idx = i ? OMAP3_ISP_IOMEM_CSI2A_REGS1 : 0;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ isp->mmio_base[map_idx] =
+ devm_ioremap_resource(isp->dev, mem);
+ if (IS_ERR(isp->mmio_base[map_idx]))
+ return PTR_ERR(isp->mmio_base[map_idx]);
+ }
ret = isp_get_clocks(isp);
if (ret < 0)
@@ -2308,14 +2513,23 @@ static int isp_probe(struct platform_device *pdev)
goto error_isp;
}
- for (i = 1; i < OMAP3_ISP_IOMEM_LAST; i++) {
- if (isp_res_maps[m].map & 1 << i) {
- ret = isp_map_mem_resource(pdev, isp, i);
- if (ret)
- goto error_isp;
- }
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) {
+ isp->syscon_offset = isp_res_maps[m].syscon_offset;
+ isp->phy_type = isp_res_maps[m].phy_type;
}
+ for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
+ isp->mmio_base[i] =
+ isp->mmio_base[0] + isp_res_maps[m].offset[i];
+
+ for (i = OMAP3_ISP_IOMEM_CSIPHY2; i < OMAP3_ISP_IOMEM_LAST; i++)
+ isp->mmio_base[i] =
+ isp->mmio_base[OMAP3_ISP_IOMEM_CSI2A_REGS1]
+ + isp_res_maps[m].offset[i];
+
+ isp->mmio_hist_base_phys =
+ mem->start + isp_res_maps[m].offset[OMAP3_ISP_IOMEM_HIST];
+
/* IOMMU */
ret = isp_attach_iommu(isp);
if (ret < 0) {
@@ -2343,6 +2557,9 @@ static int isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error_iommu;
+ isp->notifier.bound = isp_subdev_notifier_bound;
+ isp->notifier.complete = isp_subdev_notifier_complete;
+
ret = isp_register_entities(isp);
if (ret < 0)
goto error_modules;
@@ -2378,6 +2595,11 @@ static struct platform_device_id omap3isp_id_table[] = {
};
MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
+static const struct of_device_id omap3isp_of_table[] = {
+ { .compatible = "ti,omap3-isp" },
+ { },
+};
+
static struct platform_driver omap3isp_driver = {
.probe = isp_probe,
.remove = isp_remove,
@@ -2385,6 +2607,7 @@ static struct platform_driver omap3isp_driver = {
.driver = {
.name = "omap3isp",
.pm = &omap3isp_pm_ops,
+ .of_match_table = omap3isp_of_table,
},
};
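
[annotation] The DT path above defers the external subdevs to a v4l2_async_notifier: isp_of_parse_nodes() collects one async subdev per of_graph endpoint, the .bound callback creates the media link once the sensor driver probes, and .complete registers the subdev device nodes when every sensor is in. Stripped to the registration skeleton (endpoint parsing and error handling omitted, my_* names illustrative):

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

struct my_async_subdev {
        struct v4l2_async_subdev asd;   /* matched against the endpoint's OF node */
        unsigned int interface;         /* placeholder for per-endpoint bus config */
};

struct my_isp {
        struct v4l2_device v4l2_dev;
        struct v4l2_async_notifier notifier;
};

static int my_notifier_bound(struct v4l2_async_notifier *notifier,
                             struct v4l2_subdev *subdev,
                             struct v4l2_async_subdev *asd)
{
        struct my_async_subdev *isd =
                container_of(asd, struct my_async_subdev, asd);

        /* create the media link to the matching receiver here, as
         * isp_link_entity() does above, then keep the bus config reachable */
        subdev->host_priv = isd;
        return 0;
}

static int my_notifier_complete(struct v4l2_async_notifier *notifier)
{
        struct my_isp *isp = container_of(notifier, struct my_isp, notifier);

        /* every async subdev has probed: expose the subdev device nodes */
        return v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
}

/*
 * In probe: fill notifier.subdevs[] with &my_async_subdev.asd entries
 * (match_type = V4L2_ASYNC_MATCH_OF, one per of_graph endpoint), set
 * notifier.bound/.complete to the callbacks above, then call
 * v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier).
 */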
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index cfdfc8714b6b..e579943175c4 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -18,6 +18,7 @@
#define OMAP3_ISP_CORE_H
#include <media/omap3isp.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -59,8 +60,6 @@ enum isp_mem_resources {
OMAP3_ISP_IOMEM_CSI2C_REGS1,
OMAP3_ISP_IOMEM_CSIPHY1,
OMAP3_ISP_IOMEM_CSI2C_REGS2,
- OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
- OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
OMAP3_ISP_IOMEM_LAST
};
@@ -93,14 +92,25 @@ enum isp_subclk_resource {
/* ISP2P: OMAP 36xx */
#define ISP_REVISION_15_0 0xF0
+#define ISP_PHY_TYPE_3430 0
+#define ISP_PHY_TYPE_3630 1
+
+struct regmap;
+
/*
* struct isp_res_mapping - Map ISP io resources to ISP revision.
* @isp_rev: ISP_REVISION_x_x
- * @map: bitmap for enum isp_mem_resources
+ * @offset: register offsets of various ISP sub-blocks
+ * @syscon_offset: offset of the syscon register for 343x / 3630
+ * (CONTROL_CSIRXFE / CONTROL_CAMERA_PHY_CTRL, respectively)
+ * from the syscon base address
+ * @phy_type: ISP_PHY_TYPE_{3430,3630}
*/
struct isp_res_mapping {
u32 isp_rev;
- u32 map;
+ u32 offset[OMAP3_ISP_IOMEM_LAST];
+ u32 syscon_offset;
+ u32 phy_type;
};
/*
@@ -122,7 +132,6 @@ enum isp_xclk_id {
struct isp_xclk {
struct isp_device *isp;
struct clk_hw hw;
- struct clk_lookup *lookup;
struct clk *clk;
enum isp_xclk_id id;
@@ -138,8 +147,11 @@ struct isp_xclk {
* @irq_num: Currently used IRQ number.
* @mmio_base: Array with kernel base addresses for ioremapped ISP register
* regions.
- * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
- * regions.
+ * @mmio_hist_base_phys: Physical L4 bus address for ISP hist block register
+ * region.
+ * @syscon: Regmap for the syscon register space
+ * @syscon_offset: Offset of the CSIPHY control register in syscon
+ * @phy_type: ISP_PHY_TYPE_{3430,3630}
* @mapping: IOMMU mapping
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
@@ -166,6 +178,7 @@ struct isp_xclk {
*/
struct isp_device {
struct v4l2_device v4l2_dev;
+ struct v4l2_async_notifier notifier;
struct media_device media_dev;
struct device *dev;
u32 revision;
@@ -175,7 +188,10 @@ struct isp_device {
unsigned int irq_num;
void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
- unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
+ unsigned long mmio_hist_base_phys;
+ struct regmap *syscon;
+ u32 syscon_offset;
+ u32 phy_type;
struct dma_iommu_mapping *mapping;
@@ -209,6 +225,15 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
+
+#define ISP_MAX_SUBDEVS 8
+ struct v4l2_subdev *subdevs[ISP_MAX_SUBDEVS];
+};
+
+struct isp_async_subdev {
+ struct v4l2_subdev *sd;
+ struct isp_bus_cfg bus;
+ struct v4l2_async_subdev asd;
};
#define v4l2_dev_to_isp_device(dev) \
@@ -229,7 +254,7 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe);
void omap3isp_configure_bridge(struct isp_device *isp,
enum ccdc_input_entity input,
- const struct isp_parallel_platform_data *pdata,
+ const struct isp_parallel_cfg *buscfg,
unsigned int shift, unsigned int bridge);
struct isp_device *omap3isp_get(struct isp_device *isp);
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 587489a072d5..a6a61cce43dd 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -32,7 +32,7 @@
#define CCDC_MIN_HEIGHT 32
static struct v4l2_mbus_framefmt *
-__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which);
static const unsigned int ccdc_fmts[] = {
@@ -958,11 +958,11 @@ void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
/*
* ccdc_config_sync_if - Set CCDC sync interface configuration
* @ccdc: Pointer to ISP CCDC device.
- * @pdata: Parallel interface platform data (may be NULL)
+ * @parcfg: Parallel interface platform data (may be NULL)
* @data_size: Data size
*/
static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
- struct isp_parallel_platform_data *pdata,
+ struct isp_parallel_cfg *parcfg,
unsigned int data_size)
{
struct isp_device *isp = to_isp_device(ccdc);
@@ -1000,19 +1000,19 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
break;
}
- if (pdata && pdata->data_pol)
+ if (parcfg && parcfg->data_pol)
syn_mode |= ISPCCDC_SYN_MODE_DATAPOL;
- if (pdata && pdata->hs_pol)
+ if (parcfg && parcfg->hs_pol)
syn_mode |= ISPCCDC_SYN_MODE_HDPOL;
/* The polarity of the vertical sync signal output by the BT.656
* decoder is not documented and seems to be active low.
*/
- if ((pdata && pdata->vs_pol) || ccdc->bt656)
+ if ((parcfg && parcfg->vs_pol) || ccdc->bt656)
syn_mode |= ISPCCDC_SYN_MODE_VDPOL;
- if (pdata && pdata->fld_pol)
+ if (parcfg && parcfg->fld_pol)
syn_mode |= ISPCCDC_SYN_MODE_FLDPOL;
isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
@@ -1115,7 +1115,7 @@ static const u32 ccdc_sgbrg_pattern =
static void ccdc_configure(struct isp_ccdc_device *ccdc)
{
struct isp_device *isp = to_isp_device(ccdc);
- struct isp_parallel_platform_data *pdata = NULL;
+ struct isp_parallel_cfg *parcfg = NULL;
struct v4l2_subdev *sensor;
struct v4l2_mbus_framefmt *format;
const struct v4l2_rect *crop;
@@ -1145,7 +1145,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
if (!ret)
ccdc->bt656 = cfg.type == V4L2_MBUS_BT656;
- pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv)
+ parcfg = &((struct isp_bus_cfg *)sensor->host_priv)
->bus.parallel;
}
@@ -1175,10 +1175,10 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
else
bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
- omap3isp_configure_bridge(isp, ccdc->input, pdata, shift, bridge);
+ omap3isp_configure_bridge(isp, ccdc->input, parcfg, shift, bridge);
/* Configure the sync interface. */
- ccdc_config_sync_if(ccdc, pdata, depth_out);
+ ccdc_config_sync_if(ccdc, parcfg, depth_out);
syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
@@ -1935,21 +1935,21 @@ static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
}
static struct v4l2_mbus_framefmt *
-__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&ccdc->subdev, cfg, pad);
else
return &ccdc->formats[pad];
}
static struct v4l2_rect *
-__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(fh, CCDC_PAD_SOURCE_OF);
+ return v4l2_subdev_get_try_crop(&ccdc->subdev, cfg, CCDC_PAD_SOURCE_OF);
else
return &ccdc->crop;
}
@@ -1957,12 +1957,12 @@ __ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
/*
* ccdc_try_format - Try video format on a pad
* @ccdc: ISP CCDC device
- * @fh : V4L2 subdev file handle
+ * @cfg : V4L2 subdev pad configuration
* @pad: Pad number
* @fmt: Format
*/
static void
-ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1998,7 +1998,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
case CCDC_PAD_SOURCE_OF:
pixelcode = fmt->code;
field = fmt->field;
- *fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
+ *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
/* In SYNC mode the bridge converts YUV formats from 2X8 to
* 1X16. In BT.656 no such conversion occurs. As we don't know
@@ -2023,7 +2023,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
}
/* Hardcode the output size to the crop rectangle size. */
- crop = __ccdc_get_crop(ccdc, fh, which);
+ crop = __ccdc_get_crop(ccdc, cfg, which);
fmt->width = crop->width;
fmt->height = crop->height;
@@ -2040,7 +2040,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
break;
case CCDC_PAD_SOURCE_VP:
- *fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
+ *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
/* The video port interface truncates the data to 10 bits. */
info = omap3isp_video_format_info(fmt->code);
@@ -2112,12 +2112,12 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
/*
* ccdc_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg : V4L2 subdev pad configuration
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2132,8 +2132,8 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
break;
case CCDC_PAD_SOURCE_OF:
- format = __ccdc_get_format(ccdc, fh, code->pad,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __ccdc_get_format(ccdc, cfg, code->pad,
+ code->which);
if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
@@ -2163,8 +2163,8 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ccdc_get_format(ccdc, fh, code->pad,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __ccdc_get_format(ccdc, cfg, code->pad,
+ code->which);
/* A pixel code equal to 0 means that the video port doesn't
* support the input format. Don't enumerate any pixel code.
@@ -2183,7 +2183,7 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2195,7 +2195,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -2205,7 +2205,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -2215,7 +2215,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
/*
* ccdc_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP CCDC V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @sel: Selection rectangle
*
* The only supported rectangles are the crop rectangles on the output formatter
@@ -2223,7 +2223,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
*
* Return 0 on success or a negative error code otherwise.
*/
-static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2239,12 +2239,12 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
sel->r.width = INT_MAX;
sel->r.height = INT_MAX;
- format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
ccdc_try_crop(ccdc, format, &sel->r);
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
break;
default:
@@ -2257,7 +2257,7 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ccdc_set_selection - Set a selection rectangle on a pad
* @sd: ISP CCDC V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @sel: Selection rectangle
*
* The only supported rectangle is the actual crop rectangle on the output
@@ -2265,7 +2265,7 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
*
* Return 0 on success or a negative error code otherwise.
*/
-static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2284,17 +2284,17 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
* rectangle.
*/
if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
- sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
return 0;
}
- format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
ccdc_try_crop(ccdc, format, &sel->r);
- *__ccdc_get_crop(ccdc, fh, sel->which) = sel->r;
+ *__ccdc_get_crop(ccdc, cfg, sel->which) = sel->r;
/* Update the source format. */
- format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, sel->which);
- ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, sel->which);
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, sel->which);
+ ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format, sel->which);
return 0;
}
@@ -2302,19 +2302,19 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ccdc_get_format - Retrieve the video format on a pad
* @sd : ISP CCDC V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
+ format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -2325,30 +2325,30 @@ static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ccdc_set_format - Set the video format on a pad
* @sd : ISP CCDC V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
+ format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ccdc_try_format(ccdc, fh, fmt->pad, &fmt->format, fmt->which);
+ ccdc_try_format(ccdc, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CCDC_PAD_SINK) {
/* Reset the crop rectangle. */
- crop = __ccdc_get_crop(ccdc, fh, fmt->which);
+ crop = __ccdc_get_crop(ccdc, cfg, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
@@ -2357,16 +2357,16 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
ccdc_try_crop(ccdc, &fmt->format, crop);
/* Update the source formats. */
- format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF,
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF,
fmt->which);
*format = fmt->format;
- ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format,
+ ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format,
fmt->which);
- format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_VP,
+ format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_VP, format,
+ ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_VP, format,
fmt->which);
}
@@ -2417,11 +2417,11 @@ static int ccdc_link_validate(struct v4l2_subdev *sd,
/* We've got a parallel sensor here. */
if (ccdc->input == CCDC_INPUT_PARALLEL) {
- struct isp_parallel_platform_data *pdata =
- &((struct isp_v4l2_subdevs_group *)
+ struct isp_parallel_cfg *parcfg =
+ &((struct isp_bus_cfg *)
media_entity_to_v4l2_subdev(link->source->entity)
->host_priv)->bus.parallel;
- parallel_shift = pdata->data_lane_shift * 2;
+ parallel_shift = parcfg->data_lane_shift * 2;
} else {
parallel_shift = 0;
}
@@ -2453,7 +2453,7 @@ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ccdc_set_format(sd, fh, &format);
+ ccdc_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
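
For reference, a minimal sketch of the accessor pattern this conversion applies throughout the driver: the TRY format is now looked up through the subdevice and the pad configuration rather than through the file handle, while ACTIVE formats stay in driver state. The example_* names are placeholders, not part of the patch.

static struct v4l2_mbus_framefmt *
__example_get_format(struct example_device *dev,
		     struct v4l2_subdev_pad_config *cfg,
		     unsigned int pad, enum v4l2_subdev_format_whence which)
{
	/* TRY formats live in the per-file-handle pad configuration. */
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&dev->subdev, cfg, pad);

	/* ACTIVE formats remain in the entity's own state. */
	return &dev->formats[pad];
}
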
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index f4aedb37e41e..38e6a974c5b1 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -201,14 +201,14 @@ static void ccp2_mem_enable(struct isp_ccp2_device *ccp2, u8 enable)
/*
* ccp2_phyif_config - Initialize CCP2 phy interface config
* @ccp2: Pointer to ISP CCP2 device
- * @pdata: CCP2 platform data
+ * @buscfg: CCP2 bus configuration
*
* Configure the CCP2 physical interface module from platform data.
*
* Returns -EIO if strobe is chosen in CSI1 mode, or 0 on success.
*/
static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
- const struct isp_ccp2_platform_data *pdata)
+ const struct isp_ccp2_cfg *buscfg)
{
struct isp_device *isp = to_isp_device(ccp2);
u32 val;
@@ -218,16 +218,16 @@ static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE;
/* Data/strobe physical layer */
BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK,
- pdata->phy_layer);
+ buscfg->phy_layer);
BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK,
- pdata->strobe_clk_pol);
+ buscfg->strobe_clk_pol);
isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
if (!(val & ISPCCP2_CTRL_MODE)) {
- if (pdata->ccp2_mode == ISP_CCP2_MODE_CCP2)
+ if (buscfg->ccp2_mode == ISP_CCP2_MODE_CCP2)
dev_warn(isp->dev, "OMAP3 CCP2 bus not available\n");
- if (pdata->phy_layer == ISP_CCP2_PHY_DATA_STROBE)
+ if (buscfg->phy_layer == ISP_CCP2_PHY_DATA_STROBE)
/* Strobe mode requires CCP2 */
return -EIO;
}
@@ -347,7 +347,7 @@ static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
*/
static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
{
- const struct isp_v4l2_subdevs_group *pdata;
+ const struct isp_bus_cfg *buscfg;
struct v4l2_mbus_framefmt *format;
struct media_pad *pad;
struct v4l2_subdev *sensor;
@@ -358,20 +358,20 @@ static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
pad = media_entity_remote_pad(&ccp2->pads[CCP2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
- pdata = sensor->host_priv;
+ buscfg = sensor->host_priv;
- ret = ccp2_phyif_config(ccp2, &pdata->bus.ccp2);
+ ret = ccp2_phyif_config(ccp2, &buscfg->bus.ccp2);
if (ret < 0)
return ret;
- ccp2_vp_config(ccp2, pdata->bus.ccp2.vpclk_div + 1);
+ ccp2_vp_config(ccp2, buscfg->bus.ccp2.vpclk_div + 1);
v4l2_subdev_call(sensor, sensor, g_skip_top_lines, &lines);
format = &ccp2->formats[CCP2_PAD_SINK];
ccp2->if_cfg.data_start = lines;
- ccp2->if_cfg.crc = pdata->bus.ccp2.crc;
+ ccp2->if_cfg.crc = buscfg->bus.ccp2.crc;
ccp2->if_cfg.format = format->code;
ccp2->if_cfg.data_size = format->height;
@@ -611,17 +611,17 @@ static const unsigned int ccp2_fmts[] = {
/*
* __ccp2_get_format - helper function for getting ccp2 format
* @ccp2 : Pointer to ISP CCP2 device
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @pad : pad number
* @which : wanted subdev format
* return format structure or NULL on error
*/
static struct v4l2_mbus_framefmt *
-__ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_fh *fh,
+__ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&ccp2->subdev, cfg, pad);
else
return &ccp2->formats[pad];
}
@@ -629,13 +629,13 @@ __ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_fh *fh,
/*
* ccp2_try_format - Handle try format by pad subdev method
* @ccp2 : Pointer to ISP CCP2 device
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @pad : pad num
* @fmt : pointer to v4l2 mbus format structure
* @which : wanted subdev format
*/
static void ccp2_try_format(struct isp_ccp2_device *ccp2,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -669,7 +669,7 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
* When CCP2 write to memory feature will be added this
* should be changed properly.
*/
- format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK, which);
+ format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
break;
@@ -682,12 +682,12 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
/*
* ccp2_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
@@ -702,8 +702,8 @@ static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK,
+ code->which);
code->code = format->code;
}
@@ -711,7 +711,7 @@ static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
@@ -723,7 +723,7 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ccp2_try_format(ccp2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -733,7 +733,7 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ccp2_try_format(ccp2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -743,17 +743,17 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
/*
* ccp2_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccp2_get_format(ccp2, fh, fmt->pad, fmt->which);
+ format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -764,29 +764,29 @@ static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ccp2_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt : pointer to v4l2 subdev format structure
* returns zero
*/
-static int ccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccp2_get_format(ccp2, fh, fmt->pad, fmt->which);
+ format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ccp2_try_format(ccp2, fh, fmt->pad, &fmt->format, fmt->which);
+ ccp2_try_format(ccp2, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CCP2_PAD_SINK) {
- format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SOURCE,
+ format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- ccp2_try_format(ccp2, fh, CCP2_PAD_SOURCE, format, fmt->which);
+ ccp2_try_format(ccp2, cfg, CCP2_PAD_SOURCE, format, fmt->which);
}
return 0;
@@ -811,7 +811,7 @@ static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ccp2_set_format(sd, fh, &format);
+ ccp2_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
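
A sketch of the init_formats bridge used by the ccdc, ccp2, csi2, preview and resizer entities: the subdev open/init path still receives a file handle, so each handler now passes fh ? fh->pad : NULL down to the pad-config based set_format (NULL selects the active formats). Names other than the V4L2 ones are illustrative, and the defaults are assumed from the hunks above.

static int example_init_formats(struct v4l2_subdev *sd,
				struct v4l2_subdev_fh *fh)
{
	struct v4l2_subdev_format format;

	memset(&format, 0, sizeof(format));
	format.pad = 0;	/* sink pad */
	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
	format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
	format.format.width = 4096;
	format.format.height = 4096;

	/* Hand over the file handle's pad config, or NULL for ACTIVE. */
	return example_set_format(sd, fh ? fh->pad : NULL, &format);
}
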
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 09c686d96ae8..a78338d012b4 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -548,7 +548,8 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
static int csi2_configure(struct isp_csi2_device *csi2)
{
- const struct isp_v4l2_subdevs_group *pdata;
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ const struct isp_bus_cfg *buscfg;
struct isp_device *isp = csi2->isp;
struct isp_csi2_timing_cfg *timing = &csi2->timing[0];
struct v4l2_subdev *sensor;
@@ -565,14 +566,19 @@ static int csi2_configure(struct isp_csi2_device *csi2)
pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
- pdata = sensor->host_priv;
+ buscfg = sensor->host_priv;
csi2->frame_skip = 0;
v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
- csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
+ csi2->ctrl.vp_out_ctrl =
+ clamp_t(unsigned int, pipe->l3_ick / pipe->external_rate - 1,
+ 1, 3);
+ dev_dbg(isp->dev, "%s: l3_ick %lu, external_rate %u, vp_out_ctrl %u\n",
+ __func__, pipe->l3_ick, pipe->external_rate,
+ csi2->ctrl.vp_out_ctrl);
csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE;
- csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
+ csi2->ctrl.ecc_enable = buscfg->bus.csi2.crc;
timing->ionum = 1;
timing->force_rx_mode = 1;
@@ -829,17 +835,17 @@ static const struct isp_video_operations csi2_ispvideo_ops = {
*/
static struct v4l2_mbus_framefmt *
-__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
else
return &csi2->formats[pad];
}
static void
-csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -869,7 +875,7 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
* compression.
*/
pixelcode = fmt->code;
- format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
/*
@@ -890,12 +896,12 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
/*
* csi2_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -908,8 +914,8 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csi2_input_fmts[code->index];
} else {
- format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
+ code->which);
switch (code->index) {
case 0:
/* Passthrough sink pad code */
@@ -932,7 +938,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -944,7 +950,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -954,7 +960,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -964,17 +970,17 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
/*
* csi2_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -985,29 +991,29 @@ static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* csi2_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csi2_try_format(csi2, fh, fmt->pad, &fmt->format, fmt->which);
+ csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CSI2_PAD_SINK) {
- format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE,
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, fmt->which);
+ csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
}
return 0;
@@ -1032,7 +1038,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- csi2_set_format(sd, fh, &format);
+ csi2_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
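
The CSI-2 video port divider is no longer taken from platform data. As a rough sketch of the new computation, mirroring the clamp_t() expression added above, the divider is derived from the L3 interconnect clock and the external pixel rate and clamped to the 1..3 range the hardware accepts (external_rate is assumed non-zero here, as it is once the pipeline is configured).

/* Illustrative only; not part of the patch. */
static unsigned int example_vp_out_ctrl(unsigned long l3_ick,
					unsigned int external_rate)
{
	return clamp_t(unsigned int, l3_ick / external_rate - 1, 1, 3);
}
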
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index e033f2237a72..495447d66cfd 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include "isp.h"
@@ -26,10 +27,11 @@ static void csiphy_routing_cfg_3630(struct isp_csiphy *phy,
enum isp_interface_type iface,
bool ccp2_strobe)
{
- u32 reg = isp_reg_readl(
- phy->isp, OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+ u32 reg;
u32 shift, mode;
+ regmap_read(phy->isp->syscon, phy->isp->syscon_offset, &reg);
+
switch (iface) {
default:
/* Should not happen in practice, but let's keep the compiler happy. */
@@ -63,8 +65,7 @@ static void csiphy_routing_cfg_3630(struct isp_csiphy *phy,
reg &= ~(OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK << shift);
reg |= mode << shift;
- isp_reg_writel(phy->isp, reg,
- OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+ regmap_write(phy->isp->syscon, phy->isp->syscon_offset, reg);
}
static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
@@ -78,16 +79,14 @@ static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
return;
if (!on) {
- isp_reg_writel(phy->isp, 0,
- OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+ regmap_write(phy->isp->syscon, phy->isp->syscon_offset, 0);
return;
}
if (ccp2_strobe)
csirxfe |= OMAP343X_CONTROL_CSIRXFE_SELFORM;
- isp_reg_writel(phy->isp, csirxfe,
- OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+ regmap_write(phy->isp->syscon, phy->isp->syscon_offset, csirxfe);
}
/*
@@ -106,10 +105,9 @@ static void csiphy_routing_cfg(struct isp_csiphy *phy,
enum isp_interface_type iface, bool on,
bool ccp2_strobe)
{
- if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL]
- && on)
+ if (phy->isp->phy_type == ISP_PHY_TYPE_3630 && on)
return csiphy_routing_cfg_3630(phy, iface, ccp2_strobe);
- if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE])
+ if (phy->isp->phy_type == ISP_PHY_TYPE_3430)
return csiphy_routing_cfg_3430(phy, iface, on, ccp2_strobe);
}
@@ -168,18 +166,25 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy)
{
struct isp_csi2_device *csi2 = phy->csi2;
struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
- struct isp_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+ struct isp_bus_cfg *buscfg = pipe->external->host_priv;
struct isp_csiphy_lanes_cfg *lanes;
int csi2_ddrclk_khz;
unsigned int used_lanes = 0;
unsigned int i;
u32 reg;
- if (subdevs->interface == ISP_INTERFACE_CCP2B_PHY1
- || subdevs->interface == ISP_INTERFACE_CCP2B_PHY2)
- lanes = &subdevs->bus.ccp2.lanecfg;
+ if (!buscfg) {
+ struct isp_async_subdev *isd =
+ container_of(pipe->external->asd,
+ struct isp_async_subdev, asd);
+ buscfg = &isd->bus;
+ }
+
+ if (buscfg->interface == ISP_INTERFACE_CCP2B_PHY1
+ || buscfg->interface == ISP_INTERFACE_CCP2B_PHY2)
+ lanes = &buscfg->bus.ccp2.lanecfg;
else
- lanes = &subdevs->bus.csi2.lanecfg;
+ lanes = &buscfg->bus.csi2.lanecfg;
/* Clock and data lanes verification */
for (i = 0; i < phy->num_data_lanes; i++) {
@@ -203,8 +208,8 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy)
* issue since the MPU power domain is forced on whilst the
* ISP is in use.
*/
- csiphy_routing_cfg(phy, subdevs->interface, true,
- subdevs->bus.ccp2.phy_layer);
+ csiphy_routing_cfg(phy, buscfg->interface, true,
+ buscfg->bus.ccp2.phy_layer);
/* DPHY timing configuration */
/* CSI-2 is DDR and we only count used lanes. */
@@ -302,11 +307,10 @@ void omap3isp_csiphy_release(struct isp_csiphy *phy)
struct isp_csi2_device *csi2 = phy->csi2;
struct isp_pipeline *pipe =
to_isp_pipeline(&csi2->subdev.entity);
- struct isp_v4l2_subdevs_group *subdevs =
- pipe->external->host_priv;
+ struct isp_bus_cfg *buscfg = pipe->external->host_priv;
- csiphy_routing_cfg(phy, subdevs->interface, false,
- subdevs->bus.ccp2.phy_layer);
+ csiphy_routing_cfg(phy, buscfg->interface, false,
+ buscfg->bus.ccp2.phy_layer);
csiphy_power_autoswitch_enable(phy, false);
csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
regulator_disable(phy->vdd);
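
The PHY routing registers now go through the syscon regmap instead of a privately mapped control region. A minimal read-modify-write sketch of that access pattern, with the mask/mode/shift parameters assumed from the hunks above:

static void example_set_phy_mode(struct regmap *syscon, unsigned int offset,
				 u32 mask, u32 mode, unsigned int shift)
{
	u32 reg;

	regmap_read(syscon, offset, &reg);
	reg &= ~(mask << shift);
	reg |= mode << shift;
	regmap_write(syscon, offset, reg);
}

regmap_update_bits() could fold this into a single call; the sketch keeps the explicit read-modify-write form used by the patch.
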
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index b208c5417146..ccaf92f39236 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -297,7 +297,6 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb->ops = &h3a_aewb_ops;
aewb->priv = aewb_cfg;
- aewb->dma_ch = -1;
aewb->event_type = V4L2_EVENT_OMAP3ISP_AEWB;
aewb->isp = isp;
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 8a83e195f3e3..92937f7eecef 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -360,7 +360,6 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af->ops = &h3a_af_ops;
af->priv = af_cfg;
- af->dma_ch = -1;
af->event_type = V4L2_EVENT_OMAP3ISP_AF;
af->isp = isp;
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index ce822c34c843..7138b043a4aa 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -16,20 +16,18 @@
*/
#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dmaengine.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/device.h>
#include "isp.h"
#include "ispreg.h"
#include "isphist.h"
-#define OMAP24XX_DMA_NO_DEVICE 0
-
#define HIST_CONFIG_DMA 1
-#define HIST_USING_DMA(hist) ((hist)->dma_ch >= 0)
-
/*
* hist_reset_mem - clear Histogram memory before start stats engine.
*/
@@ -62,20 +60,6 @@ static void hist_reset_mem(struct ispstat *hist)
hist->wait_acc_frames = conf->num_acc_frames;
}
-static void hist_dma_config(struct ispstat *hist)
-{
- struct isp_device *isp = hist->isp;
-
- hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32;
- hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT;
- hist->dma_config.frame_count = 1;
- hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT;
- hist->dma_config.src_start = isp->mmio_base_phys[OMAP3_ISP_IOMEM_HIST]
- + ISPHIST_DATA;
- hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC;
- hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
-}
-
/*
* hist_setup_regs - Helper function to update Histogram registers.
*/
@@ -176,17 +160,12 @@ static int hist_busy(struct ispstat *hist)
& ISPHIST_PCR_BUSY;
}
-static void hist_dma_cb(int lch, u16 ch_status, void *data)
+static void hist_dma_cb(void *data)
{
struct ispstat *hist = data;
- if (ch_status & ~OMAP_DMA_BLOCK_IRQ) {
- dev_dbg(hist->isp->dev, "hist: DMA error. status = 0x%04x\n",
- ch_status);
- omap_stop_dma(lch);
- hist_reset_mem(hist);
- atomic_set(&hist->buf_err, 1);
- }
+ /* FIXME: The DMA engine API can't report transfer errors :-/ */
+
isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
ISPHIST_CNT_CLEAR);
@@ -198,24 +177,57 @@ static void hist_dma_cb(int lch, u16 ch_status, void *data)
static int hist_buf_dma(struct ispstat *hist)
{
dma_addr_t dma_addr = hist->active_buf->dma_addr;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config cfg;
+ dma_cookie_t cookie;
+ int ret;
if (unlikely(!dma_addr)) {
dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
- hist_reset_mem(hist);
- return STAT_NO_BUF;
+ goto error;
}
isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
ISPHIST_CNT_CLEAR);
omap3isp_flush(hist->isp);
- hist->dma_config.dst_start = dma_addr;
- hist->dma_config.elem_count = hist->buf_size / sizeof(u32);
- omap_set_dma_params(hist->dma_ch, &hist->dma_config);
- omap_start_dma(hist->dma_ch);
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = hist->buf_size / 4;
+
+ ret = dmaengine_slave_config(hist->dma_ch, &cfg);
+ if (ret < 0) {
+ dev_dbg(hist->isp->dev,
+ "hist: DMA slave configuration failed\n");
+ goto error;
+ }
+
+ tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
+ hist->buf_size, DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK);
+ if (tx == NULL) {
+ dev_dbg(hist->isp->dev,
+ "hist: DMA slave preparation failed\n");
+ goto error;
+ }
+
+ tx->callback = hist_dma_cb;
+ tx->callback_param = hist;
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
+ goto error;
+ }
+
+ dma_async_issue_pending(hist->dma_ch);
return STAT_BUF_WAITING_DMA;
+
+error:
+ hist_reset_mem(hist);
+ return STAT_NO_BUF;
}
static int hist_buf_pio(struct ispstat *hist)
@@ -272,7 +284,7 @@ static int hist_buf_process(struct ispstat *hist)
if (--(hist->wait_acc_frames))
return STAT_NO_BUF;
- if (HIST_USING_DMA(hist))
+ if (hist->dma_ch)
ret = hist_buf_dma(hist);
else
ret = hist_buf_pio(hist);
@@ -473,18 +485,28 @@ int omap3isp_hist_init(struct isp_device *isp)
hist->isp = isp;
- if (HIST_CONFIG_DMA)
- ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
- hist_dma_cb, hist, &hist->dma_ch);
- if (ret) {
- if (HIST_CONFIG_DMA)
- dev_warn(isp->dev, "hist: DMA request channel failed. "
- "Using PIO only.\n");
- hist->dma_ch = -1;
- } else {
- dev_dbg(isp->dev, "hist: DMA channel = %d\n", hist->dma_ch);
- hist_dma_config(hist);
- omap_enable_dma_irq(hist->dma_ch, OMAP_DMA_BLOCK_IRQ);
+ if (HIST_CONFIG_DMA) {
+ struct platform_device *pdev = to_platform_device(isp->dev);
+ struct resource *res;
+ unsigned int sig = 0;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "hist");
+ if (res)
+ sig = res->start;
+
+ hist->dma_ch = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn, &sig, isp->dev, "hist");
+ if (!hist->dma_ch)
+ dev_warn(isp->dev,
+ "hist: DMA channel request failed, using PIO\n");
+ else
+ dev_dbg(isp->dev, "hist: using DMA channel %s\n",
+ dma_chan_name(hist->dma_ch));
}
hist->ops = &hist_ops;
@@ -493,8 +515,8 @@ int omap3isp_hist_init(struct isp_device *isp)
ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
if (ret) {
- if (HIST_USING_DMA(hist))
- omap_free_dma(hist->dma_ch);
+ if (hist->dma_ch)
+ dma_release_channel(hist->dma_ch);
}
return ret;
@@ -505,7 +527,10 @@ int omap3isp_hist_init(struct isp_device *isp)
*/
void omap3isp_hist_cleanup(struct isp_device *isp)
{
- if (HIST_USING_DMA(&isp->isp_hist))
- omap_free_dma(isp->isp_hist.dma_ch);
- omap3isp_stat_cleanup(&isp->isp_hist);
+ struct ispstat *hist = &isp->isp_hist;
+
+ if (hist->dma_ch)
+ dma_release_channel(hist->dma_ch);
+
+ omap3isp_stat_cleanup(hist);
}
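
The histogram drain now uses the generic dmaengine slave API instead of the legacy OMAP DMA calls. A condensed, self-contained sketch of the submission sequence (function and parameter names are illustrative; the FIFO physical address, destination buffer and callback come from the driver state shown above):

static int example_start_hist_dma(struct dma_chan *chan, dma_addr_t src_phys,
				  dma_addr_t dst, size_t len,
				  dma_async_tx_callback cb, void *cb_arg)
{
	struct dma_slave_config cfg = {
		.src_addr = src_phys,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = len / 4,
	};
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret;

	/* Describe the device-side end of the transfer. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0)
		return ret;

	/* Prepare a single device-to-memory transfer into the stat buffer. */
	tx = dmaengine_prep_slave_single(chan, dst, len, DMA_DEV_TO_MEM,
					 DMA_CTRL_ACK);
	if (!tx)
		return -EIO;

	tx->callback = cb;
	tx->callback_param = cb_arg;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
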
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index dd9eed45d853..15cb254ccc39 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -1686,21 +1686,21 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
}
static struct v4l2_mbus_framefmt *
-__preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
+__preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&prev->subdev, cfg, pad);
else
return &prev->formats[pad];
}
static struct v4l2_rect *
-__preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
+__preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(fh, PREV_PAD_SINK);
+ return v4l2_subdev_get_try_crop(&prev->subdev, cfg, PREV_PAD_SINK);
else
return &prev->crop;
}
@@ -1727,7 +1727,7 @@ static const unsigned int preview_output_fmts[] = {
/*
* preview_try_format - Validate a format
* @prev: ISP preview engine
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @pad: pad number
* @fmt: format to be validated
* @which: try/active format selector
@@ -1736,7 +1736,7 @@ static const unsigned int preview_output_fmts[] = {
* engine limits and the format and crop rectangles on other pads.
*/
static void preview_try_format(struct isp_prev_device *prev,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1777,7 +1777,7 @@ static void preview_try_format(struct isp_prev_device *prev,
case PREV_PAD_SOURCE:
pixelcode = fmt->code;
- *fmt = *__preview_get_format(prev, fh, PREV_PAD_SINK, which);
+ *fmt = *__preview_get_format(prev, cfg, PREV_PAD_SINK, which);
switch (pixelcode) {
case MEDIA_BUS_FMT_YUYV8_1X16:
@@ -1795,7 +1795,7 @@ static void preview_try_format(struct isp_prev_device *prev,
* is not supported yet, hardcode the output size to the crop
* rectangle size.
*/
- crop = __preview_get_crop(prev, fh, which);
+ crop = __preview_get_crop(prev, cfg, which);
fmt->width = crop->width;
fmt->height = crop->height;
@@ -1864,12 +1864,12 @@ static void preview_try_crop(struct isp_prev_device *prev,
/*
* preview_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int preview_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -1893,7 +1893,7 @@ static int preview_enum_mbus_code(struct v4l2_subdev *sd,
}
static int preview_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1905,7 +1905,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- preview_try_format(prev, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ preview_try_format(prev, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1915,7 +1915,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- preview_try_format(prev, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ preview_try_format(prev, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1925,7 +1925,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
/*
* preview_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @sel: Selection rectangle
*
* The only supported rectangles are the crop rectangles on the sink pad.
@@ -1933,7 +1933,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int preview_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1949,13 +1949,13 @@ static int preview_get_selection(struct v4l2_subdev *sd,
sel->r.width = INT_MAX;
sel->r.height = INT_MAX;
- format = __preview_get_format(prev, fh, PREV_PAD_SINK,
+ format = __preview_get_format(prev, cfg, PREV_PAD_SINK,
sel->which);
preview_try_crop(prev, format, &sel->r);
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__preview_get_crop(prev, fh, sel->which);
+ sel->r = *__preview_get_crop(prev, cfg, sel->which);
break;
default:
@@ -1968,7 +1968,7 @@ static int preview_get_selection(struct v4l2_subdev *sd,
/*
* preview_set_selection - Set a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @sel: Selection rectangle
*
* The only supported rectangle is the actual crop rectangle on the sink pad.
@@ -1976,7 +1976,7 @@ static int preview_get_selection(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int preview_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1995,17 +1995,17 @@ static int preview_set_selection(struct v4l2_subdev *sd,
* rectangle.
*/
if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
- sel->r = *__preview_get_crop(prev, fh, sel->which);
+ sel->r = *__preview_get_crop(prev, cfg, sel->which);
return 0;
}
- format = __preview_get_format(prev, fh, PREV_PAD_SINK, sel->which);
+ format = __preview_get_format(prev, cfg, PREV_PAD_SINK, sel->which);
preview_try_crop(prev, format, &sel->r);
- *__preview_get_crop(prev, fh, sel->which) = sel->r;
+ *__preview_get_crop(prev, cfg, sel->which) = sel->r;
/* Update the source format. */
- format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, sel->which);
- preview_try_format(prev, fh, PREV_PAD_SOURCE, format, sel->which);
+ format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE, sel->which);
+ preview_try_format(prev, cfg, PREV_PAD_SOURCE, format, sel->which);
return 0;
}
@@ -2013,17 +2013,17 @@ static int preview_set_selection(struct v4l2_subdev *sd,
/*
* preview_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __preview_get_format(prev, fh, fmt->pad, fmt->which);
+ format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -2034,28 +2034,28 @@ static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* preview_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __preview_get_format(prev, fh, fmt->pad, fmt->which);
+ format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- preview_try_format(prev, fh, fmt->pad, &fmt->format, fmt->which);
+ preview_try_format(prev, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == PREV_PAD_SINK) {
/* Reset the crop rectangle. */
- crop = __preview_get_crop(prev, fh, fmt->which);
+ crop = __preview_get_crop(prev, cfg, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
@@ -2064,9 +2064,9 @@ static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
preview_try_crop(prev, &fmt->format, crop);
/* Update the source format. */
- format = __preview_get_format(prev, fh, PREV_PAD_SOURCE,
+ format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE,
fmt->which);
- preview_try_format(prev, fh, PREV_PAD_SOURCE, format,
+ preview_try_format(prev, cfg, PREV_PAD_SOURCE, format,
fmt->which);
}
@@ -2093,7 +2093,7 @@ static int preview_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- preview_set_format(sd, fh, &format);
+ preview_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 2b9bc4839876..7cfb43dc0ffd 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -112,16 +112,16 @@ static const struct isprsz_coef filter_coefs = {
* __resizer_get_format - helper function for getting resizer format
* @res : pointer to resizer private structure
* @pad : pad number
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @which : wanted subdev format
* return zero
*/
static struct v4l2_mbus_framefmt *
-__resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_fh *fh,
+__resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&res->subdev, cfg, pad);
else
return &res->formats[pad];
}
@@ -129,15 +129,15 @@ __resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_fh *fh,
/*
* __resizer_get_crop - helper function for getting resizer crop rectangle
* @res : pointer to resizer private structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @which : wanted subdev crop rectangle
*/
static struct v4l2_rect *
-__resizer_get_crop(struct isp_res_device *res, struct v4l2_subdev_fh *fh,
+__resizer_get_crop(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(fh, RESZ_PAD_SINK);
+ return v4l2_subdev_get_try_crop(&res->subdev, cfg, RESZ_PAD_SINK);
else
return &res->crop.request;
}
@@ -1215,7 +1215,7 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
/*
* resizer_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP resizer V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @sel: Selection rectangle
*
* The only supported rectangles are the crop rectangles on the sink pad.
@@ -1223,7 +1223,7 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
* Return 0 on success or a negative error code otherwise.
*/
static int resizer_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1234,9 +1234,9 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
if (sel->pad != RESZ_PAD_SINK)
return -EINVAL;
- format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
+ format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
sel->which);
- format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+ format_source = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
sel->which);
switch (sel->target) {
@@ -1251,7 +1251,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__resizer_get_crop(res, fh, sel->which);
+ sel->r = *__resizer_get_crop(res, cfg, sel->which);
resizer_calc_ratios(res, &sel->r, format_source, &ratio);
break;
@@ -1265,7 +1265,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
/*
* resizer_set_selection - Set a selection rectangle on a pad
* @sd: ISP resizer V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @sel: Selection rectangle
*
* The only supported rectangle is the actual crop rectangle on the sink pad.
@@ -1276,7 +1276,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int resizer_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1290,9 +1290,9 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
sel->pad != RESZ_PAD_SINK)
return -EINVAL;
- format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
+ format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
sel->which);
- format_source = *__resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+ format_source = *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
sel->which);
dev_dbg(isp->dev, "%s(%s): req %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
@@ -1310,7 +1310,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
* stored the mangled rectangle.
*/
resizer_try_crop(format_sink, &format_source, &sel->r);
- *__resizer_get_crop(res, fh, sel->which) = sel->r;
+ *__resizer_get_crop(res, cfg, sel->which) = sel->r;
resizer_calc_ratios(res, &sel->r, &format_source, &ratio);
dev_dbg(isp->dev, "%s(%s): got %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
@@ -1320,7 +1320,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
format_source.width, format_source.height);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *__resizer_get_format(res, fh, RESZ_PAD_SOURCE, sel->which) =
+ *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
format_source;
return 0;
}
@@ -1331,7 +1331,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
*/
spin_lock_irqsave(&res->lock, flags);
- *__resizer_get_format(res, fh, RESZ_PAD_SOURCE, sel->which) =
+ *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
format_source;
res->ratio = ratio;
@@ -1368,13 +1368,13 @@ static unsigned int resizer_max_in_width(struct isp_res_device *res)
/*
* resizer_try_format - Handle try format by pad subdev method
* @res : ISP resizer device
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @pad : pad num
* @fmt : pointer to v4l2 format structure
* @which : wanted subdev format
*/
static void resizer_try_format(struct isp_res_device *res,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1395,10 +1395,10 @@ static void resizer_try_format(struct isp_res_device *res,
break;
case RESZ_PAD_SOURCE:
- format = __resizer_get_format(res, fh, RESZ_PAD_SINK, which);
+ format = __resizer_get_format(res, cfg, RESZ_PAD_SINK, which);
fmt->code = format->code;
- crop = *__resizer_get_crop(res, fh, which);
+ crop = *__resizer_get_crop(res, cfg, which);
resizer_calc_ratios(res, &crop, fmt, &ratio);
break;
}
@@ -1410,12 +1410,12 @@ static void resizer_try_format(struct isp_res_device *res,
/*
* resizer_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1430,8 +1430,8 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __resizer_get_format(res, fh, RESZ_PAD_SINK,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ code->which);
code->code = format->code;
}
@@ -1439,7 +1439,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
}
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1451,7 +1451,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- resizer_try_format(res, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ resizer_try_format(res, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1461,7 +1461,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- resizer_try_format(res, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ resizer_try_format(res, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1471,17 +1471,17 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
/*
* resizer_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(res, fh, fmt->pad, fmt->which);
+ format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1492,37 +1492,37 @@ static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* resizer_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __resizer_get_format(res, fh, fmt->pad, fmt->which);
+ format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- resizer_try_format(res, fh, fmt->pad, &fmt->format, fmt->which);
+ resizer_try_format(res, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->pad == RESZ_PAD_SINK) {
/* reset crop rectangle */
- crop = __resizer_get_crop(res, fh, fmt->which);
+ crop = __resizer_get_crop(res, cfg, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
crop->height = fmt->format.height;
/* Propagate the format from sink to source */
- format = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+ format = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- resizer_try_format(res, fh, RESZ_PAD_SOURCE, format,
+ resizer_try_format(res, cfg, RESZ_PAD_SOURCE, format,
fmt->which);
}
@@ -1573,7 +1573,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_YUYV8_1X16;
format.format.width = 4096;
format.format.height = 4096;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
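
Across ccdc, ccp2, csi2, preview and resizer the frame-size enumeration also stops hardcoding V4L2_SUBDEV_FORMAT_TRY. A sketch of the probing pattern with fse->which forwarded to the try handler (entity-specific names are placeholders):

static int example_enum_frame_size(struct v4l2_subdev *sd,
				   struct v4l2_subdev_pad_config *cfg,
				   struct v4l2_subdev_frame_size_enum *fse)
{
	struct example_device *dev = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	/* Probe the smallest size the entity accepts for this code. */
	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	example_try_format(dev, cfg, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	/* Probe the largest size by requesting an oversized one. */
	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	example_try_format(dev, cfg, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}
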
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index a94e8340508f..20434e83e801 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -21,7 +21,7 @@
#include "isp.h"
-#define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch >= 0)
+#define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch != NULL)
/*
* MAGIC_SIZE must always be the greatest common divisor of
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index b32b29677e2c..b79380d83fcf 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -20,7 +20,6 @@
#include <linux/types.h>
#include <linux/omap3isp.h>
-#include <linux/omap-dma.h>
#include <media/v4l2-event.h>
#include "isp.h"
@@ -33,6 +32,7 @@
#define STAT_NO_BUF 1 /* An error has occurred */
#define STAT_BUF_WAITING_DMA 2 /* Histogram only: DMA is running */
+struct dma_chan;
struct ispstat;
struct ispstat_buffer {
@@ -96,7 +96,6 @@ struct ispstat {
u8 inc_config;
atomic_t buf_err;
enum ispstat_state_t state; /* enabling/disabling state */
- struct omap_dma_channel_params dma_config;
struct isp_device *isp;
void *priv; /* pointer to priv config struct */
void *recover_priv; /* pointer to recover priv configuration */
@@ -110,7 +109,7 @@ struct ispstat {
u32 frame_number;
u32 buf_size;
u32 buf_alloc_size;
- int dma_ch;
+ struct dma_chan *dma_ch;
unsigned long event_type;
struct ispstat_buffer *buf;
struct ispstat_buffer *active_buf;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 3fe9047ef466..d285af18df7f 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -452,7 +452,6 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
enum isp_pipeline_state state;
struct isp_buffer *buf;
unsigned long flags;
- struct timespec ts;
spin_lock_irqsave(&video->irqlock, flags);
if (WARN_ON(list_empty(&video->dmaqueue))) {
@@ -465,9 +464,7 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
list_del(&buf->irqlist);
spin_unlock_irqrestore(&video->irqlock, flags);
- ktime_get_ts(&ts);
- buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
- buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
/* Do frame number propagation only if this is the output video node.
* Frame number either comes from the CSI receivers or it gets
@@ -524,7 +521,6 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
irqlist);
- buf->vb.state = VB2_BUF_STATE_ACTIVE;
spin_unlock_irqrestore(&video->irqlock, flags);
@@ -1022,7 +1018,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
pipe->entities = 0;
- if (video->isp->pdata->set_constraints)
+ if (video->isp->pdata && video->isp->pdata->set_constraints)
video->isp->pdata->set_constraints(video->isp, true);
pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
pipe->max_rate = pipe->l3_ick;
@@ -1104,7 +1100,7 @@ err_set_stream:
err_check_format:
media_entity_pipeline_stop(&video->video.entity);
err_pipeline_start:
- if (video->isp->pdata->set_constraints)
+ if (video->isp->pdata && video->isp->pdata->set_constraints)
video->isp->pdata->set_constraints(video->isp, false);
/* The DMA queue must be emptied here, otherwise CCDC interrupts that
* will get triggered the next time the CCDC is powered up will try to
@@ -1165,7 +1161,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
video->queue = NULL;
video->error = false;
- if (video->isp->pdata->set_constraints)
+ if (video->isp->pdata && video->isp->pdata->set_constraints)
video->isp->pdata->set_constraints(video->isp, false);
media_entity_pipeline_stop(&video->video.entity);
@@ -1326,14 +1322,8 @@ static unsigned int isp_video_poll(struct file *file, poll_table *wait)
static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
- struct isp_video *video = video_drvdata(file);
- int ret;
-
- mutex_lock(&video->queue_lock);
- ret = vb2_mmap(&vfh->queue, vma);
- mutex_unlock(&video->queue_lock);
- return ret;
+ return vb2_mmap(&vfh->queue, vma);
}
static struct v4l2_file_operations isp_video_fops = {
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 54479d60cc0d..f6a61b9ceff4 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1219,7 +1219,7 @@ static const u32 camif_mbus_formats[] = {
*/
static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(camif_mbus_formats))
@@ -1230,14 +1230,14 @@ static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1297,7 +1297,7 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
}
static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1325,7 +1325,7 @@ static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
__camif_subdev_try_format(camif, mf, fmt->pad);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
mutex_unlock(&camif->lock);
return 0;
@@ -1364,7 +1364,7 @@ static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1377,7 +1377,7 @@ static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
return 0;
}
@@ -1451,7 +1451,7 @@ static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
}
static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1465,7 +1465,7 @@ static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
__camif_try_crop(camif, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
} else {
unsigned long flags;
unsigned int i;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index a92ff4249d10..bfbf1575677c 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -621,6 +621,7 @@ static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
return ctx->subsampling;
case SJPEG_EXYNOS3250:
+ case SJPEG_EXYNOS5420:
if (ctx->subsampling > 3)
return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
return exynos3250_decoded_subsampling[ctx->subsampling];
@@ -1142,13 +1143,13 @@ static void jpeg_bound_align_image(struct s5p_jpeg_ctx *ctx,
w_step = 1 << walign;
h_step = 1 << halign;
- if (ctx->jpeg->variant->version == SJPEG_EXYNOS3250) {
+ if (ctx->jpeg->variant->hw3250_compat) {
/*
* Rightmost and bottommost pixels are cropped by the
- * Exynos3250 JPEG IP for RGB formats, for the specific
- * width and height values respectively. This assignment
- * will result in v4l_bound_align_image returning dimensions
- * reduced by 1 for the aforementioned cases.
+ * Exynos3250/compatible JPEG IP for RGB formats, for the
+ * specific width and height values respectively. This
+ * assignment will result in v4l_bound_align_image returning
+ * dimensions reduced by 1 for the aforementioned cases.
*/
if (w_step == 4 && ((width & 3) == 1)) {
wmax = width;
@@ -1384,12 +1385,12 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
/*
* Prevent downscaling to YUV420 format by more than 2
- * for Exynos3250 SoC as it produces broken raw image
+ * for Exynos3250/compatible SoC as it produces broken raw image
* in such cases.
*/
if (ct->mode == S5P_JPEG_DECODE &&
f_type == FMT_TYPE_CAPTURE &&
- ct->jpeg->variant->version == SJPEG_EXYNOS3250 &&
+ ct->jpeg->variant->hw3250_compat &&
pix->pixelformat == V4L2_PIX_FMT_YUV420 &&
ct->scale_factor > 2) {
scale_rect.width = ct->out_q.w / 2;
@@ -1569,12 +1570,12 @@ static int s5p_jpeg_s_selection(struct file *file, void *fh,
if (s->target == V4L2_SEL_TGT_COMPOSE) {
if (ctx->mode != S5P_JPEG_DECODE)
return -EINVAL;
- if (ctx->jpeg->variant->version == SJPEG_EXYNOS3250)
+ if (ctx->jpeg->variant->hw3250_compat)
ret = exynos3250_jpeg_try_downscale(ctx, rect);
} else if (s->target == V4L2_SEL_TGT_CROP) {
if (ctx->mode != S5P_JPEG_ENCODE)
return -EINVAL;
- if (ctx->jpeg->variant->version == SJPEG_EXYNOS3250)
+ if (ctx->jpeg->variant->hw3250_compat)
ret = exynos3250_jpeg_try_crop(ctx, rect);
}
@@ -1604,8 +1605,9 @@ static int s5p_jpeg_adjust_subs_ctrl(struct s5p_jpeg_ctx *ctx, int *ctrl_val)
case SJPEG_S5P:
return 0;
case SJPEG_EXYNOS3250:
+ case SJPEG_EXYNOS5420:
/*
- * The exynos3250 device can produce JPEG image only
+ * The exynos3250/compatible device can produce JPEG image only
* of 4:4:4 subsampling when given RGB32 source image.
*/
if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB32)
@@ -1624,7 +1626,7 @@ static int s5p_jpeg_adjust_subs_ctrl(struct s5p_jpeg_ctx *ctx, int *ctrl_val)
}
/*
- * The exynos4x12 and exynos3250 devices require resulting
+ * The exynos4x12 and exynos3250/compatible devices require resulting
* jpeg subsampling not to be lower than the input raw image
* subsampling.
*/
@@ -1842,7 +1844,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
struct vb2_buffer *vb;
- struct s5p_jpeg_addr jpeg_addr;
+ struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size, padding_bytes = 0;
jpeg_addr.cb = 0;
@@ -1946,7 +1948,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
struct vb2_buffer *vb;
- struct s5p_jpeg_addr jpeg_addr;
+ struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size;
pix_size = ctx->cap_q.w * ctx->cap_q.h;
@@ -2020,6 +2022,16 @@ static void exynos3250_jpeg_device_run(void *priv)
exynos3250_jpeg_qtbl(jpeg->regs, 2, 1);
exynos3250_jpeg_qtbl(jpeg->regs, 3, 1);
+ /*
+ * Some SoCs require setting Huffman tables before each run
+ */
+ if (jpeg->variant->htbl_reinit) {
+ s5p_jpeg_set_hdctbl(jpeg->regs);
+ s5p_jpeg_set_hdctblg(jpeg->regs);
+ s5p_jpeg_set_hactbl(jpeg->regs);
+ s5p_jpeg_set_hactblg(jpeg->regs);
+ }
+
/* Y, Cb, Cr use Huffman table 0 */
exynos3250_jpeg_htbl_ac(jpeg->regs, 1);
exynos3250_jpeg_htbl_dc(jpeg->regs, 1);
@@ -2663,13 +2675,12 @@ static int s5p_jpeg_runtime_resume(struct device *dev)
/*
* JPEG IP allows storing two Huffman tables for each component.
* We fill table 0 for each component and do this here only
- * for S5PC210 and Exynos3250 SoCs. Exynos4x12 SoC requires
- * programming its Huffman tables each time the encoding process
- * is initialized, and thus it is accomplished in the device_run
- * callback of m2m_ops.
+ * for S5PC210 and Exynos3250 SoCs. Exynos4x12 and Exynos542x SoC
+ * require programming their Huffman tables each time the encoding
+ * process is initialized, and thus it is accomplished in the
+ * device_run callback of m2m_ops.
*/
- if (jpeg->variant->version == SJPEG_S5P ||
- jpeg->variant->version == SJPEG_EXYNOS3250) {
+ if (!jpeg->variant->htbl_reinit) {
s5p_jpeg_set_hdctbl(jpeg->regs);
s5p_jpeg_set_hdctblg(jpeg->regs);
s5p_jpeg_set_hactbl(jpeg->regs);
@@ -2717,6 +2728,7 @@ static struct s5p_jpeg_variant exynos3250_jpeg_drvdata = {
.jpeg_irq = exynos3250_jpeg_irq,
.m2m_ops = &exynos3250_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250,
+ .hw3250_compat = 1,
};
static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
@@ -2724,6 +2736,16 @@ static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
.jpeg_irq = exynos4_jpeg_irq,
.m2m_ops = &exynos4_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS4,
+ .htbl_reinit = 1,
+};
+
+static struct s5p_jpeg_variant exynos5420_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS5420,
+ .jpeg_irq = exynos3250_jpeg_irq, /* intentionally 3250 */
+ .m2m_ops = &exynos3250_jpeg_m2m_ops, /* intentionally 3250 */
+ .fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250, /* intentionally 3250 */
+ .hw3250_compat = 1,
+ .htbl_reinit = 1,
};
static const struct of_device_id samsung_jpeg_match[] = {
@@ -2739,6 +2761,9 @@ static const struct of_device_id samsung_jpeg_match[] = {
}, {
.compatible = "samsung,exynos4212-jpeg",
.data = &exynos4_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos5420-jpeg",
+ .data = &exynos5420_jpeg_drvdata,
},
{},
};
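
The jpeg-core.c changes above replace per-SoC version comparisons with two capability bits in s5p_jpeg_variant: hw3250_compat marks SoCs that reuse the Exynos3250 programming model, and htbl_reinit marks SoCs whose Huffman tables must be reprogrammed before every run rather than once at resume. The Exynos5420 is then described entirely through its drvdata entry. An illustrative before/after of a call site (not an additional hunk from this patch):

	/* before: every new compatible SoC needs another version check */
	if (ctx->jpeg->variant->version == SJPEG_EXYNOS3250 ||
	    ctx->jpeg->variant->version == SJPEG_EXYNOS5420)
		ret = exynos3250_jpeg_try_crop(ctx, rect);

	/* after: one capability bit covers all compatible SoCs */
	if (ctx->jpeg->variant->hw3250_compat)
		ret = exynos3250_jpeg_try_crop(ctx, rect);
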
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 764b32de326b..7d9a9ed19cea 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -67,10 +67,12 @@
#define SJPEG_SUBSAMPLING_420 0x22
/* Version numbers */
-
-#define SJPEG_S5P 1
-#define SJPEG_EXYNOS3250 2
-#define SJPEG_EXYNOS4 3
+enum sjpeg_version {
+ SJPEG_S5P,
+ SJPEG_EXYNOS3250,
+ SJPEG_EXYNOS4,
+ SJPEG_EXYNOS5420,
+};
enum exynos4_jpeg_result {
OK_ENC_OR_DEC,
@@ -130,6 +132,8 @@ struct s5p_jpeg {
struct s5p_jpeg_variant {
unsigned int version;
unsigned int fmt_ver_flag;
+ unsigned int hw3250_compat:1;
+ unsigned int htbl_reinit:1;
struct v4l2_m2m_ops *m2m_ops;
irqreturn_t (*jpeg_irq)(int irq, void *priv);
};
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
index e3b8e67e005f..b5f20e722b63 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -51,18 +51,6 @@ void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
writel(reg, regs + S5P_JPGCMOD);
}
-void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16)
-{
- unsigned long reg;
-
- reg = readl(regs + S5P_JPGCMOD);
- if (y16)
- reg |= S5P_MODE_Y16;
- else
- reg &= ~S5P_MODE_Y16_MASK;
- writel(reg, regs + S5P_JPGCMOD);
-}
-
void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode)
{
unsigned long reg, m;
@@ -208,26 +196,6 @@ void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
writel(reg, regs + S5P_JPGINTSE);
}
-void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val)
-{
- unsigned long reg;
-
- reg = readl(regs + S5P_JPG_TIMER_SE);
- reg |= S5P_TIMER_INT_EN;
- reg &= ~S5P_TIMER_INIT_MASK;
- reg |= val & S5P_TIMER_INIT_MASK;
- writel(reg, regs + S5P_JPG_TIMER_SE);
-}
-
-void s5p_jpeg_timer_disable(void __iomem *regs)
-{
- unsigned long reg;
-
- reg = readl(regs + S5P_JPG_TIMER_SE);
- reg &= ~S5P_TIMER_INT_EN_MASK;
- writel(reg, regs + S5P_JPG_TIMER_SE);
-}
-
int s5p_jpeg_timer_stat(void __iomem *regs)
{
return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
index c11ebe86b9c9..f208fa3ed738 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
@@ -29,7 +29,6 @@
void s5p_jpeg_reset(void __iomem *regs);
void s5p_jpeg_poweron(void __iomem *regs);
void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode);
-void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16);
void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode);
void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode);
unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs);
@@ -42,8 +41,6 @@ void s5p_jpeg_x(void __iomem *regs, unsigned int x);
void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable);
void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable);
void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl);
-void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val);
-void s5p_jpeg_timer_disable(void __iomem *regs);
int s5p_jpeg_timer_stat(void __iomem *regs);
void s5p_jpeg_clear_timer_stat(void __iomem *regs);
void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 98374e8bad3e..8333fbc2fe96 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -844,6 +844,13 @@ static int s5p_mfc_open(struct file *file)
ret = -ENOENT;
goto err_queue_init;
}
+ /* One way to indicate end-of-stream for MFC is to set
+ * bytesused == 0. However, by default videobuf2 treats a bytesused
+ * of 0 as a special case and changes its value to the size
+ * of the buffer. Set the allow_zero_bytesused flag so that videobuf2
+ * keeps the value of bytesused intact.
+ */
+ q->allow_zero_bytesused = 1;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
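
The comment added above documents why q->allow_zero_bytesused is set: the MFC decoder treats an OUTPUT buffer queued with bytesused == 0 as an end-of-stream marker, and without the flag videobuf2 would silently rewrite that 0 to the full buffer size. A minimal userspace sketch of signalling EOS this way; the file descriptor and buffer index are hypothetical and error handling is trimmed.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Queue an empty OUTPUT buffer to signal end-of-stream (sketch). */
	static int queue_eos_buffer(int fd, unsigned int index)
	{
		struct v4l2_plane plane;
		struct v4l2_buffer buf;

		memset(&plane, 0, sizeof(plane));
		memset(&buf, 0, sizeof(buf));
		plane.bytesused = 0;	/* kept intact thanks to allow_zero_bytesused */
		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = index;
		buf.m.planes = &plane;
		buf.length = 1;
		return ioctl(fd, VIDIOC_QBUF, &buf);
	}
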
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 72d4f2e1efc0..751f3b618337 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -287,7 +287,7 @@ static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
u32 bl_width = divup(width, blk->width);
u32 bl_height = divup(height, blk->height);
u32 sizeimage = bl_width * bl_height * blk->size;
- u16 bytesperline = bl_width * blk->size / blk->height;
+ u32 bytesperline = bl_width * blk->size / blk->height;
plane->sizeimage += sizeimage;
plane->bytesperline = max(plane->bytesperline, bytesperline);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 261f1195b49f..dde1ccc730be 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -62,7 +62,7 @@ enum sh_vou_status {
struct sh_vou_device {
struct v4l2_device v4l2_dev;
- struct video_device *vdev;
+ struct video_device vdev;
atomic_t use_count;
struct sh_vou_pdata *pdata;
spinlock_t lock;
@@ -890,7 +890,7 @@ static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id std_id)
dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, std_id);
- if (std_id & ~vou_dev->vdev->tvnorms)
+ if (std_id & ~vou_dev->vdev.tvnorms)
return -EINVAL;
ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
@@ -1168,10 +1168,10 @@ static int sh_vou_open(struct file *file)
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
- file->private_data = vou_file;
-
- if (mutex_lock_interruptible(&vou_dev->fop_lock))
+ if (mutex_lock_interruptible(&vou_dev->fop_lock)) {
+ kfree(vou_file);
return -ERESTARTSYS;
+ }
if (atomic_inc_return(&vou_dev->use_count) == 1) {
int ret;
/* First open */
@@ -1183,6 +1183,7 @@ static int sh_vou_open(struct file *file)
pm_runtime_put(vou_dev->v4l2_dev.dev);
vou_dev->status = SH_VOU_IDLE;
mutex_unlock(&vou_dev->fop_lock);
+ kfree(vou_file);
return ret;
}
}
@@ -1192,9 +1193,11 @@ static int sh_vou_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_OUTPUT,
V4L2_FIELD_NONE,
sizeof(struct videobuf_buffer),
- vou_dev->vdev, &vou_dev->fop_lock);
+ &vou_dev->vdev, &vou_dev->fop_lock);
mutex_unlock(&vou_dev->fop_lock);
+ file->private_data = vou_file;
+
return 0;
}
@@ -1358,21 +1361,14 @@ static int sh_vou_probe(struct platform_device *pdev)
goto ev4l2devreg;
}
- /* Allocate memory for video device */
- vdev = video_device_alloc();
- if (vdev == NULL) {
- ret = -ENOMEM;
- goto evdevalloc;
- }
-
+ vdev = &vou_dev->vdev;
*vdev = sh_vou_video_template;
if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
vdev->tvnorms |= V4L2_STD_PAL;
vdev->v4l2_dev = &vou_dev->v4l2_dev;
- vdev->release = video_device_release;
+ vdev->release = video_device_release_empty;
vdev->lock = &vou_dev->fop_lock;
- vou_dev->vdev = vdev;
video_set_drvdata(vdev, vou_dev);
pm_runtime_enable(&pdev->dev);
@@ -1406,9 +1402,7 @@ ei2cnd:
ereset:
i2c_put_adapter(i2c_adap);
ei2cgadap:
- video_device_release(vdev);
pm_runtime_disable(&pdev->dev);
-evdevalloc:
v4l2_device_unregister(&vou_dev->v4l2_dev);
ev4l2devreg:
free_irq(irq, vou_dev);
@@ -1435,7 +1429,7 @@ static int sh_vou_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, vou_dev);
pm_runtime_disable(&pdev->dev);
- video_unregister_device(vou_dev->vdev);
+ video_unregister_device(&vou_dev->vdev);
i2c_put_adapter(client->adapter);
v4l2_device_unregister(&vou_dev->v4l2_dev);
iounmap(vou_dev->base);
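
The sh_vou changes above follow a pattern that recurs later in this series (vim2m gets the same treatment): struct video_device is embedded in the driver state instead of being allocated with video_device_alloc(), which removes an allocation, an error label and a potential leak, and turns the release callback into video_device_release_empty() since the memory belongs to the containing structure. A generic sketch of the pattern with hypothetical names:

	struct my_drv {
		struct v4l2_device v4l2_dev;
		struct video_device vdev;	/* embedded, not a pointer */
	};

	static int my_register(struct my_drv *drv)
	{
		drv->vdev = my_video_template;	/* hypothetical template */
		drv->vdev.v4l2_dev = &drv->v4l2_dev;
		drv->vdev.release = video_device_release_empty;
		video_set_drvdata(&drv->vdev, drv);
		return video_register_device(&drv->vdev, VFL_TYPE_GRABBER, -1);
	}
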
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 279ab9f6ae38..6460f8e1b07f 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -135,6 +135,8 @@
#define VIN_MAX_WIDTH 2048
#define VIN_MAX_HEIGHT 2048
+#define TIMEOUT_MS 100
+
enum chip_id {
RCAR_GEN2,
RCAR_H1,
@@ -820,7 +822,10 @@ static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
if (priv->state == STOPPING) {
priv->request_to_stop = true;
spin_unlock_irq(&priv->lock);
- wait_for_completion(&priv->capture_stop);
+ if (!wait_for_completion_timeout(
+ &priv->capture_stop,
+ msecs_to_jiffies(TIMEOUT_MS)))
+ priv->state = STOPPED;
spin_lock_irq(&priv->lock);
}
}
@@ -977,19 +982,6 @@ static void rcar_vin_remove_device(struct soc_camera_device *icd)
icd->devnum);
}
-/* Called with .host_lock held */
-static int rcar_vin_clock_start(struct soc_camera_host *ici)
-{
- /* VIN does not have "mclk" */
- return 0;
-}
-
-/* Called with .host_lock held */
-static void rcar_vin_clock_stop(struct soc_camera_host *ici)
-{
- /* VIN does not have "mclk" */
-}
-
static void set_coeff(struct rcar_vin_priv *priv, unsigned short xs)
{
int i;
@@ -1803,8 +1795,6 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
.owner = THIS_MODULE,
.add = rcar_vin_add_device,
.remove = rcar_vin_remove_device,
- .clock_start = rcar_vin_clock_start,
- .clock_stop = rcar_vin_clock_stop,
.get_formats = rcar_vin_get_formats,
.put_formats = rcar_vin_put_formats,
.get_crop = rcar_vin_get_crop,
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
index c4e7aa0ee7e1..cd93241eb497 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -380,7 +380,6 @@ static int sh_csi2_remove(struct platform_device *pdev)
struct sh_csi2 *priv = container_of(subdev, struct sh_csi2, subdev);
v4l2_async_unregister_subdev(&priv->subdev);
- v4l2_device_unregister_subdev(subdev);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 66634b469c98..7bfe7665687f 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -177,6 +177,30 @@ static int __soc_camera_power_off(struct soc_camera_device *icd)
return 0;
}
+static int soc_camera_clock_start(struct soc_camera_host *ici)
+{
+ int ret;
+
+ if (!ici->ops->clock_start)
+ return 0;
+
+ mutex_lock(&ici->clk_lock);
+ ret = ici->ops->clock_start(ici);
+ mutex_unlock(&ici->clk_lock);
+
+ return ret;
+}
+
+static void soc_camera_clock_stop(struct soc_camera_host *ici)
+{
+ if (!ici->ops->clock_stop)
+ return;
+
+ mutex_lock(&ici->clk_lock);
+ ici->ops->clock_stop(ici);
+ mutex_unlock(&ici->clk_lock);
+}
+
const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
struct soc_camera_device *icd, unsigned int fourcc)
{
@@ -584,9 +608,7 @@ static int soc_camera_add_device(struct soc_camera_device *icd)
return -EBUSY;
if (!icd->clk) {
- mutex_lock(&ici->clk_lock);
- ret = ici->ops->clock_start(ici);
- mutex_unlock(&ici->clk_lock);
+ ret = soc_camera_clock_start(ici);
if (ret < 0)
return ret;
}
@@ -602,11 +624,8 @@ static int soc_camera_add_device(struct soc_camera_device *icd)
return 0;
eadd:
- if (!icd->clk) {
- mutex_lock(&ici->clk_lock);
- ici->ops->clock_stop(ici);
- mutex_unlock(&ici->clk_lock);
- }
+ if (!icd->clk)
+ soc_camera_clock_stop(ici);
return ret;
}
@@ -619,11 +638,8 @@ static void soc_camera_remove_device(struct soc_camera_device *icd)
if (ici->ops->remove)
ici->ops->remove(icd);
- if (!icd->clk) {
- mutex_lock(&ici->clk_lock);
- ici->ops->clock_stop(ici);
- mutex_unlock(&ici->clk_lock);
- }
+ if (!icd->clk)
+ soc_camera_clock_stop(ici);
ici->icd = NULL;
}
@@ -688,7 +704,8 @@ static int soc_camera_open(struct file *file)
/* The camera could have been already on, try to reset */
if (sdesc->subdev_desc.reset)
- sdesc->subdev_desc.reset(icd->pdev);
+ if (icd->control)
+ sdesc->subdev_desc.reset(icd->control);
ret = soc_camera_add_device(icd);
if (ret < 0) {
@@ -1159,7 +1176,8 @@ static void scan_add_host(struct soc_camera_host *ici)
/* The camera could have been already on, try to reset */
if (ssdd->reset)
- ssdd->reset(icd->pdev);
+ if (icd->control)
+ ssdd->reset(icd->control);
icd->parent = ici->v4l2_dev.dev;
@@ -1178,7 +1196,6 @@ static int soc_camera_clk_enable(struct v4l2_clk *clk)
{
struct soc_camera_device *icd = clk->priv;
struct soc_camera_host *ici;
- int ret;
if (!icd || !icd->parent)
return -ENODEV;
@@ -1192,10 +1209,7 @@ static int soc_camera_clk_enable(struct v4l2_clk *clk)
* If a different client is currently being probed, the host will tell
* you to go
*/
- mutex_lock(&ici->clk_lock);
- ret = ici->ops->clock_start(ici);
- mutex_unlock(&ici->clk_lock);
- return ret;
+ return soc_camera_clock_start(ici);
}
static void soc_camera_clk_disable(struct v4l2_clk *clk)
@@ -1208,9 +1222,7 @@ static void soc_camera_clk_disable(struct v4l2_clk *clk)
ici = to_soc_camera_host(icd->parent);
- mutex_lock(&ici->clk_lock);
- ici->ops->clock_stop(ici);
- mutex_unlock(&ici->clk_lock);
+ soc_camera_clock_stop(ici);
module_put(ici->ops->owner);
}
@@ -1364,7 +1376,7 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd,
snprintf(clk_name, sizeof(clk_name), "%d-%04x",
shd->i2c_adapter_id, shd->board_info->addr);
- icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, "mclk", icd);
+ icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd);
if (IS_ERR(icd->clk)) {
ret = PTR_ERR(icd->clk);
goto eclkreg;
@@ -1445,7 +1457,7 @@ static int soc_camera_async_bound(struct v4l2_async_notifier *notifier,
memcpy(&sdesc->subdev_desc, ssdd,
sizeof(sdesc->subdev_desc));
if (ssdd->reset)
- ssdd->reset(icd->pdev);
+ ssdd->reset(&client->dev);
}
icd->control = &client->dev;
@@ -1545,7 +1557,7 @@ static int scan_async_group(struct soc_camera_host *ici,
snprintf(clk_name, sizeof(clk_name), "%d-%04x",
sasd->asd.match.i2c.adapter_id, sasd->asd.match.i2c.address);
- icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, "mclk", icd);
+ icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd);
if (IS_ERR(icd->clk)) {
ret = PTR_ERR(icd->clk);
goto eclkreg;
@@ -1650,7 +1662,7 @@ static int soc_of_bind(struct soc_camera_host *ici,
snprintf(clk_name, sizeof(clk_name), "of-%s",
of_node_full_name(remote));
- icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, "mclk", icd);
+ icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd);
if (IS_ERR(icd->clk)) {
ret = PTR_ERR(icd->clk);
goto eclkreg;
@@ -1659,6 +1671,8 @@ static int soc_of_bind(struct soc_camera_host *ici,
ret = v4l2_async_notifier_register(&ici->v4l2_dev, &sasc->notifier);
if (!ret)
return 0;
+
+ v4l2_clk_unregister(icd->clk);
eclkreg:
icd->clk = NULL;
platform_device_del(sasc->pdev);
@@ -1694,7 +1708,6 @@ static void scan_of_host(struct soc_camera_host *ici)
if (!i)
soc_of_bind(ici, epn, ren->parent);
- of_node_put(epn);
of_node_put(ren);
if (i) {
@@ -1702,6 +1715,8 @@ static void scan_of_host(struct soc_camera_host *ici)
break;
}
}
+
+ of_node_put(epn);
}
#else
@@ -1750,9 +1765,7 @@ static int soc_camera_probe(struct soc_camera_host *ici,
ret = -EINVAL;
goto eadd;
} else {
- mutex_lock(&ici->clk_lock);
- ret = ici->ops->clock_start(ici);
- mutex_unlock(&ici->clk_lock);
+ ret = soc_camera_clock_start(ici);
if (ret < 0)
goto eadd;
@@ -1792,9 +1805,7 @@ efinish:
module_put(control->driver->owner);
enodrv:
eadddev:
- mutex_lock(&ici->clk_lock);
- ici->ops->clock_stop(ici);
- mutex_unlock(&ici->clk_lock);
+ soc_camera_clock_stop(ici);
}
eadd:
if (icd->vdev) {
@@ -1888,22 +1899,34 @@ static int default_enum_framesizes(struct soc_camera_device *icd,
int ret;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
- __u32 pixfmt = fsize->pixel_format;
- struct v4l2_frmsizeenum fsize_mbus = *fsize;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ xlate = soc_camera_xlate_by_fourcc(icd, fsize->pixel_format);
if (!xlate)
return -EINVAL;
- /* map xlate-code to pixel_format, sensor only handle xlate-code*/
- fsize_mbus.pixel_format = xlate->code;
+ fse.code = xlate->code;
- ret = v4l2_subdev_call(sd, video, enum_framesizes, &fsize_mbus);
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
if (ret < 0)
return ret;
- *fsize = fsize_mbus;
- fsize->pixel_format = pixfmt;
-
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.min_width;
+ fsize->discrete.height = fse.min_height;
+ return 0;
+ }
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = fse.min_width;
+ fsize->stepwise.max_width = fse.max_width;
+ fsize->stepwise.min_height = fse.min_height;
+ fsize->stepwise.max_height = fse.max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
return 0;
}
@@ -1920,8 +1943,6 @@ int soc_camera_host_register(struct soc_camera_host *ici)
((!ici->ops->init_videobuf ||
!ici->ops->reqbufs) &&
!ici->ops->init_videobuf2) ||
- !ici->ops->clock_start ||
- !ici->ops->clock_stop ||
!ici->ops->poll ||
!ici->v4l2_dev.dev)
return -EINVAL;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 86989d86abfa..678ed9f353cb 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -1147,12 +1147,23 @@ static int viacam_enum_frameintervals(struct file *filp, void *priv,
struct v4l2_frmivalenum *interval)
{
struct via_camera *cam = priv;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = interval->index,
+ .code = cam->mbus_code,
+ .width = cam->sensor_format.width,
+ .height = cam->sensor_format.height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
int ret;
mutex_lock(&cam->lock);
- ret = sensor_call(cam, video, enum_frameintervals, interval);
+ ret = sensor_call(cam, pad, enum_frame_interval, NULL, &fie);
mutex_unlock(&cam->lock);
- return ret;
+ if (ret)
+ return ret;
+ interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ interval->discrete = fie.interval;
+ return 0;
}
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d9d844aab39b..4d6b4cc57c57 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -142,7 +142,7 @@ static struct vim2m_fmt *find_format(struct v4l2_format *f)
struct vim2m_dev {
struct v4l2_device v4l2_dev;
- struct video_device *vfd;
+ struct video_device vfd;
atomic_t num_inst;
struct mutex dev_mutex;
@@ -968,7 +968,7 @@ static struct video_device vim2m_videodev = {
.fops = &vim2m_fops,
.ioctl_ops = &vim2m_ioctl_ops,
.minor = -1,
- .release = video_device_release,
+ .release = video_device_release_empty,
};
static struct v4l2_m2m_ops m2m_ops = {
@@ -996,26 +996,19 @@ static int vim2m_probe(struct platform_device *pdev)
atomic_set(&dev->num_inst, 0);
mutex_init(&dev->dev_mutex);
- vfd = video_device_alloc();
- if (!vfd) {
- v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
- ret = -ENOMEM;
- goto unreg_dev;
- }
-
- *vfd = vim2m_videodev;
+ dev->vfd = vim2m_videodev;
+ vfd = &dev->vfd;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto rel_vdev;
+ goto unreg_dev;
}
video_set_drvdata(vfd, dev);
snprintf(vfd->name, sizeof(vfd->name), "%s", vim2m_videodev.name);
- dev->vfd = vfd;
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
@@ -1033,9 +1026,7 @@ static int vim2m_probe(struct platform_device *pdev)
err_m2m:
v4l2_m2m_release(dev->m2m_dev);
- video_unregister_device(dev->vfd);
-rel_vdev:
- video_device_release(vfd);
+ video_unregister_device(&dev->vfd);
unreg_dev:
v4l2_device_unregister(&dev->v4l2_dev);
@@ -1049,7 +1040,7 @@ static int vim2m_remove(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
v4l2_m2m_release(dev->m2m_dev);
del_timer_sync(&dev->timer);
- video_unregister_device(dev->vfd);
+ video_unregister_device(&dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
return 0;
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index a7e033a5d291..d33f16495dbc 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/font.h>
#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/videobuf2-vmalloc.h>
@@ -618,7 +619,23 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
Initialization and module stuff
------------------------------------------------------------------*/
-static int __init vivid_create_instance(int inst)
+static void vivid_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct vivid_dev *dev = container_of(v4l2_dev, struct vivid_dev, v4l2_dev);
+
+ vivid_free_controls(dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vfree(dev->scaled_line);
+ vfree(dev->blended_line);
+ vfree(dev->edid);
+ vfree(dev->bitmap_cap);
+ vfree(dev->bitmap_out);
+ tpg_free(&dev->tpg);
+ kfree(dev->query_dv_timings_qmenu);
+ kfree(dev);
+}
+
+static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
V4L2_DV_BT_CEA_1280X720P60;
@@ -646,9 +663,12 @@ static int __init vivid_create_instance(int inst)
/* register v4l2_device */
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
"%s-%03d", VIVID_MODULE_NAME, inst);
- ret = v4l2_device_register(NULL, &dev->v4l2_dev);
- if (ret)
- goto free_dev;
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ kfree(dev);
+ return ret;
+ }
+ dev->v4l2_dev.release = vivid_dev_release;
/* start detecting feature set */
@@ -1256,15 +1276,8 @@ unreg_dev:
video_unregister_device(&dev->vbi_cap_dev);
video_unregister_device(&dev->vid_out_dev);
video_unregister_device(&dev->vid_cap_dev);
- vivid_free_controls(dev);
- v4l2_device_unregister(&dev->v4l2_dev);
free_dev:
- vfree(dev->scaled_line);
- vfree(dev->blended_line);
- vfree(dev->edid);
- tpg_free(&dev->tpg);
- kfree(dev->query_dv_timings_qmenu);
- kfree(dev);
+ v4l2_device_put(&dev->v4l2_dev);
return ret;
}
@@ -1274,7 +1287,7 @@ free_dev:
will succeed. This is limited to the maximum number of devices that
videodev supports, which is equal to VIDEO_NUM_DEVICES.
*/
-static int __init vivid_init(void)
+static int vivid_probe(struct platform_device *pdev)
{
const struct font_desc *font = find_font("VGA8x16");
int ret = 0, i;
@@ -1289,7 +1302,7 @@ static int __init vivid_init(void)
n_devs = clamp_t(unsigned, n_devs, 1, VIVID_MAX_DEVS);
for (i = 0; i < n_devs; i++) {
- ret = vivid_create_instance(i);
+ ret = vivid_create_instance(pdev, i);
if (ret) {
/* If some instantiations succeeded, keep driver */
if (i)
@@ -1309,7 +1322,7 @@ static int __init vivid_init(void)
return ret;
}
-static void __exit vivid_exit(void)
+static int vivid_remove(struct platform_device *pdev)
{
struct vivid_dev *dev;
unsigned i;
@@ -1358,18 +1371,48 @@ static void __exit vivid_exit(void)
unregister_framebuffer(&dev->fb_info);
vivid_fb_release_buffers(dev);
}
- v4l2_device_unregister(&dev->v4l2_dev);
- vivid_free_controls(dev);
- vfree(dev->scaled_line);
- vfree(dev->blended_line);
- vfree(dev->edid);
- vfree(dev->bitmap_cap);
- vfree(dev->bitmap_out);
- tpg_free(&dev->tpg);
- kfree(dev->query_dv_timings_qmenu);
- kfree(dev);
+ v4l2_device_put(&dev->v4l2_dev);
vivid_devs[i] = NULL;
}
+ return 0;
+}
+
+static void vivid_pdev_release(struct device *dev)
+{
+}
+
+static struct platform_device vivid_pdev = {
+ .name = "vivid",
+ .dev.release = vivid_pdev_release,
+};
+
+static struct platform_driver vivid_pdrv = {
+ .probe = vivid_probe,
+ .remove = vivid_remove,
+ .driver = {
+ .name = "vivid",
+ },
+};
+
+static int __init vivid_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vivid_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&vivid_pdrv);
+ if (ret)
+ platform_device_unregister(&vivid_pdev);
+
+ return ret;
+}
+
+static void __exit vivid_exit(void)
+{
+ platform_driver_unregister(&vivid_pdrv);
+ platform_device_unregister(&vivid_pdev);
}
module_init(vivid_init);
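
The vivid rework above converts the module into a platform driver bound to a dummy platform device, and moves all per-instance freeing into a v4l2_device release callback so that both the probe error path and vivid_remove() can simply drop their reference with v4l2_device_put(); the state is destroyed only when the last reference goes away. A condensed sketch of that ownership pattern, with hypothetical names and only the lifetime-related pieces shown:

	static void my_dev_release(struct v4l2_device *v4l2_dev)
	{
		struct my_dev *dev = container_of(v4l2_dev, struct my_dev, v4l2_dev);

		v4l2_device_unregister(&dev->v4l2_dev);
		kfree(dev);
	}

	/* in probe, after v4l2_device_register(): */
	dev->v4l2_dev.release = my_dev_release;

	/* in remove and in error paths, instead of freeing directly: */
	v4l2_device_put(&dev->v4l2_dev);
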
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 4b497df4b6a4..9e15aee9a52e 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -79,12 +79,14 @@ extern unsigned vivid_debug;
struct vivid_fmt {
const char *name;
u32 fourcc; /* v4l2 format id */
- u8 depth;
bool is_yuv;
bool can_do_overlay;
+ u8 vdownsampling[TPG_MAX_PLANES];
u32 alpha_mask;
u8 planes;
- u32 data_offset[2];
+ u8 buffers;
+ u32 data_offset[TPG_MAX_PLANES];
+ u32 bit_depth[TPG_MAX_PLANES];
};
extern struct vivid_fmt vivid_formats[];
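
The vivid_fmt changes above replace the single depth field with per-plane bit depths and vertical downsampling factors, and separate colour planes from memory buffers so that single-buffer planar formats can be described. As an illustration only (field values here are indicative, not copied from the driver's table), a format such as NV12 would be expressed as two colour planes sharing one buffer, with the chroma plane vertically downsampled by two:

	{
		.fourcc		= V4L2_PIX_FMT_NV12,
		.is_yuv		= true,
		.planes		= 2,
		.buffers	= 1,
		.vdownsampling	= { 1, 2 },
		.bit_depth	= { 8, 8 },
	},
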
@@ -332,7 +334,7 @@ struct vivid_dev {
u32 ycbcr_enc_out;
u32 quantization_out;
u32 service_set_out;
- u32 bytesperline_out[2];
+ unsigned bytesperline_out[TPG_MAX_PLANES];
unsigned tv_field_out;
unsigned tv_audio_output;
bool vbi_out_have_wss;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 32a798f2d953..2b9070098b08 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -818,7 +818,7 @@ static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl)
dev->dvi_d_out = ctrl->val == V4L2_DV_TX_MODE_DVI_D;
if (!vivid_is_hdmi_out(dev))
break;
- if (!dev->dvi_d_out && (bt->standards & V4L2_DV_BT_STD_CEA861)) {
+ if (!dev->dvi_d_out && (bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) {
if (bt->width == 720 && bt->height <= 576)
dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
else
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 39a67cfae120..1727f5453f0b 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -229,14 +229,29 @@ static void vivid_precalc_copy_rects(struct vivid_dev *dev)
dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top);
}
+static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
+ unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h)
+{
+ unsigned i;
+ void *vbuf;
+
+ if (p == 0 || tpg_g_buffers(tpg) > 1)
+ return vb2_plane_vaddr(&buf->vb, p);
+ vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ for (i = 0; i < p; i++)
+ vbuf += bpl[i] * h / tpg->vdownsampling[i];
+ return vbuf;
+}
+
static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
struct vivid_buffer *vid_cap_buf)
{
bool blank = dev->must_blank[vid_cap_buf->vb.v4l2_buf.index];
struct tpg_data *tpg = &dev->tpg;
struct vivid_buffer *vid_out_buf = NULL;
- unsigned pixsize = tpg_g_twopixelsize(tpg, p) / 2;
- unsigned img_width = dev->compose_cap.width;
+ unsigned vdiv = dev->fmt_out->vdownsampling[p];
+ unsigned twopixsize = tpg_g_twopixelsize(tpg, p);
+ unsigned img_width = tpg_hdiv(tpg, p, dev->compose_cap.width);
unsigned img_height = dev->compose_cap.height;
unsigned stride_cap = tpg->bytesperline[p];
unsigned stride_out = dev->bytesperline_out[p];
@@ -255,6 +270,7 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
unsigned vid_overlay_fract_part = 0;
unsigned vid_overlay_y = 0;
unsigned vid_overlay_error = 0;
+ unsigned vid_cap_left = tpg_hdiv(tpg, p, dev->loop_vid_cap.left);
unsigned vid_cap_right;
bool quick;
@@ -269,25 +285,29 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
vid_cap_buf->vb.v4l2_buf.field = vid_out_buf->vb.v4l2_buf.field;
- voutbuf = vb2_plane_vaddr(&vid_out_buf->vb, p) +
- vid_out_buf->vb.v4l2_planes[p].data_offset;
- voutbuf += dev->loop_vid_out.left * pixsize + dev->loop_vid_out.top * stride_out;
- vcapbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride_cap;
+ voutbuf = plane_vaddr(tpg, vid_out_buf, p,
+ dev->bytesperline_out, dev->fmt_out_rect.height);
+ if (p < dev->fmt_out->buffers)
+ voutbuf += vid_out_buf->vb.v4l2_planes[p].data_offset;
+ voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
+ (dev->loop_vid_out.top / vdiv) * stride_out;
+ vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
+ (dev->compose_cap.top / vdiv) * stride_cap;
if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
/*
* If there is nothing to copy, then just fill the capture window
* with black.
*/
- for (y = 0; y < hmax; y++, vcapbuf += stride_cap)
- memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
+ for (y = 0; y < hmax / vdiv; y++, vcapbuf += stride_cap)
+ memcpy(vcapbuf, tpg->black_line[p], img_width);
return 0;
}
if (dev->overlay_out_enabled &&
dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
vosdbuf = dev->video_vbase;
- vosdbuf += dev->loop_fb_copy.left * pixsize +
+ vosdbuf += (dev->loop_fb_copy.left * twopixsize) / 2 +
dev->loop_fb_copy.top * stride_osd;
vid_overlay_int_part = dev->loop_vid_overlay.height /
dev->loop_vid_overlay_cap.height;
@@ -295,12 +315,12 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
dev->loop_vid_overlay_cap.height;
}
- vid_cap_right = dev->loop_vid_cap.left + dev->loop_vid_cap.width;
+ vid_cap_right = tpg_hdiv(tpg, p, dev->loop_vid_cap.left + dev->loop_vid_cap.width);
/* quick is true if no video scaling is needed */
quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;
dev->cur_scaled_line = dev->loop_vid_out.height;
- for (y = 0; y < hmax; y++, vcapbuf += stride_cap) {
+ for (y = 0; y < hmax; y += vdiv, vcapbuf += stride_cap) {
/* osdline is true if this line requires overlay blending */
bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;
@@ -311,34 +331,34 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
*/
if (y < dev->loop_vid_cap.top ||
y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
- memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
+ memcpy(vcapbuf, tpg->black_line[p], img_width);
continue;
}
/* fill the left border with black */
if (dev->loop_vid_cap.left)
- memcpy(vcapbuf, tpg->black_line[p], dev->loop_vid_cap.left * pixsize);
+ memcpy(vcapbuf, tpg->black_line[p], vid_cap_left);
/* fill the right border with black */
if (vid_cap_right < img_width)
- memcpy(vcapbuf + vid_cap_right * pixsize,
- tpg->black_line[p], (img_width - vid_cap_right) * pixsize);
+ memcpy(vcapbuf + vid_cap_right, tpg->black_line[p],
+ img_width - vid_cap_right);
if (quick && !osdline) {
- memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
+ memcpy(vcapbuf + vid_cap_left,
voutbuf + vid_out_y * stride_out,
- dev->loop_vid_cap.width * pixsize);
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
goto update_vid_out_y;
}
if (dev->cur_scaled_line == vid_out_y) {
- memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
- dev->scaled_line,
- dev->loop_vid_cap.width * pixsize);
+ memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
goto update_vid_out_y;
}
if (!osdline) {
scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
- dev->loop_vid_out.width, dev->loop_vid_cap.width,
+ tpg_hdiv(tpg, p, dev->loop_vid_out.width),
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width),
tpg_g_twopixelsize(tpg, p));
} else {
/*
@@ -346,7 +366,8 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
* loop_vid_overlay rectangle.
*/
unsigned offset =
- (dev->loop_vid_overlay.left - dev->loop_vid_copy.left) * pixsize;
+ ((dev->loop_vid_overlay.left - dev->loop_vid_copy.left) *
+ twopixsize) / 2;
u8 *osd = vosdbuf + vid_overlay_y * stride_osd;
scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
@@ -356,18 +377,17 @@ static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
dev->loop_vid_overlay.left,
dev->blended_line + offset, osd,
- dev->loop_vid_overlay.width, pixsize);
+ dev->loop_vid_overlay.width, twopixsize / 2);
else
memcpy(dev->blended_line + offset,
- osd, dev->loop_vid_overlay.width * pixsize);
+ osd, (dev->loop_vid_overlay.width * twopixsize) / 2);
scale_line(dev->blended_line, dev->scaled_line,
dev->loop_vid_copy.width, dev->loop_vid_cap.width,
tpg_g_twopixelsize(tpg, p));
}
dev->cur_scaled_line = vid_out_y;
- memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
- dev->scaled_line,
- dev->loop_vid_cap.width * pixsize);
+ memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
+ tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
update_vid_out_y:
if (osdline) {
@@ -380,21 +400,22 @@ update_vid_out_y:
}
vid_out_y += vid_out_int_part;
vid_out_error += vid_out_fract_part;
- if (vid_out_error >= dev->loop_vid_cap.height) {
- vid_out_error -= dev->loop_vid_cap.height;
+ if (vid_out_error >= dev->loop_vid_cap.height / vdiv) {
+ vid_out_error -= dev->loop_vid_cap.height / vdiv;
vid_out_y++;
}
}
if (!blank)
return 0;
- for (; y < img_height; y++, vcapbuf += stride_cap)
- memcpy(vcapbuf, tpg->contrast_line[p], img_width * pixsize);
+ for (; y < img_height; y += vdiv, vcapbuf += stride_cap)
+ memcpy(vcapbuf, tpg->contrast_line[p], img_width);
return 0;
}
static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
{
+ struct tpg_data *tpg = &dev->tpg;
unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
unsigned line_height = 16 / factor;
bool is_tv = vivid_is_sdtv_cap(dev);
@@ -427,7 +448,7 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
* standards.
*/
buf->vb.v4l2_buf.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
- V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
/*
* The sequence counter counts frames, not fields. So divide
* by two.
@@ -436,27 +457,29 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
} else {
buf->vb.v4l2_buf.field = dev->field_cap;
}
- tpg_s_field(&dev->tpg, buf->vb.v4l2_buf.field);
- tpg_s_perc_fill_blank(&dev->tpg, dev->must_blank[buf->vb.v4l2_buf.index]);
+ tpg_s_field(tpg, buf->vb.v4l2_buf.field,
+ dev->field_cap == V4L2_FIELD_ALTERNATE);
+ tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.v4l2_buf.index]);
vivid_precalc_copy_rects(dev);
- for (p = 0; p < tpg_g_planes(&dev->tpg); p++) {
- void *vbuf = vb2_plane_vaddr(&buf->vb, p);
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ void *vbuf = plane_vaddr(tpg, buf, p,
+ tpg->bytesperline, tpg->buf_height);
/*
* The first plane of a multiplanar format has a non-zero
* data_offset. This helps testing whether the application
* correctly supports non-zero data offsets.
*/
- if (dev->fmt_cap->data_offset[p]) {
+ if (p < tpg_g_buffers(tpg) && dev->fmt_cap->data_offset[p]) {
memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
dev->fmt_cap->data_offset[p]);
vbuf += dev->fmt_cap->data_offset[p];
}
- tpg_calc_text_basep(&dev->tpg, basep, p, vbuf);
+ tpg_calc_text_basep(tpg, basep, p, vbuf);
if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
- tpg_fillbuffer(&dev->tpg, vivid_get_std_cap(dev), p, vbuf);
+ tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev), p, vbuf);
}
dev->must_blank[buf->vb.v4l2_buf.index] = false;
@@ -475,12 +498,12 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
(dev->field_cap == V4L2_FIELD_ALTERNATE) ?
(buf->vb.v4l2_buf.field == V4L2_FIELD_TOP ?
" top" : " bottom") : "");
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
}
if (dev->osd_mode == 0) {
snprintf(str, sizeof(str), " %dx%d, input %d ",
dev->src_rect.width, dev->src_rect.height, dev->input);
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
gain = v4l2_ctrl_g_ctrl(dev->gain);
mutex_lock(dev->ctrl_hdl_user_vid.lock);
@@ -490,38 +513,38 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
dev->contrast->cur.val,
dev->saturation->cur.val,
dev->hue->cur.val);
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
snprintf(str, sizeof(str),
" autogain %d, gain %3d, alpha 0x%02x ",
dev->autogain->cur.val, gain, dev->alpha->cur.val);
mutex_unlock(dev->ctrl_hdl_user_vid.lock);
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
mutex_lock(dev->ctrl_hdl_user_aud.lock);
snprintf(str, sizeof(str),
" volume %3d, mute %d ",
dev->volume->cur.val, dev->mute->cur.val);
mutex_unlock(dev->ctrl_hdl_user_aud.lock);
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
mutex_lock(dev->ctrl_hdl_user_gen.lock);
snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
dev->int32->cur.val,
*dev->int64->p_cur.p_s64,
dev->bitmask->cur.val);
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
dev->boolean->cur.val,
dev->menu->qmenu[dev->menu->cur.val],
dev->string->p_cur.p_char);
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
dev->int_menu->qmenu_int[dev->int_menu->cur.val],
dev->int_menu->cur.val);
mutex_unlock(dev->ctrl_hdl_user_gen.lock);
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
if (dev->button_pressed) {
dev->button_pressed--;
snprintf(str, sizeof(str), " button pressed!");
- tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
}
}
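
plane_vaddr(), added at the top of this file, handles formats whose colour planes all live in a single memory buffer (tpg_g_buffers() == 1): the start of plane p is found by skipping the preceding planes, each of which occupies bytesperline * height / vdownsampling bytes. A standalone sketch of the same arithmetic; the helper is hypothetical and the NV12 numbers are just an example.

	/* Byte offset of colour plane p when all planes share one buffer. */
	static unsigned int plane_offset(const unsigned int *bpl,
					 const unsigned int *vdown,
					 unsigned int height, unsigned int p)
	{
		unsigned int i, off = 0;

		for (i = 0; i < p; i++)
			off += bpl[i] * height / vdown[i];
		/* e.g. 640x480 NV12 (bpl 640, vdown {1,2}): plane 1 starts at 307200 */
		return off;
	}
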
@@ -585,6 +608,12 @@ static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
bool quick = dev->bitmap_cap == NULL && dev->clipcount_cap == 0;
int x, y, w, out_x = 0;
+ /*
+ * Overlay is only supported for formats that have a twopixelsize
+ * of at least 2. Warn and bail out if that's not the case.

+ */
+ if (WARN_ON(pixsize == 0))
+ return;
if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
dev->overlay_cap_field != buf->vb.v4l2_buf.field)
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 4af55f18829f..caf131666e37 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -27,6 +27,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
+#include <linux/fixp-arith.h>
#include "vivid-core.h"
#include "vivid-ctrls.h"
@@ -423,40 +424,19 @@ int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
return 0;
}
-#define FIXP_FRAC (1 << 15)
-#define FIXP_PI ((int)(FIXP_FRAC * 3.141592653589))
-
-/* cos() from cx88 driver: cx88-dsp.c */
-static s32 fixp_cos(unsigned int x)
-{
- u32 t2, t4, t6, t8;
- u16 period = x / FIXP_PI;
-
- if (period % 2)
- return -fixp_cos(x - FIXP_PI);
- x = x % FIXP_PI;
- if (x > FIXP_PI/2)
- return -fixp_cos(FIXP_PI/2 - (x % (FIXP_PI/2)));
- /* Now x is between 0 and FIXP_PI/2.
- * To calculate cos(x) we use it's Taylor polinom. */
- t2 = x*x/FIXP_FRAC/2;
- t4 = t2*x/FIXP_FRAC*x/FIXP_FRAC/3/4;
- t6 = t4*x/FIXP_FRAC*x/FIXP_FRAC/5/6;
- t8 = t6*x/FIXP_FRAC*x/FIXP_FRAC/7/8;
- return FIXP_FRAC-t2+t4-t6+t8;
-}
-
-static inline s32 fixp_sin(unsigned int x)
-{
- return -fixp_cos(x + (FIXP_PI / 2));
-}
+#define FIXP_N (15)
+#define FIXP_FRAC (1 << FIXP_N)
+#define FIXP_2PI ((int)(2 * 3.141592653589 * FIXP_FRAC))
void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
{
u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0);
unsigned long i;
unsigned long plane_size = vb2_plane_size(&buf->vb, 0);
- int fixp_src_phase_step, fixp_i, fixp_q;
+ s32 src_phase_step;
+ s32 mod_phase_step;
+ s32 fixp_i;
+ s32 fixp_q;
/*
* TODO: Generated beep tone goes very crackly when sample rate is
@@ -466,28 +446,36 @@ void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
/* calculate phase step */
#define BEEP_FREQ 1000 /* 1kHz beep */
- fixp_src_phase_step = DIV_ROUND_CLOSEST(2 * FIXP_PI * BEEP_FREQ,
+ src_phase_step = DIV_ROUND_CLOSEST(FIXP_2PI * BEEP_FREQ,
dev->sdr_adc_freq);
for (i = 0; i < plane_size; i += 2) {
- dev->sdr_fixp_mod_phase += fixp_cos(dev->sdr_fixp_src_phase);
- dev->sdr_fixp_src_phase += fixp_src_phase_step;
+ mod_phase_step = fixp_cos32_rad(dev->sdr_fixp_src_phase,
+ FIXP_2PI) >> (31 - FIXP_N);
+
+ dev->sdr_fixp_src_phase += src_phase_step;
+ dev->sdr_fixp_mod_phase += mod_phase_step / 4;
/*
* Transfer phases to [0 / 2xPI] in order to avoid variable
* overflow and make it suitable for cosine implementation
* used, which does not support negative angles.
*/
- while (dev->sdr_fixp_mod_phase < (0 * FIXP_PI))
- dev->sdr_fixp_mod_phase += (2 * FIXP_PI);
- while (dev->sdr_fixp_mod_phase > (2 * FIXP_PI))
- dev->sdr_fixp_mod_phase -= (2 * FIXP_PI);
+ while (dev->sdr_fixp_mod_phase < FIXP_2PI)
+ dev->sdr_fixp_mod_phase += FIXP_2PI;
+ while (dev->sdr_fixp_mod_phase > FIXP_2PI)
+ dev->sdr_fixp_mod_phase -= FIXP_2PI;
+
+ while (dev->sdr_fixp_src_phase > FIXP_2PI)
+ dev->sdr_fixp_src_phase -= FIXP_2PI;
- while (dev->sdr_fixp_src_phase > (2 * FIXP_PI))
- dev->sdr_fixp_src_phase -= (2 * FIXP_PI);
+ fixp_i = fixp_cos32_rad(dev->sdr_fixp_mod_phase, FIXP_2PI);
+ fixp_q = fixp_sin32_rad(dev->sdr_fixp_mod_phase, FIXP_2PI);
- fixp_i = fixp_cos(dev->sdr_fixp_mod_phase);
- fixp_q = fixp_sin(dev->sdr_fixp_mod_phase);
+ /* Normalize fraction values represented with 32 bit precision
+ * to fixed point representation with FIXP_N bits */
+ fixp_i >>= (31 - FIXP_N);
+ fixp_q >>= (31 - FIXP_N);
/* convert 'fixp float' to u8 */
/* u8 = X * 127.5f + 127.5f; where X is float [-1.0 / +1.0] */
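
The SDR capture rework above drops the driver's private Taylor-series cosine and uses the shared helpers from <linux/fixp-arith.h>: fixp_sin32_rad()/fixp_cos32_rad() take a phase expressed against a caller-chosen two-pi constant and return a result scaled to 32-bit precision, hence the >> (31 - FIXP_N) shifts back down to the driver's 15-bit scale. A hedged sketch of turning such a value into an unsigned 8-bit I/Q sample; the helper name and exact scaling are illustrative, not the driver's literal code.

	#include <linux/fixp-arith.h>

	static u8 sdr_sample(u32 phase)		/* phase in [0, FIXP_2PI) */
	{
		/* 32-bit result normalized to FIXP_N-bit fixed point */
		s32 s = fixp_sin32_rad(phase, FIXP_2PI) >> (31 - FIXP_N);

		return 128 + (s * 127) / FIXP_FRAC;	/* map [-1.0, 1.0] to offset binary */
	}
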
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index 34493f435d5a..cb766eb154e7 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -35,7 +35,10 @@ const char * const tpg_pattern_strings[] = {
"100% Green",
"100% Blue",
"16x16 Checkers",
+ "2x2 Checkers",
"1x1 Checkers",
+ "2x2 Red/Green Checkers",
+ "1x1 Red/Green Checkers",
"Alternating Hor Lines",
"Alternating Vert Lines",
"One Pixel Wide Cross",
@@ -120,15 +123,20 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
tpg->max_line_width = max_w;
for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
- unsigned pixelsz = plane ? 1 : 4;
+ unsigned pixelsz = plane ? 2 : 4;
tpg->lines[pat][plane] = vzalloc(max_w * 2 * pixelsz);
if (!tpg->lines[pat][plane])
return -ENOMEM;
+ if (plane == 0)
+ continue;
+ tpg->downsampled_lines[pat][plane] = vzalloc(max_w * 2 * pixelsz);
+ if (!tpg->downsampled_lines[pat][plane])
+ return -ENOMEM;
}
}
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
- unsigned pixelsz = plane ? 1 : 4;
+ unsigned pixelsz = plane ? 2 : 4;
tpg->contrast_line[plane] = vzalloc(max_w * pixelsz);
if (!tpg->contrast_line[plane])
@@ -152,6 +160,10 @@ void tpg_free(struct tpg_data *tpg)
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
vfree(tpg->lines[pat][plane]);
tpg->lines[pat][plane] = NULL;
+ if (plane == 0)
+ continue;
+ vfree(tpg->downsampled_lines[pat][plane]);
+ tpg->downsampled_lines[pat][plane] = NULL;
}
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
vfree(tpg->contrast_line[plane]);
@@ -167,14 +179,38 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
{
tpg->fourcc = fourcc;
tpg->planes = 1;
+ tpg->buffers = 1;
tpg->recalc_colors = true;
+ tpg->interleaved = false;
+ tpg->vdownsampling[0] = 1;
+ tpg->hdownsampling[0] = 1;
+ tpg->hmask[0] = ~0;
+ tpg->hmask[1] = ~0;
+ tpg->hmask[2] = ~0;
+
switch (fourcc) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ tpg->interleaved = true;
+ tpg->vdownsampling[1] = 1;
+ tpg->hdownsampling[1] = 1;
+ tpg->planes = 2;
+ /* fall through */
+ case V4L2_PIX_FMT_RGB332:
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_RGB565X:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_XRGB444:
+ case V4L2_PIX_FMT_ARGB444:
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
case V4L2_PIX_FMT_ARGB555:
case V4L2_PIX_FMT_RGB555X:
+ case V4L2_PIX_FMT_XRGB555X:
+ case V4L2_PIX_FMT_ARGB555X:
+ case V4L2_PIX_FMT_BGR666:
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_BGR24:
case V4L2_PIX_FMT_RGB32:
@@ -183,16 +219,72 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_GREY:
tpg->is_yuv = false;
break;
+ case V4L2_PIX_FMT_YUV444:
+ case V4L2_PIX_FMT_YUV555:
+ case V4L2_PIX_FMT_YUV565:
+ case V4L2_PIX_FMT_YUV32:
+ tpg->is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YVU420M:
+ tpg->buffers = 3;
+ /* fall through */
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ tpg->vdownsampling[1] = 2;
+ tpg->vdownsampling[2] = 2;
+ tpg->hdownsampling[1] = 2;
+ tpg->hdownsampling[2] = 2;
+ tpg->planes = 3;
+ tpg->is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ tpg->vdownsampling[1] = 1;
+ tpg->vdownsampling[2] = 1;
+ tpg->hdownsampling[1] = 2;
+ tpg->hdownsampling[2] = 2;
+ tpg->planes = 3;
+ tpg->is_yuv = true;
+ break;
case V4L2_PIX_FMT_NV16M:
case V4L2_PIX_FMT_NV61M:
+ tpg->buffers = 2;
+ /* fall through */
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ tpg->vdownsampling[1] = 1;
+ tpg->hdownsampling[1] = 1;
+ tpg->hmask[1] = ~1;
tpg->planes = 2;
- /* fall-through */
+ tpg->is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ tpg->buffers = 2;
+ /* fall through */
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ tpg->vdownsampling[1] = 2;
+ tpg->hdownsampling[1] = 1;
+ tpg->hmask[1] = ~1;
+ tpg->planes = 2;
+ tpg->is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ tpg->vdownsampling[1] = 1;
+ tpg->hdownsampling[1] = 1;
+ tpg->planes = 2;
+ tpg->is_yuv = true;
+ break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
+ tpg->hmask[0] = ~1;
tpg->is_yuv = true;
break;
default:
@@ -200,35 +292,75 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
}
switch (fourcc) {
+ case V4L2_PIX_FMT_RGB332:
+ tpg->twopixelsize[0] = 2;
+ break;
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_RGB565X:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_XRGB444:
+ case V4L2_PIX_FMT_ARGB444:
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
case V4L2_PIX_FMT_ARGB555:
case V4L2_PIX_FMT_RGB555X:
+ case V4L2_PIX_FMT_XRGB555X:
+ case V4L2_PIX_FMT_ARGB555X:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUV444:
+ case V4L2_PIX_FMT_YUV555:
+ case V4L2_PIX_FMT_YUV565:
tpg->twopixelsize[0] = 2 * 2;
break;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_BGR24:
tpg->twopixelsize[0] = 2 * 3;
break;
+ case V4L2_PIX_FMT_BGR666:
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_YUV32:
tpg->twopixelsize[0] = 2 * 4;
break;
+ case V4L2_PIX_FMT_GREY:
+ tpg->twopixelsize[0] = 2;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV16M:
case V4L2_PIX_FMT_NV61M:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
tpg->twopixelsize[0] = 2;
tpg->twopixelsize[1] = 2;
break;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YVU420M:
+ tpg->twopixelsize[0] = 2;
+ tpg->twopixelsize[1] = 2;
+ tpg->twopixelsize[2] = 2;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ tpg->twopixelsize[0] = 2;
+ tpg->twopixelsize[1] = 4;
+ break;
}
return true;
}
@@ -267,7 +399,8 @@ void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
tpg->compose.width = width;
tpg->compose.height = tpg->buf_height;
for (p = 0; p < tpg->planes; p++)
- tpg->bytesperline[p] = width * tpg->twopixelsize[p] / 2;
+ tpg->bytesperline[p] = (width * tpg->twopixelsize[p]) /
+ (2 * tpg->hdownsampling[p]);
tpg->recalc_square_border = true;
}
@@ -347,9 +480,9 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
{ COEFF(0.5, 224), COEFF(-0.445, 224), COEFF(-0.055, 224) },
};
static const int bt2020[3][3] = {
- { COEFF(0.2726, 219), COEFF(0.6780, 219), COEFF(0.0593, 219) },
+ { COEFF(0.2627, 219), COEFF(0.6780, 219), COEFF(0.0593, 219) },
{ COEFF(-0.1396, 224), COEFF(-0.3604, 224), COEFF(0.5, 224) },
- { COEFF(0.5, 224), COEFF(-0.4629, 224), COEFF(-0.0405, 224) },
+ { COEFF(0.5, 224), COEFF(-0.4598, 224), COEFF(-0.0402, 224) },
};
bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
unsigned y_offset = full ? 0 : 16;
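/*
 * A quick standalone check of where the corrected BT.2020 numbers above
 * come from: with the luma weights Kr = 0.2627, Kg = 0.6780, Kb = 0.0593,
 * the chroma rows are 0.5 * (B - Y) / (1 - Kb) and 0.5 * (R - Y) / (1 - Kr).
 * Illustrative only, not driver code.
 */
#include <stdio.h>

int main(void)
{
	const double kr = 0.2627, kg = 0.6780, kb = 0.0593;

	printf("Cb row: %.4f %.4f %.4f\n",
	       -0.5 * kr / (1.0 - kb), -0.5 * kg / (1.0 - kb), 0.5);
	printf("Cr row: %.4f %.4f %.4f\n",
	       0.5, -0.5 * kg / (1.0 - kr), -0.5 * kb / (1.0 - kr));
	/* prints -0.1396 -0.3604 0.5000 and 0.5000 -0.4598 -0.0402 */
	return 0;
}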
@@ -524,10 +657,10 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g <<= 4;
b <<= 4;
}
- if (tpg->qual == TPG_QUAL_GRAY) {
+ if (tpg->qual == TPG_QUAL_GRAY || tpg->fourcc == V4L2_PIX_FMT_GREY) {
/* Rec. 709 Luma function */
/* (0.2126, 0.7152, 0.0722) * (255 * 256) */
- r = g = b = ((13879 * r + 46688 * g + 4713 * b) >> 16) + (16 << 4);
+ r = g = b = (13879 * r + 46688 * g + 4713 * b) >> 16;
}
/*
@@ -601,9 +734,29 @@ static void precalculate_color(struct tpg_data *tpg, int k)
cb = clamp(cb, 16 << 4, 240 << 4);
cr = clamp(cr, 16 << 4, 240 << 4);
}
- tpg->colors[k][0] = clamp(y >> 4, 1, 254);
- tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
- tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
+ y = clamp(y >> 4, 1, 254);
+ cb = clamp(cb >> 4, 1, 254);
+ cr = clamp(cr >> 4, 1, 254);
+ switch (tpg->fourcc) {
+ case V4L2_PIX_FMT_YUV444:
+ y >>= 4;
+ cb >>= 4;
+ cr >>= 4;
+ break;
+ case V4L2_PIX_FMT_YUV555:
+ y >>= 3;
+ cb >>= 3;
+ cr >>= 3;
+ break;
+ case V4L2_PIX_FMT_YUV565:
+ y >>= 3;
+ cb >>= 2;
+ cr >>= 3;
+ break;
+ }
+ tpg->colors[k][0] = y;
+ tpg->colors[k][1] = cb;
+ tpg->colors[k][2] = cr;
} else {
if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
r = (r * 219) / 255 + (16 << 4);
@@ -611,20 +764,39 @@ static void precalculate_color(struct tpg_data *tpg, int k)
b = (b * 219) / 255 + (16 << 4);
}
switch (tpg->fourcc) {
+ case V4L2_PIX_FMT_RGB332:
+ r >>= 9;
+ g >>= 9;
+ b >>= 10;
+ break;
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_RGB565X:
r >>= 7;
g >>= 6;
b >>= 7;
break;
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_XRGB444:
+ case V4L2_PIX_FMT_ARGB444:
+ r >>= 8;
+ g >>= 8;
+ b >>= 8;
+ break;
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
case V4L2_PIX_FMT_ARGB555:
case V4L2_PIX_FMT_RGB555X:
+ case V4L2_PIX_FMT_XRGB555X:
+ case V4L2_PIX_FMT_ARGB555X:
r >>= 7;
g >>= 7;
b >>= 7;
break;
+ case V4L2_PIX_FMT_BGR666:
+ r >>= 6;
+ g >>= 6;
+ b >>= 6;
+ break;
default:
r >>= 4;
g >>= 4;
@@ -665,31 +837,120 @@ static void gen_twopix(struct tpg_data *tpg,
b_v = tpg->colors[color][2]; /* B or precalculated V */
switch (tpg->fourcc) {
+ case V4L2_PIX_FMT_GREY:
+ buf[0][offset] = r_y;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YUV420M:
+ buf[0][offset] = r_y;
+ if (odd) {
+ buf[1][0] = (buf[1][0] + g_u) / 2;
+ buf[2][0] = (buf[2][0] + b_v) / 2;
+ buf[1][1] = buf[1][0];
+ buf[2][1] = buf[2][0];
+ break;
+ }
+ buf[1][0] = g_u;
+ buf[2][0] = b_v;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YVU420M:
+ buf[0][offset] = r_y;
+ if (odd) {
+ buf[1][0] = (buf[1][0] + b_v) / 2;
+ buf[2][0] = (buf[2][0] + g_u) / 2;
+ buf[1][1] = buf[1][0];
+ buf[2][1] = buf[2][0];
+ break;
+ }
+ buf[1][0] = b_v;
+ buf[2][0] = g_u;
+ break;
+
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV16M:
buf[0][offset] = r_y;
- buf[1][offset] = odd ? b_v : g_u;
+ if (odd) {
+ buf[1][0] = (buf[1][0] + g_u) / 2;
+ buf[1][1] = (buf[1][1] + b_v) / 2;
+ break;
+ }
+ buf[1][0] = g_u;
+ buf[1][1] = b_v;
break;
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV21M:
+ case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV61M:
buf[0][offset] = r_y;
- buf[1][offset] = odd ? g_u : b_v;
+ if (odd) {
+ buf[1][0] = (buf[1][0] + b_v) / 2;
+ buf[1][1] = (buf[1][1] + g_u) / 2;
+ break;
+ }
+ buf[1][0] = b_v;
+ buf[1][1] = g_u;
+ break;
+
+ case V4L2_PIX_FMT_NV24:
+ buf[0][offset] = r_y;
+ buf[1][2 * offset] = g_u;
+ buf[1][2 * offset + 1] = b_v;
+ break;
+
+ case V4L2_PIX_FMT_NV42:
+ buf[0][offset] = r_y;
+ buf[1][2 * offset] = b_v;
+ buf[1][2 * offset + 1] = g_u;
break;
case V4L2_PIX_FMT_YUYV:
buf[0][offset] = r_y;
- buf[0][offset + 1] = odd ? b_v : g_u;
+ if (odd) {
+ buf[0][1] = (buf[0][1] + g_u) / 2;
+ buf[0][3] = (buf[0][3] + b_v) / 2;
+ break;
+ }
+ buf[0][1] = g_u;
+ buf[0][3] = b_v;
break;
case V4L2_PIX_FMT_UYVY:
- buf[0][offset] = odd ? b_v : g_u;
buf[0][offset + 1] = r_y;
+ if (odd) {
+ buf[0][0] = (buf[0][0] + g_u) / 2;
+ buf[0][2] = (buf[0][2] + b_v) / 2;
+ break;
+ }
+ buf[0][0] = g_u;
+ buf[0][2] = b_v;
break;
case V4L2_PIX_FMT_YVYU:
buf[0][offset] = r_y;
- buf[0][offset + 1] = odd ? g_u : b_v;
+ if (odd) {
+ buf[0][1] = (buf[0][1] + b_v) / 2;
+ buf[0][3] = (buf[0][3] + g_u) / 2;
+ break;
+ }
+ buf[0][1] = b_v;
+ buf[0][3] = g_u;
break;
case V4L2_PIX_FMT_VYUY:
- buf[0][offset] = odd ? g_u : b_v;
buf[0][offset + 1] = r_y;
+ if (odd) {
+ buf[0][0] = (buf[0][0] + b_v) / 2;
+ buf[0][2] = (buf[0][2] + g_u) / 2;
+ break;
+ }
+ buf[0][0] = b_v;
+ buf[0][2] = g_u;
+ break;
+ case V4L2_PIX_FMT_RGB332:
+ buf[0][offset] = (r_y << 5) | (g_u << 2) | b_v;
break;
+ case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_RGB565:
buf[0][offset] = (g_u << 5) | b_v;
buf[0][offset + 1] = (r_y << 3) | (g_u >> 3);
@@ -698,15 +959,29 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset] = (r_y << 3) | (g_u >> 3);
buf[0][offset + 1] = (g_u << 5) | b_v;
break;
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_XRGB444:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_YUV444:
+ case V4L2_PIX_FMT_ARGB444:
+ buf[0][offset] = (g_u << 4) | b_v;
+ buf[0][offset + 1] = (alpha & 0xf0) | r_y;
+ break;
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
alpha = 0;
/* fall through */
+ case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_ARGB555:
buf[0][offset] = (g_u << 5) | b_v;
buf[0][offset + 1] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
break;
case V4L2_PIX_FMT_RGB555X:
+ case V4L2_PIX_FMT_XRGB555X:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_ARGB555X:
buf[0][offset] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
buf[0][offset + 1] = (g_u << 5) | b_v;
break;
@@ -720,10 +995,17 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset + 1] = g_u;
buf[0][offset + 2] = r_y;
break;
+ case V4L2_PIX_FMT_BGR666:
+ buf[0][offset] = (b_v << 2) | (g_u >> 4);
+ buf[0][offset + 1] = (g_u << 4) | (r_y >> 2);
+ buf[0][offset + 2] = r_y << 6;
+ buf[0][offset + 3] = 0;
+ break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
alpha = 0;
/* fall through */
+ case V4L2_PIX_FMT_YUV32:
case V4L2_PIX_FMT_ARGB32:
buf[0][offset] = alpha;
buf[0][offset + 1] = r_y;
@@ -740,15 +1022,47 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset + 2] = r_y;
buf[0][offset + 3] = alpha;
break;
+ case V4L2_PIX_FMT_SBGGR8:
+ buf[0][offset] = odd ? g_u : b_v;
+ buf[1][offset] = odd ? r_y : g_u;
+ break;
+ case V4L2_PIX_FMT_SGBRG8:
+ buf[0][offset] = odd ? b_v : g_u;
+ buf[1][offset] = odd ? g_u : r_y;
+ break;
+ case V4L2_PIX_FMT_SGRBG8:
+ buf[0][offset] = odd ? r_y : g_u;
+ buf[1][offset] = odd ? g_u : b_v;
+ break;
+ case V4L2_PIX_FMT_SRGGB8:
+ buf[0][offset] = odd ? g_u : r_y;
+ buf[1][offset] = odd ? b_v : g_u;
+ break;
+ }
+}
+
+unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line)
+{
+ switch (tpg->fourcc) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return buf_line & 1;
+ default:
+ return 0;
}
}
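/*
 * Sketch of the Bayer interleaving selected above: for the 8-bit Bayer
 * formats the generator keeps two pattern "planes" that are really the
 * even and odd sensor lines, and tpg_g_interleaved_plane() picks between
 * them with (buf_line & 1). Assuming SBGGR8, the repeating 2x2 tile is
 * B G / G R (standalone illustration, not driver code):
 */
#include <stdio.h>

int main(void)
{
	const char *even = "BGBGBGBG";	/* plane 0: B G B G ... */
	const char *odd  = "GRGRGRGR";	/* plane 1: G R G R ... */
	unsigned line;

	for (line = 0; line < 4; line++)
		printf("buffer line %u -> plane %u: %s\n",
		       line, line & 1, (line & 1) ? odd : even);
	return 0;
}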
/* Return how many pattern lines are used by the current pattern. */
-static unsigned tpg_get_pat_lines(struct tpg_data *tpg)
+static unsigned tpg_get_pat_lines(const struct tpg_data *tpg)
{
switch (tpg->pattern) {
case TPG_PAT_CHECKERS_16X16:
+ case TPG_PAT_CHECKERS_2X2:
case TPG_PAT_CHECKERS_1X1:
+ case TPG_PAT_COLOR_CHECKERS_2X2:
+ case TPG_PAT_COLOR_CHECKERS_1X1:
case TPG_PAT_ALTERNATING_HLINES:
case TPG_PAT_CROSS_1_PIXEL:
case TPG_PAT_CROSS_2_PIXELS:
@@ -763,14 +1077,18 @@ static unsigned tpg_get_pat_lines(struct tpg_data *tpg)
}
/* Which pattern line should be used for the given frame line. */
-static unsigned tpg_get_pat_line(struct tpg_data *tpg, unsigned line)
+static unsigned tpg_get_pat_line(const struct tpg_data *tpg, unsigned line)
{
switch (tpg->pattern) {
case TPG_PAT_CHECKERS_16X16:
return (line >> 4) & 1;
case TPG_PAT_CHECKERS_1X1:
+ case TPG_PAT_COLOR_CHECKERS_1X1:
case TPG_PAT_ALTERNATING_HLINES:
return line & 1;
+ case TPG_PAT_CHECKERS_2X2:
+ case TPG_PAT_COLOR_CHECKERS_2X2:
+ return (line & 2) >> 1;
case TPG_PAT_100_COLORSQUARES:
case TPG_PAT_100_HCOLORBAR:
return (line * 8) / tpg->src_height;
@@ -789,7 +1107,8 @@ static unsigned tpg_get_pat_line(struct tpg_data *tpg, unsigned line)
* Which color should be used for the given pattern line and X coordinate.
* Note: x is in the range 0 to 2 * tpg->src_width.
*/
-static enum tpg_color tpg_get_color(struct tpg_data *tpg, unsigned pat_line, unsigned x)
+static enum tpg_color tpg_get_color(const struct tpg_data *tpg,
+ unsigned pat_line, unsigned x)
{
/* The maximum number of bars is TPG_COLOR_MAX; otherwise the input print code
should be modified */
@@ -836,6 +1155,15 @@ static enum tpg_color tpg_get_color(struct tpg_data *tpg, unsigned pat_line, uns
case TPG_PAT_CHECKERS_1X1:
return ((x & 1) ^ (pat_line & 1)) ?
TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+ case TPG_PAT_COLOR_CHECKERS_1X1:
+ return ((x & 1) ^ (pat_line & 1)) ?
+ TPG_COLOR_100_RED : TPG_COLOR_100_BLUE;
+ case TPG_PAT_CHECKERS_2X2:
+ return (((x >> 1) & 1) ^ (pat_line & 1)) ?
+ TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+ case TPG_PAT_COLOR_CHECKERS_2X2:
+ return (((x >> 1) & 1) ^ (pat_line & 1)) ?
+ TPG_COLOR_100_RED : TPG_COLOR_100_BLUE;
case TPG_PAT_ALTERNATING_HLINES:
return pat_line ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
case TPG_PAT_ALTERNATING_VLINES:
@@ -948,6 +1276,7 @@ static void tpg_calculate_square_border(struct tpg_data *tpg)
static void tpg_precalculate_line(struct tpg_data *tpg)
{
enum tpg_color contrast;
+ u8 pix[TPG_MAX_PLANES][8];
unsigned pat;
unsigned p;
unsigned x;
@@ -974,7 +1303,6 @@ static void tpg_precalculate_line(struct tpg_data *tpg)
for (x = 0; x < tpg->scaled_width * 2; x += 2) {
unsigned real_x = src_x;
enum tpg_color color1, color2;
- u8 pix[TPG_MAX_PLANES][8];
real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x;
color1 = tpg_get_color(tpg, pat, real_x);
@@ -1001,39 +1329,53 @@ static void tpg_precalculate_line(struct tpg_data *tpg)
gen_twopix(tpg, pix, tpg->hflip ? color1 : color2, 1);
for (p = 0; p < tpg->planes; p++) {
unsigned twopixsize = tpg->twopixelsize[p];
- u8 *pos = tpg->lines[pat][p] + x * twopixsize / 2;
+ unsigned hdiv = tpg->hdownsampling[p];
+ u8 *pos = tpg->lines[pat][p] + tpg_hdiv(tpg, p, x);
- memcpy(pos, pix[p], twopixsize);
+ memcpy(pos, pix[p], twopixsize / hdiv);
}
}
}
- for (x = 0; x < tpg->scaled_width; x += 2) {
- u8 pix[TPG_MAX_PLANES][8];
- gen_twopix(tpg, pix, contrast, 0);
- gen_twopix(tpg, pix, contrast, 1);
- for (p = 0; p < tpg->planes; p++) {
- unsigned twopixsize = tpg->twopixelsize[p];
- u8 *pos = tpg->contrast_line[p] + x * twopixsize / 2;
+ if (tpg->vdownsampling[tpg->planes - 1] > 1) {
+ unsigned pat_lines = tpg_get_pat_lines(tpg);
- memcpy(pos, pix[p], twopixsize);
+ for (pat = 0; pat < pat_lines; pat++) {
+ unsigned next_pat = (pat + 1) % pat_lines;
+
+ for (p = 1; p < tpg->planes; p++) {
+ unsigned w = tpg_hdiv(tpg, p, tpg->scaled_width * 2);
+ u8 *pos1 = tpg->lines[pat][p];
+ u8 *pos2 = tpg->lines[next_pat][p];
+ u8 *dest = tpg->downsampled_lines[pat][p];
+
+ for (x = 0; x < w; x++, pos1++, pos2++, dest++)
+ *dest = ((u16)*pos1 + (u16)*pos2) / 2;
+ }
}
}
- for (x = 0; x < tpg->scaled_width; x += 2) {
- u8 pix[TPG_MAX_PLANES][8];
- gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 0);
- gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 1);
- for (p = 0; p < tpg->planes; p++) {
- unsigned twopixsize = tpg->twopixelsize[p];
- u8 *pos = tpg->black_line[p] + x * twopixsize / 2;
+ gen_twopix(tpg, pix, contrast, 0);
+ gen_twopix(tpg, pix, contrast, 1);
+ for (p = 0; p < tpg->planes; p++) {
+ unsigned twopixsize = tpg->twopixelsize[p];
+ u8 *pos = tpg->contrast_line[p];
+ for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize)
+ memcpy(pos, pix[p], twopixsize);
+ }
+
+ gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 0);
+ gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 1);
+ for (p = 0; p < tpg->planes; p++) {
+ unsigned twopixsize = tpg->twopixelsize[p];
+ u8 *pos = tpg->black_line[p];
+
+ for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize)
memcpy(pos, pix[p], twopixsize);
- }
}
- for (x = 0; x < tpg->scaled_width * 2; x += 2) {
- u8 pix[TPG_MAX_PLANES][8];
+ for (x = 0; x < tpg->scaled_width * 2; x += 2) {
gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 0);
gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 1);
for (p = 0; p < tpg->planes; p++) {
@@ -1043,6 +1385,7 @@ static void tpg_precalculate_line(struct tpg_data *tpg)
memcpy(pos, pix[p], twopixsize);
}
}
+
gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 0);
gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 1);
gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 0);
@@ -1052,8 +1395,8 @@ static void tpg_precalculate_line(struct tpg_data *tpg)
/* need this to do rgb24 rendering */
typedef struct { u16 __; u8 _; } __packed x24;
-void tpg_gen_text(struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
- int y, int x, char *text)
+void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
+ int y, int x, char *text)
{
int line;
unsigned step = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
@@ -1083,24 +1426,37 @@ void tpg_gen_text(struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
div = 2;
for (p = 0; p < tpg->planes; p++) {
- /* Print stream time */
+ unsigned vdiv = tpg->vdownsampling[p];
+ unsigned hdiv = tpg->hdownsampling[p];
+
+ /* Print text */
#define PRINTSTR(PIXTYPE) do { \
PIXTYPE fg; \
PIXTYPE bg; \
memcpy(&fg, tpg->textfg[p], sizeof(PIXTYPE)); \
memcpy(&bg, tpg->textbg[p], sizeof(PIXTYPE)); \
\
- for (line = first; line < 16; line += step) { \
+ for (line = first; line < 16; line += vdiv * step) { \
int l = tpg->vflip ? 15 - line : line; \
- PIXTYPE *pos = (PIXTYPE *)(basep[p][line & 1] + \
- ((y * step + l) / div) * tpg->bytesperline[p] + \
- x * sizeof(PIXTYPE)); \
+ PIXTYPE *pos = (PIXTYPE *)(basep[p][(line / vdiv) & 1] + \
+ ((y * step + l) / (vdiv * div)) * tpg->bytesperline[p] + \
+ (x / hdiv) * sizeof(PIXTYPE)); \
unsigned s; \
\
for (s = 0; s < len; s++) { \
u8 chr = font8x16[text[s] * 16 + line]; \
\
- if (tpg->hflip) { \
+ if (hdiv == 2 && tpg->hflip) { \
+ pos[3] = (chr & (0x01 << 6) ? fg : bg); \
+ pos[2] = (chr & (0x01 << 4) ? fg : bg); \
+ pos[1] = (chr & (0x01 << 2) ? fg : bg); \
+ pos[0] = (chr & (0x01 << 0) ? fg : bg); \
+ } else if (hdiv == 2) { \
+ pos[0] = (chr & (0x01 << 7) ? fg : bg); \
+ pos[1] = (chr & (0x01 << 5) ? fg : bg); \
+ pos[2] = (chr & (0x01 << 3) ? fg : bg); \
+ pos[3] = (chr & (0x01 << 1) ? fg : bg); \
+ } else if (tpg->hflip) { \
pos[7] = (chr & (0x01 << 7) ? fg : bg); \
pos[6] = (chr & (0x01 << 6) ? fg : bg); \
pos[5] = (chr & (0x01 << 5) ? fg : bg); \
@@ -1120,7 +1476,7 @@ void tpg_gen_text(struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
pos[7] = (chr & (0x01 << 0) ? fg : bg); \
} \
\
- pos += tpg->hflip ? -8 : 8; \
+ pos += (tpg->hflip ? -8 : 8) / hdiv; \
} \
} \
} while (0)
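/*
 * Sketch of the hdiv == 2 branch of the macro above: when a plane is
 * horizontally downsampled by 2, only every other column of the 8-pixel
 * font glyph row is drawn (bits 7, 5, 3, 1, or 6, 4, 2, 0 when h-flipped).
 * Standalone illustration with an arbitrary glyph row:
 */
#include <stdio.h>

int main(void)
{
	unsigned char chr = 0xA5;	/* 1010 0101 */
	char out[5] = "....";
	int i;

	for (i = 0; i < 4; i++)		/* non-flipped, hdiv == 2 */
		out[i] = (chr & (0x01 << (7 - 2 * i))) ? '#' : '.';
	printf("downsampled row: %s\n", out);	/* "##.." for 0xA5 */
	return 0;
}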
@@ -1187,7 +1543,7 @@ void tpg_update_mv_step(struct tpg_data *tpg)
}
/* Map the line number relative to the crop rectangle to a frame line number */
-static unsigned tpg_calc_frameline(struct tpg_data *tpg, unsigned src_y,
+static unsigned tpg_calc_frameline(const struct tpg_data *tpg, unsigned src_y,
unsigned field)
{
switch (field) {
@@ -1204,7 +1560,7 @@ static unsigned tpg_calc_frameline(struct tpg_data *tpg, unsigned src_y,
* Map the line number relative to the compose rectangle to a destination
* buffer line number.
*/
-static unsigned tpg_calc_buffer_line(struct tpg_data *tpg, unsigned y,
+static unsigned tpg_calc_buffer_line(const struct tpg_data *tpg, unsigned y,
unsigned field)
{
y += tpg->compose.top;
@@ -1265,6 +1621,10 @@ static void tpg_recalc(struct tpg_data *tpg)
V4L2_QUANTIZATION_LIM_RANGE;
break;
}
+ } else if (tpg->colorspace == V4L2_COLORSPACE_BT2020) {
+ /* R'G'B' BT.2020 is limited range */
+ tpg->real_quantization =
+ V4L2_QUANTIZATION_LIM_RANGE;
}
}
tpg_precalculate_colors(tpg);
@@ -1283,191 +1643,388 @@ void tpg_calc_text_basep(struct tpg_data *tpg,
u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf)
{
unsigned stride = tpg->bytesperline[p];
+ unsigned h = tpg->buf_height;
tpg_recalc(tpg);
basep[p][0] = vbuf;
basep[p][1] = vbuf;
+ h /= tpg->vdownsampling[p];
if (tpg->field == V4L2_FIELD_SEQ_TB)
- basep[p][1] += tpg->buf_height * stride / 2;
+ basep[p][1] += h * stride / 2;
else if (tpg->field == V4L2_FIELD_SEQ_BT)
- basep[p][0] += tpg->buf_height * stride / 2;
+ basep[p][0] += h * stride / 2;
+ if (p == 0 && tpg->interleaved)
+ tpg_calc_text_basep(tpg, basep, 1, vbuf);
}
-void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
+static int tpg_pattern_avg(const struct tpg_data *tpg,
+ unsigned pat1, unsigned pat2)
{
- bool is_tv = std;
- bool is_60hz = is_tv && (std & V4L2_STD_525_60);
- unsigned mv_hor_old = tpg->mv_hor_count % tpg->src_width;
- unsigned mv_hor_new = (tpg->mv_hor_count + tpg->mv_hor_step) % tpg->src_width;
- unsigned mv_vert_old = tpg->mv_vert_count % tpg->src_height;
- unsigned mv_vert_new = (tpg->mv_vert_count + tpg->mv_vert_step) % tpg->src_height;
+ unsigned pat_lines = tpg_get_pat_lines(tpg);
+
+ if (pat1 == (pat2 + 1) % pat_lines)
+ return pat2;
+ if (pat2 == (pat1 + 1) % pat_lines)
+ return pat1;
+ return -1;
+}
+
+/*
+ * This struct contains common parameters used by both the drawing of the
+ * test pattern and the drawing of the extras (borders, square, etc.)
+ */
+struct tpg_draw_params {
+ /* common data */
+ bool is_tv;
+ bool is_60hz;
+ unsigned twopixsize;
+ unsigned img_width;
+ unsigned stride;
+ unsigned hmax;
+ unsigned frame_line;
+ unsigned frame_line_next;
+
+ /* test pattern */
+ unsigned mv_hor_old;
+ unsigned mv_hor_new;
+ unsigned mv_vert_old;
+ unsigned mv_vert_new;
+
+ /* extras */
unsigned wss_width;
- unsigned f;
- int hmax = (tpg->compose.height * tpg->perc_fill) / 100;
- int h;
- unsigned twopixsize = tpg->twopixelsize[p];
- unsigned img_width = tpg->compose.width * twopixsize / 2;
- unsigned line_offset;
- unsigned left_pillar_width = 0;
- unsigned right_pillar_start = img_width;
- unsigned stride = tpg->bytesperline[p];
- unsigned factor = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
- u8 *orig_vbuf = vbuf;
+ unsigned wss_random_offset;
+ unsigned sav_eav_f;
+ unsigned left_pillar_width;
+ unsigned right_pillar_start;
+};
- /* Coarse scaling with Bresenham */
- unsigned int_part = (tpg->crop.height / factor) / tpg->compose.height;
- unsigned fract_part = (tpg->crop.height / factor) % tpg->compose.height;
- unsigned src_y = 0;
- unsigned error = 0;
+static void tpg_fill_params_pattern(const struct tpg_data *tpg, unsigned p,
+ struct tpg_draw_params *params)
+{
+ params->mv_hor_old =
+ tpg_hscale_div(tpg, p, tpg->mv_hor_count % tpg->src_width);
+ params->mv_hor_new =
+ tpg_hscale_div(tpg, p, (tpg->mv_hor_count + tpg->mv_hor_step) %
+ tpg->src_width);
+ params->mv_vert_old = tpg->mv_vert_count % tpg->src_height;
+ params->mv_vert_new =
+ (tpg->mv_vert_count + tpg->mv_vert_step) % tpg->src_height;
+}
- tpg_recalc(tpg);
+static void tpg_fill_params_extras(const struct tpg_data *tpg,
+ unsigned p,
+ struct tpg_draw_params *params)
+{
+ unsigned left_pillar_width = 0;
+ unsigned right_pillar_start = params->img_width;
+
+ params->wss_width = tpg->crop.left < tpg->src_width / 2 ?
+ tpg->src_width / 2 - tpg->crop.left : 0;
+ if (params->wss_width > tpg->crop.width)
+ params->wss_width = tpg->crop.width;
+ params->wss_width = tpg_hscale_div(tpg, p, params->wss_width);
+ params->wss_random_offset =
+ params->twopixsize * prandom_u32_max(tpg->src_width / 2);
- mv_hor_old = (mv_hor_old * tpg->scaled_width / tpg->src_width) & ~1;
- mv_hor_new = (mv_hor_new * tpg->scaled_width / tpg->src_width) & ~1;
- wss_width = tpg->crop.left < tpg->src_width / 2 ?
- tpg->src_width / 2 - tpg->crop.left : 0;
- if (wss_width > tpg->crop.width)
- wss_width = tpg->crop.width;
- wss_width = wss_width * tpg->scaled_width / tpg->src_width;
-
- vbuf += tpg->compose.left * twopixsize / 2;
- line_offset = tpg->crop.left * tpg->scaled_width / tpg->src_width;
- line_offset = (line_offset & ~1) * twopixsize / 2;
if (tpg->crop.left < tpg->border.left) {
left_pillar_width = tpg->border.left - tpg->crop.left;
if (left_pillar_width > tpg->crop.width)
left_pillar_width = tpg->crop.width;
- left_pillar_width = (left_pillar_width * tpg->scaled_width) / tpg->src_width;
- left_pillar_width = (left_pillar_width & ~1) * twopixsize / 2;
+ left_pillar_width = tpg_hscale_div(tpg, p, left_pillar_width);
}
- if (tpg->crop.left + tpg->crop.width > tpg->border.left + tpg->border.width) {
- right_pillar_start = tpg->border.left + tpg->border.width - tpg->crop.left;
- right_pillar_start = (right_pillar_start * tpg->scaled_width) / tpg->src_width;
- right_pillar_start = (right_pillar_start & ~1) * twopixsize / 2;
- if (right_pillar_start > img_width)
- right_pillar_start = img_width;
+ params->left_pillar_width = left_pillar_width;
+
+ if (tpg->crop.left + tpg->crop.width >
+ tpg->border.left + tpg->border.width) {
+ right_pillar_start =
+ tpg->border.left + tpg->border.width - tpg->crop.left;
+ right_pillar_start =
+ tpg_hscale_div(tpg, p, right_pillar_start);
+ if (right_pillar_start > params->img_width)
+ right_pillar_start = params->img_width;
}
+ params->right_pillar_start = right_pillar_start;
- f = tpg->field == (is_60hz ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
+ params->sav_eav_f = tpg->field ==
+ (params->is_60hz ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
+}
- for (h = 0; h < tpg->compose.height; h++) {
- bool even;
- bool fill_blank = false;
- unsigned frame_line;
- unsigned buf_line;
- unsigned pat_line_old;
- unsigned pat_line_new;
- u8 *linestart_older;
- u8 *linestart_newer;
- u8 *linestart_top;
- u8 *linestart_bottom;
-
- frame_line = tpg_calc_frameline(tpg, src_y, tpg->field);
- even = !(frame_line & 1);
- buf_line = tpg_calc_buffer_line(tpg, h, tpg->field);
- src_y += int_part;
- error += fract_part;
- if (error >= tpg->compose.height) {
- error -= tpg->compose.height;
- src_y++;
- }
+static void tpg_fill_plane_extras(const struct tpg_data *tpg,
+ const struct tpg_draw_params *params,
+ unsigned p, unsigned h, u8 *vbuf)
+{
+ unsigned twopixsize = params->twopixsize;
+ unsigned img_width = params->img_width;
+ unsigned frame_line = params->frame_line;
+ const struct v4l2_rect *sq = &tpg->square;
+ const struct v4l2_rect *b = &tpg->border;
+ const struct v4l2_rect *c = &tpg->crop;
+
+ if (params->is_tv && !params->is_60hz &&
+ frame_line == 0 && params->wss_width) {
+ /*
+ * Replace the first half of the top line of a 50 Hz frame
+ * with random data to simulate a WSS signal.
+ */
+ u8 *wss = tpg->random_line[p] + params->wss_random_offset;
- if (h >= hmax) {
- if (hmax == tpg->compose.height)
- continue;
- if (!tpg->perc_fill_blank)
- continue;
- fill_blank = true;
- }
+ memcpy(vbuf, wss, params->wss_width);
+ }
+
+ if (tpg->show_border && frame_line >= b->top &&
+ frame_line < b->top + b->height) {
+ unsigned bottom = b->top + b->height - 1;
+ unsigned left = params->left_pillar_width;
+ unsigned right = params->right_pillar_start;
- if (tpg->vflip)
- frame_line = tpg->src_height - frame_line - 1;
-
- if (fill_blank) {
- linestart_older = tpg->contrast_line[p];
- linestart_newer = tpg->contrast_line[p];
- } else if (tpg->qual != TPG_QUAL_NOISE &&
- (frame_line < tpg->border.top ||
- frame_line >= tpg->border.top + tpg->border.height)) {
- linestart_older = tpg->black_line[p];
- linestart_newer = tpg->black_line[p];
- } else if (tpg->pattern == TPG_PAT_NOISE || tpg->qual == TPG_QUAL_NOISE) {
- linestart_older = tpg->random_line[p] +
- twopixsize * prandom_u32_max(tpg->src_width / 2);
- linestart_newer = tpg->random_line[p] +
- twopixsize * prandom_u32_max(tpg->src_width / 2);
+ if (frame_line == b->top || frame_line == b->top + 1 ||
+ frame_line == bottom || frame_line == bottom - 1) {
+ memcpy(vbuf + left, tpg->contrast_line[p],
+ right - left);
} else {
- pat_line_old = tpg_get_pat_line(tpg,
- (frame_line + mv_vert_old) % tpg->src_height);
- pat_line_new = tpg_get_pat_line(tpg,
- (frame_line + mv_vert_new) % tpg->src_height);
- linestart_older = tpg->lines[pat_line_old][p] +
- mv_hor_old * twopixsize / 2;
- linestart_newer = tpg->lines[pat_line_new][p] +
- mv_hor_new * twopixsize / 2;
- linestart_older += line_offset;
- linestart_newer += line_offset;
+ if (b->left >= c->left &&
+ b->left < c->left + c->width)
+ memcpy(vbuf + left,
+ tpg->contrast_line[p], twopixsize);
+ if (b->left + b->width > c->left &&
+ b->left + b->width <= c->left + c->width)
+ memcpy(vbuf + right - twopixsize,
+ tpg->contrast_line[p], twopixsize);
}
- if (is_60hz) {
- linestart_top = linestart_newer;
- linestart_bottom = linestart_older;
- } else {
- linestart_top = linestart_older;
- linestart_bottom = linestart_newer;
+ }
+ if (tpg->qual != TPG_QUAL_NOISE && frame_line >= b->top &&
+ frame_line < b->top + b->height) {
+ memcpy(vbuf, tpg->black_line[p], params->left_pillar_width);
+ memcpy(vbuf + params->right_pillar_start, tpg->black_line[p],
+ img_width - params->right_pillar_start);
+ }
+ if (tpg->show_square && frame_line >= sq->top &&
+ frame_line < sq->top + sq->height &&
+ sq->left < c->left + c->width &&
+ sq->left + sq->width >= c->left) {
+ unsigned left = sq->left;
+ unsigned width = sq->width;
+
+ if (c->left > left) {
+ width -= c->left - left;
+ left = c->left;
}
+ if (c->left + c->width < left + width)
+ width -= left + width - c->left - c->width;
+ left -= c->left;
+ left = tpg_hscale_div(tpg, p, left);
+ width = tpg_hscale_div(tpg, p, width);
+ memcpy(vbuf + left, tpg->contrast_line[p], width);
+ }
+ if (tpg->insert_sav) {
+ unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width / 3);
+ u8 *p = vbuf + offset;
+ unsigned vact = 0, hact = 0;
+
+ p[0] = 0xff;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0x80 | (params->sav_eav_f << 6) |
+ (vact << 5) | (hact << 4) |
+ ((hact ^ vact) << 3) |
+ ((hact ^ params->sav_eav_f) << 2) |
+ ((params->sav_eav_f ^ vact) << 1) |
+ (hact ^ vact ^ params->sav_eav_f);
+ }
+ if (tpg->insert_eav) {
+ unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width * 2 / 3);
+ u8 *p = vbuf + offset;
+ unsigned vact = 0, hact = 1;
+
+ p[0] = 0xff;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0x80 | (params->sav_eav_f << 6) |
+ (vact << 5) | (hact << 4) |
+ ((hact ^ vact) << 3) |
+ ((hact ^ params->sav_eav_f) << 2) |
+ ((params->sav_eav_f ^ vact) << 1) |
+ (hact ^ vact ^ params->sav_eav_f);
+ }
+}
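/*
 * Sketch of the SAV/EAV construction above: ITU-R BT.656 timing reference
 * codes are the byte sequence FF 00 00 XY, where XY packs the F (field),
 * V (vertical blanking) and H (SAV/EAV) flags plus four XOR protection
 * bits. The helper below mirrors the bit layout used above (illustrative
 * only); SAV uses H = 0 and EAV uses H = 1, matching the hact values in
 * the code.
 */
#include <stdint.h>

static uint8_t bt656_xy(unsigned f, unsigned v, unsigned h)
{
	return 0x80 | (f << 6) | (v << 5) | (h << 4) |
	       ((v ^ h) << 3) | ((f ^ h) << 2) | ((f ^ v) << 1) | (f ^ v ^ h);
}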
- switch (tpg->field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
- if (even)
- memcpy(vbuf + buf_line * stride, linestart_top, img_width);
- else
- memcpy(vbuf + buf_line * stride, linestart_bottom, img_width);
- break;
- case V4L2_FIELD_INTERLACED_BT:
- if (even)
- memcpy(vbuf + buf_line * stride, linestart_bottom, img_width);
- else
- memcpy(vbuf + buf_line * stride, linestart_top, img_width);
- break;
- case V4L2_FIELD_TOP:
- memcpy(vbuf + buf_line * stride, linestart_top, img_width);
- break;
- case V4L2_FIELD_BOTTOM:
- memcpy(vbuf + buf_line * stride, linestart_bottom, img_width);
- break;
- case V4L2_FIELD_NONE:
- default:
- memcpy(vbuf + buf_line * stride, linestart_older, img_width);
- break;
- }
+static void tpg_fill_plane_pattern(const struct tpg_data *tpg,
+ const struct tpg_draw_params *params,
+ unsigned p, unsigned h, u8 *vbuf)
+{
+ unsigned twopixsize = params->twopixsize;
+ unsigned img_width = params->img_width;
+ unsigned mv_hor_old = params->mv_hor_old;
+ unsigned mv_hor_new = params->mv_hor_new;
+ unsigned mv_vert_old = params->mv_vert_old;
+ unsigned mv_vert_new = params->mv_vert_new;
+ unsigned frame_line = params->frame_line;
+ unsigned frame_line_next = params->frame_line_next;
+ unsigned line_offset = tpg_hscale_div(tpg, p, tpg->crop.left);
+ bool even;
+ bool fill_blank = false;
+ unsigned pat_line_old;
+ unsigned pat_line_new;
+ u8 *linestart_older;
+ u8 *linestart_newer;
+ u8 *linestart_top;
+ u8 *linestart_bottom;
+
+ even = !(frame_line & 1);
+
+ if (h >= params->hmax) {
+ if (params->hmax == tpg->compose.height)
+ return;
+ if (!tpg->perc_fill_blank)
+ return;
+ fill_blank = true;
+ }
- if (is_tv && !is_60hz && frame_line == 0 && wss_width) {
- /*
- * Replace the first half of the top line of a 50 Hz frame
- * with random data to simulate a WSS signal.
- */
- u8 *wss = tpg->random_line[p] +
+ if (tpg->vflip) {
+ frame_line = tpg->src_height - frame_line - 1;
+ frame_line_next = tpg->src_height - frame_line_next - 1;
+ }
+
+ if (fill_blank) {
+ linestart_older = tpg->contrast_line[p];
+ linestart_newer = tpg->contrast_line[p];
+ } else if (tpg->qual != TPG_QUAL_NOISE &&
+ (frame_line < tpg->border.top ||
+ frame_line >= tpg->border.top + tpg->border.height)) {
+ linestart_older = tpg->black_line[p];
+ linestart_newer = tpg->black_line[p];
+ } else if (tpg->pattern == TPG_PAT_NOISE || tpg->qual == TPG_QUAL_NOISE) {
+ linestart_older = tpg->random_line[p] +
+ twopixsize * prandom_u32_max(tpg->src_width / 2);
+ linestart_newer = tpg->random_line[p] +
twopixsize * prandom_u32_max(tpg->src_width / 2);
+ } else {
+ unsigned frame_line_old =
+ (frame_line + mv_vert_old) % tpg->src_height;
+ unsigned frame_line_new =
+ (frame_line + mv_vert_new) % tpg->src_height;
+ unsigned pat_line_next_old;
+ unsigned pat_line_next_new;
- memcpy(vbuf + buf_line * stride, wss, wss_width * twopixsize / 2);
+ pat_line_old = tpg_get_pat_line(tpg, frame_line_old);
+ pat_line_new = tpg_get_pat_line(tpg, frame_line_new);
+ linestart_older = tpg->lines[pat_line_old][p] + mv_hor_old;
+ linestart_newer = tpg->lines[pat_line_new][p] + mv_hor_new;
+
+ if (tpg->vdownsampling[p] > 1 && frame_line != frame_line_next) {
+ int avg_pat;
+
+ /*
+ * Now decide whether we need to use downsampled_lines[].
+ * That's necessary if the two lines use different patterns.
+ */
+ pat_line_next_old = tpg_get_pat_line(tpg,
+ (frame_line_next + mv_vert_old) % tpg->src_height);
+ pat_line_next_new = tpg_get_pat_line(tpg,
+ (frame_line_next + mv_vert_new) % tpg->src_height);
+
+ switch (tpg->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED_TB:
+ avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_new);
+ if (avg_pat < 0)
+ break;
+ linestart_older = tpg->downsampled_lines[avg_pat][p] + mv_hor_old;
+ linestart_newer = linestart_older;
+ break;
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_SEQ_TB:
+ avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_next_old);
+ if (avg_pat >= 0)
+ linestart_older = tpg->downsampled_lines[avg_pat][p] +
+ mv_hor_old;
+ avg_pat = tpg_pattern_avg(tpg, pat_line_new, pat_line_next_new);
+ if (avg_pat >= 0)
+ linestart_newer = tpg->downsampled_lines[avg_pat][p] +
+ mv_hor_new;
+ break;
+ }
}
+ linestart_older += line_offset;
+ linestart_newer += line_offset;
+ }
+ if (tpg->field_alternate) {
+ linestart_top = linestart_bottom = linestart_older;
+ } else if (params->is_60hz) {
+ linestart_top = linestart_newer;
+ linestart_bottom = linestart_older;
+ } else {
+ linestart_top = linestart_older;
+ linestart_bottom = linestart_newer;
+ }
+
+ switch (tpg->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ if (even)
+ memcpy(vbuf, linestart_top, img_width);
+ else
+ memcpy(vbuf, linestart_bottom, img_width);
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ if (even)
+ memcpy(vbuf, linestart_bottom, img_width);
+ else
+ memcpy(vbuf, linestart_top, img_width);
+ break;
+ case V4L2_FIELD_TOP:
+ memcpy(vbuf, linestart_top, img_width);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ memcpy(vbuf, linestart_bottom, img_width);
+ break;
+ case V4L2_FIELD_NONE:
+ default:
+ memcpy(vbuf, linestart_older, img_width);
+ break;
}
+}
+
+void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
+ unsigned p, u8 *vbuf)
+{
+ struct tpg_draw_params params;
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
+
+ /* Coarse scaling with Bresenham */
+ unsigned int_part = (tpg->crop.height / factor) / tpg->compose.height;
+ unsigned fract_part = (tpg->crop.height / factor) % tpg->compose.height;
+ unsigned src_y = 0;
+ unsigned error = 0;
+ unsigned h;
+
+ tpg_recalc(tpg);
+
+ params.is_tv = std;
+ params.is_60hz = std & V4L2_STD_525_60;
+ params.twopixsize = tpg->twopixelsize[p];
+ params.img_width = tpg_hdiv(tpg, p, tpg->compose.width);
+ params.stride = tpg->bytesperline[p];
+ params.hmax = (tpg->compose.height * tpg->perc_fill) / 100;
+
+ tpg_fill_params_pattern(tpg, p, &params);
+ tpg_fill_params_extras(tpg, p, &params);
+
+ vbuf += tpg_hdiv(tpg, p, tpg->compose.left);
- vbuf = orig_vbuf;
- vbuf += tpg->compose.left * twopixsize / 2;
- src_y = 0;
- error = 0;
for (h = 0; h < tpg->compose.height; h++) {
- unsigned frame_line = tpg_calc_frameline(tpg, src_y, tpg->field);
- unsigned buf_line = tpg_calc_buffer_line(tpg, h, tpg->field);
- const struct v4l2_rect *sq = &tpg->square;
- const struct v4l2_rect *b = &tpg->border;
- const struct v4l2_rect *c = &tpg->crop;
+ unsigned buf_line;
+ params.frame_line = tpg_calc_frameline(tpg, src_y, tpg->field);
+ params.frame_line_next = params.frame_line;
+ buf_line = tpg_calc_buffer_line(tpg, h, tpg->field);
src_y += int_part;
error += fract_part;
if (error >= tpg->compose.height) {
@@ -1475,80 +2032,61 @@ void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
src_y++;
}
- if (tpg->show_border && frame_line >= b->top &&
- frame_line < b->top + b->height) {
- unsigned bottom = b->top + b->height - 1;
- unsigned left = left_pillar_width;
- unsigned right = right_pillar_start;
+ /*
+ * For line-interleaved formats determine the 'plane'
+ * based on the buffer line.
+ */
+ if (tpg_g_interleaved(tpg))
+ p = tpg_g_interleaved_plane(tpg, buf_line);
- if (frame_line == b->top || frame_line == b->top + 1 ||
- frame_line == bottom || frame_line == bottom - 1) {
- memcpy(vbuf + buf_line * stride + left, tpg->contrast_line[p],
- right - left);
+ if (tpg->vdownsampling[p] > 1) {
+ /*
+ * When doing vertical downsampling the field setting
+ * matters: for SEQ_BT/TB we downsample each field
+ * separately (i.e. lines 0+2 are combined, as are
+ * lines 1+3); for the other field settings we combine
+ * odd and even lines. Doing that for SEQ_BT/TB would
+ * be really weird.
+ */
+ if (tpg->field == V4L2_FIELD_SEQ_BT ||
+ tpg->field == V4L2_FIELD_SEQ_TB) {
+ unsigned next_src_y = src_y;
+
+ if ((h & 3) >= 2)
+ continue;
+ next_src_y += int_part;
+ if (error + fract_part >= tpg->compose.height)
+ next_src_y++;
+ params.frame_line_next =
+ tpg_calc_frameline(tpg, next_src_y, tpg->field);
} else {
- if (b->left >= c->left &&
- b->left < c->left + c->width)
- memcpy(vbuf + buf_line * stride + left,
- tpg->contrast_line[p], twopixsize);
- if (b->left + b->width > c->left &&
- b->left + b->width <= c->left + c->width)
- memcpy(vbuf + buf_line * stride + right - twopixsize,
- tpg->contrast_line[p], twopixsize);
+ if (h & 1)
+ continue;
+ params.frame_line_next =
+ tpg_calc_frameline(tpg, src_y, tpg->field);
}
+
+ buf_line /= tpg->vdownsampling[p];
}
- if (tpg->qual != TPG_QUAL_NOISE && frame_line >= b->top &&
- frame_line < b->top + b->height) {
- memcpy(vbuf + buf_line * stride, tpg->black_line[p], left_pillar_width);
- memcpy(vbuf + buf_line * stride + right_pillar_start, tpg->black_line[p],
- img_width - right_pillar_start);
- }
- if (tpg->show_square && frame_line >= sq->top &&
- frame_line < sq->top + sq->height &&
- sq->left < c->left + c->width &&
- sq->left + sq->width >= c->left) {
- unsigned left = sq->left;
- unsigned width = sq->width;
-
- if (c->left > left) {
- width -= c->left - left;
- left = c->left;
- }
- if (c->left + c->width < left + width)
- width -= left + width - c->left - c->width;
- left -= c->left;
- left = (left * tpg->scaled_width) / tpg->src_width;
- left = (left & ~1) * twopixsize / 2;
- width = (width * tpg->scaled_width) / tpg->src_width;
- width = (width & ~1) * twopixsize / 2;
- memcpy(vbuf + buf_line * stride + left, tpg->contrast_line[p], width);
- }
- if (tpg->insert_sav) {
- unsigned offset = (tpg->compose.width / 6) * twopixsize;
- u8 *p = vbuf + buf_line * stride + offset;
- unsigned vact = 0, hact = 0;
-
- p[0] = 0xff;
- p[1] = 0;
- p[2] = 0;
- p[3] = 0x80 | (f << 6) | (vact << 5) | (hact << 4) |
- ((hact ^ vact) << 3) |
- ((hact ^ f) << 2) |
- ((f ^ vact) << 1) |
- (hact ^ vact ^ f);
- }
- if (tpg->insert_eav) {
- unsigned offset = (tpg->compose.width / 6) * 2 * twopixsize;
- u8 *p = vbuf + buf_line * stride + offset;
- unsigned vact = 0, hact = 1;
-
- p[0] = 0xff;
- p[1] = 0;
- p[2] = 0;
- p[3] = 0x80 | (f << 6) | (vact << 5) | (hact << 4) |
- ((hact ^ vact) << 3) |
- ((hact ^ f) << 2) |
- ((f ^ vact) << 1) |
- (hact ^ vact ^ f);
- }
+ tpg_fill_plane_pattern(tpg, &params, p, h,
+ vbuf + buf_line * params.stride);
+ tpg_fill_plane_extras(tpg, &params, p, h,
+ vbuf + buf_line * params.stride);
+ }
+}
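/*
 * Sketch of the "coarse scaling with Bresenham" bookkeeping used above:
 * each output (compose) line advances the source line by the integer part
 * of crop_height / compose_height and accumulates the remainder, stepping
 * one extra source line whenever the error overflows. Standalone example
 * with made-up sizes:
 */
#include <stdio.h>

int main(void)
{
	unsigned crop_height = 6, compose_height = 4;
	unsigned int_part = crop_height / compose_height;	/* 1 */
	unsigned fract_part = crop_height % compose_height;	/* 2 */
	unsigned src_y = 0, error = 0, h;

	for (h = 0; h < compose_height; h++) {
		printf("output line %u uses source line %u\n", h, src_y);
		src_y += int_part;
		error += fract_part;
		if (error >= compose_height) {
			error -= compose_height;
			src_y++;
		}
	}
	return 0;	/* source lines 0, 1, 3, 4 */
}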
+
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
+{
+ unsigned offset = 0;
+ unsigned i;
+
+ if (tpg->buffers > 1) {
+ tpg_fill_plane_buffer(tpg, std, p, vbuf);
+ return;
+ }
+
+ for (i = 0; i < tpg_g_planes(tpg); i++) {
+ tpg_fill_plane_buffer(tpg, std, i, vbuf + offset);
+ offset += tpg_calc_plane_size(tpg, i);
}
}
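/*
 * Sketch of the single-buffer layout produced by the dispatch above: when
 * buffers == 1 each plane is rendered back to back and the offset advances
 * by tpg_calc_plane_size(), i.e. bytesperline * buf_height / vdownsampling.
 * The numbers assume YUV420 at 640x480 (illustrative, not from the driver):
 */
#include <stdio.h>

int main(void)
{
	unsigned bytesperline[3] = { 640, 320, 320 };	/* Y, Cb, Cr */
	unsigned vdownsampling[3] = { 1, 2, 2 };
	unsigned height = 480, offset = 0, p;

	for (p = 0; p < 3; p++) {
		unsigned size = bytesperline[p] * height / vdownsampling[p];

		printf("plane %u: offset %u, size %u\n", p, offset, size);
		offset += size;
	}
	printf("total buffer size: %u\n", offset);	/* 460800 */
	return 0;
}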
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
index bd8b1c760b3f..a50cd2e2535b 100644
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ b/drivers/media/platform/vivid/vivid-tpg.h
@@ -41,7 +41,10 @@ enum tpg_pattern {
TPG_PAT_GREEN,
TPG_PAT_BLUE,
TPG_PAT_CHECKERS_16X16,
+ TPG_PAT_CHECKERS_2X2,
TPG_PAT_CHECKERS_1X1,
+ TPG_PAT_COLOR_CHECKERS_2X2,
+ TPG_PAT_COLOR_CHECKERS_1X1,
TPG_PAT_ALTERNATING_HLINES,
TPG_PAT_ALTERNATING_VLINES,
TPG_PAT_CROSS_1_PIXEL,
@@ -87,7 +90,7 @@ enum tpg_move_mode {
extern const char * const tpg_aspect_strings[];
-#define TPG_MAX_PLANES 2
+#define TPG_MAX_PLANES 3
#define TPG_MAX_PAT_LINES 8
struct tpg_data {
@@ -98,6 +101,7 @@ struct tpg_data {
/* Scaled output frame size */
unsigned scaled_width;
u32 field;
+ bool field_alternate;
/* crop coordinates are frame-based */
struct v4l2_rect crop;
/* compose coordinates are format-based */
@@ -134,7 +138,16 @@ struct tpg_data {
enum tpg_pixel_aspect pix_aspect;
unsigned rgb_range;
unsigned real_rgb_range;
+ unsigned buffers;
unsigned planes;
+ bool interleaved;
+ u8 vdownsampling[TPG_MAX_PLANES];
+ u8 hdownsampling[TPG_MAX_PLANES];
+ /*
+ * Horizontal positions must be ANDed with this value to enforce
+ * correct boundaries for packed YUYV values.
+ */
+ unsigned hmask[TPG_MAX_PLANES];
/* Used to store the colors in native format, either RGB or YUV */
u8 colors[TPG_COLOR_MAX][3];
u8 textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
@@ -168,6 +181,7 @@ struct tpg_data {
/* Used to store TPG_MAX_PAT_LINES lines, each with up to TPG_MAX_PLANES planes */
unsigned max_line_width;
u8 *lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+ u8 *downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
u8 *random_line[TPG_MAX_PLANES];
u8 *contrast_line[TPG_MAX_PLANES];
u8 *black_line[TPG_MAX_PLANES];
@@ -180,11 +194,15 @@ void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
u32 field);
void tpg_set_font(const u8 *f);
-void tpg_gen_text(struct tpg_data *tpg,
+void tpg_gen_text(const struct tpg_data *tpg,
u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
void tpg_calc_text_basep(struct tpg_data *tpg,
u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
-void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf);
+unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
+void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
+ unsigned p, u8 *vbuf);
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
+ unsigned p, u8 *vbuf);
bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
const struct v4l2_rect *compose);
@@ -323,9 +341,19 @@ static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
return tpg->quantization;
}
+static inline unsigned tpg_g_buffers(const struct tpg_data *tpg)
+{
+ return tpg->buffers;
+}
+
static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
{
- return tpg->planes;
+ return tpg->interleaved ? 1 : tpg->planes;
+}
+
+static inline bool tpg_g_interleaved(const struct tpg_data *tpg)
+{
+ return tpg->interleaved;
}
static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane)
@@ -333,6 +361,24 @@ static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned p
return tpg->twopixelsize[plane];
}
+static inline unsigned tpg_hdiv(const struct tpg_data *tpg,
+ unsigned plane, unsigned x)
+{
+ return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) *
+ tpg->twopixelsize[plane] / 2;
+}
+
+static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x)
+{
+ return (x * tpg->scaled_width) / tpg->src_width;
+}
+
+static inline unsigned tpg_hscale_div(const struct tpg_data *tpg,
+ unsigned plane, unsigned x)
+{
+ return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x));
+}
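/*
 * Sketch of the hdiv/hmask arithmetic above: the horizontal position is
 * divided by the plane's horizontal downsampling factor, then ANDed with
 * hmask so packed formats such as YUYV (hmask = ~1, twopixelsize = 4) never
 * start in the middle of a two-pixel macro-pixel. Standalone illustration:
 */
#include <stdio.h>

static unsigned hdiv(unsigned x, unsigned hdownsampling, unsigned hmask,
		     unsigned twopixelsize)
{
	return ((x / hdownsampling) & hmask) * twopixelsize / 2;
}

int main(void)
{
	unsigned x;

	for (x = 0; x < 6; x++)
		printf("x=%u -> byte offset %u\n", x, hdiv(x, 1, ~1u, 4));
	/* prints 0, 0, 4, 4, 8, 8: odd pixels snap to the even boundary */
	return 0;
}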
+
static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
{
return tpg->bytesperline[plane];
@@ -340,7 +386,60 @@ static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned p
static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
{
- tpg->bytesperline[plane] = bpl;
+ unsigned p;
+
+ if (tpg->buffers > 1) {
+ tpg->bytesperline[plane] = bpl;
+ return;
+ }
+
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+ tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
+ }
+}
+
+
+static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane)
+{
+ unsigned w = 0;
+ unsigned p;
+
+ if (tpg->buffers > 1)
+ return tpg_g_bytesperline(tpg, plane);
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = tpg_g_bytesperline(tpg, p);
+
+ w += plane_w / tpg->vdownsampling[p];
+ }
+ return w;
+}
+
+static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg,
+ unsigned plane, unsigned bpl)
+{
+ unsigned w = 0;
+ unsigned p;
+
+ if (tpg->buffers > 1)
+ return bpl;
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+ plane_w /= tpg->hdownsampling[p];
+ w += plane_w / tpg->vdownsampling[p];
+ }
+ return w;
+}
+
+static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane)
+{
+ if (plane >= tpg_g_planes(tpg))
+ return 0;
+
+ return tpg_g_bytesperline(tpg, plane) * tpg->buf_height /
+ tpg->vdownsampling[plane];
}
static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
@@ -348,9 +447,10 @@ static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
tpg->buf_height = h;
}
-static inline void tpg_s_field(struct tpg_data *tpg, unsigned field)
+static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate)
{
tpg->field = field;
+ tpg->field_alternate = alternate;
}
static inline void tpg_s_perc_fill(struct tpg_data *tpg,
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 867a29a6d18f..dab5990f45a0 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -42,20 +42,26 @@ static const struct vivid_fmt formats_ovl[] = {
{
.name = "RGB565 (LE)",
.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
},
{
.name = "XRGB555 (LE)",
.fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
},
{
.name = "ARGB555 (LE)",
.fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
},
};
@@ -94,7 +100,7 @@ static int vid_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f
unsigned sizes[], void *alloc_ctxs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
- unsigned planes = tpg_g_planes(&dev->tpg);
+ unsigned buffers = tpg_g_buffers(&dev->tpg);
unsigned h = dev->fmt_cap_rect.height;
unsigned p;
@@ -127,39 +133,36 @@ static int vid_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f
mp = &fmt->fmt.pix_mp;
/*
* Check if the number of planes in the specified format matches
- * the number of planes in the current format. You can't mix that.
+ * the number of buffers in the current format. You can't mix that.
*/
- if (mp->num_planes != planes)
+ if (mp->num_planes != buffers)
return -EINVAL;
vfmt = vivid_get_format(dev, mp->pixelformat);
- for (p = 0; p < planes; p++) {
+ for (p = 0; p < buffers; p++) {
sizes[p] = mp->plane_fmt[p].sizeimage;
- if (sizes[0] < tpg_g_bytesperline(&dev->tpg, 0) * h +
+ if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
vfmt->data_offset[p])
return -EINVAL;
}
} else {
- for (p = 0; p < planes; p++)
- sizes[p] = tpg_g_bytesperline(&dev->tpg, p) * h +
+ for (p = 0; p < buffers; p++)
+ sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
dev->fmt_cap->data_offset[p];
}
if (vq->num_buffers + *nbuffers < 2)
*nbuffers = 2 - vq->num_buffers;
- *nplanes = planes;
+ *nplanes = buffers;
/*
* videobuf2-vmalloc allocator is context-less so no need to set
* alloc_ctxs array.
*/
- if (planes == 2)
- dprintk(dev, 1, "%s, count=%d, sizes=%u, %u\n", __func__,
- *nbuffers, sizes[0], sizes[1]);
- else
- dprintk(dev, 1, "%s, count=%d, size=%u\n", __func__,
- *nbuffers, sizes[0]);
+ dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
+ for (p = 0; p < buffers; p++)
+ dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
return 0;
}
@@ -168,7 +171,7 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size;
- unsigned planes = tpg_g_planes(&dev->tpg);
+ unsigned buffers = tpg_g_buffers(&dev->tpg);
unsigned p;
dprintk(dev, 1, "%s\n", __func__);
@@ -184,13 +187,13 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
dev->buf_prepare_error = false;
return -EINVAL;
}
- for (p = 0; p < planes; p++) {
- size = tpg_g_bytesperline(&dev->tpg, p) * dev->fmt_cap_rect.height +
+ for (p = 0; p < buffers; p++) {
+ size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
dev->fmt_cap->data_offset[p];
- if (vb2_plane_size(vb, 0) < size) {
+ if (vb2_plane_size(vb, p) < size) {
dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
- __func__, p, vb2_plane_size(vb, 0), size);
+ __func__, p, vb2_plane_size(vb, p), size);
return -EINVAL;
}
@@ -441,7 +444,7 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
*/
if (keep_controls || !dev->colorspace)
break;
- if (bt->standards & V4L2_DV_BT_STD_CEA861) {
+ if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
if (bt->width == 720 && bt->height <= 576)
v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
else
@@ -526,11 +529,11 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
mp->colorspace = vivid_colorspace_cap(dev);
mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
mp->quantization = vivid_quantization_cap(dev);
- mp->num_planes = dev->fmt_cap->planes;
+ mp->num_planes = dev->fmt_cap->buffers;
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
mp->plane_fmt[p].sizeimage =
- mp->plane_fmt[p].bytesperline * mp->height +
+ tpg_g_line_width(&dev->tpg, p) * mp->height +
dev->fmt_cap->data_offset[p];
}
return 0;
@@ -596,18 +599,19 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
/* This driver supports custom bytesperline values */
- /* Calculate the minimum supported bytesperline value */
- bytesperline = (mp->width * fmt->depth) >> 3;
- /* Calculate the maximum supported bytesperline value */
- max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->depth) >> 3;
- mp->num_planes = fmt->planes;
+ mp->num_planes = fmt->buffers;
for (p = 0; p < mp->num_planes; p++) {
+ /* Calculate the minimum supported bytesperline value */
+ bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
+ /* Calculate the maximum supported bytesperline value */
+ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
+
if (pfmt[p].bytesperline > max_bpl)
pfmt[p].bytesperline = max_bpl;
if (pfmt[p].bytesperline < bytesperline)
pfmt[p].bytesperline = bytesperline;
- pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height +
- fmt->data_offset[p];
+ pfmt[p].sizeimage = tpg_calc_line_width(&dev->tpg, p, pfmt[p].bytesperline) *
+ mp->height + fmt->data_offset[p];
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
mp->colorspace = vivid_colorspace_cap(dev);
@@ -627,6 +631,7 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
struct vb2_queue *q = &dev->vb_vid_cap_q;
int ret = vivid_try_fmt_vid_cap(file, priv, f);
unsigned factor = 1;
+ unsigned p;
unsigned i;
if (ret < 0)
@@ -729,13 +734,15 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
dev->fmt_cap_rect.width = mp->width;
dev->fmt_cap_rect.height = mp->height;
tpg_s_buf_height(&dev->tpg, mp->height);
- tpg_s_bytesperline(&dev->tpg, 0, mp->plane_fmt[0].bytesperline);
- if (tpg_g_planes(&dev->tpg) > 1)
- tpg_s_bytesperline(&dev->tpg, 1, mp->plane_fmt[1].bytesperline);
+ tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
+ for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
+ tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
dev->field_cap = mp->field;
- tpg_s_field(&dev->tpg, dev->field_cap);
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
+ else
+ tpg_s_field(&dev->tpg, dev->field_cap, false);
tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
- tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
if (vivid_is_sdtv_cap(dev))
dev->tv_field_cap = mp->field;
tpg_update_mv_step(&dev->tpg);
@@ -1012,8 +1019,12 @@ int vivid_vid_cap_cropcap(struct file *file, void *priv,
int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
+ struct vivid_dev *dev = video_drvdata(file);
const struct vivid_fmt *fmt;
+ if (dev->multiplanar)
+ return -ENOTTY;
+
if (f->index >= ARRAY_SIZE(formats_ovl))
return -EINVAL;
@@ -1032,6 +1043,9 @@ int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_window *win = &f->fmt.win;
unsigned clipcount = win->clipcount;
+ if (dev->multiplanar)
+ return -ENOTTY;
+
win->w.top = dev->overlay_cap_top;
win->w.left = dev->overlay_cap_left;
win->w.width = compose->width;
@@ -1063,6 +1077,9 @@ int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_window *win = &f->fmt.win;
int i, j;
+ if (dev->multiplanar)
+ return -ENOTTY;
+
win->w.left = clamp_t(int, win->w.left,
-dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
win->w.top = clamp_t(int, win->w.top,
@@ -1150,6 +1167,9 @@ int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
{
struct vivid_dev *dev = video_drvdata(file);
+ if (dev->multiplanar)
+ return -ENOTTY;
+
if (i && dev->fb_vbase_cap == NULL)
return -EINVAL;
@@ -1169,6 +1189,9 @@ int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
{
struct vivid_dev *dev = video_drvdata(file);
+ if (dev->multiplanar)
+ return -ENOTTY;
+
*a = dev->fb_cap;
a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
V4L2_FBUF_CAP_LIST_CLIPPING;
@@ -1185,6 +1208,9 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
struct vivid_dev *dev = video_drvdata(file);
const struct vivid_fmt *fmt;
+ if (dev->multiplanar)
+ return -ENOTTY;
+
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -1202,7 +1228,7 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
fmt = vivid_get_format(dev, a->fmt.pixelformat);
if (!fmt || !fmt->can_do_overlay)
return -EINVAL;
- if (a->fmt.bytesperline < (a->fmt.width * fmt->depth) / 8)
+ if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
return -EINVAL;
if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
return -EINVAL;
@@ -1332,7 +1358,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
break;
case HDMI:
- if (bt->standards & V4L2_DV_BT_STD_CEA861) {
+ if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
else
@@ -1552,6 +1578,65 @@ int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
return 0;
}
+static void find_aspect_ratio(u32 width, u32 height,
+ u32 *num, u32 *denom)
+{
+ if (!(height % 3) && ((height * 4 / 3) == width)) {
+ *num = 4;
+ *denom = 3;
+ } else if (!(height % 9) && ((height * 16 / 9) == width)) {
+ *num = 16;
+ *denom = 9;
+ } else if (!(height % 10) && ((height * 16 / 10) == width)) {
+ *num = 16;
+ *denom = 10;
+ } else if (!(height % 4) && ((height * 5 / 4) == width)) {
+ *num = 5;
+ *denom = 4;
+ } else if (!(height % 9) && ((height * 15 / 9) == width)) {
+ *num = 15;
+ *denom = 9;
+ } else { /* default to 16:9 */
+ *num = 16;
+ *denom = 9;
+ }
+}
+
+static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+ u32 total_h_pixel;
+ u32 total_v_lines;
+ u32 h_freq;
+
+ if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
+ NULL, NULL))
+ return false;
+
+ total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
+ total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
+
+ h_freq = (u32)bt->pixelclock / total_h_pixel;
+
+ if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
+ if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync,
+ bt->polarities, timings))
+ return true;
+ }
+
+ if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
+ struct v4l2_fract aspect_ratio;
+
+ find_aspect_ratio(bt->width, bt->height,
+ &aspect_ratio.numerator,
+ &aspect_ratio.denominator);
+ if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
+ bt->polarities, aspect_ratio, timings))
+ return true;
+ }
+ return false;
+}
+
int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
struct v4l2_dv_timings *timings)
{
@@ -1559,13 +1644,16 @@ int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
if (!vivid_is_hdmi_cap(dev))
return -ENODATA;
- if (vb2_is_busy(&dev->vb_vid_cap_q))
- return -EBUSY;
if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
- 0, NULL, NULL))
+ 0, NULL, NULL) &&
+ !valid_cvt_gtf_timings(timings))
return -EINVAL;
+
if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0))
return 0;
+ if (vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+
dev->dv_timings_cap = *timings;
vivid_update_format_cap(dev, false);
return 0;
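The handler above now falls back to valid_cvt_gtf_timings(), which derives a picture aspect ratio from the requested width and height before calling v4l2_detect_gtf(). A minimal stand-alone sketch of that ratio detection, useful for sanity-checking a resolution outside the driver (the guess_aspect_ratio() helper and the sample resolutions are illustrative only, not driver code):

#include <stdio.h>

/* Mirror of the driver's ratio table: try 4:3, 16:9, 16:10, 5:4, 15:9,
 * then fall back to 16:9, in the same order as find_aspect_ratio() above. */
static void guess_aspect_ratio(unsigned w, unsigned h,
                               unsigned *num, unsigned *den)
{
        if (!(h % 3) && h * 4 / 3 == w)         { *num = 4;  *den = 3;  }
        else if (!(h % 9) && h * 16 / 9 == w)   { *num = 16; *den = 9;  }
        else if (!(h % 10) && h * 16 / 10 == w) { *num = 16; *den = 10; }
        else if (!(h % 4) && h * 5 / 4 == w)    { *num = 5;  *den = 4;  }
        else if (!(h % 9) && h * 15 / 9 == w)   { *num = 15; *den = 9;  }
        else                                    { *num = 16; *den = 9;  }
}

int main(void)
{
        unsigned num, den;

        guess_aspect_ratio(1920, 1080, &num, &den);     /* -> 16:9 */
        printf("1920x1080 -> %u:%u\n", num, den);
        guess_aspect_ratio(1280, 1024, &num, &den);     /* -> 5:4 */
        printf("1280x1024 -> %u:%u\n", num, den);
        return 0;
}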
@@ -1663,18 +1751,14 @@ int vidioc_enum_frameintervals(struct file *file, void *priv,
return -EINVAL;
if (!vivid_is_webcam(dev)) {
- static const struct v4l2_fract step = { 1, 1 };
-
if (fival->index)
return -EINVAL;
if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
return -EINVAL;
if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
return -EINVAL;
- fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
- fival->stepwise.min = tpf_min;
- fival->stepwise.max = tpf_max;
- fival->stepwise.step = step;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = dev->timeperframe_vid_cap;
return 0;
}
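Throughout these vivid changes the single depth field is replaced by per-plane bit_depth[] and vdownsampling[] arrays (the full per-format values follow in the vivid-vid-common.c table below). A small stand-alone sketch of how a plane's byte size falls out of those two arrays; struct fmt_info is a trimmed stand-in for struct vivid_fmt, not the real definition:

#include <stdio.h>

struct fmt_info {
        const char *name;
        unsigned planes;
        unsigned bit_depth[3];          /* bits per pixel, per plane */
        unsigned vdownsampling[3];      /* vertical subsampling, per plane */
};

/* Bytes needed for one plane of a width x height image. */
static unsigned plane_size(const struct fmt_info *f, unsigned p,
                           unsigned width, unsigned height)
{
        unsigned bytesperline = width * f->bit_depth[p] / 8;

        return bytesperline * height / f->vdownsampling[p];
}

int main(void)
{
        /* Matches the "YUV 4:2:0 triplanar" entry below: 8/4/4 bits per
         * pixel, chroma planes vertically downsampled by 2. */
        struct fmt_info yuv420 = { "YUV420", 3, { 8, 4, 4 }, { 1, 2, 2 } };
        unsigned p, total = 0;

        for (p = 0; p < yuv420.planes; p++)
                total += plane_size(&yuv420, p, 1280, 720);
        printf("YUV420 1280x720 needs %u bytes\n", total);   /* 1382400 */
        return 0;
}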
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 6bef1e6d6788..aa446271ad34 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -33,8 +33,9 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 25000000, 600000000,
- V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT,
+ V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
};
@@ -46,145 +47,435 @@ struct vivid_fmt vivid_formats[] = {
{
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.is_yuv = true,
.planes = 1,
- .data_offset = { PLANE0_DATA_OFFSET, 0 },
+ .buffers = 1,
+ .data_offset = { PLANE0_DATA_OFFSET },
},
{
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.is_yuv = true,
.planes = 1,
+ .buffers = 1,
},
{
.name = "4:2:2, packed, YVYU",
.fourcc = V4L2_PIX_FMT_YVYU,
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.is_yuv = true,
.planes = 1,
+ .buffers = 1,
},
{
.name = "4:2:2, packed, VYUY",
.fourcc = V4L2_PIX_FMT_VYUY,
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .is_yuv = true,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "YUV 4:2:2 triplanar",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 1,
+ },
+ {
+ .name = "YUV 4:2:0 triplanar",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 1,
+ },
+ {
+ .name = "YVU 4:2:0 triplanar",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 1,
+ },
+ {
+ .name = "YUV 4:2:0 biplanar",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .name = "YVU 4:2:0 biplanar",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .name = "YUV 4:2:2 biplanar",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .name = "YVU 4:2:2 biplanar",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .name = "YUV 4:4:4 biplanar",
+ .fourcc = V4L2_PIX_FMT_NV24,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 16 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .name = "YVU 4:4:4 biplanar",
+ .fourcc = V4L2_PIX_FMT_NV42,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 16 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 1,
+ },
+ {
+ .name = "YUV555 (LE)",
+ .fourcc = V4L2_PIX_FMT_YUV555, /* uuuvvvvv ayyyyyuu */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x8000,
+ },
+ {
+ .name = "YUV565 (LE)",
+ .fourcc = V4L2_PIX_FMT_YUV565, /* uuuvvvvv yyyyyuuu */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "YUV444",
+ .fourcc = V4L2_PIX_FMT_YUV444, /* uuuuvvvv aaaayyyy */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0xf000,
+ },
+ {
+ .name = "YUV32 (LE)",
+ .fourcc = V4L2_PIX_FMT_YUV32, /* ayuv */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x000000ff,
+ },
+ {
+ .name = "Monochrome",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
.is_yuv = true,
.planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "RGB332",
+ .fourcc = V4L2_PIX_FMT_RGB332, /* rrrgggbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
},
{
.name = "RGB565 (LE)",
.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
.can_do_overlay = true,
},
{
.name = "RGB565 (BE)",
.fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
.can_do_overlay = true,
},
{
+ .name = "RGB444",
+ .fourcc = V4L2_PIX_FMT_RGB444, /* xxxxrrrr ggggbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "XRGB444",
+ .fourcc = V4L2_PIX_FMT_XRGB444, /* xxxxrrrr ggggbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "ARGB444",
+ .fourcc = V4L2_PIX_FMT_ARGB444, /* aaaarrrr ggggbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x00f0,
+ },
+ {
.name = "RGB555 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
- .depth = 16,
+ .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb xrrrrrgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
.can_do_overlay = true,
},
{
.name = "XRGB555 (LE)",
- .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
- .depth = 16,
+ .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb xrrrrrgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
.can_do_overlay = true,
},
{
.name = "ARGB555 (LE)",
.fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
- .depth = 16,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
+ .buffers = 1,
.can_do_overlay = true,
.alpha_mask = 0x8000,
},
{
.name = "RGB555 (BE)",
- .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
- .depth = 16,
+ .fourcc = V4L2_PIX_FMT_RGB555X, /* xrrrrrgg gggbbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
.planes = 1,
- .can_do_overlay = true,
+ .buffers = 1,
+ },
+ {
+ .name = "XRGB555 (BE)",
+ .fourcc = V4L2_PIX_FMT_XRGB555X, /* xrrrrrgg gggbbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "ARGB555 (BE)",
+ .fourcc = V4L2_PIX_FMT_ARGB555X, /* arrrrrgg gggbbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x0080,
},
{
.name = "RGB24 (LE)",
.fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
- .depth = 24,
+ .vdownsampling = { 1 },
+ .bit_depth = { 24 },
.planes = 1,
+ .buffers = 1,
},
{
.name = "RGB24 (BE)",
.fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
- .depth = 24,
+ .vdownsampling = { 1 },
+ .bit_depth = { 24 },
.planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666, /* bbbbbbgg ggggrrrr rrxxxxxx */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
},
{
.name = "RGB32 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
- .depth = 32,
+ .fourcc = V4L2_PIX_FMT_RGB32, /* xrgb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
.planes = 1,
+ .buffers = 1,
},
{
.name = "RGB32 (BE)",
- .fourcc = V4L2_PIX_FMT_BGR32, /* bgra */
- .depth = 32,
+ .fourcc = V4L2_PIX_FMT_BGR32, /* bgrx */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
.planes = 1,
+ .buffers = 1,
},
{
.name = "XRGB32 (LE)",
- .fourcc = V4L2_PIX_FMT_XRGB32, /* argb */
- .depth = 32,
+ .fourcc = V4L2_PIX_FMT_XRGB32, /* xrgb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
.planes = 1,
+ .buffers = 1,
},
{
.name = "XRGB32 (BE)",
- .fourcc = V4L2_PIX_FMT_XBGR32, /* bgra */
- .depth = 32,
+ .fourcc = V4L2_PIX_FMT_XBGR32, /* bgrx */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
.planes = 1,
+ .buffers = 1,
},
{
.name = "ARGB32 (LE)",
.fourcc = V4L2_PIX_FMT_ARGB32, /* argb */
- .depth = 32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
.planes = 1,
+ .buffers = 1,
.alpha_mask = 0x000000ff,
},
{
.name = "ARGB32 (BE)",
.fourcc = V4L2_PIX_FMT_ABGR32, /* bgra */
- .depth = 32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
.planes = 1,
+ .buffers = 1,
.alpha_mask = 0xff000000,
},
{
- .name = "4:2:2, planar, YUV",
+ .name = "Bayer BG/GR",
+ .fourcc = V4L2_PIX_FMT_SBGGR8, /* Bayer BG/GR */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "Bayer GB/RG",
+ .fourcc = V4L2_PIX_FMT_SGBRG8, /* Bayer GB/RG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "Bayer GR/BG",
+ .fourcc = V4L2_PIX_FMT_SGRBG8, /* Bayer GR/BG */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "Bayer RG/GB",
+ .fourcc = V4L2_PIX_FMT_SRGGB8, /* Bayer RG/GB */
+ .vdownsampling = { 1 },
+ .bit_depth = { 8 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .name = "4:2:2, biplanar, YUV",
.fourcc = V4L2_PIX_FMT_NV16M,
- .depth = 8,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
.is_yuv = true,
.planes = 2,
+ .buffers = 2,
.data_offset = { PLANE0_DATA_OFFSET, 0 },
},
{
- .name = "4:2:2, planar, YVU",
+ .name = "4:2:2, biplanar, YVU",
.fourcc = V4L2_PIX_FMT_NV61M,
- .depth = 8,
+ .vdownsampling = { 1, 1 },
+ .bit_depth = { 8, 8 },
.is_yuv = true,
.planes = 2,
+ .buffers = 2,
.data_offset = { 0, PLANE0_DATA_OFFSET },
},
+ {
+ .name = "4:2:0, triplanar, YUV",
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .name = "4:2:0, triplanar, YVU",
+ .fourcc = V4L2_PIX_FMT_YVU420M,
+ .vdownsampling = { 1, 2, 2 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .name = "4:2:0, biplanar, YUV",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 2,
+ },
+ {
+ .name = "4:2:0, biplanar, YVU",
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .vdownsampling = { 1, 2 },
+ .bit_depth = { 8, 8 },
+ .is_yuv = true,
+ .planes = 2,
+ .buffers = 2,
+ },
};
-/* There are 2 multiplanar formats in the list */
-#define VIVID_MPLANAR_FORMATS 2
+/* There are 6 multiplanar formats in the list */
+#define VIVID_MPLANAR_FORMATS 6
const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat)
{
@@ -194,7 +485,7 @@ const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat)
for (k = 0; k < ARRAY_SIZE(vivid_formats); k++) {
fmt = &vivid_formats[k];
if (fmt->fourcc == pixelformat)
- if (fmt->planes == 1 || dev->multiplanar)
+ if (fmt->buffers == 1 || dev->multiplanar)
return fmt;
}
@@ -210,6 +501,13 @@ bool vivid_vid_can_loop(struct vivid_dev *dev)
return false;
if (dev->field_cap != dev->field_out)
return false;
+ /*
+ * While this can be supported, it is just too much work
+ * to actually implement.
+ */
+ if (dev->field_cap == V4L2_FIELD_SEQ_TB ||
+ dev->field_cap == V4L2_FIELD_SEQ_BT)
+ return false;
if (vivid_is_svid_cap(dev) && vivid_is_svid_out(dev)) {
if (!(dev->std_cap & V4L2_STD_525_60) !=
!(dev->std_out & V4L2_STD_525_60))
@@ -397,6 +695,9 @@ int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r)
unsigned w = r->width;
unsigned h = r->height;
+ /* sanitize w and h in case someone passes ~0 as the value */
+ w &= 0xffff;
+ h &= 0xffff;
if (!(flags & V4L2_SEL_FLAG_LE)) {
w++;
h++;
@@ -421,8 +722,9 @@ int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r)
r->top = 0;
if (r->left < 0)
r->left = 0;
- r->left &= ~1;
- r->top &= ~1;
+ /* sanitize left and top in case someone passes ~0 as the value */
+ r->left &= 0xfffe;
+ r->top &= 0xfffe;
if (r->left + w > MAX_WIDTH)
r->left = MAX_WIDTH - w;
if (r->top + h > MAX_HEIGHT)
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 39ff79f6aa67..0af43dc7715c 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -36,9 +36,14 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f
unsigned sizes[], void *alloc_ctxs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
- unsigned planes = dev->fmt_out->planes;
+ const struct vivid_fmt *vfmt = dev->fmt_out;
+ unsigned planes = vfmt->buffers;
unsigned h = dev->fmt_out_rect.height;
unsigned size = dev->bytesperline_out[0] * h;
+ unsigned p;
+
+ for (p = vfmt->buffers; p < vfmt->planes; p++)
+ size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
if (dev->field_out == V4L2_FIELD_ALTERNATE) {
/*
@@ -74,21 +79,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f
if (mp->num_planes != planes)
return -EINVAL;
sizes[0] = mp->plane_fmt[0].sizeimage;
- if (planes == 2) {
- sizes[1] = mp->plane_fmt[1].sizeimage;
- if (sizes[0] < dev->bytesperline_out[0] * h ||
- sizes[1] < dev->bytesperline_out[1] * h)
- return -EINVAL;
- } else if (sizes[0] < size) {
+ if (sizes[0] < size)
return -EINVAL;
+ for (p = 1; p < planes; p++) {
+ sizes[p] = mp->plane_fmt[p].sizeimage;
+ if (sizes[p] < dev->bytesperline_out[p] * h)
+ return -EINVAL;
}
} else {
- if (planes == 2) {
- sizes[0] = dev->bytesperline_out[0] * h;
- sizes[1] = dev->bytesperline_out[1] * h;
- } else {
- sizes[0] = size;
- }
+ for (p = 0; p < planes; p++)
+ sizes[p] = p ? dev->bytesperline_out[p] * h : size;
}
if (vq->num_buffers + *nbuffers < 2)
@@ -101,12 +101,9 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f
* alloc_ctxs array.
*/
- if (planes == 2)
- dprintk(dev, 1, "%s, count=%d, sizes=%u, %u\n", __func__,
- *nbuffers, sizes[0], sizes[1]);
- else
- dprintk(dev, 1, "%s, count=%d, size=%u\n", __func__,
- *nbuffers, sizes[0]);
+ dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
+ for (p = 0; p < planes; p++)
+ dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
return 0;
}
@@ -114,7 +111,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
{
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size;
- unsigned planes = dev->fmt_out->planes;
+ unsigned planes;
unsigned p;
dprintk(dev, 1, "%s\n", __func__);
@@ -122,6 +119,8 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
if (WARN_ON(NULL == dev->fmt_out))
return -EINVAL;
+ planes = dev->fmt_out->planes;
+
if (dev->buf_prepare_error) {
/*
* Error injection: test what happens if buf_prepare() returns
@@ -220,7 +219,7 @@ const struct vb2_ops vivid_vid_out_qops = {
void vivid_update_format_out(struct vivid_dev *dev)
{
struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
- unsigned size;
+ unsigned size, p;
switch (dev->output_type[dev->output]) {
case SVID:
@@ -249,7 +248,7 @@ void vivid_update_format_out(struct vivid_dev *dev)
dev->field_out = V4L2_FIELD_ALTERNATE;
else
dev->field_out = V4L2_FIELD_NONE;
- if (!dev->dvi_d_out && (bt->standards & V4L2_DV_BT_STD_CEA861)) {
+ if (!dev->dvi_d_out && (bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) {
if (bt->width == 720 && bt->height <= 576)
dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
else
@@ -267,9 +266,9 @@ void vivid_update_format_out(struct vivid_dev *dev)
if (V4L2_FIELD_HAS_T_OR_B(dev->field_out))
dev->crop_out.height /= 2;
dev->fmt_out_rect = dev->crop_out;
- dev->bytesperline_out[0] = (dev->sink_rect.width * dev->fmt_out->depth) / 8;
- if (dev->fmt_out->planes == 2)
- dev->bytesperline_out[1] = (dev->sink_rect.width * dev->fmt_out->depth) / 8;
+ for (p = 0; p < dev->fmt_out->planes; p++)
+ dev->bytesperline_out[p] =
+ (dev->sink_rect.width * dev->fmt_out->bit_depth[p]) / 8;
}
/* Map the field to something that is valid for the current output */
@@ -313,21 +312,28 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ const struct vivid_fmt *fmt = dev->fmt_out;
unsigned p;
mp->width = dev->fmt_out_rect.width;
mp->height = dev->fmt_out_rect.height;
mp->field = dev->field_out;
- mp->pixelformat = dev->fmt_out->fourcc;
+ mp->pixelformat = fmt->fourcc;
mp->colorspace = dev->colorspace_out;
mp->ycbcr_enc = dev->ycbcr_enc_out;
mp->quantization = dev->quantization_out;
- mp->num_planes = dev->fmt_out->planes;
+ mp->num_planes = fmt->buffers;
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
mp->plane_fmt[p].sizeimage =
mp->plane_fmt[p].bytesperline * mp->height;
}
+ for (p = fmt->buffers; p < fmt->planes; p++) {
+ unsigned stride = dev->bytesperline_out[p];
+
+ mp->plane_fmt[0].sizeimage +=
+ (stride * mp->height) / fmt->vdownsampling[p];
+ }
return 0;
}
@@ -386,10 +392,10 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
/* This driver supports custom bytesperline values */
/* Calculate the minimum supported bytesperline value */
- bytesperline = (mp->width * fmt->depth) >> 3;
+ bytesperline = (mp->width * fmt->bit_depth[0]) >> 3;
/* Calculate the maximum supported bytesperline value */
- max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->depth) >> 3;
- mp->num_planes = fmt->planes;
+ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[0]) >> 3;
+ mp->num_planes = fmt->buffers;
for (p = 0; p < mp->num_planes; p++) {
if (pfmt[p].bytesperline > max_bpl)
pfmt[p].bytesperline = max_bpl;
@@ -398,11 +404,14 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height;
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
+ for (p = fmt->buffers; p < fmt->planes; p++)
+ pfmt[0].sizeimage += (pfmt[0].bytesperline * fmt->bit_depth[p]) /
+ (fmt->bit_depth[0] * fmt->vdownsampling[p]);
mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
mp->quantization = V4L2_QUANTIZATION_DEFAULT;
if (vivid_is_svid_out(dev)) {
mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
- } else if (dev->dvi_d_out || !(bt->standards & V4L2_DV_BT_STD_CEA861)) {
+ } else if (dev->dvi_d_out || !(bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) {
mp->colorspace = V4L2_COLORSPACE_SRGB;
if (dev->dvi_d_out)
mp->quantization = V4L2_QUANTIZATION_LIM_RANGE;
@@ -429,6 +438,7 @@ int vivid_s_fmt_vid_out(struct file *file, void *priv,
struct vb2_queue *q = &dev->vb_vid_out_q;
int ret = vivid_try_fmt_vid_out(file, priv, f);
unsigned factor = 1;
+ unsigned p;
if (ret < 0)
return ret;
@@ -524,9 +534,12 @@ int vivid_s_fmt_vid_out(struct file *file, void *priv,
dev->fmt_out_rect.width = mp->width;
dev->fmt_out_rect.height = mp->height;
- dev->bytesperline_out[0] = mp->plane_fmt[0].bytesperline;
- if (mp->num_planes > 1)
- dev->bytesperline_out[1] = mp->plane_fmt[1].bytesperline;
+ for (p = 0; p < mp->num_planes; p++)
+ dev->bytesperline_out[p] = mp->plane_fmt[p].bytesperline;
+ for (p = dev->fmt_out->buffers; p < dev->fmt_out->planes; p++)
+ dev->bytesperline_out[p] =
+ (dev->bytesperline_out[0] * dev->fmt_out->bit_depth[p]) /
+ dev->fmt_out->bit_depth[0];
dev->field_out = mp->field;
if (vivid_is_svid_out(dev))
dev->tv_field_out = mp->field;
@@ -1114,13 +1127,13 @@ int vivid_vid_out_s_dv_timings(struct file *file, void *_fh,
if (!vivid_is_hdmi_out(dev))
return -ENODATA;
- if (vb2_is_busy(&dev->vb_vid_out_q))
- return -EBUSY;
if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
0, NULL, NULL))
return -EINVAL;
if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0))
return 0;
+ if (vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
dev->dv_timings_out = *timings;
vivid_update_format_out(dev);
return 0;
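Both vivid s_dv_timings handlers (capture earlier, output just above) now treat re-applying the currently active timings as a no-op before the vb2_is_busy() check, so doing that while streaming no longer returns -EBUSY. A brief user-space sketch of the sequence; the device node path is illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_dv_timings timings;
        int fd = open("/dev/video2", O_RDWR);   /* illustrative vivid node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Read back whatever is currently active, then re-apply it: with the
         * reordered checks above this succeeds even while buffers are queued. */
        if (ioctl(fd, VIDIOC_G_DV_TIMINGS, &timings) == 0 &&
            ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings) == 0)
                printf("timings re-applied\n");
        close(fd);
        return 0;
}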
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index 401e2b77a0b6..7dd763311c0f 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -183,13 +183,14 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable)
*/
static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
+ struct vsp1_bru *bru = to_bru(subdev);
struct v4l2_mbus_framefmt *format;
if (code->pad == BRU_PAD_SINK(0)) {
@@ -201,7 +202,8 @@ static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(fh, BRU_PAD_SINK(0));
+ format = vsp1_entity_get_pad_format(&bru->entity, cfg,
+ BRU_PAD_SINK(0), code->which);
code->code = format->code;
}
@@ -209,7 +211,7 @@ static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int bru_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index)
@@ -228,12 +230,12 @@ static int bru_enum_frame_size(struct v4l2_subdev *subdev,
}
static struct v4l2_rect *bru_get_compose(struct vsp1_bru *bru,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(fh, pad);
+ return v4l2_subdev_get_try_crop(&bru->entity.subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &bru->inputs[pad].compose;
default:
@@ -241,18 +243,18 @@ static struct v4l2_rect *bru_get_compose(struct vsp1_bru *bru,
}
}
-static int bru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int bru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_bru *bru = to_bru(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&bru->entity, fh, fmt->pad,
+ fmt->format = *vsp1_entity_get_pad_format(&bru->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
-static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_fh *fh,
+static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -268,7 +270,7 @@ static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_fh *fh,
default:
/* The BRU can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&bru->entity, fh,
+ format = vsp1_entity_get_pad_format(&bru->entity, cfg,
BRU_PAD_SINK(0), which);
fmt->code = format->code;
break;
@@ -280,15 +282,15 @@ static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_fh *fh,
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
-static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_bru *bru = to_bru(subdev);
struct v4l2_mbus_framefmt *format;
- bru_try_format(bru, fh, fmt->pad, &fmt->format, fmt->which);
+ bru_try_format(bru, cfg, fmt->pad, &fmt->format, fmt->which);
- format = vsp1_entity_get_pad_format(&bru->entity, fh, fmt->pad,
+ format = vsp1_entity_get_pad_format(&bru->entity, cfg, fmt->pad,
fmt->which);
*format = fmt->format;
@@ -296,7 +298,7 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
if (fmt->pad != BRU_PAD_SOURCE) {
struct v4l2_rect *compose;
- compose = bru_get_compose(bru, fh, fmt->pad, fmt->which);
+ compose = bru_get_compose(bru, cfg, fmt->pad, fmt->which);
compose->left = 0;
compose->top = 0;
compose->width = format->width;
@@ -308,7 +310,7 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
unsigned int i;
for (i = 0; i <= BRU_PAD_SOURCE; ++i) {
- format = vsp1_entity_get_pad_format(&bru->entity, fh,
+ format = vsp1_entity_get_pad_format(&bru->entity, cfg,
i, fmt->which);
format->code = fmt->format.code;
}
@@ -318,7 +320,7 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
}
static int bru_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct vsp1_bru *bru = to_bru(subdev);
@@ -335,7 +337,7 @@ static int bru_get_selection(struct v4l2_subdev *subdev,
return 0;
case V4L2_SEL_TGT_COMPOSE:
- sel->r = *bru_get_compose(bru, fh, sel->pad, sel->which);
+ sel->r = *bru_get_compose(bru, cfg, sel->pad, sel->which);
return 0;
default:
@@ -344,7 +346,7 @@ static int bru_get_selection(struct v4l2_subdev *subdev,
}
static int bru_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct vsp1_bru *bru = to_bru(subdev);
@@ -360,7 +362,7 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
/* The compose rectangle top left corner must be inside the output
* frame.
*/
- format = vsp1_entity_get_pad_format(&bru->entity, fh, BRU_PAD_SOURCE,
+ format = vsp1_entity_get_pad_format(&bru->entity, cfg, BRU_PAD_SOURCE,
sel->which);
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
@@ -368,12 +370,12 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
/* Scaling isn't supported, the compose rectangle size must be identical
* to the sink format size.
*/
- format = vsp1_entity_get_pad_format(&bru->entity, fh, sel->pad,
+ format = vsp1_entity_get_pad_format(&bru->entity, cfg, sel->pad,
sel->which);
sel->r.width = format->width;
sel->r.height = format->height;
- compose = bru_get_compose(bru, fh, sel->pad, sel->which);
+ compose = bru_get_compose(bru, cfg, sel->pad, sel->which);
*compose = sel->r;
return 0;
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 79af71d5e270..a453bb4ddd37 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -63,12 +63,12 @@ int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &entity->formats[pad];
default:
@@ -79,14 +79,14 @@ vsp1_entity_get_pad_format(struct vsp1_entity *entity,
/*
* vsp1_entity_init_formats - Initialize formats on all pads
* @subdev: V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad configuration
*
- * Initialize all pad formats with default values. If fh is not NULL, try
+ * Initialize all pad formats with default values. If cfg is not NULL, try
* formats are initialized on the file handle. Otherwise active formats are
* initialized on the device.
*/
void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh)
+ struct v4l2_subdev_pad_config *cfg)
{
struct v4l2_subdev_format format;
unsigned int pad;
@@ -95,17 +95,17 @@ void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
memset(&format, 0, sizeof(format));
format.pad = pad;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY
+ format.which = cfg ? V4L2_SUBDEV_FORMAT_TRY
: V4L2_SUBDEV_FORMAT_ACTIVE;
- v4l2_subdev_call(subdev, pad, set_fmt, fh, &format);
+ v4l2_subdev_call(subdev, pad, set_fmt, cfg, &format);
}
}
static int vsp1_entity_open(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh)
{
- vsp1_entity_init_formats(subdev, fh);
+ vsp1_entity_init_formats(subdev, fh->pad);
return 0;
}
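Every vsp1 pad operation in the hunks above and below now receives a struct v4l2_subdev_pad_config instead of a v4l2_subdev_fh, and TRY formats are fetched through the subdev itself. A condensed sketch of the resulting pattern for an arbitrary entity; the mydev_* names and the static active-format array are placeholders, not vsp1 symbols:

#include <media/v4l2-subdev.h>

/* Placeholder active-format storage; a real driver keeps this per entity. */
static struct v4l2_mbus_framefmt mydev_active_fmt[2];

static struct v4l2_mbus_framefmt *
mydev_get_pad_format(struct v4l2_subdev *sd,
                     struct v4l2_subdev_pad_config *cfg,
                     unsigned int pad, u32 which)
{
        switch (which) {
        case V4L2_SUBDEV_FORMAT_TRY:
                /* TRY formats now live in the pad configuration. */
                return v4l2_subdev_get_try_format(sd, cfg, pad);
        case V4L2_SUBDEV_FORMAT_ACTIVE:
                return &mydev_active_fmt[pad];
        default:
                return NULL;
        }
}

static int mydev_get_fmt(struct v4l2_subdev *sd,
                         struct v4l2_subdev_pad_config *cfg,
                         struct v4l2_subdev_format *fmt)
{
        fmt->format = *mydev_get_pad_format(sd, cfg, fmt->pad, fmt->which);
        return 0;
}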
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index aa20aaa58208..62c768d1c6aa 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -91,10 +91,10 @@ extern const struct media_entity_operations vsp1_media_ops;
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
unsigned int pad, u32 which);
void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh);
+ struct v4l2_subdev_pad_config *cfg);
bool vsp1_entity_is_streaming(struct vsp1_entity *entity);
int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming);
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 0bc0471746c9..8ffb817ae525 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -55,7 +55,7 @@ static int hsit_s_stream(struct v4l2_subdev *subdev, int enable)
*/
static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
@@ -73,12 +73,14 @@ static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(fh, fse->pad);
+ format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fse->pad,
+ fse->which);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -102,25 +104,25 @@ static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
}
static int hsit_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+ fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
static int hsit_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
struct v4l2_mbus_framefmt *format;
- format = vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+ format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
fmt->which);
if (fmt->pad == HSIT_PAD_SOURCE) {
@@ -143,7 +145,7 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&hsit->entity, fh, HSIT_PAD_SOURCE,
+ format = vsp1_entity_get_pad_format(&hsit->entity, cfg, HSIT_PAD_SOURCE,
fmt->which);
*format = fmt->format;
format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 17a6ca7dafe6..39fa5ef20fbb 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -74,13 +74,14 @@ static int lif_s_stream(struct v4l2_subdev *subdev, int enable)
*/
static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
+ struct vsp1_lif *lif = to_lif(subdev);
if (code->pad == LIF_PAD_SINK) {
if (code->index >= ARRAY_SIZE(codes))
@@ -96,7 +97,8 @@ static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(fh, LIF_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&lif->entity, cfg,
+ LIF_PAD_SINK, code->which);
code->code = format->code;
}
@@ -104,12 +106,14 @@ static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int lif_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct vsp1_lif *lif = to_lif(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(fh, LIF_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&lif->entity, cfg, LIF_PAD_SINK,
+ fse->which);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -129,18 +133,18 @@ static int lif_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
-static int lif_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int lif_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_lif *lif = to_lif(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&lif->entity, fh, fmt->pad,
+ fmt->format = *vsp1_entity_get_pad_format(&lif->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
-static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_lif *lif = to_lif(subdev);
@@ -151,7 +155,7 @@ static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
- format = vsp1_entity_get_pad_format(&lif->entity, fh, fmt->pad,
+ format = vsp1_entity_get_pad_format(&lif->entity, cfg, fmt->pad,
fmt->which);
if (fmt->pad == LIF_PAD_SOURCE) {
@@ -173,7 +177,7 @@ static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&lif->entity, fh, LIF_PAD_SOURCE,
+ format = vsp1_entity_get_pad_format(&lif->entity, cfg, LIF_PAD_SOURCE,
fmt->which);
*format = fmt->format;
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index 6f185c3621fe..656ec272a414 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -82,7 +82,7 @@ static int lut_s_stream(struct v4l2_subdev *subdev, int enable)
*/
static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -90,6 +90,7 @@ static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AHSV8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
+ struct vsp1_lut *lut = to_lut(subdev);
struct v4l2_mbus_framefmt *format;
if (code->pad == LUT_PAD_SINK) {
@@ -104,7 +105,8 @@ static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(fh, LUT_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&lut->entity, cfg,
+ LUT_PAD_SINK, code->which);
code->code = format->code;
}
@@ -112,12 +114,14 @@ static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int lut_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct vsp1_lut *lut = to_lut(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(fh, fse->pad);
+ format = vsp1_entity_get_pad_format(&lut->entity, cfg,
+ fse->pad, fse->which);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -140,18 +144,18 @@ static int lut_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
-static int lut_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int lut_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_lut *lut = to_lut(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+ fmt->format = *vsp1_entity_get_pad_format(&lut->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
-static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_lut *lut = to_lut(subdev);
@@ -163,7 +167,7 @@ static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
- format = vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+ format = vsp1_entity_get_pad_format(&lut->entity, cfg, fmt->pad,
fmt->which);
if (fmt->pad == LUT_PAD_SOURCE) {
@@ -182,7 +186,7 @@ static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&lut->entity, fh, LUT_PAD_SOURCE,
+ format = vsp1_entity_get_pad_format(&lut->entity, cfg, LUT_PAD_SOURCE,
fmt->which);
*format = fmt->format;
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 1f1ba26a834a..fa71f4695e16 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -25,7 +25,7 @@
*/
int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -42,13 +42,14 @@ int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
}
int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(fh, fse->pad);
+ format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, fse->pad,
+ fse->which);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -72,11 +73,11 @@ int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
}
static struct v4l2_rect *
-vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_fh *fh, u32 which)
+vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_pad_config *cfg, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(fh, RWPF_PAD_SINK);
+ return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, cfg, RWPF_PAD_SINK);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &rwpf->crop;
default:
@@ -84,18 +85,18 @@ vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_fh *fh, u32 which)
}
}
-int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&rwpf->entity, fh, fmt->pad,
+ fmt->format = *vsp1_entity_get_pad_format(&rwpf->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
-int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
@@ -107,7 +108,7 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
- format = vsp1_entity_get_pad_format(&rwpf->entity, fh, fmt->pad,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, fmt->pad,
fmt->which);
if (fmt->pad == RWPF_PAD_SOURCE) {
@@ -130,14 +131,14 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format = *format;
/* Update the sink crop rectangle. */
- crop = vsp1_rwpf_get_crop(rwpf, fh, fmt->which);
+ crop = vsp1_rwpf_get_crop(rwpf, cfg, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
crop->height = fmt->format.height;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SOURCE,
fmt->which);
*format = fmt->format;
@@ -145,7 +146,7 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
}
int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
@@ -157,11 +158,11 @@ int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+ sel->r = *vsp1_rwpf_get_crop(rwpf, cfg, sel->which);
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
- format = vsp1_entity_get_pad_format(&rwpf->entity, fh,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, cfg,
RWPF_PAD_SINK, sel->which);
sel->r.left = 0;
sel->r.top = 0;
@@ -177,7 +178,7 @@ int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
}
int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
@@ -194,7 +195,7 @@ int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
/* Make sure the crop rectangle is entirely contained in the image. The
* WPF top and left offsets are limited to 255.
*/
- format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SINK,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SINK,
sel->which);
sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
@@ -207,11 +208,11 @@ int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
sel->r.height = min_t(unsigned int, sel->r.height,
format->height - sel->r.top);
- crop = vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+ crop = vsp1_rwpf_get_crop(rwpf, cfg, sel->which);
*crop = sel->r;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SOURCE,
sel->which);
format->width = crop->width;
format->height = crop->height;
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 2cf1f13d3bf9..f452dce1a931 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -51,20 +51,20 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code);
int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse);
-int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt);
-int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt);
int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 1129494c7cfc..6310acab60e7 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -166,13 +166,14 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
*/
static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
+ struct vsp1_sru *sru = to_sru(subdev);
struct v4l2_mbus_framefmt *format;
if (code->pad == SRU_PAD_SINK) {
@@ -187,7 +188,8 @@ static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&sru->entity, cfg,
+ SRU_PAD_SINK, code->which);
code->code = format->code;
}
@@ -195,12 +197,14 @@ static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int sru_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct vsp1_sru *sru = to_sru(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&sru->entity, cfg,
+ SRU_PAD_SINK, fse->which);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -226,18 +230,18 @@ static int sru_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
-static int sru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int sru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_sru *sru = to_sru(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+ fmt->format = *vsp1_entity_get_pad_format(&sru->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
-static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
+static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -258,7 +262,7 @@ static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
case SRU_PAD_SOURCE:
/* The SRU can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&sru->entity, fh,
+ format = vsp1_entity_get_pad_format(&sru->entity, cfg,
SRU_PAD_SINK, which);
fmt->code = format->code;
@@ -288,25 +292,25 @@ static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
-static int sru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int sru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_sru *sru = to_sru(subdev);
struct v4l2_mbus_framefmt *format;
- sru_try_format(sru, fh, fmt->pad, &fmt->format, fmt->which);
+ sru_try_format(sru, cfg, fmt->pad, &fmt->format, fmt->which);
- format = vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+ format = vsp1_entity_get_pad_format(&sru->entity, cfg, fmt->pad,
fmt->which);
*format = fmt->format;
if (fmt->pad == SRU_PAD_SINK) {
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&sru->entity, fh,
+ format = vsp1_entity_get_pad_format(&sru->entity, cfg,
SRU_PAD_SOURCE, fmt->which);
*format = fmt->format;
- sru_try_format(sru, fh, SRU_PAD_SOURCE, format, fmt->which);
+ sru_try_format(sru, cfg, SRU_PAD_SOURCE, format, fmt->which);
}
return 0;
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index a4afec133800..ccc8243e3493 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -169,13 +169,14 @@ static int uds_s_stream(struct v4l2_subdev *subdev, int enable)
*/
static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
+ struct vsp1_uds *uds = to_uds(subdev);
if (code->pad == UDS_PAD_SINK) {
if (code->index >= ARRAY_SIZE(codes))
@@ -191,7 +192,8 @@ static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(fh, UDS_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&uds->entity, cfg,
+ UDS_PAD_SINK, code->which);
code->code = format->code;
}
@@ -199,12 +201,14 @@ static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int uds_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct vsp1_uds *uds = to_uds(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(fh, UDS_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&uds->entity, cfg,
+ UDS_PAD_SINK, fse->which);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -224,18 +228,18 @@ static int uds_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
-static int uds_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int uds_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_uds *uds = to_uds(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&uds->entity, fh, fmt->pad,
+ fmt->format = *vsp1_entity_get_pad_format(&uds->entity, cfg, fmt->pad,
fmt->which);
return 0;
}
-static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_fh *fh,
+static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -256,7 +260,7 @@ static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_fh *fh,
case UDS_PAD_SOURCE:
/* The UDS scales but can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&uds->entity, fh,
+ format = vsp1_entity_get_pad_format(&uds->entity, cfg,
UDS_PAD_SINK, which);
fmt->code = format->code;
@@ -271,25 +275,25 @@ static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_fh *fh,
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
-static int uds_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+static int uds_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_uds *uds = to_uds(subdev);
struct v4l2_mbus_framefmt *format;
- uds_try_format(uds, fh, fmt->pad, &fmt->format, fmt->which);
+ uds_try_format(uds, cfg, fmt->pad, &fmt->format, fmt->which);
- format = vsp1_entity_get_pad_format(&uds->entity, fh, fmt->pad,
+ format = vsp1_entity_get_pad_format(&uds->entity, cfg, fmt->pad,
fmt->which);
*format = fmt->format;
if (fmt->pad == UDS_PAD_SINK) {
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&uds->entity, fh,
+ format = vsp1_entity_get_pad_format(&uds->entity, cfg,
UDS_PAD_SOURCE, fmt->which);
*format = fmt->format;
- uds_try_format(uds, fh, UDS_PAD_SOURCE, format, fmt->which);
+ uds_try_format(uds, cfg, UDS_PAD_SOURCE, format, fmt->which);
}
return 0;
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
new file mode 100644
index 000000000000..d7324c726fc2
--- /dev/null
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -0,0 +1,23 @@
+config VIDEO_XILINX
+ tristate "Xilinx Video IP (EXPERIMENTAL)"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ Driver for Xilinx Video IP Pipelines
+
+if VIDEO_XILINX
+
+config VIDEO_XILINX_TPG
+ tristate "Xilinx Video Test Pattern Generator"
+ depends on VIDEO_XILINX
+ select VIDEO_XILINX_VTC
+ ---help---
+ Driver for the Xilinx Video Test Pattern Generator
+
+config VIDEO_XILINX_VTC
+ tristate "Xilinx Video Timing Controller"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Timing Controller
+
+endif #VIDEO_XILINX
diff --git a/drivers/media/platform/xilinx/Makefile b/drivers/media/platform/xilinx/Makefile
new file mode 100644
index 000000000000..e8a0f2a9f733
--- /dev/null
+++ b/drivers/media/platform/xilinx/Makefile
@@ -0,0 +1,5 @@
+xilinx-video-objs += xilinx-dma.o xilinx-vip.o xilinx-vipp.o
+
+obj-$(CONFIG_VIDEO_XILINX) += xilinx-video.o
+obj-$(CONFIG_VIDEO_XILINX_TPG) += xilinx-tpg.o
+obj-$(CONFIG_VIDEO_XILINX_VTC) += xilinx-vtc.o
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
new file mode 100644
index 000000000000..efde88adf624
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -0,0 +1,766 @@
+/*
+ * Xilinx Video DMA
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma/xilinx_dma.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-dma.h"
+#include "xilinx-vip.h"
+#include "xilinx-vipp.h"
+
+#define XVIP_DMA_DEF_FORMAT V4L2_PIX_FMT_YUYV
+#define XVIP_DMA_DEF_WIDTH 1920
+#define XVIP_DMA_DEF_HEIGHT 1080
+
+/* Minimum and maximum widths are expressed in bytes */
+#define XVIP_DMA_MIN_WIDTH 1U
+#define XVIP_DMA_MAX_WIDTH 65535U
+#define XVIP_DMA_MIN_HEIGHT 1U
+#define XVIP_DMA_MAX_HEIGHT 8191U
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static struct v4l2_subdev *
+xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(local);
+ if (remote == NULL ||
+ media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int xvip_dma_verify_format(struct xvip_dma *dma)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
+ if (subdev == NULL)
+ return -EPIPE;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (dma->fmtinfo->code != fmt.format.code ||
+ dma->format.height != fmt.format.height ||
+ dma->format.width != fmt.format.width ||
+ dma->format.colorspace != fmt.format.colorspace)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Stream Management
+ */
+
+/**
+ * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
+ * @pipe: The pipeline
+ * @start: Start (when true) or stop (when false) the pipeline
+ *
+ * Walk the chain of entities, starting at the pipeline output video node, and
+ * start or stop all of them.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
+{
+ struct xvip_dma *dma = pipe->output;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ entity = &dma->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, start);
+ if (start && ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: The pipeline
+ * @on: Turn the stream on when true or off when false
+ *
+ * The pipeline is shared between all DMA engines connected at its input and
+ * output. While the stream state of DMA engines can be controlled
+ * independently, pipelines have a shared stream state that enables or
+ * disables all entities in the pipeline. For this reason the pipeline uses a
+ * streaming counter that tracks the number of DMA engines that have requested
+ * the stream to be enabled.
+ *
+ * When called with the @on argument set to true, this function will increment
+ * the pipeline streaming count. If the streaming count reaches the number of
+ * DMA engines in the pipeline it will enable all entities that belong to the
+ * pipeline.
+ *
+ * Similarly, when called with the @on argument set to false, this function will
+ * decrement the pipeline streaming count and disable all entities in the
+ * pipeline when the streaming count reaches zero.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. Stopping the pipeline never fails. The pipeline state is
+ * not updated when the operation fails.
+ */
+static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
+{
+ int ret = 0;
+
+ mutex_lock(&pipe->lock);
+
+ if (on) {
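+ /*
+ * The calling DMA engine hasn't been counted yet, so the last engine
+ * to start streaming sees a count of num_dmas - 1.
+ */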
+ if (pipe->stream_count == pipe->num_dmas - 1) {
+ ret = xvip_pipeline_start_stop(pipe, true);
+ if (ret < 0)
+ goto done;
+ }
+ pipe->stream_count++;
+ } else {
+ if (--pipe->stream_count == 0)
+ xvip_pipeline_start_stop(pipe, false);
+ }
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
+ struct xvip_dma *start)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity = &start->video.entity;
+ struct media_device *mdev = entity->parent;
+ unsigned int num_inputs = 0;
+ unsigned int num_outputs = 0;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the video nodes. */
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ struct xvip_dma *dma;
+
+ if (entity->type != MEDIA_ENT_T_DEVNODE_V4L)
+ continue;
+
+ dma = to_xvip_dma(media_entity_to_video_device(entity));
+
+ if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
+ pipe->output = dma;
+ num_outputs++;
+ } else {
+ num_inputs++;
+ }
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ /* We need exactly one output and zero or one input. */
+ if (num_outputs != 1 || num_inputs > 1)
+ return -EPIPE;
+
+ pipe->num_dmas = num_inputs + num_outputs;
+
+ return 0;
+}
+
+static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ pipe->num_dmas = 0;
+ pipe->output = NULL;
+}
+
+/**
+ * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
+ * @pipe: the pipeline
+ *
+ * Decrease the pipeline use count and clean it up if we were the last user.
+ */
+static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ mutex_lock(&pipe->lock);
+
+ /* If we're the last user clean up the pipeline. */
+ if (--pipe->use_count == 0)
+ __xvip_pipeline_cleanup(pipe);
+
+ mutex_unlock(&pipe->lock);
+}
+
+/**
+ * xvip_pipeline_prepare - Prepare the pipeline for streaming
+ * @pipe: the pipeline
+ * @dma: DMA engine at one end of the pipeline
+ *
+ * Validate the pipeline if no user exists yet, otherwise just increase the use
+ * count.
+ *
+ * Return: 0 if successful or -EPIPE if the pipeline is not valid.
+ */
+static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
+ struct xvip_dma *dma)
+{
+ int ret;
+
+ mutex_lock(&pipe->lock);
+
+ /* If we're the first user validate and initialize the pipeline. */
+ if (pipe->use_count == 0) {
+ ret = xvip_pipeline_validate(pipe, dma);
+ if (ret < 0) {
+ __xvip_pipeline_cleanup(pipe);
+ goto done;
+ }
+ }
+
+ pipe->use_count++;
+ ret = 0;
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 queue operations
+ */
+
+/**
+ * struct xvip_dma_buffer - Video DMA buffer
+ * @buf: vb2 buffer base object
+ * @queue: buffer list entry in the DMA engine queued buffers list
+ * @dma: DMA channel that uses the buffer
+ */
+struct xvip_dma_buffer {
+ struct vb2_buffer buf;
+ struct list_head queue;
+ struct xvip_dma *dma;
+};
+
+#define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
+
+static void xvip_dma_complete(void *param)
+{
+ struct xvip_dma_buffer *buf = param;
+ struct xvip_dma *dma = buf->dma;
+
+ spin_lock(&dma->queued_lock);
+ list_del(&buf->queue);
+ spin_unlock(&dma->queued_lock);
+
+ buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
+ buf->buf.v4l2_buf.sequence = dma->sequence++;
+ v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
+ vb2_set_plane_payload(&buf->buf, 0, dma->format.sizeimage);
+ vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+}
+
+static int
+xvip_dma_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vq);
+
+ /* Make sure the image size is large enough. */
+ if (fmt && fmt->fmt.pix.sizeimage < dma->format.sizeimage)
+ return -EINVAL;
+
+ *nplanes = 1;
+
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : dma->format.sizeimage;
+ alloc_ctxs[0] = dma->alloc_ctx;
+
+ return 0;
+}
+
+static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+
+ buf->dma = dma;
+
+ return 0;
+}
+
+static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ u32 flags;
+
+ if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ dma->xt.dir = DMA_DEV_TO_MEM;
+ dma->xt.src_sgl = false;
+ dma->xt.dst_sgl = true;
+ dma->xt.dst_start = addr;
+ } else {
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ dma->xt.dir = DMA_MEM_TO_DEV;
+ dma->xt.src_sgl = true;
+ dma->xt.dst_sgl = false;
+ dma->xt.src_start = addr;
+ }
+
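+ /*
+ * Describe one interleaved transfer: numf lines of width * bpp bytes
+ * each, separated by an inter-chunk gap (icg) that pads every line up
+ * to bytesperline bytes.
+ */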
+ dma->xt.frame_size = 1;
+ dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
+ dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
+ dma->xt.numf = dma->format.height;
+
+ desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
+ if (!desc) {
+ dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
+ vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ return;
+ }
+ desc->callback = xvip_dma_complete;
+ desc->callback_param = buf;
+
+ spin_lock_irq(&dma->queued_lock);
+ list_add_tail(&buf->queue, &dma->queued_bufs);
+ spin_unlock_irq(&dma->queued_lock);
+
+ dmaengine_submit(desc);
+
+ if (vb2_is_streaming(&dma->queue))
+ dma_async_issue_pending(dma->dma);
+}
+
+static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vq);
+ struct xvip_dma_buffer *buf, *nbuf;
+ struct xvip_pipeline *pipe;
+ int ret;
+
+ dma->sequence = 0;
+
+ /*
+ * Start streaming on the pipeline. No link touching an entity in the
+ * pipeline can be activated or deactivated once streaming is started.
+ *
+ * Use the pipeline object embedded in the first DMA object that starts
+ * streaming.
+ */
+ pipe = dma->video.entity.pipe
+ ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
+
+ ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto error;
+
+ /* Verify that the configured format matches the output of the
+ * connected subdev.
+ */
+ ret = xvip_dma_verify_format(dma);
+ if (ret < 0)
+ goto error_stop;
+
+ ret = xvip_pipeline_prepare(pipe, dma);
+ if (ret < 0)
+ goto error_stop;
+
+ /* Start the DMA engine. This must be done before starting the blocks
+ * in the pipeline to avoid DMA synchronization issues.
+ */
+ dma_async_issue_pending(dma->dma);
+
+ /* Start the pipeline. */
+ xvip_pipeline_set_stream(pipe, true);
+
+ return 0;
+
+error_stop:
+ media_entity_pipeline_stop(&dma->video.entity);
+
+error:
+ /* Give back all queued buffers to videobuf2. */
+ spin_lock_irq(&dma->queued_lock);
+ list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
+ vb2_buffer_done(&buf->buf, VB2_BUF_STATE_QUEUED);
+ list_del(&buf->queue);
+ }
+ spin_unlock_irq(&dma->queued_lock);
+
+ return ret;
+}
+
+static void xvip_dma_stop_streaming(struct vb2_queue *vq)
+{
+ struct xvip_dma *dma = vb2_get_drv_priv(vq);
+ struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
+ struct xvip_dma_buffer *buf, *nbuf;
+
+ /* Stop the pipeline. */
+ xvip_pipeline_set_stream(pipe, false);
+
+ /* Stop and reset the DMA engine. */
+ dmaengine_terminate_all(dma->dma);
+
+ /* Cleanup the pipeline and mark it as being stopped. */
+ xvip_pipeline_cleanup(pipe);
+ media_entity_pipeline_stop(&dma->video.entity);
+
+ /* Give back all queued buffers to videobuf2. */
+ spin_lock_irq(&dma->queued_lock);
+ list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
+ vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ list_del(&buf->queue);
+ }
+ spin_unlock_irq(&dma->queued_lock);
+}
+
+static struct vb2_ops xvip_dma_queue_qops = {
+ .queue_setup = xvip_dma_queue_setup,
+ .buf_prepare = xvip_dma_buffer_prepare,
+ .buf_queue = xvip_dma_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = xvip_dma_start_streaming,
+ .stop_streaming = xvip_dma_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | dma->xdev->v4l2_caps;
+
+ if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+
+ strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
+ strlcpy(cap->card, dma->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
+ dma->xdev->dev->of_node->name, dma->port);
+
+ return 0;
+}
+
+/* FIXME: without this callback function, some applications are not configured
+ * with the correct formats, resulting in frames in the wrong format. Whether
+ * this callback is required is not clearly defined, so it should be clarified
+ * through the mailing list.
+ */
+static int
+xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->pixelformat = dma->format.pixelformat;
+ strlcpy(f->description, dma->fmtinfo->description,
+ sizeof(f->description));
+
+ return 0;
+}
+
+static int
+xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ format->fmt.pix = dma->format;
+
+ return 0;
+}
+
+static void
+__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
+ const struct xvip_video_format **fmtinfo)
+{
+ const struct xvip_video_format *info;
+ unsigned int min_width;
+ unsigned int max_width;
+ unsigned int min_bpl;
+ unsigned int max_bpl;
+ unsigned int width;
+ unsigned int align;
+ unsigned int bpl;
+
+ /* Retrieve format information and select the default format if the
+ * requested format isn't supported.
+ */
+ info = xvip_get_format_by_fourcc(pix->pixelformat);
+ if (IS_ERR(info))
+ info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
+
+ pix->pixelformat = info->fourcc;
+ pix->field = V4L2_FIELD_NONE;
+
+ /* The transfer alignment requirements are expressed in bytes. Compute
+ * the minimum and maximum values, clamp the requested width and convert
+ * it back to pixels.
+ */
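+ /*
+ * For example (hypothetical values): with dma->align = 4 and a format
+ * using 2 bytes per pixel, align = lcm(4, 2) = 4, so a requested width
+ * of 1921 pixels (3842 bytes) is rounded down to 3840 bytes, i.e. 1920
+ * pixels.
+ */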
+ align = lcm(dma->align, info->bpp);
+ min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
+ max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
+ width = rounddown(pix->width * info->bpp, align);
+
+ pix->width = clamp(width, min_width, max_width) / info->bpp;
+ pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+ /* Clamp the requested bytes per line value. If the maximum bytes per
+ * line value is zero, the module doesn't support user configurable line
+ * sizes. Override the requested value with the minimum in that case.
+ */
+ min_bpl = pix->width * info->bpp;
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+ bpl = rounddown(pix->bytesperline, dma->align);
+
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ if (fmtinfo)
+ *fmtinfo = info;
+}
+
+static int
+xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
+ return 0;
+}
+
+static int
+xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ const struct xvip_video_format *info;
+
+ __xvip_dma_try_format(dma, &format->fmt.pix, &info);
+
+ if (vb2_is_busy(&dma->queue))
+ return -EBUSY;
+
+ dma->format = format->fmt.pix;
+ dma->fmtinfo = info;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
+ .vidioc_querycap = xvip_dma_querycap,
+ .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
+ .vidioc_g_fmt_vid_cap = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_out = xvip_dma_get_format,
+ .vidioc_s_fmt_vid_cap = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_out = xvip_dma_set_format,
+ .vidioc_try_fmt_vid_cap = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_out = xvip_dma_try_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static const struct v4l2_file_operations xvip_dma_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Xilinx Video DMA Core
+ */
+
+int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
+ enum v4l2_buf_type type, unsigned int port)
+{
+ char name[14];
+ int ret;
+
+ dma->xdev = xdev;
+ dma->port = port;
+ mutex_init(&dma->lock);
+ mutex_init(&dma->pipe.lock);
+ INIT_LIST_HEAD(&dma->queued_bufs);
+ spin_lock_init(&dma->queued_lock);
+
+ dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
+ dma->format.pixelformat = dma->fmtinfo->fourcc;
+ dma->format.colorspace = V4L2_COLORSPACE_SRGB;
+ dma->format.field = V4L2_FIELD_NONE;
+ dma->format.width = XVIP_DMA_DEF_WIDTH;
+ dma->format.height = XVIP_DMA_DEF_HEIGHT;
+ dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
+ dma->format.sizeimage = dma->format.bytesperline * dma->format.height;
+
+ /* Initialize the media entity... */
+ dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+ ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_init(&dma->video.entity, 1, &dma->pad, 0);
+ if (ret < 0)
+ goto error;
+
+ /* ... and the video node... */
+ dma->video.fops = &xvip_dma_fops;
+ dma->video.v4l2_dev = &xdev->v4l2_dev;
+ dma->video.queue = &dma->queue;
+ snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
+ xdev->dev->of_node->name,
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
+ port);
+ dma->video.vfl_type = VFL_TYPE_GRABBER;
+ dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+ ? VFL_DIR_RX : VFL_DIR_TX;
+ dma->video.release = video_device_release_empty;
+ dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
+ dma->video.lock = &dma->lock;
+
+ video_set_drvdata(&dma->video, dma);
+
+ /* ... and the buffers queue... */
+ dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
+ if (IS_ERR(dma->alloc_ctx))
+ goto error;
+
+ /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
+ * V4L2 APIs would be inefficient. Testing on the command line with
+ * 'cat /dev/video?' thus won't be possible, but given that the driver
+ * requires a test tool to set up the pipeline anyway before any video
+ * stream can be started, requiring a specific V4L2 test tool instead
+ * of 'cat' isn't really a drawback.
+ */
+ dma->queue.type = type;
+ dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dma->queue.lock = &dma->lock;
+ dma->queue.drv_priv = dma;
+ dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
+ dma->queue.ops = &xvip_dma_queue_qops;
+ dma->queue.mem_ops = &vb2_dma_contig_memops;
+ dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
+ | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
+ ret = vb2_queue_init(&dma->queue);
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
+ goto error;
+ }
+
+ /* ... and the DMA channel. */
+ sprintf(name, "port%u", port);
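+ /* The channel is looked up by name; dma_request_slave_channel()
+ * matches it against the device tree dma-names property.
+ */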
+ dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
+ if (dma->dma == NULL) {
+ dev_err(dma->xdev->dev, "no VDMA channel found\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dma->align = 1 << dma->dma->device->copy_align;
+
+ ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ xvip_dma_cleanup(dma);
+ return ret;
+}
+
+void xvip_dma_cleanup(struct xvip_dma *dma)
+{
+ if (video_is_registered(&dma->video))
+ video_unregister_device(&dma->video);
+
+ if (dma->dma)
+ dma_release_channel(dma->dma);
+
+ if (!IS_ERR_OR_NULL(dma->alloc_ctx))
+ vb2_dma_contig_cleanup_ctx(dma->alloc_ctx);
+
+ media_entity_cleanup(&dma->video.entity);
+
+ mutex_destroy(&dma->lock);
+ mutex_destroy(&dma->pipe.lock);
+}
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
new file mode 100644
index 000000000000..a540111f8d3d
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -0,0 +1,109 @@
+/*
+ * Xilinx Video DMA
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VIP_DMA_H__
+#define __XILINX_VIP_DMA_H__
+
+#include <linux/dmaengine.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+
+struct dma_chan;
+struct xvip_composite_device;
+struct xvip_video_format;
+
+/**
+ * struct xvip_pipeline - Xilinx Video IP pipeline structure
+ * @pipe: media pipeline
+ * @lock: protects the pipeline @use_count and @stream_count
+ * @use_count: number of DMA engines using the pipeline
+ * @stream_count: number of DMA engines currently streaming
+ * @num_dmas: number of DMA engines in the pipeline
+ * @output: DMA engine at the output of the pipeline
+ */
+struct xvip_pipeline {
+ struct media_pipeline pipe;
+
+ struct mutex lock;
+ unsigned int use_count;
+ unsigned int stream_count;
+
+ unsigned int num_dmas;
+ struct xvip_dma *output;
+};
+
+static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
+{
+ return container_of(e->pipe, struct xvip_pipeline, pipe);
+}
+
+/**
+ * struct xvip_dma - Video DMA channel
+ * @list: list entry in a composite device dmas list
+ * @video: V4L2 video device associated with the DMA channel
+ * @pad: media pad for the video device entity
+ * @xdev: composite device the DMA channel belongs to
+ * @pipe: pipeline belonging to the DMA channel
+ * @port: composite device DT node port number for the DMA channel
+ * @lock: protects the @format, @fmtinfo and @queue fields
+ * @format: active V4L2 pixel format
+ * @fmtinfo: format information corresponding to the active @format
+ * @queue: vb2 buffers queue
+ * @alloc_ctx: allocation context for the vb2 @queue
+ * @sequence: V4L2 buffers sequence number
+ * @queued_bufs: list of queued buffers
+ * @queued_lock: protects the @queued_bufs list
+ * @dma: DMA engine channel
+ * @align: transfer alignment required by the DMA channel (in bytes)
+ * @xt: dma interleaved template for dma configuration
+ * @sgl: data chunk structure for dma_interleaved_template
+ */
+struct xvip_dma {
+ struct list_head list;
+ struct video_device video;
+ struct media_pad pad;
+
+ struct xvip_composite_device *xdev;
+ struct xvip_pipeline pipe;
+ unsigned int port;
+
+ struct mutex lock;
+ struct v4l2_pix_format format;
+ const struct xvip_video_format *fmtinfo;
+
+ struct vb2_queue queue;
+ void *alloc_ctx;
+ unsigned int sequence;
+
+ struct list_head queued_bufs;
+ spinlock_t queued_lock;
+
+ struct dma_chan *dma;
+ unsigned int align;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+#define to_xvip_dma(vdev) container_of(vdev, struct xvip_dma, video)
+
+int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
+ enum v4l2_buf_type type, unsigned int port);
+void xvip_dma_cleanup(struct xvip_dma *dma);
+
+#endif /* __XILINX_VIP_DMA_H__ */
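A minimal usage sketch of the interface declared above (illustrative only: the
helper name and the port number are hypothetical, error unwinding and list
bookkeeping are omitted, and in this series the real caller is the xilinx-vipp
composite pipeline driver, whose xvip_composite_device fields come from
xilinx-vipp.h):

    #include <linux/device.h>
    #include <linux/slab.h>
    #include "xilinx-dma.h"
    #include "xilinx-vipp.h"	/* struct xvip_composite_device */

    /* Register one capture video node backed by the VDMA channel on DT port 0. */
    static int example_register_capture_dma(struct xvip_composite_device *xdev)
    {
            struct xvip_dma *dma;

            dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
            if (!dma)
                    return -ENOMEM;

            /* Creates the media pad, the vb2 queue and the video device node. */
            return xvip_dma_init(xdev, dma, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
    }

On teardown the owner calls xvip_dma_cleanup(dma) to unregister the video
device and release the DMA channel.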
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
new file mode 100644
index 000000000000..b5f7d5ecb7f6
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -0,0 +1,931 @@
+/*
+ * Xilinx Test Pattern Generator
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+#include "xilinx-vtc.h"
+
+#define XTPG_CTRL_STATUS_SLAVE_ERROR (1 << 16)
+#define XTPG_CTRL_IRQ_SLAVE_ERROR (1 << 16)
+
+#define XTPG_PATTERN_CONTROL 0x0100
+#define XTPG_PATTERN_MASK (0xf << 0)
+#define XTPG_PATTERN_CONTROL_CROSS_HAIRS (1 << 4)
+#define XTPG_PATTERN_CONTROL_MOVING_BOX (1 << 5)
+#define XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT 6
+#define XTPG_PATTERN_CONTROL_COLOR_MASK_MASK (0xf << 6)
+#define XTPG_PATTERN_CONTROL_STUCK_PIXEL (1 << 9)
+#define XTPG_PATTERN_CONTROL_NOISE (1 << 10)
+#define XTPG_PATTERN_CONTROL_MOTION (1 << 12)
+#define XTPG_MOTION_SPEED 0x0104
+#define XTPG_CROSS_HAIRS 0x0108
+#define XTPG_CROSS_HAIRS_ROW_SHIFT 0
+#define XTPG_CROSS_HAIRS_ROW_MASK (0xfff << 0)
+#define XTPG_CROSS_HAIRS_COLUMN_SHIFT 16
+#define XTPG_CROSS_HAIRS_COLUMN_MASK (0xfff << 16)
+#define XTPG_ZPLATE_HOR_CONTROL 0x010c
+#define XTPG_ZPLATE_VER_CONTROL 0x0110
+#define XTPG_ZPLATE_START_SHIFT 0
+#define XTPG_ZPLATE_START_MASK (0xffff << 0)
+#define XTPG_ZPLATE_SPEED_SHIFT 16
+#define XTPG_ZPLATE_SPEED_MASK (0xffff << 16)
+#define XTPG_BOX_SIZE 0x0114
+#define XTPG_BOX_COLOR 0x0118
+#define XTPG_STUCK_PIXEL_THRESH 0x011c
+#define XTPG_NOISE_GAIN 0x0120
+#define XTPG_BAYER_PHASE 0x0124
+#define XTPG_BAYER_PHASE_RGGB 0
+#define XTPG_BAYER_PHASE_GRBG 1
+#define XTPG_BAYER_PHASE_GBRG 2
+#define XTPG_BAYER_PHASE_BGGR 3
+#define XTPG_BAYER_PHASE_OFF 4
+
+/*
+ * The minimum blanking value is one clock cycle for the front porch, one clock
+ * cycle for the sync pulse and one clock cycle for the back porch.
+ */
+#define XTPG_MIN_HBLANK 3
+#define XTPG_MAX_HBLANK (XVTC_MAX_HSIZE - XVIP_MIN_WIDTH)
+#define XTPG_MIN_VBLANK 3
+#define XTPG_MAX_VBLANK (XVTC_MAX_VSIZE - XVIP_MIN_HEIGHT)
+
+/**
+ * struct xtpg_device - Xilinx Test Pattern Generator device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @npads: number of pads (1 or 2)
+ * @has_input: whether an input is connected to the sink pad
+ * @formats: active V4L2 media bus format for each pad
+ * @default_format: default V4L2 media bus format
+ * @vip_format: format information corresponding to the active format
+ * @bayer: boolean flag if TPG is set to any bayer format
+ * @ctrl_handler: control handler
+ * @hblank: horizontal blanking control
+ * @vblank: vertical blanking control
+ * @pattern: test pattern control
+ * @streaming: is the video stream active
+ * @vtc: video timing controller
+ * @vtmux_gpio: video timing mux GPIO
+ */
+struct xtpg_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+ unsigned int npads;
+ bool has_input;
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_format;
+ const struct xvip_video_format *vip_format;
+ bool bayer;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *pattern;
+ bool streaming;
+
+ struct xvtc_device *vtc;
+ struct gpio_desc *vtmux_gpio;
+};
+
+static inline struct xtpg_device *to_tpg(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xtpg_device, xvip.subdev);
+}
+
+static u32 xtpg_get_bayer_phase(unsigned int code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return XTPG_BAYER_PHASE_RGGB;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ return XTPG_BAYER_PHASE_GRBG;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ return XTPG_BAYER_PHASE_GBRG;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return XTPG_BAYER_PHASE_BGGR;
+ default:
+ return XTPG_BAYER_PHASE_OFF;
+ }
+}
+
+static void __xtpg_update_pattern_control(struct xtpg_device *xtpg,
+ bool passthrough, bool pattern)
+{
+ u32 pattern_mask = (1 << (xtpg->pattern->maximum + 1)) - 1;
+
+ /*
+ * If the TPG has no sink pad or no input connected to its sink pad,
+ * passthrough mode can't be enabled.
+ */
+ if (xtpg->npads == 1 || !xtpg->has_input)
+ passthrough = false;
+
+ /* If passthrough mode is allowed unmask bit 0. */
+ if (passthrough)
+ pattern_mask &= ~1;
+
+ /* If test pattern mode is allowed unmask all other bits. */
+ if (pattern)
+ pattern_mask &= 1;
+
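+ /*
+ * For a menu control the step argument of __v4l2_ctrl_modify_range()
+ * is interpreted as the menu skip mask: every bit left set in
+ * pattern_mask disables the corresponding test pattern menu entry.
+ */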
+ __v4l2_ctrl_modify_range(xtpg->pattern, 0, xtpg->pattern->maximum,
+ pattern_mask, pattern ? 9 : 0);
+}
+
+static void xtpg_update_pattern_control(struct xtpg_device *xtpg,
+ bool passthrough, bool pattern)
+{
+ mutex_lock(xtpg->ctrl_handler.lock);
+ __xtpg_update_pattern_control(xtpg, passthrough, pattern);
+ mutex_unlock(xtpg->ctrl_handler.lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+ unsigned int width = xtpg->formats[0].width;
+ unsigned int height = xtpg->formats[0].height;
+ bool passthrough;
+ u32 bayer_phase;
+
+ if (!enable) {
+ xvip_stop(&xtpg->xvip);
+ if (xtpg->vtc)
+ xvtc_generator_stop(xtpg->vtc);
+
+ xtpg_update_pattern_control(xtpg, true, true);
+ xtpg->streaming = false;
+ return 0;
+ }
+
+ xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
+
+ if (xtpg->vtc) {
+ struct xvtc_config config = {
+ .hblank_start = width,
+ .hsync_start = width + 1,
+ .vblank_start = height,
+ .vsync_start = height + 1,
+ };
+ unsigned int htotal;
+ unsigned int vtotal;
+
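+ /*
+ * The total line and frame sizes are the active size plus the
+ * programmed blanking, clamped to the maximum sizes supported by
+ * the video timing controller.
+ */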
+ htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
+ v4l2_ctrl_g_ctrl(xtpg->hblank) + width);
+ vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
+ v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
+
+ config.hsync_end = htotal - 1;
+ config.hsize = htotal;
+ config.vsync_end = vtotal - 1;
+ config.vsize = vtotal;
+
+ xvtc_generator_start(xtpg->vtc, &config);
+ }
+
+ /*
+ * Configure the bayer phase and video timing mux based on the
+ * operation mode (passthrough or test pattern generation). The test
+ * pattern can be modified by the control set handler, so we need to
+ * take the control lock here to avoid races.
+ */
+ mutex_lock(xtpg->ctrl_handler.lock);
+
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_MASK, xtpg->pattern->cur.val);
+
+ /*
+ * Switching between passthrough and test pattern generation modes isn't
+ * allowed during streaming, so update the control range accordingly.
+ */
+ passthrough = xtpg->pattern->cur.val == 0;
+ __xtpg_update_pattern_control(xtpg, passthrough, !passthrough);
+
+ xtpg->streaming = true;
+
+ mutex_unlock(xtpg->ctrl_handler.lock);
+
+ /*
+ * For TPG v5.0, the bayer phase needs to be off in passthrough mode,
+ * otherwise the external input would be subsampled.
+ */
+ bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
+ : xtpg_get_bayer_phase(xtpg->formats[0].code);
+ xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
+
+ if (xtpg->vtmux_gpio)
+ gpiod_set_value_cansleep(xtpg->vtmux_gpio, !passthrough);
+
+ xvip_start(&xtpg->xvip);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xtpg_get_pad_format(struct xtpg_device *xtpg,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xtpg->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xtpg->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xtpg_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+
+ fmt->format = *__xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+static int xtpg_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ u32 bayer_phase;
+
+ __format = __xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+
+ /* In two pads mode the source pad format is always identical to the
+ * sink pad format.
+ */
+ if (xtpg->npads == 2 && fmt->pad == 1) {
+ fmt->format = *__format;
+ return 0;
+ }
+
+ /* Bayer phase is configurable at runtime */
+ if (xtpg->bayer) {
+ bayer_phase = xtpg_get_bayer_phase(fmt->format.code);
+ if (bayer_phase != XTPG_BAYER_PHASE_OFF)
+ __format->code = fmt->format.code;
+ }
+
+ xvip_set_format_size(__format, fmt);
+
+ fmt->format = *__format;
+
+ /* Propagate the format to the source pad. */
+ if (xtpg->npads == 2) {
+ __format = __xtpg_get_pad_format(xtpg, cfg, 1, fmt->which);
+ *__format = fmt->format;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ /* The min / max values for pad 0 are always fixed, in both the one and
+ * two pads modes. In two pads mode, the source pad (pad 1) size is
+ * identical to the sink pad size.
+ */
+ if (fse->pad == 0) {
+ fse->min_width = XVIP_MIN_WIDTH;
+ fse->max_width = XVIP_MAX_WIDTH;
+ fse->min_height = XVIP_MIN_HEIGHT;
+ fse->max_height = XVIP_MAX_HEIGHT;
+ } else {
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static int xtpg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ *format = xtpg->default_format;
+
+ if (xtpg->npads == 2) {
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, 1);
+ *format = xtpg->default_format;
+ }
+
+ return 0;
+}
+
+static int xtpg_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xtpg_device *xtpg = container_of(ctrl->handler,
+ struct xtpg_device,
+ ctrl_handler);
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_MASK, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_CROSS_HAIRS:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_CROSS_HAIRS, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_MOVING_BOX:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_MOVING_BOX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_COLOR_MASK:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
+ ctrl->val <<
+ XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_STUCK_PIXEL:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_STUCK_PIXEL, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_NOISE:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_NOISE, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_MOTION:
+ xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_MOTION, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_MOTION_SPEED:
+ xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_ROW_MASK,
+ ctrl->val << XTPG_CROSS_HAIRS_ROW_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_COLUMN_MASK,
+ ctrl->val << XTPG_CROSS_HAIRS_COLUMN_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_HOR_START:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_VER_START:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED:
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ return 0;
+ case V4L2_CID_XILINX_TPG_BOX_SIZE:
+ xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_BOX_COLOR:
+ xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH:
+ xvip_write(&xtpg->xvip, XTPG_STUCK_PIXEL_THRESH, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_TPG_NOISE_GAIN:
+ xvip_write(&xtpg->xvip, XTPG_NOISE_GAIN, ctrl->val);
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xtpg_ctrl_ops = {
+ .s_ctrl = xtpg_s_ctrl,
+};
+
+static struct v4l2_subdev_core_ops xtpg_core_ops = {
+};
+
+static struct v4l2_subdev_video_ops xtpg_video_ops = {
+ .s_stream = xtpg_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xtpg_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xtpg_enum_frame_size,
+ .get_fmt = xtpg_get_format,
+ .set_fmt = xtpg_set_format,
+};
+
+static struct v4l2_subdev_ops xtpg_ops = {
+ .core = &xtpg_core_ops,
+ .video = &xtpg_video_ops,
+ .pad = &xtpg_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xtpg_internal_ops = {
+ .open = xtpg_open,
+ .close = xtpg_close,
+};
+
+/*
+ * Control Config
+ */
+
+static const char *const xtpg_pattern_strings[] = {
+ "Passthrough",
+ "Horizontal Ramp",
+ "Vertical Ramp",
+ "Temporal Ramp",
+ "Solid Red",
+ "Solid Green",
+ "Solid Blue",
+ "Solid Black",
+ "Solid White",
+ "Color Bars",
+ "Zone Plate",
+ "Tartan Color Bars",
+ "Cross Hatch",
+ "None",
+ "Vertical/Horizontal Ramps",
+ "Black/White Checker Board",
+};
+
+static struct v4l2_ctrl_config xtpg_ctrls[] = {
+ {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
+ .name = "Test Pattern: Cross Hairs",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
+ .name = "Test Pattern: Moving Box",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
+ .name = "Test Pattern: Color Mask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = 0xf,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
+ .name = "Test Pattern: Stuck Pixel",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_NOISE,
+ .name = "Test Pattern: Noise",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOTION,
+ .name = "Test Pattern: Motion",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOTION_SPEED,
+ .name = "Test Pattern: Motion Speed",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 8) - 1,
+ .step = 1,
+ .def = 4,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW,
+ .name = "Test Pattern: Cross Hairs Row",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0x64,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN,
+ .name = "Test Pattern: Cross Hairs Column",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0x64,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_HOR_START,
+ .name = "Test Pattern: Zplate Horizontal Start Pos",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0x1e,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED,
+ .name = "Test Pattern: Zplate Horizontal Speed",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_VER_START,
+ .name = "Test Pattern: Zplate Vertical Start Pos",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 1,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED,
+ .name = "Test Pattern: Zplate Vertical Speed",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_BOX_SIZE,
+ .name = "Test Pattern: Box Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0x32,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_BOX_COLOR,
+ .name = "Test Pattern: Box Color(RGB)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 24) - 1,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH,
+ .name = "Test Pattern: Stuck Pixel threshold",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_NOISE_GAIN,
+ .name = "Test Pattern: Noise Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 8) - 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xtpg_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static int __maybe_unused xtpg_pm_suspend(struct device *dev)
+{
+ struct xtpg_device *xtpg = dev_get_drvdata(dev);
+
+ xvip_suspend(&xtpg->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xtpg_pm_resume(struct device *dev)
+{
+ struct xtpg_device *xtpg = dev_get_drvdata(dev);
+
+ xvip_resume(&xtpg->xvip);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xtpg_parse_of(struct xtpg_device *xtpg)
+{
+ struct device *dev = xtpg->xvip.dev;
+ struct device_node *node = xtpg->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int nports = 0;
+ bool has_endpoint = false;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ const struct xvip_video_format *format;
+ struct device_node *endpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ format = xvip_of_get_format(port);
+ if (IS_ERR(format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(format);
+ }
+
+ /* Get and check the format description */
+ if (!xtpg->vip_format) {
+ xtpg->vip_format = format;
+ } else if (xtpg->vip_format != format) {
+ dev_err(dev, "in/out format mismatch in DT");
+ return -EINVAL;
+ }
+
+ if (nports == 0) {
+ endpoint = of_get_next_child(port, NULL);
+ if (endpoint)
+ has_endpoint = true;
+ of_node_put(endpoint);
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ if (nports != 1 && nports != 2) {
+ dev_err(dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+
+ xtpg->npads = nports;
+ if (nports == 2 && has_endpoint)
+ xtpg->has_input = true;
+
+ return 0;
+}
+
+static int xtpg_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xtpg_device *xtpg;
+ u32 i, bayer_phase;
+ int ret;
+
+ xtpg = devm_kzalloc(&pdev->dev, sizeof(*xtpg), GFP_KERNEL);
+ if (!xtpg)
+ return -ENOMEM;
+
+ xtpg->xvip.dev = &pdev->dev;
+
+ ret = xtpg_parse_of(xtpg);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xtpg->xvip);
+ if (ret < 0)
+ return ret;
+
+ xtpg->vtmux_gpio = devm_gpiod_get_optional(&pdev->dev, "timing",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xtpg->vtmux_gpio)) {
+ ret = PTR_ERR(xtpg->vtmux_gpio);
+ goto error_resource;
+ }
+
+ xtpg->vtc = xvtc_of_get(pdev->dev.of_node);
+ if (IS_ERR(xtpg->vtc)) {
+ ret = PTR_ERR(xtpg->vtc);
+ goto error_resource;
+ }
+
+ /* Reset and initialize the core */
+ xvip_reset(&xtpg->xvip);
+
+ /* Initialize V4L2 subdevice and media entity. Pad numbers depend on the
+ * number of pads.
+ */
+ if (xtpg->npads == 2) {
+ xtpg->pads[0].flags = MEDIA_PAD_FL_SINK;
+ xtpg->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ } else {
+ xtpg->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ }
+
+ /* Initialize the default format */
+ xtpg->default_format.code = xtpg->vip_format->code;
+ xtpg->default_format.field = V4L2_FIELD_NONE;
+ xtpg->default_format.colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
+
+ bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
+ if (bayer_phase != XTPG_BAYER_PHASE_OFF)
+ xtpg->bayer = true;
+
+ xtpg->formats[0] = xtpg->default_format;
+ if (xtpg->npads == 2)
+ xtpg->formats[1] = xtpg->default_format;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xtpg->xvip.subdev;
+ v4l2_subdev_init(subdev, &xtpg_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xtpg_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xtpg);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.ops = &xtpg_media_ops;
+
+ ret = media_entity_init(&subdev->entity, xtpg->npads, xtpg->pads, 0);
+ if (ret < 0)
+ goto error;
+
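+ /* Three standard controls (vblank, hblank and test pattern) plus the
+ * custom TPG controls.
+ */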
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 + ARRAY_SIZE(xtpg_ctrls));
+
+ xtpg->vblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
+ V4L2_CID_VBLANK, XTPG_MIN_VBLANK,
+ XTPG_MAX_VBLANK, 1, 100);
+ xtpg->hblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
+ V4L2_CID_HBLANK, XTPG_MIN_HBLANK,
+ XTPG_MAX_HBLANK, 1, 100);
+ xtpg->pattern = v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(xtpg_pattern_strings) - 1,
+ 1, 9, xtpg_pattern_strings);
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler, &xtpg_ctrls[i], NULL);
+
+ if (xtpg->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xtpg->ctrl_handler.error;
+ goto error;
+ }
+ subdev->ctrl_handler = &xtpg->ctrl_handler;
+
+ xtpg_update_pattern_control(xtpg, true, true);
+
+ ret = v4l2_ctrl_handler_setup(&xtpg->ctrl_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, xtpg);
+
+ xvip_print_version(&xtpg->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xtpg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvtc_put(xtpg->vtc);
+error_resource:
+ xvip_cleanup_resources(&xtpg->xvip);
+ return ret;
+}
+
+static int xtpg_remove(struct platform_device *pdev)
+{
+ struct xtpg_device *xtpg = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xtpg->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xtpg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xtpg->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xtpg_pm_ops, xtpg_pm_suspend, xtpg_pm_resume);
+
+static const struct of_device_id xtpg_of_id_table[] = {
+ { .compatible = "xlnx,v-tpg-5.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xtpg_of_id_table);
+
+static struct platform_driver xtpg_driver = {
+ .driver = {
+ .name = "xilinx-tpg",
+ .pm = &xtpg_pm_ops,
+ .of_match_table = xtpg_of_id_table,
+ },
+ .probe = xtpg_probe,
+ .remove = xtpg_remove,
+};
+
+module_platform_driver(xtpg_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Test Pattern Generator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
new file mode 100644
index 000000000000..311259129504
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -0,0 +1,323 @@
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/media/xilinx-vip.h>
+
+#include "xilinx-vip.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static const struct xvip_video_format xvip_video_formats[] = {
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 2, V4L2_PIX_FMT_YUYV, "4:2:2, packed, YUYV" },
+ { XVIP_VF_YUV_444, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
+ 3, V4L2_PIX_FMT_YUV444, "4:4:4, packed, YUYV" },
+ { XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 3, 0, NULL },
+ { XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
+ 1, V4L2_PIX_FMT_GREY, "Greyscale 8-bit" },
+ { XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
+ 1, V4L2_PIX_FMT_SGRBG8, "Bayer 8-bit RGGB" },
+ { XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
+ 1, V4L2_PIX_FMT_SGRBG8, "Bayer 8-bit GRBG" },
+ { XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
+ 1, V4L2_PIX_FMT_SGBRG8, "Bayer 8-bit GBRG" },
+ { XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
+ 1, V4L2_PIX_FMT_SBGGR8, "Bayer 8-bit BGGR" },
+};
+
+/**
+ * xvip_get_format_by_code - Retrieve format information for a media bus code
+ * @code: the format media bus code
+ *
+ * Return: a pointer to the format information structure corresponding to the
+ * given V4L2 media bus format @code, or ERR_PTR if no corresponding format can
+ * be found.
+ */
+const struct xvip_video_format *xvip_get_format_by_code(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
+ const struct xvip_video_format *format = &xvip_video_formats[i];
+
+ if (format->code == code)
+ return format;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(xvip_get_format_by_code);
+
+/**
+ * xvip_get_format_by_fourcc - Retrieve format information for a 4CC
+ * @fourcc: the format 4CC
+ *
+ * Return: a pointer to the format information structure corresponding to the
+ * given V4L2 format @fourcc, or ERR_PTR if no corresponding format can be
+ * found.
+ */
+const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
+ const struct xvip_video_format *format = &xvip_video_formats[i];
+
+ if (format->fourcc == fourcc)
+ return format;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(xvip_get_format_by_fourcc);
+
+/**
+ * xvip_of_get_format - Parse a device tree node and return format information
+ * @node: the device tree node
+ *
+ * Read the xlnx,video-format, xlnx,video-width and xlnx,cfa-pattern properties
+ * from the device tree @node passed as an argument and return the corresponding
+ * format information.
+ *
+ * Return: a pointer to the format information structure corresponding to the
+ * format name and width, or ERR_PTR if no corresponding format can be found.
+ */
+const struct xvip_video_format *xvip_of_get_format(struct device_node *node)
+{
+ const char *pattern = "mono";
+ unsigned int vf_code;
+ unsigned int i;
+ u32 width;
+ int ret;
+
+ ret = of_property_read_u32(node, "xlnx,video-format", &vf_code);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = of_property_read_u32(node, "xlnx,video-width", &width);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (vf_code == XVIP_VF_MONO_SENSOR)
+ of_property_read_string(node, "xlnx,cfa-pattern", &pattern);
+
+ for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
+ const struct xvip_video_format *format = &xvip_video_formats[i];
+
+ if (format->vf_code != vf_code || format->width != width)
+ continue;
+
+ if (vf_code == XVIP_VF_MONO_SENSOR &&
+ strcmp(pattern, format->pattern))
+ continue;
+
+ return format;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(xvip_of_get_format);
+
+/**
+ * xvip_set_format_size - Set the media bus frame format size
+ * @format: media bus frame format to be updated
+ * @fmt: V4L2 subdev format carrying the requested frame size
+ *
+ * Set the media bus frame format size. The width and height from the subdev
+ * format @fmt are copied to the media bus format @format, clamped to the
+ * default minimum and maximum values.
+ */
+void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
+ const struct v4l2_subdev_format *fmt)
+{
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+}
+EXPORT_SYMBOL_GPL(xvip_set_format_size);
+
+/**
+ * xvip_clr_or_set - Clear or set the register with a bitmask
+ * @xvip: Xilinx Video IP device
+ * @addr: address of register
+ * @mask: bitmask to be set or cleared
+ * @set: boolean flag indicating whether to set or clear
+ *
+ * Clear or set the bits of @mask in the register at address @addr depending
+ * on the boolean flag @set. When @set is true the bitmask is set in the
+ * register, otherwise it is cleared.
+ *
+ * For example, this function can be used to set a control with a boolean value
+ * requested by users. If the caller knows whether to set or clear in the first
+ * place, the caller should call xvip_clr() or xvip_set() directly instead of
+ * using this function.
+ */
+void xvip_clr_or_set(struct xvip_device *xvip, u32 addr, u32 mask, bool set)
+{
+ u32 reg;
+
+ reg = xvip_read(xvip, addr);
+ reg = set ? reg | mask : reg & ~mask;
+ xvip_write(xvip, addr, reg);
+}
+EXPORT_SYMBOL_GPL(xvip_clr_or_set);
+
+/**
+ * xvip_clr_and_set - Clear and set the register with a bitmask
+ * @xvip: Xilinx Video IP device
+ * @addr: address of register
+ * @clr: bitmask to be cleared
+ * @set: bitmask to be set
+ *
+ * Clear the bits of mask @clr in the register at address @addr, then set the
+ * bits of mask @set in the same register.
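+ *
+ * For example, the TPG driver calls
+ * xvip_clr_and_set(xvip, XTPG_PATTERN_CONTROL, XTPG_PATTERN_MASK, val)
+ * to replace the test pattern field while leaving the other bits untouched.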
+ */
+void xvip_clr_and_set(struct xvip_device *xvip, u32 addr, u32 clr, u32 set)
+{
+ u32 reg;
+
+ reg = xvip_read(xvip, addr);
+ reg &= ~clr;
+ reg |= set;
+ xvip_write(xvip, addr, reg);
+}
+EXPORT_SYMBOL_GPL(xvip_clr_and_set);
+
+int xvip_init_resources(struct xvip_device *xvip)
+{
+ struct platform_device *pdev = to_platform_device(xvip->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvip->iomem = devm_ioremap_resource(xvip->dev, res);
+ if (IS_ERR(xvip->iomem))
+ return PTR_ERR(xvip->iomem);
+
+ xvip->clk = devm_clk_get(xvip->dev, NULL);
+ if (IS_ERR(xvip->clk))
+ return PTR_ERR(xvip->clk);
+
+ clk_prepare_enable(xvip->clk);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvip_init_resources);
+
+void xvip_cleanup_resources(struct xvip_device *xvip)
+{
+ clk_disable_unprepare(xvip->clk);
+}
+EXPORT_SYMBOL_GPL(xvip_cleanup_resources);
+
+/* -----------------------------------------------------------------------------
+ * Subdev operations handlers
+ */
+
+/**
+ * xvip_enum_mbus_code - Enumerate the media format code
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: returning media bus code
+ *
+ * Enumerate the media bus code of the subdevice. Return the corresponding
+ * pad format code. This function only works for subdevices with fixed format
+ * on all pads. Subdevices with multiple formats should have their own
+ * function to enumerate mbus codes.
+ *
+ * Return: 0 if the media bus code is found, or -EINVAL if the format index
+ * is not valid.
+ */
+int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ /* Enumerating media bus codes based on the active configuration isn't
+ * supported yet.
+ */
+ if (code->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, code->pad);
+
+ code->code = format->code;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvip_enum_mbus_code);
+
+/**
+ * xvip_enum_frame_size - Enumerate the media bus frame size
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: returning media bus frame size
+ *
+ * This function is a drop-in implementation of the subdev enum_frame_size pad
+ * operation. It assumes that the subdevice has one sink pad and one source
+ * pad, and that the format on the source pad is always identical to the
+ * format on the sink pad. Entities with different requirements need to
+ * implement their own enum_frame_size handlers.
+ *
+ * Return: 0 if the media bus frame size is found, or -EINVAL
+ * if the index or the code is not valid.
+ */
+int xvip_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ /* Enumerating frame sizes based on the active configuration isn't
+ * supported yet.
+ */
+ if (fse->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == XVIP_PAD_SINK) {
+ fse->min_width = XVIP_MIN_WIDTH;
+ fse->max_width = XVIP_MAX_WIDTH;
+ fse->min_height = XVIP_MIN_HEIGHT;
+ fse->max_height = XVIP_MAX_HEIGHT;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvip_enum_frame_size);
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
new file mode 100644
index 000000000000..42fee2026815
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -0,0 +1,238 @@
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VIP_H__
+#define __XILINX_VIP_H__
+
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+
+struct clk;
+
+/*
+ * Minimum and maximum width and height common to most video IP cores. IP
+ * cores with different requirements must define their own values.
+ */
+#define XVIP_MIN_WIDTH 32
+#define XVIP_MAX_WIDTH 7680
+#define XVIP_MIN_HEIGHT 32
+#define XVIP_MAX_HEIGHT 7680
+
+/*
+ * Pad IDs. IP cores with multiple inputs or outputs should define
+ * their own values.
+ */
+#define XVIP_PAD_SINK 0
+#define XVIP_PAD_SOURCE 1
+
+/* Xilinx Video IP Control Registers */
+#define XVIP_CTRL_CONTROL 0x0000
+#define XVIP_CTRL_CONTROL_SW_ENABLE (1 << 0)
+#define XVIP_CTRL_CONTROL_REG_UPDATE (1 << 1)
+#define XVIP_CTRL_CONTROL_BYPASS (1 << 4)
+#define XVIP_CTRL_CONTROL_TEST_PATTERN (1 << 5)
+#define XVIP_CTRL_CONTROL_FRAME_SYNC_RESET (1 << 30)
+#define XVIP_CTRL_CONTROL_SW_RESET (1 << 31)
+#define XVIP_CTRL_STATUS 0x0004
+#define XVIP_CTRL_STATUS_PROC_STARTED (1 << 0)
+#define XVIP_CTRL_STATUS_EOF (1 << 1)
+#define XVIP_CTRL_ERROR 0x0008
+#define XVIP_CTRL_ERROR_SLAVE_EOL_EARLY (1 << 0)
+#define XVIP_CTRL_ERROR_SLAVE_EOL_LATE (1 << 1)
+#define XVIP_CTRL_ERROR_SLAVE_SOF_EARLY (1 << 2)
+#define XVIP_CTRL_ERROR_SLAVE_SOF_LATE (1 << 3)
+#define XVIP_CTRL_IRQ_ENABLE 0x000c
+#define XVIP_CTRL_IRQ_ENABLE_PROC_STARTED (1 << 0)
+#define XVIP_CTRL_IRQ_EOF (1 << 1)
+#define XVIP_CTRL_VERSION 0x0010
+#define XVIP_CTRL_VERSION_MAJOR_MASK (0xff << 24)
+#define XVIP_CTRL_VERSION_MAJOR_SHIFT 24
+#define XVIP_CTRL_VERSION_MINOR_MASK (0xff << 16)
+#define XVIP_CTRL_VERSION_MINOR_SHIFT 16
+#define XVIP_CTRL_VERSION_REVISION_MASK (0xf << 12)
+#define XVIP_CTRL_VERSION_REVISION_SHIFT 12
+#define XVIP_CTRL_VERSION_PATCH_MASK (0xf << 8)
+#define XVIP_CTRL_VERSION_PATCH_SHIFT 8
+#define XVIP_CTRL_VERSION_INTERNAL_MASK (0xff << 0)
+#define XVIP_CTRL_VERSION_INTERNAL_SHIFT 0
+
+/* Xilinx Video IP Timing Registers */
+#define XVIP_ACTIVE_SIZE 0x0020
+#define XVIP_ACTIVE_VSIZE_MASK (0x7ff << 16)
+#define XVIP_ACTIVE_VSIZE_SHIFT 16
+#define XVIP_ACTIVE_HSIZE_MASK (0x7ff << 0)
+#define XVIP_ACTIVE_HSIZE_SHIFT 0
+#define XVIP_ENCODING 0x0028
+#define XVIP_ENCODING_NBITS_8 (0 << 4)
+#define XVIP_ENCODING_NBITS_10 (1 << 4)
+#define XVIP_ENCODING_NBITS_12 (2 << 4)
+#define XVIP_ENCODING_NBITS_16 (3 << 4)
+#define XVIP_ENCODING_NBITS_MASK (3 << 4)
+#define XVIP_ENCODING_NBITS_SHIFT 4
+#define XVIP_ENCODING_VIDEO_FORMAT_YUV422 (0 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_YUV444 (1 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_RGB (2 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_YUV420 (3 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_MASK (3 << 0)
+#define XVIP_ENCODING_VIDEO_FORMAT_SHIFT 0
+
+/**
+ * struct xvip_device - Xilinx Video IP device structure
+ * @subdev: V4L2 subdevice
+ * @dev: (OF) device
+ * @iomem: device I/O register space remapped to kernel virtual memory
+ * @clk: video core clock
+ * @saved_ctrl: saved control register for resume / suspend
+ */
+struct xvip_device {
+ struct v4l2_subdev subdev;
+ struct device *dev;
+ void __iomem *iomem;
+ struct clk *clk;
+ u32 saved_ctrl;
+};
+
+/**
+ * struct xvip_video_format - Xilinx Video IP video format description
+ * @vf_code: AXI4 video format code
+ * @width: AXI4 format width in bits per component
+ * @pattern: CFA pattern for Mono/Sensor formats
+ * @code: media bus format code
+ * @bpp: bytes per pixel (when stored in memory)
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @description: format description, suitable for userspace
+ */
+struct xvip_video_format {
+ unsigned int vf_code;
+ unsigned int width;
+ const char *pattern;
+ unsigned int code;
+ unsigned int bpp;
+ u32 fourcc;
+ const char *description;
+};
+
+const struct xvip_video_format *xvip_get_format_by_code(unsigned int code);
+const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc);
+const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
+void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
+ const struct v4l2_subdev_format *fmt);
+int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code);
+int xvip_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse);
+
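+/*
+ * Register access helpers. All registers of the Xilinx video IP cores are
+ * accessed as 32-bit values at a byte offset from the remapped I/O memory.
+ */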
+static inline u32 xvip_read(struct xvip_device *xvip, u32 addr)
+{
+ return ioread32(xvip->iomem + addr);
+}
+
+static inline void xvip_write(struct xvip_device *xvip, u32 addr, u32 value)
+{
+ iowrite32(value, xvip->iomem + addr);
+}
+
+static inline void xvip_clr(struct xvip_device *xvip, u32 addr, u32 clr)
+{
+ xvip_write(xvip, addr, xvip_read(xvip, addr) & ~clr);
+}
+
+static inline void xvip_set(struct xvip_device *xvip, u32 addr, u32 set)
+{
+ xvip_write(xvip, addr, xvip_read(xvip, addr) | set);
+}
+
+void xvip_clr_or_set(struct xvip_device *xvip, u32 addr, u32 mask, bool set);
+void xvip_clr_and_set(struct xvip_device *xvip, u32 addr, u32 clr, u32 set);
+
+int xvip_init_resources(struct xvip_device *xvip);
+void xvip_cleanup_resources(struct xvip_device *xvip);
+
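+/*
+ * Helpers for the common core operations: reset, start / stop, suspend /
+ * resume, frame size programming, register update control and version
+ * reporting, implemented on top of the register definitions above.
+ */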
+static inline void xvip_reset(struct xvip_device *xvip)
+{
+ xvip_write(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_SW_RESET);
+}
+
+static inline void xvip_start(struct xvip_device *xvip)
+{
+ xvip_set(xvip, XVIP_CTRL_CONTROL,
+ XVIP_CTRL_CONTROL_SW_ENABLE | XVIP_CTRL_CONTROL_REG_UPDATE);
+}
+
+static inline void xvip_stop(struct xvip_device *xvip)
+{
+ xvip_clr(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_SW_ENABLE);
+}
+
+static inline void xvip_resume(struct xvip_device *xvip)
+{
+ xvip_write(xvip, XVIP_CTRL_CONTROL,
+ xvip->saved_ctrl | XVIP_CTRL_CONTROL_SW_ENABLE);
+}
+
+static inline void xvip_suspend(struct xvip_device *xvip)
+{
+ xvip->saved_ctrl = xvip_read(xvip, XVIP_CTRL_CONTROL);
+ xvip_write(xvip, XVIP_CTRL_CONTROL,
+ xvip->saved_ctrl & ~XVIP_CTRL_CONTROL_SW_ENABLE);
+}
+
+static inline void xvip_set_frame_size(struct xvip_device *xvip,
+ const struct v4l2_mbus_framefmt *format)
+{
+ xvip_write(xvip, XVIP_ACTIVE_SIZE,
+ (format->height << XVIP_ACTIVE_VSIZE_SHIFT) |
+ (format->width << XVIP_ACTIVE_HSIZE_SHIFT));
+}
+
+static inline void xvip_get_frame_size(struct xvip_device *xvip,
+ struct v4l2_mbus_framefmt *format)
+{
+ u32 reg;
+
+ reg = xvip_read(xvip, XVIP_ACTIVE_SIZE);
+ format->width = (reg & XVIP_ACTIVE_HSIZE_MASK) >>
+ XVIP_ACTIVE_HSIZE_SHIFT;
+ format->height = (reg & XVIP_ACTIVE_VSIZE_MASK) >>
+ XVIP_ACTIVE_VSIZE_SHIFT;
+}
+
+static inline void xvip_enable_reg_update(struct xvip_device *xvip)
+{
+ xvip_set(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_REG_UPDATE);
+}
+
+static inline void xvip_disable_reg_update(struct xvip_device *xvip)
+{
+ xvip_clr(xvip, XVIP_CTRL_CONTROL, XVIP_CTRL_CONTROL_REG_UPDATE);
+}
+
+static inline void xvip_print_version(struct xvip_device *xvip)
+{
+ u32 version;
+
+ version = xvip_read(xvip, XVIP_CTRL_VERSION);
+
+ dev_info(xvip->dev, "device found, version %u.%02x%x\n",
+ ((version & XVIP_CTRL_VERSION_MAJOR_MASK) >>
+ XVIP_CTRL_VERSION_MAJOR_SHIFT),
+ ((version & XVIP_CTRL_VERSION_MINOR_MASK) >>
+ XVIP_CTRL_VERSION_MINOR_SHIFT),
+ ((version & XVIP_CTRL_VERSION_REVISION_MASK) >>
+ XVIP_CTRL_VERSION_REVISION_SHIFT));
+}
+
+#endif /* __XILINX_VIP_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
new file mode 100644
index 000000000000..7b7cb9c28d2c
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -0,0 +1,669 @@
+/*
+ * Xilinx Video IP Composite Device
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-of.h>
+
+#include "xilinx-dma.h"
+#include "xilinx-vipp.h"
+
+#define XVIPP_DMA_S2MM 0
+#define XVIPP_DMA_MM2S 1
+
+/**
+ * struct xvip_graph_entity - Entity in the video graph
+ * @list: list entry in a graph entities list
+ * @node: the entity's DT node
+ * @entity: media entity, from the corresponding V4L2 subdev
+ * @asd: subdev asynchronous registration information
+ * @subdev: V4L2 subdev
+ */
+struct xvip_graph_entity {
+ struct list_head list;
+ struct device_node *node;
+ struct media_entity *entity;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+};
+
+/* -----------------------------------------------------------------------------
+ * Graph Management
+ */
+
+static struct xvip_graph_entity *
+xvip_graph_find_entity(struct xvip_composite_device *xdev,
+ const struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node == node)
+ return entity;
+ }
+
+ return NULL;
+}
+
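+/*
+ * Create the media links originating from one entity by walking its DT graph
+ * endpoints. Sink ports are skipped, as they are processed from the other end
+ * of the link, and links to the composite node itself are skipped, as they
+ * are handled by xvip_graph_build_dma().
+ */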
+static int xvip_graph_build_one(struct xvip_composite_device *xdev,
+ struct xvip_graph_entity *entity)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct media_entity *local = entity->entity;
+ struct media_entity *remote;
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_of_link link;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for entity %s\n", local->name);
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ next = of_graph_get_next_endpoint(entity->node, ep);
+ if (next == NULL)
+ break;
+
+ of_node_put(ep);
+ ep = next;
+
+ dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name);
+
+ ret = v4l2_of_parse_link(ep, &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %s\n",
+ ep->full_name);
+ continue;
+ }
+
+ if (link.local_port >= local->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %s\n",
+ link.local_port, link.local_node->full_name);
+ v4l2_of_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ local_pad = &local->pads[link.local_port];
+
+ /* Skip sink ports, they will be processed from the other end of
+ * the link.
+ */
+ if (local_pad->flags & MEDIA_PAD_FL_SINK) {
+ dev_dbg(xdev->dev, "skipping sink port %s:%u\n",
+ link.local_node->full_name, link.local_port);
+ v4l2_of_put_link(&link);
+ continue;
+ }
+
+ /* Skip DMA engines, they will be processed separately. */
+ if (link.remote_node == xdev->dev->of_node) {
+ dev_dbg(xdev->dev, "skipping DMA port %s:%u\n",
+ link.local_node->full_name, link.local_port);
+ v4l2_of_put_link(&link);
+ continue;
+ }
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev, link.remote_node);
+ if (ent == NULL) {
+ dev_err(xdev->dev, "no entity found for %s\n",
+ link.remote_node->full_name);
+ v4l2_of_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ remote = ent->entity;
+
+ if (link.remote_port >= remote->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %s\n",
+ link.remote_port, link.remote_node->full_name);
+ v4l2_of_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ remote_pad = &remote->pads[link.remote_port];
+
+ v4l2_of_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+
+ ret = media_entity_create_link(local, local_pad->index,
+ remote, remote_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+ break;
+ }
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static struct xvip_dma *
+xvip_graph_find_dma(struct xvip_composite_device *xdev, unsigned int port)
+{
+ struct xvip_dma *dma;
+
+ list_for_each_entry(dma, &xdev->dmas, list) {
+ if (dma->port == port)
+ return dma;
+ }
+
+ return NULL;
+}
+
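+/*
+ * Create the media links between the DMA engines and the subdev entities by
+ * walking the endpoints of the composite node. The link direction depends on
+ * whether the DMA channel pad is a source or a sink pad.
+ */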
+static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct device_node *node = xdev->dev->of_node;
+ struct media_entity *source;
+ struct media_entity *sink;
+ struct media_pad *source_pad;
+ struct media_pad *sink_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_of_link link;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ struct xvip_dma *dma;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for DMA engines\n");
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ next = of_graph_get_next_endpoint(node, ep);
+ if (next == NULL)
+ break;
+
+ of_node_put(ep);
+ ep = next;
+
+ dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name);
+
+ ret = v4l2_of_parse_link(ep, &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %s\n",
+ ep->full_name);
+ continue;
+ }
+
+ /* Find the DMA engine. */
+ dma = xvip_graph_find_dma(xdev, link.local_port);
+ if (dma == NULL) {
+ dev_err(xdev->dev, "no DMA engine found for port %u\n",
+ link.local_port);
+ v4l2_of_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev_dbg(xdev->dev, "creating link for DMA engine %s\n",
+ dma->video.name);
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev, link.remote_node);
+ if (ent == NULL) {
+ dev_err(xdev->dev, "no entity found for %s\n",
+ link.remote_node->full_name);
+ v4l2_of_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ if (link.remote_port >= ent->entity->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %s\n",
+ link.remote_port, link.remote_node->full_name);
+ v4l2_of_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (dma->pad.flags & MEDIA_PAD_FL_SOURCE) {
+ source = &dma->video.entity;
+ source_pad = &dma->pad;
+ sink = ent->entity;
+ sink_pad = &sink->pads[link.remote_port];
+ } else {
+ source = ent->entity;
+ source_pad = &source->pads[link.remote_port];
+ sink = &dma->video.entity;
+ sink_pad = &dma->pad;
+ }
+
+ v4l2_of_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+
+ ret = media_entity_create_link(source, source_pad->index,
+ sink, sink_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+ break;
+ }
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct xvip_composite_device *xdev =
+ container_of(notifier, struct xvip_composite_device, notifier);
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
+
+ /* Create links for every entity. */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_build_one(xdev, entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Create links for DMA channels. */
+ ret = xvip_graph_build_dma(xdev);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev);
+ if (ret < 0)
+ dev_err(xdev->dev, "failed to register subdev nodes\n");
+
+ return ret;
+}
+
+static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct xvip_composite_device *xdev =
+ container_of(notifier, struct xvip_composite_device, notifier);
+ struct xvip_graph_entity *entity;
+
+ /* Locate the entity corresponding to the bound subdev and store the
+ * subdev pointer.
+ */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node != subdev->dev->of_node)
+ continue;
+
+ if (entity->subdev) {
+ dev_err(xdev->dev, "duplicate subdev for node %s\n",
+ entity->node->full_name);
+ return -EINVAL;
+ }
+
+ dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
+ entity->entity = &subdev->entity;
+ entity->subdev = subdev;
+ return 0;
+ }
+
+ dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
+ return -EINVAL;
+}
+
+static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
+ struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+ struct device_node *remote;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "parsing node %s\n", node->full_name);
+
+ while (1) {
+ next = of_graph_get_next_endpoint(node, ep);
+ if (next == NULL)
+ break;
+
+ of_node_put(ep);
+ ep = next;
+
+ dev_dbg(xdev->dev, "handling endpoint %s\n", ep->full_name);
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (remote == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Skip entities that we have already processed. */
+ if (remote == xdev->dev->of_node ||
+ xvip_graph_find_entity(xdev, remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL);
+ if (entity == NULL) {
+ of_node_put(remote);
+ ret = -ENOMEM;
+ break;
+ }
+
+ entity->node = remote;
+ entity->asd.match_type = V4L2_ASYNC_MATCH_OF;
+ entity->asd.match.of.node = remote;
+ list_add_tail(&entity->list, &xdev->entities);
+ xdev->num_subdevs++;
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static int xvip_graph_parse(struct xvip_composite_device *xdev)
+{
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ /*
+ * Walk the links to parse the full graph. Start by parsing the
+ * composite node and then parse entities in turn. The list_for_each
+ * loop will handle entities added at the end of the list while walking
+ * the links.
+ */
+ ret = xvip_graph_parse_one(xdev, xdev->dev->of_node);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_parse_one(xdev, entity->node);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
+ struct device_node *node)
+{
+ struct xvip_dma *dma;
+ enum v4l2_buf_type type;
+ const char *direction;
+ unsigned int index;
+ int ret;
+
+ ret = of_property_read_string(node, "direction", &direction);
+ if (ret < 0)
+ return ret;
+
+ if (strcmp(direction, "input") == 0)
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (strcmp(direction, "output") == 0)
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ else
+ return -EINVAL;
+
+ of_property_read_u32(node, "reg", &index);
+
+ dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (dma == NULL)
+ return -ENOMEM;
+
+ ret = xvip_dma_init(xdev, dma, type, index);
+ if (ret < 0) {
+ dev_err(xdev->dev, "%s initialization failed\n",
+ node->full_name);
+ return ret;
+ }
+
+ list_add_tail(&dma->list, &xdev->dmas);
+
+ xdev->v4l2_caps |= type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+ ? V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+
+ ports = of_get_child_by_name(xdev->dev->of_node, "ports");
+ if (ports == NULL) {
+ dev_err(xdev->dev, "ports node not present\n");
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(ports, port) {
+ ret = xvip_graph_dma_init_one(xdev, port);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
+{
+ struct xvip_graph_entity *entityp;
+ struct xvip_graph_entity *entity;
+ struct xvip_dma *dmap;
+ struct xvip_dma *dma;
+
+ v4l2_async_notifier_unregister(&xdev->notifier);
+
+ list_for_each_entry_safe(entity, entityp, &xdev->entities, list) {
+ of_node_put(entity->node);
+ list_del(&entity->list);
+ }
+
+ list_for_each_entry_safe(dma, dmap, &xdev->dmas, list) {
+ xvip_dma_cleanup(dma);
+ list_del(&dma->list);
+ }
+}
+
+static int xvip_graph_init(struct xvip_composite_device *xdev)
+{
+ struct xvip_graph_entity *entity;
+ struct v4l2_async_subdev **subdevs = NULL;
+ unsigned int num_subdevs;
+ unsigned int i;
+ int ret;
+
+ /* Init the DMA channels. */
+ ret = xvip_graph_dma_init(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "DMA initialization failed\n");
+ goto done;
+ }
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = xvip_graph_parse(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "graph parsing failed\n");
+ goto done;
+ }
+
+ if (!xdev->num_subdevs) {
+ dev_err(xdev->dev, "no subdev found in graph\n");
+ ret = -ENOENT;
+ goto done;
+ }
+
+ /* Register the subdevices notifier. */
+ num_subdevs = xdev->num_subdevs;
+ subdevs = devm_kzalloc(xdev->dev, sizeof(*subdevs) * num_subdevs,
+ GFP_KERNEL);
+ if (subdevs == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ i = 0;
+ list_for_each_entry(entity, &xdev->entities, list)
+ subdevs[i++] = &entity->asd;
+
+ xdev->notifier.subdevs = subdevs;
+ xdev->notifier.num_subdevs = num_subdevs;
+ xdev->notifier.bound = xvip_graph_notify_bound;
+ xdev->notifier.complete = xvip_graph_notify_complete;
+
+ ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+ if (ret < 0) {
+ dev_err(xdev->dev, "notifier registration failed\n");
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ if (ret < 0)
+ xvip_graph_cleanup(xdev);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Controller and V4L2
+ */
+
+static void xvip_composite_v4l2_cleanup(struct xvip_composite_device *xdev)
+{
+ v4l2_device_unregister(&xdev->v4l2_dev);
+ media_device_unregister(&xdev->media_dev);
+}
+
+static int xvip_composite_v4l2_init(struct xvip_composite_device *xdev)
+{
+ int ret;
+
+ xdev->media_dev.dev = xdev->dev;
+ strlcpy(xdev->media_dev.model, "Xilinx Video Composite Device",
+ sizeof(xdev->media_dev.model));
+ xdev->media_dev.hw_revision = 0;
+
+ ret = media_device_register(&xdev->media_dev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "media device registration failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ xdev->v4l2_dev.mdev = &xdev->media_dev;
+ ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ media_device_unregister(&xdev->media_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xvip_composite_probe(struct platform_device *pdev)
+{
+ struct xvip_composite_device *xdev;
+ int ret;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&xdev->entities);
+ INIT_LIST_HEAD(&xdev->dmas);
+
+ ret = xvip_composite_v4l2_init(xdev);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_graph_init(xdev);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xdev);
+
+ dev_info(xdev->dev, "device registered\n");
+
+ return 0;
+
+error:
+ xvip_composite_v4l2_cleanup(xdev);
+ return ret;
+}
+
+static int xvip_composite_remove(struct platform_device *pdev)
+{
+ struct xvip_composite_device *xdev = platform_get_drvdata(pdev);
+
+ xvip_graph_cleanup(xdev);
+ xvip_composite_v4l2_cleanup(xdev);
+
+ return 0;
+}
+
+static const struct of_device_id xvip_composite_of_id_table[] = {
+ { .compatible = "xlnx,video" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvip_composite_of_id_table);
+
+static struct platform_driver xvip_composite_driver = {
+ .driver = {
+ .name = "xilinx-video",
+ .of_match_table = xvip_composite_of_id_table,
+ },
+ .probe = xvip_composite_probe,
+ .remove = xvip_composite_remove,
+};
+
+module_platform_driver(xvip_composite_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video IP Composite Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
new file mode 100644
index 000000000000..faf6b6e80b3b
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -0,0 +1,49 @@
+/*
+ * Xilinx Video IP Composite Device
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VIPP_H__
+#define __XILINX_VIPP_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+/**
+ * struct xvip_composite_device - Xilinx Video IP device structure
+ * @v4l2_dev: V4L2 device
+ * @media_dev: media device
+ * @dev: (OF) device
+ * @notifier: V4L2 asynchronous subdevs notifier
+ * @entities: entities in the graph as a list of xvip_graph_entity
+ * @num_subdevs: number of subdevs in the pipeline
+ * @dmas: list of DMA channels at the pipeline output and input
+ * @v4l2_caps: V4L2 capabilities of the whole device (see VIDIOC_QUERYCAP)
+ */
+struct xvip_composite_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct device *dev;
+
+ struct v4l2_async_notifier notifier;
+ struct list_head entities;
+ unsigned int num_subdevs;
+
+ struct list_head dmas;
+ u32 v4l2_caps;
+};
+
+#endif /* __XILINX_VIPP_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.c b/drivers/media/platform/xilinx/xilinx-vtc.c
new file mode 100644
index 000000000000..01c750edcac5
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vtc.c
@@ -0,0 +1,380 @@
+/*
+ * Xilinx Video Timing Controller
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "xilinx-vip.h"
+#include "xilinx-vtc.h"
+
+#define XVTC_CONTROL_FIELD_ID_POL_SRC (1 << 26)
+#define XVTC_CONTROL_ACTIVE_CHROMA_POL_SRC (1 << 25)
+#define XVTC_CONTROL_ACTIVE_VIDEO_POL_SRC (1 << 24)
+#define XVTC_CONTROL_HSYNC_POL_SRC (1 << 23)
+#define XVTC_CONTROL_VSYNC_POL_SRC (1 << 22)
+#define XVTC_CONTROL_HBLANK_POL_SRC (1 << 21)
+#define XVTC_CONTROL_VBLANK_POL_SRC (1 << 20)
+#define XVTC_CONTROL_CHROMA_SRC (1 << 18)
+#define XVTC_CONTROL_VBLANK_HOFF_SRC (1 << 17)
+#define XVTC_CONTROL_VSYNC_END_SRC (1 << 16)
+#define XVTC_CONTROL_VSYNC_START_SRC (1 << 15)
+#define XVTC_CONTROL_ACTIVE_VSIZE_SRC (1 << 14)
+#define XVTC_CONTROL_FRAME_VSIZE_SRC (1 << 13)
+#define XVTC_CONTROL_HSYNC_END_SRC (1 << 11)
+#define XVTC_CONTROL_HSYNC_START_SRC (1 << 10)
+#define XVTC_CONTROL_ACTIVE_HSIZE_SRC (1 << 9)
+#define XVTC_CONTROL_FRAME_HSIZE_SRC (1 << 8)
+#define XVTC_CONTROL_SYNC_ENABLE (1 << 5)
+#define XVTC_CONTROL_DET_ENABLE (1 << 3)
+#define XVTC_CONTROL_GEN_ENABLE (1 << 2)
+
+#define XVTC_STATUS_FSYNC(n) ((n) << 16)
+#define XVTC_STATUS_GEN_ACTIVE_VIDEO (1 << 13)
+#define XVTC_STATUS_GEN_VBLANK (1 << 12)
+#define XVTC_STATUS_DET_ACTIVE_VIDEO (1 << 11)
+#define XVTC_STATUS_DET_VBLANK (1 << 10)
+#define XVTC_STATUS_LOCK_LOSS (1 << 9)
+#define XVTC_STATUS_LOCK (1 << 8)
+
+#define XVTC_ERROR_ACTIVE_CHROMA_LOCK (1 << 21)
+#define XVTC_ERROR_ACTIVE_VIDEO_LOCK (1 << 20)
+#define XVTC_ERROR_HSYNC_LOCK (1 << 19)
+#define XVTC_ERROR_VSYNC_LOCK (1 << 18)
+#define XVTC_ERROR_HBLANK_LOCK (1 << 17)
+#define XVTC_ERROR_VBLANK_LOCK (1 << 16)
+
+#define XVTC_IRQ_ENABLE_FSYNC(n) ((n) << 16)
+#define XVTC_IRQ_ENABLE_GEN_ACTIVE_VIDEO (1 << 13)
+#define XVTC_IRQ_ENABLE_GEN_VBLANK (1 << 12)
+#define XVTC_IRQ_ENABLE_DET_ACTIVE_VIDEO (1 << 11)
+#define XVTC_IRQ_ENABLE_DET_VBLANK (1 << 10)
+#define XVTC_IRQ_ENABLE_LOCK_LOSS (1 << 9)
+#define XVTC_IRQ_ENABLE_LOCK (1 << 8)
+
+/*
+ * The following registers exist in two blocks, one at 0x0020 for the detector
+ * and one at 0x0060 for the generator.
+ */
+
+#define XVTC_DETECTOR_OFFSET 0x0020
+#define XVTC_GENERATOR_OFFSET 0x0060
+
+#define XVTC_ACTIVE_SIZE 0x0000
+#define XVTC_ACTIVE_VSIZE_SHIFT 16
+#define XVTC_ACTIVE_VSIZE_MASK (0x1fff << 16)
+#define XVTC_ACTIVE_HSIZE_SHIFT 0
+#define XVTC_ACTIVE_HSIZE_MASK (0x1fff << 0)
+
+#define XVTC_TIMING_STATUS 0x0004
+#define XVTC_TIMING_STATUS_ACTIVE_VIDEO (1 << 2)
+#define XVTC_TIMING_STATUS_VBLANK (1 << 1)
+#define XVTC_TIMING_STATUS_LOCKED (1 << 0)
+
+#define XVTC_ENCODING 0x0008
+#define XVTC_ENCODING_CHROMA_PARITY_SHIFT 8
+#define XVTC_ENCODING_CHROMA_PARITY_MASK (3 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_EVEN_ALL (0 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_ODD_ALL (1 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_EVEN_EVEN (2 << 8)
+#define XVTC_ENCODING_CHROMA_PARITY_ODD_EVEN (3 << 8)
+#define XVTC_ENCODING_VIDEO_FORMAT_SHIFT 0
+#define XVTC_ENCODING_VIDEO_FORMAT_MASK (0xf << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_YUV422 (0 << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_YUV444 (1 << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_RGB (2 << 0)
+#define XVTC_ENCODING_VIDEO_FORMAT_YUV420 (3 << 0)
+
+#define XVTC_POLARITY 0x000c
+#define XVTC_POLARITY_ACTIVE_CHROMA_POL (1 << 5)
+#define XVTC_POLARITY_ACTIVE_VIDEO_POL (1 << 4)
+#define XVTC_POLARITY_HSYNC_POL (1 << 3)
+#define XVTC_POLARITY_VSYNC_POL (1 << 2)
+#define XVTC_POLARITY_HBLANK_POL (1 << 1)
+#define XVTC_POLARITY_VBLANK_POL (1 << 0)
+
+#define XVTC_HSIZE 0x0010
+#define XVTC_HSIZE_MASK (0x1fff << 0)
+
+#define XVTC_VSIZE 0x0014
+#define XVTC_VSIZE_MASK (0x1fff << 0)
+
+#define XVTC_HSYNC 0x0018
+#define XVTC_HSYNC_END_SHIFT 16
+#define XVTC_HSYNC_END_MASK (0x1fff << 16)
+#define XVTC_HSYNC_START_SHIFT 0
+#define XVTC_HSYNC_START_MASK (0x1fff << 0)
+
+#define XVTC_F0_VBLANK_H 0x001c
+#define XVTC_F0_VBLANK_HEND_SHIFT 16
+#define XVTC_F0_VBLANK_HEND_MASK (0x1fff << 16)
+#define XVTC_F0_VBLANK_HSTART_SHIFT 0
+#define XVTC_F0_VBLANK_HSTART_MASK (0x1fff << 0)
+
+#define XVTC_F0_VSYNC_V 0x0020
+#define XVTC_F0_VSYNC_VEND_SHIFT 16
+#define XVTC_F0_VSYNC_VEND_MASK (0x1fff << 16)
+#define XVTC_F0_VSYNC_VSTART_SHIFT 0
+#define XVTC_F0_VSYNC_VSTART_MASK (0x1fff << 0)
+
+#define XVTC_F0_VSYNC_H 0x0024
+#define XVTC_F0_VSYNC_HEND_SHIFT 16
+#define XVTC_F0_VSYNC_HEND_MASK (0x1fff << 16)
+#define XVTC_F0_VSYNC_HSTART_SHIFT 0
+#define XVTC_F0_VSYNC_HSTART_MASK (0x1fff << 0)
+
+#define XVTC_FRAME_SYNC_CONFIG(n) (0x0100 + 4 * (n))
+#define XVTC_FRAME_SYNC_V_START_SHIFT 16
+#define XVTC_FRAME_SYNC_V_START_MASK (0x1fff << 16)
+#define XVTC_FRAME_SYNC_H_START_SHIFT 0
+#define XVTC_FRAME_SYNC_H_START_MASK (0x1fff << 0)
+
+#define XVTC_GENERATOR_GLOBAL_DELAY 0x0104
+
+/**
+ * struct xvtc_device - Xilinx Video Timing Controller device structure
+ * @xvip: Xilinx Video IP device
+ * @list: entry in the global VTC list
+ * @has_detector: the VTC has a timing detector
+ * @has_generator: the VTC has a timing generator
+ * @config: generator timings configuration
+ */
+struct xvtc_device {
+ struct xvip_device xvip;
+ struct list_head list;
+
+ bool has_detector;
+ bool has_generator;
+
+ struct xvtc_config config;
+};
+
+static LIST_HEAD(xvtc_list);
+static DEFINE_MUTEX(xvtc_lock);
+
+static inline void xvtc_gen_write(struct xvtc_device *xvtc, u32 addr, u32 value)
+{
+ xvip_write(&xvtc->xvip, XVTC_GENERATOR_OFFSET + addr, value);
+}
+
+/* -----------------------------------------------------------------------------
+ * Generator Operations
+ */
+
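+/**
+ * xvtc_generator_start - Configure and start the timing generator
+ * @xvtc: Video Timing Controller
+ * @config: timing parameters for the generator
+ *
+ * Enable the VTC clock, program the generator timing registers from @config
+ * and enable the generator with all timing parameters sourced from the
+ * generator registers.
+ *
+ * Return: 0 on success, -ENXIO if the VTC has no generator, or the error code
+ * returned by clk_prepare_enable() if the clock can't be enabled.
+ */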
+int xvtc_generator_start(struct xvtc_device *xvtc,
+ const struct xvtc_config *config)
+{
+ int ret;
+
+ if (!xvtc->has_generator)
+ return -ENXIO;
+
+ ret = clk_prepare_enable(xvtc->xvip.clk);
+ if (ret < 0)
+ return ret;
+
+ /* Hardcode the polarity to active high, as required by the video in to
+ * AXI4-stream core.
+ */
+ xvtc_gen_write(xvtc, XVTC_POLARITY,
+ XVTC_POLARITY_ACTIVE_CHROMA_POL |
+ XVTC_POLARITY_ACTIVE_VIDEO_POL |
+ XVTC_POLARITY_HSYNC_POL | XVTC_POLARITY_VSYNC_POL |
+ XVTC_POLARITY_HBLANK_POL | XVTC_POLARITY_VBLANK_POL);
+
+ /* We don't care about the chroma active signal; the encoding parameters are
+ * not important for now.
+ */
+ xvtc_gen_write(xvtc, XVTC_ENCODING, 0);
+
+ /* Configure the timings. The VBLANK and VSYNC signals assertion and
+ * deassertion are hardcoded to the first pixel of the line.
+ */
+ xvtc_gen_write(xvtc, XVTC_ACTIVE_SIZE,
+ (config->vblank_start << XVTC_ACTIVE_VSIZE_SHIFT) |
+ (config->hblank_start << XVTC_ACTIVE_HSIZE_SHIFT));
+ xvtc_gen_write(xvtc, XVTC_HSIZE, config->hsize);
+ xvtc_gen_write(xvtc, XVTC_VSIZE, config->vsize);
+ xvtc_gen_write(xvtc, XVTC_HSYNC,
+ (config->hsync_end << XVTC_HSYNC_END_SHIFT) |
+ (config->hsync_start << XVTC_HSYNC_START_SHIFT));
+ xvtc_gen_write(xvtc, XVTC_F0_VBLANK_H, 0);
+ xvtc_gen_write(xvtc, XVTC_F0_VSYNC_V,
+ (config->vsync_end << XVTC_F0_VSYNC_VEND_SHIFT) |
+ (config->vsync_start << XVTC_F0_VSYNC_VSTART_SHIFT));
+ xvtc_gen_write(xvtc, XVTC_F0_VSYNC_H, 0);
+
+ /* Enable the generator. Set the source of all generator parameters to
+ * generator registers.
+ */
+ xvip_write(&xvtc->xvip, XVIP_CTRL_CONTROL,
+ XVTC_CONTROL_ACTIVE_CHROMA_POL_SRC |
+ XVTC_CONTROL_ACTIVE_VIDEO_POL_SRC |
+ XVTC_CONTROL_HSYNC_POL_SRC | XVTC_CONTROL_VSYNC_POL_SRC |
+ XVTC_CONTROL_HBLANK_POL_SRC | XVTC_CONTROL_VBLANK_POL_SRC |
+ XVTC_CONTROL_CHROMA_SRC | XVTC_CONTROL_VBLANK_HOFF_SRC |
+ XVTC_CONTROL_VSYNC_END_SRC | XVTC_CONTROL_VSYNC_START_SRC |
+ XVTC_CONTROL_ACTIVE_VSIZE_SRC |
+ XVTC_CONTROL_FRAME_VSIZE_SRC | XVTC_CONTROL_HSYNC_END_SRC |
+ XVTC_CONTROL_HSYNC_START_SRC |
+ XVTC_CONTROL_ACTIVE_HSIZE_SRC |
+ XVTC_CONTROL_FRAME_HSIZE_SRC | XVTC_CONTROL_GEN_ENABLE |
+ XVIP_CTRL_CONTROL_REG_UPDATE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvtc_generator_start);
+
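+/**
+ * xvtc_generator_stop - Stop the timing generator
+ * @xvtc: Video Timing Controller
+ *
+ * Disable the generator and its clock.
+ *
+ * Return: 0 on success or -ENXIO if the VTC has no generator.
+ */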
+int xvtc_generator_stop(struct xvtc_device *xvtc)
+{
+ if (!xvtc->has_generator)
+ return -ENXIO;
+
+ xvip_write(&xvtc->xvip, XVIP_CTRL_CONTROL, 0);
+
+ clk_disable_unprepare(xvtc->xvip.clk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xvtc_generator_stop);
+
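+/**
+ * xvtc_of_get - Get the VTC referenced by the "xlnx,vtc" phandle of a DT node
+ * @np: device tree node referencing the VTC
+ *
+ * Return: a pointer to the xvtc_device if it has been probed, NULL if @np has
+ * no "xlnx,vtc" property, ERR_PTR(-EINVAL) if the phandle can't be parsed, or
+ * ERR_PTR(-EPROBE_DEFER) if the referenced VTC hasn't been probed yet.
+ */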
+struct xvtc_device *xvtc_of_get(struct device_node *np)
+{
+ struct device_node *xvtc_node;
+ struct xvtc_device *found = NULL;
+ struct xvtc_device *xvtc;
+
+ if (!of_find_property(np, "xlnx,vtc", NULL))
+ return NULL;
+
+ xvtc_node = of_parse_phandle(np, "xlnx,vtc", 0);
+ if (xvtc_node == NULL)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&xvtc_lock);
+ list_for_each_entry(xvtc, &xvtc_list, list) {
+ if (xvtc->xvip.dev->of_node == xvtc_node) {
+ found = xvtc;
+ break;
+ }
+ }
+ mutex_unlock(&xvtc_lock);
+
+ of_node_put(xvtc_node);
+
+ if (!found)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(xvtc_of_get);
+
+void xvtc_put(struct xvtc_device *xvtc)
+{
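+ /* Nothing to release: xvtc_of_get() doesn't take a reference yet. */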
+}
+EXPORT_SYMBOL_GPL(xvtc_put);
+
+/* -----------------------------------------------------------------------------
+ * Registration and Unregistration
+ */
+
+static void xvtc_register_device(struct xvtc_device *xvtc)
+{
+ mutex_lock(&xvtc_lock);
+ list_add_tail(&xvtc->list, &xvtc_list);
+ mutex_unlock(&xvtc_lock);
+}
+
+static void xvtc_unregister_device(struct xvtc_device *xvtc)
+{
+ mutex_lock(&xvtc_lock);
+ list_del(&xvtc->list);
+ mutex_unlock(&xvtc_lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xvtc_parse_of(struct xvtc_device *xvtc)
+{
+ struct device_node *node = xvtc->xvip.dev->of_node;
+
+ xvtc->has_detector = of_property_read_bool(node, "xlnx,detector");
+ xvtc->has_generator = of_property_read_bool(node, "xlnx,generator");
+
+ return 0;
+}
+
+static int xvtc_probe(struct platform_device *pdev)
+{
+ struct xvtc_device *xvtc;
+ int ret;
+
+ xvtc = devm_kzalloc(&pdev->dev, sizeof(*xvtc), GFP_KERNEL);
+ if (!xvtc)
+ return -ENOMEM;
+
+ xvtc->xvip.dev = &pdev->dev;
+
+ ret = xvtc_parse_of(xvtc);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xvtc->xvip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, xvtc);
+
+ xvip_print_version(&xvtc->xvip);
+
+ xvtc_register_device(xvtc);
+
+ return 0;
+}
+
+static int xvtc_remove(struct platform_device *pdev)
+{
+ struct xvtc_device *xvtc = platform_get_drvdata(pdev);
+
+ xvtc_unregister_device(xvtc);
+
+ xvip_cleanup_resources(&xvtc->xvip);
+
+ return 0;
+}
+
+static const struct of_device_id xvtc_of_id_table[] = {
+ { .compatible = "xlnx,v-tc-6.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvtc_of_id_table);
+
+static struct platform_driver xvtc_driver = {
+ .driver = {
+ .name = "xilinx-vtc",
+ .of_match_table = xvtc_of_id_table,
+ },
+ .probe = xvtc_probe,
+ .remove = xvtc_remove,
+};
+
+module_platform_driver(xvtc_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Timing Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
new file mode 100644
index 000000000000..e1bb2cfcf428
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -0,0 +1,42 @@
+/*
+ * Xilinx Video Timing Controller
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_VTC_H__
+#define __XILINX_VTC_H__
+
+struct device_node;
+struct xvtc_device;
+
+#define XVTC_MAX_HSIZE 8191
+#define XVTC_MAX_VSIZE 8191
+
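+/**
+ * struct xvtc_config - Timing generator configuration
+ * @hblank_start: horizontal blanking start (active line width in pixels)
+ * @hsync_start: horizontal sync start position
+ * @hsync_end: horizontal sync end position
+ * @hsize: total line size in pixels
+ * @vblank_start: vertical blanking start (active frame height in lines)
+ * @vsync_start: vertical sync start position
+ * @vsync_end: vertical sync end position
+ * @vsize: total frame size in lines
+ */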
+struct xvtc_config {
+ unsigned int hblank_start;
+ unsigned int hsync_start;
+ unsigned int hsync_end;
+ unsigned int hsize;
+ unsigned int vblank_start;
+ unsigned int vsync_start;
+ unsigned int vsync_end;
+ unsigned int vsize;
+};
+
+struct xvtc_device *xvtc_of_get(struct device_node *np);
+void xvtc_put(struct xvtc_device *xvtc);
+
+int xvtc_generator_start(struct xvtc_device *xvtc,
+ const struct xvtc_config *config);
+int xvtc_generator_stop(struct xvtc_device *xvtc);
+
+#endif /* __XILINX_VTC_H__ */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index b8f36445516b..a93f681aa9d6 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -347,6 +347,7 @@ static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
{
struct wl1273_core *core = radio->core;
int r = 0;
+ unsigned long t;
if (freq < WL1273_BAND_TX_LOW) {
dev_err(radio->dev,
@@ -378,11 +379,11 @@ static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
reinit_completion(&radio->busy);
/* wait for the FR IRQ */
- r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
- if (!r)
+ t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
+ if (!t)
return -ETIMEDOUT;
- dev_dbg(radio->dev, "WL1273_CHANL_SET: %d\n", r);
+ dev_dbg(radio->dev, "WL1273_CHANL_SET: %lu\n", t);
/* Enable the output power */
r = core->write(core, WL1273_POWER_ENB_SET, 1);
@@ -392,12 +393,12 @@ static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
reinit_completion(&radio->busy);
/* wait for the POWER_ENB IRQ */
- r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
- if (!r)
+ t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
+ if (!t)
return -ETIMEDOUT;
radio->tx_frequency = freq;
- dev_dbg(radio->dev, "WL1273_POWER_ENB_SET: %d\n", r);
+ dev_dbg(radio->dev, "WL1273_POWER_ENB_SET: %lu\n", t);
return 0;
}
@@ -406,6 +407,7 @@ static int wl1273_fm_set_rx_freq(struct wl1273_device *radio, unsigned int freq)
{
struct wl1273_core *core = radio->core;
int r, f;
+ unsigned long t;
if (freq < radio->rangelow) {
dev_err(radio->dev,
@@ -446,8 +448,8 @@ static int wl1273_fm_set_rx_freq(struct wl1273_device *radio, unsigned int freq)
reinit_completion(&radio->busy);
- r = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
- if (!r) {
+ t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
+ if (!t) {
dev_err(radio->dev, "%s: TIMEOUT\n", __func__);
return -ETIMEDOUT;
}
@@ -826,9 +828,12 @@ static int wl1273_fm_set_seek(struct wl1273_device *radio,
if (r)
goto out;
+ /* wait for the FR IRQ */
wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
- if (!(radio->irq_received & WL1273_BL_EVENT))
+ if (!(radio->irq_received & WL1273_BL_EVENT)) {
+ r = -ETIMEDOUT;
goto out;
+ }
radio->irq_received &= ~WL1273_BL_EVENT;
@@ -854,7 +859,9 @@ static int wl1273_fm_set_seek(struct wl1273_device *radio,
if (r)
goto out;
- wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
+ /* wait for the FR IRQ */
+ if (!wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000)))
+ r = -ETIMEDOUT;
out:
dev_dbg(radio->dev, "%s: Err: %d\n", __func__, r);
return r;
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 909c3f92d839..1d827adab7eb 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -208,6 +208,7 @@ static int si470x_set_band(struct si470x_device *radio, int band)
static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
{
int retval;
+ unsigned long time_left;
bool timed_out = false;
/* start tuning */
@@ -219,9 +220,9 @@ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
/* wait till tune operation has completed */
reinit_completion(&radio->completion);
- retval = wait_for_completion_timeout(&radio->completion,
- msecs_to_jiffies(tune_timeout));
- if (!retval)
+ time_left = wait_for_completion_timeout(&radio->completion,
+ msecs_to_jiffies(tune_timeout));
+ if (time_left == 0)
timed_out = true;
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
@@ -301,6 +302,7 @@ static int si470x_set_seek(struct si470x_device *radio,
int band, retval;
unsigned int freq;
bool timed_out = false;
+ unsigned long time_left;
/* set band */
if (seek->rangelow || seek->rangehigh) {
@@ -342,9 +344,9 @@ static int si470x_set_seek(struct si470x_device *radio,
/* wait till tune operation has completed */
reinit_completion(&radio->completion);
- retval = wait_for_completion_timeout(&radio->completion,
- msecs_to_jiffies(seek_timeout));
- if (!retval)
+ time_left = wait_for_completion_timeout(&radio->completion,
+ msecs_to_jiffies(seek_timeout));
+ if (time_left == 0)
timed_out = true;
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index c90004dac170..e9d03ac69a27 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -383,7 +383,7 @@ static int si4713_powerup(struct si4713_device *sdev)
}
}
- if (!IS_ERR(sdev->gpio_reset)) {
+ if (sdev->gpio_reset) {
udelay(50);
gpiod_set_value(sdev->gpio_reset, 1);
}
@@ -407,8 +407,7 @@ static int si4713_powerup(struct si4713_device *sdev)
SI4713_STC_INT | SI4713_CTS);
return err;
}
- if (!IS_ERR(sdev->gpio_reset))
- gpiod_set_value(sdev->gpio_reset, 0);
+ gpiod_set_value(sdev->gpio_reset, 0);
if (sdev->vdd) {
@@ -447,7 +446,7 @@ static int si4713_powerdown(struct si4713_device *sdev)
v4l2_dbg(1, debug, &sdev->sd, "Power down response: 0x%02x\n",
resp[0]);
v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
- if (!IS_ERR(sdev->gpio_reset))
+ if (sdev->gpio_reset)
gpiod_set_value(sdev->gpio_reset, 0);
if (sdev->vdd) {
@@ -1460,14 +1459,9 @@ static int si4713_probe(struct i2c_client *client,
goto exit;
}
- sdev->gpio_reset = devm_gpiod_get(&client->dev, "reset");
- if (!IS_ERR(sdev->gpio_reset)) {
- gpiod_direction_output(sdev->gpio_reset, 0);
- } else if (PTR_ERR(sdev->gpio_reset) == -ENOENT) {
- dev_dbg(&client->dev, "No reset GPIO assigned\n");
- } else if (PTR_ERR(sdev->gpio_reset) == -ENOSYS) {
- dev_dbg(&client->dev, "No reset GPIO support\n");
- } else {
+ sdev->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sdev->gpio_reset)) {
rval = PTR_ERR(sdev->gpio_reset);
dev_err(&client->dev, "Failed to request gpio: %d\n", rval);
goto exit;
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index f359be7e9dd9..9d6574bebf78 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -5,7 +5,7 @@ menu "Texas Instruments WL128x FM driver (ST based)"
config RADIO_WL128X
tristate "Texas Instruments WL128x FM Radio"
depends on VIDEO_V4L2 && RFKILL && GPIOLIB && TTY
- select TI_ST if NET
+ depends on TI_ST
help
Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index a5bd3f674bbd..fb42f0fd0c1f 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -36,7 +36,7 @@
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"
-static struct video_device *gradio_dev;
+static struct video_device gradio_dev;
static u8 radio_disconnected;
/* -- V4L2 RADIO (/dev/radioX) device file operation interfaces --- */
@@ -517,7 +517,7 @@ static struct video_device fm_viddev_template = {
.fops = &fm_drv_fops,
.ioctl_ops = &fm_drv_ioctl_ops,
.name = FM_DRV_NAME,
- .release = video_device_release,
+ .release = video_device_release_empty,
/*
* To ensure both the tuner and modulator ioctls are accessible we
* set the vfl_dir to M2M to indicate this.
@@ -543,29 +543,21 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
/* Init mutex for core locking */
mutex_init(&fmdev->mutex);
- /* Allocate new video device */
- gradio_dev = video_device_alloc();
- if (NULL == gradio_dev) {
- fmerr("Can't allocate video device\n");
- return -ENOMEM;
- }
-
/* Setup FM driver's V4L2 properties */
- memcpy(gradio_dev, &fm_viddev_template, sizeof(fm_viddev_template));
+ gradio_dev = fm_viddev_template;
- video_set_drvdata(gradio_dev, fmdev);
+ video_set_drvdata(&gradio_dev, fmdev);
- gradio_dev->lock = &fmdev->mutex;
- gradio_dev->v4l2_dev = &fmdev->v4l2_dev;
+ gradio_dev.lock = &fmdev->mutex;
+ gradio_dev.v4l2_dev = &fmdev->v4l2_dev;
/* Register with V4L2 subsystem as RADIO device */
- if (video_register_device(gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
- video_device_release(gradio_dev);
+ if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
fmerr("Could not register video device\n");
return -ENOMEM;
}
- fmdev->radio_dev = gradio_dev;
+ fmdev->radio_dev = &gradio_dev;
/* Register to v4l2 ctrl handler framework */
fmdev->radio_dev->ctrl_handler = &fmdev->ctrl_handler;
@@ -611,13 +603,13 @@ void *fm_v4l2_deinit_video_device(void)
struct fmdev *fmdev;
- fmdev = video_get_drvdata(gradio_dev);
+ fmdev = video_get_drvdata(&gradio_dev);
/* Unregister to v4l2 ctrl handler framework*/
v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
/* Unregister RADIO device from V4L2 subsystem */
- video_unregister_device(gradio_dev);
+ video_unregister_device(&gradio_dev);
v4l2_device_unregister(&fmdev->v4l2_dev);
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index e80f2c6c5f1a..8d77e1c4a141 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1195,16 +1195,6 @@ static struct pnp_driver ene_driver = {
.shutdown = ene_shutdown,
};
-static int __init ene_init(void)
-{
- return pnp_register_driver(&ene_driver);
-}
-
-static void ene_exit(void)
-{
- pnp_unregister_driver(&ene_driver);
-}
-
module_param(sample_period, int, S_IRUGO);
MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
@@ -1226,5 +1216,4 @@ MODULE_DESCRIPTION
MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");
-module_init(ene_init);
-module_exit(ene_exit);
+module_pnp_driver(ene_driver);
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index b5167573240e..5c63c2ec6183 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -684,16 +684,6 @@ static struct pnp_driver fintek_driver = {
.shutdown = fintek_shutdown,
};
-static int __init fintek_init(void)
-{
- return pnp_register_driver(&fintek_driver);
-}
-
-static void __exit fintek_exit(void)
-{
- pnp_unregister_driver(&fintek_driver);
-}
-
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");
@@ -703,5 +693,4 @@ MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");
-module_init(fintek_init);
-module_exit(fintek_exit);
+module_pnp_driver(fintek_driver);
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 77c78de4f5bf..03fe080278df 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -110,16 +110,32 @@ static int img_ir_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(priv->clk))
dev_warn(&pdev->dev, "cannot get core clock resource\n");
+
+ /* Get sys clock */
+ priv->sys_clk = devm_clk_get(&pdev->dev, "sys");
+ if (IS_ERR(priv->sys_clk))
+ dev_warn(&pdev->dev, "cannot get sys clock resource\n");
/*
- * The driver doesn't need to know about the system ("sys") or power
- * modulation ("mod") clocks yet
+ * Enable the system clock before the register interface is accessed.
+ * The ISR must not be called with the sys clock disabled, so probe
+ * exits with an error if the clock can't be enabled.
*/
+ if (!IS_ERR(priv->sys_clk)) {
+ error = clk_prepare_enable(priv->sys_clk);
+ if (error) {
+ dev_err(&pdev->dev, "cannot enable sys clock\n");
+ return error;
+ }
+ }
/* Set up raw & hw decoder */
error = img_ir_probe_raw(priv);
error2 = img_ir_probe_hw(priv);
- if (error && error2)
- return (error == -ENODEV) ? error2 : error;
+ if (error && error2) {
+ if (error == -ENODEV)
+ error = error2;
+ goto err_probe;
+ }
/* Get the IRQ */
priv->irq = irq;
@@ -139,6 +155,9 @@ static int img_ir_probe(struct platform_device *pdev)
err_irq:
img_ir_remove_hw(priv);
img_ir_remove_raw(priv);
+err_probe:
+ if (!IS_ERR(priv->sys_clk))
+ clk_disable_unprepare(priv->sys_clk);
return error;
}
@@ -146,12 +165,14 @@ static int img_ir_remove(struct platform_device *pdev)
{
struct img_ir_priv *priv = platform_get_drvdata(pdev);
- free_irq(priv->irq, img_ir_isr);
+ free_irq(priv->irq, priv);
img_ir_remove_hw(priv);
img_ir_remove_raw(priv);
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
+ if (!IS_ERR(priv->sys_clk))
+ clk_disable_unprepare(priv->sys_clk);
return 0;
}
diff --git a/drivers/media/rc/img-ir/img-ir.h b/drivers/media/rc/img-ir/img-ir.h
index 2ddf56083182..f1387c016d3d 100644
--- a/drivers/media/rc/img-ir/img-ir.h
+++ b/drivers/media/rc/img-ir/img-ir.h
@@ -138,6 +138,7 @@ struct clk;
* @dev: Platform device.
* @irq: IRQ number.
* @clk: Input clock.
+ * @sys_clk: System clock.
* @reg_base: Iomem base address of IR register block.
* @lock: Protects IR registers and variables in this struct.
* @raw: Driver data for raw decoder.
@@ -147,6 +148,7 @@ struct img_ir_priv {
struct device *dev;
int irq;
struct clk *clk;
+ struct clk *sys_clk;
void __iomem *reg_base;
spinlock_t lock;
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index b0df62961c14..58ec5986274e 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -16,14 +16,6 @@
#include <linux/regmap.h>
#include <media/rc-core.h>
-/* Allow the driver to compile on all architectures */
-#ifndef writel_relaxed
-# define writel_relaxed writel
-#endif
-#ifndef readl_relaxed
-# define readl_relaxed readl
-#endif
-
#define IR_ENABLE 0x00
#define IR_CONFIG 0x04
#define CNT_LEADS 0x08
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 56abf9120cc2..0f301903aa6f 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1708,21 +1708,10 @@ static struct pnp_driver ite_driver = {
.shutdown = ite_shutdown,
};
-static int __init ite_init(void)
-{
- return pnp_register_driver(&ite_driver);
-}
-
-static void __exit ite_exit(void)
-{
- pnp_unregister_driver(&ite_driver);
-}
-
MODULE_DEVICE_TABLE(pnp, ite_ids);
MODULE_DESCRIPTION("ITE Tech Inc. IT8712F/ITE8512F CIR driver");
MODULE_AUTHOR("Juan J. Garcia de Soria <skandalfo@gmail.com>");
MODULE_LICENSE("GPL");
-module_init(ite_init);
-module_exit(ite_exit);
+module_pnp_driver(ite_driver);
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 9c2c8635ff33..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -1219,16 +1219,6 @@ static struct pnp_driver nvt_driver = {
.shutdown = nvt_shutdown,
};
-static int __init nvt_init(void)
-{
- return pnp_register_driver(&nvt_driver);
-}
-
-static void __exit nvt_exit(void)
-{
- pnp_unregister_driver(&nvt_driver);
-}
-
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");
@@ -1238,5 +1228,4 @@ MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");
-module_init(nvt_init);
-module_exit(nvt_exit);
+module_pnp_driver(nvt_driver);
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 42e5a01b9192..983510d282f6 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -224,14 +224,6 @@ config MEDIA_TUNER_FC2580
help
FCI FC2580 silicon tuner driver.
-config MEDIA_TUNER_M88TS2022
- tristate "Montage M88TS2022 silicon tuner"
- depends on MEDIA_SUPPORT && I2C
- select REGMAP_I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Montage M88TS2022 silicon tuner driver.
-
config MEDIA_TUNER_M88RS6000T
tristate "Montage M88RS6000 internal tuner"
depends on MEDIA_SUPPORT && I2C
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index da4fe6ef73e7..06a9ab65e5fa 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_MEDIA_TUNER_E4000) += e4000.o
obj-$(CONFIG_MEDIA_TUNER_FC2580) += fc2580.o
obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
obj-$(CONFIG_MEDIA_TUNER_SI2157) += si2157.o
-obj-$(CONFIG_MEDIA_TUNER_M88TS2022) += m88ts2022.o
obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
diff --git a/drivers/media/tuners/fc0011.h b/drivers/media/tuners/fc0011.h
index 43ec893a6877..81bb568d6943 100644
--- a/drivers/media/tuners/fc0011.h
+++ b/drivers/media/tuners/fc0011.h
@@ -23,7 +23,7 @@ enum fc0011_fe_callback_commands {
FC0011_FE_CALLBACK_RESET,
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_FC0011)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_FC0011)
struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
const struct fc0011_config *config);
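The IS_ENABLED() to IS_REACHABLE() switch here — repeated for the remaining tuner headers below — exposes the real attach prototype only when the symbol can actually be linked (tuner built-in, or both caller and tuner built as modules); otherwise the inline stub branch is compiled in. The headers all follow roughly this shape, with a hypothetical tuner name:

	#if IS_REACHABLE(CONFIG_MEDIA_TUNER_FOO)
	extern struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
					       struct i2c_adapter *i2c);
	#else
	static inline struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
						      struct i2c_adapter *i2c)
	{
		/* Tuner is =m but the caller is built-in: symbol unreachable. */
		pr_warn("%s: driver disabled by Kconfig\n", __func__);
		return NULL;
	}
	#endif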
diff --git a/drivers/media/tuners/fc0012.h b/drivers/media/tuners/fc0012.h
index 1d08057e3275..9ad32859bab0 100644
--- a/drivers/media/tuners/fc0012.h
+++ b/drivers/media/tuners/fc0012.h
@@ -49,7 +49,7 @@ struct fc0012_config {
bool clock_out;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_FC0012)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_FC0012)
extern struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
const struct fc0012_config *cfg);
diff --git a/drivers/media/tuners/fc0013.h b/drivers/media/tuners/fc0013.h
index d65d5b37f56e..e130bd7a3230 100644
--- a/drivers/media/tuners/fc0013.h
+++ b/drivers/media/tuners/fc0013.h
@@ -26,7 +26,7 @@
#include "dvb_frontend.h"
#include "fc001x-common.h"
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_FC0013)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_FC0013)
extern struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
u8 i2c_address, int dual_master,
diff --git a/drivers/media/tuners/fc2580.h b/drivers/media/tuners/fc2580.h
index 9c43c1cc82d9..b1ce6770f88e 100644
--- a/drivers/media/tuners/fc2580.h
+++ b/drivers/media/tuners/fc2580.h
@@ -37,7 +37,7 @@ struct fc2580_config {
u32 clock;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_FC2580)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_FC2580)
extern struct dvb_frontend *fc2580_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, const struct fc2580_config *cfg);
#else
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
deleted file mode 100644
index 066e5431da93..000000000000
--- a/drivers/media/tuners/m88ts2022.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * Montage M88TS2022 silicon tuner driver
- *
- * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Some calculations are taken from existing TS2020 driver.
- */
-
-#include "m88ts2022_priv.h"
-
-static int m88ts2022_cmd(struct m88ts2022_dev *dev, int op, int sleep, u8 reg,
- u8 mask, u8 val, u8 *reg_val)
-{
- int ret, i;
- unsigned int utmp;
- struct m88ts2022_reg_val reg_vals[] = {
- {0x51, 0x1f - op},
- {0x51, 0x1f},
- {0x50, 0x00 + op},
- {0x50, 0x00},
- };
-
- for (i = 0; i < 2; i++) {
- dev_dbg(&dev->client->dev,
- "i=%d op=%02x reg=%02x mask=%02x val=%02x\n",
- i, op, reg, mask, val);
-
- for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
- ret = regmap_write(dev->regmap, reg_vals[i].reg,
- reg_vals[i].val);
- if (ret)
- goto err;
- }
-
- usleep_range(sleep * 1000, sleep * 10000);
-
- ret = regmap_read(dev->regmap, reg, &utmp);
- if (ret)
- goto err;
-
- if ((utmp & mask) != val)
- break;
- }
-
- if (reg_val)
- *reg_val = utmp;
-err:
- return ret;
-}
-
-static int m88ts2022_set_params(struct dvb_frontend *fe)
-{
- struct m88ts2022_dev *dev = fe->tuner_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret;
- unsigned int utmp, frequency_khz, frequency_offset_khz, f_3db_hz;
- unsigned int f_ref_khz, f_vco_khz, div_ref, div_out, pll_n, gdiv28;
- u8 buf[3], u8tmp, cap_code, lpf_gm, lpf_mxdiv, div_max, div_min;
- u16 u16tmp;
-
- dev_dbg(&dev->client->dev,
- "frequency=%d symbol_rate=%d rolloff=%d\n",
- c->frequency, c->symbol_rate, c->rolloff);
- /*
- * Integer-N PLL synthesizer
- * kHz is used for all calculations to keep calculations within 32-bit
- */
- f_ref_khz = DIV_ROUND_CLOSEST(dev->cfg.clock, 1000);
- div_ref = DIV_ROUND_CLOSEST(f_ref_khz, 2000);
-
- if (c->symbol_rate < 5000000)
- frequency_offset_khz = 3000; /* 3 MHz */
- else
- frequency_offset_khz = 0;
-
- frequency_khz = c->frequency + frequency_offset_khz;
-
- if (frequency_khz < 1103000) {
- div_out = 4;
- u8tmp = 0x1b;
- } else {
- div_out = 2;
- u8tmp = 0x0b;
- }
-
- buf[0] = u8tmp;
- buf[1] = 0x40;
- ret = regmap_bulk_write(dev->regmap, 0x10, buf, 2);
- if (ret)
- goto err;
-
- f_vco_khz = frequency_khz * div_out;
- pll_n = f_vco_khz * div_ref / f_ref_khz;
- pll_n += pll_n % 2;
- dev->frequency_khz = pll_n * f_ref_khz / div_ref / div_out;
-
- if (pll_n < 4095)
- u16tmp = pll_n - 1024;
- else if (pll_n < 6143)
- u16tmp = pll_n + 1024;
- else
- u16tmp = pll_n + 3072;
-
- buf[0] = (u16tmp >> 8) & 0x3f;
- buf[1] = (u16tmp >> 0) & 0xff;
- buf[2] = div_ref - 8;
- ret = regmap_bulk_write(dev->regmap, 0x01, buf, 3);
- if (ret)
- goto err;
-
- dev_dbg(&dev->client->dev,
- "frequency=%u offset=%d f_vco_khz=%u pll_n=%u div_ref=%u div_out=%u\n",
- dev->frequency_khz, dev->frequency_khz - c->frequency,
- f_vco_khz, pll_n, div_ref, div_out);
-
- ret = m88ts2022_cmd(dev, 0x10, 5, 0x15, 0x40, 0x00, NULL);
- if (ret)
- goto err;
-
- ret = regmap_read(dev->regmap, 0x14, &utmp);
- if (ret)
- goto err;
-
- utmp &= 0x7f;
- if (utmp < 64) {
- ret = regmap_update_bits(dev->regmap, 0x10, 0x80, 0x80);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x11, 0x6f);
- if (ret)
- goto err;
-
- ret = m88ts2022_cmd(dev, 0x10, 5, 0x15, 0x40, 0x00, NULL);
- if (ret)
- goto err;
- }
-
- ret = regmap_read(dev->regmap, 0x14, &utmp);
- if (ret)
- goto err;
-
- utmp &= 0x1f;
- if (utmp > 19) {
- ret = regmap_update_bits(dev->regmap, 0x10, 0x02, 0x00);
- if (ret)
- goto err;
- }
-
- ret = m88ts2022_cmd(dev, 0x08, 5, 0x3c, 0xff, 0x00, NULL);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x25, 0x00);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x27, 0x70);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x41, 0x09);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x08, 0x0b);
- if (ret)
- goto err;
-
- /* filters */
- gdiv28 = DIV_ROUND_CLOSEST(f_ref_khz * 1694U, 1000000U);
-
- ret = regmap_write(dev->regmap, 0x04, gdiv28);
- if (ret)
- goto err;
-
- ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
- if (ret)
- goto err;
-
- cap_code = u8tmp & 0x3f;
-
- ret = regmap_write(dev->regmap, 0x41, 0x0d);
- if (ret)
- goto err;
-
- ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
- if (ret)
- goto err;
-
- u8tmp &= 0x3f;
- cap_code = (cap_code + u8tmp) / 2;
- gdiv28 = gdiv28 * 207 / (cap_code * 2 + 151);
- div_max = gdiv28 * 135 / 100;
- div_min = gdiv28 * 78 / 100;
- div_max = clamp_val(div_max, 0U, 63U);
-
- f_3db_hz = mult_frac(c->symbol_rate, 135, 200);
- f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
- f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
-
-#define LPF_COEFF 3200U
- lpf_gm = DIV_ROUND_CLOSEST(f_3db_hz * gdiv28, LPF_COEFF * f_ref_khz);
- lpf_gm = clamp_val(lpf_gm, 1U, 23U);
-
- lpf_mxdiv = DIV_ROUND_CLOSEST(lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
- if (lpf_mxdiv < div_min)
- lpf_mxdiv = DIV_ROUND_CLOSEST(++lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
- lpf_mxdiv = clamp_val(lpf_mxdiv, 0U, div_max);
-
- ret = regmap_write(dev->regmap, 0x04, lpf_mxdiv);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x06, lpf_gm);
- if (ret)
- goto err;
-
- ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
- if (ret)
- goto err;
-
- cap_code = u8tmp & 0x3f;
-
- ret = regmap_write(dev->regmap, 0x41, 0x09);
- if (ret)
- goto err;
-
- ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
- if (ret)
- goto err;
-
- u8tmp &= 0x3f;
- cap_code = (cap_code + u8tmp) / 2;
-
- u8tmp = cap_code | 0x80;
- ret = regmap_write(dev->regmap, 0x25, u8tmp);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x27, 0x30);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x08, 0x09);
- if (ret)
- goto err;
-
- ret = m88ts2022_cmd(dev, 0x01, 20, 0x21, 0xff, 0x00, NULL);
- if (ret)
- goto err;
-err:
- if (ret)
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
- return ret;
-}
-
-static int m88ts2022_init(struct dvb_frontend *fe)
-{
- struct m88ts2022_dev *dev = fe->tuner_priv;
- int ret, i;
- u8 u8tmp;
- static const struct m88ts2022_reg_val reg_vals[] = {
- {0x7d, 0x9d},
- {0x7c, 0x9a},
- {0x7a, 0x76},
- {0x3b, 0x01},
- {0x63, 0x88},
- {0x61, 0x85},
- {0x22, 0x30},
- {0x30, 0x40},
- {0x20, 0x23},
- {0x24, 0x02},
- {0x12, 0xa0},
- };
-
- dev_dbg(&dev->client->dev, "\n");
-
- ret = regmap_write(dev->regmap, 0x00, 0x01);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x00, 0x03);
- if (ret)
- goto err;
-
- switch (dev->cfg.clock_out) {
- case M88TS2022_CLOCK_OUT_DISABLED:
- u8tmp = 0x60;
- break;
- case M88TS2022_CLOCK_OUT_ENABLED:
- u8tmp = 0x70;
- ret = regmap_write(dev->regmap, 0x05, dev->cfg.clock_out_div);
- if (ret)
- goto err;
- break;
- case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
- u8tmp = 0x6c;
- break;
- default:
- goto err;
- }
-
- ret = regmap_write(dev->regmap, 0x42, u8tmp);
- if (ret)
- goto err;
-
- if (dev->cfg.loop_through)
- u8tmp = 0xec;
- else
- u8tmp = 0x6c;
-
- ret = regmap_write(dev->regmap, 0x62, u8tmp);
- if (ret)
- goto err;
-
- for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
- ret = regmap_write(dev->regmap, reg_vals[i].reg, reg_vals[i].val);
- if (ret)
- goto err;
- }
-err:
- if (ret)
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int m88ts2022_sleep(struct dvb_frontend *fe)
-{
- struct m88ts2022_dev *dev = fe->tuner_priv;
- int ret;
-
- dev_dbg(&dev->client->dev, "\n");
-
- ret = regmap_write(dev->regmap, 0x00, 0x00);
- if (ret)
- goto err;
-err:
- if (ret)
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int m88ts2022_get_frequency(struct dvb_frontend *fe, u32 *frequency)
-{
- struct m88ts2022_dev *dev = fe->tuner_priv;
-
- dev_dbg(&dev->client->dev, "\n");
-
- *frequency = dev->frequency_khz;
- return 0;
-}
-
-static int m88ts2022_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
-{
- struct m88ts2022_dev *dev = fe->tuner_priv;
-
- dev_dbg(&dev->client->dev, "\n");
-
- *frequency = 0; /* Zero-IF */
- return 0;
-}
-
-static int m88ts2022_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
-{
- struct m88ts2022_dev *dev = fe->tuner_priv;
- int ret;
- u16 gain, u16tmp;
- unsigned int utmp, gain1, gain2, gain3;
-
- ret = regmap_read(dev->regmap, 0x3d, &utmp);
- if (ret)
- goto err;
-
- gain1 = (utmp >> 0) & 0x1f;
- gain1 = clamp(gain1, 0U, 15U);
-
- ret = regmap_read(dev->regmap, 0x21, &utmp);
- if (ret)
- goto err;
-
- gain2 = (utmp >> 0) & 0x1f;
- gain2 = clamp(gain2, 2U, 16U);
-
- ret = regmap_read(dev->regmap, 0x66, &utmp);
- if (ret)
- goto err;
-
- gain3 = (utmp >> 3) & 0x07;
- gain3 = clamp(gain3, 0U, 6U);
-
- gain = gain1 * 265 + gain2 * 338 + gain3 * 285;
-
- /* scale value to 0x0000-0xffff */
- u16tmp = (0xffff - gain);
- u16tmp = clamp_val(u16tmp, 59000U, 61500U);
-
- *strength = (u16tmp - 59000) * 0xffff / (61500 - 59000);
-err:
- if (ret)
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static const struct dvb_tuner_ops m88ts2022_tuner_ops = {
- .info = {
- .name = "Montage M88TS2022",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- },
-
- .init = m88ts2022_init,
- .sleep = m88ts2022_sleep,
- .set_params = m88ts2022_set_params,
-
- .get_frequency = m88ts2022_get_frequency,
- .get_if_frequency = m88ts2022_get_if_frequency,
- .get_rf_strength = m88ts2022_get_rf_strength,
-};
-
-static int m88ts2022_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct m88ts2022_config *cfg = client->dev.platform_data;
- struct dvb_frontend *fe = cfg->fe;
- struct m88ts2022_dev *dev;
- int ret;
- u8 u8tmp;
- unsigned int utmp;
- static const struct regmap_config regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- };
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
- goto err;
- }
-
- memcpy(&dev->cfg, cfg, sizeof(struct m88ts2022_config));
- dev->client = client;
- dev->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(dev->regmap)) {
- ret = PTR_ERR(dev->regmap);
- goto err;
- }
-
- /* check if the tuner is there */
- ret = regmap_read(dev->regmap, 0x00, &utmp);
- if (ret)
- goto err;
-
- if ((utmp & 0x03) == 0x00) {
- ret = regmap_write(dev->regmap, 0x00, 0x01);
- if (ret)
- goto err;
-
- usleep_range(2000, 50000);
- }
-
- ret = regmap_write(dev->regmap, 0x00, 0x03);
- if (ret)
- goto err;
-
- usleep_range(2000, 50000);
-
- ret = regmap_read(dev->regmap, 0x00, &utmp);
- if (ret)
- goto err;
-
- dev_dbg(&dev->client->dev, "chip_id=%02x\n", utmp);
-
- switch (utmp) {
- case 0xc3:
- case 0x83:
- break;
- default:
- ret = -ENODEV;
- goto err;
- }
-
- switch (dev->cfg.clock_out) {
- case M88TS2022_CLOCK_OUT_DISABLED:
- u8tmp = 0x60;
- break;
- case M88TS2022_CLOCK_OUT_ENABLED:
- u8tmp = 0x70;
- ret = regmap_write(dev->regmap, 0x05, dev->cfg.clock_out_div);
- if (ret)
- goto err;
- break;
- case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
- u8tmp = 0x6c;
- break;
- default:
- ret = -EINVAL;
- goto err;
- }
-
- ret = regmap_write(dev->regmap, 0x42, u8tmp);
- if (ret)
- goto err;
-
- if (dev->cfg.loop_through)
- u8tmp = 0xec;
- else
- u8tmp = 0x6c;
-
- ret = regmap_write(dev->regmap, 0x62, u8tmp);
- if (ret)
- goto err;
-
- /* sleep */
- ret = regmap_write(dev->regmap, 0x00, 0x00);
- if (ret)
- goto err;
-
- dev_info(&dev->client->dev, "Montage M88TS2022 successfully identified\n");
-
- fe->tuner_priv = dev;
- memcpy(&fe->ops.tuner_ops, &m88ts2022_tuner_ops,
- sizeof(struct dvb_tuner_ops));
-
- i2c_set_clientdata(client, dev);
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- kfree(dev);
- return ret;
-}
-
-static int m88ts2022_remove(struct i2c_client *client)
-{
- struct m88ts2022_dev *dev = i2c_get_clientdata(client);
- struct dvb_frontend *fe = dev->cfg.fe;
-
- dev_dbg(&client->dev, "\n");
-
- memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
- fe->tuner_priv = NULL;
- kfree(dev);
-
- return 0;
-}
-
-static const struct i2c_device_id m88ts2022_id[] = {
- {"m88ts2022", 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, m88ts2022_id);
-
-static struct i2c_driver m88ts2022_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "m88ts2022",
- },
- .probe = m88ts2022_probe,
- .remove = m88ts2022_remove,
- .id_table = m88ts2022_id,
-};
-
-module_i2c_driver(m88ts2022_driver);
-
-MODULE_DESCRIPTION("Montage M88TS2022 silicon tuner driver");
-MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/m88ts2022.h b/drivers/media/tuners/m88ts2022.h
deleted file mode 100644
index 659fa1b1633a..000000000000
--- a/drivers/media/tuners/m88ts2022.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Montage M88TS2022 silicon tuner driver
- *
- * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef M88TS2022_H
-#define M88TS2022_H
-
-#include "dvb_frontend.h"
-
-struct m88ts2022_config {
- /*
- * clock
- * 16000000 - 32000000
- */
- u32 clock;
-
- /*
- * RF loop-through
- */
- u8 loop_through:1;
-
- /*
- * clock output
- */
-#define M88TS2022_CLOCK_OUT_DISABLED 0
-#define M88TS2022_CLOCK_OUT_ENABLED 1
-#define M88TS2022_CLOCK_OUT_ENABLED_XTALOUT 2
- u8 clock_out:2;
-
- /*
- * clock output divider
- * 1 - 31
- */
- u8 clock_out_div:5;
-
- /*
- * pointer to DVB frontend
- */
- struct dvb_frontend *fe;
-};
-
-#endif
diff --git a/drivers/media/tuners/m88ts2022_priv.h b/drivers/media/tuners/m88ts2022_priv.h
deleted file mode 100644
index feeb5ad6beef..000000000000
--- a/drivers/media/tuners/m88ts2022_priv.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Montage M88TS2022 silicon tuner driver
- *
- * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef M88TS2022_PRIV_H
-#define M88TS2022_PRIV_H
-
-#include "m88ts2022.h"
-#include <linux/regmap.h>
-
-struct m88ts2022_dev {
- struct m88ts2022_config cfg;
- struct i2c_client *client;
- struct regmap *regmap;
- u32 frequency_khz;
-};
-
-struct m88ts2022_reg_val {
- u8 reg;
- u8 val;
-};
-
-#endif
diff --git a/drivers/media/tuners/max2165.h b/drivers/media/tuners/max2165.h
index 26e1dc64bb67..5054f01a78fb 100644
--- a/drivers/media/tuners/max2165.h
+++ b/drivers/media/tuners/max2165.h
@@ -32,7 +32,7 @@ struct max2165_config {
u8 osc_clk; /* in MHz, selectable values: 4,16,18,20,22,24,26,28 */
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MAX2165)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MAX2165)
extern struct dvb_frontend *max2165_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct max2165_config *cfg);
diff --git a/drivers/media/tuners/mc44s803.h b/drivers/media/tuners/mc44s803.h
index 9aae50aca2b7..b3e614be657d 100644
--- a/drivers/media/tuners/mc44s803.h
+++ b/drivers/media/tuners/mc44s803.h
@@ -32,7 +32,7 @@ struct mc44s803_config {
u8 dig_out;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MC44S803)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MC44S803)
extern struct dvb_frontend *mc44s803_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, struct mc44s803_config *cfg);
#else
diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c
index 26019e731993..74cfc3c98edb 100644
--- a/drivers/media/tuners/msi001.c
+++ b/drivers/media/tuners/msi001.c
@@ -408,7 +408,7 @@ static int msi001_s_ctrl(struct v4l2_ctrl *ctrl)
s->mixer_gain->cur.val, s->if_gain->val);
break;
default:
- dev_dbg(&s->spi->dev, "unkown control %d\n", ctrl->id);
+ dev_dbg(&s->spi->dev, "unknown control %d\n", ctrl->id);
ret = -EINVAL;
}
diff --git a/drivers/media/tuners/mt2060.h b/drivers/media/tuners/mt2060.h
index c64fc19cb278..6efed359a24f 100644
--- a/drivers/media/tuners/mt2060.h
+++ b/drivers/media/tuners/mt2060.h
@@ -30,7 +30,7 @@ struct mt2060_config {
u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2060)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MT2060)
extern struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1);
#else
static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1)
diff --git a/drivers/media/tuners/mt2063.h b/drivers/media/tuners/mt2063.h
index e1acfc8e7ae3..e55e0a6dd1be 100644
--- a/drivers/media/tuners/mt2063.h
+++ b/drivers/media/tuners/mt2063.h
@@ -8,7 +8,7 @@ struct mt2063_config {
u32 refclock;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2063)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MT2063)
struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
struct mt2063_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/tuners/mt20xx.h b/drivers/media/tuners/mt20xx.h
index f56241ccaa00..9912362b415e 100644
--- a/drivers/media/tuners/mt20xx.h
+++ b/drivers/media/tuners/mt20xx.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT20XX)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MT20XX)
extern struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
struct i2c_adapter* i2c_adap,
u8 i2c_addr);
diff --git a/drivers/media/tuners/mt2131.h b/drivers/media/tuners/mt2131.h
index 837c854b9c65..8267a6ae5d84 100644
--- a/drivers/media/tuners/mt2131.h
+++ b/drivers/media/tuners/mt2131.h
@@ -30,7 +30,7 @@ struct mt2131_config {
u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2131)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MT2131)
extern struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct mt2131_config *cfg,
diff --git a/drivers/media/tuners/mt2266.h b/drivers/media/tuners/mt2266.h
index fad6dd657d77..69abefa18c37 100644
--- a/drivers/media/tuners/mt2266.h
+++ b/drivers/media/tuners/mt2266.h
@@ -24,7 +24,7 @@ struct mt2266_config {
u8 i2c_address;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2266)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MT2266)
extern struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg);
#else
static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg)
diff --git a/drivers/media/tuners/mxl5005s.h b/drivers/media/tuners/mxl5005s.h
index ae8db885ad87..5764b12c5c7c 100644
--- a/drivers/media/tuners/mxl5005s.h
+++ b/drivers/media/tuners/mxl5005s.h
@@ -118,7 +118,7 @@ struct mxl5005s_config {
u8 AgcMasterByte;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MXL5005S)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MXL5005S)
extern struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct mxl5005s_config *config);
diff --git a/drivers/media/tuners/mxl5007t.h b/drivers/media/tuners/mxl5007t.h
index ae7037d681c5..e786d1f23ff1 100644
--- a/drivers/media/tuners/mxl5007t.h
+++ b/drivers/media/tuners/mxl5007t.h
@@ -77,7 +77,7 @@ struct mxl5007t_config {
unsigned int clk_out_enable:1;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_MXL5007T)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_MXL5007T)
extern struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 addr,
struct mxl5007t_config *cfg);
diff --git a/drivers/media/tuners/qt1010.h b/drivers/media/tuners/qt1010.h
index 8ab5d479749f..e3198f23437c 100644
--- a/drivers/media/tuners/qt1010.h
+++ b/drivers/media/tuners/qt1010.h
@@ -36,7 +36,7 @@ struct qt1010_config {
* @param cfg tuner hw based configuration
* @return fe pointer on success, NULL on failure
*/
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_QT1010)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_QT1010)
extern struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct qt1010_config *cfg);
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 8e040cf9cf13..71159a58860f 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -775,6 +775,19 @@ static int r820t_sysfreq_sel(struct r820t_priv *priv, u32 freq,
div_buf_cur = 0x30; /* 11, 150u */
filter_cur = 0x40; /* 10, low */
break;
+ case SYS_DVBC_ANNEX_A:
+ mixer_top = 0x24; /* mixer top:13 , top-1, low-discharge */
+ lna_top = 0xe5;
+ lna_vth_l = 0x62;
+ mixer_vth_l = 0x75;
+ air_cable1_in = 0x60;
+ cable2_in = 0x00;
+ pre_dect = 0x40;
+ lna_discharge = 14;
+ cp_cur = 0x38; /* 111, auto */
+ div_buf_cur = 0x30; /* 11, 150u */
+ filter_cur = 0x40; /* 10, low */
+ break;
default: /* DVB-T 8M */
mixer_top = 0x24; /* mixer top:13 , top-1, low-discharge */
lna_top = 0xe5; /* detect bw 3, lna top:4, predet top:2 */
@@ -957,7 +970,7 @@ static int r820t_set_tv_standard(struct r820t_priv *priv,
ext_enable = 0x40; /* r30[6], ext enable; r30[5]:0 ext at lna max */
loop_through = 0x00; /* r5[7], lt on */
lt_att = 0x00; /* r31[7], lt att enable */
- flt_ext_widest = 0x00; /* r15[7]: flt_ext_wide off */
+ flt_ext_widest = 0x80; /* r15[7]: flt_ext_wide on */
polyfil_cur = 0x60; /* r25[6:5]:min */
} else if (delsys == SYS_DVBC_ANNEX_A) {
if_khz = 5070;
@@ -971,6 +984,18 @@ static int r820t_set_tv_standard(struct r820t_priv *priv,
lt_att = 0x00; /* r31[7], lt att enable */
flt_ext_widest = 0x00; /* r15[7]: flt_ext_wide off */
polyfil_cur = 0x60; /* r25[6:5]:min */
+ } else if (delsys == SYS_DVBC_ANNEX_C) {
+ if_khz = 4063;
+ filt_cal_lo = 55000;
+ filt_gain = 0x10; /* +3db, 6mhz on */
+ img_r = 0x00; /* image negative */
+ filt_q = 0x10; /* r10[4]:low q(1'b1) */
+ hp_cor = 0x6a; /* 1.7m disable, +0cap, 1.0mhz */
+ ext_enable = 0x40; /* r30[6]=1 ext enable; r30[5]:1 ext at lna max-1 */
+ loop_through = 0x00; /* r5[7], lt on */
+ lt_att = 0x00; /* r31[7], lt att enable */
+ flt_ext_widest = 0x80; /* r15[7]: flt_ext_wide on */
+ polyfil_cur = 0x60; /* r25[6:5]:min */
} else {
if (bw <= 6) {
if_khz = 3570;
@@ -1186,7 +1211,7 @@ static int r820t_read_gain(struct r820t_priv *priv)
if (rc < 0)
return rc;
- return ((data[3] & 0x0f) << 1) + ((data[3] & 0xf0) >> 4);
+ return ((data[3] & 0x08) << 1) + ((data[3] & 0xf0) >> 4);
}
#if 0
diff --git a/drivers/media/tuners/r820t.h b/drivers/media/tuners/r820t.h
index 48af3548027d..b1e5661af1c7 100644
--- a/drivers/media/tuners/r820t.h
+++ b/drivers/media/tuners/r820t.h
@@ -42,7 +42,7 @@ struct r820t_config {
bool use_predetect;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_R820T)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_R820T)
struct dvb_frontend *r820t_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
const struct r820t_config *cfg);
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index fcf139dfdec6..d74ae26621ca 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -244,6 +244,7 @@ static int si2157_set_params(struct dvb_frontend *fe)
int ret;
struct si2157_cmd cmd;
u8 bandwidth, delivery_system;
+ u32 if_frequency = 5000000;
dev_dbg(&client->dev,
"delivery_system=%d frequency=%u bandwidth_hz=%u\n",
@@ -266,9 +267,11 @@ static int si2157_set_params(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_ATSC:
delivery_system = 0x00;
+ if_frequency = 3250000;
break;
case SYS_DVBC_ANNEX_B:
delivery_system = 0x10;
+ if_frequency = 4000000;
break;
case SYS_DVBT:
case SYS_DVBT2: /* it seems DVB-T and DVB-T2 both are 0x20 here */
@@ -302,6 +305,20 @@ static int si2157_set_params(struct dvb_frontend *fe)
if (ret)
goto err;
+ /* set if frequency if needed */
+ if (if_frequency != dev->if_frequency) {
+ memcpy(cmd.args, "\x14\x00\x06\x07", 4);
+ cmd.args[4] = (if_frequency / 1000) & 0xff;
+ cmd.args[5] = ((if_frequency / 1000) >> 8) & 0xff;
+ cmd.wlen = 6;
+ cmd.rlen = 4;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ dev->if_frequency = if_frequency;
+ }
+
/* set frequency */
memcpy(cmd.args, "\x41\x00\x00\x00\x00\x00\x00\x00", 8);
cmd.args[4] = (c->frequency >> 0) & 0xff;
@@ -322,14 +339,17 @@ err:
static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- *frequency = 5000000; /* default value of property 0x0706 */
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
+
+ *frequency = dev->if_frequency;
return 0;
}
static const struct dvb_tuner_ops si2157_ops = {
.info = {
.name = "Silicon Labs Si2146/2147/2148/2157/2158",
- .frequency_min = 110000000,
+ .frequency_min = 55000000,
.frequency_max = 862000000,
},
@@ -360,6 +380,7 @@ static int si2157_probe(struct i2c_client *client,
dev->inversion = cfg->inversion;
dev->fw_loaded = false;
dev->chiptype = (u8)id->driver_data;
+ dev->if_frequency = 5000000; /* default value of property 0x0706 */
mutex_init(&dev->i2c_mutex);
/* check if the tuner is there */
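As the division by 1000 and the two byte stores above indicate, the IF property is programmed in kHz as a 16-bit little-endian word. As plain arithmetic, not driver code:

	u32 if_frequency = 3250000;		/* ATSC IF selected above, in Hz */
	u16 if_khz = if_frequency / 1000;	/* the property is set in kHz */

	cmd.args[4] = if_khz & 0xff;		/* low byte  */
	cmd.args[5] = (if_khz >> 8) & 0xff;	/* high byte */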
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 7aa53bce5593..cd8fa5b25304 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -28,6 +28,7 @@ struct si2157_dev {
bool fw_loaded;
bool inversion;
u8 chiptype;
+ u32 if_frequency;
};
#define SI2157_CHIPTYPE_SI2157 0
diff --git a/drivers/media/tuners/tda18218.h b/drivers/media/tuners/tda18218.h
index 366410e0cc9a..1eacb4f84e93 100644
--- a/drivers/media/tuners/tda18218.h
+++ b/drivers/media/tuners/tda18218.h
@@ -30,7 +30,7 @@ struct tda18218_config {
u8 loop_through:1;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA18218)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TDA18218)
extern struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, struct tda18218_config *cfg);
#else
diff --git a/drivers/media/tuners/tda18271.h b/drivers/media/tuners/tda18271.h
index 4c418d63f540..0a846333ce57 100644
--- a/drivers/media/tuners/tda18271.h
+++ b/drivers/media/tuners/tda18271.h
@@ -121,7 +121,7 @@ enum tda18271_mode {
TDA18271_DIGITAL,
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA18271)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TDA18271)
extern struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr,
struct i2c_adapter *i2c,
struct tda18271_config *cfg);
diff --git a/drivers/media/tuners/tda827x.h b/drivers/media/tuners/tda827x.h
index b64292152baf..abf2e2fe5350 100644
--- a/drivers/media/tuners/tda827x.h
+++ b/drivers/media/tuners/tda827x.h
@@ -51,7 +51,7 @@ struct tda827x_config
* @param cfg optional callback function pointers.
* @return FE pointer on success, NULL on failure.
*/
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA827X)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TDA827X)
extern struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c,
struct tda827x_config *cfg);
diff --git a/drivers/media/tuners/tda8290.h b/drivers/media/tuners/tda8290.h
index cf96e585785e..901b8cac7105 100644
--- a/drivers/media/tuners/tda8290.h
+++ b/drivers/media/tuners/tda8290.h
@@ -38,7 +38,7 @@ struct tda829x_config {
struct tda18271_std_map *tda18271_std_map;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA8290)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TDA8290)
extern int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tda9887.h b/drivers/media/tuners/tda9887.h
index 37a4a1123e0c..95070eca02ca 100644
--- a/drivers/media/tuners/tda9887.h
+++ b/drivers/media/tuners/tda9887.h
@@ -21,7 +21,7 @@
#include "dvb_frontend.h"
/* ------------------------------------------------------------------------ */
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA9887)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TDA9887)
extern struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap,
u8 i2c_addr);
diff --git a/drivers/media/tuners/tea5761.h b/drivers/media/tuners/tea5761.h
index 933228ffb509..2d624d9919e3 100644
--- a/drivers/media/tuners/tea5761.h
+++ b/drivers/media/tuners/tea5761.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TEA5761)
extern int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tea5767.h b/drivers/media/tuners/tea5767.h
index c39101199383..4f6f6c92db78 100644
--- a/drivers/media/tuners/tea5767.h
+++ b/drivers/media/tuners/tea5767.h
@@ -39,7 +39,7 @@ struct tea5767_ctrl {
enum tea5767_xtal xtal_freq;
};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5767)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TEA5767)
extern int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tea5767_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tua9001.h b/drivers/media/tuners/tua9001.h
index 26358da1c100..2c3375c7aeb9 100644
--- a/drivers/media/tuners/tua9001.h
+++ b/drivers/media/tuners/tua9001.h
@@ -51,7 +51,7 @@ struct tua9001_config {
#define TUA9001_CMD_RESETN 1
#define TUA9001_CMD_RXEN 2
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TUA9001)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_TUA9001)
extern struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, struct tua9001_config *cfg);
#else
diff --git a/drivers/media/tuners/tuner-simple.h b/drivers/media/tuners/tuner-simple.h
index ffd12cfe650b..6399b45b0590 100644
--- a/drivers/media/tuners/tuner-simple.h
+++ b/drivers/media/tuners/tuner-simple.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_SIMPLE)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_SIMPLE)
extern struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap,
u8 i2c_addr,
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 181d087faec4..98e4effca896 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -56,7 +56,7 @@ struct xc2028_config {
#define XC2028_RESET_CLK 1
#define XC2028_I2C_FLUSH 2
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_XC2028)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_XC2028)
extern struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
struct xc2028_config *cfg);
#else
diff --git a/drivers/media/tuners/xc4000.h b/drivers/media/tuners/xc4000.h
index 97c23de5296c..40517860cf67 100644
--- a/drivers/media/tuners/xc4000.h
+++ b/drivers/media/tuners/xc4000.h
@@ -50,7 +50,7 @@ struct xc4000_config {
* it's passed back to a bridge during tuner_callback().
*/
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_XC4000)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_XC4000)
extern struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct xc4000_config *cfg);
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 2a039de8ab9a..e6e5e90d8d95 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1336,7 +1336,10 @@ static int xc5000_release(struct dvb_frontend *fe)
if (priv) {
cancel_delayed_work(&priv->timer_sleep);
- release_firmware(priv->firmware);
+ if (priv->firmware) {
+ release_firmware(priv->firmware);
+ priv->firmware = NULL;
+ }
hybrid_tuner_release_state(priv);
}
diff --git a/drivers/media/tuners/xc5000.h b/drivers/media/tuners/xc5000.h
index 6aa534f17a30..00ba29e21fb9 100644
--- a/drivers/media/tuners/xc5000.h
+++ b/drivers/media/tuners/xc5000.h
@@ -58,7 +58,7 @@ struct xc5000_config {
* it's passed back to a bridge during tuner_callback().
*/
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_XC5000)
+#if IS_REACHABLE(CONFIG_MEDIA_TUNER_XC5000)
extern struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
const struct xc5000_config *cfg);
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index a27cb5fcdef8..1a362a041ab3 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -299,29 +299,23 @@ static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
* Announces that a buffer were filled and request the next
*/
static inline void buffer_filled(struct au0828_dev *dev,
- struct au0828_dmaqueue *dma_q,
- struct au0828_buffer *buf)
+ struct au0828_dmaqueue *dma_q,
+ struct au0828_buffer *buf)
{
- /* Advice that buffer was filled */
- au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
+ struct vb2_buffer *vb = &buf->vb;
+ struct vb2_queue *q = vb->vb2_queue;
- buf->vb.v4l2_buf.sequence = dev->frame_count++;
- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
-}
-
-static inline void vbi_buffer_filled(struct au0828_dev *dev,
- struct au0828_dmaqueue *dma_q,
- struct au0828_buffer *buf)
-{
/* Advice that buffer was filled */
au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
- buf->vb.v4l2_buf.sequence = dev->vbi_frame_count++;
- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ vb->v4l2_buf.sequence = dev->frame_count++;
+ else
+ vb->v4l2_buf.sequence = dev->vbi_frame_count++;
+
+ vb->v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
/*
@@ -574,9 +568,7 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (fbyte & 0x40) {
/* VBI */
if (vbi_buf != NULL)
- vbi_buffer_filled(dev,
- vbi_dma_q,
- vbi_buf);
+ buffer_filled(dev, vbi_dma_q, vbi_buf);
vbi_get_next_buf(vbi_dma_q, &vbi_buf);
if (vbi_buf == NULL)
vbioutp = NULL;
@@ -899,12 +891,8 @@ void au0828_analog_unregister(struct au0828_dev *dev)
{
dprintk(1, "au0828_analog_unregister called\n");
mutex_lock(&au0828_sysfs_lock);
-
- if (dev->vdev)
- video_unregister_device(dev->vdev);
- if (dev->vbi_dev)
- video_unregister_device(dev->vbi_dev);
-
+ video_unregister_device(&dev->vdev);
+ video_unregister_device(&dev->vbi_dev);
mutex_unlock(&au0828_sysfs_lock);
}
@@ -949,7 +937,7 @@ static void au0828_vbi_buffer_timeout(unsigned long data)
if (buf != NULL) {
vbi_data = vb2_plane_vaddr(&buf->vb, 0);
memset(vbi_data, 0x00, buf->length);
- vbi_buffer_filled(dev, dma_q, buf);
+ buffer_filled(dev, dma_q, buf);
}
vbi_get_next_buf(dma_q, &buf);
@@ -1286,7 +1274,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
input->audioset = 2;
}
- input->std = dev->vdev->tvnorms;
+ input->std = dev->vdev.tvnorms;
return 0;
}
@@ -1704,7 +1692,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static const struct video_device au0828_video_template = {
.fops = &au0828_v4l_fops,
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
};
@@ -1814,52 +1802,36 @@ int au0828_analog_register(struct au0828_dev *dev,
dev->std = V4L2_STD_NTSC_M;
au0828_s_input(dev, 0);
- /* allocate and fill v4l2 video struct */
- dev->vdev = video_device_alloc();
- if (NULL == dev->vdev) {
- dprintk(1, "Can't allocate video_device.\n");
- return -ENOMEM;
- }
-
- /* allocate the VBI struct */
- dev->vbi_dev = video_device_alloc();
- if (NULL == dev->vbi_dev) {
- dprintk(1, "Can't allocate vbi_device.\n");
- ret = -ENOMEM;
- goto err_vdev;
- }
-
mutex_init(&dev->vb_queue_lock);
mutex_init(&dev->vb_vbi_queue_lock);
/* Fill the video capture device struct */
- *dev->vdev = au0828_video_template;
- dev->vdev->v4l2_dev = &dev->v4l2_dev;
- dev->vdev->lock = &dev->lock;
- dev->vdev->queue = &dev->vb_vidq;
- dev->vdev->queue->lock = &dev->vb_queue_lock;
- strcpy(dev->vdev->name, "au0828a video");
+ dev->vdev = au0828_video_template;
+ dev->vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->vdev.lock = &dev->lock;
+ dev->vdev.queue = &dev->vb_vidq;
+ dev->vdev.queue->lock = &dev->vb_queue_lock;
+ strcpy(dev->vdev.name, "au0828a video");
/* Setup the VBI device */
- *dev->vbi_dev = au0828_video_template;
- dev->vbi_dev->v4l2_dev = &dev->v4l2_dev;
- dev->vbi_dev->lock = &dev->lock;
- dev->vbi_dev->queue = &dev->vb_vbiq;
- dev->vbi_dev->queue->lock = &dev->vb_vbi_queue_lock;
- strcpy(dev->vbi_dev->name, "au0828a vbi");
+ dev->vbi_dev = au0828_video_template;
+ dev->vbi_dev.v4l2_dev = &dev->v4l2_dev;
+ dev->vbi_dev.lock = &dev->lock;
+ dev->vbi_dev.queue = &dev->vb_vbiq;
+ dev->vbi_dev.queue->lock = &dev->vb_vbi_queue_lock;
+ strcpy(dev->vbi_dev.name, "au0828a vbi");
/* initialize videobuf2 stuff */
retval = au0828_vb2_setup(dev);
if (retval != 0) {
dprintk(1, "unable to setup videobuf2 queues (error = %d).\n",
retval);
- ret = -ENODEV;
- goto err_vbi_dev;
+ return -ENODEV;
}
/* Register the v4l2 device */
- video_set_drvdata(dev->vdev, dev);
- retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
+ video_set_drvdata(&dev->vdev, dev);
+ retval = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
if (retval != 0) {
dprintk(1, "unable to register video device (error = %d).\n",
retval);
@@ -1868,8 +1840,8 @@ int au0828_analog_register(struct au0828_dev *dev,
}
/* Register the vbi device */
- video_set_drvdata(dev->vbi_dev, dev);
- retval = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, -1);
+ video_set_drvdata(&dev->vbi_dev, dev);
+ retval = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI, -1);
if (retval != 0) {
dprintk(1, "unable to register vbi device (error = %d).\n",
retval);
@@ -1882,14 +1854,10 @@ int au0828_analog_register(struct au0828_dev *dev,
return 0;
err_reg_vbi_dev:
- video_unregister_device(dev->vdev);
+ video_unregister_device(&dev->vdev);
err_reg_vdev:
vb2_queue_release(&dev->vb_vidq);
vb2_queue_release(&dev->vb_vbiq);
-err_vbi_dev:
- video_device_release(dev->vbi_dev);
-err_vdev:
- video_device_release(dev->vdev);
return ret;
}
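The au0828.h hunk below completes the conversion by embedding the video_device structs in au0828_dev. In outline, the embedded-device registration pattern looks like this — a minimal sketch with hypothetical names, and foo_fops is assumed to exist:

	struct foo_dev {
		struct v4l2_device v4l2_dev;
		struct video_device vdev;	/* embedded, not allocated */
	};

	static const struct video_device foo_template = {
		.fops    = &foo_fops,
		.release = video_device_release_empty,	/* nothing to kfree() */
	};

	static int foo_register(struct foo_dev *dev)
	{
		dev->vdev = foo_template;
		dev->vdev.v4l2_dev = &dev->v4l2_dev;
		video_set_drvdata(&dev->vdev, dev);
		return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
	}

	/* Teardown is video_unregister_device(&dev->vdev); no video_device_release(). */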
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index eb1518742ae6..3b480005ce3b 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -209,8 +209,8 @@ struct au0828_dev {
struct au0828_rc *ir;
#endif
- struct video_device *vdev;
- struct video_device *vbi_dev;
+ struct video_device vdev;
+ struct video_device vbi_dev;
/* Videobuf2 */
struct vb2_queue vb_vidq;
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 173c0e287a08..0cced3e5b040 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -47,6 +47,7 @@ config VIDEO_CX231XX_DVB
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3306A if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2165 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 3f295b4d1a3d..983ea8339154 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1868,13 +1868,9 @@ void cx231xx_417_unregister(struct cx231xx *dev)
dprintk(1, "%s()\n", __func__);
dprintk(3, "%s()\n", __func__);
- if (dev->v4l_device) {
- if (-1 != dev->v4l_device->minor)
- video_unregister_device(dev->v4l_device);
- else
- video_device_release(dev->v4l_device);
+ if (video_is_registered(&dev->v4l_device)) {
+ video_unregister_device(&dev->v4l_device);
v4l2_ctrl_handler_free(&dev->mpeg_ctrl_handler.hdl);
- dev->v4l_device = NULL;
}
}
@@ -1911,25 +1907,21 @@ static struct cx2341x_handler_ops cx231xx_ops = {
.s_video_encoding = cx231xx_s_video_encoding,
};
-static struct video_device *cx231xx_video_dev_alloc(
+static void cx231xx_video_dev_init(
struct cx231xx *dev,
struct usb_device *usbdev,
- struct video_device *template,
- char *type)
+ struct video_device *vfd,
+ const struct video_device *template,
+ const char *type)
{
- struct video_device *vfd;
-
dprintk(1, "%s()\n", __func__);
- vfd = video_device_alloc();
- if (NULL == vfd)
- return NULL;
*vfd = *template;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
type, cx231xx_boards[dev->model].name);
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->lock = &dev->lock;
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl;
video_set_drvdata(vfd, dev);
if (dev->tuner_type == TUNER_ABSENT) {
@@ -1938,9 +1930,6 @@ static struct video_device *cx231xx_video_dev_alloc(
v4l2_disable_ioctl(vfd, VIDIOC_G_TUNER);
v4l2_disable_ioctl(vfd, VIDIOC_S_TUNER);
}
-
- return vfd;
-
}
int cx231xx_417_register(struct cx231xx *dev)
@@ -1983,9 +1972,9 @@ int cx231xx_417_register(struct cx231xx *dev)
cx2341x_handler_set_50hz(&dev->mpeg_ctrl_handler, false);
/* Allocate and initialize V4L video device */
- dev->v4l_device = cx231xx_video_dev_alloc(dev,
- dev->udev, &cx231xx_mpeg_template, "mpeg");
- err = video_register_device(dev->v4l_device,
+ cx231xx_video_dev_init(dev, dev->udev,
+ &dev->v4l_device, &cx231xx_mpeg_template, "mpeg");
+ err = video_register_device(&dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
dprintk(3, "%s: can't register mpeg device\n", dev->name);
@@ -1994,7 +1983,7 @@ int cx231xx_417_register(struct cx231xx *dev)
}
dprintk(3, "%s: registered device video%d [mpeg]\n",
- dev->name, dev->v4l_device->num);
+ dev->name, dev->v4l_device.num);
return 0;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index da03733690bd..fe00da105e77 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -776,6 +776,45 @@ struct cx231xx_board cx231xx_boards[] = {
.gpio = NULL,
} },
},
+ [CX231XX_BOARD_HAUPPAUGE_955Q] = {
+ .name = "Hauppauge WinTV-HVR-955Q (111401)",
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
+ .has_dvb = 1,
+ .demod_addr = 0x0e,
+ .norm = V4L2_STD_NTSC,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -805,6 +844,8 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC},
{USB_DEVICE(0x2040, 0xb120),
.driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
+ {USB_DEVICE(0x2040, 0xb123),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_955Q},
{USB_DEVICE(0x2040, 0xb130),
.driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx},
{USB_DEVICE(0x2040, 0xb131),
@@ -912,9 +953,6 @@ static inline void cx231xx_set_model(struct cx231xx *dev)
*/
void cx231xx_pre_card_setup(struct cx231xx *dev)
{
-
- cx231xx_set_model(dev);
-
dev_info(dev->dev, "Identified as %s (card=%d)\n",
dev->board.name, dev->model);
@@ -1052,6 +1090,7 @@ void cx231xx_card_setup(struct cx231xx *dev)
switch (dev->model) {
case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx:
case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
{
struct tveeprom tvee;
static u8 eeprom[256];
@@ -1092,6 +1131,17 @@ void cx231xx_config_i2c(struct cx231xx *dev)
call_all(dev, video, s_stream, 1);
}
+static void cx231xx_unregister_media_device(struct cx231xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ if (dev->media_dev) {
+ media_device_unregister(dev->media_dev);
+ kfree(dev->media_dev);
+ dev->media_dev = NULL;
+ }
+#endif
+}
+
/*
* cx231xx_realease_resources()
* unregisters the v4l2,i2c and usb devices
@@ -1099,6 +1149,8 @@ void cx231xx_config_i2c(struct cx231xx *dev)
*/
void cx231xx_release_resources(struct cx231xx *dev)
{
+ cx231xx_unregister_media_device(dev);
+
cx231xx_release_analog_resources(dev);
cx231xx_remove_from_devlist(dev);
@@ -1117,6 +1169,74 @@ void cx231xx_release_resources(struct cx231xx *dev)
clear_bit(dev->devno, &cx231xx_devused);
}
+static void cx231xx_media_device_register(struct cx231xx *dev,
+ struct usb_device *udev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *mdev;
+ int ret;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return;
+
+ mdev->dev = dev->dev;
+ strlcpy(mdev->model, dev->board.name, sizeof(mdev->model));
+ if (udev->serial)
+ strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+ strcpy(mdev->bus_info, udev->devpath);
+ mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ ret = media_device_register(mdev);
+ if (ret) {
+ dev_err(dev->dev,
+ "Couldn't create a media device. Error: %d\n",
+ ret);
+ kfree(mdev);
+ return;
+ }
+
+ dev->media_dev = mdev;
+#endif
+}
+
+static void cx231xx_create_media_graph(struct cx231xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *mdev = dev->media_dev;
+ struct media_entity *entity;
+ struct media_entity *tuner = NULL, *decoder = NULL;
+
+ if (!mdev)
+ return;
+
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->type) {
+ case MEDIA_ENT_T_V4L2_SUBDEV_TUNER:
+ tuner = entity;
+ break;
+ case MEDIA_ENT_T_V4L2_SUBDEV_DECODER:
+ decoder = entity;
+ break;
+ }
+ }
+
+ /* Analog setup, using tuner as a link */
+
+ if (!decoder)
+ return;
+
+ if (tuner)
+ media_entity_create_link(tuner, 0, decoder, 0,
+ MEDIA_LNK_FL_ENABLED);
+ media_entity_create_link(decoder, 1, &dev->vdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED);
+ media_entity_create_link(decoder, 2, &dev->vbi_dev.entity, 0,
+ MEDIA_LNK_FL_ENABLED);
+#endif
+}
+
/*
* cx231xx_init_dev()
* allocates and inits the device structs, registers i2c bus and v4l device
@@ -1225,10 +1345,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
}
retval = cx231xx_register_analog_devices(dev);
- if (retval) {
- cx231xx_release_analog_resources(dev);
+ if (retval)
goto err_analog;
- }
cx231xx_ir_init(dev);
@@ -1236,6 +1354,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
return 0;
err_analog:
+ cx231xx_unregister_media_device(dev);
+ cx231xx_release_analog_resources(dev);
cx231xx_remove_from_devlist(dev);
err_dev_init:
cx231xx_dev_uninit(dev);
@@ -1438,6 +1558,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
dev->video_mode.alt = -1;
dev->dev = d;
+ cx231xx_set_model(dev);
+
dev->interface_count++;
/* reset gpio dir and value */
dev->gpio_dir = 0;
@@ -1502,7 +1624,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
+ /* Register the media controller */
+ cx231xx_media_device_register(dev, udev);
+
/* Create v4l2 device */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->v4l2_dev.mdev = dev->media_dev;
+#endif
retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
if (retval) {
dev_err(d, "v4l2_device_register failed\n");
@@ -1568,6 +1696,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* load other modules required */
request_modules(dev);
+ cx231xx_create_media_graph(dev);
+
return 0;
err_video_alt:
/* cx231xx_uninit_dev: */
@@ -1618,7 +1748,7 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
if (dev->users) {
dev_warn(dev->dev,
"device %s is open! Deregistration and memory deallocation are deferred on close.\n",
- video_device_node_name(dev->vdev));
+ video_device_node_name(&dev->vdev));
/* Even having users, it is safe to remove the RC i2c driver */
cx231xx_ir_exit(dev);
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 4a3f28c4e8d3..e42bde081cd7 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -176,16 +176,9 @@ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus,
saddr_len = req_data->saddr_len;
/* Set wValue */
- if (saddr_len == 1) /* need check saddr_len == 0 */
- ven_req.wValue =
- req_data->
- dev_addr << 9 | _i2c_period << 4 | saddr_len << 2 |
- _i2c_nostop << 1 | I2C_SYNC | _i2c_reserve << 6;
- else
- ven_req.wValue =
- req_data->
- dev_addr << 9 | _i2c_period << 4 | saddr_len << 2 |
- _i2c_nostop << 1 | I2C_SYNC | _i2c_reserve << 6;
+ ven_req.wValue = (req_data->dev_addr << 9 | _i2c_period << 4 |
+ saddr_len << 2 | _i2c_nostop << 1 | I2C_SYNC |
+ _i2c_reserve << 6);
/* set channel number */
if (req_data->direction & I2C_M_RD) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index dd600b994e69..610d5675bde6 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -34,6 +34,7 @@
#include "si2165.h"
#include "mb86a20s.h"
#include "si2157.h"
+#include "lgdt3306a.h"
MODULE_DESCRIPTION("driver for cx231xx based DVB cards");
MODULE_AUTHOR("Srinivasa Deevi <srinivasa.deevi@conexant.com>");
@@ -160,6 +161,18 @@ static const struct si2165_config pctv_quatro_stick_1114xx_si2165_config = {
.ref_freq_Hz = 24000000,
};
+static struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = {
+ .i2c_addr = 0x59,
+ .qam_if_khz = 4000,
+ .vsb_if_khz = 3250,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 1,
+ .mpeg_mode = LGDT3306A_MPEG_SERIAL,
+ .tpclk_edge = LGDT3306A_TPCLK_RISING_EDGE,
+ .tpvalid_polarity = LGDT3306A_TP_VALID_HIGH,
+ .xtalMHz = 25,
+};
+
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
char *errmsg = "Unknown";
@@ -455,6 +468,7 @@ static int register_dvb(struct cx231xx_dvb *dvb,
mutex_init(&dvb->lock);
+
/* register adapter */
result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
adapter_nr);
@@ -464,6 +478,7 @@ static int register_dvb(struct cx231xx_dvb *dvb,
dev->name, result);
goto fail_adapter;
}
+ dvb_register_media_controller(&dvb->adapter, dev->media_dev);
/* Ensure all frontends negotiate bus access */
dvb->frontend->ops.ts_bus_ctrl = cx231xx_dvb_bus_ctrl;
@@ -536,6 +551,8 @@ static int register_dvb(struct cx231xx_dvb *dvb,
/* register network adapter */
dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+ dvb_create_media_graph(&dvb->adapter);
+
return 0;
fail_fe_conn:
@@ -807,7 +824,61 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->i2c_client_tuner = client;
break;
}
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ {
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ struct si2157_config si2157_config;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
+ dev->dvb->frontend = dvb_attach(lgdt3306a_attach,
+ &hauppauge_955q_lgdt3306a_config,
+ tuner_i2c
+ );
+
+ if (dev->dvb->frontend == NULL) {
+ dev_err(dev->dev,
+ "Failed to attach LGDT3306A frontend.\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ dev->dvb->frontend->ops.i2c_gate_ctrl = NULL;
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = dev->dvb->frontend;
+ si2157_config.inversion = true;
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module("si2157");
+ client = i2c_new_device(
+ tuner_i2c,
+ &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ dvb_frontend_detach(dev->dvb->frontend);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(dev->dvb->frontend);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dev->cx231xx_reset_analog_tuner = NULL;
+
+ dev->dvb->i2c_client_tuner = client;
+ break;
+ }
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
case CX231XX_BOARD_KWORLD_UB430_USB_HYBRID:
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index ecea76fe07f6..c261e160c158 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -100,6 +100,75 @@ static struct cx231xx_fmt format[] = {
};
+static int cx231xx_enable_analog_tuner(struct cx231xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *mdev = dev->media_dev;
+ struct media_entity *entity, *decoder = NULL, *source;
+ struct media_link *link, *found_link = NULL;
+ int i, ret, active_links = 0;
+
+ if (!mdev)
+ return 0;
+
+ /*
+ * This will find the tuner that is connected into the decoder.
+ * Technically, this is not 100% correct, as the device may be
+ * using an analog input instead of the tuner. However, as we can't
+ * do DVB streaming while the DMA engine is being used for V4L2,
+ * this should be enough for the actual needs.
+ */
+ media_device_for_each_entity(entity, mdev) {
+ if (entity->type == MEDIA_ENT_T_V4L2_SUBDEV_DECODER) {
+ decoder = entity;
+ break;
+ }
+ }
+ if (!decoder)
+ return 0;
+
+ for (i = 0; i < decoder->num_links; i++) {
+ link = &decoder->links[i];
+ if (link->sink->entity == decoder) {
+ found_link = link;
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ active_links++;
+ break;
+ }
+ }
+
+ if (active_links == 1 || !found_link)
+ return 0;
+
+ source = found_link->source->entity;
+ for (i = 0; i < source->num_links; i++) {
+ struct media_entity *sink;
+ int flags = 0;
+
+ link = &source->links[i];
+ sink = link->sink->entity;
+
+ if (sink == entity)
+ flags = MEDIA_LNK_FL_ENABLED;
+
+ ret = media_entity_setup_link(link, flags);
+ if (ret) {
+ dev_err(dev->dev,
+ "Couldn't change link %s->%s to %s. Error %d\n",
+ source->name, sink->name,
+ flags ? "enabled" : "disabled",
+ ret);
+ return ret;
+ } else
+ dev_dbg(dev->dev,
+ "link %s->%s was %s\n",
+ source->name, sink->name,
+ flags ? "ENABLED" : "disabled");
+ }
+#endif
+ return 0;
+}
+
/* ------------------------------------------------------------------
Video buffer and parser functions
------------------------------------------------------------------*/
@@ -667,6 +736,9 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
if (*count < CX231XX_MIN_BUF)
*count = CX231XX_MIN_BUF;
+
+ cx231xx_enable_analog_tuner(dev);
+
return 0;
}
@@ -756,6 +828,7 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
}
buf->vb.state = VIDEOBUF_PREPARED;
+
return 0;
fail:
@@ -1056,7 +1129,7 @@ int cx231xx_enum_input(struct file *file, void *priv,
(CX231XX_VMUX_CABLE == INPUT(n)->type))
i->type = V4L2_INPUT_TYPE_TUNER;
- i->std = dev->vdev->tvnorms;
+ i->std = dev->vdev.tvnorms;
/* If they are asking about the active input, read signal status */
if (n == dev->video_input) {
@@ -1451,7 +1524,7 @@ int cx231xx_querycap(struct file *file, void *priv,
cap->capabilities = cap->device_caps | V4L2_CAP_READWRITE |
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
- if (dev->radio_dev)
+ if (video_is_registered(&dev->radio_dev))
cap->capabilities |= V4L2_CAP_RADIO;
return 0;
@@ -1729,34 +1802,21 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
/*FIXME: I2C IR should be disconnected */
- if (dev->radio_dev) {
- if (video_is_registered(dev->radio_dev))
- video_unregister_device(dev->radio_dev);
- else
- video_device_release(dev->radio_dev);
- dev->radio_dev = NULL;
- }
- if (dev->vbi_dev) {
+ if (video_is_registered(&dev->radio_dev))
+ video_unregister_device(&dev->radio_dev);
+ if (video_is_registered(&dev->vbi_dev)) {
dev_info(dev->dev, "V4L2 device %s deregistered\n",
- video_device_node_name(dev->vbi_dev));
- if (video_is_registered(dev->vbi_dev))
- video_unregister_device(dev->vbi_dev);
- else
- video_device_release(dev->vbi_dev);
- dev->vbi_dev = NULL;
+ video_device_node_name(&dev->vbi_dev));
+ video_unregister_device(&dev->vbi_dev);
}
- if (dev->vdev) {
+ if (video_is_registered(&dev->vdev)) {
dev_info(dev->dev, "V4L2 device %s deregistered\n",
- video_device_node_name(dev->vdev));
+ video_device_node_name(&dev->vdev));
if (dev->board.has_417)
cx231xx_417_unregister(dev);
- if (video_is_registered(dev->vdev))
- video_unregister_device(dev->vdev);
- else
- video_device_release(dev->vdev);
- dev->vdev = NULL;
+ video_unregister_device(&dev->vdev);
}
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_ctrl_handler_free(&dev->radio_ctrl_handler);
@@ -2013,7 +2073,7 @@ static struct video_device cx231xx_vbi_template;
static const struct video_device cx231xx_video_template = {
.fops = &cx231xx_v4l_fops,
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = V4L2_STD_ALL,
};
@@ -2049,19 +2109,14 @@ static struct video_device cx231xx_radio_template = {
/******************************** usb interface ******************************/
-static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
- const struct video_device
- *template, const char *type_name)
+static void cx231xx_vdev_init(struct cx231xx *dev,
+ struct video_device *vfd,
+ const struct video_device *template,
+ const char *type_name)
{
- struct video_device *vfd;
-
- vfd = video_device_alloc();
- if (NULL == vfd)
- return NULL;
-
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
@@ -2073,7 +2128,6 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
v4l2_disable_ioctl(vfd, VIDIOC_G_TUNER);
v4l2_disable_ioctl(vfd, VIDIOC_S_TUNER);
}
- return vfd;
}
int cx231xx_register_analog_devices(struct cx231xx *dev)
@@ -2116,15 +2170,16 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
/* write code here... */
/* allocate and fill video video_device struct */
- dev->vdev = cx231xx_vdev_init(dev, &cx231xx_video_template, "video");
- if (!dev->vdev) {
- dev_err(dev->dev, "cannot allocate video_device.\n");
- return -ENODEV;
- }
-
- dev->vdev->ctrl_handler = &dev->ctrl_handler;
+ cx231xx_vdev_init(dev, &dev->vdev, &cx231xx_video_template, "video");
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ dev->video_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&dev->vdev.entity, 1, &dev->video_pad, 0);
+ if (ret < 0)
+ dev_err(dev->dev, "failed to initialize video media entity!\n");
+#endif
+ dev->vdev.ctrl_handler = &dev->ctrl_handler;
/* register v4l2 video video_device */
- ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
+ ret = video_register_device(&dev->vdev, VFL_TYPE_GRABBER,
video_nr[dev->devno]);
if (ret) {
dev_err(dev->dev,
@@ -2134,22 +2189,24 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
}
dev_info(dev->dev, "Registered video device %s [v4l2]\n",
- video_device_node_name(dev->vdev));
+ video_device_node_name(&dev->vdev));
/* Initialize VBI template */
cx231xx_vbi_template = cx231xx_video_template;
strcpy(cx231xx_vbi_template.name, "cx231xx-vbi");
/* Allocate and fill vbi video_device struct */
- dev->vbi_dev = cx231xx_vdev_init(dev, &cx231xx_vbi_template, "vbi");
+ cx231xx_vdev_init(dev, &dev->vbi_dev, &cx231xx_vbi_template, "vbi");
- if (!dev->vbi_dev) {
- dev_err(dev->dev, "cannot allocate video_device.\n");
- return -ENODEV;
- }
- dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ dev->vbi_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&dev->vbi_dev.entity, 1, &dev->vbi_pad, 0);
+ if (ret < 0)
+ dev_err(dev->dev, "failed to initialize vbi media entity!\n");
+#endif
+ dev->vbi_dev.ctrl_handler = &dev->ctrl_handler;
/* register v4l2 vbi video_device */
- ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
+ ret = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
if (ret < 0) {
dev_err(dev->dev, "unable to register vbi device\n");
@@ -2157,18 +2214,13 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
}
dev_info(dev->dev, "Registered VBI device %s\n",
- video_device_node_name(dev->vbi_dev));
+ video_device_node_name(&dev->vbi_dev));
if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) {
- dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template,
- "radio");
- if (!dev->radio_dev) {
- dev_err(dev->dev,
- "cannot allocate video_device.\n");
- return -ENODEV;
- }
- dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
- ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
+ cx231xx_vdev_init(dev, &dev->radio_dev,
+ &cx231xx_radio_template, "radio");
+ dev->radio_dev.ctrl_handler = &dev->radio_ctrl_handler;
+ ret = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
dev_err(dev->dev,
@@ -2176,7 +2228,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
return ret;
}
dev_info(dev->dev, "Registered radio device as %s\n",
- video_device_node_name(dev->radio_dev));
+ video_device_node_name(&dev->radio_dev));
}
return 0;
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index 6d6f3ee812f6..00d3bce9a690 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -76,6 +76,7 @@
#define CX231XX_BOARD_KWORLD_UB445_USB_HYBRID 18
#define CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx 19
#define CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx 20
+#define CX231XX_BOARD_HAUPPAUGE_955Q 21
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
@@ -633,7 +634,7 @@ struct cx231xx {
/* video for linux */
int users; /* user count for exclusive use */
- struct video_device *vdev; /* video for linux device struct */
+ struct video_device vdev; /* video for linux device struct */
v4l2_std_id norm; /* selected tv norm */
int ctl_freq; /* selected frequency */
unsigned int ctl_ainput; /* selected audio input */
@@ -655,8 +656,13 @@ struct cx231xx {
struct mutex ctrl_urb_lock; /* protects urb_buf */
struct list_head inqueue, outqueue;
wait_queue_head_t open, wait_frame, wait_stream;
- struct video_device *vbi_dev;
- struct video_device *radio_dev;
+ struct video_device vbi_dev;
+ struct video_device radio_dev;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_device *media_dev;
+ struct media_pad video_pad, vbi_pad;
+#endif
unsigned char eedata[256];
@@ -718,7 +724,7 @@ struct cx231xx {
u8 USE_ISO;
struct cx231xx_tvnorm encodernorm;
struct cx231xx_tsport ts1, ts2;
- struct video_device *v4l_device;
+ struct video_device v4l_device;
atomic_t v4l_reader_count;
u32 freq;
unsigned int input;
@@ -972,8 +978,11 @@ extern void cx231xx_417_unregister(struct cx231xx *dev);
int cx231xx_ir_init(struct cx231xx *dev);
void cx231xx_ir_exit(struct cx231xx *dev);
#else
-#define cx231xx_ir_init(dev) (0)
-#define cx231xx_ir_exit(dev) (0)
+static inline int cx231xx_ir_init(struct cx231xx *dev)
+{
+ return 0;
+}
+static inline void cx231xx_ir_exit(struct cx231xx *dev) {}
#endif
static inline unsigned int norm_maxw(struct cx231xx *dev)
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 0982e734fab5..9facc92c8dea 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -146,7 +146,7 @@ config DVB_USB_DVBSKY
depends on DVB_USB_V2
select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
select DVB_SP2 if MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 41c6363dff08..023d91f7e654 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -25,6 +25,7 @@
#include <linux/usb/input.h>
#include <linux/firmware.h>
#include <media/rc-core.h>
+#include <media/media-device.h>
#include "dvb_frontend.h"
#include "dvb_demux.h"
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 9913e0f59485..f5df9eaba04f 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -400,10 +400,61 @@ skip_feed_stop:
return ret;
}
+static void dvb_usbv2_media_device_register(struct dvb_usb_adapter *adap)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ struct media_device *mdev;
+ struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_device *udev = d->udev;
+ int ret;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return;
+
+ mdev->dev = &udev->dev;
+ strlcpy(mdev->model, d->name, sizeof(mdev->model));
+ if (udev->serial)
+ strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+ strcpy(mdev->bus_info, udev->devpath);
+ mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ ret = media_device_register(mdev);
+ if (ret) {
+ dev_err(&d->udev->dev,
+ "Couldn't create a media device. Error: %d\n",
+ ret);
+ kfree(mdev);
+ return;
+ }
+
+ dvb_register_media_controller(&adap->dvb_adap, mdev);
+
+ dev_info(&d->udev->dev, "media controller created\n");
+
+#endif
+}
+
+static void dvb_usbv2_media_device_unregister(struct dvb_usb_adapter *adap)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+
+ if (!adap->dvb_adap.mdev)
+ return;
+
+ media_device_unregister(adap->dvb_adap.mdev);
+ kfree(adap->dvb_adap.mdev);
+ adap->dvb_adap.mdev = NULL;
+
+#endif
+}
+
static int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
{
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
+
dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
ret = dvb_register_adapter(&adap->dvb_adap, d->name, d->props->owner,
@@ -416,6 +467,8 @@ static int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
adap->dvb_adap.priv = adap;
+ dvb_usbv2_media_device_register(adap);
+
if (d->props->read_mac_address) {
ret = d->props->read_mac_address(adap,
adap->dvb_adap.proposed_mac);
@@ -464,6 +517,7 @@ err_dvb_net_init:
err_dvb_dmxdev_init:
dvb_dmx_release(&adap->demux);
err_dvb_dmx_init:
+ dvb_usbv2_media_device_unregister(adap);
dvb_unregister_adapter(&adap->dvb_adap);
err_dvb_register_adapter:
adap->dvb_adap.priv = NULL;
@@ -480,6 +534,7 @@ static int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
adap->demux.dmx.close(&adap->demux.dmx);
dvb_dmxdev_release(&adap->dmxdev);
dvb_dmx_release(&adap->demux);
+ dvb_usbv2_media_device_unregister(adap);
dvb_unregister_adapter(&adap->dvb_adap);
}
@@ -643,6 +698,8 @@ static int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
}
}
+ dvb_create_media_graph(&adap->dvb_adap);
+
return 0;
err_dvb_unregister_frontend:
@@ -955,6 +1012,7 @@ void dvb_usbv2_disconnect(struct usb_interface *intf)
struct dvb_usb_device *d = usb_get_intfdata(intf);
const char *name = d->name;
struct device dev = d->udev->dev;
+
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 9b5add4499e3..cdf59bcd760c 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -20,7 +20,7 @@
#include "dvb_usb.h"
#include "m88ds3103.h"
-#include "m88ts2022.h"
+#include "ts2020.h"
#include "sp2.h"
#include "si2168.h"
#include "si2157.h"
@@ -315,9 +315,7 @@ static int dvbsky_s960_attach(struct dvb_usb_adapter *adap)
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
struct i2c_board_info info;
- struct m88ts2022_config m88ts2022_config = {
- .clock = 27000000,
- };
+ struct ts2020_config ts2020_config = {};
memset(&info, 0, sizeof(struct i2c_board_info));
/* attach demod */
@@ -332,11 +330,11 @@ static int dvbsky_s960_attach(struct dvb_usb_adapter *adap)
}
/* attach tuner */
- m88ts2022_config.fe = adap->fe[0];
- strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ ts2020_config.fe = adap->fe[0];
+ strlcpy(info.type, "ts2020", I2C_NAME_SIZE);
info.addr = 0x60;
- info.platform_data = &m88ts2022_config;
- request_module("m88ts2022");
+ info.platform_data = &ts2020_config;
+ request_module("ts2020");
client = i2c_new_device(i2c_adapter, &info);
if (client == NULL || client->dev.driver == NULL) {
dvb_frontend_detach(adap->fe[0]);
@@ -439,9 +437,7 @@ static int dvbsky_s960c_attach(struct dvb_usb_adapter *adap)
struct i2c_client *client_tuner, *client_ci;
struct i2c_board_info info;
struct sp2_config sp2_config;
- struct m88ts2022_config m88ts2022_config = {
- .clock = 27000000,
- };
+ struct ts2020_config ts2020_config = {};
memset(&info, 0, sizeof(struct i2c_board_info));
/* attach demod */
@@ -456,11 +452,11 @@ static int dvbsky_s960c_attach(struct dvb_usb_adapter *adap)
}
/* attach tuner */
- m88ts2022_config.fe = adap->fe[0];
- strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ ts2020_config.fe = adap->fe[0];
+ strlcpy(info.type, "ts2020", I2C_NAME_SIZE);
info.addr = 0x60;
- info.platform_data = &m88ts2022_config;
- request_module("m88ts2022");
+ info.platform_data = &ts2020_config;
+ request_module("ts2020");
client_tuner = i2c_new_device(i2c_adapter, &info);
if (client_tuner == NULL || client_tuner->dev.driver == NULL) {
ret = -ENODEV;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 87fc0fe29ebd..895441fe90f7 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -866,6 +866,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
mn88472_config.i2c_wr_max = 22,
strlcpy(info.type, "mn88472", I2C_NAME_SIZE);
mn88472_config.xtal = 20500000;
+ mn88472_config.ts_mode = SERIAL_TS_MODE;
+ mn88472_config.ts_clock = VARIABLE_TS_CLOCK;
info.addr = 0x18;
info.platform_data = &mn88472_config;
request_module(info.type);
@@ -1609,7 +1611,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
rc->allowed_protos = RC_BIT_ALL;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->query = rtl2832u_rc_query;
- rc->interval = 400;
+ rc->interval = 200;
return 0;
}
@@ -1724,6 +1726,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl28xxu_props, "DigitalNow Quad DVB-T Receiver", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_MINID,
&rtl28xxu_props, "Leadtek Winfast DTV Dongle Mini D", NULL) },
+ { DVB_USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS_PLUS,
+ &rtl28xxu_props, "Leadtek WinFast DTV2000DS Plus", RC_MAP_LEADTEK_Y04G0051) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d3,
&rtl28xxu_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1102,
@@ -1754,6 +1758,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl28xxu_props, "Sveon STV21", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV27,
&rtl28xxu_props, "Sveon STV27", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TURBOX_DTT_2000,
+ &rtl28xxu_props, "TURBO-X Pure TV Tuner DTT-2000", NULL) },
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 3364200db093..128eee61570d 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -278,9 +278,10 @@ config DVB_USB_DW2102
select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
select DVB_M88RS2000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the DvbWorld, TeVii, Prof DVB-S/S2 USB2.0
- receivers.
+ Say Y here to support the DvbWorld, TeVii, Prof, TechnoTrend
+ DVB-S/S2 USB2.0 receivers.
config DVB_USB_CINERGY_T2
tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver"
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index f327c49d7e09..ffc3704abded 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -1516,28 +1516,95 @@ static void cxusb_disconnect(struct usb_interface *intf)
dvb_usb_device_exit(intf);
}
-static struct usb_device_id cxusb_table [] = {
- { USB_DEVICE(USB_VID_MEDION, USB_PID_MEDION_MD95700) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_COLD) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_WARM) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_COLD) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_WARM) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_COLD) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_WARM) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_4) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM) },
- { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_A868R) },
- { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2) },
- { USB_DEVICE(USB_VID_CONEXANT, USB_PID_CONEXANT_D680_DMB) },
- { USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_D689) },
- { USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230) },
+enum cxusb_table_index {
+ MEDION_MD95700,
+ DVICO_BLUEBIRD_LG064F_COLD,
+ DVICO_BLUEBIRD_LG064F_WARM,
+ DVICO_BLUEBIRD_DUAL_1_COLD,
+ DVICO_BLUEBIRD_DUAL_1_WARM,
+ DVICO_BLUEBIRD_LGZ201_COLD,
+ DVICO_BLUEBIRD_LGZ201_WARM,
+ DVICO_BLUEBIRD_TH7579_COLD,
+ DVICO_BLUEBIRD_TH7579_WARM,
+ DIGITALNOW_BLUEBIRD_DUAL_1_COLD,
+ DIGITALNOW_BLUEBIRD_DUAL_1_WARM,
+ DVICO_BLUEBIRD_DUAL_2_COLD,
+ DVICO_BLUEBIRD_DUAL_2_WARM,
+ DVICO_BLUEBIRD_DUAL_4,
+ DVICO_BLUEBIRD_DVB_T_NANO_2,
+ DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM,
+ AVERMEDIA_VOLAR_A868R,
+ DVICO_BLUEBIRD_DUAL_4_REV_2,
+ CONEXANT_D680_DMB,
+ MYGICA_D689,
+ MYGICA_T230,
+ NR__cxusb_table_index
+};
+
+static struct usb_device_id cxusb_table[NR__cxusb_table_index + 1] = {
+ [MEDION_MD95700] = {
+ USB_DEVICE(USB_VID_MEDION, USB_PID_MEDION_MD95700)
+ },
+ [DVICO_BLUEBIRD_LG064F_COLD] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_COLD)
+ },
+ [DVICO_BLUEBIRD_LG064F_WARM] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_WARM)
+ },
+ [DVICO_BLUEBIRD_DUAL_1_COLD] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD)
+ },
+ [DVICO_BLUEBIRD_DUAL_1_WARM] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM)
+ },
+ [DVICO_BLUEBIRD_LGZ201_COLD] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_COLD)
+ },
+ [DVICO_BLUEBIRD_LGZ201_WARM] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_WARM)
+ },
+ [DVICO_BLUEBIRD_TH7579_COLD] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_COLD)
+ },
+ [DVICO_BLUEBIRD_TH7579_WARM] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_WARM)
+ },
+ [DIGITALNOW_BLUEBIRD_DUAL_1_COLD] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD)
+ },
+ [DIGITALNOW_BLUEBIRD_DUAL_1_WARM] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM)
+ },
+ [DVICO_BLUEBIRD_DUAL_2_COLD] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD)
+ },
+ [DVICO_BLUEBIRD_DUAL_2_WARM] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM)
+ },
+ [DVICO_BLUEBIRD_DUAL_4] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_4)
+ },
+ [DVICO_BLUEBIRD_DVB_T_NANO_2] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2)
+ },
+ [DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM)
+ },
+ [AVERMEDIA_VOLAR_A868R] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_A868R)
+ },
+ [DVICO_BLUEBIRD_DUAL_4_REV_2] = {
+ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2)
+ },
+ [CONEXANT_D680_DMB] = {
+ USB_DEVICE(USB_VID_CONEXANT, USB_PID_CONEXANT_D680_DMB)
+ },
+ [MYGICA_D689] = {
+ USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_D689)
+ },
+ [MYGICA_T230] = {
+ USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230)
+ },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, cxusb_table);
@@ -1581,7 +1648,7 @@ static struct dvb_usb_device_properties cxusb_medion_properties = {
.devices = {
{ "Medion MD95700 (MDUSBTV-HYBRID)",
{ NULL },
- { &cxusb_table[0], NULL },
+ { &cxusb_table[MEDION_MD95700], NULL },
},
}
};
@@ -1637,8 +1704,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.num_device_descs = 1,
.devices = {
{ "DViCO FusionHDTV5 USB Gold",
- { &cxusb_table[1], NULL },
- { &cxusb_table[2], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_LG064F_COLD], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_LG064F_WARM], NULL },
},
}
};
@@ -1693,16 +1760,16 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.num_device_descs = 3,
.devices = {
{ "DViCO FusionHDTV DVB-T Dual USB",
- { &cxusb_table[3], NULL },
- { &cxusb_table[4], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DUAL_1_COLD], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DUAL_1_WARM], NULL },
},
{ "DigitalNow DVB-T Dual USB",
- { &cxusb_table[9], NULL },
- { &cxusb_table[10], NULL },
+ { &cxusb_table[DIGITALNOW_BLUEBIRD_DUAL_1_COLD], NULL },
+ { &cxusb_table[DIGITALNOW_BLUEBIRD_DUAL_1_WARM], NULL },
},
{ "DViCO FusionHDTV DVB-T Dual Digital 2",
- { &cxusb_table[11], NULL },
- { &cxusb_table[12], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DUAL_2_COLD], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DUAL_2_WARM], NULL },
},
}
};
@@ -1756,8 +1823,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.num_device_descs = 1,
.devices = {
{ "DViCO FusionHDTV DVB-T USB (LGZ201)",
- { &cxusb_table[5], NULL },
- { &cxusb_table[6], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_LGZ201_COLD], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_LGZ201_WARM], NULL },
},
}
};
@@ -1812,8 +1879,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.num_device_descs = 1,
.devices = {
{ "DViCO FusionHDTV DVB-T USB (TH7579)",
- { &cxusb_table[7], NULL },
- { &cxusb_table[8], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_TH7579_COLD], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_TH7579_WARM], NULL },
},
}
};
@@ -1865,7 +1932,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
.devices = {
{ "DViCO FusionHDTV DVB-T Dual Digital 4",
{ NULL },
- { &cxusb_table[13], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DUAL_4], NULL },
},
}
};
@@ -1918,7 +1985,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
.devices = {
{ "DViCO FusionHDTV DVB-T NANO2",
{ NULL },
- { &cxusb_table[14], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2], NULL },
},
}
};
@@ -1972,8 +2039,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
.num_device_descs = 1,
.devices = {
{ "DViCO FusionHDTV DVB-T NANO2 w/o firmware",
- { &cxusb_table[14], NULL },
- { &cxusb_table[15], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM], NULL },
},
}
};
@@ -2017,7 +2084,7 @@ static struct dvb_usb_device_properties cxusb_aver_a868r_properties = {
.devices = {
{ "AVerMedia AVerTVHD Volar (A868R)",
{ NULL },
- { &cxusb_table[16], NULL },
+ { &cxusb_table[AVERMEDIA_VOLAR_A868R], NULL },
},
}
};
@@ -2071,7 +2138,7 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
.devices = {
{ "DViCO FusionHDTV DVB-T Dual Digital 4 (rev 2)",
{ NULL },
- { &cxusb_table[17], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DUAL_4_REV_2], NULL },
},
}
};
@@ -2125,7 +2192,7 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
{
"Conexant DMB-TH Stick",
{ NULL },
- { &cxusb_table[18], NULL },
+ { &cxusb_table[CONEXANT_D680_DMB], NULL },
},
}
};
@@ -2179,7 +2246,7 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
{
"Mygica D689 DMB-TH",
{ NULL },
- { &cxusb_table[19], NULL },
+ { &cxusb_table[MYGICA_D689], NULL },
},
}
};
@@ -2232,7 +2299,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
{
"Mygica T230 DVB-T/T2/C",
{ NULL },
- { &cxusb_table[20], NULL },
+ { &cxusb_table[MYGICA_T230], NULL },
},
}
};
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 50856dbf5496..2b40393836ff 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -651,9 +651,6 @@ out:
return ret;
}
-/* Number of keypresses to ignore before start repeating */
-#define RC_REPEAT_DELAY_V1_20 10
-
/* This is the structure of the RC response packet starting in firmware 1.20 */
struct dib0700_rc_response {
u8 report_id;
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index e1757b8f5f5d..d7d55a20e959 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -510,9 +510,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
-/* Number of keypresses to ignore before start repeating */
-#define RC_REPEAT_DELAY 6
-
/*
* This function is used only when firmware is < 1.20 version. Newer
* firmwares use bulk mode, with functions implemented at dib0700_core,
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index 719413b15f20..8a260c854653 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -84,14 +84,61 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
- deb_ts("start pid: 0x%04x, feedtype: %d\n", dvbdmxfeed->pid,dvbdmxfeed->type);
- return dvb_usb_ctrl_feed(dvbdmxfeed,1);
+ deb_ts("start pid: 0x%04x, feedtype: %d\n", dvbdmxfeed->pid,
+ dvbdmxfeed->type);
+ return dvb_usb_ctrl_feed(dvbdmxfeed, 1);
}
static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
deb_ts("stop pid: 0x%04x, feedtype: %d\n", dvbdmxfeed->pid, dvbdmxfeed->type);
- return dvb_usb_ctrl_feed(dvbdmxfeed,0);
+ return dvb_usb_ctrl_feed(dvbdmxfeed, 0);
+}
+
+static void dvb_usb_media_device_register(struct dvb_usb_adapter *adap)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ struct media_device *mdev;
+ struct dvb_usb_device *d = adap->dev;
+ struct usb_device *udev = d->udev;
+ int ret;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return;
+
+ mdev->dev = &udev->dev;
+ strlcpy(mdev->model, d->desc->name, sizeof(mdev->model));
+ if (udev->serial)
+ strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+ strcpy(mdev->bus_info, udev->devpath);
+ mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ ret = media_device_register(mdev);
+ if (ret) {
+ dev_err(&d->udev->dev,
+ "Couldn't create a media device. Error: %d\n",
+ ret);
+ kfree(mdev);
+ return;
+ }
+ dvb_register_media_controller(&adap->dvb_adap, mdev);
+
+ dev_info(&d->udev->dev, "media controller created\n");
+#endif
+}
+
+static void dvb_usb_media_device_unregister(struct dvb_usb_adapter *adap)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (!adap->dvb_adap.mdev)
+ return;
+
+ media_device_unregister(adap->dvb_adap.mdev);
+ kfree(adap->dvb_adap.mdev);
+ adap->dvb_adap.mdev = NULL;
+#endif
}
int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums)
@@ -107,9 +154,11 @@ int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums)
}
adap->dvb_adap.priv = adap;
+ dvb_usb_media_device_register(adap);
+
if (adap->dev->props.read_mac_address) {
- if (adap->dev->props.read_mac_address(adap->dev,adap->dvb_adap.proposed_mac) == 0)
- info("MAC address: %pM",adap->dvb_adap.proposed_mac);
+ if (adap->dev->props.read_mac_address(adap->dev, adap->dvb_adap.proposed_mac) == 0)
+ info("MAC address: %pM", adap->dvb_adap.proposed_mac);
else
err("MAC address reading failed.");
}
@@ -128,7 +177,7 @@ int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums)
adap->demux.stop_feed = dvb_usb_stop_feed;
adap->demux.write_to_decoder = NULL;
if ((ret = dvb_dmx_init(&adap->demux)) < 0) {
- err("dvb_dmx_init failed: error %d",ret);
+ err("dvb_dmx_init failed: error %d", ret);
goto err_dmx;
}
@@ -136,13 +185,13 @@ int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums)
adap->dmxdev.demux = &adap->demux.dmx;
adap->dmxdev.capabilities = 0;
if ((ret = dvb_dmxdev_init(&adap->dmxdev, &adap->dvb_adap)) < 0) {
- err("dvb_dmxdev_init failed: error %d",ret);
+ err("dvb_dmxdev_init failed: error %d", ret);
goto err_dmx_dev;
}
if ((ret = dvb_net_init(&adap->dvb_adap, &adap->dvb_net,
&adap->demux.dmx)) < 0) {
- err("dvb_net_init failed: error %d",ret);
+ err("dvb_net_init failed: error %d", ret);
goto err_net_init;
}
@@ -154,6 +203,7 @@ err_net_init:
err_dmx_dev:
dvb_dmx_release(&adap->demux);
err_dmx:
+ dvb_usb_media_device_unregister(adap);
dvb_unregister_adapter(&adap->dvb_adap);
err:
return ret;
@@ -167,6 +217,7 @@ int dvb_usb_adapter_dvb_exit(struct dvb_usb_adapter *adap)
adap->demux.dmx.close(&adap->demux.dmx);
dvb_dmxdev_release(&adap->dmxdev);
dvb_dmx_release(&adap->demux);
+ dvb_usb_media_device_unregister(adap);
dvb_unregister_adapter(&adap->dvb_adap);
adap->state &= ~DVB_USB_ADAP_STATE_DVB;
}
@@ -268,6 +319,8 @@ int dvb_usb_adapter_frontend_init(struct dvb_usb_adapter *adap)
adap->num_frontends_initialized++;
}
+ dvb_create_media_graph(&adap->dvb_adap);
+
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 1a3df10d6bad..f1f357f43ff0 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2,7 +2,8 @@
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
* TeVii S600, S630, S650, S660, S480, S421, S632
* Prof 1100, 7500,
- * Geniatech SU3000, T220 Cards
+ * Geniatech SU3000, T220,
+ * TechnoTrend S2-4600 Cards
* Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
@@ -31,6 +32,8 @@
#include "m88rs2000.h"
#include "tda18271.h"
#include "cxd2820r.h"
+#include "m88ds3103.h"
+#include "ts2020.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
@@ -112,11 +115,9 @@
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
-struct su3000_state {
+struct dw2102_state {
u8 initialized;
-};
-
-struct s6x0_state {
+ struct i2c_client *i2c_client_tuner;
int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
};
@@ -887,7 +888,7 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
{
- struct su3000_state *state = (struct su3000_state *)d->priv;
+ struct dw2102_state *state = (struct dw2102_state *)d->priv;
u8 obuf[] = {0xde, 0};
info("%s: %d, initialized %d\n", __func__, i, state->initialized);
@@ -973,7 +974,7 @@ static int s660_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct dvb_usb_adapter *d =
(struct dvb_usb_adapter *)(fe->dvb->priv);
- struct s6x0_state *st = (struct s6x0_state *)d->dev->priv;
+ struct dw2102_state *st = (struct dw2102_state *)d->dev->priv;
dw210x_set_voltage(fe, voltage);
if (st->old_set_voltage)
@@ -1117,6 +1118,22 @@ static struct tda18271_config tda18271_config = {
.gate = TDA18271_GATE_DIGITAL,
};
+static const struct m88ds3103_config tt_s2_4600_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .ts_mode = M88DS3103_TS_CI,
+ .ts_clk = 16000,
+ .ts_clk_pol = 0,
+ .spec_inv = 0,
+ .agc_inv = 0,
+ .clock_out = M88DS3103_CLOCK_OUT_ENABLED,
+ .envelope_mode = 0,
+ .agc = 0x99,
+ .lnb_hv_pol = 1,
+ .lnb_en_pol = 0,
+};
+
static u8 m88rs2000_inittab[] = {
DEMOD_WRITE, 0x9a, 0x30,
DEMOD_WRITE, 0x00, 0x01,
@@ -1295,7 +1312,7 @@ static int stv0288_frontend_attach(struct dvb_usb_adapter *d)
static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
{
- struct s6x0_state *st = (struct s6x0_state *)d->dev->priv;
+ struct dw2102_state *st = d->dev->priv;
u8 obuf[] = {7, 1};
d->fe_adap[0].fe = dvb_attach(ds3000_attach, &s660_ds3000_config,
@@ -1461,6 +1478,84 @@ static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
+static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct dw2102_state *state = d->priv;
+ u8 obuf[3] = { 0xe, 0x80, 0 };
+ u8 ibuf[] = { 0 };
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ struct ts2020_config ts2020_config = {};
+
+ if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x02;
+ obuf[2] = 1;
+
+ if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+ msleep(300);
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x83;
+ obuf[2] = 0;
+
+ if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x83;
+ obuf[2] = 1;
+
+ if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0x51;
+
+ if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+ err("command 0x51 transfer failed.");
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
+ adap->fe_adap[0].fe = dvb_attach(m88ds3103_attach,
+ &tt_s2_4600_m88ds3103_config,
+ &d->i2c_adap,
+ &i2c_adapter);
+ if (adap->fe_adap[0].fe == NULL)
+ return -ENODEV;
+
+ /* attach tuner */
+ ts2020_config.fe = adap->fe_adap[0].fe;
+ strlcpy(info.type, "ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &ts2020_config;
+ request_module("ts2020");
+ client = i2c_new_device(i2c_adapter, &info);
+
+ if (client == NULL || client->dev.driver == NULL) {
+ dvb_frontend_detach(adap->fe_adap[0].fe);
+ return -ENODEV;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(adap->fe_adap[0].fe);
+ return -ENODEV;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ adap->fe_adap[0].fe->ops.read_signal_strength =
+ adap->fe_adap[0].fe->ops.tuner_ops.get_rf_strength;
+
+ state->i2c_client_tuner = client;
+
+ return 0;
+}
+
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
@@ -1561,6 +1656,7 @@ enum dw2102_table_entry {
TERRATEC_CINERGY_S2_R2,
GOTVIEW_SAT_HD,
GENIATECH_T220,
+ TECHNOTREND_S2_4600,
};
static struct usb_device_id dw2102_table[] = {
@@ -1584,6 +1680,8 @@ static struct usb_device_id dw2102_table[] = {
[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
+ [TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_S2_4600)},
{ }
};
@@ -1857,7 +1955,7 @@ static struct dvb_usb_device_properties dw3101_properties = {
static struct dvb_usb_device_properties s6x0_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .size_of_priv = sizeof(struct s6x0_state),
+ .size_of_priv = sizeof(struct dw2102_state),
.firmware = S630_FIRMWARE,
.no_reconnect = 1,
@@ -1950,7 +2048,7 @@ static struct dvb_usb_device_description d632 = {
static struct dvb_usb_device_properties su3000_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .size_of_priv = sizeof(struct su3000_state),
+ .size_of_priv = sizeof(struct dw2102_state),
.power_ctrl = su3000_power_ctrl,
.num_adapters = 1,
.identify_state = su3000_identify_state,
@@ -2015,7 +2113,7 @@ static struct dvb_usb_device_properties su3000_properties = {
static struct dvb_usb_device_properties t220_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .size_of_priv = sizeof(struct su3000_state),
+ .size_of_priv = sizeof(struct dw2102_state),
.power_ctrl = su3000_power_ctrl,
.num_adapters = 1,
.identify_state = su3000_identify_state,
@@ -2061,6 +2159,55 @@ static struct dvb_usb_device_properties t220_properties = {
}
};
+static struct dvb_usb_device_properties tt_s2_4600_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .size_of_priv = sizeof(struct dw2102_state),
+ .power_ctrl = su3000_power_ctrl,
+ .num_adapters = 1,
+ .identify_state = su3000_identify_state,
+ .i2c_algo = &su3000_i2c_algo,
+
+ .rc.core = {
+ .rc_interval = 250,
+ .rc_codes = RC_MAP_TT_1500,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_RC5,
+ .rc_query = su3000_rc_query,
+ },
+
+ .read_mac_address = su3000_read_mac_address,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .streaming_ctrl = su3000_streaming_ctrl,
+ .frontend_attach = tt_s2_4600_frontend_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ }
+ } },
+ }
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "TechnoTrend TT-connect S2-4600",
+ { &dw2102_table[TECHNOTREND_S2_4600], NULL },
+ { NULL },
+ },
+ }
+};
+
static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2135,16 +2282,34 @@ static int dw2102_probe(struct usb_interface *intf,
0 == dvb_usb_device_init(intf, &su3000_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &t220_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
THIS_MODULE, NULL, adapter_nr))
return 0;
return -ENODEV;
}
+static void dw2102_disconnect(struct usb_interface *intf)
+{
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+ struct dw2102_state *st = (struct dw2102_state *)d->priv;
+ struct i2c_client *client;
+
+ /* remove I2C client for tuner */
+ client = st->i2c_client_tuner;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ dvb_usb_device_exit(intf);
+}
+
static struct usb_driver dw2102_driver = {
.name = "dw2102",
.probe = dw2102_probe,
- .disconnect = dvb_usb_device_exit,
+ .disconnect = dw2102_disconnect,
.id_table = dw2102_table,
};
@@ -2155,7 +2320,8 @@ MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
" DVB-C 3101 USB2.0,"
" TeVii S600, S630, S650, S660, S480, S421, S632"
" Prof 1100, 7500 USB2.0,"
- " Geniatech SU3000, T220 devices");
+ " Geniatech SU3000, T220,"
+ " TechnoTrend S2-4600 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index f5d7198753c7..e382210c4ada 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -55,7 +55,7 @@ config VIDEO_EM28XX_DVB
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DRX39XYJ if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 7be661f73930..a4b22c2c3ba7 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -330,7 +330,7 @@ int em28xx_init_camera(struct em28xx *dev)
v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
i2c_adapter_id(adap), client->addr);
- v4l2->clk = v4l2_clk_register_fixed(clk_name, "mclk", -EINVAL);
+ v4l2->clk = v4l2_clk_register_fixed(clk_name, -EINVAL);
if (IS_ERR(v4l2->clk))
return PTR_ERR(v4l2->clk);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index d9704e66b8c9..394004607059 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -1157,6 +1157,15 @@ struct em28xx_board em28xx_boards[] = {
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
},
+ [EM2884_BOARD_ELGATO_EYETV_HYBRID_2008] = {
+ .name = "Elgato EyeTV Hybrid 2008 INT",
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+ .tuner_type = TUNER_ABSENT,
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
[EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = {
.name = "Hauppauge WinTV HVR 900",
.tda9887_conf = TDA9887_PRESENT,
@@ -2378,8 +2387,10 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2860_BOARD_TERRATEC_GRABBY },
{ USB_DEVICE(0x0ccd, 0x00b2),
.driver_info = EM2884_BOARD_CINERGY_HTC_STICK },
+ { USB_DEVICE(0x0fd9, 0x0018),
+ .driver_info = EM2884_BOARD_ELGATO_EYETV_HYBRID_2008 },
{ USB_DEVICE(0x0fd9, 0x0033),
- .driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE},
+ .driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE },
{ USB_DEVICE(0x185b, 0x2870),
.driver_info = EM2870_BOARD_COMPRO_VIDEOMATE },
{ USB_DEVICE(0x185b, 0x2041),
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index aee70d483264..a5b22c5a240c 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -54,7 +54,7 @@
#include "qt1010.h"
#include "mb86a20s.h"
#include "m88ds3103.h"
-#include "m88ts2022.h"
+#include "ts2020.h"
#include "si2168.h"
#include "si2157.h"
@@ -1380,6 +1380,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
}
break;
+ case EM2884_BOARD_ELGATO_EYETV_HYBRID_2008:
case EM2884_BOARD_CINERGY_HTC_STICK:
terratec_htc_stick_init(dev);
@@ -1491,8 +1492,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
struct i2c_board_info info;
- struct m88ts2022_config m88ts2022_config = {
- .clock = 27000000,
+ struct ts2020_config ts2020_config = {
};
memset(&info, 0, sizeof(struct i2c_board_info));
@@ -1507,11 +1507,11 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
/* attach tuner */
- m88ts2022_config.fe = dvb->fe[0];
- strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ ts2020_config.fe = dvb->fe[0];
+ strlcpy(info.type, "ts2022", I2C_NAME_SIZE);
info.addr = 0x60;
- info.platform_data = &m88ts2022_config;
- request_module("m88ts2022");
+ info.platform_data = &ts2020_config;
+ request_module("ts2020");
client = i2c_new_device(i2c_adapter, &info);
if (client == NULL || client->dev.driver == NULL) {
dvb_frontend_detach(dvb->fe[0]);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 9ecf65629b3d..14eba9c65de3 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1472,7 +1472,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
(EM28XX_VMUX_CABLE == INPUT(n)->type))
i->type = V4L2_INPUT_TYPE_TUNER;
- i->std = dev->v4l2->vdev->tvnorms;
+ i->std = dev->v4l2->vdev.tvnorms;
/* webcams do not have the STD API */
if (dev->board.is_webcam)
i->capabilities = 0;
@@ -1730,9 +1730,9 @@ static int vidioc_querycap(struct file *file, void *priv,
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
V4L2_CAP_READWRITE | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- if (v4l2->vbi_dev)
+ if (video_is_registered(&v4l2->vbi_dev))
cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
- if (v4l2->radio_dev)
+ if (video_is_registered(&v4l2->radio_dev))
cap->capabilities |= V4L2_CAP_RADIO;
return 0;
}
@@ -1966,20 +1966,20 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
- if (v4l2->radio_dev) {
+ if (video_is_registered(&v4l2->radio_dev)) {
em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(v4l2->radio_dev));
- video_unregister_device(v4l2->radio_dev);
+ video_device_node_name(&v4l2->radio_dev));
+ video_unregister_device(&v4l2->radio_dev);
}
- if (v4l2->vbi_dev) {
+ if (video_is_registered(&v4l2->vbi_dev)) {
em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(v4l2->vbi_dev));
- video_unregister_device(v4l2->vbi_dev);
+ video_device_node_name(&v4l2->vbi_dev));
+ video_unregister_device(&v4l2->vbi_dev);
}
- if (v4l2->vdev) {
+ if (video_is_registered(&v4l2->vdev)) {
em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(v4l2->vdev));
- video_unregister_device(v4l2->vdev);
+ video_device_node_name(&v4l2->vdev));
+ video_unregister_device(&v4l2->vdev);
}
v4l2_ctrl_handler_free(&v4l2->ctrl_handler);
@@ -2127,7 +2127,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static const struct video_device em28xx_video_template = {
.fops = &em28xx_v4l_fops,
.ioctl_ops = &video_ioctl_ops,
- .release = video_device_release,
+ .release = video_device_release_empty,
.tvnorms = V4L2_STD_ALL,
};
@@ -2156,7 +2156,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
static struct video_device em28xx_radio_template = {
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
- .release = video_device_release,
+ .release = video_device_release_empty,
};
/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */
@@ -2179,17 +2179,11 @@ static unsigned short msp3400_addrs[] = {
/******************************** usb interface ******************************/
-static struct video_device
-*em28xx_vdev_init(struct em28xx *dev,
- const struct video_device *template,
- const char *type_name)
+static void em28xx_vdev_init(struct em28xx *dev,
+ struct video_device *vfd,
+ const struct video_device *template,
+ const char *type_name)
{
- struct video_device *vfd;
-
- vfd = video_device_alloc();
- if (NULL == vfd)
- return NULL;
-
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2->v4l2_dev;
vfd->lock = &dev->lock;
@@ -2200,7 +2194,6 @@ static struct video_device
dev->name, type_name);
video_set_drvdata(vfd, dev);
- return vfd;
}
static void em28xx_tuner_setup(struct em28xx *dev, unsigned short tuner_addr)
@@ -2491,38 +2484,33 @@ static int em28xx_v4l2_init(struct em28xx *dev)
goto unregister_dev;
/* allocate and fill video video_device struct */
- v4l2->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video");
- if (!v4l2->vdev) {
- em28xx_errdev("cannot allocate video_device.\n");
- ret = -ENODEV;
- goto unregister_dev;
- }
+ em28xx_vdev_init(dev, &v4l2->vdev, &em28xx_video_template, "video");
mutex_init(&v4l2->vb_queue_lock);
mutex_init(&v4l2->vb_vbi_queue_lock);
- v4l2->vdev->queue = &v4l2->vb_vidq;
- v4l2->vdev->queue->lock = &v4l2->vb_queue_lock;
+ v4l2->vdev.queue = &v4l2->vb_vidq;
+ v4l2->vdev.queue->lock = &v4l2->vb_queue_lock;
/* disable inapplicable ioctls */
if (dev->board.is_webcam) {
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_QUERYSTD);
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_G_STD);
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_QUERYSTD);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_G_STD);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_S_STD);
} else {
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_S_PARM);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_S_PARM);
}
if (dev->tuner_type == TUNER_ABSENT) {
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_G_TUNER);
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_S_TUNER);
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_G_FREQUENCY);
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_S_FREQUENCY);
}
if (dev->int_audio_type == EM28XX_INT_AUDIO_NONE) {
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_G_AUDIO);
- v4l2_disable_ioctl(v4l2->vdev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&v4l2->vdev, VIDIOC_S_AUDIO);
}
/* register v4l2 video video_device */
- ret = video_register_device(v4l2->vdev, VFL_TYPE_GRABBER,
+ ret = video_register_device(&v4l2->vdev, VFL_TYPE_GRABBER,
video_nr[dev->devno]);
if (ret) {
em28xx_errdev("unable to register video device (error=%i).\n",
@@ -2532,27 +2520,27 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* Allocate and fill vbi video_device struct */
if (em28xx_vbi_supported(dev) == 1) {
- v4l2->vbi_dev = em28xx_vdev_init(dev, &em28xx_video_template,
- "vbi");
+ em28xx_vdev_init(dev, &v4l2->vbi_dev, &em28xx_video_template,
+ "vbi");
- v4l2->vbi_dev->queue = &v4l2->vb_vbiq;
- v4l2->vbi_dev->queue->lock = &v4l2->vb_vbi_queue_lock;
+ v4l2->vbi_dev.queue = &v4l2->vb_vbiq;
+ v4l2->vbi_dev.queue->lock = &v4l2->vb_vbi_queue_lock;
/* disable inapplicable ioctls */
- v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_S_PARM);
+ v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_S_PARM);
if (dev->tuner_type == TUNER_ABSENT) {
- v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_G_TUNER);
- v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_S_TUNER);
- v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_G_FREQUENCY);
- v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_S_FREQUENCY);
}
if (dev->int_audio_type == EM28XX_INT_AUDIO_NONE) {
- v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_G_AUDIO);
- v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_S_AUDIO);
}
/* register v4l2 vbi video_device */
- ret = video_register_device(v4l2->vbi_dev, VFL_TYPE_VBI,
+ ret = video_register_device(&v4l2->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
if (ret < 0) {
em28xx_errdev("unable to register vbi device\n");
@@ -2561,29 +2549,24 @@ static int em28xx_v4l2_init(struct em28xx *dev)
}
if (em28xx_boards[dev->model].radio.type == EM28XX_RADIO) {
- v4l2->radio_dev = em28xx_vdev_init(dev, &em28xx_radio_template,
- "radio");
- if (!v4l2->radio_dev) {
- em28xx_errdev("cannot allocate video_device.\n");
- ret = -ENODEV;
- goto unregister_dev;
- }
- ret = video_register_device(v4l2->radio_dev, VFL_TYPE_RADIO,
+ em28xx_vdev_init(dev, &v4l2->radio_dev, &em28xx_radio_template,
+ "radio");
+ ret = video_register_device(&v4l2->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
em28xx_errdev("can't register radio device\n");
goto unregister_dev;
}
em28xx_info("Registered radio device as %s\n",
- video_device_node_name(v4l2->radio_dev));
+ video_device_node_name(&v4l2->radio_dev));
}
em28xx_info("V4L2 video device registered as %s\n",
- video_device_node_name(v4l2->vdev));
+ video_device_node_name(&v4l2->vdev));
- if (v4l2->vbi_dev)
+ if (video_is_registered(&v4l2->vbi_dev))
em28xx_info("V4L2 VBI device registered as %s\n",
- video_device_node_name(v4l2->vbi_dev));
+ video_device_node_name(&v4l2->vbi_dev));
/* Save some power by putting tuner to sleep */
v4l2_device_call_all(&v4l2->v4l2_dev, 0, core, s_power, 0);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 9c7075344109..e6559c6f143c 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -143,6 +143,7 @@
#define EM28178_BOARD_PCTV_292E 94
#define EM2861_BOARD_LEADTEK_VC100 95
#define EM28178_BOARD_TERRATEC_T2_STICK_HD 96
+#define EM2884_BOARD_ELGATO_EYETV_HYBRID_2008 97
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -512,9 +513,9 @@ struct em28xx_v4l2 {
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_clk *clk;
- struct video_device *vdev;
- struct video_device *vbi_dev;
- struct video_device *radio_dev;
+ struct video_device vdev;
+ struct video_device vbi_dev;
+ struct video_device radio_dev;
/* Videobuf2 */
struct vb2_queue vb_vidq;
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index a9c866d6d82d..146071b8e116 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -816,21 +816,16 @@ static void sethue(struct gspca_dev *gspca_dev, s32 val)
s16 huesin;
s16 huecos;
- /* fixp_sin and fixp_cos accept only positive values, while
- * our val is between -90 and 90
- */
- val += 360;
-
/* According to the datasheet the registers expect HUESIN and
* HUECOS to be the result of the trigonometric functions,
* scaled by 0x80.
*
- * The 0x100 here represents the maximun absolute value
+ * The 0x7fff here represents the maximum absolute value
* returned by fixp_sin and fixp_cos, so the scaling will
* treat the result as a value in the interval [-1.0, 1.0].
*/
- huesin = fixp_sin(val) * 0x80 / 0x100;
- huecos = fixp_cos(val) * 0x80 / 0x100;
+ huesin = fixp_sin16(val) * 0x80 / 0x7fff;
+ huecos = fixp_cos16(val) * 0x80 / 0x7fff;
if (huesin < 0) {
sccb_reg_write(gspca_dev, 0xab,
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 5fcd1eec2004..c70ff406b07a 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -969,7 +969,9 @@ static void jpeg_set_qual(u8 *jpeg_hdr,
{
int i, sc;
- if (quality < 50)
+ if (quality <= 0)
+ sc = 5000;
+ else if (quality < 50)
sc = 5000 / quality;
else
sc = 200 - quality * 2;
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 42b4cdf28cfd..3fc64197b4e6 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -69,10 +69,6 @@ MODULE_DEVICE_TABLE(usb, hdpvr_table);
void hdpvr_delete(struct hdpvr_device *dev)
{
hdpvr_free_buffers(dev);
-
- if (dev->video_dev)
- video_device_release(dev->video_dev);
-
usb_put_dev(dev->udev);
}
@@ -397,7 +393,7 @@ static int hdpvr_probe(struct usb_interface *interface,
/* let the user know what node this device is now attached to */
v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
- video_device_node_name(dev->video_dev));
+ video_device_node_name(&dev->video_dev));
return 0;
reg_fail:
@@ -420,7 +416,7 @@ static void hdpvr_disconnect(struct usb_interface *interface)
struct hdpvr_device *dev = to_hdpvr_dev(usb_get_intfdata(interface));
v4l2_info(&dev->v4l2_dev, "device %s disconnected\n",
- video_device_node_name(dev->video_dev));
+ video_device_node_name(&dev->video_dev));
/* prevent more I/O from starting and stop any ongoing */
mutex_lock(&dev->io_mutex);
dev->status = STATUS_DISCONNECTED;
@@ -436,7 +432,7 @@ static void hdpvr_disconnect(struct usb_interface *interface)
#if IS_ENABLED(CONFIG_I2C)
i2c_del_adapter(&dev->i2c_adapter);
#endif
- video_unregister_device(dev->video_dev);
+ video_unregister_device(&dev->video_dev);
atomic_dec(&dev_nr);
}
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 59d15fd242ba..d8d8c0f519fc 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -797,7 +797,7 @@ static int vidioc_s_input(struct file *file, void *_fh,
* Comment this out for now, but if the legacy mode can be
* removed in the future, then this code should be enabled
* again.
- dev->video_dev->tvnorms =
+ dev->video_dev.tvnorms =
(index != HDPVR_COMPONENT) ? V4L2_STD_ALL : 0;
*/
}
@@ -1228,19 +1228,12 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
}
/* setup and register video device */
- dev->video_dev = video_device_alloc();
- if (!dev->video_dev) {
- v4l2_err(&dev->v4l2_dev, "video_device_alloc() failed\n");
- res = -ENOMEM;
- goto error;
- }
-
- *dev->video_dev = hdpvr_video_template;
- strcpy(dev->video_dev->name, "Hauppauge HD PVR");
- dev->video_dev->v4l2_dev = &dev->v4l2_dev;
- video_set_drvdata(dev->video_dev, dev);
+ dev->video_dev = hdpvr_video_template;
+ strcpy(dev->video_dev.name, "Hauppauge HD PVR");
+ dev->video_dev.v4l2_dev = &dev->v4l2_dev;
+ video_set_drvdata(&dev->video_dev, dev);
- res = video_register_device(dev->video_dev, VFL_TYPE_GRABBER, devnum);
+ res = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER, devnum);
if (res < 0) {
v4l2_err(&dev->v4l2_dev, "video_device registration failed\n");
goto error;
diff --git a/drivers/media/usb/hdpvr/hdpvr.h b/drivers/media/usb/hdpvr/hdpvr.h
index dc685d44cb3e..a3194304182d 100644
--- a/drivers/media/usb/hdpvr/hdpvr.h
+++ b/drivers/media/usb/hdpvr/hdpvr.h
@@ -66,7 +66,7 @@ struct hdpvr_options {
/* Structure to hold all of our device specific stuff */
struct hdpvr_device {
/* the v4l device for this device */
- struct video_device *video_dev;
+ struct video_device video_dev;
/* the control handler for this device */
struct v4l2_ctrl_handler hdl;
/* the usb device for this device */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 35e4ea530494..1c5f85bf7ed4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include "pvrusb2-context.h"
#include "pvrusb2-hdw.h"
#include "pvrusb2.h"
@@ -32,6 +31,7 @@
#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -50,14 +50,11 @@ struct pvr2_v4l2_dev {
};
struct pvr2_v4l2_fh {
+ struct v4l2_fh fh;
struct pvr2_channel channel;
struct pvr2_v4l2_dev *pdi;
- enum v4l2_priority prio;
struct pvr2_ioread *rhp;
struct file *file;
- struct pvr2_v4l2 *vhead;
- struct pvr2_v4l2_fh *vnext;
- struct pvr2_v4l2_fh *vprev;
wait_queue_head_t wait_data;
int fw_mode_flag;
/* Map contiguous ordinal value to input id */
@@ -67,10 +64,6 @@ struct pvr2_v4l2_fh {
struct pvr2_v4l2 {
struct pvr2_channel channel;
- struct pvr2_v4l2_fh *vfirst;
- struct pvr2_v4l2_fh *vlast;
-
- struct v4l2_prio_state prio;
/* streams - Note that these must be separately, individually,
* allocated pointers. This is because the v4l core is going to
@@ -169,23 +162,6 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
return 0;
}
-static int pvr2_g_priority(struct file *file, void *priv, enum v4l2_priority *p)
-{
- struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_v4l2 *vp = fh->vhead;
-
- *p = v4l2_prio_max(&vp->prio);
- return 0;
-}
-
-static int pvr2_s_priority(struct file *file, void *priv, enum v4l2_priority prio)
-{
- struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_v4l2 *vp = fh->vhead;
-
- return v4l2_prio_change(&vp->prio, &fh->prio, prio);
-}
-
static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct pvr2_v4l2_fh *fh = file->private_data;
@@ -805,8 +781,6 @@ static int pvr2_log_status(struct file *file, void *priv)
static const struct v4l2_ioctl_ops pvr2_ioctl_ops = {
.vidioc_querycap = pvr2_querycap,
- .vidioc_g_priority = pvr2_g_priority,
- .vidioc_s_priority = pvr2_s_priority,
.vidioc_s_audio = pvr2_s_audio,
.vidioc_g_audio = pvr2_g_audio,
.vidioc_enumaudio = pvr2_enumaudio,
@@ -911,7 +885,9 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
if (!vp->channel.mc_head->disconnect_flag) return;
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
- if (vp->vfirst) return;
+ if (!list_empty(&vp->dev_video->devbase.fh_list) ||
+ !list_empty(&vp->dev_radio->devbase.fh_list))
+ return;
pvr2_v4l2_destroy_no_lock(vp);
}
@@ -921,7 +897,6 @@ static long pvr2_v4l2_ioctl(struct file *file,
{
struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_v4l2 *vp = fh->vhead;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
long ret = -EINVAL;
@@ -934,18 +909,6 @@ static long pvr2_v4l2_ioctl(struct file *file,
return -EFAULT;
}
- /* check priority */
- switch (cmd) {
- case VIDIOC_S_CTRL:
- case VIDIOC_S_STD:
- case VIDIOC_S_INPUT:
- case VIDIOC_S_TUNER:
- case VIDIOC_S_FREQUENCY:
- ret = v4l2_prio_check(&vp->prio, fh->prio);
- if (ret)
- return ret;
- }
-
ret = video_ioctl2(file, cmd, arg);
pvr2_hdw_commit_ctl(hdw);
@@ -970,7 +933,7 @@ static long pvr2_v4l2_ioctl(struct file *file,
static int pvr2_v4l2_release(struct file *file)
{
struct pvr2_v4l2_fh *fhp = file->private_data;
- struct pvr2_v4l2 *vp = fhp->vhead;
+ struct pvr2_v4l2 *vp = fhp->pdi->v4lp;
struct pvr2_hdw *hdw = fhp->channel.mc_head->hdw;
pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release");
@@ -984,22 +947,10 @@ static int pvr2_v4l2_release(struct file *file)
fhp->rhp = NULL;
}
- v4l2_prio_close(&vp->prio, fhp->prio);
+ v4l2_fh_del(&fhp->fh);
+ v4l2_fh_exit(&fhp->fh);
file->private_data = NULL;
- if (fhp->vnext) {
- fhp->vnext->vprev = fhp->vprev;
- } else {
- vp->vlast = fhp->vprev;
- }
- if (fhp->vprev) {
- fhp->vprev->vnext = fhp->vnext;
- } else {
- vp->vfirst = fhp->vnext;
- }
- fhp->vnext = NULL;
- fhp->vprev = NULL;
- fhp->vhead = NULL;
pvr2_channel_done(&fhp->channel);
pvr2_trace(PVR2_TRACE_STRUCT,
"Destroying pvr_v4l2_fh id=%p",fhp);
@@ -1008,7 +959,9 @@ static int pvr2_v4l2_release(struct file *file)
fhp->input_map = NULL;
}
kfree(fhp);
- if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) {
+ if (vp->channel.mc_head->disconnect_flag &&
+ list_empty(&vp->dev_video->devbase.fh_list) &&
+ list_empty(&vp->dev_radio->devbase.fh_list)) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
@@ -1043,6 +996,7 @@ static int pvr2_v4l2_open(struct file *file)
return -ENOMEM;
}
+ v4l2_fh_init(&fhp->fh, &dip->devbase);
init_waitqueue_head(&fhp->wait_data);
fhp->pdi = dip;
@@ -1093,21 +1047,11 @@ static int pvr2_v4l2_open(struct file *file)
fhp->input_map[input_cnt++] = idx;
}
- fhp->vnext = NULL;
- fhp->vprev = vp->vlast;
- if (vp->vlast) {
- vp->vlast->vnext = fhp;
- } else {
- vp->vfirst = fhp;
- }
- vp->vlast = fhp;
- fhp->vhead = vp;
-
fhp->file = file;
file->private_data = fhp;
- v4l2_prio_open(&vp->prio, &fhp->prio);
fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);
+ v4l2_fh_add(&fhp->fh);
return 0;
}
@@ -1247,7 +1191,7 @@ static const struct v4l2_file_operations vdev_fops = {
.open = pvr2_v4l2_open,
.release = pvr2_v4l2_release,
.read = pvr2_v4l2_read,
- .ioctl = pvr2_v4l2_ioctl,
+ .unlocked_ioctl = pvr2_v4l2_ioctl,
.poll = pvr2_v4l2_poll,
};
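
The pvrusb2 rework replaces its hand-rolled file-handle list and priority tracking with the core struct v4l2_fh, and switches to unlocked_ioctl. A minimal open/release sketch of the v4l2_fh lifecycle (my_fh, my_open and my_release are hypothetical names):

#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

struct my_fh {
	struct v4l2_fh fh;	/* first member, so file->private_data
				 * can double as a struct v4l2_fh * */
	/* driver-private per-open state follows */
};

static int my_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct my_fh *fhp = kzalloc(sizeof(*fhp), GFP_KERNEL);

	if (!fhp)
		return -ENOMEM;
	v4l2_fh_init(&fhp->fh, vdev);
	file->private_data = fhp;
	v4l2_fh_add(&fhp->fh);		/* links into vdev->fh_list */
	return 0;
}

static int my_release(struct file *file)
{
	struct my_fh *fhp = file->private_data;

	v4l2_fh_del(&fhp->fh);
	v4l2_fh_exit(&fhp->fh);
	kfree(fhp);
	return 0;
}
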
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 94e10b10b66e..c945e4c2fbd4 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -19,6 +19,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
****************************************************************/
+#include "smscoreapi.h"
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/usb.h>
@@ -26,14 +28,9 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include <linux/slab.h>
#include <linux/module.h>
-#include "smscoreapi.h"
#include "sms-cards.h"
#include "smsendian.h"
-static int sms_dbg;
-module_param_named(debug, sms_dbg, int, 0644);
-MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
-
#define USB1_BUFFER_SIZE 0x1000
#define USB2_BUFFER_SIZE 0x2000
@@ -87,7 +84,7 @@ static void smsusb_onresponse(struct urb *urb)
struct smsusb_device_t *dev = surb->dev;
if (urb->status == -ESHUTDOWN) {
- sms_err("error, urb status %d (-ESHUTDOWN), %d bytes",
+ pr_err("error, urb status %d (-ESHUTDOWN), %d bytes\n",
urb->status, urb->actual_length);
return;
}
@@ -109,9 +106,7 @@ static void smsusb_onresponse(struct urb *urb)
/* sanity check */
if (((int) phdr->msg_length +
surb->cb->offset) > urb->actual_length) {
- sms_err("invalid response "
- "msglen %d offset %d "
- "size %d",
+ pr_err("invalid response msglen %d offset %d size %d\n",
phdr->msg_length,
surb->cb->offset,
urb->actual_length);
@@ -125,7 +120,7 @@ static void smsusb_onresponse(struct urb *urb)
} else
surb->cb->offset = 0;
- sms_debug("received %s(%d) size: %d",
+ pr_debug("received %s(%d) size: %d\n",
smscore_translate_msg(phdr->msg_type),
phdr->msg_type, phdr->msg_length);
@@ -134,12 +129,11 @@ static void smsusb_onresponse(struct urb *urb)
smscore_onresponse(dev->coredev, surb->cb);
surb->cb = NULL;
} else {
- sms_err("invalid response "
- "msglen %d actual %d",
+ pr_err("invalid response msglen %d actual %d\n",
phdr->msg_length, urb->actual_length);
}
} else
- sms_err("error, urb status %d, %d bytes",
+ pr_err("error, urb status %d, %d bytes\n",
urb->status, urb->actual_length);
@@ -153,7 +147,7 @@ static int smsusb_submit_urb(struct smsusb_device_t *dev,
if (!surb->cb) {
surb->cb = smscore_getbuffer(dev->coredev);
if (!surb->cb) {
- sms_err("smscore_getbuffer(...) returned NULL");
+ pr_err("smscore_getbuffer(...) returned NULL\n");
return -ENOMEM;
}
}
@@ -194,7 +188,7 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
for (i = 0; i < MAX_URBS; i++) {
rc = smsusb_submit_urb(dev, &dev->surbs[i]);
if (rc < 0) {
- sms_err("smsusb_submit_urb(...) failed");
+ pr_err("smsusb_submit_urb(...) failed\n");
smsusb_stop_streaming(dev);
break;
}
@@ -210,11 +204,11 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
int dummy;
if (dev->state != SMSUSB_ACTIVE) {
- sms_debug("Device not active yet");
+ pr_debug("Device not active yet\n");
return -ENOENT;
}
- sms_debug("sending %s(%d) size: %d",
+ pr_debug("sending %s(%d) size: %d\n",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
phdr->msg_length);
@@ -249,7 +243,7 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
id = sms_get_board(board_id)->default_mode;
if (id < DEVICE_MODE_DVBT || id > DEVICE_MODE_DVBT_BDA) {
- sms_err("invalid firmware id specified %d", id);
+ pr_err("invalid firmware id specified %d\n", id);
return -EINVAL;
}
@@ -257,13 +251,13 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
rc = request_firmware(&fw, fw_filename, &udev->dev);
if (rc < 0) {
- sms_warn("failed to open \"%s\" mode %d, "
- "trying again with default firmware", fw_filename, id);
+ pr_warn("failed to open '%s' mode %d, trying again with default firmware\n",
+ fw_filename, id);
fw_filename = smsusb1_fw_lkup[id];
rc = request_firmware(&fw, fw_filename, &udev->dev);
if (rc < 0) {
- sms_warn("failed to open \"%s\" mode %d",
+ pr_warn("failed to open '%s' mode %d\n",
fw_filename, id);
return rc;
@@ -277,14 +271,14 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
rc = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
fw_buffer, fw->size, &dummy, 1000);
- sms_info("sent %zu(%d) bytes, rc %d", fw->size, dummy, rc);
+ pr_debug("sent %zu(%d) bytes, rc %d\n", fw->size, dummy, rc);
kfree(fw_buffer);
} else {
- sms_err("failed to allocate firmware buffer");
+ pr_err("failed to allocate firmware buffer\n");
rc = -ENOMEM;
}
- sms_info("read FW %s, size=%zu", fw_filename, fw->size);
+ pr_debug("read FW %s, size=%zu\n", fw_filename, fw->size);
release_firmware(fw);
@@ -300,7 +294,7 @@ static void smsusb1_detectmode(void *context, int *mode)
if (!product_string) {
product_string = "none";
- sms_err("product string not found");
+ pr_err("product string not found\n");
} else if (strstr(product_string, "DVBH"))
*mode = 1;
else if (strstr(product_string, "BDA"))
@@ -310,7 +304,7 @@ static void smsusb1_detectmode(void *context, int *mode)
else if (strstr(product_string, "TDMB"))
*mode = 2;
- sms_info("%d \"%s\"", *mode, product_string);
+ pr_debug("%d \"%s\"\n", *mode, product_string);
}
static int smsusb1_setmode(void *context, int mode)
@@ -319,7 +313,7 @@ static int smsusb1_setmode(void *context, int mode)
sizeof(struct sms_msg_hdr), 0 };
if (mode < DEVICE_MODE_DVBT || mode > DEVICE_MODE_DVBT_BDA) {
- sms_err("invalid firmware id specified %d", mode);
+ pr_err("invalid firmware id specified %d\n", mode);
return -EINVAL;
}
@@ -339,25 +333,61 @@ static void smsusb_term_device(struct usb_interface *intf)
if (dev->coredev)
smscore_unregister_device(dev->coredev);
- sms_info("device 0x%p destroyed", dev);
+ pr_debug("device 0x%p destroyed\n", dev);
kfree(dev);
}
usb_set_intfdata(intf, NULL);
}
+static void *siano_media_device_register(struct smsusb_device_t *dev,
+ int board_id)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ struct media_device *mdev;
+ struct usb_device *udev = dev->udev;
+ struct sms_board *board = sms_get_board(board_id);
+ int ret;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return NULL;
+
+ mdev->dev = &udev->dev;
+ strlcpy(mdev->model, board->name, sizeof(mdev->model));
+ if (udev->serial)
+ strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+ strcpy(mdev->bus_info, udev->devpath);
+ mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ ret = media_device_register(mdev);
+ if (ret) {
+ pr_err("Couldn't create a media device. Error: %d\n",
+ ret);
+ kfree(mdev);
+ return NULL;
+ }
+
+ pr_info("media controller created\n");
+
+ return mdev;
+#else
+ return NULL;
+#endif
+}
+
static int smsusb_init_device(struct usb_interface *intf, int board_id)
{
struct smsdevice_params_t params;
struct smsusb_device_t *dev;
+ void *mdev;
int i, rc;
/* create device object */
dev = kzalloc(sizeof(struct smsusb_device_t), GFP_KERNEL);
- if (!dev) {
- sms_err("kzalloc(sizeof(struct smsusb_device_t) failed");
+ if (!dev)
return -ENOMEM;
- }
memset(&params, 0, sizeof(params));
usb_set_intfdata(intf, dev);
@@ -374,7 +404,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
params.detectmode_handler = smsusb1_detectmode;
break;
case SMS_UNKNOWN_TYPE:
- sms_err("Unspecified sms device type!");
+ pr_err("Unspecified sms device type!\n");
/* fall-thru */
default:
dev->buffer_size = USB2_BUFFER_SIZE;
@@ -393,7 +423,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
dev->out_ep = intf->cur_altsetting->endpoint[i].desc.bEndpointAddress;
}
- sms_info("in_ep = %02x, out_ep = %02x",
+ pr_debug("in_ep = %02x, out_ep = %02x\n",
dev->in_ep, dev->out_ep);
params.device = &dev->udev->dev;
@@ -403,11 +433,17 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
params.context = dev;
usb_make_path(dev->udev, params.devpath, sizeof(params.devpath));
+ mdev = siano_media_device_register(dev, board_id);
+
/* register in smscore */
- rc = smscore_register_device(&params, &dev->coredev);
+ rc = smscore_register_device(&params, &dev->coredev, mdev);
if (rc < 0) {
- sms_err("smscore_register_device(...) failed, rc %d", rc);
+ pr_err("smscore_register_device(...) failed, rc %d\n", rc);
smsusb_term_device(intf);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ media_device_unregister(mdev);
+#endif
+ kfree(mdev);
return rc;
}
@@ -421,10 +457,10 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
usb_init_urb(&dev->surbs[i].urb);
}
- sms_info("smsusb_start_streaming(...).");
+ pr_debug("smsusb_start_streaming(...).\n");
rc = smsusb_start_streaming(dev);
if (rc < 0) {
- sms_err("smsusb_start_streaming(...) failed");
+ pr_err("smsusb_start_streaming(...) failed\n");
smsusb_term_device(intf);
return rc;
}
@@ -433,12 +469,12 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
rc = smscore_start_device(dev->coredev);
if (rc < 0) {
- sms_err("smscore_start_device(...) failed");
+ pr_err("smscore_start_device(...) failed\n");
smsusb_term_device(intf);
return rc;
}
- sms_info("device 0x%p created", dev);
+ pr_debug("device 0x%p created\n", dev);
return rc;
}
@@ -450,13 +486,13 @@ static int smsusb_probe(struct usb_interface *intf,
char devpath[32];
int i, rc;
- sms_info("board id=%lu, interface number %d",
+ pr_info("board id=%lu, interface number %d\n",
id->driver_info,
intf->cur_altsetting->desc.bInterfaceNumber);
if (sms_get_board(id->driver_info)->intf_num !=
intf->cur_altsetting->desc.bInterfaceNumber) {
- sms_debug("interface %d won't be used. Expecting interface %d to popup",
+ pr_debug("interface %d won't be used. Expecting interface %d to popup\n",
intf->cur_altsetting->desc.bInterfaceNumber,
sms_get_board(id->driver_info)->intf_num);
return -ENODEV;
@@ -467,15 +503,15 @@ static int smsusb_probe(struct usb_interface *intf,
intf->cur_altsetting->desc.bInterfaceNumber,
0);
if (rc < 0) {
- sms_err("usb_set_interface failed, rc %d", rc);
+ pr_err("usb_set_interface failed, rc %d\n", rc);
return rc;
}
}
- sms_info("smsusb_probe %d",
+ pr_debug("smsusb_probe %d\n",
intf->cur_altsetting->desc.bInterfaceNumber);
for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
- sms_info("endpoint %d %02x %02x %d", i,
+ pr_debug("endpoint %d %02x %02x %d\n", i,
intf->cur_altsetting->endpoint[i].desc.bEndpointAddress,
intf->cur_altsetting->endpoint[i].desc.bmAttributes,
intf->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
@@ -489,7 +525,7 @@ static int smsusb_probe(struct usb_interface *intf,
}
if ((udev->actconfig->desc.bNumInterfaces == 2) &&
(intf->cur_altsetting->desc.bInterfaceNumber == 0)) {
- sms_debug("rom interface 0 is not used");
+ pr_debug("rom interface 0 is not used\n");
return -ENODEV;
}
@@ -498,23 +534,25 @@ static int smsusb_probe(struct usb_interface *intf,
snprintf(devpath, sizeof(devpath), "usb\\%d-%s",
udev->bus->busnum, udev->devpath);
- sms_info("stellar device in cold state was found at %s.", devpath);
+ pr_info("stellar device in cold state was found at %s.\n",
+ devpath);
rc = smsusb1_load_firmware(
udev, smscore_registry_getmode(devpath),
id->driver_info);
/* This device will reset and gain another USB ID */
if (!rc)
- sms_info("stellar device now in warm state");
+ pr_info("stellar device now in warm state\n");
else
- sms_err("Failed to put stellar in warm state. Error: %d", rc);
+ pr_err("Failed to put stellar in warm state. Error: %d\n",
+ rc);
return rc;
} else {
rc = smsusb_init_device(intf, id->driver_info);
}
- sms_info("Device initialized with return code %d", rc);
+ pr_info("Device initialized with return code %d\n", rc);
sms_board_load_modules(id->driver_info);
return rc;
}
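
The siano hunks drop the sms_err()/sms_info() wrappers in favour of pr_err()/pr_debug(); moving the smscoreapi.h include to the top presumably lets a shared pr_fmt() definition from that header take effect before printk.h is pulled in. A generic sketch of that convention (not the siano header itself):

/* Must appear before any #include that drags in <linux/printk.h>. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int demo_log(int rc)
{
	if (rc < 0)
		pr_err("operation failed, rc %d\n", rc);   /* prefixed with the module name */
	else
		pr_debug("operation done, rc %d\n", rc);
	return rc;
}
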
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 65a326c5128f..749ad5603c9e 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -240,6 +240,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
if (mutex_lock_interruptible(&dev->v4l_lock))
return -ERESTARTSYS;
+ /*
+ * Once URBs are cancelled, the URB complete handler
+ * won't be running. This is required to safely release the
+ * current buffer (dev->isoc_ctl.buf).
+ */
stk1160_cancel_isoc(dev);
/*
@@ -620,8 +625,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
stk1160_info("buffer [%p/%d] aborted\n",
buf, buf->vb.v4l2_buf.index);
}
- /* It's important to clear current buffer */
- dev->isoc_ctl.buf = NULL;
+
+ /* It's important to release the current buffer */
+ if (dev->isoc_ctl.buf) {
+ buf = dev->isoc_ctl.buf;
+ dev->isoc_ctl.buf = NULL;
+
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ stk1160_info("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
+ }
spin_unlock_irqrestore(&dev->buf_lock, flags);
}
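
The stk1160 hunks are about teardown ordering: the isoc URBs are cancelled first, so the completion handler can no longer touch dev->isoc_ctl.buf, and only then is the in-flight buffer returned to vb2. A sketch of that release step (field names follow the hunk; the stk1160_buffer type name is assumed from the driver's naming):

#include <linux/spinlock.h>
#include <media/videobuf2-core.h>
#include "stk1160.h"		/* driver-local struct stk1160, stk1160_buffer */

/* Caller must already have cancelled the isoc URBs, so the completion
 * handler cannot race with this.
 */
static void my_release_inflight(struct stk1160 *dev)
{
	struct stk1160_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dev->buf_lock, flags);
	buf = dev->isoc_ctl.buf;
	dev->isoc_ctl.buf = NULL;
	spin_unlock_irqrestore(&dev->buf_lock, flags);

	if (buf)
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
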
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index e08fa587332f..c21c4c004f97 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -556,10 +556,8 @@ static int stk_free_sio_buffers(struct stk_camera *dev)
nbufs = dev->n_sbufs;
dev->n_sbufs = 0;
spin_unlock_irqrestore(&dev->spinlock, flags);
- for (i = 0; i < nbufs; i++) {
- if (dev->sio_bufs[i].buffer != NULL)
- vfree(dev->sio_bufs[i].buffer);
- }
+ for (i = 0; i < nbufs; i++)
+ vfree(dev->sio_bufs[i].buffer);
kfree(dev->sio_bufs);
dev->sio_bufs = NULL;
return 0;
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 0f14d3ccc7b4..77ce9efe1f24 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1576,7 +1576,7 @@ static struct video_device tm6000_template = {
.name = "tm6000",
.fops = &tm6000_fops,
.ioctl_ops = &video_ioctl_ops,
- .release = video_device_release,
+ .release = video_device_release_empty,
.tvnorms = TM6000_STD,
};
@@ -1609,25 +1609,19 @@ static struct video_device tm6000_radio_template = {
* ------------------------------------------------------------------
*/
-static struct video_device *vdev_init(struct tm6000_core *dev,
+static void vdev_init(struct tm6000_core *dev,
+ struct video_device *vfd,
const struct video_device
*template, const char *type_name)
{
- struct video_device *vfd;
-
- vfd = video_device_alloc();
- if (NULL == vfd)
- return NULL;
-
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
- vfd->release = video_device_release;
+ vfd->release = video_device_release_empty;
vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
video_set_drvdata(vfd, dev);
- return vfd;
}
int tm6000_v4l2_register(struct tm6000_core *dev)
@@ -1658,62 +1652,46 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
if (ret)
goto free_ctrl;
- dev->vfd = vdev_init(dev, &tm6000_template, "video");
+ vdev_init(dev, &dev->vfd, &tm6000_template, "video");
- if (!dev->vfd) {
- printk(KERN_INFO "%s: can't register video device\n",
- dev->name);
- ret = -ENOMEM;
- goto free_ctrl;
- }
- dev->vfd->ctrl_handler = &dev->ctrl_handler;
+ dev->vfd.ctrl_handler = &dev->ctrl_handler;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vidq.queued);
- ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
+ ret = video_register_device(&dev->vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
- video_device_release(dev->vfd);
- dev->vfd = NULL;
goto free_ctrl;
}
printk(KERN_INFO "%s: registered device %s\n",
- dev->name, video_device_node_name(dev->vfd));
+ dev->name, video_device_node_name(&dev->vfd));
if (dev->caps.has_radio) {
- dev->radio_dev = vdev_init(dev, &tm6000_radio_template,
+ vdev_init(dev, &dev->radio_dev, &tm6000_radio_template,
"radio");
- if (!dev->radio_dev) {
- printk(KERN_INFO "%s: can't register radio device\n",
- dev->name);
- ret = -ENXIO;
- goto unreg_video;
- }
-
- dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
- ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
+ dev->radio_dev.ctrl_handler = &dev->radio_ctrl_handler;
+ ret = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr);
if (ret < 0) {
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
- video_device_release(dev->radio_dev);
goto unreg_video;
}
printk(KERN_INFO "%s: registered device %s\n",
- dev->name, video_device_node_name(dev->radio_dev));
+ dev->name, video_device_node_name(&dev->radio_dev));
}
printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
return ret;
unreg_video:
- video_unregister_device(dev->vfd);
+ video_unregister_device(&dev->vfd);
free_ctrl:
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_ctrl_handler_free(&dev->radio_ctrl_handler);
@@ -1722,19 +1700,12 @@ free_ctrl:
int tm6000_v4l2_unregister(struct tm6000_core *dev)
{
- video_unregister_device(dev->vfd);
+ video_unregister_device(&dev->vfd);
/* if URB buffers are still allocated free them now */
tm6000_free_urb_buffers(dev);
- if (dev->radio_dev) {
- if (video_is_registered(dev->radio_dev))
- video_unregister_device(dev->radio_dev);
- else
- video_device_release(dev->radio_dev);
- dev->radio_dev = NULL;
- }
-
+ video_unregister_device(&dev->radio_dev);
return 0;
}
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index 08bd0740dd23..f2127944776f 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -220,8 +220,8 @@ struct tm6000_core {
struct tm6000_fh *resources; /* Points to fh that is streaming */
bool is_res_read;
- struct video_device *vfd;
- struct video_device *radio_dev;
+ struct video_device vfd;
+ struct video_device radio_dev;
struct tm6000_dmaqueue vidq;
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index cd2fbf11e3b4..12b403e78d52 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -471,7 +471,7 @@ static int vidioc_g_register(struct file *file, void *priv,
/* NT100x has a 8-bit register space */
err_code = usbvision_read_reg(usbvision, reg->reg&0xff);
if (err_code < 0) {
- dev_err(&usbvision->vdev->dev,
+ dev_err(&usbvision->vdev.dev,
"%s: VIDIOC_DBG_G_REGISTER failed: error %d\n",
__func__, err_code);
return err_code;
@@ -490,7 +490,7 @@ static int vidioc_s_register(struct file *file, void *priv,
/* NT100x has a 8-bit register space */
err_code = usbvision_write_reg(usbvision, reg->reg & 0xff, reg->val);
if (err_code < 0) {
- dev_err(&usbvision->vdev->dev,
+ dev_err(&usbvision->vdev.dev,
"%s: VIDIOC_DBG_S_REGISTER failed: error %d\n",
__func__, err_code);
return err_code;
@@ -1157,7 +1157,7 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
if (usbvision->user) {
- dev_err(&usbvision->rdev->dev,
+ dev_err(&usbvision->rdev.dev,
"%s: Someone tried to open an already opened USBVision Radio!\n",
__func__);
err_code = -EBUSY;
@@ -1280,7 +1280,7 @@ static struct video_device usbvision_video_template = {
.fops = &usbvision_fops,
.ioctl_ops = &usbvision_ioctl_ops,
.name = "usbvision-video",
- .release = video_device_release,
+ .release = video_device_release_empty,
.tvnorms = USBVISION_NORMS,
};
@@ -1312,58 +1312,46 @@ static const struct v4l2_ioctl_ops usbvision_radio_ioctl_ops = {
static struct video_device usbvision_radio_template = {
.fops = &usbvision_radio_fops,
.name = "usbvision-radio",
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &usbvision_radio_ioctl_ops,
};
-static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
- struct video_device *vdev_template,
- char *name)
+static void usbvision_vdev_init(struct usb_usbvision *usbvision,
+ struct video_device *vdev,
+ const struct video_device *vdev_template,
+ const char *name)
{
struct usb_device *usb_dev = usbvision->dev;
- struct video_device *vdev;
if (usb_dev == NULL) {
dev_err(&usbvision->dev->dev,
"%s: usbvision->dev is not set\n", __func__);
- return NULL;
+ return;
}
- vdev = video_device_alloc();
- if (NULL == vdev)
- return NULL;
*vdev = *vdev_template;
vdev->lock = &usbvision->v4l2_lock;
vdev->v4l2_dev = &usbvision->v4l2_dev;
snprintf(vdev->name, sizeof(vdev->name), "%s", name);
video_set_drvdata(vdev, usbvision);
- return vdev;
}
/* unregister video4linux devices */
static void usbvision_unregister_video(struct usb_usbvision *usbvision)
{
/* Radio Device: */
- if (usbvision->rdev) {
+ if (video_is_registered(&usbvision->rdev)) {
PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
- video_device_node_name(usbvision->rdev));
- if (video_is_registered(usbvision->rdev))
- video_unregister_device(usbvision->rdev);
- else
- video_device_release(usbvision->rdev);
- usbvision->rdev = NULL;
+ video_device_node_name(&usbvision->rdev));
+ video_unregister_device(&usbvision->rdev);
}
/* Video Device: */
- if (usbvision->vdev) {
+ if (video_is_registered(&usbvision->vdev)) {
PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
- video_device_node_name(usbvision->vdev));
- if (video_is_registered(usbvision->vdev))
- video_unregister_device(usbvision->vdev);
- else
- video_device_release(usbvision->vdev);
- usbvision->vdev = NULL;
+ video_device_node_name(&usbvision->vdev));
+ video_unregister_device(&usbvision->vdev);
}
}
@@ -1371,28 +1359,22 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
static int usbvision_register_video(struct usb_usbvision *usbvision)
{
/* Video Device: */
- usbvision->vdev = usbvision_vdev_init(usbvision,
- &usbvision_video_template,
- "USBVision Video");
- if (usbvision->vdev == NULL)
- goto err_exit;
- if (video_register_device(usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ usbvision_vdev_init(usbvision, &usbvision->vdev,
+ &usbvision_video_template, "USBVision Video");
+ if (video_register_device(&usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
goto err_exit;
printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
- usbvision->nr, video_device_node_name(usbvision->vdev));
+ usbvision->nr, video_device_node_name(&usbvision->vdev));
/* Radio Device: */
if (usbvision_device_data[usbvision->dev_model].radio) {
/* usbvision has radio */
- usbvision->rdev = usbvision_vdev_init(usbvision,
- &usbvision_radio_template,
- "USBVision Radio");
- if (usbvision->rdev == NULL)
- goto err_exit;
- if (video_register_device(usbvision->rdev, VFL_TYPE_RADIO, radio_nr) < 0)
+ usbvision_vdev_init(usbvision, &usbvision->rdev,
+ &usbvision_radio_template, "USBVision Radio");
+ if (video_register_device(&usbvision->rdev, VFL_TYPE_RADIO, radio_nr) < 0)
goto err_exit;
printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
- usbvision->nr, video_device_node_name(usbvision->rdev));
+ usbvision->nr, video_device_node_name(&usbvision->rdev));
}
/* all done */
return 0;
@@ -1461,7 +1443,7 @@ static void usbvision_release(struct usb_usbvision *usbvision)
usbvision->initialized = 0;
- usbvision_remove_sysfs(usbvision->vdev);
+ usbvision_remove_sysfs(&usbvision->vdev);
usbvision_unregister_video(usbvision);
kfree(usbvision->alt_max_pkt_size);
@@ -1525,7 +1507,7 @@ static int usbvision_probe(struct usb_interface *intf,
const struct usb_host_interface *interface;
struct usb_usbvision *usbvision = NULL;
const struct usb_endpoint_descriptor *endpoint;
- int model, i;
+ int model, i, ret;
PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u",
dev->descriptor.idVendor,
@@ -1534,7 +1516,8 @@ static int usbvision_probe(struct usb_interface *intf,
model = devid->driver_info;
if (model < 0 || model >= usbvision_device_data_size) {
PDEBUG(DBG_PROBE, "model out of bounds %d", model);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_usb;
}
printk(KERN_INFO "%s: %s found\n", __func__,
usbvision_device_data[model].model_string);
@@ -1549,18 +1532,21 @@ static int usbvision_probe(struct usb_interface *intf,
__func__, ifnum);
dev_err(&intf->dev, "%s: Endpoint attributes %d",
__func__, endpoint->bmAttributes);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_usb;
}
if (usb_endpoint_dir_out(endpoint)) {
dev_err(&intf->dev, "%s: interface %d. has ISO OUT endpoint!\n",
__func__, ifnum);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_usb;
}
usbvision = usbvision_alloc(dev, intf);
if (usbvision == NULL) {
dev_err(&intf->dev, "%s: couldn't allocate USBVision struct\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_usb;
}
if (dev->descriptor.bNumConfigurations > 1)
@@ -1579,8 +1565,8 @@ static int usbvision_probe(struct usb_interface *intf,
usbvision->alt_max_pkt_size = kmalloc(32 * usbvision->num_alt, GFP_KERNEL);
if (usbvision->alt_max_pkt_size == NULL) {
dev_err(&intf->dev, "usbvision: out of memory!\n");
- usbvision_release(usbvision);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_pkt;
}
for (i = 0; i < usbvision->num_alt; i++) {
@@ -1611,10 +1597,16 @@ static int usbvision_probe(struct usb_interface *intf,
usbvision_configure_video(usbvision);
usbvision_register_video(usbvision);
- usbvision_create_sysfs(usbvision->vdev);
+ usbvision_create_sysfs(&usbvision->vdev);
PDEBUG(DBG_PROBE, "success");
return 0;
+
+err_pkt:
+ usbvision_release(usbvision);
+err_usb:
+ usb_put_dev(dev);
+ return ret;
}
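
The usbvision probe now funnels every failure through goto labels so that the usb_device reference presumably taken with usb_get_dev() earlier in probe is dropped on all error paths. A generic sketch of that unwind shape (my_probe and my_dev are hypothetical):

#include <linux/slab.h>
#include <linux/usb.h>

struct my_dev {
	void *buf;
};

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct usb_device *udev = usb_get_dev(interface_to_usbdev(intf));
	struct my_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_usb;		/* nothing else to undo yet */
	}

	dev->buf = kmalloc(32, GFP_KERNEL);
	if (!dev->buf) {
		ret = -ENOMEM;
		goto err_free;		/* undo in reverse order of setup */
	}

	usb_set_intfdata(intf, dev);
	return 0;

err_free:
	kfree(dev);
err_usb:
	usb_put_dev(udev);		/* balance the usb_get_dev() above */
	return ret;
}
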
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 77aeb1ed9a81..140a1f67566e 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -357,8 +357,8 @@ extern struct usb_device_id usbvision_table[];
struct usb_usbvision {
struct v4l2_device v4l2_dev;
- struct video_device *vdev; /* Video Device */
- struct video_device *rdev; /* Radio Device */
+ struct video_device vdev; /* Video Device */
+ struct video_device rdev; /* Radio Device */
/* i2c Declaration Section*/
struct i2c_adapter i2c_adap;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index cf27006c29dc..5970dd6a1c1c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1669,10 +1669,6 @@ static void uvc_delete(struct uvc_device *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
uvc_mc_cleanup_entity(entity);
#endif
- if (entity->vdev) {
- video_device_release(entity->vdev);
- entity->vdev = NULL;
- }
kfree(entity);
}
@@ -1717,11 +1713,10 @@ static void uvc_unregister_video(struct uvc_device *dev)
atomic_inc(&dev->nstreams);
list_for_each_entry(stream, &dev->streams, list) {
- if (stream->vdev == NULL)
+ if (!video_is_registered(&stream->vdev))
continue;
- video_unregister_device(stream->vdev);
- stream->vdev = NULL;
+ video_unregister_device(&stream->vdev);
uvc_debugfs_cleanup_stream(stream);
}
@@ -1736,7 +1731,7 @@ static void uvc_unregister_video(struct uvc_device *dev)
static int uvc_register_video(struct uvc_device *dev,
struct uvc_streaming *stream)
{
- struct video_device *vdev;
+ struct video_device *vdev = &stream->vdev;
int ret;
/* Initialize the video buffers queue. */
@@ -1757,12 +1752,6 @@ static int uvc_register_video(struct uvc_device *dev,
uvc_debugfs_init_stream(stream);
/* Register the device with V4L. */
- vdev = video_device_alloc();
- if (vdev == NULL) {
- uvc_printk(KERN_ERR, "Failed to allocate video device (%d).\n",
- ret);
- return -ENOMEM;
- }
/* We already hold a reference to dev->udev. The video device will be
* unregistered before the reference is released, so we don't need to
@@ -1780,15 +1769,12 @@ static int uvc_register_video(struct uvc_device *dev,
/* Set the driver data before calling video_register_device, otherwise
* uvc_v4l2_open might race us.
*/
- stream->vdev = vdev;
video_set_drvdata(vdev, stream);
ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
uvc_printk(KERN_ERR, "Failed to register video device (%d).\n",
ret);
- stream->vdev = NULL;
- video_device_release(vdev);
return ret;
}
@@ -1827,7 +1813,7 @@ static int uvc_register_terms(struct uvc_device *dev,
if (ret < 0)
return ret;
- term->vdev = stream->vdev;
+ term->vdev = &stream->vdev;
}
return 0;
@@ -2461,6 +2447,14 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_PROBE_EXTRAFIELDS },
+ /* Aveo Technology USB 2.0 Camera (Tasco USB Microscope) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x1871,
+ .idProduct = 0x0516,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0 },
/* Ecamm Pico iMage */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 10c554e7655c..87a19f33e460 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -306,25 +306,14 @@ int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_mmap(&queue->queue, vma);
- mutex_unlock(&queue->mutex);
-
- return ret;
+ return vb2_mmap(&queue->queue, vma);
}
#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
unsigned long pgoff)
{
- unsigned long ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
- mutex_unlock(&queue->mutex);
- return ret;
+ return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 43e953f73e02..c4b1ac6750d8 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -511,7 +511,7 @@ static int uvc_v4l2_open(struct file *file)
stream->dev->users++;
mutex_unlock(&stream->dev->lock);
- v4l2_fh_init(&handle->vfh, stream->vdev);
+ v4l2_fh_init(&handle->vfh, &stream->vdev);
v4l2_fh_add(&handle->vfh);
handle->chain = stream->chain;
handle->stream = stream;
@@ -882,6 +882,35 @@ static int uvc_ioctl_queryctrl(struct file *file, void *fh,
return uvc_query_v4l2_ctrl(chain, qc);
}
+static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
+ struct v4l2_query_ext_ctrl *qec)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ struct v4l2_queryctrl qc = { qec->id };
+ int ret;
+
+ ret = uvc_query_v4l2_ctrl(chain, &qc);
+ if (ret)
+ return ret;
+
+ qec->id = qc.id;
+ qec->type = qc.type;
+ strlcpy(qec->name, qc.name, sizeof(qec->name));
+ qec->minimum = qc.minimum;
+ qec->maximum = qc.maximum;
+ qec->step = qc.step;
+ qec->default_value = qc.default_value;
+ qec->flags = qc.flags;
+ qec->elem_size = 4;
+ qec->elems = 1;
+ qec->nr_of_dims = 0;
+ memset(qec->dims, 0, sizeof(qec->dims));
+ memset(qec->reserved, 0, sizeof(qec->reserved));
+
+ return 0;
+}
+
static int uvc_ioctl_g_ctrl(struct file *file, void *fh,
struct v4l2_control *ctrl)
{
@@ -1018,26 +1047,37 @@ static int uvc_ioctl_querymenu(struct file *file, void *fh,
return uvc_query_v4l2_menu(chain, qm);
}
-static int uvc_ioctl_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *ccap)
+static int uvc_ioctl_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
{
struct uvc_fh *handle = fh;
struct uvc_streaming *stream = handle->stream;
- if (ccap->type != stream->type)
+ if (sel->type != stream->type)
return -EINVAL;
- ccap->bounds.left = 0;
- ccap->bounds.top = 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (stream->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (stream->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sel->r.left = 0;
+ sel->r.top = 0;
mutex_lock(&stream->mutex);
- ccap->bounds.width = stream->cur_frame->wWidth;
- ccap->bounds.height = stream->cur_frame->wHeight;
+ sel->r.width = stream->cur_frame->wWidth;
+ sel->r.height = stream->cur_frame->wHeight;
mutex_unlock(&stream->mutex);
- ccap->defrect = ccap->bounds;
-
- ccap->pixelaspect.numerator = 1;
- ccap->pixelaspect.denominator = 1;
return 0;
}
@@ -1133,6 +1173,9 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
uvc_simplify_fraction(&fival->discrete.numerator,
&fival->discrete.denominator, 8, 333);
} else {
+ if (fival->index)
+ return -EINVAL;
+
fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
fival->stepwise.min.numerator = frame->dwFrameInterval[0];
fival->stepwise.min.denominator = 10000000;
@@ -1443,13 +1486,14 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_g_input = uvc_ioctl_g_input,
.vidioc_s_input = uvc_ioctl_s_input,
.vidioc_queryctrl = uvc_ioctl_queryctrl,
+ .vidioc_query_ext_ctrl = uvc_ioctl_query_ext_ctrl,
.vidioc_g_ctrl = uvc_ioctl_g_ctrl,
.vidioc_s_ctrl = uvc_ioctl_s_ctrl,
.vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
.vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
.vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
.vidioc_querymenu = uvc_ioctl_querymenu,
- .vidioc_cropcap = uvc_ioctl_cropcap,
+ .vidioc_g_selection = uvc_ioctl_g_selection,
.vidioc_g_parm = uvc_ioctl_g_parm,
.vidioc_s_parm = uvc_ioctl_s_parm,
.vidioc_enum_framesizes = uvc_ioctl_enum_framesizes,
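
With uvc's vidioc_cropcap replaced by vidioc_g_selection, applications query the native frame rectangle through the selection API; the v4l2-dev hunk further down still marks VIDIOC_CROPCAP valid when g_selection exists, presumably so the core can keep emulating it. A userspace sketch:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Query the capture bounds of an already-opened video node fd. */
static int get_capture_bounds(int fd, struct v4l2_rect *r)
{
	struct v4l2_selection sel;

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	sel.target = V4L2_SEL_TGT_CROP_BOUNDS;

	if (ioctl(fd, VIDIOC_G_SELECTION, &sel) < 0)
		return -1;		/* errno set by ioctl() */

	*r = sel.r;
	return 0;
}
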
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index c63e5b55e143..1b594c203992 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -443,7 +443,7 @@ struct uvc_stats_stream {
struct uvc_streaming {
struct list_head list;
struct uvc_device *dev;
- struct video_device *vdev;
+ struct video_device vdev;
struct uvc_video_chain *chain;
atomic_t active;
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 559f8372e2eb..abdcffabcb59 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -134,6 +134,9 @@ struct tuner {
unsigned int type; /* chip type id */
void *config;
const char *name;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
};
/*
@@ -434,6 +437,10 @@ static void set_type(struct i2c_client *c, unsigned int type,
t->name = analog_ops->info.name;
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+ t->sd.entity.name = t->name;
+#endif
+
tuner_dbg("type set to %s\n", t->name);
t->mode_mask = new_mode_mask;
@@ -592,6 +599,9 @@ static int tuner_probe(struct i2c_client *client,
struct tuner *t;
struct tuner *radio;
struct tuner *tv;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int ret;
+#endif
t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
if (NULL == t)
@@ -684,6 +694,18 @@ static int tuner_probe(struct i2c_client *client,
/* Should be just before return */
register_client:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ t->pad.flags = MEDIA_PAD_FL_SOURCE;
+ t->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_TUNER;
+ t->sd.entity.name = t->name;
+
+ ret = media_entity_init(&t->sd.entity, 1, &t->pad, 0);
+ if (ret < 0) {
+ tuner_err("failed to initialize media entity!\n");
+ kfree(t);
+ return -ENODEV;
+ }
+#endif
/* Sets a default mode */
if (t->mode_mask & T_ANALOG_TV)
t->mode = V4L2_TUNER_ANALOG_TV;
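
The tuner-core addition registers the tuner subdev as a media entity with one source pad; media_entity_init() with this signature is the interface of this kernel generation. A sketch of the same step for a generic subdev (my_init_entity is an illustrative helper; real drivers compile it under CONFIG_MEDIA_CONTROLLER):

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

static int my_init_entity(struct v4l2_subdev *sd, struct media_pad *pad,
			  const char *name)
{
	pad->flags = MEDIA_PAD_FL_SOURCE;	/* tuner only feeds downstream */
	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_TUNER;
	sd->entity.name = name;
	return media_entity_init(&sd->entity, 1, pad, 0);
}
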
diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c
index e18cc0469cf8..34e416a554f6 100644
--- a/drivers/media/v4l2-core/v4l2-clk.c
+++ b/drivers/media/v4l2-core/v4l2-clk.c
@@ -9,6 +9,7 @@
*/
#include <linux/atomic.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -23,17 +24,13 @@
static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);
-static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
+static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
struct v4l2_clk *clk;
- list_for_each_entry(clk, &clk_list, list) {
- if (strcmp(dev_id, clk->dev_id))
- continue;
-
- if (!id || !clk->id || !strcmp(clk->id, id))
+ list_for_each_entry(clk, &clk_list, list)
+ if (!strcmp(dev_id, clk->dev_id))
return clk;
- }
return ERR_PTR(-ENODEV);
}
@@ -41,9 +38,24 @@ static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
struct v4l2_clk *clk;
+ struct clk *ccf_clk = clk_get(dev, id);
+
+ if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!IS_ERR_OR_NULL(ccf_clk)) {
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk) {
+ clk_put(ccf_clk);
+ return ERR_PTR(-ENOMEM);
+ }
+ clk->clk = ccf_clk;
+
+ return clk;
+ }
mutex_lock(&clk_lock);
- clk = v4l2_clk_find(dev_name(dev), id);
+ clk = v4l2_clk_find(dev_name(dev));
if (!IS_ERR(clk))
atomic_inc(&clk->use_count);
@@ -60,6 +72,12 @@ void v4l2_clk_put(struct v4l2_clk *clk)
if (IS_ERR(clk))
return;
+ if (clk->clk) {
+ clk_put(clk->clk);
+ kfree(clk);
+ return;
+ }
+
mutex_lock(&clk_lock);
list_for_each_entry(tmp, &clk_list, list)
@@ -97,8 +115,12 @@ static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
int v4l2_clk_enable(struct v4l2_clk *clk)
{
- int ret = v4l2_clk_lock_driver(clk);
+ int ret;
+
+ if (clk->clk)
+ return clk_prepare_enable(clk->clk);
+ ret = v4l2_clk_lock_driver(clk);
if (ret < 0)
return ret;
@@ -124,11 +146,14 @@ void v4l2_clk_disable(struct v4l2_clk *clk)
{
int enable;
+ if (clk->clk)
+ return clk_disable_unprepare(clk->clk);
+
mutex_lock(&clk->lock);
enable = --clk->enable;
- if (WARN(enable < 0, "Unbalanced %s() on %s:%s!\n", __func__,
- clk->dev_id, clk->id))
+ if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
+ clk->dev_id))
clk->enable++;
else if (!enable && clk->ops->disable)
clk->ops->disable(clk);
@@ -141,8 +166,12 @@ EXPORT_SYMBOL(v4l2_clk_disable);
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
- int ret = v4l2_clk_lock_driver(clk);
+ int ret;
+
+ if (clk->clk)
+ return clk_get_rate(clk->clk);
+ ret = v4l2_clk_lock_driver(clk);
if (ret < 0)
return ret;
@@ -161,7 +190,16 @@ EXPORT_SYMBOL(v4l2_clk_get_rate);
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
- int ret = v4l2_clk_lock_driver(clk);
+ int ret;
+
+ if (clk->clk) {
+ long r = clk_round_rate(clk->clk, rate);
+ if (r < 0)
+ return r;
+ return clk_set_rate(clk->clk, r);
+ }
+
+ ret = v4l2_clk_lock_driver(clk);
if (ret < 0)
return ret;
@@ -181,7 +219,7 @@ EXPORT_SYMBOL(v4l2_clk_set_rate);
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
const char *dev_id,
- const char *id, void *priv)
+ void *priv)
{
struct v4l2_clk *clk;
int ret;
@@ -193,9 +231,8 @@ struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
if (!clk)
return ERR_PTR(-ENOMEM);
- clk->id = kstrdup(id, GFP_KERNEL);
clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
- if ((id && !clk->id) || !clk->dev_id) {
+ if (!clk->dev_id) {
ret = -ENOMEM;
goto ealloc;
}
@@ -205,7 +242,7 @@ struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
mutex_init(&clk->lock);
mutex_lock(&clk_lock);
- if (!IS_ERR(v4l2_clk_find(dev_id, id))) {
+ if (!IS_ERR(v4l2_clk_find(dev_id))) {
mutex_unlock(&clk_lock);
ret = -EEXIST;
goto eexist;
@@ -217,7 +254,6 @@ struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
eexist:
ealloc:
- kfree(clk->id);
kfree(clk->dev_id);
kfree(clk);
return ERR_PTR(ret);
@@ -227,15 +263,14 @@ EXPORT_SYMBOL(v4l2_clk_register);
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
if (WARN(atomic_read(&clk->use_count),
- "%s(): Refusing to unregister ref-counted %s:%s clock!\n",
- __func__, clk->dev_id, clk->id))
+ "%s(): Refusing to unregister ref-counted %s clock!\n",
+ __func__, clk->dev_id))
return;
mutex_lock(&clk_lock);
list_del(&clk->list);
mutex_unlock(&clk_lock);
- kfree(clk->id);
kfree(clk->dev_id);
kfree(clk);
}
@@ -253,7 +288,7 @@ static unsigned long fixed_get_rate(struct v4l2_clk *clk)
}
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
- const char *id, unsigned long rate, struct module *owner)
+ unsigned long rate, struct module *owner)
{
struct v4l2_clk *clk;
struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -265,7 +300,7 @@ struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
priv->ops.get_rate = fixed_get_rate;
priv->ops.owner = owner;
- clk = v4l2_clk_register(&priv->ops, dev_id, id, priv);
+ clk = v4l2_clk_register(&priv->ops, dev_id, priv);
if (IS_ERR(clk))
kfree(priv);
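
After this change a v4l2_clk consumer no longer needs a clock id: v4l2_clk_get() tries the common clock framework first and falls back to the v4l2_clk list keyed by device name. A consumer-side sketch (hypothetical sensor code):

#include <linux/err.h>
#include <linux/printk.h>
#include <media/v4l2-clk.h>

static int my_sensor_power_on(struct device *dev)
{
	struct v4l2_clk *clk;
	int ret;

	clk = v4l2_clk_get(dev, NULL);	/* CCF clock if present, else v4l2_clk */
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* may be -EPROBE_DEFER */

	ret = v4l2_clk_enable(clk);
	if (ret) {
		v4l2_clk_put(clk);
		return ret;
	}

	pr_debug("mclk rate: %lu Hz\n", v4l2_clk_get_rate(clk));
	/* ... on power-off: v4l2_clk_disable(clk); v4l2_clk_put(clk); */
	return 0;
}
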
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 45c5b4710601..e3a3468002e6 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -991,7 +991,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_AUTO_FOCUS_START:
case V4L2_CID_AUTO_FOCUS_STOP:
*type = V4L2_CTRL_TYPE_BUTTON;
- *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
*min = *max = *step = *def = 0;
break;
case V4L2_CID_POWER_LINE_FREQUENCY:
@@ -1172,7 +1173,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_FOCUS_RELATIVE:
case V4L2_CID_IRIS_RELATIVE:
case V4L2_CID_ZOOM_RELATIVE:
- *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
break;
case V4L2_CID_FLASH_STROBE_STATUS:
case V4L2_CID_AUTO_FOCUS_STATUS:
@@ -1609,6 +1611,19 @@ static int cluster_changed(struct v4l2_ctrl *master)
if (ctrl == NULL)
continue;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE)
+ changed = ctrl_changed = true;
+
+ /*
+ * Set has_changed to false to avoid generating
+ * the event V4L2_EVENT_CTRL_CH_VALUE
+ */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ctrl->has_changed = false;
+ continue;
+ }
+
for (idx = 0; !ctrl_changed && idx < ctrl->elems; idx++)
ctrl_changed = !ctrl->type_ops->equal(ctrl, idx,
ctrl->p_cur, ctrl->p_new);
@@ -1974,7 +1989,8 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
sz_extra = 0;
if (type == V4L2_CTRL_TYPE_BUTTON)
- flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
flags |= V4L2_CTRL_FLAG_READ_ONLY;
else if (type == V4L2_CTRL_TYPE_INTEGER64 ||
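
The new V4L2_CTRL_FLAG_EXECUTE_ON_WRITE marks controls whose s_ctrl must run even when the written value equals the current one, and the framework now sets it automatically for button and relative controls. A sketch of a driver-defined button control that picks the flag up for free (my_add_af_start is illustrative):

#include <media/v4l2-ctrls.h>

static int my_add_af_start(struct v4l2_ctrl_handler *hdl,
			   const struct v4l2_ctrl_ops *ops)
{
	/* a button control: min/max/step/def are all 0; the core adds
	 * WRITE_ONLY and EXECUTE_ON_WRITE, so every write reaches s_ctrl */
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_FOCUS_START, 0, 0, 0, 0);
	return hdl->error;
}
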
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 86bb93fd7db8..71a1b93b0790 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -357,34 +357,6 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
if (lock)
mutex_unlock(lock);
- } else if (vdev->fops->ioctl) {
- /* This code path is a replacement for the BKL. It is a major
- * hack but it will have to do for those drivers that are not
- * yet converted to use unlocked_ioctl.
- *
- * All drivers implement struct v4l2_device, so we use the
- * lock defined there to serialize the ioctls.
- *
- * However, if the driver sleeps, then it blocks all ioctls
- * since the lock is still held. This is very common for
- * VIDIOC_DQBUF since that normally waits for a frame to arrive.
- * As a result any other ioctl calls will proceed very, very
- * slowly since each call will have to wait for the VIDIOC_QBUF
- * to finish. Things that should take 0.01s may now take 10-20
- * seconds.
- *
- * The workaround is to *not* take the lock for VIDIOC_DQBUF.
- * This actually works OK for videobuf-based drivers, since
- * videobuf will take its own internal lock.
- */
- struct mutex *m = &vdev->v4l2_dev->ioctl_lock;
-
- if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m))
- return -ERESTARTSYS;
- if (video_is_registered(vdev))
- ret = vdev->fops->ioctl(filp, cmd, arg);
- if (cmd != VIDIOC_DQBUF)
- mutex_unlock(m);
} else
ret = -ENOTTY;
@@ -560,10 +532,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
/* vfl_type and vfl_dir independent ioctls */
SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
- if (ops->vidioc_g_priority)
- set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
- if (ops->vidioc_s_priority)
- set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+
/* Note: the control handler can also be passed through the filehandle,
and that can't be tested here. If the bit for these control ioctls
is set, then the ioctl is valid. But if it is 0, then it can still
@@ -640,6 +611,14 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ if (ops->vidioc_g_crop || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
+ if (ops->vidioc_s_crop || ops->vidioc_s_selection)
+ set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
+ SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
+ if (ops->vidioc_cropcap || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
} else if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
@@ -708,14 +687,6 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
- if (ops->vidioc_g_crop || ops->vidioc_g_selection)
- set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
- if (ops->vidioc_s_crop || ops->vidioc_s_selection)
- set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
- SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- if (ops->vidioc_cropcap || ops->vidioc_g_selection)
- set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
ops->vidioc_g_std))
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
@@ -943,8 +914,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->vfl_type != VFL_TYPE_SUBDEV) {
vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
vdev->entity.name = vdev->name;
- vdev->entity.info.v4l.major = VIDEO_MAJOR;
- vdev->entity.info.v4l.minor = vdev->minor;
+ vdev->entity.info.dev.major = VIDEO_MAJOR;
+ vdev->entity.info.dev.minor = vdev->minor;
ret = media_device_register_entity(vdev->v4l2_dev->mdev,
&vdev->entity);
if (ret < 0)
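
With the .ioctl compatibility path removed from v4l2-dev, drivers must provide unlocked_ioctl, normally by pointing it at video_ioctl2 and letting the video_device lock serialize calls (the pvrusb2 hunk above does exactly this). A sketch of such a fops table (my_fops is illustrative; drivers with per-open state use their own open/release instead of the v4l2_fh helpers):

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= v4l2_fh_release,
	.unlocked_ioctl	= video_ioctl2,	/* serialized via vdev->lock if set */
};
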
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 015f92aab44a..5b0a30b9252b 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -37,7 +37,6 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
INIT_LIST_HEAD(&v4l2_dev->subdevs);
spin_lock_init(&v4l2_dev->lock);
- mutex_init(&v4l2_dev->ioctl_lock);
v4l2_prio_init(&v4l2_dev->prio);
kref_init(&v4l2_dev->ref);
get_device(dev);
@@ -248,8 +247,8 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
goto clean_up;
}
#if defined(CONFIG_MEDIA_CONTROLLER)
- sd->entity.info.v4l.major = VIDEO_MAJOR;
- sd->entity.info.v4l.minor = vdev->minor;
+ sd->entity.info.dev.major = VIDEO_MAJOR;
+ sd->entity.info.dev.minor = vdev->minor;
#endif
sd->devnode = vdev;
}
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index b1d8dbb39665..c0e96382feba 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -282,7 +282,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
bt->vsync, bt->vbackporch);
pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
- pr_info("%s: flags (0x%x):%s%s%s%s\n", dev_prefix, bt->flags,
+ pr_info("%s: flags (0x%x):%s%s%s%s%s\n", dev_prefix, bt->flags,
(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
" REDUCED_BLANKING" : "",
(bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
@@ -290,7 +290,9 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
" REDUCED_FPS" : "",
(bt->flags & V4L2_DV_FL_HALF_LINE) ?
- " HALF_LINE" : "");
+ " HALF_LINE" : "",
+ (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
+ " CE_VIDEO" : "");
pr_info("%s: standards (0x%x):%s%s%s%s\n", dev_prefix, bt->standards,
(bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
(bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b08407225db1..aa407cb5f830 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -257,7 +257,7 @@ static void v4l_print_format(const void *arg, bool write_only)
pr_cont(", width=%u, height=%u, "
"pixelformat=%c%c%c%c, field=%s, "
"bytesperline=%u, sizeimage=%u, colorspace=%d, "
- "flags %x, ycbcr_enc=%u, quantization=%u\n",
+ "flags=0x%x, ycbcr_enc=%u, quantization=%u\n",
pix->width, pix->height,
(pix->pixelformat & 0xff),
(pix->pixelformat >> 8) & 0xff,
@@ -273,7 +273,7 @@ static void v4l_print_format(const void *arg, bool write_only)
mp = &p->fmt.pix_mp;
pr_cont(", width=%u, height=%u, "
"format=%c%c%c%c, field=%s, "
- "colorspace=%d, num_planes=%u, flags=%x, "
+ "colorspace=%d, num_planes=%u, flags=0x%x, "
"ycbcr_enc=%u, quantization=%u\n",
mp->width, mp->height,
(mp->pixelformat & 0xff),
@@ -901,6 +901,8 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
*/
if (!allow_priv && c->ctrl_class == V4L2_CID_PRIVATE_BASE)
return 0;
+ if (c->ctrl_class == 0)
+ return 1;
/* Check that all controls are from the same control class. */
for (i = 0; i < c->count; i++) {
if (V4L2_CTRL_ID2CLASS(c->controls[i].id) != c->ctrl_class) {
@@ -1046,8 +1048,6 @@ static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd;
u32 *p = arg;
- if (ops->vidioc_g_priority)
- return ops->vidioc_g_priority(file, fh, arg);
vfd = video_devdata(file);
*p = v4l2_prio_max(vfd->prio);
return 0;
@@ -1060,9 +1060,9 @@ static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
struct v4l2_fh *vfh;
u32 *p = arg;
- if (ops->vidioc_s_priority)
- return ops->vidioc_s_priority(file, fh, *p);
vfd = video_devdata(file);
+ if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
+ return -ENOTTY;
vfh = file->private_data;
return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
}
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 80c588f4e429..73824a5ada83 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v4l2_m2m_get_vq);
*/
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
- struct v4l2_m2m_buffer *b = NULL;
+ struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
*/
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
- struct v4l2_m2m_buffer *b = NULL;
+ struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index b4ed9a955fbe..83143d39dea7 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -19,11 +19,10 @@
#include <media/v4l2-of.h>
-static void v4l2_of_parse_csi_bus(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
+static int v4l2_of_parse_csi_bus(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
{
struct v4l2_of_bus_mipi_csi2 *bus = &endpoint->bus.mipi_csi2;
- u32 data_lanes[ARRAY_SIZE(bus->data_lanes)];
struct property *prop;
bool have_clk_lane = false;
unsigned int flags = 0;
@@ -32,16 +31,34 @@ static void v4l2_of_parse_csi_bus(const struct device_node *node,
prop = of_find_property(node, "data-lanes", NULL);
if (prop) {
const __be32 *lane = NULL;
- int i;
+ unsigned int i;
- for (i = 0; i < ARRAY_SIZE(data_lanes); i++) {
- lane = of_prop_next_u32(prop, lane, &data_lanes[i]);
+ for (i = 0; i < ARRAY_SIZE(bus->data_lanes); i++) {
+ lane = of_prop_next_u32(prop, lane, &v);
if (!lane)
break;
+ bus->data_lanes[i] = v;
}
bus->num_data_lanes = i;
- while (i--)
- bus->data_lanes[i] = data_lanes[i];
+ }
+
+ prop = of_find_property(node, "lane-polarities", NULL);
+ if (prop) {
+ const __be32 *polarity = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bus->lane_polarities); i++) {
+ polarity = of_prop_next_u32(prop, polarity, &v);
+ if (!polarity)
+ break;
+ bus->lane_polarities[i] = v;
+ }
+
+ if (i < 1 + bus->num_data_lanes /* clock + data */) {
+ pr_warn("%s: too few lane-polarities entries (need %u, got %u)\n",
+ node->full_name, 1 + bus->num_data_lanes, i);
+ return -EINVAL;
+ }
}
if (!of_property_read_u32(node, "clock-lanes", &v)) {
@@ -56,6 +73,8 @@ static void v4l2_of_parse_csi_bus(const struct device_node *node,
bus->flags = flags;
endpoint->bus_type = V4L2_MBUS_CSI2;
+
+ return 0;
}
static void v4l2_of_parse_parallel_bus(const struct device_node *node,
@@ -127,11 +146,15 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
int v4l2_of_parse_endpoint(const struct device_node *node,
struct v4l2_of_endpoint *endpoint)
{
+ int rval;
+
of_graph_parse_endpoint(node, &endpoint->base);
endpoint->bus_type = 0;
memset(&endpoint->bus, 0, sizeof(endpoint->bus));
- v4l2_of_parse_csi_bus(node, endpoint);
+ rval = v4l2_of_parse_csi_bus(node, endpoint);
+ if (rval)
+ return rval;
/*
* Parse the parallel video bus properties only if none
* of the MIPI CSI-2 specific properties were found.
@@ -142,3 +165,64 @@ int v4l2_of_parse_endpoint(const struct device_node *node,
return 0;
}
EXPORT_SYMBOL(v4l2_of_parse_endpoint);
+
+/**
+ * v4l2_of_parse_link() - parse a link between two endpoints
+ * @node: pointer to the endpoint at the local end of the link
+ * @link: pointer to the V4L2 OF link data structure
+ *
+ * Fill the link structure with the local and remote nodes and port numbers.
+ * The local_node and remote_node fields are set to point to the local and
+ * remote port's parent nodes respectively (the port parent node being the
+ * parent node of the port node if that node isn't a 'ports' node, or the
+ * grand-parent node of the port node otherwise).
+ *
+ * A reference is taken to both the local and remote nodes; the caller must use
+ * v4l2_of_put_link() to drop the references when done with the link.
+ *
+ * Return: 0 on success, or -ENOLINK if the remote endpoint can't be found.
+ */
+int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link)
+{
+ struct device_node *np;
+
+ memset(link, 0, sizeof(*link));
+
+ np = of_get_parent(node);
+ of_property_read_u32(np, "reg", &link->local_port);
+ np = of_get_next_parent(np);
+ if (of_node_cmp(np->name, "ports") == 0)
+ np = of_get_next_parent(np);
+ link->local_node = np;
+
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+ if (!np) {
+ of_node_put(link->local_node);
+ return -ENOLINK;
+ }
+
+ np = of_get_parent(np);
+ of_property_read_u32(np, "reg", &link->remote_port);
+ np = of_get_next_parent(np);
+ if (of_node_cmp(np->name, "ports") == 0)
+ np = of_get_next_parent(np);
+ link->remote_node = np;
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_of_parse_link);
+
+/**
+ * v4l2_of_put_link() - drop references to nodes in a link
+ * @link: pointer to the V4L2 OF link data structure
+ *
+ * Drop references to the local and remote nodes in the link. This function must
+ * be called on every link parsed with v4l2_of_parse_link().
+ */
+void v4l2_of_put_link(struct v4l2_of_link *link)
+{
+ of_node_put(link->local_node);
+ of_node_put(link->remote_node);
+}
+EXPORT_SYMBOL(v4l2_of_put_link);
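
The kerneldoc above describes the contract; a short caller makes it concrete. This is a minimal sketch and not part of the patch: example_resolve_remote(), dev and ep are illustrative names for a driver walking one of its OF graph endpoints.

/* Hypothetical caller of v4l2_of_parse_link()/v4l2_of_put_link(). */
static int example_resolve_remote(struct device *dev, struct device_node *ep)
{
    struct v4l2_of_link link;
    int ret;

    ret = v4l2_of_parse_link(ep, &link);
    if (ret)
        return ret; /* -ENOLINK: no usable remote-endpoint phandle */

    dev_info(dev, "local port %u -> %s port %u\n",
             link.local_port, link.remote_node->full_name,
             link.remote_port);

    /* The helper took references on both nodes; drop them when done. */
    v4l2_of_put_link(&link);
    return 0;
}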
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 19a034e79be4..63596063b213 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -93,8 +93,7 @@ static int subdev_open(struct file *file)
err:
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (entity)
- media_entity_put(entity);
+ media_entity_put(entity);
#endif
v4l2_fh_del(&subdev_fh->vfh);
v4l2_fh_exit(&subdev_fh->vfh);
@@ -262,7 +261,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (rval)
return rval;
- return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh, format);
+ return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh->pad, format);
}
case VIDIOC_SUBDEV_S_FMT: {
@@ -272,7 +271,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (rval)
return rval;
- return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh, format);
+ return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh->pad, format);
}
case VIDIOC_SUBDEV_G_CROP: {
@@ -289,7 +288,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh, &sel);
+ sd, pad, get_selection, subdev_fh->pad, &sel);
crop->rect = sel.r;
@@ -311,7 +310,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sel.r = crop->rect;
rval = v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh, &sel);
+ sd, pad, set_selection, subdev_fh->pad, &sel);
crop->rect = sel.r;
@@ -321,20 +320,28 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
struct v4l2_subdev_mbus_code_enum *code = arg;
+ if (code->which != V4L2_SUBDEV_FORMAT_TRY &&
+ code->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (code->pad >= sd->entity.num_pads)
return -EINVAL;
- return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh,
+ return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh->pad,
code);
}
case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
struct v4l2_subdev_frame_size_enum *fse = arg;
+ if (fse->which != V4L2_SUBDEV_FORMAT_TRY &&
+ fse->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fse->pad >= sd->entity.num_pads)
return -EINVAL;
- return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh,
+ return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh->pad,
fse);
}
@@ -359,10 +366,14 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval_enum *fie = arg;
+ if (fie->which != V4L2_SUBDEV_FORMAT_TRY &&
+ fie->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fie->pad >= sd->entity.num_pads)
return -EINVAL;
- return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh->pad,
fie);
}
@@ -374,7 +385,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return rval;
return v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh, sel);
+ sd, pad, get_selection, subdev_fh->pad, sel);
}
case VIDIOC_SUBDEV_S_SELECTION: {
@@ -385,7 +396,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return rval;
return v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh, sel);
+ sd, pad, set_selection, subdev_fh->pad, sel);
}
case VIDIOC_G_EDID: {
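
Since the pad ioctls above now reject any 'which' value other than TRY or ACTIVE, userspace has to set that field explicitly. A minimal sketch, assuming an already-open subdev node fd; the helper name and pad number are illustrative:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

/* Hypothetical helper: list the media bus codes exposed on pad 0. */
static void enum_codes(int fd)
{
    struct v4l2_subdev_mbus_code_enum code = {
        .pad = 0,
        .which = V4L2_SUBDEV_FORMAT_ACTIVE, /* TRY or ACTIVE; anything else is -EINVAL */
    };

    while (ioctl(fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &code) == 0) {
        printf("index %u: mbus code 0x%x\n", code.index, code.code);
        code.index++;
    }
}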
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index cc16e76a2493..66ada01c796c 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1247,6 +1247,16 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
{
unsigned int plane;
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (WARN_ON_ONCE(b->bytesused == 0)) {
+ pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ else
+ pr_warn_once("use the actual size instead.\n");
+ }
+ }
+
if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
if (b->memory == V4L2_MEMORY_USERPTR) {
for (plane = 0; plane < vb->num_planes; ++plane) {
@@ -1276,13 +1286,22 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
* userspace clearly never bothered to set it and
* it's a safe assumption that they really meant to
* use the full plane sizes.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0
+ * as a way to indicate that streaming is finished.
+ * In that case, the driver should use the
+ * allow_zero_bytesused flag to keep old userspace
+ * applications working.
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
struct v4l2_plane *pdst = &v4l2_planes[plane];
struct v4l2_plane *psrc = &b->m.planes[plane];
- pdst->bytesused = psrc->bytesused ?
- psrc->bytesused : pdst->length;
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pdst->bytesused = psrc->bytesused;
+ else
+ pdst->bytesused = psrc->bytesused ?
+ psrc->bytesused : pdst->length;
pdst->data_offset = psrc->data_offset;
}
}
@@ -1295,6 +1314,11 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
*
* If bytesused == 0 for the output buffer, then fall back
* to the full buffer size as that's a sensible default.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0 as
+ * a way to indicate that streaming is finished. In that case,
+ * the driver should use the allow_zero_bytesused flag to keep
+ * old userspace applications working.
*/
if (b->memory == V4L2_MEMORY_USERPTR) {
v4l2_planes[0].m.userptr = b->m.userptr;
@@ -1306,10 +1330,13 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
v4l2_planes[0].length = b->length;
}
- if (V4L2_TYPE_IS_OUTPUT(b->type))
- v4l2_planes[0].bytesused = b->bytesused ?
- b->bytesused : v4l2_planes[0].length;
- else
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (vb->vb2_queue->allow_zero_bytesused)
+ v4l2_planes[0].bytesused = b->bytesused;
+ else
+ v4l2_planes[0].bytesused = b->bytesused ?
+ b->bytesused : v4l2_planes[0].length;
+ } else
v4l2_planes[0].bytesused = 0;
}
@@ -2760,7 +2787,8 @@ struct vb2_fileio_data {
unsigned int initial_index;
unsigned int q_count;
unsigned int dq_count;
- unsigned int flags;
+ unsigned read_once:1;
+ unsigned write_immediately:1;
};
/**
@@ -2798,14 +2826,16 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
*/
count = 1;
- dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
- (read) ? "read" : "write", count, q->io_flags);
+ dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
+ (read) ? "read" : "write", count, q->fileio_read_once,
+ q->fileio_write_immediately);
fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
if (fileio == NULL)
return -ENOMEM;
- fileio->flags = q->io_flags;
+ fileio->read_once = q->fileio_read_once;
+ fileio->write_immediately = q->fileio_write_immediately;
/*
* Request buffers and use MMAP type to force driver
@@ -3028,13 +3058,11 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
/*
* Queue next buffer if required.
*/
- if (buf->pos == buf->size ||
- (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
+ if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
/*
* Check if this is the last buffer to read.
*/
- if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
- fileio->dq_count == 1) {
+ if (read && fileio->read_once && fileio->dq_count == 1) {
dprintk(3, "read limit reached\n");
return __vb2_cleanup_fileio(q);
}
@@ -3225,7 +3253,6 @@ EXPORT_SYMBOL_GPL(vb2_thread_start);
int vb2_thread_stop(struct vb2_queue *q)
{
struct vb2_threadio_data *threadio = q->threadio;
- struct vb2_fileio_data *fileio = q->fileio;
int err;
if (threadio == NULL)
@@ -3411,6 +3438,8 @@ ssize_t vb2_fop_write(struct file *file, const char __user *buf,
struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
int err = -EBUSY;
+ if (!(vdev->queue->io_modes & VB2_WRITE))
+ return -EINVAL;
if (lock && mutex_lock_interruptible(lock))
return -ERESTARTSYS;
if (vb2_queue_is_busy(vdev, file))
@@ -3433,6 +3462,8 @@ ssize_t vb2_fop_read(struct file *file, char __user *buf,
struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
int err = -EBUSY;
+ if (!(vdev->queue->io_modes & VB2_READ))
+ return -EINVAL;
if (lock && mutex_lock_interruptible(lock))
return -ERESTARTSYS;
if (vb2_queue_is_busy(vdev, file))
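
The bytesused == 0 compatibility path added above is opt-in. A hypothetical queue-init fragment for an old-style codec driver that still treats bytesused == 0 as end-of-stream might look like the following; the function name and queue settings are illustrative, and the usual ops/mem_ops/buf_struct_size setup is omitted:

static int example_queue_init(struct vb2_queue *q)
{
    q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    q->io_modes = VB2_MMAP | VB2_DMABUF;
    /*
     * Keep accepting bytesused == 0 from legacy userspace instead of
     * silently substituting the full plane length.
     */
    q->allow_zero_bytesused = 1;
    /* q->ops, q->mem_ops, q->buf_struct_size etc. are set elsewhere. */
    return vb2_queue_init(q);
}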
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 69e0483adfee..644dec73d220 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -402,6 +402,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
if (!buf->sgt_base)
buf->sgt_base = vb2_dc_get_base_sgt(buf);
@@ -409,7 +415,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
if (WARN_ON(!buf->sgt_base))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
+ dbuf = dma_buf_export(&exp_info);
if (IS_ERR(dbuf))
return NULL;
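
The same DEFINE_DMA_BUF_EXPORT_INFO() conversion is repeated for the dma-sg and vmalloc allocators below. As a reminder of the shape of the new call (my_dmabuf_ops and buf are placeholders, not names from this patch): the macro declares a pre-initialised struct dma_buf_export_info, the exporter fills in its ops, size, flags and private pointer, and dma_buf_export() takes that single descriptor instead of positional arguments.

    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

    exp_info.ops   = &my_dmabuf_ops;  /* exporter's dma_buf_ops */
    exp_info.size  = buf->size;
    exp_info.flags = flags;
    exp_info.priv  = buf;

    dbuf = dma_buf_export(&exp_info);
    if (IS_ERR(dbuf))
        return NULL;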
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index b1838abb6d00..45c708e463b9 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -583,11 +583,17 @@ static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dma_sg_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
if (WARN_ON(!buf->dma_sgt))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
+ dbuf = dma_buf_export(&exp_info);
if (IS_ERR(dbuf))
return NULL;
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index bcde88572429..657ab302a5cf 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -368,11 +368,17 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flag
{
struct vb2_vmalloc_buf *buf = buf_priv;
struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_vmalloc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
if (WARN_ON(!buf->vaddr))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
+ dbuf = dma_buf_export(&exp_info);
if (IS_ERR(dbuf))
return NULL;
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 191383d8c94d..868036f70f8f 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -83,6 +83,15 @@ config FSL_IFC
bool
depends on FSL_SOC
+config JZ4780_NEMC
+ bool "Ingenic JZ4780 SoC NEMC driver"
+ default y
+ depends on MACH_JZ4780
+ help
+ This driver is for the NAND/External Memory Controller (NEMC) in
+ the Ingenic JZ4780. This controller is used to handle external
+ memory devices such as NAND and SRAM.
+
source "drivers/memory/tegra/Kconfig"
endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 6b6548124473..b670441e3cdf 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -13,5 +13,6 @@ obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o
obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
+obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
new file mode 100644
index 000000000000..919d1925acb9
--- /dev/null
+++ b/drivers/memory/jz4780-nemc.c
@@ -0,0 +1,391 @@
+/*
+ * JZ4780 NAND/external memory controller (NEMC)
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/jz4780-nemc.h>
+
+#define NEMC_SMCRn(n) (0x14 + (((n) - 1) * 4))
+#define NEMC_NFCSR 0x50
+
+#define NEMC_SMCR_SMT BIT(0)
+#define NEMC_SMCR_BW_SHIFT 6
+#define NEMC_SMCR_BW_MASK (0x3 << NEMC_SMCR_BW_SHIFT)
+#define NEMC_SMCR_BW_8 (0 << 6)
+#define NEMC_SMCR_TAS_SHIFT 8
+#define NEMC_SMCR_TAS_MASK (0xf << NEMC_SMCR_TAS_SHIFT)
+#define NEMC_SMCR_TAH_SHIFT 12
+#define NEMC_SMCR_TAH_MASK (0xf << NEMC_SMCR_TAH_SHIFT)
+#define NEMC_SMCR_TBP_SHIFT 16
+#define NEMC_SMCR_TBP_MASK (0xf << NEMC_SMCR_TBP_SHIFT)
+#define NEMC_SMCR_TAW_SHIFT 20
+#define NEMC_SMCR_TAW_MASK (0xf << NEMC_SMCR_TAW_SHIFT)
+#define NEMC_SMCR_TSTRV_SHIFT 24
+#define NEMC_SMCR_TSTRV_MASK (0x3f << NEMC_SMCR_TSTRV_SHIFT)
+
+#define NEMC_NFCSR_NFEn(n) BIT(((n) - 1) << 1)
+#define NEMC_NFCSR_NFCEn(n) BIT((((n) - 1) << 1) + 1)
+#define NEMC_NFCSR_TNFEn(n) BIT(16 + (n) - 1)
+
+struct jz4780_nemc {
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ uint32_t clk_period;
+ unsigned long banks_present;
+};
+
+/**
+ * jz4780_nemc_num_banks() - count the number of banks referenced by a device
+ * @dev: device to count banks for, must be a child of the NEMC.
+ *
+ * Return: The number of unique NEMC banks referred to by the specified NEMC
+ * child device. Unique here means that a device that references the same bank
+ * multiple times in its "reg" property will only count once.
+ */
+unsigned int jz4780_nemc_num_banks(struct device *dev)
+{
+ const __be32 *prop;
+ unsigned int bank, count = 0;
+ unsigned long referenced = 0;
+ int i = 0;
+
+ while ((prop = of_get_address(dev->of_node, i++, NULL, NULL))) {
+ bank = of_read_number(prop, 1);
+ if (!(referenced & BIT(bank))) {
+ referenced |= BIT(bank);
+ count++;
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(jz4780_nemc_num_banks);
+
+/**
+ * jz4780_nemc_set_type() - set the type of device connected to a bank
+ * @dev: child device of the NEMC.
+ * @bank: bank number to configure.
+ * @type: type of device connected to the bank.
+ */
+void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
+ enum jz4780_nemc_bank_type type)
+{
+ struct jz4780_nemc *nemc = dev_get_drvdata(dev->parent);
+ uint32_t nfcsr;
+
+ nfcsr = readl(nemc->base + NEMC_NFCSR);
+
+ /* TODO: Support toggle NAND devices. */
+ switch (type) {
+ case JZ4780_NEMC_BANK_SRAM:
+ nfcsr &= ~(NEMC_NFCSR_TNFEn(bank) | NEMC_NFCSR_NFEn(bank));
+ break;
+ case JZ4780_NEMC_BANK_NAND:
+ nfcsr &= ~NEMC_NFCSR_TNFEn(bank);
+ nfcsr |= NEMC_NFCSR_NFEn(bank);
+ break;
+ }
+
+ writel(nfcsr, nemc->base + NEMC_NFCSR);
+}
+EXPORT_SYMBOL(jz4780_nemc_set_type);
+
+/**
+ * jz4780_nemc_assert() - (de-)assert a NAND device's chip enable pin
+ * @dev: child device of the NEMC.
+ * @bank: bank number of device.
+ * @assert: whether the chip enable pin should be asserted.
+ *
+ * (De-)asserts the chip enable pin for the NAND device connected to the
+ * specified bank.
+ */
+void jz4780_nemc_assert(struct device *dev, unsigned int bank, bool assert)
+{
+ struct jz4780_nemc *nemc = dev_get_drvdata(dev->parent);
+ uint32_t nfcsr;
+
+ nfcsr = readl(nemc->base + NEMC_NFCSR);
+
+ if (assert)
+ nfcsr |= NEMC_NFCSR_NFCEn(bank);
+ else
+ nfcsr &= ~NEMC_NFCSR_NFCEn(bank);
+
+ writel(nfcsr, nemc->base + NEMC_NFCSR);
+}
+EXPORT_SYMBOL(jz4780_nemc_assert);
+
+static uint32_t jz4780_nemc_clk_period(struct jz4780_nemc *nemc)
+{
+ unsigned long rate;
+
+ rate = clk_get_rate(nemc->clk);
+ if (!rate)
+ return 0;
+
+ /* Return in picoseconds. */
+ return div64_ul(1000000000000ull, rate);
+}
+
+static uint32_t jz4780_nemc_ns_to_cycles(struct jz4780_nemc *nemc, uint32_t ns)
+{
+ return ((ns * 1000) + nemc->clk_period - 1) / nemc->clk_period;
+}
+
+static bool jz4780_nemc_configure_bank(struct jz4780_nemc *nemc,
+ unsigned int bank,
+ struct device_node *node)
+{
+ uint32_t smcr, val, cycles;
+
+ /*
+ * Conversion of tBP and tAW cycle counts to values supported by the
+ * hardware (round up to the next supported value).
+ */
+ static const uint32_t convert_tBP_tAW[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+
+ /* 11 - 12 -> 12 cycles */
+ 11, 11,
+
+ /* 13 - 15 -> 15 cycles */
+ 12, 12, 12,
+
+ /* 16 - 20 -> 20 cycles */
+ 13, 13, 13, 13, 13,
+
+ /* 21 - 25 -> 25 cycles */
+ 14, 14, 14, 14, 14,
+
+ /* 26 - 31 -> 31 cycles */
+ 15, 15, 15, 15, 15, 15
+ };
+
+ smcr = readl(nemc->base + NEMC_SMCRn(bank));
+ smcr &= ~NEMC_SMCR_SMT;
+
+ if (!of_property_read_u32(node, "ingenic,nemc-bus-width", &val)) {
+ smcr &= ~NEMC_SMCR_BW_MASK;
+ switch (val) {
+ case 8:
+ smcr |= NEMC_SMCR_BW_8;
+ break;
+ default:
+ /*
+ * Earlier SoCs support a 16 bit bus width (the 4780
+ * does not); until those are properly supported, error out.
+ */
+ dev_err(nemc->dev, "unsupported bus width: %u\n", val);
+ return false;
+ }
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tAS", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TAS_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 15) {
+ dev_err(nemc->dev, "tAS %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= cycles << NEMC_SMCR_TAS_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tAH", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TAH_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 15) {
+ dev_err(nemc->dev, "tAH %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= cycles << NEMC_SMCR_TAH_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tBP", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TBP_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 31) {
+ dev_err(nemc->dev, "tBP %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= convert_tBP_tAW[cycles] << NEMC_SMCR_TBP_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tAW", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TAW_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 31) {
+ dev_err(nemc->dev, "tAW %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= convert_tBP_tAW[cycles] << NEMC_SMCR_TAW_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tSTRV", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TSTRV_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 63) {
+ dev_err(nemc->dev, "tSTRV %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= cycles << NEMC_SMCR_TSTRV_SHIFT;
+ }
+
+ writel(smcr, nemc->base + NEMC_SMCRn(bank));
+ return true;
+}
+
+static int jz4780_nemc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct jz4780_nemc *nemc;
+ struct resource *res;
+ struct device_node *child;
+ const __be32 *prop;
+ unsigned int bank;
+ unsigned long referenced;
+ int i, ret;
+
+ nemc = devm_kzalloc(dev, sizeof(*nemc), GFP_KERNEL);
+ if (!nemc)
+ return -ENOMEM;
+
+ spin_lock_init(&nemc->lock);
+ nemc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nemc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nemc->base)) {
+ dev_err(dev, "failed to get I/O memory\n");
+ return PTR_ERR(nemc->base);
+ }
+
+ writel(0, nemc->base + NEMC_NFCSR);
+
+ nemc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nemc->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(nemc->clk);
+ }
+
+ ret = clk_prepare_enable(nemc->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ nemc->clk_period = jz4780_nemc_clk_period(nemc);
+ if (!nemc->clk_period) {
+ dev_err(dev, "failed to calculate clock period\n");
+ clk_disable_unprepare(nemc->clk);
+ return -EINVAL;
+ }
+
+ /*
+ * Iterate over child devices, check that they do not conflict with
+ * each other, and register child devices for them. If a child device
+ * has invalid properties, it is ignored and no platform device is
+ * registered for it.
+ */
+ for_each_child_of_node(nemc->dev->of_node, child) {
+ referenced = 0;
+ i = 0;
+ while ((prop = of_get_address(child, i++, NULL, NULL))) {
+ bank = of_read_number(prop, 1);
+ if (bank < 1 || bank >= JZ4780_NEMC_NUM_BANKS) {
+ dev_err(nemc->dev,
+ "%s requests invalid bank %u\n",
+ child->full_name, bank);
+
+ /* Will continue the outer loop below. */
+ referenced = 0;
+ break;
+ }
+
+ referenced |= BIT(bank);
+ }
+
+ if (!referenced) {
+ dev_err(nemc->dev, "%s has no addresses\n",
+ child->full_name);
+ continue;
+ } else if (nemc->banks_present & referenced) {
+ dev_err(nemc->dev, "%s conflicts with another node\n",
+ child->full_name);
+ continue;
+ }
+
+ /* Configure bank parameters. */
+ for_each_set_bit(bank, &referenced, JZ4780_NEMC_NUM_BANKS) {
+ if (!jz4780_nemc_configure_bank(nemc, bank, child)) {
+ referenced = 0;
+ break;
+ }
+ }
+
+ if (referenced) {
+ if (of_platform_device_create(child, NULL, nemc->dev))
+ nemc->banks_present |= referenced;
+ }
+ }
+
+ platform_set_drvdata(pdev, nemc);
+ dev_info(dev, "JZ4780 NEMC initialised\n");
+ return 0;
+}
+
+static int jz4780_nemc_remove(struct platform_device *pdev)
+{
+ struct jz4780_nemc *nemc = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(nemc->clk);
+ return 0;
+}
+
+static const struct of_device_id jz4780_nemc_dt_match[] = {
+ { .compatible = "ingenic,jz4780-nemc" },
+ {},
+};
+
+static struct platform_driver jz4780_nemc_driver = {
+ .probe = jz4780_nemc_probe,
+ .remove = jz4780_nemc_remove,
+ .driver = {
+ .name = "jz4780-nemc",
+ .of_match_table = of_match_ptr(jz4780_nemc_dt_match),
+ },
+};
+
+static int __init jz4780_nemc_init(void)
+{
+ return platform_driver_register(&jz4780_nemc_driver);
+}
+subsys_initcall(jz4780_nemc_init);
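
The exported helpers are intended for the NEMC's child devices (for example a JZ4780 NAND driver). A hypothetical fragment showing the intended call sequence from a child's probe path; the function name and bank number are illustrative (a real child would take its bank numbers from its "reg" property):

#include <linux/jz4780-nemc.h>
#include <linux/platform_device.h>

static int example_child_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    unsigned int bank = 1; /* illustrative bank number */

    if (!jz4780_nemc_num_banks(dev))
        return -ENODEV;

    jz4780_nemc_set_type(dev, bank, JZ4780_NEMC_BANK_NAND);

    /* Assert the chip enable around NAND accesses, deassert when idle. */
    jz4780_nemc_assert(dev, bank, true);
    /* ... issue NAND commands ... */
    jz4780_nemc_assert(dev, bank, false);

    return 0;
}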
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 24696f59215b..c94ea0d68746 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -12,8 +12,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#undef DEBUG
-
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -29,6 +27,7 @@
#include <linux/of_address.h>
#include <linux/of_mtd.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/omap-gpmc.h>
#include <linux/mtd/nand.h>
#include <linux/pm_runtime.h>
@@ -136,13 +135,21 @@
#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27)
#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27)
#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
+/** CLKACTIVATIONTIME Max Ticks */
+#define GPMC_CONFIG1_CLKACTIVATIONTIME_MAX 2
#define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23)
+/** ATTACHEDDEVICEPAGELENGTH Max Value */
+#define GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX 2
#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22)
#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21)
-#define GPMC_CONFIG1_WAIT_MON_IIME(val) ((val & 3) << 18)
+#define GPMC_CONFIG1_WAIT_MON_TIME(val) ((val & 3) << 18)
+/** WAITMONITORINGTIME Max Ticks */
+#define GPMC_CONFIG1_WAITMONITORINGTIME_MAX 2
#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16)
#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12)
#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
+/** DEVICESIZE Max Value */
+#define GPMC_CONFIG1_DEVICESIZE_MAX 1
#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10)
#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
#define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8)
@@ -153,6 +160,15 @@
#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3))
#define GPMC_CONFIG7_CSVALID (1 << 6)
+#define GPMC_CONFIG7_BASEADDRESS_MASK 0x3f
+#define GPMC_CONFIG7_CSVALID_MASK BIT(6)
+#define GPMC_CONFIG7_MASKADDRESS_OFFSET 8
+#define GPMC_CONFIG7_MASKADDRESS_MASK (0xf << GPMC_CONFIG7_MASKADDRESS_OFFSET)
+/* All CONFIG7 bits except reserved bits */
+#define GPMC_CONFIG7_MASK (GPMC_CONFIG7_BASEADDRESS_MASK | \
+ GPMC_CONFIG7_CSVALID_MASK | \
+ GPMC_CONFIG7_MASKADDRESS_MASK)
+
#define GPMC_DEVICETYPE_NOR 0
#define GPMC_DEVICETYPE_NAND 2
#define GPMC_CONFIG_WRITEPROTECT 0x00000010
@@ -169,6 +185,11 @@
*/
#define GPMC_NR_IRQ 2
+enum gpmc_clk_domain {
+ GPMC_CD_FCLK,
+ GPMC_CD_CLK
+};
+
struct gpmc_cs_data {
const char *name;
@@ -267,16 +288,55 @@ static unsigned long gpmc_get_fclk_period(void)
return rate;
}
-static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
+/**
+ * gpmc_get_clk_period - get period of selected clock domain in ps
+ * @cs: Chip Select Region.
+ * @cd: Clock Domain.
+ *
+ * GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
+ * prior to calling this function with GPMC_CD_CLK.
+ */
+static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
+{
+
+ unsigned long tick_ps = gpmc_get_fclk_period();
+ u32 l;
+ int div;
+
+ switch (cd) {
+ case GPMC_CD_CLK:
+ /* get current clk divider */
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+ div = (l & 0x03) + 1;
+ /* get GPMC_CLK period */
+ tick_ps *= div;
+ break;
+ case GPMC_CD_FCLK:
+ /* FALL-THROUGH */
+ default:
+ break;
+ }
+
+ return tick_ps;
+
+}
+
+static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
+ enum gpmc_clk_domain cd)
{
unsigned long tick_ps;
/* Calculate in picosecs to yield more exact results */
- tick_ps = gpmc_get_fclk_period();
+ tick_ps = gpmc_get_clk_period(cs, cd);
return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}
+static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
+{
+ return gpmc_ns_to_clk_ticks(time_ns, /* any CS */ 0, GPMC_CD_FCLK);
+}
+
static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
unsigned long tick_ps;
@@ -287,9 +347,15 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
return (time_ps + tick_ps - 1) / tick_ps;
}
+unsigned int gpmc_clk_ticks_to_ns(unsigned ticks, int cs,
+ enum gpmc_clk_domain cd)
+{
+ return ticks * gpmc_get_clk_period(cs, cd) / 1000;
+}
+
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
- return ticks * gpmc_get_fclk_period() / 1000;
+ return gpmc_clk_ticks_to_ns(ticks, /* any CS */ 0, GPMC_CD_FCLK);
}
static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
@@ -338,33 +404,66 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
}
#ifdef DEBUG
-static int get_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
- bool raw, bool noval, int shift,
- const char *name)
+/**
+ * get_gpmc_timing_reg - read a timing parameter and print DTS settings for it.
+ * @cs: Chip Select Region
+ * @reg: GPMC_CS_CONFIGn register offset.
+ * @st_bit: Start Bit
+ * @end_bit: End Bit. Must be >= @st_bit.
+ * @max: Maximum parameter value (before optional @shift).
+ * If 0, maximum is as high as @st_bit and @end_bit allow.
+ * @name: DTS node name, w/o "gpmc,"
+ * @cd: Clock Domain of timing parameter.
+ * @shift: Parameter value left shifts @shift, which is then printed instead of value.
+ * @raw: Raw Format Option.
+ * raw format: gpmc,name = <value>
+ * tick format: gpmc,name = <value> /&zwj;* x ns -- y ns; x ticks *&zwj;/
+ * Where x ns -- y ns result in the same tick value.
+ * When @max is exceeded, "invalid" is printed inside comment.
+ * @noval: Parameter values equal to 0 are not printed.
+ * @return: Specified timing parameter (after optional @shift).
+ *
+ */
+static int get_gpmc_timing_reg(
+ /* timing specifiers */
+ int cs, int reg, int st_bit, int end_bit, int max,
+ const char *name, const enum gpmc_clk_domain cd,
+ /* value transform */
+ int shift,
+ /* format specifiers */
+ bool raw, bool noval)
{
u32 l;
- int nr_bits, max_value, mask;
+ int nr_bits;
+ int mask;
+ bool invalid;
l = gpmc_cs_read_reg(cs, reg);
nr_bits = end_bit - st_bit + 1;
- max_value = (1 << nr_bits) - 1;
- mask = max_value << st_bit;
- l = (l & mask) >> st_bit;
+ mask = (1 << nr_bits) - 1;
+ l = (l >> st_bit) & mask;
+ if (!max)
+ max = mask;
+ invalid = l > max;
if (shift)
l = (shift << l);
if (noval && (l == 0))
return 0;
if (!raw) {
- unsigned int time_ns_min, time_ns, time_ns_max;
-
- time_ns_min = gpmc_ticks_to_ns(l ? l - 1 : 0);
- time_ns = gpmc_ticks_to_ns(l);
- time_ns_max = gpmc_ticks_to_ns(l + 1 > max_value ?
- max_value : l + 1);
- pr_info("gpmc,%s = <%u> (%u - %u ns, %i ticks)\n",
- name, time_ns, time_ns_min, time_ns_max, l);
+ /* DTS tick format for timings in ns */
+ unsigned int time_ns;
+ unsigned int time_ns_min = 0;
+
+ if (l)
+ time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1;
+ time_ns = gpmc_clk_ticks_to_ns(l, cs, cd);
+ pr_info("gpmc,%s = <%u> /* %u ns - %u ns; %i ticks%s*/\n",
+ name, time_ns, time_ns_min, time_ns, l,
+ invalid ? "; invalid " : " ");
} else {
- pr_info("gpmc,%s = <%u>\n", name, l);
+ /* raw format */
+ pr_info("gpmc,%s = <%u>%s\n", name, l,
+ invalid ? " /* invalid */" : "");
}
return l;
@@ -374,13 +473,19 @@ static int get_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
pr_info("cs%i %s: 0x%08x\n", cs, #config, \
gpmc_cs_read_reg(cs, config))
#define GPMC_GET_RAW(reg, st, end, field) \
- get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 0, 0, field)
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 0)
+#define GPMC_GET_RAW_MAX(reg, st, end, max, field) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, 0, 1, 0)
#define GPMC_GET_RAW_BOOL(reg, st, end, field) \
- get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, 0, field)
-#define GPMC_GET_RAW_SHIFT(reg, st, end, shift, field) \
- get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, (shift), field)
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 1)
+#define GPMC_GET_RAW_SHIFT_MAX(reg, st, end, shift, max, field) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, (shift), 1, 1)
#define GPMC_GET_TICKS(reg, st, end, field) \
- get_gpmc_timing_reg(cs, (reg), (st), (end), 0, 0, 0, field)
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 0, 0)
+#define GPMC_GET_TICKS_CD(reg, st, end, field, cd) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, (cd), 0, 0, 0)
+#define GPMC_GET_TICKS_CD_MAX(reg, st, end, max, field, cd) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, (cd), 0, 0, 0)
static void gpmc_show_regs(int cs, const char *desc)
{
@@ -404,11 +509,14 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
pr_info("gpmc cs%i access configuration:\n", cs);
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
- GPMC_GET_RAW(GPMC_CS_CONFIG1, 12, 13, "device-width");
+ GPMC_GET_RAW_MAX(GPMC_CS_CONFIG1, 12, 13,
+ GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
- GPMC_GET_RAW_SHIFT(GPMC_CS_CONFIG1, 23, 24, 4, "burst-length");
+ GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 23, 24, 4,
+ GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX,
+ "burst-length");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "gpmc,sync-read");
@@ -448,8 +556,12 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns");
- GPMC_GET_TICKS(GPMC_CS_CONFIG1, 18, 19, "wait-monitoring-ns");
- GPMC_GET_TICKS(GPMC_CS_CONFIG1, 25, 26, "clk-activation-ns");
+ GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
+ GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
+ "wait-monitoring-ns", GPMC_CD_CLK);
+ GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
+ GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
+ "clk-activation-ns", GPMC_CD_FCLK);
GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns");
@@ -460,8 +572,24 @@ static inline void gpmc_cs_show_timings(int cs, const char *desc)
}
#endif
-static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
- int time, const char *name)
+/**
+ * set_gpmc_timing_reg - set a single timing parameter for Chip Select Region.
+ * Caller is expected to have initialized CONFIG1 GPMCFCLKDIVIDER
+ * prior to calling this function with @cd equal to GPMC_CD_CLK.
+ *
+ * @cs: Chip Select Region.
+ * @reg: GPMC_CS_CONFIGn register offset.
+ * @st_bit: Start Bit
+ * @end_bit: End Bit. Must be >= @st_bit.
+ * @max: Maximum parameter value.
+ * If 0, maximum is as high as @st_bit and @end_bit allow.
+ * @time: Timing parameter in ns.
+ * @cd: Timing parameter clock domain.
+ * @name: Timing parameter name.
+ * @return: 0 on success, -1 on error.
+ */
+static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max,
+ int time, enum gpmc_clk_domain cd, const char *name)
{
u32 l;
int ticks, mask, nr_bits;
@@ -469,22 +597,25 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
if (time == 0)
ticks = 0;
else
- ticks = gpmc_ns_to_ticks(time);
+ ticks = gpmc_ns_to_clk_ticks(time, cs, cd);
nr_bits = end_bit - st_bit + 1;
mask = (1 << nr_bits) - 1;
- if (ticks > mask) {
- pr_err("%s: GPMC error! CS%d: %s: %d ns, %d ticks > %d\n",
- __func__, cs, name, time, ticks, mask);
+ if (!max)
+ max = mask;
+
+ if (ticks > max) {
+ pr_err("%s: GPMC CS%d: %s %d ns, %d ticks > %d ticks\n",
+ __func__, cs, name, time, ticks, max);
return -1;
}
l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
- printk(KERN_INFO
- "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
- cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
+ pr_info(
+ "GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
+ cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
(l >> st_bit) & mask, time);
#endif
l &= ~(mask << st_bit);
@@ -494,18 +625,56 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
return 0;
}
-#define GPMC_SET_ONE(reg, st, end, field) \
- if (set_gpmc_timing_reg(cs, (reg), (st), (end), \
- t->field, #field) < 0) \
+#define GPMC_SET_ONE_CD_MAX(reg, st, end, max, field, cd) \
+ if (set_gpmc_timing_reg(cs, (reg), (st), (end), (max), \
+ t->field, (cd), #field) < 0) \
return -1
+#define GPMC_SET_ONE(reg, st, end, field) \
+ GPMC_SET_ONE_CD_MAX(reg, st, end, 0, field, GPMC_CD_FCLK)
+
+/**
+ * gpmc_calc_waitmonitoring_divider - calculate proper GPMCFCLKDIVIDER based on WAITMONITORINGTIME
+ * WAITMONITORINGTIME will be _at least_ as long as desired, i.e.
+ * read --> don't sample bus too early
+ * write --> data is longer on bus
+ *
+ * Formula:
+ * gpmc_clk_div + 1 = ceil(ceil(waitmonitoringtime_ns / gpmc_fclk_ns)
+ * / waitmonitoring_ticks)
+ * WAITMONITORINGTIME resulting in 0 or 1 tick with div = 1 are caught by
+ * div <= 0 check.
+ *
+ * @wait_monitoring: WAITMONITORINGTIME in ns.
+ * @return: -1 on failure to scale, else proper divider > 0.
+ */
+static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
+{
+
+ int div = gpmc_ns_to_ticks(wait_monitoring);
+
+ div += GPMC_CONFIG1_WAITMONITORINGTIME_MAX - 1;
+ div /= GPMC_CONFIG1_WAITMONITORINGTIME_MAX;
+
+ if (div > 4)
+ return -1;
+ if (div <= 0)
+ div = 1;
+
+ return div;
+
+}
+
+/**
+ * gpmc_calc_divider - calculate GPMC_FCLK divider for sync_clk GPMC_CLK period.
+ * @sync_clk: GPMC_CLK period in ps.
+ * @return: Returns at least 1 if GPMC_FCLK can be divided to GPMC_CLK.
+ * Else, returns -1.
+ */
int gpmc_calc_divider(unsigned int sync_clk)
{
- int div;
- u32 l;
+ int div = gpmc_ps_to_ticks(sync_clk);
- l = sync_clk + (gpmc_get_fclk_period() - 1);
- div = l / gpmc_get_fclk_period();
if (div > 4)
return -1;
if (div <= 0)
@@ -514,7 +683,15 @@ int gpmc_calc_divider(unsigned int sync_clk)
return div;
}
-int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
+/**
+ * gpmc_cs_set_timings - program timing parameters for Chip Select Region.
+ * @cs: Chip Select Region.
+ * @t: GPMC timing parameters.
+ * @s: GPMC timing settings.
+ * @return: 0 on success, -1 on error.
+ */
+int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
+ const struct gpmc_settings *s)
{
int div;
u32 l;
@@ -524,6 +701,33 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
if (div < 0)
return div;
+ /*
+ * See if we need to change the divider for waitmonitoringtime.
+ *
+ * Calculate GPMCFCLKDIVIDER independent of gpmc,sync-clk-ps in DT for
+ * pure asynchronous accesses, i.e. both read and write asynchronous.
+ * However, only do so if WAITMONITORINGTIME is actually used, i.e.
+ * either WAITREADMONITORING or WAITWRITEMONITORING is set.
+ *
+ * This statement must not change div to scale async WAITMONITORINGTIME
+ * to protect mixed synchronous and asynchronous accesses.
+ *
+ * We raise an error later if WAITMONITORINGTIME does not fit.
+ */
+ if (!s->sync_read && !s->sync_write &&
+ (s->wait_on_read || s->wait_on_write)
+ ) {
+
+ div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
+ if (div < 0) {
+ pr_err("%s: waitmonitoringtime %3d ns too large for greatest gpmcfclkdivider.\n",
+ __func__,
+ t->wait_monitoring
+ );
+ return -1;
+ }
+ }
+
GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);
@@ -546,27 +750,27 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);
- GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring);
- GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation);
-
if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
if (gpmc_capability & GPMC_HAS_WR_ACCESS)
GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
- /* caller is expected to have initialized CONFIG1 to cover
- * at least sync vs async
- */
l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
- if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
+ l &= ~0x03;
+ l |= (div - 1);
+ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
+
+ GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
+ GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
+ wait_monitoring, GPMC_CD_CLK);
+ GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
+ GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
+ clk_activation, GPMC_CD_FCLK);
+
#ifdef DEBUG
- printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
- cs, (div * gpmc_get_fclk_period()) / 1000, div);
+ pr_info("GPMC CS%d CLK period is %lu ns (div %d)\n",
+ cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
- l &= ~0x03;
- l |= (div - 1);
- gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
- }
gpmc_cs_bool_timings(cs, &t->bool_timings);
gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");
@@ -586,12 +790,15 @@ static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
if (base & (size - 1))
return -EINVAL;
+ base >>= GPMC_CHUNK_SHIFT;
mask = (1 << GPMC_SECTION_SHIFT) - size;
+ mask >>= GPMC_CHUNK_SHIFT;
+ mask <<= GPMC_CONFIG7_MASKADDRESS_OFFSET;
+
l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
- l &= ~0x3f;
- l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
- l &= ~(0x0f << 8);
- l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
+ l &= ~GPMC_CONFIG7_MASK;
+ l |= base & GPMC_CONFIG7_BASEADDRESS_MASK;
+ l |= mask & GPMC_CONFIG7_MASKADDRESS_MASK;
l |= GPMC_CONFIG7_CSVALID;
gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
@@ -656,7 +863,7 @@ static void gpmc_cs_set_name(int cs, const char *name)
gpmc->name = name;
}
-const char *gpmc_cs_get_name(int cs)
+static const char *gpmc_cs_get_name(int cs)
{
struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
@@ -1786,7 +1993,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
if (ret < 0)
goto err;
- ret = gpmc_cs_set_timings(cs, &gpmc_t);
+ ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
if (ret) {
dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
child->name);
@@ -1802,8 +2009,21 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
gpmc_cs_enable_mem(cs);
no_timings:
- if (of_platform_device_create(child, NULL, &pdev->dev))
- return 0;
+
+ /* create platform device, NULL on error or when disabled */
+ if (!of_platform_device_create(child, NULL, &pdev->dev))
+ goto err_child_fail;
+
+ /* is child a common bus? */
+ if (of_match_node(of_default_bus_match_table, child))
+ /* create children and other common bus children */
+ if (of_platform_populate(child, of_default_bus_match_table,
+ NULL, &pdev->dev))
+ goto err_child_fail;
+
+ return 0;
+
+err_child_fail:
dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
ret = -ENODEV;
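
A worked example of the new WAITMONITORINGTIME handling may help: with a 100 MHz GPMC_FCLK (10 ns per tick), a requested wait-monitoring time of 50 ns is 5 FCLK ticks, which does not fit the 2-tick register field, so gpmc_calc_waitmonitoring_divider() returns div = ceil(5 / 2) = 3; CONFIG1 is then programmed with GPMCFCLKDIVIDER = div - 1, and the field is written in GPMC_CLK ticks as ceil(50 ns / 30 ns) = 2. The standalone sketch below re-derives the same numbers; the constants are illustrative and not taken from the driver:

#include <stdio.h>

#define FCLK_PS            10000UL /* 100 MHz FCLK, period in picoseconds */
#define WAITMONITORING_MAX 2       /* mirrors GPMC_CONFIG1_WAITMONITORINGTIME_MAX */

static unsigned long ns_to_ticks(unsigned long ns, unsigned long tick_ps)
{
    return (ns * 1000 + tick_ps - 1) / tick_ps; /* round up, as the driver does */
}

int main(void)
{
    unsigned long wait_ns = 50;
    unsigned long fclk_ticks = ns_to_ticks(wait_ns, FCLK_PS);
    unsigned long div = (fclk_ticks + WAITMONITORING_MAX - 1) / WAITMONITORING_MAX;
    unsigned long clk_ticks = ns_to_ticks(wait_ns, FCLK_PS * div);

    /* Prints: div=3, WAITMONITORINGTIME=2 ticks (the driver also caps div at 4). */
    printf("div=%lu, WAITMONITORINGTIME=%lu ticks\n", div, clk_ticks);
    return 0;
}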
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index fc145d202c46..922a750640e8 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
if (msb->data_dir == READ) {
- for (cnt = 0; cnt < msb->current_seg; cnt++)
+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
t_len += msb->req_sg[cnt].length
/ msb->page_size;
@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len += msb->current_page - 1;
t_len *= msb->page_size;
+ }
}
} else
t_len = blk_rq_bytes(msb->block_req);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 38356e39adba..d5ad04dad081 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -283,6 +283,18 @@ config HTC_I2CPLD
This device provides input and output GPIOs through an I2C
interface to one or more sub-chips.
+config MFD_INTEL_QUARK_I2C_GPIO
+ tristate "Intel Quark MFD I2C GPIO"
+ depends on PCI
+ depends on X86
+ depends on COMMON_CLK
+ select MFD_CORE
+ help
+ This MFD provides support for the I2C and GPIO functions that
+ exist in a single PCI device. It splits the two I/O devices
+ into their respective I/O drivers.
+ The GPIO block exports a total of 8 interrupt-capable GPIOs.
+
config LPC_ICH
tristate "Intel ICH LPC"
depends on PCI
@@ -364,6 +376,7 @@ config MFD_KEMPLD
* COMe-bIP#
* COMe-bPC2 (ETXexpress-PC)
* COMe-bSC# (ETXexpress-SC T#)
+ * COMe-cBL6
* COMe-cBT6
* COMe-cCT6
* COMe-cDC2 (microETXexpress-DC)
@@ -455,6 +468,20 @@ config MFD_MAX77693
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MAX77843
+ bool "Maxim Semiconductor MAX77843 PMIC Support"
+ depends on I2C=y
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Say yes here to add support for Maxim Semiconductor MAX77843.
+ This is a companion Power Management IC with LED, haptic, charger,
+ fuel gauge, and MUIC (Micro USB Interface Controller) controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX8907
tristate "Maxim Semiconductor MAX8907 PMIC Support"
select MFD_CORE
@@ -502,6 +529,16 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MT6397
+ tristate "MediaTek MT6397 PMIC Support"
+ select MFD_CORE
+ select IRQ_DOMAIN
+ help
+ Say yes here to add support for MediaTek MT6397 PMIC. This is
+ a Power Management IC. This driver provides common support for
+ accessing the device; additional drivers must be enabled in order
+ to use the functionality of the device.
+
config MFD_MENF21BMC
tristate "MEN 14F021P00 Board Management Controller Support"
depends on I2C
@@ -655,6 +692,7 @@ config MFD_RT5033
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
This driver provides for the Richtek RT5033 Power Management IC,
which includes the I2C driver and the Core APIs. This driver provides
@@ -753,6 +791,18 @@ config MFD_SM501_GPIO
lines on the SM501. The platform data is used to supply the
base number for the first GPIO line to register.
+config MFD_SKY81452
+ tristate "Skyworks Solutions SKY81452"
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on I2C
+ help
+ This is the core driver for the Skyworks SKY81452 backlight and
+ voltage regulator device.
+
+ This driver can also be built as a module. If so, the module
+ will be called sky81452.
+
config MFD_SMSC
bool "SMSC ECE1099 series chips"
depends on I2C=y
@@ -1210,6 +1260,7 @@ config MFD_TIMBERDALE
config MFD_TC3589X
bool "Toshiba TC35892 and variants"
depends on I2C=y
+ depends on OF
select MFD_CORE
help
Support for the Toshiba TC35892 and variants I/O Expander.
@@ -1289,10 +1340,11 @@ config MFD_WM5102
Support for Wolfson Microelectronics WM5102 low power audio SoC
config MFD_WM5110
- bool "Wolfson Microelectronics WM5110"
+ bool "Wolfson Microelectronics WM5110 and WM8280/WM8281"
depends on MFD_ARIZONA
help
- Support for Wolfson Microelectronics WM5110 low power audio SoC
+ Support for Wolfson Microelectronics WM5110 and WM8280/WM8281
+ low power audio SoC
config MFD_WM8997
bool "Wolfson Microelectronics WM8997"
@@ -1362,7 +1414,7 @@ config MFD_WM8994
depends on I2C
help
The WM8994 is a highly integrated hi-fi CODEC designed for
- smartphone applicatiosn. As well as audio functionality it
+ smartphone applications. As well as audio functionality it
has on board GPIO and regulator functionality which is
supported via the relevant subsystems. This driver provides
core support for the WM8994, in order to use the actual
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 19f3d744e3bd..0e5cfeba107c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_MFD_CROS_EC) += cros_ec.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
-rtsx_pci-objs := rtsx_pcr.o rtsx_gops.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
@@ -117,6 +117,7 @@ obj-$(CONFIG_MFD_DA9150) += da9150-core.o
obj-$(CONFIG_MFD_MAX14577) += max14577.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
+obj-$(CONFIG_MFD_MAX77843) += max77843.o
obj-$(CONFIG_MFD_MAX8907) += max8907.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
@@ -138,6 +139,7 @@ obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_MFD_KEMPLD) += kempld-core.o
+obj-$(CONFIG_MFD_INTEL_QUARK_I2C_GPIO) += intel_quark_i2c_gpio.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
obj-$(CONFIG_LPC_ICH) += lpc_ich.o
obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
@@ -178,6 +180,8 @@ obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
+obj-$(CONFIG_MFD_SKY81452) += sky81452.o
intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
+obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a8e185f11df..cdd6f3d63314 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1283,7 +1283,7 @@ static irqreturn_t ab8500_debug_handler(int irq, void *data)
/* Prints to seq_file or log_buf */
static int ab8500_registers_print(struct device *dev, u32 bank,
- struct seq_file *s)
+ struct seq_file *s)
{
unsigned int i;
@@ -1304,20 +1304,19 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
}
if (s) {
- err = seq_printf(s,
- " [0x%02X/0x%02X]: 0x%02X\n",
- bank, reg, value);
- if (err < 0) {
- /* Error is not returned here since
- * the output is wanted in any case */
+ seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n",
+ bank, reg, value);
+ /* Error is not returned here since
+ * the output is wanted in any case */
+ if (seq_has_overflowed(s))
return 0;
- }
} else {
dev_info(dev, " [0x%02X/0x%02X]: 0x%02X\n",
bank, reg, value);
}
}
}
+
return 0;
}
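
The conversions in the rest of this file follow the same rule: the return value of seq_printf() is no longer meaningful, and callers that care about truncation check seq_has_overflowed() instead. A minimal sketch of the idiom in a hypothetical show routine (example_show() is not from this patch):

static int example_show(struct seq_file *s, void *unused)
{
    seq_printf(s, "value: 0x%02X\n", 0xAB);

    /* Do not rely on seq_printf()'s return value; test for overflow explicitly. */
    if (seq_has_overflowed(s))
        return 0; /* the seq_file core retries with a larger buffer */

    seq_puts(s, "done\n");
    return 0;
}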
@@ -1330,8 +1329,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
seq_printf(s, " bank 0x%02X:\n", bank);
- ab8500_registers_print(dev, bank, s);
- return 0;
+ return ab8500_registers_print(dev, bank, s);
}
static int ab8500_registers_open(struct inode *inode, struct file *file)
@@ -1355,9 +1353,12 @@ static int ab8500_print_all_banks(struct seq_file *s, void *p)
seq_puts(s, AB8500_NAME_STRING " register values:\n");
for (i = 0; i < AB8500_NUM_BANKS; i++) {
- seq_printf(s, " bank 0x%02X:\n", i);
+ int err;
- ab8500_registers_print(dev, i, s);
+ seq_printf(s, " bank 0x%02X:\n", i);
+ err = ab8500_registers_print(dev, i, s);
+ if (err)
+ return err;
}
return 0;
}
@@ -1458,7 +1459,8 @@ static const struct file_operations ab8500_all_banks_fops = {
static int ab8500_bank_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "0x%02X\n", debug_bank);
+ seq_printf(s, "0x%02X\n", debug_bank);
+ return 0;
}
static int ab8500_bank_open(struct inode *inode, struct file *file)
@@ -1490,7 +1492,8 @@ static ssize_t ab8500_bank_write(struct file *file,
static int ab8500_address_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "0x%02X\n", debug_address);
+ seq_printf(s, "0x%02X\n", debug_address);
+ return 0;
}
static int ab8500_address_open(struct inode *inode, struct file *file)
@@ -1598,7 +1601,8 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
for (line = 0; line < num_interrupt_lines; line++) {
struct irq_desc *desc = irq_to_desc(line + irq_first);
- seq_printf(s, "%3i: %6i %4i", line,
+ seq_printf(s, "%3i: %6i %4i",
+ line,
num_interrupts[line],
num_wake_interrupts[line]);
@@ -1705,8 +1709,7 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
dev_err(dev, "ab->read fail %d\n", err);
return err;
}
- err = seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n",
- bank, reg, value);
+ seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n", bank, reg, value);
}
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
@@ -1743,8 +1746,9 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
BAT_CTRL, bat_ctrl_raw);
- return seq_printf(s, "%d,0x%X\n",
- bat_ctrl_convert, bat_ctrl_raw);
+ seq_printf(s, "%d,0x%X\n", bat_ctrl_convert, bat_ctrl_raw);
+
+ return 0;
}
static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
@@ -1773,8 +1777,9 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
btemp_ball_raw);
- return seq_printf(s,
- "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+ seq_printf(s, "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+
+ return 0;
}
static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
@@ -1804,8 +1809,9 @@ static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
MAIN_CHARGER_V, main_charger_v_raw);
- return seq_printf(s, "%d,0x%X\n",
- main_charger_v_convert, main_charger_v_raw);
+ seq_printf(s, "%d,0x%X\n", main_charger_v_convert, main_charger_v_raw);
+
+ return 0;
}
static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
@@ -1835,8 +1841,9 @@ static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
acc_detect1_raw);
- return seq_printf(s, "%d,0x%X\n",
- acc_detect1_convert, acc_detect1_raw);
+ seq_printf(s, "%d,0x%X\n", acc_detect1_convert, acc_detect1_raw);
+
+ return 0;
}
static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
@@ -1866,8 +1873,9 @@ static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
ACC_DETECT2, acc_detect2_raw);
- return seq_printf(s, "%d,0x%X\n",
- acc_detect2_convert, acc_detect2_raw);
+ seq_printf(s, "%d,0x%X\n", acc_detect2_convert, acc_detect2_raw);
+
+ return 0;
}
static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
@@ -1897,8 +1905,9 @@ static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
aux1_raw);
- return seq_printf(s, "%d,0x%X\n",
- aux1_convert, aux1_raw);
+ seq_printf(s, "%d,0x%X\n", aux1_convert, aux1_raw);
+
+ return 0;
}
static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
@@ -1926,8 +1935,9 @@ static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
aux2_raw);
- return seq_printf(s, "%d,0x%X\n",
- aux2_convert, aux2_raw);
+ seq_printf(s, "%d,0x%X\n", aux2_convert, aux2_raw);
+
+ return 0;
}
static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
@@ -1955,8 +1965,9 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
main_bat_v_raw);
- return seq_printf(s, "%d,0x%X\n",
- main_bat_v_convert, main_bat_v_raw);
+ seq_printf(s, "%d,0x%X\n", main_bat_v_convert, main_bat_v_raw);
+
+ return 0;
}
static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
@@ -1986,8 +1997,9 @@ static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
vbus_v_raw);
- return seq_printf(s, "%d,0x%X\n",
- vbus_v_convert, vbus_v_raw);
+ seq_printf(s, "%d,0x%X\n", vbus_v_convert, vbus_v_raw);
+
+ return 0;
}
static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
@@ -2015,8 +2027,9 @@ static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
MAIN_CHARGER_C, main_charger_c_raw);
- return seq_printf(s, "%d,0x%X\n",
- main_charger_c_convert, main_charger_c_raw);
+ seq_printf(s, "%d,0x%X\n", main_charger_c_convert, main_charger_c_raw);
+
+ return 0;
}
static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
@@ -2046,8 +2059,9 @@ static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
USB_CHARGER_C, usb_charger_c_raw);
- return seq_printf(s, "%d,0x%X\n",
- usb_charger_c_convert, usb_charger_c_raw);
+ seq_printf(s, "%d,0x%X\n", usb_charger_c_convert, usb_charger_c_raw);
+
+ return 0;
}
static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
@@ -2077,8 +2091,9 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
BK_BAT_V, bk_bat_v_raw);
- return seq_printf(s, "%d,0x%X\n",
- bk_bat_v_convert, bk_bat_v_raw);
+ seq_printf(s, "%d,0x%X\n", bk_bat_v_convert, bk_bat_v_raw);
+
+ return 0;
}
static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
@@ -2107,8 +2122,9 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
die_temp_raw);
- return seq_printf(s, "%d,0x%X\n",
- die_temp_convert, die_temp_raw);
+ seq_printf(s, "%d,0x%X\n", die_temp_convert, die_temp_raw);
+
+ return 0;
}
static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
@@ -2137,8 +2153,9 @@ static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
usb_id_convert = ab8500_gpadc_ad_to_voltage(gpadc, USB_ID,
usb_id_raw);
- return seq_printf(s, "%d,0x%X\n",
- usb_id_convert, usb_id_raw);
+ seq_printf(s, "%d,0x%X\n", usb_id_convert, usb_id_raw);
+
+ return 0;
}
static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
@@ -2166,8 +2183,9 @@ static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
xtal_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, XTAL_TEMP,
xtal_temp_raw);
- return seq_printf(s, "%d,0x%X\n",
- xtal_temp_convert, xtal_temp_raw);
+ seq_printf(s, "%d,0x%X\n", xtal_temp_convert, xtal_temp_raw);
+
+ return 0;
}
static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
@@ -2197,8 +2215,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS,
vbat_true_meas_raw);
- return seq_printf(s, "%d,0x%X\n",
- vbat_true_meas_convert, vbat_true_meas_raw);
+ seq_printf(s, "%d,0x%X\n", vbat_true_meas_convert, vbat_true_meas_raw);
+
+ return 0;
}
static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
@@ -2233,9 +2252,13 @@ static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
ibat_raw);
- return seq_printf(s, "%d,0x%X\n" "%d,0x%X\n",
- bat_ctrl_convert, bat_ctrl_raw,
- ibat_convert, ibat_raw);
+ seq_printf(s,
+ "%d,0x%X\n"
+ "%d,0x%X\n",
+ bat_ctrl_convert, bat_ctrl_raw,
+ ibat_convert, ibat_raw);
+
+ return 0;
}
static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
@@ -2269,9 +2292,13 @@ static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
ibat_raw);
- return seq_printf(s, "%d,0x%X\n" "%d,0x%X\n",
- vbat_meas_convert, vbat_meas_raw,
- ibat_convert, ibat_raw);
+ seq_printf(s,
+ "%d,0x%X\n"
+ "%d,0x%X\n",
+ vbat_meas_convert, vbat_meas_raw,
+ ibat_convert, ibat_raw);
+
+ return 0;
}
static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
@@ -2307,9 +2334,13 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
ibat_raw);
- return seq_printf(s, "%d,0x%X\n" "%d,0x%X\n",
- vbat_true_meas_convert, vbat_true_meas_raw,
- ibat_convert, ibat_raw);
+ seq_printf(s,
+ "%d,0x%X\n"
+ "%d,0x%X\n",
+ vbat_true_meas_convert, vbat_true_meas_raw,
+ ibat_convert, ibat_raw);
+
+ return 0;
}
static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
@@ -2344,9 +2375,13 @@ static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
ibat_raw);
- return seq_printf(s, "%d,0x%X\n" "%d,0x%X\n",
- bat_temp_convert, bat_temp_raw,
- ibat_convert, ibat_raw);
+ seq_printf(s,
+ "%d,0x%X\n"
+ "%d,0x%X\n",
+ bat_temp_convert, bat_temp_raw,
+ ibat_convert, ibat_raw);
+
+ return 0;
}
static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
@@ -2373,16 +2408,19 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
ab8540_gpadc_get_otp(gpadc, &vmain_l, &vmain_h, &btemp_l, &btemp_h,
&vbat_l, &vbat_h, &ibat_l, &ibat_h);
- return seq_printf(s, "VMAIN_L:0x%X\n"
- "VMAIN_H:0x%X\n"
- "BTEMP_L:0x%X\n"
- "BTEMP_H:0x%X\n"
- "VBAT_L:0x%X\n"
- "VBAT_H:0x%X\n"
- "IBAT_L:0x%X\n"
- "IBAT_H:0x%X\n",
- vmain_l, vmain_h, btemp_l, btemp_h,
- vbat_l, vbat_h, ibat_l, ibat_h);
+ seq_printf(s,
+ "VMAIN_L:0x%X\n"
+ "VMAIN_H:0x%X\n"
+ "BTEMP_L:0x%X\n"
+ "BTEMP_H:0x%X\n"
+ "VBAT_L:0x%X\n"
+ "VBAT_H:0x%X\n"
+ "IBAT_L:0x%X\n"
+ "IBAT_H:0x%X\n",
+ vmain_l, vmain_h, btemp_l, btemp_h,
+ vbat_l, vbat_h, ibat_l, ibat_h);
+
+ return 0;
}
static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
@@ -2400,7 +2438,9 @@ static const struct file_operations ab8540_gpadc_otp_calib_fops = {
static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "%d\n", avg_sample);
+ seq_printf(s, "%d\n", avg_sample);
+
+ return 0;
}
static int ab8500_gpadc_avg_sample_open(struct inode *inode, struct file *file)
@@ -2445,7 +2485,9 @@ static const struct file_operations ab8500_gpadc_avg_sample_fops = {
static int ab8500_gpadc_trig_edge_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "%d\n", trig_edge);
+ seq_printf(s, "%d\n", trig_edge);
+
+ return 0;
}
static int ab8500_gpadc_trig_edge_open(struct inode *inode, struct file *file)
@@ -2490,7 +2532,9 @@ static const struct file_operations ab8500_gpadc_trig_edge_fops = {
static int ab8500_gpadc_trig_timer_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "%d\n", trig_timer);
+ seq_printf(s, "%d\n", trig_timer);
+
+ return 0;
}
static int ab8500_gpadc_trig_timer_open(struct inode *inode, struct file *file)
@@ -2533,7 +2577,9 @@ static const struct file_operations ab8500_gpadc_trig_timer_fops = {
static int ab8500_gpadc_conv_type_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "%d\n", conv_type);
+ seq_printf(s, "%d\n", conv_type);
+
+ return 0;
}
static int ab8500_gpadc_conv_type_open(struct inode *inode, struct file *file)
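Most of the ab8500-debugfs.c hunks above replace "return seq_printf(...)" with a plain call followed by "return 0", since seq_printf()'s return value is no longer useful for error handling (overflow is reported via seq_has_overflowed() and seq_read() simply retries with a larger buffer). A minimal sketch of the resulting pattern; foo_show(), foo_open() and foo_value are hypothetical names, not part of this patch:

#include <linux/fs.h>
#include <linux/seq_file.h>

static int foo_value;

/* Print one value; any buffer overflow is handled by the seq_file core,
 * so the show() callback just returns 0. */
static int foo_show(struct seq_file *s, void *p)
{
        seq_printf(s, "%d\n", foo_value);

        return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
        return single_open(file, foo_show, inode->i_private);
}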
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 09ba8f186e6a..6ca6dfab50eb 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -561,12 +561,23 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
count++;
}
+ count = 0;
+ of_property_for_each_u32(arizona->dev->of_node, "wlf,dmic-ref", prop,
+ cur, val) {
+ if (count == ARRAY_SIZE(arizona->pdata.dmic_ref))
+ break;
+
+ arizona->pdata.dmic_ref[count] = val;
+ count++;
+ }
+
return 0;
}
const struct of_device_id arizona_of_match[] = {
{ .compatible = "wlf,wm5102", .data = (void *)WM5102 },
{ .compatible = "wlf,wm5110", .data = (void *)WM5110 },
+ { .compatible = "wlf,wm8280", .data = (void *)WM8280 },
{ .compatible = "wlf,wm8997", .data = (void *)WM8997 },
{},
};
@@ -671,6 +682,7 @@ int arizona_dev_init(struct arizona *arizona)
switch (arizona->type) {
case WM5102:
case WM5110:
+ case WM8280:
case WM8997:
for (i = 0; i < ARRAY_SIZE(wm5102_core_supplies); i++)
arizona->core_supplies[i].supply
@@ -834,11 +846,19 @@ int arizona_dev_init(struct arizona *arizona)
#endif
#ifdef CONFIG_MFD_WM5110
case 0x5110:
- type_name = "WM5110";
- if (arizona->type != WM5110) {
+ switch (arizona->type) {
+ case WM5110:
+ type_name = "WM5110";
+ break;
+ case WM8280:
+ type_name = "WM8280";
+ break;
+ default:
+ type_name = "WM5110";
dev_err(arizona->dev, "WM5110 registered as %d\n",
arizona->type);
arizona->type = WM5110;
+ break;
}
apply_patch = wm5110_patch;
break;
@@ -1010,6 +1030,7 @@ int arizona_dev_init(struct arizona *arizona)
ARRAY_SIZE(wm5102_devs), NULL, 0, NULL);
break;
case WM5110:
+ case WM8280:
ret = mfd_add_devices(arizona->dev, -1, wm5110_devs,
ARRAY_SIZE(wm5110_devs), NULL, 0, NULL);
break;
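The new wlf,dmic-ref block in arizona_of_get_core_pdata() above uses a bounded-copy loop over a u32 array property. A small stand-alone sketch of the same idiom; the property name "example,prop" and the destination array are assumptions, not taken from the driver:

#include <linux/of.h>

static void example_copy_u32_array(struct device_node *np, u32 *vals,
                                   unsigned int max)
{
        struct property *prop;
        const __be32 *cur;
        unsigned int n = 0;
        u32 val;

        /* Stop once the destination array is full, as the hunk above does. */
        of_property_for_each_u32(np, "example,prop", prop, cur, val) {
                if (n == max)
                        break;

                vals[n++] = val;
        }
}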
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 9d4156fb082a..ff782a5de235 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -44,6 +44,7 @@ static int arizona_i2c_probe(struct i2c_client *i2c,
#endif
#ifdef CONFIG_MFD_WM5110
case WM5110:
+ case WM8280:
regmap_config = &wm5110_i2c_regmap;
break;
#endif
@@ -87,6 +88,7 @@ static int arizona_i2c_remove(struct i2c_client *i2c)
static const struct i2c_device_id arizona_i2c_id[] = {
{ "wm5102", WM5102 },
{ "wm5110", WM5110 },
+ { "wm8280", WM8280 },
{ "wm8997", WM8997 },
{ }
};
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 3a3fe7cc6d61..d063b94b94b5 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -211,6 +211,7 @@ int arizona_irq_init(struct arizona *arizona)
#endif
#ifdef CONFIG_MFD_WM5110
case WM5110:
+ case WM8280:
aod = &wm5110_aod;
switch (arizona->rev) {
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 8ef58bcff193..1e845f6d407b 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -44,6 +44,7 @@ static int arizona_spi_probe(struct spi_device *spi)
#endif
#ifdef CONFIG_MFD_WM5110
case WM5110:
+ case WM8280:
regmap_config = &wm5110_spi_regmap;
break;
#endif
@@ -84,6 +85,7 @@ static int arizona_spi_remove(struct spi_device *spi)
static const struct spi_device_id arizona_spi_ids[] = {
{ "wm5102", WM5102 },
{ "wm5110", WM5110 },
+ { "wm8280", WM8280 },
{ },
};
MODULE_DEVICE_TABLE(spi, arizona_spi_ids);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 0acbe52b2411..d18029be6a78 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -29,7 +29,7 @@
#define AXP20X_OFF 0x80
-static const char const *axp20x_model_names[] = {
+static const char * const axp20x_model_names[] = {
"AXP202",
"AXP209",
"AXP288",
@@ -290,6 +290,29 @@ static struct resource axp288_adc_resources[] = {
},
};
+static struct resource axp288_extcon_resources[] = {
+ {
+ .start = AXP288_IRQ_VBUS_FALL,
+ .end = AXP288_IRQ_VBUS_FALL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = AXP288_IRQ_VBUS_RISE,
+ .end = AXP288_IRQ_VBUS_RISE,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = AXP288_IRQ_MV_CHNG,
+ .end = AXP288_IRQ_MV_CHNG,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = AXP288_IRQ_BC_USB_CHNG,
+ .end = AXP288_IRQ_BC_USB_CHNG,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct resource axp288_charger_resources[] = {
{
.start = AXP288_IRQ_OV,
@@ -345,6 +368,11 @@ static struct mfd_cell axp288_cells[] = {
.resources = axp288_adc_resources,
},
{
+ .name = "axp288_extcon",
+ .num_resources = ARRAY_SIZE(axp288_extcon_resources),
+ .resources = axp288_extcon_resources,
+ },
+ {
.name = "axp288_charger",
.num_resources = ARRAY_SIZE(axp288_charger_resources),
.resources = axp288_charger_resources,
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index fc0c81ef04ff..c4aecc6f8373 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -74,15 +74,11 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
ret = ec_dev->cmd_xfer(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
- struct cros_ec_command status_msg;
- struct ec_response_get_comms_status status;
+ struct cros_ec_command status_msg = { };
+ struct ec_response_get_comms_status *status;
- status_msg.version = 0;
status_msg.command = EC_CMD_GET_COMMS_STATUS;
- status_msg.outdata = NULL;
- status_msg.outsize = 0;
- status_msg.indata = (uint8_t *)&status;
- status_msg.insize = sizeof(status);
+ status_msg.insize = sizeof(*status);
/*
* Query the EC's status until it's no longer busy or
@@ -98,7 +94,10 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
msg->result = status_msg.result;
if (status_msg.result != EC_RES_SUCCESS)
break;
- if (!(status.flags & EC_COMMS_STATUS_PROCESSING))
+
+ status = (struct ec_response_get_comms_status *)
+ status_msg.indata;
+ if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
break;
}
}
@@ -119,6 +118,10 @@ static const struct mfd_cell cros_devs[] = {
.id = 2,
.of_compatible = "google,cros-ec-i2c-tunnel",
},
+ {
+ .name = "cros-ec-ctl",
+ .id = 3,
+ },
};
int cros_ec_register(struct cros_ec_device *ec_dev)
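The cros_ec hunk above keeps polling EC_CMD_GET_COMMS_STATUS until the PROCESSING flag clears. A hedged, generic sketch of that busy-poll shape; the callback, STATUS_PROCESSING bit and the 50-iteration budget are illustrative and not taken from the driver:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define STATUS_PROCESSING (1 << 0)

static int example_wait_until_idle(void *ctx,
                                   int (*query_status)(void *ctx, u32 *flags))
{
        unsigned int i;
        u32 flags;
        int ret;

        for (i = 0; i < 50; i++) {
                ret = query_status(ctx, &flags);
                if (ret)
                        return ret;

                /* Done as soon as the device stops reporting "busy". */
                if (!(flags & STATUS_PROCESSING))
                        return 0;

                usleep_range(1000, 2000);
        }

        return -ETIMEDOUT;
}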
diff --git a/drivers/mfd/da9052-irq.c b/drivers/mfd/da9052-irq.c
index 57ae7841f536..e65ca194fa98 100644
--- a/drivers/mfd/da9052-irq.c
+++ b/drivers/mfd/da9052-irq.c
@@ -262,6 +262,8 @@ int da9052_irq_init(struct da9052 *da9052)
goto regmap_err;
}
+ enable_irq_wake(da9052->chip_irq);
+
ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
da9052_auxadc_irq, da9052);
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 45ae0b7d13ef..b5de8a6856c0 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -32,7 +32,7 @@ static int da9052_spi_probe(struct spi_device *spi)
if (!da9052)
return -ENOMEM;
- spi->mode = SPI_MODE_0 | SPI_CPOL;
+ spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
spi_setup(spi);
@@ -43,6 +43,10 @@ static int da9052_spi_probe(struct spi_device *spi)
config = da9052_regmap_config;
config.read_flag_mask = 1;
+ config.reg_bits = 7;
+ config.pad_bits = 1;
+ config.val_bits = 8;
+ config.use_single_rw = 1;
da9052->regmap = devm_regmap_init_spi(spi, &config);
if (IS_ERR(da9052->regmap)) {
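The da9052-spi change above spells out the DA9052 SPI framing explicitly instead of relying on a CPOL-tweaked SPI mode. A sketch of an equivalent stand-alone regmap_config; the struct name is hypothetical and the comments restate what the hunk configures:

#include <linux/regmap.h>

static const struct regmap_config example_da9052_like_spi_regmap = {
        .reg_bits       = 7,    /* 7 register address bits ...       */
        .pad_bits       = 1,    /* ... plus 1 pad bit                */
        .val_bits       = 8,    /* 8-bit register values             */
        .use_single_rw  = 1,    /* one register per SPI transfer     */
        .read_flag_mask = 1,    /* reads set the LSB of the address byte */
};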
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index 4d757b97ef9a..5549817df32e 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -95,7 +95,7 @@ static const struct regmap_range_cfg da9150_range_cfg[] = {
},
};
-static struct regmap_config da9150_regmap_config = {
+static const struct regmap_config da9150_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.ranges = da9150_range_cfg,
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 1be9bd1c046d..704e189ca162 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -435,7 +435,7 @@ static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
struct dln2_response *rsp;
struct dln2_rx_context *rxc;
struct device *dev = &dln2->interface->dev;
- const unsigned long timeout = DLN2_USB_TIMEOUT * HZ / 1000;
+ const unsigned long timeout = msecs_to_jiffies(DLN2_USB_TIMEOUT);
struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
int size;
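The dln2 change replaces the open-coded "DLN2_USB_TIMEOUT * HZ / 1000" with msecs_to_jiffies(), which rounds up instead of truncating toward zero. A small illustration; EXAMPLE_TIMEOUT_MS is an assumed constant:

#include <linux/jiffies.h>

#define EXAMPLE_TIMEOUT_MS 5

static unsigned long example_timeout_jiffies(void)
{
        /* With HZ=100, 5 * HZ / 1000 truncates to 0 jiffies, while
         * msecs_to_jiffies(5) rounds up to 1. */
        return msecs_to_jiffies(EXAMPLE_TIMEOUT_MS);
}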
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index 7210ae28bf81..95b2ff8f223a 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -93,7 +93,7 @@ static int hi6421_pmic_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id of_hi6421_pmic_match_tbl[] = {
+static const struct of_device_id of_hi6421_pmic_match_tbl[] = {
{ .compatible = "hisilicon,hi6421-pmic", },
{ },
};
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
new file mode 100644
index 000000000000..1ce16037d043
--- /dev/null
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -0,0 +1,282 @@
+/*
+ * Intel Quark MFD PCI driver for I2C & GPIO
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Intel Quark PCI device for the I2C and GPIO controllers sharing the
+ * same PCI function. This PCI driver splits the two devices into their
+ * respective drivers.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/dmi.h>
+#include <linux/platform_data/gpio-dwapb.h>
+#include <linux/platform_data/i2c-designware.h>
+
+/* PCI BAR for register base address */
+#define MFD_I2C_BAR 0
+#define MFD_GPIO_BAR 1
+
+/* The base GPIO number under GPIOLIB framework */
+#define INTEL_QUARK_MFD_GPIO_BASE 8
+
+/* The default number of South-Cluster GPIOs on Quark. */
+#define INTEL_QUARK_MFD_NGPIO 8
+
+/* The DesignWare GPIO ports on Quark. */
+#define INTEL_QUARK_GPIO_NPORTS 1
+
+#define INTEL_QUARK_IORES_MEM 0
+#define INTEL_QUARK_IORES_IRQ 1
+
+#define INTEL_QUARK_I2C_CONTROLLER_CLK "i2c_designware.0"
+
+/* The Quark I2C controller source clock */
+#define INTEL_QUARK_I2C_CLK_HZ 33000000
+
+#define INTEL_QUARK_I2C_NCLK 1
+
+struct intel_quark_mfd {
+ struct pci_dev *pdev;
+ struct clk *i2c_clk;
+ struct clk_lookup *i2c_clk_lookup;
+};
+
+struct i2c_mode_info {
+ const char *name;
+ unsigned int i2c_scl_freq;
+};
+
+static const struct i2c_mode_info platform_i2c_mode_info[] = {
+ {
+ .name = "Galileo",
+ .i2c_scl_freq = 100000,
+ },
+ {
+ .name = "GalileoGen2",
+ .i2c_scl_freq = 400000,
+ },
+ {}
+};
+
+static struct resource intel_quark_i2c_res[] = {
+ [INTEL_QUARK_IORES_MEM] = {
+ .flags = IORESOURCE_MEM,
+ },
+ [INTEL_QUARK_IORES_IRQ] = {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource intel_quark_gpio_res[] = {
+ [INTEL_QUARK_IORES_MEM] = {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct mfd_cell intel_quark_mfd_cells[] = {
+ {
+ .id = MFD_I2C_BAR,
+ .name = "i2c_designware",
+ .num_resources = ARRAY_SIZE(intel_quark_i2c_res),
+ .resources = intel_quark_i2c_res,
+ .ignore_resource_conflicts = true,
+ },
+ {
+ .id = MFD_GPIO_BAR,
+ .name = "gpio-dwapb",
+ .num_resources = ARRAY_SIZE(intel_quark_gpio_res),
+ .resources = intel_quark_gpio_res,
+ .ignore_resource_conflicts = true,
+ },
+};
+
+static const struct pci_device_id intel_quark_mfd_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0934), },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, intel_quark_mfd_ids);
+
+static int intel_quark_register_i2c_clk(struct intel_quark_mfd *quark_mfd)
+{
+ struct pci_dev *pdev = quark_mfd->pdev;
+ struct clk_lookup *i2c_clk_lookup;
+ struct clk *i2c_clk;
+ int ret;
+
+ i2c_clk_lookup = devm_kcalloc(&pdev->dev, INTEL_QUARK_I2C_NCLK,
+ sizeof(*i2c_clk_lookup), GFP_KERNEL);
+ if (!i2c_clk_lookup)
+ return -ENOMEM;
+
+ i2c_clk_lookup[0].dev_id = INTEL_QUARK_I2C_CONTROLLER_CLK;
+
+ i2c_clk = clk_register_fixed_rate(&pdev->dev,
+ INTEL_QUARK_I2C_CONTROLLER_CLK, NULL,
+ CLK_IS_ROOT, INTEL_QUARK_I2C_CLK_HZ);
+
+	if (IS_ERR(i2c_clk))
+		return PTR_ERR(i2c_clk);
+
+	quark_mfd->i2c_clk_lookup = i2c_clk_lookup;
+	quark_mfd->i2c_clk = i2c_clk;
+
+ ret = clk_register_clkdevs(i2c_clk, i2c_clk_lookup,
+ INTEL_QUARK_I2C_NCLK);
+ if (ret)
+ dev_err(&pdev->dev, "Fixed clk register failed: %d\n", ret);
+
+ return ret;
+}
+
+static void intel_quark_unregister_i2c_clk(struct pci_dev *pdev)
+{
+ struct intel_quark_mfd *quark_mfd = dev_get_drvdata(&pdev->dev);
+
+ if (!quark_mfd->i2c_clk || !quark_mfd->i2c_clk_lookup)
+ return;
+
+ clkdev_drop(quark_mfd->i2c_clk_lookup);
+ clk_unregister(quark_mfd->i2c_clk);
+}
+
+static int intel_quark_i2c_setup(struct pci_dev *pdev, struct mfd_cell *cell)
+{
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ const struct i2c_mode_info *info;
+ struct dw_i2c_platform_data *pdata;
+ struct resource *res = (struct resource *)cell->resources;
+ struct device *dev = &pdev->dev;
+
+ res[INTEL_QUARK_IORES_MEM].start =
+ pci_resource_start(pdev, MFD_I2C_BAR);
+ res[INTEL_QUARK_IORES_MEM].end =
+ pci_resource_end(pdev, MFD_I2C_BAR);
+
+ res[INTEL_QUARK_IORES_IRQ].start = pdev->irq;
+ res[INTEL_QUARK_IORES_IRQ].end = pdev->irq;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* Normal mode by default */
+ pdata->i2c_scl_freq = 100000;
+
+ if (board_name) {
+ for (info = platform_i2c_mode_info; info->name; info++) {
+ if (!strcmp(board_name, info->name)) {
+ pdata->i2c_scl_freq = info->i2c_scl_freq;
+ break;
+ }
+ }
+ }
+
+ cell->platform_data = pdata;
+ cell->pdata_size = sizeof(*pdata);
+
+ return 0;
+}
+
+static int intel_quark_gpio_setup(struct pci_dev *pdev, struct mfd_cell *cell)
+{
+ struct dwapb_platform_data *pdata;
+ struct resource *res = (struct resource *)cell->resources;
+ struct device *dev = &pdev->dev;
+
+ res[INTEL_QUARK_IORES_MEM].start =
+ pci_resource_start(pdev, MFD_GPIO_BAR);
+ res[INTEL_QUARK_IORES_MEM].end =
+ pci_resource_end(pdev, MFD_GPIO_BAR);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+	/* The Intel Quark X1000 has only one GPIO port: portA */
+ pdata->nports = INTEL_QUARK_GPIO_NPORTS;
+ pdata->properties = devm_kcalloc(dev, pdata->nports,
+ sizeof(*pdata->properties),
+ GFP_KERNEL);
+ if (!pdata->properties)
+ return -ENOMEM;
+
+ /* Set the properties for portA */
+ pdata->properties->node = NULL;
+ pdata->properties->name = "intel-quark-x1000-gpio-portA";
+ pdata->properties->idx = 0;
+ pdata->properties->ngpio = INTEL_QUARK_MFD_NGPIO;
+ pdata->properties->gpio_base = INTEL_QUARK_MFD_GPIO_BASE;
+ pdata->properties->irq = pdev->irq;
+ pdata->properties->irq_shared = true;
+
+ cell->platform_data = pdata;
+ cell->pdata_size = sizeof(*pdata);
+
+ return 0;
+}
+
+static int intel_quark_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_quark_mfd *quark_mfd;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ quark_mfd = devm_kzalloc(&pdev->dev, sizeof(*quark_mfd), GFP_KERNEL);
+ if (!quark_mfd)
+ return -ENOMEM;
+ quark_mfd->pdev = pdev;
+
+ ret = intel_quark_register_i2c_clk(quark_mfd);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&pdev->dev, quark_mfd);
+
+ ret = intel_quark_i2c_setup(pdev, &intel_quark_mfd_cells[MFD_I2C_BAR]);
+ if (ret)
+ return ret;
+
+ ret = intel_quark_gpio_setup(pdev,
+ &intel_quark_mfd_cells[MFD_GPIO_BAR]);
+ if (ret)
+ return ret;
+
+ return mfd_add_devices(&pdev->dev, 0, intel_quark_mfd_cells,
+ ARRAY_SIZE(intel_quark_mfd_cells), NULL, 0,
+ NULL);
+}
+
+static void intel_quark_mfd_remove(struct pci_dev *pdev)
+{
+ intel_quark_unregister_i2c_clk(pdev);
+ mfd_remove_devices(&pdev->dev);
+}
+
+static struct pci_driver intel_quark_mfd_driver = {
+ .name = "intel_quark_mfd_i2c_gpio",
+ .id_table = intel_quark_mfd_ids,
+ .probe = intel_quark_mfd_probe,
+ .remove = intel_quark_mfd_remove,
+};
+
+module_pci_driver(intel_quark_mfd_driver);
+
+MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
+MODULE_DESCRIPTION("Intel Quark MFD PCI driver for I2C & GPIO");
+MODULE_LICENSE("GPL v2");
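The new MFD driver registers a fixed-rate clock under the clkdev dev_id "i2c_designware.0", so the I2C cell can look it up by device name alone. A hedged sketch of what the consumer side is expected to do; it is not part of this patch and the helper name is illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static unsigned long example_get_i2c_input_clock(struct device *dev)
{
        unsigned long rate;
        /* Matches the clkdev entry registered above when dev_name(dev)
         * is "i2c_designware.0"; no connection id is needed. */
        struct clk *clk = clk_get(dev, NULL);

        if (IS_ERR(clk))
                return 0;

        rate = clk_get_rate(clk);       /* 33000000 on Quark X1000 */
        clk_put(clk);

        return rate;
}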
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 80cef048b904..7b50b6b208a5 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -26,19 +26,14 @@
#include <linux/mfd/intel_soc_pmic.h>
#include "intel_soc_pmic_core.h"
-/*
- * On some boards the PMIC interrupt may come from a GPIO line.
- * Try to lookup the ACPI table and see if such connection exists. If not,
- * return -ENOENT and use the IRQ provided by I2C.
- */
static int intel_soc_pmic_find_gpio_irq(struct device *dev)
{
struct gpio_desc *desc;
int irq;
- desc = devm_gpiod_get_index(dev, "intel_soc_pmic", 0);
+ desc = devm_gpiod_get_index(dev, "intel_soc_pmic", 0, GPIOD_IN);
if (IS_ERR(desc))
- return -ENOENT;
+ return PTR_ERR(desc);
irq = gpiod_to_irq(desc);
if (irq < 0)
@@ -71,6 +66,11 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
+ /*
+ * On some boards the PMIC interrupt may come from a GPIO line. Try to
+	 * look up the ACPI table for such a connection and set up a GPIO
+	 * interrupt if it exists. Otherwise use the IRQ provided by I2C.
+ */
irq = intel_soc_pmic_find_gpio_irq(dev);
pmic->irq = (irq < 0) ? i2c->irq : irq;
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 5615522f8d62..8057849d51ac 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -508,8 +508,15 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = {
},
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
- },
- {
+ }, {
+ .ident = "CBL6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cBL6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "CCR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index f35d4280b2f7..12d960a60ec4 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -539,72 +539,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
* functions that probably will be registered by other drivers.
*/
static const struct pci_device_id lpc_ich_ids[] = {
- { PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
- { PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
- { PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
- { PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
- { PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
- { PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
- { PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
- { PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
- { PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
- { PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
- { PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
- { PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
- { PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
- { PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
- { PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
- { PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
- { PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
- { PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
- { PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
- { PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
- { PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
- { PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
- { PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
- { PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
- { PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
- { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
- { PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
- { PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
- { PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
- { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
- { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
- { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
- { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
- { PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
- { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
- { PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
- { PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
- { PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
- { PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
- { PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
- { PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
- { PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
- { PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
- { PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
- { PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
- { PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
- { PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
- { PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
- { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
- { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
- { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+ { PCI_VDEVICE(INTEL, 0x0f1c), LPC_BAYTRAIL},
{ PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
{ PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
@@ -638,7 +573,6 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
{ PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
- { PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
{ PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
@@ -671,6 +605,79 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1f38), LPC_AVN},
+ { PCI_VDEVICE(INTEL, 0x1f39), LPC_AVN},
+ { PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
+ { PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
+ { PCI_VDEVICE(INTEL, 0x229c), LPC_BRASWELL},
+ { PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
+ { PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
+ { PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
+ { PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
+ { PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
+ { PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
+ { PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
+ { PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
+ { PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
+ { PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
+ { PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
+ { PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
+ { PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
+ { PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
+ { PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
+ { PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
+ { PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
+ { PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
+ { PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
+ { PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
+ { PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
+ { PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
+ { PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
+ { PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
+ { PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
+ { PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
+ { PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
+ { PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
+ { PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
+ { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+ { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+ { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+ { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+ { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+ { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+ { PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
+ { PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
+ { PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
+ { PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
+ { PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
+ { PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
+ { PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
+ { PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
+ { PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
+ { PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
+ { PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
+ { PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
+ { PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
+ { PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
+ { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
+ { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
+ { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
{ PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
@@ -703,14 +710,11 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
- { PCI_VDEVICE(INTEL, 0x9c40), LPC_LPT_LP},
- { PCI_VDEVICE(INTEL, 0x9c41), LPC_LPT_LP},
- { PCI_VDEVICE(INTEL, 0x9c42), LPC_LPT_LP},
- { PCI_VDEVICE(INTEL, 0x9c43), LPC_LPT_LP},
- { PCI_VDEVICE(INTEL, 0x9c44), LPC_LPT_LP},
- { PCI_VDEVICE(INTEL, 0x9c45), LPC_LPT_LP},
- { PCI_VDEVICE(INTEL, 0x9c46), LPC_LPT_LP},
- { PCI_VDEVICE(INTEL, 0x9c47), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x8cc1), LPC_9S},
+ { PCI_VDEVICE(INTEL, 0x8cc2), LPC_9S},
+ { PCI_VDEVICE(INTEL, 0x8cc3), LPC_9S},
+ { PCI_VDEVICE(INTEL, 0x8cc4), LPC_9S},
+ { PCI_VDEVICE(INTEL, 0x8cc6), LPC_9S},
{ PCI_VDEVICE(INTEL, 0x8d40), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d41), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d42), LPC_WBG},
@@ -743,12 +747,14 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x8d5d), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5e), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5f), LPC_WBG},
- { PCI_VDEVICE(INTEL, 0x1f38), LPC_AVN},
- { PCI_VDEVICE(INTEL, 0x1f39), LPC_AVN},
- { PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
- { PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
- { PCI_VDEVICE(INTEL, 0x0f1c), LPC_BAYTRAIL},
- { PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
+ { PCI_VDEVICE(INTEL, 0x9c40), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9c41), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9c42), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9c43), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9c44), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9c45), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9c46), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x9c47), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc3), LPC_WPT_LP},
@@ -756,12 +762,6 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x9cc6), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc7), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc9), LPC_WPT_LP},
- { PCI_VDEVICE(INTEL, 0x229c), LPC_BRASWELL},
- { PCI_VDEVICE(INTEL, 0x8cc1), LPC_9S},
- { PCI_VDEVICE(INTEL, 0x8cc2), LPC_9S},
- { PCI_VDEVICE(INTEL, 0x8cc3), LPC_9S},
- { PCI_VDEVICE(INTEL, 0x8cc4), LPC_9S},
- { PCI_VDEVICE(INTEL, 0x8cc6), LPC_9S},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index a159593e27a0..cb14afa97e6f 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -53,8 +53,8 @@ static const struct mfd_cell max77693_devs[] = {
.of_compatible = "maxim,max77693-haptic",
},
{
- .name = "max77693-flash",
- .of_compatible = "maxim,max77693-flash",
+ .name = "max77693-led",
+ .of_compatible = "maxim,max77693-led",
},
};
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
new file mode 100644
index 000000000000..a354ac677ec7
--- /dev/null
+++ b/drivers/mfd/max77843.c
@@ -0,0 +1,243 @@
+/*
+ * MFD core driver for the Maxim MAX77843
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77843-private.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+static const struct mfd_cell max77843_devs[] = {
+ {
+ .name = "max77843-muic",
+ .of_compatible = "maxim,max77843-muic",
+ }, {
+ .name = "max77843-regulator",
+ .of_compatible = "maxim,max77843-regulator",
+ }, {
+ .name = "max77843-charger",
+ .of_compatible = "maxim,max77843-charger"
+ }, {
+ .name = "max77843-fuelgauge",
+ .of_compatible = "maxim,max77843-fuelgauge",
+ }, {
+ .name = "max77843-haptic",
+ .of_compatible = "maxim,max77843-haptic",
+ },
+};
+
+static const struct regmap_config max77843_charger_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77843_CHG_REG_END,
+};
+
+static const struct regmap_config max77843_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77843_SYS_REG_END,
+};
+
+static const struct regmap_irq max77843_irqs[] = {
+ /* TOPSYS interrupts */
+ { .reg_offset = 0, .mask = MAX77843_SYS_IRQ_SYSUVLO_INT, },
+ { .reg_offset = 0, .mask = MAX77843_SYS_IRQ_SYSOVLO_INT, },
+ { .reg_offset = 0, .mask = MAX77843_SYS_IRQ_TSHDN_INT, },
+ { .reg_offset = 0, .mask = MAX77843_SYS_IRQ_TM_INT, },
+};
+
+static const struct regmap_irq_chip max77843_irq_chip = {
+ .name = "max77843",
+ .status_base = MAX77843_SYS_REG_SYSINTSRC,
+ .mask_base = MAX77843_SYS_REG_SYSINTMASK,
+ .mask_invert = false,
+ .num_regs = 1,
+ .irqs = max77843_irqs,
+ .num_irqs = ARRAY_SIZE(max77843_irqs),
+};
+
+/* The charger and the charger regulator use the same regmap. */
+static int max77843_chg_init(struct max77843 *max77843)
+{
+ int ret;
+
+ max77843->i2c_chg = i2c_new_dummy(max77843->i2c->adapter, I2C_ADDR_CHG);
+ if (!max77843->i2c_chg) {
+ dev_err(&max77843->i2c->dev,
+ "Cannot allocate I2C device for Charger\n");
+		return -ENODEV;
+ }
+ i2c_set_clientdata(max77843->i2c_chg, max77843);
+
+ max77843->regmap_chg = devm_regmap_init_i2c(max77843->i2c_chg,
+ &max77843_charger_regmap_config);
+ if (IS_ERR(max77843->regmap_chg)) {
+ ret = PTR_ERR(max77843->regmap_chg);
+ goto err_chg_i2c;
+ }
+
+ return 0;
+
+err_chg_i2c:
+ i2c_unregister_device(max77843->i2c_chg);
+
+ return ret;
+}
+
+static int max77843_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max77843 *max77843;
+ unsigned int reg_data;
+ int ret;
+
+ max77843 = devm_kzalloc(&i2c->dev, sizeof(*max77843), GFP_KERNEL);
+ if (!max77843)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max77843);
+ max77843->dev = &i2c->dev;
+ max77843->i2c = i2c;
+ max77843->irq = i2c->irq;
+
+ max77843->regmap = devm_regmap_init_i2c(i2c,
+ &max77843_regmap_config);
+ if (IS_ERR(max77843->regmap)) {
+ dev_err(&i2c->dev, "Failed to allocate topsys register map\n");
+ return PTR_ERR(max77843->regmap);
+ }
+
+ ret = regmap_add_irq_chip(max77843->regmap, max77843->irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+ 0, &max77843_irq_chip, &max77843->irq_data);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to add TOPSYS IRQ chip\n");
+ return ret;
+ }
+
+ ret = regmap_read(max77843->regmap,
+ MAX77843_SYS_REG_PMICID, &reg_data);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to read PMIC ID\n");
+ goto err_pmic_id;
+ }
+ dev_info(&i2c->dev, "device ID: 0x%x\n", reg_data);
+
+ ret = max77843_chg_init(max77843);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to init Charger\n");
+ goto err_pmic_id;
+ }
+
+ ret = regmap_update_bits(max77843->regmap,
+ MAX77843_SYS_REG_INTSRCMASK,
+ MAX77843_INTSRC_MASK_MASK,
+ (unsigned int)~MAX77843_INTSRC_MASK_MASK);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to unmask interrupt source\n");
+ goto err_pmic_id;
+ }
+
+ ret = mfd_add_devices(max77843->dev, -1, max77843_devs,
+ ARRAY_SIZE(max77843_devs), NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to add mfd device\n");
+ goto err_pmic_id;
+ }
+
+ device_init_wakeup(max77843->dev, true);
+
+ return 0;
+
+err_pmic_id:
+ regmap_del_irq_chip(max77843->irq, max77843->irq_data);
+
+ return ret;
+}
+
+static int max77843_remove(struct i2c_client *i2c)
+{
+ struct max77843 *max77843 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max77843->dev);
+
+ regmap_del_irq_chip(max77843->irq, max77843->irq_data);
+
+ i2c_unregister_device(max77843->i2c_chg);
+
+ return 0;
+}
+
+static const struct of_device_id max77843_dt_match[] = {
+ { .compatible = "maxim,max77843", },
+ { },
+};
+
+static const struct i2c_device_id max77843_id[] = {
+ { "max77843", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, max77843_id);
+
+static int __maybe_unused max77843_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77843 *max77843 = i2c_get_clientdata(i2c);
+
+ disable_irq(max77843->irq);
+ if (device_may_wakeup(dev))
+ enable_irq_wake(max77843->irq);
+
+ return 0;
+}
+
+static int __maybe_unused max77843_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77843 *max77843 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(max77843->irq);
+ enable_irq(max77843->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(max77843_pm, max77843_suspend, max77843_resume);
+
+static struct i2c_driver max77843_i2c_driver = {
+ .driver = {
+ .name = "max77843",
+ .pm = &max77843_pm,
+ .of_match_table = max77843_dt_match,
+ },
+ .probe = max77843_probe,
+ .remove = max77843_remove,
+ .id_table = max77843_id,
+};
+
+static int __init max77843_i2c_init(void)
+{
+ return i2c_add_driver(&max77843_i2c_driver);
+}
+subsys_initcall(max77843_i2c_init);
+
+static void __exit max77843_i2c_exit(void)
+{
+ i2c_del_driver(&max77843_i2c_driver);
+}
+module_exit(max77843_i2c_exit);
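max77843_probe() registers the TOPSYS interrupts through regmap_add_irq_chip(), so a sub-device driver is expected to translate an entry of max77843_irqs[] into a Linux virq with regmap_irq_get_virq() before requesting it. A hedged sketch; the index 0 (SYSUVLO), the handler and the name string are illustrative:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/mfd/max77843-private.h>
#include <linux/regmap.h>

static int example_request_sysuvlo_irq(struct max77843 *max77843,
                                       irq_handler_t thread_fn, void *data)
{
        /* Index 0 corresponds to the first entry of max77843_irqs[]. */
        int virq = regmap_irq_get_virq(max77843->irq_data, 0);

        if (virq <= 0)
                return -EINVAL;

        return request_threaded_irq(virq, NULL, thread_fn, IRQF_ONESHOT,
                                    "max77843-sysuvlo", data);
}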
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 64dde5d24b32..25fd7116493a 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -51,19 +51,19 @@
void mc13xxx_lock(struct mc13xxx *mc13xxx)
{
if (!mutex_trylock(&mc13xxx->lock)) {
- dev_dbg(mc13xxx->dev, "wait for %s from %pf\n",
+ dev_dbg(mc13xxx->dev, "wait for %s from %ps\n",
__func__, __builtin_return_address(0));
mutex_lock(&mc13xxx->lock);
}
- dev_dbg(mc13xxx->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %ps\n",
__func__, __builtin_return_address(0));
}
EXPORT_SYMBOL(mc13xxx_lock);
void mc13xxx_unlock(struct mc13xxx *mc13xxx)
{
- dev_dbg(mc13xxx->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %ps\n",
__func__, __builtin_return_address(0));
mutex_unlock(&mc13xxx->lock);
}
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 9f01aef539dd..3ac36f5ccd3e 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -532,29 +532,6 @@ static const struct menelaus_vtg_value vcore_values[] = {
{ 1450, 18 },
};
-int menelaus_set_vcore_sw(unsigned int mV)
-{
- int val, ret;
- struct i2c_client *c = the_menelaus->client;
-
- val = menelaus_get_vtg_value(mV, vcore_values,
- ARRAY_SIZE(vcore_values));
- if (val < 0)
- return -EINVAL;
-
- dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val);
-
- /* Set SW mode and the voltage in one go. */
- mutex_lock(&the_menelaus->lock);
- ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
- if (ret == 0)
- the_menelaus->vcore_hw_mode = 0;
- mutex_unlock(&the_menelaus->lock);
- msleep(1);
-
- return ret;
-}
-
int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
{
int fval, rval, val, ret;
@@ -1239,7 +1216,7 @@ static int menelaus_probe(struct i2c_client *client,
err = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
if (err < 0)
goto fail;
- if (err & BIT(7))
+ if (err & VCORE_CTRL1_HW_NSW)
menelaus->vcore_hw_mode = 1;
else
menelaus->vcore_hw_mode = 0;
@@ -1259,7 +1236,7 @@ fail:
return err;
}
-static int __exit menelaus_remove(struct i2c_client *client)
+static int menelaus_remove(struct i2c_client *client)
{
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
@@ -1280,7 +1257,7 @@ static struct i2c_driver menelaus_i2c_driver = {
.name = DRIVER_NAME,
},
.probe = menelaus_probe,
- .remove = __exit_p(menelaus_remove),
+ .remove = menelaus_remove,
.id_table = menelaus_id,
};
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 2a87f69be53d..1aed3b7b8d9b 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id,
int platform_id;
int r;
- if (id < 0)
+ if (id == PLATFORM_DEVID_AUTO)
platform_id = id;
else
platform_id = id + cell->id;
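The mfd-core change above only bypasses the per-cell id offset when the caller asks for PLATFORM_DEVID_AUTO, so automatically assigned ids survive untouched. A minimal usage sketch with hypothetical parent/cells arguments:

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static int example_register_cells(struct device *parent,
                                  const struct mfd_cell *cells, int n_cells)
{
        /* Each cell gets a unique, automatically assigned platform id. */
        return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_cells,
                               NULL, 0, NULL);
}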
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
new file mode 100644
index 000000000000..09bc7804952a
--- /dev/null
+++ b/drivers/mfd/mt6397-core.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/registers.h>
+
+static const struct mfd_cell mt6397_devs[] = {
+ {
+ .name = "mt6397-rtc",
+ .of_compatible = "mediatek,mt6397-rtc",
+ }, {
+ .name = "mt6397-regulator",
+ .of_compatible = "mediatek,mt6397-regulator",
+ }, {
+ .name = "mt6397-codec",
+ .of_compatible = "mediatek,mt6397-codec",
+ }, {
+ .name = "mt6397-clk",
+ .of_compatible = "mediatek,mt6397-clk",
+ },
+};
+
+static void mt6397_irq_lock(struct irq_data *data)
+{
+ struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+
+ mutex_lock(&mt6397->irqlock);
+}
+
+static void mt6397_irq_sync_unlock(struct irq_data *data)
+{
+ struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+
+ regmap_write(mt6397->regmap, MT6397_INT_CON0, mt6397->irq_masks_cur[0]);
+ regmap_write(mt6397->regmap, MT6397_INT_CON1, mt6397->irq_masks_cur[1]);
+
+ mutex_unlock(&mt6397->irqlock);
+}
+
+static void mt6397_irq_disable(struct irq_data *data)
+{
+ struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+ int shift = data->hwirq & 0xf;
+ int reg = data->hwirq >> 4;
+
+ mt6397->irq_masks_cur[reg] &= ~BIT(shift);
+}
+
+static void mt6397_irq_enable(struct irq_data *data)
+{
+ struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+ int shift = data->hwirq & 0xf;
+ int reg = data->hwirq >> 4;
+
+ mt6397->irq_masks_cur[reg] |= BIT(shift);
+}
+
+static struct irq_chip mt6397_irq_chip = {
+ .name = "mt6397-irq",
+ .irq_bus_lock = mt6397_irq_lock,
+ .irq_bus_sync_unlock = mt6397_irq_sync_unlock,
+ .irq_enable = mt6397_irq_enable,
+ .irq_disable = mt6397_irq_disable,
+};
+
+static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
+ int irqbase)
+{
+ unsigned int status;
+ int i, irq, ret;
+
+ ret = regmap_read(mt6397->regmap, reg, &status);
+ if (ret) {
+ dev_err(mt6397->dev, "Failed to read irq status: %d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (status & BIT(i)) {
+ irq = irq_find_mapping(mt6397->irq_domain, irqbase + i);
+ if (irq)
+ handle_nested_irq(irq);
+ }
+ }
+
+ regmap_write(mt6397->regmap, reg, status);
+}
+
+static irqreturn_t mt6397_irq_thread(int irq, void *data)
+{
+ struct mt6397_chip *mt6397 = data;
+
+ mt6397_irq_handle_reg(mt6397, MT6397_INT_STATUS0, 0);
+ mt6397_irq_handle_reg(mt6397, MT6397_INT_STATUS1, 16);
+
+ return IRQ_HANDLED;
+}
+
+static int mt6397_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct mt6397_chip *mt6397 = d->host_data;
+
+ irq_set_chip_data(irq, mt6397);
+ irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops mt6397_irq_domain_ops = {
+ .map = mt6397_irq_domain_map,
+};
+
+static int mt6397_irq_init(struct mt6397_chip *mt6397)
+{
+ int ret;
+
+ mutex_init(&mt6397->irqlock);
+
+ /* Mask all interrupt sources */
+ regmap_write(mt6397->regmap, MT6397_INT_CON0, 0x0);
+ regmap_write(mt6397->regmap, MT6397_INT_CON1, 0x0);
+
+ mt6397->irq_domain = irq_domain_add_linear(mt6397->dev->of_node,
+ MT6397_IRQ_NR, &mt6397_irq_domain_ops, mt6397);
+ if (!mt6397->irq_domain) {
+ dev_err(mt6397->dev, "could not create irq domain\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_request_threaded_irq(mt6397->dev, mt6397->irq, NULL,
+ mt6397_irq_thread, IRQF_ONESHOT, "mt6397-pmic", mt6397);
+ if (ret) {
+ dev_err(mt6397->dev, "failed to register irq=%d; err: %d\n",
+ mt6397->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt6397_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mt6397_chip *mt6397;
+
+ mt6397 = devm_kzalloc(&pdev->dev, sizeof(*mt6397), GFP_KERNEL);
+ if (!mt6397)
+ return -ENOMEM;
+
+ mt6397->dev = &pdev->dev;
+ /*
+	 * The MT6397 MFD is a child device of the SoC PMIC wrapper;
+	 * its regmap is obtained from the parent device.
+ */
+ mt6397->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!mt6397->regmap)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, mt6397);
+
+ mt6397->irq = platform_get_irq(pdev, 0);
+ if (mt6397->irq > 0) {
+ ret = mt6397_irq_init(mt6397);
+ if (ret)
+ return ret;
+ }
+
+ ret = mfd_add_devices(&pdev->dev, -1, mt6397_devs,
+ ARRAY_SIZE(mt6397_devs), NULL, 0, NULL);
+ if (ret)
+ dev_err(&pdev->dev, "failed to add child devices: %d\n", ret);
+
+ return ret;
+}
+
+static int mt6397_remove(struct platform_device *pdev)
+{
+ mfd_remove_devices(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt6397_of_match[] = {
+ { .compatible = "mediatek,mt6397" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt6397_of_match);
+
+static struct platform_driver mt6397_driver = {
+ .probe = mt6397_probe,
+ .remove = mt6397_remove,
+ .driver = {
+ .name = "mt6397",
+ .of_match_table = of_match_ptr(mt6397_of_match),
+ },
+};
+
+module_platform_driver(mt6397_driver);
+
+MODULE_AUTHOR("Flora Fu, MediaTek");
+MODULE_DESCRIPTION("Driver for MediaTek MT6397 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mt6397");
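mt6397_irq_init() exposes the PMIC interrupts through a linear irq_domain, so a cell driver maps its hardware interrupt number to a virq before requesting it. A hedged sketch; EXAMPLE_HWIRQ and the helper name are assumptions, not taken from the datasheet or this patch:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/mfd/mt6397/core.h>

#define EXAMPLE_HWIRQ 9

static int example_map_mt6397_irq(struct mt6397_chip *mt6397)
{
        /* irq_create_mapping() returns 0 on failure, otherwise the virq. */
        unsigned int virq = irq_create_mapping(mt6397->irq_domain,
                                               EXAMPLE_HWIRQ);

        return virq ? (int)virq : -EINVAL;
}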
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 4b8beb2a1579..af6ac1c4b45c 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -17,6 +17,100 @@
#include <linux/regmap.h>
#include <linux/of_platform.h>
+#define PMIC_REV2 0x101
+#define PMIC_REV3 0x102
+#define PMIC_REV4 0x103
+#define PMIC_TYPE 0x104
+#define PMIC_SUBTYPE 0x105
+
+#define PMIC_TYPE_VALUE 0x51
+
+#define COMMON_SUBTYPE 0x00
+#define PM8941_SUBTYPE 0x01
+#define PM8841_SUBTYPE 0x02
+#define PM8019_SUBTYPE 0x03
+#define PM8226_SUBTYPE 0x04
+#define PM8110_SUBTYPE 0x05
+#define PMA8084_SUBTYPE 0x06
+#define PMI8962_SUBTYPE 0x07
+#define PMD9635_SUBTYPE 0x08
+#define PM8994_SUBTYPE 0x09
+#define PMI8994_SUBTYPE 0x0a
+#define PM8916_SUBTYPE 0x0b
+#define PM8004_SUBTYPE 0x0c
+#define PM8909_SUBTYPE 0x0d
+
+static const struct of_device_id pmic_spmi_id_table[] = {
+ { .compatible = "qcom,spmi-pmic", .data = (void *)COMMON_SUBTYPE },
+ { .compatible = "qcom,pm8941", .data = (void *)PM8941_SUBTYPE },
+ { .compatible = "qcom,pm8841", .data = (void *)PM8841_SUBTYPE },
+ { .compatible = "qcom,pm8019", .data = (void *)PM8019_SUBTYPE },
+ { .compatible = "qcom,pm8226", .data = (void *)PM8226_SUBTYPE },
+ { .compatible = "qcom,pm8110", .data = (void *)PM8110_SUBTYPE },
+ { .compatible = "qcom,pma8084", .data = (void *)PMA8084_SUBTYPE },
+ { .compatible = "qcom,pmi8962", .data = (void *)PMI8962_SUBTYPE },
+ { .compatible = "qcom,pmd9635", .data = (void *)PMD9635_SUBTYPE },
+ { .compatible = "qcom,pm8994", .data = (void *)PM8994_SUBTYPE },
+ { .compatible = "qcom,pmi8994", .data = (void *)PMI8994_SUBTYPE },
+ { .compatible = "qcom,pm8916", .data = (void *)PM8916_SUBTYPE },
+ { .compatible = "qcom,pm8004", .data = (void *)PM8004_SUBTYPE },
+ { .compatible = "qcom,pm8909", .data = (void *)PM8909_SUBTYPE },
+ { }
+};
+
+static void pmic_spmi_show_revid(struct regmap *map, struct device *dev)
+{
+ unsigned int rev2, minor, major, type, subtype;
+ const char *name = "unknown";
+ int ret, i;
+
+ ret = regmap_read(map, PMIC_TYPE, &type);
+ if (ret < 0)
+ return;
+
+ if (type != PMIC_TYPE_VALUE)
+ return;
+
+ ret = regmap_read(map, PMIC_SUBTYPE, &subtype);
+ if (ret < 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(pmic_spmi_id_table); i++) {
+ if (subtype == (unsigned long)pmic_spmi_id_table[i].data)
+ break;
+ }
+
+ if (i != ARRAY_SIZE(pmic_spmi_id_table))
+ name = pmic_spmi_id_table[i].compatible;
+
+ ret = regmap_read(map, PMIC_REV2, &rev2);
+ if (ret < 0)
+ return;
+
+ ret = regmap_read(map, PMIC_REV3, &minor);
+ if (ret < 0)
+ return;
+
+ ret = regmap_read(map, PMIC_REV4, &major);
+ if (ret < 0)
+ return;
+
+ /*
+ * In early versions of PM8941 and PM8226, the major revision number
+	 * started incrementing from 0 (e.g. 0 = v1.0, 1 = v2.0).
+ * Increment the major revision number here if the chip is an early
+ * version of PM8941 or PM8226.
+ */
+ if ((subtype == PM8941_SUBTYPE || subtype == PM8226_SUBTYPE) &&
+ major < 0x02)
+ major++;
+
+ if (subtype == PM8110_SUBTYPE)
+ minor = rev2;
+
+ dev_dbg(dev, "%x: %s v%d.%d\n", subtype, name, major, minor);
+}
+
static const struct regmap_config spmi_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -33,6 +127,8 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ pmic_spmi_show_revid(regmap, &sdev->dev);
+
return of_platform_populate(root, NULL, NULL, &sdev->dev);
}
@@ -41,13 +137,6 @@ static void pmic_spmi_remove(struct spmi_device *sdev)
of_platform_depopulate(&sdev->dev);
}
-static const struct of_device_id pmic_spmi_id_table[] = {
- { .compatible = "qcom,spmi-pmic" },
- { .compatible = "qcom,pm8941" },
- { .compatible = "qcom,pm8841" },
- { .compatible = "qcom,pma8084" },
- { }
-};
MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
static struct spmi_driver pmic_spmi_driver = {
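For readers skimming the hunk above: pmic_spmi_show_revid() resolves the numeric PMIC_SUBTYPE register value to a printable name by scanning the sentinel-terminated of_device_id table and falling back to "unknown". A minimal standalone sketch of that lookup pattern — plain C with invented names, not kernel code — might look like:

#include <stdio.h>
#include <stddef.h>

struct subtype_id {
	const char *compatible;	/* NULL marks the sentinel entry */
	unsigned long data;	/* numeric subtype, as stashed in .data above */
};

static const struct subtype_id demo_table[] = {
	{ "qcom,pm8941", 0x01 },
	{ "qcom,pm8841", 0x02 },
	{ }	/* sentinel */
};

static const char *demo_subtype_name(unsigned long subtype)
{
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		if (demo_table[i].compatible && demo_table[i].data == subtype)
			return demo_table[i].compatible;

	return "unknown";	/* mirrors the fallback in pmic_spmi_show_revid() */
}

int main(void)
{
	printf("%s\n", demo_subtype_name(0x02));	/* prints qcom,pm8841 */
	return 0;
}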
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index f696328c2933..12e324319573 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -323,10 +323,51 @@ static const struct qcom_rpm_data msm8960_template = {
.n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
};
+static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
+ [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
+ [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
+ [QCOM_RPM_APPS_FABRIC_CLK] = { 27, 11, 8, 1 },
+ [QCOM_RPM_SYS_FABRIC_CLK] = { 28, 12, 9, 1 },
+ [QCOM_RPM_NSS_FABRIC_0_CLK] = { 29, 13, 10, 1 },
+ [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 30, 14, 11, 1 },
+ [QCOM_RPM_SFPB_CLK] = { 31, 15, 12, 1 },
+ [QCOM_RPM_CFPB_CLK] = { 32, 16, 13, 1 },
+ [QCOM_RPM_NSS_FABRIC_1_CLK] = { 33, 17, 14, 1 },
+ [QCOM_RPM_EBI1_CLK] = { 34, 18, 16, 1 },
+ [QCOM_RPM_APPS_FABRIC_HALT] = { 35, 19, 18, 2 },
+ [QCOM_RPM_APPS_FABRIC_MODE] = { 37, 20, 19, 3 },
+ [QCOM_RPM_APPS_FABRIC_IOCTL] = { 40, 21, 20, 1 },
+ [QCOM_RPM_APPS_FABRIC_ARB] = { 41, 22, 21, 12 },
+ [QCOM_RPM_SYS_FABRIC_HALT] = { 53, 23, 22, 2 },
+ [QCOM_RPM_SYS_FABRIC_MODE] = { 55, 24, 23, 3 },
+ [QCOM_RPM_SYS_FABRIC_IOCTL] = { 58, 25, 24, 1 },
+ [QCOM_RPM_SYS_FABRIC_ARB] = { 59, 26, 25, 30 },
+ [QCOM_RPM_MM_FABRIC_HALT] = { 89, 27, 26, 2 },
+ [QCOM_RPM_MM_FABRIC_MODE] = { 91, 28, 27, 3 },
+ [QCOM_RPM_MM_FABRIC_IOCTL] = { 94, 29, 28, 1 },
+ [QCOM_RPM_MM_FABRIC_ARB] = { 95, 30, 29, 2 },
+ [QCOM_RPM_CXO_BUFFERS] = { 209, 33, 31, 1 },
+ [QCOM_RPM_USB_OTG_SWITCH] = { 210, 34, 32, 1 },
+ [QCOM_RPM_HDMI_SWITCH] = { 211, 35, 33, 1 },
+ [QCOM_RPM_DDR_DMM] = { 212, 36, 34, 2 },
+ [QCOM_RPM_VDDMIN_GPIO] = { 215, 40, 39, 1 },
+ [QCOM_RPM_SMB208_S1a] = { 216, 41, 90, 2 },
+ [QCOM_RPM_SMB208_S1b] = { 218, 43, 91, 2 },
+ [QCOM_RPM_SMB208_S2a] = { 220, 45, 92, 2 },
+ [QCOM_RPM_SMB208_S2b] = { 222, 47, 93, 2 },
+};
+
+static const struct qcom_rpm_data ipq806x_template = {
+ .version = 3,
+ .resource_table = ipq806x_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
+};
+
static const struct of_device_id qcom_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8064", .data = &apq8064_template },
{ .compatible = "qcom,rpm-msm8660", .data = &msm8660_template },
{ .compatible = "qcom,rpm-msm8960", .data = &msm8960_template },
+ { .compatible = "qcom,rpm-ipq8064", .data = &ipq806x_template },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index bd0215069875..4b1e4399754b 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -89,6 +89,7 @@ static const struct rk808_reg_data pre_init_reg[] = {
{ RK808_BOOST_CONFIG_REG, BOOST_ILMIN_MASK, BOOST_ILMIN_100MA },
{ RK808_BUCK1_CONFIG_REG, BUCK1_RATE_MASK, BUCK_ILMIN_200MA },
{ RK808_BUCK2_CONFIG_REG, BUCK2_RATE_MASK, BUCK_ILMIN_200MA },
+ { RK808_DCDC_UV_ACT_REG, BUCK_UV_ACT_MASK, BUCK_UV_ACT_DISABLE},
{ RK808_VB_MON_REG, MASK_ALL, VB_LO_ACT |
VB_LO_SEL_3500MV },
};
@@ -245,7 +246,7 @@ static int rk808_remove(struct i2c_client *client)
return 0;
}
-static struct of_device_id rk808_of_match[] = {
+static const struct of_device_id rk808_of_match[] = {
{ .compatible = "rockchip,rk808" },
{ },
};
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index fdd34c883d86..b3ae6592014a 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -53,7 +53,7 @@ static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
u8 reg3 = 0;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
if (!rtsx_vendor_setting_valid(reg1))
return;
@@ -65,7 +65,7 @@ static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
}
@@ -74,7 +74,7 @@ static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
u32 reg = 0;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
return;
@@ -260,9 +260,8 @@ static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
rtsx_pci_write_register(pcr, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_OFF);
- dev_dbg(&(pcr->pci->dev),
- "After CD deglitch, card_exist = 0x%x\n",
- card_exist);
+ pcr_dbg(pcr, "After CD deglitch, card_exist = 0x%x\n",
+ card_exist);
}
if (card_exist & MS_EXIST) {
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
index cb04174a8924..373e253c33df 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/mfd/rts5209.c
@@ -38,7 +38,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
u32 reg;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (rts5209_vendor_setting1_valid(reg)) {
if (rts5209_reg_check_ms_pmos(reg))
@@ -47,7 +47,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
}
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (rts5209_vendor_setting2_valid(reg)) {
pcr->sd30_drive_sel_1v8 =
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
index 32407404d838..ce012d78ce2a 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/mfd/rts5227.c
@@ -63,7 +63,7 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
u32 reg;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
return;
@@ -74,7 +74,7 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
@@ -118,11 +118,9 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
rts5227_fill_driving(pcr, OUTPUT_3V3);
/* Configure force_clock_req */
if (pcr->flags & PCR_REVERSE_SOCKET)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- AUTOLOAD_CFG_BASE + 3, 0xB8, 0xB8);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0xB8);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- AUTOLOAD_CFG_BASE + 3, 0xB8, 0x88);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0x88);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x10, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
@@ -132,7 +130,7 @@ static int rts5227_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
- err = rtsx_gops_pm_reset(pcr);
+ err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
if (err < 0)
return err;
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
index 6353f5df087a..ace45384ec8b 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/mfd/rts5229.c
@@ -38,7 +38,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
u32 reg;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
return;
@@ -50,7 +50,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
}
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index cf425cc959d5..eb2d5866f719 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -36,16 +36,16 @@ static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr)
static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
{
u8 driving_3v3[4][3] = {
- {0x11, 0x11, 0x11},
+ {0x11, 0x11, 0x18},
{0x55, 0x55, 0x5C},
- {0x99, 0x99, 0x92},
- {0x99, 0x99, 0x92},
+ {0xFF, 0xFF, 0xFF},
+ {0x96, 0x96, 0x96},
};
u8 driving_1v8[4][3] = {
+ {0xC4, 0xC4, 0xC4},
{0x3C, 0x3C, 0x3C},
- {0xB3, 0xB3, 0xB3},
{0xFE, 0xFE, 0xFE},
- {0xC4, 0xC4, 0xC4},
+ {0xB3, 0xB3, 0xB3},
};
u8 (*driving)[3], drive_sel;
@@ -65,15 +65,17 @@ static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
0xFF, driving[drive_sel][2]);
}
-static void rts5249_fetch_vendor_settings(struct rtsx_pcr *pcr)
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
u32 reg;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
- if (!rtsx_vendor_setting_valid(reg))
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
return;
+ }
pcr->aspm_en = rtsx_reg_to_aspm(reg);
pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
@@ -81,13 +83,13 @@ static void rts5249_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
-static void rts5249_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
@@ -95,7 +97,8 @@ static void rts5249_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
if (pm_state == HOST_ENTER_S3)
- rtsx_pci_write_register(pcr, PM_CTRL3, 0x10, 0x10);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
}
@@ -104,6 +107,8 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_init_cmd(pcr);
+ /* Reset L1SUB Config */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
/* Reset ASPM state to default value */
@@ -116,12 +121,9 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
/* Configure driving */
rts5249_fill_driving(pcr, OUTPUT_3V3);
if (pcr->flags & PCR_REVERSE_SOCKET)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- AUTOLOAD_CFG_BASE + 3, 0xB0, 0xB0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- AUTOLOAD_CFG_BASE + 3, 0xB0, 0x80);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x10, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
return rtsx_pci_send_cmd(pcr, 100);
}
@@ -130,15 +132,16 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
- err = rtsx_gops_pm_reset(pcr);
+ err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
if (err < 0)
return err;
- err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV,
- PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED |
- PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN |
- PHY_REG_REV_RX_PWST | PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 |
- PHY_REG_REV_STOP_CLKRD | PHY_REG_REV_STOP_CLKWR);
+ err = rtsx_pci_write_phy_register(pcr, PHY_REV,
+ PHY_REV_RESV | PHY_REV_RXIDLE_LATCHED |
+ PHY_REV_P1_EN | PHY_REV_RXIDLE_EN |
+ PHY_REV_CLKREQ_TX_EN | PHY_REV_RX_PWST |
+ PHY_REV_CLKREQ_DT_1_0 | PHY_REV_STOP_CLKRD |
+ PHY_REV_STOP_CLKWR);
if (err < 0)
return err;
@@ -149,19 +152,21 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
if (err < 0)
return err;
+
err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
- PHY_PCR_RSSI_EN);
+ PHY_PCR_RSSI_EN | PHY_PCR_RX10K);
if (err < 0)
return err;
+
err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
- PHY_RCR2_CDR_CP_10 | PHY_RCR2_CDR_SR_2 |
- PHY_RCR2_FREQSEL_12 | PHY_RCR2_CPADJEN |
- PHY_RCR2_CDR_SC_8 | PHY_RCR2_CALIB_LATE);
+ PHY_RCR2_CDR_SR_2 | PHY_RCR2_FREQSEL_12 |
+ PHY_RCR2_CDR_SC_12P | PHY_RCR2_CALIB_LATE);
if (err < 0)
return err;
+
err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
@@ -169,11 +174,12 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
PHY_FLD4_BER_CHK_EN);
if (err < 0)
return err;
- err = rtsx_pci_write_phy_register(pcr, PHY_RDR, PHY_RDR_RXDSEL_1_9);
+ err = rtsx_pci_write_phy_register(pcr, PHY_RDR,
+ PHY_RDR_RXDSEL_1_9 | PHY_SSC_AUTO_PWD);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
- PHY_RCR1_ADP_TIME | PHY_RCR1_VCO_COARSE);
+ PHY_RCR1_ADP_TIME_4 | PHY_RCR1_VCO_COARSE);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
@@ -181,33 +187,34 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
PHY_FLD3_RXDELINK);
if (err < 0)
return err;
+
return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
- PHY_TUNE_TUNED12);
+ PHY_TUNE_TUNED12 | PHY_TUNE_TUNEA12);
}
-static int rts5249_turn_on_led(struct rtsx_pcr *pcr)
+static int rtsx_base_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
}
-static int rts5249_turn_off_led(struct rtsx_pcr *pcr)
+static int rtsx_base_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
}
-static int rts5249_enable_auto_blink(struct rtsx_pcr *pcr)
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
}
-static int rts5249_disable_auto_blink(struct rtsx_pcr *pcr)
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
}
-static int rts5249_card_power_on(struct rtsx_pcr *pcr, int card)
+static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
@@ -234,7 +241,7 @@ static int rts5249_card_power_on(struct rtsx_pcr *pcr, int card)
return 0;
}
-static int rts5249_card_power_off(struct rtsx_pcr *pcr, int card)
+static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card)
{
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
@@ -244,22 +251,35 @@ static int rts5249_card_power_off(struct rtsx_pcr *pcr, int card)
return rtsx_pci_send_cmd(pcr, 100);
}
-static int rts5249_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
+ u16 append;
- if (voltage == OUTPUT_3V3) {
- err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4FC0 | 0x24);
+ switch (voltage) {
+ case OUTPUT_3V3:
+ err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
+ PHY_TUNE_VOLTAGE_3V3);
if (err < 0)
return err;
- } else if (voltage == OUTPUT_1V8) {
- err = rtsx_pci_write_phy_register(pcr, PHY_BACR, 0x3C02);
+ break;
+ case OUTPUT_1V8:
+ append = PHY_TUNE_D18_1V8;
+ if (CHK_PCI_PID(pcr, 0x5249)) {
+ err = rtsx_pci_update_phy(pcr, PHY_BACR,
+ PHY_BACR_BASIC_MASK, 0);
+ if (err < 0)
+ return err;
+ append = PHY_TUNE_D18_1V7;
+ }
+
+ err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
+ append);
if (err < 0)
return err;
- err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4C40 | 0x24);
- if (err < 0)
- return err;
- } else {
+ break;
+ default:
+ pcr_dbg(pcr, "unknown output voltage %d\n", voltage);
return -EINVAL;
}
@@ -270,17 +290,17 @@ static int rts5249_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
}
static const struct pcr_ops rts5249_pcr_ops = {
- .fetch_vendor_settings = rts5249_fetch_vendor_settings,
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
.optimize_phy = rts5249_optimize_phy,
- .turn_on_led = rts5249_turn_on_led,
- .turn_off_led = rts5249_turn_off_led,
- .enable_auto_blink = rts5249_enable_auto_blink,
- .disable_auto_blink = rts5249_disable_auto_blink,
- .card_power_on = rts5249_card_power_on,
- .card_power_off = rts5249_card_power_off,
- .switch_output_voltage = rts5249_switch_output_voltage,
- .force_power_down = rts5249_force_power_down,
+ .turn_on_led = rtsx_base_turn_on_led,
+ .turn_off_led = rtsx_base_turn_off_led,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rtsx_base_card_power_on,
+ .card_power_off = rtsx_base_card_power_off,
+ .switch_output_voltage = rtsx_base_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
};
/* SD Pull Control Enable:
@@ -343,7 +363,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->flags = 0;
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
- pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_C;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
@@ -354,4 +374,219 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl;
pcr->ms_pull_ctl_enable_tbl = rts5249_ms_pull_ctl_enable_tbl;
pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = PM_CTRL3;
+}
+
+static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
+{
+ addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
+
+ return __rtsx_pci_write_phy_register(pcr, addr, val);
+}
+
+static int rts524a_read_phy(struct rtsx_pcr *pcr, u8 addr, u16 *val)
+{
+ addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
+
+ return __rtsx_pci_read_phy_register(pcr, addr, val);
}
+
+static int rts524a_optimize_phy(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
+ D3_DELINK_MODE_EN, 0x00);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_phy_register(pcr, PHY_PCR,
+ PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
+ PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 | PHY_PCR_RSSI_EN);
+ rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
+ PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
+
+ if (is_version(pcr, 0x524A, IC_VER_A)) {
+ rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
+ PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
+ rtsx_pci_write_phy_register(pcr, PHY_SSCCR2,
+ PHY_SSCCR2_PLL_NCODE | PHY_SSCCR2_TIME0 |
+ PHY_SSCCR2_TIME2_WIDTH);
+ rtsx_pci_write_phy_register(pcr, PHY_ANA1A,
+ PHY_ANA1A_TXR_LOOPBACK | PHY_ANA1A_RXT_BIST |
+ PHY_ANA1A_TXR_BIST | PHY_ANA1A_REV);
+ rtsx_pci_write_phy_register(pcr, PHY_ANA1D,
+ PHY_ANA1D_DEBUG_ADDR);
+ rtsx_pci_write_phy_register(pcr, PHY_DIG1E,
+ PHY_DIG1E_REV | PHY_DIG1E_D0_X_D1 |
+ PHY_DIG1E_RX_ON_HOST | PHY_DIG1E_RCLK_REF_HOST |
+ PHY_DIG1E_RCLK_TX_EN_KEEP |
+ PHY_DIG1E_RCLK_TX_TERM_KEEP |
+ PHY_DIG1E_RCLK_RX_EIDLE_ON | PHY_DIG1E_TX_TERM_KEEP |
+ PHY_DIG1E_RX_TERM_KEEP | PHY_DIG1E_TX_EN_KEEP |
+ PHY_DIG1E_RX_EN_KEEP);
+ }
+
+ rtsx_pci_write_phy_register(pcr, PHY_ANA08,
+ PHY_ANA08_RX_EQ_DCGAIN | PHY_ANA08_SEL_RX_EN |
+ PHY_ANA08_RX_EQ_VAL | PHY_ANA08_SCP | PHY_ANA08_SEL_IPI);
+
+ return 0;
+}
+
+static int rts524a_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ rts5249_extra_init_hw(pcr);
+
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FORCE_ASPM_L1_EN, FORCE_ASPM_L1_EN);
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_VCC_LMT_EN,
+ LDO_VCC_LMT_EN);
+ rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+ if (is_version(pcr, 0x524A, IC_VER_A)) {
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ LDO_DV18_SR_MASK, LDO_DV18_SR_DF);
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
+ LDO_VCC_REF_TUNE_MASK, LDO_VCC_REF_1V2);
+ rtsx_pci_write_register(pcr, LDO_VIO_CFG,
+ LDO_VIO_REF_TUNE_MASK, LDO_VIO_REF_1V2);
+ rtsx_pci_write_register(pcr, LDO_VIO_CFG,
+ LDO_VIO_SR_MASK, LDO_VIO_SR_DF);
+ rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
+ LDO_REF12_TUNE_MASK, LDO_REF12_TUNE_DF);
+ rtsx_pci_write_register(pcr, SD40_LDO_CTL1,
+ SD40_VIO_TUNE_MASK, SD40_VIO_TUNE_1V7);
+ }
+
+ return 0;
+}
+
+static const struct pcr_ops rts524a_pcr_ops = {
+ .write_phy = rts524a_write_phy,
+ .read_phy = rts524a_read_phy,
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+ .extra_init_hw = rts524a_extra_init_hw,
+ .optimize_phy = rts524a_optimize_phy,
+ .turn_on_led = rtsx_base_turn_on_led,
+ .turn_off_led = rtsx_base_turn_off_led,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rtsx_base_card_power_on,
+ .card_power_off = rtsx_base_card_power_off,
+ .switch_output_voltage = rtsx_base_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
+};
+
+void rts524a_init_params(struct rtsx_pcr *pcr)
+{
+ rts5249_init_params(pcr);
+
+ pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+ pcr->ops = &rts524a_pcr_ops;
+}
+
+static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
+ LDO_VCC_TUNE_MASK, LDO_VCC_3V3);
+ return rtsx_base_card_power_on(pcr, card);
+}
+
+static int rts525a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ LDO_D3318_MASK, LDO_D3318_33V);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ LDO_D3318_MASK, LDO_D3318_18V);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+ rts5249_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts525a_optimize_phy(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
+ D3_DELINK_MODE_EN, 0x00);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_phy_register(pcr, _PHY_FLD0,
+ _PHY_FLD0_CLK_REQ_20C | _PHY_FLD0_RX_IDLE_EN |
+ _PHY_FLD0_BIT_ERR_RSTN | _PHY_FLD0_BER_COUNT |
+ _PHY_FLD0_BER_TIMER | _PHY_FLD0_CHECK_EN);
+
+ rtsx_pci_write_phy_register(pcr, _PHY_ANA03,
+ _PHY_ANA03_TIMER_MAX | _PHY_ANA03_OOBS_DEB_EN |
+ _PHY_CMU_DEBUG_EN);
+
+ if (is_version(pcr, 0x525A, IC_VER_A))
+ rtsx_pci_write_phy_register(pcr, _PHY_REV0,
+ _PHY_REV0_FILTER_OUT | _PHY_REV0_CDR_BYPASS_PFD |
+ _PHY_REV0_CDR_RX_IDLE_BYPASS);
+
+ return 0;
+}
+
+static int rts525a_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ rts5249_extra_init_hw(pcr);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+ if (is_version(pcr, 0x525A, IC_VER_A)) {
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG2,
+ L1SUB_AUTO_CFG, L1SUB_AUTO_CFG);
+ rtsx_pci_write_register(pcr, RREF_CFG,
+ RREF_VBGSEL_MASK, RREF_VBGSEL_1V25);
+ rtsx_pci_write_register(pcr, LDO_VIO_CFG,
+ LDO_VIO_TUNE_MASK, LDO_VIO_1V7);
+ rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
+ LDO_D12_TUNE_MASK, LDO_D12_TUNE_DF);
+ rtsx_pci_write_register(pcr, LDO_AV12S_CFG,
+ LDO_AV12S_TUNE_MASK, LDO_AV12S_TUNE_DF);
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+ LDO_VCC_LMTVTH_MASK, LDO_VCC_LMTVTH_2A);
+ rtsx_pci_write_register(pcr, OOBS_CONFIG,
+ OOBS_AUTOK_DIS | OOBS_VAL_MASK, 0x89);
+ }
+
+ return 0;
+}
+
+static const struct pcr_ops rts525a_pcr_ops = {
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+ .extra_init_hw = rts525a_extra_init_hw,
+ .optimize_phy = rts525a_optimize_phy,
+ .turn_on_led = rtsx_base_turn_on_led,
+ .turn_off_led = rtsx_base_turn_off_led,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rts525a_card_power_on,
+ .card_power_off = rtsx_base_card_power_off,
+ .switch_output_voltage = rts525a_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
+};
+
+void rts525a_init_params(struct rtsx_pcr *pcr)
+{
+ rts5249_init_params(pcr);
+
+ pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+ pcr->ops = &rts525a_pcr_ops;
+}
+
diff --git a/drivers/mfd/rtsx_gops.c b/drivers/mfd/rtsx_gops.c
deleted file mode 100644
index b1a98c678593..000000000000
--- a/drivers/mfd/rtsx_gops.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- * Micky Ching <micky_ching@realsil.com.cn>
- */
-
-#include <linux/mfd/rtsx_pci.h>
-#include "rtsx_pcr.h"
-
-int rtsx_gops_pm_reset(struct rtsx_pcr *pcr)
-{
- int err;
-
- /* init aspm */
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0x00);
- err = rtsx_pci_update_cfg_byte(pcr, LCTLR, ~LCTLR_ASPM_CTL_MASK, 0x00);
- if (err < 0)
- return err;
-
- /* reset PM_CTRL3 before send buffer cmd */
- return rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
-}
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 30f7ca89a0e6..a66540a49079 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -58,11 +58,25 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
+static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ 0xFC, pcr->aspm_en);
+}
+
+static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ 0xFC, 0);
+}
+
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
/* If pci device removed, don't queue idle work any more */
@@ -75,7 +89,7 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
pcr->ops->enable_auto_blink(pcr);
if (pcr->aspm_en)
- rtsx_pci_write_config_byte(pcr, LCTLR, 0);
+ rtsx_pci_disable_aspm(pcr);
}
mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
@@ -130,7 +144,7 @@ int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
-int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
+int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
int err, i, finished = 0;
u8 tmp;
@@ -162,9 +176,17 @@ int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
return 0;
}
+
+int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
+{
+ if (pcr->ops->write_phy)
+ return pcr->ops->write_phy(pcr, addr, val);
+
+ return __rtsx_pci_write_phy_register(pcr, addr, val);
+}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
-int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
+int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
int err, i, finished = 0;
u16 data;
@@ -210,6 +232,14 @@ int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
return 0;
}
+
+int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
+{
+ if (pcr->ops->read_phy)
+ return pcr->ops->read_phy(pcr, addr, val);
+
+ return __rtsx_pci_read_phy_register(pcr, addr, val);
+}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
@@ -286,8 +316,7 @@ int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
timeleft = wait_for_completion_interruptible_timeout(
&trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
- dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n",
- __func__, __LINE__);
+ pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
err = -ETIMEDOUT;
goto finish_send_cmd;
}
@@ -323,8 +352,7 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
u64 val;
u8 option = SG_VALID | SG_TRANS_DATA;
- dev_dbg(&(pcr->pci->dev), "DMA addr: 0x%x, Len: 0x%x\n",
- (unsigned int)addr, len);
+ pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
if (end)
option |= SG_END;
@@ -339,11 +367,11 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
{
int err = 0, count;
- dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
+ pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
if (count < 1)
return -EINVAL;
- dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
+ pcr_dbg(pcr, "DMA mapping count: %d\n", count);
err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
@@ -417,8 +445,7 @@ int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
timeleft = wait_for_completion_interruptible_timeout(
&trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
- dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n",
- __func__, __LINE__);
+ pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
err = -ETIMEDOUT;
goto out;
}
@@ -592,7 +619,7 @@ static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
/* Enable Bus Interrupt */
rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
- dev_dbg(&(pcr->pci->dev), "RTSX_BIER: 0x%08x\n", pcr->bier);
+ pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}
static inline u8 double_ssc_depth(u8 depth)
@@ -638,14 +665,13 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return err;
card_clock /= 1000000;
- dev_dbg(&(pcr->pci->dev), "Switch card clock to %dMHz\n", card_clock);
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
clk = card_clock;
if (!initial_mode && double_clk)
clk = card_clock * 2;
- dev_dbg(&(pcr->pci->dev),
- "Internal SSC clock: %dMHz (cur_clock = %d)\n",
- clk, pcr->cur_clock);
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
if (clk == pcr->cur_clock)
return 0;
@@ -674,14 +700,14 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
}
div++;
}
- dev_dbg(&(pcr->pci->dev), "n = %d, div = %d\n", n, div);
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
ssc_depth = depth[ssc_depth];
if (double_clk)
ssc_depth = double_ssc_depth(ssc_depth);
ssc_depth = revise_ssc_depth(ssc_depth, div);
- dev_dbg(&(pcr->pci->dev), "ssc_depth = %d\n", ssc_depth);
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
@@ -803,13 +829,13 @@ static void rtsx_pci_card_detect(struct work_struct *work)
dwork = to_delayed_work(work);
pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
- dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);
+ pcr_dbg(pcr, "--> %s\n", __func__);
mutex_lock(&pcr->pcr_mutex);
spin_lock_irqsave(&pcr->lock, flags);
irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
- dev_dbg(&(pcr->pci->dev), "irq_status: 0x%08x\n", irq_status);
+ pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
irq_status &= CARD_EXIST;
card_inserted = pcr->card_inserted & irq_status;
@@ -820,9 +846,8 @@ static void rtsx_pci_card_detect(struct work_struct *work)
spin_unlock_irqrestore(&pcr->lock, flags);
if (card_inserted || card_removed) {
- dev_dbg(&(pcr->pci->dev),
- "card_inserted: 0x%x, card_removed: 0x%x\n",
- card_inserted, card_removed);
+ pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
+ card_inserted, card_removed);
if (pcr->ops->cd_deglitch)
card_inserted = pcr->ops->cd_deglitch(pcr);
@@ -930,7 +955,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
- dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);
+ pcr_dbg(pcr, "--> %s\n", __func__);
mutex_lock(&pcr->pcr_mutex);
@@ -942,7 +967,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
pcr->ops->turn_off_led(pcr);
if (pcr->aspm_en)
- rtsx_pci_write_config_byte(pcr, LCTLR, pcr->aspm_en);
+ rtsx_pci_enable_aspm(pcr);
mutex_unlock(&pcr->pcr_mutex);
}
@@ -968,6 +993,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
int err;
+ pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
rtsx_pci_enable_bus_int(pcr);
@@ -980,6 +1006,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
/* Wait SSC power stable */
udelay(200);
+ rtsx_pci_disable_aspm(pcr);
if (pcr->ops->optimize_phy) {
err = pcr->ops->optimize_phy(pcr);
if (err < 0)
@@ -1028,10 +1055,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
if (err < 0)
return err;
- rtsx_pci_write_config_byte(pcr, LCTLR, 0);
-
/* Enable clk_request_n to enable clock power management */
- rtsx_pci_write_config_byte(pcr, 0x81, 1);
+ rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
/* Enter L1 when host tx idle */
rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
@@ -1081,6 +1106,14 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
rts5249_init_params(pcr);
break;
+ case 0x524A:
+ rts524a_init_params(pcr);
+ break;
+
+ case 0x525A:
+ rts525a_init_params(pcr);
+ break;
+
case 0x5287:
rtl8411b_init_params(pcr);
break;
@@ -1090,7 +1123,7 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
break;
}
- dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n",
+ pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
PCI_PID(pcr), pcr->ic_version);
pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
@@ -1101,14 +1134,14 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
- dev_dbg(&(pcr->pci->dev), "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
- dev_dbg(&(pcr->pci->dev), "pcr->sd30_drive_sel_1v8 = 0x%x\n",
+ pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
+ pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
pcr->sd30_drive_sel_1v8);
- dev_dbg(&(pcr->pci->dev), "pcr->sd30_drive_sel_3v3 = 0x%x\n",
+ pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
pcr->sd30_drive_sel_3v3);
- dev_dbg(&(pcr->pci->dev), "pcr->card_drive_sel = 0x%x\n",
+ pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
pcr->card_drive_sel);
- dev_dbg(&(pcr->pci->dev), "pcr->flags = 0x%x\n", pcr->flags);
+ pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
pcr->state = PDEV_STAT_IDLE;
err = rtsx_pci_init_hw(pcr);
@@ -1126,7 +1159,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct rtsx_pcr *pcr;
struct pcr_handle *handle;
u32 base, len;
- int ret, i;
+ int ret, i, bar = 0;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1171,8 +1204,10 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->pci = pcidev;
dev_set_drvdata(&pcidev->dev, handle);
- len = pci_resource_len(pcidev, 0);
- base = pci_resource_start(pcidev, 0);
+ if (CHK_PCI_PID(pcr, 0x525A))
+ bar = 1;
+ len = pci_resource_len(pcidev, bar);
+ base = pci_resource_start(pcidev, bar);
pcr->remap_addr = ioremap_nocache(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index fe2bbb67defc..ce48842570d7 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -27,12 +27,20 @@
#define MIN_DIV_N_PCR 80
#define MAX_DIV_N_PCR 208
+#define RTS524A_PME_FORCE_CTL 0xFF78
+#define RTS524A_PM_CTRL3 0xFF7E
+
+int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
+int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
+
void rts5209_init_params(struct rtsx_pcr *pcr);
void rts5229_init_params(struct rtsx_pcr *pcr);
void rtl8411_init_params(struct rtsx_pcr *pcr);
void rtl8402_init_params(struct rtsx_pcr *pcr);
void rts5227_init_params(struct rtsx_pcr *pcr);
void rts5249_init_params(struct rtsx_pcr *pcr);
+void rts524a_init_params(struct rtsx_pcr *pcr);
+void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
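The rtsx changes above (rts5249.c, rtsx_pcr.c, rtsx_pcr.h) introduce an override-with-fallback shape for PHY access: rtsx_pci_write_phy_register() dispatches to a per-chip ops->write_phy hook when one is installed — the rts524a hook folds the extended address range first — and otherwise falls through to the generic __rtsx_pci_write_phy_register(). A compilable, self-contained sketch of just that dispatch shape, with invented names and types rather than the driver's real structures:

#include <stdio.h>

struct demo_pcr;

struct demo_pcr_ops {
	int (*write_phy)(struct demo_pcr *pcr, unsigned char addr, unsigned short val);
};

struct demo_pcr {
	const struct demo_pcr_ops *ops;
};

static int demo_generic_write_phy(struct demo_pcr *pcr, unsigned char addr,
				  unsigned short val)
{
	(void)pcr;
	printf("generic write: addr=0x%02x val=0x%04x\n", addr, val);
	return 0;
}

/* rts524a-style hook: fold extended PHY addresses before the generic write */
static int demo_rts524a_write_phy(struct demo_pcr *pcr, unsigned char addr,
				  unsigned short val)
{
	addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
	return demo_generic_write_phy(pcr, addr, val);
}

static int demo_write_phy(struct demo_pcr *pcr, unsigned char addr,
			  unsigned short val)
{
	if (pcr->ops && pcr->ops->write_phy)
		return pcr->ops->write_phy(pcr, addr, val);

	return demo_generic_write_phy(pcr, addr, val);
}

int main(void)
{
	static const struct demo_pcr_ops rts524a_like_ops = {
		.write_phy = demo_rts524a_write_phy,
	};
	struct demo_pcr plain = { .ops = NULL };
	struct demo_pcr folded = { .ops = &rts524a_like_ops };

	demo_write_phy(&plain, 0x1C, 0x4FC0);	/* takes the generic path */
	demo_write_phy(&folded, 0x9C, 0x4FC0);	/* hook folds 0x9C to 0x5C */
	return 0;
}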
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 0a7bc43db4e4..4a69afb425ad 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -69,6 +69,8 @@ static const struct mfd_cell s2mps11_devs[] = {
{
.name = "s2mps11-pmic",
}, {
+ .name = "s2mps14-rtc",
+ }, {
.name = "s2mps11-clk",
.of_compatible = "samsung,s2mps11-clk",
}
@@ -267,10 +269,8 @@ static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
struct sec_platform_data *pd;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- dev_err(dev, "could not allocate memory for pdata\n");
+ if (!pd)
return ERR_PTR(-ENOMEM);
- }
/*
* ToDo: the 'wakeup' member in the platform data is more of a linux
@@ -333,7 +333,6 @@ static int sec_pmic_probe(struct i2c_client *i2c,
}
if (pdata) {
sec_pmic->device_type = pdata->device_type;
- sec_pmic->ono = pdata->ono;
sec_pmic->irq_base = pdata->irq_base;
sec_pmic->wakeup = pdata->wakeup;
sec_pmic->pdata = pdata;
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index ba86a918c2da..806fa8dbb22d 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -61,14 +61,14 @@ static const struct regmap_irq s2mps11_irqs[] = {
.reg_offset = 1,
.mask = S2MPS11_IRQ_RTC60S_MASK,
},
- [S2MPS11_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
[S2MPS11_IRQ_RTCA1] = {
.reg_offset = 1,
.mask = S2MPS11_IRQ_RTCA1_MASK,
},
+ [S2MPS11_IRQ_RTCA0] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTCA0_MASK,
+ },
[S2MPS11_IRQ_SMPL] = {
.reg_offset = 1,
.mask = S2MPS11_IRQ_SMPL_MASK,
@@ -484,6 +484,12 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return ret;
}
+ /*
+ * The rtc-s5m driver requests S2MPS14_IRQ_RTCA0 also for S2MPS11
+ * so the interrupt number must be consistent.
+ */
+ BUILD_BUG_ON(((enum s2mps14_irq)S2MPS11_IRQ_RTCA0) != S2MPS14_IRQ_RTCA0);
+
return 0;
}
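The BUILD_BUG_ON added above is a compile-time guard that the RTCA0 interrupt number stays identical across the S2MPS11 and S2MPS14 enums. Outside the kernel the same guard can be written with C11 _Static_assert; a standalone sketch with invented enum values (purely illustrative, not the real interrupt numbers):

enum demo_s2mps11_irq { DEMO_S2MPS11_IRQ_RTCA0 = 4 };
enum demo_s2mps14_irq { DEMO_S2MPS14_IRQ_RTCA0 = 4 };

/* Compilation fails if the two interrupt numbers ever drift apart. */
_Static_assert((int)DEMO_S2MPS11_IRQ_RTCA0 == (int)DEMO_S2MPS14_IRQ_RTCA0,
	       "RTCA0 interrupt number must match between S2MPS11 and S2MPS14");

int main(void)
{
	return 0;
}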
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index 0e4a76daf187..7f87c62d91b3 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -766,7 +766,7 @@ static int si476x_core_probe(struct i2c_client *client,
sizeof(struct v4l2_rds_data),
GFP_KERNEL);
if (rval) {
- dev_err(&client->dev, "Could not alloate the FIFO\n");
+ dev_err(&client->dev, "Could not allocate the FIFO\n");
goto free_gpio;
}
mutex_init(&core->rds_drainer_status_lock);
diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c
new file mode 100644
index 000000000000..b0c9b0415650
--- /dev/null
+++ b/drivers/mfd/sky81452.c
@@ -0,0 +1,108 @@
+/*
+ * sky81452.c SKY81452 MFD driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/sky81452.h>
+
+static const struct regmap_config sky81452_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int sky81452_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ const struct sky81452_platform_data *pdata = dev_get_platdata(dev);
+ struct mfd_cell cells[2];
+ struct regmap *regmap;
+ int ret;
+
+ if (!pdata) {
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &sky81452_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to initialize. err=%ld\n", PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ memset(cells, 0, sizeof(cells));
+ cells[0].name = "sky81452-backlight";
+ cells[0].of_compatible = "skyworks,sky81452-backlight";
+ cells[0].platform_data = pdata->bl_pdata;
+ cells[0].pdata_size = sizeof(*pdata->bl_pdata);
+ cells[1].name = "sky81452-regulator";
+ cells[1].platform_data = pdata->regulator_init_data;
+ cells[1].pdata_size = sizeof(*pdata->regulator_init_data);
+
+ ret = mfd_add_devices(dev, -1, cells, ARRAY_SIZE(cells), NULL, 0, NULL);
+ if (ret)
+ dev_err(dev, "failed to add child devices. err=%d\n", ret);
+
+ return ret;
+}
+
+static int sky81452_remove(struct i2c_client *client)
+{
+ mfd_remove_devices(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id sky81452_ids[] = {
+ { "sky81452" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sky81452_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id sky81452_of_match[] = {
+ { .compatible = "skyworks,sky81452", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sky81452_of_match);
+#endif
+
+static struct i2c_driver sky81452_driver = {
+ .driver = {
+ .name = "sky81452",
+ .of_match_table = of_match_ptr(sky81452_of_match),
+ },
+ .probe = sky81452_probe,
+ .remove = sky81452_remove,
+ .id_table = sky81452_ids,
+};
+
+module_i2c_driver(sky81452_driver);
+
+MODULE_DESCRIPTION("Skyworks SKY81452 MFD driver");
+MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@skyworksinc.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index aacb3720065c..cf356395c9e9 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -318,7 +318,6 @@ static int tc3589x_device_init(struct tc3589x *tc3589x)
return ret;
}
-#ifdef CONFIG_OF
static const struct of_device_id tc3589x_match[] = {
/* Legacy compatible string */
{ .compatible = "tc3589x", .data = (void *) TC3589X_UNKNOWN },
@@ -359,14 +358,6 @@ tc3589x_of_probe(struct device *dev, enum tc3589x_version *version)
return pdata;
}
-#else
-static inline struct tc3589x_platform_data *
-tc3589x_of_probe(struct device *dev, enum tc3589x_version *version)
-{
- dev_err(dev, "no device tree support\n");
- return ERR_PTR(-ENODEV);
-}
-#endif
static int tc3589x_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 467c80e1c4ae..e4e4b22eebc9 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -68,12 +68,6 @@ static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
DEFINE_WAIT(wait);
u32 reg;
- /*
- * disable TSC steps so it does not run while the ADC is using it. If
- * write 0 while it is running (it just started or was already running)
- * then it completes all steps that were enabled and stops then.
- */
- tscadc_writel(tsadc, REG_SE, 0);
reg = tscadc_readl(tsadc, REG_ADCFSM);
if (reg & SEQ_STATUS) {
tsadc->adc_waiting = true;
@@ -86,8 +80,12 @@ static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
spin_lock_irq(&tsadc->reg_lock);
finish_wait(&tsadc->reg_se_wait, &wait);
+ /*
+ * Sequencer should either be idle or
+ * busy applying the charge step.
+ */
reg = tscadc_readl(tsadc, REG_ADCFSM);
- WARN_ON(reg & SEQ_STATUS);
+ WARN_ON((reg & SEQ_STATUS) && !(reg & CHARGE_STEP));
tsadc->adc_waiting = false;
}
tsadc->adc_in_use = true;
@@ -96,7 +94,6 @@ static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
{
spin_lock_irq(&tsadc->reg_lock);
- tsadc->reg_se_cache |= val;
am335x_tscadc_need_adc(tsadc);
tscadc_writel(tsadc, REG_SE, val);
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 743fb524fc8a..448f0a182dc4 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -515,7 +515,7 @@ static int tps65010_gpio_get(struct gpio_chip *chip, unsigned offset)
static struct tps65010 *the_tps;
-static int __exit tps65010_remove(struct i2c_client *client)
+static int tps65010_remove(struct i2c_client *client)
{
struct tps65010 *tps = i2c_get_clientdata(client);
struct tps65010_board *board = dev_get_platdata(&client->dev);
@@ -684,7 +684,7 @@ static struct i2c_driver tps65010_driver = {
.name = "tps65010",
},
.probe = tps65010_probe,
- .remove = __exit_p(tps65010_remove),
+ .remove = tps65010_remove,
.id_table = tps65010_id,
};
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 393509246037..f440aed61305 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -829,7 +829,7 @@ static struct twl4030_power_data osc_off_idle = {
.board_config = osc_off_rconfig,
};
-static struct of_device_id twl4030_power_of_match[] = {
+static const struct of_device_id twl4030_power_of_match[] = {
{
.compatible = "ti,twl4030-power",
},
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index f71ee3dbc2a2..c5265c1262c5 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -814,4 +814,3 @@ MODULE_DESCRIPTION("TWL6040 MFD");
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:twl6040");
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 8f43ab8fd2d6..3e628df9280c 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -47,71 +47,26 @@
#define SYS_HBI_MASK 0xfff
#define SYS_PROCIDx_HBI_SHIFT 0
-#define SYS_MCI_CARDIN (1 << 0)
-#define SYS_MCI_WPROT (1 << 1)
-
#define SYS_MISC_MASTERSITE (1 << 14)
-
-static void __iomem *__vexpress_sysreg_base;
-
-static void __iomem *vexpress_sysreg_base(void)
+void vexpress_flags_set(u32 data)
{
- if (!__vexpress_sysreg_base) {
+ static void __iomem *base;
+
+ if (!base) {
struct device_node *node = of_find_compatible_node(NULL, NULL,
"arm,vexpress-sysreg");
- __vexpress_sysreg_base = of_iomap(node, 0);
+ base = of_iomap(node, 0);
}
- WARN_ON(!__vexpress_sysreg_base);
-
- return __vexpress_sysreg_base;
-}
-
-
-static int vexpress_sysreg_get_master(void)
-{
- if (readl(vexpress_sysreg_base() + SYS_MISC) & SYS_MISC_MASTERSITE)
- return VEXPRESS_SITE_DB2;
-
- return VEXPRESS_SITE_DB1;
-}
-
-void vexpress_flags_set(u32 data)
-{
- writel(~0, vexpress_sysreg_base() + SYS_FLAGSCLR);
- writel(data, vexpress_sysreg_base() + SYS_FLAGSSET);
-}
-
-unsigned int vexpress_get_mci_cardin(struct device *dev)
-{
- return readl(vexpress_sysreg_base() + SYS_MCI) & SYS_MCI_CARDIN;
-}
-
-u32 vexpress_get_procid(int site)
-{
- if (site == VEXPRESS_SITE_MASTER)
- site = vexpress_sysreg_get_master();
+ if (WARN_ON(!base))
+ return;
- return readl(vexpress_sysreg_base() + (site == VEXPRESS_SITE_DB1 ?
- SYS_PROCID0 : SYS_PROCID1));
+ writel(~0, base + SYS_FLAGSCLR);
+ writel(data, base + SYS_FLAGSSET);
}
-void __iomem *vexpress_get_24mhz_clock_base(void)
-{
- return vexpress_sysreg_base() + SYS_24MHZ;
-}
-
-
-void __init vexpress_sysreg_early_init(void __iomem *base)
-{
- __vexpress_sysreg_base = base;
-
- vexpress_config_set_master(vexpress_sysreg_get_master());
-}
-
-
/* The sysreg block is just a random collection of various functions... */
static struct syscon_platform_data vexpress_sysreg_sys_id_pdata = {
@@ -210,6 +165,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
struct resource *mem;
void __iomem *base;
struct bgpio_chip *mmc_gpio_chip;
+ int master;
u32 dt_hbi;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -220,11 +176,14 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
if (!base)
return -ENOMEM;
- vexpress_config_set_master(vexpress_sysreg_get_master());
+ master = readl(base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
/* Confirm board type against DT property, if available */
if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
- u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER);
+ u32 id = readl(base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
if (WARN_ON(dt_hbi != hbi))
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index b326a82017ee..aeae6ec123b3 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -1172,9 +1172,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DAC_DIGITAL_VOLUME_3L:
case ARIZONA_DAC_VOLUME_LIMIT_3L:
case ARIZONA_NOISE_GATE_SELECT_3L:
- case ARIZONA_OUTPUT_PATH_CONFIG_3R:
- case ARIZONA_DAC_DIGITAL_VOLUME_3R:
- case ARIZONA_DAC_VOLUME_LIMIT_3R:
case ARIZONA_OUTPUT_PATH_CONFIG_4L:
case ARIZONA_DAC_DIGITAL_VOLUME_4L:
case ARIZONA_OUT_VOLUME_4L:
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 4c4a59b25537..7f90ce5a569a 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -230,6 +230,8 @@ static const struct i2c_device_id bh1780_id[] = {
{ },
};
+MODULE_DEVICE_TABLE(i2c, bh1780_id);
+
#ifdef CONFIG_OF
static const struct of_device_id of_bh1780_match[] = {
{ .compatible = "rohm,bh1780gli", },
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 06166ac000e0..0b1bd85e4ae6 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -479,6 +479,7 @@ static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
static noinline int fpga_program_cpu(struct fpga_dev *priv)
{
int ret;
+ unsigned long timeout;
/* Disable the programmer */
fpga_programmer_disable(priv);
@@ -497,8 +498,8 @@ static noinline int fpga_program_cpu(struct fpga_dev *priv)
goto out_disable_controller;
/* Wait for the interrupt handler to signal that programming finished */
- ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
- if (!ret) {
+ timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+ if (!timeout) {
dev_err(priv->dev, "Timed out waiting for completion\n");
ret = -ETIMEDOUT;
goto out_disable_controller;
@@ -536,6 +537,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
struct sg_table table;
dma_cookie_t cookie;
int ret, i;
+ unsigned long timeout;
/* Disable the programmer */
fpga_programmer_disable(priv);
@@ -623,8 +625,8 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
dev_dbg(priv->dev, "enabled the controller\n");
/* Wait for the interrupt handler to signal that programming finished */
- ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
- if (!ret) {
+ timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+ if (!timeout) {
dev_err(priv->dev, "Timed out waiting for completion\n");
ret = -ETIMEDOUT;
goto out_disable_controller;
@@ -1142,7 +1144,7 @@ out_return:
return ret;
}
-static struct of_device_id fpga_of_match[] = {
+static const struct of_device_id fpga_of_match[] = {
{ .compatible = "carma,fpga-programmer", },
{},
};
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 68cdfe151bdb..5aba3fd789de 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1486,7 +1486,7 @@ static int data_of_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id data_of_match[] = {
+static const struct of_device_id data_of_match[] = {
{ .compatible = "carma,carma-fpga", },
{},
};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 3ef4627f9cb1..4739689d23ad 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -950,6 +950,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
struct lis3lv02d_platform_data *pdata;
struct device_node *np = lis3->of_node;
u32 val;
+ s32 sval;
if (!lis3->of_node)
return 0;
@@ -1031,6 +1032,23 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO;
if (of_get_property(np, "st,wakeup-z-hi", NULL))
pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI;
+ if (of_get_property(np, "st,wakeup-threshold", &val))
+ pdata->wakeup_thresh = val;
+
+ if (of_get_property(np, "st,wakeup2-x-lo", NULL))
+ pdata->wakeup_flags2 |= LIS3_WAKEUP_X_LO;
+ if (of_get_property(np, "st,wakeup2-x-hi", NULL))
+ pdata->wakeup_flags2 |= LIS3_WAKEUP_X_HI;
+ if (of_get_property(np, "st,wakeup2-y-lo", NULL))
+ pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_LO;
+ if (of_get_property(np, "st,wakeup2-y-hi", NULL))
+ pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_HI;
+ if (of_get_property(np, "st,wakeup2-z-lo", NULL))
+ pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO;
+ if (of_get_property(np, "st,wakeup2-z-hi", NULL))
+ pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI;
+ if (of_get_property(np, "st,wakeup2-threshold", &val))
+ pdata->wakeup_thresh2 = val;
if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) {
switch (val) {
@@ -1054,29 +1072,29 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
if (of_get_property(np, "st,hipass2-disable", NULL))
pdata->hipass_ctrl |= LIS3_HIPASS2_DISABLE;
- if (of_get_property(np, "st,axis-x", &val))
- pdata->axis_x = val;
- if (of_get_property(np, "st,axis-y", &val))
- pdata->axis_y = val;
- if (of_get_property(np, "st,axis-z", &val))
- pdata->axis_z = val;
+ if (of_property_read_s32(np, "st,axis-x", &sval) == 0)
+ pdata->axis_x = sval;
+ if (of_property_read_s32(np, "st,axis-y", &sval) == 0)
+ pdata->axis_y = sval;
+ if (of_property_read_s32(np, "st,axis-z", &sval) == 0)
+ pdata->axis_z = sval;
if (of_get_property(np, "st,default-rate", NULL))
pdata->default_rate = val;
- if (of_get_property(np, "st,min-limit-x", &val))
- pdata->st_min_limits[0] = val;
- if (of_get_property(np, "st,min-limit-y", &val))
- pdata->st_min_limits[1] = val;
- if (of_get_property(np, "st,min-limit-z", &val))
- pdata->st_min_limits[2] = val;
-
- if (of_get_property(np, "st,max-limit-x", &val))
- pdata->st_max_limits[0] = val;
- if (of_get_property(np, "st,max-limit-y", &val))
- pdata->st_max_limits[1] = val;
- if (of_get_property(np, "st,max-limit-z", &val))
- pdata->st_max_limits[2] = val;
+ if (of_property_read_s32(np, "st,min-limit-x", &sval) == 0)
+ pdata->st_min_limits[0] = sval;
+ if (of_property_read_s32(np, "st,min-limit-y", &sval) == 0)
+ pdata->st_min_limits[1] = sval;
+ if (of_property_read_s32(np, "st,min-limit-z", &sval) == 0)
+ pdata->st_min_limits[2] = sval;
+
+ if (of_property_read_s32(np, "st,max-limit-x", &sval) == 0)
+ pdata->st_max_limits[0] = sval;
+ if (of_property_read_s32(np, "st,max-limit-y", &sval) == 0)
+ pdata->st_max_limits[1] = sval;
+ if (of_property_read_s32(np, "st,max-limit-z", &sval) == 0)
+ pdata->st_max_limits[2] = sval;
lis3->pdata = pdata;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 63fe096d4462..e3e7f1dc27ba 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -106,7 +106,7 @@ static union axis_conversion lis3lv02d_axis_map =
{ .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
#ifdef CONFIG_OF
-static struct of_device_id lis3lv02d_i2c_dt_ids[] = {
+static const struct of_device_id lis3lv02d_i2c_dt_ids[] = {
{ .compatible = "st,lis3lv02d" },
{}
};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index bd06d0cfac45..b2f6e1651ac9 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -61,7 +61,7 @@ static union axis_conversion lis3lv02d_axis_normal =
{ .as_array = { 1, 2, 3 } };
#ifdef CONFIG_OF
-static struct of_device_id lis302dl_spi_dt_ids[] = {
+static const struct of_device_id lis302dl_spi_dt_ids[] = {
{ .compatible = "st,lis302dl-spi" },
{}
};
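
The two hunks above only constify the OF match tables so they can live in read-only data. A sketch of the resulting declaration pattern; the table name is illustrative and MODULE_DEVICE_TABLE() is included only to round out the example:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "st,lis3lv02d" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);
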
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 8ebc6cda1373..518914a82b83 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -21,3 +21,6 @@ mei-me-objs += hw-me.o
obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
mei-txe-objs := pci-txe.o
mei-txe-objs += hw-txe.o
+
+mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
+CFLAGS_mei-trace.o = -I$(src)
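
The Makefile hunk builds the trace definitions into a separate object and adds -I$(src) because the tracepoint machinery re-includes the local trace header by name during expansion. A sketch of what such an object usually contains; the header itself is introduced elsewhere in this series and is not shown here:

#include <linux/module.h>

#define CREATE_TRACE_POINTS
#include "mei-trace.h"
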
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index c4cb9a984a5f..d2cd53e3fac3 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -19,7 +19,6 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
-#include <linux/aio.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/list.h>
@@ -49,10 +48,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
{
/* reset iamthif parameters. */
dev->iamthif_current_cb = NULL;
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_msg_buf_index = 0;
dev->iamthif_canceled = false;
- dev->iamthif_ioctl = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
dev->iamthif_stall_timer = 0;
@@ -70,7 +66,6 @@ int mei_amthif_host_init(struct mei_device *dev)
{
struct mei_cl *cl = &dev->iamthif_cl;
struct mei_me_client *me_cl;
- unsigned char *msg_buf;
int ret;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
@@ -91,18 +86,6 @@ int mei_amthif_host_init(struct mei_device *dev)
dev->iamthif_mtu = me_cl->props.max_msg_length;
dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu);
- kfree(dev->iamthif_msg_buf);
- dev->iamthif_msg_buf = NULL;
-
- /* allocate storage for ME message buffer */
- msg_buf = kcalloc(dev->iamthif_mtu,
- sizeof(unsigned char), GFP_KERNEL);
- if (!msg_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- dev->iamthif_msg_buf = msg_buf;
ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
if (ret < 0) {
@@ -195,30 +178,33 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
dev_dbg(dev->dev, "woke up from sleep\n");
}
+ if (cb->status) {
+ rets = cb->status;
+ dev_dbg(dev->dev, "read operation failed %d\n", rets);
+ goto free;
+ }
dev_dbg(dev->dev, "Got amthif data\n");
dev->iamthif_timer = 0;
- if (cb) {
- timeout = cb->read_time +
- mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
- dev_dbg(dev->dev, "amthif timeout = %lud\n",
- timeout);
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev->dev, "amthif Time out\n");
- /* 15 sec for the message has expired */
- list_del(&cb->list);
- rets = -ETIME;
- goto free;
- }
+ timeout = cb->read_time +
+ mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
+ dev_dbg(dev->dev, "amthif timeout = %lud\n",
+ timeout);
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev->dev, "amthif Time out\n");
+ /* the 15 sec timeout for the message has expired */
+ list_del_init(&cb->list);
+ rets = -ETIME;
+ goto free;
}
/* if the whole message will fit remove it from the list */
if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
- list_del(&cb->list);
+ list_del_init(&cb->list);
else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
/* end of the message has been reached */
- list_del(&cb->list);
+ list_del_init(&cb->list);
rets = 0;
goto free;
}
@@ -226,15 +212,15 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* remove message from deletion list
*/
- dev_dbg(dev->dev, "amthif cb->response_buffer size - %d\n",
- cb->response_buffer.size);
+ dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
+ cb->buf.size);
dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
/* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
length = min_t(size_t, length, (cb->buf_idx - *offset));
- if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
+ if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
} else {
@@ -253,126 +239,88 @@ out:
}
/**
- * mei_amthif_send_cmd - send amthif command to the ME
+ * mei_amthif_read_start - queue message for sending read credential
*
- * @dev: the device structure
- * @cb: mei call back struct
+ * @cl: host client
+ * @file: file pointer of message recipient
*
* Return: 0 on success, <0 on failure.
- *
*/
-static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
+static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
{
- struct mei_msg_hdr mei_hdr;
- struct mei_cl *cl;
- int ret;
-
- if (!dev || !cb)
- return -ENODEV;
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb;
+ size_t length = dev->iamthif_mtu;
+ int rets;
- dev_dbg(dev->dev, "write data to amthif client.\n");
+ cb = mei_io_cb_init(cl, MEI_FOP_READ, file);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto err;
+ }
- dev->iamthif_state = MEI_IAMTHIF_WRITING;
- dev->iamthif_current_cb = cb;
- dev->iamthif_file_object = cb->file_object;
- dev->iamthif_canceled = false;
- dev->iamthif_ioctl = true;
- dev->iamthif_msg_buf_size = cb->request_buffer.size;
- memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
- cb->request_buffer.size);
- cl = &dev->iamthif_cl;
+ rets = mei_io_cb_alloc_buf(cb, length);
+ if (rets)
+ goto err;
- ret = mei_cl_flow_ctrl_creds(cl);
- if (ret < 0)
- return ret;
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- if (ret && mei_hbuf_acquire(dev)) {
- ret = 0;
- if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
- mei_hdr.length = mei_hbuf_max_len(dev);
- mei_hdr.msg_complete = 0;
- } else {
- mei_hdr.length = cb->request_buffer.size;
- mei_hdr.msg_complete = 1;
- }
+ dev->iamthif_state = MEI_IAMTHIF_READING;
+ dev->iamthif_file_object = cb->file_object;
+ dev->iamthif_current_cb = cb;
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
- mei_hdr.reserved = 0;
- mei_hdr.internal = 0;
- dev->iamthif_msg_buf_index += mei_hdr.length;
- ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf);
- if (ret)
- return ret;
-
- if (mei_hdr.msg_complete) {
- if (mei_cl_flow_ctrl_reduce(cl))
- return -EIO;
- dev->iamthif_flow_control_pending = true;
- dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev_dbg(dev->dev, "add amthif cb to write waiting list\n");
- dev->iamthif_current_cb = cb;
- dev->iamthif_file_object = cb->file_object;
- list_add_tail(&cb->list, &dev->write_waiting_list.list);
- } else {
- dev_dbg(dev->dev, "message does not complete, so add amthif cb to write list.\n");
- list_add_tail(&cb->list, &dev->write_list.list);
- }
- } else {
- list_add_tail(&cb->list, &dev->write_list.list);
- }
return 0;
+err:
+ mei_io_cb_free(cb);
+ return rets;
}
/**
- * mei_amthif_write - write amthif data to amthif client
+ * mei_amthif_send_cmd - send amthif command to the ME
*
- * @dev: the device structure
+ * @cl: the host client
* @cb: mei call back struct
*
* Return: 0 on success, <0 on failure.
- *
*/
-int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)
+static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
{
+ struct mei_device *dev;
int ret;
- if (!dev || !cb)
+ if (!cl->dev || !cb)
return -ENODEV;
- ret = mei_io_cb_alloc_resp_buf(cb, dev->iamthif_mtu);
- if (ret)
+ dev = cl->dev;
+
+ dev->iamthif_state = MEI_IAMTHIF_WRITING;
+ dev->iamthif_current_cb = cb;
+ dev->iamthif_file_object = cb->file_object;
+ dev->iamthif_canceled = false;
+
+ ret = mei_cl_write(cl, cb, false);
+ if (ret < 0)
return ret;
- cb->fop_type = MEI_FOP_WRITE;
+ if (cb->completed)
+ cb->status = mei_amthif_read_start(cl, cb->file_object);
- if (!list_empty(&dev->amthif_cmd_list.list) ||
- dev->iamthif_state != MEI_IAMTHIF_IDLE) {
- dev_dbg(dev->dev,
- "amthif state = %d\n", dev->iamthif_state);
- dev_dbg(dev->dev, "AMTHIF: add cb to the wait list\n");
- list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
- return 0;
- }
- return mei_amthif_send_cmd(dev, cb);
+ return 0;
}
+
/**
* mei_amthif_run_next_cmd - send next amt command from queue
*
* @dev: the device structure
+ *
+ * Return: 0 on success, <0 on failure.
*/
-void mei_amthif_run_next_cmd(struct mei_device *dev)
+int mei_amthif_run_next_cmd(struct mei_device *dev)
{
+ struct mei_cl *cl = &dev->iamthif_cl;
struct mei_cl_cb *cb;
- int ret;
-
- if (!dev)
- return;
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_msg_buf_index = 0;
dev->iamthif_canceled = false;
- dev->iamthif_ioctl = true;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
@@ -382,13 +330,48 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
typeof(*cb), list);
if (!cb)
- return;
- list_del(&cb->list);
- ret = mei_amthif_send_cmd(dev, cb);
- if (ret)
- dev_warn(dev->dev, "amthif write failed status = %d\n", ret);
+ return 0;
+
+ list_del_init(&cb->list);
+ return mei_amthif_send_cmd(cl, cb);
}
+/**
+ * mei_amthif_write - write amthif data to amthif client
+ *
+ * @cl: host client
+ * @cb: mei call back struct
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+
+ struct mei_device *dev;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ if (WARN_ON(!cb))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
+ return mei_amthif_run_next_cmd(dev);
+}
+
+/**
+ * mei_amthif_poll - the amthif poll function
+ *
+ * @dev: the device structure
+ * @file: pointer to file structure
+ * @wait: pointer to poll_table structure
+ *
+ * Return: poll mask
+ *
+ * Locking: called under "dev->device_lock" lock
+ */
unsigned int mei_amthif_poll(struct mei_device *dev,
struct file *file, poll_table *wait)
@@ -397,19 +380,12 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
poll_wait(file, &dev->iamthif_cl.wait, wait);
- mutex_lock(&dev->device_lock);
- if (!mei_cl_is_connected(&dev->iamthif_cl)) {
-
- mask = POLLERR;
-
- } else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
- dev->iamthif_file_object == file) {
+ if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
+ dev->iamthif_file_object == file) {
- mask |= (POLLIN | POLLRDNORM);
- dev_dbg(dev->dev, "run next amthif cb\n");
+ mask |= POLLIN | POLLRDNORM;
mei_amthif_run_next_cmd(dev);
}
- mutex_unlock(&dev->device_lock);
return mask;
}
@@ -428,71 +404,14 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list)
{
- struct mei_device *dev = cl->dev;
- struct mei_msg_hdr mei_hdr;
- size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
- u32 msg_slots = mei_data2slots(len);
- int slots;
- int rets;
-
- rets = mei_cl_flow_ctrl_creds(cl);
- if (rets < 0)
- return rets;
-
- if (rets == 0) {
- cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
- return 0;
- }
-
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
- mei_hdr.reserved = 0;
- mei_hdr.internal = 0;
-
- slots = mei_hbuf_empty_slots(dev);
-
- if (slots >= msg_slots) {
- mei_hdr.length = len;
- mei_hdr.msg_complete = 1;
- /* Split the message only if we can write the whole host buffer */
- } else if (slots == dev->hbuf_depth) {
- msg_slots = slots;
- len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr.length = len;
- mei_hdr.msg_complete = 0;
- } else {
- /* wait for next time the host buffer is empty */
- return 0;
- }
-
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
-
- rets = mei_write_message(dev, &mei_hdr,
- dev->iamthif_msg_buf + dev->iamthif_msg_buf_index);
- if (rets) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- cl->status = rets;
- list_del(&cb->list);
- return rets;
- }
-
- if (mei_cl_flow_ctrl_reduce(cl))
- return -EIO;
-
- dev->iamthif_msg_buf_index += mei_hdr.length;
- cl->status = 0;
-
- if (mei_hdr.msg_complete) {
- dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev->iamthif_flow_control_pending = true;
-
- /* save iamthif cb sent to amthif client */
- cb->buf_idx = dev->iamthif_msg_buf_index;
- dev->iamthif_current_cb = cb;
+ int ret;
- list_move_tail(&cb->list, &dev->write_waiting_list.list);
- }
+ ret = mei_cl_irq_write(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ if (cb->completed)
+ cb->status = mei_amthif_read_start(cl, cb->file_object);
return 0;
}
@@ -501,83 +420,35 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
* mei_amthif_irq_read_msg - read routine after ISR to
* handle the read amthif message
*
- * @dev: the device structure
+ * @cl: mei client
* @mei_hdr: header of amthif message
- * @complete_list: An instance of our list structure
+ * @cmpl_list: completed callbacks list
*
- * Return: 0 on success, <0 on failure.
+ * Return: -ENODEV if cb is NULL, 0 otherwise; error message is in cb->status
*/
-int mei_amthif_irq_read_msg(struct mei_device *dev,
+int mei_amthif_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
- struct mei_cl_cb *complete_list)
+ struct mei_cl_cb *cmpl_list)
{
- struct mei_cl_cb *cb;
- unsigned char *buffer;
-
- BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
- BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
+ struct mei_device *dev;
+ int ret;
- buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index;
- BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
+ dev = cl->dev;
- mei_read_slots(dev, buffer, mei_hdr->length);
+ if (dev->iamthif_state != MEI_IAMTHIF_READING)
+ return 0;
- dev->iamthif_msg_buf_index += mei_hdr->length;
+ ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
+ if (ret)
+ return ret;
if (!mei_hdr->msg_complete)
return 0;
- dev_dbg(dev->dev, "amthif_message_buffer_index =%d\n",
- mei_hdr->length);
-
dev_dbg(dev->dev, "completed amthif read.\n ");
- if (!dev->iamthif_current_cb)
- return -ENODEV;
-
- cb = dev->iamthif_current_cb;
dev->iamthif_current_cb = NULL;
-
dev->iamthif_stall_timer = 0;
- cb->buf_idx = dev->iamthif_msg_buf_index;
- cb->read_time = jiffies;
- if (dev->iamthif_ioctl) {
- /* found the iamthif cb */
- dev_dbg(dev->dev, "complete the amthif read cb.\n ");
- dev_dbg(dev->dev, "add the amthif read cb to complete.\n ");
- list_add_tail(&cb->list, &complete_list->list);
- }
- return 0;
-}
-
-/**
- * mei_amthif_irq_read - prepares to read amthif data.
- *
- * @dev: the device structure.
- * @slots: free slots.
- *
- * Return: 0, OK; otherwise, error.
- */
-int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
-{
- u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
-
- if (*slots < msg_slots)
- return -EMSGSIZE;
-
- *slots -= msg_slots;
-
- if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
- dev_dbg(dev->dev, "iamthif flow control failed\n");
- return -EIO;
- }
- dev_dbg(dev->dev, "iamthif flow control success\n");
- dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_flow_control_pending = false;
- dev->iamthif_msg_buf_index = 0;
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
- dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
return 0;
}
@@ -589,17 +460,30 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
*/
void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
{
+
+ if (cb->fop_type == MEI_FOP_WRITE) {
+ if (!cb->status) {
+ dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
+ mei_io_cb_free(cb);
+ return;
+ }
+ /*
+ * in case of error enqueue the write cb to complete read list
+ * so it can be propagated to the reader
+ */
+ list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
+ wake_up_interruptible(&dev->iamthif_cl.wait);
+ return;
+ }
+
if (dev->iamthif_canceled != 1) {
dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
dev->iamthif_stall_timer = 0;
- memcpy(cb->response_buffer.data,
- dev->iamthif_msg_buf,
- dev->iamthif_msg_buf_index);
list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
dev_dbg(dev->dev, "amthif read completed\n");
dev->iamthif_timer = jiffies;
dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
- dev->iamthif_timer);
+ dev->iamthif_timer);
} else {
mei_amthif_run_next_cmd(dev);
}
@@ -624,26 +508,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
static bool mei_clear_list(struct mei_device *dev,
const struct file *file, struct list_head *mei_cb_list)
{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
+ struct mei_cl *cl = &dev->iamthif_cl;
+ struct mei_cl_cb *cb, *next;
bool removed = false;
/* list all list member */
- list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) {
+ list_for_each_entry_safe(cb, next, mei_cb_list, list) {
/* check if list member associated with a file */
- if (file == cb_pos->file_object) {
- /* remove member from the list */
- list_del(&cb_pos->list);
+ if (file == cb->file_object) {
/* check if cb equal to current iamthif cb */
- if (dev->iamthif_current_cb == cb_pos) {
+ if (dev->iamthif_current_cb == cb) {
dev->iamthif_current_cb = NULL;
/* send flow control to iamthif client */
- mei_hbm_cl_flow_control_req(dev,
- &dev->iamthif_cl);
+ mei_hbm_cl_flow_control_req(dev, cl);
}
/* free all allocated buffers */
- mei_io_cb_free(cb_pos);
- cb_pos = NULL;
+ mei_io_cb_free(cb);
removed = true;
}
}
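
Several amthif hunks above replace list_del() with list_del_init(). A small sketch of the difference, using an illustrative struct: the re-initialized entry can later be tested with list_empty() or unlinked again, whereas plain list_del() leaves it poisoned.

#include <linux/bug.h>
#include <linux/list.h>

struct example_cb {
	struct list_head list;
};

static void example_dequeue(struct example_cb *cb)
{
	list_del_init(&cb->list);		/* unlink and re-init as an empty list head */
	WARN_ON(!list_empty(&cb->list));	/* never fires after list_del_init() */
}
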
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index be767f4db26a..4cf38c39878a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -238,7 +238,7 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
dev = cl->dev;
mutex_lock(&dev->device_lock);
- if (cl->state != MEI_FILE_CONNECTED) {
+ if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
@@ -255,17 +255,13 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
goto out;
}
- cb = mei_io_cb_init(cl, NULL);
+ cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
if (!cb) {
rets = -ENOMEM;
goto out;
}
- rets = mei_io_cb_alloc_req_buf(cb, length);
- if (rets < 0)
- goto out;
-
- memcpy(cb->request_buffer.data, buf, length);
+ memcpy(cb->buf.data, buf, length);
rets = mei_cl_write(cl, cb, blocking);
@@ -292,20 +288,21 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
mutex_lock(&dev->device_lock);
- if (!cl->read_cb) {
- rets = mei_cl_read_start(cl, length);
- if (rets < 0)
- goto out;
- }
+ cb = mei_cl_read_cb(cl, NULL);
+ if (cb)
+ goto copy;
- if (cl->reading_state != MEI_READ_COMPLETE &&
- !waitqueue_active(&cl->rx_wait)) {
+ rets = mei_cl_read_start(cl, length, NULL);
+ if (rets && rets != -EBUSY)
+ goto out;
+
+ if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
- cl->reading_state == MEI_READ_COMPLETE ||
- mei_cl_is_transitioning(cl))) {
+ (!list_empty(&cl->rd_completed)) ||
+ (!mei_cl_is_connected(cl)))) {
if (signal_pending(current))
return -EINTR;
@@ -313,23 +310,31 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
}
mutex_lock(&dev->device_lock);
- }
- cb = cl->read_cb;
+ if (!mei_cl_is_connected(cl)) {
+ rets = -EBUSY;
+ goto out;
+ }
+ }
- if (cl->reading_state != MEI_READ_COMPLETE) {
+ cb = mei_cl_read_cb(cl, NULL);
+ if (!cb) {
rets = 0;
goto out;
}
+copy:
+ if (cb->status) {
+ rets = cb->status;
+ goto free;
+ }
+
r_length = min_t(size_t, length, cb->buf_idx);
- memcpy(buf, cb->response_buffer.data, r_length);
+ memcpy(buf, cb->buf.data, r_length);
rets = r_length;
+free:
mei_io_cb_free(cb);
- cl->reading_state = MEI_IDLE;
- cl->read_cb = NULL;
-
out:
mutex_unlock(&dev->device_lock);
@@ -386,7 +391,7 @@ static void mei_bus_event_work(struct work_struct *work)
device->events = 0;
/* Prepare for the next read */
- mei_cl_read_start(device->cl, 0);
+ mei_cl_read_start(device->cl, 0, NULL);
}
int mei_cl_register_event_cb(struct mei_cl_device *device,
@@ -400,7 +405,7 @@ int mei_cl_register_event_cb(struct mei_cl_device *device,
device->event_context = context;
INIT_WORK(&device->event_work, mei_bus_event_work);
- mei_cl_read_start(device->cl, 0);
+ mei_cl_read_start(device->cl, 0, NULL);
return 0;
}
@@ -441,8 +446,8 @@ int mei_cl_enable_device(struct mei_cl_device *device)
mutex_unlock(&dev->device_lock);
- if (device->event_cb && !cl->read_cb)
- mei_cl_read_start(device->cl, 0);
+ if (device->event_cb)
+ mei_cl_read_start(device->cl, 0, NULL);
if (!device->ops || !device->ops->enable)
return 0;
@@ -462,54 +467,34 @@ int mei_cl_disable_device(struct mei_cl_device *device)
dev = cl->dev;
+ if (device->ops && device->ops->disable)
+ device->ops->disable(device);
+
+ device->event_cb = NULL;
+
mutex_lock(&dev->device_lock);
- if (cl->state != MEI_FILE_CONNECTED) {
- mutex_unlock(&dev->device_lock);
+ if (!mei_cl_is_connected(cl)) {
dev_err(dev->dev, "Already disconnected");
-
- return 0;
+ err = 0;
+ goto out;
}
cl->state = MEI_FILE_DISCONNECTING;
err = mei_cl_disconnect(cl);
if (err < 0) {
- mutex_unlock(&dev->device_lock);
- dev_err(dev->dev,
- "Could not disconnect from the ME client");
-
- return err;
+ dev_err(dev->dev, "Could not disconnect from the ME client");
+ goto out;
}
/* Flush queues and remove any pending read */
- mei_cl_flush_queues(cl);
-
- if (cl->read_cb) {
- struct mei_cl_cb *cb = NULL;
-
- cb = mei_cl_find_read_cb(cl);
- /* Remove entry from read list */
- if (cb)
- list_del(&cb->list);
-
- cb = cl->read_cb;
- cl->read_cb = NULL;
-
- if (cb) {
- mei_io_cb_free(cb);
- cb = NULL;
- }
- }
-
- device->event_cb = NULL;
+ mei_cl_flush_queues(cl, NULL);
+out:
mutex_unlock(&dev->device_lock);
+ return err;
- if (!device->ops || !device->ops->disable)
- return 0;
-
- return device->ops->disable(device);
}
EXPORT_SYMBOL_GPL(mei_cl_disable_device);
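
The receive path in the bus.c hunk now waits for a completed read callback on cl->rd_completed instead of tracking a reading_state. A sketch of that blocking shape with illustrative example_* types rather than the driver's own structures: sleep until a completed callback is queued or the client disconnects, then re-check state under the lock.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_cl {
	struct mutex lock;
	wait_queue_head_t rx_wait;
	struct list_head rd_completed;
	bool connected;
};

static int example_wait_rx(struct example_cl *cl)
{
	int ret = 0;

	mutex_lock(&cl->lock);
	while (list_empty(&cl->rd_completed) && cl->connected) {
		mutex_unlock(&cl->lock);
		ret = wait_event_interruptible(cl->rx_wait,
				!list_empty(&cl->rd_completed) ||
				!cl->connected);
		if (ret)
			return ret;	/* -ERESTARTSYS on signal */
		mutex_lock(&cl->lock);
	}
	if (!cl->connected)
		ret = -ENODEV;
	/* else: a completed callback can be consumed under cl->lock */
	mutex_unlock(&cl->lock);
	return ret;
}
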
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index dfbddfe1c7a0..1e99ef6a54a2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -48,14 +48,14 @@ void mei_me_cl_init(struct mei_me_client *me_cl)
*/
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
- if (me_cl)
- kref_get(&me_cl->refcnt);
+ if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
+ return me_cl;
- return me_cl;
+ return NULL;
}
/**
- * mei_me_cl_release - unlink and free me client
+ * mei_me_cl_release - free me client
*
* Locking: called under "dev->device_lock" lock
*
@@ -65,9 +65,10 @@ static void mei_me_cl_release(struct kref *ref)
{
struct mei_me_client *me_cl =
container_of(ref, struct mei_me_client, refcnt);
- list_del(&me_cl->list);
+
kfree(me_cl);
}
+
/**
* mei_me_cl_put - decrease me client refcount and free client if necessary
*
@@ -82,51 +83,146 @@ void mei_me_cl_put(struct mei_me_client *me_cl)
}
/**
- * mei_me_cl_by_uuid - locate me client by uuid
+ * __mei_me_cl_del - delete me client from the list and decrease
+ * reference counter
+ *
+ * @dev: mei device
+ * @me_cl: me client
+ *
+ * Locking: dev->me_clients_rwsem
+ */
+static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
+{
+ if (!me_cl)
+ return;
+
+ list_del(&me_cl->list);
+ mei_me_cl_put(me_cl);
+}
+
+/**
+ * mei_me_cl_add - add me client to the list
+ *
+ * @dev: mei device
+ * @me_cl: me client
+ */
+void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
+{
+ down_write(&dev->me_clients_rwsem);
+ list_add(&me_cl->list, &dev->me_clients);
+ up_write(&dev->me_clients_rwsem);
+}
+
+/**
+ * __mei_me_cl_by_uuid - locate me client by uuid
* increases ref count
*
* @dev: mei device
* @uuid: me client uuid
*
- * Locking: called under "dev->device_lock" lock
- *
* Return: me client or NULL if not found
+ *
+ * Locking: dev->me_clients_rwsem
*/
-struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
+static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
const uuid_le *uuid)
{
struct mei_me_client *me_cl;
+ const uuid_le *pn;
- list_for_each_entry(me_cl, &dev->me_clients, list)
- if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
+ WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
+
+ list_for_each_entry(me_cl, &dev->me_clients, list) {
+ pn = &me_cl->props.protocol_name;
+ if (uuid_le_cmp(*uuid, *pn) == 0)
return mei_me_cl_get(me_cl);
+ }
return NULL;
}
/**
+ * mei_me_cl_by_uuid - locate me client by uuid
+ * increases ref count
+ *
+ * @dev: mei device
+ * @uuid: me client uuid
+ *
+ * Return: me client or NULL if not found
+ *
+ * Locking: dev->me_clients_rwsem
+ */
+struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
+ const uuid_le *uuid)
+{
+ struct mei_me_client *me_cl;
+
+ down_read(&dev->me_clients_rwsem);
+ me_cl = __mei_me_cl_by_uuid(dev, uuid);
+ up_read(&dev->me_clients_rwsem);
+
+ return me_cl;
+}
+
+/**
* mei_me_cl_by_id - locate me client by client id
* increases ref count
*
* @dev: the device structure
* @client_id: me client id
*
- * Locking: called under "dev->device_lock" lock
- *
* Return: me client or NULL if not found
+ *
+ * Locking: dev->me_clients_rwsem
*/
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
+ struct mei_me_client *__me_cl, *me_cl = NULL;
+
+ down_read(&dev->me_clients_rwsem);
+ list_for_each_entry(__me_cl, &dev->me_clients, list) {
+ if (__me_cl->client_id == client_id) {
+ me_cl = mei_me_cl_get(__me_cl);
+ break;
+ }
+ }
+ up_read(&dev->me_clients_rwsem);
+
+ return me_cl;
+}
+
+/**
+ * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
+ * increases ref count
+ *
+ * @dev: the device structure
+ * @uuid: me client uuid
+ * @client_id: me client id
+ *
+ * Return: me client or null if not found
+ *
+ * Locking: dev->me_clients_rwsem
+ */
+static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
+ const uuid_le *uuid, u8 client_id)
+{
struct mei_me_client *me_cl;
+ const uuid_le *pn;
+
+ WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
- list_for_each_entry(me_cl, &dev->me_clients, list)
- if (me_cl->client_id == client_id)
+ list_for_each_entry(me_cl, &dev->me_clients, list) {
+ pn = &me_cl->props.protocol_name;
+ if (uuid_le_cmp(*uuid, *pn) == 0 &&
+ me_cl->client_id == client_id)
return mei_me_cl_get(me_cl);
+ }
return NULL;
}
+
/**
* mei_me_cl_by_uuid_id - locate me client by client id and uuid
* increases ref count
@@ -135,21 +231,18 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
* @uuid: me client uuid
* @client_id: me client id
*
- * Locking: called under "dev->device_lock" lock
- *
- * Return: me client or NULL if not found
+ * Return: me client or null if not found
*/
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id)
{
struct mei_me_client *me_cl;
- list_for_each_entry(me_cl, &dev->me_clients, list)
- if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
- me_cl->client_id == client_id)
- return mei_me_cl_get(me_cl);
+ down_read(&dev->me_clients_rwsem);
+ me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
+ up_read(&dev->me_clients_rwsem);
- return NULL;
+ return me_cl;
}
/**
@@ -162,12 +255,14 @@ struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
*/
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
- struct mei_me_client *me_cl, *next;
+ struct mei_me_client *me_cl;
dev_dbg(dev->dev, "remove %pUl\n", uuid);
- list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
- if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
- mei_me_cl_put(me_cl);
+
+ down_write(&dev->me_clients_rwsem);
+ me_cl = __mei_me_cl_by_uuid(dev, uuid);
+ __mei_me_cl_del(dev, me_cl);
+ up_write(&dev->me_clients_rwsem);
}
/**
@@ -181,15 +276,14 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
*/
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
- struct mei_me_client *me_cl, *next;
- const uuid_le *pn;
+ struct mei_me_client *me_cl;
dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
- list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
- pn = &me_cl->props.protocol_name;
- if (me_cl->client_id == id && uuid_le_cmp(*uuid, *pn) == 0)
- mei_me_cl_put(me_cl);
- }
+
+ down_write(&dev->me_clients_rwsem);
+ me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
+ __mei_me_cl_del(dev, me_cl);
+ up_write(&dev->me_clients_rwsem);
}
/**
@@ -203,12 +297,12 @@ void mei_me_cl_rm_all(struct mei_device *dev)
{
struct mei_me_client *me_cl, *next;
+ down_write(&dev->me_clients_rwsem);
list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
- mei_me_cl_put(me_cl);
+ __mei_me_cl_del(dev, me_cl);
+ up_write(&dev->me_clients_rwsem);
}
-
-
/**
* mei_cl_cmp_id - tells if the clients are the same
*
@@ -227,7 +321,48 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
}
/**
- * mei_io_list_flush - removes cbs belonging to cl.
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+ if (cb == NULL)
+ return;
+
+ list_del(&cb->list);
+ kfree(cb->buf.data);
+ kfree(cb);
+}
+
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl: mei client
+ * @type: operation type
+ * @fp: pointer to file structure
+ *
+ * Return: mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
+ struct file *fp)
+{
+ struct mei_cl_cb *cb;
+
+ cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ if (!cb)
+ return NULL;
+
+ INIT_LIST_HEAD(&cb->list);
+ cb->file_object = fp;
+ cb->cl = cl;
+ cb->buf_idx = 0;
+ cb->fop_type = type;
+ return cb;
+}
+
+/**
+ * __mei_io_list_flush - removes and frees cbs belonging to cl.
*
* @list: an instance of our list structure
* @cl: host client, can be NULL for flushing the whole list
@@ -236,13 +371,12 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
static void __mei_io_list_flush(struct mei_cl_cb *list,
struct mei_cl *cl, bool free)
{
- struct mei_cl_cb *cb;
- struct mei_cl_cb *next;
+ struct mei_cl_cb *cb, *next;
/* enable removing everything if no cl is specified */
list_for_each_entry_safe(cb, next, &list->list, list) {
if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
- list_del(&cb->list);
+ list_del_init(&cb->list);
if (free)
mei_io_cb_free(cb);
}
@@ -260,7 +394,6 @@ void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
__mei_io_list_flush(list, cl, false);
}
-
/**
* mei_io_list_free - removes cb belonging to cl and free them
*
@@ -273,103 +406,107 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
}
/**
- * mei_io_cb_free - free mei_cb_private related memory
+ * mei_io_cb_alloc_buf - allocate callback buffer
*
- * @cb: mei callback struct
+ * @cb: io callback structure
+ * @length: size of the buffer
+ *
+ * Return: 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
*/
-void mei_io_cb_free(struct mei_cl_cb *cb)
+int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
{
- if (cb == NULL)
- return;
+ if (!cb)
+ return -EINVAL;
- kfree(cb->request_buffer.data);
- kfree(cb->response_buffer.data);
- kfree(cb);
+ if (length == 0)
+ return 0;
+
+ cb->buf.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->buf.data)
+ return -ENOMEM;
+ cb->buf.size = length;
+ return 0;
}
/**
- * mei_io_cb_init - allocate and initialize io callback
+ * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
*
- * @cl: mei client
- * @fp: pointer to file structure
+ * @cl: host client
+ * @length: size of the buffer
+ * @type: operation type
+ * @fp: associated file pointer (might be NULL)
*
- * Return: mei_cl_cb pointer or NULL;
+ * Return: cb on success and NULL on failure
*/
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
+ enum mei_cb_file_ops type, struct file *fp)
{
struct mei_cl_cb *cb;
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ cb = mei_io_cb_init(cl, type, fp);
if (!cb)
return NULL;
- mei_io_list_init(cb);
+ if (mei_io_cb_alloc_buf(cb, length)) {
+ mei_io_cb_free(cb);
+ return NULL;
+ }
- cb->file_object = fp;
- cb->cl = cl;
- cb->buf_idx = 0;
return cb;
}
/**
- * mei_io_cb_alloc_req_buf - allocate request buffer
+ * mei_cl_read_cb - find this cl's callback in the read list
+ * for a specific file
*
- * @cb: io callback structure
- * @length: size of the buffer
+ * @cl: host client
+ * @fp: file pointer (matching cb file object), may be NULL
*
- * Return: 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
+ * Return: cb on success, NULL if cb is not found
*/
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
- if (!cb)
- return -EINVAL;
+ struct mei_cl_cb *cb;
- if (length == 0)
- return 0;
+ list_for_each_entry(cb, &cl->rd_completed, list)
+ if (!fp || fp == cb->file_object)
+ return cb;
- cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->request_buffer.data)
- return -ENOMEM;
- cb->request_buffer.size = length;
- return 0;
+ return NULL;
}
+
/**
- * mei_io_cb_alloc_resp_buf - allocate response buffer
- *
- * @cb: io callback structure
- * @length: size of the buffer
+ * mei_cl_read_cb_flush - free client's read pending and completed cbs
+ * for a specific file
*
- * Return: 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
+ * @cl: host client
+ * @fp: file pointer (matching cb file object), may be NULL
*/
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
- if (!cb)
- return -EINVAL;
+ struct mei_cl_cb *cb, *next;
- if (length == 0)
- return 0;
-
- cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->response_buffer.data)
- return -ENOMEM;
- cb->response_buffer.size = length;
- return 0;
-}
+ list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
+ if (!fp || fp == cb->file_object)
+ mei_io_cb_free(cb);
+ list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
+ if (!fp || fp == cb->file_object)
+ mei_io_cb_free(cb);
+}
/**
* mei_cl_flush_queues - flushes queue lists belonging to cl.
*
* @cl: host client
+ * @fp: file pointer (matching cb file object), may be NULL
*
* Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
*/
-int mei_cl_flush_queues(struct mei_cl *cl)
+int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
struct mei_device *dev;
@@ -379,13 +516,15 @@ int mei_cl_flush_queues(struct mei_cl *cl)
dev = cl->dev;
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
- mei_io_list_flush(&cl->dev->read_list, cl);
mei_io_list_free(&cl->dev->write_list, cl);
mei_io_list_free(&cl->dev->write_waiting_list, cl);
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
+
+ mei_cl_read_cb_flush(cl, fp);
+
return 0;
}
@@ -402,9 +541,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
+ INIT_LIST_HEAD(&cl->rd_completed);
+ INIT_LIST_HEAD(&cl->rd_pending);
INIT_LIST_HEAD(&cl->link);
INIT_LIST_HEAD(&cl->device_link);
- cl->reading_state = MEI_IDLE;
cl->writing_state = MEI_IDLE;
cl->dev = dev;
}
@@ -429,31 +569,14 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
}
/**
- * mei_cl_find_read_cb - find this cl's callback in the read list
+ * mei_cl_link - allocate host id in the host map
*
* @cl: host client
- *
- * Return: cb on success, NULL on error
- */
-struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
-{
- struct mei_device *dev = cl->dev;
- struct mei_cl_cb *cb;
-
- list_for_each_entry(cb, &dev->read_list.list, list)
- if (mei_cl_cmp_id(cl, cb->cl))
- return cb;
- return NULL;
-}
-
-/** mei_cl_link: allocate host id in the host map
- *
- * @cl - host client
- * @id - fixed host id or -1 for generic one
+ * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
*
* Return: 0 on success
* -EINVAL on incorrect values
- * -ENONET if client not found
+ * -EMFILE if open count exceeded.
*/
int mei_cl_link(struct mei_cl *cl, int id)
{
@@ -535,28 +658,31 @@ int mei_cl_unlink(struct mei_cl *cl)
void mei_host_client_init(struct work_struct *work)
{
- struct mei_device *dev = container_of(work,
- struct mei_device, init_work);
+ struct mei_device *dev =
+ container_of(work, struct mei_device, init_work);
struct mei_me_client *me_cl;
- struct mei_client_properties *props;
mutex_lock(&dev->device_lock);
- list_for_each_entry(me_cl, &dev->me_clients, list) {
- props = &me_cl->props;
- if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
- mei_amthif_host_init(dev);
- else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
- mei_wd_host_init(dev);
- else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
- mei_nfc_host_init(dev);
+ me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
+ if (me_cl)
+ mei_amthif_host_init(dev);
+ mei_me_cl_put(me_cl);
+
+ me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+ if (me_cl)
+ mei_wd_host_init(dev);
+ mei_me_cl_put(me_cl);
+
+ me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
+ if (me_cl)
+ mei_nfc_host_init(dev);
+ mei_me_cl_put(me_cl);
- }
dev->dev_state = MEI_DEV_ENABLED;
dev->reset_count = 0;
-
mutex_unlock(&dev->device_lock);
pm_runtime_mark_last_busy(dev->dev);
@@ -620,13 +746,10 @@ int mei_cl_disconnect(struct mei_cl *cl)
return rets;
}
- cb = mei_io_cb_init(cl, NULL);
- if (!cb) {
- rets = -ENOMEM;
+ cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
+ rets = cb ? 0 : -ENOMEM;
+ if (rets)
goto free;
- }
-
- cb->fop_type = MEI_FOP_DISCONNECT;
if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_disconnect_req(dev, cl)) {
@@ -727,13 +850,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
return rets;
}
- cb = mei_io_cb_init(cl, file);
- if (!cb) {
- rets = -ENOMEM;
+ cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
+ rets = cb ? 0 : -ENOMEM;
+ if (rets)
goto out;
- }
-
- cb->fop_type = MEI_FOP_CONNECT;
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
@@ -756,7 +876,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
- if (cl->state != MEI_FILE_CONNECTED) {
+ if (!mei_cl_is_connected(cl)) {
cl->state = MEI_FILE_DISCONNECTED;
/* something went really wrong */
if (!cl->status)
@@ -778,6 +898,37 @@ out:
}
/**
+ * mei_cl_alloc_linked - allocate and link host client
+ *
+ * @dev: the device structure
+ * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
+ *
+ * Return: cl on success ERR_PTR on failure
+ */
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
+{
+ struct mei_cl *cl;
+ int ret;
+
+ cl = mei_cl_allocate(dev);
+ if (!cl) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = mei_cl_link(cl, id);
+ if (ret)
+ goto err;
+
+ return cl;
+err:
+ kfree(cl);
+ return ERR_PTR(ret);
+}
+
+
+
+/**
* mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
*
* @cl: private data of the file object
@@ -866,10 +1017,11 @@ out:
*
* @cl: host client
* @length: number of bytes to read
+ * @fp: pointer to file structure
*
* Return: 0 on success, <0 on failure.
*/
-int mei_cl_read_start(struct mei_cl *cl, size_t length)
+int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -884,10 +1036,10 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
if (!mei_cl_is_connected(cl))
return -ENODEV;
- if (cl->read_cb) {
- cl_dbg(dev, cl, "read is pending.\n");
+ /* HW currently supports only one pending read */
+ if (!list_empty(&cl->rd_pending))
return -EBUSY;
- }
+
me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
if (!me_cl) {
cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
@@ -904,29 +1056,21 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
return rets;
}
- cb = mei_io_cb_init(cl, NULL);
- if (!cb) {
- rets = -ENOMEM;
- goto out;
- }
-
- rets = mei_io_cb_alloc_resp_buf(cb, length);
+ cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
+ rets = cb ? 0 : -ENOMEM;
if (rets)
goto out;
- cb->fop_type = MEI_FOP_READ;
if (mei_hbuf_acquire(dev)) {
rets = mei_hbm_cl_flow_control_req(dev, cl);
if (rets < 0)
goto out;
- list_add_tail(&cb->list, &dev->read_list.list);
+ list_add_tail(&cb->list, &cl->rd_pending);
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
- cl->read_cb = cb;
-
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
@@ -964,7 +1108,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
dev = cl->dev;
- buf = &cb->request_buffer;
+ buf = &cb->buf;
rets = mei_cl_flow_ctrl_creds(cl);
if (rets < 0)
@@ -999,7 +1143,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
}
cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
- cb->request_buffer.size, cb->buf_idx);
+ cb->buf.size, cb->buf_idx);
rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
if (rets) {
@@ -1011,6 +1155,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cl->status = 0;
cl->writing_state = MEI_WRITING;
cb->buf_idx += mei_hdr.length;
+ cb->completed = mei_hdr.msg_complete == 1;
if (mei_hdr.msg_complete) {
if (mei_cl_flow_ctrl_reduce(cl))
@@ -1048,7 +1193,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
dev = cl->dev;
- buf = &cb->request_buffer;
+ buf = &cb->buf;
cl_dbg(dev, cl, "size=%d\n", buf->size);
@@ -1059,7 +1204,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
return rets;
}
- cb->fop_type = MEI_FOP_WRITE;
cb->buf_idx = 0;
cl->writing_state = MEI_IDLE;
@@ -1099,6 +1243,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
+ cb->completed = mei_hdr.msg_complete == 1;
out:
if (mei_hdr.msg_complete) {
@@ -1151,11 +1296,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
if (waitqueue_active(&cl->tx_wait))
wake_up_interruptible(&cl->tx_wait);
- } else if (cb->fop_type == MEI_FOP_READ &&
- MEI_READING == cl->reading_state) {
- cl->reading_state = MEI_READ_COMPLETE;
+ } else if (cb->fop_type == MEI_FOP_READ) {
+ list_add_tail(&cb->list, &cl->rd_completed);
if (waitqueue_active(&cl->rx_wait))
- wake_up_interruptible(&cl->rx_wait);
+ wake_up_interruptible_all(&cl->rx_wait);
else
mei_cl_bus_rx_event(cl);
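
The client.c hunks move the ME client list under me_clients_rwsem and make lookups take a reference only while the object is still live, via kref_get_unless_zero(). A compact sketch of that pattern with illustrative example_* names; callers drop the reference afterwards with kref_put() and their own release function.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/types.h>

struct example_me_cl {
	struct list_head list;
	struct kref refcnt;
	u8 client_id;
};

static struct example_me_cl *example_get(struct example_me_cl *me_cl)
{
	/* kref_get_unless_zero() refuses a reference once the count hit zero */
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;
	return NULL;
}

static struct example_me_cl *example_by_id(struct rw_semaphore *sem,
					   struct list_head *clients, u8 id)
{
	struct example_me_cl *pos, *found = NULL;

	down_read(sem);
	list_for_each_entry(pos, clients, list) {
		if (pos->client_id == id) {
			found = example_get(pos);
			break;
		}
	}
	up_read(sem);

	return found;	/* caller is responsible for the final put */
}
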
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index cfcde8e97fc4..0a39e5d45171 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -31,7 +31,10 @@ void mei_me_cl_init(struct mei_me_client *me_cl);
void mei_me_cl_put(struct mei_me_client *me_cl);
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl);
-struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
+void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl);
+void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl);
+
+struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
const uuid_le *uuid);
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
@@ -44,10 +47,10 @@ void mei_me_cl_rm_all(struct mei_device *dev);
/*
* MEI IO Functions
*/
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
+ struct file *fp);
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
+int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
/**
@@ -72,9 +75,14 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
int mei_cl_link(struct mei_cl *cl, int id);
int mei_cl_unlink(struct mei_cl *cl);
-int mei_cl_flush_queues(struct mei_cl *cl);
-struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
+struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
+ const struct file *fp);
+void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
+struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
+ enum mei_cb_file_ops type, struct file *fp);
+int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
@@ -82,23 +90,25 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/*
* MEI input output function prototype
*/
+
+/**
+ * mei_cl_is_connected - host client is connected
+ *
+ * @cl: host client
+ *
+ * Return: true if the host client is connected
+ */
static inline bool mei_cl_is_connected(struct mei_cl *cl)
{
- return cl->dev &&
- cl->dev->dev_state == MEI_DEV_ENABLED &&
- cl->state == MEI_FILE_CONNECTED;
-}
-static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
-{
- return MEI_FILE_INITIALIZING == cl->state ||
- MEI_FILE_DISCONNECTED == cl->state ||
- MEI_FILE_DISCONNECTING == cl->state;
+ return cl->state == MEI_FILE_CONNECTED;
}
bool mei_cl_is_other_connecting(struct mei_cl *cl);
int mei_cl_disconnect(struct mei_cl *cl);
int mei_cl_connect(struct mei_cl *cl, struct file *file);
-int mei_cl_read_start(struct mei_cl *cl, size_t length);
+int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
+int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
+ struct mei_cl_cb *cmpl_list);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index b125380ee871..d9cd7e6ee484 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -28,7 +28,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct mei_device *dev = fp->private_data;
- struct mei_me_client *me_cl, *n;
+ struct mei_me_client *me_cl;
size_t bufsz = 1;
char *buf;
int i = 0;
@@ -38,15 +38,14 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
#define HDR \
" |id|fix| UUID |con|msg len|sb|refc|\n"
- mutex_lock(&dev->device_lock);
-
+ down_read(&dev->me_clients_rwsem);
list_for_each_entry(me_cl, &dev->me_clients, list)
bufsz++;
bufsz *= sizeof(HDR) + 1;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
- mutex_unlock(&dev->device_lock);
+ up_read(&dev->me_clients_rwsem);
return -ENOMEM;
}
@@ -56,10 +55,9 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
- list_for_each_entry_safe(me_cl, n, &dev->me_clients, list) {
+ list_for_each_entry(me_cl, &dev->me_clients, list) {
- me_cl = mei_me_cl_get(me_cl);
- if (me_cl) {
+ if (mei_me_cl_get(me_cl)) {
pos += scnprintf(buf + pos, bufsz - pos,
"%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
i++, me_cl->client_id,
@@ -69,12 +67,13 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
me_cl->props.max_msg_length,
me_cl->props.single_recv_buf,
atomic_read(&me_cl->refcnt.refcount));
- }
- mei_me_cl_put(me_cl);
+ mei_me_cl_put(me_cl);
+ }
}
+
out:
- mutex_unlock(&dev->device_lock);
+ up_read(&dev->me_clients_rwsem);
ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
kfree(buf);
return ret;
@@ -118,7 +117,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
pos += scnprintf(buf + pos, bufsz - pos,
"%2d|%2d|%4d|%5d|%2d|%2d|\n",
i, cl->me_client_id, cl->host_client_id, cl->state,
- cl->reading_state, cl->writing_state);
+ !list_empty(&cl->rd_completed), cl->writing_state);
i++;
}
out:
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index c8412d41e4f1..58da92565c5e 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -338,7 +338,8 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
me_cl->client_id = res->me_addr;
me_cl->mei_flow_ctrl_creds = 0;
- list_add(&me_cl->list, &dev->me_clients);
+ mei_me_cl_add(dev, me_cl);
+
return 0;
}
@@ -638,7 +639,7 @@ static void mei_hbm_cl_res(struct mei_device *dev,
continue;
if (mei_hbm_cl_addr_equal(cl, rs)) {
- list_del(&cb->list);
+ list_del_init(&cb->list);
break;
}
}
@@ -683,10 +684,9 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
cl->state = MEI_FILE_DISCONNECTED;
cl->timer_count = 0;
- cb = mei_io_cb_init(cl, NULL);
+ cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
if (!cb)
return -ENOMEM;
- cb->fop_type = MEI_FOP_DISCONNECT_RSP;
cl_dbg(dev, cl, "add disconnect response as first\n");
list_add(&cb->list, &dev->ctrl_wr_list.list);
}
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index f8fd503dfbd6..6fb75e62a764 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -25,6 +25,8 @@
#include "hw-me.h"
#include "hw-me-regs.h"
+#include "mei-trace.h"
+
/**
* mei_me_reg_read - Reads 32bit data from the mei device
*
@@ -61,45 +63,79 @@ static inline void mei_me_reg_write(const struct mei_me_hw *hw,
*
* Return: ME_CB_RW register value (u32)
*/
-static u32 mei_me_mecbrw_read(const struct mei_device *dev)
+static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
+
+/**
+ * mei_me_hcbww_write - write 32bit data to the host circular buffer
+ *
+ * @dev: the device structure
+ * @data: 32bit data to be written to the host circular buffer
+ */
+static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
+{
+ mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
+}
+
/**
* mei_me_mecsr_read - Reads 32bit data from the ME CSR
*
- * @hw: the me hardware structure
+ * @dev: the device structure
*
* Return: ME_CSR_HA register value (u32)
*/
-static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
+static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
- return mei_me_reg_read(hw, ME_CSR_HA);
+ u32 reg;
+
+ reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
+ trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
+
+ return reg;
}
/**
* mei_hcsr_read - Reads 32bit data from the host CSR
*
- * @hw: the me hardware structure
+ * @dev: the device structure
*
* Return: H_CSR register value (u32)
*/
-static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
+static inline u32 mei_hcsr_read(const struct mei_device *dev)
+{
+ u32 reg;
+
+ reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
+ trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
+
+ return reg;
+}
+
+/**
+ * mei_hcsr_write - writes H_CSR register to the mei device
+ *
+ * @dev: the device structure
+ * @reg: new register value
+ */
+static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
- return mei_me_reg_read(hw, H_CSR);
+ trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
+ mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
/**
* mei_hcsr_set - writes H_CSR register to the mei device,
* and ignores the H_IS bit for it is write-one-to-zero.
*
- * @hw: the me hardware structure
- * @hcsr: new register value
+ * @dev: the device structure
+ * @reg: new register value
*/
-static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
+static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
- hcsr &= ~H_IS;
- mei_me_reg_write(hw, H_CSR, hcsr);
+ reg &= ~H_IS;
+ mei_hcsr_write(dev, reg);
}
/**
@@ -141,7 +177,7 @@ static int mei_me_fw_status(struct mei_device *dev,
static void mei_me_hw_config(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(to_me_hw(dev));
+ u32 hcsr = mei_hcsr_read(dev);
/* Doesn't change in runtime */
dev->hbuf_depth = (hcsr & H_CBD) >> 24;
@@ -170,11 +206,10 @@ static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
*/
static void mei_me_intr_clear(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
if ((hcsr & H_IS) == H_IS)
- mei_me_reg_write(hw, H_CSR, hcsr);
+ mei_hcsr_write(dev, hcsr);
}
/**
* mei_me_intr_enable - enables mei device interrupts
@@ -183,11 +218,10 @@ static void mei_me_intr_clear(struct mei_device *dev)
*/
static void mei_me_intr_enable(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
hcsr |= H_IE;
- mei_hcsr_set(hw, hcsr);
+ mei_hcsr_set(dev, hcsr);
}
/**
@@ -197,11 +231,10 @@ static void mei_me_intr_enable(struct mei_device *dev)
*/
static void mei_me_intr_disable(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
hcsr &= ~H_IE;
- mei_hcsr_set(hw, hcsr);
+ mei_hcsr_set(dev, hcsr);
}
/**
@@ -211,12 +244,11 @@ static void mei_me_intr_disable(struct mei_device *dev)
*/
static void mei_me_hw_reset_release(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
hcsr |= H_IG;
hcsr &= ~H_RST;
- mei_hcsr_set(hw, hcsr);
+ mei_hcsr_set(dev, hcsr);
/* complete this write before we set host ready on another CPU */
mmiowb();
@@ -231,8 +263,7 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
*/
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
/* H_RST may be found lit before reset is started,
* for example if preceding reset flow hasn't completed.
@@ -242,8 +273,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
if ((hcsr & H_RST) == H_RST) {
dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
hcsr &= ~H_RST;
- mei_hcsr_set(hw, hcsr);
- hcsr = mei_hcsr_read(hw);
+ mei_hcsr_set(dev, hcsr);
+ hcsr = mei_hcsr_read(dev);
}
hcsr |= H_RST | H_IG | H_IS;
@@ -254,13 +285,13 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
hcsr &= ~H_IE;
dev->recvd_hw_ready = false;
- mei_me_reg_write(hw, H_CSR, hcsr);
+ mei_hcsr_write(dev, hcsr);
/*
* Host reads the H_CSR once to ensure that the
* posted write to H_CSR completes.
*/
- hcsr = mei_hcsr_read(hw);
+ hcsr = mei_hcsr_read(dev);
if ((hcsr & H_RST) == 0)
dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
@@ -281,11 +312,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
*/
static void mei_me_host_set_ready(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
hcsr |= H_IE | H_IG | H_RDY;
- mei_hcsr_set(hw, hcsr);
+ mei_hcsr_set(dev, hcsr);
}
/**
@@ -296,8 +326,7 @@ static void mei_me_host_set_ready(struct mei_device *dev)
*/
static bool mei_me_host_is_ready(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
return (hcsr & H_RDY) == H_RDY;
}
@@ -310,8 +339,7 @@ static bool mei_me_host_is_ready(struct mei_device *dev)
*/
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 mecsr = mei_me_mecsr_read(hw);
+ u32 mecsr = mei_me_mecsr_read(dev);
return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}
@@ -368,11 +396,10 @@ static int mei_me_hw_start(struct mei_device *dev)
*/
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr;
char read_ptr, write_ptr;
- hcsr = mei_hcsr_read(hw);
+ hcsr = mei_hcsr_read(dev);
read_ptr = (char) ((hcsr & H_CBRP) >> 8);
write_ptr = (char) ((hcsr & H_CBWP) >> 16);
@@ -439,7 +466,6 @@ static int mei_me_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
unsigned char *buf)
{
- struct mei_me_hw *hw = to_me_hw(dev);
unsigned long rem;
unsigned long length = header->length;
u32 *reg_buf = (u32 *)buf;
@@ -457,21 +483,21 @@ static int mei_me_write_message(struct mei_device *dev,
if (empty_slots < 0 || dw_cnt > empty_slots)
return -EMSGSIZE;
- mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
+ mei_me_hcbww_write(dev, *((u32 *) header));
for (i = 0; i < length / 4; i++)
- mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
+ mei_me_hcbww_write(dev, reg_buf[i]);
rem = length & 0x3;
if (rem > 0) {
u32 reg = 0;
memcpy(&reg, &buf[length - rem], rem);
- mei_me_reg_write(hw, H_CB_WW, reg);
+ mei_me_hcbww_write(dev, reg);
}
- hcsr = mei_hcsr_read(hw) | H_IG;
- mei_hcsr_set(hw, hcsr);
+ hcsr = mei_hcsr_read(dev) | H_IG;
+ mei_hcsr_set(dev, hcsr);
if (!mei_me_hw_is_ready(dev))
return -EIO;
@@ -487,12 +513,11 @@ static int mei_me_write_message(struct mei_device *dev,
*/
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
u32 me_csr;
char read_ptr, write_ptr;
unsigned char buffer_depth, filled_slots;
- me_csr = mei_me_mecsr_read(hw);
+ me_csr = mei_me_mecsr_read(dev);
buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
@@ -518,7 +543,6 @@ static int mei_me_count_full_read_slots(struct mei_device *dev)
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
unsigned long buffer_length)
{
- struct mei_me_hw *hw = to_me_hw(dev);
u32 *reg_buf = (u32 *)buffer;
u32 hcsr;
@@ -531,49 +555,59 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
memcpy(reg_buf, &reg, buffer_length);
}
- hcsr = mei_hcsr_read(hw) | H_IG;
- mei_hcsr_set(hw, hcsr);
+ hcsr = mei_hcsr_read(dev) | H_IG;
+ mei_hcsr_set(dev, hcsr);
return 0;
}
/**
- * mei_me_pg_enter - write pg enter register
+ * mei_me_pg_set - write pg enter register
*
* @dev: the device structure
*/
-static void mei_me_pg_enter(struct mei_device *dev)
+static void mei_me_pg_set(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+ u32 reg;
+
+ reg = mei_me_reg_read(hw, H_HPG_CSR);
+ trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
reg |= H_HPG_CSR_PGI;
+
+ trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
- * mei_me_pg_exit - write pg exit register
+ * mei_me_pg_unset - write pg exit register
*
* @dev: the device structure
*/
-static void mei_me_pg_exit(struct mei_device *dev)
+static void mei_me_pg_unset(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+ u32 reg;
+
+ reg = mei_me_reg_read(hw, H_HPG_CSR);
+ trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
reg |= H_HPG_CSR_PGIHEXR;
+
+ trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
- * mei_me_pg_set_sync - perform pg entry procedure
+ * mei_me_pg_enter_sync - perform pg entry procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
-int mei_me_pg_set_sync(struct mei_device *dev)
+int mei_me_pg_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -591,7 +625,7 @@ int mei_me_pg_set_sync(struct mei_device *dev)
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
- mei_me_pg_enter(dev);
+ mei_me_pg_set(dev);
ret = 0;
} else {
ret = -ETIME;
@@ -604,13 +638,13 @@ int mei_me_pg_set_sync(struct mei_device *dev)
}
/**
- * mei_me_pg_unset_sync - perform pg exit procedure
+ * mei_me_pg_exit_sync - perform pg exit procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
-int mei_me_pg_unset_sync(struct mei_device *dev)
+int mei_me_pg_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -621,7 +655,7 @@ int mei_me_pg_unset_sync(struct mei_device *dev)
dev->pg_event = MEI_PG_EVENT_WAIT;
- mei_me_pg_exit(dev);
+ mei_me_pg_unset(dev);
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
@@ -649,8 +683,7 @@ reply:
*/
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
+ u32 reg = mei_me_mecsr_read(dev);
if ((reg & ME_PGIC_HRA) == 0)
goto notsupported;
@@ -683,14 +716,13 @@ notsupported:
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
- struct mei_me_hw *hw = to_me_hw(dev);
- u32 csr_reg = mei_hcsr_read(hw);
+ u32 hcsr = mei_hcsr_read(dev);
- if ((csr_reg & H_IS) != H_IS)
+ if ((hcsr & H_IS) != H_IS)
return IRQ_NONE;
/* clear H_IS bit in H_CSR */
- mei_me_reg_write(hw, H_CSR, csr_reg);
+ mei_hcsr_write(dev, hcsr);
return IRQ_WAKE_THREAD;
}
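
The hw-me.c hunks above switch every helper from the raw struct mei_me_hw handle to struct mei_device, so the accessors can attach per-device tracing to each register transaction. The accessors themselves are not part of this excerpt; a plausible shape for them, assuming the to_me_hw(), mei_me_reg_read()/mei_me_reg_write() helpers and the H_CSR offset that appear elsewhere in this diff, is:

	/* Sketch only: the real definitions live in hw-me.c, outside this hunk. */
	static inline u32 mei_hcsr_read(struct mei_device *dev)
	{
		u32 reg;

		reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
		trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

		return reg;
	}

	static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
	{
		trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
		mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
	}

With wrappers of this shape, callers such as mei_me_hw_reset() no longer need their own to_me_hw() local, which is exactly what the hunks above delete.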
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index d6567af44377..6022d52af6f6 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -71,8 +71,8 @@ extern const struct mei_cfg mei_me_pch8_sps_cfg;
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
const struct mei_cfg *cfg);
-int mei_me_pg_set_sync(struct mei_device *dev);
-int mei_me_pg_unset_sync(struct mei_device *dev);
+int mei_me_pg_enter_sync(struct mei_device *dev);
+int mei_me_pg_exit_sync(struct mei_device *dev);
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 618ea721aca8..7abafe7d120d 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -412,7 +412,7 @@ static void mei_txe_intr_disable(struct mei_device *dev)
mei_txe_br_reg_write(hw, HIER_REG, 0);
}
/**
- * mei_txe_intr_disable - enable all interrupts
+ * mei_txe_intr_enable - enable all interrupts
*
* @dev: the device structure
*/
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 6ad049a08e4d..97353cf8d9b6 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -389,6 +389,7 @@ void mei_device_init(struct mei_device *dev,
INIT_LIST_HEAD(&dev->device_list);
INIT_LIST_HEAD(&dev->me_clients);
mutex_init(&dev->device_lock);
+ init_rwsem(&dev->me_clients_rwsem);
init_waitqueue_head(&dev->wait_hw_ready);
init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_hbm_start);
@@ -396,7 +397,6 @@ void mei_device_init(struct mei_device *dev,
dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;
- mei_io_list_init(&dev->read_list);
mei_io_list_init(&dev->write_list);
mei_io_list_init(&dev->write_waiting_list);
mei_io_list_init(&dev->ctrl_wr_list);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 711cddfa9c99..3f84d2edcde4 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -43,7 +43,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
list_for_each_entry_safe(cb, next, &compl_list->list, list) {
cl = cb->cl;
- list_del(&cb->list);
+ list_del_init(&cb->list);
dev_dbg(dev->dev, "completing call back.\n");
if (cl == &dev->iamthif_cl)
@@ -68,91 +68,91 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
return cl->host_client_id == mei_hdr->host_addr &&
cl->me_client_id == mei_hdr->me_addr;
}
+
/**
- * mei_cl_is_reading - checks if the client
- * is the one to read this message
- *
- * @cl: mei client
- * @mei_hdr: header of mei message
+ * mei_irq_discard_msg - discard received message
*
- * Return: true on match and false otherwise
+ * @dev: mei device
+ * @hdr: message header
*/
-static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
+static inline
+void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
- return mei_cl_hbm_equal(cl, mei_hdr) &&
- cl->state == MEI_FILE_CONNECTED &&
- cl->reading_state != MEI_READ_COMPLETE;
+ /*
+ * no need to check for size as it is guaranteed
+ * that length fits into rd_msg_buf
+ */
+ mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+ dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
+ MEI_HDR_PRM(hdr));
}
/**
* mei_cl_irq_read_msg - process client message
*
- * @dev: the device structure
+ * @cl: reading client
* @mei_hdr: header of mei client message
- * @complete_list: An instance of our list structure
+ * @complete_list: completion list
*
- * Return: 0 on success, <0 on failure.
+ * Return: always 0
*/
-static int mei_cl_irq_read_msg(struct mei_device *dev,
- struct mei_msg_hdr *mei_hdr,
- struct mei_cl_cb *complete_list)
+int mei_cl_irq_read_msg(struct mei_cl *cl,
+ struct mei_msg_hdr *mei_hdr,
+ struct mei_cl_cb *complete_list)
{
- struct mei_cl *cl;
- struct mei_cl_cb *cb, *next;
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb;
unsigned char *buffer = NULL;
- list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
- cl = cb->cl;
- if (!mei_cl_is_reading(cl, mei_hdr))
- continue;
-
- cl->reading_state = MEI_READING;
+ cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
+ if (!cb) {
+ cl_err(dev, cl, "pending read cb not found\n");
+ goto out;
+ }
- if (cb->response_buffer.size == 0 ||
- cb->response_buffer.data == NULL) {
- cl_err(dev, cl, "response buffer is not allocated.\n");
- list_del(&cb->list);
- return -ENOMEM;
- }
+ if (!mei_cl_is_connected(cl)) {
+ cl_dbg(dev, cl, "not connected\n");
+ cb->status = -ENODEV;
+ goto out;
+ }
- if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
- cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
- cb->response_buffer.size,
- mei_hdr->length, cb->buf_idx);
- buffer = krealloc(cb->response_buffer.data,
- mei_hdr->length + cb->buf_idx,
- GFP_KERNEL);
-
- if (!buffer) {
- list_del(&cb->list);
- return -ENOMEM;
- }
- cb->response_buffer.data = buffer;
- cb->response_buffer.size =
- mei_hdr->length + cb->buf_idx;
- }
+ if (cb->buf.size == 0 || cb->buf.data == NULL) {
+ cl_err(dev, cl, "response buffer is not allocated.\n");
+ list_move_tail(&cb->list, &complete_list->list);
+ cb->status = -ENOMEM;
+ goto out;
+ }
- buffer = cb->response_buffer.data + cb->buf_idx;
- mei_read_slots(dev, buffer, mei_hdr->length);
+ if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
+ cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
+ cb->buf.size, mei_hdr->length, cb->buf_idx);
+ buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
+ GFP_KERNEL);
- cb->buf_idx += mei_hdr->length;
- if (mei_hdr->msg_complete) {
- cl->status = 0;
- list_del(&cb->list);
- cl_dbg(dev, cl, "completed read length = %lu\n",
- cb->buf_idx);
- list_add_tail(&cb->list, &complete_list->list);
+ if (!buffer) {
+ cb->status = -ENOMEM;
+ list_move_tail(&cb->list, &complete_list->list);
+ goto out;
}
- break;
+ cb->buf.data = buffer;
+ cb->buf.size = mei_hdr->length + cb->buf_idx;
}
- dev_dbg(dev->dev, "message read\n");
- if (!buffer) {
- mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
- dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
- MEI_HDR_PRM(mei_hdr));
+ buffer = cb->buf.data + cb->buf_idx;
+ mei_read_slots(dev, buffer, mei_hdr->length);
+
+ cb->buf_idx += mei_hdr->length;
+
+ if (mei_hdr->msg_complete) {
+ cb->read_time = jiffies;
+ cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
+ list_move_tail(&cb->list, &complete_list->list);
}
+out:
+ if (!buffer)
+ mei_irq_discard_msg(dev, mei_hdr);
+
return 0;
}
@@ -183,7 +183,6 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
cl->state = MEI_FILE_DISCONNECTED;
cl->status = 0;
- list_del(&cb->list);
mei_io_cb_free(cb);
return ret;
@@ -263,7 +262,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
- list_move_tail(&cb->list, &dev->read_list.list);
+ list_move_tail(&cb->list, &cl->rd_pending);
return 0;
}
@@ -301,7 +300,7 @@ static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
if (ret) {
cl->status = ret;
cb->buf_idx = 0;
- list_del(&cb->list);
+ list_del_init(&cb->list);
return ret;
}
@@ -378,25 +377,13 @@ int mei_irq_read_handler(struct mei_device *dev,
goto end;
}
- if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
- MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
- dev->iamthif_state == MEI_IAMTHIF_READING) {
-
- ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
- if (ret) {
- dev_err(dev->dev, "mei_amthif_irq_read_msg failed = %d\n",
- ret);
- goto end;
- }
+ if (cl == &dev->iamthif_cl) {
+ ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
} else {
- ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
- if (ret) {
- dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n",
- ret);
- goto end;
- }
+ ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
}
+
reset_slots:
/* reset the number of slots and header */
*slots = mei_count_full_read_slots(dev);
@@ -449,21 +436,9 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
cl = cb->cl;
cl->status = 0;
- list_del(&cb->list);
- if (cb->fop_type == MEI_FOP_WRITE &&
- cl != &dev->iamthif_cl) {
- cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
- cl->writing_state = MEI_WRITE_COMPLETE;
- list_add_tail(&cb->list, &cmpl_list->list);
- }
- if (cl == &dev->iamthif_cl) {
- cl_dbg(dev, cl, "check iamthif flow control.\n");
- if (dev->iamthif_flow_control_pending) {
- ret = mei_amthif_irq_read(dev, &slots);
- if (ret)
- return ret;
- }
- }
+ cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
+ cl->writing_state = MEI_WRITE_COMPLETE;
+ list_move_tail(&cb->list, &cmpl_list->list);
}
if (dev->wd_state == MEI_WD_STOPPING) {
@@ -587,10 +562,7 @@ void mei_timer(struct work_struct *work)
if (--dev->iamthif_stall_timer == 0) {
dev_err(dev->dev, "timer: amthif hanged.\n");
mei_reset(dev);
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_msg_buf_index = 0;
dev->iamthif_canceled = false;
- dev->iamthif_ioctl = true;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
@@ -636,4 +608,3 @@ out:
schedule_delayed_work(&dev->timer_work, 2 * HZ);
mutex_unlock(&dev->device_lock);
}
-
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 3c019c0e60eb..3e2968159506 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -22,7 +22,6 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
-#include <linux/aio.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
@@ -59,24 +58,18 @@ static int mei_open(struct inode *inode, struct file *file)
mutex_lock(&dev->device_lock);
- cl = NULL;
-
- err = -ENODEV;
if (dev->dev_state != MEI_DEV_ENABLED) {
dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
+ err = -ENODEV;
goto err_unlock;
}
- err = -ENOMEM;
- cl = mei_cl_allocate(dev);
- if (!cl)
- goto err_unlock;
-
- /* open_handle_count check is handled in the mei_cl_link */
- err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
- if (err)
+ cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
+ if (IS_ERR(cl)) {
+ err = PTR_ERR(cl);
goto err_unlock;
+ }
file->private_data = cl;
@@ -86,7 +79,6 @@ static int mei_open(struct inode *inode, struct file *file)
err_unlock:
mutex_unlock(&dev->device_lock);
- kfree(cl);
return err;
}
@@ -101,7 +93,6 @@ err_unlock:
static int mei_release(struct inode *inode, struct file *file)
{
struct mei_cl *cl = file->private_data;
- struct mei_cl_cb *cb;
struct mei_device *dev;
int rets = 0;
@@ -115,33 +106,18 @@ static int mei_release(struct inode *inode, struct file *file)
rets = mei_amthif_release(dev, file);
goto out;
}
- if (cl->state == MEI_FILE_CONNECTED) {
+ if (mei_cl_is_connected(cl)) {
cl->state = MEI_FILE_DISCONNECTING;
cl_dbg(dev, cl, "disconnecting\n");
rets = mei_cl_disconnect(cl);
}
- mei_cl_flush_queues(cl);
+ mei_cl_flush_queues(cl, file);
cl_dbg(dev, cl, "removing\n");
mei_cl_unlink(cl);
-
- /* free read cb */
- cb = NULL;
- if (cl->read_cb) {
- cb = mei_cl_find_read_cb(cl);
- /* Remove entry from read list */
- if (cb)
- list_del(&cb->list);
-
- cb = cl->read_cb;
- cl->read_cb = NULL;
- }
-
file->private_data = NULL;
- mei_io_cb_free(cb);
-
kfree(cl);
out:
mutex_unlock(&dev->device_lock);
@@ -163,9 +139,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb = NULL;
struct mei_device *dev;
+ struct mei_cl_cb *cb = NULL;
int rets;
int err;
@@ -192,8 +167,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
- if (cl->read_cb) {
- cb = cl->read_cb;
+ cb = mei_cl_read_cb(cl, file);
+ if (cb) {
 /* read what is left */
if (cb->buf_idx > *offset)
goto copy_buffer;
@@ -209,7 +184,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
*offset = 0;
}
- err = mei_cl_read_start(cl, length);
+ err = mei_cl_read_start(cl, length, file);
if (err && err != -EBUSY) {
dev_dbg(dev->dev,
"mei start read failure with status = %d\n", err);
@@ -217,8 +192,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
- if (MEI_READ_COMPLETE != cl->reading_state &&
- !waitqueue_active(&cl->rx_wait)) {
+ if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
if (file->f_flags & O_NONBLOCK) {
rets = -EAGAIN;
goto out;
@@ -227,8 +201,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
- MEI_READ_COMPLETE == cl->reading_state ||
- mei_cl_is_transitioning(cl))) {
+ (!list_empty(&cl->rd_completed)) ||
+ (!mei_cl_is_connected(cl)))) {
if (signal_pending(current))
return -EINTR;
@@ -236,26 +210,28 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
}
mutex_lock(&dev->device_lock);
- if (mei_cl_is_transitioning(cl)) {
+ if (!mei_cl_is_connected(cl)) {
rets = -EBUSY;
goto out;
}
}
- cb = cl->read_cb;
-
+ cb = mei_cl_read_cb(cl, file);
if (!cb) {
- rets = -ENODEV;
- goto out;
- }
- if (cl->reading_state != MEI_READ_COMPLETE) {
rets = 0;
goto out;
}
- /* now copy the data to user space */
+
copy_buffer:
+ /* now copy the data to user space */
+ if (cb->status) {
+ rets = cb->status;
+ dev_dbg(dev->dev, "read operation failed %d\n", rets);
+ goto free;
+ }
+
dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n",
- cb->response_buffer.size, cb->buf_idx);
+ cb->buf.size, cb->buf_idx);
if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
rets = -EMSGSIZE;
goto free;
@@ -265,7 +241,7 @@ copy_buffer:
* however buf_idx may point beyond that */
length = min_t(size_t, length, cb->buf_idx - *offset);
- if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
+ if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto free;
@@ -277,13 +253,8 @@ copy_buffer:
goto out;
free:
- cb_pos = mei_cl_find_read_cb(cl);
- /* Remove entry from read list */
- if (cb_pos)
- list_del(&cb_pos->list);
mei_io_cb_free(cb);
- cl->reading_state = MEI_IDLE;
- cl->read_cb = NULL;
+
out:
dev_dbg(dev->dev, "end mei read rets= %d\n", rets);
mutex_unlock(&dev->device_lock);
@@ -337,9 +308,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- if (cl->state != MEI_FILE_CONNECTED) {
- dev_err(dev->dev, "host client = %d, is not connected to ME client = %d",
- cl->host_client_id, cl->me_client_id);
+ if (!mei_cl_is_connected(cl)) {
+ cl_err(dev, cl, "is not connected");
rets = -ENODEV;
goto out;
}
@@ -350,41 +320,22 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
timeout = write_cb->read_time +
mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
- if (time_after(jiffies, timeout) ||
- cl->reading_state == MEI_READ_COMPLETE) {
+ if (time_after(jiffies, timeout)) {
*offset = 0;
- list_del(&write_cb->list);
mei_io_cb_free(write_cb);
write_cb = NULL;
}
}
}
- /* free entry used in read */
- if (cl->reading_state == MEI_READ_COMPLETE) {
- *offset = 0;
- write_cb = mei_cl_find_read_cb(cl);
- if (write_cb) {
- list_del(&write_cb->list);
- mei_io_cb_free(write_cb);
- write_cb = NULL;
- cl->reading_state = MEI_IDLE;
- cl->read_cb = NULL;
- }
- } else if (cl->reading_state == MEI_IDLE)
- *offset = 0;
-
-
- write_cb = mei_io_cb_init(cl, file);
+ *offset = 0;
+ write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
if (!write_cb) {
rets = -ENOMEM;
goto out;
}
- rets = mei_io_cb_alloc_req_buf(write_cb, length);
- if (rets)
- goto out;
- rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
+ rets = copy_from_user(write_cb->buf.data, ubuf, length);
if (rets) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
@@ -392,7 +343,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
}
if (cl == &dev->iamthif_cl) {
- rets = mei_amthif_write(dev, write_cb);
+ rets = mei_amthif_write(cl, write_cb);
if (rets) {
dev_err(dev->dev,
@@ -465,7 +416,7 @@ static int mei_ioctl_connect_client(struct file *file,
*/
if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
dev_dbg(dev->dev, "FW Client is amthi\n");
- if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
+ if (!mei_cl_is_connected(&dev->iamthif_cl)) {
rets = -ENODEV;
goto end;
}
@@ -589,6 +540,7 @@ static long mei_compat_ioctl(struct file *file,
*/
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
unsigned int mask = 0;
@@ -600,27 +552,26 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
mutex_lock(&dev->device_lock);
- if (!mei_cl_is_connected(cl)) {
+
+ if (dev->dev_state != MEI_DEV_ENABLED ||
+ !mei_cl_is_connected(cl)) {
mask = POLLERR;
goto out;
}
- mutex_unlock(&dev->device_lock);
-
-
- if (cl == &dev->iamthif_cl)
- return mei_amthif_poll(dev, file, wait);
-
- poll_wait(file, &cl->tx_wait, wait);
-
- mutex_lock(&dev->device_lock);
-
- if (!mei_cl_is_connected(cl)) {
- mask = POLLERR;
+ if (cl == &dev->iamthif_cl) {
+ mask = mei_amthif_poll(dev, file, wait);
goto out;
}
- mask |= (POLLIN | POLLRDNORM);
+ if (req_events & (POLLIN | POLLRDNORM)) {
+ poll_wait(file, &cl->rx_wait, wait);
+
+ if (!list_empty(&cl->rd_completed))
+ mask |= POLLIN | POLLRDNORM;
+ else
+ mei_cl_read_start(cl, 0, file);
+ }
out:
mutex_unlock(&dev->device_lock);
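
The reworked mei_poll() above only arms the receive path when the caller actually asked for read events, by consulting poll_requested_events(). A minimal standalone sketch of that pattern, with a hypothetical wait queue and readiness test standing in for the driver state:

	#include <linux/poll.h>

	static DECLARE_WAIT_QUEUE_HEAD(rx_waitq);	/* hypothetical */

	static bool data_ready(void)			/* hypothetical readiness test */
	{
		return false;
	}

	static unsigned int demo_poll(struct file *file, poll_table *wait)
	{
		unsigned long req_events = poll_requested_events(wait);
		unsigned int mask = 0;

		/* Only touch the receive machinery if POLLIN/POLLRDNORM was
		 * requested, so a write-only poller does not trigger a
		 * speculative read, mirroring mei_cl_read_start() above. */
		if (req_events & (POLLIN | POLLRDNORM)) {
			poll_wait(file, &rx_waitq, wait);
			if (data_ready())
				mask |= POLLIN | POLLRDNORM;
		}

		return mask;
	}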
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
new file mode 100644
index 000000000000..388efb519138
--- /dev/null
+++ b/drivers/misc/mei/mei-trace.c
@@ -0,0 +1,25 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mei-trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
+EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
+#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
new file mode 100644
index 000000000000..47e1bc6551d4
--- /dev/null
+++ b/drivers/misc/mei/mei-trace.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#if !defined(_MEI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MEI_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <linux/device.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mei
+
+TRACE_EVENT(mei_reg_read,
+ TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(const char *, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev))
+ __entry->reg = reg;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] read %s:[%#x] = %#x",
+ __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_reg_write,
+ TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(const char *, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev))
+ __entry->reg = reg;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write %s[%#x] = %#x)",
+ __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+#endif /* _MEI_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mei-trace
+#include <trace/define_trace.h>
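
mei-trace.h follows the usual tracepoint layout: TRACE_EVENT() declares trace_mei_reg_read()/trace_mei_reg_write(), and exactly one translation unit (mei-trace.c above) defines CREATE_TRACE_POINTS before including the header so the static tracepoint data is emitted once. A short sketch of a caller, reusing the identifiers the hw-me.c hunks use for the H_HPG_CSR accesses:

	#include "mei-trace.h"

	/* Sketch of a traced register read; hw, dev and H_HPG_CSR are taken
	 * from the hw-me.c hunks earlier in this diff. */
	static u32 traced_hpg_read(struct mei_device *dev)
	{
		struct mei_me_hw *hw = to_me_hw(dev);
		u32 reg;

		reg = mei_me_reg_read(hw, H_HPG_CSR);
		trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

		return reg;
	}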
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 6c6ce9381535..f066ecd71939 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -194,23 +194,25 @@ struct mei_cl;
* @list: link in callback queue
* @cl: file client who is running this operation
* @fop_type: file operation type
- * @request_buffer: buffer to store request data
- * @response_buffer: buffer to store response data
+ * @buf: buffer for data associated with the callback
* @buf_idx: last read index
* @read_time: last read operation time stamp (iamthif)
* @file_object: pointer to file structure
+ * @status: io status of the cb
* @internal: communication between driver and FW flag
+ * @completed: the transfer or reception has completed
*/
struct mei_cl_cb {
struct list_head list;
struct mei_cl *cl;
enum mei_cb_file_ops fop_type;
- struct mei_msg_data request_buffer;
- struct mei_msg_data response_buffer;
+ struct mei_msg_data buf;
unsigned long buf_idx;
unsigned long read_time;
struct file *file_object;
+ int status;
u32 internal:1;
+ u32 completed:1;
};
/**
@@ -229,9 +231,9 @@ struct mei_cl_cb {
* @me_client_id: me/fw id
* @mei_flow_ctrl_creds: transmit flow credentials
* @timer_count: watchdog timer for operation completion
- * @reading_state: state of the rx
* @writing_state: state of the tx
- * @read_cb: current pending reading callback
+ * @rd_pending: pending read credits
+ * @rd_completed: completed read
*
* @device: device on the mei client bus
* @device_link: link to bus clients
@@ -249,9 +251,9 @@ struct mei_cl {
u8 me_client_id;
u8 mei_flow_ctrl_creds;
u8 timer_count;
- enum mei_file_transaction_states reading_state;
enum mei_file_transaction_states writing_state;
- struct mei_cl_cb *read_cb;
+ struct list_head rd_pending;
+ struct list_head rd_completed;
/* MEI CL bus data */
struct mei_cl_device *device;
@@ -423,7 +425,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @cdev : character device
* @minor : minor number allocated for device
*
- * @read_list : read completion list
* @write_list : write pending list
* @write_waiting_list : write completion list
* @ctrl_wr_list : pending control write list
@@ -460,6 +461,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @version : HBM protocol version in use
* @hbm_f_pg_supported : hbm feature pgi protocol
*
+ * @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
* @me_clients_map : FW clients bit map
* @host_clients_map : host clients id pool
@@ -480,12 +482,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @iamthif_mtu : amthif client max message length
* @iamthif_timer : time stamp of current amthif command completion
* @iamthif_stall_timer : timer to detect amthif hang
- * @iamthif_msg_buf : amthif current message buffer
- * @iamthif_msg_buf_size : size of current amthif message request buffer
- * @iamthif_msg_buf_index : current index in amthif message request buffer
* @iamthif_state : amthif processor state
- * @iamthif_flow_control_pending: amthif waits for flow control
- * @iamthif_ioctl : wait for completion if amthif control message
* @iamthif_canceled : current amthif command is canceled
*
* @init_work : work item for the device init
@@ -503,7 +500,6 @@ struct mei_device {
struct cdev cdev;
int minor;
- struct mei_cl_cb read_list;
struct mei_cl_cb write_list;
struct mei_cl_cb write_waiting_list;
struct mei_cl_cb ctrl_wr_list;
@@ -556,6 +552,7 @@ struct mei_device {
struct hbm_version version;
unsigned int hbm_f_pg_supported:1;
+ struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
@@ -579,12 +576,7 @@ struct mei_device {
int iamthif_mtu;
unsigned long iamthif_timer;
u32 iamthif_stall_timer;
- unsigned char *iamthif_msg_buf; /* Note: memory has to be allocated */
- u32 iamthif_msg_buf_size;
- u32 iamthif_msg_buf_index;
enum iamthif_states iamthif_state;
- bool iamthif_flow_control_pending;
- bool iamthif_ioctl;
bool iamthif_canceled;
struct work_struct init_work;
@@ -662,8 +654,6 @@ void mei_amthif_reset_params(struct mei_device *dev);
int mei_amthif_host_init(struct mei_device *dev);
-int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
-
int mei_amthif_read(struct mei_device *dev, struct file *file,
char __user *ubuf, size_t length, loff_t *offset);
@@ -675,13 +665,13 @@ int mei_amthif_release(struct mei_device *dev, struct file *file);
struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
struct file *file);
-void mei_amthif_run_next_cmd(struct mei_device *dev);
-
+int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
+int mei_amthif_run_next_cmd(struct mei_device *dev);
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
-int mei_amthif_irq_read_msg(struct mei_device *dev,
+int mei_amthif_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
struct mei_cl_cb *complete_list);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index bb61a119b8bb..c3bcb63686d7 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -482,8 +482,8 @@ err:
int mei_nfc_host_init(struct mei_device *dev)
{
struct mei_nfc_dev *ndev;
- struct mei_cl *cl_info, *cl = NULL;
- struct mei_me_client *me_cl;
+ struct mei_cl *cl_info, *cl;
+ struct mei_me_client *me_cl = NULL;
int ret;
@@ -500,17 +500,6 @@ int mei_nfc_host_init(struct mei_device *dev)
goto err;
}
- ndev->cl_info = mei_cl_allocate(dev);
- ndev->cl = mei_cl_allocate(dev);
-
- cl = ndev->cl;
- cl_info = ndev->cl_info;
-
- if (!cl || !cl_info) {
- ret = -ENOMEM;
- goto err;
- }
-
/* check for valid client id */
me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
if (!me_cl) {
@@ -519,17 +508,21 @@ int mei_nfc_host_init(struct mei_device *dev)
goto err;
}
+ cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
+ if (IS_ERR(cl_info)) {
+ ret = PTR_ERR(cl_info);
+ goto err;
+ }
+
cl_info->me_client_id = me_cl->client_id;
cl_info->cl_uuid = me_cl->props.protocol_name;
mei_me_cl_put(me_cl);
-
- ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY);
- if (ret)
- goto err;
-
+ me_cl = NULL;
list_add_tail(&cl_info->device_link, &dev->device_list);
+ ndev->cl_info = cl_info;
+
/* check for valid client id */
me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
if (!me_cl) {
@@ -538,16 +531,21 @@ int mei_nfc_host_init(struct mei_device *dev)
goto err;
}
+ cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
+ if (IS_ERR(cl)) {
+ ret = PTR_ERR(cl);
+ goto err;
+ }
+
cl->me_client_id = me_cl->client_id;
cl->cl_uuid = me_cl->props.protocol_name;
mei_me_cl_put(me_cl);
-
- ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
- if (ret)
- goto err;
+ me_cl = NULL;
list_add_tail(&cl->device_link, &dev->device_list);
+ ndev->cl = cl;
+
ndev->req_id = 1;
INIT_WORK(&ndev->init_work, mei_nfc_init);
@@ -557,6 +555,7 @@ int mei_nfc_host_init(struct mei_device *dev)
return 0;
err:
+ mei_me_cl_put(me_cl);
mei_nfc_free(ndev);
return ret;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index bd3039ab8f98..23f71f5ce4fb 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -21,7 +21,6 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
-#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
@@ -389,7 +388,7 @@ static int mei_me_pm_runtime_suspend(struct device *device)
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev))
- ret = mei_me_pg_set_sync(dev);
+ ret = mei_me_pg_enter_sync(dev);
else
ret = -EAGAIN;
@@ -414,7 +413,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
mutex_lock(&dev->device_lock);
- ret = mei_me_pg_unset_sync(dev);
+ ret = mei_me_pg_exit_sync(dev);
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index c86e2ddbe30a..dcfcba44b6f7 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -63,7 +63,7 @@ static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
}
}
/**
- * mei_probe - Device Initialization Routine
+ * mei_txe_probe - Device Initialization Routine
*
* @pdev: PCI device structure
* @ent: entry in mei_txe_pci_tbl
@@ -193,7 +193,7 @@ end:
}
/**
- * mei_remove - Device Removal Routine
+ * mei_txe_remove - Device Removal Routine
*
* @pdev: PCI device structure
*
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 475f1dea45bf..2725f865c3d6 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -160,9 +160,10 @@ int mei_wd_send(struct mei_device *dev)
*/
int mei_wd_stop(struct mei_device *dev)
{
+ struct mei_cl *cl = &dev->wd_cl;
int ret;
- if (dev->wd_cl.state != MEI_FILE_CONNECTED ||
+ if (!mei_cl_is_connected(cl) ||
dev->wd_state != MEI_WD_RUNNING)
return 0;
@@ -170,7 +171,7 @@ int mei_wd_stop(struct mei_device *dev)
dev->wd_state = MEI_WD_STOPPING;
- ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
+ ret = mei_cl_flow_ctrl_creds(cl);
if (ret < 0)
goto err;
@@ -202,22 +203,25 @@ err:
return ret;
}
-/*
+/**
* mei_wd_ops_start - wd start command from the watchdog core.
*
- * @wd_dev - watchdog device struct
+ * @wd_dev: watchdog device struct
*
* Return: 0 if success, negative errno code for failure
*/
static int mei_wd_ops_start(struct watchdog_device *wd_dev)
{
- int err = -ENODEV;
struct mei_device *dev;
+ struct mei_cl *cl;
+ int err = -ENODEV;
dev = watchdog_get_drvdata(wd_dev);
if (!dev)
return -ENODEV;
+ cl = &dev->wd_cl;
+
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
@@ -226,8 +230,8 @@ static int mei_wd_ops_start(struct watchdog_device *wd_dev)
goto end_unlock;
}
- if (dev->wd_cl.state != MEI_FILE_CONNECTED) {
- dev_dbg(dev->dev, "MEI Driver is not connected to Watchdog Client\n");
+ if (!mei_cl_is_connected(cl)) {
+ cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n");
goto end_unlock;
}
@@ -239,10 +243,10 @@ end_unlock:
return err;
}
-/*
+/**
* mei_wd_ops_stop - wd stop command from the watchdog core.
*
- * @wd_dev - watchdog device struct
+ * @wd_dev: watchdog device struct
*
* Return: 0 if success, negative errno code for failure
*/
@@ -261,10 +265,10 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
return 0;
}
-/*
+/**
* mei_wd_ops_ping - wd ping command from the watchdog core.
*
- * @wd_dev - watchdog device struct
+ * @wd_dev: watchdog device struct
*
* Return: 0 if success, negative errno code for failure
*/
@@ -282,8 +286,8 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
mutex_lock(&dev->device_lock);
- if (cl->state != MEI_FILE_CONNECTED) {
- dev_err(dev->dev, "wd: not connected.\n");
+ if (!mei_cl_is_connected(cl)) {
+ cl_err(dev, cl, "wd: not connected.\n");
ret = -ENODEV;
goto end;
}
@@ -311,11 +315,11 @@ end:
return ret;
}
-/*
+/**
* mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
*
- * @wd_dev - watchdog device struct
- * @timeout - timeout value to set
+ * @wd_dev: watchdog device struct
+ * @timeout: timeout value to set
*
* Return: 0 if success, negative errno code for failure
*/
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index ff2b0fb1a6be..d9fa609da061 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -309,7 +309,7 @@ void mic_complete_resume(struct mic_device *mdev)
*/
void mic_prepare_suspend(struct mic_device *mdev)
{
- int rc;
+ unsigned long timeout;
#define MIC_SUSPEND_TIMEOUT (60 * HZ)
@@ -331,10 +331,10 @@ void mic_prepare_suspend(struct mic_device *mdev)
*/
mic_set_state(mdev, MIC_SUSPENDING);
mutex_unlock(&mdev->mic_mutex);
- rc = wait_for_completion_timeout(&mdev->reset_wait,
- MIC_SUSPEND_TIMEOUT);
+ timeout = wait_for_completion_timeout(&mdev->reset_wait,
+ MIC_SUSPEND_TIMEOUT);
/* Force reset the card if the shutdown completion timed out */
- if (!rc) {
+ if (!timeout) {
mutex_lock(&mdev->mic_mutex);
mic_set_state(mdev, MIC_SUSPENDED);
mutex_unlock(&mdev->mic_mutex);
@@ -348,10 +348,10 @@ void mic_prepare_suspend(struct mic_device *mdev)
*/
mic_set_state(mdev, MIC_SUSPENDED);
mutex_unlock(&mdev->mic_mutex);
- rc = wait_for_completion_timeout(&mdev->reset_wait,
- MIC_SUSPEND_TIMEOUT);
+ timeout = wait_for_completion_timeout(&mdev->reset_wait,
+ MIC_SUSPEND_TIMEOUT);
/* Force reset the card if the shutdown completion timed out */
- if (!rc)
+ if (!timeout)
mic_stop(mdev, true);
break;
default:
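
The mic_boot.c change above (like the tifm_7xx1.c one further down) is purely about types: wait_for_completion_timeout() returns an unsigned long, 0 on timeout and the remaining jiffies otherwise, so storing the result in an int both truncates it and muddles its meaning. A minimal sketch of the corrected usage, with a hypothetical completion:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	/* Returns true if @done fired before the 60 second deadline. */
	static bool demo_wait_for_reset(struct completion *done)
	{
		unsigned long left;

		left = wait_for_completion_timeout(done, 60 * HZ);

		return left != 0;
	}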
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index d686f2846ac7..b4ca6c884d19 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -363,8 +363,6 @@ static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
{
int rc;
- pci_msi_off(pdev);
-
/* Enable intx */
pci_intx(pdev, 1);
rc = mic_setup_callbacks(mdev);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 21181fa243df..eeaaf5fca105 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -69,12 +69,23 @@ static int sram_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&reserve_list);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virt_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(virt_base))
- return PTR_ERR(virt_base);
+ if (!res) {
+ dev_err(&pdev->dev, "found no memory resource\n");
+ return -EINVAL;
+ }
size = resource_size(res);
+ if (!devm_request_mem_region(&pdev->dev,
+ res->start, size, pdev->name)) {
+ dev_err(&pdev->dev, "could not request region for resource\n");
+ return -EBUSY;
+ }
+
+ virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
+ if (IS_ERR(virt_base))
+ return PTR_ERR(virt_base);
+
sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
if (!sram)
return -ENOMEM;
@@ -205,7 +216,7 @@ static int sram_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id sram_dt_ids[] = {
+static const struct of_device_id sram_dt_ids[] = {
{ .compatible = "mmio-sram" },
{}
};
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index a606c8901e18..a37a42f67088 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -236,6 +236,7 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
{
struct tifm_adapter *fm = pci_get_drvdata(dev);
int rc;
+ unsigned long timeout;
unsigned int good_sockets = 0, bad_sockets = 0;
unsigned long flags;
unsigned char new_ids[fm->num_sockets];
@@ -272,8 +273,8 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
if (good_sockets) {
fm->finish_me = &finish_resume;
spin_unlock_irqrestore(&fm->lock, flags);
- rc = wait_for_completion_timeout(&finish_resume, HZ);
- dev_dbg(&dev->dev, "wait returned %d\n", rc);
+ timeout = wait_for_completion_timeout(&finish_resume, HZ);
+ dev_dbg(&dev->dev, "wait returned %lu\n", timeout);
writel(TIFM_IRQ_FIFOMASK(good_sockets)
| TIFM_IRQ_CARDMASK(good_sockets),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 032d35cf93ca..b823f9a6e464 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.1.0-k");
+MODULE_VERSION("1.1.3.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 66fc9921fc85..a721b5d8a9da 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -395,6 +395,12 @@ static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
return -EFAULT;
}
+ if (VMCI_DG_SIZE(dg) != send_info.len) {
+ vmci_ioctl_err("datagram size mismatch\n");
+ kfree(dg);
+ return -EINVAL;
+ }
+
pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
dg->dst.context, dg->dst.resource,
dg->src.context, dg->src.resource,
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 35f19a683822..f42d9c4e4561 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -295,12 +295,20 @@ static void *qp_alloc_queue(u64 size, u32 flags)
{
u64 i;
struct vmci_queue *queue;
- const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
- const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
- const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
- const size_t queue_size =
- sizeof(*queue) + sizeof(*queue->kernel_if) +
- pas_size + vas_size;
+ size_t pas_size;
+ size_t vas_size;
+ size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
+ const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+
+ if (num_pages >
+ (SIZE_MAX - queue_size) /
+ (sizeof(*queue->kernel_if->u.g.pas) +
+ sizeof(*queue->kernel_if->u.g.vas)))
+ return NULL;
+
+ pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
+ vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
+ queue_size += pas_size + vas_size;
queue = vmalloc(queue_size);
if (!queue)
@@ -615,10 +623,15 @@ static int qp_memcpy_from_queue_iov(void *dest,
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
struct vmci_queue *queue;
- const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ size_t queue_page_size;
+ const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
- const size_t queue_page_size =
- num_pages * sizeof(*queue->kernel_if->u.h.page);
+
+ if (num_pages > (SIZE_MAX - queue_size) /
+ sizeof(*queue->kernel_if->u.h.page))
+ return NULL;
+
+ queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
if (queue) {
@@ -737,7 +750,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
if (retval < produce_q->kernel_if->num_pages) {
- pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
+ pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
+ retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
retval, false);
err = VMCI_ERROR_NO_MEM;
@@ -748,7 +762,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
if (retval < consume_q->kernel_if->num_pages) {
- pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
+ pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
+ retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
retval, false);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
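
Both qp_alloc_queue() and qp_host_alloc_queue() above now bound num_pages before multiplying, so a huge requested size cannot wrap the allocation length around SIZE_MAX and yield an undersized buffer. The guard reduces to this pattern (names here are illustrative, not from the patch):

	#include <linux/kernel.h>	/* SIZE_MAX */

	/* Returns 0 when base + n * elem would overflow size_t. */
	static size_t checked_alloc_size(size_t base, u64 n, size_t elem)
	{
		if (n > (SIZE_MAX - base) / elem)
			return 0;

		return base + n * elem;
	}

Checking against (SIZE_MAX - base) / elem before the multiplication keeps the arithmetic itself free of overflow, which is why the patch reorders the size computation instead of testing the product afterwards.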
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c69afb5e264e..60f7141a6b02 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
+int mmc_access_rpmb(struct mmc_queue *mq)
+{
+ struct mmc_blk_data *md = mq->data;
+ /*
+	 * If this is an RPMB partition access, return true
+ */
+ if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ return true;
+
+ return false;
+}
+
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
@@ -2230,7 +2242,7 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
part_md->part_type = part_type;
list_add(&part_md->part, &md->part);
- string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+ string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s partition %u %s\n",
part_md->disk->disk_name, mmc_card_id(card),
@@ -2418,9 +2430,8 @@ static const struct mmc_fixup blk_fixups[] =
END_FIXUP
};
-static int mmc_blk_probe(struct device *dev)
+static int mmc_blk_probe(struct mmc_card *card)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_blk_data *md, *part_md;
char cap_str[10];
@@ -2436,7 +2447,7 @@ static int mmc_blk_probe(struct device *dev)
if (IS_ERR(md))
return PTR_ERR(md);
- string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
+ string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
@@ -2445,7 +2456,7 @@ static int mmc_blk_probe(struct device *dev)
if (mmc_blk_alloc_parts(card, md))
goto out;
- dev_set_drvdata(dev, md);
+ dev_set_drvdata(&card->dev, md);
if (mmc_add_disk(md))
goto out;
@@ -2475,10 +2486,9 @@ static int mmc_blk_probe(struct device *dev)
return 0;
}
-static int mmc_blk_remove(struct device *dev)
+static void mmc_blk_remove(struct mmc_card *card)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
- struct mmc_blk_data *md = dev_get_drvdata(dev);
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
mmc_blk_remove_parts(card, md);
pm_runtime_get_sync(&card->dev);
@@ -2489,15 +2499,13 @@ static int mmc_blk_remove(struct device *dev)
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
- dev_set_drvdata(dev, NULL);
-
- return 0;
+ dev_set_drvdata(&card->dev, NULL);
}
-static int _mmc_blk_suspend(struct device *dev)
+static int _mmc_blk_suspend(struct mmc_card *card)
{
struct mmc_blk_data *part_md;
- struct mmc_blk_data *md = dev_get_drvdata(dev);
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
if (md) {
mmc_queue_suspend(&md->queue);
@@ -2508,15 +2516,17 @@ static int _mmc_blk_suspend(struct device *dev)
return 0;
}
-static void mmc_blk_shutdown(struct device *dev)
+static void mmc_blk_shutdown(struct mmc_card *card)
{
- _mmc_blk_suspend(dev);
+ _mmc_blk_suspend(card);
}
#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
- return _mmc_blk_suspend(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return _mmc_blk_suspend(card);
}
static int mmc_blk_resume(struct device *dev)
@@ -2541,9 +2551,11 @@ static int mmc_blk_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
-static struct device_driver mmc_driver = {
- .name = "mmcblk",
- .pm = &mmc_blk_pm_ops,
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "mmcblk",
+ .pm = &mmc_blk_pm_ops,
+ },
.probe = mmc_blk_probe,
.remove = mmc_blk_remove,
.shutdown = mmc_blk_shutdown,
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 7dac4695163b..53b741398b93 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -14,7 +14,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>
-#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/swap.h> /* For nr_free_buffer_pages() */
@@ -2996,9 +2995,8 @@ err:
return ret;
}
-static int mmc_test_probe(struct device *dev)
+static int mmc_test_probe(struct mmc_card *card)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
int ret;
if (!mmc_card_mmc(card) && !mmc_card_sd(card))
@@ -3013,22 +3011,20 @@ static int mmc_test_probe(struct device *dev)
return 0;
}
-static int mmc_test_remove(struct device *dev)
+static void mmc_test_remove(struct mmc_card *card)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
-
mmc_test_free_result(card);
mmc_test_free_dbgfs_file(card);
-
- return 0;
}
-static void mmc_test_shutdown(struct device *dev)
+static void mmc_test_shutdown(struct mmc_card *card)
{
}
-static struct device_driver mmc_driver = {
- .name = "mmc_test",
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "mmc_test",
+ },
.probe = mmc_test_probe,
.remove = mmc_test_remove,
.shutdown = mmc_test_shutdown,
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 236d194c2883..8efa3684aef8 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
- if (mq && mmc_card_removed(mq->card))
+ if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
req->cmd_flags |= REQ_DONTPREP;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 5752d50049a3..99e6521e6169 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
extern void mmc_packed_clean(struct mmc_queue *);
+extern int mmc_access_rpmb(struct mmc_queue *);
+
#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c5ef10065a4a..972ff844cf5a 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -26,6 +26,8 @@
#include "sdio_cis.h"
#include "bus.h"
+#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
+
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -105,14 +107,33 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
+static int mmc_bus_probe(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return drv->probe(card);
+}
+
+static int mmc_bus_remove(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ drv->remove(card);
+
+ return 0;
+}
+
static void mmc_bus_shutdown(struct device *dev)
{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
int ret;
- if (dev->driver && dev->driver->shutdown)
- dev->driver->shutdown(dev);
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(card);
if (host->bus_ops->shutdown) {
ret = host->bus_ops->shutdown(host);
@@ -181,6 +202,8 @@ static struct bus_type mmc_bus_type = {
.dev_groups = mmc_dev_groups,
.match = mmc_bus_match,
.uevent = mmc_bus_uevent,
+ .probe = mmc_bus_probe,
+ .remove = mmc_bus_remove,
.shutdown = mmc_bus_shutdown,
.pm = &mmc_bus_pm_ops,
};
@@ -199,22 +222,24 @@ void mmc_unregister_bus(void)
* mmc_register_driver - register a media driver
* @drv: MMC media driver
*/
-int mmc_register_driver(struct device_driver *drv)
+int mmc_register_driver(struct mmc_driver *drv)
{
- drv->bus = &mmc_bus_type;
- return driver_register(drv);
+ drv->drv.bus = &mmc_bus_type;
+ return driver_register(&drv->drv);
}
+
EXPORT_SYMBOL(mmc_register_driver);
/**
* mmc_unregister_driver - unregister a media driver
* @drv: MMC media driver
*/
-void mmc_unregister_driver(struct device_driver *drv)
+void mmc_unregister_driver(struct mmc_driver *drv)
{
- drv->bus = &mmc_bus_type;
- driver_unregister(drv);
+ drv->drv.bus = &mmc_bus_type;
+ driver_unregister(&drv->drv);
}
+
EXPORT_SYMBOL(mmc_unregister_driver);
static void mmc_release_card(struct device *dev)
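
With the bus.c rework above, the bus core itself translates from device_driver to mmc_driver (to_mmc_driver() is just container_of() on the embedded drv member), so media drivers register a struct mmc_driver and are handed a struct mmc_card directly in probe/remove. A sketch of what a client looks like after the conversion, mirroring the mmc_block/mmc_test updates above (all names here are hypothetical):

	#include <linux/module.h>
	#include <linux/mmc/card.h>

	static int demo_probe(struct mmc_card *card)
	{
		return 0;
	}

	static void demo_remove(struct mmc_card *card)
	{
	}

	static struct mmc_driver demo_driver = {
		.drv = {
			.name = "mmc_demo",
		},
		.probe	= demo_probe,
		.remove	= demo_remove,
	};

	static int __init demo_init(void)
	{
		return mmc_register_driver(&demo_driver);
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
		mmc_unregister_driver(&demo_driver);
	}
	module_exit(demo_exit);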
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c296bc098fe2..92e7671426eb 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2651,6 +2651,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c84131e28625..f36c76f8b232 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1758,7 +1758,7 @@ static int mmc_runtime_suspend(struct mmc_host *host)
err = _mmc_suspend(host, true);
if (err)
- pr_err("%s: error %d doing aggessive suspend\n",
+ pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
return err;
@@ -1776,7 +1776,7 @@ static int mmc_runtime_resume(struct mmc_host *host)
err = _mmc_resume(host);
if (err)
- pr_err("%s: error %d doing aggessive resume\n",
+ pr_err("%s: error %d doing aggressive resume\n",
mmc_hostname(host), err);
return 0;
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index ab2129781161..4c1d1757dbf9 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -73,7 +73,7 @@ int mmc_pwrseq_alloc(struct mmc_host *host)
pwrseq = match->alloc(host, &pdev->dev);
if (IS_ERR(pwrseq)) {
- ret = PTR_ERR(host->pwrseq);
+ ret = PTR_ERR(pwrseq);
goto err;
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ad4d43eae99d..31a9ef256d06 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1157,7 +1157,7 @@ static int mmc_sd_runtime_suspend(struct mmc_host *host)
err = _mmc_sd_suspend(host);
if (err)
- pr_err("%s: error %d doing aggessive suspend\n",
+ pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
return err;
@@ -1175,7 +1175,7 @@ static int mmc_sd_runtime_resume(struct mmc_host *host)
err = _mmc_sd_resume(host);
if (err)
- pr_err("%s: error %d doing aggessive resume\n",
+ pr_err("%s: error %d doing aggressive resume\n",
mmc_hostname(host), err);
return 0;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7f4db908f89b..b1f837e749fe 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -408,14 +408,6 @@ config MMC_SDHCI_MSM
If unsure, say N.
-config MMC_MSM
- tristate "Qualcomm SDCC Controller Support"
- depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
- help
- This provides support for the SD/MMC cell found in the
- MSM and QSD SOCs from Qualcomm. The controller also has
- support for SDIO devices.
-
config MMC_MXC
tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
depends on ARCH_MXC || PPC_MPC512x
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 711e913450f5..e3ab5b968651 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_MMC_OMAP) += omap.o
obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
-obj-$(CONFIG_MMC_MSM) += msm_sdcc.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 38b29265cc7c..5f5adafb253a 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -589,9 +589,11 @@ static int dw_mci_idmac_init(struct dw_mci *host)
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
- for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
+ p->des1 = 0;
+ }
/* Set the last descriptor as the end-of-ring descriptor */
p->des3 = cpu_to_le32(host->sg_dma);
@@ -1300,7 +1302,8 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+ if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (mmc->caps & MMC_CAP_NONREMOVABLE))
present = 1;
else if (!IS_ERR_VALUE(gpio_cd))
present = gpio_cd;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
deleted file mode 100644
index 90c60fd4ff6e..000000000000
--- a/drivers/mmc/host/msm_sdcc.c
+++ /dev/null
@@ -1,1474 +0,0 @@
-/*
- * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
- *
- * Copyright (C) 2007 Google Inc,
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on mmci.c
- *
- * Author: San Mehat (san@android.com)
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/log2.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio.h>
-#include <linux/clk.h>
-#include <linux/scatterlist.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/debugfs.h>
-#include <linux/io.h>
-#include <linux/memory.h>
-#include <linux/gfp.h>
-#include <linux/gpio.h>
-
-#include <asm/cacheflush.h>
-#include <asm/div64.h>
-#include <asm/sizes.h>
-
-#include <linux/platform_data/mmc-msm_sdcc.h>
-#include <mach/dma.h>
-#include <mach/clk.h>
-
-#include "msm_sdcc.h"
-
-#define DRIVER_NAME "msm-sdcc"
-
-#define BUSCLK_PWRSAVE 1
-#define BUSCLK_TIMEOUT (HZ)
-static unsigned int msmsdcc_fmin = 144000;
-static unsigned int msmsdcc_fmax = 50000000;
-static unsigned int msmsdcc_4bit = 1;
-static unsigned int msmsdcc_pwrsave = 1;
-static unsigned int msmsdcc_piopoll = 1;
-static unsigned int msmsdcc_sdioirq;
-
-#define PIO_SPINMAX 30
-#define CMD_SPINMAX 20
-
-
-static inline void
-msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
-{
- WARN_ON(!host->clks_on);
-
- BUG_ON(host->curr.mrq);
-
- if (deferr) {
- mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
- } else {
- del_timer_sync(&host->busclk_timer);
- /* Need to check clks_on again in case the busclk
- * timer fired
- */
- if (host->clks_on) {
- clk_disable(host->clk);
- clk_disable(host->pclk);
- host->clks_on = 0;
- }
- }
-}
-
-static inline int
-msmsdcc_enable_clocks(struct msmsdcc_host *host)
-{
- int rc;
-
- del_timer_sync(&host->busclk_timer);
-
- if (!host->clks_on) {
- rc = clk_enable(host->pclk);
- if (rc)
- return rc;
- rc = clk_enable(host->clk);
- if (rc) {
- clk_disable(host->pclk);
- return rc;
- }
- udelay(1 + ((3 * USEC_PER_SEC) /
- (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
- host->clks_on = 1;
- }
- return 0;
-}
-
-static inline unsigned int
-msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
-{
- return readl(host->base + reg);
-}
-
-static inline void
-msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
-{
- writel(data, host->base + reg);
- /* 3 clk delay required! */
- udelay(1 + ((3 * USEC_PER_SEC) /
- (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
-}
-
-static void
-msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
- u32 c);
-
-static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
-{
- u32 mci_clk = 0;
- u32 mci_mask0 = 0;
- int ret = 0;
-
- /* Save the controller state */
- mci_clk = readl(host->base + MMCICLOCK);
- mci_mask0 = readl(host->base + MMCIMASK0);
-
- /* Reset the controller */
- ret = clk_reset(host->clk, CLK_RESET_ASSERT);
- if (ret)
- pr_err("%s: Clock assert failed at %u Hz with err %d\n",
- mmc_hostname(host->mmc), host->clk_rate, ret);
-
- ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
- if (ret)
- pr_err("%s: Clock deassert failed at %u Hz with err %d\n",
- mmc_hostname(host->mmc), host->clk_rate, ret);
-
- pr_info("%s: Controller has been re-initialized\n",
- mmc_hostname(host->mmc));
-
- /* Restore the controller state */
- writel(host->pwr, host->base + MMCIPOWER);
- writel(mci_clk, host->base + MMCICLOCK);
- writel(mci_mask0, host->base + MMCIMASK0);
- ret = clk_set_rate(host->clk, host->clk_rate);
- if (ret)
- pr_err("%s: Failed to set clk rate %u Hz (%d)\n",
- mmc_hostname(host->mmc), host->clk_rate, ret);
-}
-
-static void
-msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
-{
- BUG_ON(host->curr.data);
-
- host->curr.mrq = NULL;
- host->curr.cmd = NULL;
-
- if (mrq->data)
- mrq->data->bytes_xfered = host->curr.data_xfered;
- if (mrq->cmd->error == -ETIMEDOUT)
- mdelay(5);
-
-#if BUSCLK_PWRSAVE
- msmsdcc_disable_clocks(host, 1);
-#endif
- /*
- * Need to drop the host lock here; mmc_request_done may call
- * back into the driver...
- */
- spin_unlock(&host->lock);
- mmc_request_done(host->mmc, mrq);
- spin_lock(&host->lock);
-}
-
-static void
-msmsdcc_stop_data(struct msmsdcc_host *host)
-{
- host->curr.data = NULL;
- host->curr.got_dataend = 0;
-}
-
-uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
-{
- return host->memres->start + MMCIFIFO;
-}
-
-static inline void
-msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
- msmsdcc_writel(host, arg, MMCIARGUMENT);
- msmsdcc_writel(host, c, MMCICOMMAND);
-}
-
-static void
-msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
-{
- struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
-
- msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
- msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
- MMCIDATALENGTH);
- msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) &
- (~MCI_IRQ_PIO)) | host->cmd_pio_irqmask, MMCIMASK0);
- msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
-
- if (host->cmd_cmd) {
- msmsdcc_start_command_exec(host,
- (u32) host->cmd_cmd->arg,
- (u32) host->cmd_c);
- }
- host->dma.active = 1;
-}
-
-static void
-msmsdcc_dma_complete_tlet(unsigned long data)
-{
- struct msmsdcc_host *host = (struct msmsdcc_host *)data;
- unsigned long flags;
- struct mmc_request *mrq;
- struct msm_dmov_errdata err;
-
- spin_lock_irqsave(&host->lock, flags);
- host->dma.active = 0;
-
- err = host->dma.err;
- mrq = host->curr.mrq;
- BUG_ON(!mrq);
- WARN_ON(!mrq->data);
-
- if (!(host->dma.result & DMOV_RSLT_VALID)) {
- pr_err("msmsdcc: Invalid DataMover result\n");
- goto out;
- }
-
- if (host->dma.result & DMOV_RSLT_DONE) {
- host->curr.data_xfered = host->curr.xfer_size;
- } else {
- /* Error or flush */
- if (host->dma.result & DMOV_RSLT_ERROR)
- pr_err("%s: DMA error (0x%.8x)\n",
- mmc_hostname(host->mmc), host->dma.result);
- if (host->dma.result & DMOV_RSLT_FLUSH)
- pr_err("%s: DMA channel flushed (0x%.8x)\n",
- mmc_hostname(host->mmc), host->dma.result);
-
- pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
- err.flush[0], err.flush[1], err.flush[2],
- err.flush[3], err.flush[4], err.flush[5]);
-
- msmsdcc_reset_and_restore(host);
- if (!mrq->data->error)
- mrq->data->error = -EIO;
- }
- dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
- host->dma.dir);
-
- host->dma.sg = NULL;
- host->dma.busy = 0;
-
- if (host->curr.got_dataend || mrq->data->error) {
-
- /*
- * If we've already gotten our DATAEND / DATABLKEND
- * for this request, then complete it through here.
- */
- msmsdcc_stop_data(host);
-
- if (!mrq->data->error)
- host->curr.data_xfered = host->curr.xfer_size;
- if (!mrq->data->stop || mrq->cmd->error) {
- host->curr.mrq = NULL;
- host->curr.cmd = NULL;
- mrq->data->bytes_xfered = host->curr.data_xfered;
-
- spin_unlock_irqrestore(&host->lock, flags);
-#if BUSCLK_PWRSAVE
- msmsdcc_disable_clocks(host, 1);
-#endif
- mmc_request_done(host->mmc, mrq);
- return;
- } else
- msmsdcc_start_command(host, mrq->data->stop, 0);
- }
-
-out:
- spin_unlock_irqrestore(&host->lock, flags);
- return;
-}
-
-static void
-msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
- unsigned int result,
- struct msm_dmov_errdata *err)
-{
- struct msmsdcc_dma_data *dma_data =
- container_of(cmd, struct msmsdcc_dma_data, hdr);
- struct msmsdcc_host *host = dma_data->host;
-
- dma_data->result = result;
- if (err)
- memcpy(&dma_data->err, err, sizeof(struct msm_dmov_errdata));
-
- tasklet_schedule(&host->dma_tlet);
-}
-
-static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
-{
- if (host->dma.channel == -1)
- return -ENOENT;
-
- if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
- return -EINVAL;
- if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
- return -EINVAL;
- return 0;
-}
-
-static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
-{
- struct msmsdcc_nc_dmadata *nc;
- dmov_box *box;
- uint32_t rows;
- uint32_t crci;
- unsigned int n;
- int i, rc;
- struct scatterlist *sg = data->sg;
-
- rc = validate_dma(host, data);
- if (rc)
- return rc;
-
- host->dma.sg = data->sg;
- host->dma.num_ents = data->sg_len;
-
- BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
-
- nc = host->dma.nc;
-
- switch (host->pdev_id) {
- case 1:
- crci = MSMSDCC_CRCI_SDC1;
- break;
- case 2:
- crci = MSMSDCC_CRCI_SDC2;
- break;
- case 3:
- crci = MSMSDCC_CRCI_SDC3;
- break;
- case 4:
- crci = MSMSDCC_CRCI_SDC4;
- break;
- default:
- host->dma.sg = NULL;
- host->dma.num_ents = 0;
- return -ENOENT;
- }
-
- if (data->flags & MMC_DATA_READ)
- host->dma.dir = DMA_FROM_DEVICE;
- else
- host->dma.dir = DMA_TO_DEVICE;
-
- host->curr.user_pages = 0;
-
- box = &nc->cmd[0];
-
- /* location of command block must be 64 bit aligned */
- BUG_ON(host->dma.cmd_busaddr & 0x07);
-
- nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
- host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
- DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
- host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
- n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
- host->dma.num_ents, host->dma.dir);
- if (n == 0) {
- pr_err("%s: Unable to map in all sg elements\n",
- mmc_hostname(host->mmc));
- host->dma.sg = NULL;
- host->dma.num_ents = 0;
- return -ENOMEM;
- }
-
- for_each_sg(host->dma.sg, sg, n, i) {
-
- box->cmd = CMD_MODE_BOX;
-
- if (i == n - 1)
- box->cmd |= CMD_LC;
- rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
- (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
- (sg_dma_len(sg) / MCI_FIFOSIZE) ;
-
- if (data->flags & MMC_DATA_READ) {
- box->src_row_addr = msmsdcc_fifo_addr(host);
- box->dst_row_addr = sg_dma_address(sg);
-
- box->src_dst_len = (MCI_FIFOSIZE << 16) |
- (MCI_FIFOSIZE);
- box->row_offset = MCI_FIFOSIZE;
-
- box->num_rows = rows * ((1 << 16) + 1);
- box->cmd |= CMD_SRC_CRCI(crci);
- } else {
- box->src_row_addr = sg_dma_address(sg);
- box->dst_row_addr = msmsdcc_fifo_addr(host);
-
- box->src_dst_len = (MCI_FIFOSIZE << 16) |
- (MCI_FIFOSIZE);
- box->row_offset = (MCI_FIFOSIZE << 16);
-
- box->num_rows = rows * ((1 << 16) + 1);
- box->cmd |= CMD_DST_CRCI(crci);
- }
- box++;
- }
-
- return 0;
-}
-
-static int
-snoop_cccr_abort(struct mmc_command *cmd)
-{
- if ((cmd->opcode == 52) &&
- (cmd->arg & 0x80000000) &&
- (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
- return 1;
- return 0;
-}
-
-static void
-msmsdcc_start_command_deferred(struct msmsdcc_host *host,
- struct mmc_command *cmd, u32 *c)
-{
- *c |= (cmd->opcode | MCI_CPSM_ENABLE);
-
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136)
- *c |= MCI_CPSM_LONGRSP;
- *c |= MCI_CPSM_RESPONSE;
- }
-
- if (/*interrupt*/0)
- *c |= MCI_CPSM_INTERRUPT;
-
- if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
- ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
- (cmd->opcode == 53))
- *c |= MCI_CSPM_DATCMD;
-
- if (host->prog_scan && (cmd->opcode == 12)) {
- *c |= MCI_CPSM_PROGENA;
- host->prog_enable = true;
- }
-
- if (cmd == cmd->mrq->stop)
- *c |= MCI_CSPM_MCIABORT;
-
- if (snoop_cccr_abort(cmd))
- *c |= MCI_CSPM_MCIABORT;
-
- if (host->curr.cmd != NULL) {
- pr_err("%s: Overlapping command requests\n",
- mmc_hostname(host->mmc));
- }
- host->curr.cmd = cmd;
-}
-
-static void
-msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
- struct mmc_command *cmd, u32 c)
-{
- unsigned int datactrl, timeout;
- unsigned long long clks;
- unsigned int pio_irqmask = 0;
-
- host->curr.data = data;
- host->curr.xfer_size = data->blksz * data->blocks;
- host->curr.xfer_remain = host->curr.xfer_size;
- host->curr.data_xfered = 0;
- host->curr.got_dataend = 0;
-
- memset(&host->pio, 0, sizeof(host->pio));
-
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
-
- if (!msmsdcc_config_dma(host, data))
- datactrl |= MCI_DPSM_DMAENABLE;
- else {
- host->pio.sg = data->sg;
- host->pio.sg_len = data->sg_len;
- host->pio.sg_off = 0;
-
- if (data->flags & MMC_DATA_READ) {
- pio_irqmask = MCI_RXFIFOHALFFULLMASK;
- if (host->curr.xfer_remain < MCI_FIFOSIZE)
- pio_irqmask |= MCI_RXDATAAVLBLMASK;
- } else
- pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
- }
-
- if (data->flags & MMC_DATA_READ)
- datactrl |= MCI_DPSM_DIRECTION;
-
- clks = (unsigned long long)data->timeout_ns * host->clk_rate;
- do_div(clks, NSEC_PER_SEC);
- timeout = data->timeout_clks + (unsigned int)clks*2 ;
-
- if (datactrl & MCI_DPSM_DMAENABLE) {
- /* Save parameters for the exec function */
- host->cmd_timeout = timeout;
- host->cmd_pio_irqmask = pio_irqmask;
- host->cmd_datactrl = datactrl;
- host->cmd_cmd = cmd;
-
- host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
- host->dma.hdr.data = (void *)host;
- host->dma.busy = 1;
-
- if (cmd) {
- msmsdcc_start_command_deferred(host, cmd, &c);
- host->cmd_c = c;
- }
- msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
- if (data->flags & MMC_DATA_WRITE)
- host->prog_scan = true;
- } else {
- msmsdcc_writel(host, timeout, MMCIDATATIMER);
-
- msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
-
- msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) &
- (~MCI_IRQ_PIO)) | pio_irqmask, MMCIMASK0);
-
- msmsdcc_writel(host, datactrl, MMCIDATACTRL);
-
- if (cmd) {
- /* Daisy-chain the command if requested */
- msmsdcc_start_command(host, cmd, c);
- }
- }
-}
-
-static void
-msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
-{
- if (cmd == cmd->mrq->stop)
- c |= MCI_CSPM_MCIABORT;
-
- host->stats.cmds++;
-
- msmsdcc_start_command_deferred(host, cmd, &c);
- msmsdcc_start_command_exec(host, cmd->arg, c);
-}
-
-static void
-msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
- unsigned int status)
-{
- if (status & MCI_DATACRCFAIL) {
- pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
- pr_err("%s: opcode 0x%.8x\n", __func__,
- data->mrq->cmd->opcode);
- pr_err("%s: blksz %d, blocks %d\n", __func__,
- data->blksz, data->blocks);
- data->error = -EILSEQ;
- } else if (status & MCI_DATATIMEOUT) {
- pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
- data->error = -ETIMEDOUT;
- } else if (status & MCI_RXOVERRUN) {
- pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
- data->error = -EIO;
- } else if (status & MCI_TXUNDERRUN) {
- pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
- data->error = -EIO;
- } else {
- pr_err("%s: Unknown error (0x%.8x)\n",
- mmc_hostname(host->mmc), status);
- data->error = -EIO;
- }
-}
-
-
-static int
-msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
-{
- uint32_t *ptr = (uint32_t *) buffer;
- int count = 0;
-
- if (remain % 4)
- remain = ((remain >> 2) + 1) << 2;
-
- while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
- *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
- ptr++;
- count += sizeof(uint32_t);
-
- remain -= sizeof(uint32_t);
- if (remain == 0)
- break;
- }
- return count;
-}
-
-static int
-msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
- unsigned int remain, u32 status)
-{
- void __iomem *base = host->base;
- char *ptr = buffer;
-
- do {
- unsigned int count, maxcnt, sz;
-
- maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
- MCI_FIFOHALFSIZE;
- count = min(remain, maxcnt);
-
- sz = count % 4 ? (count >> 2) + 1 : (count >> 2);
- writesl(base + MMCIFIFO, ptr, sz);
- ptr += count;
- remain -= count;
-
- if (remain == 0)
- break;
-
- status = msmsdcc_readl(host, MMCISTATUS);
- } while (status & MCI_TXFIFOHALFEMPTY);
-
- return ptr - buffer;
-}
-
-static int
-msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
-{
- while (maxspin) {
- if ((msmsdcc_readl(host, MMCISTATUS) & mask))
- return 0;
- udelay(1);
- --maxspin;
- }
- return -ETIMEDOUT;
-}
-
-static irqreturn_t
-msmsdcc_pio_irq(int irq, void *dev_id)
-{
- struct msmsdcc_host *host = dev_id;
- uint32_t status;
- u32 mci_mask0;
-
- status = msmsdcc_readl(host, MMCISTATUS);
- mci_mask0 = msmsdcc_readl(host, MMCIMASK0);
-
- if (((mci_mask0 & status) & MCI_IRQ_PIO) == 0)
- return IRQ_NONE;
-
- do {
- unsigned long flags;
- unsigned int remain, len;
- char *buffer;
-
- if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
- if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
- break;
-
- if (msmsdcc_spin_on_status(host,
- (MCI_TXFIFOHALFEMPTY |
- MCI_RXDATAAVLBL),
- PIO_SPINMAX)) {
- break;
- }
- }
-
- /* Map the current scatter buffer */
- local_irq_save(flags);
- buffer = kmap_atomic(sg_page(host->pio.sg))
- + host->pio.sg->offset;
- buffer += host->pio.sg_off;
- remain = host->pio.sg->length - host->pio.sg_off;
- len = 0;
- if (status & MCI_RXACTIVE)
- len = msmsdcc_pio_read(host, buffer, remain);
- if (status & MCI_TXACTIVE)
- len = msmsdcc_pio_write(host, buffer, remain, status);
-
- /* Unmap the buffer */
- kunmap_atomic(buffer);
- local_irq_restore(flags);
-
- host->pio.sg_off += len;
- host->curr.xfer_remain -= len;
- host->curr.data_xfered += len;
- remain -= len;
-
- if (remain == 0) {
- /* This sg page is full - do some housekeeping */
- if (status & MCI_RXACTIVE && host->curr.user_pages)
- flush_dcache_page(sg_page(host->pio.sg));
-
- if (!--host->pio.sg_len) {
- memset(&host->pio, 0, sizeof(host->pio));
- break;
- }
-
- /* Advance to next sg */
- host->pio.sg++;
- host->pio.sg_off = 0;
- }
-
- status = msmsdcc_readl(host, MMCISTATUS);
- } while (1);
-
- if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
- msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) |
- MCI_RXDATAAVLBLMASK, MMCIMASK0);
-
- if (!host->curr.xfer_remain)
- msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | 0,
- MMCIMASK0);
-
- return IRQ_HANDLED;
-}
-
-static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
-{
- struct mmc_command *cmd = host->curr.cmd;
-
- host->curr.cmd = NULL;
- cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
- cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
- cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
- cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
-
- if (status & MCI_CMDTIMEOUT) {
- cmd->error = -ETIMEDOUT;
- } else if (status & MCI_CMDCRCFAIL &&
- cmd->flags & MMC_RSP_CRC) {
- pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
- cmd->error = -EILSEQ;
- }
-
- if (!cmd->data || cmd->error) {
- if (host->curr.data && host->dma.sg)
- msm_dmov_stop_cmd(host->dma.channel,
- &host->dma.hdr, 0);
- else if (host->curr.data) { /* Non DMA */
- msmsdcc_reset_and_restore(host);
- msmsdcc_stop_data(host);
- msmsdcc_request_end(host, cmd->mrq);
- } else { /* host->data == NULL */
- if (!cmd->error && host->prog_enable) {
- if (status & MCI_PROGDONE) {
- host->prog_scan = false;
- host->prog_enable = false;
- msmsdcc_request_end(host, cmd->mrq);
- } else {
- host->curr.cmd = cmd;
- }
- } else {
- if (host->prog_enable) {
- host->prog_scan = false;
- host->prog_enable = false;
- }
- msmsdcc_request_end(host, cmd->mrq);
- }
- }
- } else if (cmd->data)
- if (!(cmd->data->flags & MMC_DATA_READ))
- msmsdcc_start_data(host, cmd->data,
- NULL, 0);
-}
-
-static void
-msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
- void __iomem *base)
-{
- struct mmc_data *data = host->curr.data;
-
- if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
- MCI_CMDTIMEOUT | MCI_PROGDONE) && host->curr.cmd) {
- msmsdcc_do_cmdirq(host, status);
- }
-
- if (!data)
- return;
-
- /* Check for data errors */
- if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
- MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
- msmsdcc_data_err(host, data, status);
- host->curr.data_xfered = 0;
- if (host->dma.sg)
- msm_dmov_stop_cmd(host->dma.channel,
- &host->dma.hdr, 0);
- else {
- msmsdcc_reset_and_restore(host);
- if (host->curr.data)
- msmsdcc_stop_data(host);
- if (!data->stop)
- msmsdcc_request_end(host, data->mrq);
- else
- msmsdcc_start_command(host, data->stop, 0);
- }
- }
-
- /* Check for data done */
- if (!host->curr.got_dataend && (status & MCI_DATAEND))
- host->curr.got_dataend = 1;
-
- /*
- * If DMA is still in progress, we complete via the completion handler
- */
- if (host->curr.got_dataend && !host->dma.busy) {
- /*
- * There appears to be an issue in the controller where
- * if you request a small block transfer (< fifo size),
- * you may get your DATAEND/DATABLKEND irq without the
- * PIO data irq.
- *
- * Check to see if there is still data to be read,
- * and simulate a PIO irq.
- */
- if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
- msmsdcc_pio_irq(1, host);
-
- msmsdcc_stop_data(host);
- if (!data->error)
- host->curr.data_xfered = host->curr.xfer_size;
-
- if (!data->stop)
- msmsdcc_request_end(host, data->mrq);
- else
- msmsdcc_start_command(host, data->stop, 0);
- }
-}
-
-static irqreturn_t
-msmsdcc_irq(int irq, void *dev_id)
-{
- struct msmsdcc_host *host = dev_id;
- void __iomem *base = host->base;
- u32 status;
- int ret = 0;
- int cardint = 0;
-
- spin_lock(&host->lock);
-
- do {
- status = msmsdcc_readl(host, MMCISTATUS);
- status &= msmsdcc_readl(host, MMCIMASK0);
- if ((status & (~MCI_IRQ_PIO)) == 0)
- break;
- msmsdcc_writel(host, status, MMCICLEAR);
-
- if (status & MCI_SDIOINTR)
- status &= ~MCI_SDIOINTR;
-
- if (!status)
- break;
-
- msmsdcc_handle_irq_data(host, status, base);
-
- if (status & MCI_SDIOINTOPER) {
- cardint = 1;
- status &= ~MCI_SDIOINTOPER;
- }
- ret = 1;
- } while (status);
-
- spin_unlock(&host->lock);
-
- /*
- * We have to delay handling the card interrupt as it calls
- * back into the driver.
- */
- if (cardint)
- mmc_signal_sdio_irq(host->mmc);
-
- return IRQ_RETVAL(ret);
-}
-
-static void
-msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msmsdcc_host *host = mmc_priv(mmc);
- unsigned long flags;
-
- WARN_ON(host->curr.mrq != NULL);
- WARN_ON(host->pwr == 0);
-
- spin_lock_irqsave(&host->lock, flags);
-
- host->stats.reqs++;
-
- if (host->eject) {
- if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
- mrq->cmd->error = 0;
- mrq->data->bytes_xfered = mrq->data->blksz *
- mrq->data->blocks;
- } else
- mrq->cmd->error = -ENOMEDIUM;
-
- spin_unlock_irqrestore(&host->lock, flags);
- mmc_request_done(mmc, mrq);
- return;
- }
-
- msmsdcc_enable_clocks(host);
-
- host->curr.mrq = mrq;
-
- if (mrq->data && mrq->data->flags & MMC_DATA_READ)
- /* Queue/read data, daisy-chain command when data starts */
- msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
- else
- msmsdcc_start_command(host, mrq->cmd, 0);
-
- if (host->cmdpoll && !msmsdcc_spin_on_status(host,
- MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
- CMD_SPINMAX)) {
- uint32_t status = msmsdcc_readl(host, MMCISTATUS);
- msmsdcc_do_cmdirq(host, status);
- msmsdcc_writel(host,
- MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
- MMCICLEAR);
- host->stats.cmdpoll_hits++;
- } else {
- host->stats.cmdpoll_misses++;
- }
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
-{
- struct msm_mmc_gpio_data *curr;
- int i, rc = 0;
-
- if (!host->plat->gpio_data || host->gpio_config_status == enable)
- return;
-
- curr = host->plat->gpio_data;
- for (i = 0; i < curr->size; i++) {
- if (enable) {
- rc = gpio_request(curr->gpio[i].no,
- curr->gpio[i].name);
- if (rc) {
- pr_err("%s: gpio_request(%d, %s) failed %d\n",
- mmc_hostname(host->mmc),
- curr->gpio[i].no,
- curr->gpio[i].name, rc);
- goto free_gpios;
- }
- } else {
- gpio_free(curr->gpio[i].no);
- }
- }
- host->gpio_config_status = enable;
- return;
-
-free_gpios:
- for (; i >= 0; i--)
- gpio_free(curr->gpio[i].no);
-}
-
-static void
-msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct msmsdcc_host *host = mmc_priv(mmc);
- u32 clk = 0, pwr = 0;
- int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- msmsdcc_enable_clocks(host);
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- if (ios->clock) {
- if (ios->clock != host->clk_rate) {
- rc = clk_set_rate(host->clk, ios->clock);
- if (rc < 0)
- pr_err("%s: Error setting clock rate (%d)\n",
- mmc_hostname(host->mmc), rc);
- else
- host->clk_rate = ios->clock;
- }
- clk |= MCI_CLK_ENABLE;
- }
-
- if (ios->bus_width == MMC_BUS_WIDTH_4)
- clk |= (2 << 10); /* Set WIDEBUS */
-
- if (ios->clock > 400000 && msmsdcc_pwrsave)
- clk |= (1 << 9); /* PWRSAVE */
-
- clk |= (1 << 12); /* FLOW_ENA */
- clk |= (1 << 15); /* feedback clock */
-
- if (host->plat->translate_vdd)
- pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
-
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- msmsdcc_setup_gpio(host, false);
- break;
- case MMC_POWER_UP:
- pwr |= MCI_PWR_UP;
- msmsdcc_setup_gpio(host, true);
- break;
- case MMC_POWER_ON:
- pwr |= MCI_PWR_ON;
- break;
- }
-
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
- pwr |= MCI_OD;
-
- msmsdcc_writel(host, clk, MMCICLOCK);
-
- if (host->pwr != pwr) {
- host->pwr = pwr;
- msmsdcc_writel(host, pwr, MMCIPOWER);
- }
-#if BUSCLK_PWRSAVE
- spin_lock_irqsave(&host->lock, flags);
- msmsdcc_disable_clocks(host, 1);
- spin_unlock_irqrestore(&host->lock, flags);
-#endif
-}
-
-static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
-{
- struct msmsdcc_host *host = mmc_priv(mmc);
- unsigned long flags;
- u32 status;
-
- spin_lock_irqsave(&host->lock, flags);
- if (msmsdcc_sdioirq == 1) {
- status = msmsdcc_readl(host, MMCIMASK0);
- if (enable)
- status |= MCI_SDIOINTOPERMASK;
- else
- status &= ~MCI_SDIOINTOPERMASK;
- host->saved_irq0mask = status;
- msmsdcc_writel(host, status, MMCIMASK0);
- }
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void msmsdcc_init_card(struct mmc_host *mmc, struct mmc_card *card)
-{
- struct msmsdcc_host *host = mmc_priv(mmc);
-
- if (host->plat->init_card)
- host->plat->init_card(card);
-}
-
-static const struct mmc_host_ops msmsdcc_ops = {
- .request = msmsdcc_request,
- .set_ios = msmsdcc_set_ios,
- .enable_sdio_irq = msmsdcc_enable_sdio_irq,
- .init_card = msmsdcc_init_card,
-};
-
-static void
-msmsdcc_check_status(unsigned long data)
-{
- struct msmsdcc_host *host = (struct msmsdcc_host *)data;
- unsigned int status;
-
- if (!host->plat->status) {
- mmc_detect_change(host->mmc, 0);
- goto out;
- }
-
- status = host->plat->status(mmc_dev(host->mmc));
- host->eject = !status;
- if (status ^ host->oldstat) {
- pr_info("%s: Slot status change detected (%d -> %d)\n",
- mmc_hostname(host->mmc), host->oldstat, status);
- if (status)
- mmc_detect_change(host->mmc, (5 * HZ) / 2);
- else
- mmc_detect_change(host->mmc, 0);
- }
-
- host->oldstat = status;
-
-out:
- if (host->timer.function)
- mod_timer(&host->timer, jiffies + HZ);
-}
-
-static irqreturn_t
-msmsdcc_platform_status_irq(int irq, void *dev_id)
-{
- struct msmsdcc_host *host = dev_id;
-
- pr_debug("%s: %d\n", __func__, irq);
- msmsdcc_check_status((unsigned long) host);
- return IRQ_HANDLED;
-}
-
-static void
-msmsdcc_status_notify_cb(int card_present, void *dev_id)
-{
- struct msmsdcc_host *host = dev_id;
-
- pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc),
- card_present);
- msmsdcc_check_status((unsigned long) host);
-}
-
-static void
-msmsdcc_busclk_expired(unsigned long _data)
-{
- struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
-
- if (host->clks_on)
- msmsdcc_disable_clocks(host, 0);
-}
-
-static int
-msmsdcc_init_dma(struct msmsdcc_host *host)
-{
- memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
- host->dma.host = host;
- host->dma.channel = -1;
-
- if (!host->dmares)
- return -ENODEV;
-
- host->dma.nc = dma_alloc_coherent(NULL,
- sizeof(struct msmsdcc_nc_dmadata),
- &host->dma.nc_busaddr,
- GFP_KERNEL);
- if (host->dma.nc == NULL) {
- pr_err("Unable to allocate DMA buffer\n");
- return -ENOMEM;
- }
- memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
- host->dma.cmd_busaddr = host->dma.nc_busaddr;
- host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
- offsetof(struct msmsdcc_nc_dmadata, cmdptr);
- host->dma.channel = host->dmares->start;
-
- return 0;
-}
-
-static int
-msmsdcc_probe(struct platform_device *pdev)
-{
- struct msm_mmc_platform_data *plat = pdev->dev.platform_data;
- struct msmsdcc_host *host;
- struct mmc_host *mmc;
- struct resource *cmd_irqres = NULL;
- struct resource *stat_irqres = NULL;
- struct resource *memres = NULL;
- struct resource *dmares = NULL;
- int ret;
-
- /* must have platform data */
- if (!plat) {
- pr_err("%s: Platform data not available\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- if (pdev->id < 1 || pdev->id > 4)
- return -EINVAL;
-
- if (pdev->resource == NULL || pdev->num_resources < 2) {
- pr_err("%s: Invalid resource\n", __func__);
- return -ENXIO;
- }
-
- memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "cmd_irq");
- stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "status_irq");
-
- if (!cmd_irqres || !memres) {
- pr_err("%s: Invalid resource\n", __func__);
- return -ENXIO;
- }
-
- /*
- * Setup our host structure
- */
-
- mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out;
- }
-
- host = mmc_priv(mmc);
- host->pdev_id = pdev->id;
- host->plat = plat;
- host->mmc = mmc;
- host->curr.cmd = NULL;
- init_timer(&host->busclk_timer);
- host->busclk_timer.data = (unsigned long) host;
- host->busclk_timer.function = msmsdcc_busclk_expired;
-
-
- host->cmdpoll = 1;
-
- host->base = ioremap(memres->start, PAGE_SIZE);
- if (!host->base) {
- ret = -ENOMEM;
- goto host_free;
- }
-
- host->cmd_irqres = cmd_irqres;
- host->memres = memres;
- host->dmares = dmares;
- spin_lock_init(&host->lock);
-
- tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet,
- (unsigned long)host);
-
- /*
- * Setup DMA
- */
- if (host->dmares) {
- ret = msmsdcc_init_dma(host);
- if (ret)
- goto ioremap_free;
- } else {
- host->dma.channel = -1;
- }
-
- /* Get our clocks */
- host->pclk = clk_get(&pdev->dev, "sdc_pclk");
- if (IS_ERR(host->pclk)) {
- ret = PTR_ERR(host->pclk);
- goto dma_free;
- }
-
- host->clk = clk_get(&pdev->dev, "sdc_clk");
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto pclk_put;
- }
-
- ret = clk_set_rate(host->clk, msmsdcc_fmin);
- if (ret) {
- pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
- goto clk_put;
- }
-
- ret = clk_prepare(host->pclk);
- if (ret)
- goto clk_put;
-
- ret = clk_prepare(host->clk);
- if (ret)
- goto clk_unprepare_p;
-
- /* Enable clocks */
- ret = msmsdcc_enable_clocks(host);
- if (ret)
- goto clk_unprepare;
-
- host->pclk_rate = clk_get_rate(host->pclk);
- host->clk_rate = clk_get_rate(host->clk);
-
- /*
- * Setup MMC host structure
- */
- mmc->ops = &msmsdcc_ops;
- mmc->f_min = msmsdcc_fmin;
- mmc->f_max = msmsdcc_fmax;
- mmc->ocr_avail = plat->ocr_mask;
-
- if (msmsdcc_4bit)
- mmc->caps |= MMC_CAP_4_BIT_DATA;
- if (msmsdcc_sdioirq)
- mmc->caps |= MMC_CAP_SDIO_IRQ;
- mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
-
- mmc->max_segs = NR_SG;
- mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
- mmc->max_blk_count = 65536;
-
- mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
- mmc->max_seg_size = mmc->max_req_size;
-
- msmsdcc_writel(host, 0, MMCIMASK0);
- msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
-
- msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
- host->saved_irq0mask = MCI_IRQENABLE;
-
- /*
- * Setup card detect change
- */
-
- memset(&host->timer, 0, sizeof(host->timer));
-
- if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
- unsigned long irqflags = IRQF_SHARED |
- (stat_irqres->flags & IRQF_TRIGGER_MASK);
-
- host->stat_irq = stat_irqres->start;
- ret = request_irq(host->stat_irq,
- msmsdcc_platform_status_irq,
- irqflags,
- DRIVER_NAME " (slot)",
- host);
- if (ret) {
- pr_err("%s: Unable to get slot IRQ %d (%d)\n",
- mmc_hostname(mmc), host->stat_irq, ret);
- goto clk_disable;
- }
- } else if (plat->register_status_notify) {
- plat->register_status_notify(msmsdcc_status_notify_cb, host);
- } else if (!plat->status)
- pr_err("%s: No card detect facilities available\n",
- mmc_hostname(mmc));
- else {
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = msmsdcc_check_status;
- host->timer.expires = jiffies + HZ;
- add_timer(&host->timer);
- }
-
- if (plat->status) {
- host->oldstat = host->plat->status(mmc_dev(host->mmc));
- host->eject = !host->oldstat;
- }
-
- ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
- DRIVER_NAME " (cmd)", host);
- if (ret)
- goto stat_irq_free;
-
- ret = request_irq(cmd_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
- DRIVER_NAME " (pio)", host);
- if (ret)
- goto cmd_irq_free;
-
- platform_set_drvdata(pdev, mmc);
- mmc_add_host(mmc);
-
- pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
- mmc_hostname(mmc), (unsigned long long)memres->start,
- (unsigned int) cmd_irqres->start,
- (unsigned int) host->stat_irq, host->dma.channel);
- pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
- (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
- pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
- mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
- pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
- pr_info("%s: Power save feature enable = %d\n",
- mmc_hostname(mmc), msmsdcc_pwrsave);
-
- if (host->dma.channel != -1) {
- pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
- mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
- pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
- mmc_hostname(mmc), host->dma.cmd_busaddr,
- host->dma.cmdptr_busaddr);
- } else
- pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
- if (host->timer.function)
- pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
-
- return 0;
- cmd_irq_free:
- free_irq(cmd_irqres->start, host);
- stat_irq_free:
- if (host->stat_irq)
- free_irq(host->stat_irq, host);
- clk_disable:
- msmsdcc_disable_clocks(host, 0);
- clk_unprepare:
- clk_unprepare(host->clk);
- clk_unprepare_p:
- clk_unprepare(host->pclk);
- clk_put:
- clk_put(host->clk);
- pclk_put:
- clk_put(host->pclk);
-dma_free:
- if (host->dmares)
- dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata),
- host->dma.nc, host->dma.nc_busaddr);
-ioremap_free:
- tasklet_kill(&host->dma_tlet);
- iounmap(host->base);
- host_free:
- mmc_free_host(mmc);
- out:
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int
-msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
-
- if (mmc) {
- struct msmsdcc_host *host = mmc_priv(mmc);
-
- if (host->stat_irq)
- disable_irq(host->stat_irq);
-
- msmsdcc_writel(host, 0, MMCIMASK0);
- if (host->clks_on)
- msmsdcc_disable_clocks(host, 0);
- }
- return 0;
-}
-
-static int
-msmsdcc_resume(struct platform_device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
-
- if (mmc) {
- struct msmsdcc_host *host = mmc_priv(mmc);
-
- msmsdcc_enable_clocks(host);
-
- msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
-
- if (host->stat_irq)
- enable_irq(host->stat_irq);
-#if BUSCLK_PWRSAVE
- msmsdcc_disable_clocks(host, 1);
-#endif
- }
- return 0;
-}
-#else
-#define msmsdcc_suspend 0
-#define msmsdcc_resume 0
-#endif
-
-static struct platform_driver msmsdcc_driver = {
- .probe = msmsdcc_probe,
- .suspend = msmsdcc_suspend,
- .resume = msmsdcc_resume,
- .driver = {
- .name = "msm_sdcc",
- },
-};
-
-module_platform_driver(msmsdcc_driver);
-
-MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
deleted file mode 100644
index 402028d16b86..000000000000
--- a/drivers/mmc/host/msm_sdcc.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * linux/drivers/mmc/host/msmsdcc.h - QCT MSM7K SDC Controller
- *
- * Copyright (C) 2008 Google, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * - Based on mmci.h
- */
-
-#ifndef _MSM_SDCC_H
-#define _MSM_SDCC_H
-
-#define MSMSDCC_CRCI_SDC1 6
-#define MSMSDCC_CRCI_SDC2 7
-#define MSMSDCC_CRCI_SDC3 12
-#define MSMSDCC_CRCI_SDC4 13
-
-#define MMCIPOWER 0x000
-#define MCI_PWR_OFF 0x00
-#define MCI_PWR_UP 0x02
-#define MCI_PWR_ON 0x03
-#define MCI_OD (1 << 6)
-
-#define MMCICLOCK 0x004
-#define MCI_CLK_ENABLE (1 << 8)
-#define MCI_CLK_PWRSAVE (1 << 9)
-#define MCI_CLK_WIDEBUS (1 << 10)
-#define MCI_CLK_FLOWENA (1 << 12)
-#define MCI_CLK_INVERTOUT (1 << 13)
-#define MCI_CLK_SELECTIN (1 << 14)
-
-#define MMCIARGUMENT 0x008
-#define MMCICOMMAND 0x00c
-#define MCI_CPSM_RESPONSE (1 << 6)
-#define MCI_CPSM_LONGRSP (1 << 7)
-#define MCI_CPSM_INTERRUPT (1 << 8)
-#define MCI_CPSM_PENDING (1 << 9)
-#define MCI_CPSM_ENABLE (1 << 10)
-#define MCI_CPSM_PROGENA (1 << 11)
-#define MCI_CSPM_DATCMD (1 << 12)
-#define MCI_CSPM_MCIABORT (1 << 13)
-#define MCI_CSPM_CCSENABLE (1 << 14)
-#define MCI_CSPM_CCSDISABLE (1 << 15)
-
-
-#define MMCIRESPCMD 0x010
-#define MMCIRESPONSE0 0x014
-#define MMCIRESPONSE1 0x018
-#define MMCIRESPONSE2 0x01c
-#define MMCIRESPONSE3 0x020
-#define MMCIDATATIMER 0x024
-#define MMCIDATALENGTH 0x028
-
-#define MMCIDATACTRL 0x02c
-#define MCI_DPSM_ENABLE (1 << 0)
-#define MCI_DPSM_DIRECTION (1 << 1)
-#define MCI_DPSM_MODE (1 << 2)
-#define MCI_DPSM_DMAENABLE (1 << 3)
-
-#define MMCIDATACNT 0x030
-#define MMCISTATUS 0x034
-#define MCI_CMDCRCFAIL (1 << 0)
-#define MCI_DATACRCFAIL (1 << 1)
-#define MCI_CMDTIMEOUT (1 << 2)
-#define MCI_DATATIMEOUT (1 << 3)
-#define MCI_TXUNDERRUN (1 << 4)
-#define MCI_RXOVERRUN (1 << 5)
-#define MCI_CMDRESPEND (1 << 6)
-#define MCI_CMDSENT (1 << 7)
-#define MCI_DATAEND (1 << 8)
-#define MCI_DATABLOCKEND (1 << 10)
-#define MCI_CMDACTIVE (1 << 11)
-#define MCI_TXACTIVE (1 << 12)
-#define MCI_RXACTIVE (1 << 13)
-#define MCI_TXFIFOHALFEMPTY (1 << 14)
-#define MCI_RXFIFOHALFFULL (1 << 15)
-#define MCI_TXFIFOFULL (1 << 16)
-#define MCI_RXFIFOFULL (1 << 17)
-#define MCI_TXFIFOEMPTY (1 << 18)
-#define MCI_RXFIFOEMPTY (1 << 19)
-#define MCI_TXDATAAVLBL (1 << 20)
-#define MCI_RXDATAAVLBL (1 << 21)
-#define MCI_SDIOINTR (1 << 22)
-#define MCI_PROGDONE (1 << 23)
-#define MCI_ATACMDCOMPL (1 << 24)
-#define MCI_SDIOINTOPER (1 << 25)
-#define MCI_CCSTIMEOUT (1 << 26)
-
-#define MMCICLEAR 0x038
-#define MCI_CMDCRCFAILCLR (1 << 0)
-#define MCI_DATACRCFAILCLR (1 << 1)
-#define MCI_CMDTIMEOUTCLR (1 << 2)
-#define MCI_DATATIMEOUTCLR (1 << 3)
-#define MCI_TXUNDERRUNCLR (1 << 4)
-#define MCI_RXOVERRUNCLR (1 << 5)
-#define MCI_CMDRESPENDCLR (1 << 6)
-#define MCI_CMDSENTCLR (1 << 7)
-#define MCI_DATAENDCLR (1 << 8)
-#define MCI_DATABLOCKENDCLR (1 << 10)
-
-#define MMCIMASK0 0x03c
-#define MCI_CMDCRCFAILMASK (1 << 0)
-#define MCI_DATACRCFAILMASK (1 << 1)
-#define MCI_CMDTIMEOUTMASK (1 << 2)
-#define MCI_DATATIMEOUTMASK (1 << 3)
-#define MCI_TXUNDERRUNMASK (1 << 4)
-#define MCI_RXOVERRUNMASK (1 << 5)
-#define MCI_CMDRESPENDMASK (1 << 6)
-#define MCI_CMDSENTMASK (1 << 7)
-#define MCI_DATAENDMASK (1 << 8)
-#define MCI_DATABLOCKENDMASK (1 << 10)
-#define MCI_CMDACTIVEMASK (1 << 11)
-#define MCI_TXACTIVEMASK (1 << 12)
-#define MCI_RXACTIVEMASK (1 << 13)
-#define MCI_TXFIFOHALFEMPTYMASK (1 << 14)
-#define MCI_RXFIFOHALFFULLMASK (1 << 15)
-#define MCI_TXFIFOFULLMASK (1 << 16)
-#define MCI_RXFIFOFULLMASK (1 << 17)
-#define MCI_TXFIFOEMPTYMASK (1 << 18)
-#define MCI_RXFIFOEMPTYMASK (1 << 19)
-#define MCI_TXDATAAVLBLMASK (1 << 20)
-#define MCI_RXDATAAVLBLMASK (1 << 21)
-#define MCI_SDIOINTMASK (1 << 22)
-#define MCI_PROGDONEMASK (1 << 23)
-#define MCI_ATACMDCOMPLMASK (1 << 24)
-#define MCI_SDIOINTOPERMASK (1 << 25)
-#define MCI_CCSTIMEOUTMASK (1 << 26)
-
-#define MMCIMASK1 0x040
-#define MMCIFIFOCNT 0x044
-#define MCICCSTIMER 0x058
-
-#define MMCIFIFO 0x080 /* to 0x0bc */
-
-#define MCI_IRQENABLE \
- (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
- MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
- MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK)
-
-#define MCI_IRQ_PIO \
- (MCI_RXDATAAVLBLMASK | MCI_TXDATAAVLBLMASK | MCI_RXFIFOEMPTYMASK | \
- MCI_TXFIFOEMPTYMASK | MCI_RXFIFOFULLMASK | MCI_TXFIFOFULLMASK | \
- MCI_RXFIFOHALFFULLMASK | MCI_TXFIFOHALFEMPTYMASK | \
- MCI_RXACTIVEMASK | MCI_TXACTIVEMASK)
-/*
- * The size of the FIFO in bytes.
- */
-#define MCI_FIFOSIZE (16*4)
-
-#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
-
-#define NR_SG 32
-
-struct clk;
-
-struct msmsdcc_nc_dmadata {
- dmov_box cmd[NR_SG];
- uint32_t cmdptr;
-};
-
-struct msmsdcc_dma_data {
- struct msmsdcc_nc_dmadata *nc;
- dma_addr_t nc_busaddr;
- dma_addr_t cmd_busaddr;
- dma_addr_t cmdptr_busaddr;
-
- struct msm_dmov_cmd hdr;
- enum dma_data_direction dir;
-
- struct scatterlist *sg;
- int num_ents;
-
- int channel;
- struct msmsdcc_host *host;
- int busy; /* Set if DM is busy */
- int active;
- unsigned int result;
- struct msm_dmov_errdata err;
-};
-
-struct msmsdcc_pio_data {
- struct scatterlist *sg;
- unsigned int sg_len;
- unsigned int sg_off;
-};
-
-struct msmsdcc_curr_req {
- struct mmc_request *mrq;
- struct mmc_command *cmd;
- struct mmc_data *data;
- unsigned int xfer_size; /* Total data size */
- unsigned int xfer_remain; /* Bytes remaining to send */
- unsigned int data_xfered; /* Bytes acked by BLKEND irq */
- int got_dataend;
- int user_pages;
-};
-
-struct msmsdcc_stats {
- unsigned int reqs;
- unsigned int cmds;
- unsigned int cmdpoll_hits;
- unsigned int cmdpoll_misses;
-};
-
-struct msmsdcc_host {
- struct resource *cmd_irqres;
- struct resource *memres;
- struct resource *dmares;
- void __iomem *base;
- int pdev_id;
- unsigned int stat_irq;
-
- struct msmsdcc_curr_req curr;
-
- struct mmc_host *mmc;
- struct clk *clk; /* main MMC bus clock */
- struct clk *pclk; /* SDCC peripheral bus clock */
- unsigned int clks_on; /* set if clocks are enabled */
- struct timer_list busclk_timer;
-
- unsigned int eject; /* eject state */
-
- spinlock_t lock;
-
- unsigned int clk_rate; /* Current clock rate */
- unsigned int pclk_rate;
-
- u32 pwr;
- u32 saved_irq0mask; /* MMCIMASK0 reg value */
- struct msm_mmc_platform_data *plat;
-
- struct timer_list timer;
- unsigned int oldstat;
-
- struct msmsdcc_dma_data dma;
- struct msmsdcc_pio_data pio;
- int cmdpoll;
- struct msmsdcc_stats stats;
-
- struct tasklet_struct dma_tlet;
- /* Command parameters */
- unsigned int cmd_timeout;
- unsigned int cmd_pio_irqmask;
- unsigned int cmd_datactrl;
- struct mmc_command *cmd_cmd;
- u32 cmd_c;
- bool gpio_config_status;
-
- bool prog_scan;
- bool prog_enable;
-};
-
-#endif
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 5316d9b9e7b4..317d709f7550 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -281,7 +281,7 @@ static inline void buffer_swap32(u32 *buf, int len)
int i;
for (i = 0; i < ((len + 3) / 4); i++) {
- st_le32(buf, *buf);
+ *buf = swab32(*buf);
buf++;
}
}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 072f67066df3..7eff087cf515 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -388,7 +388,7 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
{
struct dma_slave_config cfg = { 0, };
struct dma_chan *chan;
- unsigned int slave_id;
+ void *slave_data = NULL;
struct resource *res;
dma_cap_mask_t mask;
int ret;
@@ -397,13 +397,12 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
dma_cap_set(DMA_SLAVE, mask);
if (pdata)
- slave_id = direction == DMA_MEM_TO_DEV
- ? pdata->slave_id_tx : pdata->slave_id_rx;
- else
- slave_id = 0;
+ slave_data = direction == DMA_MEM_TO_DEV ?
+ (void *)pdata->slave_id_tx :
+ (void *)pdata->slave_id_rx;
chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- (void *)(unsigned long)slave_id, &host->pd->dev,
+ slave_data, &host->pd->dev,
direction == DMA_MEM_TO_DEV ? "tx" : "rx");
dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
@@ -414,8 +413,6 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
- /* In the OF case the driver will get the slave ID from the DT */
- cfg.slave_id = slave_id;
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
@@ -1411,7 +1408,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->addr = reg;
- host->timeout = msecs_to_jiffies(1000);
+ host->timeout = msecs_to_jiffies(10000);
host->ccs_enable = !pd || !pd->ccs_unsupported;
host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 6906a905cd54..354f4f335ed5 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -201,7 +201,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
of_match_device(sh_mobile_sdhi_of_match, &pdev->dev);
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+ struct tmio_mmc_data *mmd = pdev->dev.platform_data;
struct tmio_mmc_host *host;
struct resource *res;
int irq, ret, i = 0;
@@ -245,30 +245,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
else
host->bus_shift = 0;
- mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
- if (p) {
- mmc_data->flags = p->tmio_flags;
- mmc_data->ocr_mask = p->tmio_ocr_mask;
- mmc_data->capabilities |= p->tmio_caps;
- mmc_data->capabilities2 |= p->tmio_caps2;
- mmc_data->cd_gpio = p->cd_gpio;
-
- if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
- /*
- * Yes, we have to provide slave IDs twice to TMIO:
- * once as a filter parameter and once for channel
- * configuration as an explicit slave ID
- */
- dma_priv->chan_priv_tx = (void *)p->dma_slave_tx;
- dma_priv->chan_priv_rx = (void *)p->dma_slave_rx;
- dma_priv->slave_id_tx = p->dma_slave_tx;
- dma_priv->slave_id_rx = p->dma_slave_rx;
- }
- }
+ if (mmd)
+ *mmc_data = *mmd;
+
dma_priv->filter = shdma_chan_filter;
dma_priv->enable = sh_mobile_sdhi_enable_dma;
mmc_data->alignment_shift = 1; /* 2-byte alignment */
+ mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED;
/*
* All SDHI blocks support 2-byte and larger block sizes in 4-bit
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index fc3805ed69d1..4a597f5a53e2 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -43,10 +43,6 @@ struct tmio_mmc_data;
struct tmio_mmc_host;
struct tmio_mmc_dma {
- void *chan_priv_tx;
- void *chan_priv_rx;
- int slave_id_tx;
- int slave_id_rx;
enum dma_slave_buswidth dma_buswidth;
bool (*filter)(struct dma_chan *chan, void *arg);
void (*enable)(struct tmio_mmc_host *host, bool enable);
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 331bb618e398..e4b05dbb9ca8 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -261,7 +261,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
{
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (!host->dma || (!host->pdev->dev.of_node &&
- (!host->dma->chan_priv_tx || !host->dma->chan_priv_rx)))
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -278,7 +278,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- host->dma->filter, host->dma->chan_priv_tx,
+ host->dma->filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -286,8 +286,6 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (!host->chan_tx)
return;
- if (host->dma->chan_priv_tx)
- cfg.slave_id = host->dma->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
cfg.dst_addr_width = host->dma->dma_buswidth;
@@ -299,7 +297,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- host->dma->filter, host->dma->chan_priv_rx,
+ host->dma->filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -307,8 +305,6 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (!host->chan_rx)
goto ereqrx;
- if (host->dma->chan_priv_rx)
- cfg.slave_id = host->dma->slave_id_rx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
cfg.src_addr_width = host->dma->dma_buswidth;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 71fea895ce38..a03ad2951c7b 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -309,6 +309,19 @@ config MTD_SWAP
The driver provides wear leveling by storing erase counter into the
OOB.
+config MTD_PARTITIONED_MASTER
+ bool "Retain master device when partitioned"
+ default n
+ depends on MTD
+ help
+ For historical reasons, by default, either a master is present or
+ several partitions are present, but not both. The concern was that
+ data listed in multiple partitions was dangerous; however, SCSI does
+ this and it is frequently useful for applications. This config option
+ leaves the master in even if the device is partitioned. It also makes
+ the parent of the partition device be the master device, rather than
+ what lies behind the master.
+
source "drivers/mtd/chips/Kconfig"
source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 423666b51efb..9a1a6ffd16b8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -206,23 +206,23 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
}
offset += (ersize * ernum);
- }
+ }
- if (offset != devsize) {
- /* Argh */
- printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
- kfree(mtd->eraseregions);
- kfree(cfi->cmdset_priv);
- kfree(mtd);
- return NULL;
- }
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ kfree(mtd->eraseregions);
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
- for (i=0; i<mtd->numeraseregions;i++){
- printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
- i, (unsigned long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].numblocks);
- }
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i, (unsigned long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
/* Also select the correct geometry setup too */
mtd->_erase = cfi_staa_erase_varsize;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 66f0405f7e53..b16f3cda97ff 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -9,7 +9,15 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/*
+ * When the first attempt at device initialization fails, we may need to
+ * wait a little bit and retry. This timeout, by default 3 seconds, gives
+ * device time to start up. Required on BCM2708 and a few other chipsets.
+ */
+#define MTD_DEFAULT_TIMEOUT 3
+
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
@@ -209,10 +217,14 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
}
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
+static struct block2mtd_dev *add_device(char *devname, int erase_size,
+ int timeout)
{
+#ifndef MODULE
+ int i;
+#endif
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
- struct block_device *bdev;
+ struct block_device *bdev = ERR_PTR(-ENODEV);
struct block2mtd_dev *dev;
char *name;
@@ -225,15 +237,28 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Get a handle on the device */
bdev = blkdev_get_by_path(devname, mode, dev);
-#ifndef MODULE
- if (IS_ERR(bdev)) {
-
- /* We might not have rootfs mounted at this point. Try
- to resolve the device name by other means. */
- dev_t devt = name_to_dev_t(devname);
- if (devt)
- bdev = blkdev_get_by_dev(devt, mode, dev);
+#ifndef MODULE
+ /*
+ * We might not have the root device mounted at this point.
+ * Try to resolve the device name by other means.
+ */
+ for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
+ dev_t devt;
+
+ if (i)
+ /*
+ * Calling wait_for_device_probe in the first loop
+ * was not enough, sleep for a bit in subsequent
+ * go-arounds.
+ */
+ msleep(1000);
+ wait_for_device_probe();
+
+ devt = name_to_dev_t(devname);
+ if (!devt)
+ continue;
+ bdev = blkdev_get_by_dev(devt, mode, dev);
}
#endif
@@ -280,6 +305,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Device didn't get added, so free the entry */
goto err_destroy_mutex;
}
+
list_add(&dev->list, &blkmtd_device_list);
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
dev->mtd.index,
@@ -348,16 +374,19 @@ static inline void kill_final_newline(char *str)
#ifndef MODULE
static int block2mtd_init_called = 0;
-static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
+/* 80 for device, 12 for erase size */
+static char block2mtd_paramline[80 + 12];
#endif
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
char *str = buf;
char *token[2];
char *name;
size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
@@ -395,7 +424,7 @@ static int block2mtd_setup2(const char *val)
}
}
- add_device(name, erase_size);
+ add_device(name, erase_size, timeout);
return 0;
}
@@ -463,8 +492,7 @@ static void block2mtd_exit(void)
}
}
-
-module_init(block2mtd_init);
+late_initcall(block2mtd_init);
module_exit(block2mtd_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 448ce42f951e..866d31904475 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1805,7 +1805,7 @@ static int __init doc_dbg_register(struct docg3 *docg3)
}
}
-static void __exit doc_dbg_unregister(struct docg3 *docg3)
+static void doc_dbg_unregister(struct docg3 *docg3)
{
debugfs_remove_recursive(docg3->debugfs_root);
}
@@ -2033,7 +2033,7 @@ static int __init docg3_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct resource *ress;
void __iomem *base;
- int ret, floor, found = 0;
+ int ret, floor;
struct docg3_cascade *cascade;
ret = -ENXIO;
@@ -2073,14 +2073,11 @@ static int __init docg3_probe(struct platform_device *pdev)
0);
if (ret)
goto err_probe;
- found++;
}
ret = doc_register_sysfs(pdev, cascade);
if (ret)
goto err_probe;
- if (!found)
- goto notfound;
platform_set_drvdata(pdev, cascade);
doc_dbg_register(cascade->floors[0]->priv);
@@ -2103,7 +2100,7 @@ err_probe:
*
* Returns 0
*/
-static int __exit docg3_release(struct platform_device *pdev)
+static int docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
struct docg3 *docg3 = cascade->floors[0]->priv;
@@ -2134,7 +2131,7 @@ static struct platform_driver g3_driver = {
},
.suspend = docg3_suspend,
.resume = docg3_resume,
- .remove = __exit_p(docg3_release),
+ .remove = docg3_release,
};
module_platform_driver_probe(g3_driver, docg3_probe);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 85e35467fba6..3af137f49ac9 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -223,6 +223,8 @@ static int m25p_probe(struct spi_device *spi)
*/
if (data && data->type)
flash_name = data->type;
+ else if (!strcmp(spi->modalias, "spi-nor"))
+ flash_name = NULL; /* auto-detect */
else
flash_name = spi->modalias;
@@ -247,9 +249,16 @@ static int m25p_remove(struct spi_device *spi)
}
/*
- * XXX This needs to be kept in sync with spi_nor_ids. We can't share
- * it with spi-nor, because if this is built as a module then modpost
- * won't be able to read it and add appropriate aliases.
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flash are compatible to some extent, and their
+ * differences can often be differentiated by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "jedec,spi-nor").
+ *
+ * Many flash names are kept here in this list (as well as in spi-nor.c) to
+ * keep them available as module aliases for existing platforms.
*/
static const struct spi_device_id m25p_ids[] = {
{"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"},
@@ -291,6 +300,12 @@ static const struct spi_device_id m25p_ids[] = {
{"w25x64"}, {"w25q64"}, {"w25q80"}, {"w25q80bl"},
{"w25q128"}, {"w25q256"}, {"cat25c11"},
{"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
+
+ /*
+ * Generic support for SPI NOR that can be identified by the JEDEC READ
+ * ID opcode (0x9F). Use this, if possible.
+ */
+ {"spi-nor"},
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
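As the comment above suggests, new boards are expected to bind against the generic name instead of growing this table. A hedged sketch of what registering such a device from board code might look like (bus number, clock rate and function name are made up for illustration):

        #include <linux/init.h>
        #include <linux/spi/spi.h>

        static struct spi_board_info board_spi_flash[] __initdata = {
                {
                        /* generic JEDEC-probed SPI NOR; no per-chip entry needed */
                        .modalias       = "spi-nor",
                        .max_speed_hz   = 20000000,
                        .bus_num        = 0,
                        .chip_select    = 0,
                },
        };

        static int __init foo_board_init(void)
        {
                return spi_register_board_info(board_spi_flash,
                                               ARRAY_SIZE(board_spi_flash));
        }

Device-tree users would similarly describe the chip with a generic compatible string (e.g. "jedec,spi-nor") rather than a per-part name.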
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index ba801d2c6dcc..e715ae90632f 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -242,7 +242,7 @@ config MTD_L440GX
config MTD_CFI_FLAGADM
tristate "CFI Flash device mapping on FlagaDM"
- depends on 8xx && MTD_CFI
+ depends on PPC_8xx && MTD_CFI
help
Mapping for the Flaga digital module. If you don't have one, ignore
this setting.
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index ea697202935a..892ad6ac63f2 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -274,7 +274,7 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
return err;
}
-static int __exit sa1100_mtd_remove(struct platform_device *pdev)
+static int sa1100_mtd_remove(struct platform_device *pdev)
{
struct sa_info *info = platform_get_drvdata(pdev);
struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
@@ -286,7 +286,7 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
- .remove = __exit_p(sa1100_mtd_remove),
+ .remove = sa1100_mtd_remove,
.driver = {
.name = "sa1100-mtd",
},
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index d1d671daf235..9969fedb1f13 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -117,5 +117,5 @@ module_exit(cleanup_ts5500_map);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
-MODULE_DESCRIPTION("MTD map driver for Techology Systems TS-5500 board");
+MODULE_DESCRIPTION("MTD map driver for Technology Systems TS-5500 board");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index d08229eb44d8..2b0c52870999 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -171,9 +171,6 @@ static void mtd_blktrans_work(struct work_struct *work)
background_done = 0;
}
- if (req)
- __blk_end_request_all(req, -EIO);
-
spin_unlock_irq(rq->queue_lock);
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 11883bd26d9d..d172195fbd15 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -38,6 +38,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
+#include <linux/kconfig.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -501,6 +502,29 @@ out_error:
return ret;
}
+static int mtd_add_device_partitions(struct mtd_info *mtd,
+ struct mtd_partition *real_parts,
+ int nbparts)
+{
+ int ret;
+
+ if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
+ ret = add_mtd_device(mtd);
+ if (ret == 1)
+ return -ENODEV;
+ }
+
+ if (nbparts > 0) {
+ ret = add_mtd_partitions(mtd, real_parts, nbparts);
+ if (ret && IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ del_mtd_device(mtd);
+ return ret;
+ }
+
+ return 0;
+}
+
+
/**
* mtd_device_parse_register - parse partitions and register an MTD device.
*
@@ -523,7 +547,8 @@ out_error:
* found this functions tries to fallback to information specified in
* @parts/@nr_parts.
* * If any partitioning info was found, this function registers the found
- * partitions.
+ * partitions. If the MTD_PARTITIONED_MASTER option is set, then the device
+ * as a whole is registered first.
* * If no partitions were found this function just registers the MTD device
* @mtd and exits.
*
@@ -534,27 +559,21 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- int err;
- struct mtd_partition *real_parts;
+ int ret;
+ struct mtd_partition *real_parts = NULL;
- err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
- if (err <= 0 && nr_parts && parts) {
+ ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+ if (ret <= 0 && nr_parts && parts) {
real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
GFP_KERNEL);
if (!real_parts)
- err = -ENOMEM;
+ ret = -ENOMEM;
else
- err = nr_parts;
+ ret = nr_parts;
}
- if (err > 0) {
- err = add_mtd_partitions(mtd, real_parts, err);
- kfree(real_parts);
- } else if (err == 0) {
- err = add_mtd_device(mtd);
- if (err == 1)
- err = -ENODEV;
- }
+ if (ret >= 0)
+ ret = mtd_add_device_partitions(mtd, real_parts, ret);
/*
* FIXME: some drivers unfortunately call this function more than once.
@@ -569,7 +588,8 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}
- return err;
+ kfree(real_parts);
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
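For reference, a caller only hands its mtd_info plus any fallback partition table to mtd_device_parse_register(); whether the master device is registered as well is now decided by CONFIG_MTD_PARTITIONED_MASTER inside the helper added above. A minimal, hypothetical driver-side call (layout and names are illustrative):

        #include <linux/mtd/mtd.h>
        #include <linux/mtd/partitions.h>
        #include <linux/sizes.h>

        /* Fallback layout, used only if no partition parser finds anything. */
        static const struct mtd_partition fallback_parts[] = {
                { .name = "boot", .offset = 0,       .size = SZ_512K },
                { .name = "data", .offset = SZ_512K, .size = MTDPART_SIZ_FULL },
        };

        /* in a driver's probe(), once mtd is initialised: */
        ret = mtd_device_parse_register(mtd, NULL, NULL,
                                        fallback_parts,
                                        ARRAY_SIZE(fallback_parts));
        if (ret)
                return ret;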
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index e779de315ade..cafdb8855a79 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -30,6 +30,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
+#include <linux/kconfig.h>
#include "mtdcore.h"
@@ -379,10 +380,17 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.name = name;
slave->mtd.owner = master->owner;
- /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
- * to have the same data be in two different partitions.
+ /* NOTE: Historically, we didn't arrange MTDs as a tree out of
+ * concern for showing the same data in multiple partitions.
+ * However, it is very useful to have the master node present,
+ * so the MTD_PARTITIONED_MASTER option allows that. The master
+ * will have device nodes etc only if this is set, so make the
+ * parent conditional on that option. Note, this is a way to
+ * distinguish between the master and the partition in sysfs.
*/
- slave->mtd.dev.parent = master->dev.parent;
+ slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
+ &master->dev :
+ master->dev.parent;
slave->mtd._read = part_read;
slave->mtd._write = part_write;
@@ -546,12 +554,35 @@ out_register:
return slave;
}
+static ssize_t mtd_partition_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_part *part = PART(mtd);
+ return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
+}
+
+static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
+
+static const struct attribute *mtd_partition_attrs[] = {
+ &dev_attr_offset.attr,
+ NULL
+};
+
+static int mtd_add_partition_attrs(struct mtd_part *new)
+{
+ int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
+ if (ret)
+ printk(KERN_WARNING
+ "mtd: failed to create partition attrs, err=%d\n", ret);
+ return ret;
+}
+
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length)
{
struct mtd_partition part;
- struct mtd_part *p, *new;
- uint64_t start, end;
+ struct mtd_part *new;
int ret = 0;
/* the direct offset is expected */
@@ -575,31 +606,15 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
if (IS_ERR(new))
return PTR_ERR(new);
- start = offset;
- end = offset + length;
-
mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry(p, &mtd_partitions, list)
- if (p->master == master) {
- if ((start >= p->offset) &&
- (start < (p->offset + p->mtd.size)))
- goto err_inv;
-
- if ((end >= p->offset) &&
- (end < (p->offset + p->mtd.size)))
- goto err_inv;
- }
-
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&new->mtd);
+ mtd_add_partition_attrs(new);
+
return ret;
-err_inv:
- mutex_unlock(&mtd_partitions_mutex);
- free_partition(new);
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
@@ -612,6 +627,8 @@ int mtd_del_partition(struct mtd_info *master, int partno)
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if ((slave->master == master) &&
(slave->mtd.index == partno)) {
+ sysfs_remove_files(&slave->mtd.dev.kobj,
+ mtd_partition_attrs);
ret = del_mtd_device(&slave->mtd);
if (ret < 0)
break;
@@ -631,8 +648,8 @@ EXPORT_SYMBOL_GPL(mtd_del_partition);
* and registers slave MTD objects which are bound to the master according to
* the partition definitions.
*
- * We don't register the master, or expect the caller to have done so,
- * for reasons of data integrity.
+ * For historical reasons, this function's caller only registers the master
+ * if the MTD_PARTITIONED_MASTER config option is set.
*/
int add_mtd_partitions(struct mtd_info *master,
@@ -655,6 +672,7 @@ int add_mtd_partitions(struct mtd_info *master,
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&slave->mtd);
+ mtd_add_partition_attrs(slave);
cur_offset = slave->offset + slave->mtd.size;
}
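With the overlap check removed, mtd_add_partition() now trusts its caller for the geometry, and the new sysfs attribute lets user space see where each slave starts. A rough usage sketch (master_mtd and err are placeholders; the sysfs path is the conventional one for MTD class devices):

        /* Illustrative: carve a 1 MiB partition out of an existing master MTD. */
        err = mtd_add_partition(master_mtd, "scratch",
                                4 * 1024 * 1024,    /* offset */
                                1 * 1024 * 1024);   /* length */
        if (err)
                pr_warn("mtd: could not add partition: %d\n", err);

        /*
         * The partition's base is then readable from user space, e.g.
         * /sys/class/mtd/mtdN/offset (created by mtd_add_partition_attrs()).
         */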
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index d93c849b70b5..46010bd895b1 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -485,7 +485,7 @@ static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
for (i = 0; i < ecc_len; i++)
layout->eccpos[i] = oobsize - ecc_len + i;
- layout->oobfree[0].offset = 2;
+ layout->oobfree[0].offset = PMECC_OOB_RESERVED_BYTES;
layout->oobfree[0].length =
oobsize - ecc_len - layout->oobfree[0].offset;
}
@@ -1204,14 +1204,14 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
goto err;
}
- regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, regs_rom);
- if (IS_ERR(host->pmecc_rom_base)) {
- if (!host->has_no_lookup_table)
- /* Don't display the information again */
+ if (!host->has_no_lookup_table) {
+ regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev,
+ regs_rom);
+ if (IS_ERR(host->pmecc_rom_base)) {
dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n");
-
- host->has_no_lookup_table = true;
+ host->has_no_lookup_table = true;
+ }
}
if (host->has_no_lookup_table) {
@@ -1254,7 +1254,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
nand_chip->ecc.steps = mtd->writesize / sector_size;
nand_chip->ecc.total = nand_chip->ecc.bytes *
nand_chip->ecc.steps;
- if (nand_chip->ecc.total > mtd->oobsize - 2) {
+ if (nand_chip->ecc.total >
+ mtd->oobsize - PMECC_OOB_RESERVED_BYTES) {
dev_err(host->dev, "No room for ECC bytes\n");
err_no = -EINVAL;
goto err;
@@ -1719,7 +1720,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
comp[index++] = &host->nfc->comp_cmd_done;
if (index == 0) {
- dev_err(host->dev, "Unkown interrupt flag: 0x%08x\n", flag);
+ dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag);
return -EINVAL;
}
@@ -1752,11 +1753,10 @@ static int nfc_send_command(struct atmel_nand_host *host,
cmd, addr, cycle0);
timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
- while (nfc_cmd_readl(NFCADDR_CMD_NFCBUSY, host->nfc->base_cmd_regs)
- & NFCADDR_CMD_NFCBUSY) {
+ while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) {
if (time_after(jiffies, timeout)) {
dev_err(host->dev,
- "Time out to wait CMD_NFCBUSY ready!\n");
+ "Time out to wait for NFC ready!\n");
return -ETIMEDOUT;
}
}
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index d4035e335ad8..668e7358f19b 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -152,4 +152,7 @@
/* Time out value for reading PMECC status register */
#define PMECC_MAX_TIMEOUT_MS 100
+/* Reserved bytes in oob area */
+#define PMECC_OOB_RESERVED_BYTES 2
+
#endif
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 85b8ca6af7d2..4d5d26221a7e 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -35,6 +35,7 @@
#define NFC_CTRL_DISABLE (1 << 1)
#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
+#define NFC_SR_BUSY (1 << 8)
#define NFC_SR_XFR_DONE (1 << 16)
#define NFC_SR_CMD_DONE (1 << 17)
#define NFC_SR_DTOE (1 << 20)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index f44c6061536a..870c7fc0f759 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -225,7 +225,6 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
- uint16_t TclsRising = 1;
uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
uint16_t dv_window = 0;
uint16_t en_lo, en_hi;
@@ -276,8 +275,6 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
- if (!TclsRising)
- cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
if (cs_cnt == 0)
cs_cnt = 1;
@@ -1536,6 +1533,9 @@ int denali_init(struct denali_nand_info *denali)
denali->nand.options |= NAND_SKIP_BBTSCAN;
denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+ /* no subpage writes on denali */
+ denali->nand.options |= NAND_NO_SUBPAGE_WRITE;
+
/*
* Denali Controller only support 15bit and 8bit ECC in MRST,
* so just let controller do 15bit ECC for MLC and 8bit ECC for
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 4c05f4f6a5c6..51394e59901b 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -317,7 +317,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
- IFC_TIMEOUT_MSECS * HZ/1000);
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
/* ctrl->nand_stat will be updated from IRQ context */
if (!ctrl->nand_stat)
@@ -860,7 +860,7 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
- IFC_TIMEOUT_MSECS * HZ/1000);
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
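The conversion above matters because wait_event_timeout() expects a timeout in jiffies; the open-coded "msecs * HZ / 1000" form truncates (possibly to zero) for some HZ values, whereas msecs_to_jiffies() scales and rounds up correctly. A minimal sketch of the idiom, with hypothetical names:

        #include <linux/errno.h>
        #include <linux/jiffies.h>
        #include <linux/wait.h>

        #define FOO_TIMEOUT_MS  500     /* hypothetical value */

        static DECLARE_WAIT_QUEUE_HEAD(foo_wait);
        static bool foo_done;

        static int foo_wait_cmd(void)
        {
                /* msecs_to_jiffies() handles HZ scaling and rounds up. */
                if (!wait_event_timeout(foo_wait, foo_done,
                                        msecs_to_jiffies(FOO_TIMEOUT_MS)))
                        return -ETIMEDOUT;

                return 0;
        }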
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index edfaa21b1817..e58af4bfa8c8 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -873,6 +873,7 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
{
struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 val;
+ int ret;
/* Set default NAND width to 8 bits */
pdata->width = 8;
@@ -891,8 +892,12 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
sizeof(*pdata->nand_timings), GFP_KERNEL);
if (!pdata->nand_timings)
return -ENOMEM;
- of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
+ ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
sizeof(*pdata->nand_timings));
+ if (ret) {
+ dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
+ pdata->nand_timings = NULL;
+ }
/* Set default NAND bank to 0 */
pdata->bank = 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 27f272ed502a..43fa16b5f510 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -1105,7 +1105,7 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
} else
- dev_err(this->dev, "unknow arch.\n");
+ dev_err(this->dev, "unknown arch.\n");
return reg & mask;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 33f3c3c54dbc..1b8f3500e6d2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -446,7 +446,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
struct dma_async_tx_descriptor *desc)
{
struct completion *dma_c = &this->dma_done;
- int err;
+ unsigned long timeout;
init_completion(dma_c);
@@ -456,8 +456,8 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
dma_async_issue_pending(get_dma_chan(this));
/* Wait for the interrupt from the DMA block. */
- err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
- if (!err) {
+ timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+ if (!timeout) {
dev_err(this->dev, "DMA timeout, last DMA :%d\n",
this->last_dma_type);
gpmi_dump_info(this);
@@ -477,7 +477,7 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
struct dma_async_tx_descriptor *desc)
{
struct completion *bch_c = &this->bch_done;
- int err;
+ unsigned long timeout;
/* Prepare to receive an interrupt from the BCH block. */
init_completion(bch_c);
@@ -486,8 +486,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
start_dma_without_bch_irq(this, desc);
/* Wait for the interrupt from the BCH block. */
- err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
- if (!err) {
+ timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+ if (!timeout) {
dev_err(this->dev, "BCH timeout, last DMA :%d\n",
this->last_dma_type);
gpmi_dump_info(this);
@@ -1950,7 +1950,9 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
ret = nand_boot_init(this);
if (ret)
goto err_out;
- chip->scan_bbt(mtd);
+ ret = chip->scan_bbt(mtd);
+ if (ret)
+ goto err_out;
ppdata.of_node = this->pdev->dev.of_node;
ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
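The timeout fixes above follow from wait_for_completion_timeout()'s return type: it returns an unsigned long that is 0 on timeout and the remaining jiffies otherwise, so storing it in an int named "err" both misnames it and risks truncation. A small sketch of the intended pattern (names are placeholders):

        #include <linux/completion.h>
        #include <linux/errno.h>
        #include <linux/jiffies.h>

        static DECLARE_COMPLETION(foo_dma_done);

        static int foo_wait_dma(void)
        {
                unsigned long timeout;

                timeout = wait_for_completion_timeout(&foo_dma_done,
                                                      msecs_to_jiffies(1000));
                if (!timeout)
                        return -ETIMEDOUT;      /* 0 means the wait expired */

                return 0;                       /* nonzero: jiffies left over */
        }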
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index a8f550fec35e..372e0e38f59b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -386,26 +386,51 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
-static void wait_op_done(struct mxc_nand_host *host, int useirq)
+static int wait_op_done(struct mxc_nand_host *host, int useirq)
{
- int max_retries = 8000;
+ int ret = 0;
+
+ /*
+ * If operation is already complete, don't bother to setup an irq or a
+ * loop.
+ */
+ if (host->devtype_data->check_int(host))
+ return 0;
if (useirq) {
- if (!host->devtype_data->check_int(host)) {
- reinit_completion(&host->op_completion);
- irq_control(host, 1);
- wait_for_completion(&host->op_completion);
+ unsigned long timeout;
+
+ reinit_completion(&host->op_completion);
+
+ irq_control(host, 1);
+
+ timeout = wait_for_completion_timeout(&host->op_completion, HZ);
+ if (!timeout && !host->devtype_data->check_int(host)) {
+ dev_dbg(host->dev, "timeout waiting for irq\n");
+ ret = -ETIMEDOUT;
}
} else {
- while (max_retries-- > 0) {
- if (host->devtype_data->check_int(host))
- break;
+ int max_retries = 8000;
+ int done;
+ do {
udelay(1);
+
+ done = host->devtype_data->check_int(host);
+ if (done)
+ break;
+
+ } while (--max_retries);
+
+ if (!done) {
+ dev_dbg(host->dev, "timeout polling for completion\n");
+ ret = -ETIMEDOUT;
}
- if (max_retries < 0)
- pr_debug("%s: INT not set\n", __func__);
}
+
+ WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
+
+ return ret;
}
static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
@@ -527,30 +552,17 @@ static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
static void send_read_id_v3(struct mxc_nand_host *host)
{
- struct nand_chip *this = &host->nand;
-
/* Read ID into main buffer */
writel(NFC_ID, NFC_V3_LAUNCH);
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
-
- if (this->options & NAND_BUSWIDTH_16) {
- /* compress the ID info */
- host->data_buf[1] = host->data_buf[2];
- host->data_buf[2] = host->data_buf[4];
- host->data_buf[3] = host->data_buf[6];
- host->data_buf[4] = host->data_buf[8];
- host->data_buf[5] = host->data_buf[10];
- }
}
/* Request the NANDFC to perform a read of the NAND device ID. */
static void send_read_id_v1_v2(struct mxc_nand_host *host)
{
- struct nand_chip *this = &host->nand;
-
/* NANDFC buffer 0 is used for device ID output */
writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
@@ -560,15 +572,6 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
-
- if (this->options & NAND_BUSWIDTH_16) {
- /* compress the ID info */
- host->data_buf[1] = host->data_buf[2];
- host->data_buf[2] = host->data_buf[4];
- host->data_buf[3] = host->data_buf[6];
- host->data_buf[4] = host->data_buf[8];
- host->data_buf[5] = host->data_buf[10];
- }
}
static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -694,9 +697,17 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
if (host->status_request)
return host->devtype_data->get_dev_status(host) & 0xFF;
- ret = *(uint8_t *)(host->data_buf + host->buf_start);
- host->buf_start++;
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* only take the lower byte of each word */
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+
+ host->buf_start += 2;
+ } else {
+ ret = *(uint8_t *)(host->data_buf + host->buf_start);
+ host->buf_start++;
+ }
+ pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@@ -825,6 +836,12 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
}
}
+/*
+ * MXC NANDFC can only perform full page+spare or spare-only read/write. When
+ * the upper layers perform a read/write buf operation, the saved column address
+ * is used to index into the full page. So usually this function is called with
+ * column == 0 (unless no column cycle is needed indicated by column == -1)
+ */
static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
{
struct nand_chip *nand_chip = mtd->priv;
@@ -832,16 +849,13 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
/* Write out column address, if necessary */
if (column != -1) {
- /*
- * MXC NANDFC can only perform full page+spare or
- * spare-only read/write. When the upper layers
- * perform a read/write buf operation, the saved column
- * address is used to index into the full page.
- */
- host->devtype_data->send_addr(host, 0, page_addr == -1);
+ host->devtype_data->send_addr(host, column & 0xff,
+ page_addr == -1);
if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
- host->devtype_data->send_addr(host, 0, false);
+ host->devtype_data->send_addr(host,
+ (column >> 8) & 0xff,
+ false);
}
/* Write out page address, if necessary */
@@ -903,7 +917,7 @@ static void preset_v1(struct mtd_info *mtd)
struct mxc_nand_host *host = nand_chip->priv;
uint16_t config1 = 0;
- if (nand_chip->ecc.mode == NAND_ECC_HW)
+ if (nand_chip->ecc.mode == NAND_ECC_HW && mtd->writesize)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
if (!host->devtype_data->irqpending_quirk)
@@ -931,9 +945,6 @@ static void preset_v2(struct mtd_info *mtd)
struct mxc_nand_host *host = nand_chip->priv;
uint16_t config1 = 0;
- if (nand_chip->ecc.mode == NAND_ECC_HW)
- config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
-
config1 |= NFC_V2_CONFIG1_FP_INT;
if (!host->devtype_data->irqpending_quirk)
@@ -942,6 +953,9 @@ static void preset_v2(struct mtd_info *mtd)
if (mtd->writesize) {
uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 4)
config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
@@ -999,9 +1013,6 @@ static void preset_v3(struct mtd_info *mtd)
NFC_V3_CONFIG2_INT_MSK |
NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
- if (chip->ecc.mode == NAND_ECC_HW)
- config2 |= NFC_V3_CONFIG2_ECC_EN;
-
addr_phases = fls(chip->pagemask) >> 3;
if (mtd->writesize == 2048) {
@@ -1016,6 +1027,9 @@ static void preset_v3(struct mtd_info *mtd)
}
if (mtd->writesize) {
+ if (chip->ecc.mode == NAND_ECC_HW)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+
config2 |= NFC_V3_CONFIG2_PPB(
ffs(mtd->erasesize / mtd->writesize) - 6,
host->devtype_data->ppb_shift);
@@ -1066,6 +1080,9 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->status_request = true;
host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -1079,7 +1096,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
command = NAND_CMD_READ0; /* only READ0 is valid */
host->devtype_data->send_cmd(host, command, false);
- mxc_do_addr_cycle(mtd, column, page_addr);
+ WARN_ONCE(column < 0,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, 0, page_addr);
if (mtd->writesize > 512)
host->devtype_data->send_cmd(host,
@@ -1100,7 +1120,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->buf_start = column;
host->devtype_data->send_cmd(host, command, false);
- mxc_do_addr_cycle(mtd, column, page_addr);
+ WARN_ONCE(column < -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, 0, page_addr);
break;
case NAND_CMD_PAGEPROG:
@@ -1108,6 +1131,9 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
copy_spare(mtd, false);
host->devtype_data->send_page(mtd, NFC_INPUT);
host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -1115,15 +1141,29 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
host->devtype_data->send_read_id(host);
- host->buf_start = column;
+ host->buf_start = 0;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
host->devtype_data->send_cmd(host, command, false);
+ WARN_ONCE(column != -1,
+ "Unexpected column value (cmd=%u, col=%d)\n",
+ command, column);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
+ case NAND_CMD_PARAM:
+ host->devtype_data->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+ break;
+ default:
+ WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
+ command);
+ break;
}
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index df7eb4ff07d1..c2e1232cd45c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -386,7 +386,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
uint8_t buf[2] = { 0, 0 };
int ret = 0, res, i = 0;
- ops.datbuf = NULL;
+ memset(&ops, 0, sizeof(ops));
ops.oobbuf = buf;
ops.ooboffs = chip->badblockpos;
if (chip->options & NAND_BUSWIDTH_16) {
@@ -566,6 +566,25 @@ void nand_wait_ready(struct mtd_info *mtd)
EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ register struct nand_chip *chip = mtd->priv;
+
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ break;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+};
+
+/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
@@ -643,8 +662,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
NAND_CTRL_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd,
NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
- ;
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
return;
/* This applies to read commands */
@@ -740,8 +759,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
- ;
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
return;
case NAND_CMD_RNDOUT:
@@ -968,7 +987,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
- ret = -EINVAL;
+ return -EINVAL;
/* Align to last block address if size addresses end of the device */
if (ofs + len == mtd->size)
@@ -1031,7 +1050,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
- ret = -EINVAL;
+ return -EINVAL;
nand_get_device(mtd, FL_LOCKING);
@@ -1716,9 +1735,9 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
nand_get_device(mtd, FL_READING);
+ memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = buf;
- ops.oobbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_read_ops(mtd, from, &ops);
*retlen = ops.retlen;
@@ -2124,7 +2143,7 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
/**
- * nand_write_subpage_hwecc - [REPLACABLE] hardware ECC based subpage write
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
* @mtd: mtd info structure
* @chip: nand chip info structure
* @offset: column address of subpage within the page
@@ -2508,9 +2527,9 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
+ memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
- ops.oobbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(mtd, to, &ops);
@@ -2536,9 +2555,9 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
int ret;
nand_get_device(mtd, FL_WRITING);
+ memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
- ops.oobbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(mtd, to, &ops);
*retlen = ops.retlen;
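Several hunks above replace piecemeal field assignments with memset(&ops, 0, sizeof(ops)); an mtd_oob_ops left partly uninitialised on the stack can carry stack garbage in ooblen/ooboffs and confuse the OOB handling. The safe pattern, sketched for a plain data read (function name is hypothetical):

        #include <linux/mtd/mtd.h>
        #include <linux/string.h>

        static int foo_read(struct mtd_info *mtd, loff_t from, size_t len,
                            size_t *retlen, u_char *buf)
        {
                struct mtd_oob_ops ops;
                int ret;

                memset(&ops, 0, sizeof(ops));   /* unused fields stay zero/NULL */
                ops.mode   = MTD_OPS_PLACE_OOB;
                ops.len    = len;
                ops.datbuf = buf;

                ret = mtd_read_oob(mtd, from, &ops);
                *retlen = ops.retlen;
                return ret;
        }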
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 10b1f7a4fe50..a4615fcc3d00 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -38,8 +38,8 @@
#include <linux/platform_data/mtd-nand-pxa3xx.h>
-#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
-#define NAND_STOP_DELAY (2 * HZ/50)
+#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
+#define NAND_STOP_DELAY msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE (2048)
/*
@@ -605,11 +605,24 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
{}
#endif
+static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
+{
+ struct pxa3xx_nand_info *info = data;
+
+ handle_data_pio(info);
+
+ info->state = STATE_CMD_DONE;
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
struct pxa3xx_nand_info *info = devid;
unsigned int status, is_completed = 0, is_ready = 0;
unsigned int ready, cmd_done;
+ irqreturn_t ret = IRQ_HANDLED;
if (info->cs == 0) {
ready = NDSR_FLASH_RDY;
@@ -651,7 +664,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
} else {
info->state = (status & NDSR_RDDREQ) ?
STATE_PIO_READING : STATE_PIO_WRITING;
- handle_data_pio(info);
+ ret = IRQ_WAKE_THREAD;
+ goto NORMAL_IRQ_EXIT;
}
}
if (status & cmd_done) {
@@ -692,7 +706,7 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
if (is_ready)
complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
- return IRQ_HANDLED;
+ return ret;
}
static inline int is_buf_blank(uint8_t *buf, size_t len)
@@ -951,7 +965,7 @@ static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
- int ret, exec_cmd;
+ int exec_cmd;
/*
* if this is a x16 device ,then convert the input
@@ -983,9 +997,8 @@ static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
info->need_wait = 1;
pxa3xx_nand_start(info);
- ret = wait_for_completion_timeout(&info->cmd_complete,
- CHIP_DELAY_TIMEOUT);
- if (!ret) {
+ if (!wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT)) {
dev_err(&info->pdev->dev, "Wait time out!!!\n");
/* Stop State Machine for next command cycle */
pxa3xx_nand_stop(info);
@@ -1000,7 +1013,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
- int ret, exec_cmd, ext_cmd_type;
+ int exec_cmd, ext_cmd_type;
/*
* if this is a x16 device then convert the input
@@ -1063,9 +1076,8 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
init_completion(&info->cmd_complete);
pxa3xx_nand_start(info);
- ret = wait_for_completion_timeout(&info->cmd_complete,
- CHIP_DELAY_TIMEOUT);
- if (!ret) {
+ if (!wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT)) {
dev_err(&info->pdev->dev, "Wait time out!!!\n");
/* Stop State Machine for next command cycle */
pxa3xx_nand_stop(info);
@@ -1198,13 +1210,11 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
- int ret;
if (info->need_wait) {
- ret = wait_for_completion_timeout(&info->dev_ready,
- CHIP_DELAY_TIMEOUT);
info->need_wait = 0;
- if (!ret) {
+ if (!wait_for_completion_timeout(&info->dev_ready,
+ CHIP_DELAY_TIMEOUT)) {
dev_err(&info->pdev->dev, "Ready time out!!!\n");
return NAND_STATUS_FAIL;
}
@@ -1508,6 +1518,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
return ret;
}
+ memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
+
pxa3xx_flash_ids[0].name = f->name;
pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
pxa3xx_flash_ids[0].pagesize = f->page_size;
@@ -1710,7 +1722,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
/* initialize all interrupts to be disabled */
disable_int(info, NDSR_MASK);
- ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
+ ret = request_threaded_irq(irq, pxa3xx_nand_irq,
+ pxa3xx_nand_irq_thread, IRQF_ONESHOT,
+ pdev->name, info);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto fail_free_buf;
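Moving the PIO drain into a threaded handler follows the usual split: the hard handler does only the quick register work and returns IRQ_WAKE_THREAD, and request_threaded_irq() with IRQF_ONESHOT keeps the line masked until the thread function finishes. A skeletal version of the pattern (all names are placeholders):

        #include <linux/interrupt.h>
        #include <linux/platform_device.h>

        static irqreturn_t foo_irq(int irq, void *data)
        {
                /* ack/mask in the controller, defer the heavy lifting */
                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t foo_irq_thread(int irq, void *data)
        {
                /* runs in process context; may sleep while draining FIFOs */
                return IRQ_HANDLED;
        }

        static int foo_request_irq(struct platform_device *pdev, void *info)
        {
                int irq = platform_get_irq(pdev, 0);

                if (irq < 0)
                        return irq;

                return request_threaded_irq(irq, foo_irq, foo_irq_thread,
                                            IRQF_ONESHOT, "foo", info);
        }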
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 35aef5edb588..0e02be47ce1d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -948,8 +948,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
cpu_type = platform_get_device_id(pdev)->driver_data;
- pr_debug("s3c2410_nand_probe(%p)\n", pdev);
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
@@ -1045,7 +1043,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
- pr_debug("initialised ok\n");
return 0;
exit_error:
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index a21c378f096a..c3ce81c1a716 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -159,7 +159,6 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
return;
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = pdata->slave_id_fifo0_tx;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
cfg.src_addr = 0;
@@ -175,7 +174,6 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
if (!flctl->chan_fifo0_rx)
goto err;
- cfg.slave_id = pdata->slave_id_fifo0_rx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.dst_addr = 0;
cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 635ee0027691..43b3392ffee7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1743,7 +1743,6 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
struct onenand_chip *this = mtd->priv;
int column, subpage;
int written = 0;
- int ret = 0;
if (this->state == FL_PM_SUSPENDED)
return -EBUSY;
@@ -1786,15 +1785,10 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
onenand_panic_wait(mtd);
/* In partial page write we don't update bufferram */
- onenand_update_bufferram(mtd, to, !ret && !subpage);
+ onenand_update_bufferram(mtd, to, !subpage);
if (ONENAND_IS_2PLANE(this)) {
ONENAND_SET_BUFFERRAM1(this);
- onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
- }
-
- if (ret) {
- printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
- break;
+ onenand_update_bufferram(mtd, to + this->writesize, !subpage);
}
written += thislen;
@@ -1808,7 +1802,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
}
*retlen = written;
- return ret;
+ return 0;
}
/**
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 1c7308c2c77d..5d5d36272bb5 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -460,8 +460,7 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR);
/* Wait for the interrupt. */
- err = wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000));
- if (!err) {
+ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
dev_err(q->dev,
"cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
cmd, addr, readl(base + QUADSPI_FR),
@@ -830,27 +829,27 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ret = clk_prepare_enable(q->clk_en);
if (ret) {
- dev_err(dev, "can not enable the qspi_en clock\n");
+ dev_err(dev, "cannot enable the qspi_en clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(q->clk);
if (ret) {
- dev_err(dev, "can not enable the qspi clock\n");
+ dev_err(dev, "cannot enable the qspi clock: %d\n", ret);
goto clk_failed;
}
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(dev, "failed to get the irq\n");
+ dev_err(dev, "failed to get the irq: %d\n", ret);
goto irq_failed;
}
ret = devm_request_irq(dev, ret,
fsl_qspi_irq_handler, 0, pdev->name, q);
if (ret) {
- dev_err(dev, "failed to request irq.\n");
+ dev_err(dev, "failed to request irq: %d\n", ret);
goto irq_failed;
}
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index b6a5a0c269e1..14a5d2325dac 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -369,17 +369,13 @@ erase_err:
return ret;
}
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct mtd_info *mtd = nor->mtd;
uint32_t offset = ofs;
uint8_t status_old, status_new;
int ret = 0;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
- if (ret)
- return ret;
-
status_old = read_sr(nor);
if (offset < mtd->size - (mtd->size / 2))
@@ -402,26 +398,18 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
(status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
write_enable(nor);
ret = write_sr(nor, status_new);
- if (ret)
- goto err;
}
-err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
return ret;
}
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct mtd_info *mtd = nor->mtd;
uint32_t offset = ofs;
uint8_t status_old, status_new;
int ret = 0;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
- if (ret)
- return ret;
-
status_old = read_sr(nor);
if (offset+len > mtd->size - (mtd->size / 64))
@@ -444,15 +432,41 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
(status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
write_enable(nor);
ret = write_sr(nor, status_new);
- if (ret)
- goto err;
}
-err:
+ return ret;
+}
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_lock(nor, ofs, len);
+
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
return ret;
}
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
((kernel_ulong_t)&(struct flash_info) { \
@@ -524,6 +538,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, 0) },
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
@@ -553,6 +568,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
@@ -648,6 +664,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
@@ -658,6 +675,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
@@ -1045,6 +1063,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
/* nor protection support for STmicro chips */
if (JEDEC_MFR(info) == CFI_MFR_ST) {
+ nor->flash_lock = stm_lock;
+ nor->flash_unlock = stm_unlock;
+ }
+
+ if (nor->flash_lock && nor->flash_unlock) {
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
}
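With the lock logic factored into stm_lock()/stm_unlock() and reached through nor->flash_lock / nor->flash_unlock, other manufacturers can plug in their own block-protection scheme without touching the mtd callbacks. A hedged sketch of what wiring a hypothetical vendor handler would look like (the "acme" names and manufacturer id are invented for illustration):

        /* Hypothetical vendor-specific handlers; not a real driver. */
        static int acme_flash_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
        {
                /* program the vendor's block-protection bits here */
                return 0;
        }

        static int acme_flash_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
        {
                /* clear the vendor's block-protection bits here */
                return 0;
        }

        /* in spi_nor_scan(), next to the STmicro case above: */
        if (JEDEC_MFR(info) == CFI_MFR_ACME) {  /* assumed manufacturer id */
                nor->flash_lock = acme_flash_lock;
                nor->flash_unlock = acme_flash_unlock;
        }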
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index e579f9027c47..79316159eec6 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -9,6 +9,8 @@
#include <linux/slab.h>
#include <linux/mtd/nand_ecc.h>
+#include "mtd_test.h"
+
/*
* Test the implementation for software ECC
*
@@ -274,6 +276,10 @@ static int nand_ecc_test_run(const size_t size)
}
pr_info("ok - %s-%zd\n",
nand_ecc_test[i].name, size);
+
+ err = mtdtest_relax();
+ if (err)
+ break;
}
error:
kfree(error_data);
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
index f437c776c54f..4b7bee17c924 100644
--- a/drivers/mtd/tests/mtd_test.h
+++ b/drivers/mtd/tests/mtd_test.h
@@ -1,4 +1,16 @@
#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+static inline int mtdtest_relax(void)
+{
+ cond_resched();
+ if (signal_pending(current)) {
+ pr_info("aborting test due to pending signal!\n");
+ return -EINTR;
+ }
+
+ return 0;
+}
int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 273f7e553954..09a4ccac53a2 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -320,6 +320,10 @@ static int overwrite_test(void)
break;
}
+ err = mtdtest_relax();
+ if (err)
+ break;
+
opno++;
}
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 5e061186eab1..8e8525f0202f 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -70,7 +70,7 @@ static int write_eraseblock(int ebnum)
int i;
struct mtd_oob_ops ops;
int err = 0;
- loff_t addr = ebnum * mtd->erasesize;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
@@ -112,7 +112,10 @@ static int write_whole_device(void)
return err;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
}
pr_info("written %u eraseblocks\n", i);
return 0;
@@ -141,6 +144,31 @@ static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t cou
return bitflips;
}
+/*
+ * Compare with 0xff and show the address, offset and data bytes at
+ * comparison failure. Return number of bitflips encountered.
+ */
+static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
+ size_t count)
+{
+ const unsigned char *su1;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs; 0 < count; ++su1, count--, i++) {
+ res = *su1 ^ 0xff;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
+ (unsigned long)addr, (unsigned long)offset + i,
+ *su1, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
static int verify_eraseblock(int ebnum)
{
int i;
@@ -203,6 +231,15 @@ static int verify_eraseblock(int ebnum)
bitflips = memcmpshow(addr, readbuf + use_offset,
writebuf + (use_len_max * i) + use_offset,
use_len);
+
+ /* verify pre-offset area for 0xff */
+ bitflips += memffshow(addr, 0, readbuf, use_offset);
+
+ /* verify post-(use_offset + use_len) area for 0xff */
+ k = use_offset + use_len;
+ bitflips += memffshow(addr, k, readbuf + k,
+ mtd->ecclayout->oobavail - k);
+
if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
@@ -212,34 +249,8 @@ static int verify_eraseblock(int ebnum)
return -1;
}
} else if (bitflips) {
- pr_info("ignoring error as within bitflip_limit\n");
+ pr_info("ignoring errors as within bitflip limit\n");
}
-
- for (k = 0; k < use_offset; ++k)
- if (readbuf[k] != 0xff) {
- pr_err("error: verify 0xff "
- "failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- if (errcnt > 1000) {
- pr_err("error: too "
- "many errors\n");
- return -1;
- }
- }
- for (k = use_offset + use_len;
- k < mtd->ecclayout->oobavail; ++k)
- if (readbuf[k] != 0xff) {
- pr_err("error: verify 0xff "
- "failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- if (errcnt > 1000) {
- pr_err("error: too "
- "many errors\n");
- return -1;
- }
- }
}
if (vary_offset)
do_vary_offset();
@@ -310,7 +321,10 @@ static int verify_all_eraseblocks(void)
return err;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
}
pr_info("verified %u eraseblocks\n", i);
return 0;
@@ -421,7 +435,10 @@ static int __init mtd_oobtest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
@@ -634,7 +651,11 @@ static int __init mtd_oobtest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
addr += mtd->writesize;
}
}
@@ -672,7 +693,10 @@ static int __init mtd_oobtest_init(void)
}
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
index 88296e888e9d..ba1890d5632c 100644
--- a/drivers/mtd/tests/pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -407,7 +407,10 @@ static int __init mtd_pagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("written %u eraseblocks\n", i);
@@ -422,7 +425,10 @@ static int __init mtd_pagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
diff --git a/drivers/mtd/tests/readtest.c b/drivers/mtd/tests/readtest.c
index a54cf1511114..58df07acdbdb 100644
--- a/drivers/mtd/tests/readtest.c
+++ b/drivers/mtd/tests/readtest.c
@@ -190,7 +190,12 @@ static int __init mtd_readtest_init(void)
if (!err)
err = ret;
}
- cond_resched();
+
+ ret = mtdtest_relax();
+ if (ret) {
+ err = ret;
+ goto out;
+ }
}
if (err)
diff --git a/drivers/mtd/tests/speedtest.c b/drivers/mtd/tests/speedtest.c
index 5ee9f7021020..5a6f31af06f9 100644
--- a/drivers/mtd/tests/speedtest.c
+++ b/drivers/mtd/tests/speedtest.c
@@ -185,7 +185,7 @@ static long calc_speed(void)
(finish.tv_usec - start.tv_usec) / 1000;
if (ms == 0)
return 0;
- k = goodebcnt * (mtd->erasesize / 1024) * 1000;
+ k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000;
do_div(k, ms);
return k;
}
@@ -269,7 +269,10 @@ static int __init mtd_speedtest_init(void)
err = write_eraseblock(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -284,7 +287,10 @@ static int __init mtd_speedtest_init(void)
err = read_eraseblock(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -303,7 +309,10 @@ static int __init mtd_speedtest_init(void)
err = write_eraseblock_by_page(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -318,7 +327,10 @@ static int __init mtd_speedtest_init(void)
err = read_eraseblock_by_page(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -337,7 +349,10 @@ static int __init mtd_speedtest_init(void)
err = write_eraseblock_by_2pages(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -352,7 +367,10 @@ static int __init mtd_speedtest_init(void)
err = read_eraseblock_by_2pages(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -385,7 +403,11 @@ static int __init mtd_speedtest_init(void)
err = multiblock_erase(i, j);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
i += j;
}
stop_timing();
diff --git a/drivers/mtd/tests/stresstest.c b/drivers/mtd/tests/stresstest.c
index c9d42cc2df1b..e509f8aa9a7e 100644
--- a/drivers/mtd/tests/stresstest.c
+++ b/drivers/mtd/tests/stresstest.c
@@ -96,7 +96,7 @@ static int do_read(void)
if (offs + len > mtd->erasesize)
len = mtd->erasesize - offs;
}
- addr = eb * mtd->erasesize + offs;
+ addr = (loff_t)eb * mtd->erasesize + offs;
return mtdtest_read(mtd, addr, len, readbuf);
}
@@ -124,7 +124,7 @@ static int do_write(void)
offsets[eb + 1] = 0;
}
}
- addr = eb * mtd->erasesize + offs;
+ addr = (loff_t)eb * mtd->erasesize + offs;
err = mtdtest_write(mtd, addr, len, writebuf);
if (unlikely(err))
return err;
@@ -221,7 +221,10 @@ static int __init mtd_stresstest_init(void)
err = do_operation();
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("finished, %d operations done\n", op);
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index 7b59ef522d5e..aecc6ce5a9e1 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -95,7 +95,7 @@ static int write_eraseblock2(int ebnum)
loff_t addr = (loff_t)ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
- if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
break;
prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
@@ -195,7 +195,7 @@ static int verify_eraseblock2(int ebnum)
loff_t addr = (loff_t)ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
- if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
break;
prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
@@ -269,7 +269,10 @@ static int verify_all_eraseblocks_ff(void)
return err;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
}
pr_info("verified %u eraseblocks\n", i);
return 0;
@@ -346,7 +349,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("written %u eraseblocks\n", i);
@@ -360,7 +366,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
@@ -383,7 +392,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("written %u eraseblocks\n", i);
@@ -398,7 +410,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
index b55bc52a1340..e5d6e6d9532f 100644
--- a/drivers/mtd/tests/torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -101,11 +101,11 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
{
int err, retries = 0;
size_t read;
- loff_t addr = ebnum * mtd->erasesize;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
if (pgcnt) {
- addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
@@ -155,11 +155,11 @@ static inline int write_pattern(int ebnum, void *buf)
{
int err;
size_t written;
- loff_t addr = ebnum * mtd->erasesize;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
if (pgcnt) {
- addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
err = mtd_write(mtd, addr, len, &written, buf);
@@ -279,7 +279,10 @@ static int __init tort_init(void)
" for 0xFF... pattern\n");
goto out;
}
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
}
@@ -294,7 +297,10 @@ static int __init tort_init(void)
err = write_pattern(i, patt);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
/* Verify what we wrote */
@@ -314,7 +320,10 @@ static int __init tort_init(void)
"0x55AA55..." : "0xAA55AA...");
goto out;
}
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
}
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 9d2e16f3150a..68eea5befaf1 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
second_is_newer = !second_is_newer;
} else {
dbg_bld("PEB %d CRC is OK", pnum);
- bitflips = !!err;
+ bitflips |= !!err;
}
mutex_unlock(&ubi->buf_mutex);
@@ -1301,6 +1301,30 @@ out_ech:
return err;
}
+static struct ubi_attach_info *alloc_ai(void)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ kfree(ai);
+ ai = NULL;
+ }
+
+ return ai;
+}
+
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
@@ -1313,7 +1337,7 @@ out_ech:
* UBI_NO_FASTMAP denotes that no fastmap was found.
* UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/
-static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
int err, pnum, fm_anchor = -1;
unsigned long long max_sqnum = 0;
@@ -1334,7 +1358,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+ err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
if (err < 0)
goto out_vidh;
@@ -1350,7 +1374,12 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (fm_anchor < 0)
return UBI_NO_FASTMAP;
- return ubi_scan_fastmap(ubi, ai, fm_anchor);
+ destroy_ai(*ai);
+ *ai = alloc_ai();
+ if (!*ai)
+ return -ENOMEM;
+
+ return ubi_scan_fastmap(ubi, *ai, fm_anchor);
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
@@ -1362,30 +1391,6 @@ out:
#endif
-static struct ubi_attach_info *alloc_ai(const char *slab_name)
-{
- struct ubi_attach_info *ai;
-
- ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
- if (!ai)
- return ai;
-
- INIT_LIST_HEAD(&ai->corr);
- INIT_LIST_HEAD(&ai->free);
- INIT_LIST_HEAD(&ai->erase);
- INIT_LIST_HEAD(&ai->alien);
- ai->volumes = RB_ROOT;
- ai->aeb_slab_cache = kmem_cache_create(slab_name,
- sizeof(struct ubi_ainf_peb),
- 0, 0, NULL);
- if (!ai->aeb_slab_cache) {
- kfree(ai);
- ai = NULL;
- }
-
- return ai;
-}
-
/**
* ubi_attach - attach an MTD device.
* @ubi: UBI device descriptor
@@ -1399,7 +1404,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
int err;
struct ubi_attach_info *ai;
- ai = alloc_ai("ubi_aeb_slab_cache");
+ ai = alloc_ai();
if (!ai)
return -ENOMEM;
@@ -1413,11 +1418,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (force_scan)
err = scan_all(ubi, ai, 0);
else {
- err = scan_fast(ubi, ai);
- if (err > 0) {
+ err = scan_fast(ubi, &ai);
+ if (err > 0 || mtd_is_eccerr(err)) {
if (err != UBI_NO_FASTMAP) {
destroy_ai(ai);
- ai = alloc_ai("ubi_aeb_slab_cache2");
+ ai = alloc_ai();
if (!ai)
return -ENOMEM;
@@ -1453,10 +1458,10 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
goto out_wl;
#ifdef CONFIG_MTD_UBI_FASTMAP
- if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+ if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
struct ubi_attach_info *scan_ai;
- scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+ scan_ai = alloc_ai();
if (!scan_ai) {
err = -ENOMEM;
goto out_wl;
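scan_fast() now takes a struct ubi_attach_info ** so that, once a fastmap anchor is found, the attach info collected during the limited scan can be discarded and ubi_scan_fastmap() starts from a freshly allocated, empty one; alloc_ai() correspondingly loses its slab-name argument. The user-space model below only illustrates this pass-by-reference reset pattern; the struct and function names are stand-ins, not UBI's:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct ubi_attach_info: just one counter. */
struct ctx { int pebs_seen; };

static struct ctx *alloc_ctx(void) { return calloc(1, sizeof(struct ctx)); }
static void destroy_ctx(struct ctx *c) { free(c); }

/* Mirrors the shape of scan_fast(): the context is passed by reference so a
 * fresh, empty one can be swapped in before the fastmap-based attach runs. */
static int scan_model(struct ctx **c)
{
	(*c)->pebs_seen = 64;	/* pretend the limited scan saw 64 PEBs */

	destroy_ctx(*c);	/* discard the partial results ... */
	*c = alloc_ctx();	/* ... and hand the next stage a clean context */
	if (!*c)
		return -1;

	return 0;
}

int main(void)
{
	struct ctx *c = alloc_ctx();
	int err;

	if (!c)
		return 1;
	err = scan_model(&c);
	printf("err=%d pebs_seen=%d\n", err, c ? c->pebs_seen : -1);
	destroy_ctx(c);
	return 0;
}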
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index db2c05b6fe7f..c9eb78f10a0d 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -310,6 +310,8 @@ static void ubiblock_do_work(struct work_struct *work)
blk_rq_map_sg(req->q, req, pdu->usgl.sg);
ret = ubiblock_read(pdu);
+ rq_flush_dcache_pages(req);
+
blk_mq_end_request(req, ret);
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index ba01a8d22d28..b7f824d5ee88 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -81,6 +81,7 @@ static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
+static bool fm_debug;
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
@@ -154,23 +155,22 @@ static struct device_attribute dev_mtd_num =
*/
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
+ int ret;
struct ubi_notification nt;
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
-#ifdef CONFIG_MTD_UBI_FASTMAP
switch (ntype) {
case UBI_VOLUME_ADDED:
case UBI_VOLUME_REMOVED:
case UBI_VOLUME_RESIZED:
case UBI_VOLUME_RENAMED:
- if (ubi_update_fastmap(ubi)) {
- ubi_err(ubi, "Unable to update fastmap!");
- ubi_ro_mode(ubi);
- }
+ ret = ubi_update_fastmap(ubi);
+ if (ret)
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
}
-#endif
+
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
@@ -950,8 +950,10 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
- ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+ ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
ubi->fm_disabled = !fm_autoconvert;
+ if (fm_debug)
+ ubi_enable_dbg_chk_fastmap(ubi);
if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
<= UBI_FM_MAX_START) {
@@ -970,8 +972,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
- mutex_init(&ubi->fm_mutex);
- init_rwsem(&ubi->fm_sem);
+ init_rwsem(&ubi->fm_protect);
+ init_rwsem(&ubi->fm_eba_sem);
ubi_msg(ubi, "attaching mtd%d", mtd->index);
@@ -1115,8 +1117,11 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP
/* If we don't write a new fastmap at detach time we lose all
- * EC updates that have been made since the last written fastmap. */
- ubi_update_fastmap(ubi);
+ * EC updates that have been made since the last written fastmap.
+ * In case of fastmap debugging we omit the update to simulate an
+ * unclean shutdown. */
+ if (!ubi_dbg_chk_fastmap(ubi))
+ ubi_update_fastmap(ubi);
#endif
/*
* Before freeing anything, we have to stop the background thread to
@@ -1164,9 +1169,9 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
return ERR_PTR(err);
/* MTD device number is defined by the major / minor numbers */
- major = imajor(path.dentry->d_inode);
- minor = iminor(path.dentry->d_inode);
- mode = path.dentry->d_inode->i_mode;
+ major = imajor(d_backing_inode(path.dentry));
+ minor = iminor(d_backing_inode(path.dentry));
+ mode = d_backing_inode(path.dentry)->i_mode;
path_put(&path);
if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
return ERR_PTR(-EINVAL);
@@ -1501,6 +1506,8 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+module_param(fm_debug, bool, 0);
+MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index d647e504f9b1..d16fccf79179 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
/* Validate the request */
err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;
err = get_exclusive(desc);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 7335c9ff9d99..b077e43b5ba9 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -263,7 +263,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
struct dentry *dent = file->f_path.dentry;
struct ubi_device *ubi;
struct ubi_debug_info *d;
- char buf[3];
+ char buf[8];
int val;
ubi = ubi_get_device(ubi_num);
@@ -275,12 +275,30 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
val = d->chk_gen;
else if (dent == d->dfs_chk_io)
val = d->chk_io;
+ else if (dent == d->dfs_chk_fastmap)
+ val = d->chk_fastmap;
else if (dent == d->dfs_disable_bgt)
val = d->disable_bgt;
else if (dent == d->dfs_emulate_bitflips)
val = d->emulate_bitflips;
else if (dent == d->dfs_emulate_io_failures)
val = d->emulate_io_failures;
+ else if (dent == d->dfs_emulate_power_cut) {
+ snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_min) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ }
else {
count = -EINVAL;
goto out;
@@ -309,7 +327,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
struct ubi_device *ubi;
struct ubi_debug_info *d;
size_t buf_size;
- char buf[8];
+ char buf[8] = {0};
int val;
ubi = ubi_get_device(ubi_num);
@@ -323,6 +341,21 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
goto out;
}
+ if (dent == d->dfs_power_cut_min) {
+ if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_emulate_power_cut) {
+ if (kstrtoint(buf, 0, &val) != 0)
+ count = -EINVAL;
+ d->emulate_power_cut = val;
+ goto out;
+ }
+
if (buf[0] == '1')
val = 1;
else if (buf[0] == '0')
@@ -336,6 +369,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
d->chk_gen = val;
else if (dent == d->dfs_chk_io)
d->chk_io = val;
+ else if (dent == d->dfs_chk_fastmap)
+ d->chk_fastmap = val;
else if (dent == d->dfs_disable_bgt)
d->disable_bgt = val;
else if (dent == d->dfs_emulate_bitflips)
@@ -406,6 +441,13 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
goto out_remove;
d->dfs_chk_io = dent;
+ fname = "chk_fastmap";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_fastmap = dent;
+
fname = "tst_disable_bgt";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
&dfs_fops);
@@ -427,6 +469,27 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
goto out_remove;
d->dfs_emulate_io_failures = dent;
+ fname = "tst_emulate_power_cut";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_power_cut = dent;
+
+ fname = "tst_emulate_power_cut_min";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_power_cut_min = dent;
+
+ fname = "tst_emulate_power_cut_max";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_power_cut_max = dent;
+
return 0;
out_remove:
@@ -447,3 +510,36 @@ void ubi_debugfs_exit_dev(struct ubi_device *ubi)
if (IS_ENABLED(CONFIG_DEBUG_FS))
debugfs_remove_recursive(ubi->dbg.dfs_dir);
}
+
+/**
+ * ubi_dbg_power_cut - emulate a power cut if it is time to do so
+ * @ubi: UBI device description object
+ * @caller: Flags set to indicate from where the function is being called
+ *
+ * Returns non-zero if a power cut was emulated, zero if not.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+ unsigned int range;
+
+ if ((ubi->dbg.emulate_power_cut & caller) == 0)
+ return 0;
+
+ if (ubi->dbg.power_cut_counter == 0) {
+ ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
+
+ if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
+ range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
+ ubi->dbg.power_cut_counter += prandom_u32() % range;
+ }
+ return 0;
+ }
+
+ ubi->dbg.power_cut_counter--;
+ if (ubi->dbg.power_cut_counter)
+ return 0;
+
+ ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
+ ubi_ro_mode(ubi);
+ return 1;
+}
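The new ubi_dbg_power_cut() helper, together with the tst_emulate_power_cut{,_min,_max} debugfs knobs, lets UBI deliberately fail an EC or VID header write after a configurable, optionally randomized, number of writes. A user-space model of just the countdown logic, with illustrative names and values:

#include <stdio.h>
#include <stdlib.h>

struct power_cut {
	unsigned int counter;
	unsigned int min;
	unsigned int max;
};

static int power_cut_hit(struct power_cut *pc)
{
	if (pc->counter == 0) {			/* (re)arm the countdown */
		pc->counter = pc->min;
		if (pc->max > pc->min)
			pc->counter += rand() % (pc->max - pc->min);
		return 0;
	}

	if (--pc->counter)			/* still writes left */
		return 0;

	return 1;				/* emulate the power cut now */
}

int main(void)
{
	struct power_cut pc = { .counter = 0, .min = 5, .max = 10 };
	int write;

	for (write = 1; write <= 30; write++)
		if (power_cut_hit(&pc))
			printf("power cut emulated at write %d\n", write);
	return 0;
}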
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index cba89fcd1587..eb8985e5c178 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -127,4 +127,16 @@ static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
{
return ubi->dbg.chk_gen;
}
+
+static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_fastmap;
+}
+
+static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
+{
+ ubi->dbg.chk_fastmap = 1;
+}
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 16e34b37d134..51bca035cd83 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -340,9 +340,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
- down_read(&ubi->fm_sem);
+ down_read(&ubi->fm_eba_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
- up_read(&ubi->fm_sem);
+ up_read(&ubi->fm_eba_sem);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
@@ -567,6 +567,7 @@ retry:
new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
+ up_read(&ubi->fm_eba_sem);
return new_pnum;
}
@@ -577,13 +578,16 @@ retry:
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0)
err = -EIO;
+ up_read(&ubi->fm_eba_sem);
goto out_put;
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
- if (err)
+ if (err) {
+ up_read(&ubi->fm_eba_sem);
goto write_error;
+ }
data_size = offset + len;
mutex_lock(&ubi->buf_mutex);
@@ -592,8 +596,10 @@ retry:
/* Read everything before the area where the write failure happened */
if (offset > 0) {
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
- if (err && err != UBI_IO_BITFLIPS)
+ if (err && err != UBI_IO_BITFLIPS) {
+ up_read(&ubi->fm_eba_sem);
goto out_unlock;
+ }
}
memcpy(ubi->peb_buf + offset, buf, len);
@@ -601,15 +607,15 @@ retry:
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err) {
mutex_unlock(&ubi->buf_mutex);
+ up_read(&ubi->fm_eba_sem);
goto write_error;
}
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
- down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = new_pnum;
- up_read(&ubi->fm_sem);
+ up_read(&ubi->fm_eba_sem);
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg(ubi, "data was successfully recovered");
@@ -704,6 +710,7 @@ retry:
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
+ up_read(&ubi->fm_eba_sem);
return pnum;
}
@@ -714,6 +721,7 @@ retry:
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
goto write_error;
}
@@ -722,13 +730,13 @@ retry:
if (err) {
ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
goto write_error;
}
}
- down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
- up_read(&ubi->fm_sem);
+ up_read(&ubi->fm_eba_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -825,6 +833,7 @@ retry:
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
+ up_read(&ubi->fm_eba_sem);
return pnum;
}
@@ -835,6 +844,7 @@ retry:
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
goto write_error;
}
@@ -842,13 +852,13 @@ retry:
if (err) {
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum);
+ up_read(&ubi->fm_eba_sem);
goto write_error;
}
ubi_assert(vol->eba_tbl[lnum] < 0);
- down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
- up_read(&ubi->fm_sem);
+ up_read(&ubi->fm_eba_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -900,7 +910,7 @@ write_error:
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len)
{
- int err, pnum, tries = 0, vol_id = vol->vol_id;
+ int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
@@ -943,6 +953,7 @@ retry:
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
+ up_read(&ubi->fm_eba_sem);
goto out_leb_unlock;
}
@@ -953,6 +964,7 @@ retry:
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
goto write_error;
}
@@ -960,19 +972,20 @@ retry:
if (err) {
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum);
+ up_read(&ubi->fm_eba_sem);
goto write_error;
}
- if (vol->eba_tbl[lnum] >= 0) {
- err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
+ old_pnum = vol->eba_tbl[lnum];
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_eba_sem);
+
+ if (old_pnum >= 0) {
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
if (err)
goto out_leb_unlock;
}
- down_read(&ubi->fm_sem);
- vol->eba_tbl[lnum] = pnum;
- up_read(&ubi->fm_sem);
-
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
@@ -1218,9 +1231,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl[lnum] == from);
- down_read(&ubi->fm_sem);
+ down_read(&ubi->fm_eba_sem);
vol->eba_tbl[lnum] = to;
- up_read(&ubi->fm_sem);
+ up_read(&ubi->fm_eba_sem);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@@ -1419,7 +1432,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
* during re-size.
*/
ubi_move_aeb_to_list(av, aeb, &ai->erase);
- vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ else
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}
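The recurring up_read(&ubi->fm_eba_sem) additions in eba.c exist because ubi_wl_get_peb() now returns with fm_eba_sem held for reading; the caller keeps it across the header/data writes and the eba_tbl[] update, and must drop it on every error path as well. A pthread-based toy model of that acquire-in-callee / release-in-caller discipline (all names and values are illustrative, build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t fm_eba_sem = PTHREAD_RWLOCK_INITIALIZER;
static int eba_tbl[8];

static int get_peb_locked(void)
{
	pthread_rwlock_rdlock(&fm_eba_sem);	/* like ubi_wl_get_peb() */
	return 3;				/* pretend PEB 3 is free */
}

static int write_leb(int lnum, int fail)
{
	int pnum = get_peb_locked();

	if (fail) {				/* error path: drop the lock, too */
		pthread_rwlock_unlock(&fm_eba_sem);
		return -1;
	}

	eba_tbl[lnum] = pnum;			/* table update still under the lock */
	pthread_rwlock_unlock(&fm_eba_sem);
	return 0;
}

int main(void)
{
	printf("ok=%d fail=%d eba_tbl[2]=%d\n",
	       write_leb(2, 0), write_leb(2, 1), eba_tbl[2]);
	return 0;
}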
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
new file mode 100644
index 000000000000..b2a665398bca
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+
+ ubi_update_fastmap(ubi);
+ spin_lock(&ubi->wl_lock);
+ ubi->fm_work_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /* remove it from the free list,
+ * the wl subsystem no longer knows this erase block */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_wl_entry *e;
+ int enough;
+
+ spin_lock(&ubi->wl_lock);
+
+ return_unused_pool_pebs(ubi, wl_pool);
+ return_unused_pool_pebs(ubi, pool);
+
+ wl_pool->size = 0;
+ pool->size = 0;
+
+ for (;;) {
+ enough = 0;
+ if (pool->size < pool->max_size) {
+ if (!ubi->free.rb_node)
+ break;
+
+ e = wl_get_wle(ubi);
+ if (!e)
+ break;
+
+ pool->pebs[pool->size] = e->pnum;
+ pool->size++;
+ } else
+ enough++;
+
+ if (wl_pool->size < wl_pool->max_size) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ wl_pool->pebs[wl_pool->size] = e->pnum;
+ wl_pool->size++;
+ } else
+ enough++;
+
+ if (enough == 2)
+ break;
+ }
+
+ wl_pool->used = 0;
+ pool->used = 0;
+
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret, retried = 0;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+again:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+
+ /* We also check the WL pool here because at this point we can
+ * refill the WL pool synchronously. */
+ if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ ret = ubi_update_fastmap(ubi);
+ if (ret) {
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+ down_read(&ubi->fm_eba_sem);
+ return -ENOSPC;
+ }
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ }
+
+ if (pool->used == pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ if (retried) {
+ ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+ ret = -ENOSPC;
+ goto out;
+ }
+ retried = 1;
+ up_read(&ubi->fm_eba_sem);
+ goto again;
+ }
+
+ ubi_assert(pool->used < pool->size);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+out:
+ return ret;
+}
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ if (!ubi->fm_work_scheduled) {
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
+ return NULL;
+ }
+
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ /* This can happen if we recovered from a fastmap for the very
+ * first time and are now writing a new one. In this case the wl system
+ * has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+
+static void ubi_fastmap_close(struct ubi_device *ubi)
+{
+ int i;
+
+ flush_work(&ubi->fm_work);
+ return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kfree(ubi->fm->e[i]);
+ }
+ kfree(ubi->fm);
+}
+
+/**
+ * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
+ * See find_mean_wl_entry()
+ *
+ * @ubi: UBI device description object
+ * @e: physical eraseblock to return
+ * @root: RB tree to test against.
+ */
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root) {
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+
+ return e;
+}
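fastmap-wl.c concentrates the pool handling: ubi_wl_get_peb() serves user requests from fm_pool, get_peb_for_wl() serves the wear-leveling worker from fm_wl_pool, and ubi_refill_pools() tops both up from the free tree once they run dry (triggering a fastmap write, or scheduling fm_work from atomic context). The toy below models only the used/size bookkeeping of struct ubi_fm_pool; the PEB numbers and refill policy are made up:

#include <stdio.h>

#define POOL_MAX 4

struct pool {
	int pebs[POOL_MAX];
	int used;
	int size;
};

static void refill(struct pool *p, int base)
{
	int i;

	for (i = 0; i < POOL_MAX; i++)		/* pretend these are free PEBs */
		p->pebs[i] = base + i;
	p->size = POOL_MAX;
	p->used = 0;
}

static int get_peb(struct pool *p)
{
	if (p->used == p->size)			/* exhausted: UBI would write a  */
		refill(p, 100 + p->size);	/* new fastmap before refilling   */
	return p->pebs[p->used++];
}

int main(void)
{
	struct pool p = { .used = 0, .size = 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("got PEB %d\n", get_peb(&p));
	return 0;
}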
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index db3defdfc3c0..02a6de2f53ee 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
* Author: Richard Weinberger <richard@nod.at>
*
* This program is free software; you can redistribute it and/or modify
@@ -17,6 +18,69 @@
#include "ubi.h"
/**
+ * init_seen - allocate memory used for debugging.
+ * @ubi: UBI device description object
+ */
+static inline int *init_seen(struct ubi_device *ubi)
+{
+ int *ret;
+
+ if (!ubi_dbg_chk_fastmap(ubi))
+ return NULL;
+
+ ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ return ret;
+}
+
+/**
+ * free_seen - free the seen logic integer array.
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void free_seen(int *seen)
+{
+ kfree(seen);
+}
+
+/**
+ * set_seen - mark a PEB as seen.
+ * @ubi: UBI device description object
+ * @pnum: The PEB to be marked as seen
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
+{
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return;
+
+ seen[pnum] = 1;
+}
+
+/**
+ * self_check_seen - check whether all PEBs have been seen by fastmap.
+ * @ubi: UBI device description object
+ * @seen: integer array of @ubi->peb_count size
+ */
+static int self_check_seen(struct ubi_device *ubi, int *seen)
+{
+ int pnum, ret = 0;
+
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return 0;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ if (!seen[pnum] && ubi->lookuptbl[pnum]) {
+ ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/**
* ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
* @ubi: UBI device description object
*/
@@ -136,14 +200,15 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
if (!av)
goto out;
- av->highest_lnum = av->leb_count = 0;
+ av->highest_lnum = av->leb_count = av->used_ebs = 0;
av->vol_id = vol_id;
- av->used_ebs = used_ebs;
av->data_pad = data_pad;
av->last_data_size = last_eb_bytes;
av->compat = 0;
av->vol_type = vol_type;
av->root = RB_ROOT;
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = used_ebs;
dbg_bld("found volume (ID %i)", vol_id);
@@ -362,6 +427,7 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
if (aeb->pnum == pnum) {
rb_erase(&aeb->u.rb, &av->root);
+ av->leb_count--;
kmem_cache_free(ai->aeb_slab_cache, aeb);
return;
}
@@ -376,7 +442,6 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
* @pebs: an array of all PEB numbers in the to be scanned pool
* @pool_size: size of the pool (number of entries in @pebs)
* @max_sqnum: pointer to the maximal sequence number
- * @eba_orphans: list of PEBs which need to be scanned
* @free: list of PEBs which are most likely free (and go into @ai->free)
*
* Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
@@ -384,12 +449,12 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
*/
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
int *pebs, int pool_size, unsigned long long *max_sqnum,
- struct list_head *eba_orphans, struct list_head *free)
+ struct list_head *free)
{
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
- struct ubi_ainf_peb *new_aeb, *tmp_aeb;
- int i, pnum, err, found_orphan, ret = 0;
+ struct ubi_ainf_peb *new_aeb;
+ int i, pnum, err, ret = 0;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
@@ -457,18 +522,6 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (err == UBI_IO_BITFLIPS)
scrub = 1;
- found_orphan = 0;
- list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
- if (tmp_aeb->pnum == pnum) {
- found_orphan = 1;
- break;
- }
- }
- if (found_orphan) {
- list_del(&tmp_aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
- }
-
new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
GFP_KERNEL);
if (!new_aeb) {
@@ -543,10 +596,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
struct ubi_attach_info *ai,
struct ubi_fastmap_layout *fm)
{
- struct list_head used, eba_orphans, free;
+ struct list_head used, free;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
- struct ubi_ec_hdr *ech;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmhdr;
struct ubi_fm_scan_pool *fmpl1, *fmpl2;
@@ -560,22 +612,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
INIT_LIST_HEAD(&used);
INIT_LIST_HEAD(&free);
- INIT_LIST_HEAD(&eba_orphans);
- INIT_LIST_HEAD(&ai->corr);
- INIT_LIST_HEAD(&ai->free);
- INIT_LIST_HEAD(&ai->erase);
- INIT_LIST_HEAD(&ai->alien);
- ai->volumes = RB_ROOT;
ai->min_ec = UBI_MAX_ERASECOUNTER;
- ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
- sizeof(struct ubi_ainf_peb),
- 0, 0, NULL);
- if (!ai->aeb_slab_cache) {
- ret = -ENOMEM;
- goto fail;
- }
-
fmsb = (struct ubi_fm_sb *)(fm_raw);
ai->max_sqnum = fmsb->sqnum;
fm_pos += sizeof(struct ubi_fm_sb);
@@ -741,28 +779,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
}
}
- /* This can happen if a PEB is already in an EBA known
- * by this fastmap but the PEB itself is not in the used
- * list.
- * In this case the PEB can be within the fastmap pool
- * or while writing the fastmap it was in the protection
- * queue.
- */
if (!aeb) {
- aeb = kmem_cache_alloc(ai->aeb_slab_cache,
- GFP_KERNEL);
- if (!aeb) {
- ret = -ENOMEM;
-
- goto fail;
- }
-
- aeb->lnum = j;
- aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
- aeb->ec = -1;
- aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
- list_add_tail(&aeb->u.list, &eba_orphans);
- continue;
+ ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
+ goto fail_bad;
}
aeb->lnum = j;
@@ -775,49 +794,13 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
aeb->pnum, aeb->lnum, av->vol_id);
}
-
- ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ech) {
- ret = -ENOMEM;
- goto fail;
- }
-
- list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
- u.list) {
- int err;
-
- if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
- ubi_err(ubi, "bad PEB in fastmap EBA orphan list");
- ret = UBI_BAD_FASTMAP;
- kfree(ech);
- goto fail;
- }
-
- err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
- if (err && err != UBI_IO_BITFLIPS) {
- ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
- tmp_aeb->pnum, err);
- ret = err > 0 ? UBI_BAD_FASTMAP : err;
- kfree(ech);
-
- goto fail;
- } else if (err == UBI_IO_BITFLIPS)
- tmp_aeb->scrub = 1;
-
- tmp_aeb->ec = be64_to_cpu(ech->ec);
- assign_aeb_to_av(ai, tmp_aeb, av);
- }
-
- kfree(ech);
}
- ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
- &eba_orphans, &free);
+ ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
if (ret)
goto fail;
- ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
- &eba_orphans, &free);
+ ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
if (ret)
goto fail;
@@ -827,8 +810,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
list_move_tail(&tmp_aeb->u.list, &ai->free);
- ubi_assert(list_empty(&used));
- ubi_assert(list_empty(&eba_orphans));
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->erase);
+
ubi_assert(list_empty(&free));
/*
@@ -850,10 +834,6 @@ fail:
list_del(&tmp_aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
- list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
- list_del(&tmp_aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
- }
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
list_del(&tmp_aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
@@ -884,7 +864,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
__be32 crc, tmp_crc;
unsigned long long sqnum = 0;
- mutex_lock(&ubi->fm_mutex);
+ down_write(&ubi->fm_protect);
memset(ubi->fm_buf, 0, ubi->fm_size);
fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
@@ -1075,7 +1055,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi_free_vid_hdr(ubi, vh);
kfree(ech);
out:
- mutex_unlock(&ubi->fm_mutex);
+ up_write(&ubi->fm_protect);
if (ret == UBI_BAD_FASTMAP)
ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
return ret;
@@ -1107,13 +1087,14 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct ubi_fm_ec *fec;
struct ubi_fm_volhdr *fvh;
struct ubi_fm_eba *feba;
- struct rb_node *node;
struct ubi_wl_entry *wl_e;
struct ubi_volume *vol;
struct ubi_vid_hdr *avhdr, *dvhdr;
struct ubi_work *ubi_wrk;
+ struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
+ int *seen_pebs = NULL;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1130,6 +1111,12 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
goto out_kfree;
}
+ seen_pebs = init_seen(ubi);
+ if (IS_ERR(seen_pebs)) {
+ ret = PTR_ERR(seen_pebs);
+ goto out_kfree;
+ }
+
spin_lock(&ubi->volumes_lock);
spin_lock(&ubi->wl_lock);
@@ -1160,8 +1147,10 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
- for (i = 0; i < ubi->fm_pool.size; i++)
+ for (i = 0; i < ubi->fm_pool.size; i++) {
fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
+ }
fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl2);
@@ -1169,14 +1158,16 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
- for (i = 0; i < ubi->fm_wl_pool.size; i++)
+ for (i = 0; i < ubi->fm_wl_pool.size; i++) {
fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
+ }
- for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
- wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
free_peb_count++;
@@ -1185,11 +1176,11 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
}
fmh->free_peb_count = cpu_to_be32(free_peb_count);
- for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
- wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
used_peb_count++;
@@ -1197,25 +1188,24 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ubi_assert(fm_pos <= ubi->fm_size);
}
- for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
- list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
- fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ ubi_for_each_protected_peb(ubi, i, wl_e) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
- fec->pnum = cpu_to_be32(wl_e->pnum);
- fec->ec = cpu_to_be32(wl_e->ec);
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
- used_peb_count++;
- fm_pos += sizeof(*fec);
- ubi_assert(fm_pos <= ubi->fm_size);
- }
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->used_peb_count = cpu_to_be32(used_peb_count);
- for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
- wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
scrub_peb_count++;
@@ -1233,6 +1223,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
erase_peb_count++;
@@ -1292,6 +1283,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
for (i = 0; i < new_fm->used_blocks; i++) {
fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
}
@@ -1325,11 +1317,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ubi_assert(new_fm);
ubi->fm = new_fm;
+ ret = self_check_seen(ubi, seen_pebs);
dbg_bld("fastmap written!");
out_kfree:
ubi_free_vid_hdr(ubi, avhdr);
ubi_free_vid_hdr(ubi, dvhdr);
+ free_seen(seen_pebs);
out:
return ret;
}
@@ -1384,31 +1378,87 @@ out:
/**
* invalidate_fastmap - destroys a fastmap.
* @ubi: UBI device object
- * @fm: the fastmap to be destroyed
*
+ * This function ensures that upon next UBI attach a full scan
+ * is issued. We need this if UBI is about to write a new fastmap
+ * but is unable to do so. In this case we have two options:
+ * a) Make sure that the current fastmap will not be used upon
+ * attach time and continue, or b) fall back to RO mode to have the
+ * current fastmap in a valid state.
* Returns 0 on success, < 0 indicates an internal error.
*/
-static int invalidate_fastmap(struct ubi_device *ubi,
- struct ubi_fastmap_layout *fm)
+static int invalidate_fastmap(struct ubi_device *ubi)
{
int ret;
- struct ubi_vid_hdr *vh;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_wl_entry *e;
+ struct ubi_vid_hdr *vh = NULL;
- ret = erase_block(ubi, fm->e[0]->pnum);
- if (ret < 0)
- return ret;
+ if (!ubi->fm)
+ return 0;
+
+ ubi->fm = NULL;
+
+ ret = -ENOMEM;
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm)
+ goto out;
vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
if (!vh)
- return -ENOMEM;
+ goto out_free_fm;
- /* deleting the current fastmap SB is not enough, an old SB may exist,
- * so create a (corrupted) SB such that fastmap will find it and fall
- * back to scanning mode in any case */
+ ret = -ENOSPC;
+ e = ubi_wl_get_fm_peb(ubi, 1);
+ if (!e)
+ goto out_free_fm;
+
+ /*
+ * Create fake fastmap such that UBI will fall back
+ * to scanning mode.
+ */
vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+ ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
+ if (ret < 0) {
+ ubi_wl_put_fm_peb(ubi, e, 0, 0);
+ goto out_free_fm;
+ }
+
+ fm->used_blocks = 1;
+ fm->e[0] = e;
+
+ ubi->fm = fm;
+out:
+ ubi_free_vid_hdr(ubi, vh);
return ret;
+
+out_free_fm:
+ kfree(fm);
+ goto out;
+}
+
+/**
+ * return_fm_pebs - returns all PEBs used by a fastmap back to the
+ * WL sub-system.
+ * @ubi: UBI device object
+ * @fm: fastmap layout object
+ */
+static void return_fm_pebs(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int i;
+
+ if (!fm)
+ return;
+
+ for (i = 0; i < fm->used_blocks; i++) {
+ if (fm->e[i]) {
+ ubi_wl_put_fm_peb(ubi, fm->e[i], i,
+ fm->to_be_tortured[i]);
+ fm->e[i] = NULL;
+ }
+ }
}
/**
@@ -1420,45 +1470,32 @@ static int invalidate_fastmap(struct ubi_device *ubi,
*/
int ubi_update_fastmap(struct ubi_device *ubi)
{
- int ret, i;
+ int ret, i, j;
struct ubi_fastmap_layout *new_fm, *old_fm;
struct ubi_wl_entry *tmp_e;
- mutex_lock(&ubi->fm_mutex);
+ down_write(&ubi->fm_protect);
ubi_refill_pools(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
- mutex_unlock(&ubi->fm_mutex);
+ up_write(&ubi->fm_protect);
return 0;
}
ret = ubi_ensure_anchor_pebs(ubi);
if (ret) {
- mutex_unlock(&ubi->fm_mutex);
+ up_write(&ubi->fm_protect);
return ret;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
- mutex_unlock(&ubi->fm_mutex);
+ up_write(&ubi->fm_protect);
return -ENOMEM;
}
new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
-
- for (i = 0; i < new_fm->used_blocks; i++) {
- new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!new_fm->e[i]) {
- while (i--)
- kfree(new_fm->e[i]);
-
- kfree(new_fm);
- mutex_unlock(&ubi->fm_mutex);
- return -ENOMEM;
- }
- }
-
old_fm = ubi->fm;
ubi->fm = NULL;
@@ -1473,37 +1510,49 @@ int ubi_update_fastmap(struct ubi_device *ubi)
tmp_e = ubi_wl_get_fm_peb(ubi, 0);
spin_unlock(&ubi->wl_lock);
- if (!tmp_e && !old_fm) {
- int j;
- ubi_err(ubi, "could not get any free erase block");
-
- for (j = 1; j < i; j++)
- ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
-
- ret = -ENOSPC;
- goto err;
- } else if (!tmp_e && old_fm) {
- ret = erase_block(ubi, old_fm->e[i]->pnum);
- if (ret < 0) {
- int j;
-
- for (j = 1; j < i; j++)
- ubi_wl_put_fm_peb(ubi, new_fm->e[j],
- j, 0);
+ if (!tmp_e) {
+ if (old_fm && old_fm->e[i]) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ ubi_err(ubi, "could not erase old fastmap PEB");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+ new_fm->e[j] = NULL;
+ }
+ goto err;
+ }
+ new_fm->e[i] = old_fm->e[i];
+ old_fm->e[i] = NULL;
+ } else {
+ ubi_err(ubi, "could not get any free erase block");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+ new_fm->e[j] = NULL;
+ }
- ubi_err(ubi, "could not erase old fastmap PEB");
+ ret = -ENOSPC;
goto err;
}
-
- new_fm->e[i]->pnum = old_fm->e[i]->pnum;
- new_fm->e[i]->ec = old_fm->e[i]->ec;
} else {
- new_fm->e[i]->pnum = tmp_e->pnum;
- new_fm->e[i]->ec = tmp_e->ec;
+ new_fm->e[i] = tmp_e;
- if (old_fm)
+ if (old_fm && old_fm->e[i]) {
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
+ }
+ }
+ }
+
+ /* Old fastmap is larger than the new one */
+ if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
+ for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
}
}
@@ -1516,67 +1565,67 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (!tmp_e) {
ret = erase_block(ubi, old_fm->e[0]->pnum);
if (ret < 0) {
- int i;
ubi_err(ubi, "could not erase old anchor PEB");
- for (i = 1; i < new_fm->used_blocks; i++)
+ for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i],
i, 0);
+ new_fm->e[i] = NULL;
+ }
goto err;
}
-
- new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+ new_fm->e[0] = old_fm->e[0];
new_fm->e[0]->ec = ret;
+ old_fm->e[0] = NULL;
} else {
/* we've got a new anchor PEB, return the old one */
ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
old_fm->to_be_tortured[0]);
-
- new_fm->e[0]->pnum = tmp_e->pnum;
- new_fm->e[0]->ec = tmp_e->ec;
+ new_fm->e[0] = tmp_e;
+ old_fm->e[0] = NULL;
}
} else {
if (!tmp_e) {
- int i;
ubi_err(ubi, "could not find any anchor PEB");
- for (i = 1; i < new_fm->used_blocks; i++)
+ for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+ new_fm->e[i] = NULL;
+ }
ret = -ENOSPC;
goto err;
}
-
- new_fm->e[0]->pnum = tmp_e->pnum;
- new_fm->e[0]->ec = tmp_e->ec;
+ new_fm->e[0] = tmp_e;
}
down_write(&ubi->work_sem);
- down_write(&ubi->fm_sem);
+ down_write(&ubi->fm_eba_sem);
ret = ubi_write_fastmap(ubi, new_fm);
- up_write(&ubi->fm_sem);
+ up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
if (ret)
goto err;
out_unlock:
- mutex_unlock(&ubi->fm_mutex);
+ up_write(&ubi->fm_protect);
kfree(old_fm);
return ret;
err:
- kfree(new_fm);
-
ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
- ret = 0;
- if (old_fm) {
- ret = invalidate_fastmap(ubi, old_fm);
- if (ret < 0)
- ubi_err(ubi, "Unable to invalidiate current fastmap!");
- else if (ret)
- ret = 0;
+ ret = invalidate_fastmap(ubi);
+ if (ret < 0) {
+ ubi_err(ubi, "Unable to invalidiate current fastmap!");
+ ubi_ro_mode(ubi);
+ } else {
+ return_fm_pebs(ubi, old_fm);
+ return_fm_pebs(ubi, new_fm);
+ ret = 0;
}
+
+ kfree(new_fm);
goto out_unlock;
}
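With chk_fastmap enabled, ubi_write_fastmap() marks every PEB it serializes via set_seen(), and self_check_seen() then verifies that every PEB known to ubi->lookuptbl was recorded, catching PEBs the fastmap silently dropped. A small user-space model of that mark-and-verify pass (the array contents are made up):

#include <stdio.h>
#include <stdlib.h>

#define PEB_COUNT 8

int main(void)
{
	int lookuptbl[PEB_COUNT] = { 1, 1, 0, 1, 1, 1, 0, 1 };	/* 1 = known PEB */
	int *seen = calloc(PEB_COUNT, sizeof(int));
	int pnum, ret = 0;

	if (!seen)
		return 1;

	/* pretend the fastmap writer recorded all known PEBs except PEB 4 */
	for (pnum = 0; pnum < PEB_COUNT; pnum++)
		if (lookuptbl[pnum] && pnum != 4)
			seen[pnum] = 1;			/* set_seen() */

	for (pnum = 0; pnum < PEB_COUNT; pnum++)	/* self_check_seen() */
		if (lookuptbl[pnum] && !seen[pnum]) {
			printf("self-check failed for PEB %d\n", pnum);
			ret = 1;
		}

	free(seen);
	return ret;
}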
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index ed0bcb35472f..5bbd1f094f4e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -859,6 +859,9 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+ return -EROFS;
+
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err;
}
@@ -1106,6 +1109,9 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+ return -EROFS;
+
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 478e00cf2d9e..e844887732fb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -314,7 +314,7 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
if (error)
return ERR_PTR(error);
- inode = path.dentry->d_inode;
+ inode = d_backing_inode(path.dentry);
mod = inode->i_mode;
ubi_num = ubi_major2num(imajor(inode));
vol_id = iminor(inode) - 1;
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index ac2b24d1783d..d0d072e7ccd2 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -403,8 +403,6 @@ struct ubi_vtbl_record {
#define UBI_FM_MIN_POOL_SIZE 8
#define UBI_FM_MAX_POOL_SIZE 256
-#define UBI_FM_WL_POOL_SIZE 25
-
/**
* struct ubi_fm_sb - UBI fastmap super block
* @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c5be82d9d345..c998212fc680 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -151,6 +151,17 @@ enum {
UBI_BAD_FASTMAP,
};
+/*
+ * Flags for emulate_power_cut in ubi_debug_info
+ *
+ * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
+ * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
+ */
+enum {
+ POWER_CUT_EC_WRITE = 0x01,
+ POWER_CUT_VID_WRITE = 0x02,
+};
+
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
@@ -356,30 +367,48 @@ struct ubi_wl_entry;
*
* @chk_gen: if UBI general extra checks are enabled
* @chk_io: if UBI I/O extra checks are enabled
+ * @chk_fastmap: if UBI fastmap extra checks are enabled
* @disable_bgt: disable the background task for testing purposes
* @emulate_bitflips: emulate bit-flips for testing purposes
* @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @emulate_power_cut: emulate power cut for testing purposes
+ * @power_cut_counter: count down for writes left until emulated power cut
+ * @power_cut_min: minimum number of writes before emulating a power cut
+ * @power_cut_max: maximum number of writes until emulating a power cut
* @dfs_dir_name: name of debugfs directory containing files of this UBI device
* @dfs_dir: direntry object of the UBI device debugfs directory
* @dfs_chk_gen: debugfs knob to enable UBI general extra checks
* @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
* @dfs_disable_bgt: debugfs knob to disable the background task
* @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
* @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
+ * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
+ * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
*/
struct ubi_debug_info {
unsigned int chk_gen:1;
unsigned int chk_io:1;
+ unsigned int chk_fastmap:1;
unsigned int disable_bgt:1;
unsigned int emulate_bitflips:1;
unsigned int emulate_io_failures:1;
+ unsigned int emulate_power_cut:2;
+ unsigned int power_cut_counter;
+ unsigned int power_cut_min;
+ unsigned int power_cut_max;
char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
struct dentry *dfs_dir;
struct dentry *dfs_chk_gen;
struct dentry *dfs_chk_io;
+ struct dentry *dfs_chk_fastmap;
struct dentry *dfs_disable_bgt;
struct dentry *dfs_emulate_bitflips;
struct dentry *dfs_emulate_io_failures;
+ struct dentry *dfs_emulate_power_cut;
+ struct dentry *dfs_power_cut_min;
+ struct dentry *dfs_power_cut_max;
};
/**
@@ -426,11 +455,13 @@ struct ubi_debug_info {
* @fm_pool: in-memory data structure of the fastmap pool
* @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
* sub-system
- * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
+ * that critical sections cannot be interrupted by ubi_update_fastmap()
* @fm_buf: vmalloc()'d buffer which holds the raw fastmap
* @fm_size: fastmap size in bytes
- * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
* @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -442,7 +473,8 @@ struct ubi_debug_info {
* @pq_head: protection queue head
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
* @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- * @erroneous, and @erroneous_peb_count fields
+ * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ * and @fm_wl_pool fields
* @move_mutex: serializes eraseblock moves
* @work_sem: used to wait for all the scheduled works to finish and prevent
* new works from being submitted
@@ -479,7 +511,7 @@ struct ubi_debug_info {
* @vid_hdr_offset: starting offset of the volume identifier header (might be
* unaligned)
* @vid_hdr_aloffset: starting offset of the VID header aligned to
- * @hdrs_min_io_size
+ * @hdrs_min_io_size
* @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not
@@ -532,11 +564,12 @@ struct ubi_device {
struct ubi_fastmap_layout *fm;
struct ubi_fm_pool fm_pool;
struct ubi_fm_pool fm_wl_pool;
- struct rw_semaphore fm_sem;
- struct mutex fm_mutex;
+ struct rw_semaphore fm_eba_sem;
+ struct rw_semaphore fm_protect;
void *fm_buf;
size_t fm_size;
struct work_struct fm_work;
+ int fm_work_scheduled;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -868,10 +901,14 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
/* fastmap.c */
+#ifdef CONFIG_MTD_UBI_FASTMAP
size_t ubi_calc_fm_size(struct ubi_device *ubi);
int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
int fm_anchor);
+#else
+static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
+#endif
/* block.c */
#ifdef CONFIG_MTD_UBI_BLOCK
@@ -892,6 +929,42 @@ static inline int ubiblock_remove(struct ubi_volume_info *vi)
}
#endif
+/*
+ * ubi_for_each_free_peb - walk the UBI free RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @pos: a pointer to RB-tree entry type to use as a loop counter
+ */
+#define ubi_for_each_free_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
+
+/*
+ * ubi_for_each_used_peb - walk the UBI used RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @pos: a pointer to RB-tree entry type to use as a loop counter
+ */
+#define ubi_for_each_used_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
+
+/*
+ * ubi_for_each_scrub_peb - walk the UBI scrub RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @pos: a pointer to RB-tree entry type to use as a loop counter
+ */
+#define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
+
+/*
+ * ubi_for_each_protected_peb - walk the UBI protection queue.
+ * @ubi: UBI device description object
+ * @i: an integer used as a counter
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ */
+#define ubi_for_each_protected_peb(ubi, i, e) \
+ for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \
+ list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
/*
* ubi_rb_for_each_entry - walk an RB-tree.
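The ubi_for_each_* helpers above wrap ubi_rb_for_each_entry() and the protection-queue walk so callers such as ubi_write_fastmap() (see the fastmap.c hunks earlier) no longer open-code rb_first()/rb_next() loops. A usage sketch, assuming a valid ubi device and the usual wl_lock held around any walk of the WL trees:

	struct ubi_wl_entry *wl_e;
	struct rb_node *tmp_rb;
	int i;

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb)
		pr_debug("free PEB %d, EC %d\n", wl_e->pnum, wl_e->ec);

	ubi_for_each_protected_peb(ubi, i, wl_e)
		pr_debug("protected PEB %d, EC %d\n", wl_e->pnum, wl_e->ec);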
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8f7bde6a85d6..16214d3d57a4 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -103,6 +103,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
+#include "wl.h"
/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1
@@ -140,42 +141,6 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e);
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
- * @wrk: the work description object
- */
-static void update_fastmap_work_fn(struct work_struct *wrk)
-{
- struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
- ubi_update_fastmap(ubi);
-}
-
-/**
- * ubi_ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
- * @ubi: UBI device description object
- * @pnum: the to be checked PEB
- */
-static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
-{
- int i;
-
- if (!ubi->fm)
- return 0;
-
- for (i = 0; i < ubi->fm->used_blocks; i++)
- if (ubi->fm->e[i]->pnum == pnum)
- return 1;
-
- return 0;
-}
-#else
-static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
-{
- return 0;
-}
-#endif
-
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
@@ -213,6 +178,20 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
+ * wl_entry_destroy - destroy a wear-leveling entry.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to destroy
+ *
+ * This function destroys a wear leveling entry and removes
+ * the reference from the lookup table.
+ */
+static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ ubi->lookuptbl[e->pnum] = NULL;
+ kmem_cache_free(ubi_wl_entry_slab, e);
+}
+
+/**
* do_work - do one pending work.
* @ubi: UBI device description object
*
@@ -260,33 +239,6 @@ static int do_work(struct ubi_device *ubi)
}
/**
- * produce_free_peb - produce a free physical eraseblock.
- * @ubi: UBI device description object
- *
- * This function tries to make a free PEB by means of synchronous execution of
- * pending works. This may be needed if, for example the background thread is
- * disabled. Returns zero in case of success and a negative error code in case
- * of failure.
- */
-static int produce_free_peb(struct ubi_device *ubi)
-{
- int err;
-
- while (!ubi->free.rb_node && ubi->works_count) {
- spin_unlock(&ubi->wl_lock);
-
- dbg_wl("do one work synchronously");
- err = do_work(ubi);
-
- spin_lock(&ubi->wl_lock);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
* in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
* @e: the wear-leveling entry to check
* @root: the root of the tree
@@ -409,119 +361,32 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
-#ifdef CONFIG_MTD_UBI_FASTMAP
/* If no fastmap has been written and this WL entry can be used
* as anchor PEB, hold it back and return the second best
* WL entry such that fastmap can use the anchor PEB later. */
- if (e && !ubi->fm_disabled && !ubi->fm &&
- e->pnum < UBI_FM_MAX_START)
- e = rb_entry(rb_next(root->rb_node),
- struct ubi_wl_entry, u.rb);
-#endif
+ e = may_reserve_for_fm(ubi, e, root);
} else
e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
return e;
}
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
- * @root: the RB-tree where to look for
- */
-static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
-{
- struct rb_node *p;
- struct ubi_wl_entry *e, *victim = NULL;
- int max_ec = UBI_MAX_ERASECOUNTER;
-
- ubi_rb_for_each_entry(p, e, root, u.rb) {
- if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
- victim = e;
- max_ec = e->ec;
- }
- }
-
- return victim;
-}
-
-static int anchor_pebs_avalible(struct rb_root *root)
-{
- struct rb_node *p;
- struct ubi_wl_entry *e;
-
- ubi_rb_for_each_entry(p, e, root, u.rb)
- if (e->pnum < UBI_FM_MAX_START)
- return 1;
-
- return 0;
-}
-
/**
- * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
+ * refill_wl_user_pool().
* @ubi: UBI device description object
- * @anchor: This PEB will be used as anchor PEB by fastmap
*
- * The function returns a physical erase block with a given maximal number
- * and removes it from the wl subsystem.
- * Must be called with wl_lock held!
+ * This function returns a wear-leveling entry in case of success and
+ * NULL in case of failure.
*/
-struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
- struct ubi_wl_entry *e = NULL;
-
- if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
- goto out;
-
- if (anchor)
- e = find_anchor_wl_entry(&ubi->free);
- else
- e = find_mean_wl_entry(ubi, &ubi->free);
-
- if (!e)
- goto out;
-
- self_check_in_wl_tree(ubi, e, &ubi->free);
-
- /* remove it from the free list,
- * the wl subsystem does no longer know this erase block */
- rb_erase(&e->u.rb, &ubi->free);
- ubi->free_count--;
-out:
- return e;
-}
-#endif
-
-/**
- * __wl_get_peb - get a physical eraseblock.
- * @ubi: UBI device description object
- *
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure.
- */
-static int __wl_get_peb(struct ubi_device *ubi)
-{
- int err;
struct ubi_wl_entry *e;
-retry:
- if (!ubi->free.rb_node) {
- if (ubi->works_count == 0) {
- ubi_err(ubi, "no free eraseblocks");
- ubi_assert(list_empty(&ubi->works));
- return -ENOSPC;
- }
-
- err = produce_free_peb(ubi);
- if (err < 0)
- return err;
- goto retry;
- }
-
e = find_mean_wl_entry(ubi, &ubi->free);
if (!e) {
ubi_err(ubi, "no free eraseblocks");
- return -ENOSPC;
+ return NULL;
}
self_check_in_wl_tree(ubi, e, &ubi->free);
@@ -533,174 +398,10 @@ retry:
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
-#ifndef CONFIG_MTD_UBI_FASTMAP
- /* We have to enqueue e only if fastmap is disabled,
- * is fastmap enabled prot_queue_add() will be called by
- * ubi_wl_get_peb() after removing e from the pool. */
- prot_queue_add(ubi, e);
-#endif
- return e->pnum;
-}
-
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * return_unused_pool_pebs - returns unused PEB to the free tree.
- * @ubi: UBI device description object
- * @pool: fastmap pool description object
- */
-static void return_unused_pool_pebs(struct ubi_device *ubi,
- struct ubi_fm_pool *pool)
-{
- int i;
- struct ubi_wl_entry *e;
-
- for (i = pool->used; i < pool->size; i++) {
- e = ubi->lookuptbl[pool->pebs[i]];
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
- }
-}
-
-/**
- * refill_wl_pool - refills all the fastmap pool used by the
- * WL sub-system.
- * @ubi: UBI device description object
- */
-static void refill_wl_pool(struct ubi_device *ubi)
-{
- struct ubi_wl_entry *e;
- struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
-
- return_unused_pool_pebs(ubi, pool);
-
- for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 5))
- break;
-
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
- self_check_in_wl_tree(ubi, e, &ubi->free);
- rb_erase(&e->u.rb, &ubi->free);
- ubi->free_count--;
-
- pool->pebs[pool->size] = e->pnum;
- }
- pool->used = 0;
-}
-
-/**
- * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
- * @ubi: UBI device description object
- */
-static void refill_wl_user_pool(struct ubi_device *ubi)
-{
- struct ubi_fm_pool *pool = &ubi->fm_pool;
-
- return_unused_pool_pebs(ubi, pool);
-
- for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
- pool->pebs[pool->size] = __wl_get_peb(ubi);
- if (pool->pebs[pool->size] < 0)
- break;
- }
- pool->used = 0;
-}
-
-/**
- * ubi_refill_pools - refills all fastmap PEB pools.
- * @ubi: UBI device description object
- */
-void ubi_refill_pools(struct ubi_device *ubi)
-{
- spin_lock(&ubi->wl_lock);
- refill_wl_pool(ubi);
- refill_wl_user_pool(ubi);
- spin_unlock(&ubi->wl_lock);
-}
-
-/* ubi_wl_get_peb - works exaclty like __wl_get_peb but keeps track of
- * the fastmap pool.
- */
-int ubi_wl_get_peb(struct ubi_device *ubi)
-{
- int ret;
- struct ubi_fm_pool *pool = &ubi->fm_pool;
- struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
-
- if (!pool->size || !wl_pool->size || pool->used == pool->size ||
- wl_pool->used == wl_pool->size)
- ubi_update_fastmap(ubi);
-
- /* we got not a single free PEB */
- if (!pool->size)
- ret = -ENOSPC;
- else {
- spin_lock(&ubi->wl_lock);
- ret = pool->pebs[pool->used++];
- prot_queue_add(ubi, ubi->lookuptbl[ret]);
- spin_unlock(&ubi->wl_lock);
- }
-
- return ret;
-}
-
-/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
- *
- * @ubi: UBI device description object
- */
-static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
-{
- struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
- int pnum;
-
- if (pool->used == pool->size || !pool->size) {
- /* We cannot update the fastmap here because this
- * function is called in atomic context.
- * Let's fail here and refill/update it as soon as possible. */
- schedule_work(&ubi->fm_work);
- return NULL;
- } else {
- pnum = pool->pebs[pool->used++];
- return ubi->lookuptbl[pnum];
- }
-}
-#else
-static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
-{
- struct ubi_wl_entry *e;
-
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
- self_check_in_wl_tree(ubi, e, &ubi->free);
- ubi->free_count--;
- ubi_assert(ubi->free_count >= 0);
- rb_erase(&e->u.rb, &ubi->free);
return e;
}
-int ubi_wl_get_peb(struct ubi_device *ubi)
-{
- int peb, err;
-
- spin_lock(&ubi->wl_lock);
- peb = __wl_get_peb(ubi);
- spin_unlock(&ubi->wl_lock);
-
- if (peb < 0)
- return peb;
-
- err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
- ubi->peb_size - ubi->vid_hdr_aloffset);
- if (err) {
- ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes",
- peb);
- return err;
- }
-
- return peb;
-}
-#endif
-
/**
* prot_queue_del - remove a physical eraseblock from the protection queue.
* @ubi: UBI device description object
@@ -867,17 +568,6 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int shutdown);
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_is_erase_work - checks whether a work is erase work.
- * @wrk: The work object to be checked
- */
-int ubi_is_erase_work(struct ubi_work *wrk)
-{
- return wrk->func == erase_worker;
-}
-#endif
-
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
@@ -895,7 +585,6 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
struct ubi_work *wl_wrk;
ubi_assert(e);
- ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
@@ -942,51 +631,6 @@ static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
return erase_worker(ubi, wl_wrk, 0);
}
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
- * sub-system.
- * see: ubi_wl_put_peb()
- *
- * @ubi: UBI device description object
- * @fm_e: physical eraseblock to return
- * @lnum: the last used logical eraseblock number for the PEB
- * @torture: if this physical eraseblock has to be tortured
- */
-int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
- int lnum, int torture)
-{
- struct ubi_wl_entry *e;
- int vol_id, pnum = fm_e->pnum;
-
- dbg_wl("PEB %d", pnum);
-
- ubi_assert(pnum >= 0);
- ubi_assert(pnum < ubi->peb_count);
-
- spin_lock(&ubi->wl_lock);
- e = ubi->lookuptbl[pnum];
-
- /* This can happen if we recovered from a fastmap the very
- * first time and writing now a new one. In this case the wl system
- * has never seen any PEB used by the original fastmap.
- */
- if (!e) {
- e = fm_e;
- ubi_assert(e->ec >= 0);
- ubi->lookuptbl[pnum] = e;
- } else {
- e->ec = fm_e->ec;
- kfree(fm_e);
- }
-
- spin_unlock(&ubi->wl_lock);
-
- vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
- return schedule_erase(ubi, e, vol_id, lnum, torture);
-}
-#endif
-
/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
@@ -1002,7 +646,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int shutdown)
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
- int vol_id = -1, uninitialized_var(lnum);
+ int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
@@ -1214,7 +858,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
if (e2)
- kmem_cache_free(ubi_wl_entry_slab, e2);
+ wl_entry_destroy(ubi, e2);
goto out_ro;
}
@@ -1282,8 +926,8 @@ out_error:
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
- kmem_cache_free(ubi_wl_entry_slab, e1);
- kmem_cache_free(ubi_wl_entry_slab, e2);
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
@@ -1369,38 +1013,6 @@ out_unlock:
return err;
}
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
- * @ubi: UBI device description object
- */
-int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
-{
- struct ubi_work *wrk;
-
- spin_lock(&ubi->wl_lock);
- if (ubi->wl_scheduled) {
- spin_unlock(&ubi->wl_lock);
- return 0;
- }
- ubi->wl_scheduled = 1;
- spin_unlock(&ubi->wl_lock);
-
- wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
- if (!wrk) {
- spin_lock(&ubi->wl_lock);
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- return -ENOMEM;
- }
-
- wrk->anchor = 1;
- wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
- return 0;
-}
-#endif
-
/**
* erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
@@ -1425,15 +1037,13 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
if (shutdown) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);
+ wl_entry_destroy(ubi, e);
return 0;
}
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
- ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
-
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
/* Fine, we've erased it successfully */
@@ -1471,7 +1081,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
return err;
}
- kmem_cache_free(ubi_wl_entry_slab, e);
+ wl_entry_destroy(ubi, e);
if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1563,6 +1173,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
+ down_read(&ubi->fm_protect);
+
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
@@ -1593,6 +1205,7 @@ retry:
ubi_assert(!ubi->move_to_put);
ubi->move_to_put = 1;
spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
@@ -1614,6 +1227,7 @@ retry:
ubi_err(ubi, "PEB %d not found", pnum);
ubi_ro_mode(ubi);
spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
return err;
}
}
@@ -1627,6 +1241,7 @@ retry:
spin_unlock(&ubi->wl_lock);
}
+ up_read(&ubi->fm_protect);
return err;
}
@@ -1758,9 +1373,10 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
/**
* tree_destroy - destroy an RB-tree.
+ * @ubi: UBI device description object
* @root: the root of the tree to destroy
*/
-static void tree_destroy(struct rb_root *root)
+static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
struct rb_node *rb;
struct ubi_wl_entry *e;
@@ -1782,7 +1398,7 @@ static void tree_destroy(struct rb_root *root)
rb->rb_right = NULL;
}
- kmem_cache_free(ubi_wl_entry_slab, e);
+ wl_entry_destroy(ubi, e);
}
}
}
@@ -1850,6 +1466,9 @@ int ubi_thread(void *u)
*/
static void shutdown_work(struct ubi_device *ubi)
{
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ flush_work(&ubi->fm_work);
+#endif
while (!list_empty(&ubi->works)) {
struct ubi_work *wrk;
@@ -1883,9 +1502,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
init_rwsem(&ubi->work_sem);
ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
-#ifdef CONFIG_MTD_UBI_FASTMAP
- INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
-#endif
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1907,10 +1523,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
- ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
- kmem_cache_free(ubi_wl_entry_slab, e);
+ wl_entry_destroy(ubi, e);
goto out_free;
}
@@ -1928,7 +1543,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
- ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
wl_tree_add(e, &ubi->free);
ubi->free_count++;
@@ -1966,17 +1580,20 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
dbg_wl("found %i PEBs", found_pebs);
- if (ubi->fm)
+ if (ubi->fm) {
ubi_assert(ubi->good_peb_count == \
found_pebs + ubi->fm->used_blocks);
+
+ for (i = 0; i < ubi->fm->used_blocks; i++) {
+ e = ubi->fm->e[i];
+ ubi->lookuptbl[e->pnum] = e;
+ }
+ }
else
ubi_assert(ubi->good_peb_count == found_pebs);
reserved_pebs = WL_RESERVED_PEBS;
-#ifdef CONFIG_MTD_UBI_FASTMAP
- /* Reserve enough LEBs to store two fastmaps. */
- reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
-#endif
+ ubi_fastmap_init(ubi, &reserved_pebs);
if (ubi->avail_pebs < reserved_pebs) {
ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
@@ -1998,9 +1615,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
out_free:
shutdown_work(ubi);
- tree_destroy(&ubi->used);
- tree_destroy(&ubi->free);
- tree_destroy(&ubi->scrub);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
kfree(ubi->lookuptbl);
return err;
}
@@ -2017,7 +1634,7 @@ static void protection_queue_destroy(struct ubi_device *ubi)
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
list_del(&e->u.list);
- kmem_cache_free(ubi_wl_entry_slab, e);
+ wl_entry_destroy(ubi, e);
}
}
}
@@ -2029,12 +1646,13 @@ static void protection_queue_destroy(struct ubi_device *ubi)
void ubi_wl_close(struct ubi_device *ubi)
{
dbg_wl("close the WL sub-system");
+ ubi_fastmap_close(ubi);
shutdown_work(ubi);
protection_queue_destroy(ubi);
- tree_destroy(&ubi->used);
- tree_destroy(&ubi->erroneous);
- tree_destroy(&ubi->free);
- tree_destroy(&ubi->scrub);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->erroneous);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
kfree(ubi->lookuptbl);
}
@@ -2133,3 +1751,94 @@ static int self_check_in_pq(const struct ubi_device *ubi,
dump_stack();
return -EINVAL;
}
+#ifndef CONFIG_MTD_UBI_FASTMAP
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ ubi->free_count--;
+ ubi_assert(ubi->free_count >= 0);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example, the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ while (!ubi->free.rb_node && ubi->works_count) {
+ spin_unlock(&ubi->wl_lock);
+
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+
+ spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+retry:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->free.rb_node) {
+ if (ubi->works_count == 0) {
+ ubi_err(ubi, "no free eraseblocks");
+ ubi_assert(list_empty(&ubi->works));
+ spin_unlock(&ubi->wl_lock);
+ return -ENOSPC;
+ }
+
+ err = produce_free_peb(ubi);
+ if (err < 0) {
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ goto retry;
+
+ }
+ e = wl_get_wle(ubi);
+ prot_queue_add(ubi, e);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (err) {
+ ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
+ return err;
+ }
+
+ return e->pnum;
+}
+#else
+#include "fastmap-wl.c"
+#endif
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
new file mode 100644
index 000000000000..bd1f07e5ce9a
--- /dev/null
+++ b/drivers/mtd/ubi/wl.h
@@ -0,0 +1,28 @@
+#ifndef UBI_WL_H
+#define UBI_WL_H
+#ifdef CONFIG_MTD_UBI_FASTMAP
+static int anchor_pebs_avalible(struct rb_root *root);
+static void update_fastmap_work_fn(struct work_struct *wrk);
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static void ubi_fastmap_close(struct ubi_device *ubi);
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
+{
+ /* Reserve enough LEBs to store two fastmaps. */
+ *count += (ubi->fm_size / ubi->leb_size) * 2;
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+}
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root);
+#else /* !CONFIG_MTD_UBI_FASTMAP */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root) {
+ return e;
+}
+#endif /* CONFIG_MTD_UBI_FASTMAP */
+#endif /* UBI_WL_H */
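The new wl.h relies on a common kernel idiom: when CONFIG_MTD_UBI_FASTMAP is off, the fastmap hooks (ubi_fastmap_init(), ubi_fastmap_close(), may_reserve_for_fm()) collapse into empty static inlines, so wl.c can call them unconditionally and the #ifdef clutter stays out of the call sites. A minimal, self-contained sketch of that idiom follows; FEATURE_FASTMAP, feature_init() and feature_close() are invented names for illustration and are not part of UBI.

#include <stdio.h>

/* Toggle the hypothetical feature at compile time, e.g.
 *   cc -DFEATURE_FASTMAP demo.c
 */
#ifdef FEATURE_FASTMAP
static void feature_init(int *reserved)
{
	/* A real hook would reserve extra blocks and set up work items. */
	*reserved += 2;
	puts("fastmap-like feature enabled");
}

static void feature_close(void)
{
	puts("fastmap-like feature torn down");
}
#else
/* Compiled out: the hooks shrink to empty inlines, so the caller below
 * needs no #ifdef and the optimizer drops the calls entirely.
 */
static inline void feature_init(int *reserved) { (void)reserved; }
static inline void feature_close(void) { }
#endif

int main(void)
{
	int reserved = 1;

	feature_init(&reserved);	/* unconditional call site */
	printf("reserved blocks: %d\n", reserved);
	feature_close();
	return 0;
}

The same header also forward-declares the static functions that the #ifdef-selected fastmap-wl.c is expected to define inside wl.c's translation unit, which is why wl.c can include a .c file at its tail when fastmap is enabled.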
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 09de683c167e..10f71c732b59 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -104,7 +104,6 @@ EXPORT_SYMBOL(arcnet_timeout);
static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len);
-static int arcnet_rebuild_header(struct sk_buff *skb);
static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
@@ -312,7 +311,6 @@ static int choose_mtu(void)
static const struct header_ops arcnet_header_ops = {
.create = arcnet_header,
- .rebuild = arcnet_rebuild_header,
};
static const struct net_device_ops arcnet_netdev_ops = {
@@ -538,59 +536,6 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
return proto->build_header(skb, dev, type, _daddr);
}
-
-/*
- * Rebuild the ARCnet hard header. This is called after an ARP (or in the
- * future other address resolution) has completed on this sk_buff. We now
- * let ARP fill in the destination field.
- */
-static int arcnet_rebuild_header(struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- struct arcnet_local *lp = netdev_priv(dev);
- int status = 0; /* default is failure */
- unsigned short type;
- uint8_t daddr=0;
- struct ArcProto *proto;
- /*
- * XXX: Why not use skb->mac_len?
- */
- if (skb->network_header - skb->mac_header != 2) {
- BUGMSG(D_NORMAL,
- "rebuild_header: shouldn't be here! (hdrsize=%d)\n",
- (int)(skb->network_header - skb->mac_header));
- return 0;
- }
- type = *(uint16_t *) skb_pull(skb, 2);
- BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type);
-
- if (type == ETH_P_IP) {
-#ifdef CONFIG_INET
- BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type);
- status = arp_find(&daddr, skb) ? 1 : 0;
- BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n",
- daddr, type);
-#endif
- } else {
- BUGMSG(D_NORMAL,
- "I don't understand ethernet protocol %Xh addresses!\n", type);
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
- }
-
- /* if we couldn't resolve the address... give up. */
- if (!status)
- return 0;
-
- /* add the _real_ header this time! */
- proto = arc_proto_map[lp->default_proto[daddr]];
- proto->build_header(skb, dev, type, daddr);
-
- return 1; /* success */
-}
-
-
-
/* Called by the kernel in order to transmit a packet. */
netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cfc4a9c1000a..fbd54f0e32e8 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -70,6 +70,7 @@
#define AD_PORT_STANDBY 0x80
#define AD_PORT_SELECTED 0x100
#define AD_PORT_MOVED 0x200
+#define AD_PORT_CHURNED (AD_PORT_ACTOR_CHURN | AD_PORT_PARTNER_CHURN)
/* Port Key definitions
* key is determined according to the link speed, duplex and
@@ -1013,16 +1014,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* check if state machine should change state */
/* first, check if port was reinitialized */
- if (port->sm_vars & AD_PORT_BEGIN)
+ if (port->sm_vars & AD_PORT_BEGIN) {
port->sm_rx_state = AD_RX_INITIALIZE;
+ port->sm_vars |= AD_PORT_CHURNED;
/* check if port is not enabled */
- else if (!(port->sm_vars & AD_PORT_BEGIN)
+ } else if (!(port->sm_vars & AD_PORT_BEGIN)
&& !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* check if new lacpdu arrived */
else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
(port->sm_rx_state == AD_RX_DEFAULTED) ||
(port->sm_rx_state == AD_RX_CURRENT))) {
+ if (port->sm_rx_state != AD_RX_CURRENT)
+ port->sm_vars |= AD_PORT_CHURNED;
port->sm_rx_timer_counter = 0;
port->sm_rx_state = AD_RX_CURRENT;
} else {
@@ -1100,9 +1104,11 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
*/
port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
+ port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT;
port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= AD_STATE_EXPIRED;
+ port->sm_vars |= AD_PORT_CHURNED;
break;
case AD_RX_DEFAULTED:
__update_default_selected(port);
@@ -1132,6 +1138,45 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
}
/**
+ * ad_churn_machine - handle a port's churn state machines
+ * @port: the port we're looking at
+ *
+ */
+static void ad_churn_machine(struct port *port)
+{
+ if (port->sm_vars & AD_PORT_CHURNED) {
+ port->sm_vars &= ~AD_PORT_CHURNED;
+ port->sm_churn_actor_state = AD_CHURN_MONITOR;
+ port->sm_churn_partner_state = AD_CHURN_MONITOR;
+ port->sm_churn_actor_timer_counter =
+ __ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0);
+ port->sm_churn_partner_timer_counter =
+ __ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0);
+ return;
+ }
+ if (port->sm_churn_actor_timer_counter &&
+ !(--port->sm_churn_actor_timer_counter) &&
+ port->sm_churn_actor_state == AD_CHURN_MONITOR) {
+ if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) {
+ port->sm_churn_actor_state = AD_NO_CHURN;
+ } else {
+ port->churn_actor_count++;
+ port->sm_churn_actor_state = AD_CHURN;
+ }
+ }
+ if (port->sm_churn_partner_timer_counter &&
+ !(--port->sm_churn_partner_timer_counter) &&
+ port->sm_churn_partner_state == AD_CHURN_MONITOR) {
+ if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) {
+ port->sm_churn_partner_state = AD_NO_CHURN;
+ } else {
+ port->churn_partner_count++;
+ port->sm_churn_partner_state = AD_CHURN;
+ }
+ }
+}
+
+/**
* ad_tx_machine - handle a port's tx state machine
* @port: the port we're looking at
*/
@@ -1383,8 +1428,10 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
else
port->aggregator->is_individual = true;
- port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key;
- port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key;
+ port->aggregator->actor_admin_aggregator_key =
+ port->actor_admin_port_key;
+ port->aggregator->actor_oper_aggregator_key =
+ port->actor_oper_port_key;
port->aggregator->partner_system =
port->partner_oper.system;
port->aggregator->partner_system_priority =
@@ -1710,14 +1757,9 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
};
if (port) {
- port->actor_port_number = 1;
port->actor_port_priority = 0xff;
- port->actor_system = null_mac_addr;
- port->actor_system_priority = 0xffff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_key = 1;
- port->actor_oper_port_key = 1;
port->actor_admin_port_state = AD_STATE_AGGREGATION |
AD_STATE_LACP_ACTIVITY;
port->actor_oper_port_state = AD_STATE_AGGREGATION |
@@ -1731,7 +1773,7 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->is_enabled = true;
/* private parameters */
- port->sm_vars = 0x3;
+ port->sm_vars = AD_PORT_BEGIN | AD_PORT_LACP_ENABLED;
port->sm_rx_state = 0;
port->sm_rx_timer_counter = 0;
port->sm_periodic_state = 0;
@@ -1739,12 +1781,17 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->sm_mux_state = 0;
port->sm_mux_timer_counter = 0;
port->sm_tx_state = 0;
- port->sm_tx_timer_counter = 0;
- port->slave = NULL;
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
port->transaction_id = 0;
+ port->sm_churn_actor_timer_counter = 0;
+ port->sm_churn_actor_state = 0;
+ port->churn_actor_count = 0;
+ port->sm_churn_partner_timer_counter = 0;
+ port->sm_churn_partner_state = 0;
+ port->churn_partner_count = 0;
+
memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu));
}
}
@@ -1916,8 +1963,6 @@ void bond_3ad_bind_slave(struct slave *slave)
* lacpdu's are sent in one second)
*/
port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
- port->aggregator = NULL;
- port->next_port_in_aggregator = NULL;
__disable_port(port);
@@ -2164,6 +2209,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
+ ad_churn_machine(port);
/* turn off the BEGIN bit, since we already handled it */
if (port->sm_vars & AD_PORT_BEGIN)
@@ -2279,8 +2325,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
spin_lock_bh(&slave->bond->mode_lock);
port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
- port->actor_oper_port_key = port->actor_admin_port_key |=
- (__get_link_speed(port) << 1);
+ port->actor_admin_port_key |= __get_link_speed(port) << 1;
+ port->actor_oper_port_key = port->actor_admin_port_key;
netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number);
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
@@ -2312,8 +2358,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
spin_lock_bh(&slave->bond->mode_lock);
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
- port->actor_oper_port_key = port->actor_admin_port_key |=
- __get_duplex(port);
+ port->actor_admin_port_key |= __get_duplex(port);
+ port->actor_oper_port_key = port->actor_admin_port_key;
netdev_dbg(slave->bond->dev, "Port %d slave %s changed duplex\n",
port->actor_port_number, slave->dev->name);
if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
@@ -2354,21 +2400,19 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
* on link up we are forcing recheck on the duplex and speed since
* some of he adaptors(ce1000.lan) report.
*/
+ port->actor_admin_port_key &= ~(AD_DUPLEX_KEY_MASKS|AD_SPEED_KEY_MASKS);
if (link == BOND_LINK_UP) {
port->is_enabled = true;
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
- port->actor_oper_port_key = port->actor_admin_port_key |=
- __get_duplex(port);
- port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
- port->actor_oper_port_key = port->actor_admin_port_key |=
- (__get_link_speed(port) << 1);
+ port->actor_admin_port_key |=
+ (__get_link_speed(port) << 1) | __get_duplex(port);
+ if (port->actor_admin_port_key & AD_DUPLEX_KEY_MASKS)
+ port->sm_vars |= AD_PORT_LACP_ENABLED;
} else {
/* link has failed */
port->is_enabled = false;
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
- port->actor_oper_port_key = (port->actor_admin_port_key &=
- ~AD_SPEED_KEY_MASKS);
+ port->sm_vars &= ~AD_PORT_LACP_ENABLED;
}
+ port->actor_oper_port_key = port->actor_admin_port_key;
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
@@ -2485,6 +2529,9 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
if (skb->protocol != PKT_TYPE_LACPDU)
return RX_HANDLER_ANOTHER;
+ if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr))
+ return RX_HANDLER_ANOTHER;
+
lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
if (!lacpdu)
return RX_HANDLER_ANOTHER;
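The new ad_churn_machine() adds 802.3ad churn detection: when the port is (re)initialized or falls out of the CURRENT receive state, both churn monitors restart their timers, and a monitor whose window expires while its side is still unsynchronized bumps a counter and reports churn. The following is a simplified, self-contained model of one such monitor, written only to show the timer/state interplay; the names and the fixed three-tick window are invented for the example and do not match the driver's timer constants.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of one churn monitor; the driver keeps two of these
 * (actor and partner) per port and ticks them from the state-machine worker.
 */
enum churn_state { CHURN_MONITOR, NO_CHURN, CHURN };

struct churn_monitor {
	enum churn_state state;
	int timer;		/* ticks left in the monitor window */
	int churn_count;
};

static void churn_restart(struct churn_monitor *m, int window)
{
	m->state = CHURN_MONITOR;
	m->timer = window;
}

/* One tick: if the window expires while the port is still out of sync,
 * report churn; otherwise settle into NO_CHURN.
 */
static void churn_tick(struct churn_monitor *m, bool in_sync)
{
	if (m->timer && !--m->timer && m->state == CHURN_MONITOR) {
		if (in_sync) {
			m->state = NO_CHURN;
		} else {
			m->churn_count++;
			m->state = CHURN;
		}
	}
}

int main(void)
{
	struct churn_monitor m = { .churn_count = 0 };

	churn_restart(&m, 3);		/* e.g. port was reinitialized */
	for (int t = 0; t < 3; t++)
		churn_tick(&m, false);	/* never reaches synchronization */

	printf("state=%d churned=%d times\n", m.state, m.churn_count);
	return 0;
}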
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 089a4028859d..d5fe5d5f490f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -82,6 +82,8 @@
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
+#include "bonding_priv.h"
+
/*---------------------------- Module parameters ----------------------------*/
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
@@ -928,6 +930,39 @@ static inline void slave_disable_netpoll(struct slave *slave)
static void bond_poll_controller(struct net_device *bond_dev)
{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave = NULL;
+ struct list_head *iter;
+ struct ad_info ad_info;
+ struct netpoll_info *ni;
+ const struct net_device_ops *ops;
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ if (bond_3ad_get_active_agg_info(bond, &ad_info))
+ return;
+
+ rcu_read_lock_bh();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ ops = slave->dev->netdev_ops;
+ if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+ continue;
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ struct aggregator *agg =
+ SLAVE_AD_INFO(slave)->port.aggregator;
+
+ if (agg &&
+ agg->aggregator_identifier != ad_info.aggregator_id)
+ continue;
+ }
+
+ ni = rcu_dereference_bh(slave->dev->npinfo);
+ if (down_trylock(&ni->dev_lock))
+ continue;
+ ops->ndo_poll_controller(slave->dev);
+ up(&ni->dev_lock);
+ }
+ rcu_read_unlock_bh();
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
@@ -2900,6 +2935,8 @@ static int bond_slave_netdev_event(unsigned long event,
if (old_duplex != slave->duplex)
bond_3ad_adapter_duplex_changed(slave);
}
+ /* Fallthrough */
+ case NETDEV_DOWN:
/* Refresh slave-array if applicable!
* If the setup does not use miimon or arpmon (mode-specific!),
* then these events will not cause the slave-array to be
@@ -2911,10 +2948,6 @@ static int bond_slave_netdev_event(unsigned long event,
if (bond_mode_uses_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
break;
- case NETDEV_DOWN:
- if (bond_mode_uses_xmit_hash(bond))
- bond_update_slave_arr(bond, NULL);
- break;
case NETDEV_CHANGEMTU:
/* TODO: Should slaves be allowed to
* independently alter their MTU? For
@@ -4008,6 +4041,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_fix_features = bond_fix_features,
.ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
.ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
+ .ndo_features_check = passthru_features_check,
};
static const struct device_type bond_type = {
@@ -4510,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void)
int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
+ struct bonding *bond;
+ struct alb_bond_info *bond_info;
int res;
rtnl_lock();
@@ -4523,6 +4559,14 @@ int bond_create(struct net *net, const char *name)
return -ENOMEM;
}
+ /*
+ * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
+ * It is set to 0 by default which is wrong.
+ */
+ bond = netdev_priv(bond_dev);
+ bond_info = &(BOND_ALB_INFO(bond));
+ bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
+
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
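bond_poll_controller() now walks the slaves under RCU and uses down_trylock() on each slave's netpoll lock, skipping a busy device rather than sleeping in a context that must not block. Below is a small userspace model of that "trylock or skip" pattern, with pthread mutexes standing in for the netpoll semaphore; the device names and counters are invented for illustration (build with -pthread).

#include <pthread.h>
#include <stdio.h>

struct demo_dev {
	const char *name;
	pthread_mutex_t lock;
	int polled;
};

static void poll_all(struct demo_dev *devs, int n)
{
	for (int i = 0; i < n; i++) {
		if (pthread_mutex_trylock(&devs[i].lock))
			continue;	/* busy elsewhere: skip, don't block */
		devs[i].polled++;	/* stand-in for ndo_poll_controller() */
		pthread_mutex_unlock(&devs[i].lock);
	}
}

int main(void)
{
	struct demo_dev devs[2] = {
		{ "eth0", PTHREAD_MUTEX_INITIALIZER, 0 },
		{ "eth1", PTHREAD_MUTEX_INITIALIZER, 0 },
	};

	pthread_mutex_lock(&devs[1].lock);	/* simulate contention */
	poll_all(devs, 2);
	pthread_mutex_unlock(&devs[1].lock);

	printf("%s polled %d, %s polled %d\n",
	       devs[0].name, devs[0].polled, devs[1].name, devs[1].polled);
	return 0;
}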
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 976f5ad2a0f2..b20b35acb47d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -4,6 +4,7 @@
#include <net/netns/generic.h>
#include <net/bonding.h>
+#include "bonding_priv.h"
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
@@ -176,18 +177,51 @@ static void bond_info_show_slave(struct seq_file *seq,
slave->link_failure_count);
seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
+ seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- const struct aggregator *agg
- = SLAVE_AD_INFO(slave)->port.aggregator;
+ const struct port *port = &SLAVE_AD_INFO(slave)->port;
+ const struct aggregator *agg = port->aggregator;
- if (agg)
+ if (agg) {
seq_printf(seq, "Aggregator ID: %d\n",
agg->aggregator_identifier);
- else
+ seq_printf(seq, "Actor Churn State: %s\n",
+ bond_3ad_churn_desc(port->sm_churn_actor_state));
+ seq_printf(seq, "Partner Churn State: %s\n",
+ bond_3ad_churn_desc(port->sm_churn_partner_state));
+ seq_printf(seq, "Actor Churned Count: %d\n",
+ port->churn_actor_count);
+ seq_printf(seq, "Partner Churned Count: %d\n",
+ port->churn_partner_count);
+
+ seq_puts(seq, "details actor lacp pdu:\n");
+ seq_printf(seq, " system priority: %d\n",
+ port->actor_system_priority);
+ seq_printf(seq, " port key: %d\n",
+ port->actor_oper_port_key);
+ seq_printf(seq, " port priority: %d\n",
+ port->actor_port_priority);
+ seq_printf(seq, " port number: %d\n",
+ port->actor_port_number);
+ seq_printf(seq, " port state: %d\n",
+ port->actor_oper_port_state);
+
+ seq_puts(seq, "details partner lacp pdu:\n");
+ seq_printf(seq, " system priority: %d\n",
+ port->partner_oper.system_priority);
+ seq_printf(seq, " oper key: %d\n",
+ port->partner_oper.key);
+ seq_printf(seq, " port priority: %d\n",
+ port->partner_oper.port_priority);
+ seq_printf(seq, " port number: %d\n",
+ port->partner_oper.port_number);
+ seq_printf(seq, " port state: %d\n",
+ port->partner_oper.port_state);
+ } else {
seq_puts(seq, "Aggregator ID: N/A\n");
+ }
}
- seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
}
static int bond_info_seq_show(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
new file mode 100644
index 000000000000..5a4d81a9437c
--- /dev/null
+++ b/drivers/net/bonding/bonding_priv.h
@@ -0,0 +1,25 @@
+/*
+ * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
+ *
+ * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * BUT, I'm the one who modified it for ethernet, so:
+ * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _BONDING_PRIV_H
+#define _BONDING_PRIV_H
+
+#define DRV_VERSION "3.7.1"
+#define DRV_RELDATE "April 27, 2011"
+#define DRV_NAME "bonding"
+#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
+
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
+#endif
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 27bbc56de15f..9da06537237f 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -70,7 +70,6 @@ struct ser_device {
struct tty_struct *tty;
bool tx_started;
unsigned long state;
- char *tty_name;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_tty_dir;
struct debugfs_blob_wrapper tx_blob;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 58808f651452..e8c96b8e86f4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -112,7 +112,7 @@ config PCH_CAN
config CAN_GRCAN
tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
- depends on OF
+ depends on OF && HAS_DMA
---help---
Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
Note that the driver supports little endian, even though little
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index eeb4b8b6b335..f4e40aa4d2a2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -291,13 +291,13 @@ static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
- return __raw_readl(priv->reg_base + reg);
+ return readl_relaxed(priv->reg_base + reg);
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
u32 value)
{
- __raw_writel(value, priv->reg_base + reg);
+ writel_relaxed(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index e7a6363e736b..27ad312e7abf 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -20,13 +20,121 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <asm/bfin_can.h>
#include <asm/portmux.h>
#define DRV_NAME "bfin_can"
#define BFIN_CAN_TIMEOUT 100
#define TX_ECHO_SKB_MAX 1
+/* transmit and receive channels */
+#define TRANSMIT_CHL 24
+#define RECEIVE_STD_CHL 0
+#define RECEIVE_EXT_CHL 4
+#define RECEIVE_RTR_CHL 8
+#define RECEIVE_EXT_RTR_CHL 12
+#define MAX_CHL_NUMBER 32
+
+/* All Blackfin system MMRs are padded to 32 bits even if the register
+ * itself is only 16 bits, so use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/* bfin can registers layout */
+struct bfin_can_mask_regs {
+ __BFP(aml);
+ __BFP(amh);
+};
+
+struct bfin_can_channel_regs {
+ /* data[0,2,4,6] -> data{0,1,2,3} while data[1,3,5,7] is padding */
+ u16 data[8];
+ __BFP(dlc);
+ __BFP(tsv);
+ __BFP(id0);
+ __BFP(id1);
+};
+
+struct bfin_can_regs {
+ /* global control and status registers */
+ __BFP(mc1); /* offset 0x00 */
+ __BFP(md1); /* offset 0x04 */
+ __BFP(trs1); /* offset 0x08 */
+ __BFP(trr1); /* offset 0x0c */
+ __BFP(ta1); /* offset 0x10 */
+ __BFP(aa1); /* offset 0x14 */
+ __BFP(rmp1); /* offset 0x18 */
+ __BFP(rml1); /* offset 0x1c */
+ __BFP(mbtif1); /* offset 0x20 */
+ __BFP(mbrif1); /* offset 0x24 */
+ __BFP(mbim1); /* offset 0x28 */
+ __BFP(rfh1); /* offset 0x2c */
+ __BFP(opss1); /* offset 0x30 */
+ u32 __pad1[3];
+ __BFP(mc2); /* offset 0x40 */
+ __BFP(md2); /* offset 0x44 */
+ __BFP(trs2); /* offset 0x48 */
+ __BFP(trr2); /* offset 0x4c */
+ __BFP(ta2); /* offset 0x50 */
+ __BFP(aa2); /* offset 0x54 */
+ __BFP(rmp2); /* offset 0x58 */
+ __BFP(rml2); /* offset 0x5c */
+ __BFP(mbtif2); /* offset 0x60 */
+ __BFP(mbrif2); /* offset 0x64 */
+ __BFP(mbim2); /* offset 0x68 */
+ __BFP(rfh2); /* offset 0x6c */
+ __BFP(opss2); /* offset 0x70 */
+ u32 __pad2[3];
+ __BFP(clock); /* offset 0x80 */
+ __BFP(timing); /* offset 0x84 */
+ __BFP(debug); /* offset 0x88 */
+ __BFP(status); /* offset 0x8c */
+ __BFP(cec); /* offset 0x90 */
+ __BFP(gis); /* offset 0x94 */
+ __BFP(gim); /* offset 0x98 */
+ __BFP(gif); /* offset 0x9c */
+ __BFP(control); /* offset 0xa0 */
+ __BFP(intr); /* offset 0xa4 */
+ __BFP(version); /* offset 0xa8 */
+ __BFP(mbtd); /* offset 0xac */
+ __BFP(ewr); /* offset 0xb0 */
+ __BFP(esr); /* offset 0xb4 */
+ u32 __pad3[2];
+ __BFP(ucreg); /* offset 0xc0 */
+ __BFP(uccnt); /* offset 0xc4 */
+ __BFP(ucrc); /* offset 0xc8 */
+ __BFP(uccnf); /* offset 0xcc */
+ u32 __pad4[1];
+ __BFP(version2); /* offset 0xd4 */
+ u32 __pad5[10];
+
+ /* channel(mailbox) mask and message registers */
+ struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */
+ struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
+};
+
+#undef __BFP
+
+#define SRS 0x0001 /* Software Reset */
+#define SER 0x0008 /* Stuff Error */
+#define BOIM 0x0008 /* Enable Bus Off Interrupt */
+#define CCR 0x0080 /* CAN Configuration Mode Request */
+#define CCA 0x0080 /* Configuration Mode Acknowledge */
+#define SAM 0x0080 /* Sampling */
+#define AME 0x8000 /* Acceptance Mask Enable */
+#define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */
+#define RMLIS 0x0080 /* RX Message Lost IRQ Status */
+#define RTR 0x4000 /* Remote Frame Transmission Request */
+#define BOIS 0x0008 /* Bus Off IRQ Status */
+#define IDE 0x2000 /* Identifier Extension */
+#define EPIS 0x0004 /* Error-Passive Mode IRQ Status */
+#define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */
+#define EWTIS 0x0001 /* TX Error Count IRQ Status */
+#define EWRIS 0x0002 /* RX Error Count IRQ Status */
+#define BEF 0x0040 /* Bit Error Flag */
+#define FER 0x0080 /* Form Error Flag */
+#define SMR 0x0020 /* Sleep Mode Request */
+#define SMACK 0x0008 /* Sleep Mode Acknowledge */
+
/*
* bfin can private data
*/
@@ -78,8 +186,8 @@ static int bfin_can_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
timing |= SAM;
- bfin_write(&reg->clock, clk);
- bfin_write(&reg->timing, timing);
+ writew(clk, &reg->clock);
+ writew(timing, &reg->timing);
netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);
@@ -94,16 +202,14 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
int i;
/* disable interrupts */
- bfin_write(&reg->mbim1, 0);
- bfin_write(&reg->mbim2, 0);
- bfin_write(&reg->gim, 0);
+ writew(0, &reg->mbim1);
+ writew(0, &reg->mbim2);
+ writew(0, &reg->gim);
/* reset can and enter configuration mode */
- bfin_write(&reg->control, SRS | CCR);
- SSYNC();
- bfin_write(&reg->control, CCR);
- SSYNC();
- while (!(bfin_read(&reg->control) & CCA)) {
+ writew(SRS | CCR, &reg->control);
+ writew(CCR, &reg->control);
+ while (!(readw(&reg->control) & CCA)) {
udelay(10);
if (--timeout == 0) {
netdev_err(dev, "fail to enter configuration mode\n");
@@ -116,34 +222,33 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
* by writing to CAN Mailbox Configuration Registers 1 and 2
* For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
*/
- bfin_write(&reg->mc1, 0);
- bfin_write(&reg->mc2, 0);
+ writew(0, &reg->mc1);
+ writew(0, &reg->mc2);
/* Set Mailbox Direction */
- bfin_write(&reg->md1, 0xFFFF); /* mailbox 1-16 are RX */
- bfin_write(&reg->md2, 0); /* mailbox 17-32 are TX */
+ writew(0xFFFF, &reg->md1); /* mailbox 1-16 are RX */
+ writew(0, &reg->md2); /* mailbox 17-32 are TX */
/* RECEIVE_STD_CHL */
for (i = 0; i < 2; i++) {
- bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
- bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
- bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
- bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
- bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
+ writew(0, &reg->chl[RECEIVE_STD_CHL + i].id0);
+ writew(AME, &reg->chl[RECEIVE_STD_CHL + i].id1);
+ writew(0, &reg->chl[RECEIVE_STD_CHL + i].dlc);
+ writew(0x1FFF, &reg->msk[RECEIVE_STD_CHL + i].amh);
+ writew(0xFFFF, &reg->msk[RECEIVE_STD_CHL + i].aml);
}
/* RECEIVE_EXT_CHL */
for (i = 0; i < 2; i++) {
- bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
- bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
- bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
- bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
- bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
+ writew(0, &reg->chl[RECEIVE_EXT_CHL + i].id0);
+ writew(AME | IDE, &reg->chl[RECEIVE_EXT_CHL + i].id1);
+ writew(0, &reg->chl[RECEIVE_EXT_CHL + i].dlc);
+ writew(0x1FFF, &reg->msk[RECEIVE_EXT_CHL + i].amh);
+ writew(0xFFFF, &reg->msk[RECEIVE_EXT_CHL + i].aml);
}
- bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
- bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
- SSYNC();
+ writew(BIT(TRANSMIT_CHL - 16), &reg->mc2);
+ writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mc1);
priv->can.state = CAN_STATE_STOPPED;
}
@@ -157,9 +262,9 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
/*
* leave configuration mode
*/
- bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);
+ writew(readw(&reg->control) & ~CCR, &reg->control);
- while (bfin_read(&reg->status) & CCA) {
+ while (readw(&reg->status) & CCA) {
udelay(10);
if (--timeout == 0) {
netdev_err(dev, "fail to leave configuration mode\n");
@@ -170,26 +275,25 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
/*
* clear _All_ tx and rx interrupts
*/
- bfin_write(&reg->mbtif1, 0xFFFF);
- bfin_write(&reg->mbtif2, 0xFFFF);
- bfin_write(&reg->mbrif1, 0xFFFF);
- bfin_write(&reg->mbrif2, 0xFFFF);
+ writew(0xFFFF, &reg->mbtif1);
+ writew(0xFFFF, &reg->mbtif2);
+ writew(0xFFFF, &reg->mbrif1);
+ writew(0xFFFF, &reg->mbrif2);
/*
* clear global interrupt status register
*/
- bfin_write(&reg->gis, 0x7FF); /* overwrites with '1' */
+ writew(0x7FF, &reg->gis); /* overwrites with '1' */
/*
* Initialize Interrupts
* - set bits in the mailbox interrupt mask register
* - global interrupt mask
*/
- bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
- bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
+ writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mbim1);
+ writew(BIT(TRANSMIT_CHL - 16), &reg->mbim2);
- bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
- SSYNC();
+ writew(EPIM | BOIM | RMLIM, &reg->gim);
}
static void bfin_can_start(struct net_device *dev)
@@ -226,7 +330,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev,
struct bfin_can_priv *priv = netdev_priv(dev);
struct bfin_can_regs __iomem *reg = priv->membase;
- u16 cec = bfin_read(&reg->cec);
+ u16 cec = readw(&reg->cec);
bec->txerr = cec >> 8;
bec->rxerr = cec;
@@ -252,28 +356,28 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fill id */
if (id & CAN_EFF_FLAG) {
- bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
+ writew(id, &reg->chl[TRANSMIT_CHL].id0);
val = ((id & 0x1FFF0000) >> 16) | IDE;
} else
val = (id << 2);
if (id & CAN_RTR_FLAG)
val |= RTR;
- bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);
+ writew(val | AME, &reg->chl[TRANSMIT_CHL].id1);
/* fill payload */
for (i = 0; i < 8; i += 2) {
val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
((6 - i) < dlc ? (data[6 - i] << 8) : 0);
- bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
+ writew(val, &reg->chl[TRANSMIT_CHL].data[i]);
}
/* fill data length code */
- bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);
+ writew(dlc, &reg->chl[TRANSMIT_CHL].dlc);
can_put_echo_skb(skb, dev, 0);
/* set transmit request */
- bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));
+ writew(BIT(TRANSMIT_CHL - 16), &reg->trs2);
return 0;
}
@@ -296,26 +400,26 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
/* get id */
if (isrc & BIT(RECEIVE_EXT_CHL)) {
/* extended frame format (EFF) */
- cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
+ cf->can_id = ((readw(&reg->chl[RECEIVE_EXT_CHL].id1)
& 0x1FFF) << 16)
- + bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
+ + readw(&reg->chl[RECEIVE_EXT_CHL].id0);
cf->can_id |= CAN_EFF_FLAG;
obj = RECEIVE_EXT_CHL;
} else {
/* standard frame format (SFF) */
- cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
+ cf->can_id = (readw(&reg->chl[RECEIVE_STD_CHL].id1)
& 0x1ffc) >> 2;
obj = RECEIVE_STD_CHL;
}
- if (bfin_read(&reg->chl[obj].id1) & RTR)
+ if (readw(&reg->chl[obj].id1) & RTR)
cf->can_id |= CAN_RTR_FLAG;
/* get data length code */
- cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);
+ cf->can_dlc = get_can_dlc(readw(&reg->chl[obj].dlc) & 0xF);
/* get payload */
for (i = 0; i < 8; i += 2) {
- val = bfin_read(&reg->chl[obj].data[i]);
+ val = readw(&reg->chl[obj].data[i]);
cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
}
@@ -369,7 +473,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
state == CAN_STATE_ERROR_PASSIVE)) {
- u16 cec = bfin_read(&reg->cec);
+ u16 cec = readw(&reg->cec);
u8 rxerr = cec;
u8 txerr = cec >> 8;
@@ -420,23 +524,23 @@ static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
struct net_device_stats *stats = &dev->stats;
u16 status, isrc;
- if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
+ if ((irq == priv->tx_irq) && readw(&reg->mbtif2)) {
/* transmission complete interrupt */
- bfin_write(&reg->mbtif2, 0xFFFF);
+ writew(0xFFFF, &reg->mbtif2);
stats->tx_packets++;
- stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
+ stats->tx_bytes += readw(&reg->chl[TRANSMIT_CHL].dlc);
can_get_echo_skb(dev, 0);
netif_wake_queue(dev);
- } else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
+ } else if ((irq == priv->rx_irq) && readw(&reg->mbrif1)) {
/* receive interrupt */
- isrc = bfin_read(&reg->mbrif1);
- bfin_write(&reg->mbrif1, 0xFFFF);
+ isrc = readw(&reg->mbrif1);
+ writew(0xFFFF, &reg->mbrif1);
bfin_can_rx(dev, isrc);
- } else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
+ } else if ((irq == priv->err_irq) && readw(&reg->gis)) {
/* error interrupt */
- isrc = bfin_read(&reg->gis);
- status = bfin_read(&reg->esr);
- bfin_write(&reg->gis, 0x7FF);
+ isrc = readw(&reg->gis);
+ status = readw(&reg->esr);
+ writew(0x7FF, &reg->gis);
bfin_can_err(dev, isrc, status);
} else {
return IRQ_NONE;
@@ -556,16 +660,10 @@ static int bfin_can_probe(struct platform_device *pdev)
goto exit;
}
- if (!request_mem_region(res_mem->start, resource_size(res_mem),
- dev_name(&pdev->dev))) {
- err = -EBUSY;
- goto exit;
- }
-
/* request peripheral pins */
err = peripheral_request_list(pdata, dev_name(&pdev->dev));
if (err)
- goto exit_mem_release;
+ goto exit;
dev = alloc_bfin_candev();
if (!dev) {
@@ -574,7 +672,13 @@ static int bfin_can_probe(struct platform_device *pdev)
}
priv = netdev_priv(dev);
- priv->membase = (void __iomem *)res_mem->start;
+
+ priv->membase = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(priv->membase)) {
+ err = PTR_ERR(priv->membase);
+ goto exit_peri_pin_free;
+ }
+
priv->rx_irq = rx_irq->start;
priv->tx_irq = tx_irq->start;
priv->err_irq = err_irq->start;
@@ -606,8 +710,6 @@ exit_candev_free:
free_candev(dev);
exit_peri_pin_free:
peripheral_free_list(pdata);
-exit_mem_release:
- release_mem_region(res_mem->start, resource_size(res_mem));
exit:
return err;
}
@@ -616,15 +718,11 @@ static int bfin_can_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct bfin_can_priv *priv = netdev_priv(dev);
- struct resource *res;
bfin_can_set_reset_mode(dev);
unregister_candev(dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
peripheral_free_list(priv->pin_list);
free_candev(dev);
@@ -641,9 +739,8 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
if (netif_running(dev)) {
/* enter sleep mode */
- bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
- SSYNC();
- while (!(bfin_read(&reg->intr) & SMACK)) {
+ writew(readw(&reg->control) | SMR, &reg->control);
+ while (!(readw(&reg->intr) & SMACK)) {
udelay(10);
if (--timeout == 0) {
netdev_err(dev, "fail to enter sleep mode\n");
@@ -663,8 +760,7 @@ static int bfin_can_resume(struct platform_device *pdev)
if (netif_running(dev)) {
/* leave sleep mode */
- bfin_write(&reg->intr, 0);
- SSYNC();
+ writew(0, &reg->intr);
}
return 0;
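The bfin_can change moves the register map out of <asm/bfin_can.h> and into the driver, using the __BFP() helper so each 16-bit MMR occupies a 32-bit slot that lines up with the documented offsets. The sketch below shows the same padding trick on a cut-down, hypothetical register block and checks the layout at compile time with _Static_assert; the offsets asserted belong to this demo struct only, not to the Blackfin CAN map.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Each 16-bit register is padded to a 32-bit slot, mirroring the MMR map;
 * same trick as the driver's __BFP() helper.
 */
#define PADDED16(m) uint16_t m; uint16_t __pad_##m

struct demo_regs {
	PADDED16(mc1);		/* expected offset 0x00 */
	PADDED16(md1);		/* expected offset 0x04 */
	PADDED16(trs1);		/* expected offset 0x08 */
	uint32_t __gap[3];	/* hole in the register map */
	PADDED16(clock);	/* expected offset 0x18 */
};

/* Catch layout mistakes at compile time rather than on the hardware. */
_Static_assert(offsetof(struct demo_regs, md1) == 0x04, "md1 offset");
_Static_assert(offsetof(struct demo_regs, trs1) == 0x08, "trs1 offset");
_Static_assert(offsetof(struct demo_regs, clock) == 0x18, "clock offset");

int main(void)
{
	printf("sizeof(struct demo_regs) = %zu\n", sizeof(struct demo_regs));
	return 0;
}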
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index b1e8851d3cc4..866e5e12fdd2 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -254,7 +254,7 @@ static int cc770_platform_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id cc770_platform_table[] = {
+static const struct of_device_id cc770_platform_table[] = {
{.compatible = "bosch,cc770"}, /* CC770 from Bosch */
{.compatible = "intc,82527"}, /* AN82527 from Intel CP */
{},
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index fed1bbd0b0d2..e3d7e22a4fa0 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1725,7 +1725,7 @@ static int grcan_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id grcan_match[] = {
+static const struct of_device_id grcan_match[] = {
{.name = "GAISLER_GRCAN"},
{.name = "01_03d"},
{.name = "GAISLER_GRHCAN"},
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
index ab7f1b01be49..c1b667675fa1 100644
--- a/drivers/net/can/led.c
+++ b/drivers/net/can/led.c
@@ -30,20 +30,28 @@ void can_led_event(struct net_device *netdev, enum can_led_event event)
case CAN_LED_EVENT_OPEN:
led_trigger_event(priv->tx_led_trig, LED_FULL);
led_trigger_event(priv->rx_led_trig, LED_FULL);
+ led_trigger_event(priv->rxtx_led_trig, LED_FULL);
break;
case CAN_LED_EVENT_STOP:
led_trigger_event(priv->tx_led_trig, LED_OFF);
led_trigger_event(priv->rx_led_trig, LED_OFF);
+ led_trigger_event(priv->rxtx_led_trig, LED_OFF);
break;
case CAN_LED_EVENT_TX:
- if (led_delay)
+ if (led_delay) {
led_trigger_blink_oneshot(priv->tx_led_trig,
&led_delay, &led_delay, 1);
+ led_trigger_blink_oneshot(priv->rxtx_led_trig,
+ &led_delay, &led_delay, 1);
+ }
break;
case CAN_LED_EVENT_RX:
- if (led_delay)
+ if (led_delay) {
led_trigger_blink_oneshot(priv->rx_led_trig,
&led_delay, &led_delay, 1);
+ led_trigger_blink_oneshot(priv->rxtx_led_trig,
+ &led_delay, &led_delay, 1);
+ }
break;
}
}
@@ -55,6 +63,7 @@ static void can_led_release(struct device *gendev, void *res)
led_trigger_unregister_simple(priv->tx_led_trig);
led_trigger_unregister_simple(priv->rx_led_trig);
+ led_trigger_unregister_simple(priv->rxtx_led_trig);
}
/* Register CAN LED triggers for a CAN device
@@ -76,11 +85,15 @@ void devm_can_led_init(struct net_device *netdev)
"%s-tx", netdev->name);
snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
"%s-rx", netdev->name);
+ snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name),
+ "%s-rxtx", netdev->name);
led_trigger_register_simple(priv->tx_led_trig_name,
&priv->tx_led_trig);
led_trigger_register_simple(priv->rx_led_trig_name,
&priv->rx_led_trig);
+ led_trigger_register_simple(priv->rxtx_led_trig_name,
+ &priv->rxtx_led_trig);
devres_add(&netdev->dev, res);
}
@@ -97,7 +110,7 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
if (!priv)
return NOTIFY_DONE;
- if (!priv->tx_led_trig || !priv->rx_led_trig)
+ if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig)
return NOTIFY_DONE;
if (msg == NETDEV_CHANGENAME) {
@@ -106,6 +119,9 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
snprintf(name, sizeof(name), "%s-rx", netdev->name);
led_trigger_rename_static(name, priv->rx_led_trig);
+
+ snprintf(name, sizeof(name), "%s-rxtx", netdev->name);
+ led_trigger_rename_static(name, priv->rxtx_led_trig);
}
return NOTIFY_DONE;
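The led.c hunks add a third, combined rxtx trigger alongside the per-direction tx and rx triggers, so every place that names, registers, blinks, renames or unregisters a trigger gains a matching rxtx line. A small sketch of the "%s-suffix" naming into fixed-size buffers is shown below; the buffer size and struct are illustrative stand-ins for the fields of struct can_priv.

#include <stdio.h>

#define TRIG_NAME_LEN 32               /* illustrative size */

struct led_names {
	char tx[TRIG_NAME_LEN];
	char rx[TRIG_NAME_LEN];
	char rxtx[TRIG_NAME_LEN];
};

static void build_trigger_names(struct led_names *n, const char *ifname)
{
	snprintf(n->tx,   sizeof(n->tx),   "%s-tx",   ifname);
	snprintf(n->rx,   sizeof(n->rx),   "%s-rx",   ifname);
	snprintf(n->rxtx, sizeof(n->rxtx), "%s-rxtx", ifname);
}

int main(void)
{
	struct led_names n;

	build_trigger_names(&n, "can0");
	printf("%s %s %s\n", n.tx, n.rx, n.rxtx);   /* can0-tx can0-rx can0-rxtx */
	return 0;
}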
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2e04b3aeeb37..ef655177bb5e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -312,8 +312,8 @@ static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
static inline void m_can_fifo_write(const struct m_can_priv *priv,
u32 fpi, unsigned int offset, u32 val)
{
- return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
- fpi * TXB_ELEMENT_SIZE + offset);
+ writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
+ fpi * TXB_ELEMENT_SIZE + offset);
}
static inline void m_can_config_endisable(const struct m_can_priv *priv,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index ad024e60ba8c..c7427bdd3a4b 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -43,7 +43,7 @@ struct mpc5xxx_can_data {
};
#ifdef CONFIG_PPC_MPC52xx
-static struct of_device_id mpc52xx_cdm_ids[] = {
+static const struct of_device_id mpc52xx_cdm_ids[] = {
{ .compatible = "fsl,mpc5200-cdm", },
{}
};
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 93115250eaf5..0552ed46a206 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -242,7 +242,7 @@ static int sp_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id sp_of_table[] = {
+static const struct of_device_id sp_of_table[] = {
{.compatible = "nxp,sja1000"},
{},
};
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 9376f5e5b94e..866bac0ae7e9 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -123,7 +123,7 @@ MODULE_LICENSE("GPL v2");
* CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME.
*/
struct cpc_can_msg {
- u32 id;
+ __le32 id;
u8 length;
u8 msg[8];
};
@@ -200,8 +200,8 @@ struct __packed ems_cpc_msg {
u8 type; /* type of message */
u8 length; /* length of data within union 'msg' */
u8 msgid; /* confirmation handle */
- u32 ts_sec; /* timestamp in seconds */
- u32 ts_nsec; /* timestamp in nano seconds */
+ __le32 ts_sec; /* timestamp in seconds */
+ __le32 ts_nsec; /* timestamp in nano seconds */
union {
u8 generic[64];
@@ -765,7 +765,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
- msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK;
+ msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
msg->msg.can_msg.length = cf->can_dlc;
if (cf->can_id & CAN_RTR_FLAG) {
@@ -783,9 +783,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
}
- /* Respect byte order */
- msg->msg.can_msg.id = cpu_to_le32(msg->msg.can_msg.id);
-
for (i = 0; i < MAX_TX_URBS; i++) {
if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
context = &dev->tx_contexts[i];
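The ems_usb hunks annotate the on-the-wire fields as __le32 and convert the CAN identifier once, at the point where the wire field is filled in, instead of byte-swapping the already-stored field in place further down. A minimal sketch of that idea in portable C follows; the struct layout only mirrors the shape of cpc_can_msg, and put_le32() is a stand-in for the kernel's cpu_to_le32().

#include <stdint.h>
#include <stdio.h>

struct wire_can_msg {
	uint8_t id[4];      /* CAN identifier, little-endian on the wire */
	uint8_t length;
	uint8_t data[8];
};

static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;              /* least significant byte first */
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = (v >> 24) & 0xff;
}

int main(void)
{
	struct wire_can_msg msg;
	uint32_t can_id = 0x123;

	put_le32(msg.id, can_id);       /* convert once, where the field is set */
	msg.length = 8;

	printf("%02x %02x %02x %02x\n", msg.id[0], msg.id[1], msg.id[2], msg.id[3]);
	return 0;
}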
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index bacca0bd89c1..411c1af92c62 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -139,7 +139,7 @@ struct tx_msg {
u8 cmd;
u8 net;
u8 dlc;
- __le32 hnd;
+ u32 hnd; /* opaque handle, not used by device */
__le32 id; /* upper 3 bits contain flags */
u8 data[8];
};
@@ -149,7 +149,7 @@ struct tx_done_msg {
u8 cmd;
u8 net;
u8 status;
- __le32 hnd;
+ u32 hnd; /* opaque handle, not used by device */
__le32 ts;
};
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 57611fd91229..8b17a9065b0b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
MSG_FLAG_NERR)) {
- netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
+ netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
msg->u.rx_can_header.flag);
stats->rx_errors++;
@@ -1867,7 +1867,7 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
if (!dev->nets[i])
continue;
- unregister_netdev(dev->nets[i]->netdev);
+ unregister_candev(dev->nets[i]->netdev);
}
kvaser_usb_unlink_all_urbs(dev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index a9221ad9f1a0..09d14e70abd7 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -182,7 +182,7 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
{
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
- int err;
+ int err = 0;
u8 *packet_ptr;
int i, n = 1, packet_len;
ptrdiff_t cmd_len;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 6c6764312285..fc55e8e0351d 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -509,10 +509,11 @@ static int xcan_rx(struct net_device *ndev)
cf->can_id |= CAN_RTR_FLAG;
}
- if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
- data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
- data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+ /* DW1/DW2 must always be read to remove message from RXFIFO */
+ data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
+ data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
/* Change Xilinx CAN data format to socketCAN data format */
if (cf->can_dlc > 0)
*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
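The xilinx_can fix above reads DW1/DW2 unconditionally, because reading those words is what removes the frame from the RX FIFO, and only copies them into the payload for data frames. A sketch of that "always drain, conditionally copy" pattern is below; the two-word FIFO model and pop_word() are purely hypothetical.

#include <stdint.h>
#include <stdio.h>

static uint32_t fifo[4] = { 0x11223344, 0x55667788, 0xaabbccdd, 0x00112233 };
static unsigned int fifo_pos;

static uint32_t pop_word(void)
{
	return fifo[fifo_pos++];    /* reading a word advances the FIFO */
}

static void read_frame(int is_rtr, uint32_t data_out[2])
{
	uint32_t dw1 = pop_word();  /* must always be read ... */
	uint32_t dw2 = pop_word();  /* ... so the frame is popped from the FIFO */

	if (!is_rtr) {              /* only data frames carry a payload */
		data_out[0] = dw1;
		data_out[1] = dw2;
	}
}

int main(void)
{
	uint32_t data[2] = { 0, 0 };

	read_frame(1, data);        /* RTR frame: words consumed, not copied */
	read_frame(0, data);        /* next data frame gets fresh words */
	printf("%08x %08x\n", (unsigned)data[0], (unsigned)data[1]);
	return 0;
}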
@@ -1185,7 +1186,7 @@ static int xcan_remove(struct platform_device *pdev)
}
/* Match table for OF platform binding */
-static struct of_device_id xcan_of_match[] = {
+static const struct of_device_id xcan_of_match[] = {
{ .compatible = "xlnx,zynq-can-1.0", },
{ .compatible = "xlnx,axi-can-1.00.a", },
{ /* end of list */ },
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 48e62a34f7f2..18550c7ebe6f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -7,7 +7,7 @@ config NET_DSA_MV88E6XXX
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
- select NET_DSA
+ depends on NET_DSA
select NET_DSA_TAG_TRAILER
---help---
This enables support for the Marvell 88E6060 ethernet switch
@@ -19,7 +19,7 @@ config NET_DSA_MV88E6XXX_NEED_PPU
config NET_DSA_MV88E6131
tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
- select NET_DSA
+ depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_MV88E6XXX_NEED_PPU
select NET_DSA_TAG_DSA
@@ -29,7 +29,7 @@ config NET_DSA_MV88E6131
config NET_DSA_MV88E6123_61_65
tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
- select NET_DSA
+ depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
@@ -38,7 +38,7 @@ config NET_DSA_MV88E6123_61_65
config NET_DSA_MV88E6171
tristate "Marvell 88E6171/6172 ethernet switch chip support"
- select NET_DSA
+ depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
@@ -47,7 +47,7 @@ config NET_DSA_MV88E6171
config NET_DSA_MV88E6352
tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
- select NET_DSA
+ depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
@@ -56,8 +56,7 @@ config NET_DSA_MV88E6352
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
- depends on HAS_IOMEM
- select NET_DSA
+ depends on HAS_IOMEM && NET_DSA
select NET_DSA_TAG_BRCM
select FIXED_PHY
select BCM7XXX_PHY
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4daffb284931..cedb572bf25a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -23,6 +23,7 @@
#include <linux/of_address.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -299,10 +300,14 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
if (port == 7)
intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
- /* Set this port, and only this one to be in the default VLAN */
+ /* Set this port, and only this one to be in the default VLAN,
+ * if member of a bridge, restore its membership prior to
+ * bringing down this port.
+ */
reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
reg &= ~PORT_VLAN_CTRL_MASK;
reg |= (1 << port);
+ reg |= priv->port_sts[port].vlan_ctl_mask;
core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
bcm_sf2_imp_vlan_setup(ds, cpu_port);
@@ -400,6 +405,151 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
return 0;
}
+/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
+ * flush for that port.
+ */
+static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ core_writel(priv, port, CORE_FAST_AGE_PORT);
+
+ reg = core_readl(priv, CORE_FAST_AGE_CTRL);
+ reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
+ core_writel(priv, reg, CORE_FAST_AGE_CTRL);
+
+ do {
+ reg = core_readl(priv, CORE_FAST_AGE_CTRL);
+ if (!(reg & FAST_AGE_STR_DONE))
+ break;
+
+ cpu_relax();
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
+ u32 br_port_mask)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ unsigned int i;
+ u32 reg, p_ctl;
+
+ p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
+
+ for (i = 0; i < priv->hw_params.num_ports; i++) {
+ if (!((1 << i) & br_port_mask))
+ continue;
+
+ /* Add this local port to the remote port VLAN control
+ * membership and update the remote port bitmask
+ */
+ reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
+ reg |= 1 << port;
+ core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
+ priv->port_sts[i].vlan_ctl_mask = reg;
+
+ p_ctl |= 1 << i;
+ }
+
+ /* Configure the local port VLAN control membership to include
+ * remote ports and update the local port bitmask
+ */
+ core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
+ priv->port_sts[port].vlan_ctl_mask = p_ctl;
+
+ return 0;
+}
+
+static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
+ u32 br_port_mask)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ unsigned int i;
+ u32 reg, p_ctl;
+
+ p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
+
+ for (i = 0; i < priv->hw_params.num_ports; i++) {
+ /* Don't touch the remaining ports */
+ if (!((1 << i) & br_port_mask))
+ continue;
+
+ reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
+ reg &= ~(1 << port);
+ core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
+ priv->port_sts[port].vlan_ctl_mask = reg;
+
+ /* Prevent self removal to preserve isolation */
+ if (port != i)
+ p_ctl &= ~(1 << i);
+ }
+
+ core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
+ priv->port_sts[port].vlan_ctl_mask = p_ctl;
+
+ return 0;
+}
+
+static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ u8 hw_state, cur_hw_state;
+ int ret = 0;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+ cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = G_MISTP_DIS_STATE;
+ break;
+ case BR_STATE_LISTENING:
+ hw_state = G_MISTP_LISTEN_STATE;
+ break;
+ case BR_STATE_LEARNING:
+ hw_state = G_MISTP_LEARN_STATE;
+ break;
+ case BR_STATE_FORWARDING:
+ hw_state = G_MISTP_FWD_STATE;
+ break;
+ case BR_STATE_BLOCKING:
+ hw_state = G_MISTP_BLOCK_STATE;
+ break;
+ default:
+ pr_err("%s: invalid STP state: %d\n", __func__, state);
+ return -EINVAL;
+ }
+
+ /* Fast-age ARL entries if we are moving a port from Learning or
+ * Forwarding state to Disabled, Blocking or Listening state
+ */
+ if (cur_hw_state != hw_state) {
+ if (cur_hw_state & 4 && !(hw_state & 4)) {
+ ret = bcm_sf2_sw_fast_age_port(ds, port);
+ if (ret) {
+ pr_err("%s: fast-ageing failed\n", __func__);
+ return ret;
+ }
+ }
+ }
+
+ reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+ reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
+ reg |= hw_state;
+ core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+
+ return 0;
+}
+
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
struct bcm_sf2_priv *priv = dev_id;
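The bridge join/leave handlers added above keep a per-port membership bitmask (vlan_ctl_mask) in software and mirror it into the CORE_PORT_VLAN_CTL_PORT registers: joining sets this port's bit in every bridge member's mask and each member's bit in this port's mask, leaving clears them again while keeping the port's own bit for isolation. A sketch of that bookkeeping on plain arrays, with NUM_PORTS and the masks purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 8

static uint32_t vlan_ctl[NUM_PORTS];     /* per-port "may forward to" mask */

static void br_join(int port, uint32_t br_port_mask)
{
	uint32_t p_ctl = vlan_ctl[port];
	int i;

	for (i = 0; i < NUM_PORTS; i++) {
		if (!(br_port_mask & (1u << i)))
			continue;
		vlan_ctl[i] |= 1u << port;   /* remote port may reach us */
		p_ctl |= 1u << i;            /* we may reach the remote port */
	}
	vlan_ctl[port] = p_ctl;
}

static void br_leave(int port, uint32_t br_port_mask)
{
	uint32_t p_ctl = vlan_ctl[port];
	int i;

	for (i = 0; i < NUM_PORTS; i++) {
		if (!(br_port_mask & (1u << i)))
			continue;
		vlan_ctl[i] &= ~(1u << port);
		if (i != port)               /* keep our own bit for isolation */
			p_ctl &= ~(1u << i);
	}
	vlan_ctl[port] = p_ctl;
}

int main(void)
{
	int p;

	for (p = 0; p < NUM_PORTS; p++)
		vlan_ctl[p] = 1u << p;       /* default: each port only itself */

	br_join(1, (1u << 1) | (1u << 2));
	br_join(2, (1u << 1) | (1u << 2));
	printf("port1=%#x port2=%#x\n", (unsigned)vlan_ctl[1], (unsigned)vlan_ctl[2]);

	br_leave(1, (1u << 1) | (1u << 2));
	printf("port1=%#x port2=%#x\n", (unsigned)vlan_ctl[1], (unsigned)vlan_ctl[2]);
	return 0;
}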
@@ -916,6 +1066,9 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
.port_disable = bcm_sf2_port_disable,
.get_eee = bcm_sf2_sw_get_eee,
.set_eee = bcm_sf2_sw_set_eee,
+ .port_join_bridge = bcm_sf2_sw_br_join,
+ .port_leave_bridge = bcm_sf2_sw_br_leave,
+ .port_stp_update = bcm_sf2_sw_br_set_stp_state,
};
static int __init bcm_sf2_init(void)
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 7b7053d3c5fa..22e2ebf31333 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -46,6 +46,8 @@ struct bcm_sf2_port_status {
unsigned int link;
struct ethtool_eee eee;
+
+ u32 vlan_ctl_mask;
};
struct bcm_sf2_priv {
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index cabdfa5e217a..fa4e6e78c9ea 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -163,6 +163,21 @@
#define EN_CHIP_RST (1 << 6)
#define EN_SW_RESET (1 << 4)
+#define CORE_FAST_AGE_CTRL 0x00220
+#define EN_FAST_AGE_STATIC (1 << 0)
+#define EN_AGE_DYNAMIC (1 << 1)
+#define EN_AGE_PORT (1 << 2)
+#define EN_AGE_VLAN (1 << 3)
+#define EN_AGE_SPT (1 << 4)
+#define EN_AGE_MCAST (1 << 5)
+#define FAST_AGE_STR_DONE (1 << 7)
+
+#define CORE_FAST_AGE_PORT 0x00224
+#define AGE_PORT_MASK 0xf
+
+#define CORE_FAST_AGE_VID 0x00228
+#define AGE_VID_MASK 0x3fff
+
#define CORE_LNKSTS 0x00400
#define LNK_STS_MASK 0x1ff
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index e9c736e1cef3..b4af6d5aff7c 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -25,66 +25,33 @@ static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
if (bus == NULL)
return NULL;
- ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
- if (ret == 0x1212)
+ if (ret == PORT_SWITCH_ID_6123_A1)
return "Marvell 88E6123 (A1)";
- if (ret == 0x1213)
+ if (ret == PORT_SWITCH_ID_6123_A2)
return "Marvell 88E6123 (A2)";
- if ((ret & 0xfff0) == 0x1210)
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6123)
return "Marvell 88E6123";
- if (ret == 0x1612)
+ if (ret == PORT_SWITCH_ID_6161_A1)
return "Marvell 88E6161 (A1)";
- if (ret == 0x1613)
+ if (ret == PORT_SWITCH_ID_6161_A2)
return "Marvell 88E6161 (A2)";
- if ((ret & 0xfff0) == 0x1610)
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6161)
return "Marvell 88E6161";
- if (ret == 0x1652)
+ if (ret == PORT_SWITCH_ID_6165_A1)
return "Marvell 88E6165 (A1)";
- if (ret == 0x1653)
+ if (ret == PORT_SWITCH_ID_6165_A2)
return "Marvell 88e6165 (A2)";
- if ((ret & 0xfff0) == 0x1650)
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6165)
return "Marvell 88E6165";
}
return NULL;
}
-static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
-{
- int i;
- int ret;
- unsigned long timeout;
-
- /* Set all ports to the disabled state. */
- for (i = 0; i < 8; i++) {
- ret = REG_READ(REG_PORT(i), 0x04);
- REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
- }
-
- /* Wait for transmit queues to drain. */
- usleep_range(2000, 4000);
-
- /* Reset the switch. */
- REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- if ((ret & 0xc800) == 0xc800)
- break;
-
- usleep_range(1000, 2000);
- }
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
- return 0;
-}
-
static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
{
int ret;
@@ -222,28 +189,6 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
val |= 0x000c;
REG_WRITE(addr, 0x04, val);
- /* Port Control 1: disable trunking. Also, if this is the
- * CPU port, enable learn messages to be sent to this port.
- */
- REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
- /* Port based VLAN map: give each port its own address
- * database, allow the CPU port to talk to each of the 'real'
- * ports, and allow each of the 'real' ports to only talk to
- * the upstream port.
- */
- val = (p & 0xf) << 12;
- if (dsa_is_cpu_port(ds, p))
- val |= ds->phys_port_mask;
- else
- val |= 1 << dsa_upstream_port(ds);
- REG_WRITE(addr, 0x06, val);
-
- /* Default VLAN ID and priority: don't set a default VLAN
- * ID, and set the default packet priority to zero.
- */
- REG_WRITE(addr, 0x07, 0x0000);
-
/* Port Control 2: don't force a good FCS, set the maximum
* frame size to 10240 bytes, don't let the switch add or
* strip 802.1q tags, don't discard tagged or untagged frames
@@ -288,7 +233,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
*/
REG_WRITE(addr, 0x19, 0x7654);
- return 0;
+ return mv88e6xxx_setup_port_common(ds, p);
}
static int mv88e6123_61_65_setup(struct dsa_switch *ds)
@@ -297,11 +242,23 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
int i;
int ret;
- mutex_init(&ps->smi_mutex);
- mutex_init(&ps->stats_mutex);
- mutex_init(&ps->phy_mutex);
+ ret = mv88e6xxx_setup_common(ds);
+ if (ret < 0)
+ return ret;
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6123:
+ ps->num_ports = 3;
+ break;
+ case PORT_SWITCH_ID_6161:
+ case PORT_SWITCH_ID_6165:
+ ps->num_ports = 6;
+ break;
+ default:
+ return -ENODEV;
+ }
- ret = mv88e6123_61_65_switch_reset(ds);
+ ret = mv88e6xxx_switch_reset(ds, false);
if (ret < 0)
return ret;
@@ -311,7 +268,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < ps->num_ports; i++) {
ret = mv88e6123_61_65_setup_port(ds, i);
if (ret < 0)
return ret;
@@ -320,108 +277,18 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
return 0;
}
-static int mv88e6123_61_65_port_to_phy_addr(int port)
-{
- if (port >= 0 && port <= 4)
- return port;
- return -1;
-}
-
-static int
-mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6123_61_65_port_to_phy_addr(port);
- int ret;
-
- mutex_lock(&ps->phy_mutex);
- ret = mv88e6xxx_phy_read(ds, addr, regnum);
- mutex_unlock(&ps->phy_mutex);
- return ret;
-}
-
-static int
-mv88e6123_61_65_phy_write(struct dsa_switch *ds,
- int port, int regnum, u16 val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6123_61_65_port_to_phy_addr(port);
- int ret;
-
- mutex_lock(&ps->phy_mutex);
- ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
- mutex_unlock(&ps->phy_mutex);
- return ret;
-}
-
-static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
- { "in_good_octets", 8, 0x00, },
- { "in_bad_octets", 4, 0x02, },
- { "in_unicast", 4, 0x04, },
- { "in_broadcasts", 4, 0x06, },
- { "in_multicasts", 4, 0x07, },
- { "in_pause", 4, 0x16, },
- { "in_undersize", 4, 0x18, },
- { "in_fragments", 4, 0x19, },
- { "in_oversize", 4, 0x1a, },
- { "in_jabber", 4, 0x1b, },
- { "in_rx_error", 4, 0x1c, },
- { "in_fcs_error", 4, 0x1d, },
- { "out_octets", 8, 0x0e, },
- { "out_unicast", 4, 0x10, },
- { "out_broadcasts", 4, 0x13, },
- { "out_multicasts", 4, 0x12, },
- { "out_pause", 4, 0x15, },
- { "excessive", 4, 0x11, },
- { "collisions", 4, 0x1e, },
- { "deferred", 4, 0x05, },
- { "single", 4, 0x14, },
- { "multiple", 4, 0x17, },
- { "out_fcs_error", 4, 0x03, },
- { "late", 4, 0x1f, },
- { "hist_64bytes", 4, 0x08, },
- { "hist_65_127bytes", 4, 0x09, },
- { "hist_128_255bytes", 4, 0x0a, },
- { "hist_256_511bytes", 4, 0x0b, },
- { "hist_512_1023bytes", 4, 0x0c, },
- { "hist_1024_max_bytes", 4, 0x0d, },
- { "sw_in_discards", 4, 0x110, },
- { "sw_in_filtered", 2, 0x112, },
- { "sw_out_filtered", 2, 0x113, },
-};
-
-static void
-mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
- mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
- mv88e6123_61_65_hw_stats, port, data);
-}
-
-static void
-mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
- int port, uint64_t *data)
-{
- mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
- mv88e6123_61_65_hw_stats, port, data);
-}
-
-static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
-{
- return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
-}
-
struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_EDSA,
.priv_size = sizeof(struct mv88e6xxx_priv_state),
.probe = mv88e6123_61_65_probe,
.setup = mv88e6123_61_65_setup,
.set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6123_61_65_phy_read,
- .phy_write = mv88e6123_61_65_phy_write,
+ .phy_read = mv88e6xxx_phy_read,
+ .phy_write = mv88e6xxx_phy_write,
.poll_link = mv88e6xxx_poll_link,
- .get_strings = mv88e6123_61_65_get_strings,
- .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats,
- .get_sset_count = mv88e6123_61_65_get_sset_count,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
#ifdef CONFIG_NET_DSA_HWMON
.get_temp = mv88e6xxx_get_temp,
#endif
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 2540ef0142af..e54824fa0d95 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -17,12 +17,6 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-/* Switch product IDs */
-#define ID_6085 0x04a0
-#define ID_6095 0x0950
-#define ID_6131 0x1060
-#define ID_6131_B2 0x1066
-
static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
{
struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -31,56 +25,23 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
if (bus == NULL)
return NULL;
- ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
int ret_masked = ret & 0xfff0;
- if (ret_masked == ID_6085)
+ if (ret_masked == PORT_SWITCH_ID_6085)
return "Marvell 88E6085";
- if (ret_masked == ID_6095)
+ if (ret_masked == PORT_SWITCH_ID_6095)
return "Marvell 88E6095/88E6095F";
- if (ret == ID_6131_B2)
+ if (ret == PORT_SWITCH_ID_6131_B2)
return "Marvell 88E6131 (B2)";
- if (ret_masked == ID_6131)
+ if (ret_masked == PORT_SWITCH_ID_6131)
return "Marvell 88E6131";
}
return NULL;
}
-static int mv88e6131_switch_reset(struct dsa_switch *ds)
-{
- int i;
- int ret;
- unsigned long timeout;
-
- /* Set all ports to the disabled state. */
- for (i = 0; i < 11; i++) {
- ret = REG_READ(REG_PORT(i), 0x04);
- REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
- }
-
- /* Wait for transmit queues to drain. */
- usleep_range(2000, 4000);
-
- /* Reset the switch. */
- REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- if ((ret & 0xc800) == 0xc800)
- break;
-
- usleep_range(1000, 2000);
- }
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
- return 0;
-}
-
static int mv88e6131_setup_global(struct dsa_switch *ds)
{
int ret;
@@ -174,7 +135,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* (100 Mb/s on 6085) full duplex.
*/
if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- if (ps->id == ID_6085)
+ if (ps->id == PORT_SWITCH_ID_6085)
REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
else
REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
@@ -201,35 +162,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
/* On 6085, unknown multicast forward is controlled
* here rather than in Port Control 2 register.
*/
- if (ps->id == ID_6085)
+ if (ps->id == PORT_SWITCH_ID_6085)
val |= 0x0008;
}
if (ds->dsa_port_mask & (1 << p))
val |= 0x0100;
REG_WRITE(addr, 0x04, val);
- /* Port Control 1: disable trunking. Also, if this is the
- * CPU port, enable learn messages to be sent to this port.
- */
- REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
- /* Port based VLAN map: give each port its own address
- * database, allow the CPU port to talk to each of the 'real'
- * ports, and allow each of the 'real' ports to only talk to
- * the upstream port.
- */
- val = (p & 0xf) << 12;
- if (dsa_is_cpu_port(ds, p))
- val |= ds->phys_port_mask;
- else
- val |= 1 << dsa_upstream_port(ds);
- REG_WRITE(addr, 0x06, val);
-
- /* Default VLAN ID and priority: don't set a default VLAN
- * ID, and set the default packet priority to zero.
- */
- REG_WRITE(addr, 0x07, 0x0000);
-
/* Port Control 2: don't force a good FCS, don't use
* VLAN-based, source address-based or destination
* address-based priority overrides, don't let the switch
@@ -242,7 +181,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* If this is the upstream port for this switch, enable
* forwarding of unknown multicast addresses.
*/
- if (ps->id == ID_6085)
+ if (ps->id == PORT_SWITCH_ID_6085)
/* on 6085, bits 3:0 are reserved, bit 6 control ARP
* mirroring, and multicast forward is handled in
* Port Control register.
@@ -278,7 +217,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
*/
REG_WRITE(addr, 0x19, 0x7654);
- return 0;
+ return mv88e6xxx_setup_port_common(ds, p);
}
static int mv88e6131_setup(struct dsa_switch *ds)
@@ -287,13 +226,28 @@ static int mv88e6131_setup(struct dsa_switch *ds)
int i;
int ret;
- mutex_init(&ps->smi_mutex);
+ ret = mv88e6xxx_setup_common(ds);
+ if (ret < 0)
+ return ret;
+
mv88e6xxx_ppu_state_init(ds);
- mutex_init(&ps->stats_mutex);
- ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6085:
+ ps->num_ports = 10;
+ break;
+ case PORT_SWITCH_ID_6095:
+ ps->num_ports = 11;
+ break;
+ case PORT_SWITCH_ID_6131:
+ case PORT_SWITCH_ID_6131_B2:
+ ps->num_ports = 8;
+ break;
+ default:
+ return -ENODEV;
+ }
- ret = mv88e6131_switch_reset(ds);
+ ret = mv88e6xxx_switch_reset(ds, false);
if (ret < 0)
return ret;
@@ -303,7 +257,7 @@ static int mv88e6131_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- for (i = 0; i < 11; i++) {
+ for (i = 0; i < ps->num_ports; i++) {
ret = mv88e6131_setup_port(ds, i);
if (ret < 0)
return ret;
@@ -312,17 +266,24 @@ static int mv88e6131_setup(struct dsa_switch *ds)
return 0;
}
-static int mv88e6131_port_to_phy_addr(int port)
+static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
{
- if (port >= 0 && port <= 11)
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (port >= 0 && port < ps->num_ports)
return port;
- return -1;
+
+ return -EINVAL;
}
static int
mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
{
- int addr = mv88e6131_port_to_phy_addr(port);
+ int addr = mv88e6131_port_to_phy_addr(ds, port);
+
+ if (addr < 0)
+ return addr;
+
return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
}
@@ -330,61 +291,12 @@ static int
mv88e6131_phy_write(struct dsa_switch *ds,
int port, int regnum, u16 val)
{
- int addr = mv88e6131_port_to_phy_addr(port);
- return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
-}
-
-static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
- { "in_good_octets", 8, 0x00, },
- { "in_bad_octets", 4, 0x02, },
- { "in_unicast", 4, 0x04, },
- { "in_broadcasts", 4, 0x06, },
- { "in_multicasts", 4, 0x07, },
- { "in_pause", 4, 0x16, },
- { "in_undersize", 4, 0x18, },
- { "in_fragments", 4, 0x19, },
- { "in_oversize", 4, 0x1a, },
- { "in_jabber", 4, 0x1b, },
- { "in_rx_error", 4, 0x1c, },
- { "in_fcs_error", 4, 0x1d, },
- { "out_octets", 8, 0x0e, },
- { "out_unicast", 4, 0x10, },
- { "out_broadcasts", 4, 0x13, },
- { "out_multicasts", 4, 0x12, },
- { "out_pause", 4, 0x15, },
- { "excessive", 4, 0x11, },
- { "collisions", 4, 0x1e, },
- { "deferred", 4, 0x05, },
- { "single", 4, 0x14, },
- { "multiple", 4, 0x17, },
- { "out_fcs_error", 4, 0x03, },
- { "late", 4, 0x1f, },
- { "hist_64bytes", 4, 0x08, },
- { "hist_65_127bytes", 4, 0x09, },
- { "hist_128_255bytes", 4, 0x0a, },
- { "hist_256_511bytes", 4, 0x0b, },
- { "hist_512_1023bytes", 4, 0x0c, },
- { "hist_1024_max_bytes", 4, 0x0d, },
-};
-
-static void
-mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
- mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
- mv88e6131_hw_stats, port, data);
-}
+ int addr = mv88e6131_port_to_phy_addr(ds, port);
-static void
-mv88e6131_get_ethtool_stats(struct dsa_switch *ds,
- int port, uint64_t *data)
-{
- mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
- mv88e6131_hw_stats, port, data);
-}
+ if (addr < 0)
+ return addr;
-static int mv88e6131_get_sset_count(struct dsa_switch *ds)
-{
- return ARRAY_SIZE(mv88e6131_hw_stats);
+ return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
}
struct dsa_switch_driver mv88e6131_switch_driver = {
@@ -396,9 +308,9 @@ struct dsa_switch_driver mv88e6131_switch_driver = {
.phy_read = mv88e6131_phy_read,
.phy_write = mv88e6131_phy_write,
.poll_link = mv88e6xxx_poll_link,
- .get_strings = mv88e6131_get_strings,
- .get_ethtool_stats = mv88e6131_get_ethtool_stats,
- .get_sset_count = mv88e6131_get_sset_count,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
};
MODULE_ALIAS("platform:mv88e6085");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index aa33d16f2e22..9104efea0e3e 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -25,69 +25,27 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
if (bus == NULL)
return NULL;
- ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
- if ((ret & 0xfff0) == 0x1710)
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
return "Marvell 88E6171";
- if ((ret & 0xfff0) == 0x1720)
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
return "Marvell 88E6172";
}
return NULL;
}
-static int mv88e6171_switch_reset(struct dsa_switch *ds)
-{
- int i;
- int ret;
- unsigned long timeout;
-
- /* Set all ports to the disabled state. */
- for (i = 0; i < 8; i++) {
- ret = REG_READ(REG_PORT(i), 0x04);
- REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
- }
-
- /* Wait for transmit queues to drain. */
- usleep_range(2000, 4000);
-
- /* Reset the switch. */
- REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
-
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- if ((ret & 0xc800) == 0xc800)
- break;
-
- usleep_range(1000, 2000);
- }
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
- /* Enable ports not under DSA, e.g. WAN port */
- for (i = 0; i < 8; i++) {
- if (dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i))
- continue;
-
- ret = REG_READ(REG_PORT(i), 0x04);
- REG_WRITE(REG_PORT(i), 0x04, ret | 0x03);
- }
-
- return 0;
-}
-
static int mv88e6171_setup_global(struct dsa_switch *ds)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
int i;
- /* Disable the PHY polling unit (since there won't be any
- * external PHYs to poll), don't discard packets with
- * excessive collisions, and mask all interrupt sources.
+ /* Discard packets with excessive collisions, mask all
+ * interrupt sources, enable PPU.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
+ REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
/* Set the default address aging time to 5 minutes, and
* enable address learn messages to be sent to all message
@@ -145,7 +103,7 @@ static int mv88e6171_setup_global(struct dsa_switch *ds)
}
/* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
+ for (i = 0; i < ps->num_ports; i++)
REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
/* Clear all trunk mappings. */
@@ -219,28 +177,6 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
val |= 0x000c;
REG_WRITE(addr, 0x04, val);
- /* Port Control 1: disable trunking. Also, if this is the
- * CPU port, enable learn messages to be sent to this port.
- */
- REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
- /* Port based VLAN map: give each port its own address
- * database, allow the CPU port to talk to each of the 'real'
- * ports, and allow each of the 'real' ports to only talk to
- * the upstream port.
- */
- val = (p & 0xf) << 12;
- if (dsa_is_cpu_port(ds, p))
- val |= ds->phys_port_mask;
- else
- val |= 1 << dsa_upstream_port(ds);
- REG_WRITE(addr, 0x06, val);
-
- /* Default VLAN ID and priority: don't set a default VLAN
- * ID, and set the default packet priority to zero.
- */
- REG_WRITE(addr, 0x07, 0x0000);
-
/* Port Control 2: don't force a good FCS, set the maximum
* frame size to 10240 bytes, don't let the switch add or
* strip 802.1q tags, don't discard tagged or untagged frames
@@ -285,19 +221,22 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
*/
REG_WRITE(addr, 0x19, 0x7654);
- return 0;
+ return mv88e6xxx_setup_port_common(ds, p);
}
static int mv88e6171_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
- mutex_init(&ps->smi_mutex);
- mutex_init(&ps->stats_mutex);
+ ret = mv88e6xxx_setup_common(ds);
+ if (ret < 0)
+ return ret;
+
+ ps->num_ports = 7;
- ret = mv88e6171_switch_reset(ds);
+ ret = mv88e6xxx_switch_reset(ds, true);
if (ret < 0)
return ret;
@@ -307,7 +246,7 @@ static int mv88e6171_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < ps->num_ports; i++) {
if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
continue;
@@ -316,96 +255,29 @@ static int mv88e6171_setup(struct dsa_switch *ds)
return ret;
}
- mutex_init(&ps->phy_mutex);
-
return 0;
}
-static int mv88e6171_port_to_phy_addr(int port)
-{
- if (port >= 0 && port <= 4)
- return port;
- return -1;
-}
-
-static int
-mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6171_port_to_phy_addr(port);
- int ret;
- mutex_lock(&ps->phy_mutex);
- ret = mv88e6xxx_phy_read(ds, addr, regnum);
- mutex_unlock(&ps->phy_mutex);
- return ret;
-}
-
-static int
-mv88e6171_phy_write(struct dsa_switch *ds,
- int port, int regnum, u16 val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6171_port_to_phy_addr(port);
- int ret;
+ if (ps->id == PORT_SWITCH_ID_6172)
+ return mv88e6xxx_get_eee(ds, port, e);
- mutex_lock(&ps->phy_mutex);
- ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
- mutex_unlock(&ps->phy_mutex);
- return ret;
+ return -EOPNOTSUPP;
}
-static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = {
- { "in_good_octets", 8, 0x00, },
- { "in_bad_octets", 4, 0x02, },
- { "in_unicast", 4, 0x04, },
- { "in_broadcasts", 4, 0x06, },
- { "in_multicasts", 4, 0x07, },
- { "in_pause", 4, 0x16, },
- { "in_undersize", 4, 0x18, },
- { "in_fragments", 4, 0x19, },
- { "in_oversize", 4, 0x1a, },
- { "in_jabber", 4, 0x1b, },
- { "in_rx_error", 4, 0x1c, },
- { "in_fcs_error", 4, 0x1d, },
- { "out_octets", 8, 0x0e, },
- { "out_unicast", 4, 0x10, },
- { "out_broadcasts", 4, 0x13, },
- { "out_multicasts", 4, 0x12, },
- { "out_pause", 4, 0x15, },
- { "excessive", 4, 0x11, },
- { "collisions", 4, 0x1e, },
- { "deferred", 4, 0x05, },
- { "single", 4, 0x14, },
- { "multiple", 4, 0x17, },
- { "out_fcs_error", 4, 0x03, },
- { "late", 4, 0x1f, },
- { "hist_64bytes", 4, 0x08, },
- { "hist_65_127bytes", 4, 0x09, },
- { "hist_128_255bytes", 4, 0x0a, },
- { "hist_256_511bytes", 4, 0x0b, },
- { "hist_512_1023bytes", 4, 0x0c, },
- { "hist_1024_max_bytes", 4, 0x0d, },
-};
-
-static void
-mv88e6171_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev, struct ethtool_eee *e)
{
- mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6171_hw_stats),
- mv88e6171_hw_stats, port, data);
-}
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-static void
-mv88e6171_get_ethtool_stats(struct dsa_switch *ds,
- int port, uint64_t *data)
-{
- mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6171_hw_stats),
- mv88e6171_hw_stats, port, data);
-}
+ if (ps->id == PORT_SWITCH_ID_6172)
+ return mv88e6xxx_set_eee(ds, port, phydev, e);
-static int mv88e6171_get_sset_count(struct dsa_switch *ds)
-{
- return ARRAY_SIZE(mv88e6171_hw_stats);
+ return -EOPNOTSUPP;
}
struct dsa_switch_driver mv88e6171_switch_driver = {
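The mv88e6171 driver now covers both the 6171 and the EEE-capable 6172, so the new get/set EEE wrappers above gate on ps->id and return -EOPNOTSUPP for models without the feature. A sketch of that capability-gating pattern, with placeholder ID values and EOPNOTSUPP from <errno.h>:

#include <errno.h>
#include <stdio.h>

#define ID_WITH_EEE    0x1720    /* placeholder for the EEE-capable model */
#define ID_WITHOUT_EEE 0x1710    /* placeholder for the model without EEE */

struct sw_state {
	int id;                  /* detected at probe time */
};

static int get_eee(const struct sw_state *ps)
{
	if (ps->id == ID_WITH_EEE)
		return 0;        /* would call the shared EEE helper here */
	return -EOPNOTSUPP;      /* feature not present on this model */
}

int main(void)
{
	struct sw_state a = { .id = ID_WITH_EEE };
	struct sw_state b = { .id = ID_WITHOUT_EEE };

	printf("%d %d\n", get_eee(&a), get_eee(&b));
	return 0;
}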
@@ -414,17 +286,25 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.probe = mv88e6171_probe,
.setup = mv88e6171_setup,
.set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6171_phy_read,
- .phy_write = mv88e6171_phy_write,
+ .phy_read = mv88e6xxx_phy_read_indirect,
+ .phy_write = mv88e6xxx_phy_write_indirect,
.poll_link = mv88e6xxx_poll_link,
- .get_strings = mv88e6171_get_strings,
- .get_ethtool_stats = mv88e6171_get_ethtool_stats,
- .get_sset_count = mv88e6171_get_sset_count,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
+ .set_eee = mv88e6171_set_eee,
+ .get_eee = mv88e6171_get_eee,
#ifdef CONFIG_NET_DSA_HWMON
.get_temp = mv88e6xxx_get_temp,
#endif
.get_regs_len = mv88e6xxx_get_regs_len,
.get_regs = mv88e6xxx_get_regs,
+ .port_join_bridge = mv88e6xxx_join_bridge,
+ .port_leave_bridge = mv88e6xxx_leave_bridge,
+ .port_stp_update = mv88e6xxx_port_stp_update,
+ .fdb_add = mv88e6xxx_port_fdb_add,
+ .fdb_del = mv88e6xxx_port_fdb_del,
+ .fdb_getnext = mv88e6xxx_port_fdb_getnext,
};
MODULE_ALIAS("platform:mv88e6171");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index e13adc7b3dda..126c11b81e75 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -22,59 +22,6 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
-{
- unsigned long timeout = jiffies + HZ / 10;
-
- while (time_before(jiffies, timeout)) {
- int ret;
-
- ret = REG_READ(reg, offset);
- if (!(ret & mask))
- return 0;
-
- usleep_range(1000, 2000);
- }
- return -ETIMEDOUT;
-}
-
-static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
-{
- return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000);
-}
-
-static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
-{
- return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800);
-}
-
-static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
-{
- return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000);
-}
-
-static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
-{
- int ret;
-
- REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum);
-
- ret = mv88e6352_phy_wait(ds);
- if (ret < 0)
- return ret;
-
- return REG_READ(REG_GLOBAL2, 0x19);
-}
-
-static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum,
- u16 val)
-{
- REG_WRITE(REG_GLOBAL2, 0x19, val);
- REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum);
-
- return mv88e6352_phy_wait(ds);
-}
-
static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
{
struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -83,58 +30,24 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
if (bus == NULL)
return NULL;
- ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
- if ((ret & 0xfff0) == 0x1760)
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
return "Marvell 88E6176";
- if (ret == 0x3521)
+ if (ret == PORT_SWITCH_ID_6352_A0)
return "Marvell 88E6352 (A0)";
- if (ret == 0x3522)
+ if (ret == PORT_SWITCH_ID_6352_A1)
return "Marvell 88E6352 (A1)";
- if ((ret & 0xfff0) == 0x3520)
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6352)
return "Marvell 88E6352";
}
return NULL;
}
-static int mv88e6352_switch_reset(struct dsa_switch *ds)
-{
- unsigned long timeout;
- int ret;
- int i;
-
- /* Set all ports to the disabled state. */
- for (i = 0; i < 7; i++) {
- ret = REG_READ(REG_PORT(i), 0x04);
- REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
- }
-
- /* Wait for transmit queues to drain. */
- usleep_range(2000, 4000);
-
- /* Reset the switch. Keep PPU active (bit 14, undocumented).
- * The PPU needs to be active to support indirect phy register
- * accesses through global registers 0x18 and 0x19.
- */
- REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
-
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- if ((ret & 0x8800) == 0x8800)
- break;
- usleep_range(1000, 2000);
- }
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
- return 0;
-}
-
static int mv88e6352_setup_global(struct dsa_switch *ds)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
int i;
@@ -205,7 +118,7 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
/* Disable ingress rate limiting by resetting all ingress
* rate limit registers to their initial state.
*/
- for (i = 0; i < 7; i++)
+ for (i = 0; i < ps->num_ports; i++)
REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
/* Initialise cross-chip port VLAN table to reset defaults. */
@@ -268,28 +181,6 @@ static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
val |= 0x000c;
REG_WRITE(addr, 0x04, val);
- /* Port Control 1: disable trunking. Also, if this is the
- * CPU port, enable learn messages to be sent to this port.
- */
- REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
-
- /* Port based VLAN map: give each port its own address
- * database, allow the CPU port to talk to each of the 'real'
- * ports, and allow each of the 'real' ports to only talk to
- * the upstream port.
- */
- val = (p & 0xf) << 12;
- if (dsa_is_cpu_port(ds, p))
- val |= ds->phys_port_mask;
- else
- val |= 1 << dsa_upstream_port(ds);
- REG_WRITE(addr, 0x06, val);
-
- /* Default VLAN ID and priority: don't set a default VLAN
- * ID, and set the default packet priority to zero.
- */
- REG_WRITE(addr, 0x07, 0x0000);
-
/* Port Control 2: don't force a good FCS, set the maximum
* frame size to 10240 bytes, don't let the switch add or
* strip 802.1q tags, don't discard tagged or untagged frames
@@ -334,53 +225,18 @@ static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
*/
REG_WRITE(addr, 0x19, 0x7654);
- return 0;
+ return mv88e6xxx_setup_port_common(ds, p);
}
#ifdef CONFIG_NET_DSA_HWMON
-static int mv88e6352_phy_page_read(struct dsa_switch *ds,
- int port, int page, int reg)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- mutex_lock(&ps->phy_mutex);
- ret = __mv88e6352_phy_write(ds, port, 0x16, page);
- if (ret < 0)
- goto error;
- ret = __mv88e6352_phy_read(ds, port, reg);
-error:
- __mv88e6352_phy_write(ds, port, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
- return ret;
-}
-
-static int mv88e6352_phy_page_write(struct dsa_switch *ds,
- int port, int page, int reg, int val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- mutex_lock(&ps->phy_mutex);
- ret = __mv88e6352_phy_write(ds, port, 0x16, page);
- if (ret < 0)
- goto error;
-
- ret = __mv88e6352_phy_write(ds, port, reg, val);
-error:
- __mv88e6352_phy_write(ds, port, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
- return ret;
-}
-
static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
{
int ret;
*temp = 0;
- ret = mv88e6352_phy_page_read(ds, 0, 6, 27);
+ ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
if (ret < 0)
return ret;
@@ -395,7 +251,7 @@ static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
*temp = 0;
- ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
if (ret < 0)
return ret;
@@ -408,11 +264,11 @@ static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
{
int ret;
- ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
if (ret < 0)
return ret;
temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- return mv88e6352_phy_page_write(ds, 0, 6, 26,
+ return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
(ret & 0xe0ff) | (temp << 8));
}
@@ -422,7 +278,7 @@ static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
*alarm = false;
- ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
if (ret < 0)
return ret;
@@ -438,14 +294,15 @@ static int mv88e6352_setup(struct dsa_switch *ds)
int ret;
int i;
- mutex_init(&ps->smi_mutex);
- mutex_init(&ps->stats_mutex);
- mutex_init(&ps->phy_mutex);
- mutex_init(&ps->eeprom_mutex);
+ ret = mv88e6xxx_setup_common(ds);
+ if (ret < 0)
+ return ret;
- ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+ ps->num_ports = 7;
- ret = mv88e6352_switch_reset(ds);
+ mutex_init(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_switch_reset(ds, true);
if (ret < 0)
return ret;
@@ -455,7 +312,7 @@ static int mv88e6352_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ps->num_ports; i++) {
ret = mv88e6352_setup_port(ds, i);
if (ret < 0)
return ret;
@@ -464,83 +321,6 @@ static int mv88e6352_setup(struct dsa_switch *ds)
return 0;
}
-static int mv88e6352_port_to_phy_addr(int port)
-{
- if (port >= 0 && port <= 4)
- return port;
- return -EINVAL;
-}
-
-static int
-mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6352_port_to_phy_addr(port);
- int ret;
-
- if (addr < 0)
- return addr;
-
- mutex_lock(&ps->phy_mutex);
- ret = __mv88e6352_phy_read(ds, addr, regnum);
- mutex_unlock(&ps->phy_mutex);
-
- return ret;
-}
-
-static int
-mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6352_port_to_phy_addr(port);
- int ret;
-
- if (addr < 0)
- return addr;
-
- mutex_lock(&ps->phy_mutex);
- ret = __mv88e6352_phy_write(ds, addr, regnum, val);
- mutex_unlock(&ps->phy_mutex);
-
- return ret;
-}
-
-static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
- { "in_good_octets", 8, 0x00, },
- { "in_bad_octets", 4, 0x02, },
- { "in_unicast", 4, 0x04, },
- { "in_broadcasts", 4, 0x06, },
- { "in_multicasts", 4, 0x07, },
- { "in_pause", 4, 0x16, },
- { "in_undersize", 4, 0x18, },
- { "in_fragments", 4, 0x19, },
- { "in_oversize", 4, 0x1a, },
- { "in_jabber", 4, 0x1b, },
- { "in_rx_error", 4, 0x1c, },
- { "in_fcs_error", 4, 0x1d, },
- { "out_octets", 8, 0x0e, },
- { "out_unicast", 4, 0x10, },
- { "out_broadcasts", 4, 0x13, },
- { "out_multicasts", 4, 0x12, },
- { "out_pause", 4, 0x15, },
- { "excessive", 4, 0x11, },
- { "collisions", 4, 0x1e, },
- { "deferred", 4, 0x05, },
- { "single", 4, 0x14, },
- { "multiple", 4, 0x17, },
- { "out_fcs_error", 4, 0x03, },
- { "late", 4, 0x1f, },
- { "hist_64bytes", 4, 0x08, },
- { "hist_65_127bytes", 4, 0x09, },
- { "hist_128_255bytes", 4, 0x0a, },
- { "hist_256_511bytes", 4, 0x0b, },
- { "hist_512_1023bytes", 4, 0x0c, },
- { "hist_1024_max_bytes", 4, 0x0d, },
- { "sw_in_discards", 4, 0x110, },
- { "sw_in_filtered", 2, 0x112, },
- { "sw_out_filtered", 2, 0x113, },
-};
-
static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -553,7 +333,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
if (ret < 0)
goto error;
- ret = mv88e6352_eeprom_busy_wait(ds);
+ ret = mv88e6xxx_eeprom_busy_wait(ds);
if (ret < 0)
goto error;
@@ -576,7 +356,7 @@ static int mv88e6352_get_eeprom(struct dsa_switch *ds,
eeprom->magic = 0xc3ec4951;
- ret = mv88e6352_eeprom_load_wait(ds);
+ ret = mv88e6xxx_eeprom_load_wait(ds);
if (ret < 0)
return ret;
@@ -657,7 +437,7 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
if (ret < 0)
goto error;
- ret = mv88e6352_eeprom_busy_wait(ds);
+ ret = mv88e6xxx_eeprom_busy_wait(ds);
error:
mutex_unlock(&ps->eeprom_mutex);
return ret;
@@ -681,7 +461,7 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds,
len = eeprom->len;
eeprom->len = 0;
- ret = mv88e6352_eeprom_load_wait(ds);
+ ret = mv88e6xxx_eeprom_load_wait(ds);
if (ret < 0)
return ret;
@@ -739,37 +519,20 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds,
return 0;
}
-static void
-mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
-{
- mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
- mv88e6352_hw_stats, port, data);
-}
-
-static void
-mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
-{
- mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
- mv88e6352_hw_stats, port, data);
-}
-
-static int mv88e6352_get_sset_count(struct dsa_switch *ds)
-{
- return ARRAY_SIZE(mv88e6352_hw_stats);
-}
-
struct dsa_switch_driver mv88e6352_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_EDSA,
.priv_size = sizeof(struct mv88e6xxx_priv_state),
.probe = mv88e6352_probe,
.setup = mv88e6352_setup,
.set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6352_phy_read,
- .phy_write = mv88e6352_phy_write,
+ .phy_read = mv88e6xxx_phy_read_indirect,
+ .phy_write = mv88e6xxx_phy_write_indirect,
.poll_link = mv88e6xxx_poll_link,
- .get_strings = mv88e6352_get_strings,
- .get_ethtool_stats = mv88e6352_get_ethtool_stats,
- .get_sset_count = mv88e6352_get_sset_count,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
+ .set_eee = mv88e6xxx_set_eee,
+ .get_eee = mv88e6xxx_get_eee,
#ifdef CONFIG_NET_DSA_HWMON
.get_temp = mv88e6352_get_temp,
.get_temp_limit = mv88e6352_get_temp_limit,
@@ -780,6 +543,12 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
.set_eeprom = mv88e6352_set_eeprom,
.get_regs_len = mv88e6xxx_get_regs_len,
.get_regs = mv88e6xxx_get_regs,
+ .port_join_bridge = mv88e6xxx_join_bridge,
+ .port_leave_bridge = mv88e6xxx_leave_bridge,
+ .port_stp_update = mv88e6xxx_port_stp_update,
+ .fdb_add = mv88e6xxx_port_fdb_add,
+ .fdb_del = mv88e6xxx_port_fdb_del,
+ .fdb_getnext = mv88e6xxx_port_fdb_getnext,
};
MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 3e7e31a6abb7..cf309aa92802 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -9,6 +9,8 @@
*/
#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -31,11 +33,11 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
int i;
for (i = 0; i < 16; i++) {
- ret = mdiobus_read(bus, sw_addr, 0);
+ ret = mdiobus_read(bus, sw_addr, SMI_CMD);
if (ret < 0)
return ret;
- if ((ret & 0x8000) == 0)
+ if ((ret & SMI_CMD_BUSY) == 0)
return 0;
}
@@ -55,7 +57,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
return ret;
/* Transmit the read command. */
- ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
+ ret = mdiobus_write(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_READ | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -65,26 +68,23 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
return ret;
/* Read the data. */
- ret = mdiobus_read(bus, sw_addr, 1);
+ ret = mdiobus_read(bus, sw_addr, SMI_DATA);
if (ret < 0)
return ret;
return ret & 0xffff;
}
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
int ret;
if (bus == NULL)
return -EINVAL;
- mutex_lock(&ps->smi_mutex);
ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
- mutex_unlock(&ps->smi_mutex);
-
if (ret < 0)
return ret;
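The hunks above only rename magic numbers into SMI_CMD/SMI_DATA/SMI_CMD_* symbols, but they make the indirect read sequence explicit: poll the command register until BUSY clears (up to 16 tries), issue the read command encoding the device address and register, poll again, then fetch the result from the data register. Below is a sketch of that sequence against a tiny simulated bus; the command encoding is a placeholder and the "completion" is faked in software.

#include <stdint.h>
#include <stdio.h>

#define SMI_CMD        0
#define SMI_DATA       1
#define SMI_CMD_BUSY   (1u << 15)
#define SMI_CMD_READ   0x9800u   /* placeholder encoding of "clause-22 read" */

static uint16_t bus[2];          /* [SMI_CMD], [SMI_DATA] */

static int smi_wait_ready(void)
{
	int i;

	for (i = 0; i < 16; i++)                      /* bounded poll, as above */
		if (!(bus[SMI_CMD] & SMI_CMD_BUSY))
			return 0;
	return -1;
}

static int smi_read(int addr, int reg)
{
	if (smi_wait_ready() < 0)
		return -1;
	bus[SMI_CMD] = SMI_CMD_READ | (addr << 5) | reg;   /* issue command */
	bus[SMI_DATA] = 0xbeef;                            /* simulated result */
	bus[SMI_CMD] &= ~SMI_CMD_BUSY;                     /* simulated completion */
	if (smi_wait_ready() < 0)
		return -1;
	return bus[SMI_DATA] & 0xffff;                     /* fetch the result */
}

int main(void)
{
	printf("0x%04x\n", (unsigned)smi_read(0x10, 0x03));
	return 0;
}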
@@ -94,6 +94,18 @@ int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
return ret;
}
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = _mv88e6xxx_reg_read(ds, addr, reg);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int reg, u16 val)
{
@@ -108,12 +120,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
return ret;
/* Transmit the data to write. */
- ret = mdiobus_write(bus, sw_addr, 1, val);
+ ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
if (ret < 0)
return ret;
/* Transmit the write command. */
- ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
+ ret = mdiobus_write(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -125,11 +138,11 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
return 0;
}
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
+ u16 val)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
- int ret;
if (bus == NULL)
return -EINVAL;
@@ -137,8 +150,16 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, val);
+ return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+}
+
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
mutex_lock(&ps->smi_mutex);
- ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+ ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -147,26 +168,26 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
int mv88e6xxx_config_prio(struct dsa_switch *ds)
{
/* Configure the IP ToS mapping registers. */
- REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
- REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
- REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
- REG_WRITE(REG_GLOBAL, 0x13, 0x5555);
- REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa);
- REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa);
- REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
- REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
/* Configure the IEEE 802.1p priority mapping register. */
- REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
return 0;
}
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
- REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
- REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
- REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
return 0;
}
@@ -180,12 +201,13 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
int j;
/* Write the MAC address byte. */
- REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+ GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
/* Wait for the write to complete. */
for (j = 0; j < 16; j++) {
- ret = REG_READ(REG_GLOBAL2, 0x0d);
- if ((ret & 0x8000) == 0)
+ ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
+ if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
break;
}
if (j == 16)
@@ -195,14 +217,17 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
return 0;
}
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
if (addr >= 0)
return mv88e6xxx_reg_read(ds, addr, regnum);
return 0xffff;
}
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
+ u16 val)
{
if (addr >= 0)
return mv88e6xxx_reg_write(ds, addr, regnum, val);
@@ -215,14 +240,16 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
int ret;
unsigned long timeout;
- ret = REG_READ(REG_GLOBAL, 0x04);
- REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
+ ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+ ret & ~GLOBAL_CONTROL_PPU_ENABLE);
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
+ ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
usleep_range(1000, 2000);
- if ((ret & 0xc000) != 0xc000)
+ if ((ret & GLOBAL_STATUS_PPU_MASK) !=
+ GLOBAL_STATUS_PPU_POLLING)
return 0;
}
@@ -234,14 +261,15 @@ static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
int ret;
unsigned long timeout;
- ret = REG_READ(REG_GLOBAL, 0x04);
- REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
+ ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
+ ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
usleep_range(1000, 2000);
- if ((ret & 0xc000) == 0xc000)
+ if ((ret & GLOBAL_STATUS_PPU_MASK) ==
+ GLOBAL_STATUS_PPU_POLLING)
return 0;
}
@@ -362,11 +390,12 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
link = 0;
if (dev->flags & IFF_UP) {
- port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00);
+ port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
+ PORT_STATUS);
if (port_status < 0)
continue;
- link = !!(port_status & 0x0800);
+ link = !!(port_status & PORT_STATUS_LINK);
}
if (!link) {
@@ -377,22 +406,22 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
continue;
}
- switch (port_status & 0x0300) {
- case 0x0000:
+ switch (port_status & PORT_STATUS_SPEED_MASK) {
+ case PORT_STATUS_SPEED_10:
speed = 10;
break;
- case 0x0100:
+ case PORT_STATUS_SPEED_100:
speed = 100;
break;
- case 0x0200:
+ case PORT_STATUS_SPEED_1000:
speed = 1000;
break;
default:
speed = -1;
break;
}
- duplex = (port_status & 0x0400) ? 1 : 0;
- fc = (port_status & 0x8000) ? 1 : 0;
+ duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
+ fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
if (!netif_carrier_ok(dev)) {
netdev_info(dev,
@@ -405,14 +434,27 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
}
}
+static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6352:
+ case PORT_SWITCH_ID_6172:
+ case PORT_SWITCH_ID_6176:
+ return true;
+ }
+ return false;
+}
+
static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
int ret;
int i;
for (i = 0; i < 10; i++) {
- ret = REG_READ(REG_GLOBAL, 0x1d);
- if ((ret & 0x8000) == 0)
+ ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
+ if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
@@ -423,8 +465,13 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
int ret;
+ if (mv88e6xxx_6352_family(ds))
+ port = (port + 1) << 5;
+
/* Snapshot the hardware statistics counters for this port. */
- REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
+ REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
/* Wait for the snapshotting to complete. */
ret = mv88e6xxx_stats_wait(ds);
@@ -441,7 +488,9 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
*val = 0;
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat);
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED |
+ GLOBAL_STATS_OP_HIST_RX_TX | stat);
if (ret < 0)
return;
@@ -449,22 +498,77 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
if (ret < 0)
return;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e);
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
if (ret < 0)
return;
_val = ret << 16;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f);
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
if (ret < 0)
return;
*val = _val | ret;
}
-void mv88e6xxx_get_strings(struct dsa_switch *ds,
- int nr_stats, struct mv88e6xxx_hw_stat *stats,
- int port, uint8_t *data)
+static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+ /* Not all devices have the following counters */
+ { "sw_in_discards", 4, 0x110, },
+ { "sw_in_filtered", 2, 0x112, },
+ { "sw_out_filtered", 2, 0x113, },
+
+};
+
+static bool have_sw_in_discards(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
+ case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
+ case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
+ case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
+ case PORT_SWITCH_ID_6352:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
+ int nr_stats,
+ struct mv88e6xxx_hw_stat *stats,
+ int port, uint8_t *data)
{
int i;
@@ -474,9 +578,10 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds,
}
}
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
- int nr_stats, struct mv88e6xxx_hw_stat *stats,
- int port, uint64_t *data)
+static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int nr_stats,
+ struct mv88e6xxx_hw_stat *stats,
+ int port, uint64_t *data)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
@@ -497,8 +602,6 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
u32 high = 0;
if (s->reg >= 0x100) {
- int ret;
-
ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
s->reg - 0x100);
if (ret < 0)
@@ -524,6 +627,39 @@ error:
mutex_unlock(&ps->stats_mutex);
}
+/* All the statistics in the table */
+void
+mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ if (have_sw_in_discards(ds))
+ _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
+ mv88e6xxx_hw_stats, port, data);
+ else
+ _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
+ mv88e6xxx_hw_stats, port, data);
+}
+
+int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+{
+ if (have_sw_in_discards(ds))
+ return ARRAY_SIZE(mv88e6xxx_hw_stats);
+ return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
+}
+
+void
+mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int port, uint64_t *data)
+{
+ if (have_sw_in_discards(ds))
+ _mv88e6xxx_get_ethtool_stats(
+ ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
+ mv88e6xxx_hw_stats, port, data);
+ else
+ _mv88e6xxx_get_ethtool_stats(
+ ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
+ mv88e6xxx_hw_stats, port, data);
+}
+
int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
return 32 * sizeof(u16);
@@ -560,42 +696,756 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
mutex_lock(&ps->phy_mutex);
- ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
if (ret < 0)
goto error;
/* Enable temperature sensor */
- ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
if (ret < 0)
goto error;
- ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
if (val < 0) {
ret = val;
goto error;
}
/* Disable temperature sensor */
- ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
if (ret < 0)
goto error;
*temp = ((val & 0x1f) - 5) * 5;
error:
- mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+ _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
mutex_unlock(&ps->phy_mutex);
return ret;
}
#endif /* CONFIG_NET_DSA_HWMON */
+static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+ unsigned long timeout = jiffies + HZ / 10;
+
+ while (time_before(jiffies, timeout)) {
+ int ret;
+
+ ret = REG_READ(reg, offset);
+ if (!(ret & mask))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+ return -ETIMEDOUT;
+}
+
+int mv88e6xxx_phy_wait(struct dsa_switch *ds)
+{
+ return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_BUSY);
+}
+
+int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
+{
+ return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_LOAD);
+}
+
+int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
+{
+ return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_BUSY);
+}
+
+/* Must be called with SMI lock held */
+static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+ unsigned long timeout = jiffies + HZ / 10;
+
+ while (time_before(jiffies, timeout)) {
+ int ret;
+
+ ret = _mv88e6xxx_reg_read(ds, reg, offset);
+ if (ret < 0)
+ return ret;
+ if (!(ret & mask))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+ return -ETIMEDOUT;
+}
+
+/* Must be called with SMI lock held */
+static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
+{
+ return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
+ GLOBAL_ATU_OP_BUSY);
+}
+
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
+ int regnum)
+{
+ int ret;
+
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
+
+ ret = mv88e6xxx_phy_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
+}
+
+/* Must be called with phy mutex held */
+static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
+ int regnum, u16 val)
+{
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
+
+ return mv88e6xxx_phy_wait(ds);
+}
+
+int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int reg;
+
+ mutex_lock(&ps->phy_mutex);
+
+ reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+ if (reg < 0)
+ goto out;
+
+ e->eee_enabled = !!(reg & 0x0200);
+ e->tx_lpi_enabled = !!(reg & 0x0100);
+
+ reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ if (reg < 0)
+ goto out;
+
+ e->eee_active = !!(reg & PORT_STATUS_EEE);
+ reg = 0;
+
+out:
+ mutex_unlock(&ps->phy_mutex);
+ return reg;
+}
+
+int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev, struct ethtool_eee *e)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int reg;
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+
+ ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+ if (ret < 0)
+ goto out;
+
+ reg = ret & ~0x0300;
+ if (e->eee_enabled)
+ reg |= 0x0200;
+ if (e->tx_lpi_enabled)
+ reg |= 0x0100;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
+out:
+ mutex_unlock(&ps->phy_mutex);
+
+ return ret;
+}
+
+static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
+{
+ int ret;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_atu_wait(ds);
+}
+
+static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
+{
+ int ret;
+
+ ret = _mv88e6xxx_atu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
+}
+
+static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int reg, ret = 0;
+ u8 oldstate;
+
+ mutex_lock(&ps->smi_mutex);
+
+ reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
+ if (reg < 0) {
+ ret = reg;
+ goto abort;
+ }
+
+ oldstate = reg & PORT_CONTROL_STATE_MASK;
+ if (oldstate != state) {
+ /* Flush forwarding database if we're moving a port
+ * from Learning or Forwarding state to Disabled or
+ * Blocking or Listening state.
+ */
+ if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
+ state <= PORT_CONTROL_STATE_BLOCKING) {
+ ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
+ if (ret)
+ goto abort;
+ }
+ reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
+ reg);
+ }
+
+abort:
+ mutex_unlock(&ps->smi_mutex);
+ return ret;
+}
+
+/* Must be called with smi lock held */
+static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u8 fid = ps->fid[port];
+ u16 reg = fid << 12;
+
+ if (dsa_is_cpu_port(ds, port))
+ reg |= ds->phys_port_mask;
+ else
+ reg |= (ps->bridge_mask[fid] |
+ (1 << dsa_upstream_port(ds))) & ~(1 << port);
+
+ return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
+}
+
+/* Must be called with smi lock held */
+static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int port;
+ u32 mask;
+ int ret;
+
+ mask = ds->phys_port_mask;
+ while (mask) {
+ port = __ffs(mask);
+ mask &= ~(1 << port);
+ if (ps->fid[port] != fid)
+ continue;
+
+ ret = _mv88e6xxx_update_port_config(ds, port);
+ if (ret)
+ return ret;
+ }
+
+ return _mv88e6xxx_flush_fid(ds, fid);
+}
+
+/* Bridge handling functions */
+
+int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret = 0;
+ u32 nmask;
+ int fid;
+
+ /* If the bridge group is not empty, join that group.
+ * Otherwise create a new group.
+ */
+ fid = ps->fid[port];
+ nmask = br_port_mask & ~(1 << port);
+ if (nmask)
+ fid = ps->fid[__ffs(nmask)];
+
+ nmask = ps->bridge_mask[fid] | (1 << port);
+ if (nmask != br_port_mask) {
+ netdev_err(ds->ports[port],
+ "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
+ fid, br_port_mask, nmask);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ps->smi_mutex);
+
+ ps->bridge_mask[fid] = br_port_mask;
+
+ if (fid != ps->fid[port]) {
+ ps->fid_mask |= 1 << ps->fid[port];
+ ps->fid[port] = fid;
+ ret = _mv88e6xxx_update_bridge_config(ds, fid);
+ }
+
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u8 fid, newfid;
+ int ret;
+
+ fid = ps->fid[port];
+
+ if (ps->bridge_mask[fid] != br_port_mask) {
+ netdev_err(ds->ports[port],
+ "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
+ fid, br_port_mask, ps->bridge_mask[fid]);
+ return -EINVAL;
+ }
+
+ /* If the port was the last port of a bridge, we are done.
+ * Otherwise assign a new fid to the port, and fix up
+ * the bridge configuration.
+ */
+ if (br_port_mask == (1 << port))
+ return 0;
+
+ mutex_lock(&ps->smi_mutex);
+
+ newfid = __ffs(ps->fid_mask);
+ ps->fid[port] = newfid;
+ ps->fid_mask &= (1 << newfid);
+ ps->bridge_mask[fid] &= ~(1 << port);
+ ps->bridge_mask[newfid] = 1 << port;
+
+ ret = _mv88e6xxx_update_bridge_config(ds, fid);
+ if (!ret)
+ ret = _mv88e6xxx_update_bridge_config(ds, newfid);
+
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = PORT_CONTROL_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = PORT_CONTROL_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = PORT_CONTROL_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = PORT_CONTROL_STATE_FORWARDING;
+ break;
+ }
+
+ netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
+
+ /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
+ * so we can not update the port state directly but need to schedule it.
+ */
+ ps->port_state[port] = stp_state;
+ set_bit(port, &ps->port_state_update_mask);
+ schedule_work(&ps->bridge_work);
+
+ return 0;
+}
+
+static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
+ const unsigned char *addr)
+{
+ int i, ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = _mv88e6xxx_reg_write(
+ ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+ (addr[i * 2] << 8) | addr[i * 2 + 1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
+{
+ int i, ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ GLOBAL_ATU_MAC_01 + i);
+ if (ret < 0)
+ return ret;
+ addr[i * 2] = ret >> 8;
+ addr[i * 2 + 1] = ret & 0xff;
+ }
+
+ return 0;
+}
+
+static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
+ const unsigned char *addr, int state)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u8 fid = ps->fid[port];
+ int ret;
+
+ ret = _mv88e6xxx_atu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ ret = __mv88e6xxx_write_addr(ds, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
+ (0x10 << port) | state);
+ if (ret)
+ return ret;
+
+ ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
+
+ return ret;
+}
+
+int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ int state = is_multicast_ether_addr(addr) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+ GLOBAL_ATU_DATA_STATE_UNUSED);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
+ unsigned char *addr, bool *is_static)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u8 fid = ps->fid[port];
+ int ret, state;
+
+ ret = _mv88e6xxx_atu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ ret = __mv88e6xxx_write_addr(ds, addr);
+ if (ret < 0)
+ return ret;
+
+ do {
+ ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+ if (ret < 0)
+ return ret;
+ state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+ if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ return -ENOENT;
+ } while (!(((ret >> 4) & 0xff) & (1 << port)));
+
+ ret = __mv88e6xxx_read_addr(ds, addr);
+ if (ret < 0)
+ return ret;
+
+ *is_static = state == (is_multicast_ether_addr(addr) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+ return 0;
+}
+
+/* get next entry for port */
+int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
+ unsigned char *addr, bool *is_static)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+static void mv88e6xxx_bridge_work(struct work_struct *work)
+{
+ struct mv88e6xxx_priv_state *ps;
+ struct dsa_switch *ds;
+ int port;
+
+ ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
+ ds = ((struct dsa_switch *)ps) - 1;
+
+ while (ps->port_state_update_mask) {
+ port = __ffs(ps->port_state_update_mask);
+ clear_bit(port, &ps->port_state_update_mask);
+ mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
+ }
+}
+
+int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret, fid;
+
+ mutex_lock(&ps->smi_mutex);
+
+ /* Port Control 1: disable trunking, disable sending
+ * learning messages to this port.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
+ if (ret)
+ goto abort;
+
+ /* Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ fid = __ffs(ps->fid_mask);
+ ps->fid[port] = fid;
+ ps->fid_mask &= ~(1 << fid);
+
+ if (!dsa_is_cpu_port(ds, port))
+ ps->bridge_mask[fid] = 1 << port;
+
+ ret = _mv88e6xxx_update_port_config(ds, port);
+ if (ret)
+ goto abort;
+
+ /* Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+ 0x0000);
+abort:
+ mutex_unlock(&ps->smi_mutex);
+ return ret;
+}
+
+int mv88e6xxx_setup_common(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ mutex_init(&ps->smi_mutex);
+ mutex_init(&ps->stats_mutex);
+ mutex_init(&ps->phy_mutex);
+
+ ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
+
+ ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
+
+ INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
+
+ return 0;
+}
+
+int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
+ unsigned long timeout;
+ int ret;
+ int i;
+
+ /* Set all ports to the disabled state. */
+ for (i = 0; i < ps->num_ports; i++) {
+ ret = REG_READ(REG_PORT(i), PORT_CONTROL);
+ REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
+ }
+
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
+
+ /* Reset the switch. Keep the PPU active if requested. The PPU
+ * needs to be active to support indirect phy register access
+ * through global registers 0x18 and 0x19.
+ */
+ if (ppu_active)
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
+ else
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & is_reset) == is_reset)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ if (ret < 0)
+ goto error;
+ ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+error:
+ _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+ int reg, int val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ if (ret < 0)
+ goto error;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+error:
+ _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (port >= 0 && port < ps->num_ports)
+ return port;
+ return -EINVAL;
+}
+
+int
+mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = _mv88e6xxx_phy_read(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+int
+mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+int
+mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+int
+mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
+ u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
@@ -619,6 +1469,9 @@ static void __exit mv88e6xxx_cleanup(void)
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
unregister_switch_driver(&mv88e6171_switch_driver);
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
+ unregister_switch_driver(&mv88e6352_switch_driver);
+#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
unregister_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
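
The refactor above consistently splits each accessor into a public entry point that takes ps->smi_mutex (or phy_mutex) and an underscore-prefixed helper that assumes the lock is already held, so multi-step sequences such as the ATU commands can run under a single lock. The following is a minimal userspace sketch of that convention only — a pthread mutex and made-up _reg_read()/_reg_write() placeholders stand in for the driver's real SMI accessors:

#include <pthread.h>
#include <stdio.h>

struct chip {
        pthread_mutex_t lock;
        int regs[32];
};

/* Must be called with chip->lock held. */
static int _reg_read(struct chip *chip, int reg)
{
        return chip->regs[reg];
}

/* Must be called with chip->lock held. */
static int _reg_write(struct chip *chip, int reg, int val)
{
        chip->regs[reg] = val;
        return 0;
}

/* Public entry point: takes the lock, then composes the helpers. */
static int reg_update(struct chip *chip, int reg, int set_bits)
{
        int ret;

        pthread_mutex_lock(&chip->lock);
        ret = _reg_read(chip, reg);
        if (ret >= 0)
                ret = _reg_write(chip, reg, ret | set_bits);
        pthread_mutex_unlock(&chip->lock);

        return ret;
}

int main(void)
{
        struct chip chip = { .lock = PTHREAD_MUTEX_INITIALIZER };

        reg_update(&chip, 4, 0x4000);
        printf("reg 4 = 0x%04x\n", chip.regs[4]);
        return 0;
}

The read-modify-write stays atomic with respect to other callers because the lock is only taken at the outermost wrapper, which is the same reason the driver introduces the _mv88e6xxx_* variants.
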
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 03e397efde36..e045154f3364 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -11,9 +11,199 @@
#ifndef __MV88E6XXX_H
#define __MV88E6XXX_H
+#define SMI_CMD 0x00
+#define SMI_CMD_BUSY BIT(15)
+#define SMI_CMD_CLAUSE_22 BIT(12)
+#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
+#define SMI_DATA 0x01
+
#define REG_PORT(p) (0x10 + (p))
+#define PORT_STATUS 0x00
+#define PORT_STATUS_PAUSE_EN BIT(15)
+#define PORT_STATUS_MY_PAUSE BIT(14)
+#define PORT_STATUS_HD_FLOW BIT(13)
+#define PORT_STATUS_PHY_DETECT BIT(12)
+#define PORT_STATUS_LINK BIT(11)
+#define PORT_STATUS_DUPLEX BIT(10)
+#define PORT_STATUS_SPEED_MASK 0x0300
+#define PORT_STATUS_SPEED_10 0x0000
+#define PORT_STATUS_SPEED_100 0x0100
+#define PORT_STATUS_SPEED_1000 0x0200
+#define PORT_STATUS_EEE BIT(6) /* 6352 */
+#define PORT_STATUS_AM_DIS BIT(6) /* 6165 */
+#define PORT_STATUS_MGMII BIT(6) /* 6185 */
+#define PORT_STATUS_TX_PAUSED BIT(5)
+#define PORT_STATUS_FLOW_CTRL BIT(4)
+#define PORT_PCS_CTRL 0x01
+#define PORT_SWITCH_ID 0x03
+#define PORT_SWITCH_ID_6085 0x04a0
+#define PORT_SWITCH_ID_6095 0x0950
+#define PORT_SWITCH_ID_6123 0x1210
+#define PORT_SWITCH_ID_6123_A1 0x1212
+#define PORT_SWITCH_ID_6123_A2 0x1213
+#define PORT_SWITCH_ID_6131 0x1060
+#define PORT_SWITCH_ID_6131_B2 0x1066
+#define PORT_SWITCH_ID_6152 0x1a40
+#define PORT_SWITCH_ID_6155 0x1a50
+#define PORT_SWITCH_ID_6161 0x1610
+#define PORT_SWITCH_ID_6161_A1 0x1612
+#define PORT_SWITCH_ID_6161_A2 0x1613
+#define PORT_SWITCH_ID_6165 0x1650
+#define PORT_SWITCH_ID_6165_A1 0x1652
+#define PORT_SWITCH_ID_6165_A2 0x1653
+#define PORT_SWITCH_ID_6171 0x1710
+#define PORT_SWITCH_ID_6172 0x1720
+#define PORT_SWITCH_ID_6176 0x1760
+#define PORT_SWITCH_ID_6182 0x1a60
+#define PORT_SWITCH_ID_6185 0x1a70
+#define PORT_SWITCH_ID_6352 0x3520
+#define PORT_SWITCH_ID_6352_A0 0x3521
+#define PORT_SWITCH_ID_6352_A1 0x3522
+#define PORT_CONTROL 0x04
+#define PORT_CONTROL_STATE_MASK 0x03
+#define PORT_CONTROL_STATE_DISABLED 0x00
+#define PORT_CONTROL_STATE_BLOCKING 0x01
+#define PORT_CONTROL_STATE_LEARNING 0x02
+#define PORT_CONTROL_STATE_FORWARDING 0x03
+#define PORT_CONTROL_1 0x05
+#define PORT_BASE_VLAN 0x06
+#define PORT_DEFAULT_VLAN 0x07
+#define PORT_CONTROL_2 0x08
+#define PORT_RATE_CONTROL 0x09
+#define PORT_RATE_CONTROL_2 0x0a
+#define PORT_ASSOC_VECTOR 0x0b
+#define PORT_IN_DISCARD_LO 0x10
+#define PORT_IN_DISCARD_HI 0x11
+#define PORT_IN_FILTERED 0x12
+#define PORT_OUT_FILTERED 0x13
+#define PORT_TAG_REGMAP_0123 0x19
+#define PORT_TAG_REGMAP_4567 0x1a
+
#define REG_GLOBAL 0x1b
+#define GLOBAL_STATUS 0x00
+#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
+/* Two bits for 6165, 6185 etc */
+#define GLOBAL_STATUS_PPU_MASK (0x3 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14)
+#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED (0x2 << 14)
+#define GLOBAL_STATUS_PPU_POLLING (0x3 << 14)
+#define GLOBAL_MAC_01 0x01
+#define GLOBAL_MAC_23 0x02
+#define GLOBAL_MAC_45 0x03
+#define GLOBAL_CONTROL 0x04
+#define GLOBAL_CONTROL_SW_RESET BIT(15)
+#define GLOBAL_CONTROL_PPU_ENABLE BIT(14)
+#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */
+#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */
+#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
+#define GLOBAL_CONTROL_DEVICE_EN BIT(7)
+#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6)
+#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5)
+#define GLOBAL_CONTROL_VTU_DONE_EN BIT(4)
+#define GLOBAL_CONTROL_ATU_PROBLEM_EN BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
+#define GLOBAL_CONTROL_TCAM_EN BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
+#define GLOBAL_VTU_OP 0x05
+#define GLOBAL_VTU_VID 0x06
+#define GLOBAL_VTU_DATA_0_3 0x07
+#define GLOBAL_VTU_DATA_4_7 0x08
+#define GLOBAL_VTU_DATA_8_11 0x09
+#define GLOBAL_ATU_CONTROL 0x0a
+#define GLOBAL_ATU_OP 0x0b
+#define GLOBAL_ATU_OP_BUSY BIT(15)
+#define GLOBAL_ATU_OP_NOP (0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA 0x0c
+#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
+#define GLOBAL_ATU_DATA_STATE_UC_MGMT 0x0d
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER 0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE 0x05
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
+#define GLOBAL_ATU_DATA_STATE_MC_MGMT 0x0e
+#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER 0x0f
+#define GLOBAL_ATU_MAC_01 0x0d
+#define GLOBAL_ATU_MAC_23 0x0e
+#define GLOBAL_ATU_MAC_45 0x0f
+#define GLOBAL_IP_PRI_0 0x10
+#define GLOBAL_IP_PRI_1 0x11
+#define GLOBAL_IP_PRI_2 0x12
+#define GLOBAL_IP_PRI_3 0x13
+#define GLOBAL_IP_PRI_4 0x14
+#define GLOBAL_IP_PRI_5 0x15
+#define GLOBAL_IP_PRI_6 0x16
+#define GLOBAL_IP_PRI_7 0x17
+#define GLOBAL_IEEE_PRI 0x18
+#define GLOBAL_CORE_TAG_TYPE 0x19
+#define GLOBAL_MONITOR_CONTROL 0x1a
+#define GLOBAL_CONTROL_2 0x1c
+#define GLOBAL_STATS_OP 0x1d
+#define GLOBAL_STATS_OP_BUSY BIT(15)
+#define GLOBAL_STATS_OP_NOP (0 << 12)
+#define GLOBAL_STATS_OP_FLUSH_ALL ((1 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_FLUSH_PORT ((2 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_READ_CAPTURED ((4 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_CAPTURE_PORT ((5 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_COUNTER_32 0x1e
+#define GLOBAL_STATS_COUNTER_01 0x1f
+
#define REG_GLOBAL2 0x1c
+#define GLOBAL2_INT_SOURCE 0x00
+#define GLOBAL2_INT_MASK 0x01
+#define GLOBAL2_MGMT_EN_2X 0x02
+#define GLOBAL2_MGMT_EN_0X 0x03
+#define GLOBAL2_FLOW_CONTROL 0x04
+#define GLOBAL2_SWITCH_MGMT 0x05
+#define GLOBAL2_DEVICE_MAPPING 0x06
+#define GLOBAL2_TRUNK_MASK 0x07
+#define GLOBAL2_TRUNK_MAPPING 0x08
+#define GLOBAL2_INGRESS_OP 0x09
+#define GLOBAL2_INGRESS_DATA 0x0a
+#define GLOBAL2_PVT_ADDR 0x0b
+#define GLOBAL2_PVT_DATA 0x0c
+#define GLOBAL2_SWITCH_MAC 0x0d
+#define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
+#define GLOBAL2_ATU_STATS 0x0e
+#define GLOBAL2_PRIO_OVERRIDE 0x0f
+#define GLOBAL2_EEPROM_OP 0x14
+#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
+#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_DATA 0x15
+#define GLOBAL2_PTP_AVB_OP 0x16
+#define GLOBAL2_PTP_AVB_DATA 0x17
+#define GLOBAL2_SMI_OP 0x18
+#define GLOBAL2_SMI_OP_BUSY BIT(15)
+#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12)
+#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \
+ GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \
+ GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_DATA 0x19
+#define GLOBAL2_SCRATCH_MISC 0x1a
+#define GLOBAL2_WDOG_CONTROL 0x1b
+#define GLOBAL2_QOS_WEIGHT 0x1c
+#define GLOBAL2_MISC 0x1d
struct mv88e6xxx_priv_state {
/* When using multi-chip addressing, this mutex protects
@@ -49,6 +239,18 @@ struct mv88e6xxx_priv_state {
struct mutex eeprom_mutex;
int id; /* switch product id */
+ int num_ports; /* number of switch ports */
+
+ /* hw bridging */
+
+ u32 fid_mask;
+ u8 fid[DSA_MAX_PORTS];
+ u16 bridge_mask[DSA_MAX_PORTS];
+
+ unsigned long port_state_update_mask;
+ u8 port_state[DSA_MAX_PORTS];
+
+ struct work_struct bridge_work;
};
struct mv88e6xxx_hw_stat {
@@ -57,6 +259,9 @@ struct mv88e6xxx_hw_stat {
int reg;
};
+int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
+int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
+int mv88e6xxx_setup_common(struct dsa_switch *ds);
int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
@@ -65,24 +270,46 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
int mv88e6xxx_config_prio(struct dsa_switch *ds);
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val);
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
+int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
+int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
+ u16 val);
void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
int regnum, u16 val);
void mv88e6xxx_poll_link(struct dsa_switch *ds);
-void mv88e6xxx_get_strings(struct dsa_switch *ds,
- int nr_stats, struct mv88e6xxx_hw_stat *stats,
- int port, uint8_t *data);
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
- int nr_stats, struct mv88e6xxx_hw_stat *stats,
- int port, uint64_t *data);
+void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
+int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p);
int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-
+int mv88e6xxx_phy_wait(struct dsa_switch *ds);
+int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
+int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
+int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
+ u16 val);
+int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev, struct ethtool_eee *e);
+int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
+int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
+int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
+ unsigned char *addr, bool *is_static);
+int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
+int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+ int reg, int val);
extern struct dsa_switch_driver mv88e6131_switch_driver;
extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
extern struct dsa_switch_driver mv88e6352_switch_driver;
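
The new SMI_CMD_* definitions replace magic command words such as 0x9400 in the multi-chip access path. A small, self-contained check of how a Clause-22 command word is assembled from them — BIT() is expanded by hand here so the snippet compiles outside the kernel, and the addr/reg values are just an example:

#include <stdio.h>

#define BIT(n)                  (1u << (n))

/* Copied from the new mv88e6xxx.h definitions above. */
#define SMI_CMD_BUSY            BIT(15)
#define SMI_CMD_CLAUSE_22       BIT(12)
#define SMI_CMD_OP_22_WRITE     ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
#define SMI_CMD_OP_22_READ      ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)

int main(void)
{
        unsigned int addr = 0x1b;       /* REG_GLOBAL */
        unsigned int reg = 0x00;        /* GLOBAL_STATUS */

        /* Same composition as __mv88e6xxx_reg_read()/_write() above. */
        printf("read cmd:  0x%04x\n", SMI_CMD_OP_22_READ | (addr << 5) | reg);
        printf("write cmd: 0x%04x\n", SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
        return 0;
}

The write command printed here is 0x9400 plus the shifted address and register bits, matching the literal constant the patch removes.
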
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index b36ee9e0d220..d686b9cac29f 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec)
char *s;
if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
- printk(KERN_ERR "%s: unable to read podule description string\n",
+ printk(KERN_ERR "%s: unable to read module description string\n",
dev_name(&ec->dev));
goto no_addr;
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ec20611e9de2..096531a73124 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -983,10 +983,9 @@ static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
- u32 remainder;
unsigned long flags;
struct bfin_mac_local *lp =
container_of(ptp, struct bfin_mac_local, caps);
@@ -997,21 +996,20 @@ static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
spin_unlock_irqrestore(&lp->phc_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
+
return 0;
}
static int bfin_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
struct bfin_mac_local *lp =
container_of(ptp, struct bfin_mac_local, caps);
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&lp->phc_lock, flags);
@@ -1039,8 +1037,8 @@ static struct ptp_clock_info bfin_ptp_caps = {
.pps = 0,
.adjfreq = bfin_ptp_adjfreq,
.adjtime = bfin_ptp_adjtime,
- .gettime = bfin_ptp_gettime,
- .settime = bfin_ptp_settime,
+ .gettime64 = bfin_ptp_gettime,
+ .settime64 = bfin_ptp_settime,
.enable = bfin_ptp_enable,
};
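
The bfin_ptp_gettime()/settime() changes above swap open-coded nanosecond arithmetic for ns_to_timespec64() and timespec64_to_ns(). Roughly the same conversion, shown as a compilable userspace sketch that uses plain struct timespec in place of the kernel's timespec64:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

/* Roughly what ns_to_timespec64() does for non-negative values. */
static struct timespec ns_to_ts(int64_t ns)
{
        struct timespec ts;

        ts.tv_sec = ns / NSEC_PER_SEC;
        ts.tv_nsec = ns % NSEC_PER_SEC;
        return ts;
}

/* Roughly what timespec64_to_ns() does. */
static int64_t ts_to_ns(const struct timespec *ts)
{
        return (int64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}

int main(void)
{
        struct timespec ts = ns_to_ts(1234567890123LL);

        printf("%lld ns -> %lld s + %ld ns -> %lld ns\n",
               1234567890123LL, (long long)ts.tv_sec, ts.tv_nsec,
               (long long)ts_to_ns(&ts));
        return 0;
}
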
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 2b8bfeeee9cf..ae89de7deb13 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1588,7 +1588,7 @@ static int greth_of_remove(struct platform_device *of_dev)
return 0;
}
-static struct of_device_id greth_of_match[] = {
+static const struct of_device_id greth_of_match[] = {
{
.name = "GAISLER_ETHMAC",
},
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index f3470d96837a..bab01c849165 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -757,7 +757,7 @@ static void emac_shutdown(struct net_device *dev)
/* Disable all interrupt */
writel(0, db->membase + EMAC_INT_CTL_REG);
- /* clear interupt status */
+ /* clear interrupt status */
reg_val = readl(db->membase + EMAC_INT_STA_REG);
writel(reg_val, db->membase + EMAC_INT_STA_REG);
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index e335626e1b6b..89cd11d86642 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -58,21 +58,17 @@ struct msgdma_extended_desc {
/* Tx buffer control flags
*/
#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
- MSGDMA_DESC_CTL_TR_ERR_IRQ | \
MSGDMA_DESC_CTL_GO)
-#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
- MSGDMA_DESC_CTL_GO)
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
MSGDMA_DESC_CTL_TR_COMP_IRQ | \
- MSGDMA_DESC_CTL_TR_ERR_IRQ | \
MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
MSGDMA_DESC_CTL_GEN_EOP | \
MSGDMA_DESC_CTL_TR_COMP_IRQ | \
- MSGDMA_DESC_CTL_TR_ERR_IRQ | \
MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 6725dc00750b..da48e66377b5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
#define TXQUEUESTOP_THRESHHOLD 2
-static struct of_device_id altera_tse_ids[];
+static const struct of_device_id altera_tse_ids[];
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
@@ -105,11 +105,11 @@ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
/* set MDIO address */
csrwr32((mii_id & 0x1f), priv->mac_dev,
- tse_csroffs(mdio_phy0_addr));
+ tse_csroffs(mdio_phy1_addr));
/* get the data */
return csrrd32(priv->mac_dev,
- tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
+ tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}
static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
@@ -120,10 +120,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
/* set MDIO address */
csrwr32((mii_id & 0x1f), priv->mac_dev,
- tse_csroffs(mdio_phy0_addr));
+ tse_csroffs(mdio_phy1_addr));
/* write the data */
- csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
+ csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
return 0;
}
@@ -376,8 +376,13 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
u16 pktlength;
u16 pktstatus;
- while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
- (count < limit)) {
+ /* Check for count < limit first as get_rx_status is changing
+ * the response-fifo so we must process the next packet
+ * after calling get_rx_status if a response is pending.
+ * (reading the last byte of the response pops the value from the fifo.)
+ */
+ while ((count < limit) &&
+ ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;
@@ -386,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
+ /* DMA transfer from TSE starts with 2 additional bytes for
+ * IP payload alignment. Status returned by get_rx_status()
+ * contains DMA transfer length. Packet is 2 bytes shorter.
+ */
+ pktlength -= 2;
+
count++;
next_entry = (++priv->rx_cons) % priv->rx_ring_size;
@@ -772,6 +783,8 @@ static int init_phy(struct net_device *dev)
struct altera_tse_private *priv = netdev_priv(dev);
struct phy_device *phydev;
struct device_node *phynode;
+ bool fixed_link = false;
+ int rc = 0;
/* Avoid init phy in case of no phy present */
if (!priv->phy_iface)
@@ -784,13 +797,32 @@ static int init_phy(struct net_device *dev)
phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
if (!phynode) {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev,
- "No phy-handle nor local mdio specified\n");
- return -ENODEV;
+ /* check if a fixed-link is defined in device-tree */
+ if (of_phy_is_fixed_link(priv->device->of_node)) {
+ rc = of_phy_register_fixed_link(priv->device->of_node);
+ if (rc < 0) {
+ netdev_err(dev, "cannot register fixed PHY\n");
+ return rc;
+ }
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ phynode = of_node_get(priv->device->of_node);
+ fixed_link = true;
+
+ netdev_dbg(dev, "fixed-link detected\n");
+ phydev = of_phy_connect(dev, phynode,
+ &altera_tse_adjust_link,
+ 0, priv->phy_iface);
+ } else {
+ netdev_dbg(dev, "no phy-handle found\n");
+ if (!priv->mdio) {
+ netdev_err(dev, "No phy-handle nor local mdio specified\n");
+ return -ENODEV;
+ }
+ phydev = connect_local_phy(dev);
}
- phydev = connect_local_phy(dev);
} else {
netdev_dbg(dev, "phy-handle found\n");
phydev = of_phy_connect(dev, phynode,
@@ -814,10 +846,10 @@ static int init_phy(struct net_device *dev)
/* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
* 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well.
+ * device as well. If a fixed-link is used the phy_id is always 0.
* Note: phydev->phy_id is the result of reading the UID PHY registers.
*/
- if (phydev->phy_id == 0) {
+ if ((phydev->phy_id == 0) && !fixed_link) {
netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
phy_disconnect(phydev);
return -ENODEV;
@@ -1098,8 +1130,12 @@ static int tse_open(struct net_device *dev)
spin_lock(&priv->mac_cfg_lock);
ret = reset_mac(priv);
+ /* Note that reset_mac will fail if the clocks are gated by the PHY
+ * due to the PHY being put into isolation or power down mode.
+ * This is not an error if reset fails due to no clock.
+ */
if (ret)
- netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+ netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
ret = init_mac(priv);
spin_unlock(&priv->mac_cfg_lock);
@@ -1203,8 +1239,12 @@ static int tse_shutdown(struct net_device *dev)
spin_lock(&priv->tx_lock);
ret = reset_mac(priv);
+ /* Note that reset_mac will fail if the clocks are gated by the PHY
+ * due to the PHY being put into isolation or power down mode.
+ * This is not an error if reset fails due to no clock.
+ */
if (ret)
- netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+ netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
priv->dmaops->reset_dma(priv);
free_skbufs(dev);
@@ -1568,7 +1608,7 @@ static const struct altera_dmaops altera_dtype_msgdma = {
.start_rxdma = msgdma_start_rxdma,
};
-static struct of_device_id altera_tse_ids[] = {
+static const struct of_device_id altera_tse_ids[] = {
{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index c638c85f3954..426916036151 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,8 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on (OF_NET || ACPI) && HAS_IOMEM
+ depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+ depends on ARM64 || COMPILE_TEST
select PHYLIB
select AMD_XGBE_PHY
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 4c2ae2221780..94960055fa1f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -723,13 +723,13 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
* the last correctly noting the error.
*/
if(status & ERR_BIT) {
- /* reseting flags */
+ /* resetting flags */
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
goto err_next_pkt;
}
/* check for STP and ENP */
if(!((status & STP_BIT) && (status & ENP_BIT))){
- /* reseting flags */
+ /* resetting flags */
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
goto err_next_pkt;
}
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index a75092d584cc..7cdb18512407 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -614,7 +614,7 @@ typedef enum {
/* Assume contoller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10
-/* amd8111e decriptor flag definitions */
+/* amd8111e descriptor flag definitions */
typedef enum {
OWN_BIT = (1 << 15),
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 15a8190a6f75..bc8b04f42882 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1735,7 +1735,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
if (!is_valid_ether_addr(dev->dev_addr))
- memset(dev->dev_addr, 0, ETH_ALEN);
+ eth_zero_addr(dev->dev_addr);
if (pcnet32_debug & NETIF_MSG_PROBE) {
pr_cont(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 29a09271b64a..34c28aac767f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -365,6 +365,8 @@
#define MAC_HWF0R_TXCOESEL_WIDTH 1
#define MAC_HWF0R_VLHASH_INDEX 4
#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADDR64_INDEX 14
+#define MAC_HWF1R_ADDR64_WIDTH 2
#define MAC_HWF1R_ADVTHWORD_INDEX 13
#define MAC_HWF1R_ADVTHWORD_WIDTH 1
#define MAC_HWF1R_DBGMEMA_INDEX 19
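
The new ADDR64 index/width pair is consumed through the driver's XGMAC_GET_BITS() helper, which is essentially a shift-and-mask on the hardware feature register. A standalone illustration of that extraction for a field at bit 14, width 2 — the GET_BITS macro below is an illustrative stand-in, not the kernel's definition:

#include <stdio.h>

/* Shift-and-mask field extraction, as the xgbe bit-field helpers do. */
#define GET_BITS(var, index, width) \
        (((var) >> (index)) & ((1u << (width)) - 1))

#define MAC_HWF1R_ADDR64_INDEX  14
#define MAC_HWF1R_ADDR64_WIDTH  2

int main(void)
{
        unsigned int mac_hfr1 = 0x0000c000;     /* example register value */

        printf("ADDR64 field = %u\n",
               GET_BITS(mac_hfr1, MAC_HWF1R_ADDR64_INDEX,
                        MAC_HWF1R_ADDR64_WIDTH));
        return 0;
}
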
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 400757b49872..21d9497518fd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -853,6 +853,22 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
return 0;
}
+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ xgbe_set_promiscuous_mode(pdata, pr_mode);
+ xgbe_set_all_multicast_mode(pdata, am_mode);
+
+ xgbe_add_mac_addresses(pdata);
+
+ return 0;
+}
+
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg)
{
@@ -1068,7 +1084,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc3 = 0;
/* Make sure ownership is written to the descriptor */
- wmb();
+ dma_wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
@@ -1101,9 +1117,24 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
DBGPR("<--tx_desc_init\n");
}
-static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata, unsigned int index)
{
struct xgbe_ring_desc *rdesc = rdata->rdesc;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int inte;
+
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames && !((index + 1) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
/* Reset the Rx descriptor
* Set buffer 1 (lo) address to header dma address (lo)
@@ -1117,19 +1148,18 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
- rdata->interrupt ? 1 : 0);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
* for the descriptor
*/
- wmb();
+ dma_wmb();
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
/* Make sure ownership is written to the descriptor */
- wmb();
+ dma_wmb();
}
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
@@ -1138,26 +1168,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
unsigned int start_index = ring->cur;
- unsigned int rx_coalesce, rx_frames;
unsigned int i;
DBGPR("-->rx_desc_init\n");
- rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
- rx_frames = pdata->rx_frames;
-
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- /* Set interrupt on completion bit as appropriate */
- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
- rdata->interrupt = 0;
- else
- rdata->interrupt = 1;
-
/* Initialize Rx descriptor */
- xgbe_rx_desc_reset(rdata);
+ xgbe_rx_desc_reset(pdata, rdata, i);
}
/* Update the total number of Rx descriptors */
@@ -1358,18 +1378,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring_data *rdata;
+ /* Make sure everything is written before the register write */
+ wmb();
+
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
lower_32_bits(rdata->rdesc_dma));
- /* Start the Tx coalescing timer */
+ /* Start the Tx timer */
if (pdata->tx_usecs && !channel->tx_timer_active) {
channel->tx_timer_active = 1;
- hrtimer_start(&channel->tx_timer,
- ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
- HRTIMER_MODE_REL);
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
}
ring->tx.xmit_more = 0;
@@ -1565,7 +1587,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
* is written to the descriptor(s) before setting the OWN bit
* for the first descriptor
*/
- wmb();
+ dma_wmb();
/* Set OWN bit for the first descriptor */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
@@ -1577,7 +1599,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
#endif
/* Make sure ownership is written to the descriptor */
- wmb();
+ dma_wmb();
ring->cur = cur_index + 1;
if (!packet->skb->xmit_more ||
@@ -1613,7 +1635,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
return 1;
/* Make sure descriptor fields are read after reading the OWN bit */
- rmb();
+ dma_rmb();
#ifdef XGMAC_ENABLE_RX_DESC_DUMP
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
@@ -2004,7 +2026,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
- netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+ netdev_notice(pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
pdata->tx_q_count, ((fifo_size + 1) * 256));
}
@@ -2019,7 +2042,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
- netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+ netdev_notice(pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
pdata->rx_q_count, ((fifo_size + 1) * 256));
}
@@ -2800,6 +2824,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
* Initialize MAC related features
*/
xgbe_config_mac_address(pdata);
+ xgbe_config_rx_mode(pdata);
xgbe_config_jumbo_enable(pdata);
xgbe_config_flow_control(pdata);
xgbe_config_mac_speed(pdata);
@@ -2819,10 +2844,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->tx_complete = xgbe_tx_complete;
- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
hw_if->set_mac_address = xgbe_set_mac_address;
+ hw_if->config_rx_mode = xgbe_config_rx_mode;
hw_if->enable_rx_csum = xgbe_enable_rx_csum;
hw_if->disable_rx_csum = xgbe_disable_rx_csum;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 885b02b5be07..db84ddcfec84 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -129,7 +129,6 @@
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
-static void xgbe_set_rx_mode(struct net_device *);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
@@ -411,11 +410,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+static void xgbe_tx_timer(unsigned long data)
{
- struct xgbe_channel *channel = container_of(timer,
- struct xgbe_channel,
- tx_timer);
+ struct xgbe_channel *channel = (struct xgbe_channel *)data;
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
@@ -437,8 +434,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
channel->tx_timer_active = 0;
DBGPR("<--xgbe_tx_timer\n");
-
- return HRTIMER_NORESTART;
}
static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
@@ -454,9 +449,8 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
break;
DBGPR(" %s adding tx timer\n", channel->name);
- hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- channel->tx_timer.function = xgbe_tx_timer;
+ setup_timer(&channel->tx_timer, xgbe_tx_timer,
+ (unsigned long)channel);
}
DBGPR("<--xgbe_init_tx_timers\n");
@@ -475,8 +469,7 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
break;
DBGPR(" %s deleting tx timer\n", channel->name);
- channel->tx_timer_active = 0;
- hrtimer_cancel(&channel->tx_timer);
+ del_timer_sync(&channel->tx_timer);
}
DBGPR("<--xgbe_stop_tx_timers\n");
@@ -519,6 +512,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
RXFIFOSIZE);
hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
TXFIFOSIZE);
+ hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
@@ -553,6 +547,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
break;
}
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
/* The Queue, Channel and TC counts are zero based so increment them
* to get the actual number
*/
@@ -692,6 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_init_rx_coalesce\n");
pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
hw_if->config_rx_coalesce(pdata);
@@ -941,8 +951,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_start\n");
- xgbe_set_rx_mode(netdev);
-
hw_if->init(pdata);
phy_start(pdata->phydev);
@@ -1522,17 +1530,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int pr_mode, am_mode;
DBGPR("-->xgbe_set_rx_mode\n");
- pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
- am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
-
- hw_if->set_promiscuous_mode(pdata, pr_mode);
- hw_if->set_all_multicast_mode(pdata, am_mode);
-
- hw_if->add_mac_addresses(pdata);
+ hw_if->config_rx_mode(pdata);
DBGPR("<--xgbe_set_rx_mode\n");
}
@@ -1599,6 +1600,14 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
+static void xgbe_tx_timeout(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ netdev_warn(netdev, "tx timeout, device restarting\n");
+ schedule_work(&pdata->restart_work);
+}
+
static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *s)
{
@@ -1763,6 +1772,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = xgbe_ioctl,
.ndo_change_mtu = xgbe_change_mtu,
+ .ndo_tx_timeout = xgbe_tx_timeout,
.ndo_get_stats64 = xgbe_get_stats64,
.ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
@@ -1795,11 +1805,14 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
if (desc_if->map_rx_buffer(pdata, ring, rdata))
break;
- hw_if->rx_desc_reset(rdata);
+ hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
ring->dirty++;
}
+ /* Make sure everything is written before the register write */
+ wmb();
+
/* Update the Rx Tail Pointer Register with address of
* the last cleaned entry */
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
@@ -1807,16 +1820,15 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
lower_32_bits(rdata->rdesc_dma));
}
-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
struct xgbe_ring_data *rdata,
unsigned int *len)
{
- struct net_device *netdev = pdata->netdev;
struct sk_buff *skb;
u8 *packet;
unsigned int copy_len;
- skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
+ skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
if (!skb)
return NULL;
@@ -1863,7 +1875,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
/* Make sure descriptor fields are read after reading the OWN
* bit */
- rmb();
+ dma_rmb();
#ifdef XGMAC_ENABLE_TX_DESC_DUMP
xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
@@ -1986,7 +1998,7 @@ read_again:
rdata->rx.hdr.dma_len,
DMA_FROM_DEVICE);
- skb = xgbe_create_skb(pdata, rdata, &put_len);
+ skb = xgbe_create_skb(napi, rdata, &put_len);
if (!skb) {
error = 1;
goto skip_data;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index ebf489351555..5f149e8ee20f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -291,7 +291,6 @@ static int xgbe_get_settings(struct net_device *netdev,
return -ENODEV;
ret = phy_ethtool_gset(pdata->phydev, cmd);
- cmd->transceiver = XCVR_EXTERNAL;
DBGPR("<--xgbe_get_settings\n");
@@ -378,18 +377,14 @@ static int xgbe_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int riwt;
DBGPR("-->xgbe_get_coalesce\n");
memset(ec, 0, sizeof(struct ethtool_coalesce));
- riwt = pdata->rx_riwt;
- ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+ ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
- ec->tx_coalesce_usecs = pdata->tx_usecs;
ec->tx_max_coalesced_frames = pdata->tx_frames;
DBGPR("<--xgbe_get_coalesce\n");
@@ -403,13 +398,14 @@ static int xgbe_set_coalesce(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int rx_frames, rx_riwt, rx_usecs;
- unsigned int tx_frames, tx_usecs;
+ unsigned int tx_frames;
DBGPR("-->xgbe_set_coalesce\n");
/* Check for not supported parameters */
if ((ec->rx_coalesce_usecs_irq) ||
(ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs) ||
(ec->tx_coalesce_usecs_irq) ||
(ec->tx_max_coalesced_frames_irq) ||
(ec->stats_block_coalesce_usecs) ||
@@ -428,28 +424,18 @@ static int xgbe_set_coalesce(struct net_device *netdev,
(ec->rate_sample_interval))
return -EOPNOTSUPP;
- /* Can only change rx-frames when interface is down (see
- * rx_descriptor_init in xgbe-dev.c)
- */
- rx_frames = pdata->rx_frames;
- if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
- netdev_alert(netdev,
- "interface must be down to change rx-frames\n");
- return -EINVAL;
- }
-
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+ rx_usecs = ec->rx_coalesce_usecs;
rx_frames = ec->rx_max_coalesced_frames;
/* Use smallest possible value if conversion resulted in zero */
- if (ec->rx_coalesce_usecs && !rx_riwt)
+ if (rx_usecs && !rx_riwt)
rx_riwt = 1;
/* Check the bounds of values for Rx */
if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
- rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
- rx_usecs);
+ hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
return -EINVAL;
}
if (rx_frames > pdata->rx_desc_count) {
@@ -458,7 +444,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- tx_usecs = ec->tx_coalesce_usecs;
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
@@ -469,10 +454,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
}
pdata->rx_riwt = rx_riwt;
+ pdata->rx_usecs = rx_usecs;
pdata->rx_frames = rx_frames;
hw_if->config_rx_coalesce(pdata);
- pdata->tx_usecs = tx_usecs;
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 32dd65137051..714905384900 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -374,15 +374,6 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
- /* Set the DMA mask */
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed\n");
- goto err_io;
- }
-
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
@@ -409,6 +400,16 @@ static int xgbe_probe(struct platform_device *pdev)
/* Set default configuration data */
xgbe_default_config(pdata);
+ /* Set the DMA mask */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ goto err_io;
+ }
+
/* Calculate the number of Tx and Rx rings to be created
* -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
* the number of Tx queues to the number of Tx channels
@@ -490,6 +491,9 @@ static int xgbe_probe(struct platform_device *pdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* Use default watchdog timeout */
+ netdev->watchdog_timeo = 0;
+
xgbe_init_rx_coalesce(pdata);
xgbe_init_tx_coalesce(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index f326178ef376..b03e4f58d02e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -179,7 +179,7 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -193,12 +193,13 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- *ts = ns_to_timespec(nsec);
+ *ts = ns_to_timespec64(nsec);
return 0;
}
-static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+static int xgbe_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -206,7 +207,7 @@ static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
unsigned long flags;
u64 nsec;
- nsec = timespec_to_ns(ts);
+ nsec = timespec64_to_ns(ts);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
@@ -236,8 +237,8 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->max_adj = pdata->ptpclk_rate;
info->adjfreq = xgbe_adjfreq;
info->adjtime = xgbe_adjtime;
- info->gettime = xgbe_gettime;
- info->settime = xgbe_settime;
+ info->gettime64 = xgbe_gettime;
+ info->settime64 = xgbe_settime;
info->enable = xgbe_enable;
clock = ptp_clock_register(info, pdata->dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 13e8f95c077c..e62dfa2deab6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -222,7 +222,7 @@
((_idx) & ((_ring)->rdesc_count - 1)))
/* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS 50
+#define XGMAC_INIT_DMA_TX_USECS 1000
#define XGMAC_INIT_DMA_TX_FRAMES 25
#define XGMAC_MAX_DMA_RIWT 0xff
@@ -325,8 +325,6 @@ struct xgbe_ring_data {
struct xgbe_tx_ring_data tx; /* Tx-related data */
struct xgbe_rx_ring_data rx; /* Rx-related data */
- unsigned int interrupt; /* Interrupt indicator */
-
unsigned int mapped_as_page;
/* Incomplete receive save location. If the budget is exhausted
@@ -410,7 +408,7 @@ struct xgbe_channel {
unsigned int saved_ier;
unsigned int tx_timer_active;
- struct hrtimer tx_timer;
+ struct timer_list tx_timer;
struct xgbe_ring *tx_ring;
struct xgbe_ring *rx_ring;
@@ -497,10 +495,8 @@ struct xgbe_mmc_stats {
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
- int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
- int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
- int (*add_mac_addresses)(struct xgbe_prv_data *);
int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*config_rx_mode)(struct xgbe_prv_data *);
int (*enable_rx_csum)(struct xgbe_prv_data *);
int (*disable_rx_csum)(struct xgbe_prv_data *);
@@ -536,8 +532,9 @@ struct xgbe_hw_if {
int (*dev_read)(struct xgbe_channel *);
void (*tx_desc_init)(struct xgbe_channel *);
void (*rx_desc_init)(struct xgbe_channel *);
- void (*rx_desc_reset)(struct xgbe_ring_data *);
void (*tx_desc_reset)(struct xgbe_ring_data *);
+ void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
+ unsigned int);
int (*is_last_desc)(struct xgbe_ring_desc *);
int (*is_context_desc)(struct xgbe_ring_desc *);
void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
@@ -620,7 +617,7 @@ struct xgbe_hw_features {
unsigned int mgk; /* PMT magic packet */
unsigned int mmc; /* RMON module */
unsigned int aoe; /* ARP Offload */
- unsigned int ts; /* IEEE 1588-2008 Adavanced Timestamp */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
unsigned int eee; /* Energy Efficient Ethernet */
unsigned int tx_coe; /* Tx Checksum Offload */
unsigned int rx_coe; /* Rx Checksum Offload */
@@ -632,6 +629,7 @@ struct xgbe_hw_features {
unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
unsigned int adv_ts_hi; /* Advance Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
unsigned int dcb; /* DCB Feature */
unsigned int sph; /* Split Header Feature */
unsigned int tso; /* TCP Segmentation Offload */
@@ -715,6 +713,7 @@ struct xgbe_prv_data {
/* Rx coalescing settings */
unsigned int rx_riwt;
+ unsigned int rx_usecs;
unsigned int rx_frames;
/* Current Rx buffer size */
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index f4054d242f3c..19e38afbc5ee 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,6 +1,7 @@
config NET_XGENE
tristate "APM X-Gene SoC Ethernet Driver"
depends on HAS_DMA
+ depends on ARCH_XGENE || COMPILE_TEST
select PHYLIB
help
This is the Ethernet driver for the on-chip ethernet interface on the
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index ec45f3256f0e..d9bc89d69266 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -97,6 +97,8 @@ enum xgene_enet_rm {
#define QCOHERENT BIT(4)
#define RECOMBBUF BIT(27)
+#define MAC_OFFSET 0x30
+
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 635a83be7e5e..40d3530d7f30 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -428,13 +428,23 @@ static int xgene_enet_register_irq(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
+ struct xgene_enet_desc_ring *ring;
int ret;
- ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
- IRQF_SHARED, ndev->name, pdata->rx_ring);
- if (ret) {
- netdev_err(ndev, "rx%d interrupt request failed\n",
- pdata->rx_ring->irq);
+ ring = pdata->rx_ring;
+ ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+ IRQF_SHARED, ring->irq_name, ring);
+ if (ret)
+ netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);
+
+ if (pdata->cq_cnt) {
+ ring = pdata->tx_ring->cp_ring;
+ ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+ IRQF_SHARED, ring->irq_name, ring);
+ if (ret) {
+ netdev_err(ndev, "Failed to request irq %s\n",
+ ring->irq_name);
+ }
}
return ret;
@@ -448,6 +458,37 @@ static void xgene_enet_free_irq(struct net_device *ndev)
pdata = netdev_priv(ndev);
dev = ndev_to_dev(ndev);
devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
+
+ if (pdata->cq_cnt) {
+ devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
+ pdata->tx_ring->cp_ring);
+ }
+}
+
+static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
+{
+ struct napi_struct *napi;
+
+ napi = &pdata->rx_ring->napi;
+ napi_enable(napi);
+
+ if (pdata->cq_cnt) {
+ napi = &pdata->tx_ring->cp_ring->napi;
+ napi_enable(napi);
+ }
+}
+
+static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
+{
+ struct napi_struct *napi;
+
+ napi = &pdata->rx_ring->napi;
+ napi_disable(napi);
+
+ if (pdata->cq_cnt) {
+ napi = &pdata->tx_ring->cp_ring->napi;
+ napi_disable(napi);
+ }
}
static int xgene_enet_open(struct net_device *ndev)
@@ -462,7 +503,7 @@ static int xgene_enet_open(struct net_device *ndev)
ret = xgene_enet_register_irq(ndev);
if (ret)
return ret;
- napi_enable(&pdata->rx_ring->napi);
+ xgene_enet_napi_enable(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
phy_start(pdata->phy_dev);
@@ -486,7 +527,7 @@ static int xgene_enet_close(struct net_device *ndev)
else
cancel_delayed_work_sync(&pdata->link_work);
- napi_disable(&pdata->rx_ring->napi);
+ xgene_enet_napi_disable(pdata);
xgene_enet_free_irq(ndev);
xgene_enet_process_ring(pdata->rx_ring, -1);
@@ -580,6 +621,8 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
if (ring) {
if (ring->cp_ring && ring->cp_ring->cp_skb)
devm_kfree(dev, ring->cp_ring->cp_skb);
+ if (ring->cp_ring && pdata->cq_cnt)
+ xgene_enet_free_desc_ring(ring->cp_ring);
xgene_enet_free_desc_ring(ring);
}
@@ -645,9 +688,11 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
- u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
- u8 bp_bufnum = START_BP_BUFNUM;
- u16 ring_id, ring_num = START_RING_NUM;
+ u8 cpu_bufnum = pdata->cpu_bufnum;
+ u8 eth_bufnum = pdata->eth_bufnum;
+ u8 bp_bufnum = pdata->bp_bufnum;
+ u16 ring_num = pdata->ring_num;
+ u16 ring_id;
int ret;
/* allocate rx descriptor ring */
@@ -671,6 +716,12 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
rx_ring->nbufpool = NUM_BUFPOOL;
rx_ring->buf_pool = buf_pool;
rx_ring->irq = pdata->rx_irq;
+ if (!pdata->cq_cnt) {
+ snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+ ndev->name);
+ } else {
+ snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
+ }
buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
sizeof(struct sk_buff *), GFP_KERNEL);
if (!buf_pool->rx_skb) {
@@ -692,7 +743,22 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
pdata->tx_ring = tx_ring;
- cp_ring = pdata->rx_ring;
+ if (!pdata->cq_cnt) {
+ cp_ring = pdata->rx_ring;
+ } else {
+ /* allocate tx completion descriptor ring */
+ ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
+ cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+ RING_CFGSIZE_16KB,
+ ring_id);
+ if (!cp_ring) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ cp_ring->irq = pdata->txc_irq;
+ snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
+ }
+
cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
sizeof(struct sk_buff *), GFP_KERNEL);
if (!cp_ring->cp_skb) {
@@ -752,6 +818,22 @@ static const struct net_device_ops xgene_ndev_ops = {
.ndo_set_mac_address = xgene_enet_set_mac_address,
};
+static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
+{
+ u32 id = 0;
+ int ret;
+
+ ret = device_property_read_u32(dev, "port-id", &id);
+ if (!ret && id > 1) {
+ dev_err(dev, "Incorrect port-id specified\n");
+ return -ENODEV;
+ }
+
+ pdata->port_id = id;
+
+ return 0;
+}
+
static int xgene_get_mac_address(struct device *dev,
unsigned char *addr)
{
@@ -835,13 +917,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
return -ENOMEM;
}
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- dev_err(dev, "Unable to get ENET Rx IRQ\n");
- ret = ret ? : -ENXIO;
+ ret = xgene_get_port_id(dev, pdata);
+ if (ret)
return ret;
- }
- pdata->rx_irq = ret;
if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
eth_hw_addr_random(ndev);
@@ -860,19 +938,37 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
return -ENODEV;
}
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(dev, "Unable to get ENET Rx IRQ\n");
+ ret = ret ? : -ENXIO;
+ return ret;
+ }
+ pdata->rx_irq = ret;
+
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
+ ret = platform_get_irq(pdev, 1);
+ if (ret <= 0) {
+ dev_err(dev, "Unable to get ENET Tx completion IRQ\n");
+ ret = ret ? : -ENXIO;
+ return ret;
+ }
+ pdata->txc_irq = ret;
+ }
+
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
pdata->clk = NULL;
}
- base_addr = pdata->base_addr;
+ base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
- pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
+ pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
@@ -928,13 +1024,60 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
pdata->mac_ops = &xgene_sgmac_ops;
pdata->port_ops = &xgene_sgport_ops;
pdata->rm = RM1;
+ pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
break;
default:
pdata->mac_ops = &xgene_xgmac_ops;
pdata->port_ops = &xgene_xgport_ops;
pdata->rm = RM0;
+ pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
break;
}
+
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = START_BP_BUFNUM_0;
+ pdata->ring_num = START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = START_BP_BUFNUM_1;
+ pdata->ring_num = START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+
+}
+
+static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
+{
+ struct napi_struct *napi;
+
+ napi = &pdata->rx_ring->napi;
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+
+ if (pdata->cq_cnt) {
+ napi = &pdata->tx_ring->cp_ring->napi;
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
+{
+ struct napi_struct *napi;
+
+ napi = &pdata->rx_ring->napi;
+ netif_napi_del(napi);
+
+ if (pdata->cq_cnt) {
+ napi = &pdata->tx_ring->cp_ring->napi;
+ netif_napi_del(napi);
+ }
}
static int xgene_enet_probe(struct platform_device *pdev)
@@ -942,7 +1085,6 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct net_device *ndev;
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
- struct napi_struct *napi;
struct xgene_mac_ops *mac_ops;
int ret;
@@ -984,8 +1126,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (ret)
goto err;
- napi = &pdata->rx_ring->napi;
- netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+ xgene_enet_napi_add(pdata);
mac_ops = pdata->mac_ops;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
ret = xgene_enet_mdio_config(pdata);
@@ -1012,7 +1153,7 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops->rx_disable(pdata);
mac_ops->tx_disable(pdata);
- netif_napi_del(&pdata->rx_ring->napi);
+ xgene_enet_napi_del(pdata);
xgene_enet_mdio_remove(pdata);
xgene_enet_delete_desc_rings(pdata);
unregister_netdev(ndev);
@@ -1033,7 +1174,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif
#ifdef CONFIG_OF
-static struct of_device_id xgene_enet_of_match[] = {
+static const struct of_device_id xgene_enet_of_match[] = {
{.compatible = "apm,xgene-enet",},
{.compatible = "apm,xgene1-sgenet",},
{.compatible = "apm,xgene1-xgenet",},
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index c2d465c3db66..8f3d232b09bc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -41,9 +41,18 @@
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
-#define START_ETH_BUFNUM 2
-#define START_BP_BUFNUM 0x22
-#define START_RING_NUM 8
+
+#define START_CPU_BUFNUM_0 0
+#define START_ETH_BUFNUM_0 2
+#define START_BP_BUFNUM_0 0x22
+#define START_RING_NUM_0 8
+#define START_CPU_BUFNUM_1 12
+#define START_ETH_BUFNUM_1 10
+#define START_BP_BUFNUM_1 0x2A
+#define START_RING_NUM_1 264
+
+#define IRQ_ID_SIZE 16
+#define XGENE_MAX_TXC_RINGS 1
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
@@ -57,6 +66,7 @@ struct xgene_enet_desc_ring {
u16 tail;
u16 slots;
u16 irq;
+ char irq_name[IRQ_ID_SIZE];
u32 size;
u32 state[NUM_RING_CONFIG];
void __iomem *cmd_base;
@@ -111,6 +121,8 @@ struct xgene_enet_pdata {
u32 cp_qcnt_hi;
u32 cp_qcnt_low;
u32 rx_irq;
+ u32 txc_irq;
+ u8 cq_cnt;
void __iomem *eth_csr_addr;
void __iomem *eth_ring_if_addr;
void __iomem *eth_diag_csr_addr;
@@ -125,6 +137,11 @@ struct xgene_enet_pdata {
struct xgene_mac_ops *mac_ops;
struct xgene_port_ops *port_ops;
struct delayed_work link_work;
+ u32 port_id;
+ u8 cpu_bufnum;
+ u8 eth_bufnum;
+ u8 bp_bufnum;
+ u16 ring_num;
};
struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f5d4f68c288c..f27fb6f2a93b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -226,6 +226,7 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
+ u32 offset = p->port_id * 4;
xgene_sgmac_reset(p);
@@ -272,9 +273,9 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
/* Bypass traffic gating */
- xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
+ xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
- xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0);
+ xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -330,13 +331,14 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
u32 dst_ring_num, u16 bufpool_id)
{
u32 data, fpsel;
+ u32 offset = p->port_id * MAC_OFFSET;
data = CFG_CLE_BYPASS_EN0;
- xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data);
+ xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
- xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data);
+ xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index daae0e016253..a65d7a60f116 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -483,8 +483,8 @@ static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
bmwrite(dev, TXCFG, (config & ~TxMACEnable));
bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
/* disable rx and tx dma */
- st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
- st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
/* free some skb's */
for (i=0; i<N_RX_RING; i++) {
if (bp->rx_bufs[i] != NULL) {
@@ -699,8 +699,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
while (1) {
cp = &bp->rx_cmds[i];
- stat = ld_le16(&cp->xfer_status);
- residual = ld_le16(&cp->res_count);
+ stat = le16_to_cpu(cp->xfer_status);
+ residual = le16_to_cpu(cp->res_count);
if ((stat & ACTIVE) == 0)
break;
nb = RX_BUFLEN - residual - 2;
@@ -728,8 +728,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
skb_reserve(bp->rx_bufs[i], 2);
}
bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
- st_le16(&cp->res_count, 0);
- st_le16(&cp->xfer_status, 0);
+ cp->res_count = cpu_to_le16(0);
+ cp->xfer_status = cpu_to_le16(0);
last = i;
if (++i >= N_RX_RING) i = 0;
}
@@ -769,7 +769,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
while (1) {
cp = &bp->tx_cmds[bp->tx_empty];
- stat = ld_le16(&cp->xfer_status);
+ stat = le16_to_cpu(cp->xfer_status);
if (txintcount < 10) {
XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
}
@@ -1411,8 +1411,8 @@ static int bmac_close(struct net_device *dev)
bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
/* disable rx and tx dma */
- st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
- st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
/* free some skb's */
XXDEBUG(("bmac: free rx bufs\n"));
@@ -1493,7 +1493,7 @@ static void bmac_tx_timeout(unsigned long data)
cp = &bp->tx_cmds[bp->tx_empty];
/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
-/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
+/* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
/* mb->pr, mb->xmtfs, mb->fifofc)); */
/* turn off both tx and rx and reset the chip */
@@ -1506,7 +1506,7 @@ static void bmac_tx_timeout(unsigned long data)
bmac_enable_and_reset_chip(dev);
/* restart rx dma */
- cp = bus_to_virt(ld_le32(&rd->cmdptr));
+ cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
out_le16(&cp->xfer_status, 0);
out_le32(&rd->cmdptr, virt_to_bus(cp));
@@ -1553,10 +1553,10 @@ static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
ip = (int*)(cp+i);
printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
- ld_le32(ip+0),
- ld_le32(ip+1),
- ld_le32(ip+2),
- ld_le32(ip+3));
+ le32_to_cpup(ip+0),
+ le32_to_cpup(ip+1),
+ le32_to_cpup(ip+2),
+ le32_to_cpup(ip+3));
}
}
@@ -1621,7 +1621,7 @@ static int bmac_remove(struct macio_dev *mdev)
return 0;
}
-static struct of_device_id bmac_match[] =
+static const struct of_device_id bmac_match[] =
{
{
.name = "bmac",
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 842fe7684904..e58a7c73766e 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -310,7 +310,7 @@ static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
* way on some machines.
*/
for (i = 200; i > 0; --i)
- if (ld_le32(&dma->control) & RUN)
+ if (le32_to_cpu(dma->control) & RUN)
udelay(1);
}
@@ -452,21 +452,21 @@ static int mace_open(struct net_device *dev)
data = skb->data;
}
mp->rx_bufs[i] = skb;
- st_le16(&cp->req_count, RX_BUFLEN);
- st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
- st_le32(&cp->phy_addr, virt_to_bus(data));
+ cp->req_count = cpu_to_le16(RX_BUFLEN);
+ cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
+ cp->phy_addr = cpu_to_le32(virt_to_bus(data));
cp->xfer_status = 0;
++cp;
}
mp->rx_bufs[i] = NULL;
- st_le16(&cp->command, DBDMA_STOP);
+ cp->command = cpu_to_le16(DBDMA_STOP);
mp->rx_fill = i;
mp->rx_empty = 0;
/* Put a branch back to the beginning of the receive command list */
++cp;
- st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
- st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));
+ cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
+ cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));
/* start rx dma */
out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
@@ -475,8 +475,8 @@ static int mace_open(struct net_device *dev)
/* put a branch at the end of the tx command list */
cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
- st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
- st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));
+ cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
+ cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));
/* reset tx dma */
out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
@@ -507,8 +507,8 @@ static int mace_close(struct net_device *dev)
out_8(&mb->imr, 0xff); /* disable all intrs */
/* disable rx and tx dma */
- st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
- st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+ rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+ td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
mace_clean_rings(mp);
@@ -558,8 +558,8 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
}
mp->tx_bufs[fill] = skb;
cp = mp->tx_cmds + NCMDS_TX * fill;
- st_le16(&cp->req_count, len);
- st_le32(&cp->phy_addr, virt_to_bus(skb->data));
+ cp->req_count = cpu_to_le16(len);
+ cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));
np = mp->tx_cmds + NCMDS_TX * next;
out_le16(&np->command, DBDMA_STOP);
@@ -691,7 +691,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
out_8(&mb->xmtfc, AUTO_PAD_XMIT);
continue;
}
- dstat = ld_le32(&td->status);
+ dstat = le32_to_cpu(td->status);
/* stop DMA controller */
out_le32(&td->control, RUN << 16);
/*
@@ -720,11 +720,11 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
mace_reset(dev);
/*
* XXX mace likes to hang the machine after a xmtfs error.
- * This is hard to reproduce, reseting *may* help
+ * This is hard to reproduce, resetting *may* help
*/
}
cp = mp->tx_cmds + NCMDS_TX * i;
- stat = ld_le16(&cp->xfer_status);
+ stat = le16_to_cpu(cp->xfer_status);
if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
/*
* Check whether there were in fact 2 bytes written to
@@ -830,7 +830,7 @@ static void mace_tx_timeout(unsigned long data)
mace_reset(dev);
/* restart rx dma */
- cp = bus_to_virt(ld_le32(&rd->cmdptr));
+ cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
dbdma_reset(rd);
out_le16(&cp->xfer_status, 0);
out_le32(&rd->cmdptr, virt_to_bus(cp));
@@ -889,20 +889,20 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
spin_lock_irqsave(&mp->lock, flags);
for (i = mp->rx_empty; i != mp->rx_fill; ) {
cp = mp->rx_cmds + i;
- stat = ld_le16(&cp->xfer_status);
+ stat = le16_to_cpu(cp->xfer_status);
if ((stat & ACTIVE) == 0) {
next = i + 1;
if (next >= N_RX_RING)
next = 0;
np = mp->rx_cmds + next;
if (next != mp->rx_fill &&
- (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
+ (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) {
printk(KERN_DEBUG "mace: lost a status word\n");
++mace_lost_status;
} else
break;
}
- nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
+ nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count);
out_le16(&cp->command, DBDMA_STOP);
/* got a packet, have a look at it */
skb = mp->rx_bufs[i];
@@ -962,13 +962,13 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
mp->rx_bufs[i] = skb;
}
}
- st_le16(&cp->req_count, RX_BUFLEN);
+ cp->req_count = cpu_to_le16(RX_BUFLEN);
data = skb? skb->data: dummy_buf;
- st_le32(&cp->phy_addr, virt_to_bus(data));
+ cp->phy_addr = cpu_to_le32(virt_to_bus(data));
out_le16(&cp->xfer_status, 0);
out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
- if ((ld_le32(&rd->status) & ACTIVE) != 0) {
+ if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
out_le32(&rd->control, (PAUSE << 16) | PAUSE);
while ((in_le32(&rd->status) & ACTIVE) != 0)
;
@@ -984,7 +984,7 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct of_device_id mace_match[] =
+static const struct of_device_id mace_match[] =
{
{
.name = "mace",
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 6e66127e6abf..89914ca17a49 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -575,7 +575,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
mace_reset(dev);
/*
* XXX mace likes to hang the machine after a xmtfs error.
- * This is hard to reproduce, reseting *may* help
+ * This is hard to reproduce, resetting *may* help
*/
}
/* dma should have finished */
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 8e262e2b39b6..dea29ee24da4 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -25,8 +25,7 @@ config ARC_EMAC_CORE
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ
- depends on OF_NET
+ depends on OF_IRQ && OF_NET && HAS_DMA
---help---
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -35,7 +34,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR
+ depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA
---help---
Support for Rockchip RK3066/RK3188 EMAC ethernet controllers.
This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 52fdfe225978..a8b80c56ac25 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
/*
* atl1c_read_phy_core
- * core funtion to read register in PHY via MDIO control regsiter.
+ * core function to read register in PHY via MDIO control register.
* ext: extension register (see IEEE 802.3)
* dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
* reg: reg to read
@@ -356,7 +356,7 @@ int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
/*
* atl1c_write_phy_core
- * core funtion to write to register in PHY via MDIO control regsiter.
+ * core function to write to register in PHY via MDIO control register.
* ext: extension register (see IEEE 802.3)
* dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
* reg: reg to write
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 587f63e87588..932bd1862f7a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -752,7 +752,7 @@ static void atl1c_patch_assign(struct atl1c_hw *hw)
if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
hw->revision_id == L2CB_V21) {
- /* config acess mode */
+ /* config access mode */
pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
REG_PCIE_DEV_MISC_CTRL);
pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
index 74df16aef793..88a6271de5bc 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
#define TWSI_CTRL_SW_LDSTART 0x800
#define TWSI_CTRL_HW_LDSTART 0x1000
-#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
#define TWSI_CTRL_LD_EXIST 0x400000
#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 41a3c9804427..a6f9142b9048 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -71,12 +71,12 @@ config BCMGENET
Broadcom BCM7xxx Set Top Box family chipset.
config BNX2
- tristate "QLogic NetXtremeII support"
+ tristate "QLogic bnx2 support"
depends on PCI
select CRC32
select FW_LOADER
---help---
- This driver supports QLogic NetXtremeII gigabit Ethernet cards.
+ This driver supports QLogic bnx2 gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called bnx2. This is recommended.
@@ -87,8 +87,8 @@ config CNIC
select BNX2
select UIO
---help---
- This driver supports offload features of QLogic NetXtremeII
- gigabit Ethernet cards.
+ This driver supports offload features of QLogic bnx2 gigabit
+ Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called cnic. This is recommended.
@@ -142,7 +142,7 @@ config BNX2X_SRIOV
config BGMAC
tristate "BCMA bus GBit core support"
- depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
+ depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
select PHYLIB
---help---
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index bd5916a60cb5..77363d680532 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -400,7 +400,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
}
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
char buf[20];
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 7e3d87a88c76..e2c043eabbf3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters {
u32 jbr; /* RO # of xmited jabber count*/
u32 bytes; /* RO # of xmited byte count */
u32 pok; /* RO # of xmited good pkt */
- u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
};
struct bcm_sysport_mib {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0469f72c6e7e..21e3c38c7c75 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -14,9 +14,10 @@
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
static const struct bcma_device_id bgmac_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
@@ -114,54 +115,89 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}
+static void
+bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+ int i, int len, u32 ctl0)
+{
+ struct bgmac_slot_info *slot;
+ struct bgmac_dma_desc *dma_desc;
+ u32 ctl1;
+
+ if (i == BGMAC_TX_RING_SLOTS - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+
+ ctl1 = len & BGMAC_DESC_CTL1_LEN;
+
+ slot = &ring->slots[i];
+ dma_desc = &ring->cpu_base[i];
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
struct sk_buff *skb)
{
struct device *dma_dev = bgmac->core->dma_dev;
struct net_device *net_dev = bgmac->net_dev;
- struct bgmac_dma_desc *dma_desc;
- struct bgmac_slot_info *slot;
- u32 ctl0, ctl1;
- int free_slots;
+ int index = ring->end % BGMAC_TX_RING_SLOTS;
+ struct bgmac_slot_info *slot = &ring->slots[index];
+ int nr_frags;
+ u32 flags;
+ int i;
if (skb->len > BGMAC_DESC_CTL1_LEN) {
bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
- goto err_stop_drop;
+ goto err_drop;
}
- if (ring->start <= ring->end)
- free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
- else
- free_slots = ring->start - ring->end;
- if (free_slots == 1) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ skb_checksum_help(skb);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* ring->end - ring->start will return the number of valid slots,
+ * even when ring->end overflows
+ */
+ if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
netif_stop_queue(net_dev);
return NETDEV_TX_BUSY;
}
- slot = &ring->slots[ring->end];
- slot->skb = skb;
- slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
+ slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, slot->dma_addr)) {
- bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
- ring->mmio_base);
- goto err_stop_drop;
- }
+ if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+ goto err_dma_head;
- ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
- if (ring->end == ring->num_slots - 1)
- ctl0 |= BGMAC_DESC_CTL0_EOT;
- ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
+ flags = BGMAC_DESC_CTL0_SOF;
+ if (!nr_frags)
+ flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
- dma_desc = ring->cpu_base;
- dma_desc += ring->end;
- dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
- dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
- dma_desc->ctl0 = cpu_to_le32(ctl0);
- dma_desc->ctl1 = cpu_to_le32(ctl1);
+ bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
+ flags = 0;
+
+ for (i = 0; i < nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
+
+ index = (index + 1) % BGMAC_TX_RING_SLOTS;
+ slot = &ring->slots[index];
+ slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+ goto err_dma;
+
+ if (i == nr_frags - 1)
+ flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
+
+ bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
+ }
+ slot->skb = skb;
+ ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
wmb();
@@ -169,20 +205,34 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
/* Increase ring->end to point empty slot. We tell hardware the first
* slot it should *not* read.
*/
- if (++ring->end >= BGMAC_TX_RING_SLOTS)
- ring->end = 0;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
ring->index_base +
- ring->end * sizeof(struct bgmac_dma_desc));
+ (ring->end % BGMAC_TX_RING_SLOTS) *
+ sizeof(struct bgmac_dma_desc));
- /* Always keep one slot free to allow detecting bugged calls. */
- if (--free_slots == 1)
+ if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
netif_stop_queue(net_dev);
return NETDEV_TX_OK;
-err_stop_drop:
- netif_stop_queue(net_dev);
+err_dma:
+ dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ while (i > 0) {
+ int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
+ struct bgmac_slot_info *slot = &ring->slots[index];
+ u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
+ int len = ctl1 & BGMAC_DESC_CTL1_LEN;
+
+ dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
+ }
+
+err_dma_head:
+ bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+ ring->mmio_base);
+
+err_drop:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -202,34 +252,45 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
empty_slot &= BGMAC_DMA_TX_STATDPTR;
empty_slot /= sizeof(struct bgmac_dma_desc);
- while (ring->start != empty_slot) {
- struct bgmac_slot_info *slot = &ring->slots[ring->start];
+ while (ring->start != ring->end) {
+ int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
+ struct bgmac_slot_info *slot = &ring->slots[slot_idx];
+ u32 ctl1;
+ int len;
- if (slot->skb) {
+ if (slot_idx == empty_slot)
+ break;
+
+ ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
+ len = ctl1 & BGMAC_DESC_CTL1_LEN;
+ if (ctl1 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
- dma_unmap_single(dma_dev, slot->dma_addr,
- slot->skb->len, DMA_TO_DEVICE);
- slot->dma_addr = 0;
+ dma_unmap_single(dma_dev, slot->dma_addr, len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, slot->dma_addr, len,
+ DMA_TO_DEVICE);
+ if (slot->skb) {
bytes_compl += slot->skb->len;
pkts_compl++;
/* Free memory! :) */
dev_kfree_skb(slot->skb);
slot->skb = NULL;
- } else {
- bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
- ring->start, ring->end);
}
- if (++ring->start >= BGMAC_TX_RING_SLOTS)
- ring->start = 0;
+ slot->dma_addr = 0;
+ ring->start++;
freed = true;
}
+ if (!pkts_compl)
+ return;
+
netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
- if (freed && netif_queue_stopped(bgmac->net_dev))
+ if (netif_queue_stopped(bgmac->net_dev))
netif_wake_queue(bgmac->net_dev);
}
@@ -275,43 +336,53 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
struct bgmac_slot_info *slot)
{
struct device *dma_dev = bgmac->core->dma_dev;
- struct sk_buff *skb;
dma_addr_t dma_addr;
struct bgmac_rx_header *rx;
+ void *buf;
/* Alloc skb */
- skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!skb)
+ buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
+ if (!buf)
return -ENOMEM;
/* Poison - if everything goes fine, hardware will overwrite it */
- rx = (struct bgmac_rx_header *)skb->data;
+ rx = buf + BGMAC_RX_BUF_OFFSET;
rx->len = cpu_to_le16(0xdead);
rx->flags = cpu_to_le16(0xbeef);
/* Map skb for the DMA */
- dma_addr = dma_map_single(dma_dev, skb->data,
+ dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
bgmac_err(bgmac, "DMA mapping error\n");
- dev_kfree_skb(skb);
+ put_page(virt_to_head_page(buf));
return -ENOMEM;
}
/* Update the slot */
- slot->skb = skb;
+ slot->buf = buf;
slot->dma_addr = dma_addr;
return 0;
}
+static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ dma_wmb();
+
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->index_base +
+ ring->end * sizeof(struct bgmac_dma_desc));
+}
+
static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
struct bgmac_dma_ring *ring, int desc_idx)
{
struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
u32 ctl0 = 0, ctl1 = 0;
- if (desc_idx == ring->num_slots - 1)
+ if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
ctl0 |= BGMAC_DESC_CTL0_EOT;
ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
/* Is there any BGMAC device that requires extension? */
@@ -323,6 +394,21 @@ static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
dma_desc->ctl0 = cpu_to_le32(ctl0);
dma_desc->ctl1 = cpu_to_le32(ctl1);
+
+ ring->end = desc_idx;
+}
+
+static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
+ struct bgmac_slot_info *slot)
+{
+ struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
+
+ dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+ dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
}
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
@@ -337,70 +423,62 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
end_slot &= BGMAC_DMA_RX_STATDPTR;
end_slot /= sizeof(struct bgmac_dma_desc);
- ring->end = end_slot;
-
- while (ring->start != ring->end) {
+ while (ring->start != end_slot) {
struct device *dma_dev = bgmac->core->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start];
- struct sk_buff *skb = slot->skb;
- struct bgmac_rx_header *rx;
+ struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
+ struct sk_buff *skb;
+ void *buf = slot->buf;
+ dma_addr_t dma_addr = slot->dma_addr;
u16 len, flags;
- /* Unmap buffer to make it accessible to the CPU */
- dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ do {
+ /* Prepare new skb as replacement */
+ if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
+ bgmac_dma_rx_poison_buf(dma_dev, slot);
+ break;
+ }
- /* Get info from the header */
- rx = (struct bgmac_rx_header *)skb->data;
- len = le16_to_cpu(rx->len);
- flags = le16_to_cpu(rx->flags);
+ /* Unmap buffer to make it accessible to the CPU */
+ dma_unmap_single(dma_dev, dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
- do {
- dma_addr_t old_dma_addr = slot->dma_addr;
- int err;
+ /* Get info from the header */
+ len = le16_to_cpu(rx->len);
+ flags = le16_to_cpu(rx->flags);
/* Check for poison and drop or pass the packet */
if (len == 0xdead && flags == 0xbeef) {
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
- dma_sync_single_for_device(dma_dev,
- slot->dma_addr,
- BGMAC_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ put_page(virt_to_head_page(buf));
break;
}
- /* Omit CRC. */
- len -= ETH_FCS_LEN;
-
- /* Prepare new skb as replacement */
- err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
- if (err) {
- /* Poison the old skb */
- rx->len = cpu_to_le16(0xdead);
- rx->flags = cpu_to_le16(0xbeef);
-
- dma_sync_single_for_device(dma_dev,
- slot->dma_addr,
- BGMAC_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ if (len > BGMAC_RX_ALLOC_SIZE) {
+ bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
+ ring->start);
+ put_page(virt_to_head_page(buf));
break;
}
- bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
- /* Unmap old skb, we'll pass it to the netfif */
- dma_unmap_single(dma_dev, old_dma_addr,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ /* Omit CRC. */
+ len -= ETH_FCS_LEN;
- skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
- skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
+ skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
+ skb_put(skb, BGMAC_RX_FRAME_OFFSET +
+ BGMAC_RX_BUF_OFFSET + len);
+ skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
+ BGMAC_RX_BUF_OFFSET);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bgmac->net_dev);
- netif_receive_skb(skb);
+ napi_gro_receive(&bgmac->napi, skb);
handled++;
} while (0);
+ bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
+
if (++ring->start >= BGMAC_RX_RING_SLOTS)
ring->start = 0;
@@ -408,6 +486,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
break;
}
+ bgmac_dma_rx_update_index(bgmac, ring);
+
return handled;
}
@@ -433,40 +513,90 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac,
return false;
}
-static void bgmac_dma_ring_free(struct bgmac *bgmac,
- struct bgmac_dma_ring *ring)
+static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
{
struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_dma_desc *dma_desc = ring->cpu_base;
struct bgmac_slot_info *slot;
- int size;
int i;
- for (i = 0; i < ring->num_slots; i++) {
+ for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
+ int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+
slot = &ring->slots[i];
- if (slot->skb) {
- if (slot->dma_addr)
- dma_unmap_single(dma_dev, slot->dma_addr,
- slot->skb->len, DMA_TO_DEVICE);
- dev_kfree_skb(slot->skb);
- }
+ dev_kfree_skb(slot->skb);
+
+ if (!slot->dma_addr)
+ continue;
+
+ if (slot->skb)
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, slot->dma_addr,
+ len, DMA_TO_DEVICE);
}
+}
- if (ring->cpu_base) {
- /* Free ring of descriptors */
- size = ring->num_slots * sizeof(struct bgmac_dma_desc);
- dma_free_coherent(dma_dev, size, ring->cpu_base,
- ring->dma_base);
+static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot;
+ int i;
+
+ for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
+ slot = &ring->slots[i];
+ if (!slot->dma_addr)
+ continue;
+
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(virt_to_head_page(slot->buf));
+ slot->dma_addr = 0;
}
}
+static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring,
+ int num_slots)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ int size;
+
+ if (!ring->cpu_base)
+ return;
+
+ /* Free ring of descriptors */
+ size = num_slots * sizeof(struct bgmac_dma_desc);
+ dma_free_coherent(dma_dev, size, ring->cpu_base,
+ ring->dma_base);
+}
+
+static void bgmac_dma_cleanup(struct bgmac *bgmac)
+{
+ int i;
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+ bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+ bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
+}
+
static void bgmac_dma_free(struct bgmac *bgmac)
{
int i;
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
- bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
+ bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
+ BGMAC_TX_RING_SLOTS);
+
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
- bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+ bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
+ BGMAC_RX_RING_SLOTS);
}
static int bgmac_dma_alloc(struct bgmac *bgmac)
@@ -489,11 +619,10 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
ring = &bgmac->tx_ring[i];
- ring->num_slots = BGMAC_TX_RING_SLOTS;
ring->mmio_base = ring_base[i];
/* Alloc ring of descriptors */
- size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
&ring->dma_base,
GFP_KERNEL);
@@ -514,14 +643,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
}
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
- int j;
-
ring = &bgmac->rx_ring[i];
- ring->num_slots = BGMAC_RX_RING_SLOTS;
ring->mmio_base = ring_base[i];
/* Alloc ring of descriptors */
- size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
&ring->dma_base,
GFP_KERNEL);
@@ -538,15 +664,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring->index_base = lower_32_bits(ring->dma_base);
else
ring->index_base = 0;
-
- /* Alloc RX slots */
- for (j = 0; j < ring->num_slots; j++) {
- err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
- if (err) {
- bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
- goto err_dma_free;
- }
- }
}
return 0;
@@ -556,10 +673,10 @@ err_dma_free:
return -ENOMEM;
}
-static void bgmac_dma_init(struct bgmac *bgmac)
+static int bgmac_dma_init(struct bgmac *bgmac)
{
struct bgmac_dma_ring *ring;
- int i;
+ int i, err;
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
ring = &bgmac->tx_ring[i];
@@ -591,16 +708,24 @@ static void bgmac_dma_init(struct bgmac *bgmac)
if (ring->unaligned)
bgmac_dma_rx_enable(bgmac, ring);
- for (j = 0; j < ring->num_slots; j++)
- bgmac_dma_rx_setup_desc(bgmac, ring, j);
-
- bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
- ring->index_base +
- ring->num_slots * sizeof(struct bgmac_dma_desc));
-
ring->start = 0;
ring->end = 0;
+ for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
+ err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
+ if (err)
+ goto error;
+
+ bgmac_dma_rx_setup_desc(bgmac, ring, j);
+ }
+
+ bgmac_dma_rx_update_index(bgmac, ring);
}
+
+ return 0;
+
+error:
+ bgmac_dma_cleanup(bgmac);
+ return err;
}
/**************************************************
@@ -1008,8 +1133,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
bgmac_phy_init(bgmac);
netdev_reset_queue(bgmac->net_dev);
-
- bgmac->int_status = 0;
}
static void bgmac_chip_intrs_on(struct bgmac *bgmac)
@@ -1078,11 +1201,8 @@ static void bgmac_enable(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
-static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
+static void bgmac_chip_init(struct bgmac *bgmac)
{
- struct bgmac_dma_ring *ring;
- int i;
-
/* 1 interrupt per received frame */
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
@@ -1100,16 +1220,7 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
- if (full_init) {
- bgmac_dma_init(bgmac);
- if (1) /* FIXME: is there any case we don't want IRQs? */
- bgmac_chip_intrs_on(bgmac);
- } else {
- for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
- ring = &bgmac->rx_ring[i];
- bgmac_dma_rx_enable(bgmac, ring);
- }
- }
+ bgmac_chip_intrs_on(bgmac);
bgmac_enable(bgmac);
}
@@ -1124,14 +1235,13 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
if (!int_status)
return IRQ_NONE;
- /* Ack */
- bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);
+ int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
+ if (int_status)
+ bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);
/* Disable new interrupts until handling existing ones */
bgmac_chip_intrs_off(bgmac);
- bgmac->int_status = int_status;
-
napi_schedule(&bgmac->napi);
return IRQ_HANDLED;
@@ -1140,25 +1250,17 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
static int bgmac_poll(struct napi_struct *napi, int weight)
{
struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
- struct bgmac_dma_ring *ring;
int handled = 0;
- if (bgmac->int_status & BGMAC_IS_TX0) {
- ring = &bgmac->tx_ring[0];
- bgmac_dma_tx_free(bgmac, ring);
- bgmac->int_status &= ~BGMAC_IS_TX0;
- }
+ /* Ack */
+ bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
- if (bgmac->int_status & BGMAC_IS_RX) {
- ring = &bgmac->rx_ring[0];
- handled += bgmac_dma_rx_read(bgmac, ring, weight);
- bgmac->int_status &= ~BGMAC_IS_RX;
- }
+ bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
+ handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);
- if (bgmac->int_status) {
- bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
- bgmac->int_status = 0;
- }
+ /* Poll again if more events arrived in the meantime */
+ if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
+ return weight;
if (handled < weight) {
napi_complete(napi);
@@ -1178,23 +1280,27 @@ static int bgmac_open(struct net_device *net_dev)
int err = 0;
bgmac_chip_reset(bgmac);
+
+ err = bgmac_dma_init(bgmac);
+ if (err)
+ return err;
+
/* Specs say about reclaiming rings here, but we do that in DMA init */
- bgmac_chip_init(bgmac, true);
+ bgmac_chip_init(bgmac);
err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
KBUILD_MODNAME, net_dev);
if (err < 0) {
bgmac_err(bgmac, "IRQ request error: %d!\n", err);
- goto err_out;
+ bgmac_dma_cleanup(bgmac);
+ return err;
}
napi_enable(&bgmac->napi);
phy_start(bgmac->phy_dev);
netif_carrier_on(net_dev);
-
-err_out:
- return err;
+ return 0;
}
static int bgmac_stop(struct net_device *net_dev)
@@ -1210,6 +1316,7 @@ static int bgmac_stop(struct net_device *net_dev)
free_irq(bgmac->core->irq, net_dev);
bgmac_chip_reset(bgmac);
+ bgmac_dma_cleanup(bgmac);
return 0;
}
@@ -1330,13 +1437,46 @@ static void bgmac_adjust_link(struct net_device *net_dev)
}
}
+static int bgmac_fixed_phy_register(struct bgmac *bgmac)
+{
+ struct fixed_phy_status fphy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ };
+ struct phy_device *phy_dev;
+ int err;
+
+ phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ if (!phy_dev || IS_ERR(phy_dev)) {
+ bgmac_err(bgmac, "Failed to register fixed PHY device\n");
+ return -ENODEV;
+ }
+
+ err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err) {
+ bgmac_err(bgmac, "Connecting PHY failed\n");
+ return err;
+ }
+
+ bgmac->phy_dev = phy_dev;
+
+ return err;
+}
+
static int bgmac_mii_register(struct bgmac *bgmac)
{
+ struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
int i, err = 0;
+ if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+ ci->id == BCMA_CHIP_ID_BCM53018)
+ return bgmac_fixed_phy_register(bgmac);
+
mii_bus = mdiobus_alloc();
if (!mii_bus)
return -ENOMEM;
@@ -1517,6 +1657,10 @@ static int bgmac_probe(struct bcma_device *core)
goto err_dma_free;
}
+ net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ net_dev->hw_features = net_dev->features;
+ net_dev->vlan_features = net_dev->features;
+
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 89fa5bc69c51..db27febbb215 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -345,8 +345,8 @@
#define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */
#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */
-#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */
-#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */
+#define BGMAC_DESC_CTL0_EOF 0x40000000 /* End of frame */
+#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */
#define BGMAC_DESC_CTL1_LEN 0x00001FFF
#define BGMAC_PHY_NOREGS 0x1E
@@ -356,12 +356,16 @@
#define BGMAC_MAX_RX_RINGS 1
#define BGMAC_TX_RING_SLOTS 128
-#define BGMAC_RX_RING_SLOTS 512 - 1 /* Why -1? Well, Broadcom does that... */
+#define BGMAC_RX_RING_SLOTS 512
#define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... */
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
+#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
+ BGMAC_RX_FRAME_OFFSET)
#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */
#define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */
@@ -383,7 +387,10 @@
#define ETHER_MAX_LEN 1518
struct bgmac_slot_info {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ void *buf;
+ };
dma_addr_t dma_addr;
};
@@ -409,14 +416,13 @@ enum bgmac_dma_ring_type {
* empty.
*/
struct bgmac_dma_ring {
- u16 num_slots;
- u16 start;
- u16 end;
+ u32 start;
+ u32 end;
- u16 mmio_base;
struct bgmac_dma_desc *cpu_base;
dma_addr_t dma_base;
u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+ u16 mmio_base;
bool unaligned;
struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
@@ -447,7 +453,6 @@ struct bgmac {
/* Int */
u32 int_mask;
- u32 int_status;
/* Current MAC state */
int mac_speed;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 02bf0b86995b..2b66ef3d8217 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1,7 +1,7 @@
-/* bnx2.c: QLogic NX2 network driver.
+/* bnx2.c: QLogic bnx2 network driver.
*
* Copyright (c) 2004-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.5"
-#define DRV_MODULE_RELDATE "December 20, 2013"
+#define DRV_MODULE_VERSION "2.2.6"
+#define DRV_MODULE_RELDATE "January 29, 2014"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -72,10 +72,10 @@
#define TX_TIMEOUT (5*HZ)
static char version[] =
- "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+ "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver");
+MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
@@ -4984,8 +4984,6 @@ bnx2_init_chip(struct bnx2 *bp)
bp->idle_chk_status_idx = 0xffff;
- bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
-
/* Set up how to generate a link change interrupt. */
BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
@@ -7710,17 +7708,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
return 0;
}
-static netdev_features_t
-bnx2_fix_features(struct net_device *dev, netdev_features_t features)
-{
- struct bnx2 *bp = netdev_priv(dev);
-
- if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
- features |= NETIF_F_HW_VLAN_CTAG_RX;
-
- return features;
-}
-
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
@@ -8527,7 +8514,6 @@ static const struct net_device_ops bnx2_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnx2_change_mac_addr,
.ndo_change_mtu = bnx2_change_mtu,
- .ndo_fix_features = bnx2_fix_features,
.ndo_set_features = bnx2_set_features,
.ndo_tx_timeout = bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -8578,6 +8564,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
if ((rc = register_netdev(dev))) {
dev_err(&pdev->dev, "Cannot register net device\n");
goto error;
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 28df35d35893..f92f76c44756 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -1,7 +1,7 @@
-/* bnx2.h: QLogic NX2 network driver.
+/* bnx2.h: QLogic bnx2 network driver.
*
* Copyright (c) 2004-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h
index 7db79c28b5ff..b0f2ccadaffd 100644
--- a/drivers/net/ethernet/broadcom/bnx2_fw.h
+++ b/drivers/net/ethernet/broadcom/bnx2_fw.h
@@ -1,7 +1,7 @@
-/* bnx2_fw.h: QLogic NX2 network driver.
+/* bnx2_fw.h: QLogic bnx2 network driver.
*
* Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4085c4b31047..a3b0f7a0c61e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
};
enum bnx2x_tpa_mode_t {
+ TPA_MODE_DISABLED,
TPA_MODE_LRO,
TPA_MODE_GRO
};
@@ -531,20 +532,8 @@ struct bnx2x_fastpath {
struct napi_struct napi;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define BNX2X_FP_STATE_IDLE 0
-#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
-#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
-#define BNX2X_FP_STATE_DISABLED (1 << 2)
-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
-#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
-#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
- /* protect state */
- spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ unsigned long busy_poll_state;
+#endif
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
@@ -601,7 +590,6 @@ struct bnx2x_fastpath {
/* TPA related */
struct bnx2x_agg_info *tpa_info;
- u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+
+enum bnx2x_fp_state {
+ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
+
+ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
+ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
+
+ BNX2X_STATE_FP_POLL_BIT = 2,
+ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
+
+ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
+};
+
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
{
- spin_lock_init(&fp->lock);
- fp->state = BNX2X_FP_STATE_IDLE;
+ WRITE_ONCE(fp->busy_poll_state, 0);
}
/* called from the device poll routine to get ownership of a FP */
static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
{
- bool rc = true;
-
- spin_lock_bh(&fp->lock);
- if (fp->state & BNX2X_FP_LOCKED) {
- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
- rc = false;
- } else {
- /* we don't care if someone yielded */
- fp->state = BNX2X_FP_STATE_NAPI;
+ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
+
+ while (1) {
+ switch (old) {
+ case BNX2X_STATE_FP_POLL:
+ /* make sure bnx2x_fp_lock_poll() won't starve us */
+ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
+ &fp->busy_poll_state);
+ /* fallthrough */
+ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
+ return false;
+ default:
+ break;
+ }
+ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
+ if (unlikely(prev != old)) {
+ old = prev;
+ continue;
+ }
+ return true;
}
- spin_unlock_bh(&fp->lock);
- return rc;
}
-/* returns true is someone tried to get the FP while napi had it */
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
{
- bool rc = false;
-
- spin_lock_bh(&fp->lock);
- WARN_ON(fp->state &
- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
-
- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
- rc = true;
-
- /* state ==> idle, unless currently disabled */
- fp->state &= BNX2X_FP_STATE_DISABLED;
- spin_unlock_bh(&fp->lock);
- return rc;
+ smp_wmb();
+ fp->busy_poll_state = 0;
}
/* called from bnx2x_low_latency_poll() */
static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
{
- bool rc = true;
-
- spin_lock_bh(&fp->lock);
- if ((fp->state & BNX2X_FP_LOCKED)) {
- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
- rc = false;
- } else {
- /* preserve yield marks */
- fp->state |= BNX2X_FP_STATE_POLL;
- }
- spin_unlock_bh(&fp->lock);
- return rc;
+ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
}
-/* returns true if someone tried to get the FP while it was locked */
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
{
- bool rc = false;
-
- spin_lock_bh(&fp->lock);
- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-
- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
- rc = true;
-
- /* state ==> idle, unless currently disabled */
- fp->state &= BNX2X_FP_STATE_DISABLED;
- spin_unlock_bh(&fp->lock);
- return rc;
+ smp_mb__before_atomic();
+ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
}
-/* true if a socket is polling, even if it did not get the lock */
+/* true if a socket is polling */
static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
- return fp->state & BNX2X_FP_USER_PEND;
+ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
}
/* false if fp is currently owned */
static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
{
- int rc = true;
-
- spin_lock_bh(&fp->lock);
- if (fp->state & BNX2X_FP_OWNED)
- rc = false;
- fp->state |= BNX2X_FP_STATE_DISABLED;
- spin_unlock_bh(&fp->lock);
+ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
+ return !bnx2x_fp_ll_polling(fp);
- return rc;
}
#else
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
{
}
@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
return true;
}
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
{
- return false;
}
static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
return false;
}
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
{
- return false;
}
static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
@@ -1580,9 +1545,7 @@ struct bnx2x {
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
-#define TPA_ENABLE_FLAG (1 << 8)
#define NO_MCP_FLAG (1 << 9)
-#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
#define NO_ISCSI_OOO_FLAG (1 << 13)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0a9faa134a9a..ec56a9b65dc3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
- if (fp->disable_tpa &&
+ if (fp->mode == TPA_MODE_DISABLED &&
(CQE_TYPE_START(cqe_fp_type) ||
CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
+ BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
CQE_TYPE(cqe_fp_type));
#endif
@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
/* Fill the per-aggregation pool */
for (i = 0; i < MAX_AGG_QS(bp); i++) {
struct bnx2x_agg_info *tpa_info =
@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
bnx2x_free_tpa_pool(bp, fp, i);
- fp->disable_tpa = 1;
+ fp->mode = TPA_MODE_DISABLED;
break;
}
dma_unmap_addr_set(first_buf, mapping, 0);
@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
ring_prod);
bnx2x_free_tpa_pool(bp, fp,
MAX_AGG_QS(bp));
- fp->disable_tpa = 1;
+ fp->mode = TPA_MODE_DISABLED;
ring_prod = 0;
break;
}
@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
bnx2x_free_rx_bds(fp);
- if (!fp->disable_tpa)
+ if (fp->mode != TPA_MODE_DISABLED)
bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
}
}
@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
int i;
for_each_rx_queue_cnic(bp, i) {
- bnx2x_fp_init_lock(&bp->fp[i]);
+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
napi_enable(&bnx2x_fp(bp, i, napi));
}
}
@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
int i;
for_each_eth_queue(bp, i) {
- bnx2x_fp_init_lock(&bp->fp[i]);
+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
napi_enable(&bnx2x_fp(bp, i, napi));
}
}
@@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
*/
- fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
- (bp->flags & GRO_ENABLE_FLAG &&
- bnx2x_mtu_allows_gro(bp->dev->mtu)));
- if (bp->flags & TPA_ENABLE_FLAG)
+ if (bp->dev->features & NETIF_F_LRO)
fp->mode = TPA_MODE_LRO;
- else if (bp->flags & GRO_ENABLE_FLAG)
+ else if (bp->dev->features & NETIF_F_GRO &&
+ bnx2x_mtu_allows_gro(bp->dev->mtu))
fp->mode = TPA_MODE_GRO;
+ else
+ fp->mode = TPA_MODE_DISABLED;
- /* We don't want TPA on an FCoE L2 ring */
- if (IS_FCOE_FP(fp))
- fp->disable_tpa = 1;
+ /* We don't want TPA if it's disabled in bp
+ * or if this is an FCoE L2 ring.
+ */
+ if (bp->disable_tpa || IS_FCOE_FP(fp))
+ fp->mode = TPA_MODE_DISABLED;
}
int bnx2x_load_cnic(struct bnx2x *bp)
@@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
- * Also set fp->disable_tpa and txdata_ptr.
+ * Also set fp->mode and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
@@ -3191,9 +3193,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
}
}
+ bnx2x_fp_unlock_napi(fp);
+
/* Fall out from the NAPI loop if needed */
- if (!bnx2x_fp_unlock_napi(fp) &&
- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
/* No need to update SB for FCoE L2 ring as long as
* it's connected to the default SB and the SB
@@ -3246,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
if ((bp->state == BNX2X_STATE_CLOSED) ||
(bp->state == BNX2X_STATE_ERROR) ||
- (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
+ (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
return LL_FLUSH_FAILED;
if (!bnx2x_fp_lock_poll(fp))
@@ -4542,7 +4545,7 @@ alloc_mem_err:
* In these cases we disable the queue
* Min size is different for OOO, TPA and non-TPA queues
*/
- if (ring_size < (fp->disable_tpa ?
+ if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
/* release memory allocated for this queue */
bnx2x_free_fp_mem_at(bp, index);
@@ -4783,6 +4786,11 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2x *bp = netdev_priv(dev);
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
+ return -EPERM;
+ }
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
BNX2X_ERR("Can't perform change MTU during parity recovery\n");
return -EAGAIN;
@@ -4808,66 +4816,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ if (pci_num_vf(bp->pdev)) {
+ netdev_features_t changed = dev->features ^ features;
+
+ /* Revert the requested changes in features if they
+ * would require internal reload of PF in bnx2x_set_features().
+ */
+ if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
+ features &= ~NETIF_F_RXCSUM;
+ features |= dev->features & NETIF_F_RXCSUM;
+ }
+
+ if (changed & NETIF_F_LOOPBACK) {
+ features &= ~NETIF_F_LOOPBACK;
+ features |= dev->features & NETIF_F_LOOPBACK;
+ }
+ }
+
/* TPA requires Rx CSUM offloading */
if (!(features & NETIF_F_RXCSUM)) {
features &= ~NETIF_F_LRO;
features &= ~NETIF_F_GRO;
}
- /* Note: do not disable SW GRO in kernel when HW GRO is off */
- if (bp->disable_tpa)
- features &= ~NETIF_F_LRO;
-
return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 flags = bp->flags;
- u32 changes;
+ netdev_features_t changes = features ^ dev->features;
bool bnx2x_reload = false;
+ int rc;
- if (features & NETIF_F_LRO)
- flags |= TPA_ENABLE_FLAG;
- else
- flags &= ~TPA_ENABLE_FLAG;
-
- if (features & NETIF_F_GRO)
- flags |= GRO_ENABLE_FLAG;
- else
- flags &= ~GRO_ENABLE_FLAG;
-
- if (features & NETIF_F_LOOPBACK) {
- if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
- bp->link_params.loopback_mode = LOOPBACK_BMAC;
- bnx2x_reload = true;
- }
- } else {
- if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
- bp->link_params.loopback_mode = LOOPBACK_NONE;
- bnx2x_reload = true;
+ /* VFs or non-SRIOV PFs should be able to change the loopback feature */
+ if (!pci_num_vf(bp->pdev)) {
+ if (features & NETIF_F_LOOPBACK) {
+ if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+ bnx2x_reload = true;
+ }
+ } else {
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+ bnx2x_reload = true;
+ }
}
}
- changes = flags ^ bp->flags;
-
/* if GRO is changed while LRO is enabled, don't force a reload */
- if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
- changes &= ~GRO_ENABLE_FLAG;
+ if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
+ changes &= ~NETIF_F_GRO;
/* if GRO is changed while HW TPA is off, don't force a reload */
- if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
- changes &= ~GRO_ENABLE_FLAG;
+ if ((changes & NETIF_F_GRO) && bp->disable_tpa)
+ changes &= ~NETIF_F_GRO;
if (changes)
bnx2x_reload = true;
- bp->flags = flags;
-
if (bnx2x_reload) {
- if (bp->recovery_state == BNX2X_RECOVERY_DONE)
- return bnx2x_reload_if_running(dev);
+ if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
+ dev->features = features;
+ rc = bnx2x_reload_if_running(dev);
+ return rc ? rc : 1;
+ }
/* else: bnx2x_nic_load() will be called at end of recovery */
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index adcacda7af7b..d7a71758e876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
- if (fp->disable_tpa)
+ if (fp->mode == TPA_MODE_DISABLED)
return;
for (i = 0; i < last; i++)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ffe4e003e636..48ed005ba73f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev,
"set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
ering->rx_pending, ering->tx_pending);
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+ "VFs are enabled, can not change ring parameters\n");
+ return -EPERM;
+ }
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
DP(BNX2X_MSG_ETHTOOL,
"Handling parity error recovery. Try again later\n");
@@ -2446,7 +2452,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
}
packet = skb_put(skb, pkt_size);
memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
- memset(packet + ETH_ALEN, 0, ETH_ALEN);
+ eth_zero_addr(packet + ETH_ALEN);
memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
for (i = ETH_HLEN; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
@@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev,
u8 is_serdes, link_up;
int rc, cnt = 0;
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+ "VFs are enabled, can not perform self test\n");
+ return;
+ }
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
"Handling parity error recovery. Try again later\n");
@@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev,
channels->rx_count, channels->tx_count, channels->other_count,
channels->combined_count);
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
+ return -EPERM;
+ }
+
/* We don't support separate rx / tx channels.
* We don't allow setting 'other' channels.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 583591d52497..058bc7328220 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -521,6 +521,17 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
*/
#define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
#define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+ /* Set non-default values for TXFIR in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_IFIR_MASK 0x00F00000
+ #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT 20
+
+ /* Set non-default values for IPREDRIVER in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK 0x0F000000
+ #define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT 24
+
+ /* Set non-default values for POST2 in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_POST2_MASK 0xF0000000
+ #define PORT_HW_CFG_TX_DRV_POST2_SHIFT 28
u32 reserved0[5]; /* 0x17c */
@@ -2247,8 +2258,8 @@ struct shmem2_region {
#define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000
u32 reserved5[2];
- u32 reserved6[PORT_MAX];
-
+ u32 link_change_count[PORT_MAX]; /* Offset 0x160-0x164 */
+ #define LINK_CHANGE_COUNT_MASK 0xff /* Offset 0x168 */
/* driver version for each personality */
struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index bd90e50bd8e6..d6e1975b7b69 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -278,7 +278,7 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
}
-/* congestion managment port init api description
+/* congestion management port init api description
* the api works as follows:
* the driver should pass the cmng_init_input struct, the port_init function
* will prepare the required internal ram structure which will be passed back
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 778e4cd32571..21a0d6afca4a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -195,6 +195,10 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
#define MAX_PACKET_SIZE (9700)
#define MAX_KR_LINK_RETRY 4
+#define DEFAULT_TX_DRV_BRDCT 2
+#define DEFAULT_TX_DRV_IFIR 0
+#define DEFAULT_TX_DRV_POST2 3
+#define DEFAULT_TX_DRV_IPRE_DRIVER 6
/**********************************************************/
/* INTERFACE */
@@ -563,7 +567,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
* Will return the NIG ETS registers to init values.Except
* credit_upper_bound.
* That isn't used in this configuration (No WFQ is enabled) and will be
-* configured acording to spec
+* configured according to spec
*.
******************************************************************************/
static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
@@ -680,7 +684,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
* Will return the PBF ETS registers to init values.Except
* credit_upper_bound.
* That isn't used in this configuration (No WFQ is enabled) and will be
-* configured acording to spec
+* configured according to spec
*.
******************************************************************************/
static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
@@ -738,7 +742,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
}
/******************************************************************************
* Description:
-* E3B0 disable will return basicly the values to init values.
+* E3B0 disable will return basically the values to init values.
*.
******************************************************************************/
static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
@@ -761,7 +765,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
/******************************************************************************
* Description:
-* Disable will return basicly the values to init values.
+* Disable will return basically the values to init values.
*
******************************************************************************/
int bnx2x_ets_disabled(struct link_params *params,
@@ -2938,7 +2942,7 @@ static int bnx2x_eee_initial_config(struct link_params *params,
{
vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
- /* Propogate params' bits --> vars (for migration exposure) */
+ /* Propagate params' bits --> vars (for migration exposure) */
if (params->eee_mode & EEE_MODE_ENABLE_LPI)
vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
else
@@ -3595,10 +3599,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
-#define WC_TX_DRIVER(post2, idriver, ipre) \
+#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \
((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
(idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
- (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \
+ (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET))
#define WC_TX_FIR(post, main, pre) \
((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
@@ -3765,12 +3770,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- WC_TX_DRIVER(0x02, 0x06, 0x09));
+ WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
/* Configure the next lane if dual mode */
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
- WC_TX_DRIVER(0x02, 0x06, 0x09));
+ WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3933,6 +3938,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+ u32 ifir_val, ipost2_val, ipre_driver_val;
/* Hold rxSeqStart */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
@@ -3978,7 +3984,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
- tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0);
} else {
cfg_tap_val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -3987,10 +3993,6 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
- tx_drv_brdct = (cfg_tap_val &
- PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
- PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
-
misc1_val |= 0x9;
/* TAP values are controlled by nvram, if value there isn't 0 */
@@ -3999,11 +4001,36 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
else
tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
- if (tx_drv_brdct)
- tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
- 0x06);
- else
- tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
+ ifir_val = DEFAULT_TX_DRV_IFIR;
+ ipost2_val = DEFAULT_TX_DRV_POST2;
+ ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER;
+ tx_drv_brdct = DEFAULT_TX_DRV_BRDCT;
+
+ /* If any of IFIR/IPRE_DRIVER/POST2 is set, apply the whole
+ * configuration.
+ */
+ if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK |
+ PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK |
+ PORT_HW_CFG_TX_DRV_POST2_MASK)) {
+ ifir_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_IFIR_MASK) >>
+ PORT_HW_CFG_TX_DRV_IFIR_SHIFT;
+ ipre_driver_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK)
+ >> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT;
+ ipost2_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_POST2_MASK) >>
+ PORT_HW_CFG_TX_DRV_POST2_SHIFT;
+ }
+
+ if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) {
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+ }
+
+ tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct,
+ ipre_driver_val, ifir_val);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4144,7 +4171,7 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- WC_TX_DRIVER(0x02, 0x02, 0x02));
+ WC_TX_DRIVER(0x02, 0x02, 0x02, 0));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -6731,6 +6758,25 @@ static int bnx2x_update_link_up(struct link_params *params,
msleep(20);
return rc;
}
+
+static void bnx2x_chng_link_count(struct link_params *params, bool clear)
+{
+ struct bnx2x *bp = params->bp;
+ u32 addr, val;
+
+ /* Verify the link_change_count is supported by the MFW */
+ if (!(SHMEM2_HAS(bp, link_change_count)))
+ return;
+
+ addr = params->shmem2_base +
+ offsetof(struct shmem2_region, link_change_count[params->port]);
+ if (clear)
+ val = 0;
+ else
+ val = REG_RD(bp, addr) + 1;
+ REG_WR(bp, addr, val);
+}
+
/* The bnx2x_link_update function should be called upon link
* interrupt.
* Link is considered up as follows:
@@ -6749,6 +6795,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
struct link_vars phy_vars[MAX_PHYS];
u8 port = params->port;
u8 link_10g_plus, phy_index;
+ u32 prev_link_status = vars->link_status;
u8 ext_phy_link_up = 0, cur_link_up;
int rc = 0;
u8 is_mi_int = 0;
@@ -6988,6 +7035,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
else
rc = bnx2x_update_link_down(params, vars);
+ if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP)
+ bnx2x_chng_link_count(params, false);
+
/* Update MCP link status was changed */
if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
@@ -12631,6 +12681,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params->link_flags = PHY_INITIALIZED;
/* Driver opens NIG-BRB filters */
bnx2x_set_rx_filter(params, 1);
+ bnx2x_chng_link_count(params, true);
/* Check if link flap can be avoided */
lfa_status = bnx2x_check_lfa(params);
@@ -12705,6 +12756,7 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* Disable attentions */
vars->link_status = 0;
+ bnx2x_chng_link_count(params, true);
bnx2x_update_mng(params, vars->link_status);
vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
SHMEM_EEE_ACTIVE_BIT);
@@ -13308,7 +13360,7 @@ static void bnx2x_check_over_curr(struct link_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-/* Returns 0 if no change occured since last check; 1 otherwise. */
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
static u8 bnx2x_analyze_link_error(struct link_params *params,
struct link_vars *vars, u32 status,
u32 phy_flag, u32 link_flag, u8 notify)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1ec635f54994..fd52ce95127e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
}
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
__set_bit(BNX2X_Q_FLG_TPA, &flags);
__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
if (fp->mode == TPA_MODE_GRO)
@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
u16 sge_sz = 0;
u16 tpa_agg_size = 0;
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
pause->sge_th_lo = SGE_TH_LO(bp);
pause->sge_th_hi = SGE_TH_HI(bp);
@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
/* This flag is relevant for E1x only.
* E2 doesn't have a TPA configuration in a function level.
*/
- flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+ flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp);
@@ -11556,13 +11556,13 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
/* Disable iSCSI OOO if MAC configuration is invalid. */
if (!is_valid_ether_addr(iscsi_mac)) {
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
- memset(iscsi_mac, 0, ETH_ALEN);
+ eth_zero_addr(iscsi_mac);
}
/* Disable FCoE if MAC configuration is invalid. */
if (!is_valid_ether_addr(fip_mac)) {
bp->flags |= NO_FCOE_FLAG;
- memset(bp->fip_mac, 0, ETH_ALEN);
+ eth_zero_addr(bp->fip_mac);
}
}
@@ -11573,7 +11573,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
int port = BP_PORT(bp);
/* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
+ eth_zero_addr(bp->dev->dev_addr);
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -11620,7 +11620,7 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
u32 cfg;
if (IS_VF(bp))
- return 0;
+ return false;
if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
/* Take function: tmp = func */
@@ -11660,6 +11660,13 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
u32 val = 0, val2 = 0;
int rc = 0;
+ /* Validate that chip access is feasible */
+ if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
+ dev_err(&bp->pdev->dev,
+ "Chip read returns all Fs. Preventing probe from continuing\n");
+ return -EINVAL;
+ }
+
bnx2x_get_common_hwinfo(bp);
/*
@@ -12100,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
+ bp->dev->hw_features &= ~NETIF_F_LRO;
bp->dev->features &= ~NETIF_F_LRO;
- } else {
- bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
- bp->dev->features |= NETIF_F_LRO;
}
if (CHIP_IS_E1(bp))
@@ -12566,6 +12570,7 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
+ features = vlan_features_check(skb, features);
return vxlan_features_check(skb, features);
}
@@ -13287,30 +13292,27 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
- u32 remainder;
ns = timecounter_read(&bp->timecounter);
DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
- ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13342,8 +13344,8 @@ static void bnx2x_register_phc(struct bnx2x *bp)
bp->ptp_clock_info.pps = 0;
bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
- bp->ptp_clock_info.gettime = bnx2x_ptp_gettime;
- bp->ptp_clock_info.settime = bnx2x_ptp_settime;
+ bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
+ bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
bp->ptp_clock_info.enable = bnx2x_ptp_enable;
bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
@@ -13366,6 +13368,17 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bool is_vf;
int cnic_cnt;
+ /* Management FW 'remembers' living interfaces. Allow it some time
+ * to forget previously living interfaces, allowing a proper re-load.
+ */
+ if (is_kdump_kernel()) {
+ ktime_t now = ktime_get_boottime();
+ ktime_t fw_ready_time = ktime_set(5, 0);
+
+ if (ktime_before(now, fw_ready_time))
+ msleep(ktime_ms_delta(fw_ready_time, now));
+ }
+
/* An estimated maximum supported CoS number according to the chip
* version.
* We will try to roughly estimate the maximum number of CoSes this chip
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 6fe547c93e74..49d511092c82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -29,7 +29,7 @@
#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
/* [RW 1] Initiate the ATC array - reset all the valid bits */
#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
-/* [R 1] ATC initalization done */
+/* [R 1] ATC initialization done */
#define ATC_REG_ATC_INIT_DONE 0x1100bc
/* [RC 6] Interrupt register #0 read clear */
#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
@@ -7341,6 +7341,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET 0x01
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK 0x000e
#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index cfe3c7695455..f67348d16966 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -592,7 +592,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
GFP_KERNEL);
if (!mc) {
- BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
+ BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
return -ENOMEM;
}
}
@@ -2695,7 +2695,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
else
/* function has not been loaded yet. Show mac as 0s */
- memset(&ivi->mac, 0, ETH_ALEN);
+ eth_zero_addr(ivi->mac);
/* vlan */
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 800ab44a07ce..266b055c2360 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1583,7 +1583,7 @@ void bnx2x_memset_stats(struct bnx2x *bp)
if (bp->port.pmf && bp->port.port_stx)
bnx2x_port_stats_base_init(bp);
- /* mark the end of statistics initializiation */
+ /* mark the end of statistics initialization */
bp->stats_init = false;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index be40eabc5304..06b8c0d8fd3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
/* select tpa mode to request */
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
flags |= VFPF_QUEUE_FLG_TPA;
flags |= VFPF_QUEUE_FLG_TPA_IPV6;
if (fp->mode == TPA_MODE_GRO)
@@ -800,7 +800,7 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
req->rss_key_size = T_ETH_RSS_KEY;
req->rss_result_mask = params->rss_result_mask;
- /* flags handled individually for backward/forward compatability */
+ /* flags handled individually for backward/forward compatibility */
if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
req->rss_flags |= VFPF_RSS_MODE_DISABLED;
if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
@@ -1869,7 +1869,7 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
rss.rss_obj = &vf->rss_conf_obj;
rss.rss_result_mask = rss_tlv->rss_result_mask;
- /* flags handled individually for backward/forward compatability */
+ /* flags handled individually for backward/forward compatibility */
rss.rss_flags = 0;
rss.ramrod_flags = 0;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f05fab65d78a..17c145fdf3ff 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,7 +1,7 @@
/* cnic.c: QLogic CNIC core network driver.
*
* Copyright (c) 2006-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,11 +58,11 @@
#define CNIC_MODULE_NAME "cnic"
static char version[] =
- "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+ "QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
"Chen (zongxi@broadcom.com");
-MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver");
+MODULE_DESCRIPTION("QLogic cnic Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 8bb36c1c4d68..ef6125b0ee3e 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,7 +1,7 @@
-/* cnic_if.h: QLogic CNIC core network driver.
+/* cnic_if.h: QLogic cnic core network driver.
*
* Copyright (c) 2006-2014 Broadcom Corporation
- * Copyright (c) 2014 QLogic Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,8 +15,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.20"
-#define CNIC_MODULE_RELDATE "March 14, 2014"
+#define CNIC_MODULE_VERSION "2.5.21"
+#define CNIC_MODULE_RELDATE "January 29, 2015"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6befde61c203..6043734ea613 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -54,8 +54,10 @@
/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY 0
-#define GENET_DEFAULT_BD_CNT \
- (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+#define GENET_Q16_RX_BD_CNT \
+ (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
+#define GENET_Q16_TX_BD_CNT \
+ (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
#define SKB_ALIGNMENT 32
@@ -195,6 +197,14 @@ enum dma_reg {
DMA_PRIORITY_0,
DMA_PRIORITY_1,
DMA_PRIORITY_2,
+ DMA_INDEX2RING_0,
+ DMA_INDEX2RING_1,
+ DMA_INDEX2RING_2,
+ DMA_INDEX2RING_3,
+ DMA_INDEX2RING_4,
+ DMA_INDEX2RING_5,
+ DMA_INDEX2RING_6,
+ DMA_INDEX2RING_7,
};
static const u8 bcmgenet_dma_regs_v3plus[] = {
@@ -206,6 +216,14 @@ static const u8 bcmgenet_dma_regs_v3plus[] = {
[DMA_PRIORITY_0] = 0x30,
[DMA_PRIORITY_1] = 0x34,
[DMA_PRIORITY_2] = 0x38,
+ [DMA_INDEX2RING_0] = 0x70,
+ [DMA_INDEX2RING_1] = 0x74,
+ [DMA_INDEX2RING_2] = 0x78,
+ [DMA_INDEX2RING_3] = 0x7C,
+ [DMA_INDEX2RING_4] = 0x80,
+ [DMA_INDEX2RING_5] = 0x84,
+ [DMA_INDEX2RING_6] = 0x88,
+ [DMA_INDEX2RING_7] = 0x8C,
};
static const u8 bcmgenet_dma_regs_v2[] = {
@@ -829,9 +847,10 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
};
/* Power down the unimac, based on mode. */
-static void bcmgenet_power_down(struct bcmgenet_priv *priv,
+static int bcmgenet_power_down(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
switch (mode) {
@@ -840,7 +859,7 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_down_cfg(priv, mode);
+ ret = bcmgenet_wol_power_down_cfg(priv, mode);
break;
case GENET_POWER_PASSIVE:
@@ -850,11 +869,15 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
reg |= (EXT_PWR_DOWN_PHY |
EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+
+ bcmgenet_phy_power_set(priv->dev, false);
}
break;
default:
break;
}
+
+ return 0;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -923,7 +946,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
tx_cb_ptr = ring->cbs;
tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
- tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+
/* Advancing local write pointer */
if (ring->write_ptr == ring->end_ptr)
ring->write_ptr = ring->cb_ptr;
@@ -941,36 +964,54 @@ static void bcmgenet_free_cb(struct enet_cb *cb)
dma_unmap_addr_set(cb, dma_addr, 0);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
- struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
- bcmgenet_intrl2_0_writel(priv,
- UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
INTRL2_CPU_MASK_SET);
}
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
- struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
- bcmgenet_intrl2_0_writel(priv,
- UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
- struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(ring->priv,
+ 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+ INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ bcmgenet_intrl2_1_writel(ring->priv,
+ 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
INTRL2_CPU_MASK_CLEAR);
- priv->int1_mask &= ~(1 << ring->index);
}
-static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
- struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
+ INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
+ INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
+ INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
INTRL2_CPU_MASK_SET);
- priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
@@ -978,39 +1019,32 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned int pkts_compl = 0;
- unsigned int bds_compl;
unsigned int c_index;
+ unsigned int txbds_ready;
+ unsigned int txbds_processed = 0;
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
- txq = netdev_get_tx_queue(dev, ring->queue);
-
- last_c_index = ring->c_index;
- num_tx_bds = ring->size;
+ c_index &= DMA_C_INDEX_MASK;
- c_index &= (num_tx_bds - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
+ if (likely(c_index >= ring->c_index))
+ txbds_ready = c_index - ring->c_index;
else
- last_tx_cn = num_tx_bds - last_c_index + c_index;
+ txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
netif_dbg(priv, tx_done, dev,
- "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
- __func__, ring->index,
- c_index, last_tx_cn, last_c_index);
+ "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ __func__, ring->index, ring->c_index, c_index, txbds_ready);
/* Reclaim transmitted buffers */
- while (last_tx_cn-- > 0) {
- tx_cb_ptr = ring->cbs + last_c_index;
- bds_compl = 0;
+ while (txbds_processed < txbds_ready) {
+ tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
if (tx_cb_ptr->skb) {
pkts_compl++;
- bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+ dev->stats.tx_packets++;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -1026,20 +1060,23 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
DMA_TO_DEVICE);
dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
}
- dev->stats.tx_packets++;
- ring->free_bds += bds_compl;
- last_c_index++;
- last_c_index &= (num_tx_bds - 1);
+ txbds_processed++;
+ if (likely(ring->clean_ptr < ring->end_ptr))
+ ring->clean_ptr++;
+ else
+ ring->clean_ptr = ring->cb_ptr;
}
+ ring->free_bds += txbds_processed;
+ ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
netif_tx_wake_queue(txq);
}
- ring->c_index = c_index;
-
return pkts_compl;
}
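Not part of the patch: a minimal user-space sketch of the 16-bit consumer-index wraparound used in the reclaim path above (DMA_C_INDEX_MASK assumed here to be 0xffff).

#include <assert.h>

#define DMA_C_INDEX_MASK 0xffff /* assumed width of the hardware index */

/* Number of descriptors consumed since old_c_index, mirroring the
 * txbds_ready computation in __bcmgenet_tx_reclaim().
 */
static unsigned int bds_ready(unsigned int c_index, unsigned int old_c_index)
{
	c_index &= DMA_C_INDEX_MASK;
	if (c_index >= old_c_index)
		return c_index - old_c_index;
	return (DMA_C_INDEX_MASK + 1) - old_c_index + c_index;
}

int main(void)
{
	assert(bds_ready(10, 5) == 5);     /* no wrap */
	assert(bds_ready(3, 0xfffe) == 5); /* index wrapped past 0xffff */
	return 0;
}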
@@ -1066,7 +1103,7 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring->priv, ring);
+ ring->int_enable(ring);
return 0;
}
@@ -1132,11 +1169,6 @@ static int bcmgenet_xmit_single(struct net_device *dev,
dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
- /* Decrement total BD count and advance our write pointer */
- ring->free_bds -= 1;
- ring->prod_index += 1;
- ring->prod_index &= DMA_P_INDEX_MASK;
-
return 0;
}
@@ -1175,11 +1207,6 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
(frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
-
- ring->free_bds -= 1;
- ring->prod_index += 1;
- ring->prod_index &= DMA_P_INDEX_MASK;
-
return 0;
}
@@ -1323,119 +1350,128 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
- /* we kept a software copy of how much we should advance the TDMA
- * producer index, now write it down to the hardware
- */
- bcmgenet_tdma_ring_writel(priv, ring->index,
- ring->prod_index, TDMA_PROD_INDEX);
+ /* Decrement total BD count and advance our write pointer */
+ ring->free_bds -= nr_frags + 1;
+ ring->prod_index += nr_frags + 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
+ if (!skb->xmit_more || netif_xmit_stopped(txq))
+ /* Packets are ready, update producer index */
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
out:
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
-
-static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
+static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+ struct enet_cb *cb)
{
struct device *kdev = &priv->pdev->dev;
struct sk_buff *skb;
+ struct sk_buff *rx_skb;
dma_addr_t mapping;
- int ret;
+ /* Allocate a new Rx skb */
skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ priv->mib.alloc_rx_buff_failed++;
+ netif_err(priv, rx_err, priv->dev,
+ "%s: Rx skb allocation failed\n", __func__);
+ return NULL;
+ }
- /* a caller did not release this control block */
- WARN_ON(cb->skb != NULL);
- cb->skb = skb;
- mapping = dma_map_single(kdev, skb->data,
- priv->rx_buf_len, DMA_FROM_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
+ /* DMA-map the new Rx skb */
+ mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
priv->mib.rx_dma_failed++;
- bcmgenet_free_cb(cb);
+ dev_kfree_skb_any(skb);
netif_err(priv, rx_err, priv->dev,
- "%s DMA map failed\n", __func__);
- return ret;
+ "%s: Rx skb DMA mapping failed\n", __func__);
+ return NULL;
}
- dma_unmap_addr_set(cb, dma_addr, mapping);
- /* assign packet, prepare descriptor, and advance pointer */
-
- dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
-
- /* turn on the newly assigned BD for DMA to use */
- priv->rx_bd_assign_index++;
- priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+ /* Grab the current Rx skb from the ring and DMA-unmap it */
+ rx_skb = cb->skb;
+ if (likely(rx_skb))
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
- priv->rx_bd_assign_ptr = priv->rx_bds +
- (priv->rx_bd_assign_index * DMA_DESC_SIZE);
+ /* Put the new Rx skb on the ring */
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dmadesc_set_addr(priv, cb->bd_addr, mapping);
- return 0;
+ /* Return the current Rx skb to caller */
+ return rx_skb;
}
/* bcmgenet_desc_rx - descriptor based rx process.
* this could be called from bottom half, or from NAPI polling method.
*/
-static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int budget)
{
+ struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct enet_cb *cb;
struct sk_buff *skb;
u32 dma_length_status;
unsigned long dma_flag;
- int len, err;
+ int len;
unsigned int rxpktprocessed = 0, rxpkttoprocess;
unsigned int p_index;
+ unsigned int discards;
unsigned int chksum_ok = 0;
- p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
+ p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
+
+ discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
+ DMA_P_INDEX_DISCARD_CNT_MASK;
+ if (discards > ring->old_discards) {
+ discards = discards - ring->old_discards;
+ dev->stats.rx_missed_errors += discards;
+ dev->stats.rx_errors += discards;
+ ring->old_discards += discards;
+
+ /* Clear HW register when we reach 75% of maximum 0xFFFF */
+ if (ring->old_discards >= 0xC000) {
+ ring->old_discards = 0;
+ bcmgenet_rdma_ring_writel(priv, ring->index, 0,
+ RDMA_PROD_INDEX);
+ }
+ }
+
p_index &= DMA_P_INDEX_MASK;
- if (p_index < priv->rx_c_index)
- rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
- priv->rx_c_index + p_index;
+ if (likely(p_index >= ring->c_index))
+ rxpkttoprocess = p_index - ring->c_index;
else
- rxpkttoprocess = p_index - priv->rx_c_index;
+ rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
+ p_index;
netif_dbg(priv, rx_status, dev,
"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
while ((rxpktprocessed < rxpkttoprocess) &&
(rxpktprocessed < budget)) {
- cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
+ cb = &priv->rx_cbs[ring->read_ptr];
+ skb = bcmgenet_rx_refill(priv, cb);
- /* We do not have a backing SKB, so we do not have a
- * corresponding DMA mapping for this incoming packet since
- * bcmgenet_rx_refill always either has both skb and mapping or
- * none.
- */
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
dev->stats.rx_errors++;
- goto refill;
+ goto next;
}
- /* Unmap the packet contents such that we can use the
- * RSV from the 64 bytes descriptor when enabled and save
- * a 32-bits register read
- */
- dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
-
if (!priv->desc_64b_en) {
dma_length_status =
- dmadesc_get_length_status(priv,
- priv->rx_bds +
- (priv->rx_read_ptr *
- DMA_DESC_SIZE));
+ dmadesc_get_length_status(priv, cb->bd_addr);
} else {
struct status_64 *status;
@@ -1451,18 +1487,18 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
netif_dbg(priv, rx_status, dev,
"%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
- __func__, p_index, priv->rx_c_index,
- priv->rx_read_ptr, dma_length_status);
+ __func__, p_index, ring->c_index,
+ ring->read_ptr, dma_length_status);
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
dev->stats.rx_dropped++;
dev->stats.rx_errors++;
- dev_kfree_skb_any(cb->skb);
- cb->skb = NULL;
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
+
/* report errors */
if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
DMA_RX_OV |
@@ -1481,11 +1517,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
dev->stats.rx_length_errors++;
dev->stats.rx_dropped++;
dev->stats.rx_errors++;
-
- /* discard the packet and advance consumer index.*/
- dev_kfree_skb_any(cb->skb);
- cb->skb = NULL;
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
} /* error packet */
chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
@@ -1517,47 +1550,61 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
dev->stats.multicast++;
/* Notify kernel */
- napi_gro_receive(&priv->napi, skb);
- cb->skb = NULL;
+ napi_gro_receive(&ring->napi, skb);
netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
- /* refill RX path on the current control block */
-refill:
- err = bcmgenet_rx_refill(priv, cb);
- if (err) {
- priv->mib.alloc_rx_buff_failed++;
- netif_err(priv, rx_err, dev, "Rx refill failed\n");
- }
-
+next:
rxpktprocessed++;
- priv->rx_read_ptr++;
- priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+ if (likely(ring->read_ptr < ring->end_ptr))
+ ring->read_ptr++;
+ else
+ ring->read_ptr = ring->cb_ptr;
+
+ ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
+ bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
}
return rxpktprocessed;
}
+/* Rx NAPI polling method */
+static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_rx_ring *ring = container_of(napi,
+ struct bcmgenet_rx_ring, napi);
+ unsigned int work_done;
+
+ work_done = bcmgenet_desc_rx(ring, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ ring->int_enable(ring);
+ }
+
+ return work_done;
+}
+
/* Assign skb to RX DMA descriptor. */
-static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
+ struct bcmgenet_rx_ring *ring)
{
struct enet_cb *cb;
- int ret = 0;
+ struct sk_buff *skb;
int i;
- netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+ netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
/* loop here for each buffer needing assign */
- for (i = 0; i < priv->num_rx_bds; i++) {
- cb = &priv->rx_cbs[priv->rx_bd_assign_index];
- if (cb->skb)
- continue;
-
- ret = bcmgenet_rx_refill(priv, cb);
- if (ret)
- break;
+ for (i = 0; i < ring->size; i++) {
+ cb = ring->cbs + i;
+ skb = bcmgenet_rx_refill(priv, cb);
+ if (skb)
+ dev_kfree_skb_any(skb);
+ if (!cb->skb)
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
@@ -1645,8 +1692,10 @@ static int init_umac(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
int ret;
- u32 reg, cpu_mask_clear;
- int index;
+ u32 reg;
+ u32 int0_enable = 0;
+ u32 int1_enable = 0;
+ int i;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1673,16 +1722,21 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
+ /* Enable Rx default queue 16 interrupts */
+ int0_enable |= UMAC_IRQ_RXDMA_DONE;
- dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+ /* Enable Tx default queue 16 interrupts */
+ int0_enable |= UMAC_IRQ_TXDMA_DONE;
/* Monitor cable plug/unplugged event for internal PHY */
if (phy_is_internal(priv->phydev)) {
- cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->ext_phy) {
- cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ int0_enable |= UMAC_IRQ_LINK_EVENT;
+
reg = bcmgenet_bp_mc_get(priv);
reg |= BIT(priv->hw_params->bp_in_en_shift);
@@ -1696,13 +1750,18 @@ static int init_umac(struct bcmgenet_priv *priv)
/* Enable MDIO interrupts on GENET v3+ */
if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+ int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
- bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+ /* Enable Rx priority queue interrupts */
+ for (i = 0; i < priv->hw_params->rx_queues; ++i)
+ int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
- for (index = 0; index < priv->hw_params->tx_queues; index++)
- bcmgenet_intrl2_1_writel(priv, (1 << index),
- INTRL2_CPU_MASK_CLEAR);
+ /* Enable Tx priority queue interrupts */
+ for (i = 0; i < priv->hw_params->tx_queues; ++i)
+ int1_enable |= (1 << i);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
/* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
@@ -1710,21 +1769,17 @@ static int init_umac(struct bcmgenet_priv *priv)
return 0;
}
-/* Initialize all house-keeping variables for a TX ring, along
- * with corresponding hardware registers
- */
+/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
unsigned int index, unsigned int size,
- unsigned int write_ptr, unsigned int end_ptr)
+ unsigned int start_ptr, unsigned int end_ptr)
{
struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
u32 words_per_bd = WORDS_PER_BD(priv);
u32 flow_period_val = 0;
- unsigned int first_bd;
spin_lock_init(&ring->lock);
ring->priv = priv;
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
ring->index = index;
if (index == DESC_INDEX) {
ring->queue = 0;
@@ -1735,12 +1790,13 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->int_enable = bcmgenet_tx_ring_int_enable;
ring->int_disable = bcmgenet_tx_ring_int_disable;
}
- ring->cbs = priv->tx_cbs + write_ptr;
+ ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
+ ring->clean_ptr = start_ptr;
ring->c_index = 0;
ring->free_bds = size;
- ring->write_ptr = write_ptr;
- ring->cb_ptr = write_ptr;
+ ring->write_ptr = start_ptr;
+ ring->cb_ptr = start_ptr;
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
@@ -1754,149 +1810,319 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
/* Disable rate control for now */
bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
TDMA_FLOW_PERIOD);
- /* Unclassified traffic goes to ring 16 */
bcmgenet_tdma_ring_writel(priv, index,
((size << DMA_RING_SIZE_SHIFT) |
RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
- first_bd = write_ptr;
-
/* Set start and end address, read and write pointers */
- bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
DMA_START_ADDR);
- bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
TDMA_READ_PTR);
- bcmgenet_tdma_ring_writel(priv, index, first_bd,
+ bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
TDMA_WRITE_PTR);
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
DMA_END_ADDR);
-
- napi_enable(&ring->napi);
-}
-
-static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
- unsigned int index)
-{
- struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
-
- napi_disable(&ring->napi);
- netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
- unsigned int index, unsigned int size)
+ unsigned int index, unsigned int size,
+ unsigned int start_ptr, unsigned int end_ptr)
{
+ struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
u32 words_per_bd = WORDS_PER_BD(priv);
int ret;
- priv->num_rx_bds = TOTAL_DESC;
- priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
- priv->rx_bd_assign_ptr = priv->rx_bds;
- priv->rx_bd_assign_index = 0;
- priv->rx_c_index = 0;
- priv->rx_read_ptr = 0;
- priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
- GFP_KERNEL);
- if (!priv->rx_cbs)
- return -ENOMEM;
+ ring->priv = priv;
+ ring->index = index;
+ if (index == DESC_INDEX) {
+ ring->int_enable = bcmgenet_rx_ring16_int_enable;
+ ring->int_disable = bcmgenet_rx_ring16_int_disable;
+ } else {
+ ring->int_enable = bcmgenet_rx_ring_int_enable;
+ ring->int_disable = bcmgenet_rx_ring_int_disable;
+ }
+ ring->cbs = priv->rx_cbs + start_ptr;
+ ring->size = size;
+ ring->c_index = 0;
+ ring->read_ptr = start_ptr;
+ ring->cb_ptr = start_ptr;
+ ring->end_ptr = end_ptr - 1;
- ret = bcmgenet_alloc_rx_buffers(priv);
- if (ret) {
- kfree(priv->rx_cbs);
+ ret = bcmgenet_alloc_rx_buffers(priv, ring);
+ if (ret)
return ret;
- }
- bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
bcmgenet_rdma_ring_writel(priv, index,
((size << DMA_RING_SIZE_SHIFT) |
RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
- bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
- bcmgenet_rdma_ring_writel(priv, index,
- words_per_bd * size - 1, DMA_END_ADDR);
bcmgenet_rdma_ring_writel(priv, index,
(DMA_FC_THRESH_LO <<
DMA_XOFF_THRESHOLD_SHIFT) |
DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
- bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+ /* Set start and end address, read and write pointers */
+ bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+ DMA_START_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+ RDMA_READ_PTR);
+ bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+ RDMA_WRITE_PTR);
+ bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+ DMA_END_ADDR);
return ret;
}
-/* init multi xmit queues, only available for GENET2+
- * the queue is partitioned as follows:
+static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_tx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ ring = &priv->tx_rings[i];
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+ }
+
+ ring = &priv->tx_rings[DESC_INDEX];
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+}
+
+static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_tx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ ring = &priv->tx_rings[i];
+ napi_enable(&ring->napi);
+ }
+
+ ring = &priv->tx_rings[DESC_INDEX];
+ napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_tx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ ring = &priv->tx_rings[i];
+ napi_disable(&ring->napi);
+ }
+
+ ring = &priv->tx_rings[DESC_INDEX];
+ napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_tx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ ring = &priv->tx_rings[i];
+ netif_napi_del(&ring->napi);
+ }
+
+ ring = &priv->tx_rings[DESC_INDEX];
+ netif_napi_del(&ring->napi);
+}
+
+/* Initialize Tx queues
*
- * queue 0 - 3 is priority based, each one has 32 descriptors,
+ * Queues 0-3 are priority-based, each one has 32 descriptors,
* with queue 0 being the highest priority queue.
*
- * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
- * descriptors: 256 - (number of tx queues * bds per queues) = 128
- * descriptors.
+ * Queue 16 is the default Tx queue with
+ * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
- * The transmit control block pool is then partitioned as following:
- * - tx_cbs[0...127] are for queue 16
- * - tx_ring_cbs[0] points to tx_cbs[128..159]
- * - tx_ring_cbs[1] points to tx_cbs[160..191]
- * - tx_ring_cbs[2] points to tx_cbs[192..223]
- * - tx_ring_cbs[3] points to tx_cbs[224..255]
+ * The transmit control block pool is then partitioned as follows:
+ * - Tx queue 0 uses tx_cbs[0..31]
+ * - Tx queue 1 uses tx_cbs[32..63]
+ * - Tx queue 2 uses tx_cbs[64..95]
+ * - Tx queue 3 uses tx_cbs[96..127]
+ * - Tx queue 16 uses tx_cbs[128..255]
*/
-static void bcmgenet_init_multiq(struct net_device *dev)
+static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned int i, dma_enable;
- u32 reg, dma_ctrl, ring_cfg = 0;
+ u32 i, dma_enable;
+ u32 dma_ctrl, ring_cfg;
u32 dma_priority[3] = {0, 0, 0};
- if (!netif_is_multiqueue(dev)) {
- netdev_warn(dev, "called with non multi queue aware HW\n");
- return;
- }
-
dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
dma_enable = dma_ctrl & DMA_EN;
dma_ctrl &= ~DMA_EN;
bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ dma_ctrl = 0;
+ ring_cfg = 0;
+
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
+ /* Initialize Tx priority queues */
for (i = 0; i < priv->hw_params->tx_queues; i++) {
- /* first 64 tx_cbs are reserved for default tx queue
- * (ring 16)
- */
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
- i * priv->hw_params->bds_cnt,
- (i + 1) * priv->hw_params->bds_cnt);
-
- /* Configure ring as descriptor ring and setup priority */
- ring_cfg |= 1 << i;
- dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
-
+ bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
+ i * priv->hw_params->tx_bds_per_q,
+ (i + 1) * priv->hw_params->tx_bds_per_q);
+ ring_cfg |= (1 << i);
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
}
- /* Set ring 16 priority and program the hardware registers */
+ /* Initialize Tx default queue 16 */
+ bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
+ priv->hw_params->tx_queues *
+ priv->hw_params->tx_bds_per_q,
+ TOTAL_DESC);
+ ring_cfg |= (1 << DESC_INDEX);
+ dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
DMA_PRIO_REG_SHIFT(DESC_INDEX));
+
+ /* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
+ /* Initialize Tx NAPI */
+ bcmgenet_init_tx_napi(priv);
+
+ /* Enable Tx queues */
+ bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+
+ /* Enable Tx DMA */
+ if (dma_enable)
+ dma_ctrl |= DMA_EN;
+ bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+}
+
+static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_rx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ ring = &priv->rx_rings[i];
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+ }
+
+ ring = &priv->rx_rings[DESC_INDEX];
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+}
+
+static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_rx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ ring = &priv->rx_rings[i];
+ napi_enable(&ring->napi);
+ }
+
+ ring = &priv->rx_rings[DESC_INDEX];
+ napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_rx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ ring = &priv->rx_rings[i];
+ napi_disable(&ring->napi);
+ }
+
+ ring = &priv->rx_rings[DESC_INDEX];
+ napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
+{
+ unsigned int i;
+ struct bcmgenet_rx_ring *ring;
+
+ for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ ring = &priv->rx_rings[i];
+ netif_napi_del(&ring->napi);
+ }
+
+ ring = &priv->rx_rings[DESC_INDEX];
+ netif_napi_del(&ring->napi);
+}
+
+/* Initialize Rx queues
+ *
+ * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
+ * used to direct traffic to these queues.
+ *
+ * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ */
+static int bcmgenet_init_rx_queues(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 i;
+ u32 dma_enable;
+ u32 dma_ctrl;
+ u32 ring_cfg;
+ int ret;
+
+ dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ dma_enable = dma_ctrl & DMA_EN;
+ dma_ctrl &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+ dma_ctrl = 0;
+ ring_cfg = 0;
+
+ /* Initialize Rx priority queues */
+ for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i,
+ priv->hw_params->rx_bds_per_q,
+ i * priv->hw_params->rx_bds_per_q,
+ (i + 1) *
+ priv->hw_params->rx_bds_per_q);
+ if (ret)
+ return ret;
+
+ ring_cfg |= (1 << i);
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ }
+
+ /* Initialize Rx default queue 16 */
+ ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
+ priv->hw_params->rx_queues *
+ priv->hw_params->rx_bds_per_q,
+ TOTAL_DESC);
+ if (ret)
+ return ret;
+
+ ring_cfg |= (1 << DESC_INDEX);
+ dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
+
+ /* Initialize Rx NAPI */
+ bcmgenet_init_rx_napi(priv);
+
/* Enable rings */
- reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
- reg |= ring_cfg;
- bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+ bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
/* Configure ring as descriptor ring and re-enable DMA if enabled */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
if (dma_enable)
- reg |= DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ dma_ctrl |= DMA_EN;
+ bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+ return 0;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
@@ -1950,10 +2176,13 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
return ret;
}
-static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
+ bcmgenet_fini_rx_napi(priv);
+ bcmgenet_fini_tx_napi(priv);
+
/* disable DMA */
bcmgenet_dma_teardown(priv);
@@ -1969,37 +2198,27 @@ static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
kfree(priv->tx_cbs);
}
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
-{
- int i;
-
- bcmgenet_fini_tx_ring(priv, DESC_INDEX);
-
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_fini_tx_ring(priv, i);
-
- __bcmgenet_fini_dma(priv);
-}
-
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
int ret;
+ unsigned int i;
+ struct enet_cb *cb;
- netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
-
- /* by default, enable ring 16 (descriptor based) */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
- if (ret) {
- netdev_err(priv->dev, "failed to initialize RX ring\n");
- return ret;
- }
+ netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
- /* init rDma */
- bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ /* Initialize common Rx ring structures */
+ priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+ priv->num_rx_bds = TOTAL_DESC;
+ priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->rx_cbs)
+ return -ENOMEM;
- /* Init tDma */
- bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = priv->rx_cbs + i;
+ cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
+ }
/* Initialize common TX ring structures */
priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
@@ -2007,43 +2226,35 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
GFP_KERNEL);
if (!priv->tx_cbs) {
- __bcmgenet_fini_dma(priv);
+ kfree(priv->rx_cbs);
return -ENOMEM;
}
- /* initialize multi xmit queue */
- bcmgenet_init_multiq(priv->dev);
-
- /* initialize special ring 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->bds_cnt,
- TOTAL_DESC);
+ for (i = 0; i < priv->num_tx_bds; i++) {
+ cb = priv->tx_cbs + i;
+ cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
+ }
- return 0;
-}
+ /* Init rDma */
+ bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
-/* NAPI polling method*/
-static int bcmgenet_poll(struct napi_struct *napi, int budget)
-{
- struct bcmgenet_priv *priv = container_of(napi,
- struct bcmgenet_priv, napi);
- unsigned int work_done;
+ /* Initialize Rx queues */
+ ret = bcmgenet_init_rx_queues(priv->dev);
+ if (ret) {
+ netdev_err(priv->dev, "failed to initialize Rx queues\n");
+ bcmgenet_free_rx_buffers(priv);
+ kfree(priv->rx_cbs);
+ kfree(priv->tx_cbs);
+ return ret;
+ }
- work_done = bcmgenet_desc_rx(priv, budget);
+ /* Init tDma */
+ bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
- /* Advancing our consumer index*/
- priv->rx_c_index += work_done;
- priv->rx_c_index &= DMA_C_INDEX_MASK;
- bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
- priv->rx_c_index, RDMA_CONS_INDEX);
- if (work_done < budget) {
- napi_complete(napi);
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
- INTRL2_CPU_MASK_CLEAR);
- }
+ /* Initialize Tx queues */
+ bcmgenet_init_tx_queues(priv->dev);
- return work_done;
+ return 0;
}
/* Interrupt bottom half */
@@ -2063,87 +2274,100 @@ static void bcmgenet_irq_task(struct work_struct *work)
/* Link UP/DOWN event */
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+ (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
phy_mac_interrupt(priv->phydev,
- priv->irq0_stat & UMAC_IRQ_LINK_UP);
- priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+ !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
+ priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
}
}
-/* bcmgenet_isr1: interrupt handler for ring buffer. */
+/* bcmgenet_isr1: handle Rx and Tx priority queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_tx_ring *ring;
+ struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_ring *tx_ring;
unsigned int index;
/* Save irq status for bottom-half processing. */
priv->irq1_stat =
bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+
/* clear interrupts */
bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
- /* Check the MBDONE interrupts.
- * packet is done, reclaim descriptors
- */
+ /* Check Rx priority queue interrupts */
+ for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+ continue;
+
+ rx_ring = &priv->rx_rings[index];
+
+ if (likely(napi_schedule_prep(&rx_ring->napi))) {
+ rx_ring->int_disable(rx_ring);
+ __napi_schedule(&rx_ring->napi);
+ }
+ }
+
+ /* Check Tx priority queue interrupts */
for (index = 0; index < priv->hw_params->tx_queues; index++) {
if (!(priv->irq1_stat & BIT(index)))
continue;
- ring = &priv->tx_rings[index];
+ tx_ring = &priv->tx_rings[index];
- if (likely(napi_schedule_prep(&ring->napi))) {
- ring->int_disable(priv, ring);
- __napi_schedule(&ring->napi);
+ if (likely(napi_schedule_prep(&tx_ring->napi))) {
+ tx_ring->int_disable(tx_ring);
+ __napi_schedule(&tx_ring->napi);
}
}
return IRQ_HANDLED;
}
-/* bcmgenet_isr0: Handle various interrupts. */
+/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
+ struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_ring *tx_ring;
/* Save irq status for bottom-half processing. */
priv->irq0_stat =
bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+
/* clear interrupts */
bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", priv->irq0_stat);
- if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
- /* We use NAPI(software interrupt throttling, if
- * Rx Descriptor throttling is not used.
- * Disable interrupt, will be enabled in the poll method.
- */
- if (likely(napi_schedule_prep(&priv->napi))) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
- INTRL2_CPU_MASK_SET);
- __napi_schedule(&priv->napi);
+ if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+ rx_ring = &priv->rx_rings[DESC_INDEX];
+
+ if (likely(napi_schedule_prep(&rx_ring->napi))) {
+ rx_ring->int_disable(rx_ring);
+ __napi_schedule(&rx_ring->napi);
}
}
- if (priv->irq0_stat &
- (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
- struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
- if (likely(napi_schedule_prep(&ring->napi))) {
- ring->int_disable(priv, ring);
- __napi_schedule(&ring->napi);
+ if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+ tx_ring = &priv->tx_rings[DESC_INDEX];
+
+ if (likely(napi_schedule_prep(&tx_ring->napi))) {
+ tx_ring->int_disable(tx_ring);
+ __napi_schedule(&tx_ring->napi);
}
}
+
if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
UMAC_IRQ_PHY_DET_F |
- UMAC_IRQ_LINK_UP |
- UMAC_IRQ_LINK_DOWN |
+ UMAC_IRQ_LINK_EVENT |
UMAC_IRQ_HFB_SM |
UMAC_IRQ_HFB_MM |
UMAC_IRQ_MPD_R)) {
@@ -2227,18 +2451,170 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
+static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
+ u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ return !!(reg & (1 << (f_index % 32)));
+}
+
+static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
+ u32 f_index, u32 rx_queue)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = f_index / 8;
+ reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
+ reg &= ~(0xF << (4 * (f_index % 8)));
+ reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
+ bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
+}
+
+static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
+ u32 f_index, u32 f_length)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_LEN_V3PLUS +
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
+ sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg &= ~(0xFF << (8 * (f_index % 4)));
+ reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
+{
+ u32 f_index;
+
+ for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
+ if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
+ return f_index;
+
+ return -ENOMEM;
+}
+
+/* bcmgenet_hfb_add_filter
+ *
+ * Add new filter to Hardware Filter Block to match and direct Rx traffic to
+ * desired Rx queue.
+ *
+ * f_data is an array of unsigned 32-bit integers where each 32-bit integer
+ * provides filter data for 2 bytes (4 nibbles) of Rx frame:
+ *
+ * bits 31:20 - unused
+ * bit 19 - nibble 0 match enable
+ * bit 18 - nibble 1 match enable
+ * bit 17 - nibble 2 match enable
+ * bit 16 - nibble 3 match enable
+ * bits 15:12 - nibble 0 data
+ * bits 11:8 - nibble 1 data
+ * bits 7:4 - nibble 2 data
+ * bits 3:0 - nibble 3 data
+ *
+ * Example:
+ * In order to match:
+ * - Ethernet frame type = 0x0800 (IP)
+ * - IP version field = 4
+ * - IP protocol field = 0x11 (UDP)
+ *
+ * The following filter is needed:
+ * u32 hfb_filter_ipv4_udp[] = {
+ * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
+ * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
+ * };
+ *
+ * To add the filter to HFB and direct the traffic to Rx queue 0, call:
+ * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
+ * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
+ */
+int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
+ u32 f_length, u32 rx_queue)
+{
+ int f_index;
+ u32 i;
+
+ f_index = bcmgenet_hfb_find_unused_filter(priv);
+ if (f_index < 0)
+ return -ENOMEM;
+
+ if (f_length > priv->hw_params->hfb_filter_size)
+ return -EINVAL;
+
+ for (i = 0; i < f_length; i++)
+ bcmgenet_hfb_writel(priv, f_data[i],
+ (f_index * priv->hw_params->hfb_filter_size + i) *
+ sizeof(u32));
+
+ bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
+ bcmgenet_hfb_enable_filter(priv, f_index);
+ bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
+
+ return 0;
+}
+
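Not part of the patch: given the per-word layout documented above, a hypothetical user-space helper for building one filter word could look like this; the two sample outputs correspond to the 0x000F0800 and 0x00084000 words in the example filter.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, illustrative only: packs two Rx frame bytes and a
 * nibble-match mask (bit 0 = nibble 0 ... bit 3 = nibble 3) into one HFB
 * filter word using the layout described in the comment above.
 */
static uint32_t hfb_filter_word(uint8_t b0, uint8_t b1, unsigned int nibble_en)
{
	uint32_t word = ((uint32_t)b0 << 8) | b1; /* nibble 0 lands in bits 15:12 */
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (nibble_en & (1u << i))
			word |= 1u << (19 - i);   /* bit 19 enables nibble 0 */
	return word;
}

int main(void)
{
	/* EtherType 0x0800, all four nibbles matched -> 0x000f0800 */
	printf("0x%08x\n", hfb_filter_word(0x08, 0x00, 0xf));
	/* IP version nibble only (value 4) -> 0x00084000 */
	printf("0x%08x\n", hfb_filter_word(0x40, 0x00, 0x1));
	return 0;
}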
+/* bcmgenet_hfb_clear
+ *
+ * Clear Hardware Filter Block and disable all filtering.
+ */
+static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
+{
+ u32 i;
+
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
+
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0x0, i);
+
+ for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
+ bcmgenet_hfb_reg_writel(priv, 0x0,
+ HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+
+ for (i = 0; i < priv->hw_params->hfb_filter_cnt *
+ priv->hw_params->hfb_filter_size; i++)
+ bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+}
+
+static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
+ bcmgenet_hfb_clear(priv);
+}
+
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
- napi_enable(&priv->napi);
+ bcmgenet_enable_rx_napi(priv);
+ bcmgenet_enable_tx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
- if (phy_is_internal(priv->phydev))
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
netif_tx_start_all_queues(dev);
phy_start(priv->phydev);
@@ -2257,6 +2633,12 @@ static int bcmgenet_open(struct net_device *dev)
if (!IS_ERR(priv->clk))
clk_prepare_enable(priv->clk);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
/* take MAC out of reset */
bcmgenet_umac_reset(priv);
@@ -2286,12 +2668,15 @@ static int bcmgenet_open(struct net_device *dev)
ret = bcmgenet_init_dma(priv);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
- goto err_fini_dma;
+ goto err_clk_disable;
}
/* Always enable ring 16 - descriptor ring */
bcmgenet_enable_dma(priv, dma_ctrl);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
+
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -2331,10 +2716,10 @@ static void bcmgenet_netif_stop(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
netif_tx_stop_all_queues(dev);
- napi_disable(&priv->napi);
phy_stop(priv->phydev);
-
bcmgenet_intr_disable(priv);
+ bcmgenet_disable_rx_napi(priv);
+ bcmgenet_disable_tx_napi(priv);
/* Wait for pending work items to complete. Since interrupts are
* disabled no new work will be scheduled.
@@ -2377,12 +2762,12 @@ static int bcmgenet_close(struct net_device *dev)
free_irq(priv->irq1, priv);
if (phy_is_internal(priv->phydev))
- bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
- return 0;
+ return ret;
}
static void bcmgenet_timeout(struct net_device *dev)
@@ -2499,8 +2884,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
[GENET_V1] = {
.tx_queues = 0,
+ .tx_bds_per_q = 0,
.rx_queues = 0,
- .bds_cnt = 0,
+ .rx_bds_per_q = 0,
.bp_in_en_shift = 16,
.bp_in_mask = 0xffff,
.hfb_filter_cnt = 16,
@@ -2512,8 +2898,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
},
[GENET_V2] = {
.tx_queues = 4,
- .rx_queues = 4,
- .bds_cnt = 32,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
.bp_in_en_shift = 16,
.bp_in_mask = 0xffff,
.hfb_filter_cnt = 16,
@@ -2528,11 +2915,13 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
},
[GENET_V3] = {
.tx_queues = 4,
- .rx_queues = 4,
- .bds_cnt = 32,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
.bp_in_en_shift = 17,
.bp_in_mask = 0x1ffff,
.hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
.qtag_mask = 0x3F,
.tbuf_offset = 0x0600,
.hfb_offset = 0x8000,
@@ -2540,15 +2929,18 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
.rdma_offset = 0x10000,
.tdma_offset = 0x11000,
.words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
},
[GENET_V4] = {
.tx_queues = 4,
- .rx_queues = 4,
- .bds_cnt = 32,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
.bp_in_en_shift = 17,
.bp_in_mask = 0x1ffff,
.hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
.qtag_mask = 0x3F,
.tbuf_offset = 0x0600,
.hfb_offset = 0x8000,
@@ -2556,7 +2948,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
.rdma_offset = 0x2000,
.tdma_offset = 0x4000,
.words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
},
};
@@ -2645,14 +3038,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
#endif
pr_debug("Configuration for version: %d\n"
- "TXq: %1d, RXq: %1d, BDs: %1d\n"
+ "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
"BP << en: %2d, BP msk: 0x%05x\n"
"HFB count: %2d, QTAQ msk: 0x%05x\n"
"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
"RDMA: 0x%05x, TDMA: 0x%05x\n"
"Words/BD: %d\n",
priv->version,
- params->tx_queues, params->rx_queues, params->bds_cnt,
+ params->tx_queues, params->tx_bds_per_q,
+ params->rx_queues, params->rx_bds_per_q,
params->bp_in_en_shift, params->bp_in_mask,
params->hfb_filter_cnt, params->qtag_mask,
params->tbuf_offset, params->hfb_offset,
@@ -2680,8 +3074,9 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct resource *r;
int err = -EIO;
- /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
- dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+ /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
+ dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
+ GENET_MAX_MQ_CNT + 1);
if (!dev) {
dev_err(&pdev->dev, "can't allocate net device\n");
return -ENOMEM;
@@ -2727,7 +3122,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev->watchdog_timeo = 2 * HZ;
dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
- netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
@@ -2860,14 +3254,16 @@ static int bcmgenet_suspend(struct device *d)
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts) {
- bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
clk_prepare_enable(priv->clk_wol);
+ } else if (phy_is_internal(priv->phydev)) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
}
/* Turn off the clocks */
clk_disable_unprepare(priv->clk);
- return 0;
+ return ret;
}
static int bcmgenet_resume(struct device *d)
@@ -2886,6 +3282,12 @@ static int bcmgenet_resume(struct device *d)
if (ret)
return ret;
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
bcmgenet_umac_reset(priv);
ret = init_umac(priv);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0d370d168aee..6f2887a5e0be 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -293,6 +293,7 @@ struct bcmgenet_mib_counters {
#define UMAC_IRQ_PHY_DET_F (1 << 3)
#define UMAC_IRQ_LINK_UP (1 << 4)
#define UMAC_IRQ_LINK_DOWN (1 << 5)
+#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN)
#define UMAC_IRQ_UMAC (1 << 6)
#define UMAC_IRQ_UMAC_TSV (1 << 7)
#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
@@ -303,13 +304,22 @@ struct bcmgenet_mib_counters {
#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
+#define UMAC_IRQ_RXDMA_DONE (UMAC_IRQ_RXDMA_PDONE | \
+ UMAC_IRQ_RXDMA_BDONE)
#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
+#define UMAC_IRQ_TXDMA_DONE (UMAC_IRQ_TXDMA_PDONE | \
+ UMAC_IRQ_TXDMA_BDONE)
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+/* INTRL2 instance 1 definitions */
+#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
+#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF
+#define UMAC_IRQ1_RX_INTR_SHIFT 16
+
/* Register block offsets */
#define GENET_SYS_OFF 0x0000
#define GENET_GR_BRIDGE_OFF 0x0040
@@ -354,6 +364,7 @@ struct bcmgenet_mib_counters {
#define EXT_GPHY_CTRL 0x1C
#define EXT_CFG_IDDQ_BIAS (1 << 0)
#define EXT_CFG_PWR_DOWN (1 << 1)
+#define EXT_CK25_DIS (1 << 4)
#define EXT_GPHY_RESET (1 << 5)
/* DMA rings size */
@@ -497,17 +508,20 @@ enum bcmgenet_version {
#define GENET_HAS_40BITS (1 << 0)
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
+#define GENET_HAS_MOCA_LINK_DET (1 << 3)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
*/
struct bcmgenet_hw_params {
u8 tx_queues;
+ u8 tx_bds_per_q;
u8 rx_queues;
- u8 bds_cnt;
+ u8 rx_bds_per_q;
u8 bp_in_en_shift;
u32 bp_in_mask;
u8 hfb_filter_cnt;
+ u8 hfb_filter_size;
u8 qtag_mask;
u16 tbuf_offset;
u32 hfb_offset;
@@ -525,16 +539,30 @@ struct bcmgenet_tx_ring {
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
+ unsigned int clean_ptr; /* Tx ring clean pointer */
unsigned int c_index; /* last consumer index of each ring*/
unsigned int free_bds; /* # of free bds for each ring */
unsigned int write_ptr; /* Tx ring write pointer SW copy */
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_priv *priv,
- struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_priv *priv,
- struct bcmgenet_tx_ring *);
+ void (*int_enable)(struct bcmgenet_tx_ring *);
+ void (*int_disable)(struct bcmgenet_tx_ring *);
+ struct bcmgenet_priv *priv;
+};
+
+struct bcmgenet_rx_ring {
+ struct napi_struct napi; /* Rx NAPI struct */
+ unsigned int index; /* Rx ring index */
+ struct enet_cb *cbs; /* Rx ring buffer control block */
+ unsigned int size; /* Rx ring size */
+ unsigned int c_index; /* Rx last consumer index */
+ unsigned int read_ptr; /* Rx ring read pointer */
+ unsigned int cb_ptr; /* Rx ring initial CB ptr */
+ unsigned int end_ptr; /* Rx ring end CB ptr */
+ unsigned int old_discards;
+ void (*int_enable)(struct bcmgenet_rx_ring *);
+ void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -543,11 +571,6 @@ struct bcmgenet_priv {
void __iomem *base;
enum bcmgenet_version version;
struct net_device *dev;
- u32 int0_mask;
- u32 int1_mask;
-
- /* NAPI for descriptor based rx */
- struct napi_struct napi ____cacheline_aligned;
/* transmit variables */
void __iomem *tx_bds;
@@ -558,13 +581,11 @@ struct bcmgenet_priv {
/* receive variables */
void __iomem *rx_bds;
- void __iomem *rx_bd_assign_ptr;
- int rx_bd_assign_index;
struct enet_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_buf_len;
- unsigned int rx_read_ptr;
- unsigned int rx_c_index;
+
+ struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
/* other misc variables */
struct bcmgenet_hw_params *hw_params;
@@ -651,6 +672,7 @@ int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
/* Wake-on-LAN routines */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 446889cc3c6a..e7651b3c6c57 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -168,7 +168,7 @@ void bcmgenet_mii_reset(struct net_device *dev)
}
}
-static void bcmgenet_ephy_power_up(struct net_device *dev)
+void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 reg = 0;
@@ -178,14 +178,25 @@ static void bcmgenet_ephy_power_up(struct net_device *dev)
return;
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
- reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
- reg |= EXT_GPHY_RESET;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(2);
+ if (enable) {
+ reg &= ~EXT_CK25_DIS;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
- reg &= ~EXT_GPHY_RESET;
+ reg &= ~EXT_GPHY_RESET;
+ } else {
+ reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+ reg |= EXT_CK25_DIS;
+ }
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- udelay(20);
+ udelay(60);
}
static void bcmgenet_internal_phy_setup(struct net_device *dev)
@@ -193,8 +204,8 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 reg;
- /* Power up EPHY */
- bcmgenet_ephy_power_up(dev);
+ /* Power up PHY */
+ bcmgenet_phy_power_set(dev, true);
/* enable APD */
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_PWR_DN_EN_LD;
@@ -451,6 +462,15 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
return 0;
}
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+ struct fixed_phy_status *status)
+{
+ if (dev && dev->phydev && status)
+ status->link = dev->phydev->link;
+
+ return 0;
+}
+
static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
@@ -502,6 +522,13 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
}
+
+ if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
+ ret = fixed_phy_set_link_update(
+ phydev, bcmgenet_fixed_phy_link_update);
+ if (!ret)
+ phydev->link = 0;
+ }
}
priv->phydev = phydev;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 23a019cee279..069952fa5d64 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6217,10 +6217,9 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
- u32 remainder;
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
tg3_full_lock(tp, 0);
@@ -6228,19 +6227,18 @@ static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
ns += tp->ptp_adjust;
tg3_full_unlock(tp);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
u64 ns;
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
- ns = timespec_to_ns(ts);
+ ns = timespec64_to_ns(ts);
tg3_full_lock(tp, 0);
tg3_refclk_write(tp, ns);
@@ -6320,8 +6318,8 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
- .gettime = tg3_ptp_gettime,
- .settime = tg3_ptp_settime,
+ .gettime64 = tg3_ptp_gettime,
+ .settime64 = tg3_ptp_settime,
.enable = tg3_ptp_enable,
};
@@ -7244,7 +7242,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
if (tnapi == &tp->napi[1] && tp->rx_refill)
continue;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Reenable interrupts. */
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -7337,7 +7335,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
sblk->status &= ~SD_STATUS_UPDATED;
if (likely(!tg3_has_work(tnapi))) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
tg3_int_reenable(tnapi);
break;
}
@@ -18131,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- tp->pcierr_recovery = true;
+ /* We needn't recover from permanent error */
+ if (state == pci_channel_io_frozen)
+ tp->pcierr_recovery = true;
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig
index 264155778857..4e8c0b6c57d0 100644
--- a/drivers/net/ethernet/brocade/Kconfig
+++ b/drivers/net/ethernet/brocade/Kconfig
@@ -1,9 +1,9 @@
#
-# Brocade device configuration
+# QLogic BR-series device configuration
#
config NET_VENDOR_BROCADE
- bool "Brocade devices"
+ bool "QLogic BR-series devices"
default y
depends on PCI
---help---
@@ -13,8 +13,8 @@ config NET_VENDOR_BROCADE
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Brocade cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about QLogic BR-series cards. If you say Y, you will be
+ asked for your specific card in the following questions.
if NET_VENDOR_BROCADE
diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile
index b58238d2df6a..fec10f9b4558 100644
--- a/drivers/net/ethernet/brocade/Makefile
+++ b/drivers/net/ethernet/brocade/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile for the Brocade device drivers.
+# Makefile for the QLogic BR-series device drivers.
#
obj-$(CONFIG_BNA) += bna/
diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig
index dc2eb526fbf7..fe01279a8843 100644
--- a/drivers/net/ethernet/brocade/bna/Kconfig
+++ b/drivers/net/ethernet/brocade/bna/Kconfig
@@ -1,17 +1,17 @@
#
-# Brocade network device configuration
+# QLogic BR-series network device configuration
#
config BNA
- tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support"
depends on PCI
---help---
- This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
- cards.
+ This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable
+ Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called bna.
- For general information and support, go to the Brocade support
+ For general information and support, go to the QLogic support
website at:
- <http://support.brocade.com>
+ <http://support.qlogic.com>
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 6027302ae73a..6e10b99733a2 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -1,5 +1,6 @@
#
-# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+# Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+# Copyright (c) 2014-2015 QLogic Corporation.
# All rights reserved.
#
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 550d2521ba76..cf9f3956f198 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include "bfa_cee.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
index 93fde633d6f3..d04eef5d5a77 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFA_CEE_H__
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index ad004a4c3897..af25d8e8fae0 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
/* BFA common services */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index b7d8127c198f..3bfd9da92630 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFA_DEFS_H__
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index b39c5f23974b..a37326d44fbb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFA_DEFS_CNA_H__
#define __BFA_DEFS_CNA_H__
@@ -134,7 +135,7 @@ struct bfa_cee_lldp_str {
u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
};
-/* LLDP paramters */
+/* LLDP parameters */
struct bfa_cee_lldp_cfg {
struct bfa_cee_lldp_str chassis_id;
struct bfa_cee_lldp_str port_id;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 7fb396fe679d..7a45cd0b594d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFA_DEFS_MFG_COMM_H__
#define __BFA_DEFS_MFG_COMM_H__
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index ea9af9ae754d..a43b56002752 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFA_DEFS_STATUS_H__
#define __BFA_DEFS_STATUS_H__
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 354ae9792bad..594a2ab36d31 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include "bfa_ioc.h"
@@ -1339,7 +1340,7 @@ bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
return true;
}
-/* Returns TRUE if major minor and maintainence are same.
+/* Returns TRUE if major minor and maintenance are same.
* If patch version are same, check for MD5 Checksum to be same.
*/
static bool
@@ -2763,7 +2764,7 @@ bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
list_add_tail(&notify->qe, &ioc->notify_q);
}
-#define BFA_MFG_NAME "Brocade"
+#define BFA_MFG_NAME "QLogic"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
struct bfa_adapter_attr *ad_attr)
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 20cff7df4b55..effb7156e7a4 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFA_IOC_H__
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index d639558455cb..2e72445dbb4f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include "bfa_ioc.h"
@@ -698,7 +699,7 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb)
/*
* Ignore mode and program for the max clock (which is FC16)
- * Firmware/NFC will do the PLL init appropiately
+ * Firmware/NFC will do the PLL init appropriately
*/
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index 55067d0d25cf..c07d5b9372f4 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
/* MSGQ module source file. */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
index a6a565a366dc..66bc8b5acd57 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFA_MSGQ_H__
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 8c563a77cdf6..2bcde4042268 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFI_H__
#define __BFI_H__
@@ -158,8 +159,8 @@ enum bfi_asic_gen {
};
enum bfi_asic_mode {
- BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */
- BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */
+ BFI_ASIC_MODE_FC = 1, /* FC up to 8G speed */
+ BFI_ASIC_MODE_FC16 = 2, /* FC up to 16G speed */
BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */
BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */
};
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 6704a4392973..bd605bee72ee 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BFI_CNA_H__
#define __BFI_CNA_H__
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index ae072dc5d238..bccca3bbadb8 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
/* BNA Hardware and Firmware Interface */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index c49fa312ddbd..2835b51eabec 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,13 +11,14 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
/*
- * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs
*/
#ifndef __BFI_REG_H__
@@ -221,7 +222,7 @@ enum {
#define __PMM_1T_RESET_P 0x00000001
#define PMM_1T_RESET_REG_P1 0x00023c1c
-/* Brocade 1860 Adapter specific defines */
+/* QLogic BR-series 1860 Adapter specific defines */
#define CT2_PCI_CPQ_BASE 0x00030000
#define CT2_PCI_APP_BASE 0x00030100
#define CT2_PCI_ETH_BASE 0x00030400
@@ -264,7 +265,7 @@ enum {
#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
/*
- * Brocade 1860 adapter CPQ block registers
+ * QLogic BR-series 1860 adapter CPQ block registers
*/
#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 1f512190d696..8ba72b1f36d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BNA_H__
#define __BNA_H__
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 903466ef41c0..deb8da6ab9cc 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include "bna.h"
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 2702d02e98d9..174af0e9d056 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
/* File for interrupt macros and functions */
@@ -362,7 +363,7 @@ struct bna_txq_wi_vector {
/* TxQ Entry Structure
*
- * BEWARE: Load values into this structure with correct endianess.
+ * BEWARE: Load values into this structure with correct endianness.
*/
struct bna_txq_entry {
union {
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 5fac411c52f4..8ab3a5f62706 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include "bna.h"
#include "bfi.h"
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index 621547cd3504..d0a7a566f5d6 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BNA_TYPES_H__
#define __BNA_TYPES_H__
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7714d7790089..37072a83f9d6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include <linux/bitops.h>
#include <linux/netdevice.h>
@@ -3867,7 +3868,7 @@ bnad_module_init(void)
{
int err;
- pr_info("Brocade 10G Ethernet driver - version: %s\n",
+ pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
BNAD_VERSION);
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
@@ -3894,7 +3895,7 @@ module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 2842c188e0da..7ead6c23edb6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __BNAD_H__
#define __BNAD_H__
@@ -71,7 +72,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.23.0"
+#define BNAD_VERSION "3.2.25.1"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 619083a860a4..72c89550417c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include <linux/debugfs.h>
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index d26adac6ab99..12f344debd1c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include "cna.h"
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index b3ff6d507951..28e7d0ffeab1 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2006-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#ifndef __CNA_H__
@@ -37,8 +38,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.5.1.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index 6f72771caea6..ebf462d8082f 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -1,5 +1,5 @@
/*
- * Linux network driver for Brocade Converged Network Adapter.
+ * Linux network driver for QLogic BR-series Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
@@ -11,9 +11,10 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014-2015 QLogic Corporation
* All rights reserved
- * www.brocade.com
+ * www.qlogic.com
*/
#include <linux/firmware.h>
#include "bnad.h"
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 321d2ad235d9..1ba3e3a67389 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
config NET_CADENCE
bool "Cadence devices"
- depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
+ depends on HAS_IOMEM
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -20,17 +20,9 @@ config NET_CADENCE
if NET_CADENCE
-config ARM_AT91_ETHER
- tristate "AT91RM9200 Ethernet support"
- depends on HAS_DMA && (ARCH_AT91 || COMPILE_TEST)
- select MACB
- ---help---
- If you wish to compile a kernel for the AT91RM9200 and enable
- ethernet support, then you should always answer Y to this.
-
config MACB
tristate "Cadence MACB/GEM support"
- depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
+ depends on HAS_DMA
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 9068b8331ed1..91f79b1f0505 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -2,5 +2,4 @@
# Makefile for the Atmel network device drivers.
#
-obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
obj-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
deleted file mode 100644
index 7ef55f5fa664..000000000000
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * Ethernet driver for the Atmel AT91RM9200 (Thunder)
- *
- * Copyright (C) 2003 SAN People (Pty) Ltd
- *
- * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
- * Initial version by Rick Bronson 01/11/2003
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/dma-mapping.h>
-#include <linux/ethtool.h>
-#include <linux/platform_data/macb.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gfp.h>
-#include <linux/phy.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_net.h>
-
-#include "macb.h"
-
-/* 1518 rounded up */
-#define MAX_RBUFF_SZ 0x600
-/* max number of receive buffers */
-#define MAX_RX_DESCR 9
-
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
-{
- struct macb *lp = netdev_priv(dev);
- dma_addr_t addr;
- u32 ctl;
- int i;
-
- lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
- (MAX_RX_DESCR *
- sizeof(struct macb_dma_desc)),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring)
- return -ENOMEM;
-
- lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
- if (!lp->rx_buffers) {
- dma_free_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
- return -ENOMEM;
- }
-
- addr = lp->rx_buffers_dma;
- for (i = 0; i < MAX_RX_DESCR; i++) {
- lp->rx_ring[i].addr = addr;
- lp->rx_ring[i].ctrl = 0;
- addr += MAX_RBUFF_SZ;
- }
-
- /* Set the Wrap bit on the last descriptor */
- lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
-
- /* Reset buffer index */
- lp->rx_tail = 0;
-
- /* Program address of descriptor list in Rx Buffer Queue register */
- macb_writel(lp, RBQP, lp->rx_ring_dma);
-
- /* Enable Receive and Transmit */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
-
- return 0;
-}
-
-/* Open the ethernet interface */
-static int at91ether_open(struct net_device *dev)
-{
- struct macb *lp = netdev_priv(dev);
- u32 ctl;
- int ret;
-
- /* Clear internal statistics */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
-
- macb_set_hwaddr(lp);
-
- ret = at91ether_start(dev);
- if (ret)
- return ret;
-
- /* Enable MAC interrupts */
- macb_writel(lp, IER, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
-
- /* schedule a link state check */
- phy_start(lp->phy_dev);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-/* Close the interface */
-static int at91ether_close(struct net_device *dev)
-{
- struct macb *lp = netdev_priv(dev);
- u32 ctl;
-
- /* Disable Receiver and Transmitter */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
- /* Disable MAC interrupts */
- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
-
- netif_stop_queue(dev);
-
- dma_free_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
-
- dma_free_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * MAX_RBUFF_SZ,
- lp->rx_buffers, lp->rx_buffers_dma);
- lp->rx_buffers = NULL;
-
- return 0;
-}
-
-/* Transmit packet */
-static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct macb *lp = netdev_priv(dev);
-
- if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
- netif_stop_queue(dev);
-
- /* Store packet information (to free when Tx completed) */
- lp->skb = skb;
- lp->skb_length = skb->len;
- lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
- DMA_TO_DEVICE);
-
- /* Set address of the data in the Transmit Address register */
- macb_writel(lp, TAR, lp->skb_physaddr);
- /* Set length of the packet in the Transmit Control register */
- macb_writel(lp, TCR, skb->len);
-
- } else {
- netdev_err(dev, "%s called, but device is busy!\n", __func__);
- return NETDEV_TX_BUSY;
- }
-
- return NETDEV_TX_OK;
-}
-
-/* Extract received frame from buffer descriptors and sent to upper layers.
- * (Called from interrupt context)
- */
-static void at91ether_rx(struct net_device *dev)
-{
- struct macb *lp = netdev_priv(dev);
- unsigned char *p_recv;
- struct sk_buff *skb;
- unsigned int pktlen;
-
- while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
- p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
- pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
- skb = netdev_alloc_skb(dev, pktlen + 2);
- if (skb) {
- skb_reserve(skb, 2);
- memcpy(skb_put(skb, pktlen), p_recv, pktlen);
-
- skb->protocol = eth_type_trans(skb, dev);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pktlen;
- netif_rx(skb);
- } else {
- lp->stats.rx_dropped++;
- }
-
- if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
- lp->stats.multicast++;
-
- /* reset ownership bit */
- lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
-
- /* wrap after last buffer */
- if (lp->rx_tail == MAX_RX_DESCR - 1)
- lp->rx_tail = 0;
- else
- lp->rx_tail++;
- }
-}
-
-/* MAC interrupt handler */
-static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct macb *lp = netdev_priv(dev);
- u32 intstatus, ctl;
-
- /* MAC Interrupt Status register indicates what interrupts are pending.
- * It is automatically cleared once read.
- */
- intstatus = macb_readl(lp, ISR);
-
- /* Receive complete */
- if (intstatus & MACB_BIT(RCOMP))
- at91ether_rx(dev);
-
- /* Transmit complete */
- if (intstatus & MACB_BIT(TCOMP)) {
- /* The TCOM bit is set even if the transmission failed */
- if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
- lp->stats.tx_errors++;
-
- if (lp->skb) {
- dev_kfree_skb_irq(lp->skb);
- lp->skb = NULL;
- dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += lp->skb_length;
- }
- netif_wake_queue(dev);
- }
-
- /* Work-around for EMAC Errata section 41.3.1 */
- if (intstatus & MACB_BIT(RXUBR)) {
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
- macb_writel(lp, NCR, ctl | MACB_BIT(RE));
- }
-
- if (intstatus & MACB_BIT(ISR_ROVR))
- netdev_err(dev, "ROVR error\n");
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void at91ether_poll_controller(struct net_device *dev)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- at91ether_interrupt(dev->irq, dev);
- local_irq_restore(flags);
-}
-#endif
-
-static const struct net_device_ops at91ether_netdev_ops = {
- .ndo_open = at91ether_open,
- .ndo_stop = at91ether_close,
- .ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
- .ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = macb_ioctl,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = at91ether_poll_controller,
-#endif
-};
-
-#if defined(CONFIG_OF)
-static const struct of_device_id at91ether_dt_ids[] = {
- { .compatible = "cdns,at91rm9200-emac" },
- { .compatible = "cdns,emac" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
-#endif
-
-/* Detect MAC & PHY and perform ethernet interface initialization */
-static int __init at91ether_probe(struct platform_device *pdev)
-{
- struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
- struct resource *regs;
- struct net_device *dev;
- struct phy_device *phydev;
- struct macb *lp;
- int res;
- u32 reg;
- const char *mac;
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs)
- return -ENOENT;
-
- dev = alloc_etherdev(sizeof(struct macb));
- if (!dev)
- return -ENOMEM;
-
- lp = netdev_priv(dev);
- lp->pdev = pdev;
- lp->dev = dev;
- spin_lock_init(&lp->lock);
-
- /* physical base address */
- dev->base_addr = regs->start;
- lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
- if (!lp->regs) {
- res = -ENOMEM;
- goto err_free_dev;
- }
-
- /* Clock */
- lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
- if (IS_ERR(lp->pclk)) {
- res = PTR_ERR(lp->pclk);
- goto err_free_dev;
- }
- clk_prepare_enable(lp->pclk);
-
- lp->hclk = ERR_PTR(-ENOENT);
- lp->tx_clk = ERR_PTR(-ENOENT);
-
- /* Install the interrupt handler */
- dev->irq = platform_get_irq(pdev, 0);
- res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
- if (res)
- goto err_disable_clock;
-
- dev->netdev_ops = &at91ether_netdev_ops;
- dev->ethtool_ops = &macb_ethtool_ops;
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
- memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
- else
- macb_get_hwaddr(lp);
-
- res = of_get_phy_mode(pdev->dev.of_node);
- if (res < 0) {
- if (board_data && board_data->is_rmii)
- lp->phy_interface = PHY_INTERFACE_MODE_RMII;
- else
- lp->phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
- lp->phy_interface = res;
- }
-
- macb_writel(lp, NCR, 0);
-
- reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
- if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
- reg |= MACB_BIT(RM9200_RMII);
-
- macb_writel(lp, NCFGR, reg);
-
- /* Register the network interface */
- res = register_netdev(dev);
- if (res)
- goto err_disable_clock;
-
- res = macb_mii_init(lp);
- if (res)
- goto err_out_unregister_netdev;
-
- /* will be enabled in open() */
- netif_carrier_off(dev);
-
- phydev = lp->phy_dev;
- netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
- phydev->drv->name, dev_name(&phydev->dev),
- phydev->irq);
-
- /* Display ethernet banner */
- netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
- dev->base_addr, dev->irq, dev->dev_addr);
-
- return 0;
-
-err_out_unregister_netdev:
- unregister_netdev(dev);
-err_disable_clock:
- clk_disable_unprepare(lp->pclk);
-err_free_dev:
- free_netdev(dev);
- return res;
-}
-
-static int at91ether_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct macb *lp = netdev_priv(dev);
-
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
-
- mdiobus_unregister(lp->mii_bus);
- kfree(lp->mii_bus->irq);
- mdiobus_free(lp->mii_bus);
- unregister_netdev(dev);
- clk_disable_unprepare(lp->pclk);
- free_netdev(dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- struct net_device *net_dev = platform_get_drvdata(pdev);
- struct macb *lp = netdev_priv(net_dev);
-
- if (netif_running(net_dev)) {
- netif_stop_queue(net_dev);
- netif_device_detach(net_dev);
-
- clk_disable_unprepare(lp->pclk);
- }
- return 0;
-}
-
-static int at91ether_resume(struct platform_device *pdev)
-{
- struct net_device *net_dev = platform_get_drvdata(pdev);
- struct macb *lp = netdev_priv(net_dev);
-
- if (netif_running(net_dev)) {
- clk_prepare_enable(lp->pclk);
-
- netif_device_attach(net_dev);
- netif_start_queue(net_dev);
- }
- return 0;
-}
-#else
-#define at91ether_suspend NULL
-#define at91ether_resume NULL
-#endif
-
-static struct platform_driver at91ether_driver = {
- .remove = at91ether_remove,
- .suspend = at91ether_suspend,
- .resume = at91ether_resume,
- .driver = {
- .name = "at91_ether",
- .of_match_table = of_match_ptr(at91ether_dt_ids),
- },
-};
-
-module_platform_driver_probe(at91ether_driver, at91ether_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
-MODULE_AUTHOR("Andrew Victor");
-MODULE_ALIAS("platform:at91_ether");
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 81d41539fcba..61aa570aad9a 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -102,7 +102,7 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
-void macb_set_hwaddr(struct macb *bp)
+static void macb_set_hwaddr(struct macb *bp)
{
u32 bottom;
u16 top;
@@ -120,9 +120,8 @@ void macb_set_hwaddr(struct macb *bp)
macb_or_gem_writel(bp, SA4B, 0);
macb_or_gem_writel(bp, SA4T, 0);
}
-EXPORT_SYMBOL_GPL(macb_set_hwaddr);
-void macb_get_hwaddr(struct macb *bp)
+static void macb_get_hwaddr(struct macb *bp)
{
struct macb_platform_data *pdata;
u32 bottom;
@@ -162,7 +161,6 @@ void macb_get_hwaddr(struct macb *bp)
netdev_info(bp->dev, "invalid hw address, using random\n");
eth_hw_addr_random(bp->dev);
}
-EXPORT_SYMBOL_GPL(macb_get_hwaddr);
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
@@ -213,6 +211,9 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
long ferr, rate, rate_rounded;
+ if (!clk)
+ return;
+
switch (speed) {
case SPEED_10:
rate = 2500000;
@@ -292,11 +293,13 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- if (!IS_ERR(bp->tx_clk))
- macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
-
if (status_change) {
if (phydev->link) {
+ /* Update the TX clock rate if and only if the link is
+ * up and there has been a link change.
+ */
+ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
netif_carrier_on(dev);
netdev_info(dev, "link up (%d/%s)\n",
phydev->speed,
@@ -357,7 +360,7 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
-int macb_mii_init(struct macb *bp)
+static int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
struct device_node *np;
@@ -438,7 +441,6 @@ err_out_free_mdiobus:
err_out:
return err;
}
-EXPORT_SYMBOL_GPL(macb_mii_init);
static void macb_update_stats(struct macb *bp)
{
@@ -449,7 +451,7 @@ static void macb_update_stats(struct macb *bp)
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
for(; p < end; p++, reg++)
- *p += __raw_readl(reg);
+ *p += readl_relaxed(reg);
}
static int macb_halt_tx(struct macb *bp)
@@ -705,6 +707,9 @@ static void gem_rx_refill(struct macb *bp)
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
+ } else {
+ bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
+ bp->rx_ring[entry].ctrl = 0;
}
}
@@ -976,7 +981,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
struct macb_queue *queue = dev_id;
struct macb *bp = queue->bp;
struct net_device *dev = bp->dev;
- u32 status;
+ u32 status, ctrl;
status = queue_readl(queue, ISR);
@@ -1032,6 +1037,15 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* add that if/when we get our hands on a full-blown MII PHY.
*/
+ if (status & MACB_BIT(RXUBR)) {
+ ctrl = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+ macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(RXUBR));
+ }
+
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
if (macb_is_gem(bp))
@@ -1471,9 +1485,9 @@ static void macb_init_rings(struct macb *bp)
for (i = 0; i < TX_RING_SIZE; i++) {
bp->queues[0].tx_ring[i].addr = 0;
bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
- bp->queues[0].tx_head = 0;
- bp->queues[0].tx_tail = 0;
}
+ bp->queues[0].tx_head = 0;
+ bp->queues[0].tx_tail = 0;
bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
bp->rx_tail = 0;
@@ -1578,6 +1592,7 @@ static u32 macb_dbw(struct macb *bp)
static void macb_configure_dma(struct macb *bp)
{
u32 dmacfg;
+ u32 tmp, ncr;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1585,7 +1600,24 @@ static void macb_configure_dma(struct macb *bp)
if (bp->dma_burst_length)
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
- dmacfg &= ~GEM_BIT(ENDIA);
+ dmacfg &= ~GEM_BIT(ENDIA_PKT);
+
+ /* Find the CPU endianness by using the loopback bit of net_ctrl
+ * register. Save it first. When the CPU is in big endian we
+ * need to program swapped mode for management descriptor access.
+ */
+ ncr = macb_readl(bp, NCR);
+ __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
+ tmp = __raw_readl(bp->regs + MACB_NCR);
+
+ if (tmp == MACB_BIT(LLB))
+ dmacfg &= ~GEM_BIT(ENDIA_DESC);
+ else
+ dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
+
+ /* Restore net_ctrl */
+ macb_writel(bp, NCR, ncr);
+
if (bp->dev->features & NETIF_F_HW_CSUM)
dmacfg |= GEM_BIT(TXCOEN);
else
@@ -1723,7 +1755,7 @@ static void macb_sethashtable(struct net_device *dev)
/*
* Enable/Disable promiscuous and multicast modes.
*/
-void macb_set_rx_mode(struct net_device *dev)
+static void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
struct macb *bp = netdev_priv(dev);
@@ -1764,7 +1796,6 @@ void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, NCFGR, cfg);
}
-EXPORT_SYMBOL_GPL(macb_set_rx_mode);
static int macb_open(struct net_device *dev)
{
@@ -1832,14 +1863,14 @@ static void gem_update_stats(struct macb *bp)
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
- u64 val = __raw_readl(bp->regs + offset);
+ u64 val = readl_relaxed(bp->regs + offset);
bp->ethtool_stats[i] += val;
*p += val;
if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
/* Add GEM_OCTTXH, GEM_OCTRXH */
- val = __raw_readl(bp->regs + offset + 4);
+ val = readl_relaxed(bp->regs + offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
*(++p) += val;
}
@@ -1917,7 +1948,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-struct net_device_stats *macb_get_stats(struct net_device *dev)
+static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
@@ -1937,12 +1968,12 @@ struct net_device_stats *macb_get_stats(struct net_device *dev)
hwstat->rx_oversize_pkts +
hwstat->rx_jabbers +
hwstat->rx_undersize_pkts +
- hwstat->sqe_test_errors +
hwstat->rx_length_mismatch);
nstat->tx_errors = (hwstat->tx_late_cols +
hwstat->tx_excessive_cols +
hwstat->tx_underruns +
- hwstat->tx_carrier_errors);
+ hwstat->tx_carrier_errors +
+ hwstat->sqe_test_errors);
nstat->collisions = (hwstat->tx_single_cols +
hwstat->tx_multiple_cols +
hwstat->tx_excessive_cols);
@@ -1963,7 +1994,6 @@ struct net_device_stats *macb_get_stats(struct net_device *dev)
return nstat;
}
-EXPORT_SYMBOL_GPL(macb_get_stats);
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -2019,13 +2049,13 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
+ regs_buff[12] = macb_or_gem_readl(bp, USRIO);
if (macb_is_gem(bp)) {
- regs_buff[12] = gem_readl(bp, USRIO);
regs_buff[13] = gem_readl(bp, DMACFG);
}
}
-const struct ethtool_ops macb_ethtool_ops = {
+static const struct ethtool_ops macb_ethtool_ops = {
.get_settings = macb_get_settings,
.set_settings = macb_set_settings,
.get_regs_len = macb_get_regs_len,
@@ -2033,7 +2063,6 @@ const struct ethtool_ops macb_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
};
-EXPORT_SYMBOL_GPL(macb_ethtool_ops);
static const struct ethtool_ops gem_ethtool_ops = {
.get_settings = macb_get_settings,
@@ -2047,7 +2076,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_sset_count = gem_get_sset_count,
};
-int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev = bp->phy_dev;
@@ -2060,7 +2089,6 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
-EXPORT_SYMBOL_GPL(macb_ioctl);
static int macb_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -2112,63 +2140,20 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_set_features = macb_set_features,
};
-#if defined(CONFIG_OF)
-static const struct macb_config pc302gem_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
- .dma_burst_length = 16,
-};
-
-static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
- .dma_burst_length = 16,
-};
-
-static const struct macb_config sama5d4_config = {
- .caps = 0,
- .dma_burst_length = 4,
-};
-
-static const struct of_device_id macb_dt_ids[] = {
- { .compatible = "cdns,at32ap7000-macb" },
- { .compatible = "cdns,at91sam9260-macb" },
- { .compatible = "cdns,macb" },
- { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
- { .compatible = "cdns,gem", .data = &pc302gem_config },
- { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
- { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, macb_dt_ids);
-#endif
-
/*
- * Configure peripheral capacities according to device tree
+ * Configure peripheral capabilities according to device tree
* and integration options used
*/
-static void macb_configure_caps(struct macb *bp)
+static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
{
u32 dcfg;
- const struct of_device_id *match;
- const struct macb_config *config;
- if (bp->pdev->dev.of_node) {
- match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
- if (match && match->data) {
- config = match->data;
+ if (dt_conf)
+ bp->caps = dt_conf->caps;
- bp->caps = config->caps;
- /*
- * As we have access to the matching node, configure
- * DMA burst length as well
- */
- bp->dma_burst_length = config->dma_burst_length;
- }
- }
-
- if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
+ if (macb_is_gem_hw(bp->regs)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
- if (macb_is_gem(bp)) {
dcfg = gem_readl(bp, DCFG1);
if (GEM_BFEXT(IRQCOR, dcfg) == 0)
bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
@@ -2185,18 +2170,22 @@ static void macb_probe_queues(void __iomem *mem,
unsigned int *num_queues)
{
unsigned int hw_q;
- u32 mid;
*queue_mask = 0x1;
*num_queues = 1;
- /* is it macb or gem ? */
- mid = __raw_readl(mem + MACB_MID);
- if (MACB_BFEXT(IDNUM, mid) != 0x2)
+ /* is it macb or gem ?
+ *
+ * We need to read directly from the hardware here because
+ * we are early in the probe process and don't have the
+ * MACB_CAPS_MACB_IS_GEM flag set yet
+ */
+ if (!macb_is_gem_hw(mem))
return;
/* bit 0 is never set but queue 0 always exists */
- *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff;
+ *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
+
*queue_mask |= 0x1;
for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
@@ -2204,95 +2193,73 @@ static void macb_probe_queues(void __iomem *mem,
(*num_queues)++;
}
-static int macb_probe(struct platform_device *pdev)
+static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
+ struct clk **hclk, struct clk **tx_clk)
{
- struct macb_platform_data *pdata;
- struct resource *regs;
- struct net_device *dev;
- struct macb *bp;
- struct macb_queue *queue;
- struct phy_device *phydev;
- u32 config;
- int err = -ENXIO;
- const char *mac;
- void __iomem *mem;
- unsigned int hw_q, queue_mask, q, num_queues;
- struct clk *pclk, *hclk, *tx_clk;
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(&pdev->dev, "no mmio resource defined\n");
- goto err_out;
- }
+ int err;
- pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(pclk)) {
- err = PTR_ERR(pclk);
+ *pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(*pclk)) {
+ err = PTR_ERR(*pclk);
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
- goto err_out;
+ return err;
}
- hclk = devm_clk_get(&pdev->dev, "hclk");
- if (IS_ERR(hclk)) {
- err = PTR_ERR(hclk);
+ *hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(*hclk)) {
+ err = PTR_ERR(*hclk);
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
- goto err_out;
+ return err;
}
- tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
- err = clk_prepare_enable(pclk);
+ err = clk_prepare_enable(*pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
- goto err_out;
+ return err;
}
- err = clk_prepare_enable(hclk);
+ err = clk_prepare_enable(*hclk);
if (err) {
dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
- goto err_out_disable_pclk;
+ goto err_disable_pclk;
}
- if (!IS_ERR(tx_clk)) {
- err = clk_prepare_enable(tx_clk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
- err);
- goto err_out_disable_hclk;
- }
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_hclk;
}
- err = -ENOMEM;
- mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
- if (!mem) {
- dev_err(&pdev->dev, "failed to map registers, aborting.\n");
- goto err_out_disable_clocks;
- }
+ return 0;
- macb_probe_queues(mem, &queue_mask, &num_queues);
- dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
- if (!dev)
- goto err_out_disable_clocks;
+err_disable_hclk:
+ clk_disable_unprepare(*hclk);
- SET_NETDEV_DEV(dev, &pdev->dev);
+err_disable_pclk:
+ clk_disable_unprepare(*pclk);
- bp = netdev_priv(dev);
- bp->pdev = pdev;
- bp->dev = dev;
- bp->regs = mem;
- bp->num_queues = num_queues;
- bp->pclk = pclk;
- bp->hclk = hclk;
- bp->tx_clk = tx_clk;
+ return err;
+}
- spin_lock_init(&bp->lock);
+static int macb_init(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ unsigned int hw_q, q;
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
+ int err;
+ u32 val;
/* set the queue register mapping once for all: queue0 has a special
* register mapping but we don't want to test the queue index then
* compute the corresponding register offset at run time.
*/
for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
- if (!(queue_mask & (1 << hw_q)))
+ if (!(bp->queue_mask & (1 << hw_q)))
continue;
queue = &bp->queues[q];
@@ -2319,27 +2286,21 @@ static int macb_probe(struct platform_device *pdev)
*/
queue->irq = platform_get_irq(pdev, q);
err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
- 0, dev->name, queue);
+ IRQF_SHARED, dev->name, queue);
if (err) {
dev_err(&pdev->dev,
"Unable to request IRQ %d (error %d)\n",
queue->irq, err);
- goto err_out_free_netdev;
+ return err;
}
INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
q++;
}
- dev->irq = bp->queues[0].irq;
dev->netdev_ops = &macb_netdev_ops;
netif_napi_add(dev, &bp->napi, macb_poll, 64);
- dev->base_addr = regs->start;
-
- /* setup capacities */
- macb_configure_caps(bp);
-
/* setup appropriate routines according to adapter type */
if (macb_is_gem(bp)) {
bp->max_tx_length = GEM_MAX_TX_LEN;
@@ -2366,18 +2327,470 @@ static int macb_probe(struct platform_device *pdev)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
+ val = 0;
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ val = GEM_BIT(RGMII);
+ else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
+ (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+ val = MACB_BIT(RMII);
+ else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+ val = MACB_BIT(MII);
+
+ if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
+ val |= MACB_BIT(CLKEN);
+
+ macb_or_gem_writel(bp, USRIO, val);
+
/* Set MII management clock divider */
- config = macb_mdc_clk_div(bp);
- config |= macb_dbw(bp);
- macb_writel(bp, NCFGR, config);
+ val = macb_mdc_clk_div(bp);
+ val |= macb_dbw(bp);
+ macb_writel(bp, NCFGR, val);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+/* 1518 rounded up */
+#define AT91ETHER_MAX_RBUFF_SZ 0x600
+/* max number of receive buffers */
+#define AT91ETHER_MAX_RX_DESCR 9
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ dma_addr_t addr;
+ u32 ctl;
+ int i;
+
+ lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ (AT91ETHER_MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc)),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring)
+ return -ENOMEM;
+
+ lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ AT91ETHER_MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
+ if (!lp->rx_buffers) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+ return -ENOMEM;
+ }
+
+ addr = lp->rx_buffers_dma;
+ for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
+ lp->rx_ring[i].addr = addr;
+ lp->rx_ring[i].ctrl = 0;
+ addr += AT91ETHER_MAX_RBUFF_SZ;
+ }
+
+ /* Set the Wrap bit on the last descriptor */
+ lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+
+ /* Reset buffer index */
+ lp->rx_tail = 0;
+
+ /* Program address of descriptor list in Rx Buffer Queue register */
+ macb_writel(lp, RBQP, lp->rx_ring_dma);
+
+ /* Enable Receive and Transmit */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+ return 0;
+}
+
+/* Open the ethernet interface */
+static int at91ether_open(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
+ int ret;
+
+ /* Clear internal statistics */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
+
+ macb_set_hwaddr(lp);
+
+ ret = at91ether_start(dev);
+ if (ret)
+ return ret;
+
+ /* Enable MAC interrupts */
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* schedule a link state check */
+ phy_start(lp->phy_dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* Close the interface */
+static int at91ether_close(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
+
+ /* Disable Receiver and Transmitter */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+ /* Disable MAC interrupts */
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ netif_stop_queue(dev);
+
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
+ lp->rx_buffers, lp->rx_buffers_dma);
+ lp->rx_buffers = NULL;
+
+ return 0;
+}
+
+/* Transmit packet */
+static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+
+ if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+ netif_stop_queue(dev);
+
+ /* Store packet information (to free when Tx completed) */
+ lp->skb = skb;
+ lp->skb_length = skb->len;
+ lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ /* Set address of the data in the Transmit Address register */
+ macb_writel(lp, TAR, lp->skb_physaddr);
+ /* Set length of the packet in the Transmit Control register */
+ macb_writel(lp, TCR, skb->len);
+
+ } else {
+ netdev_err(dev, "%s called, but device is busy!\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
- mac = of_get_mac_address(pdev->dev.of_node);
+ return NETDEV_TX_OK;
+}
+
+/* Extract received frame from buffer descriptors and send it to the upper layers.
+ * (Called from interrupt context)
+ */
+static void at91ether_rx(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ unsigned char *p_recv;
+ struct sk_buff *skb;
+ unsigned int pktlen;
+
+ while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+ p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+ pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+ skb = netdev_alloc_skb(dev, pktlen + 2);
+ if (skb) {
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, pktlen), p_recv, pktlen);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
+ netif_rx(skb);
+ } else {
+ lp->stats.rx_dropped++;
+ }
+
+ if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+ lp->stats.multicast++;
+
+ /* reset ownership bit */
+ lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+
+ /* wrap after last buffer */
+ if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+ lp->rx_tail = 0;
+ else
+ lp->rx_tail++;
+ }
+}
+
+/* MAC interrupt handler */
+static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct macb *lp = netdev_priv(dev);
+ u32 intstatus, ctl;
+
+ /* MAC Interrupt Status register indicates what interrupts are pending.
+ * It is automatically cleared once read.
+ */
+ intstatus = macb_readl(lp, ISR);
+
+ /* Receive complete */
+ if (intstatus & MACB_BIT(RCOMP))
+ at91ether_rx(dev);
+
+ /* Transmit complete */
+ if (intstatus & MACB_BIT(TCOMP)) {
+ /* The TCOM bit is set even if the transmission failed */
+ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+ lp->stats.tx_errors++;
+
+ if (lp->skb) {
+ dev_kfree_skb_irq(lp->skb);
+ lp->skb = NULL;
+ dma_unmap_single(NULL, lp->skb_physaddr,
+ lp->skb_length, DMA_TO_DEVICE);
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += lp->skb_length;
+ }
+ netif_wake_queue(dev);
+ }
+
+ /* Work-around for EMAC Errata section 41.3.1 */
+ if (intstatus & MACB_BIT(RXUBR)) {
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE));
+ }
+
+ if (intstatus & MACB_BIT(ISR_ROVR))
+ netdev_err(dev, "ROVR error\n");
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void at91ether_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ at91ether_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+static const struct net_device_ops at91ether_netdev_ops = {
+ .ndo_open = at91ether_open,
+ .ndo_stop = at91ether_close,
+ .ndo_start_xmit = at91ether_start_xmit,
+ .ndo_get_stats = macb_get_stats,
+ .ndo_set_rx_mode = macb_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = macb_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = at91ether_poll_controller,
+#endif
+};
+
+static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
+ struct clk **hclk, struct clk **tx_clk)
+{
+ int err;
+
+ *hclk = NULL;
+ *tx_clk = NULL;
+
+ *pclk = devm_clk_get(&pdev->dev, "ether_clk");
+ if (IS_ERR(*pclk))
+ return PTR_ERR(*pclk);
+
+ err = clk_prepare_enable(*pclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int at91ether_init(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int err;
+ u32 reg;
+
+ dev->netdev_ops = &at91ether_netdev_ops;
+ dev->ethtool_ops = &macb_ethtool_ops;
+
+ err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
+ 0, dev->name, dev);
+ if (err)
+ return err;
+
+ macb_writel(bp, NCR, 0);
+
+ reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+ reg |= MACB_BIT(RM9200_RMII);
+
+ macb_writel(bp, NCFGR, reg);
+
+ return 0;
+}
+
+static const struct macb_config at91sam9260_config = {
+ .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
+static const struct macb_config pc302gem_config = {
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
+static const struct macb_config sama5d3_config = {
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
+static const struct macb_config sama5d4_config = {
+ .caps = 0,
+ .dma_burst_length = 4,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
+static const struct macb_config emac_config = {
+ .clk_init = at91ether_clk_init,
+ .init = at91ether_init,
+};
+
+static const struct of_device_id macb_dt_ids[] = {
+ { .compatible = "cdns,at32ap7000-macb" },
+ { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
+ { .compatible = "cdns,macb" },
+ { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
+ { .compatible = "cdns,gem", .data = &pc302gem_config },
+ { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+ { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
+ { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
+ { .compatible = "cdns,emac", .data = &emac_config },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+#endif /* CONFIG_OF */
+
+static int macb_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *, struct clk **,
+ struct clk **, struct clk **)
+ = macb_clk_init;
+ int (*init)(struct platform_device *) = macb_init;
+ struct device_node *np = pdev->dev.of_node;
+ const struct macb_config *macb_config = NULL;
+ struct clk *pclk, *hclk, *tx_clk;
+ unsigned int queue_mask, num_queues;
+ struct macb_platform_data *pdata;
+ struct phy_device *phydev;
+ struct net_device *dev;
+ struct resource *regs;
+ void __iomem *mem;
+ const char *mac;
+ struct macb *bp;
+ int err;
+
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_node(macb_dt_ids, np);
+ if (match && match->data) {
+ macb_config = match->data;
+ clk_init = macb_config->clk_init;
+ init = macb_config->init;
+ }
+ }
+
+ err = clk_init(pdev, &pclk, &hclk, &tx_clk);
+ if (err)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ goto err_disable_clocks;
+ }
+
+ macb_probe_queues(mem, &queue_mask, &num_queues);
+ dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_disable_clocks;
+ }
+
+ dev->base_addr = regs->start;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ bp = netdev_priv(dev);
+ bp->pdev = pdev;
+ bp->dev = dev;
+ bp->regs = mem;
+ bp->num_queues = num_queues;
+ bp->queue_mask = queue_mask;
+ if (macb_config)
+ bp->dma_burst_length = macb_config->dma_burst_length;
+ bp->pclk = pclk;
+ bp->hclk = hclk;
+ bp->tx_clk = tx_clk;
+ spin_lock_init(&bp->lock);
+
+ /* setup capabilities */
+ macb_configure_caps(bp, macb_config);
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0) {
+ err = dev->irq;
+ goto err_disable_clocks;
+ }
+
+ mac = of_get_mac_address(np);
if (mac)
memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
else
macb_get_hwaddr(bp);
- err = of_get_phy_mode(pdev->dev.of_node);
+ err = of_get_phy_mode(np);
if (err < 0) {
pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->is_rmii)
@@ -2388,34 +2801,21 @@ static int macb_probe(struct platform_device *pdev)
bp->phy_interface = err;
}
- if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
- macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
- else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
-#if defined(CONFIG_ARCH_AT91)
- macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
- MACB_BIT(CLKEN)));
-#else
- macb_or_gem_writel(bp, USRIO, 0);
-#endif
- else
-#if defined(CONFIG_ARCH_AT91)
- macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
-#else
- macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
-#endif
+ /* IP specific init */
+ err = init(pdev);
+ if (err)
+ goto err_out_free_netdev;
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_netdev;
+ goto err_out_unregister_netdev;
}
err = macb_mii_init(bp);
if (err)
goto err_out_unregister_netdev;
- platform_set_drvdata(pdev, dev);
-
netif_carrier_off(dev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -2430,16 +2830,15 @@ static int macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
+
err_out_free_netdev:
free_netdev(dev);
-err_out_disable_clocks:
- if (!IS_ERR(tx_clk))
- clk_disable_unprepare(tx_clk);
-err_out_disable_hclk:
+
+err_disable_clocks:
+ clk_disable_unprepare(tx_clk);
clk_disable_unprepare(hclk);
-err_out_disable_pclk:
clk_disable_unprepare(pclk);
-err_out:
+
return err;
}
@@ -2458,8 +2857,7 @@ static int macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- if (!IS_ERR(bp->tx_clk))
- clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
free_netdev(dev);
@@ -2477,8 +2875,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
netif_carrier_off(netdev);
netif_device_detach(netdev);
- if (!IS_ERR(bp->tx_clk))
- clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
@@ -2493,8 +2890,7 @@ static int __maybe_unused macb_resume(struct device *dev)
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
- if (!IS_ERR(bp->tx_clk))
- clk_prepare_enable(bp->tx_clk);
+ clk_prepare_enable(bp->tx_clk);
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ff85619a9732..eb7d76f7bf6a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -11,7 +11,7 @@
#define _MACB_H
#define MACB_GREGS_NBR 16
-#define MACB_GREGS_VERSION 1
+#define MACB_GREGS_VERSION 2
#define MACB_MAX_QUEUES 8
/* MACB register offsets */
@@ -229,8 +229,10 @@
/* Bitfields in DMACFG. */
#define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */
#define GEM_FBLDO_SIZE 5
-#define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */
-#define GEM_ENDIA_SIZE 1
+#define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */
+#define GEM_ENDIA_DESC_SIZE 1
+#define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */
+#define GEM_ENDIA_PKT_SIZE 1
#define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */
#define GEM_RXBMS_SIZE 2
#define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */
@@ -389,6 +391,8 @@
/* Capability mask bits */
#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
+#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -423,17 +427,17 @@
/* Register access macros */
#define macb_readl(port,reg) \
- __raw_readl((port)->regs + MACB_##reg)
+ readl_relaxed((port)->regs + MACB_##reg)
#define macb_writel(port,reg,value) \
- __raw_writel((value), (port)->regs + MACB_##reg)
+ writel_relaxed((value), (port)->regs + MACB_##reg)
#define gem_readl(port, reg) \
- __raw_readl((port)->regs + GEM_##reg)
+ readl_relaxed((port)->regs + GEM_##reg)
#define gem_writel(port, reg, value) \
- __raw_writel((value), (port)->regs + GEM_##reg)
+ writel_relaxed((value), (port)->regs + GEM_##reg)
#define queue_readl(queue, reg) \
- __raw_readl((queue)->bp->regs + (queue)->reg)
+ readl_relaxed((queue)->bp->regs + (queue)->reg)
#define queue_writel(queue, reg, value) \
- __raw_writel((value), (queue)->bp->regs + (queue)->reg)
+ writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
@@ -750,6 +754,9 @@ struct macb_or_gem_ops {
struct macb_config {
u32 caps;
unsigned int dma_burst_length;
+ int (*clk_init)(struct platform_device *pdev, struct clk **pclk,
+ struct clk **hclk, struct clk **tx_clk);
+ int (*init)(struct platform_device *pdev);
};
struct macb_queue {
@@ -780,6 +787,7 @@ struct macb {
size_t rx_buffer_size;
unsigned int num_queues;
+ unsigned int queue_mask;
struct macb_queue queues[MACB_MAX_QUEUES];
spinlock_t lock;
@@ -820,18 +828,14 @@ struct macb {
u64 ethtool_stats[GEM_STATS_LEN];
};
-extern const struct ethtool_ops macb_ethtool_ops;
-
-int macb_mii_init(struct macb *bp);
-int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-struct net_device_stats *macb_get_stats(struct net_device *dev);
-void macb_set_rx_mode(struct net_device *dev);
-void macb_set_hwaddr(struct macb *bp);
-void macb_get_hwaddr(struct macb *bp);
-
static inline bool macb_is_gem(struct macb *bp)
{
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
}
+static inline bool macb_is_gem_hw(void __iomem *addr)
+{
+ return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
+}
+
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 47bfea24b9e1..63efa0dc45ba 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -47,9 +47,9 @@
#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
-#define XGMAC_MMC_INTR_RX 0x00000804 /* Recieve Interrupt */
+#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
-#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Recieve Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */
/* Hardware TX Statistics Counters */
@@ -153,7 +153,7 @@
#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT 16
#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
-#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshhold */
+#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
@@ -254,18 +254,18 @@
/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
-#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshhold Ctrl */
+#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK 0x00030000
-#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshhold */
-#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshhold MASK */
-#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshhold */
-#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshhold MASK */
+#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
-#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */
-#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
+#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */
/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index ac6473f75eb9..7daa088a9bb7 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -97,6 +97,17 @@ config CHELSIO_T4_DCB
If unsure, say N.
+config CHELSIO_T4_FCOE
+ bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
+ default n
+ depends on CHELSIO_T4 && CHELSIO_T4_DCB && FCOE
+ ---help---
+ Enable FCoE offload features.
+ Say Y here if you want to enable Fibre Channel over Ethernet (FCoE) support
+ in the driver.
+
+ If unsure, say N.
+
config CHELSIO_T4VF
tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 186566bfdbc8..f5f1b0b51ebd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -354,7 +354,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] = {
"TxOctetsOK",
"TxOctetsBad",
"TxUnicastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index db76f7040455..b96e4bfcac41 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1537,7 +1537,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] = {
"TxOctetsOK ",
"TxFramesOK ",
"TxMulticastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index d6aa602f168d..e4b5b057f417 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -422,7 +422,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
- wmb();
+ dma_wmb();
d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
return 0;
@@ -433,7 +433,7 @@ static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
{
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
- wmb();
+ dma_wmb();
d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
return 0;
@@ -579,7 +579,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
q->sdesc[q->pidx] = q->sdesc[idx];
to->addr_lo = from->addr_lo; /* already big endian */
to->addr_hi = from->addr_hi; /* likewise */
- wmb();
+ dma_wmb();
to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
@@ -1068,7 +1068,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
sd->eop = 1;
wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;
- wmb();
+ dma_wmb();
wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
V_WR_GEN(gen)) | wr_lo;
wr_gen2(d, gen);
@@ -1114,7 +1114,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
}
sd->eop = 1;
wrp->wr_hi |= htonl(F_WR_EOP);
- wmb();
+ dma_wmb();
wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
wr_gen2((struct tx_desc *)wp, ogen);
WARN_ON(ndesc != 0);
@@ -1184,7 +1184,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
| F_WR_SOP | F_WR_EOP | compl);
- wmb();
+ dma_wmb();
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
wr_gen2(d, gen);
@@ -1342,7 +1342,7 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
V_WR_BCNTLFLT(len & 7));
- wmb();
+ dma_wmb();
to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
V_WR_LEN((len + 7) / 8));
wr_gen2(d, gen);
@@ -2271,7 +2271,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
u32 len, flags;
__be32 rss_hi, rss_lo;
- rmb();
+ dma_rmb();
eth = r->rss_hdr.opcode == CPL_RX_PKT;
rss_hi = *(const __be32 *)r;
rss_lo = r->rss_hdr.rss_hash_val;
@@ -2488,7 +2488,7 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
}
if (!is_new_response(r, q))
break;
- rmb();
+ dma_rmb();
} while (is_pure_response(r));
if (sleeping)
@@ -2523,7 +2523,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
if (!is_new_response(r, q))
return -1;
- rmb();
+ dma_rmb();
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 184a8d545ac4..a22768c94200 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -840,7 +840,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
* Read the specified number of 32-bit words from the serial flash.
* If @byte_oriented is set the read data is stored as a byte array
* (i.e., big-endian), otherwise as 32-bit words in the platform's
- * natural endianess.
+ * natural endianness.
*/
static int t3_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ae50cd72358c..ace0ab98d0f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
+cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index c6ff4890d171..524d11098c56 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -60,6 +60,11 @@ enum {
};
enum {
+ T4_REGMAP_SIZE = (160 * 1024),
+ T5_REGMAP_SIZE = (332 * 1024),
+};
+
+enum {
MEM_EDC0,
MEM_EDC1,
MEM_MC,
@@ -369,11 +374,22 @@ enum {
MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
- MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
+ MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};
enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_CTRL_TXQ_ENTRIES = 1024,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+ MIN_TXQ_ENTRIES = 32,
+ MIN_CTRL_TXQ_ENTRIES = 32,
+ MIN_RSPQ_ENTRIES = 128,
+ MIN_FL_ENTRIES = 16
+};
+
+enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
@@ -385,6 +401,10 @@ struct sge_rspq;
#include "cxgb4_dcb.h"
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#include "cxgb4_fcoe.h"
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
struct port_info {
struct adapter *adapter;
u16 viid;
@@ -404,6 +424,9 @@ struct port_info {
#ifdef CONFIG_CHELSIO_T4_DCB
struct port_dcb_info dcb; /* Data Center Bridging support */
#endif
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ struct cxgb_fcoe fcoe;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
};
struct dentry;
@@ -597,8 +620,8 @@ struct sge {
u16 rdmaqs; /* # of available RDMA Rx queues */
u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 ofld_rxq[MAX_OFLD_QSETS];
- u16 rdma_rxq[NCHAN];
- u16 rdma_ciq[NCHAN];
+ u16 rdma_rxq[MAX_RDMA_QUEUES];
+ u16 rdma_ciq[MAX_RDMA_CIQS];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -993,6 +1016,30 @@ static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
+/* Return a version number to identify the type of adapter. The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ * - bits 16..23: register dump version
+ */
+static inline unsigned int mk_adap_vers(struct adapter *ap)
+{
+ return CHELSIO_CHIP_VERSION(ap->params.chip) |
+ (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
+}
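
For illustration only (not part of the patch): a minimal sketch that unpacks the adapter version word built by mk_adap_vers(), following the bit layout documented in the comment above (bits 0..9 chip version, bits 10..15 chip revision, bits 16..23 register dump version). The packed value is hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int vers = 5 | (1 << 10) | (1 << 16);    /* hypothetical packed value */
	unsigned int chip_version = vers & 0x3ff;         /* bits 0..9   */
	unsigned int chip_revision = (vers >> 10) & 0x3f; /* bits 10..15 */
	unsigned int dump_version = (vers >> 16) & 0xff;  /* bits 16..23 */

	printf("chip %u rev %u, register dump format %u\n",
	       chip_version, chip_revision, dump_version);
	return 0;
}
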
+
+/* Return a queue's interrupt hold-off time in us. 0 means no timer. */
+static inline unsigned int qtimer_val(const struct adapter *adap,
+ const struct sge_rspq *q)
+{
+ unsigned int idx = q->intr_params >> 1;
+
+ return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+}
+
+/* driver version & name used for ethtool_drvinfo */
+extern char cxgb4_driver_name[];
+extern const char cxgb4_driver_version[];
+
void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
@@ -1022,6 +1069,10 @@ int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
int cxgb_busy_poll(struct napi_struct *napi);
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt);
+void cxgb4_set_ethtool_ops(struct net_device *netdev);
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -1110,6 +1161,9 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
}
+unsigned int t4_get_regs_len(struct adapter *adapter);
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
+
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index dcb047945290..371f75e782e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -538,7 +538,7 @@ static ssize_t tp_la_write(struct file *file, const char __user *buf,
char s[32];
unsigned long val;
size_t size = min(sizeof(s) - 1, count);
- struct adapter *adap = FILE_DATA(file)->i_private;
+ struct adapter *adap = file_inode(file)->i_private;
if (copy_from_user(s, buf, size))
return -EFAULT;
@@ -647,7 +647,7 @@ static int pm_stats_open(struct inode *inode, struct file *file)
static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct adapter *adap = FILE_DATA(file)->i_private;
+ struct adapter *adap = file_inode(file)->i_private;
t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
@@ -1005,7 +1005,7 @@ static ssize_t mbox_write(struct file *file, const char __user *buf,
&data[7], &c) < 8 || c != '\n')
return -EINVAL;
- ino = FILE_DATA(file);
+ ino = file_inode(file);
mbox = (uintptr_t)ino->i_private & 7;
adap = ino->i_private - mbox;
addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
@@ -1034,7 +1034,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = *ppos;
- loff_t avail = FILE_DATA(file)->i_size;
+ loff_t avail = file_inode(file)->i_size;
struct adapter *adap = file->private_data;
if (pos < 0)
@@ -1479,7 +1479,7 @@ static ssize_t rss_key_write(struct file *file, const char __user *buf,
int i, j;
u32 key[10];
char s[100], *p;
- struct adapter *adap = FILE_DATA(file)->i_private;
+ struct adapter *adap = file_inode(file)->i_private;
if (count > sizeof(s) - 1)
return -EINVAL;
@@ -1775,6 +1775,8 @@ do { \
int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
S("QType:", "RDMA-CPL");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
@@ -1794,6 +1796,8 @@ do { \
int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
S("QType:", "RDMA-CIQ");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
@@ -1947,12 +1951,6 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
-static void set_debugfs_file_size(struct dentry *de, loff_t size)
-{
- if (!IS_ERR(de) && de->d_inode)
- de->d_inode->i_size = size;
-}
-
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
@@ -2068,9 +2066,8 @@ int t4_setup_debugfs(struct adapter *adap)
}
}
- de = debugfs_create_file("flash", S_IRUSR, adap->debugfs_root, adap,
- &flash_debugfs_fops);
- set_debugfs_file_size(de, adap->params.sf_size);
+ de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
+ &flash_debugfs_fops, adap->params.sf_size);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index 8f418ba868bd..23f43a0f8950 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,8 +37,6 @@
#include <linux/export.h>
-#define FILE_DATA(_file) ((_file)->f_path.dentry->d_inode)
-
#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
new file mode 100644
index 000000000000..10d82b51d7ef
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mdio.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ return netdev2adap(dev)->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ netdev2adap(dev)->msg_enable = val;
+}
+
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxOctetsOK ",
+ "TxFramesOK ",
+ "TxBroadcastFrames ",
+ "TxMulticastFrames ",
+ "TxUnicastFrames ",
+ "TxErrorFrames ",
+
+ "TxFrames64 ",
+ "TxFrames65To127 ",
+ "TxFrames128To255 ",
+ "TxFrames256To511 ",
+ "TxFrames512To1023 ",
+ "TxFrames1024To1518 ",
+ "TxFrames1519ToMax ",
+
+ "TxFramesDropped ",
+ "TxPauseFrames ",
+ "TxPPP0Frames ",
+ "TxPPP1Frames ",
+ "TxPPP2Frames ",
+ "TxPPP3Frames ",
+ "TxPPP4Frames ",
+ "TxPPP5Frames ",
+ "TxPPP6Frames ",
+ "TxPPP7Frames ",
+
+ "RxOctetsOK ",
+ "RxFramesOK ",
+ "RxBroadcastFrames ",
+ "RxMulticastFrames ",
+ "RxUnicastFrames ",
+
+ "RxFramesTooLong ",
+ "RxJabberErrors ",
+ "RxFCSErrors ",
+ "RxLengthErrors ",
+ "RxSymbolErrors ",
+ "RxRuntFrames ",
+
+ "RxFrames64 ",
+ "RxFrames65To127 ",
+ "RxFrames128To255 ",
+ "RxFrames256To511 ",
+ "RxFrames512To1023 ",
+ "RxFrames1024To1518 ",
+ "RxFrames1519ToMax ",
+
+ "RxPauseFrames ",
+ "RxPPP0Frames ",
+ "RxPPP1Frames ",
+ "RxPPP2Frames ",
+ "RxPPP3Frames ",
+ "RxPPP4Frames ",
+ "RxPPP5Frames ",
+ "RxPPP6Frames ",
+ "RxPPP7Frames ",
+
+ "RxBG0FramesDropped ",
+ "RxBG1FramesDropped ",
+ "RxBG2FramesDropped ",
+ "RxBG3FramesDropped ",
+ "RxBG0FramesTrunc ",
+ "RxBG1FramesTrunc ",
+ "RxBG2FramesTrunc ",
+ "RxBG3FramesTrunc ",
+
+ "TSO ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "VLANextractions ",
+ "VLANinsertions ",
+ "GROpackets ",
+ "GROmerged ",
+ "WriteCoalSuccess ",
+ "WriteCoalFail ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ return t4_get_regs_len(adap);
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ u32 exprom_vers;
+
+ strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strlcpy(info->version, cxgb4_driver_version,
+ sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+
+ if (adapter->params.fw_vers)
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u.%u.%u.%u, TP %u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+ if (!t4_get_exprom_version(adapter, &exprom_vers))
+ snprintf(info->erom_version, sizeof(info->erom_version),
+ "%u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_G(exprom_vers),
+ FW_HDR_FW_VER_MINOR_G(exprom_vers),
+ FW_HDR_FW_VER_MICRO_G(exprom_vers),
+ FW_HDR_FW_VER_BUILD_G(exprom_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+/* port stats maintained per queue of the port. They should be in the same
+ * order as in stats_strings above.
+ */
+struct queue_port_stats {
+ u64 tso;
+ u64 tx_csum;
+ u64 rx_csum;
+ u64 vlan_ex;
+ u64 vlan_ins;
+ u64 gro_pkts;
+ u64 gro_merged;
+};
+
+static void collect_sge_port_stats(const struct adapter *adap,
+ const struct port_info *p,
+ struct queue_port_stats *s)
+{
+ int i;
+ const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+ const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+ s->tso += tx->tso;
+ s->tx_csum += tx->tx_cso;
+ s->rx_csum += rx->stats.rx_cso;
+ s->vlan_ex += rx->stats.vlan_ex;
+ s->vlan_ins += tx->vlan_ins;
+ s->gro_pkts += rx->stats.lro_pkts;
+ s->gro_merged += rx->stats.lro_merged;
+ }
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ u32 val1, val2;
+
+ t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+
+ data += sizeof(struct port_stats) / sizeof(u64);
+ collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+ data += sizeof(struct queue_port_stats) / sizeof(u64);
+ if (!is_t4(adapter->params.chip)) {
+ t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
+ val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
+ val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
+ *data = val1 - val2;
+ data++;
+ *data = val2;
+ data++;
+ } else {
+ memset(data, 0, 2 * sizeof(u64));
+ *data += 2;
+ }
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct adapter *adap = netdev2adap(dev);
+ size_t buf_size;
+
+ buf_size = t4_get_regs_len(adap);
+ regs->version = mk_adap_vers(adap);
+ t4_get_regs(adap, buf, buf_size);
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+ if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+ return 0;
+}
+
+static int identify_port(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ unsigned int val;
+ struct adapter *adap = netdev2adap(dev);
+
+ if (state == ETHTOOL_ID_ACTIVE)
+ val = 0xffff;
+ else if (state == ETHTOOL_ID_INACTIVE)
+ val = 0;
+ else
+ return -EINVAL;
+
+ return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+}
+
+static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR) {
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ } else if (type == FW_PORT_TYPE_BP_AP) {
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ } else if (type == FW_PORT_TYPE_BP4_AP) {
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI ||
+ type == FW_PORT_TYPE_SFP ||
+ type == FW_PORT_TYPE_QSFP_10G ||
+ type == FW_PORT_TYPE_QSA) {
+ v |= SUPPORTED_FIBRE;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_BP40_BA ||
+ type == FW_PORT_TYPE_QSFP) {
+ v |= SUPPORTED_40000baseSR4_Full;
+ v |= SUPPORTED_FIBRE;
+ }
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static unsigned int to_fw_linkcaps(unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (caps & ADVERTISED_100baseT_Full)
+ v |= FW_PORT_CAP_SPEED_100M;
+ if (caps & ADVERTISED_1000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_1G;
+ if (caps & ADVERTISED_10000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_10G;
+ if (caps & ADVERTISED_40000baseSR4_Full)
+ v |= FW_PORT_CAP_SPEED_40G;
+ return v;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI) {
+ cmd->port = PORT_TP;
+ } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
+ cmd->port = PORT_FIBRE;
+ } else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSA ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_OTHER;
+ } else {
+ cmd->port = PORT_OTHER;
+ }
+
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+ cmd->advertising = from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
+ ethtool_cmd_speed_set(cmd,
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->autoneg = p->link_cfg.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static unsigned int speed_to_caps(int speed)
+{
+ if (speed == 100)
+ return FW_PORT_CAP_SPEED_100M;
+ if (speed == 1000)
+ return FW_PORT_CAP_SPEED_1G;
+ if (speed == 10000)
+ return FW_PORT_CAP_SPEED_10G;
+ if (speed == 40000)
+ return FW_PORT_CAP_SPEED_40G;
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ unsigned int cap;
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
+ return -EINVAL;
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ /* PHY offers a single speed. See if that's what's
+ * being requested.
+ */
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ (lc->supported & speed_to_caps(speed)))
+ return 0;
+ return -EINVAL;
+ }
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ cap = speed_to_caps(speed);
+
+ if (!(lc->supported & cap) ||
+ (speed == 1000) ||
+ (speed == 10000) ||
+ (speed == 40000))
+ return -EINVAL;
+ lc->requested_speed = cap;
+ lc->advertising = 0;
+ } else {
+ cap = to_fw_linkcaps(cmd->advertising);
+ if (!(lc->supported & cap))
+ return -EINVAL;
+ lc->requested_speed = 0;
+ lc->advertising = cap | FW_PORT_CAP_ANEG;
+ }
+ lc->autoneg = cmd->autoneg;
+
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ lc);
+ return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+
+ if (epause->autoneg == AUTONEG_DISABLE)
+ lc->requested_fc = 0;
+ else if (lc->supported & FW_PORT_CAP_ANEG)
+ lc->requested_fc = PAUSE_AUTONEG;
+ else
+ return -EINVAL;
+
+ if (epause->rx_pause)
+ lc->requested_fc |= PAUSE_RX;
+ if (epause->tx_pause)
+ lc->requested_fc |= PAUSE_TX;
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ lc);
+ return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct sge *s = &pi->adapter->sge;
+
+ e->rx_max_pending = MAX_RX_BUFFERS;
+ e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+ e->rx_jumbo_max_pending = 0;
+ e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+ e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+ e->rx_jumbo_pending = 0;
+ e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ int i;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+
+ if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+ e->tx_pending > MAX_TXQ_ENTRIES ||
+ e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ for (i = 0; i < pi->nqsets; ++i) {
+ s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+ s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+ s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+ }
+ return 0;
+}
+
+/**
+ * set_rx_intr_params - set a net device's RX interrupt hold-off parameters
+ * @dev: the network device
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+ unsigned int us, unsigned int cnt)
+{
+ int i, err;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = 0; i < pi->nqsets; i++, q++) {
+ err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
+{
+ int i;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = 0; i < pi->nqsets; i++, q++)
+ q->rspq.adaptive_rx = adaptive_rx;
+
+ return 0;
+}
+
+static int get_adaptive_rx_setting(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+ return q->rspq.adaptive_rx;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
+ return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
+ const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+
+ c->rx_coalesce_usecs = qtimer_val(adap, rq);
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+ adap->sge.counter_val[rq->pktcnt_idx] : 0;
+ c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
+ return 0;
+}
+
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return 31744 - fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ return -EINVAL;
+}
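
For illustration only (not part of the patch): a few worked values for the physical-to-virtual EEPROM address mapping implemented by eeprom_ptov() above, assuming a hypothetical 16 KB EEPROM, a 1 KB per-function area (sz = 1024) and PCI function 2, so A = fn * sz = 2048.

#include <stdio.h>

#define EEPROMSIZE (16 * 1024)	/* hypothetical total EEPROM size for this example */

static int eeprom_ptov_example(unsigned int phys_addr, unsigned int fn,
			       unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)		/* [0..1K)    -> [31K..32K)   */
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)	/* [1K..1K+A) -> [31K-A..31K) */
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)	/* [1K+A..ES) -> [0..ES-A-1K) */
		return phys_addr - 1024 - fn;
	return -1;			/* out of range */
}

int main(void)
{
	static const unsigned int addrs[] = { 0, 512, 1024, 3071, 3072, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("phys %5u -> virt %6d\n", addrs[i],
		       eeprom_ptov_example(addrs[i], 2, 1024));
	/* 0 -> 31744, 512 -> 32256, 1024 -> 29696, 3071 -> 31743,
	 * 3072 -> 0, 4096 -> 1024
	 */
	return 0;
}
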
+
+/* The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+ u8 *data)
+{
+ int i, err = 0;
+ struct adapter *adapter = netdev2adap(dev);
+ u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+ err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+ if (!err)
+ memcpy(data, buf + e->offset, e->len);
+ kfree(buf);
+ return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u8 *buf;
+ int err = 0;
+ u32 aligned_offset, aligned_len, *p;
+ struct adapter *adapter = netdev2adap(dev);
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+ if (adapter->fn > 0) {
+ u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+ /* RMW possibly needed for first or last words.
+ */
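+ /* For example (illustrative numbers only): a 3-byte write at
+ * offset 6 gives aligned_offset = 4 and
+ * aligned_len = (3 + 2 + 3) & ~3 = 8, so the two words covering
+ * bytes [4..12) are read back, bytes [6..9) are patched from
+ * @data, and the full 8 bytes are rewritten below.
+ */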
+ buf = kmalloc(aligned_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+ if (!err && aligned_len > 4)
+ err = eeprom_rd_phys(adapter,
+ aligned_offset + aligned_len - 4,
+ (u32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+ } else {
+ buf = data;
+ }
+
+ err = t4_seeprom_wp(adapter, false);
+ if (err)
+ goto out;
+
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = eeprom_wr_phys(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t4_seeprom_wp(adapter, true);
+out:
+ if (buf != data)
+ kfree(buf);
+ return err;
+}
+
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+ int ret;
+ const struct firmware *fw;
+ struct adapter *adap = netdev2adap(netdev);
+ unsigned int mbox = PCIE_FW_MASTER_M + 1;
+
+ ef->data[sizeof(ef->data) - 1] = '\0';
+ ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+ if (ret < 0)
+ return ret;
+
+ /* If the adapter has been fully initialized then we'll go ahead and
+ * try to get the firmware's cooperation in upgrading to the new
+ * firmware image; otherwise we'll try to do the entire job from the
+ * host ... and we always "force" the operation in this path.
+ */
+ if (adap->flags & FULL_INIT_DONE)
+ mbox = adap->mbox;
+
+ ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
+ release_firmware(fw);
+ if (!ret)
+ dev_info(adap->pdev_dev,
+ "loaded firmware %s, reload cxgb4 driver\n", ef->data);
+ return ret;
+}
+
+#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
+#define BCAST_CRC 0xa0ccc1a6
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = netdev2adap(dev)->wol;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ int err = 0;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (wol->wolopts & ~WOL_SUPPORTED)
+ return -EINVAL;
+ t4_wol_magic_enable(pi->adapter, pi->tx_chan,
+ (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
+ if (wol->wolopts & WAKE_BCAST) {
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
+ ~0ULL, 0, false);
+ if (!err)
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
+ ~6ULL, ~0ULL, BCAST_CRC, true);
+ } else {
+ t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
+ }
+ return err;
+}
+
+static u32 get_rss_table_size(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int n = pi->rss_size;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!p)
+ return 0;
+ while (n--)
+ p[n] = pi->rss[n];
+ return 0;
+}
+
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
+ const u8 hfunc)
+{
+ unsigned int i;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!p)
+ return 0;
+
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
+ if (pi->adapter->flags & FULL_INIT_DONE)
+ return cxgb4_write_rss(pi, pi->rss);
+ return 0;
+}
+
+static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH: {
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+ }
+ case ETHTOOL_GRXRINGS:
+ info->data = pi->nqsets;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+ .get_eeprom_len = get_eeprom_len,
+ .get_eeprom = get_eeprom,
+ .set_eeprom = set_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .set_phys_id = identify_port,
+ .nway_reset = restart_autoneg,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_stats,
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+ .get_wol = get_wol,
+ .set_wol = set_wol,
+ .get_rxnfc = get_rxnfc,
+ .get_rxfh_indir_size = get_rss_table_size,
+ .get_rxfh = get_rss_table,
+ .set_rxfh = set_rss_table,
+ .flash_device = set_flash,
+};
+
+void cxgb4_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
new file mode 100644
index 000000000000..6c8a62eefe51
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -0,0 +1,122 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/libfcoe.h>
+#include "cxgb4.h"
+
+bool cxgb_fcoe_sof_eof_supported(struct adapter *adap, struct sk_buff *skb)
+{
+ struct fcoe_hdr *fcoeh = (struct fcoe_hdr *)skb_network_header(skb);
+ u8 sof = fcoeh->fcoe_sof;
+ u8 eof = 0;
+
+ if ((sof != FC_SOF_I3) && (sof != FC_SOF_N3)) {
+ dev_err(adap->pdev_dev, "Unsupported SOF 0x%x\n", sof);
+ return false;
+ }
+
+ skb_copy_bits(skb, skb->len - 4, &eof, 1);
+
+ if ((eof != FC_EOF_N) && (eof != FC_EOF_T)) {
+ dev_err(adap->pdev_dev, "Unsupported EOF 0x%x\n", eof);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * cxgb_fcoe_enable - enable FCoE offload features
+ * @netdev: net device
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int cxgb_fcoe_enable(struct net_device *netdev)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adap = pi->adapter;
+ struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+ if (is_t4(adap->params.chip))
+ return -EINVAL;
+
+ if (!(adap->flags & FULL_INIT_DONE))
+ return -EINVAL;
+
+ dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
+
+ netdev->features |= NETIF_F_FCOE_CRC;
+ netdev->vlan_features |= NETIF_F_FCOE_CRC;
+ netdev->features |= NETIF_F_FCOE_MTU;
+ netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+ netdev_features_change(netdev);
+
+ fcoe->flags |= CXGB_FCOE_ENABLED;
+
+ return 0;
+}
+
+/**
+ * cxgb_fcoe_disable - disable FCoE offload
+ * @netdev: net device
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int cxgb_fcoe_disable(struct net_device *netdev)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adap = pi->adapter;
+ struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+ if (!(fcoe->flags & CXGB_FCOE_ENABLED))
+ return -EINVAL;
+
+ dev_info(adap->pdev_dev, "Disabling FCoE offload features\n");
+
+ fcoe->flags &= ~CXGB_FCOE_ENABLED;
+
+ netdev->features &= ~NETIF_F_FCOE_CRC;
+ netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
+ netdev->features &= ~NETIF_F_FCOE_MTU;
+ netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
+
+ netdev_features_change(netdev);
+
+ return 0;
+}
+#endif /* CONFIG_CHELSIO_T4_FCOE */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h
new file mode 100644
index 000000000000..bf9258a56ac9
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_FCOE_H__
+#define __CXGB4_FCOE_H__
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+
+#define CXGB_FCOE_TXPKT_CSUM_START 28
+#define CXGB_FCOE_TXPKT_CSUM_END 8
+
+/* fcoe flags */
+enum {
+ CXGB_FCOE_ENABLED = (1 << 0),
+};
+
+struct cxgb_fcoe {
+ u8 flags;
+};
+
+int cxgb_fcoe_enable(struct net_device *);
+int cxgb_fcoe_disable(struct net_device *);
+bool cxgb_fcoe_sof_eof_supported(struct adapter *, struct sk_buff *);
+
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+#endif /* __CXGB4_FCOE_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d92995138f7e..803d91beec6f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -76,23 +76,15 @@
#include "clip_tbl.h"
#include "l2t.h"
+char cxgb4_driver_name[] = KBUILD_MODNAME;
+
#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
+const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5 Network Driver"
-enum {
- MAX_TXQ_ENTRIES = 16384,
- MAX_CTRL_TXQ_ENTRIES = 1024,
- MAX_RSPQ_ENTRIES = 16384,
- MAX_RX_BUFFERS = 16384,
- MIN_TXQ_ENTRIES = 32,
- MIN_CTRL_TXQ_ENTRIES = 32,
- MIN_RSPQ_ENTRIES = 128,
- MIN_FL_ENTRIES = 16
-};
-
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware of the
* firmware command. The use of bit-field structure elements is purely to
@@ -124,7 +116,7 @@ struct filter_entry {
/* Macros needed to support the PCI Device ID Table ...
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
- static struct pci_device_id cxgb4_pci_tbl[] = {
+ static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4
/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
@@ -732,7 +724,8 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
adap->swintr = 1;
t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
}
- t4_slow_intr_handler(adap);
+ if (adap->flags & MASTER_PF)
+ t4_slow_intr_handler(adap);
return IRQ_HANDLED;
}
@@ -857,14 +850,14 @@ static void free_msix_queue_irqs(struct adapter *adap)
}
/**
- * write_rss - write the RSS table for a given port
+ * cxgb4_write_rss - write the RSS table for a given port
* @pi: the port
* @queues: array of queue indices for RSS
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
*/
-static int write_rss(const struct port_info *pi, const u16 *queues)
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
@@ -897,7 +890,7 @@ static int setup_rss(struct adapter *adap)
for_each_port(adap, i) {
const struct port_info *pi = adap2pinfo(adap, i);
- err = write_rss(pi, pi->rss);
+ err = cxgb4_write_rss(pi, pi->rss);
if (err)
return err;
}
@@ -972,6 +965,28 @@ static void enable_rx(struct adapter *adap)
}
}
+static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
+ unsigned int nq, unsigned int per_chan, int msi_idx,
+ u16 *ids)
+{
+ int i, err;
+
+ for (i = 0; i < nq; i++, q++) {
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+ adap->port[i / per_chan],
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
+ if (err)
+ return err;
+ memset(&q->stats, 0, sizeof(q->stats));
+ if (ids)
+ ids[i] = q->rspq.abs_id;
+ }
+ return 0;
+}
+
/**
* setup_sge_queues - configure SGE Tx/Rx/response queues
* @adap: the adapter
@@ -1046,51 +1061,27 @@ freeout: t4_free_sge_resources(adap);
j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
for_each_ofldrxq(s, i) {
- struct sge_ofld_rxq *q = &s->ofldrxq[i];
- struct net_device *dev = adap->port[i / j];
-
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
- q->fl.size ? &q->fl : NULL,
- uldrx_handler);
- if (err)
- goto freeout;
- memset(&q->stats, 0, sizeof(q->stats));
- s->ofld_rxq[i] = q->rspq.abs_id;
- err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+ err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
+ adap->port[i / j],
s->fw_evtq.cntxt_id);
if (err)
goto freeout;
}
- for_each_rdmarxq(s, i) {
- struct sge_ofld_rxq *q = &s->rdmarxq[i];
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
+ err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+ if (err) \
+ goto freeout; \
+ if (msi_idx > 0) \
+ msi_idx += nq; \
+} while (0)
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
- if (err)
- goto freeout;
- memset(&q->stats, 0, sizeof(q->stats));
- s->rdma_rxq[i] = q->rspq.abs_id;
- }
-
- for_each_rdmaciq(s, i) {
- struct sge_ofld_rxq *q = &s->rdmaciq[i];
+ ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+ ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+ j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
+ ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
- if (err)
- goto freeout;
- memset(&q->stats, 0, sizeof(q->stats));
- s->rdma_ciq[i] = q->rspq.abs_id;
- }
+#undef ALLOC_OFLD_RXQS
for_each_port(adap, i) {
/*
@@ -1149,6 +1140,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
struct fw_filter_wr *fwr;
unsigned int ftid;
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
/* If the new filter requires loopback Destination MAC and/or VLAN
* rewriting then we need to allocate a Layer 2 Table (L2T) entry for
* the filter.
@@ -1156,19 +1151,21 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
if (f->fs.newdmac || f->fs.newvlan) {
/* allocate L2T entry for new filter */
f->l2t = t4_l2t_alloc_switching(adapter->l2t);
- if (f->l2t == NULL)
+ if (f->l2t == NULL) {
+ kfree_skb(skb);
return -EAGAIN;
+ }
if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
f->fs.eport, f->fs.dmac)) {
cxgb4_l2t_release(f->l2t);
f->l2t = NULL;
+ kfree_skb(skb);
return -ENOMEM;
}
}
ftid = adapter->tids.ftid_base + fidx;
- skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
memset(fwr, 0, sizeof(*fwr));
@@ -1266,7 +1263,10 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
len = sizeof(*fwr);
ftid = adapter->tids.ftid_base + fidx;
- skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
fwr = (struct fw_filter_wr *)__skb_put(skb, len);
t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
@@ -1301,6 +1301,10 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = 0;
} else {
txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ if (skb->protocol == htons(ETH_P_FCOE))
+ txq = skb->priority & 0x7;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
}
return txq;
}
@@ -1325,1192 +1329,6 @@ static inline int is_offload(const struct adapter *adap)
return adap->params.offload;
}
-/*
- * Implementation of ethtool operations.
- */
-
-static u32 get_msglevel(struct net_device *dev)
-{
- return netdev2adap(dev)->msg_enable;
-}
-
-static void set_msglevel(struct net_device *dev, u32 val)
-{
- netdev2adap(dev)->msg_enable = val;
-}
-
-static char stats_strings[][ETH_GSTRING_LEN] = {
- "TxOctetsOK ",
- "TxFramesOK ",
- "TxBroadcastFrames ",
- "TxMulticastFrames ",
- "TxUnicastFrames ",
- "TxErrorFrames ",
-
- "TxFrames64 ",
- "TxFrames65To127 ",
- "TxFrames128To255 ",
- "TxFrames256To511 ",
- "TxFrames512To1023 ",
- "TxFrames1024To1518 ",
- "TxFrames1519ToMax ",
-
- "TxFramesDropped ",
- "TxPauseFrames ",
- "TxPPP0Frames ",
- "TxPPP1Frames ",
- "TxPPP2Frames ",
- "TxPPP3Frames ",
- "TxPPP4Frames ",
- "TxPPP5Frames ",
- "TxPPP6Frames ",
- "TxPPP7Frames ",
-
- "RxOctetsOK ",
- "RxFramesOK ",
- "RxBroadcastFrames ",
- "RxMulticastFrames ",
- "RxUnicastFrames ",
-
- "RxFramesTooLong ",
- "RxJabberErrors ",
- "RxFCSErrors ",
- "RxLengthErrors ",
- "RxSymbolErrors ",
- "RxRuntFrames ",
-
- "RxFrames64 ",
- "RxFrames65To127 ",
- "RxFrames128To255 ",
- "RxFrames256To511 ",
- "RxFrames512To1023 ",
- "RxFrames1024To1518 ",
- "RxFrames1519ToMax ",
-
- "RxPauseFrames ",
- "RxPPP0Frames ",
- "RxPPP1Frames ",
- "RxPPP2Frames ",
- "RxPPP3Frames ",
- "RxPPP4Frames ",
- "RxPPP5Frames ",
- "RxPPP6Frames ",
- "RxPPP7Frames ",
-
- "RxBG0FramesDropped ",
- "RxBG1FramesDropped ",
- "RxBG2FramesDropped ",
- "RxBG3FramesDropped ",
- "RxBG0FramesTrunc ",
- "RxBG1FramesTrunc ",
- "RxBG2FramesTrunc ",
- "RxBG3FramesTrunc ",
-
- "TSO ",
- "TxCsumOffload ",
- "RxCsumGood ",
- "VLANextractions ",
- "VLANinsertions ",
- "GROpackets ",
- "GROmerged ",
- "WriteCoalSuccess ",
- "WriteCoalFail ",
-};
-
-static int get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(stats_strings);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-#define T4_REGMAP_SIZE (160 * 1024)
-#define T5_REGMAP_SIZE (332 * 1024)
-
-static int get_regs_len(struct net_device *dev)
-{
- struct adapter *adap = netdev2adap(dev);
- if (is_t4(adap->params.chip))
- return T4_REGMAP_SIZE;
- else
- return T5_REGMAP_SIZE;
-}
-
-static int get_eeprom_len(struct net_device *dev)
-{
- return EEPROMSIZE;
-}
-
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct adapter *adapter = netdev2adap(dev);
- u32 exprom_vers;
-
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
- sizeof(info->bus_info));
-
- if (adapter->params.fw_vers)
- snprintf(info->fw_version, sizeof(info->fw_version),
- "%u.%u.%u.%u, TP %u.%u.%u.%u",
- FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
- FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
- FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
- FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
- FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
- FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
- FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
- FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
-
- if (!t4_get_exprom_version(adapter, &exprom_vers))
- snprintf(info->erom_version, sizeof(info->erom_version),
- "%u.%u.%u.%u",
- FW_HDR_FW_VER_MAJOR_G(exprom_vers),
- FW_HDR_FW_VER_MINOR_G(exprom_vers),
- FW_HDR_FW_VER_MICRO_G(exprom_vers),
- FW_HDR_FW_VER_BUILD_G(exprom_vers));
-}
-
-static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- if (stringset == ETH_SS_STATS)
- memcpy(data, stats_strings, sizeof(stats_strings));
-}
-
-/*
- * port stats maintained per queue of the port. They should be in the same
- * order as in stats_strings above.
- */
-struct queue_port_stats {
- u64 tso;
- u64 tx_csum;
- u64 rx_csum;
- u64 vlan_ex;
- u64 vlan_ins;
- u64 gro_pkts;
- u64 gro_merged;
-};
-
-static void collect_sge_port_stats(const struct adapter *adap,
- const struct port_info *p, struct queue_port_stats *s)
-{
- int i;
- const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
- const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
-
- memset(s, 0, sizeof(*s));
- for (i = 0; i < p->nqsets; i++, rx++, tx++) {
- s->tso += tx->tso;
- s->tx_csum += tx->tx_cso;
- s->rx_csum += rx->stats.rx_cso;
- s->vlan_ex += rx->stats.vlan_ex;
- s->vlan_ins += tx->vlan_ins;
- s->gro_pkts += rx->stats.lro_pkts;
- s->gro_merged += rx->stats.lro_merged;
- }
-}
-
-static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
- u64 *data)
-{
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
- u32 val1, val2;
-
- t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
-
- data += sizeof(struct port_stats) / sizeof(u64);
- collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
- data += sizeof(struct queue_port_stats) / sizeof(u64);
- if (!is_t4(adapter->params.chip)) {
- t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
- val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
- val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
- *data = val1 - val2;
- data++;
- *data = val2;
- data++;
- } else {
- memset(data, 0, 2 * sizeof(u64));
- *data += 2;
- }
-}
-
-/*
- * Return a version number to identify the type of adapter. The scheme is:
- * - bits 0..9: chip version
- * - bits 10..15: chip revision
- * - bits 16..23: register dump version
- */
-static inline unsigned int mk_adap_vers(const struct adapter *ap)
-{
- return CHELSIO_CHIP_VERSION(ap->params.chip) |
- (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
-}
-
-static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
- unsigned int end)
-{
- u32 *p = buf + start;
-
- for ( ; start <= end; start += sizeof(u32))
- *p++ = t4_read_reg(ap, start);
-}
-
-static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *buf)
-{
- static const unsigned int t4_reg_ranges[] = {
- 0x1008, 0x1108,
- 0x1180, 0x11b4,
- 0x11fc, 0x123c,
- 0x1300, 0x173c,
- 0x1800, 0x18fc,
- 0x3000, 0x30d8,
- 0x30e0, 0x5924,
- 0x5960, 0x59d4,
- 0x5a00, 0x5af8,
- 0x6000, 0x6098,
- 0x6100, 0x6150,
- 0x6200, 0x6208,
- 0x6240, 0x6248,
- 0x6280, 0x6338,
- 0x6370, 0x638c,
- 0x6400, 0x643c,
- 0x6500, 0x6524,
- 0x6a00, 0x6a38,
- 0x6a60, 0x6a78,
- 0x6b00, 0x6b84,
- 0x6bf0, 0x6c84,
- 0x6cf0, 0x6d84,
- 0x6df0, 0x6e84,
- 0x6ef0, 0x6f84,
- 0x6ff0, 0x7084,
- 0x70f0, 0x7184,
- 0x71f0, 0x7284,
- 0x72f0, 0x7384,
- 0x73f0, 0x7450,
- 0x7500, 0x7530,
- 0x7600, 0x761c,
- 0x7680, 0x76cc,
- 0x7700, 0x7798,
- 0x77c0, 0x77fc,
- 0x7900, 0x79fc,
- 0x7b00, 0x7c38,
- 0x7d00, 0x7efc,
- 0x8dc0, 0x8e1c,
- 0x8e30, 0x8e78,
- 0x8ea0, 0x8f6c,
- 0x8fc0, 0x9074,
- 0x90fc, 0x90fc,
- 0x9400, 0x9458,
- 0x9600, 0x96bc,
- 0x9800, 0x9808,
- 0x9820, 0x983c,
- 0x9850, 0x9864,
- 0x9c00, 0x9c6c,
- 0x9c80, 0x9cec,
- 0x9d00, 0x9d6c,
- 0x9d80, 0x9dec,
- 0x9e00, 0x9e6c,
- 0x9e80, 0x9eec,
- 0x9f00, 0x9f6c,
- 0x9f80, 0x9fec,
- 0xd004, 0xd03c,
- 0xdfc0, 0xdfe0,
- 0xe000, 0xea7c,
- 0xf000, 0x11110,
- 0x11118, 0x11190,
- 0x19040, 0x1906c,
- 0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
- 0x191d0, 0x191e8,
- 0x19238, 0x1924c,
- 0x193f8, 0x19474,
- 0x19490, 0x194f8,
- 0x19800, 0x19f30,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
- 0x1a190, 0x1a1c4,
- 0x1a1fc, 0x1a1fc,
- 0x1e040, 0x1e04c,
- 0x1e284, 0x1e28c,
- 0x1e2c0, 0x1e2c0,
- 0x1e2e0, 0x1e2e0,
- 0x1e300, 0x1e384,
- 0x1e3c0, 0x1e3c8,
- 0x1e440, 0x1e44c,
- 0x1e684, 0x1e68c,
- 0x1e6c0, 0x1e6c0,
- 0x1e6e0, 0x1e6e0,
- 0x1e700, 0x1e784,
- 0x1e7c0, 0x1e7c8,
- 0x1e840, 0x1e84c,
- 0x1ea84, 0x1ea8c,
- 0x1eac0, 0x1eac0,
- 0x1eae0, 0x1eae0,
- 0x1eb00, 0x1eb84,
- 0x1ebc0, 0x1ebc8,
- 0x1ec40, 0x1ec4c,
- 0x1ee84, 0x1ee8c,
- 0x1eec0, 0x1eec0,
- 0x1eee0, 0x1eee0,
- 0x1ef00, 0x1ef84,
- 0x1efc0, 0x1efc8,
- 0x1f040, 0x1f04c,
- 0x1f284, 0x1f28c,
- 0x1f2c0, 0x1f2c0,
- 0x1f2e0, 0x1f2e0,
- 0x1f300, 0x1f384,
- 0x1f3c0, 0x1f3c8,
- 0x1f440, 0x1f44c,
- 0x1f684, 0x1f68c,
- 0x1f6c0, 0x1f6c0,
- 0x1f6e0, 0x1f6e0,
- 0x1f700, 0x1f784,
- 0x1f7c0, 0x1f7c8,
- 0x1f840, 0x1f84c,
- 0x1fa84, 0x1fa8c,
- 0x1fac0, 0x1fac0,
- 0x1fae0, 0x1fae0,
- 0x1fb00, 0x1fb84,
- 0x1fbc0, 0x1fbc8,
- 0x1fc40, 0x1fc4c,
- 0x1fe84, 0x1fe8c,
- 0x1fec0, 0x1fec0,
- 0x1fee0, 0x1fee0,
- 0x1ff00, 0x1ff84,
- 0x1ffc0, 0x1ffc8,
- 0x20000, 0x2002c,
- 0x20100, 0x2013c,
- 0x20190, 0x201c8,
- 0x20200, 0x20318,
- 0x20400, 0x20528,
- 0x20540, 0x20614,
- 0x21000, 0x21040,
- 0x2104c, 0x21060,
- 0x210c0, 0x210ec,
- 0x21200, 0x21268,
- 0x21270, 0x21284,
- 0x212fc, 0x21388,
- 0x21400, 0x21404,
- 0x21500, 0x21518,
- 0x2152c, 0x2153c,
- 0x21550, 0x21554,
- 0x21600, 0x21600,
- 0x21608, 0x21628,
- 0x21630, 0x2163c,
- 0x21700, 0x2171c,
- 0x21780, 0x2178c,
- 0x21800, 0x21c38,
- 0x21c80, 0x21d7c,
- 0x21e00, 0x21e04,
- 0x22000, 0x2202c,
- 0x22100, 0x2213c,
- 0x22190, 0x221c8,
- 0x22200, 0x22318,
- 0x22400, 0x22528,
- 0x22540, 0x22614,
- 0x23000, 0x23040,
- 0x2304c, 0x23060,
- 0x230c0, 0x230ec,
- 0x23200, 0x23268,
- 0x23270, 0x23284,
- 0x232fc, 0x23388,
- 0x23400, 0x23404,
- 0x23500, 0x23518,
- 0x2352c, 0x2353c,
- 0x23550, 0x23554,
- 0x23600, 0x23600,
- 0x23608, 0x23628,
- 0x23630, 0x2363c,
- 0x23700, 0x2371c,
- 0x23780, 0x2378c,
- 0x23800, 0x23c38,
- 0x23c80, 0x23d7c,
- 0x23e00, 0x23e04,
- 0x24000, 0x2402c,
- 0x24100, 0x2413c,
- 0x24190, 0x241c8,
- 0x24200, 0x24318,
- 0x24400, 0x24528,
- 0x24540, 0x24614,
- 0x25000, 0x25040,
- 0x2504c, 0x25060,
- 0x250c0, 0x250ec,
- 0x25200, 0x25268,
- 0x25270, 0x25284,
- 0x252fc, 0x25388,
- 0x25400, 0x25404,
- 0x25500, 0x25518,
- 0x2552c, 0x2553c,
- 0x25550, 0x25554,
- 0x25600, 0x25600,
- 0x25608, 0x25628,
- 0x25630, 0x2563c,
- 0x25700, 0x2571c,
- 0x25780, 0x2578c,
- 0x25800, 0x25c38,
- 0x25c80, 0x25d7c,
- 0x25e00, 0x25e04,
- 0x26000, 0x2602c,
- 0x26100, 0x2613c,
- 0x26190, 0x261c8,
- 0x26200, 0x26318,
- 0x26400, 0x26528,
- 0x26540, 0x26614,
- 0x27000, 0x27040,
- 0x2704c, 0x27060,
- 0x270c0, 0x270ec,
- 0x27200, 0x27268,
- 0x27270, 0x27284,
- 0x272fc, 0x27388,
- 0x27400, 0x27404,
- 0x27500, 0x27518,
- 0x2752c, 0x2753c,
- 0x27550, 0x27554,
- 0x27600, 0x27600,
- 0x27608, 0x27628,
- 0x27630, 0x2763c,
- 0x27700, 0x2771c,
- 0x27780, 0x2778c,
- 0x27800, 0x27c38,
- 0x27c80, 0x27d7c,
- 0x27e00, 0x27e04
- };
-
- static const unsigned int t5_reg_ranges[] = {
- 0x1008, 0x1148,
- 0x1180, 0x11b4,
- 0x11fc, 0x123c,
- 0x1280, 0x173c,
- 0x1800, 0x18fc,
- 0x3000, 0x3028,
- 0x3060, 0x30d8,
- 0x30e0, 0x30fc,
- 0x3140, 0x357c,
- 0x35a8, 0x35cc,
- 0x35ec, 0x35ec,
- 0x3600, 0x5624,
- 0x56cc, 0x575c,
- 0x580c, 0x5814,
- 0x5890, 0x58bc,
- 0x5940, 0x59dc,
- 0x59fc, 0x5a18,
- 0x5a60, 0x5a9c,
- 0x5b9c, 0x5bfc,
- 0x6000, 0x6040,
- 0x6058, 0x614c,
- 0x7700, 0x7798,
- 0x77c0, 0x78fc,
- 0x7b00, 0x7c54,
- 0x7d00, 0x7efc,
- 0x8dc0, 0x8de0,
- 0x8df8, 0x8e84,
- 0x8ea0, 0x8f84,
- 0x8fc0, 0x90f8,
- 0x9400, 0x9470,
- 0x9600, 0x96f4,
- 0x9800, 0x9808,
- 0x9820, 0x983c,
- 0x9850, 0x9864,
- 0x9c00, 0x9c6c,
- 0x9c80, 0x9cec,
- 0x9d00, 0x9d6c,
- 0x9d80, 0x9dec,
- 0x9e00, 0x9e6c,
- 0x9e80, 0x9eec,
- 0x9f00, 0x9f6c,
- 0x9f80, 0xa020,
- 0xd004, 0xd03c,
- 0xdfc0, 0xdfe0,
- 0xe000, 0x11088,
- 0x1109c, 0x11110,
- 0x11118, 0x1117c,
- 0x11190, 0x11204,
- 0x19040, 0x1906c,
- 0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
- 0x191d0, 0x191e8,
- 0x19238, 0x19290,
- 0x193f8, 0x19474,
- 0x19490, 0x194cc,
- 0x194f0, 0x194f8,
- 0x19c00, 0x19c60,
- 0x19c94, 0x19e10,
- 0x19e50, 0x19f34,
- 0x19f40, 0x19f50,
- 0x19f90, 0x19fe4,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
- 0x1a190, 0x1a1c4,
- 0x1a1fc, 0x1a1fc,
- 0x1e008, 0x1e00c,
- 0x1e040, 0x1e04c,
- 0x1e284, 0x1e290,
- 0x1e2c0, 0x1e2c0,
- 0x1e2e0, 0x1e2e0,
- 0x1e300, 0x1e384,
- 0x1e3c0, 0x1e3c8,
- 0x1e408, 0x1e40c,
- 0x1e440, 0x1e44c,
- 0x1e684, 0x1e690,
- 0x1e6c0, 0x1e6c0,
- 0x1e6e0, 0x1e6e0,
- 0x1e700, 0x1e784,
- 0x1e7c0, 0x1e7c8,
- 0x1e808, 0x1e80c,
- 0x1e840, 0x1e84c,
- 0x1ea84, 0x1ea90,
- 0x1eac0, 0x1eac0,
- 0x1eae0, 0x1eae0,
- 0x1eb00, 0x1eb84,
- 0x1ebc0, 0x1ebc8,
- 0x1ec08, 0x1ec0c,
- 0x1ec40, 0x1ec4c,
- 0x1ee84, 0x1ee90,
- 0x1eec0, 0x1eec0,
- 0x1eee0, 0x1eee0,
- 0x1ef00, 0x1ef84,
- 0x1efc0, 0x1efc8,
- 0x1f008, 0x1f00c,
- 0x1f040, 0x1f04c,
- 0x1f284, 0x1f290,
- 0x1f2c0, 0x1f2c0,
- 0x1f2e0, 0x1f2e0,
- 0x1f300, 0x1f384,
- 0x1f3c0, 0x1f3c8,
- 0x1f408, 0x1f40c,
- 0x1f440, 0x1f44c,
- 0x1f684, 0x1f690,
- 0x1f6c0, 0x1f6c0,
- 0x1f6e0, 0x1f6e0,
- 0x1f700, 0x1f784,
- 0x1f7c0, 0x1f7c8,
- 0x1f808, 0x1f80c,
- 0x1f840, 0x1f84c,
- 0x1fa84, 0x1fa90,
- 0x1fac0, 0x1fac0,
- 0x1fae0, 0x1fae0,
- 0x1fb00, 0x1fb84,
- 0x1fbc0, 0x1fbc8,
- 0x1fc08, 0x1fc0c,
- 0x1fc40, 0x1fc4c,
- 0x1fe84, 0x1fe90,
- 0x1fec0, 0x1fec0,
- 0x1fee0, 0x1fee0,
- 0x1ff00, 0x1ff84,
- 0x1ffc0, 0x1ffc8,
- 0x30000, 0x30030,
- 0x30100, 0x30144,
- 0x30190, 0x301d0,
- 0x30200, 0x30318,
- 0x30400, 0x3052c,
- 0x30540, 0x3061c,
- 0x30800, 0x30834,
- 0x308c0, 0x30908,
- 0x30910, 0x309ac,
- 0x30a00, 0x30a04,
- 0x30a0c, 0x30a2c,
- 0x30a44, 0x30a50,
- 0x30a74, 0x30c24,
- 0x30d08, 0x30d14,
- 0x30d1c, 0x30d20,
- 0x30d3c, 0x30d50,
- 0x31200, 0x3120c,
- 0x31220, 0x31220,
- 0x31240, 0x31240,
- 0x31600, 0x31600,
- 0x31608, 0x3160c,
- 0x31a00, 0x31a1c,
- 0x31e04, 0x31e20,
- 0x31e38, 0x31e3c,
- 0x31e80, 0x31e80,
- 0x31e88, 0x31ea8,
- 0x31eb0, 0x31eb4,
- 0x31ec8, 0x31ed4,
- 0x31fb8, 0x32004,
- 0x32208, 0x3223c,
- 0x32600, 0x32630,
- 0x32a00, 0x32abc,
- 0x32b00, 0x32b70,
- 0x33000, 0x33048,
- 0x33060, 0x3309c,
- 0x330f0, 0x33148,
- 0x33160, 0x3319c,
- 0x331f0, 0x332e4,
- 0x332f8, 0x333e4,
- 0x333f8, 0x33448,
- 0x33460, 0x3349c,
- 0x334f0, 0x33548,
- 0x33560, 0x3359c,
- 0x335f0, 0x336e4,
- 0x336f8, 0x337e4,
- 0x337f8, 0x337fc,
- 0x33814, 0x33814,
- 0x3382c, 0x3382c,
- 0x33880, 0x3388c,
- 0x338e8, 0x338ec,
- 0x33900, 0x33948,
- 0x33960, 0x3399c,
- 0x339f0, 0x33ae4,
- 0x33af8, 0x33b10,
- 0x33b28, 0x33b28,
- 0x33b3c, 0x33b50,
- 0x33bf0, 0x33c10,
- 0x33c28, 0x33c28,
- 0x33c3c, 0x33c50,
- 0x33cf0, 0x33cfc,
- 0x34000, 0x34030,
- 0x34100, 0x34144,
- 0x34190, 0x341d0,
- 0x34200, 0x34318,
- 0x34400, 0x3452c,
- 0x34540, 0x3461c,
- 0x34800, 0x34834,
- 0x348c0, 0x34908,
- 0x34910, 0x349ac,
- 0x34a00, 0x34a04,
- 0x34a0c, 0x34a2c,
- 0x34a44, 0x34a50,
- 0x34a74, 0x34c24,
- 0x34d08, 0x34d14,
- 0x34d1c, 0x34d20,
- 0x34d3c, 0x34d50,
- 0x35200, 0x3520c,
- 0x35220, 0x35220,
- 0x35240, 0x35240,
- 0x35600, 0x35600,
- 0x35608, 0x3560c,
- 0x35a00, 0x35a1c,
- 0x35e04, 0x35e20,
- 0x35e38, 0x35e3c,
- 0x35e80, 0x35e80,
- 0x35e88, 0x35ea8,
- 0x35eb0, 0x35eb4,
- 0x35ec8, 0x35ed4,
- 0x35fb8, 0x36004,
- 0x36208, 0x3623c,
- 0x36600, 0x36630,
- 0x36a00, 0x36abc,
- 0x36b00, 0x36b70,
- 0x37000, 0x37048,
- 0x37060, 0x3709c,
- 0x370f0, 0x37148,
- 0x37160, 0x3719c,
- 0x371f0, 0x372e4,
- 0x372f8, 0x373e4,
- 0x373f8, 0x37448,
- 0x37460, 0x3749c,
- 0x374f0, 0x37548,
- 0x37560, 0x3759c,
- 0x375f0, 0x376e4,
- 0x376f8, 0x377e4,
- 0x377f8, 0x377fc,
- 0x37814, 0x37814,
- 0x3782c, 0x3782c,
- 0x37880, 0x3788c,
- 0x378e8, 0x378ec,
- 0x37900, 0x37948,
- 0x37960, 0x3799c,
- 0x379f0, 0x37ae4,
- 0x37af8, 0x37b10,
- 0x37b28, 0x37b28,
- 0x37b3c, 0x37b50,
- 0x37bf0, 0x37c10,
- 0x37c28, 0x37c28,
- 0x37c3c, 0x37c50,
- 0x37cf0, 0x37cfc,
- 0x38000, 0x38030,
- 0x38100, 0x38144,
- 0x38190, 0x381d0,
- 0x38200, 0x38318,
- 0x38400, 0x3852c,
- 0x38540, 0x3861c,
- 0x38800, 0x38834,
- 0x388c0, 0x38908,
- 0x38910, 0x389ac,
- 0x38a00, 0x38a04,
- 0x38a0c, 0x38a2c,
- 0x38a44, 0x38a50,
- 0x38a74, 0x38c24,
- 0x38d08, 0x38d14,
- 0x38d1c, 0x38d20,
- 0x38d3c, 0x38d50,
- 0x39200, 0x3920c,
- 0x39220, 0x39220,
- 0x39240, 0x39240,
- 0x39600, 0x39600,
- 0x39608, 0x3960c,
- 0x39a00, 0x39a1c,
- 0x39e04, 0x39e20,
- 0x39e38, 0x39e3c,
- 0x39e80, 0x39e80,
- 0x39e88, 0x39ea8,
- 0x39eb0, 0x39eb4,
- 0x39ec8, 0x39ed4,
- 0x39fb8, 0x3a004,
- 0x3a208, 0x3a23c,
- 0x3a600, 0x3a630,
- 0x3aa00, 0x3aabc,
- 0x3ab00, 0x3ab70,
- 0x3b000, 0x3b048,
- 0x3b060, 0x3b09c,
- 0x3b0f0, 0x3b148,
- 0x3b160, 0x3b19c,
- 0x3b1f0, 0x3b2e4,
- 0x3b2f8, 0x3b3e4,
- 0x3b3f8, 0x3b448,
- 0x3b460, 0x3b49c,
- 0x3b4f0, 0x3b548,
- 0x3b560, 0x3b59c,
- 0x3b5f0, 0x3b6e4,
- 0x3b6f8, 0x3b7e4,
- 0x3b7f8, 0x3b7fc,
- 0x3b814, 0x3b814,
- 0x3b82c, 0x3b82c,
- 0x3b880, 0x3b88c,
- 0x3b8e8, 0x3b8ec,
- 0x3b900, 0x3b948,
- 0x3b960, 0x3b99c,
- 0x3b9f0, 0x3bae4,
- 0x3baf8, 0x3bb10,
- 0x3bb28, 0x3bb28,
- 0x3bb3c, 0x3bb50,
- 0x3bbf0, 0x3bc10,
- 0x3bc28, 0x3bc28,
- 0x3bc3c, 0x3bc50,
- 0x3bcf0, 0x3bcfc,
- 0x3c000, 0x3c030,
- 0x3c100, 0x3c144,
- 0x3c190, 0x3c1d0,
- 0x3c200, 0x3c318,
- 0x3c400, 0x3c52c,
- 0x3c540, 0x3c61c,
- 0x3c800, 0x3c834,
- 0x3c8c0, 0x3c908,
- 0x3c910, 0x3c9ac,
- 0x3ca00, 0x3ca04,
- 0x3ca0c, 0x3ca2c,
- 0x3ca44, 0x3ca50,
- 0x3ca74, 0x3cc24,
- 0x3cd08, 0x3cd14,
- 0x3cd1c, 0x3cd20,
- 0x3cd3c, 0x3cd50,
- 0x3d200, 0x3d20c,
- 0x3d220, 0x3d220,
- 0x3d240, 0x3d240,
- 0x3d600, 0x3d600,
- 0x3d608, 0x3d60c,
- 0x3da00, 0x3da1c,
- 0x3de04, 0x3de20,
- 0x3de38, 0x3de3c,
- 0x3de80, 0x3de80,
- 0x3de88, 0x3dea8,
- 0x3deb0, 0x3deb4,
- 0x3dec8, 0x3ded4,
- 0x3dfb8, 0x3e004,
- 0x3e208, 0x3e23c,
- 0x3e600, 0x3e630,
- 0x3ea00, 0x3eabc,
- 0x3eb00, 0x3eb70,
- 0x3f000, 0x3f048,
- 0x3f060, 0x3f09c,
- 0x3f0f0, 0x3f148,
- 0x3f160, 0x3f19c,
- 0x3f1f0, 0x3f2e4,
- 0x3f2f8, 0x3f3e4,
- 0x3f3f8, 0x3f448,
- 0x3f460, 0x3f49c,
- 0x3f4f0, 0x3f548,
- 0x3f560, 0x3f59c,
- 0x3f5f0, 0x3f6e4,
- 0x3f6f8, 0x3f7e4,
- 0x3f7f8, 0x3f7fc,
- 0x3f814, 0x3f814,
- 0x3f82c, 0x3f82c,
- 0x3f880, 0x3f88c,
- 0x3f8e8, 0x3f8ec,
- 0x3f900, 0x3f948,
- 0x3f960, 0x3f99c,
- 0x3f9f0, 0x3fae4,
- 0x3faf8, 0x3fb10,
- 0x3fb28, 0x3fb28,
- 0x3fb3c, 0x3fb50,
- 0x3fbf0, 0x3fc10,
- 0x3fc28, 0x3fc28,
- 0x3fc3c, 0x3fc50,
- 0x3fcf0, 0x3fcfc,
- 0x40000, 0x4000c,
- 0x40040, 0x40068,
- 0x40080, 0x40144,
- 0x40180, 0x4018c,
- 0x40200, 0x40298,
- 0x402ac, 0x4033c,
- 0x403f8, 0x403fc,
- 0x41304, 0x413c4,
- 0x41400, 0x4141c,
- 0x41480, 0x414d0,
- 0x44000, 0x44078,
- 0x440c0, 0x44278,
- 0x442c0, 0x44478,
- 0x444c0, 0x44678,
- 0x446c0, 0x44878,
- 0x448c0, 0x449fc,
- 0x45000, 0x45068,
- 0x45080, 0x45084,
- 0x450a0, 0x450b0,
- 0x45200, 0x45268,
- 0x45280, 0x45284,
- 0x452a0, 0x452b0,
- 0x460c0, 0x460e4,
- 0x47000, 0x4708c,
- 0x47200, 0x47250,
- 0x47400, 0x47420,
- 0x47600, 0x47618,
- 0x47800, 0x47814,
- 0x48000, 0x4800c,
- 0x48040, 0x48068,
- 0x48080, 0x48144,
- 0x48180, 0x4818c,
- 0x48200, 0x48298,
- 0x482ac, 0x4833c,
- 0x483f8, 0x483fc,
- 0x49304, 0x493c4,
- 0x49400, 0x4941c,
- 0x49480, 0x494d0,
- 0x4c000, 0x4c078,
- 0x4c0c0, 0x4c278,
- 0x4c2c0, 0x4c478,
- 0x4c4c0, 0x4c678,
- 0x4c6c0, 0x4c878,
- 0x4c8c0, 0x4c9fc,
- 0x4d000, 0x4d068,
- 0x4d080, 0x4d084,
- 0x4d0a0, 0x4d0b0,
- 0x4d200, 0x4d268,
- 0x4d280, 0x4d284,
- 0x4d2a0, 0x4d2b0,
- 0x4e0c0, 0x4e0e4,
- 0x4f000, 0x4f08c,
- 0x4f200, 0x4f250,
- 0x4f400, 0x4f420,
- 0x4f600, 0x4f618,
- 0x4f800, 0x4f814,
- 0x50000, 0x500cc,
- 0x50400, 0x50400,
- 0x50800, 0x508cc,
- 0x50c00, 0x50c00,
- 0x51000, 0x5101c,
- 0x51300, 0x51308,
- };
-
- int i;
- struct adapter *ap = netdev2adap(dev);
- static const unsigned int *reg_ranges;
- int arr_size = 0, buf_size = 0;
-
- if (is_t4(ap->params.chip)) {
- reg_ranges = &t4_reg_ranges[0];
- arr_size = ARRAY_SIZE(t4_reg_ranges);
- buf_size = T4_REGMAP_SIZE;
- } else {
- reg_ranges = &t5_reg_ranges[0];
- arr_size = ARRAY_SIZE(t5_reg_ranges);
- buf_size = T5_REGMAP_SIZE;
- }
-
- regs->version = mk_adap_vers(ap);
-
- memset(buf, 0, buf_size);
- for (i = 0; i < arr_size; i += 2)
- reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
-}
-
-static int restart_autoneg(struct net_device *dev)
-{
- struct port_info *p = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EAGAIN;
- if (p->link_cfg.autoneg != AUTONEG_ENABLE)
- return -EINVAL;
- t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
- return 0;
-}
-
-static int identify_port(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- unsigned int val;
- struct adapter *adap = netdev2adap(dev);
-
- if (state == ETHTOOL_ID_ACTIVE)
- val = 0xffff;
- else if (state == ETHTOOL_ID_INACTIVE)
- val = 0;
- else
- return -EINVAL;
-
- return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
-}
-
-static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
-{
- unsigned int v = 0;
-
- if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
- type == FW_PORT_TYPE_BT_XAUI) {
- v |= SUPPORTED_TP;
- if (caps & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
- v |= SUPPORTED_Backplane;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseKX_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseKX4_Full;
- } else if (type == FW_PORT_TYPE_KR)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
- else if (type == FW_PORT_TYPE_BP_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
- else if (type == FW_PORT_TYPE_BP4_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
- SUPPORTED_10000baseKX4_Full;
- else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI ||
- type == FW_PORT_TYPE_SFP ||
- type == FW_PORT_TYPE_QSFP_10G ||
- type == FW_PORT_TYPE_QSA) {
- v |= SUPPORTED_FIBRE;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_BP40_BA ||
- type == FW_PORT_TYPE_QSFP) {
- v |= SUPPORTED_40000baseSR4_Full;
- v |= SUPPORTED_FIBRE;
- }
-
- if (caps & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- return v;
-}
-
-static unsigned int to_fw_linkcaps(unsigned int caps)
-{
- unsigned int v = 0;
-
- if (caps & ADVERTISED_100baseT_Full)
- v |= FW_PORT_CAP_SPEED_100M;
- if (caps & ADVERTISED_1000baseT_Full)
- v |= FW_PORT_CAP_SPEED_1G;
- if (caps & ADVERTISED_10000baseT_Full)
- v |= FW_PORT_CAP_SPEED_10G;
- if (caps & ADVERTISED_40000baseSR4_Full)
- v |= FW_PORT_CAP_SPEED_40G;
- return v;
-}
-
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- const struct port_info *p = netdev_priv(dev);
-
- if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
- p->port_type == FW_PORT_TYPE_BT_XFI ||
- p->port_type == FW_PORT_TYPE_BT_XAUI)
- cmd->port = PORT_TP;
- else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
- p->port_type == FW_PORT_TYPE_FIBER_XAUI)
- cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP ||
- p->port_type == FW_PORT_TYPE_QSFP_10G ||
- p->port_type == FW_PORT_TYPE_QSA ||
- p->port_type == FW_PORT_TYPE_QSFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
- p->mod_type == FW_PORT_MOD_TYPE_SR ||
- p->mod_type == FW_PORT_MOD_TYPE_ER ||
- p->mod_type == FW_PORT_MOD_TYPE_LRM)
- cmd->port = PORT_FIBRE;
- else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
- cmd->port = PORT_DA;
- else
- cmd->port = PORT_OTHER;
- } else
- cmd->port = PORT_OTHER;
-
- if (p->mdio_addr >= 0) {
- cmd->phy_address = p->mdio_addr;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
- MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
- } else {
- cmd->phy_address = 0; /* not really, but no better option */
- cmd->transceiver = XCVR_INTERNAL;
- cmd->mdio_support = 0;
- }
-
- cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
- cmd->advertising = from_fw_linkcaps(p->port_type,
- p->link_cfg.advertising);
- ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = p->link_cfg.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
- return 0;
-}
-
-static unsigned int speed_to_caps(int speed)
-{
- if (speed == 100)
- return FW_PORT_CAP_SPEED_100M;
- if (speed == 1000)
- return FW_PORT_CAP_SPEED_1G;
- if (speed == 10000)
- return FW_PORT_CAP_SPEED_10G;
- if (speed == 40000)
- return FW_PORT_CAP_SPEED_40G;
- return 0;
-}
-
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- unsigned int cap;
- struct port_info *p = netdev_priv(dev);
- struct link_config *lc = &p->link_cfg;
- u32 speed = ethtool_cmd_speed(cmd);
-
- if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
- return -EINVAL;
-
- if (!(lc->supported & FW_PORT_CAP_ANEG)) {
- /*
- * PHY offers a single speed. See if that's what's
- * being requested.
- */
- if (cmd->autoneg == AUTONEG_DISABLE &&
- (lc->supported & speed_to_caps(speed)))
- return 0;
- return -EINVAL;
- }
-
- if (cmd->autoneg == AUTONEG_DISABLE) {
- cap = speed_to_caps(speed);
-
- if (!(lc->supported & cap) ||
- (speed == 1000) ||
- (speed == 10000) ||
- (speed == 40000))
- return -EINVAL;
- lc->requested_speed = cap;
- lc->advertising = 0;
- } else {
- cap = to_fw_linkcaps(cmd->advertising);
- if (!(lc->supported & cap))
- return -EINVAL;
- lc->requested_speed = 0;
- lc->advertising = cap | FW_PORT_CAP_ANEG;
- }
- lc->autoneg = cmd->autoneg;
-
- if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
- lc);
- return 0;
-}
-
-static void get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *epause)
-{
- struct port_info *p = netdev_priv(dev);
-
- epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
- epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
- epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
-}
-
-static int set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *epause)
-{
- struct port_info *p = netdev_priv(dev);
- struct link_config *lc = &p->link_cfg;
-
- if (epause->autoneg == AUTONEG_DISABLE)
- lc->requested_fc = 0;
- else if (lc->supported & FW_PORT_CAP_ANEG)
- lc->requested_fc = PAUSE_AUTONEG;
- else
- return -EINVAL;
-
- if (epause->rx_pause)
- lc->requested_fc |= PAUSE_RX;
- if (epause->tx_pause)
- lc->requested_fc |= PAUSE_TX;
- if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
- lc);
- return 0;
-}
-
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
-{
- const struct port_info *pi = netdev_priv(dev);
- const struct sge *s = &pi->adapter->sge;
-
- e->rx_max_pending = MAX_RX_BUFFERS;
- e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
- e->rx_jumbo_max_pending = 0;
- e->tx_max_pending = MAX_TXQ_ENTRIES;
-
- e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
- e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
- e->rx_jumbo_pending = 0;
- e->tx_pending = s->ethtxq[pi->first_qset].q.size;
-}
-
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
-{
- int i;
- const struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
- struct sge *s = &adapter->sge;
-
- if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
- e->tx_pending > MAX_TXQ_ENTRIES ||
- e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
- e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
- e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
- return -EINVAL;
-
- if (adapter->flags & FULL_INIT_DONE)
- return -EBUSY;
-
- for (i = 0; i < pi->nqsets; ++i) {
- s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
- s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
- s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
- }
- return 0;
-}
-
static int closest_timer(const struct sge *s, int time)
{
int i, delta, match = 0, min_delta = INT_MAX;
@@ -2543,19 +1361,8 @@ static int closest_thres(const struct sge *s, int thres)
return match;
}
-/*
- * Return a queue's interrupt hold-off time in us. 0 means no timer.
- */
-unsigned int qtimer_val(const struct adapter *adap,
- const struct sge_rspq *q)
-{
- unsigned int idx = q->intr_params >> 1;
-
- return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
-}
-
/**
- * set_rspq_intr_params - set a queue's interrupt holdoff parameters
+ * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
* @q: the Rx queue
* @us: the hold-off time in us, or 0 to disable timer
* @cnt: the hold-off packet count, or 0 to disable counter
@@ -2563,8 +1370,8 @@ unsigned int qtimer_val(const struct adapter *adap,
* Sets an Rx queue's interrupt hold-off time and packet count. At least
* one of the two needs to be enabled for the queue to generate interrupts.
*/
-static int set_rspq_intr_params(struct sge_rspq *q,
- unsigned int us, unsigned int cnt)
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
+ unsigned int us, unsigned int cnt)
{
struct adapter *adap = q->adap;
@@ -2595,259 +1402,6 @@ static int set_rspq_intr_params(struct sge_rspq *q,
return 0;
}
-/**
- * set_rx_intr_params - set a net devices's RX interrupt holdoff parameters
- * @dev: the network device
- * @us: the hold-off time in us, or 0 to disable timer
- * @cnt: the hold-off packet count, or 0 to disable counter
- *
- * Set the RX interrupt hold-off parameters for a network device.
- */
-static int set_rx_intr_params(struct net_device *dev,
- unsigned int us, unsigned int cnt)
-{
- int i, err;
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
- for (i = 0; i < pi->nqsets; i++, q++) {
- err = set_rspq_intr_params(&q->rspq, us, cnt);
- if (err)
- return err;
- }
- return 0;
-}
-
-static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
-{
- int i;
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
- for (i = 0; i < pi->nqsets; i++, q++)
- q->rspq.adaptive_rx = adaptive_rx;
-
- return 0;
-}
-
-static int get_adaptive_rx_setting(struct net_device *dev)
-{
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
-
- return q->rspq.adaptive_rx;
-}
-
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
-{
- set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
- return set_rx_intr_params(dev, c->rx_coalesce_usecs,
- c->rx_max_coalesced_frames);
-}
-
-static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
-{
- const struct port_info *pi = netdev_priv(dev);
- const struct adapter *adap = pi->adapter;
- const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
-
- c->rx_coalesce_usecs = qtimer_val(adap, rq);
- c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
- adap->sge.counter_val[rq->pktcnt_idx] : 0;
- c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
- return 0;
-}
-
-/**
- * eeprom_ptov - translate a physical EEPROM address to virtual
- * @phys_addr: the physical EEPROM address
- * @fn: the PCI function number
- * @sz: size of function-specific area
- *
- * Translate a physical EEPROM address to virtual. The first 1K is
- * accessed through virtual addresses starting at 31K, the rest is
- * accessed through virtual addresses starting at 0.
- *
- * The mapping is as follows:
- * [0..1K) -> [31K..32K)
- * [1K..1K+A) -> [31K-A..31K)
- * [1K+A..ES) -> [0..ES-A-1K)
- *
- * where A = @fn * @sz, and ES = EEPROM size.
- */
-static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
-{
- fn *= sz;
- if (phys_addr < 1024)
- return phys_addr + (31 << 10);
- if (phys_addr < 1024 + fn)
- return 31744 - fn + phys_addr - 1024;
- if (phys_addr < EEPROMSIZE)
- return phys_addr - 1024 - fn;
- return -EINVAL;
-}
-
-/*
- * The next two routines implement eeprom read/write from physical addresses.
- */
-static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
-{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
-
- if (vaddr >= 0)
- vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
- return vaddr < 0 ? vaddr : 0;
-}
-
-static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
-{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
-
- if (vaddr >= 0)
- vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
- return vaddr < 0 ? vaddr : 0;
-}
-
-#define EEPROM_MAGIC 0x38E2F10C
-
-static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
- u8 *data)
-{
- int i, err = 0;
- struct adapter *adapter = netdev2adap(dev);
-
- u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- e->magic = EEPROM_MAGIC;
- for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
- err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
-
- if (!err)
- memcpy(data, buf + e->offset, e->len);
- kfree(buf);
- return err;
-}
-
-static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
- u8 *data)
-{
- u8 *buf;
- int err = 0;
- u32 aligned_offset, aligned_len, *p;
- struct adapter *adapter = netdev2adap(dev);
-
- if (eeprom->magic != EEPROM_MAGIC)
- return -EINVAL;
-
- aligned_offset = eeprom->offset & ~3;
- aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
-
- if (adapter->fn > 0) {
- u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
-
- if (aligned_offset < start ||
- aligned_offset + aligned_len > start + EEPROMPFSIZE)
- return -EPERM;
- }
-
- if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
- /*
- * RMW possibly needed for first or last words.
- */
- buf = kmalloc(aligned_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
- if (!err && aligned_len > 4)
- err = eeprom_rd_phys(adapter,
- aligned_offset + aligned_len - 4,
- (u32 *)&buf[aligned_len - 4]);
- if (err)
- goto out;
- memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
- } else
- buf = data;
-
- err = t4_seeprom_wp(adapter, false);
- if (err)
- goto out;
-
- for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
- err = eeprom_wr_phys(adapter, aligned_offset, *p);
- aligned_offset += 4;
- }
-
- if (!err)
- err = t4_seeprom_wp(adapter, true);
-out:
- if (buf != data)
- kfree(buf);
- return err;
-}
-
-static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
-{
- int ret;
- const struct firmware *fw;
- struct adapter *adap = netdev2adap(netdev);
- unsigned int mbox = PCIE_FW_MASTER_M + 1;
-
- ef->data[sizeof(ef->data) - 1] = '\0';
- ret = request_firmware(&fw, ef->data, adap->pdev_dev);
- if (ret < 0)
- return ret;
-
- /* If the adapter has been fully initialized then we'll go ahead and
- * try to get the firmware's cooperation in upgrading to the new
- * firmware image otherwise we'll try to do the entire job from the
- * host ... and we always "force" the operation in this path.
- */
- if (adap->flags & FULL_INIT_DONE)
- mbox = adap->mbox;
-
- ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
- release_firmware(fw);
- if (!ret)
- dev_info(adap->pdev_dev, "loaded firmware %s,"
- " reload cxgb4 driver\n", ef->data);
- return ret;
-}
-
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- wol->supported = WAKE_BCAST | WAKE_MAGIC;
- wol->wolopts = netdev2adap(dev)->wol;
- memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- int err = 0;
- struct port_info *pi = netdev_priv(dev);
-
- if (wol->wolopts & ~WOL_SUPPORTED)
- return -EINVAL;
- t4_wol_magic_enable(pi->adapter, pi->tx_chan,
- (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
- if (wol->wolopts & WAKE_BCAST) {
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
- ~0ULL, 0, false);
- if (!err)
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
- ~6ULL, ~0ULL, BCAST_CRC, true);
- } else
- t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
- return err;
-}
-
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
const struct port_info *pi = netdev_priv(dev);
@@ -2865,144 +1419,6 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
return err;
}
-static u32 get_rss_table_size(struct net_device *dev)
-{
- const struct port_info *pi = netdev_priv(dev);
-
- return pi->rss_size;
-}
-
-static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
-{
- const struct port_info *pi = netdev_priv(dev);
- unsigned int n = pi->rss_size;
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!p)
- return 0;
- while (n--)
- p[n] = pi->rss[n];
- return 0;
-}
-
-static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
- const u8 hfunc)
-{
- unsigned int i;
- struct port_info *pi = netdev_priv(dev);
-
- /* We require at least one supported parameter to be changed and no
- * change in any of the unsupported parameters
- */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
- return -EOPNOTSUPP;
- if (!p)
- return 0;
-
- for (i = 0; i < pi->rss_size; i++)
- pi->rss[i] = p[i];
- if (pi->adapter->flags & FULL_INIT_DONE)
- return write_rss(pi, pi->rss);
- return 0;
-}
-
-static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules)
-{
- const struct port_info *pi = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- unsigned int v = pi->rss_mode;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- }
- return 0;
- }
- case ETHTOOL_GRXRINGS:
- info->data = pi->nqsets;
- return 0;
- }
- return -EOPNOTSUPP;
-}
-
-static const struct ethtool_ops cxgb_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
- .get_drvinfo = get_drvinfo,
- .get_msglevel = get_msglevel,
- .set_msglevel = set_msglevel,
- .get_ringparam = get_sge_param,
- .set_ringparam = set_sge_param,
- .get_coalesce = get_coalesce,
- .set_coalesce = set_coalesce,
- .get_eeprom_len = get_eeprom_len,
- .get_eeprom = get_eeprom,
- .set_eeprom = set_eeprom,
- .get_pauseparam = get_pauseparam,
- .set_pauseparam = set_pauseparam,
- .get_link = ethtool_op_get_link,
- .get_strings = get_strings,
- .set_phys_id = identify_port,
- .nway_reset = restart_autoneg,
- .get_sset_count = get_sset_count,
- .get_ethtool_stats = get_stats,
- .get_regs_len = get_regs_len,
- .get_regs = get_regs,
- .get_wol = get_wol,
- .set_wol = set_wol,
- .get_rxnfc = get_rxnfc,
- .get_rxfh_indir_size = get_rss_table_size,
- .get_rxfh = get_rss_table,
- .set_rxfh = set_rss_table,
- .flash_device = set_flash,
-};
-
static int setup_debugfs(struct adapter *adap)
{
if (IS_ERR_OR_NULL(adap->debugfs_root))
@@ -4601,6 +3017,10 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ .ndo_fcoe_enable = cxgb_fcoe_enable,
+ .ndo_fcoe_disable = cxgb_fcoe_disable,
+#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = cxgb_busy_poll,
#endif
@@ -5416,7 +3836,7 @@ static int adap_init0(struct adapter *adap)
adap->tids.stid_base = val[1];
adap->tids.nstids = val[2] - val[1] + 1;
/*
- * Setup server filter region. Divide the availble filter
+ * Setup server filter region. Divide the available filter
* region into two parts. Regular filters get 1/3rd and server
 * filters get 2/3rd part. This is only enabled if the workaround
* path is enabled.
@@ -5683,7 +4103,7 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
unsigned int size, unsigned int iqe_size)
{
q->adap = adap;
- set_rspq_intr_params(q, us, cnt);
+ cxgb4_set_rspq_intr_params(q, us, cnt);
q->iqe_len = iqe_size;
q->size = size;
}
@@ -5758,7 +4178,16 @@ static void cfg_queues(struct adapter *adap)
s->ofldqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
- s->rdmaciqs = adap->params.nports;
+ /* Try to allow at least 1 CIQ per cpu, rounding down
+ * to a multiple of the number of ports, with a minimum of 1 per port.
+ * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
+ * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
+ * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
+ */
+ s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
+ s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
+ adap->params.nports;
+ s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
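
The CIQ sizing added here is plain integer arithmetic and matches the examples in
the comment. Below is a minimal userspace model of the calculation; the cpu and
port counts are the comment's hypothetical cases and the CIQ cap is an assumed
value, not the driver's MAX_RDMA_CIQS.

    #include <stdio.h>

    /* clamp to the CIQ cap, round down to a multiple of the port count,
     * and never drop below one CIQ per port
     */
    static int rdma_ciqs(int cpus, int nports, int ciq_cap)
    {
        int ciqs = cpus < ciq_cap ? cpus : ciq_cap;

        ciqs = (ciqs / nports) * nports;
        return ciqs > nports ? ciqs : nports;
    }

    int main(void)
    {
        printf("%d\n", rdma_ciqs(6, 2, 32));    /* 6 CIQs, 3 per port */
        printf("%d\n", rdma_ciqs(6, 4, 32));    /* 4 CIQs, 1 per port */
        printf("%d\n", rdma_ciqs(2, 4, 32));    /* 4 CIQs, 1 per port */
        return 0;
    }
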
@@ -5844,12 +4273,17 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
- int i, want, need;
+ int i, want, need, allocated;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
- struct msix_entry entries[MAX_INGQ + 1];
+ struct msix_entry *entries;
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_INGQ + 1; ++i)
entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS;
@@ -5866,29 +4300,39 @@ static int enable_msix(struct adapter *adap)
#else
need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
- want = pci_enable_msix_range(adap->pdev, entries, need, want);
- if (want < 0)
- return want;
+ allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
+ if (allocated < 0) {
+ dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
+ " not using MSI-X\n");
+ kfree(entries);
+ return allocated;
+ }
- /*
- * Distribute available vectors to the various queue groups.
+ /* Distribute available vectors to the various queue groups.
* Every group gets its minimum requirement and NIC gets top
* priority for leftovers.
*/
- i = want - EXTRA_VECS - ofld_need;
+ i = allocated - EXTRA_VECS - ofld_need;
if (i < s->max_ethqsets) {
s->max_ethqsets = i;
if (i < s->ethqsets)
reduce_ethqs(adap, i);
}
if (is_offload(adap)) {
- i = want - EXTRA_VECS - s->max_ethqsets;
- i -= ofld_need - nchan;
+ if (allocated < want) {
+ s->rdmaqs = nchan;
+ s->rdmaciqs = nchan;
+ }
+
+ /* leftovers go to OFLD */
+ i = allocated - EXTRA_VECS - s->max_ethqsets -
+ s->rdmaqs - s->rdmaciqs;
s->ofldqsets = (i / nchan) * nchan; /* round down */
}
- for (i = 0; i < want; ++i)
+ for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
+ kfree(entries);
return 0;
}
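
With the switch from "want" to "allocated", the leftovers handed to the offload
queue sets now reflect what the PCI core actually granted. A minimal sketch of
that last distribution step, with invented vector counts; EXTRA_VECS is assumed
to be the small fixed number of non-queue vectors.

    #include <stdio.h>

    int main(void)
    {
        int allocated = 28;     /* granted by pci_enable_msix_range() (invented) */
        int extra_vecs = 2;     /* stand-in for EXTRA_VECS */
        int max_ethqsets = 12, rdmaqs = 4, rdmaciqs = 4, nchan = 4;

        /* whatever is left after the NIC, RDMA and CIQ vectors goes to the
         * offload queue sets, rounded down to a multiple of the port count
         */
        int left = allocated - extra_vecs - max_ethqsets - rdmaqs - rdmaciqs;
        int ofldqsets = (left / nchan) * nchan;

        printf("ofldqsets = %d\n", ofldqsets);  /* prints 4 */
        return 0;
    }
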
@@ -6154,7 +4598,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->dcbnl_ops = &cxgb4_dcb_ops;
cxgb4_dcb_state_init(netdev);
#endif
- netdev->ethtool_ops = &cxgb_ethtool_ops;
+ cxgb4_set_ethtool_ops(netdev);
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b688b32c21fe..0d2eddab04ef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -46,6 +46,9 @@
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif /* CONFIG_NET_RX_BUSY_POLL */
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#include <scsi/fc/fc_fcoe.h>
+#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
@@ -118,12 +121,6 @@
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
- * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
- * attempt to refill it.
- */
-#define FL_STARVE_THRES 4
-
-/*
* Suspend an Ethernet Tx queue with fewer available descriptors than this.
* This is the same as calc_tx_descs() for a TSO packet with
* nr_frags == MAX_SKB_FRAGS.
@@ -141,7 +138,7 @@
* Max Tx descriptor space we allow for an Ethernet packet to be inlined
* into a WR.
*/
-#define MAX_IMM_TX_PKT_LEN 128
+#define MAX_IMM_TX_PKT_LEN 256
/*
* Max size of a WR sent through a control Tx queue.
@@ -245,9 +242,21 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
return fl->size - 8; /* 1 descriptor = 8 buffers */
}
-static inline bool fl_starving(const struct sge_fl *fl)
+/**
+ * fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
+ * @fl: the Free List
+ *
+ * Tests specified Free List to see whether the number of buffers
+ * available to the hardware has fallen below our "starvation"
+ * threshold.
+ */
+static inline bool fl_starving(const struct adapter *adapter,
+ const struct sge_fl *fl)
{
- return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+ const struct sge *s = &adapter->sge;
+
+ return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
static int map_skb(struct device *dev, const struct sk_buff *skb,
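
The starvation check itself is just the comparison in fl_starving(), now made
against the per-adapter sge.fl_starve_thres instead of the removed
FL_STARVE_THRES constant. A standalone model with made-up buffer counts and the
old fixed threshold of 4:

    #include <stdbool.h>
    #include <stdio.h>

    static bool fl_starving(unsigned int avail, unsigned int pend_cred,
                            unsigned int fl_starve_thres)
    {
        /* buffers the hardware can actually use: those posted minus those
         * still waiting for a doorbell
         */
        return avail - pend_cred <= fl_starve_thres;
    }

    int main(void)
    {
        printf("%d\n", fl_starving(10, 7, 4));  /* 3 usable <= 4: starving (1) */
        printf("%d\n", fl_starving(32, 4, 4));  /* 28 usable: not starving (0) */
        return 0;
    }
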
@@ -583,8 +592,10 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
unsigned int cred = q->avail;
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ int node;
gfp |= __GFP_NOWARN;
+ node = dev_to_node(adap->pdev_dev);
if (s->fl_pg_order == 0)
goto alloc_small_pages;
@@ -593,7 +604,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
* Prefer large buffers
*/
while (n) {
- pg = __dev_alloc_pages(gfp, s->fl_pg_order);
+ pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
if (unlikely(!pg)) {
q->large_alloc_failed++;
break; /* fall back to single pages */
@@ -623,7 +634,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
alloc_small_pages:
while (n--) {
- pg = __dev_alloc_page(gfp);
+ pg = alloc_pages_node(node, gfp, 0);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
@@ -652,7 +663,7 @@ out: cred = q->avail - cred;
q->pend_cred += cred;
ring_fl_db(adap, q);
- if (unlikely(fl_starving(q))) {
+ if (unlikely(fl_starving(adap, q))) {
smp_wmb();
set_bit(q->cntxt_id - adap->sge.egr_start,
adap->sge.starving_fl);
@@ -719,6 +730,22 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
*/
static inline unsigned int sgl_len(unsigned int n)
{
+ /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+ * addresses. The DSGL Work Request starts off with a 32-bit DSGL
+ * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+ * repeated sequences of { Length[i], Length[i+1], Address[i],
+ * Address[i+1] } (this ensures that all addresses are on 64-bit
+ * boundaries). If N is even, then Length[N+1] should be set to 0 and
+ * Address[N+1] is omitted.
+ *
+ * The following calculation incorporates all of the above. It's
+ * somewhat hard to follow but, briefly: the "+2" accounts for the
+ * first two flits which include the DSGL header, Length0 and
+ * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+ * flits for every pair of the remaining N, plus one of the two extra
+ * flits a trailing unpaired entry needs); and finally the "+((n-1)&1)"
+ * adds the other flit needed when (n-1) is odd ...
+ */
n--;
return (3 * n) / 2 + (n & 1) + 2;
}
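
The formula can be checked against the comment with a concrete count. The
snippet below copies sgl_len() verbatim into a userspace program; the choice of
four DMA addresses is arbitrary.

    #include <stdio.h>

    static unsigned int sgl_len(unsigned int n)
    {
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
    }

    int main(void)
    {
        /* 4 addresses: ULPTX header + Length0 + Address0 take 2 flits, one
         * full { Len, Len, Addr, Addr } pair takes 3, and the leftover entry
         * padded with a zero length takes 2, i.e. 7 flits in total.
         */
        printf("%u\n", sgl_len(4));     /* prints 7 */
        return 0;
    }
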
@@ -766,12 +793,30 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
unsigned int flits;
int hdrlen = is_eth_imm(skb);
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+
if (hdrlen)
return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+ /* Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
if (skb_shinfo(skb)->gso_size)
- flits += 2;
+ flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
return flits;
}
@@ -1044,6 +1089,38 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
q->pidx -= q->size;
}
+#ifdef CONFIG_CHELSIO_T4_FCOE
+static inline int
+cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
+ const struct port_info *pi, u64 *cntrl)
+{
+ const struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+ if (!(fcoe->flags & CXGB_FCOE_ENABLED))
+ return 0;
+
+ if (skb->protocol != htons(ETH_P_FCOE))
+ return 0;
+
+ skb_reset_mac_header(skb);
+ skb->mac_len = sizeof(struct ethhdr);
+
+ skb_set_network_header(skb, skb->mac_len);
+ skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
+
+ if (!cxgb_fcoe_sof_eof_supported(adap, skb))
+ return -ENOTSUPP;
+
+ /* FC CRC offload */
+ *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
+ TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
+ TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
+ TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
+ TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+ return 0;
+}
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1066,6 +1143,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
bool immediate = false;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ int err;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
/*
* The chip min packet length is 10 octets but play safe and reject
@@ -1082,6 +1162,13 @@ out_free: dev_kfree_skb_any(skb);
q = &adap->sge.ethtxq[qidx + pi->first_qset];
reclaim_completed_tx(adap, &q->q, true);
+ cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
+ if (unlikely(err == -ENOTSUPP))
+ goto out_free;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
flits = calc_tx_flits(skb);
ndesc = flits_to_desc(flits);
@@ -1153,13 +1240,17 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
q->tx_cso++;
- } else
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ }
}
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ if (skb->protocol == htons(ETH_P_FCOE))
+ cntrl |= TXPKT_VLAN(
+ ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
+#endif /* CONFIG_CHELSIO_T4_FCOE */
}
cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1759,6 +1850,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+ struct port_info *pi;
+#endif
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
@@ -1799,8 +1893,24 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb->ip_summed = CHECKSUM_COMPLETE;
rxq->stats.rx_cso++;
}
- } else
+ } else {
skb_checksum_none_assert(skb);
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
+ RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
+
+ pi = netdev_priv(skb->dev);
+ if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
+ if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
+ (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
+ if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+
+#undef CPL_RX_PKT_FLAGS
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+ }
if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
@@ -1900,7 +2010,7 @@ static int process_responses(struct sge_rspq *q, int budget)
if (!is_new_response(rc, q))
break;
- rmb();
+ dma_rmb();
rsp_type = RSPD_TYPE(rc->type_gen);
if (likely(rsp_type == RSP_TYPE_FLBUF)) {
struct page_frag *fp;
@@ -2092,7 +2202,7 @@ static unsigned int process_intrq(struct adapter *adap)
if (!is_new_response(rc, q))
break;
- rmb();
+ dma_rmb();
if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
@@ -2128,7 +2238,8 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
struct adapter *adap = cookie;
- t4_slow_intr_handler(adap);
+ if (adap->flags & MASTER_PF)
+ t4_slow_intr_handler(adap);
process_intrq(adap);
return IRQ_HANDLED;
}
@@ -2143,7 +2254,8 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
struct adapter *adap = cookie;
t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
- if (t4_slow_intr_handler(adap) | process_intrq(adap))
+ if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
+ process_intrq(adap))
return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */
}
@@ -2180,7 +2292,7 @@ static void sge_rx_timer_cb(unsigned long data)
clear_bit(id, s->starving_fl);
smp_mb__after_atomic();
- if (fl_starving(fl)) {
+ if (fl_starving(adap, fl)) {
rxq = container_of(fl, struct sge_eth_rxq, fl);
if (napi_reschedule(&rxq->rspq.napi))
fl->starving++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index ee394dc68303..e8578a742f2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
- MA_EXT_MEMORY1_BAR_A));
+ MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
@@ -625,6 +625,734 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
return 0;
}
+/**
+ * t4_get_regs_len - return the size of the chips register set
+ * @adapter: the adapter
+ *
+ * Returns the size of the chip's BAR0 register space.
+ */
+unsigned int t4_get_regs_len(struct adapter *adapter)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ return T4_REGMAP_SIZE;
+
+ case CHELSIO_T5:
+ return T5_REGMAP_SIZE;
+ }
+
+ dev_err(adapter->pdev_dev,
+ "Unsupported chip version %d\n", chip_version);
+ return 0;
+}
+
+/**
+ * t4_get_regs - read chip registers into provided buffer
+ * @adap: the adapter
+ * @buf: register buffer
+ * @buf_size: size (in bytes) of register buffer
+ *
+ * If the provided register buffer isn't large enough for the chip's
+ * full register range, the register dump will be truncated to the
+ * register buffer's size.
+ */
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
+{
+ static const unsigned int t4_reg_ranges[] = {
+ 0x1008, 0x1108,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1300, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x30d8,
+ 0x30e0, 0x5924,
+ 0x5960, 0x59d4,
+ 0x5a00, 0x5af8,
+ 0x6000, 0x6098,
+ 0x6100, 0x6150,
+ 0x6200, 0x6208,
+ 0x6240, 0x6248,
+ 0x6280, 0x6338,
+ 0x6370, 0x638c,
+ 0x6400, 0x643c,
+ 0x6500, 0x6524,
+ 0x6a00, 0x6a38,
+ 0x6a60, 0x6a78,
+ 0x6b00, 0x6b84,
+ 0x6bf0, 0x6c84,
+ 0x6cf0, 0x6d84,
+ 0x6df0, 0x6e84,
+ 0x6ef0, 0x6f84,
+ 0x6ff0, 0x7084,
+ 0x70f0, 0x7184,
+ 0x71f0, 0x7284,
+ 0x72f0, 0x7384,
+ 0x73f0, 0x7450,
+ 0x7500, 0x7530,
+ 0x7600, 0x761c,
+ 0x7680, 0x76cc,
+ 0x7700, 0x7798,
+ 0x77c0, 0x77fc,
+ 0x7900, 0x79fc,
+ 0x7b00, 0x7c38,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8e1c,
+ 0x8e30, 0x8e78,
+ 0x8ea0, 0x8f6c,
+ 0x8fc0, 0x9074,
+ 0x90fc, 0x90fc,
+ 0x9400, 0x9458,
+ 0x9600, 0x96bc,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xea7c,
+ 0xf000, 0x11110,
+ 0x11118, 0x11190,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x1924c,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194f8,
+ 0x19800, 0x19f30,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e28c,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e68c,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea8c,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee8c,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f28c,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f68c,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa8c,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe8c,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x20000, 0x2002c,
+ 0x20100, 0x2013c,
+ 0x20190, 0x201c8,
+ 0x20200, 0x20318,
+ 0x20400, 0x20528,
+ 0x20540, 0x20614,
+ 0x21000, 0x21040,
+ 0x2104c, 0x21060,
+ 0x210c0, 0x210ec,
+ 0x21200, 0x21268,
+ 0x21270, 0x21284,
+ 0x212fc, 0x21388,
+ 0x21400, 0x21404,
+ 0x21500, 0x21518,
+ 0x2152c, 0x2153c,
+ 0x21550, 0x21554,
+ 0x21600, 0x21600,
+ 0x21608, 0x21628,
+ 0x21630, 0x2163c,
+ 0x21700, 0x2171c,
+ 0x21780, 0x2178c,
+ 0x21800, 0x21c38,
+ 0x21c80, 0x21d7c,
+ 0x21e00, 0x21e04,
+ 0x22000, 0x2202c,
+ 0x22100, 0x2213c,
+ 0x22190, 0x221c8,
+ 0x22200, 0x22318,
+ 0x22400, 0x22528,
+ 0x22540, 0x22614,
+ 0x23000, 0x23040,
+ 0x2304c, 0x23060,
+ 0x230c0, 0x230ec,
+ 0x23200, 0x23268,
+ 0x23270, 0x23284,
+ 0x232fc, 0x23388,
+ 0x23400, 0x23404,
+ 0x23500, 0x23518,
+ 0x2352c, 0x2353c,
+ 0x23550, 0x23554,
+ 0x23600, 0x23600,
+ 0x23608, 0x23628,
+ 0x23630, 0x2363c,
+ 0x23700, 0x2371c,
+ 0x23780, 0x2378c,
+ 0x23800, 0x23c38,
+ 0x23c80, 0x23d7c,
+ 0x23e00, 0x23e04,
+ 0x24000, 0x2402c,
+ 0x24100, 0x2413c,
+ 0x24190, 0x241c8,
+ 0x24200, 0x24318,
+ 0x24400, 0x24528,
+ 0x24540, 0x24614,
+ 0x25000, 0x25040,
+ 0x2504c, 0x25060,
+ 0x250c0, 0x250ec,
+ 0x25200, 0x25268,
+ 0x25270, 0x25284,
+ 0x252fc, 0x25388,
+ 0x25400, 0x25404,
+ 0x25500, 0x25518,
+ 0x2552c, 0x2553c,
+ 0x25550, 0x25554,
+ 0x25600, 0x25600,
+ 0x25608, 0x25628,
+ 0x25630, 0x2563c,
+ 0x25700, 0x2571c,
+ 0x25780, 0x2578c,
+ 0x25800, 0x25c38,
+ 0x25c80, 0x25d7c,
+ 0x25e00, 0x25e04,
+ 0x26000, 0x2602c,
+ 0x26100, 0x2613c,
+ 0x26190, 0x261c8,
+ 0x26200, 0x26318,
+ 0x26400, 0x26528,
+ 0x26540, 0x26614,
+ 0x27000, 0x27040,
+ 0x2704c, 0x27060,
+ 0x270c0, 0x270ec,
+ 0x27200, 0x27268,
+ 0x27270, 0x27284,
+ 0x272fc, 0x27388,
+ 0x27400, 0x27404,
+ 0x27500, 0x27518,
+ 0x2752c, 0x2753c,
+ 0x27550, 0x27554,
+ 0x27600, 0x27600,
+ 0x27608, 0x27628,
+ 0x27630, 0x2763c,
+ 0x27700, 0x2771c,
+ 0x27780, 0x2778c,
+ 0x27800, 0x27c38,
+ 0x27c80, 0x27d7c,
+ 0x27e00, 0x27e04
+ };
+
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x1148,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a9c,
+ 0x5b9c, 0x5bfc,
+ 0x6000, 0x6040,
+ 0x6058, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x90f8,
+ 0x9400, 0x9470,
+ 0x9600, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x11088,
+ 0x1109c, 0x11110,
+ 0x11118, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c60,
+ 0x19c94, 0x19e10,
+ 0x19e50, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30144,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x31600,
+ 0x31608, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e04, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32208, 0x3223c,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b70,
+ 0x33000, 0x33048,
+ 0x33060, 0x3309c,
+ 0x330f0, 0x33148,
+ 0x33160, 0x3319c,
+ 0x331f0, 0x332e4,
+ 0x332f8, 0x333e4,
+ 0x333f8, 0x33448,
+ 0x33460, 0x3349c,
+ 0x334f0, 0x33548,
+ 0x33560, 0x3359c,
+ 0x335f0, 0x336e4,
+ 0x336f8, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33948,
+ 0x33960, 0x3399c,
+ 0x339f0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34144,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x35600,
+ 0x35608, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e04, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36208, 0x3623c,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b70,
+ 0x37000, 0x37048,
+ 0x37060, 0x3709c,
+ 0x370f0, 0x37148,
+ 0x37160, 0x3719c,
+ 0x371f0, 0x372e4,
+ 0x372f8, 0x373e4,
+ 0x373f8, 0x37448,
+ 0x37460, 0x3749c,
+ 0x374f0, 0x37548,
+ 0x37560, 0x3759c,
+ 0x375f0, 0x376e4,
+ 0x376f8, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37948,
+ 0x37960, 0x3799c,
+ 0x379f0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38100, 0x38144,
+ 0x38190, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a04,
+ 0x38a0c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38c24,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x39600,
+ 0x39608, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e04, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a208, 0x3a23c,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab70,
+ 0x3b000, 0x3b048,
+ 0x3b060, 0x3b09c,
+ 0x3b0f0, 0x3b148,
+ 0x3b160, 0x3b19c,
+ 0x3b1f0, 0x3b2e4,
+ 0x3b2f8, 0x3b3e4,
+ 0x3b3f8, 0x3b448,
+ 0x3b460, 0x3b49c,
+ 0x3b4f0, 0x3b548,
+ 0x3b560, 0x3b59c,
+ 0x3b5f0, 0x3b6e4,
+ 0x3b6f8, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b948,
+ 0x3b960, 0x3b99c,
+ 0x3b9f0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca04,
+ 0x3ca0c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3cc24,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d600,
+ 0x3d608, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de04, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e208, 0x3e23c,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb70,
+ 0x3f000, 0x3f048,
+ 0x3f060, 0x3f09c,
+ 0x3f0f0, 0x3f148,
+ 0x3f160, 0x3f19c,
+ 0x3f1f0, 0x3f2e4,
+ 0x3f2f8, 0x3f3e4,
+ 0x3f3f8, 0x3f448,
+ 0x3f460, 0x3f49c,
+ 0x3f4f0, 0x3f548,
+ 0x3f560, 0x3f59c,
+ 0x3f5f0, 0x3f6e4,
+ 0x3f6f8, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f948,
+ 0x3f960, 0x3f99c,
+ 0x3f9f0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40068,
+ 0x40080, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40298,
+ 0x402ac, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41304, 0x413c4,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44078,
+ 0x440c0, 0x44278,
+ 0x442c0, 0x44478,
+ 0x444c0, 0x44678,
+ 0x446c0, 0x44878,
+ 0x448c0, 0x449fc,
+ 0x45000, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48068,
+ 0x48080, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48298,
+ 0x482ac, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49304, 0x493c4,
+ 0x49400, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c078,
+ 0x4c0c0, 0x4c278,
+ 0x4c2c0, 0x4c478,
+ 0x4c4c0, 0x4c678,
+ 0x4c6c0, 0x4c878,
+ 0x4c8c0, 0x4c9fc,
+ 0x4d000, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
+ u32 *buf_end = (u32 *)((char *)buf + buf_size);
+ const unsigned int *reg_ranges;
+ int reg_ranges_size, range;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Select the right set of register ranges to dump depending on the
+ * adapter chip type.
+ */
+ switch (chip_version) {
+ case CHELSIO_T4:
+ reg_ranges = t4_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
+ break;
+
+ case CHELSIO_T5:
+ reg_ranges = t5_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+ break;
+
+ default:
+ dev_err(adap->pdev_dev,
+ "Unsupported chip version %d\n", chip_version);
+ return;
+ }
+
+ /* Clear the register buffer and insert the appropriate register
+ * values selected by the above register ranges.
+ */
+ memset(buf, 0, buf_size);
+ for (range = 0; range < reg_ranges_size; range += 2) {
+ unsigned int reg = reg_ranges[range];
+ unsigned int last_reg = reg_ranges[range + 1];
+ u32 *bufp = (u32 *)((char *)buf + reg);
+
+ /* Iterate across the register range filling in the register
+ * buffer but don't write past the end of the register buffer.
+ */
+ while (reg <= last_reg && bufp < buf_end) {
+ *bufp++ = t4_read_reg(adap, reg);
+ reg += sizeof(u32);
+ }
+ }
+}
+
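
Callers are expected to pair the two helpers: size the buffer with
t4_get_regs_len() and let t4_get_regs() fill and zero-pad it. The sketch below
shows how an ethtool .get_regs callback might do that; it assumes the usual
cxgb4 helpers (netdev2adap(), mk_adap_vers()) are in scope and is not the
driver's actual callback.

    /* sketch only -- kernel context assumed */
    static void example_get_regs(struct net_device *dev,
                                 struct ethtool_regs *regs, void *buf)
    {
        struct adapter *adap = netdev2adap(dev);

        regs->version = mk_adap_vers(adap);     /* version encoding assumed */

        /* the ethtool core sized 'buf' via the .get_regs_len hook, i.e.
         * t4_get_regs_len(adap) bytes, so the dump cannot overrun it
         */
        t4_get_regs(adap, buf, t4_get_regs_len(adap));
    }
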
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
@@ -867,7 +1595,7 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
* Read the specified number of 32-bit words from the serial flash.
* If @byte_oriented is set the read data is stored as a byte array
* (i.e., big-endian), otherwise as 32-bit words in the platform's
- * natural endianess.
+ * natural endianness.
*/
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented)
@@ -3558,7 +4286,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* For the single-MTU buffers in unpacked mode we need to include
* space for the SGE Control Packet Shift, 14 byte Ethernet header,
* possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
- * Padding boundry. All of these are accommodated in the Factory
+ * Padding boundary. All of these are accommodated in the Factory
* Default Firmware Configuration File but we need to adjust it for
* this host's cache line size.
*/
@@ -4582,7 +5310,7 @@ int t4_init_tp_params(struct adapter *adap)
PROTOCOL_F);
/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
- * represents the presense of an Outer VLAN instead of a VNIC ID.
+ * represents the presence of an Outer VLAN instead of a VNIC ID.
*/
if ((adap->params.tp.ingress_config & VNIC_F) == 0)
adap->params.tp.vnic_shift = -1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 0fb975e258b3..30a2f56e99c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -794,6 +794,14 @@ struct cpl_rx_pkt {
__be16 err_vec;
};
+#define RXF_PSH_S 20
+#define RXF_PSH_V(x) ((x) << RXF_PSH_S)
+#define RXF_PSH_F RXF_PSH_V(1U)
+
+#define RXF_SYN_S 21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F RXF_SYN_V(1U)
+
#define RXF_UDP_S 22
#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
#define RXF_UDP_F RXF_UDP_V(1U)
@@ -810,6 +818,18 @@ struct cpl_rx_pkt {
#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
#define RXF_IP6_F RXF_IP6_V(1U)
+#define RXF_SYN_COOKIE_S 26
+#define RXF_SYN_COOKIE_V(x) ((x) << RXF_SYN_COOKIE_S)
+#define RXF_SYN_COOKIE_F RXF_SYN_COOKIE_V(1U)
+
+#define RXF_FCOE_S 26
+#define RXF_FCOE_V(x) ((x) << RXF_FCOE_S)
+#define RXF_FCOE_F RXF_FCOE_V(1U)
+
+#define RXF_LRO_S 27
+#define RXF_LRO_V(x) ((x) << RXF_LRO_S)
+#define RXF_LRO_F RXF_LRO_V(1U)
+
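
These single-bit fields follow the driver's usual _S/_V/_F pattern and are what
the FCoE receive path in sge.c above tests against l2info. A host-order
standalone model of that gate, with the bit positions copied from the
definitions here (the driver compares big-endian values via cpu_to_be32()):

    #include <stdint.h>
    #include <stdio.h>

    #define RXF_PSH_F   (1U << 20)
    #define RXF_SYN_F   (1U << 21)
    #define RXF_UDP_F   (1U << 22)
    #define RXF_TCP_F   (1U << 23)
    #define RXF_IP_F    (1U << 24)
    #define RXF_IP6_F   (1U << 25)
    #define RXF_FCOE_F  (1U << 26)
    #define RXF_LRO_F   (1U << 27)

    #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
                              RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)

    int main(void)
    {
        uint32_t l2info = RXF_FCOE_F;   /* pretend CPL_RX_PKT l2info word */

        /* FCoE checksum handling applies only when the chip flagged the
         * frame as FCoE and as none of the TCP/UDP/IP/LRO types
         */
        if (!(l2info & CPL_RX_PKT_FLAGS) && (l2info & RXF_FCOE_F))
            printf("candidate FCoE frame\n");
        return 0;
    }
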
/* rx_pkt.l2info fields */
#define RX_ETHHDR_LEN_S 0
#define RX_ETHHDR_LEN_M 0x1F
@@ -846,6 +866,11 @@ struct cpl_rx_pkt {
#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
+/* rx_pkt.err_vec fields */
+#define RXERR_CSUM_S 13
+#define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S)
+#define RXERR_CSUM_F RXERR_CSUM_V(1U)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index ddfb5b846045..1a9a6f334d2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -60,8 +60,6 @@
* -- Used to finish the definition of the PCI ID Table. Note that we
* -- will be adding a trailing semi-colon (";") here.
*/
-#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
-
#ifndef CH_PCI_DEVICE_ID_FUNCTION
#error CH_PCI_DEVICE_ID_FUNCTION not defined!
#endif
@@ -154,8 +152,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
-#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
-
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a4a19e0ec7f5..03fbfd1fb3df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -36,7 +36,7 @@
#define _T4FW_INTERFACE_H_
enum fw_retval {
- FW_SUCCESS = 0, /* completed sucessfully */
+ FW_SUCCESS = 0, /* completed successfully */
FW_EPERM = 1, /* operation not permitted */
FW_ENOENT = 2, /* no such file or directory */
FW_EIO = 5, /* input/output error; hw bad */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 122e2964e63b..1d893b0b7ddf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -3034,7 +3034,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
/* Macros needed to support the PCI Device ID Table ...
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
- static struct pci_device_id cxgb4vf_pci_tbl[] = {
+ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x8
#define CH_PCI_ID_TABLE_ENTRY(devid) \
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index e0d711071afb..482f6de6817d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -875,7 +875,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* Write Header (incorporated as part of the cpl_tx_pkt_lso and
* cpl_tx_pkt structures), followed by either a TX Packet Write CPL
* message or, if we're doing a Large Send Offload, an LSO CPL message
- * with an embeded TX Packet Write CPL message.
+ * with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size)
@@ -1751,7 +1751,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* Figure out what kind of response we've received from the
* SGE.
*/
- rmb();
+ dma_rmb();
rsp_type = RSPD_TYPE(rc->type_gen);
if (likely(rsp_type == RSP_TYPE_FLBUF)) {
struct page_frag *fp;
@@ -1935,7 +1935,7 @@ static unsigned int process_intrq(struct adapter *adapter)
* error and go on to the next response message. This should
* never happen ...
*/
- rmb();
+ dma_rmb();
if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
dev_err(adapter->pdev_dev,
"Unexpected INTRQ response type %d\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 280b4a215849..966ee900ed00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -339,7 +339,7 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
* @adapter: the adapter
*
* Issues a reset command to FW. For a Physical Function this would
- * result in the Firmware reseting all of its state. For a Virtual
+ * result in the Firmware resetting all of its state. For a Virtual
* Function this just resets the state associated with the VF.
*/
int t4vf_fw_reset(struct adapter *adapter)
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d1c025fd9726..60383040d6c6 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1578,7 +1578,7 @@ out1:
#ifndef CONFIG_CS89x0_PLATFORM
/*
- * This function converts the I/O port addres used by the cs89x0_probe() and
+ * This function converts the I/O port address used by the cs89x0_probe() and
* init_module() functions to the I/O memory address used by the
* cs89x0_probe1() function.
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index a5179bfcdc2c..204bd182473b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -893,7 +893,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
} else {
memset(pp, 0, sizeof(*pp));
if (vf == PORT_SELF_VF)
- memset(netdev->dev_addr, 0, ETH_ALEN);
+ eth_zero_addr(netdev->dev_addr);
}
} else {
/* Set flag to indicate that the port assoc/disassoc
@@ -903,14 +903,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
if (pp->request == PORT_REQUEST_DISASSOCIATE) {
- memset(pp->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(pp->mac_addr);
if (vf == PORT_SELF_VF)
- memset(netdev->dev_addr, 0, ETH_ALEN);
+ eth_zero_addr(netdev->dev_addr);
}
}
if (vf == PORT_SELF_VF)
- memset(pp->vf_mac, 0, ETH_ALEN);
+ eth_zero_addr(pp->vf_mac);
return err;
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 50a00777228e..afd8e78e024e 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -653,7 +653,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
if ( !(db->media_mode & DMFE_AUTO) )
db->op_mode = db->media_mode; /* Force Mode */
- /* Initialize Transmit/Receive decriptor and CR3/4 */
+ /* Initialize Transmit/Receive descriptor and CR3/4 */
dmfe_descriptor_init(dev);
/* Init CR6 to program DM910x operation */
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 1c5916b13778..2c30c0c83f98 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -564,7 +564,7 @@ static void uli526x_init(struct net_device *dev)
if ( !(db->media_mode & ULI526X_AUTO) )
db->op_mode = db->media_mode; /* Force Mode */
- /* Initialize Transmit/Receive decriptor and CR3/4 */
+ /* Initialize Transmit/Receive descriptor and CR3/4 */
uli526x_descriptor_init(dev, ioaddr);
/* Init CR6 to program M526X operation */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27b9fe99a9bd..1bf1cdce74ac 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -30,11 +30,12 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>
+#include <linux/cpumask.h>
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.4u"
+#define DRV_VER "10.6.0.1"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -87,6 +88,7 @@
#define BE3_MAX_EVT_QS 16
#define BE3_SRIOV_MAX_EVT_QS 8
+#define MAX_RSS_IFACES 15
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
#define MAX_TX_QS 32
@@ -97,6 +99,7 @@
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
+#define MAX_NUM_POST_ERX_DB 255u
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
@@ -182,6 +185,7 @@ struct be_eq_obj {
u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
+ cpumask_var_t affinity_mask;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define BE_EQ_IDLE 0
@@ -238,10 +242,17 @@ struct be_tx_stats {
struct u64_stats_sync sync_compl;
};
+/* Structure to hold some data of interest obtained from a TX CQE */
+struct be_tx_compl_info {
+ u8 status; /* Completion status */
+ u16 end_index; /* Completed TXQ Index */
+};
+
struct be_tx_obj {
u32 db_offset;
struct be_queue_info q;
struct be_queue_info cq;
+ struct be_tx_compl_info txcp;
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
@@ -370,6 +381,7 @@ enum vf_state {
#define BE_FLAGS_VXLAN_OFFLOADS BIT(8)
#define BE_FLAGS_SETUP_DONE BIT(9)
#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10)
+#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
@@ -404,8 +416,11 @@ struct be_resources {
u16 max_tx_qs;
u16 max_rss_qs;
u16 max_rx_qs;
+ u16 max_cq_count;
u16 max_uc_mac; /* Max UC MACs programmable */
u16 max_vlans; /* Number of vlans supported */
+ u16 max_iface_count;
+ u16 max_mcc_count;
u16 max_evt_qs;
u32 if_cap_flags;
u32 vf_if_cap_flags; /* VF if capability flags */
@@ -418,6 +433,39 @@ struct rss_info {
u8 rss_hkey[RSS_HASH_KEY_LEN];
};
+/* Macros to read/write the 'features' word of be_wrb_params structure.
+ */
+#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT
+#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT)
+
+#define BE_WRB_F_GET(word, name) \
+ (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name))
+
+#define BE_WRB_F_SET(word, name, val) \
+ ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name)))
+
+/* Feature/offload bits */
+enum {
+ BE_WRB_F_CRC_BIT, /* Ethernet CRC */
+ BE_WRB_F_IPCS_BIT, /* IP csum */
+ BE_WRB_F_TCPCS_BIT, /* TCP csum */
+ BE_WRB_F_UDPCS_BIT, /* UDP csum */
+ BE_WRB_F_LSO_BIT, /* LSO */
+ BE_WRB_F_LSO6_BIT, /* LSO6 */
+ BE_WRB_F_VLAN_BIT, /* VLAN */
+ BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */
+};
+
+/* The structure below provides a HW-agnostic abstraction of WRB params
+ * retrieved from a TX skb. This is in turn passed to chip specific routines
+ * during transmit, to set the corresponding params in the WRB.
+ */
+struct be_wrb_params {
+ u32 features; /* Feature bits */
+ u16 vlan_tag; /* VLAN tag */
+ u16 lso_mss; /* MSS for LSO */
+};
+
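
A quick standalone illustration of the features-word accessors; the macro bodies
are copied from this hunk, the enum is abridged, and BIT_MASK() is given a
simplified local definition so the example builds outside the kernel.

    #include <stdint.h>
    #include <stdio.h>

    enum { BE_WRB_F_CRC_BIT, BE_WRB_F_IPCS_BIT, BE_WRB_F_TCPCS_BIT,
           BE_WRB_F_UDPCS_BIT, BE_WRB_F_LSO_BIT };

    #define BIT_MASK(nr)        (1UL << (nr))
    #define BE_WRB_F_BIT(name)  BE_WRB_F_##name##_BIT
    #define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT)
    #define BE_WRB_F_GET(word, name) \
            (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name))
    #define BE_WRB_F_SET(word, name, val) \
            ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name)))

    int main(void)
    {
        uint32_t features = 0;

        BE_WRB_F_SET(features, LSO, 1);     /* mark the skb as LSO */
        BE_WRB_F_SET(features, IPCS, 1);    /* request IP checksum offload */
        printf("lso=%lu udpcs=%lu\n",
               BE_WRB_F_GET(features, LSO), BE_WRB_F_GET(features, UDPCS));
        return 0;
    }
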
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -449,6 +497,8 @@ struct be_adapter {
/* Rx rings */
u16 num_rx_qs;
+ u16 num_rss_qs;
+ u16 need_def_rxq;
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
@@ -463,7 +513,7 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
- struct delayed_work func_recovery_work;
+ struct delayed_work be_err_detection_work;
u32 flags;
u32 cmd_privileges;
/* Ethtool knobs and info */
@@ -596,9 +646,8 @@ extern const struct ethtool_ops be_ethtool_ops;
for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
i++, rxo++)
-/* Skip the default non-rss queue (last one)*/
#define for_all_rss_queues(adapter, rxo, i) \
- for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \
i++, rxo++)
#define for_all_tx_queues(adapter, txo, i) \
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 7f05f309e935..fb140faeafb1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -635,73 +635,16 @@ static int lancer_wait_ready(struct be_adapter *adapter)
for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_RDY_MASK)
- break;
-
- msleep(1000);
- }
-
- if (i == SLIPORT_READY_TIMEOUT)
- return sliport_status ? : -1;
-
- return 0;
-}
-
-static bool lancer_provisioning_error(struct be_adapter *adapter)
-{
- u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
-
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
- sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
-
- if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
- sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
- return true;
- }
- return false;
-}
-
-int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
- int status;
- u32 sliport_status, err, reset_needed;
- bool resource_error;
+ return 0;
- resource_error = lancer_provisioning_error(adapter);
- if (resource_error)
- return -EAGAIN;
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
+ !(sliport_status & SLIPORT_STATUS_RN_MASK))
+ return -EIO;
- status = lancer_wait_ready(adapter);
- if (!status) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- err = sliport_status & SLIPORT_STATUS_ERR_MASK;
- reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
- if (err && reset_needed) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
-
- /* check if adapter has corrected the error */
- status = lancer_wait_ready(adapter);
- sliport_status = ioread32(adapter->db +
- SLIPORT_STATUS_OFFSET);
- sliport_status &= (SLIPORT_STATUS_ERR_MASK |
- SLIPORT_STATUS_RN_MASK);
- if (status || sliport_status)
- status = -1;
- } else if (err || reset_needed) {
- status = -1;
- }
+ msleep(1000);
}
- /* Stop error recovery if error is not recoverable.
- * No resource error is temporary errors and will go away
- * when PF provisions resources.
- */
- resource_error = lancer_provisioning_error(adapter);
- if (resource_error)
- status = -EAGAIN;
- return status;
+ return sliport_status ? : -1;
}
int be_fw_wait_ready(struct be_adapter *adapter)
@@ -720,6 +663,10 @@ int be_fw_wait_ready(struct be_adapter *adapter)
}
do {
+ /* There's no means to poll POST state on BE2/3 VFs */
+ if (BEx_chip(adapter) && be_virtfn(adapter))
+ return 0;
+
stage = be_POST_stage_get(adapter);
if (stage == POST_STAGE_ARMFW_RDY)
return 0;
@@ -734,7 +681,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
err:
dev_err(dev, "POST timeout; stage=%#x\n", stage);
- return -1;
+ return -ETIMEDOUT;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
@@ -2123,16 +2070,12 @@ int be_cmd_reset_function(struct be_adapter *adapter)
int status;
if (lancer_chip(adapter)) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
status = lancer_wait_ready(adapter);
- if (!status) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
- status = lancer_test_and_set_rdy_state(adapter);
- }
- if (status) {
+ if (status)
dev_err(&adapter->pdev->dev,
"Adapter in non recoverable error\n");
- }
return status;
}
@@ -3075,7 +3018,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
- * or one or more true or pseudo permanant mac addresses.
+ * or one or more true or pseudo permanent mac addresses.
* If an active mac_id is present, return first active mac_id
* found.
*/
@@ -3130,7 +3073,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
int status;
bool pmac_valid = false;
- memset(mac, 0, ETH_ALEN);
+ eth_zero_addr(mac);
if (BEx_chip(adapter)) {
if (be_physfn(adapter))
@@ -3631,12 +3574,12 @@ static void be_copy_nic_desc(struct be_resources *res,
res->max_rss_qs = le16_to_cpu(desc->rssq_count);
res->max_rx_qs = le16_to_cpu(desc->rq_count);
res->max_evt_qs = le16_to_cpu(desc->eq_count);
+ res->max_cq_count = le16_to_cpu(desc->cq_count);
+ res->max_iface_count = le16_to_cpu(desc->iface_count);
+ res->max_mcc_count = le16_to_cpu(desc->mcc_count);
/* Clear flags that driver is not interested in */
res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
BE_IF_CAP_FLAGS_WANT;
- /* Need 1 RXQ as the default RXQ */
- if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
- res->max_rss_qs -= 1;
}
/* Uses Mbox */
@@ -3698,7 +3641,7 @@ err:
/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 domain)
+ struct be_resources *res, u8 query, u8 domain)
{
struct be_cmd_resp_get_profile_config *resp;
struct be_cmd_req_get_profile_config *req;
@@ -3708,7 +3651,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_nic_res_desc *nic;
struct be_mcc_wrb wrb = {0};
struct be_dma_mem cmd;
- u32 desc_count;
+ u16 desc_count;
int status;
memset(&cmd, 0, sizeof(struct be_dma_mem));
@@ -3727,12 +3670,19 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
req->hdr.version = 1;
req->type = ACTIVE_PROFILE_TYPE;
+ /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
+ * descriptors with all bits set to "1" for the fields which can be
+ * modified using SET_PROFILE_CONFIG cmd.
+ */
+ if (query == RESOURCE_MODIFIABLE)
+ req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
+
status = be_cmd_notify_wait(adapter, &wrb);
if (status)
goto err;
resp = cmd.va;
- desc_count = le32_to_cpu(resp->desc_count);
+ desc_count = le16_to_cpu(resp->desc_count);
pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
desc_count);
@@ -3857,23 +3807,80 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
1, version, domain);
}
+static void be_fill_vf_res_template(struct be_adapter *adapter,
+ struct be_resources pool_res,
+ u16 num_vfs, u16 num_vf_qs,
+ struct be_nic_res_desc *nic_vft)
+{
+ u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
+ struct be_resources res_mod = {0};
+
+ /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+ * which are modifiable using SET_PROFILE_CONFIG cmd.
+ */
+ be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
+
+ /* If RSS IFACE capability flags are modifiable for a VF, set the
+ * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+ * more than 1 RSSQ is available for a VF.
+ * Otherwise, provision only 1 queue pair for VF.
+ */
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+ nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ if (num_vf_qs > 1) {
+ vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+ if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+ vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+ } else {
+ vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+ BE_IF_FLAGS_DEFQ_RSS);
+ }
+
+ nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
+ } else {
+ num_vf_qs = 1;
+ }
+
+ nic_vft->rq_count = cpu_to_le16(num_vf_qs);
+ nic_vft->txq_count = cpu_to_le16(num_vf_qs);
+ nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
+ nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
+ (num_vfs + 1));
+
+ /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+ * among the PF and its VFs, if the fields are changeable
+ */
+ if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+ nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
+ (num_vfs + 1));
+
+ if (res_mod.max_vlans == FIELD_MODIFIABLE)
+ nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
+ (num_vfs + 1));
+
+ if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+ nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
+ (num_vfs + 1));
+
+ if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+ nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
+ (num_vfs + 1));
+}
+
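
The template divides each modifiable pool value evenly across the functions
sharing it, i.e. the PF plus its VFs. A worked example with invented pool
sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_vfs = 15;              /* VFs being enabled */
        unsigned int max_cq_count = 256;        /* invented pool sizes */
        unsigned int max_uc_mac = 64, max_vlans = 64;

        printf("CQs per function:   %u\n", max_cq_count / (num_vfs + 1)); /* 16 */
        printf("MACs per function:  %u\n", max_uc_mac / (num_vfs + 1));   /*  4 */
        printf("VLANs per function: %u\n", max_vlans / (num_vfs + 1));    /*  4 */
        return 0;
    }
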
int be_cmd_set_sriov_config(struct be_adapter *adapter,
- struct be_resources res, u16 num_vfs)
+ struct be_resources pool_res, u16 num_vfs,
+ u16 num_vf_qs)
{
struct {
struct be_pcie_res_desc pcie;
struct be_nic_res_desc nic_vft;
} __packed desc;
- u16 vf_q_count;
-
- if (BEx_chip(adapter) || lancer_chip(adapter))
- return 0;
/* PF PCIE descriptor */
be_reset_pcie_desc(&desc.pcie);
desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
- desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
desc.pcie.pf_num = adapter->pdev->devfn;
desc.pcie.sriov_state = num_vfs ? 1 : 0;
desc.pcie.num_vfs = cpu_to_le16(num_vfs);
@@ -3882,32 +3889,12 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
be_reset_nic_desc(&desc.nic_vft);
desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
- desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
- (1 << NOSV_SHIFT);
+ desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
desc.nic_vft.pf_num = adapter->pdev->devfn;
desc.nic_vft.vf_num = 0;
- if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
- /* If number of VFs requested is 8 less than max supported,
- * assign 8 queue pairs to the PF and divide the remaining
- * resources evenly among the VFs
- */
- if (num_vfs < (be_max_vfs(adapter) - 8))
- vf_q_count = (res.max_rss_qs - 8) / num_vfs;
- else
- vf_q_count = res.max_rss_qs / num_vfs;
-
- desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
- desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
- desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
- desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
- } else {
- desc.nic_vft.txq_count = cpu_to_le16(1);
- desc.nic_vft.rq_count = cpu_to_le16(1);
- desc.nic_vft.rssq_count = cpu_to_le16(0);
- /* One CQ for each TX, RX and MCCQ */
- desc.nic_vft.cq_count = cpu_to_le16(3);
- }
+ be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
+ &desc.nic_vft);
return be_cmd_set_profile_config(adapter, &desc,
2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a7634a3f052a..1ec22300e254 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -588,14 +588,15 @@ enum be_if_flags {
BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
- BE_IF_FLAGS_MULTICAST = 0x1000
+ BE_IF_FLAGS_MULTICAST = 0x1000,
+ BE_IF_FLAGS_DEFQ_RSS = 0x1000000
};
#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
- BE_IF_FLAGS_UNTAGGED)
+ BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_DEFQ_RSS)
#define BE_IF_FLAGS_ALL_PROMISCUOUS (BE_IF_FLAGS_PROMISCUOUS | \
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
@@ -2021,6 +2022,7 @@ struct be_cmd_req_set_ext_fat_caps {
#define PORT_RESOURCE_DESC_TYPE_V1 0x55
#define MAX_RESOURCE_DESC 264
+#define IF_CAPS_FLAGS_VALID_SHIFT 0 /* IF caps valid */
#define VFT_SHIFT 3 /* VF template */
#define IMM_SHIFT 6 /* Immediate */
#define NOSV_SHIFT 7 /* No save */
@@ -2131,20 +2133,28 @@ struct be_cmd_resp_get_func_config {
u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
-#define ACTIVE_PROFILE_TYPE 0x2
+enum {
+ RESOURCE_LIMITS,
+ RESOURCE_MODIFIABLE
+};
+
struct be_cmd_req_get_profile_config {
struct be_cmd_req_hdr hdr;
u8 rsvd;
+#define ACTIVE_PROFILE_TYPE 0x2
+#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3)
u8 type;
u16 rsvd1;
};
struct be_cmd_resp_get_profile_config {
struct be_cmd_resp_hdr hdr;
- u32 desc_count;
+ __le16 desc_count;
+ u16 rsvd;
u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
+#define FIELD_MODIFIABLE 0xFFFF
struct be_cmd_req_set_profile_config {
struct be_cmd_req_hdr hdr;
u32 rsvd;
@@ -2344,7 +2354,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 domain);
+ struct be_resources *res, u8 query, u8 domain);
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
@@ -2355,4 +2365,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
- struct be_resources res, u16 num_vfs);
+ struct be_resources res, u16 num_vfs,
+ u16 num_vf_qs);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 4d2de4700769..b765c24625bf 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1097,7 +1097,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return status;
if (be_multi_rxq(adapter)) {
- for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for (j = 0; j < 128; j += adapter->num_rss_qs) {
for_all_rss_queues(adapter, rxo, i) {
if ((j + i) >= 128)
break;
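
The loop above (and the matching one in be_rx_qs_create()) walks the 128-entry RSS indirection table in strides of num_rss_qs, so every RSS queue claims one slot per stride and the table is populated round-robin. A standalone sketch of the fill pattern, assuming a hypothetical 4-queue setup and using queue indices in place of rss_id values:

/* Standalone sketch (not part of the patch): round-robin fill of the RSS
 * indirection table, with a hypothetical queue count.
 */
#include <stdio.h>

#define RSS_TABLE_LEN 128

int main(void)
{
        unsigned char table[RSS_TABLE_LEN];
        unsigned int num_rss_qs = 4;    /* hypothetical */
        unsigned int i, j;

        for (j = 0; j < RSS_TABLE_LEN; j += num_rss_qs)
                for (i = 0; i < num_rss_qs; i++) {
                        if (j + i >= RSS_TABLE_LEN)
                                break;
                        table[j + i] = (unsigned char)i;
                }

        for (j = 0; j < 8; j++)
                printf("%u ", table[j]);        /* 0 1 2 3 0 1 2 3 */
        printf("...\n");
        return 0;
}
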
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e6b790f0d9dc..a6dcbf850c1f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -30,6 +30,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
+/* num_vfs module param is obsolete.
+ * Use sysfs method to enable/disable VFs.
+ */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
@@ -727,48 +730,86 @@ static u16 skb_ip_proto(struct sk_buff *skb)
ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}
-static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len,
- bool skip_hw_vlan)
+static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
- u16 vlan_tag, proto;
+ return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
+}
- memset(hdr, 0, sizeof(*hdr));
+static inline bool be_can_txq_wake(struct be_tx_obj *txo)
+{
+ return atomic_read(&txo->q.used) < txo->q.len / 2;
+}
- SET_TX_WRB_HDR_BITS(crc, hdr, 1);
+static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
+{
+ return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
+}
+
+static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ struct be_wrb_params *wrb_params)
+{
+ u16 proto;
if (skb_is_gso(skb)) {
- SET_TX_WRB_HDR_BITS(lso, hdr, 1);
- SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
+ BE_WRB_F_SET(wrb_params->features, LSO, 1);
+ wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
- SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
+ BE_WRB_F_SET(wrb_params->features, LSO6, 1);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->encapsulation) {
- SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
+ BE_WRB_F_SET(wrb_params->features, IPCS, 1);
proto = skb_inner_ip_proto(skb);
} else {
proto = skb_ip_proto(skb);
}
if (proto == IPPROTO_TCP)
- SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
+ BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
else if (proto == IPPROTO_UDP)
- SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
+ BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
}
if (skb_vlan_tag_present(skb)) {
- SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
- vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
+ BE_WRB_F_SET(wrb_params->features, VLAN, 1);
+ wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
}
- SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
- SET_TX_WRB_HDR_BITS(len, hdr, len);
+ BE_WRB_F_SET(wrb_params->features, CRC, 1);
+}
- /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
- * When this hack is not needed, the evt bit is set while ringing DB
+static void wrb_fill_hdr(struct be_adapter *adapter,
+ struct be_eth_hdr_wrb *hdr,
+ struct be_wrb_params *wrb_params,
+ struct sk_buff *skb)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ SET_TX_WRB_HDR_BITS(crc, hdr,
+ BE_WRB_F_GET(wrb_params->features, CRC));
+ SET_TX_WRB_HDR_BITS(ipcs, hdr,
+ BE_WRB_F_GET(wrb_params->features, IPCS));
+ SET_TX_WRB_HDR_BITS(tcpcs, hdr,
+ BE_WRB_F_GET(wrb_params->features, TCPCS));
+ SET_TX_WRB_HDR_BITS(udpcs, hdr,
+ BE_WRB_F_GET(wrb_params->features, UDPCS));
+
+ SET_TX_WRB_HDR_BITS(lso, hdr,
+ BE_WRB_F_GET(wrb_params->features, LSO));
+ SET_TX_WRB_HDR_BITS(lso6, hdr,
+ BE_WRB_F_GET(wrb_params->features, LSO6));
+ SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
+
+ /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
+ * hack is not needed, the evt bit is set while ringing DB.
*/
- if (skip_hw_vlan)
- SET_TX_WRB_HDR_BITS(event, hdr, 1);
+ SET_TX_WRB_HDR_BITS(event, hdr,
+ BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
+ SET_TX_WRB_HDR_BITS(vlan, hdr,
+ BE_WRB_F_GET(wrb_params->features, VLAN));
+ SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
+
+ SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
+ SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -788,77 +829,124 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
}
-/* Returns the number of WRBs used up by the skb */
+/* Grab a WRB header for xmit */
+static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
+{
+ u16 head = txo->q.head;
+
+ queue_head_inc(&txo->q);
+ return head;
+}
+
+/* Set up the WRB header for xmit */
+static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
+ struct be_tx_obj *txo,
+ struct be_wrb_params *wrb_params,
+ struct sk_buff *skb, u16 head)
+{
+ u32 num_frags = skb_wrb_cnt(skb);
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
+
+ wrb_fill_hdr(adapter, hdr, wrb_params, skb);
+ be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
+ BUG_ON(txo->sent_skb_list[head]);
+ txo->sent_skb_list[head] = skb;
+ txo->last_req_hdr = head;
+ atomic_add(num_frags, &txq->used);
+ txo->last_req_wrb_cnt = num_frags;
+ txo->pend_wrb_cnt += num_frags;
+}
+
+/* Setup a WRB fragment (buffer descriptor) for xmit */
+static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
+ int len)
+{
+ struct be_eth_wrb *wrb;
+ struct be_queue_info *txq = &txo->q;
+
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, len);
+ queue_head_inc(txq);
+}
+
+/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
+ * was invoked. The producer index is restored to the previous packet and the
+ * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
+ */
+static void be_xmit_restore(struct be_adapter *adapter,
+ struct be_tx_obj *txo, u16 head, bool map_single,
+ u32 copied)
+{
+ struct device *dev;
+ struct be_eth_wrb *wrb;
+ struct be_queue_info *txq = &txo->q;
+
+ dev = &adapter->pdev->dev;
+ txq->head = head;
+
+ /* skip the first wrb (hdr); it's not mapped */
+ queue_head_inc(txq);
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(dev, wrb, map_single);
+ map_single = false;
+ copied -= le32_to_cpu(wrb->frag_len);
+ queue_head_inc(txq);
+ }
+
+ txq->head = head;
+}
+
+/* Enqueue the given packet for transmit. This routine allocates WRBs for the
+ * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
+ * of WRBs used up by the packet.
+ */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
- struct sk_buff *skb, bool skip_hw_vlan)
+ struct sk_buff *skb,
+ struct be_wrb_params *wrb_params)
{
u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
struct device *dev = &adapter->pdev->dev;
struct be_queue_info *txq = &txo->q;
- struct be_eth_hdr_wrb *hdr;
bool map_single = false;
- struct be_eth_wrb *wrb;
- dma_addr_t busaddr;
u16 head = txq->head;
+ dma_addr_t busaddr;
+ int len;
- hdr = queue_head_node(txq);
- wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
- be_dws_cpu_to_le(hdr, sizeof(*hdr));
-
- queue_head_inc(txq);
+ head = be_tx_get_wrb_hdr(txo);
if (skb->len > skb->data_len) {
- int len = skb_headlen(skb);
+ len = skb_headlen(skb);
busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
goto dma_err;
map_single = true;
- wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, len);
- queue_head_inc(txq);
+ be_tx_setup_wrb_frag(txo, busaddr, len);
copied += len;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ len = skb_frag_size(frag);
- busaddr = skb_frag_dma_map(dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
+ busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
goto dma_err;
- wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, skb_frag_size(frag));
- queue_head_inc(txq);
- copied += skb_frag_size(frag);
+ be_tx_setup_wrb_frag(txo, busaddr, len);
+ copied += len;
}
- BUG_ON(txo->sent_skb_list[head]);
- txo->sent_skb_list[head] = skb;
- txo->last_req_hdr = head;
- atomic_add(wrb_cnt, &txq->used);
- txo->last_req_wrb_cnt = wrb_cnt;
- txo->pend_wrb_cnt += wrb_cnt;
+ be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
be_tx_stats_update(txo, skb);
return wrb_cnt;
dma_err:
- /* Bring the queue back to the state it was in before this
- * routine was invoked.
- */
- txq->head = head;
- /* skip the first wrb (hdr); it's not mapped */
- queue_head_inc(txq);
- while (copied) {
- wrb = queue_head_node(txq);
- unmap_tx_frag(dev, wrb, map_single);
- map_single = false;
- copied -= le32_to_cpu(wrb->frag_len);
- adapter->drv_stats.dma_map_errors++;
- queue_head_inc(txq);
- }
- txq->head = head;
+ adapter->drv_stats.dma_map_errors++;
+ be_xmit_restore(adapter, txo, head, map_single, copied);
return 0;
}
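
The helpers introduced above encode the TX ring's stop/wake policy: be_is_txq_full() stops the subqueue once fewer than BE_MAX_TX_FRAG_COUNT WRBs remain free, so a worst-case fragmented skb can never overrun the ring, and be_can_txq_wake() only wakes it after the ring has drained below half occupancy, which adds hysteresis. A standalone sketch with hypothetical ring sizes (not the driver's actual constants):

/* Standalone sketch (not part of the patch): the stop/wake thresholds of
 * be_is_txq_full()/be_can_txq_wake(), with hypothetical sizes.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_TX_FRAGS 32 /* stand-in for BE_MAX_TX_FRAG_COUNT */

static bool txq_full(unsigned int used, unsigned int len)
{
        /* stop once a worst-case fragmented packet might no longer fit */
        return used + MAX_TX_FRAGS >= len;
}

static bool txq_can_wake(unsigned int used, unsigned int len)
{
        /* wake only after the ring has drained below half occupancy */
        return used < len / 2;
}

int main(void)
{
        unsigned int len = 2048;

        printf("used=2030 full=%d\n", txq_full(2030, len));     /* 1 */
        printf("used=1500 wake=%d\n", txq_can_wake(1500, len)); /* 0 */
        printf("used=900  wake=%d\n", txq_can_wake(900, len));  /* 1 */
        return 0;
}
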
@@ -869,7 +957,8 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
struct sk_buff *skb,
- bool *skip_hw_vlan)
+ struct be_wrb_params
+ *wrb_params)
{
u16 vlan_tag = 0;
@@ -886,8 +975,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
* skip VLAN insertion
*/
- if (skip_hw_vlan)
- *skip_hw_vlan = true;
+ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
}
if (vlan_tag) {
@@ -905,8 +993,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
vlan_tag);
if (unlikely(!skb))
return skb;
- if (skip_hw_vlan)
- *skip_hw_vlan = true;
+ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
}
return skb;
@@ -946,7 +1033,8 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
struct sk_buff *skb,
- bool *skip_hw_vlan)
+ struct be_wrb_params
+ *wrb_params)
{
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
unsigned int eth_hdr_len;
@@ -970,7 +1058,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
- *skip_hw_vlan = true;
+ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
@@ -978,7 +1066,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (skb->ip_summed != CHECKSUM_PARTIAL &&
skb_vlan_tag_present(skb)) {
- skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
+ skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
if (unlikely(!skb))
goto err;
}
@@ -1000,7 +1088,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (be_ipv6_tx_stall_chk(adapter, skb) &&
be_vlan_tag_tx_chk(adapter, skb)) {
- skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
+ skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
if (unlikely(!skb))
goto err;
}
@@ -1014,7 +1102,7 @@ err:
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
struct sk_buff *skb,
- bool *skip_hw_vlan)
+ struct be_wrb_params *wrb_params)
{
/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
* less may cause a transmit stall on that port. So the work-around is
@@ -1026,7 +1114,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
}
if (BEx_chip(adapter) || lancer_chip(adapter)) {
- skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
+ skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
if (!skb)
return NULL;
}
@@ -1060,24 +1148,26 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- bool skip_hw_vlan = false, flush = !skb->xmit_more;
struct be_adapter *adapter = netdev_priv(netdev);
u16 q_idx = skb_get_queue_mapping(skb);
struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
- struct be_queue_info *txq = &txo->q;
+ struct be_wrb_params wrb_params = { 0 };
+ bool flush = !skb->xmit_more;
u16 wrb_cnt;
- skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
+ skb = be_xmit_workarounds(adapter, skb, &wrb_params);
if (unlikely(!skb))
goto drop;
- wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
+ be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
+
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt)) {
dev_kfree_skb_any(skb);
goto drop;
}
- if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
+ if (be_is_txq_full(txo)) {
netif_stop_subqueue(netdev, q_idx);
tx_stats(txo)->tx_stops++;
}
@@ -2032,7 +2122,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
if (rxo->rx_post_starved)
rxo->rx_post_starved = false;
do {
- notify = min(256u, posted);
+ notify = min(MAX_NUM_POST_ERX_DB, posted);
be_rxq_notify(adapter, rxq->id, notify);
posted -= notify;
} while (posted);
@@ -2042,18 +2132,23 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
}
}
-static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
+static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
- struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
+ struct be_queue_info *tx_cq = &txo->cq;
+ struct be_tx_compl_info *txcp = &txo->txcp;
+ struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
- if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
+ if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
return NULL;
+ /* Ensure load ordering of valid bit dword and other dwords below */
rmb();
- be_dws_le_to_cpu(txcp, sizeof(*txcp));
+ be_dws_le_to_cpu(compl, sizeof(*compl));
- txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
+ txcp->status = GET_TX_COMPL_BITS(status, compl);
+ txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
+ compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
queue_tail_inc(tx_cq);
return txcp;
}
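
be_tx_compl_get() above follows the usual DMA completion-ring pattern: check the valid dword first, order that read against the rest of the entry with rmb(), copy out status/wrb_index, then clear the valid dword so the entry is not consumed twice. A standalone C11 sketch of the same pattern, using an acquire load where the driver uses rmb() (the entry layout is hypothetical, not the hardware descriptor):

/* Standalone sketch (not part of the patch): poll-the-valid-dword pattern,
 * modelled with C11 atomics instead of the kernel's rmb().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct compl_entry {
        atomic_uint valid;      /* the producer writes this last */
        uint16_t status;
        uint16_t wrb_index;
};

static bool compl_get(struct compl_entry *e, uint16_t *status, uint16_t *idx)
{
        /* the acquire load plays the role of rmb(): the field reads below
         * cannot be reordered before the valid-bit check
         */
        if (!atomic_load_explicit(&e->valid, memory_order_acquire))
                return false;

        *status = e->status;
        *idx = e->wrb_index;

        /* consume the entry so it is not processed twice */
        atomic_store_explicit(&e->valid, 0, memory_order_relaxed);
        return true;
}

int main(void)
{
        struct compl_entry e = {0};
        uint16_t s, i;

        e.status = 0;
        e.wrb_index = 5;
        atomic_store_explicit(&e.valid, 1, memory_order_release);

        if (compl_get(&e, &s, &i))
                printf("compl: status=%u wrb_index=%u\n", s, i);
        return 0;
}
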
@@ -2174,9 +2269,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
{
u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
struct device *dev = &adapter->pdev->dev;
- struct be_tx_obj *txo;
+ struct be_tx_compl_info *txcp;
struct be_queue_info *txq;
- struct be_eth_tx_compl *txcp;
+ struct be_tx_obj *txo;
int i, pending_txqs;
/* Stop polling for compls when HW has been silent for 10ms */
@@ -2187,10 +2282,10 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
cmpl = 0;
num_wrbs = 0;
txq = &txo->q;
- while ((txcp = be_tx_compl_get(&txo->cq))) {
- end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
- num_wrbs += be_tx_compl_process(adapter, txo,
- end_idx);
+ while ((txcp = be_tx_compl_get(txo))) {
+ num_wrbs +=
+ be_tx_compl_process(adapter, txo,
+ txcp->end_index);
cmpl++;
}
if (cmpl) {
@@ -2198,7 +2293,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
atomic_sub(num_wrbs, &txq->used);
timeo = 0;
}
- if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
+ if (!be_is_tx_compl_pending(txo))
pending_txqs--;
}
@@ -2247,6 +2342,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
}
+ free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q);
}
}
@@ -2262,6 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
adapter->cfg_num_qs);
for_all_evt_queues(adapter, eqo, i) {
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
+ eqo->affinity_mask);
+
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
BE_NAPI_WEIGHT);
napi_hash_add(&eqo->napi);
@@ -2353,8 +2454,9 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
static int be_tx_qs_create(struct be_adapter *adapter)
{
- struct be_queue_info *cq, *eq;
+ struct be_queue_info *cq;
struct be_tx_obj *txo;
+ struct be_eq_obj *eqo;
int status, i;
adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
@@ -2372,8 +2474,8 @@ static int be_tx_qs_create(struct be_adapter *adapter)
/* If num_evt_qs is less than num_tx_qs, then more than
* one txq share an eq
*/
- eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
- status = be_cmd_cq_create(adapter, cq, eq, false, 3);
+ eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
+ status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
if (status)
return status;
@@ -2385,6 +2487,9 @@ static int be_tx_qs_create(struct be_adapter *adapter)
status = be_cmd_txq_create(adapter, txo);
if (status)
return status;
+
+ netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
+ eqo->idx);
}
dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
@@ -2413,13 +2518,19 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
int rc, i;
/* We can create as many RSS rings as there are EQs. */
- adapter->num_rx_qs = adapter->num_evt_qs;
+ adapter->num_rss_qs = adapter->num_evt_qs;
+
+ /* We'll use RSS only if at least 2 RSS rings are supported. */
+ if (adapter->num_rss_qs <= 1)
+ adapter->num_rss_qs = 0;
+
+ adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
- /* We'll use RSS only if atleast 2 RSS rings are supported.
- * When RSS is used, we'll need a default RXQ for non-IP traffic.
+ /* When the interface does not support RSS rings (and there is no
+ * need to create a default RXQ), we still need one RXQ
*/
- if (adapter->num_rx_qs > 1)
- adapter->num_rx_qs++;
+ if (adapter->num_rx_qs == 0)
+ adapter->num_rx_qs = 1;
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2438,8 +2549,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
}
dev_info(&adapter->pdev->dev,
- "created %d RSS queue(s) and 1 default RX queue\n",
- adapter->num_rx_qs - 1);
+ "created %d RX queue(s)\n", adapter->num_rx_qs);
return 0;
}
@@ -2549,7 +2659,7 @@ loop_continue:
return work_done;
}
-static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
+static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
{
switch (status) {
case BE_TX_COMP_HDR_PARSE_ERR:
@@ -2564,7 +2674,7 @@ static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
}
}
-static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
+static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
switch (status) {
case LANCER_TX_COMP_LSO_ERR:
@@ -2589,22 +2699,18 @@ static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
int idx)
{
- struct be_eth_tx_compl *txcp;
int num_wrbs = 0, work_done = 0;
- u32 compl_status;
- u16 last_idx;
+ struct be_tx_compl_info *txcp;
- while ((txcp = be_tx_compl_get(&txo->cq))) {
- last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
- num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
+ while ((txcp = be_tx_compl_get(txo))) {
+ num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
work_done++;
- compl_status = GET_TX_COMPL_BITS(status, txcp);
- if (compl_status) {
+ if (txcp->status) {
if (lancer_chip(adapter))
- lancer_update_tx_err(txo, compl_status);
+ lancer_update_tx_err(txo, txcp->status);
else
- be_update_tx_err(txo, compl_status);
+ be_update_tx_err(txo, txcp->status);
}
}
@@ -2615,7 +2721,7 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
/* As Tx wrbs have been freed up, wake up netdev queue
* if it was stopped due to lack of tx wrbs. */
if (__netif_subqueue_stopped(adapter->netdev, idx) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
+ be_can_txq_wake(txo)) {
netif_wake_subqueue(adapter->netdev, idx);
}
@@ -2807,12 +2913,12 @@ void be_detect_error(struct be_adapter *adapter)
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
adapter->hw_error = true;
+ error_detected = true;
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
dev_info(dev, "Firmware update in progress\n");
} else {
- error_detected = true;
dev_err(dev, "Error detected in the card\n");
dev_err(dev, "ERR: sliport status 0x%x\n",
sliport_status);
@@ -2932,6 +3038,8 @@ static int be_msix_register(struct be_adapter *adapter)
status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
if (status)
goto err_msix;
+
+ irq_set_affinity_hint(vec, eqo->affinity_mask);
}
return 0;
@@ -2976,7 +3084,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct be_eq_obj *eqo;
- int i;
+ int i, vec;
if (!adapter->isr_registered)
return;
@@ -2988,8 +3096,11 @@ static void be_irq_unregister(struct be_adapter *adapter)
}
/* MSIx */
- for_all_evt_queues(adapter, eqo, i)
- free_irq(be_msix_vec_get(adapter, eqo), eqo);
+ for_all_evt_queues(adapter, eqo, i) {
+ vec = be_msix_vec_get(adapter, eqo);
+ irq_set_affinity_hint(vec, NULL);
+ free_irq(vec, eqo);
+ }
done:
adapter->isr_registered = false;
@@ -3071,12 +3182,14 @@ static int be_rx_qs_create(struct be_adapter *adapter)
return rc;
}
- /* The FW would like the default RXQ to be created first */
- rxo = default_rxo(adapter);
- rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
- adapter->if_handle, false, &rxo->rss_id);
- if (rc)
- return rc;
+ if (adapter->need_def_rxq || !adapter->num_rss_qs) {
+ rxo = default_rxo(adapter);
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+ rx_frag_size, adapter->if_handle,
+ false, &rxo->rss_id);
+ if (rc)
+ return rc;
+ }
for_all_rss_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -3087,8 +3200,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for (j = 0; j < RSS_INDIR_TABLE_LEN;
- j += adapter->num_rx_qs - 1) {
+ for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
for_all_rss_queues(adapter, rxo, i) {
if ((j + i) >= RSS_INDIR_TABLE_LEN)
break;
@@ -3179,7 +3291,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
int status = 0;
u8 mac[ETH_ALEN];
- memset(mac, 0, ETH_ALEN);
+ eth_zero_addr(mac);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
@@ -3324,6 +3436,14 @@ static void be_cancel_worker(struct be_adapter *adapter)
}
}
+static void be_cancel_err_detection(struct be_adapter *adapter)
+{
+ if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
+ cancel_delayed_work_sync(&adapter->be_err_detection_work);
+ adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
+ }
+}
+
static void be_mac_clear(struct be_adapter *adapter)
{
if (adapter->pmac_id) {
@@ -3355,8 +3475,39 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
}
#endif
+static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
+{
+ struct be_resources res = adapter->pool_res;
+ u16 num_vf_qs = 1;
+
+ /* Distribute the queue resources equally among the PF and its VFs.
+ * Do not distribute queue resources in multi-channel configuration.
+ */
+ if (num_vfs && !be_is_mc(adapter)) {
+ /* If number of VFs requested is 8 less than max supported,
+ * assign 8 queue pairs to the PF and divide the remaining
+ * resources evenly among the VFs
+ */
+ if (num_vfs < (be_max_vfs(adapter) - 8))
+ num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
+ else
+ num_vf_qs = res.max_rss_qs / num_vfs;
+
+ /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
+ * interfaces per port. Provide RSS on VFs only if the number
+ * of VFs requested is less than the MAX_RSS_IFACES limit.
+ */
+ if (num_vfs >= MAX_RSS_IFACES)
+ num_vf_qs = 1;
+ }
+ return num_vf_qs;
+}
+
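
be_calculate_vf_qs() above reserves 8 queue pairs for the PF when enough headroom exists, splits the remaining RSS queues evenly among the VFs, and caps each VF at a single queue once the MAX_RSS_IFACES limit is exceeded. A standalone sketch of the arithmetic with hypothetical pool limits:

/* Standalone sketch (not part of the patch): per-VF RSS queue count as
 * computed by be_calculate_vf_qs(), with hypothetical limits.
 */
#include <stdio.h>

#define MAX_RSS_IFACES_SKETCH 30        /* stand-in for MAX_RSS_IFACES */

int main(void)
{
        unsigned int max_rss_qs = 40;   /* PF-pool RSS queues (hypothetical) */
        unsigned int max_vfs = 32;      /* max VFs supported (hypothetical) */
        unsigned int num_vfs = 16;      /* VFs being enabled */
        unsigned int num_vf_qs = 1;

        if (num_vfs) {
                if (num_vfs < max_vfs - 8)
                        /* keep 8 queue pairs for the PF, split the rest */
                        num_vf_qs = (max_rss_qs - 8) / num_vfs;
                else
                        num_vf_qs = max_rss_qs / num_vfs;

                /* RSS-capable interfaces per port are limited */
                if (num_vfs >= MAX_RSS_IFACES_SKETCH)
                        num_vf_qs = 1;
        }
        printf("queues per VF = %u\n", num_vf_qs);      /* (40 - 8) / 16 = 2 */
        return 0;
}
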
static int be_clear(struct be_adapter *adapter)
{
+ struct pci_dev *pdev = adapter->pdev;
+ u16 num_vf_qs;
+
be_cancel_worker(adapter);
if (sriov_enabled(adapter))
@@ -3365,9 +3516,14 @@ static int be_clear(struct be_adapter *adapter)
/* Re-configure FW to distribute resources evenly across max-supported
* number of VFs, only when VFs are not already enabled.
*/
- if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
+ if (skyhawk_chip(adapter) && be_physfn(adapter) &&
+ !pci_vfs_assigned(pdev)) {
+ num_vf_qs = be_calculate_vf_qs(adapter,
+ pci_sriov_get_totalvfs(pdev));
be_cmd_set_sriov_config(adapter, adapter->pool_res,
- pci_sriov_get_totalvfs(adapter->pdev));
+ pci_sriov_get_totalvfs(pdev),
+ num_vf_qs);
+ }
#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
@@ -3391,7 +3547,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_RSS;
+ BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
en_flags &= cap_flags;
@@ -3412,6 +3568,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
status = be_cmd_get_profile_config(adapter, &res,
+ RESOURCE_LIMITS,
vf + 1);
if (!status) {
cap_flags = res.if_cap_flags;
@@ -3585,7 +3742,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
/* On a SuperNIC profile, the driver needs to use the
* GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
*/
- be_cmd_get_profile_config(adapter, &super_nic_res, 0);
+ be_cmd_get_profile_config(adapter, &super_nic_res,
+ RESOURCE_LIMITS, 0);
/* Some old versions of BE3 FW don't report max_tx_qs value */
res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
} else {
@@ -3605,6 +3763,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
res->max_evt_qs = 1;
res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+ res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
@@ -3624,13 +3783,12 @@ static void be_setup_init(struct be_adapter *adapter)
static int be_get_sriov_config(struct be_adapter *adapter)
{
- struct device *dev = &adapter->pdev->dev;
struct be_resources res = {0};
int max_vfs, old_vfs;
- /* Some old versions of BE3 FW don't report max_vfs value */
- be_cmd_get_profile_config(adapter, &res, 0);
+ be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
+ /* Some old versions of BE3 FW don't report max_vfs value */
if (BE3_chip(adapter) && !res.max_vfs) {
max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
@@ -3638,35 +3796,49 @@ static int be_get_sriov_config(struct be_adapter *adapter)
adapter->pool_res = res;
- if (!be_max_vfs(adapter)) {
- if (num_vfs)
- dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
- adapter->num_vfs = 0;
- return 0;
- }
-
- pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
-
- /* validate num_vfs module param */
+ /* If the VFs were not disabled during a previous unload of the driver,
+ * we cannot rely on the PF pool limits for the TotalVFs value.
+ * Instead, use the TotalVFs value stored in the pci-dev struct.
+ */
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
- dev_info(dev, "%d VFs are already enabled\n", old_vfs);
- if (old_vfs != num_vfs)
- dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+ dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
+ old_vfs);
+
+ adapter->pool_res.max_vfs =
+ pci_sriov_get_totalvfs(adapter->pdev);
adapter->num_vfs = old_vfs;
- } else {
- if (num_vfs > be_max_vfs(adapter)) {
- dev_info(dev, "Resources unavailable to init %d VFs\n",
- num_vfs);
- dev_info(dev, "Limiting to %d VFs\n",
- be_max_vfs(adapter));
- }
- adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
}
return 0;
}
+static void be_alloc_sriov_res(struct be_adapter *adapter)
+{
+ int old_vfs = pci_num_vf(adapter->pdev);
+ u16 num_vf_qs;
+ int status;
+
+ be_get_sriov_config(adapter);
+
+ if (!old_vfs)
+ pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
+
+ /* When the HW is in SRIOV capable configuration, the PF-pool
+ * resources are given to the PF during driver load, if there are no
+ * old VFs. This facility is not available in BE3 FW.
+ * Also, this is done by FW in Lancer chip.
+ */
+ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+ num_vf_qs = be_calculate_vf_qs(adapter, 0);
+ status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
+ num_vf_qs);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to optimize SRIOV resources\n");
+ }
+}
+
static int be_get_resources(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -3687,12 +3859,23 @@ static int be_get_resources(struct be_adapter *adapter)
if (status)
return status;
+ /* If a default RXQ must be created, we'll use up one RSSQ */
+ if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
+ !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
+ res.max_rss_qs -= 1;
+
/* If RoCE may be enabled stash away half the EQs for RoCE */
if (be_roce_supported(adapter))
res.max_evt_qs /= 2;
adapter->res = res;
}
+ /* If FW supports RSS default queue, then skip creating non-RSS
+ * queue for non-IP traffic.
+ */
+ adapter->need_def_rxq = (be_if_cap_flags(adapter) &
+ BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
+
dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
be_max_txqs(adapter), be_max_rxqs(adapter),
be_max_rss(adapter), be_max_eqs(adapter),
@@ -3701,47 +3884,33 @@ static int be_get_resources(struct be_adapter *adapter)
be_max_uc(adapter), be_max_mc(adapter),
be_max_vlans(adapter));
+ /* Sanitize cfg_num_qs based on HW and platform limits */
+ adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
+ be_max_qs(adapter));
return 0;
}
-static void be_sriov_config(struct be_adapter *adapter)
-{
- struct device *dev = &adapter->pdev->dev;
- int status;
-
- status = be_get_sriov_config(adapter);
- if (status) {
- dev_err(dev, "Failed to query SR-IOV configuration\n");
- dev_err(dev, "SR-IOV cannot be enabled\n");
- return;
- }
-
- /* When the HW is in SRIOV capable configuration, the PF-pool
- * resources are equally distributed across the max-number of
- * VFs. The user may request only a subset of the max-vfs to be
- * enabled. Based on num_vfs, redistribute the resources across
- * num_vfs so that each VF will have access to more number of
- * resources. This facility is not available in BE3 FW.
- * Also, this is done by FW in Lancer chip.
- */
- if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
- status = be_cmd_set_sriov_config(adapter,
- adapter->pool_res,
- adapter->num_vfs);
- if (status)
- dev_err(dev, "Failed to optimize SR-IOV resources\n");
- }
-}
-
static int be_get_config(struct be_adapter *adapter)
{
+ int status, level;
u16 profile_id;
- int status;
+
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
status = be_cmd_query_fw_cfg(adapter);
if (status)
return status;
+ if (BEx_chip(adapter)) {
+ level = be_cmd_get_fw_log_level(adapter);
+ adapter->msg_enable =
+ level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ }
+
+ be_cmd_get_acpi_wol_cap(adapter);
+
be_cmd_query_port_name(adapter);
if (be_physfn(adapter)) {
@@ -3751,9 +3920,6 @@ static int be_get_config(struct be_adapter *adapter)
"Using profile 0x%x\n", profile_id);
}
- if (!BE2_chip(adapter) && be_physfn(adapter))
- be_sriov_config(adapter);
-
status = be_get_resources(adapter);
if (status)
return status;
@@ -3763,9 +3929,6 @@ static int be_get_config(struct be_adapter *adapter)
if (!adapter->pmac_id)
return -ENOMEM;
- /* Sanitize cfg_num_qs based on HW and platform limits */
- adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
-
return 0;
}
@@ -3799,6 +3962,13 @@ static void be_schedule_worker(struct be_adapter *adapter)
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
+static void be_schedule_err_detection(struct be_adapter *adapter)
+{
+ schedule_delayed_work(&adapter->be_err_detection_work,
+ msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
+}
+
static int be_setup_queues(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3881,16 +4051,61 @@ static inline int fw_major_num(const char *fw_ver)
return fw_major;
}
+/* If any VFs are already enabled don't FLR the PF */
+static bool be_reset_required(struct be_adapter *adapter)
+{
+ return pci_num_vf(adapter->pdev) ? false : true;
+}
+
+/* Wait for the FW to be ready and perform the required initialization */
+static int be_func_init(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_fw_wait_ready(adapter);
+ if (status)
+ return status;
+
+ if (be_reset_required(adapter)) {
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ return status;
+
+ /* Wait for interrupts to quiesce after an FLR */
+ msleep(100);
+
+ /* We can clear all errors when function reset succeeds */
+ be_clear_all_error(adapter);
+ }
+
+ /* Tell FW we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ return status;
+
+ /* Allow interrupts for other ULPs running on NIC function */
+ be_intr_set(adapter, true);
+
+ return 0;
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
int status;
+ status = be_func_init(adapter);
+ if (status)
+ return status;
+
be_setup_init(adapter);
if (!lancer_chip(adapter))
be_cmd_req_native_mode(adapter);
+ if (!BE2_chip(adapter) && be_physfn(adapter))
+ be_alloc_sriov_res(adapter);
+
status = be_get_config(adapter);
if (status)
goto err;
@@ -3931,8 +4146,6 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
- be_cmd_get_acpi_wol_cap(adapter);
-
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
if (status)
@@ -4633,7 +4846,8 @@ err:
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask)
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
@@ -4655,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
hsw_mode == PORT_FWD_TYPE_VEPA ?
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
- 0, 0);
+ 0, 0, nlflags);
}
#ifdef CONFIG_BE2NET_VXLAN
@@ -4842,6 +5056,142 @@ static void be_netdev_init(struct net_device *netdev)
netdev->ethtool_ops = &be_ethtool_ops;
}
+static void be_cleanup(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ be_close(netdev);
+ rtnl_unlock();
+
+ be_clear(adapter);
+}
+
+static int be_resume(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ status = be_setup(adapter);
+ if (status)
+ return status;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ return status;
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static int be_err_recover(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ status = be_resume(adapter);
+ if (status)
+ goto err;
+
+ dev_info(dev, "Adapter recovery successful\n");
+ return 0;
+err:
+ if (be_physfn(adapter))
+ dev_err(dev, "Adapter recovery failed\n");
+ else
+ dev_err(dev, "Re-trying adapter recovery\n");
+
+ return status;
+}
+
+static void be_err_detection_task(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter,
+ be_err_detection_work.work);
+ int status = 0;
+
+ be_detect_error(adapter);
+
+ if (adapter->hw_error) {
+ be_cleanup(adapter);
+
+ /* As of now error recovery support is in Lancer only */
+ if (lancer_chip(adapter))
+ status = be_err_recover(adapter);
+ }
+
+ /* Always attempt recovery on VFs */
+ if (!status || be_virtfn(adapter))
+ be_schedule_err_detection(adapter);
+}
+
+static void be_log_sfp_info(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_query_sfp_info(adapter);
+ if (!status) {
+ dev_err(&adapter->pdev->dev,
+ "Unqualified SFP+ detected on %c from %s part no: %s",
+ adapter->port_name, adapter->phy.vendor_name,
+ adapter->phy.vendor_pn);
+ }
+ adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+}
+
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions
+ */
+ if (!netif_running(adapter->netdev)) {
+ local_bh_disable();
+ be_process_mcc(adapter);
+ local_bh_enable();
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ if (be_physfn(adapter) &&
+ MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ /* Replenish RX-queues starved due to memory
+ * allocation failures.
+ */
+ if (rxo->rx_post_starved)
+ be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+ }
+
+ be_eqd_update(adapter);
+
+ if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
+ be_log_sfp_info(adapter);
+
+reschedule:
+ adapter->work_counter++;
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
if (adapter->csr)
@@ -4874,6 +5224,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
u8 __iomem *addr;
+ u32 sli_intf;
+
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT;
+ adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
if (BEx_chip(adapter) && be_physfn(adapter)) {
adapter->csr = pci_iomap(pdev, 2, 0);
@@ -4907,109 +5263,93 @@ pci_map_err:
return -ENOMEM;
}
-static void be_ctrl_cleanup(struct be_adapter *adapter)
+static void be_drv_cleanup(struct be_adapter *adapter)
{
struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
-
- be_unmap_pci_bars(adapter);
+ struct device *dev = &adapter->pdev->dev;
if (mem->va)
- dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
- mem->dma);
+ dma_free_coherent(dev, mem->size, mem->va, mem->dma);
mem = &adapter->rx_filter;
if (mem->va)
- dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
- mem->dma);
+ dma_free_coherent(dev, mem->size, mem->va, mem->dma);
+
+ mem = &adapter->stats_cmd;
+ if (mem->va)
+ dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}
-static int be_ctrl_init(struct be_adapter *adapter)
+/* Allocate and initialize various fields in be_adapter struct */
+static int be_drv_init(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
struct be_dma_mem *rx_filter = &adapter->rx_filter;
- u32 sli_intf;
- int status;
-
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
- SLI_INTF_FAMILY_SHIFT;
- adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
-
- status = be_map_pci_bars(adapter);
- if (status)
- goto done;
+ struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
+ struct device *dev = &adapter->pdev->dev;
+ int status = 0;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
- mbox_mem_alloc->size,
+ mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
&mbox_mem_alloc->dma,
GFP_KERNEL);
- if (!mbox_mem_alloc->va) {
- status = -ENOMEM;
- goto unmap_pci_bars;
- }
+ if (!mbox_mem_alloc->va)
+ return -ENOMEM;
+
mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
- rx_filter->size, &rx_filter->dma,
- GFP_KERNEL);
+ rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
if (!rx_filter->va) {
status = -ENOMEM;
goto free_mbox;
}
+ if (lancer_chip(adapter))
+ stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+ else if (BE2_chip(adapter))
+ stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
+ else if (BE3_chip(adapter))
+ stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+ else
+ stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
+ stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
+ &stats_cmd->dma, GFP_KERNEL);
+ if (!stats_cmd->va) {
+ status = -ENOMEM;
+ goto free_rx_filter;
+ }
+
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
-
init_completion(&adapter->et_cmd_compl);
- pci_save_state(adapter->pdev);
- return 0;
-free_mbox:
- dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
- mbox_mem_alloc->va, mbox_mem_alloc->dma);
-
-unmap_pci_bars:
- be_unmap_pci_bars(adapter);
-
-done:
- return status;
-}
-
-static void be_stats_cleanup(struct be_adapter *adapter)
-{
- struct be_dma_mem *cmd = &adapter->stats_cmd;
+ pci_save_state(adapter->pdev);
- if (cmd->va)
- dma_free_coherent(&adapter->pdev->dev, cmd->size,
- cmd->va, cmd->dma);
-}
+ INIT_DELAYED_WORK(&adapter->work, be_worker);
+ INIT_DELAYED_WORK(&adapter->be_err_detection_work,
+ be_err_detection_task);
-static int be_stats_init(struct be_adapter *adapter)
-{
- struct be_dma_mem *cmd = &adapter->stats_cmd;
+ adapter->rx_fc = true;
+ adapter->tx_fc = true;
- if (lancer_chip(adapter))
- cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
- else if (BE2_chip(adapter))
- cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
- else if (BE3_chip(adapter))
- cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
- else
- /* ALL non-BE ASICs */
- cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
+ /* Must be a power of 2 or else MODULO will BUG_ON */
+ adapter->be_get_temp_freq = 64;
- cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL);
- if (!cmd->va)
- return -ENOMEM;
return 0;
+
+free_rx_filter:
+ dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
+free_mbox:
+ dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
+ mbox_mem_alloc->dma);
+ return status;
}
static void be_remove(struct pci_dev *pdev)
@@ -5022,7 +5362,7 @@ static void be_remove(struct pci_dev *pdev)
be_roce_dev_remove(adapter);
be_intr_set(adapter, false);
- cancel_delayed_work_sync(&adapter->func_recovery_work);
+ be_cancel_err_detection(adapter);
unregister_netdev(adapter->netdev);
@@ -5031,9 +5371,8 @@ static void be_remove(struct pci_dev *pdev)
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
- be_stats_cleanup(adapter);
-
- be_ctrl_cleanup(adapter);
+ be_unmap_pci_bars(adapter);
+ be_drv_cleanup(adapter);
pci_disable_pcie_error_reporting(pdev);
@@ -5043,156 +5382,6 @@ static void be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
-static int be_get_initial_config(struct be_adapter *adapter)
-{
- int status, level;
-
- status = be_cmd_get_cntl_attributes(adapter);
- if (status)
- return status;
-
- /* Must be a power of 2 or else MODULO will BUG_ON */
- adapter->be_get_temp_freq = 64;
-
- if (BEx_chip(adapter)) {
- level = be_cmd_get_fw_log_level(adapter);
- adapter->msg_enable =
- level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
- }
-
- adapter->cfg_num_qs = netif_get_num_default_rss_queues();
- return 0;
-}
-
-static int lancer_recover_func(struct be_adapter *adapter)
-{
- struct device *dev = &adapter->pdev->dev;
- int status;
-
- status = lancer_test_and_set_rdy_state(adapter);
- if (status)
- goto err;
-
- if (netif_running(adapter->netdev))
- be_close(adapter->netdev);
-
- be_clear(adapter);
-
- be_clear_all_error(adapter);
-
- status = be_setup(adapter);
- if (status)
- goto err;
-
- if (netif_running(adapter->netdev)) {
- status = be_open(adapter->netdev);
- if (status)
- goto err;
- }
-
- dev_err(dev, "Adapter recovery successful\n");
- return 0;
-err:
- if (status == -EAGAIN)
- dev_err(dev, "Waiting for resource provisioning\n");
- else
- dev_err(dev, "Adapter recovery failed\n");
-
- return status;
-}
-
-static void be_func_recovery_task(struct work_struct *work)
-{
- struct be_adapter *adapter =
- container_of(work, struct be_adapter, func_recovery_work.work);
- int status = 0;
-
- be_detect_error(adapter);
-
- if (adapter->hw_error && lancer_chip(adapter)) {
- rtnl_lock();
- netif_device_detach(adapter->netdev);
- rtnl_unlock();
-
- status = lancer_recover_func(adapter);
- if (!status)
- netif_device_attach(adapter->netdev);
- }
-
- /* In Lancer, for all errors other than provisioning error (-EAGAIN),
- * no need to attempt further recovery.
- */
- if (!status || status == -EAGAIN)
- schedule_delayed_work(&adapter->func_recovery_work,
- msecs_to_jiffies(1000));
-}
-
-static void be_log_sfp_info(struct be_adapter *adapter)
-{
- int status;
-
- status = be_cmd_query_sfp_info(adapter);
- if (!status) {
- dev_err(&adapter->pdev->dev,
- "Unqualified SFP+ detected on %c from %s part no: %s",
- adapter->port_name, adapter->phy.vendor_name,
- adapter->phy.vendor_pn);
- }
- adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
-}
-
-static void be_worker(struct work_struct *work)
-{
- struct be_adapter *adapter =
- container_of(work, struct be_adapter, work.work);
- struct be_rx_obj *rxo;
- int i;
-
- /* when interrupts are not yet enabled, just reap any pending
- * mcc completions */
- if (!netif_running(adapter->netdev)) {
- local_bh_disable();
- be_process_mcc(adapter);
- local_bh_enable();
- goto reschedule;
- }
-
- if (!adapter->stats_cmd_sent) {
- if (lancer_chip(adapter))
- lancer_cmd_get_pport_stats(adapter,
- &adapter->stats_cmd);
- else
- be_cmd_get_stats(adapter, &adapter->stats_cmd);
- }
-
- if (be_physfn(adapter) &&
- MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
- for_all_rx_queues(adapter, rxo, i) {
- /* Replenish RX-queues starved due to memory
- * allocation failures.
- */
- if (rxo->rx_post_starved)
- be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
- }
-
- be_eqd_update(adapter);
-
- if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
- be_log_sfp_info(adapter);
-
-reschedule:
- adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
-/* If any VFs are already enabled don't FLR the PF */
-static bool be_reset_required(struct be_adapter *adapter)
-{
- return pci_num_vf(adapter->pdev) ? false : true;
-}
-
static char *mc_name(struct be_adapter *adapter)
{
char *str = ""; /* default */
@@ -5291,50 +5480,17 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
if (!status)
dev_info(&pdev->dev, "PCIe error reporting enabled\n");
- status = be_ctrl_init(adapter);
+ status = be_map_pci_bars(adapter);
if (status)
goto free_netdev;
- /* sync up with fw's ready state */
- if (be_physfn(adapter)) {
- status = be_fw_wait_ready(adapter);
- if (status)
- goto ctrl_clean;
- }
-
- if (be_reset_required(adapter)) {
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
-
- /* Wait for interrupts to quiesce after an FLR */
- msleep(100);
- }
-
- /* Allow interrupts for other ULPs running on NIC function */
- be_intr_set(adapter, true);
-
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
- if (status)
- goto ctrl_clean;
-
- status = be_stats_init(adapter);
- if (status)
- goto ctrl_clean;
-
- status = be_get_initial_config(adapter);
+ status = be_drv_init(adapter);
if (status)
- goto stats_clean;
-
- INIT_DELAYED_WORK(&adapter->work, be_worker);
- INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
- adapter->rx_fc = true;
- adapter->tx_fc = true;
+ goto unmap_bars;
status = be_setup(adapter);
if (status)
- goto stats_clean;
+ goto drv_cleanup;
be_netdev_init(netdev);
status = register_netdev(netdev);
@@ -5343,8 +5499,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_roce_dev_add(adapter);
- schedule_delayed_work(&adapter->func_recovery_work,
- msecs_to_jiffies(1000));
+ be_schedule_err_detection(adapter);
dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
func_name(adapter), mc_name(adapter), adapter->port_name);
@@ -5353,10 +5508,10 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
unsetup:
be_clear(adapter);
-stats_clean:
- be_stats_cleanup(adapter);
-ctrl_clean:
- be_ctrl_cleanup(adapter);
+drv_cleanup:
+ be_drv_cleanup(adapter);
+unmap_bars:
+ be_unmap_pci_bars(adapter);
free_netdev:
free_netdev(netdev);
rel_reg:
@@ -5371,21 +5526,14 @@ do_none:
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
if (adapter->wol_en)
be_setup_wol(adapter, true);
be_intr_set(adapter, false);
- cancel_delayed_work_sync(&adapter->func_recovery_work);
+ be_cancel_err_detection(adapter);
- netif_device_detach(netdev);
- if (netif_running(netdev)) {
- rtnl_lock();
- be_close(netdev);
- rtnl_unlock();
- }
- be_clear(adapter);
+ be_cleanup(adapter);
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -5393,13 +5541,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int be_resume(struct pci_dev *pdev)
+static int be_pci_resume(struct pci_dev *pdev)
{
- int status = 0;
struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
-
- netif_device_detach(netdev);
+ int status = 0;
status = pci_enable_device(pdev);
if (status)
@@ -5408,30 +5553,11 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- status = be_fw_wait_ready(adapter);
- if (status)
- return status;
-
- status = be_cmd_reset_function(adapter);
- if (status)
- return status;
-
- be_intr_set(adapter, true);
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
+ status = be_resume(adapter);
if (status)
return status;
- be_setup(adapter);
- if (netif_running(netdev)) {
- rtnl_lock();
- be_open(netdev);
- rtnl_unlock();
- }
-
- schedule_delayed_work(&adapter->func_recovery_work,
- msecs_to_jiffies(1000));
- netif_device_attach(netdev);
+ be_schedule_err_detection(adapter);
if (adapter->wol_en)
be_setup_wol(adapter, false);
@@ -5451,7 +5577,7 @@ static void be_shutdown(struct pci_dev *pdev)
be_roce_dev_shutdown(adapter);
cancel_delayed_work_sync(&adapter->work);
- cancel_delayed_work_sync(&adapter->func_recovery_work);
+ be_cancel_err_detection(adapter);
netif_device_detach(adapter->netdev);
@@ -5464,22 +5590,15 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
dev_err(&adapter->pdev->dev, "EEH error detected\n");
if (!adapter->eeh_error) {
adapter->eeh_error = true;
- cancel_delayed_work_sync(&adapter->func_recovery_work);
-
- rtnl_lock();
- netif_device_detach(netdev);
- if (netif_running(netdev))
- be_close(netdev);
- rtnl_unlock();
+ be_cancel_err_detection(adapter);
- be_clear(adapter);
+ be_cleanup(adapter);
}
if (state == pci_channel_io_perm_failure)
@@ -5530,43 +5649,73 @@ static void be_eeh_resume(struct pci_dev *pdev)
{
int status = 0;
struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
dev_info(&adapter->pdev->dev, "EEH resume\n");
pci_save_state(pdev);
- status = be_cmd_reset_function(adapter);
+ status = be_resume(adapter);
if (status)
goto err;
- /* On some BE3 FW versions, after a HW reset,
- * interrupts will remain disabled for each function.
- * So, explicitly enable interrupts
+ be_schedule_err_detection(adapter);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+}
+
+static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ u16 num_vf_qs;
+ int status;
+
+ if (!num_vfs)
+ be_vf_clear(adapter);
+
+ adapter->num_vfs = num_vfs;
+
+ if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable VFs while they are assigned\n");
+ return -EBUSY;
+ }
+
+ /* When the HW is in SRIOV capable configuration, the PF-pool resources
+ * are equally distributed across the max-number of VFs. The user may
+ * request only a subset of the max-vfs to be enabled.
+ * Based on num_vfs, redistribute the resources across num_vfs so that
+ * each VF will have access to more number of resources.
+ * This facility is not available in BE3 FW.
+ * Also, this is done by FW in Lancer chip.
*/
- be_intr_set(adapter, true);
+ if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
+ num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
+ status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
+ adapter->num_vfs, num_vf_qs);
+ if (status)
+ dev_err(&pdev->dev,
+ "Failed to optimize SR-IOV resources\n");
+ }
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
+ status = be_get_resources(adapter);
if (status)
- goto err;
+ return be_cmd_status(status);
- status = be_setup(adapter);
+ /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+ rtnl_lock();
+ status = be_update_queues(adapter);
+ rtnl_unlock();
if (status)
- goto err;
+ return be_cmd_status(status);
- if (netif_running(netdev)) {
- status = be_open(netdev);
- if (status)
- goto err;
- }
+ if (adapter->num_vfs)
+ status = be_vf_setup(adapter);
- schedule_delayed_work(&adapter->func_recovery_work,
- msecs_to_jiffies(1000));
- netif_device_attach(netdev);
- return;
-err:
- dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+ if (!status)
+ return adapter->num_vfs;
+
+ return 0;
}
static const struct pci_error_handlers be_eeh_handlers = {
@@ -5581,8 +5730,9 @@ static struct pci_driver be_driver = {
.probe = be_probe,
.remove = be_remove,
.suspend = be_suspend,
- .resume = be_resume,
+ .resume = be_pci_resume,
.shutdown = be_shutdown,
+ .sriov_configure = be_pci_sriov_configure,
.err_handler = &be_eeh_handlers
};
@@ -5596,6 +5746,11 @@ static int __init be_init_module(void)
rx_frag_size = 2048;
}
+ if (num_vfs > 0) {
+ pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
+ pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
+ }
+
return pci_register_driver(&be_driver);
}
module_init(be_init_module);
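
The .sriov_configure hook registered above is what the PCI core calls when userspace writes to the adapter's sriov_numvfs sysfs attribute, which is the interface replacing the obsolete num_vfs module parameter. A minimal userspace sketch of enabling VFs that way (the PCI address is hypothetical; substitute the adapter's actual BDF):

/* Standalone sketch (not part of the patch): enabling VFs through the
 * sriov_numvfs sysfs attribute served by .sriov_configure.
 */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/bus/pci/devices/0000:04:00.0/sriov_numvfs";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("open sriov_numvfs");
                return 1;
        }
        fprintf(f, "4\n");      /* request 4 VFs; write 0 to disable them */
        fclose(f);
        return 0;
}
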
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index f88cfaa359e7..442410cd2ca4 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1299,7 +1299,7 @@ static int ethoc_resume(struct platform_device *pdev)
# define ethoc_resume NULL
#endif
-static struct of_device_id ethoc_match[] = {
+static const struct of_device_id ethoc_match[] = {
{ .compatible = "opencores,ethoc", },
{},
};
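The same constification is applied throughout this series; shown once as a stand-alone sketch with a hypothetical "acme,widget" binding. Declaring the match table const lets it be placed in read-only data and matches the const pointer expected by of_match_table.

#include <linux/module.h>
#include <linux/of.h>

/* Hypothetical OF match table, const so it can live in .rodata */
static const struct of_device_id widget_match[] = {
	{ .compatible = "acme,widget", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, widget_match);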
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ba84c4a9ce32..25e3425729d0 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -58,14 +58,12 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
config FSL_PQ_MDIO
tristate "Freescale PQ MDIO"
- depends on FSL_SOC
select PHYLIB
---help---
This driver supports the MDIO bus used by the gianfar and UCC drivers.
config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
- depends on FSL_SOC
select PHYLIB
select OF_MDIO
---help---
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f6a3a7abd468..66d47e448e4d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev)
rcntl |= 0x40000000 | 0x00000020;
/* RGMII, RMII or MII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
rcntl |= (1 << 6);
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= (1 << 8);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index f495796248db..afe7f39cdd7c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op)
}
#endif
-static struct of_device_id mpc52xx_fec_match[] = {
+static const struct of_device_id mpc52xx_fec_match[] = {
{ .compatible = "fsl,mpc5200b-fec", },
{ .compatible = "fsl,mpc5200-fec", },
{ .compatible = "mpc5200-fec", },
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index e0528900db02..1e647beaf989 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of)
return 0;
}
-static struct of_device_id mpc52xx_fec_mdio_match[] = {
+static const struct of_device_id mpc52xx_fec_mdio_match[] = {
{ .compatible = "fsl,mpc5200b-mdio", },
{ .compatible = "fsl,mpc5200-mdio", },
{ .compatible = "mpc5200b-fec-phy", },
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1f9cf2345266..a583d89b13c4 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -136,7 +136,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
*/
writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
- /* It is recommended to doulbe check the TMODE field in the
+ /* It is recommended to double check the TMODE field in the
* TCSR register to be cleared before the first compare counter
* is written into TCCR register. Just add a double check.
*/
@@ -390,20 +390,18 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
* read the timecounter and return the correct value on ns,
* after converting it into a struct timespec.
*/
-static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct fec_enet_private *adapter =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
- u32 remainder;
unsigned long flags;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
@@ -417,7 +415,7 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
* wall timer value.
*/
static int fec_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
@@ -433,8 +431,7 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
return -EINVAL;
}
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
/* Get the timer value based on timestamp.
* Update the counter with the masked value.
*/
@@ -584,8 +581,8 @@ void fec_ptp_init(struct platform_device *pdev)
fep->ptp_caps.pps = 1;
fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
- fep->ptp_caps.gettime = fec_ptp_gettime;
- fep->ptp_caps.settime = fec_ptp_settime;
+ fep->ptp_caps.gettime64 = fec_ptp_gettime;
+ fep->ptp_caps.settime64 = fec_ptp_settime;
fep->ptp_caps.enable = fec_ptp_enable;
fep->cycle_speed = clk_get_rate(fep->clk_ptp);
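The PTP conversions in this series (fec_ptp here, gianfar_ptp later) follow the same shape; here is a minimal sketch of the 64-bit callbacks under an assumed foo_priv with a timecounter and spinlock. Names are illustrative, not taken from either driver.

#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/spinlock.h>

struct foo_priv {
	struct ptp_clock_info caps;
	struct timecounter tc;
	spinlock_t lock;
};

static int foo_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct foo_priv *priv = container_of(ptp, struct foo_priv, caps);
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&priv->lock, flags);
	ns = timecounter_read(&priv->tc);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* no open-coded div_u64_rem() needed, and y2038-safe */
	*ts = ns_to_timespec64(ns);
	return 0;
}

static int foo_ptp_settime64(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	struct foo_priv *priv = container_of(ptp, struct foo_priv, caps);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* timespec64_to_ns() keeps the full 64-bit seconds value */
	timecounter_init(&priv->tc, priv->tc.cc, timespec64_to_ns(ts));
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

These would then be hooked up as .gettime64/.settime64 in the driver's ptp_clock_info, as both drivers above do.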
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index a17628769a1f..9b3639eae676 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -916,7 +916,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
-static struct of_device_id fs_enet_match[];
+static const struct of_device_id fs_enet_match[];
static int fs_enet_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
@@ -1082,7 +1082,7 @@ static int fs_enet_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id fs_enet_match[] = {
+static const struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
{
.compatible = "fsl,cpm1-scc-enet",
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 1d5617d2d8bd..68a428de0bc0 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id fs_enet_mdio_bb_match[] = {
+static const struct of_device_id fs_enet_mdio_bb_match[] = {
{
.compatible = "fsl,cpm2-mdio-bitbang",
},
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 1648e3582500..2be383e6d258 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
}
-static struct of_device_id fs_enet_mdio_fec_match[];
+static const struct of_device_id fs_enet_mdio_fec_match[];
static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
@@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id fs_enet_mdio_fec_match[] = {
+static const struct of_device_id fs_enet_mdio_fec_match[] = {
{
.compatible = "fsl,pq1-fec-mdio",
},
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index d1a91e344e6b..3c40f6b99224 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
#endif
-static struct of_device_id fsl_pq_mdio_match[] = {
+static const struct of_device_id fsl_pq_mdio_match[] = {
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
{
.compatible = "fsl,gianfar-tbi",
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7bf3682cdf47..4ee080d49bc0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -158,7 +158,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
{
u32 lstatus;
- bdp->bufPtr = buf;
+ bdp->bufPtr = cpu_to_be32(buf);
lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
@@ -166,7 +166,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
gfar_wmb();
- bdp->lstatus = lstatus;
+ bdp->lstatus = cpu_to_be32(lstatus);
}
static int gfar_init_bds(struct net_device *ndev)
@@ -200,7 +200,8 @@ static int gfar_init_bds(struct net_device *ndev)
/* Set the last descriptor in the ring to indicate wrap */
txbdp--;
- txbdp->status |= TXBD_WRAP;
+ txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+ TXBD_WRAP);
}
rfbptr = &regs->rfbptr0;
@@ -214,7 +215,7 @@ static int gfar_init_bds(struct net_device *ndev)
struct sk_buff *skb = rx_queue->rx_skbuff[j];
if (skb) {
- bufaddr = rxbdp->bufPtr;
+ bufaddr = be32_to_cpu(rxbdp->bufPtr);
} else {
skb = gfar_new_skb(ndev, &bufaddr);
if (!skb) {
@@ -696,19 +697,28 @@ static int gfar_parse_group(struct device_node *np,
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
- u32 *rxq_mask, *txq_mask;
- rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
- txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+ u32 rxq_mask, txq_mask;
+ int ret;
+
+ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+
+ ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
+ if (!ret) {
+ grp->rx_bit_map = rxq_mask ?
+ rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ }
+
+ ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
+ if (!ret) {
+ grp->tx_bit_map = txq_mask ?
+ txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ }
if (priv->poll_mode == GFAR_SQ_POLLING) {
/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
- } else { /* GFAR_MQ_POLLING */
- grp->rx_bit_map = rxq_mask ?
- *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
- grp->tx_bit_map = txq_mask ?
- *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
}
} else {
grp->rx_bit_map = 0xFF;
@@ -769,11 +779,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
struct gfar_private *priv = NULL;
struct device_node *np = ofdev->dev.of_node;
struct device_node *child = NULL;
- const u32 *stash;
- const u32 *stash_len;
- const u32 *stash_idx;
+ struct property *stash;
+ u32 stash_len = 0;
+ u32 stash_idx = 0;
unsigned int num_tx_qs, num_rx_qs;
- u32 *tx_queues, *rx_queues;
unsigned short mode, poll_mode;
if (!np)
@@ -787,10 +796,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
poll_mode = GFAR_SQ_POLLING;
}
- /* parse the num of HW tx and rx queues */
- tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
- rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-
if (mode == SQ_SG_MODE) {
num_tx_qs = 1;
num_rx_qs = 1;
@@ -809,8 +814,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_tx_qs = num_grps; /* one txq per int group */
num_rx_qs = num_grps; /* one rxq per int group */
} else { /* GFAR_MQ_POLLING */
- num_tx_qs = tx_queues ? *tx_queues : 1;
- num_rx_qs = rx_queues ? *rx_queues : 1;
+ u32 tx_queues, rx_queues;
+ int ret;
+
+ /* parse the num of HW tx and rx queues */
+ ret = of_property_read_u32(np, "fsl,num_tx_queues",
+ &tx_queues);
+ num_tx_qs = ret ? 1 : tx_queues;
+
+ ret = of_property_read_u32(np, "fsl,num_rx_queues",
+ &rx_queues);
+ num_rx_qs = ret ? 1 : rx_queues;
}
}
@@ -851,13 +865,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (err)
goto rx_alloc_failed;
+ err = of_property_read_string(np, "model", &model);
+ if (err) {
+ pr_err("Device model property missing, aborting\n");
+ goto rx_alloc_failed;
+ }
+
/* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
priv->rx_list.count = 0;
mutex_init(&priv->rx_queue_access);
- model = of_get_property(np, "model", NULL);
-
for (i = 0; i < MAXGROUPS; i++)
priv->gfargrp[i].regs = NULL;
@@ -877,22 +895,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
goto err_grp_init;
}
- stash = of_get_property(np, "bd-stash", NULL);
+ stash = of_find_property(np, "bd-stash", NULL);
if (stash) {
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
priv->bd_stash_en = 1;
}
- stash_len = of_get_property(np, "rx-stash-len", NULL);
+ err = of_property_read_u32(np, "rx-stash-len", &stash_len);
- if (stash_len)
- priv->rx_stash_size = *stash_len;
+ if (err == 0)
+ priv->rx_stash_size = stash_len;
- stash_idx = of_get_property(np, "rx-stash-idx", NULL);
+ err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
- if (stash_idx)
- priv->rx_stash_index = *stash_idx;
+ if (err == 0)
+ priv->rx_stash_index = stash_idx;
if (stash_len || stash_idx)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
@@ -919,15 +937,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
FSL_GIANFAR_DEV_HAS_TIMER;
- ctype = of_get_property(np, "phy-connection-type", NULL);
+ err = of_property_read_string(np, "phy-connection-type", &ctype);
/* We only care about rgmii-id. The rest are autodetected */
- if (ctype && !strcmp(ctype, "rgmii-id"))
+ if (err == 0 && !strcmp(ctype, "rgmii-id"))
priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
else
priv->interface = PHY_INTERFACE_MODE_MII;
- if (of_get_property(np, "fsl,magic-packet", NULL))
+ if (of_find_property(np, "fsl,magic-packet", NULL))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -1884,14 +1902,15 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
if (!tx_queue->tx_skbuff[i])
continue;
- dma_unmap_single(priv->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+ be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
j++) {
txbdp++;
- dma_unmap_page(priv->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+ be16_to_cpu(txbdp->length),
+ DMA_TO_DEVICE);
}
txbdp++;
dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1911,7 +1930,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(priv->dev, rxbdp->bufPtr,
+ dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
priv->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
@@ -2167,16 +2186,16 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
*/
if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
flags |= TXFCB_UDP;
- fcb->phcs = udp_hdr(skb)->check;
+ fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
} else
- fcb->phcs = tcp_hdr(skb)->check;
+ fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
/* l3os is the distance between the start of the
* frame (skb->data) and the start of the IP hdr.
* l4os is the distance between the start of the
* l3 hdr and the l4 hdr
*/
- fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
+ fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
fcb->flags = flags;
@@ -2185,7 +2204,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
fcb->flags |= TXFCB_VLN;
- fcb->vlctl = skb_vlan_tag_get(skb);
+ fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2298,7 +2317,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
- lstatus = txbdp->lstatus;
+ lstatus = be32_to_cpu(txbdp->lstatus);
/* Time stamp insertion requires one additional TxBD */
if (unlikely(do_tstamp))
@@ -2306,11 +2325,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_ring_size);
if (nr_frags == 0) {
- if (unlikely(do_tstamp))
- txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
- TXBD_INTERRUPT);
- else
+ if (unlikely(do_tstamp)) {
+ u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+ lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+ } else {
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ }
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
@@ -2320,7 +2342,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag_len = skb_shinfo(skb)->frags[i].size;
- lstatus = txbdp->lstatus | frag_len |
+ lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
@@ -2336,11 +2358,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err;
/* set the TxBD length and buffer pointer */
- txbdp->bufPtr = bufaddr;
- txbdp->lstatus = lstatus;
+ txbdp->bufPtr = cpu_to_be32(bufaddr);
+ txbdp->lstatus = cpu_to_be32(lstatus);
}
- lstatus = txbdp_start->lstatus;
+ lstatus = be32_to_cpu(txbdp_start->lstatus);
}
/* Add TxPAL between FCB and frame if required */
@@ -2388,7 +2410,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
goto dma_map_err;
- txbdp_start->bufPtr = bufaddr;
+ txbdp_start->bufPtr = cpu_to_be32(bufaddr);
/* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
@@ -2396,9 +2418,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the full frame length.
*/
if (unlikely(do_tstamp)) {
- txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
- txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_len);
+ u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+ bufaddr = be32_to_cpu(txbdp_start->bufPtr);
+ bufaddr += fcb_len;
+ lstatus_ts |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - fcb_len);
+
+ txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
+ txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2421,7 +2449,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
gfar_wmb();
- txbdp_start->lstatus = lstatus;
+ txbdp_start->lstatus = cpu_to_be32(lstatus);
gfar_wmb(); /* force lstatus write before tx_skbuff */
@@ -2460,13 +2488,14 @@ dma_map_err:
if (do_tstamp)
txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
for (i = 0; i < nr_frags; i++) {
- lstatus = txbdp->lstatus;
+ lstatus = be32_to_cpu(txbdp->lstatus);
if (!(lstatus & BD_LFLAG(TXBD_READY)))
break;
- txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
- bufaddr = txbdp->bufPtr;
- dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+ lstatus &= ~BD_LFLAG(TXBD_READY);
+ txbdp->lstatus = cpu_to_be32(lstatus);
+ bufaddr = be32_to_cpu(txbdp->bufPtr);
+ dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
DMA_TO_DEVICE);
txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
}
@@ -2607,7 +2636,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
- lstatus = lbdp->lstatus;
+ lstatus = be32_to_cpu(lbdp->lstatus);
/* Only clean completed frames */
if ((lstatus & BD_LFLAG(TXBD_READY)) &&
@@ -2616,11 +2645,12 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
next = next_txbd(bdp, base, tx_ring_size);
- buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ buflen = be16_to_cpu(next->length) +
+ GMAC_FCB_LEN + GMAC_TXPAL_LEN;
} else
- buflen = bdp->length;
+ buflen = be16_to_cpu(bdp->length);
- dma_unmap_single(priv->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2631,17 +2661,18 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
skb_tstamp_tx(skb, &shhwtstamps);
- bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ gfar_clear_txbd_status(bdp);
bdp = next;
}
- bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ gfar_clear_txbd_status(bdp);
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(priv->dev, bdp->bufPtr,
- bdp->length, DMA_TO_DEVICE);
- bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
+ be16_to_cpu(bdp->length),
+ DMA_TO_DEVICE);
+ gfar_clear_txbd_status(bdp);
bdp = next_txbd(bdp, base, tx_ring_size);
}
@@ -2798,13 +2829,13 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
* were verified, then we tell the kernel that no
* checksumming is necessary. Otherwise, it is [FIXME]
*/
- if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
+ if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
+ (RXFCB_CIP | RXFCB_CTU))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
}
-
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi)
@@ -2846,8 +2877,9 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
* RXFCB_VLN is pseudo randomly set.
*/
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- fcb->flags & RXFCB_VLN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
+ be16_to_cpu(fcb->flags) & RXFCB_VLN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(fcb->vlctl));
/* Send the packet up the stack */
napi_gro_receive(napi, skb);
@@ -2874,7 +2906,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
- while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+ while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
struct sk_buff *newskb;
dma_addr_t bufaddr;
@@ -2885,21 +2917,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
- dma_unmap_single(priv->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
priv->rx_buffer_size, DMA_FROM_DEVICE);
- if (unlikely(!(bdp->status & RXBD_ERR) &&
- bdp->length > priv->rx_buffer_size))
- bdp->status = RXBD_LARGE;
+ if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
+ be16_to_cpu(bdp->length) > priv->rx_buffer_size))
+ bdp->status = cpu_to_be16(RXBD_LARGE);
/* We drop the frame if we failed to allocate a new buffer */
- if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
- bdp->status & RXBD_ERR)) {
- count_errors(bdp->status, dev);
+ if (unlikely(!newskb ||
+ !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
+ be16_to_cpu(bdp->status) & RXBD_ERR)) {
+ count_errors(be16_to_cpu(bdp->status), dev);
if (unlikely(!newskb)) {
newskb = skb;
- bufaddr = bdp->bufPtr;
+ bufaddr = be32_to_cpu(bdp->bufPtr);
} else if (skb)
dev_kfree_skb(skb);
} else {
@@ -2908,7 +2941,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
howmany++;
if (likely(skb)) {
- pkt_len = bdp->length - ETH_FCS_LEN;
+ pkt_len = be16_to_cpu(bdp->length) -
+ ETH_FCS_LEN;
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
rx_queue->stats.rx_bytes += pkt_len;
@@ -3560,7 +3594,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
phy_print_status(phydev);
}
-static struct of_device_id gfar_match[] =
+static const struct of_device_id gfar_match[] =
{
{
.type = "network",
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9e1802400c23..daa1d37de642 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -544,12 +544,12 @@ struct txbd8
{
union {
struct {
- u16 status; /* Status Fields */
- u16 length; /* Buffer length */
+ __be16 status; /* Status Fields */
+ __be16 length; /* Buffer length */
};
- u32 lstatus;
+ __be32 lstatus;
};
- u32 bufPtr; /* Buffer Pointer */
+ __be32 bufPtr; /* Buffer Pointer */
};
struct txfcb {
@@ -557,28 +557,28 @@ struct txfcb {
u8 ptp; /* Flag to enable tx timestamping */
u8 l4os; /* Level 4 Header Offset */
u8 l3os; /* Level 3 Header Offset */
- u16 phcs; /* Pseudo-header Checksum */
- u16 vlctl; /* VLAN control word */
+ __be16 phcs; /* Pseudo-header Checksum */
+ __be16 vlctl; /* VLAN control word */
};
struct rxbd8
{
union {
struct {
- u16 status; /* Status Fields */
- u16 length; /* Buffer Length */
+ __be16 status; /* Status Fields */
+ __be16 length; /* Buffer Length */
};
- u32 lstatus;
+ __be32 lstatus;
};
- u32 bufPtr; /* Buffer Pointer */
+ __be32 bufPtr; /* Buffer Pointer */
};
struct rxfcb {
- u16 flags;
+ __be16 flags;
u8 rq; /* Receive Queue index */
u8 pro; /* Layer 4 Protocol */
u16 reserved;
- u16 vlctl; /* VLAN control word */
+ __be16 vlctl; /* VLAN control word */
};
struct gianfar_skb_cb {
@@ -1287,6 +1287,14 @@ static inline void gfar_wmb(void)
#endif
}
+static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
+{
+ u32 lstatus = be32_to_cpu(bdp->lstatus);
+
+ lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp->lstatus = cpu_to_be32(lstatus);
+}
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
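A generic sketch of the annotation pattern gianfar adopts above: descriptor fields the hardware defines as big-endian are typed __be16/__be32 and only touched through byte-order helpers, so sparse can flag raw accesses. The foo_rxbd layout and helpers are hypothetical.

#include <linux/types.h>
#include <asm/byteorder.h>

struct foo_rxbd {
	__be16 status;	/* hardware (big-endian) view */
	__be16 length;
	__be32 bufptr;
};

static void foo_rxbd_publish(struct foo_rxbd *bd, u32 addr, u16 flags)
{
	/* convert exactly once, at the CPU/hardware boundary */
	bd->bufptr = cpu_to_be32(addr);
	bd->status = cpu_to_be16(flags);
}

static u16 foo_rxbd_len(const struct foo_rxbd *bd)
{
	/* hand back a CPU-endian value to callers */
	return be16_to_cpu(bd->length);
}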
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 16826341a4c9..8e3cd77aa347 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -322,10 +322,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_gianfar_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
{
u64 ns;
- u32 remainder;
unsigned long flags;
struct etsects *etsects = container_of(ptp, struct etsects, caps);
@@ -335,20 +335,19 @@ static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
spin_unlock_irqrestore(&etsects->lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
+
return 0;
}
static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
struct etsects *etsects = container_of(ptp, struct etsects, caps);
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&etsects->lock, flags);
@@ -418,8 +417,8 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.pps = 1,
.adjfreq = ptp_gianfar_adjfreq,
.adjtime = ptp_gianfar_adjtime,
- .gettime = ptp_gianfar_gettime,
- .settime = ptp_gianfar_settime,
+ .gettime64 = ptp_gianfar_gettime,
+ .settime64 = ptp_gianfar_settime,
.enable = ptp_gianfar_enable,
};
@@ -440,7 +439,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
{
struct device_node *node = dev->dev.of_node;
struct etsects *etsects;
- struct timespec now;
+ struct timespec64 now;
int err = -ENOMEM;
u32 tmr_ctrl;
unsigned long flags;
@@ -495,7 +494,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
- getnstimeofday(&now);
+ getnstimeofday64(&now);
ptp_gianfar_settime(&etsects->caps, &now);
tmr_ctrl =
@@ -554,7 +553,7 @@ static int gianfar_ptp_remove(struct platform_device *dev)
return 0;
}
-static struct of_device_id match_table[] = {
+static const struct of_device_id match_table[] = {
{ .compatible = "fsl,etsec-ptp" },
{},
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 56b774d3a13d..4dd40e057f40 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3933,7 +3933,7 @@ static int ucc_geth_remove(struct platform_device* ofdev)
return 0;
}
-static struct of_device_id ucc_geth_match[] = {
+static const struct of_device_id ucc_geth_match[] = {
{
.type = "network",
.compatible = "ucc_geth",
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 3a83bc2c613c..7b8fe866f603 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -46,17 +46,43 @@ struct tgec_mdio_controller {
#define MDIO_DATA(x) (x & 0xffff)
#define MDIO_DATA_BSY BIT(31)
+struct mdio_fsl_priv {
+ struct tgec_mdio_controller __iomem *mdio_base;
+ bool is_little_endian;
+};
+
+static u32 xgmac_read32(void __iomem *regs,
+ bool is_little_endian)
+{
+ if (is_little_endian)
+ return ioread32(regs);
+ else
+ return ioread32be(regs);
+}
+
+static void xgmac_write32(u32 value,
+ void __iomem *regs,
+ bool is_little_endian)
+{
+ if (is_little_endian)
+ iowrite32(value, regs);
+ else
+ iowrite32be(value, regs);
+}
+
/*
* Wait until the MDIO bus is free
*/
static int xgmac_wait_until_free(struct device *dev,
- struct tgec_mdio_controller __iomem *regs)
+ struct tgec_mdio_controller __iomem *regs,
+ bool is_little_endian)
{
unsigned int timeout;
/* Wait till the bus is free */
timeout = TIMEOUT;
- while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) {
+ while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+ MDIO_STAT_BSY) && timeout) {
cpu_relax();
timeout--;
}
@@ -73,13 +99,15 @@ static int xgmac_wait_until_free(struct device *dev,
* Wait till the MDIO read or write operation is complete
*/
static int xgmac_wait_until_done(struct device *dev,
- struct tgec_mdio_controller __iomem *regs)
+ struct tgec_mdio_controller __iomem *regs,
+ bool is_little_endian)
{
unsigned int timeout;
/* Wait till the MDIO write is complete */
timeout = TIMEOUT;
- while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) {
+ while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+ MDIO_STAT_BSY) && timeout) {
cpu_relax();
timeout--;
}
@@ -99,12 +127,14 @@ static int xgmac_wait_until_done(struct device *dev,
*/
static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
- struct tgec_mdio_controller __iomem *regs = bus->priv;
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
uint16_t dev_addr;
u32 mdio_ctl, mdio_stat;
int ret;
+ bool endian = priv->is_little_endian;
- mdio_stat = ioread32be(&regs->mdio_stat);
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
if (regnum & MII_ADDR_C45) {
/* Clause 45 (ie 10G) */
dev_addr = (regnum >> 16) & 0x1f;
@@ -115,29 +145,29 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
mdio_stat &= ~MDIO_STAT_ENC;
}
- iowrite32be(mdio_stat, &regs->mdio_stat);
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs);
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
if (ret)
return ret;
/* Set the port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- iowrite32be(mdio_ctl, &regs->mdio_ctl);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
if (regnum & MII_ADDR_C45) {
- iowrite32be(regnum & 0xffff, &regs->mdio_addr);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs);
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
if (ret)
return ret;
}
/* Write the value to the register */
- iowrite32be(MDIO_DATA(value), &regs->mdio_data);
+ xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
- ret = xgmac_wait_until_done(&bus->dev, regs);
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
if (ret)
return ret;
@@ -151,14 +181,16 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
*/
static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
- struct tgec_mdio_controller __iomem *regs = bus->priv;
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
uint16_t dev_addr;
uint32_t mdio_stat;
uint32_t mdio_ctl;
uint16_t value;
int ret;
+ bool endian = priv->is_little_endian;
- mdio_stat = ioread32be(&regs->mdio_stat);
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
if (regnum & MII_ADDR_C45) {
dev_addr = (regnum >> 16) & 0x1f;
mdio_stat |= MDIO_STAT_ENC;
@@ -167,41 +199,41 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_stat &= ~MDIO_STAT_ENC;
}
- iowrite32be(mdio_stat, &regs->mdio_stat);
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs);
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
if (ret)
return ret;
/* Set the Port and Device Addrs */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- iowrite32be(mdio_ctl, &regs->mdio_ctl);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
if (regnum & MII_ADDR_C45) {
- iowrite32be(regnum & 0xffff, &regs->mdio_addr);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs);
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
if (ret)
return ret;
}
/* Initiate the read */
- iowrite32be(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl);
+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
- ret = xgmac_wait_until_done(&bus->dev, regs);
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
if (ret)
return ret;
/* Return all Fs if nothing was there */
- if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+ if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = ioread32be(&regs->mdio_data) & 0xffff;
+ value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
dev_dbg(&bus->dev, "read %04x\n", value);
return value;
@@ -212,6 +244,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct resource res;
+ struct mdio_fsl_priv *priv;
int ret;
ret = of_address_to_resource(np, 0, &res);
@@ -220,7 +253,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return ret;
}
- bus = mdiobus_alloc();
+ bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
if (!bus)
return -ENOMEM;
@@ -231,12 +264,19 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
/* Set the PHY base address */
- bus->priv = of_iomap(np, 0);
- if (!bus->priv) {
+ priv = bus->priv;
+ priv->mdio_base = of_iomap(np, 0);
+ if (!priv->mdio_base) {
ret = -ENOMEM;
goto err_ioremap;
}
+ if (of_get_property(pdev->dev.of_node,
+ "little-endian", NULL))
+ priv->is_little_endian = true;
+ else
+ priv->is_little_endian = false;
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
@@ -248,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return 0;
err_registration:
- iounmap(bus->priv);
+ iounmap(priv->mdio_base);
err_ioremap:
mdiobus_free(bus);
@@ -267,7 +307,7 @@ static int xgmac_mdio_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id xgmac_mdio_match[] = {
+static const struct of_device_id xgmac_mdio_match[] = {
{
.compatible = "fsl,fman-xmdio",
},
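Stand-alone sketch of the endian-selectable MMIO accessors introduced in xgmac_mdio above. The "little-endian" property follows the binding the driver checks; the foo_* names and of_property_read_bool() usage are illustrative assumptions.

#include <linux/io.h>
#include <linux/of.h>

struct foo_mdio_priv {
	void __iomem *base;
	bool little_endian;
};

static u32 foo_read32(struct foo_mdio_priv *priv, unsigned int off)
{
	if (priv->little_endian)
		return ioread32(priv->base + off);
	return ioread32be(priv->base + off);
}

static void foo_write32(struct foo_mdio_priv *priv, unsigned int off, u32 val)
{
	if (priv->little_endian)
		iowrite32(val, priv->base + off);
	else
		iowrite32be(val, priv->base + off);
}

/* e.g. in probe:
 *	priv->little_endian = of_property_read_bool(np, "little-endian");
 */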
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b72d238695d7..3b39fdddeb57 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -413,6 +413,15 @@ out:
return count;
}
+static void hip04_start_tx_timer(struct hip04_priv *priv)
+{
+ unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
+
+ /* allow timer to fire after half the time at the earliest */
+ hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
+ ns, HRTIMER_MODE_REL);
+}
+
static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct hip04_priv *priv = netdev_priv(ndev);
@@ -466,8 +475,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
/* cleanup not pending yet, start a new timer */
- hrtimer_start_expires(&priv->tx_coalesce_timer,
- HRTIMER_MODE_REL);
+ hip04_start_tx_timer(priv);
}
return NETDEV_TX_OK;
@@ -549,7 +557,7 @@ done:
/* clean up tx descriptors and start a new timer if necessary */
tx_remaining = hip04_tx_reclaim(ndev, false);
if (rx < budget && tx_remaining)
- hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
+ hip04_start_tx_timer(priv);
return rx;
}
@@ -809,7 +817,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
struct hip04_priv *priv;
struct resource *res;
unsigned int irq;
- ktime_t txtime;
int ret;
ndev = alloc_etherdev(sizeof(struct hip04_priv));
@@ -846,9 +853,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- /* allow timer to fire after half the time at the earliest */
- txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
- hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
priv->tx_coalesce_timer.function = tx_done;
priv->map = syscon_node_to_regmap(arg.np);
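Sketch of the relative-with-slack arming pattern hip04 switches to above: rather than a fixed expiry recorded at probe time, each arm call passes the interval plus an allowed slack so timer expiries can be coalesced. Names are illustrative.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void foo_arm_coalesce_timer(struct hrtimer *timer, unsigned int usecs)
{
	unsigned long ns = usecs * NSEC_PER_USEC / 2;

	/* earliest expiry after half the interval, latest after the full one */
	hrtimer_start_range_ns(timer, ns_to_ktime(ns), ns, HRTIMER_MODE_REL);
}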
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index c05e50759621..2a0dc127df3f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -103,7 +103,7 @@ static int ehea_probe_adapter(struct platform_device *dev);
static int ehea_remove(struct platform_device *dev);
-static struct of_device_id ehea_module_device_table[] = {
+static const struct of_device_id ehea_module_device_table[] = {
{
.name = "lhea",
.compatible = "IBM,lhea",
@@ -116,7 +116,7 @@ static struct of_device_id ehea_module_device_table[] = {
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);
-static struct of_device_id ehea_device_table[] = {
+static const struct of_device_id ehea_device_table[] = {
{
.name = "lhea",
.compatible = "IBM,lhea",
@@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void)
{
int ret = 0;
- if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+ if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
return 0;
ret = ehea_create_busmap();
@@ -3381,12 +3381,14 @@ out3:
out2:
unregister_reboot_notifier(&ehea_reboot_nb);
out:
+ atomic_dec(&ehea_memory_hooks_registered);
return ret;
}
static void ehea_unregister_memory_hooks(void)
{
- if (atomic_read(&ehea_memory_hooks_registered))
+ /* Only remove the hooks if we've registered them */
+ if (atomic_read(&ehea_memory_hooks_registered) == 0)
return;
unregister_reboot_notifier(&ehea_reboot_nb);
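The counting idiom behind the ehea registration fix above, sketched generically: only the 0-to-1 transition performs the work, and a failure rolls the counter back so a later caller can retry. All foo_* names are hypothetical.

#include <linux/atomic.h>

static atomic_t foo_hooks_registered = ATOMIC_INIT(0);

static int foo_do_register(void)
{
	/* hypothetical: the real one-time setup work goes here */
	return 0;
}

static int foo_register_hooks_once(void)
{
	int ret;

	/* only the caller that moves the counter from 0 to 1 registers */
	if (atomic_inc_return(&foo_hooks_registered) > 1)
		return 0;

	ret = foo_do_register();
	if (ret)
		atomic_dec(&foo_hooks_registered);	/* allow a retry later */
	return ret;
}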
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 162762d1a12c..de7919322190 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -79,13 +79,6 @@ MODULE_AUTHOR
("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
-/*
- * PPC64 doesn't (yet) have a cacheable_memcpy
- */
-#ifdef CONFIG_PPC64
-#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
-#endif
-
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
@@ -1673,7 +1666,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
dev_kfree_skb(dev->rx_sg_skb);
dev->rx_sg_skb = NULL;
} else {
- cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
+ memcpy(skb_tail_pointer(dev->rx_sg_skb),
dev->rx_skb[slot]->data, len);
skb_put(dev->rx_sg_skb, len);
emac_recycle_rx_skb(dev, slot, len);
@@ -1730,8 +1723,7 @@ static int emac_poll_rx(void *param, int budget)
goto oom;
skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
- cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
- len + 2);
+ memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
emac_recycle_rx_skb(dev, slot, len);
skb = copy_skb;
} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
@@ -2981,7 +2973,7 @@ static int emac_remove(struct platform_device *ofdev)
}
/* XXX Features in here should be replaced by properties... */
-static struct of_device_id emac_match[] =
+static const struct of_device_id emac_match[] =
{
{
.type = "network",
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index dddaab11a4c7..fdb5cdb3cd15 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -753,7 +753,7 @@ static int mal_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id mal_platform_match[] =
+static const struct of_device_id mal_platform_match[] =
{
{
.compatible = "ibm,mcmal",
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 457088fc5b06..206ccbbae7bb 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -305,7 +305,7 @@ static int rgmii_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id rgmii_match[] =
+static const struct of_device_id rgmii_match[] =
{
{
.compatible = "ibm,rgmii",
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index cb18e7f917c6..32cb6c9007c5 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -148,7 +148,7 @@ static int tah_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id tah_match[] =
+static const struct of_device_id tah_match[] =
{
{
.compatible = "ibm,tah",
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 36409ccb75ea..8727b865ea02 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -295,7 +295,7 @@ static int zmii_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id zmii_match[] =
+static const struct of_device_id zmii_match[] =
{
{
.compatible = "ibm,zmii",
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index cd7675ac5bf9..18134766a114 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
- if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
+ if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
break;
if (i == IBMVETH_NUM_BUFF_POOLS)
@@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
adapter->rx_buff_pool[i].active = 1;
- if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+ if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
dev->mtu = new_mtu;
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index e9c3a87e5b11..1a450f4b6b12 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -414,7 +414,7 @@ enum cb_status {
/**
* cb_command - Command Block flags
- * @cb_tx_nc: 0: controler does CRC (normal), 1: CRC from skb memory
+ * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
*/
enum cb_command {
cb_nop = 0x0000,
@@ -899,7 +899,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
/* Order is important otherwise we'll be in a race with h/w:
* set S-bit in current first, then clear S-bit in previous. */
cb->command |= cpu_to_le16(cb_s);
- wmb();
+ dma_wmb();
cb->prev->command &= cpu_to_le16(~cb_s);
while (nic->cb_to_send != nic->cb_to_use) {
@@ -1843,7 +1843,7 @@ static int e100_tx_clean(struct nic *nic)
for (cb = nic->cb_to_clean;
cb->status & cpu_to_le16(cb_complete);
cb = nic->cb_to_clean = cb->next) {
- rmb(); /* read skb after status */
+ dma_rmb(); /* read skb after status */
netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
"cb[%d]->status = 0x%04X\n",
(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
@@ -1993,7 +1993,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
"status=0x%04X\n", rfd_status);
- rmb(); /* read size after status bit */
+ dma_rmb(); /* read size after status bit */
/* If data isn't ready, nothing to indicate */
if (unlikely(!(rfd_status & cb_complete))) {
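The rmb()/wmb() to dma_rmb()/dma_wmb() swaps in e100 and e1000 above rest on one ordering rule: both sides of the barrier are coherent DMA memory rather than MMIO, so the lighter dma_* barriers are sufficient. A hedged sketch with a hypothetical little-endian descriptor:

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

struct foo_desc {
	__le64 addr;
	__le16 len;
	__le16 status;
};

#define FOO_DESC_DONE	0x0001

static void foo_post_desc(struct foo_desc *desc, dma_addr_t addr, u16 len)
{
	desc->addr = cpu_to_le64(addr);
	desc->len = cpu_to_le16(len);
	/* make payload fields visible to the device before flipping ownership */
	dma_wmb();
	desc->status = 0;
}

static int foo_desc_done(const struct foo_desc *desc, u16 *len)
{
	if (!(le16_to_cpu(desc->status) & FOO_DESC_DONE))
		return 0;
	/* don't read len/addr before the DONE bit has been observed */
	dma_rmb();
	*len = le16_to_cpu(desc->len);
	return 1;
}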
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7f997d36948f..983eb4e6f7aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
+{
+}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
@@ -516,6 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
+ netif_carrier_off(netdev);
/* disable receives in the hardware */
rctl = er32(RCTL);
@@ -544,7 +550,6 @@ void e1000_down(struct e1000_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
- netif_carrier_off(netdev);
e1000_reset(adapter);
e1000_clean_all_tx_rings(adapter);
@@ -1111,7 +1116,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_read_mac_addr(hw))
e_err(probe, "EEPROM Read Error\n");
}
- /* don't block initalization here due to bad MAC address */
+ /* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr))
@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
msleep(1);
/* e1000_down has a dependency on max_frame_size */
hw->max_frame_size = max_frame;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ /* prevent buffers from being reallocated */
+ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
e1000_down(adapter);
+ }
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
@@ -3848,7 +3856,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
+ dma_rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
@@ -4146,7 +4154,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (*work_done >= work_to_do)
break;
(*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
@@ -4367,7 +4375,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (*work_done >= work_to_do)
break;
(*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
length = le16_to_cpu(rx_desc->length);
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index bb7ab3c321d6..0570c668ec3d 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -141,6 +141,7 @@
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HEX 0x00010000
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9416e5a7e0c8..0abc942c966e 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -40,6 +40,7 @@
#include <linux/ptp_classify.h>
#include <linux/mii.h>
#include <linux/mdio.h>
+#include <linux/pm_qos.h>
#include "hw.h"
struct e1000_info;
@@ -132,6 +133,7 @@ enum e1000_boards {
board_pchlan,
board_pch2lan,
board_pch_lpt,
+ board_pch_spt
};
struct e1000_ps_page {
@@ -342,6 +344,7 @@ struct e1000_adapter {
struct timecounter tc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
+ struct pm_qos_request pm_qos_req;
u16 eee_advert;
};
@@ -501,6 +504,7 @@ extern const struct e1000_info e1000_ich10_info;
extern const struct e1000_info e1000_pch_info;
extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
+extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 865ce45f9ec3..11f486e4ff7b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
mask |= (1 << 18);
break;
default:
break;
}
- if (mac->type == e1000_pch_lpt)
+ if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt))
wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
E1000_FWSM_WLOCK_MAC_SHIFT;
for (i = 0; i < mac->rar_entry_count; i++) {
- if (mac->type == e1000_pch_lpt) {
+ if ((mac->type == e1000_pch_lpt) ||
+ (mac->type == e1000_pch_spt)) {
/* Cannot test write-protected SHRAL[n] registers */
if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
continue;
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 72f5475c4b90..19e8c487db06 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -87,6 +87,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_I218_V2 0x15A1
#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */
#define E1000_REVISION_4 4
@@ -108,6 +112,7 @@ enum e1000_mac_type {
e1000_pchlan,
e1000_pch2lan,
e1000_pch_lpt,
+ e1000_pch_spt,
};
enum e1000_media_type {
@@ -153,6 +158,7 @@ enum e1000_bus_width {
e1000_bus_width_pcie_x1,
e1000_bus_width_pcie_x2,
e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_pcie_x8 = 8,
e1000_bus_width_32,
e1000_bus_width_64,
e1000_bus_width_reserved
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 48b74a549155..9d81c0317433 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 *data);
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data);
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 *data);
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 data);
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
@@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (ret_val)
return false;
out:
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
/* Unforce SMBus mode in PHY */
e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
@@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -590,35 +601,54 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 gfpreg, sector_base_addr, sector_end_addr;
u16 i;
-
- /* Can't read flash registers if the register set isn't mapped. */
- if (!hw->flash_address) {
- e_dbg("ERROR: Flash registers not mapped\n");
- return -E1000_ERR_CONFIG;
- }
+ u32 nvm_size;
nvm->type = e1000_nvm_flash_sw;
- gfpreg = er32flash(ICH_FLASH_GFPREG);
+ if (hw->mac.type == e1000_pch_spt) {
+ /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+ * STRAP register. This is because in SPT the GbE Flash region
+ * is no longer accessed through the flash registers. Instead,
+ * the mechanism has changed, and the Flash region access
+ * registers are now implemented in GbE memory space.
+ */
+ nvm->flash_base_addr = 0;
+ nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
+ * NVM_SIZE_MULTIPLIER;
+ nvm->flash_bank_size = nvm_size / 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ /* Set the base address for flash register access */
+ hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+ } else {
+ /* Can't read flash registers if register set isn't mapped. */
+ if (!hw->flash_address) {
+ e_dbg("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
- /* sector_X_addr is a "sector"-aligned address (4096 bytes)
- * Add 1 to sector_end_addr since this sector is included in
- * the overall size.
- */
- sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
- sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+ gfpreg = er32flash(ICH_FLASH_GFPREG);
- /* flash_base_addr is byte-aligned */
- nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
- /* find total size of the NVM, then cut in half since the total
- * size represents two separate NVM banks.
- */
- nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT);
- nvm->flash_bank_size /= 2;
- /* Adjust to word count */
- nvm->flash_bank_size /= sizeof(u16);
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr
+ << FLASH_SECTOR_ADDR_SHIFT;
+
+ /* find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ }
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
@@ -682,6 +712,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch2lan;
/* fall-through */
case e1000_pch_lpt:
+ case e1000_pch_spt:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -699,7 +730,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
break;
}
- if (mac->type == e1000_pch_lpt) {
+ if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
@@ -919,8 +950,9 @@ release:
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
- if (!link || ((status & E1000_STATUS_SPEED_100) &&
- (status & E1000_STATUS_FD)))
+ if ((hw->phy.revision > 5) || !link ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
goto update_fextnvm6;
ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
@@ -1100,6 +1132,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
+ /* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
+ * LPLU and disable Gig speed when entering ULP
+ */
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ phy_reg);
+ if (ret_val)
+ goto release;
+ }
+
/* Force SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val)
@@ -1302,7 +1349,8 @@ out:
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val;
+ s32 ret_val, tipg_reg = 0;
+ u16 emi_addr, emi_val = 0;
bool link;
u16 phy_reg;
@@ -1333,48 +1381,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* the IPG and reduce Rx latency in the PHY.
*/
if (((hw->mac.type == e1000_pch2lan) ||
- (hw->mac.type == e1000_pch_lpt)) && link) {
+ (hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) && link) {
u32 reg;
reg = er32(STATUS);
+ tipg_reg = er32(TIPG);
+ tipg_reg &= ~E1000_TIPG_IPGT_MASK;
+
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
- u16 emi_addr;
+ tipg_reg |= 0xFF;
+ /* Reduce Rx latency in analog PHY */
+ emi_val = 0;
+ } else {
- reg = er32(TIPG);
- reg &= ~E1000_TIPG_IPGT_MASK;
- reg |= 0xFF;
- ew32(TIPG, reg);
+ /* Roll back the default values */
+ tipg_reg |= 0x08;
+ emi_val = 1;
+ }
- /* Reduce Rx latency in analog PHY */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
+ ew32(TIPG, tipg_reg);
- if (hw->mac.type == e1000_pch2lan)
- emi_addr = I82579_RX_CONFIG;
- else
- emi_addr = I217_RX_CONFIG;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
- ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
- hw->phy.ops.release(hw);
+ hw->phy.ops.release(hw);
- if (ret_val)
- return ret_val;
- }
+ if (ret_val)
+ return ret_val;
}
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type == e1000_pch_spt)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
}
-
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
/* Set platform power management values for
* Latency Tolerance Reporting (LTR)
*/
@@ -1386,6 +1441,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
+ /* FEXTNVM6 K1-off workaround */
+ if (hw->mac.type == e1000_pch_spt) {
+ u32 pcieanacfg = er32(PCIEANACFG);
+ u32 fextnvm6 = er32(FEXTNVM6);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+ ew32(FEXTNVM6, fextnvm6);
+ }
+
if (!link)
return 0; /* No link detected */
@@ -1479,6 +1547,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -1929,6 +1998,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -2961,6 +3031,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
s32 ret_val;
switch (hw->mac.type) {
+ /* In SPT, read from the CTRL_EXT reg instead of
+ * accessing the sector valid bits from the NVM
+ */
+ case e1000_pch_spt:
+ *bank = er32(CTRL_EXT) & E1000_CTRL_EXT_NVMVS;
+ if ((*bank == 0) || (*bank == 1)) {
+ e_dbg("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ } else {
+ *bank = *bank - 2;
+ return 0;
+ }
+ break;
case e1000_ich8lan:
case e1000_ich9lan:
eecd = er32(EECD);
@@ -3008,6 +3092,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
}
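
The CTRL_EXT-based detection added in the hunk above maps the two NVMVS bits straight to a bank index; a minimal sketch of that mapping, with a hypothetical helper name, assuming E1000_CTRL_EXT_NVMVS is the 0x3 mask defined later in ich8lan.h:

#include <stdint.h>

/* SPT bank selection from the CTRL_EXT NVM-valid-sector field:
 * values 0 and 1 mean no valid bank; 2 and 3 select bank 0 and 1.
 * Returns the bank index, or -1 when no bank is valid.
 */
static int spt_valid_bank_from_ctrl_ext(uint32_t ctrl_ext)
{
	uint32_t nvmvs = ctrl_ext & 0x3; /* E1000_CTRL_EXT_NVMVS */

	return (nvmvs < 2) ? -1 : (int)(nvmvs - 2);
}
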
/**
+ * e1000_read_nvm_spt - NVM access for SPT
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in words) of the word(s) to read.
+ * @words: Size of data to read in words.
+ * @data: pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM
+ **/
+static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = 0;
+ u32 bank = 0;
+ u32 dword = 0;
+ u16 offset_to_read;
+ u16 i;
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = 0;
+
+ for (i = 0; i < words; i += 2) {
+ if (words - i == 1) {
+ if (dev_spec->shadow_ram[offset + i].modified) {
+ data[i] =
+ dev_spec->shadow_ram[offset + i].value;
+ } else {
+ offset_to_read = act_offset + i -
+ ((act_offset + i) % 2);
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ if ((act_offset + i) % 2 == 0)
+ data[i] = (u16)(dword & 0xFFFF);
+ else
+ data[i] = (u16)((dword >> 16) & 0xFFFF);
+ }
+ } else {
+ offset_to_read = act_offset + i;
+ if (!(dev_spec->shadow_ram[offset + i].modified) ||
+ !(dev_spec->shadow_ram[offset + i + 1].modified)) {
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ }
+ if (dev_spec->shadow_ram[offset + i].modified)
+ data[i] =
+ dev_spec->shadow_ram[offset + i].value;
+ else
+ data[i] = (u16)(dword & 0xFFFF);
+ if (dev_spec->shadow_ram[offset + i].modified)
+ data[i + 1] =
+ dev_spec->shadow_ram[offset + i + 1].value;
+ else
+ data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ e_dbg("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
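
The even/odd branches in e1000_read_nvm_spt above all reduce to selecting one 16-bit half of a 32-bit flash read; a minimal sketch of that selection (hypothetical helper name, not part of the patch):

#include <stdint.h>

/* Extract the 16-bit NVM word at word offset 'woff' from the 32-bit
 * flash dword that contains it, mirroring the (act_offset + i) % 2
 * test used above: even offsets map to the low half, odd to the high half.
 */
static uint16_t nvm_word_from_flash_dword(uint32_t dword, uint32_t woff)
{
	return (woff % 2 == 0) ? (uint16_t)(dword & 0xFFFF)
			       : (uint16_t)(dword >> 16);
}
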
+
+/**
* e1000_read_nvm_ich8lan - Read word(s) from the NVM
* @hw: pointer to the HW structure
* @offset: The offset (in bytes) of the word(s) to read.
@@ -3090,8 +3267,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Clear FCERR and DAEL in hw status by writing 1 */
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
-
- ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress
* bit to check against, in order to start a new cycle or
@@ -3107,7 +3286,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* Begin by setting Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
s32 i;
@@ -3128,7 +3310,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
e_dbg("Flash controller busy, cannot get access\n");
}
@@ -3151,9 +3337,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
u32 i = 0;
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
/* wait till FDONE bit is set to 1 */
do {
@@ -3170,6 +3363,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
}
/**
+ * e1000_read_flash_dword_ich8lan - Read dword from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash dword at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+ return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
+/**
* e1000_read_flash_word_ich8lan - Read word from flash
* @hw: pointer to the HW structure
* @offset: offset to data location
@@ -3201,7 +3411,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u16 word = 0;
- ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+ /* In SPT, only 32-bit access is supported,
+ * so this function should not be called.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ else
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
if (ret_val)
return ret_val;
@@ -3287,6 +3504,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a dword from the NVM using the flash access registers.
+ **/
+
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+ hw->mac.type != e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+ /* In SPT, this register is in LAN memory space, not flash.
+ * Therefore, only 32-bit access is supported.
+ */
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+
+ /* fldbcount encodes the transfer size minus one; request a dword */
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ /* In SPT, this register is in LAN memory space, not flash.
+ * Therefore, only 32-bit access is supported.
+ */
+ ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1; if so, clear it and try
+ * the whole sequence a few more times, else read in the
+ * Flash Data0 register (least significant byte first).
+ */
+ if (!ret_val) {
+ *data = er32flash(ICH_FLASH_FDATA0);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
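
The recurring er32flash(ICH_FLASH_HSFSTS) >> 16 and ew32flash(ICH_FLASH_HSFSTS, regval << 16) pattern reflects the SPT layout in which the 16-bit flash control word occupies the upper half of the combined 32-bit status/control register. Below is a hedged sketch of that read-modify-write, with function-pointer stand-ins for er32flash()/ew32flash(); it is an illustration, not the driver's API.

#include <stdint.h>

/* Illustrative only: update the SPT flash control word, which lives in
 * the upper 16 bits of the combined 32-bit HSFSTS/HSFCTL register.
 * The low (status) half is written back as zero here, matching the
 * driver's usage above.
 */
static void spt_set_flash_ctrl_bits(uint32_t reg_addr, uint16_t ctrl_bits,
				    uint32_t (*read32)(uint32_t),
				    void (*write32)(uint32_t, uint32_t))
{
	uint16_t ctrl = (uint16_t)(read32(reg_addr) >> 16); /* current HSFCTL */

	ctrl |= ctrl_bits;
	write32(reg_addr, (uint32_t)ctrl << 16);
}
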
+
+/**
* e1000_write_nvm_ich8lan - Write word(s) to the NVM
* @hw: pointer to the HW structure
* @offset: The offset (in bytes) of the word(s) to write.
@@ -3321,7 +3614,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * e1000_update_nvm_checksum_spt - Update the checksum for NVM
* @hw: pointer to the HW structure
*
* The NVM checksum is updated by calling the generic update_nvm_checksum,
@@ -3331,13 +3624,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
* After a successful commit, the shadow ram is cleared and is ready for
* future writes.
**/
-static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
s32 ret_val;
- u16 data;
+ u32 dword = 0;
ret_val = e1000e_update_nvm_checksum_generic(hw);
if (ret_val)
@@ -3371,12 +3664,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
}
-
- for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
/* Determine whether to write the value stored
* in the other NVM bank or a modified value stored
* in the shadow RAM
*/
+ ret_val = e1000_read_flash_dword_ich8lan(hw,
+ i + old_bank_offset,
+ &dword);
+
+ if (dev_spec->shadow_ram[i].modified) {
+ dword &= 0xffff0000;
+ dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+ }
+ if (dev_spec->shadow_ram[i + 1].modified) {
+ dword &= 0x0000ffff;
+ dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+ << 16);
+ }
+ if (ret_val)
+ break;
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD - 1)
+ dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+ usleep_range(100, 200);
+
+ /* Write the dword to the new bank. Offset is in words. */
+ act_offset = i + new_bank_offset;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+ dword);
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+ e_dbg("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bits 15:14
+ * of word 0x13 to 10b. This can be done without an erase
+ * since these bits start out as 11b and we only need to
+ * clear bit 14.
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+ /* Offset is in words, but we read a whole dword */
+ --act_offset;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0xBFFFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * the high byte of its signature word (0x13) to 0. This can
+ * be done without an erase because flash erase sets all bits
+ * to 1's; we can always flip 1's to 0's without erasing.
+ */
+
+ /* Offset is in words, but we read a whole dword */
+ act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0x00FFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ usleep_range(10000, 20000);
+ }
+
+out:
+ if (ret_val)
+ e_dbg("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
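
The per-dword merge at the top of e1000_update_nvm_checksum_spt overlays any modified shadow-RAM words onto the dword read back from the old bank; a compact sketch with a hypothetical shadow-entry type, illustrative only:

#include <stdbool.h>
#include <stdint.h>

struct shadow_word {	/* stand-in for the driver's shadow RAM entry */
	bool modified;
	uint16_t value;
};

/* Overlay the even ('lo') and odd ('hi') shadow words onto the dword
 * read from the old flash bank, as done in the loop above.
 */
static uint32_t merge_shadow_into_dword(uint32_t dword,
					const struct shadow_word *lo,
					const struct shadow_word *hi)
{
	if (lo->modified)
		dword = (dword & 0xFFFF0000u) | lo->value;
	if (hi->modified)
		dword = (dword & 0x0000FFFFu) | ((uint32_t)hi->value << 16);
	return dword;
}
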
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u16 data = 0;
+
+ ret_val = e1000e_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
if (dev_spec->shadow_ram[i].modified) {
data = dev_spec->shadow_ram[i].value;
} else {
@@ -3498,6 +3954,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -3583,9 +4040,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u8 count = 0;
- if (size < 1 || size > 2 || data > size * 0xff ||
- offset > ICH_FLASH_LINEAR_ADDR_MASK)
- return -E1000_ERR_NVM;
+ if (hw->mac.type == e1000_pch_spt) {
+ if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ } else {
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
@@ -3596,12 +4057,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val)
break;
+ /* In SPT, this register is in LAN memory space, not
+ * flash. Therefore, only 32-bit access is supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
- hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
- ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+ /* In SPT, this register is in LAN memory space,
+ * not flash. Therefore, only 32-bit access is
+ * supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
@@ -3640,6 +4114,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_write_flash_data32_ich8lan - Writes a dword to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to write.
+ * @data: The dword (4 bytes) to write to the NVM.
+ *
+ * Writes a dword to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val;
+ u8 count = 0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+
+ /* In SPT, this register is in LAN memory space, not
+ * flash. Therefore, only 32-bit access is supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
+ >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+ /* In SPT, this register is in LAN memory space,
+ * not flash. Therefore, only 32-bit access is
+ * supported.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ew32flash(ICH_FLASH_FDATA0, data);
+
+ /* Check if FCERR is set to 1; if so, clear it and try
+ * the whole sequence a few more times, else we're done.
+ */
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+ if (!ret_val)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
* e1000_write_flash_byte_ich8lan - Write a single byte to NVM
* @hw: pointer to the HW structure
* @offset: The index of the byte to read.
@@ -3656,6 +4214,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in words) of the dword to write.
+ * @dword: The dword to write to the NVM.
+ *
+ * Writes a single dword to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+ if (!ret_val)
+ return ret_val;
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ e_dbg("Retrying Dword %8.8X at offset %u\n", dword, offset);
+ usleep_range(100, 200);
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+ if (!ret_val)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return 0;
+}
+
+/**
* e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
* @hw: pointer to the HW structure
* @offset: The offset of the byte to write.
@@ -3759,9 +4351,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
- hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval =
+ er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
- ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of an index within the
* block into Flash Linear address field in Flash
@@ -4180,7 +4781,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
ew32(RFCTL, reg);
/* Enable ECC on Lynxpoint */
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
reg = er32(PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
ew32(PBECCSTS, reg);
@@ -4583,7 +5185,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(device_id == E1000_DEV_ID_PCH_I218_LM3) ||
- (device_id == E1000_DEV_ID_PCH_I218_V3)) {
+ (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type == e1000_pch_spt)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
@@ -5058,6 +5661,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = {
.write = e1000_write_nvm_ich8lan,
};
+static const struct e1000_nvm_operations spt_nvm_ops = {
+ .acquire = e1000_acquire_nvm_ich8lan,
+ .release = e1000_release_nvm_ich8lan,
+ .read = e1000_read_nvm_spt,
+ .update = e1000_update_nvm_checksum_spt,
+ .reload = e1000e_reload_nvm_generic,
+ .valid_led_default = e1000_valid_led_default_ich8lan,
+ .validate = e1000_validate_nvm_checksum_ich8lan,
+ .write = e1000_write_nvm_ich8lan,
+};
+
const struct e1000_info e1000_ich8_info = {
.mac = e1000_ich8lan,
.flags = FLAG_HAS_WOL
@@ -5166,3 +5780,23 @@ const struct e1000_info e1000_pch_lpt_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &ich8_nvm_ops,
};
+
+const struct e1000_info e1000_pch_spt_info = {
+ .mac = e1000_pch_spt,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9018,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 8066a498eaac..770a573b9eea 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -95,9 +95,18 @@
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
+/* bit for disabling packet buffer read */
+#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+#define K1_ENTRY_LATENCY 0
+#define K1_MIN_TIME 1
+#define NVM_SIZE_MULTIPLIER 4096 /* multiplier for NVMS field */
+#define E1000_FLASH_BASE_ADDR 0xE000 /* offset of NVM access regs */
+#define E1000_CTRL_EXT_NVMVS 0x3 /* NVM valid sector */
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1e8c40fd5c3d..c509a5c900f5 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pchlan] = &e1000_pch_info,
[board_pch2lan] = &e1000_pch2_info,
[board_pch_lpt] = &e1000_pch_lpt_info,
+ [board_pch_spt] = &e1000_pch_spt_info,
};
struct e1000_reg_info {
@@ -946,7 +947,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
if (*work_done >= work_to_do)
break;
(*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
skb = buffer_info->skb;
buffer_info->skb = NULL;
@@ -1231,7 +1232,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
(count < tx_ring->count)) {
bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
+ dma_rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
@@ -1331,7 +1332,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
break;
(*work_done)++;
skb = buffer_info->skb;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
/* in the packet split case this is header only */
prefetch(skb->data - NET_IP_ALIGN);
@@ -1535,7 +1536,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
if (*work_done >= work_to_do)
break;
(*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
skb = buffer_info->skb;
buffer_info->skb = NULL;
@@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt))) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt))) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
- } else if (hw->mac.type == e1000_pch_lpt) {
+ } else if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
@@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TCTL, tctl);
hw->mac.ops.config_collision_dist(hw);
+
+ /* SPT Si errata workaround to avoid data corruption */
+ if (hw->mac.type == e1000_pch_spt) {
+ u32 reg_val;
+
+ reg_val = er32(IOSFPC);
+ reg_val |= E1000_RCTL_RDMTS_HEX;
+ ew32(IOSFPC, reg_val);
+
+ reg_val = er32(TARC(0));
+ reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
+ ew32(TARC(0), reg_val);
+ }
}
/**
@@ -3280,9 +3297,9 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RXDCTL(0), rxdctl | 0x3);
}
- pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
+ pm_qos_update_request(&adapter->pm_qos_req, lat);
} else {
- pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ pm_qos_update_request(&adapter->pm_qos_req,
PM_QOS_DEFAULT_VALUE);
}
@@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
struct e1000_hw *hw = &adapter->hw;
u32 incvalue, incperiod, shift;
- /* Make sure clock is enabled on I217 before checking the frequency */
- if ((hw->mac.type == e1000_pch_lpt) &&
+ /* Make sure clock is enabled on I217/I218/I219 before checking
+ * the frequency
+ */
+ if (((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) &&
!(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
@@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- /* On I217, the clock frequency is 25MHz or 96MHz as
- * indicated by the System Clock Frequency Indication
+ case e1000_pch_spt:
+ /* On I217, I218 and I219, the clock frequency is 25MHz
+ * or 96MHz as indicated by the System Clock Frequency
+ * Indication
*/
- if ((hw->mac.type != e1000_pch_lpt) ||
+ if (((hw->mac.type != e1000_pch_lpt) &&
+ (hw->mac.type != e1000_pch_spt)) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHz;
@@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
fc->refresh_time = 0x0400;
if (adapter->netdev->mtu <= ETH_DATA_LEN) {
@@ -4060,6 +4084,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
*/
set_bit(__E1000_DOWN, &adapter->state);
+ netif_carrier_off(netdev);
+
/* disable receives in the hardware */
rctl = er32(RCTL);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
@@ -4084,8 +4110,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netif_carrier_off(netdev);
-
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
@@ -4379,7 +4403,7 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
- pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it.
@@ -4490,7 +4514,7 @@ static int e1000_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+ pm_qos_remove_request(&adapter->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgpdc += er32(MGTPDC);
/* Correctable ECC Errors */
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
- } else if (hw->mac.type == e1000_pch_lpt) {
+ } else if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
@@ -6807,7 +6833,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
if ((adapter->flags & FLAG_HAS_FLASH) &&
- (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
+ (hw->mac.type < e1000_pch_spt)) {
flash_start = pci_resource_start(pdev, 1);
flash_len = pci_resource_len(pdev, 1);
adapter->hw.flash_address = ioremap(flash_start, flash_len);
@@ -6847,7 +6874,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_hw_init;
if ((adapter->flags & FLAG_IS_ICH) &&
- (adapter->flags & FLAG_READ_ONLY_NVM))
+ (adapter->flags & FLAG_READ_ONLY_NVM) &&
+ (hw->mac.type < e1000_pch_spt))
e1000e_write_protect_nvm_ich8lan(&adapter->hw);
hw->mac.ops.get_bus_info(&adapter->hw);
@@ -7043,7 +7071,7 @@ err_hw_init:
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
- if (adapter->hw.flash_address)
+ if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
iounmap(adapter->hw.flash_address);
e1000e_reset_interrupt_capability(adapter);
err_flashmap:
@@ -7116,7 +7144,8 @@ static void e1000_remove(struct pci_dev *pdev)
kfree(adapter->rx_ring);
iounmap(adapter->hw.hw_addr);
- if (adapter->hw.flash_address)
+ if ((adapter->hw.flash_address) &&
+ (adapter->hw.mac.type < e1000_pch_spt))
iounmap(adapter->hw.flash_address);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
@@ -7213,6 +7242,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 978ef9c4a043..8d7b21dc7e19 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -106,20 +106,18 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u32 remainder;
u64 ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
- ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
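
For context, ns_to_timespec64() in the hunk above performs the same seconds/nanoseconds split that the removed div_u64_rem() code did; a minimal user-space sketch of that conversion, with plain stdint types standing in for the kernel helpers:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ts64 {	/* stand-in for struct timespec64 */
	int64_t tv_sec;
	long tv_nsec;
};

/* Split a non-negative nanosecond counter into whole seconds and the
 * remaining nanoseconds, equivalent to ns_to_timespec64() for this case.
 */
static struct ts64 ns_to_ts64(uint64_t ns)
{
	struct ts64 ts;

	ts.tv_sec = (int64_t)(ns / NSEC_PER_SEC);
	ts.tv_nsec = (long)(ns % NSEC_PER_SEC);
	return ts;
}
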
@@ -133,14 +131,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
* wall timer value.
**/
static int e1000e_phc_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
u64 ns;
- ns = timespec_to_ns(ts);
+ ns = timespec64_to_ns(ts);
/* reset the timecounter */
spin_lock_irqsave(&adapter->systim_lock, flags);
@@ -171,11 +169,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
- struct timespec ts;
+ struct timespec64 ts;
- adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts);
+ adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
- e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ e_dbg("SYSTIM overflow check at %lld.%09lu\n",
+ (long long)ts.tv_sec, ts.tv_nsec);
schedule_delayed_work(&adapter->systim_overflow_work,
E1000_SYSTIM_OVERFLOW_PERIOD);
@@ -190,8 +189,8 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
- .gettime = e1000e_phc_gettime,
- .settime = e1000e_phc_settime,
+ .gettime64 = e1000e_phc_gettime,
+ .settime64 = e1000e_phc_settime,
.enable = e1000e_phc_enable,
};
@@ -221,7 +220,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- if ((hw->mac.type != e1000_pch_lpt) ||
+ case e1000_pch_spt:
+ if (((hw->mac.type != e1000_pch_lpt) &&
+ (hw->mac.type != e1000_pch_spt)) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index ea235bbe50d3..85eefc4832ba 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,6 +38,7 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
@@ -67,6 +68,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC 0x00F28 /* TX corrupted data avoidance */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
@@ -121,6 +123,7 @@
(0x054E4 + ((_i - 16) * 8)))
#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TARC0_CB_MULTIQ_3_REQ ((1 << 28) | (1 << 29))
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 42eb4344a9dc..c8c8c5baefda 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -235,6 +235,9 @@ struct fm10k_vxlan_port {
__be16 port;
};
+/* one work queue for entire driver */
+extern struct workqueue_struct *fm10k_workqueue;
+
struct fm10k_intfc {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
@@ -266,7 +269,6 @@ struct fm10k_intfc {
u64 tx_csum_errors;
u64 alloc_failed;
u64 rx_csum_errors;
- u64 rx_errors;
u64 tx_bytes_nic;
u64 tx_packets_nic;
@@ -439,6 +441,7 @@ extern char fm10k_driver_name[];
extern const char fm10k_driver_version[];
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
+__be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
@@ -457,6 +460,9 @@ void fm10k_down(struct fm10k_intfc *interface);
void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void fm10k_netpoll(struct net_device *netdev);
+#endif
/* Netdev */
struct net_device *fm10k_alloc_netdev(void);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index bf19dccd4288..6cfae6ac04ea 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
/* Retrieve RX Owner Data */
id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
- /* Process RX Ring*/
+ /* Process RX Ring */
do {
rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
&q->rx_drops);
@@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
* Function invalidates the index values for the queues so any updates that
* may have happened are ignored and the base for the queue stats is reset.
**/
-
void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
{
u32 i;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 212a92dad222..5c7a4d7662d8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -128,7 +128,7 @@ static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
*
* Returns that we support only IEEE DCB for this interface
**/
-static u8 fm10k_dcbnl_getdcbx(struct net_device *dev)
+static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev)
{
return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
@@ -140,7 +140,7 @@ static u8 fm10k_dcbnl_getdcbx(struct net_device *dev)
*
* Returns error on attempt to enable anything but IEEE DCB for this interface
**/
-static u8 fm10k_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+static u8 fm10k_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
{
return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 4327f86218b9..f45b4d71adb8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,14 +36,16 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos)
return (*pos < ring->count) ? pos : NULL;
}
-static void *fm10k_dbg_desc_seq_next(struct seq_file *s, void *v, loff_t *pos)
+static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
+ void __always_unused *v, loff_t *pos)
{
struct fm10k_ring *ring = s->private;
return (++(*pos) < ring->count) ? pos : NULL;
}
-static void fm10k_dbg_desc_seq_stop(struct seq_file *s, void *v)
+static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s,
+ __always_unused void *v)
{
/* Do nothing. */
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 651f53bc7376..4b9d9f88af70 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -57,13 +57,12 @@ static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
.stat_offset = offsetof(struct fm10k_intfc, _stat) \
}
-static const struct fm10k_stats fm10k_gstrings_stats[] = {
+static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("tx_restart_queue", restart_queue),
FM10K_STAT("tx_busy", tx_busy),
FM10K_STAT("tx_csum_errors", tx_csum_errors),
FM10K_STAT("rx_alloc_failed", alloc_failed),
FM10K_STAT("rx_csum_errors", rx_csum_errors),
- FM10K_STAT("rx_errors", rx_errors),
FM10K_STAT("tx_packets_nic", tx_packets_nic),
FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
@@ -73,38 +72,42 @@ static const struct fm10k_stats fm10k_gstrings_stats[] = {
FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
FM10K_STAT("rx_overrun_vf", rx_overrun_vf),
- FM10K_STAT("timeout", stats.timeout.count),
- FM10K_STAT("ur", stats.ur.count),
- FM10K_STAT("ca", stats.ca.count),
- FM10K_STAT("um", stats.um.count),
- FM10K_STAT("xec", stats.xec.count),
- FM10K_STAT("vlan_drop", stats.vlan_drop.count),
- FM10K_STAT("loopback_drop", stats.loopback_drop.count),
- FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
-
FM10K_STAT("swapi_status", hw.swapi.status),
FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy),
- FM10K_STAT("mbx_tx_dropped", hw.mbx.tx_dropped),
+ FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped),
FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages),
FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords),
FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages),
FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords),
FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err),
+ FM10K_STAT("tx_hang_count", tx_timeout_count),
+
FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
};
-#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_stats)
+static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
+ FM10K_STAT("timeout", stats.timeout.count),
+ FM10K_STAT("ur", stats.ur.count),
+ FM10K_STAT("ca", stats.ca.count),
+ FM10K_STAT("um", stats.um.count),
+ FM10K_STAT("xec", stats.xec.count),
+ FM10K_STAT("vlan_drop", stats.vlan_drop.count),
+ FM10K_STAT("loopback_drop", stats.loopback_drop.count),
+ FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
+};
+
+#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
+#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
-#define FM10K_QUEUE_STATS_LEN \
- (MAX_QUEUES * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+#define FM10K_QUEUE_STATS_LEN(_n) \
+ ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
-#define FM10K_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
- FM10K_NETDEV_STATS_LEN + \
- FM10K_QUEUE_STATS_LEN)
+#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
+ FM10K_NETDEV_STATS_LEN)
static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
"Mailbox test (on/offline)"
@@ -117,9 +120,9 @@ enum fm10k_self_test_types {
FM10K_TEST_MAX = FM10K_TEST_LEN
};
-static void fm10k_get_strings(struct net_device *dev, u32 stringset,
- u8 *data)
+static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
char *p = (char *)data;
int i;
@@ -135,12 +138,19 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset,
p += ETH_GSTRING_LEN;
}
for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_stats[i].stat_string,
+ memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < MAX_QUEUES; i++) {
+ if (interface->hw.mac.type != fm10k_mac_vf)
+ for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
@@ -156,18 +166,28 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset,
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
+ struct fm10k_hw *hw = &interface->hw;
+ int stats_len = FM10K_STATIC_STATS_LEN;
+
switch (sset) {
case ETH_SS_TEST:
return FM10K_TEST_LEN;
case ETH_SS_STATS:
- return FM10K_STATS_LEN;
+ stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);
+
+ if (hw->mac.type != fm10k_mac_vf)
+ stats_len += FM10K_PF_STATS_LEN;
+
+ return stats_len;
default:
return -EOPNOTSUPP;
}
}
static void fm10k_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
{
const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -184,12 +204,21 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
}
for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- p = (char *)interface + fm10k_gstrings_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_stats[i].sizeof_stat ==
+ p = (char *)interface +
+ fm10k_gstrings_global_stats[i].stat_offset;
+ *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (i = 0; i < MAX_QUEUES; i++) {
+ if (interface->hw.mac.type != fm10k_mac_vf)
+ for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
+ p = (char *)interface +
+ fm10k_gstrings_pf_stats[i].stat_offset;
+ *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
struct fm10k_ring *ring;
u64 *queue_stat;
@@ -369,7 +398,7 @@ static void fm10k_get_drvinfo(struct net_device *dev,
strncpy(info->bus_info, pci_name(interface->pdev),
sizeof(info->bus_info) - 1);
- info->n_stats = FM10K_STATS_LEN;
+ info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS);
info->regdump_len = fm10k_get_regs_len(dev);
}
@@ -645,7 +674,7 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
}
static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 __always_unused *rule_locs)
{
struct fm10k_intfc *interface = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -851,7 +880,7 @@ static void fm10k_self_test(struct net_device *dev,
eth_test->flags |= ETH_TEST_FL_FAILED;
}
-static u32 fm10k_get_reta_size(struct net_device *netdev)
+static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}
@@ -911,7 +940,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
return 0;
}
-static u32 fm10k_get_rssrk_size(struct net_device *netdev)
+static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
{
return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}
@@ -1019,7 +1048,7 @@ static int fm10k_set_channels(struct net_device *dev,
}
static int fm10k_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct ethtool_ts_info *info)
{
struct fm10k_intfc *interface = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 060190864238..5b08e6284a3c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -47,7 +47,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
struct fm10k_iov_data *iov_data;
- s64 mbicr, vflre;
+ s64 vflre;
int i;
/* if there is no iov_data then there is no mailboxes to process */
@@ -63,7 +63,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
goto read_unlock;
if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
- goto process_mbx;
+ goto read_unlock;
/* read VFLRE to determine if any VFs have been reset */
do {
@@ -86,32 +86,6 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
}
} while (i != iov_data->num_vfs);
-process_mbx:
- /* read MBICR to determine which VFs require attention */
- mbicr = fm10k_read_reg(hw, FM10K_MBICR(1));
- mbicr <<= 32;
- mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0));
-
- i = iov_data->next_vf_mbx ? : iov_data->num_vfs;
-
- for (mbicr <<= 64 - i; i--; mbicr += mbicr) {
- struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx;
-
- if (mbicr >= 0)
- continue;
-
- if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
- break;
-
- mbx->ops.process(hw, mbx);
- }
-
- if (i >= 0) {
- iov_data->next_vf_mbx = i + 1;
- } else if (iov_data->next_vf_mbx) {
- iov_data->next_vf_mbx = 0;
- goto process_mbx;
- }
read_unlock:
rcu_read_unlock();
@@ -139,6 +113,13 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
/* lock the mailbox for transmit and receive */
fm10k_mbx_lock(interface);
+ /* Most VF messages sent to the PF cause the PF to respond by
+ * requesting from the SM mailbox. This means that too many VF
+ * messages processed at once could cause a mailbox timeout on the PF.
+ * To prevent this, store a pointer to the next VF mbx to process. Use
+ * that as the start of the loop so that we don't starve whichever VF
+ * got ignored on the previous run.
+ */
process_mbx:
for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
@@ -155,10 +136,6 @@ process_mbx:
mbx->ops.connect(hw, mbx);
}
- /* no work pending, then just continue */
- if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx))
- continue;
-
/* guarantee we have free space in the SM mailbox */
if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
break;
@@ -167,6 +144,10 @@ process_mbx:
mbx->ops.process(hw, mbx);
}
+ /* if we stopped processing mailboxes early, update next_vf_mbx.
+ * Otherwise, reset next_vf_mbx, and restart loop so that we process
+ * the remaining mailboxes we skipped at the start.
+ */
if (i >= 0) {
iov_data->next_vf_mbx = i + 1;
} else if (iov_data->next_vf_mbx) {
@@ -275,7 +256,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
if (vf_idx >= iov_data->num_vfs)
return FM10K_ERR_PARAM;
- /* determine if an update has occured and if so notify the VF */
+ /* determine if an update has occurred and if so notify the VF */
vf_info = &iov_data->vf_info[vf_idx];
if (vf_info->sw_vid != pvid) {
vf_info->sw_vid = pvid;
@@ -488,8 +469,8 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
return 0;
}
-int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused,
- int rate)
+int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
+ int __always_unused unused, int rate)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
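The next_vf_mbx bookkeeping added above is a resume-from-saved-index pattern: when the SM mailbox runs out of room the loop breaks, remembers the first unserviced VF, and starts from it on the next pass so no VF is starved. A minimal userspace sketch of the same shape, using made-up names (next_vf, sm_mbx_has_room) rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>

#define NUM_VFS 8

static int next_vf;     /* first VF to service on the next pass, 0 = start over */

static bool sm_mbx_has_room(int processed)
{
        /* stand-in for hw->mbx.ops.tx_ready(); pretend there is room for 3 */
        return processed < 3;
}

static void service_vf_mailboxes(void)
{
        int processed = 0;
        int i;

restart:
        for (i = next_vf ? : NUM_VFS; i--;) {
                if (!sm_mbx_has_room(processed))
                        break;
                printf("servicing VF %d\n", i);
                processed++;
        }

        if (i >= 0) {
                next_vf = i + 1;        /* stopped early: remember where */
        } else if (next_vf) {
                next_vf = 0;            /* wrapped: redo the VFs skipped earlier */
                goto restart;
        }
}

int main(void)
{
        service_vf_mailboxes();         /* services VFs 7, 6, 5 */
        service_vf_mailboxes();         /* resumes at VF 4 */
        return 0;
}

Built with gcc (the ?: shorthand is a GNU extension the driver also relies on), two successive calls show the second pass picking up exactly where the first one had to stop.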
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 84ab9eea2768..c754b2027281 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -28,7 +28,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.12.2-k"
+#define DRV_VERSION "0.15.2-k"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] =
@@ -41,6 +41,9 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+/* single workqueue for entire fm10k driver */
+struct workqueue_struct *fm10k_workqueue = NULL;
+
/**
* fm10k_init_module - Driver Registration Routine
*
@@ -52,6 +55,10 @@ static int __init fm10k_init_module(void)
pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
pr_info("%s\n", fm10k_copyright);
+ /* create driver workqueue */
+ if (!fm10k_workqueue)
+ fm10k_workqueue = create_workqueue("fm10k");
+
fm10k_dbg_init();
return fm10k_register_pci_driver();
@@ -69,6 +76,11 @@ static void __exit fm10k_exit_module(void)
fm10k_unregister_pci_driver();
fm10k_dbg_exit();
+
+ /* destroy driver workqueue */
+ flush_workqueue(fm10k_workqueue);
+ destroy_workqueue(fm10k_workqueue);
+ fm10k_workqueue = NULL;
}
module_exit(fm10k_exit_module);
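The module init/exit hunks above give fm10k a private workqueue so deferred work can be queued, flushed and torn down independently of the system workqueue. A self-contained sketch of that same create/queue/flush/destroy lifecycle, with hypothetical demo_* names rather than the driver's:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
        pr_info("demo work ran\n");
}

static int __init demo_init(void)
{
        demo_wq = create_workqueue("demo");
        if (!demo_wq)
                return -ENOMEM;

        INIT_WORK(&demo_work, demo_work_fn);
        queue_work(demo_wq, &demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        /* wait for anything still queued, then tear the queue down */
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");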
@@ -209,7 +221,7 @@ static inline bool fm10k_page_is_reserved(struct page *page)
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
struct page *page,
- unsigned int truesize)
+ unsigned int __maybe_unused truesize)
{
/* avoid re-using remote pages */
if (unlikely(fm10k_page_is_reserved(page)))
@@ -240,7 +252,6 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
/**
* fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
@@ -253,8 +264,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the interface.
**/
-static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
- struct fm10k_rx_buffer *rx_buffer,
+static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
union fm10k_rx_desc *rx_desc,
struct sk_buff *skb)
{
@@ -330,7 +340,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
/* hand second half of page back to the ring */
fm10k_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -412,7 +422,7 @@ static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
}
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
- union fm10k_rx_desc *rx_desc,
+ union fm10k_rx_desc __maybe_unused *rx_desc,
struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
@@ -509,8 +519,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
/**
* fm10k_pull_tail - fm10k specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being adjusted
*
* This function is an fm10k specific version of __pskb_pull_tail. The
@@ -520,9 +528,7 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
* As a result we can do things like drop a frag and maintain an accurate
* truesize for the skb.
*/
-static void fm10k_pull_tail(struct fm10k_ring *rx_ring,
- union fm10k_rx_desc *rx_desc,
- struct sk_buff *skb)
+static void fm10k_pull_tail(struct sk_buff *skb)
{
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
@@ -576,7 +582,7 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
/* place header in linear portion of buffer */
if (skb_is_nonlinear(skb))
- fm10k_pull_tail(rx_ring, rx_desc, skb);
+ fm10k_pull_tail(skb);
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
@@ -604,7 +610,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = fm10k_desc_unused(rx_ring);
- do {
+ while (likely(total_packets < budget)) {
union fm10k_rx_desc *rx_desc;
/* return some buffers to hardware, one at a time is too slow */
@@ -653,7 +659,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
/* update budget accounting */
total_packets++;
- } while (likely(total_packets < budget));
+ }
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
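Turning the do/while into a while means a caller that passes a zero budget now gets zero packets cleaned instead of at least one; the body of the loop is otherwise unchanged. A tiny userspace illustration of the difference, with a stand-in clean_one() helper:

#include <stdio.h>

static int clean_one(void)
{
        return 1;       /* stand-in for cleaning a single Rx descriptor */
}

static int clean_rx(int budget)
{
        int total = 0;

        while (total < budget)  /* a do/while here would always clean once */
                total += clean_one();

        return total;
}

int main(void)
{
        printf("budget 0 -> cleaned %d\n", clean_rx(0));        /* prints 0 */
        printf("budget 4 -> cleaned %d\n", clean_rx(4));        /* prints 4 */
        return 0;
}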
@@ -711,10 +717,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
return NULL;
- /* verify protocol is transparent Ethernet bridging */
- if (nvgre_hdr->proto != htons(ETH_P_TEB))
- return NULL;
-
/* report start of ethernet header */
if (nvgre_hdr->flags & NVGRE_TNI)
return (struct ethhdr *)(nvgre_hdr + 1);
@@ -722,15 +724,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
return (struct ethhdr *)(&nvgre_hdr->tni);
}
-static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
+__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
+ u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
struct ethhdr *eth_hdr;
- u8 l4_hdr = 0;
-/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
-#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164
- if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
- FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB))
return 0;
switch (vlan_get_protocol(skb)) {
@@ -760,12 +760,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
switch (eth_hdr->h_proto) {
case htons(ETH_P_IP):
+ inner_l4_hdr = inner_ip_hdr(skb)->protocol;
+ break;
case htons(ETH_P_IPV6):
+ inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
break;
default:
return 0;
}
+ switch (inner_l4_hdr) {
+ case IPPROTO_TCP:
+ inner_l4_hlen = inner_tcp_hdrlen(skb);
+ break;
+ case IPPROTO_UDP:
+ inner_l4_hlen = 8;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The hardware allows tunnel offloads only if the combined inner and
+ * outer header is 184 bytes or less
+ */
+ if (skb_inner_transport_header(skb) + inner_l4_hlen -
+ skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
+ return 0;
+
return eth_hdr->h_proto;
}
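The reworked fm10k_tx_encap_offload() measures from the outer MAC header to the end of the inner L4 header and refuses the offload if that span exceeds FM10K_TUNNEL_HEADER_LENGTH (184 bytes). As a rough worked example, a VXLAN-in-IPv4 frame carrying inner TCP (no IP or TCP options assumed) stays well inside the limit:

#include <stdio.h>

int main(void)
{
        int outer_eth = 14, outer_ipv4 = 20, outer_udp = 8, vxlan = 8;
        int inner_eth = 14, inner_ipv4 = 20, inner_tcp = 20;
        int total = outer_eth + outer_ipv4 + outer_udp + vxlan +
                    inner_eth + inner_ipv4 + inner_tcp;

        printf("combined headers: %d bytes (limit 184)\n", total);      /* 104 */
        return 0;
}

That particular layout leaves roughly 80 bytes of headroom for IP options, IPv6 extension headers or TCP options before the offload has to be declined.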
@@ -934,10 +955,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
smp_mb();
- /* We need to check again in a case another CPU has just
- * made room available. */
+ /* Check again in a case another CPU has just made room available */
if (likely(fm10k_desc_unused(tx_ring) < size))
return -EBUSY;
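The barrier comment above belongs to the usual stop/wake discipline for Tx queues: stop the subqueue, issue smp_mb() so the stopped state is visible before the free-descriptor count is re-read, and only keep the queue stopped if space really is still short. A condensed sketch of that shape with a hypothetical demo_ring type and helpers (not the driver's exact code):

#include <linux/netdevice.h>

/* hypothetical ring type, just enough for the illustration */
struct demo_ring {
        struct net_device *netdev;
        u16 queue_index;
        u16 next_to_use;
        u16 next_to_clean;
        u16 count;
};

static u16 demo_desc_unused(struct demo_ring *ring)
{
        return (ring->next_to_clean > ring->next_to_use ? 0 : ring->count) +
               ring->next_to_clean - ring->next_to_use - 1;
}

static int demo_maybe_stop_tx(struct demo_ring *ring, u16 needed)
{
        if (likely(demo_desc_unused(ring) >= needed))
                return 0;

        netif_stop_subqueue(ring->netdev, ring->queue_index);

        /* make the stop visible before re-reading the free count */
        smp_mb();

        /* another CPU may have freed descriptors in the meantime */
        if (likely(demo_desc_unused(ring) < needed))
                return -EBUSY;

        netif_start_subqueue(ring->netdev, ring->queue_index);
        return 0;
}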
@@ -1182,7 +1203,6 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
/* Do the reset outside of interrupt context */
if (!test_bit(__FM10K_DOWN, &interface->state)) {
- netdev_err(interface->netdev, "Reset interface\n");
interface->tx_timeout_count++;
interface->flags |= FM10K_FLAG_RESET_REQUESTED;
fm10k_service_event_schedule(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 9f5457c9e627..1b2738380518 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
* @fifo: pointer to FIFO
* @offset: offset to add to head
*
- * This function returns the indicies into the fifo based on head + offset
+ * This function returns the indices into the fifo based on head + offset
**/
static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
* @fifo: pointer to FIFO
* @offset: offset to add to tail
*
- * This function returns the indicies into the fifo based on tail + offset
+ * This function returns the indices into the fifo based on tail + offset
**/
static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
{
@@ -126,6 +126,18 @@ static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo)
}
/**
+ * fm10k_fifo_drop_all - Drop all messages in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function resets the head pointer to drop all messages in the FIFO,
+ * and ensure the FIFO is empty.
+ **/
+static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo)
+{
+ fifo->head = fifo->tail;
+}
+
+/**
* fm10k_mbx_index_len - Convert a head/tail index into a length value
* @mbx: pointer to mailbox
* @head: head index
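fm10k_fifo_drop_all() can empty the FIFO simply by advancing the head to the tail because, in a head/tail ring, the number of queued entries is the (masked) difference between the two indices. A small userspace illustration with a hypothetical demo_fifo:

#include <stdio.h>

struct demo_fifo {
        unsigned int head;
        unsigned int tail;
        unsigned int size;      /* power of two */
};

static unsigned int fifo_used(const struct demo_fifo *f)
{
        return (f->tail - f->head) & (f->size - 1);
}

int main(void)
{
        struct demo_fifo f = { .head = 3, .tail = 9, .size = 16 };

        printf("used before drop: %u\n", fifo_used(&f));        /* 6 */
        f.head = f.tail;                        /* the drop-all operation */
        printf("used after drop:  %u\n", fifo_used(&f));        /* 0 */
        return 0;
}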
@@ -315,7 +327,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
} while (total_len < len);
/* message extends out of pushed section, but fits in FIFO */
- if ((len < total_len) && (msg_len <= mbx->rx.size))
+ if ((len < total_len) && (msg_len <= mbx->max_size))
return 0;
/* return length of invalid section */
@@ -326,8 +338,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
* fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
* @mbx: pointer to mailbox
*
- * This function will take a seciton of the Rx FIFO and copy it into the
- mbx->tail--;
+ * This function will take a section of the Tx FIFO and copy it into the
* mailbox memory. The offset in mbmem is based on the lower bits of the
* tail and len determines the length to copy.
**/
@@ -418,7 +429,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw,
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function will take a seciton of the mailbox memory and copy it
+ * This function will take a section of the mailbox memory and copy it
* into the Rx FIFO. The offset is based on the lower bits of the
* head and len determines the length to copy.
**/
@@ -464,7 +475,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw,
* @tail: tail index of message
*
* This function will first validate the tail index and size for the
- * incoming message. It then updates the acknowlegment number and
+ * incoming message. It then updates the acknowledgment number and
* copies the data into the FIFO. It will return the number of messages
* dequeued on success and a negative value on error.
**/
@@ -761,7 +772,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
err = fm10k_fifo_enqueue(&mbx->tx, msg);
}
- /* if we failed trhead the error */
+ /* if we failed treat the error */
if (err) {
mbx->timeout = 0;
mbx->tx_busy++;
@@ -815,10 +826,10 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
u32 mbmem = mbx->mbmem_reg;
- /* write new msg header to notify recepient of change */
+ /* write new msg header to notify recipient of change */
fm10k_write_reg(hw, mbmem, mbx->mbx_hdr);
- /* write mailbox to sent interrupt */
+ /* write mailbox to send interrupt */
if (mbx->mbx_lock)
fm10k_write_reg(hw, mbx->mbx_reg, mbx->mbx_lock);
@@ -1052,8 +1063,11 @@ static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx)
* @mbx: pointer to mailbox
* @size: new value for max_size
*
- * This function will update the max_size value and drop any outgoing messages
- * from the head of the Tx FIFO that are larger than max_size.
+ * This function updates the max_size value and drops any outgoing messages
+ * at the head of the Tx FIFO if they are larger than max_size. It does not
+ * drop all messages, as this is too difficult to parse and remove them from
+ * the FIFO. Instead, rely on the checking to ensure that messages larger
+ * than max_size aren't pushed into the memory buffer.
**/
static void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size)
{
@@ -1251,7 +1265,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
/* we will need to pull all of the fields for verification */
head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
- /* we only have lower 10 bits of error number os add upper bits */
+ /* we only have lower 10 bits of error number so add upper bits */
err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);
@@ -1371,9 +1385,11 @@ static void fm10k_mbx_disconnect(struct fm10k_hw *hw,
timeout -= FM10K_MBX_POLL_DELAY;
} while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED));
- /* in case we didn't close just force the mailbox into shutdown */
+ /* in case we didn't close, just force the mailbox into shutdown and
+ * drop all left over messages in the FIFO.
+ */
fm10k_mbx_connect_reset(mbx);
- fm10k_mbx_update_max_size(mbx, 0);
+ fm10k_fifo_drop_all(&mbx->tx);
fm10k_write_reg(hw, mbx->mbmem_reg, 0);
}
@@ -1548,7 +1564,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
mbx->timeout = 0;
mbx->udelay = FM10K_MBX_INIT_DELAY;
- /* initalize tail and head */
+ /* initialize tail and head */
mbx->tail = 1;
mbx->head = 1;
@@ -1627,7 +1643,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
mbx->local = FM10K_SM_MBX_VERSION;
mbx->remote = 0;
- /* initalize tail and head */
+ /* initialize tail and head */
mbx->tail = 1;
mbx->head = 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index cfde8bac1aeb..2f4f41b7eae7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
* fm10k_request_glort_range - Request GLORTs for use in configuring rules
* @interface: board private structure
*
- * This function allocates a range of glorts for this inteface to use.
+ * This function allocates a range of glorts for this interface to use.
**/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
@@ -770,18 +770,18 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (hw->mac.vlan_override)
return -EACCES;
- /* if default VLAN is already present do nothing */
- if (vid == hw->mac.default_vid)
- return -EBUSY;
-
/* update active_vlans bitmask */
set_bit(vid, interface->active_vlans);
if (!set)
clear_bit(vid, interface->active_vlans);
+ /* if default VLAN is already present do nothing */
+ if (vid == hw->mac.default_vid)
+ return 0;
+
fm10k_mbx_lock(interface);
- /* only need to update the VLAN if not in promiscous mode */
+ /* only need to update the VLAN if not in promiscuous mode */
if (!(netdev->flags & IFF_PROMISC)) {
err = hw->mac.ops.update_vlan(hw, vid, 0, set);
if (err)
@@ -970,14 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev)
fm10k_mbx_lock(interface);
- /* syncronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
- }
-
- /* if we aren't changing modes there is nothing to do */
+ /* update xcast mode first, but only if it changed */
if (interface->xcast_mode != xcast_mode) {
/* update VLAN table */
if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
@@ -992,6 +985,13 @@ static void fm10k_set_rx_mode(struct net_device *dev)
interface->xcast_mode = xcast_mode;
}
+ /* synchronize all of the addresses */
+ if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
+ __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
+ if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
+ __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
+ }
+
fm10k_mbx_unlock(interface);
}
@@ -1051,16 +1051,16 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
vid, true, 0);
}
- /* syncronize all of the addresses */
+ /* update xcast mode before syncronizing addresses */
+ hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
+
+ /* synchronize all of the addresses */
if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
}
- /* update xcast mode */
- hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
-
fm10k_mbx_unlock(interface);
/* record updated xcast mode state */
@@ -1126,7 +1126,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
}
for (i = 0; i < interface->num_tx_queues; i++) {
- ring = ACCESS_ONCE(interface->rx_ring[i]);
+ ring = ACCESS_ONCE(interface->tx_ring[i]);
if (!ring)
continue;
@@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
}
}
+static netdev_features_t fm10k_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
+ return features;
+
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+}
+
static const struct net_device_ops fm10k_netdev_ops = {
.ndo_open = fm10k_open,
.ndo_stop = fm10k_close,
@@ -1372,6 +1382,10 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_do_ioctl = fm10k_ioctl,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fm10k_netpoll,
+#endif
+ .ndo_features_check = fm10k_features_check,
};
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 4f5892cc32d7..df9fda38bdd1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -94,7 +94,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
!test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
- schedule_work(&interface->service_task);
+ queue_work(fm10k_workqueue, &interface->service_task);
}
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
@@ -191,7 +191,6 @@ static void fm10k_reset_subtask(struct fm10k_intfc *interface)
interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;
netdev_err(interface->netdev, "Reset interface\n");
- interface->tx_timeout_count++;
fm10k_reinit(interface);
}
@@ -357,11 +356,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->rx_packets = pkts;
interface->alloc_failed = alloc_failed;
interface->rx_csum_errors = rx_csum_errors;
- interface->rx_errors = rx_errors;
hw->mac.ops.update_hw_stats(hw, &interface->stats);
- for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
+ for (i = 0; i < hw->mac.max_queues; i++) {
struct fm10k_hw_stats_q *q = &interface->stats.q[i];
tx_bytes_nic += q->tx_bytes.count;
@@ -378,7 +376,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
interface->rx_drops_nic = rx_drops_nic;
/* Fill out the OS statistics structure */
- net_stats->rx_errors = interface->stats.xec.count;
+ net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
}
@@ -648,7 +646,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* Configure the Rx buffer size for one buff without split */
srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;
- /* Configure the Rx ring to supress loopback packets */
+ /* Configure the Rx ring to suppress loopback packets */
srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);
@@ -808,7 +806,7 @@ static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
}
}
-static irqreturn_t fm10k_msix_clean_rings(int irq, void *data)
+static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
struct fm10k_q_vector *q_vector = data;
@@ -818,7 +816,7 @@ static irqreturn_t fm10k_msix_clean_rings(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t fm10k_msix_mbx_vf(int irq, void *data)
+static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
struct fm10k_intfc *interface = data;
struct fm10k_hw *hw = &interface->hw;
@@ -840,6 +838,28 @@ static irqreturn_t fm10k_msix_mbx_vf(int irq, void *data)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * fm10k_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts. It's not called while the normal interrupt routine is executing.
+ **/
+void fm10k_netpoll(struct net_device *netdev)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__FM10K_DOWN, &interface->state))
+ return;
+
+ for (i = 0; i < interface->num_q_vectors; i++)
+ fm10k_msix_clean_rings(0, interface->q_vector[i]);
+}
+
+#endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
struct fm10k_fault *fault)
@@ -964,7 +984,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
}
}
-static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data)
+static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
struct fm10k_intfc *interface = data;
struct fm10k_hw *hw = &interface->hw;
@@ -986,6 +1006,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data)
/* service mailboxes */
if (fm10k_mbx_trylock(interface)) {
mbx->ops.process(hw, mbx);
+ /* handle VFLRE events */
fm10k_iov_event(interface);
fm10k_mbx_unlock(interface);
}
@@ -1002,6 +1023,8 @@ static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data)
/* we should validate host state after interrupt event */
hw->mac.get_host_state = 1;
+
+ /* validate host state, and handle VF mailboxes in the service task */
fm10k_service_event_schedule(interface);
/* re-enable mailbox interrupt and indicate 20us delay */
@@ -1069,7 +1092,7 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
}
static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_intfc *interface;
u64 timestamp;
@@ -1089,7 +1112,7 @@ static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_intfc *interface;
struct pci_dev *pdev;
@@ -1165,7 +1188,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
}
static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_intfc *interface;
u16 glort, pvid;
@@ -1206,7 +1229,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
}
static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_swapi_1588_timestamp timestamp;
struct fm10k_iov_data *iov_data;
@@ -1488,7 +1511,7 @@ void fm10k_up(struct fm10k_intfc *interface)
/* enable transmits */
netif_tx_start_all_queues(interface->netdev);
- /* kick off the service timer */
+ /* kick off the service timer now */
hw->mac.get_host_state = 1;
mod_timer(&interface->service_timer, jiffies);
}
@@ -1528,8 +1551,6 @@ void fm10k_down(struct fm10k_intfc *interface)
/* disable polling routines */
fm10k_napi_disable_all(interface);
- del_timer_sync(&interface->service_timer);
-
/* capture stats one last time before stopping interface */
fm10k_update_stats(interface);
@@ -1655,6 +1676,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
(unsigned long)interface);
INIT_WORK(&interface->service_task, fm10k_service_task);
+ /* kick off service timer now, even when interface is down */
+ mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
+
/* Intitialize timestamp data */
fm10k_ts_init(interface);
@@ -1871,6 +1895,8 @@ static void fm10k_remove(struct pci_dev *pdev)
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
+ del_timer_sync(&interface->service_timer);
+
set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
cancel_work_sync(&interface->service_task);
@@ -1984,7 +2010,8 @@ static int fm10k_resume(struct pci_dev *pdev)
* a sleep state. The fm10k hardware does not support wake on lan so the
* driver simply needs to shut down the device so it is in a low power state.
**/
-static int fm10k_suspend(struct pci_dev *pdev, pm_message_t state)
+static int fm10k_suspend(struct pci_dev *pdev,
+ pm_message_t __always_unused state)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 7e4711958e46..891e21874b2a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
vid = (vid << 17) >> 17;
/* verify the reserved 0 fields are 0 */
- if (len >= FM10K_VLAN_TABLE_VID_MAX ||
- vid >= FM10K_VLAN_TABLE_VID_MAX)
+ if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
return FM10K_ERR_PARAM;
/* Loop through the table updating all required VLANs */
@@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
}
/**
- * fm10k_update_uc_addr_pf - Update device unicast addresss
+ * fm10k_update_xc_addr_pf - Update device addresses
* @hw: pointer to the HW structure
* @glort: base resource tag for this request
* @mac: MAC address to add/remove from table
@@ -330,6 +329,9 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
struct fm10k_mac_update mac_update;
u32 msg[5];
+ /* clear set bit from VLAN ID */
+ vid &= ~FM10K_VLAN_CLEAR;
+
/* if glort or vlan are not valid return error */
if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
return FM10K_ERR_PARAM;
@@ -356,7 +358,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
}
/**
- * fm10k_update_uc_addr_pf - Update device unicast addresss
+ * fm10k_update_uc_addr_pf - Update device unicast addresses
* @hw: pointer to the HW structure
* @glort: base resource tag for this request
* @mac: MAC address to add/remove from table
@@ -454,7 +456,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
break;
}
- /* always reset VFITR2[0] to point to last enabled PF vector*/
+ /* always reset VFITR2[0] to point to last enabled PF vector */
fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
/* reset ITR2[0] to point to last enabled PF vector */
@@ -677,7 +679,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
/* loop through unallocated rings assigning them back to PF */
for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
- fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | vid);
+ fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
+ FM10K_TXQCTL_UNLIMITED_BW | vid);
fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
}
@@ -812,7 +815,7 @@ static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
if (vf_idx >= hw->iov.num_vfs)
return FM10K_ERR_PARAM;
- /* determine vector offset and count*/
+ /* determine vector offset and count */
vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
@@ -951,7 +954,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
if (vf_info->mbx.ops.disconnect)
vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
- /* determine vector offset and count*/
+ /* determine vector offset and count */
vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
@@ -1035,7 +1038,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
((u32)vf_info->mac[2]);
}
- /* map queue pairs back to VF from last to first*/
+ /* map queue pairs back to VF from last to first */
for (i = queues_per_pool; i--;) {
fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
@@ -1141,7 +1144,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
*
* This function is a default handler for MSI-X requests from the VF. The
* assumption is that in this case it is acceptable to just directly
- * hand off the message form the VF to the underlying shared code.
+ * hand off the message from the VF to the underlying shared code.
**/
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
@@ -1160,7 +1163,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
*
* This function is a default handler for MAC/VLAN requests from the VF.
* The assumption is that in this case it is acceptable to just directly
- * hand off the message form the VF to the underlying shared code.
+ * hand off the message from the VF to the underlying shared code.
**/
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
@@ -1250,8 +1253,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
}
/* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac,
- !(vlan & FM10K_VLAN_CLEAR), 0);
+ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan,
+ !(vlan & FM10K_VLAN_CLEAR));
}
return err;
@@ -1404,7 +1407,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
&stats->vlan_drop);
loopback_drop = fm10k_read_hw_stats_32b(hw,
FM10K_STATS_LOOPBACK_DROP,
- &stats->loopback_drop);
+ &stats->loopback_drop);
nodesc_drop = fm10k_read_hw_stats_32b(hw,
FM10K_STATS_NODESC_DROP,
&stats->nodesc_drop);
@@ -1573,7 +1576,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
s32 ret_val = 0;
u32 dma_ctrl2;
- /* verify the switch is ready for interraction */
+ /* verify the switch is ready for interaction */
dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
goto out;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index d966044e017a..9043633c3e50 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -285,7 +285,7 @@ static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct fm10k_intfc *interface;
unsigned long flags;
@@ -297,17 +297,17 @@ static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
now = fm10k_systime_read(interface) + interface->ptp_adjust;
read_unlock_irqrestore(&interface->systime_lock, flags);
- *ts = ns_to_timespec(now);
+ *ts = ns_to_timespec64(now);
return 0;
}
static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct fm10k_intfc *interface;
unsigned long flags;
- u64 ns = timespec_to_ns(ts);
+ u64 ns = timespec64_to_ns(ts);
interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
@@ -319,7 +319,8 @@ static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
}
static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+ struct ptp_clock_request *rq,
+ int __always_unused on)
{
struct ptp_clock_time *t = &rq->perout.period;
struct fm10k_intfc *interface;
@@ -419,8 +420,8 @@ void fm10k_ptp_register(struct fm10k_intfc *interface)
ptp_caps->max_adj = 976562;
ptp_caps->adjfreq = fm10k_ptp_adjfreq;
ptp_caps->adjtime = fm10k_ptp_adjtime;
- ptp_caps->gettime = fm10k_ptp_gettime;
- ptp_caps->settime = fm10k_ptp_settime;
+ ptp_caps->gettime64 = fm10k_ptp_gettime;
+ ptp_caps->settime64 = fm10k_ptp_settime;
/* provide pins if BAR4 is accessible */
if (interface->sw_addr) {
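Moving to the gettime64/settime64 callbacks switches the driver to struct timespec64, whose 64-bit seconds field does not wrap in 2038 the way a 32-bit time_t does; the nanosecond conversion helpers change name to match. A brief kernel-context sketch of the two conversions involved, with a hypothetical nanosecond value passed in:

#include <linux/kernel.h>
#include <linux/time64.h>

static void demo_time_roundtrip(u64 now_ns)
{
        struct timespec64 ts;

        /* what a .gettime64 callback hands back to the PTP core */
        ts = ns_to_timespec64(now_ns);
        pr_info("sec=%lld nsec=%ld\n", (long long)ts.tv_sec, ts.tv_nsec);

        /* what a .settime64 callback does with the caller's timespec64 */
        pr_info("back to %lld ns\n", (long long)timespec64_to_ns(&ts));
}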
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index fd0a05f011a8..9b29d7b0377a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
/**
* fm10k_tlv_msg_test - Validate all results on test message receive
* @hw: Pointer to hardware structure
- * @results: Pointer array to attributes in the mesage
+ * @results: Pointer array to attributes in the message
* @mbx: Pointer to mailbox information structure
*
* This function does a check to verify all attributes match what the test
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 7c6d9d5a8ae5..4af96686c584 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -356,6 +356,9 @@ struct fm10k_hw;
#define FM10K_QUEUE_DISABLE_TIMEOUT 100
#define FM10K_RESET_TIMEOUT 150
+/* Maximum supported combined inner and outer header length for encapsulation */
+#define FM10K_TUNNEL_HEADER_LENGTH 184
+
/* VF registers */
#define FM10K_VFCTRL 0x00000
#define FM10K_VFCTRL_RST 0x00000008
@@ -593,7 +596,7 @@ struct fm10k_vf_info {
u16 sw_vid; /* Switch API assigned VLAN */
u16 pf_vid; /* PF assigned Default VLAN */
u8 mac[ETH_ALEN]; /* PF Default MAC address */
- u8 vsi; /* VSI idenfifier */
+ u8 vsi; /* VSI identifier */
u8 vf_idx; /* which VF this is */
u8 vf_flags; /* flags indicating what modes
* are supported for the port
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index f0aa0f97b4a9..94f0f6a146d9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
if (err)
return err;
- /* If permenant address is set then we need to restore it */
+ /* If permanent address is set then we need to restore it */
if (is_valid_ether_addr(perm_addr)) {
bal = (((u32)perm_addr[3]) << 24) |
(((u32)perm_addr[4]) << 16) |
@@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
* fm10k_reset_hw_vf - VF hardware reset
* @hw: pointer to hardware structure
*
- * This function should return the hardare to a state similar to the
+ * This function should return the hardware to a state similar to the
* one it is in after just being initialized.
**/
static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
@@ -124,6 +124,10 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
/* record maximum queue count */
hw->mac.max_queues = i;
+ /* fetch default VLAN */
+ hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) &
+ FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+
return 0;
}
@@ -252,7 +256,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
}
/**
- * fm10k_update_uc_addr_vf - Update device unicast address
+ * fm10k_update_uc_addr_vf - Update device unicast addresses
* @hw: pointer to the HW structure
* @glort: unused
* @mac: MAC address to add/remove from table
@@ -282,7 +286,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
return FM10K_ERR_PARAM;
- /* add bit to notify us if this is a set of clear operation */
+ /* add bit to notify us if this is a set or clear operation */
if (!add)
vid |= FM10K_VLAN_CLEAR;
@@ -295,7 +299,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
}
/**
- * fm10k_update_mc_addr_vf - Update device multicast address
+ * fm10k_update_mc_addr_vf - Update device multicast addresses
* @hw: pointer to the HW structure
* @glort: unused
* @mac: MAC address to add/remove from table
@@ -319,7 +323,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
if (!is_multicast_ether_addr(mac))
return FM10K_ERR_PARAM;
- /* add bit to notify us if this is a set of clear operation */
+ /* add bit to notify us if this is a set or clear operation */
if (!add)
vid |= FM10K_VLAN_CLEAR;
@@ -515,7 +519,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
* @hw: pointer to the hardware structure
*
* Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanosecods. In order to guarantee the value is accurate
+ * value measured in nanoseconds. In order to guarantee the value is accurate
* we check the 32 most significant bits both before and after reading the
* 32 least significant bits to verify they didn't change as we were reading
* the registers.
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index c40581999121..b4729ba57c9c 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2014 Intel Corporation.
+# Copyright(c) 2013 - 2015 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2b65cdcad6ba..33c35d3b7420 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
+#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
@@ -49,6 +50,7 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -70,6 +72,7 @@
#define I40E_MAX_NUM_DESCRIPTORS 4096
#define I40E_MAX_REGISTER 0x800000
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
@@ -94,6 +97,9 @@
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
+/* Ethtool Private Flags */
+#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0)
+
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
@@ -140,6 +146,7 @@ enum i40e_state_t {
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
+ __I40E_EMP_RESET_INTR_RECEIVED,
__I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
__I40E_PTP_TX_IN_PROGRESS,
@@ -168,6 +175,9 @@ struct i40e_lump_tracking {
#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
#define I40E_FDIR_BUFFER_FULL_MARGIN 10
#define I40E_FDIR_BUFFER_HEAD_ROOM 32
+#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
+
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -232,17 +242,17 @@ struct i40e_pf {
bool fc_autoneg_status;
u16 eeprom_version;
- u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
+ u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
- u16 num_req_vfs; /* num vfs requested for this vf */
- u16 num_vf_qps; /* num queue pairs per vf */
+ u16 num_req_vfs; /* num VFs requested for this VF */
+ u16 num_vf_qps; /* num queue pairs per VF */
#ifdef I40E_FCOE
- u16 num_fcoe_qps; /* num fcoe queues this pf has set up */
+ u16 num_fcoe_qps; /* num fcoe queues this PF has set up */
u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
#endif /* I40E_FCOE */
- u16 num_lan_qps; /* num lan queues this pf has set up */
- u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ u16 num_lan_qps; /* num lan queues this PF has set up */
+ u16 num_lan_msix; /* num queue vectors for the base PF vsi */
int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
@@ -269,7 +279,7 @@ struct i40e_pf {
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
- u16 msg_enable;
+ u32 msg_enable;
char int_name[I40E_INT_NAME_STR_LEN];
u16 adminq_work_limit; /* num of admin receive queue desc to process */
unsigned long service_timer_period;
@@ -383,6 +393,9 @@ struct i40e_pf {
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size;
+ /* These are only valid in NPAR modes */
+ u32 npar_max_bw;
+ u32 npar_min_bw;
};
struct i40e_mac_filter {
@@ -405,6 +418,7 @@ struct i40e_veb {
u16 uplink_seid;
u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
+ u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
u16 flags;
u16 bw_limit;
u8 bw_max_quanta;
@@ -461,6 +475,9 @@ struct i40e_vsi {
u16 rx_itr_setting;
u16 tx_itr_setting;
+ u16 rss_table_size;
+ u16 rss_size;
+
u16 max_frame;
u16 rx_hdr_len;
u16 rx_buf_len;
@@ -478,6 +495,7 @@ struct i40e_vsi {
u16 base_queue; /* vsi's first queue in hw array */
u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 req_queue_pairs; /* User requested queue pairs */
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
@@ -504,6 +522,9 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ /* current rxnfc data */
+ struct ethtool_rxnfc rxnfc; /* current rss hash opts */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -544,14 +565,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];
snprintf(buf, sizeof(buf),
- "f%d.%d a%d.%d n%02x.%02x e%08x",
- hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ "f%d.%d.%05d a%d.%d n%x.%02x e%x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack);
+ (hw->nvm.eetrack & 0xffffff));
return buf;
}
@@ -593,7 +614,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
/**
* i40e_get_fd_cnt_all - get the total FD filter space available
- * @pf: pointer to the pf struct
+ * @pf: pointer to the PF struct
**/
static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
{
@@ -607,6 +628,7 @@ extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
@@ -618,9 +640,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
-int i40e_get_current_fd_count(struct i40e_pf *pf);
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
-int i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_current_fd_count(struct i40e_pf *pf);
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
@@ -680,6 +703,7 @@ int i40e_vlan_rx_add_vid(struct net_device *netdev,
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
+int i40e_open(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -690,7 +714,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
-int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_setup_tc(struct net_device *netdev, u8 tc);
void i40e_netpoll(struct net_device *netdev);
@@ -712,6 +735,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
void i40e_dcbnl_setup(struct i40e_vsi *vsi);
@@ -727,4 +751,8 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
+i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 77f6254a89ac..3e0d20037675 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
ret_code = i40e_aq_get_firmware_version(hw,
&hw->aq.fw_maj_ver,
&hw->aq.fw_min_ver,
+ &hw->aq.fw_build,
&hw->aq.api_maj_ver,
&hw->aq.api_min_ver,
NULL);
@@ -605,7 +606,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_free_arq;
/* get the NVM version info */
- i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index de17b6fbcc4e..28e519a50de4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -93,6 +93,7 @@ struct i40e_adminq_info {
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_release_on_done;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6aea65dae5ed..0bae22da014d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -51,6 +51,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_20G_KR2:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
@@ -85,46 +86,53 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = le16_to_cpu(aq_desc->datalen);
- u8 *aq_buffer = (u8 *)buffer;
- u32 data[4];
- u32 i = 0;
+ u8 *buf = (u8 *)buffer;
+ u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
- aq_desc->retval);
+ le16_to_cpu(aq_desc->opcode),
+ le16_to_cpu(aq_desc->flags),
+ le16_to_cpu(aq_desc->datalen),
+ le16_to_cpu(aq_desc->retval));
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- aq_desc->cookie_high, aq_desc->cookie_low);
+ le32_to_cpu(aq_desc->cookie_high),
+ le32_to_cpu(aq_desc->cookie_low));
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- aq_desc->params.internal.param0,
- aq_desc->params.internal.param1);
+ le32_to_cpu(aq_desc->params.internal.param0),
+ le32_to_cpu(aq_desc->params.internal.param1));
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- aq_desc->params.external.addr_high,
- aq_desc->params.external.addr_low);
+ le32_to_cpu(aq_desc->params.external.addr_high),
+ le32_to_cpu(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
- memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
- for (i = 0; i < len; i++) {
- data[((i % 16) / 4)] |=
- ((u32)aq_buffer[i]) << (8 * (i % 4));
- if ((i % 16) == 15) {
- i40e_debug(hw, mask,
- "\t0x%04X %08X %08X %08X %08X\n",
- i - 15, data[0], data[1], data[2],
- data[3]);
- memset(data, 0, sizeof(data));
- }
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i + 1], buf[i + 2],
+ buf[i + 3], buf[i + 4], buf[i + 5],
+ buf[i + 6], buf[i + 7], buf[i + 8],
+ buf[i + 9], buf[i + 10], buf[i + 11],
+ buf[i + 12], buf[i + 13], buf[i + 14],
+ buf[i + 15]);
+ /* write whatever's left over without overrunning the buffer */
+ if (i < len) {
+ char d_buf[80];
+ int j = 0;
+
+ memset(d_buf, 0, sizeof(d_buf));
+ j += sprintf(d_buf, "\t0x%04X ", i);
+ while (i < len)
+ j += sprintf(&d_buf[j], " %02X", buf[i++]);
+ i40e_debug(hw, mask, "%s\n", d_buf);
}
- if ((i % 16) != 0)
- i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
- i - (i % 16), data[0], data[1], data[2],
- data[3]);
}
}
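The rewritten dump loop prints complete 16-byte rows and then whatever is left over, instead of packing bytes into 32-bit words; the driver keeps its own loop so the output still flows through i40e_debug() and honours the debug mask. Where that filtering is not needed, a similar dump can usually be produced with the kernel's generic helper, sketched here (not what this driver uses):

#include <linux/printk.h>
#include <linux/types.h>

/* buf/len stand for whichever admin queue buffer is being inspected */
static void demo_dump(const void *buf, size_t len)
{
        print_hex_dump(KERN_DEBUG, "AQ CMD Buffer: ", DUMP_PREFIX_OFFSET,
                       16, 1, buf, len, true);
}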
@@ -534,7 +542,6 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
I40E_PTT_UNUSED_ENTRY(255)
};
-
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -685,7 +692,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
/**
* i40e_pre_tx_queue_cfg - pre tx queue configure
* @hw: pointer to the HW structure
- * @queue: target pf queue index
+ * @queue: target PF queue index
* @enable: state change request
*
* Handles hw requirement to indicate intention to enable
@@ -827,12 +834,15 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -849,7 +859,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
-#define I40E_PF_RESET_WAIT_COUNT 110
+#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -947,7 +957,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
u32 val;
u32 eol = 0x7ff;
- /* get number of interrupts, queues, and vfs */
+ /* get number of interrupts, queues, and VFs */
val = rd32(hw, I40E_GLPCI_CNF2);
num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
@@ -1076,8 +1086,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
return gpio_val;
}
-#define I40E_LED0 22
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
/**
* i40e_led_get - return current on/off mode
@@ -1090,6 +1103,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
+ u32 current_mode = 0;
u32 mode = 0;
int i;
@@ -1102,6 +1116,20 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
@@ -1121,6 +1149,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
+ u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@@ -1135,6 +1164,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -1298,14 +1341,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
}
/* Update the link info */
- status = i40e_update_link_info(hw, true);
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status) {
/* Wait a little bit (on 40G cards it sometimes takes a really
* long time for link to come back from the atomic reset)
* and try once more
*/
msleep(1000);
- status = i40e_update_link_info(hw, true);
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
}
if (status)
*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1441,6 +1484,10 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->lse_enable = false;
+ if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
/* save link status information */
if (link)
*link = *hw_link_info;
@@ -1453,35 +1500,6 @@ aq_get_link_info_exit:
}
/**
- * i40e_update_link_info
- * @hw: pointer to the hw struct
- * @enable_lse: enable/disable LinkStatusEvent reporting
- *
- * Returns the link status of the adapter
- **/
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
-{
- struct i40e_aq_get_phy_abilities_resp abilities;
- i40e_status status;
-
- status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
- if (status)
- return status;
-
- status = i40e_aq_get_phy_capabilities(hw, false, false,
- &abilities, NULL);
- if (status)
- return status;
-
- if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
- hw->phy.link_info.an_enabled = true;
- else
- hw->phy.link_info.an_enabled = false;
-
- return status;
-}
-
-/**
* i40e_aq_set_phy_int_mask
* @hw: pointer to the hw struct
* @mask: interrupt mask to be set
@@ -1760,6 +1778,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
* @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
* @api_major_version: major queue version
* @api_minor_version: minor queue version
* @cmd_details: pointer to command details structure or NULL
@@ -1768,6 +1787,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
**/
i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -1781,13 +1801,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status) {
- if (fw_major_version != NULL)
+ if (fw_major_version)
*fw_major_version = le16_to_cpu(resp->fw_major);
- if (fw_minor_version != NULL)
+ if (fw_minor_version)
*fw_minor_version = le16_to_cpu(resp->fw_minor);
- if (api_major_version != NULL)
+ if (fw_build)
+ *fw_build = le32_to_cpu(resp->fw_build);
+ if (api_major_version)
*api_major_version = le16_to_cpu(resp->api_major);
- if (api_minor_version != NULL)
+ if (api_minor_version)
*api_minor_version = le16_to_cpu(resp->api_minor);
}
@@ -1817,7 +1839,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
cmd->driver_major_ver = dv->major_version;
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
@@ -1997,7 +2019,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+ buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
@@ -2039,7 +2061,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
@@ -2061,7 +2083,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
- * @vfid: vf id to send msg
+ * @vfid: VF id to send msg
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
@@ -2106,7 +2128,7 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
* Read the register using the admin queue commands
**/
i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
- u32 reg_addr, u64 *reg_val,
+ u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2117,17 +2139,15 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
if (reg_val == NULL)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_debug_read_reg);
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status) {
- *reg_val = ((u64)cmd_resp->value_high << 32) |
- (u64)cmd_resp->value_low;
- *reg_val = le64_to_cpu(*reg_val);
+ *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
+ (u64)le32_to_cpu(cmd_resp->value_low);
}
return status;
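
Editor's note: the corrected read above converts each 32-bit half from little-endian before combining them, rather than byte-swapping the assembled 64-bit value. A user-space sketch of the same assembly step, assuming the halves are already in host order (not part of the patch):

	#include <stdint.h>

	/* Combine two host-order 32-bit register halves into one 64-bit value. */
	static inline uint64_t combine_reg_halves(uint32_t value_high, uint32_t value_low)
	{
		/* In the driver each half is first converted with le32_to_cpu(). */
		return ((uint64_t)value_high << 32) | (uint64_t)value_low;
	}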
@@ -2377,6 +2397,7 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_LED 0x61
#define I40E_DEV_FUNC_CAP_SDP 0x62
#define I40E_DEV_FUNC_CAP_MDIO 0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64
/**
* i40e_parse_discover_capabilities
@@ -2521,11 +2542,18 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
+ case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ break;
default:
break;
}
}
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
/* Software override ensuring FCoE is disabled if npar or mfp
* mode because it is not supported in these modes.
*/
@@ -3377,6 +3405,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from the alternate structure. Fields are indicated
+ * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
+ * pointer is not passed, only the register at 'reg_addr0' is read.
+ *
+ **/
+static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ i40e_status status;
+
+ if (!reg_val0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = cpu_to_le32(reg_addr0);
+ cmd_resp->address1 = cpu_to_le32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (!status) {
+ *reg_val0 = le32_to_cpu(cmd_resp->data0);
+
+ if (reg_val1)
+ *reg_val1 = le32_to_cpu(cmd_resp->data1);
+ }
+
+ return status;
+}
+
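
Editor's note: a hedged usage sketch of the helper above; pass NULL for reg_val1 when only the first dword is wanted. The helper is file-local, so a real caller would live in i40e_common.c; this caller is hypothetical and not part of the patch:

	/* Hypothetical caller: read one or two alternate-structure dwords. */
	static i40e_status example_read_alt(struct i40e_hw *hw, u32 addr0, u32 addr1)
	{
		u32 val0 = 0, val1 = 0;
		i40e_status status;

		/* Two-dword read: both value pointers supplied */
		status = i40e_aq_alternate_read(hw, addr0, &val0, addr1, &val1);
		if (status)
			return status;

		/* One-dword read: reg_val1 is NULL, so only the value at addr0 is stored */
		return i40e_aq_alternate_read(hw, addr0, &val0, 0, NULL);
	}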
+/**
* i40e_aq_resume_port_tx
* @hw: pointer to the hardware structure
* @cmd_details: pointer to command details structure or NULL
@@ -3440,3 +3509,136 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
break;
}
}
+
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ i40e_status status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = cpu_to_le32(start_index);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size)
+ *ret_buff_size = le16_to_cpu(desc.datalen);
+ if (ret_next_table)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index)
+ *ret_next_index = le32_to_cpu(resp->idx);
+ }
+
+ return status;
+}
+
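
Editor's note: a sketch of how a caller might consume the cursors returned by the dump command above (ret_next_table/ret_next_index say where the next read should start). Hypothetical caller that assumes the buffer is owned and sized by its user; not part of the patch:

	/* Hypothetical: read one table block and report where the next read starts. */
	static i40e_status example_dump_block(struct i40e_hw *hw, u8 cluster, u8 table,
					      u32 index, void *buff, u16 buff_size)
	{
		u16 rlen = 0;
		u8 next_table = 0;
		u32 next_index = 0;
		i40e_status status;

		status = i40e_aq_debug_dump(hw, cluster, table, index, buff_size,
					    buff, &rlen, &next_table, &next_index, NULL);
		if (!status)
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "dumped %d bytes, next table %d index %u\n",
				   rlen, next_table, next_index);
		return status;
	}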
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read bw from the alternate RAM for the given PF
+ **/
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ i40e_status status;
+ u32 max_bw_addr, min_bw_addr;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ if (*min_bw & I40E_ALT_BW_VALID_MASK)
+ *min_valid = true;
+ else
+ *min_valid = false;
+
+ if (*max_bw & I40E_ALT_BW_VALID_MASK)
+ *max_valid = true;
+ else
+ *max_valid = false;
+
+ return status;
+}
+
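
Editor's note: a hedged caller sketch for the helper above; the *_valid flags gate whether the raw bandwidth words are meaningful. Hypothetical caller, not part of the patch:

	/* Hypothetical: fetch this PF's min/max bandwidth words and skip invalid ones. */
	static void example_check_partition_bw(struct i40e_hw *hw)
	{
		u32 max_bw = 0, min_bw = 0;
		bool min_valid = false, max_valid = false;

		if (i40e_read_bw_from_alt_ram(hw, &max_bw, &min_bw,
					      &min_valid, &max_valid))
			return;

		if (max_valid)
			i40e_debug(hw, I40E_DEBUG_ALL, "max_bw word 0x%08x\n", max_bw);
		if (min_valid)
			i40e_debug(hw, I40E_DEBUG_ALL, "min_bw word 0x%08x\n", min_bw);
	}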
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ u16 bwd_size = sizeof(*bw_data);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ if (bwd_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
+ cmd_details);
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 3ce43588592d..2547aa21b2ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -419,7 +419,7 @@ static void i40e_cee_to_dcb_v1_config(
{
u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err, sync, oper;
+ u8 i, tc, err;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -456,10 +456,8 @@ static void i40e_cee_to_dcb_v1_config(
status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
I40E_AQC_CEE_APP_STATUS_SHIFT;
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
- sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
- oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
- /* Add APPs if Error is False and Oper/Sync is True */
- if (!err && sync && oper) {
+ /* Add APPs if Error is False */
+ if (!err) {
/* CEE operating configuration supports FCoE/iSCSI/FIP only */
dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index a11c70ca5a28..bd5079d5c1b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -178,6 +178,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return;
+ /* MFP mode but not an iSCSI PF, so return */
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ return;
+
dcbxcfg = &hw->local_dcbx_config;
/* Set up all the App TLVs if DCBx is negotiated */
@@ -223,7 +227,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
/**
* i40e_dcbnl_del_app - Delete APP on all VSIs
- * @pf: the corresponding pf
+ * @pf: the corresponding PF
* @app: APP to delete
*
* Delete given APP from all the VSIs for given PF
@@ -268,23 +272,26 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
/**
* i40e_dcbnl_flush_apps - Delete all removed APPs
- * @pf: the corresponding pf
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
* @new_cfg: new DCBX configuration data
*
* Find and delete all APPs that are not present in the passed
* DCB configuration
**/
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg)
{
struct i40e_dcb_app_priority_table app;
- struct i40e_dcbx_config *dcbxcfg;
- struct i40e_hw *hw = &pf->hw;
int i;
- dcbxcfg = &hw->local_dcbx_config;
- for (i = 0; i < dcbxcfg->numapps; i++) {
- app = dcbxcfg->app[i];
+ /* MFP mode but not an iSCSI PF, so return */
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ return;
+
+ for (i = 0; i < old_cfg->numapps; i++) {
+ app = old_cfg->app[i];
/* The APP is not available anymore delete it */
if (!i40e_dcbnl_find_app(new_cfg, &app))
i40e_dcbnl_del_app(pf, &app);
@@ -306,9 +313,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return;
- /* Do not setup DCB NL ops for MFP mode */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- dev->dcbnl_ops = &dcbnl_ops;
+ dev->dcbnl_ops = &dcbnl_ops;
/* Set initial IEEE DCB settings */
i40e_dcbnl_set_all(vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index c17ee77100d3..34170eabca7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -35,7 +35,7 @@ static struct dentry *i40e_dbg_root;
/**
* i40e_dbg_find_vsi - searches for the vsi with the given seid
- * @pf - the pf structure to search for the vsi
+ * @pf - the PF structure to search for the vsi
* @seid - seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
@@ -54,7 +54,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
/**
* i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf - the pf structure to search for the veb
+ * @pf - the PF structure to search for the veb
* @seid - seid of the veb it is searching for
**/
static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
@@ -112,7 +112,7 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
/**
* i40e_dbg_prep_dump_buf
- * @pf: the pf we're working with
+ * @pf: the PF we're working with
* @buflen: the desired buffer length
*
* Return positive if success, 0 if failed
@@ -318,7 +318,7 @@ static const struct file_operations i40e_dbg_dump_fops = {
* setup, adding or removing filters, or other things. Many of
* these will be useful for some forms of unit testing.
**************************************************************/
-static char i40e_dbg_command_buf[256] = "hello world";
+static char i40e_dbg_command_buf[256] = "";
/**
* i40e_dbg_command_read - read for command datum
@@ -390,6 +390,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
vsi->netdev_registered,
vsi->current_netdev_flags, vsi->state, vsi->flags);
+ if (vsi == pf->vsi[pf->lan_vsi])
+ dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+ pf->hw.mac.addr,
+ pf->hw.mac.san_addr,
+ pf->hw.mac.port_addr);
list_for_each_entry(f, &vsi->mac_filter_list, list) {
dev_info(&pf->pdev->dev,
" mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
@@ -675,7 +680,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
if (vsi->back)
- dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back);
+ dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back);
dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
dev_info(&pf->pdev->dev,
" tc_config: numtc = %d, enabled_tc = 0x%x\n",
@@ -921,9 +926,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
return;
}
dev_info(&pf->pdev->dev,
- "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n",
+ "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
- veb->uplink_seid);
+ veb->uplink_seid,
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
i40e_dbg_dump_eth_stats(pf, &veb->stats);
}
@@ -945,7 +951,7 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
/**
* i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the pf that would be altered
+ * @pf: the PF that would be altered
* @flag: flag that needs enabling or disabling
* @enable: Enable/disable FD SD/ATR
**/
@@ -957,7 +963,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
pf->flags &= ~flag;
pf->auto_disable_flags |= flag;
}
- dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+ dev_info(&pf->pdev->dev, "requesting a PF reset\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
}
@@ -1382,6 +1388,50 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
r_cfg->app[i].selector,
r_cfg->app[i].protocolid);
}
+ } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
+ int cluster_id, table_id;
+ int index, ret;
+ u16 buff_len = 4096;
+ u32 next_index;
+ u8 next_table;
+ u8 *buff;
+ u16 rlen;
+
+ cnt = sscanf(&cmd_buf[18], "%i %i %i",
+ &cluster_id, &table_id, &index);
+ if (cnt != 3) {
+ dev_info(&pf->pdev->dev,
+ "dump debug fwdata <cluster_id> <table_id> <index>\n");
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "AQ debug dump fwdata params %x %x %x %x\n",
+ cluster_id, table_id, index, buff_len);
+ buff = kzalloc(buff_len, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
+ index, buff_len, buff, &rlen,
+ &next_table, &next_index,
+ NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "debug dump fwdata AQ Failed %d 0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
+ rlen, next_table, next_index);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, rlen, true);
+ kfree(buff);
+ buff = NULL;
} else {
dev_info(&pf->pdev->dev,
"dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
@@ -1487,11 +1537,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
}
- } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
- i40e_pf_reset_stats(pf);
- dev_info(&pf->pdev->dev, "pf clear stats called\n");
+ } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
+ if (pf->hw.partition_id == 1) {
+ i40e_pf_reset_stats(pf);
+ dev_info(&pf->pdev->dev, "port stats cleared\n");
+ } else {
+ dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
+ }
} else {
- dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
struct i40e_aq_desc *desc;
@@ -1893,11 +1947,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, " dump desc aq\n");
dev_info(&pf->pdev->dev, " dump reset stats\n");
+ dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
dev_info(&pf->pdev->dev, " msg_enable [level]\n");
dev_info(&pf->pdev->dev, " read <reg>\n");
dev_info(&pf->pdev->dev, " write <reg> <value>\n");
dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
- dev_info(&pf->pdev->dev, " clear_stats pf\n");
+ dev_info(&pf->pdev->dev, " clear_stats port\n");
dev_info(&pf->pdev->dev, " pfr\n");
dev_info(&pf->pdev->dev, " corer\n");
dev_info(&pf->pdev->dev, " globr\n");
@@ -1935,7 +1990,7 @@ static const struct file_operations i40e_dbg_command_fops = {
* The netdev_ops entry in debugfs is for giving the driver commands
* to be executed from the netdev operations.
**************************************************************/
-static char i40e_dbg_netdev_ops_buf[256] = "hello world";
+static char i40e_dbg_netdev_ops_buf[256] = "";
/**
* i40e_dbg_netdev_ops - read for netdev_ops datum
@@ -2123,8 +2178,8 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
};
/**
- * i40e_dbg_pf_init - setup the debugfs directory for the pf
- * @pf: the pf that is starting up
+ * i40e_dbg_pf_init - setup the debugfs directory for the PF
+ * @pf: the PF that is starting up
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
@@ -2160,8 +2215,8 @@ create_failed:
}
/**
- * i40e_dbg_pf_exit - clear out the pf's debugfs entries
- * @pf: the pf that is stopping
+ * i40e_dbg_pf_exit - clear out the PF's debugfs entries
+ * @pf: the PF that is stopping
**/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b8230dc205ec..4cbaaeb902c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
- I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
I40E_PF_STAT("crc_errors", stats.crc_errors),
I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
@@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "NPAR",
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN \
+ (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
* @pf: the PF struct
@@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
}
/**
- * i40e_get_settings - Get Link Speed and Duplex settings
+ * i40e_get_settings_link_up - Get the Link settings for when link is up
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
* @netdev: network interface device structure
- * @ecmd: ethtool command
*
- * Reports speed/duplex settings based on media_type
**/
-static int i40e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static void i40e_get_settings_link_up(struct i40e_hw *hw,
+ struct ethtool_cmd *ecmd,
+ struct net_device *netdev)
{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
- struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
- bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
u32 link_speed = hw_link_info->link_speed;
- /* hardware is either in 40G mode or 10G mode
- * NOTE: this section initializes supported and advertising
- */
- if (!link_up) {
- /* link is down and the driver needs to fall back on
- * device ID to determine what kinds of info to display,
- * it's mostly a guess that may change when link is up
- */
- switch (hw->device_id) {
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- case I40E_DEV_ID_QSFP_C:
- /* pluggable QSFP */
- ecmd->supported = SUPPORTED_40000baseSR4_Full |
- SUPPORTED_40000baseCR4_Full |
- SUPPORTED_40000baseLR4_Full;
- ecmd->advertising = ADVERTISED_40000baseSR4_Full |
- ADVERTISED_40000baseCR4_Full |
- ADVERTISED_40000baseLR4_Full;
- break;
- case I40E_DEV_ID_KX_B:
- /* backplane 40G */
- ecmd->supported = SUPPORTED_40000baseKR4_Full;
- ecmd->advertising = ADVERTISED_40000baseKR4_Full;
- break;
- case I40E_DEV_ID_KX_C:
- /* backplane 10G */
- ecmd->supported = SUPPORTED_10000baseKR_Full;
- ecmd->advertising = ADVERTISED_10000baseKR_Full;
- break;
- case I40E_DEV_ID_10G_BASE_T:
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
- break;
- default:
- /* all the rest are 10G/1G */
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full;
- break;
- }
-
- /* skip phy_type use as it is zero when link is down */
- goto no_valid_phy_type;
- }
-
+ /* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
@@ -304,6 +257,11 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->advertising = ADVERTISED_Autoneg |
ADVERTISED_40000baseCR4_Full;
break;
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ break;
case I40E_PHY_TYPE_40GBASE_KR4:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_40000baseKR4_Full;
@@ -311,13 +269,17 @@ static int i40e_get_settings(struct net_device *netdev,
ADVERTISED_40000baseKR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_SR4:
- case I40E_PHY_TYPE_XLPPI:
- case I40E_PHY_TYPE_XLAUI:
ecmd->supported = SUPPORTED_40000baseSR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ecmd->supported = SUPPORTED_40000baseLR4_Full;
break;
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_20000baseKR2_Full;
+ ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_20000baseKR2_Full;
+ break;
case I40E_PHY_TYPE_10GBASE_KX4:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseKX4_Full;
@@ -334,55 +296,56 @@ static int i40e_get_settings(struct net_device *netdev,
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseKX_Full;
+ ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_1000baseKX_Full;
break;
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
- case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_10GBASE_T:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full;
+ ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
+ ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
case I40E_PHY_TYPE_SFI:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_10GBASE_AOC:
ecmd->supported = SUPPORTED_10000baseT_Full;
break;
- case I40E_PHY_TYPE_1000BASE_KX:
- case I40E_PHY_TYPE_1000BASE_T:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
- break;
- case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
- break;
case I40E_PHY_TYPE_SGMII:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -390,8 +353,125 @@ static int i40e_get_settings(struct net_device *netdev,
hw_link_info->phy_type);
}
-no_valid_phy_type:
- /* this is if autoneg is enabled or disabled */
+ /* Set speed and duplex */
+ switch (link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ break;
+ case I40E_LINK_SPEED_20GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ break;
+ case I40E_LINK_SPEED_100MB:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
+}
+
+/**
+ * i40e_get_settings_link_down - Get the Link settings for when link is down
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
+ *
+ * Reports link settings that can be determined when link is down
+ **/
+static void i40e_get_settings_link_down(struct i40e_hw *hw,
+ struct ethtool_cmd *ecmd)
+{
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ /* link is down and the driver needs to fall back on
+ * device ID to determine what kinds of info to display;
+ * it's mostly a guess that may change when link is up
+ */
+ switch (hw->device_id) {
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ /* pluggable QSFP */
+ ecmd->supported = SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseLR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full |
+ ADVERTISED_40000baseCR4_Full |
+ ADVERTISED_40000baseLR4_Full;
+ break;
+ case I40E_DEV_ID_KX_B:
+ /* backplane 40G */
+ ecmd->supported = SUPPORTED_40000baseKR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+ break;
+ case I40E_DEV_ID_KX_C:
+ /* backplane 10G */
+ ecmd->supported = SUPPORTED_10000baseKR_Full;
+ ecmd->advertising = ADVERTISED_10000baseKR_Full;
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
+ /* Figure out what has been requested */
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ break;
+ case I40E_DEV_ID_20G_KR2:
+ /* backplane 20G */
+ ecmd->supported = SUPPORTED_20000baseKR2_Full;
+ ecmd->advertising = ADVERTISED_20000baseKR2_Full;
+ break;
+ default:
+ /* all the rest are 10G/1G */
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full;
+ /* Figure out what has been requested */
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ }
+
+ /* With no link, speed and duplex are unknown */
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+
+ if (link_up)
+ i40e_get_settings_link_up(hw, ecmd, netdev);
+ else
+ i40e_get_settings_link_down(hw, ecmd);
+
+ /* Now set the settings that don't rely on link being up/down */
+
+ /* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -424,11 +504,13 @@ no_valid_phy_type:
break;
}
+ /* Set transceiver */
ecmd->transceiver = XCVR_EXTERNAL;
+ /* Set flow control settings */
ecmd->supported |= SUPPORTED_Pause;
- switch (hw->fc.current_mode) {
+ switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
ecmd->advertising |= ADVERTISED_Pause;
break;
@@ -445,30 +527,6 @@ no_valid_phy_type:
break;
}
- if (link_up) {
- switch (link_speed) {
- case I40E_LINK_SPEED_40GB:
- /* need a SPEED_40000 in ethtool.h */
- ethtool_cmd_speed_set(ecmd, 40000);
- break;
- case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- break;
- case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- break;
- case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
- break;
- default:
- break;
- }
- ecmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
- }
-
return 0;
}
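
Editor's note: several of the link-up cases above translate the saved requested_speeds bits into ethtool ADVERTISED_* bits. A hypothetical helper showing that mapping in isolation (the u8 parameter type and the helper itself are assumptions, not part of the patch):

	/* Hypothetical: translate requested link-speed bits into ethtool advertise bits. */
	static u32 example_requested_to_advertised(u8 requested_speeds)
	{
		u32 adv = 0;

		if (requested_speeds & I40E_LINK_SPEED_10GB)
			adv |= ADVERTISED_10000baseT_Full;
		if (requested_speeds & I40E_LINK_SPEED_1GB)
			adv |= ADVERTISED_1000baseT_Full;
		if (requested_speeds & I40E_LINK_SPEED_100MB)
			adv |= ADVERTISED_100baseT_Full;
		return adv;
	}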
@@ -588,6 +646,8 @@ static int i40e_set_settings(struct net_device *netdev,
advertise & ADVERTISED_10000baseKX4_Full ||
advertise & ADVERTISED_10000baseKR_Full)
config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (advertise & ADVERTISED_20000baseKR2_Full)
+ config.link_speed |= I40E_LINK_SPEED_20GB;
if (advertise & ADVERTISED_40000baseKR4_Full ||
advertise & ADVERTISED_40000baseCR4_Full ||
advertise & ADVERTISED_40000baseSR4_Full ||
@@ -601,6 +661,8 @@ static int i40e_set_settings(struct net_device *netdev,
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ /* save the requested speeds */
+ hw->phy.link_info.requested_speeds = config.link_speed;
/* set link and auto negotiation so changes take effect */
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* If link is up put link down */
@@ -621,7 +683,7 @@ static int i40e_set_settings(struct net_device *netdev,
return -EAGAIN;
}
- status = i40e_update_link_info(hw, true);
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
netdev_info(netdev, "Updating link info failed with error %d\n",
status);
@@ -767,7 +829,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
+ netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
status, hw->aq.asq_last_status);
err = -EAGAIN;
}
@@ -870,7 +932,9 @@ static int i40e_get_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val)
+ if (ret_val &&
+ ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
+ (hw->debug_mask & I40E_DEBUG_NVM)))
dev_info(&pf->pdev->dev,
"NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -974,7 +1038,10 @@ static int i40e_set_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
+ if (ret_val &&
+ ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
+ hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
+ (hw->debug_mask & I40E_DEBUG_NVM)))
dev_info(&pf->pdev->dev,
"NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -998,6 +1065,7 @@ static void i40e_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1176,7 +1244,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi]) {
+ if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
int len = I40E_PF_STATS_LEN(netdev);
if (pf->lan_veb != I40E_NO_VEB)
@@ -1185,6 +1253,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
} else {
return I40E_VSI_STATS_LEN(netdev);
}
+ case ETH_SS_PRIV_FLAGS:
+ return I40E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1247,7 +1317,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
i += 2;
}
rcu_read_unlock();
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
if (pf->lan_veb != I40E_NO_VEB) {
@@ -1320,7 +1390,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
if (pf->lan_veb != I40E_NO_VEB) {
@@ -1358,6 +1428,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ break;
}
}
@@ -1473,6 +1552,7 @@ static void i40e_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
struct i40e_pf *pf = np->vsi->back;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
@@ -1480,6 +1560,12 @@ static void i40e_diag_test(struct net_device *netdev,
netif_info(pf, drv, netdev, "offline testing starting\n");
set_bit(__I40E_TESTING, &pf->state);
+ /* If the device is online then take it offline */
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
@@ -1502,6 +1588,9 @@ static void i40e_diag_test(struct net_device *netdev,
clear_bit(__I40E_TESTING, &pf->state);
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ if (if_running)
+ dev_open(netdev);
} else {
/* Online tests */
netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1599,6 +1688,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
case ETHTOOL_ID_INACTIVE:
i40e_led_set(hw, pf->led_status, false);
break;
+ default:
+ break;
}
return 0;
@@ -1703,6 +1794,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
{
cmd->data = 0;
+ if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
+ cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
+ cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
+ return 0;
+ }
/* Report default options for RSS on i40e */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1817,6 +1913,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
else
fsp->ring_cookie = rule->q_index;
+ if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
+ struct i40e_vsi *vsi;
+
+ vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
+ if (vsi && vsi->type == I40E_VSI_SRIOV) {
+ fsp->h_ext.data[1] = htonl(vsi->vf_id);
+ fsp->m_ext.data[1] = htonl(0x1);
+ }
+ }
+
return 0;
}
@@ -1974,6 +2080,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
i40e_flush(hw);
+ /* Save setting for future output/update */
+ pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
+
return 0;
}
@@ -2107,6 +2216,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input;
struct i40e_pf *pf;
int ret = -EINVAL;
+ u16 vf_id;
if (!vsi)
return -EINVAL;
@@ -2167,7 +2277,22 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ if (ntohl(fsp->m_ext.data[1])) {
+ if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
+ netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+ goto free_input;
+ }
+ vf_id = ntohl(fsp->h_ext.data[1]);
+ /* Find VSI id from VF id and override dest VSI */
+ input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
+ if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
+ netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+ goto free_input;
+ }
+ }
+
ret = i40e_add_del_fdir(vsi, input, true);
+free_input:
if (ret)
kfree(input);
else
@@ -2281,10 +2406,6 @@ static int i40e_set_channels(struct net_device *dev,
/* update feature limits from largest to smallest supported values */
/* TODO: Flow director limit, DCB etc */
- /* cap RSS limit */
- if (count > pf->rss_size_max)
- count = pf->rss_size_max;
-
/* use rss_reconfig to rebuild with new queue count and update traffic
* class queue mapping
*/
@@ -2295,6 +2416,133 @@ static int i40e_set_channels(struct net_device *dev,
return -EINVAL;
}
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+/**
+ * i40e_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
+{
+ return I40E_HKEY_ARRAY_SIZE;
+}
+
+/**
+ * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return I40E_HLUT_ARRAY_SIZE;
+}
+
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ int i, j;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+ reg_val = rd32(hw, I40E_PFQF_HLUT(i));
+ indir[j++] = reg_val & 0xff;
+ indir[j++] = (reg_val >> 8) & 0xff;
+ indir[j++] = (reg_val >> 16) & 0xff;
+ indir[j++] = (reg_val >> 24) & 0xff;
+ }
+
+ if (key) {
+ for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+ reg_val = rd32(hw, I40E_PFQF_HKEY(i));
+ key[j++] = (u8)(reg_val & 0xff);
+ key[j++] = (u8)((reg_val >> 8) & 0xff);
+ key[j++] = (u8)((reg_val >> 16) & 0xff);
+ key[j++] = (u8)((reg_val >> 24) & 0xff);
+ }
+ }
+ return 0;
+}
+
+/**
+ * i40e_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ int i, j;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+ reg_val = indir[j++];
+ reg_val |= indir[j++] << 8;
+ reg_val |= indir[j++] << 16;
+ reg_val |= indir[j++] << 24;
+ wr32(hw, I40E_PFQF_HLUT(i), reg_val);
+ }
+
+ if (key) {
+ for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+ reg_val = key[j++];
+ reg_val |= key[j++] << 8;
+ reg_val |= key[j++] << 16;
+ reg_val |= key[j++] << 24;
+ wr32(hw, I40E_PFQF_HKEY(i), reg_val);
+ }
+ }
+ return 0;
+}
+
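
Editor's note: the get/set paths above pack four indirection-table (or key) bytes into each 32-bit HLUT/HKEY register, lowest byte first. A standalone user-space sketch of that packing and its inverse (hypothetical arrays, not part of the patch):

	#include <stdint.h>

	/* Pack four consecutive table bytes into one 32-bit register image. */
	static uint32_t pack_lut_word(const uint8_t *lut)
	{
		return (uint32_t)lut[0] |
		       ((uint32_t)lut[1] << 8) |
		       ((uint32_t)lut[2] << 16) |
		       ((uint32_t)lut[3] << 24);
	}

	/* Unpack one register image back into four table bytes. */
	static void unpack_lut_word(uint32_t reg_val, uint8_t *lut)
	{
		lut[0] = reg_val & 0xff;
		lut[1] = (reg_val >> 8) & 0xff;
		lut[2] = (reg_val >> 16) & 0xff;
		lut[3] = (reg_val >> 24) & 0xff;
	}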
+/**
+ * i40e_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The string set count and the string set itself must match the flags
+ * returned here: add an entry to the i40e_priv_flags_strings array for each
+ * new flag.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40e_get_priv_flags(struct net_device *dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 ret_flags = 0;
+
+ ret_flags |= pf->hw.func_caps.npar_enable ?
+ I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+
+ return ret_flags;
+}
+
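
Editor's note: as the comment above says, each reported bit needs a matching string. A hypothetical sketch of adding a second flag; the flag name, bit, and condition are illustrative only and not part of the patch:

	/* Hypothetical second flag paired with a second string entry. */
	#define I40E_PRIV_FLAGS_EXAMPLE_FLAG	(1 << 1)

	static const char example_priv_flags_strings[][ETH_GSTRING_LEN] = {
		"NPAR",		/* bit 0: I40E_PRIV_FLAGS_NPAR_FLAG */
		"example",	/* bit 1: I40E_PRIV_FLAGS_EXAMPLE_FLAG (illustrative) */
	};

	static u32 example_get_priv_flags(struct i40e_pf *pf)
	{
		u32 flags = 0;

		if (pf->hw.func_caps.npar_enable)
			flags |= I40E_PRIV_FLAGS_NPAR_FLAG;
		/* flags |= I40E_PRIV_FLAGS_EXAMPLE_FLAG when its condition holds */
		return flags;
	}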
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.set_settings = i40e_set_settings,
@@ -2323,9 +2571,14 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_rxfh_key_size = i40e_get_rxfh_key_size,
+ .get_rxfh_indir_size = i40e_get_rxfh_indir_size,
+ .get_rxfh = i40e_get_rxfh,
+ .set_rxfh = i40e_set_rxfh,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
+ .get_priv_flags = i40e_get_priv_flags,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 27c206e62da7..1803afeef23e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -24,7 +24,6 @@
*
******************************************************************************/
-
#include <linux/if_ether.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -150,7 +149,7 @@ static inline bool i40e_fcoe_xid_is_valid(u16 xid)
/**
* i40e_fcoe_ddp_unmap - unmap the mapped sglist associated
- * @pf: pointer to pf
+ * @pf: pointer to PF
* @ddp: sw DDP context
*
* Unmap the scatter-gather list associated with the given SW DDP context
@@ -269,7 +268,7 @@ out:
/**
* i40e_fcoe_sw_init - sets up the HW for FCoE
- * @pf: pointer to pf
+ * @pf: pointer to PF
*
* Returns 0 if FCoE is supported otherwise the error code
**/
@@ -329,7 +328,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
/**
* i40e_get_fcoe_tc_map - Return TC map for FCoE APP
- * @pf: pointer to pf
+ * @pf: pointer to PF
*
**/
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
@@ -381,12 +380,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
ctxt->pf_num = hw->pf_id;
ctxt->vf_num = 0;
ctxt->uplink_seid = vsi->uplink_seid;
- ctxt->connection_type = 0x1;
+ ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt->flags = I40E_AQ_VSI_TYPE_PF;
/* FCoE VSI would need the following sections */
- info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID |
- I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
/* FCoE VSI does not need these sections */
info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
@@ -395,7 +393,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
- info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ info->valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ info->switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
enabled_tc = i40e_get_fcoe_tc_map(pf);
i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
@@ -1303,8 +1306,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
/* MACLEN is ether header length in words not bytes */
td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
- return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
}
/**
@@ -1443,7 +1445,6 @@ static int i40e_fcoe_set_features(struct net_device *netdev,
return 0;
}
-
static const struct net_device_ops i40e_fcoe_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -1470,6 +1471,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = {
.ndo_set_features = i40e_fcoe_set_features,
};
+/* fcoe network device type */
+static struct device_type fcoe_netdev_type = {
+ .name = "fcoe",
+};
+
/**
* i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI
* @vsi: pointer to the associated VSI struct
@@ -1503,6 +1509,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
netdev->mtu = FCOE_MTU;
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type);
/* set different dev_port value 1 for FCoE netdev than the default
* zero dev_port value for PF netdev, this helps biosdevname user
* tool to differentiate them correctly while both attached to the
@@ -1523,7 +1530,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
/**
* i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
- * @pf: the pf that VSI is associated with
+ * @pf: the PF that VSI is associated with
*
**/
void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
@@ -1550,7 +1557,7 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
if (vsi) {
dev_dbg(&pf->pdev->dev,
- "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n",
+ "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n",
vsi->seid, vsi->id, vsi->uplink_seid, seid);
} else {
dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 21e0f582031c..0d49e2d15d40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -37,7 +37,6 @@
#define I40E_FILTER_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
-
/* receive queue descriptor filter status for FCoE */
#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3
#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 4627588f4613..0079ad7bcd0e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -856,7 +856,7 @@ static void i40e_write_dword(u8 *hmc_bits,
if (ce_info->width < 32)
mask = ((u32)1 << ce_info->width) - 1;
else
- mask = 0xFFFFFFFF;
+ mask = ~(u32)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -908,7 +908,7 @@ static void i40e_write_qword(u8 *hmc_bits,
if (ce_info->width < 64)
mask = ((u64)1 << ce_info->width) - 1;
else
- mask = 0xFFFFFFFFFFFFFFFF;
+ mask = ~(u64)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
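
Editor's note: the two mask changes above replace hard-coded all-ones literals with ~(u32)0 / ~(u64)0 so the constant always matches the type's width, and the width<N branch avoids an undefined full-width shift. A small user-space sketch of the general pattern (not part of the patch):

	#include <stdint.h>

	/* All-ones mask for 'width' bits of a 64-bit field (width may be 1..64). */
	static uint64_t field_mask64(unsigned int width)
	{
		if (width < 64)
			return ((uint64_t)1 << width) - 1;
		return ~(uint64_t)0;	/* shifting by 64 would be undefined */
	}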
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index dadda3c5d658..a54c14491e3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -75,6 +75,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
/* required last entry */
{0, }
};
@@ -249,6 +250,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
}
/**
+ * i40e_find_vsi_from_id - searches for the vsi with the given id
+ * @pf - the pf structure to search for the vsi
+ * @id - id of the vsi it is searching for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->id == id))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
* i40e_service_event_schedule - Schedule the service task to wake up
* @pf: board private structure
*
@@ -450,7 +467,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * i40e_pf_reset_stats - Reset all of the stats for the given PF
* @pf: the PF to be reset
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
@@ -896,7 +913,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_update_pf_stats - Update the pf statistics counters.
+ * i40e_update_pf_stats - Update the PF statistics counters.
* @pf: the PF to be updated
**/
static void i40e_update_pf_stats(struct i40e_pf *pf)
@@ -919,11 +936,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->eth.rx_discards,
&nsd->eth.rx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
-
i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
I40E_GLPRT_UPRCL(hw->port),
pf->stat_offsets_loaded,
@@ -1133,7 +1145,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL
@@ -1161,7 +1173,7 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
* i40e_find_mac - Find a mac addr in the macvlan filters list
* @vsi: the VSI to be searched
* @macaddr: the MAC address we are searching for
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
*
* Returns the first filter with the provided MAC address or NULL if
@@ -1209,7 +1221,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
* i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
- * @is_vf: true if it is a vf
+ * @is_vf: true if it is a VF
* @is_netdev: true if it is a netdev
*
* Goes through all the macvlan filters and adds a
@@ -1270,7 +1282,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
@@ -1330,7 +1342,7 @@ add_filter_out:
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
**/
void i40e_del_filter(struct i40e_vsi *vsi,
@@ -1357,7 +1369,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
f->counter--;
}
} else {
- /* make sure we don't remove a filter in use by vf or netdev */
+ /* make sure we don't remove a filter in use by VF or netdev */
int min_f = 0;
min_f += (f->is_vf ? 1 : 0);
min_f += (f->is_netdev ? 1 : 0);
@@ -1546,7 +1558,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.tc_info[i].qoffset = offset;
vsi->tc_config.tc_info[i].qcount = qcount;
- /* find the power-of-2 of the number of queue pairs */
+ /* find the next higher power-of-2 of num queue pairs */
num_qps = qcount;
pow = 0;
while (num_qps && ((1 << pow) < qcount)) {
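The reworded comment above describes a round-up-to-the-next-power-of-two loop. A standalone sketch of that calculation (the non-zero guard mirrors the driver's loop; the inputs are just example values):

#include <stdio.h>

/* Round a queue count up to the next power of two. */
static unsigned int next_pow2(unsigned int qcount)
{
	unsigned int pow = 0;

	while (qcount && ((1u << pow) < qcount))
		pow++;
	return 1u << pow;
}

int main(void)
{
	/* 1 -> 1, 5 -> 8, 8 -> 8 */
	printf("%u %u %u\n", next_pow2(1), next_pow2(5), next_pow2(8));
	return 0;
}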
@@ -1576,6 +1588,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
/* Set actual Tx/Rx queue pairs */
vsi->num_queue_pairs = offset;
+ if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
+ if (vsi->req_queue_pairs > 0)
+ vsi->num_queue_pairs = vsi->req_queue_pairs;
+ else
+ vsi->num_queue_pairs = pf->num_lan_msix;
+ }
/* Scheduler section valid can only be set for ADD VSI */
if (is_add) {
@@ -1967,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
@@ -1996,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
@@ -2280,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
@@ -2398,20 +2416,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
struct i40e_vsi *vsi = ring->vsi;
cpumask_var_t mask;
- if (ring->q_vector && ring->netdev) {
- /* Single TC mode enable XPS */
- if (vsi->tc_config.numtc <= 1 &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+ if (!ring->q_vector || !ring->netdev)
+ return;
+
+ /* Single TC mode enable XPS */
+ if (vsi->tc_config.numtc <= 1) {
+ if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
ring->queue_index);
- } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
- /* Disable XPS to allow selection based on TC */
- bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
- netif_set_xps_queue(ring->netdev, mask,
- ring->queue_index);
- free_cpumask_var(mask);
- }
+ } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ /* Disable XPS to allow selection based on TC */
+ bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+ netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+ free_cpumask_var(mask);
}
}
@@ -2596,7 +2614,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ if (ring_is_ps_enabled(ring)) {
+ i40e_alloc_rx_headers(ring);
+ i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
+ } else {
+ i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
+ }
return 0;
}
@@ -3183,13 +3206,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->globr_count++;
} else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
- set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
}
}
if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+ rd32(hw, I40E_PFHMC_ERRORINFO),
+ rd32(hw, I40E_PFHMC_ERRORDATA));
}
if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
@@ -3825,6 +3851,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
pci_disable_msix(pf->pdev);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ kfree(pf->irq_pile);
+ pf->irq_pile = NULL;
} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
pci_disable_msi(pf->pdev);
}
@@ -4021,7 +4049,7 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
#endif
/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
- * @pf: pointer to pf
+ * @pf: pointer to PF
*
* Get TC map for ISCSI PF type that will include iSCSI TC
* and LAN TC.
@@ -4119,7 +4147,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
enabled_tc = i40e_get_iscsi_tc_map(pf);
else
- enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ return 1; /* Only TC0 */
/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -4169,11 +4197,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
- /* MPF enabled and iSCSI PF type */
+ /* MFP enabled and iSCSI PF type */
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return pf->hw.func_caps.enabled_tcmap;
+ return i40e_pf_get_default_tc(pf);
}
/**
@@ -4196,7 +4224,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+ "couldn't get PF vsi bw config, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4206,7 +4234,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+ "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4383,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.pf_num = vsi->back->hw.pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
/* Update the VSI after updating the VSI queue-mapping information */
@@ -4563,6 +4591,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
+ /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4))
+ goto out;
+
/* Get the initial DCB configuration */
err = i40e_init_dcb(hw);
if (!err) {
@@ -4626,6 +4659,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_40GB:
strlcpy(speed, "40 Gbps", SPEED_SIZE);
break;
+ case I40E_LINK_SPEED_20GB:
+ strlcpy(speed, "20 Gbps", SPEED_SIZE);
+ break;
case I40E_LINK_SPEED_10GB:
strlcpy(speed, "10 Gbps", SPEED_SIZE);
break;
@@ -4853,11 +4889,7 @@ exit:
*
* Returns 0 on success, negative value on failure
**/
-#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev)
-#else
-static int i40e_open(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -4967,7 +4999,7 @@ err_setup_tx:
/**
* i40e_fdir_filter_exit - Cleans up the Flow Director accounting
- * @pf: Pointer to pf
+ * @pf: Pointer to PF
*
* This function destroys the hlist where all the Flow Director
* filters were saved.
@@ -5055,24 +5087,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
- } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
-
- /* Request a Firmware Reset
- *
- * Same as Global reset, plus restarting the
- * embedded firmware engine.
- */
- /* enable EMP Reset */
- val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
- val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
- wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
-
- /* force the reset */
- val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
- val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
- wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
- i40e_flush(&pf->hw);
-
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -5195,7 +5209,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_aqc_lldp_get_mib *mib =
(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
struct i40e_hw *hw = &pf->hw;
- struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
int ret = 0;
@@ -5226,10 +5239,11 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
goto exit;
}
- memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
/* Store the old configuration */
- tmp_dcbx_cfg = *dcbx_cfg;
+ tmp_dcbx_cfg = hw->local_dcbx_config;
+ /* Reset the old DCBx configuration data */
+ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
@@ -5238,20 +5252,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
}
/* No change detected in DCBX configs */
- if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+ sizeof(tmp_dcbx_cfg))) {
dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
goto exit;
}
- need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
+ need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+ &hw->local_dcbx_config);
- i40e_dcbnl_flush_apps(pf, dcbx_cfg);
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
if (!need_reconfig)
goto exit;
/* Enable DCB tagging only when more than one TC */
- if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
@@ -5351,9 +5367,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
* i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
* @pf: board private structure
**/
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
- int val, fcnt_prog;
+ u32 val, fcnt_prog;
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
@@ -5361,12 +5377,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
}
/**
- * i40e_get_current_fd_count - Get the count of total FD filters programmed
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
-int i40e_get_current_fd_count(struct i40e_pf *pf)
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
- int val, fcnt_prog;
+ u32 val, fcnt_prog;
+
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
@@ -5375,6 +5392,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
}
/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+ u32 val, fcnt_prog;
+
+ val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+ fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+ ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+ I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ return fcnt_prog;
+}
+
+/**
* i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
* @pf: board private structure
**/
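The new i40e_get_global_fd_count() above folds two packed counters out of one register read with a mask and a shifted mask. A reference sketch of that pattern (the field layout below is invented for the example, not the real I40E_GLQF_FDCNT_0 layout):

#include <stdint.h>
#include <stdio.h>

#define GUARANT_CNT_MASK  0x00001FFFu		/* low field, bits 12:0 (example) */
#define BEST_CNT_MASK     0x1FFF0000u		/* high field, bits 28:16 (example) */
#define BEST_CNT_SHIFT    16

int main(void)
{
	uint32_t reg = 0x00230010;	/* pretend register value */
	uint32_t guaranteed = reg & GUARANT_CNT_MASK;
	uint32_t best_effort = (reg & BEST_CNT_MASK) >> BEST_CNT_SHIFT;

	printf("guaranteed=%u best_effort=%u total=%u\n",
	       guaranteed, best_effort, guaranteed + best_effort);	/* 16 35 51 */
	return 0;
}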
@@ -5388,7 +5420,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
/* Check if, FD SB or ATR was auto disabled and if there is enough room
* to re-enable
*/
- fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+ fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
@@ -5410,13 +5442,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
}
#define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
* i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
* @pf: board private structure
**/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
+ unsigned long min_flush_time;
int flush_wait_retry = 50;
+ bool disable_atr = false;
+ int fd_room;
int reg;
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
@@ -5424,9 +5460,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
- set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ /* If the flush is happening too quickly and we have mostly
+ * SB rules, we should not re-enable ATR for some time.
+ */
+ min_flush_time = pf->fd_flush_timestamp
+ + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+ fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+
+ if (!(time_after(jiffies, min_flush_time)) &&
+ (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ disable_atr = true;
+ }
+
pf->fd_flush_timestamp = jiffies;
- pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
@@ -5446,10 +5493,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ if (!disable_atr)
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
@@ -5460,7 +5505,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
* i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
* @pf: board private structure
**/
-int i40e_get_current_atr_cnt(struct i40e_pf *pf)
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
@@ -5486,9 +5531,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
- if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
- (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
- (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
i40e_fdir_check_and_reenable(pf);
@@ -5757,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_hw *hw = &pf->hw;
struct i40e_aqc_get_link_status *status =
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
- struct i40e_link_status *hw_link_info = &hw->phy.link_info;
/* save off old link status information */
- memcpy(&pf->hw.phy.link_info_old, hw_link_info,
- sizeof(pf->hw.phy.link_info_old));
+ hw->phy.link_info_old = hw->phy.link_info;
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
@@ -5875,6 +5916,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
case i40e_aqc_opc_send_msg_to_peer:
dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
break;
+ case i40e_aqc_opc_nvm_erase:
+ case i40e_aqc_opc_nvm_update:
+ i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+ break;
default:
dev_info(&pf->pdev->dev,
"ARQ Error: Unknown event 0x%04x received\n",
@@ -5919,6 +5964,94 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
}
/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_config_bridge_mode - Configure the HW bridge mode
+ * @veb: pointer to the bridge instance
+ *
+ * Configure the loop back mode for the LAN VSI that is downlink to the
+ * specified HW bridge instance. It is expected this function is called
+ * when a new HW bridge is instantiated.
+ **/
+static void i40e_config_bridge_mode(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+
+ dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+ i40e_disable_pf_switch_lb(pf);
+ else
+ i40e_enable_pf_switch_lb(pf);
+}
+
+/**
* i40e_reconstitute_veb - rebuild the VEB and anything connected to it
* @veb: pointer to the VEB instance
*
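The enable/disable pair above differ only in whether they set or clear the ALLOW_LB bit in the VSI's switch_id before pushing the update. The bit manipulation in isolation (the flag value here is made up, not the real I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB encoding):

#include <stdint.h>
#include <stdio.h>

#define ALLOW_LB_FLAG 0x0400u	/* illustrative bit position */

int main(void)
{
	uint16_t switch_id = 0x0021;

	switch_id |= ALLOW_LB_FLAG;			/* VEB: allow loopback */
	printf("0x%04x\n", switch_id);			/* 0x0421 */

	switch_id &= (uint16_t)~ALLOW_LB_FLAG;		/* VEPA: clear it again */
	printf("0x%04x\n", switch_id);			/* 0x0021 */
	return 0;
}

In the driver the field is little-endian on the wire, hence the cpu_to_le16() wrappers around the same operations.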
@@ -5964,8 +6097,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
- /* Enable LB mode for the main VSI now that it is on a VEB */
- i40e_enable_pf_switch_lb(pf);
+ i40e_config_bridge_mode(veb);
/* create the remaining VSIs attached to this VEB */
for (v = 0; v < pf->num_alloc_vsi; v++) {
@@ -6137,7 +6269,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for pf Reset.
+ * Close up the VFs and other things in prep for PF Reset.
**/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
@@ -6222,10 +6354,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* re-verify the eeprom if we just had an EMP reset */
- if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
- clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
i40e_verify_eeprom(pf);
- }
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
@@ -6335,13 +6465,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
- msleep(75);
- ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
- if (ret) {
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
}
-
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
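The same firmware gate appears in this hunk and again in the probe path further down: the explicit link restart is only issued on firmware older than 4.33, and the DCB hunk earlier skips DCB init on the same versions. The predicate itself is just a two-level version compare (standalone sketch, example values):

#include <stdio.h>

/* True for firmware older than 4.33 (major.minor), matching the checks above. */
static int fw_older_than_4_33(int fw_maj, int fw_min)
{
	return (fw_maj == 4 && fw_min < 33) || fw_maj < 4;
}

int main(void)
{
	printf("%d %d %d\n",
	       fw_older_than_4_33(4, 25),	/* 1 */
	       fw_older_than_4_33(4, 33),	/* 0 */
	       fw_older_than_4_33(5, 0));	/* 0 */
	return 0;
}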
@@ -6364,7 +6495,7 @@ clear_recovery:
}
/**
- * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
* @pf: board private structure
*
* Close up the VFs and other things in prep for a Core Reset,
@@ -6378,7 +6509,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
/**
* i40e_handle_mdd_event
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
*
* Called from the MDD irq handler to identify possibly malicious vfs
**/
@@ -6407,7 +6538,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_TX_QUEUE_SHIFT) -
pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
event, queue, pf_num, vf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -6493,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- u8 filter_index;
__be16 port;
int i;
@@ -6506,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
if (pf->pending_vxlan_bitmap & (1 << i)) {
pf->pending_vxlan_bitmap &= ~(1 << i);
port = pf->vxlan_ports[i];
- ret = port ?
- i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ if (port)
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
I40E_AQC_TUNNEL_TYPE_VXLAN,
- &filter_index, NULL)
- : i40e_aq_del_udp_tunnel(hw, i, NULL);
+ NULL, NULL);
+ else
+ ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
- port ? "adding" : "deleting",
- ntohs(port), port ? i : i);
-
+ dev_info(&pf->pdev->dev,
+ "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+ port ? "add" : "delete",
+ ntohs(port), i, ret,
+ pf->hw.aq.asq_last_status);
pf->vxlan_ports[i] = 0;
- } else {
- dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
- port ? "Added" : "Deleted",
- ntohs(port), port ? i : filter_index);
}
}
}
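The vxlan sync loop above is a pending-bitmap pattern: each set bit marks a port slot whose add or delete still has to be pushed to the hardware, and the bit is consumed before issuing the AQ command. A small standalone sketch of that pattern (table size and port values are illustrative):

#include <stdio.h>

#define MAX_PORTS 16

int main(void)
{
	unsigned int pending = (1u << 3) | (1u << 7);	/* slots 3 and 7 changed */
	unsigned short ports[MAX_PORTS] = { 0 };
	unsigned int i;

	ports[3] = 4789;	/* slot 3: port set, so it needs an add */
	ports[7] = 0;		/* slot 7: port cleared, so it needs a delete */

	for (i = 0; i < MAX_PORTS; i++) {
		if (!(pending & (1u << i)))
			continue;
		pending &= ~(1u << i);		/* consume the pending bit */
		if (ports[i])
			printf("add slot %u port %u\n", i, ports[i]);
		else
			printf("delete slot %u\n", i);
	}
	return 0;
}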
@@ -6728,6 +6856,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->idx = vsi_idx;
vsi->rx_itr_setting = pf->rx_itr_default;
vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+ pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
@@ -6808,7 +6938,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
goto unlock_vsi;
}
- /* updates the pf for this cleared vsi */
+ /* updates the PF for this cleared vsi */
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
@@ -6921,15 +7051,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
*
* Work with the OS to set up the MSIX vectors needed.
*
- * Returns 0 on success, negative on failure
+ * Returns the number of vectors reserved or negative on failure
**/
static int i40e_init_msix(struct i40e_pf *pf)
{
- i40e_status err = 0;
struct i40e_hw *hw = &pf->hw;
- int other_vecs = 0;
+ int vectors_left;
int v_budget, i;
- int vec;
+ int v_actual;
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return -ENODEV;
@@ -6951,24 +7080,62 @@ static int i40e_init_msix(struct i40e_pf *pf)
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
- pf->num_vmdq_msix = pf->num_vmdq_qps;
- other_vecs = 1;
- other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
- other_vecs++;
-
- /* Scale down if necessary, and the rings will share vectors */
- pf->num_lan_msix = min_t(int, pf->num_lan_msix,
- (hw->func_caps.num_msix_vectors - other_vecs));
- v_budget = pf->num_lan_msix + other_vecs;
+ vectors_left = hw->func_caps.num_msix_vectors;
+ v_budget = 0;
+
+ /* reserve one vector for miscellaneous handler */
+ if (vectors_left) {
+ v_budget++;
+ vectors_left--;
+ }
+
+ /* reserve vectors for the main PF traffic queues */
+ pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ vectors_left -= pf->num_lan_msix;
+ v_budget += pf->num_lan_msix;
+
+ /* reserve one vector for sideband flow director */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (vectors_left) {
+ v_budget++;
+ vectors_left--;
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ }
+ }
#ifdef I40E_FCOE
+ /* can we reserve enough for FCoE? */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_fcoe_msix = pf->num_fcoe_qps;
+ if (!vectors_left)
+ pf->num_fcoe_msix = 0;
+ else if (vectors_left >= pf->num_fcoe_qps)
+ pf->num_fcoe_msix = pf->num_fcoe_qps;
+ else
+ pf->num_fcoe_msix = 1;
v_budget += pf->num_fcoe_msix;
+ vectors_left -= pf->num_fcoe_msix;
}
+
#endif
+ /* any vectors left over go for VMDq support */
+ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+ int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
+ int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
+
+ /* if we're short on vectors for what's desired, we limit
+ * the queues per vmdq. If this is still more than are
+ * available, the user will need to change the number of
+ * queues/vectors used by the PF later with the ethtool
+ * channels command
+ */
+ if (vmdq_vecs < vmdq_vecs_wanted)
+ pf->num_vmdq_qps = 1;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+ v_budget += vmdq_vecs;
+ vectors_left -= vmdq_vecs;
+ }
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
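The rewritten budgeting above walks a fixed pool of vectors in priority order: one miscellaneous vector first, then LAN queue vectors capped at the online CPU count, then sideband flow director and VMDq only if anything is left. A toy model of that policy (function and parameter names are invented for the sketch):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* Returns how many vectors would be requested from a pool of 'total'. */
static int budget_vectors(int total, int cpus, int want_fdir, int vmdq_wanted)
{
	int left = total, budget = 0, lan, vmdq;

	if (left) { budget++; left--; }			/* misc/admin queue vector */

	lan = min_int(cpus, left);			/* main PF traffic queues */
	budget += lan;
	left -= lan;

	if (want_fdir && left) { budget++; left--; }	/* sideband flow director */

	vmdq = min_int(vmdq_wanted, left);		/* leftovers go to VMDq */
	budget += vmdq;
	left -= vmdq;

	return budget;
}

int main(void)
{
	printf("%d\n", budget_vectors(16, 8, 1, 8));	/* 1 + 8 + 1 + 6 = 16 */
	printf("%d\n", budget_vectors(4, 8, 1, 8));	/* 1 + 3 + 0 + 0 = 4 */
	return 0;
}

If the MSI-X allocation then grants fewer vectors than budgeted, the optional consumers are scaled back down, as the following hunk shows.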
@@ -6977,9 +7144,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
- vec = i40e_reserve_msix_vectors(pf, v_budget);
+ v_actual = i40e_reserve_msix_vectors(pf, v_budget);
- if (vec != v_budget) {
+ if (v_actual != v_budget) {
/* If we have limited resources, we will start with no vectors
* for the special features and then allocate vectors to some
* of these features based on the policy and at the end disable
@@ -6992,26 +7159,30 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 0;
}
- if (vec < I40E_MIN_MSIX) {
+ if (v_actual < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
return -ENODEV;
- } else if (vec == I40E_MIN_MSIX) {
+ } else if (v_actual == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
- } else if (vec != v_budget) {
+ } else if (v_actual != v_budget) {
+ int vec;
+
/* reserve the misc vector */
- vec--;
+ vec = v_actual - 1;
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
+ pf->num_vmdq_qps = 1;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
/* partition out the remaining vectors */
switch (vec) {
@@ -7037,10 +7208,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
vec--;
}
#endif
- pf->num_lan_msix = min_t(int, (vec / 2),
- pf->num_lan_qps);
- pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
- I40E_DEFAULT_NUM_VMDQ_VSI);
+ /* give the rest to the PF */
+ pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
break;
}
}
@@ -7057,7 +7226,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
- return err;
+ return v_actual;
}
/**
@@ -7132,13 +7301,14 @@ err_out:
* i40e_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
**/
-static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
- int err = 0;
+ int vectors = 0;
+ ssize_t size;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- err = i40e_init_msix(pf);
- if (err) {
+ vectors = i40e_init_msix(pf);
+ if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
@@ -7158,18 +7328,32 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
- err = pci_enable_msi(pf->pdev);
- if (err) {
- dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
+ vectors = pci_enable_msi(pf->pdev);
+ if (vectors < 0) {
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+ vectors);
pf->flags &= ~I40E_FLAG_MSI_ENABLED;
}
+ vectors = 1; /* one MSI or Legacy vector */
}
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
- /* track first vector for misc interrupts */
- err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+ /* set up vector assignment tracking */
+ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->irq_pile) {
+ dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+ return -ENOMEM;
+ }
+ pf->irq_pile->num_entries = vectors;
+ pf->irq_pile->search_hint = 0;
+
+ /* track first vector for misc interrupts, ignore return */
+ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+
+ return 0;
}
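The interrupt-scheme init above now allocates the irq_pile tracker itself, sized for however many vectors were actually obtained: one allocation carries the fixed header plus a trailing u16 array. A userspace sketch of that flexible-array allocation (struct layout simplified, names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct lump_tracking {
	unsigned short num_entries;
	unsigned short search_hint;
	unsigned short list[];		/* flexible array member */
};

int main(void)
{
	unsigned short vectors = 8;	/* e.g. what MSI-X actually granted */
	struct lump_tracking *pile;

	pile = calloc(1, sizeof(*pile) + vectors * sizeof(pile->list[0]));
	if (!pile)
		return 1;
	pile->num_entries = vectors;
	pile->search_hint = 0;
	printf("entries=%u\n", pile->num_entries);
	free(pile);
	return 0;
}

The matching kfree now lives in i40e_reset_interrupt_capability(), as the hunk near the top of this file's diff shows.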
/**
@@ -7219,6 +7403,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
static int i40e_config_rss(struct i40e_pf *pf)
{
u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u32 lut = 0;
int i, j;
@@ -7236,15 +7421,14 @@ static int i40e_config_rss(struct i40e_pf *pf)
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
/* Check capability and Set table size and register per hw expectation*/
reg_val = rd32(hw, I40E_PFQF_CTL_0);
- if (hw->func_caps.rss_table_size == 512) {
+ if (pf->rss_table_size == 512)
reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- pf->rss_table_size = 512;
- } else {
- pf->rss_table_size = 128;
+ else
reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- }
wr32(hw, I40E_PFQF_CTL_0, reg_val);
/* Populate the LUT with max no. of queues in round robin fashion */
@@ -7257,7 +7441,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
* If LAN VSI is the only consumer for RSS then this requirement
* is not necessary.
*/
- if (j == pf->rss_size)
+ if (j == vsi->rss_size)
j = 0;
/* lut = 4-byte sliding window of 4 lut entries */
lut = (lut << 8) | (j &
@@ -7281,15 +7465,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ int new_rss_size;
+
if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;
- queue_count = min_t(int, queue_count, pf->rss_size_max);
+ new_rss_size = min_t(int, queue_count, pf->rss_size_max);
- if (queue_count != pf->rss_size) {
+ if (queue_count != vsi->num_queue_pairs) {
+ vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
- pf->rss_size = queue_count;
+ pf->rss_size = new_rss_size;
i40e_reset_and_rebuild(pf, true);
i40e_config_rss(pf);
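i40e_config_rss() above fills the hash LUT round-robin over the VSI's RSS queue count, packing four one-byte entries into each 32-bit register write. A standalone sketch of that fill (LUT and queue counts are example values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int lut_size = 16;	/* LUT entries (example) */
	const unsigned int rss_size = 6;	/* queues participating in RSS */
	uint32_t lut = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < lut_size; i++, j++) {
		if (j == rss_size)
			j = 0;			/* wrap back to queue 0 */
		lut = (lut << 8) | (j & 0xFF);	/* 4-byte sliding window */
		if ((i & 3) == 3)		/* flush one register per 4 entries */
			printf("LUT[%u] = 0x%08x\n", i >> 2, lut);
	}
	return 0;
}

Keying the wrap on vsi->rss_size rather than pf->rss_size, as the change above does, keeps LUT entries from pointing past the VSI's own queue pairs.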
@@ -7299,6 +7487,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/**
+ * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+{
+ i40e_status status;
+ bool min_valid, max_valid;
+ u32 max_bw, min_bw;
+
+ status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+ &min_valid, &max_valid);
+
+ if (!status) {
+ if (min_valid)
+ pf->npar_min_bw = min_bw;
+ if (max_valid)
+ pf->npar_max_bw = max_bw;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+{
+ struct i40e_aqc_configure_partition_bw_data bw_data;
+ i40e_status status;
+
+ /* Set the valid bit for this PF */
+ bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+ bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+
+ /* Set the new bandwidths */
+ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+{
+ /* Commit temporary BW setting to permanent NVM image */
+ enum i40e_admin_queue_err last_aq_status;
+ i40e_status ret;
+ u16 nvm_word;
+
+ if (pf->hw.partition_id != 1) {
+ dev_info(&pf->pdev->dev,
+ "Commit BW only works on partition 1! This is partition %d",
+ pf->hw.partition_id);
+ ret = I40E_NOT_SUPPORTED;
+ goto bw_commit_out;
+ }
+
+ /* Acquire NVM for read access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for read access, err %d: aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+
+ /* Read word 0x10 of NVM - SW compatibility word 1 */
+ ret = i40e_aq_read_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word), &nvm_word,
+ false, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+
+ /* Wait a bit for NVM release to complete */
+ msleep(50);
+
+ /* Acquire NVM for write access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for write access, err %d: aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+ /* Write it back out unchanged to initiate an NVM update,
+ * which will force a write of the shadow (alt) RAM to
+ * the NVM - thus storing the bandwidth values permanently.
+ */
+ ret = i40e_aq_update_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word),
+ &nvm_word, true, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "BW settings NOT SAVED, err %d aq_err %d\n",
+ ret, last_aq_status);
+bw_commit_out:
+
+ return ret;
+}
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -7324,8 +7634,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_1BUF_ENABLED;
+ I40E_FLAG_MSIX_ENABLED;
+
+ if (iommu_present(&pci_bus_type))
+ pf->flags |= I40E_FLAG_RX_PS_ENABLED;
+ else
+ pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
@@ -7336,6 +7650,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
*/
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
pf->rss_size = 1;
+ pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
@@ -7347,6 +7662,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ if (i40e_get_npar_bw_setting(pf))
+ dev_warn(&pf->pdev->dev,
+ "Could not get NPAR bw settings\n");
+ else
+ dev_info(&pf->pdev->dev,
+ "Min BW = %8.8x, Max BW = %8.8x\n",
+ pf->npar_min_bw, pf->npar_max_bw);
}
/* FW/NVM is not yet fixed in this regard */
@@ -7354,11 +7676,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- /* Setup a counter for fd_atr per pf */
+ /* Setup a counter for fd_atr per PF */
pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- /* Setup a counter for fd_sb per pf */
+ /* Setup a counter for fd_sb per PF */
pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
@@ -7406,22 +7728,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
pf->qp_pile->search_hint = 0;
- /* set up vector assignment tracking */
- size = sizeof(struct i40e_lump_tracking)
- + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
- pf->irq_pile = kzalloc(size, GFP_KERNEL);
- if (!pf->irq_pile) {
- kfree(pf->qp_pile);
- err = -ENOMEM;
- goto sw_init_done;
- }
- pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
- pf->irq_pile->search_hint = 0;
-
pf->tx_timeout_recovery_level = 1;
mutex_init(&pf->switch_mutex);
+ /* If NPAR is enabled nudge the Tx scheduler */
+ if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
+ i40e_set_npar_bw_setting(pf);
+
sw_init_done:
return err;
}
@@ -7534,7 +7848,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ netdev_info(netdev, "vxlan port %d already offloaded\n",
+ ntohs(port));
return;
}
@@ -7542,7 +7857,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
next_idx = i40e_get_vxlan_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
ntohs(port));
return;
}
@@ -7550,8 +7865,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->vxlan_ports[next_idx] = port;
pf->pending_vxlan_bitmap |= (1 << next_idx);
-
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
}
/**
@@ -7579,12 +7895,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
* and make it pending
*/
pf->vxlan_ports[idx] = 0;
-
pf->pending_vxlan_bitmap |= (1 << idx);
-
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
+ ntohs(port));
} else {
- netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port));
}
}
@@ -7653,6 +7970,119 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
+#ifdef HAVE_BRIDGE_ATTRIBS
+/**
+ * i40e_ndo_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ *
+ * Inserts a new hardware bridge if not already created and
+ * enables the bridging mode requested (VEB or VEPA). If the
+ * hardware bridge has already been inserted and the request
+ * is to change the mode then that requires a PF reset to
+ * allow rebuild of the components with required hardware
+ * bridge mode enabled.
+ **/
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ struct nlattr *attr, *br_spec;
+ int i, rem;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if ((mode != BRIDGE_MODE_VEPA) &&
+ (mode != BRIDGE_MODE_VEB))
+ return -EINVAL;
+
+ /* Insert a new HW bridge */
+ if (!veb) {
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ if (veb) {
+ veb->bridge_mode = mode;
+ i40e_config_bridge_mode(veb);
+ } else {
+ /* No Bridge HW offload available */
+ return -ENOENT;
+ }
+ break;
+ } else if (mode != veb->bridge_mode) {
+ /* Existing HW bridge but different mode needs reset */
+ veb->bridge_mode = mode;
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_ndo_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq #
+ * @dev: the netdev being configured
+ * @filter_mask: unused
+ *
+ * Return the mode in which the hardware bridge is operating,
+ * i.e. VEB or VEPA.
+ **/
+#ifdef HAVE_BRIDGE_FILTER
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 __always_unused filter_mask, int nlflags)
+#else
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, int nlflags)
+#endif /* HAVE_BRIDGE_FILTER */
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ int i;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for the PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ if (!veb)
+ return 0;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+ nlflags);
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -7687,6 +8117,10 @@ static const struct net_device_ops i40e_netdev_ops = {
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
+#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
+ .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
};
/**
@@ -7799,6 +8233,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
}
/**
+ * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
+ * @vsi: the VSI being queried
+ *
+ * Returns 1 if the HW bridge mode is VEB and 0 if it is in VEPA mode
+ **/
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
+{
+ struct i40e_veb *veb;
+ struct i40e_pf *pf = vsi->back;
+
+ /* Uplink is not a bridge so default to VEB */
+ if (vsi->veb_idx == I40E_NO_VEB)
+ return 1;
+
+ veb = pf->veb[vsi->veb_idx];
+ /* Uplink is a bridge in VEPA mode */
+ if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+ return 0;
+
+ /* Uplink is a bridge in VEB mode */
+ return 1;
+}
+
+/**
* i40e_add_vsi - Add a VSI to the switch
* @vsi: the VSI being configured
*
@@ -7830,11 +8288,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi config, err %d, aq_err %d\n",
+ "couldn't get PF vsi config, err %d, aq_err %d\n",
ret, pf->hw.aq.asq_last_status);
return -ENOENT;
}
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info = ctxt.info;
vsi->info.valid_sections = 0;
vsi->seid = ctxt.seid;
@@ -7883,12 +8341,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- ctxt.info.valid_sections |=
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id =
+ ctxt.info.switch_id =
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -7896,16 +8356,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
- ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
/* This VSI is connected to VEB so the switch_id
* should be set to zero by default.
*/
- ctxt.info.switch_id = 0;
- ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
@@ -7915,15 +8377,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
- ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
/* This VSI is connected to VEB so the switch_id
* should be set to zero by default.
*/
- ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
@@ -7961,7 +8426,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = -ENOENT;
goto err;
}
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info = ctxt.info;
vsi->info.valid_sections = 0;
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
@@ -8281,7 +8746,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
__func__);
return NULL;
}
- i40e_enable_pf_switch_lb(pf);
+ i40e_config_bridge_mode(veb);
}
for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
@@ -8724,7 +9189,7 @@ err_alloc:
}
/**
- * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * i40e_setup_pf_switch_element - set PF vars based on switch type
* @pf: board private structure
* @ele: element we are building info from
* @num_reported: total number of elements
@@ -8930,15 +9395,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw, true);
- i40e_link_event(pf);
-
- /* Initialize user-specific link properties */
- pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
- I40E_AQ_AN_COMPLETED) ? true : false);
-
- /* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw, true);
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -9008,7 +9465,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
- pf->num_lan_qps = pf->rss_size_max;
+ pf->num_lan_qps = max_t(int, pf->rss_size_max,
+ num_online_cpus());
+ pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+ pf->hw.func_caps.num_tx_qp);
+
queues_left -= pf->num_lan_qps;
}
@@ -9061,7 +9522,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
* i40e_setup_pf_filter_control - Setup PF static filter control
* @pf: PF to be setup
*
- * i40e_setup_pf_filter_control sets up a pf's initial filter control
+ * i40e_setup_pf_filter_control sets up a PF's initial filter control
* settings. If PE/FCoE are enabled then it will also set the per PF
* based filter sizes required for them. It also enables Flow director,
* ethertype and macvlan type filter settings for the PF.
@@ -9106,8 +9567,10 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
#endif
- buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs);
+ buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
+ pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs,
+ pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
if (pf->flags & I40E_FLAG_RSS_ENABLED)
buf += sprintf(buf, "RSS ");
@@ -9136,14 +9599,16 @@ static void i40e_print_features(struct i40e_pf *pf)
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
*
- * i40e_probe initializes a pf identified by a pci_dev structure.
- * The OS initialization, configuring of the pf private structure,
+ * i40e_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
* and a hardware reset occur.
*
* Returns 0 on success, negative on failure
**/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ unsigned long ioremap_len;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
@@ -9195,8 +9660,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &pf->hw;
hw->back = pf;
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+
+ ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
+ I40E_MAX_CSR_SPACE);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
if (!hw->hw_addr) {
err = -EIO;
dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
@@ -9274,7 +9742,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
-
i40e_verify_eeprom(pf);
/* Rev 0 hardware was never productized */
@@ -9367,7 +9834,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
- i40e_init_interrupt_scheme(pf);
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
/* The number of VSIs reported by the FW is the minimum guaranteed
* to us; HW supports far more and we share the remaining pool with
@@ -9409,13 +9878,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
- msleep(75);
- err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
- if (err) {
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
}
-
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
@@ -9499,6 +9969,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
}
+ /* get the requested speeds from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
+ err);
+ pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+
/* print a string summarizing features */
i40e_print_features(pf);
@@ -9517,7 +9994,6 @@ err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
kfree(pf->qp_pile);
- kfree(pf->irq_pile);
err_sw_init:
err_adminq_setup:
(void)i40e_shutdown_adminq(hw);
@@ -9617,7 +10093,6 @@ static void i40e_remove(struct pci_dev *pdev)
}
kfree(pf->qp_pile);
- kfree(pf->irq_pile);
kfree(pf->vsi);
iounmap(pf->hw.hw_addr);
@@ -9760,6 +10235,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
+
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
@@ -9844,6 +10321,7 @@ static int __init i40e_init_module(void)
pr_info("%s: %s - version %s\n", i40e_driver_name,
i40e_driver_string, i40e_driver_version_str);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+
i40e_dbg_init();
return pci_register_driver(&i40e_driver);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 5defe0d63514..554e49d02683 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -212,7 +212,21 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
@@ -222,8 +236,8 @@ read_nvm_exit:
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
i40e_status ret_code = 0;
u16 index, word;
@@ -231,7 +245,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
/* Loop thru the selected region */
for (word = 0; word < *words; word++) {
index = offset + word;
- ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
if (ret_code)
break;
}
@@ -243,6 +257,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_buffer_srctl()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
@@ -302,11 +333,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
i40e_status ret_code = 0;
+ struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
- u16 word = 0;
- u32 i = 0;
+ u16 *data;
+ u16 i = 0;
+
+ ret_code = i40e_allocate_virt_mem(hw, &vmem,
+ I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+ if (ret_code)
+ goto i40e_calc_nvm_checksum_exit;
+ data = (u16 *)vmem.va;
/* read pointer to VPD area */
ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
@@ -317,7 +355,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
/* read pointer to PCIe Alt Auto-load module */
ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
- &pcie_alt_module);
+ &pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -327,33 +365,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
* except the VPD and PCIe ALT Auto-load modules
*/
for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Read SR page */
+ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+ ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ }
+
/* Skip Checksum word */
if (i == I40E_SR_SW_CHECKSUM_WORD)
- i++;
+ continue;
/* Skip VPD module (convert byte size to word count) */
- if (i == (u32)vpd_module) {
- i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
- if (i >= hw->nvm.sr_size)
- break;
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module +
+ (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+ continue;
}
/* Skip PCIe ALT module (convert byte size to word count) */
- if (i == (u32)pcie_alt_module) {
- i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
- if (i >= hw->nvm.sr_size)
- break;
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module +
+ (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+ continue;
}
- ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
- if (ret_code) {
- ret_code = I40E_ERR_NVM_CHECKSUM;
- goto i40e_calc_nvm_checksum_exit;
- }
- checksum_local += word;
+ checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
}
*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
i40e_calc_nvm_checksum_exit:
+ i40e_free_virt_mem(hw, &vmem);
return ret_code;
}
@@ -776,13 +821,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
int *errno)
{
enum i40e_nvmupd_cmd upd_cmd;
- u8 transaction, module;
+ u8 transaction;
/* anything that doesn't match a recognized case is an error */
upd_cmd = I40E_NVMUPD_INVALID;
transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
/* limits on data size */
if ((cmd->data_size < 1) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 68e852a96680..7b34f1e660ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
@@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse);
i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
u64 advt_reg,
struct i40e_asq_cmd_details *cmd_details);
@@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid,
+ bool *max_valid);
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
@@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
- u16 *data);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
@@ -299,4 +303,9 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index fabcfa1b45b2..a92b7725dec3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -57,7 +57,7 @@
* timespec. However, since the registers are 64 bits of nanoseconds, we must
* convert the result to a timespec before we can return.
**/
-static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts)
{
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
@@ -69,7 +69,7 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
ns = (((u64)hi) << 32) | lo;
- *ts = ns_to_timespec(ns);
+ *ts = ns_to_timespec64(ns);
}
/**
@@ -81,10 +81,10 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
* we receive a timespec from the stack, we must convert that timespec into
* nanoseconds before programming the registers.
**/
-static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
{
struct i40e_hw *hw = &pf->hw;
- u64 ns = timespec_to_ns(ts);
+ u64 ns = timespec64_to_ns(ts);
/* The timer will not update until the high register is written, so
* write the low register first.
@@ -159,14 +159,14 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec now, then = ns_to_timespec(delta);
+ struct timespec64 now, then = ns_to_timespec64(delta);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, &now);
- now = timespec_add(now, then);
- i40e_ptp_write(pf, (const struct timespec *)&now);
+ now = timespec64_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec64 *)&now);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
@@ -181,7 +181,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
* Read the device clock and return the correct value in ns, after converting it
* into a timespec struct.
**/
-static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
@@ -202,7 +202,7 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
* to ns happens in the write function.
**/
static int i40e_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
@@ -613,8 +613,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->ptp_caps.pps = 0;
pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
- pf->ptp_caps.gettime = i40e_ptp_gettime;
- pf->ptp_caps.settime = i40e_ptp_settime;
+ pf->ptp_caps.gettime64 = i40e_ptp_gettime;
+ pf->ptp_caps.settime64 = i40e_ptp_settime;
pf->ptp_caps.enable = i40e_ptp_feature_enable;
/* Attempt to register the clock before enabling the hardware. */
@@ -673,7 +673,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
} else {
- struct timespec ts;
+ struct timespec64 ts;
u32 regval;
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
@@ -695,7 +695,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
/* Set the clock value. */
- ts = ktime_to_timespec(ktime_get_real());
+ ts = ktime_to_timespec64(ktime_get_real());
i40e_ptp_settime(&pf->ptp_caps, &ts);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 65d3c8bb2d5b..522d6df51330 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -310,6 +310,10 @@
#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
@@ -421,6 +425,8 @@
#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
@@ -484,7 +490,9 @@
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
@@ -548,9 +556,6 @@
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
@@ -1066,7 +1071,7 @@
#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
@@ -1171,7 +1176,7 @@
#define I40E_VFINT_ITRN_MAX_INDEX 2
#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
@@ -1803,9 +1808,6 @@
#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
@@ -1902,6 +1904,11 @@
#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
@@ -2374,20 +2381,20 @@
#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
@@ -2620,10 +2627,6 @@
#define I40E_GLPRT_TDOLD_MAX_INDEX 3
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDPC_MAX_INDEX 3
-#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPRCH_MAX_INDEX 3
#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
@@ -2990,9 +2993,6 @@
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3258,7 +3258,7 @@
#define I40E_VFINT_ITRN1_MAX_INDEX 2
#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index bbf1b1247ac4..4bd3a80aba82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -25,6 +25,7 @@
******************************************************************************/
#include <linux/prefetch.h>
+#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
@@ -44,7 +45,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
* i40e_program_fdir_filter - Program a Flow Director filter
* @fdir_data: Packet data that will be filter parameters
* @raw_packet: the pre-allocated packet buffer for FDir
- * @pf: The pf pointer
+ * @pf: The PF pointer
* @add: True for add/update, False for remove
**/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
@@ -227,7 +228,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
- } else {
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
"Filter OK for PCTYPE %d loc = %d\n",
@@ -302,7 +303,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
- } else {
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
fd_data->pctype, fd_data->fd_id);
@@ -375,7 +376,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
- } else {
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
"Filter OK for PCTYPE %d loc = %d\n",
@@ -470,12 +471,27 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
rx_desc->wb.qword0.hi_dword.fd_id);
+ /* Check if the programming error is for ATR.
+ * If so, auto disable ATR and set a state for
+ * flush in progress. Next time we come here, if a flush is in
+ * progress, do nothing; once the flush is complete the state
+ * will be cleared.
+ */
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ return;
+
pf->fd_add_err++;
/* store the current atr filter count */
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
+ if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ }
+
/* filter programming failed most likely due to table full */
- fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+ fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
/* If ATR is running fcnt_prog can quickly change,
* if we are very close to full, it makes sense to disable
@@ -754,6 +770,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
+ prefetch(tx_desc);
+
/* update budget accounting */
budget--;
} while (likely(budget));
@@ -841,6 +859,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
/* allow 00 to be written to the index */
@@ -1031,6 +1050,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (ring_is_ps_enabled(rx_ring)) {
+ int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
+
+ rx_bi = &rx_ring->rx_bi[0];
+ if (rx_bi->hdr_buf) {
+ dma_free_coherent(dev,
+ bufsz,
+ rx_bi->hdr_buf,
+ rx_bi->dma);
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ rx_bi->dma = 0;
+ rx_bi->hdr_buf = NULL;
+ }
+ }
+ }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
rx_bi = &rx_ring->rx_bi[i];
@@ -1089,6 +1124,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
+ * i40e_alloc_rx_headers - allocate rx header buffers
+ * @rx_ring: ring to alloc buffers
+ *
+ * Allocate rx header buffers for the entire ring. As these are static,
+ * this is only called when setting up a new ring.
+ **/
+void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ dma_addr_t dma;
+ void *buffer;
+ int buf_size;
+ int i;
+
+ if (rx_ring->rx_bi[0].hdr_buf)
+ return;
+ /* Make sure the buffers don't cross cache line boundaries. */
+ buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
+ buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
+ &dma, GFP_KERNEL);
+ if (!buffer)
+ return;
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ rx_bi->dma = dma + (i * buf_size);
+ rx_bi->hdr_buf = buffer + (i * buf_size);
+ }
+}
+
+/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1148,11 +1214,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+
+ if (bi->skb) /* desc is in use */
+ goto no_buffers;
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev,
+ bi->dma,
+ 0,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
**/
-void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
@@ -1192,40 +1323,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
}
}
- if (ring_is_ps_enabled(rx_ring)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- }
-
- if (!bi->page_dma) {
- /* use a half page if we're re-using */
- bi->page_offset ^= PAGE_SIZE / 2;
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- bi->page_dma = 0;
- goto no_buffers;
- }
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- rx_desc->read.hdr_addr = 0;
- }
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
i++;
if (i == rx_ring->count)
i = 0;
@@ -1279,10 +1378,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct iphdr *iph;
__sum16 csum;
- ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+ ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
skb->ip_summed = CHECKSUM_NONE;
@@ -1410,13 +1509,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
}
/**
- * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
*
* Returns number of packets cleaned
**/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -1432,25 +1531,54 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (budget <= 0)
return 0;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
-
- while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
- union i40e_rx_desc *next_rxd;
+ do {
struct i40e_rx_buffer *rx_bi;
struct sk_buff *skb;
u16 vlan_tag;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ i = rx_ring->next_to_clean;
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * DD bit is set.
+ */
+ dma_rmb();
if (i40e_rx_is_programming_status(qword)) {
i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
- goto next_desc;
+ I40E_RX_INCREMENT(rx_ring, i);
+ continue;
}
rx_bi = &rx_ring->rx_bi[i];
skb = rx_bi->skb;
- prefetch(skb->data);
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_hdr_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ break;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ 0,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
+ }
rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
@@ -1465,40 +1593,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
+ prefetch(rx_bi->page);
rx_bi->skb = NULL;
-
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * STATUS_DD bit is set
- */
- rmb();
-
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- if (rx_bi->dma) {
- u16 len;
-
+ cleaned_count++;
+ if (rx_hbo || rx_sph) {
+ int len;
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
- else if (rx_sph)
- len = rx_header_len;
- else if (rx_packet_len)
- len = rx_packet_len; /* 1buf/no split found */
else
- len = rx_header_len; /* split always mode */
-
- skb_put(skb, len);
- dma_unmap_single(rx_ring->dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
+ len = rx_header_len;
+ memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
+ } else if (skb->len == 0) {
+ int len;
+
+ len = (rx_packet_len > skb_headlen(skb) ?
+ skb_headlen(skb) : rx_packet_len);
+ memcpy(__skb_put(skb, len),
+ rx_bi->page + rx_bi->page_offset,
+ len);
+ rx_bi->page_offset += len;
+ rx_packet_len -= len;
}
/* Get the rest of the data if this was a header split */
- if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
-
+ if (rx_packet_len) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_bi->page,
rx_bi->page_offset,
@@ -1520,22 +1638,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
DMA_FROM_DEVICE);
rx_bi->page_dma = 0;
}
- I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+ I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
-
- if (ring_is_ps_enabled(rx_ring)) {
- rx_bi->skb = next_buffer->skb;
- rx_bi->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- }
+ next_buffer->skb = skb;
rx_ring->rx_stats.non_eop_descs++;
- goto next_desc;
+ continue;
}
/* ERR_MASK will only have valid bits if EOP set */
@@ -1544,7 +1656,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* TODO: shouldn't we increment a counter indicating the
* drop?
*/
- goto next_desc;
+ continue;
}
skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
@@ -1570,33 +1682,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
#ifdef I40E_FCOE
if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
dev_kfree_skb_any(skb);
- goto next_desc;
+ continue;
}
#endif
+ skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
rx_ring->netdev->last_rx = jiffies;
- budget--;
-next_desc:
rx_desc->wb.qword1.status_error_len = 0;
- if (!budget)
- break;
- cleaned_count++;
+ } while (likely(total_rx_packets < budget));
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ return total_rx_packets;
+}
+
+/**
+ * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns number of packets cleaned
+ **/
+static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u16 rx_packet_len;
+ u8 rx_ptype;
+ u64 qword;
+ u16 i;
+
+ do {
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+ i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
+ i = rx_ring->next_to_clean;
+ rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
- }
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * DD bit is set.
+ */
+ dma_rmb();
+
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ I40E_RX_INCREMENT(rx_ring, i);
+ continue;
+ }
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+ cleaned_count++;
+
+ /* Get the header and possibly the whole packet
+ * If this is an skb from previous receive dma will be 0
+ */
+ skb_put(skb, rx_packet_len);
+ dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+
+ I40E_RX_INCREMENT(rx_ring, i);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ rx_ring->rx_stats.non_eop_descs++;
+ continue;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
+ continue;
+ }
+
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+#ifdef I40E_FCOE
+ if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+#endif
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ rx_desc->wb.qword1.status_error_len = 0;
+ } while (likely(total_rx_packets < budget));
- rx_ring->next_to_clean = i;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1604,10 +1832,7 @@ next_desc:
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
- if (cleaned_count)
- i40e_alloc_rx_buffers(rx_ring, cleaned_count);
-
- return budget > 0;
+ return total_rx_packets;
}
/**
@@ -1628,6 +1853,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
+ int cleaned;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
@@ -1647,8 +1873,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- i40e_for_each_ring(ring, q_vector->rx)
- clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+ i40e_for_each_ring(ring, q_vector->rx) {
+ if (ring_is_ps_enabled(ring))
+ cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
+ else
+ cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ /* if we didn't clean as many as budgeted, we must be done */
+ clean_complete &= (budget_per_ring != cleaned);
+ }
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
@@ -1715,6 +1947,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
+ if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ return;
+
/* if sampling is disabled do nothing */
if (!tx_ring->atr_sample_rate)
return;
@@ -1822,6 +2057,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
+ if (protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off by the user the
+ * stack sets the protocol to 8021q so that the driver
+ * can take any steps required to support the SW only
+ * VLAN handling. In our case the driver doesn't need
+ * to take any further steps so just set the protocol
+ * to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ goto out;
+ }
+
/* if we have a HW VLAN tag being added, default to the HW one */
if (skb_vlan_tag_present(skb)) {
tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
@@ -1838,6 +2086,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
}
+ if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ goto out;
+
/* Insert 802.1p priority into VLAN header */
if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
(skb->priority != TC_PRIO_CONTROL)) {
@@ -1858,6 +2109,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
}
}
+
+out:
*flags = tx_flags;
return 0;
}
@@ -1982,8 +2235,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ u32 l4_tunnel = 0;
if (skb->encapsulation) {
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ break;
+ default:
+ return;
+ }
network_hdr_len = skb_inner_network_header_len(skb);
this_ip_hdr = inner_ip_hdr(skb);
this_ipv6_hdr = inner_ipv6_hdr(skb);
@@ -2006,8 +2267,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the ctx descriptor fields */
*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- I40E_TXD_CTX_UDP_TUNNELING |
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ l4_tunnel |
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index dff0baeb1ecc..4b0b8102cdc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t {
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_INCREMENT(r, i) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ r->next_to_clean = i; \
+ } while (0)
+
#define I40E_RX_NEXT_DESC(r, i, n) \
do { \
(i)++; \
@@ -152,6 +160,7 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
+ void *hdr_buf;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
@@ -224,8 +233,8 @@ struct i40e_ring {
u16 rx_buf_len;
u8 dtype;
#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
-#define I40E_RX_DTYPE_HEADER_SPLIT 2
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
u8 hsplit;
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
@@ -281,7 +290,9 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index e9901ef06a63..568e855da0f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -44,6 +44,7 @@
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@@ -175,12 +176,12 @@ struct i40e_link_status {
u8 an_info;
u8 ext_info;
u8 loopback;
- bool an_enabled;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
u16 max_frame_size;
bool crc_enable;
u8 pacing;
+ u8 requested_speeds;
};
struct i40e_phy_info {
@@ -241,6 +242,7 @@ struct i40e_hw_capabilities {
u8 rx_buf_chain_len;
u32 enabled_tcmap;
u32 maxtc;
+ u64 wr_csr_prot;
};
struct i40e_mac_info {
@@ -1143,7 +1145,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
-#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
@@ -1401,6 +1403,19 @@ struct i40e_lldp_variables {
u16 crc8;
};
+/* Offsets into Alternate Ram */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 61dd1b187624..2d20af290fbf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -59,31 +59,29 @@
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
-/* VF sends req. to pf for the following
- * ops.
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- I40E_VIRTCHNL_OP_GET_STATS,
- I40E_VIRTCHNL_OP_FCOE,
- I40E_VIRTCHNL_OP_CONFIG_RSS,
-/* PF sends status change events to vfs using
- * the following op.
- */
- I40E_VIRTCHNL_OP_EVENT,
+ I40E_VIRTCHNL_OP_RESET_VF = 2,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+ I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+ I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ I40E_VIRTCHNL_OP_GET_STATS = 15,
+ I40E_VIRTCHNL_OP_FCOE = 16,
+ I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 40f042af4131..78d1c4ff565e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,12 +26,135 @@
#include "i40e.h"
+/*********************notification routines***********************/
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg,
+ u16 msglen)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ /* Not all VFs are enabled, so skip the ones that are not */
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ }
+}
+
+/**
+ * i40e_vc_notify_vf_link_state
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ **/
+static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ if (vf->link_forced) {
+ pfe.event_data.link_event.link_status = vf->link_up;
+ pfe.event_data.link_event.link_speed =
+ (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ } else {
+ pfe.event_data.link_event.link_status =
+ ls->link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = ls->link_speed;
+ }
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the PF structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ i40e_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+ int abs_vf_id;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify that the VF is in either the init or active state before proceeding */
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ return;
+
+ abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe,
+ sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
/***********************misc routines*****************************/
/**
* i40e_vc_disable_vf
- * @pf: pointer to the pf info
- * @vf: pointer to the vf info
+ * @pf: pointer to the PF info
+ * @vf: pointer to the VF info
*
* Disable the VF through a SW reset
**/
@@ -48,38 +171,40 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
/**
* i40e_vc_isvalid_vsi_id
- * @vf: pointer to the vf info
- * @vsi_id: vf relative vsi id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
*
- * check for the valid vsi id
+ * check for the valid VSI id
**/
-static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
- return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+ return (vsi && (vsi->vf_id == vf->vf_id));
}
/**
* i40e_vc_isvalid_queue_id
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @vsi_id: vsi id
* @qid: vsi relative queue id
*
* check for the valid queue id
**/
-static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
u8 qid)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
- return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
+ return (vsi && (qid < vsi->alloc_queue_pairs));
}
/**
* i40e_vc_isvalid_vector_id
- * @vf: pointer to the vf info
- * @vector_id: vf relative vector id
+ * @vf: pointer to the VF info
+ * @vector_id: VF relative vector id
*
* check for the valid vector id
**/
@@ -94,19 +219,22 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
/**
* i40e_vc_get_pf_queue_id
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
* @vsi_queue_id: vsi relative queue id
*
- * return pf relative queue id
+ * return PF relative queue id
**/
-static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
u8 vsi_queue_id)
{
struct i40e_pf *pf = vf->pf;
- struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+ if (!vsi)
+ return pf_queue_id;
+
if (le16_to_cpu(vsi->info.mapping_flags) &
I40E_AQ_VSI_QUE_MAP_NONCONTIG)
pf_queue_id =
@@ -120,13 +248,13 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
/**
* i40e_config_irq_link_list
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as given by the FW
* @vecmap: irq map info
*
* configure irq link list from the map
**/
-static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
struct i40e_virtchnl_vector_map *vecmap)
{
unsigned long linklistmap = 0, tempmap;
@@ -171,7 +299,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
I40E_VIRTCHNL_SUPPORTED_QTYPES));
vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
wr32(hw, reg_idx, reg);
@@ -198,7 +326,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
(I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
vsi_queue_id);
} else {
pf_queue_id = I40E_QUEUE_END_OF_LIST;
@@ -220,25 +348,27 @@ irq_list_done:
/**
* i40e_config_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
* @vsi_queue_id: vsi relative queue index
* @info: config. info
*
* configure tx queue
**/
-static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
struct i40e_virtchnl_txq_info *info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_hmc_obj_txq tx_ctx;
+ struct i40e_vsi *vsi;
u16 pf_queue_id;
u32 qtx_ctl;
int ret = 0;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -246,7 +376,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
/* only set the required fields */
tx_ctx.base = info->dma_ring_addr / 128;
tx_ctx.qlen = info->ring_len;
- tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
tx_ctx.head_wb_ena = info->headwb_enabled;
tx_ctx.head_wb_addr = info->dma_headwb_addr;
@@ -287,14 +417,14 @@ error_context:
/**
* i40e_config_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
* @vsi_queue_id: vsi relative queue index
* @info: config. info
*
* configure rx queue
**/
-static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
struct i40e_virtchnl_rxq_info *info)
{
@@ -304,7 +434,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
u16 pf_queue_id;
int ret = 0;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
@@ -378,10 +508,10 @@ error_param:
/**
* i40e_alloc_vsi_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @type: type of VSI to allocate
*
- * alloc vf vsi context & resources
+ * alloc VF vsi context & resources
**/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
@@ -394,18 +524,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (!vsi) {
dev_err(&pf->pdev->dev,
- "add vsi failed for vf %d, aq_err %d\n",
+ "add vsi failed for VF %d, aq_err %d\n",
vf->vf_id, pf->hw.aq.asq_last_status);
ret = -ENOENT;
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- vf->lan_vsi_index = vsi->idx;
+ vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
- dev_info(&pf->pdev->dev,
- "VF %d assigned LAN VSI index %d, VSI id %d\n",
- vf->vf_id, vsi->idx, vsi->id);
/* If the port VLAN has been configured and then the
* VF driver was removed then the VSI port VLAN
* configuration was destroyed. Check if there is
@@ -446,9 +573,9 @@ error_alloc_vsi_res:
/**
* i40e_enable_vf_mappings
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * enable vf mappings
+ * enable VF mappings
**/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
@@ -469,8 +596,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
/* map PF queues to VF queues */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
total_queue_pairs++;
@@ -478,13 +605,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/* map PF queues to VSI */
for (j = 0; j < 7; j++) {
- if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
+ if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
reg = 0x07FF07FF; /* unused */
} else {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
j * 2);
reg = qid;
- qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
(j * 2) + 1);
reg |= qid << 16;
}
@@ -496,9 +623,9 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/**
* i40e_disable_vf_mappings
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * disable vf mappings
+ * disable VF mappings
**/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
@@ -516,9 +643,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
/**
* i40e_free_vf_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * free vf resources
+ * free VF resources
**/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
@@ -528,9 +655,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
int i, msix_vf;
/* free vsi & disconnect it from the parent uplink */
- if (vf->lan_vsi_index) {
- i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
- vf->lan_vsi_index = 0;
+ if (vf->lan_vsi_idx) {
+ i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+ vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
}
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -571,9 +698,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
/**
* i40e_alloc_vf_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * allocate vf resources
+ * allocate VF resources
**/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
@@ -585,15 +712,15 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
if (ret)
goto error_alloc;
- total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+ total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
/* store the total qps number for the runtime
- * vf req validation
+ * VF req validation
*/
vf->num_queue_pairs = total_queue_pairs;
- /* vf is now completely initialized */
+ /* VF is now completely initialized */
set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
error_alloc:
@@ -607,7 +734,7 @@ error_alloc:
#define VF_TRANS_PENDING_MASK 0x20
/**
* i40e_quiesce_vf_pci
- * @vf: pointer to the vf structure
+ * @vf: pointer to the VF structure
*
* Wait for VF PCI transactions to be cleared after reset. Returns -EIO
* if the transactions never clear.
@@ -634,10 +761,10 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
/**
* i40e_reset_vf
- * @vf: pointer to the vf structure
+ * @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * reset the vf
+ * reset the VF
**/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -657,7 +784,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset vf using VPGEN_VFRTRIG reg */
+ /* reset VF using VPGEN_VFRTRIG reg */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -685,6 +812,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
}
}
+ if (flr)
+ usleep_range(10000, 20000);
+
if (!rsd)
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
@@ -695,12 +825,12 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
/* On initial reset, we won't have any queues */
- if (vf->lan_vsi_index == 0)
+ if (vf->lan_vsi_idx == 0)
goto complete_reset;
- i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+ i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
complete_reset:
- /* reallocate vf resources to reset the VSI state */
+ /* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
@@ -713,78 +843,10 @@ complete_reset:
}
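For context on the rsd flag and the new FLR delay in the hunk above: the reset-completion poll itself sits outside this diff. A sketch of its assumed shape, assuming the VPGEN_VFRSTAT register and its VFRD bit used elsewhere in this driver:

/* assumed shape of the elided completion poll in i40e_reset_vf();
 * the new usleep_range(10000, 20000) for FLR runs right after it
 */
for (i = 0; i < 10; i++) {
	/* the done bit takes a moment to become valid after the trigger */
	usleep_range(10000, 20000);
	reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
	if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
		rsd = true;	/* reset done, safe to touch VF resources */
		break;
	}
}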
/**
- * i40e_enable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * enable switch loop back or die - no point in a return value
- **/
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
-{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- struct i40e_vsi_context ctxt;
- int aq_ret;
-
- ctxt.seid = pf->main_vsi_seid;
- ctxt.pf_num = pf->hw.pf_id;
- ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s couldn't get pf vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
- return;
- }
- ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
- }
-}
-
-/**
- * i40e_disable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * disable switch loop back or die - no point in a return value
- **/
-static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
-{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- struct i40e_vsi_context ctxt;
- int aq_ret;
-
- ctxt.seid = pf->main_vsi_seid;
- ctxt.pf_num = pf->hw.pf_id;
- ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s couldn't get pf vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
- return;
- }
- ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
- }
-}
-
-/**
* i40e_free_vfs
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
*
- * free vf resources
+ * free VF resources
**/
void i40e_free_vfs(struct i40e_pf *pf)
{
@@ -797,16 +859,23 @@ void i40e_free_vfs(struct i40e_pf *pf)
while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
usleep_range(1000, 2000);
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ false);
+
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
msleep(20); /* let any messages in transit get finished up */
- /* free up vf resources */
+ /* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
@@ -832,10 +901,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
}
- i40e_disable_pf_switch_lb(pf);
- } else {
- dev_warn(&pf->pdev->dev,
- "unable to disable SR-IOV because VFs are assigned.\n");
}
clear_bit(__I40E_VF_DISABLE, &pf->state);
}
@@ -843,10 +908,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
/**
* i40e_alloc_vfs
- * @pf: pointer to the pf structure
- * @num_alloc_vfs: number of vfs to allocate
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
*
- * allocate vf resources
+ * allocate VF resources
**/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
@@ -883,15 +948,14 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
- /* vf resources get allocated during reset */
+ /* VF resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
- /* enable vf vplan_qtable mappings */
+ /* enable VF vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
}
pf->num_alloc_vfs = num_alloc_vfs;
- i40e_enable_pf_switch_lb(pf);
err_alloc:
if (ret)
i40e_free_vfs(pf);
@@ -905,7 +969,7 @@ err_iov:
/**
* i40e_pci_sriov_enable
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of vfs to allocate
+ * @num_vfs: number of VFs to allocate
*
* Enable or change the number of VFs
**/
@@ -945,7 +1009,7 @@ err_out:
/**
* i40e_pci_sriov_configure
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of vfs to allocate
+ * @num_vfs: number of VFs to allocate
*
* Enable or change the number of VFs. Called when the user updates the number
* of VFs in sysfs.
@@ -970,13 +1034,13 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
/**
* i40e_vc_send_msg_to_vf
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * send msg to vf
+ * send msg to VF
**/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
@@ -1025,11 +1089,11 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
/**
* i40e_vc_send_resp_to_vf
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @opcode: operation code
* @retval: return value
*
- * send resp msg to vf
+ * send resp msg to VF
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
enum i40e_virtchnl_ops opcode,
@@ -1040,9 +1104,9 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
/**
* i40e_vc_get_version_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * called from the vf to request the API version used by the PF
+ * called from the VF to request the API version used by the PF
**/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
@@ -1058,11 +1122,11 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
/**
* i40e_vc_get_vf_resources_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to request its resources
+ * called from the VF to request its resources
**/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
@@ -1090,18 +1154,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
}
vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
- if (vf->lan_vsi_index) {
- vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+ if (vf->lan_vsi_idx) {
+ vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
vfres->vsi_res[i].num_queue_pairs =
- pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+ pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
memcpy(vfres->vsi_res[i].default_mac_addr,
vf->default_lan_addr.addr, ETH_ALEN);
i++;
@@ -1109,7 +1173,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
err:
- /* send the response back to the vf */
+ /* send the response back to the VF */
ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
aq_ret, (u8 *)vfres, len);
@@ -1119,13 +1183,13 @@ err:
/**
* i40e_vc_reset_vf_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to reset itself,
- * unlike other virtchnl messages, pf driver
- * doesn't send the response back to the vf
+ * called from the VF to reset itself,
+ * unlike other virtchnl messages, PF driver
+ * doesn't send the response back to the VF
**/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
@@ -1135,12 +1199,12 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
/**
* i40e_vc_config_promiscuous_mode_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to configure the promiscuous mode of
- * vf vsis
+ * called from the VF to configure the promiscuous mode of
+ * VF vsis
**/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
u8 *msg, u16 msglen)
@@ -1153,21 +1217,21 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
bool allmulti = false;
i40e_status aq_ret;
+ vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+ (vsi->type != I40E_VSI_FCOE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi = pf->vsi[info->vsi_id];
if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
allmulti, NULL);
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
aq_ret);
@@ -1175,11 +1239,11 @@ error_param:
/**
* i40e_vc_config_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to configure the rx/tx
+ * called from the VF to configure the rx/tx
* queues
**/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1221,22 +1285,22 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
- /* set vsi num_queue_pairs in use to num configured by vf */
- pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;
+ /* set vsi num_queue_pairs in use to num configured by VF */
+ pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
aq_ret);
}
/**
* i40e_vc_config_irq_map_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to configure the irq to
+ * called from the VF to configure the irq to
* queue map
**/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1288,18 +1352,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_config_irq_link_list(vf, vsi_id, map);
}
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
aq_ret);
}
/**
* i40e_vc_enable_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to enable all or specific queue(s)
+ * called from the VF to enable all or specific queue(s)
**/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
@@ -1323,21 +1387,22 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+
+ if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
aq_ret);
}
/**
* i40e_vc_disable_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to disable all or specific
+ * called from the VF to disable all or specific
* queue(s)
**/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1345,7 +1410,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_virtchnl_queue_select *vqs =
(struct i40e_virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
@@ -1362,22 +1426,23 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+
+ if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
aq_ret);
}
/**
* i40e_vc_get_stats_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to get vsi stats
+ * called from the VF to get vsi stats
**/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
@@ -1400,7 +1465,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- vsi = pf->vsi[vqs->vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1409,14 +1474,14 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
stats = vsi->eth_stats;
error_param:
- /* send the response back to the vf */
+ /* send the response back to the VF */
return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
(u8 *)&stats, sizeof(stats));
}
/**
* i40e_check_vf_permission
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @macaddr: pointer to the MAC Address being checked
*
* Check if the VF has permission to add or delete unicast MAC address
@@ -1450,7 +1515,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
/**
* i40e_vc_add_mac_addr_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1478,7 +1543,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (ret)
goto error_param;
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
@@ -1507,14 +1572,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
ret);
}
/**
* i40e_vc_del_mac_addr_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1546,7 +1611,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
@@ -1558,14 +1623,14 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
ret);
}
/**
* i40e_vc_add_vlan_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1596,7 +1661,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1613,13 +1678,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
/**
* i40e_vc_remove_vlan_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1649,7 +1714,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1664,13 +1729,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
/**
* i40e_vc_validate_vf_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
* @msghndl: msg handle
@@ -1776,14 +1841,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
/**
* i40e_vc_process_vf_msg
- * @pf: pointer to the pf structure
- * @vf_id: source vf id
+ * @pf: pointer to the PF structure
+ * @vf_id: source VF id
* @msg: pointer to the msg buffer
* @msglen: msg length
* @msghndl: msg handle
*
* called from the common aeq/arq handler to
- * process request from vf
+ * process request from VF
**/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
@@ -1801,7 +1866,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
- dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
return ret;
}
@@ -1828,6 +1893,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+ i40e_vc_notify_vf_link_state(vf);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
@@ -1849,7 +1915,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
@@ -1861,10 +1927,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
/**
* i40e_vc_process_vflr_event
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
*
* called from the VFLR irq handler to
- * free up vf resources and state variables
+ * free up VF resources and state variables
**/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
@@ -1885,7 +1951,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- /* read GLGEN_VFLRSTAT register to find out the flr vfs */
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & (1 << bit_idx)) {
@@ -1901,124 +1967,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
}
/**
- * i40e_vc_vf_broadcast
- * @pf: pointer to the pf structure
- * @opcode: operation code
- * @retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send a message to all VFs on a given PF
- **/
-static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
- enum i40e_virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg,
- u16 msglen)
-{
- struct i40e_hw *hw = &pf->hw;
- struct i40e_vf *vf = pf->vf;
- int i;
-
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- /* Not all vfs are enabled so skip the ones that are not */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- continue;
-
- /* Ignore return value on purpose - a given VF may fail, but
- * we need to keep going and send to all of them
- */
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
- }
-}
-
-/**
- * i40e_vc_notify_link_state
- * @pf: pointer to the pf structure
- *
- * send a link status message to all VFs on a given PF
- **/
-void i40e_vc_notify_link_state(struct i40e_pf *pf)
-{
- struct i40e_virtchnl_pf_event pfe;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_vf *vf = pf->vf;
- struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int i;
-
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- if (vf->link_forced) {
- pfe.event_data.link_event.link_status = vf->link_up;
- pfe.event_data.link_event.link_speed =
- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
- } else {
- pfe.event_data.link_event.link_status =
- ls->link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed = ls->link_speed;
- }
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
- 0, (u8 *)&pfe, sizeof(pfe),
- NULL);
- }
-}
-
-/**
- * i40e_vc_notify_reset
- * @pf: pointer to the pf structure
- *
- * indicate a pending reset to all VFs on a given PF
- **/
-void i40e_vc_notify_reset(struct i40e_pf *pf)
-{
- struct i40e_virtchnl_pf_event pfe;
-
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
- (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
-}
-
-/**
- * i40e_vc_notify_vf_reset
- * @vf: pointer to the vf structure
- *
- * indicate a pending reset to the given VF
- **/
-void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
-{
- struct i40e_virtchnl_pf_event pfe;
- int abs_vf_id;
-
- /* validate the request */
- if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
- return;
-
- /* verify if the VF is in either init or active before proceeding */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- return;
-
- abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
-
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (u8 *)&pfe,
- sizeof(struct i40e_virtchnl_pf_event), NULL);
-}
-
-/**
* i40e_ndo_set_vf_mac
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
* @mac: mac address
*
- * program vf mac address
+ * program VF mac address
**/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
@@ -2038,7 +1992,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
vf = &(pf->vf[vf_id]);
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev,
"Uninitialized VF %d\n", vf_id);
@@ -2083,11 +2037,11 @@ error_param:
/**
* i40e_ndo_set_vf_port_vlan
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
* @vlan_id: VLAN identifier
* @qos: priority setting
*
- * program vf vlan id and/or qos
+ * program VF vlan id and/or qos
**/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos)
@@ -2112,7 +2066,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
}
vf = &(pf->vf[vf_id]);
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
ret = -EINVAL;
@@ -2196,10 +2150,10 @@ error_pvid:
/**
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
- * @vf_id: vf identifier
- * @tx_rate: tx rate
+ * @vf_id: VF identifier
+ * @tx_rate: Tx rate
*
- * configure vf tx rate
+ * configure VF Tx rate
**/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate)
@@ -2219,13 +2173,13 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
if (min_tx_rate) {
- dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+ dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
min_tx_rate, vf_id);
return -EINVAL;
}
vf = &(pf->vf[vf_id]);
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
ret = -EINVAL;
@@ -2247,7 +2201,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
if (max_tx_rate > speed) {
- dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.",
max_tx_rate, vf->vf_id);
ret = -EINVAL;
goto error;
@@ -2276,10 +2230,10 @@ error:
/**
* i40e_ndo_get_vf_config
* @netdev: network interface device structure
- * @vf_id: vf identifier
- * @ivi: vf configuration structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
*
- * return vf configuration
+ * return VF configuration
**/
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi)
@@ -2299,7 +2253,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
vf = &(pf->vf[vf_id]);
/* first vsi is always the LAN vsi */
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
ret = -EINVAL;
@@ -2331,7 +2285,7 @@ error_param:
/**
* i40e_ndo_set_vf_link_state
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
* @link: required link state
*
* Set the link state of a specified VF, regardless of physical link state
@@ -2394,7 +2348,7 @@ error_out:
/**
* i40e_ndo_set_vf_spoofchk
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
* @enable: flag to enable or disable feature
*
* Enable or disable VF spoof checking
@@ -2423,11 +2377,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
vf->spoofchk = enable;
memset(&ctxt, 0, sizeof(ctxt));
- ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+ ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
if (enable)
- ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 9452f5247cff..09043c1aae54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -71,12 +71,12 @@ enum i40e_vf_capabilities {
struct i40e_vf {
struct i40e_pf *pf;
- /* vf id in the pf space */
+ /* VF id in the PF space */
u16 vf_id;
- /* all vf vsis connect to the same parent */
+ /* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
- /* vf Port Extender (PE) stag if used */
+ /* VF Port Extender (PE) stag if used */
u16 stag;
struct i40e_virtchnl_ether_addr default_lan_addr;
@@ -88,10 +88,10 @@ struct i40e_vf {
* When assigned, these will be non-zero, because VSI 0 is always
* the main LAN VSI for the PF.
*/
- u8 lan_vsi_index; /* index into PF struct */
+ u8 lan_vsi_idx; /* index into PF struct */
u8 lan_vsi_id; /* ID as used by firmware */
- u8 num_queue_pairs; /* num of qps assigned to vf vsis */
+ u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u64 num_mdd_events; /* num of mdd events detected */
u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
u64 num_valid_msgs; /* num of valid msgs detected */
@@ -100,7 +100,7 @@ struct i40e_vf {
unsigned long vf_states; /* vf's runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
- bool link_up; /* only valid if vf link is forced */
+ bool link_up; /* only valid if VF link is forced */
bool spoofchk;
};
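The rename separates the two VSI handles the old code conflated: lan_vsi_idx indexes pf->vsi[] inside the PF driver, while lan_vsi_id is the firmware handle that travels over virtchnl and is what VFs echo back in their requests. A compact illustration of the distinction, reusing the lookup helper assumed earlier (illustrative only, not part of the patch):

/* both handles name the same VSI, but only the idx is an array index */
struct i40e_vsi *by_idx = pf->vsi[vf->lan_vsi_idx];
struct i40e_vsi *by_id = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
WARN_ON(by_idx != by_id);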
@@ -113,7 +113,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
-/* vf configuration related iplink handlers */
+/* VF configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
@@ -126,6 +126,5 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf);
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 60f04e96a80e..ef43d68f67b3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -93,6 +93,7 @@ struct i40e_adminq_info {
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_release_on_done;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 28c40c57d4f5..39fcb1dc4ea6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -51,6 +51,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_20G_KR2:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
@@ -85,46 +86,53 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = le16_to_cpu(aq_desc->datalen);
- u8 *aq_buffer = (u8 *)buffer;
- u32 data[4];
- u32 i = 0;
+ u8 *buf = (u8 *)buffer;
+ u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
- aq_desc->retval);
+ le16_to_cpu(aq_desc->opcode),
+ le16_to_cpu(aq_desc->flags),
+ le16_to_cpu(aq_desc->datalen),
+ le16_to_cpu(aq_desc->retval));
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- aq_desc->cookie_high, aq_desc->cookie_low);
+ le32_to_cpu(aq_desc->cookie_high),
+ le32_to_cpu(aq_desc->cookie_low));
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- aq_desc->params.internal.param0,
- aq_desc->params.internal.param1);
+ le32_to_cpu(aq_desc->params.internal.param0),
+ le32_to_cpu(aq_desc->params.internal.param1));
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- aq_desc->params.external.addr_high,
- aq_desc->params.external.addr_low);
+ le32_to_cpu(aq_desc->params.external.addr_high),
+ le32_to_cpu(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
- memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
- for (i = 0; i < len; i++) {
- data[((i % 16) / 4)] |=
- ((u32)aq_buffer[i]) << (8 * (i % 4));
- if ((i % 16) == 15) {
- i40e_debug(hw, mask,
- "\t0x%04X %08X %08X %08X %08X\n",
- i - 15, data[0], data[1], data[2],
- data[3]);
- memset(data, 0, sizeof(data));
- }
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i + 1], buf[i + 2],
+ buf[i + 3], buf[i + 4], buf[i + 5],
+ buf[i + 6], buf[i + 7], buf[i + 8],
+ buf[i + 9], buf[i + 10], buf[i + 11],
+ buf[i + 12], buf[i + 13], buf[i + 14],
+ buf[i + 15]);
+ /* write whatever's left over without overrunning the buffer */
+ if (i < len) {
+ char d_buf[80];
+ int j = 0;
+
+ memset(d_buf, 0, sizeof(d_buf));
+ j += sprintf(d_buf, "\t0x%04X ", i);
+ while (i < len)
+ j += sprintf(&d_buf[j], " %02X", buf[i++]);
+ i40e_debug(hw, mask, "%s\n", d_buf);
}
- if ((i % 16) != 0)
- i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
- i - (i % 16), data[0], data[1], data[2],
- data[3]);
}
}
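The rewritten dump emits raw bytes in 16-byte rows and collects any short tail into a bounded local buffer, instead of packing bytes into 32-bit words. Illustrative output for a 20-byte buffer (spacing approximate, not taken from a real trace):

	AQ CMD Buffer:
	0x0000 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
	0x0010  10 11 12 13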
@@ -535,7 +543,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
I40E_PTT_UNUSED_ENTRY(255)
};
-
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 9173834825ac..58e37a44b80a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -59,8 +59,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index c1f6a59bfea0..3cc737629bf7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -310,6 +310,10 @@
#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
@@ -421,6 +425,8 @@
#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
@@ -484,7 +490,9 @@
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
@@ -548,9 +556,6 @@
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
@@ -1066,7 +1071,7 @@
#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
@@ -1171,7 +1176,7 @@
#define I40E_VFINT_ITRN_MAX_INDEX 2
#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
@@ -1803,9 +1808,6 @@
#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
@@ -1902,6 +1904,11 @@
#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
@@ -2374,20 +2381,20 @@
#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
@@ -2620,10 +2627,6 @@
#define I40E_GLPRT_TDOLD_MAX_INDEX 3
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDPC_MAX_INDEX 3
-#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPRCH_MAX_INDEX 3
#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
@@ -2990,9 +2993,6 @@
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3258,7 +3258,7 @@
#define I40E_VFINT_ITRN1_MAX_INDEX 2
#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 708891571dae..b077e02a0cc7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -25,6 +25,7 @@
******************************************************************************/
#include <linux/prefetch.h>
+#include <net/busy_poll.h>
#include "i40evf.h"
#include "i40e_prototype.h"
@@ -288,6 +289,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
+ prefetch(tx_desc);
+
/* update budget accounting */
budget--;
} while (likely(budget));
@@ -368,6 +371,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
/* allow 00 to be written to the index */
@@ -529,6 +533,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (ring_is_ps_enabled(rx_ring)) {
+ int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
+
+ rx_bi = &rx_ring->rx_bi[0];
+ if (rx_bi->hdr_buf) {
+ dma_free_coherent(dev,
+ bufsz,
+ rx_bi->hdr_buf,
+ rx_bi->dma);
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ rx_bi->dma = 0;
+ rx_bi->hdr_buf = NULL;
+ }
+ }
+ }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
rx_bi = &rx_ring->rx_bi[i];
@@ -587,6 +607,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
+ * i40evf_alloc_rx_headers - allocate rx header buffers
+ * @rx_ring: ring to alloc buffers
+ *
+ * Allocate rx header buffers for the entire ring. As these are static,
+ * this is only called when setting up a new ring.
+ **/
+void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ dma_addr_t dma;
+ void *buffer;
+ int buf_size;
+ int i;
+
+ if (rx_ring->rx_bi[0].hdr_buf)
+ return;
+ /* Make sure the buffers don't cross cache line boundaries. */
+ buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
+ buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
+ &dma, GFP_KERNEL);
+ if (!buffer)
+ return;
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ rx_bi->dma = dma + (i * buf_size);
+ rx_bi->hdr_buf = buffer + (i * buf_size);
+ }
+}
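i40evf_alloc_rx_headers() has no caller inside this diff; a hypothetical ring bring-up fragment, assuming the driver selects the packet-split or single-buffer fill path with ring_is_ps_enabled() just as the cleanup code above does, and taking I40E_DESC_UNUSED to be the usual free-descriptor count helper:

/* hypothetical caller, not part of this patch: header buffers must
 * exist before a packet-split ring is filled for the first time
 */
if (ring_is_ps_enabled(rx_ring)) {
	i40evf_alloc_rx_headers(rx_ring);
	i40evf_alloc_rx_buffers_ps(rx_ring, I40E_DESC_UNUSED(rx_ring));
} else {
	i40evf_alloc_rx_buffers_1buf(rx_ring, I40E_DESC_UNUSED(rx_ring));
}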
+
+/**
* i40evf_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -646,11 +697,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+
+ if (bi->skb) /* desc is in use */
+ goto no_buffers;
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev,
+ bi->dma,
+ 0,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
**/
-void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
@@ -690,40 +806,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
}
}
- if (ring_is_ps_enabled(rx_ring)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- }
-
- if (!bi->page_dma) {
- /* use a half page if we're re-using */
- bi->page_offset ^= PAGE_SIZE / 2;
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- bi->page_dma = 0;
- goto no_buffers;
- }
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- rx_desc->read.hdr_addr = 0;
- }
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
i++;
if (i == rx_ring->count)
i = 0;
@@ -777,10 +861,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct iphdr *iph;
__sum16 csum;
- ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+ ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
skb->ip_summed = CHECKSUM_NONE;
@@ -831,9 +915,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* be set if the *inner* header is UDP
*/
- if (ipv4_tunnel &&
- (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
- !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ if (ipv4_tunnel) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -843,15 +925,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->protocol == htons(ETH_P_8021AD))
? VLAN_HLEN : 0;
- rx_udp_csum = udp_csum(skb);
- iph = ip_hdr(skb);
- csum = csum_tcpudp_magic(
- iph->saddr, iph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, rx_udp_csum);
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ (udp_hdr(skb)->check != 0)) {
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ (skb->len -
+ skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
- if (udp_hdr(skb)->check != csum)
- goto checksum_fail;
+ } /* else it's GRE and so there is no outer UDP header */
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
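The reworked validation only recomputes the outer UDP checksum when the received header actually carries one: a zero check field means the sender skipped it (legal for UDP over IPv4), and GRE encapsulation has no outer UDP header at all. A standalone sketch of that guard plus a plain ones'-complement fold; the pseudo-header step done by csum_tcpudp_magic() is deliberately omitted:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* ones'-complement sum and fold, without pseudo-header or byte-order handling */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

static void verify_outer_udp(uint16_t rx_check, const uint8_t *seg, size_t len)
{
    if (rx_check == 0) {
        /* zero in an IPv4 UDP header means "no checksum computed";
         * same condition as the udp_hdr(skb)->check != 0 guard
         */
        printf("no outer UDP checksum to verify\n");
        return;
    }
    printf("computed fold 0x%04x vs received 0x%04x\n",
           csum_fold(csum_partial(seg, len, 0)), rx_check);
}

int main(void)
{
    uint8_t seg[4] = { 0x45, 0x00, 0x00, 0x1c };

    verify_outer_udp(0x0000, seg, sizeof(seg));   /* skipped */
    verify_outer_udp(0xbae3, seg, sizeof(seg));   /* verified */
    return 0;
}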
@@ -906,13 +992,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
}
/**
- * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
*
* Returns number of packets cleaned
**/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -925,20 +1011,49 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u8 rx_ptype;
u64 qword;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
-
- while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
- union i40e_rx_desc *next_rxd;
+ do {
struct i40e_rx_buffer *rx_bi;
struct sk_buff *skb;
u16 vlan_tag;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ i = rx_ring->next_to_clean;
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * DD bit is set.
+ */
+ dma_rmb();
rx_bi = &rx_ring->rx_bi[i];
skb = rx_bi->skb;
- prefetch(skb->data);
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_hdr_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ break;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ 0,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
+ }
rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
@@ -953,40 +1068,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
+ prefetch(rx_bi->page);
rx_bi->skb = NULL;
-
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * STATUS_DD bit is set
- */
- rmb();
-
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- if (rx_bi->dma) {
- u16 len;
-
+ cleaned_count++;
+ if (rx_hbo || rx_sph) {
+ int len;
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
- else if (rx_sph)
- len = rx_header_len;
- else if (rx_packet_len)
- len = rx_packet_len; /* 1buf/no split found */
else
- len = rx_header_len; /* split always mode */
-
- skb_put(skb, len);
- dma_unmap_single(rx_ring->dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
+ len = rx_header_len;
+ memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
+ } else if (skb->len == 0) {
+ int len;
+
+ len = (rx_packet_len > skb_headlen(skb) ?
+ skb_headlen(skb) : rx_packet_len);
+ memcpy(__skb_put(skb, len),
+ rx_bi->page + rx_bi->page_offset,
+ len);
+ rx_bi->page_offset += len;
+ rx_packet_len -= len;
}
/* Get the rest of the data if this was a header split */
- if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
-
+ if (rx_packet_len) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_bi->page,
rx_bi->page_offset,
@@ -1008,22 +1113,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
DMA_FROM_DEVICE);
rx_bi->page_dma = 0;
}
- I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+ I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
-
- if (ring_is_ps_enabled(rx_ring)) {
- rx_bi->skb = next_buffer->skb;
- rx_bi->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- }
+ next_buffer->skb = skb;
rx_ring->rx_stats.non_eop_descs++;
- goto next_desc;
+ continue;
}
/* ERR_MASK will only have valid bits if EOP set */
@@ -1032,7 +1131,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* TODO: shouldn't we increment a counter indicating the
* drop?
*/
- goto next_desc;
+ continue;
}
skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
@@ -1048,30 +1147,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
+#ifdef I40E_FCOE
+ if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+#endif
+ skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
rx_ring->netdev->last_rx = jiffies;
- budget--;
-next_desc:
rx_desc->wb.qword1.status_error_len = 0;
- if (!budget)
- break;
- cleaned_count++;
+ } while (likely(total_rx_packets < budget));
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ return total_rx_packets;
+}
+
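The packet-split clean routine above replaces the old while-loop with a do/while bounded by the budget: fetch the descriptor at next_to_clean, stop as soon as the DD bit is clear, and only after a read barrier touch the rest of the descriptor. A toy ring consumer showing the same control flow, with plain memory standing in for hardware descriptors:

#include <stdio.h>

#define RING_SIZE 16
#define DD_BIT    0x1

/* toy descriptor: DD_BIT plays the role of the "descriptor done" status bit */
struct desc {
    unsigned int status;
    unsigned int len;
};

static int clean_ring(struct desc *ring, unsigned int *next_to_clean, int budget)
{
    int cleaned = 0;

    do {
        struct desc *d = &ring[*next_to_clean];

        if (!(d->status & DD_BIT))
            break;      /* hardware has not written this one yet */
        /* a real driver issues a read barrier (dma_rmb) here */
        d->status = 0;
        *next_to_clean = (*next_to_clean + 1) % RING_SIZE;
        cleaned++;
    } while (cleaned < budget);

    return cleaned;
}

int main(void)
{
    struct desc ring[RING_SIZE] = { 0 };
    unsigned int ntc = 0;

    for (int i = 0; i < 5; i++)
        ring[i].status = DD_BIT;   /* pretend hardware completed 5 */
    printf("cleaned %d descriptors\n", clean_ring(ring, &ntc, 8));
    return 0;
}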
+/**
+ * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns number of packets cleaned
+ **/
+static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u16 rx_packet_len;
+ u8 rx_ptype;
+ u64 qword;
+ u16 i;
+
+ do {
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+ i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
+ i = rx_ring->next_to_clean;
+ rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
- }
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * DD bit is set.
+ */
+ dma_rmb();
+
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+ cleaned_count++;
+
+ /* Get the header and possibly the whole packet
+ * If this is an skb from previous receive dma will be 0
+ */
+ skb_put(skb, rx_packet_len);
+ dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+
+ I40E_RX_INCREMENT(rx_ring, i);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ rx_ring->rx_stats.non_eop_descs++;
+ continue;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
+ continue;
+ }
+
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ rx_desc->wb.qword1.status_error_len = 0;
+ } while (likely(total_rx_packets < budget));
- rx_ring->next_to_clean = i;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1079,10 +1282,7 @@ next_desc:
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
- if (cleaned_count)
- i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
-
- return budget > 0;
+ return total_rx_packets;
}
/**
@@ -1103,6 +1303,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
+ int cleaned;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
@@ -1122,8 +1323,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- i40e_for_each_ring(ring, q_vector->rx)
- clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+ i40e_for_each_ring(ring, q_vector->rx) {
+ if (ring_is_ps_enabled(ring))
+ cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
+ else
+ cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ /* if we didn't clean as many as budgeted, we must be done */
+ clean_complete &= (budget_per_ring != cleaned);
+ }
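The poll loop now divides the NAPI budget evenly across ring pairs and treats a ring that consumed its entire share as evidence that work remains. A small sketch of that bookkeeping with made-up per-ring workloads:

#include <stdio.h>
#include <stdbool.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    int budget = 64;
    int num_ringpairs = 3;
    int work_available[3] = { 5, 40, 12 };   /* hypothetical work per ring */
    int budget_per_ring = MAX(budget / num_ringpairs, 1);
    bool clean_complete = true;

    for (int r = 0; r < num_ringpairs; r++) {
        int cleaned = work_available[r] < budget_per_ring ?
                      work_available[r] : budget_per_ring;
        /* consuming the whole per-ring budget implies more work may remain */
        clean_complete &= (budget_per_ring != cleaned);
        printf("ring %d: budget %d, cleaned %d\n", r, budget_per_ring, cleaned);
    }
    printf("clean_complete = %s\n", clean_complete ? "true" : "false");
    return 0;
}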
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
@@ -1163,6 +1370,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
+ if (protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off by the user the
+ * stack sets the protocol to 8021q so that the driver
+ * can take any steps required to support the SW only
+ * VLAN handling. In our case the driver doesn't need
+ * to take any further steps so just set the protocol
+ * to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ goto out;
+ }
+
/* if we have a HW VLAN tag being added, default to the HW one */
if (skb_vlan_tag_present(skb)) {
tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
@@ -1179,6 +1399,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
}
+out:
*flags = tx_flags;
return 0;
}
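When hardware VLAN insertion is disabled, the frame handed down already carries its 802.1Q tag and skb->protocol reads as ETH_P_8021Q; the change above resolves the encapsulated ethertype instead (vlan_get_protocol() in the driver). A userspace sketch of the same lookup on a raw tagged frame:

#include <stdio.h>
#include <stdint.h>

#define ETH_P_8021Q_DEMO 0x8100

/* if the outer ethertype is 802.1Q, the real ethertype sits 4 bytes
 * further in, after the TPID and TCI fields
 */
static uint16_t inner_ethertype(const uint8_t *frame)
{
    uint16_t outer = (uint16_t)frame[12] << 8 | frame[13];

    if (outer == ETH_P_8021Q_DEMO)
        return (uint16_t)frame[16] << 8 | frame[17];
    return outer;
}

int main(void)
{
    uint8_t frame[18] = { 0 };

    frame[12] = 0x81; frame[13] = 0x00;   /* TPID: 802.1Q */
    frame[14] = 0x00; frame[15] = 0x64;   /* TCI: VLAN 100 */
    frame[16] = 0x08; frame[17] = 0x00;   /* inner ethertype: IPv4 */
    printf("inner ethertype: 0x%04x\n", inner_ethertype(frame));
    return 0;
}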
@@ -1262,8 +1483,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ u32 l4_tunnel = 0;
if (skb->encapsulation) {
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ break;
+ default:
+ return;
+ }
network_hdr_len = skb_inner_network_header_len(skb);
this_ip_hdr = inner_ip_hdr(skb);
this_ipv6_hdr = inner_ipv6_hdr(skb);
@@ -1286,8 +1515,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the ctx descriptor fields */
*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- I40E_TXD_CTX_UDP_TUNNELING |
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ l4_tunnel |
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
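The encapsulation path now keys the context descriptor's tunnelling type off the outer IP protocol and bails out for anything it cannot describe (GRE, for instance) rather than unconditionally programming UDP tunnelling. A sketch of that guard with placeholder flag values, not the hardware encoding:

#include <stdio.h>
#include <stdint.h>

#define IPPROTO_UDP_DEMO  17
#define CTX_UDP_TUNNELING 0x1   /* placeholder, not the real register encoding */

static int pick_l4_tunnel(uint8_t outer_proto, uint32_t *l4_tunnel)
{
    switch (outer_proto) {
    case IPPROTO_UDP_DEMO:
        *l4_tunnel = CTX_UDP_TUNNELING;
        return 0;
    default:
        return -1;   /* unknown encapsulation: leave checksum to software */
    }
}

int main(void)
{
    uint32_t l4_tunnel = 0;

    if (pick_l4_tunnel(17, &l4_tunnel) == 0)
        printf("UDP tunnel, ctx flag 0x%x\n", l4_tunnel);
    if (pick_l4_tunnel(47, &l4_tunnel) != 0)   /* 47 = GRE */
        printf("GRE outer header, no hardware tunnel offload set up\n");
    return 0;
}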
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c950a038237c..1e49bb1fbac1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t {
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_INCREMENT(r, i) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ r->next_to_clean = i; \
+ } while (0)
+
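I40E_RX_INCREMENT bundles the wrap-around increment and the next_to_clean store into one statement-like macro; the do { } while (0) wrapper is what keeps the expansion safe after an un-braced if. A compilable sketch of the same shape, with extra parentheses added around the macro arguments:

#include <stdio.h>

#define RING_INCREMENT(count, i, next_to_clean) \
    do { \
        (i)++; \
        if ((i) == (count)) \
            (i) = 0; \
        (next_to_clean) = (i); \
    } while (0)

int main(void)
{
    unsigned int count = 4, i = 2, ntc = 2;

    for (int step = 0; step < 4; step++) {
        RING_INCREMENT(count, i, ntc);
        printf("i=%u next_to_clean=%u\n", i, ntc);
    }
    return 0;
}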
#define I40E_RX_NEXT_DESC(r, i, n) \
do { \
(i)++; \
@@ -151,6 +159,7 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
+ void *hdr_buf;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
@@ -223,8 +232,8 @@ struct i40e_ring {
u16 rx_buf_len;
u8 dtype;
#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
-#define I40E_RX_DTYPE_HEADER_SPLIT 2
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
u8 hsplit;
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
@@ -278,7 +287,9 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3d0fdaab5cc8..ec9d83a93379 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -44,7 +44,8 @@
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
@@ -175,12 +176,12 @@ struct i40e_link_status {
u8 an_info;
u8 ext_info;
u8 loopback;
- bool an_enabled;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
u16 max_frame_size;
bool crc_enable;
u8 pacing;
+ u8 requested_speeds;
};
struct i40e_phy_info {
@@ -241,6 +242,7 @@ struct i40e_hw_capabilities {
u8 rx_buf_chain_len;
u32 enabled_tcmap;
u32 maxtc;
+ u64 wr_csr_prot;
};
struct i40e_mac_info {
@@ -1116,7 +1118,7 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
-#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index e0c8208138f4..59f62f0e65dd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -59,31 +59,29 @@
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
-/* VF sends req. to pf for the following
- * ops.
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- I40E_VIRTCHNL_OP_GET_STATS,
- I40E_VIRTCHNL_OP_FCOE,
- I40E_VIRTCHNL_OP_CONFIG_RSS,
-/* PF sends status change events to vfs using
- * the following op.
- */
- I40E_VIRTCHNL_OP_EVENT,
+ I40E_VIRTCHNL_OP_RESET_VF = 2,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+ I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+ I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ I40E_VIRTCHNL_OP_GET_STATS = 15,
+ I40E_VIRTCHNL_OP_FCOE = 16,
+ I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
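Renumbering the virtchnl opcodes with explicit values freezes the PF/VF wire ABI: inserting a new op can no longer silently shift the values that follow. A short illustration with a demo enum and a compile-time check, using values that mirror a few of the ops above purely as an example:

#include <stdio.h>

enum demo_virtchnl_ops {
    DEMO_OP_UNKNOWN    = 0,
    DEMO_OP_VERSION    = 1,
    DEMO_OP_RESET_VF   = 2,
    DEMO_OP_EVENT      = 17,
    DEMO_OP_CONFIG_RSS = 18,
};

/* pinning the value catches accidental renumbering at build time */
_Static_assert(DEMO_OP_EVENT == 17, "wire value must not drift");

int main(void)
{
    printf("DEMO_OP_CONFIG_RSS = %d\n", DEMO_OP_CONFIG_RSS);
    return 0;
}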
/* Virtual channel message descriptor. This overlays the admin queue
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 981224743c73..1b98c25b3092 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -225,7 +225,6 @@ struct i40evf_adapter {
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
/* flags for admin queue service task */
u32 aq_required;
- u32 aq_pending;
#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
@@ -272,6 +271,8 @@ void i40evf_update_stats(struct i40evf_adapter *adapter);
void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
void i40e_napi_add_all(struct i40evf_adapter *adapter);
void i40e_napi_del_all(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 69b97bac182c..f4e77665bc54 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@
#include <linux/uaccess.h>
-
struct i40evf_stats {
char stat_string[ETH_GSTRING_LEN];
int stat_offset;
@@ -180,7 +179,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev)
}
/**
- * i40evf_get_msglevel - Set debug message level
+ * i40evf_set_msglevel - Set debug message level
* @netdev: network interface device structure
* @data: message level
*
@@ -191,6 +190,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ if (I40E_DEBUG_USER & data)
+ adapter->hw.debug_mask = data;
adapter->msg_enable = data;
}
@@ -208,7 +209,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, i40evf_driver_name, 32);
strlcpy(drvinfo->version, i40evf_driver_version, 32);
-
+ strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
}
@@ -640,12 +641,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!indir)
return 0;
- for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
- hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
- indir[j++] = hlut_val & 0xff;
- indir[j++] = (hlut_val >> 8) & 0xff;
- indir[j++] = (hlut_val >> 16) & 0xff;
- indir[j++] = (hlut_val >> 24) & 0xff;
+ if (indir) {
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+ indir[j++] = hlut_val & 0xff;
+ indir[j++] = (hlut_val >> 8) & 0xff;
+ indir[j++] = (hlut_val >> 16) & 0xff;
+ indir[j++] = (hlut_val >> 24) & 0xff;
+ }
}
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 8d8c201c63c1..7c53aca4b5a6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -28,15 +28,13 @@
#include "i40e_prototype.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
-static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
-static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.2.0"
+#define DRV_VERSION "1.2.25"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -244,6 +242,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
if (mask & (1 << (i - 1))) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
}
}
@@ -263,6 +262,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
if (mask & 1) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
}
@@ -270,6 +270,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
if (mask & (1 << i)) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
}
@@ -524,7 +525,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
int err;
snprintf(adapter->misc_vector_name,
- sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx");
+ sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
+ dev_name(&adapter->pdev->dev));
err = request_irq(adapter->msix_entries[0].vector,
&i40evf_msix_aq, 0,
adapter->misc_vector_name, netdev);
@@ -662,13 +664,21 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
- struct i40evf_vlan_filter *f;
+ struct i40evf_vlan_filter *f = NULL;
+ int count = 50;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ udelay(1);
+ if (--count == 0)
+ goto out;
+ }
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
- return NULL;
+ goto clearout;
f->vlan = vlan;
@@ -678,6 +688,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
}
+clearout:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+out:
return f;
}
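The VLAN filter paths now bound the wait for the critical-section bit to 50 attempts instead of spinning indefinitely. A userspace sketch of the same bounded test-and-set loop, using C11 atomics in place of the adapter's bit lock:

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static atomic_flag crit_section = ATOMIC_FLAG_INIT;   /* stands in for the crit_section bit */

static bool try_enter_critical(int max_tries)
{
    int count = max_tries;

    while (atomic_flag_test_and_set(&crit_section)) {
        /* the driver busy-waits with udelay(1) between attempts */
        if (--count == 0)
            return false;   /* give up rather than spin forever */
    }
    return true;
}

int main(void)
{
    if (try_enter_critical(50)) {
        printf("entered critical section\n");
        atomic_flag_clear(&crit_section);
    } else {
        printf("gave up after 50 tries\n");
    }
    return 0;
}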
@@ -689,12 +702,21 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f;
+ int count = 50;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ udelay(1);
+ if (--count == 0)
+ return;
+ }
f = i40evf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
/**
@@ -761,13 +783,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
{
struct i40evf_mac_filter *f;
+ int count = 50;
if (!macaddr)
return NULL;
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
+ &adapter->crit_section)) {
udelay(1);
+ if (--count == 0)
+ return NULL;
+ }
f = i40evf_find_filter(adapter, macaddr);
if (!f) {
@@ -828,6 +854,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct i40evf_mac_filter *f, *ftmp;
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
+ int count = 50;
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
@@ -838,8 +865,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
}
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
+ &adapter->crit_section)) {
udelay(1);
+ if (--count == 0) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get lock in %s\n", __func__);
+ return;
+ }
+ }
/* remove filter if not in netdev list */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
bool found = false;
@@ -920,7 +953,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = adapter->rx_rings[i];
- i40evf_alloc_rx_buffers(ring, ring->count);
+ i40evf_alloc_rx_buffers_1buf(ring, ring->count);
ring->next_to_use = ring->count - 1;
writel(ring->next_to_use, ring->tail);
}
@@ -958,6 +991,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
&adapter->crit_section))
usleep_range(500, 1000);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
/* remove all MAC filters */
@@ -972,7 +1008,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
- adapter->aq_pending = 0;
/* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running
* and it will take care of this.
@@ -981,15 +1016,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
- netif_tx_disable(netdev);
-
- netif_tx_stop_all_queues(netdev);
-
- i40evf_napi_disable_all(adapter);
-
- msleep(20);
- netif_carrier_off(netdev);
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
@@ -1307,7 +1334,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
*/
return;
}
- adapter->aq_pending = 0;
adapter->aq_required = 0;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
@@ -1327,7 +1353,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
- adapter->aq_pending = 0;
adapter->aq_required = 0;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
@@ -1336,7 +1361,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
/* Process admin queue tasks. After init, everything gets done
* here so we don't race on the admin queue.
*/
- if (adapter->aq_pending) {
+ if (adapter->current_op) {
if (!i40evf_asq_done(hw)) {
dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
i40evf_send_api_ver(adapter);
@@ -1344,6 +1369,11 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+ i40evf_disable_queues(adapter);
+ goto watchdog_done;
+ }
+
if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
i40evf_map_queues(adapter);
goto watchdog_done;
@@ -1369,11 +1399,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
- i40evf_disable_queues(adapter);
- goto watchdog_done;
- }
-
if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
i40evf_configure_queues(adapter);
goto watchdog_done;
@@ -1407,41 +1432,22 @@ restart_watchdog:
}
/**
- * next_queue - increment to next available tx queue
- * @adapter: board private structure
- * @j: queue counter
- *
- * Helper function for RSS programming to increment through available
- * queus. Returns the next queue value.
- **/
-static int next_queue(struct i40evf_adapter *adapter, int j)
-{
- j += 1;
-
- return j >= adapter->num_active_queues ? 0 : j;
-}
-
-/**
- * i40evf_configure_rss - Prepare for RSS if used
+ * i40evf_configure_rss - Prepare for RSS
* @adapter: board private structure
**/
static void i40evf_configure_rss(struct i40evf_adapter *adapter)
{
u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
struct i40e_hw *hw = &adapter->hw;
+ u32 cqueue = 0;
u32 lut = 0;
int i, j;
u64 hena;
- /* No RSS for single queue. */
- if (adapter->num_active_queues == 1) {
- wr32(hw, I40E_VFQF_HENA(0), 0);
- wr32(hw, I40E_VFQF_HENA(1), 0);
- return;
- }
-
/* Hash type is configured by the PF - we just supply the key */
netdev_rss_key_fill(rss_key, sizeof(rss_key));
+
+ /* Fill out hash function seed */
for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
@@ -1451,16 +1457,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
- j = adapter->num_active_queues;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
- j = next_queue(adapter, j);
- lut = j;
- j = next_queue(adapter, j);
- lut |= j << 8;
- j = next_queue(adapter, j);
- lut |= j << 16;
- j = next_queue(adapter, j);
- lut |= j << 24;
+ lut = 0;
+ for (j = 0; j < 4; j++) {
+ if (cqueue == adapter->vsi_res->num_queue_pairs)
+ cqueue = 0;
+ lut |= ((cqueue) << (8 * j));
+ cqueue++;
+ }
wr32(hw, I40E_VFQF_HLUT(i), lut);
}
i40e_flush(hw);
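The new LUT fill walks the VF's queue pairs round robin, packing four byte-wide queue indices into each 32-bit HLUT word and wrapping when the last queue is reached. A standalone sketch of the packing with example sizes; register writes are replaced by printf:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int num_queue_pairs = 3;   /* example VF queue count */
    unsigned int lut_words = 4;         /* the real table has HLUT_MAX_INDEX+1 words */
    unsigned int cqueue = 0;

    for (unsigned int i = 0; i < lut_words; i++) {
        uint32_t lut = 0;

        for (unsigned int j = 0; j < 4; j++) {
            if (cqueue == num_queue_pairs)
                cqueue = 0;             /* wrap back to queue 0 */
            lut |= cqueue << (8 * j);   /* one queue index per byte */
            cqueue++;
        }
        printf("HLUT[%u] = 0x%08x\n", i, (unsigned int)lut);
    }
    return 0;
}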
@@ -1481,9 +1485,11 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_adapter *adapter = container_of(work,
struct i40evf_adapter,
reset_task);
+ struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
- int i = 0, err;
+ struct i40evf_mac_filter *f;
uint32_t rstat_val;
+ int i = 0, err;
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section))
@@ -1528,7 +1534,11 @@ static void i40evf_reset_task(struct work_struct *work)
if (netif_running(adapter->netdev)) {
set_bit(__I40E_DOWN, &adapter->vsi.state);
- i40evf_down(adapter);
+ i40evf_irq_disable(adapter);
+ i40evf_napi_disable_all(adapter);
+ netif_tx_disable(netdev);
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
i40evf_free_traffic_irqs(adapter);
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
@@ -1560,22 +1570,38 @@ static void i40evf_reset_task(struct work_struct *work)
continue_reset:
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
- i40evf_down(adapter);
+ i40evf_irq_disable(adapter);
+
+ if (netif_running(adapter->netdev)) {
+ i40evf_napi_disable_all(adapter);
+ netif_tx_disable(netdev);
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+
adapter->state = __I40EVF_RESETTING;
/* kill and reinit the admin queue */
if (i40evf_shutdown_adminq(hw))
- dev_warn(&adapter->pdev->dev,
- "%s: Failed to destroy the Admin Queue resources\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n");
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
- dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
+ err);
- adapter->aq_pending = 0;
- adapter->aq_required = 0;
i40evf_map_queues(adapter);
+
+ /* re-add all MAC filters */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->add = true;
+ }
+ /* re-add all VLAN filters */
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ f->add = true;
+ }
+ adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1688,7 +1714,7 @@ out:
*
* Free all transmit software resources
**/
-static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
{
int i;
@@ -1758,7 +1784,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
*
* Free all receive software resources
**/
-static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
{
int i;
@@ -1788,7 +1814,7 @@ static int i40evf_open(struct net_device *netdev)
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- if (adapter->state != __I40EVF_DOWN)
+ if (adapter->state != __I40EVF_DOWN || adapter->aq_required)
return -EBUSY;
/* allocate transmit descriptors */
@@ -1852,9 +1878,6 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN;
i40evf_free_traffic_irqs(adapter);
- i40evf_free_all_tx_resources(adapter);
- i40evf_free_all_rx_resources(adapter);
-
return 0;
}
@@ -1977,7 +2000,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
*
* This task completes the work that was begun in probe. Due to the nature
* of VF-PF communications, we may need to wait tens of milliseconds to get
- * reponses back from the PF. Rather than busy-wait in probe and bog down the
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
* whole system, we'll do it in a task so we can sleep.
* This task only runs during driver init. Once we've established
* communications with the PF driver and set up our netdev, the watchdog
@@ -2003,7 +2026,7 @@ static void i40evf_init_task(struct work_struct *work)
if (err) {
dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
err);
- goto err;
+ goto err;
}
err = i40evf_check_reset_complete(hw);
if (err) {
@@ -2223,7 +2246,6 @@ static void i40evf_shutdown(struct pci_dev *pdev)
/* Prevent the watchdog from running. */
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
- adapter->aq_pending = 0;
#ifdef CONFIG_PM
pci_save_state(pdev);
@@ -2368,7 +2390,7 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
}
/**
- * i40evf_resume - Power managment resume routine
+ * i40evf_resume - Power management resume routine
* @pdev: PCI device information struct
*
* Called when the system (VM) is resumed from sleep/suspend.
@@ -2441,7 +2463,6 @@ static void i40evf_remove(struct pci_dev *pdev)
/* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
- adapter->aq_pending = 0;
i40evf_request_reset(adapter);
msleep(20);
/* If the FW isn't responding, kick it once, but only once. */
@@ -2468,6 +2489,8 @@ static void i40evf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
kfree(adapter->vf_res);
/* If we got removed before an up/down sequence, we've got a filter
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 3f0c85ecbca6..61e090558f31 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -250,7 +250,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi++;
}
- adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
@@ -277,7 +276,6 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
- adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
@@ -303,7 +301,6 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
- adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
@@ -354,7 +351,6 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
vimi->vecmap[v_idx].txq_map = 0;
vimi->vecmap[v_idx].rxq_map = 0;
- adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vimi, len);
@@ -415,7 +411,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
f->add = false;
}
}
- adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
(u8 *)veal, len);
@@ -476,7 +471,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
kfree(f);
}
}
- adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
(u8 *)veal, len);
@@ -537,7 +531,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
f->add = false;
}
}
- adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
@@ -598,7 +591,6 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
kfree(f);
}
}
- adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
@@ -720,9 +712,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
__func__, v_retval, v_opcode);
}
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
- /* no action, but also not an error */
- break;
case I40E_VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
@@ -740,37 +729,30 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_stats = *stats;
}
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
- break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
- break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
- break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
- break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
/* enable transmits */
i40evf_irq_enable(adapter, true);
netif_tx_start_all_queues(adapter->netdev);
netif_carrier_on(adapter->netdev);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
break;
+ case I40E_VIRTCHNL_OP_VERSION:
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+ /* Don't display an error if we get these out of sequence.
+ * If the firmware needed to get kicked, we'll get these and
+ * it's no problem.
+ */
+ if (v_opcode != adapter->current_op)
+ return;
break;
default:
- dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n",
- v_opcode);
+ if (v_opcode != adapter->current_op)
+ dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
+ adapter->current_op, v_opcode);
break;
} /* switch v_opcode */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f366b3b96d03..a0a9b1fcb5e8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1036,7 +1036,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
- adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
netif_napi_del(&q_vector->napi);
@@ -1207,6 +1207,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
q_vector = adapter->q_vector[v_idx];
if (!q_vector)
q_vector = kzalloc(size, GFP_KERNEL);
+ else
+ memset(q_vector, 0, size);
if (!q_vector)
return -ENOMEM;
@@ -1776,6 +1778,7 @@ void igb_down(struct igb_adapter *adapter)
wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* flush and sleep below */
+ netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
/* disable transmits in the hardware */
@@ -1797,12 +1800,9 @@ void igb_down(struct igb_adapter *adapter)
}
}
-
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netif_carrier_off(netdev);
-
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter, &adapter->stats64);
@@ -2095,6 +2095,7 @@ static const struct net_device_ops igb_netdev_ops = {
#endif
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
+ .ndo_features_check = passthru_features_check,
};
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index d20fc8ed11f1..e3b9b63ad010 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -30,7 +30,7 @@
*
* Neither the 82576 nor the 82580 offer registers wide enough to hold
* nanoseconds time values for very long. For the 82580, SYSTIM always
- * counts nanoseconds, but the upper 24 bits are not availible. The
+ * counts nanoseconds, but the upper 24 bits are not available. The
* frequency is adjusted by changing the 32 bit fractional nanoseconds
* register, TIMINCA.
*
@@ -116,7 +116,8 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
}
/* SYSTIM read access for I210/I211 */
-static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+static void igb_ptp_read_i210(struct igb_adapter *adapter,
+ struct timespec64 *ts)
{
struct e1000_hw *hw = &adapter->hw;
u32 sec, nsec;
@@ -134,7 +135,7 @@ static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
}
static void igb_ptp_write_i210(struct igb_adapter *adapter,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct e1000_hw *hw = &adapter->hw;
@@ -269,13 +270,13 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
unsigned long flags;
- struct timespec now, then = ns_to_timespec(delta);
+ struct timespec64 now, then = ns_to_timespec64(delta);
spin_lock_irqsave(&igb->tmreg_lock, flags);
igb_ptp_read_i210(igb, &now);
- now = timespec_add(now, then);
- igb_ptp_write_i210(igb, (const struct timespec *)&now);
+ now = timespec64_add(now, then);
+ igb_ptp_write_i210(igb, (const struct timespec64 *)&now);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -283,13 +284,12 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
}
static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
- struct timespec *ts)
+ struct timespec64 *ts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
unsigned long flags;
u64 ns;
- u32 remainder;
spin_lock_irqsave(&igb->tmreg_lock, flags);
@@ -297,14 +297,13 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
- struct timespec *ts)
+ struct timespec64 *ts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
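Switching the PTP callbacks to timespec64 also lets ns_to_timespec64() replace the open-coded div_u64_rem(): one divide yields a 64-bit seconds field that does not wrap in 2038. A sketch of the conversion with demo types; the kernel's own struct is deliberately not redefined here:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define NSEC_PER_SEC 1000000000ULL

struct demo_timespec64 {
    int64_t tv_sec;    /* 64-bit seconds, no year-2038 wrap */
    long    tv_nsec;
};

static struct demo_timespec64 demo_ns_to_timespec64(uint64_t ns)
{
    struct demo_timespec64 ts;

    ts.tv_sec = (int64_t)(ns / NSEC_PER_SEC);
    ts.tv_nsec = (long)(ns % NSEC_PER_SEC);
    return ts;
}

int main(void)
{
    struct demo_timespec64 ts = demo_ns_to_timespec64(5000000123ULL);

    printf("%" PRId64 ".%09ld\n", ts.tv_sec, ts.tv_nsec);
    return 0;
}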
@@ -320,15 +319,14 @@ static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
}
static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
unsigned long flags;
u64 ns;
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&igb->tmreg_lock, flags);
@@ -340,7 +338,7 @@ static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
}
static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
@@ -358,7 +356,7 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
{
u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
- u32 mask[IGB_N_SDP] = {
+ static const u32 mask[IGB_N_SDP] = {
E1000_CTRL_SDP0_DIR,
E1000_CTRL_SDP1_DIR,
E1000_CTRL_EXT_SDP2_DIR,
@@ -373,16 +371,16 @@ static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
{
- struct e1000_hw *hw = &igb->hw;
- u32 aux0_sel_sdp[IGB_N_SDP] = {
+ static const u32 aux0_sel_sdp[IGB_N_SDP] = {
AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
};
- u32 aux1_sel_sdp[IGB_N_SDP] = {
+ static const u32 aux1_sel_sdp[IGB_N_SDP] = {
AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
};
- u32 ts_sdp_en[IGB_N_SDP] = {
+ static const u32 ts_sdp_en[IGB_N_SDP] = {
TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
};
+ struct e1000_hw *hw = &igb->hw;
u32 ctrl, ctrl_ext, tssdp = 0;
ctrl = rd32(E1000_CTRL);
@@ -409,28 +407,28 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
{
- struct e1000_hw *hw = &igb->hw;
- u32 aux0_sel_sdp[IGB_N_SDP] = {
+ static const u32 aux0_sel_sdp[IGB_N_SDP] = {
AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
};
- u32 aux1_sel_sdp[IGB_N_SDP] = {
+ static const u32 aux1_sel_sdp[IGB_N_SDP] = {
AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
};
- u32 ts_sdp_en[IGB_N_SDP] = {
+ static const u32 ts_sdp_en[IGB_N_SDP] = {
TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
};
- u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
+ static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
};
- u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
+ static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
};
- u32 ts_sdp_sel_clr[IGB_N_SDP] = {
+ static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
};
+ struct e1000_hw *hw = &igb->hw;
u32 ctrl, ctrl_ext, tssdp = 0;
ctrl = rd32(E1000_CTRL);
@@ -468,7 +466,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
unsigned long flags;
struct timespec ts;
- int pin;
+ int pin = -1;
s64 ns;
switch (rq->type) {
@@ -627,11 +625,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
{
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
- struct timespec ts;
+ struct timespec64 ts;
- igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
+ igb->ptp_caps.gettime64(&igb->ptp_caps, &ts);
- pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ pr_debug("igb overflow check at %lld.%09lu\n",
+ (long long) ts.tv_sec, ts.tv_nsec);
schedule_delayed_work(&igb->ptp_overflow_work,
IGB_SYSTIM_OVERFLOW_PERIOD);
@@ -989,8 +988,8 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
- adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
adapter->cc.mask = CYCLECOUNTER_MASK(64);
@@ -1009,8 +1008,8 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
- adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
@@ -1038,8 +1037,8 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
- adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
- adapter->ptp_caps.settime = igb_ptp_settime_i210;
+ adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
+ adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
/* Enable the timer functions by clearing bit 31. */
@@ -1057,7 +1056,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
/* Initialize the clock and overflow work for devices that need it. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec ts = ktime_to_timespec(ktime_get_real());
+ struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
} else {
@@ -1171,7 +1170,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
/* Re-initialize the timer. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec ts = ktime_to_timespec(ktime_get_real());
+ struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
igb_ptp_write_i210(adapter, &ts);
} else {
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index d9fa999b1685..ae3f28332fa0 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -29,94 +28,93 @@
#define _E1000_DEFINES_H_
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define REQ_TX_DESCRIPTOR_MULTIPLE 8
-#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
/* IVAR valid bit */
-#define E1000_IVAR_VALID 0x80
+#define E1000_IVAR_VALID 0x80
/* Receive Descriptor bit definitions */
-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
-#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-
-#define E1000_RXDEXT_STATERR_LB 0x00040000
-#define E1000_RXDEXT_STATERR_CE 0x01000000
-#define E1000_RXDEXT_STATERR_SE 0x02000000
-#define E1000_RXDEXT_STATERR_SEQ 0x04000000
-#define E1000_RXDEXT_STATERR_CXE 0x10000000
-#define E1000_RXDEXT_STATERR_TCPE 0x20000000
-#define E1000_RXDEXT_STATERR_IPE 0x40000000
-#define E1000_RXDEXT_STATERR_RXE 0x80000000
-
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_LB 0x00040000
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
/* Device Control */
-#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
/* Device Status */
-#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
-#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
-#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
-
-#define SPEED_10 10
-#define SPEED_100 100
-#define SPEED_1000 1000
-#define HALF_DUPLEX 1
-#define FULL_DUPLEX 2
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
/* Transmit Descriptor bit definitions */
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */
-#define MAX_JUMBO_FRAME_SIZE 0x3F00
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
/* 802.1q VLAN Packet Size */
-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
/* Error Codes */
-#define E1000_SUCCESS 0
-#define E1000_ERR_CONFIG 3
-#define E1000_ERR_MAC_INIT 5
-#define E1000_ERR_MBX 15
+#define E1000_SUCCESS 0
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_MBX 15
/* SRRCTL bit definitions */
-#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
-#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
-#define E1000_SRRCTL_DROP_EN 0x80000000
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
-#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
-#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
/* Additional Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */
/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
#endif /* _E1000_DEFINES_H_ */
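For illustration (a sketch, not code from this driver; example_rx_frame_has_error() is a made-up helper), the extended Rx error bits collected into E1000_RXDEXT_ERR_FRAME_ERR_MASK above are meant to be tested against the descriptor write-back status word:

#include <linux/types.h>
#include "defines.h"

/* Sketch: true when the write-back status carries any frame-level
 * error bit from E1000_RXDEXT_ERR_FRAME_ERR_MASK.
 */
static bool example_rx_frame_has_error(u32 staterr)
{
        return (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0;
}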
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2178f87e9f61..c6996feb1cb4 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -36,7 +35,6 @@
#include "igbvf.h"
#include <linux/if_vlan.h>
-
struct igbvf_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
static int igbvf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev,
}
static int igbvf_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
return -EOPNOTSUPP;
}
static void igbvf_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
}
static int igbvf_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
return -EOPNOTSUPP;
}
@@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev,
static u32 igbvf_get_msglevel(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
+
return adapter->msg_enable;
}
static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
+
adapter->msg_enable = data;
}
@@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev)
}
static void igbvf_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+ struct ethtool_regs *regs, void *p)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev)
}
static int igbvf_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
return -EOPNOTSUPP;
}
static int igbvf_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
return -EOPNOTSUPP;
}
static void igbvf_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
}
static void igbvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev,
}
static int igbvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
@@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
+ new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
- new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring->count) &&
@@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
adapter->tx_ring->count = new_tx_count;
@@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev,
igbvf_down(adapter);
- /*
- * We can't just free everything and then setup again,
+ /* We can't just free everything and then setup again,
* because the ISRs in MSI-X mode get passed pointers
- * to the tx and rx ring structs.
+ * to the Tx and Rx ring structs.
*/
if (new_tx_count != adapter->tx_ring->count) {
memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
@@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
igbvf_free_rx_resources(adapter->rx_ring);
- memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
+ memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
}
err_setup:
igbvf_up(adapter);
@@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
}
static void igbvf_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
+ struct ethtool_test *eth_test, u64 *data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
set_bit(__IGBVF_TESTING, &adapter->state);
- /*
- * Link test performed before hardware reset so autoneg doesn't
+ /* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
if (igbvf_link_test(adapter, &data[0]))
@@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev,
}
static void igbvf_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
}
static int igbvf_set_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
return -EOPNOTSUPP;
}
static int igbvf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev,
}
static int igbvf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
- (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+ (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
adapter->current_itr = ec->rx_coalesce_usecs << 2;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
@@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev,
adapter->current_itr = IGBVF_START_ITR;
adapter->requested_itr = ec->rx_coalesce_usecs;
} else if (ec->rx_coalesce_usecs == 0) {
- /*
- * The user's desire is to turn off interrupt throttling
+ /* The user's desire is to turn off interrupt throttling
* altogether, but due to HW limitations, we can't do that.
* Instead we set a very small value in EITR, which would
* allow ~967k interrupts per second, but allow the adapter's
@@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev,
adapter->current_itr = 4;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
- } else
+ } else {
return -EINVAL;
+ }
writel(adapter->current_itr,
hw->hw_addr + adapter->rx_ring->itr_register);
@@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev,
static int igbvf_nway_reset(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
+
if (netif_running(netdev))
igbvf_reinit_locked(adapter);
return 0;
}
-
static void igbvf_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
- u64 *data)
+ struct ethtool_stats *stats,
+ u64 *data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
int i;
@@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev,
igbvf_update_stats(adapter);
for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
char *p = (char *)adapter +
- igbvf_gstrings_stats[i].stat_offset;
+ igbvf_gstrings_stats[i].stat_offset;
char *b = (char *)adapter +
- igbvf_gstrings_stats[i].base_stat_offset;
+ igbvf_gstrings_stats[i].base_stat_offset;
data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
- (*(u32 *)p - *(u32 *)b));
+ sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
+ (*(u32 *)p - *(u32 *)b));
}
-
}
static int igbvf_get_sset_count(struct net_device *dev, int stringset)
{
- switch(stringset) {
+ switch (stringset) {
case ETH_SS_TEST:
return IGBVF_TEST_LEN;
case ETH_SS_STATS:
@@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset)
}
static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+ u8 *data)
{
u8 *p = data;
int i;
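The igbvf_set_ringparam() hunk above replaces the open-coded casts with max_t()/min_t() and keeps the ALIGN() rounding to the required descriptor multiple; it also swaps msleep(1) for usleep_range(1000, 2000), the interface recommended for sleeps this short. A minimal sketch of that clamping, assuming the driver headers are in scope; example_clamp_rx_count() is a hypothetical name:

#include <linux/kernel.h>
#include "defines.h"
#include "igbvf.h"

/* Sketch: bound a user-requested Rx descriptor count to the driver
 * limits and round it up to the required multiple, as the converted
 * lines in igbvf_set_ringparam() do.
 */
static u32 example_clamp_rx_count(u32 requested)
{
        u32 count = max_t(u32, requested, IGBVF_MIN_RXD);

        count = min_t(u32, count, IGBVF_MAX_RXD);
        return ALIGN(count, REQ_RX_DESCRIPTOR_MULTIPLE);
}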
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 7d6a25c8f889..f166baab8d7e 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -43,10 +42,10 @@ struct igbvf_info;
struct igbvf_adapter;
/* Interrupt defines */
-#define IGBVF_START_ITR 488 /* ~8000 ints/sec */
-#define IGBVF_4K_ITR 980
-#define IGBVF_20K_ITR 196
-#define IGBVF_70K_ITR 56
+#define IGBVF_START_ITR 488 /* ~8000 ints/sec */
+#define IGBVF_4K_ITR 980
+#define IGBVF_20K_ITR 196
+#define IGBVF_70K_ITR 56
enum latency_range {
lowest_latency = 0,
@@ -55,56 +54,55 @@ enum latency_range {
latency_invalid = 255
};
-
/* Interrupt modes, as used by the IntMode parameter */
-#define IGBVF_INT_MODE_LEGACY 0
-#define IGBVF_INT_MODE_MSI 1
-#define IGBVF_INT_MODE_MSIX 2
+#define IGBVF_INT_MODE_LEGACY 0
+#define IGBVF_INT_MODE_MSI 1
+#define IGBVF_INT_MODE_MSIX 2
/* Tx/Rx descriptor defines */
-#define IGBVF_DEFAULT_TXD 256
-#define IGBVF_MAX_TXD 4096
-#define IGBVF_MIN_TXD 80
+#define IGBVF_DEFAULT_TXD 256
+#define IGBVF_MAX_TXD 4096
+#define IGBVF_MIN_TXD 80
-#define IGBVF_DEFAULT_RXD 256
-#define IGBVF_MAX_RXD 4096
-#define IGBVF_MIN_RXD 80
+#define IGBVF_DEFAULT_RXD 256
+#define IGBVF_MAX_RXD 4096
+#define IGBVF_MIN_RXD 80
-#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
-#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
+#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
- * descriptors available in its onboard memory.
- * Setting this to 0 disables RX descriptor prefetch.
+ * descriptors available in its onboard memory.
+ * Setting this to 0 disables RX descriptor prefetch.
* HTHRESH - MAC will only prefetch if there are at least this many descriptors
- * available in host memory.
- * If PTHRESH is 0, this should also be 0.
+ * available in host memory.
+ * If PTHRESH is 0, this should also be 0.
* WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
- * descriptors until either it has this many to write back, or the
- * ITR timer expires.
+ * descriptors until either it has this many to write back, or the
+ * ITR timer expires.
*/
-#define IGBVF_RX_PTHRESH 16
-#define IGBVF_RX_HTHRESH 8
-#define IGBVF_RX_WTHRESH 1
+#define IGBVF_RX_PTHRESH 16
+#define IGBVF_RX_HTHRESH 8
+#define IGBVF_RX_WTHRESH 1
/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
-#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
+#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGBVF_TX_QUEUE_WAKE 32
+#define IGBVF_TX_QUEUE_WAKE 32
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define IGBVF_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+#define IGBVF_EEPROM_APME 0x0400
-#define IGBVF_MNG_VLAN_NONE (-1)
+#define IGBVF_MNG_VLAN_NONE (-1)
/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
enum igbvf_boards {
board_vf,
@@ -116,8 +114,7 @@ struct igbvf_queue_stats {
u64 bytes;
};
-/*
- * wrappers around a pointer to a socket buffer,
+/* wrappers around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igbvf_buffer {
@@ -148,10 +145,10 @@ union igbvf_desc {
struct igbvf_ring {
struct igbvf_adapter *adapter; /* backlink */
- union igbvf_desc *desc; /* pointer to ring memory */
- dma_addr_t dma; /* phys address of ring */
- unsigned int size; /* length of ring in bytes */
- unsigned int count; /* number of desc. in ring */
+ union igbvf_desc *desc; /* pointer to ring memory */
+ dma_addr_t dma; /* phys address of ring */
+ unsigned int size; /* length of ring in bytes */
+ unsigned int count; /* number of desc. in ring */
u16 next_to_use;
u16 next_to_clean;
@@ -202,9 +199,7 @@ struct igbvf_adapter {
u32 requested_itr; /* ints/sec or adaptive */
u32 current_itr; /* Actual ITR register value, not ints/sec */
- /*
- * Tx
- */
+ /* Tx */
struct igbvf_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
@@ -226,9 +221,7 @@ struct igbvf_adapter {
u32 tx_fifo_size;
u32 tx_dma_failed;
- /*
- * Rx
- */
+ /* Rx */
struct igbvf_ring *rx_ring;
u32 rx_int_delay;
@@ -249,7 +242,7 @@ struct igbvf_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
- spinlock_t stats_lock; /* prevent concurrent stats updates */
+ spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
@@ -286,16 +279,16 @@ struct igbvf_adapter {
};
struct igbvf_info {
- enum e1000_mac_type mac;
- unsigned int flags;
- u32 pba;
- void (*init_ops)(struct e1000_hw *);
- s32 (*get_variants)(struct igbvf_adapter *);
+ enum e1000_mac_type mac;
+ unsigned int flags;
+ u32 pba;
+ void (*init_ops)(struct e1000_hw *);
+ s32 (*get_variants)(struct igbvf_adapter *);
};
/* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index b4b65bc9fc5d..7b6cb4c3764c 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -54,10 +53,10 @@ out:
}
/**
- * e1000_poll_for_ack - Wait for message acknowledgement
+ * e1000_poll_for_ack - Wait for message acknowledgment
* @hw: pointer to the HW structure
*
- * returns SUCCESS if it successfully received a message acknowledgement
+ * returns SUCCESS if it successfully received a message acknowledgment
**/
static s32 e1000_poll_for_ack(struct e1000_hw *hw)
{
@@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
s32 ret_val = -E1000_ERR_MBX;
if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
- E1000_V2PMAILBOX_RSTI))) {
+ E1000_V2PMAILBOX_RSTI))) {
ret_val = E1000_SUCCESS;
hw->mbx.stats.rsts++;
}
@@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
/* Take ownership of the buffer */
ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
- /* reserve mailbox for vf use */
+ /* reserve mailbox for VF use */
if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
ret_val = E1000_SUCCESS;
@@ -283,7 +282,7 @@ out_no_write:
}
/**
- * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ * e1000_read_mbx_vf - Reads a message from the inbox intended for VF
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -315,17 +314,18 @@ out_no_read:
}
/**
- * e1000_init_mbx_params_vf - set initial values for vf mailbox
+ * e1000_init_mbx_params_vf - set initial values for VF mailbox
* @hw: pointer to the HW structure
*
- * Initializes the hw->mbx struct to correct values for vf mailbox
+ * Initializes the hw->mbx struct to correct values for VF mailbox
*/
s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
/* start mailbox as timed out and let the reset_hw call set the timeout
- * value to being communications */
+ * value to being communications
+ */
mbx->timeout = 0;
mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
@@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
return E1000_SUCCESS;
}
-
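e1000_poll_for_ack() and the other mailbox waiters in this file share one countdown-and-delay idiom, driven by the mbx->timeout and mbx->usec_delay values initialized in e1000_init_mbx_params_vf() above. A generic sketch of that idiom only, with invented names, not the driver's function:

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch: retry a caller-supplied condition up to 'countdown' times,
 * pausing 'usec_delay' microseconds between attempts, and report a
 * timeout if it never becomes true.
 */
static int example_poll(bool (*done)(void *ctx), void *ctx,
                        int countdown, unsigned int usec_delay)
{
        while (countdown && !done(ctx)) {
                countdown--;
                udelay(usec_delay);
        }

        return countdown ? 0 : -ETIMEDOUT;
}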
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index 24370bcb0e22..f800bf8eedae 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -30,44 +29,44 @@
#include "vf.h"
-#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
-#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
-#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
-#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
-#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
-#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is E1000_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
-#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
-#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
-#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- clear to send requests */
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS 0x20000000
/* We have a total wait time of 1s for vf mailbox posted messages */
-#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
-#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */
+#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
-#define E1000_VT_MSGINFO_SHIFT 16
+#define E1000_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for exra info for certain messages */
-#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_RESET 0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
-#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
-#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
-#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
-#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
s32 e1000_init_mbx_params_vf(struct e1000_hw *);
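As a hedged illustration of how the message-type bits above are consumed (a sketch, not a quote from the driver; example_pf_nacked() is an invented helper): the PF replies by echoing the request word with E1000_VT_MSGTYPE_ACK or E1000_VT_MSGTYPE_NACK or'd in, so a refusal can be detected by masking.

#include <linux/types.h>
#include "mbx.h"

/* Sketch: true when the first word of the PF's reply carries the
 * negative-acknowledgment bit defined above.
 */
static bool example_pf_nacked(u32 msg0)
{
        return (msg0 & E1000_VT_MSGTYPE_NACK) != 0;
}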
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index ebf9d4a42fdd..95af14e139d7 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
static struct igbvf_info igbvf_vf_info = {
- .mac = e1000_vfadapt,
- .flags = 0,
- .pba = 10,
- .init_ops = e1000_init_function_pointers_vf,
+ .mac = e1000_vfadapt,
+ .flags = 0,
+ .pba = 10,
+ .init_ops = e1000_init_function_pointers_vf,
};
static struct igbvf_info igbvf_i350_vf_info = {
- .mac = e1000_vfadapt_i350,
- .flags = 0,
- .pba = 10,
- .init_ops = e1000_init_function_pointers_vf,
+ .mac = e1000_vfadapt_i350,
+ .flags = 0,
+ .pba = 10,
+ .init_ops = e1000_init_function_pointers_vf,
};
static const struct igbvf_info *igbvf_info_tbl[] = {
- [board_vf] = &igbvf_vf_info,
- [board_i350_vf] = &igbvf_i350_vf_info,
+ [board_vf] = &igbvf_vf_info,
+ [board_i350_vf] = &igbvf_i350_vf_info,
};
/**
* igbvf_desc_unused - calculate if we have unused descriptors
+ * @rx_ring: address of receive ring structure
**/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
@@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
* @skb: pointer to sk_buff to be indicated to stack
**/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
- struct net_device *netdev,
- struct sk_buff *skb,
- u32 status, u16 vlan)
+ struct net_device *netdev,
+ struct sk_buff *skb,
+ u32 status, u16 vlan)
{
u16 vid;
@@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
}
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
- u32 status_err, struct sk_buff *skb)
+ u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
* @cleaned_count: number of buffers to repopulate
**/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
- int cleaned_count)
+ int cleaned_count)
{
struct igbvf_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
@@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
buffer_info->page_dma =
dma_map_page(&pdev->dev, buffer_info->page,
- buffer_info->page_offset,
- PAGE_SIZE / 2,
+ buffer_info->page_offset,
+ PAGE_SIZE / 2,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev,
buffer_info->page_dma)) {
@@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->skb = skb;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
- bufsz,
+ bufsz,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_kfree_skb(buffer_info->skb);
@@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
}
/* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
+ * each write-back erases this info.
+ */
if (adapter->rx_ps_hdr_size) {
rx_desc->read.pkt_addr =
cpu_to_le64(buffer_info->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
} else {
- rx_desc->read.pkt_addr =
- cpu_to_le64(buffer_info->dma);
+ rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
rx_desc->read.hdr_addr = 0;
}
@@ -247,7 +247,8 @@ no_buffers:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->tail);
}
@@ -261,7 +262,7 @@ no_buffers:
* is no guarantee that everything was cleaned
**/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
- int *work_done, int work_to_do)
+ int *work_done, int work_to_do)
{
struct igbvf_ring *rx_ring = adapter->rx_ring;
struct net_device *netdev = adapter->netdev;
@@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
* that case, it fills the header buffer and spills the rest
* into the page.
*/
- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+ hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
+ & E1000_RXDADV_HDRBUFLEN_MASK) >>
+ E1000_RXDADV_HDRBUFLEN_SHIFT;
if (hlen > adapter->rx_ps_hdr_size)
hlen = adapter->rx_ps_hdr_size;
@@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) {
dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_buffer_len,
+ adapter->rx_buffer_len,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb_put(skb, length);
@@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
if (!skb_shinfo(skb)->nr_frags) {
dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_hdr_size,
+ adapter->rx_ps_hdr_size,
DMA_FROM_DEVICE);
skb_put(skb, hlen);
}
if (length) {
dma_unmap_page(&pdev->dev, buffer_info->page_dma,
- PAGE_SIZE / 2,
+ PAGE_SIZE / 2,
DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- buffer_info->page,
- buffer_info->page_offset,
- length);
+ buffer_info->page,
+ buffer_info->page_offset,
+ length);
if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
(page_count(buffer_info->page) != 1))
@@ -370,7 +372,7 @@ send_up:
skb->protocol = eth_type_trans(skb, netdev);
igbvf_receive_skb(adapter, netdev, skb, staterr,
- rx_desc->wb.upper.vlan);
+ rx_desc->wb.upper.vlan);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -402,7 +404,7 @@ next_desc:
}
static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
- struct igbvf_buffer *buffer_info)
+ struct igbvf_buffer *buffer_info)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
@@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
* Return 0 on success, negative on failure
**/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring)
+ struct igbvf_ring *tx_ring)
{
struct pci_dev *pdev = adapter->pdev;
int size;
@@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
err:
vfree(tx_ring->buffer_info);
dev_err(&adapter->pdev->dev,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ "Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
@@ -501,7 +503,7 @@ err:
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
dev_err(&adapter->pdev->dev,
- "Unable to allocate memory for the receive descriptor ring\n");
+ "Unable to allocate memory for the receive descriptor ring\n");
return -ENOMEM;
}
@@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- if (adapter->rx_ps_hdr_size){
+ if (adapter->rx_ps_hdr_size) {
dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_hdr_size,
+ adapter->rx_ps_hdr_size,
DMA_FROM_DEVICE);
} else {
dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_buffer_len,
+ adapter->rx_buffer_len,
DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
@@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
if (buffer_info->page_dma)
dma_unmap_page(&pdev->dev,
buffer_info->page_dma,
- PAGE_SIZE / 2,
+ PAGE_SIZE / 2,
DMA_FROM_DEVICE);
put_page(buffer_info->page);
buffer_info->page = NULL;
@@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
rx_ring->buffer_info = NULL;
dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
*
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
+ * Stores a new ITR value based on packets and byte counts during the last
+ * interrupt. The advantage of per interrupt computation is faster updates
+ * and more accurate ITR for the current traffic pattern. Constants in this
+ * function were computed based on theoretical maximum wire speed and thresholds
+ * were set based on testing data as well as attempting to minimize response
+ * time while increasing bulk throughput.
**/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
enum latency_range itr_setting,
@@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
-
if (new_itr != adapter->tx_ring->itr_val) {
u32 current_itr = adapter->tx_ring->itr_val;
- /*
- * this attempts to bias the interrupt rate towards Bulk
+ /* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
* increasing
*/
new_itr = new_itr > current_itr ?
- min(current_itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(current_itr + (new_itr >> 2), new_itr) :
+ new_itr;
adapter->tx_ring->itr_val = new_itr;
adapter->tx_ring->set_itr = 1;
@@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
if (new_itr != adapter->rx_ring->itr_val) {
u32 current_itr = adapter->rx_ring->itr_val;
+
new_itr = new_itr > current_itr ?
- min(current_itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(current_itr + (new_itr >> 2), new_itr) :
+ new_itr;
adapter->rx_ring->itr_val = new_itr;
adapter->rx_ring->set_itr = 1;
@@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
segs = skb_shinfo(skb)->gso_segs ?: 1;
/* multiply data chunks by size of headers */
bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
+ skb->len;
total_packets += segs;
total_bytes += bytecount;
}
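The byte accounting above charges the replicated protocol headers of a GSO packet once per extra segment. A throwaway illustration of that formula, with invented parameter names:

/* Sketch: wire bytes for a packet split into 'segs' segments is the
 * packet length plus one additional copy of the headers (skb_headlen)
 * for every segment after the first.
 */
static unsigned int example_tx_bytecount(unsigned int segs,
                                         unsigned int headlen,
                                         unsigned int len)
{
        return (segs - 1) * headlen + len;
}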
@@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
tx_ring->next_to_clean = i;
- if (unlikely(count &&
- netif_carrier_ok(netdev) &&
- igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
- /* auto mask will automatically reenable the interrupt when we write
- * EICS */
+ /* auto mask will automatically re-enable the interrupt when we write
+ * EICS
+ */
if (!igbvf_clean_tx_irq(tx_ring))
/* Ring was not completely cleaned, so fire another interrupt */
ew32(EICS, tx_ring->eims_value);
@@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
#define IGBVF_NO_QUEUE -1
static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
- int tx_queue, int msix_vector)
+ int tx_queue, int msix_vector)
{
struct e1000_hw *hw = &adapter->hw;
u32 ivar, index;
/* 82576 uses a table-based method for assigning vectors.
- Each queue has a single entry in the table to which we write
- a vector number along with a "valid" bit. Sadly, the layout
- of the table is somewhat counterintuitive. */
+ * Each queue has a single entry in the table to which we write
+ * a vector number along with a "valid" bit. Sadly, the layout
+ * of the table is somewhat counterintuitive.
+ */
if (rx_queue > IGBVF_NO_QUEUE) {
index = (rx_queue >> 1);
ivar = array_er32(IVAR0, index);
@@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
/**
* igbvf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
*
* igbvf_configure_msix sets up the hardware to properly
* generate MSI-X interrupts.
@@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
/**
* igbvf_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
*
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
int err = -ENOMEM;
int i;
- /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
+ /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
- GFP_KERNEL);
+ GFP_KERNEL);
if (adapter->msix_entries) {
for (i = 0; i < 3; i++)
adapter->msix_entries[i].entry = i;
err = pci_enable_msix_range(adapter->pdev,
- adapter->msix_entries, 3, 3);
+ adapter->msix_entries, 3, 3);
}
if (err < 0) {
/* MSI-X failed */
dev_err(&adapter->pdev->dev,
- "Failed to initialize MSI-X interrupts.\n");
+ "Failed to initialize MSI-X interrupts.\n");
igbvf_reset_interrupt_capability(adapter);
}
}
/**
* igbvf_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
*
* igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
* kernel.
@@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
}
err = request_irq(adapter->msix_entries[vector].vector,
- igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
- netdev);
+ igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
+ netdev);
if (err)
goto out;
@@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
- igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
- netdev);
+ igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+ netdev);
if (err)
goto out;
@@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
- igbvf_msix_other, 0, netdev->name, netdev);
+ igbvf_msix_other, 0, netdev->name, netdev);
if (err)
goto out;
@@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
/**
* igbvf_request_irq - initialize interrupts
+ * @adapter: board private structure
*
* Attempts to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter)
return err;
dev_err(&adapter->pdev->dev,
- "Unable to allocate interrupt, Error: %d\n", err);
+ "Unable to allocate interrupt, Error: %d\n", err);
return err;
}
@@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter)
/**
* igbvf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
**/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
@@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter)
/**
* igbvf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
**/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
@@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
if (hw->mac.ops.set_vfta(hw, vid, false)) {
dev_err(&adapter->pdev->dev,
- "Failed to remove vlan id %d\n", vid);
+ "Failed to remove vlan id %d\n", vid);
return -EINVAL;
}
clear_bit(vid, adapter->active_vlans);
@@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
/* Turn off Relaxed Ordering on head write-backs. The writebacks
* MUST be delivered in order or it will completely screw up
- * our bookeeping.
+ * our bookkeeping.
*/
dca_txctrl = er32(DCA_TXCTRL(0));
dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
@@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
u32 srrctl = 0;
srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
- E1000_SRRCTL_BSIZEHDR_MASK |
- E1000_SRRCTL_BSIZEPKT_MASK);
+ E1000_SRRCTL_BSIZEHDR_MASK |
+ E1000_SRRCTL_BSIZEPKT_MASK);
/* Enable queue drop to avoid head of line blocking */
srrctl |= E1000_SRRCTL_DROP_EN;
/* Setup buffer sizes */
srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
if (adapter->rx_buffer_len < 2048) {
adapter->rx_ps_hdr_size = 0;
@@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
} else {
adapter->rx_ps_hdr_size = 128;
srrctl |= adapter->rx_ps_hdr_size <<
- E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
}
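The igbvf_setup_srrctl() hunk above encodes the Rx buffer size into SRRCTL by rounding to 1 KB and shifting right by E1000_SRRCTL_BSIZEPKT_SHIFT. A small sketch of just that step, assuming the driver headers are available; example_srrctl_bsizepkt() is a hypothetical name:

#include <linux/kernel.h>
#include "defines.h"

/* Sketch: express an Rx buffer length in the 1 KB units expected by
 * the SRRCTL BSIZEPKT field, as the converted lines above do.
 */
static u32 example_srrctl_bsizepkt(u32 rx_buffer_len)
{
        return ALIGN(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
}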
@@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
- /*
- * Setup the HW Rx Head and Tail Descriptor Pointers and
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
rdba = rx_ring->dma;
@@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter)
igbvf_setup_srrctl(adapter);
igbvf_configure_rx(adapter);
igbvf_alloc_rx_buffers(adapter->rx_ring,
- igbvf_desc_unused(adapter->rx_ring));
+ igbvf_desc_unused(adapter->rx_ring));
}
/* igbvf_reset - bring the hardware into a known good state
+ * @adapter: private board structure
*
* This function boots the hardware and enables some settings that
* require a configuration cycle of the hardware - those cannot be
@@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter)
hw->mac.get_link_status = 1;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
-
return 0;
}
@@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rxdctl, txdctl;
- /*
- * signal that we're down so the interrupt handler does not
+ /* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer
*/
set_bit(__IGBVF_DOWN, &adapter->state);
@@ -1514,6 +1519,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ netif_carrier_off(netdev);
netif_stop_queue(netdev);
/* disable transmits in the hardware */
@@ -1530,8 +1536,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
- netif_carrier_off(netdev);
-
/* record the stats before reset*/
igbvf_update_stats(adapter);
@@ -1547,7 +1551,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
might_sleep();
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igbvf_down(adapter);
igbvf_up(adapter);
clear_bit(__IGBVF_RESETTING, &adapter->state);
@@ -1662,8 +1666,7 @@ static int igbvf_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
- /*
- * before we allocate an interrupt, we must be ready to handle it.
+ /* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
* clean_rx handler before we do so.
@@ -1725,6 +1728,7 @@ static int igbvf_close(struct net_device *netdev)
return 0;
}
+
/**
* igbvf_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -1753,15 +1757,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
return 0;
}
-#define UPDATE_VF_COUNTER(reg, name) \
- { \
- u32 current_counter = er32(reg); \
- if (current_counter < adapter->stats.last_##name) \
- adapter->stats.name += 0x100000000LL; \
- adapter->stats.last_##name = current_counter; \
- adapter->stats.name &= 0xFFFFFFFF00000000LL; \
- adapter->stats.name |= current_counter; \
- }
+#define UPDATE_VF_COUNTER(reg, name) \
+{ \
+ u32 current_counter = er32(reg); \
+ if (current_counter < adapter->stats.last_##name) \
+ adapter->stats.name += 0x100000000LL; \
+ adapter->stats.last_##name = current_counter; \
+ adapter->stats.name &= 0xFFFFFFFF00000000LL; \
+ adapter->stats.name |= current_counter; \
+}
/**
* igbvf_update_stats - Update the board statistics counters
@@ -1772,8 +1776,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- /*
- * Prevent stats update while adapter is being reset, link is down
+ /* Prevent stats update while adapter is being reset, link is down
* or if the pci connection is down.
*/
if (adapter->link_speed == 0)
@@ -1832,7 +1835,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
**/
static void igbvf_watchdog(unsigned long data)
{
- struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
+ struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -1841,8 +1844,8 @@ static void igbvf_watchdog(unsigned long data)
static void igbvf_watchdog_task(struct work_struct *work)
{
struct igbvf_adapter *adapter = container_of(work,
- struct igbvf_adapter,
- watchdog_task);
+ struct igbvf_adapter,
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -1855,8 +1858,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
mac->ops.get_link_up_info(&adapter->hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
igbvf_print_link_info(adapter);
netif_carrier_on(netdev);
@@ -1876,10 +1879,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
igbvf_update_stats(adapter);
} else {
tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
- tx_ring->count);
+ tx_ring->count);
if (tx_pending) {
- /*
- * We've lost link, so the controller stops DMA,
+ /* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
@@ -1898,15 +1900,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
round_jiffies(jiffies + (2 * HZ)));
}
-#define IGBVF_TX_FLAGS_CSUM 0x00000001
-#define IGBVF_TX_FLAGS_VLAN 0x00000002
-#define IGBVF_TX_FLAGS_TSO 0x00000004
-#define IGBVF_TX_FLAGS_IPV4 0x00000008
-#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
+#define IGBVF_TX_FLAGS_CSUM 0x00000001
+#define IGBVF_TX_FLAGS_VLAN 0x00000002
+#define IGBVF_TX_FLAGS_TSO 0x00000004
+#define IGBVF_TX_FLAGS_IPV4 0x00000008
+#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
static int igbvf_tso(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
+ struct igbvf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
__be16 protocol)
{
@@ -1930,17 +1932,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
+
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
}
i = tx_ring->next_to_use;
@@ -1984,7 +1987,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
}
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
+ struct igbvf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
__be16 protocol)
{
@@ -2005,8 +2008,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
info |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
+ skb_network_header(skb));
context_desc->vlan_macip_lens = cpu_to_le32(info);
@@ -2055,6 +2057,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again just in case room has been made available */
@@ -2067,11 +2073,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
return 0;
}
-#define IGBVF_MAX_TXD_PWR 16
-#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_TXD_PWR 16
+#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
+ struct igbvf_ring *tx_ring,
struct sk_buff *skb)
{
struct igbvf_buffer *buffer_info;
@@ -2093,7 +2099,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
-
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
const struct skb_frag_struct *frag;
@@ -2111,7 +2116,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
- DMA_TO_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
}
@@ -2133,7 +2138,7 @@ dma_error:
/* clear timestamp and dma mappings for remaining portion of packet */
while (count--) {
- if (i==0)
+ if (i == 0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
@@ -2144,10 +2149,10 @@ dma_error:
}
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
+ struct igbvf_ring *tx_ring,
int tx_flags, int count,
unsigned int first, u32 paylen,
- u8 hdr_len)
+ u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc = NULL;
struct igbvf_buffer *buffer_info;
@@ -2155,7 +2160,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
unsigned int i;
cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
+ E1000_ADVTXD_DCMD_DEXT);
if (tx_flags & IGBVF_TX_FLAGS_VLAN)
cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
@@ -2182,7 +2187,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | buffer_info->length);
+ cpu_to_le32(cmd_type_len | buffer_info->length);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
i++;
if (i == tx_ring->count)
@@ -2193,14 +2198,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
}
@@ -2225,11 +2232,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- /*
- * need: count + 4 desc gap to keep tail from touching
- * + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
- * + 1 desc for context descriptor,
+ /* need: count + 4 desc gap to keep tail from touching
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for skb->data,
+ * + 1 desc for context descriptor,
* head, otherwise try next time
*/
if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
@@ -2258,11 +2264,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
if (tso)
tx_flags |= IGBVF_TX_FLAGS_TSO;
else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ (skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IGBVF_TX_FLAGS_CSUM;
- /*
- * count reflects descriptors mapped, if 0 then mapping error
+ /* count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue
*/
count = igbvf_tx_map_adv(adapter, tx_ring, skb);
@@ -2313,6 +2318,7 @@ static void igbvf_tx_timeout(struct net_device *netdev)
static void igbvf_reset_task(struct work_struct *work)
{
struct igbvf_adapter *adapter;
+
adapter = container_of(work, struct igbvf_adapter, reset_task);
igbvf_reinit_locked(adapter);
@@ -2356,14 +2362,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
}
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igbvf_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
if (netif_running(netdev))
igbvf_down(adapter);
- /*
- * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
@@ -2382,15 +2387,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif
-
/* adjust allocation if LPE protects us, and we aren't using SBP */
if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
- ETH_FCS_LEN;
+ ETH_FCS_LEN;
dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -2477,8 +2481,7 @@ static void igbvf_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
@@ -2503,7 +2506,7 @@ static void igbvf_netpoll(struct net_device *netdev)
* this device has been detected.
*/
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2583,7 +2586,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
}
static int igbvf_set_features(struct net_device *netdev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2596,21 +2599,21 @@ static int igbvf_set_features(struct net_device *netdev,
}
static const struct net_device_ops igbvf_netdev_ops = {
- .ndo_open = igbvf_open,
- .ndo_stop = igbvf_close,
- .ndo_start_xmit = igbvf_xmit_frame,
- .ndo_get_stats = igbvf_get_stats,
- .ndo_set_rx_mode = igbvf_set_multi,
- .ndo_set_mac_address = igbvf_set_mac,
- .ndo_change_mtu = igbvf_change_mtu,
- .ndo_do_ioctl = igbvf_ioctl,
- .ndo_tx_timeout = igbvf_tx_timeout,
- .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
+ .ndo_open = igbvf_open,
+ .ndo_stop = igbvf_close,
+ .ndo_start_xmit = igbvf_xmit_frame,
+ .ndo_get_stats = igbvf_get_stats,
+ .ndo_set_rx_mode = igbvf_set_multi,
+ .ndo_set_mac_address = igbvf_set_mac,
+ .ndo_change_mtu = igbvf_change_mtu,
+ .ndo_do_ioctl = igbvf_ioctl,
+ .ndo_tx_timeout = igbvf_tx_timeout,
+ .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = igbvf_netpoll,
+ .ndo_poll_controller = igbvf_netpoll,
#endif
- .ndo_set_features = igbvf_set_features,
+ .ndo_set_features = igbvf_set_features,
};
/**
@@ -2645,8 +2648,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
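The probe path above prefers a 64-bit DMA mask and only falls back to 32 bits before bailing out. A minimal standalone sketch of that pattern, assuming the usual <linux/pci.h>/<linux/dma-mapping.h> includes; the helper name is hypothetical, while dma_set_mask_and_coherent() and DMA_BIT_MASK() are the real kernel APIs used here:

static int example_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* prefer 64-bit DMA; callers would also note it for NETIF_F_HIGHDMA */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err)
		return 0;

	/* otherwise fall back to 32-bit addressing */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");

	return err;
}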
@@ -2686,7 +2689,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ pci_resource_len(pdev, 0));
if (!adapter->hw.hw_addr)
goto err_ioremap;
@@ -2712,16 +2715,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->bd_number = cards_found++;
netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
+ NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXCSUM;
netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -2742,7 +2745,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pdev->dev, "Error reading MAC address.\n");
else if (is_zero_ether_addr(adapter->hw.mac.addr))
- dev_info(&pdev->dev, "MAC address not assigned by administrator.\n");
+ dev_info(&pdev->dev,
+ "MAC address not assigned by administrator.\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
}
@@ -2751,11 +2755,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "Assigning random MAC address.\n");
eth_hw_addr_random(netdev);
memcpy(adapter->hw.mac.addr, netdev->dev_addr,
- netdev->addr_len);
+ netdev->addr_len);
}
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
- (unsigned long) adapter);
+ (unsigned long)adapter);
INIT_WORK(&adapter->reset_task, igbvf_reset_task);
INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
@@ -2818,8 +2822,7 @@ static void igbvf_remove(struct pci_dev *pdev)
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * The watchdog timer may be rescheduled, so explicitly
+ /* The watchdog timer may be rescheduled, so explicitly
* disable it from being rescheduled.
*/
set_bit(__IGBVF_DOWN, &adapter->state);
@@ -2832,9 +2835,8 @@ static void igbvf_remove(struct pci_dev *pdev)
igbvf_reset_interrupt_capability(adapter);
- /*
- * it is important to delete the napi struct prior to freeing the
- * rx ring so that you do not end up with null pointer refs
+ /* it is important to delete the NAPI struct prior to freeing the
+ * Rx ring so that you do not end up with null pointer refs
*/
netif_napi_del(&adapter->rx_ring->napi);
kfree(adapter->tx_ring);
@@ -2866,17 +2868,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
- .name = igbvf_driver_name,
- .id_table = igbvf_pci_tbl,
- .probe = igbvf_probe,
- .remove = igbvf_remove,
+ .name = igbvf_driver_name,
+ .id_table = igbvf_pci_tbl,
+ .probe = igbvf_probe,
+ .remove = igbvf_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
- .suspend = igbvf_suspend,
- .resume = igbvf_resume,
+ .suspend = igbvf_suspend,
+ .resume = igbvf_resume,
#endif
- .shutdown = igbvf_shutdown,
- .err_handler = &igbvf_err_handler
+ .shutdown = igbvf_shutdown,
+ .err_handler = &igbvf_err_handler
};
/**
@@ -2888,6 +2890,7 @@ static struct pci_driver igbvf_driver = {
static int __init igbvf_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
pr_info("%s\n", igbvf_copyright);
@@ -2909,7 +2912,6 @@ static void __exit igbvf_exit_module(void)
}
module_exit(igbvf_exit_module);
-
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 7dc6341715dc..86a7c120b574 100644
--- a/drivers/net/ethernet/intel/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -28,81 +27,81 @@
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
-#define E1000_CTRL 0x00000 /* Device Control - RW */
-#define E1000_STATUS 0x00008 /* Device Status - RO */
-#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
-#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
-#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
-#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
-#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
-#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
-#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
-#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
-#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
-/*
- * Convenience macros
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+
+/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
* Example usage:
* E1000_RDBAL_REG(current_rx_queue)
*/
-#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
- (0x0C000 + ((_n) * 0x40)))
-#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
- (0x0C004 + ((_n) * 0x40)))
-#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
- (0x0C008 + ((_n) * 0x40)))
-#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
- (0x0C00C + ((_n) * 0x40)))
-#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
- (0x0C010 + ((_n) * 0x40)))
-#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
- (0x0C018 + ((_n) * 0x40)))
-#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
- (0x0C028 + ((_n) * 0x40)))
-#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
- (0x0E000 + ((_n) * 0x40)))
-#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
- (0x0E004 + ((_n) * 0x40)))
-#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
- (0x0E008 + ((_n) * 0x40)))
-#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
- (0x0E010 + ((_n) * 0x40)))
-#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
- (0x0E018 + ((_n) * 0x40)))
-#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
- (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
-#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
-#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
+#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
/* Statistics registers */
-#define E1000_VFGPRC 0x00F10
-#define E1000_VFGORC 0x00F18
-#define E1000_VFMPRC 0x00F3C
-#define E1000_VFGPTC 0x00F14
-#define E1000_VFGOTC 0x00F34
-#define E1000_VFGOTLBC 0x00F50
-#define E1000_VFGPTLBC 0x00F44
-#define E1000_VFGORLBC 0x00F48
-#define E1000_VFGPRLBC 0x00F40
+#define E1000_VFGPRC 0x00F10
+#define E1000_VFGORC 0x00F18
+#define E1000_VFMPRC 0x00F3C
+#define E1000_VFGPTC 0x00F14
+#define E1000_VFGOTC 0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
/* These act per VF so an array friendly macro is used */
-#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
-#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
/* Define macros for handling registers */
-#define er32(reg) readl(hw->hw_addr + E1000_##reg)
-#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg)
+#define er32(reg) readl(hw->hw_addr + E1000_##reg)
+#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg)
#define array_er32(reg, offset) \
readl(hw->hw_addr + E1000_##reg + (offset << 2))
#define array_ew32(reg, offset, val) \
writel((val), hw->hw_addr + E1000_##reg + (offset << 2))
-#define e1e_flush() er32(STATUS)
+#define e1e_flush() er32(STATUS)
#endif
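The register helpers above rely on a local struct e1000_hw *hw being in scope. A short sketch of how they expand; the function and the written value are hypothetical, the macros and offsets are the ones defined in this header:

static void example_tune_itr(struct e1000_hw *hw, u32 new_itr)
{
	u32 old = er32(ITR);		/* readl(hw->hw_addr + E1000_ITR), i.e. + 0x000C4 */

	if (old != new_itr)
		ew32(ITR, new_itr);	/* writel(new_itr, hw->hw_addr + 0x000C4) */
	e1e_flush();			/* flush posted writes via a STATUS read */
}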
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 955ad8c2c534..a13baa90ae20 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -25,17 +24,16 @@
*******************************************************************************/
-
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
- u16 *duplex);
+ u16 *duplex);
static s32 e1000_init_hw_vf(struct e1000_hw *hw);
static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
- u32, u32, u32);
+ u32, u32, u32);
static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
@@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw)
* the status register's data which is often stale and inaccurate.
**/
static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
+ u16 *duplex)
{
s32 status;
@@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
u8 *addr = (u8 *)(&msgbuf[1]);
u32 ctrl;
- /* assert vf queue/interrupt reset */
+ /* assert VF queue/interrupt reset */
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_RST);
@@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* mailbox timeout can now become active */
mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
- /* notify pf of vf reset completion */
+ /* notify PF of VF reset completion */
msgbuf[0] = E1000_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1);
@@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
+ if (msgbuf[0] == (E1000_VF_RESET |
+ E1000_VT_MSGTYPE_ACK))
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
else
ret_val = -E1000_ERR_MAC_INIT;
@@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
- /*
- * The bit_shift is the number of left-shifts
+ /* The bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
- (((u16) mc_addr[5]) << bit_shift)));
+ (((u16)mc_addr[5]) << bit_shift)));
return hash_value;
}
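A worked example of the hash above, assuming hw->mac.mta_reg_count is 128 (hash_mask = 0xFFF, so the loop stops at bit_shift = 4); the helper is a standalone sketch, not driver code:

static u32 example_mc_hash_128_regs(const u8 *mc_addr)
{
	u32 hash_mask = (128 * 32) - 1;		/* 0xFFF */
	u32 bit_shift = 0;

	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;			/* 0xFFF >> 4 == 0xFF -> bit_shift = 4 */

	/* an address ending in ...:ab:cd yields
	 * (0xab >> 4) | (0xcd << 4) = 0x00a | 0xcd0 = 0xcda
	 */
	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((u16)mc_addr[5] << bit_shift));
}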
@@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
* unless there are workarounds that change this.
**/
static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count)
+ u8 *mc_addr_list, u32 mc_addr_count,
+ u32 rar_used_count, u32 rar_count)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[E1000_VFMAILBOX_SIZE];
@@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
* @addr: pointer to the receive address
* @index: receive address array register
**/
-static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
@@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u32 in_msg = 0;
- /*
- * We only want to run this if there has been a rst asserted.
+ /* We only want to run this if there has been a rst asserted.
* in this case that could mean a link change, device reset,
* or a virtual function reset
*/
@@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
if (!mac->get_link_status)
goto out;
- /* if link status is down no point in checking to see if pf is up */
+ /* if link status is down no point in checking to see if PF is up */
if (!(er32(STATUS) & E1000_STATUS_LU))
goto out;
/* if the read failed it could just be a mailbox collision, best wait
- * until we are called again and don't report an error */
+ * until we are called again and don't report an error
+ */
if (mbx->ops.read(hw, &in_msg, 1))
goto out;
/* if incoming message isn't clear to send we are waiting on response */
if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
- /* message is not CTS and is NACK we must have lost CTS status */
+ /* msg is not CTS and is NACK we must have lost CTS status */
if (in_msg & E1000_VT_MSGTYPE_NACK)
ret_val = -E1000_ERR_MAC_INIT;
goto out;
}
- /* the pf is talking, if we timed out in the past we reinit */
+ /* the PF is talking, if we timed out in the past we reinit */
if (!mbx->timeout) {
ret_val = -E1000_ERR_MAC_INIT;
goto out;
}
/* if we passed all the tests above then the link is up and we no
- * longer need to check for link */
+ * longer need to check for link
+ */
mac->get_link_status = false;
out:
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index 57db3c68dfcd..0f1eca639f68 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -38,30 +37,29 @@
struct e1000_hw;
-#define E1000_DEV_ID_82576_VF 0x10CA
-#define E1000_DEV_ID_I350_VF 0x1520
-#define E1000_REVISION_0 0
-#define E1000_REVISION_1 1
-#define E1000_REVISION_2 2
-#define E1000_REVISION_3 3
-#define E1000_REVISION_4 4
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_I350_VF 0x1520
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
-#define E1000_FUNC_0 0
-#define E1000_FUNC_1 1
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
-/*
- * Receive Address Register Count
+/* Receive Address Register Count
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* These entries are also used for MAC-based filtering.
*/
-#define E1000_RAR_ENTRIES_VF 1
+#define E1000_RAR_ENTRIES_VF 1
/* Receive Descriptor - Advanced */
union e1000_adv_rx_desc {
struct {
- u64 pkt_addr; /* Packet buffer address */
- u64 hdr_addr; /* Header buffer address */
+ u64 pkt_addr; /* Packet buffer address */
+ u64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
@@ -69,53 +67,53 @@ union e1000_adv_rx_desc {
u32 data;
struct {
u16 pkt_info; /* RSS/Packet type */
- u16 hdr_info; /* Split Header,
- * hdr buffer length */
+ /* Split Header, hdr buffer length */
+ u16 hdr_info;
} hs_rss;
} lo_dword;
union {
- u32 rss; /* RSS Hash */
+ u32 rss; /* RSS Hash */
struct {
- u16 ip_id; /* IP id */
- u16 csum; /* Packet Checksum */
+ u16 ip_id; /* IP id */
+ u16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
- u32 status_error; /* ext status/error */
- u16 length; /* Packet length */
- u16 vlan; /* VLAN tag */
+ u32 status_error; /* ext status/error */
+ u16 length; /* Packet length */
+ u16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
-#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
-#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
struct {
- u64 buffer_addr; /* Address of descriptor's data buf */
+ u64 buffer_addr; /* Address of descriptor's data buf */
u32 cmd_type_len;
u32 olinfo_status;
} read;
struct {
- u64 rsvd; /* Reserved */
+ u64 rsvd; /* Reserved */
u32 nxtseq_seed;
u32 status;
} wb;
};
/* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
/* Context descriptors */
struct e1000_adv_tx_context_desc {
@@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc {
u32 mss_l4len_idx;
};
-#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
enum e1000_mac_type {
e1000_undefined = 0,
@@ -262,5 +260,4 @@ struct e1000_hw {
void e1000_rlpml_set_vf(struct e1000_hw *, u16);
void e1000_init_function_pointers_vf(struct e1000_hw *hw);
-
#endif /* _E1000_VF_H_ */
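The advanced Rx descriptor above is reused by hardware for writeback completion status. A hedged sketch of reading the completed fields; the helper name is hypothetical, the field names and le*_to_cpu usage follow the igbvf clean-up path:

static void example_read_wb_desc(const union e1000_adv_rx_desc *rx_desc,
				 u32 *staterr, u16 *len, u16 *vlan)
{
	/* DD/EOP and error bits live in status_error; checks omitted here */
	*staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	*len = le16_to_cpu(rx_desc->wb.upper.length);
	*vlan = le16_to_cpu(rx_desc->wb.upper.vlan);
}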
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 11a1bdbe3fd9..31f91459312f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -285,6 +285,8 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
/* prevent the interrupt handler from restarting watchdog */
set_bit(__IXGB_DOWN, &adapter->flags);
+ netif_carrier_off(netdev);
+
napi_disable(&adapter->napi);
/* waiting for NAPI to complete can re-enable interrupts */
ixgb_irq_disable(adapter);
@@ -298,7 +300,6 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
adapter->link_speed = 0;
adapter->link_duplex = 0;
- netif_carrier_off(netdev);
netif_stop_queue(netdev);
ixgb_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7dcbbec09a70..636f9e350162 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -151,6 +151,7 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
+ bool rss_query_enabled;
unsigned int vf_api;
};
@@ -613,7 +614,6 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7)
#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
@@ -643,7 +643,6 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
@@ -723,6 +722,8 @@ struct ixgbe_adapter {
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
+ u16 bridge_mode;
+
u16 eeprom_verh;
u16 eeprom_verl;
u16 eeprom_cap;
@@ -766,6 +767,15 @@ struct ixgbe_adapter {
u8 default_up;
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+
+/* maximum number of RETA entries among all devices supported by ixgbe
+ * driver: currently it's x550 device in non-SRIOV mode
+ */
+#define IXGBE_MAX_RETA_ENTRIES 512
+ u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];
+
+#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+ u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -955,4 +965,5 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring);
+u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index c5c97b483d7c..824a7ab79ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -171,17 +171,21 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function.
- * Disables relaxed ordering Then set pcie completion timeout
+ * Disables relaxed ordering for archs other than SPARC
+ * Then set pcie completion timeout
*
**/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
+#ifndef CONFIG_SPARC
u32 regval;
u32 i;
+#endif
s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw);
+#ifndef CONFIG_SPARC
/* Disable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
@@ -197,7 +201,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
-
+#endif
if (ret_val)
return ret_val;
@@ -1193,6 +1197,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic,
.prot_autoc_write = &prot_autoc_write_generic,
+ .enable_rx = &ixgbe_enable_rx_generic,
+ .disable_rx = &ixgbe_disable_rx_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index cf55a0df877b..e0c363948bf4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1977,7 +1977,10 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
*/
hw->mac.ops.disable_rx_buff(hw);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+ if (regval & IXGBE_RXCTRL_RXEN)
+ hw->mac.ops.enable_rx(hw);
+ else
+ hw->mac.ops.disable_rx(hw);
hw->mac.ops.enable_rx_buff(hw);
@@ -2336,6 +2339,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
.prot_autoc_read = &prot_autoc_read_82599,
.prot_autoc_write = &prot_autoc_write_82599,
+ .enable_rx = &ixgbe_enable_rx_generic,
+ .disable_rx = &ixgbe_disable_rx_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9c66babd4edd..06d8f3cfa099 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -312,7 +312,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
u32 i;
- u32 regval;
/* Clear the rate limiters */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
@@ -321,20 +320,25 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
+#ifndef CONFIG_SPARC
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ u32 regval;
+
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ u32 regval;
+
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
-
+#endif
return 0;
}
@@ -703,7 +707,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
hw->adapter_stopped = true;
/* Disable the receive unit */
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
+ hw->mac.ops.disable_rx(hw);
/* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2639,7 +2643,10 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
**/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+ if (regval & IXGBE_RXCTRL_RXEN)
+ hw->mac.ops.enable_rx(hw);
+ else
+ hw->mac.ops.disable_rx(hw);
return 0;
}
@@ -3850,3 +3857,44 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0;
}
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ u32 pfdtxgswc;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+ }
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+}
+
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (hw->mac.set_lben) {
+ u32 pfdtxgswc;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = false;
+ }
+ }
+}
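With enable_rx/disable_rx now in the mac ops table, the MAC-specific LBEN handling above stays in one place. A sketch of the calling convention the rest of this patch switches to:

static void example_rx_restart(struct ixgbe_hw *hw)
{
	/* was: open-coded clear of IXGBE_RXCTRL_RXEN at every call site */
	hw->mac.ops.disable_rx(hw);

	/* ... reprogram Rx rings here ... */

	/* was: open-coded set of IXGBE_RXCTRL_RXEN */
	hw->mac.ops.enable_rx(hw);
}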
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 8cfadcb2676e..f21f8a165ec4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -130,6 +130,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
#define IXGBE_FAILED_READ_REG 0xffffffffU
#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e5be0dd508de..eafa9ec802ba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1351,7 +1351,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
if (ixgbe_removed(adapter->hw.hw_addr)) {
*data = 1;
- return 1;
+ return true;
}
for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
before = ixgbe_read_reg(&adapter->hw, reg);
@@ -1376,7 +1376,7 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
if (ixgbe_removed(adapter->hw.hw_addr)) {
*data = 1;
- return 1;
+ return true;
}
before = ixgbe_read_reg(&adapter->hw, reg);
ixgbe_write_reg(&adapter->hw, reg, write & mask);
@@ -1637,9 +1637,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
/* shut down the DMA engines now so they can be reinitialized later */
/* first Rx */
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- reg_ctl &= ~IXGBE_RXCTRL_RXEN;
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
+ hw->mac.ops.disable_rx(hw);
ixgbe_disable_rx_queue(adapter, rx_ring);
/* now Tx */
@@ -1670,6 +1668,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
u32 rctl, reg_data;
int ret_val;
int err;
@@ -1713,14 +1712,16 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
goto err_nomem;
}
- rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
+ hw->mac.ops.disable_rx(hw);
ixgbe_configure_rx_ring(adapter, rx_ring);
- rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
+ rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+ rctl |= IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
+ hw->mac.ops.enable_rx(hw);
+
return 0;
err_nomem:
@@ -2852,6 +2853,45 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ return sizeof(adapter->rss_key);
+}
+
+static u32 ixgbe_rss_indir_size(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ return ixgbe_rss_indir_tbl_entries(adapter);
+}
+
+static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
+{
+ int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
+
+ for (i = 0; i < reta_size; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+}
+
+static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (indir)
+ ixgbe_get_reta(adapter, indir);
+
+ if (key)
+ memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
+
+ return 0;
+}
+
static int ixgbe_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -3109,6 +3149,9 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
+ .get_rxfh_indir_size = ixgbe_rss_indir_size,
+ .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
+ .get_rxfh = ixgbe_get_rxfh,
.get_channels = ixgbe_get_channels,
.set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
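The new get_rxfh hooks report the same table the driver programs into the RETA; conceptually the hardware picks the Rx queue as sketched below. This is a simplification for illustration only; with these hooks in place, ethtool -x <dev> should be able to dump the table and hash key.

/* conceptual sketch only: low-order hash bits index an 8-bit-per-entry
 * table of queue numbers (128, 512 or 64 entries depending on the MAC)
 */
static u8 example_rss_queue(const u8 *reta, u32 reta_size, u32 rss_hash)
{
	return reta[rss_hash % reta_size];
}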
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 2ad91cb04dab..631c603fc966 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -71,6 +71,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
struct ixgbe_fcoe *fcoe;
struct ixgbe_adapter *adapter;
struct ixgbe_fcoe_ddp *ddp;
+ struct ixgbe_hw *hw;
u32 fcbuff;
if (!netdev)
@@ -85,25 +86,51 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (!ddp->udl)
return 0;
+ hw = &adapter->hw;
len = ddp->len;
- /* if there an error, force to invalidate ddp context */
- if (ddp->err) {
+ /* if no error then skip ddp context invalidation */
+ if (!ddp->err)
+ goto skip_ddpinv;
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* X550 does not require DDP FCoE lock */
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
+ (xid | IXGBE_FCFLTRW_WE));
+
+ /* program FCBUFF */
+ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);
+
+ /* program FCDMARW */
+ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
+ (xid | IXGBE_FCDMARW_WE));
+
+ /* read FCBUFF to check context invalidated */
+ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
+ (xid | IXGBE_FCDMARW_RE));
+ fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
+ } else {
+ /* other hardware requires DDP FCoE lock */
spin_lock_bh(&fcoe->lock);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
(xid | IXGBE_FCFLTRW_WE));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_WE));
/* guaranteed to be invalidated after 100us */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_RE));
- fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
+ fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
spin_unlock_bh(&fcoe->lock);
- if (fcbuff & IXGBE_FCBUFF_VALID)
- udelay(100);
- }
+ }
+
+ if (fcbuff & IXGBE_FCBUFF_VALID)
+ usleep_range(100, 150);
+
+skip_ddpinv:
if (ddp->sgl)
dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
@@ -272,7 +299,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
/* program DMA context */
hw = &adapter->hw;
- spin_lock_bh(&fcoe->lock);
/* turn on last frame indication for target mode as FCP_RSPtarget is
* supposed to send FCP_RSP when it is done. */
@@ -283,16 +309,33 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
}
- IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
- IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
- IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
- IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
- /* program filter context */
- IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
- IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
- IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* X550 does not require DDP lock */
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
+ ddp->udp & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
+ /* program filter context */
+ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
+ } else {
+ /* DDP lock for indirect DDP context access */
+ spin_lock_bh(&fcoe->lock);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
+ /* program filter context */
+ IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
- spin_unlock_bh(&fcoe->lock);
+ spin_unlock_bh(&fcoe->lock);
+ }
return 1;
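The two DDP programming paths above differ mainly in addressing; a one-glance summary, grounded only in the code in this hunk:

/* X550:       per-xid FCDDC(0..3, xid)/FCDFC(0..3, xid) registers are
 *             written directly, so no fcoe->lock is taken
 * older MACs: a single indirect window (FCPTRL/FCPTRH/FCBUFF/FCDMARW and
 *             FCPARAM/FCFLT/FCFLTRW) is shared, so fcoe->lock serializes
 *             the context setup and invalidation sequences
 */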
@@ -371,6 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
struct fcoe_crc_eof *crc;
__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
__le32 ddp_err;
+ int ddp_max;
u32 fctl;
u16 xid;
@@ -392,7 +436,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
else
xid = be16_to_cpu(fh->fh_rx_id);
- if (xid >= IXGBE_FCOE_DDP_MAX)
+ ddp_max = IXGBE_FCOE_DDP_MAX;
+ /* X550 has different DDP Max limit */
+ if (adapter->hw.mac.type == ixgbe_mac_X550)
+ ddp_max = IXGBE_FCOE_DDP_MAX_X550;
+ if (xid >= ddp_max)
return -EINVAL;
fcoe = &adapter->fcoe;
@@ -612,7 +660,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
struct ixgbe_hw *hw = &adapter->hw;
- int i, fcoe_q, fcoe_i;
+ int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
+ int fcreta_size;
u32 etqf;
/* Minimal functionality for FCoE requires at least CRC offloads */
@@ -633,10 +682,23 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
return;
/* Use one or more Rx queues for FCoE by redirection table */
- for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+ fcreta_size = IXGBE_FCRETA_SIZE;
+ if (adapter->hw.mac.type == ixgbe_mac_X550)
+ fcreta_size = IXGBE_FCRETA_SIZE_X550;
+
+ for (i = 0; i < fcreta_size; i++) {
+ if (adapter->hw.mac.type == ixgbe_mac_X550) {
+ int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
+ fcoe->indices);
+ fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
+ fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
+ IXGBE_FCRETA_ENTRY_HIGH_MASK;
+ }
+
fcoe_i = fcoe->offset + (i % fcoe->indices);
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ fcoe_q |= fcoe_q_h;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -672,13 +734,18 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- int cpu, i;
+ int cpu, i, ddp_max;
/* do nothing if no DDP pools were allocated */
if (!fcoe->ddp_pool)
return;
- for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
+ ddp_max = IXGBE_FCOE_DDP_MAX;
+ /* X550 has different DDP Max limit */
+ if (adapter->hw.mac.type == ixgbe_mac_X550)
+ ddp_max = IXGBE_FCOE_DDP_MAX_X550;
+
+ for (i = 0; i < ddp_max; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
for_each_possible_cpu(cpu)
@@ -758,6 +825,9 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
}
adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+ /* X550 has different DDP Max limit */
+ if (adapter->hw.mac.type == ixgbe_mac_X550)
+ adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 0772b7730fce..38385876effb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -46,6 +46,7 @@
#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
+#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */
/* Default traffic class to use for FCoE */
#define IXGBE_FCOE_DEFTC 3
@@ -77,7 +78,7 @@ struct ixgbe_fcoe {
struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
atomic_t refcnt;
spinlock_t lock;
- struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+ struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550];
void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
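The two limits follow from the xid widths noted in the comments; a compile-time sanity check, as a sketch:

static inline void example_ddp_limit_check(void)
{
	BUILD_BUG_ON(IXGBE_FCOE_DDP_MAX != (1 << 9));		/*  512 */
	BUILD_BUG_ON(IXGBE_FCOE_DDP_MAX_X550 != (1 << 11));	/* 2048 */
}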
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 70cc4c5c0a01..5be12a00e1f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1619,14 +1619,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = q_vector->adapter;
-
if (ixgbe_qv_busy_polling(q_vector))
netif_receive_skb(skb);
- else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(&q_vector->napi, skb);
else
- netif_rx(skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -2609,7 +2605,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
/* The lower 16bits of the EICR register are for the queue interrupts
- * which should be masked here in order to not accidently clear them if
+ * which should be masked here in order to not accidentally clear them if
* the bits are high when ixgbe_msix_other is called. There is a race
* condition otherwise which results in possible performance loss
* especially if the ixgbe_msix_other interrupt is triggering
@@ -3232,89 +3228,148 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
-static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
+/**
+ * Return the number of entries in the RSS indirection table
+ *
+ * @adapter: device handle
+ *
+ * - 82598/82599/X540: 128
+ * - X550(non-SRIOV mode): 512
+ * - X550(SRIOV mode): 64
+ */
+u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type < ixgbe_mac_X550)
+ return 128;
+ else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return 64;
+ else
+ return 512;
+}
+
+/**
+ * Write the RETA table to HW
+ *
+ * @adapter: device handle
+ *
+ * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
+ */
+static void ixgbe_store_reta(struct ixgbe_adapter *adapter)
{
+ u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 reta = 0;
- int i, j;
- int reta_entries = 128;
- u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- int indices_multi;
-
- /*
- * Program table for at least 2 queues w/ SR-IOV so that VFs can
- * make full use of any rings they may have. We will use the
- * PSRTYPE register to control how many rings we use within the PF.
- */
- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
- rss_i = 2;
-
- /* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
+ u32 indices_multi;
+ u8 *indir_tbl = adapter->rss_indir_tbl;
/* Fill out the redirection table as follows:
- * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices
- * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index
- * X550: 512 (8 bit wide) entries containing 6 bit RSS index
+ * - 82598: 8 bit wide entries containing pair of 4 bit RSS
+ * indices.
+ * - 82599/X540: 8 bit wide entries containing 4 bit RSS index
+ * - X550: 8 bit wide entries containing 6 bit RSS index
*/
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
indices_multi = 0x11;
else
indices_multi = 0x1;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- reta_entries = 512;
- default:
- break;
- }
-
- /* Fill out redirection table */
- for (i = 0, j = 0; i < reta_entries; i++, j++) {
- if (j == rss_i)
- j = 0;
- reta = (reta << 8) | (j * indices_multi);
+ /* Write redirection table to HW */
+ for (i = 0; i < reta_entries; i++) {
+ reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
if ((i & 3) == 3) {
if (i < 128)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
else
IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
reta);
+ reta = 0;
}
}
}
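A worked example of the packing loop above for the first register, assuming rss_indir_tbl[0..3] = {1, 2, 3, 0} and a non-82598 MAC (indices_multi = 0x1); entries 128 and above land in ERETA instead of RETA:

/*   i = 0: reta |= 1 << 0   -> 0x00000001
 *   i = 1: reta |= 2 << 8   -> 0x00000201
 *   i = 2: reta |= 3 << 16  -> 0x00030201
 *   i = 3: reta |= 0 << 24  -> 0x00030201, (i & 3) == 3,
 *          IXGBE_WRITE_REG(hw, IXGBE_RETA(0), 0x00030201), reta = 0
 * on 82598 indices_multi = 0x11, so entry 3 is stored as 0x33 (the 4-bit
 * index duplicated into both nibbles of the byte)
 */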
-static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed)
+/**
+ * Write the RETA table to HW (for x550 devices in SRIOV mode)
+ *
+ * @adapter: device handle
+ *
+ * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
+ */
+static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
{
+ u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
+ unsigned int pf_pool = adapter->num_vfs;
+
+ /* Write redirection table to HW */
+ for (i = 0; i < reta_entries; i++) {
+ vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ vfreta);
+ vfreta = 0;
+ }
+ }
+}
+
+static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j;
+ u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+
+ /* Program table for at least 2 queues w/ SR-IOV so that VFs can
+ * make full use of any rings they may have. We will use the
+ * PSRTYPE register to control how many rings we use within the PF.
+ */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
+ rss_i = 2;
+
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
+
+ /* Fill out redirection table */
+ memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
+
+ for (i = 0, j = 0; i < reta_entries; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+
+ adapter->rss_indir_tbl[i] = j;
+ }
+
+ ixgbe_store_reta(adapter);
+}
+
+static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
unsigned int pf_pool = adapter->num_vfs;
int i, j;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
+ adapter->rss_key[i]);
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
if (j == rss_i)
j = 0;
- vfreta = (vfreta << 8) | j;
- if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
- vfreta);
+
+ adapter->rss_indir_tbl[i] = j;
}
+
+ ixgbe_store_vfreta(adapter);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
- u32 rss_key[10];
u32 rxcsum;
/* Disable indicating checksum in descriptor, enables RSS hash */
@@ -3358,7 +3413,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
unsigned int pf_pool = adapter->num_vfs;
@@ -3368,12 +3423,12 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* Setup RSS through the VF registers */
- ixgbe_setup_vfreta(adapter, rss_key);
+ ixgbe_setup_vfreta(adapter);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
} else {
- ixgbe_setup_reta(adapter, rss_key);
+ ixgbe_setup_reta(adapter);
mrqc |= rss_field;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
@@ -3557,7 +3612,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
- if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+ if (adapter->bridge_mode == BRIDGE_MODE_VEB)
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
@@ -3603,6 +3658,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
/* enable ethertype anti spoofing if hw supports it */
if (hw->mac.ops.set_ethertype_anti_spoofing)
hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+
+ /* Enable/Disable RSS query feature */
+ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
+ adapter->vfinfo[i].rss_query_enabled);
}
}
@@ -3705,8 +3764,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 rxctrl, rfctl;
/* disable receives while setting up the descriptors */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+ hw->mac.ops.disable_rx(hw);
ixgbe_setup_psrtype(adapter);
ixgbe_setup_rdrxctl(adapter);
@@ -3731,6 +3789,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
/* disable drop enable for 82598 parts */
if (hw->mac.type == ixgbe_mac_82598EB)
rxctrl |= IXGBE_RXCTRL_DMBYPS;
@@ -3924,7 +3983,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
for (i = 0; i < hw->mac.num_rar_entries; i++) {
adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ eth_zero_addr(adapter->mac_table[i].addr);
adapter->mac_table[i].queue = 0;
}
ixgbe_sync_mac_table(adapter);
@@ -3992,7 +4051,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
adapter->mac_table[i].queue == queue) {
adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ eth_zero_addr(adapter->mac_table[i].addr);
adapter->mac_table[i].queue = 0;
ixgbe_sync_mac_table(adapter);
return 0;
@@ -5014,7 +5073,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *upper;
struct list_head *iter;
- u32 rxctrl;
int i;
/* signal that we are down to the interrupt handler */
@@ -5022,8 +5080,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
return; /* do nothing if already down */
/* disable receives */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+ hw->mac.ops.disable_rx(hw);
/* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
@@ -6174,7 +6231,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/* Cause software interrupt to ensure rings are cleaned */
ixgbe_irq_rearm_queues(adapter, eics);
-
}
/**
@@ -7507,14 +7563,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
- adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- for (i = 0; i < adapter->num_q_vectors; i++)
- ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
- } else {
- ixgbe_intr(adapter->pdev->irq, netdev);
- }
- adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+ /* loop through and schedule all active queues */
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif
@@ -7882,6 +7933,80 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
+/**
+ * ixgbe_configure_bridge_mode - set various bridge modes
+ * @adapter - the private structure
+ * @mode - requested bridge mode
+ *
+ * Configure some settings required for various bridge modes.
+ **/
+static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
+ __u16 mode)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int p, num_pools;
+ u32 vmdctl;
+
+ switch (mode) {
+ case BRIDGE_MODE_VEPA:
+ /* disable Tx loopback, rely on switch hairpin mode */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
+
+ /* must enable Rx switching replication to allow multicast
+ * packet reception on all VFs, and to enable source address
+ * pruning.
+ */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+ vmdctl |= IXGBE_VT_CTL_REPLEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
+
+ /* enable Rx source address pruning. Note, this requires
+ * replication to be enabled or else it does nothing.
+ */
+ num_pools = adapter->num_vfs + adapter->num_rx_pools;
+ for (p = 0; p < num_pools; p++) {
+ if (hw->mac.ops.set_source_address_pruning)
+ hw->mac.ops.set_source_address_pruning(hw,
+ true,
+ p);
+ }
+ break;
+ case BRIDGE_MODE_VEB:
+ /* enable Tx loopback for internal VF/PF communication */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
+ IXGBE_PFDTXGSWC_VT_LBEN);
+
+ /* disable Rx switching replication unless we have SR-IOV
+ * virtual functions
+ */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+ if (!adapter->num_vfs)
+ vmdctl &= ~IXGBE_VT_CTL_REPLEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
+
+ /* disable Rx source address pruning, since we don't expect to
+ * be receiving external loopback of our transmitted frames.
+ */
+ num_pools = adapter->num_vfs + adapter->num_rx_pools;
+ for (p = 0; p < num_pools; p++) {
+ if (hw->mac.ops.set_source_address_pruning)
+ hw->mac.ops.set_source_address_pruning(hw,
+ false,
+ p);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adapter->bridge_mode = mode;
+
+ e_info(drv, "enabling bridge mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return 0;
+}
+
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags)
{
@@ -7897,8 +8022,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
+ u32 status;
__u16 mode;
- u32 reg = 0;
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
@@ -7907,19 +8032,11 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
return -EINVAL;
mode = nla_get_u16(attr);
- if (mode == BRIDGE_MODE_VEPA) {
- reg = 0;
- adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
- } else if (mode == BRIDGE_MODE_VEB) {
- reg = IXGBE_PFDTXGSWC_VT_LBEN;
- adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
- } else
- return -EINVAL;
+ status = ixgbe_configure_bridge_mode(adapter, mode);
+ if (status)
+ return status;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
-
- e_info(drv, "enabling bridge mode: %s\n",
- mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ break;
}
return 0;
@@ -7927,20 +8044,15 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask)
+ u32 filter_mask, int nlflags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- u16 mode;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
- if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
- mode = BRIDGE_MODE_VEB;
- else
- mode = BRIDGE_MODE_VEPA;
-
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ adapter->bridge_mode, 0, 0, nlflags);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -8052,6 +8164,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
+ .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
@@ -8406,7 +8519,6 @@ skip_sriov:
NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
@@ -8428,6 +8540,7 @@ skip_sriov:
}
netdev->hw_features |= NETIF_F_RXALL;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
@@ -8989,8 +9102,6 @@ static void __exit ixgbe_exit_module(void)
pci_unregister_driver(&ixgbe_driver);
ixgbe_dbg_exit();
-
- rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
#ifdef CONFIG_IXGBE_DCA
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a5cb755de3a9..b1e4703ff2a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -73,6 +73,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -97,6 +98,10 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+/* mailbox API, version 1.2 VF requests */
+#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 79c00f57d3e7..e5ba04025e2b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -279,20 +279,18 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
* read the timecounter and return the correct value on ns,
* after converting it into a struct timespec.
*/
-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
u64 ns;
- u32 remainder;
unsigned long flags;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
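The conversion change here (and the matching timespec64_to_ns() in the settime path below) simply replaces the open-coded second/nanosecond split with the generic helpers. As a small userspace model of the arithmetic involved, under the usual NSEC_PER_SEC = 10^9 assumption (the real kernel helpers also cover signed and overflowing inputs, which this sketch does not):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ts64 {
	int64_t tv_sec;
	long tv_nsec;
};

static struct ts64 ns_to_ts(uint64_t ns)
{
	struct ts64 ts = {
		.tv_sec = ns / NSEC_PER_SEC,	/* what div_u64_rem() computed */
		.tv_nsec = ns % NSEC_PER_SEC,	/* the old "remainder" */
	};
	return ts;
}

static uint64_t ts_to_ns(struct ts64 ts)
{
	return (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

int main(void)
{
	uint64_t ns = 1234567890123456789ULL;
	struct ts64 ts = ns_to_ts(ns);

	printf("%lld.%09ld -> %llu\n", (long long)ts.tv_sec, ts.tv_nsec,
	       (unsigned long long)ts_to_ns(ts));
	return 0;
}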
@@ -306,15 +304,14 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
* wall timer value.
*/
static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
u64 ns;
unsigned long flags;
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
/* reset the timecounter */
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -407,7 +404,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
IXGBE_OVERFLOW_PERIOD);
- struct timespec ts;
+ struct timespec64 ts;
if (timeout) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
@@ -488,7 +485,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
* @work: pointer to the work struct
*
* This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware
- * timestamp has been taken for the current skb. It is necesary, because the
+ * timestamp has been taken for the current skb. It is necessary, because the
* descriptor's "done" bit does not correlate with the timestamp event.
*/
static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
@@ -874,8 +871,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
- adapter->ptp_caps.settime = ixgbe_ptp_settime;
+ adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
case ixgbe_mac_82599EB:
@@ -890,8 +887,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
- adapter->ptp_caps.settime = ixgbe_ptp_settime;
+ adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7f37fe7269a7..1d17b5872dd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -36,6 +36,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif
@@ -79,7 +80,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
/* Initialize default switching mode VEB */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+ adapter->bridge_mode = BRIDGE_MODE_VEB;
/* If call to enable VFs succeeded then allocate memory
* for per VF control structures.
@@ -105,9 +106,18 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
- /* enable spoof checking for all VFs */
- for (i = 0; i < adapter->num_vfs; i++)
+ for (i = 0; i < adapter->num_vfs; i++) {
+ /* enable spoof checking for all VFs */
adapter->vfinfo[i].spoofchk_enabled = true;
+
+ /* We support VF RSS querying only for 82599 and x540
+ * devices at the moment. These devices share RSS
+ * indirection table and RSS hash key with PF therefore
+ * we want to disable the querying by default.
+ */
+ adapter->vfinfo[i].rss_query_enabled = 0;
+ }
+
return 0;
}
@@ -141,7 +151,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* The 82599 supports up to 64 VFs per physical function
* but this implementation limits allocation to 63 so that
* basic networking resources are still available to the
- * physical function. If the user requests greater thn
+ * physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
@@ -424,6 +434,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
#endif /* CONFIG_FCOE */
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
/*
* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
@@ -891,6 +902,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
switch (api) {
case ixgbe_mbox_api_10:
case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
@@ -914,6 +926,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_20:
case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
break;
default:
return -1;
@@ -941,6 +954,53 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ u32 i, j;
+ u32 *out_buf = &msgbuf[1];
+ const u8 *reta = adapter->rss_indir_tbl;
+ u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);
+
+ /* Check if operation is permitted */
+ if (!adapter->vfinfo[vf].rss_query_enabled)
+ return -EPERM;
+
+ /* verify the PF is supporting the correct API */
+ if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+ return -EOPNOTSUPP;
+
+ /* This mailbox command is supported (required) only for 82599 and x540
+ * VFs which support up to 4 RSS queues. Therefore we will compress the
+ * RETA by saving only 2 bits from each entry. This way we will be able
+ * to transfer the whole RETA in a single mailbox operation.
+ */
+ for (i = 0; i < reta_size / 16; i++) {
+ out_buf[i] = 0;
+ for (j = 0; j < 16; j++)
+ out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
+ }
+
+ return 0;
+}
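A note on the packing above, as a sketch rather than driver code: the 82599/X540 PF indirection table has 128 one-byte entries (IXGBEVF_82599_RETA_SIZE below), but a VF on those parts has at most 4 RSS queues, so each entry fits in 2 bits and the whole table compresses into 8 DWORDs — small enough for the 16-DWORD VF mailbox alongside the message header. The standalone model below (hypothetical names, userspace types) shows the pack/unpack arithmetic the PF and VF sides have to agree on:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RETA_ENTRIES 128	/* matches IXGBEVF_82599_RETA_SIZE */

static void pack_reta(const uint8_t *reta, uint32_t *out)
{
	unsigned int i, j;

	for (i = 0; i < RETA_ENTRIES / 16; i++) {
		out[i] = 0;
		for (j = 0; j < 16; j++)
			out[i] |= (uint32_t)(reta[16 * i + j] & 0x3) << (2 * j);
	}
}

static void unpack_reta(const uint32_t *in, uint8_t *reta)
{
	unsigned int i, j;

	for (i = 0; i < RETA_ENTRIES / 16; i++)
		for (j = 0; j < 16; j++)
			reta[16 * i + j] = (in[i] >> (2 * j)) & 0x3;
}

int main(void)
{
	uint8_t reta[RETA_ENTRIES], back[RETA_ENTRIES];
	uint32_t packed[RETA_ENTRIES / 16];	/* 8 DWORDs on the wire */
	unsigned int i;

	for (i = 0; i < RETA_ENTRIES; i++)
		reta[i] = i % 4;	/* spread flows over the 4 VF queues */

	pack_reta(reta, packed);
	unpack_reta(packed, back);

	printf("round trip %s, payload %zu bytes\n",
	       memcmp(reta, back, sizeof(reta)) ? "failed" : "ok",
	       sizeof(packed));
	return 0;
}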
+
+static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 *rss_key = &msgbuf[1];
+
+ /* Check if the operation is permitted */
+ if (!adapter->vfinfo[vf].rss_query_enabled)
+ return -EPERM;
+
+ /* verify the PF is supporting the correct API */
+ if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+ return -EOPNOTSUPP;
+
+ memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -997,6 +1057,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_GET_QUEUES:
retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_RETA:
+ retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_GET_RSS_KEY:
+ retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
@@ -1330,6 +1396,26 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
return 0;
}
+int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
+ bool setting)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ /* This operation is currently supported only for 82599 and x540
+ * devices.
+ */
+ if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
+ adapter->hw.mac.type >= ixgbe_mac_X550)
+ return -EOPNOTSUPP;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ adapter->vfinfo[vf].rss_query_enabled = setting;
+
+ return 0;
+}
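Once this ndo is wired into ixgbe_netdev_ops (see the ops-table hunk earlier), the per-VF setting travels over the IFLA_VF_RSS_QUERY_EN netlink attribute and, as added in ixgbe_ndo_get_vf_config below, is reported back in struct ifla_vf_info, so tools that speak that attribute (recent iproute2's per-VF query_rss on/off option of "ip link set", for example) can toggle and display it.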
+
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
@@ -1343,5 +1429,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
+ ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 32c26d586c01..2c197e6d1fe7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -47,6 +47,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
+ bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index fc5ecee56ca8..dd6ba5916dfe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -285,6 +285,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_PFFLPL 0x050B0
+#define IXGBE_PFFLPH 0x050B4
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
@@ -608,6 +610,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTBCNRM 0x04980
#define IXGBE_RTTQCNRM 0x04980
+/* FCoE Direct DMA Context */
+#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
@@ -634,6 +638,9 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Direct Filter Context */
+#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4))
/* FCoE Filter Context Registers */
#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
@@ -664,6 +671,10 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */
+/* Higher 7 bits for the queue index */
+#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000
+#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16
/* Stats registers */
#define IXGBE_CRCERRS 0x04000
@@ -1690,7 +1701,7 @@ enum {
#define IXGBE_MACC_FS 0x00040000
#define IXGBE_MAC_RX2TX_LPBK 0x00000002
-/* Veto Bit definiton */
+/* Veto Bit definition */
#define IXGBE_MMNGC_MNG_VETO 0x00000001
/* LINKS Bit Masks */
@@ -2462,8 +2473,8 @@ struct ixgbe_hic_read_shadow_ram {
struct ixgbe_hic_write_shadow_ram {
union ixgbe_hic_hdr2 hdr;
- u32 address;
- u16 length;
+ __be32 address;
+ __be16 length;
u16 pad2;
u16 data;
u16 pad3;
@@ -3067,6 +3078,10 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ void (*disable_rx)(struct ixgbe_hw *hw);
+ void (*enable_rx)(struct ixgbe_hw *hw);
+ void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+ unsigned int);
void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */
@@ -3137,6 +3152,7 @@ struct ixgbe_mac_info {
u8 flags;
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
+ bool set_lben;
};
struct ixgbe_phy_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 49395420c9b3..f5f948d08b43 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -820,6 +820,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic,
.prot_autoc_write = &prot_autoc_write_generic,
+ .enable_rx = &ixgbe_enable_rx_generic,
+ .disable_rx = &ixgbe_disable_rx_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 50bf81908dd6..cf5cf819a6b8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -557,6 +557,47 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
return status;
}
+/** ixgbe_disable_rx_x550 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disables the Rx DMA unit for x550
+ **/
+static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
+{
+ u32 rxctrl, pfdtxgswc;
+ s32 status;
+ struct ixgbe_hic_disable_rxen fw_cmd;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
+ fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
+ fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ fw_cmd.port_number = (u8)hw->bus.lan_id;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(struct ixgbe_hic_disable_rxen),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
* @hw: pointer to hardware structure
*
@@ -1306,8 +1347,8 @@ mac_reset_top:
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/
-void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
- int vf)
+static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+ bool enable, int vf)
{
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -1322,6 +1363,33 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
+/** ixgbe_set_source_address_pruning_X550 - Enable/Disable src address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool)
+{
+ u64 pfflp;
+
+ /* max rx pool is 63 */
+ if (pool > 63)
+ return;
+
+ pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+ pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+ if (enable)
+ pfflp |= (1ULL << pool);
+ else
+ pfflp &= ~(1ULL << pool);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
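The per-pool enable above is effectively a 64-bit bitmap split across two 32-bit registers — PFFLPL carries pools 0-31 and PFFLPH pools 32-63, hence the read-shift-combine and the split write-back. A throwaway userspace model of the same read-modify-write, with two u32 variables standing in for the registers:

#include <stdint.h>
#include <stdio.h>

static uint32_t pfflpl, pfflph;	/* stand-ins for the two registers */

static void set_pruning(unsigned int pool, int enable)
{
	uint64_t bits;

	if (pool > 63)		/* same guard as the driver */
		return;

	bits = (uint64_t)pfflpl | ((uint64_t)pfflph << 32);

	if (enable)
		bits |= 1ULL << pool;
	else
		bits &= ~(1ULL << pool);

	pfflpl = (uint32_t)bits;
	pfflph = (uint32_t)(bits >> 32);
}

int main(void)
{
	set_pruning(5, 1);
	set_pruning(40, 1);
	/* expected: PFFLPL=00000020 PFFLPH=00000100 */
	printf("PFFLPL=%08x PFFLPH=%08x\n", pfflpl, pfflph);
	return 0;
}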
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -1356,6 +1424,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
.init_uta_tables = &ixgbe_init_uta_tables_generic, \
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
+ .set_source_address_pruning = \
+ &ixgbe_set_source_address_pruning_X550, \
.set_ethertype_anti_spoofing = \
&ixgbe_set_ethertype_anti_spoofing_X550, \
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
@@ -1366,6 +1436,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
.init_thermal_sensor_thresh = NULL, \
.prot_autoc_read = &prot_autoc_read_generic, \
.prot_autoc_write = &prot_autoc_write_generic, \
+ .enable_rx = &ixgbe_enable_rx_generic, \
+ .disable_rx = &ixgbe_disable_rx_x550, \
static struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 7412d378b77b..770e21a64388 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -29,138 +28,138 @@
#define _IXGBEVF_DEFINES_H_
/* Device IDs */
-#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
-#define IXGBE_VF_IRQ_CLEAR_MASK 7
-#define IXGBE_VF_MAX_TX_QUEUES 8
-#define IXGBE_VF_MAX_RX_QUEUES 8
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
/* DCB define */
#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
/* Link speed */
typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
-#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_100_FULL 0x0008
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_LINKS_UP 0x40000000
-#define IXGBE_LINKS_SPEED_82599 0x30000000
-#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
-#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
-#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
-#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
-#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
/* Interrupt Vector Allocation Registers */
-#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
/* Receive Config masks */
-#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
-#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
-#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
-#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
/* PSRTYPE bit definitions */
-#define IXGBE_PSRTYPE_TCPHDR 0x00000010
-#define IXGBE_PSRTYPE_UDPHDR 0x00000020
-#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
-#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
-#define IXGBE_PSRTYPE_L2HDR 0x00001000
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
/* SRRCTL bit definitions */
-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
-#define IXGBE_SRRCTL_RDMTS_SHIFT 22
-#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
-#define IXGBE_SRRCTL_DROP_EN 0x10000000
-#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
-#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
-#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
/* Receive Descriptor bit definitions */
-#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
-#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
-#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
-#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
-#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
-#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
-#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
-#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
-#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
-#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
-#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
-#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
-#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
-#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
-#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
-#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
-#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
-#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
-#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
-#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
-#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
-#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
-#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
-#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
-#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
-#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
-#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
-#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
-#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
-#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
-#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
-#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
-#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
-#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define IXGBE_RXD_PRI_SHIFT 13
-#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
-#define IXGBE_RXD_CFI_SHIFT 12
-
-#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
-#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
-#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
-#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
-#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
-#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
-#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
-
-#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
-#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
-#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
-#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
-#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
-#define IXGBE_RXDADV_RSCCNT_SHIFT 17
-#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
-#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
-#define IXGBE_RXDADV_SPH 0x8000
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
@@ -176,16 +175,16 @@ typedef u32 ixgbe_link_speed;
IXGBE_RXDADV_ERR_OSE | \
IXGBE_RXDADV_ERR_USE)
-#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
-#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
@@ -241,44 +240,44 @@ struct ixgbe_adv_tx_context_desc {
};
/* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
-#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
-#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
-#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
-#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
-#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
-#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
-#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
-#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
-#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
-#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/* Interrupt register bitmasks */
-#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_EITR_CNT_WDIS 0x80000000
#define IXGBE_MAX_EITR 0x00000FF8
#define IXGBE_MIN_EITR 8
/* Error Codes */
-#define IXGBE_ERR_INVALID_MAC_ADDR -1
-#define IXGBE_ERR_RESET_FAILED -2
-#define IXGBE_ERR_INVALID_ARGUMENT -3
+#define IXGBE_ERR_INVALID_MAC_ADDR -1
+#define IXGBE_ERR_RESET_FAILED -2
+#define IXGBE_ERR_INVALID_ARGUMENT -3
/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index cc0e5b7ff041..b2f5b161d792 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
+
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev,
@@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev,
if (link_up) {
__u32 speed = SPEED_10000;
+
switch (link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
speed = SPEED_10000;
@@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev,
static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
return adapter->msg_enable;
}
static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
adapter->msg_enable = data;
}
@@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
- * read EICS which is a shadow but doesn't clear EICR */
+ * read EICS which is a shadow but doesn't clear EICR
+ */
regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
@@ -390,21 +394,21 @@ clear_reset:
static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
{
- switch (stringset) {
- case ETH_SS_TEST:
- return IXGBE_TEST_LEN;
- case ETH_SS_STATS:
- return IXGBE_GLOBAL_STATS_LEN;
- default:
- return -EINVAL;
- }
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
}
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- char *base = (char *) adapter;
+ char *base = (char *)adapter;
int i;
#ifdef BP_EXTENDED_STATS
u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
@@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
}
test = reg_test_vf;
- /*
- * Perform the register test, looping through the test table
+ /* Perform the register test, looping through the test table
* until we either fail or reach the null entry.
*/
while (test->reg) {
@@ -617,8 +620,8 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
break;
case WRITE_NO_TEST:
ixgbe_write_reg(&adapter->hw,
- test->reg + (i * 0x40),
- test->write);
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
b = reg_pattern_test(adapter, data,
@@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev,
hw_dbg(&adapter->hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (ixgbevf_link_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev,
else
ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
- /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
return 0;
@@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
int num_vectors, i;
u16 tx_itr_param, rx_itr_param;
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
+ /* don't accept Tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->tx.count &&
+ adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
return -EINVAL;
-
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
return -EINVAL;
@@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
else
rx_itr_param = adapter->rx_itr_setting;
-
if (ec->tx_coalesce_usecs > 1)
adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
else
@@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->tx.count && !q_vector->rx.count)
- /* tx only */
+ /* Tx only */
q_vector->itr = tx_itr_param;
else
- /* rx only or mixed */
+ /* Rx only or mixed */
q_vector->itr = rx_itr_param;
ixgbevf_write_eitr(q_vector);
}
@@ -792,23 +794,92 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
return 0;
}
+static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = adapter->num_rx_queues;
+ return 0;
+ default:
+ hw_dbg(&adapter->hw, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* We support this operation only for 82599 and x540 at the moment */
+ if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
+ return IXGBEVF_82599_RETA_SIZE;
+
+ return 0;
+}
+
+static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* We support this operation only for 82599 and x540 at the moment */
+ if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
+ return IXGBEVF_RSS_HASH_KEY_SIZE;
+
+ return 0;
+}
+
+static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ /* If neither the indirection table nor the hash key was requested,
+ * just return success without taking any locks.
+ */
+ if (!indir && !key)
+ return 0;
+
+ spin_lock_bh(&adapter->mbx_lock);
+ if (indir)
+ err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+ adapter->num_rx_queues);
+
+ if (!err && key)
+ err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ return err;
+}
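With these three callbacks added to ixgbevf_ethtool_ops below, an 82599/X540 VF can answer ethtool's RSS queries (e.g. "ethtool -x <vf-interface>") by fetching the PF-owned indirection table and 40-byte hash key (IXGBEVF_RSS_HASH_KEY_SIZE) over the mailbox messages introduced for API 1.2; ixgbevf_get_reta_locked() and ixgbevf_get_rss_key_locked() are expected to issue IXGBE_VF_GET_RETA and IXGBE_VF_GET_RSS_KEY under mbx_lock. On X550 VFs the size callbacks above return 0, so nothing is reported there for now.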
+
static const struct ethtool_ops ixgbevf_ethtool_ops = {
- .get_settings = ixgbevf_get_settings,
- .get_drvinfo = ixgbevf_get_drvinfo,
- .get_regs_len = ixgbevf_get_regs_len,
- .get_regs = ixgbevf_get_regs,
- .nway_reset = ixgbevf_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_ringparam = ixgbevf_get_ringparam,
- .set_ringparam = ixgbevf_set_ringparam,
- .get_msglevel = ixgbevf_get_msglevel,
- .set_msglevel = ixgbevf_set_msglevel,
- .self_test = ixgbevf_diag_test,
- .get_sset_count = ixgbevf_get_sset_count,
- .get_strings = ixgbevf_get_strings,
- .get_ethtool_stats = ixgbevf_get_ethtool_stats,
- .get_coalesce = ixgbevf_get_coalesce,
- .set_coalesce = ixgbevf_set_coalesce,
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+ .get_coalesce = ixgbevf_get_coalesce,
+ .set_coalesce = ixgbevf_set_coalesce,
+ .get_rxnfc = ixgbevf_get_rxnfc,
+ .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
+ .get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
+ .get_rxfh = ixgbevf_get_rxfh,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 3a9b356dff01..775d08900949 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -51,7 +50,8 @@
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct ixgbevf_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
@@ -132,9 +132,10 @@ struct ixgbevf_ring {
u8 __iomem *tail;
struct sk_buff *skb;
- u16 reg_idx; /* holds the special value that gets the hardware register
- * offset associated with this ring, which is different
- * for DCB and RSS modes */
+ /* holds the special value that gets the hardware register offset
+ * associated with this ring, which is different for DCB and RSS modes
+ */
+ u16 reg_idx;
int queue_index; /* needed for multiqueue queue management */
};
@@ -143,21 +144,23 @@ struct ixgbevf_ring {
#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES 2
+#define IXGBEVF_MAX_RSS_QUEUES 2
+#define IXGBEVF_82599_RETA_SIZE 128
+#define IXGBEVF_RSS_HASH_KEY_SIZE 40
-#define IXGBEVF_DEFAULT_TXD 1024
-#define IXGBEVF_DEFAULT_RXD 512
-#define IXGBEVF_MAX_TXD 4096
-#define IXGBEVF_MIN_TXD 64
-#define IXGBEVF_MAX_RXD 4096
-#define IXGBEVF_MIN_RXD 64
+#define IXGBEVF_DEFAULT_TXD 1024
+#define IXGBEVF_DEFAULT_RXD 512
+#define IXGBEVF_MAX_TXD 4096
+#define IXGBEVF_MIN_TXD 64
+#define IXGBEVF_MAX_RXD 4096
+#define IXGBEVF_MIN_RXD 64
/* Supported Rx Buffer Sizes */
-#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048 2048
-#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
@@ -186,10 +189,11 @@ struct ixgbevf_ring_container {
*/
struct ixgbevf_q_vector {
struct ixgbevf_adapter *adapter;
- u16 v_idx; /* index of q_vector within array, also used for
- * finding the bit in EICR and friends that
- * represents the vector for this ring */
- u16 itr; /* Interrupt throttle rate written to EITR */
+ /* index of q_vector within array, also used for finding the bit in
+ * EICR and friends that represents the vector for this ring
+ */
+ u16 v_idx;
+ u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi;
struct ixgbevf_ring_container rx, tx;
char name[IFNAMSIZ + 9];
@@ -199,19 +203,21 @@ struct ixgbevf_q_vector {
#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
-#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
-#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
+ IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
+ IXGBEVF_QV_STATE_POLL_YIELD)
spinlock_t lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
+
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
{
-
spin_lock_init(&q_vector->lock);
q_vector->state = IXGBEVF_QV_STATE_IDLE;
}
@@ -220,6 +226,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
{
int rc = true;
+
spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBEVF_QV_LOCKED) {
WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
@@ -240,6 +247,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
{
int rc = false;
+
spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
IXGBEVF_QV_STATE_NAPI_YIELD));
@@ -256,6 +264,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
{
int rc = true;
+
spin_lock_bh(&q_vector->lock);
if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
@@ -275,6 +284,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
{
int rc = false;
+
spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
@@ -297,6 +307,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
{
int rc = true;
+
spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBEVF_QV_OWNED)
rc = false;
@@ -307,8 +318,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
#endif /* CONFIG_NET_RX_BUSY_POLL */
-/*
- * microsecond values for various ITR rates shifted by 2 to fit itr register
+/* microsecond values for various ITR rates shifted by 2 to fit itr register
* with the first 3 bits reserved 0
*/
#define IXGBE_MIN_RSC_ITR 24
@@ -345,22 +355,22 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
writel(value, ring->tail);
}
-#define IXGBEVF_RX_DESC(R, i) \
+#define IXGBEVF_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
-#define IXGBEVF_TX_DESC(R, i) \
+#define IXGBEVF_TX_DESC(R, i) \
(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
-#define IXGBEVF_TX_CTXTDESC(R, i) \
+#define IXGBEVF_TX_CTXTDESC(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
-#define OTHER_VECTOR 1
-#define NON_Q_VECTORS (OTHER_VECTOR)
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
-#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_Q_VECTORS 2
-#define MIN_MSIX_Q_VECTORS 1
-#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
struct ixgbevf_adapter {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4186981e562d..e71cdde9cb01 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -25,7 +24,6 @@
*******************************************************************************/
-
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
@@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
* @queue: queue to map the corresponding interrupt to
* @msix_vector: the vector to map to the corresponding queue
- */
+ **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
u8 queue, u8 msix_vector)
{
u32 ivar, index;
struct ixgbe_hw *hw = &adapter->hw;
+
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
ivar |= msix_vector;
IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
} else {
- /* tx or rx causes */
+ /* Tx or Rx causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
@@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
-/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
* @rx_desc: current Rx descriptor being processed
* @skb: skb currently being received and modified
- */
+ **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
+/**
+ * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
@@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
* This function checks the ring, descriptor, and packet information in
* order to populate the checksum, VLAN, protocol, and other fields within
* the skb.
- */
+ **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
}
}
-/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
+/**
+ * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being adjusted
*
@@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
* that allow for significant optimizations versus the standard function.
* As a result we can do things like drop a frag and maintain an accurate
* truesize for the skb.
- */
+ **/
static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb)
{
@@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
skb->tail += pull_len;
}
-/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
+/**
+ * ixgbevf_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being fixed
@@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
* it is large enough to qualify as a valid Ethernet frame.
*
* Returns true if an error was encountered and skb was freed.
- */
+ **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
return false;
}
-/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
+/**
+ * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
- */
+ **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *old_buff)
{
@@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}
-/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
+/**
+ * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
* @rx_desc: descriptor containing length of buffer written by hardware
@@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
*
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
- */
+ **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
union ixgbe_adv_rx_desc *rx_desc,
@@ -958,7 +963,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* source pruning.
*/
if ((skb->pkt_type == PACKET_BROADCAST ||
- skb->pkt_type == PACKET_MULTICAST) &&
+ skb->pkt_type == PACKET_MULTICAST) &&
ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
@@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
#endif
/* attempt to distribute budget to each queue fairly, but don't allow
- * the budget to go below 1 because we'll exit polling */
+ * the budget to go below 1 because we'll exit polling
+ */
if (q_vector->rx.count > 1)
per_ring_budget = max(budget/q_vector->rx.count, 1);
else
@@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
/**
* ixgbevf_write_eitr - write VTEITR register in hardware specific way
* @q_vector: structure containing interrupt and ring information
- */
+ **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
@@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
- /*
- * set the WDIS bit to not clear the timer bits and cause an
+ /* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
*/
itr_reg |= IXGBE_EITR_CNT_WDIS;
@@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
adapter->eims_enable_mask = 0;
- /*
- * Populate the IVAR table and set the ITR values to the
+ /* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
struct ixgbevf_ring *ring;
+
q_vector = adapter->q_vector[v_idx];
ixgbevf_for_each_ring(ring, q_vector->rx)
@@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
if (q_vector->tx.ring && !q_vector->rx.ring) {
- /* tx only vector */
+ /* Tx only vector */
if (adapter->tx_itr_setting == 1)
q_vector->itr = IXGBE_10K_ITR;
else
q_vector->itr = adapter->tx_itr_setting;
} else {
- /* rx or rx/tx vector */
+ /* Rx or Rx/Tx vector */
if (adapter->rx_itr_setting == 1)
q_vector->itr = IXGBE_20K_ITR;
else
@@ -1167,13 +1172,13 @@ enum latency_range {
* @q_vector: structure containing interrupt and ring information
* @ring_container: structure containing ring performance data
*
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
**/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring_container *ring_container)
@@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
if (packets == 0)
return;
- /* simple throttlerate management
+ /* simple throttle rate management
* 0-20MB/s lowest (100000 ints/s)
* 20-100MB/s low (20000 ints/s)
* 100-1249MB/s bulk (8000 ints/s)
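A minimal standalone illustration of the throttle tiers listed in the comment above; the enum mirrors the driver's latency_range idea, but the function name, parameter, and thresholds here are restated from the comment for illustration only:

        enum itr_latency_range { lowest_latency, low_latency, bulk_latency };

        /* classify measured throughput (MB/s over the last interrupt
         * interval) into the interrupt-rate tiers listed above
         */
        static enum itr_latency_range itr_classify(unsigned int mbps)
        {
                if (mbps <= 20)
                        return lowest_latency;  /* ~100000 ints/s */
                if (mbps <= 100)
                        return low_latency;     /* ~20000 ints/s */
                return bulk_latency;            /* ~8000 ints/s */
        }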
@@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /*
- * The ideal configuration...
+ /* The ideal configuration...
* We have enough vectors to map one per queue.
*/
if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
@@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
goto out;
}
- /*
- * If we don't have enough vectors for a 1-to-1
+ /* If we don't have enough vectors for a 1-to-1
* mapping, we'll have to group them so there are
* multiple queues per vector.
*/
@@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
q_vector->name, q_vector);
if (err) {
hw_dbg(&adapter->hw,
- "request_irq failed for MSIX interrupt "
- "Error: %d\n", err);
+ "request_irq failed for MSIX interrupt Error: %d\n",
+ err);
goto free_queue_irqs;
}
}
@@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
&ixgbevf_msix_other, 0, netdev->name, adapter);
if (err) {
- hw_dbg(&adapter->hw,
- "request_irq for msix_other failed: %d\n", err);
+ hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
+ err);
goto free_queue_irqs;
}
@@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
for (i = 0; i < q_vectors; i++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+
q_vector->rx.ring = NULL;
q_vector->tx.ring = NULL;
q_vector->rx.count = 0;
@@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
err = ixgbevf_request_msix_irqs(adapter);
if (err)
- hw_dbg(&adapter->hw,
- "request_irq failed, Error %d\n", err);
+ hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
return err;
}
@@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
/* write value back with RXDCTL.ENABLE bit cleared */
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
- /* the hardware may take up to 100us to really disable the rx queue */
+ /* the hardware may take up to 100us to really disable the Rx queue */
do {
udelay(10);
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
@@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ * the Base and Length of the Rx Descriptor Ring
+ */
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
@@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
+
netdev_for_each_uc_addr(ha, netdev) {
hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
udelay(200);
}
} else {
- /*
- * If the list is empty then send message to PF driver to
- * clear all macvlans on this VF.
+ /* If the list is empty then send message to PF driver to
+ * clear all MAC VLANs on this VF.
*/
hw->mac.ops.set_uc_addr(hw, 0, NULL);
}
@@ -2026,7 +2030,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_11,
+ int api[] = { ixgbe_mbox_api_12,
+ ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
int err = 0, idx = 0;
@@ -2184,7 +2189,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
return; /* do nothing if already down */
- /* disable all enabled rx queues */
+ /* disable all enabled Rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
@@ -2328,6 +2333,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
switch (hw->api_version) {
case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
adapter->num_rx_queues = rss;
adapter->num_tx_queues = rss;
default:
@@ -2406,8 +2412,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
int err = 0;
int vector, v_budget;
- /*
- * It's easy to be greedy for MSI-X vectors, but it really
+ /* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
@@ -2418,7 +2423,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
v_budget += NON_Q_VECTORS;
/* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter. */
+ * mean we disable MSI-X capabilities of the adapter.
+ */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries) {
@@ -2544,8 +2550,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
err = ixgbevf_alloc_q_vectors(adapter);
if (err) {
- hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
- "vectors\n");
+ hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
goto err_alloc_q_vectors;
}
@@ -2555,8 +2560,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
goto err_alloc_queues;
}
- hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
- "Tx Queue count = %u\n",
+ hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" :
"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
@@ -2600,7 +2604,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_sw_init - Initialize general software structures
- * (struct ixgbevf_adapter)
* @adapter: board private structure to initialize
*
* ixgbevf_sw_init initializes the Adapter private data structure.
@@ -2615,7 +2618,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
int err;
/* PCI config space info */
-
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->revision_id = pdev->revision;
@@ -2686,8 +2688,8 @@ out:
{ \
u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
- u64 current_counter = (current_counter_msb << 32) | \
- current_counter_lsb; \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
if (current_counter < last_counter) \
counter += 0x1000000000LL; \
last_counter = current_counter; \
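The macro fragment above folds a 36-bit hardware counter, exposed as separate LSB/MSB registers, into a 64-bit software total by detecting wraparound between reads; a self-contained sketch of that logic, with placeholder names instead of the driver's register reads:

        #include <stdint.h>

        /* accumulate a 36-bit hardware counter that may wrap between reads */
        static uint64_t update_36bit_counter(uint32_t lsb, uint32_t msb,
                                             uint64_t *last, uint64_t *wraps)
        {
                uint64_t current = ((uint64_t)msb << 32) | lsb;

                if (current < *last)                    /* counter wrapped since last read */
                        *wraps += 0x1000000000ULL;      /* add 2^36 */
                *last = current;

                return *wraps + current;                /* running 64-bit total */
        }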
@@ -2758,14 +2760,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
ixgbevf_reinit_locked(adapter);
}
-/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
- * @adapter - pointer to the device adapter structure
+/**
+ * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter: pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
* in order to make certain interrupts are occurring. Secondly it sets the
* bits needed to check for TX hangs. As a result we should immediately
* determine if a hang has occurred.
- */
+ **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -2783,7 +2786,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
set_check_for_tx_hang(adapter->tx_ring[i]);
}
- /* get one bit for every active tx/rx interrupt vector */
+ /* get one bit for every active Tx/Rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
@@ -2797,7 +2800,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_watchdog_update_link - update the link status
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
@@ -2825,7 +2828,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_watchdog_link_is_up - update netif_carrier status and
* print link up message
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
@@ -2850,7 +2853,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_watchdog_link_is_down - update netif_carrier status and
* print link down message
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
**/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
@@ -2956,7 +2959,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ * @tx_ring: Tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
@@ -2983,8 +2986,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
- "descriptor ring\n");
+ hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
@@ -3006,8 +3008,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
- hw_dbg(&adapter->hw,
- "Allocation for Tx Queue %u failed\n", i);
+ hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
break;
}
@@ -3016,7 +3017,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
@@ -3065,8 +3066,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
- hw_dbg(&adapter->hw,
- "Allocation for Rx Queue %u failed\n", i);
+ hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
break;
}
return err;
@@ -3136,11 +3136,11 @@ static int ixgbevf_open(struct net_device *netdev)
if (hw->adapter_stopped) {
ixgbevf_reset(adapter);
/* if adapter is still stopped then PF isn't up and
- * the vf can't start. */
+ * the VF can't start.
+ */
if (hw->adapter_stopped) {
err = IXGBE_ERR_MBX;
- pr_err("Unable to start - perhaps the PF Driver isn't "
- "up yet\n");
+ pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
goto err_setup_reset;
}
}
@@ -3163,8 +3163,7 @@ static int ixgbevf_open(struct net_device *netdev)
ixgbevf_configure(adapter);
- /*
- * Map the Tx/Rx rings to the vectors we were allotted.
+ /* Map the Tx/Rx rings to the vectors we were allotted.
* if request_irq will be called in this function map_rings
* must be called *before* up_complete
*/
@@ -3288,6 +3287,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
+
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
@@ -3313,7 +3313,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
*hdr_len += l4len;
*hdr_len = skb_transport_offset(skb) + l4len;
- /* update gso size and bytecount with header size */
+ /* update GSO size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
@@ -3343,6 +3343,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0;
+
switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
@@ -3356,8 +3357,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
+ "partial checksum but proto=%x!\n",
+ first->protocol);
}
break;
}
@@ -3380,8 +3381,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
}
break;
}
@@ -3405,7 +3406,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
IXGBE_ADVTXD_DCMD_IFCS |
IXGBE_ADVTXD_DCMD_DEXT);
- /* set HW vlan bit if vlan is present */
+ /* set HW VLAN bit if VLAN is present */
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
@@ -3572,11 +3573,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (likely(ixgbevf_desc_unused(tx_ring) < size))
return -EBUSY;
@@ -3609,14 +3612,13 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
tx_ring = adapter->tx_ring[skb->queue_mapping];
- /*
- * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
@@ -3712,6 +3714,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
default:
@@ -3794,8 +3797,7 @@ static int ixgbevf_resume(struct pci_dev *pdev)
u32 err;
pci_restore_state(pdev);
- /*
- * pci_restore_state clears dev->state_saved so call
+ /* pci_restore_state clears dev->state_saved so call
* pci_save_state to restore it.
*/
pci_save_state(pdev);
@@ -3930,8 +3932,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
pci_using_dac = 0;
@@ -3962,8 +3963,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- /*
- * call save state here in standalone driver because it relies on
+ /* call save state here in standalone driver because it relies on
* adapter struct to exist, and needs to call netdev_priv
*/
pci_save_state(pdev);
@@ -3978,7 +3978,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbevf_assign_netdev_ops(netdev);
- /* Setup hw api */
+ /* Setup HW API */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
@@ -3998,11 +3998,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX |
@@ -4131,7 +4131,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
*
* This function is called after a PCI bus error affecting
* this device has been detected.
- */
+ **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -4166,7 +4166,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
*
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the ixgbevf_resume routine.
- */
+ **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4194,7 +4194,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
* second-half of the ixgbevf_resume routine.
- */
+ **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4214,17 +4214,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
};
static struct pci_driver ixgbevf_driver = {
- .name = ixgbevf_driver_name,
- .id_table = ixgbevf_pci_tbl,
- .probe = ixgbevf_probe,
- .remove = ixgbevf_remove,
+ .name = ixgbevf_driver_name,
+ .id_table = ixgbevf_pci_tbl,
+ .probe = ixgbevf_probe,
+ .remove = ixgbevf_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
- .suspend = ixgbevf_suspend,
- .resume = ixgbevf_resume,
+ .suspend = ixgbevf_suspend,
+ .resume = ixgbevf_resume,
#endif
- .shutdown = ixgbevf_shutdown,
- .err_handler = &ixgbevf_err_handler
+ .shutdown = ixgbevf_shutdown,
+ .err_handler = &ixgbevf_err_handler
};
/**
@@ -4236,6 +4236,7 @@ static struct pci_driver ixgbevf_driver = {
static int __init ixgbevf_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n", ixgbevf_driver_string,
ixgbevf_driver_version);
@@ -4266,6 +4267,7 @@ static void __exit ixgbevf_exit_module(void)
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
struct ixgbevf_adapter *adapter = hw->back;
+
return adapter->netdev->name;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index d5028ddf4b31..dc68fea4894b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
}
/**
- * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * ixgbevf_poll_for_ack - Wait for message acknowledgment
* @hw: pointer to the HW structure
*
- * returns 0 if it successfully received a message acknowledgement
+ * returns 0 if it successfully received a message acknowledgment
**/
static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
{
@@ -213,7 +212,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
s32 ret_val = IXGBE_ERR_MBX;
if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
- IXGBE_VFMAILBOX_RSTI))) {
+ IXGBE_VFMAILBOX_RSTI))) {
ret_val = 0;
hw->mbx.stats.rsts++;
}
@@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
/* Take ownership of the buffer */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
- /* reserve mailbox for vf use */
+ /* reserve mailbox for VF use */
if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
ret_val = 0;
@@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
s32 ret_val;
u16 i;
-
- /* lock the mailbox to prevent pf/vf race condition */
+ /* lock the mailbox to prevent PF/VF race condition */
ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
if (ret_val)
goto out_no_write;
@@ -279,7 +277,7 @@ out_no_write:
}
/**
- * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
s32 ret_val = 0;
u16 i;
- /* lock the mailbox to prevent pf/vf race condition */
+ /* lock the mailbox to prevent PF/VF race condition */
ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
if (ret_val)
goto out_no_read;
@@ -311,17 +309,18 @@ out_no_read:
}
/**
- * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox
* @hw: pointer to the HW structure
*
- * Initializes the hw->mbx struct to correct values for vf mailbox
+ * Initializes the hw->mbx struct to correct values for VF mailbox
*/
static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
/* start mailbox as timed out and let the reset_hw call set the timeout
- * value to begin communications */
+ * value to begin communications
+ */
mbx->timeout = 0;
mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
@@ -337,13 +336,13 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
}
const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .read = ixgbevf_read_mbx_vf,
- .write = ixgbevf_write_mbx_vf,
- .read_posted = ixgbevf_read_posted_mbx,
- .write_posted = ixgbevf_write_posted_mbx,
- .check_for_msg = ixgbevf_check_for_msg_vf,
- .check_for_ack = ixgbevf_check_for_ack_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 0bc30058ff82..82f44e06e5fc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -30,71 +29,70 @@
#include "vf.h"
-#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
-#define IXGBE_VFMAILBOX 0x002FC
-#define IXGBE_VFMBMEM 0x00200
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
/* Define mailbox register bits */
-#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
-#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
-#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
-#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
-#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
-#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x)))
-#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn)))
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn)))
-#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
-#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
-#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
-
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- * clear to send requests */
-#define IXGBE_VT_MSGINFO_SHIFT 16
+/* Messages below or'd with this are the ACK */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the NACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000
+#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
-#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
-/*
- * each element denotes a version of the API; existing numbers may not
+/* each element denotes a version of the API; existing numbers may not
* change; any additions must go at the end
*/
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
/* mailbox API, legacy requests */
-#define IXGBE_VF_RESET 0x01 /* VF requests reset */
-#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
-#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
-#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
@@ -105,20 +103,24 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */
/* GET_QUEUES return data indices within the mailbox */
-#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
-#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
-#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
-#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
+/* mailbox API, version 1.2 VF requests */
+#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */
/* length of permanent address message returned from PF */
-#define IXGBE_VF_PERMADDR_MSG_LEN 4
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
-#define IXGBE_VF_MC_TYPE_WORD 3
+#define IXGBE_VF_MC_TYPE_WORD 3
-#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
-#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
-#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
/* forward declaration of the HW struct */
struct ixgbe_hw;
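Taken together, these definitions lay out the VF-to-PF message word: the command sits in the low byte, bits 23:16 carry optional extra info, and the PF's reply echoes the command or'd with the ACK or NACK type bit. A sketch of that round trip, where the command chosen and the variable index are illustrative and the posted read/write calls are elided (the same pattern appears in the vf.c hunks below):

        u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };

        msgbuf[0] = IXGBE_VF_SET_MACVLAN;               /* command in the low byte */
        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;   /* extra info in bits 23:16 */

        /* ... mbx.ops.write_posted() / mbx.ops.read_posted() exchange msgbuf ... */

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;             /* drop the clear-to-send bit */
        if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
                return -EPERM;                          /* PF refused the request */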
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 3e712fd6e695..2764fd16261f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -28,58 +27,58 @@
#ifndef _IXGBEVF_REGS_H_
#define _IXGBEVF_REGS_H_
-#define IXGBE_VFCTRL 0x00000
-#define IXGBE_VFSTATUS 0x00008
-#define IXGBE_VFLINKS 0x00010
-#define IXGBE_VFFRTIMER 0x00048
-#define IXGBE_VFRXMEMWRAP 0x03190
-#define IXGBE_VTEICR 0x00100
-#define IXGBE_VTEICS 0x00104
-#define IXGBE_VTEIMS 0x00108
-#define IXGBE_VTEIMC 0x0010C
-#define IXGBE_VTEIAC 0x00110
-#define IXGBE_VTEIAM 0x00114
-#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
-#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
-#define IXGBE_VTIVAR_MISC 0x00140
-#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
-#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
-#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
-#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
-#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
-#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
-#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
-#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
-#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
-#define IXGBE_VFPSRTYPE 0x00300
-#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
-#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
-#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
-#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
-#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
-#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
-#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
-#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
-#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
-#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
-#define IXGBE_VFGPRC 0x0101C
-#define IXGBE_VFGPTC 0x0201C
-#define IXGBE_VFGORC_LSB 0x01020
-#define IXGBE_VFGORC_MSB 0x01024
-#define IXGBE_VFGOTC_LSB 0x02020
-#define IXGBE_VFGOTC_MSB 0x02024
-#define IXGBE_VFMPRC 0x01034
-#define IXGBE_VFMRQC 0x3000
-#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
-#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+#define IXGBE_VFMRQC 0x3000
+#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
/* VFMRQC bits */
-#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */
-#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
-#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000
-#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000
-#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
-#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index cdb53be7d995..d1339b050627 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -65,7 +64,7 @@ static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
* ixgbevf_reset_hw_vf - Performs hardware reset
* @hw: pointer to hardware structure
*
- * Resets the hardware by reseting the transmit and receive units, masks and
+ * Resets the hardware by resetting the transmit and receive units, masks and
* clears all interrupts.
**/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
@@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
mdelay(10);
- /* set our "perm_addr" based on info provided by PF */
- /* also set up the mc_filter_type which is piggy backed
- * on the mac address in word 3 */
+ /* set our "perm_addr" based on info provided by PF
+ * also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3
+ */
ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
@@ -117,7 +117,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
- memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
+ ether_addr_copy(hw->mac.perm_addr, addr);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
@@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
u32 reg_val;
u16 i;
- /*
- * Set the adapter_stopped flag so other driver functions stop touching
+ /* Set the adapter_stopped flag so other driver functions stop touching
* the hardware
*/
hw->adapter_stopped = true;
@@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
*
* Extracts the 12 bits, from a multicast address, to determine which
* bit-vector to set in the multicast table. The hardware uses 12 bits, from
- * incoming rx multicast addresses, to determine the bit-vector to check in
+ * incoming Rx multicast addresses, to determine the bit-vector to check in
* the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
@@ -220,7 +219,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
- memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ ether_addr_copy(mac_addr, hw->mac.perm_addr);
return 0;
}
@@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
- /*
- * If index is one then this is the start of a new list and needs
+ /* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
@@ -242,7 +240,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
- memcpy(msg_addr, addr, ETH_ALEN);
+ ether_addr_copy(msg_addr, addr);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
@@ -259,6 +257,129 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
}
/**
+ * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
+ * @adapter: pointer to the port handle
+ * @reta: buffer to fill with RETA contents.
+ * @num_rx_queues: Number of Rx queues configured for this port
+ *
+ * The "reta" buffer should be big enough to contain 32 registers.
+ *
+ * Returns: 0 on success.
+ * if API doesn't support this operation - (-EOPNOTSUPP).
+ */
+int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
+{
+ int err, i, j;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u32 *hw_reta = &msgbuf[1];
+ u32 mask = 0;
+
+ /* We have to use a mailbox for 82599 and x540 devices only.
+ * For these devices RETA has 128 entries.
+ * Also these VFs support up to 4 RSS queues. Therefore PF will compress
+ * 16 RETA entries in each DWORD giving 2 bits to each entry.
+ */
+ int dwords = IXGBEVF_82599_RETA_SIZE / 16;
+
+ /* We support the RSS querying for 82599 and x540 devices only.
+ * Thus return an error if API doesn't support RETA querying or querying
+ * is not supported for this device type.
+ */
+ if (hw->api_version != ixgbe_mbox_api_12 ||
+ hw->mac.type >= ixgbe_mac_X550_vf)
+ return -EOPNOTSUPP;
+
+ msgbuf[0] = IXGBE_VF_GET_RETA;
+
+ err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+
+ if (err)
+ return err;
+
+ err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
+
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* If the operation has been refused by a PF return -EPERM */
+ if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+ return -EPERM;
+
+ /* If we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such.
+ */
+ if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ /* ixgbevf doesn't support more than 2 queues at the moment */
+ if (num_rx_queues > 1)
+ mask = 0x1;
+
+ for (i = 0; i < dwords; i++)
+ for (j = 0; j < 16; j++)
+ reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
+
+ return 0;
+}
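As a concrete illustration of the packing described above: with 128 RETA entries and 2 bits per entry, the PF returns 8 DWORDs, and entry n of the table is recovered with the index arithmetic below (a restatement of the decode loop for clarity, not additional driver code):

        dword = n / 16;                             /* which of the 8 reply DWORDs */
        shift = (n % 16) * 2;                       /* 2 bits per RETA entry */
        entry = (hw_reta[dword] >> shift) & mask;   /* mask limits entries to the supported queues */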
+
+/**
+ * ixgbevf_get_rss_key_locked - get the RSS Random Key
+ * @hw: pointer to the HW structure
+ * @rss_key: buffer to fill with RSS Hash Key contents.
+ *
+ * The "rss_key" buffer should be big enough to contain 10 registers.
+ *
+ * Returns: 0 on success.
+ * if API doesn't support this operation - (-EOPNOTSUPP).
+ */
+int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
+{
+ int err;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+
+ /* We currently support the RSS Random Key retrieval for 82599 and x540
+ * devices only.
+ *
+ * Thus return an error if API doesn't support RSS Random Key retrieval
+ * or if the operation is not supported for this device type.
+ */
+ if (hw->api_version != ixgbe_mbox_api_12 ||
+ hw->mac.type >= ixgbe_mac_X550_vf)
+ return -EOPNOTSUPP;
+
+ msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
+ err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+
+ if (err)
+ return err;
+
+ err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
+
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* If the operation has been refused by a PF return -EPERM */
+	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
+ return -EPERM;
+
+ /* If we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such.
+ */
+ if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
+
+ return 0;
+}
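Both helpers above expect the caller to already hold the mailbox lock (hence the _locked suffix); a sketch of an ethtool-style consumer, where the lock field name and the wrapper function are assumptions made for illustration rather than the driver's actual code:

        static int example_get_rxfh(struct ixgbevf_adapter *adapter,
                                    u32 *indir, u8 *key)
        {
                struct ixgbe_hw *hw = &adapter->hw;
                int err = 0;

                spin_lock_bh(&adapter->mbx_lock);       /* assumed lock name */
                if (indir)
                        err = ixgbevf_get_reta_locked(hw, indir,
                                                      adapter->num_rx_queues);
                if (!err && key)
                        err = ixgbevf_get_rss_key_locked(hw, key);
                spin_unlock_bh(&adapter->mbx_lock);

                return err;
        }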
+
+/**
* ixgbevf_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
* @index: Receive address register to write
@@ -275,7 +396,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, ETH_ALEN);
+ ether_addr_copy(msg_addr, addr);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
@@ -292,7 +413,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
}
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
- u32 *msg, u16 size)
+ u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 retmsg[IXGBE_VFMAILBOX_SIZE];
@@ -348,7 +469,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
- * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
@@ -462,7 +583,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
}
/* if the read failed it could just be a mailbox collision, best wait
- * until we are called again and don't report an error */
+ * until we are called again and don't report an error
+ */
if (mbx->ops.read(hw, &in_msg, 1))
goto out;
@@ -480,7 +602,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
}
/* if we passed all the tests above then the link is up and we no
- * longer need to check for link */
+ * longer need to check for link
+ */
mac->get_link_status = false;
out:
@@ -545,6 +668,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
/* do nothing if API doesn't support ixgbevf_get_queues */
switch (hw->api_version) {
case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
break;
default:
return 0;
@@ -561,8 +685,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /*
- * if we we didn't get an ACK there must have been
+	/* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
@@ -595,17 +718,17 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
}
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
- .init_hw = ixgbevf_init_hw_vf,
- .reset_hw = ixgbevf_reset_hw_vf,
- .start_hw = ixgbevf_start_hw_vf,
- .get_mac_addr = ixgbevf_get_mac_addr_vf,
- .stop_adapter = ixgbevf_stop_hw_vf,
- .setup_link = ixgbevf_setup_mac_link_vf,
- .check_link = ixgbevf_check_mac_link_vf,
- .set_rar = ixgbevf_set_rar_vf,
- .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
- .set_uc_addr = ixgbevf_set_uc_addr_vf,
- .set_vfta = ixgbevf_set_vfta_vf,
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_uc_addr = ixgbevf_set_uc_addr_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
};
const struct ixgbevf_info ixgbevf_82599_vf_info = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 5b172427f459..d40f036b6df0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -169,7 +168,7 @@ struct ixgbevf_hw_stats {
};
struct ixgbevf_info {
- enum ixgbe_mac_type mac;
+ enum ixgbe_mac_type mac;
const struct ixgbe_mac_operations *mac_ops;
};
@@ -185,28 +184,32 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
return;
writel(value, reg_addr + reg);
}
+
#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg);
#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r)
static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
- u32 offset, u32 value)
+ u32 offset, u32 value)
{
ixgbe_write_reg(hw, reg + (offset << 2), value);
}
+
#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
- u32 offset)
+ u32 offset)
{
return ixgbevf_read_reg(hw, reg + (offset << 2));
}
+
#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
+int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
+int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key);
#endif /* __IXGBE_VF_H__ */
-
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2db653225a0e..ce5f7f9cff06 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -100,6 +100,8 @@
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
+#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
@@ -122,6 +124,7 @@
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
+#define MVNETA_MISCINTR_INTR_MASK BIT(31)
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
@@ -165,6 +168,7 @@
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
+#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
@@ -180,9 +184,11 @@
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
+#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3080
@@ -304,6 +310,7 @@ struct mvneta_port {
unsigned int link;
unsigned int duplex;
unsigned int speed;
+ int use_inband_status:1;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -994,6 +1001,20 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
val &= ~MVNETA_PHY_POLLING_ENABLE;
mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+ if (pp->use_inband_status) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN);
+ val |= MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+ }
+
mvneta_set_ucast_table(pp, -1);
mvneta_set_special_mcast_table(pp, -1);
mvneta_set_other_mcast_table(pp, -1);
@@ -2043,6 +2064,28 @@ static irqreturn_t mvneta_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int mvneta_fixed_link_update(struct mvneta_port *pp,
+ struct phy_device *phy)
+{
+ struct fixed_phy_status status;
+ struct fixed_phy_status changed = {};
+ u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
+
+ status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+ if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+ status.speed = SPEED_1000;
+ else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+ status.speed = SPEED_100;
+ else
+ status.speed = SPEED_10;
+ status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+ changed.link = 1;
+ changed.speed = 1;
+ changed.duplex = 1;
+ fixed_phy_update_state(phy, &status, &changed);
+ return 0;
+}
+
/* NAPI handler
* Bits 0 - 7 of the causeRxTx register indicate that are transmitted
* packets on the corresponding TXQ (Bit 0 is for TX queue 1).
@@ -2063,8 +2106,18 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
}
/* Read cause register */
- cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
- (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
+ if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
+ u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
+
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+ if (pp->use_inband_status && (cause_misc &
+ (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
+ mvneta_fixed_link_update(pp, pp->phy_dev);
+ }
+ }
/* Release Tx descriptors */
if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
@@ -2109,7 +2162,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
local_irq_save(flags);
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
local_irq_restore(flags);
}
@@ -2373,7 +2428,13 @@ static void mvneta_start_dev(struct mvneta_port *pp)
/* Unmask interrupts */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE);
phy_start(pp->phy_dev);
netif_tx_start_all_queues(pp->dev);
@@ -2523,9 +2584,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN);
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX);
if (phydev->duplex)
val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2554,12 +2613,24 @@ static void mvneta_adjust_link(struct net_device *ndev)
if (status_change) {
if (phydev->link) {
- u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val |= (MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_FORCE_LINK_DOWN);
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ if (!pp->use_inband_status) {
+ u32 val = mvreg_read(pp,
+ MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ val |= MVNETA_GMAC_FORCE_LINK_PASS;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ val);
+ }
mvneta_port_up(pp);
} else {
+ if (!pp->use_inband_status) {
+ u32 val = mvreg_read(pp,
+ MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ val);
+ }
mvneta_port_down(pp);
}
phy_print_status(phydev);
@@ -2905,6 +2976,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
return -EINVAL;
}
+ if (pp->use_inband_status)
+ ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+
/* Cancel Port Reset */
ctrl &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
@@ -2929,6 +3003,7 @@ static int mvneta_probe(struct platform_device *pdev)
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
int phy_mode;
+ int fixed_phy = 0;
int err;
/* Our multiqueue support is not complete, so for now, only
@@ -2962,6 +3037,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot register fixed PHY\n");
goto err_free_irq;
}
+ fixed_phy = 1;
/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
@@ -2985,6 +3061,8 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
+ pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
+ fixed_phy;
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
@@ -3062,6 +3140,12 @@ static int mvneta_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pp->dev);
+ if (pp->use_inband_status) {
+ struct phy_device *phy = of_phy_find_device(dn);
+
+ mvneta_fixed_link_update(pp, phy);
+ }
+
return 0;
err_free_stats:
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index fdf3e382e464..3e8b1bfb1f2e 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1423,7 +1423,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
struct mvpp2_prs_entry pe;
- /* Promiscous mode - Accept unknown packets */
+ /* Promiscuous mode - Accept unknown packets */
if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
/* Entry exist - update port only */
@@ -3402,7 +3402,7 @@ static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool
for (i = 0; i < bm_pool->buf_num; i++) {
u32 vaddr;
- /* Get buffer virtual adress (indirect access) */
+ /* Get buffer virtual address (indirect access) */
mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
if (!vaddr)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index af829c578400..7ace07dad6a3 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!np) {
dev_err(&pdev->dev, "missing phy-handle\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->smi_bus = mdiobus_alloc();
if (pep->smi_bus == NULL) {
err = -ENOMEM;
- goto err_base;
+ goto err_netdev;
}
pep->smi_bus->priv = pep;
pep->smi_bus->name = "pxa168_eth smi";
@@ -1551,13 +1552,10 @@ err_mdiobus:
mdiobus_unregister(pep->smi_bus);
err_free_mdio:
mdiobus_free(pep->smi_bus);
-err_base:
- iounmap(pep->base);
err_netdev:
free_netdev(dev);
err_clk:
- clk_disable(clk);
- clk_put(clk);
+ clk_disable_unprepare(clk);
return err;
}
@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
if (pep->phy)
phy_disconnect(pep->phy);
if (pep->clk) {
- clk_disable(pep->clk);
- clk_put(pep->clk);
- pep->clk = NULL;
+ clk_disable_unprepare(pep->clk);
}
- iounmap(pep->base);
- pep->base = NULL;
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 3e9c70f15b42..c82217e0d22d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,8 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
-mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
+mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
+ main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
+ srq.o resource_tracker.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 546ca4226916..4f7dc044601e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -48,6 +48,7 @@
#include "mlx4.h"
#include "fw.h"
+#include "fw_qos.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL
@@ -726,7 +727,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*/
if (op == MLX4_CMD_SET_PORT &&
(in_modifier == 1 || in_modifier == 2) &&
- op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
+ op_modifier == MLX4_SET_PORT_IB_OPCODE &&
+ context->fw_status == CMD_STAT_BAD_SIZE)
mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
else
@@ -937,21 +939,34 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
- /* compute slave's gid block */
- smp->attr_mod = cpu_to_be32(slave / 8);
- /* execute cmd */
- err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
- vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
- if (!err) {
- /* if needed, move slave gid to index 0 */
- if (slave % 8)
- memcpy(outsmp->data,
- outsmp->data + (slave % 8) * 8, 8);
- /* delete all other gids */
- memset(outsmp->data + 8, 0, 56);
+ __be64 guid = mlx4_get_admin_guid(dev, slave,
+ port);
+
+ /* set the PF admin guid to the FW/HW burned
+ * GUID, if it wasn't yet set
+ */
+ if (slave == 0 && guid == 0) {
+ smp->attr_mod = 0;
+ err = mlx4_cmd_box(dev,
+ inbox->dma,
+ outbox->dma,
+ vhcr->in_modifier,
+ opcode_modifier,
+ vhcr->op,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ mlx4_set_admin_guid(dev,
+ *(__be64 *)outsmp->
+ data, slave, port);
+ } else {
+ memcpy(outsmp->data, &guid, 8);
}
- return err;
+
+ /* clean all other gids */
+ memset(outsmp->data + 8, 0, 56);
+ return 0;
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
@@ -1456,6 +1471,24 @@ static struct mlx4_cmd_info cmd_info[] = {
.wrapper = mlx4_CMD_EPERM_wrapper,
},
{
+ .opcode = MLX4_CMD_ALLOCATE_VPP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CMD_EPERM_wrapper,
+ },
+ {
+ .opcode = MLX4_CMD_SET_VPORT_QOS,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CMD_EPERM_wrapper,
+ },
+ {
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
.has_inbox = false,
.has_outbox = false,
@@ -1500,6 +1533,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_ACCESS_REG_wrapper,
},
+ {
+ .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CMD_EPERM_wrapper,
+ },
/* Native multicast commands are not available for guests */
{
.opcode = MLX4_CMD_QP_ATTACH,
@@ -1782,7 +1824,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos &&
- vp_oper->state.link_state == vp_admin->link_state)
+ vp_oper->state.link_state == vp_admin->link_state &&
+ vp_oper->state.qos_vport == vp_admin->qos_vport)
return 0;
if (!(priv->mfunc.master.slave_state[slave].active &&
@@ -1840,6 +1883,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
vp_oper->state.link_state = vp_admin->link_state;
+ vp_oper->state.qos_vport = vp_admin->qos_vport;
if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
@@ -1848,6 +1892,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
work->port = port;
work->slave = slave;
work->qos = vp_oper->state.default_qos;
+ work->qos_vport = vp_oper->state.qos_vport;
work->vlan_id = vp_oper->state.default_vlan;
work->vlan_ix = vp_oper->vlan_idx;
work->priv = priv;
@@ -1857,6 +1902,63 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
return 0;
}
+static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_qos_manager *port_qos_ctl;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
+ bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
+
+ /* Enable only default prio at PF init routine */
+ set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
+}
+
+static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
+{
+ int i;
+ int err;
+ int num_vfs;
+ u16 availible_vpp;
+ u8 vpp_param[MLX4_NUM_UP];
+ struct mlx4_qos_manager *port_qos;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
+ if (err) {
+ mlx4_info(dev, "Failed query availible VPPs\n");
+ return;
+ }
+
+ port_qos = &priv->mfunc.master.qos_ctl[port];
+ num_vfs = (availible_vpp /
+ bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
+
+ for (i = 0; i < MLX4_NUM_UP; i++) {
+ if (test_bit(i, port_qos->priority_bm))
+ vpp_param[i] = num_vfs;
+ }
+
+ err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
+ if (err) {
+ mlx4_info(dev, "Failed allocating VPPs\n");
+ return;
+ }
+
+ /* Query actual allocated VPP, just to make sure */
+ err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
+ if (err) {
+ mlx4_info(dev, "Failed query availible VPPs\n");
+ return;
+ }
+
+ port_qos->num_of_qos_vfs = num_vfs;
+ mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp);
+
+ for (i = 0; i < MLX4_NUM_UP; i++)
+ mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
+ vpp_param[i]);
+}
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
@@ -2204,6 +2306,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
}
if (mlx4_is_master(dev)) {
+ struct mlx4_vf_oper_state *vf_oper;
+ struct mlx4_vf_admin_state *vf_admin;
+
priv->mfunc.master.slave_state =
kzalloc(dev->num_slaves *
sizeof(struct mlx4_slave_state), GFP_KERNEL);
@@ -2223,6 +2328,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
goto err_comm_oper;
for (i = 0; i < dev->num_slaves; ++i) {
+ vf_admin = &priv->mfunc.master.vf_admin[i];
+ vf_oper = &priv->mfunc.master.vf_oper[i];
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
@@ -2234,6 +2341,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
&priv->mfunc.comm[i].slave_read);
mmiowb();
for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ struct mlx4_vport_state *admin_vport;
+ struct mlx4_vport_state *oper_vport;
+
s_state->vlan_filter[port] =
kzalloc(sizeof(struct mlx4_vlan_fltr),
GFP_KERNEL);
@@ -2242,15 +2352,31 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
kfree(s_state->vlan_filter[port]);
goto err_slaves;
}
+
+ admin_vport = &vf_admin->vport[port];
+ oper_vport = &vf_oper->vport[port].state;
INIT_LIST_HEAD(&s_state->mcast_filters[port]);
- priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
- priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
- priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
- priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
+ admin_vport->default_vlan = MLX4_VGT;
+ oper_vport->default_vlan = MLX4_VGT;
+ admin_vport->qos_vport =
+ MLX4_VPP_DEFAULT_VPORT;
+ oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
+ vf_oper->vport[port].vlan_idx = NO_INDX;
+ vf_oper->vport[port].mac_idx = NO_INDX;
+ mlx4_set_random_admin_guid(dev, i, port);
}
spin_lock_init(&s_state->lock);
}
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ if (mlx4_is_eth(dev, port)) {
+ mlx4_set_default_port_qos(dev, port);
+ mlx4_allocate_port_vpps(dev, port);
+ }
+ }
+ }
+
memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
@@ -2671,6 +2797,103 @@ static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
return port;
}
+static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
+ int max_tx_rate)
+{
+ int i;
+ int err;
+ struct mlx4_qos_manager *port_qos;
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
+
+ port_qos = &priv->mfunc.master.qos_ctl[port];
+ memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
+
+ if (slave > port_qos->num_of_qos_vfs) {
+ mlx4_info(dev, "No availible VPP resources for this VF\n");
+ return -EINVAL;
+ }
+
+ /* Querying the default QoS values from Vport 0 is needed */
+ err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
+ if (err) {
+ mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
+ return err;
+ }
+
+ for (i = 0; i < MLX4_NUM_UP; i++) {
+ if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
+ vpp_qos[i].max_avg_bw = max_tx_rate;
+ vpp_qos[i].enable = 1;
+ } else {
+ /* if the user supplied tx_rate == 0, no rate limit
+ * configuration is required, so we leave the value
+ * of max_avg_bw as queried from Vport 0.
+ */
+ vpp_qos[i].enable = 0;
+ }
+ }
+
+ err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
+ if (err) {
+ mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
+ return err;
+ }
+
+ return 0;
+}
+
+static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
+ struct mlx4_vport_state *vf_admin)
+{
+ struct mlx4_qos_manager *info;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (!mlx4_is_master(dev) ||
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
+ return false;
+
+ info = &priv->mfunc.master.qos_ctl[port];
+
+ if (vf_admin->default_vlan != MLX4_VGT &&
+ test_bit(vf_admin->default_qos, info->priority_bm))
+ return true;
+
+ return false;
+}
+
+static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
+ struct mlx4_vport_state *vf_admin,
+ int vlan, int qos)
+{
+ struct mlx4_vport_state dummy_admin = {0};
+
+ if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
+ !vf_admin->tx_rate)
+ return true;
+
+ dummy_admin.default_qos = qos;
+ dummy_admin.default_vlan = vlan;
+
+ /* VF wants to move to another VST state which is valid with the
+ * current rate limit: either a different default vlan in VST or
+ * another supported QoS priority. Otherwise we don't allow this
+ * change while the TX rate is still configured.
+ */
+ if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
+ return true;
+
+ mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
+ (vlan == MLX4_VGT) ? "VGT" : "VST");
+
+ if (vlan != MLX4_VGT)
+ mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
+
+ mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
+
+ return false;
+}
+
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2714,12 +2937,22 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
+ return -EPERM;
+
if ((0 == vlan) && (0 == qos))
vf_admin->default_vlan = MLX4_VGT;
else
vf_admin->default_vlan = vlan;
vf_admin->default_qos = qos;
+ /* If a rate was configured prior to VST, we saved it in
+ * vf_admin->tx_rate; now, if the priority is supported, we enforce the QoS
+ */
+ if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
+ vf_admin->tx_rate)
+ vf_admin->qos_vport = slave;
+
if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_info(dev,
"updating vf %d port %d config will take effect on next VF restart\n",
@@ -2728,6 +2961,69 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ int err;
+ int slave;
+ struct mlx4_vport_state *vf_admin;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (!mlx4_is_master(dev) ||
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
+ return -EPROTONOSUPPORT;
+
+ if (min_tx_rate) {
+ mlx4_info(dev, "Minimum BW share not supported\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ slave = mlx4_get_slave_indx(dev, vf);
+ if (slave < 0)
+ return -EINVAL;
+
+ port = mlx4_slaves_closest_port(dev, slave, port);
+ vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+
+ err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
+ if (err) {
+ mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
+ max_tx_rate);
+ return err;
+ }
+
+ vf_admin->tx_rate = max_tx_rate;
+ /* if VF is not in supported mode (VST with supported prio),
+ * we do not change vport configuration for its QPs, but save
+ * the rate, so it will be enforced when it moves to supported
+ * mode next time.
+ */
+ if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
+ mlx4_info(dev,
+ "rate set for VF %d when not in valid state\n", vf);
+
+ if (vf_admin->default_vlan != MLX4_VGT)
+ mlx4_info(dev, "VST priority not supported by QoS\n");
+ else
+ mlx4_info(dev, "VF in VGT mode (needed VST)\n");
+
+ mlx4_info(dev,
+ "rate %d take affect when VF moves to valid state\n",
+ max_tx_rate);
+ return 0;
+ }
+
+ /* If the user sets rate 0, assign the default vport for its QPs */
+ vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
+
+ if (priv->mfunc.master.slave_state[slave].active &&
+ dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
+ mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
+
/* mlx4_get_slave_default_vlan -
* return true if VST ( default vlan)
* if VST, will return vlan & qos (if not NULL)
@@ -2801,7 +3097,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->vlan = s_info->default_vlan;
ivf->qos = s_info->default_qos;
- ivf->max_tx_rate = s_info->tx_rate;
+
+ if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
+ ivf->max_tx_rate = s_info->tx_rate;
+ else
+ ivf->max_tx_rate = 0;
+
ivf->min_tx_rate = 0;
ivf->spoofchk = s_info->spoofchk;
ivf->linkstate = s_info->link_state;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 90b5309cdb5c..8a083d73efdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -164,20 +164,19 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
-static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
{
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
unsigned long flags;
- u32 remainder;
u64 ns;
write_lock_irqsave(&mdev->clock_lock, flags);
ns = timecounter_read(&mdev->clock);
write_unlock_irqrestore(&mdev->clock_lock, flags);
- ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
@@ -191,11 +190,11 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
* wall timer value.
**/
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
- u64 ns = timespec_to_ns(ts);
+ u64 ns = timespec64_to_ns(ts);
unsigned long flags;
/* reset the timecounter */
@@ -232,8 +231,8 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.pps = 0,
.adjfreq = mlx4_en_phc_adjfreq,
.adjtime = mlx4_en_phc_adjtime,
- .gettime = mlx4_en_phc_gettime,
- .settime = mlx4_en_phc_settime,
+ .gettime64 = mlx4_en_phc_gettime,
+ .settime64 = mlx4_en_phc_settime,
.enable = mlx4_en_phc_enable,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index c95ca252187c..f01918c63f28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -35,6 +35,50 @@
#include <linux/math64.h>
#include "mlx4_en.h"
+#include "fw_qos.h"
+
+/* Definitions for QCN
+ */
+
+struct mlx4_congestion_control_mb_prio_802_1_qau_params {
+ __be32 modify_enable_high;
+ __be32 modify_enable_low;
+ __be32 reserved1;
+ __be32 extended_enable;
+ __be32 rppp_max_rps;
+ __be32 rpg_time_reset;
+ __be32 rpg_byte_reset;
+ __be32 rpg_threshold;
+ __be32 rpg_max_rate;
+ __be32 rpg_ai_rate;
+ __be32 rpg_hai_rate;
+ __be32 rpg_gd;
+ __be32 rpg_min_dec_fac;
+ __be32 rpg_min_rate;
+ __be32 max_time_rise;
+ __be32 max_byte_rise;
+ __be32 max_qdelta;
+ __be32 min_qoffset;
+ __be32 gd_coefficient;
+ __be32 reserved2[5];
+ __be32 cp_sample_base;
+ __be32 reserved3[39];
+};
+
+struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
+ __be64 rppp_rp_centiseconds;
+ __be32 reserved1;
+ __be32 ignored_cnm;
+ __be32 rppp_created_rps;
+ __be32 estimated_total_rate;
+ __be32 max_active_rate_limiter_index;
+ __be32 dropped_cnms_busy_fw;
+ __be32 reserved2;
+ __be32 cnms_handled_successfully;
+ __be32 min_total_limiters_rate;
+ __be32 max_total_limiters_rate;
+ __be32 reserved3[4];
+};
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
@@ -183,6 +227,10 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
+ else
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ prof->rx_ppp, prof->rx_pause,
+ prof->tx_ppp, prof->tx_pause);
return err;
}
@@ -242,6 +290,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
return 0;
}
+#define RPG_ENABLE_BIT 31
+#define CN_TAG_BIT 30
+
+static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
+ struct ieee_qcn *qcn)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+ struct mlx4_cmd_mailbox *mailbox_out = NULL;
+ u64 mailbox_in_dma = 0;
+ u32 inmod = 0;
+ int i, err;
+
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+ return -EOPNOTSUPP;
+
+ mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+ if (IS_ERR(mailbox_out))
+ return -ENOMEM;
+ hw_qcn =
+ (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
+ mailbox_out->buf;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ inmod = priv->port | ((1 << i) << 8) |
+ (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+ err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+ mailbox_out->dma,
+ inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
+ MLX4_CMD_CONGESTION_CTRL_OPCODE,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+ return err;
+ }
+
+ qcn->rpg_enable[i] =
+ be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
+ qcn->rppp_max_rps[i] =
+ be32_to_cpu(hw_qcn->rppp_max_rps);
+ qcn->rpg_time_reset[i] =
+ be32_to_cpu(hw_qcn->rpg_time_reset);
+ qcn->rpg_byte_reset[i] =
+ be32_to_cpu(hw_qcn->rpg_byte_reset);
+ qcn->rpg_threshold[i] =
+ be32_to_cpu(hw_qcn->rpg_threshold);
+ qcn->rpg_max_rate[i] =
+ be32_to_cpu(hw_qcn->rpg_max_rate);
+ qcn->rpg_ai_rate[i] =
+ be32_to_cpu(hw_qcn->rpg_ai_rate);
+ qcn->rpg_hai_rate[i] =
+ be32_to_cpu(hw_qcn->rpg_hai_rate);
+ qcn->rpg_gd[i] =
+ be32_to_cpu(hw_qcn->rpg_gd);
+ qcn->rpg_min_dec_fac[i] =
+ be32_to_cpu(hw_qcn->rpg_min_dec_fac);
+ qcn->rpg_min_rate[i] =
+ be32_to_cpu(hw_qcn->rpg_min_rate);
+ qcn->cndd_state_machine[i] =
+ priv->cndd_state[i];
+ }
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+ return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
+ struct ieee_qcn *qcn)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+ struct mlx4_cmd_mailbox *mailbox_in = NULL;
+ u64 mailbox_in_dma = 0;
+ u32 inmod = 0;
+ int i, err;
+#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
+#define MODIFY_ENABLE_LOW_MASK 0xffc00000
+
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+ return -EOPNOTSUPP;
+
+ mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+ if (IS_ERR(mailbox_in))
+ return -ENOMEM;
+
+ mailbox_in_dma = mailbox_in->dma;
+ hw_qcn =
+ (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ inmod = priv->port | ((1 << i) << 8) |
+ (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+
+ /* Before updating a QCN parameter,
+ * we need to set its modify enable bit to 1
+ */
+
+ hw_qcn->modify_enable_high = cpu_to_be32(
+ MODIFY_ENABLE_HIGH_MASK);
+ hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
+
+ hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
+ hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
+ hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
+ hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
+ hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
+ hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
+ hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
+ hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
+ hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
+ hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
+ hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
+ priv->cndd_state[i] = qcn->cndd_state_machine[i];
+ if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
+ hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
+
+ err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
+ MLX4_CONGESTION_CONTROL_SET_PARAMS,
+ MLX4_CMD_CONGESTION_CTRL_OPCODE,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+ return err;
+ }
+ }
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+ return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
+ struct ieee_qcn_stats *qcn_stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
+ struct mlx4_cmd_mailbox *mailbox_out = NULL;
+ u64 mailbox_in_dma = 0;
+ u32 inmod = 0;
+ int i, err;
+
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+ return -EOPNOTSUPP;
+
+ mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+ if (IS_ERR(mailbox_out))
+ return -ENOMEM;
+
+ hw_qcn_stats =
+ (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
+ mailbox_out->buf;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ inmod = priv->port | ((1 << i) << 8) |
+ (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+ err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+ mailbox_out->dma, inmod,
+ MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+ MLX4_CMD_CONGESTION_CTRL_OPCODE,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+ return err;
+ }
+ qcn_stats->rppp_rp_centiseconds[i] =
+ be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
+ qcn_stats->rppp_created_rps[i] =
+ be32_to_cpu(hw_qcn_stats->rppp_created_rps);
+ }
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+ return 0;
+}
+
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
.ieee_getets = mlx4_en_dcbnl_ieee_getets,
.ieee_setets = mlx4_en_dcbnl_ieee_setets,
@@ -252,6 +472,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
+ .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
+ .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
+ .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
};
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a7b58ba8492b..a2ddf3d75ff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -38,6 +38,7 @@
#include <linux/mlx4/device.h>
#include <linux/in.h>
#include <net/ip.h>
+#include <linux/bitmap.h>
#include "mlx4_en.h"
#include "en_port.h"
@@ -104,6 +105,7 @@ static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
};
static const char main_strings[][ETH_GSTRING_LEN] = {
+ /* main statistics */
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
"rx_length_errors", "rx_over_errors", "rx_crc_errors",
@@ -117,14 +119,76 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
+ /* priority flow control statistics rx */
+ "rx_pause_prio_0", "rx_pause_duration_prio_0",
+ "rx_pause_transition_prio_0",
+ "rx_pause_prio_1", "rx_pause_duration_prio_1",
+ "rx_pause_transition_prio_1",
+ "rx_pause_prio_2", "rx_pause_duration_prio_2",
+ "rx_pause_transition_prio_2",
+ "rx_pause_prio_3", "rx_pause_duration_prio_3",
+ "rx_pause_transition_prio_3",
+ "rx_pause_prio_4", "rx_pause_duration_prio_4",
+ "rx_pause_transition_prio_4",
+ "rx_pause_prio_5", "rx_pause_duration_prio_5",
+ "rx_pause_transition_prio_5",
+ "rx_pause_prio_6", "rx_pause_duration_prio_6",
+ "rx_pause_transition_prio_6",
+ "rx_pause_prio_7", "rx_pause_duration_prio_7",
+ "rx_pause_transition_prio_7",
+
+ /* flow control statistics rx */
+ "rx_pause", "rx_pause_duration", "rx_pause_transition",
+
+ /* priority flow control statistics tx */
+ "tx_pause_prio_0", "tx_pause_duration_prio_0",
+ "tx_pause_transition_prio_0",
+ "tx_pause_prio_1", "tx_pause_duration_prio_1",
+ "tx_pause_transition_prio_1",
+ "tx_pause_prio_2", "tx_pause_duration_prio_2",
+ "tx_pause_transition_prio_2",
+ "tx_pause_prio_3", "tx_pause_duration_prio_3",
+ "tx_pause_transition_prio_3",
+ "tx_pause_prio_4", "tx_pause_duration_prio_4",
+ "tx_pause_transition_prio_4",
+ "tx_pause_prio_5", "tx_pause_duration_prio_5",
+ "tx_pause_transition_prio_5",
+ "tx_pause_prio_6", "tx_pause_duration_prio_6",
+ "tx_pause_transition_prio_6",
+ "tx_pause_prio_7", "tx_pause_duration_prio_7",
+ "tx_pause_transition_prio_7",
+
+ /* flow control statistics tx */
+ "tx_pause", "tx_pause_duration", "tx_pause_transition",
+
/* packet statistics */
- "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
- "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
- "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
- "tx_prio_6", "tx_prio_7",
+ "rx_multicast_packets",
+ "rx_broadcast_packets",
+ "rx_jabbers",
+ "rx_in_range_length_error",
+ "rx_out_range_length_error",
+ "tx_multicast_packets",
+ "tx_broadcast_packets",
+ "rx_prio_0_packets", "rx_prio_0_bytes",
+ "rx_prio_1_packets", "rx_prio_1_bytes",
+ "rx_prio_2_packets", "rx_prio_2_bytes",
+ "rx_prio_3_packets", "rx_prio_3_bytes",
+ "rx_prio_4_packets", "rx_prio_4_bytes",
+ "rx_prio_5_packets", "rx_prio_5_bytes",
+ "rx_prio_6_packets", "rx_prio_6_bytes",
+ "rx_prio_7_packets", "rx_prio_7_bytes",
+ "rx_novlan_packets", "rx_novlan_bytes",
+ "tx_prio_0_packets", "tx_prio_0_bytes",
+ "tx_prio_1_packets", "tx_prio_1_bytes",
+ "tx_prio_2_packets", "tx_prio_2_bytes",
+ "tx_prio_3_packets", "tx_prio_3_bytes",
+ "tx_prio_4_packets", "tx_prio_4_bytes",
+ "tx_prio_5_packets", "tx_prio_5_bytes",
+ "tx_prio_6_packets", "tx_prio_6_bytes",
+ "tx_prio_7_packets", "tx_prio_7_bytes",
+ "tx_novlan_packets", "tx_novlan_bytes",
+
};
-#define NUM_MAIN_STATS 21
-#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
"Interrupt Test",
@@ -224,14 +288,50 @@ static int mlx4_en_set_wol(struct net_device *netdev,
return err;
}
+struct bitmap_iterator {
+ unsigned long *stats_bitmap;
+ unsigned int count;
+ unsigned int iterator;
+ bool advance_array; /* if set, force no increments */
+};
+
+static inline void bitmap_iterator_init(struct bitmap_iterator *h,
+ unsigned long *stats_bitmap,
+ int count)
+{
+ h->iterator = 0;
+ h->advance_array = !bitmap_empty(stats_bitmap, count);
+ h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
+ : count;
+ h->stats_bitmap = stats_bitmap;
+}
+
+static inline int bitmap_iterator_test(struct bitmap_iterator *h)
+{
+ return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
+}
+
+static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
+{
+ return h->iterator++;
+}
+
+static inline unsigned int
+bitmap_iterator_count(struct bitmap_iterator *h)
+{
+ return h->count;
+}
+
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int bit_count = hweight64(priv->stats_bitmap);
+ struct bitmap_iterator it;
+
+ bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
switch (sset) {
case ETH_SS_STATS:
- return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
+ return bitmap_iterator_count(&it) +
(priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_RX_BUSY_POLL
(priv->rx_ring_num * 5);
@@ -253,34 +353,45 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
- int i, j = 0;
+ int i;
+ struct bitmap_iterator it;
+
+ bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
spin_lock_bh(&priv->stats_lock);
- if (!(priv->stats_bitmap)) {
- for (i = 0; i < NUM_MAIN_STATS; i++)
- data[index++] =
- ((unsigned long *) &priv->stats)[i];
- for (i = 0; i < NUM_PORT_STATS; i++)
+ for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->stats)[i];
+
+ for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->port_stats)[i];
+
+ for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
+ i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
data[index++] =
- ((unsigned long *) &priv->port_stats)[i];
- for (i = 0; i < NUM_PKT_STATS; i++)
+ ((u64 *)&priv->rx_priority_flowstats)[i];
+
+ for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((u64 *)&priv->rx_flowstats)[i];
+
+ for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
+ i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
data[index++] =
- ((unsigned long *) &priv->pkstats)[i];
- } else {
- for (i = 0; i < NUM_MAIN_STATS; i++) {
- if ((priv->stats_bitmap >> j) & 1)
- data[index++] =
- ((unsigned long *) &priv->stats)[i];
- j++;
- }
- for (i = 0; i < NUM_PORT_STATS; i++) {
- if ((priv->stats_bitmap >> j) & 1)
- data[index++] =
- ((unsigned long *) &priv->port_stats)[i];
- j++;
- }
- }
+ ((u64 *)&priv->tx_priority_flowstats)[i];
+
+ for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((u64 *)&priv->tx_flowstats)[i];
+
+ for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->pkstats)[i];
+
for (i = 0; i < priv->tx_ring_num; i++) {
data[index++] = priv->tx_ring[i]->packets;
data[index++] = priv->tx_ring[i]->bytes;
@@ -309,7 +420,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
- int i;
+ int i, strings = 0;
+ struct bitmap_iterator it;
+
+ bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
switch (stringset) {
case ETH_SS_TEST:
@@ -322,29 +436,30 @@ static void mlx4_en_get_strings(struct net_device *dev,
case ETH_SS_STATS:
/* Add main counters */
- if (!priv->stats_bitmap) {
- for (i = 0; i < NUM_MAIN_STATS; i++)
+ for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
+ for (i = 0; i < NUM_PORT_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i]);
- for (i = 0; i < NUM_PORT_STATS; i++)
+ main_strings[strings]);
+
+ for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i +
- NUM_MAIN_STATS]);
- for (i = 0; i < NUM_PKT_STATS; i++)
+ main_strings[strings]);
+
+ for (i = 0; i < NUM_PKT_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i +
- NUM_MAIN_STATS +
- NUM_PORT_STATS]);
- } else
- for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
- if ((priv->stats_bitmap >> i) & 1) {
- strcpy(data +
- (index++) * ETH_GSTRING_LEN,
- main_strings[i]);
- }
- if (!(priv->stats_bitmap >> i))
- break;
- }
+ main_strings[strings]);
+
for (i = 0; i < priv->tx_ring_num; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
@@ -885,6 +1000,12 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
priv->prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
+ else
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ priv->prof->rx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->tx_pause);
return err;
}
@@ -981,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
struct mlx4_en_priv *priv = netdev_priv(dev);
/* check if requested function is supported by the device */
- if ((hfunc == ETH_RSS_HASH_TOP &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
- (hfunc == ETH_RSS_HASH_XOR &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
- return -EINVAL;
+ if (hfunc == ETH_RSS_HASH_TOP) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
+ return -EINVAL;
+ if (!(dev->features & NETIF_F_RXHASH))
+ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+ return 0;
+ } else if (hfunc == ETH_RSS_HASH_XOR) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
+ return -EINVAL;
+ if (dev->features & NETIF_F_RXHASH)
+ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+ return 0;
+ }
- priv->rss_hash_fn = hfunc;
- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
- return 0;
+ return -EINVAL;
}
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
@@ -1068,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
priv->prof->rss_rings = rss_rings;
if (key)
memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->rss_hash_fn = hfunc;
if (port_up) {
err = mlx4_en_start_port(dev);
@@ -1818,6 +1942,32 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
return 0;
}
+static int mlx4_en_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ int err;
+ u16 beacon_duration;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ beacon_duration = PORT_BEACON_MAX_LIMIT;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ beacon_duration = 0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -1827,6 +1977,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_sset_count = mlx4_en_get_sset_count,
.get_ethtool_stats = mlx4_en_get_ethtool_stats,
.self_test = mlx4_en_self_test,
+ .set_phys_id = mlx4_en_set_phys_id,
.get_wol = mlx4_en_get_wol,
.set_wol = mlx4_en_set_wol,
.get_msglevel = mlx4_en_get_msglevel,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 58d5a07d0ff4..913b716ed2e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -103,6 +103,11 @@ void mlx4_en_update_loopback_state(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ if (features & NETIF_F_LOOPBACK)
+ priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ else
+ priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 3485acf03014..32f5ec737472 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_ptp_overflow_check(mdev);
+ mlx4_en_recover_from_oom(priv);
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
}
@@ -1685,7 +1686,7 @@ int mlx4_en_start_port(struct net_device *dev)
}
/* Attach rx QP to bradcast address */
- memset(&mc_list[10], 0xff, ETH_ALEN);
+ eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
priv->port, 0, MLX4_PROT_ETH,
@@ -1721,7 +1722,7 @@ mac_err:
cq_err:
while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
- mlx4_en_free_affinity_hint(priv, i);
+ mlx4_en_free_affinity_hint(priv, rx_index);
}
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
@@ -1786,7 +1787,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
}
/* Detach All multicasts */
- memset(&mc_list[10], 0xff, ETH_ALEN);
+ eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
MLX4_PROT_ETH, priv->broadcast_id);
@@ -1888,6 +1889,12 @@ static void mlx4_en_clear_stats(struct net_device *dev)
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
+ memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
+ memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
+ memset(&priv->rx_priority_flowstats, 0,
+ sizeof(priv->rx_priority_flowstats));
+ memset(&priv->tx_priority_flowstats, 0,
+ sizeof(priv->tx_priority_flowstats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i]->bytes = 0;
@@ -2189,31 +2196,50 @@ static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ bool reset = false;
int ret = 0;
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
+ en_info(priv, "Turn %s RX-FCS\n",
+ (features & NETIF_F_RXFCS) ? "ON" : "OFF");
+ reset = true;
+ }
+
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
+ u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
+
+ en_info(priv, "Turn %s RX-ALL\n",
+ ignore_fcs_value ? "ON" : "OFF");
+ ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
+ priv->port, ignore_fcs_value);
+ if (ret)
+ return ret;
+ }
+
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
en_info(priv, "Turn %s RX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
- ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
- features);
- if (ret)
- return ret;
+ reset = true;
}
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
- if (features & NETIF_F_LOOPBACK)
- priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
- else
- priv->ctrl_flags &=
- cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
+ en_info(priv, "Turn %s loopback\n",
+ (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
+ mlx4_en_update_loopback_state(netdev, features);
+ }
- mlx4_en_update_loopback_state(netdev, features);
+ if (reset) {
+ ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+ features);
+ if (ret)
+ return ret;
+ }
return 0;
-
}
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
@@ -2236,6 +2262,16 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
}
+static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
+ max_tx_rate);
+}
+
static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
@@ -2373,10 +2409,38 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
+ features = vlan_features_check(skb, features);
return vxlan_features_check(skb, features);
}
#endif
+static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
+ struct mlx4_update_qp_params params;
+ int err;
+
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
+ return -EOPNOTSUPP;
+
+ /* rate is provided to us in Mb/s; check if it fits into 12 bits, if not use Gb/s */
+ if (maxrate >> 12) {
+ params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
+ params.rate_val = maxrate / 1000;
+ } else if (maxrate) {
+ params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
+ params.rate_val = maxrate;
+ } else { /* zero serves to revoke the QP rate-limitation */
+ params.rate_unit = 0;
+ params.rate_val = 0;
+ }
+
+ err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
+ &params);
+ return err;
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2408,6 +2472,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
#endif
+ .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2425,6 +2490,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
.ndo_set_vf_mac = mlx4_en_set_vf_mac,
.ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
+ .ndo_set_vf_rate = mlx4_en_set_vf_rate,
.ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
.ndo_get_vf_config = mlx4_en_get_vf_config,
@@ -2442,6 +2508,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
#endif
+ .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
};
struct mlx4_en_bond {
@@ -2618,6 +2685,82 @@ int mlx4_en_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
+ struct mlx4_en_stats_bitmap *stats_bitmap,
+ u8 rx_ppp, u8 rx_pause,
+ u8 tx_ppp, u8 tx_pause)
+{
+ int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
+
+ if (!mlx4_is_slave(dev) &&
+ (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
+ mutex_lock(&stats_bitmap->mutex);
+ bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
+
+ if (rx_ppp)
+ bitmap_set(stats_bitmap->bitmap, last_i,
+ NUM_FLOW_PRIORITY_STATS_RX);
+ last_i += NUM_FLOW_PRIORITY_STATS_RX;
+
+ if (rx_pause && !(rx_ppp))
+ bitmap_set(stats_bitmap->bitmap, last_i,
+ NUM_FLOW_STATS_RX);
+ last_i += NUM_FLOW_STATS_RX;
+
+ if (tx_ppp)
+ bitmap_set(stats_bitmap->bitmap, last_i,
+ NUM_FLOW_PRIORITY_STATS_TX);
+ last_i += NUM_FLOW_PRIORITY_STATS_TX;
+
+ if (tx_pause && !(tx_ppp))
+ bitmap_set(stats_bitmap->bitmap, last_i,
+ NUM_FLOW_STATS_TX);
+ last_i += NUM_FLOW_STATS_TX;
+
+ mutex_unlock(&stats_bitmap->mutex);
+ }
+}
+
+void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
+ struct mlx4_en_stats_bitmap *stats_bitmap,
+ u8 rx_ppp, u8 rx_pause,
+ u8 tx_ppp, u8 tx_pause)
+{
+ int last_i = 0;
+
+ mutex_init(&stats_bitmap->mutex);
+ bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
+
+ if (mlx4_is_slave(dev)) {
+ bitmap_set(stats_bitmap->bitmap, last_i +
+ MLX4_FIND_NETDEV_STAT(rx_packets), 1);
+ bitmap_set(stats_bitmap->bitmap, last_i +
+ MLX4_FIND_NETDEV_STAT(tx_packets), 1);
+ bitmap_set(stats_bitmap->bitmap, last_i +
+ MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
+ bitmap_set(stats_bitmap->bitmap, last_i +
+ MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
+ bitmap_set(stats_bitmap->bitmap, last_i +
+ MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
+ bitmap_set(stats_bitmap->bitmap, last_i +
+ MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
+ } else {
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
+ }
+ last_i += NUM_MAIN_STATS;
+
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
+ last_i += NUM_PORT_STATS;
+
+ mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
+ rx_ppp, rx_pause,
+ tx_ppp, tx_pause);
+ last_i += NUM_FLOW_STATS;
+
+ if (!mlx4_is_slave(dev))
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
+}
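+
+/* Resulting bitmap layout, one bit per exported counter, as built above:
+ * NUM_MAIN_STATS netdev counters first, then NUM_PORT_STATS driver port
+ * counters, then NUM_FLOW_STATS pause/PFC counters (set only when the
+ * device supports them, see mlx4_en_update_pfc_stats_bitmap()), and
+ * finally NUM_PKT_STATS per-priority packet counters (non-slave only).
+ */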
+
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
@@ -2693,7 +2836,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
- if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
} else {
en_info(priv, "enabling only PFC DCB ops\n");
@@ -2780,6 +2923,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+ dev->hw_features |= NETIF_F_RXFCS;
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
+ dev->hw_features |= NETIF_F_RXALL;
+
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED &&
mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
@@ -2844,7 +2993,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
- mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+ mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ mdev->profile.prof[priv->port].rx_ppp,
+ mdev->profile.prof[priv->port].rx_pause,
+ mdev->profile.prof[priv->port].tx_ppp,
+ mdev->profile.prof[priv->port].tx_pause);
err = register_netdev(dev);
if (err) {
@@ -2872,7 +3025,8 @@ int mlx4_en_reset_config(struct net_device *dev,
if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
- !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
+ !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+ !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
return 0; /* Nothing to change */
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
@@ -2911,6 +3065,13 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
+ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
+ if (features & NETIF_F_RXFCS)
+ dev->features |= NETIF_F_RXFCS;
+ else
+ dev->features &= ~NETIF_F_RXFCS;
+ }
+
/* RX vlan offload and RX time-stamping can't co-exist !
* Regardless of the caller's choice,
* Turn Off RX vlan offload in case of time-stamping is ON
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 6cb80072af6c..0a56f010c846 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -128,9 +128,29 @@ out:
return err;
}
+/* Each counter set is located in struct mlx4_en_stat_out_mbox
+ * with a constant offset between its prio components.
+ * This function walks a counter set and sums all of its prio components.
+ */
+static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
+{
+ __be64 *curr = start;
+ unsigned long ret = 0;
+ int i;
+ int offset = next - start;
+
+ for (i = 0; i < num; i++) {
+ ret += be64_to_cpu(*curr);
+ curr += offset;
+ }
+
+ return ret;
+}
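+
+/* Example, matching the usage below (illustrative): the multicast
+ * counters MCAST_prio_0..MCAST_prio_7 and MCAST_novlan share a constant
+ * stride in the mailbox, so
+ *	en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
+ *		       &mlx4_en_stats->MCAST_prio_1, NUM_PRIORITIES)
+ * sums all nine of them.
+ */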
+
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
+ struct mlx4_en_stat_out_flow_control_mbox *flowstats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
struct net_device_stats *stats = &priv->stats;
struct mlx4_cmd_mailbox *mailbox;
@@ -183,22 +203,25 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.xmit_more += ring->xmit_more;
}
+ /* net device stats */
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
- be32_to_cpu(mlx4_en_stats->RdropLength) +
be32_to_cpu(mlx4_en_stats->RJBBR) +
be32_to_cpu(mlx4_en_stats->RCRC) +
- be32_to_cpu(mlx4_en_stats->RRUNT);
- stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
- stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
- be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
- be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
- be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
- be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
- be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
- be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
- be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
- be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+ be32_to_cpu(mlx4_en_stats->RRUNT) +
+ be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
+ be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
+ be32_to_cpu(mlx4_en_stats->RSHORT) +
+ en_stats_adder(&mlx4_en_stats->RGIANT_prio_0,
+ &mlx4_en_stats->RGIANT_prio_1,
+ NUM_PRIORITIES);
+ stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0,
+ &mlx4_en_stats->TGIANT_prio_1,
+ NUM_PRIORITIES);
+ stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
+ &mlx4_en_stats->MCAST_prio_1,
+ NUM_PRIORITIES);
stats->collisions = 0;
+ stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
@@ -210,33 +233,116 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_fifo_errors = 0;
stats->tx_heartbeat_errors = 0;
stats->tx_window_errors = 0;
+ stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+
+ /* RX stats */
+ priv->pkstats.rx_multicast_packets = stats->multicast;
+ priv->pkstats.rx_broadcast_packets =
+ en_stats_adder(&mlx4_en_stats->RBCAST_prio_0,
+ &mlx4_en_stats->RBCAST_prio_1,
+ NUM_PRIORITIES);
+ priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
+ priv->pkstats.rx_in_range_length_error =
+ be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
+ priv->pkstats.rx_out_range_length_error =
+ be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);
+
+ /* Tx stats */
+ priv->pkstats.tx_multicast_packets =
+ en_stats_adder(&mlx4_en_stats->TMCAST_prio_0,
+ &mlx4_en_stats->TMCAST_prio_1,
+ NUM_PRIORITIES);
+ priv->pkstats.tx_broadcast_packets =
+ en_stats_adder(&mlx4_en_stats->TBCAST_prio_0,
+ &mlx4_en_stats->TBCAST_prio_1,
+ NUM_PRIORITIES);
+
+ priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
+ priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
+ priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
+ priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1);
+ priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
+ priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2);
+ priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
+ priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3);
+ priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
+ priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4);
+ priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
+ priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5);
+ priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
+ priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6);
+ priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
+ priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7);
+ priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan);
+ priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan);
+ priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
+ priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0);
+ priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
+ priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1);
+ priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
+ priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2);
+ priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
+ priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3);
+ priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
+ priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4);
+ priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
+ priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5);
+ priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
+ priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6);
+ priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
+ priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7);
+ priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
+ priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
+
+ spin_unlock_bh(&priv->stats_lock);
+
+ /* 0xff bytes indicate an invalid value */
+ memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
+ memset(mailbox->buf, 0,
+ sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
+ in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
+ 0, MLX4_CMD_DUMP_ETH_STATS,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+ }
+
+ flowstats = mailbox->buf;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
+ priv->rx_priority_flowstats[i].rx_pause =
+ be64_to_cpu(flowstats[i].rx_pause);
+ priv->rx_priority_flowstats[i].rx_pause_duration =
+ be64_to_cpu(flowstats[i].rx_pause_duration);
+ priv->rx_priority_flowstats[i].rx_pause_transition =
+ be64_to_cpu(flowstats[i].rx_pause_transition);
+ priv->tx_priority_flowstats[i].tx_pause =
+ be64_to_cpu(flowstats[i].tx_pause);
+ priv->tx_priority_flowstats[i].tx_pause_duration =
+ be64_to_cpu(flowstats[i].tx_pause_duration);
+ priv->tx_priority_flowstats[i].tx_pause_transition =
+ be64_to_cpu(flowstats[i].tx_pause_transition);
+ }
+
+ /* if PFC is not in use, all priority counters hold the same value */
+ priv->rx_flowstats.rx_pause =
+ be64_to_cpu(flowstats[0].rx_pause);
+ priv->rx_flowstats.rx_pause_duration =
+ be64_to_cpu(flowstats[0].rx_pause_duration);
+ priv->rx_flowstats.rx_pause_transition =
+ be64_to_cpu(flowstats[0].rx_pause_transition);
+ priv->tx_flowstats.tx_pause =
+ be64_to_cpu(flowstats[0].tx_pause);
+ priv->tx_flowstats.tx_pause_duration =
+ be64_to_cpu(flowstats[0].tx_pause_duration);
+ priv->tx_flowstats.tx_pause_transition =
+ be64_to_cpu(flowstats[0].tx_pause_transition);
- priv->pkstats.broadcast =
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
- be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
- be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
- priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
- priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
- priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
- priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
- priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
- priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
- priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
- priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
- priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
- priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
- priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
- priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
- priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
- priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
- priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
- priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
spin_unlock_bh(&priv->stats_lock);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 698d60de1255..2a77a6b19121 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
+static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+{
+ BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
+ return ring->prod == ring->cons;
+}
+
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
- BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
- while (ring->cons != ring->prod) {
+ while (!mlx4_en_is_ring_empty(ring)) {
index = ring->cons & ring->size_mask;
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
@@ -491,6 +496,23 @@ err_allocator:
return err;
}
+/* We recover from out of memory by scheduling our napi poll
+ * function (mlx4_en_process_cq), which tries to allocate
+ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
+ */
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+{
+ int ring;
+
+ if (!priv->port_up)
+ return;
+
+ for (ring = 0; ring < priv->rx_ring_num; ring++) {
+ if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+ napi_reschedule(&priv->rx_cq[ring]->napi);
+ }
+}
+
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
@@ -771,7 +793,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/*
* make sure we read the CQE after we read the ownership bit
*/
- rmb();
+ dma_rmb();
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
@@ -1116,7 +1138,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
/* Cancel FCS removal if FW allows */
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
context->param3 |= cpu_to_be32(1 << 29);
- ring->fcs_del = ETH_FCS_LEN;
+ if (priv->dev->features & NETIF_F_RXFCS)
+ ring->fcs_del = 0;
+ else
+ ring->fcs_del = ETH_FCS_LEN;
} else
ring->fcs_del = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index a61009f4b2df..b66e03d9711f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -66,7 +66,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
packet = (unsigned char *)skb_put(skb, packet_size);
memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
- memset(ethh->h_source, 0, ETH_ALEN);
+ eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_set_mac_header(skb, 0);
for (i = 0; i < packet_size; ++i) /* fill our packet */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 55f9f5c5344e..f7bf312fb443 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
- if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
- cpumask_set_cpu(queue_index, &ring->affinity_mask);
+ if (queue_index < priv->num_tx_rings_p_up)
+ cpumask_set_cpu_local_first(queue_index,
+ priv->mdev->dev->numa_node,
+ &ring->affinity_mask);
*pring = ring;
return 0;
@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
- if (!user_prio && cpu_online(ring->queue_index))
+ if (!cpumask_empty(&ring->affinity_mask))
netif_set_xps_queue(priv->dev, &ring->affinity_mask,
ring->queue_index);
@@ -416,7 +418,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
* make sure we read the CQE after we read the
* ownership bit
*/
- rmb();
+ dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
@@ -667,7 +669,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
skb_frag_size(&shinfo->frags[0]));
}
- wmb();
+ dma_wmb();
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
}
@@ -804,7 +806,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data->addr = cpu_to_be64(dma);
data->lkey = ring->mr_key;
- wmb();
+ dma_wmb();
data->byte_count = cpu_to_be32(byte_count);
--data;
}
@@ -821,7 +823,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data->addr = cpu_to_be64(dma);
data->lkey = ring->mr_key;
- wmb();
+ dma_wmb();
data->byte_count = cpu_to_be32(byte_count);
}
/* tx completion can avoid cache line miss for common cases */
@@ -938,7 +940,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW
*/
- wmb();
+ dma_wmb();
tx_desc->ctrl.owner_opcode = op_own;
wmb();
@@ -958,7 +960,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW
*/
- wmb();
+ dma_wmb();
tx_desc->ctrl.owner_opcode = op_own;
if (send_doorbell) {
wmb();
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 6e70ffee8e87..2619c9fbf42d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -188,7 +188,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownership bit */
- wmb();
+ dma_wmb();
s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
@@ -473,7 +473,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
- rmb();
+ dma_rmb();
switch (eqe->type) {
case MLX4_EVENT_TYPE_COMP:
@@ -702,6 +702,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
}
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
+ flr_slave);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_flr_event_work);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 5a21e5dc94cb..e30bf57ad7a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,18 +49,20 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static bool enable_qos;
+static bool enable_qos = true;
module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
+ u64 val; \
switch (sizeof (dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
- case 8: (dest) = be64_to_cpup(__p); break; \
+ case 8: val = get_unaligned((u64 *)__p); \
+ (dest) = be64_to_cpu(val); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
} while (0)
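+/* The 8-byte case goes through get_unaligned() because 64-bit fields in
+ * the query mailboxes are not guaranteed to be naturally aligned; a plain
+ * be64_to_cpup() dereference could trap on strict-alignment architectures.
+ */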
@@ -105,6 +107,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support",
[48] = "Counters support",
+ [52] = "RSS IP fragments support",
[53] = "Port ETS Scheduler support",
[55] = "Port link type sensing support",
[59] = "Port management change event support",
@@ -143,7 +146,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[18] = "More than 80 VFs support",
[19] = "Performance optimized for limited rule configuration flow steering support",
[20] = "Recoverable error events support",
- [21] = "Port Remap support"
+ [21] = "Port Remap support",
+ [22] = "QCN support",
+ [23] = "QP rate limiting support",
+ [24] = "Ethernet Flow control statistics support",
+ [25] = "Granular QoS per VF support",
+ [26] = "Port ETS Scheduler support",
+ [27] = "Port beacon support",
+ [28] = "RX-ALL support",
};
int i;
@@ -641,6 +651,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSS_OFFSET 0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
+#define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
@@ -670,12 +681,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
+#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
-#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a
+#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -696,6 +708,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
+#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
+#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
+#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
+
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -768,15 +784,24 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
+ if (field & 0x10)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+ if (field & 0x1)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
@@ -856,6 +881,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
dev_cap->max_rq_desc_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+ if (field & (1 << 4))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
if (field & (1 << 5))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
if (field & (1 << 6))
@@ -869,6 +896,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
if (field & 0x20)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
+ if (field & (1 << 2))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -882,6 +911,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
if (field & 1<<3)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
+ if (field & (1 << 5))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -900,6 +931,18 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
+ dev_cap->rl_caps.num_rates = size;
+ if (dev_cap->rl_caps.num_rates) {
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
+ dev_cap->rl_caps.max_val = size & 0xfff;
+ dev_cap->rl_caps.max_unit = size >> 14;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
+ dev_cap->rl_caps.min_val = size & 0xfff;
+ dev_cap->rl_caps.min_unit = size >> 14;
+ }
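+/* Illustrative decoding of the rate-limit words read above: a raw value
+ * of 0x4005 would give unit = 0x4005 >> 14 = 1 and val = 0x4005 & 0xfff = 5;
+ * the unit/value pairs are later printed by mlx4_dev_cap_dump().
+ */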
+
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
@@ -975,6 +1018,15 @@ void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->dmfs_high_rate_qpn_base);
mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
dev_cap->dmfs_high_rate_qpn_range);
+
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
+ struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;
+
+ mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
+ rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
+ rl_caps->min_unit, rl_caps->min_val);
+ }
+
dump_dev_cap_flags(dev, dev_cap->flags);
dump_dev_cap_flags2(dev, dev_cap->flags2);
}
@@ -1058,6 +1110,7 @@ out:
return err;
}
+#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28)
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM (1 << 20)
@@ -1071,6 +1124,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
u64 flags;
int err = 0;
u8 field;
+ u16 field16;
u32 bmme_flags, field32;
int real_port;
int slave_port;
@@ -1101,6 +1155,9 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
}
for (; slave_port < dev->caps.num_ports; ++slave_port)
flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
+
+ /* Not exposing RSS IP fragments to guests */
+ flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
@@ -1113,11 +1170,16 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
- /* For guests, disable vxlan tunneling */
+ /* For guests, disable vxlan tunneling and QoS support */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
- field &= 0xf7;
+ field &= 0xd7;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
+ /* For guests, disable port BEACON */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+ field &= 0x7f;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
+
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
@@ -1146,9 +1208,28 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
/* turn off host side virt features (VST, FSM, etc) for guests */
MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
- DEV_CAP_EXT_2_FLAG_FSM);
+ DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
+ /* turn off QCN for guests */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+ field &= 0xfe;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
+
+ /* turn off QP max-rate limiting for guests */
+ field16 = 0;
+ MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
+
+ /* turn off QoS per VF support for guests */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+ field &= 0xef;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+
+ /* turn off ignore FCS feature for guests */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+ field &= 0xfb;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
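+
+ /* Each mask above clears only the capability bits being hidden from
+ * the guest; e.g. 0xfb on the CONFIG_DEV byte clears bit 2, the
+ * ignore-FCS capability read in mlx4_QUERY_DEV_CAP() above.
+ */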
+
return 0;
}
@@ -1526,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id)
* swaps each 4-byte word before passing it back to
* us. Therefore we need to swab it before printing.
*/
- for (i = 0; i < 4; ++i)
- ((u32 *) board_id)[i] =
- swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+ u32 *bid_u32 = (u32 *)board_id;
+
+ for (i = 0; i < 4; ++i) {
+ u32 *addr;
+ u32 val;
+
+ addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
+ val = get_unaligned(addr);
+ val = swab32(val);
+ put_unaligned(val, &bid_u32[i]);
+ }
}
}
@@ -1648,13 +1737,17 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
/* Enable QoS support if module parameter set */
- if (enable_qos)
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
/* enable counters */
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+ /* Enable RSS spread to fragmented IP packets when supported */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
+
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
@@ -1843,6 +1936,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
else
param->steering_mode = MLX4_STEERING_MODE_A0;
}
+
+ if (dword_field & (1 << 13))
+ param->rss_ip_frags = 1;
+
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index f44f7f6017ed..07cb7c2461ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -127,6 +127,7 @@ struct mlx4_dev_cap {
u32 max_counters;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
+ struct mlx4_rate_limit_caps rl_caps;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
};
@@ -202,6 +203,7 @@ struct mlx4_init_hca_param {
u64 dev_cap_enabled;
u16 cqe_size; /* For use only when CQE stride feature enabled */
u16 eqe_size; /* For use only when EQE stride feature enabled */
+ u8 rss_ip_frags;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c
new file mode 100644
index 000000000000..8f2fde0487c4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "fw_qos.h"
+#include "fw.h"
+
+enum {
+ /* allocate vpp opcode modifiers */
+ MLX4_ALLOCATE_VPP_ALLOCATE = 0x0,
+ MLX4_ALLOCATE_VPP_QUERY = 0x1
+};
+
+enum {
+ /* set vport qos opcode modifiers */
+ MLX4_SET_VPORT_QOS_SET = 0x0,
+ MLX4_SET_VPORT_QOS_QUERY = 0x1
+};
+
+struct mlx4_set_port_prio2tc_context {
+ u8 prio2tc[4];
+};
+
+struct mlx4_port_scheduler_tc_cfg_be {
+ __be16 pg;
+ __be16 bw_precentage;
+ __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
+ __be16 max_bw_value;
+};
+
+struct mlx4_set_port_scheduler_context {
+ struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
+};
+
+/* Granular QoS (per VF) section */
+struct mlx4_alloc_vpp_param {
+ __be32 availible_vpp;
+ __be32 vpp_p_up[MLX4_NUM_UP];
+};
+
+struct mlx4_prio_qos_param {
+ __be32 bw_share;
+ __be32 max_avg_bw;
+ __be32 reserved;
+ __be32 enable;
+ __be32 reserved1[4];
+};
+
+struct mlx4_set_vport_context {
+ __be32 reserved[8];
+ struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP];
+};
+
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_prio2tc_context *context;
+ int err;
+ u32 in_mod;
+ int i;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+
+ for (i = 0; i < MLX4_NUM_UP; i += 2)
+ context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
+
+ in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
+
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+ u8 *pg, u16 *ratelimit)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_scheduler_context *context;
+ int err;
+ u32 in_mod;
+ int i;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+
+ for (i = 0; i < MLX4_NUM_TC; i++) {
+ struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
+ u16 r;
+
+ if (ratelimit && ratelimit[i]) {
+ if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
+ r = ratelimit[i];
+ tc->max_bw_units =
+ htons(MLX4_RATELIMIT_100M_UNITS);
+ } else {
+ r = ratelimit[i] / 10;
+ tc->max_bw_units =
+ htons(MLX4_RATELIMIT_1G_UNITS);
+ }
+ tc->max_bw_value = htons(r);
+ } else {
+ tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
+ tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
+ }
+
+ tc->pg = htons(pg[i]);
+ tc->bw_precentage = htons(tc_tx_bw[i]);
+ }
+
+ in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+
+int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
+ u16 *availible_vpp, u8 *vpp_p_up)
+{
+ int i;
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_alloc_vpp_param *out_param;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ out_param = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, port,
+ MLX4_ALLOCATE_VPP_QUERY,
+ MLX4_CMD_ALLOCATE_VPP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+
+ /* Total number of supported VPPs */
+ *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp);
+
+ for (i = 0; i < MLX4_NUM_UP; i++)
+ vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get);
+
+int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up)
+{
+ int i;
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_alloc_vpp_param *in_param;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ in_param = mailbox->buf;
+
+ for (i = 0; i < MLX4_NUM_UP; i++)
+ in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]);
+
+ err = mlx4_cmd(dev, mailbox->dma, port,
+ MLX4_ALLOCATE_VPP_ALLOCATE,
+ MLX4_CMD_ALLOCATE_VPP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set);
+
+int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
+ struct mlx4_vport_qos_param *out_param)
+{
+ int i;
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_vport_context *ctx;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ctx = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port,
+ MLX4_SET_VPORT_QOS_QUERY,
+ MLX4_CMD_SET_VPORT_QOS,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+
+ for (i = 0; i < MLX4_NUM_UP; i++) {
+ out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share);
+ out_param[i].max_avg_bw =
+ be32_to_cpu(ctx->qos_p_up[i].max_avg_bw);
+ out_param[i].enable =
+ !!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31);
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get);
+
+int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
+ struct mlx4_vport_qos_param *in_param)
+{
+ int i;
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_vport_context *ctx;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ctx = mailbox->buf;
+
+ for (i = 0; i < MLX4_NUM_UP; i++) {
+ ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share);
+ ctx->qos_p_up[i].max_avg_bw =
+ cpu_to_be32(in_param[i].max_avg_bw);
+ ctx->qos_p_up[i].enable =
+ cpu_to_be32(in_param[i].enable << 31);
+ }
+
+ err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port,
+ MLX4_SET_VPORT_QOS_SET,
+ MLX4_CMD_SET_VPORT_QOS,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
new file mode 100644
index 000000000000..ac1f331878e6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_FW_QOS_H
+#define MLX4_FW_QOS_H
+
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/device.h>
+
+#define MLX4_NUM_UP 8
+#define MLX4_NUM_TC 8
+
+/* Default supported priorities for VPP allocation */
+#define MLX4_DEFAULT_QOS_PRIO (0)
+
+/* Derived from FW feature definition, 0 is the default vport for all QPs */
+#define MLX4_VPP_DEFAULT_VPORT (0)
+
+struct mlx4_vport_qos_param {
+ u32 bw_share;
+ u32 max_avg_bw;
+ u8 enable;
+};
+
+/**
+ * mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic
+ * classes of a given port and device.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @prio2tc: Array of the TC associated with each priority.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
+
+/**
+ * mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between
+ * traffic classes (ETS) and configured rate limit for traffic classes.
+ * tc_tx_bw, pg and ratelimit are arrays where each index represents a TC.
+ * The description for those parameters below refers to a single TC.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @tc_tx_bw: The percentage of the bandwidth allocated for the traffic class
+ * within a TC group. The sum of the bw_percentage of all the traffic
+ * classes within a TC group must equal 100% for correct operation.
+ * @pg: The TC group the traffic class is associated with.
+ * @ratelimit: The maximal bandwidth allowed for use by this traffic class.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+ u8 *pg, u16 *ratelimit);
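+
+/* Hypothetical usage sketch (dev, port and err, as well as the values
+ * below, are illustrative and not taken from the driver): place all TCs
+ * in group 0 with bandwidth shares summing to 100%, and keep the default
+ * per-TC rate limit by passing 0:
+ *
+ *	u8 tc_tx_bw[MLX4_NUM_TC] = { 13, 13, 13, 13, 12, 12, 12, 12 };
+ *	u8 pg[MLX4_NUM_TC] = { 0 };
+ *	u16 ratelimit[MLX4_NUM_TC] = { 0 };
+ *
+ *	err = mlx4_SET_PORT_SCHEDULER(dev, port, tc_tx_bw, pg, ratelimit);
+ */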
+/**
+ * mlx4_ALLOCATE_VPP_get - Query port VPP available resources and allocation.
+ * Before distribution of VPPs to priorities, only availible_vpp is returned.
+ * After initialization it returns the distribution of VPPs among priorities.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @availible_vpp: Pointer to a variable where the number of available VPPs is stored
+ * @vpp_p_up: Distribution of VPPs to priorities is stored in this array
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
+ u16 *availible_vpp, u8 *vpp_p_up);
+/**
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities.
+ * The total number of VPPs assigned to all priorities on a port must not exceed
+ * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get.
+ * VPP allocation is allowed only after the port type has been set,
+ * and while no QPs are open for this port.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vpp_p_up: Allocation of VPPs to different priorities.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
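+
+/* Hypothetical flow (dev, port and err are illustrative names): query how
+ * many VPPs the port can hand out, then assign all of them to the default
+ * priority (assuming the count fits in a u8):
+ *
+ *	u16 availible_vpp;
+ *	u8 vpp_p_up[MLX4_NUM_UP] = { 0 };
+ *
+ *	err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_p_up);
+ *	if (!err) {
+ *		vpp_p_up[MLX4_DEFAULT_QOS_PRIO] = availible_vpp;
+ *		err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_p_up);
+ *	}
+ */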
+
+/**
+ * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport.
+ * Each priority allowed for the Vport is assigned a share of the BW,
+ * and a BW limitation. This command queries the current QoS values.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vport: Vport id.
+ * @out_param: Array of mlx4_vport_qos_param that will contain the values.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
+ struct mlx4_vport_qos_param *out_param);
+
+/**
+ * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport.
+ * QoS parameters can be modified at any time, but must be initialized
+ * before any QP is associated with the VPort.
+ *
+ * @dev: mlx4_dev.
+ * @port: Physical port number.
+ * @vport: Vport id.
+ * @in_param: Array of mlx4_vport_qos_param which holds the requested values.
+ *
+ * Returns 0 on success or a negative mlx4_core errno code.
+ **/
+int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
+ struct mlx4_vport_qos_param *in_param);
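+
+/* Hypothetical read-modify-write sketch (dev, port, vport, rate and err
+ * are illustrative names): cap and enable the default priority of a vport
+ * while leaving the other priorities as queried:
+ *
+ *	struct mlx4_vport_qos_param qos[MLX4_NUM_UP];
+ *
+ *	err = mlx4_SET_VPORT_QOS_get(dev, port, vport, qos);
+ *	if (!err) {
+ *		qos[MLX4_DEFAULT_QOS_PRIO].max_avg_bw = rate;
+ *		qos[MLX4_DEFAULT_QOS_PRIO].enable = 1;
+ *		err = mlx4_SET_VPORT_QOS_set(dev, port, vport, qos);
+ *	}
+ */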
+
+#endif /* MLX4_FW_QOS_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7e487223489a..ced5ecab5aa7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -297,6 +297,25 @@ static int mlx4_dev_port(struct mlx4_dev *dev, int port,
return err;
}
+static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
+{
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
+ return;
+
+ if (mlx4_is_mfunc(dev)) {
+ mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+ return;
+ }
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
+ mlx4_dbg(dev,
+ "Keep FCS is not supported - Disabling Ignore FCS");
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+ return;
+ }
+}
+
#define MLX4_A0_STEERING_TABLE_SIZE 256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
@@ -489,6 +508,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
}
+ dev->caps.rl_caps = dev_cap->rl_caps;
+
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
dev->caps.dmfs_high_rate_qpn_range;
@@ -526,10 +547,20 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.alloc_res_qp_mask =
(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
MLX4_RESERVE_A0_QP;
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+ mlx4_warn(dev, "Old device ETS support detected\n");
+ mlx4_warn(dev, "Consider upgrading device FW.\n");
+ dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
+ }
+
} else {
dev->caps.alloc_res_qp_mask = 0;
}
+ mlx4_enable_ignore_fcs(dev);
+
return 0;
}
@@ -883,6 +914,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+ mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
+ hca_param.rss_ip_frags ? "on" : "off");
if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
dev->caps.bf_reg_size)
@@ -2227,6 +2260,37 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
+void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
+
+__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->mfunc.master.vf_admin[entry].vport[port].guid;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
+
+void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ __be64 guid;
+
+ /* hw GUID */
+ if (entry == 0)
+ return;
+
+ get_random_bytes((char *)&guid, sizeof(guid));
+ guid &= ~(cpu_to_be64(1ULL << 56));
+ guid |= cpu_to_be64(1ULL << 57);
+ priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
+}
+
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 1409d0cd6143..502d3dd2c888 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -50,6 +50,7 @@
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>
+#include "fw_qos.h"
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
@@ -64,21 +65,6 @@
#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
-struct mlx4_set_port_prio2tc_context {
- u8 prio2tc[4];
-};
-
-struct mlx4_port_scheduler_tc_cfg_be {
- __be16 pg;
- __be16 bw_precentage;
- __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
- __be16 max_bw_value;
-};
-
-struct mlx4_set_port_scheduler_context {
- struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
-};
-
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
@@ -175,7 +161,7 @@ enum mlx4_res_tracker_free_type {
/*
*Virtual HCR structures.
- * mlx4_vhcr is the sw representation, in machine endianess
+ * mlx4_vhcr is the sw representation, in machine endianness
*
* mlx4_vhcr_cmd is the formalized structure, the one that is passed
* to FW to go through communication channel.
@@ -512,6 +498,8 @@ struct mlx4_vport_state {
u32 tx_rate;
bool spoofchk;
u32 link_state;
+ u8 qos_vport;
+ __be64 guid;
};
struct mlx4_vf_admin_state {
@@ -568,6 +556,11 @@ struct mlx4_slave_event_eq {
struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
};
+struct mlx4_qos_manager {
+ int num_of_qos_vfs;
+ DECLARE_BITMAP(priority_bm, MLX4_NUM_UP);
+};
+
struct mlx4_master_qp0_state {
int proxy_qp0_active;
int qp0_active;
@@ -592,6 +585,7 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_eqe cmd_eqe;
struct mlx4_slave_event_eq slave_eq;
struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
+ struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
};
struct mlx4_mfunc {
@@ -644,6 +638,7 @@ struct mlx4_vf_immed_vlan_work {
int orig_vlan_ix;
u8 port;
u8 qos;
+ u8 qos_vport;
u16 vlan_id;
u16 orig_vlan_id;
};
@@ -769,9 +764,11 @@ enum {
struct mlx4_set_port_general_context {
- u8 reserved[3];
+ u16 reserved1;
+ u8 v_ignore_fcs;
u8 flags;
- u16 reserved2;
+ u8 ignore_fcs;
+ u8 reserved2;
__be16 mtu;
u8 pptx;
u8 pfctx;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ebbe244e80dd..d021f079f181 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -55,6 +55,7 @@
#include <linux/mlx4/cmd.h>
#include "en_port.h"
+#include "mlx4_stats.h"
#define DRV_NAME "mlx4_en"
#define DRV_VERSION "2.2-1"
@@ -171,7 +172,6 @@ enum {
/* Number of samples to 'average' */
#define AVG_SIZE 128
#define AVG_FACTOR 1024
-#define NUM_PERF_STATS NUM_PERF_COUNTERS
#define INC_PERF_COUNTER(cnt) (++(cnt))
#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
@@ -182,7 +182,6 @@ enum {
#else
-#define NUM_PERF_STATS 0
#define INC_PERF_COUNTER(cnt) do {} while (0)
#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
@@ -435,37 +434,6 @@ struct mlx4_en_port_state {
u32 flags;
};
-struct mlx4_en_pkt_stats {
- unsigned long broadcast;
- unsigned long rx_prio[8];
- unsigned long tx_prio[8];
-#define NUM_PKT_STATS 17
-};
-
-struct mlx4_en_port_stats {
- unsigned long tso_packets;
- unsigned long xmit_more;
- unsigned long queue_stopped;
- unsigned long wake_queue;
- unsigned long tx_timeout;
- unsigned long rx_alloc_failed;
- unsigned long rx_chksum_good;
- unsigned long rx_chksum_none;
- unsigned long rx_chksum_complete;
- unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS 10
-};
-
-struct mlx4_en_perf_stats {
- u32 tx_poll;
- u64 tx_pktsz_avg;
- u32 inflight_avg;
- u16 tx_coal_avg;
- u16 rx_coal_avg;
- u32 napi_quota;
-#define NUM_PERF_COUNTERS 6
-};
-
enum mlx4_en_mclist_act {
MCLIST_NONE,
MCLIST_REM,
@@ -514,9 +482,15 @@ enum {
MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
};
+#define PORT_BEACON_MAX_LIMIT (65535)
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5
+struct mlx4_en_stats_bitmap {
+ DECLARE_BITMAP(bitmap, NUM_ALL_STATS);
+ struct mutex mutex; /* protects concurrent access to the stats bitmap */
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -592,8 +566,12 @@ struct mlx4_en_priv {
#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
+ struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
+ struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
+ struct mlx4_en_flow_stats_rx rx_flowstats;
+ struct mlx4_en_flow_stats_tx tx_flowstats;
struct mlx4_en_port_stats port_stats;
- u64 stats_bitmap;
+ struct mlx4_en_stats_bitmap stats_bitmap;
struct list_head mc_list;
struct list_head curr_list;
u64 broadcast_id;
@@ -608,6 +586,7 @@ struct mlx4_en_priv {
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
+ enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;
@@ -761,6 +740,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev, int detach);
+void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
+ struct mlx4_en_stats_bitmap *stats_bitmap,
+ u8 rx_ppp, u8 rx_pause,
+ u8 tx_ppp, u8 tx_pause);
+
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
@@ -790,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node);
@@ -846,7 +831,10 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
int mlx4_en_reset_config(struct net_device *dev,
struct hwtstamp_config ts_config,
netdev_features_t new_features);
-
+void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
+ struct mlx4_en_stats_bitmap *stats_bitmap,
+ u8 rx_ppp, u8 rx_pause,
+ u8 tx_ppp, u8 tx_pause);
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
new file mode 100644
index 000000000000..00555832a4ae
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -0,0 +1,107 @@
+#ifndef _MLX4_STATS_
+#define _MLX4_STATS_
+
+#ifdef MLX4_EN_PERF_STAT
+#define NUM_PERF_STATS NUM_PERF_COUNTERS
+#else
+#define NUM_PERF_STATS 0
+#endif
+
+#define NUM_PRIORITIES 9
+#define NUM_PRIORITY_STATS 2
+
+struct mlx4_en_pkt_stats {
+ unsigned long rx_multicast_packets;
+ unsigned long rx_broadcast_packets;
+ unsigned long rx_jabbers;
+ unsigned long rx_in_range_length_error;
+ unsigned long rx_out_range_length_error;
+ unsigned long tx_multicast_packets;
+ unsigned long tx_broadcast_packets;
+ unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
+ unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
+#define NUM_PKT_STATS 43
+};
+
+struct mlx4_en_port_stats {
+ unsigned long tso_packets;
+ unsigned long xmit_more;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
+ unsigned long tx_timeout;
+ unsigned long rx_alloc_failed;
+ unsigned long rx_chksum_good;
+ unsigned long rx_chksum_none;
+ unsigned long rx_chksum_complete;
+ unsigned long tx_chksum_offload;
+#define NUM_PORT_STATS 10
+};
+
+struct mlx4_en_perf_stats {
+ u32 tx_poll;
+ u64 tx_pktsz_avg;
+ u32 inflight_avg;
+ u16 tx_coal_avg;
+ u16 rx_coal_avg;
+ u32 napi_quota;
+#define NUM_PERF_COUNTERS 6
+};
+
+#define NUM_MAIN_STATS 21
+
+#define MLX4_NUM_PRIORITIES 8
+
+struct mlx4_en_flow_stats_rx {
+ u64 rx_pause;
+ u64 rx_pause_duration;
+ u64 rx_pause_transition;
+#define NUM_FLOW_STATS_RX 3
+#define NUM_FLOW_PRIORITY_STATS_RX (NUM_FLOW_STATS_RX * \
+ MLX4_NUM_PRIORITIES)
+};
+
+struct mlx4_en_flow_stats_tx {
+ u64 tx_pause;
+ u64 tx_pause_duration;
+ u64 tx_pause_transition;
+#define NUM_FLOW_STATS_TX 3
+#define NUM_FLOW_PRIORITY_STATS_TX (NUM_FLOW_STATS_TX * \
+ MLX4_NUM_PRIORITIES)
+};
+
+#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
+ NUM_FLOW_PRIORITY_STATS_TX + \
+ NUM_FLOW_PRIORITY_STATS_RX)
+
+struct mlx4_en_stat_out_flow_control_mbox {
+ /* Total number of PAUSE frames received from the far-end port */
+ __be64 rx_pause;
+ /* Total number of microseconds that far-end port requested to pause
+ * transmission of packets
+ */
+ __be64 rx_pause_duration;
+ /* Number of receiver transitions from XOFF state to XON state */
+ __be64 rx_pause_transition;
+ /* Total number of PAUSE frames sent to the far-end port */
+ __be64 tx_pause;
+ /* Total time in microseconds that transmission of packets has been
+ * paused
+ */
+ __be64 tx_pause_duration;
+ /* Number of transmitter transitions from XOFF state to XON state */
+ __be64 tx_pause_transition;
+ /* Reserved */
+ __be64 reserved[2];
+};
+
+enum {
+ MLX4_DUMP_ETH_STATS_FLOW_CONTROL = 1 << 12
+};
+
+#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
+ NUM_FLOW_STATS + NUM_PERF_STATS)
+
+#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
+ sizeof(((struct net_device_stats *)0)->n))
+
+#endif
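
The header above defines both the host-order flow-control counters and the big-endian mailbox layout the firmware fills in. As a rough illustration of how the two relate (hypothetical helper, not part of the patch), a consumer would simply byte-swap each __be64 counter into the matching rx/tx structure:

#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "mlx4_stats.h"

static void example_fold_flow_control(const struct mlx4_en_stat_out_flow_control_mbox *mbox,
				      struct mlx4_en_flow_stats_rx *rx,
				      struct mlx4_en_flow_stats_tx *tx)
{
	/* mailbox fields are big-endian; the stats structs are host order */
	rx->rx_pause            = be64_to_cpu(mbox->rx_pause);
	rx->rx_pause_duration   = be64_to_cpu(mbox->rx_pause_duration);
	rx->rx_pause_transition = be64_to_cpu(mbox->rx_pause_transition);
	tx->tx_pause            = be64_to_cpu(mbox->tx_pause);
	tx->tx_pause_duration   = be64_to_cpu(mbox->tx_pause_duration);
	tx->tx_pause_transition = be64_to_cpu(mbox->tx_pause_transition);
}
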
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 9f268f05290a..c2b21313dba7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -38,6 +38,7 @@
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
+#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
@@ -49,6 +50,9 @@
#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
+#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
+#define MLX4_IGNORE_FCS_MASK 0x1
+
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
int i;
@@ -127,8 +131,9 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
@@ -341,8 +346,9 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -629,9 +635,9 @@ static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
MLX4_ROCE_GID_ENTRY_SIZE);
err = mlx4_cmd(dev, mailbox->dma,
- ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
- MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_NATIVE);
+ ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
+ MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mutex_unlock(&(priv->port[port].gid_table.mutex));
return err;
}
@@ -837,6 +843,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
MLX4_CMD_NATIVE);
}
+ /* Slaves are not allowed to SET_PORT beacon (LED) blink */
+ if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
+ mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
+ return -EPERM;
+ }
+
/* For IB, we only consider:
* - The capability mask, which is set to the aggregate of all
* slave function capabilities
@@ -945,8 +957,9 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
(vl_cap << MLX4_SET_PORT_VL_CAP));
- err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ err = mlx4_cmd(dev, mailbox->dma, port,
+ MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
if (err != -ENOMEM)
break;
}
@@ -975,8 +988,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
context->pfcrx = pfcrx;
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
@@ -1012,84 +1026,40 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
context->vlan_miss = MLX4_VLAN_MISS_IDX;
in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
{
struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_prio2tc_context *context;
- int err;
+ struct mlx4_set_port_general_context *context;
u32 in_mod;
- int i;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- for (i = 0; i < MLX4_NUM_UP; i += 2)
- context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
-
- in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
-
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
- u8 *pg, u16 *ratelimit)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_scheduler_context *context;
int err;
- u32 in_mod;
- int i;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
+ context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
+ if (ignore_fcs_value)
+ context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
+ else
+ context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
- for (i = 0; i < MLX4_NUM_TC; i++) {
- struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
- u16 r;
-
- if (ratelimit && ratelimit[i]) {
- if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
- r = ratelimit[i];
- tc->max_bw_units =
- htons(MLX4_RATELIMIT_100M_UNITS);
- } else {
- r = ratelimit[i]/10;
- tc->max_bw_units =
- htons(MLX4_RATELIMIT_1G_UNITS);
- }
- tc->max_bw_value = htons(r);
- } else {
- tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
- tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
- }
-
- tc->pg = htons(pg[i]);
- tc->bw_precentage = htons(tc_tx_bw[i]);
- }
-
- in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
-EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
enum {
VXLAN_ENABLE_MODIFY = 1 << 7,
@@ -1125,14 +1095,35 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
context->steering = steering;
in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
+{
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ *((__be32 *)mailbox->buf) = cpu_to_be32(time);
+
+ err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
+
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -1184,22 +1175,6 @@ int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
vhcr->in_modifier, outbox);
}
-void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
-{
- if (!mlx4_is_mfunc(dev)) {
- *stats_bitmap = 0;
- return;
- }
-
- *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
- MLX4_STATS_TRAFFIC_DROPS_MASK |
- MLX4_STATS_PORT_COUNTERS_MASK);
-
- if (mlx4_is_master(dev))
- *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
-}
-EXPORT_SYMBOL(mlx4_set_stats_bitmap);
-
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
int *slave_id)
{
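
The port.c changes export two new SET_PORT helpers, mlx4_SET_PORT_BEACON() and mlx4_SET_PORT_fcs_check(). As a hedged sketch (not the driver's actual ethtool code), a .set_phys_id-style handler could drive the beacon opcode roughly as below; the duration unit passed to the firmware is an assumption here.

#include <linux/ethtool.h>
#include <linux/errno.h>
#include <linux/mlx4/device.h>	/* assumed to carry the SET_PORT prototypes */

#define EXAMPLE_BEACON_DURATION	5	/* assumed unit, illustration only */

static int example_set_phys_id(struct mlx4_dev *dev, u8 port,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* start blinking the port LED for a bounded time */
		return mlx4_SET_PORT_BEACON(dev, port, EXAMPLE_BEACON_DURATION);
	case ETHTOOL_ID_INACTIVE:
		/* a duration of 0 stops the beacon */
		return mlx4_SET_PORT_BEACON(dev, port, 0);
	default:
		return -EOPNOTSUPP;
	}
}
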
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index eda29dbbfcd2..b75214a80d0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -442,6 +442,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
}
+ if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
+ qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
+ cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
+ }
+
+ if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+ qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
+ cmd->qp_context.qos_vport = params->qos_vport;
+ }
+
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
cmd->qp_mask = cpu_to_be64(qp_mask);
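
The new MLX4_UPDATE_QP_RATE_LIMIT attribute packs the limit as (rate_unit << 14) | rate_val into the QP context, as the hunk above shows. A minimal caller sketch, assuming the params structure carries rate_unit/rate_val as referenced there; the unit code used below is purely illustrative, not a documented firmware value.

#include <linux/mlx4/qp.h>	/* assumed to declare mlx4_update_qp() and its params */

static int example_set_qp_rate(struct mlx4_dev *dev, u32 qpn, u16 rate)
{
	struct mlx4_update_qp_params params = {
		.rate_unit = 3,		/* assumed unit code, for illustration */
		.rate_val  = rate,	/* packed by mlx4_update_qp() as shown above */
	};

	return mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_RATE_LIMIT, &params);
}
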
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 6e413ac4e940..92fce1b98558 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -221,11 +221,6 @@ struct res_fs_rule {
int qpn;
};
-static int mlx4_is_eth(struct mlx4_dev *dev, int port)
-{
- return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
-}
-
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
struct rb_node *node = root->rb_node;
@@ -770,6 +765,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
+ qpc->qos_vport = vp_oper->state.qos_vport;
}
if (vp_oper->state.spoofchk) {
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
@@ -2849,7 +2845,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
{
int err;
int eqn = vhcr->in_modifier;
- int res_id = (slave << 8) | eqn;
+ int res_id = (slave << 10) | eqn;
struct mlx4_eq_context *eqc = inbox->buf;
int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
int mtt_size = eq_get_mtt_size(eqc);
@@ -2947,8 +2943,12 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
- if (slave != mlx4_master_func_num(dev))
+ if (slave != mlx4_master_func_num(dev)) {
qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+ /* setting a QP rate limit is disallowed for VFs */
+ if (qp_ctx->rate_limit_params)
+ return -EPERM;
+ }
switch (qp_type) {
case MLX4_QP_ST_RC:
@@ -3027,7 +3027,7 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
/* Call the SW implementation of write_mtt:
* - Prepare a dummy mtt struct
- * - Translate inbox contents to simple addresses in host endianess */
+ * - Translate inbox contents to simple addresses in host endianness */
mtt.offset = 0; /* TBD this is broken but I don't handle it since
we don't really use it */
mtt.order = 0;
@@ -3051,7 +3051,7 @@ int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
- int res_id = eqn | (slave << 8);
+ int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
@@ -3108,7 +3108,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
return 0;
mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
- res_id = (slave << 8) | event_eq->eqn;
+ res_id = (slave << 10) | event_eq->eqn;
err = get_res(dev, slave, res_id, RES_EQ, &req);
if (err)
goto unlock;
@@ -3131,7 +3131,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
memcpy(mailbox->buf, (u8 *) eqe, 28);
- in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+ in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
@@ -3157,7 +3157,7 @@ int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
- int res_id = eqn | (slave << 8);
+ int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
@@ -4714,13 +4714,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
break;
case RES_EQ_HW:
- err = mlx4_cmd(dev, slave, eqn & 0xff,
+ err = mlx4_cmd(dev, slave, eqn & 0x3ff,
1, MLX4_CMD_HW2SW_EQ,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
- slave, eqn);
+ slave, eqn & 0x3ff);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
break;
@@ -4918,6 +4918,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
qp->sched_queue & 0xC7;
upd_context->qp_context.pri_path.sched_queue |=
((work->qos & 0x7) << 3);
+ upd_context->qp_mask |=
+ cpu_to_be64(1ULL <<
+ MLX4_UPD_QP_MASK_QOS_VPP);
+ upd_context->qp_context.qos_vport =
+ work->qos_vport;
}
err = mlx4_cmd(dev, mailbox->dma,
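
The resource-tracker changes above widen the EQN portion of the EQ resource id from 8 to 10 bits (shift 8 -> 10, mask 0xff -> 0x3ff). Purely for illustration, the packing now looks like this (helpers are hypothetical):

#define EXAMPLE_EQN_BITS	10
#define EXAMPLE_EQN_MASK	((1 << EXAMPLE_EQN_BITS) - 1)	/* 0x3ff */

static inline int example_eq_res_id(int slave, int eqn)
{
	/* slave in the high bits, 10-bit EQN in the low bits */
	return (slave << EXAMPLE_EQN_BITS) | (eqn & EXAMPLE_EQN_MASK);
}

static inline void example_eq_res_id_unpack(int res_id, int *slave, int *eqn)
{
	*slave = res_id >> EXAMPLE_EQN_BITS;
	*eqn = res_id & EXAMPLE_EQN_MASK;
}
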
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 201ca6d76ce5..ac0f7bf4be95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -171,6 +171,9 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
+ db->db[0] = 0;
+ db->db[1] = 0;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a2853057c779..e3273faf4568 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -125,7 +125,10 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
u8 token;
spin_lock(&cmd->token_lock);
- token = cmd->token++ % 255 + 1;
+ cmd->token++;
+ if (cmd->token == 0)
+ cmd->token++;
+ token = cmd->token;
spin_unlock(&cmd->token_lock);
return token;
@@ -515,10 +518,11 @@ static void cmd_work_handler(struct work_struct *work)
ent->ts1 = ktime_get_ns();
/* ring doorbell after the descriptor is valid */
+ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
- mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
mmiowb();
+ /* if not in polling mode, don't use ent after this point */
if (cmd->mode == CMD_MODE_POLLING) {
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
@@ -1236,7 +1240,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
goto out_out;
}
- err = mlx5_copy_from_msg(out, outb, out_size);
+ if (!callback)
+ err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
if (!callback)
@@ -1319,6 +1324,45 @@ ex_err:
return err;
}
+static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+ struct device *ddev = &dev->pdev->dev;
+
+ cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+ &cmd->alloc_dma, GFP_KERNEL);
+ if (!cmd->cmd_alloc_buf)
+ return -ENOMEM;
+
+ /* make sure it is aligned to 4K */
+ if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
+ cmd->cmd_buf = cmd->cmd_alloc_buf;
+ cmd->dma = cmd->alloc_dma;
+ cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
+ return 0;
+ }
+
+ dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
+ cmd->alloc_dma);
+ cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
+ 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+ &cmd->alloc_dma, GFP_KERNEL);
+ if (!cmd->cmd_alloc_buf)
+ return -ENOMEM;
+
+ cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
+ cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
+ cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
+ return 0;
+}
+
+static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+ struct device *ddev = &dev->pdev->dev;
+
+ dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
+ cmd->alloc_dma);
+}
+
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
int size = sizeof(struct mlx5_cmd_prot_block);
@@ -1341,17 +1385,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (!cmd->pool)
return -ENOMEM;
- cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
- if (!cmd->cmd_buf) {
- err = -ENOMEM;
+ err = alloc_cmd_page(dev, cmd);
+ if (err)
goto err_free_pool;
- }
- cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
- err = -ENOMEM;
- goto err_free;
- }
cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
cmd->log_sz = cmd_l >> 4 & 0xf;
@@ -1360,13 +1396,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
1 << cmd->log_sz);
err = -EINVAL;
- goto err_map;
+ goto err_free_page;
}
if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
dev_err(&dev->pdev->dev, "command queue size overflow\n");
err = -EINVAL;
- goto err_map;
+ goto err_free_page;
}
cmd->checksum_disabled = 1;
@@ -1378,7 +1414,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
CMD_IF_REV, cmd->cmdif_rev);
err = -ENOTSUPP;
- goto err_map;
+ goto err_free_page;
}
spin_lock_init(&cmd->alloc_lock);
@@ -1394,7 +1430,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (cmd_l & 0xfff) {
dev_err(&dev->pdev->dev, "invalid command queue address\n");
err = -ENOMEM;
- goto err_map;
+ goto err_free_page;
}
iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
@@ -1410,7 +1446,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
err = create_msg_cache(dev);
if (err) {
dev_err(&dev->pdev->dev, "failed to create command cache\n");
- goto err_map;
+ goto err_free_page;
}
set_wqname(dev);
@@ -1435,11 +1471,8 @@ err_wq:
err_cache:
destroy_msg_cache(dev);
-err_map:
- dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-err_free:
- free_pages((unsigned long)cmd->cmd_buf, 0);
+err_free_page:
+ free_cmd_page(dev, cmd);
err_free_pool:
pci_pool_destroy(cmd->pool);
@@ -1455,9 +1488,7 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
clean_debug_files(dev);
destroy_workqueue(cmd->wq);
destroy_msg_cache(dev);
- dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)cmd->cmd_buf, 0);
+ free_cmd_page(dev, cmd);
pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
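
alloc_cmd_page() replaces the old __get_free_pages()/dma_map_single() pair with a coherent allocation that is guaranteed to be 4K aligned. The same trick in generic form (hypothetical helper, shown only to make the over-allocation fallback explicit; the first attempt is usually already aligned):

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

struct example_aligned_buf {
	void		*raw;		/* what was actually allocated (for freeing) */
	dma_addr_t	raw_dma;
	size_t		raw_size;
	void		*buf;		/* aligned view handed to the hardware */
	dma_addr_t	dma;
};

static int example_alloc_aligned_coherent(struct device *dev, size_t align,
					  struct example_aligned_buf *ab)
{
	ab->raw_size = align;
	ab->raw = dma_zalloc_coherent(dev, ab->raw_size, &ab->raw_dma, GFP_KERNEL);
	if (!ab->raw)
		return -ENOMEM;

	if (!((uintptr_t)ab->raw & (align - 1))) {
		/* already aligned, use it as-is */
		ab->buf = ab->raw;
		ab->dma = ab->raw_dma;
		return 0;
	}

	/* not aligned: over-allocate so an aligned window is guaranteed to fit */
	dma_free_coherent(dev, ab->raw_size, ab->raw, ab->raw_dma);
	ab->raw_size = 2 * align - 1;
	ab->raw = dma_zalloc_coherent(dev, ab->raw_size, &ab->raw_dma, GFP_KERNEL);
	if (!ab->raw)
		return -ENOMEM;

	ab->buf = PTR_ALIGN(ab->raw, align);
	ab->dma = ALIGN(ab->raw_dma, align);
	return 0;
}
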
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 43c5f4809526..eb0cf81f5f45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 4878025e231c..5210d92e6bc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index da82991239a8..58800e4f3958 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -208,7 +208,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
- rmb();
+ dma_rmb();
mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
eq->eqn, eqe_type_str(eqe->type));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 06f9036acd83..4b4cda3bcc5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 3e6670c4a7cd..292d76f2a904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index fd80ecfa7195..ee1b0b965f34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5394a8486558..28425e5ea91f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -48,11 +48,11 @@
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE "Feb 2014"
+#define DRIVER_VERSION "3.0"
+#define DRIVER_RELDATE "January 2015"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
+MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
@@ -288,8 +288,6 @@ static void copy_rw_fields(void *to, struct mlx5_caps *from)
MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
@@ -509,6 +507,87 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
return 0;
}
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq, *n;
+ int err = -ENOENT;
+
+ spin_lock(&table->lock);
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ if (eq->index == vector) {
+ *eqn = eq->eqn;
+ *irqn = eq->irqn;
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock(&table->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
+
+static void free_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq, *n;
+
+ spin_lock(&table->lock);
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_del(&eq->list);
+ spin_unlock(&table->lock);
+ if (mlx5_destroy_unmap_eq(dev, eq))
+ mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
+ eq->eqn);
+ kfree(eq);
+ spin_lock(&table->lock);
+ }
+ spin_unlock(&table->lock);
+}
+
+static int alloc_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ char name[MLX5_MAX_EQ_NAME];
+ struct mlx5_eq *eq;
+ int ncomp_vec;
+ int nent;
+ int err;
+ int i;
+
+ INIT_LIST_HEAD(&table->comp_eqs_list);
+ ncomp_vec = table->num_comp_vectors;
+ nent = MLX5_COMP_EQ_SIZE;
+ for (i = 0; i < ncomp_vec; i++) {
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq) {
+ err = -ENOMEM;
+ goto clean;
+ }
+
+ snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ err = mlx5_create_map_eq(dev, eq,
+ i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
+ name, &dev->priv.uuari.uars[0]);
+ if (err) {
+ kfree(eq);
+ goto clean;
+ }
+ mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
+ eq->index = i;
+ spin_lock(&table->lock);
+ list_add_tail(&eq->list, &table->comp_eqs_list);
+ spin_unlock(&table->lock);
+ }
+
+ return 0;
+
+clean:
+ free_comp_eqs(dev);
+ return err;
+}
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -645,6 +724,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_free_uar;
}
+ err = alloc_comp_eqs(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
+ goto err_stop_eqs;
+ }
+
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
@@ -654,6 +739,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
+err_stop_eqs:
+ mlx5_stop_eqs(dev);
+
err_free_uar:
mlx5_free_uuars(dev, &priv->uuari);
@@ -697,7 +785,6 @@ err_dbg:
debugfs_remove(priv->dbg_root);
return err;
}
-EXPORT_SYMBOL(mlx5_dev_init);
static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
@@ -706,6 +793,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
+ free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
mlx5_eq_cleanup(dev);
@@ -820,6 +908,28 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
}
EXPORT_SYMBOL(mlx5_unregister_interface);
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param)
{
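
mlx5_vector2eqn() is exported so upper layers can translate a completion-vector index into the EQN/IRQ pair of one of the completion EQs created by alloc_comp_eqs() above. A minimal, hypothetical consumer:

#include <linux/mlx5/driver.h>	/* assumed to declare mlx5_vector2eqn() */

static int example_pick_comp_eq(struct mlx5_core_dev *mdev, int vec,
				int *eqn, int *irqn)
{
	int err = mlx5_vector2eqn(mdev, vec, eqn, irqn);

	if (err)	/* -ENOENT: no completion EQ exists for this vector */
		return err;

	/* *eqn can be written into a CQ context; *irqn is useful for
	 * IRQ affinity hints. Both refer to EQs set up by the core driver.
	 */
	return 0;
}
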
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index 44837640bd7c..d79fd85d1dd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index f0c9f9a7a361..a051b906afdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 184c3615f479..1adb300dd850 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_query_mkey_mbox_out *out, int outlen)
{
- struct mlx5_destroy_mkey_mbox_in in;
+ struct mlx5_query_mkey_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4fdaae9b54d9..8a64542abc16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -211,26 +211,28 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
return 0;
}
+#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
+
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & PAGE_MASK);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
if (!fwp) {
mlx5_core_warn(dev, "page not found\n");
return;
}
- n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
+ n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
} else if (fwp->free_count == 1) {
@@ -243,8 +245,9 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
struct page *page;
u64 addr;
int err;
+ int nid = dev_to_node(&dev->pdev->dev);
- page = alloc_page(GFP_HIGHUSER);
+ page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
if (!page) {
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
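
The new MLX5_U64_4K_PAGE_MASK is a u64-typed page mask; PAGE_MASK has type unsigned long, so on a 32-bit host masking a 64-bit DMA address with it would presumably clear bits 63:32. A small illustration (both helpers hypothetical):

#include <linux/types.h>
#include <linux/mm.h>		/* PAGE_MASK, PAGE_SHIFT */

#define EXAMPLE_U64_PAGE_MASK	((~(u64)0U) << PAGE_SHIFT)

static inline u64 example_page_base_broken(u64 addr)
{
	return addr & PAGE_MASK;		/* truncates when long is 32 bits */
}

static inline u64 example_page_base_fixed(u64 addr)
{
	return addr & EXAMPLE_U64_PAGE_MASK;	/* u64 mask, upper bits preserved */
}
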
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index 790da5c4ca4f..f2d3aee909e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 72c2d002c3b8..49e90f2612d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 575d853dbe05..dc7dbf7e9d98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 38bce93f8314..f9d25dcd03c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 06801d6f595e..5a89bb1d678a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 10988fbf47eb..6f332ebdf3b5 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4144,7 +4144,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
for (i = 0; i < hw->addr_list_size; i++) {
if (ether_addr_equal(hw->address[i], mac_addr)) {
- memset(hw->address[i], 0, ETH_ALEN);
+ eth_zero_addr(hw->address[i]);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
return 0;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 6c72e74fef3e..81d0f1c86d6d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -150,7 +150,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
priv->rx_head = 0;
- /* reset the MAC controler TX/RX desciptor base address */
+ /* reset the MAC controller TX/RX descriptor base address */
writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 1412f5af05ec..2bae50292dcd 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -69,11 +69,7 @@
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
-#include <asm/io.h>
#include <asm/processor.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
@@ -242,8 +238,7 @@ struct myri10ge_priv {
unsigned int rdma_tags_available;
int intr_coal_delay;
__be32 __iomem *intr_coal_delay_ptr;
- int mtrr;
- int wc_enabled;
+ int wc_cookie;
int down_cnt;
wait_queue_head_t down_wq;
struct work_struct watchdog_work;
@@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
"tx_heartbeat_errors", "tx_window_errors",
/* device-specific stats */
- "tx_boundary", "WC", "irq", "MSI", "MSIX",
+ "tx_boundary", "irq", "MSI", "MSIX",
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
@@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i] = ((u64 *)&link_stats)[i];
data[i++] = (unsigned int)mgp->tx_boundary;
- data[i++] = (unsigned int)mgp->wc_enabled;
data[i++] = (unsigned int)mgp->pdev->irq;
data[i++] = (unsigned int)mgp->msi_enabled;
data[i++] = (unsigned int)mgp->msix_enabled;
@@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
- mgp->mtrr = -1;
- mgp->wc_enabled = 0;
-#ifdef CONFIG_MTRR
- mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
- MTRR_TYPE_WRCOMB, 1);
- if (mgp->mtrr >= 0)
- mgp->wc_enabled = 1;
-#endif
+ mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
if (mgp->sram == NULL) {
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
@@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_state;
}
if (mgp->msix_enabled)
- dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
+ dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
- (mgp->wc_enabled ? "Enabled" : "Disabled"));
+ (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
else
- dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+ dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->msi_enabled ? "MSI" : "xPIC",
pdev->irq, mgp->tx_boundary, mgp->fw_name,
- (mgp->wc_enabled ? "Enabled" : "Disabled"));
+ (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
board_number++;
return 0;
@@ -4175,10 +4162,7 @@ abort_with_ioremap:
iounmap(mgp->sram);
abort_with_mtrr:
-#ifdef CONFIG_MTRR
- if (mgp->mtrr >= 0)
- mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
-#endif
+ arch_phys_wc_del(mgp->wc_cookie);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
@@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
pci_restore_state(pdev);
iounmap(mgp->sram);
-
-#ifdef CONFIG_MTRR
- if (mgp->mtrr >= 0)
- mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
-#endif
+ arch_phys_wc_del(mgp->wc_cookie);
myri10ge_free_slices(mgp);
kfree(mgp->msix_vectors);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
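
The myri10ge changes drop the open-coded CONFIG_MTRR handling in favour of arch_phys_wc_add()/arch_phys_wc_del(), which hide the ifdefs and return a cookie (a non-positive cookie just means no MTRR was added, not an error). The general shape of that conversion, with hypothetical names:

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_bar {
	int		wc_cookie;
	void __iomem	*regs;
};

static int example_map_bar_wc(struct example_bar *b, phys_addr_t base, size_t span)
{
	b->wc_cookie = arch_phys_wc_add(base, span);	/* best effort */
	b->regs = ioremap_wc(base, span);
	if (!b->regs) {
		arch_phys_wc_del(b->wc_cookie);
		return -ENOMEM;
	}
	return 0;
}

static void example_unmap_bar_wc(struct example_bar *b)
{
	iounmap(b->regs);
	arch_phys_wc_del(b->wc_cookie);
}
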
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a4cdf2f8041a..1e0f72b65459 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -1343,7 +1343,7 @@ static int init_nic(struct s2io_nic *nic)
TX_PA_CFG_IGNORE_L2_ERR;
writeq(val64, &bar0->tx_pa_cfg);
- /* Rx DMA intialization. */
+ /* Rx DMA initialization. */
val64 = 0;
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
@@ -2520,7 +2520,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
ring->dev->name);
if (first_rxdp) {
- wmb();
+ dma_wmb();
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
swstats->mem_alloc_fail_cnt++;
@@ -2634,7 +2634,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
rxdp->Control_2 |= SET_RXD_MARKER;
if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
if (first_rxdp) {
- wmb();
+ dma_wmb();
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
first_rxdp = rxdp;
@@ -2649,7 +2649,7 @@ end:
* and other fields are seen by adapter correctly.
*/
if (first_rxdp) {
- wmb();
+ dma_wmb();
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
@@ -6950,7 +6950,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
}
set_rxd_buffer_size(sp, rxdp, size);
- wmb();
+ dma_wmb();
/* flip the Ownership bit to Hardware */
rxdp->Control_1 |= RXD_OWN_XENA;
}
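
The s2io wmb() -> dma_wmb() conversions all follow the same descriptor-ownership pattern: publish every descriptor field, issue a write barrier, then flip the ownership bit. Schematically, with a made-up descriptor layout:

#include <linux/types.h>
#include <asm/barrier.h>

struct example_rxd {
	u64 buffer_dma;
	u64 control;
#define EXAMPLE_RXD_OWN	(1ULL << 63)
};

static void example_post_rxd(struct example_rxd *rxd, u64 buf_dma, u64 ctrl)
{
	rxd->buffer_dma = buf_dma;
	rxd->control = ctrl;			/* everything except ownership */
	dma_wmb();				/* order the writes above vs. OWN */
	rxd->control |= EXAMPLE_RXD_OWN;	/* now the NIC may consume it */
}

dma_wmb() only orders writes to coherent DMA memory, which is sufficient here and cheaper than a full wmb() on several architectures.
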
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index b07d552a27d4..be916eb2f2e7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -18,6 +18,25 @@
#include "vxge-ethtool.h"
+static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
+ {"\n DRIVER STATISTICS"},
+ {"vpaths_opened"},
+ {"vpath_open_fail_cnt"},
+ {"link_up_cnt"},
+ {"link_down_cnt"},
+ {"tx_frms"},
+ {"tx_errors"},
+ {"tx_bytes"},
+ {"txd_not_free"},
+ {"txd_out_of_desc"},
+ {"rx_frms"},
+ {"rx_errors"},
+ {"rx_bytes"},
+ {"rx_mcast"},
+ {"pci_map_fail_cnt"},
+ {"skb_alloc_fail_cnt"}
+};
+
/**
* vxge_ethtool_sset - Sets different link parameters.
* @dev: device pointer.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
index 6cf3044d7f43..065a2c0429a4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
@@ -19,25 +19,6 @@
/* Ethtool related variables and Macros. */
static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
-static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
- {"\n DRIVER STATISTICS"},
- {"vpaths_opened"},
- {"vpath_open_fail_cnt"},
- {"link_up_cnt"},
- {"link_down_cnt"},
- {"tx_frms"},
- {"tx_errors"},
- {"tx_bytes"},
- {"txd_not_free"},
- {"txd_out_of_desc"},
- {"rx_frms"},
- {"rx_errors"},
- {"rx_bytes"},
- {"rx_mcast"},
- {"pci_map_fail_cnt"},
- {"skb_alloc_fail_cnt"}
-};
-
#define VXGE_TITLE_LEN 5
#define VXGE_HW_VPATH_STATS_LEN 27
#define VXGE_HW_AGGR_STATS_LEN 13
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index d36599f47af5..7bf9c028d8d7 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1557,7 +1557,7 @@ static int octeon_mgmt_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id octeon_mgmt_match[] = {
+static const struct of_device_id octeon_mgmt_match[] = {
{
.compatible = "cavium,octeon-5750-mix",
},
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 4fe8ea96bd25..f6fcf7450352 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -394,7 +394,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
}
/**
- * pch_gbe_set_pauseparam - Set pause paramters
+ * pch_gbe_set_pauseparam - Set pause parameters
* @netdev: Network interface device structure
* @pause: Pause parameters structure
* Returns:
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 319d9d40f922..13d88a6025c8 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -350,7 +350,7 @@ V. Recent Changes
incorrectly defined and corrected (as per Michel Mueller).
02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
- were available before reseting the tbusy and tx_full flags
+ were available before resetting the tbusy and tx_full flags
(as per Michel Mueller).
03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 716fc37ada5a..db80eb1c6d4f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -537,7 +537,7 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
u8 null_addr[ETH_ALEN];
int i;
- memset(null_addr, 0, ETH_ALEN);
+ eth_zero_addr(null_addr);
if (netdev->flags & IFF_PROMISC) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 5c4068353f66..7b43a3b4abdc 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
int i, j;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
- spin_lock(&adapter->tx_clean_lock);
+ spin_lock_bh(&adapter->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
}
cmd_buf++;
}
- spin_unlock(&adapter->tx_clean_lock);
+ spin_unlock_bh(&adapter->tx_clean_lock);
}
void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -1764,7 +1764,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
int done = 0;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
- if (!spin_trylock(&adapter->tx_clean_lock))
+ if (!spin_trylock_bh(&adapter->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
@@ -1819,7 +1819,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
+ spin_unlock_bh(&adapter->tx_clean_lock);
return done;
}
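
The netxen change switches tx_clean_lock to the _bh lock variants because the same lock is also taken from the NAPI transmit-completion path, which runs in softirq context; a process-context holder therefore has to disable bottom halves or risk the softirq spinning on the lock on the same CPU. A schematic with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_tx_clean_lock);

static void example_release_tx_buffers(void)	/* process context */
{
	spin_lock_bh(&example_tx_clean_lock);
	/* ... walk and free pending tx buffers ... */
	spin_unlock_bh(&example_tx_clean_lock);
}

static int example_process_cmd_ring(void)	/* softirq (NAPI) context */
{
	if (!spin_trylock_bh(&example_tx_clean_lock))
		return 1;			/* reclaim is already running */
	/* ... reclaim completed descriptors ... */
	spin_unlock_bh(&example_tx_clean_lock);
	return 0;
}
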
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index f3346a3779d3..69f828eb42cf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -205,7 +205,7 @@ struct qlcnic_add_rings_mbx_out {
* @phys_addr_{low|high}: DMA address of the transmit buffer
* @cnsmr_index_{low|high}: host consumer index
* @size: length of transmit buffer ring
- * @intr_id: interrput id
+ * @intr_id: interrupt id
* @src: src of interrupt
*/
struct qlcnic_tx_mbx {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 2bb48d57e7a5..33669c29b341 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -269,7 +269,7 @@ static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
}
QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
- /* Clear gracefull reset bit */
+ /* Clear graceful reset bit */
val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
@@ -889,7 +889,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
* @adapter: adapter structure
*
* Device will remain in this state until:
- * Reset request ACK's are recieved from all the functions
+ * Reset request ACK's are received from all the functions
* Wait time exceeds max time limit
*
* Returns: Error code or Success(0)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 3e0f705a4311..75ee9e4ced51 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -818,7 +818,7 @@ int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
if (rv)
netdev_err(adapter->netdev,
- "Failed to set Rx coalescing parametrs\n");
+ "Failed to set Rx coalescing parameters\n");
return rv;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index a430a34a4434..367f3976df56 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -507,6 +507,7 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
+ features = vlan_features_check(skb, features);
return vxlan_features_check(skb, features);
}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 8011ef3e7707..25800a1dedcb 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -460,7 +460,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Set Mac addr %pM\n", addr);
} else {
- memset(zero_mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(zero_mac_addr);
addr = &zero_mac_addr[0];
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Clearing MAC address\n");
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 2c811f66d5ac..6af028d5f9bc 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -41,7 +41,6 @@
#include <linux/skbuff.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
-#include <linux/version.h>
#include "qca_7k.h"
#include "qca_debug.h"
@@ -571,7 +570,7 @@ qcaspi_spi_thread(void *data)
}
/* can only handle other interrupts
- * if sync has occured
+ * if sync has occurred
*/
if (qca->sync == QCASPI_SYNC_READY) {
if (intr_cause & SPI_INT_PKT_AVLBL)
@@ -913,6 +912,8 @@ qca_spi_probe(struct spi_device *spi_device)
qca->spi_dev = spi_device;
qca->legacy_mode = legacy_mode;
+ spi_set_drvdata(spi_device, qcaspi_devs);
+
mac = of_get_mac_address(spi_device->dev.of_node);
if (mac)
@@ -945,8 +946,6 @@ qca_spi_probe(struct spi_device *spi_device)
return -EFAULT;
}
- spi_set_drvdata(spi_device, qcaspi_devs);
-
qcaspi_init_device_debugfs(qca);
return 0;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c70ab40d8698..3df51faf18ae 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6884,7 +6884,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
rtl8169_start_xmit(nskb, tp->dev);
} while (segs);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb_checksum_help(skb) < 0)
goto drop;
@@ -6896,7 +6896,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
drop:
stats = &tp->dev->stats;
stats->tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
}
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 736d5d1624a1..7fb244f565b2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -52,7 +52,12 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#define SH_ETH_OFFSET_DEFAULTS \
+ [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
+
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+ SH_ETH_OFFSET_DEFAULTS,
+
[EDSR] = 0x0000,
[EDMR] = 0x0400,
[EDTRR] = 0x0408,
@@ -132,9 +137,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_POST3] = 0x0078,
[TSU_POST4] = 0x007c,
[TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRH31] = 0x01f8,
- [TSU_ADRL31] = 0x01fc,
[TXNLCR0] = 0x0080,
[TXALCR0] = 0x0084,
@@ -151,6 +153,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+ SH_ETH_OFFSET_DEFAULTS,
+
[EDSR] = 0x0000,
[EDMR] = 0x0400,
[EDTRR] = 0x0408,
@@ -199,9 +203,6 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADSBSY] = 0x0060,
[TSU_TEN] = 0x0064,
[TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRH31] = 0x01f8,
- [TSU_ADRL31] = 0x01fc,
[TXNLCR0] = 0x0080,
[TXALCR0] = 0x0084,
@@ -210,6 +211,8 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
+ SH_ETH_OFFSET_DEFAULTS,
+
[ECMR] = 0x0300,
[RFLR] = 0x0308,
[ECSR] = 0x0310,
@@ -256,6 +259,8 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+ SH_ETH_OFFSET_DEFAULTS,
+
[ECMR] = 0x0100,
[RFLR] = 0x0108,
[ECSR] = 0x0110,
@@ -308,6 +313,8 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ SH_ETH_OFFSET_DEFAULTS,
+
[EDMR] = 0x0000,
[EDTRR] = 0x0004,
[EDRRR] = 0x0008,
@@ -392,8 +399,6 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
[TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRL31] = 0x01fc,
};
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
@@ -588,6 +593,7 @@ static struct sh_eth_cpu_data sh7757_data = {
.no_ade = 1,
.rpadir = 1,
.rpadir_value = 2 << 16,
+ .rtrate = 1,
};
#define SH_GIGA_ETH_BASE 0xfee00000UL
@@ -1411,6 +1417,9 @@ static int sh_eth_txfree(struct net_device *ndev)
break;
/* TACT bit must be checked before all the following reads */
rmb();
+ netif_info(mdp, tx_done, ndev,
+ "tx entry %d status 0x%08x\n",
+ entry, edmac_to_cpu(mdp, txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1456,6 +1465,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (--boguscnt < 0)
break;
+ netif_info(mdp, rx_status, ndev,
+ "rx entry %d status 0x%08x len %d\n",
+ entry, desc_status, pkt_len);
+
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
@@ -1500,6 +1513,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
netif_receive_skb(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
+ if (desc_status & RD_RFS8)
+ ndev->stats.multicast++;
}
entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
@@ -1542,7 +1557,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
+ if (intr_status & EESR_RDE &&
+ mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
u32 count = (sh_eth_read(ndev, RDFAR) -
sh_eth_read(ndev, RDLAR)) >> 4;
@@ -1929,6 +1945,192 @@ error_exit:
return ret;
}
+/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
+ * version must be bumped as well. Just adding registers up to that
+ * limit is fine, as long as the existing register indices don't
+ * change.
+ */
+#define SH_ETH_REG_DUMP_VERSION 1
+#define SH_ETH_REG_DUMP_MAX_REGS 256
+
+static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_cpu_data *cd = mdp->cd;
+ u32 *valid_map;
+ size_t len;
+
+ BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
+
+ /* Dump starts with a bitmap that tells ethtool which
+ * registers are defined for this chip.
+ */
+ len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
+ if (buf) {
+ valid_map = buf;
+ buf += len;
+ } else {
+ valid_map = NULL;
+ }
+
+ /* Add a register to the dump, if it has a defined offset.
+ * This automatically skips most undefined registers, but for
+ * some it is also necessary to check a capability flag in
+ * struct sh_eth_cpu_data.
+ */
+#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
+#define add_reg_from(reg, read_expr) do { \
+ if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \
+ if (buf) { \
+ mark_reg_valid(reg); \
+ *buf++ = read_expr; \
+ } \
+ ++len; \
+ } \
+ } while (0)
+#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
+#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
+
+ add_reg(EDSR);
+ add_reg(EDMR);
+ add_reg(EDTRR);
+ add_reg(EDRRR);
+ add_reg(EESR);
+ add_reg(EESIPR);
+ add_reg(TDLAR);
+ add_reg(TDFAR);
+ add_reg(TDFXR);
+ add_reg(TDFFR);
+ add_reg(RDLAR);
+ add_reg(RDFAR);
+ add_reg(RDFXR);
+ add_reg(RDFFR);
+ add_reg(TRSCER);
+ add_reg(RMFCR);
+ add_reg(TFTR);
+ add_reg(FDR);
+ add_reg(RMCR);
+ add_reg(TFUCR);
+ add_reg(RFOCR);
+ if (cd->rmiimode)
+ add_reg(RMIIMODE);
+ add_reg(FCFTR);
+ if (cd->rpadir)
+ add_reg(RPADIR);
+ if (!cd->no_trimd)
+ add_reg(TRIMD);
+ add_reg(ECMR);
+ add_reg(ECSR);
+ add_reg(ECSIPR);
+ add_reg(PIR);
+ if (!cd->no_psr)
+ add_reg(PSR);
+ add_reg(RDMLR);
+ add_reg(RFLR);
+ add_reg(IPGR);
+ if (cd->apr)
+ add_reg(APR);
+ if (cd->mpr)
+ add_reg(MPR);
+ add_reg(RFCR);
+ add_reg(RFCF);
+ if (cd->tpauser)
+ add_reg(TPAUSER);
+ add_reg(TPAUSECR);
+ add_reg(GECMR);
+ if (cd->bculr)
+ add_reg(BCULR);
+ add_reg(MAHR);
+ add_reg(MALR);
+ add_reg(TROCR);
+ add_reg(CDCR);
+ add_reg(LCCR);
+ add_reg(CNDCR);
+ add_reg(CEFCR);
+ add_reg(FRECR);
+ add_reg(TSFRCR);
+ add_reg(TLFRCR);
+ add_reg(CERCR);
+ add_reg(CEECR);
+ add_reg(MAFCR);
+ if (cd->rtrate)
+ add_reg(RTRATE);
+ if (cd->hw_crc)
+ add_reg(CSMR);
+ if (cd->select_mii)
+ add_reg(RMII_MII);
+ add_reg(ARSTR);
+ if (cd->tsu) {
+ add_tsu_reg(TSU_CTRST);
+ add_tsu_reg(TSU_FWEN0);
+ add_tsu_reg(TSU_FWEN1);
+ add_tsu_reg(TSU_FCM);
+ add_tsu_reg(TSU_BSYSL0);
+ add_tsu_reg(TSU_BSYSL1);
+ add_tsu_reg(TSU_PRISL0);
+ add_tsu_reg(TSU_PRISL1);
+ add_tsu_reg(TSU_FWSL0);
+ add_tsu_reg(TSU_FWSL1);
+ add_tsu_reg(TSU_FWSLC);
+ add_tsu_reg(TSU_QTAG0);
+ add_tsu_reg(TSU_QTAG1);
+ add_tsu_reg(TSU_QTAGM0);
+ add_tsu_reg(TSU_QTAGM1);
+ add_tsu_reg(TSU_FWSR);
+ add_tsu_reg(TSU_FWINMK);
+ add_tsu_reg(TSU_ADQT0);
+ add_tsu_reg(TSU_ADQT1);
+ add_tsu_reg(TSU_VTAG0);
+ add_tsu_reg(TSU_VTAG1);
+ add_tsu_reg(TSU_ADSBSY);
+ add_tsu_reg(TSU_TEN);
+ add_tsu_reg(TSU_POST1);
+ add_tsu_reg(TSU_POST2);
+ add_tsu_reg(TSU_POST3);
+ add_tsu_reg(TSU_POST4);
+ if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
+ /* This is the start of a table, not just a single
+ * register.
+ */
+ if (buf) {
+ unsigned int i;
+
+ mark_reg_valid(TSU_ADRH0);
+ for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
+ *buf++ = ioread32(
+ mdp->tsu_addr +
+ mdp->reg_offset[TSU_ADRH0] +
+ i * 4);
+ }
+ len += SH_ETH_TSU_CAM_ENTRIES * 2;
+ }
+ }
+
+#undef mark_reg_valid
+#undef add_reg_from
+#undef add_reg
+#undef add_tsu_reg
+
+ return len * 4;
+}
+
+static int sh_eth_get_regs_len(struct net_device *ndev)
+{
+ return __sh_eth_get_regs(ndev, NULL);
+}
+
+static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ regs->version = SH_ETH_REG_DUMP_VERSION;
+
+ pm_runtime_get_sync(&mdp->pdev->dev);
+ __sh_eth_get_regs(ndev, buf);
+ pm_runtime_put_sync(&mdp->pdev->dev);
+}
+
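Both new callbacks funnel through __sh_eth_get_regs() so that the length reported when buf is NULL exactly matches what the fill pass later writes. A rough sketch of how the ethtool core is expected to drive the pair (simplified; not part of this patch -- the real code lives in net/core/ethtool.c and also handles the copy to user space and more error cases):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/vmalloc.h>

    /* Simplified sketch of the caller side, for illustration only. */
    static int ethtool_get_regs_sketch(struct net_device *dev,
    				   struct ethtool_regs *regs)
    {
    	const struct ethtool_ops *ops = dev->ethtool_ops;
    	void *regbuf;
    	int reglen;

    	reglen = ops->get_regs_len(dev);   /* -> __sh_eth_get_regs(ndev, NULL) */
    	if (reglen <= 0)
    		return reglen;

    	regbuf = vzalloc(reglen);
    	if (!regbuf)
    		return -ENOMEM;

    	regs->len = reglen;
    	ops->get_regs(dev, regs, regbuf);  /* -> validity bitmap, then values */

    	/* ... copy regs header and regbuf out to the ethtool user ... */
    	vfree(regbuf);
    	return 0;
    }

The driver-set regs->version (SH_ETH_REG_DUMP_VERSION) is what lets a consumer interpret the bitmap-plus-values layout, which is why the comment above ties version bumps to SH_ETH_REG_DUMP_MAX_REGS.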
static int sh_eth_nway_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2074,6 +2276,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
+ .get_regs_len = sh_eth_get_regs_len,
+ .get_regs = sh_eth_get_regs,
.nway_reset = sh_eth_nway_reset,
.get_msglevel = sh_eth_get_msglevel,
.set_msglevel = sh_eth_set_msglevel,
@@ -2213,6 +2417,22 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+/* The statistics registers have write-clear behaviour, which means we
+ * will lose any increment between the read and write. We mitigate
+ * this by only clearing when we read a non-zero value, so we will
+ * never falsely report a total of zero.
+ */
+static void
+sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
+{
+ u32 delta = sh_eth_read(ndev, reg);
+
+ if (delta) {
+ *stat += delta;
+ sh_eth_write(ndev, 0, reg);
+ }
+}
+
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2223,21 +2443,18 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
if (!mdp->is_opened)
return &ndev->stats;
- ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
- sh_eth_write(ndev, 0, TROCR); /* (write clear) */
- ndev->stats.collisions += sh_eth_read(ndev, CDCR);
- sh_eth_write(ndev, 0, CDCR); /* (write clear) */
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
- sh_eth_write(ndev, 0, LCCR); /* (write clear) */
+ sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
+ sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
+ sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
if (sh_eth_is_gether(mdp)) {
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
- sh_eth_write(ndev, 0, CERCR); /* (write clear) */
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
- sh_eth_write(ndev, 0, CEECR); /* (write clear) */
+ sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+ CERCR);
+ sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+ CEECR);
} else {
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
- sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
+ sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
+ CNDCR);
}
return &ndev->stats;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 259d03f353e1..06dbbe5201cb 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -32,6 +32,10 @@
#define SH_ETH_TSU_CAM_ENTRIES 32
enum {
+ /* IMPORTANT: To keep ethtool register dump working, add new
+ * register names immediately before SH_ETH_MAX_REGISTER_OFFSET.
+ */
+
/* E-DMAC registers */
EDSR = 0,
EDMR,
@@ -131,9 +135,7 @@ enum {
TSU_POST3,
TSU_POST4,
TSU_ADRH0,
- TSU_ADRL0,
- TSU_ADRH31,
- TSU_ADRL31,
+ /* TSU_ADR{H,L}{0..31} are assumed to be contiguous */
TXNLCR0,
TXALCR0,
@@ -491,6 +493,7 @@ struct sh_eth_cpu_data {
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
+ unsigned rtrate:1; /* EtherC has RTRATE register */
};
struct sh_eth_private {
@@ -543,19 +546,29 @@ static inline void sh_eth_soft_swap(char *src, int len)
#endif
}
+#define SH_ETH_OFFSET_INVALID ((u16) ~0)
+
static inline void sh_eth_write(struct net_device *ndev, u32 data,
int enum_index)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
- iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return;
+
+ iowrite32(data, mdp->addr + offset);
}
static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return ~0U;
- return ioread32(mdp->addr + mdp->reg_offset[enum_index]);
+ return ioread32(mdp->addr + offset);
}
static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 5cecec282aba..ec251531bd9f 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -30,8 +30,12 @@
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
+#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
+#include <net/ip_fib.h>
+#include <net/netevent.h>
+#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>
@@ -49,12 +53,12 @@ struct rocker_flow_tbl_key {
enum rocker_of_dpa_table_id tbl_id;
union {
struct {
- u32 in_lport;
- u32 in_lport_mask;
+ u32 in_pport;
+ u32 in_pport_mask;
enum rocker_of_dpa_table_id goto_tbl;
} ig_port;
struct {
- u32 in_lport;
+ u32 in_pport;
__be16 vlan_id;
__be16 vlan_id_mask;
enum rocker_of_dpa_table_id goto_tbl;
@@ -62,8 +66,8 @@ struct rocker_flow_tbl_key {
__be16 new_vlan_id;
} vlan;
struct {
- u32 in_lport;
- u32 in_lport_mask;
+ u32 in_pport;
+ u32 in_pport_mask;
__be16 eth_type;
u8 eth_dst[ETH_ALEN];
u8 eth_dst_mask[ETH_ALEN];
@@ -91,8 +95,8 @@ struct rocker_flow_tbl_key {
bool copy_to_cpu;
} bridge;
struct {
- u32 in_lport;
- u32 in_lport_mask;
+ u32 in_pport;
+ u32 in_pport_mask;
u8 eth_src[ETH_ALEN];
u8 eth_src_mask[ETH_ALEN];
u8 eth_dst[ETH_ALEN];
@@ -111,9 +115,10 @@ struct rocker_flow_tbl_key {
struct rocker_flow_tbl_entry {
struct hlist_node entry;
- u32 ref_count;
+ u32 cmd;
u64 cookie;
struct rocker_flow_tbl_key key;
+ size_t key_len;
u32 key_crc32; /* key */
};
@@ -148,7 +153,7 @@ struct rocker_fdb_tbl_entry {
u32 key_crc32; /* key */
bool learned;
struct rocker_fdb_tbl_key {
- u32 lport;
+ u32 pport;
u8 addr[ETH_ALEN];
__be16 vlan_id;
} key;
@@ -161,6 +166,16 @@ struct rocker_internal_vlan_tbl_entry {
__be16 vlan_id;
};
+struct rocker_neigh_tbl_entry {
+ struct hlist_node entry;
+ __be32 ip_addr; /* key */
+ struct net_device *dev;
+ u32 ref_count;
+ u32 index;
+ u8 eth_dst[ETH_ALEN];
+ bool ttl_check;
+};
+
struct rocker_desc_info {
char *data; /* mapped */
size_t data_size;
@@ -200,7 +215,7 @@ struct rocker_port {
struct net_device *bridge_dev;
struct rocker *rocker;
unsigned int port_number;
- u32 lport;
+ u32 pport;
__be16 internal_vlan_id;
int stp_state;
u32 brport_flags;
@@ -234,6 +249,9 @@ struct rocker {
unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
DECLARE_HASHTABLE(internal_vlan_tbl, 8);
spinlock_t internal_vlan_tbl_lock;
+ DECLARE_HASHTABLE(neigh_tbl, 16);
+ spinlock_t neigh_tbl_lock;
+ u32 neigh_tbl_next_index;
};
static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
@@ -256,7 +274,6 @@ enum {
ROCKER_PRIORITY_VLAN = 1,
ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
- ROCKER_PRIORITY_UNICAST_ROUTING = 1,
ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
ROCKER_PRIORITY_BRIDGING_VLAN = 3,
@@ -789,7 +806,30 @@ static u32 __pos_inc(u32 pos, size_t limit)
static int rocker_desc_err(struct rocker_desc_info *desc_info)
{
- return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN);
+ int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
+
+ switch (err) {
+ case ROCKER_OK:
+ return 0;
+ case -ROCKER_ENOENT:
+ return -ENOENT;
+ case -ROCKER_ENXIO:
+ return -ENXIO;
+ case -ROCKER_ENOMEM:
+ return -ENOMEM;
+ case -ROCKER_EEXIST:
+ return -EEXIST;
+ case -ROCKER_EINVAL:
+ return -EINVAL;
+ case -ROCKER_EMSGSIZE:
+ return -EMSGSIZE;
+ case -ROCKER_ENOTSUP:
+ return -EOPNOTSUPP;
+ case -ROCKER_ENOBUFS:
+ return -ENOBUFS;
+ }
+
+ return -EINVAL;
}
static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
@@ -1257,9 +1297,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
if (enable)
- val |= 1ULL << rocker_port->lport;
+ val |= 1ULL << rocker_port->pport;
else
- val &= ~(1ULL << rocker_port->lport);
+ val &= ~(1ULL << rocker_port->pport);
rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
@@ -1312,11 +1352,11 @@ static int rocker_event_link_change(struct rocker *rocker,
struct rocker_port *rocker_port;
rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
- if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] ||
+ if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
return -EIO;
port_number =
- rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1;
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
if (port_number >= rocker->port_count)
@@ -1353,12 +1393,12 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker,
__be16 vlan_id;
rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
- if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] ||
+ if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
!attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
!attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
return -EIO;
port_number =
- rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1;
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
@@ -1517,8 +1557,8 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker,
cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
if (!cmd_info)
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
- rocker_port->lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
return -EMSGSIZE;
rocker_tlv_nest_end(desc_info, cmd_info);
return 0;
@@ -1591,6 +1631,53 @@ rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
return 0;
}
+struct port_name {
+ char *buf;
+ size_t len;
+};
+
+static int
+rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ struct port_name *name = priv;
+ struct rocker_tlv *attr;
+ size_t i, j, len;
+ char *str;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
+ if (!attr)
+ return -EIO;
+
+ len = min_t(size_t, rocker_tlv_len(attr), name->len);
+ str = rocker_tlv_data(attr);
+
+ /* make sure name only contains alphanumeric characters */
+ for (i = j = 0; i < len; ++i) {
+ if (isalnum(str[i])) {
+ name->buf[j] = str[i];
+ j++;
+ }
+ }
+
+ if (j == 0)
+ return -EIO;
+
+ name->buf[j] = '\0';
+
+ return 0;
+}
+
static int
rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
struct rocker_port *rocker_port,
@@ -1606,8 +1693,8 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
if (!cmd_info)
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
- rocker_port->lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
return -EMSGSIZE;
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
ethtool_cmd_speed(ecmd)))
@@ -1637,8 +1724,8 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
if (!cmd_info)
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
- rocker_port->lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
return -EMSGSIZE;
if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
ETH_ALEN, macaddr))
@@ -1661,8 +1748,8 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker,
cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
if (!cmd_info)
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
- rocker_port->lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
!!(rocker_port->brport_flags & BR_LEARNING)))
@@ -1715,11 +1802,11 @@ static int rocker_port_set_learning(struct rocker_port *rocker_port)
static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
struct rocker_flow_tbl_entry *entry)
{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
- entry->key.ig_port.in_lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.ig_port.in_pport))
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
- entry->key.ig_port.in_lport_mask))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+ entry->key.ig_port.in_pport_mask))
return -EMSGSIZE;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
entry->key.ig_port.goto_tbl))
@@ -1731,8 +1818,8 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
struct rocker_flow_tbl_entry *entry)
{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
- entry->key.vlan.in_lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.vlan.in_pport))
return -EMSGSIZE;
if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
entry->key.vlan.vlan_id))
@@ -1754,11 +1841,11 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
struct rocker_flow_tbl_entry *entry)
{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
- entry->key.term_mac.in_lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.term_mac.in_pport))
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
- entry->key.term_mac.in_lport_mask))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+ entry->key.term_mac.in_pport_mask))
return -EMSGSIZE;
if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
entry->key.term_mac.eth_type))
@@ -1845,11 +1932,11 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
struct rocker_flow_tbl_entry *entry)
{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
- entry->key.acl.in_lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.acl.in_pport))
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
- entry->key.acl.in_lport_mask))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+ entry->key.acl.in_pport_mask))
return -EMSGSIZE;
if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
ETH_ALEN, entry->key.acl.eth_src))
@@ -1917,8 +2004,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
struct rocker_tlv *cmd_info;
int err = 0;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
return -EMSGSIZE;
cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
if (!cmd_info)
@@ -1975,8 +2061,7 @@ static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
const struct rocker_flow_tbl_entry *entry = priv;
struct rocker_tlv *cmd_info;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
return -EMSGSIZE;
cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
if (!cmd_info)
@@ -1993,7 +2078,7 @@ static int
rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
struct rocker_group_tbl_entry *entry)
{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT,
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
ROCKER_GROUP_PORT_GET(entry->group_id)))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
@@ -2145,9 +2230,9 @@ static int rocker_cmd_group_tbl_del(struct rocker *rocker,
return 0;
}
-/*****************************************
- * Flow, group, FDB, internal VLAN tables
- *****************************************/
+/***************************************************
+ * Flow, group, FDB, internal VLAN and neigh tables
+ ***************************************************/
static int rocker_init_tbls(struct rocker *rocker)
{
@@ -2163,6 +2248,9 @@ static int rocker_init_tbls(struct rocker *rocker)
hash_init(rocker->internal_vlan_tbl);
spin_lock_init(&rocker->internal_vlan_tbl_lock);
+ hash_init(rocker->neigh_tbl);
+ spin_lock_init(&rocker->neigh_tbl_lock);
+
return 0;
}
@@ -2173,6 +2261,7 @@ static void rocker_free_tbls(struct rocker *rocker)
struct rocker_group_tbl_entry *group_entry;
struct rocker_fdb_tbl_entry *fdb_entry;
struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
+ struct rocker_neigh_tbl_entry *neigh_entry;
struct hlist_node *tmp;
int bkt;
@@ -2196,16 +2285,22 @@ static void rocker_free_tbls(struct rocker *rocker)
tmp, internal_vlan_entry, entry)
hash_del(&internal_vlan_entry->entry);
spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
+
+ spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
+ hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
+ hash_del(&neigh_entry->entry);
+ spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}
static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
{
struct rocker_flow_tbl_entry *found;
+ size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
hash_for_each_possible(rocker->flow_tbl, found,
entry, match->key_crc32) {
- if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+ if (memcmp(&found->key, &match->key, key_len) == 0)
return found;
}
@@ -2218,42 +2313,34 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
+ size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
unsigned long flags;
- bool add_to_hw = false;
- int err = 0;
- match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+ match->key_crc32 = crc32(~0, &match->key, key_len);
spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
found = rocker_flow_tbl_find(rocker, match);
if (found) {
- kfree(match);
+ match->cookie = found->cookie;
+ hash_del(&found->entry);
+ kfree(found);
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
} else {
found = match;
found->cookie = rocker->flow_tbl_next_cookie++;
- hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
- add_to_hw = true;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- found->ref_count++;
+ hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- if (add_to_hw) {
- err = rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_flow_tbl_add,
- found, NULL, NULL, nowait);
- if (err) {
- spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
- hash_del(&found->entry);
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- kfree(found);
- }
- }
-
- return err;
+ return rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_flow_tbl_add,
+ found, NULL, NULL, nowait);
}
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
@@ -2262,29 +2349,26 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
+ size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
unsigned long flags;
- bool del_from_hw = false;
int err = 0;
- match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+ match->key_crc32 = crc32(~0, &match->key, key_len);
spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
found = rocker_flow_tbl_find(rocker, match);
if (found) {
- found->ref_count--;
- if (found->ref_count == 0) {
- hash_del(&found->entry);
- del_from_hw = true;
- }
+ hash_del(&found->entry);
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
kfree(match);
- if (del_from_hw) {
+ if (found) {
err = rocker_cmd_exec(rocker, rocker_port,
rocker_cmd_flow_tbl_del,
found, NULL, NULL, nowait);
@@ -2311,7 +2395,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
- int flags, u32 in_lport, u32 in_lport_mask,
+ int flags, u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
struct rocker_flow_tbl_entry *entry;
@@ -2322,15 +2406,15 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
entry->key.priority = ROCKER_PRIORITY_IG_PORT;
entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
- entry->key.ig_port.in_lport = in_lport;
- entry->key.ig_port.in_lport_mask = in_lport_mask;
+ entry->key.ig_port.in_pport = in_pport;
+ entry->key.ig_port.in_pport_mask = in_pport_mask;
entry->key.ig_port.goto_tbl = goto_tbl;
return rocker_flow_tbl_do(rocker_port, flags, entry);
}
static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
- int flags, u32 in_lport,
+ int flags, u32 in_pport,
__be16 vlan_id, __be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
bool untagged, __be16 new_vlan_id)
@@ -2343,7 +2427,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
entry->key.priority = ROCKER_PRIORITY_VLAN;
entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
- entry->key.vlan.in_lport = in_lport;
+ entry->key.vlan.in_pport = in_pport;
entry->key.vlan.vlan_id = vlan_id;
entry->key.vlan.vlan_id_mask = vlan_id_mask;
entry->key.vlan.goto_tbl = goto_tbl;
@@ -2355,7 +2439,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
- u32 in_lport, u32 in_lport_mask,
+ u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
__be16 vlan_id_mask, bool copy_to_cpu,
@@ -2378,8 +2462,8 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
}
entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
- entry->key.term_mac.in_lport = in_lport;
- entry->key.term_mac.in_lport_mask = in_lport_mask;
+ entry->key.term_mac.in_pport = in_pport;
+ entry->key.term_mac.in_pport_mask = in_pport_mask;
entry->key.term_mac.eth_type = eth_type;
ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
@@ -2444,9 +2528,34 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
return rocker_flow_tbl_do(rocker_port, flags, entry);
}
+static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+ __be16 eth_type, __be32 dst,
+ __be32 dst_mask, u32 priority,
+ enum rocker_of_dpa_table_id goto_tbl,
+ u32 group_id, int flags)
+{
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+ entry->key.priority = priority;
+ entry->key.ucast_routing.eth_type = eth_type;
+ entry->key.ucast_routing.dst4 = dst;
+ entry->key.ucast_routing.dst4_mask = dst_mask;
+ entry->key.ucast_routing.goto_tbl = goto_tbl;
+ entry->key.ucast_routing.group_id = group_id;
+ entry->key_len = offsetof(struct rocker_flow_tbl_key,
+ ucast_routing.group_id);
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
- int flags, u32 in_lport,
- u32 in_lport_mask,
+ int flags, u32 in_pport,
+ u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
__be16 eth_type,
@@ -2472,8 +2581,8 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
entry->key.priority = priority;
entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
- entry->key.acl.in_lport = in_lport;
- entry->key.acl.in_lport_mask = in_lport_mask;
+ entry->key.acl.in_pport = in_pport;
+ entry->key.acl.in_pport_mask = in_pport_mask;
if (eth_src)
ether_addr_copy(entry->key.acl.eth_src, eth_src);
@@ -2531,7 +2640,6 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
unsigned long flags;
- int err = 0;
spin_lock_irqsave(&rocker->group_tbl_lock, flags);
@@ -2551,12 +2659,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
- if (found->cmd)
- err = rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_group_tbl_add,
- found, NULL, NULL, nowait);
-
- return err;
+ return rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_group_tbl_add,
+ found, NULL, NULL, nowait);
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
@@ -2604,7 +2709,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port,
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
int flags, __be16 vlan_id,
- u32 out_lport, int pop_vlan)
+ u32 out_pport, int pop_vlan)
{
struct rocker_group_tbl_entry *entry;
@@ -2612,7 +2717,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port,
if (!entry)
return -ENOMEM;
- entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+ entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
entry->l2_interface.pop_vlan = pop_vlan;
return rocker_group_tbl_do(rocker_port, flags, entry);
@@ -2652,17 +2757,262 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port,
group_id);
}
+static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
+ int flags, u32 index, u8 *src_mac,
+ u8 *dst_mac, __be16 vlan_id,
+ bool ttl_check, u32 pport)
+{
+ struct rocker_group_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
+ if (src_mac)
+ ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
+ if (dst_mac)
+ ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
+ entry->l3_unicast.vlan_id = vlan_id;
+ entry->l3_unicast.ttl_check = ttl_check;
+ entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static struct rocker_neigh_tbl_entry *
+ rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+{
+ struct rocker_neigh_tbl_entry *found;
+
+ hash_for_each_possible(rocker->neigh_tbl, found,
+ entry, be32_to_cpu(ip_addr))
+ if (found->ip_addr == ip_addr)
+ return found;
+
+ return NULL;
+}
+
+static void _rocker_neigh_add(struct rocker *rocker,
+ struct rocker_neigh_tbl_entry *entry)
+{
+ entry->index = rocker->neigh_tbl_next_index++;
+ entry->ref_count++;
+ hash_add(rocker->neigh_tbl, &entry->entry,
+ be32_to_cpu(entry->ip_addr));
+}
+
+static void _rocker_neigh_del(struct rocker *rocker,
+ struct rocker_neigh_tbl_entry *entry)
+{
+ if (--entry->ref_count == 0) {
+ hash_del(&entry->entry);
+ kfree(entry);
+ }
+}
+
+static void _rocker_neigh_update(struct rocker *rocker,
+ struct rocker_neigh_tbl_entry *entry,
+ u8 *eth_dst, bool ttl_check)
+{
+ if (eth_dst) {
+ ether_addr_copy(entry->eth_dst, eth_dst);
+ entry->ttl_check = ttl_check;
+ } else {
+ entry->ref_count++;
+ }
+}
+
+static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
+ int flags, __be32 ip_addr, u8 *eth_dst)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_neigh_tbl_entry *entry;
+ struct rocker_neigh_tbl_entry *found;
+ unsigned long lock_flags;
+ __be16 eth_type = htons(ETH_P_IP);
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 group_id;
+ u32 priority = 0;
+ bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+ bool updating;
+ bool removing;
+ int err = 0;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+ found = rocker_neigh_tbl_find(rocker, ip_addr);
+
+ updating = found && adding;
+ removing = found && !adding;
+ adding = !found && adding;
+
+ if (adding) {
+ entry->ip_addr = ip_addr;
+ entry->dev = rocker_port->dev;
+ ether_addr_copy(entry->eth_dst, eth_dst);
+ entry->ttl_check = true;
+ _rocker_neigh_add(rocker, entry);
+ } else if (removing) {
+ memcpy(entry, found, sizeof(*entry));
+ _rocker_neigh_del(rocker, found);
+ } else if (updating) {
+ _rocker_neigh_update(rocker, found, eth_dst, true);
+ memcpy(entry, found, sizeof(*entry));
+ } else {
+ err = -ENOENT;
+ }
+
+ spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+ if (err)
+ goto err_out;
+
+ /* For each active neighbor, we have an L3 unicast group and
+ * a /32 route to the neighbor, which uses the L3 unicast
+ * group. The L3 unicast group can also be referred to by
+ * other routes' nexthops.
+ */
+
+ err = rocker_group_l3_unicast(rocker_port, flags,
+ entry->index,
+ rocker_port->dev->dev_addr,
+ entry->eth_dst,
+ rocker_port->internal_vlan_id,
+ entry->ttl_check,
+ rocker_port->pport);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) L3 unicast group index %d\n",
+ err, entry->index);
+ goto err_out;
+ }
+
+ if (adding || removing) {
+ group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
+ err = rocker_flow_tbl_ucast4_routing(rocker_port,
+ eth_type, ip_addr,
+ inet_make_mask(32),
+ priority, goto_tbl,
+ group_id, flags);
+
+ if (err)
+ netdev_err(rocker_port->dev,
+ "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
+ err, &entry->ip_addr, group_id);
+ }
+
+err_out:
+ if (!adding)
+ kfree(entry);
+
+ return err;
+}
+
+static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
+ __be32 ip_addr)
+{
+ struct net_device *dev = rocker_port->dev;
+ struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
+ int err = 0;
+
+ if (!n)
+ n = neigh_create(&arp_tbl, &ip_addr, dev);
+ if (!n)
+ return -ENOMEM;
+
+ /* If the neigh is already resolved, then go ahead and
+ * install the entry, otherwise start the ARP process to
+ * resolve the neigh.
+ */
+
+ if (n->nud_state & NUD_VALID)
+ err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+ else
+ neigh_event_send(n, NULL);
+
+ return err;
+}
+
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+ __be32 ip_addr, u32 *index)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_neigh_tbl_entry *entry;
+ struct rocker_neigh_tbl_entry *found;
+ unsigned long lock_flags;
+ bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+ bool updating;
+ bool removing;
+ bool resolved = true;
+ int err = 0;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+ found = rocker_neigh_tbl_find(rocker, ip_addr);
+ if (found)
+ *index = found->index;
+
+ updating = found && adding;
+ removing = found && !adding;
+ adding = !found && adding;
+
+ if (adding) {
+ entry->ip_addr = ip_addr;
+ entry->dev = rocker_port->dev;
+ _rocker_neigh_add(rocker, entry);
+ *index = entry->index;
+ resolved = false;
+ } else if (removing) {
+ _rocker_neigh_del(rocker, found);
+ } else if (updating) {
+ _rocker_neigh_update(rocker, found, NULL, false);
+ resolved = !is_zero_ether_addr(found->eth_dst);
+ } else {
+ err = -ENOENT;
+ }
+
+ spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+ if (!adding)
+ kfree(entry);
+
+ if (err)
+ return err;
+
+ /* Resolved means neigh ip_addr is resolved to neigh mac. */
+
+ if (!resolved)
+ err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+
+ return err;
+}
+
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
int flags, __be16 vlan_id)
{
struct rocker_port *p;
struct rocker *rocker = rocker_port->rocker;
u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
- u32 group_ids[rocker->port_count];
+ u32 *group_ids;
u8 group_count = 0;
- int err;
+ int err = 0;
int i;
+ group_ids = kcalloc(rocker->port_count, sizeof(u32),
+ rocker_op_flags_gfp(flags));
+ if (!group_ids)
+ return -ENOMEM;
+
/* Adjust the flood group for this VLAN. The flood group
* references an L2 interface group for each port in this
* VLAN.
@@ -2674,14 +3024,13 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
continue;
if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
group_ids[group_count++] =
- ROCKER_GROUP_L2_INTERFACE(vlan_id,
- p->lport);
+ ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
}
}
/* If there are no bridged ports in this VLAN, we're done */
if (group_count == 0)
- return 0;
+ goto no_ports_in_vlan;
err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
group_count, group_ids,
@@ -2690,6 +3039,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 flood group\n", err);
+no_ports_in_vlan:
+ kfree(group_ids);
return err;
}
@@ -2700,7 +3051,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
struct rocker *rocker = rocker_port->rocker;
struct rocker_port *p;
bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
- u32 out_lport;
+ u32 out_pport;
int ref = 0;
int err;
int i;
@@ -2711,14 +3062,14 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
if (rocker_port->stp_state == BR_STATE_LEARNING ||
rocker_port->stp_state == BR_STATE_FORWARDING) {
- out_lport = rocker_port->lport;
+ out_pport = rocker_port->pport;
err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_lport,
+ vlan_id, out_pport,
pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 group for lport %d\n",
- err, out_lport);
+ "Error (%d) port VLAN l2 group for pport %d\n",
+ err, out_pport);
return err;
}
}
@@ -2737,9 +3088,9 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
if ((!adding || ref != 1) && (adding || ref != 0))
return 0;
- out_lport = 0;
+ out_pport = 0;
err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_lport,
+ vlan_id, out_pport,
pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
@@ -2799,9 +3150,9 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
int flags, struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
- u32 in_lport = rocker_port->lport;
- u32 in_lport_mask = 0xffffffff;
- u32 out_lport = 0;
+ u32 in_pport = rocker_port->pport;
+ u32 in_pport_mask = 0xffffffff;
+ u32 out_pport = 0;
u8 *eth_src = NULL;
u8 *eth_src_mask = NULL;
__be16 vlan_id_mask = htons(0xffff);
@@ -2809,11 +3160,11 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
u8 ip_proto_mask = 0;
u8 ip_tos = 0;
u8 ip_tos_mask = 0;
- u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+ u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
int err;
err = rocker_flow_tbl_acl(rocker_port, flags,
- in_lport, in_lport_mask,
+ in_pport, in_pport_mask,
eth_src, eth_src_mask,
ctrl->eth_dst, ctrl->eth_dst_mask,
ctrl->eth_type,
@@ -2856,7 +3207,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
int flags, struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
- u32 in_lport_mask = 0xffffffff;
+ u32 in_pport_mask = 0xffffffff;
__be16 vlan_id_mask = htons(0xffff);
int err;
@@ -2864,7 +3215,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
vlan_id = rocker_port->internal_vlan_id;
err = rocker_flow_tbl_term_mac(rocker_port,
- rocker_port->lport, in_lport_mask,
+ rocker_port->pport, in_pport_mask,
ctrl->eth_type, ctrl->eth_dst,
ctrl->eth_dst_mask, vlan_id,
vlan_id_mask, ctrl->copy_to_cpu,
@@ -2934,7 +3285,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
- u32 in_lport = rocker_port->lport;
+ u32 in_pport = rocker_port->pport;
__be16 vlan_id = htons(vid);
__be16 vlan_id_mask = htons(0xffff);
__be16 internal_vlan_id;
@@ -2978,7 +3329,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
}
err = rocker_flow_tbl_vlan(rocker_port, flags,
- in_lport, vlan_id, vlan_id_mask,
+ in_pport, vlan_id, vlan_id_mask,
goto_tbl, untagged, internal_vlan_id);
if (err)
netdev_err(rocker_port->dev,
@@ -2990,20 +3341,20 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
- u32 in_lport;
- u32 in_lport_mask;
+ u32 in_pport;
+ u32 in_pport_mask;
int err;
/* Normal Ethernet Frames. Matches pkts from any local physical
* ports. Goto VLAN tbl.
*/
- in_lport = 0;
- in_lport_mask = 0xffff0000;
+ in_pport = 0;
+ in_pport_mask = 0xffff0000;
goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
err = rocker_flow_tbl_ig_port(rocker_port, flags,
- in_lport, in_lport_mask,
+ in_pport, in_pport_mask,
goto_tbl);
if (err)
netdev_err(rocker_port->dev,
@@ -3047,7 +3398,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
struct rocker_fdb_learn_work *lw;
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
- u32 out_lport = rocker_port->lport;
+ u32 out_pport = rocker_port->pport;
u32 tunnel_id = 0;
u32 group_id = ROCKER_GROUP_NONE;
bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
@@ -3055,7 +3406,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
int err;
if (rocker_port_is_bridged(rocker_port))
- group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+ group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
@@ -3114,7 +3465,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
return -ENOMEM;
fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
- fdb->key.lport = rocker_port->lport;
+ fdb->key.pport = rocker_port->pport;
ether_addr_copy(fdb->key.addr, addr);
fdb->key.vlan_id = vlan_id;
fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
@@ -3161,7 +3512,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.lport != rocker_port->lport)
+ if (found->key.pport != rocker_port->pport)
continue;
if (!found->learned)
continue;
@@ -3182,7 +3533,7 @@ err_out:
static int rocker_port_router_mac(struct rocker_port *rocker_port,
int flags, __be16 vlan_id)
{
- u32 in_lport_mask = 0xffffffff;
+ u32 in_pport_mask = 0xffffffff;
__be16 eth_type;
const u8 *dst_mac_mask = ff_mac;
__be16 vlan_id_mask = htons(0xffff);
@@ -3194,7 +3545,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
eth_type = htons(ETH_P_IP);
err = rocker_flow_tbl_term_mac(rocker_port,
- rocker_port->lport, in_lport_mask,
+ rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
copy_to_cpu, flags);
@@ -3203,7 +3554,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
eth_type = htons(ETH_P_IPV6);
err = rocker_flow_tbl_term_mac(rocker_port,
- rocker_port->lport, in_lport_mask,
+ rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
copy_to_cpu, flags);
@@ -3214,7 +3565,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
static int rocker_port_fwding(struct rocker_port *rocker_port)
{
bool pop_vlan;
- u32 out_lport;
+ u32 out_pport;
__be16 vlan_id;
u16 vid;
int flags = ROCKER_OP_FLAG_NOWAIT;
@@ -3231,19 +3582,19 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
rocker_port->stp_state != BR_STATE_FORWARDING)
flags |= ROCKER_OP_FLAG_REMOVE;
- out_lport = rocker_port->lport;
+ out_pport = rocker_port->pport;
for (vid = 1; vid < VLAN_N_VID; vid++) {
if (!test_bit(vid, rocker_port->vlan_bitmap))
continue;
vlan_id = htons(vid);
pop_vlan = rocker_vlan_id_is_internal(vlan_id);
err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_lport,
+ vlan_id, out_pport,
pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 group for lport %d\n",
- err, out_lport);
+ "Error (%d) port VLAN l2 group for pport %d\n",
+ err, out_pport);
return err;
}
}
@@ -3302,6 +3653,26 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
return rocker_port_fwding(rocker_port);
}
+static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
+{
+ if (rocker_port_is_bridged(rocker_port))
+ /* bridge STP will enable port */
+ return 0;
+
+ /* port is not bridged, so simulate going to FORWARDING state */
+ return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
+}
+
+static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
+{
+ if (rocker_port_is_bridged(rocker_port))
+ /* bridge STP will disable port */
+ return 0;
+
+ /* port is not bridged, so simulate going to DISABLED state */
+ return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+}
+
static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
{
@@ -3387,6 +3758,51 @@ not_found:
spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
+ int dst_len, struct fib_info *fi, u32 tb_id,
+ int flags)
+{
+ struct fib_nh *nh;
+ __be16 eth_type = htons(ETH_P_IP);
+ __be32 dst_mask = inet_make_mask(dst_len);
+ __be16 internal_vlan_id = rocker_port->internal_vlan_id;
+ u32 priority = fi->fib_priority;
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 group_id;
+ bool nh_on_port;
+ bool has_gw;
+ u32 index;
+ int err;
+
+ /* XXX support ECMP */
+
+ nh = fi->fib_nh;
+ nh_on_port = (fi->fib_dev == rocker_port->dev);
+ has_gw = !!nh->nh_gw;
+
+ if (has_gw && nh_on_port) {
+ err = rocker_port_ipv4_nh(rocker_port, flags,
+ nh->nh_gw, &index);
+ if (err)
+ return err;
+
+ group_id = ROCKER_GROUP_L3_UNICAST(index);
+ } else {
+ /* Send to CPU for processing */
+ group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
+ }
+
+ err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+ dst_mask, priority, goto_tbl,
+ group_id, flags);
+ if (err)
+ netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
+ err, &dst);
+
+ return err;
+}
+
/*****************
* Net device ops
*****************/
@@ -3394,8 +3810,6 @@ not_found:
static int rocker_port_open(struct net_device *dev)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- u8 stp_state = rocker_port_is_bridged(rocker_port) ?
- BR_STATE_BLOCKING : BR_STATE_FORWARDING;
int err;
err = rocker_port_dma_rings_init(rocker_port);
@@ -3418,9 +3832,9 @@ static int rocker_port_open(struct net_device *dev)
goto err_request_rx_irq;
}
- err = rocker_port_stp_update(rocker_port, stp_state);
+ err = rocker_port_fwd_enable(rocker_port);
if (err)
- goto err_stp_update;
+ goto err_fwd_enable;
napi_enable(&rocker_port->napi_tx);
napi_enable(&rocker_port->napi_rx);
@@ -3428,7 +3842,7 @@ static int rocker_port_open(struct net_device *dev)
netif_start_queue(dev);
return 0;
-err_stp_update:
+err_fwd_enable:
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
@@ -3445,7 +3859,7 @@ static int rocker_port_stop(struct net_device *dev)
rocker_port_set_enable(rocker_port, false);
napi_disable(&rocker_port->napi_rx);
napi_disable(&rocker_port->napi_tx);
- rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+ rocker_port_fwd_disable(rocker_port);
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
rocker_port_dma_rings_fini(rocker_port);
@@ -3702,7 +4116,7 @@ static int rocker_port_fdb_dump(struct sk_buff *skb,
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.lport != rocker_port->lport)
+ if (found->key.pport != rocker_port->pport)
continue;
if (idx < cb->args[0])
goto skip;
@@ -3762,32 +4176,30 @@ static int rocker_port_bridge_setlink(struct net_device *dev,
static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask)
+ u32 filter_mask, int nlflags)
{
struct rocker_port *rocker_port = netdev_priv(dev);
u16 mode = BRIDGE_MODE_UNDEF;
u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
- rocker_port->brport_flags, mask);
+ rocker_port->brport_flags, mask,
+ nlflags);
}
-static int rocker_port_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static int rocker_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
-
- psid->id_len = sizeof(rocker->hw.id);
- memcpy(&psid->id, &rocker->hw.id, psid->id_len);
- return 0;
-}
+ struct port_name name = { .buf = buf, .len = len };
+ int err;
-static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
+ err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_phys_name_proc,
+ &name, false);
- return rocker_port_stp_update(rocker_port, state);
+ return err ? -EOPNOTSUPP : 0;
}
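rocker_port_get_phys_port_name() returns the switch's own name for the front-panel port, sanitized to alphanumerics and NUL-terminated by the _phys_name_proc helper above. One hedged way to observe it from user space, assuming the companion "phys_port_name" sysfs attribute from the same kernel series is present (that attribute is not added by this patch, and the interface name is only an example):

    /* Hypothetical userspace check; sysfs path and "eth0" are assumptions. */
    #include <stdio.h>

    int main(void)
    {
    	char name[64];
    	FILE *f = fopen("/sys/class/net/eth0/phys_port_name", "r");

    	if (f && fgets(name, sizeof(name), f))
    		printf("phys_port_name: %s", name);
    	if (f)
    		fclose(f);
    	return 0;
    }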
static const struct net_device_ops rocker_port_netdev_ops = {
@@ -3802,8 +4214,61 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_fdb_dump = rocker_port_fdb_dump,
.ndo_bridge_setlink = rocker_port_bridge_setlink,
.ndo_bridge_getlink = rocker_port_bridge_getlink,
- .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get,
- .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update,
+ .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+};
+
+/********************
+ * swdev interface
+ ********************/
+
+static int rocker_port_swdev_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+
+ psid->id_len = sizeof(rocker->hw.id);
+ memcpy(&psid->id, &rocker->hw.id, psid->id_len);
+ return 0;
+}
+
+static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_port_stp_update(rocker_port, state);
+}
+
+static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
+ __be32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type,
+ u32 nlflags, u32 tb_id)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int flags = 0;
+
+ return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+ fi, tb_id, flags);
+}
+
+static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
+ __be32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int flags = ROCKER_OP_FLAG_REMOVE;
+
+ return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+ fi, tb_id, flags);
+}
+
+static const struct swdev_ops rocker_port_swdev_ops = {
+ .swdev_parent_id_get = rocker_port_swdev_parent_id_get,
+ .swdev_port_stp_update = rocker_port_swdev_port_stp_update,
+ .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add,
+ .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del,
};
/********************
@@ -3882,8 +4347,8 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker,
if (!cmd_stats)
return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT,
- rocker_port->lport))
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
+ rocker_port->pport))
return -EMSGSIZE;
rocker_tlv_nest_end(desc_info, cmd_stats);
@@ -3900,7 +4365,7 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
struct rocker_tlv *pattr;
- u32 lport;
+ u32 pport;
u64 *data = priv;
int i;
@@ -3912,11 +4377,11 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
attrs[ROCKER_TLV_CMD_INFO]);
- if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT])
+ if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
return -EIO;
- lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]);
- if (lport != rocker_port->lport)
+ pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
+ if (pport != rocker_port->pport)
return -EIO;
for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
@@ -4104,7 +4569,7 @@ static void rocker_carrier_init(struct rocker_port *rocker_port)
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
- link_up = link_status & (1 << rocker_port->lport);
+ link_up = link_status & (1 << rocker_port->pport);
if (link_up)
netif_carrier_on(rocker_port->dev);
else
@@ -4152,20 +4617,22 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->dev = dev;
rocker_port->rocker = rocker;
rocker_port->port_number = port_number;
- rocker_port->lport = port_number + 1;
+ rocker_port->pport = port_number + 1;
rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
rocker_port_dev_addr_init(rocker, rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
+ dev->swdev_ops = &rocker_port_swdev_ops;
netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
NAPI_POLL_WEIGHT);
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_SWITCH_OFFLOAD;
+ dev->features |= NETIF_F_NETNS_LOCAL |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_SWITCH_OFFLOAD;
err = register_netdev(dev);
if (err) {
@@ -4293,6 +4760,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
goto err_pci_resource_len_check;
}
@@ -4436,9 +4904,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port,
bridge->ifindex);
- err = rocker_port_vlan(rocker_port, 0, 0);
-
- return err;
+ return rocker_port_vlan(rocker_port, 0, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
@@ -4458,6 +4924,11 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
err = rocker_port_vlan(rocker_port, 0, 0);
+ if (err)
+ return err;
+
+ if (rocker_port->dev->flags & IFF_UP)
+ err = rocker_port_fwd_enable(rocker_port);
return err;
}
@@ -4509,6 +4980,48 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = {
.notifier_call = rocker_netdevice_event,
};
+/************************************
+ * Net event notifier event handler
+ ************************************/
+
+static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
+ __be32 ip_addr = *(__be32 *)n->primary_key;
+
+ return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+}
+
+static int rocker_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ struct neighbour *n = ptr;
+ int err;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+ dev = n->dev;
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+ err = rocker_neigh_update(dev, n);
+ if (err)
+ netdev_warn(dev,
+ "failed to handle neigh update (err %d)\n",
+ err);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netevent_nb __read_mostly = {
+ .notifier_call = rocker_netevent_event,
+};
+
/***********************
* Module init and exit
***********************/
@@ -4518,18 +5031,21 @@ static int __init rocker_module_init(void)
int err;
register_netdevice_notifier(&rocker_netdevice_nb);
+ register_netevent_notifier(&rocker_netevent_nb);
err = pci_register_driver(&rocker_pci_driver);
if (err)
goto err_pci_register_driver;
return 0;
err_pci_register_driver:
+ unregister_netdevice_notifier(&rocker_netevent_nb);
unregister_netdevice_notifier(&rocker_netdevice_nb);
return err;
}
static void __exit rocker_module_exit(void)
{
+ unregister_netevent_notifier(&rocker_netevent_nb);
unregister_netdevice_notifier(&rocker_netdevice_nb);
pci_unregister_driver(&rocker_pci_driver);
}
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index a5bc432feada..a4e9591d7457 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -14,6 +14,21 @@
#include <linux/types.h>
+/* Return codes */
+enum {
+ ROCKER_OK = 0,
+ ROCKER_ENOENT = 2,
+ ROCKER_ENXIO = 6,
+ ROCKER_ENOMEM = 12,
+ ROCKER_EEXIST = 17,
+ ROCKER_EINVAL = 22,
+ ROCKER_EMSGSIZE = 90,
+ ROCKER_ENOTSUP = 95,
+ ROCKER_ENOBUFS = 105,
+};
+
+#define ROCKER_FP_PORTS_MAX 62
+
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006
@@ -136,13 +151,14 @@ enum {
enum {
ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC,
- ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */
ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */
ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */
ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */
__ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -151,7 +167,7 @@ enum {
enum {
ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
- ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */
+ ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */
ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */
ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */
@@ -191,7 +207,7 @@ enum {
enum {
ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC,
- ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */
+ ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */
ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */
__ROCKER_TLV_EVENT_LINK_CHANGED_MAX,
@@ -201,7 +217,7 @@ enum {
enum {
ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC,
- ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */
+ ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */
ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */
ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */
@@ -275,9 +291,9 @@ enum {
ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */
ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */
ROCKER_TLV_OF_DPA_COOKIE, /* u64 */
- ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */
- ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */
- ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */
+ ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */
ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */
ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */
ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */
@@ -291,7 +307,7 @@ enum {
ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */
ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */
ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */
- ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */
ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */
ROCKER_TLV_OF_DPA_DST_MAC, /* binary */
ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */
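
The ROCKER_* status codes added above mirror standard errno values as reported by the device. For reference only (not part of this patch), a minimal sketch of a helper that folds them back into negative kernel errnos; the name rocker_err_to_errno and the default case are assumptions:

#include <linux/errno.h>
#include <linux/types.h>

static int rocker_err_to_errno(u32 rocker_err)	/* hypothetical helper */
{
	switch (rocker_err) {
	case ROCKER_OK:		return 0;
	case ROCKER_ENOENT:	return -ENOENT;
	case ROCKER_ENXIO:	return -ENXIO;
	case ROCKER_ENOMEM:	return -ENOMEM;
	case ROCKER_EEXIST:	return -EEXIST;
	case ROCKER_EINVAL:	return -EINVAL;
	case ROCKER_EMSGSIZE:	return -EMSGSIZE;
	case ROCKER_ENOTSUP:	return -EOPNOTSUPP;
	case ROCKER_ENOBUFS:	return -ENOBUFS;
	default:		return -EIO;	/* unknown device status */
	}
}
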
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index c8a01ee4d25e..413ea14ab91f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -422,11 +422,11 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
/* assign queue number */
tx_ring->queue_no = queue_no;
- /* initalise counters */
+ /* initialise counters */
tx_ring->dirty_tx = 0;
tx_ring->cur_tx = 0;
- /* initalise TX queue lock */
+ /* initialise TX queue lock */
spin_lock_init(&tx_ring->tx_lock);
return 0;
@@ -515,7 +515,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
goto err_free_rx_buffers;
}
- /* initalise counters */
+ /* initialise counters */
rx_ring->cur_rx = 0;
rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
priv->dma_buf_sz = bfsize;
@@ -837,7 +837,7 @@ static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
/* free the skbuffs of the ring */
tx_free_ring_skbufs(tx_ring);
- /* initalise counters */
+ /* initialise counters */
tx_ring->cur_tx = 0;
tx_ring->dirty_tx = 0;
@@ -1176,7 +1176,7 @@ static int sxgbe_open(struct net_device *dev)
if (priv->phydev)
phy_start(priv->phydev);
- /* initalise TX coalesce parameters */
+ /* initialise TX coalesce parameters */
sxgbe_tx_init_coalesce(priv);
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
@@ -1721,7 +1721,7 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
* Description:
* This function is a driver entry point whenever ifconfig command gets
* executed to see device statistics. Statistics are number of
- * bytes sent or received, errors occured etc.
+ * bytes sent or received, errors occurred etc.
* Return value:
* This function returns various statistical information of device.
*/
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 238482495e81..4b00545a3ace 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2523,9 +2523,7 @@ int efx_try_recovery(struct efx_nic *efx)
* schedule a 'recover or reset', leading to this recovery handler.
* Manually call the eeh failure check function.
*/
- struct eeh_dev *eehdev =
- of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
-
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
if (eeh_dev_check_failure(eehdev)) {
/* The EEH mechanisms will handle the error and reset the
* device if necessary.
@@ -3215,7 +3213,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
return status;
}
-/* Fake a successfull reset, which will be performed later in efx_io_resume. */
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
struct efx_nic *efx = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 75975328e020..bb89e96a125e 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -645,7 +645,7 @@ static bool efx_check_tx_flush_complete(struct efx_nic *efx)
}
/* Flush all the transmit queues, and continue flushing receive queues until
- * they're all flushed. Wait for the DRAIN events to be recieved so that there
+ * they're all flushed. Wait for the DRAIN events to be received so that there
* are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
@@ -1108,7 +1108,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
- * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
* the RX queue back to the mask of RX queues in need of flushing.
*/
static void
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index a707fb5ef14c..e028de10e1b7 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -6497,7 +6497,7 @@
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 6b861e3de4b0..a2e9aee05cdd 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -323,9 +323,9 @@ struct efx_ptp_data {
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
-static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
static int efx_phc_settime(struct ptp_clock_info *ptp,
- const struct timespec *e_ts);
+ const struct timespec64 *e_ts);
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
@@ -1198,8 +1198,8 @@ static const struct ptp_clock_info efx_phc_clock_info = {
.pps = 1,
.adjfreq = efx_phc_adjfreq,
.adjtime = efx_phc_adjtime,
- .gettime = efx_phc_gettime,
- .settime = efx_phc_settime,
+ .gettime64 = efx_phc_gettime,
+ .settime64 = efx_phc_settime,
.enable = efx_phc_enable,
};
@@ -1837,7 +1837,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
NULL, 0, NULL);
}
-static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
@@ -1859,28 +1859,28 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
kt = ptp_data->nic_to_kernel_time(
MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
- *ts = ktime_to_timespec(kt);
+ *ts = ktime_to_timespec64(kt);
return 0;
}
static int efx_phc_settime(struct ptp_clock_info *ptp,
- const struct timespec *e_ts)
+ const struct timespec64 *e_ts)
{
/* Get the current NIC time, efx_phc_gettime.
* Subtract from the desired time to get the offset
* call efx_phc_adjtime with the offset
*/
int rc;
- struct timespec time_now;
- struct timespec delta;
+ struct timespec64 time_now;
+ struct timespec64 delta;
rc = efx_phc_gettime(ptp, &time_now);
if (rc != 0)
return rc;
- delta = timespec_sub(*e_ts, time_now);
+ delta = timespec64_sub(*e_ts, time_now);
- rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+ rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta));
if (rc != 0)
return rc;
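
The gettime/settime conversion above is the generic 2038-safe PHC pattern: the callbacks move to timespec64 and the ptp_clock_info hooks move to .gettime64/.settime64. A minimal self-contained sketch, with a plain u64 nanosecond counter standing in for the NIC clock (the foo_* names are invented):

#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

static u64 foo_hw_ns;		/* stand-in for the device's time register */

static int foo_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	*ts = ns_to_timespec64(foo_hw_ns);	/* 64-bit safe split */
	return 0;
}

static int foo_phc_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	foo_hw_ns = timespec64_to_ns(ts);	/* no overflow on 32-bit */
	return 0;
}

static const struct ptp_clock_info foo_phc_info = {
	.name		= "foo-phc",
	.gettime64	= foo_phc_gettime,
	.settime64	= foo_phc_settime,
};
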
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 10b6173d557d..b605dfd5c7bc 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -46,7 +46,7 @@ struct efx_loopback_payload {
struct iphdr ip;
struct udphdr udp;
__be16 iteration;
- const char msg[64];
+ char msg[64];
} __packed;
/* Loopback test source MAC address */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 3583f0208a6e..f12c811938d2 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -205,8 +205,7 @@ static int siena_map_reset_flags(u32 *flags)
*/
static void siena_monitor(struct efx_nic *efx)
{
- struct eeh_dev *eehdev =
- of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
eeh_dev_check_failure(eehdev);
}
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index a8bbbad68a88..fe83430796fd 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1067,7 +1067,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
}
/* Copy the list of individual addresses into the vfdi_status.peers
- * array and auxillary pages, protected by %local_lock. Drop that lock
+ * array and auxiliary pages, protected by %local_lock. Drop that lock
* and then broadcast the address list to every VF.
*/
static void efx_siena_sriov_peer_work(struct work_struct *data)
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index ae044f44936a..f62901d4cae0 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -98,7 +98,7 @@ struct vfdi_endpoint {
* @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
* @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
* finalize the SRAM entries.
- * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targetting the given RXQ.
+ * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
* @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
* @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
* from PF and write the initial status.
@@ -148,7 +148,7 @@ enum vfdi_op {
* @u.init_txq.flags: Checksum offload flags.
* @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
* address of each page backing the transmit queue.
- * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targetting
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
* all traffic at this receive queue.
* @u.mac_filter.flags: MAC filter flags.
* @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 8678e39aba08..630f0b7800e4 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2204,27 +2204,17 @@ static int try_toggle_control_gpio(struct device *dev,
int value, unsigned int nsdelay)
{
struct gpio_desc *gpio = *desc;
- int res;
-
- gpio = devm_gpiod_get_index(dev, name, index);
- if (IS_ERR(gpio)) {
- if (PTR_ERR(gpio) == -ENOENT) {
- *desc = NULL;
- return 0;
- }
+ enum gpiod_flags flags = value ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
+ gpio = devm_gpiod_get_index_optional(dev, name, index, flags);
+ if (IS_ERR(gpio))
return PTR_ERR(gpio);
+
+ if (gpio) {
+ if (nsdelay)
+ usleep_range(nsdelay, 2 * nsdelay);
+ gpiod_set_value_cansleep(gpio, value);
}
- res = gpiod_direction_output(gpio, !value);
- if (res) {
- dev_err(dev, "unable to toggle gpio %s: %i\n", name, res);
- devm_gpiod_put(dev, gpio);
- gpio = NULL;
- return res;
- }
- if (nsdelay)
- usleep_range(nsdelay, 2 * nsdelay);
- gpiod_set_value_cansleep(gpio, value);
*desc = gpio;
return 0;
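
The rewrite above leans on devm_gpiod_get_index_optional(), which returns NULL rather than an error when the GPIO simply is not described, and on requesting the line with its initial level in one step. A sketch of that idiom in isolation; example_claim_reset_gpio, the "reset" con_id and the delay values are illustrative only:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_claim_reset_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	/* NULL (not an error) if the firmware describes no such GPIO */
	gpio = devm_gpiod_get_index_optional(dev, "reset", 0, GPIOD_OUT_HIGH);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);		/* e.g. -EPROBE_DEFER */

	if (gpio) {
		usleep_range(100, 200);		/* arbitrary settle time */
		gpiod_set_value_cansleep(gpio, 0);	/* release reset */
	}

	*out = gpio;				/* may legitimately be NULL */
	return 0;
}
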
@@ -2248,9 +2238,10 @@ static int smc_drv_probe(struct platform_device *pdev)
const struct of_device_id *match = NULL;
struct smc_local *lp;
struct net_device *ndev;
- struct resource *res, *ires;
+ struct resource *res;
unsigned int __iomem *addr;
unsigned long irq_flags = SMC_IRQ_FLAGS;
+ unsigned long irq_resflags;
int ret;
ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2342,16 +2333,19 @@ static int smc_drv_probe(struct platform_device *pdev)
goto out_free_netdev;
}
- ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!ires) {
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
ret = -ENODEV;
goto out_release_io;
}
-
- ndev->irq = ires->start;
-
- if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
- irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+ /*
+ * If this platform does not specify any special irqflags, or if
+ * the resource supplies a trigger, override the irqflags with
+ * the trigger flags from the resource.
+ */
+ irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+ if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+ irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
ret = smc_request_attrib(pdev, ndev);
if (ret)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 2965c6ae7d6e..959aeeade0c9 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -843,7 +843,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev)
unsigned long flags;
/* Initialise tx packet using broadcast destination address */
- memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN);
+ eth_broadcast_addr(pdata->loopback_tx_pkt);
/* Use incrementing source address */
for (i = 6; i < 12; i++)
@@ -2418,9 +2418,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct net_device *dev;
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
- struct resource *res, *irq_res;
+ struct resource *res;
unsigned int intcfg = 0;
- int res_size, irq_flags;
+ int res_size, irq, irq_flags;
int retval;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2434,8 +2434,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
}
res_size = resource_size(res);
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;
@@ -2455,8 +2455,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pdata = netdev_priv(dev);
- dev->irq = irq_res->start;
- irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
+ dev->irq = irq;
+ irq_flags = irq_get_trigger_type(irq);
pdata->ioaddr = ioremap_nocache(res->start, res_size);
pdata->dev = dev;
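
Both SMSC hunks replace an IORESOURCE_IRQ lookup with platform_get_irq() plus a trigger-type query, so the trigger encoded in firmware is recovered from the irq core instead of res->flags. A standalone sketch of that pattern; foo_request_irq and the use of devm_request_irq() are assumptions, not code from either driver:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

static int foo_request_irq(struct platform_device *pdev,
			   irq_handler_t handler, void *ctx)
{
	unsigned long flags;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;			/* includes -EPROBE_DEFER */

	/* 0 when firmware specified no trigger; otherwise IRQF_TRIGGER_* */
	flags = irq_get_trigger_type(irq);

	return devm_request_irq(&pdev->dev, irq, handler, flags,
				dev_name(&pdev->dev), ctx);
}
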
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index cd77289c3cfe..623c6ed8764a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -150,7 +150,7 @@ struct stmmac_extra_stats {
#define MAC_CSR_H_FRQ_MASK 0x20
#define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
+#define PAUSE_TIME 0xffff
/* Flow Control defines */
#define FLOW_OFF 0
@@ -357,7 +357,8 @@ struct stmmac_dma_ops {
void (*dump_regs) (void __iomem *ioaddr);
/* Set tx/rx threshold in the csr6 register
* An invalid value enables the store-and-forward mode */
- void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
+ void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
+ int rxfifosz);
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index e97074cd5800..5a36bd2c7837 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -91,7 +91,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
STMMAC_RESOURCE_NAME);
if (IS_ERR(dwmac->stmmac_rst)) {
dev_info(dev, "Could not get reset control!\n");
- return -EINVAL;
+ if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dwmac->stmmac_rst = NULL;
}
dwmac->interface = of_get_phy_mode(np);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 64d8f56a9c17..b3fe0575ff6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -172,6 +172,7 @@ enum inter_frame_gap {
/* GMAC FLOW CTRL defines */
#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */
#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
@@ -246,6 +247,56 @@ enum ttc_control {
#define DMA_CONTROL_FEF 0x00000080
#define DMA_CONTROL_FUF 0x00000040
+/* Receive flow control activation field
+ * RFA field in DMA control register, bits 23,10:9
+ */
+#define DMA_CONTROL_RFA_MASK 0x00800600
+
+/* Receive flow control deactivation field
+ * RFD field in DMA control register, bits 22,12:11
+ */
+#define DMA_CONTROL_RFD_MASK 0x00401800
+
+/* RFD and RFA fields are encoded as follows
+ *
+ * Bit Field
+ * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled)
+ * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled)
+ * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled)
+ * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled)
+ * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled)
+ * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled)
+ * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled)
+ * 1,11 - Reserved
+ *
+ * RFD should always be > RFA for a given FIFO size. RFD == RFA may work,
+ * but packet throughput performance may not be as expected.
+ *
+ * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame
+ * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause
+ * Description).
+ *
+ * Be sure that DZPQ (bit 7 in Flow Control Register, GMAC Register 6),
+ * is set to 0. This allows pause frames with a quanta of 0 to be sent
+ * as an XOFF message to the link peer.
+ */
+
+#define RFA_FULL_MINUS_1K 0x00000000
+#define RFA_FULL_MINUS_2K 0x00000200
+#define RFA_FULL_MINUS_3K 0x00000400
+#define RFA_FULL_MINUS_4K 0x00000600
+#define RFA_FULL_MINUS_5K 0x00800000
+#define RFA_FULL_MINUS_6K 0x00800200
+#define RFA_FULL_MINUS_7K 0x00800400
+
+#define RFD_FULL_MINUS_1K 0x00000000
+#define RFD_FULL_MINUS_2K 0x00000800
+#define RFD_FULL_MINUS_3K 0x00001000
+#define RFD_FULL_MINUS_4K 0x00001800
+#define RFD_FULL_MINUS_5K 0x00400000
+#define RFD_FULL_MINUS_6K 0x00400800
+#define RFD_FULL_MINUS_7K 0x00401000
+
enum rtc_control {
DMA_CONTROL_RTC_64 = 0x00000000,
DMA_CONTROL_RTC_32 = 0x00000008,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 0adcf73cf722..371a669d69fd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -201,7 +201,10 @@ static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
void __iomem *ioaddr = hw->pcsr;
- unsigned int flow = 0;
+ /* Set flow such that DZPQ in Mac Register 6 is 0,
+ * and unicast pause detect is enabled.
+ */
+ unsigned int flow = GMAC_FLOW_CTRL_UP;
pr_debug("GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 59d92e811750..0e8937c1184a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -106,8 +106,29 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
return 0;
}
+static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
+{
+ csr6 &= ~DMA_CONTROL_RFA_MASK;
+ csr6 &= ~DMA_CONTROL_RFD_MASK;
+
+ /* Leave flow control disabled if the receive fifo size is 0 or
+ * less than 4K. Otherwise, send XOFF when the fifo is 1K less
+ * than full, and send XON when it is 2K less than full.
+ */
+ if (rxfifosz < 4096) {
+ csr6 &= ~DMA_CONTROL_EFC;
+ pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n",
+ rxfifosz);
+ } else {
+ csr6 |= DMA_CONTROL_EFC;
+ csr6 |= RFA_FULL_MINUS_1K;
+ csr6 |= RFD_FULL_MINUS_2K;
+ }
+ return csr6;
+}
+
static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode)
+ int rxmode, int rxfifosz)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -153,6 +174,9 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
csr6 |= DMA_CONTROL_RTC_128;
}
+ /* Configure flow control based on rx fifo size */
+ csr6 = dwmac1000_configure_fc(csr6, rxfifosz);
+
writel(csr6, ioaddr + DMA_CONTROL);
}
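
The RFA/RFD values programmed by dwmac1000_configure_fc() above follow the bit layout documented in dwmac1000.h (RFA in bits 23,10:9, RFD in bits 22,12:11). Purely as an illustration of that encoding, and not code from the patch, the *_FULL_MINUS_* macros can also be derived arithmetically:

#include <linux/types.h>

/* kb = 1..7, meaning "full minus kb KB"; callers must still respect the
 * FIFO-size restrictions quoted in dwmac1000.h.
 */
static u32 rfa_encode(unsigned int kb)		/* activation threshold */
{
	unsigned int v = kb - 1;

	return ((v & 0x3) << 9) | ((v >> 2) << 23);
}

static u32 rfd_encode(unsigned int kb)		/* deactivation threshold */
{
	unsigned int v = kb - 1;

	return ((v & 0x3) << 11) | ((v >> 2) << 22);
}

/* e.g. rfa_encode(1) == RFA_FULL_MINUS_1K and rfd_encode(2) ==
 * RFD_FULL_MINUS_2K, the pair dwmac1000_configure_fc() uses for FIFOs >= 4K.
 */
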
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 7d1dce9e7ffc..9d0971c1c2ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -72,7 +72,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
* control register.
*/
static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode)
+ int rxmode, int rxfifosz)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c0a391983372..2ac9552d1fa3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -97,6 +97,7 @@ struct stmmac_priv {
int wolopts;
int wol_irq;
struct clk *stmmac_clk;
+ struct clk *pclk;
struct reset_control *stmmac_rst;
int clk_csr;
struct timer_list eee_ctrl_timer;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a0ea84fe6519..05c146f718a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -609,7 +609,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
* where, freq_div_ratio = clk_ptp_ref_i/50MHz
* hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
* NOTE: clk_ptp_ref_i should be >= 50MHz to
- * achive 20ns accuracy.
+ * achieve 20ns accuracy.
*
* 2^x * y == (y << x), hence
* 2^32 * 50000000 ==> (50000000 << 32)
@@ -1277,8 +1277,10 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
+ int rxfifosz = priv->plat->rx_fifo_size;
+
if (priv->plat->force_thresh_dma_mode)
- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
* In case of GMAC, SF mode can be enabled
@@ -1287,10 +1289,12 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* 2) There is no bugged Jumbo frame support
* that needs to not insert csum in the TDES.
*/
- priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
+ rxfifosz);
priv->xstats.threshold = SF_DMA_MODE;
} else
- priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
+ rxfifosz);
}
/**
@@ -1442,6 +1446,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
int status;
+ int rxfifosz = priv->plat->rx_fifo_size;
status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
if (likely((status & handle_rx)) || (status & handle_tx)) {
@@ -1456,10 +1461,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
(tc <= 256)) {
tc += 64;
if (priv->plat->force_thresh_dma_mode)
- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
+ rxfifosz);
else
priv->hw->dma->dma_mode(priv->ioaddr, tc,
- SF_DMA_MODE);
+ SF_DMA_MODE, rxfifosz);
priv->xstats.threshold = tc;
}
} else if (unlikely(status == tx_hard_error))
@@ -2849,6 +2855,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
}
clk_prepare_enable(priv->stmmac_clk);
+ priv->pclk = devm_clk_get(priv->device, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto error_pclk_get;
+ }
+ priv->pclk = NULL;
+ }
+ clk_prepare_enable(priv->pclk);
+
priv->stmmac_rst = devm_reset_control_get(priv->device,
STMMAC_RESOURCE_NAME);
if (IS_ERR(priv->stmmac_rst)) {
@@ -2934,6 +2950,8 @@ error_mdio_register:
error_netdev_register:
netif_napi_del(&priv->napi);
error_hw_init:
+ clk_disable_unprepare(priv->pclk);
+error_pclk_get:
clk_disable_unprepare(priv->stmmac_clk);
error_clk_get:
free_netdev(ndev);
@@ -2958,14 +2976,15 @@ int stmmac_dvr_remove(struct net_device *ndev)
priv->hw->dma->stop_tx(priv->ioaddr);
stmmac_set_mac(priv->ioaddr, false);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->stmmac_rst)
reset_control_assert(priv->stmmac_rst);
+ clk_disable_unprepare(priv->pclk);
clk_disable_unprepare(priv->stmmac_clk);
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
free_netdev(ndev);
return 0;
@@ -3011,6 +3030,7 @@ int stmmac_suspend(struct net_device *ndev)
stmmac_set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
+ clk_disable(priv->pclk);
clk_disable(priv->stmmac_clk);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -3051,6 +3071,7 @@ int stmmac_resume(struct net_device *ndev)
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
clk_enable(priv->stmmac_clk);
+ clk_enable(priv->pclk);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
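
The pclk handling added above is the usual optional-clock pattern: only -EPROBE_DEFER is propagated, any other devm_clk_get() failure is treated as "clock not provided", and the NULL clk is then handed to the clk API, which accepts it as a no-op. A compact sketch of just that pattern (example_get_optional_clk is an invented name):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_optional_clk(struct device *dev, const char *id,
				    struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready yet */
		clk = NULL;			/* treat as simply absent */
	}

	*out = clk;
	return clk_prepare_enable(clk);		/* NULL clk is a no-op */
}
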
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f9b42f11950f..68aec5c460db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -23,6 +23,7 @@
*******************************************************************************/
#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
@@ -181,6 +182,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
+
+ of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);
+
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index c5ee79d8a8c5..170a18b61281 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -105,13 +105,12 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
* Description: this function will read the current time from the
* hardware clock and store it in @ts.
*/
-static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
+static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
unsigned long flags;
u64 ns;
- u32 reminder;
spin_lock_irqsave(&priv->ptp_lock, flags);
@@ -119,8 +118,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
spin_unlock_irqrestore(&priv->ptp_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
- ts->tv_nsec = reminder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
@@ -135,7 +133,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
* hardware clock.
*/
static int stmmac_set_time(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
@@ -168,8 +166,8 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.pps = 0,
.adjfreq = stmmac_adjust_freq,
.adjtime = stmmac_adjust_time,
- .gettime = stmmac_get_time,
- .settime = stmmac_set_time,
+ .gettime64 = stmmac_get_time,
+ .settime64 = stmmac_set_time,
.enable = stmmac_enable,
};
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index fef5dec2cffe..e23a642357e7 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -718,7 +718,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
cluster_start = curr = (gp->rx_new & ~(4 - 1));
count = 0;
kick = -1;
- wmb();
+ dma_wmb();
while (curr != limit) {
curr = NEXT_RX(curr);
if (++count == 4) {
@@ -1038,7 +1038,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
if (gem_intme(entry))
ctrl |= TXDCTRL_INTME;
txd->buffer = cpu_to_le64(mapping);
- wmb();
+ dma_wmb();
txd->control_word = cpu_to_le64(ctrl);
entry = NEXT_TX(entry);
} else {
@@ -1076,7 +1076,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
txd = &gp->init_block->txd[entry];
txd->buffer = cpu_to_le64(mapping);
- wmb();
+ dma_wmb();
txd->control_word = cpu_to_le64(this_ctrl | len);
if (gem_intme(entry))
@@ -1086,7 +1086,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
}
txd = &gp->init_block->txd[first_entry];
txd->buffer = cpu_to_le64(first_mapping);
- wmb();
+ dma_wmb();
txd->control_word =
cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
}
@@ -1585,7 +1585,7 @@ static void gem_clean_rings(struct gem *gp)
gp->rx_skbs[i] = NULL;
}
rxd->status_word = 0;
- wmb();
+ dma_wmb();
rxd->buffer = 0;
}
@@ -1647,7 +1647,7 @@ static void gem_init_rings(struct gem *gp)
RX_BUF_ALLOC_SIZE(gp),
PCI_DMA_FROMDEVICE);
rxd->buffer = cpu_to_le64(dma_addr);
- wmb();
+ dma_wmb();
rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
skb_reserve(skb, RX_OFFSET);
}
@@ -1656,7 +1656,7 @@ static void gem_init_rings(struct gem *gp)
struct gem_txd *txd = &gb->txd[i];
txd->control_word = 0;
- wmb();
+ dma_wmb();
txd->buffer = 0;
}
wmb();
@@ -2175,7 +2175,7 @@ static int gem_do_start(struct net_device *dev)
}
/* Mark us as attached again if we come from resume(), this has
- * no effect if we weren't detatched and needs to be done now.
+ * no effect if we weren't detached and needs to be done now.
*/
netif_device_attach(dev);
@@ -2794,7 +2794,7 @@ static void gem_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- /* Ensure reset task is truely gone */
+ /* Ensure reset task is truly gone */
cancel_work_sync(&gp->reset_task);
/* Free resources */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 7a8ca2c7b7df..cf4dcff051d5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -196,14 +196,14 @@ static u32 sbus_hme_read32(void __iomem *reg)
static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
rxd->rx_addr = (__force hme32)addr;
- wmb();
+ dma_wmb();
rxd->rx_flags = (__force hme32)flags;
}
static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
txd->tx_addr = (__force hme32)addr;
- wmb();
+ dma_wmb();
txd->tx_flags = (__force hme32)flags;
}
@@ -225,14 +225,14 @@ static u32 pci_hme_read32(void __iomem *reg)
static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
- wmb();
+ dma_wmb();
rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
}
static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
txd->tx_addr = (__force hme32)cpu_to_le32(addr);
- wmb();
+ dma_wmb();
txd->tx_flags = (__force hme32)cpu_to_le32(flags);
}
@@ -268,12 +268,12 @@ static u32 pci_hme_read_desc32(hme32 *p)
sbus_readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
- wmb(); \
+ dma_wmb(); \
(__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
- wmb(); \
+ dma_wmb(); \
(__txd)->tx_flags = (__force hme32)(u32)(__flags); \
} while(0)
#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
@@ -293,12 +293,12 @@ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
- wmb(); \
+ dma_wmb(); \
(__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
- wmb(); \
+ dma_wmb(); \
(__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
} while(0)
static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 22e0cad1b4b5..53fe200e0b79 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -519,7 +519,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
if (desc->hdr.state != VIO_DESC_READY)
return 1;
- rmb();
+ dma_rmb();
viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
desc->hdr.state, desc->hdr.ack,
@@ -1380,7 +1380,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This has to be a non-SMP write barrier because we are writing
* to memory which is shared with the peer LDOM.
*/
- wmb();
+ dma_wmb();
d->hdr.state = VIO_DESC_READY;
@@ -1395,7 +1395,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* is marked READY, but start_cons was false.
* If so, vnet_ack() should send out the missed "start" trigger.
*
- * Note that the wmb() above makes sure the cookies et al. are
+ * Note that the dma_wmb() above makes sure the cookies et al. are
* not globally visible before the VIO_DESC_READY, and that the
* stores are ordered correctly by the compiler. The consumer will
* not proceed until the VIO_DESC_READY is visible assuring that
@@ -1411,6 +1411,8 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(err < 0)) {
netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
+ skb = port->tx_bufs[txi].skb;
+ port->tx_bufs[txi].skb = NULL;
dev->stats.tx_carrier_errors++;
goto out_dropped;
}
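
The wmb()-to-dma_wmb() conversions in the sungem, sunhme and sunvnet hunks all protect the same pattern: descriptor payload stores must be visible to the consumer (device or peer LDOM) before the ownership/status word is written. A generic sketch of that publish ordering; the descriptor layout and names here are illustrative, not taken from any of these drivers:

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

struct example_tx_desc {
	__le64	buffer;			/* DMA address of the payload */
	__le64	control_word;		/* consumer polls this for ownership */
};

static void example_publish_tx_desc(struct example_tx_desc *d,
				    u64 dma_addr, u64 ctrl)
{
	d->buffer = cpu_to_le64(dma_addr);

	/* Order the payload store before the handover word; dma_wmb() is
	 * sufficient (and cheaper than wmb()) for consumer-visible memory.
	 */
	dma_wmb();

	d->control_word = cpu_to_le64(ctrl);
}
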
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index f6a71092e135..631e0afd07d2 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -88,6 +88,7 @@ config TI_CPTS
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_CPSW_ALE
+ select TI_DAVINCI_MDIO
depends on OF
depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
---help---
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a1bbaf6352ba..b536b4c82752 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -726,7 +726,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
if (ndev_status && (status >= 0)) {
/* The packet received is for the interface which
* is already down and the other interface is up
- * and running, intead of freeing which results
+ * and running, instead of freeing which results
* in reducing of the number of rx descriptor in
* DMA engine, requeue skb back to cpdma.
*/
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index fbe42cb107ec..85a55b4ff8c0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -167,10 +167,9 @@ static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
- u32 remainder;
unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
@@ -178,21 +177,19 @@ static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
ns = timecounter_read(&cpts->tc);
spin_unlock_irqrestore(&cpts->lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
static int cpts_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&cpts->lock, flags);
timecounter_init(&cpts->tc, &cpts->cc, ns);
@@ -216,20 +213,20 @@ static struct ptp_clock_info cpts_info = {
.pps = 0,
.adjfreq = cpts_ptp_adjfreq,
.adjtime = cpts_ptp_adjtime,
- .gettime = cpts_ptp_gettime,
- .settime = cpts_ptp_settime,
+ .gettime64 = cpts_ptp_gettime,
+ .settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable,
};
static void cpts_overflow_check(struct work_struct *work)
{
- struct timespec ts;
+ struct timespec64 ts;
struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
cpts_ptp_gettime(&cpts->info, &ts);
- pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
}
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index 906e9bc412f5..bbacf5cccec2 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -41,7 +41,10 @@ struct netcp_tx_pipe {
struct netcp_device *netcp_device;
void *dma_queue;
unsigned int dma_queue_id;
- u8 dma_psflags;
+ /* Switch port to which forwarded packets are sent. Used only by ethss */
+ u8 switch_to_port;
+#define SWITCH_TO_PORT_IN_TAGINFO BIT(0)
+ u8 flags;
void *dma_channel;
const char *dma_chan_name;
};
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index a31a8c3c8e7c..43efc3a0cda5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1098,9 +1098,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
struct netcp_tx_pipe *tx_pipe = NULL;
struct netcp_hook_list *tx_hook;
struct netcp_packet p_info;
- u32 packet_info = 0;
unsigned int dma_sz;
dma_addr_t dma;
+ u32 tmp = 0;
int ret = 0;
p_info.netcp = netcp;
@@ -1140,20 +1140,27 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
p_info.psdata_len);
set_words(psdata, p_info.psdata_len, psdata);
- packet_info |=
- (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
+ tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
KNAV_DMA_DESC_PSLEN_SHIFT;
}
- packet_info |= KNAV_DMA_DESC_HAS_EPIB |
+ tmp |= KNAV_DMA_DESC_HAS_EPIB |
((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
- KNAV_DMA_DESC_RETQ_SHIFT) |
- ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
- KNAV_DMA_DESC_PSFLAG_SHIFT);
+ KNAV_DMA_DESC_RETQ_SHIFT);
- set_words(&packet_info, 1, &desc->packet_info);
+ if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
+ tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
+ KNAV_DMA_DESC_PSFLAG_SHIFT);
+ }
+
+ set_words(&tmp, 1, &desc->packet_info);
set_words((u32 *)&skb, 1, &desc->pad[0]);
+ if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
+ tmp = tx_pipe->switch_to_port;
+ set_words((u32 *)&tmp, 1, &desc->tag_info);
+ }
+
/* submit packet descriptor */
ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
&dma_sz);
@@ -1320,7 +1327,7 @@ static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
if (addr)
ether_addr_copy(naddr->addr, addr);
else
- memset(naddr->addr, 0, ETH_ALEN);
+ eth_zero_addr(naddr->addr);
list_add_tail(&naddr->node, &netcp->addr_list);
return naddr;
@@ -2127,7 +2134,7 @@ static int netcp_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id of_match[] = {
+static const struct of_device_id of_match[] = {
{ .compatible = "ti,netcp-1.0", },
{},
};
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 84f5ce525750..9b7e0a34c98b 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -40,35 +40,61 @@
#define GBE_MODULE_NAME "netcp-gbe"
#define GBE_SS_VERSION_14 0x4ed21104
+#define GBE_SS_REG_INDEX 0
+#define GBE_SGMII34_REG_INDEX 1
+#define GBE_SM_REG_INDEX 2
+/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET 0x100
-#define GBE13_SGMII34_MODULE_OFFSET 0x400
-#define GBE13_SWITCH_MODULE_OFFSET 0x800
-#define GBE13_HOST_PORT_OFFSET 0x834
-#define GBE13_SLAVE_PORT_OFFSET 0x860
-#define GBE13_EMAC_OFFSET 0x900
-#define GBE13_SLAVE_PORT2_OFFSET 0xa00
-#define GBE13_HW_STATS_OFFSET 0xb00
-#define GBE13_ALE_OFFSET 0xe00
+/* offset relative to base of GBE_SM_REG_INDEX */
+#define GBE13_HOST_PORT_OFFSET 0x34
+#define GBE13_SLAVE_PORT_OFFSET 0x60
+#define GBE13_EMAC_OFFSET 0x100
+#define GBE13_SLAVE_PORT2_OFFSET 0x200
+#define GBE13_HW_STATS_OFFSET 0x300
+#define GBE13_ALE_OFFSET 0x600
#define GBE13_HOST_PORT_NUM 0
-#define GBE13_NUM_SLAVES 4
-#define GBE13_NUM_ALE_PORTS (GBE13_NUM_SLAVES + 1)
#define GBE13_NUM_ALE_ENTRIES 1024
+/* 1G Ethernet NU SS defines */
+#define GBENU_MODULE_NAME "netcp-gbenu"
+#define GBE_SS_ID_NU 0x4ee6
+#define GBE_SS_ID_2U 0x4ee8
+
+#define IS_SS_ID_MU(d) \
+ ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
+
+#define IS_SS_ID_NU(d) \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
+
+#define GBENU_SS_REG_INDEX 0
+#define GBENU_SM_REG_INDEX 1
+#define GBENU_SGMII_MODULE_OFFSET 0x100
+#define GBENU_HOST_PORT_OFFSET 0x1000
+#define GBENU_SLAVE_PORT_OFFSET 0x2000
+#define GBENU_EMAC_OFFSET 0x2330
+#define GBENU_HW_STATS_OFFSET 0x1a000
+#define GBENU_ALE_OFFSET 0x1e000
+#define GBENU_HOST_PORT_NUM 0
+#define GBENU_NUM_ALE_ENTRIES 1024
+
/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
#define XGBE_SS_VERSION_10 0x4ee42100
-#define XGBE_SERDES_REG_INDEX 1
+#define XGBE_SS_REG_INDEX 0
+#define XGBE_SM_REG_INDEX 1
+#define XGBE_SERDES_REG_INDEX 2
+
+/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET 0x100
-#define XGBE10_SWITCH_MODULE_OFFSET 0x1000
-#define XGBE10_HOST_PORT_OFFSET 0x1034
-#define XGBE10_SLAVE_PORT_OFFSET 0x1064
-#define XGBE10_EMAC_OFFSET 0x1400
-#define XGBE10_ALE_OFFSET 0x1700
-#define XGBE10_HW_STATS_OFFSET 0x1800
+/* offset relative to base of XGBE_SM_REG_INDEX */
+#define XGBE10_HOST_PORT_OFFSET 0x34
+#define XGBE10_SLAVE_PORT_OFFSET 0x64
+#define XGBE10_EMAC_OFFSET 0x400
+#define XGBE10_ALE_OFFSET 0x700
+#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
-#define XGBE10_NUM_SLAVES 2
-#define XGBE10_NUM_ALE_PORTS (XGBE10_NUM_SLAVES + 1)
#define XGBE10_NUM_ALE_ENTRIES 1024
#define GBE_TIMER_INTERVAL (HZ / 2)
@@ -88,7 +114,7 @@
#define MACSL_FULLDUPLEX BIT(0)
#define GBE_CTL_P0_ENABLE BIT(2)
-#define GBE_REG_VAL_STAT_ENABLE_ALL 0xff
+#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
#define GBE_STATS_CD_SEL BIT(28)
@@ -108,11 +134,20 @@
#define GBE_STATSC_MODULE 2
#define GBE_STATSD_MODULE 3
+#define GBENU_STATS0_MODULE 0
+#define GBENU_STATS1_MODULE 1
+#define GBENU_STATS2_MODULE 2
+#define GBENU_STATS3_MODULE 3
+#define GBENU_STATS4_MODULE 4
+#define GBENU_STATS5_MODULE 5
+#define GBENU_STATS6_MODULE 6
+#define GBENU_STATS7_MODULE 7
+#define GBENU_STATS8_MODULE 8
+
#define XGBE_STATS0_MODULE 0
#define XGBE_STATS1_MODULE 1
#define XGBE_STATS2_MODULE 2
-#define MAX_SLAVES GBE13_NUM_SLAVES
/* s: 0-based slave_port */
#define SGMII_BASE(s) \
(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
@@ -125,10 +160,14 @@
#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
offsetof(struct gbe##_##rb, rn)
+#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+ offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
+#define HOST_TX_PRI_MAP_DEFAULT 0x00000000
+
struct xgbe_ss_regs {
u32 id_ver;
u32 synce_count;
@@ -258,6 +297,192 @@ struct xgbe_hw_stats {
#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
+struct gbenu_ss_regs {
+ u32 id_ver;
+ u32 synce_count; /* NU */
+ u32 synce_mux; /* NU */
+ u32 control; /* 2U */
+ u32 __rsvd_0[2]; /* 2U */
+ u32 rgmii_status; /* 2U */
+ u32 ss_status; /* 2U */
+};
+
+struct gbenu_switch_regs {
+ u32 id_ver;
+ u32 control;
+ u32 __rsvd_0[2];
+ u32 emcontrol;
+ u32 stat_port_en;
+ u32 ptype; /* NU */
+ u32 soft_idle;
+ u32 thru_rate; /* NU */
+ u32 gap_thresh; /* NU */
+ u32 tx_start_wds; /* NU */
+ u32 eee_prescale; /* 2U */
+ u32 tx_g_oflow_thresh_set; /* NU */
+ u32 tx_g_oflow_thresh_clr; /* NU */
+ u32 tx_g_buf_thresh_set_l; /* NU */
+ u32 tx_g_buf_thresh_set_h; /* NU */
+ u32 tx_g_buf_thresh_clr_l; /* NU */
+ u32 tx_g_buf_thresh_clr_h; /* NU */
+};
+
+struct gbenu_port_regs {
+ u32 __rsvd_0;
+ u32 control;
+ u32 max_blks; /* 2U */
+ u32 mem_align1;
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map; /* NU */
+ u32 pri_ctl; /* 2U */
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+ u32 tx_blks_pri; /* NU */
+ u32 __rsvd_1;
+ u32 idle2lpi; /* 2U */
+ u32 lpi2idle; /* 2U */
+ u32 eee_status; /* 2U */
+ u32 __rsvd_2;
+ u32 __rsvd_3[176]; /* NU: more to add */
+ u32 __rsvd_4[2];
+ u32 sa_lo;
+ u32 sa_hi;
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+ u32 ts_ctl_ltype2;
+ u32 ts_ctl2;
+};
+
+struct gbenu_host_port_regs {
+ u32 __rsvd_0;
+ u32 control;
+ u32 flow_id_offset; /* 2U */
+ u32 __rsvd_1;
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map; /* NU */
+ u32 pri_ctl;
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+ u32 tx_blks_pri; /* NU */
+ u32 __rsvd_2;
+ u32 idle2lpi; /* 2U */
+ u32 lpi2wake; /* 2U */
+ u32 eee_status; /* 2U */
+ u32 __rsvd_3;
+ u32 __rsvd_4[184]; /* NU */
+ u32 host_blks_pri; /* NU */
+};
+
+struct gbenu_emac_regs {
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 boff_test;
+ u32 rx_pause;
+ u32 __rsvd_0[11]; /* NU */
+ u32 tx_pause;
+ u32 __rsvd_1[11]; /* NU */
+ u32 em_control;
+ u32 tx_gap;
+};
+
+/* Some hw stat regs are applicable to the slave port only;
+ * this is handled by the gbenu_et_stats struct. Some are
+ * specific to SS version NU and some to 2U.
+ */
+struct gbenu_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames; /* slave */
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors; /* slave */
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames; /* slave */
+ u32 rx_undersized_frames;
+ u32 rx_fragments; /* slave */
+ u32 ale_drop;
+ u32 ale_overrun_drop;
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames; /* slave */
+ u32 tx_deferred_frames; /* slave */
+ u32 tx_collision_frames; /* slave */
+ u32 tx_single_coll_frames; /* slave */
+ u32 tx_mult_coll_frames; /* slave */
+ u32 tx_excessive_collisions; /* slave */
+ u32 tx_late_collisions; /* slave */
+ u32 rx_ipg_error; /* slave 10G only */
+ u32 tx_carrier_sense_errors; /* slave */
+ u32 tx_bytes;
+ u32 tx_64B_frames;
+ u32 tx_65_to_127B_frames;
+ u32 tx_128_to_255B_frames;
+ u32 tx_256_to_511B_frames;
+ u32 tx_512_to_1023B_frames;
+ u32 tx_1024B_frames;
+ u32 net_bytes;
+ u32 rx_bottom_fifo_drop;
+ u32 rx_port_mask_drop;
+ u32 rx_top_fifo_drop;
+ u32 ale_rate_limit_drop;
+ u32 ale_vid_ingress_drop;
+ u32 ale_da_eq_sa_drop;
+ u32 __rsvd_0[3];
+ u32 ale_unknown_ucast;
+ u32 ale_unknown_ucast_bytes;
+ u32 ale_unknown_mcast;
+ u32 ale_unknown_mcast_bytes;
+ u32 ale_unknown_bcast;
+ u32 ale_unknown_bcast_bytes;
+ u32 ale_pol_match;
+ u32 ale_pol_match_red; /* NU */
+ u32 ale_pol_match_yellow; /* NU */
+ u32 __rsvd_1[44];
+ u32 tx_mem_protect_err;
+ /* following NU only */
+ u32 tx_pri0;
+ u32 tx_pri1;
+ u32 tx_pri2;
+ u32 tx_pri3;
+ u32 tx_pri4;
+ u32 tx_pri5;
+ u32 tx_pri6;
+ u32 tx_pri7;
+ u32 tx_pri0_bcnt;
+ u32 tx_pri1_bcnt;
+ u32 tx_pri2_bcnt;
+ u32 tx_pri3_bcnt;
+ u32 tx_pri4_bcnt;
+ u32 tx_pri5_bcnt;
+ u32 tx_pri6_bcnt;
+ u32 tx_pri7_bcnt;
+ u32 tx_pri0_drop;
+ u32 tx_pri1_drop;
+ u32 tx_pri2_drop;
+ u32 tx_pri3_drop;
+ u32 tx_pri4_drop;
+ u32 tx_pri5_drop;
+ u32 tx_pri6_drop;
+ u32 tx_pri7_drop;
+ u32 tx_pri0_drop_bcnt;
+ u32 tx_pri1_drop_bcnt;
+ u32 tx_pri2_drop_bcnt;
+ u32 tx_pri3_drop_bcnt;
+ u32 tx_pri4_drop_bcnt;
+ u32 tx_pri5_drop_bcnt;
+ u32 tx_pri6_drop_bcnt;
+ u32 tx_pri7_drop_bcnt;
+};
+
+#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
+#define GBENU_HW_STATS_REG_MAP_SZ 0x200
+
struct gbe_ss_regs {
u32 id_ver;
u32 synce_count;
@@ -316,6 +541,7 @@ struct gbe_port_regs_ofs {
u16 ts_vlan;
u16 ts_ctl_ltype2;
u16 ts_ctl2;
+ u16 rx_maxlen; /* 2U, NU */
};
struct gbe_host_port_regs {
@@ -390,9 +616,7 @@ struct gbe_hw_stats {
};
#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
-#define GBE13_NUM_HW_STATS_MOD 2
-#define XGBE10_NUM_HW_STATS_MOD 3
-#define GBE_MAX_HW_STAT_MODS 3
+#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100
struct gbe_slave {
@@ -420,11 +644,14 @@ struct gbe_priv {
u32 ale_entries;
u32 ale_ports;
bool enable_ale;
+ u8 max_num_slaves;
+ u8 max_num_ports; /* max_num_slaves + 1 */
struct netcp_tx_pipe tx_pipe;
int host_port;
u32 rx_packet_max;
u32 ss_version;
+ u32 stats_en_mask;
void __iomem *ss_regs;
void __iomem *switch_regs;
@@ -475,275 +702,778 @@ struct netcp_ethtool_stat {
int offset;
};
-#define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
- offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSA_INFO(field) \
+{ \
+ "GBE_A:"#field, GBE_STATSA_MODULE, \
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
-#define GBE_STATSB_INFO(field) "GBE_B:"#field, GBE_STATSB_MODULE,\
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
- offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSB_INFO(field) \
+{ \
+ "GBE_B:"#field, GBE_STATSB_MODULE, \
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
-#define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
- offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSC_INFO(field) \
+{ \
+ "GBE_C:"#field, GBE_STATSC_MODULE, \
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
-#define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\
- FIELD_SIZEOF(struct gbe_hw_stats, field), \
- offsetof(struct gbe_hw_stats, field)
+#define GBE_STATSD_INFO(field) \
+{ \
+ "GBE_D:"#field, GBE_STATSD_MODULE, \
+ FIELD_SIZEOF(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
static const struct netcp_ethtool_stat gbe13_et_stats[] = {
/* GBE module A */
- {GBE_STATSA_INFO(rx_good_frames)},
- {GBE_STATSA_INFO(rx_broadcast_frames)},
- {GBE_STATSA_INFO(rx_multicast_frames)},
- {GBE_STATSA_INFO(rx_pause_frames)},
- {GBE_STATSA_INFO(rx_crc_errors)},
- {GBE_STATSA_INFO(rx_align_code_errors)},
- {GBE_STATSA_INFO(rx_oversized_frames)},
- {GBE_STATSA_INFO(rx_jabber_frames)},
- {GBE_STATSA_INFO(rx_undersized_frames)},
- {GBE_STATSA_INFO(rx_fragments)},
- {GBE_STATSA_INFO(rx_bytes)},
- {GBE_STATSA_INFO(tx_good_frames)},
- {GBE_STATSA_INFO(tx_broadcast_frames)},
- {GBE_STATSA_INFO(tx_multicast_frames)},
- {GBE_STATSA_INFO(tx_pause_frames)},
- {GBE_STATSA_INFO(tx_deferred_frames)},
- {GBE_STATSA_INFO(tx_collision_frames)},
- {GBE_STATSA_INFO(tx_single_coll_frames)},
- {GBE_STATSA_INFO(tx_mult_coll_frames)},
- {GBE_STATSA_INFO(tx_excessive_collisions)},
- {GBE_STATSA_INFO(tx_late_collisions)},
- {GBE_STATSA_INFO(tx_underrun)},
- {GBE_STATSA_INFO(tx_carrier_sense_errors)},
- {GBE_STATSA_INFO(tx_bytes)},
- {GBE_STATSA_INFO(tx_64byte_frames)},
- {GBE_STATSA_INFO(tx_65_to_127byte_frames)},
- {GBE_STATSA_INFO(tx_128_to_255byte_frames)},
- {GBE_STATSA_INFO(tx_256_to_511byte_frames)},
- {GBE_STATSA_INFO(tx_512_to_1023byte_frames)},
- {GBE_STATSA_INFO(tx_1024byte_frames)},
- {GBE_STATSA_INFO(net_bytes)},
- {GBE_STATSA_INFO(rx_sof_overruns)},
- {GBE_STATSA_INFO(rx_mof_overruns)},
- {GBE_STATSA_INFO(rx_dma_overruns)},
+ GBE_STATSA_INFO(rx_good_frames),
+ GBE_STATSA_INFO(rx_broadcast_frames),
+ GBE_STATSA_INFO(rx_multicast_frames),
+ GBE_STATSA_INFO(rx_pause_frames),
+ GBE_STATSA_INFO(rx_crc_errors),
+ GBE_STATSA_INFO(rx_align_code_errors),
+ GBE_STATSA_INFO(rx_oversized_frames),
+ GBE_STATSA_INFO(rx_jabber_frames),
+ GBE_STATSA_INFO(rx_undersized_frames),
+ GBE_STATSA_INFO(rx_fragments),
+ GBE_STATSA_INFO(rx_bytes),
+ GBE_STATSA_INFO(tx_good_frames),
+ GBE_STATSA_INFO(tx_broadcast_frames),
+ GBE_STATSA_INFO(tx_multicast_frames),
+ GBE_STATSA_INFO(tx_pause_frames),
+ GBE_STATSA_INFO(tx_deferred_frames),
+ GBE_STATSA_INFO(tx_collision_frames),
+ GBE_STATSA_INFO(tx_single_coll_frames),
+ GBE_STATSA_INFO(tx_mult_coll_frames),
+ GBE_STATSA_INFO(tx_excessive_collisions),
+ GBE_STATSA_INFO(tx_late_collisions),
+ GBE_STATSA_INFO(tx_underrun),
+ GBE_STATSA_INFO(tx_carrier_sense_errors),
+ GBE_STATSA_INFO(tx_bytes),
+ GBE_STATSA_INFO(tx_64byte_frames),
+ GBE_STATSA_INFO(tx_65_to_127byte_frames),
+ GBE_STATSA_INFO(tx_128_to_255byte_frames),
+ GBE_STATSA_INFO(tx_256_to_511byte_frames),
+ GBE_STATSA_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSA_INFO(tx_1024byte_frames),
+ GBE_STATSA_INFO(net_bytes),
+ GBE_STATSA_INFO(rx_sof_overruns),
+ GBE_STATSA_INFO(rx_mof_overruns),
+ GBE_STATSA_INFO(rx_dma_overruns),
/* GBE module B */
- {GBE_STATSB_INFO(rx_good_frames)},
- {GBE_STATSB_INFO(rx_broadcast_frames)},
- {GBE_STATSB_INFO(rx_multicast_frames)},
- {GBE_STATSB_INFO(rx_pause_frames)},
- {GBE_STATSB_INFO(rx_crc_errors)},
- {GBE_STATSB_INFO(rx_align_code_errors)},
- {GBE_STATSB_INFO(rx_oversized_frames)},
- {GBE_STATSB_INFO(rx_jabber_frames)},
- {GBE_STATSB_INFO(rx_undersized_frames)},
- {GBE_STATSB_INFO(rx_fragments)},
- {GBE_STATSB_INFO(rx_bytes)},
- {GBE_STATSB_INFO(tx_good_frames)},
- {GBE_STATSB_INFO(tx_broadcast_frames)},
- {GBE_STATSB_INFO(tx_multicast_frames)},
- {GBE_STATSB_INFO(tx_pause_frames)},
- {GBE_STATSB_INFO(tx_deferred_frames)},
- {GBE_STATSB_INFO(tx_collision_frames)},
- {GBE_STATSB_INFO(tx_single_coll_frames)},
- {GBE_STATSB_INFO(tx_mult_coll_frames)},
- {GBE_STATSB_INFO(tx_excessive_collisions)},
- {GBE_STATSB_INFO(tx_late_collisions)},
- {GBE_STATSB_INFO(tx_underrun)},
- {GBE_STATSB_INFO(tx_carrier_sense_errors)},
- {GBE_STATSB_INFO(tx_bytes)},
- {GBE_STATSB_INFO(tx_64byte_frames)},
- {GBE_STATSB_INFO(tx_65_to_127byte_frames)},
- {GBE_STATSB_INFO(tx_128_to_255byte_frames)},
- {GBE_STATSB_INFO(tx_256_to_511byte_frames)},
- {GBE_STATSB_INFO(tx_512_to_1023byte_frames)},
- {GBE_STATSB_INFO(tx_1024byte_frames)},
- {GBE_STATSB_INFO(net_bytes)},
- {GBE_STATSB_INFO(rx_sof_overruns)},
- {GBE_STATSB_INFO(rx_mof_overruns)},
- {GBE_STATSB_INFO(rx_dma_overruns)},
+ GBE_STATSB_INFO(rx_good_frames),
+ GBE_STATSB_INFO(rx_broadcast_frames),
+ GBE_STATSB_INFO(rx_multicast_frames),
+ GBE_STATSB_INFO(rx_pause_frames),
+ GBE_STATSB_INFO(rx_crc_errors),
+ GBE_STATSB_INFO(rx_align_code_errors),
+ GBE_STATSB_INFO(rx_oversized_frames),
+ GBE_STATSB_INFO(rx_jabber_frames),
+ GBE_STATSB_INFO(rx_undersized_frames),
+ GBE_STATSB_INFO(rx_fragments),
+ GBE_STATSB_INFO(rx_bytes),
+ GBE_STATSB_INFO(tx_good_frames),
+ GBE_STATSB_INFO(tx_broadcast_frames),
+ GBE_STATSB_INFO(tx_multicast_frames),
+ GBE_STATSB_INFO(tx_pause_frames),
+ GBE_STATSB_INFO(tx_deferred_frames),
+ GBE_STATSB_INFO(tx_collision_frames),
+ GBE_STATSB_INFO(tx_single_coll_frames),
+ GBE_STATSB_INFO(tx_mult_coll_frames),
+ GBE_STATSB_INFO(tx_excessive_collisions),
+ GBE_STATSB_INFO(tx_late_collisions),
+ GBE_STATSB_INFO(tx_underrun),
+ GBE_STATSB_INFO(tx_carrier_sense_errors),
+ GBE_STATSB_INFO(tx_bytes),
+ GBE_STATSB_INFO(tx_64byte_frames),
+ GBE_STATSB_INFO(tx_65_to_127byte_frames),
+ GBE_STATSB_INFO(tx_128_to_255byte_frames),
+ GBE_STATSB_INFO(tx_256_to_511byte_frames),
+ GBE_STATSB_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSB_INFO(tx_1024byte_frames),
+ GBE_STATSB_INFO(net_bytes),
+ GBE_STATSB_INFO(rx_sof_overruns),
+ GBE_STATSB_INFO(rx_mof_overruns),
+ GBE_STATSB_INFO(rx_dma_overruns),
/* GBE module C */
- {GBE_STATSC_INFO(rx_good_frames)},
- {GBE_STATSC_INFO(rx_broadcast_frames)},
- {GBE_STATSC_INFO(rx_multicast_frames)},
- {GBE_STATSC_INFO(rx_pause_frames)},
- {GBE_STATSC_INFO(rx_crc_errors)},
- {GBE_STATSC_INFO(rx_align_code_errors)},
- {GBE_STATSC_INFO(rx_oversized_frames)},
- {GBE_STATSC_INFO(rx_jabber_frames)},
- {GBE_STATSC_INFO(rx_undersized_frames)},
- {GBE_STATSC_INFO(rx_fragments)},
- {GBE_STATSC_INFO(rx_bytes)},
- {GBE_STATSC_INFO(tx_good_frames)},
- {GBE_STATSC_INFO(tx_broadcast_frames)},
- {GBE_STATSC_INFO(tx_multicast_frames)},
- {GBE_STATSC_INFO(tx_pause_frames)},
- {GBE_STATSC_INFO(tx_deferred_frames)},
- {GBE_STATSC_INFO(tx_collision_frames)},
- {GBE_STATSC_INFO(tx_single_coll_frames)},
- {GBE_STATSC_INFO(tx_mult_coll_frames)},
- {GBE_STATSC_INFO(tx_excessive_collisions)},
- {GBE_STATSC_INFO(tx_late_collisions)},
- {GBE_STATSC_INFO(tx_underrun)},
- {GBE_STATSC_INFO(tx_carrier_sense_errors)},
- {GBE_STATSC_INFO(tx_bytes)},
- {GBE_STATSC_INFO(tx_64byte_frames)},
- {GBE_STATSC_INFO(tx_65_to_127byte_frames)},
- {GBE_STATSC_INFO(tx_128_to_255byte_frames)},
- {GBE_STATSC_INFO(tx_256_to_511byte_frames)},
- {GBE_STATSC_INFO(tx_512_to_1023byte_frames)},
- {GBE_STATSC_INFO(tx_1024byte_frames)},
- {GBE_STATSC_INFO(net_bytes)},
- {GBE_STATSC_INFO(rx_sof_overruns)},
- {GBE_STATSC_INFO(rx_mof_overruns)},
- {GBE_STATSC_INFO(rx_dma_overruns)},
+ GBE_STATSC_INFO(rx_good_frames),
+ GBE_STATSC_INFO(rx_broadcast_frames),
+ GBE_STATSC_INFO(rx_multicast_frames),
+ GBE_STATSC_INFO(rx_pause_frames),
+ GBE_STATSC_INFO(rx_crc_errors),
+ GBE_STATSC_INFO(rx_align_code_errors),
+ GBE_STATSC_INFO(rx_oversized_frames),
+ GBE_STATSC_INFO(rx_jabber_frames),
+ GBE_STATSC_INFO(rx_undersized_frames),
+ GBE_STATSC_INFO(rx_fragments),
+ GBE_STATSC_INFO(rx_bytes),
+ GBE_STATSC_INFO(tx_good_frames),
+ GBE_STATSC_INFO(tx_broadcast_frames),
+ GBE_STATSC_INFO(tx_multicast_frames),
+ GBE_STATSC_INFO(tx_pause_frames),
+ GBE_STATSC_INFO(tx_deferred_frames),
+ GBE_STATSC_INFO(tx_collision_frames),
+ GBE_STATSC_INFO(tx_single_coll_frames),
+ GBE_STATSC_INFO(tx_mult_coll_frames),
+ GBE_STATSC_INFO(tx_excessive_collisions),
+ GBE_STATSC_INFO(tx_late_collisions),
+ GBE_STATSC_INFO(tx_underrun),
+ GBE_STATSC_INFO(tx_carrier_sense_errors),
+ GBE_STATSC_INFO(tx_bytes),
+ GBE_STATSC_INFO(tx_64byte_frames),
+ GBE_STATSC_INFO(tx_65_to_127byte_frames),
+ GBE_STATSC_INFO(tx_128_to_255byte_frames),
+ GBE_STATSC_INFO(tx_256_to_511byte_frames),
+ GBE_STATSC_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSC_INFO(tx_1024byte_frames),
+ GBE_STATSC_INFO(net_bytes),
+ GBE_STATSC_INFO(rx_sof_overruns),
+ GBE_STATSC_INFO(rx_mof_overruns),
+ GBE_STATSC_INFO(rx_dma_overruns),
/* GBE module D */
- {GBE_STATSD_INFO(rx_good_frames)},
- {GBE_STATSD_INFO(rx_broadcast_frames)},
- {GBE_STATSD_INFO(rx_multicast_frames)},
- {GBE_STATSD_INFO(rx_pause_frames)},
- {GBE_STATSD_INFO(rx_crc_errors)},
- {GBE_STATSD_INFO(rx_align_code_errors)},
- {GBE_STATSD_INFO(rx_oversized_frames)},
- {GBE_STATSD_INFO(rx_jabber_frames)},
- {GBE_STATSD_INFO(rx_undersized_frames)},
- {GBE_STATSD_INFO(rx_fragments)},
- {GBE_STATSD_INFO(rx_bytes)},
- {GBE_STATSD_INFO(tx_good_frames)},
- {GBE_STATSD_INFO(tx_broadcast_frames)},
- {GBE_STATSD_INFO(tx_multicast_frames)},
- {GBE_STATSD_INFO(tx_pause_frames)},
- {GBE_STATSD_INFO(tx_deferred_frames)},
- {GBE_STATSD_INFO(tx_collision_frames)},
- {GBE_STATSD_INFO(tx_single_coll_frames)},
- {GBE_STATSD_INFO(tx_mult_coll_frames)},
- {GBE_STATSD_INFO(tx_excessive_collisions)},
- {GBE_STATSD_INFO(tx_late_collisions)},
- {GBE_STATSD_INFO(tx_underrun)},
- {GBE_STATSD_INFO(tx_carrier_sense_errors)},
- {GBE_STATSD_INFO(tx_bytes)},
- {GBE_STATSD_INFO(tx_64byte_frames)},
- {GBE_STATSD_INFO(tx_65_to_127byte_frames)},
- {GBE_STATSD_INFO(tx_128_to_255byte_frames)},
- {GBE_STATSD_INFO(tx_256_to_511byte_frames)},
- {GBE_STATSD_INFO(tx_512_to_1023byte_frames)},
- {GBE_STATSD_INFO(tx_1024byte_frames)},
- {GBE_STATSD_INFO(net_bytes)},
- {GBE_STATSD_INFO(rx_sof_overruns)},
- {GBE_STATSD_INFO(rx_mof_overruns)},
- {GBE_STATSD_INFO(rx_dma_overruns)},
+ GBE_STATSD_INFO(rx_good_frames),
+ GBE_STATSD_INFO(rx_broadcast_frames),
+ GBE_STATSD_INFO(rx_multicast_frames),
+ GBE_STATSD_INFO(rx_pause_frames),
+ GBE_STATSD_INFO(rx_crc_errors),
+ GBE_STATSD_INFO(rx_align_code_errors),
+ GBE_STATSD_INFO(rx_oversized_frames),
+ GBE_STATSD_INFO(rx_jabber_frames),
+ GBE_STATSD_INFO(rx_undersized_frames),
+ GBE_STATSD_INFO(rx_fragments),
+ GBE_STATSD_INFO(rx_bytes),
+ GBE_STATSD_INFO(tx_good_frames),
+ GBE_STATSD_INFO(tx_broadcast_frames),
+ GBE_STATSD_INFO(tx_multicast_frames),
+ GBE_STATSD_INFO(tx_pause_frames),
+ GBE_STATSD_INFO(tx_deferred_frames),
+ GBE_STATSD_INFO(tx_collision_frames),
+ GBE_STATSD_INFO(tx_single_coll_frames),
+ GBE_STATSD_INFO(tx_mult_coll_frames),
+ GBE_STATSD_INFO(tx_excessive_collisions),
+ GBE_STATSD_INFO(tx_late_collisions),
+ GBE_STATSD_INFO(tx_underrun),
+ GBE_STATSD_INFO(tx_carrier_sense_errors),
+ GBE_STATSD_INFO(tx_bytes),
+ GBE_STATSD_INFO(tx_64byte_frames),
+ GBE_STATSD_INFO(tx_65_to_127byte_frames),
+ GBE_STATSD_INFO(tx_128_to_255byte_frames),
+ GBE_STATSD_INFO(tx_256_to_511byte_frames),
+ GBE_STATSD_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSD_INFO(tx_1024byte_frames),
+ GBE_STATSD_INFO(net_bytes),
+ GBE_STATSD_INFO(rx_sof_overruns),
+ GBE_STATSD_INFO(rx_mof_overruns),
+ GBE_STATSD_INFO(rx_dma_overruns),
};
-#define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \
- FIELD_SIZEOF(struct xgbe_hw_stats, field), \
- offsetof(struct xgbe_hw_stats, field)
+/* This is the size of entries in GBENU_STATS_HOST */
+#define GBENU_ET_STATS_HOST_SIZE 33
+
+#define GBENU_STATS_HOST(field) \
+{ \
+ "GBE_HOST:"#field, GBENU_STATS0_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
-#define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \
- FIELD_SIZEOF(struct xgbe_hw_stats, field), \
- offsetof(struct xgbe_hw_stats, field)
+/* This is the size of entries in each per-port GBENU_STATS_Px group */
+#define GBENU_ET_STATS_PORT_SIZE 46
-#define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \
- FIELD_SIZEOF(struct xgbe_hw_stats, field), \
- offsetof(struct xgbe_hw_stats, field)
+#define GBENU_STATS_P1(field) \
+{ \
+ "GBE_P1:"#field, GBENU_STATS1_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P2(field) \
+{ \
+ "GBE_P2:"#field, GBENU_STATS2_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P3(field) \
+{ \
+ "GBE_P3:"#field, GBENU_STATS3_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P4(field) \
+{ \
+ "GBE_P4:"#field, GBENU_STATS4_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P5(field) \
+{ \
+ "GBE_P5:"#field, GBENU_STATS5_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P6(field) \
+{ \
+ "GBE_P6:"#field, GBENU_STATS6_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P7(field) \
+{ \
+ "GBE_P7:"#field, GBENU_STATS7_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P8(field) \
+{ \
+ "GBE_P8:"#field, GBENU_STATS8_MODULE, \
+ FIELD_SIZEOF(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+static const struct netcp_ethtool_stat gbenu_et_stats[] = {
+ /* GBENU Host Module */
+ GBENU_STATS_HOST(rx_good_frames),
+ GBENU_STATS_HOST(rx_broadcast_frames),
+ GBENU_STATS_HOST(rx_multicast_frames),
+ GBENU_STATS_HOST(rx_crc_errors),
+ GBENU_STATS_HOST(rx_oversized_frames),
+ GBENU_STATS_HOST(rx_undersized_frames),
+ GBENU_STATS_HOST(ale_drop),
+ GBENU_STATS_HOST(ale_overrun_drop),
+ GBENU_STATS_HOST(rx_bytes),
+ GBENU_STATS_HOST(tx_good_frames),
+ GBENU_STATS_HOST(tx_broadcast_frames),
+ GBENU_STATS_HOST(tx_multicast_frames),
+ GBENU_STATS_HOST(tx_bytes),
+ GBENU_STATS_HOST(tx_64B_frames),
+ GBENU_STATS_HOST(tx_65_to_127B_frames),
+ GBENU_STATS_HOST(tx_128_to_255B_frames),
+ GBENU_STATS_HOST(tx_256_to_511B_frames),
+ GBENU_STATS_HOST(tx_512_to_1023B_frames),
+ GBENU_STATS_HOST(tx_1024B_frames),
+ GBENU_STATS_HOST(net_bytes),
+ GBENU_STATS_HOST(rx_bottom_fifo_drop),
+ GBENU_STATS_HOST(rx_port_mask_drop),
+ GBENU_STATS_HOST(rx_top_fifo_drop),
+ GBENU_STATS_HOST(ale_rate_limit_drop),
+ GBENU_STATS_HOST(ale_vid_ingress_drop),
+ GBENU_STATS_HOST(ale_da_eq_sa_drop),
+ GBENU_STATS_HOST(ale_unknown_ucast),
+ GBENU_STATS_HOST(ale_unknown_ucast_bytes),
+ GBENU_STATS_HOST(ale_unknown_mcast),
+ GBENU_STATS_HOST(ale_unknown_mcast_bytes),
+ GBENU_STATS_HOST(ale_unknown_bcast),
+ GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+ GBENU_STATS_HOST(tx_mem_protect_err),
+ /* GBENU Module 1 */
+ GBENU_STATS_P1(rx_good_frames),
+ GBENU_STATS_P1(rx_broadcast_frames),
+ GBENU_STATS_P1(rx_multicast_frames),
+ GBENU_STATS_P1(rx_pause_frames),
+ GBENU_STATS_P1(rx_crc_errors),
+ GBENU_STATS_P1(rx_align_code_errors),
+ GBENU_STATS_P1(rx_oversized_frames),
+ GBENU_STATS_P1(rx_jabber_frames),
+ GBENU_STATS_P1(rx_undersized_frames),
+ GBENU_STATS_P1(rx_fragments),
+ GBENU_STATS_P1(ale_drop),
+ GBENU_STATS_P1(ale_overrun_drop),
+ GBENU_STATS_P1(rx_bytes),
+ GBENU_STATS_P1(tx_good_frames),
+ GBENU_STATS_P1(tx_broadcast_frames),
+ GBENU_STATS_P1(tx_multicast_frames),
+ GBENU_STATS_P1(tx_pause_frames),
+ GBENU_STATS_P1(tx_deferred_frames),
+ GBENU_STATS_P1(tx_collision_frames),
+ GBENU_STATS_P1(tx_single_coll_frames),
+ GBENU_STATS_P1(tx_mult_coll_frames),
+ GBENU_STATS_P1(tx_excessive_collisions),
+ GBENU_STATS_P1(tx_late_collisions),
+ GBENU_STATS_P1(rx_ipg_error),
+ GBENU_STATS_P1(tx_carrier_sense_errors),
+ GBENU_STATS_P1(tx_bytes),
+ GBENU_STATS_P1(tx_64B_frames),
+ GBENU_STATS_P1(tx_65_to_127B_frames),
+ GBENU_STATS_P1(tx_128_to_255B_frames),
+ GBENU_STATS_P1(tx_256_to_511B_frames),
+ GBENU_STATS_P1(tx_512_to_1023B_frames),
+ GBENU_STATS_P1(tx_1024B_frames),
+ GBENU_STATS_P1(net_bytes),
+ GBENU_STATS_P1(rx_bottom_fifo_drop),
+ GBENU_STATS_P1(rx_port_mask_drop),
+ GBENU_STATS_P1(rx_top_fifo_drop),
+ GBENU_STATS_P1(ale_rate_limit_drop),
+ GBENU_STATS_P1(ale_vid_ingress_drop),
+ GBENU_STATS_P1(ale_da_eq_sa_drop),
+ GBENU_STATS_P1(ale_unknown_ucast),
+ GBENU_STATS_P1(ale_unknown_ucast_bytes),
+ GBENU_STATS_P1(ale_unknown_mcast),
+ GBENU_STATS_P1(ale_unknown_mcast_bytes),
+ GBENU_STATS_P1(ale_unknown_bcast),
+ GBENU_STATS_P1(ale_unknown_bcast_bytes),
+ GBENU_STATS_P1(tx_mem_protect_err),
+ /* GBENU Module 2 */
+ GBENU_STATS_P2(rx_good_frames),
+ GBENU_STATS_P2(rx_broadcast_frames),
+ GBENU_STATS_P2(rx_multicast_frames),
+ GBENU_STATS_P2(rx_pause_frames),
+ GBENU_STATS_P2(rx_crc_errors),
+ GBENU_STATS_P2(rx_align_code_errors),
+ GBENU_STATS_P2(rx_oversized_frames),
+ GBENU_STATS_P2(rx_jabber_frames),
+ GBENU_STATS_P2(rx_undersized_frames),
+ GBENU_STATS_P2(rx_fragments),
+ GBENU_STATS_P2(ale_drop),
+ GBENU_STATS_P2(ale_overrun_drop),
+ GBENU_STATS_P2(rx_bytes),
+ GBENU_STATS_P2(tx_good_frames),
+ GBENU_STATS_P2(tx_broadcast_frames),
+ GBENU_STATS_P2(tx_multicast_frames),
+ GBENU_STATS_P2(tx_pause_frames),
+ GBENU_STATS_P2(tx_deferred_frames),
+ GBENU_STATS_P2(tx_collision_frames),
+ GBENU_STATS_P2(tx_single_coll_frames),
+ GBENU_STATS_P2(tx_mult_coll_frames),
+ GBENU_STATS_P2(tx_excessive_collisions),
+ GBENU_STATS_P2(tx_late_collisions),
+ GBENU_STATS_P2(rx_ipg_error),
+ GBENU_STATS_P2(tx_carrier_sense_errors),
+ GBENU_STATS_P2(tx_bytes),
+ GBENU_STATS_P2(tx_64B_frames),
+ GBENU_STATS_P2(tx_65_to_127B_frames),
+ GBENU_STATS_P2(tx_128_to_255B_frames),
+ GBENU_STATS_P2(tx_256_to_511B_frames),
+ GBENU_STATS_P2(tx_512_to_1023B_frames),
+ GBENU_STATS_P2(tx_1024B_frames),
+ GBENU_STATS_P2(net_bytes),
+ GBENU_STATS_P2(rx_bottom_fifo_drop),
+ GBENU_STATS_P2(rx_port_mask_drop),
+ GBENU_STATS_P2(rx_top_fifo_drop),
+ GBENU_STATS_P2(ale_rate_limit_drop),
+ GBENU_STATS_P2(ale_vid_ingress_drop),
+ GBENU_STATS_P2(ale_da_eq_sa_drop),
+ GBENU_STATS_P2(ale_unknown_ucast),
+ GBENU_STATS_P2(ale_unknown_ucast_bytes),
+ GBENU_STATS_P2(ale_unknown_mcast),
+ GBENU_STATS_P2(ale_unknown_mcast_bytes),
+ GBENU_STATS_P2(ale_unknown_bcast),
+ GBENU_STATS_P2(ale_unknown_bcast_bytes),
+ GBENU_STATS_P2(tx_mem_protect_err),
+ /* GBENU Module 3 */
+ GBENU_STATS_P3(rx_good_frames),
+ GBENU_STATS_P3(rx_broadcast_frames),
+ GBENU_STATS_P3(rx_multicast_frames),
+ GBENU_STATS_P3(rx_pause_frames),
+ GBENU_STATS_P3(rx_crc_errors),
+ GBENU_STATS_P3(rx_align_code_errors),
+ GBENU_STATS_P3(rx_oversized_frames),
+ GBENU_STATS_P3(rx_jabber_frames),
+ GBENU_STATS_P3(rx_undersized_frames),
+ GBENU_STATS_P3(rx_fragments),
+ GBENU_STATS_P3(ale_drop),
+ GBENU_STATS_P3(ale_overrun_drop),
+ GBENU_STATS_P3(rx_bytes),
+ GBENU_STATS_P3(tx_good_frames),
+ GBENU_STATS_P3(tx_broadcast_frames),
+ GBENU_STATS_P3(tx_multicast_frames),
+ GBENU_STATS_P3(tx_pause_frames),
+ GBENU_STATS_P3(tx_deferred_frames),
+ GBENU_STATS_P3(tx_collision_frames),
+ GBENU_STATS_P3(tx_single_coll_frames),
+ GBENU_STATS_P3(tx_mult_coll_frames),
+ GBENU_STATS_P3(tx_excessive_collisions),
+ GBENU_STATS_P3(tx_late_collisions),
+ GBENU_STATS_P3(rx_ipg_error),
+ GBENU_STATS_P3(tx_carrier_sense_errors),
+ GBENU_STATS_P3(tx_bytes),
+ GBENU_STATS_P3(tx_64B_frames),
+ GBENU_STATS_P3(tx_65_to_127B_frames),
+ GBENU_STATS_P3(tx_128_to_255B_frames),
+ GBENU_STATS_P3(tx_256_to_511B_frames),
+ GBENU_STATS_P3(tx_512_to_1023B_frames),
+ GBENU_STATS_P3(tx_1024B_frames),
+ GBENU_STATS_P3(net_bytes),
+ GBENU_STATS_P3(rx_bottom_fifo_drop),
+ GBENU_STATS_P3(rx_port_mask_drop),
+ GBENU_STATS_P3(rx_top_fifo_drop),
+ GBENU_STATS_P3(ale_rate_limit_drop),
+ GBENU_STATS_P3(ale_vid_ingress_drop),
+ GBENU_STATS_P3(ale_da_eq_sa_drop),
+ GBENU_STATS_P3(ale_unknown_ucast),
+ GBENU_STATS_P3(ale_unknown_ucast_bytes),
+ GBENU_STATS_P3(ale_unknown_mcast),
+ GBENU_STATS_P3(ale_unknown_mcast_bytes),
+ GBENU_STATS_P3(ale_unknown_bcast),
+ GBENU_STATS_P3(ale_unknown_bcast_bytes),
+ GBENU_STATS_P3(tx_mem_protect_err),
+ /* GBENU Module 4 */
+ GBENU_STATS_P4(rx_good_frames),
+ GBENU_STATS_P4(rx_broadcast_frames),
+ GBENU_STATS_P4(rx_multicast_frames),
+ GBENU_STATS_P4(rx_pause_frames),
+ GBENU_STATS_P4(rx_crc_errors),
+ GBENU_STATS_P4(rx_align_code_errors),
+ GBENU_STATS_P4(rx_oversized_frames),
+ GBENU_STATS_P4(rx_jabber_frames),
+ GBENU_STATS_P4(rx_undersized_frames),
+ GBENU_STATS_P4(rx_fragments),
+ GBENU_STATS_P4(ale_drop),
+ GBENU_STATS_P4(ale_overrun_drop),
+ GBENU_STATS_P4(rx_bytes),
+ GBENU_STATS_P4(tx_good_frames),
+ GBENU_STATS_P4(tx_broadcast_frames),
+ GBENU_STATS_P4(tx_multicast_frames),
+ GBENU_STATS_P4(tx_pause_frames),
+ GBENU_STATS_P4(tx_deferred_frames),
+ GBENU_STATS_P4(tx_collision_frames),
+ GBENU_STATS_P4(tx_single_coll_frames),
+ GBENU_STATS_P4(tx_mult_coll_frames),
+ GBENU_STATS_P4(tx_excessive_collisions),
+ GBENU_STATS_P4(tx_late_collisions),
+ GBENU_STATS_P4(rx_ipg_error),
+ GBENU_STATS_P4(tx_carrier_sense_errors),
+ GBENU_STATS_P4(tx_bytes),
+ GBENU_STATS_P4(tx_64B_frames),
+ GBENU_STATS_P4(tx_65_to_127B_frames),
+ GBENU_STATS_P4(tx_128_to_255B_frames),
+ GBENU_STATS_P4(tx_256_to_511B_frames),
+ GBENU_STATS_P4(tx_512_to_1023B_frames),
+ GBENU_STATS_P4(tx_1024B_frames),
+ GBENU_STATS_P4(net_bytes),
+ GBENU_STATS_P4(rx_bottom_fifo_drop),
+ GBENU_STATS_P4(rx_port_mask_drop),
+ GBENU_STATS_P4(rx_top_fifo_drop),
+ GBENU_STATS_P4(ale_rate_limit_drop),
+ GBENU_STATS_P4(ale_vid_ingress_drop),
+ GBENU_STATS_P4(ale_da_eq_sa_drop),
+ GBENU_STATS_P4(ale_unknown_ucast),
+ GBENU_STATS_P4(ale_unknown_ucast_bytes),
+ GBENU_STATS_P4(ale_unknown_mcast),
+ GBENU_STATS_P4(ale_unknown_mcast_bytes),
+ GBENU_STATS_P4(ale_unknown_bcast),
+ GBENU_STATS_P4(ale_unknown_bcast_bytes),
+ GBENU_STATS_P4(tx_mem_protect_err),
+ /* GBENU Module 5 */
+ GBENU_STATS_P5(rx_good_frames),
+ GBENU_STATS_P5(rx_broadcast_frames),
+ GBENU_STATS_P5(rx_multicast_frames),
+ GBENU_STATS_P5(rx_pause_frames),
+ GBENU_STATS_P5(rx_crc_errors),
+ GBENU_STATS_P5(rx_align_code_errors),
+ GBENU_STATS_P5(rx_oversized_frames),
+ GBENU_STATS_P5(rx_jabber_frames),
+ GBENU_STATS_P5(rx_undersized_frames),
+ GBENU_STATS_P5(rx_fragments),
+ GBENU_STATS_P5(ale_drop),
+ GBENU_STATS_P5(ale_overrun_drop),
+ GBENU_STATS_P5(rx_bytes),
+ GBENU_STATS_P5(tx_good_frames),
+ GBENU_STATS_P5(tx_broadcast_frames),
+ GBENU_STATS_P5(tx_multicast_frames),
+ GBENU_STATS_P5(tx_pause_frames),
+ GBENU_STATS_P5(tx_deferred_frames),
+ GBENU_STATS_P5(tx_collision_frames),
+ GBENU_STATS_P5(tx_single_coll_frames),
+ GBENU_STATS_P5(tx_mult_coll_frames),
+ GBENU_STATS_P5(tx_excessive_collisions),
+ GBENU_STATS_P5(tx_late_collisions),
+ GBENU_STATS_P5(rx_ipg_error),
+ GBENU_STATS_P5(tx_carrier_sense_errors),
+ GBENU_STATS_P5(tx_bytes),
+ GBENU_STATS_P5(tx_64B_frames),
+ GBENU_STATS_P5(tx_65_to_127B_frames),
+ GBENU_STATS_P5(tx_128_to_255B_frames),
+ GBENU_STATS_P5(tx_256_to_511B_frames),
+ GBENU_STATS_P5(tx_512_to_1023B_frames),
+ GBENU_STATS_P5(tx_1024B_frames),
+ GBENU_STATS_P5(net_bytes),
+ GBENU_STATS_P5(rx_bottom_fifo_drop),
+ GBENU_STATS_P5(rx_port_mask_drop),
+ GBENU_STATS_P5(rx_top_fifo_drop),
+ GBENU_STATS_P5(ale_rate_limit_drop),
+ GBENU_STATS_P5(ale_vid_ingress_drop),
+ GBENU_STATS_P5(ale_da_eq_sa_drop),
+ GBENU_STATS_P5(ale_unknown_ucast),
+ GBENU_STATS_P5(ale_unknown_ucast_bytes),
+ GBENU_STATS_P5(ale_unknown_mcast),
+ GBENU_STATS_P5(ale_unknown_mcast_bytes),
+ GBENU_STATS_P5(ale_unknown_bcast),
+ GBENU_STATS_P5(ale_unknown_bcast_bytes),
+ GBENU_STATS_P5(tx_mem_protect_err),
+ /* GBENU Module 6 */
+ GBENU_STATS_P6(rx_good_frames),
+ GBENU_STATS_P6(rx_broadcast_frames),
+ GBENU_STATS_P6(rx_multicast_frames),
+ GBENU_STATS_P6(rx_pause_frames),
+ GBENU_STATS_P6(rx_crc_errors),
+ GBENU_STATS_P6(rx_align_code_errors),
+ GBENU_STATS_P6(rx_oversized_frames),
+ GBENU_STATS_P6(rx_jabber_frames),
+ GBENU_STATS_P6(rx_undersized_frames),
+ GBENU_STATS_P6(rx_fragments),
+ GBENU_STATS_P6(ale_drop),
+ GBENU_STATS_P6(ale_overrun_drop),
+ GBENU_STATS_P6(rx_bytes),
+ GBENU_STATS_P6(tx_good_frames),
+ GBENU_STATS_P6(tx_broadcast_frames),
+ GBENU_STATS_P6(tx_multicast_frames),
+ GBENU_STATS_P6(tx_pause_frames),
+ GBENU_STATS_P6(tx_deferred_frames),
+ GBENU_STATS_P6(tx_collision_frames),
+ GBENU_STATS_P6(tx_single_coll_frames),
+ GBENU_STATS_P6(tx_mult_coll_frames),
+ GBENU_STATS_P6(tx_excessive_collisions),
+ GBENU_STATS_P6(tx_late_collisions),
+ GBENU_STATS_P6(rx_ipg_error),
+ GBENU_STATS_P6(tx_carrier_sense_errors),
+ GBENU_STATS_P6(tx_bytes),
+ GBENU_STATS_P6(tx_64B_frames),
+ GBENU_STATS_P6(tx_65_to_127B_frames),
+ GBENU_STATS_P6(tx_128_to_255B_frames),
+ GBENU_STATS_P6(tx_256_to_511B_frames),
+ GBENU_STATS_P6(tx_512_to_1023B_frames),
+ GBENU_STATS_P6(tx_1024B_frames),
+ GBENU_STATS_P6(net_bytes),
+ GBENU_STATS_P6(rx_bottom_fifo_drop),
+ GBENU_STATS_P6(rx_port_mask_drop),
+ GBENU_STATS_P6(rx_top_fifo_drop),
+ GBENU_STATS_P6(ale_rate_limit_drop),
+ GBENU_STATS_P6(ale_vid_ingress_drop),
+ GBENU_STATS_P6(ale_da_eq_sa_drop),
+ GBENU_STATS_P6(ale_unknown_ucast),
+ GBENU_STATS_P6(ale_unknown_ucast_bytes),
+ GBENU_STATS_P6(ale_unknown_mcast),
+ GBENU_STATS_P6(ale_unknown_mcast_bytes),
+ GBENU_STATS_P6(ale_unknown_bcast),
+ GBENU_STATS_P6(ale_unknown_bcast_bytes),
+ GBENU_STATS_P6(tx_mem_protect_err),
+ /* GBENU Module 7 */
+ GBENU_STATS_P7(rx_good_frames),
+ GBENU_STATS_P7(rx_broadcast_frames),
+ GBENU_STATS_P7(rx_multicast_frames),
+ GBENU_STATS_P7(rx_pause_frames),
+ GBENU_STATS_P7(rx_crc_errors),
+ GBENU_STATS_P7(rx_align_code_errors),
+ GBENU_STATS_P7(rx_oversized_frames),
+ GBENU_STATS_P7(rx_jabber_frames),
+ GBENU_STATS_P7(rx_undersized_frames),
+ GBENU_STATS_P7(rx_fragments),
+ GBENU_STATS_P7(ale_drop),
+ GBENU_STATS_P7(ale_overrun_drop),
+ GBENU_STATS_P7(rx_bytes),
+ GBENU_STATS_P7(tx_good_frames),
+ GBENU_STATS_P7(tx_broadcast_frames),
+ GBENU_STATS_P7(tx_multicast_frames),
+ GBENU_STATS_P7(tx_pause_frames),
+ GBENU_STATS_P7(tx_deferred_frames),
+ GBENU_STATS_P7(tx_collision_frames),
+ GBENU_STATS_P7(tx_single_coll_frames),
+ GBENU_STATS_P7(tx_mult_coll_frames),
+ GBENU_STATS_P7(tx_excessive_collisions),
+ GBENU_STATS_P7(tx_late_collisions),
+ GBENU_STATS_P7(rx_ipg_error),
+ GBENU_STATS_P7(tx_carrier_sense_errors),
+ GBENU_STATS_P7(tx_bytes),
+ GBENU_STATS_P7(tx_64B_frames),
+ GBENU_STATS_P7(tx_65_to_127B_frames),
+ GBENU_STATS_P7(tx_128_to_255B_frames),
+ GBENU_STATS_P7(tx_256_to_511B_frames),
+ GBENU_STATS_P7(tx_512_to_1023B_frames),
+ GBENU_STATS_P7(tx_1024B_frames),
+ GBENU_STATS_P7(net_bytes),
+ GBENU_STATS_P7(rx_bottom_fifo_drop),
+ GBENU_STATS_P7(rx_port_mask_drop),
+ GBENU_STATS_P7(rx_top_fifo_drop),
+ GBENU_STATS_P7(ale_rate_limit_drop),
+ GBENU_STATS_P7(ale_vid_ingress_drop),
+ GBENU_STATS_P7(ale_da_eq_sa_drop),
+ GBENU_STATS_P7(ale_unknown_ucast),
+ GBENU_STATS_P7(ale_unknown_ucast_bytes),
+ GBENU_STATS_P7(ale_unknown_mcast),
+ GBENU_STATS_P7(ale_unknown_mcast_bytes),
+ GBENU_STATS_P7(ale_unknown_bcast),
+ GBENU_STATS_P7(ale_unknown_bcast_bytes),
+ GBENU_STATS_P7(tx_mem_protect_err),
+ /* GBENU Module 8 */
+ GBENU_STATS_P8(rx_good_frames),
+ GBENU_STATS_P8(rx_broadcast_frames),
+ GBENU_STATS_P8(rx_multicast_frames),
+ GBENU_STATS_P8(rx_pause_frames),
+ GBENU_STATS_P8(rx_crc_errors),
+ GBENU_STATS_P8(rx_align_code_errors),
+ GBENU_STATS_P8(rx_oversized_frames),
+ GBENU_STATS_P8(rx_jabber_frames),
+ GBENU_STATS_P8(rx_undersized_frames),
+ GBENU_STATS_P8(rx_fragments),
+ GBENU_STATS_P8(ale_drop),
+ GBENU_STATS_P8(ale_overrun_drop),
+ GBENU_STATS_P8(rx_bytes),
+ GBENU_STATS_P8(tx_good_frames),
+ GBENU_STATS_P8(tx_broadcast_frames),
+ GBENU_STATS_P8(tx_multicast_frames),
+ GBENU_STATS_P8(tx_pause_frames),
+ GBENU_STATS_P8(tx_deferred_frames),
+ GBENU_STATS_P8(tx_collision_frames),
+ GBENU_STATS_P8(tx_single_coll_frames),
+ GBENU_STATS_P8(tx_mult_coll_frames),
+ GBENU_STATS_P8(tx_excessive_collisions),
+ GBENU_STATS_P8(tx_late_collisions),
+ GBENU_STATS_P8(rx_ipg_error),
+ GBENU_STATS_P8(tx_carrier_sense_errors),
+ GBENU_STATS_P8(tx_bytes),
+ GBENU_STATS_P8(tx_64B_frames),
+ GBENU_STATS_P8(tx_65_to_127B_frames),
+ GBENU_STATS_P8(tx_128_to_255B_frames),
+ GBENU_STATS_P8(tx_256_to_511B_frames),
+ GBENU_STATS_P8(tx_512_to_1023B_frames),
+ GBENU_STATS_P8(tx_1024B_frames),
+ GBENU_STATS_P8(net_bytes),
+ GBENU_STATS_P8(rx_bottom_fifo_drop),
+ GBENU_STATS_P8(rx_port_mask_drop),
+ GBENU_STATS_P8(rx_top_fifo_drop),
+ GBENU_STATS_P8(ale_rate_limit_drop),
+ GBENU_STATS_P8(ale_vid_ingress_drop),
+ GBENU_STATS_P8(ale_da_eq_sa_drop),
+ GBENU_STATS_P8(ale_unknown_ucast),
+ GBENU_STATS_P8(ale_unknown_ucast_bytes),
+ GBENU_STATS_P8(ale_unknown_mcast),
+ GBENU_STATS_P8(ale_unknown_mcast_bytes),
+ GBENU_STATS_P8(ale_unknown_bcast),
+ GBENU_STATS_P8(ale_unknown_bcast_bytes),
+ GBENU_STATS_P8(tx_mem_protect_err),
+};
+
+#define XGBE_STATS0_INFO(field) \
+{ \
+ "GBE_0:"#field, XGBE_STATS0_MODULE, \
+ FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field) \
+}
+
+#define XGBE_STATS1_INFO(field) \
+{ \
+ "GBE_1:"#field, XGBE_STATS1_MODULE, \
+ FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field) \
+}
+
+#define XGBE_STATS2_INFO(field) \
+{ \
+ "GBE_2:"#field, XGBE_STATS2_MODULE, \
+ FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field) \
+}
static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
/* GBE module 0 */
- {XGBE_STATS0_INFO(rx_good_frames)},
- {XGBE_STATS0_INFO(rx_broadcast_frames)},
- {XGBE_STATS0_INFO(rx_multicast_frames)},
- {XGBE_STATS0_INFO(rx_oversized_frames)},
- {XGBE_STATS0_INFO(rx_undersized_frames)},
- {XGBE_STATS0_INFO(overrun_type4)},
- {XGBE_STATS0_INFO(overrun_type5)},
- {XGBE_STATS0_INFO(rx_bytes)},
- {XGBE_STATS0_INFO(tx_good_frames)},
- {XGBE_STATS0_INFO(tx_broadcast_frames)},
- {XGBE_STATS0_INFO(tx_multicast_frames)},
- {XGBE_STATS0_INFO(tx_bytes)},
- {XGBE_STATS0_INFO(tx_64byte_frames)},
- {XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
- {XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
- {XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
- {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
- {XGBE_STATS0_INFO(tx_1024byte_frames)},
- {XGBE_STATS0_INFO(net_bytes)},
- {XGBE_STATS0_INFO(rx_sof_overruns)},
- {XGBE_STATS0_INFO(rx_mof_overruns)},
- {XGBE_STATS0_INFO(rx_dma_overruns)},
+ XGBE_STATS0_INFO(rx_good_frames),
+ XGBE_STATS0_INFO(rx_broadcast_frames),
+ XGBE_STATS0_INFO(rx_multicast_frames),
+ XGBE_STATS0_INFO(rx_oversized_frames),
+ XGBE_STATS0_INFO(rx_undersized_frames),
+ XGBE_STATS0_INFO(overrun_type4),
+ XGBE_STATS0_INFO(overrun_type5),
+ XGBE_STATS0_INFO(rx_bytes),
+ XGBE_STATS0_INFO(tx_good_frames),
+ XGBE_STATS0_INFO(tx_broadcast_frames),
+ XGBE_STATS0_INFO(tx_multicast_frames),
+ XGBE_STATS0_INFO(tx_bytes),
+ XGBE_STATS0_INFO(tx_64byte_frames),
+ XGBE_STATS0_INFO(tx_65_to_127byte_frames),
+ XGBE_STATS0_INFO(tx_128_to_255byte_frames),
+ XGBE_STATS0_INFO(tx_256_to_511byte_frames),
+ XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
+ XGBE_STATS0_INFO(tx_1024byte_frames),
+ XGBE_STATS0_INFO(net_bytes),
+ XGBE_STATS0_INFO(rx_sof_overruns),
+ XGBE_STATS0_INFO(rx_mof_overruns),
+ XGBE_STATS0_INFO(rx_dma_overruns),
/* XGBE module 1 */
- {XGBE_STATS1_INFO(rx_good_frames)},
- {XGBE_STATS1_INFO(rx_broadcast_frames)},
- {XGBE_STATS1_INFO(rx_multicast_frames)},
- {XGBE_STATS1_INFO(rx_pause_frames)},
- {XGBE_STATS1_INFO(rx_crc_errors)},
- {XGBE_STATS1_INFO(rx_align_code_errors)},
- {XGBE_STATS1_INFO(rx_oversized_frames)},
- {XGBE_STATS1_INFO(rx_jabber_frames)},
- {XGBE_STATS1_INFO(rx_undersized_frames)},
- {XGBE_STATS1_INFO(rx_fragments)},
- {XGBE_STATS1_INFO(overrun_type4)},
- {XGBE_STATS1_INFO(overrun_type5)},
- {XGBE_STATS1_INFO(rx_bytes)},
- {XGBE_STATS1_INFO(tx_good_frames)},
- {XGBE_STATS1_INFO(tx_broadcast_frames)},
- {XGBE_STATS1_INFO(tx_multicast_frames)},
- {XGBE_STATS1_INFO(tx_pause_frames)},
- {XGBE_STATS1_INFO(tx_deferred_frames)},
- {XGBE_STATS1_INFO(tx_collision_frames)},
- {XGBE_STATS1_INFO(tx_single_coll_frames)},
- {XGBE_STATS1_INFO(tx_mult_coll_frames)},
- {XGBE_STATS1_INFO(tx_excessive_collisions)},
- {XGBE_STATS1_INFO(tx_late_collisions)},
- {XGBE_STATS1_INFO(tx_underrun)},
- {XGBE_STATS1_INFO(tx_carrier_sense_errors)},
- {XGBE_STATS1_INFO(tx_bytes)},
- {XGBE_STATS1_INFO(tx_64byte_frames)},
- {XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
- {XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
- {XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
- {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
- {XGBE_STATS1_INFO(tx_1024byte_frames)},
- {XGBE_STATS1_INFO(net_bytes)},
- {XGBE_STATS1_INFO(rx_sof_overruns)},
- {XGBE_STATS1_INFO(rx_mof_overruns)},
- {XGBE_STATS1_INFO(rx_dma_overruns)},
+ XGBE_STATS1_INFO(rx_good_frames),
+ XGBE_STATS1_INFO(rx_broadcast_frames),
+ XGBE_STATS1_INFO(rx_multicast_frames),
+ XGBE_STATS1_INFO(rx_pause_frames),
+ XGBE_STATS1_INFO(rx_crc_errors),
+ XGBE_STATS1_INFO(rx_align_code_errors),
+ XGBE_STATS1_INFO(rx_oversized_frames),
+ XGBE_STATS1_INFO(rx_jabber_frames),
+ XGBE_STATS1_INFO(rx_undersized_frames),
+ XGBE_STATS1_INFO(rx_fragments),
+ XGBE_STATS1_INFO(overrun_type4),
+ XGBE_STATS1_INFO(overrun_type5),
+ XGBE_STATS1_INFO(rx_bytes),
+ XGBE_STATS1_INFO(tx_good_frames),
+ XGBE_STATS1_INFO(tx_broadcast_frames),
+ XGBE_STATS1_INFO(tx_multicast_frames),
+ XGBE_STATS1_INFO(tx_pause_frames),
+ XGBE_STATS1_INFO(tx_deferred_frames),
+ XGBE_STATS1_INFO(tx_collision_frames),
+ XGBE_STATS1_INFO(tx_single_coll_frames),
+ XGBE_STATS1_INFO(tx_mult_coll_frames),
+ XGBE_STATS1_INFO(tx_excessive_collisions),
+ XGBE_STATS1_INFO(tx_late_collisions),
+ XGBE_STATS1_INFO(tx_underrun),
+ XGBE_STATS1_INFO(tx_carrier_sense_errors),
+ XGBE_STATS1_INFO(tx_bytes),
+ XGBE_STATS1_INFO(tx_64byte_frames),
+ XGBE_STATS1_INFO(tx_65_to_127byte_frames),
+ XGBE_STATS1_INFO(tx_128_to_255byte_frames),
+ XGBE_STATS1_INFO(tx_256_to_511byte_frames),
+ XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
+ XGBE_STATS1_INFO(tx_1024byte_frames),
+ XGBE_STATS1_INFO(net_bytes),
+ XGBE_STATS1_INFO(rx_sof_overruns),
+ XGBE_STATS1_INFO(rx_mof_overruns),
+ XGBE_STATS1_INFO(rx_dma_overruns),
/* XGBE module 2 */
- {XGBE_STATS2_INFO(rx_good_frames)},
- {XGBE_STATS2_INFO(rx_broadcast_frames)},
- {XGBE_STATS2_INFO(rx_multicast_frames)},
- {XGBE_STATS2_INFO(rx_pause_frames)},
- {XGBE_STATS2_INFO(rx_crc_errors)},
- {XGBE_STATS2_INFO(rx_align_code_errors)},
- {XGBE_STATS2_INFO(rx_oversized_frames)},
- {XGBE_STATS2_INFO(rx_jabber_frames)},
- {XGBE_STATS2_INFO(rx_undersized_frames)},
- {XGBE_STATS2_INFO(rx_fragments)},
- {XGBE_STATS2_INFO(overrun_type4)},
- {XGBE_STATS2_INFO(overrun_type5)},
- {XGBE_STATS2_INFO(rx_bytes)},
- {XGBE_STATS2_INFO(tx_good_frames)},
- {XGBE_STATS2_INFO(tx_broadcast_frames)},
- {XGBE_STATS2_INFO(tx_multicast_frames)},
- {XGBE_STATS2_INFO(tx_pause_frames)},
- {XGBE_STATS2_INFO(tx_deferred_frames)},
- {XGBE_STATS2_INFO(tx_collision_frames)},
- {XGBE_STATS2_INFO(tx_single_coll_frames)},
- {XGBE_STATS2_INFO(tx_mult_coll_frames)},
- {XGBE_STATS2_INFO(tx_excessive_collisions)},
- {XGBE_STATS2_INFO(tx_late_collisions)},
- {XGBE_STATS2_INFO(tx_underrun)},
- {XGBE_STATS2_INFO(tx_carrier_sense_errors)},
- {XGBE_STATS2_INFO(tx_bytes)},
- {XGBE_STATS2_INFO(tx_64byte_frames)},
- {XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
- {XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
- {XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
- {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
- {XGBE_STATS2_INFO(tx_1024byte_frames)},
- {XGBE_STATS2_INFO(net_bytes)},
- {XGBE_STATS2_INFO(rx_sof_overruns)},
- {XGBE_STATS2_INFO(rx_mof_overruns)},
- {XGBE_STATS2_INFO(rx_dma_overruns)},
+ XGBE_STATS2_INFO(rx_good_frames),
+ XGBE_STATS2_INFO(rx_broadcast_frames),
+ XGBE_STATS2_INFO(rx_multicast_frames),
+ XGBE_STATS2_INFO(rx_pause_frames),
+ XGBE_STATS2_INFO(rx_crc_errors),
+ XGBE_STATS2_INFO(rx_align_code_errors),
+ XGBE_STATS2_INFO(rx_oversized_frames),
+ XGBE_STATS2_INFO(rx_jabber_frames),
+ XGBE_STATS2_INFO(rx_undersized_frames),
+ XGBE_STATS2_INFO(rx_fragments),
+ XGBE_STATS2_INFO(overrun_type4),
+ XGBE_STATS2_INFO(overrun_type5),
+ XGBE_STATS2_INFO(rx_bytes),
+ XGBE_STATS2_INFO(tx_good_frames),
+ XGBE_STATS2_INFO(tx_broadcast_frames),
+ XGBE_STATS2_INFO(tx_multicast_frames),
+ XGBE_STATS2_INFO(tx_pause_frames),
+ XGBE_STATS2_INFO(tx_deferred_frames),
+ XGBE_STATS2_INFO(tx_collision_frames),
+ XGBE_STATS2_INFO(tx_single_coll_frames),
+ XGBE_STATS2_INFO(tx_mult_coll_frames),
+ XGBE_STATS2_INFO(tx_excessive_collisions),
+ XGBE_STATS2_INFO(tx_late_collisions),
+ XGBE_STATS2_INFO(tx_underrun),
+ XGBE_STATS2_INFO(tx_carrier_sense_errors),
+ XGBE_STATS2_INFO(tx_bytes),
+ XGBE_STATS2_INFO(tx_64byte_frames),
+ XGBE_STATS2_INFO(tx_65_to_127byte_frames),
+ XGBE_STATS2_INFO(tx_128_to_255byte_frames),
+ XGBE_STATS2_INFO(tx_256_to_511byte_frames),
+ XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
+ XGBE_STATS2_INFO(tx_1024byte_frames),
+ XGBE_STATS2_INFO(net_bytes),
+ XGBE_STATS2_INFO(rx_sof_overruns),
+ XGBE_STATS2_INFO(rx_mof_overruns),
+ XGBE_STATS2_INFO(rx_dma_overruns),
};
#define for_each_intf(i, priv) \
@@ -1035,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
ALE_PORT_STATE,
ALE_PORT_STATE_FORWARD);
- if (ndev && slave->open)
+ if (ndev && slave->open &&
+ slave->link_interface != SGMII_LINK_MAC_PHY &&
+ slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_on(ndev);
} else {
writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
@@ -1043,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
ALE_PORT_STATE,
ALE_PORT_STATE_DISABLE);
- if (ndev)
+ if (ndev &&
+ slave->link_interface != SGMII_LINK_MAC_PHY &&
+ slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_off(ndev);
}
@@ -1066,9 +1800,16 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
if (!slave->open)
return;
- if (!SLAVE_LINK_IS_XGMII(slave))
- sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
- sp);
+ if (!SLAVE_LINK_IS_XGMII(slave)) {
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ sgmii_link_state =
+ netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
+ else
+ sgmii_link_state =
+ netcp_sgmii_get_port_link(
+ gbe_dev->sgmii_port_regs, sp);
+ }
+
phy_link_state = gbe_phy_link_status(slave);
link_state = phy_link_state & sgmii_link_state;
@@ -1137,6 +1878,7 @@ static int gbe_port_reset(struct gbe_slave *slave)
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
int max_rx_len)
{
+ void __iomem *rx_maxlen_reg;
u32 xgmii_mode;
if (max_rx_len > NETCP_MAX_FRAME_SIZE)
@@ -1150,7 +1892,12 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
}
- writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
+ if (IS_SS_ID_MU(gbe_dev))
+ rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
+ else
+ rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
+
+ writel(max_rx_len, rx_maxlen_reg);
writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
@@ -1242,6 +1989,12 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
static void gbe_init_host_port(struct gbe_priv *priv)
{
int bypass_en = 1;
+
+ /* Host Tx Pri */
+ if (IS_SS_ID_NU(priv))
+ writel(HOST_TX_PRI_MAP_DEFAULT,
+ GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
+
/* Max length register */
writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
rx_maxlen));
@@ -1472,15 +2225,21 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
GBE_RTL_VERSION(reg), GBE_IDENT(reg));
+ /* For 10G and on NetCP 1.5, direct packets to the port via taginfo */
+ if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
+ gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
+
if (gbe_dev->enable_ale)
- gbe_intf->tx_pipe.dma_psflags = 0;
+ gbe_intf->tx_pipe.switch_to_port = 0;
else
- gbe_intf->tx_pipe.dma_psflags = port_num;
+ gbe_intf->tx_pipe.switch_to_port = port_num;
- dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
+ dev_dbg(gbe_dev->dev,
+ "opened TX channel %s: %p with to port %d, flags %d\n",
gbe_intf->tx_pipe.dma_chan_name,
gbe_intf->tx_pipe.dma_channel,
- gbe_intf->tx_pipe.dma_psflags);
+ gbe_intf->tx_pipe.switch_to_port,
+ gbe_intf->tx_pipe.flags);
gbe_slave_stop(gbe_intf);
@@ -1491,8 +2250,8 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
/* All statistics enabled and STAT AB visible by default */
- writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
- stat_port_en));
+ writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
+ stat_port_en));
ret = gbe_slave_open(gbe_intf);
if (ret)
@@ -1529,6 +2288,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
{
int port_reg_num;
u32 port_reg_ofs, emac_reg_ofs;
+ u32 port_reg_blk_sz, emac_reg_blk_sz;
if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
dev_err(gbe_dev->dev, "missing slave-port parameter\n");
@@ -1560,23 +2320,29 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
} else {
port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
}
+ emac_reg_ofs = GBE13_EMAC_OFFSET;
+ port_reg_blk_sz = 0x30;
+ emac_reg_blk_sz = 0x40;
+ } else if (IS_SS_ID_MU(gbe_dev)) {
+ port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
+ emac_reg_ofs = GBENU_EMAC_OFFSET;
+ port_reg_blk_sz = 0x1000;
+ emac_reg_blk_sz = 0x1000;
} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
+ emac_reg_ofs = XGBE10_EMAC_OFFSET;
+ port_reg_blk_sz = 0x30;
+ emac_reg_blk_sz = 0x40;
} else {
dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
gbe_dev->ss_version);
return -EINVAL;
}
- if (gbe_dev->ss_version == GBE_SS_VERSION_14)
- emac_reg_ofs = GBE13_EMAC_OFFSET;
- else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
- emac_reg_ofs = XGBE10_EMAC_OFFSET;
-
- slave->port_regs = gbe_dev->ss_regs + port_reg_ofs +
- (0x30 * port_reg_num);
- slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs +
- (0x40 * slave->slave_num);
+ slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
+ (port_reg_blk_sz * port_reg_num);
+ slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
+ (emac_reg_blk_sz * slave->slave_num);
if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
/* Initialize slave port register offsets */
@@ -1595,6 +2361,23 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+ } else if (IS_SS_ID_MU(gbe_dev)) {
+ /* Initialize slave port register offsets */
+ GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
+ GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
+ GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
+ GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
+ GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
+
+ /* Initialize EMAC register offsets */
+ GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
+ GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
+
} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
/* Initialize slave port register offsets */
XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
@@ -1654,6 +2437,8 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
mac_phy_link = true;
slave->open = true;
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+ break;
}
/* of_phy_connect() is needed only for MAC-PHY interface */
@@ -1724,24 +2509,41 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
void __iomem *regs;
int ret, i;
- ret = of_address_to_resource(node, 0, &res);
+ ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
if (ret) {
- dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
- node->name);
+ dev_err(gbe_dev->dev,
+ "Can't xlate xgbe of node(%s) ss address at %d\n",
+ node->name, XGBE_SS_REG_INDEX);
return ret;
}
regs = devm_ioremap_resource(gbe_dev->dev, &res);
if (IS_ERR(regs)) {
- dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
+ dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
return PTR_ERR(regs);
}
gbe_dev->ss_regs = regs;
+ ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't xlate xgbe of node(%s) sm address at %d\n",
+ node->name, XGBE_SM_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->switch_regs = regs;
+
ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
if (ret) {
- dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
- node->name);
+ dev_err(gbe_dev->dev,
+ "Can't xlate xgbe serdes of node(%s) address at %d\n",
+ node->name, XGBE_SERDES_REG_INDEX);
return ret;
}
@@ -1753,9 +2555,9 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
gbe_dev->xgbe_serdes_regs = regs;
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- XGBE10_NUM_STAT_ENTRIES *
- (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
- GFP_KERNEL);
+ XGBE10_NUM_STAT_ENTRIES *
+ (gbe_dev->max_num_ports) * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
@@ -1764,19 +2566,19 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
gbe_dev->ss_version = XGBE_SS_VERSION_10;
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
XGBE10_SGMII_MODULE_OFFSET;
- gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
- for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
- gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
+ for (i = 0; i < gbe_dev->max_num_ports; i++)
+ gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
- gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
- gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
+ gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
+ gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
gbe_dev->et_stats = xgbe10_et_stats;
gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+ gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
@@ -1803,10 +2605,11 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
void __iomem *regs;
int ret;
- ret = of_address_to_resource(node, 0, &res);
+ ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
if (ret) {
- dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n",
- node->name);
+ dev_err(gbe_dev->dev,
+ "Can't translate of node(%s) of gbe ss address at %d\n",
+ node->name, GBE_SS_REG_INDEX);
return ret;
}
@@ -1823,34 +2626,67 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
struct device_node *node)
{
+ struct resource res;
void __iomem *regs;
- int i;
+ int i, ret;
+
+ ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't translate of gbe node(%s) address at index %d\n",
+ node->name, GBE_SGMII34_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev,
+ "Failed to map gbe sgmii port34 register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->sgmii_port34_regs = regs;
+
+ ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't translate of gbe node(%s) address at index %d\n",
+ node->name, GBE_SM_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev,
+ "Failed to map gbe switch module register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->switch_regs = regs;
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
GBE13_NUM_HW_STAT_ENTRIES *
- GBE13_NUM_SLAVES * sizeof(u64),
+ gbe_dev->max_num_slaves * sizeof(u64),
GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
- regs = gbe_dev->ss_regs;
- gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
- gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
- gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
- gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;
+ gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
+ gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
- for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
- gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
- (GBE_HW_STATS_REG_MAP_SZ * i);
+ for (i = 0; i < gbe_dev->max_num_slaves; i++) {
+ gbe_dev->hw_stats_regs[i] =
+ gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
+ (GBE_HW_STATS_REG_MAP_SZ * i);
+ }
- gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
- gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
+ gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
+ gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBE13_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
gbe_dev->et_stats = gbe13_et_stats;
gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+ gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
/* Subsystem registers */
GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
@@ -1869,6 +2705,80 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
return 0;
}
+static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct resource res;
+ void __iomem *regs;
+ int i, ret;
+
+ gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
+ GBENU_NUM_HW_STAT_ENTRIES *
+ (gbe_dev->max_num_ports) * sizeof(u64),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats) {
+ dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't translate of gbenu node(%s) addr at index %d\n",
+ node->name, GBENU_SM_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev,
+ "Failed to map gbenu switch module register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->switch_regs = regs;
+
+ gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+ gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
+
+ for (i = 0; i < (gbe_dev->max_num_ports); i++)
+ gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
+ GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
+
+ gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
+ gbe_dev->ale_ports = gbe_dev->max_num_ports;
+ gbe_dev->host_port = GBENU_HOST_PORT_NUM;
+ gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
+ gbe_dev->et_stats = gbenu_et_stats;
+ gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
+
+ if (IS_SS_ID_NU(gbe_dev))
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+ else
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ GBENU_ET_STATS_PORT_SIZE;
+
+ /* Subsystem registers */
+ GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+
+ /* Switch module registers */
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+
+ /* Host port registers */
+ GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+ GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+
+ /* For NU only. 2U does not need tx_pri_map.
+ * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
+ * while 2U has only 1 such thread
+ */
+ GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+ return 0;
+}
+
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
struct device_node *node, void **inst_priv)
{
@@ -1888,6 +2798,21 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (!gbe_dev)
return -ENOMEM;
+ if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
+ of_device_is_compatible(node, "ti,netcp-gbe")) {
+ gbe_dev->max_num_slaves = 4;
+ } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
+ gbe_dev->max_num_slaves = 8;
+ } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
+ gbe_dev->max_num_slaves = 1;
+ } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
+ gbe_dev->max_num_slaves = 2;
+ } else {
+ dev_err(dev, "device tree node for unknown device\n");
+ return -EINVAL;
+ }
+ gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
+
gbe_dev->dev = dev;
gbe_dev->netcp_device = netcp_device;
gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
@@ -1923,7 +2848,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (ret)
goto quit;
- ret = set_gbe_ethss14_priv(gbe_dev, node);
+ dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
+
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ ret = set_gbe_ethss14_priv(gbe_dev, node);
+ else if (IS_SS_ID_MU(gbe_dev))
+ ret = set_gbenu_ethss_priv(gbe_dev, node);
+ else
+ ret = -ENODEV;
+
if (ret)
goto quit;
} else if (!strcmp(node->name, "xgbe")) {
@@ -1963,6 +2896,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
continue;
}
gbe_dev->num_slaves++;
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+ break;
}
if (!gbe_dev->num_slaves)
@@ -1971,7 +2906,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
/* Initialize Secondary slave ports */
secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
- if (secondary_ports)
+ if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
init_secondary_ports(gbe_dev, secondary_ports);
of_node_put(secondary_ports);
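
The GBENU_STATS_Px and XGBE_STATSx_INFO tables added above all follow one pattern: each macro expands to an entry carrying a display name, the owning stats module, and the size and offset of a field in the hardware-stats structure, so the driver can fetch every ethtool counter generically at runtime. As a rough illustration of that pattern only (not the driver code; struct hw_stats, STAT_ENTRY and the field names below are invented for the sketch), a minimal userspace C program:

/* Minimal sketch of the offsetof/size-based stats-table pattern.
 * The struct and names are placeholders, not netcp_ethss definitions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hw_stats {                      /* stand-in for a hw-stats struct */
	uint64_t rx_good_frames;
	uint64_t rx_bytes;
	uint64_t tx_good_frames;
};

struct stat_entry {
	const char *name;
	size_t size;
	size_t offset;
};

#define STAT_ENTRY(field) \
	{ "GBE:" #field, sizeof(((struct hw_stats *)0)->field), \
	  offsetof(struct hw_stats, field) }

static const struct stat_entry stats[] = {
	STAT_ENTRY(rx_good_frames),
	STAT_ENTRY(rx_bytes),
	STAT_ENTRY(tx_good_frames),
};

int main(void)
{
	struct hw_stats hw = { .rx_good_frames = 42, .rx_bytes = 64000,
			       .tx_good_frames = 7 };

	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		uint64_t val;

		/* Fetch the counter through its recorded size and offset,
		 * the same way the driver walks its et_stats tables.
		 */
		memcpy(&val, (char *)&hw + stats[i].offset, stats[i].size);
		printf("%-20s %llu\n", stats[i].name,
		       (unsigned long long)val);
	}
	return 0;
}

In the driver, FIELD_SIZEOF() plays the role of the sizeof expression in STAT_ENTRY above; the module constant in each macro additionally selects which hw_stats_regs block the counter is read from.
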
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index bea8cd2bb56c..a3f7610002aa 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -838,7 +838,8 @@ static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
return ret;
}
-static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
{
int ret = 0;
struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
@@ -850,7 +851,7 @@ static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
}
static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
int ret = 0;
struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
@@ -876,8 +877,8 @@ static struct ptp_clock_info ptp_mpipe_caps = {
.pps = 0,
.adjfreq = ptp_mpipe_adjfreq,
.adjtime = ptp_mpipe_adjtime,
- .gettime = ptp_mpipe_gettime,
- .settime = ptp_mpipe_settime,
+ .gettime64 = ptp_mpipe_gettime,
+ .settime64 = ptp_mpipe_settime,
.enable = ptp_mpipe_enable,
};
@@ -1122,7 +1123,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
addr + i * sizeof(struct tile_net_comps);
/* If this is a network cpu, create an iqueue. */
- if (cpu_isset(cpu, network_cpus_map)) {
+ if (cpumask_test_cpu(cpu, &network_cpus_map)) {
order = get_order(NOTIF_RING_SIZE);
page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
if (page == NULL) {
@@ -1298,7 +1299,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
int first_ring, ring;
int instance = mpipe_instance(dev);
struct mpipe_data *md = &mpipe_data[instance];
- int network_cpus_count = cpus_weight(network_cpus_map);
+ int network_cpus_count = cpumask_weight(&network_cpus_map);
if (!hash_default) {
netdev_err(dev, "Networking requires hash_default!\n");
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 74acb5cf6099..5d244b6b5e3a 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB || MIPS) || PPC_PS3
+ depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -42,7 +42,7 @@ config GELIC_WIRELESS
config SPIDER_NET
tristate "Spider Gigabit Ethernet driver"
- depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
+ depends on PCI && PPC_IBM_CELL_BLADE
select FW_LOADER
select SUNGEM_PHY
---help---
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index bb7992804664..ac62a5e248b0 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1065,7 +1065,7 @@ refill:
/*
* this call can fail, but for now, just leave this
- * decriptor without skb
+ * descriptor without skb
*/
gelic_descr_prepare_rx(card, descr);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 0a7f2e77557f..13214a6492ac 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1167,7 +1167,7 @@ static int gelic_wl_set_ap(struct net_device *netdev,
} else {
pr_debug("%s: clear bssid\n", __func__);
clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
- memset(wl->bssid, 0, ETH_ALEN);
+ eth_zero_addr(wl->bssid);
}
spin_unlock_irqrestore(&wl->lock, irqflag);
pr_debug("%s: ->\n", __func__);
@@ -1189,7 +1189,7 @@ static int gelic_wl_get_ap(struct net_device *netdev,
memcpy(data->ap_addr.sa_data, wl->active_bssid,
ETH_ALEN);
} else
- memset(data->ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(data->ap_addr.sa_data);
spin_unlock_irqrestore(&wl->lock, irqflag);
mutex_unlock(&wl->assoc_stat_lock);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 17e276651601..de2850497c09 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32;
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency.
- The compiler will convert <unsigned>'%'<2^N> into a bit mask.
- Making the Tx ring too large decreases the effectiveness of channel
- bonding and packet priority.
- There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE 16
-#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+ * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ * Making the Tx ring too large decreases the effectiveness of channel
+ * bonding and packet priority.
+ * With BQL support, we can increase TX ring safely.
+ * There are no ill effects from too-large receive rings.
+ */
+#define TX_RING_SIZE 64
+#define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */
#define RX_RING_SIZE 64
/* Operational parameters that usually are not changed. */
@@ -286,7 +288,7 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
* The .data field is currently only used to store quirks
*/
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
-static struct of_device_id rhine_of_tbl[] = {
+static const struct of_device_id rhine_of_tbl[] = {
{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
{ } /* terminate list */
};
@@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev)
}
rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
+ netdev_reset_queue(dev);
}
static void free_tbufs(struct net_device* dev)
@@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
else
rp->tx_ring[entry].tx_status = 0;
+ netdev_sent_queue(dev, skb->len);
/* lock eth irq */
wmb();
rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
@@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct sk_buff *skb;
/* find and cleanup dirty tx descriptors */
while (rp->dirty_tx != rp->cur_tx) {
@@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev)
entry, txstatus);
if (txstatus & DescOwn)
break;
+ skb = rp->tx_skbuff[entry];
if (txstatus & 0x8000) {
netif_dbg(rp, tx_done, dev,
"Transmit error, Tx status %08x\n", txstatus);
@@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev)
(txstatus >> 3) & 0xF, txstatus & 0xF);
u64_stats_update_begin(&rp->tx_stats.syncp);
- rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+ rp->tx_stats.bytes += skb->len;
rp->tx_stats.packets++;
u64_stats_update_end(&rp->tx_stats.syncp);
}
@@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev)
if (rp->tx_skbuff_dma[entry]) {
dma_unmap_single(hwdev,
rp->tx_skbuff_dma[entry],
- rp->tx_skbuff[entry]->len,
+ skb->len,
DMA_TO_DEVICE);
}
- dev_consume_skb_any(rp->tx_skbuff[entry]);
+ bytes_compl += skb->len;
+ pkts_compl++;
+ dev_consume_skb_any(skb);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
netif_wake_queue(dev);
}
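The via-rhine changes above grow the TX ring to 64 entries and wire the driver up to Byte Queue Limits (BQL): bytes are reported to the stack when a frame is queued (netdev_sent_queue), accounted back when descriptors complete (netdev_completed_queue), and the counters are reset whenever the ring is reinitialized (netdev_reset_queue). A minimal sketch of that pattern, assuming a hypothetical driver with a my_ring_reset/my_xmit/my_tx_clean split (not via-rhine's real structure):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_ring_reset(struct net_device *dev)
{
	/* ... rebuild the TX descriptors ... */
	netdev_reset_queue(dev);		/* forget all in-flight BQL bytes */
}

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... post skb to the hardware ring ... */
	netdev_sent_queue(dev, skb->len);	/* BQL: bytes handed to the NIC */
	return NETDEV_TX_OK;
}

static void my_tx_clean(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* for each completed descriptor:
	 *	bytes += skb->len; pkts++; free the skb;
	 */
	netdev_completed_queue(dev, pkts, bytes);	/* BQL: bytes drained */
}

BQL uses these byte counts to limit how much data is allowed to sit in the ring at once, which is why the larger TX_RING_SIZE no longer hurts latency.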
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index c20206f83cc1..ae68afd50a15 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -392,7 +392,7 @@ MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
* Describe the OF device identifiers that we support in this
* device driver. Used for devicetree nodes.
*/
-static struct of_device_id velocity_of_ids[] = {
+static const struct of_device_id velocity_of_ids[] = {
{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
{ /* Sentinel */ },
};
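via-velocity's only change here is the same constification applied across several drivers in this series: OF match tables are never written after registration, so declaring them const lets them live in read-only data. The usual shape of such a table, as a generic sketch with a made-up compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);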
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0e0fbb5842b3..8b282d0b169c 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -56,7 +56,7 @@ MODULE_LICENSE("GPL");
#define W5100_S0_REGS 0x0400
#define W5100_S0_MR 0x0400 /* S0 Mode Register */
-#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscous) */
+#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */
#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
#define W5100_S0_CR 0x0401 /* S0 Command Register */
#define S0_CR_OPEN 0x01 /* OPEN command */
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 4b310002258d..8da7b930ff59 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
#define IDR_W5300 0x5300 /* =0x5300 for WIZnet W5300 */
#define W5300_S0_MR 0x0200 /* S0 Mode Register */
#define S0_MR_CLOSED 0x0000 /* Close mode */
-#define S0_MR_MACRAW 0x0004 /* MAC RAW mode (promiscous) */
+#define S0_MR_MACRAW 0x0004 /* MAC RAW mode (promiscuous) */
#define S0_MR_MACRAW_MF 0x0044 /* MAC RAW mode (filtered) */
#define W5300_S0_CR 0x0202 /* S0 Command Register */
#define S0_CR_OPEN 0x0001 /* OPEN command */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index dbcbf0c5bcfa..af2694dc6f90 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -707,8 +707,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= STS_CTRL_APP0_SOP;
cur_p->len = skb_headlen(skb);
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
- DMA_TO_DEVICE);
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
cur_p->app4 = (unsigned long)skb;
for (ii = 0; ii < num_frag; ii++) {
@@ -1157,7 +1157,7 @@ static int temac_of_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id temac_of_match[] = {
+static const struct of_device_id temac_of_match[] = {
{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index a6d2860b712c..28b7e7d9c272 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -48,7 +48,7 @@
#define AXIENET_REGS_N 32
/* Match table for of_platform binding */
-static struct of_device_id axienet_of_match[] = {
+static const struct of_device_id axienet_of_match[] = {
{ .compatible = "xlnx,axi-ethernet-1.00.a", },
{ .compatible = "xlnx,axi-ethernet-1.01.a", },
{ .compatible = "xlnx,axi-ethernet-2.01.a", },
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 9d4ce388510a..6008eee01a33 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1062,7 +1062,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
} else {
dev_warn(&ofdev->dev, "Parameter %s not found,"
"defaulting to false\n", s);
- return 0;
+ return false;
}
}
@@ -1231,7 +1231,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
};
/* Match table for OF platform binding */
-static struct of_device_id xemaclite_of_match[] = {
+static const struct of_device_id xemaclite_of_match[] = {
{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 9e16a2819d48..5138407941cf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -954,7 +954,7 @@ static void eth_set_mcast_list(struct net_device *dev)
return;
}
- memset(diffs, 0, ETH_ALEN);
+ eth_zero_addr(diffs);
addr = NULL;
netdev_for_each_mc_addr(ha, dev) {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index daca0dee88f3..7c4a4151ef0f 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -247,6 +247,9 @@ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sixpack *sp = netdev_priv(dev);
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
spin_lock_bh(&sp->lock);
/* We were not busy, so we are now... :-) */
netif_stop_queue(dev);
@@ -284,18 +287,6 @@ static int sp_close(struct net_device *dev)
return 0;
}
-/* Return the frame type ID */
-static int sp_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
-{
-#ifdef CONFIG_INET
- if (type != ETH_P_AX25)
- return ax25_hard_header(skb, dev, type, daddr, saddr, len);
-#endif
- return 0;
-}
-
static int sp_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr_ax25 *sa = addr;
@@ -309,20 +300,6 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-static int sp_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
- return ax25_rebuild_header(skb);
-#else
- return 0;
-#endif
-}
-
-static const struct header_ops sp_header_ops = {
- .create = sp_header,
- .rebuild = sp_rebuild_header,
-};
-
static const struct net_device_ops sp_netdev_ops = {
.ndo_open = sp_open_dev,
.ndo_stop = sp_close,
@@ -337,7 +314,7 @@ static void sp_setup(struct net_device *dev)
dev->destructor = free_netdev;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
- dev->header_ops = &sp_header_ops;
+ dev->header_ops = &ax25_header_ops;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a98c153f371e..83c7cce0d172 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -772,6 +772,9 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
if (skb->data[0] != 0) {
do_kiss_params(bc, skb->data, skb->len);
dev_kfree_skb(skb);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index c2894e43840e..63ff08a26da8 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -251,6 +251,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *orig_dev;
int size;
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
/*
* Just to be *really* sure not to send anything if the interface
* is down, the ethernet device may have gone.
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 0fad408f24aa..c3d377770616 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -920,6 +920,9 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
int i;
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
/* Temporarily stop the scheduler feeding us packets */
netif_stop_queue(dev);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index c67a27245072..49fe59b180a8 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -404,6 +404,9 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
{
struct hdlcdrv_state *sm = netdev_priv(dev);
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
if (skb->data[0] != 0) {
do_kiss_params(sm, skb->data, skb->len);
dev_kfree_skb(skb);
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index f990bb1c3e02..2ffbf13471d0 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -529,6 +529,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mkiss *ax = netdev_priv(dev);
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
if (!netif_running(dev)) {
printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
return NETDEV_TX_BUSY;
@@ -554,11 +557,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* We were not busy, so we are now... :-) */
- if (skb != NULL) {
- netif_stop_queue(dev);
- ax_encaps(dev, skb->data, skb->len);
- kfree_skb(skb);
- }
+ netif_stop_queue(dev);
+ ax_encaps(dev, skb->data, skb->len);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -573,32 +574,6 @@ static int ax_open_dev(struct net_device *dev)
return 0;
}
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
-
-/* Return the frame type ID */
-static int ax_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
-{
-#ifdef CONFIG_INET
- if (type != ETH_P_AX25)
- return ax25_hard_header(skb, dev, type, daddr, saddr, len);
-#endif
- return 0;
-}
-
-
-static int ax_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
- return ax25_rebuild_header(skb);
-#else
- return 0;
-#endif
-}
-
-#endif /* CONFIG_{AX25,AX25_MODULE} */
-
/* Open the low-level part of the AX25 channel. Easy! */
static int ax_open(struct net_device *dev)
{
@@ -662,11 +637,6 @@ static int ax_close(struct net_device *dev)
return 0;
}
-static const struct header_ops ax_header_ops = {
- .create = ax_header,
- .rebuild = ax_rebuild_header,
-};
-
static const struct net_device_ops ax_netdev_ops = {
.ndo_open = ax_open_dev,
.ndo_stop = ax_close,
@@ -682,7 +652,7 @@ static void ax_setup(struct net_device *dev)
dev->addr_len = 0;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
- dev->header_ops = &ax_header_ops;
+ dev->header_ops = &ax25_header_ops;
dev->netdev_ops = &ax_netdev_ops;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 57be9e0e98a6..ce88df33fe17 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1639,6 +1639,9 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
char kisscmd;
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
if (skb->len > scc->stat.bufsize || skb->len < 2) {
scc->dev_stat.tx_dropped++; /* bogus frame */
dev_kfree_skb(skb);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 717433cfb81d..1a4729c36aa4 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -597,6 +597,9 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb,
{
struct yam_port *yp = netdev_priv(dev);
+ if (skb->protocol == htons(ETH_P_IP))
+ return ax25_ip_xmit(skb);
+
skb_queue_tail(&yp->send_queue, skb);
dev->trans_start = jiffies;
return NETDEV_TX_OK;
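Across the hamradio drivers above (6pack, baycom_epp, bpqether, dmascc, hdlcdrv, mkiss, scc, yam) the same refactor repeats: the per-driver header_ops and rebuild_header callbacks are dropped in favour of the shared ax25_header_ops, and each xmit routine now hands IP traffic straight to ax25_ip_xmit() for AX.25 encapsulation. The resulting driver boilerplate looks roughly like this sketch (my_ax_xmit/my_ax_setup are illustrative names, not from any of these drivers):

#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <net/ax25.h>

static netdev_tx_t my_ax_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* IP frames go through the AX.25 layer before hitting the hardware */
	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	/* ... KISS-encapsulate and send AX.25 frames as before ... */
	return NETDEV_TX_OK;
}

static void my_ax_setup(struct net_device *dev)
{
	dev->type	= ARPHRD_AX25;
	dev->header_ops	= &ax25_header_ops;	/* shared ops, no local copy */
}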
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 384ca4f4de4a..41071d32bc8e 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -129,8 +129,10 @@ struct hv_netvsc_packet {
/* Bookkeeping stuff */
u32 status;
- struct hv_device *device;
bool is_data_pkt;
+ bool xmit_more; /* from skb */
+ bool cp_partial; /* partial copy into send buffer */
+
u16 vlan_tci;
u16 q_idx;
@@ -145,11 +147,14 @@ struct hv_netvsc_packet {
/* This points to the memory after page_buf */
struct rndis_message *rndis_msg;
+ u32 rmsg_size; /* RNDIS header and PPI size */
+ u32 rmsg_pgcnt; /* page count of RNDIS header and PPI */
+
u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
void *data;
u32 page_buf_cnt;
- struct hv_page_buffer page_buf[0];
+ struct hv_page_buffer *page_buf;
};
struct netvsc_device_info {
@@ -187,6 +192,7 @@ int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_message *resp);
+void netvsc_xmit_completion(void *context);
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet,
struct ndis_tcp_ip_checksum_info *csum_info);
@@ -596,7 +602,25 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16
-/* Per netvsc channel-specific */
+#define RNDIS_MAX_PKT_DEFAULT 8
+#define RNDIS_PKT_ALIGN_DEFAULT 8
+
+struct multi_send_data {
+ spinlock_t lock; /* protect struct multi_send_data */
+ struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
+ u32 count; /* counter of batched packets */
+};
+
+/* The context of the netvsc device */
+struct net_device_context {
+ /* point back to our device context */
+ struct hv_device *device_ctx;
+ struct delayed_work dwork;
+ struct work_struct work;
+ u32 msg_enable; /* debug level */
+};
+
+/* Per netvsc device */
struct netvsc_device {
struct hv_device *dev;
@@ -634,6 +658,7 @@ struct netvsc_device {
struct vmbus_channel *chn_table[NR_CPUS];
u32 send_table[VRSS_SEND_TAB_SIZE];
+ u32 max_chn;
u32 num_chn;
atomic_t queue_sends[NR_CPUS];
@@ -646,6 +671,13 @@ struct netvsc_device {
unsigned char *cb_buffer;
/* The sub channel callback buffer */
unsigned char *sub_cb_buf;
+
+ struct multi_send_data msd[NR_CPUS];
+ u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
+ u32 pkt_align; /* alignment bytes, e.g. 8 */
+
+ /* The net device context */
+ struct net_device_context *nd_ctx;
};
/* NdisInitialize message */
@@ -943,6 +975,10 @@ struct ndis_tcp_lso_info {
#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
sizeof(u32))
+/* Total size of all PPI data */
+#define NDIS_ALL_PPI_SIZE (NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE + \
+ NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE)
+
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
@@ -1155,6 +1191,8 @@ struct rndis_message {
#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container))
+#define RNDIS_AND_PPI_SIZE (sizeof(struct rndis_message) + NDIS_ALL_PPI_SIZE)
+
#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
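The hyperv_net.h hunks above lay the groundwork for send batching: net_device_context moves into the shared header, RNDIS_AND_PPI_SIZE makes the worst-case RNDIS metadata a compile-time constant, and each channel gets a multi_send_data slot where a partially filled send-buffer section can wait for further packets. A minimal sketch of such a per-queue slot and its initialisation (illustrative names; the real structure is the one above, initialised in the netvsc.c hunk below):

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_msd {
	spinlock_t lock;	/* protects the pending packet and counter */
	void *pending_pkt;	/* packet already copied into the section */
	u32 count;		/* packets batched so far */
};

static void example_msd_init(struct example_msd *msd, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		spin_lock_init(&msd[i].lock);
		msd[i].pending_pkt = NULL;
		msd[i].count = 0;
	}
}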
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 208eb05446ba..ea091bc5ff09 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -37,6 +37,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
+ int i;
net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
if (!net_device)
@@ -53,6 +54,11 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
net_device->destroy = false;
net_device->dev = device;
net_device->ndev = ndev;
+ net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+ net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+
+ for (i = 0; i < num_online_cpus(); i++)
+ spin_lock_init(&net_device->msd[i].lock);
hv_set_drvdata(device, net_device);
return net_device;
@@ -687,14 +693,28 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
unsigned int section_index,
+ u32 pend_size,
struct hv_netvsc_packet *packet)
{
char *start = net_device->send_buf;
- char *dest = (start + (section_index * net_device->send_section_size));
+ char *dest = start + (section_index * net_device->send_section_size)
+ + pend_size;
int i;
u32 msg_size = 0;
+ u32 padding = 0;
+ u32 remain = packet->total_data_buflen % net_device->pkt_align;
+ u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+
+ /* Add padding */
+ if (packet->is_data_pkt && packet->xmit_more && remain &&
+ !packet->cp_partial) {
+ padding = net_device->pkt_align - remain;
+ packet->rndis_msg->msg_len += padding;
+ packet->total_data_buflen += padding;
+ }
- for (i = 0; i < packet->page_buf_cnt; i++) {
+ for (i = 0; i < page_count; i++) {
char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
u32 offset = packet->page_buf[i].offset;
u32 len = packet->page_buf[i].len;
@@ -703,79 +723,64 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
msg_size += len;
dest += len;
}
+
+ if (padding) {
+ memset(dest, 0, padding);
+ msg_size += padding;
+ }
+
return msg_size;
}
-int netvsc_send(struct hv_device *device,
- struct hv_netvsc_packet *packet)
+static inline int netvsc_send_pkt(
+ struct hv_netvsc_packet *packet,
+ struct netvsc_device *net_device)
{
- struct netvsc_device *net_device;
- int ret = 0;
- struct nvsp_message sendMessage;
- struct net_device *ndev;
- struct vmbus_channel *out_channel = NULL;
- u64 req_id;
- unsigned int section_index = NETVSC_INVALID_INDEX;
- u32 msg_size = 0;
- struct sk_buff *skb = NULL;
+ struct nvsp_message nvmsg;
+ struct vmbus_channel *out_channel = packet->channel;
u16 q_idx = packet->q_idx;
+ struct net_device *ndev = net_device->ndev;
+ u64 req_id;
+ int ret;
+ struct hv_page_buffer *pgbuf;
-
- net_device = get_outbound_net_device(device);
- if (!net_device)
- return -ENODEV;
- ndev = net_device->ndev;
-
- sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
+ nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (packet->is_data_pkt) {
/* 0 is RMC_DATA; */
- sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
+ nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
} else {
/* 1 is RMC_CONTROL; */
- sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
- }
-
- /* Attempt to send via sendbuf */
- if (packet->total_data_buflen < net_device->send_section_size) {
- section_index = netvsc_get_next_send_section(net_device);
- if (section_index != NETVSC_INVALID_INDEX) {
- msg_size = netvsc_copy_to_send_buf(net_device,
- section_index,
- packet);
- skb = (struct sk_buff *)
- (unsigned long)packet->send_completion_tid;
- packet->page_buf_cnt = 0;
- }
+ nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
}
- packet->send_buf_index = section_index;
-
- sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
- section_index;
- sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
+ nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
+ packet->send_buf_index;
+ if (packet->send_buf_index == NETVSC_INVALID_INDEX)
+ nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+ else
+ nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
+ packet->total_data_buflen;
if (packet->send_completion)
req_id = (ulong)packet;
else
req_id = 0;
- out_channel = net_device->chn_table[packet->q_idx];
- if (out_channel == NULL)
- out_channel = device->channel;
- packet->channel = out_channel;
-
if (out_channel->rescind)
return -ENODEV;
if (packet->page_buf_cnt) {
+ pgbuf = packet->cp_partial ? packet->page_buf +
+ packet->rmsg_pgcnt : packet->page_buf;
ret = vmbus_sendpacket_pagebuffer(out_channel,
- packet->page_buf,
+ pgbuf,
packet->page_buf_cnt,
- &sendMessage,
+ &nvmsg,
sizeof(struct nvsp_message),
req_id);
} else {
- ret = vmbus_sendpacket(out_channel, &sendMessage,
+ ret = vmbus_sendpacket(
+ out_channel, &nvmsg,
sizeof(struct nvsp_message),
req_id,
VM_PKT_DATA_INBAND,
@@ -809,13 +814,118 @@ int netvsc_send(struct hv_device *device,
packet, ret);
}
- if (ret != 0) {
- if (section_index != NETVSC_INVALID_INDEX)
- netvsc_free_send_slot(net_device, section_index);
- } else if (skb) {
- dev_kfree_skb_any(skb);
+ return ret;
+}
+
+int netvsc_send(struct hv_device *device,
+ struct hv_netvsc_packet *packet)
+{
+ struct netvsc_device *net_device;
+ int ret = 0, m_ret = 0;
+ struct vmbus_channel *out_channel;
+ u16 q_idx = packet->q_idx;
+ u32 pktlen = packet->total_data_buflen, msd_len = 0;
+ unsigned int section_index = NETVSC_INVALID_INDEX;
+ unsigned long flag;
+ struct multi_send_data *msdp;
+ struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+ bool try_batch;
+
+ net_device = get_outbound_net_device(device);
+ if (!net_device)
+ return -ENODEV;
+
+ out_channel = net_device->chn_table[q_idx];
+ if (!out_channel) {
+ out_channel = device->channel;
+ q_idx = 0;
+ packet->q_idx = 0;
+ }
+ packet->channel = out_channel;
+ packet->send_buf_index = NETVSC_INVALID_INDEX;
+ packet->cp_partial = false;
+
+ msdp = &net_device->msd[q_idx];
+
+ /* batch packets in send buffer if possible */
+ spin_lock_irqsave(&msdp->lock, flag);
+ if (msdp->pkt)
+ msd_len = msdp->pkt->total_data_buflen;
+
+ try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
+ net_device->max_pkt;
+
+ if (try_batch && msd_len + pktlen + net_device->pkt_align <
+ net_device->send_section_size) {
+ section_index = msdp->pkt->send_buf_index;
+
+ } else if (try_batch && msd_len + packet->rmsg_size <
+ net_device->send_section_size) {
+ section_index = msdp->pkt->send_buf_index;
+ packet->cp_partial = true;
+
+ } else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
+ net_device->send_section_size) {
+ section_index = netvsc_get_next_send_section(net_device);
+ if (section_index != NETVSC_INVALID_INDEX) {
+ msd_send = msdp->pkt;
+ msdp->pkt = NULL;
+ msdp->count = 0;
+ msd_len = 0;
+ }
}
+ if (section_index != NETVSC_INVALID_INDEX) {
+ netvsc_copy_to_send_buf(net_device,
+ section_index, msd_len,
+ packet);
+
+ packet->send_buf_index = section_index;
+
+ if (packet->cp_partial) {
+ packet->page_buf_cnt -= packet->rmsg_pgcnt;
+ packet->total_data_buflen = msd_len + packet->rmsg_size;
+ } else {
+ packet->page_buf_cnt = 0;
+ packet->total_data_buflen += msd_len;
+ }
+
+ if (msdp->pkt)
+ netvsc_xmit_completion(msdp->pkt);
+
+ if (packet->xmit_more && !packet->cp_partial) {
+ msdp->pkt = packet;
+ msdp->count++;
+ } else {
+ cur_send = packet;
+ msdp->pkt = NULL;
+ msdp->count = 0;
+ }
+ } else {
+ msd_send = msdp->pkt;
+ msdp->pkt = NULL;
+ msdp->count = 0;
+ cur_send = packet;
+ }
+
+ spin_unlock_irqrestore(&msdp->lock, flag);
+
+ if (msd_send) {
+ m_ret = netvsc_send_pkt(msd_send, net_device);
+
+ if (m_ret != 0) {
+ netvsc_free_send_slot(net_device,
+ msd_send->send_buf_index);
+ netvsc_xmit_completion(msd_send);
+ }
+ }
+
+ if (cur_send)
+ ret = netvsc_send_pkt(cur_send, net_device);
+
+ if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, section_index);
+
return ret;
}
@@ -911,7 +1021,6 @@ static void netvsc_receive(struct netvsc_device *net_device,
}
count = vmxferpage_packet->range_cnt;
- netvsc_packet->device = device;
netvsc_packet->channel = channel;
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
@@ -1078,6 +1187,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
*/
ndev = net_device->ndev;
+ /* Add netvsc_device context to netvsc_device */
+ net_device->nd_ctx = netdev_priv(ndev);
+
/* Initialize the NetVSC channel extension */
init_completion(&net_device->channel_init_wait);
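netvsc_send() above now runs a per-queue batching decision under msdp->lock: append the packet to the pending send-buffer section when it fits (or, when only the RNDIS header fits, copy that part and mark cp_partial), otherwise take a fresh section, and flush whichever of the pending and current packets must actually go out over VMBus. Stripped of the driver specifics, and ignoring the cp_partial case, the core decision reduces to roughly this sketch with assumed parameter names:

#include <linux/types.h>

enum batch_action { APPEND_AND_HOLD, APPEND_AND_FLUSH, FLUSH_OLD_THEN_SEND };

/* pend_len: bytes already copied into the section, pkt_len: the new packet,
 * more_coming mirrors skb->xmit_more, section_size/align/max_pkts mirror the
 * values negotiated with the host.
 */
static enum batch_action example_batch_decision(u32 pend_len, u32 pkt_len,
						u32 section_size, u32 align,
						u32 pend_cnt, u32 max_pkts,
						bool more_coming)
{
	bool can_batch = pend_len > 0 && pend_cnt < max_pkts &&
			 pend_len + pkt_len + align < section_size;

	if (!can_batch)
		return FLUSH_OLD_THEN_SEND;	/* send the old batch, start anew */
	if (more_coming)
		return APPEND_AND_HOLD;		/* keep batching on this queue */
	return APPEND_AND_FLUSH;		/* copy in, then send the batch */
}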
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 15d82eda0baf..5993c7e2d723 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -40,18 +40,21 @@
#include "hyperv_net.h"
-struct net_device_context {
- /* point back to our device context */
- struct hv_device *device_ctx;
- struct delayed_work dwork;
- struct work_struct work;
-};
#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR;
+
+static int debug = -1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
static void do_set_multicast(struct work_struct *w)
{
struct net_device_context *ndevctx =
@@ -229,16 +232,13 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return q_idx;
}
-static void netvsc_xmit_completion(void *context)
+void netvsc_xmit_completion(void *context)
{
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
struct sk_buff *skb = (struct sk_buff *)
(unsigned long)packet->send_completion_tid;
- u32 index = packet->send_buf_index;
- kfree(packet);
-
- if (skb && (index == NETVSC_INVALID_INDEX))
+ if (skb)
dev_kfree_skb_any(skb);
}
@@ -277,15 +277,16 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
}
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
- struct hv_page_buffer *pb)
+ struct hv_netvsc_packet *packet)
{
+ struct hv_page_buffer *pb = packet->page_buf;
u32 slots_used = 0;
char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;
/* The packet is laid out thus:
- * 1. hdr
+ * 1. hdr: RNDIS header and PPI
* 2. skb linear data
* 3. skb fragment data
*/
@@ -294,6 +295,9 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
offset_in_page(hdr),
len, &pb[slots_used]);
+ packet->rmsg_size = len;
+ packet->rmsg_pgcnt = slots_used;
+
slots_used += fill_pg_buf(virt_to_page(data),
offset_in_page(data),
skb_headlen(skb), &pb[slots_used]);
@@ -370,50 +374,65 @@ not_ip:
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_netvsc_packet *packet;
+ struct hv_netvsc_packet *packet = NULL;
int ret;
unsigned int num_data_pgs;
struct rndis_message *rndis_msg;
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
bool isvlan;
+ bool linear = false;
struct rndis_per_packet_info *ppi;
struct ndis_tcp_ip_checksum_info *csum_info;
struct ndis_tcp_lso_info *lso_info;
int hdr_offset;
u32 net_trans_info;
u32 hash;
- u32 skb_length = skb->len;
+ u32 skb_length;
+ u32 pkt_sz;
+ struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
/* We will atmost need two pages to describe the rndis
* header. We can only transmit MAX_PAGE_BUFFER_COUNT number
- * of pages in a single packet.
+ * of pages in a single packet. If the skb is scattered across
+ * more pages, we try to linearize it.
*/
+
+check_size:
+ skb_length = skb->len;
num_data_pgs = netvsc_get_slots(skb) + 2;
- if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
- netdev_err(net, "Packet too big: %u\n", skb->len);
- dev_kfree_skb(skb);
- net->stats.tx_dropped++;
- return NETDEV_TX_OK;
+ if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
+ net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
+ num_data_pgs, skb->len);
+ ret = -EFAULT;
+ goto drop;
+ } else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+ if (skb_linearize(skb)) {
+ net_alert_ratelimited("failed to linearize skb\n");
+ ret = -ENOMEM;
+ goto drop;
+ }
+ linear = true;
+ goto check_size;
}
- /* Allocate a netvsc packet based on # of frags. */
- packet = kzalloc(sizeof(struct hv_netvsc_packet) +
- (num_data_pgs * sizeof(struct hv_page_buffer)) +
- sizeof(struct rndis_message) +
- NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
- NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
- if (!packet) {
- /* out of memory, drop packet */
- netdev_err(net, "unable to allocate hv_netvsc_packet\n");
-
- dev_kfree_skb(skb);
- net->stats.tx_dropped++;
- return NETDEV_TX_OK;
+ pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
+
+ ret = skb_cow_head(skb, pkt_sz);
+ if (ret) {
+ netdev_err(net, "unable to alloc hv_netvsc_packet\n");
+ ret = -ENOMEM;
+ goto drop;
}
+ /* Use the headroom for building up the packet */
+ packet = (struct hv_netvsc_packet *)skb->head;
+
+ packet->status = 0;
+ packet->xmit_more = skb->xmit_more;
packet->vlan_tci = skb->vlan_tci;
+ packet->page_buf = page_buf;
packet->q_idx = skb_get_queue_mapping(skb);
@@ -421,8 +440,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->total_data_buflen = skb->len;
packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
- sizeof(struct hv_netvsc_packet) +
- (num_data_pgs * sizeof(struct hv_page_buffer)));
+ sizeof(struct hv_netvsc_packet));
+
+ memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);
/* Set the completion routine */
packet->send_completion = netvsc_xmit_completion;
@@ -554,7 +574,7 @@ do_send:
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
- skb, &packet->page_buf[0]);
+ skb, packet);
ret = netvsc_send(net_device_ctx->device_ctx, packet);
@@ -563,7 +583,6 @@ drop:
net->stats.tx_bytes += skb_length;
net->stats.tx_packets++;
} else {
- kfree(packet);
if (ret != -EAGAIN) {
dev_kfree_skb_any(skb);
net->stats.tx_dropped++;
@@ -687,6 +706,19 @@ static void netvsc_get_drvinfo(struct net_device *net,
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
+static void netvsc_get_channels(struct net_device *net,
+ struct ethtool_channels *channel)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct hv_device *dev = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(dev);
+
+ if (nvdev) {
+ channel->max_combined = nvdev->max_chn;
+ channel->combined_count = nvdev->num_chn;
+ }
+}
+
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -760,6 +792,7 @@ static void netvsc_poll_controller(struct net_device *net)
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_channels = netvsc_get_channels,
};
static const struct net_device_ops device_ops = {
@@ -831,16 +864,25 @@ static int netvsc_probe(struct hv_device *dev,
struct netvsc_device_info device_info;
struct netvsc_device *nvdev;
int ret;
+ u32 max_needed_headroom;
net = alloc_etherdev_mq(sizeof(struct net_device_context),
num_online_cpus());
if (!net)
return -ENOMEM;
+ max_needed_headroom = sizeof(struct hv_netvsc_packet) +
+ RNDIS_AND_PPI_SIZE;
+
netif_carrier_off(net);
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
+ net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
+ if (netif_msg_probe(net_device_ctx))
+ netdev_dbg(net, "netvsc msg_enable: %d\n",
+ net_device_ctx->msg_enable);
+
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -855,6 +897,13 @@ static int netvsc_probe(struct hv_device *dev,
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
+ /*
+ * Request additional headroom in the skb.
+ * We will use this space to build the RNDIS
+ * header and other state we need to maintain.
+ */
+ net->needed_headroom = max_needed_headroom;
+
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
ret = rndis_filter_device_add(dev, &device_info);
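In netvsc_start_xmit() above the per-packet kzalloc() is gone: skb_cow_head() guarantees enough private headroom (sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE, matching net->needed_headroom set in netvsc_probe()), and the packet metadata is built directly at skb->head. Oversized scatter lists are handled by linearizing once and retrying before dropping. A minimal sketch of that retry shape, with example_xmit and a rough page estimate standing in for netvsc_start_xmit() and netvsc_get_slots() (assumptions, not the driver's code):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *net,
				unsigned int max_pages, unsigned int headroom)
{
	bool linearized = false;
	unsigned int pages;

retry:
	/* rough slot count: linear data plus fragments, plus up to two
	 * pages for the RNDIS header itself
	 */
	pages = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE) +
		skb_shinfo(skb)->nr_frags + 2;
	if (pages > max_pages) {
		if (linearized)
			goto drop;		/* still too big: give up */
		if (skb_linearize(skb))
			goto drop;		/* no memory for a flat copy */
		linearized = true;
		goto retry;
	}

	if (skb_cow_head(skb, headroom))
		goto drop;			/* cannot get private headroom */

	/* build RNDIS metadata in skb->head, then hand off to netvsc_send() */
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;
	return NETDEV_TX_OK;
}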
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7816d98bdddc..9118cea91882 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -47,8 +47,6 @@ struct rndis_request {
/* Simplify allocation by having a netvsc packet inline */
struct hv_netvsc_packet pkt;
- /* Set 2 pages for rndis requests crossing page boundary */
- struct hv_page_buffer buf[2];
struct rndis_message request_msg;
/*
@@ -210,6 +208,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
{
int ret;
struct hv_netvsc_packet *packet;
+ struct hv_page_buffer page_buf[2];
/* Setup the packet to send it */
packet = &req->pkt;
@@ -217,6 +216,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->is_data_pkt = false;
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;
+ packet->page_buf = page_buf;
packet->page_buf[0].pfn = virt_to_phys(&req->request_msg) >>
PAGE_SHIFT;
@@ -237,6 +237,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
}
packet->send_completion = NULL;
+ packet->xmit_more = false;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@@ -428,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev,
rndis_msg = pkt->data;
- dump_rndis_message(dev, rndis_msg);
+ if (netif_msg_rx_err(net_dev->nd_ctx))
+ dump_rndis_message(dev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
@@ -855,6 +857,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
u32 status;
int ret;
unsigned long t;
+ struct netvsc_device *nvdev = dev->net_dev;
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -889,6 +892,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
status = init_complete->status;
if (status == RNDIS_STATUS_SUCCESS) {
dev->state = RNDIS_DEV_INITIALIZED;
+ nvdev->max_pkt = init_complete->max_pkt_per_msg;
+ nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
ret = 0;
} else {
dev->state = RNDIS_DEV_UNINITIALIZED;
@@ -1027,6 +1032,7 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Initialize the rndis device */
net_device = hv_get_drvdata(dev);
+ net_device->max_chn = 1;
net_device->num_chn = 1;
net_device->extension = rndis_device;
@@ -1094,6 +1100,7 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
+ net_device->max_chn = rsscap.num_recv_que;
net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
num_online_cpus() : rsscap.num_recv_que;
if (net_device->num_chn == 1)
@@ -1135,13 +1142,13 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
- vmbus_are_subchannels_present(dev->channel);
-
ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
out:
- if (ret)
+ if (ret) {
+ net_device->max_chn = 1;
net_device->num_chn = 1;
+ }
return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 7b051eacb7f1..67d00fbc2e0e 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -19,11 +19,12 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/delay.h>
-#include <linux/spinlock.h>
#include <linux/spi/spi.h>
#include <linux/spi/at86rf230.h>
#include <linux/regmap.h>
@@ -46,19 +47,27 @@ struct at86rf2xx_chip_data {
u16 t_off_to_tx_on;
u16 t_frame;
u16 t_p_ack;
- /* completion timeout for tx in msecs */
- u16 t_tx_timeout;
int rssi_base_val;
int (*set_channel)(struct at86rf230_local *, u8, u8);
int (*get_desense_steps)(struct at86rf230_local *, s32);
};
-#define AT86RF2XX_MAX_BUF (127 + 3)
+#define AT86RF2XX_MAX_BUF (127 + 3)
+/* tx retries to access the TX_ON state
+ * if this limit is exceeded, a forced state change is started.
+ *
+ * We assume the max_frame_retries (7) value of 802.15.4 here.
+ */
+#define AT86RF2XX_MAX_TX_RETRIES 7
+/* We use the recommended 5 minutes timeout to recalibrate */
+#define AT86RF2XX_CAL_LOOP_TIMEOUT (5 * 60 * HZ)
struct at86rf230_state_change {
struct at86rf230_local *lp;
+ int irq;
+ struct hrtimer timer;
struct spi_message msg;
struct spi_transfer trx;
u8 buf[AT86RF2XX_MAX_BUF];
@@ -76,6 +85,7 @@ struct at86rf230_local {
struct ieee802154_hw *hw;
struct at86rf2xx_chip_data *data;
struct regmap *regmap;
+ int slp_tr;
struct completion state_complete;
struct at86rf230_state_change state;
@@ -83,166 +93,167 @@ struct at86rf230_local {
struct at86rf230_state_change irq;
bool tx_aret;
+ unsigned long cal_timeout;
s8 max_frame_retries;
bool is_tx;
- /* spinlock for is_tx protection */
- spinlock_t lock;
+ bool is_tx_from_off;
+ u8 tx_retry;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
};
-#define RG_TRX_STATUS (0x01)
-#define SR_TRX_STATUS 0x01, 0x1f, 0
-#define SR_RESERVED_01_3 0x01, 0x20, 5
-#define SR_CCA_STATUS 0x01, 0x40, 6
-#define SR_CCA_DONE 0x01, 0x80, 7
-#define RG_TRX_STATE (0x02)
-#define SR_TRX_CMD 0x02, 0x1f, 0
-#define SR_TRAC_STATUS 0x02, 0xe0, 5
-#define RG_TRX_CTRL_0 (0x03)
-#define SR_CLKM_CTRL 0x03, 0x07, 0
-#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
-#define SR_PAD_IO_CLKM 0x03, 0x30, 4
-#define SR_PAD_IO 0x03, 0xc0, 6
-#define RG_TRX_CTRL_1 (0x04)
-#define SR_IRQ_POLARITY 0x04, 0x01, 0
-#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
-#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
-#define SR_RX_BL_CTRL 0x04, 0x10, 4
-#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
-#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
-#define SR_PA_EXT_EN 0x04, 0x80, 7
-#define RG_PHY_TX_PWR (0x05)
-#define SR_TX_PWR 0x05, 0x0f, 0
-#define SR_PA_LT 0x05, 0x30, 4
-#define SR_PA_BUF_LT 0x05, 0xc0, 6
-#define RG_PHY_RSSI (0x06)
-#define SR_RSSI 0x06, 0x1f, 0
-#define SR_RND_VALUE 0x06, 0x60, 5
-#define SR_RX_CRC_VALID 0x06, 0x80, 7
-#define RG_PHY_ED_LEVEL (0x07)
-#define SR_ED_LEVEL 0x07, 0xff, 0
-#define RG_PHY_CC_CCA (0x08)
-#define SR_CHANNEL 0x08, 0x1f, 0
-#define SR_CCA_MODE 0x08, 0x60, 5
-#define SR_CCA_REQUEST 0x08, 0x80, 7
-#define RG_CCA_THRES (0x09)
-#define SR_CCA_ED_THRES 0x09, 0x0f, 0
-#define SR_RESERVED_09_1 0x09, 0xf0, 4
-#define RG_RX_CTRL (0x0a)
-#define SR_PDT_THRES 0x0a, 0x0f, 0
-#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
-#define RG_SFD_VALUE (0x0b)
-#define SR_SFD_VALUE 0x0b, 0xff, 0
-#define RG_TRX_CTRL_2 (0x0c)
-#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_SUB_MODE 0x0c, 0x04, 2
-#define SR_BPSK_QPSK 0x0c, 0x08, 3
-#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
-#define SR_RESERVED_0c_5 0x0c, 0x60, 5
-#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
-#define RG_ANT_DIV (0x0d)
-#define SR_ANT_CTRL 0x0d, 0x03, 0
-#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
-#define SR_ANT_DIV_EN 0x0d, 0x08, 3
-#define SR_RESERVED_0d_2 0x0d, 0x70, 4
-#define SR_ANT_SEL 0x0d, 0x80, 7
-#define RG_IRQ_MASK (0x0e)
-#define SR_IRQ_MASK 0x0e, 0xff, 0
-#define RG_IRQ_STATUS (0x0f)
-#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
-#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
-#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
-#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
-#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
-#define SR_IRQ_5_AMI 0x0f, 0x20, 5
-#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
-#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
-#define RG_VREG_CTRL (0x10)
-#define SR_RESERVED_10_6 0x10, 0x03, 0
-#define SR_DVDD_OK 0x10, 0x04, 2
-#define SR_DVREG_EXT 0x10, 0x08, 3
-#define SR_RESERVED_10_3 0x10, 0x30, 4
-#define SR_AVDD_OK 0x10, 0x40, 6
-#define SR_AVREG_EXT 0x10, 0x80, 7
-#define RG_BATMON (0x11)
-#define SR_BATMON_VTH 0x11, 0x0f, 0
-#define SR_BATMON_HR 0x11, 0x10, 4
-#define SR_BATMON_OK 0x11, 0x20, 5
-#define SR_RESERVED_11_1 0x11, 0xc0, 6
-#define RG_XOSC_CTRL (0x12)
-#define SR_XTAL_TRIM 0x12, 0x0f, 0
-#define SR_XTAL_MODE 0x12, 0xf0, 4
-#define RG_RX_SYN (0x15)
-#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
-#define SR_RESERVED_15_2 0x15, 0x70, 4
-#define SR_RX_PDT_DIS 0x15, 0x80, 7
-#define RG_XAH_CTRL_1 (0x17)
-#define SR_RESERVED_17_8 0x17, 0x01, 0
-#define SR_AACK_PROM_MODE 0x17, 0x02, 1
-#define SR_AACK_ACK_TIME 0x17, 0x04, 2
-#define SR_RESERVED_17_5 0x17, 0x08, 3
-#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
-#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
-#define SR_RESERVED_17_1 0x17, 0x80, 7
-#define RG_FTN_CTRL (0x18)
-#define SR_RESERVED_18_2 0x18, 0x7f, 0
-#define SR_FTN_START 0x18, 0x80, 7
-#define RG_PLL_CF (0x1a)
-#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
-#define SR_PLL_CF_START 0x1a, 0x80, 7
-#define RG_PLL_DCU (0x1b)
-#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
-#define SR_RESERVED_1b_2 0x1b, 0x40, 6
-#define SR_PLL_DCU_START 0x1b, 0x80, 7
-#define RG_PART_NUM (0x1c)
-#define SR_PART_NUM 0x1c, 0xff, 0
-#define RG_VERSION_NUM (0x1d)
-#define SR_VERSION_NUM 0x1d, 0xff, 0
-#define RG_MAN_ID_0 (0x1e)
-#define SR_MAN_ID_0 0x1e, 0xff, 0
-#define RG_MAN_ID_1 (0x1f)
-#define SR_MAN_ID_1 0x1f, 0xff, 0
-#define RG_SHORT_ADDR_0 (0x20)
-#define SR_SHORT_ADDR_0 0x20, 0xff, 0
-#define RG_SHORT_ADDR_1 (0x21)
-#define SR_SHORT_ADDR_1 0x21, 0xff, 0
-#define RG_PAN_ID_0 (0x22)
-#define SR_PAN_ID_0 0x22, 0xff, 0
-#define RG_PAN_ID_1 (0x23)
-#define SR_PAN_ID_1 0x23, 0xff, 0
-#define RG_IEEE_ADDR_0 (0x24)
-#define SR_IEEE_ADDR_0 0x24, 0xff, 0
-#define RG_IEEE_ADDR_1 (0x25)
-#define SR_IEEE_ADDR_1 0x25, 0xff, 0
-#define RG_IEEE_ADDR_2 (0x26)
-#define SR_IEEE_ADDR_2 0x26, 0xff, 0
-#define RG_IEEE_ADDR_3 (0x27)
-#define SR_IEEE_ADDR_3 0x27, 0xff, 0
-#define RG_IEEE_ADDR_4 (0x28)
-#define SR_IEEE_ADDR_4 0x28, 0xff, 0
-#define RG_IEEE_ADDR_5 (0x29)
-#define SR_IEEE_ADDR_5 0x29, 0xff, 0
-#define RG_IEEE_ADDR_6 (0x2a)
-#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
-#define RG_IEEE_ADDR_7 (0x2b)
-#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
-#define RG_XAH_CTRL_0 (0x2c)
-#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
-#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
-#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
-#define RG_CSMA_SEED_0 (0x2d)
-#define SR_CSMA_SEED_0 0x2d, 0xff, 0
-#define RG_CSMA_SEED_1 (0x2e)
-#define SR_CSMA_SEED_1 0x2e, 0x07, 0
-#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
-#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
-#define SR_AACK_SET_PD 0x2e, 0x20, 5
-#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
-#define RG_CSMA_BE (0x2f)
-#define SR_MIN_BE 0x2f, 0x0f, 0
-#define SR_MAX_BE 0x2f, 0xf0, 4
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR 0x05, 0x0f, 0
+#define SR_PA_LT 0x05, 0x30, 4
+#define SR_PA_BUF_LT 0x05, 0xc0, 6
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
#define CMD_REG 0x80
#define CMD_REG_MASK 0x3f
@@ -283,6 +294,8 @@ struct at86rf230_local {
#define STATE_BUSY_RX_AACK_NOCLK 0x1E
#define STATE_TRANSITION_IN_PROGRESS 0x1F
+#define TRX_STATE_MASK (0x1F)
+
#define AT86RF2XX_NUMREGS 0x3F
static void
@@ -313,7 +326,7 @@ at86rf230_read_subreg(struct at86rf230_local *lp,
int rc;
rc = __at86rf230_read(lp, addr, data);
- if (rc > 0)
+ if (!rc)
*data = (*data & mask) >> shift;
return rc;
@@ -327,6 +340,14 @@ at86rf230_write_subreg(struct at86rf230_local *lp,
return regmap_update_bits(lp->regmap, addr, mask, data << shift);
}
+static inline void
+at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp)
+{
+ gpio_set_value(lp->slp_tr, 1);
+ udelay(1);
+ gpio_set_value(lp->slp_tr, 0);
+}
+
static bool
at86rf230_reg_writeable(struct device *dev, unsigned int reg)
{
@@ -409,6 +430,8 @@ at86rf230_reg_volatile(struct device *dev, unsigned int reg)
case RG_PHY_ED_LEVEL:
case RG_IRQ_STATUS:
case RG_VREG_CTRL:
+ case RG_PLL_CF:
+ case RG_PLL_DCU:
return true;
default:
return false;
@@ -446,6 +469,7 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
+ lp->is_tx = 0;
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false);
ieee802154_wake_queue(lp->hw);
}
@@ -472,25 +496,32 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
u8 *tx_buf = ctx->buf;
tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
- ctx->trx.len = 2;
ctx->msg.complete = complete;
ctx->irq_enable = irq_enable;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
if (irq_enable)
- enable_irq(lp->spi->irq);
+ enable_irq(ctx->irq);
at86rf230_async_error(lp, ctx, rc);
}
}
+static inline u8 at86rf230_state_to_force(u8 state)
+{
+ if (state == STATE_TX_ON)
+ return STATE_FORCE_TX_ON;
+ else
+ return STATE_FORCE_TRX_OFF;
+}
+
static void
at86rf230_async_state_assert(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = ctx->buf;
- const u8 trx_state = buf[1] & 0x1f;
+ const u8 trx_state = buf[1] & TRX_STATE_MASK;
/* Assert state change */
if (trx_state != ctx->to_state) {
@@ -514,10 +545,21 @@ at86rf230_async_state_assert(void *context)
* in STATE_BUSY_RX_AACK, we run a force state change
* to STATE_TX_ON. This is a timeout handling, if the
* transceiver stucks in STATE_BUSY_RX_AACK.
+ *
+ * Additionally we do several retries to try to get into
+ * the TX_ON state without forcing. If the retry count is
+ * greater than or equal to AT86RF2XX_MAX_TX_RETRIES we
+ * will do a force change.
*/
- if (ctx->to_state == STATE_TX_ON) {
- at86rf230_async_state_change(lp, ctx,
- STATE_FORCE_TX_ON,
+ if (ctx->to_state == STATE_TX_ON ||
+ ctx->to_state == STATE_TRX_OFF) {
+ u8 state = ctx->to_state;
+
+ if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES)
+ state = at86rf230_state_to_force(state);
+ lp->tx_retry++;
+
+ at86rf230_async_state_change(lp, ctx, state,
ctx->complete,
ctx->irq_enable);
return;
@@ -533,6 +575,19 @@ done:
ctx->complete(context);
}
+static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer)
+{
+ struct at86rf230_state_change *ctx =
+ container_of(timer, struct at86rf230_state_change, timer);
+ struct at86rf230_local *lp = ctx->lp;
+
+ at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+ at86rf230_async_state_assert,
+ ctx->irq_enable);
+
+ return HRTIMER_NORESTART;
+}
+
/* Do state change timing delay. */
static void
at86rf230_async_state_delay(void *context)
@@ -541,6 +596,7 @@ at86rf230_async_state_delay(void *context)
struct at86rf230_local *lp = ctx->lp;
struct at86rf2xx_chip_data *c = lp->data;
bool force = false;
+ ktime_t tim;
/* The force state changes are will show as normal states in the
* state status subregister. We change the to_state to the
@@ -564,11 +620,21 @@ at86rf230_async_state_delay(void *context)
case STATE_TRX_OFF:
switch (ctx->to_state) {
case STATE_RX_AACK_ON:
- usleep_range(c->t_off_to_aack, c->t_off_to_aack + 10);
+ tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
+ /* state change from TRX_OFF to RX_AACK_ON to do a
+ * calibration, we need to reset the timeout for the
+ * next one.
+ */
+ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
goto change;
+ case STATE_TX_ARET_ON:
case STATE_TX_ON:
- usleep_range(c->t_off_to_tx_on,
- c->t_off_to_tx_on + 10);
+ tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
+ /* state change from TRX_OFF to TX_ON or ARET_ON to do
+ * a calibration, we need to reset the timeout for the
+ * next one.
+ */
+ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
goto change;
default:
break;
@@ -576,14 +642,15 @@ at86rf230_async_state_delay(void *context)
break;
case STATE_BUSY_RX_AACK:
switch (ctx->to_state) {
+ case STATE_TRX_OFF:
case STATE_TX_ON:
/* Wait for worst case receiving time if we
* didn't make a force change from BUSY_RX_AACK
- * to TX_ON.
+ * to TX_ON or TRX_OFF.
*/
if (!force) {
- usleep_range(c->t_frame + c->t_p_ack,
- c->t_frame + c->t_p_ack + 1000);
+ tim = ktime_set(0, (c->t_frame + c->t_p_ack) *
+ NSEC_PER_USEC);
goto change;
}
break;
@@ -595,7 +662,7 @@ at86rf230_async_state_delay(void *context)
case STATE_P_ON:
switch (ctx->to_state) {
case STATE_TRX_OFF:
- usleep_range(c->t_reset_to_off, c->t_reset_to_off + 10);
+ tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC);
goto change;
default:
break;
@@ -606,12 +673,10 @@ at86rf230_async_state_delay(void *context)
}
/* Default delay is 1us in the most cases */
- udelay(1);
+ tim = ktime_set(0, NSEC_PER_USEC);
change:
- at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
- at86rf230_async_state_assert,
- ctx->irq_enable);
+ hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
}
static void
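The at86rf230 hunks above replace the blocking usleep_range() calls in the async state-change path with an hrtimer: the required settling time is expressed as a ktime_t and the TRX_STATUS readback happens from the timer callback, so the SPI completion context never sleeps. A generic sketch of that pattern (names are placeholders, not the driver's):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/kernel.h>

struct example_ctx {
	struct hrtimer timer;
	/* ... whatever the deferred step needs ... */
};

static enum hrtimer_restart example_timer_fired(struct hrtimer *timer)
{
	struct example_ctx *ctx = container_of(timer, struct example_ctx, timer);

	/* kick off the next async SPI transfer here (previously done inline
	 * after a usleep_range)
	 */
	(void)ctx;
	return HRTIMER_NORESTART;
}

static void example_delay_us(struct example_ctx *ctx, unsigned int us)
{
	hrtimer_start(&ctx->timer, ktime_set(0, us * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}

/* at init time:
 *	hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	ctx->timer.function = example_timer_fired;
 */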
@@ -620,7 +685,7 @@ at86rf230_async_state_change_start(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
u8 *buf = ctx->buf;
- const u8 trx_state = buf[1] & 0x1f;
+ const u8 trx_state = buf[1] & TRX_STATE_MASK;
int rc;
/* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
@@ -647,12 +712,11 @@ at86rf230_async_state_change_start(void *context)
*/
buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
buf[1] = ctx->to_state;
- ctx->trx.len = 2;
ctx->msg.complete = at86rf230_async_state_delay;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
if (ctx->irq_enable)
- enable_irq(lp->spi->irq);
+ enable_irq(ctx->irq);
at86rf230_async_error(lp, ctx, rc);
}
@@ -689,7 +753,7 @@ at86rf230_sync_state_change_complete(void *context)
static int
at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
{
- int rc;
+ unsigned long rc;
at86rf230_async_state_change(lp, &lp->state, state,
at86rf230_sync_state_change_complete,
@@ -710,11 +774,10 @@ at86rf230_tx_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- struct sk_buff *skb = lp->tx_skb;
- enable_irq(lp->spi->irq);
+ enable_irq(ctx->irq);
- ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
+ ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
}
static void
@@ -723,21 +786,11 @@ at86rf230_tx_on(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
+ at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
at86rf230_tx_complete, true);
}
static void
-at86rf230_tx_trac_error(void *context)
-{
- struct at86rf230_state_change *ctx = context;
- struct at86rf230_local *lp = ctx->lp;
-
- at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- at86rf230_tx_on, true);
-}
-
-static void
at86rf230_tx_trac_check(void *context)
{
struct at86rf230_state_change *ctx = context;
@@ -746,12 +799,12 @@ at86rf230_tx_trac_check(void *context)
const u8 trac = (buf[1] & 0xe0) >> 5;
/* If trac status is different than zero we need to do a state change
- * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
- * state to TX_ON.
+ * to STATE_FORCE_TRX_OFF then STATE_RX_AACK_ON to recover the
+ * transceiver.
*/
if (trac)
at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
- at86rf230_tx_trac_error, true);
+ at86rf230_tx_on, true);
else
at86rf230_tx_on(context);
}
@@ -767,14 +820,25 @@ at86rf230_tx_trac_status(void *context)
}
static void
-at86rf230_rx(struct at86rf230_local *lp,
- const u8 *data, const u8 len, const u8 lqi)
+at86rf230_rx_read_frame_complete(void *context)
{
- struct sk_buff *skb;
+ struct at86rf230_state_change *ctx = context;
+ struct at86rf230_local *lp = ctx->lp;
u8 rx_local_buf[AT86RF2XX_MAX_BUF];
+ const u8 *buf = ctx->buf;
+ struct sk_buff *skb;
+ u8 len, lqi;
- memcpy(rx_local_buf, data, len);
- enable_irq(lp->spi->irq);
+ len = buf[1];
+ if (!ieee802154_is_valid_psdu_len(len)) {
+ dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+ len = IEEE802154_MTU;
+ }
+ lqi = buf[2 + len];
+
+ memcpy(rx_local_buf, buf + 2, len);
+ ctx->trx.len = 2;
+ enable_irq(ctx->irq);
skb = dev_alloc_skb(IEEE802154_MTU);
if (!skb) {
@@ -787,60 +851,41 @@ at86rf230_rx(struct at86rf230_local *lp,
}
static void
-at86rf230_rx_read_frame_complete(void *context)
+at86rf230_rx_read_frame(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- const u8 *buf = lp->irq.buf;
- u8 len = buf[1];
-
- if (!ieee802154_is_valid_psdu_len(len)) {
- dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
- len = IEEE802154_MTU;
- }
-
- at86rf230_rx(lp, buf + 2, len, buf[2 + len]);
-}
-
-static void
-at86rf230_rx_read_frame(struct at86rf230_local *lp)
-{
+ u8 *buf = ctx->buf;
int rc;
- u8 *buf = lp->irq.buf;
-
buf[0] = CMD_FB;
- lp->irq.trx.len = AT86RF2XX_MAX_BUF;
- lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
- rc = spi_async(lp->spi, &lp->irq.msg);
+ ctx->trx.len = AT86RF2XX_MAX_BUF;
+ ctx->msg.complete = at86rf230_rx_read_frame_complete;
+ rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
- enable_irq(lp->spi->irq);
- at86rf230_async_error(lp, &lp->irq, rc);
+ ctx->trx.len = 2;
+ enable_irq(ctx->irq);
+ at86rf230_async_error(lp, ctx, rc);
}
}
static void
at86rf230_rx_trac_check(void *context)
{
- struct at86rf230_state_change *ctx = context;
- struct at86rf230_local *lp = ctx->lp;
-
/* Possible check on trac status here. This could be useful to gather
* statistics on why a receive failed. Not used at the moment, but it's
* maybe timing relevant. The datasheet doesn't say anything about this;
* the programming guide says to do it.
*/
- at86rf230_rx_read_frame(lp);
+ at86rf230_rx_read_frame(context);
}
static void
at86rf230_irq_trx_end(struct at86rf230_local *lp)
{
- spin_lock(&lp->lock);
if (lp->is_tx) {
lp->is_tx = 0;
- spin_unlock(&lp->lock);
if (lp->tx_aret)
at86rf230_async_state_change(lp, &lp->irq,
@@ -853,7 +898,6 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp)
at86rf230_tx_complete,
true);
} else {
- spin_unlock(&lp->lock);
at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
at86rf230_rx_trac_check, true);
}
@@ -864,13 +908,13 @@ at86rf230_irq_status(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- const u8 *buf = lp->irq.buf;
+ const u8 *buf = ctx->buf;
const u8 irq = buf[1];
if (irq & IRQ_TRX_END) {
at86rf230_irq_trx_end(lp);
} else {
- enable_irq(lp->spi->irq);
+ enable_irq(ctx->irq);
dev_err(&lp->spi->dev, "not supported irq %02x received\n",
irq);
}
@@ -886,7 +930,6 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
disable_irq_nosync(irq);
buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
- ctx->trx.len = 2;
ctx->msg.complete = at86rf230_irq_status;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
@@ -906,13 +949,18 @@ at86rf230_write_frame_complete(void *context)
u8 *buf = ctx->buf;
int rc;
- buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
- buf[1] = STATE_BUSY_TX;
ctx->trx.len = 2;
- ctx->msg.complete = NULL;
- rc = spi_async(lp->spi, &ctx->msg);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ at86rf230_slp_tr_rising_edge(lp);
+ } else {
+ buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+ buf[1] = STATE_BUSY_TX;
+ ctx->msg.complete = NULL;
+ rc = spi_async(lp->spi, &ctx->msg);
+ if (rc)
+ at86rf230_async_error(lp, ctx, rc);
+ }
}
static void
@@ -921,21 +969,21 @@ at86rf230_write_frame(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
struct sk_buff *skb = lp->tx_skb;
- u8 *buf = lp->tx.buf;
+ u8 *buf = ctx->buf;
int rc;
- spin_lock(&lp->lock);
lp->is_tx = 1;
- spin_unlock(&lp->lock);
buf[0] = CMD_FB | CMD_WRITE;
buf[1] = skb->len + 2;
memcpy(buf + 2, skb->data, skb->len);
- lp->tx.trx.len = skb->len + 2;
- lp->tx.msg.complete = at86rf230_write_frame_complete;
- rc = spi_async(lp->spi, &lp->tx.msg);
- if (rc)
+ ctx->trx.len = skb->len + 2;
+ ctx->msg.complete = at86rf230_write_frame_complete;
+ rc = spi_async(lp->spi, &ctx->msg);
+ if (rc) {
+ ctx->trx.len = 2;
at86rf230_async_error(lp, ctx, rc);
+ }
}
static void
@@ -948,24 +996,56 @@ at86rf230_xmit_tx_on(void *context)
at86rf230_write_frame, false);
}
+static void
+at86rf230_xmit_start(void *context)
+{
+ struct at86rf230_state_change *ctx = context;
+ struct at86rf230_local *lp = ctx->lp;
+
+ /* In ARET mode we need to go into STATE_TX_ARET_ON after we
+ * are in STATE_TX_ON. The path differs here, so we change
+ * the completion handler.
+ */
+ if (lp->tx_aret) {
+ if (lp->is_tx_from_off) {
+ lp->is_tx_from_off = false;
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+ at86rf230_xmit_tx_on,
+ false);
+ } else {
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+ at86rf230_xmit_tx_on,
+ false);
+ }
+ } else {
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+ at86rf230_write_frame, false);
+ }
+}
+
static int
at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
struct at86rf230_local *lp = hw->priv;
struct at86rf230_state_change *ctx = &lp->tx;
- void (*tx_complete)(void *context) = at86rf230_write_frame;
-
lp->tx_skb = skb;
+ lp->tx_retry = 0;
- /* In ARET mode we need to go into STATE_TX_ARET_ON after we
- * are in STATE_TX_ON. The pfad differs here, so we change
- * the complete handler.
+ /* After 5 minutes in PLL on the same frequency we run the
+ * calibration loops again, as recommended by the at86rf2xx datasheets.
+ *
+ * The calibration is initiated by a state change from TRX_OFF
+ * to TX_ON; the state_delay function then reinitializes
+ * lp->cal_timeout so the next run starts 5 minutes later.
*/
- if (lp->tx_aret)
- tx_complete = at86rf230_xmit_tx_on;
-
- at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false);
+ if (time_is_before_jiffies(lp->cal_timeout)) {
+ lp->is_tx_from_off = true;
+ at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
+ at86rf230_xmit_start, false);
+ } else {
+ at86rf230_xmit_start(ctx);
+ }
return 0;
}
@@ -1061,6 +1141,8 @@ at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
/* Wait for PLL */
usleep_range(lp->data->t_channel_switch,
lp->data->t_channel_switch + 10);
+
+ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
return rc;
}
@@ -1112,7 +1194,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
}
static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, int db)
+at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
{
struct at86rf230_local *lp = hw->priv;
@@ -1281,7 +1363,6 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_off_to_tx_on = 80,
.t_frame = 4096,
.t_p_ack = 545,
- .t_tx_timeout = 2000,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
.get_desense_steps = at86rf23x_get_desens_steps
@@ -1295,7 +1376,6 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_off_to_tx_on = 110,
.t_frame = 4096,
.t_p_ack = 545,
- .t_tx_timeout = 2000,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
.get_desense_steps = at86rf23x_get_desens_steps
@@ -1309,13 +1389,12 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_off_to_tx_on = 200,
.t_frame = 4096,
.t_p_ack = 545,
- .t_tx_timeout = 2000,
.rssi_base_val = -100,
.set_channel = at86rf212_set_channel,
.get_desense_steps = at86rf212_get_desens_steps
};
-static int at86rf230_hw_init(struct at86rf230_local *lp)
+static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
{
int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH;
unsigned int dvdd;
@@ -1326,7 +1405,12 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return rc;
irq_type = irq_get_trigger_type(lp->spi->irq);
- if (irq_type == IRQ_TYPE_EDGE_FALLING)
+ if (irq_type == IRQ_TYPE_EDGE_RISING ||
+ irq_type == IRQ_TYPE_EDGE_FALLING)
+ dev_warn(&lp->spi->dev,
+ "Using edge triggered irq's are not recommended!\n");
+ if (irq_type == IRQ_TYPE_EDGE_FALLING ||
+ irq_type == IRQ_TYPE_LEVEL_LOW)
irq_pol = IRQ_ACTIVE_LOW;
rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
@@ -1341,6 +1425,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
if (rc)
return rc;
+ /* reset values differ between at86rf231 and at86rf233 */
+ rc = at86rf230_write_subreg(lp, SR_IRQ_MASK_MODE, 0);
+ if (rc)
+ return rc;
+
get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
if (rc)
@@ -1362,6 +1451,45 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
usleep_range(lp->data->t_sleep_cycle,
lp->data->t_sleep_cycle + 100);
+ /* xtal_trim value is calculated by:
+ * CL = 0.5 * (CX + CTRIM + CPAR)
+ *
+ * where:
+ * CL = load capacitance of the crystal used
+ * CX = capacitors connected at the xtal pins
+ * CPAR = parasitic capacitance; all at86rf2xx datasheets give a
+ * constant value of 3 pF, but it differs with each board
+ * setup, so you need to fine-tune it via CTRIM.
+ * CTRIM = variable capacitor setting. Resolution is 0.3 pF; the
+ * range is 0 pF up to 4.5 pF.
+ *
+ * Examples:
+ * atben transceiver:
+ *
+ * CL = 8 pF
+ * CX = 12 pF
+ * CPAR = 3 pF (We assume the magic constant from datasheet)
+ * CTRIM = 0.9 pF
+ *
+ * (12+0.9+3)/2 = 7.95, which is close to 8 pF
+ *
+ * xtal_trim = 0x3
+ *
+ * openlabs transceiver:
+ *
+ * CL = 16 pF
+ * CX = 22 pF
+ * CPAR = 3 pF (We assume the magic constant from datasheet)
+ * CTRIM = 4.5 pF
+ *
+ * (22+4.5+3)/2 = 14.75, which is as close to 16 pF as CTRIM allows
+ *
+ * xtal_trim = 0xf
+ */
+ rc = at86rf230_write_subreg(lp, SR_XTAL_TRIM, xtal_trim);
+ if (rc)
+ return rc;
+
rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
if (rc)
return rc;
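
The capacitance formula in the comment above lends itself to a quick worked check. Below is a minimal host-side C sketch (not part of the patch) that derives the SR_XTAL_TRIM step from the board capacitances; the 0.3 pF step size, the 0x0-0xf range and the two example boards come from the comment, while the helper name and the rounding choice are assumptions.

#include <stdio.h>

/* Hypothetical helper: CL = (CX + CTRIM + CPAR) / 2, so the CTRIM we
 * want is 2*CL - CX - CPAR, rounded to the nearest 0.3 pF step and
 * clamped to the 0x0..0xf register range.
 */
static unsigned int xtal_trim_from_caps(double cl, double cx, double cpar)
{
	double ctrim = 2.0 * cl - cx - cpar;	/* needed CTRIM in pF */
	int steps = (int)(ctrim / 0.3 + 0.5);	/* 0.3 pF per step */

	if (steps < 0)
		steps = 0;
	if (steps > 0xf)
		steps = 0xf;
	return steps;
}

int main(void)
{
	/* atben:    CL=8,  CX=12 -> 0x3 (matches the comment)          */
	/* openlabs: CL=16, CX=22 -> 0xf (CTRIM saturates at 4.5 pF)    */
	printf("atben:    0x%x\n", xtal_trim_from_caps(8.0, 12.0, 3.0));
	printf("openlabs: 0x%x\n", xtal_trim_from_caps(16.0, 22.0, 3.0));
	return 0;
}
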
@@ -1377,24 +1505,30 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0);
}
-static struct at86rf230_platform_data *
-at86rf230_get_pdata(struct spi_device *spi)
+static int
+at86rf230_get_pdata(struct spi_device *spi, int *rstn, int *slp_tr,
+ u8 *xtal_trim)
{
- struct at86rf230_platform_data *pdata;
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+ int ret;
- if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
- return spi->dev.platform_data;
+ if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) {
+ if (!pdata)
+ return -ENOENT;
- pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- goto done;
+ *rstn = pdata->rstn;
+ *slp_tr = pdata->slp_tr;
+ *xtal_trim = pdata->xtal_trim;
+ return 0;
+ }
- pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
- pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
+ *rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
+ *slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
+ ret = of_property_read_u8(spi->dev.of_node, "xtal-trim", xtal_trim);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
- spi->dev.platform_data = pdata;
-done:
- return pdata;
+ return 0;
}
static int
@@ -1478,66 +1612,78 @@ static void
at86rf230_setup_spi_messages(struct at86rf230_local *lp)
{
lp->state.lp = lp;
+ lp->state.irq = lp->spi->irq;
spi_message_init(&lp->state.msg);
lp->state.msg.context = &lp->state;
+ lp->state.trx.len = 2;
lp->state.trx.tx_buf = lp->state.buf;
lp->state.trx.rx_buf = lp->state.buf;
spi_message_add_tail(&lp->state.trx, &lp->state.msg);
+ hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ lp->state.timer.function = at86rf230_async_state_timer;
lp->irq.lp = lp;
+ lp->irq.irq = lp->spi->irq;
spi_message_init(&lp->irq.msg);
lp->irq.msg.context = &lp->irq;
+ lp->irq.trx.len = 2;
lp->irq.trx.tx_buf = lp->irq.buf;
lp->irq.trx.rx_buf = lp->irq.buf;
spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
+ hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ lp->irq.timer.function = at86rf230_async_state_timer;
lp->tx.lp = lp;
+ lp->tx.irq = lp->spi->irq;
spi_message_init(&lp->tx.msg);
lp->tx.msg.context = &lp->tx;
+ lp->tx.trx.len = 2;
lp->tx.trx.tx_buf = lp->tx.buf;
lp->tx.trx.rx_buf = lp->tx.buf;
spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
+ hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ lp->tx.timer.function = at86rf230_async_state_timer;
}
static int at86rf230_probe(struct spi_device *spi)
{
- struct at86rf230_platform_data *pdata;
struct ieee802154_hw *hw;
struct at86rf230_local *lp;
unsigned int status;
- int rc, irq_type;
+ int rc, irq_type, rstn, slp_tr;
+ u8 xtal_trim = 0;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
return -EINVAL;
}
- pdata = at86rf230_get_pdata(spi);
- if (!pdata) {
- dev_err(&spi->dev, "no platform_data\n");
- return -EINVAL;
+ rc = at86rf230_get_pdata(spi, &rstn, &slp_tr, &xtal_trim);
+ if (rc < 0) {
+ dev_err(&spi->dev, "failed to parse platform_data: %d\n", rc);
+ return rc;
}
- if (gpio_is_valid(pdata->rstn)) {
- rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
+ if (gpio_is_valid(rstn)) {
+ rc = devm_gpio_request_one(&spi->dev, rstn,
GPIOF_OUT_INIT_HIGH, "rstn");
if (rc)
return rc;
}
- if (gpio_is_valid(pdata->slp_tr)) {
- rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
+ if (gpio_is_valid(slp_tr)) {
+ rc = devm_gpio_request_one(&spi->dev, slp_tr,
GPIOF_OUT_INIT_LOW, "slp_tr");
if (rc)
return rc;
}
/* Reset */
- if (gpio_is_valid(pdata->rstn)) {
+ if (gpio_is_valid(rstn)) {
udelay(1);
- gpio_set_value(pdata->rstn, 0);
+ gpio_set_value(rstn, 0);
udelay(1);
- gpio_set_value(pdata->rstn, 1);
+ gpio_set_value(rstn, 1);
usleep_range(120, 240);
}
@@ -1548,6 +1694,7 @@ static int at86rf230_probe(struct spi_device *spi)
lp = hw->priv;
lp->hw = hw;
lp->spi = spi;
+ lp->slp_tr = slp_tr;
hw->parent = &spi->dev;
hw->vif_data_size = sizeof(*lp);
ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
@@ -1566,12 +1713,11 @@ static int at86rf230_probe(struct spi_device *spi)
if (rc < 0)
goto free_dev;
- spin_lock_init(&lp->lock);
init_completion(&lp->state_complete);
spi_set_drvdata(spi, lp);
- rc = at86rf230_hw_init(lp);
+ rc = at86rf230_hw_init(lp, xtal_trim);
if (rc)
goto free_dev;
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 181b349b060e..f833b8bb6663 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -714,11 +714,45 @@ static irqreturn_t cc2520_sfd_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static int cc2520_get_platform_data(struct spi_device *spi,
+ struct cc2520_platform_data *pdata)
+{
+ struct device_node *np = spi->dev.of_node;
+ struct cc2520_private *priv = spi_get_drvdata(spi);
+
+ if (!np) {
+ struct cc2520_platform_data *spi_pdata = spi->dev.platform_data;
+ if (!spi_pdata)
+ return -ENOENT;
+ *pdata = *spi_pdata;
+ return 0;
+ }
+
+ pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
+ priv->fifo_pin = pdata->fifo;
+
+ pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
+
+ pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
+ pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
+ pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
+ pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
+
+ pdata->amplified = of_property_read_bool(np, "amplified");
+
+ return 0;
+}
+
static int cc2520_hw_init(struct cc2520_private *priv)
{
u8 status = 0, state = 0xff;
int ret;
int timeout = 100;
+ struct cc2520_platform_data pdata;
+
+ ret = cc2520_get_platform_data(priv->spi, &pdata);
+ if (ret)
+ goto err_ret;
ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state);
if (ret)
@@ -741,11 +775,47 @@ static int cc2520_hw_init(struct cc2520_private *priv)
dev_vdbg(&priv->spi->dev, "oscillator brought up\n");
- /* Registers default value: section 28.1 in Datasheet */
- ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7);
- if (ret)
- goto err_ret;
+ /* If the CC2520 is connected to a CC2591 amplifier, we must both
+ * set up GPIOs on the CC2520 to correctly configure the CC2591
+ * and change a couple of CC2520 settings to work with the
+ * amplifier. See section 8, page 17 of TI application note AN065.
+ * http://www.ti.com/lit/an/swra229a/swra229a.pdf
+ */
+ if (pdata.amplified) {
+ ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF9);
+ if (ret)
+ goto err_ret;
+
+ ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x16);
+ if (ret)
+ goto err_ret;
+
+ ret = cc2520_write_register(priv, CC2520_GPIOCTRL0, 0x46);
+ if (ret)
+ goto err_ret;
+
+ ret = cc2520_write_register(priv, CC2520_GPIOCTRL5, 0x47);
+ if (ret)
+ goto err_ret;
+
+ ret = cc2520_write_register(priv, CC2520_GPIOPOLARITY, 0x1e);
+ if (ret)
+ goto err_ret;
+
+ ret = cc2520_write_register(priv, CC2520_TXCTRL, 0xc1);
+ if (ret)
+ goto err_ret;
+ } else {
+ ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7);
+ if (ret)
+ goto err_ret;
+ ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11);
+ if (ret)
+ goto err_ret;
+ }
+
+ /* Register default values: section 28.1 in the datasheet */
ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A);
if (ret)
goto err_ret;
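
For boards without device tree, the reworked cc2520_get_platform_data() above now copies spi->dev.platform_data by value, so legacy platform code keeps working as long as it registers a filled-in struct with the "amplified" flag described in the comment above. A rough sketch of what such a board file might provide follows; the GPIO numbers are placeholders and the header path is an assumption, with only the field names taken from the hunks above.

#include <linux/spi/cc2520.h>	/* assumed location of cc2520_platform_data */

/* Hypothetical board-file data; GPIO numbers are made up. */
static struct cc2520_platform_data cc2520_board_pdata = {
	.fifo      = 42,
	.fifop     = 43,
	.sfd       = 44,
	.cca       = 45,
	.vreg      = 46,
	.reset     = 47,
	.amplified = true,	/* CC2591 present: use the AN065 register set */
};
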
@@ -770,10 +840,6 @@ static int cc2520_hw_init(struct cc2520_private *priv)
if (ret)
goto err_ret;
- ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11);
- if (ret)
- goto err_ret;
-
ret = cc2520_write_register(priv, CC2520_ADCTEST0, 0x10);
if (ret)
goto err_ret;
@@ -808,40 +874,10 @@ err_ret:
return ret;
}
-static struct cc2520_platform_data *
-cc2520_get_platform_data(struct spi_device *spi)
-{
- struct cc2520_platform_data *pdata;
- struct device_node *np = spi->dev.of_node;
- struct cc2520_private *priv = spi_get_drvdata(spi);
-
- if (!np)
- return spi->dev.platform_data;
-
- pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- goto done;
-
- pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
- priv->fifo_pin = pdata->fifo;
-
- pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
-
- pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
- pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
- pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
- pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
-
- spi->dev.platform_data = pdata;
-
-done:
- return pdata;
-}
-
static int cc2520_probe(struct spi_device *spi)
{
struct cc2520_private *priv;
- struct cc2520_platform_data *pdata;
+ struct cc2520_platform_data pdata;
int ret;
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
@@ -850,8 +886,8 @@ static int cc2520_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- pdata = cc2520_get_platform_data(spi);
- if (!pdata) {
+ ret = cc2520_get_platform_data(spi, &pdata);
+ if (ret < 0) {
dev_err(&spi->dev, "no platform data\n");
return -EINVAL;
}
@@ -869,76 +905,76 @@ static int cc2520_probe(struct spi_device *spi)
init_completion(&priv->tx_complete);
/* Request all the gpio's */
- if (!gpio_is_valid(pdata->fifo)) {
+ if (!gpio_is_valid(pdata.fifo)) {
dev_err(&spi->dev, "fifo gpio is not valid\n");
ret = -EINVAL;
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata->fifo,
+ ret = devm_gpio_request_one(&spi->dev, pdata.fifo,
GPIOF_IN, "fifo");
if (ret)
goto err_hw_init;
- if (!gpio_is_valid(pdata->cca)) {
+ if (!gpio_is_valid(pdata.cca)) {
dev_err(&spi->dev, "cca gpio is not valid\n");
ret = -EINVAL;
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata->cca,
+ ret = devm_gpio_request_one(&spi->dev, pdata.cca,
GPIOF_IN, "cca");
if (ret)
goto err_hw_init;
- if (!gpio_is_valid(pdata->fifop)) {
+ if (!gpio_is_valid(pdata.fifop)) {
dev_err(&spi->dev, "fifop gpio is not valid\n");
ret = -EINVAL;
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata->fifop,
+ ret = devm_gpio_request_one(&spi->dev, pdata.fifop,
GPIOF_IN, "fifop");
if (ret)
goto err_hw_init;
- if (!gpio_is_valid(pdata->sfd)) {
+ if (!gpio_is_valid(pdata.sfd)) {
dev_err(&spi->dev, "sfd gpio is not valid\n");
ret = -EINVAL;
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata->sfd,
+ ret = devm_gpio_request_one(&spi->dev, pdata.sfd,
GPIOF_IN, "sfd");
if (ret)
goto err_hw_init;
- if (!gpio_is_valid(pdata->reset)) {
+ if (!gpio_is_valid(pdata.reset)) {
dev_err(&spi->dev, "reset gpio is not valid\n");
ret = -EINVAL;
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata->reset,
+ ret = devm_gpio_request_one(&spi->dev, pdata.reset,
GPIOF_OUT_INIT_LOW, "reset");
if (ret)
goto err_hw_init;
- if (!gpio_is_valid(pdata->vreg)) {
+ if (!gpio_is_valid(pdata.vreg)) {
dev_err(&spi->dev, "vreg gpio is not valid\n");
ret = -EINVAL;
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata->vreg,
+ ret = devm_gpio_request_one(&spi->dev, pdata.vreg,
GPIOF_OUT_INIT_LOW, "vreg");
if (ret)
goto err_hw_init;
- gpio_set_value(pdata->vreg, HIGH);
+ gpio_set_value(pdata.vreg, HIGH);
usleep_range(100, 150);
- gpio_set_value(pdata->reset, HIGH);
+ gpio_set_value(pdata.reset, HIGH);
usleep_range(200, 250);
ret = cc2520_hw_init(priv);
@@ -947,7 +983,7 @@ static int cc2520_probe(struct spi_device *spi)
/* Set up fifop interrupt */
ret = devm_request_irq(&spi->dev,
- gpio_to_irq(pdata->fifop),
+ gpio_to_irq(pdata.fifop),
cc2520_fifop_isr,
IRQF_TRIGGER_RISING,
dev_name(&spi->dev),
@@ -959,7 +995,7 @@ static int cc2520_probe(struct spi_device *spi)
/* Set up sfd interrupt */
ret = devm_request_irq(&spi->dev,
- gpio_to_irq(pdata->sfd),
+ gpio_to_irq(pdata.sfd),
cc2520_sfd_isr,
IRQF_TRIGGER_FALLING,
dev_name(&spi->dev),
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 34f846b4bd05..94570aace241 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -105,7 +105,7 @@ static void ri_tasklet(unsigned long dev)
if (from & AT_EGRESS) {
dev_queue_xmit(skb);
} else if (from & AT_INGRESS) {
- skb_pull(skb, skb->dev->hard_header_len);
+ skb_pull(skb, skb->mac_len);
netif_receive_skb(skb);
} else
BUG();
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b7877a194cfe..c30b5c300c05 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -342,7 +342,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
struct rtable *rt;
int err, ret = NET_XMIT_DROP;
struct flowi4 fl4 = {
- .flowi4_oif = dev->iflink,
+ .flowi4_oif = dev_get_iflink(dev),
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.daddr = ip4h->daddr,
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4fa14208d799..77b92a0fe557 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -114,7 +114,6 @@ static int ipvlan_init(struct net_device *dev)
dev->features = phy_dev->features & IPVLAN_FEATURES;
dev->features |= NETIF_F_LLTX;
dev->gso_max_size = phy_dev->gso_max_size;
- dev->iflink = phy_dev->ifindex;
dev->hard_header_len = phy_dev->hard_header_len;
ipvlan_set_lockdep_class(dev);
@@ -305,6 +304,13 @@ static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
return 0;
}
+static int ipvlan_get_iflink(const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return ipvlan->phy_dev->ifindex;
+}
+
static const struct net_device_ops ipvlan_netdev_ops = {
.ndo_init = ipvlan_init,
.ndo_uninit = ipvlan_uninit,
@@ -317,6 +323,7 @@ static const struct net_device_ops ipvlan_netdev_ops = {
.ndo_get_stats64 = ipvlan_get_stats64,
.ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid,
+ .ndo_get_iflink = ipvlan_get_iflink,
};
static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -336,7 +343,6 @@ static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
static const struct header_ops ipvlan_header_ops = {
.create = ipvlan_hard_header,
- .rebuild = eth_rebuild_header,
.parse = eth_header_parse,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1df38bdae2ee..9f59f17dc317 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -550,7 +550,6 @@ static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
static const struct header_ops macvlan_hard_header_ops = {
.create = macvlan_hard_header,
- .rebuild = eth_rebuild_header,
.parse = eth_header_parse,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
@@ -600,10 +599,18 @@ static int macvlan_open(struct net_device *dev)
goto del_unicast;
}
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(lowerdev, 1);
+ if (err < 0)
+ goto clear_multi;
+ }
+
hash_add:
macvlan_hash_add(vlan);
return 0;
+clear_multi:
+ dev_set_allmulti(lowerdev, -1);
del_unicast:
dev_uc_del(lowerdev, dev->dev_addr);
out:
@@ -639,6 +646,9 @@ static int macvlan_stop(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(lowerdev, -1);
+
dev_uc_del(lowerdev, dev->dev_addr);
hash_del:
@@ -697,6 +707,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(lowerdev,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+
}
}
@@ -787,7 +801,6 @@ static int macvlan_init(struct net_device *dev)
dev->hw_features |= NETIF_F_LRO;
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
- dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -996,6 +1009,13 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
+static int macvlan_dev_get_iflink(const struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ return vlan->lowerdev->ifindex;
+}
+
static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = macvlan_ethtool_get_settings,
@@ -1026,6 +1046,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_netpoll_setup = macvlan_dev_netpoll_setup,
.ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
#endif
+ .ndo_get_iflink = macvlan_dev_get_iflink,
};
void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 27ecc5c4fa26..8c350c5d54ad 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -313,7 +313,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
*/
if (q->flags & IFF_VNET_HDR)
features |= vlan->tap_features;
- if (netif_needs_gso(dev, skb, features)) {
+ if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
if (IS_ERR(segs))
@@ -1118,8 +1118,6 @@ static const struct file_operations macvtap_fops = {
.owner = THIS_MODULE,
.open = macvtap_open,
.release = macvtap_release,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = macvtap_read_iter,
.write_iter = macvtap_write_iter,
.poll = macvtap_poll,
@@ -1130,16 +1128,15 @@ static const struct file_operations macvtap_fops = {
#endif
};
-static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len)
{
struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}
-static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len,
- int flags)
+static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len, int flags)
{
struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
int ret;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ba2f5e710af1..15731d1db918 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -47,6 +47,7 @@
#include <linux/netpoll.h>
#include <linux/inet.h>
#include <linux/configfs.h>
+#include <linux/etherdevice.h>
MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
@@ -185,7 +186,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
mutex_init(&nt->mutex);
- memset(nt->np.remote_mac, 0xff, ETH_ALEN);
+ eth_broadcast_addr(nt->np.remote_mac);
/* Parse parameters and setup netpoll */
err = netpoll_parse_options(&nt->np, target_config);
@@ -604,7 +605,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
mutex_init(&nt->mutex);
- memset(nt->np.remote_mac, 0xff, ETH_ALEN);
+ eth_broadcast_addr(nt->np.remote_mac);
/* Initialize the config_item member */
config_item_init_type_name(&nt->item, name, &netconsole_target_type);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 16adbc481772..70641d2c0429 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -27,6 +27,7 @@ config AMD_PHY
config AMD_XGBE_PHY
tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
depends on (OF || ACPI) && HAS_IOMEM
+ depends on ARM64 || COMPILE_TEST
---help---
Currently supports the AMD 10GbE PHY
@@ -68,8 +69,8 @@ config SMSC_PHY
config BROADCOM_PHY
tristate "Drivers for Broadcom PHYs"
---help---
- Currently supports the BCM5411, BCM5421, BCM5461, BCM5464, BCM5481
- and BCM5482 PHYs.
+ Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
+ BCM5481 and BCM5482 PHYs.
config BCM63XX_PHY
tristate "Drivers for Broadcom 63xx SOCs internal PHY"
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 32efbd48f326..fb276f64cd64 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -78,6 +78,7 @@
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/acpi.h>
+#include <linux/jiffies.h>
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
@@ -100,6 +101,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define XGBE_PHY_SPEED_2500 1
#define XGBE_PHY_SPEED_10000 2
+#define XGBE_AN_MS_TIMEOUT 500
+
#define XGBE_AN_INT_CMPLT 0x01
#define XGBE_AN_INC_LINK 0x02
#define XGBE_AN_PG_RCV 0x04
@@ -434,6 +437,7 @@ struct amd_xgbe_phy_priv {
unsigned int an_supported;
unsigned int parallel_detect;
unsigned int fec_ability;
+ unsigned long an_start;
unsigned int lpm_ctrl; /* CTRL1 for resume */
};
@@ -902,8 +906,23 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
enum amd_xgbe_phy_rx *state;
+ unsigned long an_timeout;
int ret;
+ if (!priv->an_start) {
+ priv->an_start = jiffies;
+ } else {
+ an_timeout = priv->an_start +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ priv->kr_state = AMD_XGBE_RX_BPA;
+ priv->kx_state = AMD_XGBE_RX_BPA;
+
+ priv->an_start = jiffies;
+ }
+ }
+
state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
: &priv->kx_state;
@@ -932,8 +951,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
if (amd_xgbe_phy_in_kr_mode(phydev)) {
priv->kr_state = AMD_XGBE_RX_ERROR;
- if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
- !(phydev->supported & SUPPORTED_2500baseX_Full))
+ if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
+ !(phydev->advertising & SUPPORTED_2500baseX_Full))
return AMD_XGBE_AN_NO_LINK;
if (priv->kx_state != AMD_XGBE_RX_BPA)
@@ -941,7 +960,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
} else {
priv->kx_state = AMD_XGBE_RX_ERROR;
- if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
+ if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
return AMD_XGBE_AN_NO_LINK;
if (priv->kr_state != AMD_XGBE_RX_BPA)
@@ -1078,6 +1097,7 @@ again:
priv->an_state = AMD_XGBE_AN_READY;
priv->kr_state = AMD_XGBE_RX_BPA;
priv->kx_state = AMD_XGBE_RX_BPA;
+ priv->an_start = 0;
}
if (cur_state != priv->an_state)
@@ -1101,7 +1121,7 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->supported & SUPPORTED_10000baseR_FEC)
+ if (phydev->advertising & SUPPORTED_10000baseR_FEC)
ret |= 0xc000;
else
ret &= ~0xc000;
@@ -1113,13 +1133,13 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ if (phydev->advertising & SUPPORTED_10000baseKR_Full)
ret |= 0x80;
else
ret &= ~0x80;
- if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
- (phydev->supported & SUPPORTED_2500baseX_Full))
+ if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
+ (phydev->advertising & SUPPORTED_2500baseX_Full))
ret |= 0x20;
else
ret &= ~0x20;
@@ -1131,12 +1151,12 @@ static int amd_xgbe_an_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->supported & SUPPORTED_Pause)
+ if (phydev->advertising & SUPPORTED_Pause)
ret |= 0x400;
else
ret &= ~0x400;
- if (phydev->supported & SUPPORTED_Asym_Pause)
+ if (phydev->advertising & SUPPORTED_Asym_Pause)
ret |= 0x800;
else
ret &= ~0x800;
@@ -1212,38 +1232,14 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
priv->an_irq_allocated = 1;
}
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
- if (ret < 0)
- return ret;
- priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
-
- /* Initialize supported features */
- phydev->supported = SUPPORTED_Autoneg;
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->supported |= SUPPORTED_Backplane;
- phydev->supported |= SUPPORTED_10000baseKR_Full;
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->supported |= SUPPORTED_1000baseKX_Full;
- break;
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- phydev->supported |= SUPPORTED_2500baseX_Full;
- break;
- }
-
- if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
- phydev->supported |= SUPPORTED_10000baseR_FEC;
-
- phydev->advertising = phydev->supported;
-
/* Set initial mode - call the mode setting routines
* directly to insure we are properly configured
*/
- if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ if (phydev->advertising & SUPPORTED_10000baseKR_Full)
ret = amd_xgbe_phy_xgmii_mode(phydev);
- else if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
ret = amd_xgbe_phy_gmii_mode(phydev);
- else if (phydev->supported & SUPPORTED_2500baseX_Full)
+ else if (phydev->advertising & SUPPORTED_2500baseX_Full)
ret = amd_xgbe_phy_gmii_2500_mode(phydev);
else
ret = -EINVAL;
@@ -1315,10 +1311,10 @@ static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
disable_irq(priv->an_irq);
/* Start auto-negotiation in a supported mode */
- if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ if (phydev->advertising & SUPPORTED_10000baseKR_Full)
ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
- (phydev->supported & SUPPORTED_2500baseX_Full))
+ else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
+ (phydev->advertising & SUPPORTED_2500baseX_Full))
ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
else
ret = -EINVAL;
@@ -1746,6 +1742,29 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
sizeof(priv->serdes_dfe_tap_ena));
}
+ /* Initialize supported features */
+ phydev->supported = SUPPORTED_Autoneg;
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->supported |= SUPPORTED_Backplane;
+ phydev->supported |= SUPPORTED_10000baseKR_Full;
+ switch (priv->speed_set) {
+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+ phydev->supported |= SUPPORTED_1000baseKX_Full;
+ break;
+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ phydev->supported |= SUPPORTED_2500baseX_Full;
+ break;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
+ if (ret < 0)
+ return ret;
+ priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
+ if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
+ phydev->supported |= SUPPORTED_10000baseR_FEC;
+
+ phydev->advertising = phydev->supported;
+
phydev->priv = priv;
if (!priv->adev || acpi_disabled)
@@ -1817,6 +1836,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
.phy_id_mask = XGBE_PHY_MASK,
.name = "AMD XGBE PHY",
.features = 0,
+ .flags = PHY_IS_INTERNAL,
.probe = amd_xgbe_phy_probe,
.remove = amd_xgbe_phy_remove,
.soft_reset = amd_xgbe_phy_soft_reset,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f80e19ac6704..fabf11d32d27 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -192,16 +192,17 @@ static int at803x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->dev;
struct at803x_priv *priv;
+ struct gpio_desc *gpiod_reset;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->gpiod_reset = devm_gpiod_get(dev, "reset");
- if (IS_ERR(priv->gpiod_reset))
- priv->gpiod_reset = NULL;
- else
- gpiod_direction_output(priv->gpiod_reset, 1);
+ gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod_reset))
+ return PTR_ERR(gpiod_reset);
+
+ priv->gpiod_reset = gpiod_reset;
phydev->priv = priv;
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 974ec4515269..64c74c6a4828 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -396,6 +396,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
+ BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
{
.phy_id = PHY_ID_BCM7425,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a52afb26421b..9c71295f2fef 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -549,6 +549,19 @@ static struct phy_driver broadcom_drivers[] = {
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
}, {
+ .phy_id = PHY_ID_BCM54616S,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54616S",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm54xx_ack_interrupt,
+ .config_intr = bcm54xx_config_intr,
+ .driver = { .owner = THIS_MODULE },
+}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
@@ -660,6 +673,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
{ PHY_ID_BCM5461, 0xfffffff0 },
+ { PHY_ID_BCM54616S, 0xfffffff0 },
{ PHY_ID_BCM5464, 0xfffffff0 },
{ PHY_ID_BCM5481, 0xfffffff0 },
{ PHY_ID_BCM5482, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index e22e602beef3..496e02f961d3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -257,7 +257,7 @@ static void ext_write(int broadcast, struct phy_device *phydev,
/* Caller must hold extreg_lock. */
static int tdr_write(int bc, struct phy_device *dev,
- const struct timespec *ts, u16 cmd)
+ const struct timespec64 *ts, u16 cmd)
{
ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0] */
ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16); /* ns[31:16] */
@@ -411,12 +411,12 @@ static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
- struct timespec ts;
+ struct timespec64 ts;
int err;
delta += ADJTIME_FIX;
- ts = ns_to_timespec(delta);
+ ts = ns_to_timespec64(delta);
mutex_lock(&clock->extreg_lock);
@@ -427,7 +427,8 @@ static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
return err;
}
-static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_dp83640_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
@@ -452,7 +453,7 @@ static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
}
static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
@@ -605,7 +606,7 @@ static void recalibrate(struct dp83640_clock *clock)
{
s64 now, diff;
struct phy_txts event_ts;
- struct timespec ts;
+ struct timespec64 ts;
struct list_head *this;
struct dp83640_private *tmp;
struct phy_device *master = clock->chosen->phydev;
@@ -614,7 +615,7 @@ static void recalibrate(struct dp83640_clock *clock)
trigger = CAL_TRIGGER;
cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
if (cal_gpio < 1) {
- pr_err("PHY calibration pin not avaible - PHY is not calibrated.");
+ pr_err("PHY calibration pin not available - PHY is not calibrated.");
return;
}
@@ -697,7 +698,7 @@ static void recalibrate(struct dp83640_clock *clock)
diff = now - (s64) phy2txts(&event_ts);
pr_info("slave offset %lld nanoseconds\n", diff);
diff += ADJTIME_FIX;
- ts = ns_to_timespec(diff);
+ ts = ns_to_timespec64(diff);
tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
}
@@ -998,8 +999,8 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.pps = 0;
clock->caps.adjfreq = ptp_dp83640_adjfreq;
clock->caps.adjtime = ptp_dp83640_adjtime;
- clock->caps.gettime = ptp_dp83640_gettime;
- clock->caps.settime = ptp_dp83640_settime;
+ clock->caps.gettime64 = ptp_dp83640_gettime;
+ clock->caps.settime64 = ptp_dp83640_settime;
clock->caps.enable = ptp_dp83640_enable;
clock->caps.verify = ptp_dp83640_verify;
/*
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index a08a3c78ba97..1960b46add65 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -183,6 +183,35 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
+int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed)
+{
+ struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct fixed_phy *fp;
+
+ if (!phydev || !phydev->bus)
+ return -EINVAL;
+
+ list_for_each_entry(fp, &fmb->phys, node) {
+ if (fp->addr == phydev->addr) {
+#define _UPD(x) if (changed->x) \
+ fp->status.x = status->x
+ _UPD(link);
+ _UPD(speed);
+ _UPD(duplex);
+ _UPD(pause);
+ _UPD(asym_pause);
+#undef _UPD
+ fixed_phy_update_regs(fp);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(fixed_phy_update_state);
+
int fixed_phy_add(unsigned int irq, int phy_addr,
struct fixed_phy_status *status)
{
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 6deac6d32f57..414fdf1f343f 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -187,7 +187,7 @@ static int unimac_mdio_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id unimac_mdio_ids[] = {
+static const struct of_device_id unimac_mdio_ids[] = {
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },
{ .compatible = "brcm,genet-mdio-v2", },
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 0a0578a592b8..53d18150f4e2 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
* assume the pin serves as pull-up. If direction is
* output, the default value is high.
*/
- gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
+ gpio_set_value_cansleep(bitbang->mdo,
+ 1 ^ bitbang->mdo_active_low);
return;
}
@@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
+ return gpio_get_value_cansleep(bitbang->mdio) ^
+ bitbang->mdio_active_low;
}
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
container_of(ctrl, struct mdio_gpio_info, ctrl);
if (bitbang->mdo)
- gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
+ gpio_set_value_cansleep(bitbang->mdo,
+ what ^ bitbang->mdo_active_low);
else
- gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
+ gpio_set_value_cansleep(bitbang->mdio,
+ what ^ bitbang->mdio_active_low);
}
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
+ gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
}
static struct mdiobb_ops mdio_gpio_ops = {
@@ -164,7 +168,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
if (!new_bus->irq[i])
new_bus->irq[i] = PHY_POLL;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
+ if (bus_id != -1)
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
+ else
+ strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
goto out_free_bus;
@@ -249,7 +256,7 @@ static int mdio_gpio_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id mdio_gpio_of_match[] = {
+static const struct of_device_id mdio_gpio_of_match[] = {
{ .compatible = "virtual,mdio-gpio", },
{ /* sentinel */ }
};
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 320eb15315c8..66edd99bc302 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -12,33 +12,30 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
-#define MDIO_MUX_GPIO_MAX_BITS 8
-
struct mdio_mux_gpio_state {
- struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS];
- unsigned int num_gpios;
+ struct gpio_descs *gpios;
void *mux_handle;
};
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
- int values[MDIO_MUX_GPIO_MAX_BITS];
- unsigned int n;
struct mdio_mux_gpio_state *s = data;
+ int values[s->gpios->ndescs];
+ unsigned int n;
if (current_child == desired_child)
return 0;
- for (n = 0; n < s->num_gpios; n++) {
+ for (n = 0; n < s->gpios->ndescs; n++)
values[n] = (desired_child >> n) & 1;
- }
- gpiod_set_array_cansleep(s->num_gpios, s->gpio, values);
+
+ gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values);
return 0;
}
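
The switch function above encodes the desired child bus number bit by bit onto the mux GPIOs. A tiny standalone sketch of that mapping (plain C, no gpiolib; the three-line mux and child number 5 are made-up values):

#include <stdio.h>

int main(void)
{
	unsigned int desired_child = 5;	/* hypothetical child bus number */
	unsigned int ndescs = 3;	/* hypothetical number of mux GPIOs */
	int values[3];
	unsigned int n;

	/* Same mapping as mdio_mux_gpio_switch_fn: GPIO n carries bit n. */
	for (n = 0; n < ndescs; n++)
		values[n] = (desired_child >> n) & 1;

	/* child 5 = 0b101 -> values = {1, 0, 1} */
	for (n = 0; n < ndescs; n++)
		printf("gpio[%u] = %d\n", n, values[n]);

	return 0;
}
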
@@ -46,60 +43,37 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
struct mdio_mux_gpio_state *s;
- int num_gpios;
- unsigned int n;
int r;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- num_gpios = of_gpio_count(pdev->dev.of_node);
- if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
- return -ENODEV;
-
s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
- s->num_gpios = num_gpios;
-
- for (n = 0; n < num_gpios; ) {
- struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err;
- }
- s->gpio[n] = gpio;
- n++;
- }
+ s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(s->gpios))
+ return PTR_ERR(s->gpios);
r = mdio_mux_init(&pdev->dev,
mdio_mux_gpio_switch_fn, &s->mux_handle, s);
- if (r == 0) {
- pdev->dev.platform_data = s;
- return 0;
- }
-err:
- while (n) {
- n--;
- gpiod_put(s->gpio[n]);
+ if (r != 0) {
+ gpiod_put_array(s->gpios);
+ return r;
}
- return r;
+
+ pdev->dev.platform_data = s;
+ return 0;
}
static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
- unsigned int n;
struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
- for (n = 0; n < s->num_gpios; n++)
- gpiod_put(s->gpio[n]);
+ gpiod_put_array(s->gpios);
return 0;
}
-static struct of_device_id mdio_mux_gpio_match[] = {
+static const struct of_device_id mdio_mux_gpio_match[] = {
{
.compatible = "mdio-mux-gpio",
},
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 0aa985c74014..2377c1341172 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -145,7 +145,7 @@ static int mdio_mux_mmioreg_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id mdio_mux_mmioreg_match[] = {
+static const struct of_device_id mdio_mux_mmioreg_match[] = {
{
.compatible = "mdio-mux-mmioreg",
},
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index c81052486edc..c838ad6155f7 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -252,7 +252,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id octeon_mdiobus_match[] = {
+static const struct of_device_id octeon_mdiobus_match[] = {
{
.compatible = "cavium,octeon-3860-mdio",
},
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 1190fd8f0088..ebdc357c5131 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -548,7 +548,8 @@ static int kszphy_probe(struct phy_device *phydev)
}
clk = devm_clk_get(&phydev->dev, "rmii-ref");
- if (!IS_ERR(clk)) {
+ /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
+ if (!IS_ERR_OR_NULL(clk)) {
unsigned long rate = clk_get_rate(clk);
bool rmii_ref_clk_sel_25_mhz;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index af034dba9bd6..9d15566521a7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
/* note: a 0-length skb is used as an error indication */
if (skb->len > 0) {
+ skb_checksum_complete_unset(skb);
#ifdef CONFIG_PPP_MULTILINK
/* XXX do channel-level decompression here */
if (PPP_PROTO(skb) == PPP_MP)
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 911b21602ff2..05005c660d4d 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
struct blkcipher_desc desc = { .tfm = state->arc4 };
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
- int sanity = 0;
struct scatterlist sg_in[1], sg_out[1];
if (isize <= PPP_HDRLEN + MPPE_OVHD) {
@@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
"mppe_decompress[%d]: ENCRYPTED bit not set!\n",
state->unit);
state->sanity_errors += 100;
- sanity = 1;
+ goto sanity_error;
}
if (!state->stateful && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
"stateless mode!\n", state->unit);
state->sanity_errors += 100;
- sanity = 1;
+ goto sanity_error;
}
if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
"flag packet!\n", state->unit);
state->sanity_errors += 100;
- sanity = 1;
- }
-
- if (sanity) {
- if (state->sanity_errors < SANITY_MAX)
- return DECOMP_ERROR;
- else
- /*
- * Take LCP down if the peer is sending too many bogons.
- * We don't want to do this for a single or just a few
- * instances since it could just be due to packet corruption.
- */
- return DECOMP_FATALERROR;
+ goto sanity_error;
}
/*
@@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
*/
if (!state->stateful) {
+ /* Discard late packet */
+ if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
+ > MPPE_CCOUNT_SPACE / 2) {
+ state->sanity_errors++;
+ goto sanity_error;
+ }
+
/* RFC 3078, sec 8.1. Rekey for every packet. */
while (state->ccount != ccount) {
mppe_rekey(state, 0);
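
The late-packet test added above treats the MPPE coherency count as a circular space and flags a packet as late when it sits more than half the space behind the expected count. Below is a minimal standalone sketch of that window check; the 0x1000 space size (12-bit count) is an assumption about MPPE_CCOUNT_SPACE, while the modular comparison mirrors the hunk above.

#include <stdbool.h>
#include <stdio.h>

#define CCOUNT_SPACE 0x1000	/* assumed size of the 12-bit ccount space */

/* A received count more than half the space "behind" the expected one
 * is treated as a late/out-of-order packet rather than a jump forward.
 */
static bool ccount_is_late(unsigned int expected, unsigned int received)
{
	return ((received - expected) % CCOUNT_SPACE) > CCOUNT_SPACE / 2;
}

int main(void)
{
	printf("%d\n", ccount_is_late(100, 99));	/* 1: one behind -> late */
	printf("%d\n", ccount_is_late(100, 101));	/* 0: one ahead  -> fine */
	printf("%d\n", ccount_is_late(0, 0xfff));	/* 1: wrapped, one behind */
	return 0;
}
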
@@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
state->sanity_errors >>= 1;
return osize;
+
+sanity_error:
+ if (state->sanity_errors < SANITY_MAX)
+ return DECOMP_ERROR;
+ else
+ /* Take LCP down if the peer is sending too many bogons.
+ * We don't want to do this for a single or just a few
+ * instances since it could just be due to packet corruption.
+ */
+ return DECOMP_FATALERROR;
}
/*
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index d2408a5e43a6..b62a5e3a1c65 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -380,6 +380,9 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
* can't change.
*/
+ if (skb->pkt_type == PACKET_OTHERHOST)
+ goto abort_kfree;
+
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
@@ -455,6 +458,22 @@ out:
return NET_RX_DROP;
}
+static void pppoe_unbind_sock_work(struct work_struct *work)
+{
+ struct pppox_sock *po = container_of(work, struct pppox_sock,
+ proto.pppoe.padt_work);
+ struct sock *sk = sk_pppox(po);
+
+ lock_sock(sk);
+ if (po->pppoe_dev) {
+ dev_put(po->pppoe_dev);
+ po->pppoe_dev = NULL;
+ }
+ pppox_unbind_sock(sk);
+ release_sock(sk);
+ sock_put(sk);
+}
+
/************************************************************************
*
* Receive a PPPoE Discovery frame.
@@ -500,7 +519,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
}
bh_unlock_sock(sk);
- sock_put(sk);
+ if (!schedule_work(&po->proto.pppoe.padt_work))
+ sock_put(sk);
}
abort:
@@ -613,6 +633,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
+ INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
+
error = -EINVAL;
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
@@ -820,8 +842,8 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
return err;
}
-static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
@@ -962,8 +984,8 @@ static const struct ppp_channel_ops pppoe_chan_ops = {
.start_xmit = pppoe_xmit,
};
-static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len, int flags)
+static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1dc628ffce2b..e3bfbd4d0136 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
nf_reset(skb);
skb->ip_summed = CHECKSUM_NONE;
- ip_select_ident(skb, NULL);
+ ip_select_ident(sock_net(sk), skb, NULL);
ip_send_check(iph);
ip_local_out(skb);
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 66c2f1a01963..aad0b59d41e3 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -1175,17 +1175,4 @@ MODULE_AUTHOR("Franco Venturi <fventuri@mediaone.net>");
MODULE_DESCRIPTION("General Instruments SB1000 driver");
MODULE_LICENSE("GPL");
-static int __init
-sb1000_init(void)
-{
- return pnp_register_driver(&sb1000_driver);
-}
-
-static void __exit
-sb1000_exit(void)
-{
- pnp_unregister_driver(&sb1000_driver);
-}
-
-module_init(sb1000_init);
-module_exit(sb1000_exit);
+module_pnp_driver(sb1000_driver);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 7d394846afc2..6928448f6b7f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1935,6 +1935,9 @@ static netdev_features_t team_fix_features(struct net_device *dev,
mask);
}
rcu_read_unlock();
+
+ features = netdev_add_tso_features(features, mask);
+
return features;
}
@@ -1976,6 +1979,7 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_change_carrier = team_change_carrier,
.ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
.ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
+ .ndo_features_check = passthru_features_check,
};
/***********************
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 857dca47bf80..e470ae59d405 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1448,8 +1448,7 @@ static void tun_sock_write_space(struct sock *sk)
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
-static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
int ret;
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -1464,8 +1463,7 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
return ret;
}
-static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len,
+static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
int flags)
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2225,8 +2223,6 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = tun_chr_read_iter,
.write_iter = tun_chr_write_iter,
.poll = tun_chr_poll,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8cfc3bb0c6a6..4e2b26a88b15 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -641,7 +641,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
u8 broadcast[ETH_ALEN];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
- memset(broadcast, 0xff, ETH_ALEN);
+ eth_broadcast_addr(broadcast);
memset(catc->multicast, 0, 64);
catc_multicast(broadcast, catc->multicast);
@@ -880,7 +880,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "Filling the multicast list.\n");
- memset(broadcast, 0xff, ETH_ALEN);
+ eth_broadcast_addr(broadcast);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
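The open-coded memset() calls being replaced in this and the following USB drivers map onto two inline helpers from <linux/etherdevice.h>; paraphrased, they are simply:

	/* Paraphrased from <linux/etherdevice.h>: self-documenting wrappers
	 * around the memset() idioms removed above. */
	static inline void eth_broadcast_addr(u8 *addr)
	{
		memset(addr, 0xff, ETH_ALEN);
	}

	static inline void eth_zero_addr(u8 *addr)
	{
		memset(addr, 0x00, ETH_ALEN);
	}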
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 96fc8a5bde84..e4b7a47a825c 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -394,7 +394,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
skb_put(skb, ETH_HLEN);
skb_reset_mac_header(skb);
eth_hdr(skb)->h_proto = proto;
- memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+ eth_zero_addr(eth_hdr(skb)->h_source);
memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* add datagram */
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 1762ad3910b2..e221bfcee76b 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -172,7 +172,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
if (!ret && link[0] == 1 && link[2] == 1)
break;
msleep(500);
- };
+ }
if (!timeout) {
dev_err(&udev->dev, "firmware not ready in time\n");
return -ETIMEDOUT;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 778e91531fac..111d907e0c11 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1477,6 +1477,7 @@ static void tiocmget_intr_callback(struct urb *urb)
struct uart_icount *icount;
struct hso_serial_state_notification *serial_state_notification;
struct usb_device *usb;
+ struct usb_interface *interface;
int if_num;
/* Sanity checks */
@@ -1494,7 +1495,9 @@ static void tiocmget_intr_callback(struct urb *urb)
BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
usb = serial->parent->usb;
- if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+ interface = serial->parent->interface;
+
+ if_num = interface->cur_altsetting->desc.bInterfaceNumber;
/* wIndex should be the USB interface number of the port to which the
* notification applies, which should always be the Modem port.
@@ -1675,6 +1678,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
unsigned long flags;
int if_num;
struct hso_serial *serial = tty->driver_data;
+ struct usb_interface *interface;
/* sanity check */
if (!serial) {
@@ -1685,7 +1689,8 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM)
return -EINVAL;
- if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+ interface = serial->parent->interface;
+ if_num = interface->cur_altsetting->desc.bInterfaceNumber;
spin_lock_irqsave(&serial->serial_lock, flags);
if (set & TIOCM_RTS)
@@ -2808,7 +2813,7 @@ static int hso_get_config_data(struct usb_interface *interface)
{
struct usb_device *usbdev = interface_to_usbdev(interface);
u8 *config_data = kmalloc(17, GFP_KERNEL);
- u32 if_num = interface->altsetting->desc.bInterfaceNumber;
+ u32 if_num = interface->cur_altsetting->desc.bInterfaceNumber;
s32 result;
if (!config_data)
@@ -2886,7 +2891,7 @@ static int hso_probe(struct usb_interface *interface,
return -ENODEV;
}
- if_num = interface->altsetting->desc.bInterfaceNumber;
+ if_num = interface->cur_altsetting->desc.bInterfaceNumber;
/* Get the interface/port specification from either driver_info or from
* the device itself */
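The hso conversions above switch from interface->altsetting, which points at the array of all alternate settings (i.e. entry 0), to interface->cur_altsetting, which tracks whichever alternate setting is currently installed. A minimal illustration, assuming a valid struct usb_interface *intf:

	/* altsetting[] is the static table of alternate settings;
	 * cur_altsetting follows the one selected at runtime. */
	u8 first_alt_if_num = intf->altsetting[0].desc.bInterfaceNumber;
	u8 current_if_num   = intf->cur_altsetting->desc.bInterfaceNumber;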
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 8f37efd2d2fb..5714107533bb 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -201,7 +201,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
&buf->data[sizeof(*ethhdr) + 0x12],
ETH_ALEN);
} else {
- memset(ethhdr->h_source, 0, ETH_ALEN);
+ eth_zero_addr(ethhdr->h_source);
memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Inbound IPv6 packets have an IPv4 ethertype (0x800)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 602dc6668c3a..f603f362504b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -108,7 +108,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
eth_hdr(skb)->h_proto = proto;
- memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+ eth_zero_addr(eth_hdr(skb)->h_source);
fix_dest:
memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
return 1;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 9f7c0ab3b349..aafa1a1898e4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -104,7 +104,8 @@
#define USB_TX_AGG 0xd40a
#define USB_RX_BUF_TH 0xd40c
#define USB_USB_TIMER 0xd428
-#define USB_RX_EARLY_AGG 0xd42c
+#define USB_RX_EARLY_TIMEOUT 0xd42c
+#define USB_RX_EARLY_SIZE 0xd42e
#define USB_PM_CTRL_STATUS 0xd432
#define USB_TX_DMA 0xd434
#define USB_TOLERANCE 0xd490
@@ -349,10 +350,10 @@
/* USB_MISC_0 */
#define PCUT_STATUS 0x0001
-/* USB_RX_EARLY_AGG */
-#define EARLY_AGG_SUPPER 0x0e832981
-#define EARLY_AGG_HIGH 0x0e837a12
-#define EARLY_AGG_SLOW 0x0e83ffff
+/* USB_RX_EARLY_TIMEOUT */
+#define COALESCE_SUPER 85000U
+#define COALESCE_HIGH 250000U
+#define COALESCE_SLOW 524280U
/* USB_WDT11_CTRL */
#define TIMER11_EN 0x0001
@@ -607,6 +608,7 @@ struct r8152 {
u32 saved_wolopts;
u32 msg_enable;
u32 tx_qlen;
+ u32 coalesce;
u16 ocp_base;
u8 *intr_buff;
u8 version;
@@ -2143,28 +2145,19 @@ static int rtl8152_enable(struct r8152 *tp)
return rtl_enable(tp);
}
-static void r8153_set_rx_agg(struct r8152 *tp)
+static void r8153_set_rx_early_timeout(struct r8152 *tp)
{
- u8 speed;
+ u32 ocp_data = tp->coalesce / 8;
- speed = rtl8152_get_speed(tp);
- if (speed & _1000bps) {
- if (tp->udev->speed == USB_SPEED_SUPER) {
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
- RX_THR_SUPPER);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
- EARLY_AGG_SUPPER);
- } else {
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
- RX_THR_HIGH);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
- EARLY_AGG_HIGH);
- }
- } else {
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
- EARLY_AGG_SLOW);
- }
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data);
+}
+
+static void r8153_set_rx_early_size(struct r8152 *tp)
+{
+ u32 mtu = tp->netdev->mtu;
+ u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
static int rtl8153_enable(struct r8152 *tp)
@@ -2174,7 +2167,8 @@ static int rtl8153_enable(struct r8152 *tp)
set_tx_qlen(tp);
rtl_set_eee_plus(tp);
- r8153_set_rx_agg(tp);
+ r8153_set_rx_early_timeout(tp);
+ r8153_set_rx_early_size(tp);
return rtl_enable(tp);
}
@@ -3720,6 +3714,61 @@ out:
return ret;
}
+static int rtl8152_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ coalesce->rx_coalesce_usecs = tp->coalesce;
+
+ return 0;
+}
+
+static int rtl8152_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ int ret;
+
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ if (coalesce->rx_coalesce_usecs > COALESCE_SLOW)
+ return -EINVAL;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->control);
+
+ if (tp->coalesce != coalesce->rx_coalesce_usecs) {
+ tp->coalesce = coalesce->rx_coalesce_usecs;
+
+ if (netif_running(tp->netdev) && netif_carrier_ok(netdev))
+ r8153_set_rx_early_timeout(tp);
+ }
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+ return ret;
+}
+
static struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_settings = rtl8152_get_settings,
@@ -3733,6 +3782,8 @@ static struct ethtool_ops ops = {
.get_strings = rtl8152_get_strings,
.get_sset_count = rtl8152_get_sset_count,
.get_ethtool_stats = rtl8152_get_ethtool_stats,
+ .get_coalesce = rtl8152_get_coalesce,
+ .set_coalesce = rtl8152_set_coalesce,
.get_eee = rtl_ethtool_get_eee,
.set_eee = rtl_ethtool_set_eee,
};
@@ -3784,6 +3835,7 @@ out:
static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
{
struct r8152 *tp = netdev_priv(dev);
+ int ret;
switch (tp->version) {
case RTL_VER_01:
@@ -3796,9 +3848,22 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU)
return -EINVAL;
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->control);
+
dev->mtu = new_mtu;
- return 0;
+ if (netif_running(dev) && netif_carrier_ok(dev))
+ r8153_set_rx_early_size(tp);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+ return ret;
}
static const struct net_device_ops rtl8152_netdev_ops = {
@@ -3967,6 +4032,18 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.reg_num_mask = 0x1f;
tp->mii.phy_id = R8152_PHY_ID;
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ tp->coalesce = COALESCE_SUPER;
+ break;
+ case USB_SPEED_HIGH:
+ tp->coalesce = COALESCE_HIGH;
+ break;
+ default:
+ tp->coalesce = COALESCE_SLOW;
+ break;
+ }
+
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
@@ -4039,6 +4116,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{}
};
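The new get/set_coalesce hooks are driven from userspace through the standard ethtool interface (e.g. `ethtool -C ethX rx-usecs N`). A hedged userspace sketch of the same operation via the SIOCETHTOOL ioctl; the function name and error handling are illustrative only, and a real tool would fetch the current settings with ETHTOOL_GCOALESCE first so unrelated fields are preserved:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Set rx-usecs on an interface; the driver above rejects values
	 * larger than COALESCE_SLOW (524280) with -EINVAL. */
	static int set_rx_usecs(const char *ifname, unsigned int usecs)
	{
		struct ethtool_coalesce ec = {
			.cmd = ETHTOOL_SCOALESCE,
			.rx_coalesce_usecs = usecs,
		};
		struct ifreq ifr;
		int fd, ret;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ec;

		ret = ioctl(fd, SIOCETHTOOL, &ifr);
		close(fd);
		return ret;
	}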
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 777757ae1973..3c86b107275a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1072,7 +1072,7 @@ static void __handle_set_rx_mode(struct usbnet *dev)
* especially now that control transfers can be queued.
*/
static void
-kevent (struct work_struct *work)
+usbnet_deferred_kevent (struct work_struct *work)
{
struct usbnet *dev =
container_of(work, struct usbnet, kevent);
@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- int length;
+ unsigned int length;
struct urb *urb = NULL;
struct skb_data *entry;
struct driver_info *info = dev->driver_info;
@@ -1413,7 +1413,7 @@ not_drop:
}
} else
netif_dbg(dev, tx_queued, dev->net,
- "> tx, len %d, type 0x%x\n", length, skb->protocol);
+ "> tx, len %u, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
@@ -1626,7 +1626,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init(&dev->rxq_pause);
dev->bh.func = usbnet_bh;
dev->bh.data = (unsigned long) dev;
- INIT_WORK (&dev->kevent, kevent);
+ INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
dev->delay.function = usbnet_bh;
dev->delay.data = (unsigned long) dev;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 4cca36ebc4fb..c8186ffda1a3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -263,6 +263,20 @@ static void veth_poll_controller(struct net_device *dev)
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
+static int veth_get_iflink(const struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+ int iflink;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ iflink = peer ? peer->ifindex : 0;
+ rcu_read_unlock();
+
+ return iflink;
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -275,6 +289,7 @@ static const struct net_device_ops veth_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = veth_poll_controller,
#endif
+ .ndo_get_iflink = veth_get_iflink,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
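The new ndo_get_iflink hook is consumed by the core dev_get_iflink() helper, which is what tools such as `ip link` use to print the peer index (the @ifN suffix). Paraphrased from net/core/dev.c, the caller side looks roughly like this:

	/* Paraphrased core helper: ask the driver which link it sits on,
	 * falling back to the device's own ifindex when no hook is set. */
	int dev_get_iflink(const struct net_device *dev)
	{
		if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
			return dev->netdev_ops->ndo_get_iflink(dev);

		return dev->ifindex;
	}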
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 59b0e9754ae3..63c7810e1545 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -749,9 +749,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
- unsigned int r, received = 0;
+ unsigned int r, received;
- received += virtnet_receive(rq, budget - received);
+ received = virtnet_receive(rq, budget);
/* Out of packets? */
if (received < budget) {
@@ -939,8 +939,16 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
nf_reset(skb);
- /* Apparently nice girls don't return TX_BUSY; stop the queue
- * before it gets out of hand. Naturally, this wastes entries. */
+ /* If running out of space, stop queue to avoid getting packets that we
+ * are then unable to transmit.
+ * An alternative would be to force queuing layer to requeue the skb by
+ * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+ * returned in a normal path of operation: it means that driver is not
+ * maintaining the TX queue stop/start state properly, and causes
+ * the stack to do a non-trivial amount of useless work.
+ * Since most packets only take 1 or 2 ring slots, stopping the queue
+ * early means 16 slots are typically wasted.
+ */
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
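The rewritten comment above describes the general stop-the-queue-early pattern rather than anything virtio-specific. A generic, illustrative sketch of that pattern in an ndo_start_xmit handler (all foo_* helpers are hypothetical placeholders):

	/* Illustrative only: post the skb, then stop the queue while a
	 * worst-case packet (MAX_SKB_FRAGS fragments plus linear part and
	 * header) might not fit; the completion path wakes the queue. */
	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		struct foo_tx_ring *ring = foo_pick_ring(dev, skb);	/* hypothetical */

		foo_post_skb(ring, skb);				/* hypothetical */

		if (foo_free_slots(ring) < MAX_SKB_FRAGS + 2)		/* hypothetical */
			netif_stop_subqueue(dev, ring->index);

		return NETDEV_TX_OK;
	}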
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 294214c15292..61c0840c448c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -819,6 +819,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_TxDataDesc *tdd;
+ u8 protocol = 0;
if (ctx->mss) { /* TSO */
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
@@ -831,16 +832,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (ctx->ipv4) {
const struct iphdr *iph = ip_hdr(skb);
- if (iph->protocol == IPPROTO_TCP)
- ctx->l4_hdr_size = tcp_hdrlen(skb);
- else if (iph->protocol == IPPROTO_UDP)
- ctx->l4_hdr_size = sizeof(struct udphdr);
- else
- ctx->l4_hdr_size = 0;
- } else {
- /* for simplicity, don't copy L4 headers */
+ protocol = iph->protocol;
+ } else if (ctx->ipv6) {
+ const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ protocol = ipv6h->nexthdr;
+ }
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ ctx->l4_hdr_size = tcp_hdrlen(skb);
+ break;
+ case IPPROTO_UDP:
+ ctx->l4_hdr_size = sizeof(struct udphdr);
+ break;
+ default:
ctx->l4_hdr_size = 0;
+ break;
}
+
ctx->copy_size = min(ctx->eth_ip_hdr_size +
ctx->l4_hdr_size, skb->len);
} else {
@@ -887,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
- } else {
+ } else if (ctx->ipv6) {
struct ipv6hdr *iph = ipv6_hdr(skb);
tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
@@ -938,6 +948,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
count = txd_estimate(skb);
ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
+ ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 4c8a944d58b4..c1d0e7a9da04 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -104,7 +104,7 @@ vmxnet3_rq_driver_stats[] = {
rx_buf_alloc_failure) },
};
-/* gloabl stats maintained by the driver */
+/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
/* description, offset */
@@ -272,7 +272,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
- /* update harware LRO capability accordingly */
+ /* update hardware LRO capability accordingly */
if (features & NETIF_F_LRO)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_LRO;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index cd71c77f78f2..6bb769ae7de9 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.3.4.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.3.5.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01030400
+#define VMXNET3_DRIVER_VERSION_NUM 0x01030500
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -211,6 +211,7 @@ struct vmxnet3_tq_driver_stats {
struct vmxnet3_tx_ctx {
bool ipv4;
+ bool ipv6;
u16 mss;
u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
* offloading
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f8528a4cf54f..27a5f954f8e9 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -127,10 +127,6 @@ struct vxlan_dev {
__u8 ttl;
u32 flags; /* VXLAN_F_* in vxlan.h */
- struct work_struct sock_work;
- struct work_struct igmp_join;
- struct work_struct igmp_leave;
-
unsigned long age_interval;
struct timer_list age_timer;
spinlock_t hash_lock;
@@ -144,58 +140,56 @@ struct vxlan_dev {
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
-static void vxlan_sock_work(struct work_struct *work);
-
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
- if (a->sa.sa_family != b->sa.sa_family)
- return false;
- if (a->sa.sa_family == AF_INET6)
- return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
- else
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+ if (a->sa.sa_family != b->sa.sa_family)
+ return false;
+ if (a->sa.sa_family == AF_INET6)
+ return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
+ else
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}
static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
- if (ipa->sa.sa_family == AF_INET6)
- return ipv6_addr_any(&ipa->sin6.sin6_addr);
- else
- return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_any(&ipa->sin6.sin6_addr);
+ else
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}
static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
- if (ipa->sa.sa_family == AF_INET6)
- return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
- else
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
+ else
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
- if (nla_len(nla) >= sizeof(struct in6_addr)) {
- nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
- ip->sa.sa_family = AF_INET6;
- return 0;
- } else if (nla_len(nla) >= sizeof(__be32)) {
- ip->sin.sin_addr.s_addr = nla_get_be32(nla);
- ip->sa.sa_family = AF_INET;
- return 0;
- } else {
- return -EAFNOSUPPORT;
- }
+ if (nla_len(nla) >= sizeof(struct in6_addr)) {
+ ip->sin6.sin6_addr = nla_get_in6_addr(nla);
+ ip->sa.sa_family = AF_INET6;
+ return 0;
+ } else if (nla_len(nla) >= sizeof(__be32)) {
+ ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
+ ip->sa.sa_family = AF_INET;
+ return 0;
+ } else {
+ return -EAFNOSUPPORT;
+ }
}
static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
- const union vxlan_addr *ip)
+ const union vxlan_addr *ip)
{
- if (ip->sa.sa_family == AF_INET6)
- return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
- else
- return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+ if (ip->sa.sa_family == AF_INET6)
+ return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
+ else
+ return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#else /* !CONFIG_IPV6 */
@@ -203,36 +197,36 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}
static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
- return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}
static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
- if (nla_len(nla) >= sizeof(struct in6_addr)) {
- return -EAFNOSUPPORT;
- } else if (nla_len(nla) >= sizeof(__be32)) {
- ip->sin.sin_addr.s_addr = nla_get_be32(nla);
- ip->sa.sa_family = AF_INET;
- return 0;
- } else {
- return -EAFNOSUPPORT;
- }
+ if (nla_len(nla) >= sizeof(struct in6_addr)) {
+ return -EAFNOSUPPORT;
+ } else if (nla_len(nla) >= sizeof(__be32)) {
+ ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
+ ip->sa.sa_family = AF_INET;
+ return 0;
+ } else {
+ return -EAFNOSUPPORT;
+ }
}
static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
- const union vxlan_addr *ip)
+ const union vxlan_addr *ip)
{
- return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+ return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
@@ -736,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
/* Only change unicasts */
if (!(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
- int rc = vxlan_fdb_replace(f, ip, port, vni,
+ notify |= vxlan_fdb_replace(f, ip, port, vni,
ifindex);
-
- if (rc < 0)
- return rc;
- notify |= rc;
} else
return -EOPNOTSUPP;
}
@@ -995,7 +985,7 @@ out:
/* Watch incoming packets to learn mapping between Ethernet address
* and Tunnel endpoint.
- * Return true if packet is bogus and should be droppped.
+ * Return true if packet is bogus and should be dropped.
*/
static bool vxlan_snoop(struct net_device *dev,
union vxlan_addr *src_ip, const u8 *src_mac)
@@ -1072,11 +1062,6 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
return false;
}
-static void vxlan_sock_hold(struct vxlan_sock *vs)
-{
- atomic_inc(&vs->refcnt);
-}
-
void vxlan_sock_release(struct vxlan_sock *vs)
{
struct sock *sk = vs->sock->sk;
@@ -1095,17 +1080,16 @@ void vxlan_sock_release(struct vxlan_sock *vs)
}
EXPORT_SYMBOL_GPL(vxlan_sock_release);
-/* Callback to update multicast group membership when first VNI on
- * multicast asddress is brought up
- * Done as workqueue because ip_mc_join_group acquires RTNL.
+/* Update multicast group membership when first VNI on
+ * multicast address is brought up
*/
-static void vxlan_igmp_join(struct work_struct *work)
+static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
- struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
int ifindex = vxlan->default_dst.remote_ifindex;
+ int ret = -EINVAL;
lock_sock(sk);
if (ip->sa.sa_family == AF_INET) {
@@ -1114,27 +1098,26 @@ static void vxlan_igmp_join(struct work_struct *work)
.imr_ifindex = ifindex,
};
- ip_mc_join_group(sk, &mreq);
+ ret = ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
- &ip->sin6.sin6_addr);
+ ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
+ &ip->sin6.sin6_addr);
#endif
}
release_sock(sk);
- vxlan_sock_release(vs);
- dev_put(vxlan->dev);
+ return ret;
}
/* Inverse of vxlan_igmp_join when last VNI is brought down */
-static void vxlan_igmp_leave(struct work_struct *work)
+static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
- struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
int ifindex = vxlan->default_dst.remote_ifindex;
+ int ret = -EINVAL;
lock_sock(sk);
if (ip->sa.sa_family == AF_INET) {
@@ -1143,18 +1126,16 @@ static void vxlan_igmp_leave(struct work_struct *work)
.imr_ifindex = ifindex,
};
- ip_mc_leave_group(sk, &mreq);
+ ret = ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
- &ip->sin6.sin6_addr);
+ ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
+ &ip->sin6.sin6_addr);
#endif
}
-
release_sock(sk);
- vxlan_sock_release(vs);
- dev_put(vxlan->dev);
+ return ret;
}
static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
@@ -1244,7 +1225,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
* this as a malformed packet. This behavior diverges from
* VXLAN RFC (RFC7348) which stipulates that bits in reserved
* in reserved fields are to be ignored. The approach here
- * maintains compatbility with previous stack code, and also
+ * maintains compatibility with previous stack code, and also
* is more robust and provides a little more security in
* adding extensions to VXLAN.
*/
@@ -1687,7 +1668,8 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
}
#if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
__be16 src_port, __be16 dst_port,
@@ -1713,12 +1695,6 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
}
}
- skb = iptunnel_handle_offloads(skb, udp_sum, type);
- if (IS_ERR(skb)) {
- err = -EINVAL;
- goto err;
- }
-
skb_scrub_packet(skb, xnet);
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
@@ -1738,6 +1714,12 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
goto err;
}
+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
+ if (IS_ERR(skb)) {
+ err = -EINVAL;
+ goto err;
+ }
+
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_HF_VNI);
vxh->vx_vni = md->vni;
@@ -1763,7 +1745,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
+ udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
ttl, src_port, dst_port,
!!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
return 0;
@@ -1773,7 +1755,7 @@ err:
}
#endif
-int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port,
struct vxlan_metadata *md, bool xnet, u32 vxflags)
@@ -1798,10 +1780,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
}
}
- skb = iptunnel_handle_offloads(skb, udp_sum, type);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -1817,6 +1795,10 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
if (WARN_ON(!skb))
return -ENOMEM;
+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_HF_VNI);
vxh->vx_vni = md->vni;
@@ -1842,7 +1824,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
+ return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
ttl, df, src_port, dst_port, xnet,
!(vxflags & VXLAN_F_UDP_CSUM));
}
@@ -1897,6 +1879,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct sock *sk = vxlan->vn_sock->sock->sk;
struct rtable *rt = NULL;
const struct iphdr *old_iph;
struct flowi4 fl4;
@@ -1976,7 +1959,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
md.vni = htonl(vni << 8);
md.gbp = skb->mark;
- err = vxlan_xmit_skb(rt, skb, fl4.saddr,
+ err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
src_port, dst_port, &md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
@@ -1990,7 +1973,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- struct sock *sk = vxlan->vn_sock->sock->sk;
struct dst_entry *ndst;
struct flowi6 fl6;
u32 flags;
@@ -2036,7 +2018,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
md.vni = htonl(vni << 8);
md.gbp = skb->mark;
- err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
+ err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
0, ttl, src_port, dst_port, &md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
vxlan->flags);
@@ -2175,37 +2157,22 @@ static void vxlan_cleanup(unsigned long arg)
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
__u32 vni = vxlan->default_dst.remote_vni;
vxlan->vn_sock = vs;
+ spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+ spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
- struct vxlan_sock *vs;
- bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
-
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
- vxlan->dst_port, vxlan->flags);
- if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
- /* If we have a socket with same port already, reuse it */
- vxlan_vs_add_dev(vs, vxlan);
- } else {
- /* otherwise make new socket outside of RTNL */
- dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->sock_work);
- }
- spin_unlock(&vn->sock_lock);
-
return 0;
}
@@ -2223,12 +2190,9 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_sock *vs = vxlan->vn_sock;
vxlan_fdb_delete_default(vxlan);
- if (vs)
- vxlan_sock_release(vs);
free_percpu(dev->tstats);
}
@@ -2236,22 +2200,28 @@ static void vxlan_uninit(struct net_device *dev)
static int vxlan_open(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_sock *vs = vxlan->vn_sock;
+ struct vxlan_sock *vs;
+ int ret = 0;
- /* socket hasn't been created */
- if (!vs)
- return -ENOTCONN;
+ vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
+ false, vxlan->flags);
+ if (IS_ERR(vs))
+ return PTR_ERR(vs);
+
+ vxlan_vs_add_dev(vs, vxlan);
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
- vxlan_sock_hold(vs);
- dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_join);
+ ret = vxlan_igmp_join(vxlan);
+ if (ret) {
+ vxlan_sock_release(vs);
+ return ret;
+ }
}
if (vxlan->age_interval)
mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
- return 0;
+ return ret;
}
/* Purge the forwarding table */
@@ -2279,19 +2249,18 @@ static int vxlan_stop(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
+ int ret = 0;
- if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- !vxlan_group_used(vn, vxlan)) {
- vxlan_sock_hold(vs);
- dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_leave);
- }
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ !vxlan_group_used(vn, vxlan))
+ ret = vxlan_igmp_leave(vxlan);
del_timer_sync(&vxlan->age_timer);
vxlan_flush(vxlan);
+ vxlan_sock_release(vs);
- return 0;
+ return ret;
}
/* Stub, nothing needs to be done. */
@@ -2402,9 +2371,6 @@ static void vxlan_setup(struct net_device *dev)
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
- INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
- INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
- INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
init_timer_deferrable(&vxlan->age_timer);
vxlan->age_timer.function = vxlan_cleanup;
@@ -2516,7 +2482,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
} else {
udp_conf.family = AF_INET;
- udp_conf.local_ip.s_addr = INADDR_ANY;
}
udp_conf.local_udp_port = port;
@@ -2552,6 +2517,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
sock = vxlan_create_sock(net, ipv6, port, flags);
if (IS_ERR(sock)) {
+ pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
+ PTR_ERR(sock));
kfree(vs);
return ERR_CAST(sock);
}
@@ -2591,45 +2558,23 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
struct vxlan_sock *vs;
bool ipv6 = flags & VXLAN_F_IPV6;
- vs = vxlan_socket_create(net, port, rcv, data, flags);
- if (!IS_ERR(vs))
- return vs;
-
- if (no_share) /* Return error if sharing is not allowed. */
- return vs;
-
- spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags);
- if (vs && ((vs->rcv != rcv) ||
- !atomic_add_unless(&vs->refcnt, 1, 0)))
- vs = ERR_PTR(-EBUSY);
- spin_unlock(&vn->sock_lock);
-
- if (!vs)
- vs = ERR_PTR(-EINVAL);
+ if (!no_share) {
+ spin_lock(&vn->sock_lock);
+ vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
+ flags);
+ if (vs && vs->rcv == rcv) {
+ if (!atomic_add_unless(&vs->refcnt, 1, 0))
+ vs = ERR_PTR(-EBUSY);
+ spin_unlock(&vn->sock_lock);
+ return vs;
+ }
+ spin_unlock(&vn->sock_lock);
+ }
- return vs;
+ return vxlan_socket_create(net, port, rcv, data, flags);
}
EXPORT_SYMBOL_GPL(vxlan_sock_add);
-/* Scheduled at device creation to bind to a socket */
-static void vxlan_sock_work(struct work_struct *work)
-{
- struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
- struct net *net = vxlan->net;
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- __be16 port = vxlan->dst_port;
- struct vxlan_sock *nvs;
-
- nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
- spin_lock(&vn->sock_lock);
- if (!IS_ERR(nvs))
- vxlan_vs_add_dev(nvs, vxlan);
- spin_unlock(&vn->sock_lock);
-
- dev_put(vxlan->dev);
-}
-
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -2651,27 +2596,25 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
/* Unless IPv6 is explicitly requested, assume IPv4 */
dst->remote_ip.sa.sa_family = AF_INET;
if (data[IFLA_VXLAN_GROUP]) {
- dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+ dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
} else if (data[IFLA_VXLAN_GROUP6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
- nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
- sizeof(struct in6_addr));
+ dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
dst->remote_ip.sa.sa_family = AF_INET6;
use_ipv6 = true;
}
if (data[IFLA_VXLAN_LOCAL]) {
- vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+ vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
vxlan->saddr.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_LOCAL6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
/* TODO: respect scope id */
- nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
- sizeof(struct in6_addr));
+ vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
vxlan->saddr.sa.sa_family = AF_INET6;
use_ipv6 = true;
}
@@ -2856,13 +2799,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (!vxlan_addr_any(&dst->remote_ip)) {
if (dst->remote_ip.sa.sa_family == AF_INET) {
- if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
- dst->remote_ip.sin.sin_addr.s_addr))
+ if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
+ dst->remote_ip.sin.sin_addr.s_addr))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
- &dst->remote_ip.sin6.sin6_addr))
+ if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
+ &dst->remote_ip.sin6.sin6_addr))
goto nla_put_failure;
#endif
}
@@ -2873,13 +2816,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (!vxlan_addr_any(&vxlan->saddr)) {
if (vxlan->saddr.sa.sa_family == AF_INET) {
- if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
- vxlan->saddr.sin.sin_addr.s_addr))
+ if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
+ vxlan->saddr.sin.sin_addr.s_addr))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
- &vxlan->saddr.sin6.sin6_addr))
+ if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
+ &vxlan->saddr.sin6.sin6_addr))
goto nla_put_failure;
#endif
}
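The address accessors swapped in throughout the vxlan netlink code are thin wrappers from <net/netlink.h>; paraphrased, the IPv4 pair amounts to:

	/* Paraphrased from <net/netlink.h>: typed helpers replacing the raw
	 * nla_get_be32()/nla_put_be32() calls removed above. */
	static inline __be32 nla_get_in_addr(const struct nlattr *nla)
	{
		return *(__be32 *)nla_data(nla);
	}

	static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
					  __be32 addr)
	{
		return nla_put_be32(skb, attrtype, addr);
	}

The in6_addr variants behave the same way, using nla_memcpy() and nla_put() underneath.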
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 88d121d43c08..bcfa01add7cc 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -579,6 +579,7 @@ static int cosa_probe(int base, int irq, int dma)
/* Register the network interface */
if (!(chan->netdev = alloc_hdlcdev(chan))) {
pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
+ err = -ENOMEM;
goto err_hdlcdev;
}
dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index bea0f313a7a8..317bc79cc8b9 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -850,6 +850,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_hdlcdev(sc);
if (!dev) {
printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
+ err = -ENOMEM;
goto err_hdlcdev;
}
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index e71a2ce7a448..d0c97c220026 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2676,7 +2676,7 @@ static void wifi_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 100;
- memset(dev->broadcast,0xFF, ETH_ALEN);
+ eth_broadcast_addr(dev->broadcast);
dev->flags = IFF_BROADCAST|IFF_MULTICAST;
}
@@ -3211,7 +3211,7 @@ static void airo_print_status(const char *devname, u16 status)
airo_print_dbg(devname, "link lost (TSF sync lost)");
break;
default:
- airo_print_dbg(devname, "unknow status %x\n", status);
+ airo_print_dbg(devname, "unknown status %x\n", status);
break;
}
break;
@@ -3233,7 +3233,7 @@ static void airo_print_status(const char *devname, u16 status)
case STAT_REASSOC:
break;
default:
- airo_print_dbg(devname, "unknow status %x\n", status);
+ airo_print_dbg(devname, "unknown status %x\n", status);
break;
}
}
@@ -3273,7 +3273,7 @@ static void airo_handle_link(struct airo_info *ai)
}
/* Send event to user space */
- memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
}
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index da92bfa76b7c..49219c508963 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1166,7 +1166,7 @@ static int at76_start_monitor(struct at76_priv *priv)
int ret;
memset(&scan, 0, sizeof(struct at76_req_scan));
- memset(scan.bssid, 0xff, ETH_ALEN);
+ eth_broadcast_addr(scan.bssid);
scan.channel = priv->channel;
scan.scan_type = SCAN_TYPE_PASSIVE;
@@ -1427,7 +1427,7 @@ static int at76_startup_device(struct at76_priv *priv)
at76_wait_completion(priv, CMD_STARTUP);
/* remove BSSID from previous run */
- memset(priv->bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->bssid);
priv->scanning = false;
@@ -1973,7 +1973,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw,
ieee80211_stop_queues(hw);
memset(&scan, 0, sizeof(struct at76_req_scan));
- memset(scan.bssid, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(scan.bssid);
if (req->n_ssids) {
scan.scan_type = SCAN_TYPE_ACTIVE;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index f92050617ae6..5147ebe4cd05 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -779,8 +779,6 @@ static void ar5523_tx(struct ieee80211_hw *hw,
ieee80211_stop_queues(hw);
}
- data->skb = skb;
-
spin_lock_irqsave(&ar->tx_data_list_lock, flags);
list_add_tail(&data->list, &ar->tx_queue_pending);
spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
@@ -817,10 +815,13 @@ static void ar5523_tx_work_locked(struct ar5523 *ar)
if (!data)
break;
- skb = data->skb;
+ txi = container_of((void *)data, struct ieee80211_tx_info,
+ driver_data);
txqid = 0;
- txi = IEEE80211_SKB_CB(skb);
+
+ skb = container_of((void *)txi, struct sk_buff, cb);
paylen = skb->len;
+
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
ar5523_err(ar, "Failed to allocate TX urb\n");
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
index 00c6fd346d48..9a322a65cdb5 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.h
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -74,7 +74,6 @@ struct ar5523_tx_cmd {
struct ar5523_tx_data {
struct list_head list;
struct ar5523 *ar;
- struct sk_buff *skb;
struct urb *urb;
};
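With the skb pointer dropped from struct ar5523_tx_data, the two container_of() calls in the .c hunk recover it indirectly: the per-packet driver data is stored inside ieee80211_tx_info::driver_data, and the tx_info itself is just a view of skb->cb. The forward direction of the same relationship, for illustration:

	/* Illustrative: how the embedded storage is laid out, which is what
	 * makes the reverse container_of() walk in ar5523_tx_work_locked()
	 * valid. */
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);	/* == (void *)skb->cb */
	struct ar5523_tx_data *data = (void *)txi->driver_data;	/* lives inside txi */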
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 1eebe2ea3dfb..7e9481099a8e 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -131,6 +131,9 @@ struct ath_ops {
void (*enable_write_buffer)(void *);
void (*write_flush) (void *);
u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
+ void (*enable_rmw_buffer)(void *);
+ void (*rmw_flush) (void *);
+
};
struct ath_common;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c18647b87f71..0eddb204d85b 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -39,7 +39,7 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB 3
+#define CE_DESC_FLAGS_META_DATA_LSB 2
struct ce_desc {
__le32 addr;
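The LSB fix can be sanity-checked against the mask defined just above it: 0xFFFC keeps bits 2..15, so assuming the usual (value & MASK) >> LSB extraction the matching shift is 2, not 3:

	/* 0xFFFC == 0b1111111111111100: the metadata field starts at bit 2. */
	u16 meta = (flags & CE_DESC_FLAGS_META_DATA_MASK)
		   >> CE_DESC_FLAGS_META_DATA_LSB;	/* shift by 2 */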
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 310e12bc078a..c0e454bb6a8d 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -436,16 +436,16 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
- if (ar->board && !IS_ERR(ar->board))
+ if (!IS_ERR(ar->board))
release_firmware(ar->board);
- if (ar->otp && !IS_ERR(ar->otp))
+ if (!IS_ERR(ar->otp))
release_firmware(ar->otp);
- if (ar->firmware && !IS_ERR(ar->firmware))
+ if (!IS_ERR(ar->firmware))
release_firmware(ar->firmware);
- if (ar->cal_file && !IS_ERR(ar->cal_file))
+ if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
ar->board = NULL;
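Dropping the non-NULL checks is safe because release_firmware() itself treats a NULL pointer as a no-op, so only the IS_ERR() guard carries information here; paraphrased:

	/* Paraphrased from drivers/base/firmware_class.c: */
	void release_firmware(const struct firmware *fw)
	{
		if (fw) {
			/* ...free the firmware image... */
		}
	}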
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index d60e46fe6d19..f65310c3ba5f 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -159,6 +159,25 @@ struct ath10k_fw_stats_peer {
u32 peer_rx_rate; /* 10x only */
};
+struct ath10k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[4];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[4];
+ u32 num_tx_frames_failures[4];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[10];
+ u32 beacon_rssi_history[10];
+};
+
struct ath10k_fw_stats_pdev {
struct list_head list;
@@ -220,6 +239,7 @@ struct ath10k_fw_stats_pdev {
struct ath10k_fw_stats {
struct list_head pdevs;
+ struct list_head vdevs;
struct list_head peers;
};
@@ -288,6 +308,7 @@ struct ath10k_vif {
bool is_started;
bool is_up;
bool spectral_enabled;
+ bool ps;
u32 aid;
u8 bssid[ETH_ALEN];
@@ -413,6 +434,12 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_WMI_10_2 = 4,
+ /* Some firmware revisions lack proper multi-interface client powersave
+ * implementation. Enabling PS could result in connection drops,
+ * traffic stalls, etc.
+ */
+ ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index d2281e5c2ffe..301081db1ef6 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -243,6 +243,16 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
}
}
+static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
{
struct ath10k_fw_stats_peer *i, *tmp;
@@ -258,6 +268,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
spin_lock_bh(&ar->data_lock);
ar->debug.fw_stats_done = false;
ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
spin_unlock_bh(&ar->data_lock);
}
@@ -273,14 +284,27 @@ static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
return num;
}
+static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_fw_stats stats = {};
bool is_start, is_started, is_end;
size_t num_peers;
+ size_t num_vdevs;
int ret;
INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
INIT_LIST_HEAD(&stats.peers);
spin_lock_bh(&ar->data_lock);
@@ -308,6 +332,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
}
num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
!list_empty(&stats.pdevs));
is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
@@ -330,7 +355,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
goto free;
}
+ if (num_vdevs >= BITS_PER_LONG) {
+ ath10k_warn(ar, "dropping fw vdev stats\n");
+ goto free;
+ }
+
list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+ list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
}
complete(&ar->debug.fw_stats_complete);
@@ -340,6 +371,7 @@ free:
* resources if that is not the case.
*/
ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
ath10k_debug_fw_stats_peers_free(&stats.peers);
unlock:
@@ -363,7 +395,10 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
reinit_completion(&ar->debug.fw_stats_complete);
- ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+ ret = ath10k_wmi_request_stats(ar,
+ WMI_STAT_PDEV |
+ WMI_STAT_VDEV |
+ WMI_STAT_PEER);
if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret);
return ret;
@@ -395,8 +430,11 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
unsigned int len = 0;
unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
const struct ath10k_fw_stats_peer *peer;
size_t num_peers;
+ size_t num_vdevs;
+ int i;
spin_lock_bh(&ar->data_lock);
@@ -408,6 +446,7 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
}
num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
+ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -531,6 +570,65 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "vdev id", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
"ath10k PEER stats", num_peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
@@ -1900,6 +1998,7 @@ int ath10k_debug_create(struct ath10k *ar)
return -ENOMEM;
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index c1da44f65a4d..01a2b384f358 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -176,7 +176,7 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
* automatically balances load wrt to CPU power.
*
* This probably comes at a cost of lower maximum throughput but
- * improves the avarage and stability. */
+ * improves the average and stability. */
spin_lock_bh(&htt->rx_ring.lock);
num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index d6d2f0f00caa..973485bd4121 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -611,7 +611,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n",
+ ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
@@ -658,7 +658,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ret = ath10k_vdev_setup_sync(ar);
if (ret)
- ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n",
+ ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
@@ -927,8 +927,9 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
- arg.vdev_id, ret);
+ ath10k_warn(ar,
+ "failed to synchronize setup for vdev %i restart %d: %d\n",
+ arg.vdev_id, restart, ret);
return ret;
}
@@ -966,7 +967,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -1182,7 +1183,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
if (is_zero_ether_addr(arvif->bssid))
return;
- memset(arvif->bssid, 0, ETH_ALEN);
+ eth_zero_addr(arvif->bssid);
return;
}
@@ -1253,6 +1254,20 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
return 0;
}
+static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int num = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->ps)
+ num++;
+
+ return num;
+}
+
static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
@@ -1262,13 +1277,24 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
enum wmi_sta_ps_mode psmode;
int ret;
int ps_timeout;
+ bool enable_ps;
lockdep_assert_held(&arvif->ar->conf_mutex);
if (arvif->vif->type != NL80211_IFTYPE_STATION)
return 0;
- if (vif->bss_conf.ps) {
+ enable_ps = arvif->ps;
+
+ if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+ !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+ ar->fw_features)) {
+ ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+ arvif->vdev_id);
+ enable_ps = false;
+ }
+
+ if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
@@ -1386,7 +1412,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
lockdep_assert_held(&ar->conf_mutex);
bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
- info->bssid, NULL, 0, 0, 0);
+ info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
@@ -1781,6 +1808,68 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
ath10k_smps_map[smps]);
}
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta_vht_cap vht_cap)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret;
+ u32 param;
+ u32 value;
+
+ if (!(ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+ return 0;
+
+ param = ar->wmi.vdev_param->txbf;
+ value = 0;
+
+ if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+ return 0;
+
+ /* The following logic is correct. If a remote STA advertises support
+ * for being a beamformer then we should enable us being a beamformee.
+ */
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
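+ /* MU beamforming implies the corresponding SU capability, so force
+ * the SU bits on whenever the MU bits are set.
+ */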
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+ value, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/* can be called only in mac80211 callbacks due to `key_count` usage */
static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1789,6 +1878,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
struct wmi_peer_assoc_complete_arg peer_arg;
struct ieee80211_sta *ap_sta;
int ret;
@@ -1811,6 +1901,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
/* ap_sta must be accessed only within rcu section which must be left
* before calling ath10k_setup_peer_smps() which might sleep. */
ht_cap = ap_sta->ht_cap;
+ vht_cap = ap_sta->vht_cap;
ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
if (ret) {
@@ -1836,6 +1927,13 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
return;
}
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+ arvif->vdev_id, bss_conf->bssid, ret);
+ return;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
@@ -1853,6 +1951,18 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
}
arvif->is_up = true;
+
+ /* Workaround: Some firmware revisions (tested with qca6174
+ * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
+ * poked with a peer param command.
+ */
+ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+ WMI_PEER_DUMMY_VAR, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+ arvif->bssid, arvif->vdev_id, ret);
+ return;
+ }
}
static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
@@ -1860,6 +1970,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta_vht_cap vht_cap = {};
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -1874,6 +1985,13 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
arvif->def_wep_key_idx = -1;
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
arvif->is_up = false;
}
@@ -2554,6 +2672,17 @@ static int ath10k_start_scan(struct ath10k *ar,
return -ETIMEDOUT;
}
+ /* If the scan failed to start, return an error code at this point.
+ * This is probably due to some issue in the firmware, but there is
+ * no need to wedge the driver because of it.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH10K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
/* Add a 200ms margin to account for event/command processing */
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
msecs_to_jiffies(arg->max_scan_time+200));
@@ -3323,9 +3452,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
+ ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+ vif->addr);
if (ret)
- ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n",
+ ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
@@ -3339,6 +3469,21 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
+ /* Some firmware revisions don't notify the host about self-peer
+ * removal until after the associated vdev is deleted.
+ */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers--;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
ath10k_peer_cleanup(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
@@ -3534,7 +3679,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_PS) {
- ret = ath10k_mac_vif_setup_ps(arvif);
+ arvif->ps = vif->bss_conf.ps;
+
+ ret = ath10k_config_ps(ar);
if (ret)
ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
arvif->vdev_id, ret);
@@ -4857,7 +5004,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
- ath10k_warn(ar, "Invalid bandwith %d in rc update for %pM\n",
+ ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
sta->bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e6972b09333e..7681237fe298 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -104,7 +104,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
- .src_sz_max = 512,
+ .src_sz_max = 2048,
.dest_nentries = 512,
},
@@ -174,7 +174,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(512),
+ .nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 04dc4b9db04e..c8b64e7a6089 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -110,8 +110,7 @@ struct wmi_ops {
bool deliver_cab);
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
const struct wmi_wmm_params_all_arg *arg);
- struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
- enum wmi_stats_id stats_id);
+ struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms);
@@ -816,14 +815,14 @@ ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
}
static inline int
-ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_request_stats)
return -EOPNOTSUPP;
- skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
+ skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
if (IS_ERR(skb))
return PTR_ERR(skb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 71614ba1b145..ee0c5f602e29 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -869,16 +869,57 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
return 0;
}
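+/* Convert a little-endian firmware TLV vdev stats structure into the
+ * host-order ath10k_fw_stats_vdev representation.
+ */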
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+ struct ath10k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = __le32_to_cpu(src->vdev_id);
+ dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+ dst->data_snr = __le32_to_cpu(src->data_snr);
+ dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+ dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+ dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+ dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+ dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+ dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] =
+ __le32_to_cpu(src->num_tx_frames[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] =
+ __le32_to_cpu(src->num_tx_frames_retries[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] =
+ __le32_to_cpu(src->num_tx_frames_failures[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] =
+ __le32_to_cpu(src->tx_rate_history[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] =
+ __le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
{
const void **tb;
- const struct wmi_stats_event *ev;
+ const struct wmi_tlv_stats_ev *ev;
const void *data;
- u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
size_t data_len;
int ret;
+ int i;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -899,8 +940,73 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+ num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+ num_pdev_stats, num_vdev_stats, num_peer_stats,
+ num_bcnflt_stats, num_chan_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src))
+ return -EPROTO;
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_tlv_vdev_stats *src;
+ struct ath10k_fw_stats_vdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src))
+ return -EPROTO;
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
- WARN_ON(1); /* FIXME: not implemented yet */
+ ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = data;
+ if (data_len < sizeof(*src))
+ return -EPROTO;
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ list_add_tail(&dst->list, &stats->peers);
+ }
kfree(tb);
return 0;
@@ -1604,14 +1710,12 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg)
{
struct wmi_tlv_vdev_set_wmm_cmd *cmd;
- struct wmi_wmm_params *wmm;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
void *ptr;
- len = (sizeof(*tlv) + sizeof(*cmd)) +
- (4 * (sizeof(*tlv) + sizeof(*wmm)));
+ len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -1623,13 +1727,10 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- ptr += sizeof(*tlv);
- ptr += sizeof(*cmd);
-
- ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
- ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
- ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
- ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
return skb;
@@ -2080,8 +2181,7 @@ ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
- enum wmi_stats_id stats_id)
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
{
struct wmi_request_stats_cmd *cmd;
struct wmi_tlv *tlv;
@@ -2095,7 +2195,7 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
- cmd->stats_id = __cpu_to_le32(stats_id);
+ cmd->stats_id = __cpu_to_le32(stats_mask);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
return skb;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index de68fe76eae6..a6c8280cc4b1 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1302,8 +1302,14 @@ struct wmi_tlv_pdev_set_wmm_cmd {
__le32 dg_type; /* no idea.. */
} __packed;
+struct wmi_tlv_vdev_wmm_params {
+ __le32 dummy;
+ struct wmi_wmm_params params;
+} __packed;
+
struct wmi_tlv_vdev_set_wmm_cmd {
__le32 vdev_id;
+ struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
} __packed;
struct wmi_tlv_phyerr_ev {
@@ -1439,6 +1445,15 @@ struct wmi_tlv_sta_keepalive_cmd {
__le32 interval; /* in seconds */
} __packed;
+struct wmi_tlv_stats_ev {
+ __le32 stats_id; /* WMI_STAT_ */
+ __le32 num_pdev_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ __le32 num_chan_stats;
+} __packed;
+
void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index aeea1c793943..c7ea77edce24 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1125,6 +1125,25 @@ static void ath10k_wmi_event_scan_started(struct ath10k *ar)
}
}
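+/* A failed scan start still completes &ar->scan.started so that
+ * ath10k_start_scan() stops waiting; the scan is then finished and the
+ * state machine drops back to idle.
+ */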
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath10k_scan_finish(ar);
+ break;
+ }
+}
+
static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
{
lockdep_assert_held(&ar->data_lock);
@@ -1292,6 +1311,7 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_SCAN_EVENT_START_FAILED:
ath10k_warn(ar, "received scan start failure event\n");
+ ath10k_wmi_event_scan_start_failed(ar);
break;
case WMI_SCAN_EVENT_DEQUEUED:
case WMI_SCAN_EVENT_PREEMPTED:
@@ -4954,7 +4974,7 @@ ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
{
struct wmi_request_stats_cmd *cmd;
struct sk_buff *skb;
@@ -4964,9 +4984,10 @@ ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
return ERR_PTR(-ENOMEM);
cmd = (struct wmi_request_stats_cmd *)skb->data;
- cmd->stats_id = __cpu_to_le32(stats_id);
+ cmd->stats_id = __cpu_to_le32(stats_mask);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+ stats_mask);
return skb;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 20ce3603e64b..adf935bf0580 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3057,8 +3057,12 @@ struct wmi_pdev_stats_peer {
} __packed;
enum wmi_stats_id {
- WMI_REQUEST_PEER_STAT = 0x01,
- WMI_REQUEST_AP_STAT = 0x02
+ WMI_STAT_PEER = BIT(0),
+ WMI_STAT_AP = BIT(1),
+ WMI_STAT_PDEV = BIT(2),
+ WMI_STAT_VDEV = BIT(3),
+ WMI_STAT_BCNFLT = BIT(4),
+ WMI_STAT_VDEV_RATE = BIT(5),
};
struct wlan_inst_rssi_args {
@@ -3093,7 +3097,7 @@ struct wmi_pdev_suspend_cmd {
} __packed;
struct wmi_stats_event {
- __le32 stats_id; /* %WMI_REQUEST_ */
+ __le32 stats_id; /* WMI_STAT_ */
/*
* number of pdev stats event structures
* (wmi_pdev_stats) 0 or 1
@@ -3745,6 +3749,11 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
};
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
@@ -4436,7 +4445,8 @@ enum wmi_peer_param {
WMI_PEER_AUTHORIZE = 0x3,
WMI_PEER_CHAN_WIDTH = 0x4,
WMI_PEER_NSS = 0x5,
- WMI_PEER_USE_4ADDR = 0x6
+ WMI_PEER_USE_4ADDR = 0x6,
+ WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
};
struct wmi_peer_set_param_cmd {
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 1ed7a88aeea9..7ca0d6f930fd 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1283,6 +1283,7 @@ struct ath5k_hw {
#define ATH_STAT_PROMISC 1
#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
+#define ATH_STAT_RESET 4 /* hw reset */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
unsigned int fif_filter_flags; /* Current FIF_* filter flags */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index bc9cb356fa69..a6131825c9f6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -528,7 +528,7 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
* together with the BSSID mask when matching addresses.
*/
iter_data.hw_macaddr = common->macaddr;
- memset(&iter_data.mask, 0xff, ETH_ALEN);
+ eth_broadcast_addr(iter_data.mask);
iter_data.found_active = false;
iter_data.need_set_hw_addr = true;
iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
@@ -1523,6 +1523,9 @@ ath5k_set_current_imask(struct ath5k_hw *ah)
enum ath5k_int imask;
unsigned long flags;
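+ /* Don't touch the interrupt mask while a reset is in progress;
+ * interrupts are re-enabled at the end of ath5k_reset().
+ */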
+ if (test_bit(ATH_STAT_RESET, ah->status))
+ return;
+
spin_lock_irqsave(&ah->irqlock, flags);
imask = ah->imask;
if (ah->rx_pending)
@@ -2858,10 +2861,12 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
{
struct ath_common *common = ath5k_hw_common(ah);
int ret, ani_mode;
- bool fast;
+ bool fast = chan && modparam_fastchanswitch;
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
+ __set_bit(ATH_STAT_RESET, ah->status);
+
ath5k_hw_set_imr(ah, 0);
synchronize_irq(ah->irq);
ath5k_stop_tasklets(ah);
@@ -2876,11 +2881,29 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
* so we should also free any remaining
* tx buffers */
ath5k_drain_tx_buffs(ah);
+
+ /* Stop PCU */
+ ath5k_hw_stop_rx_pcu(ah);
+
+ /* Stop DMA
+ *
+ * Note: if DMA did not stop, continue anyway,
+ * since only a reset will fix it.
+ */
+ ret = ath5k_hw_dma_stop(ah);
+
+ /* The RF bus grant won't work if we have pending
+ * frames.
+ */
+ if (ret && fast) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "DMA didn't stop, falling back to normal reset\n");
+ fast = false;
+ }
+
if (chan)
ah->curchan = chan;
- fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
-
ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
if (ret) {
ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
@@ -2934,6 +2957,8 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
*/
/* ath5k_chan_change(ah, c); */
+ __clear_bit(ATH_STAT_RESET, ah->status);
+
ath5k_beacon_config(ah);
/* intrs are enabled by ath5k_beacon_config */
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index b9b651ea9851..99e62f99a182 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1169,30 +1169,6 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ah->ah_version == AR5K_AR5212)
ath5k_hw_set_sleep_clock(ah, false);
- /*
- * Stop PCU
- */
- ath5k_hw_stop_rx_pcu(ah);
-
- /*
- * Stop DMA
- *
- * Note: If DMA didn't stop continue
- * since only a reset will fix it.
- */
- ret = ath5k_hw_dma_stop(ah);
-
- /* RF Bus grant won't work if we have pending
- * frames */
- if (ret && fast) {
- ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
- "DMA didn't stop, falling back to normal reset\n");
- fast = false;
- /* Non fatal, just continue with
- * normal reset */
- ret = 0;
- }
-
mode = channel->hw_value;
switch (mode) {
case AR5K_MODE_11A:
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 85da63a67faf..cce4625a53ad 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -686,20 +686,21 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
{
struct ath6kl *ar = vif->ar;
struct cfg80211_bss *bss;
- u16 cap_mask, cap_val;
+ u16 cap_val;
+ enum ieee80211_bss_type bss_type;
u8 *ie;
if (nw_type & ADHOC_NETWORK) {
- cap_mask = WLAN_CAPABILITY_IBSS;
cap_val = WLAN_CAPABILITY_IBSS;
+ bss_type = IEEE80211_BSS_TYPE_IBSS;
} else {
- cap_mask = WLAN_CAPABILITY_ESS;
cap_val = WLAN_CAPABILITY_ESS;
+ bss_type = IEEE80211_BSS_TYPE_ESS;
}
bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
vif->ssid, vif->ssid_len,
- cap_mask, cap_val);
+ bss_type, IEEE80211_PRIVACY_ANY);
if (bss == NULL) {
/*
* Since cfg80211 may not yet know about the BSS,
@@ -1495,6 +1496,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params)
@@ -1513,7 +1515,7 @@ static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
return ERR_PTR(-EINVAL);
}
- wdev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+ wdev = ath6kl_interface_add(ar, name, name_assign_type, type, if_idx, nw_type);
if (!wdev)
return ERR_PTR(-ENOMEM);
@@ -2033,7 +2035,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
int ret;
/* Setup unicast pkt pattern */
- memset(mac_mask, 0xff, ETH_ALEN);
+ eth_broadcast_addr(mac_mask);
ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
vif->fw_vif_idx, WOW_LIST_ID,
ETH_ALEN, 0, ndev->dev_addr,
@@ -3633,13 +3635,14 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
}
struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type)
{
struct net_device *ndev;
struct ath6kl_vif *vif;
- ndev = alloc_netdev(sizeof(*vif), name, NET_NAME_UNKNOWN, ether_setup);
+ ndev = alloc_netdev(sizeof(*vif), name, name_assign_type, ether_setup);
if (!ndev)
return NULL;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index b59becd91aea..5aa57a7639bf 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -25,6 +25,7 @@ enum ath6kl_cfg_suspend_mode {
};
struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type);
void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 0df74b245af4..4ec02cea0f43 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -211,8 +211,8 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
rtnl_lock();
/* Add an initial station interface */
- wdev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
- INFRA_NETWORK);
+ wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM,
+ NL80211_IFTYPE_STATION, 0, INFRA_NETWORK);
rtnl_unlock();
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index b42ba46b5030..1af3fed5a72c 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -105,7 +105,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
memset(&ar->ap_stats.sta[sta->aid - 1], 0,
sizeof(struct wmi_per_sta_stat));
- memset(sta->mac, 0, ETH_ALEN);
+ eth_zero_addr(sta->mac);
memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
sta->aid = 0;
sta->sta_flags = 0;
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 473972288a84..ecda613c2d54 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -46,7 +46,8 @@ ath9k_hw-y:= \
ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
- ar9003_mci.o
+ ar9003_mci.o \
+ ar9003_aic.o
ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ca01d17d130f..25e45e4d1a60 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -107,11 +107,21 @@ static const struct ani_cck_level_entry cck_level_table[] = {
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
struct ath9k_mib_stats *stats)
{
- stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
- stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
- stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
- stats->rts_good += REG_READ(ah, AR_RTS_OK);
- stats->beacons += REG_READ(ah, AR_BEACON_CNT);
+ u32 addr[5] = {AR_RTS_OK, AR_RTS_FAIL, AR_ACK_FAIL,
+ AR_FCS_FAIL, AR_BEACON_CNT};
+ u32 data[5];
+
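+ /* Fetch all five MIB counters with a single REG_READ_MULTI call
+ * instead of five separate REG_READs.
+ */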
+ REG_READ_MULTI(ah, &addr[0], &data[0], 5);
+ /* AR_RTS_OK */
+ stats->rts_good += data[0];
+ /* AR_RTS_FAIL */
+ stats->rts_bad += data[1];
+ /* AR_ACK_FAIL */
+ stats->ackrcv_bad += data[2];
+ /* AR_FCS_FAIL */
+ stats->fcs_bad += data[3];
+ /* AR_BEACON_CNT */
+ stats->beacons += data[4];
}
static void ath9k_ani_restart(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index f273427fdd29..6c23d279525f 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -681,12 +681,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
}
+ ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_PHY_TURBO, phymode);
+ /* This function does only REG_WRITEs, so
+ * we can include it in the REGWRITE_BUFFER. */
ath9k_hw_set11nmac2040(ah, chan);
- ENABLE_REGWRITE_BUFFER(ah);
-
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 42190b67c671..50fcd343c41a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -430,46 +430,43 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
u32 regVal;
unsigned int i;
u32 regList[][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 } ,
- { 0x7828, 0 } ,
+ { AR9285_AN_TOP3, 0 },
+ { AR9285_AN_RXTXBB1, 0 },
+ { AR9285_AN_RF2G1, 0 },
+ { AR9285_AN_RF2G2, 0 },
+ { AR9285_AN_TOP2, 0 },
+ { AR9285_AN_RF2G8, 0 },
+ { AR9285_AN_RF2G7, 0 },
+ { AR9285_AN_RF2G3, 0 },
};
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
+ REG_READ_ARRAY(ah, regList, ARRAY_SIZE(regList));
+ ENABLE_REG_RMW_BUFFER(ah);
+ /* 7834, b1=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+ /* 9808, b27=1 */
+ REG_SET_BIT(ah, 0x9808, 1 << 27);
/* 786c,b23,1, pwddac=1 */
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC);
/* 7854, b5,1, pdrxtxbb=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1);
/* 7854, b7,1, pdv2i=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I);
/* 7854, b8,1, pddacinterface=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF);
/* 7824,b12,0, offcal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL);
/* 7838, b1,0, pwddb=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ REG_CLR_BIT(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB);
/* 7820,b11,0, enpacal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL);
/* 7820,b25,1, pdpadrv1=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1);
/* 7820,b24,0, pdpadrv2=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2);
/* 7820,b23,0, pdpaout=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT);
/* 783c,b14-16,7, padrvgn2tab_0=7 */
REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
/*
@@ -477,8 +474,9 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
* does not matter since we turn it off
*/
REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
-
+ /* 7828, b0-11, ccom=fff */
REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+ REG_RMW_BUFFER_FLUSH(ah);
/* Set:
* localmode=1,bmode=1,bmoderxtx=1,synthon=1,
@@ -490,15 +488,16 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
/* find off_6_1; */
for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
+ regVal = REG_READ(ah, AR9285_AN_RF2G6);
regVal |= (1 << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
+ REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
udelay(1);
/* regVal = REG_READ(ah, 0x7834); */
regVal &= (~(0x1 << (20 + i)));
- regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
+ regVal |= (MS(REG_READ(ah, AR9285_AN_RF2G9),
+ AR9285_AN_RXTXBB1_SPARE9)
<< (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
+ REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
}
regVal = (regVal >> 20) & 0x7f;
@@ -515,15 +514,15 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
ah->pacal_info.prev_offset = regVal;
}
- ENABLE_REGWRITE_BUFFER(ah);
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
+ ENABLE_REG_RMW_BUFFER(ah);
+ /* 7834, b1=1 */
+ REG_SET_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+ /* 9808, b27=0 */
+ REG_CLR_BIT(ah, 0x9808, 1 << 27);
+ REG_RMW_BUFFER_FLUSH(ah);
+ ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < ARRAY_SIZE(regList); i++)
REG_WRITE(ah, regList[i][0], regList[i][1]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
new file mode 100644
index 000000000000..1db119d77783
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_mci.h"
+#include "ar9003_aic.h"
+#include "ar9003_phy.h"
+#include "reg_aic.h"
+
+static const u8 com_att_db_table[ATH_AIC_MAX_COM_ATT_DB_TABLE] = {
+ 0, 3, 9, 15, 21, 27
+};
+
+static const u16 aic_lin_table[ATH_AIC_MAX_AIC_LIN_TABLE] = {
+ 8191, 7300, 6506, 5799, 5168, 4606, 4105, 3659,
+ 3261, 2906, 2590, 2309, 2057, 1834, 1634, 1457,
+ 1298, 1157, 1031, 919, 819, 730, 651, 580,
+ 517, 461, 411, 366, 326, 291, 259, 231,
+ 206, 183, 163, 146, 130, 116, 103, 92,
+ 82, 73, 65, 58, 52, 46, 41, 37,
+ 33, 29, 26, 23, 21, 18, 16, 15,
+ 13, 12, 10, 9, 8, 7, 7, 6,
+ 5, 5, 4, 4, 3
+};
+
+static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+ /*
+ * Disable AIC for now, until we have all the
+ * HW code and the driver-layer support ready.
+ */
+ return false;
+
+ if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_AIC)
+ return false;
+
+ return true;
+}
+
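+/* Search cal_sram for the nearest valid entry above (dir != 0) or below
+ * (dir == 0) the given index. Returns -1 if none is found.
+ */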
+static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram,
+ bool dir, u8 index)
+{
+ int16_t i;
+
+ if (dir) {
+ for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ if (cal_sram[i].valid)
+ break;
+ }
+ } else {
+ for (i = index - 1; i >= 0; i--) {
+ if (cal_sram[i].valid)
+ break;
+ }
+ }
+
+ if ((i >= ATH_AIC_MAX_BT_CHANNEL) || (i < 0))
+ i = -1;
+
+ return i;
+}
+
+/*
+ * Reverse lookup of an attenuation value: type 0 searches aic_lin_table,
+ * type 1 searches com_att_db_table. Returns the matching table index,
+ * or -1 if the value is out of range.
+ */
+static int16_t ar9003_aic_find_index(u8 type, int16_t value)
+{
+ int16_t i = -1;
+
+ if (type == 0) {
+ for (i = ATH_AIC_MAX_AIC_LIN_TABLE - 1; i >= 0; i--) {
+ if (aic_lin_table[i] >= value)
+ break;
+ }
+ } else if (type == 1) {
+ for (i = 0; i < ATH_AIC_MAX_COM_ATT_DB_TABLE; i++) {
+ if (com_att_db_table[i] > value) {
+ i--;
+ break;
+ }
+ }
+
+ if (i >= ATH_AIC_MAX_COM_ATT_DB_TABLE)
+ i = -1;
+ }
+
+ return i;
+}
+
+static void ar9003_aic_gain_table(struct ath_hw *ah)
+{
+ u32 aic_atten_word[19], i;
+
+ /* Config LNA gain difference */
+ REG_WRITE(ah, AR_PHY_BT_COEX_4, 0x2c200a00);
+ REG_WRITE(ah, AR_PHY_BT_COEX_5, 0x5c4e4438);
+
+ /* Program gain table */
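+ /* Each word packs two table entries: a 4-bit code in bits 17:14 and
+ * a 5-bit code in bits 13:9 for the odd dB step, plus the same pair
+ * in bits 8:5 and 4:0 for the even dB step (see per-entry comments).
+ */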
+ aic_atten_word[0] = (0x1 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31, 00 dB: 4'd0, 5'd31 */
+ aic_atten_word[1] = (0x3 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x1f & 0x1f); /* -03 dB: 4'd3, 5'd31, -02 dB: 4'd2, 5'd31 */
+ aic_atten_word[2] = (0x5 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x1f & 0x1f); /* -05 dB: 4'd5, 5'd31, -04 dB: 4'd4, 5'd31 */
+ aic_atten_word[3] = (0x1 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */
+ aic_atten_word[4] = (0x3 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x1e & 0x1f); /* -09 dB: 4'd3, 5'd30, -08 dB: 4'd2, 5'd30 */
+ aic_atten_word[5] = (0x5 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x1e & 0x1f); /* -11 dB: 4'd5, 5'd30, -10 dB: 4'd4, 5'd30 */
+ aic_atten_word[6] = (0x1 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0xf & 0x1f); /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */
+ aic_atten_word[7] = (0x3 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0xf & 0x1f); /* -15 dB: 4'd3, 5'd15, -14 dB: 4'd2, 5'd15 */
+ aic_atten_word[8] = (0x5 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0xf & 0x1f); /* -17 dB: 4'd5, 5'd15, -16 dB: 4'd4, 5'd15 */
+ aic_atten_word[9] = (0x1 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */
+ aic_atten_word[10] = (0x3 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -21 dB: 4'd3, 5'd07, -20 dB: 4'd2, 5'd07 */
+ aic_atten_word[11] = (0x5 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -23 dB: 4'd5, 5'd07, -22 dB: 4'd4, 5'd07 */
+ aic_atten_word[12] = (0x7 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -25 dB: 4'd7, 5'd07, -24 dB: 4'd6, 5'd07 */
+ aic_atten_word[13] = (0x3 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x3 & 0x1f); /* -27 dB: 4'd3, 5'd03, -26 dB: 4'd2, 5'd03 */
+ aic_atten_word[14] = (0x5 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x3 & 0x1f); /* -29 dB: 4'd5, 5'd03, -28 dB: 4'd4, 5'd03 */
+ aic_atten_word[15] = (0x1 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */
+ aic_atten_word[16] = (0x3 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -33 dB: 4'd3, 5'd01, -32 dB: 4'd2, 5'd01 */
+ aic_atten_word[17] = (0x5 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -35 dB: 4'd5, 5'd01, -34 dB: 4'd4, 5'd01 */
+ aic_atten_word[18] = (0x7 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -37 dB: 4'd7, 5'd01, -36 dB: 4'd6, 5'd01 */
+
+ /* Write to Gain table with auto increment enabled. */
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+ (ATH_AIC_SRAM_AUTO_INCREMENT |
+ ATH_AIC_SRAM_GAIN_TABLE_OFFSET));
+
+ for (i = 0; i < 19; i++) {
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000),
+ aic_atten_word[i]);
+ }
+}
+
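+/* Program the AIC control registers, clear the calibration SRAM, enable
+ * the AIC reference signal in the BT modem and kick off calibration on
+ * chain 1.
+ */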
+static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ int i;
+
+ /* Write to Gain table with auto increment enabled. */
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+ (ATH_AIC_SRAM_AUTO_INCREMENT |
+ ATH_AIC_SRAM_CAL_OFFSET));
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 0);
+ aic->aic_sram[i] = 0;
+ }
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B0,
+ (SM(0, AR_PHY_AIC_MON_ENABLE) |
+ SM(127, AR_PHY_AIC_CAL_MAX_HOP_COUNT) |
+ SM(min_valid_count, AR_PHY_AIC_CAL_MIN_VALID_COUNT) |
+ SM(37, AR_PHY_AIC_F_WLAN) |
+ SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+ SM(0, AR_PHY_AIC_CAL_ENABLE) |
+ SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+ SM(0, AR_PHY_AIC_ENABLE)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B1,
+ (SM(0, AR_PHY_AIC_MON_ENABLE) |
+ SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+ SM(0, AR_PHY_AIC_CAL_ENABLE) |
+ SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+ SM(0, AR_PHY_AIC_ENABLE)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B0,
+ (SM(8, AR_PHY_AIC_CAL_BT_REF_DELAY) |
+ SM(0, AR_PHY_AIC_BT_IDLE_CFG) |
+ SM(1, AR_PHY_AIC_STDBY_COND) |
+ SM(37, AR_PHY_AIC_STDBY_ROT_ATT_DB) |
+ SM(5, AR_PHY_AIC_STDBY_COM_ATT_DB) |
+ SM(15, AR_PHY_AIC_RSSI_MAX) |
+ SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B1,
+ (SM(15, AR_PHY_AIC_RSSI_MAX) |
+ SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_2_B0,
+ (SM(44, AR_PHY_AIC_RADIO_DELAY) |
+ SM(8, AR_PHY_AIC_CAL_STEP_SIZE_CORR) |
+ SM(12, AR_PHY_AIC_CAL_ROT_IDX_CORR) |
+ SM(2, AR_PHY_AIC_CAL_CONV_CHECK_FACTOR) |
+ SM(5, AR_PHY_AIC_ROT_IDX_COUNT_MAX) |
+ SM(0, AR_PHY_AIC_CAL_SYNTH_TOGGLE) |
+ SM(0, AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX) |
+ SM(200, AR_PHY_AIC_CAL_SYNTH_SETTLING)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_3_B0,
+ (SM(2, AR_PHY_AIC_MON_MAX_HOP_COUNT) |
+ SM(1, AR_PHY_AIC_MON_MIN_STALE_COUNT) |
+ SM(1, AR_PHY_AIC_MON_PWR_EST_LONG) |
+ SM(2, AR_PHY_AIC_MON_PD_TALLY_SCALING) |
+ SM(10, AR_PHY_AIC_MON_PERF_THR) |
+ SM(2, AR_PHY_AIC_CAL_TARGET_MAG_SETTING) |
+ SM(1, AR_PHY_AIC_CAL_PERF_CHECK_FACTOR) |
+ SM(1, AR_PHY_AIC_CAL_PWR_EST_LONG)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B0,
+ (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+ SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+ SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+ SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+ SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B1,
+ (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+ SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+ SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+ SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+ SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+ ar9003_aic_gain_table(ah);
+
+ /* Need to enable AIC reference signal in BT modem. */
+ REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+ (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) |
+ ATH_AIC_BT_AIC_ENABLE));
+
+ aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32);
+
+ /* Start calibration */
+ REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_CH_VALID_RESET);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+
+ aic->aic_caled_chan = 0;
+ aic->aic_cal_state = AIC_CAL_STATE_STARTED;
+
+ return aic->aic_cal_state;
+}
+
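+/* Post-process the calibration SRAM: convert each valid channel's
+ * attenuation codes into signed linear gains, interpolate or extrapolate
+ * the channels that did not calibrate, and pack the results back into
+ * aic->aic_sram[].
+ */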
+static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL];
+ struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+ u32 dir_path_gain_idx, quad_path_gain_idx, value;
+ u32 fixed_com_att_db;
+ int8_t dir_path_sign, quad_path_sign;
+ int16_t i;
+ bool ret = true;
+
+ memset(&cal_sram, 0, sizeof(cal_sram));
+ memset(&aic_sram, 0, sizeof(aic_sram));
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ value = aic->aic_sram[i];
+
+ cal_sram[i].valid =
+ MS(value, AR_PHY_AIC_SRAM_VALID);
+ cal_sram[i].rot_quad_att_db =
+ MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB);
+ cal_sram[i].vga_quad_sign =
+ MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
+ cal_sram[i].rot_dir_att_db =
+ MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB);
+ cal_sram[i].vga_dir_sign =
+ MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
+ cal_sram[i].com_att_6db =
+ MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB);
+
+ if (cal_sram[i].valid) {
+ dir_path_gain_idx = cal_sram[i].rot_dir_att_db +
+ com_att_db_table[cal_sram[i].com_att_6db];
+ quad_path_gain_idx = cal_sram[i].rot_quad_att_db +
+ com_att_db_table[cal_sram[i].com_att_6db];
+
+ dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1;
+ quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1;
+
+ aic_sram[i].dir_path_gain_lin = dir_path_sign *
+ aic_lin_table[dir_path_gain_idx];
+ aic_sram[i].quad_path_gain_lin = quad_path_sign *
+ aic_lin_table[quad_path_gain_idx];
+ }
+ }
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ int16_t start_idx, end_idx;
+
+ if (cal_sram[i].valid)
+ continue;
+
+ start_idx = ar9003_aic_find_valid(cal_sram, 0, i);
+ end_idx = ar9003_aic_find_valid(cal_sram, 1, i);
+
+ if (start_idx < 0) {
+ /* extrapolation */
+ start_idx = end_idx;
+ end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx);
+
+ if (end_idx < 0) {
+ ret = false;
+ break;
+ }
+
+ aic_sram[i].dir_path_gain_lin =
+ ((aic_sram[start_idx].dir_path_gain_lin -
+ aic_sram[end_idx].dir_path_gain_lin) *
+ (start_idx - i) + ((end_idx - i) >> 1)) /
+ (end_idx - i) +
+ aic_sram[start_idx].dir_path_gain_lin;
+ aic_sram[i].quad_path_gain_lin =
+ ((aic_sram[start_idx].quad_path_gain_lin -
+ aic_sram[end_idx].quad_path_gain_lin) *
+ (start_idx - i) + ((end_idx - i) >> 1)) /
+ (end_idx - i) +
+ aic_sram[start_idx].quad_path_gain_lin;
+ }
+
+ if (end_idx < 0) {
+ /* extrapolation */
+ end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx);
+
+ if (end_idx < 0) {
+ ret = false;
+ break;
+ }
+
+ aic_sram[i].dir_path_gain_lin =
+ ((aic_sram[start_idx].dir_path_gain_lin -
+ aic_sram[end_idx].dir_path_gain_lin) *
+ (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+ (start_idx - end_idx) +
+ aic_sram[start_idx].dir_path_gain_lin;
+ aic_sram[i].quad_path_gain_lin =
+ ((aic_sram[start_idx].quad_path_gain_lin -
+ aic_sram[end_idx].quad_path_gain_lin) *
+ (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+ (start_idx - end_idx) +
+ aic_sram[start_idx].quad_path_gain_lin;
+
+ } else if (start_idx >= 0) {
+ /* interpolation */
+ aic_sram[i].dir_path_gain_lin =
+ (((end_idx - i) * aic_sram[start_idx].dir_path_gain_lin) +
+ ((i - start_idx) * aic_sram[end_idx].dir_path_gain_lin) +
+ ((end_idx - start_idx) >> 1)) /
+ (end_idx - start_idx);
+ aic_sram[i].quad_path_gain_lin =
+ (((end_idx - i) * aic_sram[start_idx].quad_path_gain_lin) +
+ ((i - start_idx) * aic_sram[end_idx].quad_path_gain_lin) +
+ ((end_idx - start_idx) >> 1))/
+ (end_idx - start_idx);
+ }
+ }
+
+ /* From dir/quad_path_gain_lin to sram. */
+ i = ar9003_aic_find_valid(cal_sram, 1, 0);
+ if (i < 0) {
+ i = 0;
+ ret = false;
+ }
+ fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db];
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ int16_t rot_dir_path_att_db, rot_quad_path_att_db;
+
+ aic_sram[i].sram.vga_dir_sign =
+ (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0;
+ aic_sram[i].sram.vga_quad_sign =
+ (aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0;
+
+ rot_dir_path_att_db =
+ ar9003_aic_find_index(0, abs(aic_sram[i].dir_path_gain_lin)) -
+ fixed_com_att_db;
+ rot_quad_path_att_db =
+ ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) -
+ fixed_com_att_db;
+
+ aic_sram[i].sram.com_att_6db =
+ ar9003_aic_find_index(1, fixed_com_att_db);
+
+ aic_sram[i].sram.valid = 1;
+
+ aic_sram[i].sram.rot_dir_att_db =
+ min(max(rot_dir_path_att_db,
+ (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
+ ATH_AIC_MAX_ROT_DIR_ATT_DB);
+ aic_sram[i].sram.rot_quad_att_db =
+ min(max(rot_quad_path_att_db,
+ (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
+ ATH_AIC_MAX_ROT_QUAD_ATT_DB);
+ }
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign,
+ AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |
+ SM(aic_sram[i].sram.vga_quad_sign,
+ AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) |
+ SM(aic_sram[i].sram.com_att_6db,
+ AR_PHY_AIC_SRAM_COM_ATT_6DB) |
+ SM(aic_sram[i].sram.valid,
+ AR_PHY_AIC_SRAM_VALID) |
+ SM(aic_sram[i].sram.rot_dir_att_db,
+ AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) |
+ SM(aic_sram[i].sram.rot_quad_att_db,
+ AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB));
+ }
+
+ return ret;
+}
+
+static void ar9003_aic_cal_done(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+ /* Disable AIC reference signal in BT modem. */
+ REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+ (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) &
+ ~ATH_AIC_BT_AIC_ENABLE));
+
+ if (ar9003_aic_cal_post_process(ah))
+ aic->aic_cal_state = AIC_CAL_STATE_DONE;
+ else
+ aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+}
+
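+/* Poll for calibration completion and harvest per-channel results from
+ * the calibration SRAM; restart calibration until enough BT channels
+ * have valid entries, then finalize via ar9003_aic_cal_done().
+ */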
+static u8 ar9003_aic_cal_continue(struct ath_hw *ah, bool cal_once)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ int i, num_chan;
+
+ num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+ if (!num_chan) {
+ aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+ return aic->aic_cal_state;
+ }
+
+ if (cal_once) {
+ for (i = 0; i < 10000; i++) {
+ if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+ AR_PHY_AIC_CAL_ENABLE) == 0)
+ break;
+
+ udelay(100);
+ }
+ }
+
+ /*
+ * Poll the AR_PHY_AIC_CAL_ENABLE bit instead of AR_PHY_AIC_CAL_DONE,
+ * since the CAL_DONE bit is sometimes not asserted.
+ */
+ if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+ AR_PHY_AIC_CAL_ENABLE) != 0) {
+ ath_dbg(common, MCI, "AIC cal is not done after 40ms");
+ goto exit;
+ }
+
+ REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1,
+ (ATH_AIC_SRAM_CAL_OFFSET | ATH_AIC_SRAM_AUTO_INCREMENT));
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ u32 value;
+
+ value = REG_READ(ah, AR_PHY_AIC_SRAM_DATA_B1);
+
+ if (value & 0x01) {
+ if (aic->aic_sram[i] == 0)
+ aic->aic_caled_chan++;
+
+ aic->aic_sram[i] = value;
+
+ if (!cal_once)
+ break;
+ }
+ }
+
+ if ((aic->aic_caled_chan >= num_chan) || cal_once) {
+ ar9003_aic_cal_done(ah);
+ } else {
+ /* Start calibration */
+ REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1,
+ AR_PHY_AIC_CAL_CH_VALID_RESET);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+ }
+exit:
+ return aic->aic_cal_state;
+
+}
+
+u8 ar9003_aic_calibration(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ u8 cal_ret = AIC_CAL_STATE_ERROR;
+
+ switch (aic->aic_cal_state) {
+ case AIC_CAL_STATE_IDLE:
+ cal_ret = ar9003_aic_cal_start(ah, 1);
+ break;
+ case AIC_CAL_STATE_STARTED:
+ cal_ret = ar9003_aic_cal_continue(ah, false);
+ break;
+ case AIC_CAL_STATE_DONE:
+ cal_ret = AIC_CAL_STATE_DONE;
+ break;
+ default:
+ break;
+ }
+
+ return cal_ret;
+}
+
+u8 ar9003_aic_start_normal(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ int16_t i;
+
+ if (aic->aic_cal_state != AIC_CAL_STATE_DONE)
+ return 1;
+
+ ar9003_aic_gain_table(ah);
+
+ REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, ATH_AIC_SRAM_AUTO_INCREMENT);
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ REG_WRITE(ah, AR_PHY_AIC_SRAM_DATA_B1, aic->aic_sram[i]);
+ }
+
+ /* FIXME: Replace these with proper register names */
+ REG_WRITE(ah, 0xa6b0, 0x80);
+ REG_WRITE(ah, 0xa6b4, 0x5b2df0);
+ REG_WRITE(ah, 0xa6b8, 0x10762cc8);
+ REG_WRITE(ah, 0xa6bc, 0x1219a4b);
+ REG_WRITE(ah, 0xa6c0, 0x1e01);
+ REG_WRITE(ah, 0xb6b4, 0xf0);
+ REG_WRITE(ah, 0xb6c0, 0x1e01);
+ REG_WRITE(ah, 0xb6b0, 0x81);
+ REG_WRITE(ah, AR_PHY_65NM_CH1_RXTX4, 0x40000000);
+
+ aic->aic_enabled = true;
+
+ return 0;
+}
+
+u8 ar9003_aic_cal_reset(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+ aic->aic_cal_state = AIC_CAL_STATE_IDLE;
+ return aic->aic_cal_state;
+}
+
+u8 ar9003_aic_calibration_single(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u8 cal_ret;
+ int num_chan;
+
+ num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+ (void) ar9003_aic_cal_start(ah, num_chan);
+ cal_ret = ar9003_aic_cal_continue(ah, true);
+
+ return cal_ret;
+}
+
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->is_aic_enabled = ar9003_hw_is_aic_enabled;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.h b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
new file mode 100644
index 000000000000..86f40644be43
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_AIC_H
+#define AR9003_AIC_H
+
+#define ATH_AIC_MAX_COM_ATT_DB_TABLE 6
+#define ATH_AIC_MAX_AIC_LIN_TABLE 69
+#define ATH_AIC_MIN_ROT_DIR_ATT_DB 0
+#define ATH_AIC_MIN_ROT_QUAD_ATT_DB 0
+#define ATH_AIC_MAX_ROT_DIR_ATT_DB 37
+#define ATH_AIC_MAX_ROT_QUAD_ATT_DB 37
+#define ATH_AIC_SRAM_AUTO_INCREMENT 0x80000000
+#define ATH_AIC_SRAM_GAIN_TABLE_OFFSET 0x280
+#define ATH_AIC_SRAM_CAL_OFFSET 0x140
+#define ATH_AIC_SRAM_OFFSET 0x00
+#define ATH_AIC_MEAS_MAG_THRESH 20
+#define ATH_AIC_BT_JUPITER_CTRL 0x66820
+#define ATH_AIC_BT_AIC_ENABLE 0x02
+
+enum aic_cal_state {
+ AIC_CAL_STATE_IDLE = 0,
+ AIC_CAL_STATE_STARTED,
+ AIC_CAL_STATE_DONE,
+ AIC_CAL_STATE_ERROR
+};
+
+struct ath_aic_sram_info {
+ bool valid:1;
+ bool vga_quad_sign:1;
+ bool vga_dir_sign:1;
+ u8 rot_quad_att_db;
+ u8 rot_dir_att_db;
+ u8 com_att_6db;
+};
+
+struct ath_aic_out_info {
+ int16_t dir_path_gain_lin;
+ int16_t quad_path_gain_lin;
+ struct ath_aic_sram_info sram;
+};
+
+u8 ar9003_aic_calibration(struct ath_hw *ah);
+u8 ar9003_aic_start_normal(struct ath_hw *ah);
+u8 ar9003_aic_cal_reset(struct ath_hw *ah);
+u8 ar9003_aic_calibration_single(struct ath_hw *ah);
+
+#endif /* AR9003_AIC_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 4335ccbe7d7e..79fd3b2dcbde 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -195,16 +195,16 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
- if (ah->config.no_pll_pwrsave) {
+ if (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) {
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
} else {
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
}
} else if (AR_SREV_9462_21(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -231,10 +231,20 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p1_modes_fast_clock);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9462_2p1_pciephy_clkreq_disable_L1);
- INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9462_2p1_pciephy_clkreq_disable_L1);
+
+ /* Awake -> Sleep Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
+ }
+
+ /* Sleep -> Awake Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
+ }
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -262,11 +272,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p0_common_rx_gain);
/* Awake -> Sleep Setting */
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9462_2p0_pciephy_clkreq_disable_L1);
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9462_2p0_pciephy_clkreq_disable_L1);
+ }
+
/* Sleep -> Awake Setting */
- INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9462_2p0_pciephy_clkreq_disable_L1);
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9462_2p0_pciephy_clkreq_disable_L1);
+ }
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -456,10 +473,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9565_1p1_pciephy_clkreq_disable_L1);
- INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9565_1p1_pciephy_clkreq_disable_L1);
+ /* Awake -> Sleep Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+ }
+
+ /* Sleep -> Awake Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+ }
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p1_modes_fast_clock);
@@ -491,10 +517,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9565_1p0_pciephy_clkreq_disable_L1);
- INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9565_1p0_pciephy_clkreq_disable_L1);
+ /* Awake -> Sleep Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9565_1p0_pciephy_clkreq_disable_L1);
+ }
+
+ /* Sleep -> Awake Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9565_1p0_pciephy_clkreq_disable_L1);
+ }
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
@@ -1130,6 +1165,12 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
ar9003_hw_init_mode_regs(ah);
+
+ if (AR_SREV_9003_PCOEM(ah)) {
+ WARN_ON(!ah->iniPcieSerdes.ia_array);
+ WARN_ON(!ah->iniPcieSerdesLowPower.ia_array);
+ }
+
priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
@@ -1139,4 +1180,5 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
ar9003_hw_attach_phy_ops(ah);
ar9003_hw_attach_calib_ops(ah);
ar9003_hw_attach_mac_ops(ah);
+ ar9003_hw_attach_aic_ops(ah);
}
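All of the INIT_INI_ARRAY() guards added above repeat the same two-bit test on ah->config.pll_pwrsave; the bit layout is introduced in the hw.h hunk further down, and AR9462/AR9565 default the field to 7. A minimal sketch of the gating, with pll_pwrsave_allows() as a purely illustrative helper name:

#define AR_PCIE_PLL_PWRSAVE_CONTROL	BIT(0)	/* honour the per-state bits at all */
#define AR_PCIE_PLL_PWRSAVE_ON_D3	BIT(1)	/* Awake -> Sleep: iniPcieSerdes */
#define AR_PCIE_PLL_PWRSAVE_ON_D0	BIT(2)	/* Sleep -> Awake: iniPcieSerdesLowPower */

/* Sketch only: both the control bit and the matching per-state bit must be
 * set before the clkreq-disable serdes table is selected. */
static bool pll_pwrsave_allows(u8 pll_pwrsave, u8 state_bit)
{
	return (pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
	       (pll_pwrsave & state_bit);
}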
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 7b94a6c7db3d..af5ee416a560 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -19,6 +19,7 @@
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_mci.h"
+#include "ar9003_aic.h"
static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
{
@@ -284,12 +285,12 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
- if (mci->is_2g) {
+ if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
ar9003_mci_send_lna_transfer(ah, true);
udelay(5);
}
- if ((mci->is_2g && !mci->update_2g5g)) {
+ if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
if (ar9003_mci_wait_for_interrupt(ah,
AR_MCI_INTERRUPT_RX_MSG_RAW,
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
@@ -593,7 +594,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
if (!time_out)
break;
- offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
if (offset == MCI_GPM_INVALID)
continue;
@@ -657,7 +658,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
time_out = 0;
while (more_data == MCI_GPM_MORE) {
- offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
if (offset == MCI_GPM_INVALID)
break;
@@ -771,8 +772,14 @@ exit:
static void ar9003_mci_mute_bt(struct ath_hw *ah)
{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
/* disable all MCI messages */
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
/* wait pending HW messages to flush out */
@@ -783,9 +790,10 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
* 1. reset not after resuming from full sleep
* 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment
*/
- ar9003_mci_send_lna_take(ah, true);
-
- udelay(5);
+ if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
+ }
ar9003_mci_send_sys_sleeping(ah, true);
}
@@ -821,6 +829,80 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
}
+static void ar9003_mci_stat_setup(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!AR_SREV_9565(ah))
+ return;
+
+ if (mci->config & ATH_MCI_CONFIG_MCI_STAT_DBG) {
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_BT_LINKID,
+ MCI_STAT_ALL_BT_LINKID);
+ } else {
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
+ }
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_1ANT(struct ath_hw *ah)
+{
+ u32 regval;
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_2ANT(struct ath_hw *ah)
+{
+ u32 regval;
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(0, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(0, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x0);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9462(struct ath_hw *ah)
+{
+ u32 regval;
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
bool is_full_sleep)
{
@@ -831,11 +913,6 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
is_full_sleep, is_2g);
- if (!mci->gpm_addr && !mci->sched_addr) {
- ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
- return -ENOMEM;
- }
-
if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
ath_err(common, "BTCOEX control register is dead\n");
return -EINVAL;
@@ -850,26 +927,17 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
* To avoid MCI state machine be affected by incoming remote MCI msgs,
* MCI mode will be enabled later, right before reset the MCI TX and RX.
*/
-
- regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
- SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
- SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
- SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
- SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
- SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
- SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
if (AR_SREV_9565(ah)) {
- regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
- SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
- AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+ u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH);
+
+ if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED)
+ ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah);
+ else
+ ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah);
} else {
- regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
- SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+ ar9003_mci_set_btcoex_ctrl_9462(ah);
}
- REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
-
if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
ar9003_mci_osla_setup(ah, true);
else
@@ -926,26 +994,32 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
- ar9003_mci_get_next_gpm_offset(ah, true, NULL);
+ /* Init GPM offset after MCI Reset Rx */
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
(SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
- REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
- AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ if (MCI_ANT_ARCH_PA_LNA_SHARED(mci))
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ else
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
ar9003_mci_observation_set_up(ah);
mci->ready = true;
ar9003_mci_prep_interface(ah);
+ ar9003_mci_stat_setup(ah);
- if (AR_SREV_9565(ah))
- REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
- AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
if (en_int)
ar9003_mci_enable_interrupt(ah);
+ if (ath9k_hw_is_aic_enabled(ah))
+ ar9003_aic_start_normal(ah);
+
return 0;
}
@@ -1218,6 +1292,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
}
value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
break;
+ case MCI_STATE_INIT_GPM_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+ if (value < mci->gpm_len)
+ mci->gpm_idx = value;
+ else
+ mci->gpm_idx = 0;
+ break;
case MCI_STATE_LAST_SCHD_MSG_OFFSET:
value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1284,6 +1366,22 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
mci->need_flush_btinfo = false;
break;
+ case MCI_STATE_AIC_CAL:
+ if (ath9k_hw_is_aic_enabled(ah))
+ value = ar9003_aic_calibration(ah);
+ break;
+ case MCI_STATE_AIC_START:
+ if (ath9k_hw_is_aic_enabled(ah))
+ ar9003_aic_start_normal(ah);
+ break;
+ case MCI_STATE_AIC_CAL_RESET:
+ if (ath9k_hw_is_aic_enabled(ah))
+ value = ar9003_aic_cal_reset(ah);
+ break;
+ case MCI_STATE_AIC_CAL_SINGLE:
+ if (ath9k_hw_is_aic_enabled(ah))
+ value = ar9003_aic_calibration_single(ah);
+ break;
default:
break;
}
@@ -1364,21 +1462,11 @@ void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
mci->gpm_idx = 0;
}
-u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 offset, more_gpm = 0, gpm_ptr;
- if (first) {
- gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
-
- if (gpm_ptr >= mci->gpm_len)
- gpm_ptr = 0;
-
- mci->gpm_idx = gpm_ptr;
- return gpm_ptr;
- }
-
/*
* This could be useful to avoid new GPM message interrupt which
* may lead to spurious interrupt after power sleep, or multiple
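The new ar9003_mci_set_btcoex_ctrl_*() helpers above assemble AR_BTCOEX_CTRL purely from SM() field packs, and the reset path decodes the antenna architecture with MS(). For reference, a restatement of those shift/mask helpers (the driver's real definitions live in drivers/net/wireless/ath/ath.h; treat the exact expansion here as an assumption):

#define SM(_v, _f)	(((_v) << _f##_S) & (_f))	/* pack _v into field _f */
#define MS(_v, _f)	(((_v) & (_f)) >> _f##_S)	/* extract field _f from _v */

/* Example: with ATH_MCI_CONFIG_ANT_ARCH = 0x07000000 and its shift of 24
 * (see the ar9003_mci.h hunk below), MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH)
 * yields 0..4, which ar9003_mci_reset() compares against ATH_MCI_ANT_ARCH_*
 * to choose the 1-antenna or 2-antenna AR9565 programming. */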
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 66d7ab9f920d..e288611c12d5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -92,14 +92,36 @@ enum mci_gpm_coex_bt_update_flags_op {
#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
#define ATH_MCI_CONFIG_CLK_DIV_S 12
#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
+#define ATH_MCI_CONFIG_DISABLE_AIC 0x00008000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN 0x007f0000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN_S 16
+#define ATH_MCI_CONFIG_NO_QUIET_ACK 0x00800000
+#define ATH_MCI_CONFIG_NO_QUIET_ACK_S 23
+#define ATH_MCI_CONFIG_ANT_ARCH 0x07000000
+#define ATH_MCI_CONFIG_ANT_ARCH_S 24
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK 0x08000000
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK_S 27
+#define ATH_MCI_CONFIG_FORCE_2CHAIN_ACK 0x10000000
+#define ATH_MCI_CONFIG_MCI_STAT_DBG 0x20000000
#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
ATH_MCI_CONFIG_MCI_OBS_TXRX | \
ATH_MCI_CONFIG_MCI_OBS_BT)
+
#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_NON_SHARED 0x00
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED 0x01
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_NON_SHARED 0x02
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED 0x03
+#define ATH_MCI_ANT_ARCH_3_ANT 0x04
+
+#define MCI_ANT_ARCH_PA_LNA_SHARED(mci) \
+ ((MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) || \
+ (MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED))
+
enum mci_message_header { /* length of payload */
MCI_LNA_CTRL = 0x10, /* len = 0 */
MCI_CONT_NACK = 0x20, /* len = 0 */
@@ -188,20 +210,55 @@ enum mci_bt_state {
MCI_BT_CAL
};
+enum mci_ps_state {
+ MCI_PS_DISABLE,
+ MCI_PS_ENABLE,
+ MCI_PS_ENABLE_OFF,
+ MCI_PS_ENABLE_ON
+};
+
/* Type of state query */
enum mci_state_type {
MCI_STATE_ENABLE,
+ MCI_STATE_INIT_GPM_OFFSET,
+ MCI_STATE_CHECK_GPM_OFFSET,
+ MCI_STATE_NEXT_GPM_OFFSET,
+ MCI_STATE_LAST_GPM_OFFSET,
+ MCI_STATE_BT,
+ MCI_STATE_SET_BT_SLEEP,
MCI_STATE_SET_BT_AWAKE,
+ MCI_STATE_SET_BT_CAL_START,
+ MCI_STATE_SET_BT_CAL,
MCI_STATE_LAST_SCHD_MSG_OFFSET,
MCI_STATE_REMOTE_SLEEP,
+ MCI_STATE_CONT_STATUS,
MCI_STATE_RESET_REQ_WAKE,
MCI_STATE_SEND_WLAN_COEX_VERSION,
+ MCI_STATE_SET_BT_COEX_VERSION,
+ MCI_STATE_SEND_WLAN_CHANNELS,
MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
+ MCI_STATE_NEED_TUNING,
+ MCI_STATE_NEED_STAT_DEBUG,
+ MCI_STATE_SHARED_CHAIN_CONCUR_TX,
+ MCI_STATE_AIC_CAL,
+ MCI_STATE_AIC_START,
+ MCI_STATE_AIC_CAL_RESET,
+ MCI_STATE_AIC_CAL_SINGLE,
+ MCI_STATE_IS_AR9462,
+ MCI_STATE_IS_AR9565_1ANT,
+ MCI_STATE_IS_AR9565_2ANT,
+ MCI_STATE_WLAN_WEAK_SIGNAL,
+ MCI_STATE_SET_WLAN_PS_STATE,
+ MCI_STATE_GET_WLAN_PS_STATE,
MCI_STATE_DEBUG,
- MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_STAT_DEBUG,
+ MCI_STATE_ALLOW_FCS,
+ MCI_STATE_SET_2G_CONTENTION,
MCI_STATE_MAX
};
@@ -255,7 +312,7 @@ int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
void ar9003_mci_cleanup(struct ath_hw *ah);
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
u32 *rx_msg_intr);
-u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more);
void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index c311b2bfdb00..fc595b92ac56 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -640,16 +640,6 @@
#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
-/* AIC Registers */
-#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
-#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
-#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
-#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
-#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4))
-#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8))
-#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
-#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
-
#define AR_PHY_65NM_CH0_TXRF3 0x16048
#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G 0x0000001e
#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1
@@ -989,21 +979,6 @@
#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
-/* SM 1 AIC Registers */
-
-#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
-#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
-#define AR_PHY_AIC_CTRL_2_B1 (AR_SM1_BASE + 0x4b8)
-#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
- 0x4c0 : 0x4c4))
-#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
- 0x4c4 : 0x4c8))
-#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
-#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
-
-#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
-#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
-
#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \
AR_SM1_BASE : AR_SM_BASE))
#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 934418872e8e..e4d11fa7fe8c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -106,7 +106,7 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
int chain, i;
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->rxchainmask & (1 << chain)))
+ if (!(ah->caps.rx_chainmask & (1 << chain)))
continue;
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
ar9003_hw_rtt_load_hist_entry(ah, chain, i,
@@ -171,7 +171,7 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
int chain, i;
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->rxchainmask & (1 << chain)))
+ if (!(ah->caps.rx_chainmask & (1 << chain)))
continue;
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
ah->caldata->rtt_table[chain][i] =
@@ -193,7 +193,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
int chain, i;
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->rxchainmask & (1 << chain)))
+ if (!(ah->caps.rx_chainmask & (1 << chain)))
continue;
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
index 86bfc9604dca..bea41df9fbd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_wow.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -20,11 +20,25 @@
#include "reg_wow.h"
#include "hw-ops.h"
+static void ath9k_hw_set_sta_powersave(struct ath_hw *ah)
+{
+ if (!ath9k_hw_mci_is_enabled(ah))
+ goto set;
+ /*
+ * If MCI is being used, set PWR_SAV only when MCI's
+ * PS state is disabled.
+ */
+ if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE)
+ return;
+set:
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+}
+
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+ ath9k_hw_set_sta_powersave(ah);
/* set rx disable bit */
REG_WRITE(ah, AR_CR, AR_CR_RXD);
@@ -44,6 +58,9 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
}
+ if (ath9k_hw_mci_is_enabled(ah))
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}
@@ -74,8 +91,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
(KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
@@ -88,9 +103,11 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
(ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
- if (AR_SREV_9462_20(ah)) {
- /* AR9462 2.0 has an extra descriptor word (time based
- * discard) compared to other chips */
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
+ /*
+ * AR9462 2.0 and AR9565 have an extra descriptor word
+ * (time based discard) compared to other chips.
+ */
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
wow_ka_data_word0 = AR_WOW_TXBUF(13);
} else {
@@ -99,7 +116,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
-
}
int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
@@ -170,18 +186,17 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
u32 val = 0, rval;
/*
- * read the WoW status register to know
- * the wakeup reason
+ * Read the WoW status register to know
+ * the wakeup reason.
*/
rval = REG_READ(ah, AR_WOW_PATTERN);
val = AR_WOW_STATUS(rval);
/*
- * mask only the WoW events that we have enabled. Sometimes
+ * Mask only the WoW events that we have enabled. Sometimes
* we have spurious WoW events from the AR_WOW_PATTERN
* register. This mask will clean it up.
*/
-
val &= ah->wow.wow_event_mask;
if (val) {
@@ -195,6 +210,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
wow_status |= AH_WOW_BEACON_MISS;
}
+ rval = REG_READ(ah, AR_MAC_PCU_WOW4);
+ val = AR_WOW_STATUS2(rval);
+ val &= ah->wow.wow_event_mask2;
+
+ if (val) {
+ if (AR_WOW2_PATTERN_FOUND(val))
+ wow_status |= AH_WOW_USER_PATTERN_EN;
+ }
+
/*
* set and clear WOW_PME_CLEAR registers for the chip to
* generate next wow signal.
@@ -206,10 +230,12 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
AR_PMCTRL_PWR_STATE_D1D3);
/*
- * clear all events
+ * Clear all events.
*/
REG_WRITE(ah, AR_WOW_PATTERN,
AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+ REG_WRITE(ah, AR_MAC_PCU_WOW4,
+ AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4)));
/*
* restore the beacon threshold to init value
@@ -226,7 +252,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, false);
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) {
+ u32 dc = REG_READ(ah, AR_DIRECT_CONNECT);
+
+ if (!(dc & AR_DC_TSF2_ENABLE))
+ ath9k_hw_gen_timer_start_tsf2(ah);
+ }
+
ah->wow.wow_event_mask = 0;
+ ah->wow.wow_event_mask2 = 0;
return wow_status;
}
@@ -408,6 +442,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
ath9k_hw_wow_set_arwr_reg(ah);
+ if (ath9k_hw_mci_is_enabled(ah))
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
/* HW WoW */
REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 0f8e9464e4ab..a7a81b3969ce 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -184,12 +184,12 @@ struct ath_frame_info {
struct ath_buf *bf;
u16 framelen;
s8 txq;
- enum ath9k_key_type keytype;
u8 keyix;
u8 rtscts_rate;
u8 retries : 7;
u8 baw_tracked : 1;
u8 tx_power;
+ enum ath9k_key_type keytype:2;
};
struct ath_rxbuf {
@@ -645,6 +645,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
struct ath9k_vif_iter_data *iter_data);
void ath9k_calculate_summary_state(struct ath_softc *sc,
struct ath_chanctx *ctx);
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif);
/*******************/
/* Beacon Handling */
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 3dfc2c7f1f07..5a084d94ed90 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -103,7 +103,9 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
return;
}
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+ } else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -307,6 +309,18 @@ static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
btcoex->enabled = true;
}
+static void ath9k_hw_btcoex_disable_mci(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ int i;
+
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex_hw->wlan_weight[i]);
+}
+
void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@@ -318,17 +332,18 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
ath9k_hw_btcoex_enable_2wire(ah);
break;
case ATH_BTCOEX_CFG_3WIRE:
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- ath9k_hw_btcoex_enable_mci(ah);
- return;
- }
ath9k_hw_btcoex_enable_3wire(ah);
break;
+ case ATH_BTCOEX_CFG_MCI:
+ ath9k_hw_btcoex_enable_mci(ah);
+ break;
}
- REG_RMW(ah, AR_GPIO_PDPU,
- (0x2 << (btcoex_hw->btactive_gpio * 2)),
- (0x3 << (btcoex_hw->btactive_gpio * 2)));
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+ REG_RMW(ah, AR_GPIO_PDPU,
+ (0x2 << (btcoex_hw->btactive_gpio * 2)),
+ (0x3 << (btcoex_hw->btactive_gpio * 2)));
+ }
ah->btcoex_hw.enabled = true;
}
@@ -340,14 +355,14 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
int i;
btcoex_hw->enabled = false;
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
- for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
- btcoex_hw->wlan_weight[i]);
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) {
+ ath9k_hw_btcoex_disable_mci(ah);
return;
}
- ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 6de26ea5d5fa..cd2f0a2373cb 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -44,6 +44,9 @@
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
+
+#define ATH_AIC_MAX_BT_CHANNEL 79
+
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
ATH_BTCOEX_STOMP_ALL,
@@ -58,6 +61,7 @@ enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
ATH_BTCOEX_CFG_2WIRE,
ATH_BTCOEX_CFG_3WIRE,
+ ATH_BTCOEX_CFG_MCI,
};
struct ath9k_hw_mci {
@@ -92,9 +96,18 @@ struct ath9k_hw_mci {
u32 last_recovery;
};
+struct ath9k_hw_aic {
+ bool aic_enabled;
+ u8 aic_cal_state;
+ u8 aic_caled_chan;
+ u32 aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+ u32 aic_cal_start_time;
+};
+
struct ath_btcoex_hw {
enum ath_btcoex_scheme scheme;
struct ath9k_hw_mci mci;
+ struct ath9k_hw_aic aic;
bool enabled;
u8 wlanactive_gpio;
u8 btactive_gpio;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index e200a6e3aca5..3e2e24e4843f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -238,7 +238,6 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath9k_nfcal_hist *h = NULL;
unsigned i, j;
- int32_t val;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
@@ -246,6 +245,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (ah->caldata)
h = ah->caldata->nfCalHist;
+ ENABLE_REG_RMW_BUFFER(ah);
for (i = 0; i < NUM_NF_READINGS; i++) {
if (chainmask & (1 << i)) {
s16 nfval;
@@ -258,10 +258,8 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
else
nfval = default_nf;
- val = REG_READ(ah, ah->nf_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) nfval << 1) & 0x1ff);
- REG_WRITE(ah, ah->nf_regs[i], val);
+ REG_RMW(ah, ah->nf_regs[i],
+ (((u32) nfval << 1) & 0x1ff), 0x1ff);
}
}
@@ -274,6 +272,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_RMW_BUFFER_FLUSH(ah);
/*
* Wait for load to complete, should be fast, a few 10s of us.
@@ -309,19 +308,17 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* by the median we just loaded. This will be initial (and max) value
* of next noise floor calibration the baseband does.
*/
- ENABLE_REGWRITE_BUFFER(ah);
+ ENABLE_REG_RMW_BUFFER(ah);
for (i = 0; i < NUM_NF_READINGS; i++) {
if (chainmask & (1 << i)) {
if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
- val = REG_READ(ah, ah->nf_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (-50) << 1) & 0x1ff);
- REG_WRITE(ah, ah->nf_regs[i], val);
+ REG_RMW(ah, ah->nf_regs[i],
+ (((u32) (-50) << 1) & 0x1ff), 0x1ff);
}
}
- REGWRITE_BUFFER_FLUSH(ah);
+ REG_RMW_BUFFER_FLUSH(ah);
return 0;
}
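The noise-floor load now routes its per-chain updates through REG_RMW() inside an ENABLE_REG_RMW_BUFFER()/REG_RMW_BUFFER_FLUSH() pair; the macros are added to hw.h below and are no-ops unless the bus glue provides enable_rmw_buffer/rmw_flush ops, while on ath9k_htc the flush sends the whole batch as a single WMI_REG_RMW_CMDID command. A usage sketch of the pattern, condensed from the hunk above:

static void example_load_nf(struct ath_hw *ah, u8 chainmask, s16 nfval)
{
	int i;

	ENABLE_REG_RMW_BUFFER(ah);
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (!(chainmask & BIT(i)))
			continue;
		/* clear the 9-bit NF field at bit 1, then write the new value */
		REG_RMW(ah, ah->nf_regs[i], ((u32)nfval << 1) & 0x1ff, 0x1ff);
	}
	REG_RMW_BUFFER_FLUSH(ah);
}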
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 50a2e0ac3b8b..dbf8f4959642 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1156,7 +1156,10 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
if (tpc_enabled != ah->tpc_enabled) {
ah->tpc_enabled = tpc_enabled;
- ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+
+ mutex_lock(&sc->mutex);
+ ath9k_set_txpower(sc, NULL);
+ mutex_unlock(&sc->mutex);
}
return count;
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 726271c7c330..e98a9eaba7ff 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -126,8 +126,19 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
DFS_STAT_INC(sc, pulses_detected);
return true;
}
-#undef PRI_CH_RADAR_FOUND
-#undef EXT_CH_RADAR_FOUND
+
+static void
+ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
+{
+ struct dfs_pattern_detector *pd = sc->dfs_detector;
+ DFS_STAT_INC(sc, pulses_processed);
+ if (pd == NULL)
+ return;
+ if (!pd->add_pulse(pd, pe))
+ return;
+ DFS_STAT_INC(sc, radar_detected);
+ ieee80211_radar_detected(sc->hw);
+}
/*
* DFS: check PHY-error for radar pulse and feed the detector
@@ -176,18 +187,21 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
ard.pulse_length_pri = vdata_end[-3];
pe.freq = ah->curchan->channel;
pe.ts = mactime;
- if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
- struct dfs_pattern_detector *pd = sc->dfs_detector;
- ath_dbg(common, DFS,
- "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
- "width=%d, rssi=%d, delta_ts=%llu\n",
- pe.freq, pe.ts, pe.width, pe.rssi,
- pe.ts - sc->dfs_prev_pulse_ts);
- sc->dfs_prev_pulse_ts = pe.ts;
- DFS_STAT_INC(sc, pulses_processed);
- if (pd != NULL && pd->add_pulse(pd, &pe)) {
- DFS_STAT_INC(sc, radar_detected);
- ieee80211_radar_detected(sc->hw);
- }
+ if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
+ return;
+
+ ath_dbg(common, DFS,
+ "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
+ "width=%d, rssi=%d, delta_ts=%llu\n",
+ ard.pulse_bw_info, pe.freq, pe.ts, pe.width, pe.rssi,
+ pe.ts - sc->dfs_prev_pulse_ts);
+ sc->dfs_prev_pulse_ts = pe.ts;
+ if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
+ ath9k_dfs_process_radar_pulse(sc, &pe);
+ if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+ pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
+ ath9k_dfs_process_radar_pulse(sc, &pe);
}
}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
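Factoring the detector feed into ath9k_dfs_process_radar_pulse() lets an extension-channel pulse be reported at its own centre frequency: on HT40 the secondary channel sits 20 MHz above (HT40+) or below (HT40-) the control channel. A worked example of the dual-report path, with illustrative frequencies:

/* Control channel 5260 MHz, HT40+ => extension channel at 5280 MHz. */
pe.freq = 5260;					/* ah->curchan->channel */
if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
	ath9k_dfs_process_radar_pulse(sc, &pe);	/* pulse reported at 5260 */
if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
	pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
	ath9k_dfs_process_radar_pulse(sc, &pe);	/* pulse reported at 5280 */
}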
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 971d770722cf..cc81482c934d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -27,12 +27,7 @@ void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
u32 shift, u32 val)
{
- u32 regVal;
-
- regVal = REG_READ(ah, reg) & ~mask;
- regVal |= (val << shift) & mask;
-
- REG_WRITE(ah, reg, regVal);
+ REG_RMW(ah, reg, ((val << shift) & mask), mask);
if (ah->config.analog_shiftreg)
udelay(100);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index e5a78d4fd66e..4773da6dc6f2 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -389,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
}
}
+ ENABLE_REG_RMW_BUFFER(ah);
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
(numXpdGain - 1) & 0x3);
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
@@ -396,6 +397,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
xpdGainValues[1]);
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0);
+ REG_RMW_BUFFER_FLUSH(ah);
for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
regChainOffset = i * 0x1000;
@@ -770,15 +772,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
struct ar5416_eeprom_4k *eep,
u8 txRxAttenLocal)
{
- REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0,
- pModal->antCtrlChain[0]);
+ ENABLE_REG_RMW_BUFFER(ah);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
+ pModal->antCtrlChain[0], 0);
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0),
- (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
- ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
- AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
- SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
- SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+ REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
+ SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);
if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
AR5416_EEP_MINOR_VER_3) {
@@ -817,6 +818,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
+ REG_RMW_BUFFER_FLUSH(ah);
}
/*
@@ -928,6 +930,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
}
}
+ ENABLE_REG_RMW_BUFFER(ah);
if (AR_SREV_9271(ah)) {
ath9k_hw_analog_shift_rmw(ah,
AR9285_AN_RF2G3,
@@ -1032,18 +1035,19 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
AR9285_AN_RF2G4_DB2_4_S,
db2[4]);
}
+ REG_RMW_BUFFER_FLUSH(ah);
-
+ ENABLE_REG_RMW_BUFFER(ah);
REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
pModal->switchSettling);
REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
pModal->adcDesiredSize);
- REG_WRITE(ah, AR_PHY_RF_CTL4,
- SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
- SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
- SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
- SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+ REG_RMW(ah, AR_PHY_RF_CTL4,
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
+ SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
+ SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON), 0);
REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
pModal->txEndToRxOn);
@@ -1072,6 +1076,8 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
pModal->swSettleHt40);
}
+ REG_RMW_BUFFER_FLUSH(ah);
+
bb_desired_scale = (pModal->bb_scale_smrt_antenna &
EEP_4K_BB_DESIRED_SCALE_MASK);
if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
@@ -1080,6 +1086,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
pwrctrl = mask * bb_desired_scale;
clr = mask * 0x1f;
+ ENABLE_REG_RMW_BUFFER(ah);
REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
@@ -1094,6 +1101,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
clr = mask * 0x1f;
REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
+ REG_RMW_BUFFER_FLUSH(ah);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 098059039351..056f516bf017 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -466,6 +466,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
struct ar5416_eeprom_def *eep,
u8 txRxAttenLocal, int regChainOffset, int i)
{
+ ENABLE_REG_RMW_BUFFER(ah);
if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
txRxAttenLocal = pModal->txRxAttenCh[i];
@@ -483,16 +484,12 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
AR_PHY_GAIN_2GHZ_XATTEN2_DB,
pModal->xatten2Db[i]);
} else {
- REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
- (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
- ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
- | SM(pModal-> bswMargin[i],
- AR_PHY_GAIN_2GHZ_BSW_MARGIN));
- REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
- (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
- ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
- | SM(pModal->bswAtten[i],
- AR_PHY_GAIN_2GHZ_BSW_ATTEN));
+ REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ SM(pModal-> bswMargin[i], AR_PHY_GAIN_2GHZ_BSW_MARGIN),
+ AR_PHY_GAIN_2GHZ_BSW_MARGIN);
+ REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ SM(pModal->bswAtten[i], AR_PHY_GAIN_2GHZ_BSW_ATTEN),
+ AR_PHY_GAIN_2GHZ_BSW_ATTEN);
}
}
@@ -504,17 +501,14 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
AR_PHY_RXGAIN + regChainOffset,
AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
} else {
- REG_WRITE(ah,
- AR_PHY_RXGAIN + regChainOffset,
- (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) &
- ~AR_PHY_RXGAIN_TXRX_ATTEN)
- | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN));
- REG_WRITE(ah,
- AR_PHY_GAIN_2GHZ + regChainOffset,
- (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
- ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
- SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
+ REG_RMW(ah, AR_PHY_RXGAIN + regChainOffset,
+ SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN),
+ AR_PHY_RXGAIN_TXRX_ATTEN);
+ REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN),
+ AR_PHY_GAIN_2GHZ_RXTX_MARGIN);
}
+ REG_RMW_BUFFER_FLUSH(ah);
}
static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index da344b27326c..284706798c71 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -69,9 +69,15 @@ void ath_fill_led_pin(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- if (AR_SREV_9100(ah) || (ah->led_pin >= 0))
+ if (AR_SREV_9100(ah))
return;
+ if (ah->led_pin >= 0) {
+ if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
+ ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
+ return;
+ }
+
if (AR_SREV_9287(ah))
ah->led_pin = ATH_LED_PIN_9287;
else if (AR_SREV_9485(sc->sc_ah))
@@ -202,17 +208,16 @@ static void ath_btcoex_period_timer(unsigned long data)
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- ath9k_mci_update_rssi(sc);
-
ath9k_ps_wakeup(sc);
+ spin_lock_bh(&btcoex->btcoex_lock);
- if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
- ath_detect_bt_priority(sc);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ ath9k_mci_update_rssi(sc);
ath_mci_ftp_adjust(sc);
+ }
- spin_lock_bh(&btcoex->btcoex_lock);
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath_detect_bt_priority(sc);
stomp_type = btcoex->bt_stomp_type;
timer_period = btcoex->btcoex_no_stomp;
@@ -252,9 +257,6 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
struct ath_softc *sc = (struct ath_softc *)arg;
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_dbg(common, BTCOEX, "no stomp timer running\n");
ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock);
@@ -271,7 +273,7 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
ath9k_ps_restore(sc);
}
-static int ath_init_btcoex_timer(struct ath_softc *sc)
+static void ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -280,6 +282,7 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
btcoex->btcoex_period / 100;
btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
+ btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
(unsigned long) sc);
@@ -287,8 +290,6 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
(unsigned long) sc);
spin_lock_init(&btcoex->btcoex_lock);
-
- return 0;
}
/*
@@ -299,6 +300,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+ return;
+
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
/* make sure duty cycle timer is also stopped when resuming */
@@ -312,13 +317,19 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
mod_timer(&btcoex->period_timer, jiffies);
}
-
/*
* Pause btcoex timer and bt duty cycle timer
*/
void ath9k_btcoex_timer_pause(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+ return;
+
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n");
del_timer_sync(&btcoex->period_timer);
del_timer_sync(&btcoex->no_stomp_timer);
@@ -356,33 +367,33 @@ void ath9k_start_btcoex(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
- !ah->btcoex_hw.enabled) {
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT, 0);
- else
- ath9k_hw_btcoex_set_weight(ah, 0, 0,
- ATH_BTCOEX_STOMP_NONE);
- ath9k_hw_btcoex_enable(ah);
+ if (ah->btcoex_hw.enabled ||
+ ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- ath9k_btcoex_timer_resume(sc);
- }
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT, 0);
+ else
+ ath9k_hw_btcoex_set_weight(ah, 0, 0,
+ ATH_BTCOEX_STOMP_NONE);
+ ath9k_hw_btcoex_enable(ah);
+ ath9k_btcoex_timer_resume(sc);
}
void ath9k_stop_btcoex(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- if (ah->btcoex_hw.enabled &&
- ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- ath9k_btcoex_timer_pause(sc);
- ath9k_hw_btcoex_disable(ah);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
- ath_mci_flush_profile(&sc->btcoex.mci);
- }
+ if (!ah->btcoex_hw.enabled ||
+ ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
+ ath9k_btcoex_timer_pause(sc);
+ ath9k_hw_btcoex_disable(ah);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ath_mci_flush_profile(&sc->btcoex.mci);
}
void ath9k_deinit_btcoex(struct ath_softc *sc)
@@ -409,22 +420,20 @@ int ath9k_init_btcoex(struct ath_softc *sc)
break;
case ATH_BTCOEX_CFG_3WIRE:
ath9k_hw_btcoex_init_3wire(sc->sc_ah);
- r = ath_init_btcoex_timer(sc);
- if (r)
- return -1;
+ ath_init_btcoex_timer(sc);
txq = sc->tx.txq_map[IEEE80211_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- if (ath9k_hw_mci_is_enabled(ah)) {
- sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
- INIT_LIST_HEAD(&sc->btcoex.mci.info);
+ break;
+ case ATH_BTCOEX_CFG_MCI:
+ ath_init_btcoex_timer(sc);
- r = ath_mci_setup(sc);
- if (r)
- return r;
+ sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ INIT_LIST_HEAD(&sc->btcoex.mci.info);
+ ath9k_hw_btcoex_init_mci(ah);
- ath9k_hw_btcoex_init_mci(ah);
- }
+ r = ath_mci_setup(sc);
+ if (r)
+ return r;
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 8e7153b186ed..10c02f5cbc5e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -40,6 +40,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
+ { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
{ USB_DEVICE(0x0cf3, 0x7015),
.driver_info = AR9287_USB }, /* Atheros */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 300d3671d0ef..e82a0d4ce23f 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -444,6 +444,10 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
#define OP_BT_SCAN BIT(4)
#define OP_TSF_RESET BIT(6)
+enum htc_op_flags {
+ HTC_FWFLAG_NO_RMW,
+};
+
struct ath9k_htc_priv {
struct device *dev;
struct ieee80211_hw *hw;
@@ -482,6 +486,7 @@ struct ath9k_htc_priv {
bool reconfig_beacon;
unsigned int rxfilter;
unsigned long op_flags;
+ unsigned long fw_flags;
struct ath9k_hw_cal_data caldata;
struct ath_spec_scan_priv spec_priv;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index fd229409f676..d7beefe60683 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -376,17 +376,139 @@ static void ath9k_regwrite_flush(void *hw_priv)
mutex_unlock(&priv->wmi->multi_write_mutex);
}
-static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+static void ath9k_reg_rmw_buffer(void *hw_priv,
+ u32 reg_offset, u32 set, u32 clr)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+ /* Store the register/value */
+ priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].reg =
+ cpu_to_be32(reg_offset);
+ priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].set =
+ cpu_to_be32(set);
+ priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].clr =
+ cpu_to_be32(clr);
+
+ priv->wmi->multi_rmw_idx++;
+
+ /* If the buffer is full, send it out. */
+ if (priv->wmi->multi_rmw_idx == MAX_RMW_CMD_NUMBER) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+ (u8 *) &priv->wmi->multi_rmw,
+ sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI,
+ "REGISTER RMW FAILED, multi len: %d\n",
+ priv->wmi->multi_rmw_idx);
+ }
+ priv->wmi->multi_rmw_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_reg_rmw_flush(void *hw_priv)
{
- u32 val;
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+ return;
+
+ atomic_dec(&priv->wmi->m_rmw_cnt);
- val = ath9k_regread(hw_priv, reg_offset);
- val &= ~clr;
- val |= set;
- ath9k_regwrite(hw_priv, val, reg_offset);
+ mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+ if (priv->wmi->multi_rmw_idx) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+ (u8 *) &priv->wmi->multi_rmw,
+ sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI,
+ "REGISTER RMW FAILED, multi len: %d\n",
+ priv->wmi->multi_rmw_idx);
+ }
+ priv->wmi->multi_rmw_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_enable_rmw_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+ return;
+
+ atomic_inc(&priv->wmi->m_rmw_cnt);
+}
+
+static u32 ath9k_reg_rmw_single(void *hw_priv,
+ u32 reg_offset, u32 set, u32 clr)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct register_rmw buf, buf_ret;
+ int ret;
+ u32 val = 0;
+
+ buf.reg = cpu_to_be32(reg_offset);
+ buf.set = cpu_to_be32(set);
+ buf.clr = cpu_to_be32(clr);
+
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+ (u8 *) &buf, sizeof(buf),
+ (u8 *) &buf_ret, sizeof(buf_ret),
+ 100);
+ if (unlikely(ret)) {
+ ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n",
+ reg_offset, ret);
+ }
return val;
}
+static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) {
+ u32 val;
+
+ val = REG_READ(ah, reg_offset);
+ val &= ~clr;
+ val |= set;
+ REG_WRITE(ah, reg_offset, val);
+
+ return 0;
+ }
+
+ if (atomic_read(&priv->wmi->m_rmw_cnt))
+ ath9k_reg_rmw_buffer(hw_priv, reg_offset, set, clr);
+ else
+ ath9k_reg_rmw_single(hw_priv, reg_offset, set, clr);
+
+ return 0;
+}
+
static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
{
*csz = L1_CACHE_BYTES >> 2;
@@ -501,6 +623,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
ah->reg_ops.write = ath9k_regwrite;
ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
ah->reg_ops.write_flush = ath9k_regwrite_flush;
+ ah->reg_ops.enable_rmw_buffer = ath9k_enable_rmw_buffer;
+ ah->reg_ops.rmw_flush = ath9k_reg_rmw_flush;
ah->reg_ops.rmw = ath9k_reg_rmw;
priv->ah = ah;
@@ -686,6 +810,12 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
return -EINVAL;
}
+ if (priv->fw_version_major == 1 && priv->fw_version_minor < 4)
+ set_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags);
+
+ dev_info(priv->dev, "FW RMW support: %s\n",
+ test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags) ? "Off" : "On");
+
return 0;
}
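On ath9k_htc a read-modify-write now maps onto the firmware-side WMI_REG_RMW_CMDID: single RMWs go out immediately, buffered ones are batched until REG_RMW_BUFFER_FLUSH(), and firmware older than 1.4 (HTC_FWFLAG_NO_RMW) falls back to a plain read/write pair. The cpu_to_be32() packing above implies a per-entry layout like the sketch below; the real struct register_rmw is declared in wmi.h, which is not part of this hunk, so the exact types and attributes are assumptions:

struct register_rmw_sketch {
	__be32 reg;	/* register offset on the target */
	__be32 set;	/* bits to OR in after clearing */
	__be32 clr;	/* bits to clear first */
} __packed;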
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 92d5a6c5a225..564923c0df87 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -149,7 +149,7 @@ static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
* when matching addresses.
*/
iter_data.hw_macaddr = NULL;
- memset(&iter_data.mask, 0xff, ETH_ALEN);
+ eth_broadcast_addr(iter_data.mask);
if (vif)
ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 88769b64b20b..232339b05540 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -108,6 +108,14 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
}
+static inline bool ath9k_hw_is_aic_enabled(struct ath_hw *ah)
+{
+ if (ath9k_hw_private_ops(ah)->is_aic_enabled)
+ return ath9k_hw_private_ops(ah)->is_aic_enabled(ah);
+
+ return false;
+}
+
#endif
/* Private hardware call ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8529014e1a5e..5e15e8e10ed3 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -20,6 +20,7 @@
#include <linux/time.h>
#include <linux/bitops.h>
#include <linux/etherdevice.h>
+#include <linux/gpio.h>
#include <asm/unaligned.h>
#include "hw.h"
@@ -121,6 +122,36 @@ void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
REGWRITE_BUFFER_FLUSH(ah);
}
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size)
+{
+ u32 *tmp_reg_list, *tmp_data;
+ int i;
+
+ tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp_reg_list) {
+ dev_err(ah->dev, "%s: tmp_reg_list: alloc failed\n", __func__);
+ return;
+ }
+
+ tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp_data) {
+ dev_err(ah->dev, "%s: tmp_data: alloc failed\n", __func__);
+ goto error_tmp_data;
+ }
+
+ for (i = 0; i < size; i++)
+ tmp_reg_list[i] = array[i][0];
+
+ REG_READ_MULTI(ah, tmp_reg_list, tmp_data, size);
+
+ for (i = 0; i < size; i++)
+ array[i][1] = tmp_data[i];
+
+ kfree(tmp_data);
+error_tmp_data:
+ kfree(tmp_reg_list);
+}
+
u32 ath9k_hw_reverse_bits(u32 val, u32 n)
{
u32 retval;
@@ -366,6 +397,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.rimt_first = 700;
}
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ ah->config.pll_pwrsave = 7;
+
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
@@ -1197,6 +1231,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
u32 set = AR_STA_ID1_KSRCH_MODE;
+ ENABLE_REG_RMW_BUFFER(ah);
switch (opmode) {
case NL80211_IFTYPE_ADHOC:
if (!AR_SREV_9340_13(ah)) {
@@ -1218,6 +1253,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
break;
}
REG_RMW(ah, AR_STA_ID1, set, mask);
+ REG_RMW_BUFFER_FLUSH(ah);
}
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@ -1930,6 +1966,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_mci_is_enabled(ah))
REG_WRITE(ah, AR_OBS, 8);
+ ENABLE_REG_RMW_BUFFER(ah);
if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
@@ -1939,6 +1976,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
}
+ REG_RMW_BUFFER_FLUSH(ah);
ath9k_hw_init_bb(ah, chan);
@@ -2674,11 +2712,23 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
if (AR_SREV_9271(ah))
val = ~val;
- REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
- AR_GPIO_BIT(gpio));
+ if ((1 << gpio) & AR_GPIO_OE_OUT_MASK)
+ REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
+ AR_GPIO_BIT(gpio));
+ else
+ gpio_set_value(gpio, val & 1);
}
EXPORT_SYMBOL(ath9k_hw_set_gpio);
+void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label)
+{
+ if (gpio >= ah->caps.num_gpio_pins)
+ return;
+
+ gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label);
+}
+EXPORT_SYMBOL(ath9k_hw_request_gpio);
+
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
{
REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e82e570de330..c1d2d0340feb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -27,6 +27,7 @@
#include "eeprom.h"
#include "calib.h"
#include "reg.h"
+#include "reg_mci.h"
#include "phy.h"
#include "btcoex.h"
#include "dynack.h"
@@ -99,6 +100,18 @@
(_ah)->reg_ops.write_flush((_ah)); \
} while (0)
+#define ENABLE_REG_RMW_BUFFER(_ah) \
+ do { \
+ if ((_ah)->reg_ops.enable_rmw_buffer) \
+ (_ah)->reg_ops.enable_rmw_buffer((_ah)); \
+ } while (0)
+
+#define REG_RMW_BUFFER_FLUSH(_ah) \
+ do { \
+ if ((_ah)->reg_ops.rmw_flush) \
+ (_ah)->reg_ops.rmw_flush((_ah)); \
+ } while (0)
+
#define PR_EEP(_s, _val) \
do { \
len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
@@ -125,6 +138,8 @@
#define REG_WRITE_ARRAY(iniarray, column, regWr) \
ath9k_hw_write_array(ah, iniarray, column, &(regWr))
+#define REG_READ_ARRAY(ah, array, size) \
+ ath9k_hw_read_array(ah, array, size)
#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
@@ -308,6 +323,12 @@ enum ath9k_hw_hang_checks {
HW_MAC_HANG = BIT(5),
};
+#define AR_PCIE_PLL_PWRSAVE_CONTROL BIT(0)
+#define AR_PCIE_PLL_PWRSAVE_ON_D3 BIT(1)
+#define AR_PCIE_PLL_PWRSAVE_ON_D0 BIT(2)
+#define AR_PCIE_CDR_PWRSAVE_ON_D3 BIT(3)
+#define AR_PCIE_CDR_PWRSAVE_ON_D0 BIT(4)
+
struct ath9k_ops_config {
int dma_beacon_response_time;
int sw_beacon_response_time;
@@ -334,7 +355,7 @@ struct ath9k_ops_config {
u32 ant_ctrl_comm2g_switch_enable;
bool xatten_margin_cfg;
bool alt_mingainidx;
- bool no_pll_pwrsave;
+ u8 pll_pwrsave;
bool tx_gain_buffalo;
bool led_active_high;
};
@@ -646,6 +667,10 @@ struct ath_hw_private_ops {
/* ANI */
void (*ani_cache_ini_regs)(struct ath_hw *ah);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ bool (*is_aic_enabled)(struct ath_hw *ah);
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
};
/**
@@ -999,6 +1024,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
u32 ah_signal_type);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
+void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
/* General Operation */
@@ -1007,6 +1033,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
int column, unsigned int *writecnt);
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size);
u32 ath9k_hw_reverse_bits(u32 val, u32 n);
u16 ath9k_hw_computetxtime(struct ath_hw *ah,
u8 phy, int kbps,
@@ -1116,6 +1143,7 @@ void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us);
void ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah);
static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return ah->btcoex_hw.enabled;
@@ -1133,6 +1161,9 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
return ah->btcoex_hw.scheme;
}
#else
+static inline void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+}
static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return false;
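
The ENABLE_REG_RMW_BUFFER/REG_RMW_BUFFER_FLUSH macros added to hw.h above only call into the buffer hooks when a backend has registered them; otherwise they compile to no-ops. A rough, self-contained sketch of that NULL-guarded callback pattern follows; every name below is an illustrative stand-in, not the real ath9k API.

#include <stdio.h>

struct demo_reg_ops {
	void (*enable_rmw_buffer)(void *ctx);	/* optional, may stay NULL */
	void (*rmw_flush)(void *ctx);		/* optional, may stay NULL */
};

struct demo_hw {
	struct demo_reg_ops reg_ops;
};

#define DEMO_ENABLE_RMW_BUFFER(_ah)				\
	do {							\
		if ((_ah)->reg_ops.enable_rmw_buffer)		\
			(_ah)->reg_ops.enable_rmw_buffer(_ah);	\
	} while (0)

#define DEMO_RMW_BUFFER_FLUSH(_ah)				\
	do {							\
		if ((_ah)->reg_ops.rmw_flush)			\
			(_ah)->reg_ops.rmw_flush(_ah);		\
	} while (0)

static void demo_enable(void *ctx)
{
	(void)ctx;
	puts("rmw buffering enabled");
}

static void demo_flush(void *ctx)
{
	(void)ctx;
	puts("rmw buffer flushed");
}

int main(void)
{
	struct demo_hw hooked = { .reg_ops = { demo_enable, demo_flush } };
	struct demo_hw plain = { .reg_ops = { NULL, NULL } };

	DEMO_ENABLE_RMW_BUFFER(&hooked);	/* hooks present: callbacks run */
	DEMO_RMW_BUFFER_FLUSH(&hooked);
	DEMO_ENABLE_RMW_BUFFER(&plain);		/* hooks absent: macros are no-ops */
	DEMO_RMW_BUFFER_FLUSH(&plain);
	return 0;
}
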
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 6c6e88495394..f8d11efa7b0f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -141,6 +141,16 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
return val;
}
+static void ath9k_multi_ioread32(void *hw_priv, u32 *addr,
+ u32 *val, u16 count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ val[i] = ath9k_ioread32(hw_priv, addr[i]);
+}
+
+
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
u32 set, u32 clr)
{
@@ -437,8 +447,15 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
ath_info(common, "Enable WAR for ASPM D3/L1\n");
}
+ /*
+ * The default value of pll_pwrsave is 1.
+ * For certain AR9485 cards, it is set to 0.
+ * For AR9462, AR9565 it's set to 7.
+ */
+ ah->config.pll_pwrsave = 1;
+
if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
- ah->config.no_pll_pwrsave = true;
+ ah->config.pll_pwrsave = 0;
ath_info(common, "Disable PLL PowerSave\n");
}
@@ -530,6 +547,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->hw = sc->hw;
ah->hw_version.devid = devid;
ah->reg_ops.read = ath9k_ioread32;
+ ah->reg_ops.multi_read = ath9k_multi_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
pCap = &ah->caps;
@@ -763,7 +781,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
.num_different_channels = 1,
.beacon_int_infra_match = true,
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20),
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40),
}
#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9ede991b8d76..b0badef71ce7 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -994,7 +994,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
* BSSID mask when matching addresses.
*/
memset(iter_data, 0, sizeof(*iter_data));
- memset(&iter_data->mask, 0xff, ETH_ALEN);
+ eth_broadcast_addr(iter_data->mask);
iter_data->slottime = ATH9K_SLOT_TIME_9;
list_for_each_entry(avp, &ctx->vifs, list)
@@ -1139,7 +1139,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ctx->primary_sta = iter_data.primary_sta;
} else {
ctx->primary_sta = NULL;
- memset(common->curbssid, 0, ETH_ALEN);
+ eth_zero_addr(common->curbssid);
common->curaid = 0;
ath9k_hw_write_associd(sc->sc_ah);
if (ath9k_hw_mci_is_enabled(sc->sc_ah))
@@ -1172,6 +1172,38 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ath9k_ps_restore(sc);
}
+static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ int *power = (int *)data;
+
+ if (*power < vif->bss_conf.txpower)
+ *power = vif->bss_conf.txpower;
+}
+
+/* Called with sc->mutex held. */
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ int power;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+
+ ath9k_ps_wakeup(sc);
+ if (ah->tpc_enabled) {
+ power = (vif) ? vif->bss_conf.txpower : -1;
+ ieee80211_iterate_active_interfaces_atomic(
+ sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_tpc_vif_iter, &power);
+ if (power == -1)
+ power = sc->hw->conf.power_level;
+ } else {
+ power = sc->hw->conf.power_level;
+ }
+ sc->cur_chan->txpower = 2 * power;
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+ sc->cur_chan->cur_txpower = reg->max_power_level;
+ ath9k_ps_restore(sc);
+}
+
static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1225,6 +1257,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath9k_assign_hw_queues(hw, vif);
+ ath9k_set_txpower(sc, vif);
+
an->sc = sc;
an->sta = NULL;
an->vif = vif;
@@ -1265,6 +1299,8 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath9k_assign_hw_queues(hw, vif);
ath9k_calculate_summary_state(sc, avp->chanctx);
+ ath9k_set_txpower(sc, vif);
+
mutex_unlock(&sc->mutex);
return 0;
}
@@ -1294,6 +1330,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath9k_calculate_summary_state(sc, avp->chanctx);
+ ath9k_set_txpower(sc, NULL);
+
mutex_unlock(&sc->mutex);
}
@@ -1397,14 +1435,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
}
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
- sc->cur_chan->txpower = 2 * conf->power_level;
- ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
- sc->cur_chan->txpower,
- &sc->cur_chan->cur_txpower);
- }
-
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
@@ -1764,6 +1794,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & CHECK_ANI)
ath_check_ani(sc);
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n",
+ vif->addr, bss_conf->txpower, bss_conf->txpower_type);
+ ath9k_set_txpower(sc, vif);
+ }
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
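
The new ath9k_set_txpower() above ties the hardware power limit to the per-interface BSS txpower when TPC is enabled: the highest value across active interfaces wins, the global power level is the fallback, and the result is stored as 2 * power (which appears to be the driver's half-dB scaling). A minimal user-space model of that selection, with hypothetical types standing in for the mac80211/ath9k structures:

#include <stdio.h>

struct demo_vif {
	int txpower;	/* dBm, like bss_conf.txpower */
};

static int demo_pick_txpower(const struct demo_vif *vifs, int n_vifs,
			     int global_power, int tpc_enabled)
{
	int power = -1;
	int i;

	if (tpc_enabled) {
		/* highest per-interface request wins */
		for (i = 0; i < n_vifs; i++)
			if (vifs[i].txpower > power)
				power = vifs[i].txpower;
	}

	if (power == -1)
		power = global_power;

	return 2 * power;	/* limit handed to the HW, 2 * dBm */
}

int main(void)
{
	struct demo_vif vifs[] = { { 17 }, { 20 } };

	/* TPC on, two interfaces: 20 dBm wins, limit 40 */
	printf("%d\n", demo_pick_txpower(vifs, 2, 23, 1));
	/* TPC off: global power level 23 dBm, limit 46 */
	printf("%d\n", demo_pick_txpower(vifs, 2, 23, 0));
	return 0;
}
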
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 3f7a11edb82a..66596b95273f 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -495,7 +495,7 @@ void ath_mci_intr(struct ath_softc *sc)
ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
- ar9003_mci_get_next_gpm_offset(ah, true, NULL);
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
return;
}
@@ -559,8 +559,7 @@ void ath_mci_intr(struct ath_softc *sc)
return;
pgpm = mci->gpm_buf.bf_addr;
- offset = ar9003_mci_get_next_gpm_offset(ah, false,
- &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
if (offset == MCI_GPM_INVALID)
break;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 9587ec655680..caba54ddad25 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -958,6 +958,8 @@
#define AR_SREV_9550(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
+#define AR_SREV_9550_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9550))
#define AR_SREV_9580(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
@@ -1128,6 +1130,8 @@ enum {
#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
(AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
+#define AR_GPIO_OE_OUT_MASK (AR_SREV_9550_OR_LATER(ah) ? \
+ 0x0000000F : 0xFFFFFFFF)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
@@ -2044,279 +2048,4 @@ enum {
#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
-/* MCI Registers */
-
-#define AR_MCI_COMMAND0 0x1800
-#define AR_MCI_COMMAND0_HEADER 0xFF
-#define AR_MCI_COMMAND0_HEADER_S 0
-#define AR_MCI_COMMAND0_LEN 0x1f00
-#define AR_MCI_COMMAND0_LEN_S 8
-#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
-#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
-
-#define AR_MCI_COMMAND1 0x1804
-
-#define AR_MCI_COMMAND2 0x1808
-#define AR_MCI_COMMAND2_RESET_TX 0x01
-#define AR_MCI_COMMAND2_RESET_TX_S 0
-#define AR_MCI_COMMAND2_RESET_RX 0x02
-#define AR_MCI_COMMAND2_RESET_RX_S 1
-#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
-#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
-#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
-#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
-
-#define AR_MCI_RX_CTRL 0x180c
-
-#define AR_MCI_TX_CTRL 0x1810
-/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
-#define AR_MCI_TX_CTRL_CLK_DIV 0x03
-#define AR_MCI_TX_CTRL_CLK_DIV_S 0
-#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
-#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
-#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
-
-#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
-#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
-
-#define AR_MCI_SCHD_TABLE_0 0x1818
-#define AR_MCI_SCHD_TABLE_1 0x181c
-#define AR_MCI_GPM_0 0x1820
-#define AR_MCI_GPM_1 0x1824
-#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
-#define AR_MCI_GPM_WRITE_PTR_S 16
-#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
-#define AR_MCI_GPM_BUF_LEN_S 0
-
-#define AR_MCI_INTERRUPT_RAW 0x1828
-#define AR_MCI_INTERRUPT_EN 0x182c
-#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
-#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
-#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
-#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
-#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
-#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
-#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
-#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
-#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
-#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
-#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
-#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
-#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
-#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
-#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
-#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
-#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_S 9
-#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
-#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
-#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
-#define AR_MCI_INTERRUPT_BT_PRI_S 11
-#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
-#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
-#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
-#define AR_MCI_INTERRUPT_BT_FREQ_S 28
-#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
-#define AR_MCI_INTERRUPT_BT_STOMP_S 29
-#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
-#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
-#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
-#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
-
-#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
- AR_MCI_INTERRUPT_RX_INVALID_HDR | \
- AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
- AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
- AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
- AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
- AR_MCI_INTERRUPT_RX_MSG | \
- AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
- AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
-
-#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
- AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
- AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
- AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
-
-#define AR_MCI_REMOTE_CPU_INT 0x1830
-#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
-#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
-#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
- AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
-
-#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
- AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
- AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
- AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
-
-#define AR_MCI_CPU_INT 0x1840
-
-#define AR_MCI_RX_STATUS 0x1844
-#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
-#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
-#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
-#define AR_MCI_RX_REMOTE_SLEEP_S 12
-#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
-#define AR_MCI_RX_MCI_CLK_REQ_S 13
-
-#define AR_MCI_CONT_STATUS 0x1848
-#define AR_MCI_CONT_RSSI_POWER 0x000000FF
-#define AR_MCI_CONT_RSSI_POWER_S 0
-#define AR_MCI_CONT_PRIORITY 0x0000FF00
-#define AR_MCI_CONT_PRIORITY_S 8
-#define AR_MCI_CONT_TXRX 0x00010000
-#define AR_MCI_CONT_TXRX_S 16
-
-#define AR_MCI_BT_PRI0 0x184c
-#define AR_MCI_BT_PRI1 0x1850
-#define AR_MCI_BT_PRI2 0x1854
-#define AR_MCI_BT_PRI3 0x1858
-#define AR_MCI_BT_PRI 0x185c
-#define AR_MCI_WL_FREQ0 0x1860
-#define AR_MCI_WL_FREQ1 0x1864
-#define AR_MCI_WL_FREQ2 0x1868
-#define AR_MCI_GAIN 0x186c
-#define AR_MCI_WBTIMER1 0x1870
-#define AR_MCI_WBTIMER2 0x1874
-#define AR_MCI_WBTIMER3 0x1878
-#define AR_MCI_WBTIMER4 0x187c
-#define AR_MCI_MAXGAIN 0x1880
-#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
-#define AR_MCI_HW_SCHD_TBL_D0 0x1888
-#define AR_MCI_HW_SCHD_TBL_D1 0x188c
-#define AR_MCI_HW_SCHD_TBL_D2 0x1890
-#define AR_MCI_HW_SCHD_TBL_D3 0x1894
-#define AR_MCI_TX_PAYLOAD0 0x1898
-#define AR_MCI_TX_PAYLOAD1 0x189c
-#define AR_MCI_TX_PAYLOAD2 0x18a0
-#define AR_MCI_TX_PAYLOAD3 0x18a4
-#define AR_BTCOEX_WBTIMER 0x18a8
-
-#define AR_BTCOEX_CTRL 0x18ac
-#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
-#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
-#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
-#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
-#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
-#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
-#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
-#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
-#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
-#define AR_BTCOEX_CTRL_PA_SHARED_S 4
-#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
-#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
-#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
-#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
-#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
-#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
-#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
-#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
-#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
-#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
-#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
-#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
-#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
-#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
-#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
-#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
-#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
-#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
-#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
-#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
-
-#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
-#define AR_BTCOEX_WL_LNA 0x1940
-#define AR_BTCOEX_RFGAIN_CTRL 0x1944
-#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
-#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
-
-#define AR_BTCOEX_CTRL2 0x1948
-#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
-#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
-#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
-#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
-#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
-#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
-#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
-#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
-#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
-#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
-#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
-#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
-
-#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
-#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
-#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
-#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
-#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
-#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
-#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
-#define AR_GLB_WLAN_UART_INTF_EN_S 17
-#define AR_GLB_DS_JTAG_DISABLE 0x00040000
-#define AR_GLB_DS_JTAG_DISABLE_S 18
-
-#define AR_BTCOEX_RC 0x194c
-#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
-#define AR_BTCOEX_DBG 0x1a50
-#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
-#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
-
-#define AR_MCI_SCHD_TABLE_2 0x1a5c
-#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
-#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
-#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
-#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
-
-#define AR_BTCOEX_CTRL3 0x1a60
-#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
-#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
-
-#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
-#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
-
-#define AR_MCI_MISC 0x1a74
-#define AR_MCI_MISC_HW_FIX_EN 0x00000001
-#define AR_MCI_MISC_HW_FIX_EN_S 0
-#define AR_MCI_DBG_CNT_CTRL 0x1a78
-#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
-#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
-
#endif
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
new file mode 100644
index 000000000000..955147ab48a2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_aic.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_AIC_H
+#define REG_AIC_H
+
+#define AR_SM_BASE 0xa200
+#define AR_SM1_BASE 0xb200
+#define AR_AGC_BASE 0x9e00
+
+#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
+#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
+#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
+
+#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
+
+#define AR_PHY_AIC_SRAM_ADDR_B0 (AR_SM_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B0 (AR_SM_BASE + 0x5f4)
+
+#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
+
+#define AR_PHY_BT_COEX_4 (AR_AGC_BASE + 0x60)
+#define AR_PHY_BT_COEX_5 (AR_AGC_BASE + 0x64)
+
+/* AIC fields */
+#define AR_PHY_AIC_MON_ENABLE 0x80000000
+#define AR_PHY_AIC_MON_ENABLE_S 31
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT 0x7F000000
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S 24
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT 0x00FE0000
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S 17
+#define AR_PHY_AIC_F_WLAN 0x0001FC00
+#define AR_PHY_AIC_F_WLAN_S 10
+#define AR_PHY_AIC_CAL_CH_VALID_RESET 0x00000200
+#define AR_PHY_AIC_CAL_CH_VALID_RESET_S 9
+#define AR_PHY_AIC_CAL_ENABLE 0x00000100
+#define AR_PHY_AIC_CAL_ENABLE_S 8
+#define AR_PHY_AIC_BTTX_PWR_THR 0x000000FE
+#define AR_PHY_AIC_BTTX_PWR_THR_S 1
+#define AR_PHY_AIC_ENABLE 0x00000001
+#define AR_PHY_AIC_ENABLE_S 0
+#define AR_PHY_AIC_CAL_BT_REF_DELAY 0x00F00000
+#define AR_PHY_AIC_CAL_BT_REF_DELAY_S 20
+#define AR_PHY_AIC_BT_IDLE_CFG 0x00080000
+#define AR_PHY_AIC_BT_IDLE_CFG_S 19
+#define AR_PHY_AIC_STDBY_COND 0x00060000
+#define AR_PHY_AIC_STDBY_COND_S 17
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB 0x0001F800
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB_S 11
+#define AR_PHY_AIC_STDBY_COM_ATT_DB 0x00000700
+#define AR_PHY_AIC_STDBY_COM_ATT_DB_S 8
+#define AR_PHY_AIC_RSSI_MAX 0x000000F0
+#define AR_PHY_AIC_RSSI_MAX_S 4
+#define AR_PHY_AIC_RSSI_MIN 0x0000000F
+#define AR_PHY_AIC_RSSI_MIN_S 0
+#define AR_PHY_AIC_RADIO_DELAY 0x7F000000
+#define AR_PHY_AIC_RADIO_DELAY_S 24
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR 0x00F00000
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S 20
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR 0x000F8000
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR_S 15
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR 0x00006000
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S 13
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX 0x00001C00
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX_S 10
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE 0x00000200
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S 9
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX 0x00000100
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S 8
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING 0x000000FF
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING_S 0
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT 0x07F00000
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT_S 20
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT 0x000FE000
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT_S 13
+#define AR_PHY_AIC_MON_PWR_EST_LONG 0x00001000
+#define AR_PHY_AIC_MON_PWR_EST_LONG_S 12
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING 0x00000C00
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING_S 10
+#define AR_PHY_AIC_MON_PERF_THR 0x000003E0
+#define AR_PHY_AIC_MON_PERF_THR_S 5
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING 0x00000018
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S 3
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR 0x00000006
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S 1
+#define AR_PHY_AIC_CAL_PWR_EST_LONG 0x00000001
+#define AR_PHY_AIC_CAL_PWR_EST_LONG_S 0
+#define AR_PHY_AIC_MON_DONE 0x80000000
+#define AR_PHY_AIC_MON_DONE_S 31
+#define AR_PHY_AIC_MON_ACTIVE 0x40000000
+#define AR_PHY_AIC_MON_ACTIVE_S 30
+#define AR_PHY_AIC_MEAS_COUNT 0x3F000000
+#define AR_PHY_AIC_MEAS_COUNT_S 24
+#define AR_PHY_AIC_CAL_ANT_ISO_EST 0x00FC0000
+#define AR_PHY_AIC_CAL_ANT_ISO_EST_S 18
+#define AR_PHY_AIC_CAL_HOP_COUNT 0x0003F800
+#define AR_PHY_AIC_CAL_HOP_COUNT_S 11
+#define AR_PHY_AIC_CAL_VALID_COUNT 0x000007F0
+#define AR_PHY_AIC_CAL_VALID_COUNT_S 4
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR 0x00000008
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S 3
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR 0x00000004
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S 2
+#define AR_PHY_AIC_CAL_DONE 0x00000002
+#define AR_PHY_AIC_CAL_DONE_S 1
+#define AR_PHY_AIC_CAL_ACTIVE 0x00000001
+#define AR_PHY_AIC_CAL_ACTIVE_S 0
+
+#define AR_PHY_AIC_MEAS_MAG_MIN 0xFFC00000
+#define AR_PHY_AIC_MEAS_MAG_MIN_S 22
+#define AR_PHY_AIC_MON_STALE_COUNT 0x003F8000
+#define AR_PHY_AIC_MON_STALE_COUNT_S 15
+#define AR_PHY_AIC_MON_HOP_COUNT 0x00007F00
+#define AR_PHY_AIC_MON_HOP_COUNT_S 8
+#define AR_PHY_AIC_CAL_AIC_SM 0x000000F8
+#define AR_PHY_AIC_CAL_AIC_SM_S 3
+#define AR_PHY_AIC_SM 0x00000007
+#define AR_PHY_AIC_SM_S 0
+#define AR_PHY_AIC_SRAM_VALID 0x00000001
+#define AR_PHY_AIC_SRAM_VALID_S 0
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB 0x0000007E
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S 1
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN 0x00000080
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S 7
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB 0x00003F00
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S 8
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN 0x00004000
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S 14
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB 0x00038000
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB_S 15
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO 0x0000E000
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S 13
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO 0x00001E00
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S 9
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING 0x000001F8
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S 3
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF 0x00000006
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S 1
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED 0x00000001
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S 0
+
+#endif /* REG_AIC_H */
diff --git a/drivers/net/wireless/ath/ath9k/reg_mci.h b/drivers/net/wireless/ath/ath9k/reg_mci.h
new file mode 100644
index 000000000000..6251310704e3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_mci.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_MCI_H
+#define REG_MCI_H
+
+#define AR_MCI_COMMAND0 0x1800
+#define AR_MCI_COMMAND0_HEADER 0xFF
+#define AR_MCI_COMMAND0_HEADER_S 0
+#define AR_MCI_COMMAND0_LEN 0x1f00
+#define AR_MCI_COMMAND0_LEN_S 8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
+
+#define AR_MCI_COMMAND1 0x1804
+
+#define AR_MCI_COMMAND2 0x1808
+#define AR_MCI_COMMAND2_RESET_TX 0x01
+#define AR_MCI_COMMAND2_RESET_TX_S 0
+#define AR_MCI_COMMAND2_RESET_RX 0x02
+#define AR_MCI_COMMAND2_RESET_RX_S 1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
+
+#define AR_MCI_RX_CTRL 0x180c
+
+#define AR_MCI_TX_CTRL 0x1810
+/*
+ * 0 = no division,
+ * 1 = divide by 2,
+ * 2 = divide by 4,
+ * 3 = divide by 8
+ */
+#define AR_MCI_TX_CTRL_CLK_DIV 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S 0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
+
+#define AR_MCI_SCHD_TABLE_0 0x1818
+#define AR_MCI_SCHD_TABLE_1 0x181c
+#define AR_MCI_GPM_0 0x1820
+#define AR_MCI_GPM_1 0x1824
+#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S 16
+#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S 0
+
+#define AR_MCI_INTERRUPT_RAW 0x1828
+
+#define AR_MCI_INTERRUPT_EN 0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
+#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S 9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
+#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S 11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
+#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S 28
+#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S 29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
+
+#define AR_MCI_REMOTE_CPU_INT 0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
+
+#define AR_MCI_CPU_INT 0x1840
+
+#define AR_MCI_RX_STATUS 0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
+#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S 12
+#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S 13
+
+#define AR_MCI_CONT_STATUS 0x1848
+#define AR_MCI_CONT_RSSI_POWER 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S 0
+#define AR_MCI_CONT_PRIORITY 0x0000FF00
+#define AR_MCI_CONT_PRIORITY_S 8
+#define AR_MCI_CONT_TXRX 0x00010000
+#define AR_MCI_CONT_TXRX_S 16
+
+#define AR_MCI_BT_PRI0 0x184c
+#define AR_MCI_BT_PRI1 0x1850
+#define AR_MCI_BT_PRI2 0x1854
+#define AR_MCI_BT_PRI3 0x1858
+#define AR_MCI_BT_PRI 0x185c
+#define AR_MCI_WL_FREQ0 0x1860
+#define AR_MCI_WL_FREQ1 0x1864
+#define AR_MCI_WL_FREQ2 0x1868
+#define AR_MCI_GAIN 0x186c
+#define AR_MCI_WBTIMER1 0x1870
+#define AR_MCI_WBTIMER2 0x1874
+#define AR_MCI_WBTIMER3 0x1878
+#define AR_MCI_WBTIMER4 0x187c
+#define AR_MCI_MAXGAIN 0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0 0x1888
+#define AR_MCI_HW_SCHD_TBL_D1 0x188c
+#define AR_MCI_HW_SCHD_TBL_D2 0x1890
+#define AR_MCI_HW_SCHD_TBL_D3 0x1894
+#define AR_MCI_TX_PAYLOAD0 0x1898
+#define AR_MCI_TX_PAYLOAD1 0x189c
+#define AR_MCI_TX_PAYLOAD2 0x18a0
+#define AR_MCI_TX_PAYLOAD3 0x18a4
+#define AR_BTCOEX_WBTIMER 0x18a8
+
+#define AR_BTCOEX_CTRL 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
+#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
+#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
+#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S 4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
+
+#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
+
+#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA 0x1940
+#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
+#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
+
+#define AR_BTCOEX_CTRL2 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S 17
+#define AR_GLB_DS_JTAG_DISABLE 0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S 18
+
+#define AR_BTCOEX_RC 0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG 0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
+
+#define AR_MCI_SCHD_TABLE_2 0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
+
+#define AR_BTCOEX_CTRL3 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+
+#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
+#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
+
+#define AR_MCI_MISC 0x1a74
+#define AR_MCI_MISC_HW_FIX_EN 0x00000001
+#define AR_MCI_MISC_HW_FIX_EN_S 0
+
+#define AR_MCI_DBG_CNT_CTRL 0x1a78
+#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
+#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID 0x000007f8
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID_S 3
+
+#define MCI_STAT_ALL_BT_LINKID 0xffff
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
+ AR_MCI_INTERRUPT_RX_INVALID_HDR | \
+ AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_MSG | \
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING | \
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#endif /* REG_MCI_H */
diff --git a/drivers/net/wireless/ath/ath9k/reg_wow.h b/drivers/net/wireless/ath/ath9k/reg_wow.h
index 3abfca56ca58..453054078cc4 100644
--- a/drivers/net/wireless/ath/ath9k/reg_wow.h
+++ b/drivers/net/wireless/ath/ath9k/reg_wow.h
@@ -72,7 +72,7 @@
#define AR_WOW_MAC_INTR_EN 0x00040000
#define AR_WOW_MAGIC_EN 0x00010000
#define AR_WOW_PATTERN_EN(x) (x & 0xff)
-#define AR_WOW_PAT_FOUND_SHIFT 8
+#define AR_WOW_PAT_FOUND_SHIFT 8
#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
@@ -90,6 +90,14 @@
AR_WOW_BEACON_FAIL | \
AR_WOW_KEEP_ALIVE_FAIL))
+#define AR_WOW2_PATTERN_EN(x) ((x & 0xff) << 0)
+#define AR_WOW2_PATTERN_FOUND_SHIFT 8
+#define AR_WOW2_PATTERN_FOUND(x) (x & (0xff << AR_WOW2_PATTERN_FOUND_SHIFT))
+#define AR_WOW2_PATTERN_FOUND_MASK ((0xff) << AR_WOW2_PATTERN_FOUND_SHIFT)
+
+#define AR_WOW_STATUS2(x) (x & AR_WOW2_PATTERN_FOUND_MASK)
+#define AR_WOW_CLEAR_EVENTS2(x) (x & ~(AR_WOW2_PATTERN_EN(0xff)))
+
#define AR_WOW_AIFS_CNT(x) (x & 0xff)
#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 65c8894c5f81..ca533b4321bd 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -61,6 +61,8 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
return "WMI_REG_READ_CMDID";
case WMI_REG_WRITE_CMDID:
return "WMI_REG_WRITE_CMDID";
+ case WMI_REG_RMW_CMDID:
+ return "WMI_REG_RMW_CMDID";
case WMI_RC_STATE_CHANGE_CMDID:
return "WMI_RC_STATE_CHANGE_CMDID";
case WMI_RC_RATE_UPDATE_CMDID:
@@ -101,6 +103,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
spin_lock_init(&wmi->event_lock);
mutex_init(&wmi->op_mutex);
mutex_init(&wmi->multi_write_mutex);
+ mutex_init(&wmi->multi_rmw_mutex);
init_completion(&wmi->cmd_wait);
INIT_LIST_HEAD(&wmi->pending_tx_events);
tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
@@ -224,7 +227,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
/* Check if there has been a timeout. */
spin_lock(&wmi->wmi_lock);
- if (cmd_id != wmi->last_cmd_id) {
+ if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
spin_unlock(&wmi->wmi_lock);
goto free_skb;
}
@@ -272,11 +275,16 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
enum wmi_cmd_id cmd, u16 len)
{
struct wmi_cmd_hdr *hdr;
+ unsigned long flags;
hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
hdr->command_id = cpu_to_be16(cmd);
hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
+ wmi->last_seq_id = wmi->tx_seq_id;
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
}
@@ -292,7 +300,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
struct sk_buff *skb;
u8 *data;
int time_left, ret = 0;
- unsigned long flags;
if (ah->ah_flags & AH_UNPLUGGED)
return 0;
@@ -320,10 +327,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
wmi->cmd_rsp_buf = rsp_buf;
wmi->cmd_rsp_len = rsp_len;
- spin_lock_irqsave(&wmi->wmi_lock, flags);
- wmi->last_cmd_id = cmd_id;
- spin_unlock_irqrestore(&wmi->wmi_lock, flags);
-
ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
if (ret)
goto out;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 0db37f230018..380175d5ecd7 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -112,6 +112,7 @@ enum wmi_cmd_id {
WMI_TX_STATS_CMDID,
WMI_RX_STATS_CMDID,
WMI_BITRATE_MASK_CMDID,
+ WMI_REG_RMW_CMDID,
};
enum wmi_event_id {
@@ -125,12 +126,19 @@ enum wmi_event_id {
};
#define MAX_CMD_NUMBER 62
+#define MAX_RMW_CMD_NUMBER 15
struct register_write {
__be32 reg;
__be32 val;
};
+struct register_rmw {
+ __be32 reg;
+ __be32 set;
+ __be32 clr;
+} __packed;
+
struct ath9k_htc_tx_event {
int count;
struct __wmi_event_txstatus txs;
@@ -143,7 +151,7 @@ struct wmi {
enum htc_endpoint_id ctrl_epid;
struct mutex op_mutex;
struct completion cmd_wait;
- enum wmi_cmd_id last_cmd_id;
+ u16 last_seq_id;
struct sk_buff_head wmi_event_queue;
struct tasklet_struct wmi_event_tasklet;
u16 tx_seq_id;
@@ -156,10 +164,18 @@ struct wmi {
spinlock_t wmi_lock;
+ /* multi write section */
atomic_t mwrite_cnt;
struct register_write multi_write[MAX_CMD_NUMBER];
u32 multi_write_idx;
struct mutex multi_write_mutex;
+
+ /* multi rmw section */
+ atomic_t m_rmw_cnt;
+ struct register_rmw multi_rmw[MAX_RMW_CMD_NUMBER];
+ u32 multi_rmw_idx;
+ struct mutex multi_rmw_mutex;
+
};
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
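
struct register_rmw and the new multi_rmw[] fields above imply an accumulate-and-flush scheme: up to MAX_RMW_CMD_NUMBER (reg, set, clr) tuples are queued and then pushed to the target as a single command, presumably via the new WMI_REG_RMW_CMDID. A self-contained sketch of that batching pattern, with the actual HTC/WMI transport stubbed out and all names hypothetical:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_RMW 15		/* mirrors MAX_RMW_CMD_NUMBER */

struct demo_rmw {
	uint32_t reg;
	uint32_t set;
	uint32_t clr;
};

struct demo_rmw_buf {
	struct demo_rmw cmd[DEMO_MAX_RMW];
	unsigned int idx;
};

/* Stand-in for issuing one command carrying all queued tuples. */
static void demo_rmw_send(struct demo_rmw_buf *b)
{
	if (!b->idx)
		return;
	printf("sending %u rmw tuples in one command\n", b->idx);
	b->idx = 0;
}

static void demo_rmw_queue(struct demo_rmw_buf *b, uint32_t reg,
			   uint32_t set, uint32_t clr)
{
	if (b->idx == DEMO_MAX_RMW)	/* buffer full: flush early */
		demo_rmw_send(b);

	b->cmd[b->idx].reg = reg;
	b->cmd[b->idx].set = set;
	b->cmd[b->idx].clr = clr;
	b->idx++;
}

int main(void)
{
	struct demo_rmw_buf buf = { .idx = 0 };
	uint32_t reg;

	for (reg = 0x1800; reg < 0x1800 + 20 * 4; reg += 4)
		demo_rmw_queue(&buf, reg, 0x1, 0x2);

	demo_rmw_send(&buf);	/* flush whatever is left at the end */
	return 0;
}
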
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 1b8e75c4d2c2..3ad79bb4f2c2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1157,10 +1157,11 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
max_power = 1;
} else if (!bf->bf_state.bfs_paprd) {
if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
- max_power = min(ah->tx_power_stbc[rateidx],
- fi->tx_power);
+ max_power = min_t(u8, ah->tx_power_stbc[rateidx],
+ fi->tx_power);
else
- max_power = min(ah->tx_power[rateidx], fi->tx_power);
+ max_power = min_t(u8, ah->tx_power[rateidx],
+ fi->tx_power);
} else {
max_power = ah->paprd_training_power;
}
@@ -2115,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
struct ath_node *an = NULL;
enum ath9k_key_type keytype;
bool short_preamble = false;
+ u8 txpower;
/*
* We check if Short Preamble is needed for the CTS rate by
@@ -2131,6 +2133,16 @@ static void setup_frame_info(struct ieee80211_hw *hw,
if (sta)
an = (struct ath_node *) sta->drv_priv;
+ if (tx_info->control.vif) {
+ struct ieee80211_vif *vif = tx_info->control.vif;
+
+ txpower = 2 * vif->bss_conf.txpower;
+ } else {
+ struct ath_softc *sc = hw->priv;
+
+ txpower = sc->cur_chan->cur_txpower;
+ }
+
memset(fi, 0, sizeof(*fi));
fi->txq = -1;
if (hw_key)
@@ -2141,7 +2153,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
- fi->tx_power = MAX_RATE_POWER;
+ fi->tx_power = txpower;
if (!rate)
return;
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 3d57f8772389..c657ca26a71a 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -289,7 +289,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
"count=%d, count_false=%d\n",
event->freq, pd->rs->type_id,
ps->pri, ps->count, ps->count_falses);
- channel_detector_reset(dpd, cd);
+ pd->reset(pd, dpd->last_pulse_ts);
return true;
}
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 69ed39731902..dbd894428be6 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -1701,7 +1701,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
} else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
/* TODO: it also support ARP response type */
} else {
- wcn36xx_warn("unknow keep alive packet type %d\n", packet_type);
+ wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 2d5ea21be47e..b97172667bc7 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include "wil6210.h"
#include "wmi.h"
@@ -217,7 +218,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
if (cid < 0)
return -ENOENT;
- memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
+ ether_addr_copy(mac, wil->sta[cid].addr);
wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -387,15 +388,29 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int ch;
int rc = 0;
+ wil_print_connect_params(wil, sme);
+
if (test_bit(wil_status_fwconnecting, wil->status) ||
test_bit(wil_status_fwconnected, wil->status))
return -EALREADY;
- wil_print_connect_params(wil, sme);
+ if (sme->ie_len > WMI_MAX_IE_LEN) {
+ wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
+ return -ERANGE;
+ }
+
+ rsn_eid = sme->ie ?
+ cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+ NULL;
+
+ if (sme->privacy && !rsn_eid) {
+ wil_err(wil, "Missing RSN IE for secure connection\n");
+ return -EINVAL;
+ }
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
if (!bss) {
wil_err(wil, "Unable to find BSS\n");
return -ENOENT;
@@ -407,17 +422,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rc = -ENOENT;
goto out;
}
+ wil->privacy = sme->privacy;
- rsn_eid = sme->ie ?
- cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
- NULL;
- if (rsn_eid) {
- if (sme->ie_len > WMI_MAX_IE_LEN) {
- rc = -ERANGE;
- wil_err(wil, "IE too large (%td bytes)\n",
- sme->ie_len);
- goto out;
- }
+ if (wil->privacy) {
/* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
rc = wmi_del_cipher_key(wil, 0, bss->bssid);
if (rc) {
@@ -450,7 +457,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
bss->capability);
goto out;
}
- if (rsn_eid) {
+ if (wil->privacy) {
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
@@ -472,8 +479,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
}
conn.channel = ch - 1;
- memcpy(conn.bssid, bss->bssid, ETH_ALEN);
- memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
+ ether_addr_copy(conn.bssid, bss->bssid);
+ ether_addr_copy(conn.dst_mac, bss->bssid);
set_bit(wil_status_fwconnecting, wil->status);
@@ -769,15 +776,24 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies);
- wil->secure_pcp = info->privacy;
+ wil->privacy = info->privacy;
netif_carrier_on(ndev);
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
channel->hw_value);
if (rc)
- netif_carrier_off(ndev);
+ goto err_pcp_start;
+
+ rc = wil_bcast_init(wil);
+ if (rc)
+ goto err_bcast;
+ goto out; /* success */
+err_bcast:
+ wmi_pcp_stop(wil);
+err_pcp_start:
+ netif_carrier_off(ndev);
out:
mutex_unlock(&wil->mutex);
return rc;
@@ -911,6 +927,21 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
return 0;
}
+static int wil_cfg80211_change_bss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct bss_parameters *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ if (params->ap_isolate >= 0) {
+ wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+ wil->ap_isolate, params->ap_isolate);
+ wil->ap_isolate = params->ap_isolate;
+ }
+
+ return 0;
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
@@ -931,6 +962,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.stop_ap = wil_cfg80211_stop_ap,
.del_station = wil_cfg80211_del_station,
.probe_client = wil_cfg80211_probe_client,
+ .change_bss = wil_cfg80211_change_bss,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 45c3558ec804..bbc22d88f78f 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -29,6 +29,7 @@
static u32 mem_addr;
static u32 dbg_txdesc_index;
static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */
enum dbg_off_type {
doff_u32 = 0,
@@ -102,23 +103,36 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
% vring->size;
int avail = vring->size - used - 1;
char name[10];
+ char sidle[10];
/* performance monitoring */
cycles_t now = get_cycles();
uint64_t idle = txdata->idle * 100;
uint64_t total = now - txdata->begin;
- do_div(idle, total);
+ if (total != 0) {
+ do_div(idle, total);
+ snprintf(sidle, sizeof(sidle), "%3d%%",
+ (int)idle);
+ } else {
+ snprintf(sidle, sizeof(sidle), "N/A");
+ }
txdata->begin = now;
txdata->idle = 0ULL;
snprintf(name, sizeof(name), "tx_%2d", i);
- seq_printf(s,
- "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %3d%%\n",
- wil->sta[cid].addr, cid, tid,
- txdata->agg_wsize, txdata->agg_timeout,
- txdata->agg_amsdu ? "+" : "-",
- used, avail, (int)idle);
+ if (cid < WIL6210_MAX_CID)
+ seq_printf(s,
+ "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+ wil->sta[cid].addr, cid, tid,
+ txdata->agg_wsize,
+ txdata->agg_timeout,
+ txdata->agg_amsdu ? "+" : "-",
+ used, avail, sidle);
+ else
+ seq_printf(s,
+ "\nBroadcast [%3d|%3d] idle %s\n",
+ used, avail, sidle);
wil_print_vring(s, wil, name, vring, '_', 'H');
}
@@ -549,7 +563,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
dev_close(ndev);
ndev->flags &= ~IFF_UP;
rtnl_unlock();
- wil_reset(wil);
+ wil_reset(wil, true);
return len;
}
@@ -618,7 +632,7 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
struct wil6210_priv *wil = file->private_data;
int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
- char cmd[8];
+ char cmd[9];
int p1, p2, p3;
if (!kbuf)
@@ -1392,11 +1406,12 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
/* fields in struct wil6210_priv */
static const struct dbg_off dbg_wil_off[] = {
- WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32),
+ WIL_FIELD(privacy, S_IRUGO, doff_u32),
WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
+ WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
{},
};
@@ -1412,6 +1427,8 @@ static const struct dbg_off dbg_statics[] = {
{"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
{"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
{"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
+ {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+ doff_u32},
{},
};
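
One detail in the debugfs hunk above is the divide-by-zero guard: when no cycles have elapsed since the last snapshot, the idle figure is printed as "N/A" instead of being computed. A tiny standalone version of that formatting helper, with illustrative names:

#include <stdint.h>
#include <stdio.h>

static void demo_format_idle(char *buf, size_t len,
			     uint64_t idle_cycles, uint64_t total_cycles)
{
	if (total_cycles)
		snprintf(buf, len, "%3d%%",
			 (int)(idle_cycles * 100 / total_cycles));
	else
		snprintf(buf, len, "N/A");
}

int main(void)
{
	char sidle[10];

	demo_format_idle(sidle, sizeof(sidle), 25, 100);
	printf("idle %s\n", sidle);	/* prints "idle  25%" */
	demo_format_idle(sidle, sizeof(sidle), 0, 0);
	printf("idle %s\n", sidle);	/* prints "idle N/A" */
	return 0;
}
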
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 4c44a82c34d7..0ea695ff98ad 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -50,27 +50,19 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
wil_dbg_misc(wil, "%s()\n", __func__);
- if (test_bit(hw_capability_advanced_itr_moderation,
- wil->hw_capabilities)) {
- tx_itr_en = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
- if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
- tx_itr_val =
- ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
-
- rx_itr_en = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
- if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
- rx_itr_val =
- ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
- } else {
- rx_itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
- if (rx_itr_en & BIT_DMA_ITR_CNT_CRL_EN)
- rx_itr_val = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
- }
+ tx_itr_en = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+ if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
+ tx_itr_val =
+ ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+
+ rx_itr_en = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+ if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
+ rx_itr_val =
+ ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 93c5cc16c515..4428345e5a47 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -20,6 +20,7 @@
#include "fw.h"
MODULE_FIRMWARE(WIL_FW_NAME);
+MODULE_FIRMWARE(WIL_FW2_NAME);
/* target operations */
/* register read */
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index d4acf93a9a02..157f5ef384e0 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -451,8 +451,6 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
}
return -EINVAL;
}
- /* Mark FW as loaded from host */
- S(RGF_USER_USAGE_6, 1);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index a6f923086f31..28ffc18466c4 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -166,9 +166,16 @@ void wil_unmask_irq(struct wil6210_priv *wil)
/* target write operation */
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-static
-void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ /* disable interrupt moderation for monitor
+ * to get better timestamp precision
+ */
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
+ return;
+
/* Disable and clear tx counter before (re)configuration */
W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
@@ -206,42 +213,8 @@ void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
}
-static
-void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil)
-{
- /* disable, use usec resolution */
- W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR);
-
- wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration);
- W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration);
- /* start it */
- W(RGF_DMA_ITR_CNT_CRL,
- BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK);
-}
-
#undef W
-void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
-{
- wil_dbg_irq(wil, "%s()\n", __func__);
-
- /* disable interrupt moderation for monitor
- * to get better timestamp precision
- */
- if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
- return;
-
- if (test_bit(hw_capability_advanced_itr_moderation,
- wil->hw_capabilities))
- wil_configure_interrupt_moderation_new(wil);
- else {
- /* Advanced interrupt moderation is not available before
- * Sparrow v2. Will use legacy interrupt moderation
- */
- wil_configure_interrupt_moderation_lgc(wil);
- }
-}
-
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -253,7 +226,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
- if (!isr) {
+ if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: RX\n");
return IRQ_NONE;
}
@@ -266,17 +239,18 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
* action is always the same - should empty the accumulated
* packets from the RX ring.
*/
- if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) {
+ if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
+ BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
wil_dbg_irq(wil, "RX done\n");
- if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)
+ if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
wil_err_ratelimited(wil,
"Received \"Rx buffer is in risk of overflow\" interrupt\n");
isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH);
- if (test_bit(wil_status_reset_done, wil->status)) {
- if (test_bit(wil_status_napi_en, wil->status)) {
+ if (likely(test_bit(wil_status_reset_done, wil->status))) {
+ if (likely(test_bit(wil_status_napi_en, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_rx);
@@ -289,7 +263,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
}
}
- if (isr)
+ if (unlikely(isr))
wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
/* Rx IRQ will be enabled when NAPI processing finished */
@@ -313,19 +287,19 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
- if (!isr) {
+ if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: TX\n");
return IRQ_NONE;
}
wil6210_mask_irq_tx(wil);
- if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+ if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
wil_dbg_irq(wil, "TX done\n");
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* clear also all VRING interrupts */
isr &= ~(BIT(25) - 1UL);
- if (test_bit(wil_status_reset_done, wil->status)) {
+ if (likely(test_bit(wil_status_reset_done, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_tx);
@@ -334,7 +308,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
}
}
- if (isr)
+ if (unlikely(isr))
wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
/* Tx IRQ will be enabled when NAPI processing finished */
@@ -523,11 +497,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
/**
* pseudo_cause is Clear-On-Read, no need to ACK
*/
- if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+ if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
return IRQ_NONE;
/* FIXME: IRQ mask debug */
- if (wil6210_debug_irq_mask(wil, pseudo_cause))
+ if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
return IRQ_NONE;
trace_wil6210_irq_pseudo(pseudo_cause);
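A note on the likely()/unlikely() annotations added throughout the ISR paths above: they are branch-prediction hints, not functional changes. In the kernel they come from <linux/compiler.h>; the definitions below follow the conventional __builtin_expect() form so the idiom can be tried standalone:

/* Standalone demo of the likely()/unlikely() annotation pattern used
 * in the interrupt handlers above.
 */
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int handle_isr(unsigned int isr)
{
        if (unlikely(!isr)) {           /* spurious interrupt - rare path */
                fprintf(stderr, "spurious IRQ\n");
                return -1;
        }
        if (likely(isr & 0x1)) {        /* the common "done" bit */
                printf("RX done\n");
                isr &= ~0x1u;
        }
        if (unlikely(isr))              /* leftover, unexpected bits */
                fprintf(stderr, "un-handled ISR bits 0x%08x\n", isr);
        return 0;
}

int main(void)
{
        handle_isr(0x1);
        handle_isr(0x0);
        return 0;
}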
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index b04e0afdcb21..c2a238426425 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -29,10 +29,6 @@ bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
-static bool no_fw_load = true;
-module_param(no_fw_load, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash.");
-
/* if not set via modparam, will be set to default value of 1/8 of
* rx ring size during init flow
*/
@@ -72,6 +68,7 @@ MODULE_PARM_DESC(mtu_max, " Max MTU value.");
static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
static int ring_order_set(const char *val, const struct kernel_param *kp)
{
@@ -220,6 +217,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
+ wil_bcast_fini(wil);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
@@ -364,6 +362,35 @@ static int wil_find_free_vring(struct wil6210_priv *wil)
return -EINVAL;
}
+int wil_bcast_init(struct wil6210_priv *wil)
+{
+ int ri = wil->bcast_vring, rc;
+
+ if ((ri >= 0) && wil->vring_tx[ri].va)
+ return 0;
+
+ ri = wil_find_free_vring(wil);
+ if (ri < 0)
+ return ri;
+
+ rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
+ if (rc == 0)
+ wil->bcast_vring = ri;
+
+ return rc;
+}
+
+void wil_bcast_fini(struct wil6210_priv *wil)
+{
+ int ri = wil->bcast_vring;
+
+ if (ri < 0)
+ return;
+
+ wil->bcast_vring = -1;
+ wil_vring_fini_tx(wil, ri);
+}
+
static void wil_connect_worker(struct work_struct *work)
{
int rc;
@@ -411,6 +438,7 @@ int wil_priv_init(struct wil6210_priv *wil)
init_completion(&wil->wmi_call);
wil->pending_connect_cid = -1;
+ wil->bcast_vring = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
@@ -520,8 +548,6 @@ static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
u32 x;
- bool is_reset_v2 = test_bit(hw_capability_reset_v2,
- wil->hw_capabilities);
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
@@ -532,82 +558,67 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_halt_cpu(wil);
+ /* clear all boot loader "ready" bits */
+ W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
/* Clear Fw Download notification */
C(RGF_USER_USAGE_6, BIT(0));
- if (is_reset_v2) {
- S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
- /* XTAL stabilization should take about 3ms */
- usleep_range(5000, 7000);
- x = R(RGF_CAF_PLL_LOCK_STATUS);
- if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
- wil_err(wil, "Xtal stabilization timeout\n"
- "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
- return -ETIME;
- }
- /* switch 10k to XTAL*/
- C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
- /* 40 MHz */
- C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
-
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+ S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+ /* XTAL stabilization should take about 3ms */
+ usleep_range(5000, 7000);
+ x = R(RGF_CAF_PLL_LOCK_STATUS);
+ if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
+ wil_err(wil, "Xtal stabilization timeout\n"
+ "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
+ return -ETIME;
}
+ /* switch 10k to XTAL*/
+ C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+ /* 40 MHz */
+ C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3,
- is_reset_v2 ? 0x000000f0 : 0x00000170);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
- if (is_reset_v2) {
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
- }
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- if (is_reset_v2) {
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
- /* reset A2 PCIE AHB */
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
- } else {
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
- W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
- }
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
- /* TODO: check order here!!! Erez code is different */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- /* wait until device ready. typical time is 200..250 msec */
+ /* wait until device ready. typical time is 20..80 msec */
do {
msleep(RST_DELAY);
- x = R(RGF_USER_HW_MACHINE_STATE);
+ x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
- } while (x != HW_MACHINE_BOOT_DONE);
-
- if (!is_reset_v2)
- W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
+ } while (!(x & BIT_BL_READY));
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ /* enable fix for HW bug related to the SA/DA swap in AP Rx */
+ S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+ BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
-#undef R
-#undef W
-#undef S
-#undef C
-
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{
le32_to_cpus(&r->base);
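The reset path above now polls the boot loader's ready word (BIT_BL_READY at RGF_USER_BL) instead of the old HW machine state, and the comment updates the expected wait to 20..80 ms. A small userspace sketch of the same bounded-poll pattern; the delay and count values and the register read are stand-ins, not the driver's real RST_DELAY/RST_COUNT:

/* Bounded polling loop in the style of the bl.ready wait above.
 * read_bl_ready() stands in for R(RGF_USER_BL + offsetof(struct RGF_BL, ready)).
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>

#define BIT_BL_READY    0x1u
#define RST_DELAY_MS    15      /* per-iteration sleep; illustrative value  */
#define RST_COUNT       20      /* max iterations before giving up          */

static int polls;

static uint32_t read_bl_ready(void)
{
        /* pretend the boot loader raises the bit on the 4th poll */
        return (++polls >= 4) ? BIT_BL_READY : 0;
}

static int wait_bl_ready(void)
{
        int delay = 0;
        uint32_t x;

        do {
                usleep(RST_DELAY_MS * 1000);
                x = read_bl_ready();
                if (delay++ > RST_COUNT) {
                        fprintf(stderr, "Reset not completed, bl.ready 0x%08x\n", x);
                        return -ETIME;
                }
        } while (!(x & BIT_BL_READY));

        printf("ready after %d ms\n", delay * RST_DELAY_MS);
        return 0;
}

int main(void)
{
        return wait_bl_ready() ? 1 : 0;
}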
@@ -617,6 +628,32 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
le32_to_cpus(&r->head);
}
+static int wil_get_bl_info(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct RGF_BL bl;
+
+ wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
+ le32_to_cpus(&bl.ready);
+ le32_to_cpus(&bl.version);
+ le32_to_cpus(&bl.rf_type);
+ le32_to_cpus(&bl.baseband_type);
+
+ if (!is_valid_ether_addr(bl.mac_address)) {
+ wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->perm_addr, bl.mac_address);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ ether_addr_copy(ndev->dev_addr, bl.mac_address);
+ wil_info(wil,
+ "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+ bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+
+ return 0;
+}
+
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(1000);
@@ -637,7 +674,7 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
* After calling this routine, you're expected to reload
* the firmware.
*/
-int wil_reset(struct wil6210_priv *wil)
+int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
@@ -651,6 +688,7 @@ int wil_reset(struct wil6210_priv *wil)
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
+ wil_bcast_fini(wil);
/* prevent NAPI from being scheduled */
bitmap_zero(wil->status, wil_status_last);
@@ -675,46 +713,62 @@ int wil_reset(struct wil6210_priv *wil)
if (rc)
return rc;
- if (!no_fw_load) {
- wil_info(wil, "Use firmware <%s>\n", WIL_FW_NAME);
+ rc = wil_get_bl_info(wil);
+ if (rc)
+ return rc;
+
+ if (load_fw) {
+ wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
+ WIL_FW2_NAME);
+
wil_halt_cpu(wil);
/* Loading f/w from the file */
rc = wil_request_firmware(wil, WIL_FW_NAME);
if (rc)
return rc;
+ rc = wil_request_firmware(wil, WIL_FW2_NAME);
+ if (rc)
+ return rc;
+
+ /* Mark FW as loaded from host */
+ S(RGF_USER_USAGE_6, 1);
- /* clear any interrupts which on-card-firmware may have set */
+ /* clear any interrupts which on-card-firmware
+ * may have set
+ */
wil6210_clear_irq(wil);
- { /* CAF_ICR - clear and mask */
- u32 a = HOSTADDR(RGF_CAF_ICR) +
- offsetof(struct RGF_ICR, ICR);
- u32 m = HOSTADDR(RGF_CAF_ICR) +
- offsetof(struct RGF_ICR, IMV);
- u32 icr = ioread32(wil->csr + a);
-
- iowrite32(icr, wil->csr + a); /* W1C */
- iowrite32(~0, wil->csr + m);
- wmb(); /* wait for completion */
- }
+ /* CAF_ICR - clear and mask */
+ /* it is W1C, clear by writing back same value */
+ S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+
wil_release_cpu(wil);
- } else {
- wil_info(wil, "Use firmware from on-card flash\n");
}
/* init after reset */
wil->pending_connect_cid = -1;
+ wil->ap_isolate = 0;
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);
- wil_configure_interrupt_moderation(wil);
- wil_unmask_irq(wil);
+ if (load_fw) {
+ wil_configure_interrupt_moderation(wil);
+ wil_unmask_irq(wil);
- /* we just started MAC, wait for FW ready */
- rc = wil_wait_for_fw_ready(wil);
+ /* we just started MAC, wait for FW ready */
+ rc = wil_wait_for_fw_ready(wil);
+ if (rc == 0) /* check FW is responsive */
+ rc = wmi_echo(wil);
+ }
return rc;
}
+#undef R
+#undef W
+#undef S
+#undef C
+
void wil_fw_error_recovery(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "starting fw error recovery\n");
@@ -730,7 +784,7 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex));
- rc = wil_reset(wil);
+ rc = wil_reset(wil, true);
if (rc)
return rc;
@@ -837,7 +891,7 @@ int __wil_down(struct wil6210_priv *wil)
if (!iter)
wil_err(wil, "timeout waiting for idle FW/HW\n");
- wil_rx_fini(wil);
+ wil_reset(wil, false);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ace30c1b5c64..f2f7ea29558e 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -82,7 +82,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
wil_rx_handle(wil, &quota);
done = budget - quota;
- if (done <= 1) { /* burst ends - only one packet processed */
+ if (done < budget) {
napi_complete(napi);
wil6210_unmask_irq_rx(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
@@ -110,7 +110,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
tx_done += wil_tx_complete(wil, i);
}
- if (tx_done <= 1) { /* burst ends - only one packet processed */
+ if (tx_done < budget) {
napi_complete(napi);
wil6210_unmask_irq_tx(wil);
wil_dbg_txrx(wil, "NAPI TX complete\n");
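The two poll handlers above drop the old "done <= 1" heuristic in favour of the standard NAPI contract: call napi_complete() and re-enable interrupts only when less work than the budget was done, otherwise stay in polling mode. A compact standalone model of that contract (the packet backlog here is simulated):

/* Minimal model of the NAPI poll contract used above:
 * complete + re-enable IRQ only when done < budget.
 */
#include <stdio.h>

static int pending = 130;       /* simulated backlog of rx packets */

static int process_rx(int quota)
{
        int done = (pending < quota) ? pending : quota;

        pending -= done;
        return done;
}

static int poll(int budget)
{
        int done = process_rx(budget);

        if (done < budget) {
                /* napi_complete(); unmask_irq_rx(); */
                printf("poll done=%d < budget=%d -> complete, irq on\n",
                       done, budget);
        } else {
                printf("poll done=%d == budget=%d -> stay in polling\n",
                       done, budget);
        }
        return done;
}

int main(void)
{
        while (pending)
                poll(64);       /* NAPI-weight-style budget */
        return 0;
}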
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 3dd26709ccb2..109986114abf 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -39,18 +39,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
bitmap_zero(wil->hw_capabilities, hw_capability_last);
switch (rev_id) {
- case JTAG_DEV_ID_MARLON_B0:
- wil->hw_name = "Marlon B0";
- wil->hw_version = HW_VER_MARLON_B0;
- break;
- case JTAG_DEV_ID_SPARROW_A0:
- wil->hw_name = "Sparrow A0";
- wil->hw_version = HW_VER_SPARROW_A0;
- break;
- case JTAG_DEV_ID_SPARROW_A1:
- wil->hw_name = "Sparrow A1";
- wil->hw_version = HW_VER_SPARROW_A1;
- break;
case JTAG_DEV_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0";
wil->hw_version = HW_VER_SPARROW_B0;
@@ -62,13 +50,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
}
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
-
- if (wil->hw_version >= HW_VER_SPARROW_A0)
- set_bit(hw_capability_reset_v2, wil->hw_capabilities);
-
- if (wil->hw_version >= HW_VER_SPARROW_B0)
- set_bit(hw_capability_advanced_itr_moderation,
- wil->hw_capabilities);
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -150,7 +131,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
/* need reset here to obtain MAC */
mutex_lock(&wil->mutex);
- rc = wil_reset(wil);
+ rc = wil_reset(wil, false);
mutex_unlock(&wil->mutex);
if (debug_fw)
rc = 0;
@@ -265,8 +246,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil6210_debugfs_init(wil);
- /* check FW is alive */
- wmi_echo(wil);
return 0;
@@ -305,7 +284,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
}
static const struct pci_device_id wil6210_pcie_ids[] = {
- { PCI_DEVICE(0x1ae9, 0x0301) },
{ PCI_DEVICE(0x1ae9, 0x0310) },
{ PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
{ /* end: all zeroes */ },
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8439f65db259..e8bd512d81a9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -33,6 +33,15 @@ module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
" Include PHY info in the radiotap header, default - no");
+bool rx_align_2;
+module_param(rx_align_2, bool, S_IRUGO);
+MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
+
+static inline uint wil_rx_snaplen(void)
+{
+ return rx_align_2 ? 6 : 0;
+}
+
static inline int wil_vring_is_empty(struct vring *vring)
{
return vring->swhead == vring->swtail;
@@ -53,34 +62,38 @@ static inline int wil_vring_is_full(struct vring *vring)
return wil_vring_next_tail(vring) == vring->swhead;
}
-/*
- * Available space in Tx Vring
- */
-static inline int wil_vring_avail_tx(struct vring *vring)
+/* Used space in Tx Vring */
+static inline int wil_vring_used_tx(struct vring *vring)
{
u32 swhead = vring->swhead;
u32 swtail = vring->swtail;
- int used = (vring->size + swhead - swtail) % vring->size;
+ return (vring->size + swhead - swtail) % vring->size;
+}
- return vring->size - used - 1;
+/* Available space in Tx Vring */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+ return vring->size - wil_vring_used_tx(vring) - 1;
}
-/**
- * wil_vring_wmark_low - low watermark for available descriptor space
- */
+/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
return vring->size/8;
}
-/**
- * wil_vring_wmark_high - high watermark for available descriptor space
- */
+/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
return vring->size/4;
}
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+ return val >= min && val < max;
+}
+
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
struct device *dev = wil_to_dev(wil);
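The new wil_vring_used_tx()/wil_vring_avail_tx() pair above is the usual circular-buffer bookkeeping: used = (size + head - tail) % size, with one slot always kept free so that avail = size - used - 1 and a full ring stays distinguishable from an empty one. A standalone check of those formulas:

/* Circular-ring occupancy math as used by wil_vring_used_tx() /
 * wil_vring_avail_tx() above.
 */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

struct vring {
        uint32_t size;
        uint32_t swhead;        /* producer index */
        uint32_t swtail;        /* consumer index */
};

static int vring_used_tx(const struct vring *v)
{
        return (v->size + v->swhead - v->swtail) % v->size;
}

static int vring_avail_tx(const struct vring *v)
{
        return v->size - vring_used_tx(v) - 1;
}

int main(void)
{
        struct vring v = { .size = 1024, .swhead = 10, .swtail = 1000 };

        /* head wrapped past the end: 1024 + 10 - 1000 = 34 in flight */
        assert(vring_used_tx(&v) == 34);
        assert(vring_avail_tx(&v) == 1024 - 34 - 1);

        v.swhead = v.swtail = 0;                /* empty ring */
        assert(vring_used_tx(&v) == 0);
        printf("avail when empty: %d of %u\n",
               vring_avail_tx(&v), (unsigned)v.size);
        return 0;
}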
@@ -98,8 +111,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
vring->va = NULL;
return -ENOMEM;
}
- /*
- * vring->va should be aligned on its size rounded up to power of 2
+ /* vring->va should be aligned on its size rounded up to power of 2
* This is granted by the dma_alloc_coherent
*/
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -206,7 +218,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
- unsigned int sz = mtu_max + ETH_HLEN;
+ unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d = &vring->va[i].rx;
dma_addr_t pa;
@@ -346,27 +358,6 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
}
}
-/*
- * Fast swap in place between 2 registers
- */
-static void wil_swap_u16(u16 *a, u16 *b)
-{
- *a ^= *b;
- *b ^= *a;
- *a ^= *b;
-}
-
-static void wil_swap_ethaddr(void *data)
-{
- struct ethhdr *eth = data;
- u16 *s = (u16 *)eth->h_source;
- u16 *d = (u16 *)eth->h_dest;
-
- wil_swap_u16(s++, d++);
- wil_swap_u16(s++, d++);
- wil_swap_u16(s, d);
-}
-
/**
* reap 1 frame from @swhead
*
@@ -383,40 +374,45 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
struct vring_rx_desc *d;
struct sk_buff *skb;
dma_addr_t pa;
- unsigned int sz = mtu_max + ETH_HLEN;
+ unsigned int snaplen = wil_rx_snaplen();
+ unsigned int sz = mtu_max + ETH_HLEN + snaplen;
u16 dmalen;
u8 ftype;
- u8 ds_bits;
int cid;
+ int i = (int)vring->swhead;
struct wil_net_stats *stats;
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
- if (wil_vring_is_empty(vring))
+ if (unlikely(wil_vring_is_empty(vring)))
return NULL;
- _d = &vring->va[vring->swhead].rx;
- if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
+ _d = &vring->va[i].rx;
+ if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
/* it is not error, we just reached end of Rx done area */
return NULL;
}
- skb = vring->ctx[vring->swhead].skb;
+ skb = vring->ctx[i].skb;
+ vring->ctx[i].skb = NULL;
+ wil_vring_advance_head(vring, 1);
+ if (!skb) {
+ wil_err(wil, "No Rx skb at [%d]\n", i);
+ return NULL;
+ }
d = wil_skb_rxdesc(skb);
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
- vring->ctx[vring->swhead].skb = NULL;
- wil_vring_advance_head(vring, 1);
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
dmalen = le16_to_cpu(d->dma.length);
- trace_wil6210_rx(vring->swhead, d);
- wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
+ trace_wil6210_rx(i, d);
+ wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- if (dmalen > sz) {
+ if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
kfree_skb(skb);
return NULL;
@@ -445,14 +441,14 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
* in Rx descriptor. If type is not data, it is 802.11 frame as is
*/
ftype = wil_rxdesc_ftype(d) << 2;
- if (ftype != IEEE80211_FTYPE_DATA) {
+ if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
/* TODO: process it */
kfree_skb(skb);
return NULL;
}
- if (skb->len < ETH_HLEN) {
+ if (unlikely(skb->len < ETH_HLEN + snaplen)) {
wil_err(wil, "Short frame, len = %d\n", skb->len);
/* TODO: process it (i.e. BAR) */
kfree_skb(skb);
@@ -463,9 +459,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
* and in case of error drop the packet
* higher stack layers will handle retransmission (if required)
*/
- if (d->dma.status & RX_DMA_STATUS_L4I) {
+ if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
/* L4 protocol identified, csum calculated */
- if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
+ if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If HW reports bad checksum, let IP stack re-check it
* For example, HW don't understand Microsoft IP stack that
@@ -474,13 +470,15 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
*/
}
- ds_bits = wil_rxdesc_ds_bits(d);
- if (ds_bits == 1) {
- /*
- * HW bug - in ToDS mode, i.e. Rx on AP side,
- * addresses get swapped
+ if (snaplen) {
+ /* Packet layout
+ * +-------+-------+---------+------------+------+
+ * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
+ * +-------+-------+---------+------------+------+
+ * Need to remove SNAP, shifting SA and DA forward
*/
- wil_swap_ethaddr(skb->data);
+ memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, snaplen);
}
return skb;
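With rx_align_2 enabled the hardware keeps a 6-byte SNAP between the address fields and the ethertype (see the packet-layout comment above), and the driver strips it by shifting the two 6-byte address fields forward and then pulling snaplen bytes off the front. A byte-level standalone demo of that memmove/skb_pull sequence, with plain buffers standing in for the skb:

/* Demo of the SNAP removal above: memmove() the two 6-byte address
 * fields forward over the SNAP, then "pull" snaplen bytes off the
 * front (modelled as a pointer bump instead of skb_pull()).
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

int main(void)
{
        /* SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA... */
        uint8_t frame[] = {
                1, 1, 1, 1, 1, 1,               /* SA        */
                2, 2, 2, 2, 2, 2,               /* DA        */
                0xaa, 0xaa, 3, 0, 0, 0,         /* SNAP      */
                0x08, 0x00,                     /* ethertype */
                0xde, 0xad                      /* payload   */
        };
        const unsigned int snaplen = 6;
        uint8_t *data = frame;

        memmove(data + snaplen, data, 2 * ETH_ALEN);    /* shift SA+DA forward */
        data += snaplen;                                /* skb_pull(skb, snaplen) */

        /* now: SA(6) | DA(6) | ETHTYPE(2) | DATA */
        for (size_t i = 0; i < sizeof(frame) - snaplen; i++)
                printf("%02x ", data[i]);
        printf("\n");
        return 0;
}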
@@ -503,7 +501,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
(next_tail != v->swhead) && (count-- > 0);
v->swtail = next_tail) {
rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
- if (rc) {
+ if (unlikely(rc)) {
wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
rc, v->swtail);
break;
@@ -520,17 +518,71 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
*/
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
- gro_result_t rc;
+ gro_result_t rc = GRO_NORMAL;
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct wireless_dev *wdev = wil_to_wdev(wil);
unsigned int len = skb->len;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int cid = wil_rxdesc_cid(d);
+ int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ struct ethhdr *eth = (void *)skb->data;
+ /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
+ * is not suitable, need to look at data
+ */
+ int mcast = is_multicast_ether_addr(eth->h_dest);
struct wil_net_stats *stats = &wil->sta[cid].stats;
+ struct sk_buff *xmit_skb = NULL;
+ static const char * const gro_res_str[] = {
+ [GRO_MERGED] = "GRO_MERGED",
+ [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
+ [GRO_HELD] = "GRO_HELD",
+ [GRO_NORMAL] = "GRO_NORMAL",
+ [GRO_DROP] = "GRO_DROP",
+ };
skb_orphan(skb);
- rc = napi_gro_receive(&wil->napi_rx, skb);
+ if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
+ if (mcast) {
+ /* send multicast frames both to higher layers in
+ * local net stack and back to the wireless medium
+ */
+ xmit_skb = skb_copy(skb, GFP_ATOMIC);
+ } else {
+ int xmit_cid = wil_find_cid(wil, eth->h_dest);
+
+ if (xmit_cid >= 0) {
+ /* The destination station is associated to
+ * this AP (in this VLAN), so send the frame
+ * directly to it and do not pass it to local
+ * net stack.
+ */
+ xmit_skb = skb;
+ skb = NULL;
+ }
+ }
+ }
+ if (xmit_skb) {
+ /* Send to wireless media and increase priority by 256 to
+ * keep the received priority instead of reclassifying
+ * the frame (see cfg80211_classify8021d).
+ */
+ xmit_skb->dev = ndev;
+ xmit_skb->priority += 256;
+ xmit_skb->protocol = htons(ETH_P_802_3);
+ skb_reset_network_header(xmit_skb);
+ skb_reset_mac_header(xmit_skb);
+ wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
+ dev_queue_xmit(xmit_skb);
+ }
+ if (skb) { /* deliver to local stack */
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ rc = napi_gro_receive(&wil->napi_rx, skb);
+ wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
+ len, gro_res_str[rc]);
+ }
+ /* statistics. rc set to GRO_NORMAL for AP bridging */
if (unlikely(rc == GRO_DROP)) {
ndev->stats.rx_dropped++;
stats->rx_dropped++;
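The Rx path above adds intra-BSS bridging in AP mode: multicast frames are duplicated (one copy up the local stack, one back to the air), unicast frames destined to another associated station are sent straight back out and never reach the local stack, and ap_isolate suppresses the bridging entirely. A small standalone model of that decision; wil_find_cid() is replaced by a trivial lookup here:

/* Decision model of the AP-mode Rx bridging added above: which copies
 * of a received frame go up the local stack vs. back to the air.
 */
#include <stdbool.h>
#include <stdio.h>

enum deliver { LOCAL_ONLY, LOCAL_AND_XMIT, XMIT_ONLY };

/* stand-in for wil_find_cid(): does the DA belong to an associated STA? */
static bool peer_is_associated(int da)
{
        return da == 42;
}

static enum deliver classify(bool is_ap, bool ap_isolate, bool mcast, int da)
{
        if (!is_ap || ap_isolate)
                return LOCAL_ONLY;              /* no intra-BSS bridging    */
        if (mcast)
                return LOCAL_AND_XMIT;          /* skb_copy(): both paths   */
        if (peer_is_associated(da))
                return XMIT_ONLY;               /* forwarded, not delivered */
        return LOCAL_ONLY;
}

int main(void)
{
        printf("mcast in AP      -> %d (LOCAL_AND_XMIT)\n",
               classify(true, false, true, 0));
        printf("ucast to peer 42 -> %d (XMIT_ONLY)\n",
               classify(true, false, false, 42));
        printf("ap_isolate set   -> %d (LOCAL_ONLY)\n",
               classify(true, true, true, 0));
        return 0;
}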
@@ -540,17 +592,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
stats->rx_packets++;
ndev->stats.rx_bytes += len;
stats->rx_bytes += len;
- }
- {
- static const char * const gro_res_str[] = {
- [GRO_MERGED] = "GRO_MERGED",
- [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
- [GRO_HELD] = "GRO_HELD",
- [GRO_NORMAL] = "GRO_NORMAL",
- [GRO_DROP] = "GRO_DROP",
- };
- wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
- len, gro_res_str[rc]);
+ if (mcast)
+ ndev->stats.multicast++;
}
}
@@ -565,7 +608,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
struct vring *v = &wil->vring_rx;
struct sk_buff *skb;
- if (!v->va) {
+ if (unlikely(!v->va)) {
wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return;
}
@@ -581,7 +624,6 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
skb->protocol = htons(ETH_P_802_2);
wil_netif_rx_any(skb, ndev);
} else {
- skb->protocol = eth_type_trans(skb, ndev);
wil_rx_reorder(wil, skb);
}
}
@@ -707,6 +749,72 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
return rc;
}
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
+{
+ int rc;
+ struct wmi_bcast_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size =
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_size = cpu_to_le16(size),
+ },
+ .ringid = id,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply;
+ struct vring *vring = &wil->vring_tx[id];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+
+ wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memset(txdata, 0, sizeof(*txdata));
+ spin_lock_init(&txdata->lock);
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+ wil->vring2cid_tid[id][1] = 0; /* TID */
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+ rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto out_free;
+ }
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+ txdata->enabled = 1;
+
+ return 0;
+ out_free:
+ wil_vring_free(wil, vring, 1);
+ out:
+
+ return rc;
+}
+
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
struct vring *vring = &wil->vring_tx[id];
@@ -730,7 +838,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
memset(txdata, 0, sizeof(*txdata));
}
-static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct sk_buff *skb)
{
int i;
@@ -763,15 +871,6 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
return NULL;
}
-static void wil_set_da_for_vring(struct wil6210_priv *wil,
- struct sk_buff *skb, int vring_index)
-{
- struct ethhdr *eth = (void *)skb->data;
- int cid = wil->vring2cid_tid[vring_index][0];
-
- memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
-}
-
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb);
@@ -792,6 +891,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
continue;
cid = wil->vring2cid_tid[i][0];
+ if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ continue;
+
if (!wil->sta[cid].data_port_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
break;
@@ -806,17 +908,51 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
return NULL;
}
-/*
- * Find 1-st vring and return it; set dest address for this vring in skb
- * duplicate skb and send it to other active vrings
+/* Use one of 2 strategies:
+ *
+ * 1. New (real broadcast):
+ * use dedicated broadcast vring
+ * 2. Old (pseudo-DMS):
+ * Find 1-st vring and return it;
+ * duplicate skb and send it to other active vrings;
+ * in all cases override dest address to unicast peer's address
+ * Use old strategy when new is not supported yet:
+ * - for PBSS
+ * - for secure link
*/
-static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
- struct sk_buff *skb)
+static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v;
+ int i = wil->bcast_vring;
+
+ if (i < 0)
+ return NULL;
+ v = &wil->vring_tx[i];
+ if (!v->va)
+ return NULL;
+
+ return v;
+}
+
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb, int vring_index)
+{
+ struct ethhdr *eth = (void *)skb->data;
+ int cid = wil->vring2cid_tid[vring_index][0];
+
+ ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
+}
+
+static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+ struct sk_buff *skb)
{
struct vring *v, *v2;
struct sk_buff *skb2;
int i;
u8 cid;
+ struct ethhdr *eth = (void *)skb->data;
+ char *src = eth->h_source;
/* find 1-st vring eligible for data */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
@@ -825,9 +961,15 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
continue;
cid = wil->vring2cid_tid[i][0];
+ if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ continue;
if (!wil->sta[cid].data_port_open)
continue;
+ /* don't Tx back to source when re-routing Rx->Tx at the AP */
+ if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+ continue;
+
goto found;
}
@@ -845,9 +987,14 @@ found:
if (!v2->va)
continue;
cid = wil->vring2cid_tid[i][0];
+ if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ continue;
if (!wil->sta[cid].data_port_open)
continue;
+ if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+ continue;
+
skb2 = skb_copy(skb, GFP_ATOMIC);
if (skb2) {
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
@@ -861,6 +1008,20 @@ found:
return v;
}
+static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wireless_dev *wdev = wil->wdev;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP)
+ return wil_find_tx_bcast_2(wil, skb);
+
+ if (wil->privacy)
+ return wil_find_tx_bcast_2(wil, skb);
+
+ return wil_find_tx_bcast_1(wil, skb);
+}
+
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
int vring_index)
{
@@ -952,13 +1113,16 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
uint i = swhead;
dma_addr_t pa;
+ int used;
+ bool mcast = (vring_index == wil->bcast_vring);
+ uint len = skb_headlen(skb);
wil_dbg_txrx(wil, "%s()\n", __func__);
if (unlikely(!txdata->enabled))
return -EINVAL;
- if (avail < 1 + nr_frags) {
+ if (unlikely(avail < 1 + nr_frags)) {
wil_err_ratelimited(wil,
"Tx ring[%2d] full. No space for %d fragments\n",
vring_index, 1 + nr_frags);
@@ -977,9 +1141,19 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
return -EINVAL;
vring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
- wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
+ wil_tx_desc_map(d, pa, len, vring_index);
+ if (unlikely(mcast)) {
+ d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
+ if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
+ /* set MCS 1 */
+ d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
+ /* packet mode 2 */
+ d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
+ (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
+ }
+ }
/* Process TCP/UDP checksum offloading */
- if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
+ if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
vring_index);
goto dma_error;
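For frames on the broadcast ring the descriptor is forced to a robust rate: MCS 0 by default, bumped to MCS 1 plus packet mode 2 once the head length exceeds WIL_BCAST_MCS0_LIMIT (1024 bytes, defined later in this patch). The rate pick itself is a simple threshold; a tiny standalone sketch, deliberately leaving out the MAC_CFG_DESC_TX_* bit writes since those positions are not part of this diff:

/* Threshold-based MCS pick for broadcast frames, as in the hunk above. */
#include <stdio.h>

#define WIL_BCAST_MCS0_LIMIT 1024       /* limit for MCS0 frame size */

static int bcast_mcs(unsigned int head_len)
{
        return (head_len > WIL_BCAST_MCS0_LIMIT) ? 1 : 0;
}

int main(void)
{
        printf("600  bytes -> MCS %d\n", bcast_mcs(600));
        printf("1500 bytes -> MCS %d\n", bcast_mcs(1500));
        return 0;
}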
@@ -1027,8 +1201,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
*/
vring->ctx[i].skb = skb_get(skb);
- if (wil_vring_is_empty(vring)) /* performance monitoring */
+ /* performance monitoring */
+ used = wil_vring_used_tx(vring);
+ if (wil_val_in_range(vring_idle_trsh,
+ used, used + nr_frags + 1)) {
txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ vring_index, used, used + nr_frags + 1);
+ }
/* advance swhead */
wil_vring_advance_head(vring, nr_frags + 1);
@@ -1077,23 +1257,24 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct ethhdr *eth = (void *)skb->data;
+ bool bcast = is_multicast_ether_addr(eth->h_dest);
struct vring *vring;
static bool pr_once_fw;
int rc;
wil_dbg_txrx(wil, "%s()\n", __func__);
- if (!test_bit(wil_status_fwready, wil->status)) {
+ if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
if (!pr_once_fw) {
wil_err(wil, "FW not ready\n");
pr_once_fw = true;
}
goto drop;
}
- if (!test_bit(wil_status_fwconnected, wil->status)) {
+ if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
wil_err(wil, "FW not connected\n");
goto drop;
}
- if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop;
}
@@ -1104,12 +1285,10 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* in STA mode (ESS), all to same VRING */
vring = wil_find_tx_vring_sta(wil, skb);
} else { /* direct communication, find matching VRING */
- if (is_unicast_ether_addr(eth->h_dest))
- vring = wil_find_tx_vring(wil, skb);
- else
- vring = wil_tx_bcast(wil, skb);
+ vring = bcast ? wil_find_tx_bcast(wil, skb) :
+ wil_find_tx_ucast(wil, skb);
}
- if (!vring) {
+ if (unlikely(!vring)) {
wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
goto drop;
}
@@ -1117,7 +1296,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
rc = wil_tx_vring(wil, vring, skb);
/* do we still have enough room in the vring? */
- if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) {
+ if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
netif_tx_stop_all_queues(wil_to_ndev(wil));
wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
}
@@ -1170,21 +1349,28 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
int done = 0;
int cid = wil->vring2cid_tid[ringid][0];
- struct wil_net_stats *stats = &wil->sta[cid].stats;
+ struct wil_net_stats *stats = NULL;
volatile struct vring_tx_desc *_d;
+ int used_before_complete;
+ int used_new;
- if (!vring->va) {
+ if (unlikely(!vring->va)) {
wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
return 0;
}
- if (!txdata->enabled) {
+ if (unlikely(!txdata->enabled)) {
wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
return 0;
}
wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+ used_before_complete = wil_vring_used_tx(vring);
+
+ if (cid < WIL6210_MAX_CID)
+ stats = &wil->sta[cid].stats;
+
while (!wil_vring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
@@ -1196,7 +1382,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
/* TODO: check we are not past head */
_d = &vring->va[lf].tx;
- if (!(_d->dma.status & TX_DMA_STATUS_DU))
+ if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
break;
new_swtail = (lf + 1) % vring->size;
@@ -1224,14 +1410,17 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
wil_txdesc_unmap(dev, d, ctx);
if (skb) {
- if (d->dma.error == 0) {
+ if (likely(d->dma.error == 0)) {
ndev->stats.tx_packets++;
- stats->tx_packets++;
ndev->stats.tx_bytes += skb->len;
- stats->tx_bytes += skb->len;
+ if (stats) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ }
} else {
ndev->stats.tx_errors++;
- stats->tx_errors++;
+ if (stats)
+ stats->tx_errors++;
}
wil_consume_skb(skb, d->dma.error == 0);
}
@@ -1246,8 +1435,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
}
}
- if (wil_vring_is_empty(vring)) { /* performance monitoring */
- wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid);
+ /* performance monitoring */
+ used_new = wil_vring_used_tx(vring);
+ if (wil_val_in_range(vring_idle_trsh,
+ used_new, used_before_complete)) {
+ wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+ ringid, used_before_complete, used_new);
txdata->last_idle = get_cycles();
}
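The performance counters above are no longer tied to "ring became empty"; they track crossings of vring_idle_trsh instead. On submit, the ring leaves the idle state when the used count passes the threshold going up (the half-open interval [used, used + nr_frags + 1) contains it); on completion, idle time starts again when it passes going down. wil_val_in_range() makes both checks symmetric; a standalone version:

/* Crossing detection with a half-open interval test, mirroring
 * wil_val_in_range(val, min, max) == (val >= min && val < max).
 */
#include <stdbool.h>
#include <stdio.h>

static bool val_in_range(int val, int min, int max)
{
        return val >= min && val < max;
}

int main(void)
{
        const int idle_trsh = 16;

        /* Tx submit: used grows from 14 to 14 + 3 -> threshold crossed up */
        int used = 14, nr_frags_plus_1 = 3;
        if (val_in_range(idle_trsh, used, used + nr_frags_plus_1))
                printf("ring leaves idle: %d -> %d\n",
                       used, used + nr_frags_plus_1);

        /* Tx complete: used shrinks from 40 to 9 -> threshold crossed down */
        int used_before = 40, used_new = 9;
        if (val_in_range(idle_trsh, used_new, used_before))
                printf("ring back to idle: %d -> %d\n", used_before, used_new);
        return 0;
}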
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 94611568fc9a..4310972c9e16 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -27,9 +27,12 @@ extern bool no_fw_recovery;
extern unsigned int mtu_max;
extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
+extern u32 vring_idle_trsh;
+extern bool rx_align_2;
#define WIL_NAME "wil6210"
-#define WIL_FW_NAME "wil6210.fw"
+#define WIL_FW_NAME "wil6210.fw" /* code */
+#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
@@ -47,6 +50,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_TX_Q_LEN_DEFAULT (4000)
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
+#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
/* limit ring size in range [32..32k] */
#define WIL_RING_SIZE_ORDER_MIN (5)
#define WIL_RING_SIZE_ORDER_MAX (15)
@@ -120,6 +125,16 @@ struct RGF_ICR {
u32 IMC; /* Mask Clear, write 1 to clear */
} __packed;
+struct RGF_BL {
+ u32 ready; /* 0x880A3C bit [0] */
+#define BIT_BL_READY BIT(0)
+ u32 version; /* 0x880A40 version of the BL struct */
+ u32 rf_type; /* 0x880A44 ID of the connected RF */
+ u32 baseband_type; /* 0x880A48 ID of the baseband */
+ u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
+ u8 pad[2];
+} __packed;
+
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
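The new struct RGF_BL is a packed overlay on the boot-loader area starting at RGF_USER_BL (0x880A3C); the per-field comments give the absolute register addresses, which follow directly from the layout. A quick standalone check of that correspondence (ETH_ALEN = 6; GCC attribute syntax used in place of the kernel's __packed):

/* Verify that the packed RGF_BL layout lands each field on the register
 * address given in its comment (base = RGF_USER_BL).
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN        6
#define RGF_USER_BL     0x880A3Cu

struct RGF_BL {
        uint32_t ready;                         /* 0x880A3C */
        uint32_t version;                       /* 0x880A40 */
        uint32_t rf_type;                       /* 0x880A44 */
        uint32_t baseband_type;                 /* 0x880A48 */
        uint8_t  mac_address[ETH_ALEN];         /* 0x880A4C */
        uint8_t  pad[2];
} __attribute__((packed));

int main(void)
{
        assert(RGF_USER_BL + offsetof(struct RGF_BL, version)       == 0x880A40);
        assert(RGF_USER_BL + offsetof(struct RGF_BL, rf_type)       == 0x880A44);
        assert(RGF_USER_BL + offsetof(struct RGF_BL, baseband_type) == 0x880A48);
        assert(RGF_USER_BL + offsetof(struct RGF_BL, mac_address)   == 0x880A4C);
        printf("sizeof(struct RGF_BL) = %zu\n", sizeof(struct RGF_BL));
        return 0;
}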
@@ -130,6 +145,7 @@ struct RGF_ICR {
#define RGF_USER_MAC_CPU_0 (0x8801fc)
#define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */
#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_BL (0x880A3C) /* Boot Loader */
#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
#define RGF_USER_CLKS_CTL_0 (0x880abc)
#define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */
@@ -169,6 +185,13 @@ struct RGF_ICR {
#define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
#define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+/* Offload control (Sparrow B0+) */
+#define RGF_DMA_OFUL_NID_0 (0x881cd4)
+ #define BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN BIT(0)
+ #define BIT_DMA_OFUL_NID_0_TX_EXT_TR_EN BIT(1)
+ #define BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC BIT(2)
+ #define BIT_DMA_OFUL_NID_0_TX_EXT_A3_SRC BIT(3)
+
/* New (sparrow v2+) interrupt moderation control */
#define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40)
#define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34)
@@ -229,16 +252,10 @@ struct RGF_ICR {
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
- #define JTAG_DEV_ID_MARLON_B0 (0x0612072f)
- #define JTAG_DEV_ID_SPARROW_A0 (0x0632072f)
- #define JTAG_DEV_ID_SPARROW_A1 (0x1632072f)
#define JTAG_DEV_ID_SPARROW_B0 (0x2632072f)
enum {
HW_VER_UNKNOWN,
- HW_VER_MARLON_B0, /* JTAG_DEV_ID_MARLON_B0 */
- HW_VER_SPARROW_A0, /* JTAG_DEV_ID_SPARROW_A0 */
- HW_VER_SPARROW_A1, /* JTAG_DEV_ID_SPARROW_A1 */
HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
};
@@ -482,8 +499,6 @@ enum {
};
enum {
- hw_capability_reset_v2 = 0,
- hw_capability_advanced_itr_moderation = 1,
hw_capability_last
};
@@ -528,8 +543,9 @@ struct wil6210_priv {
wait_queue_head_t wq; /* for all wait_event() use */
/* profile */
u32 monitor_flags;
- u32 secure_pcp; /* create secure PCP? */
+ u32 privacy; /* secure connection? */
int sinfo_gen;
+ u32 ap_isolate; /* no intra-BSS communication */
/* interrupt moderation */
u32 tx_max_burst_duration;
u32 tx_interframe_timeout;
@@ -581,6 +597,7 @@ struct wil6210_priv {
struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
+ int bcast_vring;
/* scan */
struct cfg80211_scan_request *scan_request;
@@ -658,7 +675,7 @@ int wil_if_add(struct wil6210_priv *wil);
void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
-int wil_reset(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil, bool no_fw);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state);
int wil_up(struct wil6210_priv *wil);
@@ -743,6 +760,9 @@ void wil_rx_fini(struct wil6210_priv *wil);
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
int cid, int tid);
void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
+int wil_bcast_init(struct wil6210_priv *wil);
+void wil_bcast_fini(struct wil6210_priv *wil);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_priv *wil, int ringid);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0f3e4334c8e3..9fe2085be2c5 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -281,7 +281,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
/*=== Event handlers ===*/
static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
{
- struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
struct wmi_ready_event *evt = d;
@@ -290,11 +289,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
evt->mac, wil->n_mids);
-
- if (!is_valid_ether_addr(ndev->dev_addr)) {
- memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
- memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
- }
+ /* ignore MAC address, we already have it from the boot loader */
snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
"%d", wil->fw_version);
}
@@ -471,7 +466,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
/* FIXME FW can transmit only ucast frames to peer */
/* FIXME real ring_id instead of hard coded 0 */
- memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
+ ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
wil->sta[evt->cid].status = wil_sta_conn_pending;
wil->pending_connect_cid = evt->cid;
@@ -529,8 +524,8 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}
eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
- memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
- memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+ ether_addr_copy(eth->h_dest, ndev->dev_addr);
+ ether_addr_copy(eth->h_source, evt->src_mac);
eth->h_proto = cpu_to_be16(ETH_P_PAE);
memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -856,7 +851,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
{
struct wmi_set_mac_address_cmd cmd;
- memcpy(cmd.mac, addr, ETH_ALEN);
+ ether_addr_copy(cmd.mac, addr);
wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
@@ -879,7 +874,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
struct wmi_pcp_started_event evt;
} __packed reply;
- if (!wil->secure_pcp)
+ if (!wil->privacy)
cmd.disable_sec = 1;
if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
@@ -1114,6 +1109,11 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
*/
cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
}
+
+ if (rx_align_2)
+ cmd.l2_802_3_offload_ctrl |=
+ L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
+
/* typical time for secure PCP is 840ms */
rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
@@ -1162,7 +1162,8 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
struct wmi_disconnect_sta_cmd cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
- memcpy(cmd.dst_mac, mac, ETH_ALEN);
+
+ ether_addr_copy(cmd.dst_mac, mac);
wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 8a4af613e191..b29055315350 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -70,7 +70,6 @@ enum wmi_command_id {
WMI_SET_UCODE_IDLE_CMDID = 0x0813,
WMI_SET_WORK_MODE_CMDID = 0x0815,
WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
- WMI_MARLON_R_ACTIVATE_CMDID = 0x0817,
WMI_MARLON_R_READ_CMDID = 0x0818,
WMI_MARLON_R_WRITE_CMDID = 0x0819,
WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
@@ -80,6 +79,7 @@ enum wmi_command_id {
WMI_RF_RX_TEST_CMDID = 0x081e,
WMI_CFG_RX_CHAIN_CMDID = 0x0820,
WMI_VRING_CFG_CMDID = 0x0821,
+ WMI_BCAST_VRING_CFG_CMDID = 0x0822,
WMI_VRING_BA_EN_CMDID = 0x0823,
WMI_VRING_BA_DIS_CMDID = 0x0824,
WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
@@ -99,6 +99,7 @@ enum wmi_command_id {
WMI_BF_TXSS_MGMT_CMDID = 0x0837,
WMI_BF_SM_MGMT_CMDID = 0x0838,
WMI_BF_RXSS_MGMT_CMDID = 0x0839,
+ WMI_BF_TRIG_CMDID = 0x083A,
WMI_SET_SECTORS_CMDID = 0x0849,
WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
WMI_MAINTAIN_RESUME_CMDID = 0x0851,
@@ -596,6 +597,22 @@ struct wmi_vring_cfg_cmd {
} __packed;
/*
+ * WMI_BCAST_VRING_CFG_CMDID
+ */
+struct wmi_bcast_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ u8 ringid; /* 0-23 vrings */
+ u8 encap_trans_type;
+ u8 ds_cfg; /* 802.3 DS cfg */
+ u8 nwifi_ds_trans_type;
+} __packed;
+
+struct wmi_bcast_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_bcast_vring_cfg vring_cfg;
+} __packed;
+
+/*
* WMI_VRING_BA_EN_CMDID
*/
struct wmi_vring_ba_en_cmd {
@@ -687,6 +704,9 @@ struct wmi_cfg_rx_chain_cmd {
#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+ #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
+ #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
+ #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
u8 l2_802_3_offload_ctrl;
#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
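The new SNAP_KEEP control follows the POS/LEN/MSK convention already used for the other l2_802_3_offload_ctrl fields: a 1-bit field at position 1, so the mask is 0x2 and enabling it is a plain OR, which is exactly what the wmi.c hunk does when rx_align_2 is set. A minimal standalone check of that consistency:

/* POS/LEN/MSK style field access for l2_802_3_offload_ctrl.SNAP_KEEP. */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS     (1)
#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN     (1)
#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK     (0x2)

int main(void)
{
        uint8_t l2_802_3_offload_ctrl = 0;

        /* mask is consistent with the position/length pair */
        assert((((1u << L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN) - 1)
                << L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS)
               == L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK);

        /* as in wmi_rx_chain_add() when rx_align_2 is enabled */
        l2_802_3_offload_ctrl |= L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
        printf("ctrl = 0x%02x\n", l2_802_3_offload_ctrl);
        return 0;
}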
@@ -841,7 +861,6 @@ enum wmi_event_id {
WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
- WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817,
WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 55db9f03eb2a..6a1f03c271c1 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1004,7 +1004,7 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->frag_source);
}
}
@@ -1022,7 +1022,7 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->frag_source);
more_frags = 1; /* don't send broken assembly */
}
}
@@ -1031,7 +1031,7 @@ static void frag_rx_path(struct atmel_private *priv,
priv->frag_no++;
if (!more_frags) { /* last one */
- memset(priv->frag_source, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->frag_source);
if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
priv->dev->stats.rx_dropped++;
} else {
@@ -1127,7 +1127,7 @@ static void rx_done_irq(struct atmel_private *priv)
atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
/* we use the same buffer for frag reassembly and control packets */
- memset(priv->frag_source, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->frag_source);
if (priv->do_rx_crc) {
/* last 4 octets is crc */
@@ -1379,7 +1379,7 @@ static int atmel_close(struct net_device *dev)
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
}
@@ -1555,7 +1555,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->last_qual = jiffies;
priv->last_beacon_timestamp = 0;
memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
- memset(priv->BSSID, 0, ETH_ALEN);
+ eth_zero_addr(priv->BSSID);
priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
priv->station_was_associated = 0;
@@ -2760,7 +2760,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
u8 SSID_size;
} cmd;
- memset(cmd.BSSID, 0xff, ETH_ALEN);
+ eth_broadcast_addr(cmd.BSSID);
if (priv->fast_scan) {
cmd.SSID_size = priv->SSID_size;
@@ -4049,7 +4049,7 @@ static int reset_atmel_card(struct net_device *dev)
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
}
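The atmel.c changes above are mechanical substitutions: eth_zero_addr() and eth_broadcast_addr() from <linux/etherdevice.h> are thin wrappers around memset with 0x00 and 0xff over ETH_ALEN bytes, so behaviour is unchanged and the intent is clearer. Standalone equivalents for reference:

/* Userspace equivalents of the etherdevice.h helpers used above;
 * both simply memset() an ETH_ALEN-byte MAC address.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static void eth_zero_addr(uint8_t *addr)
{
        memset(addr, 0x00, ETH_ALEN);
}

static void eth_broadcast_addr(uint8_t *addr)
{
        memset(addr, 0xff, ETH_ALEN);
}

int main(void)
{
        uint8_t mac[ETH_ALEN];

        eth_broadcast_addr(mac);
        printf("broadcast: %02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        eth_zero_addr(mac);
        printf("zero:      %02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}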
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 1d7982afc0ad..6837064908be 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -553,7 +553,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
size_t buffersize, bool dma_to_device)
{
if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
- return 1;
+ return true;
switch (ring->type) {
case B43_DMA_30BIT:
@@ -571,13 +571,13 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
}
/* The address is OK. */
- return 0;
+ return false;
address_error:
/* We can't support this address. Unmap it again. */
unmap_descbuffer(ring, addr, buffersize, dma_to_device);
- return 1;
+ return true;
}
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -1099,16 +1099,16 @@ static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
enum b43_dmatype type)
{
if (type != B43_DMA_64BIT)
- return 1;
+ return true;
#ifdef CONFIG_B43_SSB
if (dev->dev->bus_type == B43_BUS_SSB &&
dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
!(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
- return 1;
+ return true;
#endif
- return 0;
+ return false;
}
int b43_dma_init(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 75345c1e8c34..b2f9521fe551 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4132,7 +4132,7 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
if (conf->bssid)
memcpy(wl->bssid, conf->bssid, ETH_ALEN);
else
- memset(wl->bssid, 0, ETH_ALEN);
+ eth_zero_addr(wl->bssid);
}
if (b43_status(dev) >= B43_STAT_INITIALIZED) {
@@ -4819,7 +4819,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
- bcma_core_pci_down(dev->dev->bdev->bus);
+ bcma_host_pci_down(dev->dev->bdev->bus);
break;
#endif
#ifdef CONFIG_B43_SSB
@@ -4866,9 +4866,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
- bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
+ bcma_host_pci_irq_ctl(dev->dev->bdev->bus,
dev->dev->bdev, true);
- bcma_core_pci_up(dev->dev->bdev->bus);
+ bcma_host_pci_up(dev->dev->bdev->bus);
break;
#endif
#ifdef CONFIG_B43_SSB
@@ -5051,7 +5051,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
wl->operating = false;
b43_adjust_opmode(dev);
- memset(wl->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(wl->mac_addr);
b43_upload_card_macaddress(dev);
mutex_unlock(&wl->mutex);
@@ -5067,8 +5067,8 @@ static int b43_op_start(struct ieee80211_hw *hw)
/* Kill all old instance specific information to make sure
* the card won't use it in the short timeframe between start
* and mac80211 reconfiguring it. */
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(wl->bssid);
+ eth_zero_addr(wl->mac_addr);
wl->filter_flags = 0;
wl->radiotap_enabled = false;
b43_qos_clear(wl);
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index b2ed1795130b..f9dd892b9f27 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -427,7 +427,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
bool dma_to_device)
{
if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
- return 1;
+ return true;
switch (ring->type) {
case B43legacy_DMA_30BIT:
@@ -441,13 +441,13 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
}
/* The address is OK. */
- return 0;
+ return false;
address_error:
/* We can't support this address. Unmap it again. */
unmap_descbuffer(ring, addr, buffersize, dma_to_device);
- return 1;
+ return true;
}
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4e58c0069830..c77b7f59505c 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2866,7 +2866,7 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
if (conf->bssid)
memcpy(wl->bssid, conf->bssid, ETH_ALEN);
else
- memset(wl->bssid, 0, ETH_ALEN);
+ eth_zero_addr(wl->bssid);
}
if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
@@ -3470,7 +3470,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
- memset(wl->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(wl->mac_addr);
b43legacy_upload_card_macaddress(dev);
spin_unlock_irqrestore(&wl->irq_lock, flags);
@@ -3487,8 +3487,8 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
/* Kill all old instance specific information to make sure
* the card won't use it in the short timeframe between start
* and mac80211 reconfiguring it. */
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(wl->bssid);
+ eth_zero_addr(wl->mac_addr);
wl->filter_flags = 0;
wl->beacon0_uploaded = false;
wl->beacon1_uploaded = false;
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index c4559bcbc707..7c1bdbc02569 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -32,7 +32,7 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
if (dev->dev->id.revision >= 3) {
if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
& B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
- return 1;
+ return true;
} else {
/* To prevent CPU fault on PPC, do not read a register
* unless the interface is started; however, on resume
@@ -40,12 +40,12 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
* that happens, unconditionally return TRUE.
*/
if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
- return 1;
+ return true;
if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO)
& B43legacy_MMIO_RADIO_HWENABLED_LO_MASK)
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* The poll callback for the hardware button. */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 7944224e3fc9..9b508bd3b839 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -29,6 +29,7 @@
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -58,6 +59,14 @@
#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
+struct brcmf_sdiod_freezer {
+ atomic_t freezing;
+ atomic_t thread_count;
+ u32 frozen_count;
+ wait_queue_head_t thread_freeze;
+ struct completion resumed;
+};
+
static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
@@ -197,6 +206,30 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
return 0;
}
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+ enum brcmf_sdiod_state state)
+{
+ if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
+ state == sdiodev->state)
+ return;
+
+ brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
+ switch (sdiodev->state) {
+ case BRCMF_SDIOD_DATA:
+ /* any other state means bus interface is down */
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
+ break;
+ case BRCMF_SDIOD_DOWN:
+ /* transition from DOWN to DATA means bus interface is up */
+ if (state == BRCMF_SDIOD_DATA)
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
+ break;
+ default:
+ break;
+ }
+ sdiodev->state = state;
+}
+
static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
uint regaddr, u8 byte)
{
@@ -269,12 +302,6 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
return ret;
}
-static void brcmf_sdiod_nomedium_state(struct brcmf_sdio_dev *sdiodev)
-{
- sdiodev->state = BRCMF_STATE_NOMEDIUM;
- brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
-}
-
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 regsz, void *data, bool write)
{
@@ -282,7 +309,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
s32 retry = 0;
int ret;
- if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
+ if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
return -ENOMEDIUM;
/*
@@ -308,7 +335,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (ret == -ENOMEDIUM)
- brcmf_sdiod_nomedium_state(sdiodev);
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
else if (ret != 0) {
/*
* SleepCSR register access can fail when
@@ -331,7 +358,7 @@ brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
int err = 0, i;
u8 addr[3];
- if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
+ if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
return -ENOMEDIUM;
addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
@@ -460,7 +487,7 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
req_sz);
if (err == -ENOMEDIUM)
- brcmf_sdiod_nomedium_state(sdiodev);
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
return err;
}
@@ -595,7 +622,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret == -ENOMEDIUM) {
- brcmf_sdiod_nomedium_state(sdiodev);
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
break;
} else if (ret != 0) {
brcmf_err("CMD53 sg block %s failed %d\n",
@@ -877,6 +904,87 @@ static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
sdiodev->txglomsz = brcmf_sdiod_txglomsz;
}
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+ sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
+ if (!sdiodev->freezer)
+ return -ENOMEM;
+ atomic_set(&sdiodev->freezer->thread_count, 0);
+ atomic_set(&sdiodev->freezer->freezing, 0);
+ init_waitqueue_head(&sdiodev->freezer->thread_freeze);
+ init_completion(&sdiodev->freezer->resumed);
+ return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+ if (sdiodev->freezer) {
+ WARN_ON(atomic_read(&sdiodev->freezer->freezing));
+ kfree(sdiodev->freezer);
+ }
+}
+
+static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
+{
+ atomic_t *expect = &sdiodev->freezer->thread_count;
+ int res = 0;
+
+ sdiodev->freezer->frozen_count = 0;
+ reinit_completion(&sdiodev->freezer->resumed);
+ atomic_set(&sdiodev->freezer->freezing, 1);
+ brcmf_sdio_trigger_dpc(sdiodev->bus);
+ wait_event(sdiodev->freezer->thread_freeze,
+ atomic_read(expect) == sdiodev->freezer->frozen_count);
+ sdio_claim_host(sdiodev->func[1]);
+ res = brcmf_sdio_sleep(sdiodev->bus, true);
+ sdio_release_host(sdiodev->func[1]);
+ return res;
+}
+
+static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
+{
+ sdio_claim_host(sdiodev->func[1]);
+ brcmf_sdio_sleep(sdiodev->bus, false);
+ sdio_release_host(sdiodev->func[1]);
+ atomic_set(&sdiodev->freezer->freezing, 0);
+ complete_all(&sdiodev->freezer->resumed);
+}
+
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+ return atomic_read(&sdiodev->freezer->freezing);
+}
+
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+ if (!brcmf_sdiod_freezing(sdiodev))
+ return;
+ sdiodev->freezer->frozen_count++;
+ wake_up(&sdiodev->freezer->thread_freeze);
+ wait_for_completion(&sdiodev->freezer->resumed);
+}
+
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+ atomic_inc(&sdiodev->freezer->thread_count);
+}
+
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+ atomic_dec(&sdiodev->freezer->thread_count);
+}
+#else
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+ return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
+
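
The freezer added here replaces the old suspend/idle_wait handshake: at suspend time brcmf_ops_sdio_suspend() calls brcmf_sdiod_freezer_on(), which waits until every thread registered via brcmf_sdiod_freezer_count() has parked itself in brcmf_sdiod_try_freeze(), and resume releases them through the "resumed" completion. The thread side of the protocol, simplified from the watchdog-thread hunk later in this patch (a sketch only, not the literal driver code):

	/* register while runnable so freezer_on() knows to wait for us */
	brcmf_sdiod_freezer_count(sdiodev);
	while (!kthread_should_stop()) {
		/* drop out of the count while blocked on unrelated events */
		brcmf_sdiod_freezer_uncount(sdiodev);
		wait_for_completion_interruptible(&bus->watchdog_wait);
		brcmf_sdiod_freezer_count(sdiodev);
		/* park here until brcmf_sdiod_freezer_off() runs at resume */
		brcmf_sdiod_try_freeze(sdiodev);
		/* ... do the periodic work ... */
	}
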
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
if (sdiodev->bus) {
@@ -884,6 +992,8 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
sdiodev->bus = NULL;
}
+ brcmf_sdiod_freezer_detach(sdiodev);
+
/* Disable Function 2 */
sdio_claim_host(sdiodev->func[2]);
sdio_disable_func(sdiodev->func[2]);
@@ -897,6 +1007,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
sg_free_table(&sdiodev->sgtable);
sdiodev->sbwad = 0;
+ pm_runtime_allow(sdiodev->func[1]->card->host->parent);
return 0;
}
@@ -955,13 +1066,17 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
*/
brcmf_sdiod_sgtable_alloc(sdiodev);
+ ret = brcmf_sdiod_freezer_attach(sdiodev);
+ if (ret)
+ goto out;
+
/* try to attach to the target device */
sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) {
ret = -ENODEV;
goto out;
}
-
+ pm_runtime_forbid(host->parent);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
@@ -983,6 +1098,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
{ /* end: all zeroes */ }
};
@@ -1050,9 +1167,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
bus_if->wowl_supported = true;
#endif
- sdiodev->sleeping = false;
- atomic_set(&sdiodev->suspend, false);
- init_waitqueue_head(&sdiodev->idle_wait);
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
err = brcmf_sdiod_probe(sdiodev);
@@ -1083,7 +1198,7 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function: %d\n", func->num);
- if (func->num != 1 && func->num != 2)
+ if (func->num != 1)
return;
bus_if = dev_get_drvdata(&func->dev);
@@ -1114,24 +1229,22 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
+ struct sdio_func *func;
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
mmc_pm_flag_t sdio_flags;
- brcmf_dbg(SDIO, "Enter\n");
+ func = container_of(dev, struct sdio_func, dev);
+ brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+ if (func->num != SDIO_FUNC_1)
+ return 0;
+
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- /* wait for watchdog to go idle */
- if (wait_event_timeout(sdiodev->idle_wait, sdiodev->sleeping,
- msecs_to_jiffies(3 * BRCMF_WD_POLL_MS)) == 0) {
- brcmf_err("bus still active\n");
- return -EBUSY;
- }
- /* disable watchdog */
+ brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
- atomic_set(&sdiodev->suspend, true);
if (sdiodev->wowl_enabled) {
sdio_flags = MMC_PM_KEEP_POWER;
@@ -1149,12 +1262,13 @@ static int brcmf_ops_sdio_resume(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct sdio_func *func = container_of(dev, struct sdio_func, dev);
- brcmf_dbg(SDIO, "Enter\n");
- if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
- disable_irq_wake(sdiodev->pdata->oob_irq_nr);
- brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
- atomic_set(&sdiodev->suspend, false);
+ brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+ if (func->num != SDIO_FUNC_2)
+ return 0;
+
+ brcmf_sdiod_freezer_off(sdiodev);
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index b59b8c6c42ab..8a15ebbce4a3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -625,6 +625,7 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params)
@@ -648,7 +649,7 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
- wdev = brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+ wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
if (!IS_ERR(wdev))
brcmf_cfg80211_update_proto_addr_mode(wdev);
return wdev;
@@ -700,7 +701,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
/* Do a scan abort to stop the driver's scan engine */
brcmf_dbg(SCAN, "ABORT scan in firmware\n");
memset(&params_le, 0, sizeof(params_le));
- memset(params_le.bssid, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(params_le.bssid);
params_le.bss_type = DOT11_BSSTYPE_ANY;
params_le.scan_type = 0;
params_le.channel_num = cpu_to_le32(1);
@@ -866,7 +867,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
char *ptr;
struct brcmf_ssid_le ssid_le;
- memset(params_le->bssid, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(params_le->bssid);
params_le->bss_type = DOT11_BSSTYPE_ANY;
params_le->scan_type = 0;
params_le->channel_num = 0;
@@ -1050,10 +1051,6 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
- /* Arm scan timeout timer */
- mod_timer(&cfg->escan_timeout, jiffies +
- WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
-
escan_req = false;
if (request) {
/* scan bss */
@@ -1112,12 +1109,14 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
}
}
+ /* Arm scan timeout timer */
+ mod_timer(&cfg->escan_timeout, jiffies +
+ WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+
return 0;
scan_out:
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
cfg->scan_request = NULL;
return err;
}
@@ -1375,8 +1374,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
BRCMF_ASSOC_PARAMS_FIXED_SIZE;
memcpy(profile->bssid, params->bssid, ETH_ALEN);
} else {
- memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
- memset(profile->bssid, 0, ETH_ALEN);
+ eth_broadcast_addr(join_params.params_le.bssid);
+ eth_zero_addr(profile->bssid);
}
/* Channel */
@@ -1850,7 +1849,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
if (sme->bssid)
memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
else
- memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(ext_join_params->assoc_le.bssid);
if (cfg->channel) {
ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
@@ -1895,7 +1894,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
if (sme->bssid)
memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
else
- memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(join_params.params_le.bssid);
if (cfg->channel) {
join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
@@ -2252,7 +2251,6 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
/* we ignore this key index in this case */
- brcmf_err("invalid key index (%d)\n", key_idx);
return -EINVAL;
}
@@ -4272,7 +4270,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
return -EIO;
memcpy(&scbval.ea, params->mac, ETH_ALEN);
- scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
+ scbval.val = cpu_to_le32(params->reason_code);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scbval));
if (err)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index 04d2ca0d87d6..ab2fac8b2760 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -100,9 +100,6 @@
#define BCM4329_CORE_SOCRAM_BASE 0x18003000
/* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-/* bcm43143 */
-#define BCM43143_RAMSIZE 0x70000
#define CORE_SB(base, field) \
(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
@@ -150,6 +147,78 @@ struct sbconfig {
u32 sbidhigh; /* identification */
};
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000
+#define SOCRAM_BANKINFO_SZMASK 0x0000007f
+#define SOCRAM_BANKIDX_ROM_MASK 0x00000100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM 0
+#define SOCRAM_MEMTYPE_R0M 1
+#define SOCRAM_MEMTYPE_DEVRAM 2
+
+#define SOCRAM_BANKINFO_SZBASE 8192
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+#define SR_BSZ_BASE 14
+
+struct sbsocramregs {
+ u32 coreinfo;
+ u32 bwalloc;
+ u32 extracoreinfo;
+ u32 biststat;
+ u32 bankidx;
+ u32 standbyctrl;
+
+ u32 errlogstatus; /* rev 6 */
+ u32 errlogaddr; /* rev 6 */
+ /* used for patching rev 3 & 5 */
+ u32 cambankidx;
+ u32 cambankstandbyctrl;
+ u32 cambankpatchctrl;
+ u32 cambankpatchtblbaseaddr;
+ u32 cambankcmdreg;
+ u32 cambankdatareg;
+ u32 cambankmaskreg;
+ u32 PAD[1];
+ u32 bankinfo; /* corerev 8 */

+ u32 bankpda;
+ u32 PAD[14];
+ u32 extmemconfig;
+ u32 extmemparitycsr;
+ u32 extmemparityerrdata;
+ u32 extmemparityerrcnt;
+ u32 extmemwrctrlandsize;
+ u32 PAD[84];
+ u32 workaround;
+ u32 pwrctl; /* corerev >= 2 */
+ u32 PAD[133];
+ u32 sr_control; /* corerev >= 15 */
+ u32 sr_status; /* corerev >= 15 */
+ u32 sr_address; /* corerev >= 15 */
+ u32 sr_data; /* corerev >= 15 */
+};
+
+#define SOCRAMREGOFFS(_f) offsetof(struct sbsocramregs, _f)
+
+#define ARMCR4_CAP (0x04)
+#define ARMCR4_BANKIDX (0x40)
+#define ARMCR4_BANKINFO (0x44)
+#define ARMCR4_BANKPDA (0x4C)
+
+#define ARMCR4_TCBBNB_MASK 0xf0
+#define ARMCR4_TCBBNB_SHIFT 4
+#define ARMCR4_TCBANB_MASK 0xf
+#define ARMCR4_TCBANB_SHIFT 0
+
+#define ARMCR4_BSZ_MASK 0x3f
+#define ARMCR4_BSZ_MULT 8192
+
struct brcmf_core_priv {
struct brcmf_core pub;
u32 wrapbase;
@@ -419,13 +488,13 @@ static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
return &core->pub;
}
-#ifdef DEBUG
/* safety check for chipinfo */
static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
{
struct brcmf_core_priv *core;
bool need_socram = false;
bool has_socram = false;
+ bool cpu_found = false;
int idx = 1;
list_for_each_entry(core, &ci->cores, list) {
@@ -435,22 +504,24 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
switch (core->pub.id) {
case BCMA_CORE_ARM_CM3:
+ cpu_found = true;
need_socram = true;
break;
case BCMA_CORE_INTERNAL_MEM:
has_socram = true;
break;
case BCMA_CORE_ARM_CR4:
- if (ci->pub.rambase == 0) {
- brcmf_err("RAM base not provided with ARM CR4 core\n");
- return -ENOMEM;
- }
+ cpu_found = true;
break;
default:
break;
}
}
+ if (!cpu_found) {
+ brcmf_err("CPU core not detected\n");
+ return -ENXIO;
+ }
/* check RAM core presence for ARM CM3 core */
if (need_socram && !has_socram) {
brcmf_err("RAM core not provided with ARM CM3 core\n");
@@ -458,56 +529,164 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
}
return 0;
}
-#else /* DEBUG */
-static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+
+static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
{
- return 0;
+ return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
}
-#endif
-static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
+ u16 reg, u32 val)
{
- switch (ci->pub.chip) {
- case BRCM_CC_4329_CHIP_ID:
- ci->pub.ramsize = BCM4329_RAMSIZE;
- break;
- case BRCM_CC_43143_CHIP_ID:
- ci->pub.ramsize = BCM43143_RAMSIZE;
- break;
- case BRCM_CC_43241_CHIP_ID:
- ci->pub.ramsize = 0x90000;
- break;
- case BRCM_CC_4330_CHIP_ID:
- ci->pub.ramsize = 0x48000;
- break;
+ core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
+}
+
+static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
+ u32 *banksize)
+{
+ u32 bankinfo;
+ u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+ bankidx |= idx;
+ brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
+ bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
+ *banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
+ *banksize *= SOCRAM_BANKINFO_SZBASE;
+ return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
+}
+
+static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
+ u32 *srsize)
+{
+ u32 coreinfo;
+ uint nb, banksize, lss;
+ bool retent;
+ int i;
+
+ *ramsize = 0;
+ *srsize = 0;
+
+ if (WARN_ON(sr->pub.rev < 4))
+ return;
+
+ if (!brcmf_chip_iscoreup(&sr->pub))
+ brcmf_chip_resetcore(&sr->pub, 0, 0, 0);
+
+ /* Get info for determining size */
+ coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
+ nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+
+ if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
+ banksize = (coreinfo & SRCI_SRBSZ_MASK);
+ lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ if (lss != 0)
+ nb--;
+ *ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
+ if (lss != 0)
+ *ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ } else {
+ nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++) {
+ retent = brcmf_chip_socram_banksize(sr, i, &banksize);
+ *ramsize += banksize;
+ if (retent)
+ *srsize += banksize;
+ }
+ }
+
+ /* hardcoded save&restore memory sizes */
+ switch (sr->chip->pub.chip) {
case BRCM_CC_4334_CHIP_ID:
- case BRCM_CC_43340_CHIP_ID:
- ci->pub.ramsize = 0x80000;
+ if (sr->chip->pub.chiprev < 2)
+ *srsize = (32 * 1024);
break;
- case BRCM_CC_4335_CHIP_ID:
- ci->pub.ramsize = 0xc0000;
- ci->pub.rambase = 0x180000;
+ case BRCM_CC_43430_CHIP_ID:
+ /* assume sr for now as we can not check
+ * firmware sr capability at this point.
+ */
+ *srsize = (64 * 1024);
break;
- case BRCM_CC_43362_CHIP_ID:
- ci->pub.ramsize = 0x3c000;
+ default:
break;
+ }
+}
+
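
The per-bank size read back above is encoded in the low seven bits of the bankinfo register, banksize = ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1) * SOCRAM_BANKINFO_SZBASE, and bit 16 marks a retention bank that also counts toward srsize. A standalone check of the arithmetic, using a made-up register value:

	#include <stdio.h>

	#define SOCRAM_BANKINFO_RETNTRAM_MASK	0x00010000
	#define SOCRAM_BANKINFO_SZMASK		0x0000007f
	#define SOCRAM_BANKINFO_SZBASE		8192

	int main(void)
	{
		unsigned int bankinfo = 0x00010007;	/* hypothetical readback */
		unsigned int banksize;

		banksize = ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1) *
			   SOCRAM_BANKINFO_SZBASE;	/* (7 + 1) * 8192 = 64 KiB */
		printf("bank: %u bytes, retention: %s\n", banksize,
		       (bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK) ? "yes" : "no");
		return 0;
	}
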
+/** Return the TCM-RAM size of the ARMCR4 core. */
+static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
+{
+ u32 corecap;
+ u32 memsize = 0;
+ u32 nab;
+ u32 nbb;
+ u32 totb;
+ u32 bxinfo;
+ u32 idx;
+
+ corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
+
+ nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+ nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+ totb = nab + nbb;
+
+ for (idx = 0; idx < totb; idx++) {
+ brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
+ bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
+ memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+ }
+
+ return memsize;
+}
+
+static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
+{
+ switch (ci->pub.chip) {
+ case BRCM_CC_4345_CHIP_ID:
+ return 0x198000;
+ case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
case BRCM_CC_43567_CHIP_ID:
case BRCM_CC_43569_CHIP_ID:
case BRCM_CC_43570_CHIP_ID:
- ci->pub.ramsize = 0xc0000;
- ci->pub.rambase = 0x180000;
- break;
case BRCM_CC_43602_CHIP_ID:
- ci->pub.ramsize = 0xf0000;
- ci->pub.rambase = 0x180000;
- break;
+ return 0x180000;
default:
brcmf_err("unknown chip: %s\n", ci->pub.name);
break;
}
+ return 0;
+}
+
+static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core_priv *mem_core;
+ struct brcmf_core *mem;
+
+ mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
+ if (mem) {
+ mem_core = container_of(mem, struct brcmf_core_priv, pub);
+ ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
+ ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
+ if (!ci->pub.rambase) {
+ brcmf_err("RAM base not provided with ARM CR4 core\n");
+ return -EINVAL;
+ }
+ } else {
+ mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM);
+ mem_core = container_of(mem, struct brcmf_core_priv, pub);
+ brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
+ &ci->pub.srsize);
+ }
+ brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
+ ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
+ ci->pub.srsize, ci->pub.srsize);
+
+ if (!ci->pub.ramsize) {
+ brcmf_err("RAM size is undetermined\n");
+ return -ENOMEM;
+ }
+ return 0;
}
static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
@@ -660,6 +839,7 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
struct brcmf_core *core;
u32 regdata;
u32 socitype;
+ int ret;
/* Get CC core rev
* Chipid is assume to be at offset 0 from SI_ENUM_BASE
@@ -712,9 +892,13 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
return -ENODEV;
}
- brcmf_chip_get_raminfo(ci);
+ ret = brcmf_chip_cores_check(ci);
+ if (ret)
+ return ret;
- return brcmf_chip_cores_check(ci);
+ /* assure chip is passive for core access */
+ brcmf_chip_set_passive(&ci->pub);
+ return brcmf_chip_get_raminfo(ci);
}
static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
@@ -778,12 +962,6 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
if (chip->ops->setup)
ret = chip->ops->setup(chip->ctx, pub);
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
- brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
return ret;
}
@@ -799,7 +977,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
err = -EINVAL;
if (WARN_ON(!ops->prepare))
err = -EINVAL;
- if (WARN_ON(!ops->exit_dl))
+ if (WARN_ON(!ops->activate))
err = -EINVAL;
if (err < 0)
return ERR_PTR(-EINVAL);
@@ -897,9 +1075,10 @@ void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
}
static void
-brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
{
struct brcmf_core *core;
+ struct brcmf_core_priv *sr;
brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
@@ -909,9 +1088,16 @@ brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
D11_BCMA_IOCTL_PHYCLOCKEN);
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
brcmf_chip_resetcore(core, 0, 0, 0);
+
+ /* disable bank #3 remap for this device */
+ if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
+ sr = container_of(core, struct brcmf_core_priv, pub);
+ brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
+ brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
+ }
}
-static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
{
struct brcmf_core *core;
@@ -921,7 +1107,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
return false;
}
- chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+ chip->ops->activate(chip->ctx, &chip->pub, 0);
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
brcmf_chip_resetcore(core, 0, 0, 0);
@@ -930,7 +1116,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
}
static inline void
-brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
{
struct brcmf_core *core;
@@ -943,11 +1129,11 @@ brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
D11_BCMA_IOCTL_PHYCLOCKEN);
}
-static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
struct brcmf_core *core;
- chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+ chip->ops->activate(chip->ctx, &chip->pub, rstvec);
/* restore ARM */
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
@@ -956,7 +1142,7 @@ static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
return true;
}
-void brcmf_chip_enter_download(struct brcmf_chip *pub)
+void brcmf_chip_set_passive(struct brcmf_chip *pub)
{
struct brcmf_chip_priv *chip;
struct brcmf_core *arm;
@@ -966,14 +1152,14 @@ void brcmf_chip_enter_download(struct brcmf_chip *pub)
chip = container_of(pub, struct brcmf_chip_priv, pub);
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
if (arm) {
- brcmf_chip_cr4_enterdl(chip);
+ brcmf_chip_cr4_set_passive(chip);
return;
}
- brcmf_chip_cm3_enterdl(chip);
+ brcmf_chip_cm3_set_passive(chip);
}
-bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
{
struct brcmf_chip_priv *chip;
struct brcmf_core *arm;
@@ -983,9 +1169,9 @@ bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
chip = container_of(pub, struct brcmf_chip_priv, pub);
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
if (arm)
- return brcmf_chip_cr4_exitdl(chip, rstvec);
+ return brcmf_chip_cr4_set_active(chip, rstvec);
- return brcmf_chip_cm3_exitdl(chip);
+ return brcmf_chip_cm3_set_active(chip);
}
bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
@@ -1016,6 +1202,10 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
addr = CORE_CC_REG(base, chipcontrol_data);
reg = chip->ops->read32(chip->ctx, addr);
return (reg & pmu_cc3_mask) != 0;
+ case BRCM_CC_43430_CHIP_ID:
+ addr = CORE_CC_REG(base, sr_control1);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return reg != 0;
default:
addr = CORE_CC_REG(base, pmucapabilities_ext);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
index c32908da90c8..60dcb38fc77a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -30,7 +30,8 @@
* @pmucaps: PMU capabilities.
* @pmurev: PMU revision.
* @rambase: RAM base address (only applicable for ARM CR4 chips).
- * @ramsize: amount of RAM on chip.
+ * @ramsize: amount of RAM on chip including retention.
+ * @srsize: amount of retention RAM on chip.
* @name: string representation of the chip identifier.
*/
struct brcmf_chip {
@@ -41,6 +42,7 @@ struct brcmf_chip {
u32 pmurev;
u32 rambase;
u32 ramsize;
+ u32 srsize;
char name[8];
};
@@ -64,7 +66,7 @@ struct brcmf_core {
* @write32: write 32-bit value over bus.
* @prepare: prepare bus for core configuration.
* @setup: bus-specific core setup.
- * @exit_dl: exit download state.
+ * @activate: chip becomes active.
* The callback should use the provided @rstvec when non-zero.
*/
struct brcmf_buscore_ops {
@@ -72,7 +74,7 @@ struct brcmf_buscore_ops {
void (*write32)(void *ctx, u32 addr, u32 value);
int (*prepare)(void *ctx);
int (*setup)(void *ctx, struct brcmf_chip *chip);
- void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+ void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
};
struct brcmf_chip *brcmf_chip_attach(void *ctx,
@@ -84,8 +86,8 @@ bool brcmf_chip_iscoreup(struct brcmf_core *core);
void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
u32 postreset);
-void brcmf_chip_enter_download(struct brcmf_chip *ci);
-bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+void brcmf_chip_set_passive(struct brcmf_chip *ci);
+bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec);
bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
#endif /* BRCMF_AXIDMP_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index 2d6e2cc1b12c..f8f47dcfa886 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -944,6 +944,34 @@ fail:
return ret;
}
+static int brcmf_revinfo_read(struct seq_file *s, void *data)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
+ struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
+ char drev[BRCMU_DOTREV_LEN];
+ char brev[BRCMU_BOARDREV_LEN];
+
+ seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
+ seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
+ seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
+ seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
+ seq_printf(s, "chiprev: %u\n", ri->chiprev);
+ seq_printf(s, "chippkg: %u\n", ri->chippkg);
+ seq_printf(s, "corerev: %u\n", ri->corerev);
+ seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
+ seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
+ seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
+ seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
+ seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
+ seq_printf(s, "bus: %u\n", ri->bus);
+ seq_printf(s, "phytype: %u\n", ri->phytype);
+ seq_printf(s, "phyrev: %u\n", ri->phyrev);
+ seq_printf(s, "anarev: %u\n", ri->anarev);
+ seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);
+
+ return 0;
+}
+
int brcmf_bus_start(struct device *dev)
{
int ret = -1;
@@ -974,6 +1002,8 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
+ brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
+
/* assure we have chipid before feature attach */
if (!bus_if->chip) {
bus_if->chip = drvr->revinfo.chipnum;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 910fbb561469..eb1325371d3a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -236,7 +236,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
brcmf_flowring_block(flow, flowid, false);
hash_idx = ring->hash_id;
flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
- memset(flow->hash[hash_idx].mac, 0, ETH_ALEN);
+ eth_zero_addr(flow->hash[hash_idx].mac);
flow->rings[flowid] = NULL;
skb = skb_dequeue(&ring->skblist);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 6262612dec45..4ec9811f49c8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -481,10 +481,9 @@ static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
- if (waitqueue_active(&msgbuf->ioctl_resp_wait)) {
- msgbuf->ctl_completed = true;
+ msgbuf->ctl_completed = true;
+ if (waitqueue_active(&msgbuf->ioctl_resp_wait))
wake_up(&msgbuf->ioctl_resp_wait);
- }
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
index 77a51b8c1e12..3d513e407e3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
@@ -17,11 +17,11 @@
#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
-#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20
-#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256
-#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 20
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64
#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 256
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512
#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
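
The deeper msgbuf rings cost a little more DMA ring memory, presumably in exchange for more headroom before a ring fills under load. With the 40-byte item size shown here, the control submit ring grows from 20 * 40 = 800 bytes to 64 * 40 = 2560 bytes per device; the other rings scale the same way with their own item sizes, which are not visible in this hunk.
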
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index effb48ebd864..710fbe570eb2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -697,7 +697,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
else
sparams->scan_type = 1;
- memset(&sparams->bssid, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(sparams->bssid);
if (ssid.SSID_len)
memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
@@ -2246,11 +2246,13 @@ static void brcmf_p2p_delete_p2pdev(struct brcmf_p2p_info *p2p,
*
* @wiphy: wiphy device of new interface.
* @name: name of the new interface.
+ * @name_assign_type: origin of the interface name
* @type: nl80211 interface type.
* @flags: not used.
* @params: contains mac address for P2P device.
*/
struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
@@ -2310,6 +2312,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
}
strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ ifp->ndev->name_assign_type = name_assign_type;
err = brcmf_net_attach(ifp, true);
if (err) {
brcmf_err("Registering netdevice failed\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
index 6821b26224be..872f382d9e49 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
@@ -149,6 +149,7 @@ struct brcmf_p2p_info {
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg);
void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params);
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 61c053a729be..1831ecd0813e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -47,8 +47,6 @@ enum brcmf_pcie_state {
#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
-#define BRCMF_PCIE_4354_FW_NAME "brcm/brcmfmac4354-pcie.bin"
-#define BRCMF_PCIE_4354_NVRAM_NAME "brcm/brcmfmac4354-pcie.txt"
#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
@@ -187,8 +185,8 @@ enum brcmf_pcie_state {
MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
-MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
-MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
@@ -509,8 +507,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
{
- brcmf_chip_enter_download(devinfo->ci);
-
if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
@@ -536,7 +532,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
brcmf_chip_resetcore(core, 0, 0, 0);
}
- return !brcmf_chip_exit_download(devinfo->ci, resetintr);
+ return !brcmf_chip_set_active(devinfo->ci, resetintr);
}
@@ -653,10 +649,9 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
console->log_str[console->log_idx] = ch;
console->log_idx++;
}
-
if (ch == '\n') {
console->log_str[console->log_idx] = 0;
- brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
+ brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
console->log_idx = 0;
}
}
@@ -1328,10 +1323,6 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
fw_name = BRCMF_PCIE_43602_FW_NAME;
nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
break;
- case BRCM_CC_4354_CHIP_ID:
- fw_name = BRCMF_PCIE_4354_FW_NAME;
- nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
- break;
case BRCM_CC_4356_CHIP_ID:
fw_name = BRCMF_PCIE_4356_FW_NAME;
nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
@@ -1566,8 +1557,8 @@ static int brcmf_pcie_buscoreprep(void *ctx)
}
-static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
- u32 rstvec)
+static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
+ u32 rstvec)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
@@ -1577,7 +1568,7 @@ static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.prepare = brcmf_pcie_buscoreprep,
- .exit_dl = brcmf_pcie_buscore_exitdl,
+ .activate = brcmf_pcie_buscore_activate,
.read32 = brcmf_pcie_buscore_read32,
.write32 = brcmf_pcie_buscore_write32,
};
@@ -1856,7 +1847,6 @@ cleanup:
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
static struct pci_device_id brcmf_pcie_devid_table[] = {
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index faec35c899ec..ab0c89833013 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -432,8 +432,6 @@ struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
struct brcmf_chip *ci; /* Chip info struct */
- u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
-
u32 hostintmask; /* Copy of Host Interrupt Mask */
atomic_t intstatus; /* Intstatus bits (events) pending */
atomic_t fcstate; /* State of dongle flow-control */
@@ -485,10 +483,9 @@ struct brcmf_sdio {
#endif /* DEBUG */
uint clkstate; /* State of sd and backplane clock(s) */
- bool activity; /* Activity flag for clock down */
s32 idletime; /* Control for activity timeout */
- s32 idlecount; /* Activity timeout counter */
- s32 idleclock; /* How to set bus driver when idle */
+ s32 idlecount; /* Activity timeout counter */
+ s32 idleclock; /* How to set bus driver when idle */
bool rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
bool alp_only; /* Don't use HT clock (ALP only) */
@@ -510,11 +507,13 @@ struct brcmf_sdio {
struct workqueue_struct *brcmf_wq;
struct work_struct datawork;
- atomic_t dpc_tskcnt;
+ bool dpc_triggered;
+ bool dpc_running;
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
+ bool sleeping;
u8 tx_hdrlen; /* sdio bus header length for tx packet */
bool txglom; /* host tx glomming enable flag */
@@ -616,6 +615,10 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
+#define BCM43430_FIRMWARE_NAME "brcm/brcmfmac43430-sdio.bin"
+#define BCM43430_NVRAM_NAME "brcm/brcmfmac43430-sdio.txt"
+#define BCM43455_FIRMWARE_NAME "brcm/brcmfmac43455-sdio.bin"
+#define BCM43455_NVRAM_NAME "brcm/brcmfmac43455-sdio.txt"
#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
@@ -639,6 +642,10 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
@@ -668,6 +675,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
{ BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
{ BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+ { BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
+ { BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
{ BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
};
@@ -958,13 +967,8 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
brcmf_dbg(SDIO, "Enter\n");
/* Early exit if we're already there */
- if (bus->clkstate == target) {
- if (target == CLK_AVAIL) {
- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
- bus->activity = true;
- }
+ if (bus->clkstate == target)
return 0;
- }
switch (target) {
case CLK_AVAIL:
@@ -973,8 +977,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
brcmf_sdio_sdclk(bus, true);
/* Now request HT Avail on the backplane */
brcmf_sdio_htclk(bus, true, pendok);
- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
- bus->activity = true;
break;
case CLK_SDONLY:
@@ -986,7 +988,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
else
brcmf_err("request for %d -> %d\n",
bus->clkstate, target);
- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
break;
case CLK_NONE:
@@ -995,7 +996,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
brcmf_sdio_htclk(bus, false, false);
/* Now remove the SD clock */
brcmf_sdio_sdclk(bus, false);
- brcmf_sdio_wd_timer(bus, 0);
break;
}
#ifdef DEBUG
@@ -1013,26 +1013,16 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
- (bus->sdiodev->sleeping ? "SLEEP" : "WAKE"));
+ (bus->sleeping ? "SLEEP" : "WAKE"));
/* If SR is enabled control bus state with KSO */
if (bus->sr_enabled) {
/* Done if we're already in the requested state */
- if (sleep == bus->sdiodev->sleeping)
+ if (sleep == bus->sleeping)
goto end;
/* Going to sleep */
if (sleep) {
- /* Don't sleep if something is pending */
- if (atomic_read(&bus->intstatus) ||
- atomic_read(&bus->ipend) > 0 ||
- (!atomic_read(&bus->fcstate) &&
- brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
- data_ok(bus))) {
- err = -EBUSY;
- goto done;
- }
-
clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR,
&err);
@@ -1043,11 +1033,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
SBSDIO_ALP_AVAIL_REQ, &err);
}
err = brcmf_sdio_kso_control(bus, false);
- /* disable watchdog */
- if (!err)
- brcmf_sdio_wd_timer(bus, 0);
} else {
- bus->idlecount = 0;
err = brcmf_sdio_kso_control(bus, true);
}
if (err) {
@@ -1064,10 +1050,9 @@ end:
brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
} else {
brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
}
- bus->sdiodev->sleeping = sleep;
- if (sleep)
- wake_up(&bus->sdiodev->idle_wait);
+ bus->sleeping = sleep;
brcmf_dbg(SDIO, "new state %s\n",
(sleep ? "SLEEP" : "WAKE"));
done:
@@ -1085,44 +1070,47 @@ static inline bool brcmf_sdio_valid_shared_address(u32 addr)
static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
struct sdpcm_shared *sh)
{
- u32 addr;
+ u32 addr = 0;
int rv;
u32 shaddr = 0;
struct sdpcm_shared_le sh_le;
__le32 addr_le;
- shaddr = bus->ci->rambase + bus->ramsize - 4;
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_bus_sleep(bus, false, false);
/*
* Read last word in socram to determine
* address of sdpcm_shared structure
*/
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
- sdio_release_host(bus->sdiodev->func[1]);
+ shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
+ if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
+ shaddr -= bus->ci->srsize;
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
+ (u8 *)&addr_le, 4);
if (rv < 0)
- return rv;
-
- addr = le32_to_cpu(addr_le);
-
- brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+ goto fail;
/*
* Check if addr is valid.
* NVRAM length at the end of memory should have been overwritten.
*/
+ addr = le32_to_cpu(addr_le);
if (!brcmf_sdio_valid_shared_address(addr)) {
- brcmf_err("invalid sdpcm_shared address 0x%08X\n",
- addr);
- return -EINVAL;
+ brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
+ rv = -EINVAL;
+ goto fail;
}
+ brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
+
/* Read hndrte_shared structure */
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
sizeof(struct sdpcm_shared_le));
if (rv < 0)
- return rv;
+ goto fail;
+
+ sdio_release_host(bus->sdiodev->func[1]);
/* Endianness */
sh->flags = le32_to_cpu(sh_le.flags);
@@ -1139,8 +1127,13 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
sh->flags & SDPCM_SHARED_VERSION_MASK);
return -EPROTO;
}
-
return 0;
+
+fail:
+ brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
+ rv, addr);
+ sdio_release_host(bus->sdiodev->func[1]);
+ return rv;
}
static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
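
The reworked lookup reads the sdpcm_shared pointer from the last word of device RAM and, on SR-capable SOCRAM chips (rambase == 0), steps below the retention area that occupies the top of RAM. With purely illustrative numbers, rambase = 0x0, ramsize = 0x80000 and srsize = 0x10000 (sr_capable standing in for brcmf_chip_sr_capable(bus->ci)):

	u32 shaddr = rambase + ramsize - 4;	/* 0x7fffc */

	if (!rambase && sr_capable)		/* SOCRAM chip with save/restore */
		shaddr -= srsize;		/* 0x6fffc */
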
@@ -1909,7 +1902,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxpending = true;
for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_STATE_DATA;
+ !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
rd->seq_num++, rxleft--) {
/* Handle glomming separately */
@@ -2415,7 +2408,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if ((bus->sdiodev->state == BRCMF_STATE_DATA) &&
+ if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
bus->txoff = false;
brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2503,7 +2496,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
bus->watchdog_tsk = NULL;
}
- if (sdiodev->state != BRCMF_STATE_NOMEDIUM) {
+ if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
sdio_claim_host(sdiodev->func[1]);
/* Enable clock for device interrupts */
@@ -2603,21 +2596,6 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
return ret;
}
-static int brcmf_sdio_pm_resume_wait(struct brcmf_sdio_dev *sdiodev)
-{
-#ifdef CONFIG_PM_SLEEP
- int retry;
-
- /* Wait for possible resume to complete */
- retry = 0;
- while ((atomic_read(&sdiodev->suspend)) && (retry++ != 50))
- msleep(20);
- if (atomic_read(&sdiodev->suspend))
- return -EIO;
-#endif
- return 0;
-}
-
static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
u32 newstatus = 0;
@@ -2628,9 +2606,6 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
- if (brcmf_sdio_pm_resume_wait(bus->sdiodev))
- return;
-
sdio_claim_host(bus->sdiodev->func[1]);
/* If waiting for HTAVAIL, check status */
@@ -2739,11 +2714,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
data_ok(bus)) {
sdio_claim_host(bus->sdiodev->func[1]);
- err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
- bus->ctrl_frame_len);
+ if (bus->ctrl_frame_stat) {
+ err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
+ bus->ctrl_frame_len);
+ bus->ctrl_frame_err = err;
+ wmb();
+ bus->ctrl_frame_stat = false;
+ }
sdio_release_host(bus->sdiodev->func[1]);
- bus->ctrl_frame_err = err;
- bus->ctrl_frame_stat = false;
brcmf_sdio_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
@@ -2755,15 +2733,25 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_sdio_sendfromq(bus, framecnt);
}
- if ((bus->sdiodev->state != BRCMF_STATE_DATA) || (err != 0)) {
+ if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
brcmf_err("failed backplane access over SDIO, halting operation\n");
atomic_set(&bus->intstatus, 0);
+ if (bus->ctrl_frame_stat) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ if (bus->ctrl_frame_stat) {
+ bus->ctrl_frame_err = -ENODEV;
+ wmb();
+ bus->ctrl_frame_stat = false;
+ brcmf_sdio_wait_event_wakeup(bus);
+ }
+ sdio_release_host(bus->sdiodev->func[1]);
+ }
} else if (atomic_read(&bus->intstatus) ||
atomic_read(&bus->ipend) > 0 ||
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus))) {
- atomic_inc(&bus->dpc_tskcnt);
+ bus->dpc_triggered = true;
}
}
@@ -2862,11 +2850,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
qcount[prec] = pktq_plen(&bus->txq, prec);
#endif
- if (atomic_read(&bus->dpc_tskcnt) == 0) {
- atomic_inc(&bus->dpc_tskcnt);
- queue_work(bus->brcmf_wq, &bus->datawork);
- }
-
+ brcmf_sdio_trigger_dpc(bus);
return ret;
}
@@ -2963,23 +2947,27 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
/* Send from dpc */
bus->ctrl_frame_buf = msg;
bus->ctrl_frame_len = msglen;
+ wmb();
bus->ctrl_frame_stat = true;
- if (atomic_read(&bus->dpc_tskcnt) == 0) {
- atomic_inc(&bus->dpc_tskcnt);
- queue_work(bus->brcmf_wq, &bus->datawork);
- }
+ brcmf_sdio_trigger_dpc(bus);
wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
msecs_to_jiffies(CTL_DONE_TIMEOUT));
-
- if (!bus->ctrl_frame_stat) {
+ ret = 0;
+ if (bus->ctrl_frame_stat) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ if (bus->ctrl_frame_stat) {
+ brcmf_dbg(SDIO, "ctrl_frame timeout\n");
+ bus->ctrl_frame_stat = false;
+ ret = -ETIMEDOUT;
+ }
+ sdio_release_host(bus->sdiodev->func[1]);
+ }
+ if (!ret) {
brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
bus->ctrl_frame_err);
+ rmb();
ret = bus->ctrl_frame_err;
- } else {
- brcmf_dbg(SDIO, "ctrl_frame timeout\n");
- bus->ctrl_frame_stat = false;
- ret = -ETIMEDOUT;
}
if (ret)
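
The wmb()/rmb() pairs added in this and the previous hunk implement the usual publish/consume ordering between the control-frame payload (ctrl_frame_buf, ctrl_frame_len, ctrl_frame_err) and the ctrl_frame_stat flag: whoever writes the payload must make it visible before flipping the flag, and whoever acts on the flag must order that read before touching the payload. Stripped of driver detail, the pattern looks like this (names hypothetical):

	/* producer */
	shared->payload = value;
	wmb();			/* payload visible before the flag */
	shared->ready = true;

	/* consumer */
	if (shared->ready) {
		rmb();		/* flag checked before the payload is read */
		consume(shared->payload);
	}
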
@@ -3383,9 +3371,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
- /* Keep arm in reset */
- brcmf_chip_enter_download(bus->ci);
-
rstvec = get_unaligned_le32(fw->data);
brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
@@ -3405,13 +3390,13 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
}
/* Take arm out of reset */
- if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
+ if (!brcmf_chip_set_active(bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
/* Allow full data communication using DPC from now on. */
- bus->sdiodev->state = BRCMF_STATE_DATA;
+ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
bcmerror = 0;
err:
@@ -3548,6 +3533,14 @@ done:
return err;
}
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
+{
+ if (!bus->dpc_triggered) {
+ bus->dpc_triggered = true;
+ queue_work(bus->brcmf_wq, &bus->datawork);
+ }
+}
+
void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3557,7 +3550,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
return;
}
- if (bus->sdiodev->state != BRCMF_STATE_DATA) {
+ if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
brcmf_err("bus is down. we have nothing to do\n");
return;
}
@@ -3574,11 +3567,11 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
if (!bus->intr)
brcmf_err("isr w/o interrupt configured!\n");
- atomic_inc(&bus->dpc_tskcnt);
+ bus->dpc_triggered = true;
queue_work(bus->brcmf_wq, &bus->datawork);
}
-static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
+static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
{
brcmf_dbg(TIMER, "Enter\n");
@@ -3594,7 +3587,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->intr ||
(bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
- if (atomic_read(&bus->dpc_tskcnt) == 0) {
+ if (!bus->dpc_triggered) {
u8 devpend;
sdio_claim_host(bus->sdiodev->func[1]);
@@ -3602,9 +3595,8 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
SDIO_CCCR_INTx,
NULL);
sdio_release_host(bus->sdiodev->func[1]);
- intstatus =
- devpend & (INTR_STATUS_FUNC1 |
- INTR_STATUS_FUNC2);
+ intstatus = devpend & (INTR_STATUS_FUNC1 |
+ INTR_STATUS_FUNC2);
}
/* If there is something, make like the ISR and
@@ -3613,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
bus->sdcnt.pollcnt++;
atomic_set(&bus->ipend, 1);
- atomic_inc(&bus->dpc_tskcnt);
+ bus->dpc_triggered = true;
queue_work(bus->brcmf_wq, &bus->datawork);
}
}
@@ -3623,7 +3615,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
}
#ifdef DEBUG
/* Poll for console output periodically */
- if (bus->sdiodev->state == BRCMF_STATE_DATA &&
+ if (bus->sdiodev->state == BRCMF_SDIOD_DATA &&
bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
@@ -3640,22 +3632,25 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
#endif /* DEBUG */
/* On idle timeout clear activity flag and/or turn off clock */
- if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
- if (++bus->idlecount >= bus->idletime) {
- bus->idlecount = 0;
- if (bus->activity) {
- bus->activity = false;
- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
- } else {
+ if (!bus->dpc_triggered) {
+ rmb();
+ if ((!bus->dpc_running) && (bus->idletime > 0) &&
+ (bus->clkstate == CLK_AVAIL)) {
+ bus->idlecount++;
+ if (bus->idlecount > bus->idletime) {
brcmf_dbg(SDIO, "idle\n");
sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_wd_timer(bus, 0);
+ bus->idlecount = 0;
brcmf_sdio_bus_sleep(bus, true, false);
sdio_release_host(bus->sdiodev->func[1]);
}
+ } else {
+ bus->idlecount = 0;
}
+ } else {
+ bus->idlecount = 0;
}
-
- return (atomic_read(&bus->ipend) > 0);
}
static void brcmf_sdio_dataworker(struct work_struct *work)
@@ -3663,9 +3658,18 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
datawork);
- while (atomic_read(&bus->dpc_tskcnt)) {
- atomic_set(&bus->dpc_tskcnt, 0);
+ bus->dpc_running = true;
+ wmb();
+ while (ACCESS_ONCE(bus->dpc_triggered)) {
+ bus->dpc_triggered = false;
brcmf_sdio_dpc(bus);
+ bus->idlecount = 0;
+ }
+ bus->dpc_running = false;
+ if (brcmf_sdiod_freezing(bus->sdiodev)) {
+ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
+ brcmf_sdiod_try_freeze(bus->sdiodev);
+ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
}
}
@@ -3784,8 +3788,8 @@ static int brcmf_sdio_buscoreprep(void *ctx)
return 0;
}
-static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
- u32 rstvec)
+static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
+ u32 rstvec)
{
struct brcmf_sdio_dev *sdiodev = ctx;
struct brcmf_core *core;
@@ -3828,7 +3832,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
.prepare = brcmf_sdio_buscoreprep,
- .exit_dl = brcmf_sdio_buscore_exitdl,
+ .activate = brcmf_sdio_buscore_activate,
.read32 = brcmf_sdio_buscore_read32,
.write32 = brcmf_sdio_buscore_write32,
};
@@ -3882,13 +3886,6 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
- /* Get info on the SOCRAM cores... */
- bus->ramsize = bus->ci->ramsize;
- if (!(bus->ramsize)) {
- brcmf_err("failed to find SOCRAM memory!\n");
- goto fail;
- }
-
/* Set card control so an SDIO card reset does a WLAN backplane reset */
reg_val = brcmf_sdiod_regrb(bus->sdiodev,
SDIO_CCCR_BRCM_CARDCTRL, &err);
@@ -3944,13 +3941,19 @@ static int
brcmf_sdio_watchdog_thread(void *data)
{
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+ int wait;
allow_signal(SIGTERM);
/* Run until signal received */
+ brcmf_sdiod_freezer_count(bus->sdiodev);
while (1) {
if (kthread_should_stop())
break;
- if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
+ brcmf_sdiod_freezer_uncount(bus->sdiodev);
+ wait = wait_for_completion_interruptible(&bus->watchdog_wait);
+ brcmf_sdiod_freezer_count(bus->sdiodev);
+ brcmf_sdiod_try_freeze(bus->sdiodev);
+ if (!wait) {
brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */
bus->sdcnt.tickcnt++;
@@ -3971,7 +3974,7 @@ brcmf_sdio_watchdog(unsigned long data)
/* Reschedule the watchdog */
if (bus->wd_timer_valid)
mod_timer(&bus->timer,
- jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+ jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
}
}
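
The jiffies conversions above are behaviour-preserving cleanups; a small sketch (hypothetical helper name) of why msecs_to_jiffies() is preferred over open-coded HZ arithmetic:

#include <linux/jiffies.h>

#define POLL_MS 10	/* stand-in for BRCMF_WD_POLL_MS */

static unsigned long poll_expiry(void)
{
	/* Same intent as "jiffies + POLL_MS * HZ / 1000", but the helper
	 * rounds up instead of truncating and stays correct for any HZ
	 * (e.g. HZ=250, where 10 * 250 / 1000 silently drops to 2 ticks). */
	return jiffies + msecs_to_jiffies(POLL_MS);
}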
@@ -4089,6 +4092,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret;
struct brcmf_sdio *bus;
+ struct workqueue_struct *wq;
brcmf_dbg(TRACE, "Enter\n");
@@ -4117,12 +4121,16 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
}
- INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
- bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
- if (bus->brcmf_wq == NULL) {
+ /* single-threaded workqueue */
+ wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
+ dev_name(&sdiodev->func[1]->dev));
+ if (!wq) {
brcmf_err("insufficient memory to create txworkqueue\n");
goto fail;
}
+ brcmf_sdiod_freezer_count(sdiodev);
+ INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+ bus->brcmf_wq = wq;
/* attempt to attach to the dongle */
if (!(brcmf_sdio_probe_attach(bus))) {
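
For reference, the hunk above swaps create_singlethread_workqueue() for an ordered workqueue with a per-device name; a hedged sketch of the equivalent allocation (hypothetical wrapper, not driver code):

#include <linux/workqueue.h>

static struct workqueue_struct *make_bus_wq(const char *devname)
{
	/* An ordered workqueue still executes at most one work item at a
	 * time (what the old single-threaded workqueue provided), while
	 * WQ_MEM_RECLAIM guarantees forward progress under memory pressure
	 * and the format string gives each bus instance a distinct name. */
	return alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM, devname);
}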
@@ -4143,13 +4151,15 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
- bus, "brcmf_watchdog");
+ bus, "brcmf_wdog/%s",
+ dev_name(&sdiodev->func[1]->dev));
if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n");
bus->watchdog_tsk = NULL;
}
/* Initialize DPC thread */
- atomic_set(&bus->dpc_tskcnt, 0);
+ bus->dpc_triggered = false;
+ bus->dpc_running = false;
/* Assign bus interface call back */
bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
@@ -4242,16 +4252,16 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
destroy_workqueue(bus->brcmf_wq);
if (bus->ci) {
- if (bus->sdiodev->state != BRCMF_STATE_NOMEDIUM) {
+ if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_wd_timer(bus, 0);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Leave the device in state where it is
- * 'quiet'. This is done by putting it in
- * download_state which essentially resets
- * all necessary cores.
+ * 'passive'. This is done by resetting all
+ * necessary cores.
*/
msleep(20);
- brcmf_chip_enter_download(bus->ci);
+ brcmf_chip_set_passive(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(bus->sdiodev->func[1]);
}
@@ -4277,7 +4287,7 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
}
/* don't start the wd until fw is loaded */
- if (bus->sdiodev->state != BRCMF_STATE_DATA)
+ if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
return;
if (wdtick) {
@@ -4290,16 +4300,28 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
dynamically changed or in the first instance
*/
bus->timer.expires =
- jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
+ jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS);
add_timer(&bus->timer);
} else {
/* Re arm the timer, at last watchdog period */
mod_timer(&bus->timer,
- jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+ jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
}
bus->wd_timer_valid = true;
bus->save_ms = wdtick;
}
}
+
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
+{
+ int ret;
+
+ sdio_claim_host(bus->sdiodev->func[1]);
+ ret = brcmf_sdio_bus_sleep(bus, sleep, false);
+ sdio_release_host(bus->sdiodev->func[1]);
+
+ return ret;
+}
+
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
index ec2586a8425c..7328478b2d7b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
@@ -155,11 +155,17 @@
/* watchdog polling interval in ms */
#define BRCMF_WD_POLL_MS 10
-/* The state of the bus */
-enum brcmf_sdio_state {
- BRCMF_STATE_DOWN, /* Device available, still initialising */
- BRCMF_STATE_DATA, /* Ready for data transfers, DPC enabled */
- BRCMF_STATE_NOMEDIUM /* No medium access to dongle possible */
+/**
+ * enum brcmf_sdiod_state - the state of the bus.
+ *
+ * @BRCMF_SDIOD_DOWN: Device can be accessed, no DPC.
+ * @BRCMF_SDIOD_DATA: Ready for data transfers, DPC enabled.
+ * @BRCMF_SDIOD_NOMEDIUM: No medium access to dongle possible.
+ */
+enum brcmf_sdiod_state {
+ BRCMF_SDIOD_DOWN,
+ BRCMF_SDIOD_DATA,
+ BRCMF_SDIOD_NOMEDIUM
};
struct brcmf_sdreg {
@@ -169,15 +175,13 @@ struct brcmf_sdreg {
};
struct brcmf_sdio;
+struct brcmf_sdiod_freezer;
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
u32 sbwad; /* Save backplane window address */
struct brcmf_sdio *bus;
- atomic_t suspend; /* suspend flag */
- bool sleeping;
- wait_queue_head_t idle_wait;
struct device *dev;
struct brcmf_bus *bus_if;
struct brcmfmac_sdio_platform_data *pdata;
@@ -194,7 +198,8 @@ struct brcmf_sdio_dev {
char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
bool wowl_enabled;
- enum brcmf_sdio_state state;
+ enum brcmf_sdiod_state state;
+ struct brcmf_sdiod_freezer *freezer;
};
/* sdio core registers */
@@ -337,6 +342,28 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
/* Issue an abort to the specified function */
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+ enum brcmf_sdiod_state state);
+#ifdef CONFIG_PM_SLEEP
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
+#else
+static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+ return false;
+}
+static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdio_remove(struct brcmf_sdio *bus);
@@ -344,5 +371,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus);
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep);
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus);
#endif /* BRCMFMAC_SDIO_H */
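
The CONFIG_PM_SLEEP stubs declared above keep call sites free of #ifdefs; a hypothetical caller (illustrative only) shows the intended usage:

static void example_wd_wait(struct brcmf_sdio_dev *sdiodev)
{
	/* With CONFIG_PM_SLEEP disabled both calls compile to empty inlines
	 * and the branch folds away; with it enabled, the calling thread
	 * parks here while the system is suspending. */
	if (brcmf_sdiod_freezing(sdiodev))
		brcmf_sdiod_try_freeze(sdiodev);
}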
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac.h
new file mode 100644
index 000000000000..a0da3248b942
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__TRACE_BRCMSMAC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_BRCMSMAC_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac
+
+/*
+ * We define a tracepoint, its arguments, its printk format and its
+ * 'fast binary record' layout.
+ */
+TRACE_EVENT(brcms_timer,
+ /* TPPROTO is the prototype of the function called by this tracepoint */
+ TP_PROTO(struct brcms_timer *t),
+ /*
+ * TPARGS(firstarg, p) are the parameters names, same as found in the
+ * prototype.
+ */
+ TP_ARGS(t),
+ /*
+ * Fast binary tracing: define the trace record via TP_STRUCT__entry().
+ * You can think about it like a regular C structure local variable
+ * definition.
+ */
+ TP_STRUCT__entry(
+ __field(uint, ms)
+ __field(uint, set)
+ __field(uint, periodic)
+ ),
+ TP_fast_assign(
+ __entry->ms = t->ms;
+ __entry->set = t->set;
+ __entry->periodic = t->periodic;
+ ),
+ TP_printk(
+ "ms=%u set=%u periodic=%u",
+ __entry->ms, __entry->set, __entry->periodic
+ )
+);
+
+TRACE_EVENT(brcms_dpc,
+ TP_PROTO(unsigned long data),
+ TP_ARGS(data),
+ TP_STRUCT__entry(
+ __field(unsigned long, data)
+ ),
+ TP_fast_assign(
+ __entry->data = data;
+ ),
+ TP_printk(
+ "data=%p",
+ (void *)__entry->data
+ )
+);
+
+TRACE_EVENT(brcms_macintstatus,
+ TP_PROTO(const struct device *dev, int in_isr, u32 macintstatus,
+ u32 mask),
+ TP_ARGS(dev, in_isr, macintstatus, mask),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(int, in_isr)
+ __field(u32, macintstatus)
+ __field(u32, mask)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ __entry->in_isr = in_isr;
+ __entry->macintstatus = macintstatus;
+ __entry->mask = mask;
+ ),
+ TP_printk("[%s] in_isr=%d macintstatus=%#x mask=%#x", __get_str(dev),
+ __entry->in_isr, __entry->macintstatus, __entry->mask)
+);
+#endif /* __TRACE_BRCMSMAC_H */
+
+#ifdef CONFIG_BRCM_TRACING
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE brcms_trace_brcmsmac
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_BRCM_TRACING */
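
Usage note (assumed, based on the standard TRACE_EVENT machinery): each TRACE_EVENT(name, ...) above generates a trace_name() call for drivers to fire, which collapses to a no-op stub when CONFIG_BRCM_TRACING is off. A hypothetical call site:

#include "brcms_trace_events.h"

static void example_timer_fire(struct brcms_timer *t)
{
	trace_brcms_timer(t);	/* records ms/set/periodic per the event above */
	/* ... arm or service the underlying timer ... */
}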
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
new file mode 100644
index 000000000000..0e8a69ab909f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__TRACE_BRCMSMAC_MSG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_BRCMSMAC_MSG_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac_msg
+
+#define MAX_MSG_LEN 100
+
+DECLARE_EVENT_CLASS(brcms_msg_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_warn,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_crit,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+TRACE_EVENT(brcms_dbg,
+ TP_PROTO(u32 level, const char *func, struct va_format *vaf),
+ TP_ARGS(level, func, vaf),
+ TP_STRUCT__entry(
+ __field(u32, level)
+ __string(func, func)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ __assign_str(func, func);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s: %s", __get_str(func), __get_str(msg))
+);
+#endif /* __TRACE_BRCMSMAC_MSG_H */
+
+#ifdef CONFIG_BRCM_TRACING
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE brcms_trace_brcmsmac_msg
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_BRCM_TRACING */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h
new file mode 100644
index 000000000000..cf2cc070f1e5
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__TRACE_BRCMSMAC_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_BRCMSMAC_TX_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac_tx
+
+TRACE_EVENT(brcms_txdesc,
+ TP_PROTO(const struct device *dev,
+ void *txh, size_t txh_len),
+ TP_ARGS(dev, txh, txh_len),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __dynamic_array(u8, txh, txh_len)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ memcpy(__get_dynamic_array(txh), txh, txh_len);
+ ),
+ TP_printk("[%s] txdesc", __get_str(dev))
+);
+
+TRACE_EVENT(brcms_txstatus,
+ TP_PROTO(const struct device *dev, u16 framelen, u16 frameid,
+ u16 status, u16 lasttxtime, u16 sequence, u16 phyerr,
+ u16 ackphyrxsh),
+ TP_ARGS(dev, framelen, frameid, status, lasttxtime, sequence, phyerr,
+ ackphyrxsh),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(u16, framelen)
+ __field(u16, frameid)
+ __field(u16, status)
+ __field(u16, lasttxtime)
+ __field(u16, sequence)
+ __field(u16, phyerr)
+ __field(u16, ackphyrxsh)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ __entry->framelen = framelen;
+ __entry->frameid = frameid;
+ __entry->status = status;
+ __entry->lasttxtime = lasttxtime;
+ __entry->sequence = sequence;
+ __entry->phyerr = phyerr;
+ __entry->ackphyrxsh = ackphyrxsh;
+ ),
+ TP_printk("[%s] FrameId %#04x TxStatus %#04x LastTxTime %#04x "
+ "Seq %#04x PHYTxStatus %#04x RxAck %#04x",
+ __get_str(dev), __entry->frameid, __entry->status,
+ __entry->lasttxtime, __entry->sequence, __entry->phyerr,
+ __entry->ackphyrxsh)
+);
+
+TRACE_EVENT(brcms_ampdu_session,
+ TP_PROTO(const struct device *dev, unsigned max_ampdu_len,
+ u16 max_ampdu_frames, u16 ampdu_len, u16 ampdu_frames,
+ u16 dma_len),
+ TP_ARGS(dev, max_ampdu_len, max_ampdu_frames, ampdu_len, ampdu_frames,
+ dma_len),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(unsigned, max_ampdu_len)
+ __field(u16, max_ampdu_frames)
+ __field(u16, ampdu_len)
+ __field(u16, ampdu_frames)
+ __field(u16, dma_len)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ __entry->max_ampdu_len = max_ampdu_len;
+ __entry->max_ampdu_frames = max_ampdu_frames;
+ __entry->ampdu_len = ampdu_len;
+ __entry->ampdu_frames = ampdu_frames;
+ __entry->dma_len = dma_len;
+ ),
+ TP_printk("[%s] ampdu session max_len=%u max_frames=%u len=%u frames=%u dma_len=%u",
+ __get_str(dev), __entry->max_ampdu_len,
+ __entry->max_ampdu_frames, __entry->ampdu_len,
+ __entry->ampdu_frames, __entry->dma_len)
+);
+#endif /* __TRACE_BRCMSMAC_TX_H */
+
+#ifdef CONFIG_BRCM_TRACING
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE brcms_trace_brcmsmac_tx
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_BRCM_TRACING */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
index 871781e6a713..cbf2f06436fc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
@@ -14,9 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#if !defined(__TRACE_BRCMSMAC_H) || defined(TRACE_HEADER_MULTI_READ)
-
-#define __TRACE_BRCMSMAC_H
+#ifndef __BRCMS_TRACE_EVENTS_H
+#define __BRCMS_TRACE_EVENTS_H
#include <linux/types.h>
#include <linux/device.h>
@@ -34,222 +33,8 @@ static inline void trace_ ## name(proto) {}
static inline void trace_ ## name(proto) {}
#endif
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM brcmsmac
-
-/*
- * We define a tracepoint, its arguments, its printk format and its
- * 'fast binary record' layout.
- */
-TRACE_EVENT(brcms_timer,
- /* TPPROTO is the prototype of the function called by this tracepoint */
- TP_PROTO(struct brcms_timer *t),
- /*
- * TPARGS(firstarg, p) are the parameters names, same as found in the
- * prototype.
- */
- TP_ARGS(t),
- /*
- * Fast binary tracing: define the trace record via TP_STRUCT__entry().
- * You can think about it like a regular C structure local variable
- * definition.
- */
- TP_STRUCT__entry(
- __field(uint, ms)
- __field(uint, set)
- __field(uint, periodic)
- ),
- TP_fast_assign(
- __entry->ms = t->ms;
- __entry->set = t->set;
- __entry->periodic = t->periodic;
- ),
- TP_printk(
- "ms=%u set=%u periodic=%u",
- __entry->ms, __entry->set, __entry->periodic
- )
-);
-
-TRACE_EVENT(brcms_dpc,
- TP_PROTO(unsigned long data),
- TP_ARGS(data),
- TP_STRUCT__entry(
- __field(unsigned long, data)
- ),
- TP_fast_assign(
- __entry->data = data;
- ),
- TP_printk(
- "data=%p",
- (void *)__entry->data
- )
-);
-
-TRACE_EVENT(brcms_macintstatus,
- TP_PROTO(const struct device *dev, int in_isr, u32 macintstatus,
- u32 mask),
- TP_ARGS(dev, in_isr, macintstatus, mask),
- TP_STRUCT__entry(
- __string(dev, dev_name(dev))
- __field(int, in_isr)
- __field(u32, macintstatus)
- __field(u32, mask)
- ),
- TP_fast_assign(
- __assign_str(dev, dev_name(dev));
- __entry->in_isr = in_isr;
- __entry->macintstatus = macintstatus;
- __entry->mask = mask;
- ),
- TP_printk("[%s] in_isr=%d macintstatus=%#x mask=%#x", __get_str(dev),
- __entry->in_isr, __entry->macintstatus, __entry->mask)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM brcmsmac_tx
-
-TRACE_EVENT(brcms_txdesc,
- TP_PROTO(const struct device *dev,
- void *txh, size_t txh_len),
- TP_ARGS(dev, txh, txh_len),
- TP_STRUCT__entry(
- __string(dev, dev_name(dev))
- __dynamic_array(u8, txh, txh_len)
- ),
- TP_fast_assign(
- __assign_str(dev, dev_name(dev));
- memcpy(__get_dynamic_array(txh), txh, txh_len);
- ),
- TP_printk("[%s] txdesc", __get_str(dev))
-);
-
-TRACE_EVENT(brcms_txstatus,
- TP_PROTO(const struct device *dev, u16 framelen, u16 frameid,
- u16 status, u16 lasttxtime, u16 sequence, u16 phyerr,
- u16 ackphyrxsh),
- TP_ARGS(dev, framelen, frameid, status, lasttxtime, sequence, phyerr,
- ackphyrxsh),
- TP_STRUCT__entry(
- __string(dev, dev_name(dev))
- __field(u16, framelen)
- __field(u16, frameid)
- __field(u16, status)
- __field(u16, lasttxtime)
- __field(u16, sequence)
- __field(u16, phyerr)
- __field(u16, ackphyrxsh)
- ),
- TP_fast_assign(
- __assign_str(dev, dev_name(dev));
- __entry->framelen = framelen;
- __entry->frameid = frameid;
- __entry->status = status;
- __entry->lasttxtime = lasttxtime;
- __entry->sequence = sequence;
- __entry->phyerr = phyerr;
- __entry->ackphyrxsh = ackphyrxsh;
- ),
- TP_printk("[%s] FrameId %#04x TxStatus %#04x LastTxTime %#04x "
- "Seq %#04x PHYTxStatus %#04x RxAck %#04x",
- __get_str(dev), __entry->frameid, __entry->status,
- __entry->lasttxtime, __entry->sequence, __entry->phyerr,
- __entry->ackphyrxsh)
-);
-
-TRACE_EVENT(brcms_ampdu_session,
- TP_PROTO(const struct device *dev, unsigned max_ampdu_len,
- u16 max_ampdu_frames, u16 ampdu_len, u16 ampdu_frames,
- u16 dma_len),
- TP_ARGS(dev, max_ampdu_len, max_ampdu_frames, ampdu_len, ampdu_frames,
- dma_len),
- TP_STRUCT__entry(
- __string(dev, dev_name(dev))
- __field(unsigned, max_ampdu_len)
- __field(u16, max_ampdu_frames)
- __field(u16, ampdu_len)
- __field(u16, ampdu_frames)
- __field(u16, dma_len)
- ),
- TP_fast_assign(
- __assign_str(dev, dev_name(dev));
- __entry->max_ampdu_len = max_ampdu_len;
- __entry->max_ampdu_frames = max_ampdu_frames;
- __entry->ampdu_len = ampdu_len;
- __entry->ampdu_frames = ampdu_frames;
- __entry->dma_len = dma_len;
- ),
- TP_printk("[%s] ampdu session max_len=%u max_frames=%u len=%u frames=%u dma_len=%u",
- __get_str(dev), __entry->max_ampdu_len,
- __entry->max_ampdu_frames, __entry->ampdu_len,
- __entry->ampdu_frames, __entry->dma_len)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM brcmsmac_msg
-
-#define MAX_MSG_LEN 100
-
-DECLARE_EVENT_CLASS(brcms_msg_event,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf),
- TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
- ),
- TP_printk("%s", __get_str(msg))
-);
-
-DEFINE_EVENT(brcms_msg_event, brcms_info,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(brcms_msg_event, brcms_warn,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(brcms_msg_event, brcms_err,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(brcms_msg_event, brcms_crit,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-TRACE_EVENT(brcms_dbg,
- TP_PROTO(u32 level, const char *func, struct va_format *vaf),
- TP_ARGS(level, func, vaf),
- TP_STRUCT__entry(
- __field(u32, level)
- __string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(
- __entry->level = level;
- __assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
- ),
- TP_printk("%s: %s", __get_str(func), __get_str(msg))
-);
+#include "brcms_trace_brcmsmac.h"
+#include "brcms_trace_brcmsmac_tx.h"
+#include "brcms_trace_brcmsmac_msg.h"
#endif /* __TRACE_BRCMSMAC_H */
-
-#ifdef CONFIG_BRCM_TRACING
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE brcms_trace_events
-
-#include <trace/define_trace.h>
-
-#endif /* CONFIG_BRCM_TRACING */
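
With the events split out, brcms_trace_events.h becomes a plain umbrella header. The usual pairing (already present in the tree as brcms_trace_events.c; shown here only as a sketch) is that exactly one compilation unit defines CREATE_TRACE_POINTS so <trace/define_trace.h> emits the tracepoint bodies, while every other includer only gets declarations:

/* brcms_trace_events.c (sketch) */
#define CREATE_TRACE_POINTS
#include "brcms_trace_events.h"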
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index eb8584a9c49a..369527e27689 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4668,7 +4668,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
brcms_c_coredisable(wlc_hw);
/* Match driver "down" state */
- bcma_core_pci_down(wlc_hw->d11core->bus);
+ bcma_host_pci_down(wlc_hw->d11core->bus);
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
* Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/
- bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
+ bcma_host_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
true);
/*
@@ -4969,12 +4969,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
*/
if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */
- bcma_core_pci_down(wlc_hw->d11core->bus);
+ bcma_host_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
return -ENOMEDIUM;
}
- bcma_core_pci_up(wlc_hw->d11core->bus);
+ bcma_host_pci_up(wlc_hw->d11core->bus);
/* reset the d11 core */
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -5171,7 +5171,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
/* turn off primary xtal and pll */
if (!wlc_hw->noreset) {
- bcma_core_pci_down(wlc_hw->d11core->bus);
+ bcma_host_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index 941b1e41f366..1c4e9dd57960 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -2949,5 +2949,5 @@ bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *ppi)
if (ISNPHY(pi))
return wlc_phy_n_txpower_ipa_ison(pi);
else
- return 0;
+ return false;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 5f1366234a0d..93d4cde0eb31 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -4999,7 +4999,7 @@ void wlc_2064_vco_cal(struct brcms_phy *pi)
bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi)
{
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
- return 0;
+ return false;
else
return (LCNPHY_TX_PWR_CTRL_HW ==
wlc_lcnphy_get_tx_pwr_ctrl((pi)));
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 084f18f4f950..99dac9b8a082 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -23041,10 +23041,7 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL)
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
NPHY_RSSI_SEL_W1);
- else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G2_SEL)
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
- NPHY_RSSI_SEL_W2);
- else
+ else /* RADIO_2055_WBRSSI_G2_SEL */
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
NPHY_RSSI_SEL_W2);
if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL)
@@ -23053,13 +23050,9 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL)
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
NPHY_RSSI_SEL_W1);
- else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G2_SEL)
+ else /* RADIO_2055_WBRSSI_G2_SEL */
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
NPHY_RSSI_SEL_W2);
- else
- wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
- NPHY_RSSI_SEL_W2);
-
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type);
write_phy_reg(pi, 0x91, rfctrlintc_state[0]);
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 2124a17d0bfd..4efdd51af9c8 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -37,6 +37,8 @@
#define BRCM_CC_43362_CHIP_ID 43362
#define BRCM_CC_4335_CHIP_ID 0x4335
#define BRCM_CC_4339_CHIP_ID 0x4339
+#define BRCM_CC_43430_CHIP_ID 43430
+#define BRCM_CC_4345_CHIP_ID 0x4345
#define BRCM_CC_4354_CHIP_ID 0x4354
#define BRCM_CC_4356_CHIP_ID 0x4356
#define BRCM_CC_43566_CHIP_ID 43566
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index d242333b7559..e1fd499930a0 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -183,7 +183,14 @@ struct chipcregs {
u8 uart1lsr;
u8 uart1msr;
u8 uart1scratch;
- u32 PAD[126];
+ u32 PAD[62];
+
+ /* save/restore, corerev >= 48 */
+ u32 sr_capability; /* 0x500 */
+ u32 sr_control0; /* 0x504 */
+ u32 sr_control1; /* 0x508 */
+ u32 gpio_control; /* 0x50C */
+ u32 PAD[60];
/* PMU registers (corerev >= 20) */
u32 pmucontrol; /* 0x600 */
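
A quick offset check on the PAD split above: the original u32 PAD[126] is 504 bytes, and 62 + 4 + 60 = 126 words, so the new save/restore block starts at 0x500 as commented and pmucontrol remains at 0x600; the layout seen by older cores is unchanged.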
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 964b64ab7fe3..7603546d2de3 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -447,7 +447,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
#ifdef CONFIG_PM
-static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
+static int cw1200_spi_suspend(struct device *dev)
{
struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
@@ -458,10 +458,8 @@ static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
return 0;
}
-static int cw1200_spi_resume(struct device *dev)
-{
- return 0;
-}
+static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
+
#endif
static struct spi_driver spi_driver = {
@@ -472,8 +470,7 @@ static struct spi_driver spi_driver = {
.bus = &spi_bus_type,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
- .suspend = cw1200_spi_suspend,
- .resume = cw1200_spi_resume,
+ .pm = &cw1200_pm_ops,
#endif
},
};
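
The cw1200 change above moves from the legacy bus-level suspend/resume callbacks to dev_pm_ops. A minimal sketch of what SIMPLE_DEV_PM_OPS() provides (hypothetical names, not the driver's):

#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* nothing to quiesce in this sketch */
}

/* Expands to a const struct dev_pm_ops wiring the system-sleep callbacks
 * (suspend/resume plus freeze/thaw/poweroff/restore). Passing NULL simply
 * leaves a callback unset, which is why the empty cw1200_spi_resume()
 * stub could be deleted outright. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, NULL);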
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 4a47c7f8a246..b0f65fa09428 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -293,7 +293,7 @@ void cw1200_remove_interface(struct ieee80211_hw *dev,
}
priv->vif = NULL;
priv->mode = NL80211_IFTYPE_MONITOR;
- memset(priv->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(priv->mac_addr);
memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo));
cw1200_free_keys(priv);
cw1200_setup_mac(priv);
@@ -1240,8 +1240,8 @@ static void cw1200_do_join(struct cw1200_common *priv)
bssid = priv->vif->bss_conf.bssid;
- bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel,
- bssid, NULL, 0, 0, 0);
+ bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel, bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (!bss && !conf->ibss_joined) {
wsm_unlock_tx(priv);
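
The cfg80211_get_bss() change above tracks a cfg80211 API update: the trailing capability mask/value pair became enum selectors. A hedged sketch of the new call shape (hypothetical wrapper):

#include <net/cfg80211.h>

static struct cfg80211_bss *find_any_bss(struct wiphy *wiphy,
					 struct ieee80211_channel *chan,
					 const u8 *bssid)
{
	/* IEEE80211_BSS_TYPE_ANY / IEEE80211_PRIVACY_ANY reproduce the old
	 * wildcard behaviour of passing 0, 0 for capability mask/value. */
	return cfg80211_get_bss(wiphy, chan, bssid, NULL, 0,
				IEEE80211_BSS_TYPE_ANY,
				IEEE80211_PRIVACY_ANY);
}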
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index 0bd541175ecd..d28bd49cb5fd 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1429,7 +1429,7 @@ void cw1200_link_id_gc_work(struct work_struct *work)
priv->link_id_map &= ~mask;
priv->sta_asleep_mask &= ~mask;
priv->pspoll_mask &= ~mask;
- memset(map_link.mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(map_link.mac_addr);
spin_unlock_bh(&priv->ps_state_lock);
reset.link_id = i + 1;
wsm_reset(priv, &reset);
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 8bde77689469..055e11d353ca 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -174,8 +174,8 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
/* send broadcast and multicast frames to broadcast RA, if
* configured; otherwise, use unicast RA of the WDS link */
if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
- skb->data[0] & 0x01)
- memset(&hdr.addr1, 0xff, ETH_ALEN);
+ is_multicast_ether_addr(skb->data))
+ eth_broadcast_addr(hdr.addr1);
else if (iface->type == HOSTAP_INTERFACE_WDS)
memcpy(&hdr.addr1, iface->u.wds.remote_addr,
ETH_ALEN);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index fd8d83dd4f62..c995ace153ee 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -309,7 +309,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
int i;
PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name);
- memset(addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(addr);
resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
@@ -1015,8 +1015,8 @@ static void prism2_send_mgmt(struct net_device *dev,
memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */
} else if (ieee80211_is_ctl(hdr->frame_control)) {
/* control:ACK does not have addr2 or addr3 */
- memset(hdr->addr2, 0, ETH_ALEN);
- memset(hdr->addr3, 0, ETH_ALEN);
+ eth_zero_addr(hdr->addr2);
+ eth_zero_addr(hdr->addr3);
} else {
memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */
memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */
@@ -1601,7 +1601,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
memcpy(prev_ap, pos, ETH_ALEN);
pos++; pos++; pos++; left -= 6;
} else
- memset(prev_ap, 0, ETH_ALEN);
+ eth_zero_addr(prev_ap);
if (left >= 2) {
unsigned int ileft;
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index de7c4ffec309..7635ac4f6679 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -442,7 +442,7 @@ static void handle_info_queue_linkstatus(local_info_t *local)
} else {
netif_carrier_off(local->dev);
netif_carrier_off(local->ddev);
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
}
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 52919ad42726..01de1a3bf94e 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -224,7 +224,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr,
if (selected) {
if (do_not_remove)
- memset(selected->u.wds.remote_addr, 0, ETH_ALEN);
+ eth_zero_addr(selected->u.wds.remote_addr);
else {
hostap_remove_interface(selected->dev, rtnl_locked, 0);
local->wds_connections--;
@@ -798,7 +798,6 @@ static void prism2_tx_timeout(struct net_device *dev)
const struct header_ops hostap_80211_ops = {
.create = eth_header,
- .rebuild = eth_rebuild_header,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
.parse = hostap_80211_header_parse,
@@ -1088,7 +1087,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
(u8 *) &val, 2);
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
return ret;
}
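
The hostap conversions above replace open-coded memset() calls with the <linux/etherdevice.h> helpers, which keep the length tied to ETH_ALEN and make the intent explicit. A small illustration (hypothetical function):

#include <linux/etherdevice.h>

static void example_reset_peer(u8 *bssid, u8 *ra)
{
	eth_zero_addr(bssid);		/* was: memset(bssid, 0, ETH_ALEN) */
	eth_broadcast_addr(ra);		/* was: memset(ra, 0xff, ETH_ALEN) */
}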
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 57904015380f..ca25283e1c92 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -4,6 +4,7 @@
#include <linux/interrupt.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <net/iw_handler.h>
#include <net/ieee80211_radiotap.h>
@@ -85,16 +86,16 @@ struct hfa384x_rx_frame {
/* 802.11 */
__le16 frame_control;
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
- u8 addr4[6];
+ u8 addr4[ETH_ALEN];
__le16 data_len;
/* 802.3 */
- u8 dst_addr[6];
- u8 src_addr[6];
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
__be16 len;
/* followed by frame data; max 2304 bytes */
@@ -114,16 +115,16 @@ struct hfa384x_tx_frame {
/* 802.11 */
__le16 frame_control; /* parts not used */
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6]; /* filled by firmware */
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN]; /* filled by firmware */
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl; /* filled by firmware */
- u8 addr4[6];
+ u8 addr4[ETH_ALEN];
__le16 data_len;
/* 802.3 */
- u8 dst_addr[6];
- u8 src_addr[6];
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
__be16 len;
/* followed by frame data; max 2304 bytes */
@@ -156,7 +157,7 @@ struct hfa384x_hostscan_request {
} __packed;
struct hfa384x_join_request {
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
__le16 channel;
} __packed;
@@ -228,7 +229,7 @@ struct hfa384x_scan_result {
__le16 chid;
__le16 anl;
__le16 sl;
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
__le16 beacon_interval;
__le16 capability;
__le16 ssid_len;
@@ -241,7 +242,7 @@ struct hfa384x_hostscan_result {
__le16 chid;
__le16 anl;
__le16 sl;
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
__le16 beacon_interval;
__le16 capability;
__le16 ssid_len;
@@ -824,7 +825,7 @@ struct local_info {
#define PRISM2_INFO_PENDING_SCANRESULTS 1
int prev_link_status; /* previous received LinkStatus info */
int prev_linkstatus_connected;
- u8 preferred_ap[6]; /* use this AP if possible */
+ u8 preferred_ap[ETH_ALEN]; /* use this AP if possible */
#ifdef PRISM2_CALLBACK
void *callback_data; /* Can be used in callbacks; e.g., allocate
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 21de4fe6cf2d..d6ec44d7a391 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -66,7 +66,7 @@ config IPW2100_DEBUG
config IPW2200
tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
depends on PCI && CFG80211
- select CFG80211_WEXT
+ select CFG80211_WEXT_EXPORT
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 6fabea0309dd..08eb229e7816 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2147,8 +2147,8 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
return;
}
- memset(priv->bssid, 0, ETH_ALEN);
- memset(priv->ieee->bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->bssid);
+ eth_zero_addr(priv->ieee->bssid);
netif_carrier_off(priv->net_dev);
netif_stop_queue(priv->net_dev);
@@ -6956,7 +6956,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev,
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
} else
- memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu->ap_addr.sa_data);
IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data);
return 0;
@@ -8300,7 +8300,7 @@ static void ipw2100_wx_event_work(struct work_struct *work)
priv->status & STATUS_RF_KILL_MASK ||
ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
&priv->bssid, &len)) {
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
} else {
/* We now have the BSSID, so can finish setting to the full
* associated state */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 67cad9b05ad8..39f3e6f5cbcd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1964,7 +1964,7 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
if (priv->status & STATUS_ASSOCIATED)
memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
else
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
@@ -7400,7 +7400,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
- memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(priv->assoc_request.dest);
priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
} else {
memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
@@ -8986,7 +8986,7 @@ static int ipw_wx_get_wap(struct net_device *dev,
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
} else
- memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+ eth_zero_addr(wrqu->ap_addr.sa_data);
IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
wrqu->ap_addr.sa_data);
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index eaaeea19d8c5..bac60b2bc3f0 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -1678,7 +1678,7 @@ il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
lq_sta->total_success > lq_sta->max_success_limit ||
(!lq_sta->search_better_tbl && lq_sta->flush_timer &&
flush_interval_passed)) {
- D_RATE("LQ: stay is expired %d %d %d\n:",
+ D_RATE("LQ: stay is expired %d %d %d\n",
lq_sta->total_failed, lq_sta->total_success,
flush_interval_passed);
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 2c4fa49686ef..887114582583 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4634,7 +4634,7 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
il->vif = NULL;
il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
il_teardown_interface(il, vif);
- memset(il->bssid, 0, ETH_ALEN);
+ eth_zero_addr(il->bssid);
D_MAC80211("leave\n");
mutex_unlock(&il->mutex);
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cceb026e0793..5abd62ed8cb4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1130,20 +1130,23 @@ done:
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event)
+static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ if (event->type != RSSI_EVENT)
+ return;
+
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->mutex);
if (priv->lib->bt_params &&
priv->lib->bt_params->advanced_bt_coexist) {
- if (rssi_event == RSSI_EVENT_LOW)
+ if (event->u.rssi.data == RSSI_EVENT_LOW)
priv->bt_enable_pspoll = true;
- else if (rssi_event == RSSI_EVENT_HIGH)
+ else if (event->u.rssi.data == RSSI_EVENT_HIGH)
priv->bt_enable_pspoll = false;
iwlagn_send_advance_bt_config(priv);
@@ -1614,7 +1617,7 @@ const struct ieee80211_ops iwlagn_hw_ops = {
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
- .rssi_callback = iwlagn_mac_rssi_callback,
+ .event_callback = iwlagn_mac_event_callback,
.set_tim = iwlagn_mac_set_tim,
};
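
The iwlagn change above follows mac80211's move from a dedicated rssi_callback to a single event_callback taking a tagged struct ieee80211_event; drivers filter on event->type. A sketch of the dispatch shape (names mirror the hunk, body is illustrative):

#include <net/mac80211.h>

static void example_event_cb(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     const struct ieee80211_event *event)
{
	if (event->type != RSSI_EVENT)
		return;		/* other event kinds arrive through the same hook */

	if (event->u.rssi.data == RSSI_EVENT_LOW) {
		/* e.g. enable PS-poll, as the dvm driver does */
	} else if (event->u.rssi.data == RSSI_EVENT_HIGH) {
		/* e.g. disable PS-poll */
	}
}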
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index c4d6dd7402d9..234e30f498b2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1549,7 +1549,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
table.blink1, table.blink2, table.ilink1,
table.ilink2, table.bcon_time, table.gp1,
table.gp2, table.gp3, table.ucode_ver,
- table.hw_ver, table.brd_ver);
+ table.hw_ver, 0, table.brd_ver);
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 32b78a66536d..3bd7c86e90d9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -3153,12 +3153,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
- desc += sprintf(buff+desc, " %s",
+ desc += sprintf(buff + desc, " %s",
(is_siso(tbl->lq_type)) ? "SISO" :
((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
- desc += sprintf(buff+desc, " %s",
+ desc += sprintf(buff + desc, " %s",
(tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
+ desc += sprintf(buff + desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
(lq_sta->is_green) ? "GF enabled" : "",
(lq_sta->is_agg) ? "AGG on" : "");
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 1e40a12de077..275df12a6045 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -189,9 +189,9 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_flags |= RATE_MCS_CCK_MSK;
/* Set up antennas */
- if (priv->lib->bt_params &&
- priv->lib->bt_params->advanced_bt_coexist &&
- priv->bt_full_concurrent) {
+ if (priv->lib->bt_params &&
+ priv->lib->bt_params->advanced_bt_coexist &&
+ priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
first_antenna(priv->nvm_data->valid_tx_ant));
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 97e38d2e2983..36e786f0387b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,16 +69,16 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 12
-#define IWL3160_UCODE_API_MAX 12
+#define IWL7260_UCODE_API_MAX 13
+#define IWL3160_UCODE_API_MAX 13
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 10
-#define IWL3160_UCODE_API_OK 10
+#define IWL7260_UCODE_API_OK 12
+#define IWL3160_UCODE_API_OK 12
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 9
-#define IWL3160_UCODE_API_MIN 9
+#define IWL7260_UCODE_API_MIN 10
+#define IWL3160_UCODE_API_MIN 10
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 2f7fe8167dc9..ce6321b7d241 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,13 +69,13 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 12
+#define IWL8000_UCODE_API_MAX 13
/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK 10
+#define IWL8000_UCODE_API_OK 12
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 9
+#define IWL8000_UCODE_API_MIN 10
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
@@ -94,8 +94,8 @@
IWL8000_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
-#define DEFAULT_NVM_FILE_FAMILY_8000A "iwl_nvm_8000.bin"
-#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000B.bin"
+#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
+#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
/* Max SDIO RX aggregation size of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 28
@@ -177,8 +177,8 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
- .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
+ .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
+ .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +192,8 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
- .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
+ .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
+ .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 4b190d98a1ec..3f33f753ce2f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -92,9 +92,9 @@ static inline bool iwl_has_secure_boot(u32 hw_rev,
{
/* return 1 only for family 8000 B0 */
if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
- return 1;
+ return true;
- return 0;
+ return false;
}
/*
@@ -228,7 +228,7 @@ struct iwl_pwr_tx_backoff {
/**
* struct iwl_cfg
- * @name: Offical name of the device
+ * @name: Official name of the device
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
@@ -303,8 +303,8 @@ struct iwl_cfg {
bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
bool no_power_up_nic_in_init;
- const char *default_nvm_file;
- const char *default_nvm_file_8000A;
+ const char *default_nvm_file_B_step;
+ const char *default_nvm_file_C_step;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
unsigned int max_tx_agg_size;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 684254553558..9bb36d79c2bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -157,6 +157,7 @@ do { \
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
#define IWL_DL_DROP 0x00002000
+#define IWL_DL_LAR 0x00004000
#define IWL_DL_COEX 0x00008000
/* 0x000F0000 - 0x00010000 */
#define IWL_DL_FW 0x00010000
@@ -219,5 +220,6 @@ do { \
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
+#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
new file mode 100644
index 000000000000..04e6649340b8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_DATA
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_data
+
+TRACE_EVENT(iwlwifi_dev_tx_data,
+ TP_PROTO(const struct device *dev,
+ struct sk_buff *skb,
+ void *data, size_t data_len),
+ TP_ARGS(dev, skb, data, data_len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ if (iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(data), data, data_len);
+ ),
+ TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+TRACE_EVENT(iwlwifi_dev_rx_data,
+ TP_PROTO(const struct device *dev,
+ const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data,
+ len - iwl_rx_trace_len(trans, rxbuf, len))
+ ),
+ TP_fast_assign(
+ size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+ DEV_ASSIGN;
+ if (offs < len)
+ memcpy(__get_dynamic_array(data),
+ ((u8 *)rxbuf) + offs, len - offs);
+ ),
+ TP_printk("[%s] RX frame data", __get_str(dev))
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_DATA */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-data
+#include <trace/define_trace.h>
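
The two events above size their __dynamic_array() conditionally so payload bytes are only stored when frame-data tracing is enabled. A stripped-down sketch of that idiom (hypothetical event, not part of iwlwifi):

TRACE_EVENT(example_conditional_dump,
	TP_PROTO(bool want_payload, const void *buf, size_t len),
	TP_ARGS(want_payload, buf, len),
	TP_STRUCT__entry(
		__field(size_t, len)
		/* zero-length array => nothing recorded for this event */
		__dynamic_array(u8, data, want_payload ? len : 0)
	),
	TP_fast_assign(
		__entry->len = want_payload ? len : 0;
		if (want_payload)
			memcpy(__get_dynamic_array(data), buf, len);
	),
	TP_printk("captured %zu payload bytes", __entry->len)
);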
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h
new file mode 100644
index 000000000000..f62c54485852
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h
@@ -0,0 +1,155 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_IO
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_io
+
+TRACE_EVENT(iwlwifi_dev_ioread32,
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] read io[%#x] = %#x",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite8,
+ TP_PROTO(const struct device *dev, u32 offs, u8 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, offs)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write io[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite32,
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write io[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write PRPH[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_ioread_prph32,
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] read PRPH[%#x] = %#x",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_irq,
+ TP_PROTO(const struct device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ ),
+ /* TP_printk("") doesn't compile */
+ TP_printk("%d", 0)
+);
+
+TRACE_EVENT(iwlwifi_dev_ict_read,
+ TP_PROTO(const struct device *dev, u32 index, u32 value),
+ TP_ARGS(dev, index, value),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, index)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->index = index;
+ __entry->value = value;
+ ),
+ TP_printk("[%s] read ict[%d] = %#.8x",
+ __get_str(dev), __entry->index, __entry->value)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_IO */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-io
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
new file mode 100644
index 000000000000..223b8752f924
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -0,0 +1,202 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_IWLWIFI
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi
+
+TRACE_EVENT(iwlwifi_dev_hcmd,
+ TP_PROTO(const struct device *dev,
+ struct iwl_host_cmd *cmd, u16 total_size,
+ struct iwl_cmd_header *hdr),
+ TP_ARGS(dev, cmd, total_size, hdr),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __dynamic_array(u8, hcmd, total_size)
+ __field(u32, flags)
+ ),
+ TP_fast_assign(
+ int i, offset = sizeof(*hdr);
+
+ DEV_ASSIGN;
+ __entry->flags = cmd->flags;
+ memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ if (!cmd->len[i])
+ continue;
+ memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
+ cmd->data[i], cmd->len[i]);
+ offset += cmd->len[i];
+ }
+ ),
+ TP_printk("[%s] hcmd %#.2x (%ssync)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+ __entry->flags & CMD_ASYNC ? "a" : "")
+);
+
+TRACE_EVENT(iwlwifi_dev_rx,
+ TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ memcpy(__get_dynamic_array(rxbuf), rxbuf,
+ iwl_rx_trace_len(trans, rxbuf, len));
+ ),
+ TP_printk("[%s] RX cmd %#.2x",
+ __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
+);
+
+TRACE_EVENT(iwlwifi_dev_tx,
+ TP_PROTO(const struct device *dev, struct sk_buff *skb,
+ void *tfd, size_t tfdlen,
+ void *buf0, size_t buf0_len,
+ void *buf1, size_t buf1_len),
+ TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __field(size_t, framelen)
+ __dynamic_array(u8, tfd, tfdlen)
+
+ /*
+ * Do not insert between or below these items,
+ * we want to keep the frame together (except
+ * for the possible padding).
+ */
+ __dynamic_array(u8, buf0, buf0_len)
+ __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->framelen = buf0_len + buf1_len;
+ memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+ memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+ if (!iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ ),
+ TP_printk("[%s] TX %.2x (%zu bytes)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
+ __entry->framelen)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_error,
+ TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
+ u32 data1, u32 data2, u32 line, u32 blink1,
+ u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
+ u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver,
+ u32 brd_ver),
+ TP_ARGS(dev, desc, tsf_low, data1, data2, line,
+ blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
+ gp3, major, minor, hw_ver, brd_ver),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, desc)
+ __field(u32, tsf_low)
+ __field(u32, data1)
+ __field(u32, data2)
+ __field(u32, line)
+ __field(u32, blink1)
+ __field(u32, blink2)
+ __field(u32, ilink1)
+ __field(u32, ilink2)
+ __field(u32, bcon_time)
+ __field(u32, gp1)
+ __field(u32, gp2)
+ __field(u32, gp3)
+ __field(u32, major)
+ __field(u32, minor)
+ __field(u32, hw_ver)
+ __field(u32, brd_ver)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->desc = desc;
+ __entry->tsf_low = tsf_low;
+ __entry->data1 = data1;
+ __entry->data2 = data2;
+ __entry->line = line;
+ __entry->blink1 = blink1;
+ __entry->blink2 = blink2;
+ __entry->ilink1 = ilink1;
+ __entry->ilink2 = ilink2;
+ __entry->bcon_time = bcon_time;
+ __entry->gp1 = gp1;
+ __entry->gp2 = gp2;
+ __entry->gp3 = gp3;
+ __entry->major = major;
+ __entry->minor = minor;
+ __entry->hw_ver = hw_ver;
+ __entry->brd_ver = brd_ver;
+ ),
+ TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
+ "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
+ "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X "
+ "minor 0x%08X hw 0x%08X brd 0x%08X",
+ __get_str(dev), __entry->desc, __entry->tsf_low,
+ __entry->data1,
+ __entry->data2, __entry->line, __entry->blink1,
+ __entry->blink2, __entry->ilink1, __entry->ilink2,
+ __entry->bcon_time, __entry->gp1, __entry->gp2,
+ __entry->gp3, __entry->major, __entry->minor,
+ __entry->hw_ver, __entry->brd_ver)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_event,
+ TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+ TP_ARGS(dev, time, data, ev),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+ __get_str(dev), __entry->time, __entry->data, __entry->ev)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-iwlwifi
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h
new file mode 100644
index 000000000000..a3b3c2465f89
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h
@@ -0,0 +1,97 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_MSG
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_msg
+
+#define MAX_MSG_LEN 110
+
+DECLARE_EVENT_CLASS(iwlwifi_msg_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+TRACE_EVENT(iwlwifi_dbg,
+ TP_PROTO(u32 level, bool in_interrupt, const char *function,
+ struct va_format *vaf),
+ TP_ARGS(level, in_interrupt, function, vaf),
+ TP_STRUCT__entry(
+ __field(u32, level)
+ __field(u8, in_interrupt)
+ __string(function, function)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ __entry->in_interrupt = in_interrupt;
+ __assign_str(function, function);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_MSG */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-msg
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h
new file mode 100644
index 000000000000..10839fae9cd9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h
@@ -0,0 +1,81 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_UCODE
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_ucode
+
+TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
+ TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+ TP_ARGS(dev, time, data, ev),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+ __get_str(dev), __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
+ TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(dev, wraps, n_entry, p_entry),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __field(u32, wraps)
+ __field(u32, n_entry)
+ __field(u32, p_entry)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->wraps = wraps;
+ __entry->n_entry = n_entry;
+ __entry->p_entry = p_entry;
+ ),
+ TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X",
+ __get_str(dev), __entry->wraps, __entry->n_entry,
+ __entry->p_entry)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_UCODE */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-ucode
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 78bd41bf34b0..b87acd6a229b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -24,7 +24,7 @@
*
*****************************************************************************/
-#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#ifndef __IWLWIFI_DEVICE_TRACE
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -80,436 +80,10 @@ static inline void trace_ ## name(proto) {}
#define DEV_ENTRY __string(dev, dev_name(dev))
#define DEV_ASSIGN __assign_str(dev, dev_name(dev))
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_io
+#include "iwl-devtrace-io.h"
+#include "iwl-devtrace-ucode.h"
+#include "iwl-devtrace-msg.h"
+#include "iwl-devtrace-data.h"
+#include "iwl-devtrace-iwlwifi.h"
-TRACE_EVENT(iwlwifi_dev_ioread32,
- TP_PROTO(const struct device *dev, u32 offs, u32 val),
- TP_ARGS(dev, offs, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] read io[%#x] = %#x",
- __get_str(dev), __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_dev_iowrite8,
- TP_PROTO(const struct device *dev, u32 offs, u8 val),
- TP_ARGS(dev, offs, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, offs)
- __field(u8, val)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] write io[%#x] = %#x)",
- __get_str(dev), __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_dev_iowrite32,
- TP_PROTO(const struct device *dev, u32 offs, u32 val),
- TP_ARGS(dev, offs, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] write io[%#x] = %#x)",
- __get_str(dev), __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
- TP_PROTO(const struct device *dev, u32 offs, u32 val),
- TP_ARGS(dev, offs, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] write PRPH[%#x] = %#x)",
- __get_str(dev), __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_dev_ioread_prph32,
- TP_PROTO(const struct device *dev, u32 offs, u32 val),
- TP_ARGS(dev, offs, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] read PRPH[%#x] = %#x",
- __get_str(dev), __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_dev_irq,
- TP_PROTO(const struct device *dev),
- TP_ARGS(dev),
- TP_STRUCT__entry(
- DEV_ENTRY
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- ),
- /* TP_printk("") doesn't compile */
- TP_printk("%d", 0)
-);
-
-TRACE_EVENT(iwlwifi_dev_ict_read,
- TP_PROTO(const struct device *dev, u32 index, u32 value),
- TP_ARGS(dev, index, value),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, index)
- __field(u32, value)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->index = index;
- __entry->value = value;
- ),
- TP_printk("[%s] read ict[%d] = %#.8x",
- __get_str(dev), __entry->index, __entry->value)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_ucode
-
-TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
- TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
- TP_ARGS(dev, time, data, ev),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __field(u32, time)
- __field(u32, data)
- __field(u32, ev)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->time = time;
- __entry->data = data;
- __entry->ev = ev;
- ),
- TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
- __get_str(dev), __entry->time, __entry->data, __entry->ev)
-);
-
-TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
- TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry),
- TP_ARGS(dev, wraps, n_entry, p_entry),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __field(u32, wraps)
- __field(u32, n_entry)
- __field(u32, p_entry)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->wraps = wraps;
- __entry->n_entry = n_entry;
- __entry->p_entry = p_entry;
- ),
- TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X",
- __get_str(dev), __entry->wraps, __entry->n_entry,
- __entry->p_entry)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_msg
-
-#define MAX_MSG_LEN 110
-
-DECLARE_EVENT_CLASS(iwlwifi_msg_event,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf),
- TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
- ),
- TP_printk("%s", __get_str(msg))
-);
-
-DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-TRACE_EVENT(iwlwifi_dbg,
- TP_PROTO(u32 level, bool in_interrupt, const char *function,
- struct va_format *vaf),
- TP_ARGS(level, in_interrupt, function, vaf),
- TP_STRUCT__entry(
- __field(u32, level)
- __field(u8, in_interrupt)
- __string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(
- __entry->level = level;
- __entry->in_interrupt = in_interrupt;
- __assign_str(function, function);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
- ),
- TP_printk("%s", __get_str(msg))
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_data
-
-TRACE_EVENT(iwlwifi_dev_tx_data,
- TP_PROTO(const struct device *dev,
- struct sk_buff *skb,
- void *data, size_t data_len),
- TP_ARGS(dev, skb, data, data_len),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- if (iwl_trace_data(skb))
- memcpy(__get_dynamic_array(data), data, data_len);
- ),
- TP_printk("[%s] TX frame data", __get_str(dev))
-);
-
-TRACE_EVENT(iwlwifi_dev_rx_data,
- TP_PROTO(const struct device *dev,
- const struct iwl_trans *trans,
- void *rxbuf, size_t len),
- TP_ARGS(dev, trans, rxbuf, len),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __dynamic_array(u8, data,
- len - iwl_rx_trace_len(trans, rxbuf, len))
- ),
- TP_fast_assign(
- size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
- DEV_ASSIGN;
- if (offs < len)
- memcpy(__get_dynamic_array(data),
- ((u8 *)rxbuf) + offs, len - offs);
- ),
- TP_printk("[%s] RX frame data", __get_str(dev))
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi
-
-TRACE_EVENT(iwlwifi_dev_hcmd,
- TP_PROTO(const struct device *dev,
- struct iwl_host_cmd *cmd, u16 total_size,
- struct iwl_cmd_header *hdr),
- TP_ARGS(dev, cmd, total_size, hdr),
- TP_STRUCT__entry(
- DEV_ENTRY
- __dynamic_array(u8, hcmd, total_size)
- __field(u32, flags)
- ),
- TP_fast_assign(
- int i, offset = sizeof(*hdr);
-
- DEV_ASSIGN;
- __entry->flags = cmd->flags;
- memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
-
- for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- if (!cmd->len[i])
- continue;
- memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
- cmd->data[i], cmd->len[i]);
- offset += cmd->len[i];
- }
- ),
- TP_printk("[%s] hcmd %#.2x (%ssync)",
- __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
- __entry->flags & CMD_ASYNC ? "a" : "")
-);
-
-TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
- void *rxbuf, size_t len),
- TP_ARGS(dev, trans, rxbuf, len),
- TP_STRUCT__entry(
- DEV_ENTRY
- __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf,
- iwl_rx_trace_len(trans, rxbuf, len));
- ),
- TP_printk("[%s] RX cmd %#.2x",
- __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
-);
-
-TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(const struct device *dev, struct sk_buff *skb,
- void *tfd, size_t tfdlen,
- void *buf0, size_t buf0_len,
- void *buf1, size_t buf1_len),
- TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __field(size_t, framelen)
- __dynamic_array(u8, tfd, tfdlen)
-
- /*
- * Do not insert between or below these items,
- * we want to keep the frame together (except
- * for the possible padding).
- */
- __dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->framelen = buf0_len + buf1_len;
- memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
- memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- if (!iwl_trace_data(skb))
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
- ),
- TP_printk("[%s] TX %.2x (%zu bytes)",
- __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
- __entry->framelen)
-);
-
-TRACE_EVENT(iwlwifi_dev_ucode_error,
- TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
- u32 data1, u32 data2, u32 line, u32 blink1,
- u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
- u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
- u32 brd_ver),
- TP_ARGS(dev, desc, tsf_low, data1, data2, line,
- blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
- gp3, ucode_ver, hw_ver, brd_ver),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, desc)
- __field(u32, tsf_low)
- __field(u32, data1)
- __field(u32, data2)
- __field(u32, line)
- __field(u32, blink1)
- __field(u32, blink2)
- __field(u32, ilink1)
- __field(u32, ilink2)
- __field(u32, bcon_time)
- __field(u32, gp1)
- __field(u32, gp2)
- __field(u32, gp3)
- __field(u32, ucode_ver)
- __field(u32, hw_ver)
- __field(u32, brd_ver)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->desc = desc;
- __entry->tsf_low = tsf_low;
- __entry->data1 = data1;
- __entry->data2 = data2;
- __entry->line = line;
- __entry->blink1 = blink1;
- __entry->blink2 = blink2;
- __entry->ilink1 = ilink1;
- __entry->ilink2 = ilink2;
- __entry->bcon_time = bcon_time;
- __entry->gp1 = gp1;
- __entry->gp2 = gp2;
- __entry->gp3 = gp3;
- __entry->ucode_ver = ucode_ver;
- __entry->hw_ver = hw_ver;
- __entry->brd_ver = brd_ver;
- ),
- TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
- "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
- "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
- "hw 0x%08X brd 0x%08X",
- __get_str(dev), __entry->desc, __entry->tsf_low,
- __entry->data1,
- __entry->data2, __entry->line, __entry->blink1,
- __entry->blink2, __entry->ilink1, __entry->ilink2,
- __entry->bcon_time, __entry->gp1, __entry->gp2,
- __entry->gp3, __entry->ucode_ver, __entry->hw_ver,
- __entry->brd_ver)
-);
-
-TRACE_EVENT(iwlwifi_dev_ucode_event,
- TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
- TP_ARGS(dev, time, data, ev),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __field(u32, time)
- __field(u32, data)
- __field(u32, ev)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->time = time;
- __entry->data = data;
- __entry->ev = ev;
- ),
- TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
- __get_str(dev), __entry->time, __entry->data, __entry->ev)
-);
#endif /* __IWLWIFI_DEVICE_TRACE */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE iwl-devtrace
-#include <trace/define_trace.h>
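After this refactor the umbrella header only defines DEV_ENTRY/DEV_ASSIGN and includes the per-subsystem trace headers; when tracing is compiled out, the "static inline void trace_ ## name(proto) {}" stub visible in the context above keeps every call site compiling with no #ifdefs. A simplified sketch of what that stub machinery amounts to, assuming CONFIG_IWLWIFI_DEVICE_TRACING=n:

	#undef TRACE_EVENT
	#define TRACE_EVENT(name, proto, ...) \
		static inline void trace_ ## name(proto) {}
	#undef DECLARE_EVENT_CLASS
	#define DECLARE_EVENT_CLASS(...)
	#undef DEFINE_EVENT
	#define DEFINE_EVENT(evt_class, name, proto, ...) \
		static inline void trace_ ## name(proto) {}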
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index c7154ac42c8c..7267152e7dc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -145,7 +145,7 @@ static struct iwlwifi_opmode_table {
#define IWL_DEFAULT_SCAN_CHANNELS 40
/*
- * struct fw_sec: Just for the image parsing proccess.
+ * struct fw_sec: Just for the image parsing process.
* For the fw storage we are using struct fw_desc.
*/
struct fw_sec {
@@ -175,6 +175,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg_dest_tlv);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
kfree(drv->fw.dbg_conf_tlv[i]);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
+ kfree(drv->fw.dbg_trigger_tlv[i]);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -239,16 +241,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
* previous name and uses the new format.
*/
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
- char rev_step[2] = {
- 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev), 0
- };
-
- /* A-step doesn't have an indication */
- if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
- rev_step[0] = 0;
+ char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
snprintf(drv->firmware_name, sizeof(drv->firmware_name),
- "%s%s-%s.ucode", name_pre, rev_step, tag);
+ "%s%c-%s.ucode", name_pre, rev_step, tag);
}
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
@@ -293,8 +289,10 @@ struct iwl_firmware_pieces {
/* FW debug data parsed for driver usage */
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
- struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
- size_t dbg_conf_tlv_len[FW_DBG_MAX];
+ struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+ size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+ struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+ size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
};
/*
@@ -842,6 +840,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->n_scan_channels =
le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_FW_VERSION: {
+ __le32 *ptr = (void *)tlv_data;
+ u32 major, minor;
+ u8 local_comp;
+
+ if (tlv_len != sizeof(u32) * 3)
+ goto invalid_tlv_len;
+
+ major = le32_to_cpup(ptr++);
+ minor = le32_to_cpup(ptr++);
+ local_comp = le32_to_cpup(ptr);
+
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version), "%u.%u.%u",
+ major, minor, local_comp);
+ break;
+ }
case IWL_UCODE_TLV_FW_DBG_DEST: {
struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
@@ -897,6 +912,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
break;
}
+ case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
+ struct iwl_fw_dbg_trigger_tlv *trigger =
+ (void *)tlv_data;
+ u32 trigger_id = le32_to_cpu(trigger->id);
+
+ if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+ IWL_ERR(drv,
+ "Skip unknown trigger: %u\n",
+ trigger->id);
+ break;
+ }
+
+ if (pieces->dbg_trigger_tlv[trigger_id]) {
+ IWL_ERR(drv,
+ "Ignore duplicate dbg trigger %u\n",
+ trigger->id);
+ break;
+ }
+
+ IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);
+
+ pieces->dbg_trigger_tlv[trigger_id] = trigger;
+ pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
+ break;
+ }
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
usniffer_images = true;
iwl_store_ucode_sec(pieces, tlv_data,
@@ -968,34 +1008,34 @@ static int validate_sec_sizes(struct iwl_drv *drv,
/* Verify that uCode images will fit in card's SRAM. */
if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
- cfg->max_inst_size) {
+ cfg->max_inst_size) {
IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
get_sec_size(pieces, IWL_UCODE_REGULAR,
- IWL_UCODE_SECTION_INST));
+ IWL_UCODE_SECTION_INST));
return -1;
}
if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
- cfg->max_data_size) {
+ cfg->max_data_size) {
IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
get_sec_size(pieces, IWL_UCODE_REGULAR,
- IWL_UCODE_SECTION_DATA));
+ IWL_UCODE_SECTION_DATA));
return -1;
}
- if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
- cfg->max_inst_size) {
+ if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
+ cfg->max_inst_size) {
IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
get_sec_size(pieces, IWL_UCODE_INIT,
- IWL_UCODE_SECTION_INST));
+ IWL_UCODE_SECTION_INST));
return -1;
}
if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
- cfg->max_data_size) {
+ cfg->max_data_size) {
IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
get_sec_size(pieces, IWL_UCODE_REGULAR,
- IWL_UCODE_SECTION_DATA));
+ IWL_UCODE_SECTION_DATA));
return -1;
}
return 0;
@@ -1062,6 +1102,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
const unsigned int api_max = drv->cfg->ucode_api_max;
unsigned int api_ok = drv->cfg->ucode_api_ok;
const unsigned int api_min = drv->cfg->ucode_api_min;
+ size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
bool load_module = false;
@@ -1107,7 +1148,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto try_again;
- api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+ if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+ api_ver = drv->fw.ucode_ver;
+ else
+ api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
/*
* api_ver should match the api version forming part of the
@@ -1178,6 +1222,48 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
}
+ memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz));
+
+ trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] =
+ sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
+ trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0;
+ trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] =
+ sizeof(struct iwl_fw_dbg_trigger_cmd);
+ trigger_tlv_sz[FW_DBG_TRIGGER_MLME] =
+ sizeof(struct iwl_fw_dbg_trigger_mlme);
+ trigger_tlv_sz[FW_DBG_TRIGGER_STATS] =
+ sizeof(struct iwl_fw_dbg_trigger_stats);
+ trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] =
+ sizeof(struct iwl_fw_dbg_trigger_low_rssi);
+ trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] =
+ sizeof(struct iwl_fw_dbg_trigger_txq_timer);
+ trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
+ sizeof(struct iwl_fw_dbg_trigger_time_event);
+
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+ if (pieces->dbg_trigger_tlv[i]) {
+ /*
+ * If the trigger isn't long enough, WARN and exit.
+ * Someone is trying to debug something and he won't
+ * be able to catch the bug he is trying to chase.
+ * We'd better be noisy to be sure he knows what's
+ * going on.
+ */
+ if (WARN_ON(pieces->dbg_trigger_tlv_len[i] <
+ (trigger_tlv_sz[i] +
+ sizeof(struct iwl_fw_dbg_trigger_tlv))))
+ goto out_free_fw;
+ drv->fw.dbg_trigger_tlv_len[i] =
+ pieces->dbg_trigger_tlv_len[i];
+ drv->fw.dbg_trigger_tlv[i] =
+ kmemdup(pieces->dbg_trigger_tlv[i],
+ drv->fw.dbg_trigger_tlv_len[i],
+ GFP_KERNEL);
+ if (!drv->fw.dbg_trigger_tlv[i])
+ goto out_free_fw;
+ }
+ }
+
/* Now that we can no longer fail, copy information */
/*
@@ -1485,6 +1571,10 @@ module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
bool, S_IRUGO);
MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
+module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
+
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
bool, S_IRUGO | S_IWUSR);
#ifdef CONFIG_IWLWIFI_UAPSD
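The trigger_tlv_sz[] table and the WARN_ON above encode one rule: a trigger TLV must be at least the generic struct iwl_fw_dbg_trigger_tlv header plus its type-specific payload before it is kmemdup()'d into drv->fw. A hedged sketch of that rule specialized to a single id (the helper name is illustrative):

	static bool example_mlme_trigger_len_ok(size_t tlv_len)
	{
		/* same condition the WARN_ON checks, for MLME triggers */
		return tlv_len >= sizeof(struct iwl_fw_dbg_trigger_tlv) +
				  sizeof(struct iwl_fw_dbg_trigger_mlme);
	}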
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index adf522c756e6..cda746b33db1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -68,7 +68,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
-#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
/* radio config bits (actual values from NVM definition) */
@@ -123,7 +123,7 @@ struct iwl_cfg;
* starts the driver: fetches the firmware. This should be called by bus
* specific system flows implementations. For example, the bus specific probe
* function should do bus related operations only, and then call to this
- * function. It returns the driver object or %NULL if an error occured.
+ * function. It returns the driver object or %NULL if an error occurred.
*/
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
const struct iwl_cfg *cfg);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index f0548b8a64b0..5234a0bf11e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -94,6 +94,7 @@ struct iwl_nvm_data {
u32 nvm_version;
s8 max_tx_pwr_half_dbm;
+ bool lar_enabled;
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[];
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index 25d0105741db..219ca8acca62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -248,7 +248,7 @@ static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
- /* set the uncorrectable OTP ECC bit for acknowledgement */
+ /* set the uncorrectable OTP ECC bit for acknowledgment */
iwl_set_bit(trans, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
@@ -256,7 +256,7 @@ static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
}
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
- /* set the correctable OTP ECC bit for acknowledgement */
+ /* set the correctable OTP ECC bit for acknowledgment */
iwl_set_bit(trans, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 1f7f15eb86da..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -445,7 +445,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_LOW_WATERMARK 8
/**
- * struct iwl_rb_status - reseve buffer status
+ * struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
* @closed_rb_num [0:11] - Indicates the index of the RB which was closed
* @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 919a2548a92c..251bf8dc4a12 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -82,6 +82,8 @@
* sections like this in a single file.
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
+ * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
+ * Structured as &struct iwl_fw_error_dump_trigger_desc.
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -94,6 +96,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_TXF = 7,
IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MEM = 9,
+ IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
IWL_FW_ERROR_DUMP_MAX,
};
@@ -180,7 +183,7 @@ struct iwl_fw_error_dump_info {
* struct iwl_fw_error_dump_fw_mon - FW monitor data
* @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
* @fw_mon_base_ptr: base pointer of the data
- * @fw_mon_cycle_cnt: number of wrap arounds
+ * @fw_mon_cycle_cnt: number of wraparounds
* @reserved: for future use
* @data: captured data
*/
@@ -230,4 +233,53 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
return (void *)(data->data + le32_to_cpu(data->len));
}
+/**
+ * enum iwl_fw_dbg_trigger - triggers available
+ *
+ * @FW_DBG_TRIGGER_USER: trigger log collection by user
+ * This should not be defined as a trigger to the driver, but a value the
+ * driver should set to indicate that the trigger was initiated by the
+ * user.
+ * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
+ * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
+ * missed.
+ * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
+ * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
+ * command response or a notification.
+ * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
+ * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
+ * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
+ * goes below a threshold.
+ * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
+ * detection.
+ * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time-event related
+ * events.
+ */
+enum iwl_fw_dbg_trigger {
+ FW_DBG_TRIGGER_INVALID = 0,
+ FW_DBG_TRIGGER_USER,
+ FW_DBG_TRIGGER_FW_ASSERT,
+ FW_DBG_TRIGGER_MISSED_BEACONS,
+ FW_DBG_TRIGGER_CHANNEL_SWITCH,
+ FW_DBG_TRIGGER_FW_NOTIF,
+ FW_DBG_TRIGGER_MLME,
+ FW_DBG_TRIGGER_STATS,
+ FW_DBG_TRIGGER_RSSI,
+ FW_DBG_TRIGGER_TXQ_TIMERS,
+ FW_DBG_TRIGGER_TIME_EVENT,
+
+ /* must be last */
+ FW_DBG_TRIGGER_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
+ * @type: %enum iwl_fw_dbg_trigger
+ * @data: raw data about what happened
+ */
+struct iwl_fw_error_dump_trigger_desc {
+ __le32 type;
+ u8 data[];
+};
+
#endif /* __fw_error_dump_h__ */
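Dump files built from these structures are a flat sequence of (type, len, data) blocks, which is why iwl_fw_error_next_data() above simply jumps over data->len bytes. A sketch of a consumer that walks the blocks and picks out the new IWL_FW_ERROR_DUMP_ERROR_INFO record (the buffer handling here is assumed, not defined by this header):

	static void example_walk_dump(struct iwl_fw_error_dump_data *data,
				      void *end)
	{
		while ((void *)data < end) {
			if (le32_to_cpu(data->type) == IWL_FW_ERROR_DUMP_ERROR_INFO) {
				struct iwl_fw_error_dump_trigger_desc *desc =
					(void *)data->data;

				/* desc->type holds an enum iwl_fw_dbg_trigger value */
				pr_info("dump trigger %u\n",
					le32_to_cpu(desc->type));
			}
			data = iwl_fw_error_next_data(data);
		}
	}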
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 016d91384681..62db2e5e45eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -66,6 +66,7 @@
#define __iwl_fw_file_h__
#include <linux/netdevice.h>
+#include <linux/nl80211.h>
/* v1/v2 uCode file layout */
struct iwl_ucode_header {
@@ -133,8 +134,10 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
+ IWL_UCODE_TLV_FW_VERSION = 36,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
+ IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
};
struct iwl_ucode_tlv {
@@ -156,7 +159,8 @@ struct iwl_tlv_ucode_header {
__le32 zero;
__le32 magic;
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
- __le32 ver; /* major/minor/API/serial */
+ /* major/minor/API/serial or major in new format */
+ __le32 ver;
__le32 build;
__le64 ignore;
/*
@@ -187,7 +191,7 @@ struct iwl_ucode_capa {
* enum iwl_ucode_tlv_flag - ucode API flags
* @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
* was a separate TLV but moved here to save space.
- * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
@@ -236,12 +240,11 @@ enum iwl_ucode_tlv_flag {
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
- * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
- * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
- * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
* IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
+ * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
* @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
* regardless of the band or the number of the probes. FW will calculate
* the actual dwell time.
@@ -250,19 +253,22 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
* @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
+ * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
- IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
- IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
- IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
+ IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11),
IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
+ IWL_UCODE_TLV_API_STATS_V10 = BIT(19),
+ IWL_UCODE_TLV_API_NEW_VERSION = BIT(20),
};
/**
@@ -284,6 +290,13 @@ enum iwl_ucode_tlv_api {
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enables BT Coex packet level co-running
+ * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ * sources for the MCC. This TLV bit is a future replacement for
+ * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ * is supported.
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -298,6 +311,10 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
+ IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = BIT(29),
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30),
};
/* The default calibrate table size if not specified by firmware file */
@@ -450,44 +467,207 @@ struct iwl_fw_dbg_conf_hcmd {
} __packed;
/**
- * struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration
+ * enum iwl_fw_dbg_trigger_mode - triggers functionalities
*
- * @enabled: is this trigger enabled
- * @reserved:
- * @len: length, in bytes, of the %trigger field
- * @trigger: pointer to a trigger struct
+ * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
+ * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
*/
-struct iwl_fw_dbg_trigger {
- u8 enabled;
- u8 reserved;
- u8 len;
- u8 trigger[0];
+enum iwl_fw_dbg_trigger_mode {
+ IWL_FW_DBG_TRIGGER_START = BIT(0),
+ IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+};
+
+/**
+ * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger
+ * @IWL_FW_DBG_CONF_VIF_ANY: any vif type
+ * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode
+ * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode
+ * @IWL_FW_DBG_CONF_VIF_AP: AP mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
+ */
+enum iwl_fw_dbg_trigger_vif_type {
+ IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
+ IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
+ IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
+ IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
+ IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
+ IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
+ IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
+};
+
+/**
+ * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
+ * @id: %enum iwl_fw_dbg_trigger
+ * @vif_type: %enum iwl_fw_dbg_trigger_vif_type
+ * @stop_conf_ids: bitmap of configurations this trigger relates to.
+ * If the mode is %IWL_FW_DBG_TRIGGER_STOP and the bit corresponding
+ * to the currently running configuration is set, the data should be
+ * collected.
+ * @stop_delay: how many milliseconds to wait before collecting the data
+ * after the STOP trigger fires.
+ * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start or both
+ * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
+ * configuration should be applied when the trigger kicks in.
+ * @occurrences: number of occurrences. 0 means the trigger will never fire.
+ */
+struct iwl_fw_dbg_trigger_tlv {
+ __le32 id;
+ __le32 vif_type;
+ __le32 stop_conf_ids;
+ __le32 stop_delay;
+ u8 mode;
+ u8 start_conf_id;
+ __le16 occurrences;
+ __le32 reserved[2];
+
+ u8 data[0];
} __packed;
+#define FW_DBG_START_FROM_ALIVE 0
+#define FW_DBG_CONF_MAX 32
+#define FW_DBG_INVALID 0xff
+
/**
- * enum iwl_fw_dbg_conf - configurations available
- *
- * @FW_DBG_CUSTOM: take this configuration from alive
- * Note that the trigger is NO-OP for this configuration
+ * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
+ * @stop_consec_missed_bcon: stop recording if threshold is crossed.
+ * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
+ * @start_consec_missed_bcon: start recording if threshold is crossed.
+ * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
+ * @reserved1: reserved
+ * @reserved2: reserved
*/
-enum iwl_fw_dbg_conf {
- FW_DBG_CUSTOM = 0,
+struct iwl_fw_dbg_trigger_missed_bcon {
+ __le32 stop_consec_missed_bcon;
+ __le32 stop_consec_missed_bcon_since_rx;
+ __le32 reserved2[2];
+ __le32 start_consec_missed_bcon;
+ __le32 start_consec_missed_bcon_since_rx;
+ __le32 reserved1[2];
+} __packed;
- /* must be last */
- FW_DBG_MAX,
- FW_DBG_INVALID = 0xff,
-};
+/**
+ * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
+ * @cmds: the list of commands to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_cmd {
+ struct cmd {
+ u8 cmd_id;
+ u8 group_id;
+ } __packed cmds[16];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * @stop_offset: the offset of the value to be monitored
+ * @stop_threshold: the threshold above which to collect
+ * @start_offset: the offset of the value to be monitored
+ * @start_threshold: the threshold above which to start recording
+ */
+struct iwl_fw_dbg_trigger_stats {
+ __le32 stop_offset;
+ __le32 stop_threshold;
+ __le32 start_offset;
+ __le32 start_threshold;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
+ * @rssi: RSSI value to trigger at
+ */
+struct iwl_fw_dbg_trigger_low_rssi {
+ __le32 rssi;
+} __packed;
/**
- * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration
+ * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events
+ * @stop_auth_denied: number of denied authentication to collect
+ * @stop_auth_timeout: number of authentication timeout to collect
+ * @stop_rx_deauth: number of Rx deauth frames to collect
+ * @stop_tx_deauth: number of Tx deauth frames to collect
+ * @stop_assoc_denied: number of denied association to collect
+ * @stop_assoc_timeout: number of association timeout to collect
+ * @stop_connection_loss: number of connection loss to collect
+ * @start_auth_denied: number of denied authentication to start recording
+ * @start_auth_timeout: number of authentication timeout to start recording
+ * @start_rx_deauth: number of Rx deauth to start recording
+ * @start_tx_deauth: number of Tx deauth to start recording
+ * @start_assoc_denied: number of denied association to start recording
+ * @start_assoc_timeout: number of association timeout to start recording
+ * @start_connection_loss: number of connection loss to start recording
+ */
+struct iwl_fw_dbg_trigger_mlme {
+ u8 stop_auth_denied;
+ u8 stop_auth_timeout;
+ u8 stop_rx_deauth;
+ u8 stop_tx_deauth;
+
+ u8 stop_assoc_denied;
+ u8 stop_assoc_timeout;
+ u8 stop_connection_loss;
+ u8 reserved;
+
+ u8 start_auth_denied;
+ u8 start_auth_timeout;
+ u8 start_rx_deauth;
+ u8 start_tx_deauth;
+
+ u8 start_assoc_denied;
+ u8 start_assoc_timeout;
+ u8 start_connection_loss;
+ u8 reserved2;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
+ * @command_queue: timeout for the command queue in ms
+ * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
+ * @softap: timeout for the queues of a softAP in ms
+ * @p2p_go: timeout for the queues of a P2P GO in ms
+ * @p2p_client: timeout for the queues of a P2P client in ms
+ * @p2p_device: timeout for the queues of a P2P device in ms
+ * @ibss: timeout for the queues of an IBSS in ms
+ * @tdls: timeout for the queues of a TDLS station in ms
+ */
+struct iwl_fw_dbg_trigger_txq_timer {
+ __le32 command_queue;
+ __le32 bss;
+ __le32 softap;
+ __le32 p2p_go;
+ __le32 p2p_client;
+ __le32 p2p_device;
+ __le32 ibss;
+ __le32 tdls;
+ __le32 reserved[4];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger
+ * @time_events: a list of tuples <id, action_bitmap>. The driver will issue a
+ * trigger each time a time event notification that relates to time event
+ * id with one of the actions in the bitmap is received and
+ * BIT(notif->status) is set in status_bitmap.
*
- * @id: %enum iwl_fw_dbg_conf
+ */
+struct iwl_fw_dbg_trigger_time_event {
+ struct {
+ __le32 id;
+ __le32 action_bitmap;
+ __le32 status_bitmap;
+ } __packed time_events[16];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
+ * @id: conf id
* @usniffer: should the uSniffer image be used
* @num_of_hcmds: how many HCMDs to send are present here
* @hcmd: a variable length host command to be sent to apply the configuration.
* If there is more than one HCMD to send, they will appear one after the
* other and be sent in the order that they appear in.
- * This parses IWL_UCODE_TLV_FW_DBG_CONF
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up to
+ * %FW_DBG_CONF_MAX configurations per run.
*/
struct iwl_fw_dbg_conf_tlv {
u8 id;
@@ -495,8 +675,6 @@ struct iwl_fw_dbg_conf_tlv {
u8 reserved;
u8 num_of_hcmds;
struct iwl_fw_dbg_conf_hcmd hcmd;
-
- /* struct iwl_fw_dbg_trigger sits after all variable length hcmds */
} __packed;
#endif /* __iwl_fw_file_h__ */
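Tying the new TLVs together: a STOP-mode trigger is only acted upon when the bit for the currently running debug configuration is set in stop_conf_ids. A minimal sketch of that check (the helper and its cur_conf parameter are assumptions for illustration):

	static bool example_should_collect(const struct iwl_fw_dbg_trigger_tlv *trig,
					   u8 cur_conf)
	{
		if (!(trig->mode & IWL_FW_DBG_TRIGGER_STOP))
			return false;

		return le32_to_cpu(trig->stop_conf_ids) & BIT(cur_conf);
	}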
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index ffd785cc67d6..cf75bafae51d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -68,6 +68,7 @@
#include <net/mac80211.h>
#include "iwl-fw-file.h"
+#include "iwl-fw-error-dump.h"
/**
* enum iwl_ucode_type
@@ -157,6 +158,8 @@ struct iwl_fw_cscheme_list {
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs
+ * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_fw {
@@ -186,9 +189,10 @@ struct iwl_fw {
u32 sdio_adma_addr;
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
- struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
- size_t dbg_conf_tlv_len[FW_DBG_MAX];
-
+ struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+ size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+ struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+ size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
};
@@ -206,46 +210,29 @@ static inline const char *get_fw_dbg_mode_string(int mode)
}
}
-static inline const struct iwl_fw_dbg_trigger *
-iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id)
+static inline bool
+iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
{
const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
- u8 *ptr;
- int i;
if (!conf_tlv)
- return NULL;
-
- ptr = (void *)&conf_tlv->hcmd;
- for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
- ptr += sizeof(conf_tlv->hcmd);
- ptr += le16_to_cpu(conf_tlv->hcmd.len);
- }
-
- return (const struct iwl_fw_dbg_trigger *)ptr;
-}
-
-static inline bool
-iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id)
-{
- const struct iwl_fw_dbg_trigger *trigger =
- iwl_fw_dbg_conf_get_trigger(fw, id);
-
- if (!trigger)
return false;
- return trigger->enabled;
+ return conf_tlv->usniffer;
}
-static inline bool
-iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
-{
- const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
+ void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \
+ unlikely(__dbg_trigger); \
+})
- if (!conf_tlv)
- return false;
+static inline struct iwl_fw_dbg_trigger_tlv*
+iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id)
+{
+ if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv)))
+ return NULL;
- return conf_tlv->usniffer;
+ return fw->dbg_trigger_tlv[id];
}
#endif /* __iwl_fw_h__ */
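The replacement helpers keep the hot path cheap: iwl_fw_dbg_trigger_enabled() is just an unlikely() pointer test, and iwl_fw_dbg_get_trigger() returns the parsed TLV. A sketch of the intended call pattern (the missed-beacons handler below is illustrative, not code added by this patch):

	static void example_on_missed_beacons(const struct iwl_fw *fw)
	{
		struct iwl_fw_dbg_trigger_tlv *trig;

		if (!iwl_fw_dbg_trigger_enabled(fw, FW_DBG_TRIGGER_MISSED_BEACONS))
			return;

		trig = iwl_fw_dbg_get_trigger(fw, FW_DBG_TRIGGER_MISSED_BEACONS);
		if (trig)
			/* trig->data[] holds a struct iwl_fw_dbg_trigger_missed_bcon */
			pr_debug("missed-beacon trigger, %u occurrences left\n",
				 le16_to_cpu(trig->occurrences));
	}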
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 03250a45272e..27c66e477833 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -186,21 +186,16 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
- /*
- * In HW previous to the 8000 HW family, and in the 8000 HW family
- * itself when the revision step==0, the DEVICE_SET_NMI_REG is used
- * to force an NMI. Otherwise, a different register -
- * DEVICE_SET_NMI_8000B_REG - is used.
- */
- if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
- (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)) {
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV);
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_HW);
} else {
- iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
- DEVICE_SET_NMI_8000B_VAL);
+ iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG,
+ DEVICE_SET_NMI_8000_VAL);
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+ DEVICE_SET_NMI_VAL_DRV);
}
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index e8eabd21ccfe..ac2b90df8413 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -103,6 +103,7 @@ enum iwl_disable_11n {
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
* @d0i3_disable: disable d0i3, default = 1,
+ * @lar_disable: disable LAR (regulatory), default = 0
* @fw_monitor: allow to use firmware monitor
*/
struct iwl_mod_params {
@@ -121,6 +122,7 @@ struct iwl_mod_params {
char *nvm_file;
bool uapsd_disable;
bool d0i3_disable;
+ bool lar_disable;
bool fw_monitor;
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index c74f1a4edf23..83903a5025c2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -99,12 +99,15 @@ enum family_8000_nvm_offsets {
/* NVM SW-Section offset (in words) definitions */
NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
NVM_VERSION_FAMILY_8000 = 0,
- RADIO_CFG_FAMILY_8000 = 2,
- SKU_FAMILY_8000 = 4,
- N_HW_ADDRS_FAMILY_8000 = 5,
+ RADIO_CFG_FAMILY_8000 = 0,
+ SKU_FAMILY_8000 = 2,
+ N_HW_ADDRS_FAMILY_8000 = 3,
/* NVM REGULATORY -Section offset (in words) definitions */
NVM_CHANNELS_FAMILY_8000 = 0,
+ NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
+ NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
+ NVM_LAR_ENABLED_FAMILY_8000 = 0x7,
/* NVM calibration section offset (in words) definitions */
NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
@@ -146,7 +149,9 @@ static const u8 iwl_nvm_channels_family_8000[] = {
#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
-#define LAST_5GHZ_HT 161
+#define LAST_5GHZ_HT 165
+#define LAST_5GHZ_HT_FAMILY_8000 181
+#define N_HW_ADDR_MASK 0xF
/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -201,9 +206,57 @@ enum iwl_nvm_channel_flags {
#define CHECK_AND_PRINT_I(x) \
((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+ u16 nvm_flags, const struct iwl_cfg *cfg)
+{
+ u32 flags = IEEE80211_CHAN_NO_HT40;
+ u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+ if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+ if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ if (ch_num <= LAST_2GHZ_HT_PLUS)
+ flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ if (ch_num >= FIRST_2GHZ_HT_MINUS)
+ flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ else
+ flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ }
+ if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+ flags |= IEEE80211_CHAN_NO_80MHZ;
+ if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+ flags |= IEEE80211_CHAN_NO_160MHZ;
+
+ if (!(nvm_flags & NVM_CHANNEL_IBSS))
+ flags |= IEEE80211_CHAN_NO_IR;
+
+ if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+ flags |= IEEE80211_CHAN_NO_IR;
+
+ if (nvm_flags & NVM_CHANNEL_RADAR)
+ flags |= IEEE80211_CHAN_RADAR;
+
+ if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+ flags |= IEEE80211_CHAN_INDOOR_ONLY;
+
+ /* Set the GO concurrent flag only in case that NO_IR is set.
+ * Otherwise it is meaningless
+ */
+ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+ (flags & IEEE80211_CHAN_NO_IR))
+ flags |= IEEE80211_CHAN_GO_CONCURRENT;
+
+ return flags;
+}
+
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- const __le16 * const nvm_ch_flags)
+ const __le16 * const nvm_ch_flags,
+ bool lar_supported)
{
int ch_idx;
int n_channels = 0;
@@ -228,9 +281,14 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (ch_idx >= num_2ghz_channels &&
!data->sku_cap_band_52GHz_enable)
- ch_flags &= ~NVM_CHANNEL_VALID;
+ continue;
- if (!(ch_flags & NVM_CHANNEL_VALID)) {
+ if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
+ /*
+ * Channels might become valid later if lar is
+ * supported, hence we still want to add them to
+ * the list of supported channels to cfg80211.
+ */
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
nvm_chan[ch_idx],
@@ -250,45 +308,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
- /* TODO: Need to be dependent to the NVM */
- channel->flags = IEEE80211_CHAN_NO_HT40;
- if (ch_idx < num_2ghz_channels &&
- (ch_flags & NVM_CHANNEL_40MHZ)) {
- if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
- channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
- if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
- channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
- } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
- (ch_flags & NVM_CHANNEL_40MHZ)) {
- if ((ch_idx - num_2ghz_channels) % 2 == 0)
- channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
- else
- channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
- }
- if (!(ch_flags & NVM_CHANNEL_80MHZ))
- channel->flags |= IEEE80211_CHAN_NO_80MHZ;
- if (!(ch_flags & NVM_CHANNEL_160MHZ))
- channel->flags |= IEEE80211_CHAN_NO_160MHZ;
-
- if (!(ch_flags & NVM_CHANNEL_IBSS))
- channel->flags |= IEEE80211_CHAN_NO_IR;
-
- if (!(ch_flags & NVM_CHANNEL_ACTIVE))
- channel->flags |= IEEE80211_CHAN_NO_IR;
-
- if (ch_flags & NVM_CHANNEL_RADAR)
- channel->flags |= IEEE80211_CHAN_RADAR;
-
- if (ch_flags & NVM_CHANNEL_INDOOR_ONLY)
- channel->flags |= IEEE80211_CHAN_INDOOR_ONLY;
-
- /* Set the GO concurrent flag only in case that NO_IR is set.
- * Otherwise it is meaningless
- */
- if ((ch_flags & NVM_CHANNEL_GO_CONCURRENT) &&
- (channel->flags & IEEE80211_CHAN_NO_IR))
- channel->flags |= IEEE80211_CHAN_GO_CONCURRENT;
-
/* Initialize regulatory-based run-time data */
/*
@@ -297,6 +316,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
*/
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+
+ /* don't put limitations in case we're using LAR */
+ if (!lar_supported)
+ channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
+ ch_idx, is_5ghz,
+ ch_flags, cfg);
+ else
+ channel->flags = 0;
+
IWL_DEBUG_EEPROM(dev,
"Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
channel->hw_value,
@@ -370,8 +398,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- const __le16 *ch_section, bool enable_vht,
- u8 tx_chains, u8 rx_chains)
+ const __le16 *ch_section,
+ u8 tx_chains, u8 rx_chains, bool lar_supported)
{
int n_channels;
int n_used = 0;
@@ -380,11 +408,12 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
n_channels = iwl_init_channel_map(
dev, cfg, data,
- &ch_section[NVM_CHANNELS]);
+ &ch_section[NVM_CHANNELS], lar_supported);
else
n_channels = iwl_init_channel_map(
dev, cfg, data,
- &ch_section[NVM_CHANNELS_FAMILY_8000]);
+ &ch_section[NVM_CHANNELS_FAMILY_8000],
+ lar_supported);
sband = &data->bands[IEEE80211_BAND_2GHZ];
sband->band = IEEE80211_BAND_2GHZ;
@@ -403,7 +432,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
IEEE80211_BAND_5GHZ);
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
tx_chains, rx_chains);
- if (enable_vht)
+ if (data->sku_cap_11ac_enable)
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
@@ -412,17 +441,16 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
n_used, n_channels);
}
-static int iwl_get_sku(const struct iwl_cfg *cfg,
- const __le16 *nvm_sw)
+static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+ const __le16 *phy_sku)
{
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
return le16_to_cpup(nvm_sw + SKU);
- else
- return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
+
+ return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
}
-static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
- const __le16 *nvm_sw)
+static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
return le16_to_cpup(nvm_sw + NVM_VERSION);
@@ -431,24 +459,26 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
NVM_VERSION_FAMILY_8000));
}
-static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
- const __le16 *nvm_sw)
+static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+ const __le16 *phy_sku)
{
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
return le16_to_cpup(nvm_sw + RADIO_CFG);
- else
- return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+
+ return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+
}
-#define N_HW_ADDRS_MASK_FAMILY_8000 0xF
-static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
- const __le16 *nvm_sw)
+static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
+ int n_hw_addr;
+
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
- else
- return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
- & N_HW_ADDRS_MASK_FAMILY_8000;
+
+ n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+
+ return n_hw_addr & N_HW_ADDR_MASK;
}
static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
@@ -491,7 +521,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *mac_override,
- const __le16 *nvm_hw)
+ const __le16 *nvm_hw,
+ u32 mac_addr0, u32 mac_addr1)
{
const u8 *hw_addr;
@@ -515,48 +546,17 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
}
if (nvm_hw) {
- /* read the MAC address from OTP */
- if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) {
- /* read the mac address from the WFPM location */
- hw_addr = (const u8 *)(nvm_hw +
- HW_ADDR0_WFPM_FAMILY_8000);
- data->hw_addr[0] = hw_addr[3];
- data->hw_addr[1] = hw_addr[2];
- data->hw_addr[2] = hw_addr[1];
- data->hw_addr[3] = hw_addr[0];
-
- hw_addr = (const u8 *)(nvm_hw +
- HW_ADDR1_WFPM_FAMILY_8000);
- data->hw_addr[4] = hw_addr[1];
- data->hw_addr[5] = hw_addr[0];
- } else if ((data->nvm_version >= 0xE08) &&
- (data->nvm_version < 0xE0B)) {
- /* read "reverse order" from the PCIe location */
- hw_addr = (const u8 *)(nvm_hw +
- HW_ADDR0_PCIE_FAMILY_8000);
- data->hw_addr[5] = hw_addr[2];
- data->hw_addr[4] = hw_addr[1];
- data->hw_addr[3] = hw_addr[0];
-
- hw_addr = (const u8 *)(nvm_hw +
- HW_ADDR1_PCIE_FAMILY_8000);
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[1] = hw_addr[2];
- data->hw_addr[0] = hw_addr[1];
- } else {
- /* read from the PCIe location */
- hw_addr = (const u8 *)(nvm_hw +
- HW_ADDR0_PCIE_FAMILY_8000);
- data->hw_addr[5] = hw_addr[0];
- data->hw_addr[4] = hw_addr[1];
- data->hw_addr[3] = hw_addr[2];
-
- hw_addr = (const u8 *)(nvm_hw +
- HW_ADDR1_PCIE_FAMILY_8000);
- data->hw_addr[2] = hw_addr[1];
- data->hw_addr[1] = hw_addr[2];
- data->hw_addr[0] = hw_addr[3];
- }
+ /* read the MAC address from HW registers */
+ hw_addr = (const u8 *)&mac_addr0;
+ data->hw_addr[0] = hw_addr[3];
+ data->hw_addr[1] = hw_addr[2];
+ data->hw_addr[2] = hw_addr[1];
+ data->hw_addr[3] = hw_addr[0];
+
+ hw_addr = (const u8 *)&mac_addr1;
+ data->hw_addr[4] = hw_addr[1];
+ data->hw_addr[5] = hw_addr[0];
+
if (!is_valid_ether_addr(data->hw_addr))
IWL_ERR_DEV(dev,
"mac address from hw section is not valid\n");
@@ -571,11 +571,14 @@ struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
- const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
+ const __le16 *mac_override, const __le16 *phy_sku,
+ u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
+ u32 mac_addr0, u32 mac_addr1)
{
struct iwl_nvm_data *data;
u32 sku;
u32 radio_cfg;
+ u16 lar_config;
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
data = kzalloc(sizeof(*data) +
@@ -592,20 +595,21 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
- radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
+ radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
iwl_set_radio_cfg(cfg, data, radio_cfg);
if (data->valid_tx_ant)
tx_chains &= data->valid_tx_ant;
if (data->valid_rx_ant)
rx_chains &= data->valid_rx_ant;
- sku = iwl_get_sku(cfg, nvm_sw);
+ sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
- data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;
+ data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
+ (sku & NVM_SKU_CAP_11AC_ENABLE);
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
@@ -626,16 +630,23 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_set_hw_address(cfg, data, nvm_hw);
iwl_init_sbands(dev, cfg, data, nvm_sw,
- sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
- rx_chains);
+ tx_chains, rx_chains, lar_fw_supported);
} else {
+ u16 lar_offset = data->nvm_version < 0xE39 ?
+ NVM_LAR_OFFSET_FAMILY_8000_OLD :
+ NVM_LAR_OFFSET_FAMILY_8000;
+
+ lar_config = le16_to_cpup(regulatory + lar_offset);
+ data->lar_enabled = !!(lar_config &
+ NVM_LAR_ENABLED_FAMILY_8000);
+
/* MAC address in family 8000 */
iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
- nvm_hw);
+ nvm_hw, mac_addr0, mac_addr1);
iwl_init_sbands(dev, cfg, data, regulatory,
- sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
- rx_chains);
+ tx_chains, rx_chains,
+ lar_fw_supported && data->lar_enabled);
}
data->calib_version = 255;
@@ -643,3 +654,164 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
+
+static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
+ int ch_idx, u16 nvm_flags,
+ const struct iwl_cfg *cfg)
+{
+ u32 flags = NL80211_RRF_NO_HT40;
+ u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+ if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+ if (ch_idx < NUM_2GHZ_CHANNELS &&
+ (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
+ flags &= ~NL80211_RRF_NO_HT40PLUS;
+ if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+ flags &= ~NL80211_RRF_NO_HT40MINUS;
+ } else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
+ (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ flags &= ~NL80211_RRF_NO_HT40PLUS;
+ else
+ flags &= ~NL80211_RRF_NO_HT40MINUS;
+ }
+
+ if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+ flags |= NL80211_RRF_NO_80MHZ;
+ if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+ flags |= NL80211_RRF_NO_IR;
+
+ if (nvm_flags & NVM_CHANNEL_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ /* Set the GO concurrent flag only in case that NO_IR is set.
+ * Otherwise it is meaningless
+ */
+ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+ (flags & NL80211_RRF_NO_IR))
+ flags |= NL80211_RRF_GO_CONCURRENT;
+
+ return flags;
+}
+
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+ int num_of_ch, __le32 *channels, u16 fw_mcc)
+{
+ int ch_idx;
+ u16 ch_flags, prev_ch_flags = 0;
+ const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
+ iwl_nvm_channels_family_8000 : iwl_nvm_channels;
+ struct ieee80211_regdomain *regd;
+ int size_of_regd;
+ struct ieee80211_reg_rule *rule;
+ enum ieee80211_band band;
+ int center_freq, prev_center_freq = 0;
+ int valid_rules = 0;
+ bool new_rule;
+ int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
+ IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;
+
+ if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(num_of_ch > max_num_ch))
+ num_of_ch = max_num_ch;
+
+ IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
+ num_of_ch);
+
+ /* build a regdomain rule for every valid channel */
+ size_of_regd =
+ sizeof(struct ieee80211_regdomain) +
+ num_of_ch * sizeof(struct ieee80211_reg_rule);
+
+ regd = kzalloc(size_of_regd, GFP_KERNEL);
+ if (!regd)
+ return ERR_PTR(-ENOMEM);
+
+ for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+ ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
+ band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
+ band);
+ new_rule = false;
+
+ if (!(ch_flags & NVM_CHANNEL_VALID)) {
+ IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+ "Ch. %d Flags %x [%sGHz] - No traffic\n",
+ nvm_chan[ch_idx],
+ ch_flags,
+ (ch_idx >= NUM_2GHZ_CHANNELS) ?
+ "5.2" : "2.4");
+ continue;
+ }
+
+ /* we can't continue the same rule */
+ if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+ center_freq - prev_center_freq > 20) {
+ valid_rules++;
+ new_rule = true;
+ }
+
+ rule = &regd->reg_rules[valid_rules - 1];
+
+ if (new_rule)
+ rule->freq_range.start_freq_khz =
+ MHZ_TO_KHZ(center_freq - 10);
+
+ rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
+
+ /* this doesn't matter - not used by FW */
+ rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
+ rule->power_rule.max_eirp =
+ DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
+
+ rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+ ch_flags, cfg);
+
+ /* rely on auto-calculation to merge BW of contiguous chans */
+ rule->flags |= NL80211_RRF_AUTO_BW;
+ rule->freq_range.max_bandwidth_khz = 0;
+
+ prev_ch_flags = ch_flags;
+ prev_center_freq = center_freq;
+
+ IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+ "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+ center_freq,
+ band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(40MHZ),
+ CHECK_AND_PRINT_I(80MHZ),
+ CHECK_AND_PRINT_I(160MHZ),
+ CHECK_AND_PRINT_I(INDOOR_ONLY),
+ CHECK_AND_PRINT_I(GO_CONCURRENT),
+ ch_flags,
+ ((ch_flags & NVM_CHANNEL_ACTIVE) &&
+ !(ch_flags & NVM_CHANNEL_RADAR))
+ ? "" : "not ");
+ }
+
+ regd->n_reg_rules = valid_rules;
+
+ /* set alpha2 from FW. */
+ regd->alpha2[0] = fw_mcc >> 8;
+ regd->alpha2[1] = fw_mcc & 0xff;
+
+ return regd;
+}
+IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index c9c45a39d212..822ba52e0e5a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -62,6 +62,7 @@
#ifndef __iwl_nvm_parse_h__
#define __iwl_nvm_parse_h__
+#include <net/cfg80211.h>
#include "iwl-eeprom-parse.h"
/**
@@ -76,6 +77,21 @@ struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
- const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
+ const __le16 *mac_override, const __le16 *phy_sku,
+ u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
+ u32 mac_addr0, u32 mac_addr1);
+
+/**
+ * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
+ *
+ * This function parses the regulatory channel data received as a
+ * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
+ * to be fed into the regulatory core. An ERR_PTR is returned on error.
+ * If not given to the regulatory core, the user is responsible for freeing
+ * the regdomain returned here with kfree.
+ */
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+ int num_of_ch, __le32 *channels, u16 fw_mcc);
#endif /* __iwl_nvm_parse_h__ */
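
As a rough usage sketch (not part of the patch above), a caller of iwl_parse_nvm_mcc_info() follows the ownership rule stated in the comment: the returned regdomain is either handed to the regulatory core or freed by the caller. The dev, cfg, channels, num_of_ch and fw_mcc variables are assumed to come from the MCC_UPDATE_CMD handling code.

    struct ieee80211_regdomain *regd;

    regd = iwl_parse_nvm_mcc_info(dev, cfg, num_of_ch, channels, fw_mcc);
    if (IS_ERR(regd))
            return PTR_ERR(regd);

    /* ... hand regd to the regulatory core here ... */

    /* if it was not passed on to cfg80211, the caller owns it */
    kfree(regd);
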
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 17de6d46222a..ce1cdd7604e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -94,7 +94,7 @@ struct iwl_cfg;
* The operational mode has a very simple life cycle.
*
* 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
- * capabilities advertized by the fw file (in TLV format).
+ * capabilities advertised by the fw file (in TLV format).
* 2) The driver layer starts the op_mode (ops->start)
* 3) The op_mode registers mac80211
* 4) The op_mode is governed by mac80211
@@ -116,7 +116,7 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to. Can't sleep.
- * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
* but the higher layers need to know about it (in particular mac80211 to
be able to call the right NAPI RX functions); this function is needed
* to eventually call netif_napi_add() with higher layer involvement.
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index d4fb5cad07ea..a105455b6a24 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -72,7 +72,7 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 7
+#define IWL_NUM_PAPD_CH_GROUPS 9
#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
@@ -125,7 +125,7 @@ struct iwl_phy_db_chg_txp {
} __packed;
/*
- * phy db - Receieve phy db chunk after calibrations
+ * phy db - Receive phy db chunk after calibrations
*/
struct iwl_calib_res_notif_phy_db {
__le16 type;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 6221e4dfc64f..88a57e6e232f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -110,8 +110,8 @@
#define DEVICE_SET_NMI_REG 0x00a01c30
#define DEVICE_SET_NMI_VAL_HW BIT(0)
#define DEVICE_SET_NMI_VAL_DRV BIT(7)
-#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
-#define DEVICE_SET_NMI_8000B_VAL 0x1000000
+#define DEVICE_SET_NMI_8000_REG 0x00a01c24
+#define DEVICE_SET_NMI_8000_VAL 0x1000000
/* Shared registers (0x0..0x3ff, via target indirect or periphery */
#define SHR_BASE 0x00a10000
@@ -295,25 +295,6 @@
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
-/* SECURE boot registers */
-#define LMPM_SECURE_BOOT_CONFIG_ADDR (0x100)
-enum secure_boot_config_reg {
- LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
- LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
-};
-
-#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0 (0xA01E30)
-#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
-#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
-enum secure_boot_status_reg {
- LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000001,
- LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
- LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
- LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
- LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
- LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
-};
-
#define FH_UCODE_LOAD_STATUS (0x1AF0)
#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
enum secure_load_status_reg {
@@ -334,8 +315,6 @@ enum secure_load_status_reg {
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
-#define LMPM_SECURE_TIME_OUT (100) /* 10 micro */
-
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
#define RXF_RD_D_SPACE (0xa00c40)
@@ -370,7 +349,33 @@ enum secure_load_status_reg {
#define MON_BUFF_CYCLE_CNT (0xa03c48)
#define DBGC_IN_SAMPLE (0xa03c00)
-#define DBGC_OUT_CTRL (0xa03c0c)
+
+/* enable the ID buf for read */
+#define WFPM_PS_CTL_CLR 0xA0300C
+#define WFMP_MAC_ADDR_0 0xA03080
+#define WFMP_MAC_ADDR_1 0xA03084
+#define LMPM_PMG_EN 0xA01CEC
+#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
+#define RFIC_REG_RD 0xAD0470
+#define WFPM_CTRL_REG 0xA03030
+enum {
+ ENABLE_WFPM = BIT(31),
+ WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
+};
+
+#define AUX_MISC_REG 0xA200B0
+enum {
+ HW_STEP_LOCATION_BITS = 24,
+};
+
+#define AUX_MISC_MASTER1_EN 0xA20818
+enum aux_misc_master1_en {
+ AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
+};
+
+#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
+#define RSA_ENABLE 0xA24B08
+#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index a96bd8db6ceb..56254a837214 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,10 +77,10 @@
/**
* DOC: Transport layer - what is it ?
*
- * The tranport layer is the layer that deals with the HW directly. It provides
+ * The transport layer is the layer that deals with the HW directly. It provides
* an abstraction of the underlying HW to the upper layer. The transport layer
* doesn't provide any policy, algorithm or anything of this kind, but only
- * mechanisms to make the HW do something.It is not completely stateless but
+ * mechanisms to make the HW do something. It is not completely stateless but
* close to it.
* We will have an implementation for each different supported bus.
*/
@@ -111,10 +111,10 @@
/**
* DOC: Host command section
*
- * A host command is a commaned issued by the upper layer to the fw. There are
+ * A host command is a command issued by the upper layer to the fw. There are
* several versions of fw that have several APIs. The transport layer is
* completely agnostic to these differences.
- * The transport does provide helper functionnality (i.e. SYNC / ASYNC mode),
+ * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
*/
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
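
The SYNC / ASYNC helper functionality mentioned in the comment above is easiest to see in a short sketch (illustrative only, not part of this patch). It assumes the existing struct iwl_host_cmd, iwl_trans_send_cmd() and iwl_free_resp() API from this header, and uses ECHO_CMD merely as an example command id.

    struct iwl_host_cmd cmd = {
            .id = ECHO_CMD,         /* any valid command id (assumption) */
            .flags = CMD_WANT_SKB,  /* synchronous, keep the response buffer */
    };
    int ret;

    ret = iwl_trans_send_cmd(trans, &cmd);
    if (ret)
            return ret;

    /* ... inspect cmd.resp_pkt ... */
    iwl_free_resp(&cmd);            /* required whenever CMD_WANT_SKB is set */

Passing CMD_ASYNC instead makes the call return immediately, in which case CMD_WANT_SKB may not be used (see the flag documentation below).
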
@@ -195,7 +195,7 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
* the response. The caller needs to call iwl_free_resp when done.
* @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
- * command queue, but after other high priority commands. valid only
+ * command queue, but after other high priority commands. Valid only
* with CMD_ASYNC.
* @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
* @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
@@ -421,8 +421,9 @@ struct iwl_trans_txq_scd_cfg {
*
* All the handlers MUST be implemented
*
- * @start_hw: starts the HW- from that point on, the HW can send interrupts
- * May sleep
+ * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
+ * out of a low power state. From that point on, the HW can send
+ * interrupts. May sleep.
* @op_mode_leave: Turn off the HW RF kill indication if on
* May sleep
* @start_fw: allocates and inits all the resources for the transport
@@ -432,10 +433,11 @@ struct iwl_trans_txq_scd_cfg {
* the SCD base address in SRAM, then provide it here, or 0 otherwise.
* May sleep
* @stop_device: stops the whole device (embedded CPU put to reset) and stops
- * the HW. From that point on, the HW will be in low power but will still
- * issue interrupt if the HW RF kill is triggered. This callback must do
- * the right thing and not crash even if start_hw() was called but not
- * start_fw(). May sleep
+ * the HW. If low_power is true, the NIC will be put in low power state.
+ * From that point on, the HW will be stopped but will still issue an
+ * interrupt if the HW RF kill switch is triggered.
+ * This callback must do the right thing and not crash even if %start_hw()
+ * was called but not &start_fw(). May sleep.
* @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
@@ -458,6 +460,8 @@ struct iwl_trans_txq_scd_cfg {
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
+ * @freeze_txq_timer: prevents the timer of the queue from firing until the
+ * queue is set to awake. Must be atomic.
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @write8: write a u8 to a register at offset ofs from the BAR
@@ -489,14 +493,14 @@ struct iwl_trans_txq_scd_cfg {
*/
struct iwl_trans_ops {
- int (*start_hw)(struct iwl_trans *iwl_trans);
+ int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
int (*update_sf)(struct iwl_trans *trans,
struct iwl_sf_region *st_fwrd_space);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
- void (*stop_device)(struct iwl_trans *trans);
+ void (*stop_device)(struct iwl_trans *trans, bool low_power);
void (*d3_suspend)(struct iwl_trans *trans, bool test);
int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
@@ -517,6 +521,8 @@ struct iwl_trans_ops {
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
+ void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
+ bool freeze);
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -578,7 +584,7 @@ enum iwl_d0i3_mode {
* @cfg - pointer to the configuration
* @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
- * @hw_id: a u32 with the ID of the device / subdevice.
+ * @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
@@ -595,6 +601,7 @@ enum iwl_d0i3_mode {
* @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_trans {
@@ -628,7 +635,8 @@ struct iwl_trans {
u64 dflt_pwr_limit;
const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
- const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+ const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+ struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
enum iwl_d0i3_mode d0i3_mode;
@@ -646,11 +654,16 @@ static inline void iwl_trans_configure(struct iwl_trans *trans,
trans->ops->configure(trans, trans_cfg);
}
-static inline int iwl_trans_start_hw(struct iwl_trans *trans)
+static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
might_sleep();
- return trans->ops->start_hw(trans);
+ return trans->ops->start_hw(trans, low_power);
+}
+
+static inline int iwl_trans_start_hw(struct iwl_trans *trans)
+{
+ return trans->ops->start_hw(trans, true);
}
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
@@ -697,15 +710,21 @@ static inline int iwl_trans_update_sf(struct iwl_trans *trans,
return 0;
}
-static inline void iwl_trans_stop_device(struct iwl_trans *trans)
+static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
+ bool low_power)
{
might_sleep();
- trans->ops->stop_device(trans);
+ trans->ops->stop_device(trans, low_power);
trans->state = IWL_TRANS_NO_FW;
}
+static inline void iwl_trans_stop_device(struct iwl_trans *trans)
+{
+ _iwl_trans_stop_device(trans, true);
+}
+
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
might_sleep();
@@ -871,6 +890,17 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
+static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs,
+ bool freeze)
+{
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+
+ if (trans->ops->freeze_txq_timer)
+ trans->ops->freeze_txq_timer(trans, txqs, freeze);
+}
+
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
u32 txqs)
{
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 7810c41cf9a7..13a0a03158de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -72,158 +72,6 @@
#include "mvm.h"
#include "iwl-debug.h"
-const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
- [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
- [BT_KILL_MSK_NEVER] = 0xffffffff,
- [BT_KILL_MSK_ALWAYS] = 0,
-};
-
-const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- },
- {
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- },
- {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_DEFAULT,
- },
-};
-
-const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_DEFAULT,
- },
-};
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
- cpu_to_le32(0xf0f0f0f0), /* 50% */
- cpu_to_le32(0xc0c0c0c0), /* 25% */
- cpu_to_le32(0xfcfcfcfc), /* 75% */
- cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
- {
- /* Tight */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0x00004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
- {
- /* Loose */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
- {
- /* Tx Tx disabled */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xeeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
-};
-
/* 20MHz / 40MHz below / 40Mhz above*/
static const __le64 iwl_ci_mask[][3] = {
/* dummy entry for channel 0 */
@@ -596,14 +444,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
goto send_cmd;
}
- bt_cmd->max_kill = cpu_to_le32(5);
- bt_cmd->bt4_antenna_isolation_thr =
- cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS);
- bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
- bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
- bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
- bt_cmd->override_secondary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
-
mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
bt_cmd->mode = cpu_to_le32(mode);
@@ -611,7 +451,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->enabled_modules |=
cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
- if (IWL_MVM_BT_COEX_CORUNNING)
+ if (iwl_mvm_bt_is_plcr_supported(mvm))
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
if (IWL_MVM_BT_COEX_MPLUT) {
@@ -622,18 +462,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
- if (mvm->cfg->bt_shared_single_ant)
- memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
- sizeof(iwl_single_shared_ant));
- else
- memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
- sizeof(iwl_combined_lookup));
-
- memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
- sizeof(iwl_bt_prio_boost));
- bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
- bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
send_cmd:
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -644,48 +472,6 @@ send_cmd:
return ret;
}
-static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm)
-{
- struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
- u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
- u32 secondary_lut = le32_to_cpu(notif->secondary_ch_lut);
- u32 ag = le32_to_cpu(notif->bt_activity_grading);
- struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
- u8 ack_kill_msk[NUM_PHY_CTX] = {};
- u8 cts_kill_msk[NUM_PHY_CTX] = {};
- int i;
-
- lockdep_assert_held(&mvm->mutex);
-
- ack_kill_msk[0] = iwl_bt_ack_kill_msk[ag][primary_lut];
- cts_kill_msk[0] = iwl_bt_cts_kill_msk[ag][primary_lut];
-
- ack_kill_msk[1] = iwl_bt_ack_kill_msk[ag][secondary_lut];
- cts_kill_msk[1] = iwl_bt_cts_kill_msk[ag][secondary_lut];
-
- /* Don't send HCMD if there is no update */
- if (!memcmp(ack_kill_msk, mvm->bt_ack_kill_msk, sizeof(ack_kill_msk)) ||
- !memcmp(cts_kill_msk, mvm->bt_cts_kill_msk, sizeof(cts_kill_msk)))
- return 0;
-
- memcpy(mvm->bt_ack_kill_msk, ack_kill_msk,
- sizeof(mvm->bt_ack_kill_msk));
- memcpy(mvm->bt_cts_kill_msk, cts_kill_msk,
- sizeof(mvm->bt_cts_kill_msk));
-
- BUILD_BUG_ON(ARRAY_SIZE(ack_kill_msk) < ARRAY_SIZE(cmd.boost_values));
-
- for (i = 0; i < ARRAY_SIZE(cmd.boost_values); i++) {
- cmd.boost_values[i].kill_ack_msk =
- cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk[i]]);
- cmd.boost_values[i].kill_cts_msk =
- cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk[i]]);
- }
-
- return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0,
- sizeof(cmd), &cmd);
-}
-
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
bool enable)
{
@@ -951,9 +737,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
}
-
- if (iwl_mvm_bt_udpate_sw_boost(mvm))
- IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
@@ -1024,7 +807,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
}
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event)
+ enum ieee80211_rssi_event_data rssi_event)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data data = {
@@ -1074,9 +857,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_rssi_iterator, &data);
-
- if (iwl_mvm_bt_udpate_sw_boost(mvm))
- IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
@@ -1235,7 +1015,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
- if (!IWL_MVM_BT_COEX_CORUNNING)
+ if (!iwl_mvm_bt_is_plcr_supported(mvm))
return 0;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 542ee74f290a..d954591e0be5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -288,6 +288,65 @@ static const __le64 iwl_ci_mask[][3] = {
},
};
+enum iwl_bt_kill_msk {
+ BT_KILL_MSK_DEFAULT,
+ BT_KILL_MSK_NEVER,
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_MAX,
+};
+
+static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
+ [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
+ [BT_KILL_MSK_NEVER] = 0xffffffff,
+ [BT_KILL_MSK_ALWAYS] = 0,
+};
+
+static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+ {
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ },
+ {
+ BT_KILL_MSK_NEVER,
+ BT_KILL_MSK_NEVER,
+ BT_KILL_MSK_NEVER,
+ },
+ {
+ BT_KILL_MSK_NEVER,
+ BT_KILL_MSK_NEVER,
+ BT_KILL_MSK_NEVER,
+ },
+ {
+ BT_KILL_MSK_DEFAULT,
+ BT_KILL_MSK_NEVER,
+ BT_KILL_MSK_DEFAULT,
+ },
+};
+
+static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+ {
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ },
+ {
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ },
+ {
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_ALWAYS,
+ },
+ {
+ BT_KILL_MSK_DEFAULT,
+ BT_KILL_MSK_ALWAYS,
+ BT_KILL_MSK_DEFAULT,
+ },
+};
+
struct corunning_block_luts {
u8 range;
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -619,7 +678,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
- if (IWL_MVM_BT_COEX_CORUNNING) {
+ if (iwl_mvm_bt_is_plcr_supported(mvm)) {
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40);
bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
@@ -633,7 +692,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
if (IWL_MVM_BT_COEX_TTC)
bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
- if (IWL_MVM_BT_COEX_RRC)
+ if (iwl_mvm_bt_is_rrc_supported(mvm))
bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
if (mvm->cfg->bt_shared_single_ant)
@@ -1069,7 +1128,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
}
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event)
+ enum ieee80211_rssi_event_data rssi_event)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data data = {
@@ -1168,16 +1227,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT;
}
-bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant)
-{
- u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
- return ag < BT_HIGH_TRAFFIC;
-}
-
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
{
u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
- return ag == BT_OFF;
+ return ag < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
@@ -1214,7 +1267,7 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
- if (!IWL_MVM_BT_COEX_CORUNNING)
+ if (!iwl_mvm_bt_is_plcr_supported(mvm))
return 0;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 14e8fd661889..1b1b2bf26819 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -694,6 +694,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+ if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
+ IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+
return 0;
}
@@ -1128,6 +1131,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
if (ret < 0) {
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
ieee80211_restart_hw(mvm->hw);
iwl_mvm_free_nd(mvm);
}
@@ -1596,7 +1600,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* RF-kill already asserted again... */
if (!cmd.resp_pkt) {
- ret = -ERFKILL;
+ fw_status = ERR_PTR(-ERFKILL);
goto out_free_resp;
}
@@ -1605,7 +1609,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- ret = -EIO;
+ fw_status = ERR_PTR(-EIO);
goto out_free_resp;
}
@@ -1613,7 +1617,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (len != (status_size +
ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- ret = -EIO;
+ fw_status = ERR_PTR(-EIO);
goto out_free_resp;
}
@@ -1621,7 +1625,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
out_free_resp:
iwl_free_resp(&cmd);
- return ret ? ERR_PTR(ret) : fw_status;
+ return fw_status;
}
/* releases the MVM mutex */
@@ -1722,6 +1726,10 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
results->matched_profiles = le32_to_cpu(query->matched_profiles);
memcpy(results->matches, query->matches, sizeof(results->matches));
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
+#endif
+
out_free_resp:
iwl_free_resp(&cmd);
return ret;
@@ -1874,27 +1882,36 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
+ /*
+ * Query the current location and source from the D3 firmware so we
+ * can play it back when we re-initialize the D0 firmware
+ */
+ iwl_mvm_update_changed_regdom(mvm);
+
if (mvm->net_detect) {
iwl_mvm_query_netdetect_reasons(mvm, vif);
+ /* has unlocked the mutex, so skip that */
+ goto out;
} else {
keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (keep)
mvm->keep_vif = vif;
#endif
+ /* has unlocked the mutex, so skip that */
+ goto out_iterate;
}
- /* has unlocked the mutex, so skip that */
- goto out;
out_unlock:
mutex_unlock(&mvm->mutex);
- out:
+out_iterate:
if (!test)
ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
+out:
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
@@ -2004,6 +2021,7 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
iwl_abort_notification_waits(&mvm->notif_wait);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
ieee80211_restart_hw(mvm->hw);
/* wait for restart and disconnect all interfaces */
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 5fe14591e1c4..5f37eab5008d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -545,6 +545,57 @@ static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
return ret ? count : -EINVAL;
}
+static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ /* make sure the channel context is assigned */
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ mutex_unlock(&mvm->mutex);
+ return -EINVAL;
+ }
+
+ phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
+ rcu_read_unlock();
+
+ mvm->dbgfs_rx_phyinfo = value;
+
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
+ chanctx_conf->rx_chains_static,
+ chanctx_conf->rx_chains_dynamic);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[8];
+
+ snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -560,6 +611,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -575,7 +627,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
- mvmvif->mvm = mvm;
if (!mvmvif->dbgfs_dir) {
IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
@@ -595,6 +646,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 82c09d86af8c..9ac04c1ea706 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -562,11 +562,12 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
"\tSecondary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_secondary_ci));
- pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
- pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
- iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
- pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
- iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "BT Configuration CMD - 0=default, 1=never, 2=always\n");
+ pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
+ mvm->bt_ack_kill_msk[0]);
+ pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
+ mvm->bt_cts_kill_msk[0]);
} else {
struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
@@ -579,21 +580,6 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
pos += scnprintf(buf+pos, bufsz-pos,
"\tSecondary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_secondary_ci));
-
- pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tPrimary: ACK Kill Mask 0x%08x\n",
- iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tPrimary: CTS Kill Mask 0x%08x\n",
- iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tSecondary: ACK Kill Mask 0x%08x\n",
- iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[1]]);
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tSecondary: CTS Kill Mask 0x%08x\n",
- iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[1]]);
-
}
mutex_unlock(&mvm->mutex);
@@ -942,7 +928,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- enum iwl_fw_dbg_conf conf;
+ int conf;
char buf[8];
const size_t bufsz = sizeof(buf);
int pos = 0;
@@ -966,7 +952,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
if (ret)
return ret;
- if (WARN_ON(conf_id >= FW_DBG_MAX))
+ if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
return -EINVAL;
mutex_lock(&mvm->mutex);
@@ -985,7 +971,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (ret)
return ret;
- iwl_mvm_fw_dbg_collect(mvm);
+ iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
@@ -1487,26 +1473,6 @@ out:
return count;
}
-static ssize_t iwl_dbgfs_enable_scan_iteration_notif_write(struct iwl_mvm *mvm,
- char *buf,
- size_t count,
- loff_t *ppos)
-{
- int val;
-
- mutex_lock(&mvm->mutex);
-
- if (kstrtoint(buf, 10, &val)) {
- mutex_unlock(&mvm->mutex);
- return -EINVAL;
- }
-
- mvm->scan_iter_notif_enabled = val;
- mutex_unlock(&mvm->mutex);
-
- return count;
-}
-
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
@@ -1529,7 +1495,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8);
-MVM_DEBUGFS_WRITE_FILE_OPS(enable_scan_iteration_notif, 8);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1573,8 +1538,11 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
- MVM_DEBUGFS_ADD_FILE(enable_scan_iteration_notif, mvm->debugfs_dir,
- S_IWUSR);
+ if (!debugfs_create_bool("enable_scan_iteration_notif",
+ S_IRUSR | S_IWUSR,
+ mvm->debugfs_dir,
+ &mvm->scan_iter_notif_enabled))
+ goto err;
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
@@ -1601,6 +1569,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
mvm->debugfs_dir, &mvm->d3_wake_sysassert))
goto err;
+ if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR,
+ mvm->debugfs_dir, &mvm->last_netdetect_scans))
+ goto err;
MVM_DEBUGFS_ADD_FILE(netdetect, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index f3b11897991e..d398a6102805 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -235,36 +235,12 @@ enum iwl_bt_coex_enabled_modules {
* struct iwl_bt_coex_cmd - bt coex configuration command
* @mode: enum %iwl_bt_coex_mode
* @enabled_modules: enum %iwl_bt_coex_enabled_modules
- * @max_kill: max count of Tx retries due to kill from PTA
- * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- * should be set by default
- * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- * should be set by default
- * @bt4_antenna_isolation_thr: antenna threshold value
- * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
- * @bt4_tx_rx_max_freq0: TxRx max frequency
- * @multiprio_lut: multi priority LUT configuration
- * @mplut_prio_boost: BT priority boost registers
- * @decision_lut: PTA decision LUT, per Prio-Ch
*
* The structure is used for the BT_COEX command.
*/
struct iwl_bt_coex_cmd {
__le32 mode;
__le32 enabled_modules;
-
- __le32 max_kill;
- __le32 override_primary_lut;
- __le32 override_secondary_lut;
- __le32 bt4_antenna_isolation_thr;
-
- __le32 bt4_tx_tx_delta_freq_thr;
- __le32 bt4_tx_rx_max_freq0;
-
- __le32 multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
- __le32 mplut_prio_boost[BT_COEX_BOOST_SIZE];
-
- __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
} __packed; /* BT_COEX_CMD_API_S_VER_6 */
/**
@@ -280,29 +256,6 @@ struct iwl_bt_coex_corun_lut_update_cmd {
} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */
/**
- * struct iwl_bt_coex_sw_boost - SW boost values
- * @wifi_tx_prio_boost: SW boost of wifi tx priority
- * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
- * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
- */
-struct iwl_bt_coex_sw_boost {
- __le32 wifi_tx_prio_boost;
- __le32 wifi_rx_prio_boost;
- __le32 kill_ack_msk;
- __le32 kill_cts_msk;
-};
-
-/**
- * struct iwl_bt_coex_sw_boost_update_cmd - command to update the SW boost
- * @boost_values: check struct %iwl_bt_coex_sw_boost - one for each channel
- * primary / secondary / low priority
- */
-struct iwl_bt_coex_sw_boost_update_cmd {
- struct iwl_bt_coex_sw_boost boost_values[3];
-} __packed; /* BT_COEX_UPDATE_SW_BOOST_S_VER_1 */
-
-/**
* struct iwl_bt_coex_reduced_txp_update_cmd
* @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the
* bits are the sta_id (value)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 6d3bea5c59d1..d7658d16e965 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -132,7 +132,7 @@ struct iwl_proto_offload_cmd_common {
* @solicited_node_ipv6_addr: broken -- solicited node address exists
* for each target address
* @target_ipv6_addr: our target addresses
- * @ndp_mac_addr: neighbor soliciation response MAC address
+ * @ndp_mac_addr: neighbor solicitation response MAC address
*/
struct iwl_proto_offload_cmd_v1 {
struct iwl_proto_offload_cmd_common common;
@@ -150,7 +150,7 @@ struct iwl_proto_offload_cmd_v1 {
* @solicited_node_ipv6_addr: broken -- solicited node address exists
* for each target address
* @target_ipv6_addr: our target addresses
- * @ndp_mac_addr: neighbor soliciation response MAC address
+ * @ndp_mac_addr: neighbor solicitation response MAC address
*/
struct iwl_proto_offload_cmd_v2 {
struct iwl_proto_offload_cmd_common common;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index c405cda1025f..f3f3ee0a766b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -70,6 +70,7 @@
#define MAC_INDEX_AUX 4
#define MAC_INDEX_MIN_DRIVER 0
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
+#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
enum iwl_ac {
AC_BK,
@@ -254,7 +255,7 @@ struct iwl_mac_data_p2p_dev {
/**
* enum iwl_mac_filter_flags - MAC context filter flags
* @MAC_FILTER_IN_PROMISC: accept all data frames
- * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all mangement and
+ * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
* control frames to the host
* @MAC_FILTER_ACCEPT_GRP: accept multicast frames
* @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 4fc0938b3fb6..b1baa33cc19b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -298,6 +298,40 @@ struct iwl_uapsd_misbehaving_ap_notif {
} __packed;
/**
+ * struct iwl_reduce_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBms.
+ */
+struct iwl_reduce_tx_power_cmd {
+ u8 flags;
+ u8 mac_context_id;
+ __le16 pwr_restriction;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @set_mode: 0 - MAC tx power, 1 - device tx power
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in 1/8 dBms.
+ * @dev_24: device TX power restriction in 1/8 dBms
+ * @dev_52_low: device TX power restriction upper band - low
+ * @dev_52_high: device TX power restriction upper band - high
+ */
+struct iwl_dev_tx_power_cmd {
+ __le32 set_mode;
+ __le32 mac_context_id;
+ __le16 pwr_restriction;
+ __le16 dev_24;
+ __le16 dev_52_low;
+ __le16 dev_52_high;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
+
+#define IWL_DEV_MAX_TX_POWER 0x7FFF
+
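An illustrative user-space sketch (outside the patch): the v2 command above takes @pwr_restriction in 1/8 dBm units, and the driver code later in this series maps the unrestricted case to IWL_DEV_MAX_TX_POWER. The following stand-alone C uses host-endian integers in place of __le16 and a placeholder helper name:

#include <stdint.h>
#include <stdio.h>

#define DEV_MAX_TX_POWER 0x7FFF	/* mirrors IWL_DEV_MAX_TX_POWER above */

/* Encode a dBm limit in the 1/8 dBm units used by iwl_dev_tx_power_cmd. */
static uint16_t encode_tx_limit(int dbm, int unrestricted)
{
	return unrestricted ? DEV_MAX_TX_POWER : (uint16_t)(8 * dbm);
}

int main(void)
{
	printf("20 dBm -> %u (1/8 dBm units)\n", encode_tx_limit(20, 0));
	printf("unrestricted -> 0x%x\n", encode_tx_limit(0, 1));
	return 0;
}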
+/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
 * @id_and_color: MAC context identifier
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index cfc0e65b34a5..d6cced47d561 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -70,55 +70,10 @@
/* Scan Commands, Responses, Notifications */
-/* Masks for iwl_scan_channel.type flags */
-#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
-#define SCAN_CHANNEL_NARROW_BAND BIT(22)
-
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
/**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
- * @channel: band is selected by iwl_scan_cmd "flags" field
- * @tx_gain: gain for analog radio
- * @dsp_atten: gain for DSP
- * @active_dwell: dwell time for active scan in TU, typically 5-50
- * @passive_dwell: dwell time for passive scan in TU, typically 20-500
- * @type: type is broken down to these bits:
- * bit 0: 0 = passive, 1 = active
- * bits 1-20: SSID direct bit map. If any of these bits is set then
- * the corresponding SSID IE is transmitted in probe request
- * (bit i adds IE in position i to the probe request)
- * bit 22: channel width, 0 = regular, 1 = TGj narrow channel
- *
- * @iteration_count:
- * @iteration_interval:
- * This struct is used once for each channel in the scan list.
- * Each channel can independently select:
- * 1) SSID for directed active scans
- * 2) Txpower setting (for rate specified within Tx command)
- * 3) How long to stay on-channel (behavior may be modified by quiet_time,
- * quiet_plcp_th, good_CRC_th)
- *
- * To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
- * 1) If using passive_dwell (i.e. passive_dwell != 0):
- * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
- * 2) quiet_time <= active_dwell
- * 3) If restricting off-channel time (i.e. max_out_time !=0):
- * passive_dwell < max_out_time
- * active_dwell < max_out_time
- */
-struct iwl_scan_channel {
- __le32 type;
- __le16 channel;
- __le16 iteration_count;
- __le32 iteration_interval;
- __le16 active_dwell;
- __le16 passive_dwell;
-} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */
-
-/**
* struct iwl_ssid_ie - directed scan network information element
*
* Up to 20 of these may appear in REPLY_SCAN_CMD,
@@ -132,152 +87,6 @@ struct iwl_ssid_ie {
u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-/**
- * iwl_scan_flags - masks for scan command flags
- *@SCAN_FLAGS_PERIODIC_SCAN:
- *@SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
- *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
- *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
- *@SCAN_FLAGS_FRAGMENTED_SCAN:
- *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that was active
- * in the past hour, even if they are marked as passive.
- */
-enum iwl_scan_flags {
- SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
- SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX = BIT(1),
- SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
- SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
- SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
- SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5),
-};
-
-/**
- * enum iwl_scan_type - Scan types for scan command
- * @SCAN_TYPE_FORCED:
- * @SCAN_TYPE_BACKGROUND:
- * @SCAN_TYPE_OS:
- * @SCAN_TYPE_ROAMING:
- * @SCAN_TYPE_ACTION:
- * @SCAN_TYPE_DISCOVERY:
- * @SCAN_TYPE_DISCOVERY_FORCED:
- */
-enum iwl_scan_type {
- SCAN_TYPE_FORCED = 0,
- SCAN_TYPE_BACKGROUND = 1,
- SCAN_TYPE_OS = 2,
- SCAN_TYPE_ROAMING = 3,
- SCAN_TYPE_ACTION = 4,
- SCAN_TYPE_DISCOVERY = 5,
- SCAN_TYPE_DISCOVERY_FORCED = 6,
-}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
-
-/**
- * struct iwl_scan_cmd - scan request command
- * ( SCAN_REQUEST_CMD = 0x80 )
- * @len: command length in bytes
- * @scan_flags: scan flags from SCAN_FLAGS_*
- * @channel_count: num of channels in channel list
- * (1 - ucode_capa.n_scan_channels)
- * @quiet_time: in msecs, dwell this time for active scan on quiet channels
- * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
- * this number of packets were received (typically 1)
- * @passive2active: is auto switching from passive to active during scan allowed
- * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in TUs, max out of serving channel time
- * @suspend_time: how long to pause scan when returning to service channel:
- * bits 0-19: beacon interal in TUs (suspend before executing)
- * bits 20-23: reserved
- * bits 24-31: number of beacons (suspend between channels)
- * @rxon_flags: RXON_FLG_*
- * @filter_flags: RXON_FILTER_*
- * @tx_cmd: for active scans (zero for passive), w/o payload,
- * no RS so specify TX rate
- * @direct_scan: direct scan SSIDs
- * @type: one of SCAN_TYPE_*
- * @repeats: how many time to repeat the scan
- */
-struct iwl_scan_cmd {
- __le16 len;
- u8 scan_flags;
- u8 channel_count;
- __le16 quiet_time;
- __le16 quiet_plcp_th;
- __le16 passive2active;
- __le16 rxchain_sel_flags;
- __le32 max_out_time;
- __le32 suspend_time;
- /* RX_ON_FLAGS_API_S_VER_1 */
- __le32 rxon_flags;
- __le32 filter_flags;
- struct iwl_tx_cmd tx_cmd;
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
- __le32 type;
- __le32 repeats;
-
- /*
- * Probe request frame, followed by channel list.
- *
- * Size of probe request frame is specified by byte count in tx_cmd.
- * Channel list follows immediately after probe request frame.
- * Number of channels in list is specified by channel_count.
- * Each channel in list is of type:
- *
- * struct iwl_scan_channel channels[0];
- *
- * NOTE: Only one band of channels can be scanned per pass. You
- * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
- * before requesting another scan.
- */
- u8 data[0];
-} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
-
-/* Response to scan request contains only status with one of these values */
-#define SCAN_RESPONSE_OK 0x1
-#define SCAN_RESPONSE_ERROR 0x2
-
-/*
- * SCAN_ABORT_CMD = 0x81
- * When scan abort is requested, the command has no fields except the common
- * header. The response contains only a status with one of these values.
- */
-#define SCAN_ABORT_POSSIBLE 0x1
-#define SCAN_ABORT_IGNORED 0x2 /* no pending scans */
-
-/* TODO: complete documentation */
-#define SCAN_OWNER_STATUS 0x1
-#define MEASURE_OWNER_STATUS 0x2
-
-/**
- * struct iwl_scan_start_notif - notifies start of scan in the device
- * ( SCAN_START_NOTIFICATION = 0x82 )
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
- * @beacon_timer: structured as follows:
- * bits 0:19 - beacon interval in usecs
- * bits 20:23 - reserved (0)
- * bits 24:31 - number of beacons
- * @channel: which channel is scanned
- * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
- * @status: one of *_OWNER_STATUS
- */
-struct iwl_scan_start_notif {
- __le32 tsf_low;
- __le32 tsf_high;
- __le32 beacon_timer;
- u8 channel;
- u8 band;
- u8 reserved[2];
- __le32 status;
-} __packed; /* SCAN_START_NTF_API_S_VER_1 */
-
-/* scan results probe_status first bit indicates success */
-#define SCAN_PROBE_STATUS_OK 0
-#define SCAN_PROBE_STATUS_TX_FAILED BIT(0)
-/* error statuses combined with TX_FAILED */
-#define SCAN_PROBE_STATUS_FAIL_TTL BIT(1)
-#define SCAN_PROBE_STATUS_FAIL_BT BIT(2)
-
/* How many statistics are gathered for each channel */
#define SCAN_RESULTS_STATISTICS 1
@@ -294,7 +103,7 @@ struct iwl_scan_start_notif {
 * @SCAN_COMP_STATUS_ERR_COEX: medium was lost to WiMax
* @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
* (not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repeatition the driver
+ * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
* asked for
* @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
*/
@@ -313,46 +122,6 @@ enum iwl_scan_complete_status {
SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
};
-/**
- * struct iwl_scan_results_notif - scan results for one channel
- * ( SCAN_RESULTS_NOTIFICATION = 0x83 )
- * @channel: which channel the results are from
- * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
- * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
- * @num_probe_not_sent: # of request that weren't sent due to not enough time
- * @duration: duration spent in channel, in usecs
- * @statistics: statistics gathered for this channel
- */
-struct iwl_scan_results_notif {
- u8 channel;
- u8 band;
- u8 probe_status;
- u8 num_probe_not_sent;
- __le32 duration;
- __le32 statistics[SCAN_RESULTS_STATISTICS];
-} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */
-
-/**
- * struct iwl_scan_complete_notif - notifies end of scanning (all channels)
- * ( SCAN_COMPLETE_NOTIFICATION = 0x84 )
- * @scanned_channels: number of channels scanned (and number of valid results)
- * @status: one of SCAN_COMP_STATUS_*
- * @bt_status: BT on/off status
- * @last_channel: last channel that was scanned
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
- * @results: array of scan results, only "scanned_channels" of them are valid
- */
-struct iwl_scan_complete_notif {
- u8 scanned_channels;
- u8 status;
- u8 bt_status;
- u8 last_channel;
- __le32 tsf_low;
- __le32 tsf_high;
- struct iwl_scan_results_notif results[];
-} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */
-
/* scan offload */
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
@@ -378,11 +147,11 @@ enum scan_framework_client {
* struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
* @scan_flags: see enum iwl_scan_flags
* @channel_count: channels in channel list
- * @quiet_time: dwell time, in milisiconds, on quiet channel
+ * @quiet_time: dwell time, in milliseconds, on quiet channel
* @quiet_plcp_th: quiet channel num of packets threshold
* @good_CRC_th: passive to active promotion threshold
* @rx_chain: RXON rx chain.
- * @max_out_time: max TUs to be out of assoceated channel
+ * @max_out_time: max TUs to be out of associated channel
* @suspend_time: pause scan this TUs when returning to service channel
* @flags: RXON flags
 * @filter_flags: RXON filter
@@ -423,7 +192,7 @@ enum iwl_scan_offload_channel_flags {
* see enum iwl_scan_offload_channel_flags.
* __le16 channel_number: channel number 1-13 etc.
* __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two innteration on one channel.
+ * __le32 iter_interval: interval between two iterations on one channel.
* u8 active_dwell.
* u8 passive_dwell.
*/
@@ -466,8 +235,8 @@ enum iwl_scan_offload_band_selection {
/**
* iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
* @ssid_index: index to ssid list in fixed part
- * @unicast_cipher: encryption olgorithm to match - bitmap
- * @aut_alg: authentication olgorithm to match - bitmap
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @aut_alg: authentication algorithm to match - bitmap
* @network_type: enum iwl_scan_offload_network_type
* @band_selection: enum iwl_scan_offload_band_selection
* @client_bitmap: clients waiting for match - enum scan_framework_client
@@ -745,7 +514,7 @@ struct iwl_scan_req_unified_lmac {
} __packed;
/**
- * struct iwl_lmac_scan_results_notif - scan results for one channel -
+ * struct iwl_scan_results_notif - scan results for one channel -
* SCAN_RESULT_NTF_API_S_VER_3
* @channel: which channel the results are from
* @band: 0 for 5.2 GHz, 1 for 2.4 GHz
@@ -753,7 +522,7 @@ struct iwl_scan_req_unified_lmac {
 * @num_probe_not_sent: # of requests that weren't sent due to not enough time
* @duration: duration spent in channel, in usecs
*/
-struct iwl_lmac_scan_results_notif {
+struct iwl_scan_results_notif {
u8 channel;
u8 band;
u8 probe_status;
@@ -939,7 +708,7 @@ enum iwl_umac_scan_general_flags {
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channel_num: channel number 1-13 etc.
* @iter_count: repetition count for the channel.
- * @iter_interval: interval between two scan interations on one channel.
+ * @iter_interval: interval between two scan iterations on one channel.
*/
struct iwl_scan_channel_cfg_umac {
__le32 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
index 928168b18346..709e28d8b1b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
@@ -65,6 +65,7 @@
#ifndef __fw_api_stats_h__
#define __fw_api_stats_h__
+#include "fw-api-mac.h"
struct mvm_statistics_dbg {
__le32 burst_check;
@@ -218,7 +219,7 @@ struct mvm_statistics_bt_activity {
__le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
-struct mvm_statistics_general {
+struct mvm_statistics_general_v5 {
__le32 radio_temperature;
__le32 radio_voltage;
struct mvm_statistics_dbg dbg;
@@ -244,6 +245,39 @@ struct mvm_statistics_general {
struct mvm_statistics_bt_activity bt_activity;
} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
+struct mvm_statistics_general_v8 {
+ __le32 radio_temperature;
+ __le32 radio_voltage;
+ struct mvm_statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct mvm_statistics_div slow_div;
+ __le32 rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
+ __le32 beacon_filtered;
+ __le32 missed_beacons;
+ __s8 beacon_filter_average_energy;
+ __s8 beacon_filter_reason;
+ __s8 beacon_filter_current_energy;
+ __s8 beacon_filter_reserved;
+ __le32 beacon_filter_delta_time;
+ struct mvm_statistics_bt_activity bt_activity;
+ __le64 rx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+ __le64 tx_time;
+ __le32 beacon_counter[NUM_MAC_INDEX];
+ u8 beacon_average_energy[NUM_MAC_INDEX];
+ u8 reserved[4 - (NUM_MAC_INDEX % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
+
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
@@ -256,22 +290,28 @@ struct mvm_statistics_rx {
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
- *
- * Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
- * 0x9c with CLEAR_STATS bit set (see above).
- *
- * uCode also issues this notification during scans. uCode clears statistics
- * appropriately so that each notification contains statistics for only the
- * one channel that has just been scanned.
+ * STATISTICS_CMD (0x9c), below.
*/
-struct iwl_notif_statistics {
+struct iwl_notif_statistics_v8 {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
- struct mvm_statistics_general general;
+ struct mvm_statistics_general_v5 general;
} __packed; /* STATISTICS_NTFY_API_S_VER_8 */
+struct iwl_notif_statistics_v10 {
+ __le32 flag;
+ struct mvm_statistics_rx rx;
+ struct mvm_statistics_tx tx;
+ struct mvm_statistics_general_v8 general;
+} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
+
+#define IWL_STATISTICS_FLG_CLEAR 0x1
+#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2
+
+struct iwl_statistics_cmd {
+ __le32 flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+
#endif /* __fw_api_stats_h__ */
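An illustrative user-space sketch (outside the patch): the per-MAC arrays added in mvm_statistics_general_v8 are padded with reserved[4 - (NUM_MAC_INDEX % 4)] so the byte arrays end on a 32-bit boundary; with NUM_MAC_INDEX = MAC_INDEX_AUX + 1 = 5 that is 3 pad bytes. The same arithmetic with stand-in types for __le32/u8:

#include <stdint.h>
#include <stdio.h>

#define MAC_INDEX_AUX	4
#define NUM_MAC_INDEX	(MAC_INDEX_AUX + 1)

/* Stand-in for the tail of mvm_statistics_general_v8. */
struct stats_tail {
	uint32_t beacon_counter[NUM_MAC_INDEX];
	uint8_t  beacon_average_energy[NUM_MAC_INDEX];
	uint8_t  reserved[4 - (NUM_MAC_INDEX % 4)];
} __attribute__((packed));

/* 5 * 4 + 5 + 3 = 28 bytes, i.e. a multiple of 4 */
_Static_assert(sizeof(struct stats_tail) % 4 == 0, "tail not 32-bit aligned");

int main(void)
{
	printf("tail size: %zu bytes\n", sizeof(struct stats_tail));
	return 0;
}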
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index b56154fe8ec5..01b1da6ad359 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -192,6 +192,7 @@ enum {
BEACON_NOTIFICATION = 0x90,
BEACON_TEMPLATE_CMD = 0x91,
TX_ANT_CONFIGURATION_CMD = 0x98,
+ STATISTICS_CMD = 0x9c,
STATISTICS_NOTIFICATION = 0x9d,
EOSP_NOTIFICATION = 0x9e,
REDUCE_TX_POWER_CMD = 0x9f,
@@ -211,6 +212,10 @@ enum {
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
+ /* Location Aware Regulatory */
+ MCC_UPDATE_CMD = 0xc8,
+ MCC_CHUB_UPDATE_CMD = 0xc9,
+
MARKER_CMD = 0xcb,
/* BT Coex */
@@ -276,19 +281,6 @@ struct iwl_tx_ant_cfg_cmd {
__le32 valid;
} __packed;
-/**
- * struct iwl_reduce_tx_power_cmd - TX power reduction command
- * REDUCE_TX_POWER_CMD = 0x9f
- * @flags: (reserved for future implementation)
- * @mac_context_id: id of the mac ctx for which we are reducing TX power.
- * @pwr_restriction: TX power restriction in dBms.
- */
-struct iwl_reduce_tx_power_cmd {
- u8 flags;
- u8 mac_context_id;
- __le16 pwr_restriction;
-} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
-
/*
* Calibration control struct.
* Sent as part of the phy configuration command.
@@ -361,7 +353,8 @@ enum {
NVM_SECTION_TYPE_CALIBRATION = 4,
NVM_SECTION_TYPE_PRODUCTION = 5,
NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
- NVM_MAX_NUM_SECTIONS = 12,
+ NVM_SECTION_TYPE_PHY_SKU = 12,
+ NVM_MAX_NUM_SECTIONS = 13,
};
/**
@@ -431,7 +424,7 @@ enum {
#define IWL_ALIVE_FLG_RFKILL BIT(0)
-struct mvm_alive_resp {
+struct mvm_alive_resp_ver1 {
__le16 status;
__le16 flags;
u8 ucode_minor;
@@ -482,6 +475,30 @@ struct mvm_alive_resp_ver2 {
__le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_2 */
+struct mvm_alive_resp {
+ __le16 status;
+ __le16 flags;
+ __le32 ucode_minor;
+ __le32 ucode_major;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le32 timestamp;
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+ __le32 st_fwrd_addr; /* pointer to Store and forward */
+ __le32 st_fwrd_size;
+ __le32 umac_minor; /* UMAC version: minor */
+ __le32 umac_major; /* UMAC version: major */
+ __le32 error_info_addr; /* SRAM address for UMAC error log */
+ __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_3 */
+
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@@ -1417,7 +1434,19 @@ enum iwl_sf_scenario {
#define SF_W_MARK_LEGACY 4096
#define SF_W_MARK_SCAN 4096
-/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
+
+/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
@@ -1448,6 +1477,92 @@ struct iwl_sf_cfg_cmd {
__le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
} __packed; /* SF_CFG_API_S_VER_2 */
+/***********************************
+ * Location Aware Regulatory (LAR) API - MCC updates
+ ***********************************/
+
+/**
+ * struct iwl_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ */
+struct iwl_mcc_update_cmd {
+ __le16 mcc;
+ u8 source_id;
+ u8 reserved;
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S */
+
+/**
+ * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
+ * channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ * 16bits are used.
+ */
+struct iwl_mcc_update_resp {
+ __le32 status;
+ __le16 mcc;
+ u8 cap;
+ u8 source_id;
+ __le32 n_channels;
+ __le32 channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */
+
+/**
+ * struct iwl_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores, gets updates of the MCC, and
+ * notifies the ucode directly of any MCC change.
+ * The ucode requests the driver to request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see iwl_mcc_source
+ * @reserved1: reserved for alignment
+ */
+struct iwl_mcc_chub_notif {
+ u16 mcc;
+ u8 source_id;
+ u8 reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwl_mcc_update_status {
+ MCC_RESP_NEW_CHAN_PROFILE,
+ MCC_RESP_SAME_CHAN_PROFILE,
+ MCC_RESP_INVALID,
+ MCC_RESP_NVM_DISABLED,
+ MCC_RESP_ILLEGAL,
+ MCC_RESP_LOW_PRIORITY,
+};
+
+enum iwl_mcc_source {
+ MCC_SOURCE_OLD_FW = 0,
+ MCC_SOURCE_ME = 1,
+ MCC_SOURCE_BIOS = 2,
+ MCC_SOURCE_3G_LTE_HOST = 3,
+ MCC_SOURCE_3G_LTE_DEVICE = 4,
+ MCC_SOURCE_WIFI = 5,
+ MCC_SOURCE_RESERVED = 6,
+ MCC_SOURCE_DEFAULT = 7,
+ MCC_SOURCE_UNINITIALIZED = 8,
+ MCC_SOURCE_GET_CURRENT = 0x10
+};
+
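An illustrative user-space sketch (outside the patch): the mcc fields above carry the two-letter country code in a single 16-bit word. One plausible packing puts the first letter in the high byte; the byte order actually sent to the firmware is whatever the driver's cpu_to_le16() conversion produces, so treat this purely as illustration:

#include <stdint.h>
#include <stdio.h>

/* Pack/unpack a two-letter MCC such as "US", "ZZ" or "00". */
static uint16_t mcc_pack(const char *alpha2)
{
	return (uint16_t)(((uint8_t)alpha2[0] << 8) | (uint8_t)alpha2[1]);
}

static void mcc_unpack(uint16_t mcc, char out[3])
{
	out[0] = (char)(mcc >> 8);
	out[1] = (char)(mcc & 0xff);
	out[2] = '\0';
}

int main(void)
{
	char buf[3];
	uint16_t mcc = mcc_pack("ZZ");	/* "ZZ" requests the NVM default profile */

	mcc_unpack(mcc, buf);
	printf("0x%04x -> %s\n", mcc, buf);
	return 0;
}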
/* DTS measurements */
enum iwl_dts_measurement_flags {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index ca38e9817374..df869633f4dd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -112,25 +112,27 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
- struct mvm_alive_resp *palive;
+ struct mvm_alive_resp_ver1 *palive1;
struct mvm_alive_resp_ver2 *palive2;
+ struct mvm_alive_resp *palive;
- if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
- palive = (void *)pkt->data;
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
+ palive1 = (void *)pkt->data;
mvm->support_umac_log = false;
mvm->error_event_table =
- le32_to_cpu(palive->error_event_table_ptr);
- mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+ le32_to_cpu(palive1->error_event_table_ptr);
+ mvm->log_event_table =
+ le32_to_cpu(palive1->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
- alive_data->valid = le16_to_cpu(palive->status) ==
+ alive_data->valid = le16_to_cpu(palive1->status) ==
IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
"Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive->status), palive->ver_type,
- palive->ver_subtype, palive->flags);
- } else {
+ le16_to_cpu(palive1->status), palive1->ver_type,
+ palive1->ver_subtype, palive1->flags);
+ } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
palive2 = (void *)pkt->data;
mvm->error_event_table =
@@ -156,6 +158,33 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
IWL_DEBUG_FW(mvm,
"UMAC version: Major - 0x%x, Minor - 0x%x\n",
palive2->umac_major, palive2->umac_minor);
+ } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+
+ mvm->error_event_table =
+ le32_to_cpu(palive->error_event_table_ptr);
+ mvm->log_event_table =
+ le32_to_cpu(palive->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+ mvm->umac_error_event_table =
+ le32_to_cpu(palive->error_info_addr);
+ mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
+ mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
+
+ alive_data->valid = le16_to_cpu(palive->status) ==
+ IWL_ALIVE_STATUS_OK;
+ if (mvm->umac_error_event_table)
+ mvm->support_umac_log = true;
+
+ IWL_DEBUG_FW(mvm,
+ "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive->status), palive->ver_type,
+ palive->ver_subtype, palive->flags);
+
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ le32_to_cpu(palive->umac_major),
+ le32_to_cpu(palive->umac_minor));
}
return true;
@@ -188,8 +217,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
- iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) &&
- iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM))
+ iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
else
fw = iwl_get_ucode_image(mvm, ucode_type);
@@ -294,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
+ if (WARN_ON_ONCE(mvm->calibrating))
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -368,8 +396,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
*/
ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
MVM_UCODE_CALIB_TIMEOUT);
- if (!ret)
- mvm->init_ucode_complete = true;
if (ret && iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
@@ -451,20 +477,88 @@ exit:
iwl_free_resp(&cmd);
}
-void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
+int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
+ struct iwl_mvm_dump_desc *desc,
+ unsigned int delay)
+{
+ if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
+ return -EBUSY;
+
+ if (WARN_ON(mvm->fw_dump_desc))
+ iwl_mvm_free_fw_dump_desc(mvm);
+
+ IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
+ le32_to_cpu(desc->trig_desc.type));
+
+ mvm->fw_dump_desc = desc;
+
+ queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
+
+ return 0;
+}
+
+int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
+ const char *str, size_t len, unsigned int delay)
+{
+ struct iwl_mvm_dump_desc *desc;
+
+ desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->len = len;
+ desc->trig_desc.type = cpu_to_le32(trig);
+ memcpy(desc->trig_desc.data, str, len);
+
+ return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+}
+
+int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
+ struct iwl_fw_dbg_trigger_tlv *trigger,
+ const char *fmt, ...)
{
- /* stop recording */
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
- } else {
- iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
- iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+ unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+ u16 occurrences = le16_to_cpu(trigger->occurrences);
+ int ret, len = 0;
+ char buf[64];
+
+ if (!occurrences)
+ return 0;
+
+ if (fmt) {
+ va_list ap;
+
+ buf[sizeof(buf) - 1] = '\0';
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ /* check for truncation */
+ if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
+ buf[sizeof(buf) - 1] = '\0';
+
+ len = strlen(buf) + 1;
}
- schedule_work(&mvm->fw_error_dump_wk);
+ ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
+ len, delay);
+ if (ret)
+ return ret;
+
+ trigger->occurrences = cpu_to_le16(occurrences - 1);
+ return 0;
+}
+
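An illustrative user-space sketch (outside the patch): iwl_mvm_fw_dbg_collect_trig() above formats an optional reason string into a small buffer and fires at most trigger->occurrences times. The same rate-limited, printf-style pattern with placeholder names:

#include <stdarg.h>
#include <stdio.h>

struct dbg_trigger {
	unsigned int occurrences;
};

/* Placeholder for the actual collection routine. */
static int collect(const char *reason, int len)
{
	printf("collecting: %.*s\n", len, reason);
	return 0;
}

static int collect_trig(struct dbg_trigger *trig, const char *fmt, ...)
{
	char buf[64] = "";
	int len = 0;

	if (!trig->occurrences)
		return 0;	/* already fired the allowed number of times */

	if (fmt) {
		va_list ap;

		va_start(ap, fmt);
		len = vsnprintf(buf, sizeof(buf), fmt, ap);
		va_end(ap);
		if (len >= (int)sizeof(buf))	/* truncated */
			len = sizeof(buf) - 1;
	}

	if (collect(buf, len))
		return -1;

	trig->occurrences--;
	return 0;
}

int main(void)
{
	struct dbg_trigger t = { .occurrences = 2 };

	collect_trig(&t, "missed %d beacons", 12);
	collect_trig(&t, NULL);
	collect_trig(&t, "third call is a no-op");
	return 0;
}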
+static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
+{
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ else
+ iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
}
-int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
{
u8 *ptr;
int ret;
@@ -474,6 +568,14 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
"Invalid configuration %d\n", conf_id))
return -EINVAL;
+ /* EARLY START - firmware's configuration is hard coded */
+ if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
+ !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+ conf_id == FW_DBG_START_FROM_ALIVE) {
+ iwl_mvm_restart_early_start(mvm);
+ return 0;
+ }
+
if (!mvm->fw->dbg_conf_tlv[conf_id])
return -EINVAL;
@@ -545,25 +647,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* module loading, load init ucode now
* (for example, if we were in RFKILL)
*/
- if (!mvm->init_ucode_complete) {
- ret = iwl_run_init_mvm_ucode(mvm, false);
- if (ret && !iwlmvm_mod_params.init_dbg) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
- /* this can't happen */
- if (WARN_ON(ret > 0))
- ret = -ERFKILL;
- goto error;
- }
- if (!iwlmvm_mod_params.init_dbg) {
- /*
- * should stop and start HW since that INIT
- * image just loaded
- */
- iwl_trans_stop_device(mvm->trans);
- ret = iwl_trans_start_hw(mvm->trans);
- if (ret)
- return ret;
- }
+ ret = iwl_run_init_mvm_ucode(mvm, false);
+ if (ret && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+ /* this can't happen */
+ if (WARN_ON(ret > 0))
+ ret = -ERFKILL;
+ goto error;
+ }
+ if (!iwlmvm_mod_params.init_dbg) {
+ /*
+ * Stop and start the transport without entering low power
+ * mode. This will save the state of other components on the
+	 * device that are triggered by the INIT firmware (MFUART).
+ */
+ _iwl_trans_stop_device(mvm->trans, false);
+ _iwl_trans_start_hw(mvm->trans, false);
+ if (ret)
+ return ret;
}
if (iwlmvm_mod_params.init_dbg)
@@ -583,7 +684,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
mvm->fw_dbg_conf = FW_DBG_INVALID;
- iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
+ /* if we have a destination, assume EARLY START */
+ if (mvm->fw->dbg_dest_tlv)
+ mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
+ iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)
@@ -640,6 +744,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ /*
+ * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
+ * anyway, so don't init MCC.
+ */
+ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
+ ret = iwl_mvm_init_mcc(mvm);
+ if (ret)
+ goto error;
+ }
+
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
ret = iwl_mvm_config_scan(mvm);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 7bdc6220743f..8088c7137f7c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -244,6 +244,7 @@ static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif)
{
+ u8 sta_id;
struct iwl_mvm_hw_queues_iface_iterator_data data = {
.exclude_vif = exclude_vif,
.used_hw_queues =
@@ -264,6 +265,13 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
iwl_mvm_mac_sta_hw_queues_iter,
&data);
+ /*
+ * Some TDLS stations may be removed but are in the process of being
+ * drained. Don't touch their queues.
+ */
+ for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
+ data.used_hw_queues |= mvm->tfd_drained[sta_id];
+
return data.used_hw_queues;
}
@@ -462,9 +470,8 @@ exit_fail:
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
- mvm->cfg->base_params->wd_timeout :
- IWL_WATCHDOG_DISABLED;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
u32 ac;
int ret;
@@ -1367,10 +1374,18 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
{
struct iwl_missed_beacons_notif *missed_beacons = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
+ struct iwl_fw_dbg_trigger_tlv *trigger;
+ u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
+ u32 rx_missed_bcon, rx_missed_bcon_since_rx;
if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
return;
+ rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+ rx_missed_bcon_since_rx =
+ le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
/*
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
@@ -1378,6 +1393,26 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
+ FW_DBG_TRIGGER_MISSED_BEACONS))
+ return;
+
+ trigger = iwl_fw_dbg_get_trigger(mvm->fw,
+ FW_DBG_TRIGGER_MISSED_BEACONS);
+ bcon_trig = (void *)trigger->data;
+ stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
+ stop_trig_missed_bcon_since_rx =
+ le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
+
+ /* TODO: implement start trigger */
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
+ return;
+
+ if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
+ rx_missed_bcon >= stop_trig_missed_bcon)
+ iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
}
int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 09654e73a533..40265b9c66ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -86,6 +86,7 @@
#include "iwl-fw-error-dump.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
+#include "iwl-nvm-parse.h"
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
@@ -301,6 +302,116 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
}
}
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed)
+{
+ struct ieee80211_regdomain *regd = NULL;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcc_update_resp *resp;
+
+ IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
+ if (IS_ERR_OR_NULL(resp)) {
+ IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
+ PTR_RET(resp));
+ goto out;
+ }
+
+ if (changed)
+ *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
+
+ regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+ __le32_to_cpu(resp->n_channels),
+ resp->channels,
+ __le16_to_cpu(resp->mcc));
+ /* Store the return source id */
+ src_id = resp->source_id;
+ kfree(resp);
+ if (IS_ERR_OR_NULL(regd)) {
+ IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
+ PTR_RET(regd));
+ goto out;
+ }
+
+ IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+ regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
+ mvm->lar_regdom_set = true;
+ mvm->mcc_src = src_id;
+
+out:
+ return regd;
+}
+
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
+{
+ bool changed;
+ struct ieee80211_regdomain *regd;
+
+ if (!iwl_mvm_is_lar_supported(mvm))
+ return;
+
+ regd = iwl_mvm_get_current_regdomain(mvm, &changed);
+ if (!IS_ERR_OR_NULL(regd)) {
+ /* only update the regulatory core if changed */
+ if (changed)
+ regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+ kfree(regd);
+ }
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+ bool *changed)
+{
+ return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
+ iwl_mvm_is_wifi_mcc_supported(mvm) ?
+ MCC_SOURCE_GET_CURRENT :
+ MCC_SOURCE_OLD_FW, changed);
+}
+
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
+{
+ enum iwl_mcc_source used_src;
+ struct ieee80211_regdomain *regd;
+ int ret;
+ bool changed;
+ const struct ieee80211_regdomain *r =
+ rtnl_dereference(mvm->hw->wiphy->regd);
+
+ if (!r)
+ return -ENOENT;
+
+ /* save the last source in case we overwrite it below */
+ used_src = mvm->mcc_src;
+ if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
+ /* Notify the firmware we support wifi location updates */
+ regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+ if (!IS_ERR_OR_NULL(regd))
+ kfree(regd);
+ }
+
+ /* Now set our last stored MCC and source */
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
+ &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ /* update cfg80211 if the regdomain was changed */
+ if (changed)
+ ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ else
+ ret = 0;
+
+ kfree(regd);
+ return ret;
+}
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -339,13 +450,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
- mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
- hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
- hw->wiphy->features |=
- NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
- NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
- }
+ hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+ hw->wiphy->features |=
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -359,8 +467,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
- REGULATORY_DISABLE_BEACON_HINTS;
+ hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+ if (iwl_mvm_is_lar_supported(mvm))
+ hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ else
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -892,12 +1004,26 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
iwl_trans_release_nic_access(mvm->trans, &flags);
}
+void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
+{
+ if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
+ !mvm->fw_dump_desc)
+ return;
+
+ kfree(mvm->fw_dump_desc);
+ mvm->fw_dump_desc = NULL;
+}
+
+#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
+#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
+
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
struct iwl_fw_error_dump_mem *dump_mem;
+ struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_mvm_dump_ptrs *fw_error_dump;
u32 sram_len, sram_ofs;
u32 file_len, fifo_data_len = 0;
@@ -906,16 +1032,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- /* W/A for 8000 HW family A-step */
- if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) {
- if (smem_len)
- smem_len = 0x38000;
-
- if (sram2_len)
- sram2_len = 0x10000;
- }
-
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
return;
@@ -967,6 +1083,18 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
fifo_data_len +
sizeof(*dump_info);
+ /*
+ * In 8000 HW family B-step include the ICCM (which resides separately)
+ */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ IWL8260_ICCM_LEN;
+
+ if (mvm->fw_dump_desc)
+ file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
+ mvm->fw_dump_desc->len;
+
/* Make room for the SMEM, if it exists */
if (smem_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
@@ -978,6 +1106,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
+ iwl_mvm_free_fw_dump_desc(mvm);
return;
}
@@ -1006,6 +1135,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
iwl_mvm_dump_fifos(mvm, &dump_data);
+ if (mvm->fw_dump_desc) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
+ dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
+ mvm->fw_dump_desc->len);
+ dump_trig = (void *)dump_data->data;
+ memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
+ sizeof(*dump_trig) + mvm->fw_dump_desc->len);
+
+ /* now we can free this copy */
+ iwl_mvm_free_fw_dump_desc(mvm);
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -1036,6 +1178,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->data, sram2_len);
}
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
+ dump_data = iwl_fw_error_next_data(dump_data);
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
+ sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+ dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
+ iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
+ dump_mem->data, IWL8260_ICCM_LEN);
+ }
+
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
fw_error_dump->op_mode_len = file_len;
if (fw_error_dump->trans_ptr)
@@ -1044,16 +1199,26 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
+
+ clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
}
+struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
+ .trig_desc = {
+ .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
+ },
+};
+
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
/* clear the D3 reconfig, we only need it to avoid dumping a
* firmware coredump on reconfiguration, we shouldn't do that
* on D3->D0 transition
*/
- if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
+ if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
+ mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
iwl_mvm_fw_error_dump(mvm);
+ }
/* cleanup all stale references (scan, roc), but keep the
* ucode_down ref until reconfig is complete
@@ -1094,6 +1259,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
+ mvm->fw_dbg_conf = FW_DBG_INVALID;
+
+ /* keep statistics ticking */
+ iwl_mvm_accu_radio_stats(mvm);
}
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
@@ -1153,7 +1322,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
iwl_mvm_d0i3_enable_tx(mvm, NULL);
- ret = iwl_mvm_update_quotas(mvm, NULL);
+ ret = iwl_mvm_update_quotas(mvm, true, NULL);
if (ret)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
ret);
@@ -1216,6 +1385,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
+ /* firmware counters are obviously reset now, but we shouldn't
+	 * partially track, so also clear the fw_reset_accu counters.
+ */
+ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
+
/*
* Disallow low power states when the FW is down by taking
* the UCODE_DOWN ref. in case of ongoing hw restart the
@@ -1246,6 +1420,20 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
*/
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ /* We shouldn't have any UIDs still set. Loop over all the UIDs to
+ * make sure there's nothing left there and warn if any is found.
+ */
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ int i;
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
+ if (WARN_ONCE(mvm->scan_uid[i],
+ "UMAC scan UID %d was not cleaned\n",
+ mvm->scan_uid[i]))
+ mvm->scan_uid[i] = 0;
+ }
+ }
+
mvm->ucode_loaded = false;
}
@@ -1255,7 +1443,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
- flush_work(&mvm->fw_error_dump_wk);
+ cancel_delayed_work_sync(&mvm->fw_dump_wk);
+ iwl_mvm_free_fw_dump_desc(mvm);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
@@ -1282,8 +1471,8 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
return NULL;
}
-static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- s8 tx_power)
+static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, s8 tx_power)
{
/* FW is in charge of regulatory enforcement */
struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
@@ -1296,6 +1485,26 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
&reduce_txpwr_cmd);
}
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ s16 tx_power)
+{
+ struct iwl_dev_tx_power_cmd cmd = {
+ .set_mode = 0,
+ .mac_context_id =
+ cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
+ .pwr_restriction = cpu_to_le16(8 * tx_power),
+ };
+
+ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
+ return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
+
+ if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
+ cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
+ sizeof(cmd), &cmd);
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1303,6 +1512,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ mvmvif->mvm = mvm;
+
/*
* make sure D0i3 exit is completed, otherwise a target access
* during tx queue configuration could be done when still in
@@ -1320,6 +1531,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ /* make sure that beacon statistics don't go backwards with FW reset */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ mvmvif->beacon_stats.accu_num_beacons +=
+ mvmvif->beacon_stats.num_beacons;
+
/* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
@@ -1434,9 +1650,33 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
if (tfd_msk) {
+ /*
+ * mac80211 first removes all the stations of the vif and
+ * then removes the vif. When it removes a station it also
+ * flushes the AMPDU session. So by now, all the AMPDU sessions
+ * of all the stations of this vif are closed, and the queues
+ * of these AMPDU sessions are properly closed.
+ * We still need to take care of the shared queues of the vif.
+ * Flush them here.
+ */
mutex_lock(&mvm->mutex);
iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
mutex_unlock(&mvm->mutex);
+
+ /*
+ * There are transports that buffer a few frames in the host.
+ * For these, the flush above isn't enough since while we were
+ * flushing, the transport might have sent more frames to the
+ * device. To solve this, wait here until the transport is
+ * empty. Technically, this could have replaced the flush
+ * above, but flush is much faster than draining. So flush
+ * first, and drain to make sure we have no frames in the
+ * transport anymore.
+ * If a station still had frames on the shared queues, it is
+ * already marked as draining, so to complete the draining, we
+ * just need to wait until the transport is empty.
+ */
+ iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
}
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
@@ -1813,8 +2053,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ memset(&mvmvif->beacon_stats, 0,
+ sizeof(mvmvif->beacon_stats));
+
/* add quota for this interface */
- ret = iwl_mvm_update_quotas(mvm, NULL);
+ ret = iwl_mvm_update_quotas(mvm, true, NULL);
if (ret) {
IWL_ERR(mvm, "failed to update quotas\n");
return;
@@ -1866,7 +2111,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* remove quota for this interface */
- ret = iwl_mvm_update_quotas(mvm, NULL);
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
@@ -1985,7 +2230,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* power updated needs to be done before quotas */
iwl_mvm_power_update_mac(mvm);
- ret = iwl_mvm_update_quotas(mvm, NULL);
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
goto out_quota_failed;
@@ -2001,8 +2246,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (iwl_mvm_phy_ctx_count(mvm) > 1)
iwl_mvm_teardown_tdls_peers(mvm);
- mutex_unlock(&mvm->mutex);
- return 0;
+ goto out_unlock;
out_quota_failed:
iwl_mvm_power_update_mac(mvm);
@@ -2051,7 +2295,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
- iwl_mvm_update_quotas(mvm, NULL);
+ iwl_mvm_update_quotas(mvm, false, NULL);
iwl_mvm_send_rm_bcast_sta(mvm, vif);
iwl_mvm_binding_remove_vif(mvm, vif);
@@ -2190,6 +2434,12 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
ret = -EBUSY;
goto out;
@@ -2199,10 +2449,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
- else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
else
- ret = iwl_mvm_scan_request(mvm, vif, req);
+ ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
if (ret)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
@@ -2272,25 +2520,35 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned long txqs = 0, tids = 0;
int tid;
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ if (tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+ continue;
+
+ __set_bit(tid_data->txq_id, &txqs);
+
+ if (iwl_mvm_tid_queued(tid_data) == 0)
+ continue;
+
+ __set_bit(tid, &tids);
+ }
+
switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
- spin_lock_bh(&mvmsta->lock);
- for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
- struct iwl_mvm_tid_data *tid_data;
- tid_data = &mvmsta->tid_data[tid];
- if (tid_data->state != IWL_AGG_ON &&
- tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
- continue;
- if (iwl_mvm_tid_queued(tid_data) == 0)
- continue;
+ for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
ieee80211_sta_set_buffered(sta, tid, true);
- }
- spin_unlock_bh(&mvmsta->lock);
+
+ if (txqs)
+ iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
* queues to this station will not be transmitted. The fw will
@@ -2300,11 +2558,15 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
case STA_NOTIFY_AWAKE:
if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
break;
+
+ if (txqs)
+ iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
iwl_mvm_sta_modify_ps_wake(mvm, sta);
break;
default:
break;
}
+ spin_unlock_bh(&mvmsta->lock);
}
static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
@@ -2542,13 +2804,13 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- /* Newest FW fixes sched scan while connected on another interface */
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
- if (!vif->bss_conf.idle) {
- ret = -EBUSY;
- goto out;
- }
- } else if (!iwl_mvm_is_idle(mvm)) {
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!vif->bss_conf.idle) {
ret = -EBUSY;
goto out;
}
@@ -2838,6 +3100,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
duration, type);
+ flush_work(&mvm->roc_done_wk);
+
mutex_lock(&mvm->mutex);
switch (vif->type) {
@@ -3109,14 +3373,14 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
*/
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvmvif->monitor_active = true;
- ret = iwl_mvm_update_quotas(mvm, NULL);
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
goto out_remove_binding;
}
/* Handle binding during CSA */
if (vif->type == NL80211_IFTYPE_AP) {
- iwl_mvm_update_quotas(mvm, NULL);
+ iwl_mvm_update_quotas(mvm, false, NULL);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
@@ -3140,7 +3404,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
- iwl_mvm_update_quotas(mvm, NULL);
+ iwl_mvm_update_quotas(mvm, false, NULL);
}
goto out;
@@ -3213,7 +3477,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
break;
}
- iwl_mvm_update_quotas(mvm, disabled_vif);
+ iwl_mvm_update_quotas(mvm, false, disabled_vif);
iwl_mvm_binding_remove_vif(mvm, vif);
out:
@@ -3405,7 +3669,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
mvm->noa_duration = noa_duration;
mvm->noa_vif = vif;
- return iwl_mvm_update_quotas(mvm, NULL);
+ return iwl_mvm_update_quotas(mvm, false, NULL);
case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
/* must be associated client vif - ignore authorized */
if (!vif || vif->type != NL80211_IFTYPE_STATION ||
@@ -3462,9 +3726,13 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ mvmvif->csa_failed = false;
+
IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
chsw->chandef.center_freq1);
+ iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
+
switch (vif->type) {
case NL80211_IFTYPE_AP:
csa_vif =
@@ -3534,6 +3802,12 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (mvmvif->csa_failed) {
+ mvmvif->csa_failed = false;
+ ret = -EIO;
+ goto out_unlock;
+ }
+
if (vif->type == NL80211_IFTYPE_STATION) {
struct iwl_mvm_sta *mvmsta;
@@ -3613,6 +3887,154 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
}
}
+static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ memset(survey, 0, sizeof(*survey));
+
+ /* only support global statistics right now */
+ if (idx != 0)
+ return -ENOENT;
+
+ if (!(mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ return -ENOENT;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvm->ucode_loaded) {
+ ret = iwl_mvm_request_statistics(mvm, false);
+ if (ret)
+ goto out;
+ }
+
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_SCAN;
+ survey->time = mvm->accu_radio_stats.on_time_rf +
+ mvm->radio_stats.on_time_rf;
+ do_div(survey->time, USEC_PER_MSEC);
+
+ survey->time_rx = mvm->accu_radio_stats.rx_time +
+ mvm->radio_stats.rx_time;
+ do_div(survey->time_rx, USEC_PER_MSEC);
+
+ survey->time_tx = mvm->accu_radio_stats.tx_time +
+ mvm->radio_stats.tx_time;
+ do_div(survey->time_tx, USEC_PER_MSEC);
+
+ survey->time_scan = mvm->accu_radio_stats.on_time_scan +
+ mvm->radio_stats.on_time_scan;
+ do_div(survey->time_scan, USEC_PER_MSEC);
+
+ ret = 0;
+ out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (!(mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ return;
+
+ /* if beacon filtering isn't on, mac80211 does it anyway */
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+
+ if (!vif->bss_conf.assoc)
+ return;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvmvif->ap_sta_id != mvmsta->sta_id)
+ goto unlock;
+
+ if (iwl_mvm_request_statistics(mvm, false))
+ goto unlock;
+
+ sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
+ mvmvif->beacon_stats.accu_num_beacons;
+ sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ if (mvmvif->beacon_stats.avg_signal) {
+ /* firmware only reports a value after RXing a few beacons */
+ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
+ sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ }
+ unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
+ do { \
+ if ((_cnt) && --(_cnt)) \
+ break; \
+ iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
+ } while (0)
+
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ return;
+
+ if (event->u.mlme.status == MLME_SUCCESS)
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+ trig_mlme = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ return;
+
+ if (event->u.mlme.data == ASSOC_EVENT) {
+ if (event->u.mlme.status == MLME_DENIED)
+ CHECK_MLME_TRIGGER(mvm, trig, buf,
+ trig_mlme->stop_assoc_denied,
+ "DENIED ASSOC: reason %d",
+ event->u.mlme.reason);
+ else if (event->u.mlme.status == MLME_TIMEOUT)
+ CHECK_MLME_TRIGGER(mvm, trig, buf,
+ trig_mlme->stop_assoc_timeout,
+ "ASSOC TIMEOUT");
+ } else if (event->u.mlme.data == AUTH_EVENT) {
+ if (event->u.mlme.status == MLME_DENIED)
+ CHECK_MLME_TRIGGER(mvm, trig, buf,
+ trig_mlme->stop_auth_denied,
+ "DENIED AUTH: reason %d",
+ event->u.mlme.reason);
+ else if (event->u.mlme.status == MLME_TIMEOUT)
+ CHECK_MLME_TRIGGER(mvm, trig, buf,
+ trig_mlme->stop_auth_timeout,
+ "AUTH TIMEOUT");
+ } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
+ CHECK_MLME_TRIGGER(mvm, trig, buf,
+ trig_mlme->stop_rx_deauth,
+ "DEAUTH RX %d", event->u.mlme.reason);
+ } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
+ CHECK_MLME_TRIGGER(mvm, trig, buf,
+ trig_mlme->stop_tx_deauth,
+ "DEAUTH TX %d", event->u.mlme.reason);
+ }
+#undef CHECK_MLME_TRIGGER
+}
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -3666,6 +4088,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
+ .event_callback = iwl_mvm_mac_event_callback,
+
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP
@@ -3679,4 +4103,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
#endif
.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
+ .get_survey = iwl_mvm_mac_get_survey,
+ .sta_statistics = iwl_mvm_mac_sta_statistics,
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 6c69d0584f6c..cf70f681d1ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -75,6 +75,7 @@
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
+#include "iwl-fw-file.h"
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
@@ -145,6 +146,19 @@ struct iwl_mvm_dump_ptrs {
u32 op_mode_len;
};
+/**
+ * struct iwl_mvm_dump_desc - describes the dump
+ * @len: length of trig_desc->data
+ * @trig_desc: the description of the dump
+ */
+struct iwl_mvm_dump_desc {
+ size_t len;
+ /* must be last */
+ struct iwl_fw_error_dump_trigger_desc trig_desc;
+};
+
+extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
+
struct iwl_mvm_phy_ctxt {
u16 id;
u16 color;
@@ -335,10 +349,15 @@ struct iwl_mvm_vif_bf_data {
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
* @beacon_skb: the skb used to hold the AP/GO beacon template
- * @smps_requests: the SMPS requests of differents parts of the driver,
+ * @smps_requests: the SMPS requests of different parts of the driver,
* combined on update to yield the overall request to mac80211.
+ * @beacon_stats: beacon statistics, containing the # of received beacons,
+ * # of received beacons accumulated over FW restart, and the current
+ * average signal of beacons retrieved from the firmware
+ * @csa_failed: CSA failed to schedule time event, report an error later
*/
struct iwl_mvm_vif {
+ struct iwl_mvm *mvm;
u16 id;
u16 color;
u8 ap_sta_id;
@@ -354,6 +373,11 @@ struct iwl_mvm_vif {
bool ps_disabled;
struct iwl_mvm_vif_bf_data bf_data;
+ struct {
+ u32 num_beacons, accu_num_beacons;
+ u8 avg_signal;
+ } beacon_stats;
+
u32 ap_beacon_time;
enum iwl_tsf_id tsf_id;
@@ -396,7 +420,6 @@ struct iwl_mvm_vif {
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct iwl_mvm *mvm;
struct dentry *dbgfs_dir;
struct dentry *dbgfs_slink;
struct iwl_dbgfs_pm dbgfs_pm;
@@ -411,6 +434,7 @@ struct iwl_mvm_vif {
/* Indicates that CSA countdown may be started */
bool csa_countdown;
+ bool csa_failed;
};
static inline struct iwl_mvm_vif *
@@ -579,7 +603,6 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
- bool init_ucode_complete;
bool calibrating;
u32 error_event_table;
u32 log_event_table;
@@ -593,6 +616,13 @@ struct iwl_mvm {
struct mvm_statistics_rx rx_stats;
+ struct {
+ u64 rx_time;
+ u64 tx_time;
+ u64 on_time_rf;
+ u64 on_time_scan;
+ } radio_stats, accu_radio_stats;
+
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
@@ -657,7 +687,7 @@ struct iwl_mvm {
bool disable_power_off;
bool disable_power_off_d3;
- bool scan_iter_notif_enabled;
+ u32 scan_iter_notif_enabled; /* must be u32 for debugfs_create_bool */
struct debugfs_blob_wrapper nvm_hw_blob;
struct debugfs_blob_wrapper nvm_sw_blob;
@@ -666,6 +696,7 @@ struct iwl_mvm {
struct iwl_mvm_frame_stats drv_rx_stats;
spinlock_t drv_stats_lock;
+ u16 dbgfs_rx_phyinfo;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -687,8 +718,9 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
- struct work_struct fw_error_dump_wk;
- enum iwl_fw_dbg_conf fw_dbg_conf;
+ u8 fw_dbg_conf;
+ struct delayed_work fw_dump_wk;
+ struct iwl_mvm_dump_desc *fw_dump_desc;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@@ -715,6 +747,7 @@ struct iwl_mvm {
void *d3_resume_sram;
u32 d3_test_pme_ptr;
struct ieee80211_vif *keep_vif;
+ u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
#endif
#endif
@@ -779,6 +812,9 @@ struct iwl_mvm {
/* system time of last beacon (for AP/GO interface) */
u32 ap_last_beacon_gp2;
+ bool lar_regdom_set;
+ enum iwl_mcc_source mcc_src;
+
u8 low_latency_agg_frame_limit;
/* TDLS channel switch data */
@@ -824,6 +860,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_D3_RECONFIG,
+ IWL_MVM_STATUS_DUMPING_FW_LOG,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -878,11 +915,48 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
+static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
+{
+ bool nvm_lar = mvm->nvm_data->lar_enabled;
+ bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+
+ if (iwlwifi_mod_params.lar_disable)
+ return false;
+
+ /*
+ * Enable LAR only if it is supported by the FW (TLV) &&
+ * enabled in the NVM
+ */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ return nvm_lar && tlv_lar;
+ else
+ return tlv_lar;
+}
+
+static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
+{
+ return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
+ mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
+}
+
static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{
return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
}
+static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
+{
+ return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+ IWL_MVM_BT_COEX_CORUNNING;
+}
+
+static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
+{
+ return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+ IWL_MVM_BT_COEX_RRC;
+}
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -951,12 +1025,13 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
}
/* Statistics */
-int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt);
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
@@ -1067,20 +1142,14 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
/* Quota management */
-int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_update,
struct ieee80211_vif *disabled_vif);
/* Scanning */
int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_scan_request(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
-int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -1089,14 +1158,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies);
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
-int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
- struct cfg80211_sched_scan_request *req);
int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -1225,7 +1288,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event);
+ enum ieee80211_rssi_event_data);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
@@ -1238,7 +1301,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
@@ -1246,7 +1308,7 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event);
+ enum ieee80211_rssi_event_data);
u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
@@ -1257,17 +1319,6 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-enum iwl_bt_kill_msk {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_MAX,
-};
-
-extern const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
-extern const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
-extern const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX];
-
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void
@@ -1352,9 +1403,6 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
}
-/* Assoc status */
-bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
-
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
@@ -1367,6 +1415,23 @@ void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
int iwl_mvm_get_temp(struct iwl_mvm *mvm);
+/* Location Aware Regulatory */
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ enum iwl_mcc_source src_id);
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
+int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed);
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+ bool *changed);
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
+
/* smart fifo */
int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool added_vif);
@@ -1405,7 +1470,65 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
-int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf id);
-void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm);
+int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
+ const char *str, size_t len, unsigned int delay);
+int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
+ struct iwl_mvm_dump_desc *desc,
+ unsigned int delay);
+void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
+int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
+ struct iwl_fw_dbg_trigger_tlv *trigger,
+ const char *fmt, ...) __printf(3, 4);
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool tdls, bool cmd_q);
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const char *errmsg);
+static inline bool
+iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
+ struct ieee80211_vif *vif)
+{
+ u32 trig_vif = le32_to_cpu(trig->vif_type);
+
+ return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+}
+
+static inline bool
+iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm,
+ struct iwl_fw_dbg_trigger_tlv *trig)
+{
+ return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
+ (mvm->fw_dbg_conf == FW_DBG_INVALID ||
+ (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids))));
+}
+
+static inline bool
+iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_fw_dbg_trigger_tlv *trig)
+{
+ if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif))
+ return false;
+
+ return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig);
+}
+
+static inline void
+iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_fw_dbg_trigger trig)
+{
+ struct iwl_fw_dbg_trigger_tlv *trigger;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig))
+ return;
+
+ trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig);
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
+}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 5383429d96c1..87b2a30a2308 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -63,18 +63,21 @@
*
*****************************************************************************/
#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "iwl-eeprom-parse.h"
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
-#define IWL_MAX_NVM_8000A_SECTION_SIZE 0xffc
-#define IWL_MAX_NVM_8000B_SECTION_SIZE 0x1ffc
+#define IWL_MAX_NVM_8000_SECTION_SIZE 0x1ffc
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@@ -262,7 +265,9 @@ static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
struct iwl_nvm_section *sections = mvm->nvm_sections;
- const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
+ const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
+ bool lar_enabled;
+ u32 mac_addr0, mac_addr1;
/* Checking for required sections */
if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -286,22 +291,38 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
"Can't parse mac_address, empty sections\n");
return NULL;
}
+
+ /* PHY_SKU section is mandatory in B0 */
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+ IWL_ERR(mvm,
+ "Can't parse phy_sku in B0, empty sections\n");
+ return NULL;
+ }
}
if (WARN_ON(!mvm->cfg))
return NULL;
+ /* read the mac address from WFMP registers */
+ mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
+ mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
+
hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
mac_override =
(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+ phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+
+ lar_enabled = !iwlwifi_mod_params.lar_disable &&
+ (mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
- regulatory, mac_override,
- mvm->fw->valid_tx_ant,
- mvm->fw->valid_rx_ant);
+ regulatory, mac_override, phy_sku,
+ mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
+ lar_enabled, mac_addr0, mac_addr1);
}
#define MAX_NVM_FILE_LEN 16384
@@ -354,10 +375,8 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
/* Maximal size depends on HW family and step */
if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
max_section_size = IWL_MAX_NVM_SECTION_SIZE;
- else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
- max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE;
- else /* Family 8000 B-step or C-step */
- max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE;
+ else
+ max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE;
/*
* Obtain NVM image via request_firmware. Since we already used
@@ -399,6 +418,15 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
le32_to_cpu(dword_buff[3]));
+
+ /* nvm file validation, dword_buff[2] holds the file version */
+ if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
+ le32_to_cpu(dword_buff[2]) < 0xE4A) ||
+ (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP &&
+ le32_to_cpu(dword_buff[2]) >= 0xE4A)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else {
file_sec = (void *)fw_entry->data;
}
@@ -497,6 +525,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
int ret, section;
u32 size_read = 0;
u8 *nvm_buffer, *temp;
+ const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step;
+ const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
return -EINVAL;
@@ -555,10 +585,27 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
/* load external NVM if configured */
if (mvm->nvm_file_name) {
- /* move to External NVM flow */
+ /* read External NVM file - take the default */
ret = iwl_mvm_read_external_nvm(mvm);
- if (ret)
- return ret;
+ if (ret) {
+ /* choose the nvm_file name according to the
+ * HW step
+ */
+ if (CSR_HW_REV_STEP(mvm->trans->hw_rev) ==
+ SILICON_B_STEP)
+ mvm->nvm_file_name = nvm_file_B;
+ else
+ mvm->nvm_file_name = nvm_file_C;
+
+ if (ret == -EFAULT && mvm->nvm_file_name) {
+ /* in case reading the nvm file failed, try again */
+ ret = iwl_mvm_read_external_nvm(mvm);
+ if (ret)
+ return ret;
+ } else {
+ return ret;
+ }
+ }
}
/* parse the relevant nvm sections */
@@ -570,3 +617,257 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
return 0;
}
+
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ enum iwl_mcc_source src_id)
+{
+ struct iwl_mcc_update_cmd mcc_update_cmd = {
+ .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+ .source_id = (u8)src_id,
+ };
+ struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = MCC_UPDATE_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &mcc_update_cmd },
+ };
+
+ int ret;
+ u32 status;
+ int resp_len, n_channels;
+ u16 mcc;
+
+ if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
+
+ IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
+ alpha2[0], alpha2[1], src_id);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* Extract MCC response */
+ mcc_resp = (void *)pkt->data;
+ status = le32_to_cpu(mcc_resp->status);
+
+ mcc = le16_to_cpu(mcc_resp->mcc);
+
+ /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+ if (mcc == 0) {
+ mcc = 0x3030; /* "00" - world */
+ mcc_resp->mcc = cpu_to_le16(mcc);
+ }
+
+ n_channels = __le32_to_cpu(mcc_resp->n_channels);
+ IWL_DEBUG_LAR(mvm,
+ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
+ status, mcc, mcc >> 8, mcc & 0xff,
+ !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
+
+ resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
+ resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+ if (!resp_cp) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ iwl_free_resp(&cmd);
+ if (ret)
+ return ERR_PTR(ret);
+ return resp_cp;
+}
+
+#ifdef CONFIG_ACPI
+#define WRD_METHOD "WRDD"
+#define WRDD_WIFI (0x07)
+#define WRDD_WIGIG (0x10)
+
+static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
+{
+ union acpi_object *mcc_pkg, *domain_type, *mcc_value;
+ u32 i;
+
+ if (wrdd->type != ACPI_TYPE_PACKAGE ||
+ wrdd->package.count < 2 ||
+ wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrdd->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
+ return 0;
+ }
+
+ for (i = 1 ; i < wrdd->package.count ; ++i) {
+ mcc_pkg = &wrdd->package.elements[i];
+
+ if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
+ mcc_pkg->package.count < 2 ||
+ mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ mcc_pkg = NULL;
+ continue;
+ }
+
+ domain_type = &mcc_pkg->package.elements[0];
+ if (domain_type->integer.value == WRDD_WIFI)
+ break;
+
+ mcc_pkg = NULL;
+ }
+
+ if (mcc_pkg) {
+ mcc_value = &mcc_pkg->package.elements[1];
+ return mcc_value->integer.value;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
+{
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ u32 mcc_val;
+ struct pci_dev *pdev = to_pci_dev(mvm->dev);
+
+ root_handle = ACPI_HANDLE(&pdev->dev);
+ if (!root_handle) {
+ IWL_DEBUG_LAR(mvm,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_LAR(mvm, "WRD method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call WRDD with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
+ return -ENOENT;
+ }
+
+ mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
+ kfree(wrdd.pointer);
+ if (!mcc_val)
+ return -ENOENT;
+
+ mcc[0] = (mcc_val >> 8) & 0xff;
+ mcc[1] = mcc_val & 0xff;
+ mcc[2] = '\0';
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
+{
+ return -ENOENT;
+}
+#endif
+
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
+{
+ bool tlv_lar;
+ bool nvm_lar;
+ int retval;
+ struct ieee80211_regdomain *regd;
+ char mcc[3];
+
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ tlv_lar = mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+ nvm_lar = mvm->nvm_data->lar_enabled;
+ if (tlv_lar != nvm_lar)
+ IWL_INFO(mvm,
+ "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
+ tlv_lar ? "enabled" : "disabled",
+ nvm_lar ? "enabled" : "disabled");
+ }
+
+ if (!iwl_mvm_is_lar_supported(mvm))
+ return 0;
+
+ /*
+ * try to replay the last set MCC to FW. If it doesn't exist,
+ * queue an update to cfg80211 to retrieve the default alpha2 from FW.
+ */
+ retval = iwl_mvm_init_fw_regd(mvm);
+ if (retval != -ENOENT)
+ return retval;
+
+ /*
+ * Driver regulatory hint for initial update, this also informs the
+ * firmware we support wifi location updates.
+ * Disallow scans that might crash the FW while the LAR regdomain
+ * is not set.
+ */
+ mvm->lar_regdom_set = false;
+
+ regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
+ !iwl_mvm_get_bios_mcc(mvm, mcc)) {
+ kfree(regd);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
+ MCC_SOURCE_BIOS, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+ }
+
+ retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ kfree(regd);
+ return retval;
+}
+
+int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+ enum iwl_mcc_source src;
+ char mcc[3];
+ struct ieee80211_regdomain *regd;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+ return 0;
+
+ mcc[0] = notif->mcc >> 8;
+ mcc[1] = notif->mcc & 0xff;
+ mcc[2] = '\0';
+ src = notif->source_id;
+
+ IWL_DEBUG_LAR(mvm,
+ "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+ mcc, src);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return 0;
+
+ regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+ kfree(regd);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2dffc3600ed3..1c66297d82c0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -82,7 +82,6 @@
#include "rs.h"
#include "fw-api-scan.h"
#include "time-event.h"
-#include "iwl-fw-error-dump.h"
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -234,11 +233,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_ant_coupling_notif, true),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+ RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
- RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
- RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
iwl_mvm_rx_scan_offload_iter_complete_notif, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
@@ -311,6 +309,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(REPLY_RX_MPDU_CMD),
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
+ CMD(STATISTICS_CMD),
CMD(STATISTICS_NOTIFICATION),
CMD(EOSP_NOTIFICATION),
CMD(REDUCE_TX_POWER_CMD),
@@ -359,6 +358,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(TDLS_CHANNEL_SWITCH_CMD),
CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
CMD(TDLS_CONFIG_CMD),
+ CMD(MCC_UPDATE_CMD),
};
#undef CMD
@@ -456,7 +456,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
- INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk);
+ INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
spin_lock_init(&mvm->d0i3_tx_lock);
@@ -488,8 +488,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* Set a short watchdog for the command queue */
trans_cfg.cmd_q_wdg_timeout =
- iwlmvm_mod_params.tfd_q_hang_detect ? IWL_DEF_WD_TIMEOUT :
- IWL_WATCHDOG_DISABLED;
+ iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
snprintf(mvm->hw->wiphy->fw_version,
sizeof(mvm->hw->wiphy->fw_version),
@@ -504,6 +503,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
sizeof(trans->dbg_conf_tlv));
+ trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@@ -523,12 +523,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* set the nvm_file_name according to priority */
if (iwlwifi_mod_params.nvm_file) {
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
- } else {
- if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
- (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP))
- mvm->nvm_file_name = mvm->cfg->default_nvm_file_8000A;
+ } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
+ mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
else
- mvm->nvm_file_name = mvm->cfg->default_nvm_file;
+ mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
}
if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
@@ -685,6 +684,37 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
mutex_unlock(&mvm->mutex);
}
+static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_cmd *cmds_trig;
+ int i;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
+ cmds_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
+ /* don't collect on CMD 0 */
+ if (!cmds_trig->cmds[i].cmd_id)
+ break;
+
+ if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+ continue;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "CMD 0x%02x received",
+ pkt->hdr.cmd);
+ break;
+ }
+}
+
static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
@@ -693,6 +723,8 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 i;
+ iwl_mvm_rx_check_trigger(mvm, pkt);
+
/*
* Do the notification wait before RX handlers so
* even if the RX handler consumes the RXB we have
@@ -827,18 +859,28 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_mvm *mvm =
- container_of(work, struct iwl_mvm, fw_error_dump_wk);
+ container_of(work, struct iwl_mvm, fw_dump_wk.work);
if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
return;
mutex_lock(&mvm->mutex);
+
+ /* stop recording */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ } else {
+ iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
+ /* wait for the DBGC to stop before we collect the data */
+ udelay(100);
+ }
+
iwl_mvm_fw_error_dump(mvm);
/* start recording again if the firmware is not crashed */
WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
- mvm->fw->dbg_dest_tlv &&
- iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+ mvm->fw->dbg_dest_tlv &&
+ iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
mutex_unlock(&mvm->mutex);
@@ -859,18 +901,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* the next start() call from mac80211. If restart isn't called
* (no fw restart) scan status will stay busy.
*/
- switch (mvm->scan_status) {
- case IWL_MVM_SCAN_NONE:
- break;
- case IWL_MVM_SCAN_OS:
- ieee80211_scan_completed(mvm->hw, true);
- break;
- case IWL_MVM_SCAN_SCHED:
- /* Sched scan will be restarted by mac80211 in restart_hw. */
- if (!mvm->restart_fw)
- ieee80211_sched_scan_stopped(mvm->hw);
- break;
- }
+ iwl_mvm_report_scan_aborted(mvm);
/*
* If we're restarting already, don't cycle restarts.
@@ -879,7 +910,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* can't recover this since we're already half suspended.
*/
if (!mvm->restart_fw && fw_error) {
- schedule_work(&mvm->fw_error_dump_wk);
+ iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@@ -1140,7 +1171,7 @@ static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
- ieee80211_connection_loss(vif);
+ iwl_mvm_connection_loss(mvm, vif, "D0i3");
}
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1236,6 +1267,10 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+
+ /* the FW might have updated the regdomain */
+ iwl_mvm_update_changed_regdom(mvm);
+
iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 5b43616eeb06..e68a475e3071 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -67,7 +67,7 @@
#include "fw-api.h"
#include "mvm.h"
-/* Maps the driver specific channel width definition to the the fw values */
+/* Maps the driver specific channel width definition to the fw values */
u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
@@ -175,6 +175,10 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (unlikely(mvm->dbgfs_rx_phyinfo))
+ cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+#endif
cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 2620dd0c45f9..d2c6ba9d326b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -66,6 +66,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
@@ -357,7 +358,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
- !mvmvif->pm_enabled || iwl_mvm_tdls_sta_count(mvm, vif))
+ !mvmvif->pm_enabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -491,7 +492,7 @@ void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
ETH_ALEN))
- memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN);
+ eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
}
static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
@@ -638,6 +639,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
if (vifs->ap_vif)
ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
+ /* don't allow PM if any TDLS stations exist */
+ if (iwl_mvm_tdls_sta_count(mvm, NULL))
+ return;
+
/* enable PM on bss if bss stand alone */
if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
bss_mvmvif->pm_enabled = true;
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index dbb2594390e9..509a66d05245 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -172,6 +172,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
}
int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+ bool force_update,
struct ieee80211_vif *disabled_vif)
{
struct iwl_time_quota_cmd cmd = {};
@@ -309,7 +310,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
"zero quota on binding %d\n", i);
}
- if (!send) {
+ if (!send && !force_update) {
/* don't send a practically unchanged command, the firmware has
* to re-initialize a lot of state and that can have an adverse
* impact on it
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 078f24cf4af3..f9928f2c125f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -160,6 +160,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl,
const struct rs_tx_column *next_col)
{
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif;
+
if (!sta->ht_cap.ht_supported)
return false;
@@ -172,6 +175,11 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
return false;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+ return false;
+
return true;
}
@@ -807,6 +815,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
rate->ldpc = true;
if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
rate->stbc = true;
+ if (ucode_rate & RATE_MCS_BF_MSK)
+ rate->bfer = true;
rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
@@ -816,7 +826,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_HT_SISO;
- WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d",
+ rate->stbc, rate->bfer);
} else if (nss == 2) {
rate->type = LQ_HT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -829,7 +841,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_VHT_SISO;
- WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d",
+ rate->stbc, rate->bfer);
} else if (nss == 2) {
rate->type = LQ_VHT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -1008,13 +1022,41 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
rs_get_lower_rate_in_column(lq_sta, rate);
}
-/* Simple function to compare two rate scale table types */
-static inline bool rs_rate_match(struct rs_rate *a,
- struct rs_rate *b)
+/* Check if both rates are identical
+ * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
+ * with a rate indicating STBC/BFER and ANT_AB.
+ */
+static inline bool rs_rate_equal(struct rs_rate *a,
+ struct rs_rate *b,
+ bool allow_ant_mismatch)
+
+{
+ bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
+ (a->bfer == b->bfer);
+
+ if (allow_ant_mismatch) {
+ if (a->stbc || a->bfer) {
+ WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
+ a->stbc, a->bfer, a->ant);
+ ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
+ } else if (b->stbc || b->bfer) {
+ WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
+ b->stbc, b->bfer, b->ant);
+ ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
+ }
+ }
+
+ return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
+ (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
+}
+
+/* Check if both rates share the same column */
+static inline bool rs_rate_column_match(struct rs_rate *a,
+ struct rs_rate *b)
{
bool ant_match;
- if (a->stbc)
+ if (a->stbc || a->bfer)
ant_match = (b->ant == ANT_A || b->ant == ANT_B);
else
ant_match = (a->ant == b->ant);
@@ -1023,16 +1065,35 @@ static inline bool rs_rate_match(struct rs_rate *a,
&& ant_match;
}
-static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
+static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
{
- if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- return RATE_MCS_CHAN_WIDTH_40;
- else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- return RATE_MCS_CHAN_WIDTH_80;
- else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
- return RATE_MCS_CHAN_WIDTH_160;
+ if (is_legacy(rate)) {
+ if (rate->ant == ANT_A)
+ return RS_COLUMN_LEGACY_ANT_A;
- return RATE_MCS_CHAN_WIDTH_20;
+ if (rate->ant == ANT_B)
+ return RS_COLUMN_LEGACY_ANT_B;
+
+ goto err;
+ }
+
+ if (is_siso(rate)) {
+ if (rate->ant == ANT_A || rate->stbc || rate->bfer)
+ return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
+ RS_COLUMN_SISO_ANT_A;
+
+ if (rate->ant == ANT_B)
+ return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
+ RS_COLUMN_SISO_ANT_B;
+
+ goto err;
+ }
+
+ if (is_mimo(rate))
+ return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
+
+err:
+ return RS_COLUMN_INVALID;
}
static u8 rs_get_tid(struct ieee80211_hdr *hdr)
@@ -1055,15 +1116,17 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
{
int legacy_success;
int retries;
- int mac_index, i;
+ int i;
struct iwl_lq_cmd *table;
- enum mac80211_rate_control_flags mac_flags;
- u32 ucode_rate;
- struct rs_rate rate;
+ u32 lq_hwrate;
+ struct rs_rate lq_rate, tx_resp_rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+ u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+ bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS;
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1074,50 +1137,43 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return;
}
-#ifdef CONFIG_MAC80211_DEBUGFS
- /* Disable last tx check if we are debugging with fixed rate */
- if (lq_sta->pers.dbg_fixed_rate) {
- IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
- return;
- }
-#endif
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- /*
- * Ignore this Tx frame response if its initial rate doesn't match
- * that of latest Link Quality command. There may be stragglers
- * from a previous Link Quality command, but we're no longer interested
- * in those; they're either from the "active" mode while we're trying
- * to check "search" mode, or a prior "search" mode after we've moved
- * to a new "search" mode (which might become the new "active" mode).
- */
- table = &lq_sta->lq;
- ucode_rate = le32_to_cpu(table->rs_table[0]);
- rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- if (info->band == IEEE80211_BAND_5GHZ)
- rate.index -= IWL_FIRST_OFDM_RATE;
- mac_flags = info->status.rates[0].flags;
- mac_index = info->status.rates[0].idx;
- /* For HT packets, map MCS to PLCP */
- if (mac_flags & IEEE80211_TX_RC_MCS) {
- /* Remove # of streams */
- mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
- if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
- mac_index++;
- /*
- * mac80211 HT index is always zero-indexed; we need to move
- * HT OFDM rates after CCK rates in 2.4 GHz band
- */
- if (info->band == IEEE80211_BAND_2GHZ)
- mac_index += IWL_FIRST_OFDM_RATE;
- } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
- mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
- if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
- mac_index++;
+ rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* Disable last tx check if we are debugging with fixed rate but
+ * still update tx stats */
+ if (lq_sta->pers.dbg_fixed_rate) {
+ int index = tx_resp_rate.index;
+ enum rs_column column;
+ int attempts, success;
+
+ column = rs_get_column_from_rate(&tx_resp_rate);
+ if (WARN_ONCE(column == RS_COLUMN_INVALID,
+ "Can't map rate 0x%x to column",
+ tx_resp_hwrate))
+ return;
+
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ attempts = info->status.ampdu_len;
+ success = info->status.ampdu_ack_len;
+ } else {
+ attempts = info->status.rates[0].count;
+ success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ }
+
+ lq_sta->pers.tx_stats[column][index].total += attempts;
+ lq_sta->pers.tx_stats[column][index].success += success;
+
+ IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+ tx_resp_hwrate, success, attempts);
+ return;
}
+#endif
if (time_after(jiffies,
(unsigned long)(lq_sta->last_tx +
@@ -1133,21 +1189,23 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
lq_sta->last_tx = jiffies;
+ /* Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ lq_hwrate = le32_to_cpu(table->rs_table[0]);
+ rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
+
/* Here we actually compare this rate to the latest LQ command */
- if ((mac_index < 0) ||
- (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
- (rate.ant != info->status.antenna) ||
- (!!(ucode_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
- (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rate.index != mac_index)) {
+ if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
IWL_DEBUG_RATE(mvm,
- "initial rate %d does not match %d (0x%x)\n",
- mac_index, rate.index, ucode_rate);
+ "initial tx resp rate 0x%x does not match 0x%x\n",
+ tx_resp_hwrate, lq_hwrate);
+
/*
* Since rates mis-match, the last LQ command may have failed.
* After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@@ -1175,14 +1233,14 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
}
- if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) {
+ if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
IWL_DEBUG_RATE(mvm,
"Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
- rs_dump_rate(mvm, &rate, "ACTUAL");
+ rs_dump_rate(mvm, &lq_rate, "ACTUAL");
/*
* no matching table found, let's by-pass the data collection
@@ -1207,9 +1265,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1;
- ucode_rate = le32_to_cpu(table->rs_table[0]);
- rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- rs_collect_tx_data(mvm, lq_sta, curr_tbl, rate.index,
+ rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len,
reduced_txp);
@@ -1221,9 +1277,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
info->status.ampdu_ack_len);
}
} else {
- /*
- * For legacy, update frame history with for each Tx retry.
- */
+ /* For legacy, update frame history for each Tx retry. */
retries = info->status.rates[0].count - 1;
/* HW doesn't send more than 15 retries */
retries = min(retries, 15);
@@ -1232,21 +1286,23 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
- ucode_rate = le32_to_cpu(table->rs_table[i]);
- rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
+ lq_hwrate = le32_to_cpu(table->rs_table[i]);
+ rs_rate_from_ucode_rate(lq_hwrate, info->band,
+ &lq_rate);
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
*/
- if (rs_rate_match(&rate, &curr_tbl->rate))
+ if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
tmp_tbl = curr_tbl;
- else if (rs_rate_match(&rate, &other_tbl->rate))
+ else if (rs_rate_column_match(&lq_rate,
+ &other_tbl->rate))
tmp_tbl = other_tbl;
else
continue;
- rs_collect_tx_data(mvm, lq_sta, tmp_tbl, rate.index, 1,
- i < retries ? 0 : legacy_success,
+ rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
+ 1, i < retries ? 0 : legacy_success,
reduced_txp);
}
@@ -1257,7 +1313,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
}
/* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = ucode_rate;
+ lq_sta->last_rate_n_flags = lq_hwrate;
IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
@@ -1557,9 +1613,9 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
static void rs_update_rate_tbl(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- struct rs_rate *rate)
+ struct iwl_scale_tbl_info *tbl)
{
- rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+ rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
}
@@ -2089,7 +2145,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
rate->type = LQ_NONE;
lq_sta->search_better_tbl = 0;
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
}
return;
}
@@ -2252,7 +2308,7 @@ lq_update:
/* Replace uCode's rate table for the destination station. */
if (update_lq) {
tbl->rate.index = index;
- rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
}
rs_stay_in_table(lq_sta, false);
@@ -2299,8 +2355,7 @@ lq_update:
rs_dump_rate(mvm, &tbl->rate,
"Switch to SEARCH TABLE:");
- rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
} else {
done_search = 1;
}
@@ -2554,6 +2609,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->pers.dbg_fixed_rate = 0;
lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
+ lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
#endif
lq_sta->pers.chains = 0;
memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
@@ -3079,19 +3135,21 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
goto out;
+#ifdef CONFIG_MAC80211_DEBUGFS
/* Check if forcing the decision is configured.
* Note that SISO is forced by not allowing STBC or BFER
*/
- if (lq_sta->ss_force == RS_SS_FORCE_STBC)
+ if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
- else if (lq_sta->ss_force == RS_SS_FORCE_BFER)
+ else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
- if (lq_sta->ss_force != RS_SS_FORCE_NONE) {
+ if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
- lq_sta->ss_force);
+ lq_sta->pers.ss_force);
goto out;
}
+#endif
if (lq_sta->stbc_capable)
ss_params |= LQ_SS_STBC_1SS_ALLOWED;
@@ -3177,7 +3235,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
/*
- * In case of low latency, tell the firwmare to leave a frame in the
+ * In case of low latency, tell the firmware to leave a frame in the
* Tx Fifo so that it can start a transaction in the same TxOP. This
* basically allows the firmware to send bursts.
*/
@@ -3332,6 +3390,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
struct iwl_mvm *mvm;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct rs_rate *rate = &tbl->rate;
+ u32 ss_params;
mvm = lq_sta->pers.drv;
buff = kmalloc(2048, GFP_KERNEL);
if (!buff)
@@ -3351,16 +3410,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(is_legacy(rate)) ? "legacy" :
is_vht(rate) ? "VHT" : "HT");
if (!is_legacy(rate)) {
- desc += sprintf(buff+desc, " %s",
+ desc += sprintf(buff + desc, " %s",
(is_siso(rate)) ? "SISO" : "MIMO2");
- desc += sprintf(buff+desc, " %s",
- (is_ht20(rate)) ? "20MHz" :
- (is_ht40(rate)) ? "40MHz" :
- (is_ht80(rate)) ? "80Mhz" : "BAD BW");
- desc += sprintf(buff+desc, " %s %s %s\n",
- (rate->sgi) ? "SGI" : "NGI",
- (rate->ldpc) ? "LDPC" : "BCC",
- (lq_sta->is_agg) ? "AGG on" : "");
+ desc += sprintf(buff + desc, " %s",
+ (is_ht20(rate)) ? "20MHz" :
+ (is_ht40(rate)) ? "40MHz" :
+ (is_ht80(rate)) ? "80Mhz" : "BAD BW");
+ desc += sprintf(buff + desc, " %s %s %s\n",
+ (rate->sgi) ? "SGI" : "NGI",
+ (rate->ldpc) ? "LDPC" : "BCC",
+ (lq_sta->is_agg) ? "AGG on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
@@ -3378,6 +3437,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.agg_frame_cnt_limit);
desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+ ss_params = le32_to_cpu(lq_sta->lq.ss_params);
+ desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+ (ss_params & LQ_SS_PARAMS_VALID) ?
+ "VALID" : "INVALID",
+ (ss_params & LQ_SS_BFER_ALLOWED) ?
+ ", BFER" : "",
+ (ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
+ ", STBC" : "",
+ (ss_params & LQ_SS_FORCE) ?
+ ", FORCE" : "");
desc += sprintf(buff+desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.initial_rate_index[0],
@@ -3554,7 +3623,7 @@ static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
};
pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
- ss_force_name[lq_sta->ss_force]);
+ ss_force_name[lq_sta->pers.ss_force]);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -3565,12 +3634,12 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
int ret = 0;
if (!strncmp("none", buf, 4)) {
- lq_sta->ss_force = RS_SS_FORCE_NONE;
+ lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
} else if (!strncmp("siso", buf, 4)) {
- lq_sta->ss_force = RS_SS_FORCE_SISO;
+ lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
} else if (!strncmp("stbc", buf, 4)) {
if (lq_sta->stbc_capable) {
- lq_sta->ss_force = RS_SS_FORCE_STBC;
+ lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
} else {
IWL_ERR(mvm,
"can't force STBC. peer doesn't support\n");
@@ -3578,7 +3647,7 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
}
} else if (!strncmp("bfer", buf, 4)) {
if (lq_sta->bfer_capable) {
- lq_sta->ss_force = RS_SS_FORCE_BFER;
+ lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
} else {
IWL_ERR(mvm,
"can't force BFER. peer doesn't support\n");
@@ -3673,7 +3742,7 @@ void iwl_mvm_rate_control_unregister(void)
/**
* iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
- * Tx protection, according to this rquest and previous requests,
+ * Tx protection, according to this request and previous requests,
* and send the LQ command.
* @mvmsta: The station
* @enable: Enable Tx protection?
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index dc4ef3dfafe1..e4aa9346a231 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -170,6 +170,7 @@ struct rs_rate {
bool sgi;
bool ldpc;
bool stbc;
+ bool bfer;
};
@@ -331,14 +332,14 @@ struct iwl_lq_sta {
/* tx power reduce for this sta */
int tpc_reduce;
- /* force STBC/BFER/SISO for testing */
- enum rs_ss_force_opt ss_force;
-
/* persistent fields - initialized only once - keep last! */
struct lq_sta_pers {
#ifdef CONFIG_MAC80211_DEBUGFS
u32 dbg_fixed_rate;
u8 dbg_fixed_txp_reduction;
+
+ /* force STBC/BFER/SISO for testing */
+ enum rs_ss_force_opt ss_force;
#endif
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index f922131b4eab..d6314ddf57b5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -345,6 +345,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_mvm_sta *mvmsta;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+
+ if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
+ ieee80211_is_beacon(hdr->frame_control)) {
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+ bool trig_check;
+ s32 rssi;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw,
+ FW_DBG_TRIGGER_RSSI);
+ rssi_trig = (void *)trig->data;
+ rssi = le32_to_cpu(rssi_trig->rssi);
+
+ trig_check =
+ iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
+ trig);
+ if (trig_check && rx_status->signal < rssi)
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
+ }
}
rcu_read_unlock();
@@ -416,41 +435,54 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
}
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
- struct iwl_notif_statistics *stats)
+ struct mvm_statistics_rx *rx_stats)
{
- /*
- * NOTE FW aggregates the statistics - BUT the statistics are cleared
- * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
- * bit set.
- */
lockdep_assert_held(&mvm->mutex);
- memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
+
+ mvm->rx_stats = *rx_stats;
}
struct iwl_mvm_stat_data {
- struct iwl_notif_statistics *stats;
struct iwl_mvm *mvm;
+ __le32 mac_id;
+ __s8 beacon_filter_average_energy;
+ struct mvm_statistics_general_v8 *general;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_stat_data *data = _data;
- struct iwl_notif_statistics *stats = data->stats;
struct iwl_mvm *mvm = data->mvm;
- int sig = -stats->general.beacon_filter_average_energy;
+ int sig = -data->beacon_filter_average_energy;
int last_event;
int thold = vif->bss_conf.cqm_rssi_thold;
int hyst = vif->bss_conf.cqm_rssi_hyst;
- u16 id = le32_to_cpu(stats->rx.general.mac_id);
+ u16 id = le32_to_cpu(data->mac_id);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ /* This doesn't need the MAC ID check since it's not taking the
+ * data copied into the "data" struct, but rather the data from
+ * the notification directly.
+ */
+ if (data->general) {
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
+ mvmvif->beacon_stats.avg_signal =
+ -data->general->beacon_average_energy[mvmvif->id];
+ }
+
if (mvmvif->id != id)
return;
if (vif->type != NL80211_IFTYPE_STATION)
return;
+ if (sig == 0) {
+ IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
+ return;
+ }
+
mvmvif->bf_data.ave_beacon_signal = sig;
/* BT Coex */
@@ -500,34 +532,101 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
}
}
-/*
- * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
- *
- * TODO: This handler is implemented partially.
- */
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static inline void
+iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_notif_statistics *stats = (void *)&pkt->data;
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_stats *trig_stats;
+ u32 trig_offset, trig_thold;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
+ trig_stats = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+ return;
+
+ trig_offset = le32_to_cpu(trig_stats->stop_offset);
+ trig_thold = le32_to_cpu(trig_stats->stop_threshold);
+
+ if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
+ return;
+
+ if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
+}
+
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ size_t v8_len = sizeof(struct iwl_notif_statistics_v8);
+ size_t v10_len = sizeof(struct iwl_notif_statistics_v10);
struct iwl_mvm_stat_data data = {
- .stats = stats,
.mvm = mvm,
};
+ u32 temperature;
+
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+ struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) != v10_len)
+ goto invalid;
+
+ temperature = le32_to_cpu(stats->general.radio_temperature);
+ data.mac_id = stats->rx.general.mac_id;
+ data.beacon_filter_average_energy =
+ stats->general.beacon_filter_average_energy;
+
+ iwl_mvm_update_rx_statistics(mvm, &stats->rx);
+
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
+ mvm->radio_stats.on_time_rf =
+ le64_to_cpu(stats->general.on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->general.on_time_scan);
+
+ data.general = &stats->general;
+ } else {
+ struct iwl_notif_statistics_v8 *stats = (void *)&pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) != v8_len)
+ goto invalid;
+
+ temperature = le32_to_cpu(stats->general.radio_temperature);
+ data.mac_id = stats->rx.general.mac_id;
+ data.beacon_filter_average_energy =
+ stats->general.beacon_filter_average_energy;
+
+ iwl_mvm_update_rx_statistics(mvm, &stats->rx);
+ }
+
+ iwl_mvm_rx_stats_check_trigger(mvm, pkt);
/* Only handle rx statistics temperature changes if async temp
* notifications are not supported
*/
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
- iwl_mvm_tt_temp_changed(mvm,
- le32_to_cpu(stats->general.radio_temperature));
-
- iwl_mvm_update_rx_statistics(mvm, stats);
+ iwl_mvm_tt_temp_changed(mvm, temperature);
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
+ return;
+ invalid:
+ IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
+ iwl_rx_packet_payload_len(pkt));
+}
+
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
return 0;
}
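The new iwl_mvm_handle_rx_statistics() above selects the v8 or v10 notification layout from a capability flag and rejects payloads whose length does not match that layout. A standalone sketch of the same dispatch pattern, with simplified stand-in structs rather than the real iwl_notif_statistics_v8/v10 layouts:

/* Version dispatch by capability flag plus payload-length validation.
 * Struct members are simplified placeholders.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats_v8  { uint32_t mac_id; uint32_t temperature; };
struct stats_v10 { uint32_t mac_id; uint32_t temperature; uint64_t rx_time; };

static int handle_stats(const void *payload, size_t len, int has_v10_api,
                        uint32_t *temperature)
{
        if (has_v10_api) {
                struct stats_v10 s;

                if (len != sizeof(s))
                        return -1;      /* invalid statistics size */
                memcpy(&s, payload, sizeof(s));
                *temperature = s.temperature;
        } else {
                struct stats_v8 s;

                if (len != sizeof(s))
                        return -1;      /* invalid statistics size */
                memcpy(&s, payload, sizeof(s));
                *temperature = s.temperature;
        }
        return 0;
}

int main(void)
{
        struct stats_v8 s = { .mac_id = 0, .temperature = 35 };
        uint32_t temp;

        if (!handle_stats(&s, sizeof(s), 0, &temp))
                printf("temperature %u\n", temp);
        return 0;
}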
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c47c8051da77..1075a213bd6a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -82,6 +82,7 @@ struct iwl_mvm_scan_params {
struct _dwell {
u16 passive;
u16 active;
+ u16 fragmented;
} dwell[IEEE80211_NUM_BANDS];
};
@@ -191,101 +192,6 @@ static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
-static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
- struct cfg80211_scan_request *req,
- bool basic_ssid,
- struct iwl_mvm_scan_params *params)
-{
- struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
- (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
- int i;
- int type = BIT(req->n_ssids) - 1;
- enum ieee80211_band band = req->channels[0]->band;
-
- if (!basic_ssid)
- type |= BIT(req->n_ssids);
-
- for (i = 0; i < cmd->channel_count; i++) {
- chan->channel = cpu_to_le16(req->channels[i]->hw_value);
- chan->type = cpu_to_le32(type);
- if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
- chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
- chan->active_dwell = cpu_to_le16(params->dwell[band].active);
- chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
- chan->iteration_count = cpu_to_le16(1);
- chan++;
- }
-}
-
-/*
- * Fill in probe request with the following parameters:
- * TA is our vif HW address, which mac80211 ensures we have.
- * Packet is broadcasted, so this is both SA and DA.
- * The probe request IE is made out of two: first comes the most prioritized
- * SSID if a directed scan is requested. Second comes whatever extra
- * information was given to us as the scan request IE.
- */
-static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
- int n_ssids, const u8 *ssid, int ssid_len,
- const u8 *band_ie, int band_ie_len,
- const u8 *common_ie, int common_ie_len,
- int left)
-{
- int len = 0;
- u8 *pos = NULL;
-
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= 24;
- if (left < 0)
- return 0;
-
- frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- eth_broadcast_addr(frame->da);
- memcpy(frame->sa, ta, ETH_ALEN);
- eth_broadcast_addr(frame->bssid);
- frame->seq_ctrl = 0;
-
- len += 24;
-
- /* for passive scans, no need to fill anything */
- if (n_ssids == 0)
- return (u16)len;
-
- /* points to the payload of the request */
- pos = &frame->u.probe_req.variable[0];
-
- /* fill in our SSID IE */
- left -= ssid_len + 2;
- if (left < 0)
- return 0;
- *pos++ = WLAN_EID_SSID;
- *pos++ = ssid_len;
- if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
- memcpy(pos, ssid, ssid_len);
- pos += ssid_len;
- }
-
- len += ssid_len + 2;
-
- if (WARN_ON(left < band_ie_len + common_ie_len))
- return len;
-
- if (band_ie && band_ie_len) {
- memcpy(pos, band_ie, band_ie_len);
- pos += band_ie_len;
- len += band_ie_len;
- }
-
- if (common_ie && common_ie_len) {
- memcpy(pos, common_ie, common_ie_len);
- pos += common_ie_len;
- len += common_ie_len;
- }
-
- return (u16)len;
-}
-
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -325,7 +231,7 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
* If there is more than one active interface make
* passive scan more fragmented.
*/
- frag_passive_dwell = (global_cnt < 2) ? 40 : 20;
+ frag_passive_dwell = 40;
params->max_out_time = frag_passive_dwell;
} else {
params->suspend_time = 120;
@@ -358,10 +264,10 @@ not_bound:
for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
if (params->passive_fragmented)
- params->dwell[band].passive = frag_passive_dwell;
- else
- params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(mvm, band);
+ params->dwell[band].fragmented = frag_passive_dwell;
+
+ params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
+ band);
params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids);
}
@@ -379,20 +285,11 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
{
int max_probe_len;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
- else
- max_probe_len = mvm->fw->ucode_capa.max_probe_length;
+ max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
/* we create the 802.11 header and SSID element */
max_probe_len -= 24 + 2;
- /* basic ssid is added only for hw_scan with and old api */
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) &&
- !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) &&
- !is_sched_scan)
- max_probe_len -= 32;
-
/* DS parameter set element is added on 2.4GHZ band if required */
if (iwl_mvm_rrm_scan_needed(mvm))
max_probe_len -= 3;
@@ -404,9 +301,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
{
int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN))
- return max_ie_len;
-
/* TODO: [BUG] This function should return the maximum allowed size of
* scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
* in the same command. So the correct implementation of this function
@@ -420,135 +314,12 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
return max_ie_len;
}
-int iwl_mvm_scan_request(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_host_cmd hcmd = {
- .id = SCAN_REQUEST_CMD,
- .len = { 0, },
- .data = { mvm->scan_cmd, },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- struct iwl_scan_cmd *cmd = mvm->scan_cmd;
- int ret;
- u32 status;
- int ssid_len = 0;
- u8 *ssid = NULL;
- bool basic_ssid = !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
- struct iwl_mvm_scan_params params = {};
-
- lockdep_assert_held(&mvm->mutex);
-
- /* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
- return -ENOMEM;
-
- IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
- mvm->scan_status = IWL_MVM_SCAN_OS;
- memset(cmd, 0, ksize(cmd));
-
- cmd->channel_count = (u8)req->n_channels;
- cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
- cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
- cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
-
- iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
- cmd->max_out_time = cpu_to_le32(params.max_out_time);
- cmd->suspend_time = cpu_to_le32(params.suspend_time);
- if (params.passive_fragmented)
- cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
-
- cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
- cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
- MAC_FILTER_IN_BEACON);
-
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
- else
- cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
-
- cmd->repeats = cpu_to_le32(1);
-
- /*
- * If the user asked for passive scan, don't change to active scan if
- * you see any activity on the channel - remain passive.
- */
- if (req->n_ssids > 0) {
- cmd->passive2active = cpu_to_le16(1);
- cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
- if (basic_ssid) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
- }
- } else {
- cmd->passive2active = 0;
- cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
- }
-
- iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
- basic_ssid ? 1 : 0);
-
- cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
- 3 << TX_CMD_FLG_BT_PRIO_POS);
-
- cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
- cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
- cmd->tx_cmd.rate_n_flags =
- iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
- req->no_cck);
-
- cmd->tx_cmd.len =
- cpu_to_le16(iwl_mvm_fill_probe_req(
- (struct ieee80211_mgmt *)cmd->data,
- vif->addr,
- req->n_ssids, ssid, ssid_len,
- req->ie, req->ie_len, NULL, 0,
- mvm->fw->ucode_capa.max_probe_length));
-
- iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
-
- cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
- le16_to_cpu(cmd->tx_cmd.len) +
- (cmd->channel_count * sizeof(struct iwl_scan_channel)));
- hcmd.len[0] = le16_to_cpu(cmd->len);
-
- status = SCAN_RESPONSE_OK;
- ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
- if (!ret && status == SCAN_RESPONSE_OK) {
- IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
- status, ret);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ret = -EIO;
- }
- return ret;
-}
-
-int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_cmd_response *resp = (void *)pkt->data;
-
- IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
- le32_to_cpu(resp->status));
- return 0;
-}
-
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+ struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
IWL_DEBUG_SCAN(mvm,
"Scan offload iteration complete: status=0x%x scanned channels=%d\n",
@@ -556,130 +327,25 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
return 0;
}
-int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scan_complete_notif *notif = (void *)pkt->data;
-
- lockdep_assert_held(&mvm->mutex);
-
- IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
- notif->status, notif->scanned_channels);
-
- if (mvm->scan_status == IWL_MVM_SCAN_OS)
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
-
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
- return 0;
-}
-
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
- !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
- struct iwl_sched_scan_results *notif = (void *)pkt->data;
-
- if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
- return 0;
- }
-
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw);
return 0;
}
-static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt, void *data)
-{
- struct iwl_mvm *mvm =
- container_of(notif_wait, struct iwl_mvm, notif_wait);
- struct iwl_scan_complete_notif *notif;
- u32 *resp;
-
- switch (pkt->hdr.cmd) {
- case SCAN_ABORT_CMD:
- resp = (void *)pkt->data;
- if (*resp == CAN_ABORT_STATUS) {
- IWL_DEBUG_SCAN(mvm,
- "Scan can be aborted, wait until completion\n");
- return false;
- }
-
- /*
- * If scan cannot be aborted, it means that we had a
- * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
- * ieee80211_scan_completed already.
- */
- IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
- *resp);
- return true;
-
- case SCAN_COMPLETE_NOTIFICATION:
- notif = (void *)pkt->data;
- IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
- notif->status);
- return true;
-
- default:
- WARN_ON(1);
- return false;
- };
-}
-
-static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
-{
- struct iwl_notification_wait wait_scan_abort;
- static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
- SCAN_COMPLETE_NOTIFICATION };
- int ret;
-
- iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
- scan_abort_notif,
- ARRAY_SIZE(scan_abort_notif),
- iwl_mvm_scan_abort_notif, NULL);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
- if (ret) {
- IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
- /* mac80211's state will be cleaned in the nic_restart flow */
- goto out_remove_notif;
- }
-
- return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
-
-out_remove_notif:
- iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
- return ret;
-}
-
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u8 status, ebs_status;
+ struct iwl_periodic_scan_complete *scan_notif;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
- struct iwl_periodic_scan_complete *scan_notif;
+ scan_notif = (void *)pkt->data;
- scan_notif = (void *)pkt->data;
- status = scan_notif->status;
- ebs_status = scan_notif->ebs_status;
- } else {
- struct iwl_scan_offload_complete *scan_notif;
-
- scan_notif = (void *)pkt->data;
- status = scan_notif->status;
- ebs_status = scan_notif->ebs_status;
- }
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
@@ -687,9 +353,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
"%s completed, status %s, EBS status %s\n",
mvm->scan_status == IWL_MVM_SCAN_SCHED ?
"Scheduled scan" : "Scan",
- status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted",
- ebs_status == IWL_SCAN_EBS_SUCCESS ?
+ scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
"success" : "failed");
@@ -700,64 +366,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw,
- status == IWL_SCAN_OFFLOAD_ABORTED);
+ scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
- if (ebs_status)
+ if (scan_notif->ebs_status)
mvm->last_ebs_successful = false;
return 0;
}
-static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_ies *ies,
- enum ieee80211_band band,
- struct iwl_tx_cmd *cmd,
- u8 *data)
-{
- u16 cmd_len;
-
- cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
- cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
- cmd->sta_id = mvm->aux_sta.sta_id;
-
- cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
-
- cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
- vif->addr,
- 1, NULL, 0,
- ies->ies[band], ies->len[band],
- ies->common_ies, ies->common_ie_len,
- SCAN_OFFLOAD_PROBE_REQ_SIZE);
- cmd->len = cpu_to_le16(cmd_len);
-}
-
-static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct iwl_scan_offload_cmd *scan,
- struct iwl_mvm_scan_params *params)
-{
- scan->channel_count = req->n_channels;
- scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
- scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
- scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
- scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
-
- scan->max_out_time = cpu_to_le32(params->max_out_time);
- scan->suspend_time = cpu_to_le32(params->suspend_time);
-
- scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
- MAC_FILTER_IN_BEACON);
- scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
- scan->rep_count = cpu_to_le32(1);
-
- if (params->passive_fragmented)
- scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
-}
-
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
{
int i;
@@ -815,127 +433,6 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
}
}
-static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
- struct cfg80211_sched_scan_request *req,
- u8 *channels_buffer,
- enum ieee80211_band band,
- int *head,
- u32 ssid_bitmap,
- struct iwl_mvm_scan_params *params)
-{
- u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
- __le32 *type = (__le32 *)channels_buffer;
- __le16 *channel_number = (__le16 *)(type + n_channels);
- __le16 *iter_count = channel_number + n_channels;
- __le32 *iter_interval = (__le32 *)(iter_count + n_channels);
- u8 *active_dwell = (u8 *)(iter_interval + n_channels);
- u8 *passive_dwell = active_dwell + n_channels;
- int i, index = 0;
-
- for (i = 0; i < req->n_channels; i++) {
- struct ieee80211_channel *chan = req->channels[i];
-
- if (chan->band != band)
- continue;
-
- index = *head;
- (*head)++;
-
- channel_number[index] = cpu_to_le16(chan->hw_value);
- active_dwell[index] = params->dwell[band].active;
- passive_dwell[index] = params->dwell[band].passive;
-
- iter_count[index] = cpu_to_le16(1);
- iter_interval[index] = 0;
-
- if (!(chan->flags & IEEE80211_CHAN_NO_IR))
- type[index] |=
- cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
-
- type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
- IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
-
- if (chan->flags & IEEE80211_CHAN_NO_HT40)
- type[index] |=
- cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
-
- /* scan for all SSIDs from req->ssids */
- type[index] |= cpu_to_le32(ssid_bitmap);
- }
-}
-
-int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies)
-{
- int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
- int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
- int head = 0;
- u32 ssid_bitmap;
- int cmd_len;
- int ret;
- u8 *probes;
- bool basic_ssid = !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
-
- struct iwl_scan_offload_cfg *scan_cfg;
- struct iwl_host_cmd cmd = {
- .id = SCAN_OFFLOAD_CONFIG_CMD,
- };
- struct iwl_mvm_scan_params params = {};
-
- lockdep_assert_held(&mvm->mutex);
-
- cmd_len = sizeof(struct iwl_scan_offload_cfg) +
- mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
- 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
-
- scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
- if (!scan_cfg)
- return -ENOMEM;
-
- probes = scan_cfg->data +
- mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;
-
- iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
- iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
- scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
-
- iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
- &ssid_bitmap, basic_ssid);
- /* build tx frames for supported bands */
- if (band_2ghz) {
- iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
- IEEE80211_BAND_2GHZ,
- &scan_cfg->scan_cmd.tx_cmd[0],
- probes);
- iwl_build_channel_cfg(mvm, req, scan_cfg->data,
- IEEE80211_BAND_2GHZ, &head,
- ssid_bitmap, &params);
- }
- if (band_5ghz) {
- iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
- IEEE80211_BAND_5GHZ,
- &scan_cfg->scan_cmd.tx_cmd[1],
- probes +
- SCAN_OFFLOAD_PROBE_REQ_SIZE);
- iwl_build_channel_cfg(mvm, req, scan_cfg->data,
- IEEE80211_BAND_5GHZ, &head,
- ssid_bitmap, &params);
- }
-
- cmd.data[0] = scan_cfg;
- cmd.len[0] = cmd_len;
- cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
-
- IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- kfree(scan_cfg);
- return ret;
-}
-
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
@@ -1018,33 +515,6 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
return true;
}
-int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
- struct cfg80211_sched_scan_request *req)
-{
- struct iwl_scan_offload_req scan_req = {
- .watchdog = IWL_SCHED_SCAN_WATCHDOG,
-
- .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
- .schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
- .schedule_line[0].full_scan_mul = 1,
-
- .schedule_line[1].iterations = 0xff,
- .schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
- .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
- };
-
- if (iwl_mvm_scan_pass_all(mvm, req))
- scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
-
- if (mvm->last_ebs_successful &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
- scan_req.flags |=
- cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
-
- return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
- sizeof(scan_req), &scan_req);
-}
-
int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -1057,21 +527,12 @@ int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
if (ret)
return ret;
ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
- } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
- mvm->scan_status = IWL_MVM_SCAN_SCHED;
- ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
- if (ret)
- return ret;
- ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
} else {
mvm->scan_status = IWL_MVM_SCAN_SCHED;
- ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
- if (ret)
- return ret;
ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
if (ret)
return ret;
- ret = iwl_mvm_sched_scan_start(mvm, req);
+ ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
}
return ret;
@@ -1088,9 +549,7 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
/* Exit instantly with error when device is not ready
* to receive scan abort command or it does not perform
* scheduled scan currently */
- if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
- (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
- mvm->scan_status != IWL_MVM_SCAN_OS))
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return -EIO;
ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
@@ -1133,13 +592,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
goto out;
}
- if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
- (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
- mvm->scan_status != IWL_MVM_SCAN_OS)) {
- IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
- return 0;
- }
-
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
@@ -1316,7 +768,7 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented)
cmd->fragmented_dwell =
- params->dwell[IEEE80211_BAND_2GHZ].passive;
+ params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
@@ -1483,6 +935,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->n_channels = (u8)req->n_channels;
+ cmd->delay = cpu_to_le32(req->delay);
+
if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
else
@@ -1579,9 +1033,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
return 0;
}
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- return iwl_mvm_scan_offload_stop(mvm, true);
- return iwl_mvm_cancel_regular_scan(mvm);
+ return iwl_mvm_scan_offload_stop(mvm, true);
}
/* UMAC scan API */
@@ -1727,6 +1179,18 @@ static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
return false;
}
+static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type)
+{
+ int i;
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
+ if (mvm->scan_uid[i] & type)
+ return i;
+
+ return i;
+}
+
static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type)
{
@@ -1764,7 +1228,7 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented)
cmd->fragmented_dwell =
- params->dwell[IEEE80211_BAND_2GHZ].passive;
+ params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
@@ -1986,7 +1450,13 @@ int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cpu_to_le16(req->interval / MSEC_PER_SEC);
sec_part->schedule[0].iter_count = 0xff;
- sec_part->delay = 0;
+ if (req->delay > U16_MAX) {
+ IWL_DEBUG_SCAN(mvm,
+ "delay value is > 16-bits, set to max possible\n");
+ sec_part->delay = cpu_to_le16(U16_MAX);
+ } else {
+ sec_part->delay = cpu_to_le16(req->delay);
+ }
iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
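The hunk above saturates the requested scheduled-scan delay because the UMAC command only carries a 16-bit field. A tiny self-contained sketch of that clamp:

/* A 32-bit requested delay clamped to the 16-bit command field. */
#include <stdint.h>
#include <stdio.h>

static uint16_t clamp_scan_delay(uint32_t req_delay)
{
        if (req_delay > UINT16_MAX)
                return UINT16_MAX;      /* log and use the max possible value */
        return (uint16_t)req_delay;
}

int main(void)
{
        printf("%u\n", clamp_scan_delay(70000));        /* prints 65535 */
        return 0;
}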
@@ -2158,14 +1628,59 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_req_umac_tail);
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- return sizeof(struct iwl_scan_req_unified_lmac) +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels +
- sizeof(struct iwl_scan_probe_req);
+ return sizeof(struct iwl_scan_req_unified_lmac) +
+ sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_probe_req);
+}
+
+/*
+ * This function is used in the NIC restart flow to inform mac80211 about
+ * scans that were aborted by the restart flow or by an assert.
+ */
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
+{
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ u32 uid, i;
- return sizeof(struct iwl_scan_cmd) +
- mvm->fw->ucode_capa.max_probe_length +
- mvm->fw->ucode_capa.n_scan_channels *
- sizeof(struct iwl_scan_channel);
+ uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
+ if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS) {
+ ieee80211_scan_completed(mvm->hw, true);
+ mvm->scan_uid[uid] = 0;
+ }
+ uid = iwl_mvm_find_first_scan(mvm,
+ IWL_UMAC_SCAN_UID_SCHED_SCAN);
+ if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS && !mvm->restart_fw) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->scan_uid[uid] = 0;
+ }
+
+ /* We shouldn't have any UIDs still set. Loop over all the
+ * UIDs to make sure there's nothing left there and warn if
+ * any is found.
+ */
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
+ if (WARN_ONCE(mvm->scan_uid[i],
+ "UMAC scan UID %d was not cleaned\n",
+ mvm->scan_uid[i]))
+ mvm->scan_uid[i] = 0;
+ }
+ } else {
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_NONE:
+ break;
+ case IWL_MVM_SCAN_OS:
+ ieee80211_scan_completed(mvm->hw, true);
+ break;
+ case IWL_MVM_SCAN_SCHED:
+ /*
+ * Sched scan will be restarted by mac80211 in
+ * restart_hw, so do not report if FW is about to be
+ * restarted.
+ */
+ if (!mvm->restart_fw)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
+ }
+ }
}
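For the UMAC branch of iwl_mvm_report_scan_aborted() added above, scan UIDs live in a small fixed array and the first slot matching the requested type is reported as aborted and cleared. A simplified model of that lookup (the array size and type bits below are illustrative, not the driver's values):

/* First-match lookup over scan UID slots, as used when reporting
 * aborted scans after a firmware restart.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_SIMULTANEOUS_SCANS 4
#define UID_REG_SCAN   (1u << 8)
#define UID_SCHED_SCAN (1u << 9)

static int find_first_scan(const uint32_t *uids, uint32_t type)
{
        int i;

        for (i = 0; i < MAX_SIMULTANEOUS_SCANS; i++)
                if (uids[i] & type)
                        return i;

        return i;       /* == MAX_SIMULTANEOUS_SCANS when nothing matches */
}

int main(void)
{
        uint32_t uids[MAX_SIMULTANEOUS_SCANS] = { 0, UID_REG_SCAN | 3, 0, 0 };
        int idx = find_first_scan(uids, UID_REG_SCAN);

        if (idx < MAX_SIMULTANEOUS_SCANS) {
                printf("reporting scan uid 0x%x as aborted\n", uids[idx]);
                uids[idx] = 0;  /* slot is free again */
        }
        return 0;
}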
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 7eb78e2c240a..b0f59fdd287c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -99,7 +99,35 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
/*
* Aging and idle timeouts for the different possible scenarios
- * in SF_FULL_ON state.
+ * in default configuration
+ */
+static const
+__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+ {
+ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_BA_AGING_TIMER_DEF),
+ cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
+ cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
+ },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
*/
static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
{
@@ -124,7 +152,8 @@ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
},
};
-static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
+static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
+ struct iwl_sf_cfg_cmd *sf_cmd,
struct ieee80211_sta *sta)
{
int i, j, watermark;
@@ -163,24 +192,38 @@ static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
}
}
- BUILD_BUG_ON(sizeof(sf_full_timeout) !=
- sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
- memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
- sizeof(sf_full_timeout));
+ if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+ BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+ sizeof(__le32) * SF_NUM_SCENARIO *
+ SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+ sizeof(sf_full_timeout));
+ } else {
+ BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
+ sizeof(__le32) * SF_NUM_SCENARIO *
+ SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
+ sizeof(sf_full_timeout_def));
+ }
+
}
static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
enum iwl_sf_state new_state)
{
struct iwl_sf_cfg_cmd sf_cmd = {
- .state = cpu_to_le32(new_state),
+ .state = cpu_to_le32(SF_FULL_ON),
};
struct ieee80211_sta *sta;
int ret = 0;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF &&
- mvm->cfg->disable_dummy_notification)
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
+ sf_cmd.state = cpu_to_le32(new_state);
+
+ if (mvm->cfg->disable_dummy_notification)
sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
/*
@@ -192,6 +235,8 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
switch (new_state) {
case SF_UNINIT:
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
case SF_FULL_ON:
if (sta_id == IWL_MVM_STATION_COUNT) {
@@ -206,11 +251,11 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
rcu_read_unlock();
return -EINVAL;
}
- iwl_mvm_fill_sf_command(&sf_cmd, sta);
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
rcu_read_unlock();
break;
case SF_INIT_OFF:
- iwl_mvm_fill_sf_command(&sf_cmd, NULL);
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
default:
WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
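The sf.c changes above add a second timeout table and pick between the two based on whether a station is bound and on the firmware API version (>= 13 in the hunk). A hedged sketch of that selection, with placeholder timeout values and the API threshold taken from the hunk rather than from any header:

/* Table selection: single-BSS timeouts when a station is bound or the
 * firmware is old, default timeouts otherwise.  Values are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_SCENARIO 5
#define NUM_TIMEOUT_TYPES 2

static const uint32_t full_timeout[NUM_SCENARIO][NUM_TIMEOUT_TYPES] = {
        { 100, 50 }, { 100, 50 }, { 60, 30 }, { 60, 30 }, { 40, 20 },
};
static const uint32_t full_timeout_def[NUM_SCENARIO][NUM_TIMEOUT_TYPES] = {
        { 200, 100 }, { 200, 100 }, { 120, 60 }, { 120, 60 }, { 80, 40 },
};

static void fill_timeouts(uint32_t dst[NUM_SCENARIO][NUM_TIMEOUT_TYPES],
                          int have_sta, int ucode_api)
{
        if (have_sta || ucode_api < 13)
                memcpy(dst, full_timeout, sizeof(full_timeout));
        else
                memcpy(dst, full_timeout_def, sizeof(full_timeout_def));
}

int main(void)
{
        uint32_t out[NUM_SCENARIO][NUM_TIMEOUT_TYPES];

        fill_timeouts(out, 0, 13);      /* no bound station, new API */
        printf("aging timer for scenario 0: %u\n", out[0][0]);
        return 0;
}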
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 5c23cddaaae3..1845b79487c8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -209,9 +209,8 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
{
unsigned long used_hw_queues;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
- mvm->cfg->base_params->wd_timeout :
- IWL_WATCHDOG_DISABLED;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
u32 ac;
lockdep_assert_held(&mvm->mutex);
@@ -273,7 +272,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
else
sta_id = mvm_sta->sta_id;
- if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
+ if (sta_id == IWL_MVM_STATION_COUNT)
return -ENOSPC;
spin_lock_init(&mvm_sta->lock);
@@ -491,8 +490,18 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ if (ret)
+ return ret;
/* flush its queues here since we are freeing mvm_sta */
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+ if (ret)
+ return ret;
+ ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
+ mvm_sta->tfd_queue_msk);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
@@ -971,9 +980,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
- unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
- mvm->cfg->base_params->wd_timeout :
- IWL_WATCHDOG_DISABLED;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
int queue, fifo, ret;
u16 ssn;
@@ -1120,8 +1128,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
spin_unlock_bh(&mvmsta->lock);
if (old_state >= IWL_AGG_ON) {
+ iwl_mvm_drain_sta(mvm, mvmsta, true);
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ iwl_trans_wait_tx_queue_empty(mvm->trans,
+ mvmsta->tfd_queue_msk);
+ iwl_mvm_drain_sta(mvm, mvmsta, false);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
@@ -1681,9 +1693,6 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
};
int ret;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
- return;
-
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
@@ -1705,8 +1714,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
mvm_sta->disable_tx = disable;
/*
- * Tell mac80211 to start/stop queueing tx for this station,
- * but don't stop queueing if there are still pending frames
+ * Tell mac80211 to start/stop queuing tx for this station,
+ * but don't stop queuing if there are still pending frames
* for this station.
*/
if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index d8f48975ad08..748f5dc3f9f4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -150,7 +150,7 @@ struct iwl_mvm_vif;
* DOC: station table - AP Station in STA mode
*
* %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
- * %ap_sta_id. To get the point to the coresponsding %ieee80211_sta,
+ * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
* &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
* the AP station from the fw before setting the MAC context as unassociated.
* Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
@@ -209,14 +209,14 @@ struct iwl_mvm_vif;
* When a trigger frame is received, mac80211 tells the driver to send frames
* from the AMPDU queues or sends frames to non-aggregation queues itself,
* depending on which ACs are delivery-enabled and what TID has frames to
- * transmit. Note that mac80211 has all the knowledege since all the non-agg
+ * transmit. Note that mac80211 has all the knowledge since all the non-agg
* frames are buffered / filtered, and the driver tells mac80211 about agg
* frames). The driver needs to tell the fw to let frames out even if the
* station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
*
* When we receive a frame from that station with PM bit unset, the driver
* needs to let the fw know that this station isn't asleep any more. This is
- * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
* station's wakeup.
*
* For a GO, the Service Period might be cut short due to an absence period
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 4b81c0bf63b0..fd7b0d36f9a6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -119,7 +119,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
/*
* Flush the offchannel queue -- this is called when the time
- * event finishes or is cancelled, so that frames queued for it
+ * event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
* We have to send the command asynchronously since this cannot
@@ -187,7 +187,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
return false;
if (errmsg)
IWL_ERR(mvm, "%s\n", errmsg);
- ieee80211_connection_loss(vif);
+
+ iwl_mvm_connection_loss(mvm, vif, errmsg);
return true;
}
@@ -196,19 +197,24 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
struct iwl_time_event_notif *notif)
{
- if (!le32_to_cpu(notif->status)) {
- if (te_data->vif->type == NL80211_IFTYPE_STATION)
- ieee80211_connection_loss(te_data->vif);
+ struct ieee80211_vif *vif = te_data->vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!notif->status)
IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
- iwl_mvm_te_clear_data(mvm, te_data);
- return;
- }
switch (te_data->vif->type) {
case NL80211_IFTYPE_AP:
+ if (!notif->status)
+ mvmvif->csa_failed = true;
iwl_mvm_csa_noa_start(mvm);
break;
case NL80211_IFTYPE_STATION:
+ if (!notif->status) {
+ iwl_mvm_connection_loss(mvm, vif,
+ "CSA TE failed to start");
+ break;
+ }
iwl_mvm_csa_client_absent(mvm, te_data->vif);
ieee80211_chswitch_done(te_data->vif, true);
break;
@@ -222,6 +228,44 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
}
+static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
+ struct iwl_time_event_notif *notif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_time_event *te_trig;
+ int i;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
+ te_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
+ u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
+ u32 trig_action_bitmap =
+ le32_to_cpu(te_trig->time_events[i].action_bitmap);
+ u32 trig_status_bitmap =
+ le32_to_cpu(te_trig->time_events[i].status_bitmap);
+
+ if (trig_te_id != te_data->id ||
+ !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
+ !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
+ continue;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "Time event %d Action 0x%x received status: %d",
+ te_data->id,
+ le32_to_cpu(notif->action),
+ le32_to_cpu(notif->status));
+ break;
+ }
+}
+
/*
* Handles a FW notification for an event that is known to the driver.
*
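The iwl_mvm_te_check_trigger() helper added above fires a firmware debug collection only when the time-event id matches and both the notification's action and status fall inside the trigger's bitmaps. A standalone sketch of that matching rule, with a simplified stand-in for the TLV entry layout:

/* Trigger matching: id equality plus action/status bitmap membership. */
#include <stdint.h>
#include <stdio.h>

struct te_trigger_entry {
        uint32_t id;
        uint32_t action_bitmap;
        uint32_t status_bitmap;
};

static int te_trigger_matches(const struct te_trigger_entry *e,
                              uint32_t te_id, uint32_t action, uint32_t status)
{
        if (e->id != te_id)
                return 0;
        if (!(e->action_bitmap & action))
                return 0;
        if (!(e->status_bitmap & (1u << status)))
                return 0;
        return 1;
}

int main(void)
{
        struct te_trigger_entry e = {
                .id = 7,
                .action_bitmap = 0x3,   /* start | end */
                .status_bitmap = 0x1,   /* only status 0 (failure) */
        };

        printf("match: %d\n", te_trigger_matches(&e, 7, 0x1, 0));
        return 0;
}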
@@ -239,6 +283,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
le32_to_cpu(notif->unique_id),
le32_to_cpu(notif->action));
+ iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
/*
* The FW sends the start/end time event notifications even for events
* that it fails to schedule. This is indicated in the status field of
@@ -248,11 +294,16 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* events in the system).
*/
if (!le32_to_cpu(notif->status)) {
- bool start = le32_to_cpu(notif->action) &
- TE_V2_NOTIF_HOST_EVENT_START;
- IWL_WARN(mvm, "Time Event %s notification failure\n",
- start ? "start" : "end");
- if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+ const char *msg;
+
+ if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
+ msg = "Time Event start notification failure";
+ else
+ msg = "Time Event end notification failure";
+
+ IWL_DEBUG_TE(mvm, "%s\n", msg);
+
+ if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
iwl_mvm_te_clear_data(mvm, te_data);
return;
}
@@ -263,17 +314,23 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
"TE ended - current time %lu, estimated end %lu\n",
jiffies, te_data->end_jiffies);
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ switch (te_data->vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
iwl_mvm_roc_finished(mvm);
+ break;
+ case NL80211_IFTYPE_STATION:
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ "No association and the time event is over already...");
+ break;
+ default:
+ break;
}
- /*
- * By now, we should have finished association
- * and know the dtim period.
- */
- iwl_mvm_te_check_disconnect(mvm, te_data->vif,
- "No association and the time event is over already...");
iwl_mvm_te_clear_data(mvm, te_data);
} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
te_data->running = true;
@@ -309,6 +366,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
if (!aux_roc_te) /* Not a Aux ROC time event */
return -EINVAL;
+ iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
if (!le32_to_cpu(notif->status)) {
IWL_DEBUG_TE(mvm,
"ERROR: Aux ROC Time Event %s notification failure\n",
@@ -763,7 +822,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
* Iterate over the list of aux roc time events and find the time
* event that is associated with a BSS interface.
* This assumes that a BSS interface can have only a single time
- * event at any given time and this time event coresponds to a ROC
+ * event at any given time and this time event corresponds to a ROC
* request
*/
list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index 6f6b35db3ab8..de4fbc6d57f1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -147,7 +147,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
* @vif: the virtual interface for which the session is issued
*
* This functions cancels the session protection which is an act of good
- * citizenship. If it is not needed any more it should be cancelled because
+ * citizenship. If it is not needed any more it should be canceled because
* the other bindings wait for the medium during that time.
* This function doesn't sleep.
*/
@@ -162,7 +162,7 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
/**
- * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionlity
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
* @mvm: the mvm component
* @vif: the virtual interface for which the roc is requested. It is assumed
* that the vif type is NL80211_IFTYPE_P2P_DEVICE
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 96a05406babf..ef32e177f662 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -664,6 +664,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->status.rates[0].count = tx_resp->failure_frame + 1;
iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
info);
+ info->status.status_driver_data[1] =
+ (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
/* Single frame failure in an AMPDU queue => send BAR */
if (txq_id >= mvm->first_agg_queue &&
@@ -909,6 +911,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc;
+ info->status.status_driver_data[1] =
+ (void *)(uintptr_t)tid_data->rate_n_flags;
}
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
@@ -1045,6 +1049,14 @@ out:
return 0;
}
+/*
+ * Note that there are transports that buffer frames before they reach
+ * the firmware. This means that after flush_tx_path is called, the
+ * queue might not be empty. The race-free way to handle this is to:
+ * 1) set the station as draining
+ * 2) flush the Tx path
+ * 3) wait for the transport queues to be empty
+ */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
{
int ret;
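The new comment on iwl_mvm_flush_tx_path() spells out a three-step race-free sequence, and the sta.c hunks above follow it when removing a station or flushing an aggregation queue. The sketch below shows only the ordering; the three helpers are stubs standing in for iwl_mvm_drain_sta(), iwl_mvm_flush_tx_path() and iwl_trans_wait_tx_queue_empty():

/* Illustrative ordering only: drain, flush, wait, then undo the drain. */
#include <stdint.h>
#include <stdio.h>

static int drain_sta(int sta_id, int drain)
{
        printf("sta %d: drain=%d\n", sta_id, drain);
        return 0;
}

static int flush_tx_path(uint32_t queue_mask)
{
        printf("flush queues 0x%x\n", queue_mask);
        return 0;
}

static int wait_tx_queue_empty(uint32_t queue_mask)
{
        printf("wait for queues 0x%x to drain\n", queue_mask);
        return 0;
}

static int flush_sta_queues(int sta_id, uint32_t queue_mask)
{
        int ret;

        ret = drain_sta(sta_id, 1);             /* 1) mark the station draining */
        if (ret)
                return ret;
        ret = flush_tx_path(queue_mask);        /* 2) flush the Tx path */
        if (ret)
                return ret;
        ret = wait_tx_queue_empty(queue_mask);  /* 3) wait for the HW queues */
        if (ret)
                return ret;
        return drain_sta(sta_id, 0);            /* finally undo the drain */
}

int main(void)
{
        return flush_sta_queues(2, 0x0f);
}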
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 8decf9953229..bc55a8b82db6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -122,7 +122,7 @@ int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
}
/*
- * We assume that the caller set the status to the sucess value
+ * We assume that the caller set the status to the success value
*/
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
u32 *status)
@@ -332,7 +332,7 @@ static const char *desc_lookup(u32 num)
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
-struct iwl_error_event_table {
+struct iwl_error_event_table_v1 {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 pc; /* program counter */
@@ -377,7 +377,55 @@ struct iwl_error_event_table {
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
-} __packed;
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 gp3; /* GP3 timer register */
+ u32 major; /* uCode version major */
+ u32 minor; /* uCode version minor */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+	u32 u_timestamp;	/* date and time of the compilation */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
/*
* UMAC error struct - relevant starting from family 8000 chip.
@@ -396,11 +444,11 @@ struct iwl_umac_error_event_table {
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
- u32 umac_fw_ver; /* UMAC version */
- u32 umac_fw_api_ver; /* UMAC FW API ver */
+ u32 umac_major;
+ u32 umac_minor;
u32 frame_pointer; /* core register 27*/
u32 stack_pointer; /* core register 28 */
- u32 cmd_header; /* latest host cmd sent to UMAC */
+ u32 cmd_header; /* latest host cmd sent to UMAC */
u32 nic_isr_pref; /* ISR status register */
} __packed;
@@ -441,18 +489,18 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
- IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_fw_ver);
- IWL_ERR(mvm, "0x%08X | umac api version\n", table.umac_fw_api_ver);
+ IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
+ IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
- struct iwl_error_event_table table;
+ struct iwl_error_event_table_v1 table;
u32 base;
base = mvm->error_event_table;
@@ -489,7 +537,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
table.data1, table.data2, table.data3,
table.blink1, table.blink2, table.ilink1,
table.ilink2, table.bcon_time, table.gp1,
- table.gp2, table.gp3, table.ucode_ver,
+ table.gp2, table.gp3, table.ucode_ver, 0,
table.hw_ver, table.brd_ver);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
@@ -530,6 +578,92 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_error_event_table table;
+ u32 base;
+
+ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+ iwl_mvm_dump_nic_error_log_old(mvm);
+ return;
+ }
+
+ base = mvm->error_event_table;
+ if (mvm->cur_ucode == IWL_UCODE_INIT) {
+ if (!base)
+ base = mvm->fw->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = mvm->fw->inst_errlog_ptr;
+ }
+
+ if (base < 0x800000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->cur_ucode == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ /* Do not change this output - scripts rely on it */
+
+ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
+ trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+ table.data1, table.data2, table.data3,
+ table.blink1, table.blink2, table.ilink1,
+ table.ilink2, table.bcon_time, table.gp1,
+ table.gp2, table.gp3, table.major,
+ table.minor, table.hw_ver, table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
+ IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+ IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+ IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
+ IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
+ IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+ IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+ if (mvm->support_umac_log)
+ iwl_mvm_dump_umac_error_log(mvm);
+}
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
@@ -603,7 +737,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
}
/**
- * iwl_mvm_update_smps - Get a requst to change the SMPS mode
+ * iwl_mvm_update_smps - Get a request to change the SMPS mode
* @req_type: The part of the driver who call for a change.
* @smps_requests: The request to change the SMPS mode.
*
@@ -643,6 +777,40 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
+{
+ struct iwl_statistics_cmd scmd = {
+ .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
+ };
+ struct iwl_host_cmd cmd = {
+ .id = STATISTICS_CMD,
+ .len[0] = sizeof(scmd),
+ .data[0] = &scmd,
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+ iwl_free_resp(&cmd);
+
+ if (clear)
+ iwl_mvm_accu_radio_stats(mvm);
+
+ return 0;
+}
+
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+{
+ mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+ mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+ mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+ mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+}
+
static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -689,7 +857,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmvif->low_latency = value;
- res = iwl_mvm_update_quotas(mvm, NULL);
+ res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res)
return res;
@@ -717,25 +885,6 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
return result;
}
-static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
-{
- bool *idle = _data;
-
- if (!vif->bss_conf.idle)
- *idle = false;
-}
-
-bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
-{
- bool idle = true;
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_idle_iter, &idle);
-
- return idle;
-}
-
struct iwl_bss_iter_data {
struct ieee80211_vif *vif;
bool error;
@@ -772,3 +921,71 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
return bss_iter_data.vif;
}
+
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool tdls, bool cmd_q)
+{
+ struct iwl_fw_dbg_trigger_tlv *trigger;
+ struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
+ unsigned int default_timeout =
+ cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
+ return iwlmvm_mod_params.tfd_q_hang_detect ?
+ default_timeout : IWL_WATCHDOG_DISABLED;
+
+ trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
+ txq_timer = (void *)trigger->data;
+
+ if (tdls)
+ return le32_to_cpu(txq_timer->tdls);
+
+ if (cmd_q)
+ return le32_to_cpu(txq_timer->command_queue);
+
+ if (WARN_ON(!vif))
+ return default_timeout;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ return le32_to_cpu(txq_timer->ibss);
+ case NL80211_IFTYPE_STATION:
+ return le32_to_cpu(txq_timer->bss);
+ case NL80211_IFTYPE_AP:
+ return le32_to_cpu(txq_timer->softap);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return le32_to_cpu(txq_timer->p2p_client);
+ case NL80211_IFTYPE_P2P_GO:
+ return le32_to_cpu(txq_timer->p2p_go);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return le32_to_cpu(txq_timer->p2p_device);
+ default:
+ WARN_ON(1);
+ return mvm->cfg->base_params->wd_timeout;
+ }
+}
+
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ goto out;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+ trig_mlme = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ goto out;
+
+ if (trig_mlme->stop_connection_loss &&
+ --trig_mlme->stop_connection_loss)
+ goto out;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg);
+
+out:
+ ieee80211_connection_loss(vif);
+}
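
A hedged usage sketch for the iwl_mvm_get_wd_timeout() helper added above; the wrapper function and its arguments are assumptions for illustration, while iwl_mvm_enable_txq() and its wdg_timeout parameter appear elsewhere in this patch:

/* Illustrative caller only - not part of this patch */
static void example_enable_data_queue(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif,
				      int queue, u16 ssn,
				      const struct iwl_trans_txq_scd_cfg *cfg)
{
	/* non-TDLS data queue (not the command queue): the timeout depends
	 * on the vif type, or on the FW debug trigger if one is configured
	 */
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);

	iwl_mvm_enable_txq(mvm, queue, ssn, cfg, wdg_timeout);
}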
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 686dd301cd53..b18569734922 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -415,10 +415,35 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 8000 Series */
{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index cae0eb8835ce..01996c9d98a7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -217,6 +217,8 @@ struct iwl_pcie_txq_scratch_buf {
* @active: stores if queue is active
* @ampdu: true if this queue is an ampdu queue for an specific RA/TID
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
+ * @frozen: tx stuck queue timer is frozen
+ * @frozen_expiry_remainder: remember how long until the timer fires
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -228,9 +230,11 @@ struct iwl_txq {
dma_addr_t scratchbufs_dma;
struct iwl_pcie_txq_entry *entries;
spinlock_t lock;
+ unsigned long frozen_expiry_remainder;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
bool need_update;
+ bool frozen;
u8 active;
bool ampdu;
unsigned long wd_timeout;
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 7b7e2f223fb2..7ff69c642103 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -600,9 +600,11 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
break;
- IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
- rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
- pkt->hdr.cmd);
+ IWL_DEBUG_RX(trans,
+ "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
+ rxcb._offset,
+ get_cmd_string(trans_pcie, pkt->hdr.cmd),
+ pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 69935aa5a1b3..47bbf573fdc8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,8 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct page *page;
+ struct page *page = NULL;
dma_addr_t phys;
u32 size;
u8 power;
@@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, phys)) {
__free_pages(page, order);
+ page = NULL;
continue;
}
IWL_INFO(trans,
@@ -682,10 +683,51 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
- const struct fw_img *image,
- int cpu,
- int *first_ucode_section)
+/*
+ * The driver takes ownership of the secure machine before FW load
+ * to prevent a race with the BT load.
+ * W/A for a ROM bug (should be removed in the next Si step).
+ */
+static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+{
+ u32 val, loop = 1000;
+
+ /*
+ * Check the RSA semaphore is accessible.
+ * If the HW isn't locked and the rsa semaphore isn't accessible,
+ * we are in trouble.
+ */
+ val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+ if (val & (BIT(1) | BIT(17))) {
+ IWL_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
+ return 0;
+ }
+
+ /* take ownership on the AUX IF */
+ iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
+ iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
+
+ do {
+ iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
+ val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
+ if (val == 0x1) {
+ iwl_write_prph(trans, RSA_ENABLE, 0);
+ return 0;
+ }
+
+ udelay(10);
+ loop--;
+ } while (loop > 0);
+
+ IWL_ERR(trans, "Failed to take ownership on secure machine\n");
+ return -EIO;
+}
+
+static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
int shift_param;
int i, ret = 0, sec_num = 0x1;
@@ -880,64 +922,50 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
}
/* release CPU reset */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
- else
- iwl_write32(trans, CSR_RESET, 0);
+ iwl_write32(trans, CSR_RESET, 0);
return 0;
}
-static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
- const struct fw_img *image)
+static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
+ const struct fw_img *image)
{
int ret = 0;
int first_ucode_section;
- u32 reg;
IWL_DEBUG_FW(trans, "working with %s CPU\n",
image->is_dual_cpus ? "Dual" : "Single");
+ if (trans->dbg_dest_tlv)
+ iwl_pcie_apply_destination(trans);
+
+ /* TODO: remove in the next Si step */
+ ret = iwl_pcie_rsa_race_bug_wa(trans);
+ if (ret)
+ return ret;
+
/* configure the ucode to be ready to get the secured image */
/* release CPU reset */
iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
/* load to FW the binary Secured sections of CPU1 */
- ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 1,
- &first_ucode_section);
+ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
/* load to FW the binary sections of CPU2 */
- ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 2,
- &first_ucode_section);
+ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+ &first_ucode_section);
if (ret)
return ret;
- if (trans->dbg_dest_tlv)
- iwl_pcie_apply_destination(trans);
-
- /* wait for image verification to complete */
- ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
- LMPM_SECURE_BOOT_STATUS_SUCCESS,
- LMPM_SECURE_BOOT_STATUS_SUCCESS,
- LMPM_SECURE_TIME_OUT);
- if (ret < 0) {
- reg = iwl_read_prph(trans,
- LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0);
-
- IWL_ERR(trans, "Timeout on secure boot process, reg = %x\n",
- reg);
- return ret;
- }
-
return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
@@ -967,9 +995,6 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
return ret;
}
- /* init ref_count to 1 (should be cleared when ucode is loaded) */
- trans_pcie->ref_count = 1;
-
/* make sure rfkill handshake bits are cleared */
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
@@ -984,9 +1009,8 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
- (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP))
- return iwl_pcie_load_given_ucode_8000b(trans, fw);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ return iwl_pcie_load_given_ucode_8000(trans, fw);
else
return iwl_pcie_load_given_ucode(trans, fw);
}
@@ -997,7 +1021,7 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_pcie_tx_start(trans, scd_addr);
}
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill, was_hw_rfkill;
@@ -1092,7 +1116,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
- iwl_trans_pcie_stop_device(trans);
+ iwl_trans_pcie_stop_device(trans, true);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -1177,7 +1201,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
bool hw_rfkill;
int err;
@@ -1288,6 +1312,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
+ /* init ref_count to 1 (should be cleared when ucode is loaded) */
+ trans_pcie->ref_count = 1;
+
/* Initialize NAPI here - it should be before registering to mac80211
* in the opmode but after the HW struct is allocated.
* As this function may be called again in some corner cases don't
@@ -1462,6 +1489,60 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
return ret;
}
+static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs,
+ bool freeze)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue;
+
+ for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+ struct iwl_txq *txq = &trans_pcie->txq[queue];
+ unsigned long now;
+
+ spin_lock_bh(&txq->lock);
+
+ now = jiffies;
+
+ if (txq->frozen == freeze)
+ goto next_queue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+ freeze ? "Freezing" : "Waking", queue);
+
+ txq->frozen = freeze;
+
+ if (txq->q.read_ptr == txq->q.write_ptr)
+ goto next_queue;
+
+ if (freeze) {
+ if (unlikely(time_after(now,
+ txq->stuck_timer.expires))) {
+ /*
+ * The timer should have fired, maybe it is
+ * spinning right now on the lock.
+ */
+ goto next_queue;
+ }
+ /* remember how long until the timer fires */
+ txq->frozen_expiry_remainder =
+ txq->stuck_timer.expires - now;
+ del_timer(&txq->stuck_timer);
+ goto next_queue;
+ }
+
+ /*
+ * Wake a non-empty queue -> arm timer with the
+ * remainder before it froze
+ */
+ mod_timer(&txq->stuck_timer,
+ now + txq->frozen_expiry_remainder);
+
+next_queue:
+ spin_unlock_bh(&txq->lock);
+ }
+}
+
#define IWL_FLUSH_WAIT_MS 2000
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
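
A minimal usage sketch for the freeze hook added above, assuming a transport-layer wrapper (called iwl_trans_freeze_txq_timer() here) that simply forwards to the freeze_txq_timer op registered later in this patch; the caller is hypothetical:

static void example_sta_ps_transition(struct iwl_trans *trans,
				      unsigned long sta_txq_bm, bool asleep)
{
	/*
	 * While the peer sleeps its queues legitimately stall, so freeze the
	 * stuck-queue watchdog; on wake, re-arm it with the remainder saved
	 * in txq->frozen_expiry_remainder.
	 */
	/* assumed wrapper around trans->ops->freeze_txq_timer() */
	iwl_trans_freeze_txq_timer(trans, sta_txq_bm, asleep);
}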
@@ -1713,7 +1794,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
int ret;
size_t bufsz;
- bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
+ bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
if (!trans_pcie->txq)
return -EAGAIN;
@@ -1726,11 +1807,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
txq = &trans_pcie->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
+ "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
cnt, q->read_ptr, q->write_ptr,
!!test_bit(cnt, trans_pcie->queue_used),
!!test_bit(cnt, trans_pcie->queue_stopped),
- txq->need_update,
+ txq->need_update, txq->frozen,
(cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1961,24 +2042,25 @@ static const struct {
{ .start = 0x00a01c7c, .end = 0x00a01c7c },
{ .start = 0x00a01c28, .end = 0x00a01c54 },
{ .start = 0x00a01c5c, .end = 0x00a01c5c },
- { .start = 0x00a01c84, .end = 0x00a01c84 },
+ { .start = 0x00a01c60, .end = 0x00a01cdc },
{ .start = 0x00a01ce0, .end = 0x00a01d0c },
{ .start = 0x00a01d18, .end = 0x00a01d20 },
{ .start = 0x00a01d2c, .end = 0x00a01d30 },
{ .start = 0x00a01d40, .end = 0x00a01d5c },
{ .start = 0x00a01d80, .end = 0x00a01d80 },
- { .start = 0x00a01d98, .end = 0x00a01d98 },
+ { .start = 0x00a01d98, .end = 0x00a01d9c },
+ { .start = 0x00a01da8, .end = 0x00a01da8 },
+ { .start = 0x00a01db8, .end = 0x00a01df4 },
{ .start = 0x00a01dc0, .end = 0x00a01dfc },
{ .start = 0x00a01e00, .end = 0x00a01e2c },
{ .start = 0x00a01e40, .end = 0x00a01e60 },
+ { .start = 0x00a01e68, .end = 0x00a01e6c },
+ { .start = 0x00a01e74, .end = 0x00a01e74 },
{ .start = 0x00a01e84, .end = 0x00a01e90 },
{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
- { .start = 0x00a01ed0, .end = 0x00a01ed0 },
- { .start = 0x00a01f00, .end = 0x00a01f14 },
- { .start = 0x00a01f44, .end = 0x00a01f58 },
- { .start = 0x00a01f80, .end = 0x00a01fa8 },
- { .start = 0x00a01fb0, .end = 0x00a01fbc },
- { .start = 0x00a01ff8, .end = 0x00a01ffc },
+ { .start = 0x00a01ed0, .end = 0x00a01ee0 },
+ { .start = 0x00a01f00, .end = 0x00a01f1c },
+ { .start = 0x00a01f44, .end = 0x00a01ffc },
{ .start = 0x00a02000, .end = 0x00a02048 },
{ .start = 0x00a02068, .end = 0x00a020f0 },
{ .start = 0x00a02100, .end = 0x00a02118 },
@@ -2305,6 +2387,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
+ .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
@@ -2423,10 +2506,45 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* "dash" value). To keep hw_rev backwards compatible - we'll store it
* in the old format.
*/
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ unsigned long flags;
+ int ret;
+
trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+ /*
+	 * In order to recognize the C step, the driver should read the chip
+	 * version id located in the AUX bus MISC address space.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ udelay(2);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
+ goto out_pci_disable_msi;
+ }
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ u32 hw_step;
+
+ hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
+ hw_step |= ENABLE_WFPM;
+ __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
+ hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
+ hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
+ if (hw_step == 0x3)
+ trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
+ (SILICON_C_STEP << 2);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
+ }
+
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
"PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index af0bce736358..06952aadfd7b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -725,33 +725,50 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
iwl_pcie_tx_start(trans, 0);
}
+static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+ int ch, ret;
+ u32 mask = 0;
+
+ spin_lock(&trans_pcie->irq_lock);
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ goto out;
+
+ /* Stop each Tx DMA channel */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
+ }
+
+ /* Wait for DMA channels to be idle */
+ ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
+ if (ret < 0)
+ IWL_ERR(trans,
+ "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+ ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+out:
+ spin_unlock(&trans_pcie->irq_lock);
+}
+
/*
* iwl_pcie_tx_stop - Stop all Tx DMA channels
*/
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ch, txq_id, ret;
+ int txq_id;
/* Turn off all Tx DMA fifos */
- spin_lock(&trans_pcie->irq_lock);
-
iwl_scd_deactivate_fifos(trans);
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
- if (ret < 0)
- IWL_ERR(trans,
- "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
- ch,
- iwl_read_direct32(trans,
- FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock(&trans_pcie->irq_lock);
+ /* Turn off all Tx DMA channels */
+ iwl_pcie_tx_stop_fh(trans);
/*
* This function can be called before the op_mode disabled the
@@ -912,10 +929,19 @@ error:
static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
+ lockdep_assert_held(&txq->lock);
+
if (!txq->wd_timeout)
return;
/*
+ * station is asleep and we send data - that must
+ * be uAPSD or PS-Poll. Don't rearm the timer.
+ */
+ if (txq->frozen)
+ return;
+
+ /*
* if empty delete timer, otherwise move timer forward
* since we're making progress on this queue
*/
@@ -1248,6 +1274,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
+ trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
+ trans_pcie->txq[txq_id].frozen = false;
+
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
* in the op_mode. Just for the sake of the simplicity of the op_mode,
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a92985a6ea21..1a4d558022d8 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1356,8 +1356,8 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
/* Find the BSS we want using available scan results */
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
- sme->ssid, sme->ssid_len,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ sme->ssid, sme->ssid_len, IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
if (!bss) {
wiphy_err(wiphy, "assoc: bss %pM not in scan results\n",
sme->bssid);
@@ -2000,7 +2000,7 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
* bss list is populated already */
bss = cfg80211_get_bss(wiphy, params->chandef.chan, params->bssid,
params->ssid, params->ssid_len,
- WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+ IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY);
if (bss) {
ret = lbs_ibss_join_existing(priv, params, bss);
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index cc6a0a586f0b..26cbf1dcc662 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -742,8 +742,7 @@ void lbs_debugfs_init(void)
void lbs_debugfs_remove(void)
{
- if (lbs_dir)
- debugfs_remove(lbs_dir);
+ debugfs_remove(lbs_dir);
}
void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 569b64ecc607..8079560f4965 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -667,7 +667,7 @@ static int lbs_setup_firmware(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_FW);
/* Read MAC address from firmware */
- memset(priv->current_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->current_addr);
ret = lbs_update_hw_spec(priv);
if (ret)
goto done;
@@ -871,7 +871,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MAIN);
- memset(priv->current_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->current_addr);
priv->connect_status = LBS_DISCONNECTED;
priv->channel = DEFAULT_AD_HOC_CHANNEL;
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index d576dd6665d3..1a20cee5febe 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -365,7 +365,6 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
return ret;
}
-EXPORT_SYMBOL_GPL(if_usb_reset_device);
/**
* usb_tx_block - transfer data to the device
@@ -907,7 +906,6 @@ restart:
lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
return ret;
}
-EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
#define if_usb_suspend NULL
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 25c5acc78bd1..ed02e4bf2c26 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -152,7 +152,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
/*
* Read priv address from HW
*/
- memset(priv->current_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->current_addr);
ret = lbtf_update_hw_spec(priv);
if (ret) {
ret = -1;
@@ -199,7 +199,7 @@ out:
static int lbtf_init_adapter(struct lbtf_private *priv)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
- memset(priv->current_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->current_addr);
mutex_init(&priv->lock);
priv->vif = NULL;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 8908be6dbc48..d5c0a1af08b9 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -330,6 +330,83 @@ static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 540 }
};
+#define OUI_QCA 0x001374
+#define QCA_NL80211_SUBCMD_TEST 1
+enum qca_nl80211_vendor_subcmds {
+ QCA_WLAN_VENDOR_ATTR_TEST = 8,
+ QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_TEST
+};
+
+static const struct nla_policy
+hwsim_vendor_test_policy[QCA_WLAN_VENDOR_ATTR_MAX + 1] = {
+ [QCA_WLAN_VENDOR_ATTR_MAX] = { .type = NLA_U32 },
+};
+
+static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct sk_buff *skb;
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1];
+ int err;
+ u32 val;
+
+ err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len,
+ hwsim_vendor_test_policy);
+ if (err)
+ return err;
+ if (!tb[QCA_WLAN_VENDOR_ATTR_TEST])
+ return -EINVAL;
+ val = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]);
+ wiphy_debug(wiphy, "%s: test=%u\n", __func__, val);
+
+ /* Send a vendor event as a test. Note that this would not normally be
+ * done within a command handler, but rather, based on some other
+ * trigger. For simplicity, this command is used to trigger the event
+ * here.
+ *
+ * event_idx = 0 (index in mac80211_hwsim_vendor_commands)
+ */
+ skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL);
+ if (skb) {
+ /* skb_put() or nla_put() will fill up data within
+ * NL80211_ATTR_VENDOR_DATA.
+ */
+
+ /* Add vendor data */
+ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1);
+
+ /* Send the event - this will call nla_nest_end() */
+ cfg80211_vendor_event(skb, GFP_KERNEL);
+ }
+
+ /* Send a response to the command */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10);
+ if (!skb)
+ return -ENOMEM;
+
+ /* skb_put() or nla_put() will fill up data within
+ * NL80211_ATTR_VENDOR_DATA
+ */
+ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2);
+
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
+static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = {
+ {
+ .info = { .vendor_id = OUI_QCA,
+ .subcmd = QCA_NL80211_SUBCMD_TEST },
+ .flags = WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = mac80211_hwsim_vendor_cmd_test,
+ }
+};
+
+/* Advertise support for vendor-specific events */
+static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = {
+ { .vendor_id = OUI_QCA, .subcmd = 1 },
+};
+
static const struct ieee80211_iface_limit hwsim_if_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
{ .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
@@ -906,8 +983,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
}
- if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
- ETH_ALEN, data->addresses[1].addr))
+ if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2))
goto nla_put_failure;
/* We get the skb->data */
@@ -1522,21 +1598,16 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
vp->aid = info->aid;
}
- if (changed & BSS_CHANGED_BEACON_INT) {
- wiphy_debug(hw->wiphy, " BCNINT: %d\n", info->beacon_int);
- data->beacon_int = info->beacon_int * 1024;
- }
-
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- wiphy_debug(hw->wiphy, " BCN EN: %d\n", info->enable_beacon);
+ wiphy_debug(hw->wiphy, " BCN EN: %d (BI=%u)\n",
+ info->enable_beacon, info->beacon_int);
vp->bcn_en = info->enable_beacon;
if (data->started &&
!hrtimer_is_queued(&data->beacon_timer.timer) &&
info->enable_beacon) {
u64 tsf, until_tbtt;
u32 bcn_int;
- if (WARN_ON(!data->beacon_int))
- data->beacon_int = 1000 * 1024;
+ data->beacon_int = info->beacon_int * 1024;
tsf = mac80211_hwsim_get_tsf(hw, vif);
bcn_int = data->beacon_int;
until_tbtt = bcn_int - do_div(tsf, bcn_int);
@@ -1550,8 +1621,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
mac80211_hwsim_bcn_en_iter, &count);
wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
count);
- if (count == 0)
+ if (count == 0) {
tasklet_hrtimer_cancel(&data->beacon_timer);
+ data->beacon_int = 0;
+ }
}
}
@@ -1911,7 +1984,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
printk(KERN_DEBUG "hwsim sw_scan_complete\n");
hwsim->scanning = false;
- memset(hwsim->scan_addr, 0, ETH_ALEN);
+ eth_zero_addr(hwsim->scan_addr);
mutex_unlock(&hwsim->mutex);
}
@@ -2267,7 +2340,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
skb_queue_head_init(&data->pending);
SET_IEEE80211_DEV(hw, data->dev);
- memset(addr, 0, ETH_ALEN);
+ eth_zero_addr(addr);
addr[0] = 0x02;
addr[3] = idx >> 8;
addr[4] = idx;
@@ -2420,6 +2493,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->max_rates = 4;
hw->max_rate_tries = 11;
+ hw->wiphy->vendor_commands = mac80211_hwsim_vendor_commands;
+ hw->wiphy->n_vendor_commands =
+ ARRAY_SIZE(mac80211_hwsim_vendor_commands);
+ hw->wiphy->vendor_events = mac80211_hwsim_vendor_events;
+ hw->wiphy->n_vendor_events = ARRAY_SIZE(mac80211_hwsim_vendor_events);
+
if (param->reg_strict)
hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
if (param->regd) {
@@ -2600,7 +2679,7 @@ static void hwsim_mon_setup(struct net_device *dev)
ether_setup(dev);
dev->tx_queue_len = 0;
dev->type = ARPHRD_IEEE80211_RADIOTAP;
- memset(dev->dev_addr, 0, ETH_ALEN);
+ eth_zero_addr(dev->dev_addr);
dev->dev_addr[0] = 0x12;
}
@@ -2611,7 +2690,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &hwsim_radios, list) {
- if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
+ if (mac80211_hwsim_addr_match(data, addr)) {
_found = true;
break;
}
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 543148d27b01..433bd6837c79 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -159,6 +159,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
int tid;
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
+ struct mwifiex_ra_list_tbl *ra_list;
u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -166,7 +167,13 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
>> BLOCKACKPARAM_TID_POS;
+ ra_list = mwifiex_wmm_get_ralist_node(priv, tid, add_ba_rsp->
+ peer_mac_addr);
if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
+ if (ra_list) {
+ ra_list->ba_status = BA_SETUP_NONE;
+ ra_list->amsdu_in_ampdu = false;
+ }
mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
TYPE_DELBA_SENT, true);
if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
@@ -185,6 +192,10 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
tx_ba_tbl->amsdu = true;
else
tx_ba_tbl->amsdu = false;
+ if (ra_list) {
+ ra_list->amsdu_in_ampdu = tx_ba_tbl->amsdu;
+ ra_list->ba_status = BA_SETUP_COMPLETE;
+ }
} else {
dev_err(priv->adapter->dev, "BA stream not created\n");
}
@@ -515,6 +526,7 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
enum mwifiex_ba_status ba_status)
{
struct mwifiex_tx_ba_stream_tbl *new_node;
+ struct mwifiex_ra_list_tbl *ra_list;
unsigned long flags;
if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
@@ -522,7 +534,11 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
GFP_ATOMIC);
if (!new_node)
return;
-
+ ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra);
+ if (ra_list) {
+ ra_list->ba_status = ba_status;
+ ra_list->amsdu_in_ampdu = false;
+ }
INIT_LIST_HEAD(&new_node->list);
new_node->tid = tid;
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 8e2e39422ad8..afdd58aa90de 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -77,22 +77,6 @@ mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
}
-/* This function checks whether AMSDU is allowed for BA stream. */
-static inline u8
-mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int tid)
-{
- struct mwifiex_tx_ba_stream_tbl *tx_tbl;
-
- if (is_broadcast_ether_addr(ptr->ra))
- return false;
- tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
- if (tx_tbl)
- return tx_tbl->amsdu;
-
- return false;
-}
-
/* This function checks whether AMPDU is allowed or not for a particular TID. */
static inline u8
mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
@@ -182,22 +166,6 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
}
/*
- * This function checks whether BA stream is set up or not.
- */
-static inline int
-mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int tid)
-{
- struct mwifiex_tx_ba_stream_tbl *tx_tbl;
-
- tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
- if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
- return true;
-
- return false;
-}
-
-/*
* This function checks whether associated station is 11n enabled
*/
static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 9b983b5cebbd..6183e255e62a 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -170,7 +170,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct sk_buff *skb_aggr, *skb_src;
struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
- int pad = 0, ret;
+ int pad = 0, aggr_num = 0, ret;
struct mwifiex_tx_param tx_param;
struct txpd *ptx_pd = NULL;
struct timeval tv;
@@ -184,7 +184,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
}
tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
- skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
+ skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
+ GFP_ATOMIC | GFP_DMA);
if (!skb_aggr) {
dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
@@ -200,6 +201,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
skb_aggr->priority = skb_src->priority;
do_gettimeofday(&tv);
@@ -211,11 +213,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
break;
skb_src = skb_dequeue(&pra_list->skb_head);
-
pra_list->total_pkt_count--;
-
atomic_dec(&priv->wmm.tx_pkts_queued);
-
+ aggr_num++;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
@@ -251,6 +251,12 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ptx_pd = (struct txpd *)skb_aggr->data;
skb_push(skb_aggr, headroom);
+ tx_info_aggr->aggr_num = aggr_num * 2;
+ if (adapter->data_sent || adapter->tx_lock_flag) {
+ atomic_add(aggr_num * 2, &adapter->tx_queued);
+ skb_queue_tail(&adapter->tx_data_q, skb_aggr);
+ return 0;
+ }
if (adapter->iface_type == MWIFIEX_USB) {
adapter->data_sent = true;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index a2e8817b56d8..f75f8acfaca0 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -659,6 +659,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
{
struct mwifiex_rx_reorder_tbl *tbl;
struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
+ struct mwifiex_ra_list_tbl *ra_list;
u8 cleanup_rx_reorder_tbl;
unsigned long flags;
@@ -686,7 +687,11 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
"event: TID, RA not found in table\n");
return;
}
-
+ ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
+ if (ra_list) {
+ ra_list->amsdu_in_ampdu = false;
+ ra_list->ba_status = BA_SETUP_NONE;
+ }
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 41c8e25df954..bf9020ff2d33 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -717,6 +717,9 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ unsigned long flags;
+
priv->mgmt_frame_mask = 0;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
@@ -727,6 +730,25 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
}
mwifiex_deauthenticate(priv, NULL);
+
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
+ adapter->main_locked = true;
+ if (adapter->mwifiex_processing) {
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+ flush_workqueue(adapter->workqueue);
+ } else {
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+ }
+
+ spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ adapter->rx_locked = true;
+ if (adapter->rx_processing) {
+ spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ flush_workqueue(adapter->rx_workqueue);
+ } else {
+ spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ }
+
mwifiex_free_priv(priv);
priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -740,6 +762,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
struct net_device *dev,
enum nl80211_iftype type)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ unsigned long flags;
+
mwifiex_init_priv(priv);
priv->bss_mode = type;
@@ -770,6 +795,14 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
return -EOPNOTSUPP;
}
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
+ adapter->main_locked = false;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+
+ spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ adapter->rx_locked = false;
+ spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+
return 0;
}
@@ -1563,7 +1596,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
- memset(deauth_mac, 0, ETH_ALEN);
+ eth_zero_addr(deauth_mac);
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_node = mwifiex_get_sta_entry(priv, params->mac);
@@ -1786,7 +1819,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
" reason code %d\n", priv->cfg_bssid, reason_code);
- memset(priv->cfg_bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->cfg_bssid);
priv->hs2_enabled = false;
return 0;
@@ -1954,13 +1987,13 @@ done:
if (mode == NL80211_IFTYPE_ADHOC)
bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
bssid, ssid, ssid_len,
- WLAN_CAPABILITY_IBSS,
- WLAN_CAPABILITY_IBSS);
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY_ANY);
else
bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
bssid, ssid, ssid_len,
- WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
if (!bss) {
if (is_scanning_required) {
@@ -2046,7 +2079,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
dev_dbg(priv->adapter->dev,
"info: association to bssid %pM failed\n",
priv->cfg_bssid);
- memset(priv->cfg_bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->cfg_bssid);
if (ret > 0)
cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
@@ -2194,7 +2227,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- memset(priv->cfg_bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->cfg_bssid);
return 0;
}
@@ -2397,12 +2430,12 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
-#define MWIFIEX_MAX_WQ_LEN 30
/*
- * create a new virtual interface with the given name
+ * create a new virtual interface with the given name and name assign type
*/
struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params)
@@ -2411,7 +2444,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
struct mwifiex_private *priv;
struct net_device *dev;
void *mdev_priv;
- char dfs_cac_str[MWIFIEX_MAX_WQ_LEN], dfs_chsw_str[MWIFIEX_MAX_WQ_LEN];
if (!adapter)
return ERR_PTR(-EFAULT);
@@ -2523,7 +2555,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
}
dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
- NET_NAME_UNKNOWN, ether_setup,
+ name_assign_type, ether_setup,
IEEE80211_NUM_ACS, 1);
if (!dev) {
wiphy_err(wiphy, "no memory available for netdevice\n");
@@ -2576,12 +2608,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EFAULT);
}
- strcpy(dfs_cac_str, "MWIFIEX_DFS_CAC");
- strcat(dfs_cac_str, name);
- priv->dfs_cac_workqueue = alloc_workqueue(dfs_cac_str,
+ priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s",
WQ_HIGHPRI |
WQ_MEM_RECLAIM |
- WQ_UNBOUND, 1);
+ WQ_UNBOUND, 1, name);
if (!priv->dfs_cac_workqueue) {
wiphy_err(wiphy, "cannot register virtual network device\n");
free_netdev(dev);
@@ -2594,11 +2624,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);
- strcpy(dfs_chsw_str, "MWIFIEX_DFS_CHSW");
- strcat(dfs_chsw_str, name);
- priv->dfs_chan_sw_workqueue = alloc_workqueue(dfs_chsw_str,
+ priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s",
WQ_HIGHPRI | WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
+ WQ_MEM_RECLAIM, 1, name);
if (!priv->dfs_chan_sw_workqueue) {
wiphy_err(wiphy, "cannot register virtual network device\n");
free_netdev(dev);
@@ -2738,24 +2766,71 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
}
#ifdef CONFIG_PM
-static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
- struct cfg80211_wowlan *wowlan)
+static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv,
+ struct mwifiex_mef_entry *mef_entry)
+{
+ int i, filt_num = 0, num_ipv4 = 0;
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ __be32 ips[MWIFIEX_MAX_SUPPORTED_IPADDR];
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ mef_entry->mode = MEF_MODE_HOST_SLEEP;
+ mef_entry->action = MEF_ACTION_AUTO_ARP;
+
+ /* Enable ARP offload feature */
+ memset(ips, 0, sizeof(ips));
+ for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) {
+ if (adapter->priv[i]->netdev) {
+ in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev);
+ if (!in_dev)
+ continue;
+ ifa = in_dev->ifa_list;
+ if (!ifa || !ifa->ifa_local)
+ continue;
+ ips[i] = ifa->ifa_local;
+ num_ipv4++;
+ }
+ }
+
+ for (i = 0; i < num_ipv4; i++) {
+ if (!ips[i])
+ continue;
+ mef_entry->filter[filt_num].repeat = 1;
+ memcpy(mef_entry->filter[filt_num].byte_seq,
+ (u8 *)&ips[i], sizeof(ips[i]));
+ mef_entry->filter[filt_num].
+ byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
+ sizeof(ips[i]);
+ mef_entry->filter[filt_num].offset = 46;
+ mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+ if (filt_num) {
+ mef_entry->filter[filt_num].filt_action =
+ TYPE_OR;
+ }
+ filt_num++;
+ }
+
+ mef_entry->filter[filt_num].repeat = 1;
+ mef_entry->filter[filt_num].byte_seq[0] = 0x08;
+ mef_entry->filter[filt_num].byte_seq[1] = 0x06;
+ mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2;
+ mef_entry->filter[filt_num].offset = 20;
+ mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+ mef_entry->filter[filt_num].filt_action = TYPE_AND;
+}
+
+static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
+ struct mwifiex_ds_mef_cfg *mef_cfg,
+ struct mwifiex_mef_entry *mef_entry,
+ struct cfg80211_wowlan *wowlan)
{
int i, filt_num = 0, ret = 0;
bool first_pat = true;
u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
const u8 ipv4_mc_mac[] = {0x33, 0x33};
const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
- struct mwifiex_ds_mef_cfg mef_cfg;
- struct mwifiex_mef_entry *mef_entry;
- mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
- if (!mef_entry)
- return -ENOMEM;
-
- memset(&mef_cfg, 0, sizeof(mef_cfg));
- mef_cfg.num_entries = 1;
- mef_cfg.mef_entry = mef_entry;
mef_entry->mode = MEF_MODE_HOST_SLEEP;
mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
@@ -2772,20 +2847,19 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
if (!wowlan->patterns[i].pkt_offset) {
if (!(byte_seq[0] & 0x01) &&
(byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
- mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+ mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
continue;
} else if (is_broadcast_ether_addr(byte_seq)) {
- mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
+ mef_cfg->criteria |= MWIFIEX_CRITERIA_BROADCAST;
continue;
} else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
(byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
(!memcmp(byte_seq, ipv6_mc_mac, 3) &&
(byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
- mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
+ mef_cfg->criteria |= MWIFIEX_CRITERIA_MULTICAST;
continue;
}
}
-
mef_entry->filter[filt_num].repeat = 1;
mef_entry->filter[filt_num].offset =
wowlan->patterns[i].pkt_offset;
@@ -2802,7 +2876,7 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
}
if (wowlan->magic_pkt) {
- mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+ mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
mef_entry->filter[filt_num].repeat = 16;
memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
ETH_ALEN);
@@ -2823,6 +2897,34 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
mef_entry->filter[filt_num].filt_action = TYPE_OR;
}
+ return ret;
+}
+
+static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret = 0, num_entries = 1;
+ struct mwifiex_ds_mef_cfg mef_cfg;
+ struct mwifiex_mef_entry *mef_entry;
+
+ if (wowlan->n_patterns || wowlan->magic_pkt)
+ num_entries++;
+
+ mef_entry = kcalloc(num_entries, sizeof(*mef_entry), GFP_KERNEL);
+ if (!mef_entry)
+ return -ENOMEM;
+
+ memset(&mef_cfg, 0, sizeof(mef_cfg));
+ mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST |
+ MWIFIEX_CRITERIA_UNICAST;
+ mef_cfg.num_entries = num_entries;
+ mef_cfg.mef_entry = mef_entry;
+
+ mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
+
+ if (wowlan->n_patterns || wowlan->magic_pkt)
+ ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
+ &mef_entry[1], wowlan);
if (!mef_cfg.criteria)
mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -2830,8 +2932,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
MWIFIEX_CRITERIA_MULTICAST;
ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
- HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
-
+ HostCmd_ACT_GEN_SET, 0,
+ &mef_cfg, true);
kfree(mef_entry);
return ret;
}
@@ -2841,27 +2943,33 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_ds_hs_cfg hs_cfg;
- int ret = 0;
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+ int i, ret = 0;
+ struct mwifiex_private *priv;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ mwifiex_abort_cac(priv);
+ }
+
+ mwifiex_cancel_all_pending_cmd(adapter);
if (!wowlan) {
dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
return 0;
}
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
if (!priv->media_connected) {
dev_warn(adapter->dev,
"Can not configure WOWLAN in disconnected state\n");
return 0;
}
- if (wowlan->n_patterns || wowlan->magic_pkt) {
- ret = mwifiex_set_mef_filter(priv, wowlan);
- if (ret) {
- dev_err(adapter->dev, "Failed to set MEF filter\n");
- return ret;
- }
+ ret = mwifiex_set_mef_filter(priv, wowlan);
+ if (ret) {
+ dev_err(adapter->dev, "Failed to set MEF filter\n");
+ return ret;
}
if (wowlan->disconnect) {
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 88d0eade6bb1..38f24e0427d2 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -33,6 +33,7 @@
#define MWIFIEX_MAX_BSS_NUM (3)
#define MWIFIEX_DMA_ALIGN_SZ 64
+#define MWIFIEX_RX_HEADROOM 64
#define MAX_TXPD_SZ 32
#define INTF_HDR_ALIGN 4
@@ -82,6 +83,7 @@
#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
#define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS BIT(3)
#define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS BIT(4)
+#define MWIFIEX_BUF_FLAG_AGGR_PKT BIT(5)
#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
@@ -110,6 +112,11 @@
#define MWIFIEX_A_BAND_START_FREQ 5000
+/* SDIO Aggr data packet special info */
+#define SDIO_MAX_AGGR_BUF_SIZE (256 * 255)
+#define BLOCK_NUMBER_OFFSET 15
+#define SDIO_HEADER_OFFSET 28
+
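For orientation: the block count of an aggregate is a single byte read at
BLOCK_NUMBER_OFFSET, so with the driver's 256-byte SDIO block size (an
assumption taken from MWIFIEX_SDIO_BLOCK_SIZE elsewhere in the driver) the
buffer bound works out as follows:

/* One byte at BLOCK_NUMBER_OFFSET carries the block count (max 255) and
 * each block is one 256-byte SDIO block, hence:
 *   SDIO_MAX_AGGR_BUF_SIZE = 256 * 255 = 65280 bytes
 */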
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -167,10 +174,11 @@ struct mwifiex_wait_queue {
};
struct mwifiex_rxinfo {
+ struct sk_buff *parent;
u8 bss_num;
u8 bss_type;
- struct sk_buff *parent;
u8 use_count;
+ u8 buf_type;
};
struct mwifiex_txinfo {
@@ -178,6 +186,7 @@ struct mwifiex_txinfo {
u8 flags;
u8 bss_num;
u8 bss_type;
+ u8 aggr_num;
u32 pkt_len;
u8 ack_frame_id;
u64 cookie;
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index df553e86a0ad..59d8964dd0dc 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -197,6 +197,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
@@ -353,6 +354,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
#define HostCmd_CMD_TDLS_OPER 0x0122
+#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -523,9 +525,11 @@ enum P2P_MODES {
#define TYPE_OR (MAX_OPERAND+5)
#define MEF_MODE_HOST_SLEEP 1
#define MEF_ACTION_ALLOW_AND_WAKEUP_HOST 3
+#define MEF_ACTION_AUTO_ARP 0x10
#define MWIFIEX_CRITERIA_BROADCAST BIT(0)
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
+#define MWIFIEX_MAX_SUPPORTED_IPADDR 4
#define ACT_TDLS_DELETE 0x00
#define ACT_TDLS_CREATE 0x01
@@ -1240,6 +1244,12 @@ struct host_cmd_ds_chan_rpt_event {
u8 tlvbuf[0];
} __packed;
+struct host_cmd_sdio_sp_rx_aggr_cfg {
+ u8 action;
+ u8 enable;
+ __le16 block_size;
+} __packed;
+
struct mwifiex_fixed_bcn_param {
__le64 timestamp;
__le16 beacon_period;
@@ -1962,6 +1972,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
struct host_cmd_ds_tdls_oper tdls_oper;
struct host_cmd_ds_chan_rpt_req chan_rpt_req;
+ struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index b77ba743e1c4..e12192f5cfad 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -76,7 +76,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
u32 i;
priv->media_connected = false;
- memset(priv->curr_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->curr_addr);
priv->pkt_tx_ctrl = 0;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -266,18 +266,15 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
mwifiex_wmm_init(adapter);
- if (adapter->sleep_cfm) {
- sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
- adapter->sleep_cfm->data;
- memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
- sleep_cfm_buf->command =
- cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
- sleep_cfm_buf->size =
- cpu_to_le16(adapter->sleep_cfm->len);
- sleep_cfm_buf->result = 0;
- sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
- sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
- }
+ sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+ adapter->sleep_cfm->data;
+ memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
+ sleep_cfm_buf->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
+ sleep_cfm_buf->size = cpu_to_le16(adapter->sleep_cfm->len);
+ sleep_cfm_buf->result = 0;
+ sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
+ sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
+
memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params));
memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period));
adapter->tx_lock_flag = false;
@@ -296,10 +293,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
- adapter->ext_scan = false;
adapter->key_api_major_ver = 0;
adapter->key_api_minor_ver = 0;
- memset(adapter->perm_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(adapter->perm_addr);
adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
@@ -482,6 +478,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->rx_proc_lock);
skb_queue_head_init(&adapter->rx_data_q);
+ skb_queue_head_init(&adapter->tx_data_q);
for (i = 0; i < adapter->priv_num; ++i) {
INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
@@ -689,6 +686,10 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
}
}
+ atomic_set(&adapter->tx_queued, 0);
+ while ((skb = skb_dequeue(&adapter->tx_data_q)))
+ mwifiex_write_data_complete(adapter, skb, 0, 0);
+
spin_lock_irqsave(&adapter->rx_proc_lock, flags);
while ((skb = skb_dequeue(&adapter->rx_data_q))) {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 7e74b4fccddd..03a95c7d34bf 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -131,10 +131,39 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
return 0;
}
+void mwifiex_queue_main_work(struct mwifiex_adapter *adapter)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
+ if (adapter->mwifiex_processing) {
+ adapter->more_task_flag = true;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+ queue_work(adapter->workqueue, &adapter->main_work);
+ }
+}
+EXPORT_SYMBOL_GPL(mwifiex_queue_main_work);
+
+static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ if (adapter->rx_processing) {
+ spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ queue_work(adapter->rx_workqueue, &adapter->rx_work);
+ }
+}
+
static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
{
unsigned long flags;
struct sk_buff *skb;
+ struct mwifiex_rxinfo *rx_info;
spin_lock_irqsave(&adapter->rx_proc_lock, flags);
if (adapter->rx_processing || adapter->rx_locked) {
@@ -154,9 +183,16 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
if (adapter->if_ops.submit_rem_rx_urbs)
adapter->if_ops.submit_rem_rx_urbs(adapter);
adapter->delay_main_work = false;
- queue_work(adapter->workqueue, &adapter->main_work);
+ mwifiex_queue_main_work(adapter);
+ }
+ rx_info = MWIFIEX_SKB_RXCB(skb);
+ if (rx_info->buf_type == MWIFIEX_TYPE_AGGR_DATA) {
+ if (adapter->if_ops.deaggr_pkt)
+ adapter->if_ops.deaggr_pkt(adapter, skb);
+ dev_kfree_skb_any(skb);
+ } else {
+ mwifiex_handle_rx_packet(adapter, skb);
}
- mwifiex_handle_rx_packet(adapter, skb);
}
spin_lock_irqsave(&adapter->rx_proc_lock, flags);
adapter->rx_processing = false;
@@ -189,15 +225,17 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
spin_lock_irqsave(&adapter->main_proc_lock, flags);
/* Check if already processing */
- if (adapter->mwifiex_processing) {
+ if (adapter->mwifiex_processing || adapter->main_locked) {
+ adapter->more_task_flag = true;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
goto exit_main_proc;
} else {
adapter->mwifiex_processing = true;
- spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
}
process_start:
do {
+ adapter->more_task_flag = false;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
(adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
break;
@@ -212,9 +250,7 @@ process_start:
if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING &&
adapter->iface_type != MWIFIEX_USB) {
adapter->delay_main_work = true;
- if (!adapter->rx_processing)
- queue_work(adapter->rx_workqueue,
- &adapter->rx_work);
+ mwifiex_queue_rx_work(adapter);
break;
}
@@ -227,24 +263,26 @@ process_start:
}
if (adapter->rx_work_enabled && adapter->data_received)
- queue_work(adapter->rx_workqueue, &adapter->rx_work);
+ mwifiex_queue_rx_work(adapter);
/* Need to wake up the card ? */
if ((adapter->ps_state == PS_STATE_SLEEP) &&
(adapter->pm_wakeup_card_req &&
!adapter->pm_wakeup_fw_try) &&
(is_command_pending(adapter) ||
+ !skb_queue_empty(&adapter->tx_data_q) ||
!mwifiex_wmm_lists_empty(adapter))) {
adapter->pm_wakeup_fw_try = true;
mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
adapter->if_ops.wakeup(adapter);
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
}
if (IS_CARD_RX_RCVD(adapter)) {
adapter->data_received = false;
adapter->pm_wakeup_fw_try = false;
- del_timer_sync(&adapter->wakeup_timer);
+ del_timer(&adapter->wakeup_timer);
if (adapter->ps_state == PS_STATE_SLEEP)
adapter->ps_state = PS_STATE_AWAKE;
} else {
@@ -257,7 +295,8 @@ process_start:
if ((!adapter->scan_chan_gap_enabled &&
adapter->scan_processing) || adapter->data_sent ||
- mwifiex_wmm_lists_empty(adapter)) {
+ (mwifiex_wmm_lists_empty(adapter) &&
+ skb_queue_empty(&adapter->tx_data_q))) {
if (adapter->cmd_sent || adapter->curr_cmd ||
(!is_command_pending(adapter)))
break;
@@ -295,8 +334,10 @@ process_start:
if ((adapter->ps_state == PS_STATE_SLEEP) ||
(adapter->ps_state == PS_STATE_PRE_SLEEP) ||
(adapter->ps_state == PS_STATE_SLEEP_CFM) ||
- adapter->tx_lock_flag)
+ adapter->tx_lock_flag) {
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
+ }
if (!adapter->cmd_sent && !adapter->curr_cmd) {
if (mwifiex_exec_next_cmd(adapter) == -1) {
@@ -307,6 +348,20 @@ process_start:
if ((adapter->scan_chan_gap_enabled ||
!adapter->scan_processing) &&
+ !adapter->data_sent &&
+ !skb_queue_empty(&adapter->tx_data_q)) {
+ mwifiex_process_tx_queue(adapter);
+ if (adapter->hs_activated) {
+ adapter->is_hs_configured = false;
+ mwifiex_hs_activated_event
+ (mwifiex_get_priv
+ (adapter, MWIFIEX_BSS_ROLE_ANY),
+ false);
+ }
+ }
+
+ if ((adapter->scan_chan_gap_enabled ||
+ !adapter->scan_processing) &&
!adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
@@ -320,7 +375,8 @@ process_start:
if (adapter->delay_null_pkt && !adapter->cmd_sent &&
!adapter->curr_cmd && !is_command_pending(adapter) &&
- mwifiex_wmm_lists_empty(adapter)) {
+ (mwifiex_wmm_lists_empty(adapter) &&
+ skb_queue_empty(&adapter->tx_data_q))) {
if (!mwifiex_send_null_packet
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
@@ -330,15 +386,12 @@ process_start:
}
break;
}
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
} while (true);
spin_lock_irqsave(&adapter->main_proc_lock, flags);
- if (!adapter->delay_main_work &&
- (adapter->int_status || IS_CARD_RX_RCVD(adapter))) {
- spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+ if (adapter->more_task_flag)
goto process_start;
- }
-
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
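The reworked loop keeps main_proc_lock across the loop-control checks so that
a wakeup arriving while the loop runs is recorded in more_task_flag instead of
being lost; mwifiex_queue_main_work() sets that flag rather than re-queueing
the work item. A minimal stand-alone sketch of the pattern (names simplified;
kick_work(), do_pending_work() and work_left() are placeholders, not driver
functions; assumes linux/spinlock.h):

/* Hedged sketch of the "run once, remember late kicks" pattern. */
static DEFINE_SPINLOCK(proc_lock);
static bool processing, more_task;

static void kick(void)                  /* ~ mwifiex_queue_main_work() */
{
        spin_lock(&proc_lock);
        if (processing) {
                more_task = true;       /* running loop re-checks this */
                spin_unlock(&proc_lock);
        } else {
                spin_unlock(&proc_lock);
                kick_work();            /* placeholder for queue_work() */
        }
}

static void main_loop(void)             /* ~ mwifiex_main_process() */
{
        spin_lock(&proc_lock);
        if (processing) {
                more_task = true;
                spin_unlock(&proc_lock);
                return;
        }
        processing = true;
restart:
        do {
                more_task = false;
                spin_unlock(&proc_lock);
                do_pending_work();      /* placeholder */
                spin_lock(&proc_lock);
        } while (work_left());          /* placeholder, checked under the lock */
        if (more_task)
                goto restart;
        processing = false;
        spin_unlock(&proc_lock);
}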
@@ -466,7 +519,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
rtnl_lock();
/* Create station interface by default */
- wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
NL80211_IFTYPE_STATION, NULL, NULL);
if (IS_ERR(wdev)) {
dev_err(adapter->dev, "cannot create default STA interface\n");
@@ -475,7 +528,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
}
if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) {
- wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
NL80211_IFTYPE_AP, NULL, NULL);
if (IS_ERR(wdev)) {
dev_err(adapter->dev, "cannot create AP interface\n");
@@ -485,7 +538,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
}
if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) {
- wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", NET_NAME_ENUM,
NL80211_IFTYPE_P2P_CLIENT, NULL,
NULL);
if (IS_ERR(wdev)) {
@@ -604,7 +657,7 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
atomic_inc(&priv->adapter->tx_pending);
mwifiex_wmm_add_buf_txqueue(priv, skb);
- queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+ mwifiex_queue_main_work(priv->adapter);
return 0;
}
@@ -1096,9 +1149,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
}
- if (adapter->if_ops.iface_work)
- INIT_WORK(&adapter->iface_work, adapter->if_ops.iface_work);
-
/* Register the device. Fill up the private data structure with relevant
information from the card. */
if (adapter->if_ops.register_dev(adapter)) {
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index f0a6af179af0..fe1256044a6c 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -35,6 +35,7 @@
#include <linux/ctype.h>
#include <linux/of.h>
#include <linux/idr.h>
+#include <linux/inetdevice.h>
#include "decl.h"
#include "ioctl.h"
@@ -58,6 +59,8 @@ enum {
#define MWIFIEX_MAX_AP 64
+#define MWIFIEX_MAX_PKTS_TXQ 16
+
#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ)
#define MWIFIEX_TIMER_10S 10000
@@ -118,6 +121,7 @@ enum {
#define MWIFIEX_TYPE_CMD 1
#define MWIFIEX_TYPE_DATA 0
+#define MWIFIEX_TYPE_AGGR_DATA 10
#define MWIFIEX_TYPE_EVENT 3
#define MAX_BITMAP_RATES_SIZE 18
@@ -140,6 +144,9 @@ enum {
#define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000
+/* Address alignment */
+#define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
+
struct mwifiex_dbg {
u32 num_cmd_host_to_card_failure;
u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -207,6 +214,12 @@ struct mwifiex_tx_aggr {
u8 amsdu;
};
+enum mwifiex_ba_status {
+ BA_SETUP_NONE = 0,
+ BA_SETUP_INPROGRESS,
+ BA_SETUP_COMPLETE
+};
+
struct mwifiex_ra_list_tbl {
struct list_head list;
struct sk_buff_head skb_head;
@@ -215,6 +228,8 @@ struct mwifiex_ra_list_tbl {
u16 max_amsdu;
u16 ba_pkt_count;
u8 ba_packet_thr;
+ enum mwifiex_ba_status ba_status;
+ u8 amsdu_in_ampdu;
u16 total_pkt_count;
bool tdls_link;
};
@@ -598,11 +613,6 @@ struct mwifiex_private {
struct mwifiex_11h_intf_state state_11h;
};
-enum mwifiex_ba_status {
- BA_SETUP_NONE = 0,
- BA_SETUP_INPROGRESS,
- BA_SETUP_COMPLETE
-};
struct mwifiex_tx_ba_stream_tbl {
struct list_head list;
@@ -735,6 +745,7 @@ struct mwifiex_if_ops {
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
void (*iface_work)(struct work_struct *work);
void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
+ void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
};
struct mwifiex_adapter {
@@ -768,14 +779,18 @@ struct mwifiex_adapter {
bool rx_processing;
bool delay_main_work;
bool rx_locked;
+ bool main_locked;
struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM];
/* spin lock for init/shutdown */
spinlock_t mwifiex_lock;
/* spin lock for main process */
spinlock_t main_proc_lock;
u32 mwifiex_processing;
+ u8 more_task_flag;
u16 tx_buf_size;
u16 curr_tx_buf_size;
+ bool sdio_rx_aggr_enable;
+ u16 sdio_rx_block_size;
u32 ioport;
enum MWIFIEX_HARDWARE_STATUS hw_status;
u16 number_of_antenna;
@@ -810,6 +825,8 @@ struct mwifiex_adapter {
spinlock_t scan_pending_q_lock;
/* spin lock for RX processing routine */
spinlock_t rx_proc_lock;
+ struct sk_buff_head tx_data_q;
+ atomic_t tx_queued;
u32 scan_processing;
u16 region_code;
struct mwifiex_802_11d_domain_reg domain_reg;
@@ -881,8 +898,6 @@ struct mwifiex_adapter {
bool ext_scan;
u8 fw_api_ver;
u8 key_api_major_ver, key_api_minor_ver;
- struct work_struct iface_work;
- unsigned long iface_work_flags;
struct memory_type_mapping *mem_type_mapping_tbl;
u8 num_mem_types;
u8 curr_mem_idx;
@@ -896,6 +911,8 @@ struct mwifiex_adapter {
bool auto_tdls;
};
+void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
+
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
void mwifiex_set_trans_start(struct net_device *dev);
@@ -1318,6 +1335,7 @@ u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params);
@@ -1417,6 +1435,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
u8 rx_rate, u8 ht_info);
void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
+void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a5828da59365..bcc7751d883c 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -203,7 +203,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->pcie.reg = data->reg;
card->pcie.blksz_fw_dl = data->blksz_fw_dl;
card->pcie.tx_buf_size = data->tx_buf_size;
- card->pcie.supports_fw_dump = data->supports_fw_dump;
+ card->pcie.can_dump_fw = data->can_dump_fw;
card->pcie.can_ext_scan = data->can_ext_scan;
}
@@ -234,8 +234,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- cancel_work_sync(&adapter->iface_work);
-
if (user_rmmod) {
#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
@@ -498,7 +496,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
/* Allocate skb here so that firmware can DMA data from it */
- skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+ skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+ GFP_KERNEL | GFP_DMA);
if (!skb) {
dev_err(adapter->dev,
"Unable to allocate skb for RX ring.\n");
@@ -1297,7 +1296,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
}
}
- skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+ skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+ GFP_KERNEL | GFP_DMA);
if (!skb_tmp) {
dev_err(adapter->dev,
"Unable to allocate skb.\n");
@@ -2099,7 +2099,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
goto exit;
mwifiex_interrupt_status(adapter);
- queue_work(adapter->workqueue, &adapter->main_work);
+ mwifiex_queue_main_work(adapter);
exit:
return IRQ_HANDLED;
@@ -2271,7 +2271,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
int ret;
static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
- if (!card->pcie.supports_fw_dump)
+ if (!card->pcie.can_dump_fw)
return;
for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
@@ -2371,25 +2371,26 @@ done:
adapter->curr_mem_idx = 0;
}
+static unsigned long iface_work_flags;
+static struct mwifiex_adapter *save_adapter;
static void mwifiex_pcie_work(struct work_struct *work)
{
- struct mwifiex_adapter *adapter =
- container_of(work, struct mwifiex_adapter, iface_work);
-
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
- &adapter->iface_work_flags))
- mwifiex_pcie_fw_dump_work(adapter);
+ &iface_work_flags))
+ mwifiex_pcie_fw_dump_work(save_adapter);
}
+static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
/* This function dumps FW information */
static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
{
- if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+ save_adapter = adapter;
+ if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
- schedule_work(&adapter->iface_work);
+ schedule_work(&pcie_work);
}
/*
@@ -2617,7 +2618,6 @@ static struct mwifiex_if_ops pcie_ops = {
.init_fw_port = mwifiex_pcie_init_fw_port,
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
.fw_dump = mwifiex_pcie_fw_dump,
- .iface_work = mwifiex_pcie_work,
};
/*
@@ -2663,6 +2663,7 @@ static void mwifiex_pcie_cleanup_module(void)
/* Set the flag as user is removing this module. */
user_rmmod = 1;
+ cancel_work_sync(&pcie_work);
pci_unregister_driver(&mwifiex_pcie);
}
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 666d40e9dbc3..0e7ee8b72358 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -205,7 +205,7 @@ struct mwifiex_pcie_device {
const struct mwifiex_pcie_card_reg *reg;
u16 blksz_fw_dl;
u16 tx_buf_size;
- bool supports_fw_dump;
+ bool can_dump_fw;
bool can_ext_scan;
};
@@ -214,7 +214,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
.reg = &mwifiex_reg_8766,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .supports_fw_dump = false,
+ .can_dump_fw = false,
.can_ext_scan = true,
};
@@ -223,7 +223,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
.reg = &mwifiex_reg_8897,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .supports_fw_dump = true,
+ .can_dump_fw = true,
.can_ext_scan = true,
};
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 91e36cda9543..d10320f89bc1 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -47,6 +47,7 @@
static u8 user_rmmod;
static struct mwifiex_if_ops sdio_ops;
+static unsigned long iface_work_flags;
static struct semaphore add_remove_card_sem;
@@ -105,8 +106,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->tx_buf_size = data->tx_buf_size;
card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
- card->supports_fw_dump = data->supports_fw_dump;
- card->auto_tdls = data->auto_tdls;
+ card->can_dump_fw = data->can_dump_fw;
+ card->can_auto_tdls = data->can_auto_tdls;
card->can_ext_scan = data->can_ext_scan;
}
@@ -200,8 +201,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
- cancel_work_sync(&adapter->iface_work);
-
if (user_rmmod) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
@@ -1043,6 +1042,59 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
}
/*
+ * This function decodes an SDIO aggregation packet.
+ *
+ * Based on the data block size and pkt_len, the skb data is split
+ * into multiple packets.
+ */
+static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u32 total_pkt_len, pkt_len;
+ struct sk_buff *skb_deaggr;
+ u32 pkt_type;
+ u16 blk_size;
+ u8 blk_num;
+ u8 *data;
+
+ data = skb->data;
+ total_pkt_len = skb->len;
+
+ while (total_pkt_len >= (SDIO_HEADER_OFFSET + INTF_HEADER_LEN)) {
+ if (total_pkt_len < adapter->sdio_rx_block_size)
+ break;
+ blk_num = *(data + BLOCK_NUMBER_OFFSET);
+ blk_size = adapter->sdio_rx_block_size * blk_num;
+ if (blk_size > total_pkt_len) {
+ dev_err(adapter->dev, "%s: error in pkt,\t"
+ "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
+ __func__, blk_num, blk_size, total_pkt_len);
+ break;
+ }
+ pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
+ pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
+ 2));
+ if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
+ dev_err(adapter->dev, "%s: error in pkt,\t"
+ "pkt_len=%d, blk_size=%d\n",
+ __func__, pkt_len, blk_size);
+ break;
+ }
+ skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!skb_deaggr)
+ break;
+ skb_put(skb_deaggr, pkt_len);
+ memcpy(skb_deaggr->data, data + SDIO_HEADER_OFFSET, pkt_len);
+ skb_pull(skb_deaggr, INTF_HEADER_LEN);
+
+ mwifiex_handle_rx_packet(adapter, skb_deaggr);
+ data += blk_size;
+ total_pkt_len -= blk_size;
+ }
+}
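As a worked illustration of one iteration of the loop above (all numbers are
made up for clarity):

/* Example: adapter->sdio_rx_block_size = 256, data[BLOCK_NUMBER_OFFSET] = 3
 *   -> blk_size = 256 * 3 = 768 bytes reserved for this packet
 * le16 at data + SDIO_HEADER_OFFSET     -> pkt_len, e.g. 590
 * le16 at data + SDIO_HEADER_OFFSET + 2 -> pkt_type
 * 590 + 28 <= 768, so 590 bytes starting after the 28-byte SDIO header are
 * copied into a DMA-aligned skb, INTF_HEADER_LEN is stripped, and data
 * advances by the full 768 bytes to the next aggregated packet.
 */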
+
+/*
* This function decodes a received packet.
*
* Based on the type, the packet is treated as either a data, or
@@ -1055,11 +1107,28 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
u8 *cmd_buf;
__le16 *curr_ptr = (__le16 *)skb->data;
u16 pkt_len = le16_to_cpu(*curr_ptr);
+ struct mwifiex_rxinfo *rx_info;
- skb_trim(skb, pkt_len);
- skb_pull(skb, INTF_HEADER_LEN);
+ if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) {
+ skb_trim(skb, pkt_len);
+ skb_pull(skb, INTF_HEADER_LEN);
+ }
switch (upld_typ) {
+ case MWIFIEX_TYPE_AGGR_DATA:
+ dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
+ rx_info = MWIFIEX_SKB_RXCB(skb);
+ rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
+ if (adapter->rx_work_enabled) {
+ skb_queue_tail(&adapter->rx_data_q, skb);
+ atomic_inc(&adapter->rx_pending);
+ adapter->data_received = true;
+ } else {
+ mwifiex_deaggr_sdio_pkt(adapter, skb);
+ dev_kfree_skb_any(skb);
+ }
+ break;
+
case MWIFIEX_TYPE_DATA:
dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
if (adapter->rx_work_enabled) {
@@ -1127,17 +1196,17 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
* provided there is space left, processed and finally uploaded.
*/
static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
- struct sk_buff *skb, u8 port)
+ u16 rx_len, u8 port)
{
struct sdio_mmc_card *card = adapter->card;
s32 f_do_rx_aggr = 0;
s32 f_do_rx_cur = 0;
s32 f_aggr_cur = 0;
+ s32 f_post_aggr_cur = 0;
struct sk_buff *skb_deaggr;
- u32 pind;
- u32 pkt_len, pkt_type, mport;
+ struct sk_buff *skb = NULL;
+ u32 pkt_len, pkt_type, mport, pind;
u8 *curr_ptr;
- u32 rx_len = skb->len;
if ((card->has_control_mask) && (port == CTRL_PORT)) {
/* Read the command Resp without aggr */
@@ -1164,12 +1233,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
if (MP_RX_AGGR_IN_PROGRESS(card)) {
- if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) {
+ if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
f_aggr_cur = 1;
} else {
/* No room in Aggr buf, do rx aggr now */
f_do_rx_aggr = 1;
- f_do_rx_cur = 1;
+ f_post_aggr_cur = 1;
}
} else {
/* Rx aggr not in progress */
@@ -1182,7 +1251,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (MP_RX_AGGR_IN_PROGRESS(card)) {
f_do_rx_aggr = 1;
- if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len))
+ if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len))
f_aggr_cur = 1;
else
/* No room in Aggr buf, do rx aggr now */
@@ -1195,7 +1264,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (f_aggr_cur) {
dev_dbg(adapter->dev, "info: current packet aggregation\n");
/* Curr pkt can be aggregated */
- mp_rx_aggr_setup(card, skb, port);
+ mp_rx_aggr_setup(card, rx_len, port);
if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
mp_rx_aggr_port_limit_reached(card)) {
@@ -1238,16 +1307,29 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
curr_ptr = card->mpa_rx.buf;
for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+ u32 *len_arr = card->mpa_rx.len_arr;
/* get curr PKT len & type */
pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
/* copy pkt to deaggr buf */
- skb_deaggr = card->mpa_rx.skb_arr[pind];
+ skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
+ GFP_KERNEL |
+ GFP_DMA);
+ if (!skb_deaggr) {
+ dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n",
+ pkt_len, pkt_type);
+ curr_ptr += len_arr[pind];
+ continue;
+ }
- if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <=
- card->mpa_rx.len_arr[pind])) {
+ skb_put(skb_deaggr, len_arr[pind]);
+
+ if ((pkt_type == MWIFIEX_TYPE_DATA ||
+ (pkt_type == MWIFIEX_TYPE_AGGR_DATA &&
+ adapter->sdio_rx_aggr_enable)) &&
+ (pkt_len <= len_arr[pind])) {
memcpy(skb_deaggr->data, curr_ptr, pkt_len);
@@ -1257,13 +1339,15 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
mwifiex_decode_rx_packet(adapter, skb_deaggr,
pkt_type);
} else {
- dev_err(adapter->dev, "wrong aggr pkt:"
- " type=%d len=%d max_len=%d\n",
+ dev_err(adapter->dev, " drop wrong aggr pkt:\t"
+ "sdio_single_port_rx_aggr=%d\t"
+ "type=%d len=%d max_len=%d\n",
+ adapter->sdio_rx_aggr_enable,
pkt_type, pkt_len,
- card->mpa_rx.len_arr[pind]);
+ len_arr[pind]);
dev_kfree_skb_any(skb_deaggr);
}
- curr_ptr += card->mpa_rx.len_arr[pind];
+ curr_ptr += len_arr[pind];
}
MP_RX_AGGR_BUF_RESET(card);
}
@@ -1273,28 +1357,46 @@ rx_curr_single:
dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
port, rx_len);
+ skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
+ if (!skb) {
+ dev_err(adapter->dev, "single skb allocated fail,\t"
+ "drop pkt port=%d len=%d\n", port, rx_len);
+ if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
+ card->mpa_rx.buf, rx_len,
+ adapter->ioport + port))
+ goto error;
+ return 0;
+ }
+
+ skb_put(skb, rx_len);
+
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
skb->data, skb->len,
adapter->ioport + port))
goto error;
+ if (!adapter->sdio_rx_aggr_enable &&
+ pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
+ dev_err(adapter->dev, "drop wrong pkt type %d\t"
+ "current SDIO RX Aggr not enabled\n",
+ pkt_type);
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
+ if (f_post_aggr_cur) {
+ dev_dbg(adapter->dev, "info: current packet aggregation\n");
+ /* Curr pkt can be aggregated */
+ mp_rx_aggr_setup(card, rx_len, port);
+ }
return 0;
-
error:
- if (MP_RX_AGGR_IN_PROGRESS(card)) {
- /* Multiport-aggregation transfer failed - cleanup */
- for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
- /* copy pkt to deaggr buf */
- skb_deaggr = card->mpa_rx.skb_arr[pind];
- dev_kfree_skb_any(skb_deaggr);
- }
+ if (MP_RX_AGGR_IN_PROGRESS(card))
MP_RX_AGGR_BUF_RESET(card);
- }
- if (f_do_rx_cur)
+ if (f_do_rx_cur && skb)
/* Single transfer pending. Free curr buff also */
dev_kfree_skb_any(skb);
@@ -1356,8 +1458,9 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
MWIFIEX_RX_DATA_BUF_SIZE)
return -1;
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+ dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
- skb = dev_alloc_skb(rx_len);
+ skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
if (!skb)
return -1;
@@ -1447,27 +1550,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1) / MWIFIEX_SDIO_BLOCK_SIZE;
if (rx_len <= INTF_HEADER_LEN ||
(rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
- MWIFIEX_RX_DATA_BUF_SIZE) {
+ card->mpa_rx.buf_size) {
dev_err(adapter->dev, "invalid rx_len=%d\n",
rx_len);
return -1;
}
- rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-
- skb = dev_alloc_skb(rx_len);
-
- if (!skb) {
- dev_err(adapter->dev, "%s: failed to alloc skb",
- __func__);
- return -1;
- }
- skb_put(skb, rx_len);
-
- dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
- rx_len, skb->len);
+ rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+ dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
- if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
+ if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
port)) {
dev_err(adapter->dev, "card_to_host_mpa failed:"
" int status=%#x\n", sdio_ireg);
@@ -1735,6 +1827,7 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
u32 mpa_tx_buf_size, u32 mpa_rx_buf_size)
{
struct sdio_mmc_card *card = adapter->card;
+ u32 rx_buf_size;
int ret = 0;
card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
@@ -1745,13 +1838,15 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
card->mpa_tx.buf_size = mpa_tx_buf_size;
- card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL);
+ rx_buf_size = max_t(u32, mpa_rx_buf_size,
+ (u32)SDIO_MAX_AGGR_BUF_SIZE);
+ card->mpa_rx.buf = kzalloc(rx_buf_size, GFP_KERNEL);
if (!card->mpa_rx.buf) {
ret = -1;
goto error;
}
- card->mpa_rx.buf_size = mpa_rx_buf_size;
+ card->mpa_rx.buf_size = rx_buf_size;
error:
if (ret) {
@@ -1887,7 +1982,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
return -1;
}
- adapter->auto_tdls = card->auto_tdls;
+ adapter->auto_tdls = card->can_auto_tdls;
adapter->ext_scan = card->can_ext_scan;
return ret;
}
@@ -1950,6 +2045,7 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
port, card->mp_data_port_mask);
}
+static struct mwifiex_adapter *save_adapter;
static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
@@ -2018,10 +2114,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
}
/* This function dump firmware memory to file */
-static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
+static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
{
- struct mwifiex_adapter *adapter =
- container_of(work, struct mwifiex_adapter, iface_work);
struct sdio_mmc_card *card = adapter->card;
int ret = 0;
unsigned int reg, reg_start, reg_end;
@@ -2032,7 +2126,7 @@ static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
mwifiex_dump_drv_info(adapter);
- if (!card->supports_fw_dump)
+ if (!card->can_dump_fw)
return;
for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
@@ -2143,36 +2237,36 @@ done:
static void mwifiex_sdio_work(struct work_struct *work)
{
- struct mwifiex_adapter *adapter =
- container_of(work, struct mwifiex_adapter, iface_work);
-
- if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
- &adapter->iface_work_flags))
- mwifiex_sdio_card_reset_work(adapter);
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
- &adapter->iface_work_flags))
- mwifiex_sdio_fw_dump_work(work);
+ &iface_work_flags))
+ mwifiex_sdio_fw_dump_work(save_adapter);
+ if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
+ &iface_work_flags))
+ mwifiex_sdio_card_reset_work(save_adapter);
}
+static DECLARE_WORK(sdio_work, mwifiex_sdio_work);
/* This function resets the card */
static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
{
- if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags))
+ save_adapter = adapter;
+ if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
- schedule_work(&adapter->iface_work);
+ schedule_work(&sdio_work);
}
/* This function dumps FW information */
static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
{
- if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+ save_adapter = adapter;
+ if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
- schedule_work(&adapter->iface_work);
+ set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+ schedule_work(&sdio_work);
}
/* Function to dump SDIO function registers and SDIO scratch registers in case
@@ -2288,9 +2382,9 @@ static struct mwifiex_if_ops sdio_ops = {
.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
.event_complete = mwifiex_sdio_event_complete,
.card_reset = mwifiex_sdio_card_reset,
- .iface_work = mwifiex_sdio_work,
.fw_dump = mwifiex_sdio_fw_dump,
.reg_dump = mwifiex_sdio_reg_dump,
+ .deaggr_pkt = mwifiex_deaggr_sdio_pkt,
};
/*
@@ -2327,6 +2421,7 @@ mwifiex_sdio_cleanup_module(void)
/* Set the flag as user is removing this module. */
user_rmmod = 1;
+ cancel_work_sync(&sdio_work);
sdio_unregister_driver(&mwifiex_sdio);
}
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 957cca246618..6f645cf47369 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -67,6 +67,8 @@
#define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384)
#define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768)
+/* we leave one block of 256 bytes for DMA alignment */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_MAX (65280)
/* Misc. Config Register : Auto Re-enable interrupts */
#define AUTO_RE_ENABLE_INT BIT(4)
@@ -238,9 +240,6 @@ struct sdio_mmc_card {
const struct mwifiex_sdio_card_reg *reg;
u8 max_ports;
u8 mp_agg_pkt_limit;
- bool supports_sdio_new_mode;
- bool has_control_mask;
- bool supports_fw_dump;
u16 tx_buf_size;
u32 mp_tx_agg_buf_size;
u32 mp_rx_agg_buf_size;
@@ -255,7 +254,10 @@ struct sdio_mmc_card {
u8 curr_wr_port;
u8 *mp_regs;
- u8 auto_tdls;
+ bool supports_sdio_new_mode;
+ bool has_control_mask;
+ bool can_dump_fw;
+ bool can_auto_tdls;
bool can_ext_scan;
struct mwifiex_sdio_mpa_tx mpa_tx;
@@ -267,13 +269,13 @@ struct mwifiex_sdio_device {
const struct mwifiex_sdio_card_reg *reg;
u8 max_ports;
u8 mp_agg_pkt_limit;
- bool supports_sdio_new_mode;
- bool has_control_mask;
- bool supports_fw_dump;
u16 tx_buf_size;
u32 mp_tx_agg_buf_size;
u32 mp_rx_agg_buf_size;
- u8 auto_tdls;
+ bool supports_sdio_new_mode;
+ bool has_control_mask;
+ bool can_dump_fw;
+ bool can_auto_tdls;
bool can_ext_scan;
};
@@ -412,13 +414,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.reg = &mwifiex_reg_sd87xx,
.max_ports = 16,
.mp_agg_pkt_limit = 8,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_fw_dump = false,
- .auto_tdls = false,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
.can_ext_scan = false,
};
@@ -427,13 +429,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.reg = &mwifiex_reg_sd87xx,
.max_ports = 16,
.mp_agg_pkt_limit = 8,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_fw_dump = false,
- .auto_tdls = false,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
.can_ext_scan = true,
};
@@ -442,13 +444,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.reg = &mwifiex_reg_sd87xx,
.max_ports = 16,
.mp_agg_pkt_limit = 8,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_fw_dump = false,
- .auto_tdls = false,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
.can_ext_scan = true,
};
@@ -457,13 +459,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.reg = &mwifiex_reg_sd8897,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
.supports_sdio_new_mode = true,
.has_control_mask = false,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
- .supports_fw_dump = true,
- .auto_tdls = false,
+ .can_dump_fw = true,
+ .can_auto_tdls = false,
.can_ext_scan = true,
};
@@ -472,13 +474,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.reg = &mwifiex_reg_sd8887,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
- .supports_fw_dump = false,
- .auto_tdls = true,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = false,
+ .can_auto_tdls = true,
.can_ext_scan = true,
};
@@ -492,8 +494,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_fw_dump = false,
- .auto_tdls = false,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
.can_ext_scan = true,
};
@@ -571,9 +573,9 @@ mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card)
/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
- struct sk_buff *skb, u8 port)
+ u16 rx_len, u8 port)
{
- card->mpa_rx.buf_len += skb->len;
+ card->mpa_rx.buf_len += rx_len;
if (!card->mpa_rx.pkt_cnt)
card->mpa_rx.start_port = port;
@@ -586,8 +588,8 @@ static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
else
card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1);
}
- card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb;
- card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len;
+ card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = NULL;
+ card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = rx_len;
card->mpa_rx.pkt_cnt++;
}
#endif /* _MWIFIEX_SDIO_H */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index f7d204ffd6e9..49422f2a5380 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1370,22 +1370,29 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
struct mwifiex_ds_mef_cfg *mef)
{
struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg;
+ struct mwifiex_fw_mef_entry *mef_entry = NULL;
u8 *pos = (u8 *)mef_cfg;
+ u16 i;
cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG);
mef_cfg->criteria = cpu_to_le32(mef->criteria);
mef_cfg->num_entries = cpu_to_le16(mef->num_entries);
pos += sizeof(*mef_cfg);
- mef_cfg->mef_entry->mode = mef->mef_entry->mode;
- mef_cfg->mef_entry->action = mef->mef_entry->action;
- pos += sizeof(*(mef_cfg->mef_entry));
- if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos))
- return -1;
+ for (i = 0; i < mef->num_entries; i++) {
+ mef_entry = (struct mwifiex_fw_mef_entry *)pos;
+ mef_entry->mode = mef->mef_entry[i].mode;
+ mef_entry->action = mef->mef_entry[i].action;
+ pos += sizeof(*mef_cfg->mef_entry);
+
+ if (mwifiex_cmd_append_rpn_expression(priv,
+ &mef->mef_entry[i], &pos))
+ return -1;
- mef_cfg->mef_entry->exprsize =
- cpu_to_le16(pos - mef_cfg->mef_entry->expr);
+ mef_entry->exprsize =
+ cpu_to_le16(pos - mef_entry->expr);
+ }
cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN);
return 0;
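With this loop the command payload becomes a variable-length sequence of
entries rather than a single fixed one; a sketch of the resulting
HostCmd_CMD_MEF_CFG layout (grouping illustrative, field names as used above):

/* HostCmd_CMD_MEF_CFG payload after the loop above:
 *
 *   host_cmd_ds_mef_cfg header: criteria, num_entries
 *   entry[0]: mode, action, exprsize, expr[]   e.g. auto-ARP filter
 *   entry[1]: mode, action, exprsize, expr[]   e.g. WOWLAN patterns
 *   ...
 *
 * 'pos' steps over each fixed entry header, the RPN expression is appended
 * by mwifiex_cmd_append_rpn_expression(), and exprsize is back-filled once
 * that entry's expression is complete.
 */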
@@ -1664,6 +1671,25 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
return 0;
}
+
+/* This function prepares the SDIO RX aggregation configuration command. */
+static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
+ &cmd->params.sdio_rx_aggr_cfg;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_SDIO_SP_RX_AGGR_CFG);
+ cmd->size =
+ cpu_to_le16(sizeof(struct host_cmd_sdio_sp_rx_aggr_cfg) +
+ S_DS_GEN);
+ cfg->action = cmd_action;
+ if (cmd_action == HostCmd_ACT_GEN_SET)
+ cfg->enable = *(u8 *)data_buf;
+
+ return 0;
+}
+
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1901,6 +1927,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
data_buf);
break;
+ case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+ ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1940,6 +1970,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
struct mwifiex_ds_auto_ds auto_ds;
enum state_11d_t state_11d;
struct mwifiex_ds_11n_tx_cfg tx_cfg;
+ u8 sdio_sp_rx_aggr_enable;
if (first_sta) {
if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -1983,6 +2014,22 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
+ /* Set SDIO single-port RX aggregation info */
+ if (priv->adapter->iface_type == MWIFIEX_SDIO &&
+ ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info)) {
+ sdio_sp_rx_aggr_enable = true;
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_SDIO_SP_RX_AGGR_CFG,
+ HostCmd_ACT_GEN_SET, 0,
+ &sdio_sp_rx_aggr_enable,
+ true);
+ if (ret) {
+ dev_err(priv->adapter->dev,
+ "error while enabling SP aggregation..disable it");
+ adapter->sdio_rx_aggr_enable = false;
+ }
+ }
+
/* Reconfigure tx buf size */
ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
HostCmd_ACT_GEN_SET, 0,
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 5f8da5924666..88dc6b672ef4 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -90,6 +90,10 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
case HostCmd_CMD_MAC_CONTROL:
break;
+ case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+ dev_err(priv->adapter->dev, "SDIO RX single-port aggregation not supported\n");
+ break;
+
default:
break;
}
@@ -943,6 +947,20 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
return 0;
}
+/* This function handles the command response of SDIO RX aggregation. */
+static int mwifiex_ret_sdio_rx_aggr_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
+ &resp->params.sdio_rx_aggr_cfg;
+
+ adapter->sdio_rx_aggr_enable = cfg->enable;
+ adapter->sdio_rx_block_size = le16_to_cpu(cfg->block_size);
+
+ return 0;
+}
+
/*
* This function handles the command responses.
*
@@ -1124,6 +1142,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
break;
+ case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
+ ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 80ffe7412496..0dc7a1d3993d 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -135,7 +135,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
GFP_KERNEL);
}
- memset(priv->cfg_bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->cfg_bssid);
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
@@ -312,7 +312,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->ps_state = PS_STATE_AWAKE;
adapter->pm_wakeup_card_req = false;
adapter->pm_wakeup_fw_try = false;
- del_timer_sync(&adapter->wakeup_timer);
+ del_timer(&adapter->wakeup_timer);
break;
}
if (!mwifiex_send_null_packet
@@ -327,7 +327,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->ps_state = PS_STATE_AWAKE;
adapter->pm_wakeup_card_req = false;
adapter->pm_wakeup_fw_try = false;
- del_timer_sync(&adapter->wakeup_timer);
+ del_timer(&adapter->wakeup_timer);
break;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index ac93557cbdc9..a245f444aeec 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -80,23 +80,29 @@ EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
- int ret = -1;
+ int hroom, ret = -1;
struct mwifiex_adapter *adapter = priv->adapter;
u8 *head_ptr;
struct txpd *local_tx_pd = NULL;
+ hroom = (adapter->iface_type == MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
+
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
head_ptr = mwifiex_process_uap_txpd(priv, skb);
else
head_ptr = mwifiex_process_sta_txpd(priv, skb);
+ if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) {
+ skb_queue_tail(&adapter->tx_data_q, skb);
+ atomic_inc(&adapter->tx_queued);
+ return 0;
+ }
+
if (head_ptr) {
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
- local_tx_pd =
- (struct txpd *) (head_ptr + INTF_HEADER_LEN);
+ local_tx_pd = (struct txpd *)(head_ptr + hroom);
if (adapter->iface_type == MWIFIEX_USB) {
adapter->data_sent = true;
- skb_pull(skb, INTF_HEADER_LEN);
ret = adapter->if_ops.host_to_card(adapter,
MWIFIEX_USB_EP_DATA,
skb, NULL);
@@ -142,6 +148,123 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
return ret;
}
+static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb,
+ struct mwifiex_tx_param *tx_param)
+{
+ struct txpd *local_tx_pd = NULL;
+ u8 *head_ptr = skb->data;
+ int ret = 0;
+ struct mwifiex_private *priv;
+ struct mwifiex_txinfo *tx_info;
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
+ tx_info->bss_type);
+ if (!priv) {
+ dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
+ adapter->dbg.num_tx_host_to_card_failure++;
+ mwifiex_write_data_complete(adapter, skb, 0, 0);
+ return ret;
+ }
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+ if (adapter->iface_type == MWIFIEX_USB)
+ local_tx_pd = (struct txpd *)head_ptr;
+ else
+ local_tx_pd = (struct txpd *) (head_ptr +
+ INTF_HEADER_LEN);
+ }
+
+ if (adapter->iface_type == MWIFIEX_USB) {
+ adapter->data_sent = true;
+ ret = adapter->if_ops.host_to_card(adapter,
+ MWIFIEX_USB_EP_DATA,
+ skb, NULL);
+ } else {
+ ret = adapter->if_ops.host_to_card(adapter,
+ MWIFIEX_TYPE_DATA,
+ skb, tx_param);
+ }
+ switch (ret) {
+ case -ENOSR:
+ dev_err(adapter->dev, "data: -ENOSR is returned\n");
+ break;
+ case -EBUSY:
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ (adapter->pps_uapsd_mode) &&
+ (adapter->tx_lock_flag)) {
+ priv->adapter->tx_lock_flag = false;
+ if (local_tx_pd)
+ local_tx_pd->flags = 0;
+ }
+ skb_queue_head(&adapter->tx_data_q, skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+ atomic_add(tx_info->aggr_num, &adapter->tx_queued);
+ else
+ atomic_inc(&adapter->tx_queued);
+ dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ break;
+ case -1:
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
+ dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
+ ret);
+ adapter->dbg.num_tx_host_to_card_failure++;
+ mwifiex_write_data_complete(adapter, skb, 0, ret);
+ break;
+ case -EINPROGRESS:
+ if (adapter->iface_type != MWIFIEX_PCIE)
+ adapter->data_sent = false;
+ break;
+ case 0:
+ mwifiex_write_data_complete(adapter, skb, 0, ret);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int
+mwifiex_dequeue_tx_queue(struct mwifiex_adapter *adapter)
+{
+ struct sk_buff *skb, *skb_next;
+ struct mwifiex_txinfo *tx_info;
+ struct mwifiex_tx_param tx_param;
+
+ skb = skb_dequeue(&adapter->tx_data_q);
+ if (!skb)
+ return -1;
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+ atomic_sub(tx_info->aggr_num, &adapter->tx_queued);
+ else
+ atomic_dec(&adapter->tx_queued);
+
+ if (!skb_queue_empty(&adapter->tx_data_q))
+ skb_next = skb_peek(&adapter->tx_data_q);
+ else
+ skb_next = NULL;
+ tx_param.next_pkt_len = ((skb_next) ? skb_next->len : 0);
+ if (!tx_param.next_pkt_len) {
+ if (!mwifiex_wmm_lists_empty(adapter))
+ tx_param.next_pkt_len = 1;
+ }
+ return mwifiex_host_to_card(adapter, skb, &tx_param);
+}
+
+void
+mwifiex_process_tx_queue(struct mwifiex_adapter *adapter)
+{
+ do {
+ if (adapter->data_sent || adapter->tx_lock_flag)
+ break;
+ if (mwifiex_dequeue_tx_queue(adapter))
+ break;
+ } while (!skb_queue_empty(&adapter->tx_data_q));
+}
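A note on the tx_param set up above (inferred from how the SDIO host_to_card
path consumes it, so treat it as an assumption rather than documented fact):

/* tx_param.next_pkt_len: 0 means nothing else is queued; any non-zero value
 * (including the literal 1 used when only the WMM lists hold data) signals
 * that more TX traffic follows, so the bus driver may defer or aggregate the
 * current packet instead of flushing it immediately.
 */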
+
/*
* Packet send completion callback handler.
*
@@ -179,8 +302,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
priv->stats.tx_errors++;
}
- if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
atomic_dec_return(&adapter->pending_bridged_pkts);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+ goto done;
+ }
if (aggr)
/* For skb_aggr, do not wake up tx queue */
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 223873022ffe..fd8027f200a0 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -193,7 +193,7 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
recv_length, status);
if (status == -EINPROGRESS) {
- queue_work(adapter->workqueue, &adapter->main_work);
+ mwifiex_queue_main_work(adapter);
/* urb for data_ep is re-submitted now;
* urb for cmd_ep will be re-submitted in callback
@@ -262,7 +262,7 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
urb->status ? -1 : 0);
}
- queue_work(adapter->workqueue, &adapter->main_work);
+ mwifiex_queue_main_work(adapter);
return;
}
@@ -1006,7 +1006,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
/* Simulation of HS_AWAKE event */
adapter->pm_wakeup_fw_try = false;
- del_timer_sync(&adapter->wakeup_timer);
+ del_timer(&adapter->wakeup_timer);
adapter->pm_wakeup_card_req = false;
adapter->ps_state = PS_STATE_AWAKE;
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 308550611f22..b8a45872354d 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -367,6 +367,13 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
if (!skb)
return -1;
+ if (!priv->mgmt_frame_mask ||
+ priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
+ dev_dbg(priv->adapter->dev,
+ "do not receive mgmt frames on uninitialized intf");
+ return -1;
+ }
+
rx_pd = (struct rxpd *)skb->data;
skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
@@ -624,3 +631,26 @@ void mwifiex_hist_data_reset(struct mwifiex_private *priv)
for (ix = 0; ix < MWIFIEX_MAX_SIG_STRENGTH; ix++)
atomic_set(&phist_data->sig_str[ix], 0);
}
+
+void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags)
+{
+ struct sk_buff *skb;
+ int buf_len, pad;
+
+ buf_len = rx_len + MWIFIEX_RX_HEADROOM + MWIFIEX_DMA_ALIGN_SZ;
+
+ skb = __dev_alloc_skb(buf_len, flags);
+
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MWIFIEX_RX_HEADROOM);
+
+ pad = MWIFIEX_ALIGN_ADDR(skb->data, MWIFIEX_DMA_ALIGN_SZ) -
+ (long)skb->data;
+
+ skb_reserve(skb, pad);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mwifiex_alloc_dma_align_buf);
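A quick check of the alignment arithmetic used above, with an illustrative
address:

/* Example with MWIFIEX_DMA_ALIGN_SZ = 64:
 *   skb->data == 0x1234 after the MWIFIEX_RX_HEADROOM reserve
 *   MWIFIEX_ALIGN_ADDR(0x1234, 64) = (0x1234 + 63) & ~63 = 0x1240
 *   pad = 0x1240 - 0x1234 = 12, so 12 more bytes are reserved and the
 *   payload starts on a 64-byte boundary; the extra MWIFIEX_DMA_ALIGN_SZ
 *   added to buf_len guarantees there is always room for this pad.
 */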
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index ef717acec8b7..b2e99569a0f8 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -157,6 +157,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
ra_list->is_11n_enabled = 0;
ra_list->tdls_link = false;
+ ra_list->ba_status = BA_SETUP_NONE;
+ ra_list->amsdu_in_ampdu = false;
if (!mwifiex_queuing_ra_based(priv)) {
if (mwifiex_get_tdls_link_status(priv, ra) ==
TDLS_SETUP_COMPLETE) {
@@ -574,7 +576,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
* This function retrieves a particular RA list node, matching with the
* given TID and RA address.
*/
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
const u8 *ra_addr)
{
@@ -730,7 +732,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
} else {
memcpy(ra, skb->data, ETH_ALEN);
if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
- memset(ra, 0xff, ETH_ALEN);
+ eth_broadcast_addr(ra);
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
}
@@ -942,14 +944,11 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
struct mwifiex_ra_list_tbl *ptr;
struct mwifiex_tid_tbl *tid_ptr;
atomic_t *hqp;
- unsigned long flags_bss, flags_ra;
+ unsigned long flags_ra;
int i, j;
/* check the BSS with highest priority first */
for (j = adapter->priv_num - 1; j >= 0; --j) {
- spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
- flags_bss);
-
/* iterate over BSS with the equal priority */
list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
&adapter->bss_prio_tbl[j].bss_prio_head,
@@ -985,19 +984,15 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
}
}
- spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
- flags_bss);
}
return NULL;
found:
- /* holds bss_prio_lock / ra_list_spinlock */
+ /* holds ra_list_spinlock */
if (atomic_read(hqp) > i)
atomic_set(hqp, i);
spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
- spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
- flags_bss);
*priv = priv_tmp;
*tid = tos_to_tid[i];
@@ -1179,6 +1174,14 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
skb = skb_dequeue(&ptr->skb_head);
+ if (adapter->data_sent || adapter->tx_lock_flag) {
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ ra_list_flags);
+ skb_queue_tail(&adapter->tx_data_q, skb);
+ atomic_inc(&adapter->tx_queued);
+ return;
+ }
+
if (!skb_queue_empty(&ptr->skb_head))
skb_next = skb_peek(&ptr->skb_head);
else
@@ -1276,13 +1279,13 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
}
if (!ptr->is_11n_enabled ||
- mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
- priv->wps.session_enable) {
+ ptr->ba_status ||
+ priv->wps.session_enable) {
if (ptr->is_11n_enabled &&
- mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
- mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
- mwifiex_is_amsdu_allowed(priv, tid) &&
- mwifiex_is_11n_aggragation_possible(priv, ptr,
+ ptr->ba_status &&
+ ptr->amsdu_in_ampdu &&
+ mwifiex_is_amsdu_allowed(priv, tid) &&
+ mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
/* ra_list_spinlock has been freed in
@@ -1329,11 +1332,16 @@ void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
do {
- /* Check if busy */
- if (adapter->data_sent || adapter->tx_lock_flag)
- break;
-
if (mwifiex_dequeue_tx_packet(adapter))
break;
+ if (adapter->iface_type != MWIFIEX_SDIO) {
+ if (adapter->data_sent ||
+ adapter->tx_lock_flag)
+ break;
+ } else {
+ if (atomic_read(&adapter->tx_queued) >=
+ MWIFIEX_MAX_PKTS_TXQ)
+ break;
+ }
} while (!mwifiex_wmm_lists_empty(adapter));
}
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 569bd73f33c5..48ece0b35591 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -127,4 +127,6 @@ mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
const u8 *ra_addr);
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
+struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
+ *priv, u8 tid, const u8 *ra_addr);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index f9b1218c761a..95921167b53f 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1277,7 +1277,7 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
struct mwl8k_priv *priv = hw->priv;
priv->capture_beacon = false;
- memset(priv->capture_bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->capture_bssid);
/*
* Use GFP_ATOMIC as rxq_process is called from
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index 6d831d4d1b5f..f6fa3f4e294f 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -2,7 +2,7 @@ config HERMES
tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
depends on (PPC_PMAC || PCI || PCMCIA)
depends on CFG80211
- select CFG80211_WEXT
+ select CFG80211_WEXT_EXPORT
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index 0ca8b1455cd9..77e6c53040a3 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -228,7 +228,7 @@ MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
MODULE_LICENSE("Dual MPL/GPL");
-static struct of_device_id airport_match[] = {
+static const struct of_device_id airport_match[] = {
{
.name = "radio",
},
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 6abdaf0aa052..1d4dae422106 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -168,7 +168,7 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
if (is_zero_ether_addr(ap_addr->sa_data) ||
is_broadcast_ether_addr(ap_addr->sa_data)) {
priv->bssid_fixed = 0;
- memset(priv->desired_bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->desired_bssid);
/* "off" means keep existing connection */
if (ap_addr->sa_data[0] == 0) {
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 5367d510b22d..275408eaf95e 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -671,7 +671,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len,
if (addr)
memcpy(rxkey->mac, addr, ETH_ALEN);
else
- memset(rxkey->mac, ~0, ETH_ALEN);
+ eth_broadcast_addr(rxkey->mac);
switch (algo) {
case P54_CRYPTO_WEP:
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index b9250d75d253..e79674f73dc5 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -182,7 +182,7 @@ static int p54_start(struct ieee80211_hw *dev)
if (err)
goto out;
- memset(priv->bssid, ~0, ETH_ALEN);
+ eth_broadcast_addr(priv->bssid);
priv->mode = NL80211_IFTYPE_MONITOR;
err = p54_setup_mac(priv);
if (err) {
@@ -274,8 +274,8 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ);
}
priv->mode = NL80211_IFTYPE_MONITOR;
- memset(priv->mac_addr, 0, ETH_ALEN);
- memset(priv->bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->mac_addr);
+ eth_zero_addr(priv->bssid);
p54_setup_mac(priv);
mutex_unlock(&priv->conf_mutex);
}
@@ -794,7 +794,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
init_completion(&priv->beacon_comp);
INIT_DELAYED_WORK(&priv->work, p54_work);
- memset(&priv->mc_maclist[0], ~0, ETH_ALEN);
+ eth_broadcast_addr(priv->mc_maclist[0]);
priv->curchan = NULL;
p54_reset_stats(priv);
return dev;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 8330fa33e50b..477f86354dc5 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -808,7 +808,7 @@ static int ray_dev_init(struct net_device *dev)
/* copy mac and broadcast addresses to linux device */
memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
- memset(dev->broadcast, 0xff, ETH_ALEN);
+ eth_broadcast_addr(dev->broadcast);
dev_dbg(&link->dev, "ray_dev_init ending\n");
return 0;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 60d44ce9c017..d72ff8e7125d 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -199,13 +199,13 @@ enum ndis_80211_pmkid_cand_list_flag_bits {
struct ndis_80211_auth_request {
__le32 length;
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
u8 padding[2];
__le32 flags;
} __packed;
struct ndis_80211_pmkid_candidate {
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
u8 padding[2];
__le32 flags;
} __packed;
@@ -248,7 +248,7 @@ struct ndis_80211_conf {
struct ndis_80211_bssid_ex {
__le32 length;
- u8 mac[6];
+ u8 mac[ETH_ALEN];
u8 padding[2];
struct ndis_80211_ssid ssid;
__le32 privacy;
@@ -283,7 +283,7 @@ struct ndis_80211_key {
__le32 size;
__le32 index;
__le32 length;
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
u8 padding[6];
u8 rsc[8];
u8 material[32];
@@ -292,7 +292,7 @@ struct ndis_80211_key {
struct ndis_80211_remove_key {
__le32 size;
__le32 index;
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
u8 padding[2];
} __packed;
@@ -310,7 +310,7 @@ struct ndis_80211_assoc_info {
struct req_ie {
__le16 capa;
__le16 listen_interval;
- u8 cur_ap_address[6];
+ u8 cur_ap_address[ETH_ALEN];
} req_ie;
__le32 req_ie_length;
__le32 offset_req_ies;
@@ -338,7 +338,7 @@ struct ndis_80211_capability {
} __packed;
struct ndis_80211_bssid_info {
- u8 bssid[6];
+ u8 bssid[ETH_ALEN];
u8 pmkid[16];
} __packed;
@@ -1037,7 +1037,7 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
bssid, &len);
if (ret != 0)
- memset(bssid, 0, ETH_ALEN);
+ eth_zero_addr(bssid);
return ret;
}
@@ -1391,7 +1391,7 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
priv->encr_keys[index].len = key_len;
priv->encr_keys[index].cipher = cipher;
memcpy(&priv->encr_keys[index].material, key, key_len);
- memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->encr_keys[index].bssid);
return 0;
}
@@ -1466,7 +1466,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
} else {
/* group key */
if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
- memset(ndis_key.bssid, 0xff, ETH_ALEN);
+ eth_broadcast_addr(ndis_key.bssid);
else
get_bssid(usbdev, ndis_key.bssid);
}
@@ -1486,7 +1486,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY)
memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN);
else
- memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN);
+ eth_broadcast_addr(priv->encr_keys[index].bssid);
if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY)
priv->encr_tx_key_index = index;
@@ -2280,7 +2280,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);
priv->connected = false;
- memset(priv->bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->bssid);
return deauthenticate(usbdev);
}
@@ -2392,7 +2392,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");
priv->connected = false;
- memset(priv->bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->bssid);
return deauthenticate(usbdev);
}
@@ -2857,7 +2857,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
if (priv->connected) {
priv->connected = false;
- memset(priv->bssid, 0, ETH_ALEN);
+ eth_zero_addr(priv->bssid);
deauthenticate(usbdev);
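
Illustration (not part of the patch): the eth_zero_addr()/eth_broadcast_addr() conversions in these hunks swap open-coded memset() calls for the kernel's dedicated 6-byte MAC helpers, which are themselves thin memset() wrappers. The user-space sketch below re-creates them with demo_ names purely for illustration.

/*
 * demo_ helpers mirror the semantics of eth_zero_addr()/eth_broadcast_addr():
 * fill a 6-byte MAC address with 0x00 or 0xff respectively.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

static void demo_eth_zero_addr(unsigned char *addr)
{
	memset(addr, 0x00, DEMO_ETH_ALEN);
}

static void demo_eth_broadcast_addr(unsigned char *addr)
{
	memset(addr, 0xff, DEMO_ETH_ALEN);
}

static void demo_print_mac(const unsigned char *addr)
{
	int i;

	for (i = 0; i < DEMO_ETH_ALEN; i++)
		printf("%02x%c", addr[i], i == DEMO_ETH_ALEN - 1 ? '\n' : ':');
}

int main(void)
{
	unsigned char mac[DEMO_ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	demo_eth_broadcast_addr(mac);	/* ff:ff:ff:ff:ff:ff */
	demo_print_mac(mac);

	demo_eth_zero_addr(mac);	/* 00:00:00:00:00:00 */
	demo_print_mac(mac);
	return 0;
}
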
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 8444313eabe2..6ec2466b52b6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -233,6 +233,7 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
{
__le32 *reg;
u32 fw_mode;
+ int ret;
reg = kmalloc(sizeof(*reg), GFP_KERNEL);
if (reg == NULL)
@@ -242,11 +243,14 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
* magic value USB_MODE_AUTORUN (0x11) to the device, thus the
* returned value would be invalid.
*/
- rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
- USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
- reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
+ ret = rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
+ USB_VENDOR_REQUEST_IN, 0,
+ USB_MODE_AUTORUN, reg, sizeof(*reg),
+ REGISTER_TIMEOUT_FIRMWARE);
fw_mode = le32_to_cpu(*reg);
kfree(reg);
+ if (ret < 0)
+ return ret;
if ((fw_mode & 0x00000003) == 2)
return 1;
@@ -289,6 +293,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
if (retval) {
rt2x00_info(rt2x00dev,
"Firmware loading not required - NIC in AutoRun mode\n");
+ __clear_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
} else {
rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
data + offset, length);
@@ -374,7 +379,6 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
rt2800_disable_radio(rt2x00dev);
- rt2x00usb_disable_radio(rt2x00dev);
}
static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1040,6 +1044,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c17) },
{ USB_DEVICE(0x2001, 0x3317) },
{ USB_DEVICE(0x2001, 0x3c1b) },
+ { USB_DEVICE(0x2001, 0x3c25) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
/* DVICO */
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 8f85fbd5f237..569363da00a2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -199,7 +199,7 @@ static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 *value)
{
- __le32 reg;
+ __le32 reg = 0;
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
&reg, sizeof(reg));
@@ -219,7 +219,7 @@ static inline void rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 *value)
{
- __le32 reg;
+ __le32 reg = 0;
rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
&reg, sizeof(reg), REGISTER_TIMEOUT);
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 074f716020aa..01f56c7df8b5 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1315,7 +1315,8 @@ static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
}
/*should call before software enc*/
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
+ bool is_enc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -1344,7 +1345,9 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
break;
}
- offset = mac_hdr_len + SNAP_SIZE + encrypt_header_len;
+ offset = mac_hdr_len + SNAP_SIZE;
+ if (is_enc)
+ offset += encrypt_header_len;
ether_type = be16_to_cpup((__be16 *)(skb->data + offset));
if (ETH_P_IP == ether_type) {
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index c6cb49c3ee32..74233d601a90 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -45,9 +45,6 @@ enum ap_peer {
#define RTL_TX_DESC_SIZE 32
#define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
-#define HT_AMSDU_SIZE_4K 3839
-#define HT_AMSDU_SIZE_8K 7935
-
#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
@@ -61,9 +58,6 @@ enum ap_peer {
#define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS9 390 /* Mbps */
#define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS7 293 /* Mbps */
-#define RTL_RATE_COUNT_LEGACY 12
-#define RTL_CHANNEL_COUNT 14
-
#define FRAME_OFFSET_FRAME_CONTROL 0
#define FRAME_OFFSET_DURATION 2
#define FRAME_OFFSET_ADDRESS1 4
@@ -126,10 +120,10 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
bool isvht, u8 desc_rate);
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
+ bool is_enc);
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index 35508087c0c5..e2e647d511c1 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -28,13 +28,11 @@
#define CAM_CONTENT_COUNT 8
-#define CFG_DEFAULT_KEY BIT(5)
#define CFG_VALID BIT(15)
#define PAIRWISE_KEYIDX 0
#define CAM_PAIRWISE_KEY_POSITION 4
-#define CAM_CONFIG_USEDK 1
#define CAM_CONFIG_NO_USEDK 0
void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a31a12775f1a..3b3a88b53b11 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -195,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
if (!(support_remote_wakeup &&
rtlhal->enter_pnp_sleep)) {
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ eth_zero_addr(mac->bssid);
mac->vendor = PEER_UNKNOWN;
/* reset sec info */
@@ -357,7 +357,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mac->p2p = 0;
mac->vif = NULL;
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, ETH_ALEN);
+ eth_zero_addr(mac->bssid);
mac->vendor = PEER_UNKNOWN;
mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -1157,7 +1157,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, ETH_ALEN);
+ eth_zero_addr(mac->bssid);
mac->vendor = PEER_UNKNOWN;
mac->mode = 0;
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 7b64e34f421e..82733c6b8c46 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -33,8 +33,6 @@
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC)
-#define RTL_SUPPORTED_CTRL_FILTER 0xFF
-
#define DM_DIG_THRESH_HIGH 40
#define DM_DIG_THRESH_LOW 35
#define DM_FALSEALARM_THRESH_LOW 400
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index fdab8240a5d7..be02e7894c61 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -40,12 +40,6 @@
#define PG_STATE_WORD_3 0x10
#define PG_STATE_DATA 0x20
-#define PG_SWBYTE_H 0x01
-#define PG_SWBYTE_L 0x02
-
-#define _POWERON_DELAY_
-#define _PRE_EXECUTE_READ_CMD_
-
#define EFUSE_REPEAT_THRESHOLD_ 3
#define EFUSE_ERROE_HANDLE 1
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 8c45cf44ce24..f46c9d7f6528 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -887,7 +887,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
unicast = true;
rtlpriv->stats.rxbytesunicast += skb->len;
}
- rtl_is_special_data(hw, skb, false);
+ rtl_is_special_data(hw, skb, false, true);
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index 7863bd278b22..74c14ce28238 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -56,7 +56,8 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
wireless_mode = sta_entry->wireless_mode;
}
- if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || not_data) {
+ if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true, false) ||
+ not_data) {
return 0;
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
@@ -201,7 +202,7 @@ static void rtl_tx_status(void *ppriv,
if (!priv_sta || !ieee80211_is_data(fc))
return;
- if (rtl_is_special_data(mac->hw, skb, true))
+ if (rtl_is_special_data(mac->hw, skb, true, true))
return;
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h
index d9ea9d0c79a5..0532b9852444 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h
@@ -26,53 +26,12 @@
#ifndef __RTL92C_DEF_H__
#define __RTL92C_DEF_H__
-#define HAL_RETRY_LIMIT_INFRA 48
-#define HAL_RETRY_LIMIT_AP_ADHOC 7
-
-#define RESET_DELAY_8185 20
-
-#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
-
-#define MAX_LINES_HWCONFIG_TXT 1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT 256
-
-#define SW_THREE_WIRE 0
-#define HW_THREE_WIRE 2
-
-#define BT_DEMO_BOARD 0
-#define BT_QA_BOARD 1
-#define BT_FPGA 2
-
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-#define MAX_H2C_QUEUE_NUM 10
-
#define RX_MPDU_QUEUE 0
#define RX_CMD_QUEUE 1
-#define RX_MAX_QUEUE 2
-#define AC2QUEUEID(_AC) (_AC)
#define C2H_RX_CMD_HDR_LEN 8
#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index f2b9713c456e..86ce5b1930e6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -30,6 +30,7 @@
#include "../cam.h"
#include "../ps.h"
#include "../pci.h"
+#include "../pwrseqcmd.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -566,7 +567,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
- acm_ctrl &= (~ACMHW_BEQEN);
+ acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -885,7 +886,7 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_CR, 0x2ff);
rtl_write_byte(rtlpriv, REG_CR+1, 0x06);
- rtl_write_byte(rtlpriv, REG_CR+2, 0x00);
+ rtl_write_byte(rtlpriv, MSR, 0x00);
if (!rtlhal->mac_func_enable) {
if (_rtl88ee_llt_table_init(hw) == false) {
@@ -1277,7 +1278,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
mode);
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+ rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if (mode == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index 3f6c59cdeaba..a2bb02c7b837 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -452,9 +452,10 @@ static void handle_branch1(struct ieee80211_hw *hw, u16 arraylen,
READ_NEXT_PAIR(v1, v2, i);
while (v2 != 0xDEAD &&
v2 != 0xCDEF &&
- v2 != 0xCDCD && i < arraylen - 2)
+ v2 != 0xCDCD && i < arraylen - 2) {
_rtl8188e_config_bb_reg(hw, v1, v2);
READ_NEXT_PAIR(v1, v2, i);
+ }
while (v2 != 0xDEAD && i < arraylen - 2)
READ_NEXT_PAIR(v1, v2, i);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
index 5c1472d88fd4..0eca030e3238 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
@@ -27,7 +27,6 @@
#define __RTL92C_RF_H__
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
u8 bandwidth);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 9b660df6fd71..690a7a1675e2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -30,59 +30,18 @@
#ifndef __RTL92C_DEF_H__
#define __RTL92C_DEF_H__
-#define HAL_RETRY_LIMIT_INFRA 48
-#define HAL_RETRY_LIMIT_AP_ADHOC 7
-
#define PHY_RSSI_SLID_WIN_MAX 100
#define PHY_LINKQUALITY_SLID_WIN_MAX 20
#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
-#define RESET_DELAY_8185 20
-
-#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
-
-#define MAX_LINES_HWCONFIG_TXT 1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT 256
-
-#define SW_THREE_WIRE 0
-#define HW_THREE_WIRE 2
-
-#define BT_DEMO_BOARD 0
-#define BT_QA_BOARD 1
-#define BT_FPGA 2
-
#define RX_SMOOTH_FACTOR 20
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-#define MAX_H2C_QUEUE_NUM 10
-
#define RX_MPDU_QUEUE 0
#define RX_CMD_QUEUE 1
-#define RX_MAX_QUEUE 2
-#define AC2QUEUEID(_AC) (_AC)
#define C2H_RX_CMD_HDR_LEN 8
#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 303b299376c9..04eb5c3f8464 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -363,7 +363,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~AcmHw_ViqEn);
break;
case AC3_VO:
- acm_ctrl &= (~AcmHw_BeqEn);
+ acm_ctrl &= (~AcmHw_VoqEn);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index d8fe68b389d2..ebd72cae10b6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -31,7 +31,6 @@
#define __RTL92C_RF_H__
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index fe4b699a12f5..d310d55d800e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1364,7 +1364,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
"Network type %d not supported!\n", type);
goto error_out;
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtl_write_byte(rtlpriv, MSR, bt_msr);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if ((bt_msr & MSR_MASK) == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
@@ -1471,8 +1471,7 @@ static void _InitBeaconParameters(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
}
-static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
- bool Linked)
+static void _beacon_function_enable(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1517,7 +1516,7 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
}
- _beacon_function_enable(hw, true, true);
+ _beacon_function_enable(hw);
}
void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
@@ -1589,6 +1588,8 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_DATA_FILTER:
*((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
@@ -1871,7 +1872,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~AcmHw_ViqEn);
break;
case AC3_VO:
- acm_ctrl &= (~AcmHw_BeqEn);
+ acm_ctrl &= (~AcmHw_VoqEn);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index c1e33b0228c0..67588083e6cc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -32,8 +32,6 @@
#define H2C_RA_MASK 6
-#define LLT_POLLING_LLT_THRESHOLD 20
-#define LLT_POLLING_READY_TIMEOUT_COUNT 100
#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
#define RX_PAGE_SIZE_REG_VALUE PBP_128
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 133e395b7401..adb810794eef 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -497,7 +497,7 @@ int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
"Network type %d not supported!\n", type);
return -EOPNOTSUPP;
}
- rtl_write_byte(rtlpriv, (REG_CR + 2), value);
+ rtl_write_byte(rtlpriv, MSR, value);
return 0;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 11b439d6b671..6f987de5b441 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -31,7 +31,6 @@
#define __RTL92CU_RF_H__
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 90a714c189a8..23806c243a53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
index 939c905f547f..0a443ed17cf4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
@@ -35,61 +35,22 @@
#define MAX_MSS_DENSITY_1T 0x0A
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-#define HAL_RETRY_LIMIT_INFRA 48
-#define HAL_RETRY_LIMIT_AP_ADHOC 7
-
#define PHY_RSSI_SLID_WIN_MAX 100
#define PHY_LINKQUALITY_SLID_WIN_MAX 20
#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
-#define RESET_DELAY_8185 20
-
-#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
-
-#define MAX_LINES_HWCONFIG_TXT 1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT 256
-
-#define SW_THREE_WIRE 0
-#define HW_THREE_WIRE 2
-
-#define BT_DEMO_BOARD 0
-#define BT_QA_BOARD 1
-#define BT_FPGA 2
-
#define RX_SMOOTH_FACTOR 20
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-#define MAX_H2C_QUEUE_NUM 10
-
#define RX_MPDU_QUEUE 0
#define RX_CMD_QUEUE 1
-#define RX_MAX_QUEUE 2
#define C2H_RX_CMD_HDR_LEN 8
#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 01bcc2d218dc..f49b60d31450 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1126,7 +1126,7 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
break;
}
- rtl_write_byte(rtlpriv, REG_CR + 2, bt_msr);
+ rtl_write_byte(rtlpriv, MSR, bt_msr);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if ((bt_msr & MSR_MASK) == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index b461b3128da5..da0a6125f314 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -562,7 +562,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
- acm_ctrl &= (~ACMHW_BEQEN);
+ acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
@@ -1510,7 +1510,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
mode);
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+ rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if (mode == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
index 8bdeed3c064e..039c0133ad6b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
@@ -27,7 +27,6 @@
#define __RTL92E_RF_H__
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
u8 bandwidth);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index ef87c09b77d0..41466f957cdc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -31,7 +31,6 @@
#define RX_MPDU_QUEUE 0
#define RX_CMD_QUEUE 1
-#define RX_MAX_QUEUE 2
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 5761d5b49e39..12b0978ba4fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -293,7 +293,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~AcmHw_ViqEn);
break;
case AC3_VO:
- acm_ctrl &= (~AcmHw_BeqEn);
+ acm_ctrl &= (~AcmHw_VoqEn);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -1204,7 +1204,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
if (type != NL80211_IFTYPE_AP &&
rtlpriv->mac80211.link_state < MAC80211_LINKED)
bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK;
- rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtl_write_byte(rtlpriv, MSR, bt_msr);
temp = rtl_read_dword(rtlpriv, TCR);
rtl_write_dword(rtlpriv, TCR, temp & (~BIT(8)));
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
index 94bdd4bbca5d..bcdf2273688e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
@@ -26,53 +26,12 @@
#ifndef __RTL8723E_DEF_H__
#define __RTL8723E_DEF_H__
-#define HAL_RETRY_LIMIT_INFRA 48
-#define HAL_RETRY_LIMIT_AP_ADHOC 7
-
-#define RESET_DELAY_8185 20
-
-#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
-
-#define MAX_LINES_HWCONFIG_TXT 1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT 256
-
-#define SW_THREE_WIRE 0
-#define HW_THREE_WIRE 2
-
-#define BT_DEMO_BOARD 0
-#define BT_QA_BOARD 1
-#define BT_FPGA 2
-
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-#define MAX_H2C_QUEUE_NUM 10
-
#define RX_MPDU_QUEUE 0
#define RX_CMD_QUEUE 1
-#define RX_MAX_QUEUE 2
-#define AC2QUEUEID(_AC) (_AC)
#define C2H_RX_CMD_HDR_LEN 8
#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index aa085462d0e9..67bb47d77b68 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -362,7 +362,7 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
- acm_ctrl &= (~ACMHW_BEQEN);
+ acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
@@ -1183,7 +1183,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
mode);
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+ rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if (mode == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
index f3f45b16361f..7b44ebc0fac9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -27,7 +27,6 @@
#define __RTL8723E_RF_H__
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
u8 bandwidth);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
index 2367e8f47a5b..e77c3a46c94a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
@@ -309,7 +309,7 @@ static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw)
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnet PWDB = 0x%x\n",
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
rtl_dm_dig->min_undec_pwdb_for_dm);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index 6dad28e77bbb..b681af3c7a35 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -603,7 +603,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
- acm_ctrl &= (~ACMHW_BEQEN);
+ acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
@@ -1558,7 +1558,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
mode);
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr | mode);
+ rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if (mode == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
index a6fea106ced4..f423e157020f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
@@ -27,7 +27,6 @@
#define __RTL8723BE_RF_H__
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
u8 bandwidth);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
index ee7c208bd070..dfbdf539de1a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
@@ -118,55 +118,14 @@
#define WIFI_NAV_UPPER_US 30000
#define HAL_92C_NAV_UPPER_UNIT 128
-#define HAL_RETRY_LIMIT_INFRA 48
-#define HAL_RETRY_LIMIT_AP_ADHOC 7
-
-#define RESET_DELAY_8185 20
-
-#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
-#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
-
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
-#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
-#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
-
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
-
#define MAX_RX_DMA_BUFFER_SIZE 0x3E80
-#define MAX_LINES_HWCONFIG_TXT 1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT 256
-
-#define SW_THREE_WIRE 0
-#define HW_THREE_WIRE 2
-
-#define BT_DEMO_BOARD 0
-#define BT_QA_BOARD 1
-#define BT_FPGA 2
-
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-#define MAX_H2C_QUEUE_NUM 10
-
#define RX_MPDU_QUEUE 0
#define RX_CMD_QUEUE 1
-#define RX_MAX_QUEUE 2
-#define AC2QUEUEID(_AC) (_AC)
#define MAX_RX_DMA_BUFFER_SIZE_8812 0x3E80
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
index 0b2082dc48f1..342678d2ed42 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
@@ -873,7 +873,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Abnornally false alarm case.\n");
+ "Abnormally false alarm case.\n");
if (dm_digtable->large_fa_hit != 3)
dm_digtable->large_fa_hit++;
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 8ec8200002c7..8704eee9f3a4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -423,7 +423,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4);
break;
case HW_VAR_MEDIA_STATUS:
- val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3;
+ val[0] = rtl_read_byte(rtlpriv, MSR) & 0x3;
break;
case HW_VAR_SLOT_TIME:
*((u8 *)(val)) = mac->slot_time;
@@ -667,7 +667,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
- acm_ctrl &= (~ACMHW_BEQEN);
+ acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
@@ -1515,7 +1515,7 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
(u8 *)(&support_remote_wakeup));
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "boundary=0x%#X, NPQ_RQPNValue=0x%#X, RQPNValue=0x%#X\n",
+ "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n",
boundary, npq_rqpn_value, rqpn_val);
/* stop PCIe DMA
@@ -2178,7 +2178,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
return 1;
}
- rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtl_write_byte(rtlpriv, MSR, bt_msr);
rtlpriv->cfg->ops->led_control(hw, ledaction);
if ((bt_msr & 0xfc) == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
index d9582ee1c335..efd22bd0b139 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
@@ -27,7 +27,6 @@
#define __RTL8821AE_RF_H__
#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
u8 bandwidth);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
index 72af4b9ee32b..174743aef943 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
@@ -64,6 +64,20 @@ static u16 odm_cfo(char value)
return ret_val;
}
+static u8 _rtl8821ae_evm_dbm_jaguar(char value)
+{
+ char ret_val = value;
+
+ /* -33dB~0dB to 33dB ~ 0dB*/
+ if (ret_val == -128)
+ ret_val = 127;
+ else if (ret_val < 0)
+ ret_val = 0 - ret_val;
+
+ ret_val = ret_val >> 1;
+ return ret_val;
+}
+
static void query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstatus, u8 *pdesc,
struct rx_fwinfo_8821ae *p_drvinfo,
@@ -246,7 +260,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
for (i = 0; i < max_spatial_stream; i++) {
evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]);
- evmdbm = rtl_evm_dbm_jaguar(p_phystrpt->rxevm[i]);
+ evmdbm = _rtl8821ae_evm_dbm_jaguar(p_phystrpt->rxevm[i]);
if (bpacket_match_bssid) {
/* Fill value in RFD, Get the first
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
index 2d0736a09fc0..d8b30690b00d 100644
--- a/drivers/net/wireless/rtlwifi/stats.c
+++ b/drivers/net/wireless/rtlwifi/stats.c
@@ -39,15 +39,8 @@ EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
u8 rtl_evm_db_to_percentage(char value)
{
- char ret_val;
- ret_val = value;
+ char ret_val = clamp(-value, 0, 33) * 3;
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
- ret_val = 0 - ret_val;
- ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
@@ -55,21 +48,6 @@ u8 rtl_evm_db_to_percentage(char value)
}
EXPORT_SYMBOL(rtl_evm_db_to_percentage);
-u8 rtl_evm_dbm_jaguar(char value)
-{
- char ret_val = value;
-
- /* -33dB~0dB to 33dB ~ 0dB*/
- if (ret_val == -128)
- ret_val = 127;
- else if (ret_val < 0)
- ret_val = 0 - ret_val;
-
- ret_val = ret_val >> 1;
- return ret_val;
-}
-EXPORT_SYMBOL(rtl_evm_dbm_jaguar);
-
static long rtl_translate_todbm(struct ieee80211_hw *hw,
u8 signal_strength_index)
{
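
Illustration (not part of the patch): the clamp()-based rewrite of rtl_evm_db_to_percentage() above is meant to be a pure simplification of the old branch-based arithmetic. The short user-space check below compares the two forms over the full input range, with clamp() open-coded and signed char assumed for the original char arithmetic.

/*
 * Compare the old branch-based rtl_evm_db_to_percentage() logic with the
 * new clamp(-value, 0, 33) * 3 form for every possible signed char input.
 */
#include <stdio.h>

static int demo_clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int old_evm(signed char value)
{
	int ret = value;

	if (ret >= 0)
		ret = 0;
	if (ret <= -33)
		ret = -33;
	ret = -ret;
	ret *= 3;
	return ret == 99 ? 100 : ret;
}

static int new_evm(signed char value)
{
	int ret = demo_clamp(-value, 0, 33) * 3;

	return ret == 99 ? 100 : ret;
}

int main(void)
{
	int v, mismatches = 0;

	for (v = -128; v <= 127; v++)
		if (old_evm((signed char)v) != new_evm((signed char)v))
			mismatches++;

	printf("mismatches: %d\n", mismatches);	/* expect 0 */
	return 0;
}
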
diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/rtlwifi/stats.h
index aa4eec80ccf7..2b57dffef572 100644
--- a/drivers/net/wireless/rtlwifi/stats.h
+++ b/drivers/net/wireless/rtlwifi/stats.h
@@ -35,7 +35,6 @@
u8 rtl_query_rxpwrpercentage(char antpower);
u8 rtl_evm_db_to_percentage(char value);
-u8 rtl_evm_dbm_jaguar(char value);
long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
struct rtl_stats *pstatus);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 46ee956d0235..2721cf89fb16 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
do {
status = usb_control_msg(udev, pipe, request, reqtype, value,
- index, pdata, len, 0); /*max. timeout*/
+ index, pdata, len, 1000);
if (status < 0) {
/* firmware download is checksumed, don't retry */
if ((value >= FW_8192C_START_ADDRESS &&
@@ -701,12 +701,18 @@ free:
static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
struct urb *urb;
usb_kill_anchored_urbs(&rtlusb->rx_submitted);
tasklet_kill(&rtlusb->rx_work_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_change_work);
+
+ flush_workqueue(rtlpriv->works.rtl_wq);
+ destroy_workqueue(rtlpriv->works.rtl_wq);
+
skb_queue_purge(&rtlusb->rx_queue);
while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
@@ -794,8 +800,6 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw)
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
struct ieee80211_tx_info *txinfo;
- SET_USB_STOP(rtlusb);
-
/* clean up rx stuff. */
_rtl_usb_cleanup_rx(hw);
@@ -834,7 +838,6 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
/* Enable software */
SET_USB_STOP(rtlusb);
- rtl_usb_deinit(hw);
rtlpriv->cfg->ops->hw_disable(hw);
}
@@ -1147,9 +1150,9 @@ void rtl_usb_disconnect(struct usb_interface *intf)
if (unlikely(!rtlpriv))
return;
-
/* just in case driver is removed before firmware callback */
wait_for_completion(&rtlpriv->firmware_loading_complete);
+ clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
/*ieee80211_unregister_hw will call ops_stop */
if (rtlmac->mac80211_registered == 1) {
ieee80211_unregister_hw(hw);
diff --git a/drivers/net/wireless/ti/wilink_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c
index a92bd3e89796..ea0e359bdb43 100644
--- a/drivers/net/wireless/ti/wilink_platform_data.c
+++ b/drivers/net/wireless/ti/wilink_platform_data.c
@@ -23,31 +23,6 @@
#include <linux/err.h>
#include <linux/wl12xx.h>
-static struct wl12xx_platform_data *wl12xx_platform_data;
-
-int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
-{
- if (wl12xx_platform_data)
- return -EBUSY;
- if (!data)
- return -EINVAL;
-
- wl12xx_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
- if (!wl12xx_platform_data)
- return -ENOMEM;
-
- return 0;
-}
-
-struct wl12xx_platform_data *wl12xx_get_platform_data(void)
-{
- if (!wl12xx_platform_data)
- return ERR_PTR(-ENODEV);
-
- return wl12xx_platform_data;
-}
-EXPORT_SYMBOL(wl12xx_get_platform_data);
-
static struct wl1251_platform_data *wl1251_platform_data;
int __init wl1251_set_platform_data(const struct wl1251_platform_data *data)
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index d4ba009ac9aa..5d54d16a59e7 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -468,7 +468,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
wl1251_tx_flush(wl);
wl1251_power_off(wl);
- memset(wl->bssid, 0, ETH_ALEN);
+ eth_zero_addr(wl->bssid);
wl->listen_int = 1;
wl->bss_type = MAX_BSS_TYPE;
@@ -547,7 +547,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
wl->vif = NULL;
- memset(wl->bssid, 0, ETH_ALEN);
+ eth_zero_addr(wl->bssid);
mutex_unlock(&wl->mutex);
}
@@ -1608,7 +1608,7 @@ int wl1251_free_hw(struct wl1251 *wl)
}
EXPORT_SYMBOL_GPL(wl1251_free_hw);
-MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
+MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_FIRMWARE(WL1251_FW_NAME);
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 144d1f8ba473..af0fe2e17151 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -24,8 +24,6 @@
#include <linux/err.h>
-#include <linux/wl12xx.h>
-
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
#include "../wlcore/io.h"
@@ -1770,11 +1768,44 @@ wl12xx_iface_combinations[] = {
},
};
+static const struct wl12xx_clock wl12xx_refclock_table[] = {
+ { 19200000, false, WL12XX_REFCLOCK_19 },
+ { 26000000, false, WL12XX_REFCLOCK_26 },
+ { 26000000, true, WL12XX_REFCLOCK_26_XTAL },
+ { 38400000, false, WL12XX_REFCLOCK_38 },
+ { 38400000, true, WL12XX_REFCLOCK_38_XTAL },
+ { 52000000, false, WL12XX_REFCLOCK_52 },
+ { 0, false, 0 }
+};
+
+static const struct wl12xx_clock wl12xx_tcxoclock_table[] = {
+ { 16368000, true, WL12XX_TCXOCLOCK_16_368 },
+ { 16800000, true, WL12XX_TCXOCLOCK_16_8 },
+ { 19200000, true, WL12XX_TCXOCLOCK_19_2 },
+ { 26000000, true, WL12XX_TCXOCLOCK_26 },
+ { 32736000, true, WL12XX_TCXOCLOCK_32_736 },
+ { 33600000, true, WL12XX_TCXOCLOCK_33_6 },
+ { 38400000, true, WL12XX_TCXOCLOCK_38_4 },
+ { 52000000, true, WL12XX_TCXOCLOCK_52 },
+ { 0, false, 0 }
+};
+
+static int wl12xx_get_clock_idx(const struct wl12xx_clock *table,
+ u32 freq, bool xtal)
+{
+ int i;
+
+ for (i = 0; table[i].freq != 0; i++)
+ if ((table[i].freq == freq) && (table[i].xtal == xtal))
+ return table[i].hw_idx;
+
+ return -EINVAL;
+}
+
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
- struct wl12xx_platform_data *pdata = pdev_data->pdata;
BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
@@ -1799,7 +1830,17 @@ static int wl12xx_setup(struct wl1271 *wl)
wl12xx_conf_init(wl);
if (!fref_param) {
- priv->ref_clock = pdata->board_ref_clock;
+ priv->ref_clock = wl12xx_get_clock_idx(wl12xx_refclock_table,
+ pdev_data->ref_clock_freq,
+ pdev_data->ref_clock_xtal);
+ if (priv->ref_clock < 0) {
+ wl1271_error("Invalid ref_clock frequency (%d Hz, %s)",
+ pdev_data->ref_clock_freq,
+ pdev_data->ref_clock_xtal ?
+ "XTAL" : "not XTAL");
+
+ return priv->ref_clock;
+ }
} else {
if (!strcmp(fref_param, "19.2"))
priv->ref_clock = WL12XX_REFCLOCK_19;
@@ -1817,9 +1858,17 @@ static int wl12xx_setup(struct wl1271 *wl)
wl1271_error("Invalid fref parameter %s", fref_param);
}
- if (!tcxo_param) {
- priv->tcxo_clock = pdata->board_tcxo_clock;
- } else {
+ if (!tcxo_param && pdev_data->tcxo_clock_freq) {
+ priv->tcxo_clock = wl12xx_get_clock_idx(wl12xx_tcxoclock_table,
+ pdev_data->tcxo_clock_freq,
+ true);
+ if (priv->tcxo_clock < 0) {
+ wl1271_error("Invalid tcxo_clock frequency (%d Hz)",
+ pdev_data->tcxo_clock_freq);
+
+ return priv->tcxo_clock;
+ }
+ } else if (tcxo_param) {
if (!strcmp(tcxo_param, "19.2"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2;
else if (!strcmp(tcxo_param, "26"))
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 75c92658bfea..5952e99ace1b 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -82,6 +82,34 @@ struct wl12xx_priv {
struct wl127x_rx_mem_pool_addr *rx_mem_addr;
};
+/* Reference clock values */
+enum {
+ WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */
+ WL12XX_REFCLOCK_26 = 1, /* 26 MHz */
+ WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */
+ WL12XX_REFCLOCK_52 = 3, /* 52 MHz */
+ WL12XX_REFCLOCK_38_XTAL = 4, /* 38.4 MHz, XTAL */
+ WL12XX_REFCLOCK_26_XTAL = 5, /* 26 MHz, XTAL */
+};
+
+/* TCXO clock values */
+enum {
+ WL12XX_TCXOCLOCK_19_2 = 0, /* 19.2MHz */
+ WL12XX_TCXOCLOCK_26 = 1, /* 26 MHz */
+ WL12XX_TCXOCLOCK_38_4 = 2, /* 38.4MHz */
+ WL12XX_TCXOCLOCK_52 = 3, /* 52 MHz */
+ WL12XX_TCXOCLOCK_16_368 = 4, /* 16.368 MHz */
+ WL12XX_TCXOCLOCK_32_736 = 5, /* 32.736 MHz */
+ WL12XX_TCXOCLOCK_16_8 = 6, /* 16.8 MHz */
+ WL12XX_TCXOCLOCK_33_6 = 7, /* 33.6 MHz */
+};
+
+struct wl12xx_clock {
+ u32 freq;
+ bool xtal;
+ u8 hw_idx;
+};
+
struct wl12xx_fw_packet_counters {
/* Cumulative counter of released packets per AC */
u8 tx_released_pkts[NUM_TX_QUEUES];
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index c93fae95baac..5fbd2230f372 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index c28f06854195..548bb9e7e91e 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -77,7 +77,7 @@ static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
wl1271_debug(DEBUG_EVENT,
"SMART_CONFIG_SYNC_EVENT_ID, freq: %d (chan: %d band %d)",
freq, sync_channel, sync_band);
- skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, 20,
+ skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL, 20,
WLCORE_VENDOR_EVENT_SC_SYNC,
GFP_KERNEL);
@@ -98,7 +98,7 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl,
wl1271_debug(DEBUG_EVENT, "SMART_CONFIG_DECODE_EVENT_ID");
wl1271_dump_ascii(DEBUG_EVENT, "SSID:", ssid, ssid_len);
- skb = cfg80211_vendor_event_alloc(wl->hw->wiphy,
+ skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL,
ssid_len + pwd_len + 20,
WLCORE_VENDOR_EVENT_SC_DECODE,
GFP_KERNEL);
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 77752b03f189..19b7ec7b69c2 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -22,7 +22,6 @@
*/
#include <linux/slab.h>
-#include <linux/wl12xx.h>
#include <linux/export.h>
#include "debug.h"
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index c26fc2106e5b..68919f8d4310 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -367,7 +367,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
wl->links[*hlid].allocated_pkts = 0;
wl->links[*hlid].prev_freed_pkts = 0;
wl->links[*hlid].ba_bitmap = 0;
- memset(wl->links[*hlid].addr, 0, ETH_ALEN);
+ eth_zero_addr(wl->links[*hlid].addr);
/*
* At this point op_tx() will not add more packets to the queues. We
@@ -1293,7 +1293,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
memcpy(hdr->addr2, vif->addr, ETH_ALEN);
- memset(hdr->addr3, 0xff, ETH_ALEN);
+ eth_broadcast_addr(hdr->addr3);
ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
skb->data, skb->len, 0,
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 68f3bf229b5a..eb43f94a1597 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -502,7 +502,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_HEX(irq);
/* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */
DRIVER_STATE_PRINT_HEX(hw_pg_ver);
- DRIVER_STATE_PRINT_HEX(platform_quirks);
+ DRIVER_STATE_PRINT_HEX(irq_flags);
DRIVER_STATE_PRINT_HEX(chip.id);
DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index 0f2cfb0d2a9e..bf14676e6515 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,8 @@
#include "wlcore.h"
-int wl1271_format_buffer(char __user *userbuf, size_t count,
- loff_t *ppos, char *fmt, ...);
+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...);
int wl1271_debugfs_init(struct wl1271 *wl);
void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 1e136993580f..0be807951afe 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -25,8 +25,8 @@
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
-#include <linux/wl12xx.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include "wlcore.h"
#include "debug.h"
@@ -538,7 +538,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
* In case edge triggered interrupt must be used, we cannot iterate
* more than once without introducing race conditions with the hardirq.
*/
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
loopcount = 1;
wl1271_debug(DEBUG_IRQ, "IRQ work");
@@ -6249,7 +6249,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
- wl->platform_quirks = 0;
wl->system_hlid = WL12XX_SYSTEM_HLID;
wl->active_sta_count = 0;
wl->active_link_count = 0;
@@ -6390,8 +6389,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
- struct wl12xx_platform_data *pdata = pdev_data->pdata;
- unsigned long irqflags;
+ struct resource *res;
+
int ret;
irq_handler_t hardirq_fn = NULL;
@@ -6418,19 +6417,23 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
/* adjust some runtime configuration parameters */
wlcore_adjust_conf(wl);
- wl->irq = platform_get_irq(pdev, 0);
- wl->platform_quirks = pdata->platform_quirks;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ wl1271_error("Could not get IRQ resource");
+ goto out_free_nvs;
+ }
+
+ wl->irq = res->start;
+ wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
wl->if_ops = pdev_data->if_ops;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
- irqflags = IRQF_TRIGGER_RISING;
+ if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
hardirq_fn = wlcore_hardirq;
- } else {
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
- }
+ else
+ wl->irq_flags |= IRQF_ONESHOT;
ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
- irqflags, pdev->name, wl);
+ wl->irq_flags, pdev->name, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free_nvs;
@@ -6441,7 +6444,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
if (!ret) {
wl->irq_wake_enabled = true;
device_init_wakeup(wl->dev, 1);
- if (pdata->pwr_in_suspend)
+ if (pdev_data->pwr_in_suspend)
wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
}
#endif
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index d3dd7bfdf3f1..ea7e07abca4e 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -31,9 +31,10 @@
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/gpio.h>
-#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
#include <linux/printk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
@@ -214,6 +215,52 @@ static struct wl1271_if_operations sdio_ops = {
.set_block_size = wl1271_sdio_set_block_size,
};
+#ifdef CONFIG_OF
+static const struct of_device_id wlcore_sdio_of_match_table[] = {
+ { .compatible = "ti,wl1271" },
+ { .compatible = "ti,wl1273" },
+ { .compatible = "ti,wl1281" },
+ { .compatible = "ti,wl1283" },
+ { .compatible = "ti,wl1801" },
+ { .compatible = "ti,wl1805" },
+ { .compatible = "ti,wl1807" },
+ { .compatible = "ti,wl1831" },
+ { .compatible = "ti,wl1835" },
+ { .compatible = "ti,wl1837" },
+ { }
+};
+
+static int wlcore_probe_of(struct device *dev, int *irq,
+ struct wlcore_platdev_data *pdev_data)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np || !of_match_node(wlcore_sdio_of_match_table, np))
+ return -ENODATA;
+
+ *irq = irq_of_parse_and_map(np, 0);
+ if (!*irq) {
+ dev_err(dev, "No irq in platform data\n");
+ kfree(pdev_data);
+ return -EINVAL;
+ }
+
+ /* optional clock frequency params */
+ of_property_read_u32(np, "ref-clock-frequency",
+ &pdev_data->ref_clock_freq);
+ of_property_read_u32(np, "tcxo-clock-frequency",
+ &pdev_data->tcxo_clock_freq);
+
+ return 0;
+}
+#else
+static int wlcore_probe_of(struct device *dev, int *irq,
+ struct wlcore_platdev_data *pdev_data)
+{
+ return -ENODATA;
+}
+#endif
+
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -222,6 +269,7 @@ static int wl1271_probe(struct sdio_func *func,
struct resource res[1];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
+ int irq;
const char *chip_family;
/* We are only able to handle the wlan function */
@@ -245,19 +293,15 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- pdev_data.pdata = wl12xx_get_platform_data();
- if (IS_ERR(pdev_data.pdata)) {
- ret = PTR_ERR(pdev_data.pdata);
- dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+ if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
goto out_free_glue;
- }
/* if sdio can keep power while host is suspended, enable wow */
mmcflags = sdio_get_host_pm_caps(func);
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- pdev_data.pdata->pwr_in_suspend = true;
+ pdev_data.pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -286,8 +330,9 @@ static int wl1271_probe(struct sdio_func *func,
memset(res, 0x00, sizeof(res));
- res[0].start = pdev_data.pdata->irq;
- res[0].flags = IORESOURCE_IRQ;
+ res[0].start = irq;
+ res[0].flags = IORESOURCE_IRQ |
+ irqd_get_trigger_type(irq_get_irq_data(irq));
res[0].name = "irq";
ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
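With the wl12xx_platform_data quirks gone, the SDIO glue now encodes the interrupt trigger type directly in the IORESOURCE_IRQ flags and wlcore recovers it with IRQF_TRIGGER_MASK; this works because the IRQ_TYPE_* values returned by irqd_get_trigger_type() use the same bits as the IRQF_TRIGGER_* flags. A consolidated sketch of the round trip (both halves appear in the hunks above):

    /* Producer side (sdio.c): stash the trigger type in the IRQ resource. */
    res[0].start = irq;
    res[0].flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));

    /* Consumer side (main.c): recover it and pick the handler setup. */
    wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
    if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
    	hardirq_fn = wlcore_hardirq;	/* edge-triggered: need a hardirq handler */
    else
    	wl->irq_flags |= IRQF_ONESHOT;	/* level-triggered: keep masked until the thread runs */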
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 69601f6741d9..f1ac2839d97c 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -331,11 +331,7 @@ static int wl1271_probe(struct spi_device *spi)
memset(&pdev_data, 0x00, sizeof(pdev_data));
- pdev_data.pdata = dev_get_platdata(&spi->dev);
- if (!pdev_data.pdata) {
- dev_err(&spi->dev, "no platform data\n");
- return -ENODEV;
- }
+ /* TODO: add DT parsing when needed */
pdev_data.if_ops = &spi_ops;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index d599c869e6e8..7f363fa566a3 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -197,6 +197,8 @@ struct wl1271 {
int irq;
+ int irq_flags;
+
spinlock_t wl_lock;
enum wlcore_state state;
@@ -404,9 +406,6 @@ struct wl1271 {
/* Quirks of specific hardware revisions */
unsigned int quirks;
- /* Platform limitations */
- unsigned int platform_quirks;
-
/* number of currently active RX BA sessions */
int ba_rx_session_count;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 3396ce5a934d..39efc6d78b10 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -201,8 +201,12 @@ struct wl1271_if_operations {
};
struct wlcore_platdev_data {
- struct wl12xx_platform_data *pdata;
struct wl1271_if_operations *if_ops;
+
+ bool ref_clock_xtal; /* specify whether the clock is XTAL or not */
+ u32 ref_clock_freq; /* in Hertz */
+ u32 tcxo_clock_freq; /* in Hertz, tcxo is always XTAL */
+ bool pwr_in_suspend;
};
#define MAX_NUM_KEYS 14
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 589fa256256b..8a495b318b6f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -238,6 +238,8 @@ struct xenvif {
unsigned int num_queues; /* active queues, resource allocated */
unsigned int stalled_queues;
+ struct xenbus_watch credit_watch;
+
spinlock_t lock;
#ifdef CONFIG_DEBUG_FS
@@ -260,6 +262,8 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
return to_xenbus_device(vif->dev->dev.parent);
}
+void xenvif_tx_credit_callback(unsigned long data);
+
struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid,
unsigned int handle);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 3aa8648080c8..1a83e190fc15 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -437,7 +437,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
* stolen by an Ethernet bridge for STP purposes.
* (FE:FF:FF:FF:FF:FF)
*/
- memset(dev->dev_addr, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(dev->dev_addr);
dev->dev_addr[0] &= ~0x01;
netif_carrier_off(dev);
@@ -463,6 +463,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
queue->credit_bytes = queue->remaining_credit = ~0UL;
queue->credit_usec = 0UL;
init_timer(&queue->credit_timeout);
+ queue->credit_timeout.function = xenvif_tx_credit_callback;
queue->credit_window_start = get_jiffies_64();
queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 997cf0901ac2..4de46aa61d95 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -642,7 +642,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
queue->remaining_credit = min(max_credit, max_burst);
}
-static void tx_credit_callback(unsigned long data)
+void xenvif_tx_credit_callback(unsigned long data)
{
struct xenvif_queue *queue = (struct xenvif_queue *)data;
tx_add_credit(queue);
@@ -1165,8 +1165,6 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
if (size > queue->remaining_credit) {
queue->credit_timeout.data =
(unsigned long)queue;
- queue->credit_timeout.function =
- tx_credit_callback;
mod_timer(&queue->credit_timeout,
next_credit);
queue->credit_window_start = next_credit;
@@ -1782,7 +1780,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
int err = -ENOMEM;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
- tx_ring_ref, &addr);
+ &tx_ring_ref, 1, &addr);
if (err)
goto err;
@@ -1790,7 +1788,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
- rx_ring_ref, &addr);
+ &rx_ring_ref, 1, &addr);
if (err)
goto err;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 794204e34fba..3d8dbf5f2d39 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -41,6 +41,7 @@ static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
+static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
enum xenbus_state state);
@@ -232,6 +233,7 @@ static int netback_remove(struct xenbus_device *dev)
unregister_hotplug_status_watch(be);
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ xen_unregister_watchers(be->vif);
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
xenvif_free(be->vif);
be->vif = NULL;
@@ -430,6 +432,7 @@ static int backend_create_xenvif(struct backend_info *be)
static void backend_disconnect(struct backend_info *be)
{
if (be->vif) {
+ xen_unregister_watchers(be->vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
@@ -645,6 +648,59 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
return 0;
}
+static void xen_net_rate_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
+ struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
+ unsigned long credit_bytes;
+ unsigned long credit_usec;
+ unsigned int queue_index;
+
+ xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+ for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
+ struct xenvif_queue *queue = &vif->queues[queue_index];
+
+ queue->credit_bytes = credit_bytes;
+ queue->credit_usec = credit_usec;
+ if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
+ queue->remaining_credit > queue->credit_bytes) {
+ queue->remaining_credit = queue->credit_bytes;
+ }
+ }
+}
+
+static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+{
+ int err = 0;
+ char *node;
+ unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
+
+ node = kmalloc(maxlen, GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ snprintf(node, maxlen, "%s/rate", dev->nodename);
+ vif->credit_watch.node = node;
+ vif->credit_watch.callback = xen_net_rate_changed;
+ err = register_xenbus_watch(&vif->credit_watch);
+ if (err) {
+ pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
+ kfree(node);
+ vif->credit_watch.node = NULL;
+ vif->credit_watch.callback = NULL;
+ }
+ return err;
+}
+
+static void xen_unregister_watchers(struct xenvif *vif)
+{
+ if (vif->credit_watch.node) {
+ unregister_xenbus_watch(&vif->credit_watch);
+ kfree(vif->credit_watch.node);
+ vif->credit_watch.node = NULL;
+ }
+}
+
static void unregister_hotplug_status_watch(struct backend_info *be)
{
if (be->have_hotplug_status_watch) {
@@ -709,6 +765,7 @@ static void connect(struct backend_info *be)
}
xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+ xen_register_watchers(dev, be->vif);
read_xenbus_vif_flags(be);
/* Use the number of queues requested by the frontend */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 720aaf6313d2..3f45afd4382e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -560,7 +560,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!netif_carrier_ok(dev) ||
(slots > 1 && !xennet_can_sg(dev)) ||
- netif_needs_gso(dev, skb, netif_skb_features(skb)))) {
+ netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irqrestore(&queue->tx_lock, flags);
goto drop;
}
@@ -1483,6 +1483,7 @@ static int setup_netfront(struct xenbus_device *dev,
{
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
+ grant_ref_t gref;
int err;
queue->tx_ring_ref = GRANT_INVALID_REF;
@@ -1499,10 +1500,10 @@ static int setup_netfront(struct xenbus_device *dev,
SHARED_RING_INIT(txs);
FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
- err = xenbus_grant_ring(dev, virt_to_mfn(txs));
+ err = xenbus_grant_ring(dev, txs, 1, &gref);
if (err < 0)
goto grant_tx_ring_fail;
- queue->tx_ring_ref = err;
+ queue->tx_ring_ref = gref;
rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!rxs) {
@@ -1513,10 +1514,10 @@ static int setup_netfront(struct xenbus_device *dev,
SHARED_RING_INIT(rxs);
FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
- err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
+ err = xenbus_grant_ring(dev, rxs, 1, &gref);
if (err < 0)
goto grant_rx_ring_fail;
- queue->rx_ring_ref = err;
+ queue->rx_ring_ref = gref;
if (feature_split_evtchn)
err = setup_netfront_split(queue);
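Both netfront and netback are adjusted here for the multi-page grant API: xenbus_grant_ring() now takes a page count and returns the grant references through an array argument instead of its return value, and xenbus_map_ring_valloc() takes the matching array plus count on the backend side. A single-page sketch, not part of the patch, with ring_page and be_dev as placeholder names:

    grant_ref_t gref;
    void *backend_addr;
    int err;

    /* Frontend: grant one shared ring page to the backend domain. */
    err = xenbus_grant_ring(dev, ring_page, 1, &gref);
    if (err < 0)
    	return err;

    /* Backend: map that same grant into its own address space. */
    err = xenbus_map_ring_valloc(be_dev, &gref, 1, &backend_addr);
    if (err)
    	return err;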
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 7929fac13e1c..107714e4405f 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -73,4 +73,5 @@ source "drivers/nfc/microread/Kconfig"
source "drivers/nfc/nfcmrvl/Kconfig"
source "drivers/nfc/st21nfca/Kconfig"
source "drivers/nfc/st21nfcb/Kconfig"
+source "drivers/nfc/nxp-nci/Kconfig"
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 6b23a2c6e34a..a4292d790f9b 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -13,5 +13,6 @@ obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/
obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb/
+obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci/
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index df85cd3d9db0..661e2c8143c4 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -286,7 +286,7 @@ static int microread_i2c_probe(struct i2c_client *client,
if (r < 0)
goto err_irq;
- nfc_info(&client->dev, "Probed");
+ nfc_info(&client->dev, "Probed\n");
return 0;
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 85e8bcf98693..ad4933cefbd1 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -111,7 +111,7 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, 0, 0);
if (!priv->ndev) {
- nfc_err(dev, "nci_allocate_device failed");
+ nfc_err(dev, "nci_allocate_device failed\n");
rc = -ENOMEM;
goto error;
}
@@ -120,7 +120,7 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
rc = nci_register_device(priv->ndev);
if (rc) {
- nfc_err(dev, "nci_register_device failed %d", rc);
+ nfc_err(dev, "nci_register_device failed %d\n", rc);
nci_free_device(priv->ndev);
goto error;
}
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 3221ca37d6c9..6cf15c1a2618 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -80,7 +80,7 @@ static void nfcmrvl_bulk_complete(struct urb *urb)
if (!urb->status) {
if (nfcmrvl_nci_recv_frame(drv_data->priv, urb->transfer_buffer,
urb->actual_length) < 0)
- nfc_err(&drv_data->udev->dev, "corrupted Rx packet");
+ nfc_err(&drv_data->udev->dev, "corrupted Rx packet\n");
}
if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags))
@@ -96,7 +96,7 @@ static void nfcmrvl_bulk_complete(struct urb *urb)
*/
if (err != -EPERM && err != -ENODEV)
nfc_err(&drv_data->udev->dev,
- "urb %p failed to resubmit (%d)", urb, -err);
+ "urb %p failed to resubmit (%d)\n", urb, -err);
usb_unanchor_urb(urb);
}
}
@@ -137,7 +137,7 @@ nfcmrvl_submit_bulk_urb(struct nfcmrvl_usb_drv_data *drv_data, gfp_t mem_flags)
if (err) {
if (err != -EPERM && err != -ENODEV)
nfc_err(&drv_data->udev->dev,
- "urb %p submission failed (%d)", urb, -err);
+ "urb %p submission failed (%d)\n", urb, -err);
usb_unanchor_urb(urb);
}
@@ -153,7 +153,7 @@ static void nfcmrvl_tx_complete(struct urb *urb)
struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
- nfc_info(priv->dev, "urb %p status %d count %d",
+ nfc_info(priv->dev, "urb %p status %d count %d\n",
urb, urb->status, urb->actual_length);
spin_lock(&drv_data->txlock);
@@ -253,7 +253,7 @@ static int nfcmrvl_usb_nci_send(struct nfcmrvl_private *priv,
if (err) {
if (err != -EPERM && err != -ENODEV)
nfc_err(&drv_data->udev->dev,
- "urb %p submission failed (%d)", urb, -err);
+ "urb %p submission failed (%d)\n", urb, -err);
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
} else {
@@ -293,7 +293,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
int i;
struct usb_device *udev = interface_to_usbdev(intf);
- nfc_info(&udev->dev, "intf %p id %p", intf, id);
+ nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
drv_data = devm_kzalloc(&intf->dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data)
@@ -348,7 +348,7 @@ static void nfcmrvl_disconnect(struct usb_interface *intf)
if (!drv_data)
return;
- nfc_info(&drv_data->udev->dev, "intf %p", intf);
+ nfc_info(&drv_data->udev->dev, "intf %p\n", intf);
nfcmrvl_nci_unregister_dev(drv_data->priv);
@@ -360,7 +360,7 @@ static int nfcmrvl_suspend(struct usb_interface *intf, pm_message_t message)
{
struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
- nfc_info(&drv_data->udev->dev, "intf %p", intf);
+ nfc_info(&drv_data->udev->dev, "intf %p\n", intf);
if (drv_data->suspend_count++)
return 0;
@@ -401,7 +401,7 @@ static int nfcmrvl_resume(struct usb_interface *intf)
struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
int err = 0;
- nfc_info(&drv_data->udev->dev, "intf %p", intf);
+ nfc_info(&drv_data->udev->dev, "intf %p\n", intf);
if (--drv_data->suspend_count)
return 0;
diff --git a/drivers/nfc/nxp-nci/Kconfig b/drivers/nfc/nxp-nci/Kconfig
new file mode 100644
index 000000000000..37b40612520d
--- /dev/null
+++ b/drivers/nfc/nxp-nci/Kconfig
@@ -0,0 +1,25 @@
+config NFC_NXP_NCI
+ tristate "NXP-NCI NFC driver"
+ depends on NFC_NCI
+ default n
+ ---help---
+ Generic core driver for NXP NCI chips such as the NPC100
+ or PN7150 families.
+ This is a driver based on the NCI NFC kernel layers and
+ will thus not work with the NXP libnfc library.
+
+ To compile this driver as a module, choose m here. The module will
+ be called nxp_nci.
+ Say N if unsure.
+
+config NFC_NXP_NCI_I2C
+ tristate "NXP-NCI I2C support"
+ depends on NFC_NXP_NCI && I2C
+ ---help---
+ This module adds support for an I2C interface to the NXP NCI
+ chips.
+ Select this if your platform is using the I2C bus.
+
+ To compile this driver as a module, choose m here. The module will
+ be called nxp_nci_i2c.
+ Say Y if unsure.
diff --git a/drivers/nfc/nxp-nci/Makefile b/drivers/nfc/nxp-nci/Makefile
new file mode 100644
index 000000000000..c008be30bb18
--- /dev/null
+++ b/drivers/nfc/nxp-nci/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for NXP-NCI NFC driver
+#
+
+nxp-nci-objs = core.o firmware.o
+nxp-nci_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci.o
+obj-$(CONFIG_NFC_NXP_NCI_I2C) += nxp-nci_i2c.o
+
+ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
new file mode 100644
index 000000000000..8979636d48ea
--- /dev/null
+++ b/drivers/nfc/nxp-nci/core.c
@@ -0,0 +1,186 @@
+/*
+ * Generic driver for NXP NCI NFC chips
+ *
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/nxp-nci.h>
+
+#include <net/nfc/nci_core.h>
+
+#include "nxp-nci.h"
+
+#define NXP_NCI_HDR_LEN 4
+
+#define NXP_NCI_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO14443_B_MASK | \
+ NFC_PROTO_NFC_DEP_MASK)
+
+static int nxp_nci_open(struct nci_dev *ndev)
+{
+ struct nxp_nci_info *info = nci_get_drvdata(ndev);
+ int r = 0;
+
+ mutex_lock(&info->info_lock);
+
+ if (info->mode != NXP_NCI_MODE_COLD) {
+ r = -EBUSY;
+ goto open_exit;
+ }
+
+ if (info->phy_ops->set_mode)
+ r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_NCI);
+
+ info->mode = NXP_NCI_MODE_NCI;
+
+open_exit:
+ mutex_unlock(&info->info_lock);
+ return r;
+}
+
+static int nxp_nci_close(struct nci_dev *ndev)
+{
+ struct nxp_nci_info *info = nci_get_drvdata(ndev);
+ int r = 0;
+
+ mutex_lock(&info->info_lock);
+
+ if (info->phy_ops->set_mode)
+ r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+
+ info->mode = NXP_NCI_MODE_COLD;
+
+ mutex_unlock(&info->info_lock);
+ return r;
+}
+
+static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nxp_nci_info *info = nci_get_drvdata(ndev);
+ int r;
+
+ if (!info->phy_ops->write) {
+ r = -ENOTSUPP;
+ goto send_exit;
+ }
+
+ if (info->mode != NXP_NCI_MODE_NCI) {
+ r = -EINVAL;
+ goto send_exit;
+ }
+
+ r = info->phy_ops->write(info->phy_id, skb);
+ if (r < 0)
+ kfree_skb(skb);
+
+send_exit:
+ return r;
+}
+
+static struct nci_ops nxp_nci_ops = {
+ .open = nxp_nci_open,
+ .close = nxp_nci_close,
+ .send = nxp_nci_send,
+ .fw_download = nxp_nci_fw_download,
+};
+
+int nxp_nci_probe(void *phy_id, struct device *pdev,
+ struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload,
+ struct nci_dev **ndev)
+{
+ struct nxp_nci_info *info;
+ int r;
+
+ info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL);
+ if (!info) {
+ r = -ENOMEM;
+ goto probe_exit;
+ }
+
+ info->phy_id = phy_id;
+ info->pdev = pdev;
+ info->phy_ops = phy_ops;
+ info->max_payload = max_payload;
+ INIT_WORK(&info->fw_info.work, nxp_nci_fw_work);
+ init_completion(&info->fw_info.cmd_completion);
+ mutex_init(&info->info_lock);
+
+ if (info->phy_ops->set_mode) {
+ r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+ if (r < 0)
+ goto probe_exit;
+ }
+
+ info->mode = NXP_NCI_MODE_COLD;
+
+ info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS,
+ NXP_NCI_HDR_LEN, 0);
+ if (!info->ndev) {
+ r = -ENOMEM;
+ goto probe_exit;
+ }
+
+ nci_set_parent_dev(info->ndev, pdev);
+ nci_set_drvdata(info->ndev, info);
+ r = nci_register_device(info->ndev);
+ if (r < 0)
+ goto probe_exit_free_nci;
+
+ *ndev = info->ndev;
+
+ goto probe_exit;
+
+probe_exit_free_nci:
+ nci_free_device(info->ndev);
+probe_exit:
+ return r;
+}
+EXPORT_SYMBOL(nxp_nci_probe);
+
+void nxp_nci_remove(struct nci_dev *ndev)
+{
+ struct nxp_nci_info *info = nci_get_drvdata(ndev);
+
+ if (info->mode == NXP_NCI_MODE_FW)
+ nxp_nci_fw_work_complete(info, -ESHUTDOWN);
+ cancel_work_sync(&info->fw_info.work);
+
+ mutex_lock(&info->info_lock);
+
+ if (info->phy_ops->set_mode)
+ info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+
+ mutex_unlock(&info->info_lock);
+}
+EXPORT_SYMBOL(nxp_nci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NXP NCI NFC driver");
+MODULE_AUTHOR("Clément Perrochaud <clement.perrochaud@nxp.com>");
diff --git a/drivers/nfc/nxp-nci/firmware.c b/drivers/nfc/nxp-nci/firmware.c
new file mode 100644
index 000000000000..5291797324ba
--- /dev/null
+++ b/drivers/nfc/nxp-nci/firmware.c
@@ -0,0 +1,325 @@
+/*
+ * Generic driver for NXP NCI NFC chips
+ *
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Author: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/nfc.h>
+#include <linux/unaligned/access_ok.h>
+
+#include "nxp-nci.h"
+
+/* Crypto operations can take up to 30 seconds */
+#define NXP_NCI_FW_ANSWER_TIMEOUT msecs_to_jiffies(30000)
+
+#define NXP_NCI_FW_CMD_RESET 0xF0
+#define NXP_NCI_FW_CMD_GETVERSION 0xF1
+#define NXP_NCI_FW_CMD_CHECKINTEGRITY 0xE0
+#define NXP_NCI_FW_CMD_WRITE 0xC0
+#define NXP_NCI_FW_CMD_READ 0xA2
+#define NXP_NCI_FW_CMD_GETSESSIONSTATE 0xF2
+#define NXP_NCI_FW_CMD_LOG 0xA7
+#define NXP_NCI_FW_CMD_FORCE 0xD0
+#define NXP_NCI_FW_CMD_GET_DIE_ID 0xF4
+
+#define NXP_NCI_FW_CHUNK_FLAG 0x0400
+
+#define NXP_NCI_FW_RESULT_OK 0x00
+#define NXP_NCI_FW_RESULT_INVALID_ADDR 0x01
+#define NXP_NCI_FW_RESULT_GENERIC_ERROR 0x02
+#define NXP_NCI_FW_RESULT_UNKNOWN_CMD 0x0B
+#define NXP_NCI_FW_RESULT_ABORTED_CMD 0x0C
+#define NXP_NCI_FW_RESULT_PLL_ERROR 0x0D
+#define NXP_NCI_FW_RESULT_ADDR_RANGE_OFL_ERROR 0x1E
+#define NXP_NCI_FW_RESULT_BUFFER_OFL_ERROR 0x1F
+#define NXP_NCI_FW_RESULT_MEM_BSY 0x20
+#define NXP_NCI_FW_RESULT_SIGNATURE_ERROR 0x21
+#define NXP_NCI_FW_RESULT_FIRMWARE_VERSION_ERROR 0x24
+#define NXP_NCI_FW_RESULT_PROTOCOL_ERROR 0x28
+#define NXP_NCI_FW_RESULT_SFWU_DEGRADED 0x2A
+#define NXP_NCI_FW_RESULT_PH_STATUS_FIRST_CHUNK 0x2D
+#define NXP_NCI_FW_RESULT_PH_STATUS_NEXT_CHUNK 0x2E
+#define NXP_NCI_FW_RESULT_PH_STATUS_INTERNAL_ERROR_5 0xC5
+
+void nxp_nci_fw_work_complete(struct nxp_nci_info *info, int result)
+{
+ struct nxp_nci_fw_info *fw_info = &info->fw_info;
+ int r;
+
+ if (info->phy_ops->set_mode) {
+ r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
+ if (r < 0 && result == 0)
+ result = -r;
+ }
+
+ info->mode = NXP_NCI_MODE_COLD;
+
+ if (fw_info->fw) {
+ release_firmware(fw_info->fw);
+ fw_info->fw = NULL;
+ }
+
+ nfc_fw_download_done(info->ndev->nfc_dev, fw_info->name, (u32) -result);
+}
+
+/* crc_ccitt cannot be used since it is computed MSB first and not LSB first */
+static u16 nxp_nci_fw_crc(u8 const *buffer, size_t len)
+{
+ u16 crc = 0xffff;
+
+ while (len--) {
+ crc = ((crc >> 8) | (crc << 8)) ^ *buffer++;
+ crc ^= (crc & 0xff) >> 4;
+ crc ^= (crc & 0xff) << 12;
+ crc ^= (crc & 0xff) << 5;
+ }
+
+ return crc;
+}
+
+static int nxp_nci_fw_send_chunk(struct nxp_nci_info *info)
+{
+ struct nxp_nci_fw_info *fw_info = &info->fw_info;
+ u16 header, crc;
+ struct sk_buff *skb;
+ size_t chunk_len;
+ size_t remaining_len;
+ int r;
+
+ skb = nci_skb_alloc(info->ndev, info->max_payload, GFP_KERNEL);
+ if (!skb) {
+ r = -ENOMEM;
+ goto chunk_exit;
+ }
+
+ chunk_len = info->max_payload - NXP_NCI_FW_HDR_LEN - NXP_NCI_FW_CRC_LEN;
+ remaining_len = fw_info->frame_size - fw_info->written;
+
+ if (remaining_len > chunk_len) {
+ header = NXP_NCI_FW_CHUNK_FLAG;
+ } else {
+ chunk_len = remaining_len;
+ header = 0x0000;
+ }
+
+ header |= chunk_len & NXP_NCI_FW_FRAME_LEN_MASK;
+ put_unaligned_be16(header, skb_put(skb, NXP_NCI_FW_HDR_LEN));
+
+ memcpy(skb_put(skb, chunk_len), fw_info->data + fw_info->written,
+ chunk_len);
+
+ crc = nxp_nci_fw_crc(skb->data, chunk_len + NXP_NCI_FW_HDR_LEN);
+ put_unaligned_be16(crc, skb_put(skb, NXP_NCI_FW_CRC_LEN));
+
+ r = info->phy_ops->write(info->phy_id, skb);
+ if (r >= 0)
+ r = chunk_len;
+
+ kfree_skb(skb);
+
+chunk_exit:
+ return r;
+}
+
+static int nxp_nci_fw_send(struct nxp_nci_info *info)
+{
+ struct nxp_nci_fw_info *fw_info = &info->fw_info;
+ long completion_rc;
+ int r;
+
+ reinit_completion(&fw_info->cmd_completion);
+
+ if (fw_info->written == 0) {
+ fw_info->frame_size = get_unaligned_be16(fw_info->data) &
+ NXP_NCI_FW_FRAME_LEN_MASK;
+ fw_info->data += NXP_NCI_FW_HDR_LEN;
+ fw_info->size -= NXP_NCI_FW_HDR_LEN;
+ }
+
+ if (fw_info->frame_size > fw_info->size)
+ return -EMSGSIZE;
+
+ r = nxp_nci_fw_send_chunk(info);
+ if (r < 0)
+ return r;
+
+ fw_info->written += r;
+
+ if (*fw_info->data == NXP_NCI_FW_CMD_RESET) {
+ fw_info->cmd_result = 0;
+ if (fw_info->fw)
+ schedule_work(&fw_info->work);
+ } else {
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &fw_info->cmd_completion, NXP_NCI_FW_ANSWER_TIMEOUT);
+ if (completion_rc == 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+void nxp_nci_fw_work(struct work_struct *work)
+{
+ struct nxp_nci_info *info;
+ struct nxp_nci_fw_info *fw_info;
+ int r;
+
+ fw_info = container_of(work, struct nxp_nci_fw_info, work);
+ info = container_of(fw_info, struct nxp_nci_info, fw_info);
+
+ mutex_lock(&info->info_lock);
+
+ r = fw_info->cmd_result;
+ if (r < 0)
+ goto exit_work;
+
+ if (fw_info->written == fw_info->frame_size) {
+ fw_info->data += fw_info->frame_size;
+ fw_info->size -= fw_info->frame_size;
+ fw_info->written = 0;
+ }
+
+ if (fw_info->size > 0)
+ r = nxp_nci_fw_send(info);
+
+exit_work:
+ if (r < 0 || fw_info->size == 0)
+ nxp_nci_fw_work_complete(info, r);
+ mutex_unlock(&info->info_lock);
+}
+
+int nxp_nci_fw_download(struct nci_dev *ndev, const char *firmware_name)
+{
+ struct nxp_nci_info *info = nci_get_drvdata(ndev);
+ struct nxp_nci_fw_info *fw_info = &info->fw_info;
+ int r;
+
+ mutex_lock(&info->info_lock);
+
+ if (!info->phy_ops->set_mode || !info->phy_ops->write) {
+ r = -ENOTSUPP;
+ goto fw_download_exit;
+ }
+
+ if (!firmware_name || firmware_name[0] == '\0') {
+ r = -EINVAL;
+ goto fw_download_exit;
+ }
+
+ strcpy(fw_info->name, firmware_name);
+
+ r = request_firmware(&fw_info->fw, firmware_name,
+ ndev->nfc_dev->dev.parent);
+ if (r < 0)
+ goto fw_download_exit;
+
+ r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_FW);
+ if (r < 0) {
+ release_firmware(fw_info->fw);
+ goto fw_download_exit;
+ }
+
+ info->mode = NXP_NCI_MODE_FW;
+
+ fw_info->data = fw_info->fw->data;
+ fw_info->size = fw_info->fw->size;
+ fw_info->written = 0;
+ fw_info->frame_size = 0;
+ fw_info->cmd_result = 0;
+
+ schedule_work(&fw_info->work);
+
+fw_download_exit:
+ mutex_unlock(&info->info_lock);
+ return r;
+}
+
+static int nxp_nci_fw_read_status(u8 stat)
+{
+ switch (stat) {
+ case NXP_NCI_FW_RESULT_OK:
+ return 0;
+ case NXP_NCI_FW_RESULT_INVALID_ADDR:
+ return -EINVAL;
+ case NXP_NCI_FW_RESULT_UNKNOWN_CMD:
+ return -EINVAL;
+ case NXP_NCI_FW_RESULT_ABORTED_CMD:
+ return -EMSGSIZE;
+ case NXP_NCI_FW_RESULT_ADDR_RANGE_OFL_ERROR:
+ return -EADDRNOTAVAIL;
+ case NXP_NCI_FW_RESULT_BUFFER_OFL_ERROR:
+ return -ENOBUFS;
+ case NXP_NCI_FW_RESULT_MEM_BSY:
+ return -ENOKEY;
+ case NXP_NCI_FW_RESULT_SIGNATURE_ERROR:
+ return -EKEYREJECTED;
+ case NXP_NCI_FW_RESULT_FIRMWARE_VERSION_ERROR:
+ return -EALREADY;
+ case NXP_NCI_FW_RESULT_PROTOCOL_ERROR:
+ return -EPROTO;
+ case NXP_NCI_FW_RESULT_SFWU_DEGRADED:
+ return -EHWPOISON;
+ case NXP_NCI_FW_RESULT_PH_STATUS_FIRST_CHUNK:
+ return 0;
+ case NXP_NCI_FW_RESULT_PH_STATUS_NEXT_CHUNK:
+ return 0;
+ case NXP_NCI_FW_RESULT_PH_STATUS_INTERNAL_ERROR_5:
+ return -EINVAL;
+ default:
+ return -EIO;
+ }
+}
+
+static u16 nxp_nci_fw_check_crc(struct sk_buff *skb)
+{
+ u16 crc, frame_crc;
+ size_t len = skb->len - NXP_NCI_FW_CRC_LEN;
+
+ crc = nxp_nci_fw_crc(skb->data, len);
+ frame_crc = get_unaligned_be16(skb->data + len);
+
+ return (crc ^ frame_crc);
+}
+
+void nxp_nci_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nxp_nci_info *info = nci_get_drvdata(ndev);
+ struct nxp_nci_fw_info *fw_info = &info->fw_info;
+
+ complete(&fw_info->cmd_completion);
+
+ if (skb) {
+ if (nxp_nci_fw_check_crc(skb) != 0x00)
+ fw_info->cmd_result = -EBADMSG;
+ else
+ fw_info->cmd_result = nxp_nci_fw_read_status(
+ *skb_pull(skb, NXP_NCI_FW_HDR_LEN));
+ kfree_skb(skb);
+ } else {
+ fw_info->cmd_result = -EIO;
+ }
+
+ if (fw_info->fw)
+ schedule_work(&fw_info->work);
+}
+EXPORT_SYMBOL(nxp_nci_fw_recv_frame);
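The download path frames the firmware image in chunks: a 2-byte big-endian header whose low 10 bits hold the chunk length (NXP_NCI_FW_FRAME_LEN_MASK) and whose bit 10 (NXP_NCI_FW_CHUNK_FLAG) marks that more chunks follow, then the payload, then a 16-bit CRC computed MSB-first with an initial value of 0xffff. A standalone userspace sketch, not part of the driver, that builds one such frame with the same CRC routine:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static uint16_t fw_crc(const uint8_t *buf, size_t len)
    {
    	uint16_t crc = 0xffff;

    	while (len--) {
    		crc = (uint16_t)((crc >> 8) | (crc << 8)) ^ *buf++;
    		crc ^= (crc & 0xff) >> 4;
    		crc ^= (uint16_t)((crc & 0xff) << 12);
    		crc ^= (uint16_t)((crc & 0xff) << 5);
    	}
    	return crc;
    }

    /* Frame one chunk: 2-byte header, payload, 2-byte CRC; returns total length. */
    static size_t frame_chunk(const uint8_t *data, size_t len, int more, uint8_t *out)
    {
    	uint16_t hdr = (uint16_t)(len & 0x03ff) | (more ? 0x0400 : 0);
    	uint16_t crc;

    	out[0] = hdr >> 8;
    	out[1] = hdr & 0xff;
    	memcpy(out + 2, data, len);
    	crc = fw_crc(out, len + 2);
    	out[len + 2] = crc >> 8;
    	out[len + 3] = crc & 0xff;
    	return len + 4;
    }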
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
new file mode 100644
index 000000000000..17bd67dbebf0
--- /dev/null
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -0,0 +1,415 @@
+/*
+ * I2C link layer for the NXP NCI driver
+ *
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/platform_data/nxp-nci.h>
+#include <linux/unaligned/access_ok.h>
+
+#include <net/nfc/nfc.h>
+
+#include "nxp-nci.h"
+
+#define NXP_NCI_I2C_DRIVER_NAME "nxp-nci_i2c"
+
+#define NXP_NCI_I2C_MAX_PAYLOAD 32
+
+struct nxp_nci_i2c_phy {
+ struct i2c_client *i2c_dev;
+ struct nci_dev *ndev;
+
+ unsigned int gpio_en;
+ unsigned int gpio_fw;
+
+ int hard_fault; /*
+ * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+};
+
+static int nxp_nci_i2c_set_mode(void *phy_id,
+ enum nxp_nci_mode mode)
+{
+ struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id;
+
+ gpio_set_value(phy->gpio_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
+ gpio_set_value(phy->gpio_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
+ usleep_range(10000, 15000);
+
+ if (mode == NXP_NCI_MODE_COLD)
+ phy->hard_fault = 0;
+
+ return 0;
+}
+
+static int nxp_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+ int r;
+ struct nxp_nci_i2c_phy *phy = phy_id;
+ struct i2c_client *client = phy->i2c_dev;
+
+ if (phy->hard_fault != 0)
+ return phy->hard_fault;
+
+ r = i2c_master_send(client, skb->data, skb->len);
+ if (r == -EREMOTEIO) {
+ /* Retry, chip was in standby */
+ usleep_range(110000, 120000);
+ r = i2c_master_send(client, skb->data, skb->len);
+ }
+
+ if (r < 0) {
+ nfc_err(&client->dev, "Error %d on I2C send\n", r);
+ } else if (r != skb->len) {
+ nfc_err(&client->dev,
+ "Invalid length sent: %u (expected %u)\n",
+ r, skb->len);
+ r = -EREMOTEIO;
+ } else {
+ /* Success but return 0 and not number of bytes */
+ r = 0;
+ }
+
+ return r;
+}
+
+static struct nxp_nci_phy_ops i2c_phy_ops = {
+ .set_mode = nxp_nci_i2c_set_mode,
+ .write = nxp_nci_i2c_write,
+};
+
+static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy,
+ struct sk_buff **skb)
+{
+ struct i2c_client *client = phy->i2c_dev;
+ u16 header;
+ size_t frame_len;
+ int r;
+
+ r = i2c_master_recv(client, (u8 *) &header, NXP_NCI_FW_HDR_LEN);
+ if (r < 0) {
+ goto fw_read_exit;
+ } else if (r != NXP_NCI_FW_HDR_LEN) {
+ nfc_err(&client->dev, "Incorrect header length: %u\n", r);
+ r = -EBADMSG;
+ goto fw_read_exit;
+ }
+
+ frame_len = (get_unaligned_be16(&header) & NXP_NCI_FW_FRAME_LEN_MASK) +
+ NXP_NCI_FW_CRC_LEN;
+
+ *skb = alloc_skb(NXP_NCI_FW_HDR_LEN + frame_len, GFP_KERNEL);
+ if (*skb == NULL) {
+ r = -ENOMEM;
+ goto fw_read_exit;
+ }
+
+ memcpy(skb_put(*skb, NXP_NCI_FW_HDR_LEN), &header, NXP_NCI_FW_HDR_LEN);
+
+ r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
+ if (r != frame_len) {
+ nfc_err(&client->dev,
+ "Invalid frame length: %u (expected %zu)\n",
+ r, frame_len);
+ r = -EBADMSG;
+ goto fw_read_exit_free_skb;
+ }
+
+ return 0;
+
+fw_read_exit_free_skb:
+ kfree_skb(*skb);
+fw_read_exit:
+ return r;
+}
+
+static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
+ struct sk_buff **skb)
+{
+ struct nci_ctrl_hdr header; /* May actually be a data header */
+ struct i2c_client *client = phy->i2c_dev;
+ int r;
+
+ r = i2c_master_recv(client, (u8 *) &header, NCI_CTRL_HDR_SIZE);
+ if (r < 0) {
+ goto nci_read_exit;
+ } else if (r != NCI_CTRL_HDR_SIZE) {
+ nfc_err(&client->dev, "Incorrect header length: %u\n", r);
+ r = -EBADMSG;
+ goto nci_read_exit;
+ }
+
+ *skb = alloc_skb(NCI_CTRL_HDR_SIZE + header.plen, GFP_KERNEL);
+ if (*skb == NULL) {
+ r = -ENOMEM;
+ goto nci_read_exit;
+ }
+
+ memcpy(skb_put(*skb, NCI_CTRL_HDR_SIZE), (void *) &header,
+ NCI_CTRL_HDR_SIZE);
+
+ r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
+ if (r != header.plen) {
+ nfc_err(&client->dev,
+ "Invalid frame payload length: %u (expected %u)\n",
+ r, header.plen);
+ r = -EBADMSG;
+ goto nci_read_exit_free_skb;
+ }
+
+ return 0;
+
+nci_read_exit_free_skb:
+ kfree_skb(*skb);
+nci_read_exit:
+ return r;
+}
+
+static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+ struct nxp_nci_i2c_phy *phy = phy_id;
+ struct i2c_client *client;
+ struct nxp_nci_info *info;
+
+ struct sk_buff *skb = NULL;
+ int r = 0;
+
+ if (!phy || !phy->ndev)
+ goto exit_irq_none;
+
+ client = phy->i2c_dev;
+
+ if (!client || irq != client->irq)
+ goto exit_irq_none;
+
+ info = nci_get_drvdata(phy->ndev);
+
+ if (!info)
+ goto exit_irq_none;
+
+ mutex_lock(&info->info_lock);
+
+ if (phy->hard_fault != 0)
+ goto exit_irq_handled;
+
+ switch (info->mode) {
+ case NXP_NCI_MODE_NCI:
+ r = nxp_nci_i2c_nci_read(phy, &skb);
+ break;
+ case NXP_NCI_MODE_FW:
+ r = nxp_nci_i2c_fw_read(phy, &skb);
+ break;
+ case NXP_NCI_MODE_COLD:
+ r = -EREMOTEIO;
+ break;
+ }
+
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
+ skb = NULL;
+ } else if (r < 0) {
+ nfc_err(&client->dev, "Read failed with error %d\n", r);
+ goto exit_irq_handled;
+ }
+
+ switch (info->mode) {
+ case NXP_NCI_MODE_NCI:
+ nci_recv_frame(phy->ndev, skb);
+ break;
+ case NXP_NCI_MODE_FW:
+ nxp_nci_fw_recv_frame(phy->ndev, skb);
+ break;
+ case NXP_NCI_MODE_COLD:
+ break;
+ }
+
+exit_irq_handled:
+ mutex_unlock(&info->info_lock);
+ return IRQ_HANDLED;
+exit_irq_none:
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+}
+
+#ifdef CONFIG_OF
+
+static int nxp_nci_i2c_parse_devtree(struct i2c_client *client)
+{
+ struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client);
+ struct device_node *pp;
+ int r;
+
+ pp = client->dev.of_node;
+ if (!pp)
+ return -ENODEV;
+
+ r = of_get_named_gpio(pp, "enable-gpios", 0);
+ if (r == -EPROBE_DEFER)
+ r = of_get_named_gpio(pp, "enable-gpios", 0);
+ if (r < 0) {
+ nfc_err(&client->dev, "Failed to get EN gpio, error: %d\n", r);
+ return r;
+ }
+ phy->gpio_en = r;
+
+ r = of_get_named_gpio(pp, "firmware-gpios", 0);
+ if (r == -EPROBE_DEFER)
+ r = of_get_named_gpio(pp, "firmware-gpios", 0);
+ if (r < 0) {
+ nfc_err(&client->dev, "Failed to get FW gpio, error: %d\n", r);
+ return r;
+ }
+ phy->gpio_fw = r;
+
+ r = irq_of_parse_and_map(pp, 0);
+ if (r < 0) {
+ nfc_err(&client->dev, "Unable to get irq, error: %d\n", r);
+ return r;
+ }
+ client->irq = r;
+
+ return 0;
+}
+
+#else
+
+static int nxp_nci_i2c_parse_devtree(struct i2c_client *client)
+{
+ return -ENODEV;
+}
+
+#endif
+
+static int nxp_nci_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct nxp_nci_i2c_phy *phy;
+ struct nxp_nci_nfc_platform_data *pdata;
+ int r;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ r = -ENODEV;
+ goto probe_exit;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct nxp_nci_i2c_phy),
+ GFP_KERNEL);
+ if (!phy) {
+ r = -ENOMEM;
+ goto probe_exit;
+ }
+
+ phy->i2c_dev = client;
+ i2c_set_clientdata(client, phy);
+
+ pdata = client->dev.platform_data;
+
+ if (!pdata && client->dev.of_node) {
+ r = nxp_nci_i2c_parse_devtree(client);
+ if (r < 0) {
+ nfc_err(&client->dev, "Failed to get DT data\n");
+ goto probe_exit;
+ }
+ } else if (pdata) {
+ phy->gpio_en = pdata->gpio_en;
+ phy->gpio_fw = pdata->gpio_fw;
+ client->irq = pdata->irq;
+ } else {
+ nfc_err(&client->dev, "No platform data\n");
+ r = -EINVAL;
+ goto probe_exit;
+ }
+
+ r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
+ GPIOF_OUT_INIT_LOW, "nxp_nci_en");
+ if (r < 0)
+ goto probe_exit;
+
+ r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw,
+ GPIOF_OUT_INIT_LOW, "nxp_nci_fw");
+ if (r < 0)
+ goto probe_exit;
+
+ r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops,
+ NXP_NCI_I2C_MAX_PAYLOAD, &phy->ndev);
+ if (r < 0)
+ goto probe_exit;
+
+ r = request_threaded_irq(client->irq, NULL,
+ nxp_nci_i2c_irq_thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ NXP_NCI_I2C_DRIVER_NAME, phy);
+ if (r < 0)
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
+
+probe_exit:
+ return r;
+}
+
+static int nxp_nci_i2c_remove(struct i2c_client *client)
+{
+ struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client);
+
+ nxp_nci_remove(phy->ndev);
+ free_irq(client->irq, phy);
+
+ return 0;
+}
+
+static struct i2c_device_id nxp_nci_i2c_id_table[] = {
+ {"nxp-nci_i2c", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table);
+
+static const struct of_device_id of_nxp_nci_i2c_match[] = {
+ { .compatible = "nxp,nxp-nci-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match);
+
+static struct i2c_driver nxp_nci_i2c_driver = {
+ .driver = {
+ .name = NXP_NCI_I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_nxp_nci_i2c_match),
+ },
+ .probe = nxp_nci_i2c_probe,
+ .id_table = nxp_nci_i2c_id_table,
+ .remove = nxp_nci_i2c_remove,
+};
+
+module_i2c_driver(nxp_nci_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C driver for NXP NCI NFC controllers");
+MODULE_AUTHOR("Clément Perrochaud <clement.perrochaud@nxp.com>");
diff --git a/drivers/nfc/nxp-nci/nxp-nci.h b/drivers/nfc/nxp-nci/nxp-nci.h
new file mode 100644
index 000000000000..f1fecc4e2457
--- /dev/null
+++ b/drivers/nfc/nxp-nci/nxp-nci.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * Derived from PN544 device driver:
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __LOCAL_NXP_NCI_H_
+#define __LOCAL_NXP_NCI_H_
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/nxp-nci.h>
+
+#include <net/nfc/nci_core.h>
+
+#define NXP_NCI_FW_HDR_LEN 2
+#define NXP_NCI_FW_CRC_LEN 2
+
+#define NXP_NCI_FW_FRAME_LEN_MASK 0x03FF
+
+enum nxp_nci_mode {
+ NXP_NCI_MODE_COLD,
+ NXP_NCI_MODE_NCI,
+ NXP_NCI_MODE_FW
+};
+
+struct nxp_nci_phy_ops {
+ int (*set_mode)(void *id, enum nxp_nci_mode mode);
+ int (*write)(void *id, struct sk_buff *skb);
+};
+
+struct nxp_nci_fw_info {
+ char name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+ const struct firmware *fw;
+
+ size_t size;
+ size_t written;
+
+ const u8 *data;
+ size_t frame_size;
+
+ struct work_struct work;
+ struct completion cmd_completion;
+
+ int cmd_result;
+};
+
+struct nxp_nci_info {
+ struct nci_dev *ndev;
+ void *phy_id;
+ struct device *pdev;
+
+ enum nxp_nci_mode mode;
+
+ struct nxp_nci_phy_ops *phy_ops;
+ unsigned int max_payload;
+
+ struct mutex info_lock;
+
+ struct nxp_nci_fw_info fw_info;
+};
+
+int nxp_nci_fw_download(struct nci_dev *ndev, const char *firmware_name);
+void nxp_nci_fw_work(struct work_struct *work);
+void nxp_nci_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+void nxp_nci_fw_work_complete(struct nxp_nci_info *info, int result);
+
+int nxp_nci_probe(void *phy_id, struct device *pdev,
+ struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload,
+ struct nci_dev **ndev);
+void nxp_nci_remove(struct nci_dev *ndev);
+
+#endif /* __LOCAL_NXP_NCI_H_ */
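The header fixes the contract between the core and a transport: set_mode() switches the chip between cold, NCI and firmware-download modes, and write() pushes one frame, returning 0 on success or a negative errno (on failure the core frees the skb, as nxp_nci_send() above shows). A minimal sketch of a hypothetical transport, with my_phy and my_bus_send() as invented placeholders:

    struct my_phy {
    	struct nci_dev *ndev;
    	int power_gpio;
    };

    static int my_phy_set_mode(void *id, enum nxp_nci_mode mode)
    {
    	struct my_phy *phy = id;

    	/* Power the chip down in cold mode, up otherwise. */
    	gpio_set_value(phy->power_gpio, mode != NXP_NCI_MODE_COLD);
    	return 0;
    }

    static int my_phy_write(void *id, struct sk_buff *skb)
    {
    	/* Send skb->data / skb->len on the bus; 0 on success, -errno on error. */
    	return my_bus_send(id, skb->data, skb->len);
    }

    static struct nxp_nci_phy_ops my_phy_ops = {
    	.set_mode = my_phy_set_mode,
    	.write = my_phy_write,
    };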
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index d46a700a9637..a03e4eb5fe29 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -1820,7 +1820,7 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
- nfc_err(&dev->interface->dev, "RF setting error %d", rc);
+ nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
return rc;
}
@@ -2554,8 +2554,10 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
}
skb = pn533_build_response(dev);
- if (!skb)
+ if (!skb) {
+ rc = -ENOMEM;
goto error;
+ }
arg->cb(arg->cb_context, skb, 0);
kfree(arg);
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index cdde745b96bd..6fd986f5ac3e 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -953,7 +953,7 @@ static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client)
}
nfc_info(dev, "GPIO resource, no:%d irq:%d\n",
- desc_to_gpio(gpiod_irq), ret);
+ desc_to_gpio(gpiod_irq), ret);
client->irq = ret;
return 0;
@@ -1062,11 +1062,8 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy),
GFP_KERNEL);
- if (!phy) {
- nfc_err(&client->dev,
- "Cannot allocate memory for pn544 i2c phy.\n");
+ if (!phy)
return -ENOMEM;
- }
INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work);
phy->fw_work_state = FW_WORK_STATE_IDLE;
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 4ac4d31f6c59..87d509996704 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -604,11 +604,11 @@ static void port100_recv_response(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
nfc_err(&dev->interface->dev,
- "The urb has been canceled (status %d)", urb->status);
+ "The urb has been canceled (status %d)\n", urb->status);
goto sched_wq;
case -ESHUTDOWN:
default:
- nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
urb->status);
goto sched_wq;
}
@@ -616,7 +616,7 @@ static void port100_recv_response(struct urb *urb)
in_frame = dev->in_urb->transfer_buffer;
if (!port100_rx_frame_is_valid(in_frame)) {
- nfc_err(&dev->interface->dev, "Received an invalid frame");
+ nfc_err(&dev->interface->dev, "Received an invalid frame\n");
cmd->status = -EIO;
goto sched_wq;
}
@@ -626,7 +626,7 @@ static void port100_recv_response(struct urb *urb)
if (!port100_rx_frame_is_cmd_response(dev, in_frame)) {
nfc_err(&dev->interface->dev,
- "It's not the response to the last command");
+ "It's not the response to the last command\n");
cmd->status = -EIO;
goto sched_wq;
}
@@ -657,11 +657,11 @@ static void port100_recv_ack(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
nfc_err(&dev->interface->dev,
- "The urb has been stopped (status %d)", urb->status);
+ "The urb has been stopped (status %d)\n", urb->status);
goto sched_wq;
case -ESHUTDOWN:
default:
- nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
urb->status);
goto sched_wq;
}
@@ -669,7 +669,7 @@ static void port100_recv_ack(struct urb *urb)
in_frame = dev->in_urb->transfer_buffer;
if (!port100_rx_frame_is_ack(in_frame)) {
- nfc_err(&dev->interface->dev, "Received an invalid ack");
+ nfc_err(&dev->interface->dev, "Received an invalid ack\n");
cmd->status = -EIO;
goto sched_wq;
}
@@ -677,7 +677,7 @@ static void port100_recv_ack(struct urb *urb)
rc = port100_submit_urb_for_response(dev, GFP_ATOMIC);
if (rc) {
nfc_err(&dev->interface->dev,
- "usb_submit_urb failed with result %d", rc);
+ "usb_submit_urb failed with result %d\n", rc);
cmd->status = rc;
goto sched_wq;
}
@@ -873,11 +873,11 @@ static void port100_send_complete(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
nfc_err(&dev->interface->dev,
- "The urb has been stopped (status %d)", urb->status);
+ "The urb has been stopped (status %d)\n", urb->status);
break;
case -ESHUTDOWN:
default:
- nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+ nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
urb->status);
}
}
@@ -1094,7 +1094,7 @@ static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
if (resp->len < 4) {
nfc_err(&dev->interface->dev,
- "Invalid packet length received.\n");
+ "Invalid packet length received\n");
rc = -EIO;
goto error;
}
@@ -1250,7 +1250,7 @@ static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated)
PORT100_MDAA_TGT_WAS_ACTIVATED_MASK;
break;
default:
- nfc_err(&dev->interface->dev, "Unknonwn command type.\n");
+ nfc_err(&dev->interface->dev, "Unknown command type\n");
return false;
}
@@ -1481,7 +1481,7 @@ static int port100_probe(struct usb_interface *interface,
cmd_type_mask = port100_get_command_type_mask(dev);
if (!cmd_type_mask) {
nfc_err(&interface->dev,
- "Could not get supported command types.\n");
+ "Could not get supported command types\n");
rc = -ENODEV;
goto error;
}
@@ -1494,7 +1494,7 @@ static int port100_probe(struct usb_interface *interface,
rc = port100_set_command_type(dev, dev->cmd_type);
if (rc) {
nfc_err(&interface->dev,
- "The device does not support command type %u.\n",
+ "The device does not support command type %u\n",
dev->cmd_type);
goto error;
}
@@ -1502,7 +1502,7 @@ static int port100_probe(struct usb_interface *interface,
fw_version = port100_get_firmware_version(dev);
if (!fw_version)
nfc_err(&interface->dev,
- "Could not get device firmware version.\n");
+ "Could not get device firmware version\n");
nfc_info(&interface->dev,
"Sony NFC Port-100 Series attached (firmware v%x.%02x)\n",
@@ -1515,7 +1515,7 @@ static int port100_probe(struct usb_interface *interface,
dev->skb_tailroom);
if (!dev->nfc_digital_dev) {
nfc_err(&interface->dev,
- "Could not allocate nfc_digital_dev.\n");
+ "Could not allocate nfc_digital_dev\n");
rc = -ENOMEM;
goto error;
}
@@ -1526,7 +1526,7 @@ static int port100_probe(struct usb_interface *interface,
rc = nfc_digital_register_device(dev->nfc_digital_dev);
if (rc) {
nfc_err(&interface->dev,
- "Could not register digital device.\n");
+ "Could not register digital device\n");
goto free_nfc_dev;
}
@@ -1562,7 +1562,7 @@ static void port100_disconnect(struct usb_interface *interface)
kfree(dev->cmd);
- nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected");
+ nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected\n");
}
static struct usb_driver port100_driver = {
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
index 24d3d240d5f4..d251f7229c4e 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -572,7 +572,7 @@ exit:
return r;
}
-static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
+static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *uid,
int *len)
{
int r;
@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
goto exit;
}
- gate = uid_skb->data;
+ memcpy(uid, uid_skb->data, uid_skb->len);
*len = uid_skb->len;
exit:
kfree_skb(uid_skb);
diff --git a/drivers/nfc/st21nfca/st21nfca_se.c b/drivers/nfc/st21nfca/st21nfca_se.c
index bd13cac9c66a..3197e9bb66f7 100644
--- a/drivers/nfc/st21nfca/st21nfca_se.c
+++ b/drivers/nfc/st21nfca/st21nfca_se.c
@@ -310,6 +310,13 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
case ST21NFCA_EVT_CONNECTIVITY:
break;
case ST21NFCA_EVT_TRANSACTION:
+ /*
+ * According to specification etsi 102 622
+ * 11.2.2.4 EVT_TRANSACTION Table 52
+ * Description Tag Length
+ * AID 81 5 to 16
+ * PARAMETERS 82 0 to 255
+ */
if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
@@ -318,8 +325,10 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
skb->len - 2, GFP_KERNEL);
transaction->aid_len = skb->data[1];
- memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+ memcpy(transaction->aid, &skb->data[2],
+ transaction->aid_len);
+ /* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
NFC_EVT_TRANSACTION_PARAMS_TAG)
return -EPROTO;
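
The EVT_TRANSACTION handler above parses two TLVs laid out back to back: an AID (tag 0x81, 5 to 16 bytes) followed by PARAMETERS (tag 0x82, 0 to 255 bytes), per the ETSI TS 102 622 table quoted in the new comment. Below is a minimal sketch of that parse against a plain byte buffer; the tag values and length limits come from the comment, while the function and struct names are hypothetical and the kernel's sk_buff/nfc_evt_transaction types are intentionally left out.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define AID_TAG		0x81	/* EVT_TRANSACTION, ETSI TS 102 622 */
#define PARAMS_TAG	0x82
#define AID_MIN_LEN	5
#define AID_MAX_LEN	16

struct evt_transaction {		/* hypothetical flat layout */
	uint8_t aid_len;
	uint8_t aid[AID_MAX_LEN];
	uint8_t params_len;
	const uint8_t *params;
};

/* Returns 0 on success, -1 if the buffer is malformed. */
static int parse_evt_transaction(const uint8_t *buf, size_t len,
				 struct evt_transaction *tr)
{
	size_t off = 0;

	if (len < 2 || buf[off] != AID_TAG)
		return -1;
	tr->aid_len = buf[off + 1];
	if (tr->aid_len < AID_MIN_LEN || tr->aid_len > AID_MAX_LEN ||
	    len < (size_t)2 + tr->aid_len + 2)
		return -1;
	memcpy(tr->aid, &buf[off + 2], tr->aid_len);

	off += 2 + tr->aid_len;			/* next TLV: PARAMETERS */
	if (buf[off] != PARAMS_TAG)
		return -1;
	tr->params_len = buf[off + 1];
	if (len < off + 2 + tr->params_len)
		return -1;
	tr->params = &buf[off + 2];
	return 0;
}

Like the hunk above, the sketch copies the length it has just validated (tr->aid_len) rather than re-reading the raw byte.
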
diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
index eb886932d972..76a4cad41cec 100644
--- a/drivers/nfc/st21nfcb/i2c.c
+++ b/drivers/nfc/st21nfcb/i2c.c
@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
return phy->ndlc->hard_fault;
r = i2c_master_send(client, skb->data, skb->len);
- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ if (r < 0) { /* Retry, chip was in standby */
usleep_range(1000, 4000);
r = i2c_master_send(client, skb->data, skb->len);
}
@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
struct i2c_client *client = phy->i2c_dev;
r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ if (r < 0) { /* Retry, chip was in standby */
usleep_range(1000, 4000);
r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
}
@@ -313,11 +313,8 @@ static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
phy = devm_kzalloc(&client->dev, sizeof(struct st21nfcb_i2c_phy),
GFP_KERNEL);
- if (!phy) {
- nfc_err(&client->dev,
- "Cannot allocate memory for st21nfcb i2c phy.\n");
+ if (!phy)
return -ENOMEM;
- }
phy->i2c_dev = client;
diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st21nfcb/ndlc.c
index 5fbf59d2138c..6014b5859465 100644
--- a/drivers/nfc/st21nfcb/ndlc.c
+++ b/drivers/nfc/st21nfcb/ndlc.c
@@ -256,10 +256,9 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
struct llt_ndlc *ndlc;
ndlc = devm_kzalloc(dev, sizeof(struct llt_ndlc), GFP_KERNEL);
- if (!ndlc) {
- nfc_err(dev, "Cannot allocate memory for ndlc.\n");
+ if (!ndlc)
return -ENOMEM;
- }
+
ndlc->ops = phy_ops;
ndlc->phy_id = phy_id;
ndlc->dev = dev;
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.c b/drivers/nfc/st21nfcb/st21nfcb_se.c
index 7c82e9d87a65..24862a525fb5 100644
--- a/drivers/nfc/st21nfcb/st21nfcb_se.c
+++ b/drivers/nfc/st21nfcb/st21nfcb_se.c
@@ -321,6 +321,12 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
break;
case ST21NFCB_EVT_TRANSACTION:
+ /* According to specification etsi 102 622
+ * 11.2.2.4 EVT_TRANSACTION Table 52
+ * Description Tag Length
+ * AID 81 5 to 16
+ * PARAMETERS 82 0 to 255
+ */
if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
@@ -329,8 +335,9 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
skb->len - 2, GFP_KERNEL);
transaction->aid_len = skb->data[1];
- memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+ memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
+ /* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
NFC_EVT_TRANSACTION_PARAMS_TAG)
return -EPROTO;
@@ -340,6 +347,7 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
transaction->aid_len + 4, transaction->params_len);
r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
+ break;
default:
return 1;
}
@@ -542,14 +550,12 @@ static int st21nfcb_hci_network_init(struct nci_dev *ndev)
r = nci_hci_dev_session_init(ndev);
if (r != NCI_HCI_ANY_OK)
- goto exit;
+ goto free_dest_params;
r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
NCI_NFCEE_ENABLE);
if (r != NCI_STATUS_OK)
- goto exit;
-
- return 0;
+ goto free_dest_params;
free_dest_params:
kfree(dest_params);
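
The hunk above reroutes both error returns in st21nfcb_hci_network_init() through the free_dest_params label and drops the early return 0, so the success path falls through to the same cleanup and the buffer allocated earlier in the function is freed on every path shown here. A compressed sketch of that goto-cleanup shape, with made-up step_one()/step_two() stand-ins for the real session-init and mode-set calls:

#include <linux/slab.h>

static int step_one(void *ctx) { return 0; }	/* stand-ins for the real */
static int step_two(void *ctx) { return 0; }	/* session/mode-set calls */

static int hypothetical_init(void *ctx)
{
	void *dest_params;
	int r;

	dest_params = kzalloc(64, GFP_KERNEL);
	if (!dest_params)
		return -ENOMEM;

	r = step_one(ctx);
	if (r)
		goto free_dest_params;

	r = step_two(ctx);
	/* success or failure, dest_params is no longer needed */

free_dest_params:
	kfree(dest_params);
	return r;
}
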
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 7bcaeec876c0..07bb3c8f191b 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -34,7 +34,11 @@ config OF_PROMTREE
# Hardly any platforms need this. It is safe to select, but only do so if you
# need it.
config OF_DYNAMIC
- bool
+ bool "Support for dynamic device trees" if OF_UNITTEST
+ help
+ On some platforms, the device tree can be manipulated at runtime.
+ While this option is selected automatically on such platforms, you
+ can enable it manually to improve device tree unit test coverage.
config OF_ADDRESS
def_bool y
@@ -46,7 +50,7 @@ config OF_ADDRESS_PCI
config OF_IRQ
def_bool y
- depends on !SPARC
+ depends on !SPARC && IRQ_DOMAIN
config OF_NET
depends on NETDEVICES
@@ -87,5 +91,10 @@ config OF_OVERLAY
bool "Device Tree overlays"
select OF_DYNAMIC
select OF_RESOLVE
+ help
+ Overlays are a method to dynamically modify part of the kernel's
+ device tree with dynamically loaded data.
+ While this option is selected automatically when needed, you can
+ enable it manually to improve device tree unit test coverage.
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 7563f36c71db..fcacb186a67b 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -6,8 +6,7 @@ obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_NET) += of_net.o
-obj-$(CONFIG_OF_UNITTEST) += of_unittest.o
-of_unittest-objs := unittest.o unittest-data/testcases.dtb.o
+obj-$(CONFIG_OF_UNITTEST) += unittest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
@@ -16,5 +15,7 @@ obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
+obj-$(CONFIG_OF_UNITTEST) += unittest-data/
+
CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt
CFLAGS_fdt_address.o = -I$(src)/../../scripts/dtc/libfdt
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 8f165b112e03..99764db0875a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -568,6 +568,29 @@ bool of_device_is_available(const struct device_node *device)
EXPORT_SYMBOL(of_device_is_available);
/**
+ * of_device_is_big_endian - check if a device has BE registers
+ *
+ * @device: Node to check for endianness
+ *
+ * Returns true if the device has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the device has a "native-endian" property.
+ * Returns false otherwise.
+ *
+ * Callers would nominally use ioread32be/iowrite32be if
+ * of_device_is_big_endian() == true, or readl/writel otherwise.
+ */
+bool of_device_is_big_endian(const struct device_node *device)
+{
+ if (of_property_read_bool(device, "big-endian"))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ of_property_read_bool(device, "native-endian"))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(of_device_is_big_endian);
+
+/**
* of_get_parent - Get a node's parent if any
* @node: Node to get parent
*
@@ -640,8 +663,9 @@ static struct device_node *__of_get_next_child(const struct device_node *node,
* @node: parent node
* @prev: previous child of the parent node, or NULL to get first
*
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * Returns a node pointer with refcount incremented, use of_node_put() on
+ * it when done. Returns NULL when prev is the last child. Decrements the
+ * refcount of prev.
*/
struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev)
@@ -1960,6 +1984,32 @@ int of_alias_get_id(struct device_node *np, const char *stem)
}
EXPORT_SYMBOL_GPL(of_alias_get_id);
+/**
+ * of_alias_get_highest_id - Get highest alias id for the given stem
+ * @stem: Alias stem to be examined
+ *
+ * The function travels the lookup table to get the highest alias id for the
+ * given alias stem. It returns the alias id if found.
+ */
+int of_alias_get_highest_id(const char *stem)
+{
+ struct alias_prop *app;
+ int id = -ENODEV;
+
+ mutex_lock(&of_mutex);
+ list_for_each_entry(app, &aliases_lookup, link) {
+ if (strcmp(app->stem, stem) != 0)
+ continue;
+
+ if (app->id > id)
+ id = app->id;
+ }
+ mutex_unlock(&of_mutex);
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
+
const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
u32 *pu)
{
@@ -2083,13 +2133,44 @@ int of_graph_parse_endpoint(const struct device_node *node,
EXPORT_SYMBOL(of_graph_parse_endpoint);
/**
+ * of_graph_get_port_by_id() - get the port matching a given id
+ * @parent: pointer to the parent device node
+ * @id: id of the port
+ *
+ * Return: A 'port' node pointer with refcount incremented. The caller
+ * has to use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_port_by_id(struct device_node *parent, u32 id)
+{
+ struct device_node *node, *port;
+
+ node = of_get_child_by_name(parent, "ports");
+ if (node)
+ parent = node;
+
+ for_each_child_of_node(parent, port) {
+ u32 port_id = 0;
+
+ if (of_node_cmp(port->name, "port") != 0)
+ continue;
+ of_property_read_u32(port, "reg", &port_id);
+ if (id == port_id)
+ break;
+ }
+
+ of_node_put(node);
+
+ return port;
+}
+EXPORT_SYMBOL(of_graph_get_port_by_id);
+
+/**
* of_graph_get_next_endpoint() - get next endpoint node
* @parent: pointer to the parent device node
* @prev: previous endpoint node, or NULL to get first
*
* Return: An 'endpoint' node pointer with refcount incremented. Refcount
- * of the passed @prev node is not decremented, the caller have to use
- * of_node_put() on it when done.
+ * of the passed @prev node is decremented.
*/
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *prev)
@@ -2125,12 +2206,6 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
if (WARN_ONCE(!port, "%s(): endpoint %s has no parent node\n",
__func__, prev->full_name))
return NULL;
-
- /*
- * Avoid dropping prev node refcount to 0 when getting the next
- * child below.
- */
- of_node_get(prev);
}
while (1) {
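
The base.c hunks above introduce three small helpers: of_device_is_big_endian() for choosing between big-endian and native MMIO accessors, of_alias_get_highest_id() for finding the highest id already claimed by an alias stem, and of_graph_get_port_by_id() for looking up an OF-graph port by its reg value. A hedged usage sketch follows; the foo_* names and the driver-state struct are invented, and only the three new helpers plus the standard readl()/ioread32be()/of_node_put() calls are taken from the kernel API.

#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/io.h>

struct foo_priv {			/* hypothetical driver state */
	void __iomem *regs;
	bool big_endian;
};

static u32 foo_read(struct foo_priv *p, unsigned int off)
{
	/* decided once at probe time via of_device_is_big_endian() */
	if (p->big_endian)
		return ioread32be(p->regs + off);
	return readl(p->regs + off);
}

static int foo_setup(struct foo_priv *p, struct device_node *np)
{
	struct device_node *port;
	int id;

	p->big_endian = of_device_is_big_endian(np);

	/* pick a bus id above every "serial" alias already in use */
	id = of_alias_get_highest_id("serial");
	if (id < 0)
		id = -1;		/* no such alias: start from 0 */
	id += 1;

	/* fetch port@1 from the OF graph, dropping the reference when done */
	port = of_graph_get_port_by_id(np, 1);
	if (port)
		of_node_put(port);

	return id;
}

The refcount notes added to of_get_next_child() and of_graph_get_next_endpoint() above apply to iteration in general: the getter drops the reference on the node passed in as prev, so a caller only needs of_node_put() on the node it finally keeps.
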
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 3a896c9aeb74..cde35c5d0191 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -109,6 +109,25 @@ int of_fdt_is_compatible(const void *blob,
}
/**
+ * of_fdt_is_big_endian - Return true if given node needs BE MMIO accesses
+ * @blob: A device tree blob
+ * @node: node to test
+ *
+ * Returns true if the node has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the node has a "native-endian" property.
+ * Returns false otherwise.
+ */
+bool of_fdt_is_big_endian(const void *blob, unsigned long node)
+{
+ if (fdt_getprop(blob, node, "big-endian", NULL))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ fdt_getprop(blob, node, "native-endian", NULL))
+ return true;
+ return false;
+}
+
+/**
* of_fdt_match - Return true if node matches a list of compatible values
*/
int of_fdt_match(const void *blob, unsigned long node,
@@ -172,7 +191,7 @@ static void * unflatten_dt_node(void *blob,
if (!pathp)
return mem;
- allocl = l++;
+ allocl = ++l;
/* version 0x10 has a more compact unit name here instead of the full
* path. we accumulate the full path size using "fpsize", we'll rebuild
@@ -879,8 +898,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
endp = reg + (l / sizeof(__be32));
- pr_debug("memory scan node %s, reg size %d, data: %x %x %x %x,\n",
- uname, l, reg[0], reg[1], reg[2], reg[3]);
+ pr_debug("memory scan node %s, reg size %d,\n", uname, l);
while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
u64 base, size;
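
of_fdt_is_big_endian() above is the flattened-tree counterpart of of_device_is_big_endian(): it answers the same "big-endian"/"native-endian" question from a blob pointer and node offset, before the tree has been unflattened. A minimal sketch, assuming the caller already holds the blob and offset (for instance inside an early FDT scan callback); the early_reg_read() name is made up.

#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/io.h>

static u32 __init early_reg_read(const void *blob, unsigned long node,
				 void __iomem *reg)
{
	if (of_fdt_is_big_endian(blob, node))
		return ioread32be(reg);
	return readl(reg);
}
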
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1bd43053b8c7..0c064485d1c2 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -88,7 +88,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
return 0;
}
-static int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
+int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
{
u32 addr;
int ret;
@@ -108,6 +108,7 @@ static int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
return addr;
}
+EXPORT_SYMBOL(of_mdio_parse_addr);
/**
* of_mdiobus_register - Register mii_bus and create PHYs from the device tree
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 73e14184aafe..d820f3edd431 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -38,6 +38,15 @@ int of_get_phy_mode(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_get_phy_mode);
+static const void *of_get_mac_addr(struct device_node *np, const char *name)
+{
+ struct property *pp = of_find_property(np, name, NULL);
+
+ if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value))
+ return pp->value;
+ return NULL;
+}
+
/**
* Search the device tree for the best MAC address to use. 'mac-address' is
* checked first, because that is supposed to contain to "most recent" MAC
@@ -58,20 +67,16 @@ EXPORT_SYMBOL_GPL(of_get_phy_mode);
*/
const void *of_get_mac_address(struct device_node *np)
{
- struct property *pp;
+ const void *addr;
- pp = of_find_property(np, "mac-address", NULL);
- if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
- return pp->value;
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
- pp = of_find_property(np, "local-mac-address", NULL);
- if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
- return pp->value;
+ addr = of_get_mac_addr(np, "local-mac-address");
+ if (addr)
+ return addr;
- pp = of_find_property(np, "address", NULL);
- if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
- return pp->value;
-
- return NULL;
+ return of_get_mac_addr(np, "address");
}
EXPORT_SYMBOL(of_get_mac_address);
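
The of_net.c refactor above folds the three property lookups ("mac-address", "local-mac-address", "address") into a single of_get_mac_addr() helper; of_get_mac_address() still returns a pointer to the first valid 6-byte address it finds, or NULL. A sketch of the usual caller pattern, falling back to a random address when the device tree offers none; the foo_set_mac() name is invented, the rest are standard netdev helpers.

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>

static void foo_set_mac(struct net_device *ndev, struct device_node *np)
{
	const void *mac = of_get_mac_address(np);

	if (mac)
		ether_addr_copy(ndev->dev_addr, mac);	/* DT-provided MAC */
	else
		eth_hw_addr_random(ndev);		/* nothing valid in DT */
}
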
diff --git a/drivers/of/unittest-data/.gitignore b/drivers/of/unittest-data/.gitignore
new file mode 100644
index 000000000000..4b3cf8b16de2
--- /dev/null
+++ b/drivers/of/unittest-data/.gitignore
@@ -0,0 +1,2 @@
+testcases.dtb
+testcases.dtb.S
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
new file mode 100644
index 000000000000..1ac5cc01d627
--- /dev/null
+++ b/drivers/of/unittest-data/Makefile
@@ -0,0 +1,7 @@
+obj-y += testcases.dtb.o
+
+targets += testcases.dtb testcases.dtb.S
+
+.SECONDARY: \
+ $(obj)/testcases.dtb.S \
+ $(obj)/testcases.dtb
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 244226cbb5a3..02ba56c20fe1 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -4,94 +4,94 @@
overlay-node {
/* test bus */
- selftestbus: test-bus {
+ unittestbus: test-bus {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <0>;
- selftest100: test-selftest100 {
- compatible = "selftest";
+ unittest100: test-unittest100 {
+ compatible = "unittest";
status = "okay";
reg = <100>;
};
- selftest101: test-selftest101 {
- compatible = "selftest";
+ unittest101: test-unittest101 {
+ compatible = "unittest";
status = "disabled";
reg = <101>;
};
- selftest0: test-selftest0 {
- compatible = "selftest";
+ unittest0: test-unittest0 {
+ compatible = "unittest";
status = "disabled";
reg = <0>;
};
- selftest1: test-selftest1 {
- compatible = "selftest";
+ unittest1: test-unittest1 {
+ compatible = "unittest";
status = "okay";
reg = <1>;
};
- selftest2: test-selftest2 {
- compatible = "selftest";
+ unittest2: test-unittest2 {
+ compatible = "unittest";
status = "disabled";
reg = <2>;
};
- selftest3: test-selftest3 {
- compatible = "selftest";
+ unittest3: test-unittest3 {
+ compatible = "unittest";
status = "okay";
reg = <3>;
};
- selftest5: test-selftest5 {
- compatible = "selftest";
+ unittest5: test-unittest5 {
+ compatible = "unittest";
status = "disabled";
reg = <5>;
};
- selftest6: test-selftest6 {
- compatible = "selftest";
+ unittest6: test-unittest6 {
+ compatible = "unittest";
status = "disabled";
reg = <6>;
};
- selftest7: test-selftest7 {
- compatible = "selftest";
+ unittest7: test-unittest7 {
+ compatible = "unittest";
status = "disabled";
reg = <7>;
};
- selftest8: test-selftest8 {
- compatible = "selftest";
+ unittest8: test-unittest8 {
+ compatible = "unittest";
status = "disabled";
reg = <8>;
};
i2c-test-bus {
- compatible = "selftest-i2c-bus";
+ compatible = "unittest-i2c-bus";
status = "okay";
reg = <50>;
#address-cells = <1>;
#size-cells = <0>;
- test-selftest12 {
+ test-unittest12 {
reg = <8>;
- compatible = "selftest-i2c-dev";
+ compatible = "unittest-i2c-dev";
status = "disabled";
};
- test-selftest13 {
+ test-unittest13 {
reg = <9>;
- compatible = "selftest-i2c-dev";
+ compatible = "unittest-i2c-dev";
status = "okay";
};
- test-selftest14 {
+ test-unittest14 {
reg = <10>;
- compatible = "selftest-i2c-mux";
+ compatible = "unittest-i2c-mux";
status = "okay";
#address-cells = <1>;
@@ -104,7 +104,7 @@
test-mux-dev {
reg = <32>;
- compatible = "selftest-i2c-dev";
+ compatible = "unittest-i2c-dev";
status = "okay";
};
};
@@ -116,7 +116,7 @@
/* test enable using absolute target path */
overlay0 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/test-selftest0";
+ target-path = "/testcase-data/overlay-node/test-bus/test-unittest0";
__overlay__ {
status = "okay";
};
@@ -126,7 +126,7 @@
/* test disable using absolute target path */
overlay1 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/test-selftest1";
+ target-path = "/testcase-data/overlay-node/test-bus/test-unittest1";
__overlay__ {
status = "disabled";
};
@@ -136,7 +136,7 @@
/* test enable using label */
overlay2 {
fragment@0 {
- target = <&selftest2>;
+ target = <&unittest2>;
__overlay__ {
status = "okay";
};
@@ -146,7 +146,7 @@
/* test disable using label */
overlay3 {
fragment@0 {
- target = <&selftest3>;
+ target = <&unittest3>;
__overlay__ {
status = "disabled";
};
@@ -156,15 +156,15 @@
/* test insertion of a full node */
overlay4 {
fragment@0 {
- target = <&selftestbus>;
+ target = <&unittestbus>;
__overlay__ {
/* suppress DTC warning */
#address-cells = <1>;
#size-cells = <0>;
- test-selftest4 {
- compatible = "selftest";
+ test-unittest4 {
+ compatible = "unittest";
status = "okay";
reg = <4>;
};
@@ -175,7 +175,7 @@
/* test overlay apply revert */
overlay5 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/test-selftest5";
+ target-path = "/testcase-data/overlay-node/test-bus/test-unittest5";
__overlay__ {
status = "okay";
};
@@ -185,7 +185,7 @@
/* test overlays application and removal in sequence */
overlay6 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/test-selftest6";
+ target-path = "/testcase-data/overlay-node/test-bus/test-unittest6";
__overlay__ {
status = "okay";
};
@@ -193,7 +193,7 @@
};
overlay7 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/test-selftest7";
+ target-path = "/testcase-data/overlay-node/test-bus/test-unittest7";
__overlay__ {
status = "okay";
};
@@ -203,7 +203,7 @@
/* test overlays application and removal in bad sequence */
overlay8 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/test-selftest8";
+ target-path = "/testcase-data/overlay-node/test-bus/test-unittest8";
__overlay__ {
status = "okay";
};
@@ -211,7 +211,7 @@
};
overlay9 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/test-selftest8";
+ target-path = "/testcase-data/overlay-node/test-bus/test-unittest8";
__overlay__ {
property-foo = "bar";
};
@@ -227,16 +227,16 @@
#address-cells = <1>;
#size-cells = <0>;
- test-selftest10 {
- compatible = "selftest";
+ test-unittest10 {
+ compatible = "unittest";
status = "okay";
reg = <10>;
#address-cells = <1>;
#size-cells = <0>;
- test-selftest101 {
- compatible = "selftest";
+ test-unittest101 {
+ compatible = "unittest";
status = "okay";
reg = <1>;
};
@@ -255,16 +255,16 @@
#address-cells = <1>;
#size-cells = <0>;
- test-selftest11 {
- compatible = "selftest";
+ test-unittest11 {
+ compatible = "unittest";
status = "okay";
reg = <11>;
#address-cells = <1>;
#size-cells = <0>;
- test-selftest111 {
- compatible = "selftest";
+ test-unittest111 {
+ compatible = "unittest";
status = "okay";
reg = <1>;
};
@@ -277,7 +277,7 @@
/* test enable using absolute target path (i2c) */
overlay12 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-selftest12";
+ target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12";
__overlay__ {
status = "okay";
};
@@ -287,7 +287,7 @@
/* test disable using absolute target path (i2c) */
overlay13 {
fragment@0 {
- target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-selftest13";
+ target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13";
__overlay__ {
status = "disabled";
};
@@ -301,9 +301,9 @@
__overlay__ {
#address-cells = <1>;
#size-cells = <0>;
- test-selftest15 {
+ test-unittest15 {
reg = <11>;
- compatible = "selftest-i2c-mux";
+ compatible = "unittest-i2c-mux";
status = "okay";
#address-cells = <1>;
@@ -316,7 +316,7 @@
test-mux-dev {
reg = <32>;
- compatible = "selftest-i2c-dev";
+ compatible = "unittest-i2c-dev";
status = "okay";
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 52c45c7df07f..18016341d5a9 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -23,117 +23,119 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/bitops.h>
+
#include "of_private.h"
-static struct selftest_results {
+static struct unittest_results {
int passed;
int failed;
-} selftest_results;
+} unittest_results;
-#define selftest(result, fmt, ...) ({ \
+#define unittest(result, fmt, ...) ({ \
bool failed = !(result); \
if (failed) { \
- selftest_results.failed++; \
+ unittest_results.failed++; \
pr_err("FAIL %s():%i " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} else { \
- selftest_results.passed++; \
+ unittest_results.passed++; \
pr_debug("pass %s():%i\n", __func__, __LINE__); \
} \
failed; \
})
-static void __init of_selftest_find_node_by_name(void)
+static void __init of_unittest_find_node_by_name(void)
{
struct device_node *np;
const char *options;
np = of_find_node_by_path("/testcase-data");
- selftest(np && !strcmp("/testcase-data", np->full_name),
+ unittest(np && !strcmp("/testcase-data", np->full_name),
"find /testcase-data failed\n");
of_node_put(np);
/* Test if trailing '/' works */
np = of_find_node_by_path("/testcase-data/");
- selftest(!np, "trailing '/' on /testcase-data/ should fail\n");
+ unittest(!np, "trailing '/' on /testcase-data/ should fail\n");
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
- selftest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", np->full_name),
+ unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", np->full_name),
"find /testcase-data/phandle-tests/consumer-a failed\n");
of_node_put(np);
np = of_find_node_by_path("testcase-alias");
- selftest(np && !strcmp("/testcase-data", np->full_name),
+ unittest(np && !strcmp("/testcase-data", np->full_name),
"find testcase-alias failed\n");
of_node_put(np);
/* Test if trailing '/' works on aliases */
np = of_find_node_by_path("testcase-alias/");
- selftest(!np, "trailing '/' on testcase-alias/ should fail\n");
+ unittest(!np, "trailing '/' on testcase-alias/ should fail\n");
np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a");
- selftest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", np->full_name),
+ unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", np->full_name),
"find testcase-alias/phandle-tests/consumer-a failed\n");
of_node_put(np);
np = of_find_node_by_path("/testcase-data/missing-path");
- selftest(!np, "non-existent path returned node %s\n", np->full_name);
+ unittest(!np, "non-existent path returned node %s\n", np->full_name);
of_node_put(np);
np = of_find_node_by_path("missing-alias");
- selftest(!np, "non-existent alias returned node %s\n", np->full_name);
+ unittest(!np, "non-existent alias returned node %s\n", np->full_name);
of_node_put(np);
np = of_find_node_by_path("testcase-alias/missing-path");
- selftest(!np, "non-existent alias with relative path returned node %s\n", np->full_name);
+ unittest(!np, "non-existent alias with relative path returned node %s\n", np->full_name);
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:testoption", &options);
- selftest(np && !strcmp("testoption", options),
+ unittest(np && !strcmp("testoption", options),
"option path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
- selftest(np && !strcmp("test/option", options),
+ unittest(np && !strcmp("test/option", options),
"option path test, subcase #1 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
- selftest(np && !strcmp("test/option", options),
+ unittest(np && !strcmp("test/option", options),
"option path test, subcase #2 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
- selftest(np, "NULL option path test failed\n");
+ unittest(np, "NULL option path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:testaliasoption",
&options);
- selftest(np && !strcmp("testaliasoption", options),
+ unittest(np && !strcmp("testaliasoption", options),
"option alias path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
&options);
- selftest(np && !strcmp("test/alias/option", options),
+ unittest(np && !strcmp("test/alias/option", options),
"option alias path test, subcase #1 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
- selftest(np, "NULL option alias path test failed\n");
+ unittest(np, "NULL option alias path test failed\n");
of_node_put(np);
options = "testoption";
np = of_find_node_opts_by_path("testcase-alias", &options);
- selftest(np && !options, "option clearing test failed\n");
+ unittest(np && !options, "option clearing test failed\n");
of_node_put(np);
options = "testoption";
np = of_find_node_opts_by_path("/", &options);
- selftest(np && !options, "option clearing root node test failed\n");
+ unittest(np && !options, "option clearing root node test failed\n");
of_node_put(np);
}
-static void __init of_selftest_dynamic(void)
+static void __init of_unittest_dynamic(void)
{
struct device_node *np;
struct property *prop;
@@ -147,7 +149,7 @@ static void __init of_selftest_dynamic(void)
/* Array of 4 properties for the purpose of testing */
prop = kzalloc(sizeof(*prop) * 4, GFP_KERNEL);
if (!prop) {
- selftest(0, "kzalloc() failed\n");
+ unittest(0, "kzalloc() failed\n");
return;
}
@@ -155,20 +157,20 @@ static void __init of_selftest_dynamic(void)
prop->name = "new-property";
prop->value = "new-property-data";
prop->length = strlen(prop->value);
- selftest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
+ unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
/* Try to add an existing property - should fail */
prop++;
prop->name = "new-property";
prop->value = "new-property-data-should-fail";
prop->length = strlen(prop->value);
- selftest(of_add_property(np, prop) != 0,
+ unittest(of_add_property(np, prop) != 0,
"Adding an existing property should have failed\n");
/* Try to modify an existing property - should pass */
prop->value = "modify-property-data-should-pass";
prop->length = strlen(prop->value);
- selftest(of_update_property(np, prop) == 0,
+ unittest(of_update_property(np, prop) == 0,
"Updating an existing property should have passed\n");
/* Try to modify non-existent property - should pass*/
@@ -176,11 +178,11 @@ static void __init of_selftest_dynamic(void)
prop->name = "modify-property";
prop->value = "modify-missing-property-data-should-pass";
prop->length = strlen(prop->value);
- selftest(of_update_property(np, prop) == 0,
+ unittest(of_update_property(np, prop) == 0,
"Updating a missing property should have passed\n");
/* Remove property - should pass */
- selftest(of_remove_property(np, prop) == 0,
+ unittest(of_remove_property(np, prop) == 0,
"Removing a property should have passed\n");
/* Adding very large property - should pass */
@@ -188,13 +190,13 @@ static void __init of_selftest_dynamic(void)
prop->name = "large-property-PAGE_SIZEx8";
prop->length = PAGE_SIZE * 8;
prop->value = kzalloc(prop->length, GFP_KERNEL);
- selftest(prop->value != NULL, "Unable to allocate large buffer\n");
+ unittest(prop->value != NULL, "Unable to allocate large buffer\n");
if (prop->value)
- selftest(of_add_property(np, prop) == 0,
+ unittest(of_add_property(np, prop) == 0,
"Adding a large property should have passed\n");
}
-static int __init of_selftest_check_node_linkage(struct device_node *np)
+static int __init of_unittest_check_node_linkage(struct device_node *np)
{
struct device_node *child;
int count = 0, rc;
@@ -206,7 +208,7 @@ static int __init of_selftest_check_node_linkage(struct device_node *np)
return -EINVAL;
}
- rc = of_selftest_check_node_linkage(child);
+ rc = of_unittest_check_node_linkage(child);
if (rc < 0)
return rc;
count += rc;
@@ -215,7 +217,7 @@ static int __init of_selftest_check_node_linkage(struct device_node *np)
return count + 1;
}
-static void __init of_selftest_check_tree_linkage(void)
+static void __init of_unittest_check_tree_linkage(void)
{
struct device_node *np;
int allnode_count = 0, child_count;
@@ -225,11 +227,12 @@ static void __init of_selftest_check_tree_linkage(void)
for_each_of_allnodes(np)
allnode_count++;
- child_count = of_selftest_check_node_linkage(of_root);
+ child_count = of_unittest_check_node_linkage(of_root);
- selftest(child_count > 0, "Device node data structure is corrupted\n");
- selftest(child_count == allnode_count, "allnodes list size (%i) doesn't match"
- "sibling lists size (%i)\n", allnode_count, child_count);
+ unittest(child_count > 0, "Device node data structure is corrupted\n");
+ unittest(child_count == allnode_count,
+ "allnodes list size (%i) doesn't match sibling lists size (%i)\n",
+ allnode_count, child_count);
pr_debug("allnodes list size (%i); sibling lists size (%i)\n", allnode_count, child_count);
}
@@ -239,7 +242,7 @@ struct node_hash {
};
static DEFINE_HASHTABLE(phandle_ht, 8);
-static void __init of_selftest_check_phandles(void)
+static void __init of_unittest_check_phandles(void)
{
struct device_node *np;
struct node_hash *nh;
@@ -267,7 +270,7 @@ static void __init of_selftest_check_phandles(void)
hash_add(phandle_ht, &nh->node, np->phandle);
phandle_count++;
}
- selftest(dup_count == 0, "Found %i duplicates in %i phandles\n",
+ unittest(dup_count == 0, "Found %i duplicates in %i phandles\n",
dup_count, phandle_count);
/* Clean up */
@@ -277,7 +280,7 @@ static void __init of_selftest_check_phandles(void)
}
}
-static void __init of_selftest_parse_phandle_with_args(void)
+static void __init of_unittest_parse_phandle_with_args(void)
{
struct device_node *np;
struct of_phandle_args args;
@@ -290,10 +293,11 @@ static void __init of_selftest_parse_phandle_with_args(void)
}
rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
- selftest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
+ unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
for (i = 0; i < 8; i++) {
bool passed = true;
+
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells", i, &args);
@@ -342,44 +346,44 @@ static void __init of_selftest_parse_phandle_with_args(void)
passed = false;
}
- selftest(passed, "index %i - data error on node %s rc=%i\n",
+ unittest(passed, "index %i - data error on node %s rc=%i\n",
i, args.np->full_name, rc);
}
/* Check for missing list property */
rc = of_parse_phandle_with_args(np, "phandle-list-missing",
"#phandle-cells", 0, &args);
- selftest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
+ unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
rc = of_count_phandle_with_args(np, "phandle-list-missing",
"#phandle-cells");
- selftest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
+ unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
/* Check for missing cells property */
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing", 0, &args);
- selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
rc = of_count_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing");
- selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells", 0, &args);
- selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
rc = of_count_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells");
- selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
- selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells");
- selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
-static void __init of_selftest_property_string(void)
+static void __init of_unittest_property_string(void)
{
const char *strings[4];
struct device_node *np;
@@ -392,79 +396,79 @@ static void __init of_selftest_property_string(void)
}
rc = of_property_match_string(np, "phandle-list-names", "first");
- selftest(rc == 0, "first expected:0 got:%i\n", rc);
+ unittest(rc == 0, "first expected:0 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "second");
- selftest(rc == 1, "second expected:1 got:%i\n", rc);
+ unittest(rc == 1, "second expected:1 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "third");
- selftest(rc == 2, "third expected:2 got:%i\n", rc);
+ unittest(rc == 2, "third expected:2 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "fourth");
- selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
+ unittest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
rc = of_property_match_string(np, "missing-property", "blah");
- selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
+ unittest(rc == -EINVAL, "missing property; rc=%i\n", rc);
rc = of_property_match_string(np, "empty-property", "blah");
- selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
+ unittest(rc == -ENODATA, "empty property; rc=%i\n", rc);
rc = of_property_match_string(np, "unterminated-string", "blah");
- selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+ unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
/* of_property_count_strings() tests */
rc = of_property_count_strings(np, "string-property");
- selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+ unittest(rc == 1, "Incorrect string count; rc=%i\n", rc);
rc = of_property_count_strings(np, "phandle-list-names");
- selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+ unittest(rc == 3, "Incorrect string count; rc=%i\n", rc);
rc = of_property_count_strings(np, "unterminated-string");
- selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+ unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
rc = of_property_count_strings(np, "unterminated-string-list");
- selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+ unittest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
/* of_property_read_string_index() tests */
rc = of_property_read_string_index(np, "string-property", 0, strings);
- selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "string-property", 1, strings);
- selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
- selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
- selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
- selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
- selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
- selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
- selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
- selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ unittest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
strings[1] = NULL;
/* of_property_read_string_array() tests */
rc = of_property_read_string_array(np, "string-property", strings, 4);
- selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+ unittest(rc == 1, "Incorrect string count; rc=%i\n", rc);
rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
- selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+ unittest(rc == 3, "Incorrect string count; rc=%i\n", rc);
rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
- selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+ unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
/* -- An incorrectly formed string should cause a failure */
rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
- selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+ unittest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
/* -- parsing the correctly formed strings should still work: */
strings[2] = NULL;
rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
- selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
+ unittest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
strings[1] = NULL;
rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
- selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
+ unittest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
}
#define propcmp(p1, p2) (((p1)->length == (p2)->length) && \
(p1)->value && (p2)->value && \
!memcmp((p1)->value, (p2)->value, (p1)->length) && \
!strcmp((p1)->name, (p2)->name))
-static void __init of_selftest_property_copy(void)
+static void __init of_unittest_property_copy(void)
{
#ifdef CONFIG_OF_DYNAMIC
struct property p1 = { .name = "p1", .length = 0, .value = "" };
@@ -472,20 +476,20 @@ static void __init of_selftest_property_copy(void)
struct property *new;
new = __of_prop_dup(&p1, GFP_KERNEL);
- selftest(new && propcmp(&p1, new), "empty property didn't copy correctly\n");
+ unittest(new && propcmp(&p1, new), "empty property didn't copy correctly\n");
kfree(new->value);
kfree(new->name);
kfree(new);
new = __of_prop_dup(&p2, GFP_KERNEL);
- selftest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n");
+ unittest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n");
kfree(new->value);
kfree(new->name);
kfree(new);
#endif
}
-static void __init of_selftest_changeset(void)
+static void __init of_unittest_changeset(void)
{
#ifdef CONFIG_OF_DYNAMIC
struct property *ppadd, padd = { .name = "prop-add", .length = 0, .value = "" };
@@ -495,51 +499,51 @@ static void __init of_selftest_changeset(void)
struct of_changeset chgset;
n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
- selftest(n1, "testcase setup failure\n");
+ unittest(n1, "testcase setup failure\n");
n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
- selftest(n2, "testcase setup failure\n");
+ unittest(n2, "testcase setup failure\n");
n21 = __of_node_dup(NULL, "%s/%s", "/testcase-data/changeset/n2", "n21");
- selftest(n21, "testcase setup failure %p\n", n21);
+ unittest(n21, "testcase setup failure %p\n", n21);
nremove = of_find_node_by_path("/testcase-data/changeset/node-remove");
- selftest(nremove, "testcase setup failure\n");
+ unittest(nremove, "testcase setup failure\n");
ppadd = __of_prop_dup(&padd, GFP_KERNEL);
- selftest(ppadd, "testcase setup failure\n");
+ unittest(ppadd, "testcase setup failure\n");
ppupdate = __of_prop_dup(&pupdate, GFP_KERNEL);
- selftest(ppupdate, "testcase setup failure\n");
+ unittest(ppupdate, "testcase setup failure\n");
parent = nremove->parent;
n1->parent = parent;
n2->parent = parent;
n21->parent = n2;
n2->child = n21;
ppremove = of_find_property(parent, "prop-remove", NULL);
- selftest(ppremove, "failed to find removal prop");
+ unittest(ppremove, "failed to find removal prop");
of_changeset_init(&chgset);
- selftest(!of_changeset_attach_node(&chgset, n1), "fail attach n1\n");
- selftest(!of_changeset_attach_node(&chgset, n2), "fail attach n2\n");
- selftest(!of_changeset_detach_node(&chgset, nremove), "fail remove node\n");
- selftest(!of_changeset_attach_node(&chgset, n21), "fail attach n21\n");
- selftest(!of_changeset_add_property(&chgset, parent, ppadd), "fail add prop\n");
- selftest(!of_changeset_update_property(&chgset, parent, ppupdate), "fail update prop\n");
- selftest(!of_changeset_remove_property(&chgset, parent, ppremove), "fail remove prop\n");
+ unittest(!of_changeset_attach_node(&chgset, n1), "fail attach n1\n");
+ unittest(!of_changeset_attach_node(&chgset, n2), "fail attach n2\n");
+ unittest(!of_changeset_detach_node(&chgset, nremove), "fail remove node\n");
+ unittest(!of_changeset_attach_node(&chgset, n21), "fail attach n21\n");
+ unittest(!of_changeset_add_property(&chgset, parent, ppadd), "fail add prop\n");
+ unittest(!of_changeset_update_property(&chgset, parent, ppupdate), "fail update prop\n");
+ unittest(!of_changeset_remove_property(&chgset, parent, ppremove), "fail remove prop\n");
mutex_lock(&of_mutex);
- selftest(!of_changeset_apply(&chgset), "apply failed\n");
+ unittest(!of_changeset_apply(&chgset), "apply failed\n");
mutex_unlock(&of_mutex);
/* Make sure node names are constructed correctly */
- selftest((np = of_find_node_by_path("/testcase-data/changeset/n2/n21")),
+ unittest((np = of_find_node_by_path("/testcase-data/changeset/n2/n21")),
"'%s' not added\n", n21->full_name);
of_node_put(np);
mutex_lock(&of_mutex);
- selftest(!of_changeset_revert(&chgset), "revert failed\n");
+ unittest(!of_changeset_revert(&chgset), "revert failed\n");
mutex_unlock(&of_mutex);
of_changeset_destroy(&chgset);
#endif
}
-static void __init of_selftest_parse_interrupts(void)
+static void __init of_unittest_parse_interrupts(void)
{
struct device_node *np;
struct of_phandle_args args;
@@ -553,6 +557,7 @@ static void __init of_selftest_parse_interrupts(void)
for (i = 0; i < 4; i++) {
bool passed = true;
+
args.args_count = 0;
rc = of_irq_parse_one(np, i, &args);
@@ -560,7 +565,7 @@ static void __init of_selftest_parse_interrupts(void)
passed &= (args.args_count == 1);
passed &= (args.args[0] == (i + 1));
- selftest(passed, "index %i - data error on node %s rc=%i\n",
+ unittest(passed, "index %i - data error on node %s rc=%i\n",
i, args.np->full_name, rc);
}
of_node_put(np);
@@ -573,6 +578,7 @@ static void __init of_selftest_parse_interrupts(void)
for (i = 0; i < 4; i++) {
bool passed = true;
+
args.args_count = 0;
rc = of_irq_parse_one(np, i, &args);
@@ -605,13 +611,13 @@ static void __init of_selftest_parse_interrupts(void)
default:
passed = false;
}
- selftest(passed, "index %i - data error on node %s rc=%i\n",
+ unittest(passed, "index %i - data error on node %s rc=%i\n",
i, args.np->full_name, rc);
}
of_node_put(np);
}
-static void __init of_selftest_parse_interrupts_extended(void)
+static void __init of_unittest_parse_interrupts_extended(void)
{
struct device_node *np;
struct of_phandle_args args;
@@ -625,6 +631,7 @@ static void __init of_selftest_parse_interrupts_extended(void)
for (i = 0; i < 7; i++) {
bool passed = true;
+
rc = of_irq_parse_one(np, i, &args);
/* Test the values from tests-phandle.dtsi */
@@ -674,13 +681,13 @@ static void __init of_selftest_parse_interrupts_extended(void)
passed = false;
}
- selftest(passed, "index %i - data error on node %s rc=%i\n",
+ unittest(passed, "index %i - data error on node %s rc=%i\n",
i, args.np->full_name, rc);
}
of_node_put(np);
}
-static struct of_device_id match_node_table[] = {
+static const struct of_device_id match_node_table[] = {
{ .data = "A", .name = "name0", }, /* Name alone is lowest priority */
{ .data = "B", .type = "type1", }, /* followed by type alone */
@@ -715,7 +722,7 @@ static struct {
{ .path = "/testcase-data/match-node/name9", .data = "K", },
};
-static void __init of_selftest_match_node(void)
+static void __init of_unittest_match_node(void)
{
struct device_node *np;
const struct of_device_id *match;
@@ -724,37 +731,37 @@ static void __init of_selftest_match_node(void)
for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) {
np = of_find_node_by_path(match_node_tests[i].path);
if (!np) {
- selftest(0, "missing testcase node %s\n",
+ unittest(0, "missing testcase node %s\n",
match_node_tests[i].path);
continue;
}
match = of_match_node(match_node_table, np);
if (!match) {
- selftest(0, "%s didn't match anything\n",
+ unittest(0, "%s didn't match anything\n",
match_node_tests[i].path);
continue;
}
if (strcmp(match->data, match_node_tests[i].data) != 0) {
- selftest(0, "%s got wrong match. expected %s, got %s\n",
+ unittest(0, "%s got wrong match. expected %s, got %s\n",
match_node_tests[i].path, match_node_tests[i].data,
(const char *)match->data);
continue;
}
- selftest(1, "passed");
+ unittest(1, "passed");
}
}
-struct device test_bus = {
- .init_name = "unittest-bus",
+static const struct platform_device_info test_bus_info = {
+ .name = "unittest-bus",
};
-static void __init of_selftest_platform_populate(void)
+static void __init of_unittest_platform_populate(void)
{
int irq, rc;
struct device_node *np, *child, *grandchild;
- struct platform_device *pdev;
- struct of_device_id match[] = {
+ struct platform_device *pdev, *test_bus;
+ const struct of_device_id match[] = {
{ .compatible = "test-device", },
{}
};
@@ -765,43 +772,47 @@ static void __init of_selftest_platform_populate(void)
/* Test that a missing irq domain returns -EPROBE_DEFER */
np = of_find_node_by_path("/testcase-data/testcase-device1");
pdev = of_find_device_by_node(np);
- selftest(pdev, "device 1 creation failed\n");
+ unittest(pdev, "device 1 creation failed\n");
irq = platform_get_irq(pdev, 0);
- selftest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+ unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
/* Test that a parsing failure does not return -EPROBE_DEFER */
np = of_find_node_by_path("/testcase-data/testcase-device2");
pdev = of_find_device_by_node(np);
- selftest(pdev, "device 2 creation failed\n");
+ unittest(pdev, "device 2 creation failed\n");
irq = platform_get_irq(pdev, 0);
- selftest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+ unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
- if (selftest(np = of_find_node_by_path("/testcase-data/platform-tests"),
- "No testcase data in device tree\n"));
+ np = of_find_node_by_path("/testcase-data/platform-tests");
+ unittest(np, "No testcase data in device tree\n");
+ if (!np)
return;
- if (selftest(!(rc = device_register(&test_bus)),
- "testbus registration failed; rc=%i\n", rc));
+ test_bus = platform_device_register_full(&test_bus_info);
+ rc = PTR_ERR_OR_ZERO(test_bus);
+ unittest(!rc, "testbus registration failed; rc=%i\n", rc);
+ if (rc)
return;
+ test_bus->dev.of_node = np;
+ of_platform_populate(np, match, NULL, &test_bus->dev);
for_each_child_of_node(np, child) {
- of_platform_populate(child, match, NULL, &test_bus);
for_each_child_of_node(child, grandchild)
- selftest(of_find_device_by_node(grandchild),
+ unittest(of_find_device_by_node(grandchild),
"Could not create device for node '%s'\n",
grandchild->name);
}
- of_platform_depopulate(&test_bus);
+ of_platform_depopulate(&test_bus->dev);
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
- selftest(!of_find_device_by_node(grandchild),
+ unittest(!of_find_device_by_node(grandchild),
"device didn't get destroyed '%s'\n",
grandchild->name);
}
- device_unregister(&test_bus);
+ platform_device_unregister(test_bus);
of_node_put(np);
}
@@ -866,13 +877,17 @@ static int attach_node_and_children(struct device_node *np)
}
/**
- * selftest_data_add - Reads, copies data from
+ * unittest_data_add - Reads, copies data from
* linked tree and attaches it to the live tree
*/
-static int __init selftest_data_add(void)
+static int __init unittest_data_add(void)
{
- void *selftest_data;
- struct device_node *selftest_data_node, *np;
+ void *unittest_data;
+ struct device_node *unittest_data_node, *np;
+ /*
+ * __dtb_testcases_begin[] and __dtb_testcases_end[] are magically
+ * created by cmd_dt_S_dtb in scripts/Makefile.lib
+ */
extern uint8_t __dtb_testcases_begin[];
extern uint8_t __dtb_testcases_end[];
const int size = __dtb_testcases_end - __dtb_testcases_begin;
@@ -885,27 +900,27 @@ static int __init selftest_data_add(void)
}
/* creating copy */
- selftest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
+ unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
- if (!selftest_data) {
- pr_warn("%s: Failed to allocate memory for selftest_data; "
+ if (!unittest_data) {
+ pr_warn("%s: Failed to allocate memory for unittest_data; "
"not running tests\n", __func__);
return -ENOMEM;
}
- of_fdt_unflatten_tree(selftest_data, &selftest_data_node);
- if (!selftest_data_node) {
+ of_fdt_unflatten_tree(unittest_data, &unittest_data_node);
+ if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
return -ENODATA;
}
- of_node_set_flag(selftest_data_node, OF_DETACHED);
- rc = of_resolve_phandles(selftest_data_node);
+ of_node_set_flag(unittest_data_node, OF_DETACHED);
+ rc = of_resolve_phandles(unittest_data_node);
if (rc) {
pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc);
return -EINVAL;
}
if (!of_root) {
- of_root = selftest_data_node;
+ of_root = unittest_data_node;
for_each_of_allnodes(np)
__of_attach_node_sysfs(np);
of_aliases = of_find_node_by_path("/aliases");
@@ -914,9 +929,10 @@ static int __init selftest_data_add(void)
}
/* attach the sub-tree to live tree */
- np = selftest_data_node->child;
+ np = unittest_data_node->child;
while (np) {
struct device_node *next = np->sibling;
+
np->parent = of_root;
attach_node_and_children(np);
np = next;
@@ -926,7 +942,7 @@ static int __init selftest_data_add(void)
#ifdef CONFIG_OF_OVERLAY
-static int selftest_probe(struct platform_device *pdev)
+static int unittest_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -944,7 +960,7 @@ static int selftest_probe(struct platform_device *pdev)
return 0;
}
-static int selftest_remove(struct platform_device *pdev)
+static int unittest_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -953,18 +969,18 @@ static int selftest_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id selftest_match[] = {
- { .compatible = "selftest", },
+static const struct of_device_id unittest_match[] = {
+ { .compatible = "unittest", },
{},
};
-static struct platform_driver selftest_driver = {
- .probe = selftest_probe,
- .remove = selftest_remove,
+static struct platform_driver unittest_driver = {
+ .probe = unittest_probe,
+ .remove = unittest_remove,
.driver = {
- .name = "selftest",
+ .name = "unittest",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(selftest_match),
+ .of_match_table = of_match_ptr(unittest_match),
},
};
@@ -1046,7 +1062,7 @@ static int of_path_device_type_exists(const char *path,
return 0;
}
-static const char *selftest_path(int nr, enum overlay_type ovtype)
+static const char *unittest_path(int nr, enum overlay_type ovtype)
{
const char *base;
static char buf[256];
@@ -1062,16 +1078,16 @@ static const char *selftest_path(int nr, enum overlay_type ovtype)
buf[0] = '\0';
return buf;
}
- snprintf(buf, sizeof(buf) - 1, "%s/test-selftest%d", base, nr);
+ snprintf(buf, sizeof(buf) - 1, "%s/test-unittest%d", base, nr);
buf[sizeof(buf) - 1] = '\0';
return buf;
}
-static int of_selftest_device_exists(int selftest_nr, enum overlay_type ovtype)
+static int of_unittest_device_exists(int unittest_nr, enum overlay_type ovtype)
{
const char *path;
- path = selftest_path(selftest_nr, ovtype);
+ path = unittest_path(unittest_nr, ovtype);
switch (ovtype) {
case PDEV_OVERLAY:
@@ -1095,7 +1111,60 @@ static const char *overlay_path(int nr)
static const char *bus_path = "/testcase-data/overlay-node/test-bus";
-static int of_selftest_apply_overlay(int selftest_nr, int overlay_nr,
+/* it is guaranteed that overlay ids are assigned in sequence */
+#define MAX_UNITTEST_OVERLAYS 256
+static unsigned long overlay_id_bits[BITS_TO_LONGS(MAX_UNITTEST_OVERLAYS)];
+static int overlay_first_id = -1;
+
+static void of_unittest_track_overlay(int id)
+{
+ if (overlay_first_id < 0)
+ overlay_first_id = id;
+ id -= overlay_first_id;
+
+ /* we shouldn't need that many */
+ BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ overlay_id_bits[BIT_WORD(id)] |= BIT_MASK(id);
+}
+
+static void of_unittest_untrack_overlay(int id)
+{
+ if (overlay_first_id < 0)
+ return;
+ id -= overlay_first_id;
+ BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
+}
+
+static void of_unittest_destroy_tracked_overlays(void)
+{
+ int id, ret, defers;
+
+ if (overlay_first_id < 0)
+ return;
+
+ /* try until no defers */
+ do {
+ defers = 0;
+ /* remove in reverse order */
+ for (id = MAX_UNITTEST_OVERLAYS - 1; id >= 0; id--) {
+ if (!(overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id)))
+ continue;
+
+ ret = of_overlay_destroy(id + overlay_first_id);
+ if (ret != 0) {
+ defers++;
+ pr_warn("%s: overlay destroy failed for #%d\n",
+ __func__, id + overlay_first_id);
+ continue;
+ }
+
+ overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
+ }
+ } while (defers > 0);
+}
+
+static int of_unittest_apply_overlay(int unittest_nr, int overlay_nr,
int *overlay_id)
{
struct device_node *np = NULL;
@@ -1103,7 +1172,7 @@ static int of_selftest_apply_overlay(int selftest_nr, int overlay_nr,
np = of_find_node_by_path(overlay_path(overlay_nr));
if (np == NULL) {
- selftest(0, "could not find overlay node @\"%s\"\n",
+ unittest(0, "could not find overlay node @\"%s\"\n",
overlay_path(overlay_nr));
ret = -EINVAL;
goto out;
@@ -1111,11 +1180,12 @@ static int of_selftest_apply_overlay(int selftest_nr, int overlay_nr,
ret = of_overlay_create(np);
if (ret < 0) {
- selftest(0, "could not create overlay from \"%s\"\n",
+ unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr));
goto out;
}
id = ret;
+ of_unittest_track_overlay(id);
ret = 0;
@@ -1129,31 +1199,31 @@ out:
}
/* apply an overlay while checking before and after states */
-static int of_selftest_apply_overlay_check(int overlay_nr, int selftest_nr,
+static int of_unittest_apply_overlay_check(int overlay_nr, int unittest_nr,
int before, int after, enum overlay_type ovtype)
{
int ret;
- /* selftest device must not be in before state */
- if (of_selftest_device_exists(selftest_nr, ovtype) != before) {
- selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ /* unittest device must be in before state */
+ if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
+ unittest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr, ovtype),
+ unittest_path(unittest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
- ret = of_selftest_apply_overlay(overlay_nr, selftest_nr, NULL);
+ ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, NULL);
if (ret != 0) {
- /* of_selftest_apply_overlay already called selftest() */
+ /* of_unittest_apply_overlay already called unittest() */
return ret;
}
- /* selftest device must be to set to after state */
- if (of_selftest_device_exists(selftest_nr, ovtype) != after) {
- selftest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
+ /* unittest device must be set to after state */
+ if (of_unittest_device_exists(unittest_nr, ovtype) != after) {
+ unittest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr, ovtype),
+ unittest_path(unittest_nr, ovtype),
!after ? "enabled" : "disabled");
return -EINVAL;
}
@@ -1162,50 +1232,50 @@ static int of_selftest_apply_overlay_check(int overlay_nr, int selftest_nr,
}
/* apply an overlay and then revert it while checking before, after states */
-static int of_selftest_apply_revert_overlay_check(int overlay_nr,
- int selftest_nr, int before, int after,
+static int of_unittest_apply_revert_overlay_check(int overlay_nr,
+ int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
int ret, ov_id;
- /* selftest device must be in before state */
- if (of_selftest_device_exists(selftest_nr, ovtype) != before) {
- selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ /* unittest device must be in before state */
+ if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
+ unittest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr, ovtype),
+ unittest_path(unittest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
/* apply the overlay */
- ret = of_selftest_apply_overlay(overlay_nr, selftest_nr, &ov_id);
+ ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, &ov_id);
if (ret != 0) {
- /* of_selftest_apply_overlay already called selftest() */
+ /* of_unittest_apply_overlay already called unittest() */
return ret;
}
- /* selftest device must be in after state */
- if (of_selftest_device_exists(selftest_nr, ovtype) != after) {
- selftest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
+ /* unittest device must be in after state */
+ if (of_unittest_device_exists(unittest_nr, ovtype) != after) {
+ unittest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr, ovtype),
+ unittest_path(unittest_nr, ovtype),
!after ? "enabled" : "disabled");
return -EINVAL;
}
ret = of_overlay_destroy(ov_id);
if (ret != 0) {
- selftest(0, "overlay @\"%s\" failed to be destroyed @\"%s\"\n",
+ unittest(0, "overlay @\"%s\" failed to be destroyed @\"%s\"\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr, ovtype));
+ unittest_path(unittest_nr, ovtype));
return ret;
}
- /* selftest device must be again in before state */
- if (of_selftest_device_exists(selftest_nr, PDEV_OVERLAY) != before) {
- selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ /* unittest device must again be in before state */
+ if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
+ unittest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr),
- selftest_path(selftest_nr, ovtype),
+ unittest_path(unittest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
@@ -1214,98 +1284,98 @@ static int of_selftest_apply_revert_overlay_check(int overlay_nr,
}
/* test activation of device */
-static void of_selftest_overlay_0(void)
+static void of_unittest_overlay_0(void)
{
int ret;
/* device should enable */
- ret = of_selftest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY);
+ ret = of_unittest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 0);
+ unittest(1, "overlay test %d passed\n", 0);
}
/* test deactivation of device */
-static void of_selftest_overlay_1(void)
+static void of_unittest_overlay_1(void)
{
int ret;
/* device should disable */
- ret = of_selftest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY);
+ ret = of_unittest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 1);
+ unittest(1, "overlay test %d passed\n", 1);
}
/* test activation of device */
-static void of_selftest_overlay_2(void)
+static void of_unittest_overlay_2(void)
{
int ret;
/* device should enable */
- ret = of_selftest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY);
+ ret = of_unittest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 2);
+ unittest(1, "overlay test %d passed\n", 2);
}
/* test deactivation of device */
-static void of_selftest_overlay_3(void)
+static void of_unittest_overlay_3(void)
{
int ret;
/* device should disable */
- ret = of_selftest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY);
+ ret = of_unittest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 3);
+ unittest(1, "overlay test %d passed\n", 3);
}
/* test activation of a full device node */
-static void of_selftest_overlay_4(void)
+static void of_unittest_overlay_4(void)
{
int ret;
/* device should disable */
- ret = of_selftest_apply_overlay_check(4, 4, 0, 1, PDEV_OVERLAY);
+ ret = of_unittest_apply_overlay_check(4, 4, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 4);
+ unittest(1, "overlay test %d passed\n", 4);
}
/* test overlay apply/revert sequence */
-static void of_selftest_overlay_5(void)
+static void of_unittest_overlay_5(void)
{
int ret;
/* device should disable */
- ret = of_selftest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY);
+ ret = of_unittest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 5);
+ unittest(1, "overlay test %d passed\n", 5);
}
/* test overlay application in sequence */
-static void of_selftest_overlay_6(void)
+static void of_unittest_overlay_6(void)
{
struct device_node *np;
int ret, i, ov_id[2];
- int overlay_nr = 6, selftest_nr = 6;
+ int overlay_nr = 6, unittest_nr = 6;
int before = 0, after = 1;
- /* selftest device must be in before state */
+ /* unittest device must be in before state */
for (i = 0; i < 2; i++) {
- if (of_selftest_device_exists(selftest_nr + i, PDEV_OVERLAY)
+ if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
!= before) {
- selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ unittest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i,
+ unittest_path(unittest_nr + i,
PDEV_OVERLAY),
!before ? "enabled" : "disabled");
return;
@@ -1317,27 +1387,28 @@ static void of_selftest_overlay_6(void)
np = of_find_node_by_path(overlay_path(overlay_nr + i));
if (np == NULL) {
- selftest(0, "could not find overlay node @\"%s\"\n",
+ unittest(0, "could not find overlay node @\"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
ret = of_overlay_create(np);
if (ret < 0) {
- selftest(0, "could not create overlay from \"%s\"\n",
+ unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
ov_id[i] = ret;
+ of_unittest_track_overlay(ov_id[i]);
}
for (i = 0; i < 2; i++) {
- /* selftest device must be in after state */
- if (of_selftest_device_exists(selftest_nr + i, PDEV_OVERLAY)
+ /* unittest device must be in after state */
+ if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
!= after) {
- selftest(0, "overlay @\"%s\" failed @\"%s\" %s\n",
+ unittest(0, "overlay @\"%s\" failed @\"%s\" %s\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i,
+ unittest_path(unittest_nr + i,
PDEV_OVERLAY),
!after ? "enabled" : "disabled");
return;
@@ -1347,36 +1418,37 @@ static void of_selftest_overlay_6(void)
for (i = 1; i >= 0; i--) {
ret = of_overlay_destroy(ov_id[i]);
if (ret != 0) {
- selftest(0, "overlay @\"%s\" failed destroy @\"%s\"\n",
+ unittest(0, "overlay @\"%s\" failed destroy @\"%s\"\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i,
+ unittest_path(unittest_nr + i,
PDEV_OVERLAY));
return;
}
+ of_unittest_untrack_overlay(ov_id[i]);
}
for (i = 0; i < 2; i++) {
- /* selftest device must be again in before state */
- if (of_selftest_device_exists(selftest_nr + i, PDEV_OVERLAY)
+ /* unittest device must again be in before state */
+ if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
!= before) {
- selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ unittest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr + i,
+ unittest_path(unittest_nr + i,
PDEV_OVERLAY),
!before ? "enabled" : "disabled");
return;
}
}
- selftest(1, "overlay test %d passed\n", 6);
+ unittest(1, "overlay test %d passed\n", 6);
}
/* test overlay application in sequence */
-static void of_selftest_overlay_8(void)
+static void of_unittest_overlay_8(void)
{
struct device_node *np;
int ret, i, ov_id[2];
- int overlay_nr = 8, selftest_nr = 8;
+ int overlay_nr = 8, unittest_nr = 8;
/* we don't care about device state in this test */
@@ -1385,26 +1457,27 @@ static void of_selftest_overlay_8(void)
np = of_find_node_by_path(overlay_path(overlay_nr + i));
if (np == NULL) {
- selftest(0, "could not find overlay node @\"%s\"\n",
+ unittest(0, "could not find overlay node @\"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
ret = of_overlay_create(np);
if (ret < 0) {
- selftest(0, "could not create overlay from \"%s\"\n",
+ unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
ov_id[i] = ret;
+ of_unittest_track_overlay(ov_id[i]);
}
/* now try to remove first overlay (it should fail) */
ret = of_overlay_destroy(ov_id[0]);
if (ret == 0) {
- selftest(0, "overlay @\"%s\" was destroyed @\"%s\"\n",
+ unittest(0, "overlay @\"%s\" was destroyed @\"%s\"\n",
overlay_path(overlay_nr + 0),
- selftest_path(selftest_nr,
+ unittest_path(unittest_nr,
PDEV_OVERLAY));
return;
}
@@ -1413,85 +1486,86 @@ static void of_selftest_overlay_8(void)
for (i = 1; i >= 0; i--) {
ret = of_overlay_destroy(ov_id[i]);
if (ret != 0) {
- selftest(0, "overlay @\"%s\" not destroyed @\"%s\"\n",
+ unittest(0, "overlay @\"%s\" not destroyed @\"%s\"\n",
overlay_path(overlay_nr + i),
- selftest_path(selftest_nr,
+ unittest_path(unittest_nr,
PDEV_OVERLAY));
return;
}
+ of_unittest_untrack_overlay(ov_id[i]);
}
- selftest(1, "overlay test %d passed\n", 8);
+ unittest(1, "overlay test %d passed\n", 8);
}
/* test insertion of a bus with parent devices */
-static void of_selftest_overlay_10(void)
+static void of_unittest_overlay_10(void)
{
int ret;
char *child_path;
/* device should disable */
- ret = of_selftest_apply_overlay_check(10, 10, 0, 1, PDEV_OVERLAY);
- if (selftest(ret == 0,
+ ret = of_unittest_apply_overlay_check(10, 10, 0, 1, PDEV_OVERLAY);
+ if (unittest(ret == 0,
"overlay test %d failed; overlay application\n", 10))
return;
- child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
- selftest_path(10, PDEV_OVERLAY));
- if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
+ child_path = kasprintf(GFP_KERNEL, "%s/test-unittest101",
+ unittest_path(10, PDEV_OVERLAY));
+ if (unittest(child_path, "overlay test %d failed; kasprintf\n", 10))
return;
ret = of_path_device_type_exists(child_path, PDEV_OVERLAY);
kfree(child_path);
- if (selftest(ret, "overlay test %d failed; no child device\n", 10))
+ if (unittest(ret, "overlay test %d failed; no child device\n", 10))
return;
}
/* test insertion of a bus with parent devices (and revert) */
-static void of_selftest_overlay_11(void)
+static void of_unittest_overlay_11(void)
{
int ret;
/* device should disable */
- ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1,
+ ret = of_unittest_apply_revert_overlay_check(11, 11, 0, 1,
PDEV_OVERLAY);
- if (selftest(ret == 0,
+ if (unittest(ret == 0,
"overlay test %d failed; overlay application\n", 11))
return;
}
#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
-struct selftest_i2c_bus_data {
+struct unittest_i2c_bus_data {
struct platform_device *pdev;
struct i2c_adapter adap;
};
-static int selftest_i2c_master_xfer(struct i2c_adapter *adap,
+static int unittest_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
- struct selftest_i2c_bus_data *std = i2c_get_adapdata(adap);
+ struct unittest_i2c_bus_data *std = i2c_get_adapdata(adap);
(void)std;
return num;
}
-static u32 selftest_i2c_functionality(struct i2c_adapter *adap)
+static u32 unittest_i2c_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static const struct i2c_algorithm selftest_i2c_algo = {
- .master_xfer = selftest_i2c_master_xfer,
- .functionality = selftest_i2c_functionality,
+static const struct i2c_algorithm unittest_i2c_algo = {
+ .master_xfer = unittest_i2c_master_xfer,
+ .functionality = unittest_i2c_functionality,
};
-static int selftest_i2c_bus_probe(struct platform_device *pdev)
+static int unittest_i2c_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct selftest_i2c_bus_data *std;
+ struct unittest_i2c_bus_data *std;
struct i2c_adapter *adap;
int ret;
@@ -1505,7 +1579,7 @@ static int selftest_i2c_bus_probe(struct platform_device *pdev)
std = devm_kzalloc(dev, sizeof(*std), GFP_KERNEL);
if (!std) {
- dev_err(dev, "Failed to allocate selftest i2c data\n");
+ dev_err(dev, "Failed to allocate unittest i2c data\n");
return -ENOMEM;
}
@@ -1518,7 +1592,7 @@ static int selftest_i2c_bus_probe(struct platform_device *pdev)
adap->nr = -1;
strlcpy(adap->name, pdev->name, sizeof(adap->name));
adap->class = I2C_CLASS_DEPRECATED;
- adap->algo = &selftest_i2c_algo;
+ adap->algo = &unittest_i2c_algo;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
adap->timeout = 5 * HZ;
@@ -1533,11 +1607,11 @@ static int selftest_i2c_bus_probe(struct platform_device *pdev)
return 0;
}
-static int selftest_i2c_bus_remove(struct platform_device *pdev)
+static int unittest_i2c_bus_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct selftest_i2c_bus_data *std = platform_get_drvdata(pdev);
+ struct unittest_i2c_bus_data *std = platform_get_drvdata(pdev);
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
i2c_del_adapter(&std->adap);
@@ -1545,21 +1619,21 @@ static int selftest_i2c_bus_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id selftest_i2c_bus_match[] = {
- { .compatible = "selftest-i2c-bus", },
+static const struct of_device_id unittest_i2c_bus_match[] = {
+ { .compatible = "unittest-i2c-bus", },
{},
};
-static struct platform_driver selftest_i2c_bus_driver = {
- .probe = selftest_i2c_bus_probe,
- .remove = selftest_i2c_bus_remove,
+static struct platform_driver unittest_i2c_bus_driver = {
+ .probe = unittest_i2c_bus_probe,
+ .remove = unittest_i2c_bus_remove,
.driver = {
- .name = "selftest-i2c-bus",
- .of_match_table = of_match_ptr(selftest_i2c_bus_match),
+ .name = "unittest-i2c-bus",
+ .of_match_table = of_match_ptr(unittest_i2c_bus_match),
},
};
-static int selftest_i2c_dev_probe(struct i2c_client *client,
+static int unittest_i2c_dev_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -1575,7 +1649,7 @@ static int selftest_i2c_dev_probe(struct i2c_client *client,
return 0;
};
-static int selftest_i2c_dev_remove(struct i2c_client *client)
+static int unittest_i2c_dev_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
@@ -1584,42 +1658,42 @@ static int selftest_i2c_dev_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id selftest_i2c_dev_id[] = {
- { .name = "selftest-i2c-dev" },
+static const struct i2c_device_id unittest_i2c_dev_id[] = {
+ { .name = "unittest-i2c-dev" },
{ }
};
-static struct i2c_driver selftest_i2c_dev_driver = {
+static struct i2c_driver unittest_i2c_dev_driver = {
.driver = {
- .name = "selftest-i2c-dev",
+ .name = "unittest-i2c-dev",
.owner = THIS_MODULE,
},
- .probe = selftest_i2c_dev_probe,
- .remove = selftest_i2c_dev_remove,
- .id_table = selftest_i2c_dev_id,
+ .probe = unittest_i2c_dev_probe,
+ .remove = unittest_i2c_dev_remove,
+ .id_table = unittest_i2c_dev_id,
};
#if IS_BUILTIN(CONFIG_I2C_MUX)
-struct selftest_i2c_mux_data {
+struct unittest_i2c_mux_data {
int nchans;
struct i2c_adapter *adap[];
};
-static int selftest_i2c_mux_select_chan(struct i2c_adapter *adap,
+static int unittest_i2c_mux_select_chan(struct i2c_adapter *adap,
void *client, u32 chan)
{
return 0;
}
-static int selftest_i2c_mux_probe(struct i2c_client *client,
+static int unittest_i2c_mux_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret, i, nchans, size;
struct device *dev = &client->dev;
struct i2c_adapter *adap = to_i2c_adapter(dev->parent);
struct device_node *np = client->dev.of_node, *child;
- struct selftest_i2c_mux_data *stm;
+ struct unittest_i2c_mux_data *stm;
u32 reg, max_reg;
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
@@ -1643,7 +1717,7 @@ static int selftest_i2c_mux_probe(struct i2c_client *client,
return -EINVAL;
}
- size = offsetof(struct selftest_i2c_mux_data, adap[nchans]);
+ size = offsetof(struct unittest_i2c_mux_data, adap[nchans]);
stm = devm_kzalloc(dev, size, GFP_KERNEL);
if (!stm) {
dev_err(dev, "Out of memory\n");
@@ -1652,7 +1726,7 @@ static int selftest_i2c_mux_probe(struct i2c_client *client,
stm->nchans = nchans;
for (i = 0; i < nchans; i++) {
stm->adap[i] = i2c_add_mux_adapter(adap, dev, client,
- 0, i, 0, selftest_i2c_mux_select_chan, NULL);
+ 0, i, 0, unittest_i2c_mux_select_chan, NULL);
if (!stm->adap[i]) {
dev_err(dev, "Failed to register mux #%d\n", i);
for (i--; i >= 0; i--)
@@ -1666,11 +1740,11 @@ static int selftest_i2c_mux_probe(struct i2c_client *client,
return 0;
};
-static int selftest_i2c_mux_remove(struct i2c_client *client)
+static int unittest_i2c_mux_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
- struct selftest_i2c_mux_data *stm = i2c_get_clientdata(client);
+ struct unittest_i2c_mux_data *stm = i2c_get_clientdata(client);
int i;
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
@@ -1679,183 +1753,185 @@ static int selftest_i2c_mux_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id selftest_i2c_mux_id[] = {
- { .name = "selftest-i2c-mux" },
+static const struct i2c_device_id unittest_i2c_mux_id[] = {
+ { .name = "unittest-i2c-mux" },
{ }
};
-static struct i2c_driver selftest_i2c_mux_driver = {
+static struct i2c_driver unittest_i2c_mux_driver = {
.driver = {
- .name = "selftest-i2c-mux",
+ .name = "unittest-i2c-mux",
.owner = THIS_MODULE,
},
- .probe = selftest_i2c_mux_probe,
- .remove = selftest_i2c_mux_remove,
- .id_table = selftest_i2c_mux_id,
+ .probe = unittest_i2c_mux_probe,
+ .remove = unittest_i2c_mux_remove,
+ .id_table = unittest_i2c_mux_id,
};
#endif
-static int of_selftest_overlay_i2c_init(void)
+static int of_unittest_overlay_i2c_init(void)
{
int ret;
- ret = i2c_add_driver(&selftest_i2c_dev_driver);
- if (selftest(ret == 0,
- "could not register selftest i2c device driver\n"))
+ ret = i2c_add_driver(&unittest_i2c_dev_driver);
+ if (unittest(ret == 0,
+ "could not register unittest i2c device driver\n"))
return ret;
- ret = platform_driver_register(&selftest_i2c_bus_driver);
- if (selftest(ret == 0,
- "could not register selftest i2c bus driver\n"))
+ ret = platform_driver_register(&unittest_i2c_bus_driver);
+ if (unittest(ret == 0,
+ "could not register unittest i2c bus driver\n"))
return ret;
#if IS_BUILTIN(CONFIG_I2C_MUX)
- ret = i2c_add_driver(&selftest_i2c_mux_driver);
- if (selftest(ret == 0,
- "could not register selftest i2c mux driver\n"))
+ ret = i2c_add_driver(&unittest_i2c_mux_driver);
+ if (unittest(ret == 0,
+ "could not register unittest i2c mux driver\n"))
return ret;
#endif
return 0;
}
-static void of_selftest_overlay_i2c_cleanup(void)
+static void of_unittest_overlay_i2c_cleanup(void)
{
#if IS_BUILTIN(CONFIG_I2C_MUX)
- i2c_del_driver(&selftest_i2c_mux_driver);
+ i2c_del_driver(&unittest_i2c_mux_driver);
#endif
- platform_driver_unregister(&selftest_i2c_bus_driver);
- i2c_del_driver(&selftest_i2c_dev_driver);
+ platform_driver_unregister(&unittest_i2c_bus_driver);
+ i2c_del_driver(&unittest_i2c_dev_driver);
}
-static void of_selftest_overlay_i2c_12(void)
+static void of_unittest_overlay_i2c_12(void)
{
int ret;
/* device should enable */
- ret = of_selftest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY);
+ ret = of_unittest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 12);
+ unittest(1, "overlay test %d passed\n", 12);
}
/* test deactivation of device */
-static void of_selftest_overlay_i2c_13(void)
+static void of_unittest_overlay_i2c_13(void)
{
int ret;
/* device should disable */
- ret = of_selftest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY);
+ ret = of_unittest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 13);
+ unittest(1, "overlay test %d passed\n", 13);
}
/* just check for i2c mux existence */
-static void of_selftest_overlay_i2c_14(void)
+static void of_unittest_overlay_i2c_14(void)
{
}
-static void of_selftest_overlay_i2c_15(void)
+static void of_unittest_overlay_i2c_15(void)
{
int ret;
/* device should enable */
- ret = of_selftest_apply_overlay_check(16, 15, 0, 1, I2C_OVERLAY);
+ ret = of_unittest_apply_overlay_check(16, 15, 0, 1, I2C_OVERLAY);
if (ret != 0)
return;
- selftest(1, "overlay test %d passed\n", 15);
+ unittest(1, "overlay test %d passed\n", 15);
}
#else
-static inline void of_selftest_overlay_i2c_14(void) { }
-static inline void of_selftest_overlay_i2c_15(void) { }
+static inline void of_unittest_overlay_i2c_14(void) { }
+static inline void of_unittest_overlay_i2c_15(void) { }
#endif
-static void __init of_selftest_overlay(void)
+static void __init of_unittest_overlay(void)
{
struct device_node *bus_np = NULL;
int ret;
- ret = platform_driver_register(&selftest_driver);
+ ret = platform_driver_register(&unittest_driver);
if (ret != 0) {
- selftest(0, "could not register selftest driver\n");
+ unittest(0, "could not register unittest driver\n");
goto out;
}
bus_np = of_find_node_by_path(bus_path);
if (bus_np == NULL) {
- selftest(0, "could not find bus_path \"%s\"\n", bus_path);
+ unittest(0, "could not find bus_path \"%s\"\n", bus_path);
goto out;
}
ret = of_platform_populate(bus_np, of_default_bus_match_table,
NULL, NULL);
if (ret != 0) {
- selftest(0, "could not populate bus @ \"%s\"\n", bus_path);
+ unittest(0, "could not populate bus @ \"%s\"\n", bus_path);
goto out;
}
- if (!of_selftest_device_exists(100, PDEV_OVERLAY)) {
- selftest(0, "could not find selftest0 @ \"%s\"\n",
- selftest_path(100, PDEV_OVERLAY));
+ if (!of_unittest_device_exists(100, PDEV_OVERLAY)) {
+ unittest(0, "could not find unittest0 @ \"%s\"\n",
+ unittest_path(100, PDEV_OVERLAY));
goto out;
}
- if (of_selftest_device_exists(101, PDEV_OVERLAY)) {
- selftest(0, "selftest1 @ \"%s\" should not exist\n",
- selftest_path(101, PDEV_OVERLAY));
+ if (of_unittest_device_exists(101, PDEV_OVERLAY)) {
+ unittest(0, "unittest1 @ \"%s\" should not exist\n",
+ unittest_path(101, PDEV_OVERLAY));
goto out;
}
- selftest(1, "basic infrastructure of overlays passed");
+ unittest(1, "basic infrastructure of overlays passed");
/* tests in sequence */
- of_selftest_overlay_0();
- of_selftest_overlay_1();
- of_selftest_overlay_2();
- of_selftest_overlay_3();
- of_selftest_overlay_4();
- of_selftest_overlay_5();
- of_selftest_overlay_6();
- of_selftest_overlay_8();
-
- of_selftest_overlay_10();
- of_selftest_overlay_11();
+ of_unittest_overlay_0();
+ of_unittest_overlay_1();
+ of_unittest_overlay_2();
+ of_unittest_overlay_3();
+ of_unittest_overlay_4();
+ of_unittest_overlay_5();
+ of_unittest_overlay_6();
+ of_unittest_overlay_8();
+
+ of_unittest_overlay_10();
+ of_unittest_overlay_11();
#if IS_BUILTIN(CONFIG_I2C)
- if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
+ if (unittest(of_unittest_overlay_i2c_init() == 0, "i2c init failed\n"))
goto out;
- of_selftest_overlay_i2c_12();
- of_selftest_overlay_i2c_13();
- of_selftest_overlay_i2c_14();
- of_selftest_overlay_i2c_15();
+ of_unittest_overlay_i2c_12();
+ of_unittest_overlay_i2c_13();
+ of_unittest_overlay_i2c_14();
+ of_unittest_overlay_i2c_15();
- of_selftest_overlay_i2c_cleanup();
+ of_unittest_overlay_i2c_cleanup();
#endif
+ of_unittest_destroy_tracked_overlays();
+
out:
of_node_put(bus_np);
}
#else
-static inline void __init of_selftest_overlay(void) { }
+static inline void __init of_unittest_overlay(void) { }
#endif
-static int __init of_selftest(void)
+static int __init of_unittest(void)
{
struct device_node *np;
int res;
- /* adding data for selftest */
- res = selftest_data_add();
+ /* adding data for unittest */
+ res = unittest_data_add();
if (res)
return res;
if (!of_aliases)
@@ -1868,27 +1944,27 @@ static int __init of_selftest(void)
}
of_node_put(np);
- pr_info("start of selftest - you will see error messages\n");
- of_selftest_check_tree_linkage();
- of_selftest_check_phandles();
- of_selftest_find_node_by_name();
- of_selftest_dynamic();
- of_selftest_parse_phandle_with_args();
- of_selftest_property_string();
- of_selftest_property_copy();
- of_selftest_changeset();
- of_selftest_parse_interrupts();
- of_selftest_parse_interrupts_extended();
- of_selftest_match_node();
- of_selftest_platform_populate();
- of_selftest_overlay();
+ pr_info("start of unittest - you will see error messages\n");
+ of_unittest_check_tree_linkage();
+ of_unittest_check_phandles();
+ of_unittest_find_node_by_name();
+ of_unittest_dynamic();
+ of_unittest_parse_phandle_with_args();
+ of_unittest_property_string();
+ of_unittest_property_copy();
+ of_unittest_changeset();
+ of_unittest_parse_interrupts();
+ of_unittest_parse_interrupts_extended();
+ of_unittest_match_node();
+ of_unittest_platform_populate();
+ of_unittest_overlay();
/* Double check linkage after removing testcase data */
- of_selftest_check_tree_linkage();
+ of_unittest_check_tree_linkage();
- pr_info("end of selftest - %i passed, %i failed\n",
- selftest_results.passed, selftest_results.failed);
+ pr_info("end of unittest - %i passed, %i failed\n",
+ unittest_results.passed, unittest_results.failed);
return 0;
}
-late_initcall(of_selftest);
+late_initcall(of_unittest);
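
The overlay bookkeeping introduced above leans on one property: of_overlay_create() hands out IDs sequentially, so the first ID seen becomes the bitmap origin and every later ID is stored as an offset into a fixed-size bitmap. The final sweep then keeps retrying destruction, highest offsets first, until nothing is deferred, because an overlay cannot be removed while a later one is still stacked on top of it. Below is a minimal stand-alone sketch of that bookkeeping in plain C; track()/untrack()/is_tracked() and MAX_TRACKED are illustrative names, not kernel API, and stand in for the BIT_WORD()/BIT_MASK() based code in the patch.

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

#define MAX_TRACKED 256
#define WORD_BITS   (sizeof(unsigned long) * CHAR_BIT)

static unsigned long tracked[(MAX_TRACKED + WORD_BITS - 1) / WORD_BITS];
static int first_id = -1;

/* Record a newly created overlay id; ids are assumed to be sequential. */
static void track(int id)
{
        if (first_id < 0)
                first_id = id;                  /* first id seen becomes the origin */
        id -= first_id;
        assert(id >= 0 && id < MAX_TRACKED);    /* mirrors the BUG_ON() above */
        tracked[id / WORD_BITS] |= 1UL << (id % WORD_BITS);
}

/* Forget an overlay that a test already destroyed itself. */
static void untrack(int id)
{
        if (first_id < 0)
                return;
        id -= first_id;
        assert(id >= 0 && id < MAX_TRACKED);
        tracked[id / WORD_BITS] &= ~(1UL << (id % WORD_BITS));
}

/* Still outstanding? The cleanup sweep walks these from the highest id down. */
static bool is_tracked(int id)
{
        id -= first_id;
        return tracked[id / WORD_BITS] & (1UL << (id % WORD_BITS));
}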
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index d93b2b6b1f7a..82f7000a285d 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -21,6 +21,7 @@
* objects.
*/
+#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
@@ -224,10 +225,18 @@ static inline unsigned long fast_get_dcookie(struct path *path)
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
unsigned long cookie = NO_COOKIE;
+ struct file *exe_file;
- if (mm && mm->exe_file)
- cookie = fast_get_dcookie(&mm->exe_file->f_path);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ if (!exe_file)
+ goto done;
+ cookie = fast_get_dcookie(&exe_file->f_path);
+ fput(exe_file);
+done:
return cookie;
}
@@ -236,6 +245,8 @@ static unsigned long get_exec_dcookie(struct mm_struct *mm)
* pair that can then be added to the global event buffer. We make
* sure to do this lookup before a mm->mmap modification happens so
* we don't lose track.
+ *
+ * The caller must ensure the mm is not NULL (i.e. not a kernel thread).
*/
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
@@ -243,6 +254,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
+ down_read(&mm->mmap_sem);
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
if (addr < vma->vm_start || addr >= vma->vm_end)
@@ -262,6 +274,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
if (!vma)
cookie = INVALID_COOKIE;
+ up_read(&mm->mmap_sem);
return cookie;
}
@@ -402,20 +415,9 @@ static void release_mm(struct mm_struct *mm)
{
if (!mm)
return;
- up_read(&mm->mmap_sem);
mmput(mm);
}
-
-static struct mm_struct *take_tasks_mm(struct task_struct *task)
-{
- struct mm_struct *mm = get_task_mm(task);
- if (mm)
- down_read(&mm->mmap_sem);
- return mm;
-}
-
-
static inline int is_code(unsigned long val)
{
return val == ESCAPE_CODE;
@@ -532,7 +534,7 @@ void sync_buffer(int cpu)
new = (struct task_struct *)val;
oldmm = mm;
release_mm(oldmm);
- mm = take_tasks_mm(new);
+ mm = get_task_mm(new);
if (mm != oldmm)
cookie = get_exec_dcookie(mm);
add_user_ctx_switch(new, cookie);
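
Two things change in buffer_sync.c: get_exec_dcookie() stops dereferencing mm->exe_file directly and instead takes its own reference via get_mm_exe_file()/fput(), and mmap_sem is now taken only around the VMA walk inside lookup_dcookie() rather than being held by the caller for as long as it keeps the mm (which is why take_tasks_mm() goes away and sync_buffer() can use plain get_task_mm()). Condensed, the reference-counted access pattern looks like the sketch below; dcookie_of() is a stand-in for the driver's fast_get_dcookie(), everything else is real kernel API.

/* Sketch only: the take-reference / use / drop-reference shape used above. */
static unsigned long exec_cookie(struct mm_struct *mm)
{
        unsigned long cookie = NO_COOKIE;
        struct file *exe_file;

        if (!mm)
                return cookie;

        exe_file = get_mm_exe_file(mm);         /* grabs a file reference, may return NULL */
        if (!exe_file)
                return cookie;

        cookie = dcookie_of(&exe_file->f_path); /* stand-in for fast_get_dcookie() */
        fput(exe_file);                         /* drop the reference we took */
        return cookie;
}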
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 3f493459378f..dd92c5edf219 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -138,22 +138,22 @@ static int __oprofilefs_create_file(struct dentry *root, char const *name,
struct dentry *dentry;
struct inode *inode;
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
dentry = d_alloc_name(root, name);
if (!dentry) {
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return -ENOMEM;
}
inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
if (!inode) {
dput(dentry);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return -ENOMEM;
}
inode->i_fop = fops;
inode->i_private = priv;
d_add(dentry, inode);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return 0;
}
@@ -215,22 +215,22 @@ struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
struct dentry *dentry;
struct inode *inode;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
dentry = d_alloc_name(parent, name);
if (!dentry) {
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return NULL;
}
inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
if (!inode) {
dput(dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return NULL;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_add(dentry, inode);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return dentry;
}
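
The oprofilefs hunks are a mechanical switch from the open-coded dentry->d_inode dereference to the d_inode() accessor; behaviour is identical, the accessor just makes it easier to audit and later change how the inode behind a dentry is obtained (it was introduced with layered filesystems in mind). A small illustration of the spelling change, with with_dir_locked() as a made-up helper name:

/* Illustrative helper: lock a directory dentry before touching its children. */
static void with_dir_locked(struct dentry *parent)
{
        struct inode *dir = d_inode(parent);    /* was: parent->d_inode */

        mutex_lock(&dir->i_mutex);
        /* ... d_alloc_name()/d_add() children under "dir" here ... */
        mutex_unlock(&dir->i_mutex);
}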
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 8b490d77054f..02ff84fcfa61 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -916,7 +916,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist) = ccio_map_single(dev,
- (void *)sg_virt_addr(sglist), sglist->length,
+ sg_virt(sglist), sglist->length,
direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -983,8 +983,8 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
- DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
- __func__, nents, sg_virt_addr(sglist), sglist->length);
+ DBG_RUN_SG("%s() START %d entries, %p,%x\n",
+ __func__, nents, sg_virt(sglist), sglist->length);
#ifdef CCIO_COLLECT_STATS
ioc->usg_calls++;
@@ -1021,7 +1021,6 @@ static struct hppa_dma_ops ccio_ops = {
#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
- int len = 0;
struct ioc *ioc = ioc_list;
while (ioc != NULL) {
@@ -1031,22 +1030,22 @@ static int ccio_proc_info(struct seq_file *m, void *p)
int j;
#endif
- len += seq_printf(m, "%s\n", ioc->name);
+ seq_printf(m, "%s\n", ioc->name);
- len += seq_printf(m, "Cujo 2.0 bug : %s\n",
- (ioc->cujo20_bug ? "yes" : "no"));
+ seq_printf(m, "Cujo 2.0 bug : %s\n",
+ (ioc->cujo20_bug ? "yes" : "no"));
- len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
- total_pages * 8, total_pages);
+ seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
+ total_pages * 8, total_pages);
#ifdef CCIO_COLLECT_STATS
- len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
- total_pages - ioc->used_pages, ioc->used_pages,
- (int)(ioc->used_pages * 100 / total_pages));
+ seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
+ total_pages - ioc->used_pages, ioc->used_pages,
+ (int)(ioc->used_pages * 100 / total_pages));
#endif
- len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
- ioc->res_size, total_pages);
+ seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
+ ioc->res_size, total_pages);
#ifdef CCIO_COLLECT_STATS
min = max = ioc->avg_search[0];
@@ -1058,26 +1057,26 @@ static int ccio_proc_info(struct seq_file *m, void *p)
min = ioc->avg_search[j];
}
avg /= CCIO_SEARCH_SAMPLE;
- len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
- min, avg, max);
+ seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
+ min, avg, max);
- len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
- ioc->msingle_calls, ioc->msingle_pages,
- (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
+ seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
+ ioc->msingle_calls, ioc->msingle_pages,
+ (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
/* KLUGE - unmap_sg calls unmap_single for each mapped page */
min = ioc->usingle_calls - ioc->usg_calls;
max = ioc->usingle_pages - ioc->usg_pages;
- len += seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
- min, max, (int)((max * 1000)/min));
+ seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
+ min, max, (int)((max * 1000)/min));
- len += seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
- ioc->msg_calls, ioc->msg_pages,
- (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
+ seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
+ ioc->msg_calls, ioc->msg_pages,
+ (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
- len += seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
- ioc->usg_calls, ioc->usg_pages,
- (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
+ seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
+ ioc->usg_calls, ioc->usg_pages,
+ (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif /* CCIO_COLLECT_STATS */
ioc = ioc->next;
@@ -1101,7 +1100,6 @@ static const struct file_operations ccio_proc_info_fops = {
static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
- int len = 0;
struct ioc *ioc = ioc_list;
while (ioc != NULL) {
@@ -1110,11 +1108,11 @@ static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
if ((j & 7) == 0)
- len += seq_puts(m, "\n ");
- len += seq_printf(m, "%08x", *res_ptr);
+ seq_puts(m, "\n ");
+ seq_printf(m, "%08x", *res_ptr);
res_ptr++;
}
- len += seq_puts(m, "\n\n");
+ seq_puts(m, "\n\n");
ioc = ioc->next;
break; /* XXX - remove me */
}
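
All of the "len += seq_printf(...)" removals in this file (and in sba_iommu.c below) follow from a seq_file API cleanup: the return value of seq_printf(), seq_puts() and seq_putc() was never a reliable byte count and is being removed entirely, so accumulating it into a local len is pointless. A proc show routine after the conversion simply emits its output and returns 0; overflow is handled internally by seq_file. A minimal sketch, with struct foo_stats and foo_proc_show as illustrative names:

#include <linux/seq_file.h>

struct foo_stats {                      /* illustrative private data */
        unsigned long calls;
        unsigned long pages;
};

static int foo_proc_show(struct seq_file *m, void *v)
{
        struct foo_stats *st = m->private;

        seq_printf(m, "calls : %lu\n", st->calls);
        seq_printf(m, "pages : %lu\n", st->pages);
        seq_putc(m, '\n');
        return 0;                       /* no byte count to propagate any more */
}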
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index caa153133754..a656d9e83343 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -357,7 +357,7 @@ static int parse_slot_config(int slot,
}
if (flags & HPEE_FUNCTION_INFO_CFG_FREE_FORM) {
/* I have no idea how to handle this */
- printk("function %d have free-form confgiuration, skipping ",
+ printk("function %d have free-form configuration, skipping ",
num_func);
pos = p0 + function_len;
continue;
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 8c33491b21fe..761e77bfce5d 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -30,9 +30,9 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long vaddr;
long size;
- DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
+ DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
(unsigned long)sg_dma_address(startsg), cnt,
- sg_virt_addr(startsg), startsg->length
+ sg_virt(startsg), startsg->length
);
@@ -66,7 +66,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
BUG_ON(pdirp == NULL);
- vaddr = sg_virt_addr(startsg);
+ vaddr = (unsigned long)sg_virt(startsg);
sg_dma_len(dma_sg) += startsg->length;
size = startsg->length + dma_offset;
dma_offset = 0;
@@ -113,7 +113,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
*/
contig_sg = startsg;
dma_len = startsg->length;
- dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;
+ dma_offset = startsg->offset;
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
@@ -124,14 +124,13 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
** it's always looking one "ahead".
*/
while(--nents > 0) {
- unsigned long prevstartsg_end, startsg_end;
+ unsigned long prev_end, sg_start;
- prevstartsg_end = sg_virt_addr(startsg) +
- startsg->length;
+ prev_end = (unsigned long)sg_virt(startsg) +
+ startsg->length;
startsg++;
- startsg_end = sg_virt_addr(startsg) +
- startsg->length;
+ sg_start = (unsigned long)sg_virt(startsg);
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
@@ -150,10 +149,13 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
break;
/*
- ** Next see if we can append the next chunk (i.e.
- ** it must end on one page and begin on another
+ * Next see if we can append the next chunk: it must
+ * start at the exact address where the previous entry
+ * ended, and that shared boundary must also fall on a
+ * page boundary.
*/
- if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
+ if (unlikely((prev_end != sg_start) ||
+ ((prev_end | sg_start) & ~PAGE_MASK)))
break;
dma_len += startsg->length;
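
With the switch to sg_virt(), the coalescing test now requires the next scatterlist entry to start at exactly the address where the previous one ended, and that shared address to sit on a page boundary; if either condition fails the run is closed. Restated as a stand-alone predicate (plain C, 4 KiB pages assumed, can_append() is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

/* May the chunk starting at sg_start be appended to a run ending at prev_end? */
static bool can_append(uintptr_t prev_end, uintptr_t sg_start)
{
        if (prev_end != sg_start)                             /* must be virtually contiguous */
                return false;
        return ((prev_end | sg_start) & ~EX_PAGE_MASK) == 0;  /* junction on a page edge */
}

/*
 * can_append(0x2000, 0x2000) -> true   (contiguous, page-aligned junction)
 * can_append(0x2100, 0x2100) -> false  (contiguous, but mid-page junction)
 * can_append(0x2000, 0x3000) -> false  (gap between the entries)
 */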
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 1ff1b67e8b27..f1441e466c06 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -278,7 +278,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
nents,
(unsigned long) sg_dma_address(startsg),
sg_dma_len(startsg),
- sg_virt_addr(startsg), startsg->length);
+ sg_virt(startsg), startsg->length);
startsg++;
}
}
@@ -945,8 +945,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
- sg_dma_address(sglist) = sba_map_single(dev,
- (void *)sg_virt_addr(sglist),
+ sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
sglist->length, direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1025,7 +1024,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
#endif
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
- __func__, nents, sg_virt_addr(sglist), sglist->length);
+ __func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
@@ -1774,37 +1773,35 @@ static int sba_proc_info(struct seq_file *m, void *p)
#ifdef SBA_COLLECT_STATS
unsigned long avg = 0, min, max;
#endif
- int i, len = 0;
-
- len += seq_printf(m, "%s rev %d.%d\n",
- sba_dev->name,
- (sba_dev->hw_rev & 0x7) + 1,
- (sba_dev->hw_rev & 0x18) >> 3
- );
- len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
- (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
- total_pages);
-
- len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
- ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
-
- len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
- READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
- READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
- READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
- );
+ int i;
+
+ seq_printf(m, "%s rev %d.%d\n",
+ sba_dev->name,
+ (sba_dev->hw_rev & 0x7) + 1,
+ (sba_dev->hw_rev & 0x18) >> 3);
+ seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
+ (int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
+ total_pages);
+
+ seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
+ ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
+
+ seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
+ READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
+ READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
+ READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));
for (i=0; i<4; i++)
- len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
- READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
- READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
- READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
- );
+ seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
+ i,
+ READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
+ READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
+ READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));
#ifdef SBA_COLLECT_STATS
- len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
- total_pages - ioc->used_pages, ioc->used_pages,
- (int) (ioc->used_pages * 100 / total_pages));
+ seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
+ total_pages - ioc->used_pages, ioc->used_pages,
+ (int)(ioc->used_pages * 100 / total_pages));
min = max = ioc->avg_search[0];
for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
@@ -1813,26 +1810,26 @@ static int sba_proc_info(struct seq_file *m, void *p)
if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
}
avg /= SBA_SEARCH_SAMPLE;
- len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
- min, avg, max);
+ seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
+ min, avg, max);
- len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
- ioc->msingle_calls, ioc->msingle_pages,
- (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
+ seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
+ ioc->msingle_calls, ioc->msingle_pages,
+ (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
/* KLUGE - unmap_sg calls unmap_single for each mapped page */
min = ioc->usingle_calls;
max = ioc->usingle_pages - ioc->usg_pages;
- len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
- min, max, (int) ((max * 1000)/min));
+ seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
+ min, max, (int)((max * 1000)/min));
- len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
- ioc->msg_calls, ioc->msg_pages,
- (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
+ seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
+ ioc->msg_calls, ioc->msg_pages,
+ (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
- len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
- ioc->usg_calls, ioc->usg_pages,
- (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
+ seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
+ ioc->usg_calls, ioc->usg_pages,
+ (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif
return 0;
@@ -1858,14 +1855,14 @@ sba_proc_bitmap_info(struct seq_file *m, void *p)
struct sba_device *sba_dev = sba_list;
struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
unsigned int *res_ptr = (unsigned int *)ioc->res_map;
- int i, len = 0;
+ int i;
for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
if ((i & 7) == 0)
- len += seq_printf(m, "\n ");
- len += seq_printf(m, " %08x", *res_ptr);
+ seq_puts(m, "\n ");
+ seq_printf(m, " %08x", *res_ptr);
}
- len += seq_printf(m, "\n");
+ seq_putc(m, '\n');
return 0;
}
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 8be2096c8423..deeaed544222 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -348,7 +348,7 @@ int superio_fixup_irq(struct pci_dev *pcidev)
BUG();
return -1;
}
- printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %pf\n",
+ printk(KERN_DEBUG "superio_fixup_irq(%s) ven 0x%x dev 0x%x from %ps\n",
pci_name(pcidev),
pcidev->vendor, pcidev->device,
__builtin_return_address(0));
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 7660232ef460..e12bafdc42e0 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -146,7 +146,7 @@ static void dlpar_pci_add_bus(struct device_node *dn)
struct pci_controller *phb = pdn->phb;
struct pci_dev *dev = NULL;
- eeh_add_device_tree_early(dn);
+ eeh_add_device_tree_early(pdn);
/* Add EADS device to PHB bus, adding new entry to bus->devices */
dev = of_create_pci_dev(dn, phb->bus, pdn->devfn);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4b3a4eaad996..ee0ebff103a4 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -19,16 +19,59 @@
#define VIRTFN_ID_LEN 16
-static inline u8 virtfn_bus(struct pci_dev *dev, int id)
+int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
{
+ if (!dev->is_physfn)
+ return -EINVAL;
return dev->bus->number + ((dev->devfn + dev->sriov->offset +
- dev->sriov->stride * id) >> 8);
+ dev->sriov->stride * vf_id) >> 8);
}
-static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
{
+ if (!dev->is_physfn)
+ return -EINVAL;
return (dev->devfn + dev->sriov->offset +
- dev->sriov->stride * id) & 0xff;
+ dev->sriov->stride * vf_id) & 0xff;
+}
+
+/*
+ * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
+ * change when NumVFs changes.
+ *
+ * Update iov->offset and iov->stride when NumVFs is written.
+ */
+static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
+{
+ struct pci_sriov *iov = dev->sriov;
+
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+}
+
+/*
+ * The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
+ * determine how many additional bus numbers will be consumed by VFs.
+ *
+ * Iterate over all valid NumVFs and calculate the maximum number of bus
+ * numbers that could ever be required.
+ */
+static inline u8 virtfn_max_buses(struct pci_dev *dev)
+{
+ struct pci_sriov *iov = dev->sriov;
+ int nr_virtfn;
+ u8 max = 0;
+ int busnr;
+
+ for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
+ if (busnr > max)
+ max = busnr;
+ }
+
+ return max;
}
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
@@ -57,6 +100,14 @@ static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
pci_remove_bus(virtbus);
}
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{
+ if (!dev->is_physfn)
+ return 0;
+
+ return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
+}
+
static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
int i;
@@ -69,7 +120,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
struct pci_bus *bus;
mutex_lock(&iov->dev->sriov->lock);
- bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
+ bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
if (!bus)
goto failed;
@@ -77,7 +128,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
if (!virtfn)
goto failed0;
- virtfn->devfn = virtfn_devfn(dev, id);
+ virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
pci_setup_device(virtfn);
@@ -87,13 +138,12 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = dev->resource + PCI_IOV_RESOURCES + i;
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
if (!res->parent)
continue;
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
- size = resource_size(res);
- do_div(size, iov->total_VFs);
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
virtfn->resource[i].start = res->start + size * id;
virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
rc = request_resource(res, &virtfn->resource[i]);
@@ -140,8 +190,8 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
struct pci_sriov *iov = dev->sriov;
virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
- virtfn_bus(dev, id),
- virtfn_devfn(dev, id));
+ pci_iov_virtfn_bus(dev, id),
+ pci_iov_virtfn_devfn(dev, id));
if (!virtfn)
return;
@@ -170,6 +220,11 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
pci_dev_put(dev);
}
+int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ return 0;
+}
+
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
@@ -180,6 +235,8 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
int bars = 0;
+ int bus;
+ int retval;
if (!nr_virtfn)
return 0;
@@ -204,7 +261,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
bars |= (1 << (i + PCI_IOV_RESOURCES));
- res = dev->resource + PCI_IOV_RESOURCES + i;
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
if (res->parent)
nres++;
}
@@ -216,8 +273,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
iov->offset = offset;
iov->stride = stride;
- if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
- dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
+ bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
+ if (bus > dev->bus->busn_res.end) {
+ dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
+ nr_virtfn, bus, &dev->bus->busn_res);
return -ENOMEM;
}
@@ -243,7 +302,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
+ pci_iov_set_numvfs(dev, nr_virtfn);
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -254,6 +313,12 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (nr_virtfn < initial)
initial = nr_virtfn;
+ if ((retval = pcibios_sriov_enable(dev, initial))) {
+ dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n",
+ retval);
+ return retval;
+ }
+
for (i = 0; i < initial; i++) {
rc = virtfn_add(dev, i, 0);
if (rc)
@@ -272,7 +337,7 @@ failed:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
+ pci_iov_set_numvfs(dev, 0);
ssleep(1);
pci_cfg_access_unlock(dev);
@@ -282,6 +347,11 @@ failed:
return rc;
}
+int __weak pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ return 0;
+}
+
static void sriov_disable(struct pci_dev *dev)
{
int i;
@@ -293,6 +363,8 @@ static void sriov_disable(struct pci_dev *dev)
for (i = 0; i < iov->num_VFs; i++)
virtfn_remove(dev, i, 0);
+ pcibios_sriov_disable(dev);
+
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -303,12 +375,12 @@ static void sriov_disable(struct pci_dev *dev)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
iov->num_VFs = 0;
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
+ pci_iov_set_numvfs(dev, 0);
}
static int sriov_init(struct pci_dev *dev, int pos)
{
- int i;
+ int i, bar64;
int rc;
int nres;
u32 pgsz;
@@ -357,27 +429,29 @@ found:
pgsz &= ~(pgsz - 1);
pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
+ iov = kzalloc(sizeof(*iov), GFP_KERNEL);
+ if (!iov)
+ return -ENOMEM;
+
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = dev->resource + PCI_IOV_RESOURCES + i;
- i += __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ bar64 = __pci_read_base(dev, pci_bar_unknown, res,
+ pos + PCI_SRIOV_BAR + i * 4);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
rc = -EIO;
goto failed;
}
+ iov->barsz[i] = resource_size(res);
res->end = res->start + resource_size(res) * total - 1;
+ dev_info(&dev->dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
+ i, res, i, total);
+ i += bar64;
nres++;
}
- iov = kzalloc(sizeof(*iov), GFP_KERNEL);
- if (!iov) {
- rc = -ENOMEM;
- goto failed;
- }
-
iov->pos = pos;
iov->nres = nres;
iov->ctrl = ctrl;
@@ -400,15 +474,17 @@ found:
dev->sriov = iov;
dev->is_physfn = 1;
+ iov->max_VF_buses = virtfn_max_buses(dev);
return 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = dev->resource + PCI_IOV_RESOURCES + i;
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
res->flags = 0;
}
+ kfree(iov);
return rc;
}
@@ -439,7 +515,7 @@ static void sriov_restore_state(struct pci_dev *dev)
pci_update_resource(dev, i);
pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
+ pci_iov_set_numvfs(dev, iov->num_VFs);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
msleep(100);
@@ -493,6 +569,12 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno)
4 * (resno - PCI_IOV_RESOURCES);
}
+resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
+ int resno)
+{
+ return pci_iov_resource_size(dev, resno);
+}
+
/**
* pci_sriov_resource_alignment - get resource alignment for VF BAR
* @dev: the PCI device
@@ -505,14 +587,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno)
*/
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
- struct resource tmp;
- int reg = pci_iov_resource_bar(dev, resno);
-
- if (!reg)
- return 0;
-
- __pci_read_base(dev, pci_bar_unknown, &tmp, reg);
- return resource_alignment(&tmp);
+ return pcibios_iov_resource_alignment(dev, resno);
}
/**
@@ -535,15 +610,13 @@ void pci_restore_iov_state(struct pci_dev *dev)
int pci_iov_bus_range(struct pci_bus *bus)
{
int max = 0;
- u8 busnr;
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_physfn)
continue;
- busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1);
- if (busnr > max)
- max = busnr;
+ if (dev->sriov->max_VF_buses > max)
+ max = dev->sriov->max_VF_buses;
}
return max ? max - bus->number : 0;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d72f849174a4..9bd762c237ab 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -243,10 +243,12 @@ struct pci_sriov {
u16 stride; /* following VF stride */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
+ u8 max_VF_buses; /* max buses consumed by VFs */
u16 driver_max_VFs; /* max num VFs driver supports */
struct pci_dev *dev; /* lowest numbered PF */
struct pci_dev *self; /* this PF */
struct mutex lock; /* lock for VF bus */
+ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
};
#ifdef CONFIG_PCI_ATS
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8169597e47cb..4fd0cacf7ca0 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -99,8 +99,8 @@ static void remove_from_list(struct list_head *head,
}
}
-static resource_size_t get_res_add_size(struct list_head *head,
- struct resource *res)
+static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
+ struct resource *res)
{
struct pci_dev_resource *dev_res;
@@ -109,17 +109,37 @@ static resource_size_t get_res_add_size(struct list_head *head,
int idx = res - &dev_res->dev->resource[0];
dev_printk(KERN_DEBUG, &dev_res->dev->dev,
- "res[%d]=%pR get_res_add_size add_size %llx\n",
+ "res[%d]=%pR res_to_dev_res add_size %llx min_align %llx\n",
idx, dev_res->res,
- (unsigned long long)dev_res->add_size);
+ (unsigned long long)dev_res->add_size,
+ (unsigned long long)dev_res->min_align);
- return dev_res->add_size;
+ return dev_res;
}
}
- return 0;
+ return NULL;
}
+static resource_size_t get_res_add_size(struct list_head *head,
+ struct resource *res)
+{
+ struct pci_dev_resource *dev_res;
+
+ dev_res = res_to_dev_res(head, res);
+ return dev_res ? dev_res->add_size : 0;
+}
+
+static resource_size_t get_res_add_align(struct list_head *head,
+ struct resource *res)
+{
+ struct pci_dev_resource *dev_res;
+
+ dev_res = res_to_dev_res(head, res);
+ return dev_res ? dev_res->min_align : 0;
+}
+
/* Sort resources by alignment */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
@@ -215,7 +235,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
struct resource *res;
struct pci_dev_resource *add_res, *tmp;
struct pci_dev_resource *dev_res;
- resource_size_t add_size;
+ resource_size_t add_size, align;
int idx;
list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
@@ -238,13 +258,13 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
idx = res - &add_res->dev->resource[0];
add_size = add_res->add_size;
+ align = add_res->min_align;
if (!resource_size(res)) {
- res->start = add_res->start;
+ res->start = align;
res->end = res->start + add_size - 1;
if (pci_assign_resource(add_res->dev, idx))
reset_resource(res);
} else {
- resource_size_t align = add_res->min_align;
res->flags |= add_res->flags &
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
if (pci_reassign_resource(add_res->dev, idx,
@@ -368,8 +388,9 @@ static void __assign_resources_sorted(struct list_head *head,
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
struct pci_dev_resource *save_res;
- struct pci_dev_resource *dev_res, *tmp_res;
+ struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
unsigned long fail_type;
+ resource_size_t add_align, align;
/* Check if optional add_size is there */
if (!realloc_head || list_empty(realloc_head))
@@ -384,10 +405,44 @@ static void __assign_resources_sorted(struct list_head *head,
}
/* Update res in head list with add_size in realloc_head list */
- list_for_each_entry(dev_res, head, list)
+ list_for_each_entry_safe(dev_res, tmp_res, head, list) {
dev_res->res->end += get_res_add_size(realloc_head,
dev_res->res);
+ /*
+ * There are two kinds of additional resources in the list:
+ * 1. bridge resource -- IORESOURCE_STARTALIGN
+ * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
+		 * Here we only fix up the additional alignment for bridge resources.
+ */
+ if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
+ continue;
+
+ add_align = get_res_add_align(realloc_head, dev_res->res);
+
+ /*
+ * The "head" list is sorted by the alignment to make sure
+ * resources with bigger alignment will be assigned first.
+ * After we change the alignment of a dev_res in "head" list,
+ * we need to reorder the list by alignment to make it
+ * consistent.
+ */
+ if (add_align > dev_res->res->start) {
+ dev_res->res->start = add_align;
+ dev_res->res->end = add_align +
+ resource_size(dev_res->res);
+
+ list_for_each_entry(dev_res2, head, list) {
+ align = pci_resource_alignment(dev_res2->dev,
+ dev_res2->res);
+ if (add_align > align)
+ list_move_tail(&dev_res->list,
+ &dev_res2->list);
+ }
+ }
+
+ }
+
/* Try updated head list with add_size added */
assign_requested_resources_sorted(head, &local_fail_head);
@@ -962,6 +1017,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct resource *b_res = find_free_bus_resource(bus,
mask | IORESOURCE_PREFETCH, type);
resource_size_t children_add_size = 0;
+ resource_size_t children_add_align = 0;
+ resource_size_t add_align = 0;
if (!b_res)
return -ENOSPC;
@@ -986,6 +1043,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
/* put SRIOV requested res to the optional list */
if (realloc_head && i >= PCI_IOV_RESOURCES &&
i <= PCI_IOV_RESOURCE_END) {
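+			/*
+			 * Track the largest SR-IOV BAR alignment; it is used
+			 * below as the additional alignment of the bridge window.
+			 */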
+ add_align = max(pci_resource_alignment(dev, r), add_align);
r->end = r->start - 1;
add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
children_add_size += r_size;
@@ -1016,19 +1074,23 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (order > max_order)
max_order = order;
- if (realloc_head)
+ if (realloc_head) {
children_add_size += get_res_add_size(realloc_head, r);
+ children_add_align = get_res_add_align(realloc_head, r);
+ add_align = max(add_align, children_add_align);
+ }
}
}
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, window_alignment(bus, b_res->flags));
size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
+ add_align = max(min_align, add_align);
if (children_add_size > add_size)
add_size = children_add_size;
size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
calculate_memsize(size, min_size, add_size,
- resource_size(b_res), min_align);
+ resource_size(b_res), add_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
@@ -1040,10 +1102,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->end = size0 + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
if (size1 > size0 && realloc_head) {
- add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
- dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n",
+ add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
+ dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
- (unsigned long long)size1-size0);
+ (unsigned long long) (size1 - size0),
+ (unsigned long long) add_align);
}
return 0;
}
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index b1ffebec9b9e..7cfd2db02deb 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -777,12 +777,13 @@ static int pcifront_publish_info(struct pcifront_device *pdev)
{
int err = 0;
struct xenbus_transaction trans;
+ grant_ref_t gref;
- err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
+ err = xenbus_grant_ring(pdev->xdev, pdev->sh_info, 1, &gref);
if (err < 0)
goto out;
- pdev->gnt_ref = err;
+ pdev->gnt_ref = gref;
err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
if (err)
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 45f67c63d385..a65f821f52eb 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -275,6 +275,7 @@ config BFIN_CFPCMCIA
config AT91_CF
tristate "AT91 CompactFlash Controller"
+ depends on PCI
depends on PCMCIA && ARCH_AT91
depends on !ARCH_MULTIPLATFORM
help
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index bfb799c7b343..e7775a41ae5d 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -317,13 +317,14 @@ static int at91_cf_probe(struct platform_device *pdev)
} else
cf->socket.pci_irq = nr_irqs + 1;
- /* pcmcia layer only remaps "real" memory not iospace */
- cf->socket.io_offset = (unsigned long) devm_ioremap(&pdev->dev,
- cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
- if (!cf->socket.io_offset) {
- status = -ENXIO;
+	/*
+	 * The pcmcia layer only remaps "real" memory, not I/O space.
+	 * io_offset is set to 0x10000 to avoid the check in static_find_io().
+	 */
+ cf->socket.io_offset = 0x10000;
+ status = pci_ioremap_io(0x10000, cf->phys_baseaddr + CF_IO_PHYS);
+ if (status)
goto fail0a;
- }
/* reserve chip-select regions */
if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), "at91_cf")) {
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 8170102d1e93..4e2f501e5548 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -220,9 +220,7 @@ static int __init omap_cf_probe(struct platform_device *pdev)
cf = kzalloc(sizeof *cf, GFP_KERNEL);
if (!cf)
return -ENOMEM;
- init_timer(&cf->timer);
- cf->timer.function = omap_cf_timer;
- cf->timer.data = (unsigned long) cf;
+ setup_timer(&cf->timer, omap_cf_timer, (unsigned long)cf);
cf->pdev = pdev;
platform_set_drvdata(pdev, cf);
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 34ace4854dc2..0f70b4d58f9e 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -707,11 +707,9 @@ static int pd6729_pci_probe(struct pci_dev *dev,
}
} else {
/* poll Card status change */
- init_timer(&socket->poll_timer);
- socket->poll_timer.function = pd6729_interrupt_wrapper;
- socket->poll_timer.data = (unsigned long)socket;
- socket->poll_timer.expires = jiffies + HZ;
- add_timer(&socket->poll_timer);
+ setup_timer(&socket->poll_timer, pd6729_interrupt_wrapper,
+ (unsigned long)socket);
+ mod_timer(&socket->poll_timer, jiffies + HZ);
}
for (i = 0; i < MAX_SOCKETS; i++) {
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 933f4657515b..eed5e9c05353 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -726,9 +726,8 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
{
int ret;
- init_timer(&skt->poll_timer);
- skt->poll_timer.function = soc_common_pcmcia_poll_event;
- skt->poll_timer.data = (unsigned long)skt;
+ setup_timer(&skt->poll_timer, soc_common_pcmcia_poll_event,
+ (unsigned long)skt);
skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
ret = request_resource(&iomem_resource, &skt->res_skt);
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 8a23ccb41213..965bd8491233 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1236,11 +1236,9 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!socket->cb_irq || request_irq(socket->cb_irq, yenta_interrupt, IRQF_SHARED, "yenta", socket)) {
/* No IRQ or request_irq failed. Poll */
socket->cb_irq = 0; /* But zero is a valid IRQ number. */
- init_timer(&socket->poll_timer);
- socket->poll_timer.function = yenta_interrupt_wrapper;
- socket->poll_timer.data = (unsigned long)socket;
- socket->poll_timer.expires = jiffies + HZ;
- add_timer(&socket->poll_timer);
+ setup_timer(&socket->poll_timer, yenta_interrupt_wrapper,
+ (unsigned long)socket);
+ mod_timer(&socket->poll_timer, jiffies + HZ);
dev_printk(KERN_INFO, &dev->dev,
"no PCI IRQ, CardBus support disabled for this "
"socket.\n");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index ee9f44ad7f02..aeb5729fbda6 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -67,23 +67,20 @@ config PINCTRL_AT91
help
Say Y here to enable the at91 pinctrl driver
-config PINCTRL_BCM2835
- bool
- select PINMUX
- select PINCONF
-
-config PINCTRL_BCM281XX
- bool "Broadcom BCM281xx pinctrl driver"
- depends on OF && (ARCH_BCM_MOBILE || COMPILE_TEST)
- select PINMUX
+config PINCTRL_AMD
+ bool "AMD GPIO pin control"
+ depends on GPIOLIB
+ select GPIOLIB_IRQCHIP
select PINCONF
select GENERIC_PINCONF
- select REGMAP_MMIO
help
- Say Y here to support Broadcom BCM281xx pinctrl driver, which is used
- for the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351,
- BCM28145, and BCM28155 SoCs. This driver requires the pinctrl
- framework. GPIO is provided by a separate GPIO driver.
+	  Driver for memory-mapped GPIO functionality on AMD platforms
+	  (x86 or ARM). Most pins are usually muxed to some other
+	  functionality by firmware, so only a small number is available
+	  for GPIO use.
+
+ Requires ACPI/FDT device enumeration code to set up a platform
+ device.
config PINCTRL_LANTIQ
bool
@@ -98,9 +95,11 @@ config PINCTRL_FALCON
config PINCTRL_MESON
bool
+ depends on OF
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select OF_GPIO
select REGMAP_MMIO
@@ -154,6 +153,10 @@ config PINCTRL_TEGRA124
bool
select PINCTRL_TEGRA
+config PINCTRL_TEGRA210
+ bool
+ select PINCTRL_TEGRA
+
config PINCTRL_TEGRA_XUSB
def_bool y if ARCH_TEGRA
select GENERIC_PHY
@@ -207,6 +210,7 @@ config PINCTRL_ZYNQ
help
 	  This selects the pinctrl driver for Xilinx Zynq.
+source "drivers/pinctrl/bcm/Kconfig"
source "drivers/pinctrl/berlin/Kconfig"
source "drivers/pinctrl/freescale/Kconfig"
source "drivers/pinctrl/intel/Kconfig"
@@ -218,6 +222,7 @@ source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
+source "drivers/pinctrl/mediatek/Kconfig"
config PINCTRL_XWAY
bool
@@ -226,7 +231,8 @@ config PINCTRL_XWAY
config PINCTRL_TB10X
bool
- depends on ARC_PLAT_TB10X
+ depends on OF && ARC_PLAT_TB10X
+ select GPIOLIB
endmenu
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 0475206dd600..6eadf04a33b3 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -14,8 +14,7 @@ obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
-obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
-obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
+obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
@@ -27,6 +26,7 @@ obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o
+obj-$(CONFIG_PINCTRL_TEGRA210) += pinctrl-tegra210.o
obj-$(CONFIG_PINCTRL_TEGRA_XUSB) += pinctrl-tegra-xusb.o
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
@@ -38,6 +38,7 @@ obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
+obj-$(CONFIG_ARCH_BCM) += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-y += freescale/
obj-$(CONFIG_X86) += intel/
@@ -49,3 +50,4 @@ obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
+obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
new file mode 100644
index 000000000000..cd11d4d9ad58
--- /dev/null
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -0,0 +1,56 @@
+#
+# Broadcom pinctrl drivers
+#
+
+config PINCTRL_BCM281XX
+ bool "Broadcom BCM281xx pinctrl driver"
+ depends on OF && (ARCH_BCM_MOBILE || COMPILE_TEST)
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select REGMAP_MMIO
+ help
+ Say Y here to support Broadcom BCM281xx pinctrl driver, which is used
+ for the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351,
+ BCM28145, and BCM28155 SoCs. This driver requires the pinctrl
+ framework. GPIO is provided by a separate GPIO driver.
+
+config PINCTRL_BCM2835
+ bool
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_CYGNUS_GPIO
+ bool "Broadcom Cygnus GPIO (with PINCONF) driver"
+ depends on OF_GPIO && ARCH_BCM_CYGNUS
+ select GPIOLIB_IRQCHIP
+ select PINCONF
+ select GENERIC_PINCONF
+ default ARCH_BCM_CYGNUS
+ help
+ Say yes here to enable the Broadcom Cygnus GPIO driver.
+
+ The Broadcom Cygnus SoC has 3 GPIO controllers including the ASIU
+ GPIO controller (ASIU), the chipCommonG GPIO controller (CCM), and
+ the always-ON GPIO controller (CRMU/AON). All 3 GPIO controllers are
+ supported by this driver.
+
+ All 3 Cygnus GPIO controllers support basic PINCONF functions such
+ as bias pull up, pull down, and drive strength configurations, when
+ these pins are muxed to GPIO.
+
+ Pins from the ASIU GPIO can be individually muxed to GPIO function,
+ through interaction with the Cygnus IOMUX controller.
+
+config PINCTRL_CYGNUS_MUX
+ bool "Broadcom Cygnus IOMUX driver"
+ depends on (ARCH_BCM_CYGNUS || COMPILE_TEST)
+ select PINMUX
+ select GENERIC_PINCONF
+ default ARCH_BCM_CYGNUS
+ help
+ Say yes here to enable the Broadcom Cygnus IOMUX driver.
+
+	  The Broadcom Cygnus IOMUX driver supports group-based IOMUX
+	  configuration, with the exception that certain individual pins
+	  can be overridden to GPIO function.
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
new file mode 100644
index 000000000000..2b2f70ee804c
--- /dev/null
+++ b/drivers/pinctrl/bcm/Makefile
@@ -0,0 +1,6 @@
+# Broadcom pinctrl support
+
+obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
+obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
+obj-$(CONFIG_PINCTRL_CYGNUS_GPIO) += pinctrl-cygnus-gpio.o
+obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index b88cfe5ed55a..9641f1c7617e 100644
--- a/drivers/pinctrl/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -21,8 +21,8 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include "core.h"
-#include "pinctrl-utils.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
/* BCM281XX Pin Control Registers Definitions */
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 9aa8a3f10b10..8d908e3f42c3 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -403,15 +403,7 @@ static irqreturn_t bcm2835_gpio_irq_handler(int irq, void *dev_id)
gpio = (32 * bank) + offset;
type = pc->irq_type[gpio];
- /* ack edge triggered IRQs immediately */
- if (!(type & IRQ_TYPE_LEVEL_MASK))
- bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
-
generic_handle_irq(irq_linear_revmap(pc->irq_domain, gpio));
-
- /* ack level triggered IRQ after handling them */
- if (type & IRQ_TYPE_LEVEL_MASK)
- bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
}
return events ? IRQ_HANDLED : IRQ_NONE;
}
@@ -591,16 +583,32 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
else
ret = __bcm2835_gpio_irq_set_type_disabled(pc, gpio, type);
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ __irq_set_handler_locked(data->irq, handle_edge_irq);
+ else
+ __irq_set_handler_locked(data->irq, handle_level_irq);
+
spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
return ret;
}
+static void bcm2835_gpio_irq_ack(struct irq_data *data)
+{
+ struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+ unsigned gpio = irqd_to_hwirq(data);
+
+ bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
+}
+
static struct irq_chip bcm2835_gpio_irq_chip = {
.name = MODULE_NAME,
.irq_enable = bcm2835_gpio_irq_enable,
.irq_disable = bcm2835_gpio_irq_disable,
.irq_set_type = bcm2835_gpio_irq_set_type,
+ .irq_ack = bcm2835_gpio_irq_ack,
+ .irq_mask = bcm2835_gpio_irq_disable,
+ .irq_unmask = bcm2835_gpio_irq_enable,
};
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
@@ -977,7 +985,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
int irq = irq_create_mapping(pc->irq_domain, i);
irq_set_lockdep_class(irq, &gpio_lock_class);
irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip,
- handle_simple_irq);
+ handle_level_irq);
irq_set_chip_data(irq, pc);
set_irq_flags(irq, IRQF_VALID);
}
@@ -1051,7 +1059,7 @@ static int bcm2835_pinctrl_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id bcm2835_pinctrl_match[] = {
+static const struct of_device_id bcm2835_pinctrl_match[] = {
{ .compatible = "brcm,bcm2835-gpio" },
{}
};
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
new file mode 100644
index 000000000000..4ad5c1a996e3
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
@@ -0,0 +1,907 @@
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file contains the Broadcom Cygnus GPIO driver that supports 3
+ * GPIO controllers on Cygnus including the ASIU GPIO controller, the
+ * chipCommonG GPIO controller, and the always-on GPIO controller. Basic
+ * PINCONF such as bias pull up/down, and drive strength are also supported
+ * PINCONF settings such as bias pull up/down and drive strength are also
+ * supported in this driver.
+ * Pins from the ASIU GPIO can be individually muxed to GPIO function,
+ * through the interaction with the Cygnus IOMUX controller
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/ioport.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "../pinctrl-utils.h"
+
+#define CYGNUS_GPIO_DATA_IN_OFFSET 0x00
+#define CYGNUS_GPIO_DATA_OUT_OFFSET 0x04
+#define CYGNUS_GPIO_OUT_EN_OFFSET 0x08
+#define CYGNUS_GPIO_IN_TYPE_OFFSET 0x0c
+#define CYGNUS_GPIO_INT_DE_OFFSET 0x10
+#define CYGNUS_GPIO_INT_EDGE_OFFSET 0x14
+#define CYGNUS_GPIO_INT_MSK_OFFSET 0x18
+#define CYGNUS_GPIO_INT_STAT_OFFSET 0x1c
+#define CYGNUS_GPIO_INT_MSTAT_OFFSET 0x20
+#define CYGNUS_GPIO_INT_CLR_OFFSET 0x24
+#define CYGNUS_GPIO_PAD_RES_OFFSET 0x34
+#define CYGNUS_GPIO_RES_EN_OFFSET 0x38
+
+/* drive strength control for ASIU GPIO */
+#define CYGNUS_GPIO_ASIU_DRV0_CTRL_OFFSET 0x58
+
+/* drive strength control for CCM/CRMU (AON) GPIO */
+#define CYGNUS_GPIO_DRV0_CTRL_OFFSET 0x00
+
+#define GPIO_BANK_SIZE 0x200
+#define NGPIOS_PER_BANK 32
+#define GPIO_BANK(pin) ((pin) / NGPIOS_PER_BANK)
+
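+/* a GPIO's register lives in bank (pin / 32); its bit within that register is (pin % 32) */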
+#define CYGNUS_GPIO_REG(pin, reg) (GPIO_BANK(pin) * GPIO_BANK_SIZE + (reg))
+#define CYGNUS_GPIO_SHIFT(pin) ((pin) % NGPIOS_PER_BANK)
+
+#define GPIO_DRV_STRENGTH_BIT_SHIFT 20
+#define GPIO_DRV_STRENGTH_BITS 3
+#define GPIO_DRV_STRENGTH_BIT_MASK ((1 << GPIO_DRV_STRENGTH_BITS) - 1)
+
+/*
+ * Cygnus GPIO core
+ *
+ * @dev: pointer to device
+ * @base: I/O register base for Cygnus GPIO controller
+ * @io_ctrl: I/O register base for certain types of Cygnus GPIO controllers
+ * that have the PINCONF support implemented outside of the GPIO block
+ * @lock: lock to protect access to I/O registers
+ * @gc: GPIO chip
+ * @num_banks: number of GPIO banks, each bank supports up to 32 GPIOs
+ * @pinmux_is_supported: flag to indicate this GPIO controller contains pins
+ * that can be individually muxed to GPIO
+ * @pctl: pointer to pinctrl_dev
+ * @pctldesc: pinctrl descriptor
+ */
+struct cygnus_gpio {
+ struct device *dev;
+
+ void __iomem *base;
+ void __iomem *io_ctrl;
+
+ spinlock_t lock;
+
+ struct gpio_chip gc;
+ unsigned num_banks;
+
+ bool pinmux_is_supported;
+
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc pctldesc;
+};
+
+static inline struct cygnus_gpio *to_cygnus_gpio(struct gpio_chip *gc)
+{
+ return container_of(gc, struct cygnus_gpio, gc);
+}
+
+/*
+ * Mapping from PINCONF pins to GPIO pins is 1-to-1
+ */
+static inline unsigned cygnus_pin_to_gpio(unsigned pin)
+{
+ return pin;
+}
+
+/**
+ * cygnus_set_bit - set or clear one bit (corresponding to the GPIO pin) in a
+ * Cygnus GPIO register
+ *
+ * @chip: Cygnus GPIO device
+ * @reg: register offset
+ * @gpio: GPIO pin
+ * @set: set or clear
+ */
+static inline void cygnus_set_bit(struct cygnus_gpio *chip, unsigned int reg,
+ unsigned gpio, bool set)
+{
+ unsigned int offset = CYGNUS_GPIO_REG(gpio, reg);
+ unsigned int shift = CYGNUS_GPIO_SHIFT(gpio);
+ u32 val;
+
+ val = readl(chip->base + offset);
+ if (set)
+ val |= BIT(shift);
+ else
+ val &= ~BIT(shift);
+ writel(val, chip->base + offset);
+}
+
+static inline bool cygnus_get_bit(struct cygnus_gpio *chip, unsigned int reg,
+ unsigned gpio)
+{
+ unsigned int offset = CYGNUS_GPIO_REG(gpio, reg);
+ unsigned int shift = CYGNUS_GPIO_SHIFT(gpio);
+
+ return !!(readl(chip->base + offset) & BIT(shift));
+}
+
+static void cygnus_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+ int i, bit;
+
+ chained_irq_enter(irq_chip, desc);
+
+ /* go through the entire GPIO banks and handle all interrupts */
+ for (i = 0; i < chip->num_banks; i++) {
+ unsigned long val = readl(chip->base + (i * GPIO_BANK_SIZE) +
+ CYGNUS_GPIO_INT_MSTAT_OFFSET);
+
+ for_each_set_bit(bit, &val, NGPIOS_PER_BANK) {
+ unsigned pin = NGPIOS_PER_BANK * i + bit;
+ int child_irq = irq_find_mapping(gc->irqdomain, pin);
+
+ /*
+ * Clear the interrupt before invoking the
+ * handler, so we do not leave any window
+ */
+ writel(BIT(bit), chip->base + (i * GPIO_BANK_SIZE) +
+ CYGNUS_GPIO_INT_CLR_OFFSET);
+
+ generic_handle_irq(child_irq);
+ }
+ }
+
+ chained_irq_exit(irq_chip, desc);
+}
+
+
+static void cygnus_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned gpio = d->hwirq;
+ unsigned int offset = CYGNUS_GPIO_REG(gpio,
+ CYGNUS_GPIO_INT_CLR_OFFSET);
+ unsigned int shift = CYGNUS_GPIO_SHIFT(gpio);
+ u32 val = BIT(shift);
+
+ writel(val, chip->base + offset);
+}
+
+/**
+ * cygnus_gpio_irq_set_mask - mask/unmask a GPIO interrupt
+ *
+ * @d: IRQ chip data
+ * @unmask: true to unmask (enable), false to mask the GPIO interrupt
+ */
+static void cygnus_gpio_irq_set_mask(struct irq_data *d, bool unmask)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned gpio = d->hwirq;
+
+ cygnus_set_bit(chip, CYGNUS_GPIO_INT_MSK_OFFSET, gpio, unmask);
+}
+
+static void cygnus_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ cygnus_gpio_irq_set_mask(d, false);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static void cygnus_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ cygnus_gpio_irq_set_mask(d, true);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static int cygnus_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned gpio = d->hwirq;
+ bool level_triggered = false;
+ bool dual_edge = false;
+ bool rising_or_high = false;
+ unsigned long flags;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ rising_or_high = true;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ dual_edge = true;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ level_triggered = true;
+ rising_or_high = true;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ level_triggered = true;
+ break;
+
+ default:
+ dev_err(chip->dev, "invalid GPIO IRQ type 0x%x\n",
+ type);
+ return -EINVAL;
+ }
+
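+	/*
+	 * Program the three per-pin trigger controls: IN_TYPE selects level
+	 * vs. edge sensing, INT_DE enables dual-edge detection, and INT_EDGE
+	 * selects rising/high vs. falling/low.
+	 */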
+ spin_lock_irqsave(&chip->lock, flags);
+ cygnus_set_bit(chip, CYGNUS_GPIO_IN_TYPE_OFFSET, gpio,
+ level_triggered);
+ cygnus_set_bit(chip, CYGNUS_GPIO_INT_DE_OFFSET, gpio, dual_edge);
+ cygnus_set_bit(chip, CYGNUS_GPIO_INT_EDGE_OFFSET, gpio,
+ rising_or_high);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev,
+ "gpio:%u level_triggered:%d dual_edge:%d rising_or_high:%d\n",
+ gpio, level_triggered, dual_edge, rising_or_high);
+
+ return 0;
+}
+
+static struct irq_chip cygnus_gpio_irq_chip = {
+ .name = "bcm-cygnus-gpio",
+ .irq_ack = cygnus_gpio_irq_ack,
+ .irq_mask = cygnus_gpio_irq_mask,
+ .irq_unmask = cygnus_gpio_irq_unmask,
+ .irq_set_type = cygnus_gpio_irq_set_type,
+};
+
+/*
+ * Request the Cygnus IOMUX pinmux controller to mux individual pins to GPIO
+ */
+static int cygnus_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned gpio = gc->base + offset;
+
+ /* not all Cygnus GPIO pins can be muxed individually */
+ if (!chip->pinmux_is_supported)
+ return 0;
+
+ return pinctrl_request_gpio(gpio);
+}
+
+static void cygnus_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned gpio = gc->base + offset;
+
+ if (!chip->pinmux_is_supported)
+ return;
+
+ pinctrl_free_gpio(gpio);
+}
+
+static int cygnus_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
+{
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ cygnus_set_bit(chip, CYGNUS_GPIO_OUT_EN_OFFSET, gpio, false);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev, "gpio:%u set input\n", gpio);
+
+ return 0;
+}
+
+static int cygnus_gpio_direction_output(struct gpio_chip *gc, unsigned gpio,
+ int val)
+{
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ cygnus_set_bit(chip, CYGNUS_GPIO_OUT_EN_OFFSET, gpio, true);
+ cygnus_set_bit(chip, CYGNUS_GPIO_DATA_OUT_OFFSET, gpio, !!(val));
+ spin_unlock_irqrestore(&chip->lock, flags);
+
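+		/* __pci_read_base() returns 1 for a 64-bit BAR; skip its upper half */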
+ dev_dbg(chip->dev, "gpio:%u set output, value:%d\n", gpio, val);
+
+ return 0;
+}
+
+static void cygnus_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ cygnus_set_bit(chip, CYGNUS_GPIO_DATA_OUT_OFFSET, gpio, !!(val));
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+}
+
+static int cygnus_gpio_get(struct gpio_chip *gc, unsigned gpio)
+{
+ struct cygnus_gpio *chip = to_cygnus_gpio(gc);
+ unsigned int offset = CYGNUS_GPIO_REG(gpio,
+ CYGNUS_GPIO_DATA_IN_OFFSET);
+ unsigned int shift = CYGNUS_GPIO_SHIFT(gpio);
+
+ return !!(readl(chip->base + offset) & BIT(shift));
+}
+
+static int cygnus_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 1;
+}
+
+/*
+ * Only one group: "gpio_grp", since this local pinctrl device only performs
+ * GPIO specific PINCONF configurations
+ */
+static const char *cygnus_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return "gpio_grp";
+}
+
+static const struct pinctrl_ops cygnus_pctrl_ops = {
+ .get_groups_count = cygnus_get_groups_count,
+ .get_group_name = cygnus_get_group_name,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int cygnus_gpio_set_pull(struct cygnus_gpio *chip, unsigned gpio,
+ bool disable, bool pull_up)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+
+ if (disable) {
+ cygnus_set_bit(chip, CYGNUS_GPIO_RES_EN_OFFSET, gpio, false);
+ } else {
+ cygnus_set_bit(chip, CYGNUS_GPIO_PAD_RES_OFFSET, gpio,
+ pull_up);
+ cygnus_set_bit(chip, CYGNUS_GPIO_RES_EN_OFFSET, gpio, true);
+ }
+
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev, "gpio:%u set pullup:%d\n", gpio, pull_up);
+
+ return 0;
+}
+
+static void cygnus_gpio_get_pull(struct cygnus_gpio *chip, unsigned gpio,
+ bool *disable, bool *pull_up)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ *disable = !cygnus_get_bit(chip, CYGNUS_GPIO_RES_EN_OFFSET, gpio);
+ *pull_up = cygnus_get_bit(chip, CYGNUS_GPIO_PAD_RES_OFFSET, gpio);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static int cygnus_gpio_set_strength(struct cygnus_gpio *chip, unsigned gpio,
+ unsigned strength)
+{
+ void __iomem *base;
+ unsigned int i, offset, shift;
+ u32 val;
+ unsigned long flags;
+
+ /* make sure drive strength is supported */
+ if (strength < 2 || strength > 16 || (strength % 2))
+ return -ENOTSUPP;
+
+ if (chip->io_ctrl) {
+ base = chip->io_ctrl;
+ offset = CYGNUS_GPIO_DRV0_CTRL_OFFSET;
+ } else {
+ base = chip->base;
+ offset = CYGNUS_GPIO_REG(gpio,
+ CYGNUS_GPIO_ASIU_DRV0_CTRL_OFFSET);
+ }
+
+ shift = CYGNUS_GPIO_SHIFT(gpio);
+
+ dev_dbg(chip->dev, "gpio:%u set drive strength:%d mA\n", gpio,
+ strength);
+
+ spin_lock_irqsave(&chip->lock, flags);
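+	/*
+	 * Drive strength in mA is encoded as (mA / 2 - 1) over 3 bits; each
+	 * bit of the encoding lives in its own DRVx control register, spaced
+	 * 4 bytes apart, at this GPIO's bit position.
+	 */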
+ strength = (strength / 2) - 1;
+ for (i = 0; i < GPIO_DRV_STRENGTH_BITS; i++) {
+ val = readl(base + offset);
+ val &= ~BIT(shift);
+ val |= ((strength >> i) & 0x1) << shift;
+ writel(val, base + offset);
+ offset += 4;
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int cygnus_gpio_get_strength(struct cygnus_gpio *chip, unsigned gpio,
+ u16 *strength)
+{
+ void __iomem *base;
+ unsigned int i, offset, shift;
+ u32 val;
+ unsigned long flags;
+
+ if (chip->io_ctrl) {
+ base = chip->io_ctrl;
+ offset = CYGNUS_GPIO_DRV0_CTRL_OFFSET;
+ } else {
+ base = chip->base;
+ offset = CYGNUS_GPIO_REG(gpio,
+ CYGNUS_GPIO_ASIU_DRV0_CTRL_OFFSET);
+ }
+
+ shift = CYGNUS_GPIO_SHIFT(gpio);
+
+ spin_lock_irqsave(&chip->lock, flags);
+ *strength = 0;
+ for (i = 0; i < GPIO_DRV_STRENGTH_BITS; i++) {
+ val = readl(base + offset) & BIT(shift);
+ val >>= shift;
+ *strength += (val << i);
+ offset += 4;
+ }
+
+ /* convert to mA */
+ *strength = (*strength + 1) * 2;
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int cygnus_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct cygnus_gpio *chip = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned gpio = cygnus_pin_to_gpio(pin);
+ u16 arg;
+ bool disable, pull_up;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ cygnus_gpio_get_pull(chip, gpio, &disable, &pull_up);
+ if (disable)
+ return 0;
+ else
+ return -EINVAL;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ cygnus_gpio_get_pull(chip, gpio, &disable, &pull_up);
+ if (!disable && pull_up)
+ return 0;
+ else
+ return -EINVAL;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ cygnus_gpio_get_pull(chip, gpio, &disable, &pull_up);
+ if (!disable && !pull_up)
+ return 0;
+ else
+ return -EINVAL;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = cygnus_gpio_get_strength(chip, gpio, &arg);
+ if (ret)
+ return ret;
+ else
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int cygnus_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct cygnus_gpio *chip = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ u16 arg;
+ unsigned i, gpio = cygnus_pin_to_gpio(pin);
+ int ret = -ENOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = cygnus_gpio_set_pull(chip, gpio, true, false);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = cygnus_gpio_set_pull(chip, gpio, false, true);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = cygnus_gpio_set_pull(chip, gpio, false, false);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = cygnus_gpio_set_strength(chip, gpio, arg);
+ if (ret < 0)
+ goto out;
+ break;
+
+ default:
+ dev_err(chip->dev, "invalid configuration\n");
+ return -ENOTSUPP;
+ }
+ } /* for each config */
+
+out:
+ return ret;
+}
+
+static const struct pinconf_ops cygnus_pconf_ops = {
+ .is_generic = true,
+ .pin_config_get = cygnus_pin_config_get,
+ .pin_config_set = cygnus_pin_config_set,
+};
+
+/*
+ * Map a GPIO in the local gpio_chip pin space to a pin in the Cygnus IOMUX
+ * pinctrl pin space
+ */
+struct cygnus_gpio_pin_range {
+ unsigned offset;
+ unsigned pin_base;
+ unsigned num_pins;
+};
+
+#define CYGNUS_PINRANGE(o, p, n) { .offset = o, .pin_base = p, .num_pins = n }
+
+/*
+ * Pin mapping table for mapping local GPIO pins to Cygnus IOMUX pinctrl pins
+ */
+static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = {
+ CYGNUS_PINRANGE(0, 42, 1),
+ CYGNUS_PINRANGE(1, 44, 3),
+ CYGNUS_PINRANGE(4, 48, 1),
+ CYGNUS_PINRANGE(5, 50, 3),
+ CYGNUS_PINRANGE(8, 126, 1),
+ CYGNUS_PINRANGE(9, 155, 1),
+ CYGNUS_PINRANGE(10, 152, 1),
+ CYGNUS_PINRANGE(11, 154, 1),
+ CYGNUS_PINRANGE(12, 153, 1),
+ CYGNUS_PINRANGE(13, 127, 3),
+ CYGNUS_PINRANGE(16, 140, 1),
+ CYGNUS_PINRANGE(17, 145, 7),
+ CYGNUS_PINRANGE(24, 130, 10),
+ CYGNUS_PINRANGE(34, 141, 4),
+ CYGNUS_PINRANGE(38, 54, 1),
+ CYGNUS_PINRANGE(39, 56, 3),
+ CYGNUS_PINRANGE(42, 60, 3),
+ CYGNUS_PINRANGE(45, 64, 3),
+ CYGNUS_PINRANGE(48, 68, 2),
+ CYGNUS_PINRANGE(50, 84, 6),
+ CYGNUS_PINRANGE(56, 94, 6),
+ CYGNUS_PINRANGE(62, 72, 1),
+ CYGNUS_PINRANGE(63, 70, 1),
+ CYGNUS_PINRANGE(64, 80, 1),
+ CYGNUS_PINRANGE(65, 74, 3),
+ CYGNUS_PINRANGE(68, 78, 1),
+ CYGNUS_PINRANGE(69, 82, 1),
+ CYGNUS_PINRANGE(70, 156, 17),
+ CYGNUS_PINRANGE(87, 104, 12),
+ CYGNUS_PINRANGE(99, 102, 2),
+ CYGNUS_PINRANGE(101, 90, 4),
+ CYGNUS_PINRANGE(105, 116, 10),
+ CYGNUS_PINRANGE(123, 11, 1),
+ CYGNUS_PINRANGE(124, 38, 4),
+ CYGNUS_PINRANGE(128, 43, 1),
+ CYGNUS_PINRANGE(129, 47, 1),
+ CYGNUS_PINRANGE(130, 49, 1),
+ CYGNUS_PINRANGE(131, 53, 1),
+ CYGNUS_PINRANGE(132, 55, 1),
+ CYGNUS_PINRANGE(133, 59, 1),
+ CYGNUS_PINRANGE(134, 63, 1),
+ CYGNUS_PINRANGE(135, 67, 1),
+ CYGNUS_PINRANGE(136, 71, 1),
+ CYGNUS_PINRANGE(137, 73, 1),
+ CYGNUS_PINRANGE(138, 77, 1),
+ CYGNUS_PINRANGE(139, 79, 1),
+ CYGNUS_PINRANGE(140, 81, 1),
+ CYGNUS_PINRANGE(141, 83, 1),
+ CYGNUS_PINRANGE(142, 10, 1)
+};
+
+/*
+ * The Cygnus IOMUX controller mainly supports group based mux configuration,
+ * but certain pins can be muxed to GPIO individually. Only the ASIU GPIO
+ * controller can support this, so it's an optional configuration.
+ *
+ * Returning -ENODEV means GPIO mux is not supported, which is fine.
+ */
+static int cygnus_gpio_pinmux_add_range(struct cygnus_gpio *chip)
+{
+ struct device_node *node = chip->dev->of_node;
+ struct device_node *pinmux_node;
+ struct platform_device *pinmux_pdev;
+ struct gpio_chip *gc = &chip->gc;
+ int i, ret = 0;
+
+ /* parse DT to find the phandle to the pinmux controller */
+ pinmux_node = of_parse_phandle(node, "pinmux", 0);
+ if (!pinmux_node)
+ return -ENODEV;
+
+ pinmux_pdev = of_find_device_by_node(pinmux_node);
+ /* no longer need the pinmux node */
+ of_node_put(pinmux_node);
+ if (!pinmux_pdev) {
+ dev_err(chip->dev, "failed to get pinmux device\n");
+ return -EINVAL;
+ }
+
+ /* now need to create the mapping between local GPIO and PINMUX pins */
+ for (i = 0; i < ARRAY_SIZE(cygnus_gpio_pintable); i++) {
+ ret = gpiochip_add_pin_range(gc, dev_name(&pinmux_pdev->dev),
+ cygnus_gpio_pintable[i].offset,
+ cygnus_gpio_pintable[i].pin_base,
+ cygnus_gpio_pintable[i].num_pins);
+ if (ret) {
+ dev_err(chip->dev, "unable to add GPIO pin range\n");
+ goto err_put_device;
+ }
+ }
+
+ chip->pinmux_is_supported = true;
+
+ /* no need for pinmux_pdev device reference anymore */
+ put_device(&pinmux_pdev->dev);
+ return 0;
+
+err_put_device:
+ put_device(&pinmux_pdev->dev);
+ gpiochip_remove_pin_ranges(gc);
+ return ret;
+}
+
+/*
+ * The Cygnus GPIO controller supports some PINCONF-related configurations,
+ * such as pull up, pull down, and drive strength, when a pin is configured
+ * as GPIO.
+ *
+ * Here a local pinctrl device is created with a simple 1-to-1 pin mapping to
+ * the local GPIO pins.
+ */
+static int cygnus_gpio_register_pinconf(struct cygnus_gpio *chip)
+{
+ struct pinctrl_desc *pctldesc = &chip->pctldesc;
+ struct pinctrl_pin_desc *pins;
+ struct gpio_chip *gc = &chip->gc;
+ int i;
+
+ pins = devm_kcalloc(chip->dev, gc->ngpio, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ pins[i].number = i;
+ pins[i].name = devm_kasprintf(chip->dev, GFP_KERNEL,
+ "gpio-%d", i);
+ if (!pins[i].name)
+ return -ENOMEM;
+ }
+
+ pctldesc->name = dev_name(chip->dev);
+ pctldesc->pctlops = &cygnus_pctrl_ops;
+ pctldesc->pins = pins;
+ pctldesc->npins = gc->ngpio;
+ pctldesc->confops = &cygnus_pconf_ops;
+
+ chip->pctl = pinctrl_register(pctldesc, chip->dev, chip);
+ if (!chip->pctl) {
+ dev_err(chip->dev, "unable to register pinctrl device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void cygnus_gpio_unregister_pinconf(struct cygnus_gpio *chip)
+{
+ if (chip->pctl)
+ pinctrl_unregister(chip->pctl);
+}
+
+struct cygnus_gpio_data {
+ unsigned num_gpios;
+};
+
+static const struct cygnus_gpio_data cygnus_cmm_gpio_data = {
+ .num_gpios = 24,
+};
+
+static const struct cygnus_gpio_data cygnus_asiu_gpio_data = {
+ .num_gpios = 146,
+};
+
+static const struct cygnus_gpio_data cygnus_crmu_gpio_data = {
+ .num_gpios = 6,
+};
+
+static const struct of_device_id cygnus_gpio_of_match[] = {
+ {
+ .compatible = "brcm,cygnus-ccm-gpio",
+ .data = &cygnus_cmm_gpio_data,
+ },
+ {
+ .compatible = "brcm,cygnus-asiu-gpio",
+ .data = &cygnus_asiu_gpio_data,
+ },
+ {
+ .compatible = "brcm,cygnus-crmu-gpio",
+ .data = &cygnus_crmu_gpio_data,
+	},
+	{ /* sentinel */ }
+};
+
+static int cygnus_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cygnus_gpio *chip;
+ struct gpio_chip *gc;
+ u32 ngpios;
+ int irq, ret;
+ const struct of_device_id *match;
+ const struct cygnus_gpio_data *gpio_data;
+
+ match = of_match_device(cygnus_gpio_of_match, dev);
+ if (!match)
+ return -ENODEV;
+ gpio_data = match->data;
+ ngpios = gpio_data->num_gpios;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = dev;
+ platform_set_drvdata(pdev, chip);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(chip->base)) {
+ dev_err(dev, "unable to map I/O memory\n");
+ return PTR_ERR(chip->base);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ chip->io_ctrl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(chip->io_ctrl)) {
+ dev_err(dev, "unable to map I/O memory\n");
+ return PTR_ERR(chip->io_ctrl);
+ }
+ }
+
+ spin_lock_init(&chip->lock);
+
+ gc = &chip->gc;
+ gc->base = -1;
+ gc->ngpio = ngpios;
+ chip->num_banks = (ngpios + NGPIOS_PER_BANK - 1) / NGPIOS_PER_BANK;
+ gc->label = dev_name(dev);
+ gc->dev = dev;
+ gc->of_node = dev->of_node;
+ gc->request = cygnus_gpio_request;
+ gc->free = cygnus_gpio_free;
+ gc->direction_input = cygnus_gpio_direction_input;
+ gc->direction_output = cygnus_gpio_direction_output;
+ gc->set = cygnus_gpio_set;
+ gc->get = cygnus_gpio_get;
+
+ ret = gpiochip_add(gc);
+ if (ret < 0) {
+ dev_err(dev, "unable to add GPIO chip\n");
+ return ret;
+ }
+
+ ret = cygnus_gpio_pinmux_add_range(chip);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "unable to add GPIO pin range\n");
+ goto err_rm_gpiochip;
+ }
+
+ ret = cygnus_gpio_register_pinconf(chip);
+ if (ret) {
+ dev_err(dev, "unable to register pinconf\n");
+ goto err_rm_gpiochip;
+ }
+
+ /* optional GPIO interrupt support */
+ irq = platform_get_irq(pdev, 0);
+	if (irq > 0) {
+ ret = gpiochip_irqchip_add(gc, &cygnus_gpio_irq_chip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "no GPIO irqchip\n");
+ goto err_unregister_pinconf;
+ }
+
+ gpiochip_set_chained_irqchip(gc, &cygnus_gpio_irq_chip, irq,
+ cygnus_gpio_irq_handler);
+ }
+
+ return 0;
+
+err_unregister_pinconf:
+ cygnus_gpio_unregister_pinconf(chip);
+
+err_rm_gpiochip:
+ gpiochip_remove(gc);
+
+ return ret;
+}
+
+static struct platform_driver cygnus_gpio_driver = {
+ .driver = {
+ .name = "cygnus-gpio",
+ .of_match_table = cygnus_gpio_of_match,
+ },
+ .probe = cygnus_gpio_probe,
+};
+
+static int __init cygnus_gpio_init(void)
+{
+ return platform_driver_probe(&cygnus_gpio_driver, cygnus_gpio_probe);
+}
+arch_initcall_sync(cygnus_gpio_init);
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
new file mode 100644
index 000000000000..f9a9283caf81
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -0,0 +1,1022 @@
+/* Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file contains the Cygnus IOMUX driver that supports group based PINMUX
+ * configuration. Although PINMUX configuration is mainly group based, the
+ * Cygnus IOMUX controller allows certain pins to be individually muxed to GPIO
+ * function, and therefore to be controlled by the Cygnus ASIU GPIO controller.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define CYGNUS_NUM_IOMUX_REGS 8
+#define CYGNUS_NUM_MUX_PER_REG 8
+#define CYGNUS_NUM_IOMUX (CYGNUS_NUM_IOMUX_REGS * \
+ CYGNUS_NUM_MUX_PER_REG)
+
+/*
+ * Cygnus IOMUX register description
+ *
+ * @offset: register offset for mux configuration of a group
+ * @shift: bit shift for mux configuration of a group
+ * @alt: alternate function to set to
+ */
+struct cygnus_mux {
+ unsigned int offset;
+ unsigned int shift;
+ unsigned int alt;
+};
+
+/*
+ * Keep track of Cygnus IOMUX configuration and prevent double configuration
+ *
+ * @mux: Cygnus IOMUX register description
+ * @is_configured: flag to indicate whether a mux setting has already been
+ * configured
+ */
+struct cygnus_mux_log {
+ struct cygnus_mux mux;
+ bool is_configured;
+};
+
+/*
+ * Group based IOMUX configuration
+ *
+ * @name: name of the group
+ * @pins: array of pins used by this group
+ * @num_pins: total number of pins used by this group
+ * @mux: Cygnus group based IOMUX configuration
+ */
+struct cygnus_pin_group {
+ const char *name;
+ const unsigned *pins;
+ unsigned num_pins;
+ struct cygnus_mux mux;
+};
+
+/*
+ * Cygnus mux function and supported pin groups
+ *
+ * @name: name of the function
+ * @groups: array of groups that can be supported by this function
+ * @num_groups: total number of groups that can be supported by this function
+ */
+struct cygnus_pin_function {
+ const char *name;
+ const char * const *groups;
+ unsigned num_groups;
+};
+
+/*
+ * Cygnus IOMUX pinctrl core
+ *
+ * @pctl: pointer to pinctrl_dev
+ * @dev: pointer to device
+ * @base0: first I/O register base of the Cygnus IOMUX controller
+ * @base1: second I/O register base
+ * @groups: pointer to array of groups
+ * @num_groups: total number of groups
+ * @functions: pointer to array of functions
+ * @num_functions: total number of functions
+ * @mux_log: pointer to the array of mux logs
+ * @lock: lock to protect register access
+ */
+struct cygnus_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct device *dev;
+ void __iomem *base0;
+ void __iomem *base1;
+
+ const struct cygnus_pin_group *groups;
+ unsigned num_groups;
+
+ const struct cygnus_pin_function *functions;
+ unsigned num_functions;
+
+ struct cygnus_mux_log *mux_log;
+
+ spinlock_t lock;
+};
+
+/*
+ * Certain pins can be individually muxed to GPIO function
+ *
+ * @is_supported: flag to indicate GPIO mux is supported for this pin
+ * @offset: register offset for GPIO mux override of a pin
+ * @shift: bit shift for GPIO mux override of a pin
+ */
+struct cygnus_gpio_mux {
+ int is_supported;
+ unsigned int offset;
+ unsigned int shift;
+};
+
+/*
+ * Description of a pin in Cygnus
+ *
+ * @pin: pin number
+ * @name: pin name
+ * @gpio_mux: GPIO override related information
+ */
+struct cygnus_pin {
+ unsigned pin;
+ char *name;
+ struct cygnus_gpio_mux gpio_mux;
+};
+
+#define CYGNUS_PIN_DESC(p, n, i, o, s) \
+{ \
+ .pin = p, \
+ .name = n, \
+ .gpio_mux = { \
+ .is_supported = i, \
+ .offset = o, \
+ .shift = s, \
+ }, \
+}
+
+/*
+ * List of pins in Cygnus
+ */
+static struct cygnus_pin cygnus_pins[] = {
+ CYGNUS_PIN_DESC(0, "ext_device_reset_n", 0, 0, 0),
+ CYGNUS_PIN_DESC(1, "chip_mode0", 0, 0, 0),
+ CYGNUS_PIN_DESC(2, "chip_mode1", 0, 0, 0),
+ CYGNUS_PIN_DESC(3, "chip_mode2", 0, 0, 0),
+ CYGNUS_PIN_DESC(4, "chip_mode3", 0, 0, 0),
+ CYGNUS_PIN_DESC(5, "chip_mode4", 0, 0, 0),
+ CYGNUS_PIN_DESC(6, "bsc0_scl", 0, 0, 0),
+ CYGNUS_PIN_DESC(7, "bsc0_sda", 0, 0, 0),
+ CYGNUS_PIN_DESC(8, "bsc1_scl", 0, 0, 0),
+ CYGNUS_PIN_DESC(9, "bsc1_sda", 0, 0, 0),
+ CYGNUS_PIN_DESC(10, "d1w_dq", 1, 0x28, 0),
+ CYGNUS_PIN_DESC(11, "d1wowstz_l", 1, 0x4, 28),
+ CYGNUS_PIN_DESC(12, "gpio0", 0, 0, 0),
+ CYGNUS_PIN_DESC(13, "gpio1", 0, 0, 0),
+ CYGNUS_PIN_DESC(14, "gpio2", 0, 0, 0),
+ CYGNUS_PIN_DESC(15, "gpio3", 0, 0, 0),
+ CYGNUS_PIN_DESC(16, "gpio4", 0, 0, 0),
+ CYGNUS_PIN_DESC(17, "gpio5", 0, 0, 0),
+ CYGNUS_PIN_DESC(18, "gpio6", 0, 0, 0),
+ CYGNUS_PIN_DESC(19, "gpio7", 0, 0, 0),
+ CYGNUS_PIN_DESC(20, "gpio8", 0, 0, 0),
+ CYGNUS_PIN_DESC(21, "gpio9", 0, 0, 0),
+ CYGNUS_PIN_DESC(22, "gpio10", 0, 0, 0),
+ CYGNUS_PIN_DESC(23, "gpio11", 0, 0, 0),
+ CYGNUS_PIN_DESC(24, "gpio12", 0, 0, 0),
+ CYGNUS_PIN_DESC(25, "gpio13", 0, 0, 0),
+ CYGNUS_PIN_DESC(26, "gpio14", 0, 0, 0),
+ CYGNUS_PIN_DESC(27, "gpio15", 0, 0, 0),
+ CYGNUS_PIN_DESC(28, "gpio16", 0, 0, 0),
+ CYGNUS_PIN_DESC(29, "gpio17", 0, 0, 0),
+ CYGNUS_PIN_DESC(30, "gpio18", 0, 0, 0),
+ CYGNUS_PIN_DESC(31, "gpio19", 0, 0, 0),
+ CYGNUS_PIN_DESC(32, "gpio20", 0, 0, 0),
+ CYGNUS_PIN_DESC(33, "gpio21", 0, 0, 0),
+ CYGNUS_PIN_DESC(34, "gpio22", 0, 0, 0),
+ CYGNUS_PIN_DESC(35, "gpio23", 0, 0, 0),
+ CYGNUS_PIN_DESC(36, "mdc", 0, 0, 0),
+ CYGNUS_PIN_DESC(37, "mdio", 0, 0, 0),
+ CYGNUS_PIN_DESC(38, "pwm0", 1, 0x10, 30),
+ CYGNUS_PIN_DESC(39, "pwm1", 1, 0x10, 28),
+ CYGNUS_PIN_DESC(40, "pwm2", 1, 0x10, 26),
+ CYGNUS_PIN_DESC(41, "pwm3", 1, 0x10, 24),
+ CYGNUS_PIN_DESC(42, "sc0_clk", 1, 0x10, 22),
+ CYGNUS_PIN_DESC(43, "sc0_cmdvcc_l", 1, 0x10, 20),
+ CYGNUS_PIN_DESC(44, "sc0_detect", 1, 0x10, 18),
+ CYGNUS_PIN_DESC(45, "sc0_fcb", 1, 0x10, 16),
+ CYGNUS_PIN_DESC(46, "sc0_io", 1, 0x10, 14),
+ CYGNUS_PIN_DESC(47, "sc0_rst_l", 1, 0x10, 12),
+ CYGNUS_PIN_DESC(48, "sc1_clk", 1, 0x10, 10),
+ CYGNUS_PIN_DESC(49, "sc1_cmdvcc_l", 1, 0x10, 8),
+ CYGNUS_PIN_DESC(50, "sc1_detect", 1, 0x10, 6),
+ CYGNUS_PIN_DESC(51, "sc1_fcb", 1, 0x10, 4),
+ CYGNUS_PIN_DESC(52, "sc1_io", 1, 0x10, 2),
+ CYGNUS_PIN_DESC(53, "sc1_rst_l", 1, 0x10, 0),
+ CYGNUS_PIN_DESC(54, "spi0_clk", 1, 0x18, 10),
+ CYGNUS_PIN_DESC(55, "spi0_mosi", 1, 0x18, 6),
+ CYGNUS_PIN_DESC(56, "spi0_miso", 1, 0x18, 8),
+ CYGNUS_PIN_DESC(57, "spi0_ss", 1, 0x18, 4),
+ CYGNUS_PIN_DESC(58, "spi1_clk", 1, 0x18, 2),
+ CYGNUS_PIN_DESC(59, "spi1_mosi", 1, 0x1c, 30),
+ CYGNUS_PIN_DESC(60, "spi1_miso", 1, 0x18, 0),
+ CYGNUS_PIN_DESC(61, "spi1_ss", 1, 0x1c, 28),
+ CYGNUS_PIN_DESC(62, "spi2_clk", 1, 0x1c, 26),
+ CYGNUS_PIN_DESC(63, "spi2_mosi", 1, 0x1c, 22),
+ CYGNUS_PIN_DESC(64, "spi2_miso", 1, 0x1c, 24),
+ CYGNUS_PIN_DESC(65, "spi2_ss", 1, 0x1c, 20),
+ CYGNUS_PIN_DESC(66, "spi3_clk", 1, 0x1c, 18),
+ CYGNUS_PIN_DESC(67, "spi3_mosi", 1, 0x1c, 14),
+ CYGNUS_PIN_DESC(68, "spi3_miso", 1, 0x1c, 16),
+ CYGNUS_PIN_DESC(69, "spi3_ss", 1, 0x1c, 12),
+ CYGNUS_PIN_DESC(70, "uart0_cts", 1, 0x1c, 10),
+ CYGNUS_PIN_DESC(71, "uart0_rts", 1, 0x1c, 8),
+ CYGNUS_PIN_DESC(72, "uart0_rx", 1, 0x1c, 6),
+ CYGNUS_PIN_DESC(73, "uart0_tx", 1, 0x1c, 4),
+ CYGNUS_PIN_DESC(74, "uart1_cts", 1, 0x1c, 2),
+ CYGNUS_PIN_DESC(75, "uart1_dcd", 1, 0x1c, 0),
+ CYGNUS_PIN_DESC(76, "uart1_dsr", 1, 0x20, 14),
+ CYGNUS_PIN_DESC(77, "uart1_dtr", 1, 0x20, 12),
+ CYGNUS_PIN_DESC(78, "uart1_ri", 1, 0x20, 10),
+ CYGNUS_PIN_DESC(79, "uart1_rts", 1, 0x20, 8),
+ CYGNUS_PIN_DESC(80, "uart1_rx", 1, 0x20, 6),
+ CYGNUS_PIN_DESC(81, "uart1_tx", 1, 0x20, 4),
+ CYGNUS_PIN_DESC(82, "uart3_rx", 1, 0x20, 2),
+ CYGNUS_PIN_DESC(83, "uart3_tx", 1, 0x20, 0),
+ CYGNUS_PIN_DESC(84, "sdio1_clk_sdcard", 1, 0x14, 6),
+ CYGNUS_PIN_DESC(85, "sdio1_cmd", 1, 0x14, 4),
+ CYGNUS_PIN_DESC(86, "sdio1_data0", 1, 0x14, 2),
+ CYGNUS_PIN_DESC(87, "sdio1_data1", 1, 0x14, 0),
+ CYGNUS_PIN_DESC(88, "sdio1_data2", 1, 0x18, 30),
+ CYGNUS_PIN_DESC(89, "sdio1_data3", 1, 0x18, 28),
+ CYGNUS_PIN_DESC(90, "sdio1_wp_n", 1, 0x18, 24),
+ CYGNUS_PIN_DESC(91, "sdio1_card_rst", 1, 0x14, 10),
+ CYGNUS_PIN_DESC(92, "sdio1_led_on", 1, 0x18, 26),
+ CYGNUS_PIN_DESC(93, "sdio1_cd", 1, 0x14, 8),
+ CYGNUS_PIN_DESC(94, "sdio0_clk_sdcard", 1, 0x14, 26),
+ CYGNUS_PIN_DESC(95, "sdio0_cmd", 1, 0x14, 24),
+ CYGNUS_PIN_DESC(96, "sdio0_data0", 1, 0x14, 22),
+ CYGNUS_PIN_DESC(97, "sdio0_data1", 1, 0x14, 20),
+ CYGNUS_PIN_DESC(98, "sdio0_data2", 1, 0x14, 18),
+ CYGNUS_PIN_DESC(99, "sdio0_data3", 1, 0x14, 16),
+ CYGNUS_PIN_DESC(100, "sdio0_wp_n", 1, 0x14, 12),
+ CYGNUS_PIN_DESC(101, "sdio0_card_rst", 1, 0x14, 30),
+ CYGNUS_PIN_DESC(102, "sdio0_led_on", 1, 0x14, 14),
+ CYGNUS_PIN_DESC(103, "sdio0_cd", 1, 0x14, 28),
+ CYGNUS_PIN_DESC(104, "sflash_clk", 1, 0x18, 22),
+ CYGNUS_PIN_DESC(105, "sflash_cs_l", 1, 0x18, 20),
+ CYGNUS_PIN_DESC(106, "sflash_mosi", 1, 0x18, 14),
+ CYGNUS_PIN_DESC(107, "sflash_miso", 1, 0x18, 16),
+ CYGNUS_PIN_DESC(108, "sflash_wp_n", 1, 0x18, 12),
+ CYGNUS_PIN_DESC(109, "sflash_hold_n", 1, 0x18, 18),
+ CYGNUS_PIN_DESC(110, "nand_ale", 1, 0xc, 30),
+ CYGNUS_PIN_DESC(111, "nand_ce0_l", 1, 0xc, 28),
+ CYGNUS_PIN_DESC(112, "nand_ce1_l", 1, 0xc, 26),
+ CYGNUS_PIN_DESC(113, "nand_cle", 1, 0xc, 24),
+ CYGNUS_PIN_DESC(114, "nand_dq0", 1, 0xc, 22),
+ CYGNUS_PIN_DESC(115, "nand_dq1", 1, 0xc, 20),
+ CYGNUS_PIN_DESC(116, "nand_dq2", 1, 0xc, 18),
+ CYGNUS_PIN_DESC(117, "nand_dq3", 1, 0xc, 16),
+ CYGNUS_PIN_DESC(118, "nand_dq4", 1, 0xc, 14),
+ CYGNUS_PIN_DESC(119, "nand_dq5", 1, 0xc, 12),
+ CYGNUS_PIN_DESC(120, "nand_dq6", 1, 0xc, 10),
+ CYGNUS_PIN_DESC(121, "nand_dq7", 1, 0xc, 8),
+ CYGNUS_PIN_DESC(122, "nand_rb_l", 1, 0xc, 6),
+ CYGNUS_PIN_DESC(123, "nand_re_l", 1, 0xc, 4),
+ CYGNUS_PIN_DESC(124, "nand_we_l", 1, 0xc, 2),
+ CYGNUS_PIN_DESC(125, "nand_wp_l", 1, 0xc, 0),
+ CYGNUS_PIN_DESC(126, "lcd_clac", 1, 0x4, 26),
+ CYGNUS_PIN_DESC(127, "lcd_clcp", 1, 0x4, 24),
+ CYGNUS_PIN_DESC(128, "lcd_cld0", 1, 0x4, 22),
+ CYGNUS_PIN_DESC(129, "lcd_cld1", 1, 0x4, 0),
+ CYGNUS_PIN_DESC(130, "lcd_cld10", 1, 0x4, 20),
+ CYGNUS_PIN_DESC(131, "lcd_cld11", 1, 0x4, 18),
+ CYGNUS_PIN_DESC(132, "lcd_cld12", 1, 0x4, 16),
+ CYGNUS_PIN_DESC(133, "lcd_cld13", 1, 0x4, 14),
+ CYGNUS_PIN_DESC(134, "lcd_cld14", 1, 0x4, 12),
+ CYGNUS_PIN_DESC(135, "lcd_cld15", 1, 0x4, 10),
+ CYGNUS_PIN_DESC(136, "lcd_cld16", 1, 0x4, 8),
+ CYGNUS_PIN_DESC(137, "lcd_cld17", 1, 0x4, 6),
+ CYGNUS_PIN_DESC(138, "lcd_cld18", 1, 0x4, 4),
+ CYGNUS_PIN_DESC(139, "lcd_cld19", 1, 0x4, 2),
+ CYGNUS_PIN_DESC(140, "lcd_cld2", 1, 0x8, 22),
+ CYGNUS_PIN_DESC(141, "lcd_cld20", 1, 0x8, 30),
+ CYGNUS_PIN_DESC(142, "lcd_cld21", 1, 0x8, 28),
+ CYGNUS_PIN_DESC(143, "lcd_cld22", 1, 0x8, 26),
+ CYGNUS_PIN_DESC(144, "lcd_cld23", 1, 0x8, 24),
+ CYGNUS_PIN_DESC(145, "lcd_cld3", 1, 0x8, 20),
+ CYGNUS_PIN_DESC(146, "lcd_cld4", 1, 0x8, 18),
+ CYGNUS_PIN_DESC(147, "lcd_cld5", 1, 0x8, 16),
+ CYGNUS_PIN_DESC(148, "lcd_cld6", 1, 0x8, 14),
+ CYGNUS_PIN_DESC(149, "lcd_cld7", 1, 0x8, 12),
+ CYGNUS_PIN_DESC(150, "lcd_cld8", 1, 0x8, 10),
+ CYGNUS_PIN_DESC(151, "lcd_cld9", 1, 0x8, 8),
+ CYGNUS_PIN_DESC(152, "lcd_clfp", 1, 0x8, 6),
+ CYGNUS_PIN_DESC(153, "lcd_clle", 1, 0x8, 4),
+ CYGNUS_PIN_DESC(154, "lcd_cllp", 1, 0x8, 2),
+ CYGNUS_PIN_DESC(155, "lcd_clpower", 1, 0x8, 0),
+ CYGNUS_PIN_DESC(156, "camera_vsync", 1, 0x4, 30),
+ CYGNUS_PIN_DESC(157, "camera_trigger", 1, 0x0, 0),
+ CYGNUS_PIN_DESC(158, "camera_strobe", 1, 0x0, 2),
+ CYGNUS_PIN_DESC(159, "camera_standby", 1, 0x0, 4),
+ CYGNUS_PIN_DESC(160, "camera_reset_n", 1, 0x0, 6),
+ CYGNUS_PIN_DESC(161, "camera_pixdata9", 1, 0x0, 8),
+ CYGNUS_PIN_DESC(162, "camera_pixdata8", 1, 0x0, 10),
+ CYGNUS_PIN_DESC(163, "camera_pixdata7", 1, 0x0, 12),
+ CYGNUS_PIN_DESC(164, "camera_pixdata6", 1, 0x0, 14),
+ CYGNUS_PIN_DESC(165, "camera_pixdata5", 1, 0x0, 16),
+ CYGNUS_PIN_DESC(166, "camera_pixdata4", 1, 0x0, 18),
+ CYGNUS_PIN_DESC(167, "camera_pixdata3", 1, 0x0, 20),
+ CYGNUS_PIN_DESC(168, "camera_pixdata2", 1, 0x0, 22),
+ CYGNUS_PIN_DESC(169, "camera_pixdata1", 1, 0x0, 24),
+ CYGNUS_PIN_DESC(170, "camera_pixdata0", 1, 0x0, 26),
+ CYGNUS_PIN_DESC(171, "camera_pixclk", 1, 0x0, 28),
+ CYGNUS_PIN_DESC(172, "camera_hsync", 1, 0x0, 30),
+ CYGNUS_PIN_DESC(173, "camera_pll_ref_clk", 0, 0, 0),
+ CYGNUS_PIN_DESC(174, "usb_id_indication", 0, 0, 0),
+ CYGNUS_PIN_DESC(175, "usb_vbus_indication", 0, 0, 0),
+ CYGNUS_PIN_DESC(176, "gpio0_3p3", 0, 0, 0),
+ CYGNUS_PIN_DESC(177, "gpio1_3p3", 0, 0, 0),
+ CYGNUS_PIN_DESC(178, "gpio2_3p3", 0, 0, 0),
+ CYGNUS_PIN_DESC(179, "gpio3_3p3", 0, 0, 0),
+};
+
+/*
+ * List of groups of pins
+ */
+static const unsigned bsc1_pins[] = { 8, 9 };
+static const unsigned pcie_clkreq_pins[] = { 8, 9 };
+
+static const unsigned i2s2_0_pins[] = { 12 };
+static const unsigned i2s2_1_pins[] = { 13 };
+static const unsigned i2s2_2_pins[] = { 14 };
+static const unsigned i2s2_3_pins[] = { 15 };
+static const unsigned i2s2_4_pins[] = { 16 };
+
+static const unsigned pwm4_pins[] = { 17 };
+static const unsigned pwm5_pins[] = { 18 };
+
+static const unsigned key0_pins[] = { 20 };
+static const unsigned key1_pins[] = { 21 };
+static const unsigned key2_pins[] = { 22 };
+static const unsigned key3_pins[] = { 23 };
+static const unsigned key4_pins[] = { 24 };
+static const unsigned key5_pins[] = { 25 };
+
+static const unsigned key6_pins[] = { 26 };
+static const unsigned audio_dte0_pins[] = { 26 };
+
+static const unsigned key7_pins[] = { 27 };
+static const unsigned audio_dte1_pins[] = { 27 };
+
+static const unsigned key8_pins[] = { 28 };
+static const unsigned key9_pins[] = { 29 };
+static const unsigned key10_pins[] = { 30 };
+static const unsigned key11_pins[] = { 31 };
+static const unsigned key12_pins[] = { 32 };
+static const unsigned key13_pins[] = { 33 };
+
+static const unsigned key14_pins[] = { 34 };
+static const unsigned audio_dte2_pins[] = { 34 };
+
+static const unsigned key15_pins[] = { 35 };
+static const unsigned audio_dte3_pins[] = { 35 };
+
+static const unsigned pwm0_pins[] = { 38 };
+static const unsigned pwm1_pins[] = { 39 };
+static const unsigned pwm2_pins[] = { 40 };
+static const unsigned pwm3_pins[] = { 41 };
+
+static const unsigned sdio0_pins[] = { 94, 95, 96, 97, 98, 99 };
+
+static const unsigned smart_card0_pins[] = { 42, 43, 44, 46, 47 };
+static const unsigned i2s0_0_pins[] = { 42, 43, 44, 46 };
+static const unsigned spdif_pins[] = { 47 };
+
+static const unsigned smart_card1_pins[] = { 48, 49, 50, 52, 53 };
+static const unsigned i2s1_0_pins[] = { 48, 49, 50, 52 };
+
+static const unsigned spi0_pins[] = { 54, 55, 56, 57 };
+
+static const unsigned spi1_pins[] = { 58, 59, 60, 61 };
+
+static const unsigned spi2_pins[] = { 62, 63, 64, 65 };
+
+static const unsigned spi3_pins[] = { 66, 67, 68, 69 };
+static const unsigned sw_led0_0_pins[] = { 66, 67, 68, 69 };
+
+static const unsigned d1w_pins[] = { 10, 11 };
+static const unsigned uart4_pins[] = { 10, 11 };
+static const unsigned sw_led2_0_pins[] = { 10, 11 };
+
+static const unsigned lcd_pins[] = { 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155 };
+static const unsigned sram_0_pins[] = { 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155 };
+static const unsigned spi5_pins[] = { 141, 142, 143, 144 };
+
+static const unsigned uart0_pins[] = { 70, 71, 72, 73 };
+static const unsigned sw_led0_1_pins[] = { 70, 71, 72, 73 };
+
+static const unsigned uart1_dte_pins[] = { 75, 76, 77, 78 };
+static const unsigned uart2_pins[] = { 75, 76, 77, 78 };
+
+static const unsigned uart1_pins[] = { 74, 79, 80, 81 };
+
+static const unsigned uart3_pins[] = { 82, 83 };
+
+static const unsigned qspi_0_pins[] = { 104, 105, 106, 107 };
+
+static const unsigned nand_pins[] = { 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125 };
+
+static const unsigned sdio0_cd_pins[] = { 103 };
+
+static const unsigned sdio0_mmc_pins[] = { 100, 101, 102 };
+
+static const unsigned sdio1_data_0_pins[] = { 86, 87 };
+static const unsigned can0_pins[] = { 86, 87 };
+static const unsigned spi4_0_pins[] = { 86, 87 };
+
+static const unsigned sdio1_data_1_pins[] = { 88, 89 };
+static const unsigned can1_pins[] = { 88, 89 };
+static const unsigned spi4_1_pins[] = { 88, 89 };
+
+static const unsigned sdio1_cd_pins[] = { 93 };
+
+static const unsigned sdio1_led_pins[] = { 84, 85 };
+static const unsigned sw_led2_1_pins[] = { 84, 85 };
+
+static const unsigned sdio1_mmc_pins[] = { 90, 91, 92 };
+
+static const unsigned cam_led_pins[] = { 156, 157, 158, 159, 160 };
+static const unsigned sw_led1_pins[] = { 156, 157, 158, 159 };
+
+static const unsigned cam_0_pins[] = { 169, 170, 171, 169, 170 };
+
+static const unsigned cam_1_pins[] = { 161, 162, 163, 164, 165, 166, 167,
+ 168 };
+static const unsigned sram_1_pins[] = { 161, 162, 163, 164, 165, 166, 167,
+ 168 };
+
+static const unsigned qspi_1_pins[] = { 108, 109 };
+
+static const unsigned smart_card0_fcb_pins[] = { 45 };
+static const unsigned i2s0_1_pins[] = { 45 };
+
+static const unsigned smart_card1_fcb_pins[] = { 51 };
+static const unsigned i2s1_1_pins[] = { 51 };
+
+static const unsigned gpio0_3p3_pins[] = { 176 };
+static const unsigned usb0_oc_pins[] = { 176 };
+
+static const unsigned gpio1_3p3_pins[] = { 177 };
+static const unsigned usb1_oc_pins[] = { 177 };
+
+static const unsigned gpio2_3p3_pins[] = { 178 };
+static const unsigned usb2_oc_pins[] = { 178 };
+
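+/*
+ * Each pin group entry below records the IOMUX register byte offset, the
+ * bit shift within that register and the alternate function value ("alt")
+ * that is programmed when the group's function is selected.
+ */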
+#define CYGNUS_PIN_GROUP(group_name, off, sh, al) \
+{ \
+ .name = __stringify(group_name) "_grp", \
+ .pins = group_name ## _pins, \
+ .num_pins = ARRAY_SIZE(group_name ## _pins), \
+ .mux = { \
+ .offset = off, \
+ .shift = sh, \
+ .alt = al, \
+ } \
+}
+
+/*
+ * List of Cygnus pin groups
+ */
+static const struct cygnus_pin_group cygnus_pin_groups[] = {
+ CYGNUS_PIN_GROUP(i2s2_0, 0x0, 0, 2),
+ CYGNUS_PIN_GROUP(i2s2_1, 0x0, 4, 2),
+ CYGNUS_PIN_GROUP(i2s2_2, 0x0, 8, 2),
+ CYGNUS_PIN_GROUP(i2s2_3, 0x0, 12, 2),
+ CYGNUS_PIN_GROUP(i2s2_4, 0x0, 16, 2),
+ CYGNUS_PIN_GROUP(pwm4, 0x0, 20, 0),
+ CYGNUS_PIN_GROUP(pwm5, 0x0, 24, 2),
+ CYGNUS_PIN_GROUP(key0, 0x4, 0, 1),
+ CYGNUS_PIN_GROUP(key1, 0x4, 4, 1),
+ CYGNUS_PIN_GROUP(key2, 0x4, 8, 1),
+ CYGNUS_PIN_GROUP(key3, 0x4, 12, 1),
+ CYGNUS_PIN_GROUP(key4, 0x4, 16, 1),
+ CYGNUS_PIN_GROUP(key5, 0x4, 20, 1),
+ CYGNUS_PIN_GROUP(key6, 0x4, 24, 1),
+ CYGNUS_PIN_GROUP(audio_dte0, 0x4, 24, 2),
+ CYGNUS_PIN_GROUP(key7, 0x4, 28, 1),
+ CYGNUS_PIN_GROUP(audio_dte1, 0x4, 28, 2),
+ CYGNUS_PIN_GROUP(key8, 0x8, 0, 1),
+ CYGNUS_PIN_GROUP(key9, 0x8, 4, 1),
+ CYGNUS_PIN_GROUP(key10, 0x8, 8, 1),
+ CYGNUS_PIN_GROUP(key11, 0x8, 12, 1),
+ CYGNUS_PIN_GROUP(key12, 0x8, 16, 1),
+ CYGNUS_PIN_GROUP(key13, 0x8, 20, 1),
+ CYGNUS_PIN_GROUP(key14, 0x8, 24, 1),
+ CYGNUS_PIN_GROUP(audio_dte2, 0x8, 24, 2),
+ CYGNUS_PIN_GROUP(key15, 0x8, 28, 1),
+ CYGNUS_PIN_GROUP(audio_dte3, 0x8, 28, 2),
+ CYGNUS_PIN_GROUP(pwm0, 0xc, 0, 0),
+ CYGNUS_PIN_GROUP(pwm1, 0xc, 4, 0),
+ CYGNUS_PIN_GROUP(pwm2, 0xc, 8, 0),
+ CYGNUS_PIN_GROUP(pwm3, 0xc, 12, 0),
+ CYGNUS_PIN_GROUP(sdio0, 0xc, 16, 0),
+ CYGNUS_PIN_GROUP(smart_card0, 0xc, 20, 0),
+ CYGNUS_PIN_GROUP(i2s0_0, 0xc, 20, 1),
+ CYGNUS_PIN_GROUP(spdif, 0xc, 20, 1),
+ CYGNUS_PIN_GROUP(smart_card1, 0xc, 24, 0),
+ CYGNUS_PIN_GROUP(i2s1_0, 0xc, 24, 1),
+ CYGNUS_PIN_GROUP(spi0, 0x10, 0, 0),
+ CYGNUS_PIN_GROUP(spi1, 0x10, 4, 0),
+ CYGNUS_PIN_GROUP(spi2, 0x10, 8, 0),
+ CYGNUS_PIN_GROUP(spi3, 0x10, 12, 0),
+ CYGNUS_PIN_GROUP(sw_led0_0, 0x10, 12, 2),
+ CYGNUS_PIN_GROUP(d1w, 0x10, 16, 0),
+ CYGNUS_PIN_GROUP(uart4, 0x10, 16, 1),
+ CYGNUS_PIN_GROUP(sw_led2_0, 0x10, 16, 2),
+ CYGNUS_PIN_GROUP(lcd, 0x10, 20, 0),
+ CYGNUS_PIN_GROUP(sram_0, 0x10, 20, 1),
+ CYGNUS_PIN_GROUP(spi5, 0x10, 20, 2),
+ CYGNUS_PIN_GROUP(uart0, 0x14, 0, 0),
+ CYGNUS_PIN_GROUP(sw_led0_1, 0x14, 0, 2),
+ CYGNUS_PIN_GROUP(uart1_dte, 0x14, 4, 0),
+ CYGNUS_PIN_GROUP(uart2, 0x14, 4, 1),
+ CYGNUS_PIN_GROUP(uart1, 0x14, 8, 0),
+ CYGNUS_PIN_GROUP(uart3, 0x14, 12, 0),
+ CYGNUS_PIN_GROUP(qspi_0, 0x14, 16, 0),
+ CYGNUS_PIN_GROUP(nand, 0x14, 20, 0),
+ CYGNUS_PIN_GROUP(sdio0_cd, 0x18, 0, 0),
+ CYGNUS_PIN_GROUP(sdio0_mmc, 0x18, 4, 0),
+ CYGNUS_PIN_GROUP(sdio1_data_0, 0x18, 8, 0),
+ CYGNUS_PIN_GROUP(can0, 0x18, 8, 1),
+ CYGNUS_PIN_GROUP(spi4_0, 0x18, 8, 2),
+ CYGNUS_PIN_GROUP(sdio1_data_1, 0x18, 12, 0),
+ CYGNUS_PIN_GROUP(can1, 0x18, 12, 1),
+ CYGNUS_PIN_GROUP(spi4_1, 0x18, 12, 2),
+ CYGNUS_PIN_GROUP(sdio1_cd, 0x18, 16, 0),
+ CYGNUS_PIN_GROUP(sdio1_led, 0x18, 20, 0),
+ CYGNUS_PIN_GROUP(sw_led2_1, 0x18, 20, 2),
+ CYGNUS_PIN_GROUP(sdio1_mmc, 0x18, 24, 0),
+ CYGNUS_PIN_GROUP(cam_led, 0x1c, 0, 0),
+ CYGNUS_PIN_GROUP(sw_led1, 0x1c, 0, 1),
+ CYGNUS_PIN_GROUP(cam_0, 0x1c, 4, 0),
+ CYGNUS_PIN_GROUP(cam_1, 0x1c, 8, 0),
+ CYGNUS_PIN_GROUP(sram_1, 0x1c, 8, 1),
+ CYGNUS_PIN_GROUP(qspi_1, 0x1c, 12, 0),
+ CYGNUS_PIN_GROUP(bsc1, 0x1c, 16, 0),
+ CYGNUS_PIN_GROUP(pcie_clkreq, 0x1c, 16, 1),
+ CYGNUS_PIN_GROUP(smart_card0_fcb, 0x20, 0, 0),
+ CYGNUS_PIN_GROUP(i2s0_1, 0x20, 0, 1),
+ CYGNUS_PIN_GROUP(smart_card1_fcb, 0x20, 4, 0),
+ CYGNUS_PIN_GROUP(i2s1_1, 0x20, 4, 1),
+ CYGNUS_PIN_GROUP(gpio0_3p3, 0x28, 0, 0),
+ CYGNUS_PIN_GROUP(usb0_oc, 0x28, 0, 1),
+ CYGNUS_PIN_GROUP(gpio1_3p3, 0x28, 4, 0),
+ CYGNUS_PIN_GROUP(usb1_oc, 0x28, 4, 1),
+ CYGNUS_PIN_GROUP(gpio2_3p3, 0x28, 8, 0),
+ CYGNUS_PIN_GROUP(usb2_oc, 0x28, 8, 1),
+};
+
+/*
+ * List of groups supported by functions
+ */
+static const char * const i2s0_grps[] = { "i2s0_0_grp", "i2s0_1_grp" };
+static const char * const i2s1_grps[] = { "i2s1_0_grp", "i2s1_1_grp" };
+static const char * const i2s2_grps[] = { "i2s2_0_grp", "i2s2_1_grp",
+ "i2s2_2_grp", "i2s2_3_grp", "i2s2_4_grp" };
+static const char * const spdif_grps[] = { "spdif_grp" };
+static const char * const pwm0_grps[] = { "pwm0_grp" };
+static const char * const pwm1_grps[] = { "pwm1_grp" };
+static const char * const pwm2_grps[] = { "pwm2_grp" };
+static const char * const pwm3_grps[] = { "pwm3_grp" };
+static const char * const pwm4_grps[] = { "pwm4_grp" };
+static const char * const pwm5_grps[] = { "pwm5_grp" };
+static const char * const key_grps[] = { "key0_grp", "key1_grp", "key2_grp",
+ "key3_grp", "key4_grp", "key5_grp", "key6_grp", "key7_grp", "key8_grp",
+ "key9_grp", "key10_grp", "key11_grp", "key12_grp", "key13_grp",
+ "key14_grp", "key15_grp" };
+static const char * const audio_dte_grps[] = { "audio_dte0_grp",
+ "audio_dte1_grp", "audio_dte2_grp", "audio_dte3_grp" };
+static const char * const smart_card0_grps[] = { "smart_card0_grp",
+ "smart_card0_fcb_grp" };
+static const char * const smart_card1_grps[] = { "smart_card1_grp",
+ "smart_card1_fcb_grp" };
+static const char * const spi0_grps[] = { "spi0_grp" };
+static const char * const spi1_grps[] = { "spi1_grp" };
+static const char * const spi2_grps[] = { "spi2_grp" };
+static const char * const spi3_grps[] = { "spi3_grp" };
+static const char * const spi4_grps[] = { "spi4_0_grp", "spi4_1_grp" };
+static const char * const spi5_grps[] = { "spi5_grp" };
+
+static const char * const sw_led0_grps[] = { "sw_led0_0_grp",
+ "sw_led0_1_grp" };
+static const char * const sw_led1_grps[] = { "sw_led1_grp" };
+static const char * const sw_led2_grps[] = { "sw_led2_0_grp",
+ "sw_led2_1_grp" };
+static const char * const d1w_grps[] = { "d1w_grp" };
+static const char * const lcd_grps[] = { "lcd_grp" };
+static const char * const sram_grps[] = { "sram_0_grp", "sram_1_grp" };
+
+static const char * const uart0_grps[] = { "uart0_grp" };
+static const char * const uart1_grps[] = { "uart1_grp", "uart1_dte_grp" };
+static const char * const uart2_grps[] = { "uart2_grp" };
+static const char * const uart3_grps[] = { "uart3_grp" };
+static const char * const uart4_grps[] = { "uart4_grp" };
+static const char * const qspi_grps[] = { "qspi_0_grp", "qspi_1_grp" };
+static const char * const nand_grps[] = { "nand_grp" };
+static const char * const sdio0_grps[] = { "sdio0_grp", "sdio0_cd_grp",
+ "sdio0_mmc_grp" };
+static const char * const sdio1_grps[] = { "sdio1_data_0_grp",
+ "sdio1_data_1_grp", "sdio1_cd_grp", "sdio1_led_grp", "sdio1_mmc_grp" };
+static const char * const can0_grps[] = { "can0_grp" };
+static const char * const can1_grps[] = { "can1_grp" };
+static const char * const cam_grps[] = { "cam_led_grp", "cam_0_grp",
+ "cam_1_grp" };
+static const char * const bsc1_grps[] = { "bsc1_grp" };
+static const char * const pcie_clkreq_grps[] = { "pcie_clkreq_grp" };
+static const char * const usb0_oc_grps[] = { "usb0_oc_grp" };
+static const char * const usb1_oc_grps[] = { "usb1_oc_grp" };
+static const char * const usb2_oc_grps[] = { "usb2_oc_grp" };
+
+#define CYGNUS_PIN_FUNCTION(func) \
+{ \
+ .name = #func, \
+ .groups = func ## _grps, \
+ .num_groups = ARRAY_SIZE(func ## _grps), \
+}
+
+/*
+ * List of supported functions in Cygnus
+ */
+static const struct cygnus_pin_function cygnus_pin_functions[] = {
+ CYGNUS_PIN_FUNCTION(i2s0),
+ CYGNUS_PIN_FUNCTION(i2s1),
+ CYGNUS_PIN_FUNCTION(i2s2),
+ CYGNUS_PIN_FUNCTION(spdif),
+ CYGNUS_PIN_FUNCTION(pwm0),
+ CYGNUS_PIN_FUNCTION(pwm1),
+ CYGNUS_PIN_FUNCTION(pwm2),
+ CYGNUS_PIN_FUNCTION(pwm3),
+ CYGNUS_PIN_FUNCTION(pwm4),
+ CYGNUS_PIN_FUNCTION(pwm5),
+ CYGNUS_PIN_FUNCTION(key),
+ CYGNUS_PIN_FUNCTION(audio_dte),
+ CYGNUS_PIN_FUNCTION(smart_card0),
+ CYGNUS_PIN_FUNCTION(smart_card1),
+ CYGNUS_PIN_FUNCTION(spi0),
+ CYGNUS_PIN_FUNCTION(spi1),
+ CYGNUS_PIN_FUNCTION(spi2),
+ CYGNUS_PIN_FUNCTION(spi3),
+ CYGNUS_PIN_FUNCTION(spi4),
+ CYGNUS_PIN_FUNCTION(spi5),
+ CYGNUS_PIN_FUNCTION(sw_led0),
+ CYGNUS_PIN_FUNCTION(sw_led1),
+ CYGNUS_PIN_FUNCTION(sw_led2),
+ CYGNUS_PIN_FUNCTION(d1w),
+ CYGNUS_PIN_FUNCTION(lcd),
+ CYGNUS_PIN_FUNCTION(sram),
+ CYGNUS_PIN_FUNCTION(uart0),
+ CYGNUS_PIN_FUNCTION(uart1),
+ CYGNUS_PIN_FUNCTION(uart2),
+ CYGNUS_PIN_FUNCTION(uart3),
+ CYGNUS_PIN_FUNCTION(uart4),
+ CYGNUS_PIN_FUNCTION(qspi),
+ CYGNUS_PIN_FUNCTION(nand),
+ CYGNUS_PIN_FUNCTION(sdio0),
+ CYGNUS_PIN_FUNCTION(sdio1),
+ CYGNUS_PIN_FUNCTION(can0),
+ CYGNUS_PIN_FUNCTION(can1),
+ CYGNUS_PIN_FUNCTION(cam),
+ CYGNUS_PIN_FUNCTION(bsc1),
+ CYGNUS_PIN_FUNCTION(pcie_clkreq),
+ CYGNUS_PIN_FUNCTION(usb0_oc),
+ CYGNUS_PIN_FUNCTION(usb1_oc),
+ CYGNUS_PIN_FUNCTION(usb2_oc),
+};
+
+static int cygnus_get_groups_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->num_groups;
+}
+
+static const char *cygnus_get_group_name(struct pinctrl_dev *pctrl_dev,
+ unsigned selector)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->groups[selector].name;
+}
+
+static int cygnus_get_group_pins(struct pinctrl_dev *pctrl_dev,
+ unsigned selector, const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *pins = pinctrl->groups[selector].pins;
+ *num_pins = pinctrl->groups[selector].num_pins;
+
+ return 0;
+}
+
+static void cygnus_pin_dbg_show(struct pinctrl_dev *pctrl_dev,
+ struct seq_file *s, unsigned offset)
+{
+ seq_printf(s, " %s", dev_name(pctrl_dev->dev));
+}
+
+static const struct pinctrl_ops cygnus_pinctrl_ops = {
+ .get_groups_count = cygnus_get_groups_count,
+ .get_group_name = cygnus_get_group_name,
+ .get_group_pins = cygnus_get_group_pins,
+ .pin_dbg_show = cygnus_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int cygnus_get_functions_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->num_functions;
+}
+
+static const char *cygnus_get_function_name(struct pinctrl_dev *pctrl_dev,
+ unsigned selector)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->functions[selector].name;
+}
+
+static int cygnus_get_function_groups(struct pinctrl_dev *pctrl_dev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *groups = pinctrl->functions[selector].groups;
+ *num_groups = pinctrl->functions[selector].num_groups;
+
+ return 0;
+}
+
+static int cygnus_pinmux_set(struct cygnus_pinctrl *pinctrl,
+ const struct cygnus_pin_function *func,
+ const struct cygnus_pin_group *grp,
+ struct cygnus_mux_log *mux_log)
+{
+ const struct cygnus_mux *mux = &grp->mux;
+ int i;
+ u32 val, mask = 0x7;
+ unsigned long flags;
+
+ for (i = 0; i < CYGNUS_NUM_IOMUX; i++) {
+ if (mux->offset != mux_log[i].mux.offset ||
+ mux->shift != mux_log[i].mux.shift)
+ continue;
+
+ /* match found if we reach here */
+
+ /* if this is a new configuration, just do it! */
+ if (!mux_log[i].is_configured)
+ break;
+
+ /*
+ * This IOMUX has been configured previously and the caller is now
+ * trying to switch it to a different function
+ */
+ if (mux_log[i].mux.alt != mux->alt) {
+ dev_err(pinctrl->dev,
+ "double configuration error detected!\n");
+ dev_err(pinctrl->dev, "func:%s grp:%s\n",
+ func->name, grp->name);
+ return -EINVAL;
+ } else {
+ /*
+ * The same function is being configured again;
+ * nothing more needs to be done
+ */
+ return 0;
+ }
+ }
+
+ mux_log[i].mux.alt = mux->alt;
+ mux_log[i].is_configured = true;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+
+ val = readl(pinctrl->base0 + grp->mux.offset);
+ val &= ~(mask << grp->mux.shift);
+ val |= grp->mux.alt << grp->mux.shift;
+ writel(val, pinctrl->base0 + grp->mux.offset);
+
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ return 0;
+}
+
+static int cygnus_pinmux_set_mux(struct pinctrl_dev *pctrl_dev,
+ unsigned func_select, unsigned grp_select)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ const struct cygnus_pin_function *func =
+ &pinctrl->functions[func_select];
+ const struct cygnus_pin_group *grp = &pinctrl->groups[grp_select];
+
+ dev_dbg(pctrl_dev->dev, "func:%u name:%s grp:%u name:%s\n",
+ func_select, func->name, grp_select, grp->name);
+
+ dev_dbg(pctrl_dev->dev, "offset:0x%08x shift:%u alt:%u\n",
+ grp->mux.offset, grp->mux.shift, grp->mux.alt);
+
+ return cygnus_pinmux_set(pinctrl, func, grp, pinctrl->mux_log);
+}
+
+static int cygnus_gpio_request_enable(struct pinctrl_dev *pctrl_dev,
+ struct pinctrl_gpio_range *range,
+ unsigned pin)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ const struct cygnus_gpio_mux *mux = pctrl_dev->desc->pins[pin].drv_data;
+ u32 val;
+ unsigned long flags;
+
+ /* not all pins support GPIO pinmux override */
+ if (!mux->is_supported)
+ return -ENOTSUPP;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+
+ val = readl(pinctrl->base1 + mux->offset);
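+ /* set the pad's 2-bit GPIO pinmux override field to hand the pin to GPIO */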
+ val |= 0x3 << mux->shift;
+ writel(val, pinctrl->base1 + mux->offset);
+
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrl_dev->dev,
+ "gpio request enable pin=%u offset=0x%x shift=%u\n",
+ pin, mux->offset, mux->shift);
+
+ return 0;
+}
+
+static void cygnus_gpio_disable_free(struct pinctrl_dev *pctrl_dev,
+ struct pinctrl_gpio_range *range,
+ unsigned pin)
+{
+ struct cygnus_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ struct cygnus_gpio_mux *mux = pctrl_dev->desc->pins[pin].drv_data;
+ u32 val;
+ unsigned long flags;
+
+ if (!mux->is_supported)
+ return;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+
+ val = readl(pinctrl->base1 + mux->offset);
+ val &= ~(0x3 << mux->shift);
+ writel(val, pinctrl->base1 + mux->offset);
+
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrl_dev->dev,
+ "gpio disable free pin=%u offset=0x%x shift=%u\n",
+ pin, mux->offset, mux->shift);
+}
+
+static const struct pinmux_ops cygnus_pinmux_ops = {
+ .get_functions_count = cygnus_get_functions_count,
+ .get_function_name = cygnus_get_function_name,
+ .get_function_groups = cygnus_get_function_groups,
+ .set_mux = cygnus_pinmux_set_mux,
+ .gpio_request_enable = cygnus_gpio_request_enable,
+ .gpio_disable_free = cygnus_gpio_disable_free,
+};
+
+static struct pinctrl_desc cygnus_pinctrl_desc = {
+ .name = "cygnus-pinmux",
+ .pctlops = &cygnus_pinctrl_ops,
+ .pmxops = &cygnus_pinmux_ops,
+};
+
+static int cygnus_mux_log_init(struct cygnus_pinctrl *pinctrl)
+{
+ struct cygnus_mux_log *log;
+ unsigned int i, j;
+
+ pinctrl->mux_log = devm_kcalloc(pinctrl->dev, CYGNUS_NUM_IOMUX,
+ sizeof(struct cygnus_mux_log),
+ GFP_KERNEL);
+ if (!pinctrl->mux_log)
+ return -ENOMEM;
+
+ log = pinctrl->mux_log;
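+ /*
+ * Each 32-bit IOMUX register holds CYGNUS_NUM_MUX_PER_REG mux fields
+ * spaced 4 bits apart. Record their offsets and shifts so that
+ * conflicting mux requests can be detected later.
+ */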
+ for (i = 0; i < CYGNUS_NUM_IOMUX_REGS; i++) {
+ for (j = 0; j < CYGNUS_NUM_MUX_PER_REG; j++) {
+ log = &pinctrl->mux_log[i * CYGNUS_NUM_MUX_PER_REG
+ + j];
+ log->mux.offset = i * 4;
+ log->mux.shift = j * 4;
+ log->mux.alt = 0;
+ log->is_configured = false;
+ }
+ }
+
+ return 0;
+}
+
+static int cygnus_pinmux_probe(struct platform_device *pdev)
+{
+ struct cygnus_pinctrl *pinctrl;
+ struct resource *res;
+ int i, ret;
+ struct pinctrl_pin_desc *pins;
+ unsigned num_pins = ARRAY_SIZE(cygnus_pins);
+
+ pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
+ if (!pinctrl)
+ return -ENOMEM;
+
+ pinctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pinctrl);
+ spin_lock_init(&pinctrl->lock);
+
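+ /* first register window: IOMUX function select registers */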
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pinctrl->base0)) {
+ dev_err(&pdev->dev, "unable to map I/O space\n");
+ return PTR_ERR(pinctrl->base0);
+ }
+
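+ /* second register window: per-pin GPIO pinmux override registers */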
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pinctrl->base1 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pinctrl->base1)) {
+ dev_err(&pdev->dev, "unable to map I/O space\n");
+ return PTR_ERR(pinctrl->base1);
+ }
+
+ ret = cygnus_mux_log_init(pinctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to initialize IOMUX log\n");
+ return ret;
+ }
+
+ pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pins; i++) {
+ pins[i].number = cygnus_pins[i].pin;
+ pins[i].name = cygnus_pins[i].name;
+ pins[i].drv_data = &cygnus_pins[i].gpio_mux;
+ }
+
+ pinctrl->groups = cygnus_pin_groups;
+ pinctrl->num_groups = ARRAY_SIZE(cygnus_pin_groups);
+ pinctrl->functions = cygnus_pin_functions;
+ pinctrl->num_functions = ARRAY_SIZE(cygnus_pin_functions);
+ cygnus_pinctrl_desc.pins = pins;
+ cygnus_pinctrl_desc.npins = num_pins;
+
+ pinctrl->pctl = pinctrl_register(&cygnus_pinctrl_desc, &pdev->dev,
+ pinctrl);
+ if (!pinctrl->pctl) {
+ dev_err(&pdev->dev, "unable to register Cygnus IOMUX pinctrl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cygnus_pinmux_of_match[] = {
+ { .compatible = "brcm,cygnus-pinmux" },
+ { }
+};
+
+static struct platform_driver cygnus_pinmux_driver = {
+ .driver = {
+ .name = "cygnus-pinmux",
+ .of_match_table = cygnus_pinmux_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = cygnus_pinmux_probe,
+};
+
+static int __init cygnus_pinmux_init(void)
+{
+ return platform_driver_register(&cygnus_pinmux_driver);
+}
+arch_initcall(cygnus_pinmux_init);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Cygnus IOMUX driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 89dca77ca038..18ee2089df4a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
EXPORT_SYMBOL_GPL(devm_pinctrl_put);
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- bool dup, bool locked)
+ bool dup)
{
int i, ret;
struct pinctrl_maps *maps_node;
@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
maps_node->maps = maps;
}
- if (!locked)
- mutex_lock(&pinctrl_maps_mutex);
+ mutex_lock(&pinctrl_maps_mutex);
list_add_tail(&maps_node->node, &pinctrl_maps);
- if (!locked)
- mutex_unlock(&pinctrl_maps_mutex);
+ mutex_unlock(&pinctrl_maps_mutex);
return 0;
}
@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
int pinctrl_register_mappings(struct pinctrl_map const *maps,
unsigned num_maps)
{
- return pinctrl_register_map(maps, num_maps, true, false);
+ return pinctrl_register_map(maps, num_maps, true);
}
void pinctrl_unregister_map(struct pinctrl_map const *map)
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 75476b3d87da..b24ea846c867 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
}
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- bool dup, bool locked);
+ bool dup);
void pinctrl_unregister_map(struct pinctrl_map const *map);
extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index eda13de2e7c0..0bbf7d71b281 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
dt_map->num_maps = num_maps;
list_add_tail(&dt_map->node, &p->dt_maps);
- return pinctrl_register_map(map, num_maps, false, true);
+ return pinctrl_register_map(map, num_maps, false);
}
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 448f10986c28..e261f1cf85c6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -542,10 +542,13 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
struct imx_pin_reg *pin_reg;
struct imx_pin *pin = &grp->pins[i];
- if (info->flags & SHARE_MUX_CONF_REG)
+ if (info->flags & SHARE_MUX_CONF_REG) {
conf_reg = mux_reg;
- else
+ } else {
conf_reg = be32_to_cpu(*list++);
+ if (!conf_reg)
+ conf_reg = -1;
+ }
pin_id = mux_reg ? mux_reg / 4 : conf_reg / 4;
pin_reg = &info->pin_regs[pin_id];
@@ -645,7 +648,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
{
struct imx_pinctrl *ipctl;
struct resource *res;
- int ret;
+ int ret, i;
if (!info || !info->pins || !info->npins) {
dev_err(&pdev->dev, "wrong pinctrl info\n");
@@ -662,7 +665,11 @@ int imx_pinctrl_probe(struct platform_device *pdev,
info->npins, GFP_KERNEL);
if (!info->pin_regs)
return -ENOMEM;
- memset(info->pin_regs, 0xff, sizeof(*info->pin_regs) * info->npins);
+
+ for (i = 0; i < info->npins; i++) {
+ info->pin_regs[i].mux_reg = -1;
+ info->pin_regs[i].conf_reg = -1;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ipctl->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index fc86276892fd..37a037543d29 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -302,7 +302,7 @@ static struct imx_pinctrl_soc_info vf610_pinctrl_info = {
.flags = SHARE_MUX_CONF_REG,
};
-static struct of_device_id vf610_pinctrl_of_match[] = {
+static const struct of_device_id vf610_pinctrl_of_match[] = {
{ .compatible = "fsl,vf610-iomuxc", },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index b801d869e91c..fe5e07db0a95 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -25,3 +25,20 @@ config PINCTRL_CHERRYVIEW
help
Cherryview/Braswell pinctrl driver provides an interface that
allows configuring of SoC pins and using them as GPIOs.
+
+config PINCTRL_INTEL
+ tristate
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+
+config PINCTRL_SUNRISEPOINT
+ tristate "Intel Sunrisepoint pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ Sunrisepoint is the PCH of Intel Skylake. This pinctrl driver
+ provides an interface that allows configuring of PCH pins and
+ using them as GPIOs.
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 4c210e4139e2..fee756e1255b 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -2,3 +2,5 @@
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
+obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
+obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
new file mode 100644
index 000000000000..00768e53deec
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -0,0 +1,1149 @@
+/*
+ * Intel pinctrl/GPIO core driver.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-intel.h"
+
+/* Maximum number of pads in each group */
+#define NPADS_IN_GPP 24
+
+/* Offset from regs */
+#define PADBAR 0x00c
+#define GPI_IS 0x100
+#define GPI_GPE_STS 0x140
+#define GPI_GPE_EN 0x160
+
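+/* Each pad has a 4-bit ownership field; eight pads per 32-bit PAD_OWN register */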
+#define PADOWN_BITS 4
+#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
+#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p))
+
+/* Offset from pad_regs */
+#define PADCFG0 0x000
+#define PADCFG0_RXEVCFG_SHIFT 25
+#define PADCFG0_RXEVCFG_MASK (3 << PADCFG0_RXEVCFG_SHIFT)
+#define PADCFG0_RXEVCFG_LEVEL 0
+#define PADCFG0_RXEVCFG_EDGE 1
+#define PADCFG0_RXEVCFG_DISABLED 2
+#define PADCFG0_RXEVCFG_EDGE_BOTH 3
+#define PADCFG0_RXINV BIT(23)
+#define PADCFG0_GPIROUTIOXAPIC BIT(20)
+#define PADCFG0_GPIROUTSCI BIT(19)
+#define PADCFG0_GPIROUTSMI BIT(18)
+#define PADCFG0_GPIROUTNMI BIT(17)
+#define PADCFG0_PMODE_SHIFT 10
+#define PADCFG0_PMODE_MASK (0xf << PADCFG0_PMODE_SHIFT)
+#define PADCFG0_GPIORXDIS BIT(9)
+#define PADCFG0_GPIOTXDIS BIT(8)
+#define PADCFG0_GPIORXSTATE BIT(1)
+#define PADCFG0_GPIOTXSTATE BIT(0)
+
+#define PADCFG1 0x004
+#define PADCFG1_TERM_UP BIT(13)
+#define PADCFG1_TERM_SHIFT 10
+#define PADCFG1_TERM_MASK (7 << PADCFG1_TERM_SHIFT)
+#define PADCFG1_TERM_20K 4
+#define PADCFG1_TERM_2K 3
+#define PADCFG1_TERM_5K 2
+#define PADCFG1_TERM_1K 1
+
+struct intel_pad_context {
+ u32 padcfg0;
+ u32 padcfg1;
+};
+
+struct intel_community_context {
+ u32 *intmask;
+};
+
+struct intel_pinctrl_context {
+ struct intel_pad_context *pads;
+ struct intel_community_context *communities;
+};
+
+/**
+ * struct intel_pinctrl - Intel pinctrl private structure
+ * @dev: Pointer to the device structure
+ * @lock: Lock to serialize register access
+ * @pctldesc: Pin controller description
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @soc: SoC/PCH specific pin configuration data
+ * @communities: All communities in this pin controller
+ * @ncommunities: Number of communities in this pin controller
+ * @context: Configuration saved over system sleep
+ */
+struct intel_pinctrl {
+ struct device *dev;
+ spinlock_t lock;
+ struct pinctrl_desc pctldesc;
+ struct pinctrl_dev *pctldev;
+ struct gpio_chip chip;
+ const struct intel_pinctrl_soc_data *soc;
+ struct intel_community *communities;
+ size_t ncommunities;
+ struct intel_pinctrl_context context;
+};
+
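+/*
+ * Helpers to recover the driver data from a gpio_chip and to convert a
+ * global pin number into a pad index relative to its community.
+ */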
+#define gpiochip_to_pinctrl(c) container_of(c, struct intel_pinctrl, chip)
+#define pin_to_padno(c, p) ((p) - (c)->pin_base)
+
+static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl,
+ unsigned pin)
+{
+ struct intel_community *community;
+ int i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ community = &pctrl->communities[i];
+ if (pin >= community->pin_base &&
+ pin < community->pin_base + community->npins)
+ return community;
+ }
+
+ dev_warn(pctrl->dev, "failed to find community for pin %u\n", pin);
+ return NULL;
+}
+
+static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
+ unsigned reg)
+{
+ const struct intel_community *community;
+ unsigned padno;
+
+ community = intel_get_community(pctrl, pin);
+ if (!community)
+ return NULL;
+
+ padno = pin_to_padno(community, pin);
+ return community->pad_regs + reg + padno * 8;
+}
+
+static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ const struct intel_community *community;
+ unsigned padno, gpp, gpp_offset, offset;
+ void __iomem *padown;
+
+ community = intel_get_community(pctrl, pin);
+ if (!community)
+ return false;
+ if (!community->padown_offset)
+ return true;
+
+ padno = pin_to_padno(community, pin);
+ gpp = padno / NPADS_IN_GPP;
+ gpp_offset = padno % NPADS_IN_GPP;
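+ /* each pad group owns a 16-byte block of PAD_OWN registers, 8 pads per register */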
+ offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4;
+ padown = community->regs + offset;
+
+ return !(readl(padown) & PADOWN_MASK(padno));
+}
+
+static bool intel_pad_reserved_for_acpi(struct intel_pinctrl *pctrl,
+ unsigned pin)
+{
+ const struct intel_community *community;
+ unsigned padno, gpp, offset;
+ void __iomem *hostown;
+
+ community = intel_get_community(pctrl, pin);
+ if (!community)
+ return true;
+ if (!community->hostown_offset)
+ return false;
+
+ padno = pin_to_padno(community, pin);
+ gpp = padno / NPADS_IN_GPP;
+ offset = community->hostown_offset + gpp * 4;
+ hostown = community->regs + offset;
+
+ return !(readl(hostown) & BIT(padno % NPADS_IN_GPP));
+}
+
+static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ struct intel_community *community;
+ unsigned padno, gpp, offset;
+ u32 value;
+
+ community = intel_get_community(pctrl, pin);
+ if (!community)
+ return true;
+ if (!community->padcfglock_offset)
+ return false;
+
+ padno = pin_to_padno(community, pin);
+ gpp = padno / NPADS_IN_GPP;
+
+ /*
+ * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
+ * the pad is considered unlocked. Any other case means that it is
+ * either fully or partially locked and we don't touch it.
+ */
+ offset = community->padcfglock_offset + gpp * 8;
+ value = readl(community->regs + offset);
+ if (value & BIT(padno % NPADS_IN_GPP))
+ return true;
+
+ offset = community->padcfglock_offset + 4 + gpp * 8;
+ value = readl(community->regs + offset);
+ if (value & BIT(padno % NPADS_IN_GPP))
+ return true;
+
+ return false;
+}
+
+static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ return intel_pad_owned_by_host(pctrl, pin) &&
+ !intel_pad_reserved_for_acpi(pctrl, pin) &&
+ !intel_pad_locked(pctrl, pin);
+}
+
+static int intel_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->ngroups;
+}
+
+static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->groups[group].name;
+}
+
+static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
+ const unsigned **pins, unsigned *npins)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->soc->groups[group].pins;
+ *npins = pctrl->soc->groups[group].npins;
+ return 0;
+}
+
+static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned pin)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ u32 cfg0, cfg1, mode;
+ bool locked, acpi;
+
+ if (!intel_pad_owned_by_host(pctrl, pin)) {
+ seq_puts(s, "not available");
+ return;
+ }
+
+ cfg0 = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
+ cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
+
+ mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
+ if (!mode)
+ seq_puts(s, "GPIO ");
+ else
+ seq_printf(s, "mode %d ", mode);
+
+ seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1);
+
+ locked = intel_pad_locked(pctrl, pin);
+ acpi = intel_pad_reserved_for_acpi(pctrl, pin);
+
+ if (locked || acpi) {
+ seq_puts(s, " [");
+ if (locked) {
+ seq_puts(s, "LOCKED");
+ if (acpi)
+ seq_puts(s, ", ");
+ }
+ if (acpi)
+ seq_puts(s, "ACPI");
+ seq_puts(s, "]");
+ }
+}
+
+static const struct pinctrl_ops intel_pinctrl_ops = {
+ .get_groups_count = intel_get_groups_count,
+ .get_group_name = intel_get_group_name,
+ .get_group_pins = intel_get_group_pins,
+ .pin_dbg_show = intel_pin_dbg_show,
+};
+
+static int intel_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->nfunctions;
+}
+
+static const char *intel_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->functions[function].name;
+}
+
+static int intel_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const ngroups)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->soc->functions[function].groups;
+ *ngroups = pctrl->soc->functions[function].ngroups;
+ return 0;
+}
+
+static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned group)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pingroup *grp = &pctrl->soc->groups[group];
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ /*
+ * All pins in the group need to be accessible and writable
+ * before we can enable the mux for this group.
+ */
+ for (i = 0; i < grp->npins; i++) {
+ if (!intel_pad_usable(pctrl, grp->pins[i])) {
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ return -EBUSY;
+ }
+ }
+
+ /* Now enable the mux setting for each pin in the group */
+ for (i = 0; i < grp->npins; i++) {
+ void __iomem *padcfg0;
+ u32 value;
+
+ padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
+ value = readl(padcfg0);
+
+ value &= ~PADCFG0_PMODE_MASK;
+ value |= grp->mode << PADCFG0_PMODE_SHIFT;
+
+ writel(value, padcfg0);
+ }
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned pin)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *padcfg0;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ if (!intel_pad_usable(pctrl, pin)) {
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ return -EBUSY;
+ }
+
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+ /* Put the pad into GPIO mode */
+ value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+ /* Disable SCI/SMI/NMI generation */
+ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+ value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+ /* Disable TX buffer and enable RX (this will be input) */
+ value &= ~PADCFG0_GPIORXDIS;
+ value |= PADCFG0_GPIOTXDIS;
+ writel(value, padcfg0);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned pin, bool input)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *padcfg0;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+ value = readl(padcfg0);
+ if (input)
+ value |= PADCFG0_GPIOTXDIS;
+ else
+ value &= ~PADCFG0_GPIOTXDIS;
+ writel(value, padcfg0);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops intel_pinmux_ops = {
+ .get_functions_count = intel_get_functions_count,
+ .get_function_name = intel_get_function_name,
+ .get_function_groups = intel_get_function_groups,
+ .set_mux = intel_pinmux_set_mux,
+ .gpio_request_enable = intel_gpio_request_enable,
+ .gpio_set_direction = intel_gpio_set_direction,
+};
+
+static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 value, term;
+ u16 arg = 0;
+
+ if (!intel_pad_owned_by_host(pctrl, pin))
+ return -ENOTSUPP;
+
+ value = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
+ term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (term)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!term || !(value & PADCFG1_TERM_UP))
+ return -EINVAL;
+
+ switch (term) {
+ case PADCFG1_TERM_1K:
+ arg = 1000;
+ break;
+ case PADCFG1_TERM_2K:
+ arg = 2000;
+ break;
+ case PADCFG1_TERM_5K:
+ arg = 5000;
+ break;
+ case PADCFG1_TERM_20K:
+ arg = 20000;
+ break;
+ }
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!term || value & PADCFG1_TERM_UP)
+ return -EINVAL;
+
+ switch (term) {
+ case PADCFG1_TERM_5K:
+ arg = 5000;
+ break;
+ case PADCFG1_TERM_20K:
+ arg = 20000;
+ break;
+ }
+
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
+ unsigned long config)
+{
+ unsigned param = pinconf_to_config_param(config);
+ unsigned arg = pinconf_to_config_argument(config);
+ void __iomem *padcfg1;
+ unsigned long flags;
+ int ret = 0;
+ u32 value;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
+ value = readl(padcfg1);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ value &= ~(PADCFG1_TERM_MASK | PADCFG1_TERM_UP);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value &= ~PADCFG1_TERM_MASK;
+
+ value |= PADCFG1_TERM_UP;
+
+ switch (arg) {
+ case 20000:
+ value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+ break;
+ case 5000:
+ value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
+ break;
+ case 2000:
+ value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT;
+ break;
+ case 1000:
+ value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
+
+ switch (arg) {
+ case 20000:
+ value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+ break;
+ case 5000:
+ value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ break;
+ }
+
+ if (!ret)
+ writel(value, padcfg1);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return ret;
+}
+
+static int intel_config_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, unsigned nconfigs)
+{
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ int i, ret;
+
+ if (!intel_pad_usable(pctrl, pin))
+ return -ENOTSUPP;
+
+ for (i = 0; i < nconfigs; i++) {
+ switch (pinconf_to_config_param(configs[i])) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = intel_config_set_pull(pctrl, pin, configs[i]);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops intel_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = intel_config_get,
+ .pin_config_set = intel_config_set,
+};
+
+static const struct pinctrl_desc intel_pinctrl_desc = {
+ .pctlops = &intel_pinctrl_ops,
+ .pmxops = &intel_pinmux_ops,
+ .confops = &intel_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void intel_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
+ void __iomem *reg;
+
+ reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ if (!reg)
+ return -EINVAL;
+
+ return !!(readl(reg) & PADCFG0_GPIORXSTATE);
+}
+
+static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
+ void __iomem *reg;
+
+ reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ if (reg) {
+ unsigned long flags;
+ u32 padcfg0;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ padcfg0 = readl(reg);
+ if (value)
+ padcfg0 |= PADCFG0_GPIOTXSTATE;
+ else
+ padcfg0 &= ~PADCFG0_GPIOTXSTATE;
+ writel(padcfg0, reg);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+}
+
+static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ intel_gpio_set(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static const struct gpio_chip intel_gpio_chip = {
+ .owner = THIS_MODULE,
+ .request = intel_gpio_request,
+ .free = intel_gpio_free,
+ .direction_input = intel_gpio_direction_input,
+ .direction_output = intel_gpio_direction_output,
+ .get = intel_gpio_get,
+ .set = intel_gpio_set,
+};
+
+static void intel_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ const struct intel_community *community;
+ unsigned pin = irqd_to_hwirq(d);
+
+ spin_lock(&pctrl->lock);
+
+ community = intel_get_community(pctrl, pin);
+ if (community) {
+ unsigned padno = pin_to_padno(community, pin);
+ unsigned gpp_offset = padno % NPADS_IN_GPP;
+ unsigned gpp = padno / NPADS_IN_GPP;
+
+ writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ }
+
+ spin_unlock(&pctrl->lock);
+}
+
+static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ const struct intel_community *community;
+ unsigned pin = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ community = intel_get_community(pctrl, pin);
+ if (community) {
+ unsigned padno = pin_to_padno(community, pin);
+ unsigned gpp_offset = padno % NPADS_IN_GPP;
+ unsigned gpp = padno / NPADS_IN_GPP;
+ void __iomem *reg;
+ u32 value;
+
+ reg = community->regs + community->ie_offset + gpp * 4;
+ value = readl(reg);
+ if (mask)
+ value &= ~BIT(gpp_offset);
+ else
+ value |= BIT(gpp_offset);
+ writel(value, reg);
+ }
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void intel_gpio_irq_mask(struct irq_data *d)
+{
+ intel_gpio_irq_mask_unmask(d, true);
+}
+
+static void intel_gpio_irq_unmask(struct irq_data *d)
+{
+ intel_gpio_irq_mask_unmask(d, false);
+}
+
+static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ unsigned pin = irqd_to_hwirq(d);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 value;
+
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
+ if (!reg)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ value = readl(reg);
+
+ value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
+
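+ /* falling-edge and active-low level triggers are implemented by inverting RX */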
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ value |= PADCFG0_RXEVCFG_EDGE_BOTH << PADCFG0_RXEVCFG_SHIFT;
+ } else if (type & IRQ_TYPE_EDGE_FALLING) {
+ value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
+ value |= PADCFG0_RXINV;
+ } else if (type & IRQ_TYPE_EDGE_RISING) {
+ value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
+ } else if (type & IRQ_TYPE_LEVEL_LOW) {
+ value |= PADCFG0_RXINV;
+ } else {
+ value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
+ }
+
+ writel(value, reg);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ const struct intel_community *community;
+ unsigned pin = irqd_to_hwirq(d);
+ unsigned padno, gpp, gpp_offset;
+ u32 gpe_en;
+
+ community = intel_get_community(pctrl, pin);
+ if (!community)
+ return -EINVAL;
+
+ padno = pin_to_padno(community, pin);
+ gpp = padno / NPADS_IN_GPP;
+ gpp_offset = padno % NPADS_IN_GPP;
+
+ /* Clear the existing wake status */
+ writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);
+
+ /*
+ * The controller will generate wake when GPE of the corresponding
+ * pad is enabled and it is not routed to SCI (GPIROUTSCI is not
+ * set).
+ */
+ gpe_en = readl(community->regs + GPI_GPE_EN + gpp * 4);
+ if (on)
+ gpe_en |= BIT(gpp_offset);
+ else
+ gpe_en &= ~BIT(gpp_offset);
+ writel(gpe_en, community->regs + GPI_GPE_EN + gpp * 4);
+
+ dev_dbg(pctrl->dev, "%sable wake for pin %u\n", on ? "en" : "dis", pin);
+ return 0;
+}
+
+static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
+ const struct intel_community *community)
+{
+ int gpp;
+
+ for (gpp = 0; gpp < community->ngpps; gpp++) {
+ unsigned long pending, enabled, gpp_offset;
+
+ pending = readl(community->regs + GPI_IS + gpp * 4);
+ enabled = readl(community->regs + community->ie_offset +
+ gpp * 4);
+
+ /* Handle only the interrupts that are enabled */
+ pending &= enabled;
+
+ for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) {
+ unsigned padno, irq;
+
+ /*
+ * The last group in a community can have fewer pins
+ * than NPADS_IN_GPP.
+ */
+ padno = gpp_offset + gpp * NPADS_IN_GPP;
+ if (padno >= community->npins)
+ break;
+
+ irq = irq_find_mapping(gc->irqdomain,
+ community->pin_base + padno);
+ generic_handle_irq(irq);
+ }
+ }
+}
+
+static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ struct irq_chip *chip = irq_get_chip(irq);
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ /* Need to check all communities for pending interrupts */
+ for (i = 0; i < pctrl->ncommunities; i++)
+ intel_gpio_community_irq_handler(gc, &pctrl->communities[i]);
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip intel_gpio_irqchip = {
+ .name = "intel-gpio",
+ .irq_ack = intel_gpio_irq_ack,
+ .irq_mask = intel_gpio_irq_mask,
+ .irq_unmask = intel_gpio_irq_unmask,
+ .irq_set_type = intel_gpio_irq_type,
+ .irq_set_wake = intel_gpio_irq_wake,
+};
+
+static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
+{
+ size_t i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ const struct intel_community *community;
+ void __iomem *base;
+ unsigned gpp;
+
+ community = &pctrl->communities[i];
+ base = community->regs;
+
+ for (gpp = 0; gpp < community->ngpps; gpp++) {
+ /* Mask and clear all interrupts */
+ writel(0, base + community->ie_offset + gpp * 4);
+ writel(0xffff, base + GPI_IS + gpp * 4);
+ }
+ }
+}
+
+static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
+{
+ int ret;
+
+ pctrl->chip = intel_gpio_chip;
+
+ pctrl->chip.ngpio = pctrl->soc->npins;
+ pctrl->chip.label = dev_name(pctrl->dev);
+ pctrl->chip.dev = pctrl->dev;
+ pctrl->chip.base = -1;
+
+ ret = gpiochip_add(&pctrl->chip);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to register gpiochip\n");
+ return ret;
+ }
+
+ ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
+ 0, 0, pctrl->soc->npins);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ gpiochip_remove(&pctrl->chip);
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add irqchip\n");
+ gpiochip_remove(&pctrl->chip);
+ return ret;
+ }
+
+ gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
+ intel_gpio_irq_handler);
+ return 0;
+}
+
+static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
+{
+#ifdef CONFIG_PM_SLEEP
+ const struct intel_pinctrl_soc_data *soc = pctrl->soc;
+ struct intel_community_context *communities;
+ struct intel_pad_context *pads;
+ int i;
+
+ pads = devm_kcalloc(pctrl->dev, soc->npins, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return -ENOMEM;
+
+ communities = devm_kcalloc(pctrl->dev, pctrl->ncommunities,
+ sizeof(*communities), GFP_KERNEL);
+ if (!communities)
+ return -ENOMEM;
+
+
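+ /* interrupt enable masks are saved per pad group so they can be restored on resume */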
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ struct intel_community *community = &pctrl->communities[i];
+ u32 *intmask;
+
+ intmask = devm_kcalloc(pctrl->dev, community->ngpps,
+ sizeof(*intmask), GFP_KERNEL);
+ if (!intmask)
+ return -ENOMEM;
+
+ communities[i].intmask = intmask;
+ }
+
+ pctrl->context.pads = pads;
+ pctrl->context.communities = communities;
+#endif
+
+ return 0;
+}
+
+int intel_pinctrl_probe(struct platform_device *pdev,
+ const struct intel_pinctrl_soc_data *soc_data)
+{
+ struct intel_pinctrl *pctrl;
+ int i, ret, irq;
+
+ if (!soc_data)
+ return -EINVAL;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+ pctrl->soc = soc_data;
+ spin_lock_init(&pctrl->lock);
+
+ /*
+ * Make a copy of the communities which we can use to hold pointers
+ * to the registers.
+ */
+ pctrl->ncommunities = pctrl->soc->ncommunities;
+ pctrl->communities = devm_kcalloc(&pdev->dev, pctrl->ncommunities,
+ sizeof(*pctrl->communities), GFP_KERNEL);
+ if (!pctrl->communities)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ struct intel_community *community = &pctrl->communities[i];
+ struct resource *res;
+ void __iomem *regs;
+ u32 padbar;
+
+ *community = pctrl->soc->communities[i];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ community->barno);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /* Read offset of the pad configuration registers */
+ padbar = readl(regs + PADBAR);
+
+ community->regs = regs;
+ community->pad_regs = regs + padbar;
+ community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get interrupt number\n");
+ return irq;
+ }
+
+ ret = intel_pinctrl_pm_init(pctrl);
+ if (ret)
+ return ret;
+
+ pctrl->pctldesc = intel_pinctrl_desc;
+ pctrl->pctldesc.name = dev_name(&pdev->dev);
+ pctrl->pctldesc.pins = pctrl->soc->pins;
+ pctrl->pctldesc.npins = pctrl->soc->npins;
+
+ pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
+ if (!pctrl->pctldev) {
+ dev_err(&pdev->dev, "failed to register pinctrl driver\n");
+ return -ENODEV;
+ }
+
+ ret = intel_gpio_probe(pctrl, irq);
+ if (ret) {
+ pinctrl_unregister(pctrl->pctldev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pctrl);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pinctrl_probe);
+
+int intel_pinctrl_remove(struct platform_device *pdev)
+{
+ struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&pctrl->chip);
+ pinctrl_unregister(pctrl->pctldev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
+
+#ifdef CONFIG_PM_SLEEP
+int intel_pinctrl_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
+ struct intel_community_context *communities;
+ struct intel_pad_context *pads;
+ int i;
+
+ pads = pctrl->context.pads;
+ for (i = 0; i < pctrl->soc->npins; i++) {
+ const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
+ u32 val;
+
+ if (!intel_pad_usable(pctrl, desc->number))
+ continue;
+
+ val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
+ pads[i].padcfg0 = val & ~PADCFG0_GPIORXSTATE;
+ val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG1));
+ pads[i].padcfg1 = val;
+ }
+
+ communities = pctrl->context.communities;
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ struct intel_community *community = &pctrl->communities[i];
+ void __iomem *base;
+ unsigned gpp;
+
+ base = community->regs + community->ie_offset;
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ communities[i].intmask[gpp] = readl(base + gpp * 4);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pinctrl_suspend);
+
+int intel_pinctrl_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
+ const struct intel_community_context *communities;
+ const struct intel_pad_context *pads;
+ int i;
+
+ /* Mask all interrupts */
+ intel_gpio_irq_init(pctrl);
+
+ pads = pctrl->context.pads;
+ for (i = 0; i < pctrl->soc->npins; i++) {
+ const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
+ void __iomem *padcfg;
+ u32 val;
+
+ if (!intel_pad_usable(pctrl, desc->number))
+ continue;
+
+ padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
+ val = readl(padcfg) & ~PADCFG0_GPIORXSTATE;
+ if (val != pads[i].padcfg0) {
+ writel(pads[i].padcfg0, padcfg);
+ dev_dbg(dev, "restored pin %u padcfg0 %#08x\n",
+ desc->number, readl(padcfg));
+ }
+
+ padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG1);
+ val = readl(padcfg);
+ if (val != pads[i].padcfg1) {
+ writel(pads[i].padcfg1, padcfg);
+ dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
+ desc->number, readl(padcfg));
+ }
+ }
+
+ communities = pctrl->context.communities;
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ struct intel_community *community = &pctrl->communities[i];
+ void __iomem *base;
+ unsigned gpp;
+
+ base = community->regs + community->ie_offset;
+ for (gpp = 0; gpp < community->ngpps; gpp++) {
+ writel(communities[i].intmask[gpp], base + gpp * 4);
+ dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
+ readl(base + gpp * 4));
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pinctrl_resume);
+#endif
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel pinctrl/GPIO core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
new file mode 100644
index 000000000000..4ec8b572a288
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -0,0 +1,128 @@
+/*
+ * Core pinctrl/GPIO driver for Intel GPIO controllers
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef PINCTRL_INTEL_H
+#define PINCTRL_INTEL_H
+
+struct pinctrl_pin_desc;
+struct platform_device;
+struct device;
+
+/**
+ * struct intel_pingroup - Description of a group of pins
+ * @name: Name of the group
+ * @pins: All pins in this group
+ * @npins: Number of pins in this group
+ * @mode: Native mode in which the group is muxed out of @pins
+ */
+struct intel_pingroup {
+ const char *name;
+ const unsigned *pins;
+ size_t npins;
+ unsigned short mode;
+};
+
+/**
+ * struct intel_function - Description of a function
+ * @name: Name of the function
+ * @groups: An array of groups for this function
+ * @ngroups: Number of groups in @groups
+ */
+struct intel_function {
+ const char *name;
+ const char * const *groups;
+ size_t ngroups;
+};
+
+/**
+ * struct intel_community - Intel pin community description
+ * @barno: MMIO BAR number where registers for this community reside
+ * @padown_offset: Register offset of PAD_OWN register from @regs. If %0
+ * then pad ownership is not supported.
+ * @padcfglock_offset: Register offset of PADCFGLOCK from @regs. If %0 then
+ * locking is not supported.
+ * @hostown_offset: Register offset of HOSTSW_OWN from @regs. If %0 then it
+ * is assumed that the host owns the pin (rather than
+ * ACPI).
+ * @ie_offset: Register offset of GPI_IE from @regs.
+ * @pin_base: Starting pin of pins in this community
+ * @npins: Number of pins in this community
+ * @regs: Community specific common registers (reserved for core driver)
+ * @pad_regs: Community specific pad registers (reserved for core driver)
+ * @ngpps: Number of groups (hw groups) in this community (reserved for
+ * core driver)
+ */
+struct intel_community {
+ unsigned barno;
+ unsigned padown_offset;
+ unsigned padcfglock_offset;
+ unsigned hostown_offset;
+ unsigned ie_offset;
+ unsigned pin_base;
+ size_t npins;
+ void __iomem *regs;
+ void __iomem *pad_regs;
+ size_t ngpps;
+};
+
+#define PIN_GROUP(n, p, m) \
+ { \
+ .name = (n), \
+ .pins = (p), \
+ .npins = ARRAY_SIZE((p)), \
+ .mode = (m), \
+ }
+
+#define FUNCTION(n, g) \
+ { \
+ .name = (n), \
+ .groups = (g), \
+ .ngroups = ARRAY_SIZE((g)), \
+ }
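/*
 * Illustrative sketch, not part of the patch: with hypothetical "foo"
 * tables, the PIN_GROUP() and FUNCTION() helpers above expand to plain
 * designated initializers, e.g.:
 *
 *	static const unsigned foo_i2c0_pins[] = { 10, 11 };
 *	static const char * const foo_i2c0_groups[] = { "i2c0_grp" };
 *
 *	PIN_GROUP("i2c0_grp", foo_i2c0_pins, 1)
 *		== { .name = "i2c0_grp", .pins = foo_i2c0_pins,
 *		     .npins = 2, .mode = 1 }
 *	FUNCTION("i2c0", foo_i2c0_groups)
 *		== { .name = "i2c0", .groups = foo_i2c0_groups, .ngroups = 1 }
 */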
+
+/**
+ * struct intel_pinctrl_soc_data - Intel pin controller per-SoC configuration
+ * @uid: ACPI _UID for the probe driver to use if needed
+ * @pins: Array of pins this pinctrl controls
+ * @npins: Number of pins in the array
+ * @groups: Array of pin groups
+ * @ngroups: Number of groups in the array
+ * @functions: Array of functions
+ * @nfunctions: Number of functions in the array
+ * @communities: Array of communities this pinctrl handles
+ * @ncommunities: Number of communities in the array
+ *
+ * The @communities array is used as a template by the core driver. It
+ * makes a copy of all communities and fills in the rest of the information.
+ */
+struct intel_pinctrl_soc_data {
+ const char *uid;
+ const struct pinctrl_pin_desc *pins;
+ size_t npins;
+ const struct intel_pingroup *groups;
+ size_t ngroups;
+ const struct intel_function *functions;
+ size_t nfunctions;
+ const struct intel_community *communities;
+ size_t ncommunities;
+};
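/*
 * Minimal sketch, not part of the patch, of how a SoC driver is expected
 * to fill this structure in; the "foo" identifiers are hypothetical, see
 * pinctrl-sunrisepoint.c below for a real instance:
 *
 *	static const struct intel_pinctrl_soc_data foo_soc_data = {
 *		.pins		= foo_pins,
 *		.npins		= ARRAY_SIZE(foo_pins),
 *		.groups		= foo_groups,
 *		.ngroups	= ARRAY_SIZE(foo_groups),
 *		.functions	= foo_functions,
 *		.nfunctions	= ARRAY_SIZE(foo_functions),
 *		.communities	= foo_communities,
 *		.ncommunities	= ARRAY_SIZE(foo_communities),
 *	};
 *
 * The SoC driver then passes &foo_soc_data to intel_pinctrl_probe() from
 * its own probe routine.
 */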
+
+int intel_pinctrl_probe(struct platform_device *pdev,
+ const struct intel_pinctrl_soc_data *soc_data);
+int intel_pinctrl_remove(struct platform_device *pdev);
+
+#ifdef CONFIG_PM_SLEEP
+int intel_pinctrl_suspend(struct device *dev);
+int intel_pinctrl_resume(struct device *dev);
+#endif
+
+#endif /* PINCTRL_INTEL_H */
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
new file mode 100644
index 000000000000..55d025dc89e8
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -0,0 +1,336 @@
+/*
+ * Intel Sunrisepoint PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define SPT_PAD_OWN 0x020
+#define SPT_PADCFGLOCK 0x0a0
+#define SPT_HOSTSW_OWN 0x0d0
+#define SPT_GPI_IE 0x120
+
+#define SPT_COMMUNITY(b, s, e) \
+ { \
+ .barno = (b), \
+ .padown_offset = SPT_PAD_OWN, \
+ .padcfglock_offset = SPT_PADCFGLOCK, \
+ .hostown_offset = SPT_HOSTSW_OWN, \
+ .ie_offset = SPT_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ }
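/*
 * For reference, SPT_COMMUNITY(0, 0, 47) as used below expands to a
 * community covering pins 0..47 behind MMIO BAR 0:
 *
 *	{ .barno = 0, .padown_offset = SPT_PAD_OWN,
 *	  .padcfglock_offset = SPT_PADCFGLOCK,
 *	  .hostown_offset = SPT_HOSTSW_OWN, .ie_offset = SPT_GPI_IE,
 *	  .pin_base = 0, .npins = 48 }
 */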
+
+/* Sunrisepoint-LP */
+static const struct pinctrl_pin_desc sptlp_pins[] = {
+ /* GPP_A */
+ PINCTRL_PIN(0, "RCINB"),
+ PINCTRL_PIN(1, "LAD_0"),
+ PINCTRL_PIN(2, "LAD_1"),
+ PINCTRL_PIN(3, "LAD_2"),
+ PINCTRL_PIN(4, "LAD_3"),
+ PINCTRL_PIN(5, "LFRAMEB"),
+ PINCTRL_PIN(6, "SERIQ"),
+ PINCTRL_PIN(7, "PIRQAB"),
+ PINCTRL_PIN(8, "CLKRUNB"),
+ PINCTRL_PIN(9, "CLKOUT_LPC_0"),
+ PINCTRL_PIN(10, "CLKOUT_LPC_1"),
+ PINCTRL_PIN(11, "PMEB"),
+ PINCTRL_PIN(12, "BM_BUSYB"),
+ PINCTRL_PIN(13, "SUSWARNB_SUS_PWRDNACK"),
+ PINCTRL_PIN(14, "SUS_STATB"),
+ PINCTRL_PIN(15, "SUSACKB"),
+ PINCTRL_PIN(16, "SD_1P8_SEL"),
+ PINCTRL_PIN(17, "SD_PWR_EN_B"),
+ PINCTRL_PIN(18, "ISH_GP_0"),
+ PINCTRL_PIN(19, "ISH_GP_1"),
+ PINCTRL_PIN(20, "ISH_GP_2"),
+ PINCTRL_PIN(21, "ISH_GP_3"),
+ PINCTRL_PIN(22, "ISH_GP_4"),
+ PINCTRL_PIN(23, "ISH_GP_5"),
+ /* GPP_B */
+ PINCTRL_PIN(24, "CORE_VID_0"),
+ PINCTRL_PIN(25, "CORE_VID_1"),
+ PINCTRL_PIN(26, "VRALERTB"),
+ PINCTRL_PIN(27, "CPU_GP_2"),
+ PINCTRL_PIN(28, "CPU_GP_3"),
+ PINCTRL_PIN(29, "SRCCLKREQB_0"),
+ PINCTRL_PIN(30, "SRCCLKREQB_1"),
+ PINCTRL_PIN(31, "SRCCLKREQB_2"),
+ PINCTRL_PIN(32, "SRCCLKREQB_3"),
+ PINCTRL_PIN(33, "SRCCLKREQB_4"),
+ PINCTRL_PIN(34, "SRCCLKREQB_5"),
+ PINCTRL_PIN(35, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(36, "SLP_S0B"),
+ PINCTRL_PIN(37, "PLTRSTB"),
+ PINCTRL_PIN(38, "SPKR"),
+ PINCTRL_PIN(39, "GSPI0_CSB"),
+ PINCTRL_PIN(40, "GSPI0_CLK"),
+ PINCTRL_PIN(41, "GSPI0_MISO"),
+ PINCTRL_PIN(42, "GSPI0_MOSI"),
+ PINCTRL_PIN(43, "GSPI1_CSB"),
+ PINCTRL_PIN(44, "GSPI1_CLK"),
+ PINCTRL_PIN(45, "GSPI1_MISO"),
+ PINCTRL_PIN(46, "GSPI1_MOSI"),
+ PINCTRL_PIN(47, "SML1ALERTB"),
+ /* GPP_C */
+ PINCTRL_PIN(48, "SMBCLK"),
+ PINCTRL_PIN(49, "SMBDATA"),
+ PINCTRL_PIN(50, "SMBALERTB"),
+ PINCTRL_PIN(51, "SML0CLK"),
+ PINCTRL_PIN(52, "SML0DATA"),
+ PINCTRL_PIN(53, "SML0ALERTB"),
+ PINCTRL_PIN(54, "SML1CLK"),
+ PINCTRL_PIN(55, "SML1DATA"),
+ PINCTRL_PIN(56, "UART0_RXD"),
+ PINCTRL_PIN(57, "UART0_TXD"),
+ PINCTRL_PIN(58, "UART0_RTSB"),
+ PINCTRL_PIN(59, "UART0_CTSB"),
+ PINCTRL_PIN(60, "UART1_RXD"),
+ PINCTRL_PIN(61, "UART1_TXD"),
+ PINCTRL_PIN(62, "UART1_RTSB"),
+ PINCTRL_PIN(63, "UART1_CTSB"),
+ PINCTRL_PIN(64, "I2C0_SDA"),
+ PINCTRL_PIN(65, "I2C0_SCL"),
+ PINCTRL_PIN(66, "I2C1_SDA"),
+ PINCTRL_PIN(67, "I2C1_SCL"),
+ PINCTRL_PIN(68, "UART2_RXD"),
+ PINCTRL_PIN(69, "UART2_TXD"),
+ PINCTRL_PIN(70, "UART2_RTSB"),
+ PINCTRL_PIN(71, "UART2_CTSB"),
+ /* GPP_D */
+ PINCTRL_PIN(72, "SPI1_CSB"),
+ PINCTRL_PIN(73, "SPI1_CLK"),
+ PINCTRL_PIN(74, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(75, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(76, "FLASHTRIG"),
+ PINCTRL_PIN(77, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(78, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(79, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(80, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(81, "ISH_SPI_CSB"),
+ PINCTRL_PIN(82, "ISH_SPI_CLK"),
+ PINCTRL_PIN(83, "ISH_SPI_MISO"),
+ PINCTRL_PIN(84, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(85, "ISH_UART0_RXD"),
+ PINCTRL_PIN(86, "ISH_UART0_TXD"),
+ PINCTRL_PIN(87, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(88, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(89, "DMIC_CLK_1"),
+ PINCTRL_PIN(90, "DMIC_DATA_1"),
+ PINCTRL_PIN(91, "DMIC_CLK_0"),
+ PINCTRL_PIN(92, "DMIC_DATA_0"),
+ PINCTRL_PIN(93, "SPI1_IO_2"),
+ PINCTRL_PIN(94, "SPI1_IO_3"),
+ PINCTRL_PIN(95, "SSP_MCLK"),
+ /* GPP_E */
+ PINCTRL_PIN(96, "SATAXPCIE_0"),
+ PINCTRL_PIN(97, "SATAXPCIE_1"),
+ PINCTRL_PIN(98, "SATAXPCIE_2"),
+ PINCTRL_PIN(99, "CPU_GP_0"),
+ PINCTRL_PIN(100, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(101, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(102, "SATA_DEVSLP_2"),
+ PINCTRL_PIN(103, "CPU_GP_1"),
+ PINCTRL_PIN(104, "SATA_LEDB"),
+ PINCTRL_PIN(105, "USB2_OCB_0"),
+ PINCTRL_PIN(106, "USB2_OCB_1"),
+ PINCTRL_PIN(107, "USB2_OCB_2"),
+ PINCTRL_PIN(108, "USB2_OCB_3"),
+ PINCTRL_PIN(109, "DDSP_HPD_0"),
+ PINCTRL_PIN(110, "DDSP_HPD_1"),
+ PINCTRL_PIN(111, "DDSP_HPD_2"),
+ PINCTRL_PIN(112, "DDSP_HPD_3"),
+ PINCTRL_PIN(113, "EDP_HPD"),
+ PINCTRL_PIN(114, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(115, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(116, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(117, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(118, "DDPD_CTRLCLK"),
+ PINCTRL_PIN(119, "DDPD_CTRLDATA"),
+ /* GPP_F */
+ PINCTRL_PIN(120, "SSP2_SCLK"),
+ PINCTRL_PIN(121, "SSP2_SFRM"),
+ PINCTRL_PIN(122, "SSP2_TXD"),
+ PINCTRL_PIN(123, "SSP2_RXD"),
+ PINCTRL_PIN(124, "I2C2_SDA"),
+ PINCTRL_PIN(125, "I2C2_SCL"),
+ PINCTRL_PIN(126, "I2C3_SDA"),
+ PINCTRL_PIN(127, "I2C3_SCL"),
+ PINCTRL_PIN(128, "I2C4_SDA"),
+ PINCTRL_PIN(129, "I2C4_SCL"),
+ PINCTRL_PIN(130, "I2C5_SDA"),
+ PINCTRL_PIN(131, "I2C5_SCL"),
+ PINCTRL_PIN(132, "EMMC_CMD"),
+ PINCTRL_PIN(133, "EMMC_DATA_0"),
+ PINCTRL_PIN(134, "EMMC_DATA_1"),
+ PINCTRL_PIN(135, "EMMC_DATA_2"),
+ PINCTRL_PIN(136, "EMMC_DATA_3"),
+ PINCTRL_PIN(137, "EMMC_DATA_4"),
+ PINCTRL_PIN(138, "EMMC_DATA_5"),
+ PINCTRL_PIN(139, "EMMC_DATA_6"),
+ PINCTRL_PIN(140, "EMMC_DATA_7"),
+ PINCTRL_PIN(141, "EMMC_RCLK"),
+ PINCTRL_PIN(142, "EMMC_CLK"),
+ PINCTRL_PIN(143, "GPP_F_23"),
+ /* GPP_G */
+ PINCTRL_PIN(144, "SD_CMD"),
+ PINCTRL_PIN(145, "SD_DATA_0"),
+ PINCTRL_PIN(146, "SD_DATA_1"),
+ PINCTRL_PIN(147, "SD_DATA_2"),
+ PINCTRL_PIN(148, "SD_DATA_3"),
+ PINCTRL_PIN(149, "SD_CDB"),
+ PINCTRL_PIN(150, "SD_CLK"),
+ PINCTRL_PIN(151, "SD_WP"),
+};
+
+static const unsigned sptlp_spi0_pins[] = { 39, 40, 41, 42 };
+static const unsigned sptlp_spi1_pins[] = { 43, 44, 45, 46 };
+static const unsigned sptlp_uart0_pins[] = { 56, 57, 58, 59 };
+static const unsigned sptlp_uart1_pins[] = { 60, 61, 62, 63 };
+static const unsigned sptlp_uart2_pins[] = { 68, 69, 70, 71 };
+static const unsigned sptlp_i2c0_pins[] = { 64, 65 };
+static const unsigned sptlp_i2c1_pins[] = { 66, 67 };
+static const unsigned sptlp_i2c2_pins[] = { 124, 125 };
+static const unsigned sptlp_i2c3_pins[] = { 126, 127 };
+static const unsigned sptlp_i2c4_pins[] = { 128, 129 };
+static const unsigned sptlp_i2c4b_pins[] = { 85, 86 };
+static const unsigned sptlp_i2c5_pins[] = { 130, 131 };
+static const unsigned sptlp_ssp2_pins[] = { 120, 121, 122, 123 };
+static const unsigned sptlp_emmc_pins[] = {
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+};
+static const unsigned sptlp_sd_pins[] = {
+ 144, 145, 146, 147, 148, 149, 150, 151,
+};
+
+static const struct intel_pingroup sptlp_groups[] = {
+ PIN_GROUP("spi0_grp", sptlp_spi0_pins, 1),
+ PIN_GROUP("spi1_grp", sptlp_spi1_pins, 1),
+ PIN_GROUP("uart0_grp", sptlp_uart0_pins, 1),
+ PIN_GROUP("uart1_grp", sptlp_uart1_pins, 1),
+ PIN_GROUP("uart2_grp", sptlp_uart2_pins, 1),
+ PIN_GROUP("i2c0_grp", sptlp_i2c0_pins, 1),
+ PIN_GROUP("i2c1_grp", sptlp_i2c1_pins, 1),
+ PIN_GROUP("i2c2_grp", sptlp_i2c2_pins, 1),
+ PIN_GROUP("i2c3_grp", sptlp_i2c3_pins, 1),
+ PIN_GROUP("i2c4_grp", sptlp_i2c4_pins, 1),
+ PIN_GROUP("i2c4b_grp", sptlp_i2c4b_pins, 3),
+ PIN_GROUP("i2c5_grp", sptlp_i2c5_pins, 1),
+ PIN_GROUP("ssp2_grp", sptlp_ssp2_pins, 1),
+ PIN_GROUP("emmc_grp", sptlp_emmc_pins, 1),
+ PIN_GROUP("sd_grp", sptlp_sd_pins, 1),
+};
+
+static const char * const sptlp_spi0_groups[] = { "spi0_grp" };
+static const char * const sptlp_spi1_groups[] = { "spi1_grp" };
+static const char * const sptlp_uart0_groups[] = { "uart0_grp" };
+static const char * const sptlp_uart1_groups[] = { "uart1_grp" };
+static const char * const sptlp_uart2_groups[] = { "uart2_grp" };
+static const char * const sptlp_i2c0_groups[] = { "i2c0_grp" };
+static const char * const sptlp_i2c1_groups[] = { "i2c1_grp" };
+static const char * const sptlp_i2c2_groups[] = { "i2c2_grp" };
+static const char * const sptlp_i2c3_groups[] = { "i2c3_grp" };
+static const char * const sptlp_i2c4_groups[] = { "i2c4_grp", "i2c4b_grp" };
+static const char * const sptlp_i2c5_groups[] = { "i2c5_grp" };
+static const char * const sptlp_ssp2_groups[] = { "ssp2_grp" };
+static const char * const sptlp_emmc_groups[] = { "emmc_grp" };
+static const char * const sptlp_sd_groups[] = { "sd_grp" };
+
+static const struct intel_function sptlp_functions[] = {
+ FUNCTION("spi0", sptlp_spi0_groups),
+ FUNCTION("spi1", sptlp_spi1_groups),
+ FUNCTION("uart0", sptlp_uart0_groups),
+ FUNCTION("uart1", sptlp_uart1_groups),
+ FUNCTION("uart2", sptlp_uart2_groups),
+ FUNCTION("i2c0", sptlp_i2c0_groups),
+ FUNCTION("i2c1", sptlp_i2c1_groups),
+ FUNCTION("i2c2", sptlp_i2c2_groups),
+ FUNCTION("i2c3", sptlp_i2c3_groups),
+ FUNCTION("i2c4", sptlp_i2c4_groups),
+ FUNCTION("i2c5", sptlp_i2c5_groups),
+ FUNCTION("ssp2", sptlp_ssp2_groups),
+ FUNCTION("emmc", sptlp_emmc_groups),
+ FUNCTION("sd", sptlp_sd_groups),
+};
+
+static const struct intel_community sptlp_communities[] = {
+ SPT_COMMUNITY(0, 0, 47),
+ SPT_COMMUNITY(1, 48, 119),
+ SPT_COMMUNITY(2, 120, 151),
+};
+
+static const struct intel_pinctrl_soc_data sptlp_soc_data = {
+ .pins = sptlp_pins,
+ .npins = ARRAY_SIZE(sptlp_pins),
+ .groups = sptlp_groups,
+ .ngroups = ARRAY_SIZE(sptlp_groups),
+ .functions = sptlp_functions,
+ .nfunctions = ARRAY_SIZE(sptlp_functions),
+ .communities = sptlp_communities,
+ .ncommunities = ARRAY_SIZE(sptlp_communities),
+};
+
+static const struct acpi_device_id spt_pinctrl_acpi_match[] = {
+ { "INT344B", (kernel_ulong_t)&sptlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, spt_pinctrl_acpi_match);
+
+static int spt_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *soc_data;
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(spt_pinctrl_acpi_match, &pdev->dev);
+ if (!id || !id->driver_data)
+ return -ENODEV;
+
+ soc_data = (const struct intel_pinctrl_soc_data *)id->driver_data;
+ return intel_pinctrl_probe(pdev, soc_data);
+}
+
+static const struct dev_pm_ops spt_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static struct platform_driver spt_pinctrl_driver = {
+ .probe = spt_pinctrl_probe,
+ .remove = intel_pinctrl_remove,
+ .driver = {
+ .name = "sunrisepoint-pinctrl",
+ .acpi_match_table = spt_pinctrl_acpi_match,
+ .pm = &spt_pinctrl_pm_ops,
+ },
+};
+
+static int __init spt_pinctrl_init(void)
+{
+ return platform_driver_register(&spt_pinctrl_driver);
+}
+subsys_initcall(spt_pinctrl_init);
+
+static void __exit spt_pinctrl_exit(void)
+{
+ platform_driver_unregister(&spt_pinctrl_driver);
+}
+module_exit(spt_pinctrl_exit);
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Sunrisepoint PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
new file mode 100644
index 000000000000..6b3551cad111
--- /dev/null
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -0,0 +1,26 @@
+if ARCH_MEDIATEK || COMPILE_TEST
+
+config PINCTRL_MTK_COMMON
+ bool
+ depends on OF
+ select PINMUX
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+
+# For ARMv7 SoCs
+config PINCTRL_MT8135
+ bool "Mediatek MT8135 pin control" if COMPILE_TEST && !MACH_MT8135
+ depends on OF
+ default MACH_MT8135
+ select PINCTRL_MTK_COMMON
+
+# For ARMv8 SoCs
+config PINCTRL_MT8173
+ bool "Mediatek MT8173 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_COMMON
+
+endif
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
new file mode 100644
index 000000000000..d8606a2179cf
--- /dev/null
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -0,0 +1,6 @@
+# Core
+obj-$(CONFIG_PINCTRL_MTK_COMMON) += pinctrl-mtk-common.o
+
+# SoC Drivers
+obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o
+obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
new file mode 100644
index 000000000000..f1e1e187ce96
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt8135.h"
+
+#define DRV_BASE1 0x500
+#define DRV_BASE2 0x510
+#define PUPD_BASE1 0x400
+#define PUPD_BASE2 0x450
+#define R0_BASE1 0x4d0
+#define R1_BASE1 0x200
+#define R1_BASE2 0x250
+
+struct mtk_spec_pull_set {
+ unsigned int pin;
+ unsigned int pupd_offset;
+ unsigned char pupd_bit;
+ unsigned int r0_offset;
+ unsigned char r0_bit;
+ unsigned int r1_offset;
+ unsigned char r1_bit;
+};
+
+#define SPEC_PULL(_pin, _pupd_offset, _pupd_bit, _r0_offset, \
+ _r0_bit, _r1_offset, _r1_bit) \
+ { \
+ .pin = _pin, \
+ .pupd_offset = _pupd_offset, \
+ .pupd_bit = _pupd_bit, \
+ .r0_offset = _r0_offset, \
+ .r0_bit = _r0_bit, \
+ .r1_offset = _r1_offset, \
+ .r1_bit = _r1_bit, \
+ }
+
+static const struct mtk_drv_group_desc mt8135_drv_grp[] = {
+ /* E8E4E2 2/4/6/8/10/12/14/16 */
+ MTK_DRV_GRP(2, 16, 0, 2, 2),
+ /* E8E4 4/8/12/16 */
+ MTK_DRV_GRP(4, 16, 1, 2, 4),
+ /* E4E2 2/4/6/8 */
+ MTK_DRV_GRP(2, 8, 0, 1, 2),
+ /* E16E8E4 4/8/12/16/20/24/28/32 */
+ MTK_DRV_GRP(4, 32, 0, 2, 4)
+};
+
+static const struct mtk_pin_drv_grp mt8135_pin_drv[] = {
+ MTK_PIN_DRV_GRP(0, DRV_BASE1, 0, 0),
+ MTK_PIN_DRV_GRP(1, DRV_BASE1, 0, 0),
+ MTK_PIN_DRV_GRP(2, DRV_BASE1, 0, 0),
+ MTK_PIN_DRV_GRP(3, DRV_BASE1, 0, 0),
+ MTK_PIN_DRV_GRP(4, DRV_BASE1, 4, 0),
+ MTK_PIN_DRV_GRP(5, DRV_BASE1, 8, 0),
+ MTK_PIN_DRV_GRP(6, DRV_BASE1, 0, 0),
+ MTK_PIN_DRV_GRP(7, DRV_BASE1, 0, 0),
+ MTK_PIN_DRV_GRP(8, DRV_BASE1, 0, 0),
+ MTK_PIN_DRV_GRP(9, DRV_BASE1, 0, 0),
+
+ MTK_PIN_DRV_GRP(10, DRV_BASE1, 12, 1),
+ MTK_PIN_DRV_GRP(11, DRV_BASE1, 12, 1),
+ MTK_PIN_DRV_GRP(12, DRV_BASE1, 12, 1),
+ MTK_PIN_DRV_GRP(13, DRV_BASE1, 12, 1),
+ MTK_PIN_DRV_GRP(14, DRV_BASE1, 12, 1),
+ MTK_PIN_DRV_GRP(15, DRV_BASE1, 12, 1),
+ MTK_PIN_DRV_GRP(16, DRV_BASE1, 12, 1),
+ MTK_PIN_DRV_GRP(17, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(18, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(19, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(20, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(21, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(22, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(23, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(24, DRV_BASE1, 16, 1),
+ MTK_PIN_DRV_GRP(33, DRV_BASE1, 24, 1),
+ MTK_PIN_DRV_GRP(34, DRV_BASE2, 12, 2),
+ MTK_PIN_DRV_GRP(37, DRV_BASE2, 20, 1),
+ MTK_PIN_DRV_GRP(38, DRV_BASE2, 20, 1),
+ MTK_PIN_DRV_GRP(39, DRV_BASE2, 20, 1),
+ MTK_PIN_DRV_GRP(40, DRV_BASE2, 24, 1),
+ MTK_PIN_DRV_GRP(41, DRV_BASE2, 24, 1),
+ MTK_PIN_DRV_GRP(42, DRV_BASE2, 24, 1),
+ MTK_PIN_DRV_GRP(43, DRV_BASE2, 28, 1),
+ MTK_PIN_DRV_GRP(44, DRV_BASE2, 28, 1),
+ MTK_PIN_DRV_GRP(45, DRV_BASE2, 28, 1),
+ MTK_PIN_DRV_GRP(46, DRV_BASE2, 28, 1),
+ MTK_PIN_DRV_GRP(47, DRV_BASE2, 28, 1),
+
+ MTK_PIN_DRV_GRP(49, DRV_BASE2+0x10, 0, 1),
+ MTK_PIN_DRV_GRP(50, DRV_BASE2+0x10, 4, 1),
+ MTK_PIN_DRV_GRP(51, DRV_BASE2+0x10, 8, 1),
+ MTK_PIN_DRV_GRP(52, DRV_BASE2+0x10, 12, 2),
+ MTK_PIN_DRV_GRP(53, DRV_BASE2+0x10, 16, 1),
+ MTK_PIN_DRV_GRP(54, DRV_BASE2+0x10, 20, 1),
+ MTK_PIN_DRV_GRP(55, DRV_BASE2+0x10, 24, 1),
+ MTK_PIN_DRV_GRP(56, DRV_BASE2+0x10, 28, 1),
+
+ MTK_PIN_DRV_GRP(57, DRV_BASE2+0x20, 0, 1),
+ MTK_PIN_DRV_GRP(58, DRV_BASE2+0x20, 0, 1),
+ MTK_PIN_DRV_GRP(59, DRV_BASE2+0x20, 0, 1),
+ MTK_PIN_DRV_GRP(60, DRV_BASE2+0x20, 0, 1),
+ MTK_PIN_DRV_GRP(61, DRV_BASE2+0x20, 0, 1),
+ MTK_PIN_DRV_GRP(62, DRV_BASE2+0x20, 0, 1),
+ MTK_PIN_DRV_GRP(63, DRV_BASE2+0x20, 4, 1),
+ MTK_PIN_DRV_GRP(64, DRV_BASE2+0x20, 8, 1),
+ MTK_PIN_DRV_GRP(65, DRV_BASE2+0x20, 12, 1),
+ MTK_PIN_DRV_GRP(66, DRV_BASE2+0x20, 16, 1),
+ MTK_PIN_DRV_GRP(67, DRV_BASE2+0x20, 20, 1),
+ MTK_PIN_DRV_GRP(68, DRV_BASE2+0x20, 24, 1),
+ MTK_PIN_DRV_GRP(69, DRV_BASE2+0x20, 28, 1),
+
+ MTK_PIN_DRV_GRP(70, DRV_BASE2+0x30, 0, 1),
+ MTK_PIN_DRV_GRP(71, DRV_BASE2+0x30, 4, 1),
+ MTK_PIN_DRV_GRP(72, DRV_BASE2+0x30, 8, 1),
+ MTK_PIN_DRV_GRP(73, DRV_BASE2+0x30, 12, 1),
+ MTK_PIN_DRV_GRP(74, DRV_BASE2+0x30, 16, 1),
+ MTK_PIN_DRV_GRP(75, DRV_BASE2+0x30, 20, 1),
+ MTK_PIN_DRV_GRP(76, DRV_BASE2+0x30, 24, 1),
+ MTK_PIN_DRV_GRP(77, DRV_BASE2+0x30, 28, 3),
+ MTK_PIN_DRV_GRP(78, DRV_BASE2+0x30, 28, 3),
+
+ MTK_PIN_DRV_GRP(79, DRV_BASE2+0x40, 0, 3),
+ MTK_PIN_DRV_GRP(80, DRV_BASE2+0x40, 4, 3),
+
+ MTK_PIN_DRV_GRP(81, DRV_BASE2+0x30, 28, 3),
+ MTK_PIN_DRV_GRP(82, DRV_BASE2+0x30, 28, 3),
+
+ MTK_PIN_DRV_GRP(83, DRV_BASE2+0x40, 8, 3),
+ MTK_PIN_DRV_GRP(84, DRV_BASE2+0x40, 8, 3),
+ MTK_PIN_DRV_GRP(85, DRV_BASE2+0x40, 12, 3),
+ MTK_PIN_DRV_GRP(86, DRV_BASE2+0x40, 16, 3),
+ MTK_PIN_DRV_GRP(87, DRV_BASE2+0x40, 8, 3),
+ MTK_PIN_DRV_GRP(88, DRV_BASE2+0x40, 8, 3),
+
+ MTK_PIN_DRV_GRP(89, DRV_BASE2+0x50, 12, 0),
+ MTK_PIN_DRV_GRP(90, DRV_BASE2+0x50, 12, 0),
+ MTK_PIN_DRV_GRP(91, DRV_BASE2+0x50, 12, 0),
+ MTK_PIN_DRV_GRP(92, DRV_BASE2+0x50, 12, 0),
+ MTK_PIN_DRV_GRP(93, DRV_BASE2+0x50, 12, 0),
+ MTK_PIN_DRV_GRP(94, DRV_BASE2+0x50, 12, 0),
+ MTK_PIN_DRV_GRP(95, DRV_BASE2+0x50, 12, 0),
+
+ MTK_PIN_DRV_GRP(96, DRV_BASE1+0xb0, 28, 0),
+
+ MTK_PIN_DRV_GRP(97, DRV_BASE2+0x50, 12, 0),
+ MTK_PIN_DRV_GRP(98, DRV_BASE2+0x50, 16, 0),
+ MTK_PIN_DRV_GRP(99, DRV_BASE2+0x50, 20, 1),
+ MTK_PIN_DRV_GRP(102, DRV_BASE2+0x50, 24, 1),
+ MTK_PIN_DRV_GRP(103, DRV_BASE2+0x50, 28, 1),
+
+ MTK_PIN_DRV_GRP(104, DRV_BASE2+0x60, 0, 1),
+ MTK_PIN_DRV_GRP(105, DRV_BASE2+0x60, 4, 1),
+ MTK_PIN_DRV_GRP(106, DRV_BASE2+0x60, 4, 1),
+ MTK_PIN_DRV_GRP(107, DRV_BASE2+0x60, 4, 1),
+ MTK_PIN_DRV_GRP(108, DRV_BASE2+0x60, 4, 1),
+ MTK_PIN_DRV_GRP(109, DRV_BASE2+0x60, 8, 2),
+ MTK_PIN_DRV_GRP(110, DRV_BASE2+0x60, 12, 2),
+ MTK_PIN_DRV_GRP(111, DRV_BASE2+0x60, 16, 2),
+ MTK_PIN_DRV_GRP(112, DRV_BASE2+0x60, 20, 2),
+ MTK_PIN_DRV_GRP(113, DRV_BASE2+0x60, 24, 2),
+ MTK_PIN_DRV_GRP(114, DRV_BASE2+0x60, 28, 2),
+
+ MTK_PIN_DRV_GRP(115, DRV_BASE2+0x70, 0, 2),
+ MTK_PIN_DRV_GRP(116, DRV_BASE2+0x70, 4, 2),
+ MTK_PIN_DRV_GRP(117, DRV_BASE2+0x70, 8, 2),
+ MTK_PIN_DRV_GRP(118, DRV_BASE2+0x70, 12, 2),
+ MTK_PIN_DRV_GRP(119, DRV_BASE2+0x70, 16, 2),
+ MTK_PIN_DRV_GRP(120, DRV_BASE2+0x70, 20, 2),
+
+ MTK_PIN_DRV_GRP(181, DRV_BASE1+0xa0, 12, 1),
+ MTK_PIN_DRV_GRP(182, DRV_BASE1+0xa0, 16, 1),
+ MTK_PIN_DRV_GRP(183, DRV_BASE1+0xa0, 20, 1),
+ MTK_PIN_DRV_GRP(184, DRV_BASE1+0xa0, 24, 1),
+ MTK_PIN_DRV_GRP(185, DRV_BASE1+0xa0, 28, 1),
+
+ MTK_PIN_DRV_GRP(186, DRV_BASE1+0xb0, 0, 2),
+ MTK_PIN_DRV_GRP(187, DRV_BASE1+0xb0, 0, 2),
+ MTK_PIN_DRV_GRP(188, DRV_BASE1+0xb0, 0, 2),
+ MTK_PIN_DRV_GRP(189, DRV_BASE1+0xb0, 0, 2),
+ MTK_PIN_DRV_GRP(190, DRV_BASE1+0xb0, 4, 1),
+ MTK_PIN_DRV_GRP(191, DRV_BASE1+0xb0, 8, 1),
+ MTK_PIN_DRV_GRP(192, DRV_BASE1+0xb0, 12, 1),
+
+ MTK_PIN_DRV_GRP(197, DRV_BASE1+0xb0, 16, 0),
+ MTK_PIN_DRV_GRP(198, DRV_BASE1+0xb0, 16, 0),
+ MTK_PIN_DRV_GRP(199, DRV_BASE1+0xb0, 20, 0),
+ MTK_PIN_DRV_GRP(200, DRV_BASE1+0xb0, 24, 0),
+ MTK_PIN_DRV_GRP(201, DRV_BASE1+0xb0, 16, 0),
+ MTK_PIN_DRV_GRP(202, DRV_BASE1+0xb0, 16, 0)
+};
+
+static const struct mtk_spec_pull_set spec_pupd[] = {
+ SPEC_PULL(0, PUPD_BASE1, 0, R0_BASE1, 9, R1_BASE1, 0),
+ SPEC_PULL(1, PUPD_BASE1, 1, R0_BASE1, 8, R1_BASE1, 1),
+ SPEC_PULL(2, PUPD_BASE1, 2, R0_BASE1, 7, R1_BASE1, 2),
+ SPEC_PULL(3, PUPD_BASE1, 3, R0_BASE1, 6, R1_BASE1, 3),
+ SPEC_PULL(4, PUPD_BASE1, 4, R0_BASE1, 1, R1_BASE1, 4),
+ SPEC_PULL(5, PUPD_BASE1, 5, R0_BASE1, 0, R1_BASE1, 5),
+ SPEC_PULL(6, PUPD_BASE1, 6, R0_BASE1, 5, R1_BASE1, 6),
+ SPEC_PULL(7, PUPD_BASE1, 7, R0_BASE1, 4, R1_BASE1, 7),
+ SPEC_PULL(8, PUPD_BASE1, 8, R0_BASE1, 3, R1_BASE1, 8),
+ SPEC_PULL(9, PUPD_BASE1, 9, R0_BASE1, 2, R1_BASE1, 9),
+ SPEC_PULL(89, PUPD_BASE2, 9, R0_BASE1, 18, R1_BASE2, 9),
+ SPEC_PULL(90, PUPD_BASE2, 10, R0_BASE1, 19, R1_BASE2, 10),
+ SPEC_PULL(91, PUPD_BASE2, 11, R0_BASE1, 23, R1_BASE2, 11),
+ SPEC_PULL(92, PUPD_BASE2, 12, R0_BASE1, 24, R1_BASE2, 12),
+ SPEC_PULL(93, PUPD_BASE2, 13, R0_BASE1, 25, R1_BASE2, 13),
+ SPEC_PULL(94, PUPD_BASE2, 14, R0_BASE1, 22, R1_BASE2, 14),
+ SPEC_PULL(95, PUPD_BASE2, 15, R0_BASE1, 20, R1_BASE2, 15),
+ SPEC_PULL(96, PUPD_BASE2+0x10, 0, R0_BASE1, 16, R1_BASE2+0x10, 0),
+ SPEC_PULL(97, PUPD_BASE2+0x10, 1, R0_BASE1, 21, R1_BASE2+0x10, 1),
+ SPEC_PULL(98, PUPD_BASE2+0x10, 2, R0_BASE1, 17, R1_BASE2+0x10, 2),
+ SPEC_PULL(197, PUPD_BASE1+0xc0, 5, R0_BASE1, 13, R1_BASE2+0xc0, 5),
+ SPEC_PULL(198, PUPD_BASE2+0xc0, 6, R0_BASE1, 14, R1_BASE2+0xc0, 6),
+ SPEC_PULL(199, PUPD_BASE2+0xc0, 7, R0_BASE1, 11, R1_BASE2+0xc0, 7),
+ SPEC_PULL(200, PUPD_BASE2+0xc0, 8, R0_BASE1, 10, R1_BASE2+0xc0, 8),
+ SPEC_PULL(201, PUPD_BASE2+0xc0, 9, R0_BASE1, 13, R1_BASE2+0xc0, 9),
+ SPEC_PULL(202, PUPD_BASE2+0xc0, 10, R0_BASE1, 12, R1_BASE2+0xc0, 10)
+};
+
+static int spec_pull_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, bool isup, unsigned int r1r0)
+{
+ unsigned int i;
+ unsigned int reg_pupd, reg_set_r0, reg_set_r1;
+ unsigned int reg_rst_r0, reg_rst_r1;
+ bool find = false;
+
+ for (i = 0; i < ARRAY_SIZE(spec_pupd); i++) {
+ if (pin == spec_pupd[i].pin) {
+ find = true;
+ break;
+ }
+ }
+
+ if (!find)
+ return -EINVAL;
+
+ if (isup)
+ reg_pupd = spec_pupd[i].pupd_offset + align;
+ else
+ reg_pupd = spec_pupd[i].pupd_offset + (align << 1);
+
+ regmap_write(regmap, reg_pupd, spec_pupd[i].pupd_bit);
+
+ reg_set_r0 = spec_pupd[i].r0_offset + align;
+ reg_rst_r0 = spec_pupd[i].r0_offset + (align << 1);
+ reg_set_r1 = spec_pupd[i].r1_offset + align;
+ reg_rst_r1 = spec_pupd[i].r1_offset + (align << 1);
+
+ switch (r1r0) {
+ case MTK_PUPD_SET_R1R0_00:
+ regmap_write(regmap, reg_rst_r0, spec_pupd[i].r0_bit);
+ regmap_write(regmap, reg_rst_r1, spec_pupd[i].r1_bit);
+ break;
+ case MTK_PUPD_SET_R1R0_01:
+ regmap_write(regmap, reg_set_r0, spec_pupd[i].r0_bit);
+ regmap_write(regmap, reg_rst_r1, spec_pupd[i].r1_bit);
+ break;
+ case MTK_PUPD_SET_R1R0_10:
+ regmap_write(regmap, reg_rst_r0, spec_pupd[i].r0_bit);
+ regmap_write(regmap, reg_set_r1, spec_pupd[i].r1_bit);
+ break;
+ case MTK_PUPD_SET_R1R0_11:
+ regmap_write(regmap, reg_set_r0, spec_pupd[i].r0_bit);
+ regmap_write(regmap, reg_set_r1, spec_pupd[i].r1_bit);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct mtk_pinctrl_devdata mt8135_pinctrl_data = {
+ .pins = mtk_pins_mt8135,
+ .npins = ARRAY_SIZE(mtk_pins_mt8135),
+ .grp_desc = mt8135_drv_grp,
+ .n_grp_cls = ARRAY_SIZE(mt8135_drv_grp),
+ .pin_drv_grp = mt8135_pin_drv,
+ .n_pin_drv_grps = ARRAY_SIZE(mt8135_pin_drv),
+ .spec_pull_set = spec_pull_set,
+ .dir_offset = 0x0000,
+ .ies_offset = 0x0100,
+ .pullen_offset = 0x0200,
+ .smt_offset = 0x0300,
+ .pullsel_offset = 0x0400,
+ .invser_offset = 0x0600,
+ .dout_offset = 0x0800,
+ .din_offset = 0x0A00,
+ .pinmux_offset = 0x0C00,
+ .type1_start = 34,
+ .type1_end = 149,
+ .port_shf = 4,
+ .port_mask = 0xf,
+ .port_align = 4,
+ .chip_type = MTK_CHIP_TYPE_BASE,
+ .eint_offsets = {
+ .name = "mt8135_eint",
+ .stat = 0x000,
+ .ack = 0x040,
+ .mask = 0x080,
+ .mask_set = 0x0c0,
+ .mask_clr = 0x100,
+ .sens = 0x140,
+ .sens_set = 0x180,
+ .sens_clr = 0x1c0,
+ .soft = 0x200,
+ .soft_set = 0x240,
+ .soft_clr = 0x280,
+ .pol = 0x300,
+ .pol_set = 0x340,
+ .pol_clr = 0x380,
+ .dom_en = 0x400,
+ .dbnc_ctrl = 0x500,
+ .dbnc_set = 0x600,
+ .dbnc_clr = 0x700,
+ .port_mask = 7,
+ .ports = 6,
+ },
+ .ap_num = 192,
+ .db_cnt = 16,
+};
+
+static int mt8135_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_pctrl_init(pdev, &mt8135_pinctrl_data);
+}
+
+static const struct of_device_id mt8135_pctrl_match[] = {
+ {
+ .compatible = "mediatek,mt8135-pinctrl",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt8135_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+ .probe = mt8135_pinctrl_probe,
+ .driver = {
+ .name = "mediatek-mt8135-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = mt8135_pctrl_match,
+ },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mtk_pinctrl_driver);
+}
+
+module_init(mtk_pinctrl_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
+MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
new file mode 100644
index 000000000000..412ea84836a1
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2014-2015 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt8173.h"
+
+#define DRV_BASE 0xb00
+
+/**
+ * struct mtk_pin_ies_smt_set - IES and SMT setting for special pins.
+ * @start: The start pin number of those special pins.
+ * @end: The end pin number of those special pins.
+ * @offset: The offset of the special setting register.
+ * @bit: The bit in the special setting register.
+ */
+struct mtk_pin_ies_smt_set {
+ unsigned int start;
+ unsigned int end;
+ unsigned int offset;
+ unsigned char bit;
+};
+
+#define MTK_PIN_IES_SMT_SET(_start, _end, _offset, _bit) \
+ { \
+ .start = _start, \
+ .end = _end, \
+ .bit = _bit, \
+ .offset = _offset, \
+ }
+
+/**
+ * struct mtk_pin_spec_pupd_set - Pull up/down setting for special pins.
+ * @pin: The pin number.
+ * @offset: The offset of the special pull up/down setting register.
+ * @pupd_bit: The pull up/down bit in this register.
+ * @r0_bit: The R0 bit of the pull resistor.
+ * @r1_bit: The R1 bit of the pull resistor.
+ */
+struct mtk_pin_spec_pupd_set {
+ unsigned int pin;
+ unsigned int offset;
+ unsigned char pupd_bit;
+ unsigned char r1_bit;
+ unsigned char r0_bit;
+};
+
+#define MTK_PIN_PUPD_SPEC(_pin, _offset, _pupd, _r1, _r0) \
+ { \
+ .pin = _pin, \
+ .offset = _offset, \
+ .pupd_bit = _pupd, \
+ .r1_bit = _r1, \
+ .r0_bit = _r0, \
+ }
+
+static const struct mtk_pin_spec_pupd_set mt8173_spec_pupd[] = {
+ MTK_PIN_PUPD_SPEC(119, 0xe00, 2, 1, 0), /* KROW0 */
+ MTK_PIN_PUPD_SPEC(120, 0xe00, 6, 5, 4), /* KROW1 */
+ MTK_PIN_PUPD_SPEC(121, 0xe00, 10, 9, 8), /* KROW2 */
+ MTK_PIN_PUPD_SPEC(122, 0xe10, 2, 1, 0), /* KCOL0 */
+ MTK_PIN_PUPD_SPEC(123, 0xe10, 6, 5, 4), /* KCOL1 */
+ MTK_PIN_PUPD_SPEC(124, 0xe10, 10, 9, 8), /* KCOL2 */
+
+ MTK_PIN_PUPD_SPEC(67, 0xd10, 2, 1, 0), /* ms0 DS */
+ MTK_PIN_PUPD_SPEC(68, 0xd00, 2, 1, 0), /* ms0 RST */
+ MTK_PIN_PUPD_SPEC(66, 0xc10, 2, 1, 0), /* ms0 cmd */
+ MTK_PIN_PUPD_SPEC(65, 0xc00, 2, 1, 0), /* ms0 clk */
+ MTK_PIN_PUPD_SPEC(57, 0xc20, 2, 1, 0), /* ms0 data0 */
+ MTK_PIN_PUPD_SPEC(58, 0xc20, 2, 1, 0), /* ms0 data1 */
+ MTK_PIN_PUPD_SPEC(59, 0xc20, 2, 1, 0), /* ms0 data2 */
+ MTK_PIN_PUPD_SPEC(60, 0xc20, 2, 1, 0), /* ms0 data3 */
+ MTK_PIN_PUPD_SPEC(61, 0xc20, 2, 1, 0), /* ms0 data4 */
+ MTK_PIN_PUPD_SPEC(62, 0xc20, 2, 1, 0), /* ms0 data5 */
+ MTK_PIN_PUPD_SPEC(63, 0xc20, 2, 1, 0), /* ms0 data6 */
+ MTK_PIN_PUPD_SPEC(64, 0xc20, 2, 1, 0), /* ms0 data7 */
+
+ MTK_PIN_PUPD_SPEC(78, 0xc50, 2, 1, 0), /* ms1 cmd */
+ MTK_PIN_PUPD_SPEC(73, 0xd20, 2, 1, 0), /* ms1 dat0 */
+ MTK_PIN_PUPD_SPEC(74, 0xd20, 6, 5, 4), /* ms1 dat1 */
+ MTK_PIN_PUPD_SPEC(75, 0xd20, 10, 9, 8), /* ms1 dat2 */
+ MTK_PIN_PUPD_SPEC(76, 0xd20, 14, 13, 12), /* ms1 dat3 */
+ MTK_PIN_PUPD_SPEC(77, 0xc40, 2, 1, 0), /* ms1 clk */
+
+ MTK_PIN_PUPD_SPEC(100, 0xd40, 2, 1, 0), /* ms2 dat0 */
+ MTK_PIN_PUPD_SPEC(101, 0xd40, 6, 5, 4), /* ms2 dat1 */
+ MTK_PIN_PUPD_SPEC(102, 0xd40, 10, 9, 8), /* ms2 dat2 */
+ MTK_PIN_PUPD_SPEC(103, 0xd40, 14, 13, 12), /* ms2 dat3 */
+ MTK_PIN_PUPD_SPEC(104, 0xc80, 2, 1, 0), /* ms2 clk */
+ MTK_PIN_PUPD_SPEC(105, 0xc90, 2, 1, 0), /* ms2 cmd */
+
+ MTK_PIN_PUPD_SPEC(22, 0xd60, 2, 1, 0), /* ms3 dat0 */
+ MTK_PIN_PUPD_SPEC(23, 0xd60, 6, 5, 4), /* ms3 dat1 */
+ MTK_PIN_PUPD_SPEC(24, 0xd60, 10, 9, 8), /* ms3 dat2 */
+ MTK_PIN_PUPD_SPEC(25, 0xd60, 14, 13, 12), /* ms3 dat3 */
+ MTK_PIN_PUPD_SPEC(26, 0xcc0, 2, 1, 0), /* ms3 clk */
+ MTK_PIN_PUPD_SPEC(27, 0xcd0, 2, 1, 0) /* ms3 cmd */
+};
+
+static int spec_pull_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, bool isup, unsigned int r1r0)
+{
+ unsigned int i;
+ unsigned int reg_pupd, reg_set, reg_rst;
+ unsigned int bit_pupd, bit_r0, bit_r1;
+ const struct mtk_pin_spec_pupd_set *spec_pupd_pin;
+ bool find = false;
+
+ for (i = 0; i < ARRAY_SIZE(mt8173_spec_pupd); i++) {
+ if (pin == mt8173_spec_pupd[i].pin) {
+ find = true;
+ break;
+ }
+ }
+
+ if (!find)
+ return -EINVAL;
+
+ spec_pupd_pin = mt8173_spec_pupd + i;
+ reg_set = spec_pupd_pin->offset + align;
+ reg_rst = spec_pupd_pin->offset + (align << 1);
+
+ if (isup)
+ reg_pupd = reg_rst;
+ else
+ reg_pupd = reg_set;
+
+ bit_pupd = BIT(spec_pupd_pin->pupd_bit);
+ regmap_write(regmap, reg_pupd, bit_pupd);
+
+ bit_r0 = BIT(spec_pupd_pin->r0_bit);
+ bit_r1 = BIT(spec_pupd_pin->r1_bit);
+
+ switch (r1r0) {
+ case MTK_PUPD_SET_R1R0_00:
+ regmap_write(regmap, reg_rst, bit_r0);
+ regmap_write(regmap, reg_rst, bit_r1);
+ break;
+ case MTK_PUPD_SET_R1R0_01:
+ regmap_write(regmap, reg_set, bit_r0);
+ regmap_write(regmap, reg_rst, bit_r1);
+ break;
+ case MTK_PUPD_SET_R1R0_10:
+ regmap_write(regmap, reg_rst, bit_r0);
+ regmap_write(regmap, reg_set, bit_r1);
+ break;
+ case MTK_PUPD_SET_R1R0_11:
+ regmap_write(regmap, reg_set, bit_r0);
+ regmap_write(regmap, reg_set, bit_r1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct mtk_pin_ies_smt_set mt8173_ies_smt_set[] = {
+ MTK_PIN_IES_SMT_SET(0, 4, 0x930, 1),
+ MTK_PIN_IES_SMT_SET(5, 9, 0x930, 2),
+ MTK_PIN_IES_SMT_SET(10, 13, 0x930, 10),
+ MTK_PIN_IES_SMT_SET(14, 15, 0x940, 10),
+ MTK_PIN_IES_SMT_SET(16, 16, 0x930, 0),
+ MTK_PIN_IES_SMT_SET(17, 17, 0x950, 2),
+ MTK_PIN_IES_SMT_SET(18, 21, 0x940, 3),
+ MTK_PIN_IES_SMT_SET(29, 32, 0x930, 3),
+ MTK_PIN_IES_SMT_SET(33, 33, 0x930, 4),
+ MTK_PIN_IES_SMT_SET(34, 36, 0x930, 5),
+ MTK_PIN_IES_SMT_SET(37, 38, 0x930, 6),
+ MTK_PIN_IES_SMT_SET(39, 39, 0x930, 7),
+ MTK_PIN_IES_SMT_SET(40, 41, 0x930, 9),
+ MTK_PIN_IES_SMT_SET(42, 42, 0x940, 0),
+ MTK_PIN_IES_SMT_SET(43, 44, 0x930, 11),
+ MTK_PIN_IES_SMT_SET(45, 46, 0x930, 12),
+ MTK_PIN_IES_SMT_SET(57, 64, 0xc20, 13),
+ MTK_PIN_IES_SMT_SET(65, 65, 0xc10, 13),
+ MTK_PIN_IES_SMT_SET(66, 66, 0xc00, 13),
+ MTK_PIN_IES_SMT_SET(67, 67, 0xd10, 13),
+ MTK_PIN_IES_SMT_SET(68, 68, 0xd00, 13),
+ MTK_PIN_IES_SMT_SET(69, 72, 0x940, 14),
+ MTK_PIN_IES_SMT_SET(73, 76, 0xc60, 13),
+ MTK_PIN_IES_SMT_SET(77, 77, 0xc40, 13),
+ MTK_PIN_IES_SMT_SET(78, 78, 0xc50, 13),
+ MTK_PIN_IES_SMT_SET(79, 82, 0x940, 15),
+ MTK_PIN_IES_SMT_SET(83, 83, 0x950, 0),
+ MTK_PIN_IES_SMT_SET(84, 85, 0x950, 1),
+ MTK_PIN_IES_SMT_SET(86, 91, 0x950, 2),
+ MTK_PIN_IES_SMT_SET(92, 92, 0x930, 13),
+ MTK_PIN_IES_SMT_SET(93, 95, 0x930, 14),
+ MTK_PIN_IES_SMT_SET(96, 99, 0x930, 15),
+ MTK_PIN_IES_SMT_SET(100, 103, 0xca0, 13),
+ MTK_PIN_IES_SMT_SET(104, 104, 0xc80, 13),
+ MTK_PIN_IES_SMT_SET(105, 105, 0xc90, 13),
+ MTK_PIN_IES_SMT_SET(106, 107, 0x940, 4),
+ MTK_PIN_IES_SMT_SET(108, 112, 0x940, 1),
+ MTK_PIN_IES_SMT_SET(113, 116, 0x940, 2),
+ MTK_PIN_IES_SMT_SET(117, 118, 0x940, 5),
+ MTK_PIN_IES_SMT_SET(119, 124, 0x940, 6),
+ MTK_PIN_IES_SMT_SET(125, 126, 0x940, 7),
+ MTK_PIN_IES_SMT_SET(127, 127, 0x940, 0),
+ MTK_PIN_IES_SMT_SET(128, 128, 0x950, 8),
+ MTK_PIN_IES_SMT_SET(129, 130, 0x950, 9),
+ MTK_PIN_IES_SMT_SET(131, 132, 0x950, 8),
+ MTK_PIN_IES_SMT_SET(133, 134, 0x910, 8)
+};
+
+static int spec_ies_smt_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, int value)
+{
+ unsigned int i, reg_addr, bit;
+ bool find = false;
+
+ for (i = 0; i < ARRAY_SIZE(mt8173_ies_smt_set); i++) {
+ if (pin >= mt8173_ies_smt_set[i].start &&
+ pin <= mt8173_ies_smt_set[i].end) {
+ find = true;
+ break;
+ }
+ }
+
+ if (!find)
+ return -EINVAL;
+
+ if (value)
+ reg_addr = mt8173_ies_smt_set[i].offset + align;
+ else
+ reg_addr = mt8173_ies_smt_set[i].offset + (align << 1);
+
+ bit = BIT(mt8173_ies_smt_set[i].bit);
+ regmap_write(regmap, reg_addr, bit);
+ return 0;
+}
+
+static const struct mtk_drv_group_desc mt8173_drv_grp[] = {
+ /* 0E4E8SR 4/8/12/16 */
+ MTK_DRV_GRP(4, 16, 1, 2, 4),
+ /* 0E2E4SR 2/4/6/8 */
+ MTK_DRV_GRP(2, 8, 1, 2, 2),
+ /* E8E4E2 2/4/6/8/10/12/14/16 */
+ MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
+static const struct mtk_pin_drv_grp mt8173_pin_drv[] = {
+ MTK_PIN_DRV_GRP(0, DRV_BASE+0x20, 12, 0),
+ MTK_PIN_DRV_GRP(1, DRV_BASE+0x20, 12, 0),
+ MTK_PIN_DRV_GRP(2, DRV_BASE+0x20, 12, 0),
+ MTK_PIN_DRV_GRP(3, DRV_BASE+0x20, 12, 0),
+ MTK_PIN_DRV_GRP(4, DRV_BASE+0x20, 12, 0),
+ MTK_PIN_DRV_GRP(5, DRV_BASE+0x30, 0, 0),
+ MTK_PIN_DRV_GRP(6, DRV_BASE+0x30, 0, 0),
+ MTK_PIN_DRV_GRP(7, DRV_BASE+0x30, 0, 0),
+ MTK_PIN_DRV_GRP(8, DRV_BASE+0x30, 0, 0),
+ MTK_PIN_DRV_GRP(9, DRV_BASE+0x30, 0, 0),
+ MTK_PIN_DRV_GRP(10, DRV_BASE+0x30, 4, 1),
+ MTK_PIN_DRV_GRP(11, DRV_BASE+0x30, 4, 1),
+ MTK_PIN_DRV_GRP(12, DRV_BASE+0x30, 4, 1),
+ MTK_PIN_DRV_GRP(13, DRV_BASE+0x30, 4, 1),
+ MTK_PIN_DRV_GRP(14, DRV_BASE+0x40, 8, 1),
+ MTK_PIN_DRV_GRP(15, DRV_BASE+0x40, 8, 1),
+ MTK_PIN_DRV_GRP(16, DRV_BASE, 8, 1),
+ MTK_PIN_DRV_GRP(17, 0xce0, 8, 2),
+ MTK_PIN_DRV_GRP(22, 0xce0, 8, 2),
+ MTK_PIN_DRV_GRP(23, 0xce0, 8, 2),
+ MTK_PIN_DRV_GRP(24, 0xce0, 8, 2),
+ MTK_PIN_DRV_GRP(25, 0xce0, 8, 2),
+ MTK_PIN_DRV_GRP(26, 0xcc0, 8, 2),
+ MTK_PIN_DRV_GRP(27, 0xcd0, 8, 2),
+ MTK_PIN_DRV_GRP(28, 0xd70, 8, 2),
+ MTK_PIN_DRV_GRP(29, DRV_BASE+0x80, 12, 1),
+ MTK_PIN_DRV_GRP(30, DRV_BASE+0x80, 12, 1),
+ MTK_PIN_DRV_GRP(31, DRV_BASE+0x80, 12, 1),
+ MTK_PIN_DRV_GRP(32, DRV_BASE+0x80, 12, 1),
+ MTK_PIN_DRV_GRP(33, DRV_BASE+0x10, 12, 1),
+ MTK_PIN_DRV_GRP(34, DRV_BASE+0x10, 8, 1),
+ MTK_PIN_DRV_GRP(35, DRV_BASE+0x10, 8, 1),
+ MTK_PIN_DRV_GRP(36, DRV_BASE+0x10, 8, 1),
+ MTK_PIN_DRV_GRP(37, DRV_BASE+0x10, 4, 1),
+ MTK_PIN_DRV_GRP(38, DRV_BASE+0x10, 4, 1),
+ MTK_PIN_DRV_GRP(39, DRV_BASE+0x20, 0, 0),
+ MTK_PIN_DRV_GRP(40, DRV_BASE+0x20, 8, 0),
+ MTK_PIN_DRV_GRP(41, DRV_BASE+0x20, 8, 0),
+ MTK_PIN_DRV_GRP(42, DRV_BASE+0x50, 8, 1),
+ MTK_PIN_DRV_GRP(57, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(58, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(59, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(60, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(61, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(62, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(63, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(64, 0xc20, 8, 2),
+ MTK_PIN_DRV_GRP(65, 0xc00, 8, 2),
+ MTK_PIN_DRV_GRP(66, 0xc10, 8, 2),
+ MTK_PIN_DRV_GRP(67, 0xd10, 8, 2),
+ MTK_PIN_DRV_GRP(68, 0xd00, 8, 2),
+ MTK_PIN_DRV_GRP(69, DRV_BASE+0x80, 0, 1),
+ MTK_PIN_DRV_GRP(70, DRV_BASE+0x80, 0, 1),
+ MTK_PIN_DRV_GRP(71, DRV_BASE+0x80, 0, 1),
+ MTK_PIN_DRV_GRP(72, DRV_BASE+0x80, 0, 1),
+ MTK_PIN_DRV_GRP(73, 0xc60, 8, 2),
+ MTK_PIN_DRV_GRP(74, 0xc60, 8, 2),
+ MTK_PIN_DRV_GRP(75, 0xc60, 8, 2),
+ MTK_PIN_DRV_GRP(76, 0xc60, 8, 2),
+ MTK_PIN_DRV_GRP(77, 0xc40, 8, 2),
+ MTK_PIN_DRV_GRP(78, 0xc50, 8, 2),
+ MTK_PIN_DRV_GRP(79, DRV_BASE+0x70, 12, 1),
+ MTK_PIN_DRV_GRP(80, DRV_BASE+0x70, 12, 1),
+ MTK_PIN_DRV_GRP(81, DRV_BASE+0x70, 12, 1),
+ MTK_PIN_DRV_GRP(82, DRV_BASE+0x70, 12, 1),
+ MTK_PIN_DRV_GRP(83, DRV_BASE, 4, 1),
+ MTK_PIN_DRV_GRP(84, DRV_BASE, 0, 1),
+ MTK_PIN_DRV_GRP(85, DRV_BASE, 0, 1),
+ MTK_PIN_DRV_GRP(85, DRV_BASE+0x60, 8, 1),
+ MTK_PIN_DRV_GRP(86, DRV_BASE+0x60, 8, 1),
+ MTK_PIN_DRV_GRP(87, DRV_BASE+0x60, 8, 1),
+ MTK_PIN_DRV_GRP(88, DRV_BASE+0x60, 8, 1),
+ MTK_PIN_DRV_GRP(89, DRV_BASE+0x60, 8, 1),
+ MTK_PIN_DRV_GRP(90, DRV_BASE+0x60, 8, 1),
+ MTK_PIN_DRV_GRP(91, DRV_BASE+0x60, 8, 1),
+ MTK_PIN_DRV_GRP(92, DRV_BASE+0x60, 4, 0),
+ MTK_PIN_DRV_GRP(93, DRV_BASE+0x60, 0, 0),
+ MTK_PIN_DRV_GRP(94, DRV_BASE+0x60, 0, 0),
+ MTK_PIN_DRV_GRP(95, DRV_BASE+0x60, 0, 0),
+ MTK_PIN_DRV_GRP(96, DRV_BASE+0x80, 8, 1),
+ MTK_PIN_DRV_GRP(97, DRV_BASE+0x80, 8, 1),
+ MTK_PIN_DRV_GRP(98, DRV_BASE+0x80, 8, 1),
+ MTK_PIN_DRV_GRP(99, DRV_BASE+0x80, 8, 1),
+ MTK_PIN_DRV_GRP(100, 0xca0, 8, 2),
+ MTK_PIN_DRV_GRP(101, 0xca0, 8, 2),
+ MTK_PIN_DRV_GRP(102, 0xca0, 8, 2),
+ MTK_PIN_DRV_GRP(103, 0xca0, 8, 2),
+ MTK_PIN_DRV_GRP(104, 0xc80, 8, 2),
+ MTK_PIN_DRV_GRP(105, 0xc90, 8, 2),
+ MTK_PIN_DRV_GRP(108, DRV_BASE+0x50, 0, 1),
+ MTK_PIN_DRV_GRP(109, DRV_BASE+0x50, 0, 1),
+ MTK_PIN_DRV_GRP(110, DRV_BASE+0x50, 0, 1),
+ MTK_PIN_DRV_GRP(111, DRV_BASE+0x50, 0, 1),
+ MTK_PIN_DRV_GRP(112, DRV_BASE+0x50, 0, 1),
+ MTK_PIN_DRV_GRP(113, DRV_BASE+0x80, 4, 1),
+ MTK_PIN_DRV_GRP(114, DRV_BASE+0x80, 4, 1),
+ MTK_PIN_DRV_GRP(115, DRV_BASE+0x80, 4, 1),
+ MTK_PIN_DRV_GRP(116, DRV_BASE+0x80, 4, 1),
+ MTK_PIN_DRV_GRP(117, DRV_BASE+0x90, 0, 1),
+ MTK_PIN_DRV_GRP(118, DRV_BASE+0x90, 0, 1),
+ MTK_PIN_DRV_GRP(119, DRV_BASE+0x50, 4, 1),
+ MTK_PIN_DRV_GRP(120, DRV_BASE+0x50, 4, 1),
+ MTK_PIN_DRV_GRP(121, DRV_BASE+0x50, 4, 1),
+ MTK_PIN_DRV_GRP(122, DRV_BASE+0x50, 4, 1),
+ MTK_PIN_DRV_GRP(123, DRV_BASE+0x50, 4, 1),
+ MTK_PIN_DRV_GRP(124, DRV_BASE+0x50, 4, 1),
+ MTK_PIN_DRV_GRP(125, DRV_BASE+0x30, 12, 1),
+ MTK_PIN_DRV_GRP(126, DRV_BASE+0x30, 12, 1),
+ MTK_PIN_DRV_GRP(127, DRV_BASE+0x50, 8, 1),
+ MTK_PIN_DRV_GRP(128, DRV_BASE+0x40, 0, 1),
+ MTK_PIN_DRV_GRP(129, DRV_BASE+0x40, 0, 1),
+ MTK_PIN_DRV_GRP(130, DRV_BASE+0x40, 0, 1),
+ MTK_PIN_DRV_GRP(131, DRV_BASE+0x40, 0, 1),
+ MTK_PIN_DRV_GRP(132, DRV_BASE+0x40, 0, 1)
+};
+
+static const struct mtk_pinctrl_devdata mt8173_pinctrl_data = {
+ .pins = mtk_pins_mt8173,
+ .npins = ARRAY_SIZE(mtk_pins_mt8173),
+ .grp_desc = mt8173_drv_grp,
+ .n_grp_cls = ARRAY_SIZE(mt8173_drv_grp),
+ .pin_drv_grp = mt8173_pin_drv,
+ .n_pin_drv_grps = ARRAY_SIZE(mt8173_pin_drv),
+ .spec_pull_set = spec_pull_set,
+ .spec_ies_smt_set = spec_ies_smt_set,
+ .dir_offset = 0x0000,
+ .pullen_offset = 0x0100,
+ .pullsel_offset = 0x0200,
+ .dout_offset = 0x0400,
+ .din_offset = 0x0500,
+ .pinmux_offset = 0x0600,
+ .type1_start = 135,
+ .type1_end = 135,
+ .port_shf = 4,
+ .port_mask = 0xf,
+ .port_align = 4,
+ .eint_offsets = {
+ .name = "mt8173_eint",
+ .stat = 0x000,
+ .ack = 0x040,
+ .mask = 0x080,
+ .mask_set = 0x0c0,
+ .mask_clr = 0x100,
+ .sens = 0x140,
+ .sens_set = 0x180,
+ .sens_clr = 0x1c0,
+ .soft = 0x200,
+ .soft_set = 0x240,
+ .soft_clr = 0x280,
+ .pol = 0x300,
+ .pol_set = 0x340,
+ .pol_clr = 0x380,
+ .dom_en = 0x400,
+ .dbnc_ctrl = 0x500,
+ .dbnc_set = 0x600,
+ .dbnc_clr = 0x700,
+ .port_mask = 7,
+ .ports = 6,
+ },
+ .ap_num = 224,
+ .db_cnt = 16,
+};
+
+static int mt8173_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_pctrl_init(pdev, &mt8173_pinctrl_data);
+}
+
+static const struct of_device_id mt8173_pctrl_match[] = {
+ {
+ .compatible = "mediatek,mt8173-pinctrl",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt8173_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+ .probe = mt8173_pinctrl_probe,
+ .driver = {
+ .name = "mediatek-mt8173-pinctrl",
+ .of_match_table = mt8173_pctrl_match,
+ },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mtk_pinctrl_driver);
+}
+
+module_init(mtk_pinctrl_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
+MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
new file mode 100644
index 000000000000..474812e2b0cb
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -0,0 +1,1259 @@
+/*
+ * mt65xx pinctrl driver based on Allwinner A1X pinctrl driver.
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-mtk-common.h"
+
+#define MAX_GPIO_MODE_PER_REG 5
+#define GPIO_MODE_BITS 3
+
+static const char * const mtk_gpio_functions[] = {
+ "func0", "func1", "func2", "func3",
+ "func4", "func5", "func6", "func7",
+};
+
+/*
+ * There are two base addresses for pull-related configuration
+ * in mt8135, and different GPIO pins use different base addresses.
+ * Pins whose number is greater than or equal to type1_start and less
+ * than type1_end use the second base address.
+ */
+static struct regmap *mtk_get_regmap(struct mtk_pinctrl *pctl,
+ unsigned long pin)
+{
+ if (pin >= pctl->devdata->type1_start && pin < pctl->devdata->type1_end)
+ return pctl->regmap2;
+ return pctl->regmap1;
+}
+
+static unsigned int mtk_get_port(struct mtk_pinctrl *pctl, unsigned long pin)
+{
+ /* Different SoCs have different masks and port shifts. */
+ return ((pin >> 4) & pctl->devdata->port_mask)
+ << pctl->devdata->port_shf;
+}
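/*
 * Worked example (values taken from the mt8135 data above: port_shf = 4,
 * port_mask = 0xf, type1_start = 34, type1_end = 149): pin 37 falls in the
 * type1 range, so mtk_get_regmap() returns regmap2, and mtk_get_port()
 * returns ((37 >> 4) & 0xf) << 4 = 0x20; callers then address bit
 * (37 & 0xf) = 5 within the selected register.
 */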
+
+static int mtk_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset,
+ bool input)
+{
+ unsigned int reg_addr;
+ unsigned int bit;
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
+ bit = BIT(offset & 0xf);
+
+ if (input)
+ /* Different SoCs have different alignment offsets. */
+ reg_addr = CLR_ADDR(reg_addr, pctl);
+ else
+ reg_addr = SET_ADDR(reg_addr, pctl);
+
+ regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit);
+ return 0;
+}
+
+static void mtk_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ unsigned int reg_addr;
+ unsigned int bit;
+ struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
+
+ reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dout_offset;
+ bit = BIT(offset & 0xf);
+
+ if (value)
+ reg_addr = SET_ADDR(reg_addr, pctl);
+ else
+ reg_addr = CLR_ADDR(reg_addr, pctl);
+
+ regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit);
+}
+
+static void mtk_pconf_set_ies_smt(struct mtk_pinctrl *pctl, unsigned pin,
+ int value, enum pin_config_param param)
+{
+ unsigned int reg_addr, offset;
+ unsigned int bit;
+ int ret;
+
+ /*
+ * Because some pins are irregular, their input enable and SMT
+ * control registers are discontinuous, but they are mapped together,
+ * so we need this special handling.
+ */
+ if (pctl->devdata->spec_ies_smt_set) {
+ ret = pctl->devdata->spec_ies_smt_set(mtk_get_regmap(pctl, pin),
+ pin, pctl->devdata->port_align, value);
+ if (!ret)
+ return;
+ }
+
+ bit = BIT(pin & 0xf);
+
+ if (param == PIN_CONFIG_INPUT_ENABLE)
+ offset = pctl->devdata->ies_offset;
+ else
+ offset = pctl->devdata->smt_offset;
+
+ if (value)
+ reg_addr = SET_ADDR(mtk_get_port(pctl, pin) + offset, pctl);
+ else
+ reg_addr = CLR_ADDR(mtk_get_port(pctl, pin) + offset, pctl);
+
+ regmap_write(mtk_get_regmap(pctl, pin), reg_addr, bit);
+}
+
+static const struct mtk_pin_drv_grp *mtk_find_pin_drv_grp_by_pin(
+ struct mtk_pinctrl *pctl, unsigned long pin)
+{
+ int i;
+
+ for (i = 0; i < pctl->devdata->n_pin_drv_grps; i++) {
+ const struct mtk_pin_drv_grp *pin_drv =
+ pctl->devdata->pin_drv_grp + i;
+ if (pin == pin_drv->pin)
+ return pin_drv;
+ }
+
+ return NULL;
+}
+
+static int mtk_pconf_set_driving(struct mtk_pinctrl *pctl,
+ unsigned int pin, unsigned char driving)
+{
+ const struct mtk_pin_drv_grp *pin_drv;
+ unsigned int val;
+ unsigned int bits, mask, shift;
+ const struct mtk_drv_group_desc *drv_grp;
+
+ if (pin >= pctl->devdata->npins)
+ return -EINVAL;
+
+ pin_drv = mtk_find_pin_drv_grp_by_pin(pctl, pin);
+ if (!pin_drv || pin_drv->grp > pctl->devdata->n_grp_cls)
+ return -EINVAL;
+
+ drv_grp = pctl->devdata->grp_desc + pin_drv->grp;
+ if (driving >= drv_grp->min_drv && driving <= drv_grp->max_drv
+ && !(driving % drv_grp->step)) {
+ val = driving / drv_grp->step - 1;
+ bits = drv_grp->high_bit - drv_grp->low_bit + 1;
+ mask = BIT(bits) - 1;
+ shift = pin_drv->bit + drv_grp->low_bit;
+ mask <<= shift;
+ val <<= shift;
+ return regmap_update_bits(mtk_get_regmap(pctl, pin),
+ pin_drv->offset, mask, val);
+ }
+
+ return -EINVAL;
+}
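/*
 * Worked example, assuming MTK_DRV_GRP(min, max, low_bit, high_bit, step)
 * argument order for the helper defined in pinctrl-mtk-common.h: the
 * mt8135 "E8E4 4/8/12/16" group above would then be min_drv = 4,
 * max_drv = 16, low_bit = 1, high_bit = 2, step = 4. Requesting a drive
 * strength of 8 on a pin of that group gives val = 8 / 4 - 1 = 1 and
 * bits = 2 - 1 + 1 = 2, so a 2-bit mask and val are shifted by
 * (pin_drv->bit + 1) before the regmap_update_bits() call.
 */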
+
+static int mtk_pconf_set_pull_select(struct mtk_pinctrl *pctl,
+ unsigned int pin, bool enable, bool isup, unsigned int arg)
+{
+ unsigned int bit;
+ unsigned int reg_pullen, reg_pullsel;
+ int ret;
+
+ /* Some pins' pull settings are very different:
+ * they have separate pull up/down bits and R0/R1
+ * resistor bits, so we need this special handling.
+ */
+ if (pctl->devdata->spec_pull_set) {
+ ret = pctl->devdata->spec_pull_set(mtk_get_regmap(pctl, pin),
+ pin, pctl->devdata->port_align, isup, arg);
+ if (!ret)
+ return 0;
+ }
+
+ /* For generic pull config, default arg value should be 0 or 1. */
+ if (arg != 0 && arg != 1) {
+ dev_err(pctl->dev, "invalid pull-up argument %d on pin %d .\n",
+ arg, pin);
+ return -EINVAL;
+ }
+
+ bit = BIT(pin & 0xf);
+ if (enable)
+ reg_pullen = SET_ADDR(mtk_get_port(pctl, pin) +
+ pctl->devdata->pullen_offset, pctl);
+ else
+ reg_pullen = CLR_ADDR(mtk_get_port(pctl, pin) +
+ pctl->devdata->pullen_offset, pctl);
+
+ if (isup)
+ reg_pullsel = SET_ADDR(mtk_get_port(pctl, pin) +
+ pctl->devdata->pullsel_offset, pctl);
+ else
+ reg_pullsel = CLR_ADDR(mtk_get_port(pctl, pin) +
+ pctl->devdata->pullsel_offset, pctl);
+
+ regmap_write(mtk_get_regmap(pctl, pin), reg_pullen, bit);
+ regmap_write(mtk_get_regmap(pctl, pin), reg_pullsel, bit);
+ return 0;
+}
+
+static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
+ unsigned int pin, enum pin_config_param param,
+ enum pin_config_param arg)
+{
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ mtk_pconf_set_pull_select(pctl, pin, false, false, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mtk_pconf_set_pull_select(pctl, pin, true, true, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ mtk_pconf_set_ies_smt(pctl, pin, arg, param);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ mtk_gpio_set(pctl->chip, pin, arg);
+ mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ mtk_pconf_set_ies_smt(pctl, pin, arg, param);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ mtk_pconf_set_driving(pctl, pin, arg);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mtk_pconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned group,
+ unsigned long *config)
+{
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *config = pctl->groups[group].config;
+
+ return 0;
+}
+
+static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mtk_pinctrl_group *g = &pctl->groups[group];
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ mtk_pconf_parse_conf(pctldev, g->pin,
+ pinconf_to_config_param(configs[i]),
+ pinconf_to_config_argument(configs[i]));
+
+ g->config = configs[i];
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops mtk_pconf_ops = {
+ .pin_config_group_get = mtk_pconf_group_get,
+ .pin_config_group_set = mtk_pconf_group_set,
+};
+
+static struct mtk_pinctrl_group *
+mtk_pctrl_find_group_by_pin(struct mtk_pinctrl *pctl, u32 pin)
+{
+ int i;
+
+ for (i = 0; i < pctl->ngroups; i++) {
+ struct mtk_pinctrl_group *grp = pctl->groups + i;
+
+ if (grp->pin == pin)
+ return grp;
+ }
+
+ return NULL;
+}
+
+static const struct mtk_desc_function *mtk_pctrl_find_function_by_pin(
+ struct mtk_pinctrl *pctl, u32 pin_num, u32 fnum)
+{
+ const struct mtk_desc_pin *pin = pctl->devdata->pins + pin_num;
+ const struct mtk_desc_function *func = pin->functions;
+
+ while (func && func->name) {
+ if (func->muxval == fnum)
+ return func;
+ func++;
+ }
+
+ return NULL;
+}
+
+static bool mtk_pctrl_is_function_valid(struct mtk_pinctrl *pctl,
+ u32 pin_num, u32 fnum)
+{
+ int i;
+
+ for (i = 0; i < pctl->devdata->npins; i++) {
+ const struct mtk_desc_pin *pin = pctl->devdata->pins + i;
+
+ if (pin->pin.number == pin_num) {
+ const struct mtk_desc_function *func =
+ pin->functions;
+
+ while (func && func->name) {
+ if (func->muxval == fnum)
+ return true;
+ func++;
+ }
+
+ break;
+ }
+ }
+
+ return false;
+}
+
+static int mtk_pctrl_dt_node_to_map_func(struct mtk_pinctrl *pctl,
+ u32 pin, u32 fnum, struct mtk_pinctrl_group *grp,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ bool ret;
+
+ if (*num_maps == *reserved_maps)
+ return -ENOSPC;
+
+ (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[*num_maps].data.mux.group = grp->name;
+
+ ret = mtk_pctrl_is_function_valid(pctl, pin, fnum);
+ if (!ret) {
+ dev_err(pctl->dev, "invalid function %d on pin %d .\n",
+ fnum, pin);
+ return -EINVAL;
+ }
+
+ (*map)[*num_maps].data.mux.function = mtk_gpio_functions[fnum];
+ (*num_maps)++;
+
+ return 0;
+}
+
+static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ struct property *pins;
+ u32 pinfunc, pin, func;
+ int num_pins, num_funcs, maps_per_pin;
+ unsigned long *configs;
+ unsigned int num_configs;
+ bool has_config = false;
+ int i, err;
+ unsigned reserve = 0;
+ struct mtk_pinctrl_group *grp;
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ pins = of_find_property(node, "pinmux", NULL);
+ if (!pins) {
+ dev_err(pctl->dev, "missing pins property in node %s .\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
+ &num_configs);
+ if (err)
+ return err;
+
+ if (num_configs)
+ has_config = true;
+
+ num_pins = pins->length / sizeof(u32);
+ num_funcs = num_pins;
+ maps_per_pin = 0;
+ if (num_funcs)
+ maps_per_pin++;
+ if (has_config && num_pins >= 1)
+ maps_per_pin++;
+
+ if (!num_pins || !maps_per_pin)
+ return -EINVAL;
+
+ reserve = num_pins * maps_per_pin;
+
+ err = pinctrl_utils_reserve_map(pctldev, map,
+ reserved_maps, num_maps, reserve);
+ if (err < 0)
+ goto fail;
+
+ for (i = 0; i < num_pins; i++) {
+ err = of_property_read_u32_index(node, "pinmux",
+ i, &pinfunc);
+ if (err)
+ goto fail;
+
+ pin = MTK_GET_PIN_NO(pinfunc);
+ func = MTK_GET_PIN_FUNC(pinfunc);
+
+ if (pin >= pctl->devdata->npins ||
+ func >= ARRAY_SIZE(mtk_gpio_functions)) {
+ dev_err(pctl->dev, "invalid pins value.\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ grp = mtk_pctrl_find_group_by_pin(pctl, pin);
+ if (!grp) {
+ dev_err(pctl->dev, "unable to match pin %d to group\n",
+ pin);
+ return -EINVAL;
+ }
+
+ err = mtk_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
+ reserved_maps, num_maps);
+ if (err < 0)
+ goto fail;
+
+ if (has_config) {
+ err = pinctrl_utils_add_map_configs(pctldev, map,
+ reserved_maps, num_maps, grp->name,
+ configs, num_configs,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+ if (err < 0)
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return err;
+}
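+
+/*
+ * Illustrative device-tree fragment (not part of this patch) showing the
+ * binding the parser above expects: each "pinmux" entry packs a pin number
+ * and function selector, and the remaining generic pinconf properties are
+ * applied to the same group. Node and macro names below are hypothetical.
+ *
+ *	i2c0_pins: i2c0 {
+ *		pins_bus {
+ *			pinmux = <MT8135_PIN_100_SDA0__FUNC_SDA0>,
+ *				 <MT8135_PIN_101_SCL0__FUNC_SCL0>;
+ *			bias-pull-up;
+ *		};
+ *	};
+ */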
+
+static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ struct device_node *np;
+ unsigned reserved_maps;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ reserved_maps = 0;
+
+ for_each_child_of_node(np_config, np) {
+ ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps);
+ if (ret < 0) {
+ pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mtk_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->ngroups;
+}
+
+static const char *mtk_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->groups[group].name;
+}
+
+static int mtk_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned *)&pctl->groups[group].pin;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops mtk_pctrl_ops = {
+ .dt_node_to_map = mtk_pctrl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+ .get_groups_count = mtk_pctrl_get_groups_count,
+ .get_group_name = mtk_pctrl_get_group_name,
+ .get_group_pins = mtk_pctrl_get_group_pins,
+};
+
+static int mtk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(mtk_gpio_functions);
+}
+
+static const char *mtk_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return mtk_gpio_functions[selector];
+}
+
+static int mtk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->grp_names;
+ *num_groups = pctl->ngroups;
+
+ return 0;
+}
+
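+/*
+ * Descriptive note: each pinmux register packs MAX_GPIO_MODE_PER_REG mode
+ * fields of GPIO_MODE_BITS bits each, so pin / MAX_GPIO_MODE_PER_REG selects
+ * the register (scaled by port_shf) and pin % MAX_GPIO_MODE_PER_REG selects
+ * the field within it.
+ */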
+static int mtk_pmx_set_mode(struct pinctrl_dev *pctldev,
+ unsigned long pin, unsigned long mode)
+{
+ unsigned int reg_addr;
+ unsigned char bit;
+ unsigned int val;
+ unsigned int mask = (1L << GPIO_MODE_BITS) - 1;
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ reg_addr = ((pin / MAX_GPIO_MODE_PER_REG) << pctl->devdata->port_shf)
+ + pctl->devdata->pinmux_offset;
+
+ bit = pin % MAX_GPIO_MODE_PER_REG;
+ mask <<= (GPIO_MODE_BITS * bit);
+ val = (mode << (GPIO_MODE_BITS * bit));
+ return regmap_update_bits(mtk_get_regmap(pctl, pin),
+ reg_addr, mask, val);
+}
+
+static const struct mtk_desc_pin *
+mtk_find_pin_by_eint_num(struct mtk_pinctrl *pctl, unsigned int eint_num)
+{
+ int i;
+ const struct mtk_desc_pin *pin;
+
+ for (i = 0; i < pctl->devdata->npins; i++) {
+ pin = pctl->devdata->pins + i;
+ if (pin->eint.eintnum == eint_num)
+ return pin;
+ }
+
+ return NULL;
+}
+
+static int mtk_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ bool ret;
+ const struct mtk_desc_function *desc;
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mtk_pinctrl_group *g = pctl->groups + group;
+
+ ret = mtk_pctrl_is_function_valid(pctl, g->pin, function);
+ if (!ret) {
+ dev_err(pctl->dev, "invaild function %d on group %d .\n",
+ function, group);
+ return -EINVAL;
+ }
+
+ desc = mtk_pctrl_find_function_by_pin(pctl, g->pin, function);
+ if (!desc)
+ return -EINVAL;
+ mtk_pmx_set_mode(pctldev, g->pin, desc->muxval);
+ return 0;
+}
+
+static const struct pinmux_ops mtk_pmx_ops = {
+ .get_functions_count = mtk_pmx_get_funcs_cnt,
+ .get_function_name = mtk_pmx_get_func_name,
+ .get_function_groups = mtk_pmx_get_func_groups,
+ .set_mux = mtk_pmx_set_mux,
+ .gpio_set_direction = mtk_pmx_gpio_set_direction,
+};
+
+static int mtk_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void mtk_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int mtk_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int mtk_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ mtk_gpio_set(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int reg_addr;
+ unsigned int bit;
+ unsigned int read_val = 0;
+
+ struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
+
+ reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
+ bit = BIT(offset & 0xf);
+ regmap_read(pctl->regmap1, reg_addr, &read_val);
+ return !!(read_val & bit);
+}
+
+static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int reg_addr;
+ unsigned int bit;
+ unsigned int read_val = 0;
+ struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
+
+ if (mtk_gpio_get_direction(chip, offset))
+ reg_addr = mtk_get_port(pctl, offset) +
+ pctl->devdata->dout_offset;
+ else
+ reg_addr = mtk_get_port(pctl, offset) +
+ pctl->devdata->din_offset;
+
+ bit = BIT(offset & 0xf);
+ regmap_read(pctl->regmap1, reg_addr, &read_val);
+ return !!(read_val & bit);
+}
+
+static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ const struct mtk_desc_pin *pin;
+ struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
+ int irq;
+
+ pin = pctl->devdata->pins + offset;
+ if (pin->eint.eintnum == NO_EINT_SUPPORT)
+ return -EINVAL;
+
+ irq = irq_find_mapping(pctl->domain, pin->eint.eintnum);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
+}
+
+static int mtk_pinctrl_irq_request_resources(struct irq_data *d)
+{
+ struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ const struct mtk_desc_pin *pin;
+ int ret;
+
+ pin = mtk_find_pin_by_eint_num(pctl, d->hwirq);
+
+ if (!pin) {
+ dev_err(pctl->dev, "Can not find pin\n");
+ return -EINVAL;
+ }
+
+ ret = gpiochip_lock_as_irq(pctl->chip, pin->pin.number);
+ if (ret) {
+ dev_err(pctl->dev, "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ return ret;
+ }
+
+ /* set mux to INT mode */
+ mtk_pmx_set_mode(pctl->pctl_dev, pin->pin.number, pin->eint.eintmux);
+
+ return 0;
+}
+
+static void mtk_pinctrl_irq_release_resources(struct irq_data *d)
+{
+ struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, d->hwirq);
+
+ if (!pin) {
+ dev_err(pctl->dev, "Can not find pin\n");
+ return;
+ }
+
+ gpiochip_unlock_as_irq(pctl->chip, pin->pin.number);
+}
+
+static void __iomem *mtk_eint_get_offset(struct mtk_pinctrl *pctl,
+ unsigned int eint_num, unsigned int offset)
+{
+ unsigned int eint_base = 0;
+ void __iomem *reg;
+
+ if (eint_num >= pctl->devdata->ap_num)
+ eint_base = pctl->devdata->ap_num;
+
+ reg = pctl->eint_reg_base + offset + ((eint_num - eint_base) / 32) * 4;
+
+ return reg;
+}
+
+/*
+ * mtk_eint_can_en_debounce: check whether the EINT number can enable debounce
+ * @eint_num: the EINT number to check
+ */
+static unsigned int mtk_eint_can_en_debounce(struct mtk_pinctrl *pctl,
+ unsigned int eint_num)
+{
+ unsigned int sens;
+ unsigned int bit = BIT(eint_num % 32);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+
+ void __iomem *reg = mtk_eint_get_offset(pctl, eint_num,
+ eint_offsets->sens);
+
+ if (readl(reg) & bit)
+ sens = MT_LEVEL_SENSITIVE;
+ else
+ sens = MT_EDGE_SENSITIVE;
+
+ if ((eint_num < pctl->devdata->db_cnt) && (sens != MT_EDGE_SENSITIVE))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * mtk_eint_get_mask: get the current mask state of an EINT
+ * @eint_num: the EINT number to query
+ */
+static unsigned int mtk_eint_get_mask(struct mtk_pinctrl *pctl,
+ unsigned int eint_num)
+{
+ unsigned int bit = BIT(eint_num % 32);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+
+ void __iomem *reg = mtk_eint_get_offset(pctl, eint_num,
+ eint_offsets->mask);
+
+ return !!(readl(reg) & bit);
+}
+
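+/*
+ * Descriptive note: the EINT hardware here appears to latch only one edge
+ * polarity, so "both edges" is emulated in software -- the polarity is
+ * flipped to track the current GPIO level, and the loop below retries in
+ * case the line toggles while the polarity register is being rewritten.
+ */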
+static int mtk_eint_flip_edge(struct mtk_pinctrl *pctl, int hwirq)
+{
+ int start_level, curr_level;
+ unsigned int reg_offset;
+ const struct mtk_eint_offsets *eint_offsets = &(pctl->devdata->eint_offsets);
+ u32 mask = 1 << (hwirq & 0x1f);
+ u32 port = (hwirq >> 5) & eint_offsets->port_mask;
+ void __iomem *reg = pctl->eint_reg_base + (port << 2);
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, hwirq);
+ curr_level = mtk_gpio_get(pctl->chip, pin->pin.number);
+ do {
+ start_level = curr_level;
+ if (start_level)
+ reg_offset = eint_offsets->pol_clr;
+ else
+ reg_offset = eint_offsets->pol_set;
+ writel(mask, reg + reg_offset);
+
+ curr_level = mtk_gpio_get(pctl->chip, pin->pin.number);
+ } while (start_level != curr_level);
+
+ return start_level;
+}
+
+static void mtk_eint_mask(struct irq_data *d)
+{
+ struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
+ eint_offsets->mask_set);
+
+ writel(mask, reg);
+}
+
+static void mtk_eint_unmask(struct irq_data *d)
+{
+ struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
+ eint_offsets->mask_clr);
+
+ writel(mask, reg);
+
+ if (pctl->eint_dual_edges[d->hwirq])
+ mtk_eint_flip_edge(pctl, d->hwirq);
+}
+
+static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
+{
+ struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
+ int eint_num, virq, eint_offset;
+ unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
+ static const unsigned int dbnc_arr[] = {0, 1, 16, 32, 64, 128, 256};
+ const struct mtk_desc_pin *pin;
+ struct irq_data *d;
+
+ pin = pctl->devdata->pins + offset;
+ if (pin->eint.eintnum == NO_EINT_SUPPORT)
+ return -EINVAL;
+
+ eint_num = pin->eint.eintnum;
+ virq = irq_find_mapping(pctl->domain, eint_num);
+ eint_offset = (eint_num % 4) * 8;
+ d = irq_get_irq_data(virq);
+
+ set_offset = (eint_num / 4) * 4 + pctl->devdata->eint_offsets.dbnc_set;
+ clr_offset = (eint_num / 4) * 4 + pctl->devdata->eint_offsets.dbnc_clr;
+ if (!mtk_eint_can_en_debounce(pctl, eint_num))
+ return -ENOSYS;
+
+ dbnc = ARRAY_SIZE(dbnc_arr);
+ for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
+ if (debounce <= dbnc_arr[i]) {
+ dbnc = i;
+ break;
+ }
+ }
+
+ if (!mtk_eint_get_mask(pctl, eint_num)) {
+ mtk_eint_mask(d);
+ unmask = 1;
+ } else {
+ unmask = 0;
+ }
+
+ clr_bit = 0xff << eint_offset;
+ writel(clr_bit, pctl->eint_reg_base + clr_offset);
+
+ bit = ((dbnc << EINT_DBNC_SET_DBNC_BITS) | EINT_DBNC_SET_EN) <<
+ eint_offset;
+ rst = EINT_DBNC_RST_BIT << eint_offset;
+ writel(rst | bit, pctl->eint_reg_base + set_offset);
+
+ /*
+ * Delay a while (more than 2T) for the HW debounce counter reset to
+ * take effect.
+ */
+ udelay(1);
+ if (unmask == 1)
+ mtk_eint_unmask(d);
+
+ return 0;
+}
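+
+/*
+ * Illustrative note (not part of the driver): the debounce table above is
+ * searched for the first entry that is >= the requested value, so e.g. a
+ * request of 20 selects index 3 (the "32" entry), and anything above the
+ * largest entry leaves dbnc at ARRAY_SIZE(dbnc_arr). Units follow whatever
+ * the hardware debounce prescaler uses; they are not spelled out here.
+ */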
+
+static struct gpio_chip mtk_gpio_chip = {
+ .owner = THIS_MODULE,
+ .request = mtk_gpio_request,
+ .free = mtk_gpio_free,
+ .direction_input = mtk_gpio_direction_input,
+ .direction_output = mtk_gpio_direction_output,
+ .get = mtk_gpio_get,
+ .set = mtk_gpio_set,
+ .to_irq = mtk_gpio_to_irq,
+ .set_debounce = mtk_gpio_set_debounce,
+ .of_gpio_n_cells = 2,
+};
+
+static int mtk_eint_set_type(struct irq_data *d,
+ unsigned int type)
+{
+ struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg;
+
+ if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
+ ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
+ dev_err(pctl->dev, "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
+ d->irq, d->hwirq, type);
+ return -EINVAL;
+ }
+
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ pctl->eint_dual_edges[d->hwirq] = 1;
+ else
+ pctl->eint_dual_edges[d->hwirq] = 0;
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
+ reg = mtk_eint_get_offset(pctl, d->hwirq,
+ eint_offsets->pol_clr);
+ writel(mask, reg);
+ } else {
+ reg = mtk_eint_get_offset(pctl, d->hwirq,
+ eint_offsets->pol_set);
+ writel(mask, reg);
+ }
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
+ reg = mtk_eint_get_offset(pctl, d->hwirq,
+ eint_offsets->sens_clr);
+ writel(mask, reg);
+ } else {
+ reg = mtk_eint_get_offset(pctl, d->hwirq,
+ eint_offsets->sens_set);
+ writel(mask, reg);
+ }
+
+ if (pctl->eint_dual_edges[d->hwirq])
+ mtk_eint_flip_edge(pctl, d->hwirq);
+
+ return 0;
+}
+
+static void mtk_eint_ack(struct irq_data *d)
+{
+ struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
+ eint_offsets->ack);
+
+ writel(mask, reg);
+}
+
+static struct irq_chip mtk_pinctrl_irq_chip = {
+ .name = "mt-eint",
+ .irq_mask = mtk_eint_mask,
+ .irq_unmask = mtk_eint_unmask,
+ .irq_ack = mtk_eint_ack,
+ .irq_set_type = mtk_eint_set_type,
+ .irq_request_resources = mtk_pinctrl_irq_request_resources,
+ .irq_release_resources = mtk_pinctrl_irq_release_resources,
+};
+
+static unsigned int mtk_eint_init(struct mtk_pinctrl *pctl)
+{
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+ void __iomem *reg = pctl->eint_reg_base + eint_offsets->dom_en;
+ unsigned int i;
+
+ for (i = 0; i < pctl->devdata->ap_num; i += 32) {
+ writel(0xffffffff, reg);
+ reg += 4;
+ }
+ return 0;
+}
+
+static inline void
+mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index)
+{
+ unsigned int rst, ctrl_offset;
+ unsigned int bit, dbnc;
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+
+ ctrl_offset = (index / 4) * 4 + eint_offsets->dbnc_ctrl;
+ dbnc = readl(pctl->eint_reg_base + ctrl_offset);
+ bit = EINT_DBNC_SET_EN << ((index % 4) * 8);
+ if ((bit & dbnc) > 0) {
+ ctrl_offset = (index / 4) * 4 + eint_offsets->dbnc_set;
+ rst = EINT_DBNC_RST_BIT << ((index % 4) * 8);
+ writel(rst, pctl->eint_reg_base + ctrl_offset);
+ }
+}
+
+static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct mtk_pinctrl *pctl = irq_get_handler_data(irq);
+ unsigned int status, eint_num;
+ int offset, index, virq;
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+ void __iomem *reg = mtk_eint_get_offset(pctl, 0, eint_offsets->stat);
+ int dual_edges, start_level, curr_level;
+ const struct mtk_desc_pin *pin;
+
+ chained_irq_enter(chip, desc);
+ for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+ status = readl(reg);
+ reg += 4;
+ while (status) {
+ offset = __ffs(status);
+ index = eint_num + offset;
+ virq = irq_find_mapping(pctl->domain, index);
+ status &= ~BIT(offset);
+
+ dual_edges = pctl->eint_dual_edges[index];
+ if (dual_edges) {
+ /*
+ * Clear the soft-irq in case we raised it
+ * last time.
+ */
+ writel(BIT(offset), reg - eint_offsets->stat +
+ eint_offsets->soft_clr);
+
+ pin = mtk_find_pin_by_eint_num(pctl, index);
+ start_level = mtk_gpio_get(pctl->chip,
+ pin->pin.number);
+ }
+
+ generic_handle_irq(virq);
+
+ if (dual_edges) {
+ curr_level = mtk_eint_flip_edge(pctl, index);
+
+ /*
+ * If the level changed, we might have lost one edge
+ * interrupt; raise it again through the soft-irq.
+ */
+ if (start_level != curr_level)
+ writel(BIT(offset), reg -
+ eint_offsets->stat +
+ eint_offsets->soft_set);
+ }
+
+ if (index < pctl->devdata->db_cnt)
+ mtk_eint_debounce_process(pctl, index);
+ }
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int mtk_pctrl_build_state(struct platform_device *pdev)
+{
+ struct mtk_pinctrl *pctl = platform_get_drvdata(pdev);
+ int i;
+
+ pctl->ngroups = pctl->devdata->npins;
+
+ /* Allocate groups */
+ pctl->groups = devm_kcalloc(&pdev->dev, pctl->ngroups,
+ sizeof(*pctl->groups), GFP_KERNEL);
+ if (!pctl->groups)
+ return -ENOMEM;
+
+ /* We assume that one pin is one group, use pin name as group name. */
+ pctl->grp_names = devm_kcalloc(&pdev->dev, pctl->ngroups,
+ sizeof(*pctl->grp_names), GFP_KERNEL);
+ if (!pctl->grp_names)
+ return -ENOMEM;
+
+ for (i = 0; i < pctl->devdata->npins; i++) {
+ const struct mtk_desc_pin *pin = pctl->devdata->pins + i;
+ struct mtk_pinctrl_group *group = pctl->groups + i;
+
+ group->name = pin->pin.name;
+ group->pin = pin->pin.number;
+
+ pctl->grp_names[i] = pin->pin.name;
+ }
+
+ return 0;
+}
+
+static struct pinctrl_desc mtk_pctrl_desc = {
+ .confops = &mtk_pconf_ops,
+ .pctlops = &mtk_pctrl_ops,
+ .pmxops = &mtk_pmx_ops,
+};
+
+int mtk_pctrl_init(struct platform_device *pdev,
+ const struct mtk_pinctrl_devdata *data)
+{
+ struct pinctrl_pin_desc *pins;
+ struct mtk_pinctrl *pctl;
+ struct device_node *np = pdev->dev.of_node, *node;
+ struct property *prop;
+ struct resource *res;
+ int i, ret, irq;
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pctl);
+
+ prop = of_find_property(np, "pins-are-numbered", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "only support pins-are-numbered format\n");
+ return -EINVAL;
+ }
+
+ node = of_parse_phandle(np, "mediatek,pctl-regmap", 0);
+ if (node) {
+ pctl->regmap1 = syscon_node_to_regmap(node);
+ if (IS_ERR(pctl->regmap1))
+ return PTR_ERR(pctl->regmap1);
+ }
+
+ /* Only the MT8135 has two base addresses; other SoCs have only one. */
+ node = of_parse_phandle(np, "mediatek,pctl-regmap", 1);
+ if (node) {
+ pctl->regmap2 = syscon_node_to_regmap(node);
+ if (IS_ERR(pctl->regmap2))
+ return PTR_ERR(pctl->regmap2);
+ }
+
+ pctl->devdata = data;
+ ret = mtk_pctrl_build_state(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "build state failed: %d\n", ret);
+ return -EINVAL;
+ }
+
+ pins = devm_kcalloc(&pdev->dev, pctl->devdata->npins, sizeof(*pins),
+ GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < pctl->devdata->npins; i++)
+ pins[i] = pctl->devdata->pins[i].pin;
+ mtk_pctrl_desc.name = dev_name(&pdev->dev);
+ mtk_pctrl_desc.owner = THIS_MODULE;
+ mtk_pctrl_desc.pins = pins;
+ mtk_pctrl_desc.npins = pctl->devdata->npins;
+ pctl->dev = &pdev->dev;
+ pctl->pctl_dev = pinctrl_register(&mtk_pctrl_desc, &pdev->dev, pctl);
+ if (!pctl->pctl_dev) {
+ dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+ return -EINVAL;
+ }
+
+ pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
+ if (!pctl->chip) {
+ ret = -ENOMEM;
+ goto pctrl_error;
+ }
+
+ *pctl->chip = mtk_gpio_chip;
+ pctl->chip->ngpio = pctl->devdata->npins;
+ pctl->chip->label = dev_name(&pdev->dev);
+ pctl->chip->dev = &pdev->dev;
+ pctl->chip->base = 0;
+
+ ret = gpiochip_add(pctl->chip);
+ if (ret) {
+ ret = -EINVAL;
+ goto pctrl_error;
+ }
+
+ /* Register the GPIO to pin mappings. */
+ ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev),
+ 0, 0, pctl->devdata->npins);
+ if (ret) {
+ ret = -EINVAL;
+ goto chip_error;
+ }
+
+ /* Get EINT register base from dts. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get Pinctrl resource\n");
+ ret = -EINVAL;
+ goto chip_error;
+ }
+
+ pctl->eint_reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctl->eint_reg_base)) {
+ ret = -EINVAL;
+ goto chip_error;
+ }
+
+ pctl->eint_dual_edges = devm_kcalloc(&pdev->dev, pctl->devdata->ap_num,
+ sizeof(int), GFP_KERNEL);
+ if (!pctl->eint_dual_edges) {
+ ret = -ENOMEM;
+ goto chip_error;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "couldn't parse and map irq\n");
+ ret = -EINVAL;
+ goto chip_error;
+ }
+
+ pctl->domain = irq_domain_add_linear(np,
+ pctl->devdata->ap_num, &irq_domain_simple_ops, NULL);
+ if (!pctl->domain) {
+ dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
+ ret = -ENOMEM;
+ goto chip_error;
+ }
+
+ mtk_eint_init(pctl);
+ for (i = 0; i < pctl->devdata->ap_num; i++) {
+ int virq = irq_create_mapping(pctl->domain, i);
+
+ irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, pctl);
+ set_irq_flags(virq, IRQF_VALID);
+ }
+
+ irq_set_chained_handler(irq, mtk_eint_irq_handler);
+ irq_set_handler_data(irq, pctl);
+ set_irq_flags(irq, IRQF_VALID);
+ return 0;
+
+chip_error:
+ gpiochip_remove(pctl->chip);
+pctrl_error:
+ pinctrl_unregister(pctl->pctl_dev);
+ return ret;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
+MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
new file mode 100644
index 000000000000..375771db9bd0
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_MTK_COMMON_H
+#define __PINCTRL_MTK_COMMON_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+
+#define NO_EINT_SUPPORT 255
+#define MTK_CHIP_TYPE_BASE 0
+#define MTK_CHIP_TYPE_PMIC 1
+#define MT_EDGE_SENSITIVE 0
+#define MT_LEVEL_SENSITIVE 1
+#define EINT_DBNC_SET_DBNC_BITS 4
+#define EINT_DBNC_RST_BIT (0x1 << 1)
+#define EINT_DBNC_SET_EN (0x1 << 0)
+
+struct mtk_desc_function {
+ const char *name;
+ unsigned char muxval;
+};
+
+struct mtk_desc_eint {
+ unsigned char eintmux;
+ unsigned char eintnum;
+};
+
+struct mtk_desc_pin {
+ struct pinctrl_pin_desc pin;
+ const char *chip;
+ const struct mtk_desc_eint eint;
+ const struct mtk_desc_function *functions;
+};
+
+#define MTK_PIN(_pin, _pad, _chip, _eint, ...) \
+ { \
+ .pin = _pin, \
+ .chip = _chip, \
+ .eint = _eint, \
+ .functions = (struct mtk_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+#define MTK_EINT_FUNCTION(_eintmux, _eintnum) \
+ { \
+ .eintmux = _eintmux, \
+ .eintnum = _eintnum, \
+ }
+
+#define MTK_FUNCTION(_val, _name) \
+ { \
+ .muxval = _val, \
+ .name = _name, \
+ }
+
+#define SET_ADDR(x, y) (x + (y->devdata->port_align))
+#define CLR_ADDR(x, y) (x + (y->devdata->port_align << 1))
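+
+/*
+ * For illustration (offsets are hypothetical): with port_align == 4, a port
+ * register at offset 0x100 has its write-1-to-set companion at
+ * SET_ADDR(0x100, pctl) == 0x104 and its write-1-to-clear companion at
+ * CLR_ADDR(0x100, pctl) == 0x108.
+ */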
+
+struct mtk_pinctrl_group {
+ const char *name;
+ unsigned long config;
+ unsigned pin;
+};
+
+/**
+ * struct mtk_drv_group_desc - Provide driving group data.
+ * @min_drv: The minimum drive current of this group.
+ * @max_drv: The maximum drive current of this group.
+ * @low_bit: The lowest bit of this group's field in the driving register.
+ * @high_bit: The highest bit of this group's field in the driving register.
+ * @step: The drive-current step of this group.
+ */
+struct mtk_drv_group_desc {
+ unsigned char min_drv;
+ unsigned char max_drv;
+ unsigned char low_bit;
+ unsigned char high_bit;
+ unsigned char step;
+};
+
+#define MTK_DRV_GRP(_min, _max, _low, _high, _step) \
+ { \
+ .min_drv = _min, \
+ .max_drv = _max, \
+ .low_bit = _low, \
+ .high_bit = _high, \
+ .step = _step, \
+ }
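+
+/*
+ * Example (illustrative values only): a driving group whose register field
+ * occupies bits [2:0] and supports 2/4/6/8 mA could be described as
+ * MTK_DRV_GRP(2, 8, 0, 2, 2).
+ */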
+
+/**
+ * struct mtk_pin_drv_grp - Provide per-pin driving info.
+ * @pin: The pin number.
+ * @offset: The offset of the driving register for this pin.
+ * @bit: The bit offset of this pin's field in the driving register.
+ * @grp: The driving group this pin belongs to.
+ */
+struct mtk_pin_drv_grp {
+ unsigned int pin;
+ unsigned int offset;
+ unsigned char bit;
+ unsigned char grp;
+};
+
+#define MTK_PIN_DRV_GRP(_pin, _offset, _bit, _grp) \
+ { \
+ .pin = _pin, \
+ .offset = _offset, \
+ .bit = _bit, \
+ .grp = _grp, \
+ }
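+
+/*
+ * Example (hypothetical offsets): a pin 5 whose driving field sits at
+ * register offset 0xb30, starting at bit 8, and which belongs to driving
+ * group 2, would be listed as MTK_PIN_DRV_GRP(5, 0xb30, 8, 2).
+ */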
+
+struct mtk_eint_offsets {
+ const char *name;
+ unsigned int stat;
+ unsigned int ack;
+ unsigned int mask;
+ unsigned int mask_set;
+ unsigned int mask_clr;
+ unsigned int sens;
+ unsigned int sens_set;
+ unsigned int sens_clr;
+ unsigned int soft;
+ unsigned int soft_set;
+ unsigned int soft_clr;
+ unsigned int pol;
+ unsigned int pol_set;
+ unsigned int pol_clr;
+ unsigned int dom_en;
+ unsigned int dbnc_ctrl;
+ unsigned int dbnc_set;
+ unsigned int dbnc_clr;
+ u8 port_mask;
+ u8 ports;
+};
+
+/**
+ * struct mtk_pinctrl_devdata - Provide HW GPIO related data.
+ * @pins: An array describing all pins the pin controller affects.
+ * @npins: The number of entries in @pins.
+ *
+ * @grp_desc: The driving group info.
+ * @pin_drv_grp: The driving group for all pins.
+ * @spec_pull_set: Each SoC may have special pins for pull up/down setting;
+ * these pins' pull settings are very different, as they have separate pull
+ * up/down bits and R0/R1 resistor bits, so they need special pull handling.
+ * If the special setting succeeds, this should return 0; otherwise it
+ * should return a non-zero value.
+ * @spec_ies_smt_set: Some pins are irregular: their input enable and smt
+ * control registers are discontinuous, but they are mapped together, which
+ * means that when the user sets smt, input enable is set at the same time.
+ * So they also need special control. If the special control succeeds, this
+ * should return 0; otherwise return a non-zero value.
+ *
+ * @dir_offset: The direction register offset.
+ * @pullen_offset: The pull-up/pull-down enable register offset.
+ * @pinmux_offset: The pinmux register offset.
+ *
+ * @type1_start: Some chips have two base addresses for the pull select
+ * registers, which means some pins use the first address and others use the
+ * second. This member records the first pin number that uses the second
+ * address.
+ * @type1_end: The end of the pin number range that uses the second address.
+ *
+ * @port_shf: The address shift between two consecutive port registers.
+ * @port_mask: The mask applied to the port index of a pin.
+ * @port_align: The step from a port register to its set and clear registers.
+ */
+struct mtk_pinctrl_devdata {
+ const struct mtk_desc_pin *pins;
+ unsigned int npins;
+ const struct mtk_drv_group_desc *grp_desc;
+ unsigned int n_grp_cls;
+ const struct mtk_pin_drv_grp *pin_drv_grp;
+ unsigned int n_pin_drv_grps;
+ int (*spec_pull_set)(struct regmap *reg, unsigned int pin,
+ unsigned char align, bool isup, unsigned int arg);
+ int (*spec_ies_smt_set)(struct regmap *reg, unsigned int pin,
+ unsigned char align, int value);
+ unsigned int dir_offset;
+ unsigned int ies_offset;
+ unsigned int smt_offset;
+ unsigned int pullen_offset;
+ unsigned int pullsel_offset;
+ unsigned int drv_offset;
+ unsigned int invser_offset;
+ unsigned int dout_offset;
+ unsigned int din_offset;
+ unsigned int pinmux_offset;
+ unsigned short type1_start;
+ unsigned short type1_end;
+ unsigned char port_shf;
+ unsigned char port_mask;
+ unsigned char port_align;
+ unsigned char chip_type;
+ struct mtk_eint_offsets eint_offsets;
+ unsigned int ap_num;
+ unsigned int db_cnt;
+};
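+
+/*
+ * A minimal sketch (hypothetical SoC hook, not part of this patch) of the
+ * spec_pull_set contract described above: handle only the special pins and
+ * return 0, and return a non-zero value for every other pin so the caller
+ * falls back to the generic pull path.
+ *
+ *	static int foo_spec_pull_set(struct regmap *regmap, unsigned int pin,
+ *			unsigned char align, bool isup, unsigned int r1r0)
+ *	{
+ *		if (pin != FOO_SPECIAL_PIN)
+ *			return -EINVAL;
+ *		// program the separate pull-sel and R1/R0 bits here
+ *		return 0;
+ *	}
+ */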
+
+struct mtk_pinctrl {
+ struct regmap *regmap1;
+ struct regmap *regmap2;
+ struct device *dev;
+ struct gpio_chip *chip;
+ struct mtk_pinctrl_group *groups;
+ unsigned ngroups;
+ const char **grp_names;
+ struct pinctrl_dev *pctl_dev;
+ const struct mtk_pinctrl_devdata *devdata;
+ void __iomem *eint_reg_base;
+ struct irq_domain *domain;
+ int *eint_dual_edges;
+};
+
+int mtk_pctrl_init(struct platform_device *pdev,
+ const struct mtk_pinctrl_devdata *data);
+
+#endif /* __PINCTRL_MTK_COMMON_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8135.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8135.h
new file mode 100644
index 000000000000..e17aedb73c8d
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8135.h
@@ -0,0 +1,2114 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_MTK_MT8135_H
+#define __PINCTRL_MTK_MT8135_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt8135[] = {
+ MTK_PIN(
+ PINCTRL_PIN(0, "MSDC0_DAT7"),
+ "D21", "mt8135",
+ MTK_EINT_FUNCTION(2, 49),
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(2, "EINT49"),
+ MTK_FUNCTION(3, "I2SOUT_DAT"),
+ MTK_FUNCTION(4, "DAC_DAT_OUT"),
+ MTK_FUNCTION(5, "PCM1_DO"),
+ MTK_FUNCTION(6, "SPI1_MO"),
+ MTK_FUNCTION(7, "NALE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(1, "MSDC0_DAT6"),
+ "D22", "mt8135",
+ MTK_EINT_FUNCTION(2, 48),
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(2, "EINT48"),
+ MTK_FUNCTION(3, "I2SIN_WS"),
+ MTK_FUNCTION(4, "DAC_WS"),
+ MTK_FUNCTION(5, "PCM1_WS"),
+ MTK_FUNCTION(6, "SPI1_CSN"),
+ MTK_FUNCTION(7, "NCLE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(2, "MSDC0_DAT5"),
+ "E22", "mt8135",
+ MTK_EINT_FUNCTION(2, 47),
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(2, "EINT47"),
+ MTK_FUNCTION(3, "I2SIN_CK"),
+ MTK_FUNCTION(4, "DAC_CK"),
+ MTK_FUNCTION(5, "PCM1_CK"),
+ MTK_FUNCTION(6, "SPI1_CLK"),
+ MTK_FUNCTION(7, "NLD4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(3, "MSDC0_DAT4"),
+ "F21", "mt8135",
+ MTK_EINT_FUNCTION(2, 46),
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(2, "EINT46"),
+ MTK_FUNCTION(3, "A_FUNC_CK"),
+ MTK_FUNCTION(6, "LSCE1B_2X"),
+ MTK_FUNCTION(7, "NLD5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(4, "MSDC0_CMD"),
+ "F20", "mt8135",
+ MTK_EINT_FUNCTION(2, 41),
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(2, "EINT41"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[0]"),
+ MTK_FUNCTION(5, "USB_TEST_IO[0]"),
+ MTK_FUNCTION(6, "LRSTB_2X"),
+ MTK_FUNCTION(7, "NRNB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(5, "MSDC0_CLK"),
+ "G18", "mt8135",
+ MTK_EINT_FUNCTION(2, 40),
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(2, "EINT40"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[1]"),
+ MTK_FUNCTION(5, "USB_TEST_IO[1]"),
+ MTK_FUNCTION(6, "LPTE"),
+ MTK_FUNCTION(7, "NREB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(6, "MSDC0_DAT3"),
+ "G21", "mt8135",
+ MTK_EINT_FUNCTION(2, 45),
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(2, "EINT45"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[2]"),
+ MTK_FUNCTION(5, "USB_TEST_IO[2]"),
+ MTK_FUNCTION(6, "LSCE0B_2X"),
+ MTK_FUNCTION(7, "NLD7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(7, "MSDC0_DAT2"),
+ "E21", "mt8135",
+ MTK_EINT_FUNCTION(2, 44),
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(2, "EINT44"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[3]"),
+ MTK_FUNCTION(5, "USB_TEST_IO[3]"),
+ MTK_FUNCTION(6, "LSA0_2X"),
+ MTK_FUNCTION(7, "NLD14")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(8, "MSDC0_DAT1"),
+ "E23", "mt8135",
+ MTK_EINT_FUNCTION(2, 43),
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(2, "EINT43"),
+ MTK_FUNCTION(5, "USB_TEST_IO[4]"),
+ MTK_FUNCTION(6, "LSCK_2X"),
+ MTK_FUNCTION(7, "NLD11")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(9, "MSDC0_DAT0"),
+ "F22", "mt8135",
+ MTK_EINT_FUNCTION(2, 42),
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(2, "EINT42"),
+ MTK_FUNCTION(5, "USB_TEST_IO[5]"),
+ MTK_FUNCTION(6, "LSDA_2X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(10, "NCEB0"),
+ "G20", "mt8135",
+ MTK_EINT_FUNCTION(2, 139),
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "NCEB0"),
+ MTK_FUNCTION(2, "EINT139"),
+ MTK_FUNCTION(7, "TESTA_OUT4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(11, "NCEB1"),
+ "L17", "mt8135",
+ MTK_EINT_FUNCTION(2, 140),
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "NCEB1"),
+ MTK_FUNCTION(2, "EINT140"),
+ MTK_FUNCTION(6, "USB_DRVVBUS"),
+ MTK_FUNCTION(7, "TESTA_OUT5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(12, "NRNB"),
+ "G19", "mt8135",
+ MTK_EINT_FUNCTION(2, 141),
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "NRNB"),
+ MTK_FUNCTION(2, "EINT141"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[4]"),
+ MTK_FUNCTION(7, "TESTA_OUT6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(13, "NCLE"),
+ "J18", "mt8135",
+ MTK_EINT_FUNCTION(2, 142),
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "NCLE"),
+ MTK_FUNCTION(2, "EINT142"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[5]"),
+ MTK_FUNCTION(4, "CM2PDN_1X"),
+ MTK_FUNCTION(6, "NALE"),
+ MTK_FUNCTION(7, "TESTA_OUT7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(14, "NALE"),
+ "J19", "mt8135",
+ MTK_EINT_FUNCTION(2, 143),
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "NALE"),
+ MTK_FUNCTION(2, "EINT143"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[6]"),
+ MTK_FUNCTION(4, "CM2MCLK_1X"),
+ MTK_FUNCTION(5, "IRDA_RXD"),
+ MTK_FUNCTION(6, "NCLE"),
+ MTK_FUNCTION(7, "TESTA_OUT8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(15, "NREB"),
+ "L18", "mt8135",
+ MTK_EINT_FUNCTION(2, 144),
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "NREB"),
+ MTK_FUNCTION(2, "EINT144"),
+ MTK_FUNCTION(3, "A_FUNC_DOUT[7]"),
+ MTK_FUNCTION(4, "CM2RST_1X"),
+ MTK_FUNCTION(5, "IRDA_TXD"),
+ MTK_FUNCTION(7, "TESTA_OUT9")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(16, "NWEB"),
+ "J20", "mt8135",
+ MTK_EINT_FUNCTION(2, 145),
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "NWEB"),
+ MTK_FUNCTION(2, "EINT145"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[0]"),
+ MTK_FUNCTION(4, "CM2PCLK_1X"),
+ MTK_FUNCTION(5, "IRDA_PDN"),
+ MTK_FUNCTION(7, "TESTA_OUT10")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(17, "NLD0"),
+ "K21", "mt8135",
+ MTK_EINT_FUNCTION(2, 146),
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "NLD0"),
+ MTK_FUNCTION(2, "EINT146"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[1]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[0]"),
+ MTK_FUNCTION(5, "I2SIN_CK"),
+ MTK_FUNCTION(6, "DAC_CK"),
+ MTK_FUNCTION(7, "TESTA_OUT11")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(18, "NLD1"),
+ "K22", "mt8135",
+ MTK_EINT_FUNCTION(2, 147),
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "NLD1"),
+ MTK_FUNCTION(2, "EINT147"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[2]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[1]"),
+ MTK_FUNCTION(5, "I2SIN_WS"),
+ MTK_FUNCTION(6, "DAC_WS"),
+ MTK_FUNCTION(7, "TESTA_OUT12")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(19, "NLD2"),
+ "J21", "mt8135",
+ MTK_EINT_FUNCTION(2, 148),
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "NLD2"),
+ MTK_FUNCTION(2, "EINT148"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[3]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[2]"),
+ MTK_FUNCTION(5, "I2SOUT_DAT"),
+ MTK_FUNCTION(6, "DAC_DAT_OUT"),
+ MTK_FUNCTION(7, "TESTA_OUT13")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(20, "NLD3"),
+ "J23", "mt8135",
+ MTK_EINT_FUNCTION(2, 149),
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "NLD3"),
+ MTK_FUNCTION(2, "EINT149"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[4]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[3]"),
+ MTK_FUNCTION(7, "TESTA_OUT14")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(21, "NLD4"),
+ "J22", "mt8135",
+ MTK_EINT_FUNCTION(2, 150),
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "NLD4"),
+ MTK_FUNCTION(2, "EINT150"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[5]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[4]"),
+ MTK_FUNCTION(7, "TESTA_OUT15")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(22, "NLD5"),
+ "H21", "mt8135",
+ MTK_EINT_FUNCTION(2, 151),
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "NLD5"),
+ MTK_FUNCTION(2, "EINT151"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[6]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[5]"),
+ MTK_FUNCTION(7, "TESTA_OUT16")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(23, "NLD6"),
+ "H22", "mt8135",
+ MTK_EINT_FUNCTION(2, 152),
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "NLD6"),
+ MTK_FUNCTION(2, "EINT152"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[7]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[6]"),
+ MTK_FUNCTION(7, "TESTA_OUT17")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(24, "NLD7"),
+ "H20", "mt8135",
+ MTK_EINT_FUNCTION(2, 153),
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "NLD7"),
+ MTK_FUNCTION(2, "EINT153"),
+ MTK_FUNCTION(3, "A_FUNC_DIN[8]"),
+ MTK_FUNCTION(4, "CM2DAT_1X[7]"),
+ MTK_FUNCTION(7, "TESTA_OUT18")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(25, "NLD8"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 154),
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "NLD8"),
+ MTK_FUNCTION(2, "EINT154"),
+ MTK_FUNCTION(4, "CM2DAT_1X[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(26, "NLD9"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 155),
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "NLD9"),
+ MTK_FUNCTION(2, "EINT155"),
+ MTK_FUNCTION(4, "CM2DAT_1X[9]"),
+ MTK_FUNCTION(5, "PWM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(27, "NLD10"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 156),
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "NLD10"),
+ MTK_FUNCTION(2, "EINT156"),
+ MTK_FUNCTION(4, "CM2VSYNC_1X"),
+ MTK_FUNCTION(5, "PWM2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(28, "NLD11"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 157),
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "NLD11"),
+ MTK_FUNCTION(2, "EINT157"),
+ MTK_FUNCTION(4, "CM2HSYNC_1X"),
+ MTK_FUNCTION(5, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(29, "NLD12"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 158),
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "NLD12"),
+ MTK_FUNCTION(2, "EINT158"),
+ MTK_FUNCTION(3, "I2SIN_CK"),
+ MTK_FUNCTION(4, "DAC_CK"),
+ MTK_FUNCTION(5, "PCM1_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(30, "NLD13"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 159),
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "NLD13"),
+ MTK_FUNCTION(2, "EINT159"),
+ MTK_FUNCTION(3, "I2SIN_WS"),
+ MTK_FUNCTION(4, "DAC_WS"),
+ MTK_FUNCTION(5, "PCM1_WS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(31, "NLD14"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 160),
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "NLD14"),
+ MTK_FUNCTION(2, "EINT160"),
+ MTK_FUNCTION(3, "I2SOUT_DAT"),
+ MTK_FUNCTION(4, "DAC_DAT_OUT"),
+ MTK_FUNCTION(5, "PCM1_DO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(32, "NLD15"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 161),
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "NLD15"),
+ MTK_FUNCTION(2, "EINT161"),
+ MTK_FUNCTION(3, "DISP_PWM"),
+ MTK_FUNCTION(4, "PWM4"),
+ MTK_FUNCTION(5, "PCM1_DI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(33, "MSDC0_RSTB"),
+ "G22", "mt8135",
+ MTK_EINT_FUNCTION(2, 50),
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(2, "EINT50"),
+ MTK_FUNCTION(3, "I2SIN_DAT"),
+ MTK_FUNCTION(5, "PCM1_DI"),
+ MTK_FUNCTION(6, "SPI1_MI"),
+ MTK_FUNCTION(7, "NLD10")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(34, "IDDIG"),
+ "N17", "mt8135",
+ MTK_EINT_FUNCTION(2, 34),
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "EINT34")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(35, "SCL3"),
+ "L19", "mt8135",
+ MTK_EINT_FUNCTION(2, 96),
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(2, "EINT96"),
+ MTK_FUNCTION(3, "CLKM6"),
+ MTK_FUNCTION(4, "PWM6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(36, "SDA3"),
+ "L20", "mt8135",
+ MTK_EINT_FUNCTION(2, 97),
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(2, "EINT97")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(37, "AUD_CLK_MOSI"),
+ "L21", "mt8135",
+ MTK_EINT_FUNCTION(4, 19),
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "AUD_CLK"),
+ MTK_FUNCTION(2, "ADC_CK"),
+ MTK_FUNCTION(3, " HDMI_SDATA0"),
+ MTK_FUNCTION(4, "EINT19"),
+ MTK_FUNCTION(5, "USB_TEST_IO[6]"),
+ MTK_FUNCTION(7, "TESTA_OUT19")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(38, "AUD_DAT_MOSI"),
+ "L23", "mt8135",
+ MTK_EINT_FUNCTION(4, 21),
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI"),
+ MTK_FUNCTION(2, "ADC_WS"),
+ MTK_FUNCTION(3, "AUD_DAT_MISO"),
+ MTK_FUNCTION(4, "EINT21"),
+ MTK_FUNCTION(5, "USB_TEST_IO[7]"),
+ MTK_FUNCTION(7, "TESTA_OUT20")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(39, "AUD_DAT_MISO"),
+ "L22", "mt8135",
+ MTK_EINT_FUNCTION(4, 20),
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO"),
+ MTK_FUNCTION(2, "ADC_DAT_IN"),
+ MTK_FUNCTION(3, "AUD_DAT_MOSI"),
+ MTK_FUNCTION(4, "EINT20"),
+ MTK_FUNCTION(5, "USB_TEST_IO[8]"),
+ MTK_FUNCTION(7, "TESTA_OUT21")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(40, "DAC_CLK"),
+ "P21", "mt8135",
+ MTK_EINT_FUNCTION(2, 22),
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "DAC_CK"),
+ MTK_FUNCTION(2, "EINT22"),
+ MTK_FUNCTION(3, " HDMI_SDATA1"),
+ MTK_FUNCTION(5, "USB_TEST_IO[9]"),
+ MTK_FUNCTION(7, "TESTA_OUT22")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(41, "DAC_WS"),
+ "N18", "mt8135",
+ MTK_EINT_FUNCTION(2, 24),
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "DAC_WS"),
+ MTK_FUNCTION(2, "EINT24"),
+ MTK_FUNCTION(3, " HDMI_SDATA2"),
+ MTK_FUNCTION(5, "USB_TEST_IO[10]"),
+ MTK_FUNCTION(7, "TESTA_OUT23")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(42, "DAC_DAT_OUT"),
+ "N22", "mt8135",
+ MTK_EINT_FUNCTION(2, 23),
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "DAC_DAT_OUT"),
+ MTK_FUNCTION(2, "EINT23"),
+ MTK_FUNCTION(3, " HDMI_SDATA3"),
+ MTK_FUNCTION(5, "USB_TEST_IO[11]"),
+ MTK_FUNCTION(7, "TESTA_OUT24")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(43, "PWRAP_SPI0_MO"),
+ "M22", "mt8135",
+ MTK_EINT_FUNCTION(2, 29),
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "PWRAP_SPIDI"),
+ MTK_FUNCTION(2, "EINT29")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(44, "PWRAP_SPI0_MI"),
+ "P23", "mt8135",
+ MTK_EINT_FUNCTION(2, 28),
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "PWRAP_SPIDO"),
+ MTK_FUNCTION(2, "EINT28")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(45, "PWRAP_SPI0_CSN"),
+ "M21", "mt8135",
+ MTK_EINT_FUNCTION(2, 27),
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "PWRAP_SPICS_B_I"),
+ MTK_FUNCTION(2, "EINT27")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(46, "PWRAP_SPI0_CLK"),
+ "P22", "mt8135",
+ MTK_EINT_FUNCTION(2, 26),
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "PWRAP_SPICK_I"),
+ MTK_FUNCTION(2, "EINT26")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(47, "PWRAP_EVENT"),
+ "M23", "mt8135",
+ MTK_EINT_FUNCTION(2, 25),
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "PWRAP_EVENT_IN"),
+ MTK_FUNCTION(2, "EINT25"),
+ MTK_FUNCTION(7, "TESTA_OUT2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(48, "RTC32K_CK"),
+ "N20", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(49, "WATCHDOG"),
+ "R22", "mt8135",
+ MTK_EINT_FUNCTION(2, 36),
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "WATCHDOG"),
+ MTK_FUNCTION(2, "EINT36")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(50, "SRCLKENA"),
+ "T22", "mt8135",
+ MTK_EINT_FUNCTION(2, 38),
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "SRCLKENA"),
+ MTK_FUNCTION(2, "EINT38")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(51, "SRCVOLTEN"),
+ "T23", "mt8135",
+ MTK_EINT_FUNCTION(2, 37),
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SRCVOLTEN"),
+ MTK_FUNCTION(2, "EINT37")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(52, "EINT0"),
+ "T21", "mt8135",
+ MTK_EINT_FUNCTION(1, 0),
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "EINT0"),
+ MTK_FUNCTION(2, "PWM1"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, " SPDIF_OUT"),
+ MTK_FUNCTION(5, "USB_TEST_IO[12]"),
+ MTK_FUNCTION(7, "USB_SCL")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(53, "URXD2"),
+ "R18", "mt8135",
+ MTK_EINT_FUNCTION(2, 83),
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(2, "EINT83"),
+ MTK_FUNCTION(4, " HDMI_LRCK"),
+ MTK_FUNCTION(5, "CLKM3"),
+ MTK_FUNCTION(7, "UTXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(54, "UTXD2"),
+ "R17", "mt8135",
+ MTK_EINT_FUNCTION(2, 82),
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(2, "EINT82"),
+ MTK_FUNCTION(4, " HDMI_BCK_OUT"),
+ MTK_FUNCTION(5, "CLKM2"),
+ MTK_FUNCTION(7, "URXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(55, "UCTS2"),
+ "R20", "mt8135",
+ MTK_EINT_FUNCTION(2, 84),
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "UCTS2"),
+ MTK_FUNCTION(2, "EINT84"),
+ MTK_FUNCTION(5, "PWM1"),
+ MTK_FUNCTION(7, "URTS2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(56, "URTS2"),
+ "R19", "mt8135",
+ MTK_EINT_FUNCTION(2, 85),
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "URTS2"),
+ MTK_FUNCTION(2, "EINT85"),
+ MTK_FUNCTION(5, "PWM2"),
+ MTK_FUNCTION(7, "UCTS2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(57, "JTCK"),
+ "V17", "mt8135",
+ MTK_EINT_FUNCTION(2, 188),
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "JTCK"),
+ MTK_FUNCTION(2, "EINT188"),
+ MTK_FUNCTION(3, "DSP1_ICK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(58, "JTDO"),
+ "T16", "mt8135",
+ MTK_EINT_FUNCTION(2, 190),
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "JTDO"),
+ MTK_FUNCTION(2, "EINT190"),
+ MTK_FUNCTION(3, "DSP2_IMS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(59, "JTRST_B"),
+ "T19", "mt8135",
+ MTK_EINT_FUNCTION(2, 0),
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "JTRST_B"),
+ MTK_FUNCTION(2, "EINT0"),
+ MTK_FUNCTION(3, "DSP2_ICK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(60, "JTDI"),
+ "T18", "mt8135",
+ MTK_EINT_FUNCTION(2, 189),
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "JTDI"),
+ MTK_FUNCTION(2, "EINT189"),
+ MTK_FUNCTION(3, "DSP1_IMS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(61, "JRTCK"),
+ "T20", "mt8135",
+ MTK_EINT_FUNCTION(2, 187),
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "JRTCK"),
+ MTK_FUNCTION(2, "EINT187"),
+ MTK_FUNCTION(3, "DSP1_ID")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(62, "JTMS"),
+ "T17", "mt8135",
+ MTK_EINT_FUNCTION(2, 191),
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "JTMS"),
+ MTK_FUNCTION(2, "EINT191"),
+ MTK_FUNCTION(3, "DSP2_ID")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(63, "MSDC1_INSI"),
+ "V18", "mt8135",
+ MTK_EINT_FUNCTION(1, 15),
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "MSDC1_INSI"),
+ MTK_FUNCTION(3, "SCL5"),
+ MTK_FUNCTION(4, "PWM6"),
+ MTK_FUNCTION(5, "CLKM5"),
+ MTK_FUNCTION(7, "TESTB_OUT6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(64, "MSDC1_SDWPI"),
+ "W18", "mt8135",
+ MTK_EINT_FUNCTION(2, 58),
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "MSDC1_SDWPI"),
+ MTK_FUNCTION(2, "EINT58"),
+ MTK_FUNCTION(3, "SDA5"),
+ MTK_FUNCTION(4, "PWM7"),
+ MTK_FUNCTION(5, "CLKM6"),
+ MTK_FUNCTION(7, "TESTB_OUT7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(65, "MSDC2_INSI"),
+ "U22", "mt8135",
+ MTK_EINT_FUNCTION(1, 14),
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "MSDC2_INSI"),
+ MTK_FUNCTION(5, "USB_TEST_IO[27]"),
+ MTK_FUNCTION(7, "TESTA_OUT3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(66, "MSDC2_SDWPI"),
+ "U21", "mt8135",
+ MTK_EINT_FUNCTION(2, 66),
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "MSDC2_SDWPI"),
+ MTK_FUNCTION(2, "EINT66"),
+ MTK_FUNCTION(5, "USB_TEST_IO[28]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(67, "URXD4"),
+ "V23", "mt8135",
+ MTK_EINT_FUNCTION(2, 89),
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "URXD4"),
+ MTK_FUNCTION(2, "EINT89"),
+ MTK_FUNCTION(3, "URXD1"),
+ MTK_FUNCTION(6, "UTXD4"),
+ MTK_FUNCTION(7, "TESTB_OUT10")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(68, "UTXD4"),
+ "V22", "mt8135",
+ MTK_EINT_FUNCTION(2, 88),
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "UTXD4"),
+ MTK_FUNCTION(2, "EINT88"),
+ MTK_FUNCTION(3, "UTXD1"),
+ MTK_FUNCTION(6, "URXD4"),
+ MTK_FUNCTION(7, "TESTB_OUT11")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(69, "URXD1"),
+ "W22", "mt8135",
+ MTK_EINT_FUNCTION(2, 79),
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "EINT79"),
+ MTK_FUNCTION(3, "URXD4"),
+ MTK_FUNCTION(6, "UTXD1"),
+ MTK_FUNCTION(7, "TESTB_OUT24")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(70, "UTXD1"),
+ "V21", "mt8135",
+ MTK_EINT_FUNCTION(2, 78),
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "EINT78"),
+ MTK_FUNCTION(3, "UTXD4"),
+ MTK_FUNCTION(6, "URXD1"),
+ MTK_FUNCTION(7, "TESTB_OUT25")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(71, "UCTS1"),
+ "V19", "mt8135",
+ MTK_EINT_FUNCTION(2, 80),
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "EINT80"),
+ MTK_FUNCTION(5, "CLKM0"),
+ MTK_FUNCTION(6, "URTS1"),
+ MTK_FUNCTION(7, "TESTB_OUT31")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(72, "URTS1"),
+ "V20", "mt8135",
+ MTK_EINT_FUNCTION(2, 81),
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(2, "EINT81"),
+ MTK_FUNCTION(5, "CLKM1"),
+ MTK_FUNCTION(6, "UCTS1"),
+ MTK_FUNCTION(7, "TESTB_OUT21")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(73, "PWM1"),
+ "W17", "mt8135",
+ MTK_EINT_FUNCTION(2, 73),
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "PWM1"),
+ MTK_FUNCTION(2, "EINT73"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "DISP_PWM"),
+ MTK_FUNCTION(7, "TESTB_OUT8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(74, "PWM2"),
+ "Y17", "mt8135",
+ MTK_EINT_FUNCTION(2, 74),
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "PWM2"),
+ MTK_FUNCTION(2, "EINT74"),
+ MTK_FUNCTION(3, "DPI33_CK"),
+ MTK_FUNCTION(4, "PWM5"),
+ MTK_FUNCTION(5, "URXD2"),
+ MTK_FUNCTION(6, "DISP_PWM"),
+ MTK_FUNCTION(7, "TESTB_OUT9")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(75, "PWM3"),
+ "Y19", "mt8135",
+ MTK_EINT_FUNCTION(2, 75),
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "PWM3"),
+ MTK_FUNCTION(2, "EINT75"),
+ MTK_FUNCTION(3, "DPI33_D0"),
+ MTK_FUNCTION(4, "PWM6"),
+ MTK_FUNCTION(5, "UTXD2"),
+ MTK_FUNCTION(6, "DISP_PWM"),
+ MTK_FUNCTION(7, "TESTB_OUT12")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(76, "PWM4"),
+ "W19", "mt8135",
+ MTK_EINT_FUNCTION(2, 76),
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "PWM4"),
+ MTK_FUNCTION(2, "EINT76"),
+ MTK_FUNCTION(3, "DPI33_D1"),
+ MTK_FUNCTION(4, "PWM7"),
+ MTK_FUNCTION(6, "DISP_PWM"),
+ MTK_FUNCTION(7, "TESTB_OUT13")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(77, "MSDC2_DAT2"),
+ "W21", "mt8135",
+ MTK_EINT_FUNCTION(2, 63),
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "EINT63"),
+ MTK_FUNCTION(4, "DSP2_IMS"),
+ MTK_FUNCTION(6, "DPI33_D6"),
+ MTK_FUNCTION(7, "TESTA_OUT25")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(78, "MSDC2_DAT3"),
+ "AA23", "mt8135",
+ MTK_EINT_FUNCTION(2, 64),
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "EINT64"),
+ MTK_FUNCTION(4, "DSP2_ID"),
+ MTK_FUNCTION(6, "DPI33_D7"),
+ MTK_FUNCTION(7, "TESTA_OUT26")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(79, "MSDC2_CMD"),
+ "Y22", "mt8135",
+ MTK_EINT_FUNCTION(2, 60),
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "EINT60"),
+ MTK_FUNCTION(4, "DSP1_IMS"),
+ MTK_FUNCTION(5, "PCM1_WS"),
+ MTK_FUNCTION(6, "DPI33_D3"),
+ MTK_FUNCTION(7, "TESTA_OUT0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(80, "MSDC2_CLK"),
+ "AA22", "mt8135",
+ MTK_EINT_FUNCTION(2, 59),
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "EINT59"),
+ MTK_FUNCTION(4, "DSP1_ICK"),
+ MTK_FUNCTION(5, "PCM1_CK"),
+ MTK_FUNCTION(6, "DPI33_D2"),
+ MTK_FUNCTION(7, "TESTA_OUT1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(81, "MSDC2_DAT1"),
+ "Y21", "mt8135",
+ MTK_EINT_FUNCTION(2, 62),
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "EINT62"),
+ MTK_FUNCTION(4, "DSP2_ICK"),
+ MTK_FUNCTION(5, "PCM1_DO"),
+ MTK_FUNCTION(6, "DPI33_D5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(82, "MSDC2_DAT0"),
+ "AB22", "mt8135",
+ MTK_EINT_FUNCTION(2, 61),
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "EINT61"),
+ MTK_FUNCTION(4, "DSP1_ID"),
+ MTK_FUNCTION(5, "PCM1_DI"),
+ MTK_FUNCTION(6, "DPI33_D4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(83, "MSDC1_DAT0"),
+ "AC19", "mt8135",
+ MTK_EINT_FUNCTION(2, 53),
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "EINT53"),
+ MTK_FUNCTION(3, "SCL1"),
+ MTK_FUNCTION(4, "PWM2"),
+ MTK_FUNCTION(5, "CLKM1"),
+ MTK_FUNCTION(7, "TESTB_OUT2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(84, "MSDC1_DAT1"),
+ "AA19", "mt8135",
+ MTK_EINT_FUNCTION(2, 54),
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "EINT54"),
+ MTK_FUNCTION(3, "SDA1"),
+ MTK_FUNCTION(4, "PWM3"),
+ MTK_FUNCTION(5, "CLKM2"),
+ MTK_FUNCTION(7, "TESTB_OUT3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(85, "MSDC1_CMD"),
+ "AA20", "mt8135",
+ MTK_EINT_FUNCTION(2, 52),
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "EINT52"),
+ MTK_FUNCTION(3, "SDA0"),
+ MTK_FUNCTION(4, "PWM1"),
+ MTK_FUNCTION(5, "CLKM0"),
+ MTK_FUNCTION(7, "TESTB_OUT1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(86, "MSDC1_CLK"),
+ "AB19", "mt8135",
+ MTK_EINT_FUNCTION(2, 51),
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "EINT51"),
+ MTK_FUNCTION(3, "SCL0"),
+ MTK_FUNCTION(4, "DISP_PWM"),
+ MTK_FUNCTION(7, "TESTB_OUT0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(87, "MSDC1_DAT2"),
+ "AA21", "mt8135",
+ MTK_EINT_FUNCTION(2, 55),
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "EINT55"),
+ MTK_FUNCTION(3, "SCL4"),
+ MTK_FUNCTION(4, "PWM4"),
+ MTK_FUNCTION(5, "CLKM3"),
+ MTK_FUNCTION(7, "TESTB_OUT4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(88, "MSDC1_DAT3"),
+ "AB20", "mt8135",
+ MTK_EINT_FUNCTION(2, 56),
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "EINT56"),
+ MTK_FUNCTION(3, "SDA4"),
+ MTK_FUNCTION(4, "PWM5"),
+ MTK_FUNCTION(5, "CLKM4"),
+ MTK_FUNCTION(7, "TESTB_OUT5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(89, "MSDC4_DAT0"),
+ "AB8", "mt8135",
+ MTK_EINT_FUNCTION(2, 133),
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "MSDC4_DAT0"),
+ MTK_FUNCTION(2, "EINT133"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "A_FUNC_DIN[9]"),
+ MTK_FUNCTION(7, "LPTE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(90, "MSDC4_DAT1"),
+ "AB7", "mt8135",
+ MTK_EINT_FUNCTION(2, 134),
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "MSDC4_DAT1"),
+ MTK_FUNCTION(2, "EINT134"),
+ MTK_FUNCTION(6, "A_FUNC_DIN[10]"),
+ MTK_FUNCTION(7, "LRSTB_1X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(91, "MSDC4_DAT5"),
+ "AA8", "mt8135",
+ MTK_EINT_FUNCTION(2, 136),
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "MSDC4_DAT5"),
+ MTK_FUNCTION(2, "EINT136"),
+ MTK_FUNCTION(3, "I2SIN_WS"),
+ MTK_FUNCTION(4, "DAC_WS"),
+ MTK_FUNCTION(5, "PCM1_WS"),
+ MTK_FUNCTION(6, "A_FUNC_DIN[11]"),
+ MTK_FUNCTION(7, "SPI1_CSN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(92, "MSDC4_DAT6"),
+ "AC4", "mt8135",
+ MTK_EINT_FUNCTION(2, 137),
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "MSDC4_DAT6"),
+ MTK_FUNCTION(2, "EINT137"),
+ MTK_FUNCTION(3, "I2SOUT_DAT"),
+ MTK_FUNCTION(4, "DAC_DAT_OUT"),
+ MTK_FUNCTION(5, "PCM1_DO"),
+ MTK_FUNCTION(6, "A_FUNC_DIN[12]"),
+ MTK_FUNCTION(7, "SPI1_MO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(93, "MSDC4_DAT7"),
+ "AC6", "mt8135",
+ MTK_EINT_FUNCTION(2, 138),
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "MSDC4_DAT7"),
+ MTK_FUNCTION(2, "EINT138"),
+ MTK_FUNCTION(3, "I2SIN_DAT"),
+ MTK_FUNCTION(5, "PCM1_DI"),
+ MTK_FUNCTION(6, "A_FUNC_DIN[13]"),
+ MTK_FUNCTION(7, "SPI1_MI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(94, "MSDC4_DAT4"),
+ "AA7", "mt8135",
+ MTK_EINT_FUNCTION(2, 135),
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "MSDC4_DAT4"),
+ MTK_FUNCTION(2, "EINT135"),
+ MTK_FUNCTION(3, "I2SIN_CK"),
+ MTK_FUNCTION(4, "DAC_CK"),
+ MTK_FUNCTION(5, "PCM1_CK"),
+ MTK_FUNCTION(6, "A_FUNC_DIN[14]"),
+ MTK_FUNCTION(7, "SPI1_CLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(95, "MSDC4_DAT2"),
+ "AB6", "mt8135",
+ MTK_EINT_FUNCTION(2, 131),
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "MSDC4_DAT2"),
+ MTK_FUNCTION(2, "EINT131"),
+ MTK_FUNCTION(3, "I2SIN_WS"),
+ MTK_FUNCTION(4, "CM2PDN_2X"),
+ MTK_FUNCTION(5, "DAC_WS"),
+ MTK_FUNCTION(6, "PCM1_WS"),
+ MTK_FUNCTION(7, "LSCE0B_1X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(96, "MSDC4_CLK"),
+ "AB5", "mt8135",
+ MTK_EINT_FUNCTION(2, 129),
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "MSDC4_CLK"),
+ MTK_FUNCTION(2, "EINT129"),
+ MTK_FUNCTION(3, "DPI1_CK_2X"),
+ MTK_FUNCTION(4, "CM2PCLK_2X"),
+ MTK_FUNCTION(5, "PWM4"),
+ MTK_FUNCTION(6, "PCM1_DI"),
+ MTK_FUNCTION(7, "LSCK_1X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(97, "MSDC4_DAT3"),
+ "Y8", "mt8135",
+ MTK_EINT_FUNCTION(2, 132),
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "MSDC4_DAT3"),
+ MTK_FUNCTION(2, "EINT132"),
+ MTK_FUNCTION(3, "I2SOUT_DAT"),
+ MTK_FUNCTION(4, "CM2RST_2X"),
+ MTK_FUNCTION(5, "DAC_DAT_OUT"),
+ MTK_FUNCTION(6, "PCM1_DO"),
+ MTK_FUNCTION(7, "LSCE1B_1X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(98, "MSDC4_CMD"),
+ "AC3", "mt8135",
+ MTK_EINT_FUNCTION(2, 128),
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "MSDC4_CMD"),
+ MTK_FUNCTION(2, "EINT128"),
+ MTK_FUNCTION(3, "DPI1_DE_2X"),
+ MTK_FUNCTION(5, "PWM3"),
+ MTK_FUNCTION(7, "LSDA_1X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(99, "MSDC4_RSTB"),
+ "AB4", "mt8135",
+ MTK_EINT_FUNCTION(2, 130),
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "MSDC4_RSTB"),
+ MTK_FUNCTION(2, "EINT130"),
+ MTK_FUNCTION(3, "I2SIN_CK"),
+ MTK_FUNCTION(4, "CM2MCLK_2X"),
+ MTK_FUNCTION(5, "DAC_CK"),
+ MTK_FUNCTION(6, "PCM1_CK"),
+ MTK_FUNCTION(7, "LSA0_1X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(100, "SDA0"),
+ "W9", "mt8135",
+ MTK_EINT_FUNCTION(2, 91),
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(2, "EINT91"),
+ MTK_FUNCTION(3, "CLKM1"),
+ MTK_FUNCTION(4, "PWM1"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[15]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(101, "SCL0"),
+ "W11", "mt8135",
+ MTK_EINT_FUNCTION(2, 90),
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(2, "EINT90"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "DISP_PWM"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[16]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(102, "EINT10_AUXIN2"),
+ "AA3", "mt8135",
+ MTK_EINT_FUNCTION(1, 10),
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "EINT10"),
+ MTK_FUNCTION(5, "USB_TEST_IO[16]"),
+ MTK_FUNCTION(6, "TESTB_OUT16"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[17]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(103, "EINT11_AUXIN3"),
+ "AB2", "mt8135",
+ MTK_EINT_FUNCTION(1, 11),
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "EINT11"),
+ MTK_FUNCTION(5, "USB_TEST_IO[17]"),
+ MTK_FUNCTION(6, "TESTB_OUT17"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[18]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(104, "EINT16_AUXIN4"),
+ "AB3", "mt8135",
+ MTK_EINT_FUNCTION(1, 16),
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "EINT16"),
+ MTK_FUNCTION(5, "USB_TEST_IO[18]"),
+ MTK_FUNCTION(6, "TESTB_OUT18"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[19]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(105, "I2S_CLK"),
+ "W6", "mt8135",
+ MTK_EINT_FUNCTION(2, 10),
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "I2SIN_CK"),
+ MTK_FUNCTION(2, "EINT10"),
+ MTK_FUNCTION(3, "DAC_CK"),
+ MTK_FUNCTION(4, "PCM1_CK"),
+ MTK_FUNCTION(5, "USB_TEST_IO[19]"),
+ MTK_FUNCTION(6, "TESTB_OUT19"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[20]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(106, "I2S_WS"),
+ "AA6", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "I2SIN_WS"),
+ MTK_FUNCTION(3, "DAC_WS"),
+ MTK_FUNCTION(4, "PCM1_WS"),
+ MTK_FUNCTION(5, "USB_TEST_IO[20]"),
+ MTK_FUNCTION(6, "TESTB_OUT20"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[21]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(107, "I2S_DATA_IN"),
+ "AA5", "mt8135",
+ MTK_EINT_FUNCTION(2, 11),
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "I2SIN_DAT"),
+ MTK_FUNCTION(2, "EINT11"),
+ MTK_FUNCTION(4, "PCM1_DI"),
+ MTK_FUNCTION(5, "USB_TEST_IO[21]"),
+ MTK_FUNCTION(6, "TESTB_OUT22"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[22]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(108, "I2S_DATA_OUT"),
+ "AA4", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "I2SOUT_DAT"),
+ MTK_FUNCTION(3, "DAC_DAT_OUT"),
+ MTK_FUNCTION(4, "PCM1_DO"),
+ MTK_FUNCTION(5, "USB_TEST_IO[22]"),
+ MTK_FUNCTION(6, "TESTB_OUT23"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[23]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(109, "EINT5"),
+ "W5", "mt8135",
+ MTK_EINT_FUNCTION(1, 5),
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "EINT5"),
+ MTK_FUNCTION(2, "PWM5"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(4, "GPU_JTRSTB"),
+ MTK_FUNCTION(5, "USB_TEST_IO[23]"),
+ MTK_FUNCTION(6, "TESTB_OUT26"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[24]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(110, "EINT6"),
+ "V5", "mt8135",
+ MTK_EINT_FUNCTION(1, 6),
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "EINT6"),
+ MTK_FUNCTION(2, "PWM6"),
+ MTK_FUNCTION(3, "CLKM4"),
+ MTK_FUNCTION(4, "GPU_JTMS"),
+ MTK_FUNCTION(5, "USB_TEST_IO[24]"),
+ MTK_FUNCTION(6, "TESTB_OUT27"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[25]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(111, "EINT7"),
+ "W3", "mt8135",
+ MTK_EINT_FUNCTION(1, 7),
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "EINT7"),
+ MTK_FUNCTION(2, "PWM7"),
+ MTK_FUNCTION(3, "CLKM5"),
+ MTK_FUNCTION(4, "GPU_JTDO"),
+ MTK_FUNCTION(5, "USB_TEST_IO[25]"),
+ MTK_FUNCTION(6, "TESTB_OUT28"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[26]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(112, "EINT8"),
+ "V6", "mt8135",
+ MTK_EINT_FUNCTION(1, 8),
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "EINT8"),
+ MTK_FUNCTION(2, "DISP_PWM"),
+ MTK_FUNCTION(3, "CLKM6"),
+ MTK_FUNCTION(4, "GPU_JTDI"),
+ MTK_FUNCTION(5, "USB_TEST_IO[26]"),
+ MTK_FUNCTION(6, "TESTB_OUT29"),
+ MTK_FUNCTION(7, "EXT_FRAME_SYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(113, "EINT9"),
+ "W8", "mt8135",
+ MTK_EINT_FUNCTION(1, 9),
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "EINT9"),
+ MTK_FUNCTION(4, "GPU_JTCK"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "TESTB_OUT30"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[27]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(114, "LPCE1B"),
+ "W4", "mt8135",
+ MTK_EINT_FUNCTION(2, 127),
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "LPCE1B"),
+ MTK_FUNCTION(2, "EINT127"),
+ MTK_FUNCTION(5, "PWM2"),
+ MTK_FUNCTION(6, "TESTB_OUT14"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[28]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(115, "LPCE0B"),
+ "T5", "mt8135",
+ MTK_EINT_FUNCTION(2, 126),
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "LPCE0B"),
+ MTK_FUNCTION(2, "EINT126"),
+ MTK_FUNCTION(5, "PWM1"),
+ MTK_FUNCTION(6, "TESTB_OUT15"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[29]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(116, "DISP_PWM"),
+ "V4", "mt8135",
+ MTK_EINT_FUNCTION(2, 77),
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(2, "EINT77"),
+ MTK_FUNCTION(3, "LSDI"),
+ MTK_FUNCTION(4, "PWM1"),
+ MTK_FUNCTION(5, "PWM2"),
+ MTK_FUNCTION(7, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(117, "EINT1"),
+ "T6", "mt8135",
+ MTK_EINT_FUNCTION(1, 1),
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "EINT1"),
+ MTK_FUNCTION(2, "PWM2"),
+ MTK_FUNCTION(3, "CLKM1"),
+ MTK_FUNCTION(5, "USB_TEST_IO[13]"),
+ MTK_FUNCTION(7, "USB_SDA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(118, "EINT2"),
+ "T4", "mt8135",
+ MTK_EINT_FUNCTION(1, 2),
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "EINT2"),
+ MTK_FUNCTION(2, "PWM3"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(5, "USB_TEST_IO[14]"),
+ MTK_FUNCTION(6, "SRCLKENAI2"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[30]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(119, "EINT3"),
+ "R4", "mt8135",
+ MTK_EINT_FUNCTION(1, 3),
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "EINT3"),
+ MTK_FUNCTION(5, "USB_TEST_IO[15]"),
+ MTK_FUNCTION(6, "SRCLKENAI1"),
+ MTK_FUNCTION(7, "EXT_26M_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(120, "EINT4"),
+ "R5", "mt8135",
+ MTK_EINT_FUNCTION(1, 4),
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "EINT4"),
+ MTK_FUNCTION(2, "PWM4"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(7, "A_FUNC_DIN[31]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(121, "DPIDE"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 100),
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "DPI0_DE"),
+ MTK_FUNCTION(2, "EINT100"),
+ MTK_FUNCTION(3, "I2SOUT_DAT"),
+ MTK_FUNCTION(4, "DAC_DAT_OUT"),
+ MTK_FUNCTION(5, "PCM1_DO"),
+ MTK_FUNCTION(6, "IRDA_TXD")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(122, "DPICK"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 101),
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "DPI0_CK"),
+ MTK_FUNCTION(2, "EINT101"),
+ MTK_FUNCTION(3, "I2SIN_DAT"),
+ MTK_FUNCTION(5, "PCM1_DI"),
+ MTK_FUNCTION(6, "IRDA_PDN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(123, "DPIG4"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 114),
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "DPI0_G4"),
+ MTK_FUNCTION(2, "EINT114"),
+ MTK_FUNCTION(4, "CM2DAT_2X[0]"),
+ MTK_FUNCTION(5, "DSP2_ID")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(124, "DPIG5"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 115),
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "DPI0_G5"),
+ MTK_FUNCTION(2, "EINT115"),
+ MTK_FUNCTION(4, "CM2DAT_2X[1]"),
+ MTK_FUNCTION(5, "DSP2_ICK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(125, "DPIR3"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 121),
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "DPI0_R3"),
+ MTK_FUNCTION(2, "EINT121"),
+ MTK_FUNCTION(4, "CM2DAT_2X[7]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(126, "DPIG1"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 111),
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "DPI0_G1"),
+ MTK_FUNCTION(2, "EINT111"),
+ MTK_FUNCTION(5, "DSP1_ICK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(127, "DPIVSYNC"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 98),
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "DPI0_VSYNC"),
+ MTK_FUNCTION(2, "EINT98"),
+ MTK_FUNCTION(3, "I2SIN_CK"),
+ MTK_FUNCTION(4, "DAC_CK"),
+ MTK_FUNCTION(5, "PCM1_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(128, "DPIHSYNC"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 99),
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "DPI0_HSYNC"),
+ MTK_FUNCTION(2, "EINT99"),
+ MTK_FUNCTION(3, "I2SIN_WS"),
+ MTK_FUNCTION(4, "DAC_WS"),
+ MTK_FUNCTION(5, "PCM1_WS"),
+ MTK_FUNCTION(6, "IRDA_RXD")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(129, "DPIB0"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 102),
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "DPI0_B0"),
+ MTK_FUNCTION(2, "EINT102"),
+ MTK_FUNCTION(4, "SCL0"),
+ MTK_FUNCTION(5, "DISP_PWM")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(130, "DPIB1"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 103),
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "DPI0_B1"),
+ MTK_FUNCTION(2, "EINT103"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "SDA0"),
+ MTK_FUNCTION(5, "PWM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(131, "DPIB2"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 104),
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "DPI0_B2"),
+ MTK_FUNCTION(2, "EINT104"),
+ MTK_FUNCTION(3, "CLKM1"),
+ MTK_FUNCTION(4, "SCL1"),
+ MTK_FUNCTION(5, "PWM2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(132, "DPIB3"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 105),
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "DPI0_B3"),
+ MTK_FUNCTION(2, "EINT105"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(4, "SDA1"),
+ MTK_FUNCTION(5, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(133, "DPIB4"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 106),
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "DPI0_B4"),
+ MTK_FUNCTION(2, "EINT106"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(4, "SCL2"),
+ MTK_FUNCTION(5, "PWM4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(134, "DPIB5"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 107),
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "DPI0_B5"),
+ MTK_FUNCTION(2, "EINT107"),
+ MTK_FUNCTION(3, "CLKM4"),
+ MTK_FUNCTION(4, "SDA2"),
+ MTK_FUNCTION(5, "PWM5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(135, "DPIB6"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 108),
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "DPI0_B6"),
+ MTK_FUNCTION(2, "EINT108"),
+ MTK_FUNCTION(3, "CLKM5"),
+ MTK_FUNCTION(4, "SCL3"),
+ MTK_FUNCTION(5, "PWM6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(136, "DPIB7"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 109),
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "DPI0_B7"),
+ MTK_FUNCTION(2, "EINT109"),
+ MTK_FUNCTION(3, "CLKM6"),
+ MTK_FUNCTION(4, "SDA3"),
+ MTK_FUNCTION(5, "PWM7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(137, "DPIG0"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 110),
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "DPI0_G0"),
+ MTK_FUNCTION(2, "EINT110"),
+ MTK_FUNCTION(5, "DSP1_ID")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(138, "DPIG2"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 112),
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "DPI0_G2"),
+ MTK_FUNCTION(2, "EINT112"),
+ MTK_FUNCTION(5, "DSP1_IMS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(139, "DPIG3"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 113),
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "DPI0_G3"),
+ MTK_FUNCTION(2, "EINT113"),
+ MTK_FUNCTION(5, "DSP2_IMS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(140, "DPIG6"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 116),
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "DPI0_G6"),
+ MTK_FUNCTION(2, "EINT116"),
+ MTK_FUNCTION(4, "CM2DAT_2X[2]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(141, "DPIG7"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 117),
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "DPI0_G7"),
+ MTK_FUNCTION(2, "EINT117"),
+ MTK_FUNCTION(4, "CM2DAT_2X[3]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(142, "DPIR0"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 118),
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "DPI0_R0"),
+ MTK_FUNCTION(2, "EINT118"),
+ MTK_FUNCTION(4, "CM2DAT_2X[4]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(143, "DPIR1"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 119),
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "DPI0_R1"),
+ MTK_FUNCTION(2, "EINT119"),
+ MTK_FUNCTION(4, "CM2DAT_2X[5]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(144, "DPIR2"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 120),
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "DPI0_R2"),
+ MTK_FUNCTION(2, "EINT120"),
+ MTK_FUNCTION(4, "CM2DAT_2X[6]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(145, "DPIR4"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 122),
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "DPI0_R4"),
+ MTK_FUNCTION(2, "EINT122"),
+ MTK_FUNCTION(4, "CM2DAT_2X[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(146, "DPIR5"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 123),
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "DPI0_R5"),
+ MTK_FUNCTION(2, "EINT123"),
+ MTK_FUNCTION(4, "CM2DAT_2X[9]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(147, "DPIR6"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 124),
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "DPI0_R6"),
+ MTK_FUNCTION(2, "EINT124"),
+ MTK_FUNCTION(4, "CM2VSYNC_2X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(148, "DPIR7"),
+ NULL, "mt8135",
+ MTK_EINT_FUNCTION(2, 125),
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "DPI0_R7"),
+ MTK_FUNCTION(2, "EINT125"),
+ MTK_FUNCTION(4, "CM2HSYNC_2X")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(149, "TDN3/LVDS(TDN3)"),
+ "AA2", "mt8135",
+ MTK_EINT_FUNCTION(2, 36),
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(2, "EINT36")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(150, "TDP3/LVDS(TDP3)"),
+ "AA1", "mt8135",
+ MTK_EINT_FUNCTION(2, 35),
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(2, "EINT35")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(151, "TDN2/LVDS(TCN)"),
+ "Y2", "mt8135",
+ MTK_EINT_FUNCTION(2, 169),
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(2, "EINT169")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(152, "TDP2/LVDS(TCP)"),
+ "Y1", "mt8135",
+ MTK_EINT_FUNCTION(2, 168),
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(2, "EINT168")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(153, "TCN/LVDS(TDN2)"),
+ "W2", "mt8135",
+ MTK_EINT_FUNCTION(2, 163),
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(2, "EINT163")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(154, "TCP/LVDS(TDP2)"),
+ "W1", "mt8135",
+ MTK_EINT_FUNCTION(2, 162),
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(2, "EINT162")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(155, "TDN1/LVDS(TDN1)"),
+ "V3", "mt8135",
+ MTK_EINT_FUNCTION(2, 167),
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(2, "EINT167")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(156, "TDP1/LVDS(TDP1)"),
+ "V2", "mt8135",
+ MTK_EINT_FUNCTION(2, 166),
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(2, "EINT166")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(157, "TDN0/LVDS(TDN0)"),
+ "U3", "mt8135",
+ MTK_EINT_FUNCTION(2, 165),
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(2, "EINT165")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(158, "TDP0/LVDS(TDP0)"),
+ "U2", "mt8135",
+ MTK_EINT_FUNCTION(2, 164),
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(2, "EINT164")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(159, "RDN3"),
+ "N5", "mt8135",
+ MTK_EINT_FUNCTION(2, 18),
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(2, "EINT18")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(160, "RDP3"),
+ "N4", "mt8135",
+ MTK_EINT_FUNCTION(2, 30),
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(2, "EINT30")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(161, "RDN2"),
+ "T2", "mt8135",
+ MTK_EINT_FUNCTION(2, 31),
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(2, "EINT31")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(162, "RDP2"),
+ "T3", "mt8135",
+ MTK_EINT_FUNCTION(2, 32),
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(2, "EINT32")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(163, "RCN"),
+ "P2", "mt8135",
+ MTK_EINT_FUNCTION(2, 33),
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(2, "EINT33")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(164, "RCP"),
+ "P3", "mt8135",
+ MTK_EINT_FUNCTION(2, 39),
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(2, "EINT39")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(165, "RDN1"),
+ "R3", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO165")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(166, "RDP1"),
+ "R2", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO166")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(167, "RDN0"),
+ "N3", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO167")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(168, "RDP0"),
+ "N2", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO168")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(169, "RDN1_A"),
+ "M4", "mt8135",
+ MTK_EINT_FUNCTION(2, 175),
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "CMDAT6"),
+ MTK_FUNCTION(2, "EINT175")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(170, "RDP1_A"),
+ "M3", "mt8135",
+ MTK_EINT_FUNCTION(2, 174),
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "CMDAT7"),
+ MTK_FUNCTION(2, "EINT174")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(171, "RCN_A"),
+ "L3", "mt8135",
+ MTK_EINT_FUNCTION(2, 171),
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "CMDAT8"),
+ MTK_FUNCTION(2, "EINT171")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(172, "RCP_A"),
+ "L2", "mt8135",
+ MTK_EINT_FUNCTION(2, 170),
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "CMDAT9"),
+ MTK_FUNCTION(2, "EINT170")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(173, "RDN0_A"),
+ "M2", "mt8135",
+ MTK_EINT_FUNCTION(2, 173),
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "CMHSYNC"),
+ MTK_FUNCTION(2, "EINT173")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(174, "RDP0_A"),
+ "M1", "mt8135",
+ MTK_EINT_FUNCTION(2, 172),
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "CMVSYNC"),
+ MTK_FUNCTION(2, "EINT172")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(175, "RDN1_B"),
+ "H2", "mt8135",
+ MTK_EINT_FUNCTION(2, 181),
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "CMDAT2"),
+ MTK_FUNCTION(2, "EINT181"),
+ MTK_FUNCTION(3, "CMCSD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(176, "RDP1_B"),
+ "H1", "mt8135",
+ MTK_EINT_FUNCTION(2, 180),
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "CMDAT3"),
+ MTK_FUNCTION(2, "EINT180"),
+ MTK_FUNCTION(3, "CMCSD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(177, "RCN_B"),
+ "K3", "mt8135",
+ MTK_EINT_FUNCTION(2, 177),
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "CMDAT4"),
+ MTK_FUNCTION(2, "EINT177")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(178, "RCP_B"),
+ "K2", "mt8135",
+ MTK_EINT_FUNCTION(2, 176),
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "CMDAT5"),
+ MTK_FUNCTION(2, "EINT176")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(179, "RDN0_B"),
+ "J3", "mt8135",
+ MTK_EINT_FUNCTION(2, 179),
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "CMDAT0"),
+ MTK_FUNCTION(2, "EINT179"),
+ MTK_FUNCTION(3, "CMCSD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(180, "RDP0_B"),
+ "J2", "mt8135",
+ MTK_EINT_FUNCTION(2, 178),
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "CMDAT1"),
+ MTK_FUNCTION(2, "EINT178"),
+ MTK_FUNCTION(3, "CMCSD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(181, "CMPCLK"),
+ "K4", "mt8135",
+ MTK_EINT_FUNCTION(2, 182),
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "CMPCLK"),
+ MTK_FUNCTION(2, "EINT182"),
+ MTK_FUNCTION(3, "CMCSK"),
+ MTK_FUNCTION(4, "CM2MCLK_4X"),
+ MTK_FUNCTION(5, "TS_AUXADC_SEL[3]"),
+ MTK_FUNCTION(6, "VENC_TEST_CK"),
+ MTK_FUNCTION(7, "TESTA_OUT27")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(182, "CMMCLK"),
+ "J5", "mt8135",
+ MTK_EINT_FUNCTION(2, 183),
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "CMMCLK"),
+ MTK_FUNCTION(2, "EINT183"),
+ MTK_FUNCTION(5, "TS_AUXADC_SEL[2]"),
+ MTK_FUNCTION(7, "TESTA_OUT28")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(183, "CMRST"),
+ "J6", "mt8135",
+ MTK_EINT_FUNCTION(2, 185),
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "CMRST"),
+ MTK_FUNCTION(2, "EINT185"),
+ MTK_FUNCTION(5, "TS_AUXADC_SEL[1]"),
+ MTK_FUNCTION(7, "TESTA_OUT30")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(184, "CMPDN"),
+ "J4", "mt8135",
+ MTK_EINT_FUNCTION(2, 184),
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "CMPDN"),
+ MTK_FUNCTION(2, "EINT184"),
+ MTK_FUNCTION(5, "TS_AUXADC_SEL[0]"),
+ MTK_FUNCTION(7, "TESTA_OUT29")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(185, "CMFLASH"),
+ "G4", "mt8135",
+ MTK_EINT_FUNCTION(2, 186),
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "CMFLASH"),
+ MTK_FUNCTION(2, "EINT186"),
+ MTK_FUNCTION(3, "CM2MCLK_3X"),
+ MTK_FUNCTION(6, "MFG_TEST_CK_1"),
+ MTK_FUNCTION(7, "TESTA_OUT31")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(186, "MRG_I2S_PCM_CLK"),
+ "F5", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "MRG_I2S_PCM_CLK"),
+ MTK_FUNCTION(3, "I2SIN_CK"),
+ MTK_FUNCTION(4, "PCM0_CK"),
+ MTK_FUNCTION(5, "DSP2_ICK"),
+ MTK_FUNCTION(6, "IMG_TEST_CK"),
+ MTK_FUNCTION(7, "USB_SCL")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(187, "MRG_I2S_PCM_SYNC"),
+ "G6", "mt8135",
+ MTK_EINT_FUNCTION(2, 16),
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "MRG_I2S_PCM_SYNC"),
+ MTK_FUNCTION(2, "EINT16"),
+ MTK_FUNCTION(3, "I2SIN_WS"),
+ MTK_FUNCTION(4, "PCM0_WS"),
+ MTK_FUNCTION(6, "DISP_TEST_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(188, "MRG_I2S_PCM_RX"),
+ "G3", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "MRG_I2S_PCM_RX"),
+ MTK_FUNCTION(3, "I2SIN_DAT"),
+ MTK_FUNCTION(4, "PCM0_DI"),
+ MTK_FUNCTION(5, "DSP2_ID"),
+ MTK_FUNCTION(6, "MFG_TEST_CK"),
+ MTK_FUNCTION(7, "USB_SDA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(189, "MRG_I2S_PCM_TX"),
+ "G5", "mt8135",
+ MTK_EINT_FUNCTION(2, 17),
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(1, "MRG_I2S_PCM_TX"),
+ MTK_FUNCTION(2, "EINT17"),
+ MTK_FUNCTION(3, "I2SOUT_DAT"),
+ MTK_FUNCTION(4, "PCM0_DO"),
+ MTK_FUNCTION(6, "VDEC_TEST_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(190, "SRCLKENAI"),
+ "K5", "mt8135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "SRCLKENAI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(191, "URXD3"),
+ "C3", "mt8135",
+ MTK_EINT_FUNCTION(2, 87),
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "URXD3"),
+ MTK_FUNCTION(2, "EINT87"),
+ MTK_FUNCTION(3, "UTXD3"),
+ MTK_FUNCTION(5, "TS_AUX_ST"),
+ MTK_FUNCTION(6, "PWM4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(192, "UTXD3"),
+ "B2", "mt8135",
+ MTK_EINT_FUNCTION(2, 86),
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "UTXD3"),
+ MTK_FUNCTION(2, "EINT86"),
+ MTK_FUNCTION(3, "URXD3"),
+ MTK_FUNCTION(5, "TS_AUX_CS_B"),
+ MTK_FUNCTION(6, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(193, "SDA2"),
+ "G2", "mt8135",
+ MTK_EINT_FUNCTION(2, 95),
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(2, "EINT95"),
+ MTK_FUNCTION(3, "CLKM5"),
+ MTK_FUNCTION(4, "PWM5"),
+ MTK_FUNCTION(5, "TS_AUX_PWDB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(194, "SCL2"),
+ "F4", "mt8135",
+ MTK_EINT_FUNCTION(2, 94),
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(2, "EINT94"),
+ MTK_FUNCTION(3, "CLKM4"),
+ MTK_FUNCTION(4, "PWM4"),
+ MTK_FUNCTION(5, "TS_AUXADC_TEST_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(195, "SDA1"),
+ "F2", "mt8135",
+ MTK_EINT_FUNCTION(2, 93),
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(2, "EINT93"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(4, "PWM3"),
+ MTK_FUNCTION(5, "TS_AUX_SCLK_PWDB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(196, "SCL1"),
+ "F3", "mt8135",
+ MTK_EINT_FUNCTION(2, 92),
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "SCL1"),
+ MTK_FUNCTION(2, "EINT92"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(4, "PWM2"),
+ MTK_FUNCTION(5, "TS_AUX_DIN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(197, "MSDC3_DAT2"),
+ "E1", "mt8135",
+ MTK_EINT_FUNCTION(2, 71),
+ MTK_FUNCTION(0, "GPIO197"),
+ MTK_FUNCTION(1, "MSDC3_DAT2"),
+ MTK_FUNCTION(2, "EINT71"),
+ MTK_FUNCTION(3, "SCL6"),
+ MTK_FUNCTION(4, "PWM5"),
+ MTK_FUNCTION(5, "CLKM4"),
+ MTK_FUNCTION(6, "MFG_TEST_CK_2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(198, "MSDC3_DAT3"),
+ "C2", "mt8135",
+ MTK_EINT_FUNCTION(2, 72),
+ MTK_FUNCTION(0, "GPIO198"),
+ MTK_FUNCTION(1, "MSDC3_DAT3"),
+ MTK_FUNCTION(2, "EINT72"),
+ MTK_FUNCTION(3, "SDA6"),
+ MTK_FUNCTION(4, "PWM6"),
+ MTK_FUNCTION(5, "CLKM5"),
+ MTK_FUNCTION(6, "MFG_TEST_CK_3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(199, "MSDC3_CMD"),
+ "D2", "mt8135",
+ MTK_EINT_FUNCTION(2, 68),
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "MSDC3_CMD"),
+ MTK_FUNCTION(2, "EINT68"),
+ MTK_FUNCTION(3, "SDA2"),
+ MTK_FUNCTION(4, "PWM2"),
+ MTK_FUNCTION(5, "CLKM1"),
+ MTK_FUNCTION(6, "MFG_TEST_CK_4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(200, "MSDC3_CLK"),
+ "E2", "mt8135",
+ MTK_EINT_FUNCTION(2, 67),
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(1, "MSDC3_CLK"),
+ MTK_FUNCTION(2, "EINT67"),
+ MTK_FUNCTION(3, "SCL2"),
+ MTK_FUNCTION(4, "PWM1"),
+ MTK_FUNCTION(5, "CLKM0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(201, "MSDC3_DAT1"),
+ "D3", "mt8135",
+ MTK_EINT_FUNCTION(2, 70),
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(1, "MSDC3_DAT1"),
+ MTK_FUNCTION(2, "EINT70"),
+ MTK_FUNCTION(3, "SDA3"),
+ MTK_FUNCTION(4, "PWM4"),
+ MTK_FUNCTION(5, "CLKM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(202, "MSDC3_DAT0"),
+ "E3", "mt8135",
+ MTK_EINT_FUNCTION(2, 69),
+ MTK_FUNCTION(0, "GPIO202"),
+ MTK_FUNCTION(1, "MSDC3_DAT0"),
+ MTK_FUNCTION(2, "EINT69"),
+ MTK_FUNCTION(3, "SCL3"),
+ MTK_FUNCTION(4, "PWM3"),
+ MTK_FUNCTION(5, "CLKM2")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT8135_H */
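
The MediaTek table above is pure data: each MTK_PIN() entry ties a GPIO number to its package ball, its external-interrupt routing, and up to eight mode strings. As a rough illustration of how such a descriptor table can be consumed, here is a minimal, self-contained sketch; demo_pin, demo_func and demo_find_mux are hypothetical stand-ins, since the real mtk_desc_pin layout and lookup code live in pinctrl-mtk-common.h/.c.

```c
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the structures behind MTK_PIN()/MTK_FUNCTION(). */
struct demo_func {
	unsigned char muxval;		/* value programmed into the mode register */
	const char *name;		/* function name, e.g. "MSDC1_CMD" */
};

struct demo_pin {
	unsigned short number;		/* GPIO number, e.g. 85 */
	const char *ball;		/* package ball, e.g. "AA20" */
	unsigned char eint_mux;		/* mode that routes the pin to the EINT block */
	unsigned short eint_num;	/* external interrupt line in that mode */
	struct demo_func funcs[8];	/* modes 0..7; name == NULL means unused */
};

/* One entry transcribed from the mt8135 table (pin 85, MSDC1_CMD). */
static const struct demo_pin demo_pins[] = {
	{ 85, "AA20", 2, 52, {
		{ 0, "GPIO85" }, { 1, "MSDC1_CMD" }, { 2, "EINT52" },
		{ 3, "SDA0" },   { 4, "PWM1" },      { 5, "CLKM0" },
		{ 7, "TESTB_OUT1" },
	} },
};

/* Return the mux value that selects @func on @pin, or -1 if not found. */
static int demo_find_mux(unsigned short pin, const char *func)
{
	size_t i, j;

	for (i = 0; i < sizeof(demo_pins) / sizeof(demo_pins[0]); i++) {
		if (demo_pins[i].number != pin)
			continue;
		for (j = 0; j < 8; j++)
			if (demo_pins[i].funcs[j].name &&
			    !strcmp(demo_pins[i].funcs[j].name, func))
				return demo_pins[i].funcs[j].muxval;
	}
	return -1;
}

int main(void)
{
	printf("MSDC1_CMD on pin 85 -> mode %d\n", demo_find_mux(85, "MSDC1_CMD"));
	return 0;
}
```
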
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h
new file mode 100644
index 000000000000..13e5b68bfe1b
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_MTK_MT8173_H
+#define __PINCTRL_MTK_MT8173_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt8173[] = {
+ MTK_PIN(
+ PINCTRL_PIN(0, "EINT0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 0),
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "IRDA_PDN"),
+ MTK_FUNCTION(2, "I2S1_WS"),
+ MTK_FUNCTION(3, "AUD_SPDIF"),
+ MTK_FUNCTION(4, "UTXD0"),
+ MTK_FUNCTION(7, "DBG_MON_A_20_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(1, "EINT1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 1),
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "IRDA_RXD"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "SDA5"),
+ MTK_FUNCTION(4, "URXD0"),
+ MTK_FUNCTION(7, "DBG_MON_A_21_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(2, "EINT2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 2),
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "IRDA_TXD"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "SCL5"),
+ MTK_FUNCTION(4, "UTXD3"),
+ MTK_FUNCTION(7, "DBG_MON_A_22_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(3, "EINT3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 3),
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "DSI1_TE"),
+ MTK_FUNCTION(2, "I2S1_DO_1"),
+ MTK_FUNCTION(3, "SDA3"),
+ MTK_FUNCTION(4, "URXD3"),
+ MTK_FUNCTION(7, "DBG_MON_A_23_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(4, "EINT4"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 4),
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "DISP_PWM1"),
+ MTK_FUNCTION(2, "I2S1_DO_2"),
+ MTK_FUNCTION(3, "SCL3"),
+ MTK_FUNCTION(4, "UCTS3"),
+ MTK_FUNCTION(6, "SFWP_B")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(5, "EINT5"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 5),
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "PCM1_CLK"),
+ MTK_FUNCTION(2, "I2S2_WS"),
+ MTK_FUNCTION(3, "SPI_CK_3_"),
+ MTK_FUNCTION(4, "URTS3"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TMS"),
+ MTK_FUNCTION(6, "SFOUT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(6, "EINT6"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 6),
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "PCM1_SYNC"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(3, "SPI_MI_3_"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TCK"),
+ MTK_FUNCTION(6, "SFCS0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(7, "EINT7"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 7),
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "PCM1_DI"),
+ MTK_FUNCTION(2, "I2S2_DI_1"),
+ MTK_FUNCTION(3, "SPI_MO_3_"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDI"),
+ MTK_FUNCTION(6, "SFHOLD")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(8, "EINT8"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 8),
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "PCM1_DO"),
+ MTK_FUNCTION(2, "I2S2_DI_2"),
+ MTK_FUNCTION(3, "SPI_CS_3_"),
+ MTK_FUNCTION(4, "AUD_SPDIF"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDO"),
+ MTK_FUNCTION(6, "SFIN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(9, "EINT9"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 9),
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_P0"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_P1"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TRST"),
+ MTK_FUNCTION(6, "SFCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(10, "EINT10"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 10),
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "CLKM0"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "DISP_PWM1"),
+ MTK_FUNCTION(4, "PWM4"),
+ MTK_FUNCTION(5, "IRDA_RXD")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(11, "EINT11"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 11),
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "CLKM1"),
+ MTK_FUNCTION(2, "I2S3_WS"),
+ MTK_FUNCTION(3, "USB_DRVVBUS_P0"),
+ MTK_FUNCTION(4, "PWM5"),
+ MTK_FUNCTION(5, "IRDA_TXD"),
+ MTK_FUNCTION(6, "USB_DRVVBUS_P1"),
+ MTK_FUNCTION(7, "DBG_MON_B_30_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(12, "EINT12"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 12),
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "CLKM2"),
+ MTK_FUNCTION(2, "I2S3_BCK"),
+ MTK_FUNCTION(3, "SRCLKENA0"),
+ MTK_FUNCTION(5, "I2S2_WS"),
+ MTK_FUNCTION(7, "DBG_MON_B_32_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(13, "EINT13"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 13),
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "CLKM3"),
+ MTK_FUNCTION(2, "I2S3_MCK"),
+ MTK_FUNCTION(3, "SRCLKENA0"),
+ MTK_FUNCTION(5, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A_32_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(14, "EINT14"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 14),
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "CMDAT0"),
+ MTK_FUNCTION(2, "CMCSD0"),
+ MTK_FUNCTION(4, "CLKM2"),
+ MTK_FUNCTION(7, "DBG_MON_B_6_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(15, "EINT15"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 15),
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "CMDAT1"),
+ MTK_FUNCTION(2, "CMCSD1"),
+ MTK_FUNCTION(3, "CMFLASH"),
+ MTK_FUNCTION(4, "CLKM3"),
+ MTK_FUNCTION(7, "DBG_MON_B_29_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(16, "IDDIG"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 16),
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "CMFLASH"),
+ MTK_FUNCTION(4, "PWM5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(17, "WATCHDOG"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 17),
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "WATCHDOG_AO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(18, "CEC"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 18),
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "CEC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(19, "HDMISCK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 19),
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "HDMISCK"),
+ MTK_FUNCTION(2, "HDCP_SCL")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(20, "HDMISD"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 20),
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "HDMISD"),
+ MTK_FUNCTION(2, "HDCP_SDA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(21, "HTPLG"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 21),
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "HTPLG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(22, "MSDC3_DAT0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 22),
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "MSDC3_DAT0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(23, "MSDC3_DAT1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 23),
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "MSDC3_DAT1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(24, "MSDC3_DAT2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 24),
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "MSDC3_DAT2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(25, "MSDC3_DAT3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 25),
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "MSDC3_DAT3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(26, "MSDC3_CLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 26),
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "MSDC3_CLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(27, "MSDC3_CMD"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 27),
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "MSDC3_CMD")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(28, "MSDC3_DSL"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 28),
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "MSDC3_DSL")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(29, "UCTS2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 29),
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "UCTS2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(30, "URTS2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 30),
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "URTS2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(31, "URXD2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 31),
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(2, "UTXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(32, "UTXD2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 32),
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(2, "URXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(33, "DAICLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 33),
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, " MRG_CLK"),
+ MTK_FUNCTION(2, "PCM0_CLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(34, "DAIPCMIN"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 34),
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, " MRG_DI"),
+ MTK_FUNCTION(2, "PCM0_DI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(35, "DAIPCMOUT"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 35),
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, " MRG_DO"),
+ MTK_FUNCTION(2, "PCM0_DO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(36, "DAISYNC"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 36),
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, " MRG_SYNC"),
+ MTK_FUNCTION(2, "PCM0_SYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(37, "EINT16"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 37),
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_P0"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_P1"),
+ MTK_FUNCTION(3, "PWM0"),
+ MTK_FUNCTION(4, "PWM1"),
+ MTK_FUNCTION(5, "PWM2"),
+ MTK_FUNCTION(6, "CLKM0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(38, "CONN_RST"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 38),
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_P0"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_P1"),
+ MTK_FUNCTION(6, "CLKM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(39, "CM2MCLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 39),
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "CM2MCLK"),
+ MTK_FUNCTION(2, "CMCSD0"),
+ MTK_FUNCTION(7, "DBG_MON_A_17_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(40, "CMPCLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 40),
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "CMPCLK"),
+ MTK_FUNCTION(2, "CMCSK"),
+ MTK_FUNCTION(3, "CMCSD2"),
+ MTK_FUNCTION(7, "DBG_MON_A_18_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(41, "CMMCLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 41),
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "CMMCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A_19_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(42, "DSI_TE"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 42),
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "DSI_TE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(43, "SDA2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 43),
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "SDA2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(44, "SCL2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 44),
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "SCL2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(45, "SDA0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 45),
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "SDA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(46, "SCL0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 46),
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "SCL0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(47, "RDN0_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 47),
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "CMDAT2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(48, "RDP0_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 48),
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "CMDAT3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(49, "RDN1_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 49),
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "CMDAT4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(50, "RDP1_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 50),
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "CMDAT5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(51, "RCN_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 51),
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "CMDAT6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(52, "RCP_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 52),
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "CMDAT7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(53, "RDN2_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 53),
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "CMDAT8"),
+ MTK_FUNCTION(2, "CMCSD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(54, "RDP2_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 54),
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "CMDAT9"),
+ MTK_FUNCTION(2, "CMCSD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(55, "RDN3_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 55),
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "CMHSYNC"),
+ MTK_FUNCTION(2, "CMCSD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(56, "RDP3_A"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 56),
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "CMVSYNC"),
+ MTK_FUNCTION(2, "CMCSD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(57, "MSDC0_DAT0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 57),
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(2, "I2S1_WS"),
+ MTK_FUNCTION(7, "DBG_MON_B_7_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(58, "MSDC0_DAT1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 58),
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B_8_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(59, "MSDC0_DAT2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 59),
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B_9_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(60, "MSDC0_DAT3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 60),
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(2, "I2S1_DO_1"),
+ MTK_FUNCTION(7, "DBG_MON_B_10_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(61, "MSDC0_DAT4"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 61),
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(2, "I2S1_DO_2"),
+ MTK_FUNCTION(7, "DBG_MON_B_11_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(62, "MSDC0_DAT5"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 62),
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(2, "I2S2_WS"),
+ MTK_FUNCTION(7, "DBG_MON_B_12_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(63, "MSDC0_DAT6"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 63),
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B_13_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(64, "MSDC0_DAT7"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 64),
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(2, "I2S2_DI_1"),
+ MTK_FUNCTION(7, "DBG_MON_B_14_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(65, "MSDC0_CLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 65),
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_B_16_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(66, "MSDC0_CMD"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 66),
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(2, "I2S2_DI_2"),
+ MTK_FUNCTION(7, "DBG_MON_B_15_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(67, "MSDC0_DSL"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 67),
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "MSDC0_DSL"),
+ MTK_FUNCTION(7, "DBG_MON_B_17_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(68, "MSDC0_RST_"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 68),
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B_18_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(69, "SPI_CK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 69),
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "SPI_CK_0_"),
+ MTK_FUNCTION(2, "I2S3_DO_1"),
+ MTK_FUNCTION(3, "PWM0"),
+ MTK_FUNCTION(4, "PWM5"),
+ MTK_FUNCTION(5, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B_19_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(70, "SPI_MI"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 70),
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "SPI_MI_0_"),
+ MTK_FUNCTION(2, "I2S3_DO_2"),
+ MTK_FUNCTION(3, "PWM1"),
+ MTK_FUNCTION(4, "SPI_MO_0_"),
+ MTK_FUNCTION(5, "I2S2_DI_1"),
+ MTK_FUNCTION(6, "DSI1_TE"),
+ MTK_FUNCTION(7, "DBG_MON_B_20_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(71, "SPI_MO"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 71),
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "SPI_MO_0_"),
+ MTK_FUNCTION(2, "I2S3_DO_3"),
+ MTK_FUNCTION(3, "PWM2"),
+ MTK_FUNCTION(4, "SPI_MI_0_"),
+ MTK_FUNCTION(5, "I2S2_DI_2"),
+ MTK_FUNCTION(7, "DBG_MON_B_21_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(72, "SPI_CS"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 72),
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "SPI_CS_0_"),
+ MTK_FUNCTION(2, "I2S3_DO_4"),
+ MTK_FUNCTION(3, "PWM3"),
+ MTK_FUNCTION(4, "PWM6"),
+ MTK_FUNCTION(5, "DISP_PWM1"),
+ MTK_FUNCTION(7, "DBG_MON_B_22_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(73, "MSDC1_DAT0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 73),
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(7, "DBG_MON_B_24_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(74, "MSDC1_DAT1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 74),
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(7, "DBG_MON_B_25_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(75, "MSDC1_DAT2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 75),
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(7, "DBG_MON_B_26_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(76, "MSDC1_DAT3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 76),
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(7, "DBG_MON_B_27_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(77, "MSDC1_CLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 77),
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_B_28_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(78, "MSDC1_CMD"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 78),
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(7, "DBG_MON_B_23_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(79, "PWRAP_SPI0_MI"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 79),
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "PWRAP_SPIMI"),
+ MTK_FUNCTION(2, "PWRAP_SPIMO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(80, "PWRAP_SPI0_MO"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 80),
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "PWRAP_SPIMO"),
+ MTK_FUNCTION(2, "PWRAP_SPIMI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(81, "PWRAP_SPI0_CK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 81),
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "PWRAP_SPICK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(82, "PWRAP_SPI0_CSN"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 82),
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "PWRAP_SPICS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(83, "AUD_CLK_MOSI"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 83),
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(84, "AUD_DAT_MISO"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 84),
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO"),
+ MTK_FUNCTION(2, "AUD_DAT_MOSI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(85, "AUD_DAT_MOSI"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 85),
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI"),
+ MTK_FUNCTION(2, "AUD_DAT_MISO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(86, "RTC32K_CK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 86),
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(87, "DISP_PWM0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 87),
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "DISP_PWM0"),
+ MTK_FUNCTION(2, "DISP_PWM1"),
+ MTK_FUNCTION(7, "DBG_MON_B_31_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(88, "SRCLKENAI"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 88),
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "SRCLKENAI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(89, "SRCLKENAI2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 89),
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "SRCLKENAI2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(90, "SRCLKENA0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 90),
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(91, "SRCLKENA1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 91),
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(92, "PCM_CLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 92),
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "PCM1_CLK"),
+ MTK_FUNCTION(2, "I2S0_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A_24_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(93, "PCM_SYNC"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 93),
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "PCM1_SYNC"),
+ MTK_FUNCTION(2, "I2S0_WS"),
+ MTK_FUNCTION(7, "DBG_MON_A_25_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(94, "PCM_RX"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 94),
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "PCM1_DI"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A_26_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(95, "PCM_TX"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 95),
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "PCM1_DO"),
+ MTK_FUNCTION(2, "I2S0_DO"),
+ MTK_FUNCTION(7, "DBG_MON_A_27_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(96, "URXD1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 96),
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A_28_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(97, "UTXD1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 97),
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A_29_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(98, "URTS1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 98),
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(2, "UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_A_30_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(99, "UCTS1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 99),
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "URTS1"),
+ MTK_FUNCTION(7, "DBG_MON_A_31_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(100, "MSDC2_DAT0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 100),
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(3, "USB_DRVVBUS_P0"),
+ MTK_FUNCTION(4, "SDA5"),
+ MTK_FUNCTION(5, "USB_DRVVBUS_P1"),
+ MTK_FUNCTION(7, "DBG_MON_B_0_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(101, "MSDC2_DAT1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 101),
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(3, "AUD_SPDIF"),
+ MTK_FUNCTION(4, "SCL5"),
+ MTK_FUNCTION(7, "DBG_MON_B_1_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(102, "MSDC2_DAT2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 102),
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(3, "UTXD0"),
+ MTK_FUNCTION(5, "PWM0"),
+ MTK_FUNCTION(6, "SPI_CK_1_"),
+ MTK_FUNCTION(7, "DBG_MON_B_2_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(103, "MSDC2_DAT3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 103),
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(3, "URXD0"),
+ MTK_FUNCTION(5, "PWM1"),
+ MTK_FUNCTION(6, "SPI_MI_1_"),
+ MTK_FUNCTION(7, "DBG_MON_B_3_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(104, "MSDC2_CLK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 104),
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(3, "UTXD3"),
+ MTK_FUNCTION(4, "SDA3"),
+ MTK_FUNCTION(5, "PWM2"),
+ MTK_FUNCTION(6, "SPI_MO_1_"),
+ MTK_FUNCTION(7, "DBG_MON_B_4_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(105, "MSDC2_CMD"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 105),
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(3, "URXD3"),
+ MTK_FUNCTION(4, "SCL3"),
+ MTK_FUNCTION(5, "PWM3"),
+ MTK_FUNCTION(6, "SPI_CS_1_"),
+ MTK_FUNCTION(7, "DBG_MON_B_5_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(106, "SDA3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 106),
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SDA3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(107, "SCL3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 107),
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "SCL3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(108, "JTMS"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 108),
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "JTMS"),
+ MTK_FUNCTION(2, " MFG_JTAG_TMS"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TMS"),
+ MTK_FUNCTION(6, "DFD_TMS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(109, "JTCK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 109),
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "JTCK"),
+ MTK_FUNCTION(2, " MFG_JTAG_TCK"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TCK"),
+ MTK_FUNCTION(6, "DFD_TCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(110, "JTDI"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 110),
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "JTDI"),
+ MTK_FUNCTION(2, " MFG_JTAG_TDI"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDI"),
+ MTK_FUNCTION(6, "DFD_TDI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(111, "JTDO"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 111),
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "JTDO"),
+ MTK_FUNCTION(2, "MFG_JTAG_TDO"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TDO"),
+ MTK_FUNCTION(6, "DFD_TDO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(112, "JTRST_B"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 112),
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "JTRST_B"),
+ MTK_FUNCTION(2, " MFG_JTAG_TRSTN"),
+ MTK_FUNCTION(5, "AP_MD32_JTAG_TRST"),
+ MTK_FUNCTION(6, "DFD_NTRST")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(113, "URXD0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 113),
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "UTXD0"),
+ MTK_FUNCTION(6, "I2S2_WS"),
+ MTK_FUNCTION(7, "DBG_MON_A_0_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(114, "UTXD0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 114),
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "URXD0"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A_1_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(115, "URTS0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 115),
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "URTS0"),
+ MTK_FUNCTION(2, "UCTS0"),
+ MTK_FUNCTION(6, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A_2_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(116, "UCTS0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 116),
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "UCTS0"),
+ MTK_FUNCTION(2, "URTS0"),
+ MTK_FUNCTION(6, "I2S2_DI_1"),
+ MTK_FUNCTION(7, "DBG_MON_A_3_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(117, "URXD3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 117),
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "URXD3"),
+ MTK_FUNCTION(2, "UTXD3"),
+ MTK_FUNCTION(7, "DBG_MON_A_9_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(118, "UTXD3"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 118),
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "UTXD3"),
+ MTK_FUNCTION(2, "URXD3"),
+ MTK_FUNCTION(7, "DBG_MON_A_10_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(119, "KPROW0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 119),
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "KROW0"),
+ MTK_FUNCTION(7, "DBG_MON_A_11_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(120, "KPROW1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 120),
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "KROW1"),
+ MTK_FUNCTION(3, "PWM6"),
+ MTK_FUNCTION(7, "DBG_MON_A_12_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(121, "KPROW2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 121),
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "KROW2"),
+ MTK_FUNCTION(2, "IRDA_PDN"),
+ MTK_FUNCTION(3, "USB_DRVVBUS_P0"),
+ MTK_FUNCTION(4, "PWM4"),
+ MTK_FUNCTION(5, "USB_DRVVBUS_P1"),
+ MTK_FUNCTION(7, "DBG_MON_A_13_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(122, "KPCOL0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 122),
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "KCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_A_14_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(123, "KPCOL1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 123),
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "KCOL1"),
+ MTK_FUNCTION(2, "IRDA_RXD"),
+ MTK_FUNCTION(3, "PWM5"),
+ MTK_FUNCTION(7, "DBG_MON_A_15_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(124, "KPCOL2"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 124),
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "KCOL2"),
+ MTK_FUNCTION(2, "IRDA_TXD"),
+ MTK_FUNCTION(3, "USB_DRVVBUS_P0"),
+ MTK_FUNCTION(4, "PWM3"),
+ MTK_FUNCTION(5, "USB_DRVVBUS_P1"),
+ MTK_FUNCTION(7, "DBG_MON_A_16_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(125, "SDA1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 125),
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "SDA1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(126, "SCL1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 126),
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "SCL1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(127, "LCM_RST"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 127),
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "LCM_RST")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(128, "I2S0_LRCK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 128),
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "I2S0_WS"),
+ MTK_FUNCTION(2, "I2S1_WS"),
+ MTK_FUNCTION(3, "I2S2_WS"),
+ MTK_FUNCTION(5, "SPI_CK_2_"),
+ MTK_FUNCTION(7, "DBG_MON_A_4_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(129, "I2S0_BCK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 129),
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "I2S0_BCK"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(5, "SPI_MI_2_"),
+ MTK_FUNCTION(7, "DBG_MON_A_5_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(130, "I2S0_MCK"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 130),
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "I2S0_MCK"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(5, "SPI_MO_2_"),
+ MTK_FUNCTION(7, "DBG_MON_A_6_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(131, "I2S0_DATA0"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 131),
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "I2S0_DO"),
+ MTK_FUNCTION(2, "I2S1_DO_1"),
+ MTK_FUNCTION(3, "I2S2_DI_1"),
+ MTK_FUNCTION(5, "SPI_CS_2_"),
+ MTK_FUNCTION(7, "DBG_MON_A_7_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(132, "I2S0_DATA1"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 132),
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "I2S0_DI"),
+ MTK_FUNCTION(2, "I2S1_DO_2"),
+ MTK_FUNCTION(3, "I2S2_DI_2"),
+ MTK_FUNCTION(7, "DBG_MON_A_8_")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(133, "SDA4"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 133),
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "SDA4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(134, "SCL4"),
+ NULL, "mt8173",
+ MTK_EINT_FUNCTION(0, 134),
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "SCL4")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT8173_H */
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index eafc216067a4..c751d22fdf29 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -1,2 +1,2 @@
-obj-y += pinctrl-meson8.o
+obj-y += pinctrl-meson8.o pinctrl-meson8b.o
obj-y += pinctrl-meson.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index a2bf49ce16e7..edcd140e0899 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -13,8 +13,9 @@
/*
* The available pins are organized in banks (A,B,C,D,E,X,Y,Z,AO,
- * BOOT,CARD for meson6 and X,Y,DV,H,Z,AO,BOOT,CARD for meson8) and
- * each bank has a variable number of pins.
+ * BOOT,CARD for meson6, X,Y,DV,H,Z,AO,BOOT,CARD for meson8 and
+ * X,Y,DV,H,AO,BOOT,CARD,DIF for meson8b) and each bank has a
+ * variable number of pins.
*
* The AO bank is special because it belongs to the Always-On power
* domain which can't be powered off; the bank also uses a set of
@@ -544,6 +545,10 @@ static const struct of_device_id meson_pinctrl_dt_match[] = {
.compatible = "amlogic,meson8-pinctrl",
.data = &meson8_pinctrl_data,
},
+ {
+ .compatible = "amlogic,meson8b-pinctrl",
+ .data = &meson8b_pinctrl_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, meson_pinctrl_dt_match);
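Note on the hunks above: besides documenting the bank layout (X, Y, DV, H, Z, AO, BOOT, CARD, plus DIF on meson8b), the change registers the new "amlogic,meson8b-pinctrl" compatible with &meson8b_pinctrl_data as its match data. The sketch below shows, purely as an illustration, how such match data can be picked up at probe time; of_match_device() is a standard OF helper, but the probe function itself and its body are assumptions made for the example, not the driver's actual probe path.

    #include <linux/of_device.h>
    #include <linux/platform_device.h>
    #include "pinctrl-meson.h"

    /* Illustrative only: not the driver's real probe routine. */
    static int example_meson_pinctrl_probe(struct platform_device *pdev)
    {
            const struct of_device_id *match;
            struct meson_pinctrl_data *data;

            /* Find the entry of meson_pinctrl_dt_match that matched the DT node */
            match = of_match_device(meson_pinctrl_dt_match, &pdev->dev);
            if (!match)
                    return -ENODEV;

            /* For "amlogic,meson8b-pinctrl" this is &meson8b_pinctrl_data */
            data = (struct meson_pinctrl_data *)match->data;

            /* ... register the pins, groups and functions described by 'data' ... */
            return 0;
    }
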
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index bfea8adc7953..0fe7d53849ce 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -155,6 +155,8 @@ struct meson_pinctrl {
struct meson_domain *domains;
};
+#define PIN(x, b) (b + x)
+
#define GROUP(grp, r, b) \
{ \
.name = #grp, \
@@ -165,10 +167,10 @@ struct meson_pinctrl {
.domain = 0, \
}
-#define GPIO_GROUP(gpio) \
+#define GPIO_GROUP(gpio, b) \
{ \
.name = #gpio, \
- .pins = (const unsigned int[]){ PIN_ ## gpio}, \
+ .pins = (const unsigned int[]){ PIN(gpio, b) }, \
.num_pins = 1, \
.is_gpio = true, \
}
@@ -204,6 +206,7 @@ struct meson_pinctrl {
}, \
}
-#define MESON_PIN(x) PINCTRL_PIN(PIN_ ## x, #x)
+#define MESON_PIN(x, b) PINCTRL_PIN(PIN(x, b), #x)
extern struct meson_pinctrl_data meson8_pinctrl_data;
+extern struct meson_pinctrl_data meson8b_pinctrl_data;
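Note on the header change above: the per-SoC PIN_GPIO* defines are dropped in favour of a PIN(x, b) macro that adds a bank offset b to the pin index, and that offset is threaded through GPIO_GROUP() and MESON_PIN(), so MESON_PIN(GPIOAO_3, AO_OFF) now expands to PINCTRL_PIN(PIN(GPIOAO_3, AO_OFF), "GPIOAO_3"). A standalone worked expansion follows, using placeholder values for the dt-bindings constants (the real GPIOX_0/GPIOAO_3 values come from <dt-bindings/gpio/meson8-gpio.h>).

    #include <stdio.h>

    #define AO_OFF   120   /* meson8 uses 120; meson8b uses 130 */
    #define GPIOX_0    0   /* placeholder for the dt-bindings value */
    #define GPIOAO_3   3   /* placeholder for the dt-bindings value */

    #define PIN(x, b) (b + x)   /* global pin number = bank offset + local index */

    int main(void)
    {
            /* EE-domain banks pass offset 0, so local index == global number */
            printf("PIN(GPIOX_0, 0)       = %d\n", PIN(GPIOX_0, 0));
            /* AO-bank pins are numbered after the EE-domain pins */
            printf("PIN(GPIOAO_3, AO_OFF) = %d\n", PIN(GPIOAO_3, AO_OFF));
            return 0;
    }

With these placeholder values the program prints 0 and 123, the same numbering the removed PIN_GPIOAO_* defines produced via AO_OFFSET.
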
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index f8aa3a281767..7b1cc91733ef 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -14,620 +14,482 @@
#include <dt-bindings/gpio/meson8-gpio.h>
#include "pinctrl-meson.h"
-#define AO_OFFSET 120
-
-#define PIN_GPIOX_0 GPIOX_0
-#define PIN_GPIOX_1 GPIOX_1
-#define PIN_GPIOX_2 GPIOX_2
-#define PIN_GPIOX_3 GPIOX_3
-#define PIN_GPIOX_4 GPIOX_4
-#define PIN_GPIOX_5 GPIOX_5
-#define PIN_GPIOX_6 GPIOX_6
-#define PIN_GPIOX_7 GPIOX_7
-#define PIN_GPIOX_8 GPIOX_8
-#define PIN_GPIOX_9 GPIOX_9
-#define PIN_GPIOX_10 GPIOX_10
-#define PIN_GPIOX_11 GPIOX_11
-#define PIN_GPIOX_12 GPIOX_12
-#define PIN_GPIOX_13 GPIOX_13
-#define PIN_GPIOX_14 GPIOX_14
-#define PIN_GPIOX_15 GPIOX_15
-#define PIN_GPIOX_16 GPIOX_16
-#define PIN_GPIOX_17 GPIOX_17
-#define PIN_GPIOX_18 GPIOX_18
-#define PIN_GPIOX_19 GPIOX_19
-#define PIN_GPIOX_20 GPIOX_20
-#define PIN_GPIOX_21 GPIOX_21
-#define PIN_GPIOY_0 GPIOY_0
-#define PIN_GPIOY_1 GPIOY_1
-#define PIN_GPIOY_2 GPIOY_2
-#define PIN_GPIOY_3 GPIOY_3
-#define PIN_GPIOY_4 GPIOY_4
-#define PIN_GPIOY_5 GPIOY_5
-#define PIN_GPIOY_6 GPIOY_6
-#define PIN_GPIOY_7 GPIOY_7
-#define PIN_GPIOY_8 GPIOY_8
-#define PIN_GPIOY_9 GPIOY_9
-#define PIN_GPIOY_10 GPIOY_10
-#define PIN_GPIOY_11 GPIOY_11
-#define PIN_GPIOY_12 GPIOY_12
-#define PIN_GPIOY_13 GPIOY_13
-#define PIN_GPIOY_14 GPIOY_14
-#define PIN_GPIOY_15 GPIOY_15
-#define PIN_GPIOY_16 GPIOY_16
-#define PIN_GPIODV_0 GPIODV_0
-#define PIN_GPIODV_1 GPIODV_1
-#define PIN_GPIODV_2 GPIODV_2
-#define PIN_GPIODV_3 GPIODV_3
-#define PIN_GPIODV_4 GPIODV_4
-#define PIN_GPIODV_5 GPIODV_5
-#define PIN_GPIODV_6 GPIODV_6
-#define PIN_GPIODV_7 GPIODV_7
-#define PIN_GPIODV_8 GPIODV_8
-#define PIN_GPIODV_9 GPIODV_9
-#define PIN_GPIODV_10 GPIODV_10
-#define PIN_GPIODV_11 GPIODV_11
-#define PIN_GPIODV_12 GPIODV_12
-#define PIN_GPIODV_13 GPIODV_13
-#define PIN_GPIODV_14 GPIODV_14
-#define PIN_GPIODV_15 GPIODV_15
-#define PIN_GPIODV_16 GPIODV_16
-#define PIN_GPIODV_17 GPIODV_17
-#define PIN_GPIODV_18 GPIODV_18
-#define PIN_GPIODV_19 GPIODV_19
-#define PIN_GPIODV_20 GPIODV_20
-#define PIN_GPIODV_21 GPIODV_21
-#define PIN_GPIODV_22 GPIODV_22
-#define PIN_GPIODV_23 GPIODV_23
-#define PIN_GPIODV_24 GPIODV_24
-#define PIN_GPIODV_25 GPIODV_25
-#define PIN_GPIODV_26 GPIODV_26
-#define PIN_GPIODV_27 GPIODV_27
-#define PIN_GPIODV_28 GPIODV_28
-#define PIN_GPIODV_29 GPIODV_29
-#define PIN_GPIOH_0 GPIOH_0
-#define PIN_GPIOH_1 GPIOH_1
-#define PIN_GPIOH_2 GPIOH_2
-#define PIN_GPIOH_3 GPIOH_3
-#define PIN_GPIOH_4 GPIOH_4
-#define PIN_GPIOH_5 GPIOH_5
-#define PIN_GPIOH_6 GPIOH_6
-#define PIN_GPIOH_7 GPIOH_7
-#define PIN_GPIOH_8 GPIOH_8
-#define PIN_GPIOH_9 GPIOH_9
-#define PIN_GPIOZ_0 GPIOZ_0
-#define PIN_GPIOZ_1 GPIOZ_1
-#define PIN_GPIOZ_2 GPIOZ_2
-#define PIN_GPIOZ_3 GPIOZ_3
-#define PIN_GPIOZ_4 GPIOZ_4
-#define PIN_GPIOZ_5 GPIOZ_5
-#define PIN_GPIOZ_6 GPIOZ_6
-#define PIN_GPIOZ_7 GPIOZ_7
-#define PIN_GPIOZ_8 GPIOZ_8
-#define PIN_GPIOZ_9 GPIOZ_9
-#define PIN_GPIOZ_10 GPIOZ_10
-#define PIN_GPIOZ_11 GPIOZ_11
-#define PIN_GPIOZ_12 GPIOZ_12
-#define PIN_GPIOZ_13 GPIOZ_13
-#define PIN_GPIOZ_14 GPIOZ_14
-#define PIN_CARD_0 CARD_0
-#define PIN_CARD_1 CARD_1
-#define PIN_CARD_2 CARD_2
-#define PIN_CARD_3 CARD_3
-#define PIN_CARD_4 CARD_4
-#define PIN_CARD_5 CARD_5
-#define PIN_CARD_6 CARD_6
-#define PIN_BOOT_0 BOOT_0
-#define PIN_BOOT_1 BOOT_1
-#define PIN_BOOT_2 BOOT_2
-#define PIN_BOOT_3 BOOT_3
-#define PIN_BOOT_4 BOOT_4
-#define PIN_BOOT_5 BOOT_5
-#define PIN_BOOT_6 BOOT_6
-#define PIN_BOOT_7 BOOT_7
-#define PIN_BOOT_8 BOOT_8
-#define PIN_BOOT_9 BOOT_9
-#define PIN_BOOT_10 BOOT_10
-#define PIN_BOOT_11 BOOT_11
-#define PIN_BOOT_12 BOOT_12
-#define PIN_BOOT_13 BOOT_13
-#define PIN_BOOT_14 BOOT_14
-#define PIN_BOOT_15 BOOT_15
-#define PIN_BOOT_16 BOOT_16
-#define PIN_BOOT_17 BOOT_17
-#define PIN_BOOT_18 BOOT_18
-
-#define PIN_GPIOAO_0 (AO_OFFSET + GPIOAO_0)
-#define PIN_GPIOAO_1 (AO_OFFSET + GPIOAO_1)
-#define PIN_GPIOAO_2 (AO_OFFSET + GPIOAO_2)
-#define PIN_GPIOAO_3 (AO_OFFSET + GPIOAO_3)
-#define PIN_GPIOAO_4 (AO_OFFSET + GPIOAO_4)
-#define PIN_GPIOAO_5 (AO_OFFSET + GPIOAO_5)
-#define PIN_GPIOAO_6 (AO_OFFSET + GPIOAO_6)
-#define PIN_GPIOAO_7 (AO_OFFSET + GPIOAO_7)
-#define PIN_GPIOAO_8 (AO_OFFSET + GPIOAO_8)
-#define PIN_GPIOAO_9 (AO_OFFSET + GPIOAO_9)
-#define PIN_GPIOAO_10 (AO_OFFSET + GPIOAO_10)
-#define PIN_GPIOAO_11 (AO_OFFSET + GPIOAO_11)
-#define PIN_GPIOAO_12 (AO_OFFSET + GPIOAO_12)
-#define PIN_GPIOAO_13 (AO_OFFSET + GPIOAO_13)
-#define PIN_GPIO_BSD_EN (AO_OFFSET + GPIO_BSD_EN)
-#define PIN_GPIO_TEST_N (AO_OFFSET + GPIO_TEST_N)
+#define AO_OFF 120
static const struct pinctrl_pin_desc meson8_pins[] = {
- MESON_PIN(GPIOX_0),
- MESON_PIN(GPIOX_1),
- MESON_PIN(GPIOX_2),
- MESON_PIN(GPIOX_3),
- MESON_PIN(GPIOX_4),
- MESON_PIN(GPIOX_5),
- MESON_PIN(GPIOX_6),
- MESON_PIN(GPIOX_7),
- MESON_PIN(GPIOX_8),
- MESON_PIN(GPIOX_9),
- MESON_PIN(GPIOX_10),
- MESON_PIN(GPIOX_11),
- MESON_PIN(GPIOX_12),
- MESON_PIN(GPIOX_13),
- MESON_PIN(GPIOX_14),
- MESON_PIN(GPIOX_15),
- MESON_PIN(GPIOX_16),
- MESON_PIN(GPIOX_17),
- MESON_PIN(GPIOX_18),
- MESON_PIN(GPIOX_19),
- MESON_PIN(GPIOX_20),
- MESON_PIN(GPIOX_21),
- MESON_PIN(GPIOY_0),
- MESON_PIN(GPIOY_1),
- MESON_PIN(GPIOY_2),
- MESON_PIN(GPIOY_3),
- MESON_PIN(GPIOY_4),
- MESON_PIN(GPIOY_5),
- MESON_PIN(GPIOY_6),
- MESON_PIN(GPIOY_7),
- MESON_PIN(GPIOY_8),
- MESON_PIN(GPIOY_9),
- MESON_PIN(GPIOY_10),
- MESON_PIN(GPIOY_11),
- MESON_PIN(GPIOY_12),
- MESON_PIN(GPIOY_13),
- MESON_PIN(GPIOY_14),
- MESON_PIN(GPIOY_15),
- MESON_PIN(GPIOY_16),
- MESON_PIN(GPIODV_0),
- MESON_PIN(GPIODV_1),
- MESON_PIN(GPIODV_2),
- MESON_PIN(GPIODV_3),
- MESON_PIN(GPIODV_4),
- MESON_PIN(GPIODV_5),
- MESON_PIN(GPIODV_6),
- MESON_PIN(GPIODV_7),
- MESON_PIN(GPIODV_8),
- MESON_PIN(GPIODV_9),
- MESON_PIN(GPIODV_10),
- MESON_PIN(GPIODV_11),
- MESON_PIN(GPIODV_12),
- MESON_PIN(GPIODV_13),
- MESON_PIN(GPIODV_14),
- MESON_PIN(GPIODV_15),
- MESON_PIN(GPIODV_16),
- MESON_PIN(GPIODV_17),
- MESON_PIN(GPIODV_18),
- MESON_PIN(GPIODV_19),
- MESON_PIN(GPIODV_20),
- MESON_PIN(GPIODV_21),
- MESON_PIN(GPIODV_22),
- MESON_PIN(GPIODV_23),
- MESON_PIN(GPIODV_24),
- MESON_PIN(GPIODV_25),
- MESON_PIN(GPIODV_26),
- MESON_PIN(GPIODV_27),
- MESON_PIN(GPIODV_28),
- MESON_PIN(GPIODV_29),
- MESON_PIN(GPIOH_0),
- MESON_PIN(GPIOH_1),
- MESON_PIN(GPIOH_2),
- MESON_PIN(GPIOH_3),
- MESON_PIN(GPIOH_4),
- MESON_PIN(GPIOH_5),
- MESON_PIN(GPIOH_6),
- MESON_PIN(GPIOH_7),
- MESON_PIN(GPIOH_8),
- MESON_PIN(GPIOH_9),
- MESON_PIN(GPIOZ_0),
- MESON_PIN(GPIOZ_1),
- MESON_PIN(GPIOZ_2),
- MESON_PIN(GPIOZ_3),
- MESON_PIN(GPIOZ_4),
- MESON_PIN(GPIOZ_5),
- MESON_PIN(GPIOZ_6),
- MESON_PIN(GPIOZ_7),
- MESON_PIN(GPIOZ_8),
- MESON_PIN(GPIOZ_9),
- MESON_PIN(GPIOZ_10),
- MESON_PIN(GPIOZ_11),
- MESON_PIN(GPIOZ_12),
- MESON_PIN(GPIOZ_13),
- MESON_PIN(GPIOZ_14),
- MESON_PIN(CARD_0),
- MESON_PIN(CARD_1),
- MESON_PIN(CARD_2),
- MESON_PIN(CARD_3),
- MESON_PIN(CARD_4),
- MESON_PIN(CARD_5),
- MESON_PIN(CARD_6),
- MESON_PIN(BOOT_0),
- MESON_PIN(BOOT_1),
- MESON_PIN(BOOT_2),
- MESON_PIN(BOOT_3),
- MESON_PIN(BOOT_4),
- MESON_PIN(BOOT_5),
- MESON_PIN(BOOT_6),
- MESON_PIN(BOOT_7),
- MESON_PIN(BOOT_8),
- MESON_PIN(BOOT_9),
- MESON_PIN(BOOT_10),
- MESON_PIN(BOOT_11),
- MESON_PIN(BOOT_12),
- MESON_PIN(BOOT_13),
- MESON_PIN(BOOT_14),
- MESON_PIN(BOOT_15),
- MESON_PIN(BOOT_16),
- MESON_PIN(BOOT_17),
- MESON_PIN(BOOT_18),
- MESON_PIN(GPIOAO_0),
- MESON_PIN(GPIOAO_1),
- MESON_PIN(GPIOAO_2),
- MESON_PIN(GPIOAO_3),
- MESON_PIN(GPIOAO_4),
- MESON_PIN(GPIOAO_5),
- MESON_PIN(GPIOAO_6),
- MESON_PIN(GPIOAO_7),
- MESON_PIN(GPIOAO_8),
- MESON_PIN(GPIOAO_9),
- MESON_PIN(GPIOAO_10),
- MESON_PIN(GPIOAO_11),
- MESON_PIN(GPIOAO_12),
- MESON_PIN(GPIOAO_13),
- MESON_PIN(GPIO_BSD_EN),
- MESON_PIN(GPIO_TEST_N),
+ MESON_PIN(GPIOX_0, 0),
+ MESON_PIN(GPIOX_1, 0),
+ MESON_PIN(GPIOX_2, 0),
+ MESON_PIN(GPIOX_3, 0),
+ MESON_PIN(GPIOX_4, 0),
+ MESON_PIN(GPIOX_5, 0),
+ MESON_PIN(GPIOX_6, 0),
+ MESON_PIN(GPIOX_7, 0),
+ MESON_PIN(GPIOX_8, 0),
+ MESON_PIN(GPIOX_9, 0),
+ MESON_PIN(GPIOX_10, 0),
+ MESON_PIN(GPIOX_11, 0),
+ MESON_PIN(GPIOX_12, 0),
+ MESON_PIN(GPIOX_13, 0),
+ MESON_PIN(GPIOX_14, 0),
+ MESON_PIN(GPIOX_15, 0),
+ MESON_PIN(GPIOX_16, 0),
+ MESON_PIN(GPIOX_17, 0),
+ MESON_PIN(GPIOX_18, 0),
+ MESON_PIN(GPIOX_19, 0),
+ MESON_PIN(GPIOX_20, 0),
+ MESON_PIN(GPIOX_21, 0),
+ MESON_PIN(GPIOY_0, 0),
+ MESON_PIN(GPIOY_1, 0),
+ MESON_PIN(GPIOY_2, 0),
+ MESON_PIN(GPIOY_3, 0),
+ MESON_PIN(GPIOY_4, 0),
+ MESON_PIN(GPIOY_5, 0),
+ MESON_PIN(GPIOY_6, 0),
+ MESON_PIN(GPIOY_7, 0),
+ MESON_PIN(GPIOY_8, 0),
+ MESON_PIN(GPIOY_9, 0),
+ MESON_PIN(GPIOY_10, 0),
+ MESON_PIN(GPIOY_11, 0),
+ MESON_PIN(GPIOY_12, 0),
+ MESON_PIN(GPIOY_13, 0),
+ MESON_PIN(GPIOY_14, 0),
+ MESON_PIN(GPIOY_15, 0),
+ MESON_PIN(GPIOY_16, 0),
+ MESON_PIN(GPIODV_0, 0),
+ MESON_PIN(GPIODV_1, 0),
+ MESON_PIN(GPIODV_2, 0),
+ MESON_PIN(GPIODV_3, 0),
+ MESON_PIN(GPIODV_4, 0),
+ MESON_PIN(GPIODV_5, 0),
+ MESON_PIN(GPIODV_6, 0),
+ MESON_PIN(GPIODV_7, 0),
+ MESON_PIN(GPIODV_8, 0),
+ MESON_PIN(GPIODV_9, 0),
+ MESON_PIN(GPIODV_10, 0),
+ MESON_PIN(GPIODV_11, 0),
+ MESON_PIN(GPIODV_12, 0),
+ MESON_PIN(GPIODV_13, 0),
+ MESON_PIN(GPIODV_14, 0),
+ MESON_PIN(GPIODV_15, 0),
+ MESON_PIN(GPIODV_16, 0),
+ MESON_PIN(GPIODV_17, 0),
+ MESON_PIN(GPIODV_18, 0),
+ MESON_PIN(GPIODV_19, 0),
+ MESON_PIN(GPIODV_20, 0),
+ MESON_PIN(GPIODV_21, 0),
+ MESON_PIN(GPIODV_22, 0),
+ MESON_PIN(GPIODV_23, 0),
+ MESON_PIN(GPIODV_24, 0),
+ MESON_PIN(GPIODV_25, 0),
+ MESON_PIN(GPIODV_26, 0),
+ MESON_PIN(GPIODV_27, 0),
+ MESON_PIN(GPIODV_28, 0),
+ MESON_PIN(GPIODV_29, 0),
+ MESON_PIN(GPIOH_0, 0),
+ MESON_PIN(GPIOH_1, 0),
+ MESON_PIN(GPIOH_2, 0),
+ MESON_PIN(GPIOH_3, 0),
+ MESON_PIN(GPIOH_4, 0),
+ MESON_PIN(GPIOH_5, 0),
+ MESON_PIN(GPIOH_6, 0),
+ MESON_PIN(GPIOH_7, 0),
+ MESON_PIN(GPIOH_8, 0),
+ MESON_PIN(GPIOH_9, 0),
+ MESON_PIN(GPIOZ_0, 0),
+ MESON_PIN(GPIOZ_1, 0),
+ MESON_PIN(GPIOZ_2, 0),
+ MESON_PIN(GPIOZ_3, 0),
+ MESON_PIN(GPIOZ_4, 0),
+ MESON_PIN(GPIOZ_5, 0),
+ MESON_PIN(GPIOZ_6, 0),
+ MESON_PIN(GPIOZ_7, 0),
+ MESON_PIN(GPIOZ_8, 0),
+ MESON_PIN(GPIOZ_9, 0),
+ MESON_PIN(GPIOZ_10, 0),
+ MESON_PIN(GPIOZ_11, 0),
+ MESON_PIN(GPIOZ_12, 0),
+ MESON_PIN(GPIOZ_13, 0),
+ MESON_PIN(GPIOZ_14, 0),
+ MESON_PIN(CARD_0, 0),
+ MESON_PIN(CARD_1, 0),
+ MESON_PIN(CARD_2, 0),
+ MESON_PIN(CARD_3, 0),
+ MESON_PIN(CARD_4, 0),
+ MESON_PIN(CARD_5, 0),
+ MESON_PIN(CARD_6, 0),
+ MESON_PIN(BOOT_0, 0),
+ MESON_PIN(BOOT_1, 0),
+ MESON_PIN(BOOT_2, 0),
+ MESON_PIN(BOOT_3, 0),
+ MESON_PIN(BOOT_4, 0),
+ MESON_PIN(BOOT_5, 0),
+ MESON_PIN(BOOT_6, 0),
+ MESON_PIN(BOOT_7, 0),
+ MESON_PIN(BOOT_8, 0),
+ MESON_PIN(BOOT_9, 0),
+ MESON_PIN(BOOT_10, 0),
+ MESON_PIN(BOOT_11, 0),
+ MESON_PIN(BOOT_12, 0),
+ MESON_PIN(BOOT_13, 0),
+ MESON_PIN(BOOT_14, 0),
+ MESON_PIN(BOOT_15, 0),
+ MESON_PIN(BOOT_16, 0),
+ MESON_PIN(BOOT_17, 0),
+ MESON_PIN(BOOT_18, 0),
+ MESON_PIN(GPIOAO_0, AO_OFF),
+ MESON_PIN(GPIOAO_1, AO_OFF),
+ MESON_PIN(GPIOAO_2, AO_OFF),
+ MESON_PIN(GPIOAO_3, AO_OFF),
+ MESON_PIN(GPIOAO_4, AO_OFF),
+ MESON_PIN(GPIOAO_5, AO_OFF),
+ MESON_PIN(GPIOAO_6, AO_OFF),
+ MESON_PIN(GPIOAO_7, AO_OFF),
+ MESON_PIN(GPIOAO_8, AO_OFF),
+ MESON_PIN(GPIOAO_9, AO_OFF),
+ MESON_PIN(GPIOAO_10, AO_OFF),
+ MESON_PIN(GPIOAO_11, AO_OFF),
+ MESON_PIN(GPIOAO_12, AO_OFF),
+ MESON_PIN(GPIOAO_13, AO_OFF),
+ MESON_PIN(GPIO_BSD_EN, AO_OFF),
+ MESON_PIN(GPIO_TEST_N, AO_OFF),
};
/* bank X */
-static const unsigned int sd_d0_a_pins[] = { PIN_GPIOX_0 };
-static const unsigned int sd_d1_a_pins[] = { PIN_GPIOX_1 };
-static const unsigned int sd_d2_a_pins[] = { PIN_GPIOX_2 };
-static const unsigned int sd_d3_a_pins[] = { PIN_GPIOX_3 };
-static const unsigned int sd_clk_a_pins[] = { PIN_GPIOX_8 };
-static const unsigned int sd_cmd_a_pins[] = { PIN_GPIOX_9 };
-
-static const unsigned int sdxc_d0_a_pins[] = { PIN_GPIOX_0 };
-static const unsigned int sdxc_d13_a_pins[] = { PIN_GPIOX_1, PIN_GPIOX_2,
- PIN_GPIOX_3 };
-static const unsigned int sdxc_d47_a_pins[] = { PIN_GPIOX_4, PIN_GPIOX_5,
- PIN_GPIOX_6, PIN_GPIOX_7 };
-static const unsigned int sdxc_clk_a_pins[] = { PIN_GPIOX_8 };
-static const unsigned int sdxc_cmd_a_pins[] = { PIN_GPIOX_9 };
-
-static const unsigned int pcm_out_a_pins[] = { PIN_GPIOX_4 };
-static const unsigned int pcm_in_a_pins[] = { PIN_GPIOX_5 };
-static const unsigned int pcm_fs_a_pins[] = { PIN_GPIOX_6 };
-static const unsigned int pcm_clk_a_pins[] = { PIN_GPIOX_7 };
-
-static const unsigned int uart_tx_a0_pins[] = { PIN_GPIOX_4 };
-static const unsigned int uart_rx_a0_pins[] = { PIN_GPIOX_5 };
-static const unsigned int uart_cts_a0_pins[] = { PIN_GPIOX_6 };
-static const unsigned int uart_rts_a0_pins[] = { PIN_GPIOX_7 };
-
-static const unsigned int uart_tx_a1_pins[] = { PIN_GPIOX_12 };
-static const unsigned int uart_rx_a1_pins[] = { PIN_GPIOX_13 };
-static const unsigned int uart_cts_a1_pins[] = { PIN_GPIOX_14 };
-static const unsigned int uart_rts_a1_pins[] = { PIN_GPIOX_15 };
-
-static const unsigned int uart_tx_b0_pins[] = { PIN_GPIOX_16 };
-static const unsigned int uart_rx_b0_pins[] = { PIN_GPIOX_17 };
-static const unsigned int uart_cts_b0_pins[] = { PIN_GPIOX_18 };
-static const unsigned int uart_rts_b0_pins[] = { PIN_GPIOX_19 };
-
-static const unsigned int iso7816_det_pins[] = { PIN_GPIOX_16 };
-static const unsigned int iso7816_reset_pins[] = { PIN_GPIOX_17 };
-static const unsigned int iso7816_clk_pins[] = { PIN_GPIOX_18 };
-static const unsigned int iso7816_data_pins[] = { PIN_GPIOX_19 };
-
-static const unsigned int i2c_sda_d0_pins[] = { PIN_GPIOX_16 };
-static const unsigned int i2c_sck_d0_pins[] = { PIN_GPIOX_17 };
-
-static const unsigned int xtal_32k_out_pins[] = { PIN_GPIOX_10 };
-static const unsigned int xtal_24m_out_pins[] = { PIN_GPIOX_11 };
+static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
+static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
+static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
+static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
+static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
+static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
+
+static const unsigned int sdxc_d0_a_pins[] = { PIN(GPIOX_0, 0) };
+static const unsigned int sdxc_d13_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
+ PIN(GPIOX_3, 0) };
+static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
+ PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
+static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
+static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
+
+static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
+static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
+static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
+static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
+
+static const unsigned int uart_tx_a0_pins[] = { PIN(GPIOX_4, 0) };
+static const unsigned int uart_rx_a0_pins[] = { PIN(GPIOX_5, 0) };
+static const unsigned int uart_cts_a0_pins[] = { PIN(GPIOX_6, 0) };
+static const unsigned int uart_rts_a0_pins[] = { PIN(GPIOX_7, 0) };
+
+static const unsigned int uart_tx_a1_pins[] = { PIN(GPIOX_12, 0) };
+static const unsigned int uart_rx_a1_pins[] = { PIN(GPIOX_13, 0) };
+static const unsigned int uart_cts_a1_pins[] = { PIN(GPIOX_14, 0) };
+static const unsigned int uart_rts_a1_pins[] = { PIN(GPIOX_15, 0) };
+
+static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
+static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
+static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
+static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
+
+static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
+static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
+static const unsigned int iso7816_clk_pins[] = { PIN(GPIOX_18, 0) };
+static const unsigned int iso7816_data_pins[] = { PIN(GPIOX_19, 0) };
+
+static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
+static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
+
+static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
/* bank Y */
-static const unsigned int uart_tx_c_pins[] = { PIN_GPIOY_0 };
-static const unsigned int uart_rx_c_pins[] = { PIN_GPIOY_1 };
-static const unsigned int uart_cts_c_pins[] = { PIN_GPIOY_2 };
-static const unsigned int uart_rts_c_pins[] = { PIN_GPIOY_3 };
+static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_0, 0) };
+static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_1, 0) };
+static const unsigned int uart_cts_c_pins[] = { PIN(GPIOY_2, 0) };
+static const unsigned int uart_rts_c_pins[] = { PIN(GPIOY_3, 0) };
-static const unsigned int pcm_out_b_pins[] = { PIN_GPIOY_4 };
-static const unsigned int pcm_in_b_pins[] = { PIN_GPIOY_5 };
-static const unsigned int pcm_fs_b_pins[] = { PIN_GPIOY_6 };
-static const unsigned int pcm_clk_b_pins[] = { PIN_GPIOY_7 };
+static const unsigned int pcm_out_b_pins[] = { PIN(GPIOY_4, 0) };
+static const unsigned int pcm_in_b_pins[] = { PIN(GPIOY_5, 0) };
+static const unsigned int pcm_fs_b_pins[] = { PIN(GPIOY_6, 0) };
+static const unsigned int pcm_clk_b_pins[] = { PIN(GPIOY_7, 0) };
-static const unsigned int i2c_sda_c0_pins[] = { PIN_GPIOY_0 };
-static const unsigned int i2c_sck_c0_pins[] = { PIN_GPIOY_1 };
+static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIOY_0, 0) };
+static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIOY_1, 0) };
/* bank DV */
-static const unsigned int dvin_rgb_pins[] = { PIN_GPIODV_0, PIN_GPIODV_1,
- PIN_GPIODV_2, PIN_GPIODV_3,
- PIN_GPIODV_4, PIN_GPIODV_5,
- PIN_GPIODV_6, PIN_GPIODV_7,
- PIN_GPIODV_8, PIN_GPIODV_9,
- PIN_GPIODV_10, PIN_GPIODV_11,
- PIN_GPIODV_12, PIN_GPIODV_13,
- PIN_GPIODV_14, PIN_GPIODV_15,
- PIN_GPIODV_16, PIN_GPIODV_17,
- PIN_GPIODV_18, PIN_GPIODV_19,
- PIN_GPIODV_20, PIN_GPIODV_21,
- PIN_GPIODV_22, PIN_GPIODV_23 };
-static const unsigned int dvin_vs_pins[] = { PIN_GPIODV_24 };
-static const unsigned int dvin_hs_pins[] = { PIN_GPIODV_25 };
-static const unsigned int dvin_clk_pins[] = { PIN_GPIODV_26 };
-static const unsigned int dvin_de_pins[] = { PIN_GPIODV_27 };
-
-static const unsigned int enc_0_pins[] = { PIN_GPIODV_0 };
-static const unsigned int enc_1_pins[] = { PIN_GPIODV_1 };
-static const unsigned int enc_2_pins[] = { PIN_GPIODV_2 };
-static const unsigned int enc_3_pins[] = { PIN_GPIODV_3 };
-static const unsigned int enc_4_pins[] = { PIN_GPIODV_4 };
-static const unsigned int enc_5_pins[] = { PIN_GPIODV_5 };
-static const unsigned int enc_6_pins[] = { PIN_GPIODV_6 };
-static const unsigned int enc_7_pins[] = { PIN_GPIODV_7 };
-static const unsigned int enc_8_pins[] = { PIN_GPIODV_8 };
-static const unsigned int enc_9_pins[] = { PIN_GPIODV_9 };
-static const unsigned int enc_10_pins[] = { PIN_GPIODV_10 };
-static const unsigned int enc_11_pins[] = { PIN_GPIODV_11 };
-static const unsigned int enc_12_pins[] = { PIN_GPIODV_12 };
-static const unsigned int enc_13_pins[] = { PIN_GPIODV_13 };
-static const unsigned int enc_14_pins[] = { PIN_GPIODV_14 };
-static const unsigned int enc_15_pins[] = { PIN_GPIODV_15 };
-static const unsigned int enc_16_pins[] = { PIN_GPIODV_16 };
-static const unsigned int enc_17_pins[] = { PIN_GPIODV_17 };
-
-static const unsigned int uart_tx_b1_pins[] = { PIN_GPIODV_24 };
-static const unsigned int uart_rx_b1_pins[] = { PIN_GPIODV_25 };
-static const unsigned int uart_cts_b1_pins[] = { PIN_GPIODV_26 };
-static const unsigned int uart_rts_b1_pins[] = { PIN_GPIODV_27 };
-
-static const unsigned int vga_vs_pins[] = { PIN_GPIODV_24 };
-static const unsigned int vga_hs_pins[] = { PIN_GPIODV_25 };
+static const unsigned int dvin_rgb_pins[] = { PIN(GPIODV_0, 0), PIN(GPIODV_1, 0),
+ PIN(GPIODV_2, 0), PIN(GPIODV_3, 0),
+ PIN(GPIODV_4, 0), PIN(GPIODV_5, 0),
+ PIN(GPIODV_6, 0), PIN(GPIODV_7, 0),
+ PIN(GPIODV_8, 0), PIN(GPIODV_9, 0),
+ PIN(GPIODV_10, 0), PIN(GPIODV_11, 0),
+ PIN(GPIODV_12, 0), PIN(GPIODV_13, 0),
+ PIN(GPIODV_14, 0), PIN(GPIODV_15, 0),
+ PIN(GPIODV_16, 0), PIN(GPIODV_17, 0),
+ PIN(GPIODV_18, 0), PIN(GPIODV_19, 0),
+ PIN(GPIODV_20, 0), PIN(GPIODV_21, 0),
+ PIN(GPIODV_22, 0), PIN(GPIODV_23, 0) };
+static const unsigned int dvin_vs_pins[] = { PIN(GPIODV_24, 0) };
+static const unsigned int dvin_hs_pins[] = { PIN(GPIODV_25, 0) };
+static const unsigned int dvin_clk_pins[] = { PIN(GPIODV_26, 0) };
+static const unsigned int dvin_de_pins[] = { PIN(GPIODV_27, 0) };
+
+static const unsigned int enc_0_pins[] = { PIN(GPIODV_0, 0) };
+static const unsigned int enc_1_pins[] = { PIN(GPIODV_1, 0) };
+static const unsigned int enc_2_pins[] = { PIN(GPIODV_2, 0) };
+static const unsigned int enc_3_pins[] = { PIN(GPIODV_3, 0) };
+static const unsigned int enc_4_pins[] = { PIN(GPIODV_4, 0) };
+static const unsigned int enc_5_pins[] = { PIN(GPIODV_5, 0) };
+static const unsigned int enc_6_pins[] = { PIN(GPIODV_6, 0) };
+static const unsigned int enc_7_pins[] = { PIN(GPIODV_7, 0) };
+static const unsigned int enc_8_pins[] = { PIN(GPIODV_8, 0) };
+static const unsigned int enc_9_pins[] = { PIN(GPIODV_9, 0) };
+static const unsigned int enc_10_pins[] = { PIN(GPIODV_10, 0) };
+static const unsigned int enc_11_pins[] = { PIN(GPIODV_11, 0) };
+static const unsigned int enc_12_pins[] = { PIN(GPIODV_12, 0) };
+static const unsigned int enc_13_pins[] = { PIN(GPIODV_13, 0) };
+static const unsigned int enc_14_pins[] = { PIN(GPIODV_14, 0) };
+static const unsigned int enc_15_pins[] = { PIN(GPIODV_15, 0) };
+static const unsigned int enc_16_pins[] = { PIN(GPIODV_16, 0) };
+static const unsigned int enc_17_pins[] = { PIN(GPIODV_17, 0) };
+
+static const unsigned int uart_tx_b1_pins[] = { PIN(GPIODV_24, 0) };
+static const unsigned int uart_rx_b1_pins[] = { PIN(GPIODV_25, 0) };
+static const unsigned int uart_cts_b1_pins[] = { PIN(GPIODV_26, 0) };
+static const unsigned int uart_rts_b1_pins[] = { PIN(GPIODV_27, 0) };
+
+static const unsigned int vga_vs_pins[] = { PIN(GPIODV_24, 0) };
+static const unsigned int vga_hs_pins[] = { PIN(GPIODV_25, 0) };
/* bank H */
-static const unsigned int hdmi_hpd_pins[] = { PIN_GPIOH_0 };
-static const unsigned int hdmi_sda_pins[] = { PIN_GPIOH_1 };
-static const unsigned int hdmi_scl_pins[] = { PIN_GPIOH_2 };
-static const unsigned int hdmi_cec_pins[] = { PIN_GPIOH_3 };
+static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
+static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
+static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
+static const unsigned int hdmi_cec_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int spi_ss0_0_pins[] = { PIN_GPIOH_3 };
-static const unsigned int spi_miso_0_pins[] = { PIN_GPIOH_4 };
-static const unsigned int spi_mosi_0_pins[] = { PIN_GPIOH_5 };
-static const unsigned int spi_sclk_0_pins[] = { PIN_GPIOH_6 };
+static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOH_3, 0) };
+static const unsigned int spi_miso_0_pins[] = { PIN(GPIOH_4, 0) };
+static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOH_5, 0) };
+static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOH_6, 0) };
-static const unsigned int i2c_sda_d1_pins[] = { PIN_GPIOH_7 };
-static const unsigned int i2c_sck_d1_pins[] = { PIN_GPIOH_8 };
+static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
+static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
/* bank Z */
-static const unsigned int spi_ss0_1_pins[] = { PIN_GPIOZ_9 };
-static const unsigned int spi_ss1_1_pins[] = { PIN_GPIOZ_10 };
-static const unsigned int spi_sclk_1_pins[] = { PIN_GPIOZ_11 };
-static const unsigned int spi_mosi_1_pins[] = { PIN_GPIOZ_12 };
-static const unsigned int spi_miso_1_pins[] = { PIN_GPIOZ_13 };
-static const unsigned int spi_ss2_1_pins[] = { PIN_GPIOZ_14 };
-
-static const unsigned int eth_tx_clk_50m_pins[] = { PIN_GPIOZ_4 };
-static const unsigned int eth_tx_en_pins[] = { PIN_GPIOZ_5 };
-static const unsigned int eth_txd1_pins[] = { PIN_GPIOZ_6 };
-static const unsigned int eth_txd0_pins[] = { PIN_GPIOZ_7 };
-static const unsigned int eth_rx_clk_in_pins[] = { PIN_GPIOZ_8 };
-static const unsigned int eth_rx_dv_pins[] = { PIN_GPIOZ_9 };
-static const unsigned int eth_rxd1_pins[] = { PIN_GPIOZ_10 };
-static const unsigned int eth_rxd0_pins[] = { PIN_GPIOZ_11 };
-static const unsigned int eth_mdio_pins[] = { PIN_GPIOZ_12 };
-static const unsigned int eth_mdc_pins[] = { PIN_GPIOZ_13 };
-
-static const unsigned int i2c_sda_a0_pins[] = { PIN_GPIOZ_0 };
-static const unsigned int i2c_sck_a0_pins[] = { PIN_GPIOZ_1 };
-
-static const unsigned int i2c_sda_b_pins[] = { PIN_GPIOZ_2 };
-static const unsigned int i2c_sck_b_pins[] = { PIN_GPIOZ_3 };
-
-static const unsigned int i2c_sda_c1_pins[] = { PIN_GPIOZ_4 };
-static const unsigned int i2c_sck_c1_pins[] = { PIN_GPIOZ_5 };
-
-static const unsigned int i2c_sda_a1_pins[] = { PIN_GPIOZ_0 };
-static const unsigned int i2c_sck_a1_pins[] = { PIN_GPIOZ_1 };
-
-static const unsigned int i2c_sda_a2_pins[] = { PIN_GPIOZ_0 };
-static const unsigned int i2c_sck_a2_pins[] = { PIN_GPIOZ_1 };
+static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOZ_9, 0) };
+static const unsigned int spi_ss1_1_pins[] = { PIN(GPIOZ_10, 0) };
+static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOZ_11, 0) };
+static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOZ_12, 0) };
+static const unsigned int spi_miso_1_pins[] = { PIN(GPIOZ_13, 0) };
+static const unsigned int spi_ss2_1_pins[] = { PIN(GPIOZ_14, 0) };
+
+static const unsigned int eth_tx_clk_50m_pins[] = { PIN(GPIOZ_4, 0) };
+static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_5, 0) };
+static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_6, 0) };
+static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_7, 0) };
+static const unsigned int eth_rx_clk_in_pins[] = { PIN(GPIOZ_8, 0) };
+static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_9, 0) };
+static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_10, 0) };
+static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_11, 0) };
+static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_12, 0) };
+static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_13, 0) };
+
+static const unsigned int i2c_sda_a0_pins[] = { PIN(GPIOZ_0, 0) };
+static const unsigned int i2c_sck_a0_pins[] = { PIN(GPIOZ_1, 0) };
+
+static const unsigned int i2c_sda_b_pins[] = { PIN(GPIOZ_2, 0) };
+static const unsigned int i2c_sck_b_pins[] = { PIN(GPIOZ_3, 0) };
+
+static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOZ_4, 0) };
+static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOZ_5, 0) };
+
+static const unsigned int i2c_sda_a1_pins[] = { PIN(GPIOZ_0, 0) };
+static const unsigned int i2c_sck_a1_pins[] = { PIN(GPIOZ_1, 0) };
+
+static const unsigned int i2c_sda_a2_pins[] = { PIN(GPIOZ_0, 0) };
+static const unsigned int i2c_sck_a2_pins[] = { PIN(GPIOZ_1, 0) };
/* bank BOOT */
-static const unsigned int sd_d0_c_pins[] = { PIN_BOOT_0 };
-static const unsigned int sd_d1_c_pins[] = { PIN_BOOT_1 };
-static const unsigned int sd_d2_c_pins[] = { PIN_BOOT_2 };
-static const unsigned int sd_d3_c_pins[] = { PIN_BOOT_3 };
-static const unsigned int sd_cmd_c_pins[] = { PIN_BOOT_16 };
-static const unsigned int sd_clk_c_pins[] = { PIN_BOOT_17 };
-
-static const unsigned int sdxc_d0_c_pins[] = { PIN_BOOT_0};
-static const unsigned int sdxc_d13_c_pins[] = { PIN_BOOT_1, PIN_BOOT_2,
- PIN_BOOT_3 };
-static const unsigned int sdxc_d47_c_pins[] = { PIN_BOOT_4, PIN_BOOT_5,
- PIN_BOOT_6, PIN_BOOT_7 };
-static const unsigned int sdxc_cmd_c_pins[] = { PIN_BOOT_16 };
-static const unsigned int sdxc_clk_c_pins[] = { PIN_BOOT_17 };
-
-static const unsigned int nand_io_pins[] = { PIN_BOOT_0, PIN_BOOT_1,
- PIN_BOOT_2, PIN_BOOT_3,
- PIN_BOOT_4, PIN_BOOT_5,
- PIN_BOOT_6, PIN_BOOT_7 };
-static const unsigned int nand_io_ce0_pins[] = { PIN_BOOT_8 };
-static const unsigned int nand_io_ce1_pins[] = { PIN_BOOT_9 };
-static const unsigned int nand_io_rb0_pins[] = { PIN_BOOT_10 };
-static const unsigned int nand_ale_pins[] = { PIN_BOOT_11 };
-static const unsigned int nand_cle_pins[] = { PIN_BOOT_12 };
-static const unsigned int nand_wen_clk_pins[] = { PIN_BOOT_13 };
-static const unsigned int nand_ren_clk_pins[] = { PIN_BOOT_14 };
-static const unsigned int nand_dqs_pins[] = { PIN_BOOT_15 };
-static const unsigned int nand_ce2_pins[] = { PIN_BOOT_16 };
-static const unsigned int nand_ce3_pins[] = { PIN_BOOT_17 };
-
-static const unsigned int nor_d_pins[] = { PIN_BOOT_11 };
-static const unsigned int nor_q_pins[] = { PIN_BOOT_12 };
-static const unsigned int nor_c_pins[] = { PIN_BOOT_13 };
-static const unsigned int nor_cs_pins[] = { PIN_BOOT_18 };
+static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
+static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
+static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
+static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
+static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_16, 0) };
+static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_17, 0) };
+
+static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
+static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
+ PIN(BOOT_3, 0) };
+static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
+ PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
+static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_16, 0) };
+static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_17, 0) };
+
+static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
+ PIN(BOOT_2, 0), PIN(BOOT_3, 0),
+ PIN(BOOT_4, 0), PIN(BOOT_5, 0),
+ PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
+static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
+static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
+static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
+static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
+static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
+static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
+static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
+static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, 0) };
+static const unsigned int nand_ce2_pins[] = { PIN(BOOT_16, 0) };
+static const unsigned int nand_ce3_pins[] = { PIN(BOOT_17, 0) };
+
+static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
+static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
+static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
+static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
/* bank CARD */
-static const unsigned int sd_d1_b_pins[] = { PIN_CARD_0 };
-static const unsigned int sd_d0_b_pins[] = { PIN_CARD_1 };
-static const unsigned int sd_clk_b_pins[] = { PIN_CARD_2 };
-static const unsigned int sd_cmd_b_pins[] = { PIN_CARD_3 };
-static const unsigned int sd_d3_b_pins[] = { PIN_CARD_4 };
-static const unsigned int sd_d2_b_pins[] = { PIN_CARD_5 };
-
-static const unsigned int sdxc_d13_b_pins[] = { PIN_CARD_0, PIN_CARD_4,
- PIN_CARD_5 };
-static const unsigned int sdxc_d0_b_pins[] = { PIN_CARD_1 };
-static const unsigned int sdxc_clk_b_pins[] = { PIN_CARD_2 };
-static const unsigned int sdxc_cmd_b_pins[] = { PIN_CARD_3 };
+static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
+static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
+static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
+static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
+static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
+
+static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
+ PIN(CARD_5, 0) };
+static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
+static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
+static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
/* bank AO */
-static const unsigned int uart_tx_ao_a_pins[] = { PIN_GPIOAO_0 };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN_GPIOAO_1 };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN_GPIOAO_2 };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN_GPIOAO_3 };
+static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
+static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
+static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
+static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int remote_input_pins[] = { PIN_GPIOAO_7 };
+static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int i2c_slave_sck_ao_pins[] = { PIN_GPIOAO_4 };
-static const unsigned int i2c_slave_sda_ao_pins[] = { PIN_GPIOAO_5 };
+static const unsigned int i2c_slave_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
+static const unsigned int i2c_slave_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int uart_tx_ao_b0_pins[] = { PIN_GPIOAO_0 };
-static const unsigned int uart_rx_ao_b0_pins[] = { PIN_GPIOAO_1 };
+static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
+static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_tx_ao_b1_pins[] = { PIN_GPIOAO_4 };
-static const unsigned int uart_rx_ao_b1_pins[] = { PIN_GPIOAO_5 };
+static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
+static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int i2c_mst_sck_ao_pins[] = { PIN_GPIOAO_4 };
-static const unsigned int i2c_mst_sda_ao_pins[] = { PIN_GPIOAO_5 };
+static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
+static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
static struct meson_pmx_group meson8_groups[] = {
- GPIO_GROUP(GPIOX_0),
- GPIO_GROUP(GPIOX_1),
- GPIO_GROUP(GPIOX_2),
- GPIO_GROUP(GPIOX_3),
- GPIO_GROUP(GPIOX_4),
- GPIO_GROUP(GPIOX_5),
- GPIO_GROUP(GPIOX_6),
- GPIO_GROUP(GPIOX_7),
- GPIO_GROUP(GPIOX_8),
- GPIO_GROUP(GPIOX_9),
- GPIO_GROUP(GPIOX_10),
- GPIO_GROUP(GPIOX_11),
- GPIO_GROUP(GPIOX_12),
- GPIO_GROUP(GPIOX_13),
- GPIO_GROUP(GPIOX_14),
- GPIO_GROUP(GPIOX_15),
- GPIO_GROUP(GPIOX_16),
- GPIO_GROUP(GPIOX_17),
- GPIO_GROUP(GPIOX_18),
- GPIO_GROUP(GPIOX_19),
- GPIO_GROUP(GPIOX_20),
- GPIO_GROUP(GPIOX_21),
- GPIO_GROUP(GPIOY_0),
- GPIO_GROUP(GPIOY_1),
- GPIO_GROUP(GPIOY_2),
- GPIO_GROUP(GPIOY_3),
- GPIO_GROUP(GPIOY_4),
- GPIO_GROUP(GPIOY_5),
- GPIO_GROUP(GPIOY_6),
- GPIO_GROUP(GPIOY_7),
- GPIO_GROUP(GPIOY_8),
- GPIO_GROUP(GPIOY_9),
- GPIO_GROUP(GPIOY_10),
- GPIO_GROUP(GPIOY_11),
- GPIO_GROUP(GPIOY_12),
- GPIO_GROUP(GPIOY_13),
- GPIO_GROUP(GPIOY_14),
- GPIO_GROUP(GPIOY_15),
- GPIO_GROUP(GPIOY_16),
- GPIO_GROUP(GPIODV_0),
- GPIO_GROUP(GPIODV_1),
- GPIO_GROUP(GPIODV_2),
- GPIO_GROUP(GPIODV_3),
- GPIO_GROUP(GPIODV_4),
- GPIO_GROUP(GPIODV_5),
- GPIO_GROUP(GPIODV_6),
- GPIO_GROUP(GPIODV_7),
- GPIO_GROUP(GPIODV_8),
- GPIO_GROUP(GPIODV_9),
- GPIO_GROUP(GPIODV_10),
- GPIO_GROUP(GPIODV_11),
- GPIO_GROUP(GPIODV_12),
- GPIO_GROUP(GPIODV_13),
- GPIO_GROUP(GPIODV_14),
- GPIO_GROUP(GPIODV_15),
- GPIO_GROUP(GPIODV_16),
- GPIO_GROUP(GPIODV_17),
- GPIO_GROUP(GPIODV_18),
- GPIO_GROUP(GPIODV_19),
- GPIO_GROUP(GPIODV_20),
- GPIO_GROUP(GPIODV_21),
- GPIO_GROUP(GPIODV_22),
- GPIO_GROUP(GPIODV_23),
- GPIO_GROUP(GPIODV_24),
- GPIO_GROUP(GPIODV_25),
- GPIO_GROUP(GPIODV_26),
- GPIO_GROUP(GPIODV_27),
- GPIO_GROUP(GPIODV_28),
- GPIO_GROUP(GPIODV_29),
- GPIO_GROUP(GPIOH_0),
- GPIO_GROUP(GPIOH_1),
- GPIO_GROUP(GPIOH_2),
- GPIO_GROUP(GPIOH_3),
- GPIO_GROUP(GPIOH_4),
- GPIO_GROUP(GPIOH_5),
- GPIO_GROUP(GPIOH_6),
- GPIO_GROUP(GPIOH_7),
- GPIO_GROUP(GPIOH_8),
- GPIO_GROUP(GPIOH_9),
- GPIO_GROUP(GPIOZ_0),
- GPIO_GROUP(GPIOZ_1),
- GPIO_GROUP(GPIOZ_2),
- GPIO_GROUP(GPIOZ_3),
- GPIO_GROUP(GPIOZ_4),
- GPIO_GROUP(GPIOZ_5),
- GPIO_GROUP(GPIOZ_6),
- GPIO_GROUP(GPIOZ_7),
- GPIO_GROUP(GPIOZ_8),
- GPIO_GROUP(GPIOZ_9),
- GPIO_GROUP(GPIOZ_10),
- GPIO_GROUP(GPIOZ_11),
- GPIO_GROUP(GPIOZ_12),
- GPIO_GROUP(GPIOZ_13),
- GPIO_GROUP(GPIOZ_14),
- GPIO_GROUP(GPIOAO_0),
- GPIO_GROUP(GPIOAO_1),
- GPIO_GROUP(GPIOAO_2),
- GPIO_GROUP(GPIOAO_3),
- GPIO_GROUP(GPIOAO_4),
- GPIO_GROUP(GPIOAO_5),
- GPIO_GROUP(GPIOAO_6),
- GPIO_GROUP(GPIOAO_7),
- GPIO_GROUP(GPIOAO_8),
- GPIO_GROUP(GPIOAO_9),
- GPIO_GROUP(GPIOAO_10),
- GPIO_GROUP(GPIOAO_11),
- GPIO_GROUP(GPIOAO_12),
- GPIO_GROUP(GPIOAO_13),
- GPIO_GROUP(GPIO_BSD_EN),
- GPIO_GROUP(GPIO_TEST_N),
+ GPIO_GROUP(GPIOX_0, 0),
+ GPIO_GROUP(GPIOX_1, 0),
+ GPIO_GROUP(GPIOX_2, 0),
+ GPIO_GROUP(GPIOX_3, 0),
+ GPIO_GROUP(GPIOX_4, 0),
+ GPIO_GROUP(GPIOX_5, 0),
+ GPIO_GROUP(GPIOX_6, 0),
+ GPIO_GROUP(GPIOX_7, 0),
+ GPIO_GROUP(GPIOX_8, 0),
+ GPIO_GROUP(GPIOX_9, 0),
+ GPIO_GROUP(GPIOX_10, 0),
+ GPIO_GROUP(GPIOX_11, 0),
+ GPIO_GROUP(GPIOX_12, 0),
+ GPIO_GROUP(GPIOX_13, 0),
+ GPIO_GROUP(GPIOX_14, 0),
+ GPIO_GROUP(GPIOX_15, 0),
+ GPIO_GROUP(GPIOX_16, 0),
+ GPIO_GROUP(GPIOX_17, 0),
+ GPIO_GROUP(GPIOX_18, 0),
+ GPIO_GROUP(GPIOX_19, 0),
+ GPIO_GROUP(GPIOX_20, 0),
+ GPIO_GROUP(GPIOX_21, 0),
+ GPIO_GROUP(GPIOY_0, 0),
+ GPIO_GROUP(GPIOY_1, 0),
+ GPIO_GROUP(GPIOY_2, 0),
+ GPIO_GROUP(GPIOY_3, 0),
+ GPIO_GROUP(GPIOY_4, 0),
+ GPIO_GROUP(GPIOY_5, 0),
+ GPIO_GROUP(GPIOY_6, 0),
+ GPIO_GROUP(GPIOY_7, 0),
+ GPIO_GROUP(GPIOY_8, 0),
+ GPIO_GROUP(GPIOY_9, 0),
+ GPIO_GROUP(GPIOY_10, 0),
+ GPIO_GROUP(GPIOY_11, 0),
+ GPIO_GROUP(GPIOY_12, 0),
+ GPIO_GROUP(GPIOY_13, 0),
+ GPIO_GROUP(GPIOY_14, 0),
+ GPIO_GROUP(GPIOY_15, 0),
+ GPIO_GROUP(GPIOY_16, 0),
+ GPIO_GROUP(GPIODV_0, 0),
+ GPIO_GROUP(GPIODV_1, 0),
+ GPIO_GROUP(GPIODV_2, 0),
+ GPIO_GROUP(GPIODV_3, 0),
+ GPIO_GROUP(GPIODV_4, 0),
+ GPIO_GROUP(GPIODV_5, 0),
+ GPIO_GROUP(GPIODV_6, 0),
+ GPIO_GROUP(GPIODV_7, 0),
+ GPIO_GROUP(GPIODV_8, 0),
+ GPIO_GROUP(GPIODV_9, 0),
+ GPIO_GROUP(GPIODV_10, 0),
+ GPIO_GROUP(GPIODV_11, 0),
+ GPIO_GROUP(GPIODV_12, 0),
+ GPIO_GROUP(GPIODV_13, 0),
+ GPIO_GROUP(GPIODV_14, 0),
+ GPIO_GROUP(GPIODV_15, 0),
+ GPIO_GROUP(GPIODV_16, 0),
+ GPIO_GROUP(GPIODV_17, 0),
+ GPIO_GROUP(GPIODV_18, 0),
+ GPIO_GROUP(GPIODV_19, 0),
+ GPIO_GROUP(GPIODV_20, 0),
+ GPIO_GROUP(GPIODV_21, 0),
+ GPIO_GROUP(GPIODV_22, 0),
+ GPIO_GROUP(GPIODV_23, 0),
+ GPIO_GROUP(GPIODV_24, 0),
+ GPIO_GROUP(GPIODV_25, 0),
+ GPIO_GROUP(GPIODV_26, 0),
+ GPIO_GROUP(GPIODV_27, 0),
+ GPIO_GROUP(GPIODV_28, 0),
+ GPIO_GROUP(GPIODV_29, 0),
+ GPIO_GROUP(GPIOH_0, 0),
+ GPIO_GROUP(GPIOH_1, 0),
+ GPIO_GROUP(GPIOH_2, 0),
+ GPIO_GROUP(GPIOH_3, 0),
+ GPIO_GROUP(GPIOH_4, 0),
+ GPIO_GROUP(GPIOH_5, 0),
+ GPIO_GROUP(GPIOH_6, 0),
+ GPIO_GROUP(GPIOH_7, 0),
+ GPIO_GROUP(GPIOH_8, 0),
+ GPIO_GROUP(GPIOH_9, 0),
+ GPIO_GROUP(GPIOZ_0, 0),
+ GPIO_GROUP(GPIOZ_1, 0),
+ GPIO_GROUP(GPIOZ_2, 0),
+ GPIO_GROUP(GPIOZ_3, 0),
+ GPIO_GROUP(GPIOZ_4, 0),
+ GPIO_GROUP(GPIOZ_5, 0),
+ GPIO_GROUP(GPIOZ_6, 0),
+ GPIO_GROUP(GPIOZ_7, 0),
+ GPIO_GROUP(GPIOZ_8, 0),
+ GPIO_GROUP(GPIOZ_9, 0),
+ GPIO_GROUP(GPIOZ_10, 0),
+ GPIO_GROUP(GPIOZ_11, 0),
+ GPIO_GROUP(GPIOZ_12, 0),
+ GPIO_GROUP(GPIOZ_13, 0),
+ GPIO_GROUP(GPIOZ_14, 0),
+ GPIO_GROUP(GPIOAO_0, AO_OFF),
+ GPIO_GROUP(GPIOAO_1, AO_OFF),
+ GPIO_GROUP(GPIOAO_2, AO_OFF),
+ GPIO_GROUP(GPIOAO_3, AO_OFF),
+ GPIO_GROUP(GPIOAO_4, AO_OFF),
+ GPIO_GROUP(GPIOAO_5, AO_OFF),
+ GPIO_GROUP(GPIOAO_6, AO_OFF),
+ GPIO_GROUP(GPIOAO_7, AO_OFF),
+ GPIO_GROUP(GPIOAO_8, AO_OFF),
+ GPIO_GROUP(GPIOAO_9, AO_OFF),
+ GPIO_GROUP(GPIOAO_10, AO_OFF),
+ GPIO_GROUP(GPIOAO_11, AO_OFF),
+ GPIO_GROUP(GPIOAO_12, AO_OFF),
+ GPIO_GROUP(GPIOAO_13, AO_OFF),
+ GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
+ GPIO_GROUP(GPIO_TEST_N, AO_OFF),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -1045,19 +907,19 @@ static struct meson_pmx_func meson8_functions[] = {
};
static struct meson_bank meson8_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("X", PIN_GPIOX_0, PIN_GPIOX_21, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN_GPIOY_0, PIN_GPIOY_16, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN_GPIODV_0, PIN_GPIODV_29, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN_GPIOH_0, PIN_GPIOH_9, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("Z", PIN_GPIOZ_0, PIN_GPIOZ_14, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
- BANK("CARD", PIN_CARD_0, PIN_CARD_6, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN_BOOT_0, PIN_BOOT_18, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_16, 0), 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", PIN(GPIODV_0, 0), PIN(GPIODV_29, 0), 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("Z", PIN(GPIOZ_0, 0), PIN(GPIOZ_14, 0), 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
+ BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
static struct meson_bank meson8_ao_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("AO", PIN_GPIOAO_0, PIN_GPIO_TEST_N, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
static struct meson_domain_data meson8_domain_data[] = {
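Note on the rewritten meson8_banks[] table above: the arguments follow the "name first last pullen pull dir out in" header comment, so after the bank name and its first/last pin the remaining numbers read naturally as (register, bit) pairs for pull-enable, pull, direction, output and input. The annotated reading below is offered as an aid only; the BANK() macro itself is defined in pinctrl-meson.h and is not part of this hunk.

    /*
     * Reading the "X" bank row above:
     *
     *   BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0)
     *
     *   "X"               - bank name
     *   PIN(GPIOX_0, 0)   - first pin of the bank
     *   PIN(GPIOX_21, 0)  - last pin of the bank
     *   4, 0              - pull-enable: register index, bit offset
     *   4, 0              - pull:        register index, bit offset
     *   0, 0              - direction:   register index, bit offset
     *   1, 0              - output:      register index, bit offset
     *   2, 0              - input:       register index, bit offset
     */
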
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
new file mode 100644
index 000000000000..2f7ea6229880
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -0,0 +1,899 @@
+/*
+ * Pin controller and GPIO driver for Amlogic Meson8b.
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/gpio/meson8b-gpio.h>
+#include "pinctrl-meson.h"
+
+#define AO_OFF 130
+
+static const struct pinctrl_pin_desc meson8b_pins[] = {
+ MESON_PIN(GPIOX_0, 0),
+ MESON_PIN(GPIOX_1, 0),
+ MESON_PIN(GPIOX_2, 0),
+ MESON_PIN(GPIOX_3, 0),
+ MESON_PIN(GPIOX_4, 0),
+ MESON_PIN(GPIOX_5, 0),
+ MESON_PIN(GPIOX_6, 0),
+ MESON_PIN(GPIOX_7, 0),
+ MESON_PIN(GPIOX_8, 0),
+ MESON_PIN(GPIOX_9, 0),
+ MESON_PIN(GPIOX_10, 0),
+ MESON_PIN(GPIOX_11, 0),
+ MESON_PIN(GPIOX_16, 0),
+ MESON_PIN(GPIOX_17, 0),
+ MESON_PIN(GPIOX_18, 0),
+ MESON_PIN(GPIOX_19, 0),
+ MESON_PIN(GPIOX_20, 0),
+ MESON_PIN(GPIOX_21, 0),
+
+ MESON_PIN(GPIOY_0, 0),
+ MESON_PIN(GPIOY_1, 0),
+ MESON_PIN(GPIOY_3, 0),
+ MESON_PIN(GPIOY_6, 0),
+ MESON_PIN(GPIOY_7, 0),
+ MESON_PIN(GPIOY_8, 0),
+ MESON_PIN(GPIOY_9, 0),
+ MESON_PIN(GPIOY_10, 0),
+ MESON_PIN(GPIOY_11, 0),
+ MESON_PIN(GPIOY_12, 0),
+ MESON_PIN(GPIOY_13, 0),
+ MESON_PIN(GPIOY_14, 0),
+
+ MESON_PIN(GPIODV_9, 0),
+ MESON_PIN(GPIODV_24, 0),
+ MESON_PIN(GPIODV_25, 0),
+ MESON_PIN(GPIODV_26, 0),
+ MESON_PIN(GPIODV_27, 0),
+ MESON_PIN(GPIODV_28, 0),
+ MESON_PIN(GPIODV_29, 0),
+
+ MESON_PIN(GPIOH_0, 0),
+ MESON_PIN(GPIOH_1, 0),
+ MESON_PIN(GPIOH_2, 0),
+ MESON_PIN(GPIOH_3, 0),
+ MESON_PIN(GPIOH_4, 0),
+ MESON_PIN(GPIOH_5, 0),
+ MESON_PIN(GPIOH_6, 0),
+ MESON_PIN(GPIOH_7, 0),
+ MESON_PIN(GPIOH_8, 0),
+ MESON_PIN(GPIOH_9, 0),
+
+ MESON_PIN(CARD_0, 0),
+ MESON_PIN(CARD_1, 0),
+ MESON_PIN(CARD_2, 0),
+ MESON_PIN(CARD_3, 0),
+ MESON_PIN(CARD_4, 0),
+ MESON_PIN(CARD_5, 0),
+ MESON_PIN(CARD_6, 0),
+
+ MESON_PIN(BOOT_0, 0),
+ MESON_PIN(BOOT_1, 0),
+ MESON_PIN(BOOT_2, 0),
+ MESON_PIN(BOOT_3, 0),
+ MESON_PIN(BOOT_4, 0),
+ MESON_PIN(BOOT_5, 0),
+ MESON_PIN(BOOT_6, 0),
+ MESON_PIN(BOOT_7, 0),
+ MESON_PIN(BOOT_8, 0),
+ MESON_PIN(BOOT_9, 0),
+ MESON_PIN(BOOT_10, 0),
+ MESON_PIN(BOOT_11, 0),
+ MESON_PIN(BOOT_12, 0),
+ MESON_PIN(BOOT_13, 0),
+ MESON_PIN(BOOT_14, 0),
+ MESON_PIN(BOOT_15, 0),
+ MESON_PIN(BOOT_16, 0),
+ MESON_PIN(BOOT_17, 0),
+ MESON_PIN(BOOT_18, 0),
+
+ MESON_PIN(DIF_0_P, 0),
+ MESON_PIN(DIF_0_N, 0),
+ MESON_PIN(DIF_1_P, 0),
+ MESON_PIN(DIF_1_N, 0),
+ MESON_PIN(DIF_2_P, 0),
+ MESON_PIN(DIF_2_N, 0),
+ MESON_PIN(DIF_3_P, 0),
+ MESON_PIN(DIF_3_N, 0),
+ MESON_PIN(DIF_4_P, 0),
+ MESON_PIN(DIF_4_N, 0),
+
+ MESON_PIN(GPIOAO_0, AO_OFF),
+ MESON_PIN(GPIOAO_1, AO_OFF),
+ MESON_PIN(GPIOAO_2, AO_OFF),
+ MESON_PIN(GPIOAO_3, AO_OFF),
+ MESON_PIN(GPIOAO_4, AO_OFF),
+ MESON_PIN(GPIOAO_5, AO_OFF),
+ MESON_PIN(GPIOAO_6, AO_OFF),
+ MESON_PIN(GPIOAO_7, AO_OFF),
+ MESON_PIN(GPIOAO_8, AO_OFF),
+ MESON_PIN(GPIOAO_9, AO_OFF),
+ MESON_PIN(GPIOAO_10, AO_OFF),
+ MESON_PIN(GPIOAO_11, AO_OFF),
+ MESON_PIN(GPIOAO_12, AO_OFF),
+ MESON_PIN(GPIOAO_13, AO_OFF),
+ MESON_PIN(GPIO_BSD_EN, AO_OFF),
+ MESON_PIN(GPIO_TEST_N, AO_OFF),
+};
+
+/* bank X */
+static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
+static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
+static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
+static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
+static const unsigned int sdxc_d0_0_a_pins[] = { PIN(GPIOX_4, 0) };
+static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
+ PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
+static const unsigned int sdxc_d13_0_a_pins[] = { PIN(GPIOX_5, 0), PIN(GPIOX_6, 0),
+ PIN(GPIOX_7, 0) };
+static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
+static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
+static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
+static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
+static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
+static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
+static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
+
+static const unsigned int sdxc_d0_1_a_pins[] = { PIN(GPIOX_0, 0) };
+static const unsigned int sdxc_d13_1_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
+ PIN(GPIOX_3, 0) };
+static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
+static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
+static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
+static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
+static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
+static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
+static const unsigned int pwm_vs_0_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int pwm_vs_1_pins[] = { PIN(GPIOX_11, 0) };
+
+static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_4, 0) };
+static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_5, 0) };
+static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_6, 0) };
+static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_7, 0) };
+static const unsigned int uart_tx_b1_pins[] = { PIN(GPIOX_8, 0) };
+static const unsigned int uart_rx_b1_pins[] = { PIN(GPIOX_9, 0) };
+static const unsigned int uart_cts_b1_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int uart_rts_b1_pins[] = { PIN(GPIOX_20, 0) };
+
+static const unsigned int iso7816_0_clk_pins[] = { PIN(GPIOX_6, 0) };
+static const unsigned int iso7816_0_data_pins[] = { PIN(GPIOX_7, 0) };
+static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOX_8, 0) };
+static const unsigned int spi_miso_0_pins[] = { PIN(GPIOX_9, 0) };
+static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
+static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
+static const unsigned int iso7816_1_clk_pins[] = { PIN(GPIOX_18, 0) };
+static const unsigned int iso7816_1_data_pins[] = { PIN(GPIOX_19, 0) };
+static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOX_20, 0) };
+
+static const unsigned int tsin_clk_b_pins[] = { PIN(GPIOX_8, 0) };
+static const unsigned int tsin_sop_b_pins[] = { PIN(GPIOX_9, 0) };
+static const unsigned int tsin_d0_b_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int pwm_b_pins[] = { PIN(GPIOX_11, 0) };
+static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
+static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
+static const unsigned int tsin_d_valid_b_pins[] = { PIN(GPIOX_20, 0) };
+
+/* bank Y */
+static const unsigned int tsin_d_valid_a_pins[] = { PIN(GPIOY_0, 0) };
+static const unsigned int tsin_sop_a_pins[] = { PIN(GPIOY_1, 0) };
+static const unsigned int tsin_d17_a_pins[] = { PIN(GPIOY_6, 0), PIN(GPIOY_7, 0),
+ PIN(GPIOY_10, 0), PIN(GPIOY_11, 0),
+ PIN(GPIOY_12, 0), PIN(GPIOY_13, 0),
+ PIN(GPIOY_14, 0) };
+static const unsigned int tsin_clk_a_pins[] = { PIN(GPIOY_8, 0) };
+static const unsigned int tsin_d0_a_pins[] = { PIN(GPIOY_9, 0) };
+
+static const unsigned int spdif_out_0_pins[] = { PIN(GPIOY_3, 0) };
+
+static const unsigned int xtal_24m_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int iso7816_2_clk_pins[] = { PIN(GPIOY_13, 0) };
+static const unsigned int iso7816_2_data_pins[] = { PIN(GPIOY_14, 0) };
+
+/* bank DV */
+static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
+static const unsigned int pwm_c0_pins[] = { PIN(GPIODV_29, 0) };
+
+static const unsigned int pwm_vs_2_pins[] = { PIN(GPIODV_9, 0) };
+static const unsigned int pwm_vs_3_pins[] = { PIN(GPIODV_28, 0) };
+static const unsigned int pwm_vs_4_pins[] = { PIN(GPIODV_29, 0) };
+
+static const unsigned int xtal24_out_pins[] = { PIN(GPIODV_29, 0) };
+
+static const unsigned int uart_tx_c_pins[] = { PIN(GPIODV_24, 0) };
+static const unsigned int uart_rx_c_pins[] = { PIN(GPIODV_25, 0) };
+static const unsigned int uart_cts_c_pins[] = { PIN(GPIODV_26, 0) };
+static const unsigned int uart_rts_c_pins[] = { PIN(GPIODV_27, 0) };
+
+static const unsigned int pwm_c1_pins[] = { PIN(GPIODV_9, 0) };
+
+static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, 0) };
+static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, 0) };
+static const unsigned int i2c_sda_b0_pins[] = { PIN(GPIODV_26, 0) };
+static const unsigned int i2c_sck_b0_pins[] = { PIN(GPIODV_27, 0) };
+static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIODV_28, 0) };
+static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIODV_29, 0) };
+
+/* bank H */
+static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
+static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
+static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
+static const unsigned int hdmi_cec_0_pins[] = { PIN(GPIOH_3, 0) };
+static const unsigned int eth_txd1_0_pins[] = { PIN(GPIOH_5, 0) };
+static const unsigned int eth_txd0_0_pins[] = { PIN(GPIOH_6, 0) };
+static const unsigned int clk_24m_out_pins[] = { PIN(GPIOH_9, 0) };
+
+static const unsigned int spi_ss1_pins[] = { PIN(GPIOH_0, 0) };
+static const unsigned int spi_ss2_pins[] = { PIN(GPIOH_1, 0) };
+static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOH_3, 0) };
+static const unsigned int spi_miso_1_pins[] = { PIN(GPIOH_4, 0) };
+static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOH_5, 0) };
+static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOH_6, 0) };
+
+static const unsigned int eth_txd3_pins[] = { PIN(GPIOH_7, 0) };
+static const unsigned int eth_txd2_pins[] = { PIN(GPIOH_8, 0) };
+static const unsigned int eth_tx_clk_pins[] = { PIN(GPIOH_9, 0) };
+
+static const unsigned int i2c_sda_b1_pins[] = { PIN(GPIOH_3, 0) };
+static const unsigned int i2c_sck_b1_pins[] = { PIN(GPIOH_4, 0) };
+static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOH_5, 0) };
+static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOH_6, 0) };
+static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
+static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
+
+/* bank BOOT */
+static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
+ PIN(BOOT_2, 0), PIN(BOOT_3, 0),
+ PIN(BOOT_4, 0), PIN(BOOT_5, 0),
+ PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
+static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
+static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
+static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
+static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
+static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
+static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
+static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
+static const unsigned int nand_dqs_0_pins[] = { PIN(BOOT_15, 0) };
+static const unsigned int nand_dqs_1_pins[] = { PIN(BOOT_18, 0) };
+
+static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
+static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
+ PIN(BOOT_3, 0) };
+static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
+ PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
+static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_8, 0) };
+static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_10, 0) };
+static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
+static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
+static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
+static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
+
+static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
+static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
+static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
+static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
+static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_8, 0) };
+static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_10, 0) };
+
+/* bank CARD */
+static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
+static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
+static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
+static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
+static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
+
+static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
+ PIN(CARD_5, 0) };
+static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
+static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
+static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
+
+/* bank AO */
+static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
+static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
+static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
+static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
+static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
+static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int clk_32k_in_out_pins[] = { PIN(GPIOAO_6, AO_OFF) };
+static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
+static const unsigned int hdmi_cec_1_pins[] = { PIN(GPIOAO_12, AO_OFF) };
+static const unsigned int ir_blaster_pins[] = { PIN(GPIOAO_13, AO_OFF) };
+
+static const unsigned int pwm_c2_pins[] = { PIN(GPIOAO_3, AO_OFF) };
+static const unsigned int i2c_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
+static const unsigned int i2c_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int ir_remote_out_pins[] = { PIN(GPIOAO_7, AO_OFF) };
+static const unsigned int i2s_am_clk_out_pins[] = { PIN(GPIOAO_8, AO_OFF) };
+static const unsigned int i2s_ao_clk_out_pins[] = { PIN(GPIOAO_9, AO_OFF) };
+static const unsigned int i2s_lr_clk_out_pins[] = { PIN(GPIOAO_10, AO_OFF) };
+static const unsigned int i2s_out_01_pins[] = { PIN(GPIOAO_11, AO_OFF) };
+
+static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
+static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
+static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, AO_OFF) };
+static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, AO_OFF) };
+static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
+static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int spdif_out_1_pins[] = { PIN(GPIOAO_6, AO_OFF) };
+
+static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOAO_6, AO_OFF) };
+static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOAO_9, AO_OFF) };
+static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOAO_10, AO_OFF) };
+
+/* bank DIF */
+static const unsigned int eth_rxd1_pins[] = { PIN(DIF_0_P, 0) };
+static const unsigned int eth_rxd0_pins[] = { PIN(DIF_0_N, 0) };
+static const unsigned int eth_rx_dv_pins[] = { PIN(DIF_1_P, 0) };
+static const unsigned int eth_rx_clk_pins[] = { PIN(DIF_1_N, 0) };
+static const unsigned int eth_txd0_1_pins[] = { PIN(DIF_2_P, 0) };
+static const unsigned int eth_txd1_1_pins[] = { PIN(DIF_2_N, 0) };
+static const unsigned int eth_tx_en_pins[] = { PIN(DIF_3_P, 0) };
+static const unsigned int eth_ref_clk_pins[] = { PIN(DIF_3_N, 0) };
+static const unsigned int eth_mdc_pins[] = { PIN(DIF_4_P, 0) };
+static const unsigned int eth_mdio_en_pins[] = { PIN(DIF_4_N, 0) };
+
+static struct meson_pmx_group meson8b_groups[] = {
+ GPIO_GROUP(GPIOX_0, 0),
+ GPIO_GROUP(GPIOX_1, 0),
+ GPIO_GROUP(GPIOX_2, 0),
+ GPIO_GROUP(GPIOX_3, 0),
+ GPIO_GROUP(GPIOX_4, 0),
+ GPIO_GROUP(GPIOX_5, 0),
+ GPIO_GROUP(GPIOX_6, 0),
+ GPIO_GROUP(GPIOX_7, 0),
+ GPIO_GROUP(GPIOX_8, 0),
+ GPIO_GROUP(GPIOX_9, 0),
+ GPIO_GROUP(GPIOX_10, 0),
+ GPIO_GROUP(GPIOX_11, 0),
+ GPIO_GROUP(GPIOX_16, 0),
+ GPIO_GROUP(GPIOX_17, 0),
+ GPIO_GROUP(GPIOX_18, 0),
+ GPIO_GROUP(GPIOX_19, 0),
+ GPIO_GROUP(GPIOX_20, 0),
+ GPIO_GROUP(GPIOX_21, 0),
+
+ GPIO_GROUP(GPIOY_0, 0),
+ GPIO_GROUP(GPIOY_1, 0),
+ GPIO_GROUP(GPIOY_3, 0),
+ GPIO_GROUP(GPIOY_6, 0),
+ GPIO_GROUP(GPIOY_7, 0),
+ GPIO_GROUP(GPIOY_8, 0),
+ GPIO_GROUP(GPIOY_9, 0),
+ GPIO_GROUP(GPIOY_10, 0),
+ GPIO_GROUP(GPIOY_11, 0),
+ GPIO_GROUP(GPIOY_12, 0),
+ GPIO_GROUP(GPIOY_13, 0),
+ GPIO_GROUP(GPIOY_14, 0),
+
+ GPIO_GROUP(GPIODV_9, 0),
+ GPIO_GROUP(GPIODV_24, 0),
+ GPIO_GROUP(GPIODV_25, 0),
+ GPIO_GROUP(GPIODV_26, 0),
+ GPIO_GROUP(GPIODV_27, 0),
+ GPIO_GROUP(GPIODV_28, 0),
+ GPIO_GROUP(GPIODV_29, 0),
+
+ GPIO_GROUP(GPIOH_0, 0),
+ GPIO_GROUP(GPIOH_1, 0),
+ GPIO_GROUP(GPIOH_2, 0),
+ GPIO_GROUP(GPIOH_3, 0),
+ GPIO_GROUP(GPIOH_4, 0),
+ GPIO_GROUP(GPIOH_5, 0),
+ GPIO_GROUP(GPIOH_6, 0),
+ GPIO_GROUP(GPIOH_7, 0),
+ GPIO_GROUP(GPIOH_8, 0),
+ GPIO_GROUP(GPIOH_9, 0),
+
+ GPIO_GROUP(DIF_0_P, 0),
+ GPIO_GROUP(DIF_0_N, 0),
+ GPIO_GROUP(DIF_1_P, 0),
+ GPIO_GROUP(DIF_1_N, 0),
+ GPIO_GROUP(DIF_2_P, 0),
+ GPIO_GROUP(DIF_2_N, 0),
+ GPIO_GROUP(DIF_3_P, 0),
+ GPIO_GROUP(DIF_3_N, 0),
+ GPIO_GROUP(DIF_4_P, 0),
+ GPIO_GROUP(DIF_4_N, 0),
+
+ GPIO_GROUP(GPIOAO_0, AO_OFF),
+ GPIO_GROUP(GPIOAO_1, AO_OFF),
+ GPIO_GROUP(GPIOAO_2, AO_OFF),
+ GPIO_GROUP(GPIOAO_3, AO_OFF),
+ GPIO_GROUP(GPIOAO_4, AO_OFF),
+ GPIO_GROUP(GPIOAO_5, AO_OFF),
+ GPIO_GROUP(GPIOAO_6, AO_OFF),
+ GPIO_GROUP(GPIOAO_7, AO_OFF),
+ GPIO_GROUP(GPIOAO_8, AO_OFF),
+ GPIO_GROUP(GPIOAO_9, AO_OFF),
+ GPIO_GROUP(GPIOAO_10, AO_OFF),
+ GPIO_GROUP(GPIOAO_11, AO_OFF),
+ GPIO_GROUP(GPIOAO_12, AO_OFF),
+ GPIO_GROUP(GPIOAO_13, AO_OFF),
+ GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
+ GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+
+ /* bank X */
+ GROUP(sd_d0_a, 8, 5),
+ GROUP(sd_d1_a, 8, 4),
+ GROUP(sd_d2_a, 8, 3),
+ GROUP(sd_d3_a, 8, 2),
+ GROUP(sdxc_d0_0_a, 5, 29),
+ GROUP(sdxc_d47_a, 5, 12),
+ GROUP(sdxc_d13_0_a, 5, 28),
+ GROUP(sd_clk_a, 8, 1),
+ GROUP(sd_cmd_a, 8, 0),
+ GROUP(xtal_32k_out, 3, 22),
+ GROUP(xtal_24m_out, 3, 20),
+ GROUP(uart_tx_b0, 4, 9),
+ GROUP(uart_rx_b0, 4, 8),
+ GROUP(uart_cts_b0, 4, 7),
+ GROUP(uart_rts_b0, 4, 6),
+ GROUP(sdxc_d0_1_a, 5, 14),
+ GROUP(sdxc_d13_1_a, 5, 13),
+ GROUP(pcm_out_a, 3, 30),
+ GROUP(pcm_in_a, 3, 29),
+ GROUP(pcm_fs_a, 3, 28),
+ GROUP(pcm_clk_a, 3, 27),
+ GROUP(sdxc_clk_a, 5, 11),
+ GROUP(sdxc_cmd_a, 5, 10),
+ GROUP(pwm_vs_0, 7, 31),
+ GROUP(pwm_e, 9, 19),
+ GROUP(pwm_vs_1, 7, 30),
+ GROUP(uart_tx_a, 4, 17),
+ GROUP(uart_rx_a, 4, 16),
+ GROUP(uart_cts_a, 4, 15),
+ GROUP(uart_rts_a, 4, 14),
+ GROUP(uart_tx_b1, 6, 19),
+ GROUP(uart_rx_b1, 6, 18),
+ GROUP(uart_cts_b1, 6, 17),
+ GROUP(uart_rts_b1, 6, 16),
+ GROUP(iso7816_0_clk, 5, 9),
+ GROUP(iso7816_0_data, 5, 8),
+ GROUP(spi_sclk_0, 4, 22),
+ GROUP(spi_miso_0, 4, 24),
+ GROUP(spi_mosi_0, 4, 23),
+ GROUP(iso7816_det, 4, 21),
+ GROUP(iso7816_reset, 4, 20),
+ GROUP(iso7816_1_clk, 4, 19),
+ GROUP(iso7816_1_data, 4, 18),
+ GROUP(spi_ss0_0, 4, 25),
+ GROUP(tsin_clk_b, 3, 6),
+ GROUP(tsin_sop_b, 3, 7),
+ GROUP(tsin_d0_b, 3, 8),
+ GROUP(pwm_b, 2, 3),
+ GROUP(i2c_sda_d0, 4, 5),
+ GROUP(i2c_sck_d0, 4, 4),
+ GROUP(tsin_d_valid_b, 3, 9),
+
+ /* bank Y */
+ GROUP(tsin_d_valid_a, 3, 2),
+ GROUP(tsin_sop_a, 3, 1),
+ GROUP(tsin_d17_a, 3, 5),
+ GROUP(tsin_clk_a, 3, 0),
+ GROUP(tsin_d0_a, 3, 4),
+ GROUP(spdif_out_0, 1, 7),
+ GROUP(xtal_24m, 3, 18),
+ GROUP(iso7816_2_clk, 5, 7),
+ GROUP(iso7816_2_data, 5, 6),
+
+ /* bank DV */
+ GROUP(pwm_d, 3, 26),
+ GROUP(pwm_c0, 3, 25),
+ GROUP(pwm_vs_2, 7, 28),
+ GROUP(pwm_vs_3, 7, 27),
+ GROUP(pwm_vs_4, 7, 26),
+ GROUP(xtal24_out, 7, 25),
+ GROUP(uart_tx_c, 6, 23),
+ GROUP(uart_rx_c, 6, 22),
+ GROUP(uart_cts_c, 6, 21),
+ GROUP(uart_rts_c, 6, 20),
+ GROUP(pwm_c1, 3, 24),
+ GROUP(i2c_sda_a, 9, 31),
+ GROUP(i2c_sck_a, 9, 30),
+ GROUP(i2c_sda_b0, 9, 29),
+ GROUP(i2c_sck_b0, 9, 28),
+ GROUP(i2c_sda_c0, 9, 27),
+ GROUP(i2c_sck_c0, 9, 26),
+
+ /* bank H */
+ GROUP(hdmi_hpd, 1, 26),
+ GROUP(hdmi_sda, 1, 25),
+ GROUP(hdmi_scl, 1, 24),
+ GROUP(hdmi_cec_0, 1, 23),
+ GROUP(eth_txd1_0, 7, 21),
+ GROUP(eth_txd0_0, 7, 20),
+ GROUP(clk_24m_out, 4, 1),
+ GROUP(spi_ss1, 8, 11),
+ GROUP(spi_ss2, 8, 12),
+ GROUP(spi_ss0_1, 9, 13),
+ GROUP(spi_miso_1, 9, 12),
+ GROUP(spi_mosi_1, 9, 11),
+ GROUP(spi_sclk_1, 9, 10),
+ GROUP(eth_txd3, 6, 13),
+ GROUP(eth_txd2, 6, 12),
+ GROUP(eth_tx_clk, 6, 11),
+ GROUP(i2c_sda_b1, 5, 27),
+ GROUP(i2c_sck_b1, 5, 26),
+ GROUP(i2c_sda_c1, 5, 25),
+ GROUP(i2c_sck_c1, 5, 24),
+ GROUP(i2c_sda_d1, 4, 3),
+ GROUP(i2c_sck_d1, 4, 2),
+
+ /* bank BOOT */
+ GROUP(nand_io, 2, 26),
+ GROUP(nand_io_ce0, 2, 25),
+ GROUP(nand_io_ce1, 2, 24),
+ GROUP(nand_io_rb0, 2, 17),
+ GROUP(nand_ale, 2, 21),
+ GROUP(nand_cle, 2, 20),
+ GROUP(nand_wen_clk, 2, 19),
+ GROUP(nand_ren_clk, 2, 18),
+ GROUP(nand_dqs_0, 2, 27),
+ GROUP(nand_dqs_1, 2, 28),
+ GROUP(sdxc_d0_c, 4, 30),
+ GROUP(sdxc_d13_c, 4, 29),
+ GROUP(sdxc_d47_c, 4, 28),
+ GROUP(sdxc_clk_c, 7, 19),
+ GROUP(sdxc_cmd_c, 7, 18),
+ GROUP(nor_d, 5, 1),
+ GROUP(nor_q, 5, 3),
+ GROUP(nor_c, 5, 2),
+ GROUP(nor_cs, 5, 0),
+ GROUP(sd_d0_c, 6, 29),
+ GROUP(sd_d1_c, 6, 28),
+ GROUP(sd_d2_c, 6, 27),
+ GROUP(sd_d3_c, 6, 26),
+ GROUP(sd_cmd_c, 6, 30),
+ GROUP(sd_clk_c, 6, 31),
+
+ /* bank CARD */
+ GROUP(sd_d1_b, 2, 14),
+ GROUP(sd_d0_b, 2, 15),
+ GROUP(sd_clk_b, 2, 11),
+ GROUP(sd_cmd_b, 2, 10),
+ GROUP(sd_d3_b, 2, 12),
+ GROUP(sd_d2_b, 2, 13),
+ GROUP(sdxc_d13_b, 2, 6),
+ GROUP(sdxc_d0_b, 2, 7),
+ GROUP(sdxc_clk_b, 2, 5),
+ GROUP(sdxc_cmd_b, 2, 4),
+
+ /* bank AO */
+ GROUP(uart_tx_ao_a, 0, 12),
+ GROUP(uart_rx_ao_a, 0, 11),
+ GROUP(uart_cts_ao_a, 0, 10),
+ GROUP(uart_rts_ao_a, 0, 9),
+ GROUP(i2c_mst_sck_ao, 0, 6),
+ GROUP(i2c_mst_sda_ao, 0, 5),
+ GROUP(clk_32k_in_out, 0, 18),
+ GROUP(remote_input, 0, 0),
+ GROUP(hdmi_cec_1, 0, 17),
+ GROUP(ir_blaster, 0, 31),
+ GROUP(pwm_c2, 0, 22),
+ GROUP(i2c_sck_ao, 0, 2),
+ GROUP(i2c_sda_ao, 0, 1),
+ GROUP(ir_remote_out, 0, 21),
+ GROUP(i2s_am_clk_out, 0, 30),
+ GROUP(i2s_ao_clk_out, 0, 29),
+ GROUP(i2s_lr_clk_out, 0, 28),
+ GROUP(i2s_out_01, 0, 27),
+ GROUP(uart_tx_ao_b0, 0, 26),
+ GROUP(uart_rx_ao_b0, 0, 25),
+ GROUP(uart_cts_ao_b, 0, 8),
+ GROUP(uart_rts_ao_b, 0, 7),
+ GROUP(uart_tx_ao_b1, 0, 24),
+ GROUP(uart_rx_ao_b1, 0, 23),
+ GROUP(spdif_out_1, 0, 16),
+ GROUP(i2s_in_ch01, 0, 13),
+ GROUP(i2s_ao_clk_in, 0, 15),
+ GROUP(i2s_lr_clk_in, 0, 14),
+
+ /* bank DIF */
+ GROUP(eth_rxd1, 6, 0),
+ GROUP(eth_rxd0, 6, 1),
+ GROUP(eth_rx_dv, 6, 2),
+ GROUP(eth_rx_clk, 6, 3),
+ GROUP(eth_txd0_1, 6, 4),
+ GROUP(eth_txd1_1, 6, 5),
+ GROUP(eth_tx_en, 6, 0),
+ GROUP(eth_ref_clk, 6, 8),
+ GROUP(eth_mdc, 6, 9),
+ GROUP(eth_mdio_en, 6, 10),
+};
+
+static const char * const gpio_groups[] = {
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_16", "GPIOX_17", "GPIOX_18",
+ "GPIOX_19", "GPIOX_20", "GPIOX_21",
+
+ "GPIOY_0", "GPIOY_1", "GPIOY_3", "GPIOY_6", "GPIOY_7",
+ "GPIOY_8", "GPIOY_9", "GPIOY_10", "GPIOY_11", "GPIOY_12",
+ "GPIOY_13", "GPIOY_14",
+
+ "GPIODV_9", "GPIODV_24", "GPIODV_25", "GPIODV_26",
+ "GPIODV_27", "GPIODV_28", "GPIODV_29",
+
+ "GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3", "GPIOH_4",
+ "GPIOH_5", "GPIOH_6", "GPIOH_7", "GPIOH_8", "GPIOH_9",
+
+ "CARD_0", "CARD_1", "CARD_2", "CARD_3", "CARD_4",
+ "CARD_5", "CARD_6",
+
+ "BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4",
+ "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
+ "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+ "BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
+
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
+ "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
+ "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
+ "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N",
+
+ "DIF_0_P", "DIF_0_N", "DIF_1_P", "DIF_1_N",
+ "DIF_2_P", "DIF_2_N", "DIF_3_P", "DIF_3_N",
+ "DIF_4_P", "DIF_4_N"
+};
+
+static const char * const sd_a_groups[] = {
+ "sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a",
+ "sd_cmd_a"
+};
+
+static const char * const sdxc_a_groups[] = {
+ "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
+ "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
+};
+
+static const char * const pcm_a_groups[] = {
+ "pcm_out_a", "pcm_in_a", "pcm_fs_a", "pcm_clk_a"
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_tx_a", "uart_rx_a", "uart_cts_a", "uart_rts_a"
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_tx_b0", "uart_rx_b0", "uart_cts_b0", "uart_rts_b0",
+ "uart_tx_b1", "uart_rx_b1", "uart_cts_b1", "uart_rts_b1"
+};
+
+static const char * const iso7816_groups[] = {
+ "iso7816_det", "iso7816_reset", "iso7816_0_clk", "iso7816_0_data",
+ "iso7816_1_clk", "iso7816_1_data", "iso7816_2_clk", "iso7816_2_data"
+};
+
+static const char * const i2c_d_groups[] = {
+ "i2c_sda_d0", "i2c_sck_d0", "i2c_sda_d1", "i2c_sck_d1"
+};
+
+static const char * const xtal_groups[] = {
+ "xtal_32k_out", "xtal_24m_out", "xtal_24m", "xtal24_out"
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_tx_c", "uart_rx_c", "uart_cts_c", "uart_rts_c"
+};
+
+static const char * const i2c_c_groups[] = {
+ "i2c_sda_c0", "i2c_sck_c0", "i2c_sda_c1", "i2c_sck_c1"
+};
+
+static const char * const hdmi_groups[] = {
+ "hdmi_hpd", "hdmi_sda", "hdmi_scl", "hdmi_cec_0",
+ "hdmi_cec_1"
+};
+
+static const char * const spi_groups[] = {
+ "spi_ss0_0", "spi_miso_0", "spi_mosi_0", "spi_sclk_0",
+ "spi_ss0_1", "spi_ss1", "spi_sclk_1", "spi_mosi_1",
+ "spi_miso_1", "spi_ss2"
+};
+
+static const char * const ethernet_groups[] = {
+ "eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
+ "eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
+ "eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
+ "eth_txd2", "eth_txd3"
+};
+
+static const char * const i2c_a_groups[] = {
+ "i2c_sda_a", "i2c_sck_a",
+};
+
+static const char * const i2c_b_groups[] = {
+ "i2c_sda_b0", "i2c_sck_b0", "i2c_sda_b1", "i2c_sck_b1"
+};
+
+static const char * const sd_c_groups[] = {
+ "sd_d0_c", "sd_d1_c", "sd_d2_c", "sd_d3_c",
+ "sd_cmd_c", "sd_clk_c"
+};
+
+static const char * const sdxc_c_groups[] = {
+ "sdxc_d0_c", "sdxc_d13_c", "sdxc_d47_c", "sdxc_cmd_c",
+ "sdxc_clk_c"
+};
+
+static const char * const nand_groups[] = {
+ "nand_io", "nand_io_ce0", "nand_io_ce1",
+ "nand_io_rb0", "nand_ale", "nand_cle",
+ "nand_wen_clk", "nand_ren_clk", "nand_dqs0",
+ "nand_dqs1"
+};
+
+static const char * const nor_groups[] = {
+ "nor_d", "nor_q", "nor_c", "nor_cs"
+};
+
+static const char * const sd_b_groups[] = {
+ "sd_d1_b", "sd_d0_b", "sd_clk_b", "sd_cmd_b",
+ "sd_d3_b", "sd_d2_b"
+};
+
+static const char * const sdxc_b_groups[] = {
+ "sdxc_d13_b", "sdxc_d0_b", "sdxc_clk_b", "sdxc_cmd_b"
+};
+
+static const char * const uart_ao_groups[] = {
+ "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a"
+};
+
+static const char * const remote_groups[] = {
+ "remote_input", "ir_blaster", "ir_remote_out"
+};
+
+static const char * const i2c_slave_ao_groups[] = {
+ "i2c_sck_ao", "i2c_sda_ao"
+};
+
+static const char * const uart_ao_b_groups[] = {
+ "uart_tx_ao_b0", "uart_rx_ao_b0", "uart_tx_ao_b1", "uart_rx_ao_b1",
+ "uart_cts_ao_b", "uart_rts_ao_b"
+};
+
+static const char * const i2c_mst_ao_groups[] = {
+ "i2c_mst_sck_ao", "i2c_mst_sda_ao"
+};
+
+static const char * const clk_groups[] = {
+ "clk_24m_out", "clk_32k_in_out"
+};
+
+static const char * const spdif_groups[] = {
+ "spdif_out_1", "spdif_out_0"
+};
+
+static const char * const i2s_groups[] = {
+ "i2s_am_clk_out", "i2s_ao_clk_out", "i2s_lr_clk_out",
+ "i2s_out_01", "i2s_in_ch01", "i2s_ao_clk_in",
+ "i2s_lr_clk_in"
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b"
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c0", "pwm_c1", "pwm_c2"
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d"
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e"
+};
+
+static const char * const pwm_vs_groups[] = {
+ "pwm_vs_0", "pwm_vs_1", "pwm_vs_2",
+ "pwm_vs_3", "pwm_vs_4"
+};
+
+static const char * const tsin_a_groups[] = {
+ "tsin_d0_a", "tsin_d17_a", "tsin_clk_a", "tsin_sop_a",
+ "tsin_d_valid_a"
+};
+
+static const char * const tsin_b_groups[] = {
+ "tsin_d0_b", "tsin_clk_b", "tsin_sop_b", "tsin_d_valid_b"
+};
+
+static struct meson_pmx_func meson8b_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(sd_a),
+ FUNCTION(sdxc_a),
+ FUNCTION(pcm_a),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(iso7816),
+ FUNCTION(i2c_d),
+ FUNCTION(xtal),
+ FUNCTION(uart_c),
+ FUNCTION(i2c_c),
+ FUNCTION(hdmi),
+ FUNCTION(spi),
+ FUNCTION(ethernet),
+ FUNCTION(i2c_a),
+ FUNCTION(i2c_b),
+ FUNCTION(sd_c),
+ FUNCTION(sdxc_c),
+ FUNCTION(nand),
+ FUNCTION(nor),
+ FUNCTION(sd_b),
+ FUNCTION(sdxc_b),
+ FUNCTION(uart_ao),
+ FUNCTION(remote),
+ FUNCTION(i2c_slave_ao),
+ FUNCTION(uart_ao_b),
+ FUNCTION(i2c_mst_ao),
+ FUNCTION(clk),
+ FUNCTION(spdif),
+ FUNCTION(i2s),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_vs),
+ FUNCTION(tsin_a),
+ FUNCTION(tsin_b),
+};
+
+static struct meson_bank meson8b_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", PIN(GPIODV_9, 0), PIN(GPIODV_29, 0), 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
+};
+
+static struct meson_bank meson8b_ao_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+};
+
+static struct meson_domain_data meson8b_domain_data[] = {
+ {
+ .name = "banks",
+ .banks = meson8b_banks,
+ .num_banks = ARRAY_SIZE(meson8b_banks),
+ .pin_base = 0,
+ .num_pins = 83,
+ },
+ {
+ .name = "ao-bank",
+ .banks = meson8b_ao_banks,
+ .num_banks = ARRAY_SIZE(meson8b_ao_banks),
+ .pin_base = 83,
+ .num_pins = 16,
+ },
+};
+
+struct meson_pinctrl_data meson8b_pinctrl_data = {
+ .pins = meson8b_pins,
+ .groups = meson8b_groups,
+ .funcs = meson8b_functions,
+ .domain_data = meson8b_domain_data,
+ .num_pins = ARRAY_SIZE(meson8b_pins),
+ .num_groups = ARRAY_SIZE(meson8b_groups),
+ .num_funcs = ARRAY_SIZE(meson8b_functions),
+ .num_domains = ARRAY_SIZE(meson8b_domain_data),
+};
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index d6dd8358a6f6..170602407c0d 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -26,6 +26,10 @@ config PINCTRL_ARMADA_38X
bool
select PINCTRL_MVEBU
+config PINCTRL_ARMADA_39X
+ bool
+ select PINCTRL_MVEBU
+
config PINCTRL_ARMADA_XP
bool
select PINCTRL_MVEBU
diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile
index a0818e96374b..554d8af14eeb 100644
--- a/drivers/pinctrl/mvebu/Makefile
+++ b/drivers/pinctrl/mvebu/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o
obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o
obj-$(CONFIG_PINCTRL_ARMADA_375) += pinctrl-armada-375.o
obj-$(CONFIG_PINCTRL_ARMADA_38X) += pinctrl-armada-38x.o
+obj-$(CONFIG_PINCTRL_ARMADA_39X) += pinctrl-armada-39x.o
obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o
obj-$(CONFIG_PINCTRL_ORION) += pinctrl-orion.o
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index c4f51d0cd2cc..03aa58c4cb85 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -364,7 +364,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
MPP_FUNCTION(0x5, "audio", "mclk"),
MPP_FUNCTION(0x6, "uart0", "cts")),
MPP_MODE(63,
- MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "spi0", "sck"),
MPP_FUNCTION(0x2, "tclk", NULL)),
MPP_MODE(64,
@@ -379,7 +379,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
-static struct of_device_id armada_370_pinctrl_of_match[] = {
+static const struct of_device_id armada_370_pinctrl_of_match[] = {
{ .compatible = "marvell,mv88f6710-pinctrl" },
{ },
};
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
index cd7c8f51f7d9..ca1e7571fedb 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -399,7 +399,7 @@ static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
static struct mvebu_pinctrl_soc_info armada_375_pinctrl_info;
-static struct of_device_id armada_375_pinctrl_of_match[] = {
+static const struct of_device_id armada_375_pinctrl_of_match[] = {
{ .compatible = "marvell,mv88f6720-pinctrl" },
{ },
};
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 7302f66f4f19..83bbcc72be1f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -389,7 +389,7 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
static struct mvebu_pinctrl_soc_info armada_38x_pinctrl_info;
-static struct of_device_id armada_38x_pinctrl_of_match[] = {
+static const struct of_device_id armada_38x_pinctrl_of_match[] = {
{
.compatible = "marvell,mv88f6810-pinctrl",
.data = (void *) V_88F6810,
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
new file mode 100644
index 000000000000..42491624d660
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
@@ -0,0 +1,432 @@
+/*
+ * Marvell Armada 39x pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static void __iomem *mpp_base;
+
+static int armada_39x_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_39x_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+enum {
+ V_88F6920 = BIT(0),
+ V_88F6928 = BIT(1),
+ V_88F6920_PLUS = (V_88F6920 | V_88F6928),
+};
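+
+/*
+ * Each MPP_VAR_FUNCTION() entry below is gated by one of these variant
+ * masks: V_88F6920_PLUS marks settings shared by both SoCs, while the
+ * "sata0"/"sata1" "present" entries are tagged V_88F6928 only,
+ * presumably because only that variant provides SATA. As a sketch of
+ * the effect: on an 88F6920, requesting function 4 ("sata1"/"present")
+ * on MPP 19 would be refused, since (V_88F6928 & V_88F6920) == 0 when
+ * the mode is matched against soc->variant.
+ */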
+
+static struct mvebu_mpp_mode armada_39x_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ua0", "rxd", V_88F6920_PLUS)),
+ MPP_MODE(1,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ua0", "txd", V_88F6920_PLUS)),
+ MPP_MODE(2,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c0", "sck", V_88F6920_PLUS)),
+ MPP_MODE(3,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c0", "sda", V_88F6920_PLUS)),
+ MPP_MODE(4,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "ua1", "txd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "smi", "mdc", V_88F6920_PLUS)),
+ MPP_MODE(5,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "ua1", "rxd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "smi", "mdio", V_88F6920_PLUS)),
+ MPP_MODE(6,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs3", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "xsmi", "mdio", V_88F6920_PLUS)),
+ MPP_MODE(7,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad9", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "xsmi", "mdc", V_88F6920_PLUS)),
+ MPP_MODE(8,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad10", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ptp", "trig", V_88F6920_PLUS)),
+ MPP_MODE(9,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad11", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ptp", "clk", V_88F6920_PLUS)),
+ MPP_MODE(10,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad12", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ptp", "event", V_88F6920_PLUS)),
+ MPP_MODE(11,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad13", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "led", "clk", V_88F6920_PLUS)),
+ MPP_MODE(12,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad14", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "led", "stb", V_88F6920_PLUS)),
+ MPP_MODE(13,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad15", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "led", "data", V_88F6920_PLUS)),
+ MPP_MODE(14,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "vtt", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "wen1", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ua1", "txd", V_88F6920_PLUS)),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "mosi", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "i2c1", "sck", V_88F6920_PLUS)),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "miso", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "i2c1", "sda", V_88F6920_PLUS)),
+ MPP_MODE(17,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "rxd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "sck", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "smi", "mdio", V_88F6920_PLUS)),
+ MPP_MODE(18,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "txd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "i2c2", "sck", V_88F6920_PLUS)),
+ MPP_MODE(19,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sata1", "present", V_88F6928),
+ MPP_VAR_FUNCTION(5, "ua0", "cts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "rxd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "i2c2", "sda", V_88F6920_PLUS)),
+ MPP_MODE(20,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sata0", "present", V_88F6928),
+ MPP_VAR_FUNCTION(5, "ua0", "rts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "txd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "smi", "mdc", V_88F6920_PLUS)),
+ MPP_MODE(21,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs1", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "sata0", "present", V_88F6928),
+ MPP_VAR_FUNCTION(4, "sd", "cmd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "bootcs", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "rxd0", V_88F6920_PLUS)),
+ MPP_MODE(22,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "mosi", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad0", V_88F6920_PLUS)),
+ MPP_MODE(23,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "sck", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad2", V_88F6920_PLUS)),
+ MPP_MODE(24,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "miso", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "ua0", "cts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "rxd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d4", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "readyn", V_88F6920_PLUS)),
+ MPP_MODE(25,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "ua0", "rts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "txd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d5", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs0", V_88F6920_PLUS)),
+ MPP_MODE(26,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs2", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "i2c1", "sck", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d6", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs1", V_88F6920_PLUS)),
+ MPP_MODE(27,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs3", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "i2c1", "sda", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d7", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs2", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "txclkout", V_88F6920_PLUS)),
+ MPP_MODE(28,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "clk", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad5", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "txd0", V_88F6920_PLUS)),
+ MPP_MODE(29,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ale0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "txd1", V_88F6920_PLUS)),
+ MPP_MODE(30,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "oen", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "txd2", V_88F6920_PLUS)),
+ MPP_MODE(31,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ale1", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "txd3", V_88F6920_PLUS)),
+ MPP_MODE(32,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "wen0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "txctl", V_88F6920_PLUS)),
+ MPP_MODE(33,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "m", "decc", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad3", V_88F6920_PLUS)),
+ MPP_MODE(34,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad1", V_88F6920_PLUS)),
+ MPP_MODE(35,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a1", V_88F6920_PLUS)),
+ MPP_MODE(36,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a0", V_88F6920_PLUS)),
+ MPP_MODE(37,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d3", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad8", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "rxclk", V_88F6920_PLUS)),
+ MPP_MODE(38,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ref", "clk", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad4", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "rxd1", V_88F6920_PLUS)),
+ MPP_MODE(39,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c1", "sck", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d1", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a2", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "rxd2", V_88F6920_PLUS)),
+ MPP_MODE(40,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c1", "sda", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "sd", "d2", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad6", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "rxd3", V_88F6920_PLUS)),
+ MPP_MODE(41,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "rxd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs3", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "burstn", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "nd", "rbn0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(8, "ge", "rxctl", V_88F6920_PLUS)),
+ MPP_MODE(42,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "txd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad7", V_88F6920_PLUS)),
+ MPP_MODE(43,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "clkreq", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "m", "vtt", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs2", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "clkout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "nd", "rbn1", V_88F6920_PLUS)),
+ MPP_MODE(44,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "present", V_88F6928),
+ MPP_VAR_FUNCTION(2, "sata1", "present", V_88F6928),
+ MPP_VAR_FUNCTION(7, "led", "clk", V_88F6920_PLUS)),
+ MPP_MODE(45,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "rxd", V_88F6920_PLUS)),
+ MPP_MODE(46,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "txd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "led", "stb", V_88F6920_PLUS)),
+ MPP_MODE(47,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "present", V_88F6928),
+ MPP_VAR_FUNCTION(2, "sata1", "present", V_88F6928),
+ MPP_VAR_FUNCTION(7, "led", "data", V_88F6920_PLUS)),
+ MPP_MODE(48,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "present", V_88F6928),
+ MPP_VAR_FUNCTION(2, "m", "vtt", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm", "pclk", V_88F6928),
+ MPP_VAR_FUNCTION(4, "audio", "mclk", V_88F6928),
+ MPP_VAR_FUNCTION(5, "sd", "d4", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "pcie0", "clkreq", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ua1", "txd", V_88F6920_PLUS)),
+ MPP_MODE(49,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm", "fsync", V_88F6928),
+ MPP_VAR_FUNCTION(4, "audio", "lrclk", V_88F6928),
+ MPP_VAR_FUNCTION(5, "sd", "d5", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ua2", "rxd", V_88F6920_PLUS)),
+ MPP_MODE(50,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm", "drx", V_88F6928),
+ MPP_VAR_FUNCTION(4, "audio", "extclk", V_88F6928),
+ MPP_VAR_FUNCTION(5, "sd", "cmd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ua2", "rxd", V_88F6920_PLUS)),
+ MPP_MODE(51,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm", "dtx", V_88F6928),
+ MPP_VAR_FUNCTION(4, "audio", "sdo", V_88F6928),
+ MPP_VAR_FUNCTION(5, "m", "decc", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ua2", "txd", V_88F6920_PLUS)),
+ MPP_MODE(52,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm", "intn", V_88F6928),
+ MPP_VAR_FUNCTION(4, "audio", "sdi", V_88F6928),
+ MPP_VAR_FUNCTION(5, "sd", "d6", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "i2c3", "sck", V_88F6920_PLUS)),
+ MPP_MODE(53,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "sata1", "present", V_88F6928),
+ MPP_VAR_FUNCTION(2, "sata0", "present", V_88F6928),
+ MPP_VAR_FUNCTION(3, "tdm", "rstn", V_88F6928),
+ MPP_VAR_FUNCTION(4, "audio", "bclk", V_88F6928),
+ MPP_VAR_FUNCTION(5, "sd", "d7", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "i2c3", "sda", V_88F6920_PLUS)),
+ MPP_MODE(54,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "present", V_88F6928),
+ MPP_VAR_FUNCTION(2, "sata1", "present", V_88F6928),
+ MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "sd", "d3", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ua3", "txd", V_88F6920_PLUS)),
+ MPP_MODE(55,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "cts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs1", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "sd", "d0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "rxd", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(7, "ua3", "rxd", V_88F6920_PLUS)),
+ MPP_MODE(56,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "rts", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "mosi", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "txd", V_88F6920_PLUS)),
+ MPP_MODE(57,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "sck", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "sd", "clk", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "txd", V_88F6920_PLUS)),
+ MPP_MODE(58,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "i2c1", "sck", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie2", "clkreq", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "miso", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "sd", "d1", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(6, "ua1", "rxd", V_88F6920_PLUS)),
+ MPP_MODE(59,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(2, "i2c1", "sda", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs0", V_88F6920_PLUS),
+ MPP_VAR_FUNCTION(5, "sd", "d2", V_88F6920_PLUS)),
+};
+
+static struct mvebu_pinctrl_soc_info armada_39x_pinctrl_info;
+
+static const struct of_device_id armada_39x_pinctrl_of_match[] = {
+ {
+ .compatible = "marvell,mv88f6920-pinctrl",
+ .data = (void *) V_88F6920,
+ },
+ {
+ .compatible = "marvell,mv88f6928-pinctrl",
+ .data = (void *) V_88F6928,
+ },
+ { },
+};
+
+static struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 59, NULL, armada_39x_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 27),
+};
+
+static int armada_39x_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_39x_pinctrl_info;
+ const struct of_device_id *match =
+ of_match_device(armada_39x_pinctrl_of_match, &pdev->dev);
+ struct resource *res;
+
+ if (!match)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
+ soc->variant = (unsigned) match->data & 0xff;
+ soc->controls = armada_39x_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_39x_mpp_controls);
+ soc->gpioranges = armada_39x_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(armada_39x_mpp_gpio_ranges);
+ soc->modes = armada_39x_mpp_modes;
+ soc->nmodes = armada_39x_mpp_controls[0].npins;
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_39x_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_39x_pinctrl_driver = {
+ .driver = {
+ .name = "armada-39x-pinctrl",
+ .of_match_table = of_match_ptr(armada_39x_pinctrl_of_match),
+ },
+ .probe = armada_39x_pinctrl_probe,
+ .remove = armada_39x_pinctrl_remove,
+};
+
+module_platform_driver(armada_39x_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 39x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index fc3376147c18..578db9f033b2 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -34,6 +34,7 @@
#include "pinctrl-mvebu.h"
static void __iomem *mpp_base;
+static u32 *mpp_saved_regs;
static int armada_xp_mpp_ctrl_get(unsigned pid, unsigned long *config)
{
@@ -361,7 +362,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
static struct mvebu_pinctrl_soc_info armada_xp_pinctrl_info;
-static struct of_device_id armada_xp_pinctrl_of_match[] = {
+static const struct of_device_id armada_xp_pinctrl_of_match[] = {
{
.compatible = "marvell,mv78230-pinctrl",
.data = (void *) V_MV78230,
@@ -406,12 +407,42 @@ static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(2, 64, 64, 3),
};
+static int armada_xp_pinctrl_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct mvebu_pinctrl_soc_info *soc =
+ platform_get_drvdata(pdev);
+ int i, nregs;
+
+ nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
+
+ for (i = 0; i < nregs; i++)
+ mpp_saved_regs[i] = readl(mpp_base + i * 4);
+
+ return 0;
+}
+
+static int armada_xp_pinctrl_resume(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc =
+ platform_get_drvdata(pdev);
+ int i, nregs;
+
+ nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
+
+ for (i = 0; i < nregs; i++)
+ writel(mpp_saved_regs[i], mpp_base + i * 4);
+
+ return 0;
+}
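+
+/*
+ * A rough worked example of the save-area sizing used above and in
+ * probe() below, assuming MVEBU_MPPS_PER_REG is 8 (eight 4-bit MPP
+ * fields per 32-bit register): a hypothetical variant with 49 modes
+ * needs DIV_ROUND_UP(49, 8) = 7 u32 slots, so suspend() snapshots
+ * registers mpp_base + 0x00 .. 0x18 and resume() writes them back in
+ * the same order.
+ */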
+
static int armada_xp_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
struct resource *res;
+ int nregs;
if (!match)
return -ENODEV;
@@ -459,6 +490,13 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
break;
}
+ nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
+
+ mpp_saved_regs = devm_kmalloc(&pdev->dev, nregs * sizeof(u32),
+ GFP_KERNEL);
+ if (!mpp_saved_regs)
+ return -ENOMEM;
+
pdev->dev.platform_data = soc;
return mvebu_pinctrl_probe(pdev);
@@ -476,6 +514,8 @@ static struct platform_driver armada_xp_pinctrl_driver = {
},
.probe = armada_xp_pinctrl_probe,
.remove = armada_xp_pinctrl_remove,
+ .suspend = armada_xp_pinctrl_suspend,
+ .resume = armada_xp_pinctrl_resume,
};
module_platform_driver(armada_xp_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index dbc673cf7131..0f07dc554a1d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -456,7 +456,7 @@ static struct mvebu_pinctrl_soc_info mv98dx4122_info = {
.ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges),
};
-static struct of_device_id kirkwood_pinctrl_of_match[] = {
+static const struct of_device_id kirkwood_pinctrl_of_match[] = {
{ .compatible = "marvell,88f6180-pinctrl", .data = &mv88f6180_info },
{ .compatible = "marvell,88f6190-pinctrl", .data = &mv88f6190_info },
{ .compatible = "marvell,88f6192-pinctrl", .data = &mv88f6192_info },
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 3a632efb56bb..3b7122d826e4 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -211,7 +211,7 @@ static struct mvebu_pinctrl_soc_info mv88f5281_info = {
* There are multiple variants of the Orion SoCs, but in terms of pin
* muxing, they are identical.
*/
-static struct of_device_id orion_pinctrl_of_match[] = {
+static const struct of_device_id orion_pinctrl_of_match[] = {
{ .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181l_info },
{ .compatible = "marvell,88f5182-pinctrl", .data = &mv88f5182_info },
{ .compatible = "marvell,88f5281-pinctrl", .data = &mv88f5281_info },
diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig
index d48a5aa24a29..f4fcebfce68c 100644
--- a/drivers/pinctrl/nomadik/Kconfig
+++ b/drivers/pinctrl/nomadik/Kconfig
@@ -30,9 +30,9 @@ if (ARCH_U8500 || ARCH_NOMADIK)
config PINCTRL_NOMADIK
bool "Nomadik pin controller driver"
depends on ARCH_U8500 || ARCH_NOMADIK
+ depends on OF && GPIOLIB
select PINMUX
select PINCONF
- select GPIOLIB
select OF_GPIO
select GPIOLIB_IRQCHIP
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 1806b24faa14..23db4c9ac76c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -466,7 +466,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
break;
default:
- dev_dbg(pct->dev, "unknow alt_setting %d\n", alt_setting);
+ dev_dbg(pct->dev, "unknown alt_setting %d\n", alt_setting);
return -EINVAL;
}
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 4db92f64b4de..e63ad9fbd388 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -283,23 +283,40 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device *dev = pctldev->dev;
unsigned long *configs = NULL;
unsigned num_configs = 0;
- unsigned reserve;
+ unsigned reserve, strings_count;
struct property *prop;
const char *group;
const char *subnode_target_type = "pins";
+ ret = of_property_count_strings(np, "pins");
+ if (ret < 0) {
+ ret = of_property_count_strings(np, "groups");
+ if (ret < 0)
+ /* skip this node; may contain config child nodes */
+ return 0;
+ if (type == PIN_MAP_TYPE_INVALID)
+ type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ subnode_target_type = "groups";
+ } else {
+ if (type == PIN_MAP_TYPE_INVALID)
+ type = PIN_MAP_TYPE_CONFIGS_PIN;
+ }
+ strings_count = ret;
+
ret = of_property_read_string(np, "function", &function);
if (ret < 0) {
/* EINVAL=missing, which is fine since it's optional */
if (ret != -EINVAL)
- dev_err(dev, "could not parse property function\n");
+ dev_err(dev, "%s: could not parse property function\n",
+ of_node_full_name(np));
function = NULL;
}
ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
&num_configs);
if (ret < 0) {
- dev_err(dev, "could not parse node property\n");
+ dev_err(dev, "%s: could not parse node property\n",
+ of_node_full_name(np));
return ret;
}
@@ -309,21 +326,7 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (num_configs)
reserve++;
- ret = of_property_count_strings(np, "pins");
- if (ret < 0) {
- ret = of_property_count_strings(np, "groups");
- if (ret < 0) {
- dev_err(dev, "could not parse property pins/groups\n");
- goto exit;
- }
- if (type == PIN_MAP_TYPE_INVALID)
- type = PIN_MAP_TYPE_CONFIGS_GROUP;
- subnode_target_type = "groups";
- } else {
- if (type == PIN_MAP_TYPE_INVALID)
- type = PIN_MAP_TYPE_CONFIGS_PIN;
- }
- reserve *= ret;
+ reserve *= strings_count;
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
num_maps, reserve);
@@ -367,15 +370,22 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
*map = NULL;
*num_maps = 0;
+ ret = pinconf_generic_dt_subnode_to_map(pctldev, np_config, map,
+ &reserved_maps, num_maps, type);
+ if (ret < 0)
+ goto exit;
+
for_each_child_of_node(np_config, np) {
ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps, type);
- if (ret < 0) {
- pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
- return ret;
- }
+ if (ret < 0)
+ goto exit;
}
return 0;
+
+exit:
+ pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
}
EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
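+
+/*
+ * With the reordering above, both of the usual generic-pinconf layouts
+ * are handled; as a hypothetical sketch (node and property names made
+ * up for illustration), a config node that carries the strings itself:
+ *
+ *	uart0_pins: uart0-pins {
+ *		pins = "PA0", "PA1";
+ *		bias-pull-up;
+ *	};
+ *
+ * is mapped directly by the new top-level call, while a wrapper node
+ * whose "pins"/"groups" live only in child nodes makes
+ * of_property_count_strings() fail for the wrapper itself, so the
+ * subnode parser returns 0 for it and only the children add map
+ * entries.
+ */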
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
new file mode 100644
index 000000000000..7de3b64bf142
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -0,0 +1,869 @@
+/*
+ * GPIO driver for AMD
+ *
+ * Copyright (c) 2014,2015 AMD Corporation.
+ * Authors: Ken Xue <Ken.Xue@amd.com>
+ * Wu, Jeff <Jeff.Wu@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/acpi.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-utils.h"
+#include "pinctrl-amd.h"
+
+static inline struct amd_gpio *to_amd_gpio(struct gpio_chip *gc)
+{
+ return container_of(gc, struct amd_gpio, gc);
+}
+
+static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ unsigned long flags;
+ u32 pin_reg;
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + offset * 4);
+ /*
+ * Suppose the BIOS or bootloader has already set a specific debounce
+ * for the GPIO. If not, set the debounce to 2.75 ms and remove glitches.
+ */
+ if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
+ pin_reg |= 0xf;
+ pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ }
+
+ pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
+ writel(pin_reg, gpio_dev->base + offset * 4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return 0;
+}
+
+static int amd_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + offset * 4);
+ pin_reg |= BIT(OUTPUT_ENABLE_OFF);
+ if (value)
+ pin_reg |= BIT(OUTPUT_VALUE_OFF);
+ else
+ pin_reg &= ~BIT(OUTPUT_VALUE_OFF);
+ writel(pin_reg, gpio_dev->base + offset * 4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return 0;
+}
+
+static int amd_gpio_get_value(struct gpio_chip *gc, unsigned offset)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + offset * 4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return !!(pin_reg & BIT(PIN_STS_OFF));
+}
+
+static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + offset * 4);
+ if (value)
+ pin_reg |= BIT(OUTPUT_VALUE_OFF);
+ else
+ pin_reg &= ~BIT(OUTPUT_VALUE_OFF);
+ writel(pin_reg, gpio_dev->base + offset * 4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+}
+
+static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
+ unsigned debounce)
+{
+ u32 time;
+ u32 pin_reg;
+ int ret = 0;
+ unsigned long flags;
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + offset * 4);
+
+ if (debounce) {
+ pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ pin_reg &= ~DB_TMR_OUT_MASK;
+ /*
+  * DebounceTmrLarge  DebounceTmrOutUnit  Timer Unit              Max Debounce Time
+  * 0                 0                   61 usec (2 RtcClk)      976 usec
+  * 0                 1                   244 usec (8 RtcClk)     3.9 msec
+  * 1                 0                   15.6 msec (512 RtcClk)  250 msec
+  * 1                 1                   62.5 msec (2048 RtcClk) 1 sec
+  */
+
+ if (debounce < 61) {
+ pin_reg |= 1;
+ pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ } else if (debounce < 976) {
+ time = debounce / 61;
+ pin_reg |= time & DB_TMR_OUT_MASK;
+ pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ } else if (debounce < 3900) {
+ time = debounce / 244;
+ pin_reg |= time & DB_TMR_OUT_MASK;
+ pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ } else if (debounce < 250000) {
+ time = debounce / 15600;
+ pin_reg |= time & DB_TMR_OUT_MASK;
+ pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg |= BIT(DB_TMR_LARGE_OFF);
+ } else if (debounce < 1000000) {
+ time = debounce / 62500;
+ pin_reg |= time & DB_TMR_OUT_MASK;
+ pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg |= BIT(DB_TMR_LARGE_OFF);
+ } else {
+ pin_reg &= ~DB_CNTRl_MASK;
+ ret = -EINVAL;
+ }
+ } else {
+ pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ pin_reg &= ~DB_TMR_OUT_MASK;
+ pin_reg &= ~DB_CNTRl_MASK;
+ }
+ writel(pin_reg, gpio_dev->base + offset * 4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return ret;
+}
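+
+/*
+ * A sketch of how the branches above behave: a request for
+ * debounce = 1000 usec falls into the "< 3900" branch, so
+ * time = 1000 / 244 = 4 ticks are written to the timer field,
+ * DB_TMR_OUT_UNIT is set (244 usec per tick) and DB_TMR_LARGE is
+ * cleared, giving about 4 * 244 = 976 usec of debounce. Requests of
+ * 1000000 usec or more fall through to the final else and return
+ * -EINVAL.
+ */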
+
+#ifdef CONFIG_DEBUG_FS
+static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ unsigned int bank, i, pin_num;
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ char *level_trig;
+ char *active_level;
+ char *interrupt_enable;
+ char *interrupt_mask;
+ char *wake_cntrl0;
+ char *wake_cntrl1;
+ char *wake_cntrl2;
+ char *pin_sts;
+ char *pull_up_sel;
+ char *pull_up_enable;
+ char *pull_down_enable;
+ char *output_value;
+ char *output_enable;
+
+ for (bank = 0; bank < AMD_GPIO_TOTAL_BANKS; bank++) {
+ seq_printf(s, "GPIO bank%d\t", bank);
+
+ switch (bank) {
+ case 0:
+ i = 0;
+ pin_num = AMD_GPIO_PINS_BANK0;
+ break;
+ case 1:
+ i = 64;
+ pin_num = AMD_GPIO_PINS_BANK1 + i;
+ break;
+ case 2:
+ i = 128;
+ pin_num = AMD_GPIO_PINS_BANK2 + i;
+ break;
+ }
+
+ for (; i < pin_num; i++) {
+ seq_printf(s, "pin%d\t", i);
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + i * 4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
+ interrupt_enable = "interrupt is enabled|";
+
+ if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF))
+ && !(pin_reg & BIT(ACTIVE_LEVEL_OFF+1)))
+ active_level = "Active low|";
+ else if (pin_reg & BIT(ACTIVE_LEVEL_OFF)
+ && !(pin_reg & BIT(ACTIVE_LEVEL_OFF+1)))
+ active_level = "Active high|";
+ else if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF))
+ && pin_reg & BIT(ACTIVE_LEVEL_OFF+1))
+ active_level = "Active on both|";
+ else
+ active_level = "Unknow Active level|";
+
+ if (pin_reg & BIT(LEVEL_TRIG_OFF))
+ level_trig = "Level trigger|";
+ else
+ level_trig = "Edge trigger|";
+
+ } else {
+ interrupt_enable =
+ "interrupt is disabled|";
+ active_level = " ";
+ level_trig = " ";
+ }
+
+ if (pin_reg & BIT(INTERRUPT_MASK_OFF))
+ interrupt_mask =
+ "interrupt is unmasked|";
+ else
+ interrupt_mask =
+ "interrupt is masked|";
+
+ if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ wake_cntrl0 = "enable wakeup in S0i3 state|";
+ else
+ wake_cntrl0 = "disable wakeup in S0i3 state|";
+
+ if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ wake_cntrl1 = "enable wakeup in S3 state|";
+ else
+ wake_cntrl1 = "disable wakeup in S3 state|";
+
+ if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ wake_cntrl2 = "enable wakeup in S4/S5 state|";
+ else
+ wake_cntrl2 = "disable wakeup in S4/S5 state|";
+
+ if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
+ pull_up_enable = "pull-up is enabled|";
+ if (pin_reg & BIT(PULL_UP_SEL_OFF))
+ pull_up_sel = "8k pull-up|";
+ else
+ pull_up_sel = "4k pull-up|";
+ } else {
+ pull_up_enable = "pull-up is disabled|";
+ pull_up_sel = " ";
+ }
+
+ if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF))
+ pull_down_enable = "pull-down is enabled|";
+ else
+ pull_down_enable = "Pull-down is disabled|";
+
+ if (pin_reg & BIT(OUTPUT_ENABLE_OFF)) {
+ pin_sts = " ";
+ output_enable = "output is enabled|";
+ if (pin_reg & BIT(OUTPUT_VALUE_OFF))
+ output_value = "output is high|";
+ else
+ output_value = "output is low|";
+ } else {
+ output_enable = "output is disabled|";
+ output_value = " ";
+
+ if (pin_reg & BIT(PIN_STS_OFF))
+ pin_sts = "input is high|";
+ else
+ pin_sts = "input is low|";
+ }
+
+ seq_printf(s, "%s %s %s %s %s %s\n"
+ " %s %s %s %s %s %s %s 0x%x\n",
+ level_trig, active_level, interrupt_enable,
+ interrupt_mask, wake_cntrl0, wake_cntrl1,
+ wake_cntrl2, pin_sts, pull_up_sel,
+ pull_up_enable, pull_down_enable,
+ output_value, output_enable, pin_reg);
+ }
+ }
+}
+#else
+#define amd_gpio_dbg_show NULL
+#endif
+
+static void amd_gpio_irq_enable(struct irq_data *d)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ /*
+  * Suppose the BIOS or bootloader has already set a specific debounce
+  * for the GPIO. If not, set the debounce to 2.75 ms.
+  */
+ if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
+ pin_reg |= 0xf;
+ pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+ pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ }
+ pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
+ pin_reg |= BIT(INTERRUPT_MASK_OFF);
+ writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+}
+
+static void amd_gpio_irq_disable(struct irq_data *d)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg &= ~BIT(INTERRUPT_ENABLE_OFF);
+ pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
+ writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+}
+
+static void amd_gpio_irq_mask(struct irq_data *d)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
+ writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+}
+
+static void amd_gpio_irq_unmask(struct irq_data *d)
+{
+ u32 pin_reg;
+ unsigned long flags;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg |= BIT(INTERRUPT_MASK_OFF);
+ writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+}
+
+static void amd_gpio_irq_eoi(struct irq_data *d)
+{
+ u32 reg;
+ unsigned long flags;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+ reg |= EOI_MASK;
+ writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+}
+
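+/*
+ * Program the trigger mode (edge/level), polarity and glitch filter for
+ * a pin, and install the matching edge or level flow handler.
+ */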
+static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ int ret = 0;
+ u32 pin_reg;
+ unsigned long flags;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+ pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+ pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
+ pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+ pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+ pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+ pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+ pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ break;
+
+ case IRQ_TYPE_NONE:
+ break;
+
+ default:
+ dev_err(&gpio_dev->pdev->dev, "Invalid type value\n");
+ ret = -EINVAL;
+ }
+
+ pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+ writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return ret;
+}
+
+static void amd_irq_ack(struct irq_data *d)
+{
+ /*
+ * Based on the HW design, there is no need to ack the HW
+ * before handling the current IRQ, but this callback is
+ * required by handle_edge_irq().
+ */
+}
+
+static struct irq_chip amd_gpio_irqchip = {
+ .name = "amd_gpio",
+ .irq_ack = amd_irq_ack,
+ .irq_enable = amd_gpio_irq_enable,
+ .irq_disable = amd_gpio_irq_disable,
+ .irq_mask = amd_gpio_irq_mask,
+ .irq_unmask = amd_gpio_irq_unmask,
+ .irq_eoi = amd_gpio_irq_eoi,
+ .irq_set_type = amd_gpio_irq_set_type,
+};
+
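+/*
+ * Chained handler for the shared GPIO interrupt: combine the two 32-bit
+ * wake/interrupt status registers into one 64-bit value, then walk the
+ * set bits and dispatch every pin in the corresponding group of four
+ * whose interrupt or wake status flag is raised.
+ */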
+static void amd_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ u32 i;
+ u32 off;
+ u32 reg;
+ u32 pin_reg;
+ u64 reg64;
+ int handled = 0;
+ unsigned long flags;
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct amd_gpio *gpio_dev = to_amd_gpio(gc);
+
+ chained_irq_enter(chip, desc);
+ /* read both 32-bit wake/interrupt status registers */
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+ reg64 = reg;
+ reg64 = reg64 << 32;
+
+ reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
+ reg64 |= reg;
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ /*
+ * The first 46 bits indicate interrupt status;
+ * each bit covers four interrupt sources.
+ */
+ for (off = 0; off < 46; off++) {
+ if (reg64 & BIT(off)) {
+ for (i = 0; i < 4; i++) {
+ pin_reg = readl(gpio_dev->base +
+ (off * 4 + i) * 4);
+ if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
+ (pin_reg & BIT(WAKE_STS_OFF))) {
+ irq = irq_find_mapping(gc->irqdomain,
+ off * 4 + i);
+ generic_handle_irq(irq);
+ writel(pin_reg,
+ gpio_dev->base
+ + (off * 4 + i) * 4);
+ handled++;
+ }
+ }
+ }
+ }
+
+ if (handled == 0)
+ handle_bad_irq(irq, desc);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+ reg |= EOI_MASK;
+ writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int amd_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
+
+ return gpio_dev->ngroups;
+}
+
+static const char *amd_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
+
+ return gpio_dev->groups[group].name;
+}
+
+static int amd_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = gpio_dev->groups[group].pins;
+ *num_pins = gpio_dev->groups[group].npins;
+ return 0;
+}
+
+static const struct pinctrl_ops amd_pinctrl_ops = {
+ .get_groups_count = amd_get_groups_count,
+ .get_group_name = amd_get_group_name,
+ .get_group_pins = amd_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+#endif
+};
+
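+/* Read a pin's register and unpack the requested config field. */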
+static int amd_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ u32 pin_reg;
+ unsigned arg;
+ unsigned long flags;
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ pin_reg = readl(gpio_dev->base + pin*4);
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ switch (param) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ arg = pin_reg & DB_TMR_OUT_MASK;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = (pin_reg >> PULL_DOWN_ENABLE_OFF) & BIT(0);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = (pin_reg >> PULL_UP_SEL_OFF) & (BIT(0) | BIT(1));
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = (pin_reg >> DRV_STRENGTH_SEL_OFF) & DRV_STRENGTH_SEL_MASK;
+ break;
+
+ default:
+ dev_err(&gpio_dev->pdev->dev, "Invalid config param %04x\n",
+ param);
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
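+/* Apply one or more config parameters to a pin's register. */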
+static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned num_configs)
+{
+ int i;
+ u32 arg;
+ int ret = 0;
+ u32 pin_reg;
+ unsigned long flags;
+ enum pin_config_param param;
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
+
+ spin_lock_irqsave(&gpio_dev->lock, flags);
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+ pin_reg = readl(gpio_dev->base + pin*4);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ pin_reg &= ~DB_TMR_OUT_MASK;
+ pin_reg |= arg & DB_TMR_OUT_MASK;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pin_reg &= ~BIT(PULL_DOWN_ENABLE_OFF);
+ pin_reg |= (arg & BIT(0)) << PULL_DOWN_ENABLE_OFF;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pin_reg &= ~BIT(PULL_UP_SEL_OFF);
+ pin_reg |= (arg & BIT(0)) << PULL_UP_SEL_OFF;
+ pin_reg &= ~BIT(PULL_UP_ENABLE_OFF);
+ pin_reg |= ((arg>>1) & BIT(0)) << PULL_UP_ENABLE_OFF;
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ pin_reg &= ~(DRV_STRENGTH_SEL_MASK
+ << DRV_STRENGTH_SEL_OFF);
+ pin_reg |= (arg & DRV_STRENGTH_SEL_MASK)
+ << DRV_STRENGTH_SEL_OFF;
+ break;
+
+ default:
+ dev_err(&gpio_dev->pdev->dev,
+ "Invalid config param %04x\n", param);
+ ret = -ENOTSUPP;
+ }
+
+ writel(pin_reg, gpio_dev->base + pin*4);
+ }
+ spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return ret;
+}
+
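+/* Group-level pinconf simply delegates to the per-pin helpers above. */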
+static int amd_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const unsigned *pins;
+ unsigned npins;
+ int ret;
+
+ ret = amd_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ if (amd_pinconf_get(pctldev, pins[0], config))
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int amd_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
+{
+ const unsigned *pins;
+ unsigned npins;
+ int i, ret;
+
+ ret = amd_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+ for (i = 0; i < npins; i++) {
+ if (amd_pinconf_set(pctldev, pins[i], configs, num_configs))
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static const struct pinconf_ops amd_pinconf_ops = {
+ .pin_config_get = amd_pinconf_get,
+ .pin_config_set = amd_pinconf_set,
+ .pin_config_group_get = amd_pinconf_group_get,
+ .pin_config_group_set = amd_pinconf_group_set,
+};
+
+static struct pinctrl_desc amd_pinctrl_desc = {
+ .pins = kerncz_pins,
+ .npins = ARRAY_SIZE(kerncz_pins),
+ .pctlops = &amd_pinctrl_ops,
+ .confops = &amd_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
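+/*
+ * Probe: map the MMIO region, register the pinctrl descriptor and the
+ * GPIO chip, add the pin range and hook up the chained IRQ handler.
+ */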
+static int amd_gpio_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int irq_base;
+ struct resource *res;
+ struct amd_gpio *gpio_dev;
+
+ gpio_dev = devm_kzalloc(&pdev->dev,
+ sizeof(struct amd_gpio), GFP_KERNEL);
+ if (!gpio_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&gpio_dev->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get gpio io resource.\n");
+ return -EINVAL;
+ }
+
+ gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!gpio_dev->base)
+ return -ENOMEM;
+
+ irq_base = platform_get_irq(pdev, 0);
+ if (irq_base < 0) {
+ dev_err(&pdev->dev, "Failed to get gpio IRQ.\n");
+ return -EINVAL;
+ }
+
+ gpio_dev->pdev = pdev;
+ gpio_dev->gc.direction_input = amd_gpio_direction_input;
+ gpio_dev->gc.direction_output = amd_gpio_direction_output;
+ gpio_dev->gc.get = amd_gpio_get_value;
+ gpio_dev->gc.set = amd_gpio_set_value;
+ gpio_dev->gc.set_debounce = amd_gpio_set_debounce;
+ gpio_dev->gc.dbg_show = amd_gpio_dbg_show;
+
+ gpio_dev->gc.base = 0;
+ gpio_dev->gc.label = pdev->name;
+ gpio_dev->gc.owner = THIS_MODULE;
+ gpio_dev->gc.dev = &pdev->dev;
+ gpio_dev->gc.ngpio = TOTAL_NUMBER_OF_PINS;
+#if defined(CONFIG_OF_GPIO)
+ gpio_dev->gc.of_node = pdev->dev.of_node;
+#endif
+
+ gpio_dev->groups = kerncz_groups;
+ gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
+
+ amd_pinctrl_desc.name = dev_name(&pdev->dev);
+ gpio_dev->pctrl = pinctrl_register(&amd_pinctrl_desc,
+ &pdev->dev, gpio_dev);
+ if (!gpio_dev->pctrl) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return -ENODEV;
+ }
+
+ ret = gpiochip_add(&gpio_dev->gc);
+ if (ret)
+ goto out1;
+
+ ret = gpiochip_add_pin_range(&gpio_dev->gc, dev_name(&pdev->dev),
+ 0, 0, TOTAL_NUMBER_OF_PINS);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add pin range\n");
+ goto out2;
+ }
+
+ ret = gpiochip_irqchip_add(&gpio_dev->gc,
+ &amd_gpio_irqchip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&pdev->dev, "could not add irqchip\n");
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ gpiochip_set_chained_irqchip(&gpio_dev->gc,
+ &amd_gpio_irqchip,
+ irq_base,
+ amd_gpio_irq_handler);
+
+ platform_set_drvdata(pdev, gpio_dev);
+
+ dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
+ return ret;
+
+out2:
+ gpiochip_remove(&gpio_dev->gc);
+
+out1:
+ pinctrl_unregister(gpio_dev->pctrl);
+ return ret;
+}
+
+static int amd_gpio_remove(struct platform_device *pdev)
+{
+ struct amd_gpio *gpio_dev;
+
+ gpio_dev = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&gpio_dev->gc);
+ pinctrl_unregister(gpio_dev->pctrl);
+
+ return 0;
+}
+
+static const struct acpi_device_id amd_gpio_acpi_match[] = {
+ { "AMD0030", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);
+
+static struct platform_driver amd_gpio_driver = {
+ .driver = {
+ .name = "amd_gpio",
+ .owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(amd_gpio_acpi_match),
+ },
+ .probe = amd_gpio_probe,
+ .remove = amd_gpio_remove,
+};
+
+module_platform_driver(amd_gpio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ken Xue <Ken.Xue@amd.com>, Jeff Wu <Jeff.Wu@amd.com>");
+MODULE_DESCRIPTION("AMD GPIO pinctrl driver");
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
new file mode 100644
index 000000000000..7bfea47dbb47
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -0,0 +1,261 @@
+/*
+ * GPIO driver for AMD
+ *
+ * Copyright (c) 2014,2015 Ken Xue <Ken.Xue@amd.com>
+ * Jeff Wu <Jeff.Wu@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _PINCTRL_AMD_H
+#define _PINCTRL_AMD_H
+
+#define TOTAL_NUMBER_OF_PINS 192
+#define AMD_GPIO_PINS_PER_BANK 64
+#define AMD_GPIO_TOTAL_BANKS 3
+
+#define AMD_GPIO_PINS_BANK0 63
+#define AMD_GPIO_PINS_BANK1 64
+#define AMD_GPIO_PINS_BANK2 56
+
+#define WAKE_INT_MASTER_REG 0xfc
+#define EOI_MASK (1 << 29)
+
+#define WAKE_INT_STATUS_REG0 0x2f8
+#define WAKE_INT_STATUS_REG1 0x2fc
+
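+/*
+ * Each GPIO pin is controlled by a single 32-bit register at
+ * base + pin * 4. The *_OFF values below are bit positions within
+ * that register.
+ */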
+#define DB_TMR_OUT_OFF 0
+#define DB_TMR_OUT_UNIT_OFF 4
+#define DB_CNTRL_OFF 5
+#define DB_TMR_LARGE_OFF 7
+#define LEVEL_TRIG_OFF 8
+#define ACTIVE_LEVEL_OFF 9
+#define INTERRUPT_ENABLE_OFF 11
+#define INTERRUPT_MASK_OFF 12
+#define WAKE_CNTRL_OFF 13
+#define PIN_STS_OFF 16
+#define DRV_STRENGTH_SEL_OFF 17
+#define PULL_UP_SEL_OFF 19
+#define PULL_UP_ENABLE_OFF 20
+#define PULL_DOWN_ENABLE_OFF 21
+#define OUTPUT_VALUE_OFF 22
+#define OUTPUT_ENABLE_OFF 23
+#define SW_CNTRL_IN_OFF 24
+#define SW_CNTRL_EN_OFF 25
+#define INTERRUPT_STS_OFF 28
+#define WAKE_STS_OFF 29
+
+#define DB_TMR_OUT_MASK 0xFUL
+#define DB_CNTRl_MASK 0x3UL
+#define ACTIVE_LEVEL_MASK 0x3UL
+#define DRV_STRENGTH_SEL_MASK 0x3UL
+
+#define DB_TYPE_NO_DEBOUNCE 0x0UL
+#define DB_TYPE_PRESERVE_LOW_GLITCH 0x1UL
+#define DB_TYPE_PRESERVE_HIGH_GLITCH 0x2UL
+#define DB_TYPE_REMOVE_GLITCH 0x3UL
+
+#define EDGE_TRAGGER 0x0UL
+#define LEVEL_TRIGGER 0x1UL
+
+#define ACTIVE_HIGH 0x0UL
+#define ACTIVE_LOW 0x1UL
+#define BOTH_EADGE 0x2UL
+
+#define ENABLE_INTERRUPT 0x1UL
+#define DISABLE_INTERRUPT 0x0UL
+
+#define ENABLE_INTERRUPT_MASK 0x0UL
+#define DISABLE_INTERRUPT_MASK 0x1UL
+
+#define CLR_INTR_STAT 0x1UL
+
+struct amd_pingroup {
+ const char *name;
+ const unsigned *pins;
+ unsigned npins;
+};
+
+struct amd_function {
+ const char *name;
+ const char * const *groups;
+ unsigned ngroups;
+};
+
+struct amd_gpio {
+ spinlock_t lock;
+ void __iomem *base;
+
+ const struct amd_pingroup *groups;
+ u32 ngroups;
+ struct pinctrl_dev *pctrl;
+ struct gpio_chip gc;
+ struct resource *res;
+ struct platform_device *pdev;
+};
+
+/* KERNCZ configuration */
+static const struct pinctrl_pin_desc kerncz_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(43, "GPIO_42"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+};
+
+static const unsigned i2c0_pins[] = {145, 146};
+static const unsigned i2c1_pins[] = {147, 148};
+static const unsigned i2c2_pins[] = {113, 114};
+static const unsigned i2c3_pins[] = {19, 20};
+
+static const unsigned uart0_pins[] = {135, 136, 137, 138, 139};
+static const unsigned uart1_pins[] = {140, 141, 142, 143, 144};
+
+static const struct amd_pingroup kerncz_groups[] = {
+ {
+ .name = "i2c0",
+ .pins = i2c0_pins,
+ .npins = 2,
+ },
+ {
+ .name = "i2c1",
+ .pins = i2c1_pins,
+ .npins = 2,
+ },
+ {
+ .name = "i2c2",
+ .pins = i2c2_pins,
+ .npins = 2,
+ },
+ {
+ .name = "i2c3",
+ .pins = i2c3_pins,
+ .npins = 2,
+ },
+ {
+ .name = "uart0",
+ .pins = uart0_pins,
+ .npins = 5,
+ },
+ {
+ .name = "uart1",
+ .pins = uart1_pins,
+ .npins = 5,
+ },
+};
+
+#endif
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 169b1bfa00c8..db0571ffbe99 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -625,7 +625,7 @@ static int as3722_pinctrl_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id as3722_pinctrl_of_match[] = {
+static const struct of_device_id as3722_pinctrl_of_match[] = {
{ .compatible = "ams,as3722-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index a4814066ea08..2f797cb7e205 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -451,18 +451,18 @@ static enum at91_mux at91_mux_get_periph(void __iomem *pio, unsigned mask)
static bool at91_mux_get_deglitch(void __iomem *pio, unsigned pin)
{
- return (__raw_readl(pio + PIO_IFSR) >> pin) & 0x1;
+ return (readl_relaxed(pio + PIO_IFSR) >> pin) & 0x1;
}
static void at91_mux_set_deglitch(void __iomem *pio, unsigned mask, bool is_on)
{
- __raw_writel(mask, pio + (is_on ? PIO_IFER : PIO_IFDR));
+ writel_relaxed(mask, pio + (is_on ? PIO_IFER : PIO_IFDR));
}
static bool at91_mux_pio3_get_deglitch(void __iomem *pio, unsigned pin)
{
- if ((__raw_readl(pio + PIO_IFSR) >> pin) & 0x1)
- return !((__raw_readl(pio + PIO_IFSCSR) >> pin) & 0x1);
+ if ((readl_relaxed(pio + PIO_IFSR) >> pin) & 0x1)
+ return !((readl_relaxed(pio + PIO_IFSCSR) >> pin) & 0x1);
return false;
}
@@ -470,55 +470,55 @@ static bool at91_mux_pio3_get_deglitch(void __iomem *pio, unsigned pin)
static void at91_mux_pio3_set_deglitch(void __iomem *pio, unsigned mask, bool is_on)
{
if (is_on)
- __raw_writel(mask, pio + PIO_IFSCDR);
+ writel_relaxed(mask, pio + PIO_IFSCDR);
at91_mux_set_deglitch(pio, mask, is_on);
}
static bool at91_mux_pio3_get_debounce(void __iomem *pio, unsigned pin, u32 *div)
{
- *div = __raw_readl(pio + PIO_SCDR);
+ *div = readl_relaxed(pio + PIO_SCDR);
- return ((__raw_readl(pio + PIO_IFSR) >> pin) & 0x1) &&
- ((__raw_readl(pio + PIO_IFSCSR) >> pin) & 0x1);
+ return ((readl_relaxed(pio + PIO_IFSR) >> pin) & 0x1) &&
+ ((readl_relaxed(pio + PIO_IFSCSR) >> pin) & 0x1);
}
static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
bool is_on, u32 div)
{
if (is_on) {
- __raw_writel(mask, pio + PIO_IFSCER);
- __raw_writel(div & PIO_SCDR_DIV, pio + PIO_SCDR);
- __raw_writel(mask, pio + PIO_IFER);
+ writel_relaxed(mask, pio + PIO_IFSCER);
+ writel_relaxed(div & PIO_SCDR_DIV, pio + PIO_SCDR);
+ writel_relaxed(mask, pio + PIO_IFER);
} else
- __raw_writel(mask, pio + PIO_IFSCDR);
+ writel_relaxed(mask, pio + PIO_IFSCDR);
}
static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
{
- return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1);
+ return !((readl_relaxed(pio + PIO_PPDSR) >> pin) & 0x1);
}
static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
{
if (is_on)
- __raw_writel(mask, pio + PIO_PUDR);
+ writel_relaxed(mask, pio + PIO_PUDR);
- __raw_writel(mask, pio + (is_on ? PIO_PPDER : PIO_PPDDR));
+ writel_relaxed(mask, pio + (is_on ? PIO_PPDER : PIO_PPDDR));
}
static void at91_mux_pio3_disable_schmitt_trig(void __iomem *pio, unsigned mask)
{
- __raw_writel(__raw_readl(pio + PIO_SCHMITT) | mask, pio + PIO_SCHMITT);
+ writel_relaxed(readl_relaxed(pio + PIO_SCHMITT) | mask, pio + PIO_SCHMITT);
}
static bool at91_mux_pio3_get_schmitt_trig(void __iomem *pio, unsigned pin)
{
- return (__raw_readl(pio + PIO_SCHMITT) >> pin) & 0x1;
+ return (readl_relaxed(pio + PIO_SCHMITT) >> pin) & 0x1;
}
static inline u32 read_drive_strength(void __iomem *reg, unsigned pin)
{
- unsigned tmp = __raw_readl(reg);
+ unsigned tmp = readl_relaxed(reg);
tmp = tmp >> two_bit_pin_value_shift_amount(pin);
@@ -554,13 +554,13 @@ static unsigned at91_mux_sam9x5_get_drivestrength(void __iomem *pio,
static void set_drive_strength(void __iomem *reg, unsigned pin, u32 strength)
{
- unsigned tmp = __raw_readl(reg);
+ unsigned tmp = readl_relaxed(reg);
unsigned shift = two_bit_pin_value_shift_amount(pin);
tmp &= ~(DRIVE_STRENGTH_MASK << shift);
tmp |= strength << shift;
- __raw_writel(tmp, reg);
+ writel_relaxed(tmp, reg);
}
static void at91_mux_sama5d3_set_drivestrength(void __iomem *pio, unsigned pin,
@@ -1114,7 +1114,7 @@ static int at91_pinctrl_parse_functions(struct device_node *np,
return 0;
}
-static struct of_device_id at91_pinctrl_of_match[] = {
+static const struct of_device_id at91_pinctrl_of_match[] = {
{ .compatible = "atmel,sama5d3-pinctrl", .data = &sama5d3_ops },
{ .compatible = "atmel,at91sam9x5-pinctrl", .data = &at91sam9x5_ops },
{ .compatible = "atmel,at91rm9200-pinctrl", .data = &at91rm9200_ops },
@@ -1240,8 +1240,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
if (!info->pctl) {
dev_err(&pdev->dev, "could not register AT91 pinctrl driver\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
/* We will handle a range of GPIO pins */
@@ -1252,9 +1251,6 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
return 0;
-
-err:
- return ret;
}
static int at91_pinctrl_remove(struct platform_device *pdev)
@@ -1535,9 +1531,9 @@ void at91_pinctrl_gpio_suspend(void)
pio = gpio_chips[i]->regbase;
- backups[i] = __raw_readl(pio + PIO_IMR);
- __raw_writel(backups[i], pio + PIO_IDR);
- __raw_writel(wakeups[i], pio + PIO_IER);
+ backups[i] = readl_relaxed(pio + PIO_IMR);
+ writel_relaxed(backups[i], pio + PIO_IDR);
+ writel_relaxed(wakeups[i], pio + PIO_IER);
if (!wakeups[i])
clk_disable_unprepare(gpio_chips[i]->clock);
@@ -1562,8 +1558,8 @@ void at91_pinctrl_gpio_resume(void)
if (!wakeups[i])
clk_prepare_enable(gpio_chips[i]->clock);
- __raw_writel(wakeups[i], pio + PIO_IDR);
- __raw_writel(backups[i], pio + PIO_IER);
+ writel_relaxed(wakeups[i], pio + PIO_IDR);
+ writel_relaxed(backups[i], pio + PIO_IER);
}
}
@@ -1694,7 +1690,7 @@ static struct gpio_chip at91_gpio_template = {
.ngpio = MAX_NB_GPIO_PER_BANK,
};
-static struct of_device_id at91_gpio_of_match[] = {
+static const struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
{ /* sentinel */ }
diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h
index c7cfad5527d7..eb89ba045228 100644
--- a/drivers/pinctrl/pinctrl-lantiq.h
+++ b/drivers/pinctrl/pinctrl-lantiq.h
@@ -193,4 +193,4 @@ enum ltq_pin {
extern int ltq_pinctrl_register(struct platform_device *pdev,
struct ltq_pinmux_info *info);
extern int ltq_pinctrl_unregister(struct platform_device *pdev);
-#endif /* __PINCTRL_PXA3XX_H */
+#endif /* __PINCTRL_LANTIQ_H */
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 26461e30f0ae..2631df0504bd 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -987,7 +987,7 @@ static struct palmas_pinctrl_data tps80036_pinctrl_data = {
.num_pin_groups = ARRAY_SIZE(tps80036_pingroups),
};
-static struct of_device_id palmas_pinctrl_of_match[] = {
+static const struct of_device_id palmas_pinctrl_of_match[] = {
{ .compatible = "ti,palmas-pinctrl", .data = &tps65913_pinctrl_data},
{ .compatible = "ti,tps65913-pinctrl", .data = &tps65913_pinctrl_data},
{ .compatible = "ti,tps80036-pinctrl", .data = &tps80036_pinctrl_data},
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 69e84427f913..13b45f297727 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1501,7 +1501,7 @@ static void pcs_free_resources(struct pcs_device *pcs)
} \
} while (0);
-static struct of_device_id pcs_of_match[];
+static const struct of_device_id pcs_of_match[];
static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
{
@@ -2000,7 +2000,7 @@ static const struct pcs_soc_data pinconf_single = {
.flags = PCS_FEAT_PINCONF,
};
-static struct of_device_id pcs_of_match[] = {
+static const struct of_device_id pcs_of_match[] = {
{ .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup },
{ .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup },
{ .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup },
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 9e5ec00084bb..65bf73b70e34 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -206,6 +206,8 @@
#define gpio_chip_to_bank(chip) \
container_of(chip, struct st_gpio_bank, gpio_chip)
+#define pc_to_bank(pc) \
+ container_of(pc, struct st_gpio_bank, pc)
enum st_retime_style {
st_retime_style_none,
@@ -398,6 +400,16 @@ static const struct st_pctl_data stih407_flashdata = {
.rt = 100,
};
+static struct st_pio_control *st_get_pio_control(
+ struct pinctrl_dev *pctldev, int pin)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(pctldev, pin);
+ struct st_gpio_bank *bank = gpio_range_to_bank(range);
+
+ return &bank->pc;
+}
+
/* Low level functions.. */
static inline int st_gpio_bank(int gpio)
{
@@ -460,6 +472,20 @@ static void st_pctl_set_function(struct st_pio_control *pc,
regmap_field_write(alt, val);
}
+static unsigned int st_pctl_get_pin_function(struct st_pio_control *pc, int pin)
+{
+ struct regmap_field *alt = pc->alt;
+ unsigned int val;
+ int offset = pin * 4;
+
+ if (!alt)
+ return 0;
+
+ regmap_field_read(alt, &val);
+
+ return (val >> offset) & 0xf;
+}
+
static unsigned long st_pinconf_delay_to_bit(unsigned int delay,
const struct st_pctl_data *data, unsigned long config)
{
@@ -757,6 +783,35 @@ static int st_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct st_gpio_bank *bank = gpio_chip_to_bank(chip);
+ struct st_pio_control pc = bank->pc;
+ unsigned long config;
+ unsigned int direction = 0;
+ unsigned int function;
+ unsigned int value;
+ int i = 0;
+
+ /* Alternate function direction is handled by Pinctrl */
+ function = st_pctl_get_pin_function(&pc, offset);
+ if (function) {
+ st_pinconf_get_direction(&pc, offset, &config);
+ return !ST_PINCONF_UNPACK_OE(config);
+ }
+
+ /*
+ * GPIO direction is handled differently
+ * - See st_gpio_direction() above for an explanation
+ */
+ for (i = 0; i <= 2; i++) {
+ value = readl(bank->base + REG_PIO_PC(i));
+ direction |= ((value >> offset) & 0x1) << i;
+ }
+
+ return (direction == ST_GPIO_DIRECTION_IN);
+}
+
static int st_gpio_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags)
{
@@ -904,16 +959,6 @@ static int st_pmx_get_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static struct st_pio_control *st_get_pio_control(
- struct pinctrl_dev *pctldev, int pin)
-{
- struct pinctrl_gpio_range *range =
- pinctrl_find_gpio_range_from_pin(pctldev, pin);
- struct st_gpio_bank *bank = gpio_range_to_bank(range);
-
- return &bank->pc;
-}
-
static int st_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
unsigned group)
{
@@ -1011,17 +1056,30 @@ static int st_pinconf_get(struct pinctrl_dev *pctldev,
static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
+ struct st_pio_control *pc;
unsigned long config;
+ unsigned int function;
+ int offset = st_gpio_pin(pin_id);
+ char f[16];
mutex_unlock(&pctldev->mutex);
+ pc = st_get_pio_control(pctldev, pin_id);
st_pinconf_get(pctldev, pin_id, &config);
mutex_lock(&pctldev->mutex);
- seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
+
+ function = st_pctl_get_pin_function(pc, offset);
+ if (function)
+ snprintf(f, 10, "Alt Fn %d", function);
+ else
+ snprintf(f, 5, "GPIO");
+
+ seq_printf(s, "[OE:%d,PU:%ld,OD:%ld]\t%s\n"
"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
"de:%ld,rt-clk:%ld,rt-delay:%ld]",
- ST_PINCONF_UNPACK_OE(config),
+ !st_gpio_get_direction(&pc_to_bank(pc)->gpio_chip, offset),
ST_PINCONF_UNPACK_PU(config),
ST_PINCONF_UNPACK_OD(config),
+ f,
ST_PINCONF_UNPACK_RT(config),
ST_PINCONF_UNPACK_RT_INVERTCLK(config),
ST_PINCONF_UNPACK_RT_CLKNOTDATA(config),
@@ -1438,6 +1496,7 @@ static struct gpio_chip st_gpio_template = {
.set = st_gpio_set,
.direction_input = st_gpio_direction_input,
.direction_output = st_gpio_direction_output,
+ .get_direction = st_gpio_get_direction,
.ngpio = ST_GPIO_PINS_PER_BANK,
.of_gpio_n_cells = 1,
.of_xlate = st_gpio_xlate,
@@ -1531,7 +1590,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
return 0;
}
-static struct of_device_id st_pctl_of_match[] = {
+static const struct of_device_id st_pctl_of_match[] = {
{ .compatible = "st,stih415-sbc-pinctrl", .data = &stih415_sbc_data },
{ .compatible = "st,stih415-rear-pinctrl", .data = &stih415_rear_data },
{ .compatible = "st,stih415-left-pinctrl", .data = &stih415_left_data },
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index e5949d51bc52..4c95c2024a1c 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -103,6 +103,7 @@ static const struct cfg_param {
{"nvidia,lock", TEGRA_PINCONF_PARAM_LOCK},
{"nvidia,io-reset", TEGRA_PINCONF_PARAM_IORESET},
{"nvidia,rcv-sel", TEGRA_PINCONF_PARAM_RCV_SEL},
+ {"nvidia,io-hv", TEGRA_PINCONF_PARAM_RCV_SEL},
{"nvidia,high-speed-mode", TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE},
{"nvidia,schmitt", TEGRA_PINCONF_PARAM_SCHMITT},
{"nvidia,low-power-mode", TEGRA_PINCONF_PARAM_LOW_POWER_MODE},
@@ -348,14 +349,24 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
*width = 1;
break;
case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE:
- *bank = g->drv_bank;
- *reg = g->drv_reg;
+ if (pmx->soc->hsm_in_mux) {
+ *bank = g->mux_bank;
+ *reg = g->mux_reg;
+ } else {
+ *bank = g->drv_bank;
+ *reg = g->drv_reg;
+ }
*bit = g->hsm_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_SCHMITT:
- *bank = g->drv_bank;
- *reg = g->drv_reg;
+ if (pmx->soc->schmitt_in_mux) {
+ *bank = g->mux_bank;
+ *reg = g->mux_reg;
+ } else {
+ *bank = g->drv_bank;
+ *reg = g->drv_reg;
+ }
*bit = g->schmitt_bit;
*width = 1;
break;
@@ -390,8 +401,13 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
*width = g->slwr_width;
break;
case TEGRA_PINCONF_PARAM_DRIVE_TYPE:
- *bank = g->drv_bank;
- *reg = g->drv_reg;
+ if (pmx->soc->drvtype_in_mux) {
+ *bank = g->mux_bank;
+ *reg = g->mux_reg;
+ } else {
+ *bank = g->drv_bank;
+ *reg = g->drv_reg;
+ }
*bit = g->drvtype_bit;
*width = 2;
break;
diff --git a/drivers/pinctrl/pinctrl-tegra.h b/drivers/pinctrl/pinctrl-tegra.h
index 8d94d1332e7b..1615db7e3a4b 100644
--- a/drivers/pinctrl/pinctrl-tegra.h
+++ b/drivers/pinctrl/pinctrl-tegra.h
@@ -139,26 +139,26 @@ struct tegra_pingroup {
u32 pupd_bank:2;
u32 tri_bank:2;
u32 drv_bank:2;
- u32 mux_bit:6;
- u32 pupd_bit:6;
- u32 tri_bit:6;
- u32 einput_bit:6;
- u32 odrain_bit:6;
- u32 lock_bit:6;
- u32 ioreset_bit:6;
- u32 rcv_sel_bit:6;
- u32 hsm_bit:6;
- u32 schmitt_bit:6;
- u32 lpmd_bit:6;
- u32 drvdn_bit:6;
- u32 drvup_bit:6;
- u32 slwr_bit:6;
- u32 slwf_bit:6;
- u32 drvtype_bit:6;
- u32 drvdn_width:6;
- u32 drvup_width:6;
- u32 slwr_width:6;
- u32 slwf_width:6;
+ s32 mux_bit:6;
+ s32 pupd_bit:6;
+ s32 tri_bit:6;
+ s32 einput_bit:6;
+ s32 odrain_bit:6;
+ s32 lock_bit:6;
+ s32 ioreset_bit:6;
+ s32 rcv_sel_bit:6;
+ s32 hsm_bit:6;
+ s32 schmitt_bit:6;
+ s32 lpmd_bit:6;
+ s32 drvdn_bit:6;
+ s32 drvup_bit:6;
+ s32 slwr_bit:6;
+ s32 slwf_bit:6;
+ s32 drvtype_bit:6;
+ s32 drvdn_width:6;
+ s32 drvup_width:6;
+ s32 slwr_width:6;
+ s32 slwf_width:6;
};
/**
@@ -182,6 +182,9 @@ struct tegra_pinctrl_soc_data {
unsigned nfunctions;
const struct tegra_pingroup *groups;
unsigned ngroups;
+ bool hsm_in_mux;
+ bool schmitt_in_mux;
+ bool drvtype_in_mux;
};
int tegra_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 52e4ec6386b4..05e49d5137ab 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -1547,6 +1547,7 @@ static struct tegra_function tegra114_functions[] = {
#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
#define PINGROUP_REG(r) ((r) - PINGROUP_REG_A)
#define PINGROUP_BIT_Y(b) (b)
@@ -1572,20 +1573,17 @@ static struct tegra_function tegra114_functions[] = {
.tri_reg = PINGROUP_REG(r), \
.tri_bank = 1, \
.tri_bit = 4, \
- .einput_bit = PINGROUP_BIT_Y(5), \
+ .einput_bit = 5, \
.odrain_bit = PINGROUP_BIT_##od(6), \
- .lock_bit = PINGROUP_BIT_Y(7), \
+ .lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \
.drv_reg = -1, \
}
-#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
-
-#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
- drvdn_b, drvdn_w, drvup_b, drvup_w, \
- slwr_b, slwr_w, slwf_b, slwf_w, \
- drvtype) \
+#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, \
+ drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, \
+ slwf_b, slwf_w, drvtype) \
{ \
.name = "drive_" #pg_name, \
.pins = drive_##pg_name##_pins, \
@@ -1843,6 +1841,9 @@ static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
.nfunctions = ARRAY_SIZE(tegra114_functions),
.groups = tegra114_groups,
.ngroups = ARRAY_SIZE(tegra114_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = false,
+ .drvtype_in_mux = false,
};
static int tegra114_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c
index 2b20906c5356..7cd44c7c296d 100644
--- a/drivers/pinctrl/pinctrl-tegra124.c
+++ b/drivers/pinctrl/pinctrl-tegra124.c
@@ -1536,6 +1536,7 @@ enum tegra_mux {
TEGRA_MUX_CLK,
TEGRA_MUX_CLK12,
TEGRA_MUX_CPU,
+ TEGRA_MUX_CSI,
TEGRA_MUX_DAP,
TEGRA_MUX_DAP1,
TEGRA_MUX_DAP2,
@@ -1544,6 +1545,7 @@ enum tegra_mux {
TEGRA_MUX_DISPLAYA_ALT,
TEGRA_MUX_DISPLAYB,
TEGRA_MUX_DP,
+ TEGRA_MUX_DSI_B,
TEGRA_MUX_DTV,
TEGRA_MUX_EXTPERIPH1,
TEGRA_MUX_EXTPERIPH2,
@@ -1613,8 +1615,6 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT3,
TEGRA_MUX_VIMCLK2,
TEGRA_MUX_VIMCLK2_ALT,
- TEGRA_MUX_CSI,
- TEGRA_MUX_DSI_B,
};
#define FUNCTION(fname) \
@@ -1630,6 +1630,7 @@ static struct tegra_function tegra124_functions[] = {
FUNCTION(clk),
FUNCTION(clk12),
FUNCTION(cpu),
+ FUNCTION(csi),
FUNCTION(dap),
FUNCTION(dap1),
FUNCTION(dap2),
@@ -1638,6 +1639,7 @@ static struct tegra_function tegra124_functions[] = {
FUNCTION(displaya_alt),
FUNCTION(displayb),
FUNCTION(dp),
+ FUNCTION(dsi_b),
FUNCTION(dtv),
FUNCTION(extperiph1),
FUNCTION(extperiph2),
@@ -1707,15 +1709,15 @@ static struct tegra_function tegra124_functions[] = {
FUNCTION(vi_alt3),
FUNCTION(vimclk2),
FUNCTION(vimclk2_alt),
- FUNCTION(csi),
- FUNCTION(dsi_b),
};
#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
#define PINGROUP_REG_A 0x3000 /* bank 1 */
#define MIPI_PAD_CTRL_PINGROUP_REG_A 0x820 /* bank 2 */
+#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
#define PINGROUP_REG(r) ((r) - PINGROUP_REG_A)
+#define MIPI_PAD_CTRL_PINGROUP_REG_Y(r) ((r) - MIPI_PAD_CTRL_PINGROUP_REG_A)
#define PINGROUP_BIT_Y(b) (b)
#define PINGROUP_BIT_N(b) (-1)
@@ -1740,20 +1742,17 @@ static struct tegra_function tegra124_functions[] = {
.tri_reg = PINGROUP_REG(r), \
.tri_bank = 1, \
.tri_bit = 4, \
- .einput_bit = PINGROUP_BIT_Y(5), \
+ .einput_bit = 5, \
.odrain_bit = PINGROUP_BIT_##od(6), \
- .lock_bit = PINGROUP_BIT_Y(7), \
+ .lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \
.drv_reg = -1, \
}
-#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
-
-#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
- drvdn_b, drvdn_w, drvup_b, drvup_w, \
- slwr_b, slwr_w, slwf_b, slwf_w, \
- drvtype) \
+#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, \
+ drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, \
+ slwf_b, slwf_w, drvtype) \
{ \
.name = "drive_" #pg_name, \
.pins = drive_##pg_name##_pins, \
@@ -1782,8 +1781,6 @@ static struct tegra_function tegra124_functions[] = {
.drvtype_bit = PINGROUP_BIT_##drvtype(6), \
}
-#define MIPI_PAD_CTRL_PINGROUP_REG_Y(r) ((r) - MIPI_PAD_CTRL_PINGROUP_REG_A)
-
#define MIPI_PAD_CTRL_PINGROUP(pg_name, r, b, f0, f1) \
{ \
.name = "mipi_pad_ctrl_" #pg_name, \
@@ -2044,8 +2041,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
DRV_PINGROUP(sdio4, 0x9c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
DRV_PINGROUP(ao4, 0x9c8, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- /* pg_name, r b f0, f1 */
- MIPI_PAD_CTRL_PINGROUP(dsi_b, 0x820, 1, CSI, DSI_B)
+ /* pg_name, r, b, f0, f1 */
+ MIPI_PAD_CTRL_PINGROUP(dsi_b, 0x820, 1, CSI, DSI_B),
};
static const struct tegra_pinctrl_soc_data tegra124_pinctrl = {
@@ -2056,6 +2053,9 @@ static const struct tegra_pinctrl_soc_data tegra124_pinctrl = {
.nfunctions = ARRAY_SIZE(tegra124_functions),
.groups = tegra124_groups,
.ngroups = ARRAY_SIZE(tegra124_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = false,
+ .drvtype_in_mux = false,
};
static int tegra124_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c
index d3a5722e4acb..4833db4433d9 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/pinctrl-tegra20.c
@@ -2221,6 +2221,9 @@ static const struct tegra_pinctrl_soc_data tegra20_pinctrl = {
.nfunctions = ARRAY_SIZE(tegra20_functions),
.groups = tegra20_groups,
.ngroups = ARRAY_SIZE(tegra20_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = false,
+ .drvtype_in_mux = false,
};
static int tegra20_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/pinctrl-tegra210.c b/drivers/pinctrl/pinctrl-tegra210.c
new file mode 100644
index 000000000000..252b464901c0
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tegra210.c
@@ -0,0 +1,1588 @@
+/*
+ * Pinctrl data for the NVIDIA Tegra210 pinmux
+ *
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-tegra.h"
+
+/*
+ * Most pins affected by the pinmux can also be GPIOs. Define these first.
+ * These must match how the GPIO driver names/numbers its pins.
+ */
+#define _GPIO(offset) (offset)
+
+#define TEGRA_PIN_PEX_L0_RST_N_PA0 _GPIO(0)
+#define TEGRA_PIN_PEX_L0_CLKREQ_N_PA1 _GPIO(1)
+#define TEGRA_PIN_PEX_WAKE_N_PA2 _GPIO(2)
+#define TEGRA_PIN_PEX_L1_RST_N_PA3 _GPIO(3)
+#define TEGRA_PIN_PEX_L1_CLKREQ_N_PA4 _GPIO(4)
+#define TEGRA_PIN_SATA_LED_ACTIVE_PA5 _GPIO(5)
+#define TEGRA_PIN_PA6 _GPIO(6)
+#define TEGRA_PIN_DAP1_FS_PB0 _GPIO(8)
+#define TEGRA_PIN_DAP1_DIN_PB1 _GPIO(9)
+#define TEGRA_PIN_DAP1_DOUT_PB2 _GPIO(10)
+#define TEGRA_PIN_DAP1_SCLK_PB3 _GPIO(11)
+#define TEGRA_PIN_SPI2_MOSI_PB4 _GPIO(12)
+#define TEGRA_PIN_SPI2_MISO_PB5 _GPIO(13)
+#define TEGRA_PIN_SPI2_SCK_PB6 _GPIO(14)
+#define TEGRA_PIN_SPI2_CS0_PB7 _GPIO(15)
+#define TEGRA_PIN_SPI1_MOSI_PC0 _GPIO(16)
+#define TEGRA_PIN_SPI1_MISO_PC1 _GPIO(17)
+#define TEGRA_PIN_SPI1_SCK_PC2 _GPIO(18)
+#define TEGRA_PIN_SPI1_CS0_PC3 _GPIO(19)
+#define TEGRA_PIN_SPI1_CS1_PC4 _GPIO(20)
+#define TEGRA_PIN_SPI4_SCK_PC5 _GPIO(21)
+#define TEGRA_PIN_SPI4_CS0_PC6 _GPIO(22)
+#define TEGRA_PIN_SPI4_MOSI_PC7 _GPIO(23)
+#define TEGRA_PIN_SPI4_MISO_PD0 _GPIO(24)
+#define TEGRA_PIN_UART3_TX_PD1 _GPIO(25)
+#define TEGRA_PIN_UART3_RX_PD2 _GPIO(26)
+#define TEGRA_PIN_UART3_RTS_PD3 _GPIO(27)
+#define TEGRA_PIN_UART3_CTS_PD4 _GPIO(28)
+#define TEGRA_PIN_DMIC1_CLK_PE0 _GPIO(32)
+#define TEGRA_PIN_DMIC1_DAT_PE1 _GPIO(33)
+#define TEGRA_PIN_DMIC2_CLK_PE2 _GPIO(34)
+#define TEGRA_PIN_DMIC2_DAT_PE3 _GPIO(35)
+#define TEGRA_PIN_DMIC3_CLK_PE4 _GPIO(36)
+#define TEGRA_PIN_DMIC3_DAT_PE5 _GPIO(37)
+#define TEGRA_PIN_PE6 _GPIO(38)
+#define TEGRA_PIN_PE7 _GPIO(39)
+#define TEGRA_PIN_GEN3_I2C_SCL_PF0 _GPIO(40)
+#define TEGRA_PIN_GEN3_I2C_SDA_PF1 _GPIO(41)
+#define TEGRA_PIN_UART2_TX_PG0 _GPIO(48)
+#define TEGRA_PIN_UART2_RX_PG1 _GPIO(49)
+#define TEGRA_PIN_UART2_RTS_PG2 _GPIO(50)
+#define TEGRA_PIN_UART2_CTS_PG3 _GPIO(51)
+#define TEGRA_PIN_WIFI_EN_PH0 _GPIO(56)
+#define TEGRA_PIN_WIFI_RST_PH1 _GPIO(57)
+#define TEGRA_PIN_WIFI_WAKE_AP_PH2 _GPIO(58)
+#define TEGRA_PIN_AP_WAKE_BT_PH3 _GPIO(59)
+#define TEGRA_PIN_BT_RST_PH4 _GPIO(60)
+#define TEGRA_PIN_BT_WAKE_AP_PH5 _GPIO(61)
+#define TEGRA_PIN_PH6 _GPIO(62)
+#define TEGRA_PIN_AP_WAKE_NFC_PH7 _GPIO(63)
+#define TEGRA_PIN_NFC_EN_PI0 _GPIO(64)
+#define TEGRA_PIN_NFC_INT_PI1 _GPIO(65)
+#define TEGRA_PIN_GPS_EN_PI2 _GPIO(66)
+#define TEGRA_PIN_GPS_RST_PI3 _GPIO(67)
+#define TEGRA_PIN_UART4_TX_PI4 _GPIO(68)
+#define TEGRA_PIN_UART4_RX_PI5 _GPIO(69)
+#define TEGRA_PIN_UART4_RTS_PI6 _GPIO(70)
+#define TEGRA_PIN_UART4_CTS_PI7 _GPIO(71)
+#define TEGRA_PIN_GEN1_I2C_SDA_PJ0 _GPIO(72)
+#define TEGRA_PIN_GEN1_I2C_SCL_PJ1 _GPIO(73)
+#define TEGRA_PIN_GEN2_I2C_SCL_PJ2 _GPIO(74)
+#define TEGRA_PIN_GEN2_I2C_SDA_PJ3 _GPIO(75)
+#define TEGRA_PIN_DAP4_FS_PJ4 _GPIO(76)
+#define TEGRA_PIN_DAP4_DIN_PJ5 _GPIO(77)
+#define TEGRA_PIN_DAP4_DOUT_PJ6 _GPIO(78)
+#define TEGRA_PIN_DAP4_SCLK_PJ7 _GPIO(79)
+#define TEGRA_PIN_PK0 _GPIO(80)
+#define TEGRA_PIN_PK1 _GPIO(81)
+#define TEGRA_PIN_PK2 _GPIO(82)
+#define TEGRA_PIN_PK3 _GPIO(83)
+#define TEGRA_PIN_PK4 _GPIO(84)
+#define TEGRA_PIN_PK5 _GPIO(85)
+#define TEGRA_PIN_PK6 _GPIO(86)
+#define TEGRA_PIN_PK7 _GPIO(87)
+#define TEGRA_PIN_PL0 _GPIO(88)
+#define TEGRA_PIN_PL1 _GPIO(89)
+#define TEGRA_PIN_SDMMC1_CLK_PM0 _GPIO(96)
+#define TEGRA_PIN_SDMMC1_CMD_PM1 _GPIO(97)
+#define TEGRA_PIN_SDMMC1_DAT3_PM2 _GPIO(98)
+#define TEGRA_PIN_SDMMC1_DAT2_PM3 _GPIO(99)
+#define TEGRA_PIN_SDMMC1_DAT1_PM4 _GPIO(100)
+#define TEGRA_PIN_SDMMC1_DAT0_PM5 _GPIO(101)
+#define TEGRA_PIN_SDMMC3_CLK_PP0 _GPIO(120)
+#define TEGRA_PIN_SDMMC3_CMD_PP1 _GPIO(121)
+#define TEGRA_PIN_SDMMC3_DAT3_PP2 _GPIO(122)
+#define TEGRA_PIN_SDMMC3_DAT2_PP3 _GPIO(123)
+#define TEGRA_PIN_SDMMC3_DAT1_PP4 _GPIO(124)
+#define TEGRA_PIN_SDMMC3_DAT0_PP5 _GPIO(125)
+#define TEGRA_PIN_CAM1_MCLK_PS0 _GPIO(144)
+#define TEGRA_PIN_CAM2_MCLK_PS1 _GPIO(145)
+#define TEGRA_PIN_CAM_I2C_SCL_PS2 _GPIO(146)
+#define TEGRA_PIN_CAM_I2C_SDA_PS3 _GPIO(147)
+#define TEGRA_PIN_CAM_RST_PS4 _GPIO(148)
+#define TEGRA_PIN_CAM_AF_EN_PS5 _GPIO(149)
+#define TEGRA_PIN_CAM_FLASH_EN_PS6 _GPIO(150)
+#define TEGRA_PIN_CAM1_PWDN_PS7 _GPIO(151)
+#define TEGRA_PIN_CAM2_PWDN_PT0 _GPIO(152)
+#define TEGRA_PIN_CAM1_STROBE_PT1 _GPIO(153)
+#define TEGRA_PIN_UART1_TX_PU0 _GPIO(160)
+#define TEGRA_PIN_UART1_RX_PU1 _GPIO(161)
+#define TEGRA_PIN_UART1_RTS_PU2 _GPIO(162)
+#define TEGRA_PIN_UART1_CTS_PU3 _GPIO(163)
+#define TEGRA_PIN_LCD_BL_PWM_PV0 _GPIO(168)
+#define TEGRA_PIN_LCD_BL_EN_PV1 _GPIO(169)
+#define TEGRA_PIN_LCD_RST_PV2 _GPIO(170)
+#define TEGRA_PIN_LCD_GPIO1_PV3 _GPIO(171)
+#define TEGRA_PIN_LCD_GPIO2_PV4 _GPIO(172)
+#define TEGRA_PIN_AP_READY_PV5 _GPIO(173)
+#define TEGRA_PIN_TOUCH_RST_PV6 _GPIO(174)
+#define TEGRA_PIN_TOUCH_CLK_PV7 _GPIO(175)
+#define TEGRA_PIN_MODEM_WAKE_AP_PX0 _GPIO(184)
+#define TEGRA_PIN_TOUCH_INT_PX1 _GPIO(185)
+#define TEGRA_PIN_MOTION_INT_PX2 _GPIO(186)
+#define TEGRA_PIN_ALS_PROX_INT_PX3 _GPIO(187)
+#define TEGRA_PIN_TEMP_ALERT_PX4 _GPIO(188)
+#define TEGRA_PIN_BUTTON_POWER_ON_PX5 _GPIO(189)
+#define TEGRA_PIN_BUTTON_VOL_UP_PX6 _GPIO(190)
+#define TEGRA_PIN_BUTTON_VOL_DOWN_PX7 _GPIO(191)
+#define TEGRA_PIN_BUTTON_SLIDE_SW_PY0 _GPIO(192)
+#define TEGRA_PIN_BUTTON_HOME_PY1 _GPIO(193)
+#define TEGRA_PIN_LCD_TE_PY2 _GPIO(194)
+#define TEGRA_PIN_PWR_I2C_SCL_PY3 _GPIO(195)
+#define TEGRA_PIN_PWR_I2C_SDA_PY4 _GPIO(196)
+#define TEGRA_PIN_CLK_32K_OUT_PY5 _GPIO(197)
+#define TEGRA_PIN_PZ0 _GPIO(200)
+#define TEGRA_PIN_PZ1 _GPIO(201)
+#define TEGRA_PIN_PZ2 _GPIO(202)
+#define TEGRA_PIN_PZ3 _GPIO(203)
+#define TEGRA_PIN_PZ4 _GPIO(204)
+#define TEGRA_PIN_PZ5 _GPIO(205)
+#define TEGRA_PIN_DAP2_FS_PAA0 _GPIO(208)
+#define TEGRA_PIN_DAP2_SCLK_PAA1 _GPIO(209)
+#define TEGRA_PIN_DAP2_DIN_PAA2 _GPIO(210)
+#define TEGRA_PIN_DAP2_DOUT_PAA3 _GPIO(211)
+#define TEGRA_PIN_AUD_MCLK_PBB0 _GPIO(216)
+#define TEGRA_PIN_DVFS_PWM_PBB1 _GPIO(217)
+#define TEGRA_PIN_DVFS_CLK_PBB2 _GPIO(218)
+#define TEGRA_PIN_GPIO_X1_AUD_PBB3 _GPIO(219)
+#define TEGRA_PIN_GPIO_X3_AUD_PBB4 _GPIO(220)
+#define TEGRA_PIN_HDMI_CEC_PCC0 _GPIO(224)
+#define TEGRA_PIN_HDMI_INT_DP_HPD_PCC1 _GPIO(225)
+#define TEGRA_PIN_SPDIF_OUT_PCC2 _GPIO(226)
+#define TEGRA_PIN_SPDIF_IN_PCC3 _GPIO(227)
+#define TEGRA_PIN_USB_VBUS_EN0_PCC4 _GPIO(228)
+#define TEGRA_PIN_USB_VBUS_EN1_PCC5 _GPIO(229)
+#define TEGRA_PIN_DP_HPD0_PCC6 _GPIO(230)
+#define TEGRA_PIN_PCC7 _GPIO(231)
+#define TEGRA_PIN_SPI2_CS1_PDD0 _GPIO(232)
+#define TEGRA_PIN_QSPI_SCK_PEE0 _GPIO(240)
+#define TEGRA_PIN_QSPI_CS_N_PEE1 _GPIO(241)
+#define TEGRA_PIN_QSPI_IO0_PEE2 _GPIO(242)
+#define TEGRA_PIN_QSPI_IO1_PEE3 _GPIO(243)
+#define TEGRA_PIN_QSPI_IO2_PEE4 _GPIO(244)
+#define TEGRA_PIN_QSPI_IO3_PEE5 _GPIO(245)
+
+/* All non-GPIO pins follow */
+#define NUM_GPIOS (TEGRA_PIN_QSPI_IO3_PEE5 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
+
+/* Non-GPIO pins */
+#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
+#define TEGRA_PIN_CPU_PWR_REQ _PIN(1)
+#define TEGRA_PIN_PWR_INT_N _PIN(2)
+#define TEGRA_PIN_CLK_32K_IN _PIN(3)
+#define TEGRA_PIN_JTAG_RTCK _PIN(4)
+#define TEGRA_PIN_BATT_BCL _PIN(5)
+#define TEGRA_PIN_CLK_REQ _PIN(6)
+#define TEGRA_PIN_SHUTDOWN _PIN(7)
+
+static const struct pinctrl_pin_desc tegra210_pins[] = {
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_RST_N_PA0, "PEX_L0_RST_N PA0"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_CLKREQ_N_PA1, "PEX_L0_CLKREQ_N PA1"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_WAKE_N_PA2, "PEX_WAKE_N PA2"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_RST_N_PA3, "PEX_L1_RST_N PA3"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_CLKREQ_N_PA4, "PEX_L1_CLKREQ_N PA4"),
+ PINCTRL_PIN(TEGRA_PIN_SATA_LED_ACTIVE_PA5, "SATA_LED_ACTIVE PA5"),
+ PINCTRL_PIN(TEGRA_PIN_PA6, "PA6"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PB0, "DAP1_FS PB0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PB1, "DAP1_DIN PB1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PB2, "DAP1_DOUT PB2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PB3, "DAP1_SCLK PB3"),
+ PINCTRL_PIN(TEGRA_PIN_SPI2_MOSI_PB4, "SPI2_MOSI PB4"),
+ PINCTRL_PIN(TEGRA_PIN_SPI2_MISO_PB5, "SPI2_MISO PB5"),
+ PINCTRL_PIN(TEGRA_PIN_SPI2_SCK_PB6, "SPI2_SCK PB6"),
+ PINCTRL_PIN(TEGRA_PIN_SPI2_CS0_PB7, "SPI2_CS0 PB7"),
+ PINCTRL_PIN(TEGRA_PIN_SPI1_MOSI_PC0, "SPI1_MOSI PC0"),
+ PINCTRL_PIN(TEGRA_PIN_SPI1_MISO_PC1, "SPI1_MISO PC1"),
+ PINCTRL_PIN(TEGRA_PIN_SPI1_SCK_PC2, "SPI1_SCK PC2"),
+ PINCTRL_PIN(TEGRA_PIN_SPI1_CS0_PC3, "SPI1_CS0 PC3"),
+ PINCTRL_PIN(TEGRA_PIN_SPI1_CS1_PC4, "SPI1_CS1 PC4"),
+ PINCTRL_PIN(TEGRA_PIN_SPI4_SCK_PC5, "SPI4_SCK PC5"),
+ PINCTRL_PIN(TEGRA_PIN_SPI4_CS0_PC6, "SPI4_CS0 PC6"),
+ PINCTRL_PIN(TEGRA_PIN_SPI4_MOSI_PC7, "SPI4_MOSI PC7"),
+ PINCTRL_PIN(TEGRA_PIN_SPI4_MISO_PD0, "SPI4_MISO PD0"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_TX_PD1, "UART3_TX PD1"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RX_PD2, "UART3_RX PD2"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RTS_PD3, "UART3_RTS PD3"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_CTS_PD4, "UART3_CTS PD4"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC1_CLK_PE0, "DMIC1_CLK PE0"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC1_DAT_PE1, "DMIC1_DAT PE1"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC2_CLK_PE2, "DMIC2_CLK PE2"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC2_DAT_PE3, "DMIC2_DAT PE3"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC3_CLK_PE4, "DMIC3_CLK PE4"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC3_DAT_PE5, "DMIC3_DAT PE5"),
+ PINCTRL_PIN(TEGRA_PIN_PE6, "PE6"),
+ PINCTRL_PIN(TEGRA_PIN_PE7, "PE7"),
+ PINCTRL_PIN(TEGRA_PIN_GEN3_I2C_SCL_PF0, "GEN3_I2C_SCL PF0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN3_I2C_SDA_PF1, "GEN3_I2C_SDA PF1"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_TX_PG0, "UART2_TX PG0"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RX_PG1, "UART2_RX PG1"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RTS_PG2, "UART2_RTS PG2"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_CTS_PG3, "UART2_CTS PG3"),
+ PINCTRL_PIN(TEGRA_PIN_WIFI_EN_PH0, "WIFI_EN PH0"),
+ PINCTRL_PIN(TEGRA_PIN_WIFI_RST_PH1, "WIFI_RST PH1"),
+ PINCTRL_PIN(TEGRA_PIN_WIFI_WAKE_AP_PH2, "WIFI_WAKE_AP PH2"),
+ PINCTRL_PIN(TEGRA_PIN_AP_WAKE_BT_PH3, "AP_WAKE_BT PH3"),
+ PINCTRL_PIN(TEGRA_PIN_BT_RST_PH4, "BT_RST PH4"),
+ PINCTRL_PIN(TEGRA_PIN_BT_WAKE_AP_PH5, "BT_WAKE_AP PH5"),
+ PINCTRL_PIN(TEGRA_PIN_PH6, "PH6"),
+ PINCTRL_PIN(TEGRA_PIN_AP_WAKE_NFC_PH7, "AP_WAKE_NFC PH7"),
+ PINCTRL_PIN(TEGRA_PIN_NFC_EN_PI0, "NFC_EN PI0"),
+ PINCTRL_PIN(TEGRA_PIN_NFC_INT_PI1, "NFC_INT PI1"),
+ PINCTRL_PIN(TEGRA_PIN_GPS_EN_PI2, "GPS_EN PI2"),
+ PINCTRL_PIN(TEGRA_PIN_GPS_RST_PI3, "GPS_RST PI3"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_TX_PI4, "UART4_TX PI4"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_RX_PI5, "UART4_RX PI5"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_RTS_PI6, "UART4_RTS PI6"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_CTS_PI7, "UART4_CTS PI7"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PJ0, "GEN1_I2C_SDA PJ0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PJ1, "GEN1_I2C_SCL PJ1"),
+ PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PJ2, "GEN2_I2C_SCL PJ2"),
+ PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PJ3, "GEN2_I2C_SDA PJ3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PJ4, "DAP4_FS PJ4"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PJ5, "DAP4_DIN PJ5"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PJ6, "DAP4_DOUT PJ6"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PJ7, "DAP4_SCLK PJ7"),
+ PINCTRL_PIN(TEGRA_PIN_PK0, "PK0"),
+ PINCTRL_PIN(TEGRA_PIN_PK1, "PK1"),
+ PINCTRL_PIN(TEGRA_PIN_PK2, "PK2"),
+ PINCTRL_PIN(TEGRA_PIN_PK3, "PK3"),
+ PINCTRL_PIN(TEGRA_PIN_PK4, "PK4"),
+ PINCTRL_PIN(TEGRA_PIN_PK5, "PK5"),
+ PINCTRL_PIN(TEGRA_PIN_PK6, "PK6"),
+ PINCTRL_PIN(TEGRA_PIN_PK7, "PK7"),
+ PINCTRL_PIN(TEGRA_PIN_PL0, "PL0"),
+ PINCTRL_PIN(TEGRA_PIN_PL1, "PL1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PM0, "SDMMC1_CLK PM0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PM1, "SDMMC1_CMD PM1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PM2, "SDMMC1_DAT3 PM2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PM3, "SDMMC1_DAT2 PM3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PM4, "SDMMC1_DAT1 PM4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PM5, "SDMMC1_DAT0 PM5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PP0, "SDMMC3_CLK PP0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PP1, "SDMMC3_CMD PP1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PP2, "SDMMC3_DAT3 PP2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PP3, "SDMMC3_DAT2 PP3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PP4, "SDMMC3_DAT1 PP4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PP5, "SDMMC3_DAT0 PP5"),
+ PINCTRL_PIN(TEGRA_PIN_CAM1_MCLK_PS0, "CAM1_MCLK PS0"),
+ PINCTRL_PIN(TEGRA_PIN_CAM2_MCLK_PS1, "CAM2_MCLK PS1"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PS2, "CAM_I2C_SCL PS2"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PS3, "CAM_I2C_SDA PS3"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_RST_PS4, "CAM_RST PS4"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_AF_EN_PS5, "CAM_AF_EN PS5"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_FLASH_EN_PS6, "CAM_FLASH_EN PS6"),
+ PINCTRL_PIN(TEGRA_PIN_CAM1_PWDN_PS7, "CAM1_PWDN PS7"),
+ PINCTRL_PIN(TEGRA_PIN_CAM2_PWDN_PT0, "CAM2_PWDN PT0"),
+ PINCTRL_PIN(TEGRA_PIN_CAM1_STROBE_PT1, "CAM1_STROBE PT1"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_TX_PU0, "UART1_TX PU0"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_RX_PU1, "UART1_RX PU1"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_RTS_PU2, "UART1_RTS PU2"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_CTS_PU3, "UART1_CTS PU3"),
+ PINCTRL_PIN(TEGRA_PIN_LCD_BL_PWM_PV0, "LCD_BL_PWM PV0"),
+ PINCTRL_PIN(TEGRA_PIN_LCD_BL_EN_PV1, "LCD_BL_EN PV1"),
+ PINCTRL_PIN(TEGRA_PIN_LCD_RST_PV2, "LCD_RST PV2"),
+ PINCTRL_PIN(TEGRA_PIN_LCD_GPIO1_PV3, "LCD_GPIO1 PV3"),
+ PINCTRL_PIN(TEGRA_PIN_LCD_GPIO2_PV4, "LCD_GPIO2 PV4"),
+ PINCTRL_PIN(TEGRA_PIN_AP_READY_PV5, "AP_READY PV5"),
+ PINCTRL_PIN(TEGRA_PIN_TOUCH_RST_PV6, "TOUCH_RST PV6"),
+ PINCTRL_PIN(TEGRA_PIN_TOUCH_CLK_PV7, "TOUCH_CLK PV7"),
+ PINCTRL_PIN(TEGRA_PIN_MODEM_WAKE_AP_PX0, "MODEM_WAKE_AP PX0"),
+ PINCTRL_PIN(TEGRA_PIN_TOUCH_INT_PX1, "TOUCH_INT PX1"),
+ PINCTRL_PIN(TEGRA_PIN_MOTION_INT_PX2, "MOTION_INT PX2"),
+ PINCTRL_PIN(TEGRA_PIN_ALS_PROX_INT_PX3, "ALS_PROX_INT PX3"),
+ PINCTRL_PIN(TEGRA_PIN_TEMP_ALERT_PX4, "TEMP_ALERT PX4"),
+ PINCTRL_PIN(TEGRA_PIN_BUTTON_POWER_ON_PX5, "BUTTON_POWER_ON PX5"),
+ PINCTRL_PIN(TEGRA_PIN_BUTTON_VOL_UP_PX6, "BUTTON_VOL_UP PX6"),
+ PINCTRL_PIN(TEGRA_PIN_BUTTON_VOL_DOWN_PX7, "BUTTON_VOL_DOWN PX7"),
+ PINCTRL_PIN(TEGRA_PIN_BUTTON_SLIDE_SW_PY0, "BUTTON_SLIDE_SW PY0"),
+ PINCTRL_PIN(TEGRA_PIN_BUTTON_HOME_PY1, "BUTTON_HOME PY1"),
+ PINCTRL_PIN(TEGRA_PIN_LCD_TE_PY2, "LCD_TE PY2"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PY3, "PWR_I2C_SCL PY3"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PY4, "PWR_I2C_SDA PY4"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PY5, "CLK_32K_OUT PY5"),
+ PINCTRL_PIN(TEGRA_PIN_PZ0, "PZ0"),
+ PINCTRL_PIN(TEGRA_PIN_PZ1, "PZ1"),
+ PINCTRL_PIN(TEGRA_PIN_PZ2, "PZ2"),
+ PINCTRL_PIN(TEGRA_PIN_PZ3, "PZ3"),
+ PINCTRL_PIN(TEGRA_PIN_PZ4, "PZ4"),
+ PINCTRL_PIN(TEGRA_PIN_PZ5, "PZ5"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PAA0, "DAP2_FS PAA0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PAA1, "DAP2_SCLK PAA1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PAA2, "DAP2_DIN PAA2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PAA3, "DAP2_DOUT PAA3"),
+ PINCTRL_PIN(TEGRA_PIN_AUD_MCLK_PBB0, "AUD_MCLK PBB0"),
+ PINCTRL_PIN(TEGRA_PIN_DVFS_PWM_PBB1, "DVFS_PWM PBB1"),
+ PINCTRL_PIN(TEGRA_PIN_DVFS_CLK_PBB2, "DVFS_CLK PBB2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X1_AUD_PBB3, "GPIO_X1_AUD PBB3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X3_AUD_PBB4, "GPIO_X3_AUD PBB4"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PCC0, "HDMI_CEC PCC0"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_INT_DP_HPD_PCC1, "HDMI_INT_DP_HPD PCC1"),
+ PINCTRL_PIN(TEGRA_PIN_SPDIF_OUT_PCC2, "SPDIF_OUT PCC2"),
+ PINCTRL_PIN(TEGRA_PIN_SPDIF_IN_PCC3, "SPDIF_IN PCC3"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN0_PCC4, "USB_VBUS_EN0 PCC4"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN1_PCC5, "USB_VBUS_EN1 PCC5"),
+ PINCTRL_PIN(TEGRA_PIN_DP_HPD0_PCC6, "DP_HPD0 PCC6"),
+ PINCTRL_PIN(TEGRA_PIN_PCC7, "PCC7"),
+ PINCTRL_PIN(TEGRA_PIN_SPI2_CS1_PDD0, "SPI2_CS1 PDD0"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_SCK_PEE0, "QSPI_SCK PEE0"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_CS_N_PEE1, "QSPI_CS_N PEE1"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO0_PEE2, "QSPI_IO0 PEE2"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO1_PEE3, "QSPI_IO1 PEE3"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO2_PEE4, "QSPI_IO2 PEE4"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO3_PEE5, "QSPI_IO3 PEE5"),
+ PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
+ PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
+ PINCTRL_PIN(TEGRA_PIN_BATT_BCL, "BATT_BCL"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_REQ, "CLK_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_SHUTDOWN, "SHUTDOWN"),
+};
+
+static const unsigned pex_l0_rst_n_pa0_pins[] = {
+ TEGRA_PIN_PEX_L0_RST_N_PA0,
+};
+
+static const unsigned pex_l0_clkreq_n_pa1_pins[] = {
+ TEGRA_PIN_PEX_L0_CLKREQ_N_PA1,
+};
+
+static const unsigned pex_wake_n_pa2_pins[] = {
+ TEGRA_PIN_PEX_WAKE_N_PA2,
+};
+
+static const unsigned pex_l1_rst_n_pa3_pins[] = {
+ TEGRA_PIN_PEX_L1_RST_N_PA3,
+};
+
+static const unsigned pex_l1_clkreq_n_pa4_pins[] = {
+ TEGRA_PIN_PEX_L1_CLKREQ_N_PA4,
+};
+
+static const unsigned sata_led_active_pa5_pins[] = {
+ TEGRA_PIN_SATA_LED_ACTIVE_PA5,
+};
+
+static const unsigned pa6_pins[] = {
+ TEGRA_PIN_PA6,
+};
+
+static const unsigned dap1_fs_pb0_pins[] = {
+ TEGRA_PIN_DAP1_FS_PB0,
+};
+
+static const unsigned dap1_din_pb1_pins[] = {
+ TEGRA_PIN_DAP1_DIN_PB1,
+};
+
+static const unsigned dap1_dout_pb2_pins[] = {
+ TEGRA_PIN_DAP1_DOUT_PB2,
+};
+
+static const unsigned dap1_sclk_pb3_pins[] = {
+ TEGRA_PIN_DAP1_SCLK_PB3,
+};
+
+static const unsigned spi2_mosi_pb4_pins[] = {
+ TEGRA_PIN_SPI2_MOSI_PB4,
+};
+
+static const unsigned spi2_miso_pb5_pins[] = {
+ TEGRA_PIN_SPI2_MISO_PB5,
+};
+
+static const unsigned spi2_sck_pb6_pins[] = {
+ TEGRA_PIN_SPI2_SCK_PB6,
+};
+
+static const unsigned spi2_cs0_pb7_pins[] = {
+ TEGRA_PIN_SPI2_CS0_PB7,
+};
+
+static const unsigned spi1_mosi_pc0_pins[] = {
+ TEGRA_PIN_SPI1_MOSI_PC0,
+};
+
+static const unsigned spi1_miso_pc1_pins[] = {
+ TEGRA_PIN_SPI1_MISO_PC1,
+};
+
+static const unsigned spi1_sck_pc2_pins[] = {
+ TEGRA_PIN_SPI1_SCK_PC2,
+};
+
+static const unsigned spi1_cs0_pc3_pins[] = {
+ TEGRA_PIN_SPI1_CS0_PC3,
+};
+
+static const unsigned spi1_cs1_pc4_pins[] = {
+ TEGRA_PIN_SPI1_CS1_PC4,
+};
+
+static const unsigned spi4_sck_pc5_pins[] = {
+ TEGRA_PIN_SPI4_SCK_PC5,
+};
+
+static const unsigned spi4_cs0_pc6_pins[] = {
+ TEGRA_PIN_SPI4_CS0_PC6,
+};
+
+static const unsigned spi4_mosi_pc7_pins[] = {
+ TEGRA_PIN_SPI4_MOSI_PC7,
+};
+
+static const unsigned spi4_miso_pd0_pins[] = {
+ TEGRA_PIN_SPI4_MISO_PD0,
+};
+
+static const unsigned uart3_tx_pd1_pins[] = {
+ TEGRA_PIN_UART3_TX_PD1,
+};
+
+static const unsigned uart3_rx_pd2_pins[] = {
+ TEGRA_PIN_UART3_RX_PD2,
+};
+
+static const unsigned uart3_rts_pd3_pins[] = {
+ TEGRA_PIN_UART3_RTS_PD3,
+};
+
+static const unsigned uart3_cts_pd4_pins[] = {
+ TEGRA_PIN_UART3_CTS_PD4,
+};
+
+static const unsigned dmic1_clk_pe0_pins[] = {
+ TEGRA_PIN_DMIC1_CLK_PE0,
+};
+
+static const unsigned dmic1_dat_pe1_pins[] = {
+ TEGRA_PIN_DMIC1_DAT_PE1,
+};
+
+static const unsigned dmic2_clk_pe2_pins[] = {
+ TEGRA_PIN_DMIC2_CLK_PE2,
+};
+
+static const unsigned dmic2_dat_pe3_pins[] = {
+ TEGRA_PIN_DMIC2_DAT_PE3,
+};
+
+static const unsigned dmic3_clk_pe4_pins[] = {
+ TEGRA_PIN_DMIC3_CLK_PE4,
+};
+
+static const unsigned dmic3_dat_pe5_pins[] = {
+ TEGRA_PIN_DMIC3_DAT_PE5,
+};
+
+static const unsigned pe6_pins[] = {
+ TEGRA_PIN_PE6,
+};
+
+static const unsigned pe7_pins[] = {
+ TEGRA_PIN_PE7,
+};
+
+static const unsigned gen3_i2c_scl_pf0_pins[] = {
+ TEGRA_PIN_GEN3_I2C_SCL_PF0,
+};
+
+static const unsigned gen3_i2c_sda_pf1_pins[] = {
+ TEGRA_PIN_GEN3_I2C_SDA_PF1,
+};
+
+static const unsigned uart2_tx_pg0_pins[] = {
+ TEGRA_PIN_UART2_TX_PG0,
+};
+
+static const unsigned uart2_rx_pg1_pins[] = {
+ TEGRA_PIN_UART2_RX_PG1,
+};
+
+static const unsigned uart2_rts_pg2_pins[] = {
+ TEGRA_PIN_UART2_RTS_PG2,
+};
+
+static const unsigned uart2_cts_pg3_pins[] = {
+ TEGRA_PIN_UART2_CTS_PG3,
+};
+
+static const unsigned wifi_en_ph0_pins[] = {
+ TEGRA_PIN_WIFI_EN_PH0,
+};
+
+static const unsigned wifi_rst_ph1_pins[] = {
+ TEGRA_PIN_WIFI_RST_PH1,
+};
+
+static const unsigned wifi_wake_ap_ph2_pins[] = {
+ TEGRA_PIN_WIFI_WAKE_AP_PH2,
+};
+
+static const unsigned ap_wake_bt_ph3_pins[] = {
+ TEGRA_PIN_AP_WAKE_BT_PH3,
+};
+
+static const unsigned bt_rst_ph4_pins[] = {
+ TEGRA_PIN_BT_RST_PH4,
+};
+
+static const unsigned bt_wake_ap_ph5_pins[] = {
+ TEGRA_PIN_BT_WAKE_AP_PH5,
+};
+
+static const unsigned ph6_pins[] = {
+ TEGRA_PIN_PH6,
+};
+
+static const unsigned ap_wake_nfc_ph7_pins[] = {
+ TEGRA_PIN_AP_WAKE_NFC_PH7,
+};
+
+static const unsigned nfc_en_pi0_pins[] = {
+ TEGRA_PIN_NFC_EN_PI0,
+};
+
+static const unsigned nfc_int_pi1_pins[] = {
+ TEGRA_PIN_NFC_INT_PI1,
+};
+
+static const unsigned gps_en_pi2_pins[] = {
+ TEGRA_PIN_GPS_EN_PI2,
+};
+
+static const unsigned gps_rst_pi3_pins[] = {
+ TEGRA_PIN_GPS_RST_PI3,
+};
+
+static const unsigned uart4_tx_pi4_pins[] = {
+ TEGRA_PIN_UART4_TX_PI4,
+};
+
+static const unsigned uart4_rx_pi5_pins[] = {
+ TEGRA_PIN_UART4_RX_PI5,
+};
+
+static const unsigned uart4_rts_pi6_pins[] = {
+ TEGRA_PIN_UART4_RTS_PI6,
+};
+
+static const unsigned uart4_cts_pi7_pins[] = {
+ TEGRA_PIN_UART4_CTS_PI7,
+};
+
+static const unsigned gen1_i2c_sda_pj0_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SDA_PJ0,
+};
+
+static const unsigned gen1_i2c_scl_pj1_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SCL_PJ1,
+};
+
+static const unsigned gen2_i2c_scl_pj2_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SCL_PJ2,
+};
+
+static const unsigned gen2_i2c_sda_pj3_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SDA_PJ3,
+};
+
+static const unsigned dap4_fs_pj4_pins[] = {
+ TEGRA_PIN_DAP4_FS_PJ4,
+};
+
+static const unsigned dap4_din_pj5_pins[] = {
+ TEGRA_PIN_DAP4_DIN_PJ5,
+};
+
+static const unsigned dap4_dout_pj6_pins[] = {
+ TEGRA_PIN_DAP4_DOUT_PJ6,
+};
+
+static const unsigned dap4_sclk_pj7_pins[] = {
+ TEGRA_PIN_DAP4_SCLK_PJ7,
+};
+
+static const unsigned pk0_pins[] = {
+ TEGRA_PIN_PK0,
+};
+
+static const unsigned pk1_pins[] = {
+ TEGRA_PIN_PK1,
+};
+
+static const unsigned pk2_pins[] = {
+ TEGRA_PIN_PK2,
+};
+
+static const unsigned pk3_pins[] = {
+ TEGRA_PIN_PK3,
+};
+
+static const unsigned pk4_pins[] = {
+ TEGRA_PIN_PK4,
+};
+
+static const unsigned pk5_pins[] = {
+ TEGRA_PIN_PK5,
+};
+
+static const unsigned pk6_pins[] = {
+ TEGRA_PIN_PK6,
+};
+
+static const unsigned pk7_pins[] = {
+ TEGRA_PIN_PK7,
+};
+
+static const unsigned pl0_pins[] = {
+ TEGRA_PIN_PL0,
+};
+
+static const unsigned pl1_pins[] = {
+ TEGRA_PIN_PL1,
+};
+
+static const unsigned sdmmc1_clk_pm0_pins[] = {
+ TEGRA_PIN_SDMMC1_CLK_PM0,
+};
+
+static const unsigned sdmmc1_cmd_pm1_pins[] = {
+ TEGRA_PIN_SDMMC1_CMD_PM1,
+};
+
+static const unsigned sdmmc1_dat3_pm2_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT3_PM2,
+};
+
+static const unsigned sdmmc1_dat2_pm3_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT2_PM3,
+};
+
+static const unsigned sdmmc1_dat1_pm4_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT1_PM4,
+};
+
+static const unsigned sdmmc1_dat0_pm5_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT0_PM5,
+};
+
+static const unsigned sdmmc3_clk_pp0_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PP0,
+};
+
+static const unsigned sdmmc3_cmd_pp1_pins[] = {
+ TEGRA_PIN_SDMMC3_CMD_PP1,
+};
+
+static const unsigned sdmmc3_dat3_pp2_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT3_PP2,
+};
+
+static const unsigned sdmmc3_dat2_pp3_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT2_PP3,
+};
+
+static const unsigned sdmmc3_dat1_pp4_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT1_PP4,
+};
+
+static const unsigned sdmmc3_dat0_pp5_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT0_PP5,
+};
+
+static const unsigned cam1_mclk_ps0_pins[] = {
+ TEGRA_PIN_CAM1_MCLK_PS0,
+};
+
+static const unsigned cam2_mclk_ps1_pins[] = {
+ TEGRA_PIN_CAM2_MCLK_PS1,
+};
+
+static const unsigned cam_i2c_scl_ps2_pins[] = {
+ TEGRA_PIN_CAM_I2C_SCL_PS2,
+};
+
+static const unsigned cam_i2c_sda_ps3_pins[] = {
+ TEGRA_PIN_CAM_I2C_SDA_PS3,
+};
+
+static const unsigned cam_rst_ps4_pins[] = {
+ TEGRA_PIN_CAM_RST_PS4,
+};
+
+static const unsigned cam_af_en_ps5_pins[] = {
+ TEGRA_PIN_CAM_AF_EN_PS5,
+};
+
+static const unsigned cam_flash_en_ps6_pins[] = {
+ TEGRA_PIN_CAM_FLASH_EN_PS6,
+};
+
+static const unsigned cam1_pwdn_ps7_pins[] = {
+ TEGRA_PIN_CAM1_PWDN_PS7,
+};
+
+static const unsigned cam2_pwdn_pt0_pins[] = {
+ TEGRA_PIN_CAM2_PWDN_PT0,
+};
+
+static const unsigned cam1_strobe_pt1_pins[] = {
+ TEGRA_PIN_CAM1_STROBE_PT1,
+};
+
+static const unsigned uart1_tx_pu0_pins[] = {
+ TEGRA_PIN_UART1_TX_PU0,
+};
+
+static const unsigned uart1_rx_pu1_pins[] = {
+ TEGRA_PIN_UART1_RX_PU1,
+};
+
+static const unsigned uart1_rts_pu2_pins[] = {
+ TEGRA_PIN_UART1_RTS_PU2,
+};
+
+static const unsigned uart1_cts_pu3_pins[] = {
+ TEGRA_PIN_UART1_CTS_PU3,
+};
+
+static const unsigned lcd_bl_pwm_pv0_pins[] = {
+ TEGRA_PIN_LCD_BL_PWM_PV0,
+};
+
+static const unsigned lcd_bl_en_pv1_pins[] = {
+ TEGRA_PIN_LCD_BL_EN_PV1,
+};
+
+static const unsigned lcd_rst_pv2_pins[] = {
+ TEGRA_PIN_LCD_RST_PV2,
+};
+
+static const unsigned lcd_gpio1_pv3_pins[] = {
+ TEGRA_PIN_LCD_GPIO1_PV3,
+};
+
+static const unsigned lcd_gpio2_pv4_pins[] = {
+ TEGRA_PIN_LCD_GPIO2_PV4,
+};
+
+static const unsigned ap_ready_pv5_pins[] = {
+ TEGRA_PIN_AP_READY_PV5,
+};
+
+static const unsigned touch_rst_pv6_pins[] = {
+ TEGRA_PIN_TOUCH_RST_PV6,
+};
+
+static const unsigned touch_clk_pv7_pins[] = {
+ TEGRA_PIN_TOUCH_CLK_PV7,
+};
+
+static const unsigned modem_wake_ap_px0_pins[] = {
+ TEGRA_PIN_MODEM_WAKE_AP_PX0,
+};
+
+static const unsigned touch_int_px1_pins[] = {
+ TEGRA_PIN_TOUCH_INT_PX1,
+};
+
+static const unsigned motion_int_px2_pins[] = {
+ TEGRA_PIN_MOTION_INT_PX2,
+};
+
+static const unsigned als_prox_int_px3_pins[] = {
+ TEGRA_PIN_ALS_PROX_INT_PX3,
+};
+
+static const unsigned temp_alert_px4_pins[] = {
+ TEGRA_PIN_TEMP_ALERT_PX4,
+};
+
+static const unsigned button_power_on_px5_pins[] = {
+ TEGRA_PIN_BUTTON_POWER_ON_PX5,
+};
+
+static const unsigned button_vol_up_px6_pins[] = {
+ TEGRA_PIN_BUTTON_VOL_UP_PX6,
+};
+
+static const unsigned button_vol_down_px7_pins[] = {
+ TEGRA_PIN_BUTTON_VOL_DOWN_PX7,
+};
+
+static const unsigned button_slide_sw_py0_pins[] = {
+ TEGRA_PIN_BUTTON_SLIDE_SW_PY0,
+};
+
+static const unsigned button_home_py1_pins[] = {
+ TEGRA_PIN_BUTTON_HOME_PY1,
+};
+
+static const unsigned lcd_te_py2_pins[] = {
+ TEGRA_PIN_LCD_TE_PY2,
+};
+
+static const unsigned pwr_i2c_scl_py3_pins[] = {
+ TEGRA_PIN_PWR_I2C_SCL_PY3,
+};
+
+static const unsigned pwr_i2c_sda_py4_pins[] = {
+ TEGRA_PIN_PWR_I2C_SDA_PY4,
+};
+
+static const unsigned clk_32k_out_py5_pins[] = {
+ TEGRA_PIN_CLK_32K_OUT_PY5,
+};
+
+static const unsigned pz0_pins[] = {
+ TEGRA_PIN_PZ0,
+};
+
+static const unsigned pz1_pins[] = {
+ TEGRA_PIN_PZ1,
+};
+
+static const unsigned pz2_pins[] = {
+ TEGRA_PIN_PZ2,
+};
+
+static const unsigned pz3_pins[] = {
+ TEGRA_PIN_PZ3,
+};
+
+static const unsigned pz4_pins[] = {
+ TEGRA_PIN_PZ4,
+};
+
+static const unsigned pz5_pins[] = {
+ TEGRA_PIN_PZ5,
+};
+
+static const unsigned dap2_fs_paa0_pins[] = {
+ TEGRA_PIN_DAP2_FS_PAA0,
+};
+
+static const unsigned dap2_sclk_paa1_pins[] = {
+ TEGRA_PIN_DAP2_SCLK_PAA1,
+};
+
+static const unsigned dap2_din_paa2_pins[] = {
+ TEGRA_PIN_DAP2_DIN_PAA2,
+};
+
+static const unsigned dap2_dout_paa3_pins[] = {
+ TEGRA_PIN_DAP2_DOUT_PAA3,
+};
+
+static const unsigned aud_mclk_pbb0_pins[] = {
+ TEGRA_PIN_AUD_MCLK_PBB0,
+};
+
+static const unsigned dvfs_pwm_pbb1_pins[] = {
+ TEGRA_PIN_DVFS_PWM_PBB1,
+};
+
+static const unsigned dvfs_clk_pbb2_pins[] = {
+ TEGRA_PIN_DVFS_CLK_PBB2,
+};
+
+static const unsigned gpio_x1_aud_pbb3_pins[] = {
+ TEGRA_PIN_GPIO_X1_AUD_PBB3,
+};
+
+static const unsigned gpio_x3_aud_pbb4_pins[] = {
+ TEGRA_PIN_GPIO_X3_AUD_PBB4,
+};
+
+static const unsigned hdmi_cec_pcc0_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PCC0,
+};
+
+static const unsigned hdmi_int_dp_hpd_pcc1_pins[] = {
+ TEGRA_PIN_HDMI_INT_DP_HPD_PCC1,
+};
+
+static const unsigned spdif_out_pcc2_pins[] = {
+ TEGRA_PIN_SPDIF_OUT_PCC2,
+};
+
+static const unsigned spdif_in_pcc3_pins[] = {
+ TEGRA_PIN_SPDIF_IN_PCC3,
+};
+
+static const unsigned usb_vbus_en0_pcc4_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN0_PCC4,
+};
+
+static const unsigned usb_vbus_en1_pcc5_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN1_PCC5,
+};
+
+static const unsigned dp_hpd0_pcc6_pins[] = {
+ TEGRA_PIN_DP_HPD0_PCC6,
+};
+
+static const unsigned pcc7_pins[] = {
+ TEGRA_PIN_PCC7,
+};
+
+static const unsigned spi2_cs1_pdd0_pins[] = {
+ TEGRA_PIN_SPI2_CS1_PDD0,
+};
+
+static const unsigned qspi_sck_pee0_pins[] = {
+ TEGRA_PIN_QSPI_SCK_PEE0,
+};
+
+static const unsigned qspi_cs_n_pee1_pins[] = {
+ TEGRA_PIN_QSPI_CS_N_PEE1,
+};
+
+static const unsigned qspi_io0_pee2_pins[] = {
+ TEGRA_PIN_QSPI_IO0_PEE2,
+};
+
+static const unsigned qspi_io1_pee3_pins[] = {
+ TEGRA_PIN_QSPI_IO1_PEE3,
+};
+
+static const unsigned qspi_io2_pee4_pins[] = {
+ TEGRA_PIN_QSPI_IO2_PEE4,
+};
+
+static const unsigned qspi_io3_pee5_pins[] = {
+ TEGRA_PIN_QSPI_IO3_PEE5,
+};
+
+static const unsigned core_pwr_req_pins[] = {
+ TEGRA_PIN_CORE_PWR_REQ,
+};
+
+static const unsigned cpu_pwr_req_pins[] = {
+ TEGRA_PIN_CPU_PWR_REQ,
+};
+
+static const unsigned pwr_int_n_pins[] = {
+ TEGRA_PIN_PWR_INT_N,
+};
+
+static const unsigned clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
+};
+
+static const unsigned jtag_rtck_pins[] = {
+ TEGRA_PIN_JTAG_RTCK,
+};
+
+static const unsigned batt_bcl_pins[] = {
+ TEGRA_PIN_BATT_BCL,
+};
+
+static const unsigned clk_req_pins[] = {
+ TEGRA_PIN_CLK_REQ,
+};
+
+static const unsigned shutdown_pins[] = {
+ TEGRA_PIN_SHUTDOWN,
+};
+
+static const unsigned drive_pa6_pins[] = {
+ TEGRA_PIN_PA6,
+};
+
+static const unsigned drive_pcc7_pins[] = {
+ TEGRA_PIN_PCC7,
+};
+
+static const unsigned drive_pe6_pins[] = {
+ TEGRA_PIN_PE6,
+};
+
+static const unsigned drive_pe7_pins[] = {
+ TEGRA_PIN_PE7,
+};
+
+static const unsigned drive_ph6_pins[] = {
+ TEGRA_PIN_PH6,
+};
+
+static const unsigned drive_pk0_pins[] = {
+ TEGRA_PIN_PK0,
+};
+
+static const unsigned drive_pk1_pins[] = {
+ TEGRA_PIN_PK1,
+};
+
+static const unsigned drive_pk2_pins[] = {
+ TEGRA_PIN_PK2,
+};
+
+static const unsigned drive_pk3_pins[] = {
+ TEGRA_PIN_PK3,
+};
+
+static const unsigned drive_pk4_pins[] = {
+ TEGRA_PIN_PK4,
+};
+
+static const unsigned drive_pk5_pins[] = {
+ TEGRA_PIN_PK5,
+};
+
+static const unsigned drive_pk6_pins[] = {
+ TEGRA_PIN_PK6,
+};
+
+static const unsigned drive_pk7_pins[] = {
+ TEGRA_PIN_PK7,
+};
+
+static const unsigned drive_pl0_pins[] = {
+ TEGRA_PIN_PL0,
+};
+
+static const unsigned drive_pl1_pins[] = {
+ TEGRA_PIN_PL1,
+};
+
+static const unsigned drive_pz0_pins[] = {
+ TEGRA_PIN_PZ0,
+};
+
+static const unsigned drive_pz1_pins[] = {
+ TEGRA_PIN_PZ1,
+};
+
+static const unsigned drive_pz2_pins[] = {
+ TEGRA_PIN_PZ2,
+};
+
+static const unsigned drive_pz3_pins[] = {
+ TEGRA_PIN_PZ3,
+};
+
+static const unsigned drive_pz4_pins[] = {
+ TEGRA_PIN_PZ4,
+};
+
+static const unsigned drive_pz5_pins[] = {
+ TEGRA_PIN_PZ5,
+};
+
+static const unsigned drive_sdmmc1_pins[] = {
+ TEGRA_PIN_SDMMC1_CLK_PM0,
+ TEGRA_PIN_SDMMC1_CMD_PM1,
+ TEGRA_PIN_SDMMC1_DAT3_PM2,
+ TEGRA_PIN_SDMMC1_DAT2_PM3,
+ TEGRA_PIN_SDMMC1_DAT1_PM4,
+ TEGRA_PIN_SDMMC1_DAT0_PM5,
+};
+
+static const unsigned drive_sdmmc2_pins[] = {
+};
+
+static const unsigned drive_sdmmc3_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PP0,
+ TEGRA_PIN_SDMMC3_CMD_PP1,
+ TEGRA_PIN_SDMMC3_DAT3_PP2,
+ TEGRA_PIN_SDMMC3_DAT2_PP3,
+ TEGRA_PIN_SDMMC3_DAT1_PP4,
+ TEGRA_PIN_SDMMC3_DAT0_PP5,
+};
+
+static const unsigned drive_sdmmc4_pins[] = {
+};
+
+enum tegra_mux {
+ TEGRA_MUX_AUD,
+ TEGRA_MUX_BCL,
+ TEGRA_MUX_BLINK,
+ TEGRA_MUX_CCLA,
+ TEGRA_MUX_CEC,
+ TEGRA_MUX_CLDVFS,
+ TEGRA_MUX_CLK,
+ TEGRA_MUX_CORE,
+ TEGRA_MUX_CPU,
+ TEGRA_MUX_DISPLAYA,
+ TEGRA_MUX_DISPLAYB,
+ TEGRA_MUX_DMIC1,
+ TEGRA_MUX_DMIC2,
+ TEGRA_MUX_DMIC3,
+ TEGRA_MUX_DP,
+ TEGRA_MUX_DTV,
+ TEGRA_MUX_EXTPERIPH3,
+ TEGRA_MUX_I2C1,
+ TEGRA_MUX_I2C2,
+ TEGRA_MUX_I2C3,
+ TEGRA_MUX_I2CPMU,
+ TEGRA_MUX_I2CVI,
+ TEGRA_MUX_I2S1,
+ TEGRA_MUX_I2S2,
+ TEGRA_MUX_I2S3,
+ TEGRA_MUX_I2S4A,
+ TEGRA_MUX_I2S4B,
+ TEGRA_MUX_I2S5A,
+ TEGRA_MUX_I2S5B,
+ TEGRA_MUX_IQC0,
+ TEGRA_MUX_IQC1,
+ TEGRA_MUX_JTAG,
+ TEGRA_MUX_PE,
+ TEGRA_MUX_PE0,
+ TEGRA_MUX_PE1,
+ TEGRA_MUX_PMI,
+ TEGRA_MUX_PWM0,
+ TEGRA_MUX_PWM1,
+ TEGRA_MUX_PWM2,
+ TEGRA_MUX_PWM3,
+ TEGRA_MUX_QSPI,
+ TEGRA_MUX_RSVD0,
+ TEGRA_MUX_RSVD1,
+ TEGRA_MUX_RSVD2,
+ TEGRA_MUX_RSVD3,
+ TEGRA_MUX_SATA,
+ TEGRA_MUX_SDMMC1,
+ TEGRA_MUX_SDMMC3,
+ TEGRA_MUX_SHUTDOWN,
+ TEGRA_MUX_SOC,
+ TEGRA_MUX_SOR0,
+ TEGRA_MUX_SOR1,
+ TEGRA_MUX_SPDIF,
+ TEGRA_MUX_SPI1,
+ TEGRA_MUX_SPI2,
+ TEGRA_MUX_SPI3,
+ TEGRA_MUX_SPI4,
+ TEGRA_MUX_SYS,
+ TEGRA_MUX_TOUCH,
+ TEGRA_MUX_UART,
+ TEGRA_MUX_UARTA,
+ TEGRA_MUX_UARTB,
+ TEGRA_MUX_UARTC,
+ TEGRA_MUX_UARTD,
+ TEGRA_MUX_USB,
+ TEGRA_MUX_VGP1,
+ TEGRA_MUX_VGP2,
+ TEGRA_MUX_VGP3,
+ TEGRA_MUX_VGP4,
+ TEGRA_MUX_VGP5,
+ TEGRA_MUX_VGP6,
+ TEGRA_MUX_VIMCLK,
+ TEGRA_MUX_VIMCLK2,
+};
+
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ }
+
+static struct tegra_function tegra210_functions[] = {
+ FUNCTION(aud),
+ FUNCTION(bcl),
+ FUNCTION(blink),
+ FUNCTION(ccla),
+ FUNCTION(cec),
+ FUNCTION(cldvfs),
+ FUNCTION(clk),
+ FUNCTION(core),
+ FUNCTION(cpu),
+ FUNCTION(displaya),
+ FUNCTION(displayb),
+ FUNCTION(dmic1),
+ FUNCTION(dmic2),
+ FUNCTION(dmic3),
+ FUNCTION(dp),
+ FUNCTION(dtv),
+ FUNCTION(extperiph3),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(i2cpmu),
+ FUNCTION(i2cvi),
+ FUNCTION(i2s1),
+ FUNCTION(i2s2),
+ FUNCTION(i2s3),
+ FUNCTION(i2s4a),
+ FUNCTION(i2s4b),
+ FUNCTION(i2s5a),
+ FUNCTION(i2s5b),
+ FUNCTION(iqc0),
+ FUNCTION(iqc1),
+ FUNCTION(jtag),
+ FUNCTION(pe),
+ FUNCTION(pe0),
+ FUNCTION(pe1),
+ FUNCTION(pmi),
+ FUNCTION(pwm0),
+ FUNCTION(pwm1),
+ FUNCTION(pwm2),
+ FUNCTION(pwm3),
+ FUNCTION(qspi),
+ FUNCTION(rsvd0),
+ FUNCTION(rsvd1),
+ FUNCTION(rsvd2),
+ FUNCTION(rsvd3),
+ FUNCTION(sata),
+ FUNCTION(sdmmc1),
+ FUNCTION(sdmmc3),
+ FUNCTION(shutdown),
+ FUNCTION(soc),
+ FUNCTION(sor0),
+ FUNCTION(sor1),
+ FUNCTION(spdif),
+ FUNCTION(spi1),
+ FUNCTION(spi2),
+ FUNCTION(spi3),
+ FUNCTION(spi4),
+ FUNCTION(sys),
+ FUNCTION(touch),
+ FUNCTION(uart),
+ FUNCTION(uarta),
+ FUNCTION(uartb),
+ FUNCTION(uartc),
+ FUNCTION(uartd),
+ FUNCTION(usb),
+ FUNCTION(vgp1),
+ FUNCTION(vgp2),
+ FUNCTION(vgp3),
+ FUNCTION(vgp4),
+ FUNCTION(vgp5),
+ FUNCTION(vgp6),
+ FUNCTION(vimclk),
+ FUNCTION(vimclk2),
+};
+
+#define DRV_PINGROUP_REG_A 0x8d4 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
+
+#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
+#define PINGROUP_REG(r) ((r) - PINGROUP_REG_A)
+
+#define PINGROUP_BIT_Y(b) (b)
+#define PINGROUP_BIT_N(b) (-1)
+
+#define PINGROUP(pg_name, f0, f1, f2, f3, r, hsm, drvtype, e_io_hv, \
+ rdrv, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, \
+ slwr_w, slwf_b, slwf_w) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
+ }, \
+ .mux_reg = PINGROUP_REG(r), \
+ .mux_bank = 1, \
+ .mux_bit = 0, \
+ .pupd_reg = PINGROUP_REG(r), \
+ .pupd_bank = 1, \
+ .pupd_bit = 2, \
+ .tri_reg = PINGROUP_REG(r), \
+ .tri_bank = 1, \
+ .tri_bit = 4, \
+ .einput_bit = 6, \
+ .odrain_bit = 11, \
+ .lock_bit = 7, \
+ .ioreset_bit = -1, \
+ .rcv_sel_bit = PINGROUP_BIT_##e_io_hv(10), \
+ .hsm_bit = PINGROUP_BIT_##hsm(9), \
+ .schmitt_bit = 12, \
+ .drvtype_bit = PINGROUP_BIT_##drvtype(13), \
+ .drv_reg = DRV_PINGROUP_REG(rdrv), \
+ .drv_bank = 0, \
+ .lpmd_bit = -1, \
+ .drvdn_bit = drvdn_b, \
+ .drvdn_width = drvdn_w, \
+ .drvup_bit = drvup_b, \
+ .drvup_width = drvup_w, \
+ .slwr_bit = slwr_b, \
+ .slwr_width = slwr_w, \
+ .slwf_bit = slwf_b, \
+ .slwf_width = slwf_w, \
+ }
+
+#define DRV_PINGROUP(pg_name, r, drvdn_b, drvdn_w, drvup_b, drvup_w, \
+ slwr_b, slwr_w, slwf_b, slwf_w) \
+ { \
+ .name = "drive_" #pg_name, \
+ .pins = drive_##pg_name##_pins, \
+ .npins = ARRAY_SIZE(drive_##pg_name##_pins), \
+ .mux_reg = -1, \
+ .pupd_reg = -1, \
+ .tri_reg = -1, \
+ .einput_bit = -1, \
+ .odrain_bit = -1, \
+ .lock_bit = -1, \
+ .ioreset_bit = -1, \
+ .rcv_sel_bit = -1, \
+ .drv_reg = DRV_PINGROUP_REG(r), \
+ .drv_bank = 0, \
+ .hsm_bit = -1, \
+ .schmitt_bit = -1, \
+ .lpmd_bit = -1, \
+ .drvdn_bit = drvdn_b, \
+ .drvdn_width = drvdn_w, \
+ .drvup_bit = drvup_b, \
+ .drvup_width = drvup_w, \
+ .slwr_bit = slwr_b, \
+ .slwr_width = slwr_w, \
+ .slwf_bit = slwf_b, \
+ .slwf_width = slwf_w, \
+ .drvtype_bit = -1, \
+ }
+
+static const struct tegra_pingroup tegra210_groups[] = {
+ /* pg_name, f0, f1, f2, f3, r, hsm, drvtype, e_io_hv, rdrv, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w */
+ PINGROUP(sdmmc1_clk_pm0, SDMMC1, RSVD1, RSVD2, RSVD3, 0x3000, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc1_cmd_pm1, SDMMC1, SPI3, RSVD2, RSVD3, 0x3004, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc1_dat3_pm2, SDMMC1, SPI3, RSVD2, RSVD3, 0x3008, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc1_dat2_pm3, SDMMC1, SPI3, RSVD2, RSVD3, 0x300c, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc1_dat1_pm4, SDMMC1, SPI3, RSVD2, RSVD3, 0x3010, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc1_dat0_pm5, SDMMC1, RSVD1, RSVD2, RSVD3, 0x3014, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc3_clk_pp0, SDMMC3, RSVD1, RSVD2, RSVD3, 0x301c, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc3_cmd_pp1, SDMMC3, RSVD1, RSVD2, RSVD3, 0x3020, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc3_dat0_pp5, SDMMC3, RSVD1, RSVD2, RSVD3, 0x3024, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc3_dat1_pp4, SDMMC3, RSVD1, RSVD2, RSVD3, 0x3028, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc3_dat2_pp3, SDMMC3, RSVD1, RSVD2, RSVD3, 0x302c, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(sdmmc3_dat3_pp2, SDMMC3, RSVD1, RSVD2, RSVD3, 0x3030, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pex_l0_rst_n_pa0, PE0, RSVD1, RSVD2, RSVD3, 0x3038, N, N, Y, 0xa5c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pex_l0_clkreq_n_pa1, PE0, RSVD1, RSVD2, RSVD3, 0x303c, N, N, Y, 0xa58, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pex_wake_n_pa2, PE, RSVD1, RSVD2, RSVD3, 0x3040, N, N, Y, 0xa68, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pex_l1_rst_n_pa3, PE1, RSVD1, RSVD2, RSVD3, 0x3044, N, N, Y, 0xa64, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pex_l1_clkreq_n_pa4, PE1, RSVD1, RSVD2, RSVD3, 0x3048, N, N, Y, 0xa60, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(sata_led_active_pa5, SATA, RSVD1, RSVD2, RSVD3, 0x304c, N, N, N, 0xa94, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(spi1_mosi_pc0, SPI1, RSVD1, RSVD2, RSVD3, 0x3050, Y, Y, N, 0xae0, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi1_miso_pc1, SPI1, RSVD1, RSVD2, RSVD3, 0x3054, Y, Y, N, 0xadc, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi1_sck_pc2, SPI1, RSVD1, RSVD2, RSVD3, 0x3058, Y, Y, N, 0xae4, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi1_cs0_pc3, SPI1, RSVD1, RSVD2, RSVD3, 0x305c, Y, Y, N, 0xad4, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi1_cs1_pc4, SPI1, RSVD1, RSVD2, RSVD3, 0x3060, Y, Y, N, 0xad8, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi2_mosi_pb4, SPI2, DTV, RSVD2, RSVD3, 0x3064, Y, Y, N, 0xaf4, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi2_miso_pb5, SPI2, DTV, RSVD2, RSVD3, 0x3068, Y, Y, N, 0xaf0, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi2_sck_pb6, SPI2, DTV, RSVD2, RSVD3, 0x306c, Y, Y, N, 0xaf8, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi2_cs0_pb7, SPI2, DTV, RSVD2, RSVD3, 0x3070, Y, Y, N, 0xae8, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi2_cs1_pdd0, SPI2, RSVD1, RSVD2, RSVD3, 0x3074, Y, Y, N, 0xaec, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi4_mosi_pc7, SPI4, RSVD1, RSVD2, RSVD3, 0x3078, Y, Y, N, 0xb04, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi4_miso_pd0, SPI4, RSVD1, RSVD2, RSVD3, 0x307c, Y, Y, N, 0xb00, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi4_sck_pc5, SPI4, RSVD1, RSVD2, RSVD3, 0x3080, Y, Y, N, 0xb08, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(spi4_cs0_pc6, SPI4, RSVD1, RSVD2, RSVD3, 0x3084, Y, Y, N, 0xafc, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(qspi_sck_pee0, QSPI, RSVD1, RSVD2, RSVD3, 0x3088, Y, Y, N, 0xa90, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(qspi_cs_n_pee1, QSPI, RSVD1, RSVD2, RSVD3, 0x308c, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(qspi_io0_pee2, QSPI, RSVD1, RSVD2, RSVD3, 0x3090, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(qspi_io1_pee3, QSPI, RSVD1, RSVD2, RSVD3, 0x3094, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(qspi_io2_pee4, QSPI, RSVD1, RSVD2, RSVD3, 0x3098, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(qspi_io3_pee5, QSPI, RSVD1, RSVD2, RSVD3, 0x309c, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(dmic1_clk_pe0, DMIC1, I2S3, RSVD2, RSVD3, 0x30a4, N, N, N, 0x984, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dmic1_dat_pe1, DMIC1, I2S3, RSVD2, RSVD3, 0x30a8, N, N, N, 0x988, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dmic2_clk_pe2, DMIC2, I2S3, RSVD2, RSVD3, 0x30ac, N, N, N, 0x98c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dmic2_dat_pe3, DMIC2, I2S3, RSVD2, RSVD3, 0x30b0, N, N, N, 0x990, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dmic3_clk_pe4, DMIC3, I2S5A, RSVD2, RSVD3, 0x30b4, N, N, N, 0x994, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dmic3_dat_pe5, DMIC3, I2S5A, RSVD2, RSVD3, 0x30b8, N, N, N, 0x998, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gen1_i2c_scl_pj1, I2C1, RSVD1, RSVD2, RSVD3, 0x30bc, N, N, Y, 0x9a8, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gen1_i2c_sda_pj0, I2C1, RSVD1, RSVD2, RSVD3, 0x30c0, N, N, Y, 0x9ac, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gen2_i2c_scl_pj2, I2C2, RSVD1, RSVD2, RSVD3, 0x30c4, N, N, Y, 0x9b0, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gen2_i2c_sda_pj3, I2C2, RSVD1, RSVD2, RSVD3, 0x30c8, N, N, Y, 0x9b4, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gen3_i2c_scl_pf0, I2C3, RSVD1, RSVD2, RSVD3, 0x30cc, N, N, Y, 0x9b8, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gen3_i2c_sda_pf1, I2C3, RSVD1, RSVD2, RSVD3, 0x30d0, N, N, Y, 0x9bc, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam_i2c_scl_ps2, I2C3, I2CVI, RSVD2, RSVD3, 0x30d4, N, N, Y, 0x934, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam_i2c_sda_ps3, I2C3, I2CVI, RSVD2, RSVD3, 0x30d8, N, N, Y, 0x938, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pwr_i2c_scl_py3, I2CPMU, RSVD1, RSVD2, RSVD3, 0x30dc, N, N, Y, 0xa6c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pwr_i2c_sda_py4, I2CPMU, RSVD1, RSVD2, RSVD3, 0x30e0, N, N, Y, 0xa70, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart1_tx_pu0, UARTA, RSVD1, RSVD2, RSVD3, 0x30e4, N, N, N, 0xb28, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart1_rx_pu1, UARTA, RSVD1, RSVD2, RSVD3, 0x30e8, N, N, N, 0xb24, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart1_rts_pu2, UARTA, RSVD1, RSVD2, RSVD3, 0x30ec, N, N, N, 0xb20, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart1_cts_pu3, UARTA, RSVD1, RSVD2, RSVD3, 0x30f0, N, N, N, 0xb1c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart2_tx_pg0, UARTB, I2S4A, SPDIF, UART, 0x30f4, N, N, N, 0xb38, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart2_rx_pg1, UARTB, I2S4A, SPDIF, UART, 0x30f8, N, N, N, 0xb34, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart2_rts_pg2, UARTB, I2S4A, RSVD2, UART, 0x30fc, N, N, N, 0xb30, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart2_cts_pg3, UARTB, I2S4A, RSVD2, UART, 0x3100, N, N, N, 0xb2c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart3_tx_pd1, UARTC, SPI4, RSVD2, RSVD3, 0x3104, N, N, N, 0xb48, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart3_rx_pd2, UARTC, SPI4, RSVD2, RSVD3, 0x3108, N, N, N, 0xb44, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart3_rts_pd3, UARTC, SPI4, RSVD2, RSVD3, 0x310c, N, N, N, 0xb40, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart3_cts_pd4, UARTC, SPI4, RSVD2, RSVD3, 0x3110, N, N, N, 0xb3c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart4_tx_pi4, UARTD, UART, RSVD2, RSVD3, 0x3114, N, N, N, 0xb58, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart4_rx_pi5, UARTD, UART, RSVD2, RSVD3, 0x3118, N, N, N, 0xb54, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart4_rts_pi6, UARTD, UART, RSVD2, RSVD3, 0x311c, N, N, N, 0xb50, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(uart4_cts_pi7, UARTD, UART, RSVD2, RSVD3, 0x3120, N, N, N, 0xb4c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dap1_fs_pb0, I2S1, RSVD1, RSVD2, RSVD3, 0x3124, Y, Y, N, 0x95c, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap1_din_pb1, I2S1, RSVD1, RSVD2, RSVD3, 0x3128, Y, Y, N, 0x954, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap1_dout_pb2, I2S1, RSVD1, RSVD2, RSVD3, 0x312c, Y, Y, N, 0x958, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap1_sclk_pb3, I2S1, RSVD1, RSVD2, RSVD3, 0x3130, Y, Y, N, 0x960, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap2_fs_paa0, I2S2, RSVD1, RSVD2, RSVD3, 0x3134, Y, Y, N, 0x96c, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap2_din_paa2, I2S2, RSVD1, RSVD2, RSVD3, 0x3138, Y, Y, N, 0x964, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap2_dout_paa3, I2S2, RSVD1, RSVD2, RSVD3, 0x313c, Y, Y, N, 0x968, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap2_sclk_paa1, I2S2, RSVD1, RSVD2, RSVD3, 0x3140, Y, Y, N, 0x970, -1, -1, -1, -1, 28, 2, 30, 2),
+ PINGROUP(dap4_fs_pj4, I2S4B, RSVD1, RSVD2, RSVD3, 0x3144, N, N, N, 0x97c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dap4_din_pj5, I2S4B, RSVD1, RSVD2, RSVD3, 0x3148, N, N, N, 0x974, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dap4_dout_pj6, I2S4B, RSVD1, RSVD2, RSVD3, 0x314c, N, N, N, 0x978, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dap4_sclk_pj7, I2S4B, RSVD1, RSVD2, RSVD3, 0x3150, N, N, N, 0x980, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam1_mclk_ps0, EXTPERIPH3, RSVD1, RSVD2, RSVD3, 0x3154, N, N, N, 0x918, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam2_mclk_ps1, EXTPERIPH3, RSVD1, RSVD2, RSVD3, 0x3158, N, N, N, 0x924, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(jtag_rtck, JTAG, RSVD1, RSVD2, RSVD3, 0x315c, N, N, N, 0xa2c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(clk_32k_in, CLK, RSVD1, RSVD2, RSVD3, 0x3160, N, N, N, 0x940, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(clk_32k_out_py5, SOC, BLINK, RSVD2, RSVD3, 0x3164, N, N, N, 0x944, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(batt_bcl, BCL, RSVD1, RSVD2, RSVD3, 0x3168, N, N, Y, 0x8f8, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(clk_req, SYS, RSVD1, RSVD2, RSVD3, 0x316c, N, N, N, 0x948, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cpu_pwr_req, CPU, RSVD1, RSVD2, RSVD3, 0x3170, N, N, N, 0x950, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pwr_int_n, PMI, RSVD1, RSVD2, RSVD3, 0x3174, N, N, N, 0xa74, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(shutdown, SHUTDOWN, RSVD1, RSVD2, RSVD3, 0x3178, N, N, N, 0xac8, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(core_pwr_req, CORE, RSVD1, RSVD2, RSVD3, 0x317c, N, N, N, 0x94c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(aud_mclk_pbb0, AUD, RSVD1, RSVD2, RSVD3, 0x3180, N, N, N, 0x8f4, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dvfs_pwm_pbb1, RSVD0, CLDVFS, SPI3, RSVD3, 0x3184, N, N, N, 0x9a4, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dvfs_clk_pbb2, RSVD0, CLDVFS, SPI3, RSVD3, 0x3188, N, N, N, 0x9a0, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gpio_x1_aud_pbb3, RSVD0, RSVD1, SPI3, RSVD3, 0x318c, N, N, N, 0xa14, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gpio_x3_aud_pbb4, RSVD0, RSVD1, SPI3, RSVD3, 0x3190, N, N, N, 0xa18, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pcc7, RSVD0, RSVD1, RSVD2, RSVD3, 0x3194, N, N, Y, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(hdmi_cec_pcc0, CEC, RSVD1, RSVD2, RSVD3, 0x3198, N, N, Y, 0xa24, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(hdmi_int_dp_hpd_pcc1, DP, RSVD1, RSVD2, RSVD3, 0x319c, N, N, Y, 0xa28, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(spdif_out_pcc2, SPDIF, RSVD1, RSVD2, RSVD3, 0x31a0, N, N, N, 0xad0, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(spdif_in_pcc3, SPDIF, RSVD1, RSVD2, RSVD3, 0x31a4, N, N, N, 0xacc, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(usb_vbus_en0_pcc4, USB, RSVD1, RSVD2, RSVD3, 0x31a8, N, N, Y, 0xb5c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(usb_vbus_en1_pcc5, USB, RSVD1, RSVD2, RSVD3, 0x31ac, N, N, Y, 0xb60, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(dp_hpd0_pcc6, DP, RSVD1, RSVD2, RSVD3, 0x31b0, N, N, N, 0x99c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(wifi_en_ph0, RSVD0, RSVD1, RSVD2, RSVD3, 0x31b4, N, N, N, 0xb64, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(wifi_rst_ph1, RSVD0, RSVD1, RSVD2, RSVD3, 0x31b8, N, N, N, 0xb68, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(wifi_wake_ap_ph2, RSVD0, RSVD1, RSVD2, RSVD3, 0x31bc, N, N, N, 0xb6c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(ap_wake_bt_ph3, RSVD0, UARTB, SPDIF, RSVD3, 0x31c0, N, N, N, 0x8ec, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(bt_rst_ph4, RSVD0, UARTB, SPDIF, RSVD3, 0x31c4, N, N, N, 0x8fc, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(bt_wake_ap_ph5, RSVD0, RSVD1, RSVD2, RSVD3, 0x31c8, N, N, N, 0x900, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(ap_wake_nfc_ph7, RSVD0, RSVD1, RSVD2, RSVD3, 0x31cc, N, N, N, 0x8f0, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(nfc_en_pi0, RSVD0, RSVD1, RSVD2, RSVD3, 0x31d0, N, N, N, 0xa50, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(nfc_int_pi1, RSVD0, RSVD1, RSVD2, RSVD3, 0x31d4, N, N, N, 0xa54, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gps_en_pi2, RSVD0, RSVD1, RSVD2, RSVD3, 0x31d8, N, N, N, 0xa1c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(gps_rst_pi3, RSVD0, RSVD1, RSVD2, RSVD3, 0x31dc, N, N, N, 0xa20, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam_rst_ps4, VGP1, RSVD1, RSVD2, RSVD3, 0x31e0, N, N, N, 0x93c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam_af_en_ps5, VIMCLK, VGP2, RSVD2, RSVD3, 0x31e4, N, N, N, 0x92c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam_flash_en_ps6, VIMCLK, VGP3, RSVD2, RSVD3, 0x31e8, N, N, N, 0x930, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam1_pwdn_ps7, VGP4, RSVD1, RSVD2, RSVD3, 0x31ec, N, N, N, 0x91c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam2_pwdn_pt0, VGP5, RSVD1, RSVD2, RSVD3, 0x31f0, N, N, N, 0x928, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(cam1_strobe_pt1, VGP6, RSVD1, RSVD2, RSVD3, 0x31f4, N, N, N, 0x920, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(lcd_te_py2, DISPLAYA, RSVD1, RSVD2, RSVD3, 0x31f8, N, N, N, 0xa44, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(lcd_bl_pwm_pv0, DISPLAYA, PWM0, SOR0, RSVD3, 0x31fc, N, N, N, 0xa34, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(lcd_bl_en_pv1, RSVD0, RSVD1, RSVD2, RSVD3, 0x3200, N, N, N, 0xa30, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(lcd_rst_pv2, RSVD0, RSVD1, RSVD2, RSVD3, 0x3204, N, N, N, 0xa40, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(lcd_gpio1_pv3, DISPLAYB, RSVD1, RSVD2, RSVD3, 0x3208, N, N, N, 0xa38, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(lcd_gpio2_pv4, DISPLAYB, PWM1, RSVD2, SOR1, 0x320c, N, N, N, 0xa3c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(ap_ready_pv5, RSVD0, RSVD1, RSVD2, RSVD3, 0x3210, N, N, N, 0x8e8, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(touch_rst_pv6, RSVD0, RSVD1, RSVD2, RSVD3, 0x3214, N, N, N, 0xb18, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(touch_clk_pv7, TOUCH, RSVD1, RSVD2, RSVD3, 0x3218, N, N, N, 0xb10, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(modem_wake_ap_px0, RSVD0, RSVD1, RSVD2, RSVD3, 0x321c, N, N, N, 0xa48, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(touch_int_px1, RSVD0, RSVD1, RSVD2, RSVD3, 0x3220, N, N, N, 0xb14, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(motion_int_px2, RSVD0, RSVD1, RSVD2, RSVD3, 0x3224, N, N, N, 0xa4c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(als_prox_int_px3, RSVD0, RSVD1, RSVD2, RSVD3, 0x3228, N, N, N, 0x8e4, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(temp_alert_px4, RSVD0, RSVD1, RSVD2, RSVD3, 0x322c, N, N, N, 0xb0c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(button_power_on_px5, RSVD0, RSVD1, RSVD2, RSVD3, 0x3230, N, N, N, 0x908, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(button_vol_up_px6, RSVD0, RSVD1, RSVD2, RSVD3, 0x3234, N, N, N, 0x914, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(button_vol_down_px7, RSVD0, RSVD1, RSVD2, RSVD3, 0x3238, N, N, N, 0x910, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(button_slide_sw_py0, RSVD0, RSVD1, RSVD2, RSVD3, 0x323c, N, N, N, 0x90c, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(button_home_py1, RSVD0, RSVD1, RSVD2, RSVD3, 0x3240, N, N, N, 0x904, 12, 5, 20, 5, -1, -1, -1, -1),
+ PINGROUP(pa6, SATA, RSVD1, RSVD2, RSVD3, 0x3244, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pe6, RSVD0, I2S5A, PWM2, RSVD3, 0x3248, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pe7, RSVD0, I2S5A, PWM3, RSVD3, 0x324c, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(ph6, RSVD0, RSVD1, RSVD2, RSVD3, 0x3250, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk0, IQC0, I2S5B, RSVD2, RSVD3, 0x3254, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk1, IQC0, I2S5B, RSVD2, RSVD3, 0x3258, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk2, IQC0, I2S5B, RSVD2, RSVD3, 0x325c, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk3, IQC0, I2S5B, RSVD2, RSVD3, 0x3260, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk4, IQC1, RSVD1, RSVD2, RSVD3, 0x3264, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk5, IQC1, RSVD1, RSVD2, RSVD3, 0x3268, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk6, IQC1, RSVD1, RSVD2, RSVD3, 0x326c, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pk7, IQC1, RSVD1, RSVD2, RSVD3, 0x3270, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pl0, RSVD0, RSVD1, RSVD2, RSVD3, 0x3274, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pl1, SOC, RSVD1, RSVD2, RSVD3, 0x3278, Y, Y, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pz0, VIMCLK2, RSVD1, RSVD2, RSVD3, 0x327c, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pz1, VIMCLK2, SDMMC1, RSVD2, RSVD3, 0x3280, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pz2, SDMMC3, CCLA, RSVD2, RSVD3, 0x3284, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pz3, SDMMC3, RSVD1, RSVD2, RSVD3, 0x3288, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pz4, SDMMC1, RSVD1, RSVD2, RSVD3, 0x328c, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ PINGROUP(pz5, SOC, RSVD1, RSVD2, RSVD3, 0x3290, N, N, N, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+
+ /* pg_name, r, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w */
+ DRV_PINGROUP(pa6, 0x9c0, 12, 5, 20, 5, -1, -1, -1, -1),
+ DRV_PINGROUP(pcc7, 0x9c4, 12, 5, 20, 5, -1, -1, -1, -1),
+ DRV_PINGROUP(pe6, 0x9c8, 12, 5, 20, 5, -1, -1, -1, -1),
+ DRV_PINGROUP(pe7, 0x9cc, 12, 5, 20, 5, -1, -1, -1, -1),
+ DRV_PINGROUP(ph6, 0x9d0, 12, 5, 20, 5, -1, -1, -1, -1),
+ DRV_PINGROUP(pk0, 0x9d4, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pk1, 0x9d8, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pk2, 0x9dc, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pk3, 0x9e0, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pk4, 0x9e4, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pk5, 0x9e8, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pk6, 0x9ec, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pk7, 0x9f0, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pl0, 0x9f4, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pl1, 0x9f8, -1, -1, -1, -1, 28, 2, 30, 2),
+ DRV_PINGROUP(pz0, 0x9fc, 12, 7, 20, 7, -1, -1, -1, -1),
+ DRV_PINGROUP(pz1, 0xa00, 12, 7, 20, 7, -1, -1, -1, -1),
+ DRV_PINGROUP(pz2, 0xa04, 12, 7, 20, 7, -1, -1, -1, -1),
+ DRV_PINGROUP(pz3, 0xa08, 12, 7, 20, 7, -1, -1, -1, -1),
+ DRV_PINGROUP(pz4, 0xa0c, 12, 7, 20, 7, -1, -1, -1, -1),
+ DRV_PINGROUP(pz5, 0xa10, 12, 7, 20, 7, -1, -1, -1, -1),
+ DRV_PINGROUP(sdmmc1, 0xa98, 12, 7, 20, 7, 28, 2, 30, 2),
+ DRV_PINGROUP(sdmmc2, 0xa9c, 2, 6, 8, 6, 28, 2, 30, 2),
+ DRV_PINGROUP(sdmmc3, 0xab0, 12, 7, 20, 7, 28, 2, 30, 2),
+ DRV_PINGROUP(sdmmc4, 0xab4, 2, 6, 8, 6, 28, 2, 30, 2),
+};
+
+static const struct tegra_pinctrl_soc_data tegra210_pinctrl = {
+ .ngpios = NUM_GPIOS,
+ .pins = tegra210_pins,
+ .npins = ARRAY_SIZE(tegra210_pins),
+ .functions = tegra210_functions,
+ .nfunctions = ARRAY_SIZE(tegra210_functions),
+ .groups = tegra210_groups,
+ .ngroups = ARRAY_SIZE(tegra210_groups),
+ .hsm_in_mux = true,
+ .schmitt_in_mux = true,
+ .drvtype_in_mux = true,
+};
+
+static int tegra210_pinctrl_probe(struct platform_device *pdev)
+{
+ return tegra_pinctrl_probe(pdev, &tegra210_pinctrl);
+}
+
+static const struct of_device_id tegra210_pinctrl_of_match[] = {
+ { .compatible = "nvidia,tegra210-pinmux", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra210_pinctrl_of_match);
+
+static struct platform_driver tegra210_pinctrl_driver = {
+ .driver = {
+ .name = "tegra210-pinctrl",
+ .of_match_table = tegra210_pinctrl_of_match,
+ },
+ .probe = tegra210_pinctrl_probe,
+ .remove = tegra_pinctrl_remove,
+};
+module_platform_driver(tegra210_pinctrl_driver);
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("NVIDIA Tegra210 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index f6edc2ff5494..47b2fd8bb2e9 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -2108,70 +2108,69 @@ static struct tegra_function tegra30_functions[] = {
#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
#define PINGROUP_REG(r) ((r) - PINGROUP_REG_A)
#define PINGROUP_BIT_Y(b) (b)
#define PINGROUP_BIT_N(b) (-1)
-#define PINGROUP(pg_name, f0, f1, f2, f3, r, od, ior) \
- { \
- .name = #pg_name, \
- .pins = pg_name##_pins, \
- .npins = ARRAY_SIZE(pg_name##_pins), \
- .funcs = { \
- TEGRA_MUX_##f0, \
- TEGRA_MUX_##f1, \
- TEGRA_MUX_##f2, \
- TEGRA_MUX_##f3, \
- }, \
- .mux_reg = PINGROUP_REG(r), \
- .mux_bank = 1, \
- .mux_bit = 0, \
- .pupd_reg = PINGROUP_REG(r), \
- .pupd_bank = 1, \
- .pupd_bit = 2, \
- .tri_reg = PINGROUP_REG(r), \
- .tri_bank = 1, \
- .tri_bit = 4, \
- .einput_bit = PINGROUP_BIT_Y(5), \
- .odrain_bit = PINGROUP_BIT_##od(6), \
- .lock_bit = PINGROUP_BIT_Y(7), \
- .ioreset_bit = PINGROUP_BIT_##ior(8), \
- .rcv_sel_bit = -1, \
- .drv_reg = -1, \
+#define PINGROUP(pg_name, f0, f1, f2, f3, r, od, ior) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
+ }, \
+ .mux_reg = PINGROUP_REG(r), \
+ .mux_bank = 1, \
+ .mux_bit = 0, \
+ .pupd_reg = PINGROUP_REG(r), \
+ .pupd_bank = 1, \
+ .pupd_bit = 2, \
+ .tri_reg = PINGROUP_REG(r), \
+ .tri_bank = 1, \
+ .tri_bit = 4, \
+ .einput_bit = 5, \
+ .odrain_bit = PINGROUP_BIT_##od(6), \
+ .lock_bit = 7, \
+ .ioreset_bit = PINGROUP_BIT_##ior(8), \
+ .rcv_sel_bit = -1, \
+ .drv_reg = -1, \
}
-#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
-
-#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
- drvdn_b, drvdn_w, drvup_b, drvup_w, \
- slwr_b, slwr_w, slwf_b, slwf_w) \
- { \
- .name = "drive_" #pg_name, \
- .pins = drive_##pg_name##_pins, \
- .npins = ARRAY_SIZE(drive_##pg_name##_pins), \
- .mux_reg = -1, \
- .pupd_reg = -1, \
- .tri_reg = -1, \
- .einput_bit = -1, \
- .odrain_bit = -1, \
- .lock_bit = -1, \
- .ioreset_bit = -1, \
- .rcv_sel_bit = -1, \
- .drv_reg = DRV_PINGROUP_REG(r), \
- .drv_bank = 0, \
- .hsm_bit = hsm_b, \
- .schmitt_bit = schmitt_b, \
- .lpmd_bit = lpmd_b, \
- .drvdn_bit = drvdn_b, \
- .drvdn_width = drvdn_w, \
- .drvup_bit = drvup_b, \
- .drvup_width = drvup_w, \
- .slwr_bit = slwr_b, \
- .slwr_width = slwr_w, \
- .slwf_bit = slwf_b, \
- .slwf_width = slwf_w, \
- .drvtype_bit = -1, \
+#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, \
+ drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, \
+ slwf_b, slwf_w) \
+ { \
+ .name = "drive_" #pg_name, \
+ .pins = drive_##pg_name##_pins, \
+ .npins = ARRAY_SIZE(drive_##pg_name##_pins), \
+ .mux_reg = -1, \
+ .pupd_reg = -1, \
+ .tri_reg = -1, \
+ .einput_bit = -1, \
+ .odrain_bit = -1, \
+ .lock_bit = -1, \
+ .ioreset_bit = -1, \
+ .rcv_sel_bit = -1, \
+ .drv_reg = DRV_PINGROUP_REG(r), \
+ .drv_bank = 0, \
+ .hsm_bit = hsm_b, \
+ .schmitt_bit = schmitt_b, \
+ .lpmd_bit = lpmd_b, \
+ .drvdn_bit = drvdn_b, \
+ .drvdn_width = drvdn_w, \
+ .drvup_bit = drvup_b, \
+ .drvup_width = drvup_w, \
+ .slwr_bit = slwr_b, \
+ .slwr_width = slwr_w, \
+ .slwf_bit = slwf_b, \
+ .slwf_width = slwf_w, \
+ .drvtype_bit = -1, \
}
static const struct tegra_pingroup tegra30_groups[] = {
@@ -2477,6 +2476,9 @@ static const struct tegra_pinctrl_soc_data tegra30_pinctrl = {
.nfunctions = ARRAY_SIZE(tegra30_functions),
.groups = tegra30_groups,
.ngroups = ARRAY_SIZE(tegra30_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = false,
+ .drvtype_in_mux = false,
};
static int tegra30_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index fab6aafa6a9f..8a8911bb883a 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -969,7 +969,7 @@ static int tz1090_pdc_pinctrl_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tz1090_pdc_pinctrl_of_match[] = {
+static const struct of_device_id tz1090_pdc_pinctrl_of_match[] = {
{ .compatible = "img,tz1090-pdc-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index 8bd73075f9dd..fc5594a530c2 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1984,7 +1984,7 @@ static int tz1090_pinctrl_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tz1090_pinctrl_of_match[] = {
+static const struct of_device_id tz1090_pinctrl_of_match[] = {
{ .compatible = "img,tz1090-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index a535f9c23678..f3d800f796c2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -193,11 +193,11 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
*mask = 7;
break;
case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_INPUT_ENABLE:
*bit = g->oe_bit;
*mask = 1;
break;
default:
- dev_err(pctrl->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
}
@@ -261,10 +261,14 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
val = readl(pctrl->regs + g->io_reg);
arg = !!(val & BIT(g->in_bit));
break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* Pin is output */
+ if (arg)
+ return -EINVAL;
+ arg = 1;
+ break;
default:
- dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
- param);
- return -EINVAL;
+ return -ENOTSUPP;
}
*config = pinconf_to_config_packed(param, arg);
@@ -333,6 +337,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
/* enable output */
arg = 1;
break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* disable output */
+ arg = 0;
+ break;
default:
dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
param);
@@ -357,6 +365,7 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
}
static const struct pinconf_ops msm_pinconf_ops = {
+ .is_generic = true,
.pin_config_group_get = msm_config_group_get,
.pin_config_group_set = msm_config_group_set,
};
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 0f11a26d932b..ae4115e4b4ef 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -260,6 +260,7 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
val = 1;
}
+ val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
@@ -417,7 +418,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT;
- val = pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
+ val |= pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val);
if (ret < 0)
@@ -466,12 +467,13 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
seq_puts(s, " ---");
} else {
- if (!pad->input_enabled) {
+ if (pad->input_enabled) {
ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
- if (!ret) {
- ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
- pad->out_value = ret;
- }
+ if (ret < 0)
+ return;
+
+ ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+ pad->out_value = ret;
}
seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
@@ -810,6 +812,7 @@ static int pmic_gpio_remove(struct platform_device *pdev)
}
static const struct of_device_id pmic_gpio_of_match[] = {
+ { .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */
{ .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */
{ .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */
{ },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index a8924dba335e..211b942ad6d5 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -370,6 +370,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
}
}
+ val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
@@ -576,10 +577,11 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
if (pad->input_enabled) {
ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
- if (!ret) {
- ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
- pad->out_value = ret;
- }
+ if (ret < 0)
+ return;
+
+ ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+ pad->out_value = ret;
}
seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
@@ -925,6 +927,7 @@ static int pmic_mpp_remove(struct platform_device *pdev)
static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8841-mpp" }, /* 4 MPP's */
+ { .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */
{ },
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index c8f83f96546c..0b7afa50121a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -1240,6 +1240,159 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
},
};
+/* pin banks of exynos5433 pin-controller - ALIVE */
+static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
+ EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+};
+
+/* pin banks of exynos5433 pin-controller - AUD */
+static const struct samsung_pin_bank_data exynos5433_pin_banks1[] = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
+/* pin banks of exynos5433 pin-controller - CPIF */
+static const struct samsung_pin_bank_data exynos5433_pin_banks2[] = {
+ EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - eSE */
+static const struct samsung_pin_bank_data exynos5433_pin_banks3[] = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - FINGER */
+static const struct samsung_pin_bank_data exynos5433_pin_banks4[] = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - FSYS */
+static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = {
+ EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+ EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+ EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+};
+
+/* pin banks of exynos5433 pin-controller - IMEM */
+static const struct samsung_pin_bank_data exynos5433_pin_banks6[] = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - NFC */
+static const struct samsung_pin_bank_data exynos5433_pin_banks7[] = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - PERIC */
+static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = {
+ EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+ EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+ EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+ EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+ EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+ EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+ EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+};
+
+/* pin banks of exynos5433 pin-controller - TOUCH */
+static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes
+ * ten gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos5433_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5433_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks0),
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5433_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5433_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 3 data */
+ .pin_banks = exynos5433_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 4 data */
+ .pin_banks = exynos5433_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 5 data */
+ .pin_banks = exynos5433_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 6 data */
+ .pin_banks = exynos5433_pin_banks6,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks6),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 7 data */
+ .pin_banks = exynos5433_pin_banks7,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks7),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 8 data */
+ .pin_banks = exynos5433_pin_banks8,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks8),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 9 data */
+ .pin_banks = exynos5433_pin_banks9,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks9),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
/* pin banks of exynos7 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
@@ -1324,7 +1477,6 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
/* pin-controller instance 0 Alive data */
.pin_banks = exynos7_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos7_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
}, {
/* pin-controller instance 1 BUS0 data */
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index ec580af35856..ed165ba2eb2f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1239,6 +1239,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos5260_pin_ctrl },
{ .compatible = "samsung,exynos5420-pinctrl",
.data = (void *)exynos5420_pin_ctrl },
+ { .compatible = "samsung,exynos5433-pinctrl",
+ .data = (void *)exynos5433_pin_ctrl },
{ .compatible = "samsung,s5pv210-pinctrl",
.data = (void *)s5pv210_pin_ctrl },
{ .compatible = "samsung,exynos7-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 1b8c0139d604..c1239ff6157d 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -271,6 +271,7 @@ extern const struct samsung_pin_ctrl exynos4415_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos7_pin_ctrl[];
extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[];
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index a56280814a3f..7b2c9495c383 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -92,10 +92,10 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
return 0;
}
-static void __iomem *sh_pfc_phys_to_virt(struct sh_pfc *pfc,
- unsigned long address)
+static void __iomem *sh_pfc_phys_to_virt(struct sh_pfc *pfc, u32 reg)
{
struct sh_pfc_window *window;
+ phys_addr_t address = reg;
unsigned int i;
/* scan through physical windows and convert address */
@@ -144,8 +144,7 @@ static int sh_pfc_enum_in_range(u16 enum_id, const struct pinmux_range *r)
return 1;
}
-unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width)
+u32 sh_pfc_read_raw_reg(void __iomem *mapped_reg, unsigned int reg_width)
{
switch (reg_width) {
case 8:
@@ -160,8 +159,8 @@ unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
return 0;
}
-void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned long reg_width,
- unsigned long data)
+void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
+ u32 data)
{
switch (reg_width) {
case 8:
@@ -180,10 +179,9 @@ void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned long reg_width,
static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
const struct pinmux_cfg_reg *crp,
- unsigned long in_pos,
- void __iomem **mapped_regp,
- unsigned long *maskp,
- unsigned long *posp)
+ unsigned int in_pos,
+ void __iomem **mapped_regp, u32 *maskp,
+ unsigned int *posp)
{
unsigned int k;
@@ -202,15 +200,16 @@ static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
const struct pinmux_cfg_reg *crp,
- unsigned long field, unsigned long value)
+ unsigned int field, u32 value)
{
void __iomem *mapped_reg;
- unsigned long mask, pos, data;
+ unsigned int pos;
+ u32 mask, data;
sh_pfc_config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
- dev_dbg(pfc->dev, "write_reg addr = %lx, value = %ld, field = %ld, "
- "r_width = %ld, f_width = %ld\n",
+ dev_dbg(pfc->dev, "write_reg addr = %x, value = 0x%x, field = %u, "
+ "r_width = %u, f_width = %u\n",
crp->reg, value, field, crp->reg_width, crp->field_width);
mask = ~(mask << pos);
@@ -229,26 +228,28 @@ static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
}
static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id,
- const struct pinmux_cfg_reg **crp, int *fieldp,
- int *valuep)
+ const struct pinmux_cfg_reg **crp,
+ unsigned int *fieldp, u32 *valuep)
{
- const struct pinmux_cfg_reg *config_reg;
- unsigned long r_width, f_width, curr_width, ncomb;
- unsigned int k, m, n, pos, bit_pos;
+ unsigned int k = 0;
- k = 0;
while (1) {
- config_reg = pfc->info->cfg_regs + k;
-
- r_width = config_reg->reg_width;
- f_width = config_reg->field_width;
+ const struct pinmux_cfg_reg *config_reg =
+ pfc->info->cfg_regs + k;
+ unsigned int r_width = config_reg->reg_width;
+ unsigned int f_width = config_reg->field_width;
+ unsigned int curr_width;
+ unsigned int bit_pos;
+ unsigned int pos = 0;
+ unsigned int m = 0;
if (!r_width)
break;
- pos = 0;
- m = 0;
for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
+ u32 ncomb;
+ u32 n;
+
if (f_width)
curr_width = f_width;
else
@@ -297,11 +298,8 @@ static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos,
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
{
- const struct pinmux_cfg_reg *cr = NULL;
- u16 enum_id;
const struct pinmux_range *range;
- int in_range, pos, field, value;
- int ret;
+ int pos = 0;
switch (pinmux_type) {
case PINMUX_TYPE_GPIO:
@@ -321,13 +319,15 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return -EINVAL;
}
- pos = 0;
- enum_id = 0;
- field = 0;
- value = 0;
-
/* Iterate over all the configuration fields we need to update. */
while (1) {
+ const struct pinmux_cfg_reg *cr;
+ unsigned int field;
+ u16 enum_id;
+ u32 value;
+ int in_range;
+ int ret;
+
pos = sh_pfc_mark_to_enum(pfc, mark, pos, &enum_id);
if (pos < 0)
return pos;
@@ -579,9 +579,6 @@ static int sh_pfc_remove(struct platform_device *pdev)
}
static const struct platform_device_id sh_pfc_id_table[] = {
-#ifdef CONFIG_PINCTRL_PFC_EMEV2
- { "pfc-emev2", (kernel_ulong_t)&emev2_pinmux_info },
-#endif
#ifdef CONFIG_PINCTRL_PFC_R8A73A4
{ "pfc-r8a73a4", (kernel_ulong_t)&r8a73a4_pinmux_info },
#endif
@@ -594,12 +591,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_R8A7779
{ "pfc-r8a7779", (kernel_ulong_t)&r8a7779_pinmux_info },
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7790
- { "pfc-r8a7790", (kernel_ulong_t)&r8a7790_pinmux_info },
-#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7791
- { "pfc-r8a7791", (kernel_ulong_t)&r8a7791_pinmux_info },
-#endif
#ifdef CONFIG_PINCTRL_PFC_SH7203
{ "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info },
#endif
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 6b59d63b9c01..6dc8a6fc2746 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -57,10 +57,9 @@ int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc);
int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc);
-unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width);
-void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned long reg_width,
- unsigned long data);
+u32 sh_pfc_read_raw_reg(void __iomem *mapped_reg, unsigned int reg_width);
+void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
+ u32 data);
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 80f641ee4dea..ba353735ecf2 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -21,7 +21,7 @@
struct sh_pfc_gpio_data_reg {
const struct pinmux_data_reg *info;
- unsigned long shadow;
+ u32 shadow;
};
struct sh_pfc_gpio_pin {
@@ -59,19 +59,20 @@ static void gpio_get_data_reg(struct sh_pfc_chip *chip, unsigned int offset,
*bit = gpio_pin->dbit;
}
-static unsigned long gpio_read_data_reg(struct sh_pfc_chip *chip,
- const struct pinmux_data_reg *dreg)
+static u32 gpio_read_data_reg(struct sh_pfc_chip *chip,
+ const struct pinmux_data_reg *dreg)
{
- void __iomem *mem = dreg->reg - chip->mem->phys + chip->mem->virt;
+ phys_addr_t address = dreg->reg;
+ void __iomem *mem = address - chip->mem->phys + chip->mem->virt;
return sh_pfc_read_raw_reg(mem, dreg->reg_width);
}
static void gpio_write_data_reg(struct sh_pfc_chip *chip,
- const struct pinmux_data_reg *dreg,
- unsigned long value)
+ const struct pinmux_data_reg *dreg, u32 value)
{
- void __iomem *mem = dreg->reg - chip->mem->phys + chip->mem->virt;
+ phys_addr_t address = dreg->reg;
+ void __iomem *mem = address - chip->mem->phys + chip->mem->virt;
sh_pfc_write_raw_reg(mem, dreg->reg_width, value);
}
@@ -85,7 +86,7 @@ static void gpio_setup_data_reg(struct sh_pfc_chip *chip, unsigned idx)
unsigned int bit;
unsigned int i;
- for (i = 0, dreg = pfc->info->data_regs; dreg->reg; ++i, ++dreg) {
+ for (i = 0, dreg = pfc->info->data_regs; dreg->reg_width; ++i, ++dreg) {
for (bit = 0; bit < dreg->reg_width; bit++) {
if (dreg->enum_ids[bit] == pin->enum_id) {
gpio_pin->dreg = i;
@@ -154,17 +155,17 @@ static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
int value)
{
struct sh_pfc_gpio_data_reg *reg;
- unsigned long pos;
unsigned int bit;
+ unsigned int pos;
gpio_get_data_reg(chip, offset, &reg, &bit);
pos = reg->info->reg_width - (bit + 1);
if (value)
- set_bit(pos, &reg->shadow);
+ reg->shadow |= BIT(pos);
else
- clear_bit(pos, &reg->shadow);
+ reg->shadow &= ~BIT(pos);
gpio_write_data_reg(chip, reg->info, reg->shadow);
}
@@ -186,8 +187,8 @@ static int gpio_pin_get(struct gpio_chip *gc, unsigned offset)
{
struct sh_pfc_chip *chip = gpio_to_pfc_chip(gc);
struct sh_pfc_gpio_data_reg *reg;
- unsigned long pos;
unsigned int bit;
+ unsigned int pos;
gpio_get_data_reg(chip, offset, &reg, &bit);
@@ -341,6 +342,7 @@ sh_pfc_add_gpiochip(struct sh_pfc *pfc, int(*setup)(struct sh_pfc_chip *),
int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
{
struct sh_pfc_chip *chip;
+ phys_addr_t address;
unsigned int i;
int ret;
@@ -352,11 +354,12 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
* that covers the data registers. In that case don't try to handle
* GPIOs.
*/
+ address = pfc->info->data_regs[0].reg;
for (i = 0; i < pfc->num_windows; ++i) {
struct sh_pfc_window *window = &pfc->windows[i];
- if (pfc->info->data_regs[0].reg >= window->phys &&
- pfc->info->data_regs[0].reg < window->phys + window->size)
+ if (address >= window->phys &&
+ address < window->phys + window->size)
break;
}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 80c1843bb6ad..22a5470889f5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -1799,6 +1799,81 @@ static const unsigned int audio_clkout_d_pins[] = {
static const unsigned int audio_clkout_d_mux[] = {
AUDIO_CLKOUT_D_MARK,
};
+/* - AVB -------------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdio_pins[] = {
+ RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
+};
+static const unsigned int avb_mdio_mux[] = {
+ AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 11),
+
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+ RCAR_GP_PIN(2, 2),
+
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 10),
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int avb_mii_mux[] = {
+ AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK,
+
+ AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK,
+
+ AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+ AVB_CRS_MARK, AVB_TX_EN_MARK, AVB_TX_CLK_MARK,
+ AVB_COL_MARK,
+};
+static const unsigned int avb_gmii_pins[] = {
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4),
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 6),
+
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 16),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 10),
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int avb_gmii_mux[] = {
+ AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK, AVB_TXD4_MARK, AVB_TXD5_MARK,
+ AVB_TXD6_MARK, AVB_TXD7_MARK,
+
+ AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK, AVB_RXD4_MARK, AVB_RXD5_MARK,
+ AVB_RXD6_MARK, AVB_RXD7_MARK,
+
+ AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+ AVB_CRS_MARK, AVB_GTX_CLK_MARK, AVB_GTXREFCLK_MARK,
+ AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK,
+ AVB_COL_MARK,
+};
/* - DU RGB ----------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -3823,6 +3898,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clkout_b),
SH_PFC_PIN_GROUP(audio_clkout_c),
SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_gmii),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -4101,6 +4182,15 @@ static const char * const audio_clk_groups[] = {
"audio_clkout_d",
};
+static const char * const avb_groups[] = {
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mdio",
+ "avb_mii",
+ "avb_gmii",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -4507,6 +4597,7 @@ static const char * const vin3_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(du),
SH_PFC_FUNCTION(du0),
SH_PFC_FUNCTION(du1),
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c83728626906..c7508d5f6886 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -69,9 +69,10 @@ struct pinmux_func {
};
struct pinmux_cfg_reg {
- unsigned long reg, reg_width, field_width;
+ u32 reg;
+ u8 reg_width, field_width;
const u16 *enum_ids;
- const unsigned long *var_field_width;
+ const u8 *var_field_width;
};
#define PINMUX_CFG_REG(name, r, r_width, f_width) \
@@ -80,12 +81,13 @@ struct pinmux_cfg_reg {
#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
.reg = r, .reg_width = r_width, \
- .var_field_width = (const unsigned long [r_width]) \
+ .var_field_width = (const u8 [r_width]) \
{ var_fw0, var_fwn, 0 }, \
.enum_ids = (const u16 [])
struct pinmux_data_reg {
- unsigned long reg, reg_width;
+ u32 reg;
+ u8 reg_width;
const u16 *enum_ids;
};
@@ -148,7 +150,7 @@ struct sh_pfc_soc_info {
const struct pinmux_irq *gpio_irq;
unsigned int gpio_irq_size;
- unsigned long unlock_reg;
+ u32 unlock_reg;
};
/* -----------------------------------------------------------------------------
@@ -302,20 +304,21 @@ struct sh_pfc_soc_info {
/*
* PORTnCR macro
*/
-#define _PCRH(in, in_pd, in_pu, out) \
- 0, (out), (in), 0, \
- 0, 0, 0, 0, \
- 0, 0, (in_pd), 0, \
- 0, 0, (in_pu), 0
-
#define PORTCR(nr, reg) \
{ \
- PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
- _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
- PORT##nr##_FN0, PORT##nr##_FN1, \
- PORT##nr##_FN2, PORT##nr##_FN3, \
- PORT##nr##_FN4, PORT##nr##_FN5, \
- PORT##nr##_FN6, PORT##nr##_FN7 } \
+ PINMUX_CFG_REG_VAR("PORT" nr "CR", reg, 8, 2, 2, 1, 3) {\
+ /* PULMD[1:0], handled by .set_bias() */ \
+ 0, 0, 0, 0, \
+ /* IE and OE */ \
+ 0, PORT##nr##_OUT, PORT##nr##_IN, 0, \
+ /* SEC, not supported */ \
+ 0, 0, \
+ /* PTMD[2:0] */ \
+ PORT##nr##_FN0, PORT##nr##_FN1, \
+ PORT##nr##_FN2, PORT##nr##_FN3, \
+ PORT##nr##_FN4, PORT##nr##_FN5, \
+ PORT##nr##_FN6, PORT##nr##_FN7 \
+ } \
}
#endif /* __SH_PFC_H */
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 2a1f07249b2f..e2efbbae4061 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -39,10 +39,9 @@ struct sirfsoc_gpio_bank {
struct sirfsoc_gpio_chip {
struct of_mm_gpio_chip chip;
struct sirfsoc_gpio_bank sgpio_bank[SIRFSOC_GPIO_NO_OF_BANKS];
+ spinlock_t lock;
};
-static DEFINE_SPINLOCK(sgpio_lock);
-
static struct sirfsoc_pin_group *sirfsoc_pin_groups;
static int sirfsoc_pingrp_cnt;
@@ -427,13 +426,13 @@ static void sirfsoc_gpio_irq_ack(struct irq_data *d)
offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
- spin_lock_irqsave(&sgpio_lock, flags);
+ spin_lock_irqsave(&sgpio->lock, flags);
val = readl(sgpio->chip.regs + offset);
writel(val, sgpio->chip.regs + offset);
- spin_unlock_irqrestore(&sgpio_lock, flags);
+ spin_unlock_irqrestore(&sgpio->lock, flags);
}
static void __sirfsoc_gpio_irq_mask(struct sirfsoc_gpio_chip *sgpio,
@@ -445,14 +444,14 @@ static void __sirfsoc_gpio_irq_mask(struct sirfsoc_gpio_chip *sgpio,
offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
- spin_lock_irqsave(&sgpio_lock, flags);
+ spin_lock_irqsave(&sgpio->lock, flags);
val = readl(sgpio->chip.regs + offset);
val &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK;
val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
writel(val, sgpio->chip.regs + offset);
- spin_unlock_irqrestore(&sgpio_lock, flags);
+ spin_unlock_irqrestore(&sgpio->lock, flags);
}
static void sirfsoc_gpio_irq_mask(struct irq_data *d)
@@ -475,14 +474,14 @@ static void sirfsoc_gpio_irq_unmask(struct irq_data *d)
offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
- spin_lock_irqsave(&sgpio_lock, flags);
+ spin_lock_irqsave(&sgpio->lock, flags);
val = readl(sgpio->chip.regs + offset);
val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
val |= SIRFSOC_GPIO_CTL_INTR_EN_MASK;
writel(val, sgpio->chip.regs + offset);
- spin_unlock_irqrestore(&sgpio_lock, flags);
+ spin_unlock_irqrestore(&sgpio->lock, flags);
}
static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
@@ -496,7 +495,7 @@ static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
- spin_lock_irqsave(&sgpio_lock, flags);
+ spin_lock_irqsave(&sgpio->lock, flags);
val = readl(sgpio->chip.regs + offset);
val &= ~(SIRFSOC_GPIO_CTL_INTR_STS_MASK | SIRFSOC_GPIO_CTL_OUT_EN_MASK);
@@ -533,7 +532,7 @@ static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
writel(val, sgpio->chip.regs + offset);
- spin_unlock_irqrestore(&sgpio_lock, flags);
+ spin_unlock_irqrestore(&sgpio->lock, flags);
return 0;
}
@@ -568,7 +567,7 @@ static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
status = readl(sgpio->chip.regs + SIRFSOC_GPIO_INT_STATUS(bank->id));
if (!status) {
printk(KERN_WARNING
- "%s: gpio id %d status %#x no interrupt is flaged\n",
+ "%s: gpio id %d status %#x no interrupt is flagged\n",
__func__, bank->id, status);
handle_bad_irq(irq, desc);
return;
@@ -697,11 +696,11 @@ static int sirfsoc_gpio_direction_output(struct gpio_chip *chip,
offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
- spin_lock_irqsave(&sgpio_lock, flags);
+ spin_lock_irqsave(&sgpio->lock, flags);
sirfsoc_gpio_set_output(sgpio, bank, offset, value);
- spin_unlock_irqrestore(&sgpio_lock, flags);
+ spin_unlock_irqrestore(&sgpio->lock, flags);
return 0;
}
@@ -793,6 +792,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
sgpio = devm_kzalloc(&pdev->dev, sizeof(*sgpio), GFP_KERNEL);
if (!sgpio)
return -ENOMEM;
+ spin_lock_init(&sgpio->lock);
regs = of_iomap(np, 0);
if (!regs)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 3c68a8e5e0dd..7376a97b5e65 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1020,7 +1020,7 @@ static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
&sun4i_a10_pinctrl_data);
}
-static struct of_device_id sun4i_a10_pinctrl_match[] = {
+static const struct of_device_id sun4i_a10_pinctrl_match[] = {
{ .compatible = "allwinner,sun4i-a10-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
index 45a351affa59..63676617bc59 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
@@ -670,7 +670,7 @@ static int sun5i_a10s_pinctrl_probe(struct platform_device *pdev)
&sun5i_a10s_pinctrl_data);
}
-static struct of_device_id sun5i_a10s_pinctrl_match[] = {
+static const struct of_device_id sun5i_a10s_pinctrl_match[] = {
{ .compatible = "allwinner,sun5i-a10s-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
index 4bd23471412c..2bb07b38834f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
@@ -388,7 +388,7 @@ static int sun5i_a13_pinctrl_probe(struct platform_device *pdev)
&sun5i_a13_pinctrl_data);
}
-static struct of_device_id sun5i_a13_pinctrl_match[] = {
+static const struct of_device_id sun5i_a13_pinctrl_match[] = {
{ .compatible = "allwinner,sun5i-a13-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index 02174fa57997..9596b0a3df6b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -120,7 +120,7 @@ static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
return ret;
}
-static struct of_device_id sun6i_a31_r_pinctrl_match[] = {
+static const struct of_device_id sun6i_a31_r_pinctrl_match[] = {
{ .compatible = "allwinner,sun6i-a31-r-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 18038f0d6b52..022863ab0c58 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -922,7 +922,7 @@ static int sun6i_a31_pinctrl_probe(struct platform_device *pdev)
&sun6i_a31_pinctrl_data);
}
-static struct of_device_id sun6i_a31_pinctrl_match[] = {
+static const struct of_device_id sun6i_a31_pinctrl_match[] = {
{ .compatible = "allwinner,sun6i-a31-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
index 9b5a91f610c7..d3725dcd6979 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
@@ -794,7 +794,7 @@ static int sun6i_a31s_pinctrl_probe(struct platform_device *pdev)
&sun6i_a31s_pinctrl_data);
}
-static struct of_device_id sun6i_a31s_pinctrl_match[] = {
+static const struct of_device_id sun6i_a31s_pinctrl_match[] = {
{ .compatible = "allwinner,sun6i-a31s-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
index 6af6cc8547b0..cf1ce0c02600 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
@@ -1045,7 +1045,7 @@ static int sun7i_a20_pinctrl_probe(struct platform_device *pdev)
&sun7i_a20_pinctrl_data);
}
-static struct of_device_id sun7i_a20_pinctrl_match[] = {
+static const struct of_device_id sun7i_a20_pinctrl_match[] = {
{ .compatible = "allwinner,sun7i-a20-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 327e03ff7c4d..056287635873 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -119,7 +119,7 @@ static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
return ret;
}
-static struct of_device_id sun8i_a23_r_pinctrl_match[] = {
+static const struct of_device_id sun8i_a23_r_pinctrl_match[] = {
{ .compatible = "allwinner,sun8i-a23-r-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index 62695c9a92c2..55083d278bb1 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -571,7 +571,7 @@ static int sun8i_a23_pinctrl_probe(struct platform_device *pdev)
&sun8i_a23_pinctrl_data);
}
-static struct of_device_id sun8i_a23_pinctrl_match[] = {
+static const struct of_device_id sun8i_a23_pinctrl_match[] = {
{ .compatible = "allwinner,sun8i-a23-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index adb29422efc9..1b580ba76453 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -729,7 +729,7 @@ static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
&sun9i_a80_pinctrl_data);
}
-static struct of_device_id sun9i_a80_pinctrl_match[] = {
+static const struct of_device_id sun9i_a80_pinctrl_match[] = {
{ .compatible = "allwinner,sun9i-a80-pinctrl", },
{}
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-vt8500.c b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
index cf8bbc946ff7..ca946b3dbdb4 100644
--- a/drivers/pinctrl/vt8500/pinctrl-vt8500.c
+++ b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
@@ -478,7 +478,7 @@ static int vt8500_pinctrl_remove(struct platform_device *pdev)
return wmt_pinctrl_remove(pdev);
}
-static struct of_device_id wmt_pinctrl_of_match[] = {
+static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "via,vt8500-pinctrl" },
{ /* sentinel */ },
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8505.c b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
index 3f9c32dcb3d0..626fc7ec0174 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8505.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
@@ -509,7 +509,7 @@ static int wm8505_pinctrl_remove(struct platform_device *pdev)
return wmt_pinctrl_remove(pdev);
}
-static struct of_device_id wmt_pinctrl_of_match[] = {
+static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8505-pinctrl" },
{ /* sentinel */ },
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8650.c b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
index 4e80f98c2ba7..8953aba8bfc2 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8650.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
@@ -347,7 +347,7 @@ static int wm8650_pinctrl_remove(struct platform_device *pdev)
return wmt_pinctrl_remove(pdev);
}
-static struct of_device_id wmt_pinctrl_of_match[] = {
+static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8650-pinctrl" },
{ /* sentinel */ },
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
index 47b52a7cacac..c79053d430db 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8750.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
@@ -386,7 +386,7 @@ static int wm8750_pinctrl_remove(struct platform_device *pdev)
return wmt_pinctrl_remove(pdev);
}
-static struct of_device_id wmt_pinctrl_of_match[] = {
+static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8750-pinctrl" },
{ /* sentinel */ },
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8850.c b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
index 8bbb38c931f6..f232b163c735 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8850.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
@@ -365,7 +365,7 @@ static int wm8850_pinctrl_remove(struct platform_device *pdev)
return wmt_pinctrl_remove(pdev);
}
-static struct of_device_id wmt_pinctrl_of_match[] = {
+static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8850-pinctrl" },
{ /* sentinel */ },
};
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 09fde58b12e0..0adccbf5c83f 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,6 +1,9 @@
if X86
source "drivers/platform/x86/Kconfig"
endif
+if MIPS
+source "drivers/platform/mips/Kconfig"
+endif
if GOLDFISH
source "drivers/platform/goldfish/Kconfig"
endif
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 3656b7b17b99..ca2692510733 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 440ed776efd4..2a6531a5fde8 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,7 @@
menuconfig CHROME_PLATFORMS
bool "Platform support for Chrome hardware"
- depends on X86
+ depends on X86 || ARM
---help---
Say Y here to get to see options for platform support for
various Chromebooks and Chromeboxes. This option alone does
@@ -16,8 +16,7 @@ if CHROME_PLATFORMS
config CHROMEOS_LAPTOP
tristate "Chrome OS Laptop"
- depends on I2C
- depends on DMI
+ depends on I2C && DMI && X86
---help---
This driver instantiates i2c and smbus devices such as
light sensors and touchpads.
@@ -27,6 +26,7 @@ config CHROMEOS_LAPTOP
config CHROMEOS_PSTORE
tristate "Chrome OS pstore support"
+ depends on X86
---help---
This module instantiates the persistent storage on x86 ChromeOS
devices. It can be used to store away console logs and crash
@@ -38,5 +38,25 @@ config CHROMEOS_PSTORE
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_pstore.
+config CROS_EC_CHARDEV
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ ---help---
+ This driver adds support for talking to the ChromeOS EC from userspace.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
+
+config CROS_EC_LPC
+ tristate "ChromeOS Embedded Controller (LPC)"
+ depends on MFD_CROS_EC && (X86 || COMPILE_TEST)
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ over an LPC bus. This uses a simple byte-level protocol with a
+ checksum. This is used for userspace access only. The kernel
+ typically has its own communication methods.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_lpc.
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 2b860ca7450f..bd8d8601e875 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,3 +1,6 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
+cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o cros_ec_lightbar.o
+obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index b84fdd6b629b..a04019ab9feb 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -133,12 +133,13 @@ static struct i2c_client *__add_probed_i2c_device(
const char *name,
int bus,
struct i2c_board_info *info,
- const unsigned short *addrs)
+ const unsigned short *alt_addr_list)
{
const struct dmi_device *dmi_dev;
const struct dmi_dev_onboard *dev_data;
struct i2c_adapter *adapter;
- struct i2c_client *client;
+ struct i2c_client *client = NULL;
+ const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
if (bus < 0)
return NULL;
@@ -169,8 +170,28 @@ static struct i2c_client *__add_probed_i2c_device(
return NULL;
}
- /* add the i2c device */
- client = i2c_new_probed_device(adapter, info, addrs, NULL);
+ /*
+ * Add the i2c device. If we can't detect it at the primary
+ * address, we scan the secondary addresses. In any case the
+ * client structure gets assigned the primary address.
+ */
+ client = i2c_new_probed_device(adapter, info, addr_list, NULL);
+ if (!client && alt_addr_list) {
+ struct i2c_board_info dummy_info = {
+ I2C_BOARD_INFO("dummy", info->addr),
+ };
+ struct i2c_client *dummy;
+
+ dummy = i2c_new_probed_device(adapter, &dummy_info,
+ alt_addr_list, NULL);
+ if (dummy) {
+ pr_debug("%s %d-%02x is probed at %02x\n",
+ __func__, bus, info->addr, dummy->addr);
+ i2c_unregister_device(dummy);
+ client = i2c_new_device(adapter, info);
+ }
+ }
+
if (!client)
pr_notice("%s failed to register device %d-%02x\n",
__func__, bus, info->addr);
@@ -254,12 +275,10 @@ static struct i2c_client *add_i2c_device(const char *name,
enum i2c_adapter_type type,
struct i2c_board_info *info)
{
- const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
-
return __add_probed_i2c_device(name,
find_i2c_adapter_num(type),
info,
- addr_list);
+ NULL);
}
static int setup_cyapa_tp(enum i2c_adapter_type type)
@@ -275,7 +294,6 @@ static int setup_cyapa_tp(enum i2c_adapter_type type)
static int setup_atmel_224s_tp(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
- ATMEL_TP_I2C_ADDR,
I2C_CLIENT_END };
if (tp)
return 0;
@@ -289,7 +307,6 @@ static int setup_atmel_224s_tp(enum i2c_adapter_type type)
static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
- ATMEL_TS_I2C_ADDR,
I2C_CLIENT_END };
if (ts)
return 0;
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
new file mode 100644
index 000000000000..6090d0b2826f
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -0,0 +1,274 @@
+/*
+ * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include "cros_ec_dev.h"
+
+/* Device variables */
+#define CROS_MAX_DEV 128
+static struct class *cros_class;
+static int ec_major;
+
+/* Basic communication */
+static int ec_get_version(struct cros_ec_device *ec, char *str, int maxlen)
+{
+ struct ec_response_get_version *resp;
+ static const char * const current_image_name[] = {
+ "unknown", "read-only", "read-write", "invalid",
+ };
+ struct cros_ec_command msg = {
+ .version = 0,
+ .command = EC_CMD_GET_VERSION,
+ .outdata = { 0 },
+ .outsize = 0,
+ .indata = { 0 },
+ .insize = sizeof(*resp),
+ };
+ int ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS) {
+ snprintf(str, maxlen,
+ "%s\nUnknown EC version: EC returned %d\n",
+ CROS_EC_DEV_VERSION, msg.result);
+ return 0;
+ }
+
+ resp = (struct ec_response_get_version *)msg.indata;
+ if (resp->current_image >= ARRAY_SIZE(current_image_name))
+ resp->current_image = 3; /* invalid */
+
+ snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION,
+ resp->version_string_ro, resp->version_string_rw,
+ current_image_name[resp->current_image]);
+
+ return 0;
+}
+
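
The same cros_ec_command pattern generalizes to other host commands; below is a minimal kernel-side sketch, assuming the includes already present in this file and treating the EC_CMD_HELLO parameter/response field names as assumptions rather than verified API.

/* Minimal sketch of driving cros_ec_cmd_xfer() with another host command,
 * following the same pattern as ec_get_version() above. EC_CMD_HELLO and
 * the in_data/out_data field names are assumptions from cros_ec_commands.h. */
static int ec_hello(struct cros_ec_device *ec, u32 pattern, u32 *echoed)
{
	struct ec_params_hello *req;
	struct ec_response_hello *resp;
	struct cros_ec_command msg = {
		.version = 0,
		.command = EC_CMD_HELLO,
		.outsize = sizeof(*req),
		.insize = sizeof(*resp),
	};
	int ret;

	req = (struct ec_params_hello *)msg.outdata;
	req->in_data = pattern;

	ret = cros_ec_cmd_xfer(ec, &msg);
	if (ret < 0)
		return ret;
	if (msg.result != EC_RES_SUCCESS)
		return -EIO;

	resp = (struct ec_response_hello *)msg.indata;
	*echoed = resp->out_data;	/* the EC echoes a value derived from the pattern */
	return 0;
}
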
+/* Device file ops */
+static int ec_device_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = container_of(inode->i_cdev,
+ struct cros_ec_device, cdev);
+ return 0;
+}
+
+static int ec_device_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static ssize_t ec_device_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct cros_ec_device *ec = filp->private_data;
+ char msg[sizeof(struct ec_response_get_version) +
+ sizeof(CROS_EC_DEV_VERSION)];
+ size_t count;
+ int ret;
+
+ if (*offset != 0)
+ return 0;
+
+ ret = ec_get_version(ec, msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ count = min(length, strlen(msg));
+
+ if (copy_to_user(buffer, msg, count))
+ return -EFAULT;
+
+ *offset = count;
+ return count;
+}
+
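
From user space, the read path above can be exercised with an ordinary read of the character device; a minimal sketch follows, assuming the node appears as /dev/cros_ec (derived from CROS_EC_DEV_NAME).

/* Minimal sketch: read the version banner produced by ec_get_version().
 * The device node path is an assumption. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char buf[256];
	size_t n;
	FILE *f = fopen("/dev/cros_ec", "r");

	if (!f) {
		perror("/dev/cros_ec");
		return EXIT_FAILURE;
	}

	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	fputs(buf, stdout);	/* DEV_VERSION, RO version, RW version, image name */

	fclose(f);
	return EXIT_SUCCESS;
}
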
+/* Ioctls */
+static long ec_device_ioctl_xcmd(struct cros_ec_device *ec, void __user *arg)
+{
+ long ret;
+ struct cros_ec_command s_cmd = { };
+
+ if (copy_from_user(&s_cmd, arg, sizeof(s_cmd)))
+ return -EFAULT;
+
+ ret = cros_ec_cmd_xfer(ec, &s_cmd);
+ /* Only copy data to userland if data was received. */
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(arg, &s_cmd, sizeof(s_cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long ec_device_ioctl_readmem(struct cros_ec_device *ec, void __user *arg)
+{
+ struct cros_ec_readmem s_mem = { };
+ long num;
+
+ /* Not every platform supports direct reads */
+ if (!ec->cmd_readmem)
+ return -ENOTTY;
+
+ if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
+ return -EFAULT;
+
+ num = ec->cmd_readmem(ec, s_mem.offset, s_mem.bytes, s_mem.buffer);
+ if (num <= 0)
+ return num;
+
+ if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long ec_device_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct cros_ec_device *ec = filp->private_data;
+
+ if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case CROS_EC_DEV_IOCXCMD:
+ return ec_device_ioctl_xcmd(ec, (void __user *)arg);
+ case CROS_EC_DEV_IOCRDMEM:
+ return ec_device_ioctl_readmem(ec, (void __user *)arg);
+ }
+
+ return -ENOTTY;
+}
+
+/* Module initialization */
+static const struct file_operations fops = {
+ .open = ec_device_open,
+ .release = ec_device_release,
+ .read = ec_device_read,
+ .unlocked_ioctl = ec_device_ioctl,
+};
+
+static int ec_device_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ int retval = -ENOTTY;
+ dev_t devno = MKDEV(ec_major, 0);
+
+ /* Instantiate it (and remember the EC) */
+ cdev_init(&ec->cdev, &fops);
+
+ retval = cdev_add(&ec->cdev, devno, 1);
+ if (retval) {
+ dev_err(&pdev->dev, ": failed to add character device\n");
+ return retval;
+ }
+
+ ec->vdev = device_create(cros_class, NULL, devno, ec,
+ CROS_EC_DEV_NAME);
+ if (IS_ERR(ec->vdev)) {
+ retval = PTR_ERR(ec->vdev);
+ dev_err(&pdev->dev, ": failed to create device\n");
+ cdev_del(&ec->cdev);
+ return retval;
+ }
+
+ /* Initialize extra interfaces */
+ ec_dev_sysfs_init(ec);
+ ec_dev_lightbar_init(ec);
+
+ return 0;
+}
+
+static int ec_device_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ ec_dev_lightbar_remove(ec);
+ ec_dev_sysfs_remove(ec);
+ device_destroy(cros_class, MKDEV(ec_major, 0));
+ cdev_del(&ec->cdev);
+ return 0;
+}
+
+static struct platform_driver cros_ec_dev_driver = {
+ .driver = {
+ .name = "cros-ec-ctl",
+ },
+ .probe = ec_device_probe,
+ .remove = ec_device_remove,
+};
+
+static int __init cros_ec_dev_init(void)
+{
+ int ret;
+ dev_t dev = 0;
+
+ cros_class = class_create(THIS_MODULE, "chromeos");
+ if (IS_ERR(cros_class)) {
+ pr_err(CROS_EC_DEV_NAME ": failed to register device class\n");
+ return PTR_ERR(cros_class);
+ }
+
+ /* Get a range of minor numbers (starting with 0) to work with */
+ ret = alloc_chrdev_region(&dev, 0, CROS_MAX_DEV, CROS_EC_DEV_NAME);
+ if (ret < 0) {
+ pr_err(CROS_EC_DEV_NAME ": alloc_chrdev_region() failed\n");
+ goto failed_chrdevreg;
+ }
+ ec_major = MAJOR(dev);
+
+ /* Register the driver */
+ ret = platform_driver_register(&cros_ec_dev_driver);
+ if (ret < 0) {
+ pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret);
+ goto failed_devreg;
+ }
+ return 0;
+
+failed_devreg:
+ unregister_chrdev_region(MKDEV(ec_major, 0), CROS_MAX_DEV);
+failed_chrdevreg:
+ class_destroy(cros_class);
+ return ret;
+}
+
+static void __exit cros_ec_dev_exit(void)
+{
+ platform_driver_unregister(&cros_ec_dev_driver);
+ unregister_chrdev(ec_major, CROS_EC_DEV_NAME);
+ class_destroy(cros_class);
+}
+
+module_init(cros_ec_dev_init);
+module_exit(cros_ec_dev_exit);
+
+MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
+MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/platform/chrome/cros_ec_dev.h
new file mode 100644
index 000000000000..45d67f7e518c
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_dev.h
@@ -0,0 +1,53 @@
+/*
+ * cros_ec_dev - expose the Chrome OS Embedded Controller to userspace
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CROS_EC_DEV_H_
+#define _CROS_EC_DEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/mfd/cros_ec.h>
+
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_VERSION "1.0.0"
+
+/*
+ * @offset: within EC_LPC_ADDR_MEMMAP region
+ * @bytes: number of bytes to read. zero means "read a string" (including '\0')
+ * (at most EC_MEMMAP_SIZE bytes can be read)
+ * @buffer: where to store the result
+ * ioctl returns the number of bytes read, negative on error
+ */
+struct cros_ec_readmem {
+ uint32_t offset;
+ uint32_t bytes;
+ uint8_t buffer[EC_MEMMAP_SIZE];
+};
+
+#define CROS_EC_DEV_IOC 0xEC
+#define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command)
+#define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)
+
+void ec_dev_sysfs_init(struct cros_ec_device *);
+void ec_dev_sysfs_remove(struct cros_ec_device *);
+
+void ec_dev_lightbar_init(struct cros_ec_device *);
+void ec_dev_lightbar_remove(struct cros_ec_device *);
+
+#endif /* _CROS_EC_DEV_H_ */
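
A minimal user-space sketch of the readmem ioctl declared above; the /dev/cros_ec path is an assumption, and the struct/ioctl definitions are mirrored locally (EC_MEMMAP_SIZE is assumed to be 255 and must match the kernel's value for the ioctl number to agree).

/* Minimal sketch: read two bytes of the EC memory map via
 * CROS_EC_DEV_IOCRDMEM. Per the comment above, bytes == 0 would instead
 * read a NUL-terminated string. */
#include <fcntl.h>
#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Local mirror of the definitions above; EC_MEMMAP_SIZE is an assumption. */
#define EC_MEMMAP_SIZE 255
struct cros_ec_readmem {
	uint32_t offset;
	uint32_t bytes;
	uint8_t buffer[EC_MEMMAP_SIZE];
};
#define CROS_EC_DEV_IOC      0xEC
#define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)

int main(void)
{
	struct cros_ec_readmem mem;
	int fd = open("/dev/cros_ec", O_RDWR);

	if (fd < 0) {
		perror("/dev/cros_ec");
		return 1;
	}

	memset(&mem, 0, sizeof(mem));
	mem.offset = 0;	/* start of the EC_LPC_ADDR_MEMMAP region */
	mem.bytes = 2;	/* fixed-length read of two bytes */

	if (ioctl(fd, CROS_EC_DEV_IOCRDMEM, &mem) < 0)
		perror("CROS_EC_DEV_IOCRDMEM");
	else
		printf("memmap[0..1] = %02x %02x\n", mem.buffer[0], mem.buffer[1]);

	close(fd);
	return 0;
}
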
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
new file mode 100644
index 000000000000..b4ff47a9069a
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -0,0 +1,367 @@
+/*
+ * cros_ec_lightbar - expose the Chromebook Pixel lightbar to userspace
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "cros_ec_lightbar: " fmt
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "cros_ec_dev.h"
+
+/* Rate-limit the lightbar interface to prevent DoS. */
+static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
+
+static ssize_t interval_msec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long msec = lb_interval_jiffies * 1000 / HZ;
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", msec);
+}
+
+static ssize_t interval_msec_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long msec;
+
+ if (kstrtoul(buf, 0, &msec))
+ return -EINVAL;
+
+ lb_interval_jiffies = msec * HZ / 1000;
+
+ return count;
+}
+
+static DEFINE_MUTEX(lb_mutex);
+/* Return 0 if able to throttle correctly, error otherwise */
+static int lb_throttle(void)
+{
+ static unsigned long last_access;
+ unsigned long now, next_timeslot;
+ long delay;
+ int ret = 0;
+
+ mutex_lock(&lb_mutex);
+
+ now = jiffies;
+ next_timeslot = last_access + lb_interval_jiffies;
+
+ if (time_before(now, next_timeslot)) {
+ delay = (long)(next_timeslot) - (long)now;
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(delay) > 0) {
+ /* interrupted - just abort */
+ ret = -EINTR;
+ goto out;
+ }
+ now = jiffies;
+ }
+
+ last_access = now;
+out:
+ mutex_unlock(&lb_mutex);
+
+ return ret;
+}
+
+#define INIT_MSG(P, R) { \
+ .command = EC_CMD_LIGHTBAR_CMD, \
+ .outsize = sizeof(*P), \
+ .insize = sizeof(*R), \
+ }
+
+static int get_lightbar_version(struct cros_ec_device *ec,
+ uint32_t *ver_ptr, uint32_t *flg_ptr)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ int ret;
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_VERSION;
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return 0;
+
+ switch (msg.result) {
+ case EC_RES_INVALID_PARAM:
+ /* Pixel had no version command. */
+ if (ver_ptr)
+ *ver_ptr = 0;
+ if (flg_ptr)
+ *flg_ptr = 0;
+ return 1;
+
+ case EC_RES_SUCCESS:
+ resp = (struct ec_response_lightbar *)msg.indata;
+
+ /* Future devices w/lightbars should implement this command */
+ if (ver_ptr)
+ *ver_ptr = resp->version.num;
+ if (flg_ptr)
+ *flg_ptr = resp->version.flags;
+ return 1;
+ }
+
+ /* Anything else (i.e. EC_RES_INVALID_COMMAND) - no lightbar */
+ return 0;
+}
+
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ uint32_t version, flags;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ /* This should always succeed, because we check during init. */
+ if (!get_lightbar_version(ec, &version, &flags))
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "%d %d\n", version, flags);
+}
+
+static ssize_t brightness_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ int ret;
+ unsigned int val;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_BRIGHTNESS;
+ param->brightness.num = val;
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return -EINVAL;
+
+ return count;
+}
+
+/*
+ * We expect numbers, and we'll keep reading until we find them, skipping over
+ * any whitespace (sysfs guarantees that the input is null-terminated). Every
+ * four numbers are sent to the lightbar as <LED,R,G,B>. We fail at the first
+ * parsing error, if we don't parse any numbers, or if we have numbers left
+ * over.
+ */
+static ssize_t led_rgb_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+ unsigned int val[4];
+ int ret, i = 0, j = 0, ok = 0;
+
+ do {
+ /* Skip any whitespace */
+ while (*buf && isspace(*buf))
+ buf++;
+
+ if (!*buf)
+ break;
+
+ ret = sscanf(buf, "%i", &val[i++]);
+ if (ret == 0)
+ return -EINVAL;
+
+ if (i == 4) {
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_RGB;
+ param->rgb.led = val[0];
+ param->rgb.red = val[1];
+ param->rgb.green = val[2];
+ param->rgb.blue = val[3];
+ /*
+ * Throttle only the first of every four transactions,
+ * so that the user can update all four LEDs at once.
+ */
+ if ((j++ % 4) == 0) {
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+ }
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return -EINVAL;
+
+ i = 0;
+ ok = 1;
+ }
+
+ /* Skip over the number we just read */
+ while (*buf && !isspace(*buf))
+ buf++;
+
+ } while (*buf);
+
+ return (ok && i == 0) ? count : -EINVAL;
+}
+
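
A minimal user-space sketch of the <LED,R,G,B> input format that led_rgb_store() parses; the sysfs path (class "chromeos", device "cros_ec", group "lightbar") and the LED indices 0-3 are assumptions.

/* Minimal sketch: set four lightbar LEDs to full red by writing four
 * <LED,R,G,B> tuples to the led_rgb attribute. Path and LED indices are
 * assumptions for illustration. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/chromeos/cros_ec/lightbar/led_rgb";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}

	/* Parsed in groups of four: LED index, then R, G, B. */
	fprintf(f, "0 255 0 0  1 255 0 0  2 255 0 0  3 255 0 0\n");

	fclose(f);
	return 0;
}
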
+static char const *seqname[] = {
+ "ERROR", "S5", "S3", "S0", "S5S3", "S3S0",
+ "S0S3", "S3S5", "STOP", "RUN", "PULSE", "TEST", "KONAMI",
+};
+
+static ssize_t sequence_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ int ret;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_GET_SEQ;
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return scnprintf(buf, PAGE_SIZE,
+ "ERROR: EC returned %d\n", msg.result);
+
+ resp = (struct ec_response_lightbar *)msg.indata;
+ if (resp->get_seq.num >= ARRAY_SIZE(seqname))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", resp->get_seq.num);
+ else
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ seqname[resp->get_seq.num]);
+}
+
+static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ unsigned int num;
+ int ret, len;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ for (len = 0; len < count; len++)
+ if (!isalnum(buf[len]))
+ break;
+
+ for (num = 0; num < ARRAY_SIZE(seqname); num++)
+ if (!strncasecmp(seqname[num], buf, len))
+ break;
+
+ if (num >= ARRAY_SIZE(seqname)) {
+ ret = kstrtouint(buf, 0, &num);
+ if (ret)
+ return ret;
+ }
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_SEQ;
+ param->seq.num = num;
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return -EINVAL;
+
+ return count;
+}
+
+/* Module initialization */
+
+static DEVICE_ATTR_RW(interval_msec);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_WO(brightness);
+static DEVICE_ATTR_WO(led_rgb);
+static DEVICE_ATTR_RW(sequence);
+static struct attribute *__lb_cmds_attrs[] = {
+ &dev_attr_interval_msec.attr,
+ &dev_attr_version.attr,
+ &dev_attr_brightness.attr,
+ &dev_attr_led_rgb.attr,
+ &dev_attr_sequence.attr,
+ NULL,
+};
+static struct attribute_group lb_cmds_attr_group = {
+ .name = "lightbar",
+ .attrs = __lb_cmds_attrs,
+};
+
+void ec_dev_lightbar_init(struct cros_ec_device *ec)
+{
+ int ret = 0;
+
+ /* Only instantiate this stuff if the EC has a lightbar */
+ if (!get_lightbar_version(ec, NULL, NULL))
+ return;
+
+ ret = sysfs_create_group(&ec->vdev->kobj, &lb_cmds_attr_group);
+ if (ret)
+ pr_warn("sysfs_create_group() failed: %d\n", ret);
+}
+
+void ec_dev_lightbar_remove(struct cros_ec_device *ec)
+{
+ sysfs_remove_group(&ec->vdev->kobj, &lb_cmds_attr_group);
+}
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
new file mode 100644
index 000000000000..8f9ac4d7bbd0
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -0,0 +1,319 @@
+/*
+ * cros_ec_lpc - LPC access to the Chrome OS Embedded Controller
+ *
+ * Copyright (C) 2012-2015 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the Chrome OS EC byte-level, message-based host-command
+ * protocol for talking to the EC over the LPC bus (the same protocol is
+ * carried over i2c or spi on other platforms). The main motivation for such
+ * a simple protocol is to keep the EC firmware as simple as possible, since
+ * it cannot be easily upgraded and EC flash/IRAM space is relatively
+ * expensive.
+ */
+
+#include <linux/dmi.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#define DRV_NAME "cros_ec_lpc"
+
+static int ec_response_timed_out(void)
+{
+ unsigned long one_second = jiffies + HZ;
+
+ usleep_range(200, 300);
+ do {
+ if (!(inb(EC_LPC_ADDR_HOST_CMD) & EC_LPC_STATUS_BUSY_MASK))
+ return 0;
+ usleep_range(100, 200);
+ } while (time_before(jiffies, one_second));
+
+ return 1;
+}
+
+static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
+ struct cros_ec_command *msg)
+{
+ struct ec_lpc_host_args args;
+ int csum;
+ int i;
+ int ret = 0;
+
+ if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE ||
+ msg->insize > EC_PROTO2_MAX_PARAM_SIZE) {
+ dev_err(ec->dev,
+ "invalid buffer sizes (out %d, in %d)\n",
+ msg->outsize, msg->insize);
+ return -EINVAL;
+ }
+
+ /* Now actually send the command to the EC and get the result */
+ args.flags = EC_HOST_ARGS_FLAG_FROM_HOST;
+ args.command_version = msg->version;
+ args.data_size = msg->outsize;
+
+ /* Initialize checksum */
+ csum = msg->command + args.flags +
+ args.command_version + args.data_size;
+
+ /* Copy data and update checksum */
+ for (i = 0; i < msg->outsize; i++) {
+ outb(msg->outdata[i], EC_LPC_ADDR_HOST_PARAM + i);
+ csum += msg->outdata[i];
+ }
+
+ /* Finalize checksum and write args */
+ args.checksum = csum & 0xFF;
+ outb(args.flags, EC_LPC_ADDR_HOST_ARGS);
+ outb(args.command_version, EC_LPC_ADDR_HOST_ARGS + 1);
+ outb(args.data_size, EC_LPC_ADDR_HOST_ARGS + 2);
+ outb(args.checksum, EC_LPC_ADDR_HOST_ARGS + 3);
+
+ /* Here we go */
+ outb(msg->command, EC_LPC_ADDR_HOST_CMD);
+
+ if (ec_response_timed_out()) {
+ dev_warn(ec->dev, "EC responsed timed out\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ /* Check result */
+ msg->result = inb(EC_LPC_ADDR_HOST_DATA);
+
+ switch (msg->result) {
+ case EC_RES_SUCCESS:
+ break;
+ case EC_RES_IN_PROGRESS:
+ ret = -EAGAIN;
+ dev_dbg(ec->dev, "command 0x%02x in progress\n",
+ msg->command);
+ goto done;
+ default:
+ dev_dbg(ec->dev, "command 0x%02x returned %d\n",
+ msg->command, msg->result);
+ }
+
+ /* Read back args */
+ args.flags = inb(EC_LPC_ADDR_HOST_ARGS);
+ args.command_version = inb(EC_LPC_ADDR_HOST_ARGS + 1);
+ args.data_size = inb(EC_LPC_ADDR_HOST_ARGS + 2);
+ args.checksum = inb(EC_LPC_ADDR_HOST_ARGS + 3);
+
+ if (args.data_size > msg->insize) {
+ dev_err(ec->dev,
+ "packet too long (%d bytes, expected %d)",
+ args.data_size, msg->insize);
+ ret = -ENOSPC;
+ goto done;
+ }
+
+ /* Start calculating response checksum */
+ csum = msg->command + args.flags +
+ args.command_version + args.data_size;
+
+ /* Read response and update checksum */
+ for (i = 0; i < args.data_size; i++) {
+ msg->indata[i] = inb(EC_LPC_ADDR_HOST_PARAM + i);
+ csum += msg->indata[i];
+ }
+
+ /* Verify checksum */
+ if (args.checksum != (csum & 0xFF)) {
+ dev_err(ec->dev,
+ "bad packet checksum, expected %02x, got %02x\n",
+ args.checksum, csum & 0xFF);
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ /* Return actual amount of data received */
+ ret = args.data_size;
+done:
+ return ret;
+}
+
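
The checksum computed twice above (over the outgoing args plus payload, and again over the response) is a plain 8-bit sum; here is a stand-alone sketch of the same arithmetic, assuming the includes already in this file (the helper name is ours).

/* Sketch of the LPC host-command checksum: an 8-bit sum over the command
 * byte, the ec_lpc_host_args header fields and the payload bytes. It
 * mirrors the inline arithmetic in cros_ec_cmd_xfer_lpc(). */
static u8 cros_ec_lpc_checksum(u8 command, const struct ec_lpc_host_args *args,
			       const u8 *data, unsigned int size)
{
	unsigned int csum = command + args->flags +
			    args->command_version + args->data_size;
	unsigned int i;

	for (i = 0; i < size; i++)
		csum += data[i];

	return csum & 0xFF;	/* only the low byte is sent and compared */
}
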
+/* Returns num bytes read, or negative on error. Doesn't need locking. */
+static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest)
+{
+ int i = offset;
+ char *s = dest;
+ int cnt = 0;
+
+ if (offset >= EC_MEMMAP_SIZE - bytes)
+ return -EINVAL;
+
+ /* fixed length */
+ if (bytes) {
+ for (; cnt < bytes; i++, s++, cnt++)
+ *s = inb(EC_LPC_ADDR_MEMMAP + i);
+ return cnt;
+ }
+
+ /* string */
+ for (; i < EC_MEMMAP_SIZE; i++, s++) {
+ *s = inb(EC_LPC_ADDR_MEMMAP + i);
+ cnt++;
+ if (!*s)
+ break;
+ }
+
+ return cnt;
+}
+
+static int cros_ec_lpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_device *ec_dev;
+ int ret;
+
+ if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
+ dev_name(dev))) {
+ dev_err(dev, "couldn't reserve memmap region\n");
+ return -EBUSY;
+ }
+
+ if ((inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID) != 'E') ||
+ (inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID + 1) != 'C')) {
+ dev_err(dev, "EC ID not detected\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
+ EC_HOST_CMD_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve region0\n");
+ return -EBUSY;
+ }
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION1,
+ EC_HOST_CMD_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve region1\n");
+ return -EBUSY;
+ }
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ec_dev);
+ ec_dev->dev = dev;
+ ec_dev->ec_name = pdev->name;
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->parent = dev;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_lpc;
+ ec_dev->cmd_readmem = cros_ec_lpc_readmem;
+
+ ret = cros_ec_register(ec_dev);
+ if (ret) {
+ dev_err(dev, "couldn't register ec_dev (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_lpc_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec_dev;
+
+ ec_dev = platform_get_drvdata(pdev);
+ cros_ec_remove(ec_dev);
+
+ return 0;
+}
+
+static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = {
+ {
+ /*
+ * Today all Chromebooks/boxes ship with Google_* as version and
+ * coreboot as bios vendor. No other systems with this
+ * combination are known to date.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Google_"),
+ },
+ },
+ {
+ /* x86-link, the Chromebook Pixel. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ },
+ {
+ /* x86-peppy, the Acer C720 Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
+ },
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
+
+static struct platform_driver cros_ec_lpc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_lpc_probe,
+ .remove = cros_ec_lpc_remove,
+};
+
+static struct platform_device cros_ec_lpc_device = {
+ .name = DRV_NAME
+};
+
+static int __init cros_ec_lpc_init(void)
+{
+ int ret;
+
+ if (!dmi_check_system(cros_ec_lpc_dmi_table)) {
+ pr_err(DRV_NAME ": unsupported system.\n");
+ return -ENODEV;
+ }
+
+ /* Register the driver */
+ ret = platform_driver_register(&cros_ec_lpc_driver);
+ if (ret) {
+ pr_err(DRV_NAME ": can't register driver: %d\n", ret);
+ return ret;
+ }
+
+ /* Register the device, and it'll get hooked up automatically */
+ ret = platform_device_register(&cros_ec_lpc_device);
+ if (ret) {
+ pr_err(DRV_NAME ": can't register device: %d\n", ret);
+ platform_driver_unregister(&cros_ec_lpc_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit cros_ec_lpc_exit(void)
+{
+ platform_device_unregister(&cros_ec_lpc_device);
+ platform_driver_unregister(&cros_ec_lpc_driver);
+}
+
+module_init(cros_ec_lpc_init);
+module_exit(cros_ec_lpc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC LPC driver");
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
new file mode 100644
index 000000000000..fb62ab6cc659
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -0,0 +1,271 @@
+/*
+ * cros_ec_sysfs - expose the Chrome OS EC through sysfs
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "cros_ec_sysfs: " fmt
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "cros_ec_dev.h"
+
+/* Accessor functions */
+
+static ssize_t show_ec_reboot(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int count = 0;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "ro|rw|cancel|cold|disable-jump|hibernate");
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ " [at-shutdown]\n");
+ return count;
+}
+
+static ssize_t store_ec_reboot(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ static const struct {
+ const char * const str;
+ uint8_t cmd;
+ uint8_t flags;
+ } words[] = {
+ {"cancel", EC_REBOOT_CANCEL, 0},
+ {"ro", EC_REBOOT_JUMP_RO, 0},
+ {"rw", EC_REBOOT_JUMP_RW, 0},
+ {"cold", EC_REBOOT_COLD, 0},
+ {"disable-jump", EC_REBOOT_DISABLE_JUMP, 0},
+ {"hibernate", EC_REBOOT_HIBERNATE, 0},
+ {"at-shutdown", -1, EC_REBOOT_FLAG_ON_AP_SHUTDOWN},
+ };
+ struct cros_ec_command msg = { 0 };
+ struct ec_params_reboot_ec *param =
+ (struct ec_params_reboot_ec *)msg.outdata;
+ int got_cmd = 0, offset = 0;
+ int i;
+ int ret;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ param->flags = 0;
+ while (1) {
+ /* Find word to start scanning */
+ while (buf[offset] && isspace(buf[offset]))
+ offset++;
+ if (!buf[offset])
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(words); i++) {
+ if (!strncasecmp(words[i].str, buf+offset,
+ strlen(words[i].str))) {
+ if (words[i].flags) {
+ param->flags |= words[i].flags;
+ } else {
+ param->cmd = words[i].cmd;
+ got_cmd = 1;
+ }
+ break;
+ }
+ }
+
+ /* On to the next word, if any */
+ while (buf[offset] && !isspace(buf[offset]))
+ offset++;
+ }
+
+ if (!got_cmd)
+ return -EINVAL;
+
+ msg.command = EC_CMD_REBOOT_EC;
+ msg.outsize = sizeof(*param);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (msg.result != EC_RES_SUCCESS) {
+ dev_dbg(ec->dev, "EC result %d\n", msg.result);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t show_ec_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const image_names[] = {"unknown", "RO", "RW"};
+ struct ec_response_get_version *r_ver;
+ struct ec_response_get_chip_info *r_chip;
+ struct ec_response_board_version *r_board;
+ struct cros_ec_command msg = { 0 };
+ int ret;
+ int count = 0;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ /* Get versions. RW may change. */
+ msg.command = EC_CMD_GET_VERSION;
+ msg.insize = sizeof(*r_ver);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (msg.result != EC_RES_SUCCESS)
+ return scnprintf(buf, PAGE_SIZE,
+ "ERROR: EC returned %d\n", msg.result);
+
+ r_ver = (struct ec_response_get_version *)msg.indata;
+ /* Strings should be null-terminated, but let's be sure. */
+ r_ver->version_string_ro[sizeof(r_ver->version_string_ro) - 1] = '\0';
+ r_ver->version_string_rw[sizeof(r_ver->version_string_rw) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "RO version: %s\n", r_ver->version_string_ro);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "RW version: %s\n", r_ver->version_string_rw);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Firmware copy: %s\n",
+ (r_ver->current_image < ARRAY_SIZE(image_names) ?
+ image_names[r_ver->current_image] : "?"));
+
+ /* Get build info. */
+ msg.command = EC_CMD_GET_BUILD_INFO;
+ msg.insize = sizeof(msg.indata);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: XFER ERROR %d\n", ret);
+ else if (msg.result != EC_RES_SUCCESS)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: EC error %d\n", msg.result);
+ else {
+ msg.indata[sizeof(msg.indata) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: %s\n", msg.indata);
+ }
+
+ /* Get chip info. */
+ msg.command = EC_CMD_GET_CHIP_INFO;
+ msg.insize = sizeof(*r_chip);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip info: XFER ERROR %d\n", ret);
+ else if (msg.result != EC_RES_SUCCESS)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip info: EC error %d\n", msg.result);
+ else {
+ r_chip = (struct ec_response_get_chip_info *)msg.indata;
+
+ r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0';
+ r_chip->name[sizeof(r_chip->name) - 1] = '\0';
+ r_chip->revision[sizeof(r_chip->revision) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip vendor: %s\n", r_chip->vendor);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip name: %s\n", r_chip->name);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip revision: %s\n", r_chip->revision);
+ }
+
+ /* Get board version */
+ msg.command = EC_CMD_GET_BOARD_VERSION;
+ msg.insize = sizeof(*r_board);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: XFER ERROR %d\n", ret);
+ else if (msg.result != EC_RES_SUCCESS)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: EC error %d\n", msg.result);
+ else {
+ r_board = (struct ec_response_board_version *)msg.indata;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: %d\n",
+ r_board->board_version);
+ }
+
+ return count;
+}
+
+static ssize_t show_ec_flashinfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ec_response_flash_info *resp;
+ struct cros_ec_command msg = { 0 };
+ int ret;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ /* The flash info shouldn't ever change, but ask each time anyway. */
+ msg.command = EC_CMD_FLASH_INFO;
+ msg.insize = sizeof(*resp);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (msg.result != EC_RES_SUCCESS)
+ return scnprintf(buf, PAGE_SIZE,
+ "ERROR: EC returned %d\n", msg.result);
+
+ resp = (struct ec_response_flash_info *)msg.indata;
+
+ return scnprintf(buf, PAGE_SIZE,
+ "FlashSize %d\nWriteSize %d\n"
+ "EraseSize %d\nProtectSize %d\n",
+ resp->flash_size, resp->write_block_size,
+ resp->erase_block_size, resp->protect_block_size);
+}
+
+/* Module initialization */
+
+static DEVICE_ATTR(reboot, S_IWUSR | S_IRUGO, show_ec_reboot, store_ec_reboot);
+static DEVICE_ATTR(version, S_IRUGO, show_ec_version, NULL);
+static DEVICE_ATTR(flashinfo, S_IRUGO, show_ec_flashinfo, NULL);
+
+static struct attribute *__ec_attrs[] = {
+ &dev_attr_reboot.attr,
+ &dev_attr_version.attr,
+ &dev_attr_flashinfo.attr,
+ NULL,
+};
+
+static struct attribute_group ec_attr_group = {
+ .attrs = __ec_attrs,
+};
+
+void ec_dev_sysfs_init(struct cros_ec_device *ec)
+{
+ int error;
+
+ error = sysfs_create_group(&ec->vdev->kobj, &ec_attr_group);
+ if (error)
+ pr_warn("failed to create group: %d\n", error);
+}
+
+void ec_dev_sysfs_remove(struct cros_ec_device *ec)
+{
+ sysfs_remove_group(&ec->vdev->kobj, &ec_attr_group);
+}
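The reboot attribute created above accepts a whitespace-separated list of the words printed by show_ec_reboot(); store_ec_reboot() folds them into a single EC_CMD_REBOOT_EC request. A minimal userspace sketch that queues a jump to the RW image at the next AP shutdown (the sysfs path is an assumption and depends on how the EC class device is registered on a given system):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical path; adjust to the actual EC class device. */
            int fd = open("/sys/class/chromeos/cros_ec/reboot", O_WRONLY);
            const char *cmd = "rw at-shutdown";

            if (fd < 0)
                    return 1;
            if (write(fd, cmd, strlen(cmd)) < 0) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }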
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
new file mode 100644
index 000000000000..125e569017be
--- /dev/null
+++ b/drivers/platform/mips/Kconfig
@@ -0,0 +1,30 @@
+#
+# MIPS Platform Specific Drivers
+#
+
+menuconfig MIPS_PLATFORM_DEVICES
+ bool "MIPS Platform Specific Device Drivers"
+ default y
+ help
+ Say Y here to get to see options for device drivers of various
+ MIPS platforms, including vendor-specific netbook/laptop/desktop
+ extension and hardware monitor drivers. This option itself does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if MIPS_PLATFORM_DEVICES
+
+config MIPS_ACPI
+ bool
+ default y if LOONGSON_MACH3X
+
+config CPU_HWMON
+ tristate "Loongson CPU HWMon Driver"
+ depends on LOONGSON_MACH3X
+ select HWMON
+ default y
+ help
+ Loongson-3A/3B CPU Hwmon (temperature sensor) driver.
+
+endif # MIPS_PLATFORM_DEVICES
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
new file mode 100644
index 000000000000..43412849b195
--- /dev/null
+++ b/drivers/platform/mips/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MIPS_ACPI) += acpi_init.o
+obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
diff --git a/drivers/platform/mips/acpi_init.c b/drivers/platform/mips/acpi_init.c
new file mode 100644
index 000000000000..dbdad79ead8f
--- /dev/null
+++ b/drivers/platform/mips/acpi_init.c
@@ -0,0 +1,150 @@
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+
+#define SBX00_ACPI_IO_BASE 0x800
+#define SBX00_ACPI_IO_SIZE 0x100
+
+#define ACPI_PM_EVT_BLK (SBX00_ACPI_IO_BASE + 0x00) /* 4 bytes */
+#define ACPI_PM_CNT_BLK (SBX00_ACPI_IO_BASE + 0x04) /* 2 bytes */
+#define ACPI_PMA_CNT_BLK (SBX00_ACPI_IO_BASE + 0x0F) /* 1 byte */
+#define ACPI_PM_TMR_BLK (SBX00_ACPI_IO_BASE + 0x18) /* 4 bytes */
+#define ACPI_GPE0_BLK (SBX00_ACPI_IO_BASE + 0x10) /* 8 bytes */
+#define ACPI_END (SBX00_ACPI_IO_BASE + 0x80)
+
+#define PM_INDEX 0xCD6
+#define PM_DATA 0xCD7
+#define PM2_INDEX 0xCD0
+#define PM2_DATA 0xCD1
+
+/*
+ * The SCI interrupt needs the ACPI I/O space; allocate it here.
+ */
+
+static int __init register_acpi_resource(void)
+{
+ request_region(SBX00_ACPI_IO_BASE, SBX00_ACPI_IO_SIZE, "acpi");
+ return 0;
+}
+
+static void pmio_write_index(u16 index, u8 reg, u8 value)
+{
+ outb(reg, index);
+ outb(value, index + 1);
+}
+
+static u8 pmio_read_index(u16 index, u8 reg)
+{
+ outb(reg, index);
+ return inb(index + 1);
+}
+
+void pm_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm_iowrite);
+
+u8 pm_ioread(u8 reg)
+{
+ return pmio_read_index(PM_INDEX, reg);
+}
+EXPORT_SYMBOL(pm_ioread);
+
+void pm2_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM2_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm2_iowrite);
+
+u8 pm2_ioread(u8 reg)
+{
+ return pmio_read_index(PM2_INDEX, reg);
+}
+EXPORT_SYMBOL(pm2_ioread);
+
+static void acpi_hw_clear_status(void)
+{
+ u16 value;
+
+ /* PMStatus: Clear WakeStatus/PwrBtnStatus */
+ value = inw(ACPI_PM_EVT_BLK);
+ value |= (1 << 8 | 1 << 15);
+ outw(value, ACPI_PM_EVT_BLK);
+
+ /* GPEStatus: Clear all generated events */
+ outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK);
+}
+
+void acpi_registers_setup(void)
+{
+ u32 value;
+
+ /* PM Status Base */
+ pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff);
+ pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8);
+
+ /* PM Control Base */
+ pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff);
+ pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8);
+
+ /* GPM Base */
+ pm_iowrite(0x28, ACPI_GPE0_BLK & 0xff);
+ pm_iowrite(0x29, ACPI_GPE0_BLK >> 8);
+
+ /* ACPI End */
+ pm_iowrite(0x2e, ACPI_END & 0xff);
+ pm_iowrite(0x2f, ACPI_END >> 8);
+
+ /* IO Decode: When AcpiDecodeEnable is set, the south bridge uses the
+ * contents of the PM registers at index 0x20~0x2B to decode the ACPI
+ * I/O addresses. */
+ pm_iowrite(0x0e, 1 << 3);
+
+ /* SCI_EN set */
+ outw(1, ACPI_PM_CNT_BLK);
+
+ /* Enable to generate SCI */
+ pm_iowrite(0x10, pm_ioread(0x10) | 1);
+
+ /* GPM3/GPM9 enable */
+ value = inl(ACPI_GPE0_BLK + 4);
+ outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4);
+
+ /* Set GPM9 as input */
+ pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1)));
+
+ /* Set GPM9 as non-output */
+ pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3));
+
+ /* GPM3 config ACPI trigger SCIOUT */
+ pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4)));
+
+ /* GPM9 config ACPI trigger SCIOUT */
+ pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2)));
+
+ /* GPM3 config falling edge trigger */
+ pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6)));
+
+ /* No wait for STPGNT# in ACPI Sx state */
+ pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6));
+
+ /* Set GPM3 pull-down enable */
+ value = pm2_ioread(0xf6);
+ value |= ((1 << 7) | (1 << 3));
+ pm2_iowrite(0xf6, value);
+
+ /* Set GPM9 pull-down enable */
+ value = pm2_ioread(0xf8);
+ value |= ((1 << 5) | (1 << 1));
+ pm2_iowrite(0xf8, value);
+}
+
+int __init sbx00_acpi_init(void)
+{
+ register_acpi_resource();
+ acpi_registers_setup();
+ acpi_hw_clear_status();
+
+ return 0;
+}
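The pm_ioread()/pm_iowrite() helpers above follow the usual index/data convention: the register number is written to PM_INDEX (0xCD6) and its value is then read or written at PM_DATA (0xCD7); PM2_INDEX/PM2_DATA work the same way. A kernel-context sketch of the read-modify-write pattern used throughout acpi_registers_setup() (the helper is hypothetical, not part of the file):

    /* Illustrative only: set bits in a PM register via the index/data pair. */
    static void pm_set_bits(u8 reg, u8 bits)
    {
            pm_iowrite(reg, pm_ioread(reg) | bits);
    }

For example, pm_set_bits(0x10, 1) performs the same access as the "Enable to generate SCI" step above.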
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
new file mode 100644
index 000000000000..0f6c63e17049
--- /dev/null
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -0,0 +1,207 @@
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <loongson.h>
+#include <boot_param.h>
+#include <loongson_hwmon.h>
+
+/*
+ * Loongson-3 series CPUs have two temperature sensors inside,
+ * each reporting a value from 0 to 255; anything above 127 is
+ * dangerous. Only sensor1 data is provided here, because it always
+ * runs hotter than sensor0.
+ */
+int loongson3_cpu_temp(int cpu)
+{
+ u32 reg;
+
+ reg = LOONGSON_CHIPTEMP(cpu);
+ if (loongson_sysconf.cputype == Loongson_3A)
+ reg = (reg >> 8) & 0xff;
+ else if (loongson_sysconf.cputype == Loongson_3B)
+ reg = ((reg >> 8) & 0xff) - 100;
+
+ return (int)reg * 1000;
+}
+
+static struct device *cpu_hwmon_dev;
+
+static ssize_t get_hwmon_name(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, get_hwmon_name, NULL, 0);
+
+static struct attribute *cpu_hwmon_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ NULL
+};
+
+/* Hwmon device attribute group */
+static struct attribute_group cpu_hwmon_attribute_group = {
+ .attrs = cpu_hwmon_attributes,
+};
+
+/* Hwmon device get name */
+static ssize_t get_hwmon_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "cpu-hwmon\n");
+}
+
+static ssize_t get_cpu0_temp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t get_cpu1_temp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t cpu0_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t cpu1_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_cpu0_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, cpu0_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_cpu1_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, cpu1_temp_label, NULL, 2);
+
+static const struct attribute *hwmon_cputemp1[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute *hwmon_cputemp2[] = {
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ NULL
+};
+
+static ssize_t cpu0_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "CPU 0 Temprature\n");
+}
+
+static ssize_t cpu1_temp_label(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "CPU 1 Temprature\n");
+}
+
+static ssize_t get_cpu0_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int value = loongson3_cpu_temp(0);
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t get_cpu1_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int value = loongson3_cpu_temp(1);
+ return sprintf(buf, "%d\n", value);
+}
+
+static int create_sysfs_cputemp_files(struct kobject *kobj)
+{
+ int ret;
+
+ ret = sysfs_create_files(kobj, hwmon_cputemp1);
+ if (ret)
+ goto sysfs_create_temp1_fail;
+
+ if (loongson_sysconf.nr_cpus <= loongson_sysconf.cores_per_package)
+ return 0;
+
+ ret = sysfs_create_files(kobj, hwmon_cputemp2);
+ if (ret)
+ goto sysfs_create_temp2_fail;
+
+ return 0;
+
+sysfs_create_temp2_fail:
+ sysfs_remove_files(kobj, hwmon_cputemp1);
+
+sysfs_create_temp1_fail:
+ return -1;
+}
+
+static void remove_sysfs_cputemp_files(struct kobject *kobj)
+{
+ sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp1);
+
+ if (loongson_sysconf.nr_cpus > loongson_sysconf.cores_per_package)
+ sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp2);
+}
+
+#define CPU_THERMAL_THRESHOLD 90000
+static struct delayed_work thermal_work;
+
+static void do_thermal_timer(struct work_struct *work)
+{
+ int value = loongson3_cpu_temp(0);
+ if (value <= CPU_THERMAL_THRESHOLD)
+ schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000));
+ else
+ orderly_poweroff(true);
+}
+
+static int __init loongson_hwmon_init(void)
+{
+ int ret;
+
+ pr_info("Loongson Hwmon Enter...\n");
+
+ cpu_hwmon_dev = hwmon_device_register(NULL);
+ if (IS_ERR(cpu_hwmon_dev)) {
+ ret = -ENOMEM;
+ pr_err("hwmon_device_register fail!\n");
+ goto fail_hwmon_device_register;
+ }
+
+ ret = sysfs_create_group(&cpu_hwmon_dev->kobj,
+ &cpu_hwmon_attribute_group);
+ if (ret) {
+ pr_err("fail to create loongson hwmon!\n");
+ goto fail_sysfs_create_group_hwmon;
+ }
+
+ ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
+ if (ret) {
+ pr_err("fail to create cpu temprature interface!\n");
+ goto fail_create_sysfs_cputemp_files;
+ }
+
+ INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
+ schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
+
+ return ret;
+
+fail_create_sysfs_cputemp_files:
+ sysfs_remove_group(&cpu_hwmon_dev->kobj,
+ &cpu_hwmon_attribute_group);
+
+fail_sysfs_create_group_hwmon:
+ hwmon_device_unregister(cpu_hwmon_dev);
+
+fail_hwmon_device_register:
+ return ret;
+}
+
+static void __exit loongson_hwmon_exit(void)
+{
+ cancel_delayed_work_sync(&thermal_work);
+ remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
+ sysfs_remove_group(&cpu_hwmon_dev->kobj,
+ &cpu_hwmon_attribute_group);
+ hwmon_device_unregister(cpu_hwmon_dev);
+}
+
+module_init(loongson_hwmon_init);
+module_exit(loongson_hwmon_exit);
+
+MODULE_AUTHOR("Yu Xiang <xiangy@lemote.com>");
+MODULE_AUTHOR("Huacai Chen <chenhc@lemote.com>");
+MODULE_DESCRIPTION("Loongson CPU Hwmon driver");
+MODULE_LICENSE("GPL");
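loongson3_cpu_temp() above reports millidegrees Celsius, as hwmon expects: it takes byte 1 of the LOONGSON_CHIPTEMP register, applies a -100 offset on Loongson-3B, and multiplies by 1000. A small worked example for a hypothetical raw register value:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: mirrors the scaling in loongson3_cpu_temp(). */
    int main(void)
    {
            uint32_t reg = 0x3700;                                   /* hypothetical */
            int temp_3a = ((reg >> 8) & 0xff) * 1000;                /* 55000 */
            int temp_3b = ((int)((reg >> 8) & 0xff) - 100) * 1000;   /* -45000 */

            printf("3A: %d, 3B: %d (millidegrees C)\n", temp_3a, temp_3b);
            return 0;
    }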
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 97527614141b..f9f205cb1f11 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -614,6 +614,7 @@ config ACPI_TOSHIBA
depends on INPUT
depends on RFKILL || RFKILL = n
depends on SERIO_I8042 || SERIO_I8042 = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_POLLDEV
select INPUT_SPARSEKMAP
---help---
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index b9429fbf1cd8..6808715003f6 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
+#include <linux/vgaarb.h>
#include <acpi/video.h>
#include <asm/io.h>
@@ -31,6 +32,7 @@ struct apple_gmux_data {
bool indexed;
struct mutex index_lock;
+ struct pci_dev *pdev;
struct backlight_device *bdev;
/* switcheroo data */
@@ -415,6 +417,23 @@ static int gmux_resume(struct device *dev)
return 0;
}
+static struct pci_dev *gmux_get_io_pdev(void)
+{
+ struct pci_dev *pdev = NULL;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_IO))
+ continue;
+
+ return pdev;
+ }
+
+ return NULL;
+}
+
static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
struct apple_gmux_data *gmux_data;
@@ -425,6 +444,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
int ret = -ENXIO;
acpi_status status;
unsigned long long gpe;
+ struct pci_dev *pdev = NULL;
if (apple_gmux_data)
return -EBUSY;
@@ -475,7 +495,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
ver_minor = (version >> 16) & 0xff;
ver_release = (version >> 8) & 0xff;
} else {
- pr_info("gmux device not present\n");
+ pr_info("gmux device not present or IO disabled\n");
ret = -ENODEV;
goto err_release;
}
@@ -483,6 +503,23 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
ver_release, (gmux_data->indexed ? "indexed" : "classic"));
+ /*
+ * Apple systems with gmux are EFI based and normally don't use
+ * VGA. In addition, changing IO+MEM ownership between IGP and dGPU
+ * disables IO/MEM used for backlight control on some systems.
+ * Lock IO+MEM to GPU with active IO to prevent switch.
+ */
+ pdev = gmux_get_io_pdev();
+ if (pdev && vga_tryget(pdev,
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) {
+ pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n",
+ pci_name(pdev));
+ ret = -EBUSY;
+ goto err_release;
+ } else if (pdev)
+ pr_info("locked IO for PCI:%s\n", pci_name(pdev));
+ gmux_data->pdev = pdev;
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
@@ -574,6 +611,10 @@ err_enable_gpe:
err_notify:
backlight_device_unregister(bdev);
err_release:
+ if (gmux_data->pdev)
+ vga_put(gmux_data->pdev,
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
+ pci_dev_put(pdev);
release_region(gmux_data->iostart, gmux_data->iolen);
err_free:
kfree(gmux_data);
@@ -593,6 +634,11 @@ static void gmux_remove(struct pnp_dev *pnp)
&gmux_notify_handler);
}
+ if (gmux_data->pdev) {
+ vga_put(gmux_data->pdev,
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
+ pci_dev_put(gmux_data->pdev);
+ }
backlight_device_unregister(gmux_data->bdev);
release_region(gmux_data->iostart, gmux_data->iolen);
@@ -624,19 +670,7 @@ static struct pnp_driver gmux_pnp_driver = {
},
};
-static int __init apple_gmux_init(void)
-{
- return pnp_register_driver(&gmux_pnp_driver);
-}
-
-static void __exit apple_gmux_exit(void)
-{
- pnp_unregister_driver(&gmux_pnp_driver);
-}
-
-module_init(apple_gmux_init);
-module_exit(apple_gmux_exit);
-
+module_pnp_driver(gmux_pnp_driver);
MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>");
MODULE_DESCRIPTION("Apple Gmux Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 3d21efe11d7b..d688d806a8a5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,9 +2,11 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
- * Inc.
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
+#define KBD_LED_OFF_TOKEN 0x01E1
+#define KBD_LED_ON_TOKEN 0x01E2
+#define KBD_LED_AUTO_TOKEN 0x01E3
+#define KBD_LED_AUTO_25_TOKEN 0x02EA
+#define KBD_LED_AUTO_50_TOKEN 0x02EB
+#define KBD_LED_AUTO_75_TOKEN 0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -62,6 +71,13 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
+
+ int needs_kbd_timeouts;
+ /*
+ * Ordered list of timeouts expressed in seconds.
+ * The list must end with -1
+ */
+ int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -76,6 +92,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
+/*
+ * These values come from the Windows utility provided by Dell. If any other
+ * value is used, the BIOS silently sets the timeout to 0 without any error
+ * message.
+ */
+static struct quirk_entry quirk_dell_xps13_9333 = {
+ .needs_kbd_timeouts = 1,
+ .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
+};
+
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -267,6 +292,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell XPS13 9333",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ },
+ .driver_data = &quirk_dell_xps13_9333,
+ },
{ }
};
@@ -331,17 +365,29 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_location(int tokenid)
+static int find_token_id(int tokenid)
{
int i;
+
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return da_tokens[i].location;
+ return i;
}
return -1;
}
+static int find_token_location(int tokenid)
+{
+ int id;
+
+ id = find_token_id(tokenid);
+ if (id == -1)
+ return -1;
+
+ return da_tokens[id].location;
+}
+
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -362,6 +408,20 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
+static inline int dell_smi_error(int value)
+{
+ switch (value) {
+ case 0: /* Completed successfully */
+ return 0;
+ case -1: /* Completed with error */
+ return -EIO;
+ case -2: /* Function not supported */
+ return -ENXIO;
+ default: /* Unknown error */
+ return -EINVAL;
+ }
+}
+
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -716,7 +776,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
-out:
+ out:
release_buffer();
return ret;
}
@@ -740,7 +800,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
-out:
+ out:
release_buffer();
return ret;
}
@@ -789,6 +849,1018 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
+/*
+ * Derived from information in smbios-keyboard-ctl:
+ *
+ * cbClass 4
+ * cbSelect 11
+ * Keyboard illumination
+ * cbArg1 determines the function to be performed
+ *
+ * cbArg1 0x0 = Get Feature Information
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of user-selectable modes
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * cbRES2, byte2 Reserved for future use
+ * cbRES2, byte3 Keyboard illumination type
+ * 0 Reserved
+ * 1 Tasklight
+ * 2 Backlight
+ * 3-255 Reserved for future use
+ * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte1 Supported timeout unit bitmap
+ * bit 0 Seconds
+ * bit 1 Minutes
+ * bit 2 Hours
+ * bit 3 Days
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte2 Number of keyboard light brightness levels
+ * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
+ * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
+ * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
+ * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
+ *
+ * cbArg1 0x1 = Get Current State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbRES2, byte2 Currently active auto keyboard illumination triggers.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES2, byte3 Current Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
+ * are set upon return from the [Get feature information] call.
+ * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
+ * cbRES3, byte1 Current ALS reading
+ * cbRES3, byte2 Current keyboard light level.
+ *
+ * cbArg1 0x2 = Set New State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbArg2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
+ * keyboard to turn off automatically.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbArg2, byte3 Desired Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
+ * cbArg3, byte2 Desired keyboard light level.
+ */
+
+
+enum kbd_timeout_unit {
+ KBD_TIMEOUT_SECONDS = 0,
+ KBD_TIMEOUT_MINUTES,
+ KBD_TIMEOUT_HOURS,
+ KBD_TIMEOUT_DAYS,
+};
+
+enum kbd_mode_bit {
+ KBD_MODE_BIT_OFF = 0,
+ KBD_MODE_BIT_ON,
+ KBD_MODE_BIT_ALS,
+ KBD_MODE_BIT_TRIGGER_ALS,
+ KBD_MODE_BIT_TRIGGER,
+ KBD_MODE_BIT_TRIGGER_25,
+ KBD_MODE_BIT_TRIGGER_50,
+ KBD_MODE_BIT_TRIGGER_75,
+ KBD_MODE_BIT_TRIGGER_100,
+};
+
+#define kbd_is_als_mode_bit(bit) \
+ ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
+#define kbd_is_trigger_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+#define kbd_is_level_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+
+struct kbd_info {
+ u16 modes;
+ u8 type;
+ u8 triggers;
+ u8 levels;
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 days;
+};
+
+struct kbd_state {
+ u8 mode_bit;
+ u8 triggers;
+ u8 timeout_value;
+ u8 timeout_unit;
+ u8 als_setting;
+ u8 als_value;
+ u8 level;
+};
+
+static const int kbd_tokens[] = {
+ KBD_LED_OFF_TOKEN,
+ KBD_LED_AUTO_25_TOKEN,
+ KBD_LED_AUTO_50_TOKEN,
+ KBD_LED_AUTO_75_TOKEN,
+ KBD_LED_AUTO_100_TOKEN,
+ KBD_LED_ON_TOKEN,
+};
+
+static u16 kbd_token_bits;
+
+static struct kbd_info kbd_info;
+static bool kbd_als_supported;
+static bool kbd_triggers_supported;
+
+static u8 kbd_mode_levels[16];
+static int kbd_mode_levels_count;
+
+static u8 kbd_previous_level;
+static u8 kbd_previous_mode_bit;
+
+static bool kbd_led_present;
+
+/*
+ * NOTE: there are three ways to set the keyboard backlight level.
+ * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
+ * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
+ * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
+ *
+ * There are laptops which support only one of these methods. If we want to
+ * support as many machines as possible we need to implement all three methods.
+ * The first two methods use the kbd_state structure. The third uses SMBIOS
+ * tokens. If kbd_info.levels == 0, the machine does not support setting the
+ * keyboard backlight level via kbd_state.level.
+ */
+
+static int kbd_get_info(struct kbd_info *info)
+{
+ u8 units;
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x0;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ info->modes = buffer->output[1] & 0xFFFF;
+ info->type = (buffer->output[1] >> 24) & 0xFF;
+ info->triggers = buffer->output[2] & 0xFF;
+ units = (buffer->output[2] >> 8) & 0xFF;
+ info->levels = (buffer->output[2] >> 16) & 0xFF;
+
+ if (units & BIT(0))
+ info->seconds = (buffer->output[3] >> 0) & 0xFF;
+ if (units & BIT(1))
+ info->minutes = (buffer->output[3] >> 8) & 0xFF;
+ if (units & BIT(2))
+ info->hours = (buffer->output[3] >> 16) & 0xFF;
+ if (units & BIT(3))
+ info->days = (buffer->output[3] >> 24) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static unsigned int kbd_get_max_level(void)
+{
+ if (kbd_info.levels != 0)
+ return kbd_info.levels;
+ if (kbd_mode_levels_count > 0)
+ return kbd_mode_levels_count - 1;
+ return 0;
+}
+
+static int kbd_get_level(struct kbd_state *state)
+{
+ int i;
+
+ if (kbd_info.levels != 0)
+ return state->level;
+
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < kbd_mode_levels_count; ++i)
+ if (kbd_mode_levels[i] == state->mode_bit)
+ return i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_set_level(struct kbd_state *state, u8 level)
+{
+ if (kbd_info.levels != 0) {
+ if (level != 0)
+ kbd_previous_level = level;
+ if (state->level == level)
+ return 0;
+ state->level = level;
+ if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
+ state->mode_bit = kbd_previous_mode_bit;
+ else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit = state->mode_bit;
+ state->mode_bit = KBD_MODE_BIT_OFF;
+ }
+ return 0;
+ }
+
+ if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
+ if (level != 0)
+ kbd_previous_level = level;
+ state->mode_bit = kbd_mode_levels[level];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_get_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x1;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
+ if (state->mode_bit != 0)
+ state->mode_bit--;
+
+ state->triggers = (buffer->output[1] >> 16) & 0xFF;
+ state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
+ state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
+ state->als_setting = buffer->output[2] & 0xFF;
+ state->als_value = (buffer->output[2] >> 8) & 0xFF;
+ state->level = (buffer->output[2] >> 16) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static int kbd_set_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+ buffer->input[0] = 0x2;
+ buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
+ buffer->input[1] |= (state->triggers & 0xFF) << 16;
+ buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
+ buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
+ buffer->input[2] = state->als_setting & 0xFF;
+ buffer->input[2] |= (state->level & 0xFF) << 16;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
+{
+ int ret;
+
+ ret = kbd_set_state(state);
+ if (ret == 0)
+ return 0;
+
+ /*
+ * When setting the new state fails, try to restore the previous one.
+ * This is needed on some machines where BIOS sets a default state when
+ * setting a new state fails. This default state could be all off.
+ */
+
+ if (kbd_set_state(old))
+ pr_err("Setting old previous keyboard state failed\n");
+
+ return ret;
+}
+
+static int kbd_set_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ buffer->input[1] = da_tokens[id].value;
+ dell_send_request(buffer, 1, 0);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_get_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+ int val;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ dell_send_request(buffer, 0, 0);
+ ret = buffer->output[0];
+ val = buffer->output[1];
+ release_buffer();
+
+ if (ret)
+ return dell_smi_error(ret);
+
+ return (val == da_tokens[id].value);
+}
+
+static int kbd_get_first_active_token_bit(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
+ ret = kbd_get_token_bit(i);
+ if (ret == 1)
+ return i;
+ }
+
+ return ret;
+}
+
+static int kbd_get_valid_token_counts(void)
+{
+ return hweight16(kbd_token_bits);
+}
+
+static inline int kbd_init_info(void)
+{
+ struct kbd_state state;
+ int ret;
+ int i;
+
+ ret = kbd_get_info(&kbd_info);
+ if (ret)
+ return ret;
+
+ kbd_get_state(&state);
+
+ /* NOTE: timeout value is stored in 6 bits so max value is 63 */
+ if (kbd_info.seconds > 63)
+ kbd_info.seconds = 63;
+ if (kbd_info.minutes > 63)
+ kbd_info.minutes = 63;
+ if (kbd_info.hours > 63)
+ kbd_info.hours = 63;
+ if (kbd_info.days > 63)
+ kbd_info.days = 63;
+
+ /* NOTE: On tested machines the ON mode did not work and caused
+ * problems (it turned the backlight off), so do not use it.
+ */
+ kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
+
+ kbd_previous_level = kbd_get_level(&state);
+ kbd_previous_mode_bit = state.mode_bit;
+
+ if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
+ kbd_previous_level = 1;
+
+ if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit =
+ ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
+ if (kbd_previous_mode_bit != 0)
+ kbd_previous_mode_bit--;
+ }
+
+ if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
+ BIT(KBD_MODE_BIT_TRIGGER_ALS)))
+ kbd_als_supported = true;
+
+ if (kbd_info.modes & (
+ BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
+ BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
+ BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
+ ))
+ kbd_triggers_supported = true;
+
+ /* kbd_mode_levels[0] is reserved, see below */
+ for (i = 0; i < 16; ++i)
+ if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
+ kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
+
+ /*
+ * Find the first supported mode and assign to kbd_mode_levels[0].
+ * This should be 0 (off), but we cannot depend on the BIOS to
+ * support 0.
+ */
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < 16; ++i) {
+ if (BIT(i) & kbd_info.modes) {
+ kbd_mode_levels[0] = i;
+ break;
+ }
+ }
+ kbd_mode_levels_count++;
+ }
+
+ return 0;
+
+}
+
+static inline void kbd_init_tokens(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
+ if (find_token_id(kbd_tokens[i]) != -1)
+ kbd_token_bits |= BIT(i);
+}
+
+static void kbd_init(void)
+{
+ int ret;
+
+ ret = kbd_init_info();
+ kbd_init_tokens();
+
+ if (kbd_token_bits != 0 || ret == 0)
+ kbd_led_present = true;
+}
+
+static ssize_t kbd_led_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool convert;
+ int value;
+ int ret;
+ char ch;
+ u8 unit;
+ int i;
+
+ ret = sscanf(buf, "%d %c", &value, &ch);
+ if (ret < 1)
+ return -EINVAL;
+ else if (ret == 1)
+ ch = 's';
+
+ if (value < 0)
+ return -EINVAL;
+
+ convert = false;
+
+ switch (ch) {
+ case 's':
+ if (value > kbd_info.seconds)
+ convert = true;
+ unit = KBD_TIMEOUT_SECONDS;
+ break;
+ case 'm':
+ if (value > kbd_info.minutes)
+ convert = true;
+ unit = KBD_TIMEOUT_MINUTES;
+ break;
+ case 'h':
+ if (value > kbd_info.hours)
+ convert = true;
+ unit = KBD_TIMEOUT_HOURS;
+ break;
+ case 'd':
+ if (value > kbd_info.days)
+ convert = true;
+ unit = KBD_TIMEOUT_DAYS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts)
+ convert = true;
+
+ if (convert) {
+ /* Convert value from current units to seconds */
+ switch (unit) {
+ case KBD_TIMEOUT_DAYS:
+ value *= 24;
+ case KBD_TIMEOUT_HOURS:
+ value *= 60;
+ case KBD_TIMEOUT_MINUTES:
+ value *= 60;
+ unit = KBD_TIMEOUT_SECONDS;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts) {
+ for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
+ if (value <= quirks->kbd_timeouts[i]) {
+ value = quirks->kbd_timeouts[i];
+ break;
+ }
+ }
+ }
+
+ if (value <= kbd_info.seconds && kbd_info.seconds) {
+ unit = KBD_TIMEOUT_SECONDS;
+ } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
+ value /= 60;
+ unit = KBD_TIMEOUT_MINUTES;
+ } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
+ value /= (60 * 60);
+ unit = KBD_TIMEOUT_HOURS;
+ } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
+ value /= (60 * 60 * 24);
+ unit = KBD_TIMEOUT_DAYS;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.timeout_value = value;
+ new_state.timeout_unit = unit;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t kbd_led_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ int ret;
+ int len;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = sprintf(buf, "%d", state.timeout_value);
+
+ switch (state.timeout_unit) {
+ case KBD_TIMEOUT_SECONDS:
+ return len + sprintf(buf+len, "s\n");
+ case KBD_TIMEOUT_MINUTES:
+ return len + sprintf(buf+len, "m\n");
+ case KBD_TIMEOUT_HOURS:
+ return len + sprintf(buf+len, "h\n");
+ case KBD_TIMEOUT_DAYS:
+ return len + sprintf(buf+len, "d\n");
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
+ kbd_led_timeout_show, kbd_led_timeout_store);
+
+static const char * const kbd_led_triggers[] = {
+ "keyboard",
+ "touchpad",
+ /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
+ "mouse",
+};
+
+static ssize_t kbd_led_triggers_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ int trigger_bit = -1;
+ char trigger[21];
+ int i, ret;
+
+ ret = sscanf(buf, "%20s", trigger);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (trigger[0] != '+' && trigger[0] != '-')
+ return -EINVAL;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (kbd_triggers_supported) {
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
+ continue;
+ if (trigger[0] == '+' &&
+ triggers_enabled && (state.triggers & BIT(i)))
+ return count;
+ if (trigger[0] == '-' &&
+ (!triggers_enabled || !(state.triggers & BIT(i))))
+ return count;
+ trigger_bit = i;
+ break;
+ }
+ }
+
+ if (trigger_bit != -1) {
+ new_state = state;
+ if (trigger[0] == '+')
+ new_state.triggers |= BIT(trigger_bit);
+ else {
+ new_state.triggers &= ~BIT(trigger_bit);
+ /* NOTE: trackstick bit (2) must be disabled when
+ * disabling touchpad bit (1), otherwise touchpad
+ * bit (1) will not be disabled */
+ if (trigger_bit == 1)
+ new_state.triggers &= ~BIT(2);
+ }
+ if ((kbd_info.triggers & new_state.triggers) !=
+ new_state.triggers)
+ return -EINVAL;
+ if (new_state.triggers && !triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else if (new_state.triggers == 0) {
+ kbd_set_level(&new_state, 0);
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ if (new_state.mode_bit != KBD_MODE_BIT_OFF)
+ kbd_previous_mode_bit = new_state.mode_bit;
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t kbd_led_triggers_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ bool triggers_enabled;
+ int level, i, ret;
+ int len = 0;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = 0;
+
+ if (kbd_triggers_supported) {
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+ level = kbd_get_level(&state);
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if ((triggers_enabled || level <= 0) &&
+ (state.triggers & BIT(i)))
+ buf[len++] = '+';
+ else
+ buf[len++] = '-';
+ len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
+ }
+ }
+
+ if (len)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
+ kbd_led_triggers_show, kbd_led_triggers_store);
+
+static ssize_t kbd_led_als_enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ int enable;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &enable);
+ if (ret)
+ return ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (enable == kbd_is_als_mode_bit(state.mode_bit))
+ return count;
+
+ new_state = state;
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (enable) {
+ if (triggers_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
+ else
+ new_state.mode_bit = KBD_MODE_BIT_ALS;
+ } else {
+ if (triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else {
+ new_state.mode_bit = KBD_MODE_BIT_ON;
+ }
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ kbd_previous_mode_bit = new_state.mode_bit;
+
+ return count;
+}
+
+static ssize_t kbd_led_als_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kbd_state state;
+ bool enabled = false;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+ enabled = kbd_is_als_mode_bit(state.mode_bit);
+
+ return sprintf(buf, "%d\n", enabled ? 1 : 0);
+}
+
+static DEVICE_ATTR(als_enabled, S_IRUGO | S_IWUSR,
+ kbd_led_als_enabled_show, kbd_led_als_enabled_store);
+
+static ssize_t kbd_led_als_setting_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u8 setting;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &setting);
+ if (ret)
+ return ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.als_setting = setting;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t kbd_led_als_setting_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kbd_state state;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", state.als_setting);
+}
+
+static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
+ kbd_led_als_setting_show, kbd_led_als_setting_store);
+
+static struct attribute *kbd_led_attrs[] = {
+ &dev_attr_stop_timeout.attr,
+ &dev_attr_start_triggers.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_led_group = {
+ .attrs = kbd_led_attrs,
+};
+
+static struct attribute *kbd_led_als_attrs[] = {
+ &dev_attr_als_enabled.attr,
+ &dev_attr_als_setting.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_led_als_group = {
+ .attrs = kbd_led_als_attrs,
+};
+
+static const struct attribute_group *kbd_led_groups[] = {
+ &kbd_led_group,
+ &kbd_led_als_group,
+ NULL,
+};
+
+static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
+{
+ int ret;
+ u16 num;
+ struct kbd_state state;
+
+ if (kbd_get_max_level()) {
+ ret = kbd_get_state(&state);
+ if (ret)
+ return 0;
+ ret = kbd_get_level(&state);
+ if (ret < 0)
+ return 0;
+ return ret;
+ }
+
+ if (kbd_get_valid_token_counts()) {
+ ret = kbd_get_first_active_token_bit();
+ if (ret < 0)
+ return 0;
+ for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return 0;
+ return ffs(num) - 1;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+ return 0;
+}
+
+static void kbd_led_level_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u16 num;
+
+ if (kbd_get_max_level()) {
+ if (kbd_get_state(&state))
+ return;
+ new_state = state;
+ if (kbd_set_level(&new_state, value))
+ return;
+ kbd_set_state_safe(&new_state, &state);
+ return;
+ }
+
+ if (kbd_get_valid_token_counts()) {
+ for (num = kbd_token_bits; num != 0 && value > 0; --value)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return;
+ kbd_set_token_bit(ffs(num) - 1);
+ return;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+}
+
+static struct led_classdev kbd_led = {
+ .name = "dell::kbd_backlight",
+ .brightness_set = kbd_led_level_set,
+ .brightness_get = kbd_led_level_get,
+ .groups = kbd_led_groups,
+};
+
+static int __init kbd_led_init(struct device *dev)
+{
+ kbd_init();
+ if (!kbd_led_present)
+ return -ENODEV;
+ if (!kbd_als_supported)
+ kbd_led_groups[1] = NULL;
+ kbd_led.max_brightness = kbd_get_max_level();
+ if (!kbd_led.max_brightness) {
+ kbd_led.max_brightness = kbd_get_valid_token_counts();
+ if (kbd_led.max_brightness)
+ kbd_led.max_brightness--;
+ }
+ return led_classdev_register(dev, &kbd_led);
+}
+
+static void brightness_set_exit(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ /* Don't change backlight level on exit */
+}
+
+static void kbd_led_exit(void)
+{
+ if (!kbd_led_present)
+ return;
+ kbd_led.brightness_set = brightness_set_exit;
+ led_classdev_unregister(&kbd_led);
+}
+
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -841,6 +1913,8 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
+ kbd_led_init(&platform_device->dev);
+
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -908,6 +1982,7 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
+ kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -924,5 +1999,7 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 53bdbb01bd3f..baea077a02cc 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -59,7 +59,7 @@ static unsigned short keymap_Lifebook_Tseries[KEYMAP_LEN] __initdata = {
KEY_RESERVED,
KEY_SCROLLDOWN,
KEY_SCROLLUP,
- KEY_DIRECTION,
+ KEY_ROTATE_DISPLAY,
KEY_LEFTCTRL,
KEY_BRIGHTNESSUP,
KEY_BRIGHTNESSDOWN,
@@ -116,7 +116,7 @@ static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initdata = {
KEY_RESERVED,
KEY_PROG1,
KEY_PROG2,
- KEY_DIRECTION,
+ KEY_ROTATE_DISPLAY,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
@@ -153,7 +153,7 @@ static unsigned short keymap_Stylistic_ST5xxx[KEYMAP_LEN] __initdata = {
KEY_RESERVED,
KEY_RESERVED,
KEY_MAIL,
- KEY_DIRECTION,
+ KEY_ROTATE_DISPLAY,
KEY_ESC,
KEY_ENTER,
KEY_BRIGHTNESSUP,
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 0ab2b377a778..06697315a088 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -144,7 +144,7 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
- { KE_KEY, 0x2169, { KEY_DIRECTION } },
+ { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
{ KE_KEY, 0x216a, { KEY_SETUP } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index b3d419a84723..b496db87bc05 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -830,6 +830,13 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
*/
static const struct dmi_system_id no_hw_rfkill_list[] = {
{
+ .ident = "Lenovo G40-30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index a4a4258f6134..8037c8b46241 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -62,7 +62,7 @@
* (1 << 1): Bluetooth enable/disable, RW.
* (1 << 2): GPS enable/disable, RW.
* (1 << 3): WiFi enable/disable, RW.
- * (1 << 4): WWAN (3G) enable/disalbe, RW.
+ * (1 << 4): WWAN (3G) enable/disable, RW.
* (1 << 5): Touchscreen enable/disable, Read Only.
*/
#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 3b8ceee7c5cb..9bb9ad6d4a1b 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -319,6 +319,7 @@ static struct {
u32 sensors_pdrv_attrs_registered:1;
u32 sensors_pdev_attrs_registered:1;
u32 hotkey_poll_active:1;
+ u32 has_adaptive_kbd:1;
} tp_features;
static struct {
@@ -1911,6 +1912,27 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_UNK7,
TP_ACPI_HOTKEYSCAN_UNK8,
+ TP_ACPI_HOTKEYSCAN_MUTE2,
+ TP_ACPI_HOTKEYSCAN_BRIGHTNESS_ZERO,
+ TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL,
+ TP_ACPI_HOTKEYSCAN_CLOUD,
+ TP_ACPI_HOTKEYSCAN_UNK9,
+ TP_ACPI_HOTKEYSCAN_VOICE,
+ TP_ACPI_HOTKEYSCAN_UNK10,
+ TP_ACPI_HOTKEYSCAN_GESTURES,
+ TP_ACPI_HOTKEYSCAN_UNK11,
+ TP_ACPI_HOTKEYSCAN_UNK12,
+ TP_ACPI_HOTKEYSCAN_UNK13,
+ TP_ACPI_HOTKEYSCAN_CONFIG,
+ TP_ACPI_HOTKEYSCAN_NEW_TAB,
+ TP_ACPI_HOTKEYSCAN_RELOAD,
+ TP_ACPI_HOTKEYSCAN_BACK,
+ TP_ACPI_HOTKEYSCAN_MIC_DOWN,
+ TP_ACPI_HOTKEYSCAN_MIC_UP,
+ TP_ACPI_HOTKEYSCAN_MIC_CANCELLATION,
+ TP_ACPI_HOTKEYSCAN_CAMERA_MODE,
+ TP_ACPI_HOTKEYSCAN_ROTATE_DISPLAY,
+
/* Hotkey keymap size */
TPACPI_HOTKEY_MAP_LEN
};
@@ -2093,7 +2115,7 @@ static int hotkey_mask_get(void)
return 0;
}
-void static hotkey_mask_warn_incomplete_mask(void)
+static void hotkey_mask_warn_incomplete_mask(void)
{
/* log only what the user can fix... */
const u32 wantedmask = hotkey_driver_mask &
@@ -2647,9 +2669,7 @@ static ssize_t hotkey_enable_store(struct device *dev,
return count;
}
-static struct device_attribute dev_attr_hotkey_enable =
- __ATTR(hotkey_enable, S_IWUSR | S_IRUGO,
- hotkey_enable_show, hotkey_enable_store);
+static DEVICE_ATTR_RW(hotkey_enable);
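
For reference, the DEVICE_ATTR_RW()/_RO()/_WO() conversions in this patch rely on the callbacks following the <name>_show/<name>_store naming scheme; the macro then builds dev_attr_<name> with the usual 0644/0444/0200 permissions. A minimal sketch with a hypothetical "foo" attribute (not part of the patch):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;
}

/* Equivalent to __ATTR(foo, S_IWUSR | S_IRUGO, foo_show, foo_store) */
static DEVICE_ATTR_RW(foo);
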
/* sysfs hotkey mask --------------------------------------------------- */
static ssize_t hotkey_mask_show(struct device *dev,
@@ -2685,9 +2705,7 @@ static ssize_t hotkey_mask_store(struct device *dev,
return (res) ? res : count;
}
-static struct device_attribute dev_attr_hotkey_mask =
- __ATTR(hotkey_mask, S_IWUSR | S_IRUGO,
- hotkey_mask_show, hotkey_mask_store);
+static DEVICE_ATTR_RW(hotkey_mask);
/* sysfs hotkey bios_enabled ------------------------------------------- */
static ssize_t hotkey_bios_enabled_show(struct device *dev,
@@ -2697,8 +2715,7 @@ static ssize_t hotkey_bios_enabled_show(struct device *dev,
return sprintf(buf, "0\n");
}
-static struct device_attribute dev_attr_hotkey_bios_enabled =
- __ATTR(hotkey_bios_enabled, S_IRUGO, hotkey_bios_enabled_show, NULL);
+static DEVICE_ATTR_RO(hotkey_bios_enabled);
/* sysfs hotkey bios_mask ---------------------------------------------- */
static ssize_t hotkey_bios_mask_show(struct device *dev,
@@ -2710,8 +2727,7 @@ static ssize_t hotkey_bios_mask_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask);
}
-static struct device_attribute dev_attr_hotkey_bios_mask =
- __ATTR(hotkey_bios_mask, S_IRUGO, hotkey_bios_mask_show, NULL);
+static DEVICE_ATTR_RO(hotkey_bios_mask);
/* sysfs hotkey all_mask ----------------------------------------------- */
static ssize_t hotkey_all_mask_show(struct device *dev,
@@ -2722,8 +2738,7 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
hotkey_all_mask | hotkey_source_mask);
}
-static struct device_attribute dev_attr_hotkey_all_mask =
- __ATTR(hotkey_all_mask, S_IRUGO, hotkey_all_mask_show, NULL);
+static DEVICE_ATTR_RO(hotkey_all_mask);
/* sysfs hotkey recommended_mask --------------------------------------- */
static ssize_t hotkey_recommended_mask_show(struct device *dev,
@@ -2735,9 +2750,7 @@ static ssize_t hotkey_recommended_mask_show(struct device *dev,
& ~hotkey_reserved_mask);
}
-static struct device_attribute dev_attr_hotkey_recommended_mask =
- __ATTR(hotkey_recommended_mask, S_IRUGO,
- hotkey_recommended_mask_show, NULL);
+static DEVICE_ATTR_RO(hotkey_recommended_mask);
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
@@ -2792,9 +2805,7 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
return (rc < 0) ? rc : count;
}
-static struct device_attribute dev_attr_hotkey_source_mask =
- __ATTR(hotkey_source_mask, S_IWUSR | S_IRUGO,
- hotkey_source_mask_show, hotkey_source_mask_store);
+static DEVICE_ATTR_RW(hotkey_source_mask);
/* sysfs hotkey hotkey_poll_freq --------------------------------------- */
static ssize_t hotkey_poll_freq_show(struct device *dev,
@@ -2826,9 +2837,7 @@ static ssize_t hotkey_poll_freq_store(struct device *dev,
return count;
}
-static struct device_attribute dev_attr_hotkey_poll_freq =
- __ATTR(hotkey_poll_freq, S_IWUSR | S_IRUGO,
- hotkey_poll_freq_show, hotkey_poll_freq_store);
+static DEVICE_ATTR_RW(hotkey_poll_freq);
#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
@@ -2849,8 +2858,7 @@ static ssize_t hotkey_radio_sw_show(struct device *dev,
(res == TPACPI_RFK_RADIO_OFF) ? 0 : 1);
}
-static struct device_attribute dev_attr_hotkey_radio_sw =
- __ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL);
+static DEVICE_ATTR_RO(hotkey_radio_sw);
static void hotkey_radio_sw_notify_change(void)
{
@@ -2872,8 +2880,7 @@ static ssize_t hotkey_tablet_mode_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
}
-static struct device_attribute dev_attr_hotkey_tablet_mode =
- __ATTR(hotkey_tablet_mode, S_IRUGO, hotkey_tablet_mode_show, NULL);
+static DEVICE_ATTR_RO(hotkey_tablet_mode);
static void hotkey_tablet_mode_notify_change(void)
{
@@ -2890,8 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
}
-static struct device_attribute dev_attr_hotkey_wakeup_reason =
- __ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
+static DEVICE_ATTR_RO(hotkey_wakeup_reason);
static void hotkey_wakeup_reason_notify_change(void)
{
@@ -2907,9 +2913,7 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
}
-static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
- __ATTR(wakeup_hotunplug_complete, S_IRUGO,
- hotkey_wakeup_hotunplug_complete_show, NULL);
+static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete);
static void hotkey_wakeup_hotunplug_complete_notify_change(void)
{
@@ -2917,6 +2921,57 @@ static void hotkey_wakeup_hotunplug_complete_notify_change(void)
"wakeup_hotunplug_complete");
}
+/* sysfs adaptive kbd mode --------------------------------------------- */
+
+static int adaptive_keyboard_get_mode(void);
+static int adaptive_keyboard_set_mode(int new_mode);
+
+enum ADAPTIVE_KEY_MODE {
+ HOME_MODE,
+ WEB_BROWSER_MODE,
+ WEB_CONFERENCE_MODE,
+ FUNCTION_MODE,
+ LAYFLAT_MODE
+};
+
+static ssize_t adaptive_kbd_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int current_mode;
+
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
+ return current_mode;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", current_mode);
+}
+
+static ssize_t adaptive_kbd_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int res;
+
+ if (parse_strtoul(buf, LAYFLAT_MODE, &t))
+ return -EINVAL;
+
+ res = adaptive_keyboard_set_mode(t);
+ return (res < 0) ? res : count;
+}
+
+static DEVICE_ATTR_RW(adaptive_kbd_mode);
+
+static struct attribute *adaptive_kbd_attributes[] = {
+ &dev_attr_adaptive_kbd_mode.attr,
+ NULL
+};
+
+static const struct attribute_group adaptive_kbd_attr_group = {
+ .attrs = adaptive_kbd_attributes,
+};
+
/* --------------------------------------------------------------------- */
static struct attribute *hotkey_attributes[] __initdata = {
@@ -3118,6 +3173,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /* No assignments, only used for Adaptive keyboards. */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
},
/* Generic keymap for Lenovo ThinkPads */
@@ -3174,6 +3236,35 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Extra keys in use since the X240 / T440 / T540 */
KEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_FILE,
+
+ /*
+ * These are the adaptive keyboard keycodes for Carbon X1 2014.
+ * The first item in this list is the Mute button which is
+ * emitted with 0x103 through
+ * adaptive_keyboard_hotkey_notify_hotkey() when the sound
+ * symbol is held.
+ * We'll need to offset those by 0x20.
+ */
+ KEY_RESERVED, /* Mute held, 0x103 */
+ KEY_BRIGHTNESS_MIN, /* Backlight off */
+ KEY_RESERVED, /* Clipping tool */
+ KEY_RESERVED, /* Cloud */
+ KEY_RESERVED,
+ KEY_VOICECOMMAND, /* Voice */
+ KEY_RESERVED,
+ KEY_RESERVED, /* Gestures */
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_CONFIG, /* Settings */
+ KEY_RESERVED, /* New tab */
+ KEY_REFRESH, /* Reload */
+ KEY_BACK, /* Back */
+ KEY_RESERVED, /* Microphone down */
+ KEY_RESERVED, /* Microphone up */
+ KEY_RESERVED, /* Microphone cancellation */
+ KEY_RESERVED, /* Camera mode */
+ KEY_RESERVED, /* Rotate display, 0x116 */
},
};
@@ -3227,6 +3318,20 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!tp_features.hotkey)
return 1;
+ /*
+ * Check if we have an adaptive keyboard, like on the
+ * Lenovo Carbon X1 2014 (2nd Gen).
+ */
+ if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+ if ((hkeyv >> 8) == 2) {
+ tp_features.has_adaptive_kbd = true;
+ res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
+ &adaptive_kbd_attr_group);
+ if (res)
+ goto err_exit;
+ }
+ }
+
quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
ARRAY_SIZE(tpacpi_hotkey_qtable));
@@ -3437,6 +3542,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
err_exit:
delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj,
+ &adaptive_kbd_attr_group);
+
hotkey_dev_attributes = NULL;
return (res < 0) ? res : 1;
@@ -3449,14 +3557,6 @@ err_exit:
 * Support for the remaining modes may be added in the future.
*
*/
-enum ADAPTIVE_KEY_MODE {
- HOME_MODE,
- WEB_BROWSER_MODE,
- WEB_CONFERENCE_MODE,
- FUNCTION_MODE,
- LAYFLAT_MODE
-};
-
static const int adaptive_keyboard_modes[] = {
HOME_MODE,
/* WEB_BROWSER_MODE = 2,
@@ -3466,6 +3566,8 @@ static const int adaptive_keyboard_modes[] = {
#define DFR_CHANGE_ROW 0x101
#define DFR_SHOW_QUICKVIEW_ROW 0x102
+#define FIRST_ADAPTIVE_KEY 0x103
+#define ADAPTIVE_KEY_OFFSET 0x020
/* Press and hold the Fn key for about a second to switch to Function Mode; then
 * releasing the Fn key restores the previous mode.
@@ -3473,6 +3575,32 @@ static const int adaptive_keyboard_modes[] = {
static bool adaptive_keyboard_mode_is_saved;
static int adaptive_keyboard_prev_mode;
+static int adaptive_keyboard_get_mode(void)
+{
+ int mode = 0;
+
+ if (!acpi_evalf(hkey_handle, &mode, "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode\n");
+ return -EIO;
+ }
+
+ return mode;
+}
+
+static int adaptive_keyboard_set_mode(int new_mode)
+{
+ if (new_mode < 0 ||
+ new_mode > LAYFLAT_MODE)
+ return -EINVAL;
+
+ if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
+ pr_err("Cannot set adaptive keyboard mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int adaptive_keyboard_get_next_mode(int mode)
{
size_t i;
@@ -3493,8 +3621,9 @@ static int adaptive_keyboard_get_next_mode(int mode)
static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
{
- u32 current_mode = 0;
+ int current_mode = 0;
int new_mode = 0;
+ int keycode;
switch (scancode) {
case DFR_CHANGE_ROW:
@@ -3502,43 +3631,51 @@ static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
new_mode = adaptive_keyboard_prev_mode;
adaptive_keyboard_mode_is_saved = false;
} else {
- if (!acpi_evalf(
- hkey_handle, &current_mode,
- "GTRW", "dd", 0)) {
- pr_err("Cannot read adaptive keyboard mode\n");
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
return false;
- } else {
- new_mode = adaptive_keyboard_get_next_mode(
- current_mode);
- }
+ new_mode = adaptive_keyboard_get_next_mode(
+ current_mode);
}
- if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
- pr_err("Cannot set adaptive keyboard mode\n");
+ if (adaptive_keyboard_set_mode(new_mode) < 0)
return false;
- }
return true;
case DFR_SHOW_QUICKVIEW_ROW:
- if (!acpi_evalf(hkey_handle,
- &adaptive_keyboard_prev_mode,
- "GTRW", "dd", 0)) {
- pr_err("Cannot read adaptive keyboard mode\n");
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
return false;
- } else {
- adaptive_keyboard_mode_is_saved = true;
- if (!acpi_evalf(hkey_handle,
- NULL, "STRW", "vd", FUNCTION_MODE)) {
- pr_err("Cannot set adaptive keyboard mode\n");
- return false;
- }
- }
+ adaptive_keyboard_prev_mode = current_mode;
+ adaptive_keyboard_mode_is_saved = true;
+
+ if (adaptive_keyboard_set_mode(FUNCTION_MODE) < 0)
+ return false;
return true;
default:
- return false;
+ if (scancode < FIRST_ADAPTIVE_KEY ||
+ scancode >= FIRST_ADAPTIVE_KEY + TPACPI_HOTKEY_MAP_LEN -
+ ADAPTIVE_KEY_OFFSET) {
+ pr_info("Unhandled adaptive keyboard key: 0x%x\n",
+ scancode);
+ return false;
+ }
+ keycode = hotkey_keycode_map[scancode - FIRST_ADAPTIVE_KEY + ADAPTIVE_KEY_OFFSET];
+ if (keycode != KEY_RESERVED) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_key(tpacpi_inputdev, keycode, 1);
+ input_sync(tpacpi_inputdev);
+
+ input_report_key(tpacpi_inputdev, keycode, 0);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+ return true;
}
}
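
A worked example of the index arithmetic in the default case above: the adaptive-row scancodes 0x103..0x116 are shifted into keymap slots 0x20..0x33, matching the "offset those by 0x20" note in the keymap table (illustration only; adaptive_scancode_to_index() is a made-up helper):

#define FIRST_ADAPTIVE_KEY	0x103
#define ADAPTIVE_KEY_OFFSET	0x020

static inline unsigned int adaptive_scancode_to_index(unsigned int scancode)
{
	return scancode - FIRST_ADAPTIVE_KEY + ADAPTIVE_KEY_OFFSET;
}

/* adaptive_scancode_to_index(0x103) == 0x20 -> first adaptive slot (Mute held)    */
/* adaptive_scancode_to_index(0x116) == 0x33 -> last adaptive slot (Rotate display) */
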
@@ -3836,28 +3973,21 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
static void hotkey_suspend(void)
{
- int hkeyv;
-
/* Do these on suspend, we get the events on early resume! */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
hotkey_autosleep_ack = 0;
/* save previous mode of adaptive keyboard of X1 Carbon */
- if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) == 2) {
- if (!acpi_evalf(hkey_handle,
- &adaptive_keyboard_prev_mode,
- "GTRW", "dd", 0)) {
- pr_err("Cannot read adaptive keyboard mode.\n");
- }
+ if (tp_features.has_adaptive_kbd) {
+ if (!acpi_evalf(hkey_handle, &adaptive_keyboard_prev_mode,
+ "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode.\n");
}
}
}
static void hotkey_resume(void)
{
- int hkeyv;
-
tpacpi_disable_brightness_delay();
if (hotkey_status_set(true) < 0 ||
@@ -3872,14 +4002,10 @@ static void hotkey_resume(void)
hotkey_poll_setup_safe(false);
/* restore previous mode of adaptive keyboard of X1 Carbon */
- if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) == 2) {
- if (!acpi_evalf(hkey_handle,
- NULL,
- "STRW", "vd",
- adaptive_keyboard_prev_mode)) {
- pr_err("Cannot set adaptive keyboard mode.\n");
- }
+ if (tp_features.has_adaptive_kbd) {
+ if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd",
+ adaptive_keyboard_prev_mode)) {
+ pr_err("Cannot set adaptive keyboard mode.\n");
}
}
}
@@ -4079,9 +4205,7 @@ static ssize_t bluetooth_enable_store(struct device *dev,
attr, buf, count);
}
-static struct device_attribute dev_attr_bluetooth_enable =
- __ATTR(bluetooth_enable, S_IWUSR | S_IRUGO,
- bluetooth_enable_show, bluetooth_enable_store);
+static DEVICE_ATTR_RW(bluetooth_enable);
/* --------------------------------------------------------------------- */
@@ -4269,9 +4393,7 @@ static ssize_t wan_enable_store(struct device *dev,
attr, buf, count);
}
-static struct device_attribute dev_attr_wan_enable =
- __ATTR(wwan_enable, S_IWUSR | S_IRUGO,
- wan_enable_show, wan_enable_store);
+static DEVICE_ATTR_RW(wan_enable);
/* --------------------------------------------------------------------- */
@@ -5048,8 +5170,7 @@ static ssize_t cmos_command_store(struct device *dev,
return (res) ? res : count;
}
-static struct device_attribute dev_attr_cmos_command =
- __ATTR(cmos_command, S_IWUSR, NULL, cmos_command_store);
+static DEVICE_ATTR_WO(cmos_command);
/* --------------------------------------------------------------------- */
@@ -8017,9 +8138,7 @@ static ssize_t fan_pwm1_enable_store(struct device *dev,
return count;
}
-static struct device_attribute dev_attr_fan_pwm1_enable =
- __ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
- fan_pwm1_enable_show, fan_pwm1_enable_store);
+static DEVICE_ATTR_RW(fan_pwm1_enable);
/* sysfs fan pwm1 ------------------------------------------------------ */
static ssize_t fan_pwm1_show(struct device *dev,
@@ -8079,9 +8198,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
return (rc) ? rc : count;
}
-static struct device_attribute dev_attr_fan_pwm1 =
- __ATTR(pwm1, S_IWUSR | S_IRUGO,
- fan_pwm1_show, fan_pwm1_store);
+static DEVICE_ATTR_RW(fan_pwm1);
/* sysfs fan fan1_input ------------------------------------------------ */
static ssize_t fan_fan1_input_show(struct device *dev,
@@ -8098,9 +8215,7 @@ static ssize_t fan_fan1_input_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", speed);
}
-static struct device_attribute dev_attr_fan_fan1_input =
- __ATTR(fan1_input, S_IRUGO,
- fan_fan1_input_show, NULL);
+static DEVICE_ATTR_RO(fan_fan1_input);
/* sysfs fan fan2_input ------------------------------------------------ */
static ssize_t fan_fan2_input_show(struct device *dev,
@@ -8117,9 +8232,7 @@ static ssize_t fan_fan2_input_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", speed);
}
-static struct device_attribute dev_attr_fan_fan2_input =
- __ATTR(fan2_input, S_IRUGO,
- fan_fan2_input_show, NULL);
+static DEVICE_ATTR_RO(fan_fan2_input);
/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
@@ -8735,8 +8848,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
}
-static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
- __ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
+static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name);
/* --------------------------------------------------------------------- */
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index dbcb7a8915b8..9956b9902bb4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -51,6 +51,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/uaccess.h>
+#include <acpi/video.h>
MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
@@ -116,6 +117,7 @@ MODULE_LICENSE("GPL");
#define HCI_KBD_ILLUMINATION 0x0095
#define HCI_ECO_MODE 0x0097
#define HCI_ACCELEROMETER2 0x00a6
+#define HCI_SYSTEM_INFO 0xc000
#define SCI_PANEL_POWER_ON 0x010d
#define SCI_ILLUMINATION 0x014e
#define SCI_USB_SLEEP_CHARGE 0x0150
@@ -129,10 +131,13 @@ MODULE_LICENSE("GPL");
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
#define HCI_HOTKEY_ENABLE 0x09
+#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
#define HCI_LCD_BRIGHTNESS_LEVELS (1 << HCI_LCD_BRIGHTNESS_BITS)
#define HCI_MISC_SHIFT 0x10
+#define HCI_SYSTEM_TYPE1 0x10
+#define HCI_SYSTEM_TYPE2 0x11
#define HCI_VIDEO_OUT_LCD 0x1
#define HCI_VIDEO_OUT_CRT 0x2
#define HCI_VIDEO_OUT_TV 0x4
@@ -147,9 +152,10 @@ MODULE_LICENSE("GPL");
#define SCI_KBD_MODE_OFF 0x10
#define SCI_KBD_TIME_MAX 0x3c001a
#define SCI_USB_CHARGE_MODE_MASK 0xff
-#define SCI_USB_CHARGE_DISABLED 0x30000
-#define SCI_USB_CHARGE_ALTERNATE 0x30009
-#define SCI_USB_CHARGE_AUTO 0x30021
+#define SCI_USB_CHARGE_DISABLED 0x00
+#define SCI_USB_CHARGE_ALTERNATE 0x09
+#define SCI_USB_CHARGE_TYPICAL 0x11
+#define SCI_USB_CHARGE_AUTO 0x21
#define SCI_USB_CHARGE_BAT_MASK 0x7
#define SCI_USB_CHARGE_BAT_LVL_OFF 0x1
#define SCI_USB_CHARGE_BAT_LVL_ON 0x4
@@ -174,6 +180,8 @@ struct toshiba_acpi_dev {
int kbd_mode;
int kbd_time;
int usbsc_bat_level;
+ int usbsc_mode_base;
+ int hotkey_event_type;
unsigned int illumination_supported:1;
unsigned int video_supported:1;
@@ -243,29 +251,6 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_END, 0 },
};
-/* alternative keymap */
-static const struct dmi_system_id toshiba_alt_keymap_dmi[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite M840"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Qosmio X75-A"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A50-A"),
- },
- },
- {}
-};
-
static const struct key_entry toshiba_acpi_alt_keymap[] = {
{ KE_KEY, 0x157, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
@@ -281,6 +266,14 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
};
/*
+ * List of models which have a broken acpi-video backlight interface and thus
+ * need to use the toshiba (vendor) interface instead.
+ */
+static const struct dmi_system_id toshiba_vendor_backlight_dmi[] = {
+ {}
+};
+
+/*
* Utility
*/
@@ -819,6 +812,54 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
}
/* Sleep (Charge and Music) utilities support */
+static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ /* Set the feature to "not supported" in case of error */
+ dev->usb_sleep_charge_supported = 0;
+
+ if (!sci_open(dev))
+ return;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+ pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
+ pr_info("USB Sleep and Charge not supported\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_SUCCESS) {
+ dev->usbsc_mode_base = out[4];
+ }
+
+ in[5] = SCI_USB_CHARGE_BAT_LVL;
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+ pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
+ pr_info("USB Sleep and Charge not supported\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_SUCCESS) {
+ dev->usbsc_bat_level = out[2];
+ /*
+ * If we reach this point, it means that the laptop has support
+ * for this feature and all values are initialized.
+ * Set it as supported.
+ */
+ dev->usb_sleep_charge_supported = 1;
+ }
+
+ sci_close(dev);
+}
+
static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
u32 *mode)
{
@@ -934,11 +975,11 @@ static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
status = tci_raw(dev, in, out);
sci_close(dev);
if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
- pr_err("ACPI call to get USB S&C battery level failed\n");
+ pr_err("ACPI call to get USB Rapid Charge failed\n");
return -EIO;
} else if (out[0] == TOS_NOT_SUPPORTED ||
out[0] == TOS_INPUT_DATA_ERROR) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("USB Rapid Charge not supported\n");
return -ENODEV;
}
@@ -962,10 +1003,10 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
status = tci_raw(dev, in, out);
sci_close(dev);
if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
- pr_err("ACPI call to set USB S&C battery level failed\n");
+ pr_err("ACPI call to set USB Rapid Charge failed\n");
return -EIO;
} else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("USB Rapid Charge not supported\n");
return -ENODEV;
} else if (out[0] == TOS_INPUT_DATA_ERROR) {
return -EIO;
@@ -984,10 +1025,10 @@ static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
sci_close(dev);
if (result == TOS_FAILURE) {
- pr_err("ACPI call to set USB S&C mode failed\n");
+ pr_err("ACPI call to get Sleep and Music failed\n");
return -EIO;
} else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("Sleep and Music not supported\n");
return -ENODEV;
} else if (result == TOS_INPUT_DATA_ERROR) {
return -EIO;
@@ -1006,10 +1047,10 @@ static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
sci_close(dev);
if (result == TOS_FAILURE) {
- pr_err("ACPI call to set USB S&C mode failed\n");
+ pr_err("ACPI call to set Sleep and Music failed\n");
return -EIO;
} else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("Sleep and Music not supported\n");
return -ENODEV;
} else if (result == TOS_INPUT_DATA_ERROR) {
return -EIO;
@@ -1149,6 +1190,28 @@ static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
return 0;
}
+/* Hotkey Event type */
+static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev,
+ u32 *type)
+{
+ u32 val1 = 0x03;
+ u32 val2 = 0;
+ u32 result;
+
+ result = hci_read2(dev, HCI_SYSTEM_INFO, &val1, &val2);
+ if (result == TOS_FAILURE) {
+ pr_err("ACPI call to get System type failed\n");
+ return -EIO;
+ } else if (result == TOS_NOT_SUPPORTED) {
+ pr_info("System type not supported\n");
+ return -ENODEV;
+ }
+
+ *type = val2;
+
+ return 0;
+}
+
/* Bluetooth rfkill handlers */
static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
@@ -1973,17 +2036,21 @@ static ssize_t usb_sleep_charge_store(struct device *dev,
* 0 - Disabled
* 1 - Alternate (Non USB conformant devices that require more power)
* 2 - Auto (USB conformant devices)
+ * 3 - Typical
*/
- if (state != 0 && state != 1 && state != 2)
+ if (state != 0 && state != 1 && state != 2 && state != 3)
return -EINVAL;
/* Set the USB charging mode to internal value */
+ mode = toshiba->usbsc_mode_base;
if (state == 0)
- mode = SCI_USB_CHARGE_DISABLED;
+ mode |= SCI_USB_CHARGE_DISABLED;
else if (state == 1)
- mode = SCI_USB_CHARGE_ALTERNATE;
+ mode |= SCI_USB_CHARGE_ALTERNATE;
else if (state == 2)
- mode = SCI_USB_CHARGE_AUTO;
+ mode |= SCI_USB_CHARGE_AUTO;
+ else if (state == 3)
+ mode |= SCI_USB_CHARGE_TYPICAL;
ret = toshiba_usb_sleep_charge_set(toshiba, mode);
if (ret)
@@ -2333,6 +2400,20 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
return 0;
}
+static void toshiba_acpi_enable_special_functions(struct toshiba_acpi_dev *dev)
+{
+ u32 result;
+
+ /*
+ * Re-activate the hotkeys, but this time, we are using the
+ * "Special Functions" mode.
+ */
+ result = hci_write1(dev, HCI_HOTKEY_EVENT,
+ HCI_HOTKEY_SPECIAL_FUNCTIONS);
+ if (result != TOS_SUCCESS)
+ pr_err("Could not enable the Special Function mode\n");
+}
+
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
@@ -2434,10 +2515,22 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
+ const struct key_entry *keymap = toshiba_acpi_keymap;
acpi_handle ec_handle;
- int error;
+ u32 events_type;
u32 hci_result;
- const struct key_entry *keymap = toshiba_acpi_keymap;
+ int error;
+
+ error = toshiba_acpi_enable_hotkeys(dev);
+ if (error)
+ return error;
+
+ error = toshiba_hotkey_event_type_get(dev, &events_type);
+ if (error) {
+ pr_err("Unable to query Hotkey Event Type\n");
+ return error;
+ }
+ dev->hotkey_event_type = events_type;
dev->hotkey_dev = input_allocate_device();
if (!dev->hotkey_dev)
@@ -2447,8 +2540,14 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
dev->hotkey_dev->phys = "toshiba_acpi/input0";
dev->hotkey_dev->id.bustype = BUS_HOST;
- if (dmi_check_system(toshiba_alt_keymap_dmi))
+ if (events_type == HCI_SYSTEM_TYPE1 ||
+ !dev->kbd_function_keys_supported)
+ keymap = toshiba_acpi_keymap;
+ else if (events_type == HCI_SYSTEM_TYPE2 ||
+ dev->kbd_function_keys_supported)
keymap = toshiba_acpi_alt_keymap;
+ else
+ pr_info("Unknown event type received %x\n", events_type);
error = sparse_keymap_setup(dev->hotkey_dev, keymap, NULL);
if (error)
goto err_free_dev;
@@ -2490,12 +2589,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
goto err_remove_filter;
}
- error = toshiba_acpi_enable_hotkeys(dev);
- if (error) {
- pr_info("Unable to enable hotkeys\n");
- goto err_remove_filter;
- }
-
error = input_register_device(dev->hotkey_dev);
if (error) {
pr_info("Unable to register input device\n");
@@ -2541,6 +2634,20 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
ret = get_tr_backlight_status(dev, &enabled);
dev->tr_backlight_supported = !ret;
+ /*
+ * Tell acpi-video-detect code to prefer vendor backlight on all
+ * systems with transflective backlight and on dmi matched systems.
+ */
+ if (dev->tr_backlight_supported ||
+ dmi_check_system(toshiba_vendor_backlight_dmi))
+ acpi_video_dmi_promote_vendor();
+
+ if (acpi_video_backlight_support())
+ return 0;
+
+ /* acpi-video may have loaded before we called dmi_promote_vendor() */
+ acpi_video_unregister_backlight();
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -2624,6 +2731,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev;
const char *hci_method;
+ u32 special_functions;
u32 dummy;
bool bt_present;
int ret = 0;
@@ -2648,6 +2756,16 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
acpi_dev->driver_data = dev;
dev_set_drvdata(&acpi_dev->dev, dev);
+ /* Query the BIOS for supported features */
+
+ /*
+ * The "Special Functions" are always supported by laptops with
+ * the new keyboard layout, so query for their presence to help
+ * determine which keymap layout to use.
+ */
+ ret = toshiba_function_keys_get(dev, &special_functions);
+ dev->kbd_function_keys_supported = !ret;
+
if (toshiba_acpi_setup_keyboard(dev))
pr_info("Unable to activate hotkeys\n");
@@ -2716,8 +2834,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
ret = toshiba_accelerometer_supported(dev);
dev->accelerometer_supported = !ret;
- ret = toshiba_usb_sleep_charge_get(dev, &dummy);
- dev->usb_sleep_charge_supported = !ret;
+ toshiba_usb_sleep_charge_available(dev);
ret = toshiba_usb_rapid_charge_get(dev, &dummy);
dev->usb_rapid_charge_supported = !ret;
@@ -2725,23 +2842,25 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
ret = toshiba_usb_sleep_music_get(dev, &dummy);
dev->usb_sleep_music_supported = !ret;
- ret = toshiba_function_keys_get(dev, &dummy);
- dev->kbd_function_keys_supported = !ret;
-
ret = toshiba_panel_power_on_get(dev, &dummy);
dev->panel_power_on_supported = !ret;
ret = toshiba_usb_three_get(dev, &dummy);
dev->usb_three_supported = !ret;
- /* Determine whether or not BIOS supports fan and video interfaces */
-
ret = get_video_status(dev, &dummy);
dev->video_supported = !ret;
ret = get_fan_status(dev, &dummy);
dev->fan_supported = !ret;
+ /*
+ * Enable the "Special Functions" mode only if it is
+ * supported and activated.
+ */
+ if (dev->kbd_function_keys_supported && special_functions)
+ toshiba_acpi_enable_special_functions(dev);
+
ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
&toshiba_attr_group);
if (ret) {
@@ -2770,6 +2889,21 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
case 0x80: /* Hotkeys and some system events */
toshiba_acpi_process_hotkeys(dev);
break;
+ case 0x81: /* Dock events */
+ case 0x82:
+ case 0x83:
+ pr_info("Dock event received %x\n", event);
+ break;
+ case 0x88: /* Thermal events */
+ pr_info("Thermal event received\n");
+ break;
+ case 0x8f: /* LID closed */
+ case 0x90: /* LID is closed and Dock has been ejected */
+ break;
+ case 0x8c: /* SATA power events */
+ case 0x8b:
+ pr_info("SATA power event received %x\n", event);
+ break;
case 0x92: /* Keyboard backlight mode changed */
/* Update sysfs entries */
ret = sysfs_update_group(&acpi_dev->dev.kobj,
@@ -2777,17 +2911,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
if (ret)
pr_err("Unable to update sysfs entries\n");
break;
- case 0x81: /* Unknown */
- case 0x82: /* Unknown */
- case 0x83: /* Unknown */
- case 0x8c: /* Unknown */
+ case 0x85: /* Unknown */
+ case 0x8d: /* Unknown */
case 0x8e: /* Unknown */
- case 0x8f: /* Unknown */
- case 0x90: /* Unknown */
+ case 0x94: /* Unknown */
+ case 0x95: /* Unknown */
default:
pr_info("Unknown event received %x\n", event);
break;
}
+
+ acpi_bus_generate_netlink_event(acpi_dev->pnp.device_class,
+ dev_name(&acpi_dev->dev),
+ event, 0);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 2cb1ea62b4a7..249800763362 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -2,6 +2,7 @@
* Toshiba Bluetooth Enable Driver
*
* Copyright (C) 2009 Jes Sorensen <Jes.Sorensen@gmail.com>
+ * Copyright (C) 2015 Azael Avalos <coproscefalo@gmail.com>
*
* Thanks to Matthew Garrett for background info on ACPI innards which
* normal people aren't meant to understand :-)
@@ -25,6 +26,10 @@
#include <linux/types.h>
#include <linux/acpi.h>
+#define BT_KILLSWITCH_MASK 0x01
+#define BT_PLUGGED_MASK 0x40
+#define BT_POWER_MASK 0x80
+
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
MODULE_LICENSE("GPL");
@@ -57,32 +62,107 @@ static struct acpi_driver toshiba_bt_rfkill_driver = {
.drv.pm = &toshiba_bt_pm,
};
+static int toshiba_bluetooth_present(acpi_handle handle)
+{
+ acpi_status result;
+ u64 bt_present;
+
+ /*
+ * Some Toshiba laptops may have a fake TOS6205 device in
+ * their ACPI BIOS, so query the _STA method to see if there
+ * is really anything there.
+ */
+ result = acpi_evaluate_integer(handle, "_STA", NULL, &bt_present);
+ if (ACPI_FAILURE(result)) {
+ pr_err("ACPI call to query Bluetooth presence failed\n");
+ return -ENXIO;
+ } else if (!bt_present) {
+ pr_info("Bluetooth device not present\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int toshiba_bluetooth_status(acpi_handle handle)
+{
+ acpi_status result;
+ u64 status;
+
+ result = acpi_evaluate_integer(handle, "BTST", NULL, &status);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not get Bluetooth device status\n");
+ return -ENXIO;
+ }
+
+ pr_info("Bluetooth status %llu\n", status);
+
+ return status;
+}
static int toshiba_bluetooth_enable(acpi_handle handle)
{
- acpi_status res1, res2;
- u64 result;
+ acpi_status result;
+ bool killswitch;
+ bool powered;
+ bool plugged;
+ int status;
/*
* Query ACPI to verify RFKill switch is set to 'on'.
* If not, we return silently, no need to report it as
* an error.
*/
- res1 = acpi_evaluate_integer(handle, "BTST", NULL, &result);
- if (ACPI_FAILURE(res1))
- return res1;
- if (!(result & 0x01))
- return 0;
+ status = toshiba_bluetooth_status(handle);
+ if (status < 0)
+ return status;
+
+ killswitch = (status & BT_KILLSWITCH_MASK) ? true : false;
+ powered = (status & BT_POWER_MASK) ? true : false;
+ plugged = (status & BT_PLUGGED_MASK) ? true : false;
- pr_info("Re-enabling Toshiba Bluetooth\n");
- res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
- res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
- if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
+ if (!killswitch)
return 0;
+ /*
+ * This check ensures that the device is only enabled if it is powered
+ * off or detached, as some recent devices somehow pass the killswitch
+ * test, causing an enable/disable loop on the device; see bug 93911.
+ */
+ if (powered || plugged)
+ return 0;
+
+ result = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not attach USB Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ result = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not power ON Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int toshiba_bluetooth_disable(acpi_handle handle)
+{
+ acpi_status result;
+
+ result = acpi_evaluate_object(handle, "BTPF", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not power OFF Bluetooth device\n");
+ return -ENXIO;
+ }
- pr_warn("Failed to re-enable Toshiba Bluetooth\n");
+ result = acpi_evaluate_object(handle, "DUSB", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not detach USB Bluetooth device\n");
+ return -ENXIO;
+ }
- return -ENODEV;
+ return 0;
}
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
@@ -99,23 +179,18 @@ static int toshiba_bt_resume(struct device *dev)
static int toshiba_bt_rfkill_add(struct acpi_device *device)
{
- acpi_status status;
- u64 bt_present;
- int result = -ENODEV;
+ int result;
- /*
- * Some Toshiba laptops may have a fake TOS6205 device in
- * their ACPI BIOS, so query the _STA method to see if there
- * is really anything there, before trying to enable it.
- */
- status = acpi_evaluate_integer(device->handle, "_STA", NULL,
- &bt_present);
+ result = toshiba_bluetooth_present(device->handle);
+ if (result)
+ return result;
- if (!ACPI_FAILURE(status) && bt_present) {
- pr_info("Detected Toshiba ACPI Bluetooth device - "
- "installing RFKill handler\n");
- result = toshiba_bluetooth_enable(device->handle);
- }
+ pr_info("Toshiba ACPI Bluetooth device driver\n");
+
+ /* Enable the BT device */
+ result = toshiba_bluetooth_enable(device->handle);
+ if (result)
+ return result;
return result;
}
@@ -123,7 +198,7 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
static int toshiba_bt_rfkill_remove(struct acpi_device *device)
{
/* clean up */
- return 0;
+ return toshiba_bluetooth_disable(device->handle);
}
module_acpi_driver(toshiba_bt_rfkill_driver);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 737e56d46f61..aac47573f9ed 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -45,7 +45,6 @@ MODULE_LICENSE("GPL");
#define ACPI_WMI_CLASS "wmi"
-static DEFINE_MUTEX(wmi_data_lock);
static LIST_HEAD(wmi_block_list);
struct guid_block {
@@ -240,10 +239,10 @@ static bool find_guid(const char *guid_string, struct wmi_block **out)
if (memcmp(block->guid, guid_input, 16) == 0) {
if (out)
*out = wblock;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index c8873b0ca551..3151fd164614 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -3,7 +3,7 @@
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
-extern spinlock_t pnp_lock;
+extern struct mutex pnp_lock;
extern const struct attribute_group *pnp_dev_groups[];
void *pnp_alloc(long size);
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 874c236ac1a7..31ad9fc3f701 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -5,6 +5,7 @@
*/
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/pnp.h>
@@ -244,10 +245,10 @@ int pnp_add_card(struct pnp_card *card)
}
pnp_interface_attach_card(card);
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
list_add_tail(&card->global_list, &pnp_cards);
list_add_tail(&card->protocol_list, &card->protocol->cards);
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
/* we wait until now to add devices in order to ensure the drivers
* will be able to use all of the related devices on the card
@@ -276,10 +277,10 @@ void pnp_remove_card(struct pnp_card *card)
struct list_head *pos, *temp;
device_unregister(&card->dev);
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
list_del(&card->global_list);
list_del(&card->protocol_list);
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
list_for_each_safe(pos, temp, &card->devices) {
struct pnp_dev *dev = card_to_pnp_dev(pos);
pnp_remove_card_device(dev);
@@ -297,10 +298,10 @@ int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev)
dev->card_link = NULL;
dev_set_name(&dev->dev, "%02x:%02x.%02x",
dev->protocol->number, card->number, dev->number);
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
dev->card = card;
list_add_tail(&dev->card_list, &card->devices);
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
return 0;
}
@@ -310,10 +311,10 @@ int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev)
*/
void pnp_remove_card_device(struct pnp_dev *dev)
{
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
dev->card = NULL;
list_del(&dev->card_list);
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
__pnp_remove_device(dev);
}
@@ -426,9 +427,9 @@ int pnp_register_card_driver(struct pnp_card_driver *drv)
if (error < 0)
return error;
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
list_add_tail(&drv->global_list, &pnp_card_drivers);
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
list_for_each_safe(pos, temp, &pnp_cards) {
struct pnp_card *card =
@@ -444,9 +445,9 @@ int pnp_register_card_driver(struct pnp_card_driver *drv)
*/
void pnp_unregister_card_driver(struct pnp_card_driver *drv)
{
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
list_del(&drv->global_list);
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
pnp_unregister_driver(&drv->link);
}
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index cb6ce42f8e77..b54620e53830 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -19,7 +20,7 @@
static LIST_HEAD(pnp_protocols);
LIST_HEAD(pnp_global);
-DEFINE_SPINLOCK(pnp_lock);
+DEFINE_MUTEX(pnp_lock);
/*
* ACPI or PNPBIOS should tell us about all platform devices, so we can
@@ -41,6 +42,13 @@ void *pnp_alloc(long size)
return result;
}
+static void pnp_remove_protocol(struct pnp_protocol *protocol)
+{
+ mutex_lock(&pnp_lock);
+ list_del(&protocol->protocol_list);
+ mutex_unlock(&pnp_lock);
+}
+
/**
* pnp_protocol_register - adds a pnp protocol to the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
@@ -49,13 +57,14 @@ void *pnp_alloc(long size)
*/
int pnp_register_protocol(struct pnp_protocol *protocol)
{
- int nodenum;
struct list_head *pos;
+ int nodenum, ret;
INIT_LIST_HEAD(&protocol->devices);
INIT_LIST_HEAD(&protocol->cards);
nodenum = 0;
- spin_lock(&pnp_lock);
+
+ mutex_lock(&pnp_lock);
/* assign the lowest unused number */
list_for_each(pos, &pnp_protocols) {
@@ -66,12 +75,18 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
}
}
- list_add_tail(&protocol->protocol_list, &pnp_protocols);
- spin_unlock(&pnp_lock);
-
protocol->number = nodenum;
dev_set_name(&protocol->dev, "pnp%d", nodenum);
- return device_register(&protocol->dev);
+
+ list_add_tail(&protocol->protocol_list, &pnp_protocols);
+
+ mutex_unlock(&pnp_lock);
+
+ ret = device_register(&protocol->dev);
+ if (ret)
+ pnp_remove_protocol(protocol);
+
+ return ret;
}
/**
@@ -80,9 +95,7 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
*/
void pnp_unregister_protocol(struct pnp_protocol *protocol)
{
- spin_lock(&pnp_lock);
- list_del(&protocol->protocol_list);
- spin_unlock(&pnp_lock);
+ pnp_remove_protocol(protocol);
device_unregister(&protocol->dev);
}
@@ -157,18 +170,36 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
return dev;
}
+static void pnp_delist_device(struct pnp_dev *dev)
+{
+ mutex_lock(&pnp_lock);
+ list_del(&dev->global_list);
+ list_del(&dev->protocol_list);
+ mutex_unlock(&pnp_lock);
+}
+
int __pnp_add_device(struct pnp_dev *dev)
{
+ int ret;
+
pnp_fixup_device(dev);
dev->status = PNP_READY;
- spin_lock(&pnp_lock);
+
+ mutex_lock(&pnp_lock);
+
list_add_tail(&dev->global_list, &pnp_global);
list_add_tail(&dev->protocol_list, &dev->protocol->devices);
- spin_unlock(&pnp_lock);
- if (dev->protocol->can_wakeup)
+
+ mutex_unlock(&pnp_lock);
+
+ ret = device_register(&dev->dev);
+ if (ret)
+ pnp_delist_device(dev);
+ else if (dev->protocol->can_wakeup)
device_set_wakeup_capable(&dev->dev,
dev->protocol->can_wakeup(dev));
- return device_register(&dev->dev);
+
+ return ret;
}
/*
@@ -203,10 +234,7 @@ int pnp_add_device(struct pnp_dev *dev)
void __pnp_remove_device(struct pnp_dev *dev)
{
- spin_lock(&pnp_lock);
- list_del(&dev->global_list);
- list_del(&dev->protocol_list);
- spin_unlock(&pnp_lock);
+ pnp_delist_device(dev);
device_unregister(&dev->dev);
}
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 4e57d3370368..153a493b5413 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -58,22 +58,22 @@ static const struct pnp_device_id *match_device(struct pnp_driver *drv,
int pnp_device_attach(struct pnp_dev *pnp_dev)
{
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
if (pnp_dev->status != PNP_READY) {
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
return -EBUSY;
}
pnp_dev->status = PNP_ATTACHED;
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
return 0;
}
void pnp_device_detach(struct pnp_dev *pnp_dev)
{
- spin_lock(&pnp_lock);
+ mutex_lock(&pnp_lock);
if (pnp_dev->status == PNP_ATTACHED)
pnp_dev->status = PNP_READY;
- spin_unlock(&pnp_lock);
+ mutex_unlock(&pnp_lock);
pnp_disable_dev(pnp_dev);
}
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index d2b780aade89..5153d1d69aee 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -248,6 +248,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
if (!dev)
return -ENOMEM;
+ ACPI_COMPANION_SET(&dev->dev, device);
dev->data = device;
/* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
@@ -290,11 +291,9 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
return error;
}
- error = acpi_bind_one(&dev->dev, device);
-
num++;
- return error;
+ return 0;
}
static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
diff --git a/drivers/power/axp288_fuel_gauge.c b/drivers/power/axp288_fuel_gauge.c
index ca1cc5a47eb1..bd1dbfee2515 100644
--- a/drivers/power/axp288_fuel_gauge.c
+++ b/drivers/power/axp288_fuel_gauge.c
@@ -1149,6 +1149,7 @@ static struct platform_driver axp288_fuel_gauge_driver = {
module_platform_driver(axp288_fuel_gauge_driver);
+MODULE_AUTHOR("Ramakrishna Pallala <ramakrishna.pallala@intel.com>");
MODULE_AUTHOR("Todd Brandt <todd.e.brandt@linux.intel.com>");
MODULE_DESCRIPTION("Xpower AXP288 Fuel Gauge Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index a57433de5c24..b6b98378faa3 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -1109,6 +1109,14 @@ static void __exit bq27x00_battery_exit(void)
}
module_exit(bq27x00_battery_exit);
+#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
+MODULE_ALIAS("platform:bq27000-battery");
+#endif
+
+#ifdef CONFIG_BATTERY_BQ27X00_I2C
+MODULE_ALIAS("i2c:bq27000-battery");
+#endif
+
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("BQ27x00 battery monitor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 2da9ed8ccbb5..8a971b3dbe58 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -347,7 +347,7 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
goto err_psy_reg_main;
}
- psy_main_cfg.drv_data = &collie_bat_bu;
+ psy_bu_cfg.drv_data = &collie_bat_bu;
collie_bat_bu.psy = power_supply_register(&dev->ucb->dev,
&collie_bat_bu_desc,
&psy_bu_cfg);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index aad9c3318c02..17d93a73c513 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -41,6 +41,7 @@ config POWER_RESET_AXXIA
config POWER_RESET_BRCMSTB
bool "Broadcom STB reset driver"
depends on ARM || MIPS || COMPILE_TEST
+ depends on MFD_SYSCON
default ARCH_BRCMSTB
help
This driver provides restart support for Broadcom STB boards.
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 01c7055c4200..ca461ebc7ae8 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 );
at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(at91_ramc_base[idx])) {
+ if (!at91_ramc_base[idx]) {
dev_err(&pdev->dev, "Could not map ram controller address\n");
- return PTR_ERR(at91_ramc_base[idx]);
+ return -ENOMEM;
}
}
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 7ef193b6f7fe..1e08195551fe 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -120,18 +120,7 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer)
static void ltc2952_poweroff_start_wde(struct ltc2952_poweroff *data)
{
- if (hrtimer_start(&data->timer_wde, data->wde_interval,
- HRTIMER_MODE_REL)) {
- /*
- * The device will not toggle the watchdog reset,
- * thus shut down is only safe if the PowerPath controller
- * has a long enough time-off before triggering a hardware
- * power-off.
- *
- * Only sending a warning as the system will power-off anyway
- */
- dev_err(data->dev, "unable to start the timer\n");
- }
+ hrtimer_start(&data->timer_wde, data->wde_interval, HRTIMER_MODE_REL);
}
static enum hrtimer_restart
@@ -165,9 +154,8 @@ static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id)
}
if (gpiod_get_value(data->gpio_trigger)) {
- if (hrtimer_start(&data->timer_trigger, data->trigger_delay,
- HRTIMER_MODE_REL))
- dev_err(data->dev, "unable to start the wait timer\n");
+ hrtimer_start(&data->timer_trigger, data->trigger_delay,
+ HRTIMER_MODE_REL);
} else {
hrtimer_cancel(&data->timer_trigger);
/* omitting return value check, timer should have been valid */
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 63d4033eb683..fd243231620a 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1054,7 +1054,7 @@ static const struct rapl_defaults rapl_defaults_atom = {
.driver_data = (kernel_ulong_t)&_ops, \
}
-static const struct x86_cpu_id rapl_ids[] = {
+static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
RAPL_CPU(0x37, rapl_defaults_atom),/* Valleyview */
@@ -1062,7 +1062,9 @@ static const struct x86_cpu_id rapl_ids[] = {
RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
+ RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+ RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index b139b7792e9f..cb7d3a67380d 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -105,7 +105,7 @@ struct ps3_lpm_shadow_regs {
* @open: An atomic variable indicating the lpm driver has been opened.
* @rights: The lpm rights granted by the system policy module. A logical
* OR of enum ps3_lpm_rights.
- * @node_id: The node id of a BE prosessor whose performance monitor this
+ * @node_id: The node id of a BE processor whose performance monitor this
* lpar has the right to use.
* @pu_id: The lv1 id of the logical PU.
* @lpm_id: The lv1 id of this lpm instance.
@@ -412,7 +412,7 @@ u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
result = lv1_set_lpm_interval(lpm_priv->lpm_id, 0, 0, &val);
if (result) {
val = 0;
- dev_dbg(sbd_core(), "%s:%u: lv1 set_inteval failed: "
+ dev_dbg(sbd_core(), "%s:%u: lv1 set_interval failed: "
"reg %u, %s\n", __func__, __LINE__, reg,
ps3_result(result));
}
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index f8a76090cbca..da7bae991552 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -124,7 +124,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_time *pct;
- struct timespec ts;
+ struct timespec64 ts;
int enable, err = 0;
unsigned int i, pin_index;
@@ -197,16 +197,16 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
}
pct = &sysoff->ts[0];
for (i = 0; i < sysoff->n_samples; i++) {
- getnstimeofday(&ts);
+ getnstimeofday64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
- ptp->info->gettime(ptp->info, &ts);
+ ptp->info->gettime64(ptp->info, &ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
}
- getnstimeofday(&ts);
+ getnstimeofday64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 296b0ec8744d..2e481b9e8ea5 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -107,13 +107,21 @@ static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
- return ptp->info->settime(ptp->info, tp);
+ struct timespec64 ts = timespec_to_timespec64(*tp);
+
+ return ptp->info->settime64(ptp->info, &ts);
}
static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
- return ptp->info->gettime(ptp->info, tp);
+ struct timespec64 ts;
+ int err;
+
+ err = ptp->info->gettime64(ptp->info, &ts);
+ if (!err)
+ *tp = timespec64_to_timespec(ts);
+ return err;
}
static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 604d340f2095..934c139916c6 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -175,7 +175,7 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
u32 remainder;
@@ -195,7 +195,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
}
static int ptp_ixp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
@@ -248,8 +248,8 @@ static struct ptp_clock_info ptp_ixp_caps = {
.pps = 0,
.adjfreq = ptp_ixp_adjfreq,
.adjtime = ptp_ixp_adjtime,
- .gettime = ptp_ixp_gettime,
- .settime = ptp_ixp_settime,
+ .gettime64 = ptp_ixp_gettime,
+ .settime64 = ptp_ixp_settime,
.enable = ptp_ixp_enable,
};
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 255487272859..3aa22ae4d94c 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -449,7 +449,7 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
u32 remainder;
@@ -467,7 +467,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
}
static int ptp_pch_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
@@ -518,8 +518,8 @@ static struct ptp_clock_info ptp_pch_caps = {
.pps = 0,
.adjfreq = ptp_pch_adjfreq,
.adjtime = ptp_pch_adjtime,
- .gettime = ptp_pch_gettime,
- .settime = ptp_pch_settime,
+ .gettime64 = ptp_pch_gettime,
+ .settime64 = ptp_pch_settime,
.enable = ptp_pch_enable,
};
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 810aef3f4c3e..ba34c7d89042 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(of_pwm_get);
* @table: array of consumers to register
* @num: number of consumers in table
*/
-void __init pwm_add_table(struct pwm_lookup *table, size_t num)
+void pwm_add_table(struct pwm_lookup *table, size_t num)
{
mutex_lock(&pwm_lookup_lock);
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 522f7075bb1a..fa5feaba25a5 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -225,6 +225,10 @@ static const struct of_device_id atmel_hlcdc_dt_ids[] = {
.compatible = "atmel,sama5d3-hlcdc",
.data = &atmel_hlcdc_pwm_sama5d3_errata,
},
+ {
+ .compatible = "atmel,sama5d4-hlcdc",
+ .data = &atmel_hlcdc_pwm_sama5d3_errata,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 476171a768d6..8a029f9bc18c 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -16,6 +16,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -38,7 +39,22 @@
#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
-#define MAX_TMBASE_STEPS 65536
+/*
+ * PWM period is specified with a timebase register,
+ * in number of step periods. The PWM duty cycle is also
+ * specified in step periods, in the [0, $timebase] range.
+ * In other words, the timebase imposes the duty cycle
+ * resolution. Therefore, let's constrain the timebase to
+ * a minimum value to allow a sane range of duty cycle values.
+ * Imposing a minimum timebase will impose a maximum PWM frequency.
+ *
+ * The value chosen is completely arbitrary.
+ */
+#define MIN_TMBASE_STEPS 16
+
+struct img_pwm_soc_data {
+ u32 max_timebase;
+};
struct img_pwm_chip {
struct device *dev;
@@ -47,6 +63,9 @@ struct img_pwm_chip {
struct clk *sys_clk;
void __iomem *base;
struct regmap *periph_regs;
+ int max_period_ns;
+ int min_period_ns;
+ const struct img_pwm_soc_data *data;
};
static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u32 val, div, duty, timebase;
unsigned long mul, output_clk_hz, input_clk_hz;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+ unsigned int max_timebase = pwm_chip->data->max_timebase;
+
+ if (period_ns < pwm_chip->min_period_ns ||
+ period_ns > pwm_chip->max_period_ns) {
+ dev_err(chip->dev, "configured period not in range\n");
+ return -ERANGE;
+ }
input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
- if (mul <= MAX_TMBASE_STEPS) {
+ if (mul <= max_timebase) {
div = PWM_CTRL_CFG_NO_SUB_DIV;
timebase = DIV_ROUND_UP(mul, 1);
- } else if (mul <= MAX_TMBASE_STEPS * 8) {
+ } else if (mul <= max_timebase * 8) {
div = PWM_CTRL_CFG_SUB_DIV0;
timebase = DIV_ROUND_UP(mul, 8);
- } else if (mul <= MAX_TMBASE_STEPS * 64) {
+ } else if (mul <= max_timebase * 64) {
div = PWM_CTRL_CFG_SUB_DIV1;
timebase = DIV_ROUND_UP(mul, 64);
- } else if (mul <= MAX_TMBASE_STEPS * 512) {
+ } else if (mul <= max_timebase * 512) {
div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
timebase = DIV_ROUND_UP(mul, 512);
- } else if (mul > MAX_TMBASE_STEPS * 512) {
+ } else if (mul > max_timebase * 512) {
dev_err(chip->dev,
"failed to configure timebase steps/divider value\n");
return -EINVAL;
@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
.owner = THIS_MODULE,
};
+static const struct img_pwm_soc_data pistachio_pwm = {
+ .max_timebase = 255,
+};
+
+static const struct of_device_id img_pwm_of_match[] = {
+ {
+ .compatible = "img,pistachio-pwm",
+ .data = &pistachio_pwm,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+
static int img_pwm_probe(struct platform_device *pdev)
{
int ret;
+ u64 val;
+ unsigned long clk_rate;
struct resource *res;
struct img_pwm_chip *pwm;
+ const struct of_device_id *of_dev_id;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (!pwm)
@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
+ of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
+ if (!of_dev_id)
+ return -ENODEV;
+ pwm->data = of_dev_id->data;
+
pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"img,cr-periph");
if (IS_ERR(pwm->periph_regs))
@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
goto disable_sysclk;
}
+ clk_rate = clk_get_rate(pwm->pwm_clk);
+
+ /* The maximum input clock divider is 512 */
+ val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
+ do_div(val, clk_rate);
+ pwm->max_period_ns = val;
+
+ val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
+ do_div(val, clk_rate);
+ pwm->min_period_ns = val;
+
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &img_pwm_ops;
pwm->chip.base = -1;
@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pwm_chip->chip);
}
-static const struct of_device_id img_pwm_of_match[] = {
- { .compatible = "img,pistachio-pwm", },
- { }
-};
-MODULE_DEVICE_TABLE(of, img_pwm_of_match);
-
static struct platform_driver img_pwm_driver = {
.driver = {
.name = "img-pwm",
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index f75ecb09d97d..b430811e14f5 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -35,6 +35,10 @@
#define PERIOD_CDIV(div) (((div) & 0x7) << 20)
#define PERIOD_CDIV_MAX 8
+static const unsigned int cdiv[PERIOD_CDIV_MAX] = {
+ 1, 2, 4, 8, 16, 64, 256, 1024
+};
+
struct mxs_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
@@ -54,13 +58,13 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
rate = clk_get_rate(mxs->clk);
while (1) {
- c = rate / (1 << div);
+ c = rate / cdiv[div];
c = c * period_ns;
do_div(c, 1000000000);
if (c < PERIOD_PERIOD_MAX)
break;
div++;
- if (div > PERIOD_CDIV_MAX)
+ if (div >= PERIOD_CDIV_MAX)
return -EINVAL;
}
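Editor's note: the pwm-mxs hunk above replaces the assumed power-of-two divider with a lookup table and tightens the bound check so the table is never indexed past its end. A small standalone sketch of the corrected search loop; the counter limit used here is an example value, not taken from MXS documentation:

#include <stdio.h>
#include <stdint.h>

#define PERIOD_CDIV_MAX         8
#define PERIOD_PERIOD_MAX       0xffff          /* example counter limit */

static const unsigned int cdiv[PERIOD_CDIV_MAX] = {
        1, 2, 4, 8, 16, 64, 256, 1024
};

static int pick_divider(uint64_t rate, uint64_t period_ns, unsigned int *divp)
{
        unsigned int div = 0;
        uint64_t c;

        while (1) {
                c = rate / cdiv[div];
                c = c * period_ns / 1000000000ULL;
                if (c < PERIOD_PERIOD_MAX)
                        break;
                div++;
                if (div >= PERIOD_CDIV_MAX)     /* never index past the table */
                        return -1;
        }
        *divp = div;
        return 0;
}

int main(void)
{
        unsigned int div;

        if (!pick_divider(24000000, 20000000, &div))    /* 24 MHz, 20 ms */
                printf("chose divider %u (/%u)\n", div, cdiv[div]);
        else
                printf("period out of range\n");
        return 0;
}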
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 3fb775ded0df..34b5c275a92a 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -202,7 +202,7 @@ static const struct pwm_ops pca9685_pwm_ops = {
.owner = THIS_MODULE,
};
-static struct regmap_config pca9685_regmap_i2c_config = {
+static const struct regmap_config pca9685_regmap_i2c_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = PCA9685_NUMREGS,
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 3e9b5835a4af..ff201e1b9219 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -269,12 +269,31 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm)
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
+static void pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm);
+ u32 tcon;
+ unsigned long flags;
+
+ spin_lock_irqsave(&samsung_pwm_lock, flags);
+
+ tcon = readl(chip->base + REG_TCON);
+ tcon |= TCON_MANUALUPDATE(tcon_chan);
+ writel(tcon, chip->base + REG_TCON);
+
+ tcon &= ~TCON_MANUALUPDATE(tcon_chan);
+ writel(tcon, chip->base + REG_TCON);
+
+ spin_unlock_irqrestore(&samsung_pwm_lock, flags);
+}
+
static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
- u32 tin_ns = chan->tin_ns, tcnt, tcmp;
+ u32 tin_ns = chan->tin_ns, tcnt, tcmp, oldtcmp;
/*
* We currently avoid using 64bit arithmetic by using the
@@ -288,6 +307,7 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm));
+ oldtcmp = readl(our_chip->base + REG_TCMPB(pwm->hwpwm));
/* We need tick count for calculation, not last tick. */
++tcnt;
@@ -335,6 +355,16 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
writel(tcnt, our_chip->base + REG_TCNTB(pwm->hwpwm));
writel(tcmp, our_chip->base + REG_TCMPB(pwm->hwpwm));
+ /*
+ * In case the PWM is currently at 100% duty cycle, force a manual
+ * update to prevent the signal staying high if the PWM is disabled
+ * shortly after this update (before it has auto-reloaded the new values).
+ */
+ if (oldtcmp == (u32) -1) {
+ dev_dbg(our_chip->chip.dev, "Forcing manual update");
+ pwm_samsung_manual_update(our_chip, pwm);
+ }
+
chan->period_ns = period_ns;
chan->tin_ns = tin_ns;
chan->duty_ns = duty_ns;
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index 4116e74effa4..fcb98dbda837 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -246,6 +246,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
*/
switch (arizona->type) {
case WM5110:
+ case WM8280:
desc = &arizona_micsupp_ext;
micsupp->init_data = arizona_micsupp_ext_default;
break;
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 89fd057e5f1d..f8d6a0661c14 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -224,6 +224,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
drproc = rproc->priv;
drproc->rproc = rproc;
+ rproc->has_iommu = false;
platform_set_drvdata(pdev, rproc);
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index e85f30370760..b74368a91235 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -202,6 +202,8 @@ static int omap_rproc_probe(struct platform_device *pdev)
oproc = rproc->priv;
oproc->rproc = rproc;
+ /* All existing OMAP IPU and DSP processors have an MMU */
+ rproc->has_iommu = true;
platform_set_drvdata(pdev, rproc);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 3cd85a638afa..11cdb119e4f3 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -94,19 +94,8 @@ static int rproc_enable_iommu(struct rproc *rproc)
struct device *dev = rproc->dev.parent;
int ret;
- /*
- * We currently use iommu_present() to decide if an IOMMU
- * setup is needed.
- *
- * This works for simple cases, but will easily fail with
- * platforms that do have an IOMMU, but not for this specific
- * rproc.
- *
- * This will be easily solved by introducing hw capabilities
- * that will be set by the remoteproc driver.
- */
- if (!iommu_present(dev->bus)) {
- dev_dbg(dev, "iommu not found\n");
+ if (!rproc->has_iommu) {
+ dev_dbg(dev, "iommu not present\n");
return 0;
}
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index 16b7b7bd805b..dd193f35a1ff 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -289,6 +289,7 @@ static int sproc_probe(struct platform_device *pdev)
sproc = rproc->priv;
sproc->mdev = mdev;
sproc->rproc = rproc;
+ rproc->has_iommu = false;
mdev->drv_data = sproc;
/* Provide callback functions to modem device */
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b5b5c3d485d6..0fe4ad8826b2 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -164,6 +164,16 @@ config RTC_DRV_ABB5ZES3
This driver can also be built as a module. If so, the module
will be called rtc-ab-b5ze-s3.
+config RTC_DRV_ABX80X
+ tristate "Abracon ABx80x"
+ help
+ If you say yes here you get support for Abracon AB080X and AB180X
+ families of ultra-low-power battery- and capacitor-backed real-time
+ clock chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-abx80x.
+
config RTC_DRV_AS3722
tristate "ams AS3722 RTC driver"
depends on MFD_AS3722
@@ -1111,6 +1121,16 @@ config RTC_DRV_DAVINCI
This driver can also be built as a module. If so, the module
will be called rtc-davinci.
+config RTC_DRV_DIGICOLOR
+ tristate "Conexant Digicolor RTC"
+ depends on ARCH_DIGICOLOR
+ help
+ If you say yes here you get support for the RTC on Conexant
+ Digicolor platforms. This currently includes the CX92755 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-digicolor.
+
config RTC_DRV_IMXDI
tristate "Freescale IMX DryIce Real Time Clock"
depends on ARCH_MXC
@@ -1121,11 +1141,11 @@ config RTC_DRV_IMXDI
will be called "rtc-imxdi".
config RTC_DRV_OMAP
- tristate "TI OMAP1"
- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX || SOC_AM33XX
+ tristate "TI OMAP Real Time Clock"
+ depends on ARCH_OMAP || ARCH_DAVINCI
help
Say "yes" here to support the on chip real time clock
- present on TI OMAP1, AM33xx and DA8xx/OMAP-L13x.
+ present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
This driver can also be built as a module, if so, module
will be called rtc-omap.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 69c87062b098..2b82e2b0311b 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o
+obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o
obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o
obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o
obj-$(CONFIG_RTC_DRV_DA9055) += rtc-da9055.o
obj-$(CONFIG_RTC_DRV_DA9063) += rtc-da9063.o
obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
+obj-$(CONFIG_RTC_DRV_DIGICOLOR) += rtc-digicolor.o
obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index c29ba7e14304..ea2a315df6b7 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -221,15 +221,15 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc->pie_timer.function = rtc_pie_update_irq;
rtc->pie_enabled = 0;
+ strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
+ dev_set_name(&rtc->dev, "rtc%d", id);
+
/* Check to see if there is an ALARM already set in hw */
err = __rtc_read_alarm(rtc, &alrm);
if (!err && !rtc_valid_tm(&alrm.time))
rtc_initialize_alarm(rtc, &alrm);
- strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
- dev_set_name(&rtc->dev, "rtc%d", id);
-
rtc_dev_prepare(rtc);
err = device_register(&rtc->dev);
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index 6c719f23520a..e1cfa06810ef 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/rtc.h>
/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
@@ -32,8 +34,8 @@ static int __init rtc_hctosys(void)
struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
if (rtc == NULL) {
- pr_err("%s: unable to open rtc device (%s)\n",
- __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ pr_info("unable to open rtc device (%s)\n",
+ CONFIG_RTC_HCTOSYS_DEVICE);
goto err_open;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index d43ee409a5f2..166fc60d8b55 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -31,13 +31,14 @@ static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
memset(tm, 0, sizeof(struct rtc_time));
err = rtc->ops->read_time(rtc->dev.parent, tm);
if (err < 0) {
- dev_err(&rtc->dev, "read_time: fail to read\n");
+ dev_dbg(&rtc->dev, "read_time: fail to read: %d\n",
+ err);
return err;
}
err = rtc_valid_tm(tm);
if (err < 0)
- dev_err(&rtc->dev, "read_time: rtc_time isn't valid\n");
+ dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
}
return err;
}
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index cfc2ef98d393..b5cbc1bf5a3e 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -881,7 +881,7 @@ static const struct rtc_class_ops rtc_ops = {
.alarm_irq_enable = abb5zes3_rtc_alarm_irq_enable,
};
-static struct regmap_config abb5zes3_rtc_regmap_config = {
+static const struct regmap_config abb5zes3_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
new file mode 100644
index 000000000000..4337c3bc6ace
--- /dev/null
+++ b/drivers/rtc/rtc-abx80x.c
@@ -0,0 +1,307 @@
+/*
+ * A driver for the I2C members of the Abracon AB x8xx RTC family,
+ * and compatible: AB 1805 and AB 0805
+ *
+ * Copyright 2014-2015 Macq S.A.
+ *
+ * Author: Philippe De Muyter <phdm@macqel.be>
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+
+#define ABX8XX_REG_HTH 0x00
+#define ABX8XX_REG_SC 0x01
+#define ABX8XX_REG_MN 0x02
+#define ABX8XX_REG_HR 0x03
+#define ABX8XX_REG_DA 0x04
+#define ABX8XX_REG_MO 0x05
+#define ABX8XX_REG_YR 0x06
+#define ABX8XX_REG_WD 0x07
+
+#define ABX8XX_REG_CTRL1 0x10
+#define ABX8XX_CTRL_WRITE BIT(1)
+#define ABX8XX_CTRL_12_24 BIT(6)
+
+#define ABX8XX_REG_CFG_KEY 0x1f
+#define ABX8XX_CFG_KEY_MISC 0x9d
+
+#define ABX8XX_REG_ID0 0x28
+
+#define ABX8XX_REG_TRICKLE 0x20
+#define ABX8XX_TRICKLE_CHARGE_ENABLE 0xa0
+#define ABX8XX_TRICKLE_STANDARD_DIODE 0x8
+#define ABX8XX_TRICKLE_SCHOTTKY_DIODE 0x4
+
+static u8 trickle_resistors[] = {0, 3, 6, 11};
+
+enum abx80x_chip {AB0801, AB0803, AB0804, AB0805,
+ AB1801, AB1803, AB1804, AB1805, ABX80X};
+
+struct abx80x_cap {
+ u16 pn;
+ bool has_tc;
+};
+
+static struct abx80x_cap abx80x_caps[] = {
+ [AB0801] = {.pn = 0x0801},
+ [AB0803] = {.pn = 0x0803},
+ [AB0804] = {.pn = 0x0804, .has_tc = true},
+ [AB0805] = {.pn = 0x0805, .has_tc = true},
+ [AB1801] = {.pn = 0x1801},
+ [AB1803] = {.pn = 0x1803},
+ [AB1804] = {.pn = 0x1804, .has_tc = true},
+ [AB1805] = {.pn = 0x1805, .has_tc = true},
+ [ABX80X] = {.pn = 0}
+};
+
+static struct i2c_driver abx80x_driver;
+
+static int abx80x_enable_trickle_charger(struct i2c_client *client,
+ u8 trickle_cfg)
+{
+ int err;
+
+ /*
+ * Write the configuration key register to enable access to the Trickle
+ * register
+ */
+ err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
+ ABX8XX_CFG_KEY_MISC);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write configuration key\n");
+ return -EIO;
+ }
+
+ err = i2c_smbus_write_byte_data(client, ABX8XX_REG_TRICKLE,
+ ABX8XX_TRICKLE_CHARGE_ENABLE |
+ trickle_cfg);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write trickle register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[8];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ tm->tm_sec = bcd2bin(buf[ABX8XX_REG_SC] & 0x7F);
+ tm->tm_min = bcd2bin(buf[ABX8XX_REG_MN] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[ABX8XX_REG_HR] & 0x3F);
+ tm->tm_wday = buf[ABX8XX_REG_WD] & 0x7;
+ tm->tm_mday = bcd2bin(buf[ABX8XX_REG_DA] & 0x3F);
+ tm->tm_mon = bcd2bin(buf[ABX8XX_REG_MO] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[ABX8XX_REG_YR]) + 100;
+
+ err = rtc_valid_tm(tm);
+ if (err < 0)
+ dev_err(&client->dev, "retrieved date/time is not valid.\n");
+
+ return err;
+}
+
+static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[8];
+ int err;
+
+ if (tm->tm_year < 100)
+ return -EINVAL;
+
+ buf[ABX8XX_REG_HTH] = 0;
+ buf[ABX8XX_REG_SC] = bin2bcd(tm->tm_sec);
+ buf[ABX8XX_REG_MN] = bin2bcd(tm->tm_min);
+ buf[ABX8XX_REG_HR] = bin2bcd(tm->tm_hour);
+ buf[ABX8XX_REG_DA] = bin2bcd(tm->tm_mday);
+ buf[ABX8XX_REG_MO] = bin2bcd(tm->tm_mon + 1);
+ buf[ABX8XX_REG_YR] = bin2bcd(tm->tm_year - 100);
+ buf[ABX8XX_REG_WD] = tm->tm_wday;
+
+ err = i2c_smbus_write_i2c_block_data(client, ABX8XX_REG_HTH,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write to date registers\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct rtc_class_ops abx80x_rtc_ops = {
+ .read_time = abx80x_rtc_read_time,
+ .set_time = abx80x_rtc_set_time,
+};
+
+static int abx80x_dt_trickle_cfg(struct device_node *np)
+{
+ const char *diode;
+ int trickle_cfg = 0;
+ int i, ret;
+ u32 tmp;
+
+ ret = of_property_read_string(np, "abracon,tc-diode", &diode);
+ if (ret)
+ return ret;
+
+ if (!strcmp(diode, "standard"))
+ trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE;
+ else if (!strcmp(diode, "schottky"))
+ trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE;
+ else
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < sizeof(trickle_resistors); i++)
+ if (trickle_resistors[i] == tmp)
+ break;
+
+ if (i == sizeof(trickle_resistors))
+ return -EINVAL;
+
+ return (trickle_cfg | i);
+}
+
+static int abx80x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device_node *np = client->dev.of_node;
+ struct rtc_device *rtc;
+ int i, data, err, trickle_cfg = -EINVAL;
+ char buf[7];
+ unsigned int part = id->driver_data;
+ unsigned int partnumber;
+ unsigned int majrev, minrev;
+ unsigned int lot;
+ unsigned int wafer;
+ unsigned int uid;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_ID0,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to read partnumber\n");
+ return -EIO;
+ }
+
+ partnumber = (buf[0] << 8) | buf[1];
+ majrev = buf[2] >> 3;
+ minrev = buf[2] & 0x7;
+ lot = ((buf[4] & 0x80) << 2) | ((buf[6] & 0x80) << 1) | buf[3];
+ uid = ((buf[4] & 0x7f) << 8) | buf[5];
+ wafer = (buf[6] & 0x7c) >> 2;
+ dev_info(&client->dev, "model %04x, revision %u.%u, lot %x, wafer %x, uid %x\n",
+ partnumber, majrev, minrev, lot, wafer, uid);
+
+ data = i2c_smbus_read_byte_data(client, ABX8XX_REG_CTRL1);
+ if (data < 0) {
+ dev_err(&client->dev, "Unable to read control register\n");
+ return -EIO;
+ }
+
+ err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CTRL1,
+ ((data & ~ABX8XX_CTRL_12_24) |
+ ABX8XX_CTRL_WRITE));
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write control register\n");
+ return -EIO;
+ }
+
+ /* part autodetection */
+ if (part == ABX80X) {
+ for (i = 0; abx80x_caps[i].pn; i++)
+ if (partnumber == abx80x_caps[i].pn)
+ break;
+ if (abx80x_caps[i].pn == 0) {
+ dev_err(&client->dev, "Unknown part: %04x\n",
+ partnumber);
+ return -EINVAL;
+ }
+ part = i;
+ }
+
+ if (partnumber != abx80x_caps[part].pn) {
+ dev_err(&client->dev, "partnumber mismatch %04x != %04x\n",
+ partnumber, abx80x_caps[part].pn);
+ return -EINVAL;
+ }
+
+ if (np && abx80x_caps[part].has_tc)
+ trickle_cfg = abx80x_dt_trickle_cfg(np);
+
+ if (trickle_cfg > 0) {
+ dev_info(&client->dev, "Enabling trickle charger: %02x\n",
+ trickle_cfg);
+ abx80x_enable_trickle_charger(client, trickle_cfg);
+ }
+
+ rtc = devm_rtc_device_register(&client->dev, abx80x_driver.driver.name,
+ &abx80x_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ i2c_set_clientdata(client, rtc);
+
+ return 0;
+}
+
+static int abx80x_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static const struct i2c_device_id abx80x_id[] = {
+ { "abx80x", ABX80X },
+ { "ab0801", AB0801 },
+ { "ab0803", AB0803 },
+ { "ab0804", AB0804 },
+ { "ab0805", AB0805 },
+ { "ab1801", AB1801 },
+ { "ab1803", AB1803 },
+ { "ab1804", AB1804 },
+ { "ab1805", AB1805 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, abx80x_id);
+
+static struct i2c_driver abx80x_driver = {
+ .driver = {
+ .name = "rtc-abx80x",
+ },
+ .probe = abx80x_probe,
+ .remove = abx80x_remove,
+ .id_table = abx80x_id,
+};
+
+module_i2c_driver(abx80x_driver);
+
+MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
+MODULE_DESCRIPTION("Abracon ABX80X RTC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 43e04af39e09..4b62d1a875e4 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -40,6 +40,13 @@ struct armada38x_rtc {
void __iomem *regs;
void __iomem *regs_soc;
spinlock_t lock;
+ /*
+ * While setting the time, the RTC TIME register should not be
+ * accessed. Setting the RTC time involves sleeping for
+ * 100 ms, so a mutex instead of a spinlock is used to protect
+ * it.
+ */
+ struct mutex mutex_time;
int irq;
};
@@ -57,10 +64,9 @@ static void rtc_delayed_write(u32 val, struct armada38x_rtc *rtc, int offset)
static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct armada38x_rtc *rtc = dev_get_drvdata(dev);
- unsigned long time, time_check, flags;
-
- spin_lock_irqsave(&rtc->lock, flags);
+ unsigned long time, time_check;
+ mutex_lock(&rtc->mutex_time);
time = readl(rtc->regs + RTC_TIME);
/*
* WA for failing time set attempts. As stated in HW ERRATA if
@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
if ((time_check - time) > 1)
time_check = readl(rtc->regs + RTC_TIME);
- spin_unlock_irqrestore(&rtc->lock, flags);
+ mutex_unlock(&rtc->mutex_time);
rtc_time_to_tm(time_check, tm);
@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
* then wait for 100ms before writing to the time register to be
* sure that the data will be taken into account.
*/
- spin_lock_irqsave(&rtc->lock, flags);
-
+ mutex_lock(&rtc->mutex_time);
rtc_delayed_write(0, rtc, RTC_STATUS);
-
- spin_unlock_irqrestore(&rtc->lock, flags);
-
msleep(100);
-
- spin_lock_irqsave(&rtc->lock, flags);
-
rtc_delayed_write(time, rtc, RTC_TIME);
+ mutex_unlock(&rtc->mutex_time);
- spin_unlock_irqrestore(&rtc->lock, flags);
out:
return ret;
}
@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&rtc->lock);
+ mutex_init(&rtc->mutex_time);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
rtc->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b283a1a573b3..35efd3f75b18 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -37,9 +37,9 @@
#include "rtc-at91rm9200.h"
#define at91_rtc_read(field) \
- __raw_readl(at91_rtc_regs + field)
+ readl_relaxed(at91_rtc_regs + field)
#define at91_rtc_write(field, val) \
- __raw_writel((val), at91_rtc_regs + field)
+ writel_relaxed((val), at91_rtc_regs + field)
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5b2e76159b41..a82556a0757a 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -28,6 +28,9 @@
* interrupts disabled, holding the global rtc_lock, to exclude those
* other drivers and utilities on correctly configured systems.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -385,8 +388,7 @@ static bool alarm_disable_quirk;
static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
{
alarm_disable_quirk = true;
- pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
- pr_info("RTC alarms disabled\n");
+ pr_info("BIOS has alarm-disable quirk - RTC alarms disabled\n");
return 0;
}
@@ -459,23 +461,25 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
/* NOTE: at least ICH6 reports battery status using a different
* (non-RTC) bit; and SQWE is ignored on many current systems.
*/
- return seq_printf(seq,
- "periodic_IRQ\t: %s\n"
- "update_IRQ\t: %s\n"
- "HPET_emulated\t: %s\n"
- // "square_wave\t: %s\n"
- "BCD\t\t: %s\n"
- "DST_enable\t: %s\n"
- "periodic_freq\t: %d\n"
- "batt_status\t: %s\n",
- (rtc_control & RTC_PIE) ? "yes" : "no",
- (rtc_control & RTC_UIE) ? "yes" : "no",
- is_hpet_enabled() ? "yes" : "no",
- // (rtc_control & RTC_SQWE) ? "yes" : "no",
- (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
- (rtc_control & RTC_DST_EN) ? "yes" : "no",
- cmos->rtc->irq_freq,
- (valid & RTC_VRT) ? "okay" : "dead");
+ seq_printf(seq,
+ "periodic_IRQ\t: %s\n"
+ "update_IRQ\t: %s\n"
+ "HPET_emulated\t: %s\n"
+ // "square_wave\t: %s\n"
+ "BCD\t\t: %s\n"
+ "DST_enable\t: %s\n"
+ "periodic_freq\t: %d\n"
+ "batt_status\t: %s\n",
+ (rtc_control & RTC_PIE) ? "yes" : "no",
+ (rtc_control & RTC_UIE) ? "yes" : "no",
+ is_hpet_enabled() ? "yes" : "no",
+ // (rtc_control & RTC_SQWE) ? "yes" : "no",
+ (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
+ (rtc_control & RTC_DST_EN) ? "yes" : "no",
+ cmos->rtc->irq_freq,
+ (valid & RTC_VRT) ? "okay" : "dead");
+
+ return 0;
}
#else
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 613c43b7e9ae..1ba4371cbc2d 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
@@ -23,6 +24,8 @@
#define rtc_err(rtc, fmt, ...) \
dev_err(rtc->da9052->dev, "%s: " fmt, __func__, ##__VA_ARGS__)
+#define DA9052_GET_TIME_RETRIES 5
+
struct da9052_rtc {
struct rtc_device *rtc;
struct da9052 *da9052;
@@ -58,22 +61,43 @@ static irqreturn_t da9052_rtc_irq(int irq, void *data)
static int da9052_read_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
{
int ret;
- uint8_t v[5];
+ uint8_t v[2][5];
+ int idx = 1;
+ int timeout = DA9052_GET_TIME_RETRIES;
- ret = da9052_group_read(rtc->da9052, DA9052_ALARM_MI_REG, 5, v);
- if (ret != 0) {
+ ret = da9052_group_read(rtc->da9052, DA9052_ALARM_MI_REG, 5, &v[0][0]);
+ if (ret) {
rtc_err(rtc, "Failed to group read ALM: %d\n", ret);
return ret;
}
- rtc_tm->tm_year = (v[4] & DA9052_RTC_YEAR) + 100;
- rtc_tm->tm_mon = (v[3] & DA9052_RTC_MONTH) - 1;
- rtc_tm->tm_mday = v[2] & DA9052_RTC_DAY;
- rtc_tm->tm_hour = v[1] & DA9052_RTC_HOUR;
- rtc_tm->tm_min = v[0] & DA9052_RTC_MIN;
+ do {
+ ret = da9052_group_read(rtc->da9052,
+ DA9052_ALARM_MI_REG, 5, &v[idx][0]);
+ if (ret) {
+ rtc_err(rtc, "Failed to group read ALM: %d\n", ret);
+ return ret;
+ }
- ret = rtc_valid_tm(rtc_tm);
- return ret;
+ if (memcmp(&v[0][0], &v[1][0], 5) == 0) {
+ rtc_tm->tm_year = (v[0][4] & DA9052_RTC_YEAR) + 100;
+ rtc_tm->tm_mon = (v[0][3] & DA9052_RTC_MONTH) - 1;
+ rtc_tm->tm_mday = v[0][2] & DA9052_RTC_DAY;
+ rtc_tm->tm_hour = v[0][1] & DA9052_RTC_HOUR;
+ rtc_tm->tm_min = v[0][0] & DA9052_RTC_MIN;
+
+ ret = rtc_valid_tm(rtc_tm);
+ return ret;
+ }
+
+ idx = (1-idx);
+ msleep(20);
+
+ } while (timeout--);
+
+ rtc_err(rtc, "Timed out reading alarm time\n");
+
+ return -EIO;
}
static int da9052_set_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
@@ -135,24 +159,45 @@ static int da9052_rtc_get_alarm_status(struct da9052_rtc *rtc)
static int da9052_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
{
struct da9052_rtc *rtc = dev_get_drvdata(dev);
- uint8_t v[6];
int ret;
+ uint8_t v[2][6];
+ int idx = 1;
+ int timeout = DA9052_GET_TIME_RETRIES;
- ret = da9052_group_read(rtc->da9052, DA9052_COUNT_S_REG, 6, v);
- if (ret < 0) {
+ ret = da9052_group_read(rtc->da9052, DA9052_COUNT_S_REG, 6, &v[0][0]);
+ if (ret) {
rtc_err(rtc, "Failed to read RTC time : %d\n", ret);
return ret;
}
- rtc_tm->tm_year = (v[5] & DA9052_RTC_YEAR) + 100;
- rtc_tm->tm_mon = (v[4] & DA9052_RTC_MONTH) - 1;
- rtc_tm->tm_mday = v[3] & DA9052_RTC_DAY;
- rtc_tm->tm_hour = v[2] & DA9052_RTC_HOUR;
- rtc_tm->tm_min = v[1] & DA9052_RTC_MIN;
- rtc_tm->tm_sec = v[0] & DA9052_RTC_SEC;
+ do {
+ ret = da9052_group_read(rtc->da9052,
+ DA9052_COUNT_S_REG, 6, &v[idx][0]);
+ if (ret) {
+ rtc_err(rtc, "Failed to read RTC time : %d\n", ret);
+ return ret;
+ }
- ret = rtc_valid_tm(rtc_tm);
- return ret;
+ if (memcmp(&v[0][0], &v[1][0], 6) == 0) {
+ rtc_tm->tm_year = (v[0][5] & DA9052_RTC_YEAR) + 100;
+ rtc_tm->tm_mon = (v[0][4] & DA9052_RTC_MONTH) - 1;
+ rtc_tm->tm_mday = v[0][3] & DA9052_RTC_DAY;
+ rtc_tm->tm_hour = v[0][2] & DA9052_RTC_HOUR;
+ rtc_tm->tm_min = v[0][1] & DA9052_RTC_MIN;
+ rtc_tm->tm_sec = v[0][0] & DA9052_RTC_SEC;
+
+ ret = rtc_valid_tm(rtc_tm);
+ return ret;
+ }
+
+ idx = (1-idx);
+ msleep(20);
+
+ } while (timeout--);
+
+ rtc_err(rtc, "Timed out reading time\n");
+
+ return -EIO;
}
static int da9052_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -161,6 +206,10 @@ static int da9052_rtc_set_time(struct device *dev, struct rtc_time *tm)
uint8_t v[6];
int ret;
+ /* DA9052 only has 6 bits for year - to represent 2000-2063 */
+ if ((tm->tm_year < 100) || (tm->tm_year > 163))
+ return -EINVAL;
+
rtc = dev_get_drvdata(dev);
v[0] = tm->tm_sec;
@@ -198,6 +247,10 @@ static int da9052_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
struct da9052_rtc *rtc = dev_get_drvdata(dev);
+ /* DA9052 only has 6 bits for year - to represent 2000-2063 */
+ if ((tm->tm_year < 100) || (tm->tm_year > 163))
+ return -EINVAL;
+
ret = da9052_rtc_enable_alarm(rtc, 0);
if (ret < 0)
return ret;
@@ -256,6 +309,8 @@ static int da9052_rtc_probe(struct platform_device *pdev)
return ret;
}
+ device_init_wakeup(&pdev->dev, true);
+
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&da9052_rtc_ops, THIS_MODULE);
return PTR_ERR_OR_ZERO(rtc->rtc);
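Editor's note: the rtc-da9052 changes above re-read the multi-byte time until two consecutive reads agree, retrying a bounded number of times, so a counter rollover between register reads cannot produce a torn value. A userspace sketch of that read-until-stable loop, with simulated_read() invented here as a stand-in for da9052_group_read():

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define NREGS   6
#define RETRIES 5

/* Pretend register bank that "ticks" on the first read, then stays stable. */
static void simulated_read(unsigned char *buf)
{
        static unsigned char regs[NREGS] = { 30, 15, 10, 17, 4, 15 };
        static int first = 1;

        memcpy(buf, regs, NREGS);
        if (first) {
                regs[0]++;      /* seconds advanced between the two reads */
                first = 0;
        }
}

static int read_stable(unsigned char *out)
{
        unsigned char v[2][NREGS];
        int idx = 1, timeout = RETRIES;

        simulated_read(v[0]);
        do {
                simulated_read(v[idx]);
                if (memcmp(v[0], v[1], NREGS) == 0) {
                        memcpy(out, v[0], NREGS);
                        return 0;
                }
                idx = 1 - idx;
                usleep(20000);  /* 20 ms between attempts */
        } while (timeout--);

        return -1;
}

int main(void)
{
        unsigned char t[NREGS];

        if (!read_stable(t))
                printf("stable time: %02u:%02u:%02u\n", t[2], t[1], t[0]);
        else
                printf("timed out\n");
        return 0;
}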
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
new file mode 100644
index 000000000000..8d05596a6765
--- /dev/null
+++ b/drivers/rtc/rtc-digicolor.c
@@ -0,0 +1,227 @@
+/*
+ * Real Time Clock driver for Conexant Digicolor
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/of.h>
+
+#define DC_RTC_CONTROL 0x0
+#define DC_RTC_TIME 0x8
+#define DC_RTC_REFERENCE 0xc
+#define DC_RTC_ALARM 0x10
+#define DC_RTC_INTFLAG_CLEAR 0x14
+#define DC_RTC_INTENABLE 0x16
+
+#define DC_RTC_CMD_MASK 0xf
+#define DC_RTC_GO_BUSY BIT(7)
+
+#define CMD_NOP 0
+#define CMD_RESET 1
+#define CMD_WRITE 3
+#define CMD_READ 4
+
+#define CMD_DELAY_US (10*1000)
+#define CMD_TIMEOUT_US (500*CMD_DELAY_US)
+
+struct dc_rtc {
+ struct rtc_device *rtc_dev;
+ void __iomem *regs;
+};
+
+static int dc_rtc_cmds(struct dc_rtc *rtc, const u8 *cmds, int len)
+{
+ u8 val;
+ int i, ret;
+
+ for (i = 0; i < len; i++) {
+ writeb_relaxed((cmds[i] & DC_RTC_CMD_MASK) | DC_RTC_GO_BUSY,
+ rtc->regs + DC_RTC_CONTROL);
+ ret = readb_relaxed_poll_timeout(
+ rtc->regs + DC_RTC_CONTROL, val,
+ !(val & DC_RTC_GO_BUSY), CMD_DELAY_US, CMD_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dc_rtc_read(struct dc_rtc *rtc, unsigned long *val)
+{
+ static const u8 read_cmds[] = {CMD_READ, CMD_NOP};
+ u32 reference, time1, time2;
+ int ret;
+
+ ret = dc_rtc_cmds(rtc, read_cmds, ARRAY_SIZE(read_cmds));
+ if (ret < 0)
+ return ret;
+
+ reference = readl_relaxed(rtc->regs + DC_RTC_REFERENCE);
+ time1 = readl_relaxed(rtc->regs + DC_RTC_TIME);
+ /* Read twice to ensure consistency */
+ while (1) {
+ time2 = readl_relaxed(rtc->regs + DC_RTC_TIME);
+ if (time1 == time2)
+ break;
+ time1 = time2;
+ }
+
+ *val = reference + time1;
+ return 0;
+}
+
+static int dc_rtc_write(struct dc_rtc *rtc, u32 val)
+{
+ static const u8 write_cmds[] = {CMD_WRITE, CMD_NOP, CMD_RESET, CMD_NOP};
+
+ writel_relaxed(val, rtc->regs + DC_RTC_REFERENCE);
+ return dc_rtc_cmds(rtc, write_cmds, ARRAY_SIZE(write_cmds));
+}
+
+static int dc_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long now;
+ int ret;
+
+ ret = dc_rtc_read(rtc, &now);
+ if (ret < 0)
+ return ret;
+ rtc_time64_to_tm(now, tm);
+
+ return 0;
+}
+
+static int dc_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+
+ return dc_rtc_write(rtc, secs);
+}
+
+static int dc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+ u32 alarm_reg, reference;
+ unsigned long now;
+ int ret;
+
+ alarm_reg = readl_relaxed(rtc->regs + DC_RTC_ALARM);
+ reference = readl_relaxed(rtc->regs + DC_RTC_REFERENCE);
+ rtc_time64_to_tm(reference + alarm_reg, &alarm->time);
+
+ ret = dc_rtc_read(rtc, &now);
+ if (ret < 0)
+ return ret;
+
+ alarm->pending = alarm_reg + reference > now;
+ alarm->enabled = readl_relaxed(rtc->regs + DC_RTC_INTENABLE);
+
+ return 0;
+}
+
+static int dc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+ time64_t alarm_time;
+ u32 reference;
+
+ alarm_time = rtc_tm_to_time64(&alarm->time);
+
+ reference = readl_relaxed(rtc->regs + DC_RTC_REFERENCE);
+ writel_relaxed(alarm_time - reference, rtc->regs + DC_RTC_ALARM);
+
+ writeb_relaxed(!!alarm->enabled, rtc->regs + DC_RTC_INTENABLE);
+
+ return 0;
+}
+
+static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct dc_rtc *rtc = dev_get_drvdata(dev);
+
+ writeb_relaxed(!!enabled, rtc->regs + DC_RTC_INTENABLE);
+
+ return 0;
+}
+
+static struct rtc_class_ops dc_rtc_ops = {
+ .read_time = dc_rtc_read_time,
+ .set_mmss = dc_rtc_set_mmss,
+ .read_alarm = dc_rtc_read_alarm,
+ .set_alarm = dc_rtc_set_alarm,
+ .alarm_irq_enable = dc_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t dc_rtc_irq(int irq, void *dev_id)
+{
+ struct dc_rtc *rtc = dev_id;
+
+ writeb_relaxed(1, rtc->regs + DC_RTC_INTFLAG_CLEAR);
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+}
+
+static int __init dc_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct dc_rtc *rtc;
+ int irq, ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(&pdev->dev, irq, dc_rtc_irq, 0, pdev->name, rtc);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, rtc);
+ rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &dc_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dc_dt_ids[] = {
+ { .compatible = "cnxt,cx92755-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_dt_ids);
+
+static struct platform_driver dc_rtc_driver = {
+ .driver = {
+ .name = "digicolor_rtc",
+ .of_match_table = of_match_ptr(dc_dt_ids),
+ },
+};
+module_platform_driver_probe(dc_rtc_driver, dc_rtc_probe);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Conexant Digicolor Realtime Clock Driver (RTC)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 129add77065d..12b07158a366 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -434,9 +434,9 @@ static int ds1305_proc(struct device *dev, struct seq_file *seq)
}
done:
- return seq_printf(seq,
- "trickle_charge\t: %s%s\n",
- diodes, resistors);
+ seq_printf(seq, "trickle_charge\t: %s%s\n", diodes, resistors);
+
+ return 0;
}
#else
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 8605fde394b2..167783fa7ac1 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -18,6 +18,8 @@
* "Sending and receiving", using SMBus level communication is preferred.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -406,7 +408,7 @@ static int ds1374_wdt_settimeout(unsigned int timeout)
/* Set new watchdog time */
ret = ds1374_write_rtc(save_client, timeout, DS1374_REG_WDALM0, 3);
if (ret) {
- pr_info("rtc-ds1374 - couldn't set new watchdog time\n");
+ pr_info("couldn't set new watchdog time\n");
goto out;
}
@@ -539,12 +541,12 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
- pr_info("rtc-ds1374: disable watchdog\n");
+ pr_info("disable watchdog\n");
ds1374_wdt_disable();
}
if (options & WDIOS_ENABLECARD) {
- pr_info("rtc-ds1374: enable watchdog\n");
+ pr_info("enable watchdog\n");
ds1374_wdt_settimeout(wdt_margin);
ds1374_wdt_ping();
}
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 803869c7d7c2..818a3635a8c8 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -16,6 +16,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -799,7 +801,7 @@ ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
struct platform_device *pdev = to_platform_device(dev);
struct ds1685_priv *rtc = platform_get_drvdata(pdev);
u8 ctrla, ctrlb, ctrlc, ctrld, ctrl4a, ctrl4b, ssn[8];
- char *model = '\0';
+ char *model;
#ifdef CONFIG_RTC_DS1685_PROC_REGS
char bits[NUM_REGS][(NUM_BITS * NUM_SPACES) + NUM_BITS + 1];
#endif
@@ -2139,7 +2141,6 @@ ds1685_rtc_remove(struct platform_device *pdev)
static struct platform_driver ds1685_rtc_driver = {
.driver = {
.name = "rtc-ds1685",
- .owner = THIS_MODULE,
},
.probe = ds1685_rtc_probe,
.remove = ds1685_rtc_remove,
@@ -2175,7 +2176,7 @@ module_exit(ds1685_rtc_exit);
* ds1685_rtc_poweroff - uses the RTC chip to power the system off.
* @pdev: pointer to platform_device structure.
*/
-extern void __noreturn
+void __noreturn
ds1685_rtc_poweroff(struct platform_device *pdev)
{
u8 ctrla, ctrl4a, ctrl4b;
@@ -2183,7 +2184,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
/* Check for valid RTC data, else, spin forever. */
if (unlikely(!pdev)) {
- pr_emerg("rtc-ds1685: platform device data not available, spinning forever ...\n");
+ pr_emerg("platform device data not available, spinning forever ...\n");
unreachable();
} else {
/* Get the rtc data. */
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index adaf06c41479..7e48e532214f 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -15,6 +15,8 @@
* "Sending and receiving", using SMBus level communication is preferred.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -373,8 +375,8 @@ static void ds3232_work(struct work_struct *work)
if (stat & DS3232_REG_SR_A1F) {
control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
if (control < 0) {
- pr_warn("Read DS3232 Control Register error."
- "Disable IRQ%d.\n", client->irq);
+ pr_warn("Read Control Register error - Disable IRQ%d\n",
+ client->irq);
} else {
/* disable alarm1 interrupt */
control &= ~(DS3232_REG_CR_A1IE);
diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c
index b40fbe332af4..1a7f1d1bc174 100644
--- a/drivers/rtc/rtc-efi-platform.c
+++ b/drivers/rtc/rtc-efi-platform.c
@@ -8,6 +8,9 @@
* Copyright (C) 1999-2000 VA Linux Systems
* Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index fccf36699245..4f4930a2004c 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -15,6 +15,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/module.h>
+#include <linux/of.h>
/* Registers */
#define EM3027_REG_ON_OFF_CTRL 0x00
@@ -135,10 +136,20 @@ static struct i2c_device_id em3027_id[] = {
{ "em3027", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, em3027_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id em3027_of_match[] = {
+ { .compatible = "emmicro,em3027", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, em3027_of_match);
+#endif
static struct i2c_driver em3027_driver = {
.driver = {
.name = "rtc-em3027",
+ .of_match_table = of_match_ptr(em3027_of_match),
},
.probe = &em3027_probe,
.id_table = em3027_id,
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index b936bb4096b5..0f710e98538f 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -66,7 +66,7 @@
#define HYM8563_ALM_BIT_DISABLE BIT(7)
#define HYM8563_CLKOUT 0x0d
-#define HYM8563_CLKOUT_DISABLE BIT(7)
+#define HYM8563_CLKOUT_ENABLE BIT(7)
#define HYM8563_CLKOUT_32768 0
#define HYM8563_CLKOUT_1024 1
#define HYM8563_CLKOUT_32 2
@@ -309,7 +309,7 @@ static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw,
struct i2c_client *client = hym8563->client;
int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
- if (ret < 0 || ret & HYM8563_CLKOUT_DISABLE)
+ if (ret < 0)
return 0;
ret &= HYM8563_CLKOUT_MASK;
@@ -360,9 +360,9 @@ static int hym8563_clkout_control(struct clk_hw *hw, bool enable)
return ret;
if (enable)
- ret &= ~HYM8563_CLKOUT_DISABLE;
+ ret |= HYM8563_CLKOUT_ENABLE;
else
- ret |= HYM8563_CLKOUT_DISABLE;
+ ret &= ~HYM8563_CLKOUT_ENABLE;
return i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, ret);
}
@@ -386,7 +386,7 @@ static int hym8563_clkout_is_prepared(struct clk_hw *hw)
if (ret < 0)
return ret;
- return !(ret & HYM8563_CLKOUT_DISABLE);
+ return !!(ret & HYM8563_CLKOUT_ENABLE);
}
static const struct clk_ops hym8563_clkout_ops = {
@@ -407,7 +407,7 @@ static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563)
int ret;
ret = i2c_smbus_write_byte_data(client, HYM8563_CLKOUT,
- HYM8563_CLKOUT_DISABLE);
+ 0);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 7ff7427c2e6a..a82937e2f824 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -513,12 +515,12 @@ static int wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (rv & WDIOS_DISABLECARD) {
- pr_info("rtc-m41t80: disable watchdog\n");
+ pr_info("disable watchdog\n");
wdt_disable();
}
if (rv & WDIOS_ENABLECARD) {
- pr_info("rtc-m41t80: enable watchdog\n");
+ pr_info("enable watchdog\n");
wdt_ping();
}
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 9d71328e59b9..7632a87784c3 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -12,6 +12,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/delay.h>
@@ -103,8 +105,8 @@ static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
if (tm->tm_year < 100) {
- pr_warn("%s: MAX77686 RTC cannot handle the year %d."
- "Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ pr_warn("RTC cannot handle the year %d. Assume it's 2000.\n",
+ 1900 + tm->tm_year);
return -EINVAL;
}
return 0;
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index 67fbe559d535..9e02bcda0c09 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -12,6 +12,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/delay.h>
@@ -107,8 +109,8 @@ static int max8997_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
if (tm->tm_year < 100) {
- pr_warn("%s: MAX8997 RTC cannot handle the year %d."
- "Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ pr_warn("RTC cannot handle the year %d. Assume it's 2000.\n",
+ 1900 + tm->tm_year);
return -EINVAL;
}
return 0;
@@ -424,7 +426,7 @@ static void max8997_rtc_enable_smpl(struct max8997_rtc_info *info, bool enable)
val = 0;
max8997_read_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, &val);
- pr_info("%s: WTSR_SMPL(0x%02x)\n", __func__, val);
+ pr_info("WTSR_SMPL(0x%02x)\n", val);
}
static int max8997_rtc_init_reg(struct max8997_rtc_info *info)
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 3a6fd3a8a2ec..548ea6f6f384 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -277,13 +277,15 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq)
valid = vrtc_cmos_read(RTC_VALID);
spin_unlock_irq(&rtc_lock);
- return seq_printf(seq,
- "periodic_IRQ\t: %s\n"
- "alarm\t\t: %s\n"
- "BCD\t\t: no\n"
- "periodic_freq\t: daily (not adjustable)\n",
- (rtc_control & RTC_PIE) ? "on" : "off",
- (rtc_control & RTC_AIE) ? "on" : "off");
+ seq_printf(seq,
+ "periodic_IRQ\t: %s\n"
+ "alarm\t\t: %s\n"
+ "BCD\t\t: no\n"
+ "periodic_freq\t: daily (not adjustable)\n",
+ (rtc_control & RTC_PIE) ? "on" : "off",
+ (rtc_control & RTC_AIE) ? "on" : "off");
+
+ return 0;
}
#else
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 9bf877bdf836..c1c5c4e3b3b4 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -7,6 +7,8 @@
* Copyright (C) 1993 Hamish Macdonald
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -111,7 +113,7 @@ static void msm6242_lock(struct msm6242_priv *priv)
}
if (!cnt)
- pr_warn("msm6242: timed out waiting for RTC (0x%x)\n",
+ pr_warn("timed out waiting for RTC (0x%x)\n",
msm6242_read(priv, MSM6242_CD));
}
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 8e5851aa4369..8b6355ffaff9 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -118,12 +118,15 @@
#define KICK0_VALUE 0x83e70b13
#define KICK1_VALUE 0x95a4f1e0
+struct omap_rtc;
+
struct omap_rtc_device_type {
bool has_32kclk_en;
- bool has_kicker;
bool has_irqwakeen;
bool has_pmic_mode;
bool has_power_up_reset;
+ void (*lock)(struct omap_rtc *rtc);
+ void (*unlock)(struct omap_rtc *rtc);
};
struct omap_rtc {
@@ -156,6 +159,26 @@ static inline void rtc_writel(struct omap_rtc *rtc, unsigned int reg, u32 val)
writel(val, rtc->base + reg);
}
+static void am3352_rtc_unlock(struct omap_rtc *rtc)
+{
+ rtc_writel(rtc, OMAP_RTC_KICK0_REG, KICK0_VALUE);
+ rtc_writel(rtc, OMAP_RTC_KICK1_REG, KICK1_VALUE);
+}
+
+static void am3352_rtc_lock(struct omap_rtc *rtc)
+{
+ rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
+ rtc_writel(rtc, OMAP_RTC_KICK1_REG, 0);
+}
+
+static void default_rtc_unlock(struct omap_rtc *rtc)
+{
+}
+
+static void default_rtc_lock(struct omap_rtc *rtc)
+{
+}
+
/*
* We rely on the rtc framework to handle locking (rtc->ops_lock),
* so the only other requirement is that register accesses which
@@ -186,7 +209,9 @@ static irqreturn_t rtc_irq(int irq, void *dev_id)
/* alarm irq? */
if (irq_data & OMAP_RTC_STATUS_ALARM) {
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_STATUS_REG, OMAP_RTC_STATUS_ALARM);
+ rtc->type->lock(rtc);
events |= RTC_IRQF | RTC_AF;
}
@@ -218,9 +243,11 @@ static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
irqwake_reg &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
}
rtc_wait_not_busy(rtc);
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, reg);
if (rtc->type->has_irqwakeen)
rtc_write(rtc, OMAP_RTC_IRQWAKEEN, irqwake_reg);
+ rtc->type->lock(rtc);
local_irq_enable();
return 0;
@@ -293,12 +320,14 @@ static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
local_irq_disable();
rtc_wait_not_busy(rtc);
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_YEARS_REG, tm->tm_year);
rtc_write(rtc, OMAP_RTC_MONTHS_REG, tm->tm_mon);
rtc_write(rtc, OMAP_RTC_DAYS_REG, tm->tm_mday);
rtc_write(rtc, OMAP_RTC_HOURS_REG, tm->tm_hour);
rtc_write(rtc, OMAP_RTC_MINUTES_REG, tm->tm_min);
rtc_write(rtc, OMAP_RTC_SECONDS_REG, tm->tm_sec);
+ rtc->type->lock(rtc);
local_irq_enable();
@@ -341,6 +370,7 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
local_irq_disable();
rtc_wait_not_busy(rtc);
+ rtc->type->unlock(rtc);
rtc_write(rtc, OMAP_RTC_ALARM_YEARS_REG, alm->time.tm_year);
rtc_write(rtc, OMAP_RTC_ALARM_MONTHS_REG, alm->time.tm_mon);
rtc_write(rtc, OMAP_RTC_ALARM_DAYS_REG, alm->time.tm_mday);
@@ -362,6 +392,7 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, reg);
if (rtc->type->has_irqwakeen)
rtc_write(rtc, OMAP_RTC_IRQWAKEEN, irqwake_reg);
+ rtc->type->lock(rtc);
local_irq_enable();
@@ -391,6 +422,7 @@ static void omap_rtc_power_off(void)
unsigned long now;
u32 val;
+ rtc->type->unlock(rtc);
/* enable pmic_power_en control */
val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
rtc_writel(rtc, OMAP_RTC_PMIC_REG, val | OMAP_RTC_PMIC_POWER_EN_EN);
@@ -423,6 +455,7 @@ static void omap_rtc_power_off(void)
val = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
rtc_writel(rtc, OMAP_RTC_INTERRUPTS_REG,
val | OMAP_RTC_INTERRUPTS_IT_ALARM2);
+ rtc->type->lock(rtc);
/*
* Wait for alarm to trigger (within two seconds) and external PMIC to
@@ -442,17 +475,21 @@ static struct rtc_class_ops omap_rtc_ops = {
static const struct omap_rtc_device_type omap_rtc_default_type = {
.has_power_up_reset = true,
+ .lock = default_rtc_lock,
+ .unlock = default_rtc_unlock,
};
static const struct omap_rtc_device_type omap_rtc_am3352_type = {
.has_32kclk_en = true,
- .has_kicker = true,
.has_irqwakeen = true,
.has_pmic_mode = true,
+ .lock = am3352_rtc_lock,
+ .unlock = am3352_rtc_unlock,
};
static const struct omap_rtc_device_type omap_rtc_da830_type = {
- .has_kicker = true,
+ .lock = am3352_rtc_lock,
+ .unlock = am3352_rtc_unlock,
};
static const struct platform_device_id omap_rtc_id_table[] = {
@@ -484,7 +521,7 @@ static const struct of_device_id omap_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
-static int __init omap_rtc_probe(struct platform_device *pdev)
+static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
struct resource *res;
@@ -527,10 +564,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- if (rtc->type->has_kicker) {
- rtc_writel(rtc, OMAP_RTC_KICK0_REG, KICK0_VALUE);
- rtc_writel(rtc, OMAP_RTC_KICK1_REG, KICK1_VALUE);
- }
+ rtc->type->unlock(rtc);
/*
* disable interrupts
@@ -593,6 +627,8 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
if (reg != new_ctrl)
rtc_write(rtc, OMAP_RTC_CTRL_REG, new_ctrl);
+ rtc->type->lock(rtc);
+
device_init_wakeup(&pdev->dev, true);
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
@@ -626,8 +662,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
err:
device_init_wakeup(&pdev->dev, false);
- if (rtc->type->has_kicker)
- rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
+ rtc->type->lock(rtc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -646,11 +681,11 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
+ rtc->type->unlock(rtc);
/* leave rtc running, but disable irqs */
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
- if (rtc->type->has_kicker)
- rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
+ rtc->type->lock(rtc);
/* Disable the clock/module */
pm_runtime_put_sync(&pdev->dev);
@@ -666,6 +701,7 @@ static int omap_rtc_suspend(struct device *dev)
rtc->interrupts_reg = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
+ rtc->type->unlock(rtc);
/*
* FIXME: the RTC alarm is not currently acting as a wakeup event
* source on some platforms, and in fact this enable() call is just
@@ -675,6 +711,7 @@ static int omap_rtc_suspend(struct device *dev)
enable_irq_wake(rtc->irq_alarm);
else
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
+ rtc->type->lock(rtc);
/* Disable the clock/module */
pm_runtime_put_sync(dev);
@@ -689,10 +726,12 @@ static int omap_rtc_resume(struct device *dev)
/* Enable the clock/module so that we can access the registers */
pm_runtime_get_sync(dev);
+ rtc->type->unlock(rtc);
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq_alarm);
else
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
+ rtc->type->lock(rtc);
return 0;
}
@@ -709,12 +748,15 @@ static void omap_rtc_shutdown(struct platform_device *pdev)
* Keep the ALARM interrupt enabled to allow the system to power up on
* alarm events.
*/
+ rtc->type->unlock(rtc);
mask = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
mask &= OMAP_RTC_INTERRUPTS_IT_ALARM;
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, mask);
+ rtc->type->lock(rtc);
}
static struct platform_driver omap_rtc_driver = {
+ .probe = omap_rtc_probe,
.remove = __exit_p(omap_rtc_remove),
.shutdown = omap_rtc_shutdown,
.driver = {
@@ -725,7 +767,7 @@ static struct platform_driver omap_rtc_driver = {
.id_table = omap_rtc_id_table,
};
-module_platform_driver_probe(omap_rtc_driver, omap_rtc_probe);
+module_platform_driver(omap_rtc_driver);
MODULE_ALIAS("platform:omap_rtc");
MODULE_AUTHOR("George G. Davis (and others)");
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 95f652165fe9..7061dcae2b09 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -16,8 +16,9 @@
* along with this program.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRVNAME "rtc-opal"
-#define pr_fmt(fmt) DRVNAME ": " fmt
#include <linux/module.h>
#include <linux/err.h>
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 96fb32e7d6f8..0ba7e59929be 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -246,7 +246,6 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
- int err;
unsigned char buf[9];
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
@@ -272,12 +271,8 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[PCF8563_REG_DW] = tm->tm_wday & 0x07;
- err = pcf8563_write_block_data(client, PCF8563_REG_SC,
+ return pcf8563_write_block_data(client, PCF8563_REG_SC,
9 - PCF8563_REG_SC, buf + PCF8563_REG_SC);
- if (err)
- return err;
-
- return 0;
}
#ifdef CONFIG_RTC_INTF_DEV
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index f4cf6851fae9..76cbad7a99d3 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -39,7 +39,6 @@ struct s3c_rtc {
void __iomem *base;
struct clk *rtc_clk;
struct clk *rtc_src_clk;
- bool enabled;
struct s3c_rtc_data *data;
@@ -67,26 +66,25 @@ struct s3c_rtc_data {
void (*disable) (struct s3c_rtc *info);
};
-static void s3c_rtc_alarm_clk_enable(struct s3c_rtc *info, bool enable)
+static void s3c_rtc_enable_clk(struct s3c_rtc *info)
{
unsigned long irq_flags;
spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
- if (enable) {
- if (!info->enabled) {
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
- info->enabled = true;
- }
- } else {
- if (info->enabled) {
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
- info->enabled = false;
- }
- }
+ clk_enable(info->rtc_clk);
+ if (info->data->needs_src_clk)
+ clk_enable(info->rtc_src_clk);
+ spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
+}
+
+static void s3c_rtc_disable_clk(struct s3c_rtc *info)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
+ if (info->data->needs_src_clk)
+ clk_disable(info->rtc_src_clk);
+ clk_disable(info->rtc_clk);
spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
}
@@ -119,20 +117,16 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
dev_dbg(info->dev, "%s: aie=%d\n", __func__, enabled);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
+
tmp = readb(info->base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
if (enabled)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, info->base + S3C2410_RTCALM);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
- s3c_rtc_alarm_clk_enable(info, enabled);
+ s3c_rtc_disable_clk(info);
return 0;
}
@@ -143,18 +137,12 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
if (!is_power_of_2(freq))
return -EINVAL;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
spin_lock_irq(&info->pie_lock);
if (info->data->set_freq)
info->data->set_freq(info, freq);
spin_unlock_irq(&info->pie_lock);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
return 0;
}
@@ -165,9 +153,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
struct s3c_rtc *info = dev_get_drvdata(dev);
unsigned int have_retried = 0;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
retry_get_time:
rtc_tm->tm_min = readb(info->base + S3C2410_RTCMIN);
@@ -194,6 +180,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+ s3c_rtc_disable_clk(info);
+
rtc_tm->tm_year += 100;
dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
@@ -202,10 +190,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_mon -= 1;
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return rtc_valid_tm(rtc_tm);
}
@@ -225,9 +209,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
writeb(bin2bcd(tm->tm_sec), info->base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), info->base + S3C2410_RTCMIN);
@@ -236,9 +218,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
writeb(bin2bcd(tm->tm_mon + 1), info->base + S3C2410_RTCMON);
writeb(bin2bcd(year), info->base + S3C2410_RTCYEAR);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_disable_clk(info);
return 0;
}
@@ -249,9 +229,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *alm_tm = &alrm->time;
unsigned int alm_en;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
alm_tm->tm_sec = readb(info->base + S3C2410_ALMSEC);
alm_tm->tm_min = readb(info->base + S3C2410_ALMMIN);
@@ -262,6 +240,8 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alm_en = readb(info->base + S3C2410_RTCALM);
+ s3c_rtc_disable_clk(info);
+
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
dev_dbg(dev, "read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
@@ -269,9 +249,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
-
/* decode the alarm enable field */
-
if (alm_en & S3C2410_RTCALM_SECEN)
alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
else
@@ -304,10 +282,6 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
else
alm_tm->tm_year = -1;
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return 0;
}
@@ -317,15 +291,13 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
unsigned int alrm_en;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
+ s3c_rtc_enable_clk(info);
+
alrm_en = readb(info->base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
writeb(0x00, info->base + S3C2410_RTCALM);
@@ -348,11 +320,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
writeb(alrm_en, info->base + S3C2410_RTCALM);
- s3c_rtc_setaie(dev, alrm->enabled);
+ s3c_rtc_disable_clk(info);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_setaie(dev, alrm->enabled);
return 0;
}
@@ -361,16 +331,12 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
if (info->data->enable_tick)
info->data->enable_tick(info, seq);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_disable_clk(info);
return 0;
}
@@ -388,10 +354,6 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
{
unsigned int con, tmp;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
con = readw(info->base + S3C2410_RTCCON);
/* re-enable the device, and check it is ok */
if ((con & S3C2410_RTCCON_RTCEN) == 0) {
@@ -417,20 +379,12 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
writew(tmp & ~S3C2410_RTCCON_CLKRST,
info->base + S3C2410_RTCCON);
}
-
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
}
static void s3c24xx_rtc_disable(struct s3c_rtc *info)
{
unsigned int con;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
con = readw(info->base + S3C2410_RTCCON);
con &= ~S3C2410_RTCCON_RTCEN;
writew(con, info->base + S3C2410_RTCCON);
@@ -438,28 +392,16 @@ static void s3c24xx_rtc_disable(struct s3c_rtc *info)
con = readb(info->base + S3C2410_TICNT);
con &= ~S3C2410_TICNT_ENABLE;
writeb(con, info->base + S3C2410_TICNT);
-
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
}
static void s3c6410_rtc_disable(struct s3c_rtc *info)
{
unsigned int con;
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
con = readw(info->base + S3C2410_RTCCON);
con &= ~S3C64XX_RTCCON_TICEN;
con &= ~S3C2410_RTCCON_RTCEN;
writew(con, info->base + S3C2410_RTCCON);
-
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
}
static int s3c_rtc_remove(struct platform_device *pdev)
@@ -554,6 +496,20 @@ static int s3c_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
+ /* Check RTC Time */
+ if (s3c_rtc_gettime(&pdev->dev, &rtc_tm)) {
+ rtc_tm.tm_year = 100;
+ rtc_tm.tm_mon = 0;
+ rtc_tm.tm_mday = 1;
+ rtc_tm.tm_hour = 0;
+ rtc_tm.tm_min = 0;
+ rtc_tm.tm_sec = 0;
+
+ s3c_rtc_settime(&pdev->dev, &rtc_tm);
+
+ dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
+ }
+
/* register RTC and exit */
info->rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops,
THIS_MODULE);
@@ -577,36 +533,21 @@ static int s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- /* Check RTC Time */
- s3c_rtc_gettime(&pdev->dev, &rtc_tm);
-
- if (rtc_valid_tm(&rtc_tm)) {
- rtc_tm.tm_year = 100;
- rtc_tm.tm_mon = 0;
- rtc_tm.tm_mday = 1;
- rtc_tm.tm_hour = 0;
- rtc_tm.tm_min = 0;
- rtc_tm.tm_sec = 0;
-
- s3c_rtc_settime(&pdev->dev, &rtc_tm);
-
- dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
- }
-
if (info->data->select_tick_clk)
info->data->select_tick_clk(info);
s3c_rtc_setfreq(info, 1);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ s3c_rtc_disable_clk(info);
return 0;
err_nortc:
if (info->data->disable)
info->data->disable(info);
+
+ if (info->data->needs_src_clk)
+ clk_disable_unprepare(info->rtc_src_clk);
clk_disable_unprepare(info->rtc_clk);
return ret;
@@ -618,9 +559,7 @@ static int s3c_rtc_suspend(struct device *dev)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ s3c_rtc_enable_clk(info);
/* save TICNT for anyone using periodic interrupts */
if (info->data->save_tick_cnt)
@@ -636,10 +575,6 @@ static int s3c_rtc_suspend(struct device *dev)
dev_err(dev, "enable_irq_wake failed\n");
}
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return 0;
}
@@ -647,25 +582,19 @@ static int s3c_rtc_resume(struct device *dev)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
-
if (info->data->enable)
info->data->enable(info);
if (info->data->restore_tick_cnt)
info->data->restore_tick_cnt(info);
+ s3c_rtc_disable_clk(info);
+
if (device_may_wakeup(dev) && info->wake_en) {
disable_irq_wake(info->irq_alarm);
info->wake_en = false;
}
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
return 0;
}
#endif
@@ -673,29 +602,13 @@ static SIMPLE_DEV_PM_OPS(s3c_rtc_pm_ops, s3c_rtc_suspend, s3c_rtc_resume);
static void s3c24xx_rtc_irq(struct s3c_rtc *info, int mask)
{
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
- s3c_rtc_alarm_clk_enable(info, false);
}
static void s3c6410_rtc_irq(struct s3c_rtc *info, int mask)
{
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF);
writeb(mask, info->base + S3C2410_INTP);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
-
- s3c_rtc_alarm_clk_enable(info, false);
}
static void s3c2410_rtc_setfreq(struct s3c_rtc *info, int freq)
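
Editor's note on the rtc-s3c hunks above: the repeated open-coded clk_enable()/clk_disable() pairs (with the needs_src_clk conditional) are folded into s3c_rtc_enable_clk()/s3c_rtc_disable_clk() helpers taken under alarm_clk_lock. Below is a small sketch of the balanced-helper idea using a stand-in counter instead of the kernel clk API; it is purely illustrative and omits the spinlock the real helpers take.

/* Sketch: paired enable/disable helpers keep clock enable counts balanced. */
#include <stdbool.h>
#include <stdio.h>

struct fake_clk { int enable_count; };

static void clk_enable(struct fake_clk *c)  { c->enable_count++; }
static void clk_disable(struct fake_clk *c) { c->enable_count--; }

struct rtc_info {
	struct fake_clk rtc_clk, rtc_src_clk;
	bool needs_src_clk;
};

static void rtc_enable_clk(struct rtc_info *info)
{
	clk_enable(&info->rtc_clk);
	if (info->needs_src_clk)
		clk_enable(&info->rtc_src_clk);
}

static void rtc_disable_clk(struct rtc_info *info)
{
	if (info->needs_src_clk)
		clk_disable(&info->rtc_src_clk);
	clk_disable(&info->rtc_clk);
}

int main(void)
{
	struct rtc_info info = { .needs_src_clk = true };

	rtc_enable_clk(&info);
	/* register access would happen here */
	rtc_disable_clk(&info);

	printf("rtc_clk=%d src_clk=%d\n",
	       info.rtc_clk.enable_count, info.rtc_src_clk.enable_count);
	return 0;
}

Centralizing the pairing makes it much harder for one code path to leave a clock enabled (or disable it twice), which is exactly the class of bug the removed per-call-site enable/disable code invited.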
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 89ac1d5083c6..8c70d785ba73 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
@@ -48,8 +50,6 @@ struct s5m_rtc_reg_config {
unsigned int alarm0;
/* First register for alarm 1, seconds */
unsigned int alarm1;
- /* SMPL/WTSR register */
- unsigned int smpl_wtsr;
/*
* Register for update flag (UDR). Typically setting UDR field to 1
* will enable update of time or alarm register. Then it will be
@@ -67,7 +67,6 @@ static const struct s5m_rtc_reg_config s5m_rtc_regs = {
.ctrl = S5M_ALARM1_CONF,
.alarm0 = S5M_ALARM0_SEC,
.alarm1 = S5M_ALARM1_SEC,
- .smpl_wtsr = S5M_WTSR_SMPL_CNTL,
.rtc_udr_update = S5M_RTC_UDR_CON,
.rtc_udr_mask = S5M_RTC_UDR_MASK,
};
@@ -82,7 +81,6 @@ static const struct s5m_rtc_reg_config s2mps_rtc_regs = {
.ctrl = S2MPS_RTC_CTRL,
.alarm0 = S2MPS_ALARM0_SEC,
.alarm1 = S2MPS_ALARM1_SEC,
- .smpl_wtsr = S2MPS_WTSR_SMPL_CNTL,
.rtc_udr_update = S2MPS_RTC_UDR_CON,
.rtc_udr_mask = S2MPS_RTC_WUDR_MASK,
};
@@ -94,9 +92,8 @@ struct s5m_rtc_info {
struct regmap *regmap;
struct rtc_device *rtc_dev;
int irq;
- int device_type;
+ enum sec_device_type device_type;
int rtc_24hr_mode;
- bool wtsr_smpl;
const struct s5m_rtc_reg_config *regs;
};
@@ -151,7 +148,7 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_YEAR1] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
if (tm->tm_year < 100) {
- pr_err("s5m8767 RTC cannot handle the year %d.\n",
+ pr_err("RTC cannot handle the year %d\n",
1900 + tm->tm_year);
return -EINVAL;
} else {
@@ -192,6 +189,7 @@ static inline int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
val &= S5M_ALARM0_STATUS;
break;
case S2MPS14X:
+ case S2MPS13X:
ret = regmap_read(info->s5m87xx->regmap_pmic, S2MPS14_REG_ST2,
&val);
val &= S2MPS_ALARM0_STATUS;
@@ -257,6 +255,9 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
case S2MPS14X:
data |= S2MPS_RTC_RUDR_MASK;
break;
+ case S2MPS13X:
+ data |= S2MPS13_RTC_AUDR_MASK;
+ break;
default:
return -EINVAL;
}
@@ -270,6 +271,11 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
ret = s5m8767_wait_for_udr_update(info);
+ /* On S2MPS13 the AUDR is not auto-cleared */
+ if (info->device_type == S2MPS13X)
+ regmap_update_bits(info->regmap, info->regs->rtc_udr_update,
+ S2MPS13_RTC_AUDR_MASK, 0);
+
return ret;
}
@@ -311,7 +317,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
u8 data[info->regs->regs_count];
int ret;
- if (info->device_type == S2MPS14X) {
+ if (info->device_type == S2MPS14X || info->device_type == S2MPS13X) {
ret = regmap_update_bits(info->regmap,
info->regs->rtc_udr_update,
S2MPS_RTC_RUDR_MASK, S2MPS_RTC_RUDR_MASK);
@@ -334,6 +340,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
s5m8767_data_to_tm(data, tm, info->rtc_24hr_mode);
break;
@@ -360,6 +367,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
break;
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
ret = s5m8767_tm_to_data(tm, data);
break;
default:
@@ -407,6 +415,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
s5m8767_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
alrm->enabled = 0;
for (i = 0; i < info->regs->regs_count; i++) {
@@ -455,6 +464,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
for (i = 0; i < info->regs->regs_count; i++)
data[i] &= ~ALARM_ENABLE_MASK;
@@ -499,6 +509,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
data[RTC_SEC] |= ALARM_ENABLE_MASK;
data[RTC_MIN] |= ALARM_ENABLE_MASK;
data[RTC_HOUR] |= ALARM_ENABLE_MASK;
@@ -538,6 +549,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
case S5M8767X:
case S2MPS14X:
+ case S2MPS13X:
s5m8767_tm_to_data(&alrm->time, data);
break;
@@ -597,28 +609,6 @@ static const struct rtc_class_ops s5m_rtc_ops = {
.alarm_irq_enable = s5m_rtc_alarm_irq_enable,
};
-static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
-{
- int ret;
- ret = regmap_update_bits(info->regmap, info->regs->smpl_wtsr,
- WTSR_ENABLE_MASK,
- enable ? WTSR_ENABLE_MASK : 0);
- if (ret < 0)
- dev_err(info->dev, "%s: fail to update WTSR reg(%d)\n",
- __func__, ret);
-}
-
-static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
-{
- int ret;
- ret = regmap_update_bits(info->regmap, info->regs->smpl_wtsr,
- SMPL_ENABLE_MASK,
- enable ? SMPL_ENABLE_MASK : 0);
- if (ret < 0)
- dev_err(info->dev, "%s: fail to update SMPL reg(%d)\n",
- __func__, ret);
-}
-
static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
{
u8 data[2];
@@ -642,6 +632,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
break;
case S2MPS14X:
+ case S2MPS13X:
data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
break;
@@ -677,8 +668,9 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- switch (pdata->device_type) {
+ switch (platform_get_device_id(pdev)->driver_data) {
case S2MPS14X:
+ case S2MPS13X:
regmap_cfg = &s2mps14_rtc_regmap_config;
info->regs = &s2mps_rtc_regs;
alarm_irq = S2MPS14_IRQ_RTCA0;
@@ -694,7 +686,9 @@ static int s5m_rtc_probe(struct platform_device *pdev)
alarm_irq = S5M8767_IRQ_RTCA1;
break;
default:
- dev_err(&pdev->dev, "Device type is not supported by RTC driver\n");
+ dev_err(&pdev->dev,
+ "Device type %lu is not supported by RTC driver\n",
+ platform_get_device_id(pdev)->driver_data);
return -ENODEV;
}
@@ -714,8 +708,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
- info->device_type = s5m87xx->device_type;
- info->wtsr_smpl = s5m87xx->wtsr_smpl;
+ info->device_type = platform_get_device_id(pdev)->driver_data;
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
@@ -731,11 +724,6 @@ static int s5m_rtc_probe(struct platform_device *pdev)
ret = s5m8767_rtc_init_reg(info);
- if (info->wtsr_smpl) {
- s5m_rtc_enable_wtsr(info, true);
- s5m_rtc_enable_smpl(info, true);
- }
-
device_init_wakeup(&pdev->dev, 1);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc",
@@ -768,36 +756,10 @@ err:
return ret;
}
-static void s5m_rtc_shutdown(struct platform_device *pdev)
-{
- struct s5m_rtc_info *info = platform_get_drvdata(pdev);
- int i;
- unsigned int val = 0;
- if (info->wtsr_smpl) {
- for (i = 0; i < 3; i++) {
- s5m_rtc_enable_wtsr(info, false);
- regmap_read(info->regmap, info->regs->smpl_wtsr, &val);
- pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
- if (val & WTSR_ENABLE_MASK)
- pr_emerg("%s: fail to disable WTSR\n",
- __func__);
- else {
- pr_info("%s: success to disable WTSR\n",
- __func__);
- break;
- }
- }
- }
- /* Disable SMPL when power off */
- s5m_rtc_enable_smpl(info, false);
-}
-
static int s5m_rtc_remove(struct platform_device *pdev)
{
struct s5m_rtc_info *info = platform_get_drvdata(pdev);
- /* Perform also all shutdown steps when removing */
- s5m_rtc_shutdown(pdev);
i2c_unregister_device(info->i2c);
return 0;
@@ -831,6 +793,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
+ { "s2mps13-rtc", S2MPS13X },
{ "s2mps14-rtc", S2MPS14X },
{ },
};
@@ -842,7 +805,6 @@ static struct platform_driver s5m_rtc_driver = {
},
.probe = s5m_rtc_probe,
.remove = s5m_rtc_remove,
- .shutdown = s5m_rtc_shutdown,
.id_table = s5m_rtc_id,
};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 2939cdcb2688..eb09eddf39b8 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -42,6 +42,8 @@
#define STMP3XXX_RTC_STAT 0x10
#define STMP3XXX_RTC_STAT_STALE_SHIFT 16
#define STMP3XXX_RTC_STAT_RTC_PRESENT 0x80000000
+#define STMP3XXX_RTC_STAT_XTAL32000_PRESENT 0x10000000
+#define STMP3XXX_RTC_STAT_XTAL32768_PRESENT 0x08000000
#define STMP3XXX_RTC_SECONDS 0x30
@@ -52,9 +54,13 @@
#define STMP3XXX_RTC_PERSISTENT0 0x60
#define STMP3XXX_RTC_PERSISTENT0_SET 0x64
#define STMP3XXX_RTC_PERSISTENT0_CLR 0x68
-#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN 0x00000002
-#define STMP3XXX_RTC_PERSISTENT0_ALARM_EN 0x00000004
-#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE 0x00000080
+#define STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE (1 << 0)
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN (1 << 1)
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_EN (1 << 2)
+#define STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP (1 << 4)
+#define STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP (1 << 5)
+#define STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ (1 << 6)
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE (1 << 7)
#define STMP3XXX_RTC_PERSISTENT1 0x70
/* missing bitmask in headers */
@@ -248,6 +254,9 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
{
struct stmp3xxx_rtc_data *rtc_data;
struct resource *r;
+ u32 rtc_stat;
+ u32 pers0_set, pers0_clr;
+ u32 crystalfreq = 0;
int err;
rtc_data = devm_kzalloc(&pdev->dev, sizeof(*rtc_data), GFP_KERNEL);
@@ -268,8 +277,8 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
rtc_data->irq_alarm = platform_get_irq(pdev, 0);
- if (!(readl(STMP3XXX_RTC_STAT + rtc_data->io) &
- STMP3XXX_RTC_STAT_RTC_PRESENT)) {
+ rtc_stat = readl(rtc_data->io + STMP3XXX_RTC_STAT);
+ if (!(rtc_stat & STMP3XXX_RTC_STAT_RTC_PRESENT)) {
dev_err(&pdev->dev, "no device onboard\n");
return -ENODEV;
}
@@ -282,9 +291,54 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
return err;
}
+ /*
+ * The RTC needs a clock input to run. That clock can be provided by
+ * an external 32k crystal. If the crystal is missing, the XTAL must
+ * not be disabled in suspend, which consumes a lot of power.
+ * Normally the presence and exact frequency (32000 Hz and 32768 Hz
+ * are supported) are detectable from fuses, but in practice these
+ * fuses are not blown correctly on all machines, so the frequency
+ * can be overridden in the device tree.
+ */
+ if (rtc_stat & STMP3XXX_RTC_STAT_XTAL32000_PRESENT)
+ crystalfreq = 32000;
+ else if (rtc_stat & STMP3XXX_RTC_STAT_XTAL32768_PRESENT)
+ crystalfreq = 32768;
+
+ of_property_read_u32(pdev->dev.of_node, "stmp,crystal-freq",
+ &crystalfreq);
+
+ switch (crystalfreq) {
+ case 32000:
+ /* keep 32kHz crystal running in low-power mode */
+ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ |
+ STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE;
+ pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP;
+ break;
+ case 32768:
+ /* keep 32.768kHz crystal running in low-power mode */
+ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE;
+ pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_XTAL32_FREQ;
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "invalid crystal-freq specified in device-tree. Assuming no crystal\n");
+ /* fall-through */
+ case 0:
+ /* keep XTAL on in low-power mode */
+ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP;
+ pers0_clr = STMP3XXX_RTC_PERSISTENT0_XTAL32KHZ_PWRUP |
+ STMP3XXX_RTC_PERSISTENT0_CLOCKSOURCE;
+ }
+
+ writel(pers0_set, rtc_data->io + STMP3XXX_RTC_PERSISTENT0_SET);
+
writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
- STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE | pers0_clr,
rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR);
writel(STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN |
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index d948277057d8..60232bd366ef 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -261,7 +261,9 @@ static int tegra_rtc_proc(struct device *dev, struct seq_file *seq)
if (!dev || !dev->driver)
return 0;
- return seq_printf(seq, "name\t\t: %s\n", dev_name(dev));
+ seq_printf(seq, "name\t\t: %s\n", dev_name(dev));
+
+ return 0;
}
static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 5baea3f54926..2dc787dc06c1 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -18,6 +18,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -145,8 +147,7 @@ static int twl_rtc_read_u8(u8 *data, u8 reg)
ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl_rtc: Could not read TWL"
- "register %X - error %d\n", reg, ret);
+ pr_err("Could not read TWL register %X - error %d\n", reg, ret);
return ret;
}
@@ -159,8 +160,8 @@ static int twl_rtc_write_u8(u8 data, u8 reg)
ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl_rtc: Could not write TWL"
- "register %X - error %d\n", reg, ret);
+ pr_err("Could not write TWL register %X - error %d\n",
+ reg, ret);
return ret;
}
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index b1de58e0b3d0..5638b7ba8b06 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -22,6 +22,7 @@
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/bitops.h>
#define DRV_VERSION "1.0.8"
@@ -366,8 +367,7 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim)
* perform sign extension. The formula is
* Catr = (atr * 0.25pF) + 11.00pF.
*/
- if (atr & 0x20)
- atr |= 0xC0;
+ atr = sign_extend32(atr, 5);
dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __func__, atr, atr);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index be34ef41b7c7..57fd66357b95 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -579,7 +579,8 @@ void dasd_kick_device(struct dasd_device *device)
{
dasd_get_device(device);
/* queue call to dasd_kick_device to the kernel event daemon. */
- schedule_work(&device->kick_work);
+ if (!schedule_work(&device->kick_work))
+ dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);
@@ -599,7 +600,8 @@ void dasd_reload_device(struct dasd_device *device)
{
dasd_get_device(device);
/* queue call to dasd_reload_device to the kernel event daemon. */
- schedule_work(&device->reload_device);
+ if (!schedule_work(&device->reload_device))
+ dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);
@@ -619,7 +621,8 @@ void dasd_restore_device(struct dasd_device *device)
{
dasd_get_device(device);
/* queue call to dasd_restore_device to the kernel event daemon. */
- schedule_work(&device->restore_device);
+ if (!schedule_work(&device->restore_device))
+ dasd_put_device(device);
}
/*
@@ -1237,7 +1240,6 @@ EXPORT_SYMBOL(dasd_smalloc_request);
*/
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
-#ifdef CONFIG_64BIT
struct ccw1 *ccw;
/* Clear any idals used for the request. */
@@ -1245,7 +1247,6 @@ void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
do {
clear_normalized_cda(ccw);
} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
-#endif
kfree(cqr->cpaddr);
kfree(cqr->data);
kfree(cqr);
@@ -2165,18 +2166,22 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
cqr->intrc = -ENOLINK;
continue;
}
- /* Don't try to start requests if device is stopped */
- if (interruptible) {
- rc = wait_event_interruptible(
- generic_waitq, !(device->stopped));
- if (rc == -ERESTARTSYS) {
- cqr->status = DASD_CQR_FAILED;
- maincqr->intrc = rc;
- continue;
- }
- } else
- wait_event(generic_waitq, !(device->stopped));
-
+ /*
+ * Don't try to start requests if the device is stopped,
+ * except for path verification requests
+ */
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, !(device->stopped));
+ if (rc == -ERESTARTSYS) {
+ cqr->status = DASD_CQR_FAILED;
+ maincqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, !(device->stopped));
+ }
if (!cqr->callback)
cqr->callback = dasd_wakeup_cb;
@@ -2526,6 +2531,11 @@ static void __dasd_process_request_queue(struct dasd_block *block)
__blk_end_request_all(req, -EIO);
return;
}
+
+ /* if the device is stopped, do not fetch new requests */
+ if (basedev->stopped)
+ return;
+
/* Now we try to fetch requests from the request queue */
while ((req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
@@ -2967,8 +2977,6 @@ enum blk_eh_timer_return dasd_times_out(struct request *req)
*/
static int dasd_alloc_queue(struct dasd_block *block)
{
- int rc;
-
block->request_queue = blk_init_queue(do_dasd_request,
&block->request_queue_lock);
if (block->request_queue == NULL)
@@ -2976,14 +2984,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
block->request_queue->queuedata = block;
- elevator_exit(block->request_queue->elevator);
- block->request_queue->elevator = NULL;
- mutex_lock(&block->request_queue->sysfs_lock);
- rc = elevator_init(block->request_queue, "deadline");
- if (rc)
- blk_cleanup_queue(block->request_queue);
- mutex_unlock(&block->request_queue->sysfs_lock);
- return rc;
+ return 0;
}
/*
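
Editor's note on the dasd.c hunks above: several call sites change schedule_work(&work) into "if (!schedule_work(&work)) dasd_put_device(device);" — the caller takes a device reference before queueing, and if schedule_work() reports the work item was already pending (returns false), the extra reference is dropped immediately instead of leaking. A small sketch of that reference-balancing pattern with a stand-in refcount; the names mimic but are not the dasd API.

/* Sketch: drop the extra reference when the work item was already queued. */
#include <stdbool.h>
#include <stdio.h>

struct device { int refcount; bool work_pending; };

static void get_device(struct device *d) { d->refcount++; }
static void put_device(struct device *d) { d->refcount--; }

/* Mimics schedule_work(): returns false if the work was already pending. */
static bool schedule_work(struct device *d)
{
	if (d->work_pending)
		return false;
	d->work_pending = true;
	return true;
}

static void kick_device(struct device *d)
{
	get_device(d);			/* reference held for the queued work */
	if (!schedule_work(d))
		put_device(d);		/* already queued: give the reference back */
}

int main(void)
{
	struct device dev = { .refcount = 1 };

	kick_device(&dev);	/* queues work, keeps one extra reference */
	kick_device(&dev);	/* already pending, refcount stays balanced */
	printf("refcount=%d\n", dev.refcount);	/* 2: original + one for the work */
	return 0;
}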
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index a803cc731586..e84a5468d810 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -38,8 +38,6 @@ struct dasd_diag_characteristics {
u8 rdev_features;
} __attribute__ ((packed, aligned(4)));
-
-#ifdef CONFIG_64BIT
#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
typedef u64 blocknum_t;
@@ -80,43 +78,3 @@ struct dasd_diag_rw_io {
struct dasd_diag_bio *bio_list;
u8 spare4[8];
} __attribute__ ((packed, aligned(8)));
-#else /* CONFIG_64BIT */
-#define DASD_DIAG_FLAGA_DEFAULT 0x0
-
-typedef u32 blocknum_t;
-typedef s32 sblocknum_t;
-
-struct dasd_diag_bio {
- u8 type;
- u8 status;
- u16 spare1;
- blocknum_t block_number;
- u32 alet;
- void *buffer;
-} __attribute__ ((packed, aligned(8)));
-
-struct dasd_diag_init_io {
- u16 dev_nr;
- u8 flaga;
- u8 spare1[21];
- u32 block_size;
- blocknum_t offset;
- sblocknum_t start_block;
- blocknum_t end_block;
- u8 spare2[24];
-} __attribute__ ((packed, aligned(8)));
-
-struct dasd_diag_rw_io {
- u16 dev_nr;
- u8 flaga;
- u8 spare1[21];
- u8 key;
- u8 flags;
- u8 spare2[2];
- u32 block_count;
- u32 alet;
- struct dasd_diag_bio *bio_list;
- u32 interrupt_params;
- u8 spare3[20];
-} __attribute__ ((packed, aligned(8)));
-#endif /* CONFIG_64BIT */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index d47f5b99623a..6215f6455eb8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1628,12 +1628,12 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
return;
}
/* queue call to do_validate_server to the kernel event daemon. */
- schedule_work(&device->kick_validate);
+ if (!schedule_work(&device->kick_validate))
+ dasd_put_device(device);
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
-#if defined(CONFIG_64BIT)
int tpm, mdc;
int fcx_in_css, fcx_in_gneq, fcx_in_features;
struct dasd_eckd_private *private;
@@ -1657,9 +1657,6 @@ static u32 get_fcx_max_data(struct dasd_device *device)
return 0;
} else
return mdc * FCX_MAX_DATA_FACTOR;
-#else
- return 0;
-#endif
}
/*
@@ -2615,10 +2612,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
/* Eckd can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
-#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
cidaw += bv.bv_len >> (block->s2b_shift + 9);
-#endif
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 2c8e68bf9a1c..c9262e78938b 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -287,10 +287,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
-#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
cidaw += bv.bv_len / blksize;
-#endif
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index a5c6f7e157aa..eaca3e006301 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -115,7 +115,7 @@ config SCLP_ASYNC_ID
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
- depends on S390 && 64BIT
+ depends on S390
select CRC16
help
This option enables support for file transfers from a Hardware
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index c43aca69fb30..0fc3fe5fd5b8 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -667,6 +667,8 @@ static struct raw3215_info *raw3215_alloc_info(void)
info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
if (!info->buffer || !info->inbuf) {
+ kfree(info->inbuf);
+ kfree(info->buffer);
kfree(info);
return NULL;
}
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 6e14999f9e8f..7be782116dab 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -315,10 +315,29 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size,
rc |= sclp_assign_storage(incr->rn);
else
sclp_unassign_storage(incr->rn);
+ if (rc == 0)
+ incr->standby = online ? 0 : 1;
}
return rc ? -EIO : 0;
}
+static bool contains_standby_increment(unsigned long start, unsigned long end)
+{
+ struct memory_increment *incr;
+ unsigned long istart;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ istart = rn2addr(incr->rn);
+ if (end - 1 < istart)
+ continue;
+ if (start > istart + sclp_rzm - 1)
+ continue;
+ if (incr->standby)
+ return true;
+ }
+ return false;
+}
+
static int sclp_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -334,8 +353,16 @@ static int sclp_mem_notifier(struct notifier_block *nb,
for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
sclp_attach_storage(id);
switch (action) {
- case MEM_ONLINE:
case MEM_GOING_OFFLINE:
+ /*
+ * We do not allow memory blocks that contain standby memory to be
+ * set offline. This is done to simplify the "memory online" case.
+ */
+ if (contains_standby_increment(start, start + size))
+ rc = -EPERM;
+ break;
+ case MEM_ONLINE:
case MEM_CANCEL_OFFLINE:
break;
case MEM_GOING_ONLINE:
@@ -361,6 +388,21 @@ static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier,
};
+static void __init align_to_block_size(unsigned long long *start,
+ unsigned long long *size)
+{
+ unsigned long long start_align, size_align, alignment;
+
+ alignment = memory_block_size_bytes();
+ start_align = roundup(*start, alignment);
+ size_align = rounddown(*start + *size, alignment) - start_align;
+
+ pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
+ *start, size_align >> 20, *size >> 20);
+ *start = start_align;
+ *size = size_align;
+}
+
static void __init add_memory_merged(u16 rn)
{
static u16 first_rn, num;
@@ -382,7 +424,9 @@ static void __init add_memory_merged(u16 rn)
goto skip_add;
if (memory_end_set && (start + size > memory_end))
size = memory_end - start;
- add_memory(0, start, size);
+ align_to_block_size(&start, &size);
+ if (size)
+ add_memory(0, start, size);
skip_add:
first_rn = rn;
num = 1;
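
Editor's note on the sclp_cmd.c hunks above: align_to_block_size() trims a standby-memory range so both ends fall on memory_block_size_bytes() boundaries — the start is rounded up, the end rounded down, and only the remainder (if any) is passed to add_memory(). A worked example of that arithmetic, assuming a hypothetical 256 MiB block size and an arbitrary start/size; both values are made up for illustration.

/* Worked example of the start/size alignment done in align_to_block_size(). */
#include <stdio.h>

#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))
#define ROUNDDOWN(x, a)	(((x) / (a)) * (a))

int main(void)
{
	unsigned long long alignment = 256ULL << 20;		/* assumed 256 MiB blocks */
	unsigned long long start = 0x1f000000ULL;		/* hypothetical standby range */
	unsigned long long size  = 0x30000000ULL;		/* 768 MiB */

	unsigned long long start_align = ROUNDUP(start, alignment);
	unsigned long long size_align  = ROUNDDOWN(start + size, alignment) - start_align;

	/* prints: start 0x1f000000 -> 0x20000000, size 768M -> 512M */
	printf("start 0x%llx -> 0x%llx, size %lluM -> %lluM\n",
	       start, start_align, size >> 20, size_align >> 20);
	return 0;
}

If the aligned size collapses to zero, the diff shows that add_memory() is simply skipped for that range.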
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 561a0414b352..eb7cb076c001 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -178,11 +178,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
-#ifdef CONFIG_64BIT
sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
-#else
- sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32;
-#endif
sccb.evbuf.event_status = 0;
sccb.evbuf.blk_cnt = nr_blks;
sccb.evbuf.asa = (unsigned long)dest;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index efcf48481c5f..a68fcfd1d48c 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -212,11 +212,7 @@ static struct zcore_header zcore_header = {
.dump_level = 0,
.page_size = PAGE_SIZE,
.mem_start = 0,
-#ifdef CONFIG_64BIT
.build_arch = DUMP_ARCH_S390X,
-#else
- .build_arch = DUMP_ARCH_S390,
-#endif
};
/*
@@ -516,23 +512,6 @@ static const struct file_operations zcore_hsa_fops = {
.llseek = no_llseek,
};
-#ifdef CONFIG_32BIT
-
-static void __init set_lc_mask(struct save_area *map)
-{
- memset(&map->ext_save, 0xff, sizeof(map->ext_save));
- memset(&map->timer, 0xff, sizeof(map->timer));
- memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
- memset(&map->psw, 0xff, sizeof(map->psw));
- memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
- memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
- memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
- memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
- memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
-}
-
-#else /* CONFIG_32BIT */
-
static void __init set_lc_mask(struct save_area *map)
{
memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
@@ -547,8 +526,6 @@ static void __init set_lc_mask(struct save_area *map)
memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}
-#endif /* CONFIG_32BIT */
-
/*
* Initialize dump globals for a given architecture
*/
@@ -688,21 +665,12 @@ static int __init zcore_init(void)
if (rc)
goto fail;
-#ifdef CONFIG_64BIT
if (arch == ARCH_S390) {
pr_alert("The 64-bit dump tool cannot be used for a "
"32-bit system\n");
rc = -EINVAL;
goto fail;
}
-#else /* CONFIG_64BIT */
- if (arch == ARCH_S390X) {
- pr_alert("The 32-bit dump tool cannot be used for a "
- "64-bit system\n");
- rc = -EINVAL;
- goto fail;
- }
-#endif /* CONFIG_64BIT */
rc = get_mem_info(&mem_size, &mem_end);
if (rc)
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index b3f791b2c1f8..20314aad7ab7 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -330,18 +330,20 @@ cio_ignore_proc_seq_show(struct seq_file *s, void *it)
if (!iter->in_range) {
/* First device in range. */
if ((iter->devno == __MAX_SUBCHANNEL) ||
- !is_blacklisted(iter->ssid, iter->devno + 1))
+ !is_blacklisted(iter->ssid, iter->devno + 1)) {
/* Singular device. */
- return seq_printf(s, "0.%x.%04x\n",
- iter->ssid, iter->devno);
+ seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ return 0;
+ }
iter->in_range = 1;
- return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+ seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+ return 0;
}
if ((iter->devno == __MAX_SUBCHANNEL) ||
!is_blacklisted(iter->ssid, iter->devno + 1)) {
/* Last device in range. */
iter->in_range = 0;
- return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
}
return 0;
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 3578105989a0..07fc5d9e7f10 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -143,13 +143,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
orb->cmd.spnd = priv->options.suspend;
orb->cmd.ssic = priv->options.suspend && priv->options.inter;
orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
-#ifdef CONFIG_64BIT
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
*/
orb->cmd.c64 = 1;
orb->cmd.i2k = 0;
-#endif
orb->cmd.key = key >> 4;
/* issue "Start Subchannel" */
orb->cmd.cpa = (__u32) __pa(cpa);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a563e4c00590..7e70f9298cc1 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -84,7 +84,6 @@ enum qdio_irq_states {
#define QDIO_SIGA_WRITEQ 0x04
#define QDIO_SIGA_QEBSM_FLAG 0x80
-#ifdef CONFIG_64BIT
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count)
{
@@ -122,12 +121,6 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
return (_ccq >> 32) & 0xff;
}
-#else
-static inline int do_sqbs(u64 token, unsigned char state, int queue,
- int *start, int *count) { return 0; }
-static inline int do_eqbs(u64 token, unsigned char *state, int queue,
- int *start, int *count, int ack) { return 0; }
-#endif /* CONFIG_64BIT */
struct qdio_irq;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index f76bff68d1de..48b3866a9ded 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -91,10 +91,7 @@ EXPORT_SYMBOL_GPL(qdio_reset_buffers);
*/
static inline int qebsm_possible(void)
{
-#ifdef CONFIG_64BIT
return css_general_characteristics.qebsm;
-#endif
- return 0;
}
/*
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 3d7f19fb9a4e..f0b9871a4bbd 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -165,7 +165,7 @@ static inline int ap_instructions_available(void)
*/
static int ap_interrupts_available(void)
{
- return test_facility(2) && test_facility(65);
+ return test_facility(65);
}
/**
@@ -174,12 +174,10 @@ static int ap_interrupts_available(void)
*
* Returns 1 if AP configuration information is available.
*/
-#ifdef CONFIG_64BIT
static int ap_configuration_available(void)
{
- return test_facility(2) && test_facility(12);
+ return test_facility(12);
}
-#endif
/**
* ap_test_queue(): Test adjunct processor queue.
@@ -239,7 +237,6 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
return reg1;
}
-#ifdef CONFIG_64BIT
/**
* ap_queue_interruption_control(): Enable interruption for a specific AP.
* @qid: The AP queue number
@@ -261,9 +258,7 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
: "cc" );
return reg1_out;
}
-#endif
-#ifdef CONFIG_64BIT
static inline struct ap_queue_status
__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
@@ -282,9 +277,7 @@ __ap_query_functions(ap_qid_t qid, unsigned int *functions)
*functions = (unsigned int)(reg2 >> 32);
return reg1;
}
-#endif
-#ifdef CONFIG_64BIT
static inline int __ap_query_configuration(struct ap_config_info *config)
{
register unsigned long reg0 asm ("0") = 0x04000000UL;
@@ -302,7 +295,6 @@ static inline int __ap_query_configuration(struct ap_config_info *config)
return reg1;
}
-#endif
/**
* ap_query_functions(): Query supported functions.
@@ -317,7 +309,6 @@ static inline int __ap_query_configuration(struct ap_config_info *config)
*/
static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
-#ifdef CONFIG_64BIT
struct ap_queue_status status;
int i;
status = __ap_query_functions(qid, functions);
@@ -348,9 +339,6 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
}
}
return -EBUSY;
-#else
- return -EINVAL;
-#endif
}
/**
@@ -364,7 +352,6 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
*/
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
-#ifdef CONFIG_64BIT
struct ap_queue_status status;
int t_depth, t_device_type, rc, i;
@@ -404,9 +391,6 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
}
}
return rc;
-#else
- return -EINVAL;
-#endif
}
/**
@@ -1238,7 +1222,6 @@ static struct bus_attribute *const ap_bus_attrs[] = {
*/
static void ap_query_configuration(void)
{
-#ifdef CONFIG_64BIT
if (ap_configuration_available()) {
if (!ap_configuration)
ap_configuration =
@@ -1248,9 +1231,6 @@ static void ap_query_configuration(void)
__ap_query_configuration(ap_configuration);
} else
ap_configuration = NULL;
-#else
- ap_configuration = NULL;
-#endif
}
/**
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 71d7802aa8b4..6f1fa1773e76 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -1201,13 +1201,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
- if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
- vcdev->revision = 0;
- } else {
- ret = virtio_ccw_set_transport_rev(vcdev);
- if (ret)
- goto out_free;
- }
+ ret = virtio_ccw_set_transport_rev(vcdev);
+ if (ret)
+ goto out_free;
ret = register_virtio_device(&vcdev->vdev);
if (ret) {
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index f1b5111bbaba..b2837b1c70b7 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -57,17 +57,6 @@ config SMSGIUCV_EVENT
To compile as a module, choose M. The module name is "smsgiucv_app".
-config CLAW
- def_tristate m
- prompt "CLAW device support"
- depends on CCW && NETDEVICES
- help
- This driver supports channel attached CLAW devices.
- CLAW is Common Link Access for Workstation. Common devices
- that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
- To compile as a module, choose M. The module name is claw.
- To compile into the kernel, choose Y.
-
config QETH
def_tristate y
prompt "Gigabit Ethernet device support"
@@ -106,6 +95,6 @@ config QETH_IPV6
config CCWGROUP
tristate
- default (LCS || CTCM || QETH || CLAW)
+ default (LCS || CTCM || QETH)
endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index d28f05d0c75a..c351b07603e0 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
obj-$(CONFIG_LCS) += lcs.o
-obj-$(CONFIG_CLAW) += claw.o
qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
obj-$(CONFIG_QETH) += qeth.o
qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
deleted file mode 100644
index d609ca09aa94..000000000000
--- a/drivers/s390/net/claw.c
+++ /dev/null
@@ -1,3377 +0,0 @@
-/*
- * ESCON CLAW network driver
- *
- * Linux for zSeries version
- * Copyright IBM Corp. 2002, 2009
- * Author(s) Original code written by:
- * Kazuo Iimura <iimura@jp.ibm.com>
- * Rewritten by
- * Andy Richter <richtera@us.ibm.com>
- * Marc Price <mwprice@us.ibm.com>
- *
- * sysfs parms:
- * group x.x.rrrr,x.x.wwww
- * read_buffer nnnnnnn
- * write_buffer nnnnnn
- * host_name aaaaaaaa
- * adapter_name aaaaaaaa
- * api_type aaaaaaaa
- *
- * eg.
- * group 0.0.0200 0.0.0201
- * read_buffer 25
- * write_buffer 20
- * host_name LINUX390
- * adapter_name RS6K
- * api_type TCPIP
- *
- * where
- *
- * The device id is decided by the order entries
- * are added to the group the first is claw0 the second claw1
- * up to CLAW_MAX_DEV
- *
- * rrrr - the first of 2 consecutive device addresses used for the
- * CLAW protocol.
- * The specified address is always used as the input (Read)
- * channel and the next address is used as the output channel.
- *
- * wwww - the second of 2 consecutive device addresses used for
- * the CLAW protocol.
- * The specified address is always used as the output
- * channel and the previous address is used as the input channel.
- *
- * read_buffer - specifies number of input buffers to allocate.
- * write_buffer - specifies number of output buffers to allocate.
- * host_name - host name
- * adaptor_name - adaptor name
- * api_type - API type TCPIP or API will be sent and expected
- * as ws_name
- *
- * Note the following requirements:
- * 1) host_name must match the configured adapter_name on the remote side
- * 2) adaptor_name must match the configured host name on the remote side
- *
- * Change History
- * 1.00 Initial release shipped
- * 1.10 Changes for Buffer allocation
- * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
- * 1.25 Added Packing support
- * 1.5
- */
-
-#define KMSG_COMPONENT "claw"
-
-#include <asm/ccwdev.h>
-#include <asm/ccwgroup.h>
-#include <asm/debug.h>
-#include <asm/idals.h>
-#include <asm/io.h>
-#include <linux/bitops.h>
-#include <linux/ctype.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tcp.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-
-#include "claw.h"
-
-/*
- CLAW uses the s390dbf file system see claw_trace and claw_setup
-*/
-
-static char version[] __initdata = "CLAW driver";
-static char debug_buffer[255];
-/**
- * Debug Facility Stuff
- */
-static debug_info_t *claw_dbf_setup;
-static debug_info_t *claw_dbf_trace;
-
-/**
- * CLAW Debug Facility functions
- */
-static void
-claw_unregister_debug_facility(void)
-{
- debug_unregister(claw_dbf_setup);
- debug_unregister(claw_dbf_trace);
-}
-
-static int
-claw_register_debug_facility(void)
-{
- claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
- claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
- if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
- claw_unregister_debug_facility();
- return -ENOMEM;
- }
- debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
- debug_set_level(claw_dbf_setup, 2);
- debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
- debug_set_level(claw_dbf_trace, 2);
- return 0;
-}
-
-static inline void
-claw_set_busy(struct net_device *dev)
-{
- ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
-}
-
-static inline void
-claw_clear_busy(struct net_device *dev)
-{
- clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
- netif_wake_queue(dev);
-}
-
-static inline int
-claw_check_busy(struct net_device *dev)
-{
- return ((struct claw_privbk *) dev->ml_priv)->tbusy;
-}
-
-static inline void
-claw_setbit_busy(int nr,struct net_device *dev)
-{
- netif_stop_queue(dev);
- set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
-}
-
-static inline void
-claw_clearbit_busy(int nr,struct net_device *dev)
-{
- clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
- netif_wake_queue(dev);
-}
-
-static inline int
-claw_test_and_setbit_busy(int nr,struct net_device *dev)
-{
- netif_stop_queue(dev);
- return test_and_set_bit(nr,
- (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
-}
-
-
-/* Functions for the DEV methods */
-
-static int claw_probe(struct ccwgroup_device *cgdev);
-static void claw_remove_device(struct ccwgroup_device *cgdev);
-static void claw_purge_skb_queue(struct sk_buff_head *q);
-static int claw_new_device(struct ccwgroup_device *cgdev);
-static int claw_shutdown_device(struct ccwgroup_device *cgdev);
-static int claw_tx(struct sk_buff *skb, struct net_device *dev);
-static int claw_change_mtu( struct net_device *dev, int new_mtu);
-static int claw_open(struct net_device *dev);
-static void claw_irq_handler(struct ccw_device *cdev,
- unsigned long intparm, struct irb *irb);
-static void claw_irq_tasklet ( unsigned long data );
-static int claw_release(struct net_device *dev);
-static void claw_write_retry ( struct chbk * p_ch );
-static void claw_write_next ( struct chbk * p_ch );
-static void claw_timer ( struct chbk * p_ch );
-
-/* Functions */
-static int add_claw_reads(struct net_device *dev,
- struct ccwbk* p_first, struct ccwbk* p_last);
-static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
-static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
-static int find_link(struct net_device *dev, char *host_name, char *ws_name );
-static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
-static int init_ccw_bk(struct net_device *dev);
-static void probe_error( struct ccwgroup_device *cgdev);
-static struct net_device_stats *claw_stats(struct net_device *dev);
-static int pages_to_order_of_mag(int num_of_pages);
-static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
-/* sysfs Functions */
-static ssize_t claw_hname_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t claw_hname_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static ssize_t claw_adname_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t claw_adname_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static ssize_t claw_apname_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t claw_apname_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static ssize_t claw_wbuff_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t claw_wbuff_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static ssize_t claw_rbuff_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t claw_rbuff_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-
-/* Functions for System Validate */
-static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
-static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
- __u8 correlator, __u8 rc , char *local_name, char *remote_name);
-static int claw_snd_conn_req(struct net_device *dev, __u8 link);
-static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
-static int claw_snd_sys_validate_rsp(struct net_device *dev,
- struct clawctl * p_ctl, __u32 return_code);
-static int claw_strt_conn_req(struct net_device *dev );
-static void claw_strt_read(struct net_device *dev, int lock);
-static void claw_strt_out_IO(struct net_device *dev);
-static void claw_free_wrt_buf(struct net_device *dev);
-
-/* Functions for unpack reads */
-static void unpack_read(struct net_device *dev);
-
-static int claw_pm_prepare(struct ccwgroup_device *gdev)
-{
- return -EPERM;
-}
-
-/* the root device for claw group devices */
-static struct device *claw_root_dev;
-
-/* ccwgroup table */
-
-static struct ccwgroup_driver claw_group_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "claw",
- },
- .setup = claw_probe,
- .remove = claw_remove_device,
- .set_online = claw_new_device,
- .set_offline = claw_shutdown_device,
- .prepare = claw_pm_prepare,
-};
-
-static struct ccw_device_id claw_ids[] = {
- {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
- {},
-};
-MODULE_DEVICE_TABLE(ccw, claw_ids);
-
-static struct ccw_driver claw_ccw_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "claw",
- },
- .ids = claw_ids,
- .probe = ccwgroup_probe_ccwdev,
- .remove = ccwgroup_remove_ccwdev,
- .int_class = IRQIO_CLW,
-};
-
-static ssize_t claw_driver_group_store(struct device_driver *ddrv,
- const char *buf, size_t count)
-{
- int err;
- err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
- return err ? err : count;
-}
-static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
-
-static struct attribute *claw_drv_attrs[] = {
- &driver_attr_group.attr,
- NULL,
-};
-static struct attribute_group claw_drv_attr_group = {
- .attrs = claw_drv_attrs,
-};
-static const struct attribute_group *claw_drv_attr_groups[] = {
- &claw_drv_attr_group,
- NULL,
-};
-
-/*
-* Key functions
-*/
-
-/*-------------------------------------------------------------------*
- * claw_tx *
- *-------------------------------------------------------------------*/
-
-static int
-claw_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int rc;
- struct claw_privbk *privptr = dev->ml_priv;
- unsigned long saveflags;
- struct chbk *p_ch;
-
- CLAW_DBF_TEXT(4, trace, "claw_tx");
- p_ch = &privptr->channel[WRITE_CHANNEL];
- spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
- rc=claw_hw_tx( skb, dev, 1 );
- spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
- CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
- if (rc)
- rc = NETDEV_TX_BUSY;
- else
- rc = NETDEV_TX_OK;
- return rc;
-} /* end of claw_tx */
-
-/*------------------------------------------------------------------*
- * pack the collect queue into an skb and return it *
- * If not packing just return the top skb from the queue *
- *------------------------------------------------------------------*/
-
-static struct sk_buff *
-claw_pack_skb(struct claw_privbk *privptr)
-{
- struct sk_buff *new_skb,*held_skb;
- struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
- struct claw_env *p_env = privptr->p_env;
- int pkt_cnt,pk_ind,so_far;
-
- new_skb = NULL; /* assume no dice */
- pkt_cnt = 0;
- CLAW_DBF_TEXT(4, trace, "PackSKBe");
- if (!skb_queue_empty(&p_ch->collect_queue)) {
- /* some data */
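-		/* The queueing path in claw_hw_tx() took an extra reference
-		 * (atomic_inc(&skb->users)) before adding the skb to the
-		 * collect_queue, so the free below only drops that extra
-		 * reference and the skb itself remains usable here.
-		 */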
- held_skb = skb_dequeue(&p_ch->collect_queue);
- if (held_skb)
- dev_kfree_skb_any(held_skb);
- else
- return NULL;
- if (p_env->packing != DO_PACKED)
- return held_skb;
-		/* get a new SKB; we will pack at least one */
- new_skb = dev_alloc_skb(p_env->write_size);
- if (new_skb == NULL) {
- atomic_inc(&held_skb->users);
- skb_queue_head(&p_ch->collect_queue,held_skb);
- return NULL;
- }
-		/* we have a packed packet and a place to put it */
- pk_ind = 1;
- so_far = 0;
- new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
- while ((pk_ind) && (held_skb != NULL)) {
- if (held_skb->len+so_far <= p_env->write_size-8) {
- memcpy(skb_put(new_skb,held_skb->len),
- held_skb->data,held_skb->len);
- privptr->stats.tx_packets++;
- so_far += held_skb->len;
- pkt_cnt++;
- dev_kfree_skb_any(held_skb);
- held_skb = skb_dequeue(&p_ch->collect_queue);
- if (held_skb)
- atomic_dec(&held_skb->users);
- } else {
- pk_ind = 0;
- atomic_inc(&held_skb->users);
- skb_queue_head(&p_ch->collect_queue,held_skb);
- }
- }
- }
- CLAW_DBF_TEXT(4, trace, "PackSKBx");
- return new_skb;
-}
-
-/*-------------------------------------------------------------------*
- * claw_change_mtu *
- * *
- *-------------------------------------------------------------------*/
-
-static int
-claw_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct claw_privbk *privptr = dev->ml_priv;
- int buff_size;
- CLAW_DBF_TEXT(4, trace, "setmtu");
- buff_size = privptr->p_env->write_size;
- if ((new_mtu < 60) || (new_mtu > buff_size)) {
- return -EINVAL;
- }
- dev->mtu = new_mtu;
- return 0;
-} /* end of claw_change_mtu */
-
-
-/*-------------------------------------------------------------------*
- * claw_open *
- * *
- *-------------------------------------------------------------------*/
-static int
-claw_open(struct net_device *dev)
-{
-
- int rc;
- int i;
- unsigned long saveflags=0;
- unsigned long parm;
- struct claw_privbk *privptr;
- DECLARE_WAITQUEUE(wait, current);
- struct timer_list timer;
- struct ccwbk *p_buf;
-
- CLAW_DBF_TEXT(4, trace, "open");
- privptr = (struct claw_privbk *)dev->ml_priv;
- /* allocate and initialize CCW blocks */
- if (privptr->buffs_alloc == 0) {
- rc=init_ccw_bk(dev);
- if (rc) {
- CLAW_DBF_TEXT(2, trace, "openmem");
- return -ENOMEM;
- }
- }
- privptr->system_validate_comp=0;
- privptr->release_pend=0;
- if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
- privptr->p_env->read_size=DEF_PACK_BUFSIZE;
- privptr->p_env->write_size=DEF_PACK_BUFSIZE;
- privptr->p_env->packing=PACKING_ASK;
- } else {
- privptr->p_env->packing=0;
- privptr->p_env->read_size=CLAW_FRAME_SIZE;
- privptr->p_env->write_size=CLAW_FRAME_SIZE;
- }
- claw_set_busy(dev);
- tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
- (unsigned long) &privptr->channel[READ_CHANNEL]);
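-	/* For each channel: start a 15 second guard timer, issue a halt
-	 * on the subchannel and sleep until claw_irq_handler() (or the
-	 * guard timer via claw_timer()) wakes us; CLAW_TIMER left set
-	 * means the remote side never answered.
-	 */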
- for ( i = 0; i < 2; i++) {
- CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
- init_waitqueue_head(&privptr->channel[i].wait);
- /* skb_queue_head_init(&p_ch->io_queue); */
- if (i == WRITE_CHANNEL)
- skb_queue_head_init(
- &privptr->channel[WRITE_CHANNEL].collect_queue);
- privptr->channel[i].flag_a = 0;
- privptr->channel[i].IO_active = 0;
- privptr->channel[i].flag &= ~CLAW_TIMER;
- init_timer(&timer);
- timer.function = (void *)claw_timer;
- timer.data = (unsigned long)(&privptr->channel[i]);
- timer.expires = jiffies + 15*HZ;
- add_timer(&timer);
- spin_lock_irqsave(get_ccwdev_lock(
- privptr->channel[i].cdev), saveflags);
- parm = (unsigned long) &privptr->channel[i];
- privptr->channel[i].claw_state = CLAW_START_HALT_IO;
- rc = 0;
- add_wait_queue(&privptr->channel[i].wait, &wait);
- rc = ccw_device_halt(
- (struct ccw_device *)privptr->channel[i].cdev,parm);
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(
- get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
- schedule();
- remove_wait_queue(&privptr->channel[i].wait, &wait);
- if(rc != 0)
- ccw_check_return_code(privptr->channel[i].cdev, rc);
- if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
- del_timer(&timer);
- }
- if ((((privptr->channel[READ_CHANNEL].last_dstat |
- privptr->channel[WRITE_CHANNEL].last_dstat) &
- ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
- (((privptr->channel[READ_CHANNEL].flag |
- privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
- dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
- "%s: remote side is not ready\n", dev->name);
- CLAW_DBF_TEXT(2, trace, "notrdy");
-
- for ( i = 0; i < 2; i++) {
- spin_lock_irqsave(
- get_ccwdev_lock(privptr->channel[i].cdev),
- saveflags);
- parm = (unsigned long) &privptr->channel[i];
- privptr->channel[i].claw_state = CLAW_STOP;
- rc = ccw_device_halt(
- (struct ccw_device *)&privptr->channel[i].cdev,
- parm);
- spin_unlock_irqrestore(
- get_ccwdev_lock(privptr->channel[i].cdev),
- saveflags);
- if (rc != 0) {
- ccw_check_return_code(
- privptr->channel[i].cdev, rc);
- }
- }
- free_pages((unsigned long)privptr->p_buff_ccw,
- (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
- if (privptr->p_env->read_size < PAGE_SIZE) {
- free_pages((unsigned long)privptr->p_buff_read,
- (int)pages_to_order_of_mag(
- privptr->p_buff_read_num));
- }
- else {
- p_buf=privptr->p_read_active_first;
- while (p_buf!=NULL) {
- free_pages((unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perread ));
- p_buf=p_buf->next;
- }
- }
- if (privptr->p_env->write_size < PAGE_SIZE ) {
- free_pages((unsigned long)privptr->p_buff_write,
- (int)pages_to_order_of_mag(
- privptr->p_buff_write_num));
- }
- else {
- p_buf=privptr->p_write_active_first;
- while (p_buf!=NULL) {
- free_pages((unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perwrite ));
- p_buf=p_buf->next;
- }
- }
- privptr->buffs_alloc = 0;
- privptr->channel[READ_CHANNEL].flag = 0x00;
- privptr->channel[WRITE_CHANNEL].flag = 0x00;
- privptr->p_buff_ccw=NULL;
- privptr->p_buff_read=NULL;
- privptr->p_buff_write=NULL;
- claw_clear_busy(dev);
- CLAW_DBF_TEXT(2, trace, "open EIO");
- return -EIO;
- }
-
- /* Send SystemValidate command */
-
- claw_clear_busy(dev);
- CLAW_DBF_TEXT(4, trace, "openok");
- return 0;
-} /* end of claw_open */
-
-/*-------------------------------------------------------------------*
-* *
-* claw_irq_handler *
-* *
-*--------------------------------------------------------------------*/
-static void
-claw_irq_handler(struct ccw_device *cdev,
- unsigned long intparm, struct irb *irb)
-{
- struct chbk *p_ch = NULL;
- struct claw_privbk *privptr = NULL;
- struct net_device *dev = NULL;
- struct claw_env *p_env;
- struct chbk *p_ch_r=NULL;
-
- CLAW_DBF_TEXT(4, trace, "clawirq");
- /* Bypass all 'unsolicited interrupts' */
- privptr = dev_get_drvdata(&cdev->dev);
- if (!privptr) {
- dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
- " IRQ, c-%02x d-%02x\n",
- irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
- CLAW_DBF_TEXT(2, trace, "badirq");
- return;
- }
-
- /* Try to extract channel from driver data. */
- if (privptr->channel[READ_CHANNEL].cdev == cdev)
- p_ch = &privptr->channel[READ_CHANNEL];
- else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
- p_ch = &privptr->channel[WRITE_CHANNEL];
- else {
- dev_warn(&cdev->dev, "The device is not a CLAW device\n");
- CLAW_DBF_TEXT(2, trace, "badchan");
- return;
- }
- CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
-
- dev = (struct net_device *) (p_ch->ndev);
- p_env=privptr->p_env;
-
- /* Copy interruption response block. */
- memcpy(p_ch->irb, irb, sizeof(struct irb));
-
- /* Check for good subchannel return code, otherwise info message */
- if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
- dev_info(&cdev->dev,
- "%s: subchannel check for device: %04x -"
- " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
- dev->name, p_ch->devno,
- irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
- irb->scsw.cmd.cpa);
- CLAW_DBF_TEXT(2, trace, "chanchk");
- /* return; */
- }
-
- /* Check the reason-code of a unit check */
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
- ccw_check_unit_check(p_ch, irb->ecw[0]);
-
- /* State machine to bring the connection up, down and to restart */
- p_ch->last_dstat = irb->scsw.cmd.dstat;
-
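-	/* claw_state values handled below: CLAW_STOP (halt issued by
-	 * claw_release), CLAW_START_HALT_IO (halt issued by claw_open),
-	 * CLAW_START_READ and CLAW_START_WRITE; anything else is
-	 * reported as an unexpected IRQ.
-	 */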
- switch (p_ch->claw_state) {
- case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
- if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
- (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
- (p_ch->irb->scsw.cmd.stctl ==
- (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
- return;
- wake_up(&p_ch->wait); /* wake up claw_release */
- CLAW_DBF_TEXT(4, trace, "stop");
- return;
- case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
- if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
- (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
- (p_ch->irb->scsw.cmd.stctl ==
- (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
- CLAW_DBF_TEXT(4, trace, "haltio");
- return;
- }
- if (p_ch->flag == CLAW_READ) {
- p_ch->claw_state = CLAW_START_READ;
- wake_up(&p_ch->wait); /* wake claw_open (READ)*/
- } else if (p_ch->flag == CLAW_WRITE) {
- p_ch->claw_state = CLAW_START_WRITE;
- /* send SYSTEM_VALIDATE */
- claw_strt_read(dev, LOCK_NO);
- claw_send_control(dev,
- SYSTEM_VALIDATE_REQUEST,
- 0, 0, 0,
- p_env->host_name,
- p_env->adapter_name);
- } else {
- dev_warn(&cdev->dev, "The CLAW device received"
- " an unexpected IRQ, "
- "c-%02x d-%02x\n",
- irb->scsw.cmd.cstat,
- irb->scsw.cmd.dstat);
- return;
- }
- CLAW_DBF_TEXT(4, trace, "haltio");
- return;
- case CLAW_START_READ:
- CLAW_DBF_TEXT(4, trace, "ReadIRQ");
- if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- clear_bit(0, (void *)&p_ch->IO_active);
- if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
- (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
- (p_ch->irb->ecw[0]) == 0) {
- privptr->stats.rx_errors++;
- dev_info(&cdev->dev,
- "%s: Restart is required after remote "
-					"side recovers\n",
- dev->name);
- }
- CLAW_DBF_TEXT(4, trace, "notrdy");
- return;
- }
- if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
- (p_ch->irb->scsw.cmd.dstat == 0)) {
- if (test_and_set_bit(CLAW_BH_ACTIVE,
- (void *)&p_ch->flag_a) == 0)
- tasklet_schedule(&p_ch->tasklet);
- else
- CLAW_DBF_TEXT(4, trace, "PCINoBH");
- CLAW_DBF_TEXT(4, trace, "PCI_read");
- return;
- }
- if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
- (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
- (p_ch->irb->scsw.cmd.stctl ==
- (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
- CLAW_DBF_TEXT(4, trace, "SPend_rd");
- return;
- }
- clear_bit(0, (void *)&p_ch->IO_active);
- claw_clearbit_busy(TB_RETRY, dev);
- if (test_and_set_bit(CLAW_BH_ACTIVE,
- (void *)&p_ch->flag_a) == 0)
- tasklet_schedule(&p_ch->tasklet);
- else
- CLAW_DBF_TEXT(4, trace, "RdBHAct");
- CLAW_DBF_TEXT(4, trace, "RdIRQXit");
- return;
- case CLAW_START_WRITE:
- if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- dev_info(&cdev->dev,
- "%s: Unit Check Occurred in "
- "write channel\n", dev->name);
- clear_bit(0, (void *)&p_ch->IO_active);
- if (p_ch->irb->ecw[0] & 0x80) {
- dev_info(&cdev->dev,
- "%s: Resetting Event "
- "occurred:\n", dev->name);
- init_timer(&p_ch->timer);
- p_ch->timer.function =
- (void *)claw_write_retry;
- p_ch->timer.data = (unsigned long)p_ch;
- p_ch->timer.expires = jiffies + 10*HZ;
- add_timer(&p_ch->timer);
- dev_info(&cdev->dev,
- "%s: write connection "
- "restarting\n", dev->name);
- }
- CLAW_DBF_TEXT(4, trace, "rstrtwrt");
- return;
- }
- if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
- clear_bit(0, (void *)&p_ch->IO_active);
- dev_info(&cdev->dev,
- "%s: Unit Exception "
- "occurred in write channel\n",
- dev->name);
- }
- if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
- (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
- (p_ch->irb->scsw.cmd.stctl ==
- (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
- CLAW_DBF_TEXT(4, trace, "writeUE");
- return;
- }
- clear_bit(0, (void *)&p_ch->IO_active);
- if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
- claw_write_next(p_ch);
- claw_clearbit_busy(TB_TX, dev);
- claw_clear_busy(dev);
- }
- p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
- if (test_and_set_bit(CLAW_BH_ACTIVE,
- (void *)&p_ch_r->flag_a) == 0)
- tasklet_schedule(&p_ch_r->tasklet);
- CLAW_DBF_TEXT(4, trace, "StWtExit");
- return;
- default:
- dev_warn(&cdev->dev,
- "The CLAW device for %s received an unexpected IRQ\n",
- dev->name);
- CLAW_DBF_TEXT(2, trace, "badIRQ");
- return;
- }
-
-} /* end of claw_irq_handler */
-
-
-/*-------------------------------------------------------------------*
-* claw_irq_tasklet *
-* *
-*--------------------------------------------------------------------*/
-static void
-claw_irq_tasklet ( unsigned long data )
-{
- struct chbk * p_ch;
- struct net_device *dev;
-
- p_ch = (struct chbk *) data;
- dev = (struct net_device *)p_ch->ndev;
- CLAW_DBF_TEXT(4, trace, "IRQtask");
- unpack_read(dev);
- clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
- CLAW_DBF_TEXT(4, trace, "TskletXt");
- return;
-} /* end of claw_irq_bh */
-
-/*-------------------------------------------------------------------*
-* claw_release *
-* *
-*--------------------------------------------------------------------*/
-static int
-claw_release(struct net_device *dev)
-{
- int rc;
- int i;
- unsigned long saveflags;
- unsigned long parm;
- struct claw_privbk *privptr;
- DECLARE_WAITQUEUE(wait, current);
- struct ccwbk* p_this_ccw;
- struct ccwbk* p_buf;
-
- if (!dev)
- return 0;
- privptr = (struct claw_privbk *)dev->ml_priv;
- if (!privptr)
- return 0;
- CLAW_DBF_TEXT(4, trace, "release");
- privptr->release_pend=1;
- claw_setbit_busy(TB_STOP,dev);
- for ( i = 1; i >=0 ; i--) {
- spin_lock_irqsave(
- get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
- /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
- privptr->channel[i].claw_state = CLAW_STOP;
- privptr->channel[i].IO_active = 0;
- parm = (unsigned long) &privptr->channel[i];
- if (i == WRITE_CHANNEL)
- claw_purge_skb_queue(
- &privptr->channel[WRITE_CHANNEL].collect_queue);
- rc = ccw_device_halt (privptr->channel[i].cdev, parm);
- if (privptr->system_validate_comp==0x00) /* never opened? */
- init_waitqueue_head(&privptr->channel[i].wait);
- add_wait_queue(&privptr->channel[i].wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(
- get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
- schedule();
- remove_wait_queue(&privptr->channel[i].wait, &wait);
- if (rc != 0) {
- ccw_check_return_code(privptr->channel[i].cdev, rc);
- }
- }
- if (privptr->pk_skb != NULL) {
- dev_kfree_skb_any(privptr->pk_skb);
- privptr->pk_skb = NULL;
- }
- if(privptr->buffs_alloc != 1) {
- CLAW_DBF_TEXT(4, trace, "none2fre");
- return 0;
- }
- CLAW_DBF_TEXT(4, trace, "freebufs");
- if (privptr->p_buff_ccw != NULL) {
- free_pages((unsigned long)privptr->p_buff_ccw,
- (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
- }
- CLAW_DBF_TEXT(4, trace, "freeread");
- if (privptr->p_env->read_size < PAGE_SIZE) {
- if (privptr->p_buff_read != NULL) {
- free_pages((unsigned long)privptr->p_buff_read,
- (int)pages_to_order_of_mag(privptr->p_buff_read_num));
- }
- }
- else {
- p_buf=privptr->p_read_active_first;
- while (p_buf!=NULL) {
- free_pages((unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perread ));
- p_buf=p_buf->next;
- }
- }
- CLAW_DBF_TEXT(4, trace, "freewrit");
- if (privptr->p_env->write_size < PAGE_SIZE ) {
- free_pages((unsigned long)privptr->p_buff_write,
- (int)pages_to_order_of_mag(privptr->p_buff_write_num));
- }
- else {
- p_buf=privptr->p_write_active_first;
- while (p_buf!=NULL) {
- free_pages((unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perwrite ));
- p_buf=p_buf->next;
- }
- }
- CLAW_DBF_TEXT(4, trace, "clearptr");
- privptr->buffs_alloc = 0;
- privptr->p_buff_ccw=NULL;
- privptr->p_buff_read=NULL;
- privptr->p_buff_write=NULL;
- privptr->system_validate_comp=0;
- privptr->release_pend=0;
- /* Remove any writes that were pending and reset all reads */
- p_this_ccw=privptr->p_read_active_first;
- while (p_this_ccw!=NULL) {
- p_this_ccw->header.length=0xffff;
- p_this_ccw->header.opcode=0xff;
- p_this_ccw->header.flag=0x00;
- p_this_ccw=p_this_ccw->next;
- }
-
- while (privptr->p_write_active_first!=NULL) {
- p_this_ccw=privptr->p_write_active_first;
- p_this_ccw->header.flag=CLAW_PENDING;
- privptr->p_write_active_first=p_this_ccw->next;
- p_this_ccw->next=privptr->p_write_free_chain;
- privptr->p_write_free_chain=p_this_ccw;
- ++privptr->write_free_count;
- }
- privptr->p_write_active_last=NULL;
- privptr->mtc_logical_link = -1;
- privptr->mtc_skipping = 1;
- privptr->mtc_offset=0;
-
- if (((privptr->channel[READ_CHANNEL].last_dstat |
- privptr->channel[WRITE_CHANNEL].last_dstat) &
- ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
- dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
- "Deactivating %s completed with incorrect"
- " subchannel status "
- "(read %02x, write %02x)\n",
- dev->name,
- privptr->channel[READ_CHANNEL].last_dstat,
- privptr->channel[WRITE_CHANNEL].last_dstat);
- CLAW_DBF_TEXT(2, trace, "badclose");
- }
- CLAW_DBF_TEXT(4, trace, "rlsexit");
- return 0;
-} /* end of claw_release */
-
-/*-------------------------------------------------------------------*
-* claw_write_retry *
-* *
-*--------------------------------------------------------------------*/
-
-static void
-claw_write_retry ( struct chbk *p_ch )
-{
-
- struct net_device *dev=p_ch->ndev;
-
- CLAW_DBF_TEXT(4, trace, "w_retry");
- if (p_ch->claw_state == CLAW_STOP) {
- return;
- }
- claw_strt_out_IO( dev );
- CLAW_DBF_TEXT(4, trace, "rtry_xit");
- return;
-} /* end of claw_write_retry */
-
-
-/*-------------------------------------------------------------------*
-* claw_write_next *
-* *
-*--------------------------------------------------------------------*/
-
-static void
-claw_write_next ( struct chbk * p_ch )
-{
-
- struct net_device *dev;
- struct claw_privbk *privptr=NULL;
- struct sk_buff *pk_skb;
-
- CLAW_DBF_TEXT(4, trace, "claw_wrt");
- if (p_ch->claw_state == CLAW_STOP)
- return;
- dev = (struct net_device *) p_ch->ndev;
- privptr = (struct claw_privbk *) dev->ml_priv;
- claw_free_wrt_buf( dev );
- if ((privptr->write_free_count > 0) &&
- !skb_queue_empty(&p_ch->collect_queue)) {
- pk_skb = claw_pack_skb(privptr);
- while (pk_skb != NULL) {
- claw_hw_tx(pk_skb, dev, 1);
- if (privptr->write_free_count > 0) {
- pk_skb = claw_pack_skb(privptr);
- } else
- pk_skb = NULL;
- }
- }
- if (privptr->p_write_active_first!=NULL) {
- claw_strt_out_IO(dev);
- }
- return;
-} /* end of claw_write_next */
-
-/*-------------------------------------------------------------------*
-* *
-* claw_timer *
-*--------------------------------------------------------------------*/
-
-static void
-claw_timer ( struct chbk * p_ch )
-{
- CLAW_DBF_TEXT(4, trace, "timer");
- p_ch->flag |= CLAW_TIMER;
- wake_up(&p_ch->wait);
- return;
-} /* end of claw_timer */
-
-/*
-*
-* functions
-*/
-
-
-/*-------------------------------------------------------------------*
-* *
-* pages_to_order_of_mag *
-* *
-*   takes a number of pages from 1 to 512 and returns the            *
-*   base-2 order that get_free_pages() expects (i.e. log2 of the     *
-*   page count); get_free_pages() supports a maximum order of 9      *
-*--------------------------------------------------------------------*/
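-/* e.g. 1 page -> order 0, 2 -> 1, 3..4 -> 2, 5..8 -> 3, ..., 257..512 -> 9 */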
-
-static int
-pages_to_order_of_mag(int num_of_pages)
-{
- int order_of_mag=1; /* assume 2 pages */
- int nump;
-
- CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
- if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
- /* 512 pages = 2Meg on 4k page systems */
- if (num_of_pages >= 512) {return 9; }
-	/* we have two or more pages, so the order is at least 1 */
- for (nump=2 ;nump <= 512;nump*=2) {
- if (num_of_pages <= nump)
- break;
- order_of_mag +=1;
- }
- if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
- CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
- return order_of_mag;
-}
-
-/*-------------------------------------------------------------------*
-* *
-* add_claw_reads *
-* *
-*--------------------------------------------------------------------*/
-static int
-add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
- struct ccwbk* p_last)
-{
- struct claw_privbk *privptr;
- struct ccw1 temp_ccw;
- struct endccw * p_end;
- CLAW_DBF_TEXT(4, trace, "addreads");
- privptr = dev->ml_priv;
- p_end = privptr->p_end_ccw;
-
-	/* first CCW and last CCW contain a new set of read channel programs
-	 * to append to the running channel program
- */
- if ( p_first==NULL) {
- CLAW_DBF_TEXT(4, trace, "addexit");
- return 0;
- }
-
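-	/* The two ending-CCW sets (read1/read2) are used alternately:
-	 * each call rebuilds the currently inactive set and points the
-	 * TIC CCWs of the new last block at it, so the channel never
-	 * fetches an ending CCW that is only half updated.
-	 */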
- /* set up ending CCW sequence for this segment */
- if (p_end->read1) {
- p_end->read1=0x00; /* second ending CCW is now active */
- /* reset ending CCWs and setup TIC CCWs */
- p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
- p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
- p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
- p_end->read2_nop2.cda=0;
- p_end->read2_nop2.count=1;
- }
- else {
- p_end->read1=0x01; /* first ending CCW is now active */
- /* reset ending CCWs and setup TIC CCWs */
- p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
- p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
- p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
- p_end->read1_nop2.cda=0;
- p_end->read1_nop2.count=1;
- }
-
- if ( privptr-> p_read_active_first ==NULL ) {
- privptr->p_read_active_first = p_first; /* set new first */
- privptr->p_read_active_last = p_last; /* set new last */
- }
- else {
-
- /* set up TIC ccw */
- temp_ccw.cda= (__u32)__pa(&p_first->read);
- temp_ccw.count=0;
- temp_ccw.flags=0;
- temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
-
-
- if (p_end->read1) {
-
- /* first set of CCW's is chained to the new read */
- /* chain, so the second set is chained to the active chain. */
- /* Therefore modify the second set to point to the new */
-		/* read chain; set up TIC CCWs */
- /* make sure we update the CCW so channel doesn't fetch it */
- /* when it's only half done */
- memcpy( &p_end->read2_nop2, &temp_ccw ,
- sizeof(struct ccw1));
- privptr->p_read_active_last->r_TIC_1.cda=
- (__u32)__pa(&p_first->read);
- privptr->p_read_active_last->r_TIC_2.cda=
- (__u32)__pa(&p_first->read);
- }
- else {
- /* make sure we update the CCW so channel doesn't */
- /* fetch it when it is only half done */
- memcpy( &p_end->read1_nop2, &temp_ccw ,
- sizeof(struct ccw1));
- privptr->p_read_active_last->r_TIC_1.cda=
- (__u32)__pa(&p_first->read);
- privptr->p_read_active_last->r_TIC_2.cda=
- (__u32)__pa(&p_first->read);
- }
- /* chain in new set of blocks */
- privptr->p_read_active_last->next = p_first;
- privptr->p_read_active_last=p_last;
- } /* end of if ( privptr-> p_read_active_first ==NULL) */
- CLAW_DBF_TEXT(4, trace, "addexit");
- return 0;
-} /* end of add_claw_reads */
-
-/*-------------------------------------------------------------------*
- * ccw_check_return_code *
- * *
- *-------------------------------------------------------------------*/
-
-static void
-ccw_check_return_code(struct ccw_device *cdev, int return_code)
-{
- CLAW_DBF_TEXT(4, trace, "ccwret");
- if (return_code != 0) {
- switch (return_code) {
- case -EBUSY: /* BUSY is a transient state no action needed */
- break;
- case -ENODEV:
- dev_err(&cdev->dev, "The remote channel adapter is not"
- " available\n");
- break;
- case -EINVAL:
- dev_err(&cdev->dev,
- "The status of the remote channel adapter"
- " is not valid\n");
- break;
- default:
- dev_err(&cdev->dev, "The common device layer"
- " returned error code %d\n",
- return_code);
- }
- }
- CLAW_DBF_TEXT(4, trace, "ccwret");
-} /* end of ccw_check_return_code */
-
-/*-------------------------------------------------------------------*
-* ccw_check_unit_check *
-*--------------------------------------------------------------------*/
-
-static void
-ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
-{
- struct net_device *ndev = p_ch->ndev;
- struct device *dev = &p_ch->cdev->dev;
-
- CLAW_DBF_TEXT(4, trace, "unitchek");
- dev_warn(dev, "The communication peer of %s disconnected\n",
- ndev->name);
-
- if (sense & 0x40) {
- if (sense & 0x01) {
- dev_warn(dev, "The remote channel adapter for"
- " %s has been reset\n",
- ndev->name);
- }
- } else if (sense & 0x20) {
- if (sense & 0x04) {
- dev_warn(dev, "A data streaming timeout occurred"
- " for %s\n",
- ndev->name);
- } else if (sense & 0x10) {
- dev_warn(dev, "The remote channel adapter for %s"
- " is faulty\n",
- ndev->name);
- } else {
- dev_warn(dev, "A data transfer parity error occurred"
- " for %s\n",
- ndev->name);
- }
- } else if (sense & 0x10) {
- dev_warn(dev, "A read data parity error occurred"
- " for %s\n",
- ndev->name);
- }
-
-} /* end of ccw_check_unit_check */
-
-/*-------------------------------------------------------------------*
-* find_link *
-*--------------------------------------------------------------------*/
-static int
-find_link(struct net_device *dev, char *host_name, char *ws_name )
-{
- struct claw_privbk *privptr;
- struct claw_env *p_env;
- int rc=0;
-
- CLAW_DBF_TEXT(2, setup, "findlink");
- privptr = dev->ml_priv;
- p_env=privptr->p_env;
- switch (p_env->packing)
- {
- case PACKING_ASK:
- if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
- (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
- rc = EINVAL;
- break;
- case DO_PACKED:
- case PACK_SEND:
- if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
- (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
- rc = EINVAL;
- break;
- default:
- if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
- (memcmp(p_env->api_type , ws_name, 8)!=0))
- rc = EINVAL;
- break;
- }
-
- return rc;
-} /* end of find_link */
-
-/*-------------------------------------------------------------------*
- * claw_hw_tx *
- * *
- * *
- *-------------------------------------------------------------------*/
-
-static int
-claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
-{
- int rc=0;
- struct claw_privbk *privptr;
- struct ccwbk *p_this_ccw;
- struct ccwbk *p_first_ccw;
- struct ccwbk *p_last_ccw;
- __u32 numBuffers;
- signed long len_of_data;
- unsigned long bytesInThisBuffer;
- unsigned char *pDataAddress;
- struct endccw *pEnd;
- struct ccw1 tempCCW;
- struct claw_env *p_env;
- struct clawph *pk_head;
- struct chbk *ch;
-
- CLAW_DBF_TEXT(4, trace, "hw_tx");
- privptr = (struct claw_privbk *)(dev->ml_priv);
- p_env =privptr->p_env;
-	claw_free_wrt_buf(dev);	/* Clean up free chain if possible */
- /* scan the write queue to free any completed write packets */
- p_first_ccw=NULL;
- p_last_ccw=NULL;
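-	/* If packing has been negotiated and this skb does not already
-	 * carry a packing header (cb[1] == 'P'), prepend a struct clawph
-	 * and pad the frame to a 4-byte boundary.
-	 */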
- if ((p_env->packing >= PACK_SEND) &&
- (skb->cb[1] != 'P')) {
- skb_push(skb,sizeof(struct clawph));
- pk_head=(struct clawph *)skb->data;
- pk_head->len=skb->len-sizeof(struct clawph);
- if (pk_head->len%4) {
- pk_head->len+= 4-(pk_head->len%4);
- skb_pad(skb,4-(pk_head->len%4));
- skb_put(skb,4-(pk_head->len%4));
- }
- if (p_env->packing == DO_PACKED)
- pk_head->link_num = linkid;
- else
- pk_head->link_num = 0;
- pk_head->flag = 0x00;
- skb_pad(skb,4);
- skb->cb[1] = 'P';
- }
- if (linkid == 0) {
- if (claw_check_busy(dev)) {
- if (privptr->write_free_count!=0) {
- claw_clear_busy(dev);
- }
- else {
- claw_strt_out_IO(dev );
- claw_free_wrt_buf( dev );
- if (privptr->write_free_count==0) {
- ch = &privptr->channel[WRITE_CHANNEL];
- atomic_inc(&skb->users);
- skb_queue_tail(&ch->collect_queue, skb);
- goto Done;
- }
- else {
- claw_clear_busy(dev);
- }
- }
- }
- /* tx lock */
- if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
- ch = &privptr->channel[WRITE_CHANNEL];
- atomic_inc(&skb->users);
- skb_queue_tail(&ch->collect_queue, skb);
- claw_strt_out_IO(dev );
- rc=-EBUSY;
- goto Done2;
- }
- }
- /* See how many write buffers are required to hold this data */
- numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
-
- /* If that number of buffers isn't available, give up for now */
- if (privptr->write_free_count < numBuffers ||
- privptr->p_write_free_chain == NULL ) {
-
- claw_setbit_busy(TB_NOBUFFER,dev);
- ch = &privptr->channel[WRITE_CHANNEL];
- atomic_inc(&skb->users);
- skb_queue_tail(&ch->collect_queue, skb);
- CLAW_DBF_TEXT(2, trace, "clawbusy");
- goto Done2;
- }
- pDataAddress=skb->data;
- len_of_data=skb->len;
-
- while (len_of_data > 0) {
- p_this_ccw=privptr->p_write_free_chain; /* get a block */
- if (p_this_ccw == NULL) { /* lost the race */
- ch = &privptr->channel[WRITE_CHANNEL];
- atomic_inc(&skb->users);
- skb_queue_tail(&ch->collect_queue, skb);
- goto Done2;
- }
- privptr->p_write_free_chain=p_this_ccw->next;
- p_this_ccw->next=NULL;
- --privptr->write_free_count; /* -1 */
- if (len_of_data >= privptr->p_env->write_size)
- bytesInThisBuffer = privptr->p_env->write_size;
- else
- bytesInThisBuffer = len_of_data;
- memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
- len_of_data-=bytesInThisBuffer;
- pDataAddress+=(unsigned long)bytesInThisBuffer;
- /* setup write CCW */
- p_this_ccw->write.cmd_code = (linkid * 8) +1;
- if (len_of_data>0) {
- p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
- }
- p_this_ccw->write.count=bytesInThisBuffer;
- /* now add to end of this chain */
- if (p_first_ccw==NULL) {
- p_first_ccw=p_this_ccw;
- }
- if (p_last_ccw!=NULL) {
- p_last_ccw->next=p_this_ccw;
- /* set up TIC ccws */
- p_last_ccw->w_TIC_1.cda=
- (__u32)__pa(&p_this_ccw->write);
- }
- p_last_ccw=p_this_ccw; /* save new last block */
- }
-
- /* FirstCCW and LastCCW now contain a new set of write channel
- * programs to append to the running channel program
- */
-
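-	/* As in add_claw_reads(), two alternating ending-CCW sets
-	 * (write1/write2) let the new write chain be linked into a
-	 * possibly running channel program without the channel seeing
-	 * a half-updated CCW.
-	 */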
- if (p_first_ccw!=NULL) {
- /* setup ending ccw sequence for this segment */
- pEnd=privptr->p_end_ccw;
- if (pEnd->write1) {
- pEnd->write1=0x00; /* second end ccw is now active */
- /* set up Tic CCWs */
- p_last_ccw->w_TIC_1.cda=
- (__u32)__pa(&pEnd->write2_nop1);
- pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- pEnd->write2_nop2.flags =
- CCW_FLAG_SLI | CCW_FLAG_SKIP;
- pEnd->write2_nop2.cda=0;
- pEnd->write2_nop2.count=1;
- }
- else { /* end of if (pEnd->write1)*/
- pEnd->write1=0x01; /* first end ccw is now active */
- /* set up Tic CCWs */
- p_last_ccw->w_TIC_1.cda=
- (__u32)__pa(&pEnd->write1_nop1);
- pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- pEnd->write1_nop2.flags =
- CCW_FLAG_SLI | CCW_FLAG_SKIP;
- pEnd->write1_nop2.cda=0;
- pEnd->write1_nop2.count=1;
- } /* end if if (pEnd->write1) */
-
- if (privptr->p_write_active_first==NULL ) {
- privptr->p_write_active_first=p_first_ccw;
- privptr->p_write_active_last=p_last_ccw;
- }
- else {
- /* set up Tic CCWs */
-
- tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
- tempCCW.count=0;
- tempCCW.flags=0;
- tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
-
- if (pEnd->write1) {
-
- /*
- * first set of ending CCW's is chained to the new write
- * chain, so the second set is chained to the active chain
-			 * Therefore modify the second set to point to the new write chain.
- * make sure we update the CCW atomically
- * so channel does not fetch it when it's only half done
- */
- memcpy( &pEnd->write2_nop2, &tempCCW ,
- sizeof(struct ccw1));
- privptr->p_write_active_last->w_TIC_1.cda=
- (__u32)__pa(&p_first_ccw->write);
- }
- else {
-
- /*make sure we update the CCW atomically
- *so channel does not fetch it when it's only half done
- */
- memcpy(&pEnd->write1_nop2, &tempCCW ,
- sizeof(struct ccw1));
- privptr->p_write_active_last->w_TIC_1.cda=
- (__u32)__pa(&p_first_ccw->write);
-
- } /* end if if (pEnd->write1) */
-
- privptr->p_write_active_last->next=p_first_ccw;
- privptr->p_write_active_last=p_last_ccw;
- }
-
- } /* endif (p_first_ccw!=NULL) */
- dev_kfree_skb_any(skb);
- claw_strt_out_IO(dev );
- /* if write free count is zero , set NOBUFFER */
- if (privptr->write_free_count==0) {
- claw_setbit_busy(TB_NOBUFFER,dev);
- }
-Done2:
- claw_clearbit_busy(TB_TX,dev);
-Done:
- return(rc);
-} /* end of claw_hw_tx */
-
-/*-------------------------------------------------------------------*
-* *
-* init_ccw_bk *
-* *
-*--------------------------------------------------------------------*/
-
-static int
-init_ccw_bk(struct net_device *dev)
-{
-
- __u32 ccw_blocks_required;
- __u32 ccw_blocks_perpage;
- __u32 ccw_pages_required;
- __u32 claw_reads_perpage=1;
- __u32 claw_read_pages;
- __u32 claw_writes_perpage=1;
- __u32 claw_write_pages;
- void *p_buff=NULL;
- struct ccwbk*p_free_chain;
- struct ccwbk*p_buf;
- struct ccwbk*p_last_CCWB;
- struct ccwbk*p_first_CCWB;
- struct endccw *p_endccw=NULL;
- addr_t real_address;
- struct claw_privbk *privptr = dev->ml_priv;
- struct clawh *pClawH=NULL;
- addr_t real_TIC_address;
- int i,j;
- CLAW_DBF_TEXT(4, trace, "init_ccw");
-
- /* initialize statistics field */
- privptr->active_link_ID=0;
- /* initialize ccwbk pointers */
- privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
- privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
- privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
- privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
- privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
- privptr->p_end_ccw=NULL; /* pointer to ending ccw */
- privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
- privptr->buffs_alloc = 0;
- memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
- memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
- /* initialize free write ccwbk counter */
- privptr->write_free_count=0; /* number of free bufs on write chain */
- p_last_CCWB = NULL;
- p_first_CCWB= NULL;
- /*
- * We need 1 CCW block for each read buffer, 1 for each
- * write buffer, plus 1 for ClawSignalBlock
- */
- ccw_blocks_required =
- privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
- /*
- * compute number of CCW blocks that will fit in a page
- */
- ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
- ccw_pages_required=
- DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
-
- /*
-	*  read and write sizes are set by two constants in claw.h,
-	*  4k and 32k.  Unpacked values other than 4k are not going to
-	*  provide good performance.  When packing is used, 32k
-	*  buffers are supported.
- */
- if (privptr->p_env->read_size < PAGE_SIZE) {
- claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
- claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
- claw_reads_perpage);
- }
- else { /* > or equal */
- privptr->p_buff_pages_perread =
- DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
- claw_read_pages = privptr->p_env->read_buffers *
- privptr->p_buff_pages_perread;
- }
- if (privptr->p_env->write_size < PAGE_SIZE) {
- claw_writes_perpage =
- PAGE_SIZE / privptr->p_env->write_size;
- claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
- claw_writes_perpage);
-
- }
- else { /* > or equal */
- privptr->p_buff_pages_perwrite =
- DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
- claw_write_pages = privptr->p_env->write_buffers *
- privptr->p_buff_pages_perwrite;
- }
- /*
- * allocate ccw_pages_required
- */
- if (privptr->p_buff_ccw==NULL) {
- privptr->p_buff_ccw=
- (void *)__get_free_pages(__GFP_DMA,
- (int)pages_to_order_of_mag(ccw_pages_required ));
- if (privptr->p_buff_ccw==NULL) {
- return -ENOMEM;
- }
- privptr->p_buff_ccw_num=ccw_pages_required;
- }
- memset(privptr->p_buff_ccw, 0x00,
- privptr->p_buff_ccw_num * PAGE_SIZE);
-
- /*
- * obtain ending ccw block address
- *
- */
- privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
- real_address = (__u32)__pa(privptr->p_end_ccw);
- /* Initialize ending CCW block */
- p_endccw=privptr->p_end_ccw;
- p_endccw->real=real_address;
- p_endccw->write1=0x00;
- p_endccw->read1=0x00;
-
- /* write1_nop1 */
- p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
- p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_endccw->write1_nop1.count = 1;
- p_endccw->write1_nop1.cda = 0;
-
- /* write1_nop2 */
- p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
- p_endccw->write1_nop2.count = 1;
- p_endccw->write1_nop2.cda = 0;
-
- /* write2_nop1 */
- p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
- p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_endccw->write2_nop1.count = 1;
- p_endccw->write2_nop1.cda = 0;
-
- /* write2_nop2 */
- p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
- p_endccw->write2_nop2.count = 1;
- p_endccw->write2_nop2.cda = 0;
-
- /* read1_nop1 */
- p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
- p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_endccw->read1_nop1.count = 1;
- p_endccw->read1_nop1.cda = 0;
-
- /* read1_nop2 */
- p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
- p_endccw->read1_nop2.count = 1;
- p_endccw->read1_nop2.cda = 0;
-
- /* read2_nop1 */
- p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
- p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_endccw->read2_nop1.count = 1;
- p_endccw->read2_nop1.cda = 0;
-
- /* read2_nop2 */
- p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
- p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
- p_endccw->read2_nop2.count = 1;
- p_endccw->read2_nop2.cda = 0;
-
- /*
- * Build a chain of CCWs
- *
- */
- p_buff=privptr->p_buff_ccw;
-
- p_free_chain=NULL;
- for (i=0 ; i < ccw_pages_required; i++ ) {
- real_address = (__u32)__pa(p_buff);
- p_buf=p_buff;
- for (j=0 ; j < ccw_blocks_perpage ; j++) {
- p_buf->next = p_free_chain;
- p_free_chain = p_buf;
- p_buf->real=(__u32)__pa(p_buf);
- ++p_buf;
- }
- p_buff+=PAGE_SIZE;
- }
- /*
- * Initialize ClawSignalBlock
- *
- */
- if (privptr->p_claw_signal_blk==NULL) {
- privptr->p_claw_signal_blk=p_free_chain;
- p_free_chain=p_free_chain->next;
- pClawH=(struct clawh *)privptr->p_claw_signal_blk;
- pClawH->length=0xffff;
- pClawH->opcode=0xff;
- pClawH->flag=CLAW_BUSY;
- }
-
- /*
- * allocate write_pages_required and add to free chain
- */
- if (privptr->p_buff_write==NULL) {
- if (privptr->p_env->write_size < PAGE_SIZE) {
- privptr->p_buff_write=
- (void *)__get_free_pages(__GFP_DMA,
- (int)pages_to_order_of_mag(claw_write_pages ));
- if (privptr->p_buff_write==NULL) {
- privptr->p_buff_ccw=NULL;
- return -ENOMEM;
- }
- /*
- * Build CLAW write free chain
- *
- */
-
- memset(privptr->p_buff_write, 0x00,
- ccw_pages_required * PAGE_SIZE);
- privptr->p_write_free_chain=NULL;
-
- p_buff=privptr->p_buff_write;
-
- for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
- p_buf = p_free_chain; /* get a CCW */
- p_free_chain = p_buf->next;
- p_buf->next =privptr->p_write_free_chain;
- privptr->p_write_free_chain = p_buf;
- p_buf-> p_buffer = (struct clawbuf *)p_buff;
- p_buf-> write.cda = (__u32)__pa(p_buff);
- p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
- p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> w_read_FF.count = 1;
- p_buf-> w_read_FF.cda =
- (__u32)__pa(&p_buf-> header.flag);
- p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
- p_buf-> w_TIC_1.flags = 0;
- p_buf-> w_TIC_1.count = 0;
-
- if (((unsigned long)p_buff +
- privptr->p_env->write_size) >=
- ((unsigned long)(p_buff+2*
- (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
- p_buff = p_buff+privptr->p_env->write_size;
- }
- }
- }
-	else /* Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */
- {
- privptr->p_write_free_chain=NULL;
- for (i = 0; i< privptr->p_env->write_buffers ; i++) {
- p_buff=(void *)__get_free_pages(__GFP_DMA,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perwrite) );
- if (p_buff==NULL) {
- free_pages((unsigned long)privptr->p_buff_ccw,
- (int)pages_to_order_of_mag(
- privptr->p_buff_ccw_num));
- privptr->p_buff_ccw=NULL;
- p_buf=privptr->p_buff_write;
- while (p_buf!=NULL) {
- free_pages((unsigned long)
- p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perwrite));
- p_buf=p_buf->next;
- }
- return -ENOMEM;
- } /* Error on get_pages */
- memset(p_buff, 0x00, privptr->p_env->write_size );
- p_buf = p_free_chain;
- p_free_chain = p_buf->next;
- p_buf->next = privptr->p_write_free_chain;
- privptr->p_write_free_chain = p_buf;
- privptr->p_buff_write = p_buf;
- p_buf->p_buffer=(struct clawbuf *)p_buff;
- p_buf-> write.cda = (__u32)__pa(p_buff);
- p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
- p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> w_read_FF.count = 1;
- p_buf-> w_read_FF.cda =
- (__u32)__pa(&p_buf-> header.flag);
- p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
- p_buf-> w_TIC_1.flags = 0;
- p_buf-> w_TIC_1.count = 0;
- } /* for all write_buffers */
-
- } /* else buffers are PAGE_SIZE or bigger */
-
- }
- privptr->p_buff_write_num=claw_write_pages;
- privptr->write_free_count=privptr->p_env->write_buffers;
-
-
- /*
- * allocate read_pages_required and chain to free chain
- */
- if (privptr->p_buff_read==NULL) {
- if (privptr->p_env->read_size < PAGE_SIZE) {
- privptr->p_buff_read=
- (void *)__get_free_pages(__GFP_DMA,
- (int)pages_to_order_of_mag(claw_read_pages) );
- if (privptr->p_buff_read==NULL) {
- free_pages((unsigned long)privptr->p_buff_ccw,
- (int)pages_to_order_of_mag(
- privptr->p_buff_ccw_num));
-			/* free the write pages; size is < page size */
- free_pages((unsigned long)privptr->p_buff_write,
- (int)pages_to_order_of_mag(
- privptr->p_buff_write_num));
- privptr->p_buff_ccw=NULL;
- privptr->p_buff_write=NULL;
- return -ENOMEM;
- }
- memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
- privptr->p_buff_read_num=claw_read_pages;
- /*
- * Build CLAW read free chain
- *
- */
- p_buff=privptr->p_buff_read;
- for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
- p_buf = p_free_chain;
- p_free_chain = p_buf->next;
-
- if (p_last_CCWB==NULL) {
- p_buf->next=NULL;
- real_TIC_address=0;
- p_last_CCWB=p_buf;
- }
- else {
- p_buf->next=p_first_CCWB;
- real_TIC_address=
- (__u32)__pa(&p_first_CCWB -> read );
- }
-
- p_first_CCWB=p_buf;
-
- p_buf->p_buffer=(struct clawbuf *)p_buff;
- /* initialize read command */
- p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
- p_buf-> read.cda = (__u32)__pa(p_buff);
- p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> read.count = privptr->p_env->read_size;
-
- /* initialize read_h command */
- p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
- p_buf-> read_h.cda =
- (__u32)__pa(&(p_buf->header));
- p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> read_h.count = sizeof(struct clawh);
-
- /* initialize Signal command */
- p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
- p_buf-> signal.cda =
- (__u32)__pa(&(pClawH->flag));
- p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> signal.count = 1;
-
- /* initialize r_TIC_1 command */
- p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
- p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
- p_buf-> r_TIC_1.flags = 0;
- p_buf-> r_TIC_1.count = 0;
-
- /* initialize r_read_FF command */
- p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
- p_buf-> r_read_FF.cda =
- (__u32)__pa(&(pClawH->flag));
- p_buf-> r_read_FF.flags =
- CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
- p_buf-> r_read_FF.count = 1;
-
- /* initialize r_TIC_2 */
- memcpy(&p_buf->r_TIC_2,
- &p_buf->r_TIC_1, sizeof(struct ccw1));
-
- /* initialize Header */
- p_buf->header.length=0xffff;
- p_buf->header.opcode=0xff;
- p_buf->header.flag=CLAW_PENDING;
-
- if (((unsigned long)p_buff+privptr->p_env->read_size) >=
- ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
- -1)
- & PAGE_MASK)) {
- p_buff= p_buff+privptr->p_env->read_size;
- }
- else {
- p_buff=
- (void *)((unsigned long)
- (p_buff+2*(privptr->p_env->read_size)-1)
- & PAGE_MASK) ;
- }
- } /* for read_buffers */
- } /* read_size < PAGE_SIZE */
- else { /* read Size >= PAGE_SIZE */
- for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
- p_buff = (void *)__get_free_pages(__GFP_DMA,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perread));
- if (p_buff==NULL) {
- free_pages((unsigned long)privptr->p_buff_ccw,
- (int)pages_to_order_of_mag(privptr->
- p_buff_ccw_num));
- /* free the write pages */
- p_buf=privptr->p_buff_write;
- while (p_buf!=NULL) {
- free_pages(
- (unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perwrite));
- p_buf=p_buf->next;
- }
- /* free any read pages already alloc */
- p_buf=privptr->p_buff_read;
- while (p_buf!=NULL) {
- free_pages(
- (unsigned long)p_buf->p_buffer,
- (int)pages_to_order_of_mag(
- privptr->p_buff_pages_perread));
- p_buf=p_buf->next;
- }
- privptr->p_buff_ccw=NULL;
- privptr->p_buff_write=NULL;
- return -ENOMEM;
- }
- memset(p_buff, 0x00, privptr->p_env->read_size);
- p_buf = p_free_chain;
- privptr->p_buff_read = p_buf;
- p_free_chain = p_buf->next;
-
- if (p_last_CCWB==NULL) {
- p_buf->next=NULL;
- real_TIC_address=0;
- p_last_CCWB=p_buf;
- }
- else {
- p_buf->next=p_first_CCWB;
- real_TIC_address=
- (addr_t)__pa(
- &p_first_CCWB -> read );
- }
-
- p_first_CCWB=p_buf;
- /* save buff address */
- p_buf->p_buffer=(struct clawbuf *)p_buff;
- /* initialize read command */
- p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
- p_buf-> read.cda = (__u32)__pa(p_buff);
- p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> read.count = privptr->p_env->read_size;
-
- /* initialize read_h command */
- p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
- p_buf-> read_h.cda =
- (__u32)__pa(&(p_buf->header));
- p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> read_h.count = sizeof(struct clawh);
-
- /* initialize Signal command */
- p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
- p_buf-> signal.cda =
- (__u32)__pa(&(pClawH->flag));
- p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
- p_buf-> signal.count = 1;
-
- /* initialize r_TIC_1 command */
- p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
- p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
- p_buf-> r_TIC_1.flags = 0;
- p_buf-> r_TIC_1.count = 0;
-
- /* initialize r_read_FF command */
- p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
- p_buf-> r_read_FF.cda =
- (__u32)__pa(&(pClawH->flag));
- p_buf-> r_read_FF.flags =
- CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
- p_buf-> r_read_FF.count = 1;
-
- /* initialize r_TIC_2 */
- memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
- sizeof(struct ccw1));
-
- /* initialize Header */
- p_buf->header.length=0xffff;
- p_buf->header.opcode=0xff;
- p_buf->header.flag=CLAW_PENDING;
-
- } /* For read_buffers */
- } /* read_size >= PAGE_SIZE */
- } /* pBuffread = NULL */
- add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
- privptr->buffs_alloc = 1;
-
- return 0;
-} /* end of init_ccw_bk */
-
-/*-------------------------------------------------------------------*
-* *
-* probe_error *
-* *
-*--------------------------------------------------------------------*/
-
-static void
-probe_error( struct ccwgroup_device *cgdev)
-{
- struct claw_privbk *privptr;
-
- CLAW_DBF_TEXT(4, trace, "proberr");
- privptr = dev_get_drvdata(&cgdev->dev);
- if (privptr != NULL) {
- dev_set_drvdata(&cgdev->dev, NULL);
- kfree(privptr->p_env);
- kfree(privptr->p_mtc_envelope);
- kfree(privptr);
- }
-} /* probe_error */
-
-/*-------------------------------------------------------------------*
-* claw_process_control *
-* *
-* *
-*--------------------------------------------------------------------*/
-
-static int
-claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
-{
-
- struct clawbuf *p_buf;
- struct clawctl ctlbk;
- struct clawctl *p_ctlbk;
- char temp_host_name[8];
- char temp_ws_name[8];
- struct claw_privbk *privptr;
- struct claw_env *p_env;
- struct sysval *p_sysval;
- struct conncmd *p_connect=NULL;
- int rc;
- struct chbk *p_ch = NULL;
- struct device *tdev;
- CLAW_DBF_TEXT(2, setup, "clw_cntl");
-	udelay(1000);  /* Wait a ms for the control packets to
-			* catch up to each other */
- privptr = dev->ml_priv;
- p_env=privptr->p_env;
- tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
- memcpy( &temp_host_name, p_env->host_name, 8);
- memcpy( &temp_ws_name, p_env->adapter_name , 8);
- dev_info(tdev, "%s: CLAW device %.8s: "
- "Received Control Packet\n",
- dev->name, temp_ws_name);
- if (privptr->release_pend==1) {
- return 0;
- }
- p_buf=p_ccw->p_buffer;
- p_ctlbk=&ctlbk;
- if (p_env->packing == DO_PACKED) { /* packing in progress?*/
- memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
- } else {
- memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
- }
- switch (p_ctlbk->command)
- {
- case SYSTEM_VALIDATE_REQUEST:
- if (p_ctlbk->version != CLAW_VERSION_ID) {
- claw_snd_sys_validate_rsp(dev, p_ctlbk,
- CLAW_RC_WRONG_VERSION);
- dev_warn(tdev, "The communication peer of %s"
- " uses an incorrect API version %d\n",
- dev->name, p_ctlbk->version);
- }
- p_sysval = (struct sysval *)&(p_ctlbk->data);
- dev_info(tdev, "%s: Recv Sys Validate Request: "
- "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
- "Host name=%.8s\n",
- dev->name, p_ctlbk->version,
- p_ctlbk->linkid,
- p_ctlbk->correlator,
- p_sysval->WS_name,
- p_sysval->host_name);
- if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
- claw_snd_sys_validate_rsp(dev, p_ctlbk,
- CLAW_RC_NAME_MISMATCH);
- CLAW_DBF_TEXT(2, setup, "HSTBAD");
- CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
- CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
- dev_warn(tdev,
- "Host name %s for %s does not match the"
- " remote adapter name %s\n",
- p_sysval->host_name,
- dev->name,
- temp_host_name);
- }
- if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
- claw_snd_sys_validate_rsp(dev, p_ctlbk,
- CLAW_RC_NAME_MISMATCH);
- CLAW_DBF_TEXT(2, setup, "WSNBAD");
- CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
- CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
- dev_warn(tdev, "Adapter name %s for %s does not match"
- " the remote host name %s\n",
- p_sysval->WS_name,
- dev->name,
- temp_ws_name);
- }
- if ((p_sysval->write_frame_size < p_env->write_size) &&
- (p_env->packing == 0)) {
- claw_snd_sys_validate_rsp(dev, p_ctlbk,
- CLAW_RC_HOST_RCV_TOO_SMALL);
- dev_warn(tdev,
- "The local write buffer is smaller than the"
- " remote read buffer\n");
- CLAW_DBF_TEXT(2, setup, "wrtszbad");
- }
- if ((p_sysval->read_frame_size < p_env->read_size) &&
- (p_env->packing == 0)) {
- claw_snd_sys_validate_rsp(dev, p_ctlbk,
- CLAW_RC_HOST_RCV_TOO_SMALL);
- dev_warn(tdev,
- "The local read buffer is smaller than the"
- " remote write buffer\n");
- CLAW_DBF_TEXT(2, setup, "rdsizbad");
- }
- claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
- dev_info(tdev,
- "CLAW device %.8s: System validate"
- " completed.\n", temp_ws_name);
- dev_info(tdev,
- "%s: sys Validate Rsize:%d Wsize:%d\n",
- dev->name, p_sysval->read_frame_size,
- p_sysval->write_frame_size);
- privptr->system_validate_comp = 1;
- if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
- p_env->packing = PACKING_ASK;
- claw_strt_conn_req(dev);
- break;
- case SYSTEM_VALIDATE_RESPONSE:
- p_sysval = (struct sysval *)&(p_ctlbk->data);
- dev_info(tdev,
- "Settings for %s validated (version=%d, "
- "remote device=%d, rc=%d, adapter name=%.8s, "
- "host name=%.8s)\n",
- dev->name,
- p_ctlbk->version,
- p_ctlbk->correlator,
- p_ctlbk->rc,
- p_sysval->WS_name,
- p_sysval->host_name);
- switch (p_ctlbk->rc) {
- case 0:
- dev_info(tdev, "%s: CLAW device "
- "%.8s: System validate completed.\n",
- dev->name, temp_ws_name);
- if (privptr->system_validate_comp == 0)
- claw_strt_conn_req(dev);
- privptr->system_validate_comp = 1;
- break;
- case CLAW_RC_NAME_MISMATCH:
- dev_warn(tdev, "Validating %s failed because of"
- " a host or adapter name mismatch\n",
- dev->name);
- break;
- case CLAW_RC_WRONG_VERSION:
- dev_warn(tdev, "Validating %s failed because of a"
- " version conflict\n",
- dev->name);
- break;
- case CLAW_RC_HOST_RCV_TOO_SMALL:
- dev_warn(tdev, "Validating %s failed because of a"
- " frame size conflict\n",
- dev->name);
- break;
- default:
- dev_warn(tdev, "The communication peer of %s rejected"
- " the connection\n",
- dev->name);
- break;
- }
- break;
-
- case CONNECTION_REQUEST:
- p_connect = (struct conncmd *)&(p_ctlbk->data);
- dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
- "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
- dev->name,
- p_ctlbk->version,
- p_ctlbk->linkid,
- p_ctlbk->correlator,
- p_connect->host_name,
- p_connect->WS_name);
- if (privptr->active_link_ID != 0) {
- claw_snd_disc(dev, p_ctlbk);
- dev_info(tdev, "%s rejected a connection request"
- " because it is already active\n",
- dev->name);
- }
- if (p_ctlbk->linkid != 1) {
- claw_snd_disc(dev, p_ctlbk);
- dev_info(tdev, "%s rejected a request to open multiple"
- " connections\n",
- dev->name);
- }
- rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
- if (rc != 0) {
- claw_snd_disc(dev, p_ctlbk);
- dev_info(tdev, "%s rejected a connection request"
- " because of a type mismatch\n",
- dev->name);
- }
- claw_send_control(dev,
- CONNECTION_CONFIRM, p_ctlbk->linkid,
- p_ctlbk->correlator,
- 0, p_connect->host_name,
- p_connect->WS_name);
- if (p_env->packing == PACKING_ASK) {
- p_env->packing = PACK_SEND;
- claw_snd_conn_req(dev, 0);
- }
- dev_info(tdev, "%s: CLAW device %.8s: Connection "
- "completed link_id=%d.\n",
- dev->name, temp_ws_name,
- p_ctlbk->linkid);
- privptr->active_link_ID = p_ctlbk->linkid;
- p_ch = &privptr->channel[WRITE_CHANNEL];
- wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
- break;
- case CONNECTION_RESPONSE:
- p_connect = (struct conncmd *)&(p_ctlbk->data);
- dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
- "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
- dev->name,
- p_ctlbk->version,
- p_ctlbk->linkid,
- p_ctlbk->correlator,
- p_ctlbk->rc,
- p_connect->host_name,
- p_connect->WS_name);
-
- if (p_ctlbk->rc != 0) {
- dev_warn(tdev, "The communication peer of %s rejected"
- " a connection request\n",
- dev->name);
- return 1;
- }
- rc = find_link(dev,
- p_connect->host_name, p_connect->WS_name);
- if (rc != 0) {
- claw_snd_disc(dev, p_ctlbk);
- dev_warn(tdev, "The communication peer of %s"
- " rejected a connection "
- "request because of a type mismatch\n",
- dev->name);
- }
- /* should be until CONNECTION_CONFIRM */
- privptr->active_link_ID = -(p_ctlbk->linkid);
- break;
- case CONNECTION_CONFIRM:
- p_connect = (struct conncmd *)&(p_ctlbk->data);
- dev_info(tdev,
- "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
- "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
- dev->name,
- p_ctlbk->version,
- p_ctlbk->linkid,
- p_ctlbk->correlator,
- p_connect->host_name,
- p_connect->WS_name);
- if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
- privptr->active_link_ID = p_ctlbk->linkid;
- if (p_env->packing > PACKING_ASK) {
- dev_info(tdev,
- "%s: Confirmed Now packing\n", dev->name);
- p_env->packing = DO_PACKED;
- }
- p_ch = &privptr->channel[WRITE_CHANNEL];
- wake_up(&p_ch->wait);
- } else {
- dev_warn(tdev, "Activating %s failed because of"
- " an incorrect link ID=%d\n",
- dev->name, p_ctlbk->linkid);
- claw_snd_disc(dev, p_ctlbk);
- }
- break;
- case DISCONNECT:
- dev_info(tdev, "%s: Disconnect: "
- "Vers=%d,link_id=%d,Corr=%d\n",
- dev->name, p_ctlbk->version,
- p_ctlbk->linkid, p_ctlbk->correlator);
- if ((p_ctlbk->linkid == 2) &&
- (p_env->packing == PACK_SEND)) {
- privptr->active_link_ID = 1;
- p_env->packing = DO_PACKED;
- } else
- privptr->active_link_ID = 0;
- break;
- case CLAW_ERROR:
- dev_warn(tdev, "The communication peer of %s failed\n",
- dev->name);
- break;
- default:
- dev_warn(tdev, "The communication peer of %s sent"
- " an unknown command code\n",
- dev->name);
- break;
- }
-
- return 0;
-} /* end of claw_process_control */
-
-
-/*-------------------------------------------------------------------*
-* claw_send_control *
-* *
-*--------------------------------------------------------------------*/
-
-static int
-claw_send_control(struct net_device *dev, __u8 type, __u8 link,
- __u8 correlator, __u8 rc, char *local_name, char *remote_name)
-{
- struct claw_privbk *privptr;
- struct clawctl *p_ctl;
- struct sysval *p_sysval;
- struct conncmd *p_connect;
- struct sk_buff *skb;
-
- CLAW_DBF_TEXT(2, setup, "sndcntl");
- privptr = dev->ml_priv;
- p_ctl=(struct clawctl *)&privptr->ctl_bk;
-
- p_ctl->command=type;
- p_ctl->version=CLAW_VERSION_ID;
- p_ctl->linkid=link;
- p_ctl->correlator=correlator;
- p_ctl->rc=rc;
-
- p_sysval=(struct sysval *)&p_ctl->data;
- p_connect=(struct conncmd *)&p_ctl->data;
-
- switch (p_ctl->command) {
- case SYSTEM_VALIDATE_REQUEST:
- case SYSTEM_VALIDATE_RESPONSE:
- memcpy(&p_sysval->host_name, local_name, 8);
- memcpy(&p_sysval->WS_name, remote_name, 8);
- if (privptr->p_env->packing > 0) {
- p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
- p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
- } else {
- /* how big is the biggest group of packets */
- p_sysval->read_frame_size =
- privptr->p_env->read_size;
- p_sysval->write_frame_size =
- privptr->p_env->write_size;
- }
- memset(&p_sysval->reserved, 0x00, 4);
- break;
- case CONNECTION_REQUEST:
- case CONNECTION_RESPONSE:
- case CONNECTION_CONFIRM:
- case DISCONNECT:
- memcpy(&p_sysval->host_name, local_name, 8);
- memcpy(&p_sysval->WS_name, remote_name, 8);
- if (privptr->p_env->packing > 0) {
- /* How big is the biggest packet */
- p_connect->reserved1[0]=CLAW_FRAME_SIZE;
- p_connect->reserved1[1]=CLAW_FRAME_SIZE;
- } else {
- memset(&p_connect->reserved1, 0x00, 4);
- memset(&p_connect->reserved2, 0x00, 4);
- }
- break;
- default:
- break;
- }
-
- /* write Control Record to the device */
-
-
- skb = dev_alloc_skb(sizeof(struct clawctl));
- if (!skb) {
- return -ENOMEM;
- }
- memcpy(skb_put(skb, sizeof(struct clawctl)),
- p_ctl, sizeof(struct clawctl));
- if (privptr->p_env->packing >= PACK_SEND)
- claw_hw_tx(skb, dev, 1);
- else
- claw_hw_tx(skb, dev, 0);
- return 0;
-} /* end of claw_send_control */
-
-/*-------------------------------------------------------------------*
-* claw_snd_conn_req *
-* *
-*--------------------------------------------------------------------*/
-static int
-claw_snd_conn_req(struct net_device *dev, __u8 link)
-{
- int rc;
- struct claw_privbk *privptr = dev->ml_priv;
- struct clawctl *p_ctl;
-
- CLAW_DBF_TEXT(2, setup, "snd_conn");
- rc = 1;
- p_ctl=(struct clawctl *)&privptr->ctl_bk;
- p_ctl->linkid = link;
- if ( privptr->system_validate_comp==0x00 ) {
- return rc;
- }
- if (privptr->p_env->packing == PACKING_ASK )
- rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
- WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
- if (privptr->p_env->packing == PACK_SEND) {
- rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
- WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
- }
- if (privptr->p_env->packing == 0)
- rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
- HOST_APPL_NAME, privptr->p_env->api_type);
- return rc;
-
-} /* end of claw_snd_conn_req */
-
-
-/*-------------------------------------------------------------------*
-* claw_snd_disc *
-* *
-*--------------------------------------------------------------------*/
-
-static int
-claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
-{
- int rc;
- struct conncmd * p_connect;
-
- CLAW_DBF_TEXT(2, setup, "snd_dsc");
- p_connect=(struct conncmd *)&p_ctl->data;
-
- rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
- p_ctl->correlator, 0,
- p_connect->host_name, p_connect->WS_name);
- return rc;
-} /* end of claw_snd_disc */
-
-
-/*-------------------------------------------------------------------*
-* claw_snd_sys_validate_rsp *
-* *
-*--------------------------------------------------------------------*/
-
-static int
-claw_snd_sys_validate_rsp(struct net_device *dev,
- struct clawctl *p_ctl, __u32 return_code)
-{
- struct claw_env * p_env;
- struct claw_privbk *privptr;
- int rc;
-
- CLAW_DBF_TEXT(2, setup, "chkresp");
- privptr = dev->ml_priv;
- p_env=privptr->p_env;
- rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
- p_ctl->linkid,
- p_ctl->correlator,
- return_code,
- p_env->host_name,
- p_env->adapter_name );
- return rc;
-} /* end of claw_snd_sys_validate_rsp */
-
-/*-------------------------------------------------------------------*
-* claw_strt_conn_req *
-* *
-*--------------------------------------------------------------------*/
-
-static int
-claw_strt_conn_req(struct net_device *dev )
-{
- int rc;
-
- CLAW_DBF_TEXT(2, setup, "conn_req");
- rc=claw_snd_conn_req(dev, 1);
- return rc;
-} /* end of claw_strt_conn_req */
-
-
-
-/*-------------------------------------------------------------------*
- * claw_stats *
- *-------------------------------------------------------------------*/
-
-static struct
-net_device_stats *claw_stats(struct net_device *dev)
-{
- struct claw_privbk *privptr;
-
- CLAW_DBF_TEXT(4, trace, "stats");
- privptr = dev->ml_priv;
- return &privptr->stats;
-} /* end of claw_stats */
-
-
-/*-------------------------------------------------------------------*
-* unpack_read *
-* *
-*--------------------------------------------------------------------*/
-static void
-unpack_read(struct net_device *dev )
-{
- struct sk_buff *skb;
- struct claw_privbk *privptr;
- struct claw_env *p_env;
- struct ccwbk *p_this_ccw;
- struct ccwbk *p_first_ccw;
- struct ccwbk *p_last_ccw;
- struct clawph *p_packh;
- void *p_packd;
- struct clawctl *p_ctlrec=NULL;
- struct device *p_dev;
-
- __u32 len_of_data;
- __u32 pack_off;
- __u8 link_num;
- __u8 mtc_this_frm=0;
- __u32 bytes_to_mov;
- int i=0;
- int p=0;
-
- CLAW_DBF_TEXT(4, trace, "unpkread");
- p_first_ccw=NULL;
- p_last_ccw=NULL;
- p_packh=NULL;
- p_packd=NULL;
- privptr = dev->ml_priv;
-
- p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
- p_env = privptr->p_env;
- p_this_ccw=privptr->p_read_active_first;
- while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
- pack_off = 0;
- p = 0;
- p_this_ccw->header.flag=CLAW_PENDING;
- privptr->p_read_active_first=p_this_ccw->next;
- p_this_ccw->next=NULL;
- p_packh = (struct clawph *)p_this_ccw->p_buffer;
- if ((p_env->packing == PACK_SEND) &&
- (p_packh->len == 32) &&
- (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
- p_packh++; /* peek past pack header */
- p_ctlrec = (struct clawctl *)p_packh;
- p_packh--; /* un peek */
- if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
- (p_ctlrec->command == CONNECTION_CONFIRM))
- p_env->packing = DO_PACKED;
- }
- if (p_env->packing == DO_PACKED)
- link_num=p_packh->link_num;
- else
- link_num=p_this_ccw->header.opcode / 8;
- if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
- mtc_this_frm=1;
- if (p_this_ccw->header.length!=
- privptr->p_env->read_size ) {
- dev_warn(p_dev,
- "The communication peer of %s"
- " sent a faulty"
- " frame of length %02x\n",
- dev->name, p_this_ccw->header.length);
- }
- }
-
- if (privptr->mtc_skipping) {
- /*
- * We're in the mode of skipping past a
- * multi-frame message
- * that we can't process for some reason or other.
- * The first frame without the More-To-Come flag is
- * the last frame of the skipped message.
- */
- /* in case of More-To-Come not set in this frame */
- if (mtc_this_frm==0) {
- privptr->mtc_skipping=0; /* Ok, the end */
- privptr->mtc_logical_link=-1;
- }
- goto NextFrame;
- }
-
- if (link_num==0) {
- claw_process_control(dev, p_this_ccw);
- CLAW_DBF_TEXT(4, trace, "UnpkCntl");
- goto NextFrame;
- }
-unpack_next:
- if (p_env->packing == DO_PACKED) {
- if (pack_off > p_env->read_size)
- goto NextFrame;
- p_packd = p_this_ccw->p_buffer+pack_off;
- p_packh = (struct clawph *) p_packd;
- if ((p_packh->len == 0) || /* done with this frame? */
- (p_packh->flag != 0))
- goto NextFrame;
- bytes_to_mov = p_packh->len;
- pack_off += bytes_to_mov+sizeof(struct clawph);
- p++;
- } else {
- bytes_to_mov=p_this_ccw->header.length;
- }
- if (privptr->mtc_logical_link<0) {
-
- /*
- * if More-To-Come is set in this frame then we don't know
- * length of entire message, and hence have to allocate
- * large buffer */
-
- /* We are starting a new envelope */
- privptr->mtc_offset=0;
- privptr->mtc_logical_link=link_num;
- }
-
- if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
- /* error */
- privptr->stats.rx_frame_errors++;
- goto NextFrame;
- }
- if (p_env->packing == DO_PACKED) {
- memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
- p_packd+sizeof(struct clawph), bytes_to_mov);
-
- } else {
- memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
- p_this_ccw->p_buffer, bytes_to_mov);
- }
- if (mtc_this_frm==0) {
- len_of_data=privptr->mtc_offset+bytes_to_mov;
- skb=dev_alloc_skb(len_of_data);
- if (skb) {
- memcpy(skb_put(skb,len_of_data),
- privptr->p_mtc_envelope,
- len_of_data);
- skb->dev=dev;
- skb_reset_mac_header(skb);
- skb->protocol=htons(ETH_P_IP);
- skb->ip_summed=CHECKSUM_UNNECESSARY;
- privptr->stats.rx_packets++;
- privptr->stats.rx_bytes+=len_of_data;
- netif_rx(skb);
- }
- else {
- dev_info(p_dev, "Allocating a buffer for"
- " incoming data failed\n");
- privptr->stats.rx_dropped++;
- }
- privptr->mtc_offset=0;
- privptr->mtc_logical_link=-1;
- }
- else {
- privptr->mtc_offset+=bytes_to_mov;
- }
- if (p_env->packing == DO_PACKED)
- goto unpack_next;
-NextFrame:
- /*
- * Remove ThisCCWblock from active read queue, and add it
- * to queue of free blocks to be reused.
- */
- i++;
- p_this_ccw->header.length=0xffff;
- p_this_ccw->header.opcode=0xff;
- /*
- * add this one to the free queue for later reuse
- */
- if (p_first_ccw==NULL) {
- p_first_ccw = p_this_ccw;
- }
- else {
- p_last_ccw->next = p_this_ccw;
- }
- p_last_ccw = p_this_ccw;
- /*
- * chain to next block on active read queue
- */
- p_this_ccw = privptr->p_read_active_first;
- CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
- } /* end of while */
-
- /* check validity */
-
- CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
- add_claw_reads(dev, p_first_ccw, p_last_ccw);
- claw_strt_read(dev, LOCK_YES);
- return;
-} /* end of unpack_read */
-
-/*-------------------------------------------------------------------*
-* claw_strt_read *
-* *
-*--------------------------------------------------------------------*/
-static void
-claw_strt_read (struct net_device *dev, int lock )
-{
- int rc = 0;
- __u32 parm;
- unsigned long saveflags = 0;
- struct claw_privbk *privptr = dev->ml_priv;
- struct ccwbk*p_ccwbk;
- struct chbk *p_ch;
- struct clawh *p_clawh;
- p_ch = &privptr->channel[READ_CHANNEL];
-
- CLAW_DBF_TEXT(4, trace, "StRdNter");
- p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
- p_clawh->flag=CLAW_IDLE; /* 0x00 */
-
- if ((privptr->p_write_active_first!=NULL &&
- privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
- (privptr->p_read_active_first!=NULL &&
- privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
- p_clawh->flag=CLAW_BUSY; /* 0xff */
- }
- if (lock==LOCK_YES) {
- spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
- }
- if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
- CLAW_DBF_TEXT(4, trace, "HotRead");
- p_ccwbk=privptr->p_read_active_first;
- parm = (unsigned long) p_ch;
- rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
- 0xff, 0);
- if (rc != 0) {
- ccw_check_return_code(p_ch->cdev, rc);
- }
- }
- else {
- CLAW_DBF_TEXT(2, trace, "ReadAct");
- }
-
- if (lock==LOCK_YES) {
- spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
- }
- CLAW_DBF_TEXT(4, trace, "StRdExit");
- return;
-} /* end of claw_strt_read */
-
-/*-------------------------------------------------------------------*
-* claw_strt_out_IO *
-* *
-*--------------------------------------------------------------------*/
-
-static void
-claw_strt_out_IO( struct net_device *dev )
-{
- int rc = 0;
- unsigned long parm;
- struct claw_privbk *privptr;
- struct chbk *p_ch;
- struct ccwbk *p_first_ccw;
-
- if (!dev) {
- return;
- }
- privptr = (struct claw_privbk *)dev->ml_priv;
- p_ch = &privptr->channel[WRITE_CHANNEL];
-
- CLAW_DBF_TEXT(4, trace, "strt_io");
- p_first_ccw=privptr->p_write_active_first;
-
- if (p_ch->claw_state == CLAW_STOP)
- return;
- if (p_first_ccw == NULL) {
- return;
- }
- if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
- parm = (unsigned long) p_ch;
- CLAW_DBF_TEXT(2, trace, "StWrtIO");
- rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
- 0xff, 0);
- if (rc != 0) {
- ccw_check_return_code(p_ch->cdev, rc);
- }
- }
- dev->trans_start = jiffies;
- return;
-} /* end of claw_strt_out_IO */
-
-/*-------------------------------------------------------------------*
-* Free write buffers *
-* *
-*--------------------------------------------------------------------*/
-
-static void
-claw_free_wrt_buf( struct net_device *dev )
-{
-
- struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
- struct ccwbk*p_this_ccw;
- struct ccwbk*p_next_ccw;
-
- CLAW_DBF_TEXT(4, trace, "freewrtb");
- /* scan the write queue to free any completed write packets */
- p_this_ccw=privptr->p_write_active_first;
- while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
- {
- p_next_ccw = p_this_ccw->next;
- if (((p_next_ccw!=NULL) &&
- (p_next_ccw->header.flag!=CLAW_PENDING)) ||
- ((p_this_ccw == privptr->p_write_active_last) &&
- (p_this_ccw->header.flag!=CLAW_PENDING))) {
- /* The next CCW is OK or this is */
- /* the last CCW...free it @A1A */
- privptr->p_write_active_first=p_this_ccw->next;
- p_this_ccw->header.flag=CLAW_PENDING;
- p_this_ccw->next=privptr->p_write_free_chain;
- privptr->p_write_free_chain=p_this_ccw;
- ++privptr->write_free_count;
- privptr->stats.tx_bytes+= p_this_ccw->write.count;
- p_this_ccw=privptr->p_write_active_first;
- privptr->stats.tx_packets++;
- }
- else {
- break;
- }
- }
- if (privptr->write_free_count!=0) {
- claw_clearbit_busy(TB_NOBUFFER,dev);
- }
- /* whole chain removed? */
- if (privptr->p_write_active_first==NULL) {
- privptr->p_write_active_last=NULL;
- }
- CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
- return;
-}
-
-/*-------------------------------------------------------------------*
-* claw free netdevice *
-* *
-*--------------------------------------------------------------------*/
-static void
-claw_free_netdevice(struct net_device * dev, int free_dev)
-{
- struct claw_privbk *privptr;
-
- CLAW_DBF_TEXT(2, setup, "free_dev");
- if (!dev)
- return;
- CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
- privptr = dev->ml_priv;
- if (dev->flags & IFF_RUNNING)
- claw_release(dev);
- if (privptr) {
- privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
- }
- dev->ml_priv = NULL;
-#ifdef MODULE
- if (free_dev) {
- free_netdev(dev);
- }
-#endif
- CLAW_DBF_TEXT(2, setup, "free_ok");
-}
-
-/**
- * Claw init netdevice
- * Initialize everything of the net device except the name and the
- * channel structs.
- */
-static const struct net_device_ops claw_netdev_ops = {
- .ndo_open = claw_open,
- .ndo_stop = claw_release,
- .ndo_get_stats = claw_stats,
- .ndo_start_xmit = claw_tx,
- .ndo_change_mtu = claw_change_mtu,
-};
-
-static void
-claw_init_netdevice(struct net_device * dev)
-{
- CLAW_DBF_TEXT(2, setup, "init_dev");
- CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
- dev->mtu = CLAW_DEFAULT_MTU_SIZE;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->type = ARPHRD_SLIP;
- dev->tx_queue_len = 1300;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->netdev_ops = &claw_netdev_ops;
- CLAW_DBF_TEXT(2, setup, "initok");
- return;
-}
-
-/**
- * Init a new channel in the privptr->channel[i].
- *
- * @param cdev The ccw_device to be added.
- *
- * @return 0 on success, !0 on error.
- */
-static int
-add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
-{
- struct chbk *p_ch;
- struct ccw_dev_id dev_id;
-
- CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
- privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
- p_ch = &privptr->channel[i];
- p_ch->cdev = cdev;
- snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
- ccw_device_get_id(cdev, &dev_id);
- p_ch->devno = dev_id.devno;
- if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
-
-
-/**
- *
- * Setup an interface.
- *
- * @param cgdev Device to be setup.
- *
- * @returns 0 on success, !0 on failure.
- */
-static int
-claw_new_device(struct ccwgroup_device *cgdev)
-{
- struct claw_privbk *privptr;
- struct claw_env *p_env;
- struct net_device *dev;
- int ret;
- struct ccw_dev_id dev_id;
-
- dev_info(&cgdev->dev, "add for %s\n",
- dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
- CLAW_DBF_TEXT(2, setup, "new_dev");
- privptr = dev_get_drvdata(&cgdev->dev);
- dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
- if (!privptr)
- return -ENODEV;
- p_env = privptr->p_env;
- ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
- p_env->devno[READ_CHANNEL] = dev_id.devno;
- ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
- p_env->devno[WRITE_CHANNEL] = dev_id.devno;
- ret = add_channel(cgdev->cdev[0],0,privptr);
- if (ret == 0)
- ret = add_channel(cgdev->cdev[1],1,privptr);
- if (ret != 0) {
- dev_warn(&cgdev->dev, "Creating a CLAW group device"
- " failed with error code %d\n", ret);
- goto out;
- }
- ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
- if (ret != 0) {
- dev_warn(&cgdev->dev,
- "Setting the read subchannel online"
- " failed with error code %d\n", ret);
- goto out;
- }
- ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
- if (ret != 0) {
- dev_warn(&cgdev->dev,
- "Setting the write subchannel online "
- "failed with error code %d\n", ret);
- goto out;
- }
- dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice);
- if (!dev) {
- dev_warn(&cgdev->dev,
- "Activating the CLAW device failed\n");
- goto out;
- }
- dev->ml_priv = privptr;
- dev_set_drvdata(&cgdev->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
- /* sysfs magic */
- SET_NETDEV_DEV(dev, &cgdev->dev);
- if (register_netdev(dev) != 0) {
- claw_free_netdevice(dev, 1);
- CLAW_DBF_TEXT(2, trace, "regfail");
- goto out;
- }
- dev->flags &=~IFF_RUNNING;
- if (privptr->buffs_alloc == 0) {
- ret=init_ccw_bk(dev);
- if (ret !=0) {
- unregister_netdev(dev);
- claw_free_netdevice(dev,1);
- CLAW_DBF_TEXT(2, trace, "ccwmem");
- goto out;
- }
- }
- privptr->channel[READ_CHANNEL].ndev = dev;
- privptr->channel[WRITE_CHANNEL].ndev = dev;
- privptr->p_env->ndev = dev;
-
- dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
- "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
- dev->name, p_env->read_size,
- p_env->write_size, p_env->read_buffers,
- p_env->write_buffers, p_env->devno[READ_CHANNEL],
- p_env->devno[WRITE_CHANNEL]);
- dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
- ":%.8s api_type: %.8s\n",
- dev->name, p_env->host_name,
- p_env->adapter_name , p_env->api_type);
- return 0;
-out:
- ccw_device_set_offline(cgdev->cdev[1]);
- ccw_device_set_offline(cgdev->cdev[0]);
- return -ENODEV;
-}
-
-static void
-claw_purge_skb_queue(struct sk_buff_head *q)
-{
- struct sk_buff *skb;
-
- CLAW_DBF_TEXT(4, trace, "purgque");
- while ((skb = skb_dequeue(q))) {
- atomic_dec(&skb->users);
- dev_kfree_skb_any(skb);
- }
-}
-
-/**
- * Shutdown an interface.
- *
- * @param cgdev Device to be shut down.
- *
- * @returns 0 on success, !0 on failure.
- */
-static int
-claw_shutdown_device(struct ccwgroup_device *cgdev)
-{
- struct claw_privbk *priv;
- struct net_device *ndev;
- int ret = 0;
-
- CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
- priv = dev_get_drvdata(&cgdev->dev);
- if (!priv)
- return -ENODEV;
- ndev = priv->channel[READ_CHANNEL].ndev;
- if (ndev) {
- /* Close the device */
- dev_info(&cgdev->dev, "%s: shutting down\n",
- ndev->name);
- if (ndev->flags & IFF_RUNNING)
- ret = claw_release(ndev);
- ndev->flags &=~IFF_RUNNING;
- unregister_netdev(ndev);
- ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
- claw_free_netdevice(ndev, 1);
- priv->channel[READ_CHANNEL].ndev = NULL;
- priv->channel[WRITE_CHANNEL].ndev = NULL;
- priv->p_env->ndev = NULL;
- }
- ccw_device_set_offline(cgdev->cdev[1]);
- ccw_device_set_offline(cgdev->cdev[0]);
- return ret;
-}
-
-static void
-claw_remove_device(struct ccwgroup_device *cgdev)
-{
- struct claw_privbk *priv;
-
- CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
- priv = dev_get_drvdata(&cgdev->dev);
- dev_info(&cgdev->dev, " will be removed.\n");
- if (cgdev->state == CCWGROUP_ONLINE)
- claw_shutdown_device(cgdev);
- kfree(priv->p_mtc_envelope);
- priv->p_mtc_envelope=NULL;
- kfree(priv->p_env);
- priv->p_env=NULL;
- kfree(priv->channel[0].irb);
- priv->channel[0].irb=NULL;
- kfree(priv->channel[1].irb);
- priv->channel[1].irb=NULL;
- kfree(priv);
- dev_set_drvdata(&cgdev->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
- put_device(&cgdev->dev);
-
- return;
-}
-
-
-/*
- * sysfs attributes
- */
-static ssize_t
-claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- return sprintf(buf, "%s\n",p_env->host_name);
-}
-
-static ssize_t
-claw_hname_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- if (count > MAX_NAME_LEN+1)
- return -EINVAL;
- memset(p_env->host_name, 0x20, MAX_NAME_LEN);
- strncpy(p_env->host_name,buf, count);
- p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
- p_env->host_name[MAX_NAME_LEN] = 0x00;
- CLAW_DBF_TEXT(2, setup, "HstnSet");
- CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
-
- return count;
-}
-
-static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
-
-static ssize_t
-claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- return sprintf(buf, "%s\n", p_env->adapter_name);
-}
-
-static ssize_t
-claw_adname_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- if (count > MAX_NAME_LEN+1)
- return -EINVAL;
- memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
- strncpy(p_env->adapter_name,buf, count);
- p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
- p_env->adapter_name[MAX_NAME_LEN] = 0x00;
- CLAW_DBF_TEXT(2, setup, "AdnSet");
- CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
-
- return count;
-}
-
-static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
-
-static ssize_t
-claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- return sprintf(buf, "%s\n",
- p_env->api_type);
-}
-
-static ssize_t
-claw_apname_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- if (count > MAX_NAME_LEN+1)
- return -EINVAL;
- memset(p_env->api_type, 0x20, MAX_NAME_LEN);
- strncpy(p_env->api_type,buf, count);
- p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */
- p_env->api_type[MAX_NAME_LEN] = 0x00;
- if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
- p_env->read_size=DEF_PACK_BUFSIZE;
- p_env->write_size=DEF_PACK_BUFSIZE;
- p_env->packing=PACKING_ASK;
- CLAW_DBF_TEXT(2, setup, "PACKING");
- }
- else {
- p_env->packing=0;
- p_env->read_size=CLAW_FRAME_SIZE;
- p_env->write_size=CLAW_FRAME_SIZE;
- CLAW_DBF_TEXT(2, setup, "ApiSet");
- }
- CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
- return count;
-}
-
-static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
-
-static ssize_t
-claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- return sprintf(buf, "%d\n", p_env->write_buffers);
-}
-
-static ssize_t
-claw_wbuff_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
- int nnn,max;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- sscanf(buf, "%i", &nnn);
- if (p_env->packing) {
- max = 64;
- }
- else {
- max = 512;
- }
- if ((nnn > max ) || (nnn < 2))
- return -EINVAL;
- p_env->write_buffers = nnn;
- CLAW_DBF_TEXT(2, setup, "Wbufset");
- CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
- return count;
-}
-
-static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
-
-static ssize_t
-claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct claw_privbk *priv;
- struct claw_env * p_env;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- return sprintf(buf, "%d\n", p_env->read_buffers);
-}
-
-static ssize_t
-claw_rbuff_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct claw_privbk *priv;
- struct claw_env *p_env;
- int nnn,max;
-
- priv = dev_get_drvdata(dev);
- if (!priv)
- return -ENODEV;
- p_env = priv->p_env;
- sscanf(buf, "%i", &nnn);
- if (p_env->packing) {
- max = 64;
- }
- else {
- max = 512;
- }
- if ((nnn > max ) || (nnn < 2))
- return -EINVAL;
- p_env->read_buffers = nnn;
- CLAW_DBF_TEXT(2, setup, "Rbufset");
- CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
- return count;
-}
-static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
-
-static struct attribute *claw_attr[] = {
- &dev_attr_read_buffer.attr,
- &dev_attr_write_buffer.attr,
- &dev_attr_adapter_name.attr,
- &dev_attr_api_type.attr,
- &dev_attr_host_name.attr,
- NULL,
-};
-static struct attribute_group claw_attr_group = {
- .attrs = claw_attr,
-};
-static const struct attribute_group *claw_attr_groups[] = {
- &claw_attr_group,
- NULL,
-};
-static const struct device_type claw_devtype = {
- .name = "claw",
- .groups = claw_attr_groups,
-};
-
-/*----------------------------------------------------------------*
- * claw_probe *
- * this function is called for each CLAW device. *
- *----------------------------------------------------------------*/
-static int claw_probe(struct ccwgroup_device *cgdev)
-{
- struct claw_privbk *privptr = NULL;
-
- CLAW_DBF_TEXT(2, setup, "probe");
- if (!get_device(&cgdev->dev))
- return -ENODEV;
- privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
- dev_set_drvdata(&cgdev->dev, privptr);
- if (privptr == NULL) {
- probe_error(cgdev);
- put_device(&cgdev->dev);
- CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
- return -ENOMEM;
- }
- privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
- privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
- if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
- probe_error(cgdev);
- put_device(&cgdev->dev);
- CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
- return -ENOMEM;
- }
- memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
- memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
- memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
- privptr->p_env->packing = 0;
- privptr->p_env->write_buffers = 5;
- privptr->p_env->read_buffers = 5;
- privptr->p_env->read_size = CLAW_FRAME_SIZE;
- privptr->p_env->write_size = CLAW_FRAME_SIZE;
- privptr->p_env->p_priv = privptr;
- cgdev->cdev[0]->handler = claw_irq_handler;
- cgdev->cdev[1]->handler = claw_irq_handler;
- cgdev->dev.type = &claw_devtype;
- CLAW_DBF_TEXT(2, setup, "prbext 0");
-
- return 0;
-} /* end of claw_probe */
-
-/*--------------------------------------------------------------------*
-* claw_init and cleanup *
-*---------------------------------------------------------------------*/
-
-static void __exit claw_cleanup(void)
-{
- ccwgroup_driver_unregister(&claw_group_driver);
- ccw_driver_unregister(&claw_ccw_driver);
- root_device_unregister(claw_root_dev);
- claw_unregister_debug_facility();
- pr_info("Driver unloaded\n");
-}
-
-/**
- * Initialize module.
- * This is called just after the module is loaded.
- *
- * @return 0 on success, !0 on error.
- */
-static int __init claw_init(void)
-{
- int ret = 0;
-
- pr_info("Loading %s\n", version);
- ret = claw_register_debug_facility();
- if (ret) {
- pr_err("Registering with the S/390 debug feature"
- " failed with error code %d\n", ret);
- goto out_err;
- }
- CLAW_DBF_TEXT(2, setup, "init_mod");
- claw_root_dev = root_device_register("claw");
- ret = PTR_ERR_OR_ZERO(claw_root_dev);
- if (ret)
- goto register_err;
- ret = ccw_driver_register(&claw_ccw_driver);
- if (ret)
- goto ccw_err;
- claw_group_driver.driver.groups = claw_drv_attr_groups;
- ret = ccwgroup_driver_register(&claw_group_driver);
- if (ret)
- goto ccwgroup_err;
- return 0;
-
-ccwgroup_err:
- ccw_driver_unregister(&claw_ccw_driver);
-ccw_err:
- root_device_unregister(claw_root_dev);
-register_err:
- CLAW_DBF_TEXT(2, setup, "init_bad");
- claw_unregister_debug_facility();
-out_err:
- pr_err("Initializing the claw device driver failed\n");
- return ret;
-}
-
-module_init(claw_init);
-module_exit(claw_cleanup);
-
-MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
-MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
- "Copyright IBM Corp. 2000, 2008\n");
-MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
deleted file mode 100644
index 3339b9b607b3..000000000000
--- a/drivers/s390/net/claw.h
+++ /dev/null
@@ -1,348 +0,0 @@
-/*******************************************************
-* Define constants *
-* *
-********************************************************/
-
-/*-----------------------------------------------------*
-* CCW command codes for CLAW protocol *
-*------------------------------------------------------*/
-
-#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */
-#define CCW_CLAW_CMD_READ 0x02 /* read */
-#define CCW_CLAW_CMD_NOP 0x03 /* NOP */
-#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */
-#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */
-#define CCW_CLAW_CMD_TIC 0x08 /* TIC */
-#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */
-#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */
-#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */
-
-
-/*-----------------------------------------------------*
-* CLAW Unique constants *
-*------------------------------------------------------*/
-
-#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */
-#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */
-#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */
-#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */
-#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
-
-/*-----------------------------------------------------*
-* CLAW control command code *
-*------------------------------------------------------*/
-
-#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
-#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */
-#define CONNECTION_REQUEST 0x21 /* Connection request */
-#define CONNECTION_RESPONSE 0x22 /* Connection response */
-#define CONNECTION_CONFIRM 0x23 /* Connection confirm */
-#define DISCONNECT 0x24 /* Disconnect */
-#define CLAW_ERROR 0x41 /* CLAW error message */
-#define CLAW_VERSION_ID 2 /* CLAW version ID */
-
-/*-----------------------------------------------------*
-* CLAW adater sense bytes *
-*------------------------------------------------------*/
-
-#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */
-
-/*-----------------------------------------------------*
-* CLAW control command return codes *
-*------------------------------------------------------*/
-
-#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */
-#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */
-#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */
- /* less than Linux on zSeries*/
- /* transmit size */
-
-/*-----------------------------------------------------*
-* CLAW Constants application name *
-*------------------------------------------------------*/
-
-#define HOST_APPL_NAME "TCPIP "
-#define WS_APPL_NAME_IP_LINK "TCPIP "
-#define WS_APPL_NAME_IP_NAME "IP "
-#define WS_APPL_NAME_API_LINK "API "
-#define WS_APPL_NAME_PACKED "PACKED "
-#define WS_NAME_NOT_DEF "NOT_DEF "
-#define PACKING_ASK 1
-#define PACK_SEND 2
-#define DO_PACKED 3
-
-#define MAX_ENVELOPE_SIZE 65536
-#define CLAW_DEFAULT_MTU_SIZE 4096
-#define DEF_PACK_BUFSIZE 32768
-#define READ_CHANNEL 0
-#define WRITE_CHANNEL 1
-
-#define TB_TX 0 /* sk buffer handling in process */
-#define TB_STOP 1 /* network device stop in process */
-#define TB_RETRY 2 /* retry in process */
-#define TB_NOBUFFER 3 /* no buffer on free queue */
-#define CLAW_MAX_LINK_ID 1
-#define CLAW_MAX_DEV 256 /* max claw devices */
-#define MAX_NAME_LEN 8 /* host name, adapter name length */
-#define CLAW_FRAME_SIZE 4096
-#define CLAW_ID_SIZE 20+3
-
-/* state machine codes used in claw_irq_handler */
-
-#define CLAW_STOP 0
-#define CLAW_START_HALT_IO 1
-#define CLAW_START_SENSEID 2
-#define CLAW_START_READ 3
-#define CLAW_START_WRITE 4
-
-/*-----------------------------------------------------*
-* Lock flag *
-*------------------------------------------------------*/
-#define LOCK_YES 0
-#define LOCK_NO 1
-
-/*-----------------------------------------------------*
-* DBF Debug macros *
-*------------------------------------------------------*/
-#define CLAW_DBF_TEXT(level, name, text) \
- do { \
- debug_text_event(claw_dbf_##name, level, text); \
- } while (0)
-
-#define CLAW_DBF_HEX(level,name,addr,len) \
-do { \
- debug_event(claw_dbf_##name,level,(void*)(addr),len); \
-} while (0)
-
-#define CLAW_DBF_TEXT_(level,name,text...) \
- do { \
- if (debug_level_enabled(claw_dbf_##name, level)) { \
- sprintf(debug_buffer, text); \
- debug_text_event(claw_dbf_##name, level, \
- debug_buffer); \
- } \
- } while (0)
-
-/**
- * Enum for classifying detected devices.
- */
-enum claw_channel_types {
- /* Device is not a channel */
- claw_channel_type_none,
-
- /* Device is a CLAW channel device */
- claw_channel_type_claw
-};
-
-
-/*******************************************************
-* Define Control Blocks *
-* *
-********************************************************/
-
-/*------------------------------------------------------*/
-/* CLAW header */
-/*------------------------------------------------------*/
-
-struct clawh {
- __u16 length; /* length of data read by preceding read CCW */
- __u8 opcode; /* equivalent read CCW */
- __u8 flag; /* flag of FF to indicate read was completed */
-};
-
-/*------------------------------------------------------*/
-/* CLAW Packing header 4 bytes */
-/*------------------------------------------------------*/
-struct clawph {
- __u16 len; /* Length of Packed Data Area */
- __u8 flag; /* Reserved not used */
- __u8 link_num; /* Link ID */
-};
-
-/*------------------------------------------------------*/
-/* CLAW Ending struct ccwbk */
-/*------------------------------------------------------*/
-struct endccw {
- __u32 real; /* real address of this block */
- __u8 write1; /* write 1 is active */
- __u8 read1; /* read 1 is active */
- __u16 reserved; /* reserved for future use */
- struct ccw1 write1_nop1;
- struct ccw1 write1_nop2;
- struct ccw1 write2_nop1;
- struct ccw1 write2_nop2;
- struct ccw1 read1_nop1;
- struct ccw1 read1_nop2;
- struct ccw1 read2_nop1;
- struct ccw1 read2_nop2;
-};
-
-/*------------------------------------------------------*/
-/* CLAW struct ccwbk */
-/*------------------------------------------------------*/
-struct ccwbk {
- void *next; /* pointer to next ccw block */
- __u32 real; /* real address of this ccw */
- void *p_buffer; /* virtual address of data */
- struct clawh header; /* claw header */
- struct ccw1 write; /* write CCW */
- struct ccw1 w_read_FF; /* read FF */
- struct ccw1 w_TIC_1; /* TIC */
- struct ccw1 read; /* read CCW */
- struct ccw1 read_h; /* read header */
- struct ccw1 signal; /* signal SMOD */
- struct ccw1 r_TIC_1; /* TIC1 */
- struct ccw1 r_read_FF; /* read FF */
- struct ccw1 r_TIC_2; /* TIC2 */
-};
-
-/*------------------------------------------------------*/
-/* CLAW control block */
-/*------------------------------------------------------*/
-struct clawctl {
- __u8 command; /* control command */
- __u8 version; /* CLAW protocol version */
- __u8 linkid; /* link ID */
- __u8 correlator; /* correlator */
- __u8 rc; /* return code */
- __u8 reserved1; /* reserved */
- __u8 reserved2; /* reserved */
- __u8 reserved3; /* reserved */
- __u8 data[24]; /* command specific fields */
-};
-
-/*------------------------------------------------------*/
-/* Data for SYSTEMVALIDATE command */
-/*------------------------------------------------------*/
-struct sysval {
- char WS_name[8]; /* Workstation System name */
- char host_name[8]; /* Host system name */
- __u16 read_frame_size; /* read frame size */
- __u16 write_frame_size; /* write frame size */
- __u8 reserved[4]; /* reserved */
-};
-
-/*------------------------------------------------------*/
-/* Data for Connect command */
-/*------------------------------------------------------*/
-struct conncmd {
- char WS_name[8]; /* Workstation application name */
- char host_name[8]; /* Host application name */
- __u16 reserved1[2]; /* read frame size */
- __u8 reserved2[4]; /* reserved */
-};
-
-/*------------------------------------------------------*/
-/* Data for CLAW error */
-/*------------------------------------------------------*/
-struct clawwerror {
- char reserved1[8]; /* reserved */
- char reserved2[8]; /* reserved */
- char reserved3[8]; /* reserved */
-};
-
-/*------------------------------------------------------*/
-/* Data buffer for CLAW */
-/*------------------------------------------------------*/
-struct clawbuf {
- char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */
-};
-
-/*------------------------------------------------------*/
-/* Channel control block for read and write channel */
-/*------------------------------------------------------*/
-
-struct chbk {
- unsigned int devno;
- int irq;
- char id[CLAW_ID_SIZE];
- __u32 IO_active;
- __u8 claw_state;
- struct irb *irb;
- struct ccw_device *cdev; /* pointer to the channel device */
- struct net_device *ndev;
- wait_queue_head_t wait;
- struct tasklet_struct tasklet;
- struct timer_list timer;
- unsigned long flag_a; /* atomic flags */
-#define CLAW_BH_ACTIVE 0
- unsigned long flag_b; /* atomic flags */
-#define CLAW_WRITE_ACTIVE 0
- __u8 last_dstat;
- __u8 flag;
- struct sk_buff_head collect_queue;
- spinlock_t collect_lock;
-#define CLAW_WRITE 0x02 /* - Set if this is a write channel */
-#define CLAW_READ 0x01 /* - Set if this is a read channel */
-#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */
-};
-
-/*--------------------------------------------------------------*
-* CLAW environment block *
-*---------------------------------------------------------------*/
-
-struct claw_env {
- unsigned int devno[2]; /* device number */
- char host_name[9]; /* Host name */
- char adapter_name [9]; /* adapter name */
- char api_type[9]; /* TCPIP, API or PACKED */
- void *p_priv; /* privptr */
- __u16 read_buffers; /* read buffer number */
- __u16 write_buffers; /* write buffer number */
- __u16 read_size; /* read buffer size */
- __u16 write_size; /* write buffer size */
- __u16 dev_id; /* device ident */
- __u8 packing; /* are we packing? */
- __u8 in_use; /* device active flag */
- struct net_device *ndev; /* backward ptr to the net dev*/
-};
-
-/*--------------------------------------------------------------*
-* CLAW main control block *
-*---------------------------------------------------------------*/
-
-struct claw_privbk {
- void *p_buff_ccw;
- __u32 p_buff_ccw_num;
- void *p_buff_read;
- __u32 p_buff_read_num;
- __u32 p_buff_pages_perread;
- void *p_buff_write;
- __u32 p_buff_write_num;
- __u32 p_buff_pages_perwrite;
- long active_link_ID; /* Active logical link ID */
- struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */
- struct ccwbk *p_write_active_first; /* ptr to the first write ccw */
- struct ccwbk *p_write_active_last; /* ptr to the last write ccw */
- struct ccwbk *p_read_active_first; /* ptr to the first read ccw */
- struct ccwbk *p_read_active_last; /* ptr to the last read ccw */
- struct endccw *p_end_ccw; /*ptr to ending ccw */
- struct ccwbk *p_claw_signal_blk; /* ptr to signal block */
- __u32 write_free_count; /* number of free bufs for write */
- struct net_device_stats stats; /* device status */
- struct chbk channel[2]; /* Channel control blocks */
- __u8 mtc_skipping;
- int mtc_offset;
- int mtc_logical_link;
- void *p_mtc_envelope;
- struct sk_buff *pk_skb; /* packing buffer */
- int pk_cnt;
- struct clawctl ctl_bk;
- struct claw_env *p_env;
- __u8 system_validate_comp;
- __u8 release_pend;
- __u8 checksum_received_ip_pkts;
- __u8 buffs_alloc;
- struct endccw end_ccw;
- unsigned long tbusy;
-
-};
-
-
-/************************************************************/
-/* define global constants */
-/************************************************************/
-
-#define CCWBK_SIZE sizeof(struct ccwbk)
-
-
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 2dbc77b5137b..edf16bfba8ee 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -130,11 +130,7 @@ void ctcmpc_dumpit(char *buf, int len)
__u32 ct, sw, rm, dup;
char *ptr, *rptr;
char tbuf[82], tdup[82];
- #ifdef CONFIG_64BIT
char addr[22];
- #else
- char addr[12];
- #endif
char boff[12];
char bhex[82], duphex[82];
char basc[40];
@@ -147,11 +143,7 @@ void ctcmpc_dumpit(char *buf, int len)
for (ct = 0; ct < len; ct++, ptr++, rptr++) {
if (sw == 0) {
- #ifdef CONFIG_64BIT
sprintf(addr, "%16.16llx", (__u64)rptr);
- #else
- sprintf(addr, "%8.8X", (__u32)rptr);
- #endif
sprintf(boff, "%4.4X", (__u32)ct);
bhex[0] = '\0';
@@ -162,11 +154,7 @@ void ctcmpc_dumpit(char *buf, int len)
if (sw == 8)
strcat(bhex, " ");
- #if CONFIG_64BIT
sprintf(tbuf, "%2.2llX", (__u64)*ptr);
- #else
- sprintf(tbuf, "%2.2X", (__u32)*ptr);
- #endif
tbuf[2] = '\0';
strcat(bhex, tbuf);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 642c77c76b84..3466d3cb7647 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4218,7 +4218,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
- sizeof(struct qeth_ipacmd_setadpparms));
+ sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
if (!iob)
return;
cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
@@ -4290,7 +4290,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "chgmac");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
- sizeof(struct qeth_ipacmd_setadpparms));
+ sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+ sizeof(struct qeth_change_addr));
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 2c5d4567d1da..acde3f5d6e9e 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -738,11 +738,11 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
return ZFCP_ERP_FAILED;
if (mempool_resize(act->adapter->pool.sr_data,
- act->adapter->stat_read_buf_num, GFP_KERNEL))
+ act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
if (mempool_resize(act->adapter->pool.status_read_req,
- act->adapter->stat_read_buf_num, GFP_KERNEL))
+ act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 0787b9756165..228c782d6433 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -160,8 +160,7 @@ static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp)
printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n");
shutting_down = 1;
- if (orderly_poweroff(true) < 0)
- printk(KERN_CRIT "envctrl: shutdown execution failed\n");
+ orderly_poweroff(true);
}
#define WARN_INTERVAL (30 * HZ)
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index e244cf3d9ec8..5609b602c54d 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -970,18 +970,13 @@ static struct i2c_child_t *envctrl_get_i2c_child(unsigned char mon_type)
static void envctrl_do_shutdown(void)
{
static int inprog = 0;
- int ret;
if (inprog != 0)
return;
inprog = 1;
printk(KERN_CRIT "kenvctrld: WARNING: Shutting down the system now.\n");
- ret = orderly_poweroff(true);
- if (ret < 0) {
- printk(KERN_CRIT "kenvctrld: WARNING: system shutdown failed!\n");
- inprog = 0; /* unlikely to succeed, but we could try again */
- }
+ orderly_poweroff(true);
}
static struct task_struct *kenvctrld_task;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7600639db4c4..add419d6ff34 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
/* Functions */
@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
}
/* Now complete the io */
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
- twa_unmap_scsi_data(tw_dev, request_id);
}
/* Check for valid status after each drain */
@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
}
} /* End twa_load_sgl() */
-/* This function will perform a pci-dma mapping for a scatter gather list */
-static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
-{
- int use_sg;
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- use_sg = scsi_dma_map(cmd);
- if (!use_sg)
- return 0;
- else if (use_sg < 0) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SGLIST;
- cmd->SCp.have_data_in = use_sg;
-
- return use_sg;
-} /* End twa_map_scsi_sg_data() */
-
/* This function will poll for a response interrupt of a request */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
(tw_dev->state[i] != TW_S_INITIAL) &&
(tw_dev->state[i] != TW_S_COMPLETED)) {
if (tw_dev->srb[i]) {
- tw_dev->srb[i]->result = (DID_RESET << 16);
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
- twa_unmap_scsi_data(tw_dev, i);
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+ cmd->result = (DID_RESET << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
}
}
}
@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
/* Save the scsi command for use by the ISR */
tw_dev->srb[request_id] = SCpnt;
- /* Initialize phase to zero */
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
switch (retval) {
case SCSI_MLQUEUE_HOST_BUSY:
+ scsi_dma_unmap(SCpnt);
twa_free_request_id(tw_dev, request_id);
- twa_unmap_scsi_data(tw_dev, request_id);
break;
case 1:
- tw_dev->state[request_id] = TW_S_COMPLETED;
- twa_free_request_id(tw_dev, request_id);
- twa_unmap_scsi_data(tw_dev, request_id);
SCpnt->result = (DID_ERROR << 16);
+ scsi_dma_unmap(SCpnt);
done(SCpnt);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
retval = 0;
}
out:
@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
} else {
- sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
- if (sg_count == 0)
+ sg_count = scsi_dma_map(srb);
+ if (sg_count < 0)
goto out;
scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
return(table[index].text);
} /* End twa_string_lookup() */
-/* This function will perform a pci-dma unmap */
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
-{
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
- scsi_dma_unmap(cmd);
-} /* End twa_unmap_scsi_data() */
-
/* This function gets called when a disk is coming on-line */
static int twa_slave_configure(struct scsi_device *sdev)
{
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 040f7214e5b7..0fdc83cfa0e1 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
#define TW_CURRENT_DRIVER_BUILD 0
#define TW_CURRENT_DRIVER_BRANCH 0
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SINGLE 1
-#define TW_PHASE_SGLIST 2
-
/* Misc defines */
#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
#define TW_SECTOR_SIZE 512
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 2361772d5909..f8374850f714 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
return 0;
} /* End twl_post_command_packet() */
-/* This function will perform a pci-dma mapping for a scatter gather list */
-static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
-{
- int use_sg;
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- use_sg = scsi_dma_map(cmd);
- if (!use_sg)
- return 0;
- else if (use_sg < 0) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SGLIST;
- cmd->SCp.have_data_in = use_sg;
-
- return use_sg;
-} /* End twl_map_scsi_sg_data() */
-
/* This function hands scsi cdb's to the firmware */
static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
{
@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (!sglistarg) {
/* Map sglist from scsi layer to cmd packet */
if (scsi_sg_count(srb)) {
- sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
- if (sg_count == 0)
+ sg_count = scsi_dma_map(srb);
+ if (sg_count <= 0)
goto out;
scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1102,15 +1082,6 @@ out:
return retval;
} /* End twl_initialize_device_extension() */
-/* This function will perform a pci-dma unmap */
-static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
-{
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
- scsi_dma_unmap(cmd);
-} /* End twl_unmap_scsi_data() */
-
/* This function will handle attention interrupts */
static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
{
@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
}
/* Now complete the io */
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twl_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
- twl_unmap_scsi_data(tw_dev, request_id);
}
/* Check for another response interrupt */
@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
if ((tw_dev->state[i] != TW_S_FINISHED) &&
(tw_dev->state[i] != TW_S_INITIAL) &&
(tw_dev->state[i] != TW_S_COMPLETED)) {
- if (tw_dev->srb[i]) {
- tw_dev->srb[i]->result = (DID_RESET << 16);
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
- twl_unmap_scsi_data(tw_dev, i);
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+ if (cmd) {
+ cmd->result = (DID_RESET << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
}
}
}
@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
/* Save the scsi command for use by the ISR */
tw_dev->srb[request_id] = SCpnt;
- /* Initialize phase to zero */
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
if (retval) {
tw_dev->state[request_id] = TW_S_COMPLETED;
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index d474892701d4..fec6449c7595 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
#define TW_CURRENT_DRIVER_BUILD 0
#define TW_CURRENT_DRIVER_BRANCH 0
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SGLIST 2
-
/* Misc defines */
#define TW_SECTOR_SIZE 512
#define TW_MAX_UNITS 32
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c75f2048319f..2940bd769936 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
return 0;
} /* End tw_initialize_device_extension() */
-static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- int use_sg;
-
- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
-
- use_sg = scsi_dma_map(cmd);
- if (use_sg < 0) {
- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SGLIST;
- cmd->SCp.have_data_in = use_sg;
-
- return use_sg;
-} /* End tw_map_scsi_sg_data() */
-
-static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
-
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
- scsi_dma_unmap(cmd);
-} /* End tw_unmap_scsi_data() */
-
/* This function will reset a device extension */
static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
{
@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
srb = tw_dev->srb[i];
if (srb != NULL) {
srb->result = (DID_RESET << 16);
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
+ scsi_dma_unmap(srb);
+ srb->scsi_done(srb);
}
}
}
@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
command_packet->byte8.io.lba = lba;
command_packet->byte6.block_count = num_sectors;
- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
- if (!use_sg)
+ use_sg = scsi_dma_map(srb);
+ if (use_sg <= 0)
return 1;
scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
/* Save the scsi command for use by the ISR */
tw_dev->srb[request_id] = SCpnt;
- /* Initialize phase to zero */
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
switch (*command) {
case READ_10:
case READ_6:
@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
/* Now complete the io */
if ((error != TW_ISR_DONT_COMPLETE)) {
+ scsi_dma_unmap(tw_dev->srb[request_id]);
+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->posted_request_count--;
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
-
- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
}
}
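
The same conversion in 3w-xxxx.c also tightens the mapping check to use_sg <= 0 and keeps the scsi_for_each_sg() walk over the mapped list. A hedged sketch of that idiom; tw_demo_build_sgl() and its firmware-facing arrays are illustrative, not part of the driver:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static int tw_demo_build_sgl(struct scsi_cmnd *cmd,
			     dma_addr_t *addr, u32 *len, int max)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);
	if (nseg <= 0)			/* no data or mapping failure */
		return -EIO;
	if (nseg > max) {		/* more entries than the firmware accepts */
		scsi_dma_unmap(cmd);
		return -EIO;
	}

	scsi_for_each_sg(cmd, sg, nseg, i) {
		addr[i] = sg_dma_address(sg);
		len[i]  = sg_dma_len(sg);
	}
	return nseg;
}
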
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 29b0b84ed69e..6f65e663d393 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
#define TW_AEN_SMART_FAIL 0x000F
#define TW_AEN_SBUF_FAIL 0x0024
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SINGLE 1
-#define TW_PHASE_SGLIST 2
-
/* Misc defines */
#define TW_ALIGNMENT_6000 64 /* 64 bytes */
#define TW_ALIGNMENT_7000 4 /* 4 bytes */
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 8981701802ca..a777e5c412df 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -474,11 +474,11 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
*/
#ifndef USLEEP_SLEEP
/* 20 ms (reasonable hard disk speed) */
-#define USLEEP_SLEEP (20*HZ/1000)
+#define USLEEP_SLEEP msecs_to_jiffies(20)
#endif
/* 300 RPM (floppy speed) */
#ifndef USLEEP_POLL
-#define USLEEP_POLL (200*HZ/1000)
+#define USLEEP_POLL msecs_to_jiffies(200)
#endif
#ifndef USLEEP_WAITLONG
/* RvC: (reasonable time to wait on select error) */
@@ -576,7 +576,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
trying_irqs |= mask;
- timeout = jiffies + (250 * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(250);
probe_irq = NO_IRQ;
/*
@@ -634,7 +634,7 @@ static void prepare_info(struct Scsi_Host *instance)
"sg_tablesize %d, this_id %d, "
"flags { %s%s%s}, "
#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
- "USLEEP_POLL %d, USLEEP_WAITLONG %d, "
+ "USLEEP_POLL %lu, USLEEP_WAITLONG %lu, "
#endif
"options { %s} ",
instance->hostt->name, instance->io_port, instance->n_io_port,
@@ -1346,7 +1346,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
* selection.
*/
- timeout = jiffies + (250 * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(250);
/*
* XXX very interesting - we're seeing a bounce where the BSY we
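
The NCR5380 hunks swap open-coded (ms * HZ / 1000) arithmetic for msecs_to_jiffies(), which states the intent directly, rounds up rather than truncating, and returns unsigned long (hence the %lu format fix in prepare_info()). A small polling sketch under those assumptions; demo_wait_for() is a hypothetical helper:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

static int demo_wait_for(bool (*cond)(void), unsigned int ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	while (time_before(jiffies, timeout)) {
		if (cond())
			return 0;
		udelay(5);
	}
	return cond() ? 0 : -ETIMEDOUT;
}
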
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index b32e77db0c48..9b3dd6ef6a0b 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -111,6 +111,41 @@
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
+/* MODE_SENSE data format */
+typedef struct {
+ struct {
+ u8 data_length;
+ u8 med_type;
+ u8 dev_par;
+ u8 bd_length;
+ } __attribute__((packed)) hd;
+ struct {
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+ u8 mpc_buf[3];
+} __attribute__((packed)) aac_modep_data;
+
+/* MODE_SENSE_10 data format */
+typedef struct {
+ struct {
+ u8 data_length[2];
+ u8 med_type;
+ u8 dev_par;
+ u8 rsrvd[2];
+ u8 bd_length[2];
+ } __attribute__((packed)) hd;
+ struct {
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+ u8 mpc_buf[3];
+} __attribute__((packed)) aac_modep10_data;
+
/*------------------------------------------------------------------------------
* S T R U C T S / T Y P E D E F S
*----------------------------------------------------------------------------*/
@@ -128,6 +163,48 @@ struct inquiry_data {
u8 inqd_prl[4]; /* Product Revision Level */
};
+/* Added for VPD 0x83 */
+typedef struct {
+ u8 CodeSet:4; /* VPD_CODE_SET */
+ u8 Reserved:4;
+ u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
+ u8 Reserved2:4;
+ u8 Reserved3;
+ u8 IdentifierLength;
+ u8 VendId[8];
+ u8 ProductId[16];
+ u8 SerialNumber[8]; /* SN in ASCII */
+
+} TVPD_ID_Descriptor_Type_1;
+
+typedef struct {
+ u8 CodeSet:4; /* VPD_CODE_SET */
+ u8 Reserved:4;
+ u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
+ u8 Reserved2:4;
+ u8 Reserved3;
+ u8 IdentifierLength;
+ struct TEU64Id {
+ u32 Serial;
+ /* The serial number is supposed to be 40 bits,
+ * but we only support 32, so make the last byte zero. */
+ u8 Reserved;
+ u8 VendId[3];
+ } EU64Id;
+
+} TVPD_ID_Descriptor_Type_2;
+
+typedef struct {
+ u8 DeviceType:5;
+ u8 DeviceTypeQualifier:3;
+ u8 PageCode;
+ u8 Reserved;
+ u8 PageLength;
+ TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
+ TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
+
+} TVPD_Page83;
+
/*
* M O D U L E G L O B A L S
*/
@@ -385,6 +462,11 @@ int aac_get_containers(struct aac_dev *dev)
if (status >= 0) {
dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_SUPPORTED_240_VOLUMES) {
+ maximum_num_containers =
+ le32_to_cpu(dresp->MaxSimpleVolumes);
+ }
aac_fib_complete(fibptr);
}
/* FIB should be freed only after getting the response from the F/W */
@@ -438,7 +520,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
if ((le32_to_cpu(get_name_reply->status) == CT_OK)
&& (get_name_reply->data[0] != '\0')) {
char *sp = get_name_reply->data;
- sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
+ sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
while (*sp == ' ')
++sp;
if (*sp) {
@@ -539,6 +621,14 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+ if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
+ dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
+ fsa_dev_ptr->block_size = 0x200;
+ } else {
+ fsa_dev_ptr->block_size =
+ le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
+ }
fsa_dev_ptr->valid = 1;
/* sense_key holds the current state of the spin-up */
if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
@@ -571,7 +661,9 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
int status;
dresp = (struct aac_mount *) fib_data(fibptr);
- dresp->mnt[0].capacityhigh = 0;
+ if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE))
+ dresp->mnt[0].capacityhigh = 0;
if ((le32_to_cpu(dresp->status) != ST_OK) ||
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
_aac_probe_container2(context, fibptr);
@@ -586,7 +678,12 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
dinfo = (struct aac_query_mount *)fib_data(fibptr);
- dinfo->command = cpu_to_le32(VM_NameServe64);
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)
+ dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+ else
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
@@ -621,7 +718,12 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
dinfo = (struct aac_query_mount *)fib_data(fibptr);
- dinfo->command = cpu_to_le32(VM_NameServe);
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)
+ dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+ else
+ dinfo->command = cpu_to_le32(VM_NameServe);
+
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
scsicmd->SCp.ptr = (char *)callback;
@@ -835,14 +937,88 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
/* Failure is irrelevant, using default value instead */
if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
- char sp[13];
- /* EVPD bit set */
- sp[0] = INQD_PDT_DA;
- sp[1] = scsicmd->cmnd[2];
- sp[2] = 0;
- sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
- le32_to_cpu(get_serial_reply->uid));
- scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp));
+ /* Check to see if it's for VPD 0x83 or 0x80 */
+ if (scsicmd->cmnd[2] == 0x83) {
+ /* vpd page 0x83 - Device Identification Page */
+ int i;
+ TVPD_Page83 VPDPage83Data;
+
+ memset(((u8 *)&VPDPage83Data), 0,
+ sizeof(VPDPage83Data));
+
+ /* DIRECT_ACCESS_DEVICE */
+ VPDPage83Data.DeviceType = 0;
+ /* DEVICE_CONNECTED */
+ VPDPage83Data.DeviceTypeQualifier = 0;
+ /* VPD_DEVICE_IDENTIFIERS */
+ VPDPage83Data.PageCode = 0x83;
+ VPDPage83Data.Reserved = 0;
+ VPDPage83Data.PageLength =
+ sizeof(VPDPage83Data.IdDescriptorType1) +
+ sizeof(VPDPage83Data.IdDescriptorType2);
+
+ /* T10 Vendor Identifier Field Format */
+ /* VpdCodeSetAscii */
+ VPDPage83Data.IdDescriptorType1.CodeSet = 2;
+ /* VpdIdentifierTypeVendorId */
+ VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
+ VPDPage83Data.IdDescriptorType1.IdentifierLength =
+ sizeof(VPDPage83Data.IdDescriptorType1) - 4;
+
+ /* "ADAPTEC " for adaptec */
+ memcpy(VPDPage83Data.IdDescriptorType1.VendId,
+ "ADAPTEC ",
+ sizeof(VPDPage83Data.IdDescriptorType1.VendId));
+ memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
+ "ARRAY ",
+ sizeof(
+ VPDPage83Data.IdDescriptorType1.ProductId));
+
+ /* Convert to an ASCII-based serial number.
+ * The LSB is at the end.
+ */
+ for (i = 0; i < 8; i++) {
+ u8 temp =
+ (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
+ if (temp > 0x9) {
+ VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ 'A' + (temp - 0xA);
+ } else {
+ VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ '0' + temp;
+ }
+ }
+
+ /* VpdCodeSetBinary */
+ VPDPage83Data.IdDescriptorType2.CodeSet = 1;
+ /* VpdIdentifierTypeEUI64 */
+ VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
+ VPDPage83Data.IdDescriptorType2.IdentifierLength =
+ sizeof(VPDPage83Data.IdDescriptorType2) - 4;
+
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
+
+ VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
+ get_serial_reply->uid;
+ VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
+
+ /* Move the inquiry data to the response buffer. */
+ scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
+ sizeof(VPDPage83Data));
+ } else {
+ /* It must be for VPD 0x80 */
+ char sp[13];
+ /* EVPD bit set */
+ sp[0] = INQD_PDT_DA;
+ sp[1] = scsicmd->cmnd[2];
+ sp[2] = 0;
+ sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+ le32_to_cpu(get_serial_reply->uid));
+ scsi_sg_copy_from_buffer(scsicmd, sp,
+ sizeof(sp));
+ }
}
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
@@ -982,7 +1158,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
memset(readcmd2, 0, sizeof(struct aac_raw_io2));
readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- readcmd2->byteCount = cpu_to_le32(count<<9);
+ readcmd2->byteCount = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
readcmd2->cid = cpu_to_le16(scmd_id(cmd));
readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
ret = aac_build_sgraw2(cmd, readcmd2,
@@ -997,7 +1174,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd = (struct aac_raw_io *) fib_data(fib);
readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- readcmd->count = cpu_to_le32(count<<9);
+ readcmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
readcmd->bpTotal = 0;
@@ -1062,6 +1240,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
{
u16 fibsize;
struct aac_read *readcmd;
+ struct aac_dev *dev = fib->dev;
long ret;
aac_fib_init(fib);
@@ -1069,7 +1248,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(scmd_id(cmd));
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
- readcmd->count = cpu_to_le32(count * 512);
+ readcmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
ret = aac_build_sg(cmd, &readcmd->sg);
if (ret < 0)
@@ -1104,7 +1284,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
memset(writecmd2, 0, sizeof(struct aac_raw_io2));
writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- writecmd2->byteCount = cpu_to_le32(count<<9);
+ writecmd2->byteCount = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd2->cid = cpu_to_le16(scmd_id(cmd));
writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
@@ -1122,7 +1303,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
writecmd = (struct aac_raw_io *) fib_data(fib);
writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- writecmd->count = cpu_to_le32(count<<9);
+ writecmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
@@ -1190,6 +1372,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
u16 fibsize;
struct aac_write *writecmd;
+ struct aac_dev *dev = fib->dev;
long ret;
aac_fib_init(fib);
@@ -1197,7 +1380,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(scmd_id(cmd));
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
- writecmd->count = cpu_to_le32(count * 512);
+ writecmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
@@ -2246,9 +2430,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
INQD_PDT_PROC : INQD_PDT_DA;
if (scsicmd->cmnd[2] == 0) {
/* supported vital product data pages */
- arr[3] = 2;
+ arr[3] = 3;
arr[4] = 0x0;
arr[5] = 0x80;
+ arr[6] = 0x83;
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
@@ -2264,7 +2449,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- /* SLES 10 SP1 special */
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else if (scsicmd->cmnd[2] == 0x83) {
+ /* vpd page 0x83 - Device Identification Page */
+ char *sno = (char *)&inq_data;
+ sno[3] = setinqserial(dev, &sno[4],
+ scmd_id(scsicmd));
+ if (aac_wwn != 2)
+ return aac_get_container_serial(
+ scsicmd);
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
} else {
@@ -2329,10 +2523,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[5] = (capacity >> 16) & 0xff;
cp[6] = (capacity >> 8) & 0xff;
cp[7] = (capacity >> 0) & 0xff;
- cp[8] = 0;
- cp[9] = 0;
- cp[10] = 2;
- cp[11] = 0;
+ cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+ cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
cp[12] = 0;
alloc_len = ((scsicmd->cmnd[10] << 24)
@@ -2369,10 +2563,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[1] = (capacity >> 16) & 0xff;
cp[2] = (capacity >> 8) & 0xff;
cp[3] = (capacity >> 0) & 0xff;
- cp[4] = 0;
- cp[5] = 0;
- cp[6] = 2;
- cp[7] = 0;
+ cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+ cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
@@ -2385,30 +2579,79 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case MODE_SENSE:
{
- char mode_buf[7];
int mode_buf_length = 4;
+ u32 capacity;
+ aac_modep_data mpd;
+
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+ capacity = fsa_dev_ptr[cid].size - 1;
+ else
+ capacity = (u32)-1;
dprintk((KERN_DEBUG "MODE SENSE command.\n"));
- mode_buf[0] = 3; /* Mode data length */
- mode_buf[1] = 0; /* Medium type - default */
- mode_buf[2] = 0; /* Device-specific param,
- bit 8: 0/1 = write enabled/protected
- bit 4: 0/1 = FUA enabled */
+ memset((char *)&mpd, 0, sizeof(aac_modep_data));
+
+ /* Mode data length */
+ mpd.hd.data_length = sizeof(mpd.hd) - 1;
+ /* Medium type - default */
+ mpd.hd.med_type = 0;
+ /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ mpd.hd.dev_par = 0;
+
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
- mode_buf[2] = 0x10;
- mode_buf[3] = 0; /* Block descriptor length */
+ mpd.hd.dev_par = 0x10;
+ if (scsicmd->cmnd[1] & 0x8)
+ mpd.hd.bd_length = 0; /* Block descriptor length */
+ else {
+ mpd.hd.bd_length = sizeof(mpd.bd);
+ mpd.hd.data_length += mpd.hd.bd_length;
+ mpd.bd.block_length[0] =
+ (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ mpd.bd.block_length[1] =
+ (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ mpd.bd.block_length[2] =
+ fsa_dev_ptr[cid].block_size & 0xff;
+
+ mpd.mpc_buf[0] = scsicmd->cmnd[2];
+ if (scsicmd->cmnd[2] == 0x1C) {
+ /* page length */
+ mpd.mpc_buf[1] = 0xa;
+ /* Mode data length */
+ mpd.hd.data_length = 23;
+ } else {
+ /* Mode data length */
+ mpd.hd.data_length = 15;
+ }
+
+ if (capacity > 0xffffff) {
+ mpd.bd.block_count[0] = 0xff;
+ mpd.bd.block_count[1] = 0xff;
+ mpd.bd.block_count[2] = 0xff;
+ } else {
+ mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
+ mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
+ mpd.bd.block_count[2] = capacity & 0xff;
+ }
+ }
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
- mode_buf[0] = 6;
- mode_buf[4] = 8;
- mode_buf[5] = 1;
- mode_buf[6] = ((aac_cache & 6) == 2)
+ mpd.hd.data_length += 3;
+ mpd.mpc_buf[0] = 8;
+ mpd.mpc_buf[1] = 1;
+ mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
? 0 : 0x04; /* WCE */
- mode_buf_length = 7;
- if (mode_buf_length > scsicmd->cmnd[4])
- mode_buf_length = scsicmd->cmnd[4];
+ mode_buf_length = sizeof(mpd);
}
- scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
+
+ if (mode_buf_length > scsicmd->cmnd[4])
+ mode_buf_length = scsicmd->cmnd[4];
+ else
+ mode_buf_length = sizeof(mpd);
+ scsi_sg_copy_from_buffer(scsicmd,
+ (char *)&mpd,
+ mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
@@ -2416,34 +2659,77 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
case MODE_SENSE_10:
{
- char mode_buf[11];
+ u32 capacity;
int mode_buf_length = 8;
+ aac_modep10_data mpd10;
+
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+ capacity = fsa_dev_ptr[cid].size - 1;
+ else
+ capacity = (u32)-1;
dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
- mode_buf[0] = 0; /* Mode data length (MSB) */
- mode_buf[1] = 6; /* Mode data length (LSB) */
- mode_buf[2] = 0; /* Medium type - default */
- mode_buf[3] = 0; /* Device-specific param,
- bit 8: 0/1 = write enabled/protected
- bit 4: 0/1 = FUA enabled */
+ memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
+ /* Mode data length (MSB) */
+ mpd10.hd.data_length[0] = 0;
+ /* Mode data length (LSB) */
+ mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
+ /* Medium type - default */
+ mpd10.hd.med_type = 0;
+ /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ mpd10.hd.dev_par = 0;
+
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
- mode_buf[3] = 0x10;
- mode_buf[4] = 0; /* reserved */
- mode_buf[5] = 0; /* reserved */
- mode_buf[6] = 0; /* Block descriptor length (MSB) */
- mode_buf[7] = 0; /* Block descriptor length (LSB) */
+ mpd10.hd.dev_par = 0x10;
+ mpd10.hd.rsrvd[0] = 0; /* reserved */
+ mpd10.hd.rsrvd[1] = 0; /* reserved */
+ if (scsicmd->cmnd[1] & 0x8) {
+ /* Block descriptor length (MSB) */
+ mpd10.hd.bd_length[0] = 0;
+ /* Block descriptor length (LSB) */
+ mpd10.hd.bd_length[1] = 0;
+ } else {
+ mpd10.hd.bd_length[0] = 0;
+ mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
+
+ mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
+
+ mpd10.bd.block_length[0] =
+ (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ mpd10.bd.block_length[1] =
+ (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ mpd10.bd.block_length[2] =
+ fsa_dev_ptr[cid].block_size & 0xff;
+
+ if (capacity > 0xffffff) {
+ mpd10.bd.block_count[0] = 0xff;
+ mpd10.bd.block_count[1] = 0xff;
+ mpd10.bd.block_count[2] = 0xff;
+ } else {
+ mpd10.bd.block_count[0] =
+ (capacity >> 16) & 0xff;
+ mpd10.bd.block_count[1] =
+ (capacity >> 8) & 0xff;
+ mpd10.bd.block_count[2] =
+ capacity & 0xff;
+ }
+ }
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
- mode_buf[1] = 9;
- mode_buf[8] = 8;
- mode_buf[9] = 1;
- mode_buf[10] = ((aac_cache & 6) == 2)
+ mpd10.hd.data_length[1] += 3;
+ mpd10.mpc_buf[0] = 8;
+ mpd10.mpc_buf[1] = 1;
+ mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
? 0 : 0x04; /* WCE */
- mode_buf_length = 11;
+ mode_buf_length = sizeof(mpd10);
if (mode_buf_length > scsicmd->cmnd[8])
mode_buf_length = scsicmd->cmnd[8];
}
- scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
+ scsi_sg_copy_from_buffer(scsicmd,
+ (char *)&mpd10,
+ mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
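
The aachba.c changes above add a VPD page 0x83 response and scale transfer byte counts by the container's reported block size instead of a hard-coded 512 (count << 9). The nibble loop that renders the 32-bit serial as hex is easy to isolate; demo_serial_to_ascii() below is a hypothetical mirror of it, not driver code:

#include <linux/types.h>

/* Render a 32-bit serial as eight upper-case hex digits, MSB nibble first. */
static void demo_serial_to_ascii(u32 uid, u8 out[8])
{
	int i;

	for (i = 0; i < 8; i++) {
		u8 nib = (uid >> ((7 - i) * 4)) & 0xF;

		out[i] = (nib > 9) ? 'A' + (nib - 10) : '0' + nib;
	}
}
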
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index eaaf8705a5f4..40fe65c91b41 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -6,13 +6,63 @@
#define nblank(x) _nblank(x)[0]
#include <linux/interrupt.h>
+#include <linux/pci.h>
/*------------------------------------------------------------------------------
* D E F I N E S
*----------------------------------------------------------------------------*/
+#define AAC_MAX_MSIX 8 /* vectors */
+#define AAC_PCI_MSI_ENABLE 0x8000
+
+enum {
+ AAC_ENABLE_INTERRUPT = 0x0,
+ AAC_DISABLE_INTERRUPT,
+ AAC_ENABLE_MSIX,
+ AAC_DISABLE_MSIX,
+ AAC_CLEAR_AIF_BIT,
+ AAC_CLEAR_SYNC_BIT,
+ AAC_ENABLE_INTX
+};
+
+#define AAC_INT_MODE_INTX (1<<0)
+#define AAC_INT_MODE_MSI (1<<1)
+#define AAC_INT_MODE_AIF (1<<2)
+#define AAC_INT_MODE_SYNC (1<<3)
+
+#define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb
+#define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa
+#define AAC_INT_DISABLE_ALL 0xffffffff
+
+/* Bit definitions in IOA->Host Interrupt Register */
+#define PMC_TRANSITION_TO_OPERATIONAL (1<<31)
+#define PMC_IOARCB_TRANSFER_FAILED (1<<28)
+#define PMC_IOA_UNIT_CHECK (1<<27)
+#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26)
+#define PMC_CRITICAL_IOA_OP_IN_PROGRESS (1<<25)
+#define PMC_IOARRIN_LOST (1<<4)
+#define PMC_SYSTEM_BUS_MMIO_ERROR (1<<3)
+#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2)
+#define PMC_HOST_RRQ_VALID (1<<1)
+#define PMC_OPERATIONAL_STATUS (1<<31)
+#define PMC_ALLOW_MSIX_VECTOR0 (1<<0)
+
+#define PMC_IOA_ERROR_INTERRUPTS (PMC_IOARCB_TRANSFER_FAILED | \
+ PMC_IOA_UNIT_CHECK | \
+ PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \
+ PMC_IOARRIN_LOST | \
+ PMC_SYSTEM_BUS_MMIO_ERROR | \
+ PMC_IOA_PROCESSOR_IN_ERROR_STATE)
+
+#define PMC_ALL_INTERRUPT_BITS (PMC_IOA_ERROR_INTERRUPTS | \
+ PMC_HOST_RRQ_VALID | \
+ PMC_TRANSITION_TO_OPERATIONAL | \
+ PMC_ALLOW_MSIX_VECTOR0)
+#define PMC_GLOBAL_INT_BIT2 0x00000004
+#define PMC_GLOBAL_INT_BIT0 0x00000001
+
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30300
+# define AAC_DRIVER_BUILD 40709
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -36,6 +86,7 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
+#define PMC_DEVICE_S6 0x28b
#define PMC_DEVICE_S7 0x28c
#define PMC_DEVICE_S8 0x28d
#define PMC_DEVICE_S9 0x28f
@@ -434,7 +485,7 @@ enum fib_xfer_state {
struct aac_init
{
__le32 InitStructRevision;
- __le32 MiniPortRevision;
+ __le32 Sa_MSIXVectors;
__le32 fsrev;
__le32 CommHeaderAddress;
__le32 FastIoCommAreaAddress;
@@ -582,7 +633,8 @@ struct aac_queue {
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
struct list_head cmdq; /* A queue of FIBs which need to be processed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
- u32 numpending; /* Number of entries on outstanding queue. */
+ /* Number of entries on outstanding queue. */
+ atomic_t numpending;
struct aac_dev * dev; /* Back pointer to adapter structure */
};
@@ -755,7 +807,8 @@ struct rkt_registers {
struct src_mu_registers {
/* PCI*| Name */
- __le32 reserved0[8]; /* 00h | Reserved */
+ __le32 reserved0[6]; /* 00h | Reserved */
+ __le32 IOAR[2]; /* 18h | IOA->host interrupt register */
__le32 IDR; /* 20h | Inbound Doorbell Register */
__le32 IISR; /* 24h | Inbound Int. Status Register */
__le32 reserved1[3]; /* 28h | Reserved */
@@ -767,17 +820,18 @@ struct src_mu_registers {
__le32 OMR; /* bch | Outbound Message Register */
__le32 IQ_L; /* c0h | Inbound Queue (Low address) */
__le32 IQ_H; /* c4h | Inbound Queue (High address) */
+ __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */
};
struct src_registers {
- struct src_mu_registers MUnit; /* 00h - c7h */
+ struct src_mu_registers MUnit; /* 00h - cbh */
union {
struct {
- __le32 reserved1[130790]; /* c8h - 7fc5fh */
+ __le32 reserved1[130789]; /* cch - 7fc5fh */
struct src_inbound IndexRegs; /* 7fc60h */
} tupelo;
struct {
- __le32 reserved1[974]; /* c8h - fffh */
+ __le32 reserved1[973]; /* cch - fffh */
struct src_inbound IndexRegs; /* 1000h */
} denali;
} u;
@@ -857,6 +911,7 @@ struct fsa_dev_info {
u8 deleted;
char devname[8];
struct sense_data sense_data;
+ u32 block_size;
};
struct fib {
@@ -960,6 +1015,10 @@ struct aac_supplement_adapter_info
#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000)
+/* 4KB sector size */
+#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000)
+/* 240 simple volume support */
+#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -1026,6 +1085,11 @@ struct aac_bus_info_response {
#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
+/* MSIX context */
+struct aac_msix_ctx {
+ int vector_no;
+ struct aac_dev *dev;
+};
struct aac_dev
{
@@ -1081,8 +1145,10 @@ struct aac_dev
* if AAC_COMM_MESSAGE_TYPE1 */
dma_addr_t host_rrq_pa; /* phys. address */
- u32 host_rrq_idx; /* index into rrq buffer */
-
+ /* index into rrq buffer */
+ u32 host_rrq_idx[AAC_MAX_MSIX];
+ atomic_t rrq_outstanding[AAC_MAX_MSIX];
+ u32 fibs_pushed_no;
struct pci_dev *pdev; /* Our PCI interface */
void * printfbuf; /* pointer to buffer used for printf's from the adapter */
void * comm_addr; /* Base address of Comm area */
@@ -1151,6 +1217,13 @@ struct aac_dev
int sync_mode;
struct fib *sync_fib;
struct list_head sync_fib_list;
+ u32 doorbell_mask;
+ u32 max_msix; /* max. MSI-X vectors */
+ u32 vector_cap; /* MSI-X vector capab.*/
+ int msi_enabled; /* MSI/MSI-X enabled */
+ struct msix_entry msixentry[AAC_MAX_MSIX];
+ struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
+ u8 adapter_shutdown;
};
#define aac_adapter_interrupt(dev) \
@@ -1589,6 +1662,7 @@ struct aac_srb_reply
#define VM_CtHostWrite64 20
#define VM_DrvErrTblLog 21
#define VM_NameServe64 22
+#define VM_NameServeAllBlk 30
#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
@@ -1611,8 +1685,13 @@ struct aac_fsinfo {
__le32 fsInodeDensity;
}; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+struct aac_blockdevinfo {
+ __le32 block_size;
+};
+
union aac_contentinfo {
- struct aac_fsinfo filesys; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+ struct aac_fsinfo filesys;
+ struct aac_blockdevinfo bdevinfo;
};
/*
@@ -1677,6 +1756,7 @@ struct aac_get_container_count_resp {
__le32 MaxContainers;
__le32 ContainerSwitchEntries;
__le32 MaxPartitions;
+ __le32 MaxSimpleVolumes;
};
@@ -1951,6 +2031,8 @@ extern struct aac_common aac_config;
#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */
#define EM_DRIVE_INSERTION 31
#define EM_DRIVE_REMOVAL 32
+#define EM_SES_DRIVE_INSERTION 33
+#define EM_SES_DRIVE_REMOVAL 26
#define AifEnBatteryEvent 14 /* Change in Battery State */
#define AifEnAddContainer 15 /* A new array was created */
#define AifEnDeleteContainer 16 /* A container was deleted */
@@ -1983,6 +2065,9 @@ extern struct aac_common aac_config;
/* PMC NEW COMM: Request the event data */
#define AifReqEvent 200
+/* RAW device deleted */
+#define AifRawDeviceRemove 203
+
/*
* Adapter Initiated FIB command structures. Start with the adapter
* initiated FIBs that really come from the adapter, and get responded
@@ -2025,6 +2110,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
int aac_fib_complete(struct fib * context);
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
+void aac_src_access_devreg(struct aac_dev *dev, int mode);
int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
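
aacraid.h turns the queue's numpending counter into an atomic_t, so the submit and completion paths that only increment, decrement, or read it no longer need the queue lock. A minimal sketch of that conversion, with demo_queue standing in for struct aac_queue:

#include <linux/atomic.h>

struct demo_queue {
	atomic_t numpending;	/* outstanding entries, lock-free */
};

static void demo_submit(struct demo_queue *q)
{
	atomic_inc(&q->numpending);
}

static void demo_complete(struct demo_queue *q)
{
	atomic_dec(&q->numpending);
}

static int demo_outstanding(const struct demo_queue *q)
{
	return atomic_read(&q->numpending);
}
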
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index fbcd48d0bfc3..54195a117f72 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -689,7 +689,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
kfree (usg);
}
srbcmd->count = cpu_to_le32(byte_count);
- psg->count = cpu_to_le32(sg_indx+1);
+ if (user_srbcmd->sg.count)
+ psg->count = cpu_to_le32(sg_indx+1);
+ else
+ psg->count = 0;
status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
} else {
struct user_sgmap* upsg = &user_srbcmd->sg;
@@ -775,7 +778,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
}
srbcmd->count = cpu_to_le32(byte_count);
- psg->count = cpu_to_le32(sg_indx+1);
+ if (user_srbcmd->sg.count)
+ psg->count = cpu_to_le32(sg_indx+1);
+ else
+ psg->count = 0;
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
if (status == -ERESTARTSYS) {
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 177b094c7792..45db84ad322f 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -43,6 +43,8 @@
#include "aacraid.h"
+static void aac_define_int_mode(struct aac_dev *dev);
+
struct aac_common aac_config = {
.irq_mod = 1
};
@@ -51,7 +53,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
{
unsigned char *base;
unsigned long size, align;
- const unsigned long fibsize = 4096;
+ const unsigned long fibsize = dev->max_fib_size;
const unsigned long printfbufsiz = 256;
unsigned long host_rrq_size = 0;
struct aac_init *init;
@@ -91,7 +93,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
if (dev->max_fib_size != sizeof(struct hw_fib))
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
- init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
+ init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION);
init->fsrev = cpu_to_le32(dev->fsrev);
/*
@@ -140,7 +142,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
- init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */
+ /* number of MSI-X */
+ init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
}
@@ -179,7 +182,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
- q->numpending = 0;
+ atomic_set(&q->numpending, 0);
q->dev = dev;
init_waitqueue_head(&q->cmdready);
INIT_LIST_HEAD(&q->cmdq);
@@ -228,6 +231,12 @@ int aac_send_shutdown(struct aac_dev * dev)
/* FIB should be freed only after getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
+ dev->adapter_shutdown = 1;
+ if ((dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9) &&
+ dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_ENABLE_INTX);
return status;
}
@@ -350,8 +359,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->raw_io_interface = dev->raw_io_64 = 0;
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
- 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
+ 0, 0, 0, 0, 0, 0,
+ status+0, status+1, status+2, status+3, NULL)) &&
(status[0] == 0x00000001)) {
+ dev->doorbell_mask = status[3];
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
dev->raw_io_64 = 1;
dev->sync_mode = aac_sync_mode;
@@ -388,6 +399,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
}
}
+ dev->max_msix = 0;
+ dev->msi_enabled = 0;
+ dev->adapter_shutdown = 0;
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, status+4))
@@ -461,6 +475,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
if (host->can_queue > AAC_NUM_IO_FIB)
host->can_queue = AAC_NUM_IO_FIB;
+ if (dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ aac_define_int_mode(dev);
/*
* Ok now init the communication subsystem
*/
@@ -489,4 +508,79 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
return dev;
}
-
+static void aac_define_int_mode(struct aac_dev *dev)
+{
+
+ int i, msi_count;
+
+ msi_count = i = 0;
+ /* max. vectors from GET_COMM_PREFERRED_SETTINGS */
+ if (dev->max_msix == 0 ||
+ dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->sync_mode) {
+ dev->max_msix = 1;
+ dev->vector_cap =
+ dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB;
+ return;
+ }
+
+ msi_count = min(dev->max_msix,
+ (unsigned int)num_online_cpus());
+
+ dev->max_msix = msi_count;
+
+ if (msi_count > AAC_MAX_MSIX)
+ msi_count = AAC_MAX_MSIX;
+
+ for (i = 0; i < msi_count; i++)
+ dev->msixentry[i].entry = i;
+
+ if (msi_count > 1 &&
+ pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
+ i = pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count);
+ /* Check how many MSIX vectors are allocated */
+ if (i >= 0) {
+ dev->msi_enabled = 1;
+ if (i) {
+ msi_count = i;
+ if (pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count)) {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+ } else {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled) {
+ msi_count = 1;
+ i = pci_enable_msi(dev->pdev);
+
+ if (!i) {
+ dev->msi_enabled = 1;
+ dev->msi = 1;
+ } else {
+ printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled)
+ dev->max_msix = msi_count = 1;
+ else {
+ if (dev->max_msix > msi_count)
+ dev->max_msix = msi_count;
+ }
+ dev->vector_cap =
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
+ msi_count;
+}
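
aac_define_int_mode() above tries MSI-X first, retries with the adapter-reported vector count, then falls back to MSI and finally legacy INTx. A simplified sketch of that fallback order using the pci_enable_msix()/pci_enable_msi() interfaces this kernel generation still provides; demo_setup_vectors() is not the driver's function:

#include <linux/pci.h>

#define DEMO_MAX_VECTORS 8

static int demo_setup_vectors(struct pci_dev *pdev, int wanted)
{
	struct msix_entry entries[DEMO_MAX_VECTORS];
	int i, err;

	if (wanted > DEMO_MAX_VECTORS)
		wanted = DEMO_MAX_VECTORS;
	for (i = 0; i < wanted; i++)
		entries[i].entry = i;

	err = pci_enable_msix(pdev, entries, wanted);
	if (err == 0)
		return wanted;		/* full MSI-X allocation */
	if (err > 0 && pci_enable_msix(pdev, entries, err) == 0)
		return err;		/* reduced MSI-X allocation */

	if (pci_enable_msi(pdev) == 0)
		return 1;		/* single MSI vector */

	return 0;			/* fall back to legacy INTx */
}
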
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index cab190af6345..4da574925284 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -208,14 +208,10 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
void aac_fib_free(struct fib *fibptr)
{
- unsigned long flags, flagsv;
+ unsigned long flags;
- spin_lock_irqsave(&fibptr->event_lock, flagsv);
- if (fibptr->done == 2) {
- spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
+ if (fibptr->done == 2)
return;
- }
- spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@@ -321,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
/* Queue is full */
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
- qid, q->numpending);
+ qid, atomic_read(&q->numpending));
return 0;
} else {
*entry = q->base + *index;
@@ -414,7 +410,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
struct aac_dev * dev = fibptr->dev;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
unsigned long flags = 0;
- unsigned long qflags;
unsigned long mflags = 0;
unsigned long sflags = 0;
@@ -568,9 +563,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
int blink;
if (time_is_before_eq_jiffies(timeout)) {
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
- spin_lock_irqsave(q->lock, qflags);
- q->numpending--;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_dec(&q->numpending);
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
"Usually a result of a PCI interrupt routing problem;\n"
@@ -775,7 +768,6 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
int aac_fib_complete(struct fib *fibptr)
{
- unsigned long flags;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
/*
@@ -798,12 +790,6 @@ int aac_fib_complete(struct fib *fibptr)
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
- spin_lock_irqsave(&fibptr->event_lock, flags);
- if (fibptr->done == 2) {
- spin_unlock_irqrestore(&fibptr->event_lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&fibptr->event_lock, flags);
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
@@ -868,7 +854,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
* dispatches it to the appropriate routine for handling.
*/
-#define AIF_SNIFF_TIMEOUT (30*HZ)
+#define AIF_SNIFF_TIMEOUT (500*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
@@ -897,6 +883,39 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
switch (le32_to_cpu(aifcmd->command)) {
case AifCmdDriverNotify:
switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+ case AifRawDeviceRemove:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if ((container >> 28)) {
+ container = (u32)-1;
+ break;
+ }
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
+ break;
+ }
+ id = container & 0xFFFF;
+ if (id >= dev->maximum_num_physicals) {
+ container = (u32)-1;
+ break;
+ }
+ lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[0] ==
+ cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
+
+ if (device_config_needed == ADD) {
+ device = scsi_device_lookup(
+ dev->scsi_host_ptr,
+ channel, id, lun);
+ if (device) {
+ scsi_remove_device(device);
+ scsi_device_put(device);
+ }
+ }
+ break;
/*
* Morph or Expand complete
*/
@@ -1044,6 +1063,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
case EM_DRIVE_INSERTION:
case EM_DRIVE_REMOVAL:
+ case EM_SES_DRIVE_INSERTION:
+ case EM_SES_DRIVE_REMOVAL:
container = le32_to_cpu(
((__le32 *)aifcmd->data)[2]);
if ((container >> 28)) {
@@ -1069,8 +1090,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
}
channel = aac_phys_to_logical(channel);
device_config_needed =
- (((__le32 *)aifcmd->data)[3]
- == cpu_to_le32(EM_DRIVE_INSERTION)) ?
+ ((((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_DRIVE_INSERTION)) ||
+ (((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
ADD : DELETE;
break;
}
@@ -1247,12 +1270,13 @@ retry_next:
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
int index, quirks;
- int retval;
+ int retval, i;
struct Scsi_Host *host;
struct scsi_device *dev;
struct scsi_cmnd *command;
struct scsi_cmnd *command_list;
int jafo = 0;
+ int cpu;
/*
* Assumptions:
@@ -1315,7 +1339,33 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
aac->comm_phys = 0;
kfree(aac->queues);
aac->queues = NULL;
- free_irq(aac->pdev->irq, aac);
+ cpu = cpumask_first(cpu_online_mask);
+ if (aac->pdev->device == PMC_DEVICE_S6 ||
+ aac->pdev->device == PMC_DEVICE_S7 ||
+ aac->pdev->device == PMC_DEVICE_S8 ||
+ aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac->max_msix > 1) {
+ for (i = 0; i < aac->max_msix; i++) {
+ if (irq_set_affinity_hint(
+ aac->msixentry[i].vector,
+ NULL)) {
+ printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+ aac->name,
+ aac->id,
+ cpu);
+ }
+ cpu = cpumask_next(cpu,
+ cpu_online_mask);
+ free_irq(aac->msixentry[i].vector,
+ &(aac->aac_msix[i]));
+ }
+ pci_disable_msix(aac->pdev);
+ } else {
+ free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
+ }
+ } else {
+ free_irq(aac->pdev->irq, aac);
+ }
if (aac->msi)
pci_disable_msi(aac->pdev);
kfree(aac->fsa_dev);
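
The reset path in commsup.c now clears each vector's affinity hint and frees it with its per-vector context before disabling MSI-X. A hedged sketch of that teardown, assuming the vectors were requested with per-vector dev_id pointers; the names are illustrative, not the driver's:

#include <linux/interrupt.h>
#include <linux/pci.h>

static void demo_free_vectors(struct pci_dev *pdev,
			      struct msix_entry *entries,
			      void **ctx, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++) {
		irq_set_affinity_hint(entries[i].vector, NULL);
		free_irq(entries[i].vector, ctx[i]);
	}
	pci_disable_msix(pdev);
}
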
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index d81b2810f0f7..da9d9936e995 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -84,7 +84,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
* continue. The caller has already been notified that
* the fib timed out.
*/
- dev->queues->queue[AdapNormCmdQueue].numpending--;
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
spin_unlock_irqrestore(q->lock, flags);
@@ -354,7 +354,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
* continue. The caller has already been notified that
* the fib timed out.
*/
- dev->queues->queue[AdapNormCmdQueue].numpending--;
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
aac_fib_complete(fib);
@@ -389,8 +389,13 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
- fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
- fib->callback(fib->callback_data, fib);
+ if (likely(fib->callback && fib->callback_data)) {
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
+ fib->callback(fib->callback_data, fib);
+ } else {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
} else {
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
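
dpcsup.c above guards the fast-response path so the FIB callback is only invoked when both the function pointer and its context are set, completing and freeing the FIB otherwise so it is not leaked. A tiny illustrative sketch; demo_fib is a stand-in, not struct fib:

struct demo_fib {
	void (*callback)(void *context, struct demo_fib *fib);
	void *callback_data;
};

static void demo_finish_fib(struct demo_fib *fib)
{
	if (fib->callback && fib->callback_data)
		fib->callback(fib->callback_data, fib);
	else {
		/* no callback registered: complete and free the FIB here */
	}
}
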
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index fdcdf9f781bc..9eec02733c86 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -56,7 +56,7 @@
#include "aacraid.h"
-#define AAC_DRIVER_VERSION "1.2-0"
+#define AAC_DRIVER_VERSION "1.2-1"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
@@ -251,27 +251,15 @@ static struct aac_driver_ident aac_drivers[] = {
* TODO: unify with aac_scsi_cmd().
*/
-static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int aac_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *cmd)
{
- struct Scsi_Host *host = cmd->device->host;
- struct aac_dev *dev = (struct aac_dev *)host->hostdata;
- u32 count = 0;
- cmd->scsi_done = done;
- for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
- struct fib * fib = &dev->fibs[count];
- struct scsi_cmnd * command;
- if (fib->hw_fib_va->header.XferState &&
- ((command = fib->callback_data)) &&
- (command == cmd) &&
- (cmd->SCp.phase == AAC_OWNER_FIRMWARE))
- return 0; /* Already owned by Adapter */
- }
+ int r = 0;
cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
- return (aac_scsi_cmd(cmd) ? FAILED : 0);
+ r = (aac_scsi_cmd(cmd) ? FAILED : 0);
+ return r;
}
-static DEF_SCSI_QCMD(aac_queuecommand)
-
/**
* aac_info - Returns the host adapter name
* @shost: Scsi host to report on
@@ -713,7 +701,9 @@ static long aac_cfg_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int ret;
- if (!capable(CAP_SYS_RAWIO))
+ struct aac_dev *aac;
+ aac = (struct aac_dev *)file->private_data;
+ if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown)
return -EPERM;
mutex_lock(&aac_mutex);
ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
@@ -1082,6 +1072,9 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
+ int i;
+ int cpu;
+
if (aac->aif_thread) {
int i;
/* Clear out events first */
@@ -1095,9 +1088,37 @@ static void __aac_shutdown(struct aac_dev * aac)
}
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
- free_irq(aac->pdev->irq, aac);
+ cpu = cpumask_first(cpu_online_mask);
+ if (aac->pdev->device == PMC_DEVICE_S6 ||
+ aac->pdev->device == PMC_DEVICE_S7 ||
+ aac->pdev->device == PMC_DEVICE_S8 ||
+ aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac->max_msix > 1) {
+ for (i = 0; i < aac->max_msix; i++) {
+ if (irq_set_affinity_hint(
+ aac->msixentry[i].vector,
+ NULL)) {
+ printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+ aac->name,
+ aac->id,
+ cpu);
+ }
+ cpu = cpumask_next(cpu,
+ cpu_online_mask);
+ free_irq(aac->msixentry[i].vector,
+ &(aac->aac_msix[i]));
+ }
+ } else {
+ free_irq(aac->pdev->irq,
+ &(aac->aac_msix[0]));
+ }
+ } else {
+ free_irq(aac->pdev->irq, aac);
+ }
if (aac->msi)
pci_disable_msi(aac->pdev);
+ else if (aac->max_msix > 1)
+ pci_disable_msix(aac->pdev);
}
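
linit.c drops the locked _lck variant and the DEF_SCSI_QCMD() wrapper in favour of the host-lock-free queuecommand prototype. A sketch of that form, with demo_dispatch() standing in for aac_scsi_cmd() and a partial host template showing the hookup:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical stand-in for the driver's dispatch routine. */
static int demo_dispatch(struct scsi_cmnd *cmd)
{
	/* build and post the adapter command here */
	return 0;
}

/* Lockless prototype: called without the host lock held. */
static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	return demo_dispatch(cmd) ? FAILED : 0;
}

static struct scsi_host_template demo_template = {
	.queuecommand	= demo_queuecommand,
};
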
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 5c6a8703f535..9570612b80ce 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -400,16 +400,13 @@ int aac_rx_deliver_producer(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
- unsigned long qflags;
u32 Index;
unsigned long nointr = 0;
- spin_lock_irqsave(q->lock, qflags);
aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
- q->numpending++;
+ atomic_inc(&q->numpending);
*(q->headers.producer) = cpu_to_le32(Index + 1);
- spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
@@ -426,15 +423,12 @@ static int aac_rx_deliver_message(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
- unsigned long qflags;
u32 Index;
u64 addr;
volatile void __iomem *device;
unsigned long count = 10000000L; /* 50 seconds */
- spin_lock_irqsave(q->lock, qflags);
- q->numpending++;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_inc(&q->numpending);
for(;;) {
Index = rx_readl(dev, MUnit.InboundQueue);
if (unlikely(Index == 0xFFFFFFFFL))
@@ -442,9 +436,7 @@ static int aac_rx_deliver_message(struct fib * fib)
if (likely(Index != 0xFFFFFFFFL))
break;
if (--count == 0) {
- spin_lock_irqsave(q->lock, qflags);
- q->numpending--;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_dec(&q->numpending);
return -ETIMEDOUT;
}
udelay(5);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 9c65aed26212..4596e9dd757c 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -44,98 +44,128 @@
#include "aacraid.h"
-static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+static int aac_src_get_sync_status(struct aac_dev *dev);
+
+irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
- struct aac_dev *dev = dev_id;
+ struct aac_msix_ctx *ctx;
+ struct aac_dev *dev;
unsigned long bellbits, bellbits_shifted;
- int our_interrupt = 0;
- int isFastResponse;
+ int vector_no;
+ int isFastResponse, mode;
u32 index, handle;
- bellbits = src_readl(dev, MUnit.ODR_R);
- if (bellbits & PmDoorBellResponseSent) {
- bellbits = PmDoorBellResponseSent;
- /* handle async. status */
- src_writel(dev, MUnit.ODR_C, bellbits);
- src_readl(dev, MUnit.ODR_C);
- our_interrupt = 1;
- index = dev->host_rrq_idx;
- for (;;) {
- isFastResponse = 0;
- /* remove toggle bit (31) */
- handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff;
- /* check fast response bit (30) */
- if (handle & 0x40000000)
- isFastResponse = 1;
- handle &= 0x0000ffff;
- if (handle == 0)
- break;
-
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
-
- dev->host_rrq[index++] = 0;
- if (index == dev->scsi_host_ptr->can_queue +
- AAC_NUM_MGT_FIB)
- index = 0;
- dev->host_rrq_idx = index;
+ ctx = (struct aac_msix_ctx *)dev_id;
+ dev = ctx->dev;
+ vector_no = ctx->vector_no;
+
+ if (dev->msi_enabled) {
+ mode = AAC_INT_MODE_MSI;
+ if (vector_no == 0) {
+ bellbits = src_readl(dev, MUnit.ODR_MSI);
+ if (bellbits & 0x40000)
+ mode |= AAC_INT_MODE_AIF;
+ if (bellbits & 0x1000)
+ mode |= AAC_INT_MODE_SYNC;
}
} else {
- bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
- if (bellbits_shifted & DoorBellAifPending) {
+ mode = AAC_INT_MODE_INTX;
+ bellbits = src_readl(dev, MUnit.ODR_R);
+ if (bellbits & PmDoorBellResponseSent) {
+ bellbits = PmDoorBellResponseSent;
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ } else {
+ bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
- our_interrupt = 1;
- /* handle AIF */
- aac_intr_normal(dev, 0, 2, 0, NULL);
- } else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
- unsigned long sflags;
- struct list_head *entry;
- int send_it = 0;
- extern int aac_sync_mode;
+ if (bellbits_shifted & DoorBellAifPending)
+ mode |= AAC_INT_MODE_AIF;
+ else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
+ mode |= AAC_INT_MODE_SYNC;
+ }
+ }
+
+ if (mode & AAC_INT_MODE_SYNC) {
+ unsigned long sflags;
+ struct list_head *entry;
+ int send_it = 0;
+ extern int aac_sync_mode;
+
+ if (!aac_sync_mode && !dev->msi_enabled) {
src_writel(dev, MUnit.ODR_C, bellbits);
src_readl(dev, MUnit.ODR_C);
+ }
- if (!aac_sync_mode) {
- src_writel(dev, MUnit.ODR_C, bellbits);
- src_readl(dev, MUnit.ODR_C);
- our_interrupt = 1;
+ if (dev->sync_fib) {
+ if (dev->sync_fib->callback)
+ dev->sync_fib->callback(dev->sync_fib->callback_data,
+ dev->sync_fib);
+ spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+ if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+ dev->management_fib_count--;
+ up(&dev->sync_fib->event_wait);
}
-
- if (dev->sync_fib) {
- our_interrupt = 1;
- if (dev->sync_fib->callback)
- dev->sync_fib->callback(dev->sync_fib->callback_data,
- dev->sync_fib);
- spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
- if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
- dev->management_fib_count--;
- up(&dev->sync_fib->event_wait);
- }
- spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
- spin_lock_irqsave(&dev->sync_lock, sflags);
- if (!list_empty(&dev->sync_fib_list)) {
- entry = dev->sync_fib_list.next;
- dev->sync_fib = list_entry(entry, struct fib, fiblink);
- list_del(entry);
- send_it = 1;
- } else {
- dev->sync_fib = NULL;
- }
- spin_unlock_irqrestore(&dev->sync_lock, sflags);
- if (send_it) {
- aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
- (u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
- NULL, NULL, NULL, NULL, NULL);
- }
+ spin_unlock_irqrestore(&dev->sync_fib->event_lock,
+ sflags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (!list_empty(&dev->sync_fib_list)) {
+ entry = dev->sync_fib_list.next;
+ dev->sync_fib = list_entry(entry,
+ struct fib,
+ fiblink);
+ list_del(entry);
+ send_it = 1;
+ } else {
+ dev->sync_fib = NULL;
+ }
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ if (send_it) {
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)dev->sync_fib->hw_fib_pa,
+ 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
}
}
+ if (!dev->msi_enabled)
+ mode = 0;
+
+ }
+
+ if (mode & AAC_INT_MODE_AIF) {
+ /* handle AIF */
+ aac_intr_normal(dev, 0, 2, 0, NULL);
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
+ mode = 0;
}
- if (our_interrupt) {
- return IRQ_HANDLED;
+ if (mode) {
+ index = dev->host_rrq_idx[vector_no];
+
+ for (;;) {
+ isFastResponse = 0;
+ /* remove toggle bit (31) */
+ handle = (dev->host_rrq[index] & 0x7fffffff);
+ /* check fast response bit (30) */
+ if (handle & 0x40000000)
+ isFastResponse = 1;
+ handle &= 0x0000ffff;
+ if (handle == 0)
+ break;
+ if (dev->msi_enabled && dev->max_msix > 1)
+ atomic_dec(&dev->rrq_outstanding[vector_no]);
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ dev->host_rrq[index++] = 0;
+ if (index == (vector_no + 1) * dev->vector_cap)
+ index = vector_no * dev->vector_cap;
+ dev->host_rrq_idx[vector_no] = index;
+ }
+ mode = 0;
}
- return IRQ_NONE;
+
+ return IRQ_HANDLED;
}
/**
@@ -155,7 +185,7 @@ static void aac_src_disable_interrupt(struct aac_dev *dev)
static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
- src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8);
+ aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}
/**
@@ -174,6 +204,7 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
unsigned long start;
+ unsigned long delay;
int ok;
/*
@@ -191,7 +222,10 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Clear the synch command doorbell to start on a clean slate.
*/
- src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ if (!dev->msi_enabled)
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
/*
* Disable doorbell interrupts
@@ -213,19 +247,29 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
ok = 0;
start = jiffies;
- /*
- * Wait up to 5 minutes
- */
- while (time_before(jiffies, start+300*HZ)) {
+ if (command == IOP_RESET_ALWAYS) {
+ /* Wait up to 10 sec */
+ delay = 10*HZ;
+ } else {
+ /* Wait up to 5 minutes */
+ delay = 300*HZ;
+ }
+ while (time_before(jiffies, start+delay)) {
udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
/*
* Mon960 will set doorbell0 bit when it has completed the command.
*/
- if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+ if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
/*
* Clear the doorbell.
*/
- src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev,
+ AAC_CLEAR_SYNC_BIT);
+ else
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
ok = 1;
break;
}
@@ -254,11 +298,16 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
*r3 = readl(&dev->IndexRegs->Mailbox[3]);
if (r4)
*r4 = readl(&dev->IndexRegs->Mailbox[4]);
-
+ if (command == GET_COMM_PREFERRED_SETTINGS)
+ dev->max_msix =
+ readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
/*
* Clear the synch command doorbell.
*/
- src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ if (!dev->msi_enabled)
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
}
/*
@@ -335,9 +384,14 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
static void aac_src_start_adapter(struct aac_dev *dev)
{
struct aac_init *init;
+ int i;
/* reset host_rrq_idx first */
- dev->host_rrq_idx = 0;
+ for (i = 0; i < dev->max_msix; i++) {
+ dev->host_rrq_idx[i] = i * dev->vector_cap;
+ atomic_set(&dev->rrq_outstanding[i], 0);
+ }
+ dev->fibs_pushed_no = 0;
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
@@ -390,15 +444,39 @@ static int aac_src_deliver_message(struct fib *fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
- unsigned long qflags;
u32 fibsize;
dma_addr_t address;
struct aac_fib_xporthdr *pFibX;
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
- spin_lock_irqsave(q->lock, qflags);
- q->numpending++;
- spin_unlock_irqrestore(q->lock, qflags);
+ atomic_inc(&q->numpending);
+
+ if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
+ dev->max_msix > 1) {
+ u_int16_t vector_no, first_choice = 0xffff;
+
+ vector_no = dev->fibs_pushed_no % dev->max_msix;
+ do {
+ vector_no += 1;
+ if (vector_no == dev->max_msix)
+ vector_no = 1;
+ if (atomic_read(&dev->rrq_outstanding[vector_no]) <
+ dev->vector_cap)
+ break;
+ if (0xffff == first_choice)
+ first_choice = vector_no;
+ else if (vector_no == first_choice)
+ break;
+ } while (1);
+ if (vector_no == first_choice)
+ vector_no = 0;
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+ if (dev->fibs_pushed_no == 0xffffffff)
+ dev->fibs_pushed_no = 0;
+ else
+ dev->fibs_pushed_no++;
+ fib->hw_fib_va->header.Handle += (vector_no << 16);
+ }
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Calculate the amount to the fibsize bits */
@@ -498,15 +576,34 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
if (bled)
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
dev->name, dev->id, bled);
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
- if (bled || (var != 0x00000001))
- return -EINVAL;
- if (dev->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_DOORBELL_RESET) {
- src_writel(dev, MUnit.IDR, reset_mask);
+ if ((bled || (var != 0x00000001)) &&
+ !dev->doorbell_mask)
+ return -EINVAL;
+ else if (dev->doorbell_mask) {
+ reset_mask = dev->doorbell_mask;
+ bled = 0;
+ var = 0x00000001;
+ }
+
+ if ((dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
+ aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+ dev->msi_enabled = 0;
msleep(5000); /* Delay 5 seconds */
}
+
+ if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) {
+ src_writel(dev, MUnit.IDR, reset_mask);
+ ssleep(45);
+ } else {
+ src_writel(dev, MUnit.IDR, 0x100);
+ ssleep(45);
+ }
}
if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
@@ -527,7 +624,6 @@ int aac_src_select_comm(struct aac_dev *dev, int comm)
{
switch (comm) {
case AAC_COMM_MESSAGE:
- dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
dev->a_ops.adapter_intr = aac_src_intr_message;
dev->a_ops.adapter_deliver = aac_src_deliver_message;
break;
@@ -625,6 +721,7 @@ int aac_src_init(struct aac_dev *dev)
*/
dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_notify = aac_src_notify_adapter;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
@@ -646,8 +743,11 @@ int aac_src_init(struct aac_dev *dev)
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+ dev->aac_msix[0].vector_no = 0;
+ dev->aac_msix[0].dev = dev;
+
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
@@ -659,6 +759,7 @@ int aac_src_init(struct aac_dev *dev)
dev->dbg_base = pci_resource_start(dev->pdev, 2);
dev->dbg_base_mapped = dev->regs.src.bar1;
dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
aac_adapter_enable_int(dev);
@@ -688,7 +789,9 @@ int aac_srcv_init(struct aac_dev *dev)
unsigned long status;
int restart = 0;
int instance = dev->id;
+ int i, j;
const char *name = dev->name;
+ int cpu;
dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
dev->a_ops.adapter_comm = aac_src_select_comm;
@@ -784,6 +887,7 @@ int aac_srcv_init(struct aac_dev *dev)
*/
dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_notify = aac_src_notify_adapter;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
@@ -802,18 +906,54 @@ int aac_srcv_init(struct aac_dev *dev)
goto error_iounmap;
if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
goto error_iounmap;
- dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
- if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED, "aacraid", dev) < 0) {
- if (dev->msi)
- pci_disable_msi(dev->pdev);
- printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
- name, instance);
- goto error_iounmap;
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
+ if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < dev->max_msix; i++) {
+ dev->aac_msix[i].vector_no = i;
+ dev->aac_msix[i].dev = dev;
+
+ if (request_irq(dev->msixentry[i].vector,
+ dev->a_ops.adapter_intr,
+ 0,
+ "aacraid",
+ &(dev->aac_msix[i]))) {
+ printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
+ name, instance, i);
+ for (j = 0 ; j < i ; j++)
+ free_irq(dev->msixentry[j].vector,
+ &(dev->aac_msix[j]));
+ pci_disable_msix(dev->pdev);
+ goto error_iounmap;
+ }
+ if (irq_set_affinity_hint(
+ dev->msixentry[i].vector,
+ get_cpu_mask(cpu))) {
+ printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
+ name, instance, cpu);
+ }
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ } else {
+ dev->aac_msix[0].vector_no = 0;
+ dev->aac_msix[0].dev = dev;
+
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED,
+ "aacraid",
+ &(dev->aac_msix[0])) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
}
dev->dbg_base = dev->base_start;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
aac_adapter_enable_int(dev);
@@ -831,3 +971,93 @@ error_iounmap:
return -1;
}
+void aac_src_access_devreg(struct aac_dev *dev, int mode)
+{
+ u_int32_t val;
+
+ switch (mode) {
+ case AAC_ENABLE_INTERRUPT:
+ src_writel(dev,
+ MUnit.OIMR,
+ dev->OIMR = (dev->msi_enabled ?
+ AAC_INT_ENABLE_TYPE1_MSIX :
+ AAC_INT_ENABLE_TYPE1_INTX));
+ break;
+
+ case AAC_DISABLE_INTERRUPT:
+ src_writel(dev,
+ MUnit.OIMR,
+ dev->OIMR = AAC_INT_DISABLE_ALL);
+ break;
+
+ case AAC_ENABLE_MSIX:
+ /* set bit 6 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x40;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ /* unmask int. */
+ val = PMC_ALL_INTERRUPT_BITS;
+ src_writel(dev, MUnit.IOAR, val);
+ val = src_readl(dev, MUnit.OIMR);
+ src_writel(dev,
+ MUnit.OIMR,
+ val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
+ break;
+
+ case AAC_DISABLE_MSIX:
+ /* reset bit 6 */
+ val = src_readl(dev, MUnit.IDR);
+ val &= ~0x40;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_CLEAR_AIF_BIT:
+ /* set bit 5 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x20;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_CLEAR_SYNC_BIT:
+ /* set bit 4 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x10;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_ENABLE_INTX:
+ /* set bit 7 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x80;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ /* unmask int. */
+ val = PMC_ALL_INTERRUPT_BITS;
+ src_writel(dev, MUnit.IOAR, val);
+ src_readl(dev, MUnit.IOAR);
+ val = src_readl(dev, MUnit.OIMR);
+ src_writel(dev, MUnit.OIMR,
+ val & (~(PMC_GLOBAL_INT_BIT2)));
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int aac_src_get_sync_status(struct aac_dev *dev)
+{
+
+ int val;
+
+ if (dev->msi_enabled)
+ val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0;
+ else
+ val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
+
+ return val;
+}
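The per-vector load balancing added to aac_src_deliver_message() above is easiest to follow in isolation. The sketch below is a minimal user-space restatement of that selection loop, assuming plain counters in place of the driver's atomic_t values and a hypothetical pick_vector() helper; it illustrates the technique shown in the hunk, it is not code from the patch itself.

/*
 * Illustrative sketch only: vectors 1..max_msix-1 are tried round-robin,
 * skipping any vector whose outstanding count has reached vector_cap; if
 * every candidate is full, vector 0 is used as a fallback.  The caller is
 * assumed to guarantee max_msix > 1, as the driver does before taking
 * this path.
 */
#include <stdio.h>

static unsigned int pick_vector(unsigned int fibs_pushed, unsigned int max_msix,
				const unsigned int *outstanding,
				unsigned int vector_cap)
{
	unsigned int vector = fibs_pushed % max_msix;
	unsigned int first_choice = 0xffff;

	for (;;) {
		vector++;
		if (vector == max_msix)
			vector = 1;		/* wrap around, skipping vector 0 */
		if (outstanding[vector] < vector_cap)
			break;			/* found a vector with spare capacity */
		if (first_choice == 0xffff)
			first_choice = vector;	/* remember where the scan started */
		else if (vector == first_choice)
			break;			/* full circle: every vector is busy */
	}
	if (vector == first_choice)
		vector = 0;			/* all I/O vectors full, fall back to 0 */
	return vector;
}

int main(void)
{
	unsigned int outstanding[4] = { 0, 8, 8, 3 };	/* vectors 1 and 2 at cap */
	unsigned int v = pick_vector(5, 4, outstanding, 8);

	printf("selected vector %u\n", v);		/* prints 3 */
	return 0;
}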
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 770c48ddbe5e..b95d2779f467 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -1,28 +1,9 @@
-/* $Id: aha1542.c,v 1.1 1992/07/24 06:27:38 root Exp root $
- * linux/kernel/aha1542.c
+/*
+ * Driver for Adaptec AHA-1542 SCSI host adapters
*
* Copyright (C) 1992 Tommy Thorn
* Copyright (C) 1993, 1994, 1995 Eric Youngdale
- *
- * Modified by Eric Youngdale
- * Use request_irq and request_dma to help prevent unexpected conflicts
- * Set up on-board DMA controller, such that we do not have to
- * have the bios enabled to use the aha1542.
- * Modified by David Gentzel
- * Don't call request_dma if dma mask is 0 (for BusLogic BT-445S VL-Bus
- * controller).
- * Modified by Matti Aarnio
- * Accept parameters from LILO cmd-line. -- 1-Oct-94
- * Modified by Mike McLagan <mike.mclagan@linux.org>
- * Recognise extended mode on AHA1542CP, different bit than 1542CF
- * 1-Jan-97
- * Modified by Bjorn L. Thordarson and Einar Thor Einarsson
- * Recognize that DMA0 is valid DMA channel -- 13-Jul-98
- * Modified by Chris Faulhaber <jedgar@fxp.org>
- * Added module command-line options
- * 19-Jul-99
- * Modified by Adam Fritzler
- * Added proper detection of the AHA-1640 (MCA, now deleted)
+ * Copyright (C) 2015 Ondrej Zary
*/
#include <linux/module.h>
@@ -30,96 +11,44 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/isapnp.h>
-#include <linux/blkdev.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
#include <linux/slab.h>
-
+#include <linux/io.h>
#include <asm/dma.h>
-#include <asm/io.h>
-
-#include "scsi.h"
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "aha1542.h"
-#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
-#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
-
-#include <linux/stat.h>
-
-#ifdef DEBUG
-#define DEB(x) x
-#else
-#define DEB(x)
-#endif
-
-/*
- static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/aha1542.c,v 1.1 1992/07/24 06:27:38 root Exp root $";
- */
-
-/* The adaptec can be configured for quite a number of addresses, but
- I generally do not want the card poking around at random. We allow
- two addresses - this allows people to use the Adaptec with a Midi
- card, which also used 0x330 -- can be overridden with LILO! */
+#define MAXBOARDS 4
-#define MAXBOARDS 4 /* Increase this and the sizes of the
- arrays below, if you need more.. */
-
-/* Boards 3,4 slots are reserved for ISAPnP scans */
-
-static unsigned int bases[MAXBOARDS] __initdata = {0x330, 0x334, 0, 0};
-
-/* set by aha1542_setup according to the command line; they also may
- be marked __initdata, but require zero initializers then */
-
-static int setup_called[MAXBOARDS];
-static int setup_buson[MAXBOARDS];
-static int setup_busoff[MAXBOARDS];
-static int setup_dmaspeed[MAXBOARDS] __initdata = { -1, -1, -1, -1 };
-
-/*
- * LILO/Module params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]
- *
- * Where: <PORTBASE> is any of the valid AHA addresses:
- * 0x130, 0x134, 0x230, 0x234, 0x330, 0x334
- * <BUSON> is the time (in microsecs) that AHA spends on the AT-bus
- * when transferring data. 1542A power-on default is 11us,
- * valid values are in range: 2..15 (decimal)
- * <BUSOFF> is the time that AHA spends OFF THE BUS after while
- * it is transferring data (not to monopolize the bus).
- * Power-on default is 4us, valid range: 1..64 microseconds.
- * <DMASPEED> Default is jumper selected (1542A: on the J1),
- * but experimenter can alter it with this.
- * Valid values: 5, 6, 7, 8, 10 (MB/s)
- * Factory default is 5 MB/s.
- */
-
-#if defined(MODULE)
-static bool isapnp = 0;
-static int aha1542[] = {0x330, 11, 4, -1};
-module_param_array(aha1542, int, NULL, 0);
+static bool isapnp = 1;
module_param(isapnp, bool, 0);
+MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)");
-static struct isapnp_device_id id_table[] __initdata = {
- {
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1542),
- 0
- },
- {0}
-};
+static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 };
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)");
-MODULE_DEVICE_TABLE(isapnp, id_table);
+/* time AHA spends on the AT-bus during data transfer */
+static int bus_on[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 11us */
+module_param_array(bus_on, int, NULL, 0);
+MODULE_PARM_DESC(bus_on, "bus on time [us] (2-15, default=-1 [HW default: 11])");
-#else
-static int isapnp = 1;
-#endif
+/* time AHA spends off the bus (not to monopolize it) during data transfer */
+static int bus_off[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 4us */
+module_param_array(bus_off, int, NULL, 0);
+MODULE_PARM_DESC(bus_off, "bus off time [us] (1-64, default=-1 [HW default: 4])");
+
+/* default is jumper selected (J1 on 1542A), factory default = 5 MB/s */
+static int dma_speed[MAXBOARDS] = { -1, -1, -1, -1 };
+module_param_array(dma_speed, int, NULL, 0);
+MODULE_PARM_DESC(dma_speed, "DMA speed [MB/s] (5,6,7,8,10, default=-1 [by jumper])");
-#define BIOS_TRANSLATION_1632 0 /* Used by some old 1542A boards */
#define BIOS_TRANSLATION_6432 1 /* Default case these days */
#define BIOS_TRANSLATION_25563 2 /* Big disk case */
@@ -128,134 +57,71 @@ struct aha1542_hostdata {
int bios_translation; /* Mapping bios uses - for compatibility */
int aha1542_last_mbi_used;
int aha1542_last_mbo_used;
- Scsi_Cmnd *SCint[AHA1542_MAILBOXES];
+ struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES];
struct mailbox mb[2 * AHA1542_MAILBOXES];
struct ccb ccb[AHA1542_MAILBOXES];
};
-#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
-
-static DEFINE_SPINLOCK(aha1542_lock);
-
-
-
-#define WAITnexttimeout 3000000
-
-static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt);
-static int aha1542_restart(struct Scsi_Host *shost);
-static void aha1542_intr_handle(struct Scsi_Host *shost);
+static inline void aha1542_intr_reset(u16 base)
+{
+ outb(IRST, CONTROL(base));
+}
-#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
+static inline bool wait_mask(u16 port, u8 mask, u8 allof, u8 noneof, int timeout)
+{
+ bool delayed = true;
-#define WAIT(port, mask, allof, noneof) \
- { register int WAITbits; \
- register int WAITtimeout = WAITnexttimeout; \
- while (1) { \
- WAITbits = inb(port) & (mask); \
- if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
- break; \
- if (--WAITtimeout == 0) goto fail; \
- } \
- }
+ if (timeout == 0) {
+ timeout = 3000000;
+ delayed = false;
+ }
-/* Similar to WAIT, except we use the udelay call to regulate the
- amount of time we wait. */
-#define WAITd(port, mask, allof, noneof, timeout) \
- { register int WAITbits; \
- register int WAITtimeout = timeout; \
- while (1) { \
- WAITbits = inb(port) & (mask); \
- if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
- break; \
- mdelay(1); \
- if (--WAITtimeout == 0) goto fail; \
- } \
- }
+ while (1) {
+ u8 bits = inb(port) & mask;
+ if ((bits & allof) == allof && ((bits & noneof) == 0))
+ break;
+ if (delayed)
+ mdelay(1);
+ if (--timeout == 0)
+ return false;
+ }
-static void aha1542_stat(void)
-{
-/* int s = inb(STATUS), i = inb(INTRFLAGS);
- printk("status=%x intrflags=%x\n", s, i, WAITnexttimeout-WAITtimeout); */
+ return true;
}
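	/*
	 * Usage sketch (illustration only, not part of the patch): the old
	 * WAIT()/WAITd() macros, which jumped to a local "fail:" label on
	 * timeout, map onto the new helper as plain boolean calls.  Passing
	 * a timeout of 0 selects the tight 3,000,000-iteration busy poll of
	 * the old WAIT(); a non-zero value polls with mdelay(1) between
	 * reads, as WAITd() did.
	 *
	 *	old:	WAIT(STATUS(base), CDF, 0, CDF);
	 *	new:	if (!wait_mask(STATUS(base), CDF, 0, CDF, 0))
	 *			return 1;
	 *
	 *	old:	WAITd(STATUS(base), DF, DF, 0, 100);
	 *	new:	if (!wait_mask(STATUS(base), DF, DF, 0, 100))
	 *			return 1;
	 */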
-/* This is a bit complicated, but we need to make sure that an interrupt
- routine does not send something out while we are in the middle of this.
- Fortunately, it is only at boot time that multi-byte messages
- are ever sent. */
-static int aha1542_out(unsigned int base, unchar * cmdp, int len)
+static int aha1542_outb(unsigned int base, u8 val)
{
- unsigned long flags = 0;
- int got_lock;
-
- if (len == 1) {
- got_lock = 0;
- while (1 == 1) {
- WAIT(STATUS(base), CDF, 0, CDF);
- spin_lock_irqsave(&aha1542_lock, flags);
- if (inb(STATUS(base)) & CDF) {
- spin_unlock_irqrestore(&aha1542_lock, flags);
- continue;
- }
- outb(*cmdp, DATA(base));
- spin_unlock_irqrestore(&aha1542_lock, flags);
- return 0;
- }
- } else {
- spin_lock_irqsave(&aha1542_lock, flags);
- got_lock = 1;
- while (len--) {
- WAIT(STATUS(base), CDF, 0, CDF);
- outb(*cmdp++, DATA(base));
- }
- spin_unlock_irqrestore(&aha1542_lock, flags);
- }
+ if (!wait_mask(STATUS(base), CDF, 0, CDF, 0))
+ return 1;
+ outb(val, DATA(base));
+
return 0;
-fail:
- if (got_lock)
- spin_unlock_irqrestore(&aha1542_lock, flags);
- printk(KERN_ERR "aha1542_out failed(%d): ", len + 1);
- aha1542_stat();
- return 1;
}
-/* Only used at boot time, so we do not need to worry about latency as much
- here */
-
-static int __init aha1542_in(unsigned int base, unchar * cmdp, int len)
+static int aha1542_out(unsigned int base, u8 *buf, int len)
{
- unsigned long flags;
-
- spin_lock_irqsave(&aha1542_lock, flags);
while (len--) {
- WAIT(STATUS(base), DF, DF, 0);
- *cmdp++ = inb(DATA(base));
+ if (!wait_mask(STATUS(base), CDF, 0, CDF, 0))
+ return 1;
+ outb(*buf++, DATA(base));
}
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ if (!wait_mask(INTRFLAGS(base), INTRMASK, HACC, 0, 0))
+ return 1;
+
return 0;
-fail:
- spin_unlock_irqrestore(&aha1542_lock, flags);
- printk(KERN_ERR "aha1542_in failed(%d): ", len + 1);
- aha1542_stat();
- return 1;
}
-/* Similar to aha1542_in, except that we wait a very short period of time.
- We use this if we know the board is alive and awake, but we are not sure
- if the board will respond to the command we are about to send or not */
-static int __init aha1542_in1(unsigned int base, unchar * cmdp, int len)
-{
- unsigned long flags;
+/* Only used at boot time, so we do not need to worry about latency as much
+ here */
- spin_lock_irqsave(&aha1542_lock, flags);
+static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout)
+{
while (len--) {
- WAITd(STATUS(base), DF, DF, 0, 100);
- *cmdp++ = inb(DATA(base));
+ if (!wait_mask(STATUS(base), DF, DF, 0, timeout))
+ return 1;
+ *buf++ = inb(DATA(base));
}
- spin_unlock_irqrestore(&aha1542_lock, flags);
return 0;
-fail:
- spin_unlock_irqrestore(&aha1542_lock, flags);
- return 1;
}
static int makecode(unsigned hosterr, unsigned scsierr)
@@ -297,7 +163,9 @@ static int makecode(unsigned hosterr, unsigned scsierr)
case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
length segment or invalid segment list boundaries was received.
A CCB parameter was invalid. */
- DEB(printk("Aha1542: %x %x\n", hosterr, scsierr));
+#ifdef DEBUG
+ printk("Aha1542: %x %x\n", hosterr, scsierr);
+#endif
hosterr = DID_ERROR; /* Couldn't find any better */
break;
@@ -314,106 +182,74 @@ static int makecode(unsigned hosterr, unsigned scsierr)
return scsierr | (hosterr << 16);
}
-static int __init aha1542_test_port(int bse, struct Scsi_Host *shpnt)
+static int aha1542_test_port(struct Scsi_Host *sh)
{
- unchar inquiry_cmd[] = {CMD_INQUIRY};
- unchar inquiry_result[4];
- unchar *cmdp;
- int len;
- volatile int debug = 0;
+ u8 inquiry_result[4];
+ int i;
/* Quick and dirty test for presence of the card. */
- if (inb(STATUS(bse)) == 0xff)
+ if (inb(STATUS(sh->io_port)) == 0xff)
return 0;
/* Reset the adapter. I ought to make a hard reset, but it's not really necessary */
- /* DEB(printk("aha1542_test_port called \n")); */
-
/* In case some other card was probing here, reset interrupts */
- aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
- outb(SRST | IRST /*|SCRST */ , CONTROL(bse));
+ outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port));
mdelay(20); /* Wait a little bit for things to settle down. */
- debug = 1;
/* Expect INIT and IDLE, any of the others are bad */
- WAIT(STATUS(bse), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
+ if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0))
+ return 0;
- debug = 2;
/* Shouldn't have generated any interrupts during reset */
- if (inb(INTRFLAGS(bse)) & INTRMASK)
- goto fail;
-
+ if (inb(INTRFLAGS(sh->io_port)) & INTRMASK)
+ return 0;
/* Perform a host adapter inquiry instead so we do not need to set
up the mailboxes ahead of time */
- aha1542_out(bse, inquiry_cmd, 1);
+ aha1542_outb(sh->io_port, CMD_INQUIRY);
- debug = 3;
- len = 4;
- cmdp = &inquiry_result[0];
-
- while (len--) {
- WAIT(STATUS(bse), DF, DF, 0);
- *cmdp++ = inb(DATA(bse));
+ for (i = 0; i < 4; i++) {
+ if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0))
+ return 0;
+ inquiry_result[i] = inb(DATA(sh->io_port));
}
- debug = 8;
/* Reading port should reset DF */
- if (inb(STATUS(bse)) & DF)
- goto fail;
+ if (inb(STATUS(sh->io_port)) & DF)
+ return 0;
- debug = 9;
	/* When HACC, command is completed, and we're through testing */
- WAIT(INTRFLAGS(bse), HACC, HACC, 0);
- /* now initialize adapter */
+ if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0))
+ return 0;
- debug = 10;
/* Clear interrupts */
- outb(IRST, CONTROL(bse));
-
- debug = 11;
-
- return debug; /* 1 = ok */
-fail:
- return 0; /* 0 = not ok */
-}
+ outb(IRST, CONTROL(sh->io_port));
-/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */
-static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id)
-{
- unsigned long flags;
- struct Scsi_Host *shost = dev_id;
-
- spin_lock_irqsave(shost->host_lock, flags);
- aha1542_intr_handle(shost);
- spin_unlock_irqrestore(shost->host_lock, flags);
- return IRQ_HANDLED;
+ return 1;
}
-/* A "high" level interrupt handler */
-static void aha1542_intr_handle(struct Scsi_Host *shost)
+static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
{
- void (*my_done) (Scsi_Cmnd *) = NULL;
+ struct Scsi_Host *sh = dev_id;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ void (*my_done)(struct scsi_cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
int number_serviced;
unsigned long flags;
- Scsi_Cmnd *SCtmp;
+ struct scsi_cmnd *tmp_cmd;
int flag;
- int needs_restart;
- struct mailbox *mb;
- struct ccb *ccb;
-
- mb = HOSTDATA(shost)->mb;
- ccb = HOSTDATA(shost)->ccb;
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
#ifdef DEBUG
{
- flag = inb(INTRFLAGS(shost->io_port));
- printk(KERN_DEBUG "aha1542_intr_handle: ");
+ flag = inb(INTRFLAGS(sh->io_port));
+ shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: ");
if (!(flag & ANYINTR))
printk("no interrupt?");
if (flag & MBIF)
@@ -424,14 +260,14 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
printk("HACC ");
if (flag & SCRD)
printk("SCRD ");
- printk("status %02x\n", inb(STATUS(shost->io_port)));
+ printk("status %02x\n", inb(STATUS(sh->io_port)));
};
#endif
number_serviced = 0;
- needs_restart = 0;
- while (1 == 1) {
- flag = inb(INTRFLAGS(shost->io_port));
+ spin_lock_irqsave(sh->host_lock, flags);
+ while (1) {
+ flag = inb(INTRFLAGS(sh->io_port));
/* Check for unusual interrupts. If any of these happen, we should
probably do something special, but for now just printing a message
@@ -442,15 +278,12 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
printk("MBOF ");
if (flag & HACC)
printk("HACC ");
- if (flag & SCRD) {
- needs_restart = 1;
+ if (flag & SCRD)
printk("SCRD ");
- }
}
- aha1542_intr_reset(shost->io_port);
+ aha1542_intr_reset(sh->io_port);
- spin_lock_irqsave(&aha1542_lock, flags);
- mbi = HOSTDATA(shost)->aha1542_last_mbi_used + 1;
+ mbi = aha1542->aha1542_last_mbi_used + 1;
if (mbi >= 2 * AHA1542_MAILBOXES)
mbi = AHA1542_MAILBOXES;
@@ -460,57 +293,51 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
mbi++;
if (mbi >= 2 * AHA1542_MAILBOXES)
mbi = AHA1542_MAILBOXES;
- } while (mbi != HOSTDATA(shost)->aha1542_last_mbi_used);
+ } while (mbi != aha1542->aha1542_last_mbi_used);
if (mb[mbi].status == 0) {
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ spin_unlock_irqrestore(sh->host_lock, flags);
/* Hmm, no mail. Must have read it the last time around */
- if (!number_serviced && !needs_restart)
- printk(KERN_WARNING "aha1542.c: interrupt received, but no mail.\n");
- /* We detected a reset. Restart all pending commands for
- devices that use the hard reset option */
- if (needs_restart)
- aha1542_restart(shost);
- return;
+ if (!number_serviced)
+ shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
+ return IRQ_HANDLED;
};
- mbo = (scsi2int(mb[mbi].ccbptr) - (SCSI_BUF_PA(&ccb[0]))) / sizeof(struct ccb);
+ mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb);
mbistatus = mb[mbi].status;
mb[mbi].status = 0;
- HOSTDATA(shost)->aha1542_last_mbi_used = mbi;
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ aha1542->aha1542_last_mbi_used = mbi;
#ifdef DEBUG
- {
- if (ccb[mbo].tarstat | ccb[mbo].hastat)
- printk(KERN_DEBUG "aha1542_command: returning %x (status %d)\n",
- ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
- };
+ if (ccb[mbo].tarstat | ccb[mbo].hastat)
+ shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n",
+ ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
#endif
if (mbistatus == 3)
continue; /* Aborted command not found */
#ifdef DEBUG
- printk(KERN_DEBUG "...done %d %d\n", mbo, mbi);
+ shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi);
#endif
- SCtmp = HOSTDATA(shost)->SCint[mbo];
+ tmp_cmd = aha1542->int_cmds[mbo];
- if (!SCtmp || !SCtmp->scsi_done) {
- printk(KERN_WARNING "aha1542_intr_handle: Unexpected interrupt\n");
- printk(KERN_WARNING "tarstat=%x, hastat=%x idlun=%x ccb#=%d \n", ccb[mbo].tarstat,
+ if (!tmp_cmd || !tmp_cmd->scsi_done) {
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n");
+ shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat,
ccb[mbo].hastat, ccb[mbo].idlun, mbo);
- return;
+ return IRQ_HANDLED;
}
- my_done = SCtmp->scsi_done;
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
+ my_done = tmp_cmd->scsi_done;
+ kfree(tmp_cmd->host_scribble);
+ tmp_cmd->host_scribble = NULL;
/* Fetch the sense data, and tuck it away, in the required slot. The
Adaptec automatically fetches it, and there is no guarantee that
we will still have it in the cdb when we come back */
if (ccb[mbo].tarstat == 2)
- memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
+ memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
SCSI_SENSE_BUFFERSIZE);
@@ -525,166 +352,121 @@ static void aha1542_intr_handle(struct Scsi_Host *shost)
#ifdef DEBUG
if (errstatus)
- printk(KERN_DEBUG "(aha1542 error:%x %x %x) ", errstatus,
+ shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus,
ccb[mbo].hastat, ccb[mbo].tarstat);
+ if (ccb[mbo].tarstat == 2)
+ print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12);
+ if (errstatus)
+ printk("aha1542_intr_handle: returning %6x\n", errstatus);
#endif
-
- if (ccb[mbo].tarstat == 2) {
-#ifdef DEBUG
- int i;
-#endif
- DEB(printk("aha1542_intr_handle: sense:"));
-#ifdef DEBUG
- for (i = 0; i < 12; i++)
- printk("%02x ", ccb[mbo].cdb[ccb[mbo].cdblen + i]);
- printk("\n");
-#endif
- /*
- DEB(printk("aha1542_intr_handle: buf:"));
- for (i = 0; i < bufflen; i++)
- printk("%02x ", ((unchar *)buff)[i]);
- printk("\n");
- */
- }
- DEB(if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus));
- SCtmp->result = errstatus;
- HOSTDATA(shost)->SCint[mbo] = NULL; /* This effectively frees up the mailbox slot, as
- far as queuecommand is concerned */
- my_done(SCtmp);
+ tmp_cmd->result = errstatus;
+ aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as
+ far as queuecommand is concerned */
+ my_done(tmp_cmd);
number_serviced++;
};
}
-static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
- unchar ahacmd = CMD_START_SCSI;
- unchar direction;
- unchar *cmd = (unchar *) SCpnt->cmnd;
- unchar target = SCpnt->device->id;
- unchar lun = SCpnt->device->lun;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ u8 direction;
+ u8 target = cmd->device->id;
+ u8 lun = cmd->device->lun;
unsigned long flags;
- int bufflen = scsi_bufflen(SCpnt);
- int mbo;
- struct mailbox *mb;
- struct ccb *ccb;
-
- DEB(int i);
+ int bufflen = scsi_bufflen(cmd);
+ int mbo, sg_count;
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
+ struct chain *cptr;
- mb = HOSTDATA(SCpnt->device->host)->mb;
- ccb = HOSTDATA(SCpnt->device->host)->ccb;
-
- DEB(if (target > 1) {
- SCpnt->result = DID_TIME_OUT << 16;
- done(SCpnt); return 0;
- }
- );
-
- if (*cmd == REQUEST_SENSE) {
+ if (*cmd->cmnd == REQUEST_SENSE) {
/* Don't do the command - we have the sense data already */
-#if 0
- /* scsi_request_sense() provides a buffer of size 256,
- so there is no reason to expect equality */
- if (bufflen != SCSI_SENSE_BUFFERSIZE)
- printk(KERN_CRIT "aha1542: Wrong buffer length supplied "
- "for request sense (%d)\n", bufflen);
-#endif
- SCpnt->result = 0;
- done(SCpnt);
+ cmd->result = 0;
+ cmd->scsi_done(cmd);
return 0;
}
#ifdef DEBUG
- if (*cmd == READ_10 || *cmd == WRITE_10)
- i = xscsi2int(cmd + 2);
- else if (*cmd == READ_6 || *cmd == WRITE_6)
- i = scsi2int(cmd + 2);
- else
- i = -1;
- if (done)
- printk(KERN_DEBUG "aha1542_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
- else
- printk(KERN_DEBUG "aha1542_command: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
- aha1542_stat();
- printk(KERN_DEBUG "aha1542_queuecommand: dumping scsi cmd:");
- for (i = 0; i < SCpnt->cmd_len; i++)
- printk("%02x ", cmd[i]);
- printk("\n");
- if (*cmd == WRITE_10 || *cmd == WRITE_6)
- return 0; /* we are still testing, so *don't* write */
+ {
+ int i = -1;
+ if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10)
+ i = xscsi2int(cmd->cmnd + 2);
+ else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6)
+ i = scsi2int(cmd->cmnd + 2);
+ shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d",
+ target, *cmd->cmnd, i, bufflen);
+ print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
+ }
#endif
+ if (bufflen) { /* allocate memory before taking host_lock */
+ sg_count = scsi_sg_count(cmd);
+ cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
+ if (!cptr)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
/* Use the outgoing mailboxes in a round-robin fashion, because this
is how the host adapter will scan for them */
- spin_lock_irqsave(&aha1542_lock, flags);
- mbo = HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used + 1;
+ spin_lock_irqsave(sh->host_lock, flags);
+ mbo = aha1542->aha1542_last_mbo_used + 1;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
do {
- if (mb[mbo].status == 0 && HOSTDATA(SCpnt->device->host)->SCint[mbo] == NULL)
+ if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
break;
mbo++;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
- } while (mbo != HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used);
+ } while (mbo != aha1542->aha1542_last_mbo_used);
- if (mb[mbo].status || HOSTDATA(SCpnt->device->host)->SCint[mbo])
+ if (mb[mbo].status || aha1542->int_cmds[mbo])
panic("Unable to find empty mailbox for aha1542.\n");
- HOSTDATA(SCpnt->device->host)->SCint[mbo] = SCpnt; /* This will effectively prevent someone else from
- screwing with this cdb. */
+ aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from
+ screwing with this cdb. */
- HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used = mbo;
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ aha1542->aha1542_last_mbo_used = mbo;
#ifdef DEBUG
- printk(KERN_DEBUG "Sending command (%d %x)...", mbo, done);
+ shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
#endif
- any2scsi(mb[mbo].ccbptr, SCSI_BUF_PA(&ccb[mbo])); /* This gets trashed for some reason */
+ any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
memset(&ccb[mbo], 0, sizeof(struct ccb));
- ccb[mbo].cdblen = SCpnt->cmd_len;
+ ccb[mbo].cdblen = cmd->cmd_len;
direction = 0;
- if (*cmd == READ_10 || *cmd == READ_6)
+ if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6)
direction = 8;
- else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6)
direction = 16;
- memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
+ memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);
if (bufflen) {
struct scatterlist *sg;
- struct chain *cptr;
-#ifdef DEBUG
- unsigned char *ptr;
-#endif
- int i, sg_count = scsi_sg_count(SCpnt);
+ int i;
+
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
- SCpnt->host_scribble = kmalloc(sizeof(*cptr)*sg_count,
- GFP_KERNEL | GFP_DMA);
- cptr = (struct chain *) SCpnt->host_scribble;
- if (cptr == NULL) {
- /* free the claimed mailbox slot */
- HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- scsi_for_each_sg(SCpnt, sg, sg_count, i) {
- any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
+ cmd->host_scribble = (void *)cptr;
+ scsi_for_each_sg(cmd, sg, sg_count, i) {
+ any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
+ + sg->offset);
any2scsi(cptr[i].datalen, sg->length);
};
any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
- any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
+ any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr));
#ifdef DEBUG
- printk("cptr %x: ", cptr);
- ptr = (unsigned char *) cptr;
- for (i = 0; i < 18; i++)
- printk("%02x ", ptr[i]);
+ shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
+ print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18);
#endif
} else {
ccb[mbo].op = 0; /* SCSI Initiator Command */
- SCpnt->host_scribble = NULL;
+ cmd->host_scribble = NULL;
any2scsi(ccb[mbo].datalen, 0);
any2scsi(ccb[mbo].dataptr, 0);
};
@@ -694,139 +476,116 @@ static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *
ccb[mbo].commlinkid = 0;
#ifdef DEBUG
- {
- int i;
- printk(KERN_DEBUG "aha1542_command: sending.. ");
- for (i = 0; i < sizeof(ccb[mbo]) - 10; i++)
- printk("%02x ", ((unchar *) & ccb[mbo])[i]);
- };
+ print_hex_dump_bytes("sending: ", DUMP_PREFIX_NONE, &ccb[mbo], sizeof(ccb[mbo]) - 10);
+ printk("aha1542_queuecommand: now waiting for interrupt ");
#endif
-
- if (done) {
- DEB(printk("aha1542_queuecommand: now waiting for interrupt ");
- aha1542_stat());
- SCpnt->scsi_done = done;
- mb[mbo].status = 1;
- aha1542_out(SCpnt->device->host->io_port, &ahacmd, 1); /* start scsi command */
- DEB(aha1542_stat());
- } else
- printk("aha1542_queuecommand: done can't be NULL\n");
+ mb[mbo].status = 1;
+ aha1542_outb(cmd->device->host->io_port, CMD_START_SCSI);
+ spin_unlock_irqrestore(sh->host_lock, flags);
return 0;
}
-static DEF_SCSI_QCMD(aha1542_queuecommand)
-
/* Initialize mailboxes */
-static void setup_mailboxes(int bse, struct Scsi_Host *shpnt)
+static void setup_mailboxes(struct Scsi_Host *sh)
{
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
int i;
- struct mailbox *mb;
- struct ccb *ccb;
-
- unchar cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
- mb = HOSTDATA(shpnt)->mb;
- ccb = HOSTDATA(shpnt)->ccb;
+ u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
for (i = 0; i < AHA1542_MAILBOXES; i++) {
mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0;
- any2scsi(mb[i].ccbptr, SCSI_BUF_PA(&ccb[i]));
+ any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i]));
};
- aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
- any2scsi((cmd + 2), SCSI_BUF_PA(mb));
- aha1542_out(bse, cmd, 5);
- WAIT(INTRFLAGS(bse), INTRMASK, HACC, 0);
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: failed setting up mailboxes\n");
- }
- aha1542_intr_reset(bse);
+ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
+ any2scsi((mb_cmd + 2), isa_virt_to_bus(mb));
+ if (aha1542_out(sh->io_port, mb_cmd, 5))
+ shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n");
+ aha1542_intr_reset(sh->io_port);
}
-static int __init aha1542_getconfig(int base_io, unsigned char *irq_level, unsigned char *dma_chan, unsigned char *scsi_id)
+static int aha1542_getconfig(struct Scsi_Host *sh)
{
- unchar inquiry_cmd[] = {CMD_RETCONF};
- unchar inquiry_result[3];
+ u8 inquiry_result[3];
int i;
- i = inb(STATUS(base_io));
+ i = inb(STATUS(sh->io_port));
if (i & DF) {
- i = inb(DATA(base_io));
+ i = inb(DATA(sh->io_port));
};
- aha1542_out(base_io, inquiry_cmd, 1);
- aha1542_in(base_io, inquiry_result, 3);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: query board settings\n");
- }
- aha1542_intr_reset(base_io);
+ aha1542_outb(sh->io_port, CMD_RETCONF);
+ aha1542_in(sh->io_port, inquiry_result, 3, 0);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
+ shost_printk(KERN_ERR, sh, "error querying board settings\n");
+ aha1542_intr_reset(sh->io_port);
switch (inquiry_result[0]) {
case 0x80:
- *dma_chan = 7;
+ sh->dma_channel = 7;
break;
case 0x40:
- *dma_chan = 6;
+ sh->dma_channel = 6;
break;
case 0x20:
- *dma_chan = 5;
+ sh->dma_channel = 5;
break;
case 0x01:
- *dma_chan = 0;
+ sh->dma_channel = 0;
break;
case 0:
/* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
- *dma_chan = 0xFF;
+ sh->dma_channel = 0xFF;
break;
default:
- printk(KERN_ERR "Unable to determine Adaptec DMA priority. Disabling board\n");
+ shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n");
return -1;
};
switch (inquiry_result[1]) {
case 0x40:
- *irq_level = 15;
+ sh->irq = 15;
break;
case 0x20:
- *irq_level = 14;
+ sh->irq = 14;
break;
case 0x8:
- *irq_level = 12;
+ sh->irq = 12;
break;
case 0x4:
- *irq_level = 11;
+ sh->irq = 11;
break;
case 0x2:
- *irq_level = 10;
+ sh->irq = 10;
break;
case 0x1:
- *irq_level = 9;
+ sh->irq = 9;
break;
default:
- printk(KERN_ERR "Unable to determine Adaptec IRQ level. Disabling board\n");
+ shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n");
return -1;
};
- *scsi_id = inquiry_result[2] & 7;
+ sh->this_id = inquiry_result[2] & 7;
return 0;
}
/* This function should only be called for 1542C boards - we can detect
the special firmware settings and unlock the board */
-static int __init aha1542_mbenable(int base)
+static int aha1542_mbenable(struct Scsi_Host *sh)
{
- static unchar mbenable_cmd[3];
- static unchar mbenable_result[2];
+ static u8 mbenable_cmd[3];
+ static u8 mbenable_result[2];
int retval;
retval = BIOS_TRANSLATION_6432;
- mbenable_cmd[0] = CMD_EXTBIOS;
- aha1542_out(base, mbenable_cmd, 1);
- if (aha1542_in1(base, mbenable_result, 2))
+ aha1542_outb(sh->io_port, CMD_EXTBIOS);
+ if (aha1542_in(sh->io_port, mbenable_result, 2, 100))
return retval;
- WAITd(INTRFLAGS(base), INTRMASK, HACC, 0, 100);
- aha1542_intr_reset(base);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100))
+ goto fail;
+ aha1542_intr_reset(sh->io_port);
if ((mbenable_result[0] & 0x08) || mbenable_result[1]) {
mbenable_cmd[0] = CMD_MBENABLE;
@@ -836,37 +595,34 @@ static int __init aha1542_mbenable(int base)
if ((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03))
retval = BIOS_TRANSLATION_25563;
- aha1542_out(base, mbenable_cmd, 3);
- WAIT(INTRFLAGS(base), INTRMASK, HACC, 0);
+ if (aha1542_out(sh->io_port, mbenable_cmd, 3))
+ goto fail;
};
while (0) {
fail:
- printk(KERN_ERR "aha1542_mbenable: Mailbox init failed\n");
+ shost_printk(KERN_ERR, sh, "Mailbox init failed\n");
}
- aha1542_intr_reset(base);
+ aha1542_intr_reset(sh->io_port);
return retval;
}
/* Query the board to find out if it is a 1542 or a 1740, or whatever. */
-static int __init aha1542_query(int base_io, int *transl)
+static int aha1542_query(struct Scsi_Host *sh)
{
- unchar inquiry_cmd[] = {CMD_INQUIRY};
- unchar inquiry_result[4];
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ u8 inquiry_result[4];
int i;
- i = inb(STATUS(base_io));
+ i = inb(STATUS(sh->io_port));
if (i & DF) {
- i = inb(DATA(base_io));
+ i = inb(DATA(sh->io_port));
};
- aha1542_out(base_io, inquiry_cmd, 1);
- aha1542_in(base_io, inquiry_result, 4);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: query card type\n");
- }
- aha1542_intr_reset(base_io);
+ aha1542_outb(sh->io_port, CMD_INQUIRY);
+ aha1542_in(sh->io_port, inquiry_result, 4, 0);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
+ shost_printk(KERN_ERR, sh, "error querying card type\n");
+ aha1542_intr_reset(sh->io_port);
- *transl = BIOS_TRANSLATION_6432; /* Default case */
+ aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */
/* For an AHA1740 series board, we ignore the board since there is a
hardware bug which can lead to wrong blocks being returned if the board
@@ -875,391 +631,198 @@ fail:
*/
if (inquiry_result[0] == 0x43) {
- printk(KERN_INFO "aha1542.c: Emulation mode not supported for AHA 174N hardware.\n");
+ shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n");
return 1;
};
/* Always call this - boards that do not support extended bios translation
will ignore the command, and we will set the proper default */
- *transl = aha1542_mbenable(base_io);
+ aha1542->bios_translation = aha1542_mbenable(sh);
return 0;
}
-#ifndef MODULE
-static char *setup_str[MAXBOARDS] __initdata;
-static int setup_idx = 0;
-
-static void __init aha1542_setup(char *str, int *ints)
+static u8 dma_speed_hw(int dma_speed)
{
- const char *ahausage = "aha1542: usage: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]\n";
- int setup_portbase;
-
- if (setup_idx >= MAXBOARDS) {
- printk(KERN_ERR "aha1542: aha1542_setup called too many times! Bad LILO params ?\n");
- printk(KERN_ERR " Entryline 1: %s\n", setup_str[0]);
- printk(KERN_ERR " Entryline 2: %s\n", setup_str[1]);
- printk(KERN_ERR " This line: %s\n", str);
- return;
- }
- if (ints[0] < 1 || ints[0] > 4) {
- printk(KERN_ERR "aha1542: %s\n", str);
- printk(ahausage);
- printk(KERN_ERR "aha1542: Wrong parameters may cause system malfunction.. We try anyway..\n");
- }
- setup_called[setup_idx] = ints[0];
- setup_str[setup_idx] = str;
-
- setup_portbase = ints[0] >= 1 ? ints[1] : 0; /* Preserve the default value.. */
- setup_buson[setup_idx] = ints[0] >= 2 ? ints[2] : 7;
- setup_busoff[setup_idx] = ints[0] >= 3 ? ints[3] : 5;
- if (ints[0] >= 4)
- {
- int atbt = -1;
- switch (ints[4]) {
- case 5:
- atbt = 0x00;
- break;
- case 6:
- atbt = 0x04;
- break;
- case 7:
- atbt = 0x01;
- break;
- case 8:
- atbt = 0x02;
- break;
- case 10:
- atbt = 0x03;
- break;
- default:
- printk(KERN_ERR "aha1542: %s\n", str);
- printk(ahausage);
- printk(KERN_ERR "aha1542: Valid values for DMASPEED are 5-8, 10 MB/s. Using jumper defaults.\n");
- break;
- }
- setup_dmaspeed[setup_idx] = atbt;
+ switch (dma_speed) {
+ case 5:
+ return 0x00;
+ case 6:
+ return 0x04;
+ case 7:
+ return 0x01;
+ case 8:
+ return 0x02;
+ case 10:
+ return 0x03;
}
- if (setup_portbase != 0)
- bases[setup_idx] = setup_portbase;
- ++setup_idx;
+ return 0xff; /* invalid */
}
-static int __init do_setup(char *str)
+/* Set the Bus on/off-times as not to ruin floppy performance */
+static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed)
{
- int ints[5];
+ if (bus_on > 0) {
+ u8 oncmd[] = { CMD_BUSON_TIME, clamp(bus_on, 2, 15) };
- int count=setup_idx;
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, oncmd, 2))
+ goto fail;
+ }
- get_options(str, ARRAY_SIZE(ints), ints);
- aha1542_setup(str,ints);
+ if (bus_off > 0) {
+ u8 offcmd[] = { CMD_BUSOFF_TIME, clamp(bus_off, 1, 64) };
- return count<setup_idx;
-}
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, offcmd, 2))
+ goto fail;
+ }
-__setup("aha1542=",do_setup);
-#endif
+ if (dma_speed_hw(dma_speed) != 0xff) {
+ u8 dmacmd[] = { CMD_DMASPEED, dma_speed_hw(dma_speed) };
+
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, dmacmd, 2))
+ goto fail;
+ }
+ aha1542_intr_reset(sh->io_port);
+ return;
+fail:
+ shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n");
+ aha1542_intr_reset(sh->io_port);
+}
/* return non-zero on detection */
-static int __init aha1542_detect(struct scsi_host_template * tpnt)
+static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct device *pdev, int indx)
{
- unsigned char dma_chan;
- unsigned char irq_level;
- unsigned char scsi_id;
- unsigned long flags;
- unsigned int base_io;
- int trans;
- struct Scsi_Host *shpnt = NULL;
- int count = 0;
- int indx;
-
- DEB(printk("aha1542_detect: \n"));
-
- tpnt->proc_name = "aha1542";
-
-#ifdef MODULE
- bases[0] = aha1542[0];
- setup_buson[0] = aha1542[1];
- setup_busoff[0] = aha1542[2];
- {
- int atbt = -1;
- switch (aha1542[3]) {
- case 5:
- atbt = 0x00;
- break;
- case 6:
- atbt = 0x04;
- break;
- case 7:
- atbt = 0x01;
- break;
- case 8:
- atbt = 0x02;
- break;
- case 10:
- atbt = 0x03;
- break;
- };
- setup_dmaspeed[0] = atbt;
+ unsigned int base_io = io[indx];
+ struct Scsi_Host *sh;
+ struct aha1542_hostdata *aha1542;
+ char dma_info[] = "no DMA";
+
+ if (base_io == 0)
+ return NULL;
+
+ if (!request_region(base_io, AHA1542_REGION_SIZE, "aha1542"))
+ return NULL;
+
+ sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata));
+ if (!sh)
+ goto release;
+ aha1542 = shost_priv(sh);
+
+ sh->unique_id = base_io;
+ sh->io_port = base_io;
+ sh->n_io_port = AHA1542_REGION_SIZE;
+ aha1542->aha1542_last_mbi_used = 2 * AHA1542_MAILBOXES - 1;
+ aha1542->aha1542_last_mbo_used = AHA1542_MAILBOXES - 1;
+
+ if (!aha1542_test_port(sh))
+ goto unregister;
+
+ aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]);
+ if (aha1542_query(sh))
+ goto unregister;
+ if (aha1542_getconfig(sh) == -1)
+ goto unregister;
+
+ if (sh->dma_channel != 0xFF)
+ snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel);
+ shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n",
+ sh->this_id, base_io, sh->irq, dma_info);
+ if (aha1542->bios_translation == BIOS_TRANSLATION_25563)
+ shost_printk(KERN_INFO, sh, "Using extended bios translation\n");
+
+ setup_mailboxes(sh);
+
+ if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) {
+ shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n");
+ goto unregister;
}
-#endif
-
- /*
- * Hunt for ISA Plug'n'Pray Adaptecs (AHA1535)
- */
-
- if(isapnp)
- {
- struct pnp_dev *pdev = NULL;
- for(indx = 0; indx < ARRAY_SIZE(bases); indx++) {
- if(bases[indx])
- continue;
- pdev = pnp_find_dev(NULL, ISAPNP_VENDOR('A', 'D', 'P'),
- ISAPNP_FUNCTION(0x1542), pdev);
- if(pdev==NULL)
- break;
- /*
- * Activate the PnP card
- */
-
- if(pnp_device_attach(pdev)<0)
- continue;
-
- if(pnp_activate_dev(pdev)<0) {
- pnp_device_detach(pdev);
- continue;
- }
-
- if(!pnp_port_valid(pdev, 0)) {
- pnp_device_detach(pdev);
- continue;
- }
-
- bases[indx] = pnp_port_start(pdev, 0);
-
- /* The card can be queried for its DMA, we have
- the DMA set up that is enough */
-
- printk(KERN_INFO "ISAPnP found an AHA1535 at I/O 0x%03X\n", bases[indx]);
+ if (sh->dma_channel != 0xFF) {
+ if (request_dma(sh->dma_channel, "aha1542")) {
+ shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n");
+ goto free_irq;
+ }
+ if (sh->dma_channel == 0 || sh->dma_channel >= 5) {
+ set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE);
+ enable_dma(sh->dma_channel);
}
}
- for (indx = 0; indx < ARRAY_SIZE(bases); indx++)
- if (bases[indx] != 0 && request_region(bases[indx], 4, "aha1542")) {
- shpnt = scsi_register(tpnt,
- sizeof(struct aha1542_hostdata));
-
- if(shpnt==NULL) {
- release_region(bases[indx], 4);
- continue;
- }
- if (!aha1542_test_port(bases[indx], shpnt))
- goto unregister;
-
- base_io = bases[indx];
-
- /* Set the Bus on/off-times as not to ruin floppy performance */
- {
- unchar oncmd[] = {CMD_BUSON_TIME, 7};
- unchar offcmd[] = {CMD_BUSOFF_TIME, 5};
-
- if (setup_called[indx]) {
- oncmd[1] = setup_buson[indx];
- offcmd[1] = setup_busoff[indx];
- }
- aha1542_intr_reset(base_io);
- aha1542_out(base_io, oncmd, 2);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- aha1542_intr_reset(base_io);
- aha1542_out(base_io, offcmd, 2);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- if (setup_dmaspeed[indx] >= 0) {
- unchar dmacmd[] = {CMD_DMASPEED, 0};
- dmacmd[1] = setup_dmaspeed[indx];
- aha1542_intr_reset(base_io);
- aha1542_out(base_io, dmacmd, 2);
- WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
- }
- while (0) {
-fail:
- printk(KERN_ERR "aha1542_detect: setting bus on/off-time failed\n");
- }
- aha1542_intr_reset(base_io);
- }
- if (aha1542_query(base_io, &trans))
- goto unregister;
-
- if (aha1542_getconfig(base_io, &irq_level, &dma_chan, &scsi_id) == -1)
- goto unregister;
-
- printk(KERN_INFO "Configuring Adaptec (SCSI-ID %d) at IO:%x, IRQ %d", scsi_id, base_io, irq_level);
- if (dma_chan != 0xFF)
- printk(", DMA priority %d", dma_chan);
- printk("\n");
- DEB(aha1542_stat());
- setup_mailboxes(base_io, shpnt);
+ if (scsi_add_host(sh, pdev))
+ goto free_dma;
- DEB(aha1542_stat());
+ scsi_scan_host(sh);
- DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
- spin_lock_irqsave(&aha1542_lock, flags);
- if (request_irq(irq_level, do_aha1542_intr_handle, 0,
- "aha1542", shpnt)) {
- printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n");
- spin_unlock_irqrestore(&aha1542_lock, flags);
- goto unregister;
- }
- if (dma_chan != 0xFF) {
- if (request_dma(dma_chan, "aha1542")) {
- printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n");
- free_irq(irq_level, shpnt);
- spin_unlock_irqrestore(&aha1542_lock, flags);
- goto unregister;
- }
- if (dma_chan == 0 || dma_chan >= 5) {
- set_dma_mode(dma_chan, DMA_MODE_CASCADE);
- enable_dma(dma_chan);
- }
- }
-
- shpnt->this_id = scsi_id;
- shpnt->unique_id = base_io;
- shpnt->io_port = base_io;
- shpnt->n_io_port = 4; /* Number of bytes of I/O space used */
- shpnt->dma_channel = dma_chan;
- shpnt->irq = irq_level;
- HOSTDATA(shpnt)->bios_translation = trans;
- if (trans == BIOS_TRANSLATION_25563)
- printk(KERN_INFO "aha1542.c: Using extended bios translation\n");
- HOSTDATA(shpnt)->aha1542_last_mbi_used = (2 * AHA1542_MAILBOXES - 1);
- HOSTDATA(shpnt)->aha1542_last_mbo_used = (AHA1542_MAILBOXES - 1);
- memset(HOSTDATA(shpnt)->SCint, 0, sizeof(HOSTDATA(shpnt)->SCint));
- spin_unlock_irqrestore(&aha1542_lock, flags);
-#if 0
- DEB(printk(" *** READ CAPACITY ***\n"));
-
- {
- unchar buf[8];
- static unchar cmd[] = { READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int i;
-
- for (i = 0; i < sizeof(buf); ++i)
- buf[i] = 0x87;
- for (i = 0; i < 2; ++i)
- if (!aha1542_command(i, cmd, buf, sizeof(buf))) {
- printk(KERN_DEBUG "aha_detect: LU %d sector_size %d device_size %d\n",
- i, xscsi2int(buf + 4), xscsi2int(buf));
- }
- }
-
- DEB(printk(" *** NOW RUNNING MY OWN TEST *** \n"));
-
- for (i = 0; i < 4; ++i) {
- unsigned char cmd[10];
- static buffer[512];
-
- cmd[0] = READ_10;
- cmd[1] = 0;
- xany2scsi(cmd + 2, i);
- cmd[6] = 0;
- cmd[7] = 0;
- cmd[8] = 1;
- cmd[9] = 0;
- aha1542_command(0, cmd, buffer, 512);
- }
-#endif
- count++;
- continue;
+ return sh;
+free_dma:
+ if (sh->dma_channel != 0xff)
+ free_dma(sh->dma_channel);
+free_irq:
+ free_irq(sh->irq, sh);
unregister:
- release_region(bases[indx], 4);
- scsi_unregister(shpnt);
- continue;
+ scsi_host_put(sh);
+release:
+ release_region(base_io, AHA1542_REGION_SIZE);
- };
-
- return count;
+ return NULL;
}
-static int aha1542_release(struct Scsi_Host *shost)
+static int aha1542_release(struct Scsi_Host *sh)
{
- if (shost->irq)
- free_irq(shost->irq, shost);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
+ scsi_remove_host(sh);
+ if (sh->dma_channel != 0xff)
+ free_dma(sh->dma_channel);
+ if (sh->irq)
+ free_irq(sh->irq, sh);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ scsi_host_put(sh);
return 0;
}
-static int aha1542_restart(struct Scsi_Host *shost)
-{
- int i;
- int count = 0;
-#if 0
- unchar ahacmd = CMD_START_SCSI;
-#endif
-
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(shost)->SCint[i] &&
- !(HOSTDATA(shost)->SCint[i]->device->soft_reset)) {
-#if 0
- HOSTDATA(shost)->mb[i].status = 1; /* Indicate ready to restart... */
-#endif
- count++;
- }
- printk(KERN_DEBUG "Potential to restart %d stalled commands...\n", count);
-#if 0
- /* start scsi command */
- if (count)
- aha1542_out(shost->io_port, &ahacmd, 1);
-#endif
- return 0;
-}
/*
* This is a device reset. This is handled by sending a special command
* to the device.
*/
-static int aha1542_dev_reset(Scsi_Cmnd * SCpnt)
+static int aha1542_dev_reset(struct scsi_cmnd *cmd)
{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
unsigned long flags;
- struct mailbox *mb;
- unchar target = SCpnt->device->id;
- unchar lun = SCpnt->device->lun;
+ struct mailbox *mb = aha1542->mb;
+ u8 target = cmd->device->id;
+ u8 lun = cmd->device->lun;
int mbo;
- struct ccb *ccb;
- unchar ahacmd = CMD_START_SCSI;
+ struct ccb *ccb = aha1542->ccb;
- ccb = HOSTDATA(SCpnt->device->host)->ccb;
- mb = HOSTDATA(SCpnt->device->host)->mb;
-
- spin_lock_irqsave(&aha1542_lock, flags);
- mbo = HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used + 1;
+ spin_lock_irqsave(sh->host_lock, flags);
+ mbo = aha1542->aha1542_last_mbo_used + 1;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
do {
- if (mb[mbo].status == 0 && HOSTDATA(SCpnt->device->host)->SCint[mbo] == NULL)
+ if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
break;
mbo++;
if (mbo >= AHA1542_MAILBOXES)
mbo = 0;
- } while (mbo != HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used);
+ } while (mbo != aha1542->aha1542_last_mbo_used);
- if (mb[mbo].status || HOSTDATA(SCpnt->device->host)->SCint[mbo])
+ if (mb[mbo].status || aha1542->int_cmds[mbo])
panic("Unable to find empty mailbox for aha1542.\n");
- HOSTDATA(SCpnt->device->host)->SCint[mbo] = SCpnt; /* This will effectively
- prevent someone else from
- screwing with this cdb. */
+ aha1542->int_cmds[mbo] = cmd; /* This will effectively
+ prevent someone else from
+ screwing with this cdb. */
- HOSTDATA(SCpnt->device->host)->aha1542_last_mbo_used = mbo;
- spin_unlock_irqrestore(&aha1542_lock, flags);
+ aha1542->aha1542_last_mbo_used = mbo;
- any2scsi(mb[mbo].ccbptr, SCSI_BUF_PA(&ccb[mbo])); /* This gets trashed for some reason */
+ any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
memset(&ccb[mbo], 0, sizeof(struct ccb));
@@ -1274,141 +837,43 @@ static int aha1542_dev_reset(Scsi_Cmnd * SCpnt)
* Now tell the 1542 to flush all pending commands for this
* target
*/
- aha1542_out(SCpnt->device->host->io_port, &ahacmd, 1);
+ aha1542_outb(sh->io_port, CMD_START_SCSI);
+ spin_unlock_irqrestore(sh->host_lock, flags);
- scmd_printk(KERN_WARNING, SCpnt,
+ scmd_printk(KERN_WARNING, cmd,
"Trying device reset for target\n");
return SUCCESS;
-
-
-#ifdef ERIC_neverdef
- /*
- * With the 1542 we apparently never get an interrupt to
- * acknowledge a device reset being sent. Then again, Leonard
- * says we are doing this wrong in the first place...
- *
- * Take a wait and see attitude. If we get spurious interrupts,
- * then the device reset is doing something sane and useful, and
- * we will wait for the interrupt to post completion.
- */
- printk(KERN_WARNING "Sent BUS DEVICE RESET to target %d\n", SCpnt->target);
-
- /*
- * Free the command block for all commands running on this
- * target...
- */
- for (i = 0; i < AHA1542_MAILBOXES; i++) {
- if (HOSTDATA(SCpnt->host)->SCint[i] &&
- HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- HOSTDATA(SCpnt->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->host)->mb[i].status = 0;
- }
- }
- return SUCCESS;
-
- return FAILED;
-#endif /* ERIC_neverdef */
}
-static int aha1542_bus_reset(Scsi_Cmnd * SCpnt)
+static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ unsigned long flags;
int i;
+ spin_lock_irqsave(sh->host_lock, flags);
/*
* This does a scsi reset for all devices on the bus.
* In principle, we could also reset the 1542 - should
* we do this? Try this first, and we can add that later
* if it turns out to be useful.
*/
- outb(SCRST, CONTROL(SCpnt->device->host->io_port));
-
- /*
- * Wait for the thing to settle down a bit. Unfortunately
- * this is going to basically lock up the machine while we
- * wait for this to complete. To be 100% correct, we need to
- * check for timeout, and if we are doing something like this
- * we are pretty desperate anyways.
- */
- ssleep(4);
-
- spin_lock_irq(SCpnt->device->host->host_lock);
-
- WAIT(STATUS(SCpnt->device->host->io_port),
- STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
-
- /*
- * Now try to pick up the pieces. For all pending commands,
- * free any internal data structures, and basically clear things
- * out. We do not try and restart any commands or anything -
- * the strategy handler takes care of that crap.
- */
- printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->device->host->host_no);
-
- for (i = 0; i < AHA1542_MAILBOXES; i++) {
- if (HOSTDATA(SCpnt->device->host)->SCint[i] != NULL) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->device->host)->SCint[i];
+ outb(reset_cmd, CONTROL(cmd->device->host->io_port));
-
- if (SCtmp->device->soft_reset) {
- /*
- * If this device implements the soft reset option,
- * then it is still holding onto the command, and
- * may yet complete it. In this case, we don't
- * flush the data.
- */
- continue;
- }
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- HOSTDATA(SCpnt->device->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->device->host)->mb[i].status = 0;
- }
+ if (!wait_mask(STATUS(cmd->device->host->io_port),
+ STATMASK, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) {
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ return FAILED;
}
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return SUCCESS;
-
-fail:
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return FAILED;
-}
-
-static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
-{
- int i;
-
- /*
- * This does a scsi reset for all devices on the bus.
- * In principle, we could also reset the 1542 - should
- * we do this? Try this first, and we can add that later
- * if it turns out to be useful.
- */
- outb(HRST | SCRST, CONTROL(SCpnt->device->host->io_port));
-
- /*
- * Wait for the thing to settle down a bit. Unfortunately
- * this is going to basically lock up the machine while we
- * wait for this to complete. To be 100% correct, we need to
- * check for timeout, and if we are doing something like this
- * we are pretty desperate anyways.
- */
- ssleep(4);
- spin_lock_irq(SCpnt->device->host->host_lock);
-
- WAIT(STATUS(SCpnt->device->host->io_port),
- STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
-
/*
* We need to do this too before the 1542 can interact with
- * us again.
+ * us again after host reset.
*/
- setup_mailboxes(SCpnt->device->host->io_port, SCpnt->device->host);
+ if (reset_cmd & HRST)
+ setup_mailboxes(cmd->device->host);
/*
* Now try to pick up the pieces. For all pending commands,
@@ -1416,14 +881,14 @@ static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
* out. We do not try and restart any commands or anything -
* the strategy handler takes care of that crap.
*/
- printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->device->host->host_no);
+ shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no);
for (i = 0; i < AHA1542_MAILBOXES; i++) {
- if (HOSTDATA(SCpnt->device->host)->SCint[i] != NULL) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->device->host)->SCint[i];
+ if (aha1542->int_cmds[i] != NULL) {
+ struct scsi_cmnd *tmp_cmd;
+ tmp_cmd = aha1542->int_cmds[i];
- if (SCtmp->device->soft_reset) {
+ if (tmp_cmd->device->soft_reset) {
/*
* If this device implements the soft reset option,
* then it is still holding onto the command, and
@@ -1432,241 +897,51 @@ static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
*/
continue;
}
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- HOSTDATA(SCpnt->device->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->device->host)->mb[i].status = 0;
+ kfree(tmp_cmd->host_scribble);
+ tmp_cmd->host_scribble = NULL;
+ aha1542->int_cmds[i] = NULL;
+ aha1542->mb[i].status = 0;
}
}
- spin_unlock_irq(SCpnt->device->host->host_lock);
+ spin_unlock_irqrestore(sh->host_lock, flags);
return SUCCESS;
-
-fail:
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return FAILED;
}
-#if 0
-/*
- * These are the old error handling routines. They are only temporarily
- * here while we play with the new error handling code.
- */
-static int aha1542_old_abort(Scsi_Cmnd * SCpnt)
+static int aha1542_bus_reset(struct scsi_cmnd *cmd)
{
-#if 0
- unchar ahacmd = CMD_START_SCSI;
- unsigned long flags;
- struct mailbox *mb;
- int mbi, mbo, i;
-
- printk(KERN_DEBUG "In aha1542_abort: %x %x\n",
- inb(STATUS(SCpnt->host->io_port)),
- inb(INTRFLAGS(SCpnt->host->io_port)));
-
- spin_lock_irqsave(&aha1542_lock, flags);
- mb = HOSTDATA(SCpnt->host)->mb;
- mbi = HOSTDATA(SCpnt->host)->aha1542_last_mbi_used + 1;
- if (mbi >= 2 * AHA1542_MAILBOXES)
- mbi = AHA1542_MAILBOXES;
-
- do {
- if (mb[mbi].status != 0)
- break;
- mbi++;
- if (mbi >= 2 * AHA1542_MAILBOXES)
- mbi = AHA1542_MAILBOXES;
- } while (mbi != HOSTDATA(SCpnt->host)->aha1542_last_mbi_used);
- spin_unlock_irqrestore(&aha1542_lock, flags);
-
- if (mb[mbi].status) {
- printk(KERN_ERR "Lost interrupt discovered on irq %d - attempting to recover\n",
- SCpnt->host->irq);
- aha1542_intr_handle(SCpnt->host, NULL);
- return 0;
- }
- /* OK, no lost interrupt. Try looking to see how many pending commands
- we think we have. */
-
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i]) {
- if (HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) {
- printk(KERN_ERR "Timed out command pending for %s\n",
- SCpnt->request->rq_disk ?
- SCpnt->request->rq_disk->disk_name : "?"
- );
- if (HOSTDATA(SCpnt->host)->mb[i].status) {
- printk(KERN_ERR "OGMB still full - restarting\n");
- aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
- };
- } else
- printk(KERN_ERR "Other pending command %s\n",
- SCpnt->request->rq_disk ?
- SCpnt->request->rq_disk->disk_name : "?"
- );
- }
-#endif
-
- DEB(printk("aha1542_abort\n"));
-#if 0
- spin_lock_irqsave(&aha1542_lock, flags);
- for (mbo = 0; mbo < AHA1542_MAILBOXES; mbo++) {
- if (SCpnt == HOSTDATA(SCpnt->host)->SCint[mbo]) {
- mb[mbo].status = 2; /* Abort command */
- aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
- spin_unlock_irqrestore(&aha1542_lock, flags);
- break;
- }
- }
- if (AHA1542_MAILBOXES == mbo)
- spin_unlock_irqrestore(&aha1542_lock, flags);
-#endif
- return SCSI_ABORT_SNOOZE;
+ return aha1542_reset(cmd, SCRST);
}
-/* We do not implement a reset function here, but the upper level code
- assumes that it will get some kind of response for the command in
- SCpnt. We must oblige, or the command will hang the scsi system.
- For a first go, we assume that the 1542 notifies us with all of the
- pending commands (it does implement soft reset, after all). */
-
-static int aha1542_old_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+static int aha1542_host_reset(struct scsi_cmnd *cmd)
{
- unchar ahacmd = CMD_START_SCSI;
- int i;
-
- /*
- * See if a bus reset was suggested.
- */
- if (reset_flags & SCSI_RESET_SUGGEST_BUS_RESET) {
- /*
- * This does a scsi reset for all devices on the bus.
- * In principle, we could also reset the 1542 - should
- * we do this? Try this first, and we can add that later
- * if it turns out to be useful.
- */
- outb(HRST | SCRST, CONTROL(SCpnt->host->io_port));
-
- /*
- * Wait for the thing to settle down a bit. Unfortunately
- * this is going to basically lock up the machine while we
- * wait for this to complete. To be 100% correct, we need to
- * check for timeout, and if we are doing something like this
- * we are pretty desperate anyways.
- */
- WAIT(STATUS(SCpnt->host->io_port),
- STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
-
- /*
- * We need to do this too before the 1542 can interact with
- * us again.
- */
- setup_mailboxes(SCpnt->host->io_port, SCpnt->host);
-
- /*
- * Now try to pick up the pieces. Restart all commands
- * that are currently active on the bus, and reset all of
- * the datastructures. We have some time to kill while
- * things settle down, so print a nice message.
- */
- printk(KERN_WARNING "Sent BUS RESET to scsi host %d\n", SCpnt->host->host_no);
-
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i] != NULL) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
- SCtmp->result = DID_RESET << 16;
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- printk(KERN_WARNING "Sending DID_RESET for target %d\n", SCpnt->target);
- SCtmp->scsi_done(SCpnt);
-
- HOSTDATA(SCpnt->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->host)->mb[i].status = 0;
- }
- /*
- * Now tell the mid-level code what we did here. Since
- * we have restarted all of the outstanding commands,
- * then report SUCCESS.
- */
- return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET);
-fail:
- printk(KERN_CRIT "aha1542.c: Unable to perform hard reset.\n");
- printk(KERN_CRIT "Power cycle machine to reset\n");
- return (SCSI_RESET_ERROR | SCSI_RESET_BUS_RESET);
-
-
- } else {
- /* This does a selective reset of just the one device */
- /* First locate the ccb for this command */
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) {
- HOSTDATA(SCpnt->host)->ccb[i].op = 0x81; /* BUS DEVICE RESET */
- /* Now tell the 1542 to flush all pending commands for this target */
- aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
-
- /* Here is the tricky part. What to do next. Do we get an interrupt
- for the commands that we aborted with the specified target, or
- do we generate this on our own? Try it without first and see
- what happens */
- printk(KERN_WARNING "Sent BUS DEVICE RESET to target %d\n", SCpnt->target);
-
- /* If the first does not work, then try the second. I think the
- first option is more likely to be correct. Free the command
- block for all commands running on this target... */
- for (i = 0; i < AHA1542_MAILBOXES; i++)
- if (HOSTDATA(SCpnt->host)->SCint[i] &&
- HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target) {
- Scsi_Cmnd *SCtmp;
- SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
- SCtmp->result = DID_RESET << 16;
- kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
- printk(KERN_WARNING "Sending DID_RESET for target %d\n", SCpnt->target);
- SCtmp->scsi_done(SCpnt);
-
- HOSTDATA(SCpnt->host)->SCint[i] = NULL;
- HOSTDATA(SCpnt->host)->mb[i].status = 0;
- }
- return SCSI_RESET_SUCCESS;
- }
- }
- /* No active command at this time, so this means that each time we got
- some kind of response the last time through. Tell the mid-level code
- to request sense information in order to decide what to do next. */
- return SCSI_RESET_PUNT;
+ return aha1542_reset(cmd, HRST | SCRST);
}
-#endif /* end of big comment block around old_abort + old_reset */
static int aha1542_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *ip)
+ struct block_device *bdev, sector_t capacity, int geom[])
{
- int translation_algorithm;
- int size = capacity;
+ struct aha1542_hostdata *aha1542 = shost_priv(sdev->host);
- translation_algorithm = HOSTDATA(sdev->host)->bios_translation;
-
- if ((size >> 11) > 1024 && translation_algorithm == BIOS_TRANSLATION_25563) {
+ if (capacity >= 0x200000 &&
+ aha1542->bios_translation == BIOS_TRANSLATION_25563) {
/* Please verify that this is the same as what DOS returns */
- ip[0] = 255;
- ip[1] = 63;
- ip[2] = size / 255 / 63;
+ geom[0] = 255; /* heads */
+ geom[1] = 63; /* sectors */
} else {
- ip[0] = 64;
- ip[1] = 32;
- ip[2] = size >> 11;
+ geom[0] = 64; /* heads */
+ geom[1] = 32; /* sectors */
}
+ geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */
return 0;
}
MODULE_LICENSE("GPL");
-
static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
.proc_name = "aha1542",
.name = "Adaptec 1542",
- .detect = aha1542_detect,
- .release = aha1542_release,
.queuecommand = aha1542_queuecommand,
.eh_device_reset_handler= aha1542_dev_reset,
.eh_bus_reset_handler = aha1542_bus_reset,
@@ -1674,9 +949,124 @@ static struct scsi_host_template driver_template = {
.bios_param = aha1542_biosparam,
.can_queue = AHA1542_MAILBOXES,
.this_id = 7,
- .sg_tablesize = AHA1542_SCATTER,
- .cmd_per_lun = AHA1542_CMDLUN,
+ .sg_tablesize = 16,
+ .cmd_per_lun = 1,
.unchecked_isa_dma = 1,
.use_clustering = ENABLE_CLUSTERING,
};
-#include "scsi_module.c"
+
+static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
+{
+ struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev);
+
+ if (!sh)
+ return 0;
+
+ dev_set_drvdata(pdev, sh);
+ return 1;
+}
+
+static int aha1542_isa_remove(struct device *pdev,
+ unsigned int ndev)
+{
+ aha1542_release(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct isa_driver aha1542_isa_driver = {
+ .match = aha1542_isa_match,
+ .remove = aha1542_isa_remove,
+ .driver = {
+ .name = "aha1542"
+ },
+};
+static int isa_registered;
+
+#ifdef CONFIG_PNP
+static struct pnp_device_id aha1542_pnp_ids[] = {
+ { .id = "ADP1542" },
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, aha1542_pnp_ids);
+
+static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
+{
+ int indx;
+ struct Scsi_Host *sh;
+
+ for (indx = 0; indx < ARRAY_SIZE(io); indx++) {
+ if (io[indx])
+ continue;
+
+ if (pnp_activate_dev(pdev) < 0)
+ continue;
+
+ io[indx] = pnp_port_start(pdev, 0);
+
+ /* The card can be queried for its DMA; the DMA is
+ already set up, and that is enough */
+
+ dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]);
+ }
+
+ sh = aha1542_hw_init(&driver_template, &pdev->dev, indx);
+ if (!sh)
+ return -ENODEV;
+
+ pnp_set_drvdata(pdev, sh);
+ return 0;
+}
+
+static void aha1542_pnp_remove(struct pnp_dev *pdev)
+{
+ aha1542_release(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+static struct pnp_driver aha1542_pnp_driver = {
+ .name = "aha1542",
+ .id_table = aha1542_pnp_ids,
+ .probe = aha1542_pnp_probe,
+ .remove = aha1542_pnp_remove,
+};
+static int pnp_registered;
+#endif /* CONFIG_PNP */
+
+static int __init aha1542_init(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_PNP
+ if (isapnp) {
+ ret = pnp_register_driver(&aha1542_pnp_driver);
+ if (!ret)
+ pnp_registered = 1;
+ }
+#endif
+ ret = isa_register_driver(&aha1542_isa_driver, MAXBOARDS);
+ if (!ret)
+ isa_registered = 1;
+
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ ret = 0;
+#endif
+ if (isa_registered)
+ ret = 0;
+
+ return ret;
+}
+
+static void __exit aha1542_exit(void)
+{
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ pnp_unregister_driver(&aha1542_pnp_driver);
+#endif
+ if (isa_registered)
+ isa_unregister_driver(&aha1542_isa_driver);
+}
+
+module_init(aha1542_init);
+module_exit(aha1542_exit);
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h
index b871d2b57f93..0fe9bae1b3d1 100644
--- a/drivers/scsi/aha1542.h
+++ b/drivers/scsi/aha1542.h
@@ -1,64 +1,35 @@
-#ifndef _AHA1542_H
-
-/* $Id: aha1542.h,v 1.1 1992/07/24 06:27:38 root Exp root $
- *
- * Header file for the adaptec 1542 driver for Linux
- *
- * $Log: aha1542.h,v $
- * Revision 1.1 1992/07/24 06:27:38 root
- * Initial revision
- *
- * Revision 1.2 1992/07/04 18:41:49 root
- * Replaced distribution with current drivers
- *
- * Revision 1.3 1992/06/23 23:58:20 root
- * Fixes.
- *
- * Revision 1.2 1992/05/26 22:13:23 root
- * Changed bug that prevented DMA above first 2 mbytes.
- *
- * Revision 1.1 1992/05/22 21:00:29 root
- * Initial revision
- *
- * Revision 1.1 1992/04/24 18:01:50 root
- * Initial revision
- *
- * Revision 1.1 1992/04/02 03:23:13 drew
- * Initial revision
- *
- * Revision 1.3 1992/01/27 14:46:29 tthorn
- * *** empty log message ***
- *
- */
+#ifndef _AHA1542_H_
+#define _AHA1542_H_
#include <linux/types.h>
/* I/O Port interface 4.2 */
/* READ */
#define STATUS(base) base
-#define STST 0x80 /* Self Test in Progress */
-#define DIAGF 0x40 /* Internal Diagnostic Failure */
-#define INIT 0x20 /* Mailbox Initialization Required */
-#define IDLE 0x10 /* SCSI Host Adapter Idle */
-#define CDF 0x08 /* Command/Data Out Port Full */
-#define DF 0x04 /* Data In Port Full */
-#define INVDCMD 0x01 /* Invalid H A Command */
-#define STATMASK 0xfd /* 0x02 is reserved */
+#define STST BIT(7) /* Self Test in Progress */
+#define DIAGF BIT(6) /* Internal Diagnostic Failure */
+#define INIT BIT(5) /* Mailbox Initialization Required */
+#define IDLE BIT(4) /* SCSI Host Adapter Idle */
+#define CDF BIT(3) /* Command/Data Out Port Full */
+#define DF BIT(2) /* Data In Port Full */
+/* BIT(1) is reserved */
+#define INVDCMD BIT(0) /* Invalid H A Command */
+#define STATMASK (STST | DIAGF | INIT | IDLE | CDF | DF | INVDCMD)
#define INTRFLAGS(base) (STATUS(base)+2)
-#define ANYINTR 0x80 /* Any Interrupt */
-#define SCRD 0x08 /* SCSI Reset Detected */
-#define HACC 0x04 /* HA Command Complete */
-#define MBOA 0x02 /* MBO Empty */
-#define MBIF 0x01 /* MBI Full */
-#define INTRMASK 0x8f
+#define ANYINTR BIT(7) /* Any Interrupt */
+#define SCRD BIT(3) /* SCSI Reset Detected */
+#define HACC BIT(2) /* HA Command Complete */
+#define MBOA BIT(1) /* MBO Empty */
+#define MBIF BIT(0) /* MBI Full */
+#define INTRMASK (ANYINTR | SCRD | HACC | MBOA | MBIF)
/* WRITE */
#define CONTROL(base) STATUS(base)
-#define HRST 0x80 /* Hard Reset */
-#define SRST 0x40 /* Soft Reset */
-#define IRST 0x20 /* Interrupt Reset */
-#define SCRST 0x10 /* SCSI Bus Reset */
+#define HRST BIT(7) /* Hard Reset */
+#define SRST BIT(6) /* Soft Reset */
+#define IRST BIT(5) /* Interrupt Reset */
+#define SCRST BIT(4) /* SCSI Bus Reset */
/* READ/WRITE */
#define DATA(base) (STATUS(base)+1)
@@ -80,14 +51,14 @@
/* Mailbox Definition 5.2.1 and 5.2.2 */
struct mailbox {
- unchar status; /* Command/Status */
- unchar ccbptr[3]; /* msb, .., lsb */
+ u8 status; /* Command/Status */
+ u8 ccbptr[3]; /* msb, .., lsb */
};
/* This is used with scatter-gather */
struct chain {
- unchar datalen[3]; /* Size of this part of chain */
- unchar dataptr[3]; /* Location of data */
+ u8 datalen[3]; /* Size of this part of chain */
+ u8 dataptr[3]; /* Location of data */
};
/* These belong in scsi.h also */
@@ -100,51 +71,32 @@ static inline void any2scsi(u8 *p, u32 v)
#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
-#define xany2scsi(up, p) \
-(up)[0] = ((long)(p)) >> 24; \
-(up)[1] = ((long)(p)) >> 16; \
-(up)[2] = ((long)(p)) >> 8; \
-(up)[3] = ((long)(p));
-
#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ (((long)(up)[2]) << 8) + ((long)(up)[3]) )
#define MAX_CDB 12
#define MAX_SENSE 14
-struct ccb { /* Command Control Block 5.3 */
- unchar op; /* Command Control Block Operation Code */
- unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
- /* Outbound data transfer, length is checked*/
- /* Inbound data transfer, length is checked */
- /* Logical Unit Number */
- unchar cdblen; /* SCSI Command Length */
- unchar rsalen; /* Request Sense Allocation Length/Disable */
- unchar datalen[3]; /* Data Length (msb, .., lsb) */
- unchar dataptr[3]; /* Data Pointer */
- unchar linkptr[3]; /* Link Pointer */
- unchar commlinkid; /* Command Linking Identifier */
- unchar hastat; /* Host Adapter Status (HASTAT) */
- unchar tarstat; /* Target Device Status */
- unchar reserved[2];
- unchar cdb[MAX_CDB+MAX_SENSE];/* SCSI Command Descriptor Block */
- /* REQUEST SENSE */
+struct ccb { /* Command Control Block 5.3 */
+ u8 op; /* Command Control Block Operation Code */
+ u8 idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked*/
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ u8 cdblen; /* SCSI Command Length */
+ u8 rsalen; /* Request Sense Allocation Length/Disable */
+ u8 datalen[3]; /* Data Length (msb, .., lsb) */
+ u8 dataptr[3]; /* Data Pointer */
+ u8 linkptr[3]; /* Link Pointer */
+ u8 commlinkid; /* Command Linking Identifier */
+ u8 hastat; /* Host Adapter Status (HASTAT) */
+ u8 tarstat; /* Target Device Status */
+ u8 reserved[2];
+ u8 cdb[MAX_CDB+MAX_SENSE]; /* SCSI Command Descriptor Block */
+ /* REQUEST SENSE */
};
-static int aha1542_detect(struct scsi_host_template *);
-static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static int aha1542_bus_reset(Scsi_Cmnd * SCpnt);
-static int aha1542_dev_reset(Scsi_Cmnd * SCpnt);
-static int aha1542_host_reset(Scsi_Cmnd * SCpnt);
-#if 0
-static int aha1542_old_abort(Scsi_Cmnd * SCpnt);
-static int aha1542_old_reset(Scsi_Cmnd *, unsigned int);
-#endif
-static int aha1542_biosparam(struct scsi_device *, struct block_device *,
- sector_t, int *);
-
+#define AHA1542_REGION_SIZE 4
#define AHA1542_MAILBOXES 8
-#define AHA1542_SCATTER 16
-#define AHA1542_CMDLUN 1
-#endif
+#endif /* _AHA1542_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 97f2accd3dbb..109e2c99e6c1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -10437,14 +10437,13 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
return;
}
}
- lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
+ lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
if (lstate == NULL) {
xpt_print_path(ccb->ccb_h.path);
printk("Couldn't allocate lstate\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
return;
}
- memset(lstate, 0, sizeof(*lstate));
status = xpt_create_path(&lstate->path, /*periph*/NULL,
xpt_path_path_id(ccb->ccb_h.path),
xpt_path_target_id(ccb->ccb_h.path),
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index d5c7b193d8d3..ce96a0be3282 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1326,10 +1326,9 @@ int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
ahd->platform_data =
- kmalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
+ kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
if (ahd->platform_data == NULL)
return (ENOMEM);
- memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
ahd->platform_data->irq = AHD_LINUX_NOIRQ;
ahd_lockinit(ahd);
ahd->seltime = (aic79xx_seltime & 0x3) << 4;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 10172a3af1b9..c4829d84b335 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -4464,10 +4464,9 @@ ahc_softc_init(struct ahc_softc *ahc)
ahc->pause = ahc->unpause | PAUSE;
/* XXX The shared scb data stuff should be deprecated */
if (ahc->scb_data == NULL) {
- ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
+ ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
if (ahc->scb_data == NULL)
return (ENOMEM);
- memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
}
return (0);
@@ -4780,10 +4779,10 @@ ahc_init_scbdata(struct ahc_softc *ahc)
SLIST_INIT(&scb_data->sg_maps);
/* Allocate SCB resources */
- scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
+ scb_data->scbarray = kzalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
+ GFP_ATOMIC);
if (scb_data->scbarray == NULL)
return (ENOMEM);
- memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
/* Determine the number of hardware SCBs and initialize them */
@@ -7558,14 +7557,13 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
return;
}
}
- lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
+ lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
if (lstate == NULL) {
xpt_print_path(ccb->ccb_h.path);
printk("Couldn't allocate lstate\n");
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
return;
}
- memset(lstate, 0, sizeof(*lstate));
status = xpt_create_path(&lstate->path, /*periph*/NULL,
xpt_path_path_id(ccb->ccb_h.path),
xpt_path_target_id(ccb->ccb_h.path),
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 88360116dbcb..a2f2c774cd6b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1214,10 +1214,9 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
ahc->platform_data =
- kmalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
+ kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
if (ahc->platform_data == NULL)
return (ENOMEM);
- memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
ahc->platform_data->irq = AHC_LINUX_NOIRQ;
ahc_lockinit(ahc);
ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index a6f5ee80fadc..beea30e5a34a 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -476,6 +476,8 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
goto fail_unmap_regs;
}
+ pci_set_drvdata(pdev, pep);
+
err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
DRV_MODULE_NAME, esp);
if (err < 0) {
@@ -496,8 +498,6 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
/* Assume 40MHz clock */
esp->cfreq = 40000000;
- pci_set_drvdata(pdev, pep);
-
err = scsi_esp_register(esp, &pdev->dev);
if (err)
goto fail_free_irq;
@@ -507,6 +507,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
fail_free_irq:
free_irq(pdev->irq, esp);
fail_unmap_command_block:
+ pci_set_drvdata(pdev, NULL);
pci_free_consistent(pdev, 16, esp->command_block,
esp->command_block_dma);
fail_unmap_regs:
@@ -530,6 +531,7 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_esp_unregister(esp);
free_irq(pdev->irq, esp);
+ pci_set_drvdata(pdev, NULL);
pci_free_consistent(pdev, 16, esp->command_block,
esp->command_block_dma);
pci_iounmap(pdev, esp->regs);
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index a70255413e7f..db87ece6edb2 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -1486,7 +1486,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
* selection.
*/
- timeout = jiffies + (250 * HZ / 1000);
+ timeout = jiffies + msecs_to_jiffies(250);
/*
* XXX very interesting - we're seeing a bounce where the BSY we
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index d1c37a386947..5ede3daa93dc 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -1014,7 +1014,6 @@ static struct platform_driver atari_scsi_driver = {
.remove = __exit_p(atari_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 80d97f3d2ed9..1028760b8a22 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -237,7 +237,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
- "BC_%d : Insufficent Buffer Error "
+ "BC_%d : Insufficient Buffer Error "
"Resp_Len : %d Actual_Resp_Len : %d\n",
mbx_resp_hdr->response_length,
mbx_resp_hdr->actual_resp_len);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index e90a3742f09d..cc3b9d3d6d40 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1079,22 +1079,18 @@ bfad_start_ops(struct bfad_s *bfad) {
int
bfad_worker(void *ptr)
{
- struct bfad_s *bfad;
- unsigned long flags;
-
- bfad = (struct bfad_s *)ptr;
-
- while (!kthread_should_stop()) {
+ struct bfad_s *bfad = ptr;
+ unsigned long flags;
- /* Send event BFAD_E_INIT_SUCCESS */
- bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
+ if (kthread_should_stop())
+ return 0;
- spin_lock_irqsave(&bfad->bfad_lock, flags);
- bfad->bfad_tsk = NULL;
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ /* Send event BFAD_E_INIT_SUCCESS */
+ bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
- break;
- }
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_tsk = NULL;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return 0;
}
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 0045742fab7d..dad959fcf6d8 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -339,7 +339,7 @@ ch_readconfig(scsi_changer *ch)
ch->firsts[CHET_DT],
ch->counts[CHET_DT]);
} else {
- VPRINTK(KERN_INFO, "reading element address assigment page failed!\n");
+ VPRINTK(KERN_INFO, "reading element address assignment page failed!\n");
}
/* vendor specific element types */
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index d9631e15f7b5..dbe416ff46c2 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1172,7 +1172,7 @@ static struct pci_error_handlers csio_err_handler = {
* Macros needed to support the PCI Device ID Table ...
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
- static struct pci_device_id csio_pci_tbl[] = {
+ static const struct pci_device_id csio_pci_tbl[] = {
/* Define for FCoE uses PF6 */
#define CH_PCI_DEVICE_ID_FUNCTION 0x6
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index f35792f7051c..f8d2478b11cc 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -57,9 +57,9 @@
*/
/* settings for DTC3181E card with only Mustek scanner attached */
-#define USLEEP_POLL 1
-#define USLEEP_SLEEP 20
-#define USLEEP_WAITLONG 500
+#define USLEEP_POLL msecs_to_jiffies(10)
+#define USLEEP_SLEEP msecs_to_jiffies(200)
+#define USLEEP_WAITLONG msecs_to_jiffies(5000)
#define AUTOPROBE_IRQ
@@ -723,7 +723,7 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
MODULE_LICENSE("GPL");
-#ifndef SCSI_G_NCR5380_MEM
+#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
static struct isapnp_device_id id_table[] = {
{
ISAPNP_ANY_ID, ISAPNP_ANY_ID,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1cfbd3dda47..8eab107b53fb 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6632,14 +6632,12 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h)
static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
- int i, cpu;
+ int cpu;
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(cpu) {
u32 *lockup_detected;
lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
*lockup_detected = value;
- cpu = cpumask_next(cpu, cpu_online_mask);
}
wmb(); /* be sure the per-cpu variables are out to memory */
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d9afc51af7d3..882744852aac 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -99,6 +99,7 @@ static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
+static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@@ -221,6 +222,8 @@ MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
+module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@@ -495,6 +498,10 @@ struct ipr_error_table_t ipr_error_table[] = {
"4061: Multipath redundancy level got better"},
{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
"4060: Multipath redundancy level got worse"},
+ {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9083: Device raw mode enabled"},
+ {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9084: Device raw mode disabled"},
{0x07270000, 0, 0,
"Failure due to other device"},
{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -1462,7 +1469,8 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
- if (ioasc != IPR_IOASC_IOA_WAS_RESET)
+ if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
@@ -2566,7 +2574,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
ipr_handle_log_data(ioa_cfg, hostrcb);
if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
- } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
+ } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
}
@@ -4491,11 +4500,83 @@ static struct device_attribute ipr_resource_type_attr = {
.show = ipr_show_resource_type
};
+/**
+ * ipr_show_raw_mode - Show the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_raw_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res)
+ len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
+ else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+/**
+ * ipr_store_raw_mode - Change the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes consumed from buffer
+ **/
+static ssize_t ipr_store_raw_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res) {
+ if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
+ res->raw_mode = simple_strtoul(buf, NULL, 10);
+ len = strlen(buf);
+ if (res->sdev)
+ sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
+ res->raw_mode ? "enabled" : "disabled");
+ } else
+ len = -EINVAL;
+ } else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_raw_mode_attr = {
+ .attr = {
+ .name = "raw_mode",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_raw_mode,
+ .store = ipr_store_raw_mode
+};
+
static struct device_attribute *ipr_dev_attrs[] = {
&ipr_adapter_handle_attr,
&ipr_resource_path_attr,
&ipr_device_id_attr,
&ipr_resource_type_attr,
+ &ipr_raw_mode_attr,
NULL,
};
@@ -5379,9 +5460,6 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
/* Mask the interrupt */
writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
-
- /* Clear the interrupt */
- writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
list_del(&ioa_cfg->reset_cmd->queue);
@@ -6150,6 +6228,13 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
case IPR_IOASC_NR_INIT_CMD_REQUIRED:
break;
+ case IPR_IOASC_IR_NON_OPTIMIZED:
+ if (res->raw_mode) {
+ res->raw_mode = 0;
+ scsi_cmd->result |= (DID_IMM_RETRY << 16);
+ } else
+ scsi_cmd->result |= (DID_ERROR << 16);
+ break;
default:
if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
scsi_cmd->result |= (DID_ERROR << 16);
@@ -6289,6 +6374,8 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
}
+ if (res->raw_mode && ipr_is_af_dasd_device(res))
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
@@ -6402,7 +6489,6 @@ static struct scsi_host_template driver_template = {
.shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs,
.proc_name = IPR_NAME,
- .no_write_same = 1,
.use_blk_tags = 1,
};
@@ -8318,7 +8404,6 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
ENTER;
- pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
LEAVE;
@@ -8326,6 +8411,32 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_reset_work - Pulse a PCIe fundamental reset
+ * @work: work struct
+ *
+ * Description: This pulses a warm reset to a slot.
+ *
+ **/
+static void ipr_reset_reset_work(struct work_struct *work)
+{
+ struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct pci_dev *pdev = ioa_cfg->pdev;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+ pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+ msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
+ pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->reset_cmd == ipr_cmd)
+ ipr_reset_ioa_job(ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+}
+
+/**
* ipr_reset_slot_reset - Reset the PCI slot of the adapter.
* @ipr_cmd: ipr command struct
*
@@ -8337,12 +8448,11 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+ INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
+ queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
- ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
@@ -8480,6 +8590,122 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_quiesce_done - Complete IOA disconnect
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Freeze the adapter to complete quiesce processing
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioa_bringdown_done;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_cancel_hcam_done - Check for outstanding commands
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Ensure nothing is outstanding to the IOA and
+ * proceed with IOA disconnect. Otherwise reset the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_cmnd *loop_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int rc = IPR_RC_JOB_CONTINUE;
+ int count = 0;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_quiesce_done;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
+ count++;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ rc = IPR_RC_JOB_RETURN;
+ break;
+ }
+ spin_unlock(&hrrq->_lock);
+
+ if (count)
+ break;
+ }
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Cancel any outstanding HCAMs to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+ struct ipr_cmd_pkt *cmd_pkt;
+ struct ipr_cmnd *hcam_cmd;
+ struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
+
+ if (!hrrq->ioa_is_dead) {
+ if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
+ list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
+ continue;
+
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
+ cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
+ cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
+ cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
+ cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
+ cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
+ cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
+ cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
+ cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
+ cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_CANCEL_TIMEOUT);
+
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ break;
+ }
+ }
+ } else
+ ipr_cmd->job_step = ipr_reset_alert;
+
+ LEAVE;
+ return rc;
+}
+
+/**
* ipr_reset_ucode_download_done - Microcode download completion
* @ipr_cmd: ipr command struct
*
@@ -8561,7 +8787,9 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
- if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ else if (shutdown_type != IPR_SHUTDOWN_NONE &&
!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
@@ -8917,13 +9145,15 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
int i;
- for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
- if (ioa_cfg->ipr_cmnd_list[i])
- dma_pool_free(ioa_cfg->ipr_cmd_pool,
- ioa_cfg->ipr_cmnd_list[i],
- ioa_cfg->ipr_cmnd_list_dma[i]);
+ if (ioa_cfg->ipr_cmnd_list) {
+ for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
+ if (ioa_cfg->ipr_cmnd_list[i])
+ dma_pool_free(ioa_cfg->ipr_cmd_pool,
+ ioa_cfg->ipr_cmnd_list[i],
+ ioa_cfg->ipr_cmnd_list_dma[i]);
- ioa_cfg->ipr_cmnd_list[i] = NULL;
+ ioa_cfg->ipr_cmnd_list[i] = NULL;
+ }
}
if (ioa_cfg->ipr_cmd_pool)
@@ -8973,26 +9203,25 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
}
/**
- * ipr_free_all_resources - Free all allocated resources for an adapter.
- * @ipr_cmd: ipr command struct
+ * ipr_free_irqs - Free all allocated IRQs for the adapter.
+ * @ioa_cfg: ipr cfg struct
*
- * This function frees all allocated resources for the
+ * This function frees all allocated IRQs for the
* specified adapter.
*
* Return value:
* none
**/
-static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
- ENTER;
if (ioa_cfg->intr_flag == IPR_USE_MSI ||
ioa_cfg->intr_flag == IPR_USE_MSIX) {
int i;
for (i = 0; i < ioa_cfg->nvectors; i++)
free_irq(ioa_cfg->vectors_info[i].vec,
- &ioa_cfg->hrrq[i]);
+ &ioa_cfg->hrrq[i]);
} else
free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
@@ -9003,7 +9232,26 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
pci_disable_msix(pdev);
ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
}
+}
+/**
+ * ipr_free_all_resources - Free all allocated resources for an adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function frees all allocated resources for the
+ * specified adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ ENTER;
+ ipr_free_irqs(ioa_cfg);
+ if (ioa_cfg->reset_work_q)
+ destroy_workqueue(ioa_cfg->reset_work_q);
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@@ -9823,6 +10071,14 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
(dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
ioa_cfg->needs_warm_reset = 1;
ioa_cfg->reset = ipr_reset_slot_reset;
+
+ ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
+ WQ_MEM_RECLAIM, host->host_no);
+
+ if (!ioa_cfg->reset_work_q) {
+ dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
+ goto out_free_irq;
+ }
} else
ioa_cfg->reset = ipr_reset_start_bist;
@@ -9834,6 +10090,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
out:
return rc;
+out_free_irq:
+ ipr_free_irqs(ioa_cfg);
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
@@ -9914,6 +10172,8 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
+ if (ioa_cfg->reset_work_q)
+ flush_workqueue(ioa_cfg->reset_work_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -10036,6 +10296,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
+ enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -10051,9 +10312,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
- ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
+ shutdown_type = IPR_SHUTDOWN_QUIESCE;
+
+ ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
+ ipr_free_irqs(ioa_cfg);
+ pci_disable_device(ioa_cfg->pdev);
+ }
}
static struct pci_device_id ipr_pci_table[] = {
@@ -10211,7 +10479,8 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index ec03b42fa2b9..47412cf4eaac 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -39,8 +39,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.6.0"
-#define IPR_DRIVER_DATE "(November 16, 2012)"
+#define IPR_DRIVER_VERSION "2.6.1"
+#define IPR_DRIVER_DATE "(March 12, 2015)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -138,6 +138,7 @@
#define IPR_IOASC_BUS_WAS_RESET 0x06290000
#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000
#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000
+#define IPR_IOASC_IR_NON_OPTIMIZED 0x05258200
#define IPR_FIRST_DRIVER_IOASC 0x10000000
#define IPR_IOASC_IOA_WAS_RESET 0x10000001
@@ -196,6 +197,8 @@
/*
* Adapter Commands
*/
+#define IPR_CANCEL_REQUEST 0xC0
+#define IPR_CANCEL_64BIT_IOARCB 0x01
#define IPR_QUERY_RSRC_STATE 0xC2
#define IPR_RESET_DEVICE 0xC3
#define IPR_RESET_TYPE_SELECT 0x80
@@ -222,6 +225,7 @@
#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ)
#define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ)
#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
+#define IPR_CANCEL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
@@ -518,6 +522,7 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_IOACMD 0x01
#define IPR_RQTYPE_HCAM 0x02
#define IPR_RQTYPE_ATA_PASSTHRU 0x04
+#define IPR_RQTYPE_PIPE 0x05
u8 reserved2;
@@ -1271,6 +1276,7 @@ struct ipr_resource_entry {
u8 del_from_ml:1;
u8 resetting_device:1;
u8 reset_occurred:1;
+ u8 raw_mode:1;
u32 bus; /* AKA channel */
u32 target; /* AKA id */
@@ -1402,7 +1408,8 @@ enum ipr_shutdown_type {
IPR_SHUTDOWN_NORMAL = 0x00,
IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40,
IPR_SHUTDOWN_ABBREV = 0x80,
- IPR_SHUTDOWN_NONE = 0x100
+ IPR_SHUTDOWN_NONE = 0x100,
+ IPR_SHUTDOWN_QUIESCE = 0x101,
};
struct ipr_trace_entry {
@@ -1536,6 +1543,7 @@ struct ipr_ioa_cfg {
u8 saved_mode_page_len;
struct work_struct work_q;
+ struct workqueue_struct *reset_work_q;
wait_queue_head_t reset_wait_q;
wait_queue_head_t msi_wait_q;
@@ -1587,6 +1595,7 @@ struct ipr_cmnd {
struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
+ struct work_struct work;
void (*fast_done) (struct ipr_cmnd *);
void (*done) (struct ipr_cmnd *);
int (*job_step) (struct ipr_cmnd *);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 434e9037908e..9b81a34d7449 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -413,6 +413,9 @@ struct lpfc_vport {
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
uint32_t cfg_fdmi_on;
+#define LPFC_FDMI_SUPPORT 1 /* bit 0 - FDMI supported? */
+#define LPFC_FDMI_REG_DELAY 2 /* bit 1 - 60 sec registration delay */
+#define LPFC_FDMI_ALL_ATTRIB 4 /* bit 2 - register ALL attributes? */
uint32_t cfg_discovery_threads;
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2f9b96826ac0..d65bd178d131 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -406,8 +406,13 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ char fwrev[FW_REV_STR_SIZE];
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
- return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
}
/**
@@ -4568,12 +4573,18 @@ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
/*
# lpfc_fdmi_on: controls FDMI support.
-# 0 = no FDMI support
-# 1 = support FDMI without attribute of hostname
-# 2 = support FDMI with attribute of hostname
-# Value range [0,2]. Default value is 0.
+# Set NOT Set
+# bit 0 = FDMI support no FDMI support
+# LPFC_FDMI_SUPPORT just turns basic support on/off
+# bit 1 = Register delay no register delay (60 seconds)
+# LPFC_FDMI_REG_DELAY 60 sec registration delay after FDMI login
+# bit 2 = All attributes Use an attribute subset
+# LPFC_FDMI_ALL_ATTRIB applies to both port and HBA attributes
+# Port attributes subset: 1 thru 6 OR all: 1 thru 0xd 0x101 0x102 0x103
+# HBA attributes subset: 1 thru 0xb OR all: 1 thru 0xc
+# Value range [0,7]. Default value is 0.
*/
-LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
+LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 7, "Enable FDMI support");
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a7bf359aa0c6..b705068079c0 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -3194,6 +3194,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
cmdiocbq->vport = phba->pport;
cmdiocbq->iocb_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
@@ -4179,6 +4180,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
switch (opcode) {
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
+ case COMN_OPCODE_GET_PROFILE_CONFIG:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 928ef609f363..e557bcdbcb19 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2010-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -246,6 +246,7 @@ struct lpfc_sli_config_emb1_subsys {
#define lpfc_emb1_subcmnd_subsys_WORD word6
/* Subsystem COMN (0x01) OpCodes */
#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_GET_PROFILE_CONFIG 0xA4
#define COMN_OPCODE_READ_OBJECT 0xAB
#define COMN_OPCODE_WRITE_OBJECT 0xAC
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 00665a5d92fd..587e3e962f2b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -284,6 +284,7 @@ void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
@@ -354,6 +355,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
+extern struct scsi_host_template lpfc_template_s3;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 61a32cd23f79..af129966bd11 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -555,7 +555,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
}
}
}
- if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
+ if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
goto nsout1;
Cnt -= sizeof (uint32_t);
}
@@ -641,7 +641,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
- be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+ cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x\n",
vport->fc_flag);
@@ -1074,11 +1074,48 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
- n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
- vport->phba->ModelName, fwrev, lpfc_release_version);
+ n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename);
+
+ /* Note :- OS name is "Linux" */
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname);
+
return n;
}
+static uint32_t
+lpfc_find_map_node(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct Scsi_Host *shost;
+ uint32_t cnt = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_type & NLP_FABRIC)
+ continue;
+ if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
+ cnt++;
+ }
+ spin_unlock_irq(shost->host_lock);
+ return cnt;
+}
+
/*
* lpfc_ns_cmd
* Description:
@@ -1177,7 +1214,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
switch (cmdcode) {
case SLI_CTNS_GID_FT:
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_GID_FT);
+ cpu_to_be16(SLI_CTNS_GID_FT);
CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
if (vport->port_state < LPFC_NS_QRY)
vport->port_state = LPFC_NS_QRY;
@@ -1188,7 +1225,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_GFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_GFF_ID);
+ cpu_to_be16(SLI_CTNS_GFF_ID);
CtReq->un.gff.PortId = cpu_to_be32(context);
cmpl = lpfc_cmpl_ct_cmd_gff_id;
break;
@@ -1196,7 +1233,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RFT_ID:
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RFT_ID);
+ cpu_to_be16(SLI_CTNS_RFT_ID);
CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rft.fcpReg = 1;
cmpl = lpfc_cmpl_ct_cmd_rft_id;
@@ -1205,7 +1242,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RNN_ID:
vport->ct_flags &= ~FC_CT_RNN_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RNN_ID);
+ cpu_to_be16(SLI_CTNS_RNN_ID);
CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
@@ -1215,7 +1252,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RSPN_ID:
vport->ct_flags &= ~FC_CT_RSPN_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RSPN_ID);
+ cpu_to_be16(SLI_CTNS_RSPN_ID);
CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
size = sizeof(CtReq->un.rspn.symbname);
CtReq->un.rspn.len =
@@ -1226,7 +1263,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_RSNN_NN:
vport->ct_flags &= ~FC_CT_RSNN_NN;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RSNN_NN);
+ cpu_to_be16(SLI_CTNS_RSNN_NN);
memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
size = sizeof(CtReq->un.rsnn.symbname);
@@ -1238,14 +1275,14 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_DA_ID:
/* Implement DA_ID Nameserver request */
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_DA_ID);
+ cpu_to_be16(SLI_CTNS_DA_ID);
CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
cmpl = lpfc_cmpl_ct_cmd_da_id;
break;
case SLI_CTNS_RFF_ID:
vport->ct_flags &= ~FC_CT_RFF_ID;
CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RFF_ID);
+ cpu_to_be16(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rff.fbits = FC4_FEATURE_INIT;
CtReq->un.rff.type_code = FC_TYPE_FCP;
@@ -1299,7 +1336,6 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint32_t latt;
latt = lpfc_els_chk_latt(vport);
-
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"FDMI cmpl: status:x%x/x%x latt:%d",
irsp->ulpStatus, irsp->un.ulpWord[4], latt);
@@ -1310,29 +1346,49 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"ulpStatus: x%x, rid x%x\n",
be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
irsp->un.ulpWord[4]);
- lpfc_ct_free_iocb(phba, cmdiocb);
- return;
+ goto fail_out;
}
ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
goto fail_out;
- if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0220 FDMI rsp failed Data: x%x\n",
be16_to_cpu(fdmi_cmd));
}
+fail_out:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+}
+
+static void
+lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_sli_ct_request *CTcmd = inp->virt;
+ uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+ struct lpfc_nodelist *ndlp;
+
+ lpfc_cmpl_ct_cmd_fdmi(phba, cmdiocb, rspiocb);
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ /*
+ * Need to cycle thru FDMI registration for discovery
+ * DHBA -> DPRT -> RHBA -> RPA
+ */
switch (be16_to_cpu(fdmi_cmd)) {
case SLI_MGMT_RHBA:
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
break;
- case SLI_MGMT_RPA:
- break;
-
case SLI_MGMT_DHBA:
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
break;
@@ -1341,12 +1397,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
break;
}
-
-fail_out:
- lpfc_ct_free_iocb(phba, cmdiocb);
- return;
}
+
int
lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
{
@@ -1355,18 +1408,28 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
struct lpfc_sli_ct_request *CtReq;
struct ulp_bde64 *bpl;
uint32_t size;
- REG_HBA *rh;
- PORT_ENTRY *pe;
- REG_PORT_ATTRIBUTE *pab;
- ATTRIBUTE_BLOCK *ab;
- ATTRIBUTE_ENTRY *ae;
+ uint32_t rsp_size;
+ struct lpfc_fdmi_reg_hba *rh;
+ struct lpfc_fdmi_port_entry *pe;
+ struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_attr_block *ab = NULL;
+ struct lpfc_fdmi_attr_entry *ae;
+ struct lpfc_fdmi_attr_def *ad;
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+ if (ndlp == NULL) {
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return 0;
+ cmpl = lpfc_cmpl_ct_cmd_fdmi; /* cmd interface */
+ } else {
+ cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
+ }
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!mp)
goto fdmi_cmd_exit;
@@ -1375,7 +1438,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
goto fdmi_cmd_free_mp;
/* Allocate buffer for Buffer ptr list */
- bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp)
goto fdmi_cmd_free_mpvirt;
@@ -1390,205 +1453,330 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0218 FDMI Request Data: x%x x%x x%x\n",
vport->fc_flag, vport->port_state, cmdcode);
- CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+ CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ /* First populate the CT_IU preamble */
memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
CtReq->RevisionId.bits.InId = 0;
CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
+
+ CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
+ rsp_size = LPFC_BPL_SIZE;
size = 0;
+ /* Next fill in the specific FDMI cmd information */
switch (cmdcode) {
+ case SLI_MGMT_RHAT:
case SLI_MGMT_RHBA:
{
lpfc_vpd_t *vp = &phba->vpd;
uint32_t i, j, incr;
- int len;
+ int len = 0;
- CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_MGMT_RHBA);
- CtReq->CommandResponse.bits.Size = 0;
- rh = (REG_HBA *) & CtReq->un.PortID;
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ /* HBA Identifier */
memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
- /* One entry (port) per adapter */
- rh->rpl.EntryCnt = be32_to_cpu(1);
- memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
-
- /* point to the HBA attribute block */
- size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
- ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
+ sizeof(struct lpfc_name));
+
+ if (cmdcode == SLI_MGMT_RHBA) {
+ /* Registered Port List */
+ /* One entry (port) per adapter */
+ rh->rpl.EntryCnt = cpu_to_be32(1);
+ memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+
+ /* point to the HBA attribute block */
+ size = 2 * sizeof(struct lpfc_name) +
+ FOURBYTES;
+ } else {
+ size = sizeof(struct lpfc_name);
+ }
+ ab = (struct lpfc_fdmi_attr_block *)
+ ((uint8_t *)rh + size);
ab->EntryCnt = 0;
+ size += FOURBYTES;
- /* Point to the beginning of the first HBA attribute
- entry */
+ /*
+ * Point to beginning of first HBA attribute entry
+ */
/* #1 HBA attribute entry */
- size += FOURBYTES;
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
- + sizeof (struct lpfc_name));
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RHBA_NODENAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
+ sizeof(struct lpfc_name));
ab->EntryCnt++;
- size += FOURBYTES + sizeof (struct lpfc_name);
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #2 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
- strncpy(ae->un.Manufacturer, "Emulex Corporation", 64);
- len = strlen(ae->un.Manufacturer);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.Manufacturer));
+ ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
+ strncpy(ae->un.Manufacturer, "Emulex Corporation",
+ sizeof(ae->un.Manufacturer));
+ len = strnlen(ae->un.Manufacturer,
+ sizeof(ae->un.Manufacturer));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #3 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
- strncpy(ae->un.SerialNumber, phba->SerialNumber, 64);
- len = strlen(ae->un.SerialNumber);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.SerialNumber));
+ ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
+ strncpy(ae->un.SerialNumber, phba->SerialNumber,
+ sizeof(ae->un.SerialNumber));
+ len = strnlen(ae->un.SerialNumber,
+ sizeof(ae->un.SerialNumber));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #4 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MODEL);
- strncpy(ae->un.Model, phba->ModelName, 256);
- len = strlen(ae->un.Model);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.Model));
+ ad->AttrType = cpu_to_be16(RHBA_MODEL);
+ strncpy(ae->un.Model, phba->ModelName,
+ sizeof(ae->un.Model));
+ len = strnlen(ae->un.Model, sizeof(ae->un.Model));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #5 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
- strncpy(ae->un.ModelDescription, phba->ModelDesc, 256);
- len = strlen(ae->un.ModelDescription);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.ModelDescription));
+ ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
+ strncpy(ae->un.ModelDescription, phba->ModelDesc,
+ sizeof(ae->un.ModelDescription));
+ len = strnlen(ae->un.ModelDescription,
+ sizeof(ae->un.ModelDescription));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + 8) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #6 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 8);
+ ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 8);
/* Convert JEDEC ID to ascii for hardware version */
incr = vp->rev.biuRev;
for (i = 0; i < 8; i++) {
j = (incr & 0xf);
if (j <= 9)
ae->un.HardwareVersion[7 - i] =
- (char)((uint8_t) 0x30 +
- (uint8_t) j);
+ (char)((uint8_t)0x30 +
+ (uint8_t)j);
else
ae->un.HardwareVersion[7 - i] =
- (char)((uint8_t) 0x61 +
- (uint8_t) (j - 10));
+ (char)((uint8_t)0x61 +
+ (uint8_t)(j - 10));
incr = (incr >> 4);
}
ab->EntryCnt++;
size += FOURBYTES + 8;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #7 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
- strncpy(ae->un.DriverVersion,
- lpfc_release_version, 256);
- len = strlen(ae->un.DriverVersion);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.DriverVersion));
+ ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
+ strncpy(ae->un.DriverVersion, lpfc_release_version,
+ sizeof(ae->un.DriverVersion));
+ len = strnlen(ae->un.DriverVersion,
+ sizeof(ae->un.DriverVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #8 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
- strncpy(ae->un.OptionROMVersion,
- phba->OptionROMVersion, 256);
- len = strlen(ae->un.OptionROMVersion);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OptionROMVersion));
+ ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
+ strncpy(ae->un.OptionROMVersion, phba->OptionROMVersion,
+ sizeof(ae->un.OptionROMVersion));
+ len = strnlen(ae->un.OptionROMVersion,
+ sizeof(ae->un.OptionROMVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #9 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FirmwareVersion));
+ ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
1);
- len = strlen(ae->un.FirmwareVersion);
+ len = strnlen(ae->un.FirmwareVersion,
+ sizeof(ae->un.FirmwareVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #10 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
- sprintf(ae->un.OsNameVersion, "%s %s %s",
- init_utsname()->sysname,
- init_utsname()->release,
- init_utsname()->version);
- len = strlen(ae->un.OsNameVersion);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OsNameVersion));
+ ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
+ snprintf(ae->un.OsNameVersion,
+ sizeof(ae->un.OsNameVersion),
+ "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
+ len = strnlen(ae->un.OsNameVersion,
+ sizeof(ae->un.OsNameVersion));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
ab->EntryCnt++;
size += FOURBYTES + len;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
/* #11 HBA attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
- ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- ae->un.MaxCTPayloadLen = (65 * 4096);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType =
+ cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ ae->un.MaxCTPayloadLen = cpu_to_be32(LPFC_MAX_CT_SIZE);
ab->EntryCnt++;
size += FOURBYTES + 4;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
- ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
+ /*
+ * Currently switches don't seem to support the
+ * following extended HBA attributes.
+ */
+ if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB))
+ goto hba_out;
+
+ /* #12 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.NodeSymName));
+ ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
+ len = lpfc_vport_symbolic_node_name(vport,
+ ae->un.NodeSymName, sizeof(ae->un.NodeSymName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+hba_out:
+ ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
/* Total size */
size = GID_REQUEST_SZ - 4 + size;
}
break;
+ case SLI_MGMT_RPRT:
case SLI_MGMT_RPA:
{
lpfc_vpd_t *vp;
struct serv_parm *hsp;
- int len;
+ int len = 0;
vp = &phba->vpd;
- CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_MGMT_RPA);
- CtReq->CommandResponse.bits.Size = 0;
- pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
- size = sizeof (struct lpfc_name) + FOURBYTES;
- memcpy((uint8_t *) & pab->PortName,
- (uint8_t *) & vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ if (cmdcode == SLI_MGMT_RPRT) {
+ rh = (struct lpfc_fdmi_reg_hba *)
+ &CtReq->un.PortID;
+ /* HBA Identifier */
+ memcpy(&rh->hi.PortName,
+ &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ pab = (struct lpfc_fdmi_reg_portattr *)
+ &rh->rpl.EntryCnt;
+ } else
+ pab = (struct lpfc_fdmi_reg_portattr *)
+ &CtReq->un.PortID;
+ size = sizeof(struct lpfc_name) + FOURBYTES;
+ memcpy((uint8_t *)&pab->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
pab->ab.EntryCnt = 0;
/* #1 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
- ae->un.SupportFC4Types[2] = 1;
- ae->un.SupportFC4Types[7] = 1;
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FC4Types));
+ ad->AttrType =
+ cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 32);
+ ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */
+ ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */
+ ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */
pab->ab.EntryCnt++;
size += FOURBYTES + 32;
/* #2 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
-
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
ae->un.SupportSpeed = 0;
if (phba->lmt & LMT_16Gb)
ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
@@ -1602,15 +1790,19 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
ae->un.SupportSpeed |= HBA_PORTSPEED_2GBIT;
if (phba->lmt & LMT_1Gb)
ae->un.SupportSpeed |= HBA_PORTSPEED_1GBIT;
+ ae->un.SupportSpeed =
+ cpu_to_be32(ae->un.SupportSpeed);
pab->ab.EntryCnt++;
size += FOURBYTES + 4;
/* #3 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- switch(phba->fc_linkspeed) {
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
break;
@@ -1633,93 +1825,273 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
break;
}
+ ae->un.PortSpeed = cpu_to_be32(ae->un.PortSpeed);
pab->ab.EntryCnt++;
size += FOURBYTES + 4;
/* #4 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- hsp = (struct serv_parm *) & vport->fc_sparam;
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ hsp = (struct serv_parm *)&vport->fc_sparam;
ae->un.MaxFrameSize =
- (((uint32_t) hsp->cmn.
- bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
+ (((uint32_t)hsp->cmn.
+ bbRcvSizeMsb) << 8) | (uint32_t)hsp->cmn.
bbRcvSizeLsb;
+ ae->un.MaxFrameSize =
+ cpu_to_be32(ae->un.MaxFrameSize);
pab->ab.EntryCnt++;
size += FOURBYTES + 4;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
/* #5 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
- ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
- strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
- len = strlen((char *)ae->un.OsDeviceName);
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OsDeviceName));
+ ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
+ strncpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME,
+ sizeof(ae->un.OsDeviceName));
+ len = strnlen((char *)ae->un.OsDeviceName,
+ sizeof(ae->un.OsDeviceName));
len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
pab->ab.EntryCnt++;
size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #6 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.HostName));
+ snprintf(ae->un.HostName, sizeof(ae->un.HostName), "%s",
+ init_utsname()->nodename);
+ ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
+ len = strnlen(ae->un.HostName,
+ sizeof(ae->un.HostName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen =
+ cpu_to_be16(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
- if (vport->cfg_fdmi_on == 2) {
- /* #6 Port attribute entry */
- ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
- size);
- ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
- sprintf(ae->un.HostName, "%s",
- init_utsname()->nodename);
- len = strlen(ae->un.HostName);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- ae->ad.bits.AttrLen =
- be16_to_cpu(FOURBYTES + len);
- pab->ab.EntryCnt++;
- size += FOURBYTES + len;
- }
-
- pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
+ /*
+ * Currently switches don't seem to support the
+ * following extended Port attributes.
+ */
+ if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB))
+ goto port_out;
+
+ /* #7 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_NODENAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #8 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.PortName, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #9 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.NodeSymName));
+ ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
+ len = lpfc_vport_symbolic_port_name(vport,
+ ae->un.NodeSymName, sizeof(ae->un.NodeSymName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #10 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
+ ae->un.PortState = 0;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #11 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
+ ae->un.SupportClass =
+ cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #12 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.FabricName, &vport->fabric_nodename,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #13 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FC4Types));
+ ad->AttrType =
+ cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 32);
+ ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */
+ ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */
+ ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 32;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #257 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
+ ae->un.PortState = 0;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #258 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
+ ae->un.PortState = lpfc_find_map_node(vport);
+ ae->un.PortState = cpu_to_be32(ae->un.PortState);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #259 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
+ ae->un.PortId = cpu_to_be32(vport->fc_myDID);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+port_out:
+ pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
/* Total size */
size = GID_REQUEST_SZ - 4 + size;
}
break;
+ case SLI_MGMT_GHAT:
+ case SLI_MGMT_GRPL:
+ rsp_size = FC_MAX_NS_RSP;
case SLI_MGMT_DHBA:
- CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
- CtReq->CommandResponse.bits.Size = 0;
- pe = (PORT_ENTRY *) & CtReq->un.PortID;
- memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
- size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+ case SLI_MGMT_DHAT:
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ memcpy((uint8_t *)&pe->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
break;
+ case SLI_MGMT_GPAT:
+ case SLI_MGMT_GPAS:
+ rsp_size = FC_MAX_NS_RSP;
case SLI_MGMT_DPRT:
- CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
- CtReq->CommandResponse.bits.Size = 0;
- pe = (PORT_ENTRY *) & CtReq->un.PortID;
- memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
- size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+ case SLI_MGMT_DPA:
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ memcpy((uint8_t *)&pe->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
+ break;
+ case SLI_MGMT_GRHL:
+ size = GID_REQUEST_SZ - 4;
break;
+ default:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+ "0298 FDMI cmdcode x%x not supported\n",
+ cmdcode);
+ goto fdmi_cmd_free_bmpvirt;
}
+ CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
- bpl = (struct ulp_bde64 *) bmp->virt;
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
- bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->tus.f.bdeFlags = 0;
bpl->tus.f.bdeSize = size;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
-
- cmpl = lpfc_cmpl_ct_cmd_fdmi;
- /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
+ /*
+ * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
* to hold ndlp reference for the corresponding callback function.
*/
- if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
return 0;
- /* Decrement ndlp reference count to release ndlp reference held
+ /*
+ * Decrement ndlp reference count to release ndlp reference held
* for the failed command's callback function.
*/
lpfc_nlp_put(ndlp);
+fdmi_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
fdmi_cmd_free_bmp:
kfree(bmp);
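
The lpfc_ct.c hunks above make two recurring changes. First, constants stored into the big-endian CT_IU fields now use cpu_to_be16()/cpu_to_be32() (host to wire) rather than be16_to_cpu(); for a constant the two helpers produce the same bits on either byte order, so this is a correctness-of-intent and sparse-annotation fix rather than a behavioural one. Second, the FDMI attribute building becomes a sequence of bounded TLV appends: strncpy()/strnlen() against the destination size, then the length rounded up with "len += (len & 3) ? (4 - (len & 3)) : 4". The standalone sketch below is illustrative only, not driver code; the struct and helper names are invented, and htons() stands in for cpu_to_be16().

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>		/* htons()/ntohs() as byte-order stand-ins */

	struct fdmi_attr {		/* big-endian TLV on the wire */
		uint16_t type;
		uint16_t len;		/* 4-byte header + padded value length */
		uint8_t  value[256];
	};

	/* Pack one ASCII attribute; returns the bytes the TLV occupies. */
	static size_t fdmi_add_string_attr(struct fdmi_attr *ad,
					   uint16_t type, const char *s)
	{
		size_t len;

		memset(ad->value, 0, sizeof(ad->value));
		strncpy((char *)ad->value, s, sizeof(ad->value) - 1);
		len = strnlen((char *)ad->value, sizeof(ad->value));
		/* round up to a 4-byte boundary; a full pad word is added when
		 * the string already ends on one (same idiom as the hunks above) */
		len += (len & 3) ? (4 - (len & 3)) : 4;

		ad->type = htons(type);			/* host -> big-endian */
		ad->len  = htons((uint16_t)(4 + len));
		return 4 + len;
	}

	int main(void)
	{
		struct fdmi_attr a;
		size_t n = fdmi_add_string_attr(&a, 0x2, "Emulex Corporation");

		printf("TLV size %zu, on-wire len field %u\n", n, ntohs(a.len));
		return 0;
	}

"Emulex Corporation" is 18 bytes, so it pads to 20 and the TLV occupies 24 bytes in total, matching the FOURBYTES + len accounting used in the driver hunks.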
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5633e7dadc08..513edcb0c2da 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c66088d0fd2a..851e8efe364e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2243,8 +2243,7 @@ lpfc_adisc_done(struct lpfc_vport *vport)
*/
if (vport->port_state < LPFC_VPORT_READY) {
/* If we get here, there is nothing to ADISC */
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
/* go thru NPR list, issue ELS PLOGIs */
@@ -3338,7 +3337,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
- maxretry = 0;
+ if (phba->link_flag != LS_LOOPBACK_MODE)
+ maxretry = 0;
+ else
+ maxretry = 2;
+
if (cmdiocb->retry >= 100)
delay = 5000;
else if (cmdiocb->retry >= 32)
@@ -3701,6 +3704,11 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if (NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp);
/* This is the end of the default RPI cleanup logic for
@@ -5198,7 +5206,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
port_state = vport->port_state;
vport->fc_flag |= FC_PT2PT;
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- vport->port_state = LPFC_FLOGI;
spin_unlock_irq(shost->host_lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3311 Rcv Flogi PS x%x new PS x%x "
@@ -7173,7 +7180,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
- if (vport->cfg_fdmi_on) {
+ if (vport->cfg_fdmi_on & LPFC_FDMI_SUPPORT) {
/* If this is the first time, allocate an ndlp and initialize
* it. Otherwise, make sure the node is enabled and then do the
* login.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5452f1f4220e..2500f15d437f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3439,6 +3439,11 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3855,6 +3860,11 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
@@ -4250,8 +4260,15 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0008 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+ }
if (state != NLP_STE_UNUSED_NODE)
@@ -4276,9 +4293,12 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
- lpfc_nlp_put(ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
+ } else {
+ lpfc_nlp_put(ndlp);
+ }
return;
}
@@ -4515,7 +4535,17 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mbox->context1 = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else {
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)) {
+ mbox->context1 = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl =
+ lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else
+ mbox->mbox_cmpl =
+ lpfc_sli_def_mbox_cmpl;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4741,6 +4771,11 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
!= NULL) {
rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
@@ -5070,8 +5105,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
return;
}
@@ -5082,8 +5116,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
*/
if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
/* If we get here, there is nothing to ADISC */
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
@@ -5484,18 +5517,22 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
* 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
 * fdmi-on=2 (supporting RPA/hostname)
*/
- if (vport->cfg_fdmi_on == 1)
- lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
- else
+ if (vport->cfg_fdmi_on & LPFC_FDMI_REG_DELAY)
mod_timer(&vport->fc_fdmitmo,
jiffies + msecs_to_jiffies(1000 * 60));
+ else
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
/* decrement the node reference count held for this callback
* function.
@@ -5650,6 +5687,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0007 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
GFP_KERNEL);
@@ -5684,9 +5728,9 @@ lpfc_nlp_release(struct kref *kref)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0279 lpfc_nlp_release: ndlp:x%p did %x "
- "usgmap:x%x refcnt:%d\n",
+ "usgmap:x%x refcnt:%d rpi:%x\n",
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
/* remove ndlp from action. */
lpfc_nlp_remove(ndlp->vport, ndlp);
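
One pattern worth noting in the lpfc_hbadisc.c hunks: when the SLI4 unreg-RPI mailbox is issued with the new lpfc_sli4_unreg_rpi_cmpl_clr completion, the node is pinned across the asynchronous completion by taking an extra reference first (mbox->context1 = lpfc_nlp_get(ndlp)); the completion then owns, and must drop, that reference. The sketch below is a generic illustration of that take-before-async, put-in-callback discipline in plain C; node_get/node_put and issue_unreg_rpi are invented names, not lpfc code.

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int refcnt;		/* stand-in for struct kref */
		int id;
	};

	static struct node *node_get(struct node *n)
	{
		n->refcnt++;			/* lpfc_nlp_get() analogue */
		return n;
	}

	static void node_put(struct node *n)
	{
		if (--n->refcnt == 0) {		/* lpfc_nlp_put() analogue */
			printf("node %d released\n", n->id);
			free(n);
		}
	}

	/* Completion runs later; it owns the reference taken at issue time. */
	static void unreg_rpi_cmpl(void *context)
	{
		struct node *n = context;

		printf("completion for node %d\n", n->id);
		node_put(n);			/* drop the reference taken below */
	}

	static void issue_unreg_rpi(struct node *n, void (*cmpl)(void *))
	{
		void *context = node_get(n);	/* pin node for the async command */

		/* a real driver queues a mailbox here; the sketch calls inline */
		cmpl(context);
	}

	int main(void)
	{
		struct node *n = calloc(1, sizeof(*n));

		n->id = 7;
		n->refcnt = 1;			/* caller's reference */
		issue_unreg_rpi(n, unreg_rpi_cmpl);
		node_put(n);			/* caller drops its own reference */
		return 0;
	}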
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 236259252379..37beb9dc1311 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -107,6 +107,7 @@ struct lpfc_sli_ct_request {
uint8_t ReasonCode;
uint8_t Explanation;
uint8_t VendorUnique;
+#define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */
union {
uint32_t PortID;
@@ -170,6 +171,8 @@ struct lpfc_sli_ct_request {
} un;
};
+#define LPFC_MAX_CT_SIZE (60 * 4096)
+
#define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gid))
@@ -1007,78 +1010,45 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
} un;
} ELS_PKT;
-/*
- * FDMI
- * HBA MAnagement Operations Command Codes
- */
-#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
-#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
-#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
-#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
-#define SLI_MGMT_RHBA 0x200 /* Register HBA */
-#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
-#define SLI_MGMT_RPRT 0x210 /* Register Port */
-#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
-#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
-#define SLI_MGMT_DPRT 0x310 /* De-register Port */
+/******** FDMI ********/
-/*
- * Management Service Subtypes
- */
-#define SLI_CT_FDMI_Subtypes 0x10
+/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
+#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
/*
- * HBA Management Service Reject Code
+ * Registered Port List Format
*/
-#define REJECT_CODE 0x9 /* Unable to perform command request */
+struct lpfc_fdmi_reg_port_list {
+ uint32_t EntryCnt;
+ uint32_t pe; /* Variable-length array */
+};
-/*
- * HBA Management Service Reject Reason Code
- * Please refer to the Reason Codes above
- */
-/*
- * HBA Attribute Types
- */
-#define NODE_NAME 0x1
-#define MANUFACTURER 0x2
-#define SERIAL_NUMBER 0x3
-#define MODEL 0x4
-#define MODEL_DESCRIPTION 0x5
-#define HARDWARE_VERSION 0x6
-#define DRIVER_VERSION 0x7
-#define OPTION_ROM_VERSION 0x8
-#define FIRMWARE_VERSION 0x9
-#define OS_NAME_VERSION 0xa
-#define MAX_CT_PAYLOAD_LEN 0xb
+/* Definitions for HBA / Port attribute entries */
-/*
- * Port Attrubute Types
- */
-#define SUPPORTED_FC4_TYPES 0x1
-#define SUPPORTED_SPEED 0x2
-#define PORT_SPEED 0x3
-#define MAX_FRAME_SIZE 0x4
-#define OS_DEVICE_NAME 0x5
-#define HOST_NAME 0x6
-
-union AttributesDef {
+struct lpfc_fdmi_attr_def { /* Defined in TLV format */
/* Structure is in Big Endian format */
- struct {
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- } bits;
- uint32_t word;
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
};
-/*
- * HBA Attribute Entry (8 - 260 bytes)
- */
-typedef struct {
- union AttributesDef ad;
+/* Attribute Entry */
+struct lpfc_fdmi_attr_entry {
union {
uint32_t VendorSpecific;
+ uint32_t SupportClass;
+ uint32_t SupportSpeed;
+ uint32_t PortSpeed;
+ uint32_t MaxFrameSize;
+ uint32_t MaxCTPayloadLen;
+ uint32_t PortState;
+ uint32_t PortId;
+ struct lpfc_name NodeName;
+ struct lpfc_name PortName;
+ struct lpfc_name FabricName;
+ uint8_t FC4Types[32];
uint8_t Manufacturer[64];
uint8_t SerialNumber[64];
uint8_t Model[256];
@@ -1087,97 +1057,115 @@ typedef struct {
uint8_t DriverVersion[256];
uint8_t OptionROMVersion[256];
uint8_t FirmwareVersion[256];
- struct lpfc_name NodeName;
- uint8_t SupportFC4Types[32];
- uint32_t SupportSpeed;
- uint32_t PortSpeed;
- uint32_t MaxFrameSize;
+ uint8_t OsHostName[256];
+ uint8_t NodeSymName[256];
uint8_t OsDeviceName[256];
uint8_t OsNameVersion[256];
- uint32_t MaxCTPayloadLen;
uint8_t HostName[256];
} un;
-} ATTRIBUTE_ENTRY;
+};
+
+#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
/*
* HBA Attribute Block
*/
-typedef struct {
- uint32_t EntryCnt; /* Number of HBA attribute entries */
- ATTRIBUTE_ENTRY Entry; /* Variable-length array */
-} ATTRIBUTE_BLOCK;
+struct lpfc_fdmi_attr_block {
+ uint32_t EntryCnt; /* Number of HBA attribute entries */
+ struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+};
/*
* Port Entry
*/
-typedef struct {
+struct lpfc_fdmi_port_entry {
struct lpfc_name PortName;
-} PORT_ENTRY;
+};
/*
* HBA Identifier
*/
-typedef struct {
+struct lpfc_fdmi_hba_ident {
struct lpfc_name PortName;
-} HBA_IDENTIFIER;
-
-/*
- * Registered Port List Format
- */
-typedef struct {
- uint32_t EntryCnt;
- PORT_ENTRY pe; /* Variable-length array */
-} REG_PORT_LIST;
+};
/*
* Register HBA(RHBA)
*/
-typedef struct {
- HBA_IDENTIFIER hi;
- REG_PORT_LIST rpl; /* variable-length array */
-/* ATTRIBUTE_BLOCK ab; */
-} REG_HBA;
+struct lpfc_fdmi_reg_hba {
+ struct lpfc_fdmi_hba_ident hi;
+ struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
+/* struct lpfc_fdmi_attr_block ab; */
+};
/*
* Register HBA Attributes (RHAT)
*/
-typedef struct {
+struct lpfc_fdmi_reg_hbaattr {
struct lpfc_name HBA_PortName;
- ATTRIBUTE_BLOCK ab;
-} REG_HBA_ATTRIBUTE;
+ struct lpfc_fdmi_attr_block ab;
+};
/*
* Register Port Attributes (RPA)
*/
-typedef struct {
+struct lpfc_fdmi_reg_portattr {
struct lpfc_name PortName;
- ATTRIBUTE_BLOCK ab;
-} REG_PORT_ATTRIBUTE;
+ struct lpfc_fdmi_attr_block ab;
+};
/*
- * Get Registered HBA List (GRHL) Accept Payload Format
+ * HBA Management Operations Command Codes
*/
-typedef struct {
- uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
- struct lpfc_name HBA_PortName; /* Variable-length array */
-} GRHL_ACC_PAYLOAD;
+#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
+#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
+#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
+#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
+#define SLI_MGMT_GPAS 0x120 /* Get Port Statistics */
+#define SLI_MGMT_RHBA 0x200 /* Register HBA */
+#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
+#define SLI_MGMT_RPRT 0x210 /* Register Port */
+#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
+#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
+#define SLI_MGMT_DHAT 0x301 /* De-register HBA attributes */
+#define SLI_MGMT_DPRT 0x310 /* De-register Port */
+#define SLI_MGMT_DPA 0x311 /* De-register Port attributes */
/*
- * Get Registered Port List (GRPL) Accept Payload Format
+ * HBA Attribute Types
*/
-typedef struct {
- uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
- PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */
-} GRPL_ACC_PAYLOAD;
+#define RHBA_NODENAME 0x1 /* 8 byte WWNN */
+#define RHBA_MANUFACTURER 0x2 /* 4 to 64 byte ASCII string */
+#define RHBA_SERIAL_NUMBER 0x3 /* 4 to 64 byte ASCII string */
+#define RHBA_MODEL 0x4 /* 4 to 256 byte ASCII string */
+#define RHBA_MODEL_DESCRIPTION 0x5 /* 4 to 256 byte ASCII string */
+#define RHBA_HARDWARE_VERSION 0x6 /* 4 to 256 byte ASCII string */
+#define RHBA_DRIVER_VERSION 0x7 /* 4 to 256 byte ASCII string */
+#define RHBA_OPTION_ROM_VERSION 0x8 /* 4 to 256 byte ASCII string */
+#define RHBA_FIRMWARE_VERSION 0x9 /* 4 to 256 byte ASCII string */
+#define RHBA_OS_NAME_VERSION 0xa /* 4 to 256 byte ASCII string */
+#define RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit unsigned int */
+#define RHBA_SYM_NODENAME 0xc /* 4 to 256 byte ASCII string */
/*
- * Get Port Attributes (GPAT) Accept Payload Format
+ * Port Attribute Types
*/
-
-typedef struct {
- ATTRIBUTE_BLOCK pab;
-} GPAT_ACC_PAYLOAD;
-
+#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */
+#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */
+#define RPRT_PORT_SPEED 0x3 /* 32-bit unsigned int */
+#define RPRT_MAX_FRAME_SIZE 0x4 /* 32-bit unsigned int */
+#define RPRT_OS_DEVICE_NAME 0x5 /* 4 to 256 byte ASCII string */
+#define RPRT_HOST_NAME 0x6 /* 4 to 256 byte ASCII string */
+#define RPRT_NODENAME 0x7 /* 8 byte WWNN */
+#define RPRT_PORTNAME 0x8 /* 8 byte WWNN */
+#define RPRT_SYM_PORTNAME 0x9 /* 4 to 256 byte ASCII string */
+#define RPRT_PORT_TYPE 0xa /* 32-bit unsigned int */
+#define RPRT_SUPPORTED_CLASS 0xb /* 32-bit unsigned int */
+#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWNN */
+#define RPRT_ACTIVE_FC4_TYPES 0xd /* 32 byte binary array */
+#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */
+#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */
+#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */
/*
* Begin HBA configuration parameters.
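
The lpfc_hw.h rework above replaces the typedef'd FDMI structures (ATTRIBUTE_ENTRY, REG_HBA, ...) with struct lpfc_fdmi_* equivalents and turns the attribute descriptor into a plain TLV header: a 16-bit AttrType and 16-bit AttrLen immediately followed by the value, with LPFC_FDMI_MAX_AE_SIZE (the largest value union member) used by the builders in lpfc_ct.c as worst-case headroom before starting another entry. The sketch below shows how such a packed attribute block can be walked by stepping with the padded AttrLen; it is illustrative only, the struct is simplified, the buffer contents are invented, and ntohs() stands in for be16_to_cpu().

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	struct attr_hdr {		/* mirrors AttrType:16 / AttrLen:16 */
		uint16_t type;		/* big-endian on the wire */
		uint16_t len;		/* header + padded value, in bytes */
	};

	static void walk_attrs(const uint8_t *buf, uint32_t entry_cnt,
			       size_t buf_len)
	{
		size_t off = 0;

		while (entry_cnt-- && off + sizeof(struct attr_hdr) <= buf_len) {
			const struct attr_hdr *h =
				(const struct attr_hdr *)(buf + off);
			uint16_t len = ntohs(h->len);

			if (len < sizeof(*h) || off + len > buf_len)
				break;		/* malformed entry */
			printf("attr type 0x%x, %u bytes\n",
			       (unsigned)ntohs(h->type), (unsigned)len);
			off += len;		/* AttrLen already includes padding */
		}
	}

	int main(void)
	{
		/* two fake attributes: 4-byte header + 4-byte value each */
		uint8_t buf[16] = { 0 };
		struct attr_hdr *h = (struct attr_hdr *)buf;

		h->type = htons(0xa);   h->len = htons(8);
		h = (struct attr_hdr *)(buf + 8);
		h->type = htons(0x103); h->len = htons(8);

		walk_attrs(buf, 2, sizeof(buf));
		return 0;
	}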
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f432ec180cf8..1813c45946f4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -3085,6 +3085,9 @@ struct lpfc_acqe_link {
#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
+#define LPFC_ASYNC_LINK_SPEED_20GBPS 0x5
+#define LPFC_ASYNC_LINK_SPEED_25GBPS 0x6
+#define LPFC_ASYNC_LINK_SPEED_40GBPS 0x7
#define lpfc_acqe_link_duplex_SHIFT 16
#define lpfc_acqe_link_duplex_MASK 0x000000FF
#define lpfc_acqe_link_duplex_WORD word0
@@ -3166,7 +3169,7 @@ struct lpfc_acqe_fc_la {
#define lpfc_acqe_fc_la_speed_SHIFT 24
#define lpfc_acqe_fc_la_speed_MASK 0x000000FF
#define lpfc_acqe_fc_la_speed_WORD word0
-#define LPFC_FC_LA_SPEED_UNKOWN 0x0
+#define LPFC_FC_LA_SPEED_UNKNOWN 0x0
#define LPFC_FC_LA_SPEED_1G 0x1
#define LPFC_FC_LA_SPEED_2G 0x2
#define LPFC_FC_LA_SPEED_4G 0x4
@@ -3244,6 +3247,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
+#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
};
/*
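
The lpfc_hw4.h hunk above adds asynchronous link-speed codes for 20, 25 and 40 Gb (LPFC_ASYNC_LINK_SPEED_20GBPS/25GBPS/40GBPS) and corrects the LPFC_FC_LA_SPEED_UNKOWN spelling; the lpfc_init.c hunks further down translate those codes to Mb/s in lpfc_sli4_port_speed_parse(). A compact table-driven alternative to the growing switch is sketched below; it is illustrative only, not lpfc code, with the code-to-speed mapping taken from the hunks (0x0 through 0x7) and everything else assumed.

	#include <stdint.h>
	#include <stdio.h>

	/* Decode the async link-speed event code into Mb/s. */
	static uint32_t async_link_speed_mbps(uint8_t code)
	{
		static const uint32_t mbps[] = {
			[0x0] = 0,	/* zero / no link */
			[0x1] = 10,	/* 10 Mb   */
			[0x2] = 100,	/* 100 Mb  */
			[0x3] = 1000,	/* 1 Gb    */
			[0x4] = 10000,	/* 10 Gb   */
			[0x5] = 20000,	/* 20 Gb   */
			[0x6] = 25000,	/* 25 Gb   */
			[0x7] = 40000,	/* 40 Gb   */
		};

		if (code >= sizeof(mbps) / sizeof(mbps[0]))
			return 0;	/* unknown code, like the switch default */
		return mbps[code];
	}

	int main(void)
	{
		printf("code 0x6 -> %u Mb/s\n",
		       (unsigned)async_link_speed_mbps(0x6));
		return 0;
	}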
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0b2c53af85c7..e8c8c1ecc1f5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1330,13 +1330,14 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_HBA_ERROR;
+ spin_unlock_irq(&phba->hbalock);
+
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_offline(phba);
- lpfc_sli4_brdreset(phba);
lpfc_hba_down_post(phba);
- lpfc_sli4_post_status_check(phba);
lpfc_unblock_mgmt_io(phba);
- phba->link_state = LPFC_HBA_ERROR;
}
/**
@@ -1629,6 +1630,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t uerrlo_reg, uemasklo_reg;
uint32_t pci_rd_rc1, pci_rd_rc2;
bool en_rn_msg = true;
+ struct temp_event temp_event_data;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1636,9 +1638,6 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
*/
if (pci_channel_offline(phba->pcidev))
return;
- /* If resets are disabled then leave the HBA alone and return */
- if (!phba->cfg_enable_hba_reset)
- return;
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
@@ -1654,6 +1653,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
return;
lpfc_sli4_offline_eratt(phba);
break;
+
case LPFC_SLI_INTF_IF_TYPE_2:
pci_rd_rc1 = lpfc_readl(
phba->sli4_hba.u.if_type2.STATUSregaddr,
@@ -1668,15 +1668,27 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
- /* TODO: Register for Overtemp async events. */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2889 Port Overtemperature event, "
- "taking port offline\n");
+ "taking port offline Data: x%x x%x\n",
+ reg_err1, reg_err2);
+
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_CRIT_TEMP;
+ temp_event_data.data = 0xFFFFFFFF;
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+
spin_lock_irq(&phba->hbalock);
phba->over_temp_state = HBA_OVER_TEMP;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_offline_eratt(phba);
- break;
+ return;
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
@@ -1693,6 +1705,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3145 Port Down: Provisioning\n");
+ /* If resets are disabled then leave the HBA alone and return */
+ if (!phba->cfg_enable_hba_reset)
+ return;
+
/* Check port status register for function reset */
rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
en_rn_msg);
@@ -2759,9 +2775,19 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp))
+ if (NLP_CHK_NODE_ACT(ndlp)) {
ndlp->nlp_rpi =
lpfc_sli4_alloc_rpi(phba);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE,
+ "0009 rpi:%x DID:%x "
+ "flg:%x map:%x %p\n",
+ ndlp->nlp_rpi,
+ ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ ndlp->nlp_usg_map,
+ ndlp);
+ }
}
}
}
@@ -2925,8 +2951,18 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* RPI. Get a new RPI when the adapter port
* comes back online.
*/
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_printf_vlog(ndlp->vport,
+ KERN_INFO, LOG_NODE,
+ "0011 lpfc_offline: "
+ "ndlp:x%p did %x "
+ "usgmap:x%x rpi:%x\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_usg_map,
+ ndlp->nlp_rpi);
+
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ }
lpfc_unreg_rpi(vports[i], ndlp);
}
}
@@ -3241,12 +3277,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
struct Scsi_Host *shost;
int error = 0;
- if (dev != &phba->pcidev->dev)
+ if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template,
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
+ else
+ shost = scsi_host_alloc(&lpfc_template_s3,
+ sizeof(struct lpfc_vport));
+ }
if (!shost)
goto out;
@@ -3685,6 +3726,11 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
case LPFC_ASYNC_LINK_SPEED_10GBPS:
link_speed = LPFC_LINK_SPEED_10GHZ;
break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ link_speed = LPFC_LINK_SPEED_UNKNOWN;
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0483 Invalid link-attention link speed: x%x\n",
@@ -3756,46 +3802,55 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
switch (evt_code) {
case LPFC_TRAILER_CODE_LINK:
switch (speed_code) {
- case LPFC_EVT_CODE_LINK_NO_LINK:
+ case LPFC_ASYNC_LINK_SPEED_ZERO:
port_speed = 0;
break;
- case LPFC_EVT_CODE_LINK_10_MBIT:
+ case LPFC_ASYNC_LINK_SPEED_10MBPS:
port_speed = 10;
break;
- case LPFC_EVT_CODE_LINK_100_MBIT:
+ case LPFC_ASYNC_LINK_SPEED_100MBPS:
port_speed = 100;
break;
- case LPFC_EVT_CODE_LINK_1_GBIT:
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
port_speed = 1000;
break;
- case LPFC_EVT_CODE_LINK_10_GBIT:
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
port_speed = 10000;
break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ port_speed = 20000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ port_speed = 25000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ port_speed = 40000;
+ break;
default:
port_speed = 0;
}
break;
case LPFC_TRAILER_CODE_FC:
switch (speed_code) {
- case LPFC_EVT_CODE_FC_NO_LINK:
+ case LPFC_FC_LA_SPEED_UNKNOWN:
port_speed = 0;
break;
- case LPFC_EVT_CODE_FC_1_GBAUD:
+ case LPFC_FC_LA_SPEED_1G:
port_speed = 1000;
break;
- case LPFC_EVT_CODE_FC_2_GBAUD:
+ case LPFC_FC_LA_SPEED_2G:
port_speed = 2000;
break;
- case LPFC_EVT_CODE_FC_4_GBAUD:
+ case LPFC_FC_LA_SPEED_4G:
port_speed = 4000;
break;
- case LPFC_EVT_CODE_FC_8_GBAUD:
+ case LPFC_FC_LA_SPEED_8G:
port_speed = 8000;
break;
- case LPFC_EVT_CODE_FC_10_GBAUD:
+ case LPFC_FC_LA_SPEED_10G:
port_speed = 10000;
break;
- case LPFC_EVT_CODE_FC_16_GBAUD:
+ case LPFC_FC_LA_SPEED_16G:
port_speed = 16000;
break;
default:
@@ -4044,18 +4099,21 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
char port_name;
char message[128];
uint8_t status;
+ uint8_t evt_type;
+ struct temp_event temp_event_data;
struct lpfc_acqe_misconfigured_event *misconfigured;
+ struct Scsi_Host *shost;
+
+ evt_type = bf_get(lpfc_trailer_type, acqe_sli);
- /* special case misconfigured event as it contains data for all ports */
- if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2) ||
- (bf_get(lpfc_trailer_type, acqe_sli) !=
- LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
+ /* Special case Lancer */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2901 Async SLI event - Event Data1:x%08x Event Data2:"
"x%08x SLI Event Type:%d\n",
acqe_sli->event_data1, acqe_sli->event_data2,
- bf_get(lpfc_trailer_type, acqe_sli));
+ evt_type);
return;
}
@@ -4063,58 +4121,107 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
if (port_name == 0x00)
port_name = '?'; /* get port name is empty */
- misconfigured = (struct lpfc_acqe_misconfigured_event *)
+ switch (evt_type) {
+ case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3190 Over Temperature:%d Celsius- Port Name %c\n",
+ acqe_sli->event_data1, port_name);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+ break;
+ case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3191 Normal Temperature:%d Celsius - Port Name %c\n",
+ acqe_sli->event_data1, port_name);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+ break;
+ case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
+ misconfigured = (struct lpfc_acqe_misconfigured_event *)
&acqe_sli->event_data1;
- /* fetch the status for this port */
- switch (phba->sli4_hba.lnk_info.lnk_no) {
- case LPFC_LINK_NUMBER_0:
- status = bf_get(lpfc_sli_misconfigured_port0,
+ /* fetch the status for this port */
+ switch (phba->sli4_hba.lnk_info.lnk_no) {
+ case LPFC_LINK_NUMBER_0:
+ status = bf_get(lpfc_sli_misconfigured_port0,
&misconfigured->theEvent);
- break;
- case LPFC_LINK_NUMBER_1:
- status = bf_get(lpfc_sli_misconfigured_port1,
+ break;
+ case LPFC_LINK_NUMBER_1:
+ status = bf_get(lpfc_sli_misconfigured_port1,
&misconfigured->theEvent);
- break;
- case LPFC_LINK_NUMBER_2:
- status = bf_get(lpfc_sli_misconfigured_port2,
+ break;
+ case LPFC_LINK_NUMBER_2:
+ status = bf_get(lpfc_sli_misconfigured_port2,
&misconfigured->theEvent);
- break;
- case LPFC_LINK_NUMBER_3:
- status = bf_get(lpfc_sli_misconfigured_port3,
+ break;
+ case LPFC_LINK_NUMBER_3:
+ status = bf_get(lpfc_sli_misconfigured_port3,
&misconfigured->theEvent);
- break;
- default:
- status = ~LPFC_SLI_EVENT_STATUS_VALID;
- break;
- }
+ break;
+ default:
+ status = ~LPFC_SLI_EVENT_STATUS_VALID;
+ break;
+ }
- switch (status) {
- case LPFC_SLI_EVENT_STATUS_VALID:
- return; /* no message if the sfp is okay */
- case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
- sprintf(message, "Optics faulted/incorrectly installed/not " \
- "installed - Reseat optics, if issue not "
- "resolved, replace.");
- break;
- case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
- sprintf(message,
- "Optics of two types installed - Remove one optic or " \
- "install matching pair of optics.");
- break;
- case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
- sprintf(message, "Incompatible optics - Replace with " \
+ switch (status) {
+ case LPFC_SLI_EVENT_STATUS_VALID:
+ return; /* no message if the sfp is okay */
+ case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
+ sprintf(message, "Optics faulted/incorrectly "
+ "installed/not installed - Reseat optics, "
+ "if issue not resolved, replace.");
+ break;
+ case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
+ sprintf(message,
+ "Optics of two types installed - Remove one "
+ "optic or install matching pair of optics.");
+ break;
+ case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
+ sprintf(message, "Incompatible optics - Replace with "
"compatible optics for card to function.");
+ break;
+ default:
+ /* firmware is reporting a status we don't know about */
+ sprintf(message, "Unknown event status x%02x", status);
+ break;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3176 Misconfigured Physical Port - "
+ "Port Name %c %s\n", port_name, message);
+ break;
+ case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3192 Remote DPort Test Initiated - "
+ "Event Data1:x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
break;
default:
- /* firmware is reporting a status we don't know about */
- sprintf(message, "Unknown event status x%02x", status);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3193 Async SLI event - Event Data1:x%08x Event Data2:"
+ "x%08x SLI Event Type:%d\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ evt_type);
break;
}
-
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "3176 Misconfigured Physical Port - "
- "Port Name %c %s\n", port_name, message);
}
/**
@@ -5183,6 +5290,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
return -ENODEV;
+ phba->temp_sensor_support = 1;
}
/* Create the bootstrap mailbox command */
@@ -7647,6 +7755,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
goto out_destroy_els_rq;
}
}
+
+ /*
+ * Configure EQ delay multiplier for interrupt coalescing using
+ * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
+ */
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
+ fcp_eqidx += LPFC_MAX_EQ_DELAY)
+ lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
return 0;
out_destroy_els_rq:
@@ -7953,7 +8069,7 @@ wait:
* up to 30 seconds. If the port doesn't respond, treat
* it as an error.
*/
- for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) {
+ for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
if (lpfc_readl(phba->sli4_hba.u.if_type2.
STATUSregaddr, &reg_data.word0)) {
rc = -ENODEV;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 06241f590c1e..816f596cda60 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 5cc1103d811e..4cb9882af157 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -276,6 +276,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
+ uint64_t nlp_portwwn = 0;
uint32_t *lp;
IOCB_t *icmd;
struct serv_parm *sp;
@@ -332,6 +333,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
+
+ nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
@@ -367,7 +370,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /* no need to reg_login if we are already in one of these states */
+ /* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -376,8 +379,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
- return 1;
+ /* lpfc_plogi_confirm_nport skips fabric DID, handle it here */
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+ ndlp, NULL);
+ return 1;
+ }
+ if (nlp_portwwn != 0 &&
+ nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0143 PLOGI recv'd from DID: x%x "
+ "WWPN changed: old %llx new %llx\n",
+ ndlp->nlp_DID,
+ (unsigned long long)nlp_portwwn,
+ (unsigned long long)
+ wwn_to_u64(sp->portName.u.wwn));
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ /* rport needs to be unregistered first */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ break;
}
/* Check for Nport to NPort pt2pt protocol */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4f9222eb2266..cb73cf9e9ba5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1130,6 +1130,25 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
}
/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
+
+/**
* lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
* @phba: The Hba for which this call is being executed.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -1264,6 +1283,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
return 0;
}
@@ -4127,24 +4147,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/**
- * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
- * @data: A pointer to the immediate command data portion of the IOCB.
- * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
- *
- * The routine copies the entire FCP command from @fcp_cmnd to @data while
- * byte swapping the data to big endian format for transmission on the wire.
- **/
-static void
-lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
-{
- int i, j;
- for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
- i += sizeof(uint32_t), j++) {
- ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
- }
-}
-
-/**
* lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: The scsi command which needs to send.
@@ -4223,9 +4225,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
- if (phba->sli_rev == 3 &&
- !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
- lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
@@ -5118,9 +5117,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int status;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
- if (!rdata) {
+ if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0798 Device Reset rport failure: rdata x%p\n", rdata);
+ "0798 Device Reset rport failure: rdata x%p\n",
+ rdata);
return FAILED;
}
pnode = rdata->pnode;
@@ -5202,10 +5202,12 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0722 Target Reset rport failure: rdata x%p\n", rdata);
- spin_lock_irq(shost->host_lock);
- pnode->nlp_flag &= ~NLP_NPR_ADISC;
- pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- spin_unlock_irq(shost->host_lock);
+ if (pnode) {
+ spin_lock_irq(shost->host_lock);
+ pnode->nlp_flag &= ~NLP_NPR_ADISC;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
return FAST_IO_FAIL;
@@ -5857,6 +5859,31 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
return false;
}
+struct scsi_host_template lpfc_template_s3 = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = lpfc_bus_reset_handler,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = lpfc_hba_attrs,
+ .max_sectors = 0xFFFF,
+ .vendor_id = LPFC_NL_VENDOR_ID,
+ .change_queue_depth = scsi_change_queue_depth,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 0389ac1e7b83..474e30cdee6e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 207a43d952fa..56f73682d4bd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -918,12 +918,16 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
ndlp = lpfc_cmd->rdata->pnode;
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
- !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+ !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
ndlp = piocbq->context_un.ndlp;
- else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
- ndlp = piocbq->context_un.ndlp;
- else
+ } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+ ndlp = NULL;
+ else
+ ndlp = piocbq->context_un.ndlp;
+ } else {
ndlp = piocbq->context1;
+ }
list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
start_sglq = sglq;
@@ -2213,6 +2217,46 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else
mempool_free(pmb, phba->mbox_mem_pool);
}
+ /**
+ * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the unreg rpi mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. An additional reference is held on the ndlp to prevent
+ * lpfc_nlp_release from freeing the rpi bit in the bitmask before
+ * the unreg mailbox command completes; this routine puts that
+ * reference back.
+ *
+ **/
+void
+lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = pmb->context1;
+ if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)) {
+ if (ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0010 UNREG_LOGIN vpi:%x "
+ "rpi:%x DID:%x map:%x %p\n",
+ vport->vpi, ndlp->nlp_rpi,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map, ndlp);
+
+ lpfc_nlp_put(ndlp);
+ }
+ }
+ }
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
/**
* lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
@@ -12842,7 +12886,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
-lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
+lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
@@ -12959,11 +13003,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
LPFC_EQE_SIZE);
bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
- /* Calculate delay multiper from maximum interrupt per second */
- if (imax > LPFC_DMULT_CONST)
- dmult = 0;
- else
- dmult = LPFC_DMULT_CONST/imax - 1;
+ /* don't set up the delay multiplier using EQ_CREATE */
+ dmult = 0;
bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
dmult);
switch (eq->entry_count) {
@@ -15662,14 +15703,14 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
unsigned long iflag;
- max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
- rpi_limit = phba->sli4_hba.next_rpi;
-
/*
* Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time.
*/
spin_lock_irqsave(&phba->hbalock, iflag);
+ max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ rpi_limit = phba->sli4_hba.next_rpi;
+
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
@@ -15678,6 +15719,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0001 rpi:%x max:%x lim:%x\n",
+ (int) rpi, max_rpi, rpi_limit);
/*
* Don't try to allocate more rpi header regions if the device limit
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 4a01452415cf..7fe99ff80846 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -80,6 +80,7 @@ struct lpfc_iocbq {
#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
+#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 22ceb2b05ba1..6eca3b8124d3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -671,7 +671,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
+int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 89413add2252..c37bb9f91c3b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "10.4.8000.0."
+#define LPFC_DRIVER_VERSION "10.5.0.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved."
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index e5cd8d8d4ce7..0adb2e015597 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -382,16 +382,16 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
if (dma_len > 0xffff)
panic("mac53c94: scatterlist element >= 64k");
total += dma_len;
- st_le16(&dcmds->req_count, dma_len);
- st_le16(&dcmds->command, dma_cmd);
- st_le32(&dcmds->phy_addr, dma_addr);
+ dcmds->req_count = cpu_to_le16(dma_len);
+ dcmds->command = cpu_to_le16(dma_cmd);
+ dcmds->phy_addr = cpu_to_le32(dma_addr);
dcmds->xfer_status = 0;
++dcmds;
}
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
- st_le16(&dcmds[-1].command, dma_cmd);
- st_le16(&dcmds->command, DBDMA_STOP);
+ dcmds[-1].command = cpu_to_le16(dma_cmd);
+ dcmds->command = cpu_to_le16(DBDMA_STOP);
cmd->SCp.this_residual = total;
}
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 1e85c07e3b62..d64a769b8155 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -483,7 +483,6 @@ static struct platform_driver mac_scsi_driver = {
.remove = __exit_p(mac_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 675b5e7aba94..5a0800d19970 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1584,11 +1584,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = io_info.fpOkForIo;
}
- /* Use smp_processor_id() for now until cmd->request->cpu is CPU
+ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
id by default, not CPU group id, otherwise all MSI-X queues won't
be utilized */
cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
- smp_processor_id() % instance->msix_vectors : 0;
+ raw_smp_processor_id() % instance->msix_vectors : 0;
if (fp_possible) {
megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
@@ -1693,7 +1693,10 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
cmd->request_desc->SCSIIO.MSIxIndex =
- instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
+ instance->msix_vectors ?
+ raw_smp_processor_id() %
+ instance->msix_vectors :
+ 0;
os_timeout_value = scmd->request->timeout / HZ;
if (instance->secure_jbod_support &&
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 57a95e2c3442..555367f00228 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1287,9 +1287,9 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
}
if (dma_len > 0xffff)
panic("mesh: scatterlist element >= 64k");
- st_le16(&dcmds->req_count, dma_len - off);
- st_le16(&dcmds->command, dma_cmd);
- st_le32(&dcmds->phy_addr, dma_addr + off);
+ dcmds->req_count = cpu_to_le16(dma_len - off);
+ dcmds->command = cpu_to_le16(dma_cmd);
+ dcmds->phy_addr = cpu_to_le32(dma_addr + off);
dcmds->xfer_status = 0;
++dcmds;
dtot += dma_len - off;
@@ -1303,15 +1303,15 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
static char mesh_extra_buf[64];
dtot = sizeof(mesh_extra_buf);
- st_le16(&dcmds->req_count, dtot);
- st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf));
+ dcmds->req_count = cpu_to_le16(dtot);
+ dcmds->phy_addr = cpu_to_le32(virt_to_phys(mesh_extra_buf));
dcmds->xfer_status = 0;
++dcmds;
}
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
- st_le16(&dcmds[-1].command, dma_cmd);
+ dcmds[-1].command = cpu_to_le16(dma_cmd);
memset(dcmds, 0, sizeof(*dcmds));
- st_le16(&dcmds->command, DBDMA_STOP);
+ dcmds->command = cpu_to_le16(DBDMA_STOP);
ms->dma_count = dtot;
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 2d5ab6d969ec..454536c49315 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
- struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
- struct sas_phy *sphy = dev->phy;
- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
+ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 488c3929f19a..0cccd6033feb 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -186,7 +186,7 @@ static int _osd_get_print_system_info(struct osd_dev *od,
if (unlikely(len > sizeof(odi->systemid))) {
OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
- "device idetification might not work\n", len);
+ "device identification might not work\n", len);
len = sizeof(odi->systemid);
}
odi->systemid_len = len;
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 113e6c9826a1..33f60c92e20e 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -18,6 +18,9 @@ config SCSI_QLA_FC
2322, 6322 ql2322_fw.bin
24xx, 54xx ql2400_fw.bin
25xx ql2500_fw.bin
+ 2031 ql2600_fw.bin
+ 8031 ql8300_fw.bin
+ 27xx ql2700_fw.bin
Upon request, the driver caches the firmware image until
the driver is unloaded.
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d77fe43793b6..0e6ee3ca30e6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,9 +11,9 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x017d | 0x0144,0x0146 |
+ * | Module Init and Probe | 0x017f | 0x0146 |
* | | | 0x015b-0x0160 |
- * | | | 0x016e-0x0170 |
+ * | | | 0x016e-0x0170 |
* | Mailbox commands | 0x118d | 0x1115-0x1116 |
* | | | 0x111a-0x111b |
* | Device Discovery | 0x2016 | 0x2020-0x2022, |
@@ -60,7 +60,7 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd213 | 0xd011-0xd017 |
+ * | Misc | 0xd300 | 0xd016-0xd017 |
* | | | 0xd021,0xd024 |
* | | | 0xd025,0xd029 |
* | | | 0xd02a,0xd02e |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5f6b2960cccb..e86201d3b8c6 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2163,7 +2163,7 @@ struct ct_fdmi_hba_attr {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[64];
uint8_t serial_num[32];
- uint8_t model[16];
+ uint8_t model[16+1];
uint8_t model_desc[80];
uint8_t hw_version[32];
uint8_t driver_version[32];
@@ -2184,9 +2184,9 @@ struct ct_fdmiv2_hba_attr {
uint16_t len;
union {
uint8_t node_name[WWN_SIZE];
- uint8_t manufacturer[32];
+ uint8_t manufacturer[64];
uint8_t serial_num[32];
- uint8_t model[16];
+ uint8_t model[16+1];
uint8_t model_desc[80];
uint8_t hw_version[16];
uint8_t driver_version[32];
@@ -2252,7 +2252,7 @@ struct ct_fdmiv2_port_attr {
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
- uint8_t host_name[32];
+ uint8_t host_name[256];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
@@ -2283,7 +2283,7 @@ struct ct_fdmi_port_attr {
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
- uint8_t host_name[32];
+ uint8_t host_name[256];
} a;
};
@@ -3132,7 +3132,8 @@ struct qla_hw_data {
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
IS_QLA8044(ha) || IS_QLA27XX(ha))
-#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
IS_QLA27XX(ha))
@@ -3300,6 +3301,8 @@ struct qla_hw_data {
#define RISC_RDY_AFT_RESET 3
#define RISC_SRAM_DUMP_CMPL 4
#define RISC_EXT_MEM_DUMP_CMPL 5
+#define ISP_MBX_RDY 6
+#define ISP_SOFT_RESET_CMPL 7
int fw_dump_reading;
int prev_minidump_failed;
dma_addr_t eft_dma;
@@ -3587,6 +3590,7 @@ typedef struct scsi_qla_host {
#define VP_BIND_NEEDED 2
#define VP_DELETE_NEEDED 3
#define VP_SCR_NEEDED 4 /* State Change Request registration */
+#define VP_CONFIG_OK 5 /* Flag to cfg VP, if FW is ready */
atomic_t vp_state;
#define VP_OFFLINE 0
#define VP_ACTIVE 1
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e59f25bff7ab..285cb204f300 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1121,7 +1121,7 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-static inline void
+static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
@@ -1130,6 +1130,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
uint32_t cnt, d2;
uint16_t wd;
static int abts_cnt; /* ISP abort retry counts */
+ int rval = QLA_SUCCESS;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1142,26 +1143,57 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
udelay(10);
}
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
+ "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->ctrl_status),
+ (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+
WRT_REG_DWORD(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
+
/* Wait for firmware to complete NVRAM accesses. */
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000 ; cnt && d2; cnt--) {
- udelay(5);
- d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
barrier();
+ if (cnt)
+ udelay(5);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
}
+ if (rval == QLA_SUCCESS)
+ set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
+ "HCCR: 0x%x, MailBox0 Status 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->mailbox0));
+
/* Wait for soft-reset to complete. */
d2 = RD_REG_DWORD(&reg->ctrl_status);
- for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
- udelay(5);
- d2 = RD_REG_DWORD(&reg->ctrl_status);
+ for (cnt = 0; cnt < 6000000; cnt++) {
barrier();
+ if ((RD_REG_DWORD(&reg->ctrl_status) &
+ CSRX_ISP_SOFT_RESET) == 0)
+ break;
+
+ udelay(5);
}
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
+ "HCCR: 0x%x, Soft Reset status: 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->ctrl_status));
/* If required, do an MPI FW reset now */
if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@@ -1190,16 +1222,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
RD_REG_DWORD(&reg->hccr);
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
- for (cnt = 6000000 ; cnt && d2; cnt--) {
- udelay(5);
- d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
barrier();
+ if (cnt)
+ udelay(5);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
}
+ if (rval == QLA_SUCCESS)
+ set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
+ "Host Risc 0x%x, mailbox0 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_WORD(&reg->mailbox0));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
+ "Driver in %s mode\n",
+ IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
+
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
+
+ return rval;
}
static void
@@ -2243,8 +2291,11 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
rval = QLA_SUCCESS;
- /* 20 seconds for loop down. */
- min_wait = 20;
+ /* Time to wait for loop down */
+ if (IS_P3P_TYPE(ha))
+ min_wait = 30;
+ else
+ min_wait = 20;
/*
* Firmware should take at most one RATOV to login, plus 5 seconds for
@@ -5364,7 +5415,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
blob = qla2x00_request_firmware(vha);
if (!blob) {
ql_log(ql_log_info, vha, 0x0083,
- "Fimware image unavailable.\n");
+ "Firmware image unavailable.\n");
ql_log(ql_log_info, vha, 0x0084,
"Firmware images can be retrieved from: "QLA_FW_URL ".\n");
return QLA_FUNCTION_FAILED;
@@ -5467,7 +5518,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
blob = qla2x00_request_firmware(vha);
if (!blob) {
ql_log(ql_log_warn, vha, 0x0090,
- "Fimware image unavailable.\n");
+ "Firmware image unavailable.\n");
ql_log(ql_log_warn, vha, 0x0091,
"Firmware images can be retrieved from: "
QLA_FW_URL ".\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a04a1b1f7f32..6dc14cd782b2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -756,11 +756,21 @@ skip_rio:
/*
* In case of loop down, restore WWPN from
* NVRAM in case of FA-WWPN capable ISP
+ * Restore for Physical Port only
*/
- if (ha->flags.fawwpn_enabled) {
- void *wwpn = ha->init_cb->port_name;
+ if (!vha->vp_idx) {
+ if (ha->flags.fawwpn_enabled) {
+ void *wwpn = ha->init_cb->port_name;
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose,
+ vha, 0x0144, "LOOP DOWN detected, "
+ "restore WWPN %016llx\n",
+ wwn_to_u64(vha->port_name));
+ }
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ clear_bit(VP_CONFIG_OK, &vha->vp_flags);
}
vha->device_flags |= DFLG_NO_CABLE;
@@ -947,6 +957,7 @@ skip_rio:
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(VP_CONFIG_OK, &vha->vp_flags);
qlt_async_event(mb[0], vha, mb);
break;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 72971daa2552..02b1c1c5355b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -33,7 +33,7 @@
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
- int rval;
+ int rval, i;
unsigned long flags = 0;
device_reg_t *reg;
uint8_t abort_active;
@@ -43,10 +43,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
+ uint16_t __iomem *mbx_reg;
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
if (ha->pdev->error_state > pci_channel_io_frozen) {
@@ -376,6 +378,18 @@ mbx_done:
ql_dbg(ql_dbg_disc, base_vha, 0x1020,
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+
+ ql_dbg(ql_dbg_disc, vha, 0x1115,
+ "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
+ RD_REG_DWORD(&reg->isp24.host_status),
+ ha->fw_dump_cap_flags,
+ RD_REG_DWORD(&reg->isp24.ictrl),
+ RD_REG_DWORD(&reg->isp24.istatus));
+
+ mbx_reg = &reg->isp24.mailbox0;
+ for (i = 0; i < 6; i++)
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
@@ -2838,7 +2852,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA2031(vha->hw))
+ if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -2846,7 +2860,11 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mcp->mb[0] = MBC_WRITE_SERDES;
mcp->mb[1] = addr;
- mcp->mb[2] = data & 0xff;
+ if (IS_QLA2031(vha->hw))
+ mcp->mb[2] = data & 0xff;
+ else
+ mcp->mb[2] = data;
+
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
@@ -2872,7 +2890,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA2031(vha->hw))
+ if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -2887,7 +2905,10 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
- *data = mcp->mb[1] & 0xff;
+ if (IS_QLA2031(vha->hw))
+ *data = mcp->mb[1] & 0xff;
+ else
+ *data = mcp->mb[1];
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1186,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5c2e0317f1c0..cc94192511cf 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -306,19 +306,25 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
- if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
- /* VP acquired. complete port configuration */
- ql_dbg(ql_dbg_dpc, vha, 0x4014,
- "Configure VP scheduled.\n");
- qla24xx_configure_vp(vha);
- ql_dbg(ql_dbg_dpc, vha, 0x4015,
- "Configure VP end.\n");
- return 0;
+ /* Check if Fw is ready to configure VP first */
+ if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
+ if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
+ /* VP acquired. complete port configuration */
+ ql_dbg(ql_dbg_dpc, vha, 0x4014,
+ "Configure VP scheduled.\n");
+ qla24xx_configure_vp(vha);
+ ql_dbg(ql_dbg_dpc, vha, 0x4015,
+ "Configure VP end.\n");
+ return 0;
+ }
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
@@ -788,7 +794,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->msix = &ha->msix_entries[que_id + 1];
else
ql_log(ql_log_warn, base_vha, 0x00e3,
- "MSIX not enalbled.\n");
+ "MSIX not enabled.\n");
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54cb2ac9339b..7d2b18f2675c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1274,7 +1274,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
if (off == ADDR_ERROR) {
ql_log(ql_log_fatal, vha, 0x0116,
- "Unknow addr: 0x%08lx.\n", buf[i].addr);
+ "Unknown addr: 0x%08lx.\n", buf[i].addr);
continue;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index cce1cbc1a927..7462dd70b150 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -446,11 +446,11 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
}
ha->flags.cpu_affinity_enabled = 1;
ql_dbg(ql_dbg_multiq, vha, 0xc007,
- "CPU affinity mode enalbed, "
+ "CPU affinity mode enabled, "
"no. of response queues:%d no. of request queues:%d.\n",
ha->max_rsp_queues, ha->max_req_queues);
ql_dbg(ql_dbg_init, vha, 0x00e9,
- "CPU affinity mode enalbed, "
+ "CPU affinity mode enabled, "
"no. of response queues:%d no. of request queues:%d.\n",
ha->max_rsp_queues, ha->max_req_queues);
}
@@ -4102,7 +4102,7 @@ qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
break;
default:
ql_log(ql_log_warn, base_vha, 0xb05f,
- "Unknow work-code=0x%x.\n", work_code);
+ "Unknown work-code=0x%x.\n", work_code);
}
return;
@@ -4702,7 +4702,7 @@ qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
break;
default:
ql_log(ql_log_warn, base_vha, 0xb071,
- "Unknow Device State: %x.\n", dev_state);
+ "Unknown Device State: %x.\n", dev_state);
qla83xx_idc_unlock(base_vha, 0);
qla8xxx_dev_failed_handler(base_vha);
rval = QLA_FUNCTION_FAILED;
@@ -5834,3 +5834,6 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);
+MODULE_FIRMWARE(FW_FILE_ISP2031);
+MODULE_FIRMWARE(FW_FILE_ISP8031);
+MODULE_FIRMWARE(FW_FILE_ISP27XX);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b656a05613e8..028e8c8a7de9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1718,13 +1718,16 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
uint16_t orig_led_cfg[6];
uint32_t led_10_value, led_43_value;
- if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
return;
if (!ha->beacon_blink_led)
return;
- if (IS_QLA2031(ha)) {
+ if (IS_QLA27XX(ha)) {
+ qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
+ qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
+ } else if (IS_QLA2031(ha)) {
led_select_value = qla83xx_select_led_port(ha);
qla83xx_wr_reg(vha, led_select_value, 0x40000230);
@@ -1811,7 +1814,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
return QLA_FUNCTION_FAILED;
}
- if (IS_QLA2031(ha))
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha))
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1848,7 +1851,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
ha->beacon_blink_led = 0;
- if (IS_QLA2031(ha))
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha))
goto set_fw_options;
if (IS_QLA8031(ha) || IS_QLA81XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 57418258c101..fe8a8d157e22 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3065,7 +3065,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
{
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd;
- struct target_core_fabric_ops *tfo;
+ const struct target_core_fabric_ops *tfo;
struct qla_tgt_cmd *cmd;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index a8c0c7362e48..962cb89fe0ae 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -190,7 +190,7 @@ static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
uint offset, uint32_t data, void *buf)
{
- __iomem void *window = reg + offset;
+ __iomem void *window = (void __iomem *)reg + offset;
if (buf) {
WRT_REG_DWORD(window, data);
@@ -219,6 +219,8 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
if (buf)
ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
+ "Skipping entry %d\n", ent->hdr.entry_type);
}
static int
@@ -784,6 +786,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd01b,
"%s: len=%lx\n", __func__, *len);
+
+ if (buf) {
+ ql_log(ql_log_warn, vha, 0xd015,
+ "Firmware dump saved to temp buffer (%ld/%p)\n",
+ vha->host_no, vha->hw->fw_dump);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
}
static void
@@ -938,6 +947,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
else if (!vha->hw->fw_dump_template)
ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
+ else if (vha->hw->fw_dumped)
+ ql_log(ql_log_warn, vha, 0xd300,
+ "Firmware has been previously dumped (%p),"
+ " -- ignoring request\n", vha->hw->fw_dump);
else
qla27xx_execute_fwdt_template(vha);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d88b86214ec5..2ed9ab90a455 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.16-k"
+#define QLA2XXX_VERSION "8.07.00.18-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index ab4879e12ea7..68c2002e78bf 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -53,9 +53,8 @@
static struct workqueue_struct *tcm_qla2xxx_free_wq;
static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
-static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+static const struct target_core_fabric_ops tcm_qla2xxx_ops;
+static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops;
/*
* Parse WWN.
@@ -336,6 +335,14 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg
return tpg->tpg_attrib.demo_mode_login_only;
}
+static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.fabric_prot_type;
+}
+
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
@@ -1082,8 +1089,53 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+static ssize_t tcm_qla2xxx_tpg_show_dynamic_sessions(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return target_show_dynamic_sessions(se_tpg, page);
+}
+
+TF_TPG_BASE_ATTR_RO(tcm_qla2xxx, dynamic_sessions);
+
+static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tpg->tpg_attrib.fabric_prot_type = val;
+
+ return count;
+}
+
+static ssize_t tcm_qla2xxx_tpg_show_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
+}
+TF_TPG_BASE_ATTR(tcm_qla2xxx, fabric_prot_type, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
&tcm_qla2xxx_tpg_enable.attr,
+ &tcm_qla2xxx_tpg_dynamic_sessions.attr,
+ &tcm_qla2xxx_tpg_fabric_prot_type.attr,
NULL,
};
@@ -1124,7 +1176,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
- ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&tcm_qla2xxx_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -1244,7 +1296,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
- ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -1560,7 +1612,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
se_sess = transport_init_session_tags(num_tags,
sizeof(struct qla_tgt_cmd),
- TARGET_PROT_NORMAL);
+ TARGET_PROT_ALL);
if (IS_ERR(se_sess)) {
pr_err("Unable to initialize struct se_session\n");
return PTR_ERR(se_sess);
@@ -1934,7 +1986,9 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
+ .module = THIS_MODULE,
+ .name = "qla2xxx",
.get_fabric_name = tcm_qla2xxx_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
@@ -1949,6 +2003,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
tcm_qla2xxx_check_demo_write_protect,
.tpg_check_prod_mode_write_protect =
tcm_qla2xxx_check_prod_write_protect,
+ .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
@@ -1983,9 +2038,15 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+
+ .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs,
+ .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs,
};
-static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+ .module = THIS_MODULE,
+ .name = "qla2xxx_npiv",
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
@@ -2033,94 +2094,26 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+
+ .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs,
};
static int tcm_qla2xxx_register_configfs(void)
{
- struct target_fabric_configfs *fabric, *npiv_fabric;
int ret;
pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
- */
- fabric->tf_ops = tcm_qla2xxx_ops;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
- tcm_qla2xxx_tpg_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+
+ ret = target_register_template(&tcm_qla2xxx_ops);
+ if (ret)
return ret;
- }
- /*
- * Setup our local pointer to *fabric
- */
- tcm_qla2xxx_fabric_configfs = fabric;
- pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
- /*
- * Register the top level struct config_item_type for NPIV with TCM core
- */
- npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
- if (IS_ERR(npiv_fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- ret = PTR_ERR(npiv_fabric);
- goto out_fabric;
- }
- /*
- * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
- */
- npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
- /*
- * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
- */
- npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
- tcm_qla2xxx_npiv_tpg_attrs;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the npiv_fabric for use within TCM
- */
- ret = target_fabric_configfs_register(npiv_fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ ret = target_register_template(&tcm_qla2xxx_npiv_ops);
+ if (ret)
goto out_fabric;
- }
- /*
- * Setup our local pointer to *npiv_fabric
- */
- tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
- pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
WQ_MEM_RECLAIM, 0);
@@ -2140,9 +2133,9 @@ static int tcm_qla2xxx_register_configfs(void)
out_free_wq:
destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
- target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+ target_unregister_template(&tcm_qla2xxx_npiv_ops);
out_fabric:
- target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ target_unregister_template(&tcm_qla2xxx_ops);
return ret;
}
@@ -2151,13 +2144,8 @@ static void tcm_qla2xxx_deregister_configfs(void)
destroy_workqueue(tcm_qla2xxx_cmd_wq);
destroy_workqueue(tcm_qla2xxx_free_wq);
- target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
- tcm_qla2xxx_fabric_configfs = NULL;
- pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
-
- target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
- tcm_qla2xxx_npiv_fabric_configfs = NULL;
- pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+ target_unregister_template(&tcm_qla2xxx_ops);
+ target_unregister_template(&tcm_qla2xxx_npiv_ops);
}
static int __init tcm_qla2xxx_init(void)
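The conversion above replaces the old target_fabric_configfs_init()/target_fabric_configfs_register() boilerplate with the consolidated template API: the const ops table now carries the module, the configfs name and the attribute arrays, and a single target_register_template() call builds the configfs template from it. A minimal sketch of the new registration pattern for a hypothetical fabric module (the ops name and fields are illustrative; only registration-relevant fields are shown):

    #include <linux/module.h>
    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    /* Hypothetical fabric; only registration-relevant fields shown. */
    static const struct target_core_fabric_ops my_fabric_ops = {
            .module        = THIS_MODULE,
            .name          = "myfabric",     /* configfs directory name */
            .tfc_wwn_attrs = NULL,           /* per-cit attribute arrays, if any */
    };

    static int __init my_fabric_init(void)
    {
            /* One call now replaces the whole configfs init/register dance. */
            return target_register_template(&my_fabric_ops);
    }

    static void __exit my_fabric_exit(void)
    {
            target_unregister_template(&my_fabric_ops);
    }

    module_init(my_fabric_init);
    module_exit(my_fabric_exit);
    MODULE_LICENSE("GPL");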
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 10c002145648..23295115c9fc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -33,6 +33,7 @@ struct tcm_qla2xxx_tpg_attrib {
int demo_mode_write_protect;
int prod_mode_write_protect;
int demo_mode_login_only;
+ int fabric_prot_type;
};
struct tcm_qla2xxx_tpg {
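The new fabric_prot_type attribute backs the tpg_check_prot_fabric_only callback wired into the ops tables above; a plausible body for that callback (a sketch only, the real implementation lives in tcm_qla2xxx.c and is not shown in these hunks) simply reports the configured per-tpg value:

    /* Sketch, assuming the usual container_of layout of tcm_qla2xxx_tpg. */
    static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
    {
            struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
                            struct tcm_qla2xxx_tpg, se_tpg);

            return tpg->tpg_attrib.fabric_prot_type;
    }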
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c9c3b579eece..3833bf59fb66 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -972,18 +972,24 @@ EXPORT_SYMBOL(scsi_report_opcode);
* Description: Gets a reference to the scsi_device and increments the use count
* of the underlying LLDD module. You must hold host_lock of the
* parent Scsi_Host or already have a reference when calling this.
+ *
+ * This will fail if a device is deleted or cancelled, or when the LLD module
+ * is in the process of being unloaded.
*/
int scsi_device_get(struct scsi_device *sdev)
{
- if (sdev->sdev_state == SDEV_DEL)
- return -ENXIO;
+ if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
+ goto fail;
if (!get_device(&sdev->sdev_gendev))
- return -ENXIO;
- /* We can fail try_module_get if we're doing SCSI operations
- * from module exit (like cache flush) */
- __module_get(sdev->host->hostt->module);
-
+ goto fail;
+ if (!try_module_get(sdev->host->hostt->module))
+ goto fail_put_device;
return 0;
+
+fail_put_device:
+ put_device(&sdev->sdev_gendev);
+fail:
+ return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);
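With this change the reference can also be refused while the device is being cancelled or the LLD module is on its way out, so callers must treat a non-zero return as "do not touch this device" and balance a successful get with scsi_device_put(). A minimal caller sketch (illustrative only):

    #include <scsi/scsi_device.h>

    /* Sketch: pin the device before issuing work against it. */
    static int do_work_on_sdev(struct scsi_device *sdev)
    {
            int ret = 0;

            if (scsi_device_get(sdev))
                    return -ENXIO;          /* deleted, cancelled, or LLD unloading */

            /* ... issue commands against sdev here ... */

            scsi_device_put(sdev);          /* drops the device and module references */
            return ret;
    }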
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 262ab837a704..9f77d23239a2 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -226,6 +226,7 @@ static struct {
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9c0a520d933c..6efab1c455e1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
*/
if (*bflags & BLIST_MAX_512)
blk_queue_max_hw_sectors(sdev->request_queue, 512);
+ /*
+ * Max 1024 sector transfer length for targets that report incorrect
+ * max/optimal lengths and relied on the old block layer safe default
+ */
+ else if (*bflags & BLIST_MAX_1024)
+ blk_queue_max_hw_sectors(sdev->request_queue, 1024);
/*
* Some devices may not want to have a start command automatically
@@ -1570,16 +1576,15 @@ EXPORT_SYMBOL(scsi_add_device);
void scsi_rescan_device(struct device *dev)
{
- if (!dev->driver)
- return;
-
- if (try_module_get(dev->driver->owner)) {
+ device_lock(dev);
+ if (dev->driver && try_module_get(dev->driver->owner)) {
struct scsi_driver *drv = to_scsi_driver(dev->driver);
if (drv->rescan)
drv->rescan(dev);
module_put(dev->driver->owner);
}
+ device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);
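Two independent fixes above: BLIST_MAX_1024 (set for the QNAP array in scsi_devinfo.c) caps transfers at 1024 sectors, i.e. 512 KiB per command, for targets that mis-report their limits; and scsi_rescan_device() now holds the device lock so the driver cannot be unbound between the dev->driver check and the rescan call. The locking idiom in general form (a sketch of the pattern, not a new API):

    /* Sketch: call into a bound driver's method without racing an unbind. */
    device_lock(dev);                        /* pins dev->driver */
    if (dev->driver && try_module_get(dev->driver->owner)) {
            struct scsi_driver *drv = to_scsi_driver(dev->driver);

            if (drv->rescan)
                    drv->rescan(dev);        /* the driver cannot go away here */
            module_put(dev->driver->owner);
    }
    device_unlock(dev);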
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 5d6f348eb3d8..24eaaf66af71 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -265,6 +265,7 @@ static const struct {
{ FC_PORTSPEED_40GBIT, "40 Gbit" },
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
+ { FC_PORTSPEED_25GBIT, "25 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6b78476d04bb..79beebf53302 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -564,10 +564,12 @@ static int sd_major(int major_idx)
}
}
-static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
+static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
struct scsi_disk *sdkp = NULL;
+ mutex_lock(&sd_ref_mutex);
+
if (disk->private_data) {
sdkp = scsi_disk(disk);
if (scsi_device_get(sdkp->device) == 0)
@@ -575,27 +577,6 @@ static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
else
sdkp = NULL;
}
- return sdkp;
-}
-
-static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
-{
- struct scsi_disk *sdkp;
-
- mutex_lock(&sd_ref_mutex);
- sdkp = __scsi_disk_get(disk);
- mutex_unlock(&sd_ref_mutex);
- return sdkp;
-}
-
-static struct scsi_disk *scsi_disk_get_from_dev(struct device *dev)
-{
- struct scsi_disk *sdkp;
-
- mutex_lock(&sd_ref_mutex);
- sdkp = dev_get_drvdata(dev);
- if (sdkp)
- sdkp = __scsi_disk_get(sdkp->disk);
mutex_unlock(&sd_ref_mutex);
return sdkp;
}
@@ -610,8 +591,6 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
mutex_unlock(&sd_ref_mutex);
}
-
-
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
@@ -1525,12 +1504,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
static void sd_rescan(struct device *dev)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
- if (sdkp) {
- revalidate_disk(sdkp->disk);
- scsi_disk_put(sdkp);
- }
+ revalidate_disk(sdkp->disk);
}
@@ -2235,11 +2211,11 @@ got_data:
{
char cap_str_2[10], cap_str_10[10];
- u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
- string_get_size(sz, STRING_UNITS_2, cap_str_2,
- sizeof(cap_str_2));
- string_get_size(sz, STRING_UNITS_10, cap_str_10,
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_10, cap_str_10,
sizeof(cap_str_10));
if (sdkp->first_scan || old_capacity != sdkp->capacity) {
@@ -3100,6 +3076,7 @@ static void scsi_disk_release(struct device *dev)
ida_remove(&sd_index_ida, sdkp->index);
spin_unlock(&sd_index_lock);
+ blk_integrity_unregister(disk);
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
@@ -3149,13 +3126,13 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
*/
static void sd_shutdown(struct device *dev)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
if (!sdkp)
return; /* this can happen */
if (pm_runtime_suspended(dev))
- goto exit;
+ return;
if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3166,14 +3143,11 @@ static void sd_shutdown(struct device *dev)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
-
-exit:
- scsi_disk_put(sdkp);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
if (!sdkp)
@@ -3199,7 +3173,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
done:
- scsi_disk_put(sdkp);
return ret;
}
@@ -3215,18 +3188,13 @@ static int sd_suspend_runtime(struct device *dev)
static int sd_resume(struct device *dev)
{
- struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
- int ret = 0;
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
if (!sdkp->device->manage_start_stop)
- goto done;
+ return 0;
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
- ret = sd_start_stop_device(sdkp, 1);
-
-done:
- scsi_disk_put(sdkp);
- return ret;
+ return sd_start_stop_device(sdkp, 1);
}
/**
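The capacity-reporting hunk above reflects a string_get_size() signature change: the size is now passed as a block count plus block size instead of a pre-multiplied byte count, so sd no longer needs the u64 shift. A hedged usage sketch (values are illustrative, roughly a 2 TB disk):

    #include <linux/string_helpers.h>

    char cap_str_2[10], cap_str_10[10];
    u64 capacity = 3907029168ULL;            /* 512-byte sectors */
    unsigned int sector_size = 512;

    /* base-2 units ("GiB"/"TiB") */
    string_get_size(capacity, sector_size, STRING_UNITS_2,
                    cap_str_2, sizeof(cap_str_2));
    /* base-10 units ("GB"/"TB") */
    string_get_size(capacity, sector_size, STRING_UNITS_10,
                    cap_str_10, sizeof(cap_str_10));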
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 14c7d42a11c2..5c06d292b94c 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
- if (!sdkp)
+ if (!sdkp->ATO)
return;
if (type == SD_DIF_TYPE3_PROTECTION)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 2270bd51f9c2..9d7b7db75e4b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -33,7 +33,6 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/aio.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
@@ -51,6 +50,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
+#include <linux/uio.h>
#include "scsi.h"
#include <scsi/scsi_dbg.h>
@@ -1745,17 +1745,14 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
}
if (iov_count) {
- int size = sizeof(struct iovec) * iov_count;
- struct iovec *iov;
+ struct iovec *iov = NULL;
struct iov_iter i;
- iov = memdup_user(hp->dxferp, size);
- if (IS_ERR(iov))
- return PTR_ERR(iov);
+ res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
+ if (res < 0)
+ return res;
- iov_iter_init(&i, rw, iov, iov_count,
- min_t(size_t, hp->dxfer_len,
- iov_length(iov, iov_count)));
+ iov_iter_truncate(&i, hp->dxfer_len);
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
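The sg change swaps the open-coded memdup_user() plus iov_iter_init() pair for import_iovec(), which copies and validates the user iovec array, allocates it when needed, and initialises the iterator in one call; the caller only has to truncate the iterator to the transfer length and free the array afterwards. The general calling convention, as a sketch (parameter names are illustrative):

    #include <linux/uio.h>

    /* Sketch: map a user iovec for a transfer of at most max_len bytes. */
    static ssize_t map_user_iovec(const struct iovec __user *uvec,
                                  unsigned int nr_segs, size_t max_len)
    {
            struct iovec *iov = NULL;        /* import_iovec() allocates on demand */
            struct iov_iter iter;
            ssize_t n;

            n = import_iovec(READ, uvec, nr_segs, 0, &iov, &iter);
            if (n < 0)
                    return n;                /* bad user iovec: -EFAULT, -EINVAL, ... */

            iov_iter_truncate(&iter, max_len);       /* cap the total length */
            /* ... hand &iter to blk_rq_map_user_iov() or similar ... */

            kfree(iov);      /* kfree(NULL) is a no-op, so always safe */
            return n;
    }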
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index efc6e446b6c8..d9dad90344d5 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -308,11 +308,16 @@ enum storvsc_request_type {
* This is the end of Protocol specific defines.
*/
-static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static u32 max_outstanding_req_per_channel;
+
+static int storvsc_vcpus_per_sub_channel = 4;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
+module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
/*
* Timeout in seconds for all devices managed by this driver.
*/
@@ -320,7 +325,6 @@ static int storvsc_timeout = 180;
static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-#define STORVSC_MAX_IO_REQUESTS 200
static void storvsc_on_channel_callback(void *context);
@@ -347,7 +351,10 @@ struct storvsc_cmd_request {
/* Synchronize the request/response if needed */
struct completion wait_event;
- struct hv_multipage_buffer data_buffer;
+ struct vmbus_channel_packet_multipage_buffer mpb;
+ struct vmbus_packet_mpb_array *payload;
+ u32 payload_sz;
+
struct vstor_packet vstor_packet;
};
@@ -373,6 +380,10 @@ struct storvsc_device {
unsigned char path_id;
unsigned char target_id;
+ /*
+ * Max I/O, the device can support.
+ */
+ u32 max_transfer_bytes;
/* Used for vsc/vsp channel reset process */
struct storvsc_cmd_request init_request;
struct storvsc_cmd_request reset_request;
@@ -618,19 +629,6 @@ cleanup:
return NULL;
}
-/* Disgusting wrapper functions */
-static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
-{
- void *addr = kmap_atomic(sg_page(sgl + idx));
- return (unsigned long)addr;
-}
-
-static inline void sg_kunmap_atomic(unsigned long addr)
-{
- kunmap_atomic((void *)addr);
-}
-
-
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl,
@@ -645,32 +643,38 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
unsigned long bounce_addr = 0;
unsigned long dest_addr = 0;
unsigned long flags;
+ struct scatterlist *cur_dest_sgl;
+ struct scatterlist *cur_src_sgl;
local_irq_save(flags);
-
+ cur_dest_sgl = orig_sgl;
+ cur_src_sgl = bounce_sgl;
for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+ dest_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl)) +
+ cur_dest_sgl->offset;
dest = dest_addr;
- destlen = orig_sgl[i].length;
+ destlen = cur_dest_sgl->length;
if (bounce_addr == 0)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ bounce_addr = (unsigned long)kmap_atomic(
+ sg_page(cur_src_sgl));
while (destlen) {
- src = bounce_addr + bounce_sgl[j].offset;
- srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+ src = bounce_addr + cur_src_sgl->offset;
+ srclen = cur_src_sgl->length - cur_src_sgl->offset;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
- bounce_sgl[j].offset += copylen;
+ cur_src_sgl->offset += copylen;
destlen -= copylen;
dest += copylen;
- if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+ if (cur_src_sgl->offset == cur_src_sgl->length) {
/* full */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
j++;
/*
@@ -684,21 +688,27 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
/*
* We are done; cleanup and return.
*/
- sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(dest_addr -
+ cur_dest_sgl->offset));
local_irq_restore(flags);
return total_copied;
}
/* if we need to use another bounce buffer */
- if (destlen || i != orig_sgl_count - 1)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ if (destlen || i != orig_sgl_count - 1) {
+ cur_src_sgl = sg_next(cur_src_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_src_sgl));
+ }
} else if (destlen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
}
}
- sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
+ cur_dest_sgl = sg_next(cur_dest_sgl);
}
local_irq_restore(flags);
@@ -719,48 +729,62 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
unsigned long bounce_addr = 0;
unsigned long src_addr = 0;
unsigned long flags;
+ struct scatterlist *cur_src_sgl;
+ struct scatterlist *cur_dest_sgl;
local_irq_save(flags);
+ cur_src_sgl = orig_sgl;
+ cur_dest_sgl = bounce_sgl;
+
for (i = 0; i < orig_sgl_count; i++) {
- src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+ src_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_src_sgl)) +
+ cur_src_sgl->offset;
src = src_addr;
- srclen = orig_sgl[i].length;
+ srclen = cur_src_sgl->length;
if (bounce_addr == 0)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ bounce_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl));
while (srclen) {
/* assume bounce offset always == 0 */
- dest = bounce_addr + bounce_sgl[j].length;
- destlen = PAGE_SIZE - bounce_sgl[j].length;
+ dest = bounce_addr + cur_dest_sgl->length;
+ destlen = PAGE_SIZE - cur_dest_sgl->length;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
- bounce_sgl[j].length += copylen;
+ cur_dest_sgl->length += copylen;
srclen -= copylen;
src += copylen;
- if (bounce_sgl[j].length == PAGE_SIZE) {
+ if (cur_dest_sgl->length == PAGE_SIZE) {
/* full..move to next entry */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
+ bounce_addr = 0;
j++;
+ }
- /* if we need to use another bounce buffer */
- if (srclen || i != orig_sgl_count - 1)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
-
- } else if (srclen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- sg_kunmap_atomic(bounce_addr);
+ /* if we need to use another bounce buffer */
+ if (srclen && bounce_addr == 0) {
+ cur_dest_sgl = sg_next(cur_dest_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_dest_sgl));
}
+
}
- sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
+ cur_src_sgl = sg_next(cur_src_sgl);
}
+ if (bounce_addr)
+ kunmap_atomic((void *)bounce_addr);
+
local_irq_restore(flags);
return total_copied;
@@ -970,6 +994,8 @@ static int storvsc_channel_init(struct hv_device *device)
STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
process_sub_channels = true;
}
+ stor_device->max_transfer_bytes =
+ vstor_packet->storage_channel_properties.max_transfer_bytes;
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
@@ -1080,6 +1106,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
struct Scsi_Host *host;
struct storvsc_device *stor_dev;
struct hv_device *dev = host_dev->dev;
+ u32 payload_sz = cmd_request->payload_sz;
+ void *payload = cmd_request->payload;
stor_dev = get_in_stor_device(dev);
host = stor_dev->host;
@@ -1109,10 +1137,14 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
sense_hdr.ascq);
scsi_set_resid(scmnd,
- cmd_request->data_buffer.len -
+ cmd_request->payload->range.len -
vm_srb->data_transfer_length);
scmnd->scsi_done(scmnd);
+
+ if (payload_sz >
+ sizeof(struct vmbus_channel_packet_multipage_buffer))
+ kfree(payload);
}
static void storvsc_on_io_completion(struct hv_device *device,
@@ -1314,7 +1346,7 @@ static int storvsc_dev_remove(struct hv_device *device)
}
static int storvsc_do_io(struct hv_device *device,
- struct storvsc_cmd_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
@@ -1346,19 +1378,20 @@ static int storvsc_do_io(struct hv_device *device,
vstor_packet->vm_srb.data_transfer_length =
- request->data_buffer.len;
+ request->payload->range.len;
vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
- if (request->data_buffer.len) {
- ret = vmbus_sendpacket_multipagebuffer(outgoing_channel,
- &request->data_buffer,
+ if (request->payload->range.len) {
+
+ ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
+ request->payload, request->payload_sz,
vstor_packet,
(sizeof(struct vstor_packet) -
vmscsi_size_delta),
(unsigned long)request);
} else {
- ret = vmbus_sendpacket(device->channel, vstor_packet,
+ ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
(sizeof(struct vstor_packet) -
vmscsi_size_delta),
(unsigned long)request,
@@ -1376,7 +1409,6 @@ static int storvsc_do_io(struct hv_device *device,
static int storvsc_device_configure(struct scsi_device *sdevice)
{
- scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
@@ -1526,6 +1558,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
+ struct scatterlist *cur_sgl;
+ struct vmbus_packet_mpb_array *payload;
+ u32 payload_sz;
+ u32 length;
if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
/*
@@ -1579,46 +1615,71 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
- cmd_request->data_buffer.len = scsi_bufflen(scmnd);
- if (scsi_sg_count(scmnd)) {
- sgl = (struct scatterlist *)scsi_sglist(scmnd);
- sg_count = scsi_sg_count(scmnd);
+ sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
+
+ length = scsi_bufflen(scmnd);
+ payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+ payload_sz = sizeof(cmd_request->mpb);
+ if (sg_count) {
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
cmd_request->bounce_sgl =
- create_bounce_buffer(sgl, scsi_sg_count(scmnd),
- scsi_bufflen(scmnd),
+ create_bounce_buffer(sgl, sg_count,
+ length,
vm_srb->data_in);
if (!cmd_request->bounce_sgl)
return SCSI_MLQUEUE_HOST_BUSY;
cmd_request->bounce_sgl_count =
- ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
- PAGE_SHIFT;
+ ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
if (vm_srb->data_in == WRITE_TYPE)
copy_to_bounce_buffer(sgl,
- cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
+ cmd_request->bounce_sgl, sg_count);
sgl = cmd_request->bounce_sgl;
sg_count = cmd_request->bounce_sgl_count;
}
- cmd_request->data_buffer.offset = sgl[0].offset;
- for (i = 0; i < sg_count; i++)
- cmd_request->data_buffer.pfn_array[i] =
- page_to_pfn(sg_page((&sgl[i])));
+ if (sg_count > MAX_PAGE_BUFFER_COUNT) {
+
+ payload_sz = (sg_count * sizeof(void *) +
+ sizeof(struct vmbus_packet_mpb_array));
+ payload = kmalloc(payload_sz, GFP_ATOMIC);
+ if (!payload) {
+ if (cmd_request->bounce_sgl_count)
+ destroy_bounce_buffer(
+ cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+ }
+
+ payload->range.len = length;
+ payload->range.offset = sgl[0].offset;
+
+ cur_sgl = sgl;
+ for (i = 0; i < sg_count; i++) {
+ payload->range.pfn_array[i] =
+ page_to_pfn(sg_page((cur_sgl)));
+ cur_sgl = sg_next(cur_sgl);
+ }
} else if (scsi_sglist(scmnd)) {
- cmd_request->data_buffer.offset =
+ payload->range.len = length;
+ payload->range.offset =
virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
- cmd_request->data_buffer.pfn_array[0] =
+ payload->range.pfn_array[0] =
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
+ cmd_request->payload = payload;
+ cmd_request->payload_sz = payload_sz;
+
/* Invokes the vsc to start an IO */
ret = storvsc_do_io(dev, cmd_request);
@@ -1646,12 +1707,8 @@ static struct scsi_host_template scsi_driver = {
.eh_timed_out = storvsc_eh_timed_out,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 255,
- .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
.this_id = -1,
- /* no use setting to 0 since ll_blk_rw reset it to 1 */
- /* currently 32 */
- .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
- .use_clustering = DISABLE_CLUSTERING,
+ .use_clustering = ENABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
.no_write_same = 1,
@@ -1686,6 +1743,7 @@ static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
int ret;
+ int num_cpus = num_online_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
@@ -1694,6 +1752,7 @@ static int storvsc_probe(struct hv_device *device,
int max_luns_per_target;
int max_targets;
int max_channels;
+ int max_sub_channels = 0;
/*
* Based on the windows host we are running on,
@@ -1719,12 +1778,18 @@ static int storvsc_probe(struct hv_device *device,
max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
max_targets = STORVSC_MAX_TARGETS;
max_channels = STORVSC_MAX_CHANNELS;
+ /*
+ * On Windows8 and above, we support sub-channels for storage.
+ * The number of sub-channels offered is based on the number of
+ * VCPUs in the guest.
+ */
+ max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
break;
}
- if (dev_id->driver_data == SFC_GUID)
- scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
- STORVSC_FC_MAX_TARGETS);
+ scsi_driver.can_queue = (max_outstanding_req_per_channel *
+ (max_sub_channels + 1));
+
host = scsi_host_alloc(&scsi_driver,
sizeof(struct hv_host_device));
if (!host)
@@ -1780,6 +1845,12 @@ static int storvsc_probe(struct hv_device *device,
/* max cmd length */
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+ /*
+ * set the table size based on the info we got
+ * from the host.
+ */
+ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
@@ -1837,7 +1908,6 @@ static struct hv_driver storvsc_drv = {
static int __init storvsc_drv_init(void)
{
- u32 max_outstanding_req_per_channel;
/*
* Divide the ring buffer data size (which is 1 page less
@@ -1852,10 +1922,6 @@ static int __init storvsc_drv_init(void)
vmscsi_size_delta,
sizeof(u64)));
- if (max_outstanding_req_per_channel <
- STORVSC_MAX_IO_REQUESTS)
- return -EINVAL;
-
return vmbus_driver_register(&storvsc_drv);
}
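The queue depth is no longer the fixed STORVSC_MAX_IO_REQUESTS times targets product; it scales with the per-channel capacity (derived from the ring-buffer division in storvsc_drv_init(), elided in the hunk above) and with the number of sub-channels offered for the guest's VCPU count. A worked example of that scaling (a sketch with assumed numbers, not driver code):

    /* Assume a guest with 16 online VCPUs and the default module parameters. */
    int num_cpus = 16;
    int vcpus_per_sub_channel = 4;                   /* module default */
    int max_sub_channels = num_cpus / vcpus_per_sub_channel;        /* = 4 */

    /* can_queue = per-channel capacity * (sub-channels + the primary channel) */
    scsi_driver.can_queue = max_outstanding_req_per_channel * (max_sub_channels + 1);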
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 2a906d1d34ba..22a42836d193 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -676,7 +676,6 @@ static struct platform_driver sun3_scsi_driver = {
.remove = __exit_p(sun3_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9217af9bf734..6652a8171de6 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -214,8 +214,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
struct ufs_qcom_host *host = hba->priv;
struct phy *phy = host->generic_phy;
int ret = 0;
- u8 major;
- u16 minor, step;
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
@@ -224,8 +222,6 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
/* provide 1ms delay to let the reset pulse propagate */
usleep_range(1000, 1100);
- ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
- ufs_qcom_phy_save_controller_version(phy, major, minor, step);
ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
if (ret) {
dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
@@ -698,16 +694,24 @@ out:
*/
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
- u8 major;
- u16 minor, step;
+ struct ufs_qcom_host *host = hba->priv;
- ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
+ if (host->hw_ver.major == 0x1)
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
- /*
- * TBD
- * here we should be advertising controller quirks according to
- * controller version.
- */
+ if (host->hw_ver.major >= 0x2) {
+ if (!ufs_qcom_cap_qunipro(host))
+ /* Legacy UniPro mode still need following quirks */
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+ }
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+
+ if (host->hw_ver.major >= 0x2)
+ host->caps = UFS_QCOM_CAP_QUNIPRO;
}
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
@@ -929,6 +933,13 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_host_free;
+ ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
+ &host->hw_ver.minor, &host->hw_ver.step);
+
+ /* update phy revision information before calling phy_init() */
+ ufs_qcom_phy_save_controller_version(host->generic_phy,
+ host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
+
phy_init(host->generic_phy);
err = phy_power_on(host->generic_phy);
if (err)
@@ -938,6 +949,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_disable_phy;
+ ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 9a6febd007df..db2c0a00e846 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -151,7 +151,23 @@ struct ufs_qcom_bus_vote {
struct device_attribute max_bus_bw;
};
+/* Host controller hardware version: major.minor.step */
+struct ufs_hw_version {
+ u16 step;
+ u16 minor;
+ u8 major;
+};
struct ufs_qcom_host {
+
+ /*
+ * Set this capability if the host controller supports the QUniPro mode
+ * and the driver wants the host controller to operate in it.
+ * Note: by default this capability is kept enabled when the host
+ * controller supports the QUniPro mode.
+ */
+ #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0)
+ u32 caps;
+
struct phy *generic_phy;
struct ufs_hba *hba;
struct ufs_qcom_bus_vote bus_vote;
@@ -161,10 +177,20 @@ struct ufs_qcom_host {
struct clk *rx_l1_sync_clk;
struct clk *tx_l1_sync_clk;
bool is_lane_clks_enabled;
+
+ struct ufs_hw_version hw_ver;
};
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
+{
+ if (host->caps & UFS_QCOM_CAP_QUNIPRO)
+ return true;
+ else
+ return false;
+}
+
#endif /* UFS_QCOM_H_ */
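The caps bit and the quirk interact as follows: major revision 1 controllers always get the DME-command delay quirk, while revision 2 and later parts advertise UFS_QCOM_CAP_QUNIPRO and only fall back to the quirk when QUniPro is not in use. The boolean helper above is equivalent to the shorter idiom (behaviourally identical, shown only for clarity):

    static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
    {
            return !!(host->caps & UFS_QCOM_CAP_QUNIPRO);
    }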
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2aa85e398f76..648a44675880 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -183,6 +183,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
@@ -972,6 +973,8 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_hold(hba, false);
mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
spin_lock_irqsave(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2058,6 +2061,37 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
return ret;
}
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+{
+ #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
+ unsigned long min_sleep_time_us;
+
+ if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
+ return;
+
+ /*
+ * last_dme_cmd_tstamp will be 0 only for 1st call to
+ * this function
+ */
+ if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
+ min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
+ } else {
+ unsigned long delta =
+ (unsigned long) ktime_to_us(
+ ktime_sub(ktime_get(),
+ hba->last_dme_cmd_tstamp));
+
+ if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
+ min_sleep_time_us =
+ MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ else
+ return; /* no more delay required */
+ }
+
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+}
+
/**
* ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
* @hba: per adapter instance
@@ -2157,6 +2191,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
mutex_lock(&hba->uic_cmd_mutex);
init_completion(&uic_async_done);
+ ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->uic_async_done = &uic_async_done;
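ufshcd_add_delay_before_dme_cmd() enforces a minimum spacing of 1000 us between DME commands on controllers with the quirk set: if less than that has elapsed since last_dme_cmd_tstamp it sleeps for the remainder (with up to 50 us of slack), otherwise it returns without sleeping. A worked example of the arithmetic (assumed numbers):

    /* Quirk set; the previous DME command finished 400 us ago. */
    unsigned long delta = 400;                       /* us since the last DME command */
    unsigned long min_sleep_time_us = 1000 - delta;  /* 600 us still outstanding */

    usleep_range(min_sleep_time_us, min_sleep_time_us + 50);    /* sleep 600-650 us */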
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 4a574aa45855..b47ff07698e8 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -366,6 +366,7 @@ struct ufs_init_prefetch {
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
* @dev_cmd: ufs device management command information
+ * @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
@@ -416,6 +417,13 @@ struct ufs_hba {
unsigned int irq;
bool is_irq_enabled;
+ /*
+ * delay before each dme command is required as the unipro
+ * layer has shown instabilities
+ */
+ #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(0)
+
+ unsigned int quirks; /* Deviations from standard UFSHCI spec. */
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
@@ -446,6 +454,7 @@ struct ufs_hba {
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
+ ktime_t last_dme_cmd_tstamp;
/* Keeps information of the UFS device connected to this host */
struct ufs_dev_info dev_info;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 34199d206ba6..fad22caf0eff 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -63,6 +63,7 @@
#define VSCSIFRONT_OP_ADD_LUN 1
#define VSCSIFRONT_OP_DEL_LUN 2
+#define VSCSIFRONT_OP_READD_LUN 3
/* Tuning point. */
#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
@@ -113,8 +114,13 @@ struct vscsifrnt_info {
DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];
+ /* Following items are protected by the host lock. */
wait_queue_head_t wq_sync;
+ wait_queue_head_t wq_pause;
unsigned int wait_ring_available:1;
+ unsigned int waiting_pause:1;
+ unsigned int pause:1;
+ unsigned callers;
char dev_state_path[64];
struct task_struct *curr;
@@ -274,31 +280,31 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
wake_up(&shadow->wq_reset);
}
-static int scsifront_cmd_done(struct vscsifrnt_info *info)
+static void scsifront_do_response(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+ test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
+ "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
+ return;
+
+ if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+ scsifront_cdb_cmd_done(info, ring_rsp);
+ else
+ scsifront_sync_cmd_done(info, ring_rsp);
+}
+
+static int scsifront_ring_drain(struct vscsifrnt_info *info)
{
struct vscsiif_response *ring_rsp;
RING_IDX i, rp;
int more_to_do = 0;
- unsigned long flags;
-
- spin_lock_irqsave(info->host->host_lock, flags);
rp = info->ring.sring->rsp_prod;
rmb(); /* ordering required respective to dom0 */
for (i = info->ring.rsp_cons; i != rp; i++) {
-
ring_rsp = RING_GET_RESPONSE(&info->ring, i);
-
- if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
- test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
- "illegal rqid %u returned by backend!\n",
- ring_rsp->rqid))
- continue;
-
- if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
- scsifront_cdb_cmd_done(info, ring_rsp);
- else
- scsifront_sync_cmd_done(info, ring_rsp);
+ scsifront_do_response(info, ring_rsp);
}
info->ring.rsp_cons = i;
@@ -308,6 +314,18 @@ static int scsifront_cmd_done(struct vscsifrnt_info *info)
else
info->ring.sring->rsp_event = i + 1;
+ return more_to_do;
+}
+
+static int scsifront_cmd_done(struct vscsifrnt_info *info)
+{
+ int more_to_do;
+ unsigned long flags;
+
+ spin_lock_irqsave(info->host->host_lock, flags);
+
+ more_to_do = scsifront_ring_drain(info);
+
info->wait_ring_available = 0;
spin_unlock_irqrestore(info->host->host_lock, flags);
@@ -328,6 +346,24 @@ static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void scsifront_finish_all(struct vscsifrnt_info *info)
+{
+ unsigned i;
+ struct vscsiif_response resp;
+
+ scsifront_ring_drain(info);
+
+ for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
+ if (test_bit(i, info->shadow_free_bitmap))
+ continue;
+ resp.rqid = i;
+ resp.sense_len = 0;
+ resp.rslt = DID_RESET << 16;
+ resp.residual_len = 0;
+ scsifront_do_response(info, &resp);
+ }
+}
+
static int map_data_for_request(struct vscsifrnt_info *info,
struct scsi_cmnd *sc,
struct vscsiif_request *ring_req,
@@ -475,6 +511,27 @@ static struct vscsiif_request *scsifront_command2ring(
return ring_req;
}
+static int scsifront_enter(struct vscsifrnt_info *info)
+{
+ if (info->pause)
+ return 1;
+ info->callers++;
+ return 0;
+}
+
+static void scsifront_return(struct vscsifrnt_info *info)
+{
+ info->callers--;
+ if (info->callers)
+ return;
+
+ if (!info->waiting_pause)
+ return;
+
+ info->waiting_pause = 0;
+ wake_up(&info->wq_pause);
+}
+
static int scsifront_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *sc)
{
@@ -486,6 +543,10 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
uint16_t rqid;
spin_lock_irqsave(shost->host_lock, flags);
+ if (scsifront_enter(info)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
if (RING_FULL(&info->ring))
goto busy;
@@ -505,6 +566,7 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
if (err < 0) {
pr_debug("%s: err %d\n", __func__, err);
scsifront_put_rqid(info, rqid);
+ scsifront_return(info);
spin_unlock_irqrestore(shost->host_lock, flags);
if (err == -ENOMEM)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -514,11 +576,13 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
}
scsifront_do_request(info);
+ scsifront_return(info);
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
busy:
+ scsifront_return(info);
spin_unlock_irqrestore(shost->host_lock, flags);
pr_debug("%s: busy\n", __func__);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -549,7 +613,7 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
if (ring_req)
break;
}
- if (err) {
+ if (err || info->pause) {
spin_unlock_irq(host->host_lock);
kfree(shadow);
return FAILED;
@@ -561,6 +625,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
spin_lock_irq(host->host_lock);
}
+ if (scsifront_enter(info)) {
+ spin_unlock_irq(host->host_lock);
+ return FAILED;
+ }
+
ring_req->act = act;
ring_req->ref_rqid = s->rqid;
@@ -587,6 +656,7 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
err = FAILED;
}
+ scsifront_return(info);
spin_unlock_irq(host->host_lock);
return err;
}
@@ -644,6 +714,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
struct xenbus_device *dev = info->dev;
struct vscsiif_sring *sring;
+ grant_ref_t gref;
int err = -ENOMEM;
/***** Frontend to Backend ring start *****/
@@ -656,14 +727,14 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = xenbus_grant_ring(dev, virt_to_mfn(sring));
+ err = xenbus_grant_ring(dev, sring, 1, &gref);
if (err < 0) {
free_page((unsigned long)sring);
xenbus_dev_fatal(dev, err,
"fail to grant shared ring (Front to Back)");
return err;
}
- info->ring_ref = err;
+ info->ring_ref = gref;
err = xenbus_alloc_evtchn(dev, &info->evtchn);
if (err) {
@@ -698,6 +769,13 @@ free_gnttab:
return err;
}
+static void scsifront_free_ring(struct vscsifrnt_info *info)
+{
+ unbind_from_irqhandler(info->irq, info);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+}
+
static int scsifront_init_ring(struct vscsifrnt_info *info)
{
struct xenbus_device *dev = info->dev;
@@ -744,9 +822,7 @@ again:
fail:
xenbus_transaction_end(xbt, 1);
free_sring:
- unbind_from_irqhandler(info->irq, info);
- gnttab_end_foreign_access(info->ring_ref, 0,
- (unsigned long)info->ring.sring);
+ scsifront_free_ring(info);
return err;
}
@@ -779,6 +855,7 @@ static int scsifront_probe(struct xenbus_device *dev,
}
init_waitqueue_head(&info->wq_sync);
+ init_waitqueue_head(&info->wq_pause);
spin_lock_init(&info->shadow_lock);
snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);
@@ -802,13 +879,60 @@ static int scsifront_probe(struct xenbus_device *dev,
return 0;
free_sring:
- unbind_from_irqhandler(info->irq, info);
- gnttab_end_foreign_access(info->ring_ref, 0,
- (unsigned long)info->ring.sring);
+ scsifront_free_ring(info);
scsi_host_put(host);
return err;
}
+static int scsifront_resume(struct xenbus_device *dev)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+ struct Scsi_Host *host = info->host;
+ int err;
+
+ spin_lock_irq(host->host_lock);
+
+ /* Finish all still pending commands. */
+ scsifront_finish_all(info);
+
+ spin_unlock_irq(host->host_lock);
+
+ /* Reconnect to dom0. */
+ scsifront_free_ring(info);
+ err = scsifront_init_ring(info);
+ if (err) {
+ dev_err(&dev->dev, "fail to resume %d\n", err);
+ scsi_host_put(host);
+ return err;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+}
+
+static int scsifront_suspend(struct xenbus_device *dev)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+ struct Scsi_Host *host = info->host;
+ int err = 0;
+
+ /* No new commands for the backend. */
+ spin_lock_irq(host->host_lock);
+ info->pause = 1;
+ while (info->callers && !err) {
+ info->waiting_pause = 1;
+ info->wait_ring_available = 0;
+ spin_unlock_irq(host->host_lock);
+ wake_up(&info->wq_sync);
+ err = wait_event_interruptible(info->wq_pause,
+ !info->waiting_pause);
+ spin_lock_irq(host->host_lock);
+ }
+ spin_unlock_irq(host->host_lock);
+ return err;
+}
+
static int scsifront_remove(struct xenbus_device *dev)
{
struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
@@ -823,10 +947,7 @@ static int scsifront_remove(struct xenbus_device *dev)
}
mutex_unlock(&scsifront_mutex);
- gnttab_end_foreign_access(info->ring_ref, 0,
- (unsigned long)info->ring.sring);
- unbind_from_irqhandler(info->irq, info);
-
+ scsifront_free_ring(info);
scsi_host_put(info->host);
return 0;
@@ -919,6 +1040,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
scsi_device_put(sdev);
}
break;
+ case VSCSIFRONT_OP_READD_LUN:
+ if (device_state == XenbusStateConnected)
+ xenbus_printf(XBT_NIL, dev->nodename,
+ info->dev_state_path,
+ "%d", XenbusStateConnected);
+ break;
default:
break;
}
@@ -932,21 +1059,29 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
static void scsifront_read_backend_params(struct xenbus_device *dev,
struct vscsifrnt_info *info)
{
- unsigned int sg_grant;
+ unsigned int sg_grant, nr_segs;
int ret;
struct Scsi_Host *host = info->host;
ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
&sg_grant);
- if (ret == 1 && sg_grant) {
- sg_grant = min_t(unsigned int, sg_grant, SG_ALL);
- sg_grant = max_t(unsigned int, sg_grant, VSCSIIF_SG_TABLESIZE);
- host->sg_tablesize = min_t(unsigned int, sg_grant,
+ if (ret != 1)
+ sg_grant = 0;
+ nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
+ nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
+ nr_segs = min_t(unsigned int, nr_segs,
VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
sizeof(struct scsiif_request_segment));
- host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
- }
- dev_info(&dev->dev, "using up to %d SG entries\n", host->sg_tablesize);
+
+ if (!info->pause && sg_grant)
+ dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
+ else if (info->pause && nr_segs < host->sg_tablesize)
+ dev_warn(&dev->dev,
+ "SG entries decreased from %d to %u - device may not work properly anymore\n",
+ host->sg_tablesize, nr_segs);
+
+ host->sg_tablesize = nr_segs;
+ host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
}
static void scsifront_backend_changed(struct xenbus_device *dev,
@@ -965,6 +1100,14 @@ static void scsifront_backend_changed(struct xenbus_device *dev,
case XenbusStateConnected:
scsifront_read_backend_params(dev, info);
+
+ if (info->pause) {
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
+ xenbus_switch_state(dev, XenbusStateConnected);
+ info->pause = 0;
+ return;
+ }
+
if (xenbus_read_driver_state(dev->nodename) ==
XenbusStateInitialised)
scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
@@ -1002,6 +1145,8 @@ static struct xenbus_driver scsifront_driver = {
.ids = scsifront_ids,
.probe = scsifront_probe,
.remove = scsifront_remove,
+ .resume = scsifront_resume,
+ .suspend = scsifront_suspend,
.otherend_changed = scsifront_backend_changed,
};
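scsifront_suspend() may only tear down and re-grant the ring once nothing is inside the request or error-handling paths, which is what the small quiesce handshake added above provides: enter() refuses new work while the pause flag is set and counts active callers, return() wakes the suspender when the last caller leaves, and suspend() waits under the host lock for that count to drain. The same pattern in condensed form (a sketch, not a new API):

    /* Both helpers run under shost->host_lock, as in the hunks above. */
    static int quiesce_enter(struct vscsifrnt_info *info)
    {
            if (info->pause)
                    return 1;                /* refuse new work while suspending */
            info->callers++;
            return 0;
    }

    static void quiesce_return(struct vscsifrnt_info *info)
    {
            if (--info->callers == 0 && info->waiting_pause) {
                    info->waiting_pause = 0; /* last caller: wake the suspender */
                    wake_up(&info->wq_pause);
            }
    }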
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index cd4c293f0dd0..fe8875f0d7be 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -80,9 +80,10 @@ static int __init sh_pm_runtime_init(void)
if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
if (!of_machine_is_compatible("renesas,emev2") &&
!of_machine_is_compatible("renesas,r7s72100") &&
- !of_machine_is_compatible("renesas,r8a73a4") &&
#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
+ !of_machine_is_compatible("renesas,r8a73a4") &&
!of_machine_is_compatible("renesas,r8a7740") &&
+ !of_machine_is_compatible("renesas,sh73a0") &&
#endif
!of_machine_is_compatible("renesas,r8a7778") &&
!of_machine_is_compatible("renesas,r8a7779") &&
@@ -90,9 +91,7 @@ static int __init sh_pm_runtime_init(void)
!of_machine_is_compatible("renesas,r8a7791") &&
!of_machine_is_compatible("renesas,r8a7792") &&
!of_machine_is_compatible("renesas,r8a7793") &&
- !of_machine_is_compatible("renesas,r8a7794") &&
- !of_machine_is_compatible("renesas,sh7372") &&
- !of_machine_is_compatible("renesas,sh73a0"))
+ !of_machine_is_compatible("renesas,r8a7794"))
return 0;
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 76d6bd4da138..d8bde82f0370 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,5 +1,6 @@
menu "SOC (System On Chip) specific Drivers"
+source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/versatile/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 063113d0bd38..70042b259744 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -2,6 +2,7 @@
# Makefile for the Linux Kernel SOC specific device drivers.
#
+obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
new file mode 100644
index 000000000000..bcdb22d5e215
--- /dev/null
+++ b/drivers/soc/mediatek/Kconfig
@@ -0,0 +1,11 @@
+#
+# MediaTek SoC drivers
+#
+config MTK_PMIC_WRAP
+ tristate "MediaTek PMIC Wrapper Support"
+ depends on ARCH_MEDIATEK
+ select REGMAP
+ help
+ Say yes here to add support for MediaTek PMIC Wrapper found
+ on different MediaTek SoCs. The PMIC wrapper is proprietary
+ hardware used to connect the PMIC.
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
new file mode 100644
index 000000000000..ecaf4defd7f6
--- /dev/null
+++ b/drivers/soc/mediatek/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
new file mode 100644
index 000000000000..db5be1eec54c
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -0,0 +1,975 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4
+#define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10
+#define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14
+#define PWRAP_MT8135_BRIDGE_WACS4_EN 0x24
+#define PWRAP_MT8135_BRIDGE_INIT_DONE4 0x28
+#define PWRAP_MT8135_BRIDGE_INT_EN 0x38
+#define PWRAP_MT8135_BRIDGE_TIMER_EN 0x48
+#define PWRAP_MT8135_BRIDGE_WDT_UNIT 0x50
+#define PWRAP_MT8135_BRIDGE_WDT_SRC_EN 0x54
+
+/* macro for wrapper status */
+#define PWRAP_GET_WACS_RDATA(x) (((x) >> 0) & 0x0000ffff)
+#define PWRAP_GET_WACS_FSM(x) (((x) >> 16) & 0x00000007)
+#define PWRAP_GET_WACS_REQ(x) (((x) >> 19) & 0x00000001)
+#define PWRAP_STATE_SYNC_IDLE0 (1 << 20)
+#define PWRAP_STATE_INIT_DONE0 (1 << 21)
+
+/* macro for WACS FSM */
+#define PWRAP_WACS_FSM_IDLE 0x00
+#define PWRAP_WACS_FSM_REQ 0x02
+#define PWRAP_WACS_FSM_WFDLE 0x04
+#define PWRAP_WACS_FSM_WFVLDCLR 0x06
+#define PWRAP_WACS_INIT_DONE 0x01
+#define PWRAP_WACS_WACS_SYNC_IDLE 0x01
+#define PWRAP_WACS_SYNC_BUSY 0x00
+
+/* macro for device wrapper default value */
+#define PWRAP_DEW_READ_TEST_VAL 0x5aa5
+#define PWRAP_DEW_WRITE_TEST_VAL 0xa55a
+
+/* macro for manual command */
+#define PWRAP_MAN_CMD_SPI_WRITE (1 << 13)
+#define PWRAP_MAN_CMD_OP_CSH (0x0 << 8)
+#define PWRAP_MAN_CMD_OP_CSL (0x1 << 8)
+#define PWRAP_MAN_CMD_OP_CK (0x2 << 8)
+#define PWRAP_MAN_CMD_OP_OUTS (0x8 << 8)
+#define PWRAP_MAN_CMD_OP_OUTD (0x9 << 8)
+#define PWRAP_MAN_CMD_OP_OUTQ (0xa << 8)
+
+/* macro for slave device wrapper registers */
+#define PWRAP_DEW_BASE 0xbc00
+#define PWRAP_DEW_EVENT_OUT_EN (PWRAP_DEW_BASE + 0x0)
+#define PWRAP_DEW_DIO_EN (PWRAP_DEW_BASE + 0x2)
+#define PWRAP_DEW_EVENT_SRC_EN (PWRAP_DEW_BASE + 0x4)
+#define PWRAP_DEW_EVENT_SRC (PWRAP_DEW_BASE + 0x6)
+#define PWRAP_DEW_EVENT_FLAG (PWRAP_DEW_BASE + 0x8)
+#define PWRAP_DEW_READ_TEST (PWRAP_DEW_BASE + 0xa)
+#define PWRAP_DEW_WRITE_TEST (PWRAP_DEW_BASE + 0xc)
+#define PWRAP_DEW_CRC_EN (PWRAP_DEW_BASE + 0xe)
+#define PWRAP_DEW_CRC_VAL (PWRAP_DEW_BASE + 0x10)
+#define PWRAP_DEW_MON_GRP_SEL (PWRAP_DEW_BASE + 0x12)
+#define PWRAP_DEW_MON_FLAG_SEL (PWRAP_DEW_BASE + 0x14)
+#define PWRAP_DEW_EVENT_TEST (PWRAP_DEW_BASE + 0x16)
+#define PWRAP_DEW_CIPHER_KEY_SEL (PWRAP_DEW_BASE + 0x18)
+#define PWRAP_DEW_CIPHER_IV_SEL (PWRAP_DEW_BASE + 0x1a)
+#define PWRAP_DEW_CIPHER_LOAD (PWRAP_DEW_BASE + 0x1c)
+#define PWRAP_DEW_CIPHER_START (PWRAP_DEW_BASE + 0x1e)
+#define PWRAP_DEW_CIPHER_RDY (PWRAP_DEW_BASE + 0x20)
+#define PWRAP_DEW_CIPHER_MODE (PWRAP_DEW_BASE + 0x22)
+#define PWRAP_DEW_CIPHER_SWRST (PWRAP_DEW_BASE + 0x24)
+#define PWRAP_MT8173_DEW_CIPHER_IV0 (PWRAP_DEW_BASE + 0x26)
+#define PWRAP_MT8173_DEW_CIPHER_IV1 (PWRAP_DEW_BASE + 0x28)
+#define PWRAP_MT8173_DEW_CIPHER_IV2 (PWRAP_DEW_BASE + 0x2a)
+#define PWRAP_MT8173_DEW_CIPHER_IV3 (PWRAP_DEW_BASE + 0x2c)
+#define PWRAP_MT8173_DEW_CIPHER_IV4 (PWRAP_DEW_BASE + 0x2e)
+#define PWRAP_MT8173_DEW_CIPHER_IV5 (PWRAP_DEW_BASE + 0x30)
+
+enum pwrap_regs {
+ PWRAP_MUX_SEL,
+ PWRAP_WRAP_EN,
+ PWRAP_DIO_EN,
+ PWRAP_SIDLY,
+ PWRAP_CSHEXT_WRITE,
+ PWRAP_CSHEXT_READ,
+ PWRAP_CSLEXT_START,
+ PWRAP_CSLEXT_END,
+ PWRAP_STAUPD_PRD,
+ PWRAP_STAUPD_GRPEN,
+ PWRAP_STAUPD_MAN_TRIG,
+ PWRAP_STAUPD_STA,
+ PWRAP_WRAP_STA,
+ PWRAP_HARB_INIT,
+ PWRAP_HARB_HPRIO,
+ PWRAP_HIPRIO_ARB_EN,
+ PWRAP_HARB_STA0,
+ PWRAP_HARB_STA1,
+ PWRAP_MAN_EN,
+ PWRAP_MAN_CMD,
+ PWRAP_MAN_RDATA,
+ PWRAP_MAN_VLDCLR,
+ PWRAP_WACS0_EN,
+ PWRAP_INIT_DONE0,
+ PWRAP_WACS0_CMD,
+ PWRAP_WACS0_RDATA,
+ PWRAP_WACS0_VLDCLR,
+ PWRAP_WACS1_EN,
+ PWRAP_INIT_DONE1,
+ PWRAP_WACS1_CMD,
+ PWRAP_WACS1_RDATA,
+ PWRAP_WACS1_VLDCLR,
+ PWRAP_WACS2_EN,
+ PWRAP_INIT_DONE2,
+ PWRAP_WACS2_CMD,
+ PWRAP_WACS2_RDATA,
+ PWRAP_WACS2_VLDCLR,
+ PWRAP_INT_EN,
+ PWRAP_INT_FLG_RAW,
+ PWRAP_INT_FLG,
+ PWRAP_INT_CLR,
+ PWRAP_SIG_ADR,
+ PWRAP_SIG_MODE,
+ PWRAP_SIG_VALUE,
+ PWRAP_SIG_ERRVAL,
+ PWRAP_CRC_EN,
+ PWRAP_TIMER_EN,
+ PWRAP_TIMER_STA,
+ PWRAP_WDT_UNIT,
+ PWRAP_WDT_SRC_EN,
+ PWRAP_WDT_FLG,
+ PWRAP_DEBUG_INT_SEL,
+ PWRAP_CIPHER_KEY_SEL,
+ PWRAP_CIPHER_IV_SEL,
+ PWRAP_CIPHER_RDY,
+ PWRAP_CIPHER_MODE,
+ PWRAP_CIPHER_SWRST,
+ PWRAP_DCM_EN,
+ PWRAP_DCM_DBC_PRD,
+
+ /* MT8135 only regs */
+ PWRAP_CSHEXT,
+ PWRAP_EVENT_IN_EN,
+ PWRAP_EVENT_DST_EN,
+ PWRAP_RRARB_INIT,
+ PWRAP_RRARB_EN,
+ PWRAP_RRARB_STA0,
+ PWRAP_RRARB_STA1,
+ PWRAP_EVENT_STA,
+ PWRAP_EVENT_STACLR,
+ PWRAP_CIPHER_LOAD,
+ PWRAP_CIPHER_START,
+
+ /* MT8173 only regs */
+ PWRAP_RDDMY,
+ PWRAP_SI_CK_CON,
+ PWRAP_DVFS_ADR0,
+ PWRAP_DVFS_WDATA0,
+ PWRAP_DVFS_ADR1,
+ PWRAP_DVFS_WDATA1,
+ PWRAP_DVFS_ADR2,
+ PWRAP_DVFS_WDATA2,
+ PWRAP_DVFS_ADR3,
+ PWRAP_DVFS_WDATA3,
+ PWRAP_DVFS_ADR4,
+ PWRAP_DVFS_WDATA4,
+ PWRAP_DVFS_ADR5,
+ PWRAP_DVFS_WDATA5,
+ PWRAP_DVFS_ADR6,
+ PWRAP_DVFS_WDATA6,
+ PWRAP_DVFS_ADR7,
+ PWRAP_DVFS_WDATA7,
+ PWRAP_SPMINF_STA,
+ PWRAP_CIPHER_EN,
+};
+
+static int mt8173_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1c,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2c,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4c,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5c,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6c,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7c,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8c,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9c,
+ [PWRAP_WACS2_CMD] = 0xa0,
+ [PWRAP_WACS2_RDATA] = 0xa4,
+ [PWRAP_WACS2_VLDCLR] = 0xa8,
+ [PWRAP_INT_EN] = 0xac,
+ [PWRAP_INT_FLG_RAW] = 0xb0,
+ [PWRAP_INT_FLG] = 0xb4,
+ [PWRAP_INT_CLR] = 0xb8,
+ [PWRAP_SIG_ADR] = 0xbc,
+ [PWRAP_SIG_MODE] = 0xc0,
+ [PWRAP_SIG_VALUE] = 0xc4,
+ [PWRAP_SIG_ERRVAL] = 0xc8,
+ [PWRAP_CRC_EN] = 0xcc,
+ [PWRAP_TIMER_EN] = 0xd0,
+ [PWRAP_TIMER_STA] = 0xd4,
+ [PWRAP_WDT_UNIT] = 0xd8,
+ [PWRAP_WDT_SRC_EN] = 0xdc,
+ [PWRAP_WDT_FLG] = 0xe0,
+ [PWRAP_DEBUG_INT_SEL] = 0xe4,
+ [PWRAP_DVFS_ADR0] = 0xe8,
+ [PWRAP_DVFS_WDATA0] = 0xec,
+ [PWRAP_DVFS_ADR1] = 0xf0,
+ [PWRAP_DVFS_WDATA1] = 0xf4,
+ [PWRAP_DVFS_ADR2] = 0xf8,
+ [PWRAP_DVFS_WDATA2] = 0xfc,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10c,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11c,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_SPMINF_STA] = 0x128,
+ [PWRAP_CIPHER_KEY_SEL] = 0x12c,
+ [PWRAP_CIPHER_IV_SEL] = 0x130,
+ [PWRAP_CIPHER_EN] = 0x134,
+ [PWRAP_CIPHER_RDY] = 0x138,
+ [PWRAP_CIPHER_MODE] = 0x13c,
+ [PWRAP_CIPHER_SWRST] = 0x140,
+ [PWRAP_DCM_EN] = 0x144,
+ [PWRAP_DCM_DBC_PRD] = 0x148,
+};
+
+static int mt8135_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_CSHEXT] = 0x10,
+ [PWRAP_CSHEXT_WRITE] = 0x14,
+ [PWRAP_CSHEXT_READ] = 0x18,
+ [PWRAP_CSLEXT_START] = 0x1c,
+ [PWRAP_CSLEXT_END] = 0x20,
+ [PWRAP_STAUPD_PRD] = 0x24,
+ [PWRAP_STAUPD_GRPEN] = 0x28,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x2c,
+ [PWRAP_STAUPD_STA] = 0x30,
+ [PWRAP_EVENT_IN_EN] = 0x34,
+ [PWRAP_EVENT_DST_EN] = 0x38,
+ [PWRAP_WRAP_STA] = 0x3c,
+ [PWRAP_RRARB_INIT] = 0x40,
+ [PWRAP_RRARB_EN] = 0x44,
+ [PWRAP_RRARB_STA0] = 0x48,
+ [PWRAP_RRARB_STA1] = 0x4c,
+ [PWRAP_HARB_INIT] = 0x50,
+ [PWRAP_HARB_HPRIO] = 0x54,
+ [PWRAP_HIPRIO_ARB_EN] = 0x58,
+ [PWRAP_HARB_STA0] = 0x5c,
+ [PWRAP_HARB_STA1] = 0x60,
+ [PWRAP_MAN_EN] = 0x64,
+ [PWRAP_MAN_CMD] = 0x68,
+ [PWRAP_MAN_RDATA] = 0x6c,
+ [PWRAP_MAN_VLDCLR] = 0x70,
+ [PWRAP_WACS0_EN] = 0x74,
+ [PWRAP_INIT_DONE0] = 0x78,
+ [PWRAP_WACS0_CMD] = 0x7c,
+ [PWRAP_WACS0_RDATA] = 0x80,
+ [PWRAP_WACS0_VLDCLR] = 0x84,
+ [PWRAP_WACS1_EN] = 0x88,
+ [PWRAP_INIT_DONE1] = 0x8c,
+ [PWRAP_WACS1_CMD] = 0x90,
+ [PWRAP_WACS1_RDATA] = 0x94,
+ [PWRAP_WACS1_VLDCLR] = 0x98,
+ [PWRAP_WACS2_EN] = 0x9c,
+ [PWRAP_INIT_DONE2] = 0xa0,
+ [PWRAP_WACS2_CMD] = 0xa4,
+ [PWRAP_WACS2_RDATA] = 0xa8,
+ [PWRAP_WACS2_VLDCLR] = 0xac,
+ [PWRAP_INT_EN] = 0xb0,
+ [PWRAP_INT_FLG_RAW] = 0xb4,
+ [PWRAP_INT_FLG] = 0xb8,
+ [PWRAP_INT_CLR] = 0xbc,
+ [PWRAP_SIG_ADR] = 0xc0,
+ [PWRAP_SIG_MODE] = 0xc4,
+ [PWRAP_SIG_VALUE] = 0xc8,
+ [PWRAP_SIG_ERRVAL] = 0xcc,
+ [PWRAP_CRC_EN] = 0xd0,
+ [PWRAP_EVENT_STA] = 0xd4,
+ [PWRAP_EVENT_STACLR] = 0xd8,
+ [PWRAP_TIMER_EN] = 0xdc,
+ [PWRAP_TIMER_STA] = 0xe0,
+ [PWRAP_WDT_UNIT] = 0xe4,
+ [PWRAP_WDT_SRC_EN] = 0xe8,
+ [PWRAP_WDT_FLG] = 0xec,
+ [PWRAP_DEBUG_INT_SEL] = 0xf0,
+ [PWRAP_CIPHER_KEY_SEL] = 0x134,
+ [PWRAP_CIPHER_IV_SEL] = 0x138,
+ [PWRAP_CIPHER_LOAD] = 0x13c,
+ [PWRAP_CIPHER_START] = 0x140,
+ [PWRAP_CIPHER_RDY] = 0x144,
+ [PWRAP_CIPHER_MODE] = 0x148,
+ [PWRAP_CIPHER_SWRST] = 0x14c,
+ [PWRAP_DCM_EN] = 0x15c,
+ [PWRAP_DCM_DBC_PRD] = 0x160,
+};
+
+enum pwrap_type {
+ PWRAP_MT8135,
+ PWRAP_MT8173,
+};
+
+struct pmic_wrapper_type {
+ int *regs;
+ enum pwrap_type type;
+ u32 arb_en_all;
+};
+
+static struct pmic_wrapper_type pwrap_mt8135 = {
+ .regs = mt8135_regs,
+ .type = PWRAP_MT8135,
+ .arb_en_all = 0x1ff,
+};
+
+static struct pmic_wrapper_type pwrap_mt8173 = {
+ .regs = mt8173_regs,
+ .type = PWRAP_MT8173,
+ .arb_en_all = 0x3f,
+};
+
+struct pmic_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ int *regs;
+ enum pwrap_type type;
+ u32 arb_en_all;
+ struct clk *clk_spi;
+ struct clk *clk_wrap;
+ struct reset_control *rstc;
+
+ struct reset_control *rstc_bridge;
+ void __iomem *bridge_base;
+};
+
+static inline int pwrap_is_mt8135(struct pmic_wrapper *wrp)
+{
+ return wrp->type == PWRAP_MT8135;
+}
+
+static inline int pwrap_is_mt8173(struct pmic_wrapper *wrp)
+{
+ return wrp->type == PWRAP_MT8173;
+}
+
+static u32 pwrap_readl(struct pmic_wrapper *wrp, enum pwrap_regs reg)
+{
+ return readl(wrp->base + wrp->regs[reg]);
+}
+
+static void pwrap_writel(struct pmic_wrapper *wrp, u32 val, enum pwrap_regs reg)
+{
+ writel(val, wrp->base + wrp->regs[reg]);
+}
+
+static bool pwrap_is_fsm_idle(struct pmic_wrapper *wrp)
+{
+ u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+
+ return PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_IDLE;
+}
+
+static bool pwrap_is_fsm_vldclr(struct pmic_wrapper *wrp)
+{
+ u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+
+ return PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR;
+}
+
+static bool pwrap_is_sync_idle(struct pmic_wrapper *wrp)
+{
+ return pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_SYNC_IDLE0;
+}
+
+static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp)
+{
+ u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+
+ return (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_IDLE) &&
+ (val & PWRAP_STATE_SYNC_IDLE0);
+}
+
+static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
+ bool (*fp)(struct pmic_wrapper *))
+{
+ unsigned long timeout;
+
+ timeout = jiffies + usecs_to_jiffies(255);
+
+ do {
+ if (time_after(jiffies, timeout))
+ return fp(wrp) ? 0 : -ETIMEDOUT;
+ if (fp(wrp))
+ return 0;
+ } while (1);
+}
+
+static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ int ret;
+ u32 val;
+
+ val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret)
+ return ret;
+
+ pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
+ PWRAP_WACS2_CMD);
+
+ return 0;
+}
+
+static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ int ret;
+ u32 val;
+
+ val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret)
+ return ret;
+
+ pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ if (ret)
+ return ret;
+
+ *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+
+ return 0;
+}
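
Taken together, the two helpers above imply a WACS command-word layout: bit 31 selects write (1) or read (0), the register address shifted right by one occupies bits 30:16, and the 16-bit write data sits in the low half-word. A standalone sketch of that encoding, illustrative only and not part of the driver, could look like this:

#include <stdio.h>

/* Illustration only: encode a WACS command word the way pwrap_read/pwrap_write do. */
static unsigned int wacs_cmd(int is_write, unsigned int adr, unsigned int wdata)
{
	return ((unsigned int)!!is_write << 31) | ((adr >> 1) << 16) | (wdata & 0xffff);
}

int main(void)
{
	printf("write 0x1234 to 0x00a0: 0x%08x\n", wacs_cmd(1, 0x00a0, 0x1234));
	printf("read from 0x00a0:       0x%08x\n", wacs_cmd(0, 0x00a0, 0));
	return 0;
}
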
+
+static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata)
+{
+ return pwrap_read(context, adr, rdata);
+}
+
+static int pwrap_regmap_write(void *context, u32 adr, u32 wdata)
+{
+ return pwrap_write(context, adr, wdata);
+}
+
+static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
+{
+ int ret, i;
+
+ pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN);
+ pwrap_writel(wrp, 0, PWRAP_WRAP_EN);
+ pwrap_writel(wrp, 1, PWRAP_MUX_SEL);
+ pwrap_writel(wrp, 1, PWRAP_MAN_EN);
+ pwrap_writel(wrp, 0, PWRAP_DIO_EN);
+
+ pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_CSL,
+ PWRAP_MAN_CMD);
+ pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_OUTS,
+ PWRAP_MAN_CMD);
+ pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_CSH,
+ PWRAP_MAN_CMD);
+
+ for (i = 0; i < 4; i++)
+ pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_OUTS,
+ PWRAP_MAN_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 0, PWRAP_MAN_EN);
+ pwrap_writel(wrp, 0, PWRAP_MUX_SEL);
+
+ return 0;
+}
+
+/*
+ * pwrap_init_sidly - configure serial input delay
+ *
+ * This configures the serial input delay, which can be 0, 2, 4 or 6 ns.
+ * Do a read test with all possible values and choose the best delay.
+ */
+static int pwrap_init_sidly(struct pmic_wrapper *wrp)
+{
+ u32 rdata;
+ u32 i;
+ u32 pass = 0;
+ signed char dly[16] = {
+ -1, 0, 1, 0, 2, -1, 1, 1, 3, -1, -1, -1, 3, -1, 2, 1
+ };
+
+ for (i = 0; i < 4; i++) {
+ pwrap_writel(wrp, i, PWRAP_SIDLY);
+ pwrap_read(wrp, PWRAP_DEW_READ_TEST, &rdata);
+ if (rdata == PWRAP_DEW_READ_TEST_VAL) {
+ dev_dbg(wrp->dev, "[Read Test] pass, SIDLY=%x\n", i);
+ pass |= 1 << i;
+ }
+ }
+
+ if (dly[pass] < 0) {
+ dev_err(wrp->dev, "sidly pass range 0x%x not continuous\n",
+ pass);
+ return -EIO;
+ }
+
+ pwrap_writel(wrp, dly[pass], PWRAP_SIDLY);
+
+ return 0;
+}
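
For reference, the dly[] lookup above maps the 4-bit pass mask (bit i set when SIDLY value i passed the read test) either to a delay near the middle of the passing window or to -1 when the window is not contiguous. A rough host-side sketch of the same idea follows; it is an approximation and does not reproduce the driver's table entry for entry:

#include <stdio.h>

/* Approximation: pick a SIDLY value from a 4-bit pass mask. */
static int pick_sidly(unsigned int pass)
{
	int first = -1, last = -1, i;

	for (i = 0; i < 4; i++) {
		if (!(pass & (1u << i)))
			continue;
		if (first < 0)
			first = i;
		else if (i != last + 1)
			return -1;	/* passing window not contiguous */
		last = i;
	}
	if (first < 0)
		return -1;		/* no value passed the read test */
	return (first + last) / 2;	/* roughly the middle of the window */
}

int main(void)
{
	printf("pass=0x6 -> SIDLY %d\n", pick_sidly(0x6));	/* {1,2}   -> 1 */
	printf("pass=0xe -> SIDLY %d\n", pick_sidly(0xe));	/* {1,2,3} -> 2 */
	printf("pass=0x5 -> SIDLY %d\n", pick_sidly(0x5));	/* gap     -> -1 */
	return 0;
}
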
+
+static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ unsigned long rate_spi;
+ int ck_mhz;
+
+ rate_spi = clk_get_rate(wrp->clk_spi);
+
+ if (rate_spi > 26000000)
+ ck_mhz = 26;
+ else if (rate_spi > 18000000)
+ ck_mhz = 18;
+ else
+ ck_mhz = 0;
+
+ switch (ck_mhz) {
+ case 18:
+ if (pwrap_is_mt8135(wrp))
+ pwrap_writel(wrp, 0xc, PWRAP_CSHEXT);
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
+ break;
+ case 26:
+ if (pwrap_is_mt8135(wrp))
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
+ break;
+ case 0:
+ if (pwrap_is_mt8135(wrp))
+ pwrap_writel(wrp, 0xf, PWRAP_CSHEXT);
+ pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool pwrap_is_cipher_ready(struct pmic_wrapper *wrp)
+{
+ return pwrap_readl(wrp, PWRAP_CIPHER_RDY) & 1;
+}
+
+static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
+{
+ u32 rdata;
+ int ret;
+
+ ret = pwrap_read(wrp, PWRAP_DEW_CIPHER_RDY, &rdata);
+ if (ret)
+ return 0;
+
+ return rdata == 1;
+}
+
+static int pwrap_init_cipher(struct pmic_wrapper *wrp)
+{
+ int ret;
+ u32 rdata;
+
+ pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
+ pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
+ pwrap_writel(wrp, 0x1, PWRAP_CIPHER_KEY_SEL);
+ pwrap_writel(wrp, 0x2, PWRAP_CIPHER_IV_SEL);
+
+ if (pwrap_is_mt8135(wrp)) {
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_LOAD);
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_START);
+ } else {
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
+ }
+
+ /* Config cipher mode @PMIC */
+ pwrap_write(wrp, PWRAP_DEW_CIPHER_SWRST, 0x1);
+ pwrap_write(wrp, PWRAP_DEW_CIPHER_SWRST, 0x0);
+ pwrap_write(wrp, PWRAP_DEW_CIPHER_KEY_SEL, 0x1);
+ pwrap_write(wrp, PWRAP_DEW_CIPHER_IV_SEL, 0x2);
+ pwrap_write(wrp, PWRAP_DEW_CIPHER_LOAD, 0x1);
+ pwrap_write(wrp, PWRAP_DEW_CIPHER_START, 0x1);
+
+ /* wait for cipher data ready@AP */
+ ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready);
+ if (ret) {
+ dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* wait for cipher data ready@PMIC */
+ ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready);
+ if (ret) {
+ dev_err(wrp->dev, "timeout waiting for cipher data ready@PMIC\n");
+ return ret;
+ }
+
+ /* wait for cipher mode idle */
+ pwrap_write(wrp, PWRAP_DEW_CIPHER_MODE, 0x1);
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_MODE);
+
+ /* Write Test */
+ if (pwrap_write(wrp, PWRAP_DEW_WRITE_TEST, PWRAP_DEW_WRITE_TEST_VAL) ||
+ pwrap_read(wrp, PWRAP_DEW_WRITE_TEST, &rdata) ||
+ (rdata != PWRAP_DEW_WRITE_TEST_VAL)) {
+ dev_err(wrp->dev, "rdata=0x%04X\n", rdata);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_init(struct pmic_wrapper *wrp)
+{
+ int ret;
+ u32 rdata;
+
+ reset_control_reset(wrp->rstc);
+ if (wrp->rstc_bridge)
+ reset_control_reset(wrp->rstc_bridge);
+
+ if (pwrap_is_mt8173(wrp)) {
+ /* Enable DCM */
+ pwrap_writel(wrp, 3, PWRAP_DCM_EN);
+ pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+ }
+
+ /* Reset SPI slave */
+ ret = pwrap_reset_spislave(wrp);
+ if (ret)
+ return ret;
+
+ pwrap_writel(wrp, 1, PWRAP_WRAP_EN);
+
+ pwrap_writel(wrp, wrp->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_EN);
+
+ ret = pwrap_init_reg_clock(wrp);
+ if (ret)
+ return ret;
+
+ /* Set up the serial input delay */
+ ret = pwrap_init_sidly(wrp);
+ if (ret)
+ return ret;
+
+ /* Enable dual IO mode */
+ pwrap_write(wrp, PWRAP_DEW_DIO_EN, 1);
+
+ /* Check IDLE & INIT_DONE in advance */
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_DIO_EN);
+
+ /* Read Test */
+ pwrap_read(wrp, PWRAP_DEW_READ_TEST, &rdata);
+ if (rdata != PWRAP_DEW_READ_TEST_VAL) {
+ dev_err(wrp->dev, "Read test failed after switch to DIO mode: 0x%04x != 0x%04x\n",
+ PWRAP_DEW_READ_TEST_VAL, rdata);
+ return -EFAULT;
+ }
+
+ /* Enable encryption */
+ ret = pwrap_init_cipher(wrp);
+ if (ret)
+ return ret;
+
+ /* Signature checking - using CRC */
+ if (pwrap_write(wrp, PWRAP_DEW_CRC_EN, 0x1))
+ return -EFAULT;
+
+ pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
+ pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
+ pwrap_writel(wrp, PWRAP_DEW_CRC_VAL, PWRAP_SIG_ADR);
+ pwrap_writel(wrp, wrp->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ if (pwrap_is_mt8135(wrp))
+ pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN);
+
+ pwrap_writel(wrp, 0x1, PWRAP_WACS0_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_WACS1_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_WACS2_EN);
+ pwrap_writel(wrp, 0x5, PWRAP_STAUPD_PRD);
+ pwrap_writel(wrp, 0xff, PWRAP_STAUPD_GRPEN);
+ pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
+ pwrap_writel(wrp, 0xffffffff, PWRAP_WDT_SRC_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
+ pwrap_writel(wrp, ~((1 << 31) | (1 << 1)), PWRAP_INT_EN);
+
+ if (pwrap_is_mt8135(wrp)) {
+ /* enable pwrap events and the pwrap bridge on the AP side */
+ pwrap_writel(wrp, 0x1, PWRAP_EVENT_IN_EN);
+ pwrap_writel(wrp, 0xffff, PWRAP_EVENT_DST_EN);
+ writel(0x7f, wrp->bridge_base + PWRAP_MT8135_BRIDGE_IORD_ARB_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS3_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS4_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_UNIT);
+ writel(0xffff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_SRC_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_TIMER_EN);
+ writel(0x7ff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INT_EN);
+
+ /* enable PMIC event out and sources */
+ if (pwrap_write(wrp, PWRAP_DEW_EVENT_OUT_EN, 0x1) ||
+ pwrap_write(wrp, PWRAP_DEW_EVENT_SRC_EN, 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+ } else {
+ /* PMIC_DEWRAP enables */
+ if (pwrap_write(wrp, PWRAP_DEW_EVENT_OUT_EN, 0x1) ||
+ pwrap_write(wrp, PWRAP_DEW_EVENT_SRC_EN, 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+ }
+
+ /* Set up the init done registers */
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE2);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE0);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE1);
+
+ if (pwrap_is_mt8135(wrp)) {
+ writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE3);
+ writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE4);
+ }
+
+ return 0;
+}
+
+static irqreturn_t pwrap_interrupt(int irqno, void *dev_id)
+{
+ u32 rdata;
+ struct pmic_wrapper *wrp = dev_id;
+
+ rdata = pwrap_readl(wrp, PWRAP_INT_FLG);
+
+ dev_err(wrp->dev, "unexpected interrupt int=0x%x\n", rdata);
+
+ pwrap_writel(wrp, 0xffffffff, PWRAP_INT_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config pwrap_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_read = pwrap_regmap_read,
+ .reg_write = pwrap_regmap_write,
+ .max_register = 0xffff,
+};
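
Because reg_read and reg_write are supplied, accesses through this regmap bypass the usual MMIO fast path and go over the wrapper's WACS2 channel instead. A hedged sketch of how a child device populated under the wrapper might use that regmap is shown below; the register offset 0x0200, the probe function name, and the use of dev_get_regmap() on the parent are illustrative assumptions, not taken from this patch, and the driver boilerplate is omitted:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Hypothetical child of the pwrap node; register 0x0200 is made up. */
static int example_pmic_probe(struct platform_device *pdev)
{
	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
	unsigned int val;
	int ret;

	if (!map)
		return -EPROBE_DEFER;

	ret = regmap_read(map, 0x0200, &val);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "PMIC reg 0x0200 = 0x%04x\n", val);
	return 0;
}
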
+
+static struct of_device_id of_pwrap_match_tbl[] = {
+ {
+ .compatible = "mediatek,mt8135-pwrap",
+ .data = &pwrap_mt8135,
+ }, {
+ .compatible = "mediatek,mt8173-pwrap",
+ .data = &pwrap_mt8173,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
+
+static int pwrap_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ struct pmic_wrapper *wrp;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(of_pwrap_match_tbl, &pdev->dev);
+ const struct pmic_wrapper_type *type;
+ struct resource *res;
+
+ wrp = devm_kzalloc(&pdev->dev, sizeof(*wrp), GFP_KERNEL);
+ if (!wrp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, wrp);
+
+ type = of_id->data;
+ wrp->regs = type->regs;
+ wrp->type = type->type;
+ wrp->arb_en_all = type->arb_en_all;
+ wrp->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap");
+ wrp->base = devm_ioremap_resource(wrp->dev, res);
+ if (IS_ERR(wrp->base))
+ return PTR_ERR(wrp->base);
+
+ wrp->rstc = devm_reset_control_get(wrp->dev, "pwrap");
+ if (IS_ERR(wrp->rstc)) {
+ ret = PTR_ERR(wrp->rstc);
+ dev_dbg(wrp->dev, "cannot get pwrap reset: %d\n", ret);
+ return ret;
+ }
+
+ if (pwrap_is_mt8135(wrp)) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pwrap-bridge");
+ wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
+ if (IS_ERR(wrp->bridge_base))
+ return PTR_ERR(wrp->bridge_base);
+
+ wrp->rstc_bridge = devm_reset_control_get(wrp->dev, "pwrap-bridge");
+ if (IS_ERR(wrp->rstc_bridge)) {
+ ret = PTR_ERR(wrp->rstc_bridge);
+ dev_dbg(wrp->dev, "cannot get pwrap-bridge reset: %d\n", ret);
+ return ret;
+ }
+ }
+
+ wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
+ if (IS_ERR(wrp->clk_spi)) {
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_spi));
+ return PTR_ERR(wrp->clk_spi);
+ }
+
+ wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
+ if (IS_ERR(wrp->clk_wrap)) {
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_wrap));
+ return PTR_ERR(wrp->clk_wrap);
+ }
+
+ ret = clk_prepare_enable(wrp->clk_spi);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(wrp->clk_wrap);
+ if (ret)
+ goto err_out1;
+
+ /* Enable internal dynamic clock */
+ pwrap_writel(wrp, 1, PWRAP_DCM_EN);
+ pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+
+ /*
+ * The PMIC could already be initialized by the bootloader.
+ * In that case, skip the initialization here.
+ */
+ if (!pwrap_readl(wrp, PWRAP_INIT_DONE2)) {
+ ret = pwrap_init(wrp);
+ if (ret) {
+ dev_dbg(wrp->dev, "init failed with %d\n", ret);
+ goto err_out2;
+ }
+ }
+
+ if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_INIT_DONE0)) {
+ dev_dbg(wrp->dev, "initialization isn't finished\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH,
+ "mt-pmic-pwrap", wrp);
+ if (ret)
+ goto err_out2;
+
+ wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, &pwrap_regmap_config);
+ if (IS_ERR(wrp->regmap))
+ return PTR_ERR(wrp->regmap);
+
+ ret = of_platform_populate(np, NULL, NULL, wrp->dev);
+ if (ret) {
+ dev_dbg(wrp->dev, "failed to create child devices at %s\n",
+ np->full_name);
+ goto err_out2;
+ }
+
+ return 0;
+
+err_out2:
+ clk_disable_unprepare(wrp->clk_wrap);
+err_out1:
+ clk_disable_unprepare(wrp->clk_spi);
+
+ return ret;
+}
+
+static struct platform_driver pwrap_drv = {
+ .driver = {
+ .name = "mt-pmic-pwrap",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_pwrap_match_tbl),
+ },
+ .probe = pwrap_probe,
+};
+
+module_platform_driver(pwrap_drv);
+
+MODULE_AUTHOR("Flora Fu, MediaTek");
+MODULE_DESCRIPTION("MediaTek MT8135 PMIC Wrapper Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 7bd2c94f54a4..460b2dba109c 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -4,6 +4,7 @@
config QCOM_GSBI
tristate "QCOM General Serial Bus Interface"
depends on ARCH_QCOM
+ select MFD_SYSCON
help
Say y here to enable GSBI support. The GSBI provides control
functions for connecting the underlying serial UART, SPI, and I2C
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 729425ddfd3e..09c669e70d63 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -18,22 +18,129 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
#define GSBI_CTRL_REG 0x0000
#define GSBI_PROTOCOL_SHIFT 4
+#define MAX_GSBI 12
+
+#define TCSR_ADM_CRCI_BASE 0x70
+
+struct crci_config {
+ u32 num_rows;
+ const u32 (*array)[MAX_GSBI];
+};
+
+static const u32 crci_ipq8064[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_ipq8064 = {
+ .num_rows = ARRAY_SIZE(crci_ipq8064),
+ .array = crci_ipq8064,
+};
+
+static const unsigned int crci_apq8064[][MAX_GSBI] = {
+ {
+ 0x001800, 0x006000, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_apq8064 = {
+ .num_rows = ARRAY_SIZE(crci_apq8064),
+ .array = crci_apq8064,
+};
+
+static const unsigned int crci_msm8960[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000300,
+ 0x001800, 0x006000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_msm8960 = {
+ .num_rows = ARRAY_SIZE(crci_msm8960),
+ .array = crci_msm8960,
+};
+
+static const unsigned int crci_msm8660[][MAX_GSBI] = {
+ { /* ADM 0 - A */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 0 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - A */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_msm8660 = {
+ .num_rows = ARRAY_SIZE(crci_msm8660),
+ .array = crci_msm8660,
+};
struct gsbi_info {
struct clk *hclk;
u32 mode;
u32 crci;
+ struct regmap *tcsr;
+};
+
+static const struct of_device_id tcsr_dt_match[] = {
+ { .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064},
+ { .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064},
+ { .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960},
+ { .compatible = "qcom,tcsr-msm8660", .data = &config_msm8660},
+ { },
};
static int gsbi_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ struct device_node *tcsr_node;
+ const struct of_device_id *match;
struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
+ int i;
+ u32 mask, gsbi_num;
+ const struct crci_config *config = NULL;
gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
@@ -45,6 +152,32 @@ static int gsbi_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
+ /* get the TCSR node and set up the config and regmap */
+ gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr");
+
+ if (!IS_ERR(gsbi->tcsr)) {
+ tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0);
+ if (tcsr_node) {
+ match = of_match_node(tcsr_dt_match, tcsr_node);
+ if (match)
+ config = match->data;
+ else
+ dev_warn(&pdev->dev, "no matching TCSR\n");
+
+ of_node_put(tcsr_node);
+ }
+ }
+
+ if (of_property_read_u32(node, "cell-index", &gsbi_num)) {
+ dev_err(&pdev->dev, "missing cell-index\n");
+ return -EINVAL;
+ }
+
+ if (gsbi_num < 1 || gsbi_num > MAX_GSBI) {
+ dev_err(&pdev->dev, "invalid cell-index\n");
+ return -EINVAL;
+ }
+
if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
dev_err(&pdev->dev, "missing mode configuration\n");
return -EINVAL;
@@ -64,6 +197,25 @@ static int gsbi_probe(struct platform_device *pdev)
writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
base + GSBI_CTRL_REG);
+ /*
+ * Modify the TCSR to reflect the protocol mode and the ADM CRCI mux.
+ * Each GSBI has a pair of bits, one for RX and one for TX.
+ * SPI mode requires both bits cleared; all other modes set them.
+ */
+ if (config) {
+ for (i = 0; i < config->num_rows; i++) {
+ mask = config->array[i][gsbi_num - 1];
+
+ if (gsbi->mode == GSBI_PROT_SPI)
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, 0);
+ else
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, mask);
+
+ }
+ }
+
/* make sure the gsbi control write is not reordered */
wmb();
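
As a worked example of the TCSR loop above, using only the ipq8064 table and TCSR_ADM_CRCI_BASE from this patch: a hypothetical cell-index of 5 selects mask 0x000300, which is cleared at TCSR offsets 0x70 and 0x74 for SPI mode and set there for any other protocol. A small standalone sketch of that offset/mask computation:

#include <stdio.h>

#define TCSR_ADM_CRCI_BASE 0x70

int main(void)
{
	/* Row 0 of the ipq8064 table shown in the driver above. */
	const unsigned int row[12] = {
		0x000003, 0x00000c, 0x000030, 0x0000c0,
		0x000300, 0x000c00, 0x003000, 0x00c000,
		0x030000, 0x0c0000, 0x300000, 0xc00000
	};
	unsigned int gsbi_num = 5;	/* hypothetical cell-index */
	int i;

	for (i = 0; i < 2; i++)		/* ipq8064 has two identical rows */
		printf("row %d: TCSR offset 0x%x, mask 0x%06x\n",
		       i, TCSR_ADM_CRCI_BASE + 4 * i, row[gsbi_num - 1]);
	return 0;
}
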
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 198f96b7fb45..72b059081559 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -78,6 +78,7 @@ config SPI_ATMEL
config SPI_BCM2835
tristate "BCM2835 SPI controller"
depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on GPIOLIB
help
This selects a driver for the Broadcom BCM2835 SPI master.
@@ -302,7 +303,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
- depends on SOC_VF610 || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index f63864a893c5..37875cf942f7 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -164,13 +164,12 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
unsigned long xfer_time_us)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
- unsigned long timeout = jiffies +
- max(4 * xfer_time_us * HZ / 1000000, 2uL);
+ /* set the timeout to a maximum of 1 second of polling */
+ unsigned long timeout = jiffies + HZ;
/* enable HW block without interrupts */
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
- /* set timeout to 4x the expected time, or 2 jiffies */
/* loop until finished the transfer */
while (bs->rx_len) {
/* read from fifo as much as possible */
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 5ef6638d5e8a..840a4984d365 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -180,7 +180,6 @@ int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
- int retval;
unsigned long flags;
bitbang = spi_master_get_devdata(spi->master);
@@ -197,9 +196,11 @@ int spi_bitbang_setup(struct spi_device *spi)
if (!cs->txrx_word)
return -EINVAL;
- retval = bitbang->setup_transfer(spi, NULL);
- if (retval < 0)
- return retval;
+ if (bitbang->setup_transfer) {
+ int retval = bitbang->setup_transfer(spi, NULL);
+ if (retval < 0)
+ return retval;
+ }
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
@@ -295,9 +296,11 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
/* init (-1) or override (1) transfer params */
if (do_setup != 0) {
- status = bitbang->setup_transfer(spi, t);
- if (status < 0)
- break;
+ if (bitbang->setup_transfer) {
+ status = bitbang->setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ }
if (do_setup == -1)
do_setup = 0;
}
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 9c46a3058743..896add8cfd3b 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -24,6 +24,7 @@
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
+#include <linux/platform_device.h>
#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
@@ -269,17 +270,6 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
if (mspi->flags & SPI_CPM2) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
out_be16(spi_base, pram_ofs);
- } else {
- struct spi_pram __iomem *pram = spi_base;
- u16 rpbase = in_be16(&pram->rpbase);
-
- /* Microcode relocation patch applied? */
- if (rpbase) {
- pram_ofs = rpbase;
- } else {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- out_be16(spi_base, pram_ofs);
- }
}
iounmap(spi_base);
@@ -292,7 +282,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
- unsigned long pram_ofs;
unsigned long bds_ofs;
if (!(mspi->flags & SPI_CPM_MODE))
@@ -319,8 +308,26 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
}
}
- pram_ofs = fsl_spi_cpm_get_pram(mspi);
- if (IS_ERR_VALUE(pram_ofs)) {
+ if (mspi->flags & SPI_CPM1) {
+ struct resource *res;
+ void *pram;
+
+ res = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM, 1);
+ pram = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pram))
+ mspi->pram = NULL;
+ else
+ mspi->pram = pram;
+ } else {
+ unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);
+
+ if (IS_ERR_VALUE(pram_ofs))
+ mspi->pram = NULL;
+ else
+ mspi->pram = cpm_muram_addr(pram_ofs);
+ }
+ if (mspi->pram == NULL) {
dev_err(dev, "can't allocate spi parameter ram\n");
goto err_pram;
}
@@ -346,8 +353,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
goto err_dummy_rx;
}
- mspi->pram = cpm_muram_addr(pram_ofs);
-
mspi->tx_bd = cpm_muram_addr(bds_ofs);
mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
@@ -375,7 +380,8 @@ err_dummy_rx:
err_dummy_tx:
cpm_muram_free(bds_ofs);
err_bds:
- cpm_muram_free(pram_ofs);
+ if (!(mspi->flags & SPI_CPM1))
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
fsl_spi_free_dummy_rx();
return -ENOMEM;
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index d0a73a09a9bd..80d245ac846f 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -359,14 +359,16 @@ static void fsl_espi_rw_trans(struct spi_message *m,
struct fsl_espi_transfer *trans, u8 *rx_buff)
{
struct fsl_espi_transfer *espi_trans = trans;
- unsigned int n_tx = espi_trans->n_tx;
- unsigned int n_rx = espi_trans->n_rx;
+ unsigned int total_len = espi_trans->len;
struct spi_transfer *t;
u8 *local_buf;
u8 *rx_buf = rx_buff;
unsigned int trans_len;
unsigned int addr;
- int i, pos, loop;
+ unsigned int tx_only;
+ unsigned int rx_pos = 0;
+ unsigned int pos;
+ int i, loop;
local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
if (!local_buf) {
@@ -374,36 +376,48 @@ static void fsl_espi_rw_trans(struct spi_message *m,
return;
}
- for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) {
- trans_len = n_rx - pos;
- if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
- trans_len = SPCOM_TRANLEN_MAX - n_tx;
+ for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) {
+ trans_len = total_len - pos;
i = 0;
+ tx_only = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf) {
memcpy(local_buf + i, t->tx_buf, t->len);
i += t->len;
+ if (!t->rx_buf)
+ tx_only += t->len;
}
}
+ /* Add additional TX bytes to compensate for the SPCOM_TRANLEN_MAX limit */
+ if (loop > 0)
+ trans_len += tx_only;
+
+ if (trans_len > SPCOM_TRANLEN_MAX)
+ trans_len = SPCOM_TRANLEN_MAX;
+
+ /* Update device offset */
if (pos > 0) {
addr = fsl_espi_cmd2addr(local_buf);
- addr += pos;
+ addr += rx_pos;
fsl_espi_addr2cmd(addr, local_buf);
}
- espi_trans->n_tx = n_tx;
- espi_trans->n_rx = trans_len;
- espi_trans->len = trans_len + n_tx;
+ espi_trans->len = trans_len;
espi_trans->tx_buf = local_buf;
espi_trans->rx_buf = local_buf;
fsl_espi_do_trans(m, espi_trans);
- memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
+ /* If there is at least one RX byte then copy it to rx_buf */
+ if (tx_only < SPCOM_TRANLEN_MAX)
+ memcpy(rx_buf + rx_pos, espi_trans->rx_buf + tx_only,
+ trans_len - tx_only);
+
+ rx_pos += trans_len - tx_only;
if (loop > 0)
- espi_trans->actual_length += espi_trans->len - n_tx;
+ espi_trans->actual_length += espi_trans->len - tx_only;
else
espi_trans->actual_length += espi_trans->len;
}
@@ -418,6 +432,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
u8 *rx_buf = NULL;
unsigned int n_tx = 0;
unsigned int n_rx = 0;
+ unsigned int xfer_len = 0;
struct fsl_espi_transfer espi_trans;
list_for_each_entry(t, &m->transfers, transfer_list) {
@@ -427,11 +442,13 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
n_rx += t->len;
rx_buf = t->rx_buf;
}
+ if ((t->tx_buf) || (t->rx_buf))
+ xfer_len += t->len;
}
espi_trans.n_tx = n_tx;
espi_trans.n_rx = n_rx;
- espi_trans.len = n_tx + n_rx;
+ espi_trans.len = xfer_len;
espi_trans.actual_length = 0;
espi_trans.status = 0;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 4df8942058de..d1a5b9fc3eba 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1210,6 +1210,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
struct spi_transfer *t;
+ int status;
spi = m->spi;
mcspi = spi_master_get_devdata(master);
@@ -1229,7 +1230,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
tx_buf ? "tx" : "",
rx_buf ? "rx" : "",
t->bits_per_word);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
if (m->is_dma_mapped || len < DMA_MIN_BYTES)
@@ -1241,7 +1243,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
'T', len);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
}
if (mcspi_dma->dma_rx && rx_buf != NULL) {
@@ -1253,14 +1256,19 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
if (tx_buf != NULL)
dma_unmap_single(mcspi->dev, t->tx_dma,
len, DMA_TO_DEVICE);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
}
}
omap2_mcspi_work(mcspi, m);
+ /* spi_finalize_current_message() changes the status inside the
+ * spi_message, so save the status here. */
+ status = m->status;
+out:
spi_finalize_current_message(master);
- return 0;
+ return status;
}
static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 186924aa4740..f6bac9e77d06 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1023,7 +1023,6 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = id;
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e57eec0b2f46..bcc7c635d8e7 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1030,7 +1030,6 @@ static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = id;
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d5d7d2235163..50910d85df5a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -583,6 +583,15 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
rx_dev = master->dma_rx->device->dev;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /*
+ * Restore tx_buf and rx_buf to their original NULL values if they
+ * were replaced by the dummy buffers.
+ */
+ if (xfer->tx_buf == master->dummy_tx)
+ xfer->tx_buf = NULL;
+ if (xfer->rx_buf == master->dummy_rx)
+ xfer->rx_buf = NULL;
+
if (!master->can_dma(master, msg->spi, xfer))
continue;
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index bf1295e19f89..c8d99563d245 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -12,7 +12,6 @@ if SPMI
config SPMI_MSM_PMIC_ARB
tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
- depends on ARM
depends on IRQ_DOMAIN
depends on ARCH_QCOM || COMPILE_TEST
default ARCH_QCOM
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 20559ab3466d..d7119db49cfe 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,22 +26,18 @@
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
+#define PMIC_ARB_VERSION_V2_MIN 0x20010000
#define PMIC_ARB_INT_EN 0x0004
-/* PMIC Arbiter channel registers */
-#define PMIC_ARB_CMD(N) (0x0800 + (0x80 * (N)))
-#define PMIC_ARB_CONFIG(N) (0x0804 + (0x80 * (N)))
-#define PMIC_ARB_STATUS(N) (0x0808 + (0x80 * (N)))
-#define PMIC_ARB_WDATA0(N) (0x0810 + (0x80 * (N)))
-#define PMIC_ARB_WDATA1(N) (0x0814 + (0x80 * (N)))
-#define PMIC_ARB_RDATA0(N) (0x0818 + (0x80 * (N)))
-#define PMIC_ARB_RDATA1(N) (0x081C + (0x80 * (N)))
-
-/* Interrupt Controller */
-#define SPMI_PIC_OWNER_ACC_STATUS(M, N) (0x0000 + ((32 * (M)) + (4 * (N))))
-#define SPMI_PIC_ACC_ENABLE(N) (0x0200 + (4 * (N)))
-#define SPMI_PIC_IRQ_STATUS(N) (0x0600 + (4 * (N)))
-#define SPMI_PIC_IRQ_CLEAR(N) (0x0A00 + (4 * (N)))
+/* PMIC Arbiter channel registers offsets */
+#define PMIC_ARB_CMD 0x00
+#define PMIC_ARB_CONFIG 0x04
+#define PMIC_ARB_STATUS 0x08
+#define PMIC_ARB_WDATA0 0x10
+#define PMIC_ARB_WDATA1 0x14
+#define PMIC_ARB_RDATA0 0x18
+#define PMIC_ARB_RDATA1 0x1C
+#define PMIC_ARB_REG_CHNL(N) (0x800 + 0x4 * (N))
/* Mapping Table */
#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
@@ -52,6 +49,7 @@
#define SPMI_MAPPING_TABLE_LEN 255
#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
+#define PPID_TO_CHAN_TABLE_SZ BIT(12) /* PPID is 12 bits, chan is 1 byte */
/* Ownership Table */
#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
@@ -88,6 +86,7 @@ enum pmic_arb_cmd_op_code {
/* Maximum number of support PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS 256
+#define PMIC_ARB_MAX_CHNL 128
#define PMIC_ARB_PERIPH_ID_VALID (1 << 15)
#define PMIC_ARB_TIMEOUT_US 100
#define PMIC_ARB_MAX_TRANS_BYTES (8)
@@ -98,14 +97,17 @@ enum pmic_arb_cmd_op_code {
/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
+struct pmic_arb_ver_ops;
+
/**
* spmi_pmic_arb_dev - SPMI PMIC Arbiter object
*
- * @base: address of the PMIC Arbiter core registers.
+ * @rd_base: on v1 the "core", on v2 the "observer" register base from DT.
+ * @wr_base: on v1 the "core", on v2 the "chnls" register base from DT.
* @intr: address of the SPMI interrupt control registers.
* @cnfg: address of the PMIC Arbiter configuration registers.
* @lock: lock to synchronize accesses.
- * @channel: which channel to use for accesses.
+ * @channel: execution environment channel to use for accesses.
* @irq: PMIC ARB interrupt.
* @ee: the current Execution Environment
* @min_apid: minimum APID (used for bounding IRQ search)
@@ -113,10 +115,14 @@ enum pmic_arb_cmd_op_code {
* @mapping_table: in-memory copy of PPID -> APID mapping table.
* @domain: irq domain object for PMIC IRQ domain
* @spmic: SPMI controller object
- * @apid_to_ppid: cached mapping from APID to PPID
+ * @apid_to_ppid: in-memory copy of APID -> PPID mapping table.
+ * @ver_ops: version dependent operations.
+ * @ppid_to_chan: in-memory copy of PPID -> channel (APID) mapping table.
+ * v2 only.
*/
struct spmi_pmic_arb_dev {
- void __iomem *base;
+ void __iomem *rd_base;
+ void __iomem *wr_base;
void __iomem *intr;
void __iomem *cnfg;
raw_spinlock_t lock;
@@ -129,17 +135,54 @@ struct spmi_pmic_arb_dev {
struct irq_domain *domain;
struct spmi_controller *spmic;
u16 apid_to_ppid[256];
+ const struct pmic_arb_ver_ops *ver_ops;
+ u8 *ppid_to_chan;
+};
+
+/**
+ * pmic_arb_ver_ops: version dependent functionality.
+ *
+ * @non_data_cmd: on v1 issues an spmi non-data command.
+ * on v2 no HW support, returns -EOPNOTSUPP.
+ * @offset: on v1 offset of per-ee channel.
+ * on v2 offset of per-ee and per-ppid channel.
+ * @fmt_cmd: formats a GENI/SPMI command.
+ * @owner_acc_status: on v1 offset of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
+ * on v2 offset of SPMI_PIC_OWNERm_ACC_STATUSn.
+ * @acc_enable: on v1 offset of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
+ * on v2 offset of SPMI_PIC_ACC_ENABLEn.
+ * @irq_status: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
+ * on v2 offset of SPMI_PIC_IRQ_STATUSn.
+ * @irq_clear: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
+ * on v2 offset of SPMI_PIC_IRQ_CLEARn.
+ */
+struct pmic_arb_ver_ops {
+ /* spmi commands (read_cmd, write_cmd, cmd) functionality */
+ u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
+ u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
+ int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
+ /* Interrupts controller functionality (offset of PIC registers) */
+ u32 (*owner_acc_status)(u8 m, u8 n);
+ u32 (*acc_enable)(u8 n);
+ u32 (*irq_status)(u8 n);
+ u32 (*irq_clear)(u8 n);
};
static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
{
- return readl_relaxed(dev->base + offset);
+ return readl_relaxed(dev->rd_base + offset);
}
static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
u32 offset, u32 val)
{
- writel_relaxed(val, dev->base + offset);
+ writel_relaxed(val, dev->wr_base + offset);
+}
+
+static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
+ u32 offset, u32 val)
+{
+ writel_relaxed(val, dev->rd_base + offset);
}
/**
@@ -168,15 +211,16 @@ pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
pmic_arb_base_write(dev, reg, data);
}
-static int pmic_arb_wait_for_done(struct spmi_controller *ctrl)
+static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
+ void __iomem *base, u8 sid, u16 addr)
{
struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
u32 status = 0;
u32 timeout = PMIC_ARB_TIMEOUT_US;
- u32 offset = PMIC_ARB_STATUS(dev->channel);
+ u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
while (timeout--) {
- status = pmic_arb_base_read(dev, offset);
+ status = readl_relaxed(base + offset);
if (status & PMIC_ARB_STATUS_DONE) {
if (status & PMIC_ARB_STATUS_DENIED) {
@@ -211,28 +255,45 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl)
return -ETIMEDOUT;
}
-/* Non-data command */
-static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+static int
+pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u32 cmd;
int rc;
-
- /* Check for valid non-data command */
- if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
- return -EINVAL;
+ u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
- pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
- rc = pmic_arb_wait_for_done(ctrl);
+ pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0);
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
}
+static int
+pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Non-data command */
+static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+ struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+
+ dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
+
+ /* Check for valid non-data command */
+ if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
+ return -EINVAL;
+
+ return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
+}
+
static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, u8 *buf, size_t len)
{
@@ -241,10 +302,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u8 bc = len - 1;
u32 cmd;
int rc;
+ u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
- "pmic-arb supports 1..%d bytes per trans, but %d requested",
+ "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -259,20 +321,20 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+ cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
- pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
- rc = pmic_arb_wait_for_done(ctrl);
+ pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr);
if (rc)
goto done;
- pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel),
+ pa_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
min_t(u8, bc, 3));
if (bc > 3)
pa_read_data(pmic_arb, buf + 4,
- PMIC_ARB_RDATA1(pmic_arb->channel), bc - 4);
+ offset + PMIC_ARB_RDATA1, bc - 4);
done:
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
@@ -287,10 +349,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u8 bc = len - 1;
u32 cmd;
int rc;
+ u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
- "pmic-arb supports 1..%d bytes per trans, but:%d requested",
+ "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -307,19 +370,19 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+ cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
/* Write data to FIFOs */
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
- pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel)
- , min_t(u8, bc, 3));
+ pa_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
+ min_t(u8, bc, 3));
if (bc > 3)
pa_write_data(pmic_arb, buf + 4,
- PMIC_ARB_WDATA1(pmic_arb->channel), bc - 4);
+ offset + PMIC_ARB_WDATA1, bc - 4);
/* Start the transaction */
- pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
- rc = pmic_arb_wait_for_done(ctrl);
+ pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr);
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
@@ -376,7 +439,7 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
u32 status;
int id;
- status = readl_relaxed(pa->intr + SPMI_PIC_IRQ_STATUS(apid));
+ status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
while (status) {
id = ffs(status) - 1;
status &= ~(1 << id);
@@ -402,7 +465,7 @@ static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
for (i = first; i <= last; ++i) {
status = readl_relaxed(intr +
- SPMI_PIC_OWNER_ACC_STATUS(pa->ee, i));
+ pa->ver_ops->owner_acc_status(pa->ee, i));
while (status) {
id = ffs(status) - 1;
status &= ~(1 << id);
@@ -422,7 +485,7 @@ static void qpnpint_irq_ack(struct irq_data *d)
u8 data;
raw_spin_lock_irqsave(&pa->lock, flags);
- writel_relaxed(1 << irq, pa->intr + SPMI_PIC_IRQ_CLEAR(apid));
+ writel_relaxed(1 << irq, pa->intr + pa->ver_ops->irq_clear(apid));
raw_spin_unlock_irqrestore(&pa->lock, flags);
data = 1 << irq;
@@ -439,10 +502,11 @@ static void qpnpint_irq_mask(struct irq_data *d)
u8 data;
raw_spin_lock_irqsave(&pa->lock, flags);
- status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
if (status & SPMI_PIC_ACC_ENABLE_BIT) {
status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
- writel_relaxed(status, pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ writel_relaxed(status, pa->intr +
+ pa->ver_ops->acc_enable(apid));
}
raw_spin_unlock_irqrestore(&pa->lock, flags);
@@ -460,10 +524,10 @@ static void qpnpint_irq_unmask(struct irq_data *d)
u8 data;
raw_spin_lock_irqsave(&pa->lock, flags);
- status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
- pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ pa->intr + pa->ver_ops->acc_enable(apid));
}
raw_spin_unlock_irqrestore(&pa->lock, flags);
@@ -624,6 +688,91 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
return 0;
}
+/* v1 offset per ee */
+static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+{
+ return 0x800 + 0x80 * pa->channel;
+}
+
+/* v2 offset per ppid (chan) and per ee */
+static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+{
+ u16 ppid = (sid << 8) | (addr >> 8);
+ u8 chan = pa->ppid_to_chan[ppid];
+
+ return 0x1000 * pa->ee + 0x8000 * chan;
+}
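
The v2 offset helper above builds a 12-bit PPID from the slave ID and the upper byte of the register address, then indexes a per-EE, per-channel register window. A throwaway sketch of that arithmetic, with made-up example values:

#include <stdio.h>

int main(void)
{
	unsigned int sid  = 0x2;	/* hypothetical 4-bit slave id */
	unsigned int addr = 0x1a40;	/* hypothetical register address */
	unsigned int ee   = 0;		/* execution environment */
	unsigned int chan = 5;		/* would come from ppid_to_chan[] */
	unsigned int ppid = (sid << 8) | (addr >> 8);

	printf("ppid = 0x%03x, channel window offset = 0x%x\n",
	       ppid, 0x1000 * ee + 0x8000 * chan);
	return 0;
}
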
+
+static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
+{
+ return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+}
+
+static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
+{
+ return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
+}
+
+static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
+{
+ return 0x20 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
+{
+ return 0x100000 + 0x1000 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_acc_enable_v1(u8 n)
+{
+ return 0x200 + 0x4 * n;
+}
+
+static u32 pmic_arb_acc_enable_v2(u8 n)
+{
+ return 0x1000 * n;
+}
+
+static u32 pmic_arb_irq_status_v1(u8 n)
+{
+ return 0x600 + 0x4 * n;
+}
+
+static u32 pmic_arb_irq_status_v2(u8 n)
+{
+ return 0x4 + 0x1000 * n;
+}
+
+static u32 pmic_arb_irq_clear_v1(u8 n)
+{
+ return 0xA00 + 0x4 * n;
+}
+
+static u32 pmic_arb_irq_clear_v2(u8 n)
+{
+ return 0x8 + 0x1000 * n;
+}
+
+static const struct pmic_arb_ver_ops pmic_arb_v1 = {
+ .non_data_cmd = pmic_arb_non_data_cmd_v1,
+ .offset = pmic_arb_offset_v1,
+ .fmt_cmd = pmic_arb_fmt_cmd_v1,
+ .owner_acc_status = pmic_arb_owner_acc_status_v1,
+ .acc_enable = pmic_arb_acc_enable_v1,
+ .irq_status = pmic_arb_irq_status_v1,
+ .irq_clear = pmic_arb_irq_clear_v1,
+};
+
+static const struct pmic_arb_ver_ops pmic_arb_v2 = {
+ .non_data_cmd = pmic_arb_non_data_cmd_v2,
+ .offset = pmic_arb_offset_v2,
+ .fmt_cmd = pmic_arb_fmt_cmd_v2,
+ .owner_acc_status = pmic_arb_owner_acc_status_v2,
+ .acc_enable = pmic_arb_acc_enable_v2,
+ .irq_status = pmic_arb_irq_status_v2,
+ .irq_clear = pmic_arb_irq_clear_v2,
+};
+
static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
.map = qpnpint_irq_domain_map,
.xlate = qpnpint_irq_domain_dt_translate,
@@ -634,8 +783,10 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
struct spmi_pmic_arb_dev *pa;
struct spmi_controller *ctrl;
struct resource *res;
- u32 channel, ee;
+ void __iomem *core;
+ u32 channel, ee, hw_ver;
int err, i;
+ bool is_v1;
ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
if (!ctrl)
@@ -645,12 +796,65 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
pa->spmic = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- pa->base = devm_ioremap_resource(&ctrl->dev, res);
- if (IS_ERR(pa->base)) {
- err = PTR_ERR(pa->base);
+ core = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(core)) {
+ err = PTR_ERR(core);
goto err_put_ctrl;
}
+ hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
+ is_v1 = (hw_ver < PMIC_ARB_VERSION_V2_MIN);
+
+ dev_info(&ctrl->dev, "PMIC Arb Version-%d (0x%x)\n", (is_v1 ? 1 : 2),
+ hw_ver);
+
+ if (is_v1) {
+ pa->ver_ops = &pmic_arb_v1;
+ pa->wr_base = core;
+ pa->rd_base = core;
+ } else {
+ u8 chan;
+ u16 ppid;
+ u32 regval;
+
+ pa->ver_ops = &pmic_arb_v2;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "obsrvr");
+ pa->rd_base = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->rd_base)) {
+ err = PTR_ERR(pa->rd_base);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "chnls");
+ pa->wr_base = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->wr_base)) {
+ err = PTR_ERR(pa->wr_base);
+ goto err_put_ctrl;
+ }
+
+ pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
+ PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
+ if (!pa->ppid_to_chan) {
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+ /*
+ * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
+ * ppid_to_chan is an in-memory inverse of that table.
+ */
+ for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
+ regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
+ if (!regval)
+ continue;
+
+ ppid = (regval >> 8) & 0xFFF;
+ pa->ppid_to_chan[ppid] = chan;
+ }
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
pa->intr = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(pa->intr)) {
@@ -731,9 +935,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
if (err)
goto err_domain_remove;
- dev_dbg(&ctrl->dev, "PMIC Arb Version 0x%x\n",
- pmic_arb_base_read(pa, PMIC_ARB_VERSION));
-
return 0;
err_domain_remove:
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 1d92f5103ebf..94938436aef9 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -316,11 +317,6 @@ static int spmi_drv_probe(struct device *dev)
struct spmi_device *sdev = to_spmi_device(dev);
int err;
- /* Ensure the slave is in ACTIVE state */
- err = spmi_command_wakeup(sdev);
- if (err)
- goto fail_wakeup;
-
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -335,7 +331,6 @@ fail_probe:
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
-fail_wakeup:
return err;
}
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 75b3603906c1..f0d22cdb51cd 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
bool "SSB Broadcom MIPS core driver"
depends on SSB && MIPS
select SSB_SERIAL
+ select SSB_SFLASH
help
Driver for the Sonics Silicon Backplane attached
Broadcom MIPS core.
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 1173a091b402..09428412139e 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 7b986f9f213f..f87efef42252 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -16,7 +16,7 @@
#include <linux/serial_reg.h>
#include <linux/time.h>
#ifdef CONFIG_BCM47XX
-#include <bcm47xx_nvram.h>
+#include <linux/bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index d75b72ba2672..15a7ee3859dd 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -357,6 +357,15 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
pcicore_write32(pc, SSB_PCICORE_SBTOPCI2,
SSB_PCICORE_SBTOPCI_MEM | SSB_PCI_DMA);
+ /*
+ * Accessing PCI config space without a proper delay after device reset
+ * (not GPIO reset) was causing reboots on the WRT300N v1.0.
+ * A tested delay of 850 us lowered the reboot chance to 50-80%; 1000 us
+ * fixed it completely. Flushing all writes was also tested, without luck.
+ */
+ if (pc->dev->bus->chip_id == 0x4704)
+ usleep_range(1000, 2000);
+
/* Enable PCI bridge BAR0 prefetch and burst */
val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
ssb_extpci_write_config(pc, 0, 0, 0, PCI_COMMAND, &val, 2);
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 1e180c400f17..a48a7439a206 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -1135,6 +1135,8 @@ static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev)
case SSB_IDLOW_SSBREV_25: /* TODO - find the proper REJECT bit */
case SSB_IDLOW_SSBREV_27: /* same here */
return SSB_TMSLOW_REJECT; /* this is a guess */
+ case SSB_IDLOW_SSBREV:
+ break;
default:
WARN(1, KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev);
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index d140b733940c..c5c037ccf32c 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -310,7 +310,7 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
* be destroyed until all references to the file are dropped and
* ashmem_release is called.
*/
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ ret = __vfs_read(asma->file, buf, len, pos);
if (ret >= 0) {
/** Update backing file pos, since f_ops->read() doesn't */
asma->file->f_pos = *pos;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 0e3d8c7add24..b0b96ab31954 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1106,6 +1106,7 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
@@ -1118,8 +1119,12 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
ion_buffer_get(buffer);
mutex_unlock(&client->lock);
- dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR,
- NULL);
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buffer;
+
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ion_buffer_put(buffer);
return dmabuf;
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index ad35ed6e93f0..304ebff119ee 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -108,24 +108,16 @@ static int serial2002_tty_write(struct file *f, unsigned char *buf, int count)
{
const char __user *p = (__force const char __user *)buf;
int result;
+ loff_t offset = 0;
mm_segment_t oldfs;
oldfs = get_fs();
set_fs(KERNEL_DS);
- f->f_pos = 0;
- result = f->f_op->write(f, p, count, &f->f_pos);
+ result = __vfs_write(f, p, count, &offset);
set_fs(oldfs);
return result;
}
-static int serial2002_tty_readb(struct file *f, unsigned char *buf)
-{
- char __user *p = (__force char __user *)buf;
-
- f->f_pos = 0;
- return f->f_op->read(f, p, 1, &f->f_pos);
-}
-
static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
{
struct poll_wqueues table;
@@ -161,13 +153,15 @@ static int serial2002_tty_read(struct file *f, int timeout)
result = -1;
if (!IS_ERR(f)) {
mm_segment_t oldfs;
+ char __user *p = (__force char __user *)&ch;
+ loff_t offset = 0;
oldfs = get_fs();
set_fs(KERNEL_DS);
if (f->f_op->poll) {
serial2002_tty_read_poll_wait(f, timeout);
- if (serial2002_tty_readb(f, &ch) == 1)
+ if (__vfs_read(f, p, 1, &offset) == 1)
result = ch;
} else {
/* Device does not support poll, busy wait */
@@ -178,7 +172,7 @@ static int serial2002_tty_read(struct file *f, int timeout)
if (retries >= timeout)
break;
- if (serial2002_tty_readb(f, &ch) == 1) {
+ if (__vfs_read(f, p, 1, &offset) == 1) {
result = ch;
break;
}
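
Editorial note: both serial2002 call sites above converge on the same kernel-space I/O pattern: bracket the call with set_fs(KERNEL_DS), hand __vfs_read() a local loff_t instead of poking f->f_pos, and restore the old segment afterwards. A condensed sketch of the read side ("read_one_byte" is a hypothetical helper):

#include <linux/fs.h>
#include <linux/uaccess.h>

static int read_one_byte(struct file *f, unsigned char *ch)
{
	char __user *p = (__force char __user *)ch;
	loff_t offset = 0;			/* local offset, f->f_pos untouched */
	mm_segment_t oldfs = get_fs();
	int ret;

	set_fs(KERNEL_DS);			/* allow a kernel buffer where __user is expected */
	ret = __vfs_read(f, p, 1, &offset);
	set_fs(oldfs);

	return ret;				/* 1 on success */
}
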
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 8199b0a697bb..1cf24e4edf25 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r)
unsigned int start_flag;
unsigned int payload_size;
unsigned short packet_type;
- int dummy_cnt;
+ int total_len;
u32 packet_size_sum = r->offset;
int index;
int ret = TO_HOST_INVALID_PACKET;
@@ -176,10 +176,10 @@ static int up_to_host(struct mux_rx *r)
break;
}
- dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
+ total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
if (len - packet_size_sum <
- MUX_HEADER_SIZE + payload_size + dummy_cnt) {
+ total_len) {
pr_err("invalid payload : %d %d %04x\n",
payload_size, len, packet_type);
break;
@@ -202,7 +202,7 @@ static int up_to_host(struct mux_rx *r)
break;
}
- packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
+ packet_size_sum += total_len;
if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
ret = r->callback(NULL,
0,
@@ -361,7 +361,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
struct mux_pkt_header *mux_header;
struct mux_tx *t = NULL;
static u32 seq_num = 1;
- int dummy_cnt;
int total_len;
int ret;
unsigned long flags;
@@ -374,9 +373,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
spin_lock_irqsave(&mux_dev->write_lock, flags);
- dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4);
-
- total_len = len + MUX_HEADER_SIZE + dummy_cnt;
+ total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
t = alloc_mux_tx(total_len);
if (!t) {
@@ -392,7 +389,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
memcpy(t->buf+MUX_HEADER_SIZE, data, len);
- memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
+ memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE -
+ len);
t->len = total_len;
t->callback = cb;
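
Editorial note: the gdm_mux hunks above fold header, payload and pad bytes into a single ALIGN() so the padding is no longer counted twice; the zeroed tail is then simply total_len - MUX_HEADER_SIZE - len. A small sketch of that arithmetic with hypothetical helper names ("mux_frame_len", "mux_pad_len"):

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>

static inline size_t mux_frame_len(size_t hdr, size_t payload)
{
	/* header + payload, rounded up to the next 4-byte boundary */
	return ALIGN(hdr + payload, 4);
}

static inline size_t mux_pad_len(size_t hdr, size_t payload)
{
	/* bytes after the payload that must be zeroed */
	return mux_frame_len(hdr, payload) - hdr - payload;
}
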
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 6725467ef4d0..62c7bba75274 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -10,6 +10,7 @@ config LUSTRE_FS
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ depends on MULTIUSER
help
This option enables Lustre file system client support. Choose Y
here if you want to access a Lustre file system cluster. To compile
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index a260e99a4447..d72605864b0a 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -55,7 +55,9 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
if (PagePrivate(page))
page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
- cancel_dirty_page(page, PAGE_SIZE);
+ if (TestClearPageDirty(page))
+ account_page_cleaned(page, mapping);
+
ClearPageMappedToDisk(page);
ll_delete_from_page_cache(page);
}
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index fe1fd05423e9..5af01351306d 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -153,7 +153,7 @@ static int ll_ddelete(const struct dentry *de)
CDEBUG(D_DENTRY, "%s dentry %pd (%p, parent %p, inode %p) %s%s\n",
d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
- de, de, de->d_parent, de->d_inode,
+ de, de, de->d_parent, d_inode(de),
d_unhashed(de) ? "" : "hashed,",
list_empty(&de->d_subdirs) ? "" : "subdirs");
@@ -167,8 +167,8 @@ static int ll_ddelete(const struct dentry *de)
#if 0
/* if not ldlm lock for this inode, set i_nlink to 0 so that
* this inode can be recycled later b=20433 */
- if (de->d_inode && !find_cbdata(de->d_inode))
- clear_nlink(de->d_inode);
+ if (d_really_is_positive(de) && !find_cbdata(d_inode(de)))
+ clear_nlink(d_inode(de));
#endif
if (d_lustre_invalid((struct dentry *)de))
@@ -181,7 +181,7 @@ int ll_d_init(struct dentry *de)
LASSERT(de != NULL);
CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
- de, de, de->d_parent, de->d_inode,
+ de, de, de->d_parent, d_inode(de),
d_count(de));
if (de->d_fsdata == NULL) {
@@ -261,7 +261,7 @@ void ll_invalidate_aliases(struct inode *inode)
ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
dentry, dentry, dentry->d_parent,
- dentry->d_inode, dentry->d_flags);
+ d_inode(dentry), dentry->d_flags);
d_lustre_invalidate(dentry, 0);
}
@@ -309,7 +309,7 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
static int ll_revalidate_dentry(struct dentry *dentry,
unsigned int lookup_flags)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
/*
* if open&create is set, talk to MDS to make sure file is created if
@@ -329,7 +329,7 @@ static int ll_revalidate_dentry(struct dentry *dentry,
if (lookup_flags & LOOKUP_RCU)
return -ECHILD;
- do_statahead_enter(dir, &dentry, dentry->d_inode == NULL);
+ do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL);
ll_statahead_mark(dir, dentry);
return 1;
}
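
Editorial note: the Lustre hunks in this and the following llite files are a mechanical conversion from direct dentry->d_inode dereferences to the d_inode() / d_really_is_positive() / d_really_is_negative() accessors. A minimal sketch of the idiom, not taken from Lustre ("example_has_links" is a hypothetical helper):

#include <linux/types.h>
#include <linux/dcache.h>
#include <linux/fs.h>

static bool example_has_links(struct dentry *de)
{
	/* replaces an open-coded de->d_inode == NULL check */
	if (d_really_is_negative(de))
		return false;

	/* replaces a direct de->d_inode dereference */
	return d_inode(de)->i_nlink != 0;
}
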
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 85e74d18d1c7..4b44c634fcc3 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -388,7 +388,7 @@ int ll_file_release(struct inode *inode, struct file *file)
static int ll_intent_file_open(struct dentry *dentry, void *lmm,
int lmmsize, struct lookup_intent *itp)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct dentry *parent = dentry->d_parent;
const char *name = dentry->d_name.name;
@@ -413,7 +413,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
opc = LUSTRE_OPC_CREATE;
}
- op_data = ll_prep_md_op_data(NULL, parent->d_inode,
+ op_data = ll_prep_md_op_data(NULL, d_inode(parent),
inode, name, len,
O_RDWR, opc, NULL);
if (IS_ERR(op_data))
@@ -2896,7 +2896,7 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ptlrpc_request *req = NULL;
struct obd_export *exp;
int rc = 0;
@@ -2948,12 +2948,12 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
do_lookup() -> ll_revalidate_it(). We cannot use d_drop
here to preserve get_cwd functionality on 2.6.
Bug 10503 */
- if (!dentry->d_inode->i_nlink)
+ if (!d_inode(dentry)->i_nlink)
d_lustre_invalidate(dentry, 0);
ll_lookup_finish_locks(&oit, inode);
- } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
- struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+ } else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) {
+ struct ll_sb_info *sbi = ll_i2sbi(d_inode(dentry));
u64 valid = OBD_MD_FLGETATTR;
struct md_op_data *op_data;
int ealen = 0;
@@ -2991,7 +2991,7 @@ out:
static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int rc;
rc = __ll_inode_revalidate(dentry, ibits);
@@ -3019,7 +3019,7 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
{
- struct inode *inode = de->d_inode;
+ struct inode *inode = d_inode(de);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
int res = 0;
@@ -3135,9 +3135,7 @@ int ll_inode_permission(struct inode *inode, int mask)
/* -o localflock - only provides locally consistent flock locks */
struct file_operations ll_file_operations = {
- .read = new_sync_read,
.read_iter = ll_file_read_iter,
- .write = new_sync_write,
.write_iter = ll_file_write_iter,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
@@ -3150,9 +3148,7 @@ struct file_operations ll_file_operations = {
};
struct file_operations ll_file_operations_flock = {
- .read = new_sync_read,
.read_iter = ll_file_read_iter,
- .write = new_sync_write,
.write_iter = ll_file_write_iter,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
@@ -3168,9 +3164,7 @@ struct file_operations ll_file_operations_flock = {
/* These are for -o noflock - to return ENOSYS on flock calls */
struct file_operations ll_file_operations_noflock = {
- .read = new_sync_read,
.read_iter = ll_file_read_iter,
- .write = new_sync_write,
.write_iter = ll_file_write_iter,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
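
Editorial note: the file_operations hunks above drop the .read/.write entries that pointed at new_sync_read/new_sync_write; with .read_iter/.write_iter present, the VFS provides the synchronous wrappers itself. A minimal sketch of an iter-only table with hypothetical handlers ("my_read_iter", "my_write_iter"):

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/module.h>

static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;				/* placeholder: nothing to read */
}

static ssize_t my_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	size_t len = iov_iter_count(from);

	iov_iter_advance(from, len);		/* pretend everything was consumed */
	return len;
}

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	/* no .read/.write: the VFS supplies the sync wrappers */
	.read_iter	= my_read_iter,
	.write_iter	= my_write_iter,
};
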
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 2af1d7286250..5f918e3c4683 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -938,10 +938,8 @@ struct ll_cl_context {
};
struct vvp_thread_info {
- struct iovec vti_local_iov;
struct vvp_io_args vti_args;
struct ra_io_arg vti_ria;
- struct kiocb vti_kiocb;
struct ll_cl_context vti_io_ctx;
};
@@ -1490,7 +1488,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
dentry, dentry,
- dentry->d_parent, dentry->d_inode, d_count(dentry));
+ dentry->d_parent, d_inode(dentry), d_count(dentry));
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index bf1ec277a1dc..a27af7882170 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1166,7 +1166,7 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
struct md_open_data **mod)
{
struct lustre_md md;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *request = NULL;
int rc, ia_valid;
@@ -1290,7 +1290,7 @@ static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
*/
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ll_inode_info *lli = ll_i2info(inode);
struct md_op_data *op_data = NULL;
struct md_open_data *mod = NULL;
@@ -1465,7 +1465,7 @@ out:
int ll_setattr(struct dentry *de, struct iattr *attr)
{
- int mode = de->d_inode->i_mode;
+ int mode = d_inode(de)->i_mode;
if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
(ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 243a7840457f..db43b81386f7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -230,11 +230,11 @@ static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
static int ll_get_name(struct dentry *dentry, char *name,
struct dentry *child)
{
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
int rc;
struct ll_getname_data lgd = {
.lgd_name = name,
- .lgd_fid = ll_i2info(child->d_inode)->lli_fid,
+ .lgd_fid = ll_i2info(d_inode(child))->lli_fid,
.ctx.actor = ll_nfs_get_name_filldir,
};
@@ -282,7 +282,7 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
static struct dentry *ll_get_parent(struct dentry *dchild)
{
struct ptlrpc_request *req = NULL;
- struct inode *dir = dchild->d_inode;
+ struct inode *dir = d_inode(dchild);
struct ll_sb_info *sbi;
struct dentry *result = NULL;
struct mdt_body *body;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 49f1cb067ea2..5a25dcd10126 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -155,7 +155,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
list_for_each_entry_safe(child, tmp_subdir,
&dentry->d_subdirs,
d_child) {
- if (child->d_inode == NULL)
+ if (d_really_is_negative(child))
d_lustre_invalidate(child, 1);
}
}
@@ -392,7 +392,7 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
iput(inode);
CDEBUG(D_DENTRY,
"Reuse dentry %p inode %p refc %d flags %#x\n",
- new, new->d_inode, d_count(new), new->d_flags);
+ new, d_inode(new), d_count(new), new->d_flags);
return new;
}
}
@@ -401,7 +401,7 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
return ERR_PTR(rc);
d_add(de, inode);
CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
- de, de->d_inode, d_count(de), de->d_flags);
+ de, d_inode(de), d_count(de), de->d_flags);
return de;
}
@@ -448,7 +448,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
!it_disposition(it, DISP_OPEN_CREATE)) {
/* With DISP_OPEN_CREATE dentry will be
instantiated in ll_create_it. */

- LASSERT((*de)->d_inode == NULL);
+ LASSERT(d_inode(*de) == NULL);
d_instantiate(*de, inode);
}
@@ -541,7 +541,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
goto out;
}
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if ((it->it_op & IT_OPEN) && inode &&
!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) {
@@ -638,9 +638,9 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
*opened |= FILE_CREATED;
}
- if (dentry->d_inode && it_disposition(it, DISP_OPEN_OPEN)) {
+ if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
/* Open dentry. */
- if (S_ISFIFO(dentry->d_inode->i_mode)) {
+ if (S_ISFIFO(d_inode(dentry)->i_mode)) {
/* We cannot call open here as it would
* deadlock.
*/
@@ -862,8 +862,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
{
- if (child->d_inode)
- *fid = *ll_inode2fid(child->d_inode);
+ if (d_really_is_positive(child))
+ *fid = *ll_inode2fid(d_inode(child));
}
/**
@@ -1076,7 +1076,7 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
- struct inode *src = old_dentry->d_inode;
+ struct inode *src = d_inode(old_dentry);
struct ll_sb_info *sbi = ll_i2sbi(dir);
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 91442fab5725..c6c824356464 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -359,8 +359,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t file_offset)
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t file_offset)
{
struct lu_env *env;
struct cl_io *io;
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
* size changing by concurrent truncates and writes.
* 1. Need inode mutex to operate transient pages.
*/
- if (rw == READ)
+ if (iov_iter_rw(iter) == READ)
mutex_lock(&inode->i_mutex);
LASSERT(obj->cob_transient_pages == 0);
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
size_t offs;
count = min_t(size_t, iov_iter_count(iter), size);
- if (rw == READ) {
+ if (iov_iter_rw(iter) == READ) {
if (file_offset >= i_size_read(inode))
break;
if (file_offset + count > i_size_read(inode))
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
if (likely(result > 0)) {
int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
- result = ll_direct_IO_26_seg(env, io, rw, inode,
- file->f_mapping,
- result, file_offset,
- pages, n);
- ll_free_user_pages(pages, n, rw==READ);
+ result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
+ inode, file->f_mapping,
+ result, file_offset, pages,
+ n);
+ ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
}
if (unlikely(result <= 0)) {
/* If we can't allocate a large enough buffer
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
}
out:
LASSERT(obj->cob_transient_pages == 0);
- if (rw == READ)
+ if (iov_iter_rw(iter) == READ)
mutex_unlock(&inode->i_mutex);
if (tot_bytes > 0) {
- if (rw == WRITE) {
+ if (iov_iter_rw(iter) == WRITE) {
struct lov_stripe_md *lsm;
lsm = ccc_inode_lsm_get(inode);
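
Editorial note: the ->direct_IO() hunks above drop the explicit rw argument and derive the transfer direction from the iterator itself with iov_iter_rw(), which yields READ or WRITE for the whole iov_iter. A trivial sketch of the test ("example_is_read" is a hypothetical helper):

#include <linux/types.h>
#include <linux/fs.h>		/* READ / WRITE */
#include <linux/uio.h>

static bool example_is_read(struct iov_iter *iter)
{
	/* the whole iterator carries one direction */
	return iov_iter_rw(iter) == READ;
}
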
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index b75562c6b5de..7f8071242f23 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -880,7 +880,7 @@ static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct lookup_intent it = { .it_op = IT_GETATTR,
.d.lustre.it_lock_handle = 0 };
struct md_enqueue_info *minfo;
@@ -926,7 +926,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
static void ll_statahead_one(struct dentry *parent, const char *entry_name,
int entry_name_len)
{
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct ll_inode_info *lli = ll_i2info(dir);
struct ll_statahead_info *sai = lli->lli_sai;
struct dentry *dentry = NULL;
@@ -944,8 +944,8 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name,
rc = do_sa_lookup(dir, entry);
} else {
rc = do_sa_revalidate(dir, entry, dentry);
- if (rc == 1 && agl_should_run(sai, dentry->d_inode))
- ll_agl_add(sai, dentry->d_inode, entry->se_index);
+ if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
+ ll_agl_add(sai, d_inode(dentry), entry->se_index);
}
if (dentry != NULL)
@@ -968,7 +968,7 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name,
static int ll_agl_thread(void *arg)
{
struct dentry *parent = (struct dentry *)arg;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct ll_inode_info *plli = ll_i2info(dir);
struct ll_inode_info *clli;
struct ll_sb_info *sbi = ll_i2sbi(dir);
@@ -1042,7 +1042,7 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
sai, parent);
- plli = ll_i2info(parent->d_inode);
+ plli = ll_i2info(d_inode(parent));
task = kthread_run(ll_agl_thread, parent,
"ll_agl_%u", plli->lli_opendir_pid);
if (IS_ERR(task)) {
@@ -1059,7 +1059,7 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
static int ll_statahead_thread(void *arg)
{
struct dentry *parent = (struct dentry *)arg;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct ll_inode_info *plli = ll_i2info(dir);
struct ll_inode_info *clli;
struct ll_sb_info *sbi = ll_i2sbi(dir);
@@ -1604,7 +1604,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
ll_inode2fid(inode), &bits);
if (rc == 1) {
- if ((*dentryp)->d_inode == NULL) {
+ if (d_inode(*dentryp) == NULL) {
struct dentry *alias;
alias = ll_splice_alias(inode,
@@ -1614,13 +1614,13 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
return PTR_ERR(alias);
}
*dentryp = alias;
- } else if ((*dentryp)->d_inode != inode) {
+ } else if (d_inode(*dentryp) != inode) {
/* revalidate, but inode is recreated */
CDEBUG(D_READA,
"stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
*dentryp,
- (*dentryp)->d_inode->i_ino,
- (*dentryp)->d_inode->i_generation,
+ d_inode(*dentryp)->i_ino,
+ d_inode(*dentryp)->i_generation,
inode->i_ino,
inode->i_generation);
ll_sai_unplug(sai, entry);
@@ -1666,8 +1666,8 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
/* get parent reference count here, and put it in ll_statahead_thread */
parent = dget((*dentryp)->d_parent);
- if (unlikely(sai->sai_inode != parent->d_inode)) {
- struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
+ if (unlikely(sai->sai_inode != d_inode(parent))) {
+ struct ll_inode_info *nlli = ll_i2info(d_inode(parent));
CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
*dentryp,
@@ -1689,7 +1689,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
ll_sai_get(sai);
lli->lli_sai = sai;
- plli = ll_i2info(parent->d_inode);
+ plli = ll_i2info(d_inode(parent));
rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
"ll_sa_%u", plli->lli_opendir_pid));
thread = &sai->sai_thread;
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 686b6a574cc5..3711e671a4df 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -120,7 +120,7 @@ failed:
static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ptlrpc_request *request = NULL;
int rc;
char *symname = NULL;
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index b439936b4524..e0fcbe1395fd 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -214,7 +214,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
int ll_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
LASSERT(inode);
LASSERT(name);
@@ -267,7 +267,7 @@ int ll_setxattr(struct dentry *dentry, const char *name,
int ll_removexattr(struct dentry *dentry, const char *name)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
LASSERT(inode);
LASSERT(name);
@@ -457,7 +457,7 @@ out:
ssize_t ll_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
LASSERT(inode);
LASSERT(name);
@@ -545,7 +545,7 @@ out:
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int rc = 0, rc2 = 0;
struct lov_mds_md *lmm = NULL;
struct ptlrpc_request *request = NULL;
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index f28ffef0d1f0..e9d0691b21d3 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -279,7 +279,7 @@ struct region_info {
struct bcm2048_device {
struct i2c_client *client;
- struct video_device *videodev;
+ struct video_device videodev;
struct work_struct work;
struct completion compl;
struct mutex mutex;
@@ -1579,7 +1579,7 @@ static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i,
bcm2048_parse_rds_rt_block(bdev, i, index+2, crc);
}
-static int bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
+static void bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
{
int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
@@ -1615,8 +1615,6 @@ static int bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
match_b = 1;
}
}
-
- return 0;
}
static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i,
@@ -2273,7 +2271,7 @@ done:
*/
static const struct v4l2_file_operations bcm2048_fops = {
.owner = THIS_MODULE,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
/* for RDS read support */
.open = bcm2048_fops_open,
.release = bcm2048_fops_release,
@@ -2584,7 +2582,7 @@ static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
static struct video_device bcm2048_viddev_template = {
.fops = &bcm2048_fops,
.name = BCM2048_DRIVER_NAME,
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &bcm2048_ioctl_ops,
};
@@ -2603,13 +2601,6 @@ static int bcm2048_i2c_driver_probe(struct i2c_client *client,
goto exit;
}
- bdev->videodev = video_device_alloc();
- if (!bdev->videodev) {
- dev_dbg(&client->dev, "Failed to alloc video device.\n");
- err = -ENOMEM;
- goto free_bdev;
- }
-
bdev->client = client;
i2c_set_clientdata(client, bdev);
mutex_init(&bdev->mutex);
@@ -2622,16 +2613,16 @@ static int bcm2048_i2c_driver_probe(struct i2c_client *client,
client->name, bdev);
if (err < 0) {
dev_err(&client->dev, "Could not request IRQ\n");
- goto free_vdev;
+ goto free_bdev;
}
dev_dbg(&client->dev, "IRQ requested.\n");
} else {
dev_dbg(&client->dev, "IRQ not configured. Using timeouts.\n");
}
- *bdev->videodev = bcm2048_viddev_template;
- video_set_drvdata(bdev->videodev, bdev);
- if (video_register_device(bdev->videodev, VFL_TYPE_RADIO, radio_nr)) {
+ bdev->videodev = bcm2048_viddev_template;
+ video_set_drvdata(&bdev->videodev, bdev);
+ if (video_register_device(&bdev->videodev, VFL_TYPE_RADIO, radio_nr)) {
dev_dbg(&client->dev, "Could not register video device.\n");
err = -EIO;
goto free_irq;
@@ -2654,18 +2645,13 @@ static int bcm2048_i2c_driver_probe(struct i2c_client *client,
free_sysfs:
bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
free_registration:
- video_unregister_device(bdev->videodev);
- /* video_unregister_device frees bdev->videodev */
- bdev->videodev = NULL;
+ video_unregister_device(&bdev->videodev);
skip_release = 1;
free_irq:
if (client->irq)
free_irq(client->irq, bdev);
-free_vdev:
- if (!skip_release)
- video_device_release(bdev->videodev);
- i2c_set_clientdata(client, NULL);
free_bdev:
+ i2c_set_clientdata(client, NULL);
kfree(bdev);
exit:
return err;
@@ -2674,16 +2660,13 @@ exit:
static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
{
struct bcm2048_device *bdev = i2c_get_clientdata(client);
- struct video_device *vd;
if (!client->adapter)
return -ENODEV;
if (bdev) {
- vd = bdev->videodev;
-
bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
- video_unregister_device(vd);
+ video_unregister_device(&bdev->videodev);
if (bdev->power_state)
bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
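
Editorial note: the bcm2048 changes above embed the video_device in the driver state instead of allocating it separately, which is why the release callback becomes video_device_release_empty (nothing must be freed). A minimal sketch of the registration half of that pattern ("struct my_radio" and "my_register_video" are hypothetical):

#include <media/v4l2-dev.h>

struct my_radio {
	struct video_device videodev;		/* embedded, not a pointer */
};

static int my_register_video(struct my_radio *dev,
			     const struct video_device *tmpl, int radio_nr)
{
	dev->videodev = *tmpl;			/* copy the static template */
	dev->videodev.release = video_device_release_empty;
	video_set_drvdata(&dev->videodev, dev);

	return video_register_device(&dev->videodev, VFL_TYPE_RADIO, radio_nr);
}
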
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index a425f71dfb97..1bbb90ce0086 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1414,17 +1414,17 @@ static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
* __ipipe_get_format() - helper function for getting ipipe format
* @ipipe: pointer to ipipe private structure.
* @pad: pad number.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @which: wanted subdev format.
*
*/
static struct v4l2_mbus_framefmt *
__ipipe_get_format(struct vpfe_ipipe_device *ipipe,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&ipipe->subdev, cfg, pad);
return &ipipe->formats[pad];
}
@@ -1432,14 +1432,14 @@ __ipipe_get_format(struct vpfe_ipipe_device *ipipe,
/*
* ipipe_try_format() - Handle try format by pad subdev method
* @ipipe: VPFE ipipe device.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @pad: pad num.
* @fmt: pointer to v4l2 format structure.
* @which : wanted subdev format
*/
static void
ipipe_try_format(struct vpfe_ipipe_device *ipipe,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1475,22 +1475,22 @@ ipipe_try_format(struct vpfe_ipipe_device *ipipe,
/*
* ipipe_set_format() - Handle set format by pads subdev method
* @sd: pointer to v4l2 subdev structure
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
static int
-ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ipipe_try_format(ipipe, fh, fmt->pad, &fmt->format, fmt->which);
+ ipipe_try_format(ipipe, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
@@ -1512,11 +1512,11 @@ ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ipipe_get_format() - Handle get format by pads subdev method.
* @sd: pointer to v4l2 subdev structure.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure.
*/
static int
-ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
@@ -1524,7 +1524,7 @@ ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
fmt->format = ipipe->formats[fmt->pad];
else
- fmt->format = *(v4l2_subdev_get_try_format(fh, fmt->pad));
+ fmt->format = *(v4l2_subdev_get_try_format(sd, cfg, fmt->pad));
return 0;
}
@@ -1532,11 +1532,11 @@ ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ipipe_enum_frame_size() - enum frame sizes on pads
* @sd: pointer to v4l2 subdev structure.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @fse: pointer to v4l2_subdev_frame_size_enum structure.
*/
static int
-ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
@@ -1548,8 +1548,7 @@ ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipe_try_format(ipipe, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1559,8 +1558,7 @@ ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipe_try_format(ipipe, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1570,11 +1568,11 @@ ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ipipe_enum_mbus_code() - enum mbus codes for pads
* @sd: pointer to v4l2 subdev structure.
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*/
static int
-ipipe_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ipipe_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -1630,9 +1628,8 @@ static int ipipe_s_ctrl(struct v4l2_ctrl *ctrl)
* @sd: pointer to v4l2 subdev structure.
* @fh: V4L2 subdev file handle
*
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
+ * Initialize all pad formats with default values. Try formats are initialized
+ * on the file handle.
*/
static int
ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
@@ -1641,19 +1638,19 @@ ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = IPIPE_PAD_SINK;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipe_set_format(sd, fh, &format);
+ ipipe_set_format(sd, fh->pad, &format);
memset(&format, 0, sizeof(format));
format.pad = IPIPE_PAD_SOURCE;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipe_set_format(sd, fh, &format);
+ ipipe_set_format(sd, fh->pad, &format);
return 0;
}
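
Editorial note: the davinci_vpfe hunks above and in the following files track the V4L2 subdev API change in which pad operations receive a struct v4l2_subdev_pad_config instead of a subdev file handle, and the try-format helpers take the subdev plus that config. A minimal sketch of a get_fmt handler in the new form ("my_get_fmt" and "my_formats" are hypothetical; the ACTIVE case is reduced to an array lookup):

#include <media/v4l2-subdev.h>

static struct v4l2_mbus_framefmt my_formats[2];	/* per-pad active formats */

static int my_get_fmt(struct v4l2_subdev *sd,
		      struct v4l2_subdev_pad_config *cfg,
		      struct v4l2_subdev_format *fmt)
{
	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		/* try formats now live in the pad config, not the file handle */
		fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
	else
		fmt->format = my_formats[fmt->pad];

	return 0;
}
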
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index 17e105e7d892..8b230541b1d1 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -544,12 +544,12 @@ static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
/*
* ipipeif_enum_mbus_code() - Handle pixel format enumeration
* @sd: pointer to v4l2 subdev structure
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -577,11 +577,11 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
/*
* ipipeif_get_format() - Handle get format by pads subdev method
* @sd: pointer to v4l2 subdev structure
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
*/
static int
-ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -589,7 +589,7 @@ ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
fmt->format = ipipeif->formats[fmt->pad];
else
- fmt->format = *(v4l2_subdev_get_try_format(fh, fmt->pad));
+ fmt->format = *(v4l2_subdev_get_try_format(sd, cfg, fmt->pad));
return 0;
}
@@ -600,14 +600,14 @@ ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ipipeif_try_format() - Handle try format by pad subdev method
* @ipipeif: VPFE ipipeif device.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @pad: pad num.
* @fmt: pointer to v4l2 format structure.
* @which : wanted subdev format
*/
static void
ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -641,7 +641,7 @@ ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
}
static int
-ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -653,8 +653,7 @@ ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipeif_try_format(ipipeif, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -664,8 +663,7 @@ ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipeif_try_format(ipipeif, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -675,18 +673,18 @@ ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* __ipipeif_get_format() - helper function for getting ipipeif format
* @ipipeif: pointer to ipipeif private structure.
+ * @cfg: V4L2 subdev pad config
* @pad: pad number.
- * @fh: V4L2 subdev file handle.
* @which: wanted subdev format.
*
*/
static struct v4l2_mbus_framefmt *
__ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
return &ipipeif->formats[pad];
}
@@ -694,22 +692,22 @@ __ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
/*
* ipipeif_set_format() - Handle set format by pads subdev method
* @sd: pointer to v4l2 subdev structure
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
static int
-ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ipipeif_try_format(ipipeif, fh, fmt->pad, &fmt->format, fmt->which);
+ ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
@@ -756,9 +754,8 @@ static void ipipeif_set_default_config(struct vpfe_ipipeif_device *ipipeif)
* @sd: VPFE ipipeif V4L2 subdevice
* @fh: V4L2 subdev file handle
*
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
+ * Initialize all pad formats with default values. Try formats are initialized
+ * on the file handle.
*/
static int
ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
@@ -768,19 +765,19 @@ ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = IPIPEIF_PAD_SINK;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipeif_set_format(sd, fh, &format);
+ ipipeif_set_format(sd, fh->pad, &format);
memset(&format, 0, sizeof(format));
format.pad = IPIPEIF_PAD_SOURCE;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipeif_set_format(sd, fh, &format);
+ ipipeif_set_format(sd, fh->pad, &format);
ipipeif_set_default_config(ipipeif);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index bcf762bc233d..80907b464412 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -278,11 +278,11 @@ isif_config_format(struct vpfe_device *vpfe_dev, unsigned int pad)
/*
* isif_try_format() - Try video format on a pad
* @isif: VPFE isif device
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
*/
static void
-isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
+isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
unsigned int width = fmt->format.width;
@@ -1394,11 +1394,11 @@ static int isif_set_stream(struct v4l2_subdev *sd, int enable)
* __isif_get_format() - helper function for getting isif format
* @isif: pointer to isif private structure.
* @pad: pad number.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @which: wanted subdev format.
*/
static struct v4l2_mbus_framefmt *
-__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
+__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY) {
@@ -1407,32 +1407,32 @@ __isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
fmt.pad = pad;
fmt.which = which;
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&isif->subdev, cfg, pad);
}
return &isif->formats[pad];
}
/*
-* isif_set_format() - set format on pad
-* @sd : VPFE ISIF device
-* @fh : V4L2 subdev file handle
-* @fmt : pointer to v4l2 subdev format structure
-*
-* Return 0 on success or -EINVAL if format or pad is invalid
-*/
+ * isif_set_format() - set format on pad
+ * @sd : VPFE ISIF device
+ * @cfg : V4L2 subdev pad config
+ * @fmt : pointer to v4l2 subdev format structure
+ *
+ * Return 0 on success or -EINVAL if format or pad is invalid
+ */
static int
-isif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+isif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
struct v4l2_mbus_framefmt *format;
- format = __isif_get_format(isif, fh, fmt->pad, fmt->which);
+ format = __isif_get_format(isif, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- isif_try_format(isif, fh, fmt);
+ isif_try_format(isif, cfg, fmt);
memcpy(format, &fmt->format, sizeof(*format));
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
@@ -1447,20 +1447,20 @@ isif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* isif_get_format() - Retrieve the video format on a pad
* @sd: VPFE ISIF V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
static int
-isif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+isif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __isif_get_format(vpfe_isif, fh, fmt->pad, fmt->which);
+ format = __isif_get_format(vpfe_isif, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1472,11 +1472,11 @@ isif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* isif_enum_frame_size() - enum frame sizes on pads
* @sd: VPFE isif V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_frame_size_enum structure
*/
static int
-isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
@@ -1489,8 +1489,8 @@ isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
format.format.code = fse->code;
format.format.width = 1;
format.format.height = 1;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- isif_try_format(isif, fh, &format);
+ format.which = fse->which;
+ isif_try_format(isif, cfg, &format);
fse->min_width = format.format.width;
fse->min_height = format.format.height;
@@ -1501,8 +1501,8 @@ isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
format.format.code = fse->code;
format.format.width = -1;
format.format.height = -1;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- isif_try_format(isif, fh, &format);
+ format.which = fse->which;
+ isif_try_format(isif, cfg, &format);
fse->max_width = format.format.width;
fse->max_height = format.format.height;
@@ -1512,11 +1512,11 @@ isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* isif_enum_mbus_code() - enum mbus codes for pads
* @sd: VPFE isif V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*/
static int
-isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -1537,14 +1537,14 @@ isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* isif_pad_set_selection() - set crop rectangle on pad
* @sd: VPFE isif V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*
* Return 0 on success, -EINVAL if pad is invalid
*/
static int
isif_pad_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
@@ -1554,7 +1554,7 @@ isif_pad_set_selection(struct v4l2_subdev *sd,
if (sel->pad != ISIF_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- format = __isif_get_format(vpfe_isif, fh, sel->pad, sel->which);
+ format = __isif_get_format(vpfe_isif, cfg, sel->pad, sel->which);
if (format == NULL)
return -EINVAL;
@@ -1577,7 +1577,7 @@ isif_pad_set_selection(struct v4l2_subdev *sd,
} else {
struct v4l2_rect *rect;
- rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
+ rect = v4l2_subdev_get_try_crop(sd, cfg, ISIF_PAD_SINK);
memcpy(rect, &vpfe_isif->crop, sizeof(*rect));
}
return 0;
@@ -1586,14 +1586,14 @@ isif_pad_set_selection(struct v4l2_subdev *sd,
/*
* isif_pad_get_selection() - get crop rectangle on pad
* @sd: VPFE isif V4L2 subdevice
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*
* Return 0 on success, -EINVAL if pad is invalid
*/
static int
isif_pad_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
@@ -1605,7 +1605,7 @@ isif_pad_get_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_rect *rect;
- rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
+ rect = v4l2_subdev_get_try_crop(sd, cfg, ISIF_PAD_SINK);
memcpy(&sel->r, rect, sizeof(*rect));
} else {
sel->r = vpfe_isif->crop;
@@ -1619,9 +1619,8 @@ isif_pad_get_selection(struct v4l2_subdev *sd,
* @sd: VPFE isif V4L2 subdevice
* @fh: V4L2 subdev file handle
*
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
+ * Initialize all pad formats with default values. Try formats are initialized
+ * on the file handle.
*/
static int
isif_init_formats(struct v4l2_subdev *sd,
@@ -1632,27 +1631,27 @@ isif_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = ISIF_PAD_SINK;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = MAX_WIDTH;
format.format.height = MAX_HEIGHT;
- isif_set_format(sd, fh, &format);
+ isif_set_format(sd, fh->pad, &format);
memset(&format, 0, sizeof(format));
format.pad = ISIF_PAD_SOURCE;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = MAX_WIDTH;
format.format.height = MAX_HEIGHT;
- isif_set_format(sd, fh, &format);
+ isif_set_format(sd, fh->pad, &format);
memset(&sel, 0, sizeof(sel));
sel.pad = ISIF_PAD_SINK;
- sel.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.which = V4L2_SUBDEV_FORMAT_TRY;
sel.target = V4L2_SEL_TGT_CROP;
sel.r.width = MAX_WIDTH;
sel.r.height = MAX_HEIGHT;
- isif_pad_set_selection(sd, fh, &sel);
+ isif_pad_set_selection(sd, fh->pad, &sel);
return 0;
}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 7cc8d1b4d737..b6498137de56 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -1288,19 +1288,19 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
/*
* __resizer_get_format() - helper function for getting resizer format
* @sd: pointer to subdev.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @pad: pad number.
* @which: wanted subdev format.
* Return wanted mbus frame format.
*/
static struct v4l2_mbus_framefmt *
-__resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+__resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
if (&resizer->crop_resizer.subdev == sd)
return &resizer->crop_resizer.formats[pad];
if (&resizer->resizer_a.subdev == sd)
@@ -1313,13 +1313,13 @@ __resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* resizer_try_format() - Handle try format by pad subdev method
* @sd: pointer to subdev.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @pad: pad num.
* @fmt: pointer to v4l2 format structure.
* @which: wanted subdev format.
*/
static void
-resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1387,21 +1387,21 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* resizer_set_format() - Handle set format by pads subdev method
* @sd: pointer to v4l2 subdev structure
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(sd, fh, fmt->pad, fmt->which);
+ format = __resizer_get_format(sd, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- resizer_try_format(sd, fh, fmt->pad, &fmt->format, fmt->which);
+ resizer_try_format(sd, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
@@ -1447,16 +1447,16 @@ static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* resizer_get_format() - Retrieve the video format on a pad
* @sd: pointer to v4l2 subdev structure.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(sd, fh, fmt->pad, fmt->which);
+ format = __resizer_get_format(sd, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1468,11 +1468,11 @@ static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* resizer_enum_frame_size() - enum frame sizes on pads
* @sd: Pointer to subdevice.
- * @fh: V4L2 subdev file handle.
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_frame_size_enum structure.
*/
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt format;
@@ -1483,8 +1483,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- resizer_try_format(sd, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ resizer_try_format(sd, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1494,8 +1493,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- resizer_try_format(sd, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ resizer_try_format(sd, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1505,11 +1503,11 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
/*
* resizer_enum_mbus_code() - enum mbus codes for pads
* @sd: Pointer to subdevice.
- * @fh: V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad == RESIZER_PAD_SINK) {
@@ -1532,14 +1530,13 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
* @sd: Pointer to subdevice.
* @fh: V4L2 subdev file handle.
*
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
+ * Initialize all pad formats with default values. Try formats are
+ * initialized on the file handle.
*/
static int resizer_init_formats(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
- __u32 which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ __u32 which = V4L2_SUBDEV_FORMAT_TRY;
struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_subdev_format format;
@@ -1550,7 +1547,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh->pad, &format);
memset(&format, 0, sizeof(format));
format.pad = RESIZER_CROP_PAD_SOURCE;
@@ -1558,7 +1555,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_WIDTH;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh->pad, &format);
memset(&format, 0, sizeof(format));
format.pad = RESIZER_CROP_PAD_SOURCE2;
@@ -1566,7 +1563,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_WIDTH;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh->pad, &format);
} else if (&resizer->resizer_a.subdev == sd) {
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SINK;
@@ -1574,7 +1571,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh->pad, &format);
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SOURCE;
@@ -1582,7 +1579,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh->pad, &format);
} else if (&resizer->resizer_b.subdev == sd) {
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SINK;
@@ -1590,7 +1587,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh->pad, &format);
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SOURCE;
@@ -1598,7 +1595,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh->pad, &format);
}
return 0;
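
The hunks above follow the tree-wide switch of subdev pad operations from struct v4l2_subdev_fh to struct v4l2_subdev_pad_config. A minimal sketch of the resulting accessor pattern, assuming a hypothetical my_dev with an embedded subdev and a per-pad formats[] array:

#include <media/v4l2-subdev.h>

/* Hypothetical driver state: one subdev plus an ACTIVE format per pad. */
struct my_dev {
	struct v4l2_subdev subdev;
	struct v4l2_mbus_framefmt formats[2];
};

static struct v4l2_mbus_framefmt *
__my_get_format(struct my_dev *dev, struct v4l2_subdev_pad_config *cfg,
		unsigned int pad, enum v4l2_subdev_format_whence which)
{
	/* TRY formats now live in the pad config instead of the file handle */
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&dev->subdev, cfg, pad);
	/* ACTIVE formats stay in the driver's own per-pad array */
	return &dev->formats[pad];
}

The enum callbacks can then honour fse->which or code->which instead of hardcoding V4L2_SUBDEV_FORMAT_TRY, which is exactly what the conversions above do.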
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 293ffda503e0..52a8ffe560b1 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -244,7 +244,7 @@ dt3155_wait_prepare(struct vb2_queue *q)
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
- mutex_unlock(pd->vdev->lock);
+ mutex_unlock(pd->vdev.lock);
}
static void
@@ -252,7 +252,7 @@ dt3155_wait_finish(struct vb2_queue *q)
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
- mutex_lock(pd->vdev->lock);
+ mutex_lock(pd->vdev.lock);
}
static int
@@ -824,7 +824,7 @@ static struct video_device dt3155_vdev = {
.fops = &dt3155_fops,
.ioctl_ops = &dt3155_ioctl_ops,
.minor = -1,
- .release = video_device_release,
+ .release = video_device_release_empty,
.tvnorms = DT3155_CURRENT_NORM,
};
@@ -901,28 +901,24 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
return -ENODEV;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
- pd->vdev = video_device_alloc();
- if (!pd->vdev) {
- err = -ENOMEM;
- goto err_video_device_alloc;
- }
- *pd->vdev = dt3155_vdev;
+
+ pd->vdev = dt3155_vdev;
pci_set_drvdata(pdev, pd); /* for use in dt3155_remove() */
- video_set_drvdata(pd->vdev, pd); /* for use in video_fops */
+ video_set_drvdata(&pd->vdev, pd); /* for use in video_fops */
pd->users = 0;
pd->pdev = pdev;
INIT_LIST_HEAD(&pd->dmaq);
mutex_init(&pd->mux);
- pd->vdev->lock = &pd->mux; /* for locking v4l2_file_operations */
+ pd->vdev.lock = &pd->mux; /* for locking v4l2_file_operations */
spin_lock_init(&pd->lock);
pd->csr2 = csr2_init;
pd->config = config_init;
err = pci_enable_device(pdev);
if (err)
- goto err_enable_dev;
+ return err;
err = pci_request_region(pdev, 0, pci_name(pdev));
if (err)
goto err_req_region;
@@ -934,13 +930,13 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dt3155_init_board(pdev);
if (err)
goto err_init_board;
- err = video_register_device(pd->vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&pd->vdev, VFL_TYPE_GRABBER, -1);
if (err)
goto err_init_board;
if (dt3155_alloc_coherent(&pdev->dev, DT3155_CHUNK_SIZE,
DMA_MEMORY_MAP))
dev_info(&pdev->dev, "preallocated 8 buffers\n");
- dev_info(&pdev->dev, "/dev/video%i is ready\n", pd->vdev->minor);
+ dev_info(&pdev->dev, "/dev/video%i is ready\n", pd->vdev.minor);
return 0; /* success */
err_init_board:
@@ -949,10 +945,6 @@ err_pci_iomap:
pci_release_region(pdev, 0);
err_req_region:
pci_disable_device(pdev);
-err_enable_dev:
- video_device_release(pd->vdev);
-err_video_device_alloc:
- kfree(pd);
return err;
}
@@ -962,15 +954,10 @@ dt3155_remove(struct pci_dev *pdev)
struct dt3155_priv *pd = pci_get_drvdata(pdev);
dt3155_free_coherent(&pdev->dev);
- video_unregister_device(pd->vdev);
+ video_unregister_device(&pd->vdev);
pci_iounmap(pdev, pd->regs);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- /*
- * video_device_release() is invoked automatically
- * see: struct video_device dt3155_vdev
- */
- kfree(pd);
}
static const struct pci_device_id pci_ids[] = {
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.h b/drivers/staging/media/dt3155v4l/dt3155v4l.h
index 2e4f89d402e4..96f01a0c7581 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.h
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.h
@@ -178,7 +178,7 @@ struct dt3155_stats {
/**
* struct dt3155_priv - private data structure
*
- * @vdev: pointer to video_device structure
+ * @vdev: video_device structure
* @pdev: pointer to pci_dev structure
* @q: pointer to vb2_queue structure
* @curr_buf: pointer to current buffer
@@ -193,7 +193,7 @@ struct dt3155_stats {
* @config: local copy of config register
*/
struct dt3155_priv {
- struct video_device *vdev;
+ struct video_device vdev;
struct pci_dev *pdev;
struct vb2_queue *q;
struct vb2_buffer *curr_buf;
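
A sketch of the embedded-video_device pattern adopted above, with hypothetical names; because the video_device is no longer separately allocated, .release must be video_device_release_empty and the kfree()/video_device_release() error paths disappear:

#include <linux/mutex.h>
#include <media/v4l2-dev.h>

struct my_priv {
	struct video_device vdev;	/* embedded, not a pointer */
	struct mutex mux;
};

static int my_register(struct my_priv *pd, const struct video_device *tmpl)
{
	pd->vdev = *tmpl;			/* copy the static template */
	pd->vdev.release = video_device_release_empty;
	mutex_init(&pd->mux);
	pd->vdev.lock = &pd->mux;		/* serialize v4l2 file ops */
	video_set_drvdata(&pd->vdev, pd);
	return video_register_device(&pd->vdev, VFL_TYPE_GRABBER, -1);
}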
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
index 2a68582e7f71..a4cfcf57c99c 100644
--- a/drivers/staging/media/mn88472/mn88472.c
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -19,7 +19,7 @@
static int mn88472_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
- s->min_delay_ms = 400;
+ s->min_delay_ms = 800;
return 0;
}
@@ -178,8 +178,32 @@ static int mn88472_set_frontend(struct dvb_frontend *fe)
ret = regmap_write(dev->regmap[0], 0x46, 0x00);
ret = regmap_write(dev->regmap[0], 0xae, 0x00);
- ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
- ret = regmap_write(dev->regmap[0], 0xd9, 0xe3);
+
+ switch (dev->ts_mode) {
+ case SERIAL_TS_MODE:
+ ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
+ break;
+ case PARALLEL_TS_MODE:
+ ret = regmap_write(dev->regmap[2], 0x08, 0x00);
+ break;
+ default:
+ dev_dbg(&client->dev, "ts_mode error: %d\n", dev->ts_mode);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (dev->ts_clock) {
+ case VARIABLE_TS_CLOCK:
+ ret = regmap_write(dev->regmap[0], 0xd9, 0xe3);
+ break;
+ case FIXED_TS_CLOCK:
+ ret = regmap_write(dev->regmap[0], 0xd9, 0xe1);
+ break;
+ default:
+ dev_dbg(&client->dev, "ts_clock error: %d\n", dev->ts_clock);
+ ret = -EINVAL;
+ goto err;
+ }
/* Reset demod */
ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
@@ -201,6 +225,7 @@ static int mn88472_read_status(struct dvb_frontend *fe, fe_status_t *status)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
unsigned int utmp;
+ int lock = 0;
*status = 0;
@@ -211,21 +236,36 @@ static int mn88472_read_status(struct dvb_frontend *fe, fe_status_t *status)
switch (c->delivery_system) {
case SYS_DVBT:
+ ret = regmap_read(dev->regmap[0], 0x7F, &utmp);
+ if (ret)
+ goto err;
+ if ((utmp & 0xF) >= 0x09)
+ lock = 1;
+ break;
case SYS_DVBT2:
- /* FIXME: implement me */
- utmp = 0x08; /* DVB-C lock value */
+ ret = regmap_read(dev->regmap[2], 0x92, &utmp);
+ if (ret)
+ goto err;
+ if ((utmp & 0xF) >= 0x07)
+ *status |= FE_HAS_SIGNAL;
+ if ((utmp & 0xF) >= 0x0a)
+ *status |= FE_HAS_CARRIER;
+ if ((utmp & 0xF) >= 0x0d)
+ *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
break;
case SYS_DVBC_ANNEX_A:
ret = regmap_read(dev->regmap[1], 0x84, &utmp);
if (ret)
goto err;
+ if ((utmp & 0xF) >= 0x08)
+ lock = 1;
break;
default:
ret = -EINVAL;
goto err;
}
- if (utmp == 0x08)
+ if (lock)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
FE_HAS_SYNC | FE_HAS_LOCK;
@@ -242,6 +282,7 @@ static int mn88472_init(struct dvb_frontend *fe)
int ret, len, remaining;
const struct firmware *fw = NULL;
u8 *fw_file = MN88472_FIRMWARE;
+ unsigned int tmp;
dev_dbg(&client->dev, "\n");
@@ -257,6 +298,17 @@ static int mn88472_init(struct dvb_frontend *fe)
if (ret)
goto err;
+ /* check if firmware is already running */
+ ret = regmap_read(dev->regmap[0], 0xf5, &tmp);
+ if (ret)
+ goto err;
+
+ if (!(tmp & 0x1)) {
+ dev_info(&client->dev, "firmware already running\n");
+ dev->warm = true;
+ return 0;
+ }
+
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, &client->dev);
if (ret) {
@@ -270,7 +322,7 @@ static int mn88472_init(struct dvb_frontend *fe)
ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
if (ret)
- goto err;
+ goto firmware_release;
for (remaining = fw->size; remaining > 0;
remaining -= (dev->i2c_wr_max - 1)) {
@@ -283,13 +335,27 @@ static int mn88472_init(struct dvb_frontend *fe)
if (ret) {
dev_err(&client->dev,
"firmware download failed=%d\n", ret);
- goto err;
+ goto firmware_release;
}
}
+ /* parity check of firmware */
+ ret = regmap_read(dev->regmap[0], 0xf8, &tmp);
+ if (ret) {
+ dev_err(&client->dev,
+ "parity reg read failed=%d\n", ret);
+ goto err;
+ }
+ if (tmp & 0x10) {
+ dev_err(&client->dev,
+ "firmware parity check failed=0x%x\n", tmp);
+ goto err;
+ }
+ dev_err(&client->dev, "firmware parity check succeeded=0x%x\n", tmp);
+
ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
if (ret)
- goto err;
+ goto firmware_release;
release_firmware(fw);
fw = NULL;
@@ -298,10 +364,9 @@ static int mn88472_init(struct dvb_frontend *fe)
dev->warm = true;
return 0;
+firmware_release:
+ release_firmware(fw);
err:
- if (fw)
- release_firmware(fw);
-
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -336,6 +401,8 @@ static struct dvb_frontend_ops mn88472_ops = {
.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
.info = {
.name = "Panasonic MN88472",
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 7200000,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -396,6 +463,8 @@ static int mn88472_probe(struct i2c_client *client,
dev->i2c_wr_max = config->i2c_wr_max;
dev->xtal = config->xtal;
+ dev->ts_mode = config->ts_mode;
+ dev->ts_clock = config->ts_clock;
dev->client[0] = client;
dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
if (IS_ERR(dev->regmap[0])) {
diff --git a/drivers/staging/media/mn88472/mn88472_priv.h b/drivers/staging/media/mn88472/mn88472_priv.h
index b12b731e2d4e..9ba8c8b3823e 100644
--- a/drivers/staging/media/mn88472/mn88472_priv.h
+++ b/drivers/staging/media/mn88472/mn88472_priv.h
@@ -32,6 +32,8 @@ struct mn88472_dev {
fe_delivery_system_t delivery_system;
bool warm; /* FW running */
u32 xtal;
+ int ts_mode;
+ int ts_clock;
};
#endif
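
The new ts_mode/ts_clock switches are driven by fields copied from the attach-time config in probe. A hedged example of how a bridge driver might fill them; the header name and numeric values are placeholders, only the field and constant names come from the code above:

#include "mn88472.h"	/* assumed to declare struct mn88472_config and the
			 * SERIAL_TS_MODE/PARALLEL_TS_MODE and
			 * VARIABLE_TS_CLOCK/FIXED_TS_CLOCK constants */

static const struct mn88472_config demod_cfg = {
	.i2c_wr_max	= 22,		/* placeholder write chunk size */
	.xtal		= 20500000,	/* placeholder crystal frequency */
	.ts_mode	= SERIAL_TS_MODE,
	.ts_clock	= VARIABLE_TS_CLOCK,
};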
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/staging/media/mn88473/mn88473.c
index 5baeb03ab3d1..8b6736c70057 100644
--- a/drivers/staging/media/mn88473/mn88473.c
+++ b/drivers/staging/media/mn88473/mn88473.c
@@ -30,6 +30,7 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u32 if_frequency;
+ u64 tmp;
u8 delivery_system_val, if_val[3], bw_val[7];
dev_dbg(&client->dev,
@@ -62,32 +63,13 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
goto err;
}
- switch (c->delivery_system) {
- case SYS_DVBT:
- case SYS_DVBT2:
- if (c->bandwidth_hz <= 6000000) {
- /* IF 3570000 Hz, BW 6000000 Hz */
- memcpy(if_val, "\x24\x8e\x8a", 3);
- memcpy(bw_val, "\xe9\x55\x55\x1c\x29\x1c\x29", 7);
- } else if (c->bandwidth_hz <= 7000000) {
- /* IF 4570000 Hz, BW 7000000 Hz */
- memcpy(if_val, "\x2e\xcb\xfb", 3);
- memcpy(bw_val, "\xc8\x00\x00\x17\x0a\x17\x0a", 7);
- } else if (c->bandwidth_hz <= 8000000) {
- /* IF 4570000 Hz, BW 8000000 Hz */
- memcpy(if_val, "\x2e\xcb\xfb", 3);
- memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
- } else {
- ret = -EINVAL;
- goto err;
- }
- break;
- case SYS_DVBC_ANNEX_A:
- /* IF 5070000 Hz, BW 8000000 Hz */
- memcpy(if_val, "\x33\xea\xb3", 3);
+ if (c->bandwidth_hz <= 6000000) {
+ memcpy(bw_val, "\xe9\x55\x55\x1c\x29\x1c\x29", 7);
+ } else if (c->bandwidth_hz <= 7000000) {
+ memcpy(bw_val, "\xc8\x00\x00\x17\x0a\x17\x0a", 7);
+ } else if (c->bandwidth_hz <= 8000000) {
memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
- break;
- default:
+ } else {
ret = -EINVAL;
goto err;
}
@@ -109,17 +91,12 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
if_frequency = 0;
}
- switch (if_frequency) {
- case 3570000:
- case 4570000:
- case 5070000:
- break;
- default:
- dev_err(&client->dev, "IF frequency %d not supported\n",
- if_frequency);
- ret = -EINVAL;
- goto err;
- }
+ /* Calculate IF registers ( (1<<24)*IF / Xtal ) */
+ tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
+ dev->xtal);
+ if_val[0] = ((tmp >> 16) & 0xff);
+ if_val[1] = ((tmp >> 8) & 0xff);
+ if_val[2] = ((tmp >> 0) & 0xff);
ret = regmap_write(dev->regmap[2], 0x05, 0x00);
ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
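
The runtime computation replaces the hardcoded IF byte tables. As a quick sanity check, a small standalone C program evaluating the same rounded expression for IF = 3570000 Hz and the 25 MHz fallback crystal reproduces the old table entry 0x24 0x8e 0x8a:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t if_freq = 3570000, xtal = 25000000;
	/* (1 << 24) * IF / xtal, rounded to nearest */
	uint64_t tmp = (if_freq * (1ULL << 24) + xtal / 2) / xtal;

	printf("0x%02x 0x%02x 0x%02x\n",
	       (unsigned)(tmp >> 16) & 0xff,
	       (unsigned)(tmp >> 8) & 0xff,
	       (unsigned)tmp & 0xff);
	return 0;
}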
@@ -194,7 +171,10 @@ static int mn88473_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct i2c_client *client = fe->demodulator_priv;
struct mn88473_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
+ unsigned int utmp;
+ int lock = 0;
*status = 0;
@@ -203,8 +183,51 @@ static int mn88473_read_status(struct dvb_frontend *fe, fe_status_t *status)
goto err;
}
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
- FE_HAS_SYNC | FE_HAS_LOCK;
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ ret = regmap_read(dev->regmap[0], 0x62, &utmp);
+ if (ret)
+ goto err;
+ if (!(utmp & 0xA0)) {
+ if ((utmp & 0xF) >= 0x03)
+ *status |= FE_HAS_SIGNAL;
+ if ((utmp & 0xF) >= 0x09)
+ lock = 1;
+ }
+ break;
+ case SYS_DVBT2:
+ ret = regmap_read(dev->regmap[2], 0x8B, &utmp);
+ if (ret)
+ goto err;
+ if (!(utmp & 0x40)) {
+ if ((utmp & 0xF) >= 0x07)
+ *status |= FE_HAS_SIGNAL;
+ if ((utmp & 0xF) >= 0x0a)
+ *status |= FE_HAS_CARRIER;
+ if ((utmp & 0xF) >= 0x0d)
+ *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = regmap_read(dev->regmap[1], 0x85, &utmp);
+ if (ret)
+ goto err;
+ if (!(utmp & 0x40)) {
+ ret = regmap_read(dev->regmap[1], 0x89, &utmp);
+ if (ret)
+ goto err;
+ if (utmp & 0x01)
+ lock = 1;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (lock)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
+ FE_HAS_SYNC | FE_HAS_LOCK;
return 0;
err:
@@ -219,11 +242,23 @@ static int mn88473_init(struct dvb_frontend *fe)
int ret, len, remaining;
const struct firmware *fw = NULL;
u8 *fw_file = MN88473_FIRMWARE;
+ unsigned int tmp;
dev_dbg(&client->dev, "\n");
- if (dev->warm)
+ /* set cold state by default */
+ dev->warm = false;
+
+ /* check if firmware is already running */
+ ret = regmap_read(dev->regmap[0], 0xf5, &tmp);
+ if (ret)
+ goto err;
+
+ if (!(tmp & 0x1)) {
+ dev_info(&client->dev, "firmware already running\n");
+ dev->warm = true;
return 0;
+ }
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, &client->dev);
@@ -254,6 +289,20 @@ static int mn88473_init(struct dvb_frontend *fe)
}
}
+ /* parity check of firmware */
+ ret = regmap_read(dev->regmap[0], 0xf8, &tmp);
+ if (ret) {
+ dev_err(&client->dev,
+ "parity reg read failed=%d\n", ret);
+ goto err;
+ }
+ if (tmp & 0x10) {
+ dev_err(&client->dev,
+ "firmware parity check failed=0x%x\n", tmp);
+ goto err;
+ }
+ dev_err(&client->dev, "firmware parity check succeeded=0x%x\n", tmp);
+
ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
if (ret)
goto err;
@@ -297,6 +346,8 @@ static struct dvb_frontend_ops mn88473_ops = {
.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_AC},
.info = {
.name = "Panasonic MN88473",
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 7200000,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -356,6 +407,10 @@ static int mn88473_probe(struct i2c_client *client,
}
dev->i2c_wr_max = config->i2c_wr_max;
+ if (!config->xtal)
+ dev->xtal = 25000000;
+ else
+ dev->xtal = config->xtal;
dev->client[0] = client;
dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
if (IS_ERR(dev->regmap[0])) {
diff --git a/drivers/staging/media/mn88473/mn88473_priv.h b/drivers/staging/media/mn88473/mn88473_priv.h
index 78af112fb41d..ef6f01323ac9 100644
--- a/drivers/staging/media/mn88473/mn88473_priv.h
+++ b/drivers/staging/media/mn88473/mn88473_priv.h
@@ -31,6 +31,7 @@ struct mn88473_dev {
u16 i2c_wr_max;
fe_delivery_system_t delivery_system;
bool warm; /* FW running */
+ u32 xtal;
};
#endif
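
Both demodulators map the low nibble of a sync-state register onto cumulative fe_status_t flags. A sketch of that mapping as a standalone helper; the thresholds are the ones used in the DVB-T2 paths above, the helper name is illustrative:

#include <linux/dvb/frontend.h>

static fe_status_t t2_stage_to_status(unsigned int reg)
{
	fe_status_t s = 0;

	/* the nibble is a monotonically increasing sync stage */
	if ((reg & 0xf) >= 0x07)
		s |= FE_HAS_SIGNAL;
	if ((reg & 0xf) >= 0x0a)
		s |= FE_HAS_CARRIER;
	if ((reg & 0xf) >= 0x0d)
		s |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
	return s;
}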
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index b78643f907e7..072dac04a750 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_OMAP4
bool "OMAP 4 Camera support"
depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
depends on HAS_DMA
+ select MFD_SYSCON
select VIDEOBUF2_DMA_CONTIG
---help---
Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index e0ad5e520e2d..7ced940bd807 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -1386,6 +1387,16 @@ static int iss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iss);
+ /*
+ * TODO: When implementing DT support switch to syscon regmap lookup by
+ * phandle.
+ */
+ iss->syscon = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(iss->syscon)) {
+ ret = PTR_ERR(iss->syscon);
+ goto error;
+ }
+
/* Clocks */
ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
if (ret < 0)
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
index 734cfeeb0314..35df8b4709e6 100644
--- a/drivers/staging/media/omap4iss/iss.h
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -29,6 +29,8 @@
#include "iss_ipipe.h"
#include "iss_resizer.h"
+struct regmap;
+
#define to_iss_device(ptr_module) \
container_of(ptr_module, struct iss_device, ptr_module)
#define to_device(ptr_module) \
@@ -79,6 +81,7 @@ struct iss_reg {
/*
* struct iss_device - ISS device structure.
+ * @syscon: Regmap for the syscon register space
* @crashed: Bitmask of crashed entities (indexed by entity ID)
*/
struct iss_device {
@@ -93,6 +96,7 @@ struct iss_device {
struct resource *res[OMAP4_ISS_MEM_LAST];
void __iomem *regs[OMAP4_ISS_MEM_LAST];
+ struct regmap *syscon;
u64 raw_dmamask;
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 2d96fb3eca53..d7ff7698a067 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -828,17 +828,17 @@ static const struct iss_video_operations csi2_issvideo_ops = {
*/
static struct v4l2_mbus_framefmt *
-__csi2_get_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+__csi2_get_format(struct iss_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
return &csi2->formats[pad];
}
static void
-csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -868,7 +868,7 @@ csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
* compression.
*/
pixelcode = fmt->code;
- format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
/*
@@ -889,12 +889,12 @@ csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
/*
* csi2_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg : V4L2 subdev pad config
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -907,8 +907,8 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csi2_input_fmts[code->index];
} else {
- format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
+ code->which);
switch (code->index) {
case 0:
/* Passthrough sink pad code */
@@ -931,7 +931,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -943,7 +943,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -953,7 +953,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -963,17 +963,17 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
/*
* csi2_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -984,29 +984,29 @@ static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* csi2_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csi2_try_format(csi2, fh, fmt->pad, &fmt->format, fmt->which);
+ csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CSI2_PAD_SINK) {
- format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE,
+ format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, fmt->which);
+ csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
}
return 0;
@@ -1048,7 +1048,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- csi2_set_format(sd, fh, &format);
+ csi2_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c
index 7c3d55d811ef..748607f8918f 100644
--- a/drivers/staging/media/omap4iss/iss_csiphy.c
+++ b/drivers/staging/media/omap4iss/iss_csiphy.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/regmap.h>
#include "../../../../arch/arm/mach-omap2/control.h"
@@ -140,9 +141,11 @@ int omap4iss_csiphy_config(struct iss_device *iss,
* - bit [18] : CSIPHY1 CTRLCLK enable
* - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
*/
- cam_rx_ctrl = omap4_ctrl_pad_readl(
- OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
-
+ /*
+ * TODO: When implementing DT support specify the CONTROL_CAMERA_RX
+ * register offset in the syscon property instead of hardcoding it.
+ */
+ regmap_read(iss->syscon, 0x68, &cam_rx_ctrl);
if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
@@ -166,8 +169,7 @@ int omap4iss_csiphy_config(struct iss_device *iss,
cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
}
- omap4_ctrl_pad_writel(cam_rx_ctrl,
- OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+ regmap_write(iss->syscon, 0x68, cam_rx_ctrl);
/* Reset used lane count */
csi2->phy->used_data_lanes = 0;
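
A sketch of the syscon-based access that replaces the omap4_ctrl_pad helpers; the 0x68 offset is the CONTROL_CAMERA_RX value hardcoded above until DT support provides it, and the helper name is hypothetical:

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int camera_rx_set_bits(struct regmap *syscon, u32 mask)
{
	u32 val;
	int ret;

	ret = regmap_read(syscon, 0x68, &val);	/* read-modify-write */
	if (ret)
		return ret;
	val |= mask;
	return regmap_write(syscon, 0x68, val);
}

regmap_update_bits() would express the same read-modify-write in one call; the open-coded form above mirrors the structure of the converted driver code.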
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
index a1a46ef8319b..eaa82da30f50 100644
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -24,7 +24,7 @@
#include "iss_ipipe.h"
static struct v4l2_mbus_framefmt *
-__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which);
static const unsigned int ipipe_fmts[] = {
@@ -176,11 +176,11 @@ static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
}
static struct v4l2_mbus_framefmt *
-__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&ipipe->subdev, cfg, pad);
return &ipipe->formats[pad];
}
@@ -188,12 +188,12 @@ __ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
/*
* ipipe_try_format - Try video format on a pad
* @ipipe: ISS IPIPE device
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @pad: Pad number
* @fmt: Format
*/
static void
-ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_pad_config *cfg,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -220,7 +220,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
break;
case IPIPE_PAD_SOURCE_VP:
- format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SINK, which);
+ format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
@@ -236,12 +236,12 @@ ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
/*
* ipipe_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg : V4L2 subdev pad config
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -268,7 +268,7 @@ static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
@@ -280,7 +280,7 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -290,7 +290,7 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -300,19 +300,19 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
/*
* ipipe_get_format - Retrieve the video format on a pad
* @sd : ISP IPIPE V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -323,31 +323,31 @@ static int ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ipipe_set_format - Set the video format on a pad
* @sd : ISP IPIPE V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ipipe_try_format(ipipe, fh, fmt->pad, &fmt->format, fmt->which);
+ ipipe_try_format(ipipe, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == IPIPE_PAD_SINK) {
- format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SOURCE_VP,
+ format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ipipe_try_format(ipipe, fh, IPIPE_PAD_SOURCE_VP, format,
+ ipipe_try_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP, format,
fmt->which);
}
@@ -388,7 +388,7 @@ static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ipipe_set_format(sd, fh, &format);
+ ipipe_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
@@ -516,8 +516,6 @@ static int ipipe_init_entities(struct iss_ipipe_device *ipipe)
void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe)
{
- media_entity_cleanup(&ipipe->subdev.entity);
-
v4l2_device_unregister_subdev(&ipipe->subdev);
}
@@ -566,5 +564,7 @@ int omap4iss_ipipe_init(struct iss_device *iss)
*/
void omap4iss_ipipe_cleanup(struct iss_device *iss)
{
- /* FIXME: are you sure there's nothing to do? */
+ struct iss_ipipe_device *ipipe = &iss->ipipe;
+
+ media_entity_cleanup(&ipipe->subdev.entity);
}
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index 3943fae699ee..530ac8426b5b 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -361,24 +361,24 @@ static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
return &ipipeif->formats[pad];
}
/*
* ipipeif_try_format - Try video format on a pad
* @ipipeif: ISS IPIPEIF device
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @pad: Pad number
* @fmt: Format
*/
static void
ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -407,7 +407,7 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
break;
case IPIPEIF_PAD_SOURCE_ISIF_SF:
- format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -422,7 +422,7 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
break;
case IPIPEIF_PAD_SOURCE_VP:
- format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -441,12 +441,12 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
/*
* ipipeif_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg : V4L2 subdev pad config
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -466,8 +466,8 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
+ code->which);
code->code = format->code;
break;
@@ -480,7 +480,7 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -492,8 +492,7 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipeif_try_format(ipipeif, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -503,8 +502,7 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipeif_try_format(ipipeif, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -514,19 +512,19 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
/*
* ipipeif_get_format - Retrieve the video format on a pad
* @sd : ISP IPIPEIF V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -537,39 +535,39 @@ static int ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* ipipeif_set_format - Set the video format on a pad
* @sd : ISP IPIPEIF V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ipipeif_try_format(ipipeif, fh, fmt->pad, &fmt->format, fmt->which);
+ ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == IPIPEIF_PAD_SINK) {
- format = __ipipeif_get_format(ipipeif, fh,
+ format = __ipipeif_get_format(ipipeif, cfg,
IPIPEIF_PAD_SOURCE_ISIF_SF,
fmt->which);
*format = fmt->format;
- ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_ISIF_SF,
+ ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_ISIF_SF,
format, fmt->which);
- format = __ipipeif_get_format(ipipeif, fh,
+ format = __ipipeif_get_format(ipipeif, cfg,
IPIPEIF_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_VP, format,
+ ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_VP, format,
fmt->which);
}
@@ -612,7 +610,7 @@ static int ipipeif_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ipipeif_set_format(sd, fh, &format);
+ ipipeif_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
@@ -773,8 +771,6 @@ static int ipipeif_init_entities(struct iss_ipipeif_device *ipipeif)
void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif)
{
- media_entity_cleanup(&ipipeif->subdev.entity);
-
v4l2_device_unregister_subdev(&ipipeif->subdev);
omap4iss_video_unregister(&ipipeif->video_out);
}
@@ -828,5 +824,7 @@ int omap4iss_ipipeif_init(struct iss_device *iss)
*/
void omap4iss_ipipeif_cleanup(struct iss_device *iss)
{
- /* FIXME: are you sure there's nothing to do? */
+ struct iss_ipipeif_device *ipipeif = &iss->ipipeif;
+
+ media_entity_cleanup(&ipipeif->subdev.entity);
}
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index 3ab972818f1b..5f69012c4deb 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -420,24 +420,24 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__resizer_get_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(fh, pad);
+ return v4l2_subdev_get_try_format(&resizer->subdev, cfg, pad);
return &resizer->formats[pad];
}
/*
* resizer_try_format - Try video format on a pad
* @resizer: ISS RESIZER device
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @pad: Pad number
* @fmt: Format
*/
static void
resizer_try_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -465,7 +465,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
case RESIZER_PAD_SOURCE_MEM:
pixelcode = fmt->code;
- format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
+ format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -492,12 +492,12 @@ resizer_try_format(struct iss_resizer_device *resizer,
/*
* resizer_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
@@ -512,8 +512,8 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
break;
case RESIZER_PAD_SOURCE_MEM:
- format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
- V4L2_SUBDEV_FORMAT_TRY);
+ format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
+ code->which);
if (code->index == 0) {
code->code = format->code;
@@ -542,7 +542,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
}
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
@@ -554,8 +554,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- resizer_try_format(resizer, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -565,8 +564,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- resizer_try_format(resizer, fh, fse->pad, &format,
- V4L2_SUBDEV_FORMAT_TRY);
+ resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -576,19 +574,19 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
/*
* resizer_get_format - Retrieve the video format on a pad
* @sd : ISP RESIZER V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+ format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -599,32 +597,32 @@ static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/*
* resizer_set_format - Set the video format on a pad
* @sd : ISP RESIZER V4L2 subdevice
- * @fh : V4L2 subdev file handle
+ * @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+ format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- resizer_try_format(resizer, fh, fmt->pad, &fmt->format, fmt->which);
+ resizer_try_format(resizer, cfg, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == RESIZER_PAD_SINK) {
- format = __resizer_get_format(resizer, fh,
+ format = __resizer_get_format(resizer, cfg,
RESIZER_PAD_SOURCE_MEM,
fmt->which);
*format = fmt->format;
- resizer_try_format(resizer, fh, RESIZER_PAD_SOURCE_MEM, format,
+ resizer_try_format(resizer, cfg, RESIZER_PAD_SOURCE_MEM, format,
fmt->which);
}
@@ -667,7 +665,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
format.format.width = 4096;
format.format.height = 4096;
- resizer_set_format(sd, fh, &format);
+ resizer_set_format(sd, fh ? fh->pad : NULL, &format);
return 0;
}
@@ -817,8 +815,6 @@ static int resizer_init_entities(struct iss_resizer_device *resizer)
void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer)
{
- media_entity_cleanup(&resizer->subdev.entity);
-
v4l2_device_unregister_subdev(&resizer->subdev);
omap4iss_video_unregister(&resizer->video_out);
}
@@ -872,5 +868,7 @@ int omap4iss_resizer_init(struct iss_device *iss)
*/
void omap4iss_resizer_cleanup(struct iss_device *iss)
{
- /* FIXME: are you sure there's nothing to do? */
+ struct iss_resizer_device *resizer = &iss->resizer;
+
+ media_entity_cleanup(&resizer->subdev.entity);
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 55938cccde7f..85c54fedddda 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -171,14 +171,14 @@ static void iss_video_pix_to_mbus(const struct v4l2_pix_format *pix,
mbus->width = pix->width;
mbus->height = pix->height;
- for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ /* Skip the last format in the loop so that it will be selected if no
+ * match is found.
+ */
+ for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
if (formats[i].pixelformat == pix->pixelformat)
break;
}
- if (WARN_ON(i == ARRAY_SIZE(formats)))
- return;
-
mbus->code = formats[i].code;
mbus->colorspace = pix->colorspace;
mbus->field = pix->field;
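
Dropping the WARN_ON relies on the loop stopping one entry short of the table, so the final entry doubles as the default. The idiom in isolation, with an illustrative entry type and table:

#include <linux/kernel.h>
#include <linux/types.h>

struct fmt_info {
	u32 pixelformat;
	u32 code;
};

static const struct fmt_info formats[] = {
	/* specific entries elided for the sketch */
	{ 0, 0 },	/* last entry doubles as the fallback */
};

static const struct fmt_info *find_format_or_default(u32 pixelformat)
{
	unsigned int i;

	/* stop before the last entry so it is returned when nothing matches */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i)
		if (formats[i].pixelformat == pixelformat)
			break;
	return &formats[i];
}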
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index b7a7854d3f7e..5b9ac1f6d6f0 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -274,6 +274,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
/* Build the PKO command */
pko_command.u64 = 0;
+#ifdef __LITTLE_ENDIAN
+ pko_command.s.le = 1;
+#endif
pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
pko_command.s.segs = 1;
pko_command.s.total_bytes = skb->len;
@@ -410,7 +413,7 @@ dont_put_skbuff_in_hw:
/* Check if we can use the hardware checksumming */
if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
(ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
- ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
+ ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == htons(1 << 14)))
&& ((ip_hdr(skb)->protocol == IPPROTO_TCP)
|| (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
/* Use hardware checksum calc */
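
The added htons() matters because frag_off is stored in network byte order while 1 << 14 is the DF bit in host order. The same test written with the kernel's named constant, as a hypothetical helper:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ip.h>

/* IP_DF == 0x4000 == 1 << 14: hardware checksumming is only attempted for
 * packets that are either unfragmented or have only the DF flag set. */
static inline bool frag_off_allows_hw_csum(const struct sk_buff *skb)
{
	__be16 frag_off = ip_hdr(skb)->frag_off;

	return frag_off == 0 || frag_off == htons(IP_DF);
}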
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index f539d82f2f11..fbbe866485c7 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -170,6 +170,16 @@ static void cvm_oct_configure_common_hw(void)
cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+#ifdef __LITTLE_ENDIAN
+ {
+ union cvmx_ipd_ctl_status ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_status.s.pkt_lend = 1;
+ ipd_ctl_status.s.wqe_lend = 1;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+ }
+#endif
+
if (USE_RED)
cvmx_helper_setup_red(num_packet_buffers / 4,
num_packet_buffers / 8);
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 42fba3f5b593..cb0b6387789f 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -1900,23 +1900,20 @@ static int r871x_mp_ioctl_hdl(struct net_device *dev,
struct mp_ioctl_handler *phandler;
struct mp_ioctl_param *poidparam;
unsigned long BytesRead, BytesWritten, BytesNeeded;
- u8 *pparmbuf = NULL, bset;
+ u8 *pparmbuf, bset;
u16 len;
uint status;
int ret = 0;
- if ((!p->length) || (!p->pointer)) {
- ret = -EINVAL;
- goto _r871x_mp_ioctl_hdl_exit;
- }
+ if ((!p->length) || (!p->pointer))
+ return -EINVAL;
+
bset = (u8)(p->flags & 0xFFFF);
len = p->length;
- pparmbuf = NULL;
pparmbuf = memdup_user(p->pointer, len);
- if (IS_ERR(pparmbuf)) {
- ret = PTR_ERR(pparmbuf);
- goto _r871x_mp_ioctl_hdl_exit;
- }
+ if (IS_ERR(pparmbuf))
+ return PTR_ERR(pparmbuf);
+
poidparam = (struct mp_ioctl_param *)pparmbuf;
if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
ret = -EINVAL;
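
The general shape of the memdup_user() early-return cleanup applied above, as an illustrative helper:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int handle_user_blob(void __user *uptr, size_t len)
{
	u8 *buf;

	if (!len || !uptr)
		return -EINVAL;		/* nothing allocated, return directly */

	buf = memdup_user(uptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* no allocation to unwind yet */

	/* ... operate on the kernel copy here ... */

	kfree(buf);
	return 0;
}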
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index b139ed40cb27..bc95ce89af06 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -2574,6 +2574,7 @@ static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
};
static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name,
+ unsigned char name_assign_type,
struct net_device **ndev)
{
int ret = 0;
@@ -2606,6 +2607,7 @@ static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name,
mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
strncpy(mon_ndev->name, name, IFNAMSIZ);
mon_ndev->name[IFNAMSIZ - 1] = 0;
+ mon_ndev->name_assign_type = name_assign_type;
mon_ndev->destructor = rtw_ndev_destructor;
mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
@@ -2648,6 +2650,7 @@ out:
static struct wireless_dev *
cfg80211_rtw_add_virtual_intf(struct wiphy *wiphy, const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
@@ -2667,7 +2670,8 @@ cfg80211_rtw_add_virtual_intf(struct wiphy *wiphy, const char *name,
break;
case NL80211_IFTYPE_MONITOR:
ret =
- rtw_cfg80211_add_monitor_if(padapter, (char *)name, &ndev);
+ rtw_cfg80211_add_monitor_if(padapter, (char *)name,
+ name_assign_type, &ndev);
break;
case NL80211_IFTYPE_P2P_CLIENT:
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 3c7ea95dd9f9..dbbb2f879a29 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1250,7 +1250,7 @@ err_enable:
return -ENODEV;
}
-static void __exit lynxfb_pci_remove(struct pci_dev *pdev)
+static void lynxfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info;
struct lynx_share *share;
diff --git a/drivers/staging/unisys/include/timskmod.h b/drivers/staging/unisys/include/timskmod.h
index 5a933d7bf39f..cde2494ad896 100644
--- a/drivers/staging/unisys/include/timskmod.h
+++ b/drivers/staging/unisys/include/timskmod.h
@@ -46,7 +46,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fcntl.h>
-#include <linux/aio.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 1cdcf49b2445..e00c0605d154 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -362,12 +362,16 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, u8 bb_type)
* Return Value: none
*/
bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
- u64 qwBSSTimestamp, u64 qwLocalTSF)
+ u64 qwBSSTimestamp)
{
+ u64 local_tsf;
u64 qwTSFOffset = 0;
- if (qwBSSTimestamp != qwLocalTSF) {
- qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF);
+ CARDbGetCurrentTSF(pDevice, &local_tsf);
+
+ if (qwBSSTimestamp != local_tsf) {
+ qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
+ local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32));
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 2dfc41952271..16cca49e680a 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -83,7 +83,7 @@ bool CARDbRadioPowerOff(struct vnt_private *);
bool CARDbRadioPowerOn(struct vnt_private *);
bool CARDbSetPhyParameter(struct vnt_private *, u8);
bool CARDbUpdateTSF(struct vnt_private *, unsigned char byRxRate,
- u64 qwBSSTimestamp, u64 qwLocalTSF);
+ u64 qwBSSTimestamp);
bool CARDbSetBeaconPeriod(struct vnt_private *, unsigned short wBeaconInterval);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 4bb4f8ee4132..0343ae386f03 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -912,7 +912,11 @@ static int vnt_int_report_rate(struct vnt_private *priv,
if (!(tsr1 & TSR1_TERR)) {
info->status.rates[0].idx = idx;
- info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ info->flags |= IEEE80211_TX_STAT_ACK;
}
return 0;
@@ -937,9 +941,6 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
/* Only the status of first TD in the chain is correct */
if (pTD->m_td1TD1.byTCR & TCR_STP) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
-
- vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
-
if (!(byTsr1 & TSR1_TERR)) {
if (byTsr0 != 0) {
pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
@@ -958,6 +959,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
(int)uIdx, byTsr1, byTsr0);
}
}
+
+ vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
+
device_free_tx_buf(pDevice, pTD);
pDevice->iTDUsed[uIdx]--;
}
@@ -989,10 +993,8 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
skb->len, DMA_TO_DEVICE);
}
- if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
+ if (skb)
ieee80211_tx_status_irqsafe(pDevice->hw, skb);
- else
- dev_kfree_skb_irq(skb);
pTDInfo->skb_dma = 0;
pTDInfo->skb = NULL;
@@ -1204,14 +1206,6 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
if (dma_idx == TYPE_AC0DMA)
head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
- priv->iTDUsed[dma_idx]++;
-
- /* Take ownership */
- wmb();
- head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
-
- /* get Next */
- wmb();
priv->apCurrTD[dma_idx] = head_td->next;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1232,11 +1226,18 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
+ /* Poll Transmit the adapter */
+ wmb();
+ head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
+ wmb(); /* second memory barrier */
+
if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
MACvTransmitAC0(priv->PortOffset);
else
MACvTransmit0(priv->PortOffset);
+ priv->iTDUsed[dma_idx]++;
+
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -1416,9 +1417,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->current_aid = conf->aid;
- if (changed & BSS_CHANGED_BSSID)
+ if (changed & BSS_CHANGED_BSSID) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
if (changed & BSS_CHANGED_BASIC_RATES) {
priv->basic_rates = conf->basic_rates;
@@ -1477,7 +1485,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
if (conf->assoc) {
CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
- conf->sync_device_ts, conf->sync_tsf);
+ conf->sync_tsf);
CARDbSetBeaconPeriod(priv, conf->beacon_int);
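
The reordered transmit path hands the descriptor to the NIC only after it is fully written, with a write barrier in between. A sketch of that ordering with a hypothetical descriptor; OWNED_BY_NIC is defined locally for the example:

#include <asm/barrier.h>
#include <linux/types.h>

#define OWNED_BY_NIC	1

struct my_td {
	__le32 buff_addr;
	u8 owner;
};

static void hand_descriptor_to_nic(struct my_td *td, u32 dma)
{
	td->buff_addr = cpu_to_le32(dma);	/* fill the descriptor first */
	wmb();			/* make the fields visible before the owner bit */
	td->owner = OWNED_BY_NIC;		/* NIC may now DMA the buffer */
}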
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index f6c2cf8590c4..5c589962a1e8 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -805,10 +805,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
}
- if (current_rate > RATE_11M)
- pkt_type = priv->packet_type;
- else
+ if (current_rate > RATE_11M) {
+ if (info->band == IEEE80211_BAND_5GHZ) {
+ pkt_type = PK_TYPE_11A;
+ } else {
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ pkt_type = PK_TYPE_11GB;
+ else
+ pkt_type = PK_TYPE_11GA;
+ }
+ } else {
pkt_type = PK_TYPE_11B;
+ }
spin_lock_irqsave(&priv->lock, flags);
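
The band/protection logic added above reduces to a small pure function. Restated here as a hedged sketch that borrows the driver's RATE_11M and PK_TYPE_* constants (and the era's enum ieee80211_band) but uses an otherwise made-up helper name:

	/* Same decision as the hunk above, written as one helper. */
	static u8 demo_pick_pkt_type(u16 rate, enum ieee80211_band band, bool use_cts)
	{
		if (rate <= RATE_11M)
			return PK_TYPE_11B;			/* CCK rates */
		if (band == IEEE80211_BAND_5GHZ)
			return PK_TYPE_11A;			/* pure OFDM, no 11b protection */
		return use_cts ? PK_TYPE_11GB : PK_TYPE_11GA;	/* 2.4 GHz OFDM */
	}
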
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 81d44c477a5b..257361280510 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,12 +31,13 @@ config TCM_PSCSI
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
-config TCM_USER
+config TCM_USER2
tristate "TCM/USER Subsystem Plugin for Linux"
depends on UIO && NET
help
Say Y here to enable the TCM/USER subsystem plugin for a userspace
- process to handle requests
+ process to handle requests. This is version 2 of the ABI; version 1
+ is obsolete.
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index bbb4a7d638ef..e619c0266a79 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
-obj-$(CONFIG_TCM_USER) += target_core_user.o
+obj-$(CONFIG_TCM_USER2) += target_core_user.o
# Fabric modules
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 13a92403fe3e..0f43be9c3453 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -1,6 +1,5 @@
iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_seq_pdu_list.o \
- iscsi_target_tq.o \
iscsi_target_auth.o \
iscsi_target_datain_values.o \
iscsi_target_device.o \
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 77d64251af40..34871a628b11 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -33,8 +33,6 @@
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
-#include "iscsi_target_tq.h"
-#include "iscsi_target_configfs.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
@@ -537,7 +535,7 @@ static struct iscsit_transport iscsi_target_transport = {
static int __init iscsi_target_init_module(void)
{
- int ret = 0;
+ int ret = 0, size;
pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
@@ -546,24 +544,21 @@ static int __init iscsi_target_init_module(void)
pr_err("Unable to allocate memory for iscsit_global\n");
return -1;
}
+ spin_lock_init(&iscsit_global->ts_bitmap_lock);
mutex_init(&auth_id_lock);
spin_lock_init(&sess_idr_lock);
idr_init(&tiqn_idr);
idr_init(&sess_idr);
- ret = iscsi_target_register_configfs();
- if (ret < 0)
+ ret = target_register_template(&iscsi_ops);
+ if (ret)
goto out;
- ret = iscsi_thread_set_init();
- if (ret < 0)
+ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
+ iscsit_global->ts_bitmap = vzalloc(size);
+ if (!iscsit_global->ts_bitmap) {
+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
goto configfs_out;
-
- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
- TARGET_THREAD_SET_COUNT) {
- pr_err("iscsi_allocate_thread_sets() returned"
- " unexpected value!\n");
- goto ts_out1;
}
lio_qr_cache = kmem_cache_create("lio_qr_cache",
@@ -572,7 +567,7 @@ static int __init iscsi_target_init_module(void)
if (!lio_qr_cache) {
pr_err("nable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto ts_out2;
+ goto bitmap_out;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -617,12 +612,13 @@ dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
-ts_out2:
- iscsi_deallocate_thread_sets();
-ts_out1:
- iscsi_thread_set_free();
+bitmap_out:
+ vfree(iscsit_global->ts_bitmap);
configfs_out:
- iscsi_target_deregister_configfs();
+ /* XXX: this probably wants to be its own unwind step. */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+ target_unregister_template(&iscsi_ops);
out:
kfree(iscsit_global);
return -ENOMEM;
@@ -630,8 +626,6 @@ out:
static void __exit iscsi_target_cleanup_module(void)
{
- iscsi_deallocate_thread_sets();
- iscsi_thread_set_free();
iscsit_release_discovery_tpg();
iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_qr_cache);
@@ -639,8 +633,15 @@ static void __exit iscsi_target_cleanup_module(void)
kmem_cache_destroy(lio_ooo_cache);
kmem_cache_destroy(lio_r2t_cache);
- iscsi_target_deregister_configfs();
+ /*
+ * Shutdown discovery sessions and disable discovery TPG
+ */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+ target_unregister_template(&iscsi_ops);
+
+ vfree(iscsit_global->ts_bitmap);
kfree(iscsit_global);
}
@@ -990,7 +991,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
- transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, be32_to_cpu(hdr->data_length),
cmd->data_direction, sam_task_attr,
cmd->sense_buffer + 2);
@@ -1805,8 +1806,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
u8 tcm_function;
int ret;
- transport_init_se_cmd(&cmd->se_cmd,
- &lio_target_fabric_configfs->tf_ops,
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
@@ -2155,7 +2155,6 @@ reject:
cmd->text_in_ptr = NULL;
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
-EXPORT_SYMBOL(iscsit_handle_text_cmd);
int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
@@ -3715,17 +3714,16 @@ static int iscsit_send_reject(
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
- struct iscsi_thread_set *ts = conn->thread_set;
int ord, cpu;
/*
- * thread_id is assigned from iscsit_global->ts_bitmap from
- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+ * bitmap_id is assigned from iscsit_global->ts_bitmap from
+ * within iscsit_start_kthreads()
*
- * Here we use thread_id to determine which CPU that this
- * iSCSI connection's iscsi_thread_set will be scheduled to
+ * Here we use bitmap_id to determine which CPU that this
+ * iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
@@ -3914,7 +3912,7 @@ check_rsp_state:
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
- goto restart;
+ return -ECONNRESET;
/* fall through */
case ISTATE_SEND_STATUS:
case ISTATE_SEND_ASYNCMSG:
@@ -3942,8 +3940,6 @@ check_rsp_state:
err:
return -1;
-restart:
- return -EAGAIN;
}
static int iscsit_handle_response_queue(struct iscsi_conn *conn)
@@ -3970,21 +3966,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
int iscsi_target_tx_thread(void *arg)
{
int ret = 0;
- struct iscsi_conn *conn;
- struct iscsi_thread_set *ts = arg;
+ struct iscsi_conn *conn = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);
-restart:
- conn = iscsi_tx_thread_pre_handler(ts);
- if (!conn)
- goto out;
-
- ret = 0;
-
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3993,11 +3981,9 @@ restart:
iscsit_thread_check_cpumask(conn, current, 1);
wait_event_interruptible(conn->queues_wq,
- !iscsit_conn_all_queues_empty(conn) ||
- ts->status == ISCSI_THREAD_SET_RESET);
+ !iscsit_conn_all_queues_empty(conn));
- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
- signal_pending(current))
+ if (signal_pending(current))
goto transport_err;
get_immediate:
@@ -4008,15 +3994,14 @@ get_immediate:
ret = iscsit_handle_response_queue(conn);
if (ret == 1)
goto get_immediate;
- else if (ret == -EAGAIN)
- goto restart;
+ else if (ret == -ECONNRESET)
+ goto out;
else if (ret < 0)
goto transport_err;
}
transport_err:
iscsit_take_action_for_connection_exit(conn);
- goto restart;
out:
return 0;
}
@@ -4111,8 +4096,7 @@ int iscsi_target_rx_thread(void *arg)
int ret;
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
- struct iscsi_conn *conn = NULL;
- struct iscsi_thread_set *ts = arg;
+ struct iscsi_conn *conn = arg;
struct kvec iov;
/*
* Allow ourselves to be interrupted by SIGINT so that a
@@ -4120,11 +4104,6 @@ int iscsi_target_rx_thread(void *arg)
*/
allow_signal(SIGINT);
-restart:
- conn = iscsi_rx_thread_pre_handler(ts);
- if (!conn)
- goto out;
-
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
struct completion comp;
int rc;
@@ -4134,7 +4113,7 @@ restart:
if (rc < 0)
goto transport_err;
- goto out;
+ goto transport_err;
}
while (!kthread_should_stop()) {
@@ -4210,8 +4189,6 @@ transport_err:
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
iscsit_take_action_for_connection_exit(conn);
- goto restart;
-out:
return 0;
}
@@ -4273,7 +4250,24 @@ int iscsit_close_connection(
if (conn->conn_transport->transport_type == ISCSI_TCP)
complete(&conn->conn_logout_comp);
- iscsi_release_thread_set(conn);
+ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
+ if (conn->tx_thread &&
+ cmpxchg(&conn->tx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ }
+ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
+ if (conn->rx_thread &&
+ cmpxchg(&conn->rx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->rx_thread, 1);
+ kthread_stop(conn->rx_thread);
+ }
+ }
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
@@ -4383,8 +4377,6 @@ int iscsit_close_connection(
iscsit_put_transport(conn->conn_transport);
- conn->thread_set = NULL;
-
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
kfree(conn);
@@ -4551,15 +4543,13 @@ static void iscsit_logout_post_handler_closesession(
struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
-
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
- iscsit_stop_session(sess, 1, 1);
+ iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
target_put_session(sess->se_sess);
}
@@ -4567,13 +4557,12 @@ static void iscsit_logout_post_handler_closesession(
static void iscsit_logout_post_handler_samecid(
struct iscsi_conn *conn)
{
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
- iscsit_cause_connection_reinstatement(conn, 1);
+ iscsit_cause_connection_reinstatement(conn, sleep);
iscsit_dec_conn_usage_count(conn);
}
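
Throughout iscsit_close_connection() and the logout post-handlers above, cmpxchg() on the per-thread *_thread_active flag is what guarantees a given RX/TX kthread is signalled and stopped by exactly one context. A self-contained sketch of that claim-then-stop pattern, under assumed names (struct demo_conn is not the driver's structure):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/atomic.h>
	#include <linux/signal.h>

	struct demo_conn {
		struct task_struct *tx_thread;
		bool tx_thread_active;
	};

	/* Returns true if this caller won the race and stopped the thread. */
	static bool demo_stop_tx_thread_once(struct demo_conn *c)
	{
		if (!c->tx_thread)
			return false;
		/* Only the first caller observes 'true'; everyone else backs off. */
		if (!cmpxchg(&c->tx_thread_active, true, false))
			return false;

		send_sig(SIGINT, c->tx_thread, 1);	/* break out of interruptible waits */
		kthread_stop(c->tx_thread);		/* then wait for the kthread to exit */
		return true;
	}

The same claim is also what the logout post-handlers above appear to rely on when they pass the cmpxchg() result as the sleep argument to the teardown helpers.
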
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index e936d56fb523..7d0f9c00d9c2 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -35,7 +35,7 @@ extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
extern struct iscsit_global *iscsit_global;
-extern struct target_fabric_configfs *lio_target_fabric_configfs;
+extern const struct target_core_fabric_ops iscsi_ops;
extern struct kmem_cache *lio_dr_cache;
extern struct kmem_cache *lio_ooo_cache;
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 48384b675e62..469fce44ebad 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -37,9 +37,6 @@
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include <target/iscsi/iscsi_target_stat.h>
-#include "iscsi_target_configfs.h"
-
-struct target_fabric_configfs *lio_target_fabric_configfs;
struct lio_target_configfs_attribute {
struct configfs_attribute attr;
@@ -1052,6 +1049,11 @@ TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
*/
DEF_TPG_ATTRIB(t10_pi);
TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_fabric_prot_type
+ */
+DEF_TPG_ATTRIB(fabric_prot_type);
+TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
@@ -1065,6 +1067,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_demo_mode_discovery.attr,
&iscsi_tpg_attrib_default_erl.attr,
&iscsi_tpg_attrib_t10_pi.attr,
+ &iscsi_tpg_attrib_fabric_prot_type.attr,
NULL,
};
@@ -1410,8 +1413,18 @@ out:
TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
+static ssize_t lio_target_tpg_show_dynamic_sessions(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return target_show_dynamic_sessions(se_tpg, page);
+}
+
+TF_TPG_BASE_ATTR_RO(lio_target, dynamic_sessions);
+
static struct configfs_attribute *lio_target_tpg_attrs[] = {
&lio_target_tpg_enable.attr,
+ &lio_target_tpg_dynamic_sessions.attr,
NULL,
};
@@ -1450,10 +1463,8 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
if (!tpg)
return NULL;
- ret = core_tpg_register(
- &lio_target_fabric_configfs->tf_ops,
- wwn, &tpg->tpg_se_tpg, tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg,
+ tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return NULL;
@@ -1872,6 +1883,20 @@ static int lio_tpg_check_prod_mode_write_protect(
return tpg->tpg_attrib.prod_mode_write_protect;
}
+static int lio_tpg_check_prot_fabric_only(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+ /*
+ * Only report fabric_prot_type if t10_pi has also been enabled
+ * for incoming ib_isert sessions.
+ */
+ if (!tpg->tpg_attrib.t10_pi)
+ return 0;
+
+ return tpg->tpg_attrib.fabric_prot_type;
+}
+
static void lio_tpg_release_fabric_acl(
struct se_portal_group *se_tpg,
struct se_node_acl *se_acl)
@@ -1953,115 +1978,60 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
iscsit_release_cmd(cmd);
}
-/* End functions for target_core_fabric_ops */
-
-int iscsi_target_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- lio_target_fabric_configfs = NULL;
- fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() for"
- " LIO-Target failed!\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup the fabric API of function pointers used by target_core_mod..
- */
- fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
- fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
- fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
- fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
- fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
- fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
- fabric->tf_ops.tpg_get_pr_transport_id_len =
- &iscsi_get_pr_transport_id_len;
- fabric->tf_ops.tpg_parse_pr_out_transport_id =
- &iscsi_parse_pr_out_transport_id;
- fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
- fabric->tf_ops.tpg_check_demo_mode_cache =
- &lio_tpg_check_demo_mode_cache;
- fabric->tf_ops.tpg_check_demo_mode_write_protect =
- &lio_tpg_check_demo_mode_write_protect;
- fabric->tf_ops.tpg_check_prod_mode_write_protect =
- &lio_tpg_check_prod_mode_write_protect;
- fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
- fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
- fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
- fabric->tf_ops.check_stop_free = &lio_check_stop_free,
- fabric->tf_ops.release_cmd = &lio_release_cmd;
- fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
- fabric->tf_ops.close_session = &lio_tpg_close_session;
- fabric->tf_ops.sess_get_index = &lio_sess_get_index;
- fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
- fabric->tf_ops.write_pending = &lio_write_pending;
- fabric->tf_ops.write_pending_status = &lio_write_pending_status;
- fabric->tf_ops.set_default_node_attributes =
- &lio_set_default_node_attributes;
- fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
- fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
- fabric->tf_ops.queue_data_in = &lio_queue_data_in;
- fabric->tf_ops.queue_status = &lio_queue_status;
- fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
- fabric->tf_ops.aborted_task = &lio_aborted_task;
- /*
- * Setup function pointers for generic logic in target_core_fabric_configfs.c
- */
- fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
- fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
- fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
- fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
- fabric->tf_ops.fabric_post_link = NULL;
- fabric->tf_ops.fabric_pre_unlink = NULL;
- fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
- fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
- fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
- fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- * struct config_item_type's
- */
- fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
-
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() for"
- " LIO-Target failed!\n");
- target_fabric_configfs_free(fabric);
- return ret;
- }
-
- lio_target_fabric_configfs = fabric;
- pr_debug("LIO_TARGET[0] - Set fabric ->"
- " lio_target_fabric_configfs\n");
- return 0;
-}
-
-
-void iscsi_target_deregister_configfs(void)
-{
- if (!lio_target_fabric_configfs)
- return;
- /*
- * Shutdown discovery sessions and disable discovery TPG
- */
- if (iscsit_global->discovery_tpg)
- iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
-
- target_fabric_configfs_deregister(lio_target_fabric_configfs);
- lio_target_fabric_configfs = NULL;
- pr_debug("LIO_TARGET[0] - Cleared"
- " lio_target_fabric_configfs\n");
-}
+const struct target_core_fabric_ops iscsi_ops = {
+ .module = THIS_MODULE,
+ .name = "iscsi",
+ .get_fabric_name = iscsi_get_fabric_name,
+ .get_fabric_proto_ident = iscsi_get_fabric_proto_ident,
+ .tpg_get_wwn = lio_tpg_get_endpoint_wwn,
+ .tpg_get_tag = lio_tpg_get_tag,
+ .tpg_get_default_depth = lio_tpg_get_default_depth,
+ .tpg_get_pr_transport_id = iscsi_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = iscsi_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = iscsi_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = lio_tpg_check_demo_mode,
+ .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ lio_tpg_check_demo_mode_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ lio_tpg_check_prod_mode_write_protect,
+ .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only,
+ .tpg_alloc_fabric_acl = lio_tpg_alloc_fabric_acl,
+ .tpg_release_fabric_acl = lio_tpg_release_fabric_acl,
+ .tpg_get_inst_index = lio_tpg_get_inst_index,
+ .check_stop_free = lio_check_stop_free,
+ .release_cmd = lio_release_cmd,
+ .shutdown_session = lio_tpg_shutdown_session,
+ .close_session = lio_tpg_close_session,
+ .sess_get_index = lio_sess_get_index,
+ .sess_get_initiator_sid = lio_sess_get_initiator_sid,
+ .write_pending = lio_write_pending,
+ .write_pending_status = lio_write_pending_status,
+ .set_default_node_attributes = lio_set_default_node_attributes,
+ .get_task_tag = iscsi_get_task_tag,
+ .get_cmd_state = iscsi_get_cmd_state,
+ .queue_data_in = lio_queue_data_in,
+ .queue_status = lio_queue_status,
+ .queue_tm_rsp = lio_queue_tm_rsp,
+ .aborted_task = lio_aborted_task,
+ .fabric_make_wwn = lio_target_call_coreaddtiqn,
+ .fabric_drop_wwn = lio_target_call_coredeltiqn,
+ .fabric_make_tpg = lio_target_tiqn_addtpg,
+ .fabric_drop_tpg = lio_target_tiqn_deltpg,
+ .fabric_make_np = lio_target_call_addnptotpg,
+ .fabric_drop_np = lio_target_call_delnpfromtpg,
+ .fabric_make_nodeacl = lio_target_make_nodeacl,
+ .fabric_drop_nodeacl = lio_target_drop_nodeacl,
+
+ .tfc_discovery_attrs = lio_target_discovery_auth_attrs,
+ .tfc_wwn_attrs = lio_target_wwn_attrs,
+ .tfc_tpg_base_attrs = lio_target_tpg_attrs,
+ .tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs,
+ .tfc_tpg_auth_attrs = lio_target_tpg_auth_attrs,
+ .tfc_tpg_param_attrs = lio_target_tpg_param_attrs,
+ .tfc_tpg_np_base_attrs = lio_target_portal_attrs,
+ .tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs,
+ .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs,
+ .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs,
+ .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs,
+};
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
deleted file mode 100644
index 8cd5a63c4edc..000000000000
--- a/drivers/target/iscsi/iscsi_target_configfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef ISCSI_TARGET_CONFIGFS_H
-#define ISCSI_TARGET_CONFIGFS_H
-
-extern int iscsi_target_register_configfs(void);
-extern void iscsi_target_deregister_configfs(void);
-
-#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index bdd8731a4daa..959a14c9dd5d 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -23,7 +23,6 @@
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
-#include "iscsi_target_tq.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
@@ -860,7 +859,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
}
spin_unlock_bh(&conn->state_lock);
- iscsi_thread_set_force_reinstatement(conn);
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);
sleep:
wait_for_completion(&conn->conn_wait_rcfr_comp);
@@ -885,10 +887,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
return;
}
- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
- spin_unlock_bh(&conn->state_lock);
- return;
- }
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);
atomic_set(&conn->connection_reinstatement, 1);
if (!sleep) {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 153fb66ac1b8..8ce94ff744e6 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -26,7 +26,6 @@
#include <target/iscsi/iscsi_target_core.h>
#include <target/iscsi/iscsi_target_stat.h>
-#include "iscsi_target_tq.h"
#include "iscsi_target_device.h"
#include "iscsi_target_nego.h"
#include "iscsi_target_erl0.h"
@@ -699,6 +698,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
iscsit_start_nopin_timer(conn);
}
+static int iscsit_start_kthreads(struct iscsi_conn *conn)
+{
+ int ret = 0;
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ ISCSIT_BITMAP_BITS, get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+ if (conn->bitmap_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " iscsit_start_kthreads()\n");
+ return -ENOMEM;
+ }
+
+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
+ "%s", ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(conn->tx_thread)) {
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ ret = PTR_ERR(conn->tx_thread);
+ goto out_bitmap;
+ }
+ conn->tx_thread_active = true;
+
+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
+ "%s", ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(conn->rx_thread)) {
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ ret = PTR_ERR(conn->rx_thread);
+ goto out_tx;
+ }
+ conn->rx_thread_active = true;
+
+ return 0;
+out_tx:
+ kthread_stop(conn->tx_thread);
+ conn->tx_thread_active = false;
+out_bitmap:
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ return ret;
+}
+
int iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsi_conn *conn,
@@ -709,7 +753,7 @@ int iscsi_post_login_handler(
struct se_session *se_sess = sess->se_sess;
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
- struct iscsi_thread_set *ts;
+ int rc;
iscsit_inc_conn_usage_count(conn);
@@ -724,7 +768,6 @@ int iscsi_post_login_handler(
/*
* SCSI Initiator -> SCSI Target Port Mapping
*/
- ts = iscsi_get_thread_set();
if (!zero_tsih) {
iscsi_set_session_parameters(sess->sess_ops,
conn->param_list, 0);
@@ -751,9 +794,11 @@ int iscsi_post_login_handler(
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
- iscsi_post_login_start_timers(conn);
+ rc = iscsit_start_kthreads(conn);
+ if (rc)
+ return rc;
- iscsi_activate_thread_set(conn, ts);
+ iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
@@ -810,8 +855,11 @@ int iscsi_post_login_handler(
" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
spin_unlock_bh(&se_tpg->session_lock);
+ rc = iscsit_start_kthreads(conn);
+ if (rc)
+ return rc;
+
iscsi_post_login_start_timers(conn);
- iscsi_activate_thread_set(conn, ts);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
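
iscsit_start_kthreads() above replaces the old thread-set IDs with a bit reserved from iscsit_global->ts_bitmap; since get_order(1) evaluates to 0, bitmap_find_free_region() hands out a single-bit region per connection. A hedged, stand-alone sketch of the same allocate/release pattern (all names here are illustrative):

	#include <linux/bitmap.h>
	#include <linux/spinlock.h>
	#include <linux/vmalloc.h>
	#include <linux/errno.h>
	#include <asm/page.h>		/* get_order() */

	#define DEMO_ID_BITS	32768	/* mirrors ISCSIT_BITMAP_BITS */

	static unsigned long *demo_id_bitmap;
	static DEFINE_SPINLOCK(demo_id_lock);

	static int demo_id_pool_init(void)
	{
		demo_id_bitmap = vzalloc(BITS_TO_LONGS(DEMO_ID_BITS) * sizeof(long));
		return demo_id_bitmap ? 0 : -ENOMEM;
	}

	static int demo_id_alloc(void)
	{
		int id;

		spin_lock(&demo_id_lock);
		/* get_order(1) == 0, i.e. a region of 2^0 = 1 bit */
		id = bitmap_find_free_region(demo_id_bitmap, DEMO_ID_BITS,
					     get_order(1));
		spin_unlock(&demo_id_lock);

		return id;	/* negative if the pool is exhausted */
	}

	static void demo_id_free(int id)
	{
		spin_lock(&demo_id_lock);
		bitmap_release_region(demo_id_bitmap, id, get_order(1));
		spin_unlock(&demo_id_lock);
	}
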
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index bdd127c0e3ae..e8a240818353 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -68,10 +68,8 @@ int iscsit_load_discovery_tpg(void)
return -1;
}
- ret = core_tpg_register(
- &lio_target_fabric_configfs->tf_ops,
- NULL, &tpg->tpg_se_tpg, tpg,
- TRANSPORT_TPG_TYPE_DISCOVERY);
+ ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg,
+ tpg, TRANSPORT_TPG_TYPE_DISCOVERY);
if (ret < 0) {
kfree(tpg);
return -1;
@@ -228,6 +226,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
a->default_erl = TA_DEFAULT_ERL;
a->t10_pi = TA_DEFAULT_T10_PI;
+ a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -878,3 +877,21 @@ int iscsit_ta_t10_pi(
return 0;
}
+
+int iscsit_ta_fabric_prot_type(
+ struct iscsi_portal_group *tpg,
+ u32 prot_type)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) {
+ pr_err("Illegal value for fabric_prot_type: %u\n", prot_type);
+ return -EINVAL;
+ }
+
+ a->fabric_prot_type = prot_type;
+ pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n",
+ tpg->tpgt, prot_type);
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index e7265337bc43..95ff5bdecd71 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@ extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
deleted file mode 100644
index 26aa50996473..000000000000
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ /dev/null
@@ -1,495 +0,0 @@
-/*******************************************************************************
- * This file contains the iSCSI Login Thread and Thread Queue functions.
- *
- * (c) Copyright 2007-2013 Datera, Inc.
- *
- * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- ******************************************************************************/
-
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/bitmap.h>
-
-#include <target/iscsi/iscsi_target_core.h>
-#include "iscsi_target_tq.h"
-#include "iscsi_target.h"
-
-static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(inactive_ts_lock);
-static DEFINE_SPINLOCK(ts_bitmap_lock);
-
-static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
-{
- if (!list_empty(&ts->ts_list)) {
- WARN_ON(1);
- return;
- }
- spin_lock(&inactive_ts_lock);
- list_add_tail(&ts->ts_list, &inactive_ts_list);
- iscsit_global->inactive_ts++;
- spin_unlock(&inactive_ts_lock);
-}
-
-static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
-{
- struct iscsi_thread_set *ts;
-
- spin_lock(&inactive_ts_lock);
- if (list_empty(&inactive_ts_list)) {
- spin_unlock(&inactive_ts_lock);
- return NULL;
- }
-
- ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
-
- list_del_init(&ts->ts_list);
- iscsit_global->inactive_ts--;
- spin_unlock(&inactive_ts_lock);
-
- return ts;
-}
-
-int iscsi_allocate_thread_sets(u32 thread_pair_count)
-{
- int allocated_thread_pair_count = 0, i, thread_id;
- struct iscsi_thread_set *ts = NULL;
-
- for (i = 0; i < thread_pair_count; i++) {
- ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
- if (!ts) {
- pr_err("Unable to allocate memory for"
- " thread set.\n");
- return allocated_thread_pair_count;
- }
- /*
- * Locate the next available region in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
- iscsit_global->ts_bitmap_count, get_order(1));
- spin_unlock(&ts_bitmap_lock);
- if (thread_id < 0) {
- pr_err("bitmap_find_free_region() failed for"
- " thread_set_bitmap\n");
- kfree(ts);
- return allocated_thread_pair_count;
- }
-
- ts->thread_id = thread_id;
- ts->status = ISCSI_THREAD_SET_FREE;
- INIT_LIST_HEAD(&ts->ts_list);
- spin_lock_init(&ts->ts_state_lock);
- init_completion(&ts->rx_restart_comp);
- init_completion(&ts->tx_restart_comp);
- init_completion(&ts->rx_start_comp);
- init_completion(&ts->tx_start_comp);
- sema_init(&ts->ts_activate_sem, 0);
-
- ts->create_threads = 1;
- ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
- ISCSI_TX_THREAD_NAME);
- if (IS_ERR(ts->tx_thread)) {
- dump_stack();
- pr_err("Unable to start iscsi_target_tx_thread\n");
- break;
- }
-
- ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
- ISCSI_RX_THREAD_NAME);
- if (IS_ERR(ts->rx_thread)) {
- kthread_stop(ts->tx_thread);
- pr_err("Unable to start iscsi_target_rx_thread\n");
- break;
- }
- ts->create_threads = 0;
-
- iscsi_add_ts_to_inactive_list(ts);
- allocated_thread_pair_count++;
- }
-
- pr_debug("Spawned %d thread set(s) (%d total threads).\n",
- allocated_thread_pair_count, allocated_thread_pair_count * 2);
- return allocated_thread_pair_count;
-}
-
-static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts)
-{
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
-
- if (ts->rx_thread) {
- complete(&ts->rx_start_comp);
- spin_unlock_bh(&ts->ts_state_lock);
- kthread_stop(ts->rx_thread);
- spin_lock_bh(&ts->ts_state_lock);
- }
- if (ts->tx_thread) {
- complete(&ts->tx_start_comp);
- spin_unlock_bh(&ts->ts_state_lock);
- kthread_stop(ts->tx_thread);
- spin_lock_bh(&ts->ts_state_lock);
- }
- spin_unlock_bh(&ts->ts_state_lock);
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
-
- kfree(ts);
-}
-
-void iscsi_deallocate_thread_sets(void)
-{
- struct iscsi_thread_set *ts = NULL;
- u32 released_count = 0;
-
- while ((ts = iscsi_get_ts_from_inactive_list())) {
-
- iscsi_deallocate_thread_one(ts);
- released_count++;
- }
-
- if (released_count)
- pr_debug("Stopped %d thread set(s) (%d total threads)."
- "\n", released_count, released_count * 2);
-}
-
-static void iscsi_deallocate_extra_thread_sets(void)
-{
- u32 orig_count, released_count = 0;
- struct iscsi_thread_set *ts = NULL;
-
- orig_count = TARGET_THREAD_SET_COUNT;
-
- while ((iscsit_global->inactive_ts + 1) > orig_count) {
- ts = iscsi_get_ts_from_inactive_list();
- if (!ts)
- break;
-
- iscsi_deallocate_thread_one(ts);
- released_count++;
- }
-
- if (released_count)
- pr_debug("Stopped %d thread set(s) (%d total threads)."
- "\n", released_count, released_count * 2);
-}
-
-void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
-{
- spin_lock_bh(&ts->ts_state_lock);
- conn->thread_set = ts;
- ts->conn = conn;
- ts->status = ISCSI_THREAD_SET_ACTIVE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- complete(&ts->rx_start_comp);
- complete(&ts->tx_start_comp);
-
- down(&ts->ts_activate_sem);
-}
-
-struct iscsi_thread_set *iscsi_get_thread_set(void)
-{
- struct iscsi_thread_set *ts;
-
-get_set:
- ts = iscsi_get_ts_from_inactive_list();
- if (!ts) {
- iscsi_allocate_thread_sets(1);
- goto get_set;
- }
-
- ts->delay_inactive = 1;
- ts->signal_sent = 0;
- ts->thread_count = 2;
- init_completion(&ts->rx_restart_comp);
- init_completion(&ts->tx_restart_comp);
- sema_init(&ts->ts_activate_sem, 0);
-
- return ts;
-}
-
-void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
-{
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn->thread_set) {
- pr_err("struct iscsi_conn->thread_set is NULL\n");
- return;
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->thread_clear &= ~thread_clear;
-
- if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
- (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
- complete(&ts->rx_restart_comp);
- else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
- (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
- complete(&ts->tx_restart_comp);
- spin_unlock_bh(&ts->ts_state_lock);
-}
-
-void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
-{
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn->thread_set) {
- pr_err("struct iscsi_conn->thread_set is NULL\n");
- return;
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->signal_sent |= signal_sent;
- spin_unlock_bh(&ts->ts_state_lock);
-}
-
-int iscsi_release_thread_set(struct iscsi_conn *conn)
-{
- int thread_called = 0;
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn || !conn->thread_set) {
- pr_err("connection or thread set pointer is NULL\n");
- BUG();
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_RESET;
-
- if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
- strlen(ISCSI_RX_THREAD_NAME)))
- thread_called = ISCSI_RX_THREAD;
- else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
- strlen(ISCSI_TX_THREAD_NAME)))
- thread_called = ISCSI_TX_THREAD;
-
- if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
- (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
-
- if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
- send_sig(SIGINT, ts->rx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
- }
- ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
- wait_for_completion(&ts->rx_restart_comp);
- spin_lock_bh(&ts->ts_state_lock);
- ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
- }
- if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
- (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
-
- if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
- send_sig(SIGINT, ts->tx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
- }
- ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
- wait_for_completion(&ts->tx_restart_comp);
- spin_lock_bh(&ts->ts_state_lock);
- ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
- }
-
- ts->conn = NULL;
- ts->status = ISCSI_THREAD_SET_FREE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
-{
- struct iscsi_thread_set *ts;
-
- if (!conn->thread_set)
- return -1;
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
- spin_unlock_bh(&ts->ts_state_lock);
- return -1;
- }
-
- if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
- send_sig(SIGINT, ts->tx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
- }
- if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
- send_sig(SIGINT, ts->rx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
- }
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-static void iscsi_check_to_add_additional_sets(void)
-{
- int thread_sets_add;
-
- spin_lock(&inactive_ts_lock);
- thread_sets_add = iscsit_global->inactive_ts;
- spin_unlock(&inactive_ts_lock);
- if (thread_sets_add == 1)
- iscsi_allocate_thread_sets(1);
-}
-
-static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() ||
- signal_pending(current)) {
- spin_unlock_bh(&ts->ts_state_lock);
- return -1;
- }
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- int ret;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->create_threads) {
- spin_unlock_bh(&ts->ts_state_lock);
- goto sleep;
- }
-
- if (ts->status != ISCSI_THREAD_SET_DIE)
- flush_signals(current);
-
- if (ts->delay_inactive && (--ts->thread_count == 0)) {
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (!iscsit_global->in_shutdown)
- iscsi_deallocate_extra_thread_sets();
-
- iscsi_add_ts_to_inactive_list(ts);
- spin_lock_bh(&ts->ts_state_lock);
- }
-
- if ((ts->status == ISCSI_THREAD_SET_RESET) &&
- (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
- complete(&ts->rx_restart_comp);
-
- ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-sleep:
- ret = wait_for_completion_interruptible(&ts->rx_start_comp);
- if (ret != 0)
- return NULL;
-
- if (iscsi_signal_thread_pre_handler(ts) < 0)
- return NULL;
-
- iscsi_check_to_add_additional_sets();
-
- spin_lock_bh(&ts->ts_state_lock);
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for"
- " RX thread_id: %s/%d\n", current->comm, current->pid);
- spin_unlock_bh(&ts->ts_state_lock);
- return NULL;
- }
- ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-
- up(&ts->ts_activate_sem);
-
- return ts->conn;
-}
-
-struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- int ret;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->create_threads) {
- spin_unlock_bh(&ts->ts_state_lock);
- goto sleep;
- }
-
- if (ts->status != ISCSI_THREAD_SET_DIE)
- flush_signals(current);
-
- if (ts->delay_inactive && (--ts->thread_count == 0)) {
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (!iscsit_global->in_shutdown)
- iscsi_deallocate_extra_thread_sets();
-
- iscsi_add_ts_to_inactive_list(ts);
- spin_lock_bh(&ts->ts_state_lock);
- }
- if ((ts->status == ISCSI_THREAD_SET_RESET) &&
- (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
- complete(&ts->tx_restart_comp);
-
- ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-sleep:
- ret = wait_for_completion_interruptible(&ts->tx_start_comp);
- if (ret != 0)
- return NULL;
-
- if (iscsi_signal_thread_pre_handler(ts) < 0)
- return NULL;
-
- iscsi_check_to_add_additional_sets();
-
- spin_lock_bh(&ts->ts_state_lock);
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for"
- " TX thread_id: %s/%d\n", current->comm, current->pid);
- spin_unlock_bh(&ts->ts_state_lock);
- return NULL;
- }
- ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-
- up(&ts->ts_activate_sem);
-
- return ts->conn;
-}
-
-int iscsi_thread_set_init(void)
-{
- int size;
-
- iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
-
- size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
- iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
- if (!iscsit_global->ts_bitmap) {
- pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void iscsi_thread_set_free(void)
-{
- kfree(iscsit_global->ts_bitmap);
-}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
deleted file mode 100644
index cc1eede5ab3a..000000000000
--- a/drivers/target/iscsi/iscsi_target_tq.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ISCSI_THREAD_QUEUE_H
-#define ISCSI_THREAD_QUEUE_H
-
-/*
- * Defines for thread sets.
- */
-extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
-extern int iscsi_allocate_thread_sets(u32);
-extern void iscsi_deallocate_thread_sets(void);
-extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
-extern struct iscsi_thread_set *iscsi_get_thread_set(void);
-extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
-extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
-extern int iscsi_release_thread_set(struct iscsi_conn *);
-extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
-extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
-extern int iscsi_thread_set_init(void);
-extern void iscsi_thread_set_free(void);
-
-extern int iscsi_target_tx_thread(void *);
-extern int iscsi_target_rx_thread(void *);
-
-#define TARGET_THREAD_SET_COUNT 4
-
-#define ISCSI_RX_THREAD 1
-#define ISCSI_TX_THREAD 2
-#define ISCSI_RX_THREAD_NAME "iscsi_trx"
-#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
-#define ISCSI_BLOCK_RX_THREAD 0x1
-#define ISCSI_BLOCK_TX_THREAD 0x2
-#define ISCSI_CLEAR_RX_THREAD 0x1
-#define ISCSI_CLEAR_TX_THREAD 0x2
-#define ISCSI_SIGNAL_RX_THREAD 0x1
-#define ISCSI_SIGNAL_TX_THREAD 0x2
-
-/* struct iscsi_thread_set->status */
-#define ISCSI_THREAD_SET_FREE 1
-#define ISCSI_THREAD_SET_ACTIVE 2
-#define ISCSI_THREAD_SET_DIE 3
-#define ISCSI_THREAD_SET_RESET 4
-#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
-
-/* By default allow a maximum of 32K iSCSI connections */
-#define ISCSI_TS_BITMAP_BITS 32768
-
-struct iscsi_thread_set {
- /* flags used for blocking and restarting sets */
- int blocked_threads;
- /* flag for creating threads */
- int create_threads;
- /* flag for delaying readding to inactive list */
- int delay_inactive;
- /* status for thread set */
- int status;
- /* which threads have had signals sent */
- int signal_sent;
- /* flag for which threads exited first */
- int thread_clear;
- /* Active threads in the thread set */
- int thread_count;
- /* Unique thread ID */
- u32 thread_id;
- /* pointer to connection if set is active */
- struct iscsi_conn *conn;
- /* used for controlling ts state accesses */
- spinlock_t ts_state_lock;
- /* used for restarting thread queue */
- struct completion rx_restart_comp;
- /* used for restarting thread queue */
- struct completion tx_restart_comp;
- /* used for normal unused blocking */
- struct completion rx_start_comp;
- /* used for normal unused blocking */
- struct completion tx_start_comp;
- /* OS descriptor for rx thread */
- struct task_struct *rx_thread;
- /* OS descriptor for tx thread */
- struct task_struct *tx_thread;
- /* struct iscsi_thread_set in list list head*/
- struct list_head ts_list;
- struct semaphore ts_activate_sem;
-};
-
-#endif /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 390df8ed72b2..b18edda3e8af 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -33,7 +33,6 @@
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
-#include "iscsi_target_tq.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c36bd7c29136..51f0c895c6a5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -41,8 +41,7 @@
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_loop_fabric_configfs;
+static const struct target_core_fabric_ops loop_ops;
static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
@@ -108,7 +107,7 @@ static struct device_driver tcm_loop_driverfs = {
/*
* Used with root_device_register() in tcm_loop_alloc_core_bus() below
*/
-struct device *tcm_loop_primary;
+static struct device *tcm_loop_primary;
static void tcm_loop_submission_work(struct work_struct *work)
{
@@ -697,6 +696,13 @@ static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg
return 0;
}
+static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+ return tl_tpg->tl_fabric_prot_type;
+}
+
static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
@@ -912,6 +918,46 @@ static void tcm_loop_port_unlink(
/* End items for tcm_loop_port_cit */
+static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+
+ return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
+}
+
+static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tl_tpg->tl_fabric_prot_type = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
+ &tcm_loop_tpg_attrib_fabric_prot_type.attr,
+ NULL,
+};
+
/* Start items for tcm_loop_nexus_cit */
static int tcm_loop_make_nexus(
@@ -937,7 +983,8 @@ static int tcm_loop_make_nexus(
/*
* Initialize the struct se_session pointer
*/
- tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
+ tl_nexus->se_sess = transport_init_session(
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
goto out;
@@ -1165,21 +1212,19 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
struct tcm_loop_tpg *tl_tpg;
- char *tpgt_str, *end_ptr;
int ret;
- unsigned short int tpgt;
+ unsigned long tpgt;
- tpgt_str = strstr(name, "tpgt_");
- if (!tpgt_str) {
+ if (strstr(name, "tpgt_") != name) {
pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return ERR_PTR(-EINVAL);
}
- tpgt_str += 5; /* Skip ahead of "tpgt_" */
- tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+ if (kstrtoul(name+5, 10, &tpgt))
+ return ERR_PTR(-EINVAL);
if (tpgt >= TL_TPGS_PER_HBA) {
- pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+ pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
" %u\n", tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
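
The tpgt parsing change above swaps simple_strtoul() for kstrtoul(), which rejects empty strings, trailing junk and overflow instead of silently truncating, and the strstr() check now also requires "tpgt_" to sit at the very start of the name. A hedged sketch of the same parse (the helper name is made up):

	#include <linux/kernel.h>	/* kstrtoul() */
	#include <linux/string.h>
	#include <linux/errno.h>

	/* Accepts "tpgt_<decimal>" and nothing else. */
	static int demo_parse_tpgt(const char *name, unsigned long *tpgt)
	{
		/* strstr(name, "tpgt_") != name  <=>  name does not begin with "tpgt_" */
		if (strncmp(name, "tpgt_", 5))
			return -EINVAL;

		/* Fails on "", "tpgt_12x", overflowing values, etc. */
		return kstrtoul(name + 5, 10, tpgt);
	}
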
@@ -1189,14 +1234,13 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
/*
* Register the tl_tpg as an emulated SAS TCM Target Endpoint
*/
- ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
- wwn, &tl_tpg->tl_se_tpg, tl_tpg,
+ ret = core_tpg_register(&loop_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return ERR_PTR(-ENOMEM);
pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
- " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+ " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
return &tl_tpg->tl_se_tpg;
@@ -1338,127 +1382,51 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
/* End items for tcm_loop_cit */
-static int tcm_loop_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
- /*
- * Set the TCM Loop HBA counter to zero
- */
- tcm_loop_hba_no_cnt = 0;
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
- if (IS_ERR(fabric)) {
- pr_err("tcm_loop_register_configfs() failed!\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup the fabric API of function pointers used by target_core_mod
- */
- fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
- fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
- fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
- fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
- fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
- fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
- fabric->tf_ops.tpg_get_pr_transport_id_len =
- &tcm_loop_get_pr_transport_id_len;
- fabric->tf_ops.tpg_parse_pr_out_transport_id =
- &tcm_loop_parse_pr_out_transport_id;
- fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
- fabric->tf_ops.tpg_check_demo_mode_cache =
- &tcm_loop_check_demo_mode_cache;
- fabric->tf_ops.tpg_check_demo_mode_write_protect =
- &tcm_loop_check_demo_mode_write_protect;
- fabric->tf_ops.tpg_check_prod_mode_write_protect =
- &tcm_loop_check_prod_mode_write_protect;
- /*
- * The TCM loopback fabric module runs in demo-mode to a local
- * virtual SCSI device, so fabric dependent initiator ACLs are
- * not required.
- */
- fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
- fabric->tf_ops.tpg_release_fabric_acl =
- &tcm_loop_tpg_release_fabric_acl;
- fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
- /*
- * Used for setting up remaining TCM resources in process context
- */
- fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
- fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
- fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
- fabric->tf_ops.close_session = &tcm_loop_close_session;
- fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
- fabric->tf_ops.sess_get_initiator_sid = NULL;
- fabric->tf_ops.write_pending = &tcm_loop_write_pending;
- fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
- /*
- * Not used for TCM loopback
- */
- fabric->tf_ops.set_default_node_attributes =
- &tcm_loop_set_default_node_attributes;
- fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
- fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
- fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
- fabric->tf_ops.queue_status = &tcm_loop_queue_status;
- fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
- fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
-
- /*
- * Setup function pointers for generic logic in target_core_fabric_configfs.c
- */
- fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
- fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
- fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
- fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
- /*
- * fabric_post_link() and fabric_pre_unlink() are used for
- * registration and release of TCM Loop Virtual SCSI LUNs.
- */
- fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
- fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
- fabric->tf_ops.fabric_make_np = NULL;
- fabric->tf_ops.fabric_drop_np = NULL;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- /*
- * Once fabric->tf_ops has been setup, now register the fabric for
- * use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() for"
- " TCM_Loop failed!\n");
- target_fabric_configfs_free(fabric);
- return -1;
- }
- /*
- * Setup our local pointer to *fabric.
- */
- tcm_loop_fabric_configfs = fabric;
- pr_debug("TCM_LOOP[0] - Set fabric ->"
- " tcm_loop_fabric_configfs\n");
- return 0;
-}
-
-static void tcm_loop_deregister_configfs(void)
-{
- if (!tcm_loop_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
- tcm_loop_fabric_configfs = NULL;
- pr_debug("TCM_LOOP[0] - Cleared"
- " tcm_loop_fabric_configfs\n");
-}
+static const struct target_core_fabric_ops loop_ops = {
+ .module = THIS_MODULE,
+ .name = "loopback",
+ .get_fabric_name = tcm_loop_get_fabric_name,
+ .get_fabric_proto_ident = tcm_loop_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_loop_get_endpoint_wwn,
+ .tpg_get_tag = tcm_loop_get_tag,
+ .tpg_get_default_depth = tcm_loop_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_loop_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_loop_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_loop_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_loop_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ tcm_loop_check_demo_mode_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ tcm_loop_check_prod_mode_write_protect,
+ .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
+ .tpg_alloc_fabric_acl = tcm_loop_tpg_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_loop_tpg_release_fabric_acl,
+ .tpg_get_inst_index = tcm_loop_get_inst_index,
+ .check_stop_free = tcm_loop_check_stop_free,
+ .release_cmd = tcm_loop_release_cmd,
+ .shutdown_session = tcm_loop_shutdown_session,
+ .close_session = tcm_loop_close_session,
+ .sess_get_index = tcm_loop_sess_get_index,
+ .write_pending = tcm_loop_write_pending,
+ .write_pending_status = tcm_loop_write_pending_status,
+ .set_default_node_attributes = tcm_loop_set_default_node_attributes,
+ .get_task_tag = tcm_loop_get_task_tag,
+ .get_cmd_state = tcm_loop_get_cmd_state,
+ .queue_data_in = tcm_loop_queue_data_in,
+ .queue_status = tcm_loop_queue_status,
+ .queue_tm_rsp = tcm_loop_queue_tm_rsp,
+ .aborted_task = tcm_loop_aborted_task,
+ .fabric_make_wwn = tcm_loop_make_scsi_hba,
+ .fabric_drop_wwn = tcm_loop_drop_scsi_hba,
+ .fabric_make_tpg = tcm_loop_make_naa_tpg,
+ .fabric_drop_tpg = tcm_loop_drop_naa_tpg,
+ .fabric_post_link = tcm_loop_port_link,
+ .fabric_pre_unlink = tcm_loop_port_unlink,
+ .tfc_wwn_attrs = tcm_loop_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
+ .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
+};
static int __init tcm_loop_fabric_init(void)
{
@@ -1482,7 +1450,7 @@ static int __init tcm_loop_fabric_init(void)
if (ret)
goto out_destroy_cache;
- ret = tcm_loop_register_configfs();
+ ret = target_register_template(&loop_ops);
if (ret)
goto out_release_core_bus;
@@ -1500,7 +1468,7 @@ out:
static void __exit tcm_loop_fabric_exit(void)
{
- tcm_loop_deregister_configfs();
+ target_unregister_template(&loop_ops);
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
destroy_workqueue(tcm_loop_workqueue);
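The hunks above collapse tcm_loop's open-coded configfs registration into a const fabric-ops template plus a single register/unregister call. As a rough sketch only (every "demo_*" name below is invented for illustration, and the "..." stands for the mandatory callbacks that target_fabric_tf_ops_check() still requires), a fabric module under the new API reduces to:

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static const struct target_core_fabric_ops demo_ops = {
	.module		= THIS_MODULE,
	.name		= "demo_fabric",
	/* ... get_fabric_name, tpg_*, queue_*, fabric_make/drop_* callbacks ... */
};

static int __init demo_init(void)
{
	/* Replaces target_fabric_configfs_init() + _register() + the module-local pointer. */
	return target_register_template(&demo_ops);
}

static void __exit demo_exit(void)
{
	/* The core looks the template up by ops->name; no local state to clear. */
	target_unregister_template(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");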
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 6ae49f272ba6..1e72ff77cac9 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -43,6 +43,7 @@ struct tcm_loop_nacl {
struct tcm_loop_tpg {
unsigned short tl_tpgt;
unsigned short tl_transport_status;
+ enum target_prot_type tl_fabric_prot_type;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 9512af6a8114..18b0f9703ff2 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -42,8 +42,7 @@
#include "sbp_target.h"
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *sbp_fabric_configfs;
+static const struct target_core_fabric_ops sbp_ops;
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
@@ -2215,8 +2214,7 @@ static struct se_portal_group *sbp_make_tpg(
goto out_free_tpg;
}
- ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
- &tpg->se_tpg, (void *)tpg,
+ ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
goto out_unreg_mgt_agt;
@@ -2503,7 +2501,9 @@ static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops sbp_ops = {
+static const struct target_core_fabric_ops sbp_ops = {
+ .module = THIS_MODULE,
+ .name = "sbp",
.get_fabric_name = sbp_get_fabric_name,
.get_fabric_proto_ident = sbp_get_fabric_proto_ident,
.tpg_get_wwn = sbp_get_fabric_wwn,
@@ -2544,68 +2544,20 @@ static struct target_core_fabric_ops sbp_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = sbp_make_nodeacl,
.fabric_drop_nodeacl = sbp_drop_nodeacl,
-};
-
-static int sbp_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
-
- fabric->tf_ops = sbp_ops;
-
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
-
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed for SBP\n");
- return ret;
- }
- sbp_fabric_configfs = fabric;
-
- return 0;
-};
-
-static void sbp_deregister_configfs(void)
-{
- if (!sbp_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(sbp_fabric_configfs);
- sbp_fabric_configfs = NULL;
+ .tfc_wwn_attrs = sbp_wwn_attrs,
+ .tfc_tpg_base_attrs = sbp_tpg_base_attrs,
+ .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
};
static int __init sbp_init(void)
{
- int ret;
-
- ret = sbp_register_configfs();
- if (ret < 0)
- return ret;
-
- return 0;
+ return target_register_template(&sbp_ops);
};
static void __exit sbp_exit(void)
{
- sbp_deregister_configfs();
+ target_unregister_template(&sbp_ops);
};
MODULE_DESCRIPTION("FireWire SBP fabric driver");
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 75d89adfccc0..ddaf76a4ac2a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -142,8 +142,8 @@ static struct config_group *target_core_register_fabric(
tf = target_core_get_fabric(name);
if (!tf) {
- pr_err("target_core_register_fabric() trying autoload for %s\n",
- name);
+ pr_debug("target_core_register_fabric() trying autoload for %s\n",
+ name);
/*
* Below are some hardcoded request_module() calls to automatically
@@ -165,8 +165,8 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("iscsi_target_mod");
if (ret < 0) {
- pr_err("request_module() failed for"
- " iscsi_target_mod.ko: %d\n", ret);
+ pr_debug("request_module() failed for"
+ " iscsi_target_mod.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
} else if (!strncmp(name, "loopback", 8)) {
@@ -178,8 +178,8 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("tcm_loop");
if (ret < 0) {
- pr_err("request_module() failed for"
- " tcm_loop.ko: %d\n", ret);
+ pr_debug("request_module() failed for"
+ " tcm_loop.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
}
@@ -188,8 +188,8 @@ static struct config_group *target_core_register_fabric(
}
if (!tf) {
- pr_err("target_core_get_fabric() failed for %s\n",
- name);
+ pr_debug("target_core_get_fabric() failed for %s\n",
+ name);
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
@@ -300,81 +300,17 @@ struct configfs_subsystem *target_core_subsystem[] = {
// Start functions called by external Target Fabrics Modules
//############################################################################*/
-/*
- * First function called by fabric modules to:
- *
- * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
- * 2) Add struct target_fabric_configfs to g_tf_list
- * 3) Return struct target_fabric_configfs to fabric module to be passed
- * into target_fabric_configfs_register().
- */
-struct target_fabric_configfs *target_fabric_configfs_init(
- struct module *fabric_mod,
- const char *name)
+static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
- struct target_fabric_configfs *tf;
-
- if (!(name)) {
- pr_err("Unable to locate passed fabric name\n");
- return ERR_PTR(-EINVAL);
+ if (!tfo->name) {
+ pr_err("Missing tfo->name\n");
+ return -EINVAL;
}
- if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
+ if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
pr_err("Passed name: %s exceeds TARGET_FABRIC"
- "_NAME_SIZE\n", name);
- return ERR_PTR(-EINVAL);
+ "_NAME_SIZE\n", tfo->name);
+ return -EINVAL;
}
-
- tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
- if (!tf)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&tf->tf_list);
- atomic_set(&tf->tf_access_cnt, 0);
- /*
- * Setup the default generic struct config_item_type's (cits) in
- * struct target_fabric_configfs->tf_cit_tmpl
- */
- tf->tf_module = fabric_mod;
- target_fabric_setup_cits(tf);
-
- tf->tf_subsys = target_core_subsystem[0];
- snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
-
- mutex_lock(&g_tf_lock);
- list_add_tail(&tf->tf_list, &g_tf_list);
- mutex_unlock(&g_tf_lock);
-
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
- ">>>>>>>>>>>>>>\n");
- pr_debug("Initialized struct target_fabric_configfs: %p for"
- " %s\n", tf, tf->tf_name);
- return tf;
-}
-EXPORT_SYMBOL(target_fabric_configfs_init);
-
-/*
- * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
- */
-void target_fabric_configfs_free(
- struct target_fabric_configfs *tf)
-{
- mutex_lock(&g_tf_lock);
- list_del(&tf->tf_list);
- mutex_unlock(&g_tf_lock);
-
- kfree(tf);
-}
-EXPORT_SYMBOL(target_fabric_configfs_free);
-
-/*
- * Perform a sanity check of the passed tf->tf_ops before completing
- * TCM fabric module registration.
- */
-static int target_fabric_tf_ops_check(
- struct target_fabric_configfs *tf)
-{
- struct target_core_fabric_ops *tfo = &tf->tf_ops;
-
if (!tfo->get_fabric_name) {
pr_err("Missing tfo->get_fabric_name()\n");
return -EINVAL;
@@ -508,77 +444,59 @@ static int target_fabric_tf_ops_check(
return 0;
}
-/*
- * Called 2nd from fabric module with returned parameter of
- * struct target_fabric_configfs * from target_fabric_configfs_init().
- *
- * Upon a successful registration, the new fabric's struct config_item is
- * return. Also, a pointer to this struct is set in the passed
- * struct target_fabric_configfs.
- */
-int target_fabric_configfs_register(
- struct target_fabric_configfs *tf)
+int target_register_template(const struct target_core_fabric_ops *fo)
{
+ struct target_fabric_configfs *tf;
int ret;
+ ret = target_fabric_tf_ops_check(fo);
+ if (ret)
+ return ret;
+
+ tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
if (!tf) {
- pr_err("Unable to locate target_fabric_configfs"
- " pointer\n");
- return -EINVAL;
- }
- if (!tf->tf_subsys) {
- pr_err("Unable to target struct config_subsystem"
- " pointer\n");
- return -EINVAL;
+ pr_err("%s: could not allocate memory!\n", __func__);
+ return -ENOMEM;
}
- ret = target_fabric_tf_ops_check(tf);
- if (ret < 0)
- return ret;
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
- ">>>>>>>>>>\n");
+ INIT_LIST_HEAD(&tf->tf_list);
+ atomic_set(&tf->tf_access_cnt, 0);
+
+ /*
+ * Setup the default generic struct config_item_type's (cits) in
+ * struct target_fabric_configfs->tf_cit_tmpl
+ */
+ tf->tf_module = fo->module;
+ tf->tf_subsys = target_core_subsystem[0];
+ snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
+
+ tf->tf_ops = *fo;
+ target_fabric_setup_cits(tf);
+
+ mutex_lock(&g_tf_lock);
+ list_add_tail(&tf->tf_list, &g_tf_list);
+ mutex_unlock(&g_tf_lock);
+
return 0;
}
-EXPORT_SYMBOL(target_fabric_configfs_register);
+EXPORT_SYMBOL(target_register_template);
-void target_fabric_configfs_deregister(
- struct target_fabric_configfs *tf)
+void target_unregister_template(const struct target_core_fabric_ops *fo)
{
- struct configfs_subsystem *su;
+ struct target_fabric_configfs *t;
- if (!tf) {
- pr_err("Unable to locate passed target_fabric_"
- "configfs\n");
- return;
- }
- su = tf->tf_subsys;
- if (!su) {
- pr_err("Unable to locate passed tf->tf_subsys"
- " pointer\n");
- return;
- }
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
- ">>>>>>>>>>>>\n");
mutex_lock(&g_tf_lock);
- if (atomic_read(&tf->tf_access_cnt)) {
- mutex_unlock(&g_tf_lock);
- pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
- tf->tf_name);
- BUG();
+ list_for_each_entry(t, &g_tf_list, tf_list) {
+ if (!strcmp(t->tf_name, fo->name)) {
+ BUG_ON(atomic_read(&t->tf_access_cnt));
+ list_del(&t->tf_list);
+ kfree(t);
+ break;
+ }
}
- list_del(&tf->tf_list);
mutex_unlock(&g_tf_lock);
-
- pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
- " %s\n", tf->tf_name);
- tf->tf_module = NULL;
- tf->tf_subsys = NULL;
- kfree(tf);
-
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
- ">>>>>\n");
}
-EXPORT_SYMBOL(target_fabric_configfs_deregister);
+EXPORT_SYMBOL(target_unregister_template);
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
@@ -945,7 +863,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
struct se_lun *lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg;
- struct target_core_fabric_ops *tfo;
+ const struct target_core_fabric_ops *tfo;
ssize_t len = 0;
spin_lock(&dev->dev_reservation_lock);
@@ -979,7 +897,7 @@ SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
struct se_device *dev, char *page)
{
- struct target_core_fabric_ops *tfo;
+ const struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
unsigned char buf[384];
char i_buf[PR_REG_ISID_ID_LEN];
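Because target_register_template() runs target_fabric_tf_ops_check() before allocating the target_fabric_configfs, and copies the template with tf->tf_ops = *fo, a driver's ops table can stay const and an incomplete one is refused before any list insertion. A minimal, hypothetical illustration (the "bad_ops"/"demo_check_init" names are not from the patch, and the usual target_core_fabric.h include is assumed):

static const struct target_core_fabric_ops bad_ops = {
	.module	= THIS_MODULE,
	/* .name deliberately left unset */
};

static int __init demo_check_init(void)
{
	int ret = target_register_template(&bad_ops);

	/* target_fabric_tf_ops_check() rejects the missing name with -EINVAL */
	if (ret)
		pr_info("template rejected: %d\n", ret);
	return ret;
}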
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 0c3f90130b7d..1f7886bb16bf 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -56,6 +56,20 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
+#define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{ \
+ struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
+ struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
+ struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = attrs; \
+ cit->ct_owner = tf->tf_module; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
+
/* Start of tfc_tpg_mappedlun_cit */
static int target_fabric_mappedlun_link(
@@ -278,7 +292,7 @@ static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
.store_attribute = target_fabric_nacl_attrib_attr_store,
};
-TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL);
/* End of tfc_tpg_nacl_attrib_cit */
@@ -291,7 +305,7 @@ static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
.store_attribute = target_fabric_nacl_auth_attr_store,
};
-TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL);
/* End of tfc_tpg_nacl_auth_cit */
@@ -304,7 +318,7 @@ static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
.store_attribute = target_fabric_nacl_param_attr_store,
};
-TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL);
/* End of tfc_tpg_nacl_param_cit */
@@ -461,8 +475,8 @@ static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
.drop_item = target_fabric_drop_mappedlun,
};
-TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
- &target_fabric_nacl_base_group_ops, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+ &target_fabric_nacl_base_group_ops);
/* End of tfc_tpg_nacl_base_cit */
@@ -570,7 +584,7 @@ static struct configfs_item_operations target_fabric_np_base_item_ops = {
.store_attribute = target_fabric_np_base_attr_store,
};
-TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL);
/* End of tfc_tpg_np_base_cit */
@@ -966,7 +980,7 @@ static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
.store_attribute = target_fabric_tpg_attrib_attr_store,
};
-TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL);
/* End of tfc_tpg_attrib_cit */
@@ -979,7 +993,7 @@ static struct configfs_item_operations target_fabric_tpg_auth_item_ops = {
.store_attribute = target_fabric_tpg_auth_attr_store,
};
-TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL);
/* End of tfc_tpg_attrib_cit */
@@ -992,7 +1006,7 @@ static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
.store_attribute = target_fabric_tpg_param_attr_store,
};
-TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_param, &target_fabric_tpg_param_item_ops, NULL);
/* End of tfc_tpg_param_cit */
@@ -1018,7 +1032,7 @@ static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
.store_attribute = target_fabric_tpg_attr_store,
};
-TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL);
/* End of tfc_tpg_base_cit */
@@ -1192,7 +1206,7 @@ static struct configfs_item_operations target_fabric_wwn_item_ops = {
.store_attribute = target_fabric_wwn_attr_store,
};
-TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+TF_CIT_SETUP_DRV(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops);
/* End of tfc_wwn_cit */
@@ -1206,7 +1220,7 @@ static struct configfs_item_operations target_fabric_discovery_item_ops = {
.store_attribute = target_fabric_discovery_attr_store,
};
-TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(discovery, &target_fabric_discovery_item_ops, NULL);
/* End of tfc_discovery_cit */
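For readability, this is approximately what one instance of the new macro, TF_CIT_SETUP_DRV(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL), expands to; the only change from TF_CIT_SETUP is that ct_attrs is taken from the driver-supplied tf_ops.tfc_tpg_attrib_attrs instead of being patched into tf_cit_tmpl by each fabric module:

static void target_fabric_setup_tpg_attrib_cit(struct target_fabric_configfs *tf)
{
	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;
	struct config_item_type *cit = &tfc->tfc_tpg_attrib_cit;
	struct configfs_attribute **attrs = tf->tf_ops.tfc_tpg_attrib_attrs;

	cit->ct_item_ops = &target_fabric_tpg_attrib_item_ops;
	cit->ct_group_ops = NULL;
	cit->ct_attrs = attrs;
	cit->ct_owner = tf->tf_module;
	pr_debug("Setup generic %s\n", "tpg_attrib");
}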
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 44620fb6bd45..f7e6e51aed36 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = FD_DEV(se_dev);
struct file *prot_fd = dev->fd_prot_file;
- struct scatterlist *sg;
loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
unsigned char *buf;
- u32 prot_size, len, size;
- int rc, ret = 1, i;
+ u32 prot_size;
+ int rc, ret = 1;
prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
se_dev->prot_length;
if (!is_write) {
- fd_prot->prot_buf = vzalloc(prot_size);
+ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
if (!fd_prot->prot_buf) {
pr_err("Unable to allocate fd_prot->prot_buf\n");
return -ENOMEM;
}
buf = fd_prot->prot_buf;
- fd_prot->prot_sg_nents = cmd->t_prot_nents;
- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
- fd_prot->prot_sg_nents, GFP_KERNEL);
+ fd_prot->prot_sg_nents = 1;
+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
+ GFP_KERNEL);
if (!fd_prot->prot_sg) {
pr_err("Unable to allocate fd_prot->prot_sg\n");
- vfree(fd_prot->prot_buf);
+ kfree(fd_prot->prot_buf);
return -ENOMEM;
}
- size = prot_size;
-
- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
-
- len = min_t(u32, PAGE_SIZE, size);
- sg_set_buf(sg, buf, len);
- size -= len;
- buf += len;
- }
+ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
+ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
}
if (is_write) {
@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
if (is_write || ret < 0) {
kfree(fd_prot->prot_sg);
- vfree(fd_prot->prot_buf);
+ kfree(fd_prot->prot_buf);
}
return ret;
@@ -331,36 +323,33 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file;
struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
+ struct iov_iter iter;
+ struct bio_vec *bvec;
+ ssize_t len = 0;
loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
int ret = 0, i;
- iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
- if (!iov) {
+ bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}
for_each_sg(sgl, sg, sgl_nents, i) {
- iov[i].iov_len = sg->length;
- iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
- }
+ bvec[i].bv_page = sg_page(sg);
+ bvec[i].bv_len = sg->length;
+ bvec[i].bv_offset = sg->offset;
- old_fs = get_fs();
- set_fs(get_ds());
+ len += sg->length;
+ }
+ iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
if (is_write)
- ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+ ret = vfs_iter_write(fd, &iter, &pos);
else
- ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
-
- set_fs(old_fs);
-
- for_each_sg(sgl, sg, sgl_nents, i)
- kunmap(sg_page(sg));
+ ret = vfs_iter_read(fd, &iter, &pos);
- kfree(iov);
+ kfree(bvec);
if (is_write) {
if (ret < 0 || ret != cmd->data_length) {
@@ -436,59 +425,17 @@ fd_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
-static unsigned char *
-fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
- unsigned int len)
-{
- struct se_device *se_dev = cmd->se_dev;
- unsigned int block_size = se_dev->dev_attrib.block_size;
- unsigned int i = 0, end;
- unsigned char *buf, *p, *kmap_buf;
-
- buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate fd_execute_write_same buf\n");
- return NULL;
- }
-
- kmap_buf = kmap(sg_page(sg)) + sg->offset;
- if (!kmap_buf) {
- pr_err("kmap() failed in fd_setup_write_same\n");
- kfree(buf);
- return NULL;
- }
- /*
- * Fill local *buf to contain multiple WRITE_SAME blocks up to
- * min(len, PAGE_SIZE)
- */
- p = buf;
- end = min_t(unsigned int, len, PAGE_SIZE);
-
- while (i < end) {
- memcpy(p, kmap_buf, block_size);
-
- i += block_size;
- p += block_size;
- }
- kunmap(sg_page(sg));
-
- return buf;
-}
-
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(se_dev);
- struct file *f = fd_dev->fd_file;
- struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
- sector_t nolb = sbc_get_write_same_sectors(cmd);
loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
- unsigned int len, len_tmp, iov_num;
- int i, rc;
- unsigned char *buf;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ struct iov_iter iter;
+ struct bio_vec *bvec;
+ unsigned int len = 0, i;
+ ssize_t ret;
if (!nolb) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -499,56 +446,92 @@ fd_execute_write_same(struct se_cmd *cmd)
" backends not supported\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- sg = &cmd->t_data_sg[0];
if (cmd->t_data_nents > 1 ||
- sg->length != cmd->se_dev->dev_attrib.block_size) {
+ cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
- " block_size: %u\n", cmd->t_data_nents, sg->length,
+ " block_size: %u\n",
+ cmd->t_data_nents,
+ cmd->t_data_sg[0].length,
cmd->se_dev->dev_attrib.block_size);
return TCM_INVALID_CDB_FIELD;
}
- len = len_tmp = nolb * se_dev->dev_attrib.block_size;
- iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
-
- buf = fd_setup_write_same_buf(cmd, sg, len);
- if (!buf)
+ bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- iov = vzalloc(sizeof(struct iovec) * iov_num);
- if (!iov) {
- pr_err("Unable to allocate fd_execute_write_same iovecs\n");
- kfree(buf);
+ for (i = 0; i < nolb; i++) {
+ bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
+ bvec[i].bv_len = cmd->t_data_sg[0].length;
+ bvec[i].bv_offset = cmd->t_data_sg[0].offset;
+
+ len += se_dev->dev_attrib.block_size;
+ }
+
+ iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
+ ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);
+
+ kfree(bvec);
+ if (ret < 0 || ret != len) {
+ pr_err("vfs_iter_write() returned %zd for write same\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- /*
- * Map the single fabric received scatterlist block now populated
- * in *buf into each iovec for I/O submission.
- */
- for (i = 0; i < iov_num; i++) {
- iov[i].iov_base = buf;
- iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
- len_tmp -= iov[i].iov_len;
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
+
+static int
+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
+ void *buf, size_t bufsize)
+{
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *prot_fd = fd_dev->fd_prot_file;
+ sector_t prot_length, prot;
+ loff_t pos = lba * se_dev->prot_length;
+
+ if (!prot_fd) {
+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
+ return -ENODEV;
}
- old_fs = get_fs();
- set_fs(get_ds());
- rc = vfs_writev(f, &iov[0], iov_num, &pos);
- set_fs(old_fs);
+ prot_length = nolb * se_dev->prot_length;
- vfree(iov);
- kfree(buf);
+ for (prot = 0; prot < prot_length;) {
+ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
+ ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
- if (rc < 0 || rc != len) {
- pr_err("vfs_writev() returned %d for write same\n", rc);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (ret != len) {
+ pr_err("vfs_write to prot file failed: %zd\n", ret);
+ return ret < 0 ? ret : -ENODEV;
+ }
+ prot += ret;
}
- target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
+static int
+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+ void *buf;
+ int rc;
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+ memset(buf, 0xff, PAGE_SIZE);
+
+ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
+
+ free_page((unsigned long)buf);
+
+ return rc;
+}
+
static sense_reason_t
fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
{
@@ -556,6 +539,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
struct inode *inode = file->f_mapping->host;
int ret;
+ if (cmd->se_dev->dev_attrib.pi_prot_type) {
+ ret = fd_do_prot_unmap(cmd, lba, nolb);
+ if (ret)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
struct block_device *bdev = inode->i_bdev;
@@ -595,7 +584,7 @@ fd_execute_write_same_unmap(struct se_cmd *cmd)
struct file *file = fd_dev->fd_file;
sector_t lba = cmd->t_task_lba;
sector_t nolb = sbc_get_write_same_sectors(cmd);
- int ret;
+ sense_reason_t ret;
if (!nolb) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -643,7 +632,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (data_direction == DMA_FROM_DEVICE) {
memset(&fd_prot, 0, sizeof(struct fd_prot));
- if (cmd->prot_type) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_rw(cmd, &fd_prot, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -651,23 +640,23 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
- if (ret > 0 && cmd->prot_type) {
+ if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
0, fd_prot.prot_sg, 0);
if (rc) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return rc;
}
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
}
} else {
memset(&fd_prot, 0, sizeof(struct fd_prot));
- if (cmd->prot_type) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
ret = fd_do_prot_rw(cmd, &fd_prot, false);
@@ -678,7 +667,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
0, fd_prot.prot_sg, 0);
if (rc) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return rc;
}
}
@@ -705,7 +694,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
- if (ret > 0 && cmd->prot_type) {
+ if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_rw(cmd, &fd_prot, true);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -714,7 +703,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (ret < 0) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -878,48 +867,28 @@ static int fd_init_prot(struct se_device *dev)
static int fd_format_prot(struct se_device *dev)
{
- struct fd_dev *fd_dev = FD_DEV(dev);
- struct file *prot_fd = fd_dev->fd_prot_file;
- sector_t prot_length, prot;
unsigned char *buf;
- loff_t pos = 0;
int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
- int rc, ret = 0, size, len;
+ int ret;
if (!dev->dev_attrib.pi_prot_type) {
pr_err("Unable to format_prot while pi_prot_type == 0\n");
return -ENODEV;
}
- if (!prot_fd) {
- pr_err("Unable to locate fd_dev->fd_prot_file\n");
- return -ENODEV;
- }
buf = vzalloc(unit_size);
if (!buf) {
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
- prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
- size = prot_length;
pr_debug("Using FILEIO prot_length: %llu\n",
- (unsigned long long)prot_length);
+ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
+ dev->prot_length);
memset(buf, 0xff, unit_size);
- for (prot = 0; prot < prot_length; prot += unit_size) {
- len = min(unit_size, size);
- rc = kernel_write(prot_fd, buf, len, pos);
- if (rc != len) {
- pr_err("vfs_write to prot file failed: %d\n", rc);
- ret = -ENODEV;
- goto out;
- }
- pos += len;
- size -= len;
- }
-
-out:
+ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
+ buf, unit_size);
vfree(buf);
return ret;
}
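The fd_do_rw() conversion above swaps the kmap()/set_fs()/vfs_readv() sequence for a bio_vec-backed iov_iter. A stand-alone sketch of that pattern, using only the kernel-internal calls the hunk itself relies on (iov_iter_bvec() with ITER_BVEC, vfs_iter_write()); "demo_write_sgl" is a made-up helper:

static ssize_t demo_write_sgl(struct file *f, struct scatterlist *sgl,
			      u32 nents, loff_t *pos)
{
	struct bio_vec *bvec;
	struct iov_iter iter;
	struct scatterlist *sg;
	size_t len = 0;
	ssize_t ret;
	int i;

	bvec = kcalloc(nents, sizeof(*bvec), GFP_KERNEL);
	if (!bvec)
		return -ENOMEM;

	/* Describe each SG element once: page, length, offset within the page. */
	for_each_sg(sgl, sg, nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;
		len += sg->length;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, nents, len);
	ret = vfs_iter_write(f, &iter, pos);	/* no set_fs() or kmap() needed */

	kfree(bvec);
	return ret;
}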
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index d4a4b0fb444a..1b7947c2510f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -444,7 +444,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd)
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
sector_t lba = cmd->t_task_lba;
sector_t nolb = sbc_get_write_same_sectors(cmd);
- int ret;
+ sense_reason_t ret;
ret = iblock_do_unmap(cmd, bdev, lba, nolb);
if (ret)
@@ -774,7 +774,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
sg_num--;
}
- if (cmd->prot_type) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
int rc = iblock_alloc_bip(cmd, bio_start);
if (rc)
goto fail_put_bios;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 60381db90026..874a9bc988d8 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -4,7 +4,13 @@
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
+/* target_core_configfs.c */
+extern struct configfs_subsystem *target_core_subsystem[];
+
/* target_core_device.c */
+extern struct mutex g_device_mutex;
+extern struct list_head g_device_list;
+
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2de6fb8cee8d..c1aa9655e96e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -78,6 +78,22 @@ enum preempt_type {
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int, int);
+static int is_reservation_holder(
+ struct t10_pr_registration *pr_res_holder,
+ struct t10_pr_registration *pr_reg)
+{
+ int pr_res_type;
+
+ if (pr_res_holder) {
+ pr_res_type = pr_res_holder->pr_res_type;
+
+ return pr_res_holder == pr_reg ||
+ pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+ pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG;
+ }
+ return 0;
+}
+
static sense_reason_t
target_scsi2_reservation_check(struct se_cmd *cmd)
{
@@ -664,7 +680,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
struct se_dev_entry *deve_tmp;
struct se_node_acl *nacl_tmp;
struct se_port *port, *port_tmp;
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
int ret;
/*
@@ -963,7 +979,7 @@ int core_scsi3_check_aptpl_registration(
}
static void __core_scsi3_dump_registration(
- struct target_core_fabric_ops *tfo,
+ const struct target_core_fabric_ops *tfo,
struct se_device *dev,
struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg,
@@ -1004,7 +1020,7 @@ static void __core_scsi3_add_registration(
enum register_type register_type,
int register_move)
{
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
@@ -1220,8 +1236,10 @@ static void __core_scsi3_free_registration(
struct t10_pr_registration *pr_reg,
struct list_head *preempt_and_abort_list,
int dec_holders)
+ __releases(&pr_tmpl->registration_lock)
+ __acquires(&pr_tmpl->registration_lock)
{
- struct target_core_fabric_ops *tfo =
+ const struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
@@ -1445,7 +1463,7 @@ core_scsi3_decode_spec_i_port(
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
- struct target_core_fabric_ops *tmp_tf_ops;
+ const struct target_core_fabric_ops *tmp_tf_ops;
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
@@ -2287,7 +2305,6 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if (pr_res_holder) {
- int pr_res_type = pr_res_holder->pr_res_type;
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -2298,9 +2315,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
* the logical unit, then the command shall be completed with
* RESERVATION CONFLICT status.
*/
- if ((pr_res_holder != pr_reg) &&
- (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
- (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
@@ -2409,7 +2424,7 @@ static void __core_scsi3_complete_pro_release(
int explicit,
int unreg)
{
- struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
int pr_res_type = 0, pr_res_scope = 0;
@@ -2477,7 +2492,6 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- int all_reg = 0;
sense_reason_t ret = 0;
if (!se_sess || !se_lun) {
@@ -2514,13 +2528,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
spin_unlock(&dev->dev_reservation_lock);
goto out_put_pr_reg;
}
- if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
- (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
- all_reg = 1;
- if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
/*
- * Non 'All Registrants' PR Type cases..
* Release request from a registered I_T nexus that is not a
* persistent reservation holder. return GOOD status.
*/
@@ -2726,7 +2736,7 @@ static void __core_scsi3_complete_pro_preempt(
enum preempt_type preempt_type)
{
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3111,7 +3121,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
- struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+ const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
@@ -3375,7 +3385,7 @@ after_iport_check:
* From spc4r17 section 5.7.8 Table 50 --
* Register behaviors for a REGISTER AND MOVE service action
*/
- if (pr_res_holder != pr_reg) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
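The new is_reservation_holder() helper folds the two all-registrants reservation types into the holder test, so the RESERVE, RELEASE and REGISTER_AND_MOVE paths above share one check. A usage sketch (the "demo_is_holder" wrapper is hypothetical):

static bool demo_is_holder(struct se_device *dev,
			   struct t10_pr_registration *pr_reg)
{
	struct t10_pr_registration *holder;
	bool ok;

	spin_lock(&dev->dev_reservation_lock);
	holder = dev->dev_pr_res_holder;
	/* true for the holder itself, or any registrant under *_ALLREG types */
	ok = is_reservation_holder(holder, pr_reg);
	spin_unlock(&dev->dev_reservation_lock);
	return ok;
}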
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 98e83ac5661b..a263bf5fab8d 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -139,10 +139,22 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
unsigned char *p;
while (total_sg_needed) {
+ unsigned int chain_entry = 0;
+
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
- sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+
+ /*
+ * Reserve extra element for chain entry
+ */
+ if (sg_per_table < total_sg_needed)
+ chain_entry = 1;
+
+#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
+
+ sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
GFP_KERNEL);
if (!sg) {
pr_err("Unable to allocate scatterlist array"
@@ -150,7 +162,16 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
return -ENOMEM;
}
- sg_init_table(sg, sg_per_table);
+ sg_init_table(sg, sg_per_table + chain_entry);
+
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+
+ if (i > 0) {
+ sg_chain(sg_table[i - 1].sg_table,
+ max_sg_per_table + 1, sg);
+ }
+
+#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@@ -382,6 +403,76 @@ static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page
return NULL;
}
+typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
+ unsigned int, struct scatterlist *, int);
+
+static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct rd_dev *dev = RD_DEV(se_dev);
+ struct rd_dev_sg_table *prot_table;
+ bool need_to_release = false;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+ u32 prot_npages __maybe_unused;
+ u64 tmp;
+ sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page -
+ prot_table->page_start_offset];
+
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
+
+ prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
+ PAGE_SIZE);
+
+ /*
+	 * Allocate temporarily contiguous scatterlist entries if the prot
+	 * pages straddle multiple scatterlist tables.
+ */
+ if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
+ int i;
+
+ prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
+ if (!prot_sg)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ need_to_release = true;
+ sg_init_table(prot_sg, prot_npages);
+
+ for (i = 0; i < prot_npages; i++) {
+ if (prot_page + i > prot_table->page_end_offset) {
+ prot_table = rd_get_prot_table(dev,
+ prot_page + i);
+ if (!prot_table) {
+ kfree(prot_sg);
+ return rc;
+ }
+ sg_unmark_end(&prot_sg[i - 1]);
+ }
+ prot_sg[i] = prot_table->sg_table[prot_page + i -
+ prot_table->page_start_offset];
+ }
+ }
+
+#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
+
+ rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
+ if (need_to_release)
+ kfree(prot_sg);
+
+ return rc;
+}
+
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
@@ -419,24 +510,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
- if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
- struct rd_dev_sg_table *prot_table;
- struct scatterlist *prot_sg;
- u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
- u32 prot_offset, prot_page;
-
- tmp = cmd->t_task_lba * se_dev->prot_length;
- prot_offset = do_div(tmp, PAGE_SIZE);
- prot_page = tmp;
-
- prot_table = rd_get_prot_table(dev, prot_page);
- if (!prot_table)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
- prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
-
- rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
- prot_sg, prot_offset);
+ if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+ data_direction == DMA_TO_DEVICE) {
+ rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
if (rc)
return rc;
}
@@ -502,24 +578,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
sg_miter_stop(&m);
- if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
- struct rd_dev_sg_table *prot_table;
- struct scatterlist *prot_sg;
- u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
- u32 prot_offset, prot_page;
-
- tmp = cmd->t_task_lba * se_dev->prot_length;
- prot_offset = do_div(tmp, PAGE_SIZE);
- prot_page = tmp;
-
- prot_table = rd_get_prot_table(dev, prot_page);
- if (!prot_table)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
- prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
-
- rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
- prot_sg, prot_offset);
+ if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+ data_direction == DMA_FROM_DEVICE) {
+ rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
if (rc)
return rc;
}
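When CONFIG_ARCH_HAS_SG_CHAIN is available, the hunks above reserve one extra scatterlist slot per table and link consecutive tables with sg_chain(), so the DIF verify path can walk protection pages that cross a table boundary with a single for_each_sg(). A minimal sketch of the chaining itself ("demo_chain_tables" is invented):

static struct scatterlist *demo_chain_tables(unsigned int per_table)
{
	struct scatterlist *a, *b;

	a = kcalloc(per_table + 1, sizeof(*a), GFP_KERNEL);	/* +1 chain slot */
	b = kcalloc(per_table, sizeof(*b), GFP_KERNEL);
	if (!a || !b) {
		kfree(a);
		kfree(b);
		return NULL;
	}
	sg_init_table(a, per_table + 1);
	sg_init_table(b, per_table);

	/* The last entry of 'a' becomes a link to 'b', not a data entry. */
	sg_chain(a, per_table + 1, b);
	return a;
}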
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 3e7297411110..8855781ac653 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -93,6 +93,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
+ int pi_prot_type = dev->dev_attrib.pi_prot_type;
+
unsigned char *rbuf;
unsigned char buf[32];
unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -114,8 +116,15 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
* Set P_TYPE and PROT_EN bits for DIF support
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type)
- buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+ /*
+ * Only override a device's pi_prot_type if no T10-PI is
+ * available, and sess_prot_type has been explicitly enabled.
+ */
+ if (!pi_prot_type)
+ pi_prot_type = sess->sess_prot_type;
+
+ if (pi_prot_type)
+ buf[12] = (pi_prot_type - 1) << 1 | 0x1;
}
if (dev->transport->get_lbppbe)
@@ -312,7 +321,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
return 0;
}
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
{
unsigned char *buf, *addr;
struct scatterlist *sg;
@@ -376,7 +385,7 @@ sbc_execute_rw(struct se_cmd *cmd)
cmd->data_direction);
}
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
{
struct se_device *dev = cmd->se_dev;
@@ -399,7 +408,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
return TCM_NO_SENSE;
}
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *write_sg = NULL, *sg;
@@ -414,11 +423,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
/*
* Handle early failure in transport_generic_request_failure(),
- * which will not have taken ->caw_mutex yet..
+ * which will not have taken ->caw_sem yet..
*/
- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
+ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
return TCM_NO_SENSE;
/*
+ * Handle special case for zero-length COMPARE_AND_WRITE
+ */
+ if (!cmd->data_length)
+ goto out;
+ /*
* Immediately exit + release dev->caw_sem if command has already
* been failed with a non-zero SCSI status.
*/
@@ -581,12 +595,13 @@ sbc_compare_and_write(struct se_cmd *cmd)
}
static int
-sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
bool is_write, struct se_cmd *cmd)
{
if (is_write) {
- cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
- TARGET_PROT_DOUT_INSERT;
+ cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
+ protect ? TARGET_PROT_DOUT_PASS :
+ TARGET_PROT_DOUT_INSERT;
switch (protect) {
case 0x0:
case 0x3:
@@ -610,8 +625,9 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
return -EINVAL;
}
} else {
- cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
- TARGET_PROT_DIN_STRIP;
+ cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
+ protect ? TARGET_PROT_DIN_PASS :
+ TARGET_PROT_DIN_STRIP;
switch (protect) {
case 0x0:
case 0x1:
@@ -644,11 +660,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
u32 sectors, bool is_write)
{
u8 protect = cdb[1] >> 5;
+ int sp_ops = cmd->se_sess->sup_prot_ops;
+ int pi_prot_type = dev->dev_attrib.pi_prot_type;
+ bool fabric_prot = false;
if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
- if (protect && !dev->dev_attrib.pi_prot_type) {
- pr_err("CDB contains protect bit, but device does not"
- " advertise PROTECT=1 feature bit\n");
+ if (unlikely(protect &&
+ !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
+ pr_err("CDB contains protect bit, but device + fabric does"
+ " not advertise PROTECT=1 feature bit\n");
return TCM_INVALID_CDB_FIELD;
}
if (cmd->prot_pto)
@@ -669,15 +689,32 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
cmd->reftag_seed = cmd->t_task_lba;
break;
case TARGET_DIF_TYPE0_PROT:
+ /*
+ * See if the fabric supports T10-PI, and the session has been
+		 * configured to allow exporting the PROTECT=1 feature bit with backend
+ * devices that don't support T10-PI.
+ */
+ fabric_prot = is_write ?
+ !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
+ !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));
+
+ if (fabric_prot && cmd->se_sess->sess_prot_type) {
+ pi_prot_type = cmd->se_sess->sess_prot_type;
+ break;
+ }
+ if (!protect)
+ return TCM_NO_SENSE;
+ /* Fallthrough */
default:
- return TCM_NO_SENSE;
+ pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
+ "PROTECT: 0x%02x\n", cdb[0], protect);
+ return TCM_INVALID_CDB_FIELD;
}
- if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
- is_write, cmd))
+ if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
return TCM_INVALID_CDB_FIELD;
- cmd->prot_type = dev->dev_attrib.pi_prot_type;
+ cmd->prot_type = pi_prot_type;
cmd->prot_length = dev->prot_length * sectors;
/**
@@ -1166,14 +1203,16 @@ sbc_dif_generate(struct se_cmd *cmd)
sdt = paddr + offset;
sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
dev->dev_attrib.block_size));
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
sdt->app_tag = 0;
- pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+ pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
" app_tag: 0x%04x ref_tag: %u\n",
- (unsigned long long)sector, sdt->guard_tag,
- sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+ (cmd->data_direction == DMA_TO_DEVICE) ?
+ "WRITE" : "READ", (unsigned long long)sector,
+ sdt->guard_tag, sdt->app_tag,
+ be32_to_cpu(sdt->ref_tag));
sector++;
offset += sizeof(struct se_dif_v1_tuple);
@@ -1185,12 +1224,16 @@ sbc_dif_generate(struct se_cmd *cmd)
}
static sense_reason_t
-sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
const void *p, sector_t sector, unsigned int ei_lba)
{
+ struct se_device *dev = cmd->se_dev;
int block_size = dev->dev_attrib.block_size;
__be16 csum;
+ if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+ goto check_ref;
+
csum = cpu_to_be16(crc_t10dif(p, block_size));
if (sdt->guard_tag != csum) {
@@ -1200,7 +1243,11 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
}
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+check_ref:
+ if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
+ return 0;
+
+ if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
" sector MSB: 0x%08x\n", (unsigned long long)sector,
@@ -1208,7 +1255,7 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
}
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+ if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
be32_to_cpu(sdt->ref_tag) != ei_lba) {
pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
" ei_lba: 0x%08x\n", (unsigned long long)sector,
@@ -1229,6 +1276,9 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
unsigned int i, len, left;
unsigned int offset = sg_off;
+ if (!sg)
+ return;
+
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
@@ -1292,7 +1342,7 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
(unsigned long long)sector, sdt->guard_tag,
sdt->app_tag, be32_to_cpu(sdt->ref_tag));
- rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
ei_lba);
if (rc) {
kunmap_atomic(paddr);
@@ -1309,6 +1359,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
kunmap_atomic(paddr);
kunmap_atomic(daddr);
}
+ if (!sg)
+ return 0;
+
sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
return 0;
@@ -1353,7 +1406,7 @@ __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
continue;
}
- rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
ei_lba);
if (rc) {
kunmap_atomic(paddr);
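With fabric-only protection, sbc_check_prot() now picks the protection operation from both the CDB PROTECT bits and whether the session (rather than the backend) supplies T10-PI. A condensed restatement of the write-side selection above, not a drop-in replacement ("demo_dout_prot_op" is invented):

static enum target_prot_op demo_dout_prot_op(bool fabric_prot, u8 protect)
{
	if (fabric_prot)
		return TARGET_PROT_DOUT_STRIP;	/* session PI, unprotected backend */
	if (protect)
		return TARGET_PROT_DOUT_PASS;	/* initiator-supplied PI passed on */
	return TARGET_PROT_DOUT_INSERT;		/* target generates PI itself */
}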
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 6c8bd6bc175c..7912aa124385 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -103,10 +103,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[5] |= 0x8;
/*
* Set Protection (PROTECT) bit when DIF has been enabled on the
- * device, and the transport supports VERIFY + PASS.
+ * device, and the fabric supports VERIFY + PASS. Also report
+ * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
+ * to unprotected devices.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type)
+ if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
buf[5] |= 0x1;
}
@@ -467,9 +469,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
* only for TYPE3 protection.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
buf[4] = 0x5;
- else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+ else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
buf[4] = 0x4;
}
@@ -861,7 +865,7 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
* TAG field.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type)
+ if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
p[5] |= 0x80;
}
@@ -1099,7 +1103,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
unsigned char *buf;
unsigned char tbuf[SE_MODE_PAGE_BUF];
int length;
- int ret = 0;
+ sense_reason_t ret = 0;
int i;
if (!cmd->data_length) {
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fa5e157db47b..315ec3458eeb 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -125,8 +125,8 @@ void core_tmr_abort_task(
if (dev != se_cmd->se_dev)
continue;
- /* skip se_cmd associated with tmr */
- if (tmr->task_cmd == se_cmd)
+ /* skip task management functions, including tmr->task_cmd */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
continue;
ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 0696de9553d3..47f064415bf6 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -672,7 +672,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
}
int core_tpg_register(
- struct target_core_fabric_ops *tfo,
+ const struct target_core_fabric_ops *tfo,
struct se_wwn *se_wwn,
struct se_portal_group *se_tpg,
void *tpg_fabric_ptr,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ac3cbabdbdf0..3fe5cb240b6f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -322,6 +322,7 @@ void __transport_register_session(
struct se_session *se_sess,
void *fabric_sess_ptr)
{
+ const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
se_sess->se_tpg = se_tpg;
@@ -334,6 +335,21 @@ void __transport_register_session(
*/
if (se_nacl) {
/*
+ *
+ * Determine if fabric allows for T10-PI feature bits exposed to
+ * initiators for device backends with !dev->dev_attrib.pi_prot_type.
+ *
+ * If so, then always save prot_type on a per se_node_acl node
+ * basis and re-instate the previous sess_prot_type to avoid
+		 * disabling PI beneath any LUNs the initiator side has already
+		 * registered.
+ */
+ if (se_nacl->saved_prot_type)
+ se_sess->sess_prot_type = se_nacl->saved_prot_type;
+ else if (tfo->tpg_check_prot_fabric_only)
+ se_sess->sess_prot_type = se_nacl->saved_prot_type =
+ tfo->tpg_check_prot_fabric_only(se_tpg);
+ /*
* If the fabric module supports an ISID based TransportID,
* save this value in binary from the fabric I_T Nexus now.
*/
@@ -404,6 +420,30 @@ void target_put_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_put_session);
+ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
+{
+ struct se_session *se_sess;
+ ssize_t len = 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
+ if (!se_sess->se_node_acl)
+ continue;
+ if (!se_sess->se_node_acl->dynamic_node_acl)
+ continue;
+ if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
+ break;
+
+ len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
+ se_sess->se_node_acl->initiatorname);
+ len += 1; /* Include NULL terminator */
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(target_show_dynamic_sessions);
+
static void target_complete_nacl(struct kref *kref)
{
struct se_node_acl *nacl = container_of(kref,
@@ -462,7 +502,7 @@ EXPORT_SYMBOL(transport_free_session);
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
- struct target_core_fabric_ops *se_tfo;
+ const struct target_core_fabric_ops *se_tfo;
struct se_node_acl *se_nacl;
unsigned long flags;
bool comp_nacl = true;
@@ -1118,7 +1158,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
*/
void transport_init_se_cmd(
struct se_cmd *cmd,
- struct target_core_fabric_ops *tfo,
+ const struct target_core_fabric_ops *tfo,
struct se_session *se_sess,
u32 data_length,
int data_direction,
@@ -1570,6 +1610,8 @@ EXPORT_SYMBOL(target_submit_tmr);
* has completed.
*/
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
{
bool was_active = false;
@@ -1615,11 +1657,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
transport_complete_task_attr(cmd);
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
- * callback is expected to drop the per device ->caw_mutex.
+ * callback is expected to drop the per device ->caw_sem.
*/
if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd);
+ cmd->transport_complete_callback(cmd, false);
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
@@ -1706,6 +1748,41 @@ void __target_execute_cmd(struct se_cmd *cmd)
}
}
+static int target_write_prot_action(struct se_cmd *cmd)
+{
+ u32 sectors;
+ /*
+ * Perform WRITE_INSERT of PI using software emulation when backend
+ * device has PI enabled, if the transport has not already generated
+ * PI using hardware WRITE_INSERT offload.
+ */
+ switch (cmd->prot_op) {
+ case TARGET_PROT_DOUT_INSERT:
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+ sbc_dif_generate(cmd);
+ break;
+ case TARGET_PROT_DOUT_STRIP:
+ if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
+ break;
+
+ sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
+ cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba,
+ sectors, 0, NULL, 0);
+ if (unlikely(cmd->pi_err)) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
+ transport_generic_request_failure(cmd, cmd->pi_err);
+ return -1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1785,15 +1862,9 @@ void target_execute_cmd(struct se_cmd *cmd)
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
- /*
- * Perform WRITE_INSERT of PI using software emulation when backend
- * device has PI enabled, if the transport has not already generated
- * PI using hardware WRITE_INSERT offload.
- */
- if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
- if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
- sbc_dif_generate(cmd);
- }
+
+ if (target_write_prot_action(cmd))
+ return;
if (target_handle_task_attr(cmd)) {
spin_lock_irq(&cmd->t_state_lock);
@@ -1919,16 +1990,28 @@ static void transport_handle_queue_full(
schedule_work(&cmd->se_dev->qf_work_queue);
}
-static bool target_check_read_strip(struct se_cmd *cmd)
+static bool target_read_prot_action(struct se_cmd *cmd)
{
sense_reason_t rc;
- if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
- rc = sbc_dif_read_strip(cmd);
- if (rc) {
- cmd->pi_err = rc;
- return true;
+ switch (cmd->prot_op) {
+ case TARGET_PROT_DIN_STRIP:
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+ rc = sbc_dif_read_strip(cmd);
+ if (rc) {
+ cmd->pi_err = rc;
+ return true;
+ }
}
+ break;
+ case TARGET_PROT_DIN_INSERT:
+ if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
+ break;
+
+ sbc_dif_generate(cmd);
+ break;
+ default:
+ break;
}
return false;
@@ -1975,8 +2058,12 @@ static void target_complete_ok_work(struct work_struct *work)
if (cmd->transport_complete_callback) {
sense_reason_t rc;
- rc = cmd->transport_complete_callback(cmd);
+ rc = cmd->transport_complete_callback(cmd, true);
if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ !cmd->data_length)
+ goto queue_rsp;
+
return;
} else if (rc) {
ret = transport_send_check_condition_and_sense(cmd,
@@ -1990,6 +2077,7 @@ static void target_complete_ok_work(struct work_struct *work)
}
}
+queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
@@ -2003,8 +2091,7 @@ static void target_complete_ok_work(struct work_struct *work)
* backend had PI enabled, if the transport will not be
* performing hardware READ_STRIP offload.
*/
- if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
- target_check_read_strip(cmd)) {
+ if (target_read_prot_action(cmd)) {
ret = transport_send_check_condition_and_sense(cmd,
cmd->pi_err, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2094,6 +2181,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
static inline void transport_free_pages(struct se_cmd *cmd)
{
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ /*
+ * Release special case READ buffer payload required for
+ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ transport_free_sgl(cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ cmd->t_bidi_data_sg = NULL;
+ cmd->t_bidi_data_nents = 0;
+ }
transport_reset_sgl_orig(cmd);
return;
}
@@ -2246,6 +2343,7 @@ sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
int ret = 0;
+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
/*
 * Determine if the TCM fabric module has already allocated physical
@@ -2254,7 +2352,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
cmd->data_length) {
- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
if ((cmd->se_cmd_flags & SCF_BIDI) ||
(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
@@ -2285,6 +2382,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length, zero_flag);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->data_length) {
+ /*
+ * Special case for COMPARE_AND_WRITE with fabrics
+ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
+ */
+ u32 caw_length = cmd->t_task_nolb *
+ cmd->se_dev->dev_attrib.block_size;
+
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+ &cmd->t_bidi_data_nents,
+ caw_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* If this command is not a write we can execute it right here,
@@ -2376,10 +2487,8 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref) {
+ if (ack_kref)
kref_get(&se_cmd->cmd_kref);
- se_cmd->se_cmd_flags |= SCF_ACK_KREF;
- }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2398,6 +2507,7 @@ out:
EXPORT_SYMBOL(target_get_sess_cmd);
static void target_release_cmd_kref(struct kref *kref)
+ __releases(&se_cmd->se_sess->sess_cmd_lock)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
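
The target_core_transport.c hunks above fold the T10-PI handling for both data directions into target_write_prot_action() and target_read_prot_action(): each switches on cmd->prot_op and only falls back to software DIF emulation (sbc_dif_generate()/sbc_dif_verify_write()) when the session's sup_prot_ops say the fabric cannot do the work in hardware. The standalone sketch below restates that dispatch pattern with invented types and stub helpers; it is illustrative only and is not the kernel API.

/*
 * Minimal sketch (not kernel code) of the dispatch used by
 * target_write_prot_action() above: the requested protection operation is
 * compared against the session's advertised capabilities, and software DIF
 * emulation runs only when no hardware offload is available.
 */
#include <stdio.h>

enum prot_op  { PROT_NORMAL, PROT_DOUT_INSERT, PROT_DOUT_STRIP };
enum prot_cap { CAP_DOUT_INSERT = 1 << 0, CAP_DOUT_STRIP = 1 << 1 };

/* Stand-ins for sbc_dif_generate()/sbc_dif_verify_write() */
static void sw_dif_generate(void) { puts("software WRITE_INSERT"); }
static int  sw_dif_verify(void)   { puts("software verify before strip"); return 0; }

static int write_prot_action(enum prot_op op, unsigned int caps)
{
	switch (op) {
	case PROT_DOUT_INSERT:
		if (!(caps & CAP_DOUT_INSERT))	/* no HW offload -> emulate */
			sw_dif_generate();
		break;
	case PROT_DOUT_STRIP:
		if (caps & CAP_DOUT_STRIP)	/* HW strips PI for us */
			break;
		return sw_dif_verify();		/* verify before stripping */
	default:
		break;
	}
	return 0;
}

int main(void)
{
	write_prot_action(PROT_DOUT_INSERT, 0);			/* emulated */
	write_prot_action(PROT_DOUT_INSERT, CAP_DOUT_INSERT);	/* offloaded */
	return 0;
}
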
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1a1bcf71ec9d..dbc872a6c981 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -344,8 +344,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
tcmu_flush_dcache_range(entry, sizeof(*entry));
- tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD);
- tcmu_hdr_set_len(&entry->hdr, pad_size);
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
+ tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
+ entry->hdr.cmd_id = 0; /* not used for PAD */
+ entry->hdr.kflags = 0;
+ entry->hdr.uflags = 0;
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
@@ -355,9 +358,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
tcmu_flush_dcache_range(entry, sizeof(*entry));
- tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD);
- tcmu_hdr_set_len(&entry->hdr, command_size);
- entry->cmd_id = tcmu_cmd->cmd_id;
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
+ tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+ entry->hdr.kflags = 0;
+ entry->hdr.uflags = 0;
/*
* Fix up iovecs, and handle if allocation in data ring wrapped.
@@ -376,7 +381,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
/* Even iov_base is relative to mb_addr */
iov->iov_len = copy_bytes;
- iov->iov_base = (void *) udev->data_off + udev->data_head;
+ iov->iov_base = (void __user *) udev->data_off +
+ udev->data_head;
iov_cnt++;
iov++;
@@ -388,7 +394,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
copy_bytes = sg->length - copy_bytes;
iov->iov_len = copy_bytes;
- iov->iov_base = (void *) udev->data_off + udev->data_head;
+ iov->iov_base = (void __user *) udev->data_off +
+ udev->data_head;
if (se_cmd->data_direction == DMA_TO_DEVICE) {
to = (void *) mb + udev->data_off + udev->data_head;
@@ -405,6 +412,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
kunmap_atomic(from);
}
entry->req.iov_cnt = iov_cnt;
+ entry->req.iov_bidi_cnt = 0;
+ entry->req.iov_dif_cnt = 0;
/* All offsets relative to mb_addr, not start of entry! */
cdb_off = CMDR_OFF + cmd_head + base_command_size;
@@ -462,6 +471,17 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
return;
}
+ if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
+ UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+ pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
+ cmd->se_cmd);
+ transport_generic_request_failure(cmd->se_cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+ cmd->se_cmd = NULL;
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return;
+ }
+
if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
se_cmd->scsi_sense_length);
@@ -540,14 +560,16 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_flush_dcache_range(entry, sizeof(*entry));
- if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) {
- UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
+ if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
+ UPDATE_HEAD(udev->cmdr_last_cleaned,
+ tcmu_hdr_get_len(entry->hdr.len_op),
+ udev->cmdr_size);
continue;
}
- WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD);
+ WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
spin_lock(&udev->commands_lock);
- cmd = idr_find(&udev->commands, entry->cmd_id);
+ cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
if (cmd)
idr_remove(&udev->commands, cmd->cmd_id);
spin_unlock(&udev->commands_lock);
@@ -560,7 +582,9 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_handle_completion(cmd, entry);
- UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
+ UPDATE_HEAD(udev->cmdr_last_cleaned,
+ tcmu_hdr_get_len(entry->hdr.len_op),
+ udev->cmdr_size);
handled++;
}
@@ -838,14 +862,14 @@ static int tcmu_configure_device(struct se_device *dev)
udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;
mb = udev->mb_addr;
- mb->version = 1;
+ mb->version = TCMU_MAILBOX_VERSION;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
WARN_ON(!PAGE_ALIGNED(udev->data_off));
WARN_ON(udev->data_size % PAGE_SIZE);
- info->version = "1";
+ info->version = xstr(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t) udev->mb_addr;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 33ac39bf75e5..a600ff15dcfd 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -34,20 +34,12 @@
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"
static struct workqueue_struct *xcopy_wq = NULL;
-/*
- * From target_core_device.c
- */
-extern struct mutex g_device_mutex;
-extern struct list_head g_device_list;
-/*
- * From target_core_configfs.c
- */
-extern struct configfs_subsystem *target_core_subsystem[];
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
@@ -433,7 +425,7 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
return 0;
}
-static struct target_core_fabric_ops xcopy_pt_tfo = {
+static const struct target_core_fabric_ops xcopy_pt_tfo = {
.get_fabric_name = xcopy_pt_get_fabric_name,
.get_task_tag = xcopy_pt_get_tag,
.get_cmd_state = xcopy_pt_get_cmd_state,
@@ -548,33 +540,22 @@ static void target_xcopy_setup_pt_port(
}
}
-static int target_xcopy_init_pt_lun(
- struct xcopy_pt_cmd *xpt_cmd,
- struct xcopy_op *xop,
- struct se_device *se_dev,
- struct se_cmd *pt_cmd,
- bool remote_port)
+static void target_xcopy_init_pt_lun(struct se_device *se_dev,
+ struct se_cmd *pt_cmd, bool remote_port)
{
/*
 * Don't allocate + init a pt_cmd->se_lun if honoring local port for
* reservations. The pt_cmd->se_lun pointer will be setup from within
* target_xcopy_setup_pt_port()
*/
- if (!remote_port) {
- pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
- return 0;
+ if (remote_port) {
+ pr_debug("Setup emulated se_dev: %p from se_dev\n",
+ pt_cmd->se_dev);
+ pt_cmd->se_lun = &se_dev->xcopy_lun;
+ pt_cmd->se_dev = se_dev;
}
- pt_cmd->se_lun = &se_dev->xcopy_lun;
- pt_cmd->se_dev = se_dev;
-
- pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
- pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
-
- pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
- pt_cmd->se_lun->lun_se_dev);
-
- return 0;
+ pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
static int target_xcopy_setup_pt_cmd(
@@ -592,11 +573,8 @@ static int target_xcopy_setup_pt_cmd(
* Setup LUN+port to honor reservations based upon xop->op_origin for
* X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
*/
- rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
- if (rc < 0) {
- ret = rc;
- goto out;
- }
+ target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
+
xpt_cmd->xcopy_op = xop;
target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index a0bcfd3e7e7d..881deb3d499a 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -129,7 +129,6 @@ struct ft_cmd {
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
-extern struct target_fabric_configfs *ft_configfs;
extern unsigned int ft_debug_logging;
/*
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index efdcb9663a1a..65dce1345966 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -48,7 +48,7 @@
#include "tcm_fc.h"
-struct target_fabric_configfs *ft_configfs;
+static const struct target_core_fabric_ops ft_fabric_ops;
static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
@@ -337,7 +337,7 @@ static struct se_portal_group *ft_add_tpg(
return NULL;
}
- ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
+ ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg,
tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
destroy_workqueue(wq);
@@ -507,7 +507,9 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
return tpg->index;
}
-static struct target_core_fabric_ops ft_fabric_ops = {
+static const struct target_core_fabric_ops ft_fabric_ops = {
+ .module = THIS_MODULE,
+ .name = "fc",
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
.tpg_get_wwn = ft_get_fabric_wwn,
@@ -552,62 +554,10 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = &ft_add_acl,
.fabric_drop_nodeacl = &ft_del_acl,
-};
-
-static int ft_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
- if (IS_ERR(fabric)) {
- pr_err("%s: target_fabric_configfs_init() failed!\n",
- __func__);
- return PTR_ERR(fabric);
- }
- fabric->tf_ops = ft_fabric_ops;
-
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
- ft_nacl_base_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_debug("target_fabric_configfs_register() for"
- " FC Target failed!\n");
- target_fabric_configfs_free(fabric);
- return -1;
- }
-
- /*
- * Setup our local pointer to *fabric.
- */
- ft_configfs = fabric;
- return 0;
-}
-static void ft_deregister_configfs(void)
-{
- if (!ft_configfs)
- return;
- target_fabric_configfs_deregister(ft_configfs);
- ft_configfs = NULL;
-}
+ .tfc_wwn_attrs = ft_wwn_attrs,
+ .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs,
+};
static struct notifier_block ft_notifier = {
.notifier_call = ft_lport_notify
@@ -615,15 +565,24 @@ static struct notifier_block ft_notifier = {
static int __init ft_init(void)
{
- if (ft_register_configfs())
- return -1;
- if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
- ft_deregister_configfs();
- return -1;
- }
+ int ret;
+
+ ret = target_register_template(&ft_fabric_ops);
+ if (ret)
+ goto out;
+
+ ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
+ if (ret)
+ goto out_unregister_template;
+
blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
fc_lport_iterate(ft_lport_add, NULL);
return 0;
+
+out_unregister_template:
+ target_unregister_template(&ft_fabric_ops);
+out:
+ return ret;
}
static void __exit ft_exit(void)
@@ -632,7 +591,7 @@ static void __exit ft_exit(void)
&ft_notifier);
fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
fc_lport_iterate(ft_lport_del, NULL);
- ft_deregister_configfs();
+ target_unregister_template(&ft_fabric_ops);
synchronize_rcu();
}
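
The tcm_fc conversion above drops the target_fabric_configfs_init()/target_fabric_configfs_register() boilerplate in favour of a single const target_core_fabric_ops template (carrying .module, .name and the attribute lists) that is handed to target_register_template() at init and target_unregister_template() at exit. The standalone sketch below shows only the general shape of that pattern, with a hypothetical register_template()/unregister_template() pair standing in for the kernel interfaces.

/*
 * Illustrative only: instead of allocating a descriptor at init time and
 * copying an ops struct into it, the driver declares one const template
 * and passes a pointer to it to a register/unregister pair.
 */
#include <stdio.h>

struct fabric_template {
	const char *name;
	int (*get_index)(void);
};

static int example_get_index(void) { return 42; }

/* Hypothetical one-slot registry standing in for the core's template list */
static const struct fabric_template *registered;

static int register_template(const struct fabric_template *t)
{
	if (registered)
		return -1;
	registered = t;
	printf("registered fabric '%s'\n", t->name);
	return 0;
}

static void unregister_template(const struct fabric_template *t)
{
	if (registered == t)
		registered = NULL;
}

static const struct fabric_template example_ops = {
	.name	   = "fc",
	.get_index = example_get_index,
};

int main(void)
{
	if (register_template(&example_ops))
		return 1;
	unregister_template(&example_ops);
	return 0;
}
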
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 12623bc02f46..725718e97a0b 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -206,51 +206,57 @@ static void find_target_mwait(void)
}
+struct pkg_cstate_info {
+ bool skip;
+ int msr_index;
+ int cstate_id;
+};
+
+#define PKG_CSTATE_INIT(id) { \
+ .msr_index = MSR_PKG_C##id##_RESIDENCY, \
+ .cstate_id = id \
+ }
+
+static struct pkg_cstate_info pkg_cstates[] = {
+ PKG_CSTATE_INIT(2),
+ PKG_CSTATE_INIT(3),
+ PKG_CSTATE_INIT(6),
+ PKG_CSTATE_INIT(7),
+ PKG_CSTATE_INIT(8),
+ PKG_CSTATE_INIT(9),
+ PKG_CSTATE_INIT(10),
+ {NULL},
+};
+
static bool has_pkg_state_counter(void)
{
- u64 tmp;
- return !rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &tmp) ||
- !rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &tmp) ||
- !rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &tmp) ||
- !rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &tmp);
+ u64 val;
+ struct pkg_cstate_info *info = pkg_cstates;
+
+ /* check if any one of the counter msrs exists */
+ while (info->msr_index) {
+ if (!rdmsrl_safe(info->msr_index, &val))
+ return true;
+ info++;
+ }
+
+ return false;
}
static u64 pkg_state_counter(void)
{
u64 val;
u64 count = 0;
-
- static bool skip_c2;
- static bool skip_c3;
- static bool skip_c6;
- static bool skip_c7;
-
- if (!skip_c2) {
- if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val))
- count += val;
- else
- skip_c2 = true;
- }
-
- if (!skip_c3) {
- if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val))
- count += val;
- else
- skip_c3 = true;
- }
-
- if (!skip_c6) {
- if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val))
- count += val;
- else
- skip_c6 = true;
- }
-
- if (!skip_c7) {
- if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val))
- count += val;
- else
- skip_c7 = true;
+ struct pkg_cstate_info *info = pkg_cstates;
+
+ while (info->msr_index) {
+ if (!info->skip) {
+ if (!rdmsrl_safe(info->msr_index, &val))
+ count += val;
+ else
+ info->skip = true;
+ }
+ info++;
}
return count;
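
The intel_powerclamp hunks above replace four copies of the same rdmsrl_safe() check with a sentinel-terminated pkg_cstates[] table; PKG_CSTATE_INIT() pastes the C-state number into the MSR name with ##, and the walkers mark entries to skip once a read fails. Below is a standalone sketch of the same table-plus-sentinel pattern, using an invented read_counter() in place of MSR accesses.

/*
 * Standalone sketch of the table-driven pattern introduced above: a macro
 * fills in a sentinel-terminated descriptor array, and the walker marks
 * entries to skip once they fail.  The counters here are made up; the
 * kernel code reads package C-state residency MSRs instead.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct counter_info {
	bool skip;
	int  id;
	const char *name;
};

#define COUNTER_INIT(n) { .id = (n), .name = "C" #n }

static struct counter_info counters[] = {
	COUNTER_INIT(2),
	COUNTER_INIT(3),
	COUNTER_INIT(6),
	COUNTER_INIT(7),
	{ 0 }			/* sentinel: id == 0 terminates the walk */
};

/* Pretend only even-numbered counters exist on this "CPU". */
static int read_counter(int id, uint64_t *val)
{
	if (id % 2)
		return -1;
	*val = 1000u * (unsigned int)id;
	return 0;
}

static uint64_t total_residency(void)
{
	uint64_t val, count = 0;
	struct counter_info *info = counters;

	while (info->id) {
		if (!info->skip) {
			if (!read_counter(info->id, &val))
				count += val;
			else
				info->skip = true;	/* never retry */
		}
		info++;
	}
	return count;
}

int main(void)
{
	printf("total = %llu\n", (unsigned long long)total_residency());
	return 0;
}
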
@@ -667,7 +673,7 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
};
/* runs on Nehalem and later */
-static const struct x86_cpu_id intel_powerclamp_ids[] = {
+static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
{ X86_VENDOR_INTEL, 6, 0x1a},
{ X86_VENDOR_INTEL, 6, 0x1c},
{ X86_VENDOR_INTEL, 6, 0x1e},
@@ -689,12 +695,13 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x46},
{ X86_VENDOR_INTEL, 6, 0x4c},
{ X86_VENDOR_INTEL, 6, 0x4d},
+ { X86_VENDOR_INTEL, 6, 0x4f},
{ X86_VENDOR_INTEL, 6, 0x56},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
-static int powerclamp_probe(void)
+static int __init powerclamp_probe(void)
{
if (!x86_match_cpu(intel_powerclamp_ids)) {
pr_err("Intel powerclamp does not run on family %d model %d\n",
@@ -760,7 +767,7 @@ file_error:
debugfs_remove_recursive(debug_dir);
}
-static int powerclamp_init(void)
+static int __init powerclamp_init(void)
{
int retval;
int bitmap_size;
@@ -809,7 +816,7 @@ exit_free:
}
module_init(powerclamp_init);
-static void powerclamp_exit(void)
+static void __exit powerclamp_exit(void)
{
unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
end_power_clamp();
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 3aa46ac7cdbc..cd8f5f93b42c 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -529,7 +529,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(thermal->pclk)) {
- error = PTR_ERR(thermal->clk);
+ error = PTR_ERR(thermal->pclk);
dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n",
error);
return error;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 0531c752fbbb..8e391812e503 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -103,7 +103,7 @@ static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
int trip)
{
- return 0;
+ return false;
}
static inline const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b24aa010f68c..c01f45095877 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -419,4 +419,51 @@ config DA_CONSOLE
help
This enables a console on a Dash channel.
+config MIPS_EJTAG_FDC_TTY
+ bool "MIPS EJTAG Fast Debug Channel TTY"
+ depends on MIPS_CDMM
+ help
+ This enables a TTY and console on the MIPS EJTAG Fast Debug Channels,
+ if they are present. This can be useful when working with an EJTAG
+ probe which supports it, to get console output and a login prompt via
+ EJTAG without needing to connect a serial cable.
+
+ TTY devices are named e.g. ttyFDC3c2 (for FDC channel 2 of the FDC on
+ CPU3).
+
+ The console can be enabled with console=fdc1 (for FDC channel 1 on all
+ CPUs). Do not use the console unless there is a debug probe attached
+ to drain the FDC TX FIFO.
+
+ If unsure, say N.
+
+config MIPS_EJTAG_FDC_EARLYCON
+ bool "Early FDC console"
+ depends on MIPS_EJTAG_FDC_TTY
+ help
+ This registers a console on FDC channel 1 very early during boot (from
+ MIPS arch code). This is useful for bring-up and debugging early boot
+ issues.
+
+ Do not enable unless there is a debug probe attached to drain the FDC
+ TX FIFO.
+
+ If unsure, say N.
+
+config MIPS_EJTAG_FDC_KGDB
+ bool "Use KGDB over an FDC channel"
+ depends on MIPS_EJTAG_FDC_TTY && KGDB
+ default y
+ help
+ This enables the use of KGDB over an FDC channel, allowing KGDB to be
+ used remotely or when a serial port isn't available.
+
+config MIPS_EJTAG_FDC_KGDB_CHAN
+ int "KGDB FDC channel"
+ depends on MIPS_EJTAG_FDC_KGDB
+ range 2 15
+ default 3
+ help
+ FDC channel number to use for KGDB.
+
endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 58ad1c05b7f8..5817e2397463 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -29,5 +29,6 @@ obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
obj-$(CONFIG_DA_TTY) += metag_da.o
+obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o
obj-y += ipwireless/
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 967b2c2b7cf1..0655fecf8240 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -229,7 +229,6 @@ static int goldfish_tty_probe(struct platform_device *pdev)
{
struct goldfish_tty *qtty;
int ret = -EINVAL;
- int i;
struct resource *r;
struct device *ttydev;
void __iomem *base;
@@ -293,7 +292,6 @@ static int goldfish_tty_probe(struct platform_device *pdev)
mutex_unlock(&goldfish_tty_lock);
return 0;
- tty_unregister_device(goldfish_tty_driver, i);
err_tty_register_device_failed:
free_irq(irq, pdev);
err_request_irq_failed:
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 071551bf3e9a..543b234e70fc 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -41,7 +41,7 @@
static const char hvc_opal_name[] = "hvc_opal";
-static struct of_device_id hvc_opal_match[] = {
+static const struct of_device_id hvc_opal_match[] = {
{ .name = "serial", .compatible = "ibm,opal-console-raw" },
{ .name = "serial", .compatible = "ibm,opal-console-hvsi" },
{ },
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index f1e57425e39f..5bab1c684bb1 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
return 0;
}
+static void xen_console_update_evtchn(struct xencons_info *info)
+{
+ if (xen_hvm_domain()) {
+ uint64_t v;
+ int err;
+
+ err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+ if (!err && v)
+ info->evtchn = v;
+ } else
+ info->evtchn = xen_start_info->console.domU.evtchn;
+}
+
void xen_console_resume(void)
{
struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
- if (info != NULL && info->irq)
+ if (info != NULL && info->irq) {
+ if (!xen_initial_domain())
+ xen_console_update_evtchn(info);
rebind_evtchn_irq(info->evtchn, info->irq);
+ }
}
static void xencons_disconnect_backend(struct xencons_info *info)
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index 5c77e1eac4ee..ad7031a4f3c4 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1455,7 +1455,7 @@ static void __handle_setup_get_version_rsp(struct ipw_hardware *hw)
return;
}
- set_RTS(hw, PRIO_SETUP, channel_idx,
+ ret = set_RTS(hw, PRIO_SETUP, channel_idx,
(hw->control_lines [channel_idx] &
IPW_CONTROL_LINE_RTS) != 0);
if (ret) {
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
new file mode 100644
index 000000000000..04d9e23d1ee1
--- /dev/null
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -0,0 +1,1303 @@
+/*
+ * TTY driver for MIPS EJTAG Fast Debug Channels.
+ *
+ * Copyright (C) 2007-2015 Imagination Technologies Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for more
+ * details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/uaccess.h>
+
+#include <asm/cdmm.h>
+#include <asm/irq.h>
+
+/* Register offsets */
+#define REG_FDACSR 0x00 /* FDC Access Control and Status Register */
+#define REG_FDCFG 0x08 /* FDC Configuration Register */
+#define REG_FDSTAT 0x10 /* FDC Status Register */
+#define REG_FDRX 0x18 /* FDC Receive Register */
+#define REG_FDTX(N) (0x20+0x8*(N)) /* FDC Transmit Register n (0..15) */
+
+/* Register fields */
+
+#define REG_FDCFG_TXINTTHRES_SHIFT 18
+#define REG_FDCFG_TXINTTHRES (0x3 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_DISABLED (0x0 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_EMPTY (0x1 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_NOTFULL (0x2 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_TXINTTHRES_NEAREMPTY (0x3 << REG_FDCFG_TXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_SHIFT 16
+#define REG_FDCFG_RXINTTHRES (0x3 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_DISABLED (0x0 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_FULL (0x1 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_NOTEMPTY (0x2 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_RXINTTHRES_NEARFULL (0x3 << REG_FDCFG_RXINTTHRES_SHIFT)
+#define REG_FDCFG_TXFIFOSIZE_SHIFT 8
+#define REG_FDCFG_TXFIFOSIZE (0xff << REG_FDCFG_TXFIFOSIZE_SHIFT)
+#define REG_FDCFG_RXFIFOSIZE_SHIFT 0
+#define REG_FDCFG_RXFIFOSIZE (0xff << REG_FDCFG_RXFIFOSIZE_SHIFT)
+
+#define REG_FDSTAT_TXCOUNT_SHIFT 24
+#define REG_FDSTAT_TXCOUNT (0xff << REG_FDSTAT_TXCOUNT_SHIFT)
+#define REG_FDSTAT_RXCOUNT_SHIFT 16
+#define REG_FDSTAT_RXCOUNT (0xff << REG_FDSTAT_RXCOUNT_SHIFT)
+#define REG_FDSTAT_RXCHAN_SHIFT 4
+#define REG_FDSTAT_RXCHAN (0xf << REG_FDSTAT_RXCHAN_SHIFT)
+#define REG_FDSTAT_RXE BIT(3) /* Rx Empty */
+#define REG_FDSTAT_RXF BIT(2) /* Rx Full */
+#define REG_FDSTAT_TXE BIT(1) /* Tx Empty */
+#define REG_FDSTAT_TXF BIT(0) /* Tx Full */
+
+/* Default channel for the early console */
+#define CONSOLE_CHANNEL 1
+
+#define NUM_TTY_CHANNELS 16
+
+#define RX_BUF_SIZE 1024
+
+/*
+ * When the IRQ is unavailable, the FDC state must be polled for incoming data
+ * and space becoming available in TX FIFO.
+ */
+#define FDC_TTY_POLL (HZ / 50)
+
+struct mips_ejtag_fdc_tty;
+
+/**
+ * struct mips_ejtag_fdc_tty_port - Wrapper struct for FDC tty_port.
+ * @port: TTY port data
+ * @driver: TTY driver.
+ * @rx_lock: Lock for rx_buf.
+ * This protects between the hard interrupt and user
+ * context. It's also held during read SWITCH operations.
+ * @rx_buf: Read buffer.
+ * @xmit_lock: Lock for xmit_*, and port.xmit_buf.
+ * This protects between user context and kernel thread.
+ * It is used from chars_in_buffer()/write_room() TTY
+ * callbacks which are used during wait operations, so a
+ * mutex is unsuitable.
+ * @xmit_cnt: Size of xmit buffer contents.
+ * @xmit_head: Head of xmit buffer where data is written.
+ * @xmit_tail: Tail of xmit buffer where data is read.
+ * @xmit_empty: Completion for xmit buffer being empty.
+ */
+struct mips_ejtag_fdc_tty_port {
+ struct tty_port port;
+ struct mips_ejtag_fdc_tty *driver;
+ raw_spinlock_t rx_lock;
+ void *rx_buf;
+ spinlock_t xmit_lock;
+ unsigned int xmit_cnt;
+ unsigned int xmit_head;
+ unsigned int xmit_tail;
+ struct completion xmit_empty;
+};
+
+/**
+ * struct mips_ejtag_fdc_tty - Driver data for FDC as a whole.
+ * @dev: FDC device (for dev_*() logging).
+ * @driver: TTY driver.
+ * @cpu: CPU number for this FDC.
+ * @fdc_name: FDC name (not the base of channel names).
+ * @driver_name: Base of driver name.
+ * @ports: Per-channel data.
+ * @waitqueue: Wait queue for waiting for TX data, or for space in TX
+ * FIFO.
+ * @lock: Lock to protect FDCFG (interrupt enable).
+ * @thread: KThread for writing out data to FDC.
+ * @reg: FDC registers.
+ * @tx_fifo: TX FIFO size.
+ * @xmit_size: Size of each port's xmit buffer.
+ * @xmit_total: Total number of bytes (from all ports) to transmit.
+ * @xmit_next: Next port number to transmit from (round robin).
+ * @xmit_full: Indicates TX FIFO is full, we're waiting for space.
+ * @irq: IRQ number (negative if no IRQ).
+ * @removing: Indicates the device is being removed and @poll_timer
+ * should not be restarted.
+ * @poll_timer: Timer for polling for interrupt events when @irq < 0.
+ * @sysrq_pressed: Whether the magic sysrq key combination has been
+ * detected. See mips_ejtag_fdc_handle().
+ */
+struct mips_ejtag_fdc_tty {
+ struct device *dev;
+ struct tty_driver *driver;
+ unsigned int cpu;
+ char fdc_name[16];
+ char driver_name[16];
+ struct mips_ejtag_fdc_tty_port ports[NUM_TTY_CHANNELS];
+ wait_queue_head_t waitqueue;
+ raw_spinlock_t lock;
+ struct task_struct *thread;
+
+ void __iomem *reg;
+ u8 tx_fifo;
+
+ unsigned int xmit_size;
+ atomic_t xmit_total;
+ unsigned int xmit_next;
+ bool xmit_full;
+
+ int irq;
+ bool removing;
+ struct timer_list poll_timer;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+ bool sysrq_pressed;
+#endif
+};
+
+/* Hardware access */
+
+static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv,
+ unsigned int offs, unsigned int data)
+{
+ iowrite32(data, priv->reg + offs);
+}
+
+static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv,
+ unsigned int offs)
+{
+ return ioread32(priv->reg + offs);
+}
+
+/* Encoding of byte stream in FDC words */
+
+/**
+ * struct fdc_word - FDC word encoding some number of bytes of data.
+ * @word: Raw FDC word.
+ * @bytes: Number of bytes encoded by @word.
+ */
+struct fdc_word {
+ u32 word;
+ unsigned int bytes;
+};
+
+/*
+ * This is a compact encoding which allows every 1 byte, 2 byte, and 3 byte
+ * sequence to be encoded in a single word, while allowing the majority of 4
+ * byte sequences (including all ASCII and common binary data) to be encoded in
+ * a single word too.
+ * _______________________ _____________
+ * | FDC Word | |
+ * |31-24|23-16|15-8 | 7-0 | Bytes |
+ * |_____|_____|_____|_____|_____________|
+ * | | | | | |
+ * |0x80 |0x80 |0x80 | WW | WW |
+ * |0x81 |0x81 | XX | WW | WW XX |
+ * |0x82 | YY | XX | WW | WW XX YY |
+ * | ZZ | YY | XX | WW | WW XX YY ZZ |
+ * |_____|_____|_____|_____|_____________|
+ *
+ * Note that the 4-byte encoding can only be used where none of the other 3
+ * encodings match, otherwise it must fall back to the 3 byte encoding.
+ */
+
+/* ranges >= 1 && sizes[0] >= 1 */
+static struct fdc_word mips_ejtag_fdc_encode(const char **ptrs,
+ unsigned int *sizes,
+ unsigned int ranges)
+{
+ struct fdc_word word = { 0, 0 };
+ const char **ptrs_end = ptrs + ranges;
+
+ for (; ptrs < ptrs_end; ++ptrs) {
+ const char *ptr = *(ptrs++);
+ const char *end = ptr + *(sizes++);
+
+ for (; ptr < end; ++ptr) {
+ word.word |= (u8)*ptr << (8*word.bytes);
+ ++word.bytes;
+ if (word.bytes == 4)
+ goto done;
+ }
+ }
+done:
+ /* Choose the appropriate encoding */
+ switch (word.bytes) {
+ case 4:
+ /* 4 byte encoding, but don't match the 1-3 byte encodings */
+ if ((word.word >> 8) != 0x808080 &&
+ (word.word >> 16) != 0x8181 &&
+ (word.word >> 24) != 0x82)
+ break;
+ /* Fall back to a 3 byte encoding */
+ word.bytes = 3;
+ word.word &= 0x00ffffff;
+ case 3:
+ /* 3 byte encoding */
+ word.word |= 0x82000000;
+ break;
+ case 2:
+ /* 2 byte encoding */
+ word.word |= 0x81810000;
+ break;
+ case 1:
+ /* 1 byte encoding */
+ word.word |= 0x80808000;
+ break;
+ }
+ return word;
+}
+
+static unsigned int mips_ejtag_fdc_decode(u32 word, char *buf)
+{
+ buf[0] = (u8)word;
+ word >>= 8;
+ if (word == 0x808080)
+ return 1;
+ buf[1] = (u8)word;
+ word >>= 8;
+ if (word == 0x8181)
+ return 2;
+ buf[2] = (u8)word;
+ word >>= 8;
+ if (word == 0x82)
+ return 3;
+ buf[3] = (u8)word;
+ return 4;
+}
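
A worked example of the word format documented above may help: the low byte lane always carries the first data byte, and the unused upper lanes carry the 0x80/0x81/0x82 markers that tell the decoder how many bytes are valid. The standalone program below applies those rules directly; it does not call the driver's static helpers, and unlike mips_ejtag_fdc_encode() it does not report how many bytes were consumed on the 3-byte fallback.

/*
 * Worked example of the FDC word encoding documented above.  Bytes are
 * packed little-end-first; unused upper byte lanes carry the marker
 * values that identify the 1-, 2- and 3-byte encodings.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fdc_encode(const unsigned char *buf, unsigned int len)
{
	uint32_t word = 0;
	unsigned int i;

	for (i = 0; i < len && i < 4; ++i)
		word |= (uint32_t)buf[i] << (8 * i);

	switch (i) {
	case 1: return word | 0x80808000;	/* |0x80|0x80|0x80| WW | */
	case 2: return word | 0x81810000;	/* |0x81|0x81| XX | WW | */
	case 3: return word | 0x82000000;	/* |0x82| YY | XX | WW | */
	default:
		/* 4 bytes fit only if the top lanes don't mimic a marker */
		if ((word >> 8) != 0x808080 &&
		    (word >> 16) != 0x8181 &&
		    (word >> 24) != 0x82)
			return word;
		/* otherwise fall back to sending only the low 3 bytes */
		return (word & 0x00ffffff) | 0x82000000;
	}
}

int main(void)
{
	printf("\"A\"     -> %08x\n", (unsigned)fdc_encode((const unsigned char *)"A", 1));
	printf("\"OK\"    -> %08x\n", (unsigned)fdc_encode((const unsigned char *)"OK", 2));
	printf("\"Hi!\\n\" -> %08x\n", (unsigned)fdc_encode((const unsigned char *)"Hi!\n", 4));
	return 0;
}
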
+
+/* Console operations */
+
+/**
+ * struct mips_ejtag_fdc_console - Wrapper struct for FDC consoles.
+ * @cons: Console object.
+ * @tty_drv: TTY driver associated with this console.
+ * @lock: Lock to protect concurrent access to other fields.
+ * This is raw because it may be used very early.
+ * @initialised: Whether the console is initialised.
+ * @regs: Registers base address for each CPU.
+ */
+struct mips_ejtag_fdc_console {
+ struct console cons;
+ struct tty_driver *tty_drv;
+ raw_spinlock_t lock;
+ bool initialised;
+ void __iomem *regs[NR_CPUS];
+};
+
+/* Low level console write shared by early console and normal console */
+static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
+ unsigned int count)
+{
+ struct mips_ejtag_fdc_console *cons =
+ container_of(c, struct mips_ejtag_fdc_console, cons);
+ void __iomem *regs;
+ struct fdc_word word;
+ unsigned long flags;
+ unsigned int i, buf_len, cpu;
+ bool done_cr = false;
+ char buf[4];
+ const char *buf_ptr = buf;
+ /* Number of bytes of input data encoded up to each byte in buf */
+ u8 inc[4];
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ regs = cons->regs[cpu];
+ /* First console output on this CPU? */
+ if (!regs) {
+ regs = mips_cdmm_early_probe(0xfd);
+ cons->regs[cpu] = regs;
+ }
+ /* Already tried and failed to find FDC on this CPU? */
+ if (IS_ERR(regs))
+ goto out;
+ while (count) {
+ /*
+ * Copy the next few characters to a buffer so we can inject
+ * carriage returns before newlines.
+ */
+ for (buf_len = 0, i = 0; buf_len < 4 && i < count; ++buf_len) {
+ if (s[i] == '\n' && !done_cr) {
+ buf[buf_len] = '\r';
+ done_cr = true;
+ } else {
+ buf[buf_len] = s[i];
+ done_cr = false;
+ ++i;
+ }
+ inc[buf_len] = i;
+ }
+ word = mips_ejtag_fdc_encode(&buf_ptr, &buf_len, 1);
+ count -= inc[word.bytes - 1];
+ s += inc[word.bytes - 1];
+
+ /* Busy wait until there's space in fifo */
+ while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+ ;
+ iowrite32(word.word, regs + REG_FDTX(c->index));
+ }
+out:
+ local_irq_restore(flags);
+}
+
+static struct tty_driver *mips_ejtag_fdc_console_device(struct console *c,
+ int *index)
+{
+ struct mips_ejtag_fdc_console *cons =
+ container_of(c, struct mips_ejtag_fdc_console, cons);
+
+ *index = c->index;
+ return cons->tty_drv;
+}
+
+/* Initialise an FDC console (early or normal) */
+static int __init mips_ejtag_fdc_console_init(struct mips_ejtag_fdc_console *c)
+{
+ void __iomem *regs;
+ unsigned long flags;
+ int ret = 0;
+
+ raw_spin_lock_irqsave(&c->lock, flags);
+ /* Don't init twice */
+ if (c->initialised)
+ goto out;
+ /* Look for the FDC device */
+ regs = mips_cdmm_early_probe(0xfd);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto out;
+ }
+
+ c->initialised = true;
+ c->regs[smp_processor_id()] = regs;
+ register_console(&c->cons);
+out:
+ raw_spin_unlock_irqrestore(&c->lock, flags);
+ return ret;
+}
+
+static struct mips_ejtag_fdc_console mips_ejtag_fdc_con = {
+ .cons = {
+ .name = "fdc",
+ .write = mips_ejtag_fdc_console_write,
+ .device = mips_ejtag_fdc_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ },
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(mips_ejtag_fdc_con.lock),
+};
+
+/* TTY RX/TX operations */
+
+/**
+ * mips_ejtag_fdc_put_chan() - Write out a block of channel data.
+ * @priv: Pointer to driver private data.
+ * @chan: Channel number.
+ *
+ * Write a single block of data out to the debug adapter. If the circular buffer
+ * is wrapped then only the first block is written.
+ *
+ * Returns: The number of bytes that were written.
+ */
+static unsigned int mips_ejtag_fdc_put_chan(struct mips_ejtag_fdc_tty *priv,
+ unsigned int chan)
+{
+ struct mips_ejtag_fdc_tty_port *dport;
+ struct tty_struct *tty;
+ const char *ptrs[2];
+ unsigned int sizes[2] = { 0 };
+ struct fdc_word word = { .bytes = 0 };
+ unsigned long flags;
+
+ dport = &priv->ports[chan];
+ spin_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ ptrs[0] = dport->port.xmit_buf + dport->xmit_tail;
+ sizes[0] = min_t(unsigned int,
+ priv->xmit_size - dport->xmit_tail,
+ dport->xmit_cnt);
+ ptrs[1] = dport->port.xmit_buf;
+ sizes[1] = dport->xmit_cnt - sizes[0];
+ word = mips_ejtag_fdc_encode(ptrs, sizes, 1 + !!sizes[1]);
+
+ dev_dbg(priv->dev, "%s%u: out %08x: \"%*pE%*pE\"\n",
+ priv->driver_name, chan, word.word,
+ min_t(int, word.bytes, sizes[0]), ptrs[0],
+ max_t(int, 0, word.bytes - sizes[0]), ptrs[1]);
+
+ local_irq_save(flags);
+ /* Maybe we raced with the console and TX FIFO is full */
+ if (mips_ejtag_fdc_read(priv, REG_FDSTAT) & REG_FDSTAT_TXF)
+ word.bytes = 0;
+ else
+ mips_ejtag_fdc_write(priv, REG_FDTX(chan), word.word);
+ local_irq_restore(flags);
+
+ dport->xmit_cnt -= word.bytes;
+ if (!dport->xmit_cnt) {
+ /* Reset pointers to avoid wraps */
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ } else {
+ dport->xmit_tail += word.bytes;
+ if (dport->xmit_tail >= priv->xmit_size)
+ dport->xmit_tail -= priv->xmit_size;
+ }
+ atomic_sub(word.bytes, &priv->xmit_total);
+ }
+ spin_unlock(&dport->xmit_lock);
+
+ /* If we've made more data available, wake up tty */
+ if (sizes[0] && word.bytes) {
+ tty = tty_port_tty_get(&dport->port);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ }
+
+ return word.bytes;
+}
+
+/**
+ * mips_ejtag_fdc_put() - Kernel thread to write out channel data to FDC.
+ * @arg: Driver pointer.
+ *
+ * This kernel thread runs while @priv->xmit_total != 0, and round robins the
+ * channels writing out blocks of buffered data to the FDC TX FIFO.
+ */
+static int mips_ejtag_fdc_put(void *arg)
+{
+ struct mips_ejtag_fdc_tty *priv = arg;
+ struct mips_ejtag_fdc_tty_port *dport;
+ unsigned int ret;
+ u32 cfg;
+
+ __set_current_state(TASK_RUNNING);
+ while (!kthread_should_stop()) {
+ /* Wait for data to actually write */
+ wait_event_interruptible(priv->waitqueue,
+ atomic_read(&priv->xmit_total) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ /* Wait for TX FIFO space to write data */
+ raw_spin_lock_irq(&priv->lock);
+ if (mips_ejtag_fdc_read(priv, REG_FDSTAT) & REG_FDSTAT_TXF) {
+ priv->xmit_full = true;
+ if (priv->irq >= 0) {
+ /* Enable TX interrupt */
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~REG_FDCFG_TXINTTHRES;
+ cfg |= REG_FDCFG_TXINTTHRES_NOTFULL;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ }
+ }
+ raw_spin_unlock_irq(&priv->lock);
+ wait_event_interruptible(priv->waitqueue,
+ !(mips_ejtag_fdc_read(priv, REG_FDSTAT)
+ & REG_FDSTAT_TXF) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ /* Find next channel with data to output */
+ for (;;) {
+ dport = &priv->ports[priv->xmit_next];
+ spin_lock(&dport->xmit_lock);
+ ret = dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+ if (ret)
+ break;
+ /* Round robin */
+ ++priv->xmit_next;
+ if (priv->xmit_next >= NUM_TTY_CHANNELS)
+ priv->xmit_next = 0;
+ }
+
+ /* Try writing data to the chosen channel */
+ ret = mips_ejtag_fdc_put_chan(priv, priv->xmit_next);
+
+ /*
+ * If anything was output, move on to the next channel so as not
+ * to starve other channels.
+ */
+ if (ret) {
+ ++priv->xmit_next;
+ if (priv->xmit_next >= NUM_TTY_CHANNELS)
+ priv->xmit_next = 0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mips_ejtag_fdc_handle() - Handle FDC events.
+ * @priv: Pointer to driver private data.
+ *
+ * Handle FDC events, such as new incoming data which needs draining out of the
+ * RX FIFO and feeding into the appropriate TTY ports, and space becoming
+ * available in the TX FIFO which would allow more data to be written out.
+ */
+static void mips_ejtag_fdc_handle(struct mips_ejtag_fdc_tty *priv)
+{
+ struct mips_ejtag_fdc_tty_port *dport;
+ unsigned int stat, channel, data, cfg, i, flipped;
+ int len;
+ char buf[4];
+
+ for (;;) {
+ /* Find which channel the next FDC word is destined for */
+ stat = mips_ejtag_fdc_read(priv, REG_FDSTAT);
+ if (stat & REG_FDSTAT_RXE)
+ break;
+ channel = (stat & REG_FDSTAT_RXCHAN) >> REG_FDSTAT_RXCHAN_SHIFT;
+ dport = &priv->ports[channel];
+
+ /* Read out the FDC word, decode it, and pass to tty layer */
+ raw_spin_lock(&dport->rx_lock);
+ data = mips_ejtag_fdc_read(priv, REG_FDRX);
+
+ len = mips_ejtag_fdc_decode(data, buf);
+ dev_dbg(priv->dev, "%s%u: in %08x: \"%*pE\"\n",
+ priv->driver_name, channel, data, len, buf);
+
+ flipped = 0;
+ for (i = 0; i < len; ++i) {
+#ifdef CONFIG_MAGIC_SYSRQ
+#ifdef CONFIG_MIPS_EJTAG_FDC_KGDB
+ /* Support just Ctrl+C with KGDB channel */
+ if (channel == CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN) {
+ if (buf[i] == '\x03') { /* ^C */
+ handle_sysrq('g');
+ continue;
+ }
+ }
+#endif
+ /* Support Ctrl+O for console channel */
+ if (channel == mips_ejtag_fdc_con.cons.index) {
+ if (buf[i] == '\x0f') { /* ^O */
+ priv->sysrq_pressed =
+ !priv->sysrq_pressed;
+ if (priv->sysrq_pressed)
+ continue;
+ } else if (priv->sysrq_pressed) {
+ handle_sysrq(buf[i]);
+ priv->sysrq_pressed = false;
+ continue;
+ }
+ }
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+ /* Check the port isn't being shut down */
+ if (!dport->rx_buf)
+ continue;
+
+ flipped += tty_insert_flip_char(&dport->port, buf[i],
+ TTY_NORMAL);
+ }
+ if (flipped)
+ tty_flip_buffer_push(&dport->port);
+
+ raw_spin_unlock(&dport->rx_lock);
+ }
+
+ /* If TX FIFO no longer full we may be able to write more data */
+ raw_spin_lock(&priv->lock);
+ if (priv->xmit_full && !(stat & REG_FDSTAT_TXF)) {
+ priv->xmit_full = false;
+
+ /* Disable TX interrupt */
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~REG_FDCFG_TXINTTHRES;
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+
+ /* Wake the kthread so it can try writing more data */
+ wake_up_interruptible(&priv->waitqueue);
+ }
+ raw_spin_unlock(&priv->lock);
+}
+
+/**
+ * mips_ejtag_fdc_isr() - Interrupt handler.
+ * @irq: IRQ number.
+ * @dev_id: Pointer to driver private data.
+ *
+ * This is the interrupt handler, used when interrupts are enabled.
+ *
+ * It simply triggers the common FDC handler code.
+ *
+ * Returns: IRQ_HANDLED if an FDC interrupt was pending.
+ * IRQ_NONE otherwise.
+ */
+static irqreturn_t mips_ejtag_fdc_isr(int irq, void *dev_id)
+{
+ struct mips_ejtag_fdc_tty *priv = dev_id;
+
+ /*
+ * We're not using proper per-cpu IRQs, so we must be careful not to
+ * handle IRQs on CPUs we're not interested in.
+ *
+ * Ideally proper per-cpu IRQ handlers could be used, but that doesn't
+ * fit well with the whole sharing of the main CPU IRQ lines. When we
+ * have something with a GIC that routes the FDC IRQs (i.e. no sharing
+ * between handlers) then support could be added more easily.
+ */
+ if (smp_processor_id() != priv->cpu)
+ return IRQ_NONE;
+
+ /* If no FDC interrupt pending, it wasn't for us */
+ if (!(read_c0_cause() & CAUSEF_FDCI))
+ return IRQ_NONE;
+
+ mips_ejtag_fdc_handle(priv);
+ return IRQ_HANDLED;
+}
+
+/**
+ * mips_ejtag_fdc_tty_timer() - Poll FDC for incoming data.
+ * @opaque: Pointer to driver private data.
+ *
+ * This is the timer handler for when interrupts are disabled and polling the
+ * FDC state is required.
+ *
+ * It simply triggers the common FDC handler code and arranges for further
+ * polling.
+ */
+static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
+{
+ struct mips_ejtag_fdc_tty *priv = (void *)opaque;
+
+ mips_ejtag_fdc_handle(priv);
+ if (!priv->removing)
+ mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+}
+
+/* TTY Port operations */
+
+static int mips_ejtag_fdc_tty_port_activate(struct tty_port *port,
+ struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport =
+ container_of(port, struct mips_ejtag_fdc_tty_port, port);
+ void *rx_buf;
+
+ /* Allocate the buffer we use for writing data */
+ if (tty_port_alloc_xmit_buf(port) < 0)
+ goto err;
+
+ /* Allocate the buffer we use for reading data */
+ rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
+ if (!rx_buf)
+ goto err_free_xmit;
+
+ raw_spin_lock_irq(&dport->rx_lock);
+ dport->rx_buf = rx_buf;
+ raw_spin_unlock_irq(&dport->rx_lock);
+
+ return 0;
+err_free_xmit:
+ tty_port_free_xmit_buf(port);
+err:
+ return -ENOMEM;
+}
+
+static void mips_ejtag_fdc_tty_port_shutdown(struct tty_port *port)
+{
+ struct mips_ejtag_fdc_tty_port *dport =
+ container_of(port, struct mips_ejtag_fdc_tty_port, port);
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+ void *rx_buf;
+ unsigned int count;
+
+ spin_lock(&dport->xmit_lock);
+ count = dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+ if (count) {
+ /*
+ * There's still data to write out, so wake and wait for the
+ * writer thread to drain the buffer.
+ */
+ wake_up_interruptible(&priv->waitqueue);
+ wait_for_completion(&dport->xmit_empty);
+ }
+
+ /* Null the read buffer (timer could still be running!) */
+ raw_spin_lock_irq(&dport->rx_lock);
+ rx_buf = dport->rx_buf;
+ dport->rx_buf = NULL;
+ raw_spin_unlock_irq(&dport->rx_lock);
+ /* Free the read buffer */
+ kfree(rx_buf);
+
+ /* Free the write buffer */
+ tty_port_free_xmit_buf(port);
+}
+
+static const struct tty_port_operations mips_ejtag_fdc_tty_port_ops = {
+ .activate = mips_ejtag_fdc_tty_port_activate,
+ .shutdown = mips_ejtag_fdc_tty_port_shutdown,
+};
+
+/* TTY operations */
+
+static int mips_ejtag_fdc_tty_install(struct tty_driver *driver,
+ struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty *priv = driver->driver_state;
+
+ tty->driver_data = &priv->ports[tty->index];
+ return tty_port_install(&priv->ports[tty->index].port, driver, tty);
+}
+
+static int mips_ejtag_fdc_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_open(tty->port, tty, filp);
+}
+
+static void mips_ejtag_fdc_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_close(tty->port, tty, filp);
+}
+
+static void mips_ejtag_fdc_tty_hangup(struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+
+ /* Drop any data in the xmit buffer */
+ spin_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ atomic_sub(dport->xmit_cnt, &priv->xmit_total);
+ dport->xmit_cnt = 0;
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ }
+ spin_unlock(&dport->xmit_lock);
+
+ tty_port_hangup(tty->port);
+}
+
+static int mips_ejtag_fdc_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int total)
+{
+ int count, block;
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+
+ /*
+ * Write to output buffer.
+ *
+ * We write the buffer out asynchronously because the channels are
+ * per-CPU: a synchronous write would go down the channel of whatever
+ * CPU we happen to be running on.
+ *
+ * What we actually want to happen is have all input and output done on
+ * one CPU.
+ */
+ spin_lock(&dport->xmit_lock);
+ /* Work out how many bytes we can write to the xmit buffer */
+ total = min(total, (int)(priv->xmit_size - dport->xmit_cnt));
+ atomic_add(total, &priv->xmit_total);
+ dport->xmit_cnt += total;
+ /* Write the actual bytes (may need splitting if it wraps) */
+ for (count = total; count; count -= block) {
+ block = min(count, (int)(priv->xmit_size - dport->xmit_head));
+ memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
+ dport->xmit_head += block;
+ if (dport->xmit_head >= priv->xmit_size)
+ dport->xmit_head -= priv->xmit_size;
+ buf += block;
+ }
+ count = dport->xmit_cnt;
+ /* Xmit buffer no longer empty? */
+ if (count)
+ reinit_completion(&dport->xmit_empty);
+ spin_unlock(&dport->xmit_lock);
+
+ /* Wake up the kthread */
+ if (total)
+ wake_up_interruptible(&priv->waitqueue);
+ return total;
+}
+
+static int mips_ejtag_fdc_tty_write_room(struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ struct mips_ejtag_fdc_tty *priv = dport->driver;
+ int room;
+
+ /* Report the space in the xmit buffer */
+ spin_lock(&dport->xmit_lock);
+ room = priv->xmit_size - dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+
+ return room;
+}
+
+static int mips_ejtag_fdc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
+ int chars;
+
+ /* Report the number of bytes in the xmit buffer */
+ spin_lock(&dport->xmit_lock);
+ chars = dport->xmit_cnt;
+ spin_unlock(&dport->xmit_lock);
+
+ return chars;
+}
+
+static const struct tty_operations mips_ejtag_fdc_tty_ops = {
+ .install = mips_ejtag_fdc_tty_install,
+ .open = mips_ejtag_fdc_tty_open,
+ .close = mips_ejtag_fdc_tty_close,
+ .hangup = mips_ejtag_fdc_tty_hangup,
+ .write = mips_ejtag_fdc_tty_write,
+ .write_room = mips_ejtag_fdc_tty_write_room,
+ .chars_in_buffer = mips_ejtag_fdc_tty_chars_in_buffer,
+};
+
+static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
+{
+ int ret, nport;
+ struct mips_ejtag_fdc_tty_port *dport;
+ struct mips_ejtag_fdc_tty *priv;
+ struct tty_driver *driver;
+ unsigned int cfg, tx_fifo;
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->cpu = dev->cpu;
+ priv->dev = &dev->dev;
+ mips_cdmm_set_drvdata(dev, priv);
+ atomic_set(&priv->xmit_total, 0);
+ raw_spin_lock_init(&priv->lock);
+
+ priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start,
+ resource_size(&dev->res));
+ if (!priv->reg) {
+ dev_err(priv->dev, "ioremap failed for resource %pR\n",
+ &dev->res);
+ return -ENOMEM;
+ }
+
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ tx_fifo = (cfg & REG_FDCFG_TXFIFOSIZE) >> REG_FDCFG_TXFIFOSIZE_SHIFT;
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+
+ /* Make each port's xmit FIFO big enough to fill FDC TX FIFO */
+ priv->xmit_size = min(tx_fifo * 4, (unsigned int)SERIAL_XMIT_SIZE);
+
+ driver = tty_alloc_driver(NUM_TTY_CHANNELS, TTY_DRIVER_REAL_RAW);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
+ priv->driver = driver;
+
+ driver->driver_name = "ejtag_fdc";
+ snprintf(priv->fdc_name, sizeof(priv->fdc_name), "ttyFDC%u", dev->cpu);
+ snprintf(priv->driver_name, sizeof(priv->driver_name), "%sc",
+ priv->fdc_name);
+ driver->name = priv->driver_name;
+ driver->major = 0; /* Auto-allocate */
+ driver->minor_start = 0;
+ driver->type = TTY_DRIVER_TYPE_SERIAL;
+ driver->subtype = SERIAL_TYPE_NORMAL;
+ driver->init_termios = tty_std_termios;
+ driver->init_termios.c_cflag |= CLOCAL;
+ driver->driver_state = priv;
+
+ tty_set_operations(driver, &mips_ejtag_fdc_tty_ops);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &priv->ports[nport];
+ dport->driver = priv;
+ tty_port_init(&dport->port);
+ dport->port.ops = &mips_ejtag_fdc_tty_port_ops;
+ raw_spin_lock_init(&dport->rx_lock);
+ spin_lock_init(&dport->xmit_lock);
+ /* The xmit buffer starts empty, i.e. completely written */
+ init_completion(&dport->xmit_empty);
+ complete(&dport->xmit_empty);
+ }
+
+ /* Set up the console */
+ mips_ejtag_fdc_con.regs[dev->cpu] = priv->reg;
+ if (dev->cpu == 0)
+ mips_ejtag_fdc_con.tty_drv = driver;
+
+ init_waitqueue_head(&priv->waitqueue);
+ priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
+ if (IS_ERR(priv->thread)) {
+ ret = PTR_ERR(priv->thread);
+ dev_err(priv->dev, "Couldn't create kthread (%d)\n", ret);
+ goto err_destroy_ports;
+ }
+ /*
+ * Bind the writer thread to the right CPU so it can't migrate.
+ * The channels are per-CPU and we want all channel I/O to be on a
+ * single predictable CPU.
+ */
+ kthread_bind(priv->thread, dev->cpu);
+ wake_up_process(priv->thread);
+
+ /* Look for an FDC IRQ */
+ priv->irq = -1;
+ if (get_c0_fdc_int)
+ priv->irq = get_c0_fdc_int();
+
+ /* Try requesting the IRQ */
+ if (priv->irq >= 0) {
+ /*
+ * IRQF_SHARED, IRQF_NO_SUSPEND: The FDC IRQ may be shared with
+ * other local interrupts such as the timer which sets
+ * IRQF_TIMER (including IRQF_NO_SUSPEND).
+ *
+ * IRQF_NO_THREAD: The FDC IRQ isn't individually maskable so it
+ * cannot be deferred and handled by a thread on RT kernels. For
+ * this reason any spinlocks used from the ISR are raw.
+ */
+ ret = devm_request_irq(priv->dev, priv->irq, mips_ejtag_fdc_isr,
+ IRQF_PERCPU | IRQF_SHARED |
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND,
+ priv->fdc_name, priv);
+ if (ret)
+ priv->irq = -1;
+ }
+ if (priv->irq >= 0) {
+ /* IRQ is usable, enable RX interrupt */
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~REG_FDCFG_RXINTTHRES;
+ cfg |= REG_FDCFG_RXINTTHRES_NOTEMPTY;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ /* If we didn't get a usable IRQ, poll instead */
+ setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+ (unsigned long)priv);
+ priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
+ /*
+ * Always attach the timer to the right CPU. The channels are
+ * per-CPU so all polling should be from a single CPU.
+ */
+ add_timer_on(&priv->poll_timer, dev->cpu);
+
+ dev_info(priv->dev, "No usable IRQ, polling enabled\n");
+ }
+
+ ret = tty_register_driver(driver);
+ if (ret < 0) {
+ dev_err(priv->dev, "Couldn't install tty driver (%d)\n", ret);
+ goto err_stop_irq;
+ }
+
+ return 0;
+
+err_stop_irq:
+ if (priv->irq >= 0) {
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ priv->removing = true;
+ del_timer_sync(&priv->poll_timer);
+ }
+ kthread_stop(priv->thread);
+err_destroy_ports:
+ if (dev->cpu == 0)
+ mips_ejtag_fdc_con.tty_drv = NULL;
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &priv->ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(priv->driver);
+ return ret;
+}
+
+static int mips_ejtag_fdc_tty_remove(struct mips_cdmm_device *dev)
+{
+ struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
+ struct mips_ejtag_fdc_tty_port *dport;
+ int nport;
+ unsigned int cfg;
+
+ if (priv->irq >= 0) {
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ priv->removing = true;
+ del_timer_sync(&priv->poll_timer);
+ }
+ kthread_stop(priv->thread);
+ if (dev->cpu == 0)
+ mips_ejtag_fdc_con.tty_drv = NULL;
+ tty_unregister_driver(priv->driver);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &priv->ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(priv->driver);
+ return 0;
+}
+
+static int mips_ejtag_fdc_tty_cpu_down(struct mips_cdmm_device *dev)
+{
+ struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
+ unsigned int cfg;
+
+ if (priv->irq >= 0) {
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ /* Disable interrupts */
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ priv->removing = true;
+ del_timer_sync(&priv->poll_timer);
+ }
+ kthread_stop(priv->thread);
+
+ return 0;
+}
+
+static int mips_ejtag_fdc_tty_cpu_up(struct mips_cdmm_device *dev)
+{
+ struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
+ unsigned int cfg;
+ int ret = 0;
+
+ if (priv->irq >= 0) {
+ /*
+ * IRQ is usable, enable the RX interrupt.
+ * This must be done before the kthread is restarted, as the
+ * kthread may enable the TX interrupt.
+ */
+ raw_spin_lock_irq(&priv->lock);
+ cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
+ cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
+ cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
+ cfg |= REG_FDCFG_RXINTTHRES_NOTEMPTY;
+ mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
+ raw_spin_unlock_irq(&priv->lock);
+ } else {
+ /* Restart poll timer */
+ priv->removing = false;
+ add_timer_on(&priv->poll_timer, dev->cpu);
+ }
+
+ /* Restart the kthread */
+ priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
+ if (IS_ERR(priv->thread)) {
+ ret = PTR_ERR(priv->thread);
+ dev_err(priv->dev, "Couldn't re-create kthread (%d)\n", ret);
+ goto out;
+ }
+ /* Bind it back to the right CPU and set it off */
+ kthread_bind(priv->thread, dev->cpu);
+ wake_up_process(priv->thread);
+out:
+ return ret;
+}
+
+static struct mips_cdmm_device_id mips_ejtag_fdc_tty_ids[] = {
+ { .type = 0xfd },
+ { }
+};
+
+static struct mips_cdmm_driver mips_ejtag_fdc_tty_driver = {
+ .drv = {
+ .name = "mips_ejtag_fdc",
+ },
+ .probe = mips_ejtag_fdc_tty_probe,
+ .remove = mips_ejtag_fdc_tty_remove,
+ .cpu_down = mips_ejtag_fdc_tty_cpu_down,
+ .cpu_up = mips_ejtag_fdc_tty_cpu_up,
+ .id_table = mips_ejtag_fdc_tty_ids,
+};
+module_mips_cdmm_driver(mips_ejtag_fdc_tty_driver);
+
+static int __init mips_ejtag_fdc_init_console(void)
+{
+ return mips_ejtag_fdc_console_init(&mips_ejtag_fdc_con);
+}
+console_initcall(mips_ejtag_fdc_init_console);
+
+#ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON
+static struct mips_ejtag_fdc_console mips_ejtag_fdc_earlycon = {
+ .cons = {
+ .name = "early_fdc",
+ .write = mips_ejtag_fdc_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = CONSOLE_CHANNEL,
+ },
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(mips_ejtag_fdc_earlycon.lock),
+};
+
+int __init setup_early_fdc_console(void)
+{
+ return mips_ejtag_fdc_console_init(&mips_ejtag_fdc_earlycon);
+}
+#endif
+
+#ifdef CONFIG_MIPS_EJTAG_FDC_KGDB
+
+/* read buffer to allow decompaction */
+static unsigned int kgdbfdc_rbuflen;
+static unsigned int kgdbfdc_rpos;
+static char kgdbfdc_rbuf[4];
+
+/* write buffer to allow compaction */
+static unsigned int kgdbfdc_wbuflen;
+static char kgdbfdc_wbuf[4];
+
+static void __iomem *kgdbfdc_setup(void)
+{
+ void __iomem *regs;
+ unsigned int cpu;
+
+ /* Find the address, piggybacking off the console's per-CPU regs */
+ cpu = smp_processor_id();
+ regs = mips_ejtag_fdc_con.regs[cpu];
+ /* First console output on this CPU? */
+ if (!regs) {
+ regs = mips_cdmm_early_probe(0xfd);
+ mips_ejtag_fdc_con.regs[cpu] = regs;
+ }
+ /* Already tried and failed to find FDC on this CPU? */
+ if (IS_ERR(regs))
+ return regs;
+
+ return regs;
+}
+
+/* read a character from the read buffer, filling from FDC RX FIFO */
+static int kgdbfdc_read_char(void)
+{
+ unsigned int stat, channel, data;
+ void __iomem *regs;
+
+ /* No more data, try and read another FDC word from RX FIFO */
+ if (kgdbfdc_rpos >= kgdbfdc_rbuflen) {
+ kgdbfdc_rpos = 0;
+ kgdbfdc_rbuflen = 0;
+
+ regs = kgdbfdc_setup();
+ if (IS_ERR(regs))
+ return NO_POLL_CHAR;
+
+ /* Read next word from KGDB channel */
+ do {
+ stat = ioread32(regs + REG_FDSTAT);
+
+ /* No data waiting? */
+ if (stat & REG_FDSTAT_RXE)
+ return NO_POLL_CHAR;
+
+ /* Read next word */
+ channel = (stat & REG_FDSTAT_RXCHAN) >>
+ REG_FDSTAT_RXCHAN_SHIFT;
+ data = ioread32(regs + REG_FDRX);
+ } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN);
+
+ /* Decode into rbuf */
+ kgdbfdc_rbuflen = mips_ejtag_fdc_decode(data, kgdbfdc_rbuf);
+ }
+ pr_devel("kgdbfdc r %c\n", kgdbfdc_rbuf[kgdbfdc_rpos]);
+ return kgdbfdc_rbuf[kgdbfdc_rpos++];
+}
+
+/* push an FDC word from write buffer to TX FIFO */
+static void kgdbfdc_push_one(void)
+{
+ const char *bufs[1] = { kgdbfdc_wbuf };
+ struct fdc_word word;
+ void __iomem *regs;
+ unsigned int i;
+
+ /* Construct a word from any data in buffer */
+ word = mips_ejtag_fdc_encode(bufs, &kgdbfdc_wbuflen, 1);
+ /* Relocate any remaining data to the beginning of the buffer */
+ kgdbfdc_wbuflen -= word.bytes;
+ for (i = 0; i < kgdbfdc_wbuflen; ++i)
+ kgdbfdc_wbuf[i] = kgdbfdc_wbuf[i + word.bytes];
+
+ regs = kgdbfdc_setup();
+ if (IS_ERR(regs))
+ return;
+
+ /* Busy wait until there's space in fifo */
+ while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+ ;
+ iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
+}
+
+/* flush the whole write buffer to the TX FIFO */
+static void kgdbfdc_flush(void)
+{
+ while (kgdbfdc_wbuflen)
+ kgdbfdc_push_one();
+}
+
+/* write a character into the write buffer, writing out if full */
+static void kgdbfdc_write_char(u8 chr)
+{
+ pr_devel("kgdbfdc w %c\n", chr);
+ kgdbfdc_wbuf[kgdbfdc_wbuflen++] = chr;
+ if (kgdbfdc_wbuflen >= sizeof(kgdbfdc_wbuf))
+ kgdbfdc_push_one();
+}
+
+static struct kgdb_io kgdbfdc_io_ops = {
+ .name = "kgdbfdc",
+ .read_char = kgdbfdc_read_char,
+ .write_char = kgdbfdc_write_char,
+ .flush = kgdbfdc_flush,
+};
+
+static int __init kgdbfdc_init(void)
+{
+ kgdb_register_io_module(&kgdbfdc_io_ops);
+ return 0;
+}
+early_initcall(kgdbfdc_init);
+#endif
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c4343764cc5b..2c34c3249972 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2669,7 +2669,7 @@ static inline void muxnet_put(struct gsm_mux_net *mux_net)
static int gsm_mux_net_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
- struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ struct gsm_mux_net *mux_net = netdev_priv(net);
struct gsm_dlci *dlci = mux_net->dlci;
muxnet_get(mux_net);
@@ -2698,7 +2698,7 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
{
struct net_device *net = dlci->net;
struct sk_buff *skb;
- struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ struct gsm_mux_net *mux_net = netdev_priv(net);
muxnet_get(mux_net);
/* Allocate an sk_buff */
@@ -2727,7 +2727,7 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
static int gsm_change_mtu(struct net_device *net, int new_mtu)
{
- struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ struct gsm_mux_net *mux_net = netdev_priv(net);
if ((new_mtu < 8) || (new_mtu > mux_net->dlci->gsm->mtu))
return -EINVAL;
net->mtu = new_mtu;
@@ -2763,7 +2763,7 @@ static void gsm_destroy_network(struct gsm_dlci *dlci)
pr_debug("destroy network interface");
if (!dlci->net)
return;
- mux_net = (struct gsm_mux_net *)netdev_priv(dlci->net);
+ mux_net = netdev_priv(dlci->net);
muxnet_put(mux_net);
}
@@ -2801,7 +2801,7 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
return -ENOMEM;
}
net->mtu = dlci->gsm->mtu;
- mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ mux_net = netdev_priv(net);
mux_net->dlci = dlci;
kref_init(&mux_net->ref);
strncpy(nc->if_name, net->name, IFNAMSIZ); /* return net name */
@@ -2824,7 +2824,7 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
}
/* Line discipline for real tty */
-struct tty_ldisc_ops tty_ldisc_packet = {
+static struct tty_ldisc_ops tty_ldisc_packet = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
.name = "n_gsm",
@@ -3170,7 +3170,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
return gsmtty_modem_update(dlci, encode);
}
-static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
+static void gsmtty_cleanup(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
struct gsm_mux *gsm = dlci->gsm;
@@ -3178,7 +3178,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
dlci_put(dlci);
dlci_put(gsm->dlci[0]);
mux_put(gsm);
- driver->ttys[tty->index] = NULL;
}
/* Virtual ttys for the demux */
@@ -3199,7 +3198,7 @@ static const struct tty_operations gsmtty_ops = {
.tiocmget = gsmtty_tiocmget,
.tiocmset = gsmtty_tiocmset,
.break_ctl = gsmtty_break_ctl,
- .remove = gsmtty_remove,
+ .cleanup = gsmtty_cleanup,
};
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 644ddb841d9f..bbc4ce66c2c1 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
add_wait_queue(&tty->read_wait, &wait);
for (;;) {
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+ if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
ret = -EIO;
break;
}
@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
/* set bits for operations that won't block */
if (n_hdlc->rx_buf_list.head)
mask |= POLLIN | POLLRDNORM; /* readable */
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ if (test_bit(TTY_OTHER_DONE, &tty->flags))
mask |= POLLHUP;
if (tty_hung_up_p(filp))
mask |= POLLHUP;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cf6e0f2e1331..cc57a3a6b02b 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1949,6 +1949,18 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
return ldata->commit_head - ldata->read_tail >= amt;
}
+static inline int check_other_done(struct tty_struct *tty)
+{
+ int done = test_bit(TTY_OTHER_DONE, &tty->flags);
+ if (done) {
+ /* paired with cmpxchg() in check_other_closed(); ensures
+ * read buffer head index is not stale
+ */
+ smp_mb__after_atomic();
+ }
+ return done;
+}
+
/**
* copy_from_read_buf - copy read data directly
* @tty: terminal device
@@ -2167,7 +2179,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
struct n_tty_data *ldata = tty->disc_data;
unsigned char __user *b = buf;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int c;
+ int c, done;
int minimum, time;
ssize_t retval = 0;
long timeout;
@@ -2235,8 +2247,10 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
((minimum - (b - buf)) >= 1))
ldata->minimum_to_wake = (minimum - (b - buf));
+ done = check_other_done(tty);
+
if (!input_available_p(tty, 0)) {
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+ if (done) {
retval = -EIO;
break;
}
@@ -2443,12 +2457,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
+ if (check_other_done(tty))
+ mask |= POLLHUP;
if (input_available_p(tty, 1))
mask |= POLLIN | POLLRDNORM;
if (tty->packet && tty->link->ctrl_status)
mask |= POLLPRI | POLLIN | POLLRDNORM;
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
- mask |= POLLHUP;
if (tty_hung_up_p(file))
mask |= POLLHUP;
if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e72ee629cead..4d5e8409769c 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -53,9 +53,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
/* Review - krefs on tty_link ?? */
if (!tty->link)
return;
- tty_flush_to_ldisc(tty->link);
set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
- wake_up_interruptible(&tty->link->read_wait);
+ tty_flip_buffer_push(tty->link->port);
wake_up_interruptible(&tty->link->write_wait);
if (tty->driver->subtype == PTY_TYPE_MASTER) {
set_bit(TTY_OTHER_CLOSED, &tty->flags);
@@ -243,7 +242,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
goto out;
clear_bit(TTY_IO_ERROR, &tty->flags);
+ /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+ clear_bit(TTY_OTHER_DONE, &tty->link->flags);
set_bit(TTY_THROTTLED, &tty->flags);
return 0;
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index b00836851061..c43f74c53cd9 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -21,7 +21,6 @@ struct uart_8250_dma {
/* Filter function */
dma_filter_fn fn;
-
/* Parameter to the filter function */
void *rx_param;
void *tx_param;
@@ -53,7 +52,7 @@ struct old_serial_port {
unsigned int baud_base;
unsigned int port;
unsigned int irq;
- unsigned int flags;
+ upf_t flags;
unsigned char hub6;
unsigned char io_type;
unsigned char __iomem *iomem_base;
@@ -85,9 +84,6 @@ struct serial8250_config {
#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
#define UART_BUG_PARITY (1 << 4) /* UART mishandles parity if FIFO enabled */
-#define PROBE_RSA (1 << 0)
-#define PROBE_ANY (~0)
-
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
#ifdef CONFIG_SERIAL_8250_SHARE_IRQ
@@ -198,3 +194,20 @@ static inline int serial8250_request_dma(struct uart_8250_port *p)
}
static inline void serial8250_release_dma(struct uart_8250_port *p) { }
#endif
+
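+/*
+ * Switch an NS16550A-compatible UART into high-speed mode by adjusting the
+ * EXCR2 prescaler (baud_base becomes 921600). Returns 0 if the UART was
+ * already in high-speed mode, 1 if the prescaler was changed.
+ */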
+static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
+{
+ unsigned char status;
+
+ status = serial_in(up, 0x04); /* EXCR2 */
+#define PRESL(x) ((x) & 0x30)
+ if (PRESL(status) == 0x10) {
+ /* already in high speed mode */
+ return 0;
+ } else {
+ status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
+ status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
+ serial_out(up, 0x04, status);
+ }
+ return 1;
+}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index deae122c9c4b..4506e405c8f3 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -31,7 +31,6 @@
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/nmi.h>
@@ -61,7 +60,7 @@ static struct uart_driver serial8250_reg;
static int serial_index(struct uart_port *port)
{
- return (serial8250_reg.minor - 64) + port->line;
+ return port->minor - 64;
}
static unsigned int skip_txen_test; /* force skip of txen test at init time */
@@ -358,34 +357,46 @@ static void default_serial_dl_write(struct uart_8250_port *up, int value)
#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
/* Au1x00/RT288x UART hardware has a weird register layout */
-static const u8 au_io_in_map[] = {
- [UART_RX] = 0,
- [UART_IER] = 2,
- [UART_IIR] = 3,
- [UART_LCR] = 5,
- [UART_MCR] = 6,
- [UART_LSR] = 7,
- [UART_MSR] = 8,
+static const s8 au_io_in_map[8] = {
+ 0, /* UART_RX */
+ 2, /* UART_IER */
+ 3, /* UART_IIR */
+ 5, /* UART_LCR */
+ 6, /* UART_MCR */
+ 7, /* UART_LSR */
+ 8, /* UART_MSR */
+ -1, /* UART_SCR (unmapped) */
};
-static const u8 au_io_out_map[] = {
- [UART_TX] = 1,
- [UART_IER] = 2,
- [UART_FCR] = 4,
- [UART_LCR] = 5,
- [UART_MCR] = 6,
+static const s8 au_io_out_map[8] = {
+ 1, /* UART_TX */
+ 2, /* UART_IER */
+ 4, /* UART_FCR */
+ 5, /* UART_LCR */
+ 6, /* UART_MCR */
+ -1, /* UART_LSR (unmapped) */
+ -1, /* UART_MSR (unmapped) */
+ -1, /* UART_SCR (unmapped) */
};
static unsigned int au_serial_in(struct uart_port *p, int offset)
{
- offset = au_io_in_map[offset] << p->regshift;
- return __raw_readl(p->membase + offset);
+ if (offset >= ARRAY_SIZE(au_io_in_map))
+ return UINT_MAX;
+ offset = au_io_in_map[offset];
+ if (offset < 0)
+ return UINT_MAX;
+ return __raw_readl(p->membase + (offset << p->regshift));
}
static void au_serial_out(struct uart_port *p, int offset, int value)
{
- offset = au_io_out_map[offset] << p->regshift;
- __raw_writel(value, p->membase + offset);
+ if (offset >= ARRAY_SIZE(au_io_out_map))
+ return;
+ offset = au_io_out_map[offset];
+ if (offset < 0)
+ return;
+ __raw_writel(value, p->membase + (offset << p->regshift));
}
/* Au1x00 haven't got a standard divisor latch */
@@ -439,6 +450,18 @@ static unsigned int mem32_serial_in(struct uart_port *p, int offset)
return readl(p->membase + offset);
}
+static void mem32be_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ iowrite32be(value, p->membase + offset);
+}
+
+static unsigned int mem32be_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ return ioread32be(p->membase + offset);
+}
+
static unsigned int io_serial_in(struct uart_port *p, int offset)
{
offset = offset << p->regshift;
@@ -477,6 +500,11 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = mem32_serial_out;
break;
+ case UPIO_MEM32BE:
+ p->serial_in = mem32be_serial_in;
+ p->serial_out = mem32be_serial_out;
+ break;
+
#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
case UPIO_AU:
p->serial_in = au_serial_in;
@@ -502,6 +530,7 @@ serial_port_out_sync(struct uart_port *p, int offset, int value)
switch (p->iotype) {
case UPIO_MEM:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_AU:
p->serial_out(p, offset, value);
p->serial_in(p, UART_LCR); /* safe, no side-effects */
@@ -895,7 +924,7 @@ static int broken_efr(struct uart_8250_port *up)
/*
* Exar ST16C2550 "A2" devices incorrectly detect as
* having an EFR, and report an ID of 0x0201. See
- * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
+ * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
*/
if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
return 1;
@@ -903,23 +932,6 @@ static int broken_efr(struct uart_8250_port *up)
return 0;
}
-static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
-{
- unsigned char status;
-
- status = serial_in(up, 0x04); /* EXCR2 */
-#define PRESL(x) ((x) & 0x30)
- if (PRESL(status) == 0x10) {
- /* already in high speed mode */
- return 0;
- } else {
- status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
- status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
- serial_out(up, 0x04, status);
- }
- return 1;
-}
-
/*
* We know that the chip has FIFOs. Does it have an EFR? The
* EFR is located in the same register position as the IIR and
@@ -1122,7 +1134,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
* whether or not this UART is a 16550A or not, since this will
* determine whether or not we can use its FIFO features or not.
*/
-static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
+static void autoconfig(struct uart_8250_port *up)
{
unsigned char status1, scratch, scratch2, scratch3;
unsigned char save_lcr, save_mcr;
@@ -1245,22 +1257,15 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
/*
* Only probe for RSA ports if we got the region.
*/
- if (port->type == PORT_16550A && probeflags & PROBE_RSA) {
- int i;
-
- for (i = 0 ; i < probe_rsa_count; ++i) {
- if (probe_rsa[i] == port->iobase && __enable_rsa(up)) {
- port->type = PORT_RSA;
- break;
- }
- }
- }
+ if (port->type == PORT_16550A && up->probe & UART_PROBE_RSA &&
+ __enable_rsa(up))
+ port->type = PORT_RSA;
#endif
serial_out(up, UART_LCR, save_lcr);
port->fifosize = uart_config[up->port.type].fifo_size;
- old_capabilities = up->capabilities;
+ old_capabilities = up->capabilities;
up->capabilities = uart_config[port->type].flags;
up->tx_loadsz = uart_config[port->type].tx_loadsz;
@@ -1907,6 +1912,48 @@ static void serial8250_backup_timeout(unsigned long data)
jiffies + uart_poll_timeout(&up->port) + HZ / 5);
}
+static int univ8250_setup_irq(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+ int retval = 0;
+
+ /*
+ * The UART_BUG_THRE check in serial8250_do_startup() only gives an
+ * accurate result the first time the port is opened, so the flag needs
+ * to be preserved here.
+ */
+ if (up->bugs & UART_BUG_THRE) {
+ pr_debug("ttyS%d - using backup timer\n", serial_index(port));
+
+ up->timer.function = serial8250_backup_timeout;
+ up->timer.data = (unsigned long)up;
+ mod_timer(&up->timer, jiffies +
+ uart_poll_timeout(port) + HZ / 5);
+ }
+
+ /*
+ * If the "interrupt" for this port doesn't correspond with any
+ * hardware interrupt, we use a timer-based system. The original
+ * driver used to do this with IRQ0.
+ */
+ if (!port->irq) {
+ up->timer.data = (unsigned long)up;
+ mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
+ } else
+ retval = serial_link_irq_chain(up);
+
+ return retval;
+}
+
+static void univ8250_release_irq(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ del_timer_sync(&up->timer);
+ up->timer.function = serial8250_timeout;
+ if (port->irq)
+ serial_unlink_irq_chain(up);
+}
+
static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -2211,35 +2258,12 @@ int serial8250_do_startup(struct uart_port *port)
if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
up->port.flags & UPF_BUG_THRE) {
up->bugs |= UART_BUG_THRE;
- pr_debug("ttyS%d - using backup timer\n",
- serial_index(port));
}
}
- /*
- * The above check will only give an accurate result the first time
- * the port is opened so this value needs to be preserved.
- */
- if (up->bugs & UART_BUG_THRE) {
- up->timer.function = serial8250_backup_timeout;
- up->timer.data = (unsigned long)up;
- mod_timer(&up->timer, jiffies +
- uart_poll_timeout(port) + HZ / 5);
- }
-
- /*
- * If the "interrupt" for this port doesn't correspond with any
- * hardware interrupt, we use a timer-based system. The original
- * driver used to do this with IRQ0.
- */
- if (!port->irq) {
- up->timer.data = (unsigned long)up;
- mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
- } else {
- retval = serial_link_irq_chain(up);
- if (retval)
- goto out;
- }
+ retval = up->ops->setup_irq(up);
+ if (retval)
+ goto out;
/*
* Now, initialize the UART
@@ -2270,7 +2294,7 @@ int serial8250_do_startup(struct uart_port *port)
is variable. So, let's just don't test if we receive
TX irq. This way, we'll never enable UART_BUG_TXEN.
*/
- if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
+ if (up->port.flags & UPF_NO_TXEN_TEST)
goto dont_test_tx_en;
/*
@@ -2397,10 +2421,7 @@ void serial8250_do_shutdown(struct uart_port *port)
serial_port_in(port, UART_RX);
serial8250_rpm_put(up);
- del_timer_sync(&up->timer);
- up->timer.function = serial8250_timeout;
- if (port->irq)
- serial_unlink_irq_chain(up);
+ up->ops->release_irq(up);
}
EXPORT_SYMBOL_GPL(serial8250_do_shutdown);
@@ -2719,6 +2740,8 @@ serial8250_pm(struct uart_port *port, unsigned int state,
static unsigned int serial8250_port_size(struct uart_8250_port *pt)
{
+ if (pt->port.mapsize)
+ return pt->port.mapsize;
if (pt->port.iotype == UPIO_AU) {
if (pt->port.type == PORT_RT2880)
return 0x100;
@@ -2743,6 +2766,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_AU:
case UPIO_TSI:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_MEM:
if (!port->mapbase)
break;
@@ -2779,6 +2803,7 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
case UPIO_AU:
case UPIO_TSI:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_MEM:
if (!port->mapbase)
break;
@@ -2798,6 +2823,7 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
}
}
+#ifdef CONFIG_SERIAL_8250_RSA
static int serial8250_request_rsa_resource(struct uart_8250_port *up)
{
unsigned long start = UART_RSA_BASE << up->port.regshift;
@@ -2832,14 +2858,13 @@ static void serial8250_release_rsa_resource(struct uart_8250_port *up)
break;
}
}
+#endif
static void serial8250_release_port(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_release_std_resource(up);
- if (port->type == PORT_RSA)
- serial8250_release_rsa_resource(up);
}
static int serial8250_request_port(struct uart_port *port)
@@ -2851,11 +2876,6 @@ static int serial8250_request_port(struct uart_port *port)
return -ENODEV;
ret = serial8250_request_std_resource(up);
- if (ret == 0 && port->type == PORT_RSA) {
- ret = serial8250_request_rsa_resource(up);
- if (ret < 0)
- serial8250_release_std_resource(up);
- }
return ret;
}
@@ -3003,7 +3023,6 @@ static void register_dev_spec_attr_grp(struct uart_8250_port *up)
static void serial8250_config_port(struct uart_port *port, int flags)
{
struct uart_8250_port *up = up_to_u8250p(port);
- int probeflags = PROBE_ANY;
int ret;
if (port->type == PORT_8250_CIR)
@@ -3017,15 +3036,11 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (ret < 0)
return;
- ret = serial8250_request_rsa_resource(up);
- if (ret < 0)
- probeflags &= ~PROBE_RSA;
-
if (port->iotype != up->cur_iotype)
set_io_from_upio(port);
if (flags & UART_CONFIG_TYPE)
- autoconfig(up, probeflags);
+ autoconfig(up);
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
@@ -3038,8 +3053,6 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
autoconfig_irq(up);
- if (port->type != PORT_RSA && probeflags & PROBE_RSA)
- serial8250_release_rsa_resource(up);
if (port->type == PORT_UNKNOWN)
serial8250_release_std_resource(up);
@@ -3073,7 +3086,7 @@ serial8250_type(struct uart_port *port)
return uart_config[type].name;
}
-static struct uart_ops serial8250_pops = {
+static const struct uart_ops serial8250_pops = {
.tx_empty = serial8250_tx_empty,
.set_mctrl = serial8250_set_mctrl,
.get_mctrl = serial8250_get_mctrl,
@@ -3100,6 +3113,14 @@ static struct uart_ops serial8250_pops = {
#endif
};
+static const struct uart_ops *base_ops;
+static struct uart_ops univ8250_port_ops;
+
+static const struct uart_8250_ops univ8250_driver_ops = {
+ .setup_irq = univ8250_setup_irq,
+ .release_irq = univ8250_release_irq,
+};
+
static struct uart_8250_port serial8250_ports[UART_NR];
/**
@@ -3130,6 +3151,105 @@ void serial8250_set_isa_configurator(
}
EXPORT_SYMBOL(serial8250_set_isa_configurator);
+static void serial8250_init_port(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ spin_lock_init(&port->lock);
+ port->ops = &serial8250_pops;
+
+ up->cur_iotype = 0xFF;
+}
+
+static void serial8250_set_defaults(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ if (up->port.flags & UPF_FIXED_TYPE) {
+ unsigned int type = up->port.type;
+
+ if (!up->port.fifosize)
+ up->port.fifosize = uart_config[type].fifo_size;
+ if (!up->tx_loadsz)
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+ if (!up->capabilities)
+ up->capabilities = uart_config[type].flags;
+ }
+
+ set_io_from_upio(port);
+
+ /* default dma handlers */
+ if (up->dma) {
+ if (!up->dma->tx_dma)
+ up->dma->tx_dma = serial8250_tx_dma;
+ if (!up->dma->rx_dma)
+ up->dma->rx_dma = serial8250_rx_dma;
+ }
+}
+
+#ifdef CONFIG_SERIAL_8250_RSA
+
+static void univ8250_config_port(struct uart_port *port, int flags)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ up->probe &= ~UART_PROBE_RSA;
+ if (port->type == PORT_RSA) {
+ if (serial8250_request_rsa_resource(up) == 0)
+ up->probe |= UART_PROBE_RSA;
+ } else if (flags & UART_CONFIG_TYPE) {
+ int i;
+
+ for (i = 0; i < probe_rsa_count; i++) {
+ if (probe_rsa[i] == up->port.iobase) {
+ if (serial8250_request_rsa_resource(up) == 0)
+ up->probe |= UART_PROBE_RSA;
+ break;
+ }
+ }
+ }
+
+ base_ops->config_port(port, flags);
+
+ if (port->type != PORT_RSA && up->probe & UART_PROBE_RSA)
+ serial8250_release_rsa_resource(up);
+}
+
+static int univ8250_request_port(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ int ret;
+
+ ret = base_ops->request_port(port);
+ if (ret == 0 && port->type == PORT_RSA) {
+ ret = serial8250_request_rsa_resource(up);
+ if (ret < 0)
+ base_ops->release_port(port);
+ }
+
+ return ret;
+}
+
+static void univ8250_release_port(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ if (port->type == PORT_RSA)
+ serial8250_release_rsa_resource(up);
+ base_ops->release_port(port);
+}
+
+static void univ8250_rsa_support(struct uart_ops *ops)
+{
+ ops->config_port = univ8250_config_port;
+ ops->request_port = univ8250_request_port;
+ ops->release_port = univ8250_release_port;
+}
+
+#else
+#define univ8250_rsa_support(x) do { } while (0)
+#endif /* CONFIG_SERIAL_8250_RSA */
+
static void __init serial8250_isa_init_ports(void)
{
struct uart_8250_port *up;
@@ -3148,21 +3268,27 @@ static void __init serial8250_isa_init_ports(void)
struct uart_port *port = &up->port;
port->line = i;
- spin_lock_init(&port->lock);
+ serial8250_init_port(up);
+ if (!base_ops)
+ base_ops = port->ops;
+ port->ops = &univ8250_port_ops;
init_timer(&up->timer);
up->timer.function = serial8250_timeout;
- up->cur_iotype = 0xFF;
+
+ up->ops = &univ8250_driver_ops;
/*
* ALPHA_KLUDGE_MCR needs to be killed.
*/
up->mcr_mask = ~ALPHA_KLUDGE_MCR;
up->mcr_force = ALPHA_KLUDGE_MCR;
-
- port->ops = &serial8250_pops;
}
+ /* chain base port ops to support Remote Supervisor Adapter */
+ univ8250_port_ops = *base_ops;
+ univ8250_rsa_support(&univ8250_port_ops);
+
if (share_irqs)
irqflag = IRQF_SHARED;
@@ -3180,26 +3306,14 @@ static void __init serial8250_isa_init_ports(void)
port->membase = old_serial_port[i].iomem_base;
port->iotype = old_serial_port[i].io_type;
port->regshift = old_serial_port[i].iomem_reg_shift;
- set_io_from_upio(port);
+ serial8250_set_defaults(up);
+
port->irqflags |= irqflag;
if (serial8250_isa_config != NULL)
serial8250_isa_config(i, &up->port, &up->capabilities);
-
}
}
-static void
-serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
-{
- up->port.type = type;
- if (!up->port.fifosize)
- up->port.fifosize = uart_config[type].fifo_size;
- if (!up->tx_loadsz)
- up->tx_loadsz = uart_config[type].tx_loadsz;
- if (!up->capabilities)
- up->capabilities = uart_config[type].flags;
-}
-
static void __init
serial8250_register_ports(struct uart_driver *drv, struct device *dev)
{
@@ -3213,8 +3327,8 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
up->port.dev = dev;
- if (up->port.flags & UPF_FIXED_TYPE)
- serial8250_init_fixed_type_port(up, up->port.type);
+ if (skip_txen_test)
+ up->port.flags |= UPF_NO_TXEN_TEST;
uart_add_one_port(drv, &up->port);
}
@@ -3236,10 +3350,9 @@ static void serial8250_console_putchar(struct uart_port *port, int ch)
*
* The console_lock must be held when we get here.
*/
-static void
-serial8250_console_write(struct console *co, const char *s, unsigned int count)
+static void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ unsigned int count)
{
- struct uart_8250_port *up = &serial8250_ports[co->index];
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier;
@@ -3311,14 +3424,51 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
serial8250_rpm_put(up);
}
-static int serial8250_console_setup(struct console *co, char *options)
+static void univ8250_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_8250_port *up = &serial8250_ports[co->index];
+
+ serial8250_console_write(up, s, count);
+}
+
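+/*
+ * Recover the baud rate the boot firmware left programmed by reading back
+ * the divisor latch. For instance, with the common 1.8432 MHz uartclk a
+ * divisor of 12 gives (1843200 / 16) / 12 = 9600 baud.
+ */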
+static unsigned int probe_baud(struct uart_port *port)
+{
+ unsigned char lcr, dll, dlm;
+ unsigned int quot;
+
+ lcr = serial_port_in(port, UART_LCR);
+ serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
+ dll = serial_port_in(port, UART_DLL);
+ dlm = serial_port_in(port, UART_DLM);
+ serial_port_out(port, UART_LCR, lcr);
+
+ quot = (dlm << 8) | dll;
+ return (port->uartclk / 16) / quot;
+}
+
+static int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
- struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
+ if (!port->iobase && !port->membase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else if (probe)
+ baud = probe_baud(port);
+
+ return uart_set_options(port, port->cons, baud, parity, bits, flow);
+}
+
+static int univ8250_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
@@ -3327,53 +3477,84 @@ static int serial8250_console_setup(struct console *co, char *options)
if (co->index >= nr_uarts)
co->index = 0;
port = &serial8250_ports[co->index].port;
- if (!port->iobase && !port->membase)
- return -ENODEV;
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
+ /* link port to console */
+ port->cons = co;
- return uart_set_options(port, co, baud, parity, bits, flow);
+ return serial8250_console_setup(port, options, false);
}
-static int serial8250_console_early_setup(void)
+/**
+ * univ8250_console_match - non-standard console matching
+ * @co: registering console
+ * @name: name from console command line
+ * @idx: index from console command line
+ * @options: ptr to option string from console command line
+ *
+ * Only attempts to match console command lines of the form:
+ * console=uart[8250],io|mmio|mmio32,<addr>[,<options>]
+ * console=uart[8250],0x<addr>[,<options>]
+ * This form is used to register an initial earlycon boot console and
+ * replace it with the serial8250_console at 8250 driver init.
+ *
+ * Performs console setup for a match (as required by interface)
+ * If no <options> are specified, then assume the h/w is already setup.
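+ *
+ * e.g. console=uart,io,0x3f8,9600n8 (address and options are arbitrary
+ * example values) matches the registered port whose iobase is 0x3f8 and
+ * brings it up at 9600n8.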
+ *
+ * Returns 0 if console matches; otherwise non-zero to use default matching
+ */
+static int univ8250_console_match(struct console *co, char *name, int idx,
+ char *options)
{
- return serial8250_find_port_for_earlycon();
+ char match[] = "uart"; /* 8250-specific earlycon name */
+ unsigned char iotype;
+ unsigned long addr;
+ int i;
+
+ if (strncmp(name, match, 4) != 0)
+ return -ENODEV;
+
+ if (uart_parse_earlycon(options, &iotype, &addr, &options))
+ return -ENODEV;
+
+ /* try to match the port specified on the command line */
+ for (i = 0; i < nr_uarts; i++) {
+ struct uart_port *port = &serial8250_ports[i].port;
+
+ if (port->iotype != iotype)
+ continue;
+ if ((iotype == UPIO_MEM || iotype == UPIO_MEM32) &&
+ (port->mapbase != addr))
+ continue;
+ if (iotype == UPIO_PORT && port->iobase != addr)
+ continue;
+
+ co->index = i;
+ port->cons = co;
+ return serial8250_console_setup(port, options, true);
+ }
+
+ return -ENODEV;
}
-static struct console serial8250_console = {
+static struct console univ8250_console = {
.name = "ttyS",
- .write = serial8250_console_write,
+ .write = univ8250_console_write,
.device = uart_console_device,
- .setup = serial8250_console_setup,
- .early_setup = serial8250_console_early_setup,
+ .setup = univ8250_console_setup,
+ .match = univ8250_console_match,
.flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
.data = &serial8250_reg,
};
-static int __init serial8250_console_init(void)
+static int __init univ8250_console_init(void)
{
serial8250_isa_init_ports();
- register_console(&serial8250_console);
+ register_console(&univ8250_console);
return 0;
}
-console_initcall(serial8250_console_init);
-
-int serial8250_find_port(struct uart_port *p)
-{
- int line;
- struct uart_port *port;
-
- for (line = 0; line < nr_uarts; line++) {
- port = &serial8250_ports[line].port;
- if (uart_match_port(p, port))
- return line;
- }
- return -ENODEV;
-}
+console_initcall(univ8250_console_init);
-#define SERIAL8250_CONSOLE &serial8250_console
+#define SERIAL8250_CONSOLE &univ8250_console
#else
#define SERIAL8250_CONSOLE NULL
#endif
@@ -3412,19 +3593,19 @@ int __init early_serial_setup(struct uart_port *port)
p->iotype = port->iotype;
p->flags = port->flags;
p->mapbase = port->mapbase;
+ p->mapsize = port->mapsize;
p->private_data = port->private_data;
p->type = port->type;
p->line = port->line;
- set_io_from_upio(p);
+ serial8250_set_defaults(up_to_u8250p(p));
+
if (port->serial_in)
p->serial_in = port->serial_in;
if (port->serial_out)
p->serial_out = port->serial_out;
if (port->handle_irq)
p->handle_irq = port->handle_irq;
- else
- p->handle_irq = serial8250_default_handle_irq;
return 0;
}
@@ -3444,7 +3625,8 @@ void serial8250_suspend_port(int line)
port->type != PORT_8250) {
unsigned char canary = 0xa5;
serial_out(up, UART_SCR, canary);
- up->canary = canary;
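+ /*
+ * Only arm the canary (used to detect that the UART lost state
+ * over suspend) if the scratch register actually retains writes.
+ */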
+ if (serial_in(up, UART_SCR) == canary)
+ up->canary = canary;
}
uart_suspend_port(&serial8250_reg, port);
@@ -3666,6 +3848,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->port.flags = up->port.flags | UPF_BOOT_AUTOCONF;
uart->bugs = up->bugs;
uart->port.mapbase = up->port.mapbase;
+ uart->port.mapsize = up->port.mapsize;
uart->port.private_data = up->port.private_data;
uart->port.fifosize = up->port.fifosize;
uart->tx_loadsz = up->tx_loadsz;
@@ -3674,6 +3857,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->port.unthrottle = up->port.unthrottle;
uart->port.rs485_config = up->port.rs485_config;
uart->port.rs485 = up->port.rs485;
+ uart->dma = up->dma;
/* Take tx_loadsz from fifosize if it wasn't set separately */
if (uart->port.fifosize && !uart->tx_loadsz)
@@ -3682,10 +3866,14 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->port.dev)
uart->port.dev = up->port.dev;
+ if (skip_txen_test)
+ uart->port.flags |= UPF_NO_TXEN_TEST;
+
if (up->port.flags & UPF_FIXED_TYPE)
- serial8250_init_fixed_type_port(uart, up->port.type);
+ uart->port.type = up->port.type;
+
+ serial8250_set_defaults(uart);
- set_io_from_upio(&uart->port);
/* Possibly override default I/O functions. */
if (up->port.serial_in)
uart->port.serial_in = up->port.serial_in;
@@ -3710,13 +3898,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->dl_read = up->dl_read;
if (up->dl_write)
uart->dl_write = up->dl_write;
- if (up->dma) {
- uart->dma = up->dma;
- if (!uart->dma->tx_dma)
- uart->dma->tx_dma = serial8250_tx_dma;
- if (!uart->dma->rx_dma)
- uart->dma->rx_dma = serial8250_rx_dma;
- }
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
@@ -3747,9 +3928,11 @@ void serial8250_unregister_port(int line)
uart_remove_one_port(&serial8250_reg, &uart->port);
if (serial8250_isa_devs) {
uart->port.flags &= ~UPF_BOOT_AUTOCONF;
+ if (skip_txen_test)
+ uart->port.flags |= UPF_NO_TXEN_TEST;
uart->port.type = PORT_UNKNOWN;
uart->port.dev = &serial8250_isa_devs->dev;
- uart->capabilities = uart_config[uart->port.type].flags;
+ uart->capabilities = 0;
uart_add_one_port(&serial8250_reg, &uart->port);
} else {
uart->port.dev = NULL;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6ae5b8560e4d..176f18f2e3ab 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
-#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -364,9 +363,9 @@ static int dw8250_probe_of(struct uart_port *p,
}
if (of_property_read_bool(np, "cts-override")) {
- /* Always report DSR as active */
- data->msr_mask_on |= UART_MSR_DSR;
- data->msr_mask_off |= UART_MSR_DDSR;
+ /* Always report CTS as active */
+ data->msr_mask_on |= UART_MSR_CTS;
+ data->msr_mask_off |= UART_MSR_DCTS;
}
if (of_property_read_bool(np, "ri-override")) {
@@ -375,37 +374,16 @@ static int dw8250_probe_of(struct uart_port *p,
data->msr_mask_off |= UART_MSR_TERI;
}
- /* clock got configured through clk api, all done */
- if (p->uartclk)
- return 0;
-
- /* try to find out clock frequency from DT as fallback */
- if (of_property_read_u32(np, "clock-frequency", &val)) {
- dev_err(p->dev, "clk or clock-frequency not defined\n");
- return -EINVAL;
- }
- p->uartclk = val;
-
return 0;
}
static int dw8250_probe_acpi(struct uart_8250_port *up,
struct dw8250_data *data)
{
- const struct acpi_device_id *id;
struct uart_port *p = &up->port;
dw8250_setup_port(up);
- id = acpi_match_device(p->dev->driver->acpi_match_table, p->dev);
- if (!id)
- return -ENODEV;
-
- if (!p->uartclk)
- if (device_property_read_u32(p->dev, "clock-frequency",
- &p->uartclk))
- return -EINVAL;
-
p->iotype = UPIO_MEM32;
p->serial_in = dw8250_serial_in32;
p->serial_out = dw8250_serial_out32;
@@ -425,18 +403,24 @@ static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {};
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ int irq = platform_get_irq(pdev, 0);
struct dw8250_data *data;
int err;
- if (!regs || !irq) {
- dev_err(&pdev->dev, "no registers/irq defined\n");
+ if (!regs) {
+ dev_err(&pdev->dev, "no registers defined\n");
return -EINVAL;
}
+ if (irq < 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "cannot get irq\n");
+ return irq;
+ }
+
spin_lock_init(&uart.port.lock);
uart.port.mapbase = regs->start;
- uart.port.irq = irq->start;
+ uart.port.irq = irq;
uart.port.handle_irq = dw8250_handle_irq;
uart.port.pm = dw8250_do_pm;
uart.port.type = PORT_8250;
@@ -453,12 +437,18 @@ static int dw8250_probe(struct platform_device *pdev)
return -ENOMEM;
data->usr_reg = DW_UART_USR;
+
+ /* Always ask for a fixed clock rate from the "clock-frequency" property. */
+ device_property_read_u32(&pdev->dev, "clock-frequency",
+ &uart.port.uartclk);
+
+ /* If there is separate baudclk, get the rate from it. */
data->clk = devm_clk_get(&pdev->dev, "baudclk");
if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER)
data->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- if (!IS_ERR(data->clk)) {
+ if (!IS_ERR_OR_NULL(data->clk)) {
err = clk_prepare_enable(data->clk);
if (err)
dev_warn(&pdev->dev, "could not enable optional baudclk: %d\n",
@@ -467,6 +457,12 @@ static int dw8250_probe(struct platform_device *pdev)
uart.port.uartclk = clk_get_rate(data->clk);
}
+ /* If no clock rate is defined, fail. */
+ if (!uart.port.uartclk) {
+ dev_err(&pdev->dev, "clock rate not defined\n");
+ return -EINVAL;
+ }
+
data->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
@@ -629,6 +625,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "80860F0A", 0 },
{ "8086228A", 0 },
{ "APMC0D08", 0},
+ { "AMD0020", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
@@ -649,3 +646,4 @@ module_platform_driver(dw8250_platform_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver");
+MODULE_ALIAS("platform:dw-apb-uart");
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index c31a22b4f845..6c0fd8b9d1c3 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -29,15 +29,12 @@
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/console.h>
-#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <asm/io.h>
#include <asm/serial.h>
-static struct earlycon_device *early_device;
-
unsigned int __weak __init serial8250_early_in(struct uart_port *port, int offset)
{
switch (port->iotype) {
@@ -45,6 +42,8 @@ unsigned int __weak __init serial8250_early_in(struct uart_port *port, int offse
return readb(port->membase + offset);
case UPIO_MEM32:
return readl(port->membase + (offset << 2));
+ case UPIO_MEM32BE:
+ return ioread32be(port->membase + (offset << 2));
case UPIO_PORT:
return inb(port->iobase + offset);
default:
@@ -61,6 +60,9 @@ void __weak __init serial8250_early_out(struct uart_port *port, int offset, int
case UPIO_MEM32:
writel(value, port->membase + (offset << 2));
break;
+ case UPIO_MEM32BE:
+ iowrite32be(value, port->membase + (offset << 2));
+ break;
case UPIO_PORT:
outb(value, port->iobase + offset);
break;
@@ -90,7 +92,8 @@ static void __init serial_putc(struct uart_port *port, int c)
static void __init early_serial8250_write(struct console *console,
const char *s, unsigned int count)
{
- struct uart_port *port = &early_device->port;
+ struct earlycon_device *device = console->data;
+ struct uart_port *port = &device->port;
unsigned int ier;
/* Save the IER and disable interrupts preserving the UUE bit */
@@ -107,21 +110,6 @@ static void __init early_serial8250_write(struct console *console,
serial8250_early_out(port, UART_IER, ier);
}
-static unsigned int __init probe_baud(struct uart_port *port)
-{
- unsigned char lcr, dll, dlm;
- unsigned int quot;
-
- lcr = serial8250_early_in(port, UART_LCR);
- serial8250_early_out(port, UART_LCR, lcr | UART_LCR_DLAB);
- dll = serial8250_early_in(port, UART_DLL);
- dlm = serial8250_early_in(port, UART_DLM);
- serial8250_early_out(port, UART_LCR, lcr);
-
- quot = (dlm << 8) | dll;
- return (port->uartclk / 16) / quot;
-}
-
static void __init init_port(struct earlycon_device *device)
{
struct uart_port *port = &device->port;
@@ -147,52 +135,20 @@ static int __init early_serial8250_setup(struct earlycon_device *device,
const char *options)
{
if (!(device->port.membase || device->port.iobase))
- return 0;
+ return -ENODEV;
if (!device->baud) {
- device->baud = probe_baud(&device->port);
- snprintf(device->options, sizeof(device->options), "%u",
- device->baud);
- }
+ struct uart_port *port = &device->port;
+ unsigned int ier;
- init_port(device);
+ /* assume the device was initialized, only mask interrupts */
+ ier = serial8250_early_in(port, UART_IER);
+ serial8250_early_out(port, UART_IER, ier & UART_IER_UUE);
+ } else
+ init_port(device);
- early_device = device;
device->con->write = early_serial8250_write;
return 0;
}
EARLYCON_DECLARE(uart8250, early_serial8250_setup);
EARLYCON_DECLARE(uart, early_serial8250_setup);
-
-int __init setup_early_serial8250_console(char *cmdline)
-{
- char match[] = "uart8250";
-
- if (cmdline && cmdline[4] == ',')
- match[4] = '\0';
-
- return setup_earlycon(cmdline, match, early_serial8250_setup);
-}
-
-int serial8250_find_port_for_earlycon(void)
-{
- struct earlycon_device *device = early_device;
- struct uart_port *port = device ? &device->port : NULL;
- int line;
- int ret;
-
- if (!port || (!port->membase && !port->iobase))
- return -ENODEV;
-
- line = serial8250_find_port(port);
- if (line < 0)
- return -ENODEV;
-
- ret = update_console_cmdline("uart", 8250,
- "ttyS", line, device->options);
- if (ret < 0)
- ret = update_console_cmdline("uart", 0,
- "ttyS", line, device->options);
-
- return ret;
-}
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index ae5eaed6aa85..0b6381214917 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
-#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 1e6899bc9429..5815e81b5fc6 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -234,18 +234,7 @@ static struct pnp_driver fintek_8250_driver = {
.id_table = fintek_dev_table,
};
-static int fintek_8250_init(void)
-{
- return pnp_register_driver(&fintek_8250_driver);
-}
-module_init(fintek_8250_init);
-
-static void fintek_8250_exit(void)
-{
- pnp_unregister_driver(&fintek_8250_driver);
-}
-module_exit(fintek_8250_exit);
-
+module_pnp_driver(fintek_8250_driver);
MODULE_DESCRIPTION("Fintek F812164 module");
MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c
index b4882082b247..2891958cd842 100644
--- a/drivers/tty/serial/8250/8250_hp300.c
+++ b/drivers/tty/serial/8250/8250_hp300.c
@@ -10,7 +10,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/serial.h>
-#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/delay.h>
#include <linux/dio.h>
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index fe6d2e51da09..9289999cb7c6 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
-#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 892eb32cdef4..46bcebba54b2 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -21,12 +21,14 @@
#include <linux/serial_core.h>
#include <linux/8250_pci.h>
#include <linux/bitops.h>
+#include <linux/rational.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>
+#include <linux/platform_data/dma-hsu.h>
#include "8250.h"
@@ -1392,45 +1394,22 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
- unsigned int m, n;
+ unsigned long fref = 100000000, fuart = baud * 16;
+ unsigned long w = BIT(15) - 1;
+ unsigned long m, n;
u32 reg;
+ /* Get Fuart closer to Fref */
+ fuart *= rounddown_pow_of_two(fref / fuart);
+
/*
* For baud rates 0.5M, 1M, 1.5M, 2M, 2.5M, 3M, 3.5M and 4M the
* dividers must be adjusted.
*
* uartclk = (m / n) * 100 MHz, where m <= n
*/
- switch (baud) {
- case 500000:
- case 1000000:
- case 2000000:
- case 4000000:
- m = 64;
- n = 100;
- p->uartclk = 64000000;
- break;
- case 3500000:
- m = 56;
- n = 100;
- p->uartclk = 56000000;
- break;
- case 1500000:
- case 3000000:
- m = 48;
- n = 100;
- p->uartclk = 48000000;
- break;
- case 2500000:
- m = 40;
- n = 100;
- p->uartclk = 40000000;
- break;
- default:
- m = 2304;
- n = 3125;
- p->uartclk = 73728000;
- }
+ rational_best_approximation(fuart, fref, w, w, &m, &n);
+ p->uartclk = fuart;
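+
+ /*
+ * For instance, a 115200 baud request scales fuart to 58982400 Hz and
+ * reduces against the 100 MHz reference to m = 9216, n = 15625; the
+ * resulting divisor of 32 then recovers 115200 exactly.
+ */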
/* Reset the clock */
reg = (m << BYT_PRV_CLK_M_VAL_SHIFT) | (n << BYT_PRV_CLK_N_VAL_SHIFT);
@@ -1525,6 +1504,167 @@ byt_serial_setup(struct serial_private *priv,
return ret;
}
+#define INTEL_MID_UART_PS 0x30
+#define INTEL_MID_UART_MUL 0x34
+#define INTEL_MID_UART_DIV 0x38
+
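+/*
+ * Program the prescaler and fractional divider so that fref * MUL / DIV
+ * matches the target fuart. For instance, at fref = 38.4 MHz a 4 Mbaud
+ * request takes the prescaler path (PS = 9, MUL/DIV = 15/16), while slower
+ * rates keep PS = 16 and only scale fuart towards fref.
+ */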
+static void intel_mid_set_termios(struct uart_port *p,
+ struct ktermios *termios,
+ struct ktermios *old,
+ unsigned long fref)
+{
+ unsigned int baud = tty_termios_baud_rate(termios);
+ unsigned short ps = 16;
+ unsigned long fuart = baud * ps;
+ unsigned long w = BIT(24) - 1;
+ unsigned long mul, div;
+
+ if (fref < fuart) {
+ /* Find prescaler value that satisfies Fuart < Fref */
+ if (fref > baud)
+ ps = fref / baud; /* baud rate too high */
+ else
+ ps = 1; /* PLL case */
+ fuart = baud * ps;
+ } else {
+ /* Get Fuart closer to Fref */
+ fuart *= rounddown_pow_of_two(fref / fuart);
+ }
+
+ rational_best_approximation(fuart, fref, w, w, &mul, &div);
+ p->uartclk = fuart * 16 / ps; /* core uses ps = 16 always */
+
+ writel(ps, p->membase + INTEL_MID_UART_PS); /* set PS */
+ writel(mul, p->membase + INTEL_MID_UART_MUL); /* set MUL */
+ writel(div, p->membase + INTEL_MID_UART_DIV);
+
+ serial8250_do_set_termios(p, termios, old);
+}
+
+static void intel_mid_set_termios_38_4M(struct uart_port *p,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ intel_mid_set_termios(p, termios, old, 38400000);
+}
+
+static void intel_mid_set_termios_50M(struct uart_port *p,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ /*
+ * The UART clock is 50 MHz, and the baud rate comes from:
+ * baud = 50M * MUL / (DIV * PS * DLAB)
+ */
+ intel_mid_set_termios(p, termios, old, 50000000);
+}
+
+static bool intel_mid_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct hsu_dma_slave *s = param;
+
+ if (s->dma_dev != chan->device->dev || s->chan_id != chan->chan_id)
+ return false;
+
+ chan->private = s;
+ return true;
+}
+
+static int intel_mid_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx,
+ int index, struct pci_dev *dma_dev)
+{
+ struct device *dev = port->port.dev;
+ struct uart_8250_dma *dma;
+ struct hsu_dma_slave *tx_param, *rx_param;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL);
+ if (!tx_param)
+ return -ENOMEM;
+
+ rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
+ if (!rx_param)
+ return -ENOMEM;
+
+ rx_param->chan_id = index * 2 + 1;
+ tx_param->chan_id = index * 2;
+
+ dma->rxconf.src_maxburst = 64;
+ dma->txconf.dst_maxburst = 64;
+
+ rx_param->dma_dev = &dma_dev->dev;
+ tx_param->dma_dev = &dma_dev->dev;
+
+ dma->fn = intel_mid_dma_filter;
+ dma->rx_param = rx_param;
+ dma->tx_param = tx_param;
+
+ port->port.type = PORT_16750;
+ port->port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ port->dma = dma;
+
+ return pci_default_setup(priv, board, port, idx);
+}
+
+#define PCI_DEVICE_ID_INTEL_PNW_UART1 0x081b
+#define PCI_DEVICE_ID_INTEL_PNW_UART2 0x081c
+#define PCI_DEVICE_ID_INTEL_PNW_UART3 0x081d
+
+static int pnw_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ struct pci_dev *pdev = priv->dev;
+ struct pci_dev *dma_dev;
+ int index;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_PNW_UART1:
+ index = 0;
+ break;
+ case PCI_DEVICE_ID_INTEL_PNW_UART2:
+ index = 1;
+ break;
+ case PCI_DEVICE_ID_INTEL_PNW_UART3:
+ index = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 3));
+
+ port->port.set_termios = intel_mid_set_termios_50M;
+
+ return intel_mid_serial_setup(priv, board, port, idx, index, dma_dev);
+}
+
+#define PCI_DEVICE_ID_INTEL_TNG_UART 0x1191
+
+static int tng_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ struct pci_dev *pdev = priv->dev;
+ struct pci_dev *dma_dev;
+ int index = PCI_FUNC(pdev->devfn);
+
+ /* Currently no support for HSU port0 */
+ if (index-- == 0)
+ return -ENODEV;
+
+ dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(5, 0));
+
+ port->port.set_termios = intel_mid_set_termios_38_4M;
+
+ return intel_mid_serial_setup(priv, board, port, idx, index, dma_dev);
+}
+
static int
pci_omegapci_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1550,95 +1690,71 @@ static int pci_fintek_setup(struct serial_private *priv,
struct uart_8250_port *port, int idx)
{
struct pci_dev *pdev = priv->dev;
- unsigned long base;
- unsigned long iobase;
- unsigned long ciobase = 0;
u8 config_base;
+ u16 iobase;
+
+ config_base = 0x40 + 0x08 * idx;
+
+ /* Get the io address from configuration space */
+ pci_read_config_word(pdev, config_base + 4, &iobase);
+
+ dev_dbg(&pdev->dev, "%s: idx=%d iobase=0x%x", __func__, idx, iobase);
+
+ port->port.iotype = UPIO_PORT;
+ port->port.iobase = iobase;
+
+ return 0;
+}
+
+static int pci_fintek_init(struct pci_dev *dev)
+{
+ unsigned long iobase;
+ u32 max_port, i;
u32 bar_data[3];
+ u8 config_base;
- /*
- * Find each UARTs offset in PCI configuraion space
- */
- switch (idx) {
- case 0:
- config_base = 0x40;
- break;
- case 1:
- config_base = 0x48;
- break;
- case 2:
- config_base = 0x50;
- break;
- case 3:
- config_base = 0x58;
- break;
- case 4:
- config_base = 0x60;
- break;
- case 5:
- config_base = 0x68;
- break;
- case 6:
- config_base = 0x70;
- break;
- case 7:
- config_base = 0x78;
- break;
- case 8:
- config_base = 0x80;
+ switch (dev->device) {
+ case 0x1104: /* 4 ports */
+ case 0x1108: /* 8 ports */
+ max_port = dev->device & 0xff;
break;
- case 9:
- config_base = 0x88;
- break;
- case 10:
- config_base = 0x90;
- break;
- case 11:
- config_base = 0x98;
+ case 0x1112: /* 12 ports */
+ max_port = 12;
break;
default:
- /* Unknown number of ports, get out of here */
return -EINVAL;
}
- if (idx < 4) {
- base = pci_resource_start(priv->dev, 3);
- ciobase = (int)(base + (0x8 * idx));
- }
-
/* Get the io address dispatch from the BIOS */
- pci_read_config_dword(pdev, 0x24, &bar_data[0]);
- pci_read_config_dword(pdev, 0x20, &bar_data[1]);
- pci_read_config_dword(pdev, 0x1c, &bar_data[2]);
-
- /* Calculate Real IO Port */
- iobase = (bar_data[idx/4] & 0xffffffe0) + (idx % 4) * 8;
+ pci_read_config_dword(dev, 0x24, &bar_data[0]);
+ pci_read_config_dword(dev, 0x20, &bar_data[1]);
+ pci_read_config_dword(dev, 0x1c, &bar_data[2]);
- dev_dbg(&pdev->dev, "%s: idx=%d iobase=0x%lx ciobase=0x%lx config_base=0x%2x\n",
- __func__, idx, iobase, ciobase, config_base);
+ for (i = 0; i < max_port; ++i) {
+ /* UART0 configuration offset starts at 0x40 */
+ config_base = 0x40 + 0x08 * i;
- /* Enable UART I/O port */
- pci_write_config_byte(pdev, config_base + 0x00, 0x01);
+ /* Calculate Real IO Port */
+ iobase = (bar_data[i / 4] & 0xffffffe0) + (i % 4) * 8;
- /* Select 128-byte FIFO and 8x FIFO threshold */
- pci_write_config_byte(pdev, config_base + 0x01, 0x33);
+ /* Enable UART I/O port */
+ pci_write_config_byte(dev, config_base + 0x00, 0x01);
- /* LSB UART */
- pci_write_config_byte(pdev, config_base + 0x04, (u8)(iobase & 0xff));
+ /* Select 128-byte FIFO and 8x FIFO threshold */
+ pci_write_config_byte(dev, config_base + 0x01, 0x33);
- /* MSB UART */
- pci_write_config_byte(pdev, config_base + 0x05, (u8)((iobase & 0xff00) >> 8));
+ /* LSB UART */
+ pci_write_config_byte(dev, config_base + 0x04,
+ (u8)(iobase & 0xff));
- /* irq number, this usually fails, but the spec says to do it anyway. */
- pci_write_config_byte(pdev, config_base + 0x06, pdev->irq);
+ /* MSB UART */
+ pci_write_config_byte(dev, config_base + 0x05,
+ (u8)((iobase & 0xff00) >> 8));
- port->port.iotype = UPIO_PORT;
- port->port.iobase = iobase;
- port->port.mapbase = 0;
- port->port.membase = NULL;
- port->port.regshift = 0;
+ pci_write_config_byte(dev, config_base + 0x06, dev->irq);
+ }
- return 0;
+ return max_port;
}
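A stand-alone sketch of the port math used by pci_fintek_setup()/pci_fintek_init() above, with hypothetical BIOS-assigned base addresses: each port's configuration block sits at 0x40 + 8*i, and its I/O base comes from one of the three dwords read at config offsets 0x24/0x20/0x1c (bar_data[i/4], masked to a 32-byte boundary) plus an 8-byte slot per port; device 0x1104 exposes 4 ports, 0x1108 exposes 8, 0x1112 exposes 12.

#include <stdio.h>

int main(void)
{
	/* Hypothetical BIOS-assigned bases (bar_data[0..2] in the driver) */
	unsigned int bar_data[3] = { 0x3000, 0x3020, 0x3040 };
	unsigned int i, max_port = 8;	/* e.g. device ID 0x1108 -> 8 ports */

	for (i = 0; i < max_port; i++) {
		unsigned int config_base = 0x40 + 0x08 * i;
		unsigned int iobase = (bar_data[i / 4] & 0xffffffe0)
				      + (i % 4) * 8;

		printf("port %u: config_base=0x%02x iobase=0x%04x\n",
		       i, config_base, iobase);
	}
	return 0;
}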
static int skip_tx_en_setup(struct serial_private *priv,
@@ -1882,6 +1998,8 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
@@ -1989,6 +2107,34 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
},
{
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PNW_UART1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pnw_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PNW_UART2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pnw_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PNW_UART3,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pnw_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_TNG_UART,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = tng_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_BSW_UART1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
@@ -2376,6 +2522,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_xr17v35x_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_EXAR,
+ .device = PCI_DEVICE_ID_EXAR_XR17V8358,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_xr17v35x_setup,
+ },
/*
* Xircom cards
*/
@@ -2653,6 +2806,7 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_fintek_setup,
+ .init = pci_fintek_init,
},
{
.vendor = 0x1c29,
@@ -2660,6 +2814,7 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_fintek_setup,
+ .init = pci_fintek_init,
},
{
.vendor = 0x1c29,
@@ -2667,6 +2822,7 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_fintek_setup,
+ .init = pci_fintek_init,
},
/*
@@ -2852,6 +3008,7 @@ enum pci_board_num_t {
pbn_exar_XR17V352,
pbn_exar_XR17V354,
pbn_exar_XR17V358,
+ pbn_exar_XR17V8358,
pbn_exar_ibm_saturn,
pbn_pasemi_1682M,
pbn_ni8430_2,
@@ -2864,6 +3021,8 @@ enum pci_board_num_t {
pbn_ADDIDATA_PCIe_8_3906250,
pbn_ce4100_1_115200,
pbn_byt,
+ pbn_pnw,
+ pbn_tng,
pbn_qrk,
pbn_omegapci,
pbn_NETMOS9900_2s_115200,
@@ -3536,6 +3695,14 @@ static struct pciserial_board pci_boards[] = {
.reg_shift = 0,
.first_offset = 0,
},
+ [pbn_exar_XR17V8358] = {
+ .flags = FL_BASE0,
+ .num_ports = 16,
+ .base_baud = 7812500,
+ .uart_offset = 0x400,
+ .reg_shift = 0,
+ .first_offset = 0,
+ },
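The new pbn_exar_XR17V8358 entry implies, from the table values alone, a 125 MHz reference clock (base_baud 7812500 * 16) and one 0x400-byte register window per port in BAR0; a small restatement of that arithmetic, using nothing beyond the values above:

#include <stdio.h>

int main(void)
{
	const unsigned int uart_offset = 0x400, first_offset = 0;
	const unsigned int base_baud = 7812500, num_ports = 16;
	unsigned int n;

	printf("UART input clock: %u Hz\n", base_baud * 16);	/* 125 MHz */
	for (n = 0; n < num_ports; n++)
		printf("port %2u registers at BAR0 + 0x%04x\n",
		       n, first_offset + n * uart_offset);
	return 0;
}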
[pbn_exar_ibm_saturn] = {
.flags = FL_BASE0,
.num_ports = 1,
@@ -3630,6 +3797,16 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 0x80,
.reg_shift = 2,
},
+ [pbn_pnw] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 115200,
+ },
+ [pbn_tng] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 1843200,
+ },
[pbn_qrk] = {
.flags = FL_BASE0,
.num_ports = 1,
@@ -4006,41 +4183,41 @@ static void pciserial_remove_one(struct pci_dev *dev)
pci_disable_device(dev);
}
-#ifdef CONFIG_PM
-static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pciserial_suspend_one(struct device *dev)
{
- struct serial_private *priv = pci_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct serial_private *priv = pci_get_drvdata(pdev);
if (priv)
pciserial_suspend_ports(priv);
- pci_save_state(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
return 0;
}
-static int pciserial_resume_one(struct pci_dev *dev)
+static int pciserial_resume_one(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct serial_private *priv = pci_get_drvdata(pdev);
int err;
- struct serial_private *priv = pci_get_drvdata(dev);
-
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
if (priv) {
/*
* The device may have been disabled. Re-enable it.
*/
- err = pci_enable_device(dev);
+ err = pci_enable_device(pdev);
/* FIXME: We cannot simply error out here */
if (err)
- dev_err(&dev->dev, "Unable to re-enable ports, trying to continue.\n");
+ dev_err(dev, "Unable to re-enable ports, trying to continue.\n");
pciserial_resume_ports(priv);
}
return 0;
}
#endif
+static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
+ pciserial_resume_one);
+
static struct pci_device_id serial_pci_tbl[] = {
/* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
{ PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
@@ -4921,7 +5098,7 @@ static struct pci_device_id serial_pci_tbl[] = {
0,
0, pbn_exar_XR17C158 },
/*
- * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs
+ * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs
*/
{ PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352,
PCI_ANY_ID, PCI_ANY_ID,
@@ -4935,7 +5112,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0,
0, pbn_exar_XR17V358 },
-
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_exar_XR17V8358 },
/*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
@@ -5363,6 +5543,26 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_byt },
/*
+ * Intel Penwell
+ */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PNW_UART1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pnw},
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PNW_UART2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pnw},
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PNW_UART3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pnw},
+
+ /*
+ * Intel Tangier
+ */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TNG_UART,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_tng},
+
+ /*
* Intel Quark x1000
*/
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART,
@@ -5510,10 +5710,9 @@ static struct pci_driver serial_pci_driver = {
.name = "serial",
.probe = pciserial_init_one,
.remove = pciserial_remove_one,
-#ifdef CONFIG_PM
- .suspend = pciserial_suspend_one,
- .resume = pciserial_resume_one,
-#endif
+ .driver = {
+ .pm = &pciserial_pm_ops,
+ },
.id_table = serial_pci_tbl,
.err_handler = &serial8250_err_handler,
};
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 6f7f2d753def..c35070356528 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -108,6 +108,7 @@ config SERIAL_8250_PCI
tristate "8250/16550 PCI device support" if EXPERT
depends on SERIAL_8250 && PCI
default SERIAL_8250
+ select RATIONAL
help
This builds standard PCI serial support. You may be able to
disable this feature if you only need legacy serial support.
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index d2501f01cd03..f8120c1bde14 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -20,7 +20,7 @@ comment "Non-8250 serial port support"
config SERIAL_AMBA_PL010
tristate "ARM AMBA PL010 serial port support"
- depends on ARM_AMBA && (BROKEN || !ARCH_VERSATILE)
+ depends on ARM_AMBA
select SERIAL_CORE
help
This selects the ARM(R) AMBA(R) PrimeCell PL010 UART. If you have
@@ -483,16 +483,6 @@ config SERIAL_SA1100_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
-config SERIAL_MFD_HSU
- tristate "Medfield High Speed UART support"
- depends on PCI
- select SERIAL_CORE
-
-config SERIAL_MFD_HSU_CONSOLE
- bool "Medfile HSU serial console support"
- depends on SERIAL_MFD_HSU=y
- select SERIAL_CORE_CONSOLE
-
config SERIAL_BFIN
tristate "Blackfin serial port support"
depends on BLACKFIN
@@ -835,7 +825,7 @@ config SERIAL_MCF_CONSOLE
config SERIAL_PMACZILOG
tristate "Mac or PowerMac z85c30 ESCC support"
- depends on (M68K && MAC) || (PPC_OF && PPC_PMAC)
+ depends on (M68K && MAC) || PPC_PMAC
select SERIAL_CORE
help
This driver supports the Zilog z85C30 serial ports found on
@@ -878,7 +868,7 @@ config SERIAL_PMACZILOG_CONSOLE
config SERIAL_CPM
tristate "CPM SCC/SMC serial port support"
- depends on CPM2 || 8xx
+ depends on CPM2 || CPM1
select SERIAL_CORE
help
This driver supports the SCC and SMC serial ports on Motorola
@@ -1054,7 +1044,7 @@ config SERIAL_SGI_IOC3
config SERIAL_MSM
bool "MSM on-chip serial port support"
- depends on ARCH_MSM || ARCH_QCOM
+ depends on ARCH_QCOM
select SERIAL_CORE
config SERIAL_MSM_CONSOLE
@@ -1063,18 +1053,6 @@ config SERIAL_MSM_CONSOLE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
-config SERIAL_MSM_HS
- tristate "MSM UART High Speed: Serial Driver"
- depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
- select SERIAL_CORE
- help
- If you have a machine based on MSM family of SoCs, you
- can enable its onboard high speed serial port by enabling
- this option.
-
- Choose M here to compile it as a module. The module will be
- called msm_serial_hs.
-
config SERIAL_VT8500
bool "VIA VT8500 on-chip serial port support"
depends on ARCH_VT8500
@@ -1153,7 +1131,7 @@ config SERIAL_OMAP_CONSOLE
config SERIAL_OF_PLATFORM_NWPSERIAL
tristate "NWP serial port driver"
- depends on PPC_OF && PPC_DCR
+ depends on PPC_DCR
select SERIAL_OF_PLATFORM
select SERIAL_CORE_CONSOLE
select SERIAL_CORE
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 599be4b05a26..c3ac3d930b33 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -62,7 +62,6 @@ obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
-obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
@@ -78,7 +77,6 @@ obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
-obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8d94c194f090..6f5a0720a8c8 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -58,6 +58,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#define UART_NR 14
@@ -156,7 +157,9 @@ struct uart_amba_port {
unsigned int lcrh_tx; /* vendor-specific */
unsigned int lcrh_rx; /* vendor-specific */
unsigned int old_cr; /* state during shutdown */
+ struct delayed_work tx_softirq_work;
bool autorts;
+ unsigned int tx_irq_seen; /* 0=none, 1=1, 2=2 or more */
char type[12];
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
@@ -164,6 +167,7 @@ struct uart_amba_port {
bool using_rx_dma;
struct pl011_dmarx_data dmarx;
struct pl011_dmatx_data dmatx;
+ bool dma_probed;
#endif
};
@@ -261,10 +265,11 @@ static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
}
}
-static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
+static void pl011_dma_probe(struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
+ struct device *dev = uap->port.dev;
struct dma_slave_config tx_conf = {
.dst_addr = uap->port.mapbase + UART01x_DR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
@@ -275,9 +280,14 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
struct dma_chan *chan;
dma_cap_mask_t mask;
- chan = dma_request_slave_channel(dev, "tx");
+ uap->dma_probed = true;
+ chan = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(chan)) {
+ if (PTR_ERR(chan) == -EPROBE_DEFER) {
+ uap->dma_probed = false;
+ return;
+ }
- if (!chan) {
/* We need platform data */
if (!plat || !plat->dma_filter) {
dev_info(uap->port.dev, "no DMA platform data\n");
@@ -385,63 +395,17 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
}
}
-#ifndef MODULE
-/*
- * Stack up the UARTs and let the above initcall be done at device
- * initcall time, because the serial driver is called as an arch
- * initcall, and at this time the DMA subsystem is not yet registered.
- * At this point the driver will switch over to using DMA where desired.
- */
-struct dma_uap {
- struct list_head node;
- struct uart_amba_port *uap;
- struct device *dev;
-};
-
-static LIST_HEAD(pl011_dma_uarts);
-
-static int __init pl011_dma_initcall(void)
-{
- struct list_head *node, *tmp;
-
- list_for_each_safe(node, tmp, &pl011_dma_uarts) {
- struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
- pl011_dma_probe_initcall(dmau->dev, dmau->uap);
- list_del(node);
- kfree(dmau);
- }
- return 0;
-}
-
-device_initcall(pl011_dma_initcall);
-
-static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
-{
- struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
- if (dmau) {
- dmau->uap = uap;
- dmau->dev = dev;
- list_add_tail(&dmau->node, &pl011_dma_uarts);
- }
-}
-#else
-static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
-{
- pl011_dma_probe_initcall(dev, uap);
-}
-#endif
-
static void pl011_dma_remove(struct uart_amba_port *uap)
{
- /* TODO: remove the initcall if it has not yet executed */
if (uap->dmatx.chan)
dma_release_channel(uap->dmatx.chan);
if (uap->dmarx.chan)
dma_release_channel(uap->dmarx.chan);
}
-/* Forward declare this for the refill routine */
+/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
+static void pl011_start_tx_pio(struct uart_amba_port *uap);
/*
* The current DMA TX buffer has been sent.
@@ -479,14 +443,13 @@ static void pl011_dma_tx_callback(void *data)
return;
}
- if (pl011_dma_tx_refill(uap) <= 0) {
+ if (pl011_dma_tx_refill(uap) <= 0)
/*
* We didn't queue a DMA buffer for some reason, but we
* have data pending to be sent. Re-enable the TX IRQ.
*/
- uap->im |= UART011_TXIM;
- writew(uap->im, uap->port.membase + UART011_IMSC);
- }
+ pl011_start_tx_pio(uap);
+
spin_unlock_irqrestore(&uap->port.lock, flags);
}
@@ -664,12 +627,10 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
if (!uap->dmatx.queued) {
if (pl011_dma_tx_refill(uap) > 0) {
uap->im &= ~UART011_TXIM;
- ret = true;
- } else {
- uap->im |= UART011_TXIM;
+ writew(uap->im, uap->port.membase +
+ UART011_IMSC);
+ } else
ret = false;
- }
- writew(uap->im, uap->port.membase + UART011_IMSC);
} else if (!(uap->dmacr & UART011_TXDMAE)) {
uap->dmacr |= UART011_TXDMAE;
writew(uap->dmacr,
@@ -1021,6 +982,9 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
{
int ret;
+ if (!uap->dma_probed)
+ pl011_dma_probe(uap);
+
if (!uap->dmatx.chan)
return;
@@ -1142,7 +1106,7 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#else
/* Blank functions if the DMA engine is not available */
-static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
+static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}
@@ -1208,15 +1172,24 @@ static void pl011_stop_tx(struct uart_port *port)
pl011_dma_tx_stop(uap);
}
+static bool pl011_tx_chars(struct uart_amba_port *uap);
+
+/* Start TX with programmed I/O only (no DMA) */
+static void pl011_start_tx_pio(struct uart_amba_port *uap)
+{
+ uap->im |= UART011_TXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ if (!uap->tx_irq_seen)
+ pl011_tx_chars(uap);
+}
+
static void pl011_start_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- if (!pl011_dma_tx_start(uap)) {
- uap->im |= UART011_TXIM;
- writew(uap->im, uap->port.membase + UART011_IMSC);
- }
+ if (!pl011_dma_tx_start(uap))
+ pl011_start_tx_pio(uap);
}
static void pl011_stop_rx(struct uart_port *port)
@@ -1274,40 +1247,87 @@ __acquires(&uap->port.lock)
spin_lock(&uap->port.lock);
}
-static void pl011_tx_chars(struct uart_amba_port *uap)
+/*
+ * Transmit a character
+ * There must be at least one free entry in the TX FIFO to accept the char.
+ *
+ * Returns true if the FIFO might have space in it afterwards;
+ * returns false if the FIFO definitely became full.
+ */
+static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
+{
+ writew(c, uap->port.membase + UART01x_DR);
+ uap->port.icount.tx++;
+
+ if (likely(uap->tx_irq_seen > 1))
+ return true;
+
+ return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
+}
+
+static bool pl011_tx_chars(struct uart_amba_port *uap)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count;
+ if (unlikely(uap->tx_irq_seen < 2))
+ /*
+ * Initial FIFO fill level unknown: we must check TXFF
+ * after each write, so just try to fill up the FIFO.
+ */
+ count = uap->fifosize;
+ else /* tx_irq_seen >= 2 */
+ /*
+ * FIFO initially at least half-empty, so we can simply
+ * write half the FIFO without polling TXFF.
+ *
+ * Note: the *first* TX IRQ can still race with
+ * pl011_start_tx_pio(), which can result in the FIFO
+ * being fuller than expected in that case.
+ */
+ count = uap->fifosize >> 1;
+
+ /*
+ * If the FIFO is full we're guaranteed a TX IRQ at some later point,
+ * and can't transmit immediately in any case:
+ */
+ if (unlikely(uap->tx_irq_seen < 2 &&
+ readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF))
+ return false;
+
if (uap->port.x_char) {
- writew(uap->port.x_char, uap->port.membase + UART01x_DR);
- uap->port.icount.tx++;
+ pl011_tx_char(uap, uap->port.x_char);
uap->port.x_char = 0;
- return;
+ --count;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
pl011_stop_tx(&uap->port);
- return;
+ goto done;
}
/* If we are using DMA mode, try to send some characters. */
if (pl011_dma_tx_irq(uap))
- return;
+ goto done;
- count = uap->fifosize >> 1;
- do {
- writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
+ while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) {
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- uap->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
- } while (--count > 0);
+ }
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit)) {
pl011_stop_tx(&uap->port);
+ goto done;
+ }
+
+ if (unlikely(!uap->tx_irq_seen))
+ schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout);
+
+done:
+ return false;
}
static void pl011_modem_status(struct uart_amba_port *uap)
@@ -1334,6 +1354,28 @@ static void pl011_modem_status(struct uart_amba_port *uap)
wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
+static void pl011_tx_softirq(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct uart_amba_port *uap =
+ container_of(dwork, struct uart_amba_port, tx_softirq_work);
+
+ spin_lock(&uap->port.lock);
+ while (pl011_tx_chars(uap)) ;
+ spin_unlock(&uap->port.lock);
+}
+
+static void pl011_tx_irq_seen(struct uart_amba_port *uap)
+{
+ if (likely(uap->tx_irq_seen > 1))
+ return;
+
+ uap->tx_irq_seen++;
+ if (uap->tx_irq_seen < 2)
+ /* first TX IRQ */
+ cancel_delayed_work(&uap->tx_softirq_work);
+}
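A compact model of the tx_irq_seen accounting introduced above (only a sketch of the counting, not driver code): 0 means no TX interrupt has been proven to work yet, so transmission is also driven from the delayed-work fallback; the first interrupt cancels that fallback; from the second onwards pl011_tx_chars() trusts the interrupt and writes half the FIFO without polling TXFF.

#include <stdio.h>

/* 0 = none seen, 1 = one seen, 2 = two or more: IRQ path trusted */
static unsigned int tx_irq_seen;

static void model_tx_irq(void)
{
	if (tx_irq_seen > 1)
		return;				/* already trusted */

	tx_irq_seen++;
	if (tx_irq_seen < 2)
		printf("first TX IRQ: cancel the delayed-work fallback\n");
	else
		printf("second TX IRQ: fill half the FIFO, no TXFF polling\n");
}

int main(void)
{
	int i;

	printf("startup: tx_irq_seen=0, TX also driven from delayed work\n");
	for (i = 0; i < 3; i++)
		model_tx_irq();
	return 0;
}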
+
static irqreturn_t pl011_int(int irq, void *dev_id)
{
struct uart_amba_port *uap = dev_id;
@@ -1372,8 +1414,10 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
if (status & (UART011_DSRMIS|UART011_DCDMIS|
UART011_CTSMIS|UART011_RIMIS))
pl011_modem_status(uap);
- if (status & UART011_TXIS)
+ if (status & UART011_TXIS) {
+ pl011_tx_irq_seen(uap);
pl011_tx_chars(uap);
+ }
if (pass_counter-- == 0)
break;
@@ -1577,7 +1621,7 @@ static int pl011_startup(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- unsigned int cr, lcr_h, fbrd, ibrd;
+ unsigned int cr;
int retval;
retval = pl011_hwinit(port);
@@ -1595,29 +1639,10 @@ static int pl011_startup(struct uart_port *port)
writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
- /*
- * Provoke TX FIFO interrupt into asserting. Taking care to preserve
- * baud rate and data format specified by FBRD, IBRD and LCRH as the
- * UART may already be in use as a console.
- */
- spin_lock_irq(&uap->port.lock);
+ /* Assume that TX IRQ doesn't work until we see one: */
+ uap->tx_irq_seen = 0;
- fbrd = readw(uap->port.membase + UART011_FBRD);
- ibrd = readw(uap->port.membase + UART011_IBRD);
- lcr_h = readw(uap->port.membase + uap->lcrh_rx);
-
- cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
- writew(cr, uap->port.membase + UART011_CR);
- writew(0, uap->port.membase + UART011_FBRD);
- writew(1, uap->port.membase + UART011_IBRD);
- pl011_write_lcr_h(uap, 0);
- writew(0, uap->port.membase + UART01x_DR);
- while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
- barrier();
-
- writew(fbrd, uap->port.membase + UART011_FBRD);
- writew(ibrd, uap->port.membase + UART011_IBRD);
- pl011_write_lcr_h(uap, lcr_h);
+ spin_lock_irq(&uap->port.lock);
/* restore RTS and DTR */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
@@ -1672,6 +1697,8 @@ static void pl011_shutdown(struct uart_port *port)
container_of(port, struct uart_amba_port, port);
unsigned int cr;
+ cancel_delayed_work_sync(&uap->tx_softirq_work);
+
/*
* disable all interrupts
*/
@@ -2218,7 +2245,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.ops = &amba_pl011_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
- pl011_dma_probe(&dev->dev, uap);
+ INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq);
/* Ensure interrupts from this UART are masked and cleared */
writew(0, uap->port.membase + UART011_IMSC);
@@ -2233,7 +2260,8 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
if (!amba_reg.state) {
ret = uart_register_driver(&amba_reg);
if (ret < 0) {
- pr_err("Failed to register AMBA-PL011 driver\n");
+ dev_err(&dev->dev,
+ "Failed to register AMBA-PL011 driver\n");
return ret;
}
}
@@ -2242,7 +2270,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
if (ret) {
amba_ports[i] = NULL;
uart_unregister_driver(&amba_reg);
- pl011_dma_remove(uap);
}
return ret;
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 4f0f95e358e8..f3af317131ac 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -572,7 +572,7 @@ static int apbuart_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id apbuart_match[] = {
+static const struct of_device_id apbuart_match[] = {
{
.name = "GAISLER_APBUART",
},
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 77fc9faa74a4..1519d2ca7705 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -649,7 +649,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
id = 0;
}
- if (id > CONFIG_SERIAL_AR933X_NR_UARTS)
+ if (id >= CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4e959c43f680..27dade29646b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -855,7 +855,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
spin_lock_init(&atmel_port->lock_tx);
sg_init_table(&atmel_port->sg_tx, 1);
/* UART circular tx buffer is an aligned page. */
- BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
+ BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
sg_set_page(&atmel_port->sg_tx,
virt_to_page(port->state->xmit.buf),
UART_XMIT_SIZE,
@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
config.direction = DMA_MEM_TO_DEV;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.dst_addr = port->mapbase + ATMEL_US_THR;
+ config.dst_maxburst = 1;
ret = dmaengine_slave_config(atmel_port->chan_tx,
&config);
@@ -1034,10 +1035,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
spin_lock_init(&atmel_port->lock_rx);
sg_init_table(&atmel_port->sg_rx, 1);
/* UART circular rx buffer is an aligned page. */
- BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
+ BUG_ON(!PAGE_ALIGNED(ring->buf));
sg_set_page(&atmel_port->sg_rx,
virt_to_page(ring->buf),
- ATMEL_SERIAL_RINGSIZE,
+ sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
(int)ring->buf & ~PAGE_MASK);
nent = dma_map_sg(port->dev,
&atmel_port->sg_rx,
@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
config.direction = DMA_DEV_TO_MEM;
config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.src_addr = port->mapbase + ATMEL_US_RHR;
+ config.src_maxburst = 1;
ret = dmaengine_slave_config(atmel_port->chan_rx,
&config);
@@ -1554,7 +1556,7 @@ static void atmel_tasklet_func(unsigned long data)
spin_unlock(&port->lock);
}
-static int atmel_init_property(struct atmel_uart_port *atmel_port,
+static void atmel_init_property(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1595,7 +1597,6 @@ static int atmel_init_property(struct atmel_uart_port *atmel_port,
atmel_port->use_dma_tx = false;
}
- return 0;
}
static void atmel_init_rs485(struct uart_port *port,
@@ -1777,10 +1778,13 @@ static int atmel_startup(struct uart_port *port)
if (retval)
goto free_irq;
+ tasklet_enable(&atmel_port->tasklet);
+
/*
* Initialize DMA (if necessary)
*/
atmel_init_property(atmel_port, pdev);
+ atmel_set_ops(port);
if (atmel_port->prepare_rx) {
retval = atmel_port->prepare_rx(port);
@@ -1879,6 +1883,7 @@ static void atmel_shutdown(struct uart_port *port)
* Clear out any scheduled tasklets before
* we destroy the buffers
*/
+ tasklet_disable(&atmel_port->tasklet);
tasklet_kill(&atmel_port->tasklet);
/*
@@ -2256,8 +2261,8 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
struct uart_port *port = &atmel_port->uart;
struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
- if (!atmel_init_property(atmel_port, pdev))
- atmel_set_ops(port);
+ atmel_init_property(atmel_port, pdev);
+ atmel_set_ops(port);
atmel_init_rs485(port, pdev);
@@ -2272,6 +2277,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
(unsigned long)port);
+ tasklet_disable(&atmel_port->tasklet);
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
@@ -2581,8 +2587,8 @@ static int atmel_init_gpios(struct atmel_uart_port *p, struct device *dev)
struct gpio_desc *gpiod;
p->gpios = mctrl_gpio_init(dev, 0);
- if (IS_ERR_OR_NULL(p->gpios))
- return -1;
+ if (IS_ERR(p->gpios))
+ return PTR_ERR(p->gpios);
for (i = 0; i < UART_GPIO_MAX; i++) {
gpiod = mctrl_gpio_to_gpiod(p->gpios, i);
@@ -2635,9 +2641,10 @@ static int atmel_serial_probe(struct platform_device *pdev)
spin_lock_init(&port->lock_suspended);
ret = atmel_init_gpios(port, &pdev->dev);
- if (ret < 0)
- dev_err(&pdev->dev, "%s",
- "Failed to initialize GPIOs. The serial port may not work as expected");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize GPIOs.");
+ goto err;
+ }
ret = atmel_init_port(port, pdev);
if (ret)
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 01d83df08e3d..681e0f3d5e0e 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -854,7 +854,7 @@ static int bcm_uart_probe(struct platform_device *pdev)
ret = uart_add_one_port(&bcm_uart_driver, port);
if (ret) {
- ports[pdev->id].membase = 0;
+ ports[pdev->id].membase = NULL;
return ret;
}
platform_set_drvdata(pdev, port);
@@ -868,7 +868,7 @@ static int bcm_uart_remove(struct platform_device *pdev)
port = platform_get_drvdata(pdev);
uart_remove_one_port(&bcm_uart_driver, port);
/* mark port as free */
- ports[pdev->id].membase = 0;
+ ports[pdev->id].membase = NULL;
return 0;
}
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 43b3e2c233ff..155781ece050 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -464,6 +464,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
int x_pos, pos;
unsigned long flags;
+ dma_disable_irq_nosync(uart->rx_dma_channel);
spin_lock_irqsave(&uart->rx_lock, flags);
/* 2D DMA RX buffer ring is used. Because curr_y_count and
@@ -496,6 +497,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
}
spin_unlock_irqrestore(&uart->rx_lock, flags);
+ dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 6e11c275f2ab..d5d2dd7c7917 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -501,6 +501,8 @@ static int uart_clps711x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s);
s->gpios = mctrl_gpio_init(&pdev->dev, 0);
+ if (IS_ERR(s->gpios))
+ return PTR_ERR(s->gpios);
ret = uart_add_one_port(&clps711x_uart, &s->port);
if (ret)
diff --git a/drivers/tty/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
index e072724ea754..896a5d57881c 100644
--- a/drivers/tty/serial/cpm_uart/Makefile
+++ b/drivers/tty/serial/cpm_uart/Makefile
@@ -6,6 +6,6 @@ obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o
# Select the correct platform objects.
cpm_uart-objs-$(CONFIG_CPM2) += cpm_uart_cpm2.o
-cpm_uart-objs-$(CONFIG_8xx) += cpm_uart_cpm1.o
+cpm_uart-objs-$(CONFIG_CPM1) += cpm_uart_cpm1.o
cpm_uart-objs := cpm_uart_core.o $(cpm_uart-objs-y)
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index cf34d26ff6cd..0ad027b95873 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -19,7 +19,7 @@
#if defined(CONFIG_CPM2)
#include "cpm_uart_cpm2.h"
-#elif defined(CONFIG_8xx)
+#elif defined(CONFIG_CPM1)
#include "cpm_uart_cpm1.h"
#endif
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index fddb1fd4d9d3..08431adeacd5 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1435,7 +1435,7 @@ static int cpm_uart_remove(struct platform_device *ofdev)
return uart_remove_one_port(&cpm_reg, &pinfo->port);
}
-static struct of_device_id cpm_uart_match[] = {
+static const struct of_device_id cpm_uart_match[] = {
{
.compatible = "fsl,cpm1-smc-uart",
},
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 64fe25a4285c..6dc471e30e79 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -10,6 +10,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -34,6 +37,10 @@ static struct earlycon_device early_console_dev = {
.con = &early_con,
};
+extern struct earlycon_id __earlycon_table[];
+static const struct earlycon_id __earlycon_table_sentinel
+ __used __section(__earlycon_table_end);
+
static const struct of_device_id __earlycon_of_table_sentinel
__used __section(__earlycon_of_table_end);
@@ -54,44 +61,29 @@ static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
return base;
}
-static int __init parse_options(struct earlycon_device *device,
- char *options)
+static int __init parse_options(struct earlycon_device *device, char *options)
{
struct uart_port *port = &device->port;
- int mmio, mmio32, length;
+ int length;
unsigned long addr;
- if (!options)
- return -ENODEV;
+ if (uart_parse_earlycon(options, &port->iotype, &addr, &options))
+ return -EINVAL;
- mmio = !strncmp(options, "mmio,", 5);
- mmio32 = !strncmp(options, "mmio32,", 7);
- if (mmio || mmio32) {
- port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
- options += mmio ? 5 : 7;
- addr = simple_strtoul(options, NULL, 0);
+ switch (port->iotype) {
+ case UPIO_MEM32:
+ port->regshift = 2; /* fall-through */
+ case UPIO_MEM:
port->mapbase = addr;
- if (mmio32)
- port->regshift = 2;
- } else if (!strncmp(options, "io,", 3)) {
- port->iotype = UPIO_PORT;
- options += 3;
- addr = simple_strtoul(options, NULL, 0);
+ break;
+ case UPIO_PORT:
port->iobase = addr;
- mmio = 0;
- } else if (!strncmp(options, "0x", 2)) {
- port->iotype = UPIO_MEM;
- addr = simple_strtoul(options, NULL, 0);
- port->mapbase = addr;
- } else {
+ break;
+ default:
return -EINVAL;
}
- port->uartclk = BASE_BAUD * 16;
-
- options = strchr(options, ',');
if (options) {
- options++;
device->baud = simple_strtoul(options, NULL, 0);
length = min(strcspn(options, " ") + 1,
(size_t)(sizeof(device->options)));
@@ -100,7 +92,7 @@ static int __init parse_options(struct earlycon_device *device,
if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM32)
pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
- mmio32 ? "32" : "",
+ (port->iotype == UPIO_MEM32) ? "32" : "",
(unsigned long long)port->mapbase,
device->options);
else
@@ -111,34 +103,21 @@ static int __init parse_options(struct earlycon_device *device,
return 0;
}
-int __init setup_earlycon(char *buf, const char *match,
- int (*setup)(struct earlycon_device *, const char *))
+static int __init register_earlycon(char *buf, const struct earlycon_id *match)
{
int err;
- size_t len;
struct uart_port *port = &early_console_dev.port;
- if (!buf || !match || !setup)
- return 0;
-
- len = strlen(match);
- if (strncmp(buf, match, len))
- return 0;
- if (buf[len] && (buf[len] != ','))
- return 0;
-
- buf += len + 1;
-
- err = parse_options(&early_console_dev, buf);
/* On parsing error, pass the options buf to the setup function */
- if (!err)
+ if (buf && !parse_options(&early_console_dev, buf))
buf = NULL;
+ port->uartclk = BASE_BAUD * 16;
if (port->mapbase)
port->membase = earlycon_map(port->mapbase, 64);
early_console_dev.con->data = &early_console_dev;
- err = setup(&early_console_dev, buf);
+ err = match->setup(&early_console_dev, buf);
if (err < 0)
return err;
if (!early_console_dev.con->write)
@@ -148,6 +127,72 @@ int __init setup_earlycon(char *buf, const char *match,
return 0;
}
+/**
+ * setup_earlycon - match and register earlycon console
+ * @buf: earlycon param string
+ *
+ * Registers the earlycon console matching the earlycon specified
+ * in the param string @buf. Acceptable param strings are of the form
+ * <name>,io|mmio|mmio32,<addr>,<options>
+ * <name>,0x<addr>,<options>
+ * <name>,<options>
+ * <name>
+ *
+ * Only for the third form does the earlycon setup() method receive the
+ * <options> string in the 'options' parameter; all other forms set
+ * the parameter to NULL.
+ *
+ * Returns 0 if an attempt to register the earlycon was made,
+ * otherwise negative error code
+ */
+int __init setup_earlycon(char *buf)
+{
+ const struct earlycon_id *match;
+
+ if (!buf || !buf[0])
+ return -EINVAL;
+
+ if (early_con.flags & CON_ENABLED)
+ return -EALREADY;
+
+ for (match = __earlycon_table; match->name[0]; match++) {
+ size_t len = strlen(match->name);
+
+ if (strncmp(buf, match->name, len))
+ continue;
+
+ if (buf[len]) {
+ if (buf[len] != ',')
+ continue;
+ buf += len + 1;
+ } else
+ buf = NULL;
+
+ return register_earlycon(buf, match);
+ }
+
+ return -ENOENT;
+}
+
+/* early_param wrapper for setup_earlycon() */
+static int __init param_setup_earlycon(char *buf)
+{
+ int err;
+
+ /*
+ * Just 'earlycon' is a valid param for devicetree earlycons;
+ * don't generate a warning from parse_early_params() in that case
+ */
+ if (!buf || !buf[0])
+ return 0;
+
+ err = setup_earlycon(buf);
+ if (err == -ENOENT || err == -EALREADY)
+ return 0;
+ return err;
+}
+early_param("earlycon", param_setup_earlycon);
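For reference, the forms accepted by setup_earlycon() above correspond to kernel command-line strings like the following; the driver names and addresses are only hypothetical examples, and <name> must match an entry in __earlycon_table:

#include <stdio.h>

/* Hypothetical examples of the accepted "earlycon=" parameter forms */
static const char * const earlycon_examples[] = {
	"uart8250,mmio32,0xff5e0000,115200",	/* <name>,mmio32,<addr>,<options> */
	"uart8250,io,0x3f8,9600n8",		/* <name>,io,<addr>,<options> */
	"uart8250,0xff5e0000",			/* <name>,0x<addr> */
	"pl011,115200",				/* <name>,<options>: setup() sees "115200" */
	"pl011",				/* <name> only */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(earlycon_examples) / sizeof(earlycon_examples[0]); i++)
		printf("earlycon=%s\n", earlycon_examples[i]);
	return 0;
}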
+
int __init of_setup_earlycon(unsigned long addr,
int (*setup)(struct earlycon_device *, const char *))
{
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3ad1458bfeb0..08ce76f4f261 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -257,7 +257,7 @@ struct lpuart_port {
struct timer_list lpuart_timer;
};
-static struct of_device_id lpuart_dt_ids[] = {
+static const struct of_device_id lpuart_dt_ids[] = {
{
.compatible = "fsl,vf610-lpuart",
},
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 0eb29b1c47ac..c8cfa0637128 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1,13 +1,10 @@
/*
- * Driver for Motorola IMX serial ports
+ * Driver for Motorola/Freescale IMX serial ports
*
- * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
- * Author: Sascha Hauer <sascha@saschahauer.de>
- * Copyright (C) 2004 Pengutronix
- *
- * Copyright (C) 2009 emlix GmbH
- * Author: Fabian Godehardt (added IrDA support for iMX)
+ * Author: Sascha Hauer <sascha@saschahauer.de>
+ * Copyright (C) 2004 Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,13 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * [29-Mar-2005] Mike Lee
- * Added hardware handshake
*/
#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -189,7 +179,7 @@
#define UART_NR 8
-/* i.mx21 type uart runs on all i.mx except i.mx1 */
+/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
IMX1_UART,
IMX21_UART,
@@ -206,10 +196,8 @@ struct imx_port {
struct uart_port port;
struct timer_list timer;
unsigned int old_status;
- int txirq, rxirq, rtsirq;
unsigned int have_rtscts:1;
unsigned int dte_mode:1;
- unsigned int use_irda:1;
unsigned int irda_inv_rx:1;
unsigned int irda_inv_tx:1;
unsigned short trcv_delay; /* transceiver delay */
@@ -236,12 +224,6 @@ struct imx_port_ucrs {
unsigned int ucr3;
};
-#ifdef CONFIG_IRDA
-#define USE_IRDA(sport) ((sport)->use_irda)
-#else
-#define USE_IRDA(sport) (0)
-#endif
-
static struct imx_uart_data imx_uart_devdata[] = {
[IMX1_UART] = {
.uts_reg = IMX1_UTS,
@@ -273,7 +255,7 @@ static struct platform_device_id imx_uart_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
-static struct of_device_id imx_uart_dt_ids[] = {
+static const struct of_device_id imx_uart_dt_ids[] = {
{ .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
@@ -376,48 +358,6 @@ static void imx_stop_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
- if (USE_IRDA(sport)) {
- /* half duplex - wait for end of transmission */
- int n = 256;
- while ((--n > 0) &&
- !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
- udelay(5);
- barrier();
- }
- /*
- * irda transceiver - wait a bit more to avoid
- * cutoff, hardware dependent
- */
- udelay(sport->trcv_delay);
-
- /*
- * half duplex - reactivate receive mode,
- * flush receive pipe echo crap
- */
- if (readl(sport->port.membase + USR2) & USR2_TXDC) {
- temp = readl(sport->port.membase + UCR1);
- temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
- writel(temp, sport->port.membase + UCR1);
-
- temp = readl(sport->port.membase + UCR4);
- temp &= ~(UCR4_TCEN);
- writel(temp, sport->port.membase + UCR4);
-
- while (readl(sport->port.membase + URXD0) &
- URXD_CHARRDY)
- barrier();
-
- temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_RRDYEN;
- writel(temp, sport->port.membase + UCR1);
-
- temp = readl(sport->port.membase + UCR4);
- temp |= UCR4_DREN;
- writel(temp, sport->port.membase + UCR4);
- }
- return;
- }
-
/*
* We are maybe in the SMP context, so if the DMA TX thread is running
* on other cpu, we have to wait for it to finish.
@@ -425,8 +365,23 @@ static void imx_stop_tx(struct uart_port *port)
if (sport->dma_is_enabled && sport->dma_is_txing)
return;
- temp = readl(sport->port.membase + UCR1);
- writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
+ temp = readl(port->membase + UCR1);
+ writel(temp & ~UCR1_TXMPTYEN, port->membase + UCR1);
+
+ /* in rs485 mode disable transmitter if shifter is empty */
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ readl(port->membase + USR2) & USR2_TXDC) {
+ temp = readl(port->membase + UCR2);
+ if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ temp &= ~UCR2_CTS;
+ else
+ temp |= UCR2_CTS;
+ writel(temp, port->membase + UCR2);
+
+ temp = readl(port->membase + UCR4);
+ temp &= ~UCR4_TCEN;
+ writel(temp, port->membase + UCR4);
+ }
}
/*
@@ -620,15 +575,18 @@ static void imx_start_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
- if (USE_IRDA(sport)) {
- /* half duplex in IrDA mode; have to disable receive mode */
- temp = readl(sport->port.membase + UCR4);
- temp &= ~(UCR4_DREN);
- writel(temp, sport->port.membase + UCR4);
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ /* enable transmitter and shifter empty irq */
+ temp = readl(port->membase + UCR2);
+ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+ temp &= ~UCR2_CTS;
+ else
+ temp |= UCR2_CTS;
+ writel(temp, port->membase + UCR2);
- temp = readl(sport->port.membase + UCR1);
- temp &= ~(UCR1_RRDYEN);
- writel(temp, sport->port.membase + UCR1);
+ temp = readl(port->membase + UCR4);
+ temp |= UCR4_TCEN;
+ writel(temp, port->membase + UCR4);
}
if (!sport->dma_is_enabled) {
@@ -636,16 +594,6 @@ static void imx_start_tx(struct uart_port *port)
writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
}
- if (USE_IRDA(sport)) {
- temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_TRDYEN;
- writel(temp, sport->port.membase + UCR1);
-
- temp = readl(sport->port.membase + UCR4);
- temp |= UCR4_TCEN;
- writel(temp, sport->port.membase + UCR4);
- }
-
if (sport->dma_is_enabled) {
if (sport->port.x_char) {
/* We have X-char to send, so enable TX IRQ and
@@ -796,6 +744,7 @@ static irqreturn_t imx_int(int irq, void *dev_id)
unsigned int sts2;
sts = readl(sport->port.membase + USR1);
+ sts2 = readl(sport->port.membase + USR2);
if (sts & USR1_RRDY) {
if (sport->dma_is_enabled)
@@ -804,8 +753,10 @@ static irqreturn_t imx_int(int irq, void *dev_id)
imx_rxint(irq, dev_id);
}
- if (sts & USR1_TRDY &&
- readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
+ if ((sts & USR1_TRDY &&
+ readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
+ (sts2 & USR2_TXDC &&
+ readl(sport->port.membase + UCR4) & UCR4_TCEN))
imx_txint(irq, dev_id);
if (sts & USR1_RTSD)
@@ -814,11 +765,10 @@ static irqreturn_t imx_int(int irq, void *dev_id)
if (sts & USR1_AWAKE)
writel(USR1_AWAKE, sport->port.membase + USR1);
- sts2 = readl(sport->port.membase + USR2);
if (sts2 & USR2_ORE) {
dev_err(sport->port.dev, "Rx FIFO overrun\n");
sport->port.icount.overrun++;
- writel(sts2 | USR2_ORE, sport->port.membase + USR2);
+ writel(USR2_ORE, sport->port.membase + USR2);
}
return IRQ_HANDLED;
@@ -866,11 +816,13 @@ static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
- temp = readl(sport->port.membase + UCR2) & ~(UCR2_CTS | UCR2_CTSC);
- if (mctrl & TIOCM_RTS)
- temp |= UCR2_CTS | UCR2_CTSC;
-
- writel(temp, sport->port.membase + UCR2);
+ if (!(port->rs485.flags & SER_RS485_ENABLED)) {
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_CTS | UCR2_CTSC);
+ if (mctrl & TIOCM_RTS)
+ temp |= UCR2_CTS | UCR2_CTSC;
+ writel(temp, sport->port.membase + UCR2);
+ }
temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP;
if (mctrl & TIOCM_LOOP)
@@ -1156,9 +1108,6 @@ static int imx_startup(struct uart_port *port)
*/
temp = readl(sport->port.membase + UCR4);
- if (USE_IRDA(sport))
- temp |= UCR4_IRSC;
-
/* set the trigger level for CTS */
temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
temp |= CTSTL << UCR4_CTSTL_SHF;
@@ -1181,10 +1130,12 @@ static int imx_startup(struct uart_port *port)
imx_uart_dma_init(sport);
spin_lock_irqsave(&sport->port.lock, flags);
+
/*
* Finally, clear and enable interrupts
*/
writel(USR1_RTSD, sport->port.membase + USR1);
+ writel(USR2_ORE, sport->port.membase + USR2);
if (sport->dma_is_inited && !sport->dma_is_enabled)
imx_enable_dma(sport);
@@ -1192,17 +1143,8 @@ static int imx_startup(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
- if (USE_IRDA(sport)) {
- temp |= UCR1_IREN;
- temp &= ~(UCR1_RTSDEN);
- }
-
writel(temp, sport->port.membase + UCR1);
- /* Clear any pending ORE flag before enabling interrupt */
- temp = readl(sport->port.membase + USR2);
- writel(temp | USR2_ORE, sport->port.membase + USR2);
-
temp = readl(sport->port.membase + UCR4);
temp |= UCR4_OREN;
writel(temp, sport->port.membase + UCR4);
@@ -1219,38 +1161,12 @@ static int imx_startup(struct uart_port *port)
writel(temp, sport->port.membase + UCR3);
}
- if (USE_IRDA(sport)) {
- temp = readl(sport->port.membase + UCR4);
- if (sport->irda_inv_rx)
- temp |= UCR4_INVR;
- else
- temp &= ~(UCR4_INVR);
- writel(temp | UCR4_DREN, sport->port.membase + UCR4);
-
- temp = readl(sport->port.membase + UCR3);
- if (sport->irda_inv_tx)
- temp |= UCR3_INVT;
- else
- temp &= ~(UCR3_INVT);
- writel(temp, sport->port.membase + UCR3);
- }
-
/*
* Enable modem status interrupts
*/
imx_enable_ms(&sport->port);
spin_unlock_irqrestore(&sport->port.lock, flags);
- if (USE_IRDA(sport)) {
- struct imxuart_platform_data *pdata;
- pdata = dev_get_platdata(sport->port.dev);
- sport->irda_inv_rx = pdata->irda_inv_rx;
- sport->irda_inv_tx = pdata->irda_inv_tx;
- sport->trcv_delay = pdata->transceiver_delay;
- if (pdata->irda_enable)
- pdata->irda_enable(1);
- }
-
return 0;
}
@@ -1286,13 +1202,6 @@ static void imx_shutdown(struct uart_port *port)
writel(temp, sport->port.membase + UCR2);
spin_unlock_irqrestore(&sport->port.lock, flags);
- if (USE_IRDA(sport)) {
- struct imxuart_platform_data *pdata;
- pdata = dev_get_platdata(sport->port.dev);
- if (pdata->irda_enable)
- pdata->irda_enable(0);
- }
-
/*
* Stop our timer.
*/
@@ -1305,8 +1214,6 @@ static void imx_shutdown(struct uart_port *port)
spin_lock_irqsave(&sport->port.lock, flags);
temp = readl(sport->port.membase + UCR1);
temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
- if (USE_IRDA(sport))
- temp &= ~(UCR1_IREN);
writel(temp, sport->port.membase + UCR1);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1320,7 +1227,7 @@ static void imx_flush_buffer(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
struct scatterlist *sgl = &sport->tx_sgl[0];
unsigned long temp;
- int i = 100, ubir, ubmr, ubrc, uts;
+ int i = 100, ubir, ubmr, uts;
if (!sport->dma_chan_tx)
return;
@@ -1345,7 +1252,6 @@ static void imx_flush_buffer(struct uart_port *port)
*/
ubir = readl(sport->port.membase + UBIR);
ubmr = readl(sport->port.membase + UBMR);
- ubrc = readl(sport->port.membase + UBRC);
uts = readl(sport->port.membase + IMX21_UTS);
temp = readl(sport->port.membase + UCR2);
@@ -1358,7 +1264,6 @@ static void imx_flush_buffer(struct uart_port *port)
/* Restore the registers */
writel(ubir, sport->port.membase + UBIR);
writel(ubmr, sport->port.membase + UBMR);
- writel(ubrc, sport->port.membase + UBRC);
writel(uts, sport->port.membase + IMX21_UTS);
}
@@ -1375,15 +1280,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
uint64_t tdiv64;
/*
- * If we don't support modem control lines, don't allow
- * these to be set.
- */
- if (0) {
- termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
- termios->c_cflag |= CLOCAL;
- }
-
- /*
* We only support CS7 and CS8.
*/
while ((termios->c_cflag & CSIZE) != CS7 &&
@@ -1401,11 +1297,26 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (termios->c_cflag & CRTSCTS) {
if (sport->have_rtscts) {
ucr2 &= ~UCR2_IRTS;
- ucr2 |= UCR2_CTSC;
+
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ /*
+ * RTS is mandatory for rs485 operation, so keep
+ * it under manual control and keep transmitter
+ * disabled.
+ */
+ if (!(port->rs485.flags &
+ SER_RS485_RTS_AFTER_SEND))
+ ucr2 |= UCR2_CTS;
+ } else {
+ ucr2 |= UCR2_CTSC;
+ }
} else {
termios->c_cflag &= ~CRTSCTS;
}
- }
+ } else if (port->rs485.flags & SER_RS485_ENABLED)
+ /* disable transmitter */
+ if (!(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
+ ucr2 |= UCR2_CTS;
if (termios->c_cflag & CSTOPB)
ucr2 |= UCR2_STPB;
@@ -1471,24 +1382,16 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.membase + UCR2);
old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
- if (USE_IRDA(sport)) {
- /*
- * use maximum available submodule frequency to
- * avoid missing short pulses due to low sampling rate
- */
+ /* custom-baudrate handling */
+ div = sport->port.uartclk / (baud * 16);
+ if (baud == 38400 && quot != div)
+ baud = sport->port.uartclk / (quot * 16);
+
+ div = sport->port.uartclk / (baud * 16);
+ if (div > 7)
+ div = 7;
+ if (!div)
div = 1;
- } else {
- /* custom-baudrate handling */
- div = sport->port.uartclk / (baud * 16);
- if (baud == 38400 && quot != div)
- baud = sport->port.uartclk / (quot * 16);
-
- div = sport->port.uartclk / (baud * 16);
- if (div > 7)
- div = 7;
- if (!div)
- div = 1;
- }
rational_best_approximation(16 * div * baud, sport->port.uartclk,
1 << 16, 1 << 16, &num, &denom);
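A worked example of the custom-baudrate path kept above, with hypothetical numbers: for uartclk = 80 MHz and baud = 115200, div = 80000000 / (115200 * 16) = 43, clamped to 7, and rational_best_approximation(16 * 7 * 115200, 80000000, 1 << 16, 1 << 16, ...) yields exactly 504/3125. The sketch below uses a plain gcd reduction as a stand-in for the kernel helper, which is enough here because the exact fraction already fits the 1 << 16 limits.

#include <stdio.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long uartclk = 80000000, baud = 115200;	/* hypothetical */
	unsigned long div, num, denom, g;

	div = uartclk / (baud * 16);	/* 43 */
	if (div > 7)
		div = 7;
	if (!div)
		div = 1;

	/* stand-in for rational_best_approximation(16 * div * baud, uartclk, ...) */
	num = 16 * div * baud;		/* 12902400 */
	denom = uartclk;		/* 80000000 */
	g = gcd_ul(num, denom);
	num /= g;			/* 504 */
	denom /= g;			/* 3125 */

	printf("div=%lu num=%lu denom=%lu\n", div, num, denom);
	return 0;
}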
@@ -1635,6 +1538,38 @@ static void imx_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif
+static int imx_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485conf)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ /* unimplemented */
+ rs485conf->delay_rts_before_send = 0;
+ rs485conf->delay_rts_after_send = 0;
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
+ /* RTS is required to control the transmitter */
+ if (!sport->have_rtscts)
+ rs485conf->flags &= ~SER_RS485_ENABLED;
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ unsigned long temp;
+
+ /* disable transmitter */
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~UCR2_CTSC;
+ if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
+ temp &= ~UCR2_CTS;
+ else
+ temp |= UCR2_CTS;
+ writel(temp, sport->port.membase + UCR2);
+ }
+
+ port->rs485 = *rs485conf;
+
+ return 0;
+}
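With .rs485_config now wired up, userspace can put the port into RS-485 half-duplex mode through the standard TIOCSRS485 ioctl; a minimal sketch, with a hypothetical device path:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttymxc1", O_RDWR | O_NOCTTY);	/* hypothetical port */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	/* delay_rts_* stay 0: the driver above reports them as unimplemented */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");

	close(fd);
	return 0;
}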
+
static struct uart_ops imx_pops = {
.tx_empty = imx_tx_empty,
.set_mctrl = imx_set_mctrl,
@@ -1927,9 +1862,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
sport->have_rtscts = 1;
- if (of_get_property(np, "fsl,irda-mode", NULL))
- sport->use_irda = 1;
-
if (of_get_property(np, "fsl,dte-mode", NULL))
sport->dte_mode = 1;
@@ -1958,9 +1890,6 @@ static void serial_imx_probe_pdata(struct imx_port *sport,
if (pdata->flags & IMXUART_HAVE_RTSCTS)
sport->have_rtscts = 1;
-
- if (pdata->flags & IMXUART_IRDA)
- sport->use_irda = 1;
}
static int serial_imx_probe(struct platform_device *pdev)
@@ -1969,6 +1898,7 @@ static int serial_imx_probe(struct platform_device *pdev)
void __iomem *base;
int ret = 0;
struct resource *res;
+ int txirq, rxirq, rtsirq;
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
@@ -1985,17 +1915,21 @@ static int serial_imx_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
+ rxirq = platform_get_irq(pdev, 0);
+ txirq = platform_get_irq(pdev, 1);
+ rtsirq = platform_get_irq(pdev, 2);
+
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
sport->port.membase = base;
sport->port.type = PORT_IMX,
sport->port.iotype = UPIO_MEM;
- sport->port.irq = platform_get_irq(pdev, 0);
- sport->rxirq = platform_get_irq(pdev, 0);
- sport->txirq = platform_get_irq(pdev, 1);
- sport->rtsirq = platform_get_irq(pdev, 2);
+ sport->port.irq = rxirq;
sport->port.fifosize = 32;
sport->port.ops = &imx_pops;
+ sport->port.rs485_config = imx_rs485_config;
+ sport->port.rs485.flags =
+ SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX;
sport->port.flags = UPF_BOOT_AUTOCONF;
init_timer(&sport->timer);
sport->timer.function = imx_timeout;
@@ -2021,27 +1955,18 @@ static int serial_imx_probe(struct platform_device *pdev)
* Allocate the IRQ(s) i.MX1 has three interrupts whereas later
* chips only have one interrupt.
*/
- if (sport->txirq > 0) {
- ret = devm_request_irq(&pdev->dev, sport->rxirq, imx_rxint, 0,
+ if (txirq > 0) {
+ ret = devm_request_irq(&pdev->dev, rxirq, imx_rxint, 0,
dev_name(&pdev->dev), sport);
if (ret)
return ret;
- ret = devm_request_irq(&pdev->dev, sport->txirq, imx_txint, 0,
+ ret = devm_request_irq(&pdev->dev, txirq, imx_txint, 0,
dev_name(&pdev->dev), sport);
if (ret)
return ret;
-
- /* do not use RTS IRQ on IrDA */
- if (!USE_IRDA(sport)) {
- ret = devm_request_irq(&pdev->dev, sport->rtsirq,
- imx_rtsint, 0,
- dev_name(&pdev->dev), sport);
- if (ret)
- return ret;
- }
} else {
- ret = devm_request_irq(&pdev->dev, sport->port.irq, imx_int, 0,
+ ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
dev_name(&pdev->dev), sport);
if (ret)
return ret;
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index bfb0681195b6..4eb12a9cae76 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -570,7 +570,7 @@ static inline void cls_parse_isr(struct jsm_board *brd, uint port)
* verified in the interrupt routine.
*/
- if (port > brd->nasync)
+ if (port >= brd->nasync)
return;
ch = brd->channels[port];
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 7291c2117daa..932b2accd06f 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -724,7 +724,7 @@ static inline void neo_parse_isr(struct jsm_board *brd, u32 port)
if (!brd)
return;
- if (port > brd->maxports)
+ if (port >= brd->maxports)
return;
ch = brd->channels[port];
@@ -840,7 +840,7 @@ static inline void neo_parse_lsr(struct jsm_board *brd, u32 port)
if (!brd)
return;
- if (port > brd->maxports)
+ if (port >= brd->maxports)
return;
ch = brd->channels[port];
@@ -1180,7 +1180,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
*/
/* Verify the port is in range. */
- if (port > brd->nasync)
+ if (port >= brd->nasync)
continue;
ch = brd->channels[port];
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 79f9a9eff545..077377259a2c 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -782,7 +782,7 @@ static int max3100_probe(struct spi_device *spi)
pdata = dev_get_platdata(&spi->dev);
max3100s[i]->crystal = pdata->crystal;
max3100s[i]->loopback = pdata->loopback;
- max3100s[i]->poll_time = pdata->poll_time * HZ / 1000;
+ max3100s[i]->poll_time = msecs_to_jiffies(pdata->poll_time);
if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0)
max3100s[i]->poll_time = 1;
max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
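The max3100 hunk swaps an open-coded conversion for msecs_to_jiffies(). The open-coded form truncates (5 ms * HZ / 1000 is 0 when HZ=100), which is why the driver keeps the clamp to 1, while the helper rounds up and saturates for very large values. A hedged sketch:

#include <linux/jiffies.h>

/* Illustrative only: convert a millisecond poll interval to jiffies. */
static unsigned long example_poll_jiffies(unsigned int poll_ms)
{
	/* msecs_to_jiffies() rounds up, so a non-zero request never becomes 0. */
	return msecs_to_jiffies(poll_ms);
}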
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
deleted file mode 100644
index 8fe4501d7565..000000000000
--- a/drivers/tty/serial/mfd.c
+++ /dev/null
@@ -1,1505 +0,0 @@
-/*
- * mfd.c: driver for High Speed UART device of Intel Medfield platform
- *
- * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
- *
- * (C) Copyright 2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-/* Notes:
- * 1. DMA channel allocation: 0/1 channel are assigned to port 0,
- * 2/3 chan to port 1, 4/5 chan to port 3. Even number chans
- * are used for RX, odd chans for TX
- *
- * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
- * asserted, only when the HW is reset the DDCD and DDSR will
- * be triggered
- */
-
-#if defined(CONFIG_SERIAL_MFD_HSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/slab.h>
-#include <linux/serial_reg.h>
-#include <linux/circ_buf.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial_mfd.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/nmi.h>
-#include <linux/io.h>
-#include <linux/debugfs.h>
-#include <linux/pm_runtime.h>
-
-#define HSU_DMA_BUF_SIZE 2048
-
-#define chan_readl(chan, offset) readl(chan->reg + offset)
-#define chan_writel(chan, offset, val) writel(val, chan->reg + offset)
-
-#define mfd_readl(obj, offset) readl(obj->reg + offset)
-#define mfd_writel(obj, offset, val) writel(val, obj->reg + offset)
-
-static int hsu_dma_enable;
-module_param(hsu_dma_enable, int, 0);
-MODULE_PARM_DESC(hsu_dma_enable,
- "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
-
-struct hsu_dma_buffer {
- u8 *buf;
- dma_addr_t dma_addr;
- u32 dma_size;
- u32 ofs;
-};
-
-struct hsu_dma_chan {
- u32 id;
- enum dma_data_direction dirt;
- struct uart_hsu_port *uport;
- void __iomem *reg;
-};
-
-struct uart_hsu_port {
- struct uart_port port;
- unsigned char ier;
- unsigned char lcr;
- unsigned char mcr;
- unsigned int lsr_break_flag;
- char name[12];
- int index;
- struct device *dev;
-
- struct hsu_dma_chan *txc;
- struct hsu_dma_chan *rxc;
- struct hsu_dma_buffer txbuf;
- struct hsu_dma_buffer rxbuf;
- int use_dma; /* flag for DMA/PIO */
- int running;
- int dma_tx_on;
-};
-
-/* Top level data structure of HSU */
-struct hsu_port {
- void __iomem *reg;
- unsigned long paddr;
- unsigned long iolen;
- u32 irq;
-
- struct uart_hsu_port port[3];
- struct hsu_dma_chan chans[10];
-
- struct dentry *debugfs;
-};
-
-static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
-{
- unsigned int val;
-
- if (offset > UART_MSR) {
- offset <<= 2;
- val = readl(up->port.membase + offset);
- } else
- val = (unsigned int)readb(up->port.membase + offset);
-
- return val;
-}
-
-static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
-{
- if (offset > UART_MSR) {
- offset <<= 2;
- writel(value, up->port.membase + offset);
- } else {
- unsigned char val = value & 0xff;
- writeb(val, up->port.membase + offset);
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#define HSU_REGS_BUFSIZE 1024
-
-
-static ssize_t port_show_regs(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct uart_hsu_port *up = file->private_data;
- char *buf;
- u32 len = 0;
- ssize_t ret;
-
- buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MFD HSU port[%d] regs:\n", up->index);
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "=================================\n");
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "IER: \t\t0x%08x\n", serial_in(up, UART_IER));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "PS: \t\t0x%08x\n", serial_in(up, UART_PS));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
-
- if (len > HSU_REGS_BUFSIZE)
- len = HSU_REGS_BUFSIZE;
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return ret;
-}
-
-static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hsu_dma_chan *chan = file->private_data;
- char *buf;
- u32 len = 0;
- ssize_t ret;
-
- buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MFD HSU DMA channel [%d] regs:\n", chan->id);
-
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "=================================\n");
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
- len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
- "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
-
- if (len > HSU_REGS_BUFSIZE)
- len = HSU_REGS_BUFSIZE;
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return ret;
-}
-
-static const struct file_operations port_regs_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = port_show_regs,
- .llseek = default_llseek,
-};
-
-static const struct file_operations dma_regs_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = dma_show_regs,
- .llseek = default_llseek,
-};
-
-static int hsu_debugfs_init(struct hsu_port *hsu)
-{
- int i;
- char name[32];
-
- hsu->debugfs = debugfs_create_dir("hsu", NULL);
- if (!hsu->debugfs)
- return -ENOMEM;
-
- for (i = 0; i < 3; i++) {
- snprintf(name, sizeof(name), "port_%d_regs", i);
- debugfs_create_file(name, S_IFREG | S_IRUGO,
- hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
- }
-
- for (i = 0; i < 6; i++) {
- snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
- debugfs_create_file(name, S_IFREG | S_IRUGO,
- hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
- }
-
- return 0;
-}
-
-static void hsu_debugfs_remove(struct hsu_port *hsu)
-{
- if (hsu->debugfs)
- debugfs_remove_recursive(hsu->debugfs);
-}
-
-#else
-static inline int hsu_debugfs_init(struct hsu_port *hsu)
-{
- return 0;
-}
-
-static inline void hsu_debugfs_remove(struct hsu_port *hsu)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
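/*
 * Usage note, not from the original source: with debugfs mounted at the
 * usual /sys/kernel/debug, the files created above appear as e.g.
 * /sys/kernel/debug/hsu/port_0_regs and /sys/kernel/debug/hsu/dma_chan_0_regs,
 * each dumping the register snapshot built by port_show_regs()/dma_show_regs().
 */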
-
-static void serial_hsu_enable_ms(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
-
- up->ier |= UART_IER_MSI;
- serial_out(up, UART_IER, up->ier);
-}
-
-static void hsu_dma_tx(struct uart_hsu_port *up)
-{
- struct circ_buf *xmit = &up->port.state->xmit;
- struct hsu_dma_buffer *dbuf = &up->txbuf;
- int count;
-
- /* test_and_set_bit may be better, but anyway it's in lock protected mode */
- if (up->dma_tx_on)
- return;
-
- /* Update the circ buf info */
- xmit->tail += dbuf->ofs;
- xmit->tail &= UART_XMIT_SIZE - 1;
-
- up->port.icount.tx += dbuf->ofs;
- dbuf->ofs = 0;
-
- /* Disable the channel */
- chan_writel(up->txc, HSU_CH_CR, 0x0);
-
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
- dma_sync_single_for_device(up->port.dev,
- dbuf->dma_addr,
- dbuf->dma_size,
- DMA_TO_DEVICE);
-
- count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
- dbuf->ofs = count;
-
- /* Reprogram the channel */
- chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
- chan_writel(up->txc, HSU_CH_D0TSR, count);
-
- /* Reenable the channel */
- chan_writel(up->txc, HSU_CH_DCR, 0x1
- | (0x1 << 8)
- | (0x1 << 16)
- | (0x1 << 24));
- up->dma_tx_on = 1;
- chan_writel(up->txc, HSU_CH_CR, 0x1);
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&up->port);
-}
-
-/* The buffer is already cache coherent */
-static void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc,
- struct hsu_dma_buffer *dbuf)
-{
- dbuf->ofs = 0;
-
- chan_writel(rxc, HSU_CH_BSR, 32);
- chan_writel(rxc, HSU_CH_MOTSR, 4);
-
- chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
- chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
- chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
- | (0x1 << 16)
- | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
- );
- chan_writel(rxc, HSU_CH_CR, 0x3);
-}
-
-/* Protected by spin_lock_irqsave(port->lock) */
-static void serial_hsu_start_tx(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
-
- if (up->use_dma) {
- hsu_dma_tx(up);
- } else if (!(up->ier & UART_IER_THRI)) {
- up->ier |= UART_IER_THRI;
- serial_out(up, UART_IER, up->ier);
- }
-}
-
-static void serial_hsu_stop_tx(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- struct hsu_dma_chan *txc = up->txc;
-
- if (up->use_dma)
- chan_writel(txc, HSU_CH_CR, 0x0);
- else if (up->ier & UART_IER_THRI) {
- up->ier &= ~UART_IER_THRI;
- serial_out(up, UART_IER, up->ier);
- }
-}
-
-/* This is always called in spinlock protected mode, so
- * modifying the timeout timer is safe here */
-static void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts,
- unsigned long *flags)
-{
- struct hsu_dma_buffer *dbuf = &up->rxbuf;
- struct hsu_dma_chan *chan = up->rxc;
- struct uart_port *port = &up->port;
- struct tty_port *tport = &port->state->port;
- int count;
-
- /*
- * First we need to know how much has already been transferred;
- * then, if this is a timeout DMA irq, pull the trailing
- * bytes out, push them up to the tty layer and re-enable the
- * channel
- */
-
- /* Timeout IRQ, need to wait some time, see Errata 2 */
- if (int_sts & 0xf00)
- udelay(2);
-
- /* Stop the channel */
- chan_writel(chan, HSU_CH_CR, 0x0);
-
- count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
- if (!count) {
- /* Restart the channel before we leave */
- chan_writel(chan, HSU_CH_CR, 0x3);
- return;
- }
-
- dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
- dbuf->dma_size, DMA_FROM_DEVICE);
-
- /*
- * Head will only wrap around when we recycle
- * the DMA buffer, and when that happens, we
- * explicitly set tail to 0. So head will
- * always be greater than tail.
- */
- tty_insert_flip_string(tport, dbuf->buf, count);
- port->icount.rx += count;
-
- dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
- dbuf->dma_size, DMA_FROM_DEVICE);
-
- /* Reprogram the channel */
- chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
- chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
- chan_writel(chan, HSU_CH_DCR, 0x1
- | (0x1 << 8)
- | (0x1 << 16)
- | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
- );
- spin_unlock_irqrestore(&up->port.lock, *flags);
- tty_flip_buffer_push(tport);
- spin_lock_irqsave(&up->port.lock, *flags);
-
- chan_writel(chan, HSU_CH_CR, 0x3);
-
-}
-
-static void serial_hsu_stop_rx(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- struct hsu_dma_chan *chan = up->rxc;
-
- if (up->use_dma)
- chan_writel(chan, HSU_CH_CR, 0x2);
- else {
- up->ier &= ~UART_IER_RLSI;
- up->port.read_status_mask &= ~UART_LSR_DR;
- serial_out(up, UART_IER, up->ier);
- }
-}
-
-static inline void receive_chars(struct uart_hsu_port *up, int *status,
- unsigned long *flags)
-{
- unsigned int ch, flag;
- unsigned int max_count = 256;
-
- do {
- ch = serial_in(up, UART_RX);
- flag = TTY_NORMAL;
- up->port.icount.rx++;
-
- if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
- UART_LSR_FE | UART_LSR_OE))) {
-
- dev_warn(up->dev, "We really rush into ERR/BI case"
- "status = 0x%02x", *status);
- /* For statistics only */
- if (*status & UART_LSR_BI) {
- *status &= ~(UART_LSR_FE | UART_LSR_PE);
- up->port.icount.brk++;
- /*
- * We do the SysRQ and SAK checking
- * here because otherwise the break
- * may get masked by ignore_status_mask
- * or read_status_mask.
- */
- if (uart_handle_break(&up->port))
- goto ignore_char;
- } else if (*status & UART_LSR_PE)
- up->port.icount.parity++;
- else if (*status & UART_LSR_FE)
- up->port.icount.frame++;
- if (*status & UART_LSR_OE)
- up->port.icount.overrun++;
-
- /* Mask off conditions which should be ignored. */
- *status &= up->port.read_status_mask;
-
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
- if (up->port.cons &&
- up->port.cons->index == up->port.line) {
- /* Recover the break flag from console xmit */
- *status |= up->lsr_break_flag;
- up->lsr_break_flag = 0;
- }
-#endif
- if (*status & UART_LSR_BI) {
- flag = TTY_BREAK;
- } else if (*status & UART_LSR_PE)
- flag = TTY_PARITY;
- else if (*status & UART_LSR_FE)
- flag = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char(&up->port, ch))
- goto ignore_char;
-
- uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
- ignore_char:
- *status = serial_in(up, UART_LSR);
- } while ((*status & UART_LSR_DR) && max_count--);
-
- spin_unlock_irqrestore(&up->port.lock, *flags);
- tty_flip_buffer_push(&up->port.state->port);
- spin_lock_irqsave(&up->port.lock, *flags);
-}
-
-static void transmit_chars(struct uart_hsu_port *up)
-{
- struct circ_buf *xmit = &up->port.state->xmit;
- int count;
-
- if (up->port.x_char) {
- serial_out(up, UART_TX, up->port.x_char);
- up->port.icount.tx++;
- up->port.x_char = 0;
- return;
- }
- if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
- serial_hsu_stop_tx(&up->port);
- return;
- }
-
- /* The IRQ is for TX FIFO half-empty */
- count = up->port.fifosize / 2;
-
- do {
- serial_out(up, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-
- up->port.icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (--count > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&up->port);
-
- if (uart_circ_empty(xmit))
- serial_hsu_stop_tx(&up->port);
-}
-
-static inline void check_modem_status(struct uart_hsu_port *up)
-{
- int status;
-
- status = serial_in(up, UART_MSR);
-
- if ((status & UART_MSR_ANY_DELTA) == 0)
- return;
-
- if (status & UART_MSR_TERI)
- up->port.icount.rng++;
- if (status & UART_MSR_DDSR)
- up->port.icount.dsr++;
- /* We may only get DDCD when HW init and reset */
- if (status & UART_MSR_DDCD)
- uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
- /* Will start/stop_tx accordingly */
- if (status & UART_MSR_DCTS)
- uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
-
- wake_up_interruptible(&up->port.state->port.delta_msr_wait);
-}
-
-/*
- * This handles the interrupt from one port.
- */
-static irqreturn_t port_irq(int irq, void *dev_id)
-{
- struct uart_hsu_port *up = dev_id;
- unsigned int iir, lsr;
- unsigned long flags;
-
- if (unlikely(!up->running))
- return IRQ_NONE;
-
- spin_lock_irqsave(&up->port.lock, flags);
- if (up->use_dma) {
- lsr = serial_in(up, UART_LSR);
- if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
- UART_LSR_FE | UART_LSR_OE)))
- dev_warn(up->dev,
- "Got lsr irq while using DMA, lsr = 0x%2x\n",
- lsr);
- check_modem_status(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
- return IRQ_HANDLED;
- }
-
- iir = serial_in(up, UART_IIR);
- if (iir & UART_IIR_NO_INT) {
- spin_unlock_irqrestore(&up->port.lock, flags);
- return IRQ_NONE;
- }
-
- lsr = serial_in(up, UART_LSR);
- if (lsr & UART_LSR_DR)
- receive_chars(up, &lsr, &flags);
- check_modem_status(up);
-
- /* lsr will be renewed during the receive_chars */
- if (lsr & UART_LSR_THRE)
- transmit_chars(up);
-
- spin_unlock_irqrestore(&up->port.lock, flags);
- return IRQ_HANDLED;
-}
-
-static inline void dma_chan_irq(struct hsu_dma_chan *chan)
-{
- struct uart_hsu_port *up = chan->uport;
- unsigned long flags;
- u32 int_sts;
-
- spin_lock_irqsave(&up->port.lock, flags);
-
- if (!up->use_dma || !up->running)
- goto exit;
-
- /*
- * Whatever the situation, we need to read-clear the IRQ status;
- * there is a bug, see Errata 5, HSD 2900918
- */
- int_sts = chan_readl(chan, HSU_CH_SR);
-
- /* Rx channel */
- if (chan->dirt == DMA_FROM_DEVICE)
- hsu_dma_rx(up, int_sts, &flags);
-
- /* Tx channel */
- if (chan->dirt == DMA_TO_DEVICE) {
- chan_writel(chan, HSU_CH_CR, 0x0);
- up->dma_tx_on = 0;
- hsu_dma_tx(up);
- }
-
-exit:
- spin_unlock_irqrestore(&up->port.lock, flags);
- return;
-}
-
-static irqreturn_t dma_irq(int irq, void *dev_id)
-{
- struct hsu_port *hsu = dev_id;
- u32 int_sts, i;
-
- int_sts = mfd_readl(hsu, HSU_GBL_DMAISR);
-
- /* Currently only 6 channels may be used */
- for (i = 0; i < 6; i++) {
- if (int_sts & 0x1)
- dma_chan_irq(&hsu->chans[i]);
- int_sts >>= 1;
- }
-
- return IRQ_HANDLED;
-}
-
-static unsigned int serial_hsu_tx_empty(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
- unsigned int ret;
-
- spin_lock_irqsave(&up->port.lock, flags);
- ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
-
- return ret;
-}
-
-static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned char status;
- unsigned int ret;
-
- status = serial_in(up, UART_MSR);
-
- ret = 0;
- if (status & UART_MSR_DCD)
- ret |= TIOCM_CAR;
- if (status & UART_MSR_RI)
- ret |= TIOCM_RNG;
- if (status & UART_MSR_DSR)
- ret |= TIOCM_DSR;
- if (status & UART_MSR_CTS)
- ret |= TIOCM_CTS;
- return ret;
-}
-
-static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned char mcr = 0;
-
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- mcr |= up->mcr;
-
- serial_out(up, UART_MCR, mcr);
-}
-
-static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
-
- spin_lock_irqsave(&up->port.lock, flags);
- if (break_state == -1)
- up->lcr |= UART_LCR_SBC;
- else
- up->lcr &= ~UART_LCR_SBC;
- serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-/*
- * What is special here:
- * 1. choose the 64B FIFO mode
- * 2. start DMA or PIO depending on configuration
- * 3. we only allocate dma memory when needed
- */
-static int serial_hsu_startup(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
-
- pm_runtime_get_sync(up->dev);
-
- /*
- * Clear the FIFO buffers and disable them.
- * (they will be reenabled in set_termios())
- */
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_out(up, UART_FCR, 0);
-
- /* Clear the interrupt registers. */
- (void) serial_in(up, UART_LSR);
- (void) serial_in(up, UART_RX);
- (void) serial_in(up, UART_IIR);
- (void) serial_in(up, UART_MSR);
-
- /* Now, initialize the UART, default is 8n1 */
- serial_out(up, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irqsave(&up->port.lock, flags);
-
- up->port.mctrl |= TIOCM_OUT2;
- serial_hsu_set_mctrl(&up->port, up->port.mctrl);
-
- /*
- * Finally, enable interrupts. Note: Modem status interrupts
- * are set via set_termios(), which will be occurring imminently
- * anyway, so we don't enable them here.
- */
- if (!up->use_dma)
- up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
- else
- up->ier = 0;
- serial_out(up, UART_IER, up->ier);
-
- spin_unlock_irqrestore(&up->port.lock, flags);
-
- /* DMA init */
- if (up->use_dma) {
- struct hsu_dma_buffer *dbuf;
- struct circ_buf *xmit = &port->state->xmit;
-
- up->dma_tx_on = 0;
-
- /* First allocate the RX buffer */
- dbuf = &up->rxbuf;
- dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
- if (!dbuf->buf) {
- up->use_dma = 0;
- goto exit;
- }
- dbuf->dma_addr = dma_map_single(port->dev,
- dbuf->buf,
- HSU_DMA_BUF_SIZE,
- DMA_FROM_DEVICE);
- dbuf->dma_size = HSU_DMA_BUF_SIZE;
-
- /* Start the RX channel right now */
- hsu_dma_start_rx_chan(up->rxc, dbuf);
-
- /* Next init the TX DMA */
- dbuf = &up->txbuf;
- dbuf->buf = xmit->buf;
- dbuf->dma_addr = dma_map_single(port->dev,
- dbuf->buf,
- UART_XMIT_SIZE,
- DMA_TO_DEVICE);
- dbuf->dma_size = UART_XMIT_SIZE;
-
- /* This should not be changed all around */
- chan_writel(up->txc, HSU_CH_BSR, 32);
- chan_writel(up->txc, HSU_CH_MOTSR, 4);
- dbuf->ofs = 0;
- }
-
-exit:
- /* And clear the interrupt registers again for luck. */
- (void) serial_in(up, UART_LSR);
- (void) serial_in(up, UART_RX);
- (void) serial_in(up, UART_IIR);
- (void) serial_in(up, UART_MSR);
-
- up->running = 1;
- return 0;
-}
-
-static void serial_hsu_shutdown(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned long flags;
-
- /* Disable interrupts from this port */
- up->ier = 0;
- serial_out(up, UART_IER, 0);
- up->running = 0;
-
- spin_lock_irqsave(&up->port.lock, flags);
- up->port.mctrl &= ~TIOCM_OUT2;
- serial_hsu_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
-
- /* Disable break condition and FIFOs */
- serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- serial_out(up, UART_FCR, 0);
-
- pm_runtime_put(up->dev);
-}
-
-static void
-serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- unsigned char cval, fcr = 0;
- unsigned long flags;
- unsigned int baud, quot;
- u32 ps, mul;
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- cval = UART_LCR_WLEN5;
- break;
- case CS6:
- cval = UART_LCR_WLEN6;
- break;
- case CS7:
- cval = UART_LCR_WLEN7;
- break;
- default:
- case CS8:
- cval = UART_LCR_WLEN8;
- break;
- }
-
- /* CMSPAR isn't supported by this driver */
- termios->c_cflag &= ~CMSPAR;
-
- if (termios->c_cflag & CSTOPB)
- cval |= UART_LCR_STOP;
- if (termios->c_cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(termios->c_cflag & PARODD))
- cval |= UART_LCR_EPAR;
-
- /*
- * The base clock is 50 MHz, and the baud rate comes from:
- * baud = 50M * MUL / (DIV * PS * DLAB)
- *
- * For the basic low baud rates we can derive the divisor
- * directly from 2764800, e.g. 115200 = 2764800/24. Higher
- * baud rates are handled case by case, mainly by adjusting
- * the MUL/PS registers, while the DIV register is kept at
- * its default value 0x3d09 to keep things simple
- */
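/*
 * Worked example, not from the original source: with the defaults below,
 * DIV = 0x3d09 (15625), PS = 0x10 (16) and MUL = 0x3600 (13824), a DLAB
 * divisor of 24 gives 50,000,000 * 13824 / (15625 * 16 * 24) = 115200 bps.
 * The 500000 bps case uses MUL = 0x9C4 (2500), PS = 0x10 and quot = 1:
 * 50,000,000 * 2500 / (15625 * 16 * 1) = 500000 bps.
 */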
- baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
-
- quot = 1;
- ps = 0x10;
- mul = 0x3600;
- switch (baud) {
- case 3500000:
- mul = 0x3345;
- ps = 0xC;
- break;
- case 1843200:
- mul = 0x2400;
- break;
- case 3000000:
- case 2500000:
- case 2000000:
- case 1500000:
- case 1000000:
- case 500000:
- /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
- mul = baud / 500000 * 0x9C4;
- break;
- default:
- /* Use uart_get_divisor to get quot for other baud rates */
- quot = 0;
- }
-
- if (!quot)
- quot = uart_get_divisor(port, baud);
-
- if ((up->port.uartclk / quot) < (2400 * 16))
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
- else if ((up->port.uartclk / quot) < (230400 * 16))
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
- else
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
-
- fcr |= UART_FCR_HSU_64B_FIFO;
-
- /*
- * Ok, we're now changing the port state. Do it with
- * interrupts disabled.
- */
- spin_lock_irqsave(&up->port.lock, flags);
-
- /* Update the per-port timeout */
- uart_update_timeout(port, termios->c_cflag, baud);
-
- up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (termios->c_iflag & INPCK)
- up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- up->port.read_status_mask |= UART_LSR_BI;
-
- /* Characters to ignore */
- up->port.ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
- if (termios->c_iflag & IGNBRK) {
- up->port.ignore_status_mask |= UART_LSR_BI;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- up->port.ignore_status_mask |= UART_LSR_OE;
- }
-
- /* Ignore all characters if CREAD is not set */
- if ((termios->c_cflag & CREAD) == 0)
- up->port.ignore_status_mask |= UART_LSR_DR;
-
- /*
- * CTS flow control flag and modem status interrupts, disable
- * MSI by default
- */
- up->ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(&up->port, termios->c_cflag))
- up->ier |= UART_IER_MSI;
-
- serial_out(up, UART_IER, up->ier);
-
- if (termios->c_cflag & CRTSCTS)
- up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
- else
- up->mcr &= ~UART_MCR_AFE;
-
- serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
- serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
- serial_out(up, UART_LCR, cval); /* reset DLAB */
- serial_out(up, UART_MUL, mul); /* set MUL */
- serial_out(up, UART_PS, ps); /* set PS */
- up->lcr = cval; /* Save LCR */
- serial_hsu_set_mctrl(&up->port, up->port.mctrl);
- serial_out(up, UART_FCR, fcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-static void
-serial_hsu_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
-{
-}
-
-static void serial_hsu_release_port(struct uart_port *port)
-{
-}
-
-static int serial_hsu_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-static void serial_hsu_config_port(struct uart_port *port, int flags)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- up->port.type = PORT_MFD;
-}
-
-static int
-serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- /* We don't want the core code to modify any port params */
- return -EINVAL;
-}
-
-static const char *
-serial_hsu_type(struct uart_port *port)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
- return up->name;
-}
-
-/* Mainly for uart console use */
-static struct uart_hsu_port *serial_hsu_ports[3];
-static struct uart_driver serial_hsu_reg;
-
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-/* Wait for transmitter & holding register to empty */
-static inline void wait_for_xmitr(struct uart_hsu_port *up)
-{
- unsigned int status, tmout = 1000;
-
- /* Wait up to 1ms for the character to be sent. */
- do {
- status = serial_in(up, UART_LSR);
-
- if (status & UART_LSR_BI)
- up->lsr_break_flag = UART_LSR_BI;
-
- if (--tmout == 0)
- break;
- udelay(1);
- } while (!(status & BOTH_EMPTY));
-
- /* Wait up to 1s for flow control if necessary */
- if (up->port.flags & UPF_CONS_FLOW) {
- tmout = 1000000;
- while (--tmout &&
- ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
- udelay(1);
- }
-}
-
-static void serial_hsu_console_putchar(struct uart_port *port, int ch)
-{
- struct uart_hsu_port *up =
- container_of(port, struct uart_hsu_port, port);
-
- wait_for_xmitr(up);
- serial_out(up, UART_TX, ch);
-}
-
-/*
- * Print a string to the serial port trying not to disturb
- * any possible real use of the port...
- *
- * The console_lock must be held when we get here.
- */
-static void
-serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_hsu_port *up = serial_hsu_ports[co->index];
- unsigned long flags;
- unsigned int ier;
- int locked = 1;
-
- touch_nmi_watchdog();
-
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
-
- /* First save the IER then disable the interrupts */
- ier = serial_in(up, UART_IER);
- serial_out(up, UART_IER, 0);
-
- uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
-
- /*
- * Finally, wait for transmitter to become empty
- * and restore the IER
- */
- wait_for_xmitr(up);
- serial_out(up, UART_IER, ier);
-
- if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
-}
-
-static struct console serial_hsu_console;
-
-static int __init
-serial_hsu_console_setup(struct console *co, char *options)
-{
- struct uart_hsu_port *up;
- int baud = 115200;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (co->index == -1 || co->index >= serial_hsu_reg.nr)
- co->index = 0;
- up = serial_hsu_ports[co->index];
- if (!up)
- return -ENODEV;
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(&up->port, co, baud, parity, bits, flow);
-}
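/*
 * Usage note, not from the original source: with this console registered,
 * booting with e.g. console=ttyMFD0,115200n8 selects port 0 at 115200 baud,
 * no parity, 8 data bits; uart_parse_options() above parses that string.
 */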
-
-static struct console serial_hsu_console = {
- .name = "ttyMFD",
- .write = serial_hsu_console_write,
- .device = uart_console_device,
- .setup = serial_hsu_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &serial_hsu_reg,
-};
-
-#define SERIAL_HSU_CONSOLE (&serial_hsu_console)
-#else
-#define SERIAL_HSU_CONSOLE NULL
-#endif
-
-static struct uart_ops serial_hsu_pops = {
- .tx_empty = serial_hsu_tx_empty,
- .set_mctrl = serial_hsu_set_mctrl,
- .get_mctrl = serial_hsu_get_mctrl,
- .stop_tx = serial_hsu_stop_tx,
- .start_tx = serial_hsu_start_tx,
- .stop_rx = serial_hsu_stop_rx,
- .enable_ms = serial_hsu_enable_ms,
- .break_ctl = serial_hsu_break_ctl,
- .startup = serial_hsu_startup,
- .shutdown = serial_hsu_shutdown,
- .set_termios = serial_hsu_set_termios,
- .pm = serial_hsu_pm,
- .type = serial_hsu_type,
- .release_port = serial_hsu_release_port,
- .request_port = serial_hsu_request_port,
- .config_port = serial_hsu_config_port,
- .verify_port = serial_hsu_verify_port,
-};
-
-static struct uart_driver serial_hsu_reg = {
- .owner = THIS_MODULE,
- .driver_name = "MFD serial",
- .dev_name = "ttyMFD",
- .major = TTY_MAJOR,
- .minor = 128,
- .nr = 3,
- .cons = SERIAL_HSU_CONSOLE,
-};
-
-#ifdef CONFIG_PM
-static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- void *priv = pci_get_drvdata(pdev);
- struct uart_hsu_port *up;
-
- /* Make sure this is not the internal dma controller */
- if (priv && (pdev->device != 0x081E)) {
- up = priv;
- uart_suspend_port(&serial_hsu_reg, &up->port);
- }
-
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int serial_hsu_resume(struct pci_dev *pdev)
-{
- void *priv = pci_get_drvdata(pdev);
- struct uart_hsu_port *up;
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- ret = pci_enable_device(pdev);
- if (ret)
- dev_warn(&pdev->dev,
- "HSU: can't re-enable device, try to continue\n");
-
- if (priv && (pdev->device != 0x081E)) {
- up = priv;
- uart_resume_port(&serial_hsu_reg, &up->port);
- }
- return 0;
-}
-
-static int serial_hsu_runtime_idle(struct device *dev)
-{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
-}
-
-static int serial_hsu_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int serial_hsu_runtime_resume(struct device *dev)
-{
- return 0;
-}
-#else
-#define serial_hsu_suspend NULL
-#define serial_hsu_resume NULL
-#define serial_hsu_runtime_idle NULL
-#define serial_hsu_runtime_suspend NULL
-#define serial_hsu_runtime_resume NULL
-#endif
-
-static const struct dev_pm_ops serial_hsu_pm_ops = {
- .runtime_suspend = serial_hsu_runtime_suspend,
- .runtime_resume = serial_hsu_runtime_resume,
- .runtime_idle = serial_hsu_runtime_idle,
-};
-
-/* temp global pointer before we settle down on using one or four PCI dev */
-static struct hsu_port *phsu;
-
-static int serial_hsu_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct uart_hsu_port *uport;
- int index, ret;
-
- printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n",
- pdev->vendor, pdev->device);
-
- switch (pdev->device) {
- case 0x081B:
- index = 0;
- break;
- case 0x081C:
- index = 1;
- break;
- case 0x081D:
- index = 2;
- break;
- case 0x081E:
- /* internal DMA controller */
- index = 3;
- break;
- default:
- dev_err(&pdev->dev, "HSU: out of index!");
- return -ENODEV;
- }
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- if (index == 3) {
- /* DMA controller */
- ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu);
- if (ret) {
- dev_err(&pdev->dev, "can not get IRQ\n");
- goto err_disable;
- }
- pci_set_drvdata(pdev, phsu);
- } else {
- /* UART port 0~2 */
- uport = &phsu->port[index];
- uport->port.irq = pdev->irq;
- uport->port.dev = &pdev->dev;
- uport->dev = &pdev->dev;
-
- ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport);
- if (ret) {
- dev_err(&pdev->dev, "can not get IRQ\n");
- goto err_disable;
- }
- uart_add_one_port(&serial_hsu_reg, &uport->port);
-
- pci_set_drvdata(pdev, uport);
- }
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-
-err_disable:
- pci_disable_device(pdev);
- return ret;
-}
-
-static void hsu_global_init(void)
-{
- struct hsu_port *hsu;
- struct uart_hsu_port *uport;
- struct hsu_dma_chan *dchan;
- int i, ret;
-
- hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
- if (!hsu)
- return;
-
- /* Get basic io resource and map it */
- hsu->paddr = 0xffa28000;
- hsu->iolen = 0x1000;
-
- if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
- pr_warn("HSU: error in request mem region\n");
-
- hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
- if (!hsu->reg) {
- pr_err("HSU: error in ioremap\n");
- ret = -ENOMEM;
- goto err_free_region;
- }
-
- /* Initialise the 3 UART ports */
- uport = hsu->port;
- for (i = 0; i < 3; i++) {
- uport->port.type = PORT_MFD;
- uport->port.iotype = UPIO_MEM;
- uport->port.mapbase = (resource_size_t)hsu->paddr
- + HSU_PORT_REG_OFFSET
- + i * HSU_PORT_REG_LENGTH;
- uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
- + i * HSU_PORT_REG_LENGTH;
-
- sprintf(uport->name, "hsu_port%d", i);
- uport->port.fifosize = 64;
- uport->port.ops = &serial_hsu_pops;
- uport->port.line = i;
- uport->port.flags = UPF_IOREMAP;
- /* set the maximum scalable rate supported, 2764800 bps */
- uport->port.uartclk = 115200 * 24 * 16;
-
- uport->running = 0;
- uport->txc = &hsu->chans[i * 2];
- uport->rxc = &hsu->chans[i * 2 + 1];
-
- serial_hsu_ports[i] = uport;
- uport->index = i;
-
- if (hsu_dma_enable & (1<<i))
- uport->use_dma = 1;
- else
- uport->use_dma = 0;
-
- uport++;
- }
-
- /* Initialise 6 dma channels */
- dchan = hsu->chans;
- for (i = 0; i < 6; i++) {
- dchan->id = i;
- dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- dchan->uport = &hsu->port[i/2];
- dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
- i * HSU_DMA_CHANS_REG_LENGTH;
-
- dchan++;
- }
-
- phsu = hsu;
- hsu_debugfs_init(hsu);
- return;
-
-err_free_region:
- release_mem_region(hsu->paddr, hsu->iolen);
- kfree(hsu);
- return;
-}
-
-static void serial_hsu_remove(struct pci_dev *pdev)
-{
- void *priv = pci_get_drvdata(pdev);
- struct uart_hsu_port *up;
-
- if (!priv)
- return;
-
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
-
- /* For port 0/1/2, priv is the address of uart_hsu_port */
- if (pdev->device != 0x081E) {
- up = priv;
- uart_remove_one_port(&serial_hsu_reg, &up->port);
- }
-
- free_irq(pdev->irq, priv);
- pci_disable_device(pdev);
-}
-
-/* First 3 are UART ports, and the 4th is the DMA */
-static const struct pci_device_id pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) },
- {},
-};
-
-static struct pci_driver hsu_pci_driver = {
- .name = "HSU serial",
- .id_table = pci_ids,
- .probe = serial_hsu_probe,
- .remove = serial_hsu_remove,
- .suspend = serial_hsu_suspend,
- .resume = serial_hsu_resume,
- .driver = {
- .pm = &serial_hsu_pm_ops,
- },
-};
-
-static int __init hsu_pci_init(void)
-{
- int ret;
-
- hsu_global_init();
-
- ret = uart_register_driver(&serial_hsu_reg);
- if (ret)
- return ret;
-
- return pci_register_driver(&hsu_pci_driver);
-}
-
-static void __exit hsu_pci_exit(void)
-{
- pci_unregister_driver(&hsu_pci_driver);
- uart_unregister_driver(&serial_hsu_reg);
-
- hsu_debugfs_remove(phsu);
-
- kfree(phsu);
-}
-
-module_init(hsu_pci_init);
-module_exit(hsu_pci_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3308ef243dc3..1589f17c1fca 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1717,7 +1717,7 @@ static struct uart_driver mpc52xx_uart_driver = {
/* OF Platform Driver */
/* ======================================================================== */
-static struct of_device_id mpc52xx_uart_of_match[] = {
+static const struct of_device_id mpc52xx_uart_of_match[] = {
#ifdef CONFIG_PPC_MPC52xx
{ .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, },
{ .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 3e1c7138d8cd..737f69fe7113 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -170,15 +170,6 @@ void msm_serial_set_mnd_regs_from_uartclk(struct uart_port *port)
msm_serial_set_mnd_regs_tcxoby4(port);
}
-/*
- * TROUT has a specific defect that makes it report its uartclk
- * as 19.2 MHz (TCXO) when it's actually 4.8 MHz (TCXO/4). This special
- * cases TROUT to use the right clock.
- */
-#ifdef CONFIG_MACH_TROUT
-#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_tcxoby4
-#else
#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_from_uartclk
-#endif
#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
deleted file mode 100644
index 62da8534ba75..000000000000
--- a/drivers/tty/serial/msm_serial_hs.c
+++ /dev/null
@@ -1,1874 +0,0 @@
-/*
- * MSM 7k/8k High speed uart driver
- *
- * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
- * Copyright (c) 2008 Google Inc.
- * Modified: Nick Pelly <npelly@google.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * Has optional support for uart power management independent of linux
- * suspend/resume:
- *
- * RX wakeup.
- * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
- * UART RX pin). This should only be used if there is not a wakeup
- * GPIO on the UART CTS, and the first RX byte is known (for example, with the
- * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
- * always be lost. RTS will be asserted even while the UART is off in this mode
- * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
- */
-
-#include <linux/module.h>
-
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-#include <linux/atomic.h>
-#include <asm/irq.h>
-
-#include <mach/hardware.h>
-#include <mach/dma.h>
-#include <linux/platform_data/msm_serial_hs.h>
-
-/* HSUART Registers */
-#define UARTDM_MR1_ADDR 0x0
-#define UARTDM_MR2_ADDR 0x4
-
-/* Data Mover result codes */
-#define RSLT_FIFO_CNTR_BMSK (0xE << 28)
-#define RSLT_VLD BIT(1)
-
-/* write only register */
-#define UARTDM_CSR_ADDR 0x8
-#define UARTDM_CSR_115200 0xFF
-#define UARTDM_CSR_57600 0xEE
-#define UARTDM_CSR_38400 0xDD
-#define UARTDM_CSR_28800 0xCC
-#define UARTDM_CSR_19200 0xBB
-#define UARTDM_CSR_14400 0xAA
-#define UARTDM_CSR_9600 0x99
-#define UARTDM_CSR_7200 0x88
-#define UARTDM_CSR_4800 0x77
-#define UARTDM_CSR_3600 0x66
-#define UARTDM_CSR_2400 0x55
-#define UARTDM_CSR_1200 0x44
-#define UARTDM_CSR_600 0x33
-#define UARTDM_CSR_300 0x22
-#define UARTDM_CSR_150 0x11
-#define UARTDM_CSR_75 0x00
-
-/* write only register */
-#define UARTDM_TF_ADDR 0x70
-#define UARTDM_TF2_ADDR 0x74
-#define UARTDM_TF3_ADDR 0x78
-#define UARTDM_TF4_ADDR 0x7C
-
-/* write only register */
-#define UARTDM_CR_ADDR 0x10
-#define UARTDM_IMR_ADDR 0x14
-
-#define UARTDM_IPR_ADDR 0x18
-#define UARTDM_TFWR_ADDR 0x1c
-#define UARTDM_RFWR_ADDR 0x20
-#define UARTDM_HCR_ADDR 0x24
-#define UARTDM_DMRX_ADDR 0x34
-#define UARTDM_IRDA_ADDR 0x38
-#define UARTDM_DMEN_ADDR 0x3c
-
-/* UART_DM_NO_CHARS_FOR_TX */
-#define UARTDM_NCF_TX_ADDR 0x40
-
-#define UARTDM_BADR_ADDR 0x44
-
-#define UARTDM_SIM_CFG_ADDR 0x80
-/* Read Only register */
-#define UARTDM_SR_ADDR 0x8
-
-/* Read Only register */
-#define UARTDM_RF_ADDR 0x70
-#define UARTDM_RF2_ADDR 0x74
-#define UARTDM_RF3_ADDR 0x78
-#define UARTDM_RF4_ADDR 0x7C
-
-/* Read Only register */
-#define UARTDM_MISR_ADDR 0x10
-
-/* Read Only register */
-#define UARTDM_ISR_ADDR 0x14
-#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
-
-#define UARTDM_RXFS_ADDR 0x50
-
-/* Register field Mask Mapping */
-#define UARTDM_SR_PAR_FRAME_BMSK BIT(5)
-#define UARTDM_SR_OVERRUN_BMSK BIT(4)
-#define UARTDM_SR_TXEMT_BMSK BIT(3)
-#define UARTDM_SR_TXRDY_BMSK BIT(2)
-#define UARTDM_SR_RXRDY_BMSK BIT(0)
-
-#define UARTDM_CR_TX_DISABLE_BMSK BIT(3)
-#define UARTDM_CR_RX_DISABLE_BMSK BIT(1)
-#define UARTDM_CR_TX_EN_BMSK BIT(2)
-#define UARTDM_CR_RX_EN_BMSK BIT(0)
-
-/* UARTDM_CR channel_comman bit value (register field is bits 8:4) */
-#define RESET_RX 0x10
-#define RESET_TX 0x20
-#define RESET_ERROR_STATUS 0x30
-#define RESET_BREAK_INT 0x40
-#define START_BREAK 0x50
-#define STOP_BREAK 0x60
-#define RESET_CTS 0x70
-#define RESET_STALE_INT 0x80
-#define RFR_LOW 0xD0
-#define RFR_HIGH 0xE0
-#define CR_PROTECTION_EN 0x100
-#define STALE_EVENT_ENABLE 0x500
-#define STALE_EVENT_DISABLE 0x600
-#define FORCE_STALE_EVENT 0x400
-#define CLEAR_TX_READY 0x300
-#define RESET_TX_ERROR 0x800
-#define RESET_TX_DONE 0x810
-
-#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
-#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
-#define UARTDM_MR1_CTS_CTL_BMSK 0x40
-#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
-
-#define UARTDM_MR2_ERROR_MODE_BMSK 0x40
-#define UARTDM_MR2_BITS_PER_CHAR_BMSK 0x30
-
-/* bits per character configuration */
-#define FIVE_BPC (0 << 4)
-#define SIX_BPC (1 << 4)
-#define SEVEN_BPC (2 << 4)
-#define EIGHT_BPC (3 << 4)
-
-#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
-#define STOP_BIT_ONE (1 << 2)
-#define STOP_BIT_TWO (3 << 2)
-
-#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
-
-/* Parity configuration */
-#define NO_PARITY 0x0
-#define EVEN_PARITY 0x1
-#define ODD_PARITY 0x2
-#define SPACE_PARITY 0x3
-
-#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
-#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
-
-/* These can be used for both ISR and IMR register */
-#define UARTDM_ISR_TX_READY_BMSK BIT(7)
-#define UARTDM_ISR_CURRENT_CTS_BMSK BIT(6)
-#define UARTDM_ISR_DELTA_CTS_BMSK BIT(5)
-#define UARTDM_ISR_RXLEV_BMSK BIT(4)
-#define UARTDM_ISR_RXSTALE_BMSK BIT(3)
-#define UARTDM_ISR_RXBREAK_BMSK BIT(2)
-#define UARTDM_ISR_RXHUNT_BMSK BIT(1)
-#define UARTDM_ISR_TXLEV_BMSK BIT(0)
-
-/* Field definitions for UART_DM_DMEN*/
-#define UARTDM_TX_DM_EN_BMSK 0x1
-#define UARTDM_RX_DM_EN_BMSK 0x2
-
-#define UART_FIFOSIZE 64
-#define UARTCLK 7372800
-
-/* Rx DMA request states */
-enum flush_reason {
- FLUSH_NONE,
- FLUSH_DATA_READY,
- FLUSH_DATA_INVALID, /* values after this indicate invalid data */
- FLUSH_IGNORE = FLUSH_DATA_INVALID,
- FLUSH_STOP,
- FLUSH_SHUTDOWN,
-};
-
-/* UART clock states */
-enum msm_hs_clk_states_e {
- MSM_HS_CLK_PORT_OFF, /* port not in use */
- MSM_HS_CLK_OFF, /* clock disabled */
- MSM_HS_CLK_REQUEST_OFF, /* disable after TX and RX flushed */
- MSM_HS_CLK_ON, /* clock enabled */
-};
-
-/* Track the forced RXSTALE flush during clock off sequence.
- * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
-enum msm_hs_clk_req_off_state_e {
- CLK_REQ_OFF_START,
- CLK_REQ_OFF_RXSTALE_ISSUED,
- CLK_REQ_OFF_FLUSH_ISSUED,
- CLK_REQ_OFF_RXSTALE_FLUSHED,
-};
-
-/**
- * struct msm_hs_tx
- * @tx_ready_int_en: ok to dma more tx?
- * @dma_in_flight: tx dma in progress
- * @xfer: top level DMA command pointer structure
- * @command_ptr: third level command struct pointer
- * @command_ptr_ptr: second level command list struct pointer
- * @mapped_cmd_ptr: DMA view of third level command struct
- * @mapped_cmd_ptr_ptr: DMA view of second level command list struct
- * @tx_count: number of bytes to transfer in DMA transfer
- * @dma_base: DMA view of UART xmit buffer
- *
- * This structure describes a single Tx DMA transaction. MSM DMA
- * commands have two levels of indirection. The top level command
- * ptr points to a list of command ptr which in turn points to a
- * single DMA 'command'. In our case each Tx transaction consists
- * of a single second level pointer pointing to a 'box type' command.
- */
-struct msm_hs_tx {
- unsigned int tx_ready_int_en;
- unsigned int dma_in_flight;
- struct msm_dmov_cmd xfer;
- dmov_box *command_ptr;
- u32 *command_ptr_ptr;
- dma_addr_t mapped_cmd_ptr;
- dma_addr_t mapped_cmd_ptr_ptr;
- int tx_count;
- dma_addr_t dma_base;
-};
-
-/**
- * struct msm_hs_rx
- * @flush: Rx DMA request state
- * @xfer: top level DMA command pointer structure
- * @cmdptr_dmaaddr: DMA view of second level command structure
- * @command_ptr: third level DMA command pointer structure
- * @command_ptr_ptr: second level DMA command list pointer
- * @mapped_cmd_ptr: DMA view of the third level command structure
- * @wait: wait for DMA completion before shutdown
- * @buffer: destination buffer for RX DMA
- * @rbuffer: DMA view of buffer
- * @pool: dma pool out of which coherent rx buffer is allocated
- * @tty_work: private work-queue for tty flip buffer push task
- *
- * This structure describes a single Rx DMA transaction. Rx DMA
- * transactions use box mode DMA commands.
- */
-struct msm_hs_rx {
- enum flush_reason flush;
- struct msm_dmov_cmd xfer;
- dma_addr_t cmdptr_dmaaddr;
- dmov_box *command_ptr;
- u32 *command_ptr_ptr;
- dma_addr_t mapped_cmd_ptr;
- wait_queue_head_t wait;
- dma_addr_t rbuffer;
- unsigned char *buffer;
- struct dma_pool *pool;
- struct work_struct tty_work;
-};
-
-/**
- * struct msm_hs_rx_wakeup
- * @irq: IRQ line to be configured as interrupt source on Rx activity
- * @ignore: boolean value. 1 = ignore the wakeup interrupt
- * @rx_to_inject: extra character to be inserted to Rx tty on wakeup
- * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character
- *
- * This is an optional structure required for UART Rx GPIO IRQ based
- * wakeup from low power state. UART wakeup can be triggered by RX activity
- * (using a wakeup GPIO on the UART RX pin). This should only be used if
- * there is not a wakeup GPIO on the UART CTS, and the first RX byte is
- * known (eg., with the Bluetooth Texas Instruments HCILL protocol),
- * since the first RX byte will always be lost. RTS will be asserted even
- * while the UART is clocked off in this mode of operation.
- */
-struct msm_hs_rx_wakeup {
- int irq; /* < 0 indicates low power wakeup disabled */
- unsigned char ignore;
- unsigned char inject_rx;
- char rx_to_inject;
-};
-
-/**
- * struct msm_hs_port
- * @uport: embedded uart port structure
- * @imr_reg: shadow value of UARTDM_IMR
- * @clk: uart input clock handle
- * @tx: Tx transaction related data structure
- * @rx: Rx transaction related data structure
- * @dma_tx_channel: Tx DMA command channel
- * @dma_rx_channel: Rx DMA command channel
- * @dma_tx_crci: Tx channel rate control interface number
- * @dma_rx_crci: Rx channel rate control interface number
- * @clk_off_timer: Timer to poll DMA event completion before clock off
- * @clk_off_delay: clk_off_timer poll interval
- * @clk_state: overall clock state
- * @clk_req_off_state: post flush clock states
- * @rx_wakeup: optional rx_wakeup feature related data
- * @exit_lpm_cb: optional callback to exit low power mode
- *
- * Low level serial port structure.
- */
-struct msm_hs_port {
- struct uart_port uport;
- unsigned long imr_reg;
- struct clk *clk;
- struct msm_hs_tx tx;
- struct msm_hs_rx rx;
-
- int dma_tx_channel;
- int dma_rx_channel;
- int dma_tx_crci;
- int dma_rx_crci;
-
- struct hrtimer clk_off_timer;
- ktime_t clk_off_delay;
- enum msm_hs_clk_states_e clk_state;
- enum msm_hs_clk_req_off_state_e clk_req_off_state;
-
- struct msm_hs_rx_wakeup rx_wakeup;
- void (*exit_lpm_cb)(struct uart_port *);
-};
-
-#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
-#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
-#define UARTDM_RX_BUF_SIZE 512
-
-#define UARTDM_NR 2
-
-static struct msm_hs_port q_uart_port[UARTDM_NR];
-static struct platform_driver msm_serial_hs_platform_driver;
-static struct uart_driver msm_hs_driver;
-static struct uart_ops msm_hs_ops;
-static struct workqueue_struct *msm_hs_workqueue;
-
-#define UARTDM_TO_MSM(uart_port) \
- container_of((uart_port), struct msm_hs_port, uport)
-
-static unsigned int use_low_power_rx_wakeup(struct msm_hs_port
- *msm_uport)
-{
- return (msm_uport->rx_wakeup.irq >= 0);
-}
-
-static unsigned int msm_hs_read(struct uart_port *uport,
- unsigned int offset)
-{
- return ioread32(uport->membase + offset);
-}
-
-static void msm_hs_write(struct uart_port *uport, unsigned int offset,
- unsigned int value)
-{
- iowrite32(value, uport->membase + offset);
-}
-
-static void msm_hs_release_port(struct uart_port *port)
-{
- iounmap(port->membase);
-}
-
-static int msm_hs_request_port(struct uart_port *port)
-{
- port->membase = ioremap(port->mapbase, PAGE_SIZE);
- if (unlikely(!port->membase))
- return -ENOMEM;
-
- /* configure the CR Protection to Enable */
- msm_hs_write(port, UARTDM_CR_ADDR, CR_PROTECTION_EN);
- return 0;
-}
-
-static int msm_hs_remove(struct platform_device *pdev)
-{
-
- struct msm_hs_port *msm_uport;
- struct device *dev;
-
- if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
- printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
- return -EINVAL;
- }
-
- msm_uport = &q_uart_port[pdev->id];
- dev = msm_uport->uport.dev;
-
- dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
- DMA_TO_DEVICE);
- dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
- msm_uport->rx.rbuffer);
- dma_pool_destroy(msm_uport->rx.pool);
-
- dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
- DMA_TO_DEVICE);
- dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
- DMA_TO_DEVICE);
- dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
- DMA_TO_DEVICE);
-
- uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
- clk_put(msm_uport->clk);
-
- /* Free the tx resources */
- kfree(msm_uport->tx.command_ptr);
- kfree(msm_uport->tx.command_ptr_ptr);
-
- /* Free the rx resources */
- kfree(msm_uport->rx.command_ptr);
- kfree(msm_uport->rx.command_ptr_ptr);
-
- iounmap(msm_uport->uport.membase);
-
- return 0;
-}
-
-static int msm_hs_init_clk_locked(struct uart_port *uport)
-{
- int ret;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- ret = clk_enable(msm_uport->clk);
- if (ret) {
- printk(KERN_ERR "Error could not turn on UART clk\n");
- return ret;
- }
-
- /* Set up the MREG/NREG/DREG/MNDREG */
- ret = clk_set_rate(msm_uport->clk, uport->uartclk);
- if (ret) {
- printk(KERN_WARNING "Error setting clock rate on UART\n");
- clk_disable(msm_uport->clk);
- return ret;
- }
-
- msm_uport->clk_state = MSM_HS_CLK_ON;
- return 0;
-}
-
-/* Enable and Disable clocks (Used for power management) */
-static void msm_hs_pm(struct uart_port *uport, unsigned int state,
- unsigned int oldstate)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- if (use_low_power_rx_wakeup(msm_uport) ||
- msm_uport->exit_lpm_cb)
- return; /* ignore linux PM states,
- use msm_hs_request_clock API */
-
- switch (state) {
- case 0:
- clk_enable(msm_uport->clk);
- break;
- case 3:
- clk_disable(msm_uport->clk);
- break;
- default:
- dev_err(uport->dev, "msm_serial: Unknown PM state %d\n",
- state);
- }
-}
-
-/*
- * Programs the UARTDM_CSR register with the correct bit rate.
- *
- * Interrupts should be disabled before we are called, as we set
- * the baud rate and the receive stale interrupt level, which
- * depends on the bit rate. The goal is to have roughly 8 ms
- * before indicating stale:
- * roundup(((bit rate * .008) / 10) + 1)
- */
-static void msm_hs_set_bps_locked(struct uart_port *uport,
- unsigned int bps)
-{
- unsigned long rxstale;
- unsigned long data;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- switch (bps) {
- case 300:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_75);
- rxstale = 1;
- break;
- case 600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_150);
- rxstale = 1;
- break;
- case 1200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_300);
- rxstale = 1;
- break;
- case 2400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_600);
- rxstale = 1;
- break;
- case 4800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_1200);
- rxstale = 1;
- break;
- case 9600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
- rxstale = 2;
- break;
- case 14400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_3600);
- rxstale = 3;
- break;
- case 19200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_4800);
- rxstale = 4;
- break;
- case 28800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_7200);
- rxstale = 6;
- break;
- case 38400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_9600);
- rxstale = 8;
- break;
- case 57600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_14400);
- rxstale = 16;
- break;
- case 76800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_19200);
- rxstale = 16;
- break;
- case 115200:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_28800);
- rxstale = 31;
- break;
- case 230400:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_57600);
- rxstale = 31;
- break;
- case 460800:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
- rxstale = 31;
- break;
- case 4000000:
- case 3686400:
- case 3200000:
- case 3500000:
- case 3000000:
- case 2500000:
- case 1500000:
- case 1152000:
- case 1000000:
- case 921600:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
- rxstale = 31;
- break;
- default:
- msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
- /* default to 9600 */
- bps = 9600;
- rxstale = 2;
- break;
- }
- if (bps > 460800)
- uport->uartclk = bps * 16;
- else
- uport->uartclk = UARTCLK;
-
- if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
- printk(KERN_WARNING "Error setting clock rate on UART\n");
- return;
- }
-
- data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
- data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
-
- msm_hs_write(uport, UARTDM_IPR_ADDR, data);
-}
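(Worked instance of the stale-timeout formula above, for orientation only: at 115200 baud, roundup(((115200 * .008) / 10) + 1) = 93 character times, i.e. roughly 8 ms, whereas the switch statement above settles for a fixed value of 31; the table values are pre-chosen rather than computed from the formula at run time.)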
-
-/*
- * termios: new ktermios
- * oldtermios: previous ktermios setting
- *
- * Configure the serial port
- */
-static void msm_hs_set_termios(struct uart_port *uport,
- struct ktermios *termios,
- struct ktermios *oldtermios)
-{
- unsigned int bps;
- unsigned long data;
- unsigned long flags;
- unsigned int c_cflag = termios->c_cflag;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- spin_lock_irqsave(&uport->lock, flags);
- clk_enable(msm_uport->clk);
-
- /* 300 is the minimum baud rate supported by the driver */
- bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
-
- /* Temporary remapping 200 BAUD to 3.2 mbps */
- if (bps == 200)
- bps = 3200000;
-
- msm_hs_set_bps_locked(uport, bps);
-
- data = msm_hs_read(uport, UARTDM_MR2_ADDR);
- data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
- /* set parity */
- if (PARENB == (c_cflag & PARENB)) {
- if (PARODD == (c_cflag & PARODD))
- data |= ODD_PARITY;
- else if (CMSPAR == (c_cflag & CMSPAR))
- data |= SPACE_PARITY;
- else
- data |= EVEN_PARITY;
- }
-
- /* Set bits per char */
- data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
-
- switch (c_cflag & CSIZE) {
- case CS5:
- data |= FIVE_BPC;
- break;
- case CS6:
- data |= SIX_BPC;
- break;
- case CS7:
- data |= SEVEN_BPC;
- break;
- default:
- data |= EIGHT_BPC;
- break;
- }
- /* stop bits */
- if (c_cflag & CSTOPB) {
- data |= STOP_BIT_TWO;
- } else {
- /* otherwise 1 stop bit */
- data |= STOP_BIT_ONE;
- }
- data |= UARTDM_MR2_ERROR_MODE_BMSK;
- /* write parity/bits per char/stop bit configuration */
- msm_hs_write(uport, UARTDM_MR2_ADDR, data);
-
- /* Configure HW flow control */
- data = msm_hs_read(uport, UARTDM_MR1_ADDR);
-
- data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
-
- if (c_cflag & CRTSCTS) {
- data |= UARTDM_MR1_CTS_CTL_BMSK;
- data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
- }
-
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
-
- uport->ignore_status_mask = termios->c_iflag & INPCK;
- uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
- uport->read_status_mask = (termios->c_cflag & CREAD);
-
- msm_hs_write(uport, UARTDM_IMR_ADDR, 0);
-
- /* Set Transmit software time out */
- uart_update_timeout(uport, c_cflag, bps);
-
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
-
- if (msm_uport->rx.flush == FLUSH_NONE) {
- msm_uport->rx.flush = FLUSH_IGNORE;
- msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
- }
-
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
-
- clk_disable(msm_uport->clk);
- spin_unlock_irqrestore(&uport->lock, flags);
-}
-
-/*
- * Standard API, transmitter empty check.
- * Reports empty once any character in the transmit shift register
- * has been sent.
- */
-static unsigned int msm_hs_tx_empty(struct uart_port *uport)
-{
- unsigned int data;
- unsigned int ret = 0;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- clk_enable(msm_uport->clk);
-
- data = msm_hs_read(uport, UARTDM_SR_ADDR);
- if (data & UARTDM_SR_TXEMT_BMSK)
- ret = TIOCSER_TEMT;
-
- clk_disable(msm_uport->clk);
-
- return ret;
-}
-
-/*
- * Standard API, Stop transmitter.
- * Any character in the transmit shift register is still sent,
- * as is the current data mover transfer.
- */
-static void msm_hs_stop_tx_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- msm_uport->tx.tx_ready_int_en = 0;
-}
-
-/*
- * Standard API, Stop receiver as soon as possible.
- *
- * This function immediately terminates the operation of the
- * channel receiver; any incoming characters are lost. None
- * of the receiver status bits are affected by this command and
- * characters that are already in the receive FIFO stay there.
- */
-static void msm_hs_stop_rx_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- unsigned int data;
-
- clk_enable(msm_uport->clk);
-
- /* disable dlink */
- data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
- data &= ~UARTDM_RX_DM_EN_BMSK;
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
-
- /* Disable the receiver */
- if (msm_uport->rx.flush == FLUSH_NONE)
- msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
-
- if (msm_uport->rx.flush != FLUSH_SHUTDOWN)
- msm_uport->rx.flush = FLUSH_STOP;
-
- clk_disable(msm_uport->clk);
-}
-
-/* Transmit the next chunk of data */
-static void msm_hs_submit_tx_locked(struct uart_port *uport)
-{
- int left;
- int tx_count;
- dma_addr_t src_addr;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- struct msm_hs_tx *tx = &msm_uport->tx;
- struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
-
- if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
- msm_hs_stop_tx_locked(uport);
- return;
- }
-
- tx->dma_in_flight = 1;
-
- tx_count = uart_circ_chars_pending(tx_buf);
-
- if (UARTDM_TX_BUF_SIZE < tx_count)
- tx_count = UARTDM_TX_BUF_SIZE;
-
- left = UART_XMIT_SIZE - tx_buf->tail;
-
- if (tx_count > left)
- tx_count = left;
-
- src_addr = tx->dma_base + tx_buf->tail;
- dma_sync_single_for_device(uport->dev, src_addr, tx_count,
- DMA_TO_DEVICE);
-
- tx->command_ptr->num_rows = (((tx_count + 15) >> 4) << 16) |
- ((tx_count + 15) >> 4);
- tx->command_ptr->src_row_addr = src_addr;
-
- dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
- sizeof(dmov_box), DMA_TO_DEVICE);
-
- *tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
-
- dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
- sizeof(u32), DMA_TO_DEVICE);
-
- /* Save tx_count to use in Callback */
- tx->tx_count = tx_count;
- msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);
-
- /* Disable the tx_ready interrupt */
- msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
- msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
-}
-
-/* Start to receive the next chunk of data */
-static void msm_hs_start_rx_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
- msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
- msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
- msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
-
- msm_uport->rx.flush = FLUSH_NONE;
- msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel, &msm_uport->rx.xfer);
-
- /* might have finished RX and be ready to clock off */
- hrtimer_start(&msm_uport->clk_off_timer, msm_uport->clk_off_delay,
- HRTIMER_MODE_REL);
-}
-
-/* Enable the transmitter Interrupt */
-static void msm_hs_start_tx_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- clk_enable(msm_uport->clk);
-
- if (msm_uport->exit_lpm_cb)
- msm_uport->exit_lpm_cb(uport);
-
- if (msm_uport->tx.tx_ready_int_en == 0) {
- msm_uport->tx.tx_ready_int_en = 1;
- msm_hs_submit_tx_locked(uport);
- }
-
- clk_disable(msm_uport->clk);
-}
-
-/*
- * This routine is called when we are done with a DMA transfer
- *
- * This routine is registered with Data mover when we set
- * up a Data Mover transfer. It is called from Data mover ISR
- * when the DMA transfer is done.
- */
-static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
- unsigned int result,
- struct msm_dmov_errdata *err)
-{
- unsigned long flags;
- struct msm_hs_port *msm_uport;
-
- /* DMA did not finish properly */
- WARN_ON((((result & RSLT_FIFO_CNTR_BMSK) >> 28) == 1) &&
- !(result & RSLT_VLD));
-
- msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);
-
- spin_lock_irqsave(&msm_uport->uport.lock, flags);
- clk_enable(msm_uport->clk);
-
- msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
- msm_hs_write(&msm_uport->uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
-
- clk_disable(msm_uport->clk);
- spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
-}
-
-/*
- * This routine is called when we are done with a DMA transfer or
- * a flush has been sent to the data mover driver.
- *
- * This routine is registered with Data mover when we set up a Data Mover
- * transfer. It is called from Data mover ISR when the DMA transfer is done.
- */
-static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
- unsigned int result,
- struct msm_dmov_errdata *err)
-{
- int retval;
- int rx_count;
- unsigned long status;
- unsigned int error_f = 0;
- unsigned long flags;
- unsigned int flush;
- struct tty_port *port;
- struct uart_port *uport;
- struct msm_hs_port *msm_uport;
-
- msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
- uport = &msm_uport->uport;
-
- spin_lock_irqsave(&uport->lock, flags);
- clk_enable(msm_uport->clk);
-
- port = &uport->state->port;
-
- msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
-
- status = msm_hs_read(uport, UARTDM_SR_ADDR);
-
- /* an overrun is not associated with data in the FIFO */
- if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
- (uport->read_status_mask & CREAD))) {
- tty_insert_flip_char(port, 0, TTY_OVERRUN);
- uport->icount.buf_overrun++;
- error_f = 1;
- }
-
- if (!(uport->ignore_status_mask & INPCK))
- status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
-
- if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
- /* Cannot tell the difference between a parity and a framing error */
- uport->icount.parity++;
- error_f = 1;
- if (uport->ignore_status_mask & IGNPAR)
- tty_insert_flip_char(port, 0, TTY_PARITY);
- }
-
- if (error_f)
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
-
- if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
- msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
-
- flush = msm_uport->rx.flush;
- if (flush == FLUSH_IGNORE)
- msm_hs_start_rx_locked(uport);
- if (flush == FLUSH_STOP)
- msm_uport->rx.flush = FLUSH_SHUTDOWN;
- if (flush >= FLUSH_DATA_INVALID)
- goto out;
-
- rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
-
- if (0 != (uport->read_status_mask & CREAD)) {
- retval = tty_insert_flip_string(port, msm_uport->rx.buffer,
- rx_count);
- BUG_ON(retval != rx_count);
- }
-
- msm_hs_start_rx_locked(uport);
-
-out:
- clk_disable(msm_uport->clk);
-
- spin_unlock_irqrestore(&uport->lock, flags);
-
- if (flush < FLUSH_DATA_INVALID)
- queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
-}
-
-static void msm_hs_tty_flip_buffer_work(struct work_struct *work)
-{
- struct msm_hs_port *msm_uport =
- container_of(work, struct msm_hs_port, rx.tty_work);
-
- tty_flip_buffer_push(&msm_uport->uport.state->port);
-}
-
-/*
- * Standard API, Current states of modem control inputs
- *
- * Since CTS can be handled entirely by HARDWARE we always
- * indicate clear to send and count on the TX FIFO to block when
- * it fills up.
- *
- * - TIOCM_DCD
- * - TIOCM_CTS
- * - TIOCM_DSR
- * - TIOCM_RI
- * (Unsupported) DCD and DSR are reported high; RI is reported low.
- */
-static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
-{
- return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
-}
-
-/*
- * True enables UART auto RFR, which indicates we are ready for data if the RX
- * buffer is not full. False disables auto RFR, and deasserts RFR to indicate
- * we are not ready for data. Must be called with UART clock on.
- */
-static void set_rfr_locked(struct uart_port *uport, int auto_rfr)
-{
- unsigned int data;
-
- data = msm_hs_read(uport, UARTDM_MR1_ADDR);
-
- if (auto_rfr) {
- /* enable auto ready-for-receiving */
- data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
- } else {
- /* disable auto ready-for-receiving */
- data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
- /* RFR is active low, set high */
- msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
- }
-}
-
-/*
- * Standard API, used to set or clear RFR
- */
-static void msm_hs_set_mctrl_locked(struct uart_port *uport,
- unsigned int mctrl)
-{
- unsigned int auto_rfr;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- clk_enable(msm_uport->clk);
-
- auto_rfr = TIOCM_RTS & mctrl ? 1 : 0;
- set_rfr_locked(uport, auto_rfr);
-
- clk_disable(msm_uport->clk);
-}
-
-/* Standard API, Enable modem status (CTS) interrupt */
-static void msm_hs_enable_ms_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- clk_enable(msm_uport->clk);
-
- /* Enable DELTA_CTS Interrupt */
- msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
-
- clk_disable(msm_uport->clk);
-
-}
-
-/*
- * Standard API, Break Signal
- *
- * Control the transmission of a break signal. ctl eq 0 => break
- * signal terminate ctl ne 0 => start break signal
- */
-static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- clk_enable(msm_uport->clk);
- msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
- clk_disable(msm_uport->clk);
-}
-
-static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&uport->lock, flags);
- if (cfg_flags & UART_CONFIG_TYPE) {
- uport->type = PORT_MSM;
- msm_hs_request_port(uport);
- }
- spin_unlock_irqrestore(&uport->lock, flags);
-}
-
-/* Handle CTS changes (Called from interrupt handler) */
-static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- clk_enable(msm_uport->clk);
-
- /* clear interrupt */
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
- uport->icount.cts++;
-
- clk_disable(msm_uport->clk);
-
- /* wake up any process blocked in the TIOCMIWAIT ioctl */
- wake_up_interruptible(&uport->state->port.delta_msr_wait);
-}
-
-/* Check if the TX path is flushed, and if so clock off.
- * Returns: 0 if we did not clock off and need to retry (still sending final byte),
- *         -1 if we did not clock off and should not retry,
- *          1 if we clocked off.
- */
-static int msm_hs_check_clock_off_locked(struct uart_port *uport)
-{
- unsigned long sr_status;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- struct circ_buf *tx_buf = &uport->state->xmit;
-
- /* Cancel if tx tty buffer is not empty, dma is in flight,
- * or tx fifo is not empty, or rx fifo is not empty */
- if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
- !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
- (msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) ||
- !(msm_uport->imr_reg & UARTDM_ISR_RXLEV_BMSK)) {
- return -1;
- }
-
- /* Make sure the uart is finished with the last byte */
- sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
- if (!(sr_status & UARTDM_SR_TXEMT_BMSK))
- return 0; /* retry */
-
- /* Make sure forced RXSTALE flush complete */
- switch (msm_uport->clk_req_off_state) {
- case CLK_REQ_OFF_START:
- msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
- msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
- return 0; /* RXSTALE flush not complete - retry */
- case CLK_REQ_OFF_RXSTALE_ISSUED:
- case CLK_REQ_OFF_FLUSH_ISSUED:
- return 0; /* RXSTALE flush not complete - retry */
- case CLK_REQ_OFF_RXSTALE_FLUSHED:
- break; /* continue */
- }
-
- if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
- if (msm_uport->rx.flush == FLUSH_NONE)
- msm_hs_stop_rx_locked(uport);
- return 0; /* come back later to really clock off */
- }
-
- /* we really want to clock off */
- clk_disable(msm_uport->clk);
- msm_uport->clk_state = MSM_HS_CLK_OFF;
-
- if (use_low_power_rx_wakeup(msm_uport)) {
- msm_uport->rx_wakeup.ignore = 1;
- enable_irq(msm_uport->rx_wakeup.irq);
- }
- return 1;
-}
-
-static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
-{
- unsigned long flags;
- int ret = HRTIMER_NORESTART;
- struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
- clk_off_timer);
- struct uart_port *uport = &msm_uport->uport;
-
- spin_lock_irqsave(&uport->lock, flags);
-
- if (!msm_hs_check_clock_off_locked(uport)) {
- hrtimer_forward_now(timer, msm_uport->clk_off_delay);
- ret = HRTIMER_RESTART;
- }
-
- spin_unlock_irqrestore(&uport->lock, flags);
-
- return ret;
-}
-
-static irqreturn_t msm_hs_isr(int irq, void *dev)
-{
- unsigned long flags;
- unsigned long isr_status;
- struct msm_hs_port *msm_uport = dev;
- struct uart_port *uport = &msm_uport->uport;
- struct circ_buf *tx_buf = &uport->state->xmit;
- struct msm_hs_tx *tx = &msm_uport->tx;
- struct msm_hs_rx *rx = &msm_uport->rx;
-
- spin_lock_irqsave(&uport->lock, flags);
-
- isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
-
- /* Uart RX starting */
- if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
- msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
- }
- /* Stale rx interrupt */
- if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
- msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
-
- if (msm_uport->clk_req_off_state == CLK_REQ_OFF_RXSTALE_ISSUED)
- msm_uport->clk_req_off_state =
- CLK_REQ_OFF_FLUSH_ISSUED;
- if (rx->flush == FLUSH_NONE) {
- rx->flush = FLUSH_DATA_READY;
- msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
- }
- }
- /* tx ready interrupt */
- if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
- /* Clear TX Ready */
- msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
-
- if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
- msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR,
- msm_uport->imr_reg);
- }
-
- /* Complete DMA TX transactions and submit new transactions */
- tx_buf->tail = (tx_buf->tail + tx->tx_count) & ~UART_XMIT_SIZE;
-
- tx->dma_in_flight = 0;
-
- uport->icount.tx += tx->tx_count;
- if (tx->tx_ready_int_en)
- msm_hs_submit_tx_locked(uport);
-
- if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
- uart_write_wakeup(uport);
- }
- if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
- /* TX FIFO is empty */
- msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
- if (!msm_hs_check_clock_off_locked(uport))
- hrtimer_start(&msm_uport->clk_off_timer,
- msm_uport->clk_off_delay,
- HRTIMER_MODE_REL);
- }
-
- /* Change in CTS interrupt */
- if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
- msm_hs_handle_delta_cts_locked(uport);
-
- spin_unlock_irqrestore(&uport->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-void msm_hs_request_clock_off_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- if (msm_uport->clk_state == MSM_HS_CLK_ON) {
- msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
- msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
- if (!use_low_power_rx_wakeup(msm_uport))
- set_rfr_locked(uport, 0);
- msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
- }
-}
-
-/**
- * msm_hs_request_clock_off - request to (i.e. asynchronously) turn off uart
- * clock once pending TX is flushed and Rx DMA command is terminated.
- * @uport: uart_port structure for the device instance.
- *
- * This function puts the device into a partially active low power mode. It
- * waits for all pending tx transactions to complete, flushes the ongoing Rx
- * DMA command and terminates the UART side Rx transaction, puts the UART HW
- * in non-DMA mode and then clocks off the device. A client calls this when no
- * UART data is expected. msm_hs_request_clock_on() must be called before any
- * further UART data can be sent or received.
- */
-void msm_hs_request_clock_off(struct uart_port *uport)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&uport->lock, flags);
- msm_hs_request_clock_off_locked(uport);
- spin_unlock_irqrestore(&uport->lock, flags);
-}
-
-void msm_hs_request_clock_on_locked(struct uart_port *uport)
-{
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- unsigned int data;
-
- switch (msm_uport->clk_state) {
- case MSM_HS_CLK_OFF:
- clk_enable(msm_uport->clk);
- disable_irq_nosync(msm_uport->rx_wakeup.irq);
- /* fall-through */
- case MSM_HS_CLK_REQUEST_OFF:
- if (msm_uport->rx.flush == FLUSH_STOP ||
- msm_uport->rx.flush == FLUSH_SHUTDOWN) {
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
- data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
- data |= UARTDM_RX_DM_EN_BMSK;
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
- }
- hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
- if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
- msm_hs_start_rx_locked(uport);
- if (!use_low_power_rx_wakeup(msm_uport))
- set_rfr_locked(uport, 1);
- if (msm_uport->rx.flush == FLUSH_STOP)
- msm_uport->rx.flush = FLUSH_IGNORE;
- msm_uport->clk_state = MSM_HS_CLK_ON;
- break;
- case MSM_HS_CLK_ON:
- break;
- case MSM_HS_CLK_PORT_OFF:
- break;
- }
-}
-
-/**
- * msm_hs_request_clock_on - Switch the device from partially active low
- * power mode to fully active (i.e. clock on) mode.
- * @uport: uart_port structure for the device.
- *
- * This function switches on the input clock, puts UART HW into DMA mode
- * and enqueues an Rx DMA command if the device was in partially active
- * mode. It has no effect if called with the device in inactive state.
- */
-void msm_hs_request_clock_on(struct uart_port *uport)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&uport->lock, flags);
- msm_hs_request_clock_on_locked(uport);
- spin_unlock_irqrestore(&uport->lock, flags);
-}
-
-static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
-{
- unsigned int wakeup = 0;
- unsigned long flags;
- struct msm_hs_port *msm_uport = dev;
- struct uart_port *uport = &msm_uport->uport;
-
- spin_lock_irqsave(&uport->lock, flags);
- if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
- /* ignore the first irq - it is a pending irq that occurred
- * before enable_irq() */
- if (msm_uport->rx_wakeup.ignore)
- msm_uport->rx_wakeup.ignore = 0;
- else
- wakeup = 1;
- }
-
- if (wakeup) {
- /* the uart was clocked off during an rx, wake up and
- * optionally inject char into tty rx */
- msm_hs_request_clock_on_locked(uport);
- if (msm_uport->rx_wakeup.inject_rx) {
- tty_insert_flip_char(&uport->state->port,
- msm_uport->rx_wakeup.rx_to_inject,
- TTY_NORMAL);
- queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
- }
- }
-
- spin_unlock_irqrestore(&uport->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-static const char *msm_hs_type(struct uart_port *port)
-{
- return (port->type == PORT_MSM) ? "MSM_HS_UART" : NULL;
-}
-
-/* Called when port is opened */
-static int msm_hs_startup(struct uart_port *uport)
-{
- int ret;
- int rfr_level;
- unsigned long flags;
- unsigned int data;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- struct circ_buf *tx_buf = &uport->state->xmit;
- struct msm_hs_tx *tx = &msm_uport->tx;
- struct msm_hs_rx *rx = &msm_uport->rx;
-
- rfr_level = uport->fifosize;
- if (rfr_level > 16)
- rfr_level -= 16;
-
- tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
- DMA_TO_DEVICE);
-
- /* do not let tty layer execute RX in global workqueue, use a
- * dedicated workqueue managed by this driver */
- uport->state->port.low_latency = 1;
-
- /* turn on uart clk */
- ret = msm_hs_init_clk_locked(uport);
- if (unlikely(ret)) {
- printk(KERN_ERR "Turning uartclk failed!\n");
- goto err_msm_hs_init_clk;
- }
-
- /* Set auto RFR Level */
- data = msm_hs_read(uport, UARTDM_MR1_ADDR);
- data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
- data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
- data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
- data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
- msm_hs_write(uport, UARTDM_MR1_ADDR, data);
-
- /* Make sure RXSTALE count is non-zero */
- data = msm_hs_read(uport, UARTDM_IPR_ADDR);
- if (!data) {
- data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
- msm_hs_write(uport, UARTDM_IPR_ADDR, data);
- }
-
- /* Enable Data Mover Mode */
- data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
- msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
-
- /* Reset TX */
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
- msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
- msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
- /* Turn on Uart Receiver */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);
-
- /* Turn on Uart Transmitter */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);
-
- /* Initialize the tx */
- tx->tx_ready_int_en = 0;
- tx->dma_in_flight = 0;
-
- tx->xfer.complete_func = msm_hs_dmov_tx_callback;
- tx->xfer.execute_func = NULL;
-
- tx->command_ptr->cmd = CMD_LC |
- CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;
-
- tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
- | (MSM_UARTDM_BURST_SIZE);
-
- tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);
-
- tx->command_ptr->dst_row_addr =
- msm_uport->uport.mapbase + UARTDM_TF_ADDR;
-
-
- /* Initialize the rx */
- rx->xfer.complete_func = msm_hs_dmov_rx_callback;
- rx->xfer.execute_func = NULL;
-
- rx->command_ptr->cmd = CMD_LC |
- CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
-
- rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
- | (MSM_UARTDM_BURST_SIZE);
- rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;
- rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
-
-
- msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
- /* Enable reading the current CTS, no harm even if CTS is ignored */
- msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
-
- msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */
-
-
- ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
- "msm_hs_uart", msm_uport);
- if (unlikely(ret)) {
- printk(KERN_ERR "Request msm_hs_uart IRQ failed!\n");
- goto err_request_irq;
- }
- if (use_low_power_rx_wakeup(msm_uport)) {
- ret = request_irq(msm_uport->rx_wakeup.irq,
- msm_hs_rx_wakeup_isr,
- IRQF_TRIGGER_FALLING,
- "msm_hs_rx_wakeup", msm_uport);
- if (unlikely(ret)) {
- printk(KERN_ERR "Request msm_hs_rx_wakeup IRQ failed!\n");
- free_irq(uport->irq, msm_uport);
- goto err_request_irq;
- }
- disable_irq(msm_uport->rx_wakeup.irq);
- }
-
- spin_lock_irqsave(&uport->lock, flags);
-
- msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
- msm_hs_start_rx_locked(uport);
-
- spin_unlock_irqrestore(&uport->lock, flags);
- ret = pm_runtime_set_active(uport->dev);
- if (ret)
- dev_err(uport->dev, "set active error:%d\n", ret);
- pm_runtime_enable(uport->dev);
-
- return 0;
-
-err_request_irq:
-err_msm_hs_init_clk:
- dma_unmap_single(uport->dev, tx->dma_base,
- UART_XMIT_SIZE, DMA_TO_DEVICE);
- return ret;
-}
-
-/* Initialize tx and rx data structures */
-static int uartdm_init_port(struct uart_port *uport)
-{
- int ret = 0;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- struct msm_hs_tx *tx = &msm_uport->tx;
- struct msm_hs_rx *rx = &msm_uport->rx;
-
- /* Allocate the command pointer. Needs to be 64 bit aligned */
- tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
- if (!tx->command_ptr)
- return -ENOMEM;
-
- tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
- if (!tx->command_ptr_ptr) {
- ret = -ENOMEM;
- goto err_tx_command_ptr_ptr;
- }
-
- tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
- sizeof(dmov_box), DMA_TO_DEVICE);
- tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
- tx->command_ptr_ptr,
- sizeof(u32), DMA_TO_DEVICE);
- tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
-
- init_waitqueue_head(&rx->wait);
-
- rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
- UARTDM_RX_BUF_SIZE, 16, 0);
- if (!rx->pool) {
- pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
- ret = -ENOMEM;
- goto err_dma_pool_create;
- }
-
- rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
- if (!rx->buffer) {
- pr_err("%s(): cannot allocate rx->buffer", __func__);
- ret = -ENOMEM;
- goto err_dma_pool_alloc;
- }
-
- /* Allocate the command pointer. Needs to be 64 bit aligned */
- rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
- if (!rx->command_ptr) {
- pr_err("%s(): cannot allocate rx->command_ptr", __func__);
- ret = -ENOMEM;
- goto err_rx_command_ptr;
- }
-
- rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
- if (!rx->command_ptr_ptr) {
- pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
- ret = -ENOMEM;
- goto err_rx_command_ptr_ptr;
- }
-
- rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
- (UARTDM_RX_BUF_SIZE >> 4);
-
- rx->command_ptr->dst_row_addr = rx->rbuffer;
-
- rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
- sizeof(dmov_box), DMA_TO_DEVICE);
-
- *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
-
- rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
- sizeof(u32), DMA_TO_DEVICE);
- rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
-
- INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);
-
- return ret;
-
-err_rx_command_ptr_ptr:
- kfree(rx->command_ptr);
-err_rx_command_ptr:
- dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
- msm_uport->rx.rbuffer);
-err_dma_pool_alloc:
- dma_pool_destroy(msm_uport->rx.pool);
-err_dma_pool_create:
- dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
- sizeof(u32), DMA_TO_DEVICE);
- dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
- sizeof(dmov_box), DMA_TO_DEVICE);
- kfree(msm_uport->tx.command_ptr_ptr);
-err_tx_command_ptr_ptr:
- kfree(msm_uport->tx.command_ptr);
- return ret;
-}
-
-static int msm_hs_probe(struct platform_device *pdev)
-{
- int ret;
- struct uart_port *uport;
- struct msm_hs_port *msm_uport;
- struct resource *resource;
- const struct msm_serial_hs_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
-
- if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
- printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
- return -EINVAL;
- }
-
- msm_uport = &q_uart_port[pdev->id];
- uport = &msm_uport->uport;
-
- uport->dev = &pdev->dev;
-
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!resource))
- return -ENXIO;
-
- uport->mapbase = resource->start;
- uport->irq = platform_get_irq(pdev, 0);
- if (unlikely(uport->irq < 0))
- return -ENXIO;
-
- if (unlikely(irq_set_irq_wake(uport->irq, 1)))
- return -ENXIO;
-
- if (pdata == NULL || pdata->rx_wakeup_irq < 0)
- msm_uport->rx_wakeup.irq = -1;
- else {
- msm_uport->rx_wakeup.irq = pdata->rx_wakeup_irq;
- msm_uport->rx_wakeup.ignore = 1;
- msm_uport->rx_wakeup.inject_rx = pdata->inject_rx_on_wakeup;
- msm_uport->rx_wakeup.rx_to_inject = pdata->rx_to_inject;
-
- if (unlikely(msm_uport->rx_wakeup.irq < 0))
- return -ENXIO;
-
- if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
- return -ENXIO;
- }
-
- if (pdata == NULL)
- msm_uport->exit_lpm_cb = NULL;
- else
- msm_uport->exit_lpm_cb = pdata->exit_lpm_cb;
-
- resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- "uartdm_channels");
- if (unlikely(!resource))
- return -ENXIO;
-
- msm_uport->dma_tx_channel = resource->start;
- msm_uport->dma_rx_channel = resource->end;
-
- resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- "uartdm_crci");
- if (unlikely(!resource))
- return -ENXIO;
-
- msm_uport->dma_tx_crci = resource->start;
- msm_uport->dma_rx_crci = resource->end;
-
- uport->iotype = UPIO_MEM;
- uport->fifosize = UART_FIFOSIZE;
- uport->ops = &msm_hs_ops;
- uport->flags = UPF_BOOT_AUTOCONF;
- uport->uartclk = UARTCLK;
- msm_uport->imr_reg = 0x0;
- msm_uport->clk = clk_get(&pdev->dev, "uartdm_clk");
- if (IS_ERR(msm_uport->clk))
- return PTR_ERR(msm_uport->clk);
-
- ret = uartdm_init_port(uport);
- if (unlikely(ret))
- return ret;
-
- msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
- hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
- msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
-
- uport->line = pdev->id;
- return uart_add_one_port(&msm_hs_driver, uport);
-}
-
-static int __init msm_serial_hs_init(void)
-{
- int ret, i;
-
- /* Init all UARTS as non-configured */
- for (i = 0; i < UARTDM_NR; i++)
- q_uart_port[i].uport.type = PORT_UNKNOWN;
-
- msm_hs_workqueue = create_singlethread_workqueue("msm_serial_hs");
- if (unlikely(!msm_hs_workqueue))
- return -ENOMEM;
-
- ret = uart_register_driver(&msm_hs_driver);
- if (unlikely(ret)) {
- printk(KERN_ERR "%s failed to load\n", __func__);
- goto err_uart_register_driver;
- }
-
- ret = platform_driver_register(&msm_serial_hs_platform_driver);
- if (ret) {
- printk(KERN_ERR "%s failed to load\n", __func__);
- goto err_platform_driver_register;
- }
-
- return ret;
-
-err_platform_driver_register:
- uart_unregister_driver(&msm_hs_driver);
-err_uart_register_driver:
- destroy_workqueue(msm_hs_workqueue);
- return ret;
-}
-module_init(msm_serial_hs_init);
-
-/*
- * Called by the upper layer when port is closed.
- * - Disables the port
- * - Unhooks the ISR
- */
-static void msm_hs_shutdown(struct uart_port *uport)
-{
- unsigned long flags;
- struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
- BUG_ON(msm_uport->rx.flush < FLUSH_STOP);
-
- spin_lock_irqsave(&uport->lock, flags);
- clk_enable(msm_uport->clk);
-
- /* Disable the transmitter */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
- /* Disable the receiver */
- msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
-
- pm_runtime_disable(uport->dev);
- pm_runtime_set_suspended(uport->dev);
-
- /* Free the interrupt */
- free_irq(uport->irq, msm_uport);
- if (use_low_power_rx_wakeup(msm_uport))
- free_irq(msm_uport->rx_wakeup.irq, msm_uport);
-
- msm_uport->imr_reg = 0;
- msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
-
- wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
-
- clk_disable(msm_uport->clk); /* to balance local clk_enable() */
- if (msm_uport->clk_state != MSM_HS_CLK_OFF)
- clk_disable(msm_uport->clk); /* to balance clk_state */
- msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
-
- dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
- UART_XMIT_SIZE, DMA_TO_DEVICE);
-
- spin_unlock_irqrestore(&uport->lock, flags);
-
- if (cancel_work_sync(&msm_uport->rx.tty_work))
- msm_hs_tty_flip_buffer_work(&msm_uport->rx.tty_work);
-}
-
-static void __exit msm_serial_hs_exit(void)
-{
- flush_workqueue(msm_hs_workqueue);
- destroy_workqueue(msm_hs_workqueue);
- platform_driver_unregister(&msm_serial_hs_platform_driver);
- uart_unregister_driver(&msm_hs_driver);
-}
-module_exit(msm_serial_hs_exit);
-
-#ifdef CONFIG_PM
-static int msm_hs_runtime_idle(struct device *dev)
-{
- /*
- * returning success from idle results in runtime suspend being
- * called
- */
- return 0;
-}
-
-static int msm_hs_runtime_resume(struct device *dev)
-{
- struct platform_device *pdev = container_of(dev, struct
- platform_device, dev);
- struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
-
- msm_hs_request_clock_on(&msm_uport->uport);
- return 0;
-}
-
-static int msm_hs_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = container_of(dev, struct
- platform_device, dev);
- struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
-
- msm_hs_request_clock_off(&msm_uport->uport);
- return 0;
-}
-#else
-#define msm_hs_runtime_idle NULL
-#define msm_hs_runtime_resume NULL
-#define msm_hs_runtime_suspend NULL
-#endif
-
-static const struct dev_pm_ops msm_hs_dev_pm_ops = {
- .runtime_suspend = msm_hs_runtime_suspend,
- .runtime_resume = msm_hs_runtime_resume,
- .runtime_idle = msm_hs_runtime_idle,
-};
-
-static struct platform_driver msm_serial_hs_platform_driver = {
- .probe = msm_hs_probe,
- .remove = msm_hs_remove,
- .driver = {
- .name = "msm_serial_hs",
- .pm = &msm_hs_dev_pm_ops,
- },
-};
-
-static struct uart_driver msm_hs_driver = {
- .owner = THIS_MODULE,
- .driver_name = "msm_serial_hs",
- .dev_name = "ttyHS",
- .nr = UARTDM_NR,
- .cons = 0,
-};
-
-static struct uart_ops msm_hs_ops = {
- .tx_empty = msm_hs_tx_empty,
- .set_mctrl = msm_hs_set_mctrl_locked,
- .get_mctrl = msm_hs_get_mctrl_locked,
- .stop_tx = msm_hs_stop_tx_locked,
- .start_tx = msm_hs_start_tx_locked,
- .stop_rx = msm_hs_stop_rx_locked,
- .enable_ms = msm_hs_enable_ms_locked,
- .break_ctl = msm_hs_break_ctl,
- .startup = msm_hs_startup,
- .shutdown = msm_hs_shutdown,
- .set_termios = msm_hs_set_termios,
- .pm = msm_hs_pm,
- .type = msm_hs_type,
- .config_port = msm_hs_config_port,
- .release_port = msm_hs_release_port,
- .request_port = msm_hs_request_port,
-};
-
-MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
-MODULE_VERSION("1.2");
-MODULE_LICENSE("GPL v2");
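For orientation, a minimal sketch of how a client of the clock-gating API documented in the removed driver (msm_hs_request_clock_on/off) might bracket a transfer; the client function below is hypothetical and only illustrates the call order described in the kernel-doc comments above.

    /* Hypothetical client of the removed msm_serial_hs clock-gating API. */
    #include <linux/serial_core.h>

    /* Normally provided by the driver's platform header. */
    void msm_hs_request_clock_on(struct uart_port *uport);
    void msm_hs_request_clock_off(struct uart_port *uport);

    static void example_client_transfer(struct uart_port *uport)
    {
            /* Fully clock the port before any data is expected. */
            msm_hs_request_clock_on(uport);

            /* ... queue TX data and/or wait for expected RX here ... */

            /*
             * Request an asynchronous clock-off: the driver flushes pending
             * TX, terminates the Rx DMA command and only then gates the clock.
             */
            msm_hs_request_clock_off(uport);
    }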
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index d1298b6cc68e..f7e5825b55ab 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -176,7 +176,7 @@ static struct platform_device_id mxs_auart_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, mxs_auart_devtype);
-static struct of_device_id mxs_auart_dt_ids[] = {
+static const struct of_device_id mxs_auart_dt_ids[] = {
{
.compatible = "fsl,imx28-auart",
.data = &mxs_auart_devtype[IMX28_AUART]
@@ -1155,14 +1155,14 @@ static int serial_mxs_probe_dt(struct mxs_auart_port *s,
return 0;
}
-static bool mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev)
+static int mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev)
{
enum mctrl_gpio_idx i;
struct gpio_desc *gpiod;
s->gpios = mctrl_gpio_init(dev, 0);
- if (IS_ERR_OR_NULL(s->gpios))
- return false;
+ if (IS_ERR(s->gpios))
+ return PTR_ERR(s->gpios);
/* Block (enabled before) DMA option if RTS or CTS is GPIO line */
if (!RTS_AT_AUART() || !CTS_AT_AUART()) {
@@ -1180,7 +1180,7 @@ static bool mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev)
s->gpio_irq[i] = -EINVAL;
}
- return true;
+ return 0;
}
static void mxs_auart_free_gpio_irq(struct mxs_auart_port *s)
@@ -1276,9 +1276,11 @@ static int mxs_auart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s);
- if (!mxs_auart_init_gpios(s, &pdev->dev))
- dev_err(&pdev->dev,
- "Failed to initialize GPIOs. The serial port may not work as expected\n");
+ ret = mxs_auart_init_gpios(s, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize GPIOs.\n");
+ return ret;
+ }
/*
* Get the GPIO lines IRQ
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 33fb94f78967..137381e649e5 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -89,6 +89,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
spin_lock_init(&port->lock);
port->mapbase = resource.start;
+ port->mapsize = resource_size(&resource);
/* Check for shifted address mapping */
if (of_property_read_u32(np, "reg-offset", &prop) == 0)
@@ -115,7 +116,8 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->iotype = UPIO_MEM;
break;
case 4:
- port->iotype = UPIO_MEM32;
+ port->iotype = of_device_is_big_endian(np) ?
+ UPIO_MEM32BE : UPIO_MEM32;
break;
default:
dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
@@ -155,7 +157,7 @@ out:
/*
* Try to register a serial port
*/
-static struct of_device_id of_platform_serial_table[];
+static const struct of_device_id of_platform_serial_table[];
static int of_platform_serial_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
@@ -320,7 +322,7 @@ static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
/*
* A few common types, add more as needed.
*/
-static struct of_device_id of_platform_serial_table[] = {
+static const struct of_device_id of_platform_serial_table[] = {
{ .compatible = "ns8250", .data = (void *)PORT_8250, },
{ .compatible = "ns16450", .data = (void *)PORT_16450, },
{ .compatible = "ns16550a", .data = (void *)PORT_16550A, },
@@ -344,7 +346,6 @@ static struct of_device_id of_platform_serial_table[] = {
{ .compatible = "ibm,qpace-nwp-serial",
.data = (void *)PORT_NWPSERIAL, },
#endif
- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
{ /* end of list */ },
};
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 10256fa04b40..7f49172ccd86 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1654,11 +1654,6 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.type = PORT_OMAP;
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
- up->wakeirq = wakeirq;
- if (!up->wakeirq)
- dev_info(up->port.dev, "no wakeirq for uart%d\n",
- up->port.line);
-
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
@@ -1682,6 +1677,11 @@ static int serial_omap_probe(struct platform_device *pdev)
goto err_port_line;
}
+ up->wakeirq = wakeirq;
+ if (!up->wakeirq)
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
+
ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
if (ret < 0)
goto err_rs485;
@@ -1735,6 +1735,8 @@ static int serial_omap_probe(struct platform_device *pdev)
err_add_port:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_qos_remove_request(&up->pm_qos_request);
+ device_init_wakeup(up->dev, false);
err_rs485:
err_port_line:
return ret;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 8f515799c9c1..e156e39d620c 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1846,7 +1846,7 @@ static int __init pmz_register(void)
#ifdef CONFIG_PPC_PMAC
-static struct of_device_id pmz_match[] =
+static const struct of_device_id pmz_match[] =
{
{
.name = "ch-a",
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index d5d062694bd3..9becba654892 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -824,7 +824,7 @@ static const struct dev_pm_ops serial_pxa_pm_ops = {
};
#endif
-static struct of_device_id serial_pxa_dt_ids[] = {
+static const struct of_device_id serial_pxa_dt_ids[] = {
{ .compatible = "mrvl,pxa-uart", },
{ .compatible = "mrvl,mmp-uart", },
{}
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index cf08876922f1..a0ae942d9562 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1068,8 +1068,9 @@ static int s3c64xx_serial_startup(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
ufcon = rd_regl(port, S3C2410_UFCON);
- ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX |
- S5PV210_UFCON_RXTRIG8;
+ ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
+ if (!uart_console(port))
+ ufcon |= S3C2410_UFCON_RESETTX;
wr_regl(port, S3C2410_UFCON, ufcon);
enable_rx_pio(ourport);
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index df9a384dfbda..468354ef7baa 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -829,16 +829,32 @@ static void sc16is7xx_set_termios(struct uart_port *port,
}
static int sc16is7xx_config_rs485(struct uart_port *port,
- struct serial_rs485 *rs485)
+ struct serial_rs485 *rs485)
{
- if (port->rs485.flags & SER_RS485_ENABLED)
- sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
- SC16IS7XX_EFCR_AUTO_RS485_BIT,
- SC16IS7XX_EFCR_AUTO_RS485_BIT);
- else
- sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
- SC16IS7XX_EFCR_AUTO_RS485_BIT,
- 0);
+ const u32 mask = SC16IS7XX_EFCR_AUTO_RS485_BIT |
+ SC16IS7XX_EFCR_RTS_INVERT_BIT;
+ u32 efcr = 0;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ bool rts_during_rx, rts_during_tx;
+
+ rts_during_rx = rs485->flags & SER_RS485_RTS_AFTER_SEND;
+ rts_during_tx = rs485->flags & SER_RS485_RTS_ON_SEND;
+
+ efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
+
+ if (!rts_during_rx && rts_during_tx)
+ /* default */;
+ else if (rts_during_rx && !rts_during_tx)
+ efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
+ else
+ dev_err(port->dev,
+ "unsupported RTS signalling on_send:%d after_send:%d - exactly one of RS485 RTS flags should be set\n",
+ rts_during_tx, rts_during_rx);
+ }
+
+ sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
+
port->rs485 = *rs485;
return 0;
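To make the new flag handling concrete, a sketch of an RS-485 configuration that the code above accepts with the default polarity (RTS driven during transmission); user space would normally hand this to the driver via the TIOCSRS485 ioctl:

    #include <linux/serial.h>

    /* Sketch: RS-485 with RTS asserted while sending, released afterwards. */
    static const struct serial_rs485 example_rs485 = {
            .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
    };
    /* For this combination the code above sets SC16IS7XX_EFCR_AUTO_RS485_BIT
     * and leaves SC16IS7XX_EFCR_RTS_INVERT_BIT clear. */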
@@ -903,9 +919,11 @@ static void sc16is7xx_shutdown(struct uart_port *port)
/* Disable all interrupts */
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0);
/* Disable TX/RX */
- sc16is7xx_port_write(port, SC16IS7XX_EFCR_REG,
- SC16IS7XX_EFCR_RXDISABLE_BIT |
- SC16IS7XX_EFCR_TXDISABLE_BIT);
+ sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
+ SC16IS7XX_EFCR_RXDISABLE_BIT |
+ SC16IS7XX_EFCR_TXDISABLE_BIT,
+ SC16IS7XX_EFCR_RXDISABLE_BIT |
+ SC16IS7XX_EFCR_TXDISABLE_BIT);
sc16is7xx_power(port, 0);
}
@@ -1048,6 +1066,7 @@ static int sc16is7xx_probe(struct device *dev,
else
return PTR_ERR(s->clk);
} else {
+ clk_prepare_enable(s->clk);
freq = clk_get_rate(s->clk);
}
@@ -1120,6 +1139,9 @@ static int sc16is7xx_probe(struct device *dev,
if (!ret)
return 0;
+ for (i = 0; i < s->uart.nr; i++)
+ uart_remove_one_port(&s->uart, &s->p[i].port);
+
mutex_destroy(&s->mutex);
#ifdef CONFIG_GPIOLIB
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 48e6e41636b2..1d5ea3964ee5 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1251,7 +1251,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = {
.support_clk_src_div = true,
};
-static struct of_device_id tegra_uart_of_match[] = {
+static const struct of_device_id tegra_uart_of_match[] = {
{
.compatible = "nvidia,tegra30-hsuart",
.data = &tegra30_uart_chip_data,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6a1055ae3437..0b7bb12dfc68 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1118,8 +1118,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg)
cprev = cnow;
}
-
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&port->delta_msr_wait, &wait);
return ret;
@@ -1766,12 +1765,12 @@ static const struct file_operations uart_proc_fops = {
#endif
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
-/*
+/**
* uart_console_write - write a console message to a serial port
* @port: the port to write the message
* @s: array of characters
* @count: number of characters in string to write
- * @write: function to write character to port
+ * @putchar: function to write character to port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
@@ -1810,6 +1809,52 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
}
/**
+ * uart_parse_earlycon - Parse earlycon options
+ * @p: ptr to 2nd field (ie., just beyond '<name>,')
+ * @iotype: ptr for decoded iotype (out)
+ * @addr: ptr for decoded mapbase/iobase (out)
+ * @options: ptr for <options> field; NULL if not present (out)
+ *
+ * Decodes earlycon kernel command line parameters of the form
+ * earlycon=<name>,io|mmio|mmio32,<addr>,<options>
+ * console=<name>,io|mmio|mmio32,<addr>,<options>
+ *
+ * The optional form
+ * earlycon=<name>,0x<addr>,<options>
+ * console=<name>,0x<addr>,<options>
+ * is also accepted; the returned @iotype will be UPIO_MEM.
+ *
+ * Returns 0 on success or -EINVAL on failure
+ */
+int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+ char **options)
+{
+ if (strncmp(p, "mmio,", 5) == 0) {
+ *iotype = UPIO_MEM;
+ p += 5;
+ } else if (strncmp(p, "mmio32,", 7) == 0) {
+ *iotype = UPIO_MEM32;
+ p += 7;
+ } else if (strncmp(p, "io,", 3) == 0) {
+ *iotype = UPIO_PORT;
+ p += 3;
+ } else if (strncmp(p, "0x", 2) == 0) {
+ *iotype = UPIO_MEM;
+ } else {
+ return -EINVAL;
+ }
+
+ *addr = simple_strtoul(p, NULL, 0);
+ p = strchr(p, ',');
+ if (p)
+ p++;
+
+ *options = p;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_parse_earlycon);
+
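As a quick illustration of what the new uart_parse_earlycon() helper returns (assuming its prototype is visible, e.g. via linux/serial_core.h; the MMIO address below is made up for the example):

    /* Sketch only: exercising uart_parse_earlycon() on a sample string. */
    static int example_parse(void)
    {
            char arg[] = "mmio32,0xfe215040,115200n8";   /* address made up */
            unsigned char iotype;
            unsigned long addr;
            char *options;

            if (uart_parse_earlycon(arg, &iotype, &addr, &options))
                    return -EINVAL;

            /* iotype == UPIO_MEM32, addr == 0xfe215040, options == "115200n8" */
            return 0;
    }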
+/**
* uart_parse_options - Parse serial port baud/parity/bits/flow control.
* @options: pointer to option string
* @baud: pointer to an 'int' variable for the baud rate.
@@ -2637,6 +2682,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
state->pm_state = UART_PM_STATE_UNDEFINED;
uport->cons = drv->cons;
+ uport->minor = drv->tty_driver->minor_start + uport->line;
/*
* If this port is a console, then the spinlock is already
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index a38596c5194e..0ec756c62bcf 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -48,9 +48,6 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
int value_array[UART_GPIO_MAX];
unsigned int count = 0;
- if (IS_ERR_OR_NULL(gpios))
- return;
-
for (i = 0; i < UART_GPIO_MAX; i++)
if (!IS_ERR_OR_NULL(gpios->gpio[i]) &&
mctrl_gpios_desc[i].dir_out) {
@@ -65,10 +62,7 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
- if (!IS_ERR_OR_NULL(gpios) && !IS_ERR_OR_NULL(gpios->gpio[gidx]))
- return gpios->gpio[gidx];
- else
- return NULL;
+ return gpios->gpio[gidx];
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
@@ -76,15 +70,8 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
{
enum mctrl_gpio_idx i;
- /*
- * return it unchanged if the structure is not allocated
- */
- if (IS_ERR_OR_NULL(gpios))
- return *mctrl;
-
for (i = 0; i < UART_GPIO_MAX; i++) {
- if (!IS_ERR_OR_NULL(gpios->gpio[i]) &&
- !mctrl_gpios_desc[i].dir_out) {
+ if (gpios->gpio[i] && !mctrl_gpios_desc[i].dir_out) {
if (gpiod_get_value(gpios->gpio[i]))
*mctrl |= mctrl_gpios_desc[i].mctrl;
else
@@ -100,34 +87,26 @@ struct mctrl_gpios *mctrl_gpio_init(struct device *dev, unsigned int idx)
{
struct mctrl_gpios *gpios;
enum mctrl_gpio_idx i;
- int err;
gpios = devm_kzalloc(dev, sizeof(*gpios), GFP_KERNEL);
if (!gpios)
return ERR_PTR(-ENOMEM);
for (i = 0; i < UART_GPIO_MAX; i++) {
- gpios->gpio[i] = devm_gpiod_get_index(dev,
- mctrl_gpios_desc[i].name,
- idx);
-
- /*
- * The GPIOs are maybe not all filled,
- * this is not an error.
- */
- if (IS_ERR_OR_NULL(gpios->gpio[i]))
- continue;
+ enum gpiod_flags flags;
if (mctrl_gpios_desc[i].dir_out)
- err = gpiod_direction_output(gpios->gpio[i], 0);
+ flags = GPIOD_OUT_LOW;
else
- err = gpiod_direction_input(gpios->gpio[i]);
- if (err) {
- dev_dbg(dev, "Unable to set direction for %s GPIO",
- mctrl_gpios_desc[i].name);
- devm_gpiod_put(dev, gpios->gpio[i]);
- gpios->gpio[i] = NULL;
- }
+ flags = GPIOD_IN;
+
+ gpios->gpio[i] =
+ devm_gpiod_get_index_optional(dev,
+ mctrl_gpios_desc[i].name,
+ idx, flags);
+
+ if (IS_ERR(gpios->gpio[i]))
+ return ERR_CAST(gpios->gpio[i]);
}
return gpios;
@@ -138,9 +117,6 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
- if (IS_ERR_OR_NULL(gpios))
- return;
-
for (i = 0; i < UART_GPIO_MAX; i++)
if (!IS_ERR_OR_NULL(gpios->gpio[i]))
devm_gpiod_put(dev, gpios->gpio[i]);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 5b50c792ad5f..e7d6566fafaf 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -844,14 +844,32 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
struct plat_sci_reg *reg;
- int copied = 0;
+ int copied = 0, offset;
+ u16 status, bit;
+
+ switch (port->type) {
+ case PORT_SCIF:
+ case PORT_HSCIF:
+ offset = SCLSR;
+ break;
+ case PORT_SCIFA:
+ case PORT_SCIFB:
+ offset = SCxSR;
+ break;
+ default:
+ return 0;
+ }
- reg = sci_getreg(port, SCLSR);
+ reg = sci_getreg(port, offset);
if (!reg->size)
return 0;
- if ((serial_port_in(port, SCLSR) & (1 << s->overrun_bit))) {
- serial_port_out(port, SCLSR, 0);
+ status = serial_port_in(port, offset);
+ bit = 1 << s->overrun_bit;
+
+ if (status & bit) {
+ status &= ~bit;
+ serial_port_out(port, offset, status);
port->icount.overrun++;
@@ -996,16 +1014,24 @@ static inline unsigned long port_rx_irq_mask(struct uart_port *port)
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
- unsigned short ssr_status, scr_status, err_enabled;
- unsigned short slr_status = 0;
+ unsigned short ssr_status, scr_status, err_enabled, orer_status = 0;
struct uart_port *port = ptr;
struct sci_port *s = to_sci_port(port);
irqreturn_t ret = IRQ_NONE;
ssr_status = serial_port_in(port, SCxSR);
scr_status = serial_port_in(port, SCSCR);
- if (port->type == PORT_SCIF || port->type == PORT_HSCIF)
- slr_status = serial_port_in(port, SCLSR);
+ switch (port->type) {
+ case PORT_SCIF:
+ case PORT_HSCIF:
+ orer_status = serial_port_in(port, SCLSR);
+ break;
+ case PORT_SCIFA:
+ case PORT_SCIFB:
+ orer_status = ssr_status;
+ break;
+ }
+
err_enabled = scr_status & port_rx_irq_mask(port);
/* Tx Interrupt */
@@ -1033,10 +1059,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
ret = sci_br_interrupt(irq, ptr);
/* Overrun Interrupt */
- if (port->type == PORT_SCIF || port->type == PORT_HSCIF) {
- if (slr_status & 0x01)
- sci_handle_fifo_overrun(port);
- }
+ if (orer_status & (1 << s->overrun_bit))
+ sci_handle_fifo_overrun(port);
return ret;
}
@@ -1967,18 +1991,40 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
- * Calculate delay for 1.5 DMA buffers: see
- * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
- * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+ * Calculate the delay for 2 DMA buffers (4 FIFO sizes).
+ * See drivers/serial/serial_core.c::uart_update_timeout(). With 10
+ * bits (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
* calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
- * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
- * sizes), but it has been found out experimentally, that this is not
- * enough: the driver too often needlessly runs on a DMA timeout. 20ms
- * as a minimum seem to work perfectly.
+ * Then below we calculate 5 jiffies (20ms) for 2 DMA buffers (4 FIFO
+ * sizes), but when performing a faster transfer, the value obtained
+ * by this formula may not be enough. Therefore, if the value is
+ * smaller than 20 msec, 20 msec is used as the DMA timeout.
*/
if (s->chan_rx) {
- s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
- port->fifosize / 2;
+ unsigned int bits;
+
+ /* byte size and parity */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ bits = 7;
+ break;
+ case CS6:
+ bits = 8;
+ break;
+ case CS7:
+ bits = 9;
+ break;
+ default:
+ bits = 10;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ bits++;
+ if (termios->c_cflag & PARENB)
+ bits++;
+ s->rx_timeout = DIV_ROUND_UP((s->buf_len_rx * 2 * bits * HZ) /
+ (baud / 10), 10);
dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
s->rx_timeout * 1000 / HZ, port->timeout);
if (s->rx_timeout < msecs_to_jiffies(20))
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 27ed0e960880..9de3eabe5737 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -1269,7 +1269,7 @@ static struct uart_driver sirfsoc_uart_drv = {
#endif
};
-static struct of_device_id sirfsoc_uart_ids[] = {
+static const struct of_device_id sirfsoc_uart_ids[] = {
{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
{ .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index bca975f5093b..582d2729f700 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -493,6 +493,8 @@ static int sprd_verify_port(struct uart_port *port,
return -EINVAL;
if (port->irq != ser->irq)
return -EINVAL;
+ if (port->iotype != ser->io_type)
+ return -EINVAL;
return 0;
}
@@ -707,7 +709,7 @@ static int sprd_probe(struct platform_device *pdev)
up->dev = &pdev->dev;
up->line = index;
up->type = PORT_SPRD;
- up->iotype = SERIAL_IO_PORT;
+ up->iotype = UPIO_MEM;
up->uartclk = SPRD_DEF_RATE;
up->fifosize = SPRD_FIFO_SIZE;
up->ops = &serial_sprd_ops;
@@ -754,6 +756,7 @@ static int sprd_probe(struct platform_device *pdev)
return ret;
}
+#ifdef CONFIG_PM_SLEEP
static int sprd_suspend(struct device *dev)
{
struct sprd_uart_port *sup = dev_get_drvdata(dev);
@@ -771,6 +774,7 @@ static int sprd_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(sprd_pm_ops, sprd_suspend, sprd_resume);
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 712b03a076b8..d625664ce1b5 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -720,7 +720,7 @@ static struct asc_port *asc_of_get_asc_port(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id asc_match[] = {
+static const struct of_device_id asc_match[] = {
{ .compatible = "st,asc", },
{},
};
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 189f52e3111f..b1c6bd3d483f 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -622,7 +622,7 @@ static int ulite_release(struct device *dev)
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
-static struct of_device_id ulite_of_match[] = {
+static const struct of_device_id ulite_of_match[] = {
{ .compatible = "xlnx,opb-uartlite-1.00.b", },
{ .compatible = "xlnx,xps-uartlite-1.00.a", },
{}
@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
static int ulite_probe(struct platform_device *pdev)
{
- struct resource *res, *res2;
+ struct resource *res;
+ int irq;
int id = pdev->id;
#ifdef CONFIG_OF
const __be32 *prop;
@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res2)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
- return ulite_assign(&pdev->dev, id, res->start, res2->start);
+ return ulite_assign(&pdev->dev, id, res->start, irq);
}
static int ulite_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 14d10fcfd210..7d2532b23969 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1473,7 +1473,7 @@ static int ucc_uart_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id ucc_uart_match[] = {
+static const struct of_device_id ucc_uart_match[] = {
{
.type = "serial",
.compatible = "ucc_uart",
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index cff531a51a78..3ddbac767db3 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -37,10 +37,7 @@
#define CDNS_UART_MINOR 0 /* works best with devtmpfs */
#define CDNS_UART_NR_PORTS 2
#define CDNS_UART_FIFO_SIZE 64 /* FIFO size */
-#define CDNS_UART_REGISTER_SPACE 0xFFF
-
-#define cdns_uart_readl(offset) ioread32(port->membase + offset)
-#define cdns_uart_writel(val, offset) iowrite32(val, port->membase + offset)
+#define CDNS_UART_REGISTER_SPACE 0x1000
/* Rx Trigger level */
static int rx_trigger_level = 56;
@@ -195,7 +192,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
/* Read the interrupt status register to determine which
* interrupt(s) is/are active.
*/
- isrstatus = cdns_uart_readl(CDNS_UART_ISR_OFFSET);
+ isrstatus = readl(port->membase + CDNS_UART_ISR_OFFSET);
/*
* There is no hardware break detection, so we interpret framing
@@ -203,14 +200,15 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
* there's another non-zero byte at the end of the sequence.
*/
if (isrstatus & CDNS_UART_IXR_FRAMING) {
- while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) &
+ while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
CDNS_UART_SR_RXEMPTY)) {
- if (!cdns_uart_readl(CDNS_UART_FIFO_OFFSET)) {
+ if (!readl(port->membase + CDNS_UART_FIFO_OFFSET)) {
port->read_status_mask |= CDNS_UART_IXR_BRK;
isrstatus &= ~CDNS_UART_IXR_FRAMING;
}
}
- cdns_uart_writel(CDNS_UART_IXR_FRAMING, CDNS_UART_ISR_OFFSET);
+ writel(CDNS_UART_IXR_FRAMING,
+ port->membase + CDNS_UART_ISR_OFFSET);
}
/* drop byte with parity error if IGNPAR specified */
@@ -223,9 +221,9 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
if ((isrstatus & CDNS_UART_IXR_TOUT) ||
(isrstatus & CDNS_UART_IXR_RXTRIG)) {
/* Receive Timeout Interrupt */
- while ((cdns_uart_readl(CDNS_UART_SR_OFFSET) &
- CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) {
- data = cdns_uart_readl(CDNS_UART_FIFO_OFFSET);
+ while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+ CDNS_UART_SR_RXEMPTY)) {
+ data = readl(port->membase + CDNS_UART_FIFO_OFFSET);
/* Non-NULL byte after BREAK is garbage (99%) */
if (data && (port->read_status_mask &
@@ -275,8 +273,8 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
/* Dispatch an appropriate handler */
if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY) {
if (uart_circ_empty(&port->state->xmit)) {
- cdns_uart_writel(CDNS_UART_IXR_TXEMPTY,
- CDNS_UART_IDR_OFFSET);
+ writel(CDNS_UART_IXR_TXEMPTY,
+ port->membase + CDNS_UART_IDR_OFFSET);
} else {
numbytes = port->fifosize;
/* Break if no more data available in the UART buffer */
@@ -287,9 +285,9 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
* and write it to the cdns_uart's TX_FIFO
* register.
*/
- cdns_uart_writel(
- port->state->xmit.buf[port->state->xmit.
- tail], CDNS_UART_FIFO_OFFSET);
+ writel(port->state->xmit.buf[
+ port->state->xmit.tail],
+ port->membase + CDNS_UART_FIFO_OFFSET);
port->icount.tx++;
@@ -307,7 +305,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
}
}
- cdns_uart_writel(isrstatus, CDNS_UART_ISR_OFFSET);
+ writel(isrstatus, port->membase + CDNS_UART_ISR_OFFSET);
/* be sure to release the lock and tty before leaving */
spin_unlock_irqrestore(&port->lock, flags);
@@ -397,14 +395,14 @@ static unsigned int cdns_uart_set_baud_rate(struct uart_port *port,
&div8);
/* Write new divisors to hardware */
- mreg = cdns_uart_readl(CDNS_UART_MR_OFFSET);
+ mreg = readl(port->membase + CDNS_UART_MR_OFFSET);
if (div8)
mreg |= CDNS_UART_MR_CLKSEL;
else
mreg &= ~CDNS_UART_MR_CLKSEL;
- cdns_uart_writel(mreg, CDNS_UART_MR_OFFSET);
- cdns_uart_writel(cd, CDNS_UART_BAUDGEN_OFFSET);
- cdns_uart_writel(bdiv, CDNS_UART_BAUDDIV_OFFSET);
+ writel(mreg, port->membase + CDNS_UART_MR_OFFSET);
+ writel(cd, port->membase + CDNS_UART_BAUDGEN_OFFSET);
+ writel(bdiv, port->membase + CDNS_UART_BAUDDIV_OFFSET);
cdns_uart->baud = baud;
return calc_baud;
@@ -451,9 +449,9 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
spin_lock_irqsave(&cdns_uart->port->lock, flags);
/* Disable the TX and RX to set baud rate */
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
@@ -478,11 +476,11 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
spin_lock_irqsave(&cdns_uart->port->lock, flags);
/* Set TX/RX Reset */
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
- while (cdns_uart_readl(CDNS_UART_CR_OFFSET) &
+ while (readl(port->membase + CDNS_UART_CR_OFFSET) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
@@ -491,11 +489,11 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
* enable bit and RX enable bit to enable the transmitter and
* receiver.
*/
- cdns_uart_writel(rx_timeout, CDNS_UART_RXTOUT_OFFSET);
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
@@ -517,14 +515,14 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port))
return;
- status = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ status = readl(port->membase + CDNS_UART_CR_OFFSET);
/* Set the TX enable bit and clear the TX disable bit to enable the
* transmitter.
*/
- cdns_uart_writel((status & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
- CDNS_UART_CR_OFFSET);
+ writel((status & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
+ port->membase + CDNS_UART_CR_OFFSET);
- while (numbytes-- && ((cdns_uart_readl(CDNS_UART_SR_OFFSET) &
+ while (numbytes-- && ((readl(port->membase + CDNS_UART_SR_OFFSET) &
CDNS_UART_SR_TXFULL)) != CDNS_UART_SR_TXFULL) {
/* Break if no more data available in the UART buffer */
if (uart_circ_empty(&port->state->xmit))
@@ -533,9 +531,8 @@ static void cdns_uart_start_tx(struct uart_port *port)
/* Get the data from the UART circular buffer and
* write it to the cdns_uart's TX_FIFO register.
*/
- cdns_uart_writel(
- port->state->xmit.buf[port->state->xmit.tail],
- CDNS_UART_FIFO_OFFSET);
+ writel(port->state->xmit.buf[port->state->xmit.tail],
+ port->membase + CDNS_UART_FIFO_OFFSET);
port->icount.tx++;
/* Adjust the tail of the UART buffer and wrap
@@ -544,9 +541,9 @@ static void cdns_uart_start_tx(struct uart_port *port)
port->state->xmit.tail = (port->state->xmit.tail + 1) &
(UART_XMIT_SIZE - 1);
}
- cdns_uart_writel(CDNS_UART_IXR_TXEMPTY, CDNS_UART_ISR_OFFSET);
+ writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR_OFFSET);
/* Enable the TX Empty interrupt */
- cdns_uart_writel(CDNS_UART_IXR_TXEMPTY, CDNS_UART_IER_OFFSET);
+ writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER_OFFSET);
if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -560,10 +557,10 @@ static void cdns_uart_stop_tx(struct uart_port *port)
{
unsigned int regval;
- regval = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ regval = readl(port->membase + CDNS_UART_CR_OFFSET);
regval |= CDNS_UART_CR_TX_DIS;
/* Disable the transmitter */
- cdns_uart_writel(regval, CDNS_UART_CR_OFFSET);
+ writel(regval, port->membase + CDNS_UART_CR_OFFSET);
}
/**
@@ -574,10 +571,10 @@ static void cdns_uart_stop_rx(struct uart_port *port)
{
unsigned int regval;
- regval = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ regval = readl(port->membase + CDNS_UART_CR_OFFSET);
regval |= CDNS_UART_CR_RX_DIS;
/* Disable the receiver */
- cdns_uart_writel(regval, CDNS_UART_CR_OFFSET);
+ writel(regval, port->membase + CDNS_UART_CR_OFFSET);
}
/**
@@ -590,7 +587,8 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
{
unsigned int status;
- status = cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY;
+ status = readl(port->membase + CDNS_UART_SR_OFFSET) &
+ CDNS_UART_SR_TXEMPTY;
return status ? TIOCSER_TEMT : 0;
}
@@ -607,15 +605,15 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
spin_lock_irqsave(&port->lock, flags);
- status = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ status = readl(port->membase + CDNS_UART_CR_OFFSET);
if (ctl == -1)
- cdns_uart_writel(CDNS_UART_CR_STARTBRK | status,
- CDNS_UART_CR_OFFSET);
+ writel(CDNS_UART_CR_STARTBRK | status,
+ port->membase + CDNS_UART_CR_OFFSET);
else {
if ((status & CDNS_UART_CR_STOPBRK) == 0)
- cdns_uart_writel(CDNS_UART_CR_STOPBRK | status,
- CDNS_UART_CR_OFFSET);
+ writel(CDNS_UART_CR_STOPBRK | status,
+ port->membase + CDNS_UART_CR_OFFSET);
}
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -638,17 +636,18 @@ static void cdns_uart_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
/* Wait for the transmit FIFO to empty before making changes */
- if (!(cdns_uart_readl(CDNS_UART_CR_OFFSET) & CDNS_UART_CR_TX_DIS)) {
- while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) &
+ if (!(readl(port->membase + CDNS_UART_CR_OFFSET) &
+ CDNS_UART_CR_TX_DIS)) {
+ while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
CDNS_UART_SR_TXEMPTY)) {
cpu_relax();
}
}
/* Disable the TX and RX to set baud rate */
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
/*
* Min baud rate = 6bps and Max Baud Rate is 10Mbps for 100Mhz clk
@@ -667,20 +666,20 @@ static void cdns_uart_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
/* Set TX/RX Reset */
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
/*
* Clear the RX disable and TX disable bits and then set the TX enable
* bit and RX enable bit to enable the transmitter and receiver.
*/
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
- cdns_uart_writel(rx_timeout, CDNS_UART_RXTOUT_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
@@ -700,7 +699,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
CDNS_UART_IXR_TOUT | CDNS_UART_IXR_PARITY |
CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN;
- mode_reg = cdns_uart_readl(CDNS_UART_MR_OFFSET);
+ mode_reg = readl(port->membase + CDNS_UART_MR_OFFSET);
/* Handling Data Size */
switch (termios->c_cflag & CSIZE) {
@@ -741,7 +740,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
cval |= CDNS_UART_MR_PARITY_NONE;
}
cval |= mode_reg & 1;
- cdns_uart_writel(cval, CDNS_UART_MR_OFFSET);
+ writel(cval, port->membase + CDNS_UART_MR_OFFSET);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -762,52 +761,53 @@ static int cdns_uart_startup(struct uart_port *port)
return retval;
/* Disable the TX and RX */
- cdns_uart_writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
- CDNS_UART_CR_OFFSET);
+ writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
+ port->membase + CDNS_UART_CR_OFFSET);
/* Set the Control Register with TX/RX Enable, TX/RX Reset,
* no break chars.
*/
- cdns_uart_writel(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST,
- CDNS_UART_CR_OFFSET);
+ writel(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST,
+ port->membase + CDNS_UART_CR_OFFSET);
- status = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ status = readl(port->membase + CDNS_UART_CR_OFFSET);
/* Clear the RX disable and TX disable bits and then set the TX enable
* bit and RX enable bit to enable the transmitter and receiver.
*/
- cdns_uart_writel((status & ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS))
+ writel((status & ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS))
| (CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN |
- CDNS_UART_CR_STOPBRK), CDNS_UART_CR_OFFSET);
+ CDNS_UART_CR_STOPBRK),
+ port->membase + CDNS_UART_CR_OFFSET);
/* Set the Mode Register with normal mode,8 data bits,1 stop bit,
* no parity.
*/
- cdns_uart_writel(CDNS_UART_MR_CHMODE_NORM | CDNS_UART_MR_STOPMODE_1_BIT
+ writel(CDNS_UART_MR_CHMODE_NORM | CDNS_UART_MR_STOPMODE_1_BIT
| CDNS_UART_MR_PARITY_NONE | CDNS_UART_MR_CHARLEN_8_BIT,
- CDNS_UART_MR_OFFSET);
+ port->membase + CDNS_UART_MR_OFFSET);
/*
* Set the RX FIFO Trigger level to use most of the FIFO, but it
* can be tuned with a module parameter
*/
- cdns_uart_writel(rx_trigger_level, CDNS_UART_RXWM_OFFSET);
+ writel(rx_trigger_level, port->membase + CDNS_UART_RXWM_OFFSET);
/*
* Receive Timeout register is enabled but it
* can be tuned with a module parameter
*/
- cdns_uart_writel(rx_timeout, CDNS_UART_RXTOUT_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
/* Clear out any pending interrupts before enabling them */
- cdns_uart_writel(cdns_uart_readl(CDNS_UART_ISR_OFFSET),
- CDNS_UART_ISR_OFFSET);
+ writel(readl(port->membase + CDNS_UART_ISR_OFFSET),
+ port->membase + CDNS_UART_ISR_OFFSET);
/* Set the Interrupt Registers with desired interrupts */
- cdns_uart_writel(CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_PARITY |
+ writel(CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_PARITY |
CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN |
CDNS_UART_IXR_RXTRIG | CDNS_UART_IXR_TOUT,
- CDNS_UART_IER_OFFSET);
+ port->membase + CDNS_UART_IER_OFFSET);
return retval;
}
@@ -821,12 +821,12 @@ static void cdns_uart_shutdown(struct uart_port *port)
int status;
/* Disable interrupts */
- status = cdns_uart_readl(CDNS_UART_IMR_OFFSET);
- cdns_uart_writel(status, CDNS_UART_IDR_OFFSET);
+ status = readl(port->membase + CDNS_UART_IMR_OFFSET);
+ writel(status, port->membase + CDNS_UART_IDR_OFFSET);
/* Disable the TX and RX */
- cdns_uart_writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
- CDNS_UART_CR_OFFSET);
+ writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
+ port->membase + CDNS_UART_CR_OFFSET);
free_irq(port->irq, port);
}
@@ -928,7 +928,7 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 val;
- val = cdns_uart_readl(CDNS_UART_MODEMCR_OFFSET);
+ val = readl(port->membase + CDNS_UART_MODEMCR_OFFSET);
val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
@@ -937,7 +937,7 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_DTR)
val |= CDNS_UART_MODEMCR_DTR;
- cdns_uart_writel(val, CDNS_UART_MODEMCR_OFFSET);
+ writel(val, port->membase + CDNS_UART_MODEMCR_OFFSET);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -947,17 +947,18 @@ static int cdns_uart_poll_get_char(struct uart_port *port)
int c;
/* Disable all interrupts */
- imr = cdns_uart_readl(CDNS_UART_IMR_OFFSET);
- cdns_uart_writel(imr, CDNS_UART_IDR_OFFSET);
+ imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
+ writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
/* Check if FIFO is empty */
- if (cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_RXEMPTY)
+ if (readl(port->membase + CDNS_UART_SR_OFFSET) & CDNS_UART_SR_RXEMPTY)
c = NO_POLL_CHAR;
else /* Read a character */
- c = (unsigned char) cdns_uart_readl(CDNS_UART_FIFO_OFFSET);
+ c = (unsigned char) readl(
+ port->membase + CDNS_UART_FIFO_OFFSET);
/* Enable interrupts */
- cdns_uart_writel(imr, CDNS_UART_IER_OFFSET);
+ writel(imr, port->membase + CDNS_UART_IER_OFFSET);
return c;
}
@@ -967,22 +968,24 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
u32 imr;
/* Disable all interrupts */
- imr = cdns_uart_readl(CDNS_UART_IMR_OFFSET);
- cdns_uart_writel(imr, CDNS_UART_IDR_OFFSET);
+ imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
+ writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
/* Wait until FIFO is empty */
- while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY))
+ while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+ CDNS_UART_SR_TXEMPTY))
cpu_relax();
/* Write a character */
- cdns_uart_writel(c, CDNS_UART_FIFO_OFFSET);
+ writel(c, port->membase + CDNS_UART_FIFO_OFFSET);
/* Wait until FIFO is empty */
- while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY))
+ while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+ CDNS_UART_SR_TXEMPTY))
cpu_relax();
/* Enable interrupts */
- cdns_uart_writel(imr, CDNS_UART_IER_OFFSET);
+ writel(imr, port->membase + CDNS_UART_IER_OFFSET);
return;
}
@@ -1010,7 +1013,7 @@ static struct uart_ops cdns_uart_ops = {
#endif
};
-static struct uart_port cdns_uart_port[2];
+static struct uart_port cdns_uart_port[CDNS_UART_NR_PORTS];
/**
* cdns_uart_get_port - Configure the port from platform device resource info
@@ -1038,7 +1041,6 @@ static struct uart_port *cdns_uart_get_port(int id)
/* At this point, we've got an empty uart_port struct, initialize it */
spin_lock_init(&port->lock);
port->membase = NULL;
- port->iobase = 1; /* mark port in use */
port->irq = 0;
port->type = PORT_UNKNOWN;
port->iotype = UPIO_MEM32;
@@ -1057,8 +1059,8 @@ static struct uart_port *cdns_uart_get_port(int id)
*/
static void cdns_uart_console_wait_tx(struct uart_port *port)
{
- while ((cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY)
- != CDNS_UART_SR_TXEMPTY)
+ while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+ CDNS_UART_SR_TXEMPTY))
barrier();
}
@@ -1070,7 +1072,7 @@ static void cdns_uart_console_wait_tx(struct uart_port *port)
static void cdns_uart_console_putchar(struct uart_port *port, int ch)
{
cdns_uart_console_wait_tx(port);
- cdns_uart_writel(ch, CDNS_UART_FIFO_OFFSET);
+ writel(ch, port->membase + CDNS_UART_FIFO_OFFSET);
}
static void cdns_early_write(struct console *con, const char *s, unsigned n)
@@ -1112,24 +1114,24 @@ static void cdns_uart_console_write(struct console *co, const char *s,
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
- imr = cdns_uart_readl(CDNS_UART_IMR_OFFSET);
- cdns_uart_writel(imr, CDNS_UART_IDR_OFFSET);
+ imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
+ writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
/*
* Make sure that the tx part is enabled. Set the TX enable bit and
* clear the TX disable bit to enable the transmitter.
*/
- ctrl = cdns_uart_readl(CDNS_UART_CR_OFFSET);
- cdns_uart_writel((ctrl & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
- CDNS_UART_CR_OFFSET);
+ ctrl = readl(port->membase + CDNS_UART_CR_OFFSET);
+ writel((ctrl & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
+ port->membase + CDNS_UART_CR_OFFSET);
uart_console_write(port, s, count, cdns_uart_console_putchar);
cdns_uart_console_wait_tx(port);
- cdns_uart_writel(ctrl, CDNS_UART_CR_OFFSET);
+ writel(ctrl, port->membase + CDNS_UART_CR_OFFSET);
/* restore interrupt state */
- cdns_uart_writel(imr, CDNS_UART_IER_OFFSET);
+ writel(imr, port->membase + CDNS_UART_IER_OFFSET);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -1153,8 +1155,9 @@ static int __init cdns_uart_console_setup(struct console *co, char *options)
if (co->index < 0 || co->index >= CDNS_UART_NR_PORTS)
return -EINVAL;
- if (!port->mapbase) {
- pr_debug("console on ttyPS%i not present\n", co->index);
+ if (!port->membase) {
+ pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
+ co->index);
return -ENODEV;
}
@@ -1240,13 +1243,14 @@ static int cdns_uart_suspend(struct device *device)
spin_lock_irqsave(&port->lock, flags);
/* Empty the receive FIFO 1st before making changes */
- while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) &
+ while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
CDNS_UART_SR_RXEMPTY))
- cdns_uart_readl(CDNS_UART_FIFO_OFFSET);
+ readl(port->membase + CDNS_UART_FIFO_OFFSET);
/* set RX trigger level to 1 */
- cdns_uart_writel(1, CDNS_UART_RXWM_OFFSET);
+ writel(1, port->membase + CDNS_UART_RXWM_OFFSET);
/* disable RX timeout interrupts */
- cdns_uart_writel(CDNS_UART_IXR_TOUT, CDNS_UART_IDR_OFFSET);
+ writel(CDNS_UART_IXR_TOUT,
+ port->membase + CDNS_UART_IDR_OFFSET);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1285,28 +1289,30 @@ static int cdns_uart_resume(struct device *device)
spin_lock_irqsave(&port->lock, flags);
/* Set TX/RX Reset */
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
- while (cdns_uart_readl(CDNS_UART_CR_OFFSET) &
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ while (readl(port->membase + CDNS_UART_CR_OFFSET) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
/* restore rx timeout value */
- cdns_uart_writel(rx_timeout, CDNS_UART_RXTOUT_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
/* Enable Tx/Rx */
- ctrl_reg = cdns_uart_readl(CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
- cdns_uart_writel(ctrl_reg, CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
spin_unlock_irqrestore(&port->lock, flags);
} else {
spin_lock_irqsave(&port->lock, flags);
/* restore original rx trigger level */
- cdns_uart_writel(rx_trigger_level, CDNS_UART_RXWM_OFFSET);
+ writel(rx_trigger_level,
+ port->membase + CDNS_UART_RXWM_OFFSET);
/* enable RX timeout interrupt */
- cdns_uart_writel(CDNS_UART_IXR_TOUT, CDNS_UART_IER_OFFSET);
+ writel(CDNS_UART_IXR_TOUT,
+ port->membase + CDNS_UART_IER_OFFSET);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1325,9 +1331,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
*/
static int cdns_uart_probe(struct platform_device *pdev)
{
- int rc, id;
+ int rc, id, irq;
struct uart_port *port;
- struct resource *res, *res2;
+ struct resource *res;
struct cdns_uart *cdns_uart_data;
cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
@@ -1374,9 +1380,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
goto err_out_clk_disable;
}
- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res2) {
- rc = -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ rc = -ENXIO;
goto err_out_clk_disable;
}
@@ -1405,7 +1411,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
* and triggers invocation of the config_port() entry point.
*/
port->mapbase = res->start;
- port->irq = res2->start;
+ port->irq = irq;
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
@@ -1458,7 +1464,7 @@ static int cdns_uart_remove(struct platform_device *pdev)
}
/* Match table for of_platform binding */
-static struct of_device_id cdns_uart_of_match[] = {
+static const struct of_device_id cdns_uart_of_match[] = {
{ .compatible = "xlnx,xuartps", },
{ .compatible = "cdns,uart-r1p8", },
{}
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 75661641f5fe..2f78b77f0f81 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -37,6 +37,28 @@
#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
+/*
+ * If all tty flip buffers have been processed by flush_to_ldisc() or
+ * dropped by tty_buffer_flush(), check if the linked pty has been closed.
+ * If so, wake the reader/poll to process the close.
+ */
+static inline void check_other_closed(struct tty_struct *tty)
+{
+ unsigned long flags, old;
+
+ /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
+ for (flags = ACCESS_ONCE(tty->flags);
+ test_bit(TTY_OTHER_CLOSED, &flags);
+ ) {
+ old = flags;
+ __set_bit(TTY_OTHER_DONE, &flags);
+ flags = cmpxchg(&tty->flags, old, flags);
+ if (old == flags) {
+ wake_up_interruptible(&tty->read_wait);
+ break;
+ }
+ }
+}
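The cmpxchg loop in check_other_closed() above implements a lock-free "read the flags, set an extra bit, retry if they changed underneath us" transition. A rough user-space analogue using C11 atomics (the flag names and bit values here are made up for illustration, not the kernel's) looks like this:

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_CLOSED	(1UL << 0)	/* stands in for TTY_OTHER_CLOSED */
#define FLAG_DONE	(1UL << 1)	/* stands in for TTY_OTHER_DONE */

static _Atomic unsigned long flags = FLAG_CLOSED;

static void mark_done_if_closed(void)
{
	unsigned long old = atomic_load(&flags);

	/* only attempt the transition while the CLOSED bit is set */
	while (old & FLAG_CLOSED) {
		/*
		 * On success the stored value was still 'old' and now has
		 * DONE set; on failure 'old' is refreshed and we retry.
		 */
		if (atomic_compare_exchange_weak(&flags, &old,
						 old | FLAG_DONE))
			break;	/* the kernel code wakes the reader here */
	}
}

int main(void)
{
	mark_done_if_closed();
	printf("flags = %#lx\n", (unsigned long)atomic_load(&flags)); /* 0x3 */
	return 0;
}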
/**
* tty_buffer_lock_exclusive - gain exclusive access to buffer
@@ -229,6 +251,8 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
if (ld && ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
+ check_other_closed(tty);
+
atomic_dec(&buf->priority);
mutex_unlock(&buf->lock);
}
@@ -471,8 +495,10 @@ static void flush_to_ldisc(struct work_struct *work)
smp_rmb();
count = head->commit - head->read;
if (!count) {
- if (next == NULL)
+ if (next == NULL) {
+ check_other_closed(tty);
break;
+ }
buf->head = next;
tty_buffer_free(port, head);
continue;
@@ -489,19 +515,6 @@ static void flush_to_ldisc(struct work_struct *work)
}
/**
- * tty_flush_to_ldisc
- * @tty: tty to push
- *
- * Push the terminal flip buffers to the line discipline.
- *
- * Must not be called from IRQ context.
- */
-void tty_flush_to_ldisc(struct tty_struct *tty)
-{
- flush_work(&tty->port->buf.work);
-}
-
-/**
* tty_flip_buffer_push - terminal
* @port: tty port to push
*
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 2bb4dfc02873..e5695467598f 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1025,11 +1025,17 @@ void start_tty(struct tty_struct *tty)
}
EXPORT_SYMBOL(start_tty);
-/* We limit tty time update visibility to every 8 seconds or so. */
static void tty_update_time(struct timespec *time)
{
unsigned long sec = get_seconds();
- if (abs(sec - time->tv_sec) & ~7)
+
+ /*
+ * We only care if the two values differ in anything other than the
+ * lower three bits (i.e. every 8 seconds). If so, then we can update
+ * the time of the tty device, otherwise it could be construed as a
+ * security leak to let userspace know the exact timing of the tty.
+ */
+ if ((sec ^ time->tv_sec) & ~7)
time->tv_sec = sec;
}
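As a quick illustration of the masking trick in tty_update_time() above, this minimal user-space sketch (with arbitrarily chosen timestamps) shows that the stored time is only refreshed once the seconds counter crosses an 8-second boundary:

#include <stdio.h>

/* nonzero when the two timestamps differ above the low three bits */
static int needs_update(long sec, long tv_sec)
{
	return ((sec ^ tv_sec) & ~7L) != 0;
}

int main(void)
{
	/* 1000 -> 1003: same 8-second bucket, no update */
	printf("%d\n", needs_update(1003, 1000));	/* prints 0 */
	/* 1000 -> 1008: crosses an 8-second boundary, update */
	printf("%d\n", needs_update(1008, 1000));	/* prints 1 */
	return 0;
}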
@@ -3593,6 +3599,13 @@ static ssize_t show_cons_active(struct device *dev,
}
static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL);
+static struct attribute *cons_dev_attrs[] = {
+ &dev_attr_active.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(cons_dev);
+
static struct device *consdev;
void console_sysfs_notify(void)
@@ -3617,12 +3630,11 @@ int __init tty_init(void)
if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
panic("Couldn't register /dev/console driver\n");
- consdev = device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL,
- "console");
+ consdev = device_create_with_groups(tty_class, NULL,
+ MKDEV(TTYAUX_MAJOR, 1), NULL,
+ cons_dev_groups, "console");
if (IS_ERR(consdev))
consdev = NULL;
- else
- WARN_ON(device_create_file(consdev, &dev_attr_active) < 0);
#ifdef CONFIG_VT
vty_init(&console_fops);
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 632fc8152061..8e53fe469664 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change);
* Locking: termios_rwsem
*/
-static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
+int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
{
struct ktermios old_termios;
struct tty_ldisc *ld;
@@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
up_write(&tty->termios_rwsem);
return 0;
}
+EXPORT_SYMBOL_GPL(tty_set_termios);
/**
* set_termios - set termios values for a tty
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 6e00572cbeb9..4a24eb2b0ede 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1237,7 +1237,7 @@ static void default_attr(struct vc_data *vc)
struct rgb { u8 r; u8 g; u8 b; };
-struct rgb rgb_from_256(int i)
+static struct rgb rgb_from_256(int i)
{
struct rgb c;
if (i < 8) { /* Standard colours. */
@@ -1573,7 +1573,7 @@ static void setterm_command(struct vc_data *vc)
case 11: /* set bell duration in msec */
if (vc->vc_npar >= 1)
vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
- vc->vc_par[1] * HZ / 1000 : 0;
+ msecs_to_jiffies(vc->vc_par[1]) : 0;
else
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
break;
@@ -3041,17 +3041,24 @@ static ssize_t show_tty_active(struct device *dev,
}
static DEVICE_ATTR(active, S_IRUGO, show_tty_active, NULL);
+static struct attribute *vt_dev_attrs[] = {
+ &dev_attr_active.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(vt_dev);
+
int __init vty_init(const struct file_operations *console_fops)
{
cdev_init(&vc0_cdev, console_fops);
if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
panic("Couldn't register /dev/tty0 driver\n");
- tty0dev = device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+ tty0dev = device_create_with_groups(tty_class, NULL,
+ MKDEV(TTY_MAJOR, 0), NULL,
+ vt_dev_groups, "tty0");
if (IS_ERR(tty0dev))
tty0dev = NULL;
- else
- WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
vcs_init();
@@ -3423,42 +3430,26 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
}
-static struct device_attribute device_attrs[] = {
- __ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
- __ATTR(name, S_IRUGO, show_name, NULL),
+static DEVICE_ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind);
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static struct attribute *con_dev_attrs[] = {
+ &dev_attr_bind.attr,
+ &dev_attr_name.attr,
+ NULL
};
+ATTRIBUTE_GROUPS(con_dev);
+
static int vtconsole_init_device(struct con_driver *con)
{
- int i;
- int error = 0;
-
con->flag |= CON_DRIVER_FLAG_ATTR;
- dev_set_drvdata(con->dev, con);
- for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
- error = device_create_file(con->dev, &device_attrs[i]);
- if (error)
- break;
- }
-
- if (error) {
- while (--i >= 0)
- device_remove_file(con->dev, &device_attrs[i]);
- con->flag &= ~CON_DRIVER_FLAG_ATTR;
- }
-
- return error;
+ return 0;
}
static void vtconsole_deinit_device(struct con_driver *con)
{
- int i;
-
- if (con->flag & CON_DRIVER_FLAG_ATTR) {
- for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
- device_remove_file(con->dev, &device_attrs[i]);
- con->flag &= ~CON_DRIVER_FLAG_ATTR;
- }
+ con->flag &= ~CON_DRIVER_FLAG_ATTR;
}
/**
@@ -3621,11 +3612,11 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
if (retval)
goto err;
- con_driver->dev = device_create(vtconsole_class, NULL,
- MKDEV(0, con_driver->node),
- NULL, "vtcon%i",
- con_driver->node);
-
+ con_driver->dev =
+ device_create_with_groups(vtconsole_class, NULL,
+ MKDEV(0, con_driver->node),
+ con_driver, con_dev_groups,
+ "vtcon%i", con_driver->node);
if (IS_ERR(con_driver->dev)) {
printk(KERN_WARNING "Unable to create device for %s; "
"errno = %ld\n", con_driver->desc,
@@ -3739,10 +3730,11 @@ static int __init vtconsole_class_init(void)
struct con_driver *con = &registered_con_driver[i];
if (con->con && !con->dev) {
- con->dev = device_create(vtconsole_class, NULL,
- MKDEV(0, con->node),
- NULL, "vtcon%i",
- con->node);
+ con->dev =
+ device_create_with_groups(vtconsole_class, NULL,
+ MKDEV(0, con->node),
+ con, con_dev_groups,
+ "vtcon%i", con->node);
if (IS_ERR(con->dev)) {
printk(KERN_WARNING "Unable to create "
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 2bd78e2ac8ec..97d5a74558a3 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -388,7 +388,7 @@ int vt_ioctl(struct tty_struct *tty,
* Generate the tone for the appropriate number of ticks.
* If the time is zero, turn off sound ourselves.
*/
- ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
+ ticks = msecs_to_jiffies((arg >> 16) & 0xffff);
count = ticks ? (arg & 0xffff) : 0;
if (count)
count = PIT_TICK_RATE / count;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 6276f13e9e12..65bf0676d54a 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -835,7 +835,15 @@ int __uio_register_device(struct module *owner,
info->uio_dev = idev;
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
- ret = devm_request_irq(idev->dev, info->irq, uio_interrupt,
+ /*
+ * Note that we deliberately don't use devm_request_irq
+ * here. The parent module can unregister the UIO device
+ * and call pci_disable_msi, which requires that this
+ * irq has been freed. However, the device may have open
+ * FDs at the time of unregister and therefore may not be
+ * freed until they are released.
+ */
+ ret = request_irq(info->irq, uio_interrupt,
info->irq_flags, info->name, idev);
if (ret)
goto err_request_irq;
@@ -871,6 +879,8 @@ void uio_unregister_device(struct uio_info *info)
uio_dev_del_attributes(idev);
+ free_irq(idev->info->irq, idev);
+
device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
return;
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index dfb05edcdb96..5b7061a33103 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -88,9 +88,13 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
char buf[32];
int ret;
- if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ count = min_t(size_t, sizeof(buf) - 1, count);
+ if (copy_from_user(buf, ubuf, count))
return -EFAULT;
+ /* sscanf requires a zero terminated string */
+ buf[count] = '\0';
+
if (sscanf(buf, "%u", &mode) != 1)
return -EINVAL;
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 083acf45ad5a..19d655a743b5 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -520,7 +520,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
{
struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
- mutex_unlock(&fsm->lock);
if (on) {
ci_role_stop(ci);
ci_role_start(ci, CI_ROLE_HOST);
@@ -529,7 +528,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
hw_device_reset(ci);
ci_role_start(ci, CI_ROLE_GADGET);
}
- mutex_lock(&fsm->lock);
return 0;
}
@@ -537,12 +535,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
{
struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
- mutex_unlock(&fsm->lock);
if (on)
usb_gadget_vbus_connect(&ci->gadget);
else
usb_gadget_vbus_disconnect(&ci->gadget);
- mutex_lock(&fsm->lock);
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3e15add665e2..5c8f58114677 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1142,11 +1142,16 @@ static int acm_probe(struct usb_interface *intf,
}
while (buflen > 0) {
+ elength = buffer[0];
+ if (!elength) {
+ dev_err(&intf->dev, "skipping garbage byte\n");
+ elength = 1;
+ goto next_desc;
+ }
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
- elength = buffer[0];
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 41e510ae8c83..d85abfed84cc 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -106,6 +106,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04f3, 0x010c), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
+ { USB_DEVICE(0x04f3, 0x0125), .driver_info =
+ USB_QUIRK_DEVICE_QUALIFIER },
+
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index edba5348be18..6b486a36863c 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -65,8 +65,8 @@
#define USBOTGSS_IRQENABLE_SET_MISC 0x003c
#define USBOTGSS_IRQENABLE_CLR_MISC 0x0040
#define USBOTGSS_IRQMISC_OFFSET 0x03fc
-#define USBOTGSS_UTMI_OTG_CTRL 0x0080
-#define USBOTGSS_UTMI_OTG_STATUS 0x0084
+#define USBOTGSS_UTMI_OTG_STATUS 0x0080
+#define USBOTGSS_UTMI_OTG_CTRL 0x0084
#define USBOTGSS_UTMI_OTG_OFFSET 0x0480
#define USBOTGSS_TXFIFO_DEPTH 0x0508
#define USBOTGSS_RXFIFO_DEPTH 0x050c
@@ -98,20 +98,20 @@
#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL (1 << 3)
#define USBOTGSS_IRQMISC_IDPULLUP_FALL (1 << 0)
-/* UTMI_OTG_CTRL REGISTER */
-#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS (1 << 5)
-#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS (1 << 4)
-#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS (1 << 3)
-#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP (1 << 0)
-
/* UTMI_OTG_STATUS REGISTER */
-#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE (1 << 31)
-#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT (1 << 9)
-#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8)
-#define USBOTGSS_UTMI_OTG_STATUS_IDDIG (1 << 4)
-#define USBOTGSS_UTMI_OTG_STATUS_SESSEND (1 << 3)
-#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID (1 << 2)
-#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID (1 << 1)
+#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS (1 << 5)
+#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS (1 << 4)
+#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS (1 << 3)
+#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP (1 << 0)
+
+/* UTMI_OTG_CTRL REGISTER */
+#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE (1 << 31)
+#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT (1 << 9)
+#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE (1 << 8)
+#define USBOTGSS_UTMI_OTG_CTRL_IDDIG (1 << 4)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSEND (1 << 3)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID (1 << 2)
+#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID (1 << 1)
struct dwc3_omap {
struct device *dev;
@@ -119,7 +119,7 @@ struct dwc3_omap {
int irq;
void __iomem *base;
- u32 utmi_otg_status;
+ u32 utmi_otg_ctrl;
u32 utmi_otg_offset;
u32 irqmisc_offset;
u32 irq_eoi_offset;
@@ -153,15 +153,15 @@ static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
writel(value, base + offset);
}
-static u32 dwc3_omap_read_utmi_status(struct dwc3_omap *omap)
+static u32 dwc3_omap_read_utmi_ctrl(struct dwc3_omap *omap)
{
- return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS +
+ return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_CTRL +
omap->utmi_otg_offset);
}
-static void dwc3_omap_write_utmi_status(struct dwc3_omap *omap, u32 value)
+static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
{
- dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS +
+ dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_CTRL +
omap->utmi_otg_offset, value);
}
@@ -235,25 +235,25 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
}
}
- val = dwc3_omap_read_utmi_status(omap);
- val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG
- | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
- | USBOTGSS_UTMI_OTG_STATUS_SESSEND);
- val |= USBOTGSS_UTMI_OTG_STATUS_SESSVALID
- | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
- dwc3_omap_write_utmi_status(omap, val);
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG
+ | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_SESSEND);
+ val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+ dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_VBUS_VALID:
dev_dbg(omap->dev, "VBUS Connect\n");
- val = dwc3_omap_read_utmi_status(omap);
- val &= ~USBOTGSS_UTMI_OTG_STATUS_SESSEND;
- val |= USBOTGSS_UTMI_OTG_STATUS_IDDIG
- | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
- | USBOTGSS_UTMI_OTG_STATUS_SESSVALID
- | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
- dwc3_omap_write_utmi_status(omap, val);
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
+ val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
+ | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_SESSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+ dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_ID_FLOAT:
@@ -263,13 +263,13 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
case OMAP_DWC3_VBUS_OFF:
dev_dbg(omap->dev, "VBUS Disconnect\n");
- val = dwc3_omap_read_utmi_status(omap);
- val &= ~(USBOTGSS_UTMI_OTG_STATUS_SESSVALID
- | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
- | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT);
- val |= USBOTGSS_UTMI_OTG_STATUS_SESSEND
- | USBOTGSS_UTMI_OTG_STATUS_IDDIG;
- dwc3_omap_write_utmi_status(omap, val);
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT);
+ val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND
+ | USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+ dwc3_omap_write_utmi_ctrl(omap, val);
break;
default:
@@ -422,22 +422,22 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
struct device_node *node = omap->dev->of_node;
int utmi_mode = 0;
- reg = dwc3_omap_read_utmi_status(omap);
+ reg = dwc3_omap_read_utmi_ctrl(omap);
of_property_read_u32(node, "utmi-mode", &utmi_mode);
switch (utmi_mode) {
case DWC3_OMAP_UTMI_MODE_SW:
- reg |= USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+ reg |= USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
break;
case DWC3_OMAP_UTMI_MODE_HW:
- reg &= ~USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+ reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
break;
default:
dev_dbg(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
}
- dwc3_omap_write_utmi_status(omap, reg);
+ dwc3_omap_write_utmi_ctrl(omap, reg);
}
static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
@@ -614,7 +614,7 @@ static int dwc3_omap_suspend(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
- omap->utmi_otg_status = dwc3_omap_read_utmi_status(omap);
+ omap->utmi_otg_ctrl = dwc3_omap_read_utmi_ctrl(omap);
dwc3_omap_disable_irqs(omap);
return 0;
@@ -624,7 +624,7 @@ static int dwc3_omap_resume(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
- dwc3_omap_write_utmi_status(omap, omap->utmi_otg_status);
+ dwc3_omap_write_utmi_ctrl(omap, omap->utmi_otg_ctrl);
dwc3_omap_enable_irqs(omap);
pm_runtime_disable(dev);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index c42765b3a060..0495c94a23d7 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1295,6 +1295,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
}
}
c->next_interface_id = 0;
+ memset(c->interface, 0, sizeof(c->interface));
c->superspeed = 0;
c->highspeed = 0;
c->fullspeed = 0;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 175c9956cbe3..6bdb57069044 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -23,6 +23,7 @@
#include <linux/export.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/uio.h>
#include <asm/unaligned.h>
#include <linux/usb/composite.h>
@@ -655,9 +656,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
unuse_mm(io_data->mm);
}
- aio_complete(io_data->kiocb, ret, ret);
+ io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
- if (io_data->ffs->ffs_eventfd && !io_data->kiocb->ki_eventfd)
+ if (io_data->ffs->ffs_eventfd &&
+ !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
usb_ep_free_request(io_data->ep, io_data->req);
@@ -1059,8 +1061,6 @@ static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,
.open = ffs_epfile_open,
- .write = new_sync_write,
- .read = new_sync_read,
.write_iter = ffs_epfile_write_iter,
.read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 13dfc9915b1d..f7f35a36c09a 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -437,12 +437,20 @@ static int hidg_setup(struct usb_function *f,
| USB_REQ_GET_DESCRIPTOR):
switch (value >> 8) {
case HID_DT_HID:
+ {
+ struct hid_descriptor hidg_desc_copy = hidg_desc;
+
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
+ hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
+ hidg_desc_copy.desc[0].wDescriptorLength =
+ cpu_to_le16(hidg->report_desc_length);
+
length = min_t(unsigned short, length,
- hidg_desc.bLength);
- memcpy(req->buf, &hidg_desc, length);
+ hidg_desc_copy.bLength);
+ memcpy(req->buf, &hidg_desc_copy, length);
goto respond;
break;
+ }
case HID_DT_REPORT:
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
length = min_t(unsigned short, length,
@@ -632,6 +640,10 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ /*
+ * We can use the hidg_desc struct here, but we should not rely on
+ * its content staying unchanged after returning from this function.
+ */
hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
hidg_desc.desc[0].wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 76891adfba7a..cf0df8fbba89 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -222,7 +222,7 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
v4l2_event.type = UVC_EVENT_DATA;
uvc_event->data.length = req->actual;
memcpy(&uvc_event->data.data, req->buf, req->actual);
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
}
}
@@ -256,7 +256,7 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_SETUP;
memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
return 0;
}
@@ -315,7 +315,7 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_CONNECT;
uvc_event->speed = cdev->gadget->speed;
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
uvc->state = UVC_STATE_CONNECTED;
}
@@ -343,7 +343,7 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_STREAMOFF;
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
uvc->state = UVC_STATE_CONNECTED;
return 0;
@@ -370,7 +370,7 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_STREAMON;
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
return USB_GADGET_DELAYED_STATUS;
default:
@@ -388,7 +388,7 @@ uvc_function_disable(struct usb_function *f)
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_DISCONNECT;
- v4l2_event_queue(uvc->vdev, &v4l2_event);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
uvc->state = UVC_STATE_DISCONNECTED;
@@ -435,24 +435,19 @@ static int
uvc_register_video(struct uvc_device *uvc)
{
struct usb_composite_dev *cdev = uvc->func.config->cdev;
- struct video_device *video;
/* TODO reference counting. */
- video = video_device_alloc();
- if (video == NULL)
- return -ENOMEM;
+ uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
+ uvc->vdev.fops = &uvc_v4l2_fops;
+ uvc->vdev.ioctl_ops = &uvc_v4l2_ioctl_ops;
+ uvc->vdev.release = video_device_release_empty;
+ uvc->vdev.vfl_dir = VFL_DIR_TX;
+ uvc->vdev.lock = &uvc->video.mutex;
+ strlcpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name));
- video->v4l2_dev = &uvc->v4l2_dev;
- video->fops = &uvc_v4l2_fops;
- video->ioctl_ops = &uvc_v4l2_ioctl_ops;
- video->release = video_device_release;
- video->vfl_dir = VFL_DIR_TX;
- strlcpy(video->name, cdev->gadget->name, sizeof(video->name));
+ video_set_drvdata(&uvc->vdev, uvc);
- uvc->vdev = video;
- video_set_drvdata(video, uvc);
-
- return video_register_device(video, VFL_TYPE_GRABBER, -1);
+ return video_register_device(&uvc->vdev, VFL_TYPE_GRABBER, -1);
}
#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
@@ -765,8 +760,6 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
error:
v4l2_device_unregister(&uvc->v4l2_dev);
- if (uvc->vdev)
- video_device_release(uvc->vdev);
if (uvc->control_ep)
uvc->control_ep->driver_data = NULL;
@@ -897,7 +890,7 @@ static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
INFO(cdev, "%s\n", __func__);
- video_unregister_device(uvc->vdev);
+ video_unregister_device(&uvc->vdev);
v4l2_device_unregister(&uvc->v4l2_dev);
uvc->control_ep->driver_data = NULL;
uvc->video.ep->driver_data = NULL;
@@ -918,6 +911,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
if (uvc == NULL)
return ERR_PTR(-ENOMEM);
+ mutex_init(&uvc->video.mutex);
uvc->state = UVC_STATE_DISCONNECTED;
opts = fi_to_f_uvc_opts(fi);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 89179ab20c10..7ee057930ae7 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -113,6 +113,7 @@ struct gs_port {
int write_allocated;
struct gs_buf port_write_buf;
wait_queue_head_t drain_wait; /* wait while writes drain */
+ bool write_busy;
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
@@ -363,7 +364,7 @@ __acquires(&port->port_lock)
int status = 0;
bool do_tty_wake = false;
- while (!list_empty(pool)) {
+ while (!port->write_busy && !list_empty(pool)) {
struct usb_request *req;
int len;
@@ -393,9 +394,11 @@ __acquires(&port->port_lock)
* NOTE that we may keep sending data for a while after
* the TTY closed (dev->ioport->port_tty is NULL).
*/
+ port->write_busy = true;
spin_unlock(&port->port_lock);
status = usb_ep_queue(in, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
+ port->write_busy = false;
if (status) {
pr_debug("%s: %s %s err %d\n",
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index f67695cb28f8..ebe409b9e419 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -115,6 +115,7 @@ struct uvc_video
unsigned int width;
unsigned int height;
unsigned int imagesize;
+ struct mutex mutex; /* protects frame parameters */
/* Requests */
unsigned int req_size;
@@ -143,7 +144,7 @@ enum uvc_state
struct uvc_device
{
- struct video_device *vdev;
+ struct video_device vdev;
struct v4l2_device v4l2_dev;
enum uvc_state state;
struct usb_function func;
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 8ea8b3b227b4..d617c39a0052 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -104,29 +104,16 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&queue->irqlock, flags);
}
-static void uvc_wait_prepare(struct vb2_queue *vq)
-{
- struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
-
- mutex_unlock(&queue->mutex);
-}
-
-static void uvc_wait_finish(struct vb2_queue *vq)
-{
- struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
-
- mutex_lock(&queue->mutex);
-}
-
static struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
- .wait_prepare = uvc_wait_prepare,
- .wait_finish = uvc_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
-int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
+ struct mutex *lock)
{
int ret;
@@ -135,6 +122,7 @@ int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
queue->queue.drv_priv = queue;
queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue->queue.ops = &uvc_queue_qops;
+ queue->queue.lock = lock;
queue->queue.mem_ops = &vb2_vmalloc_memops;
queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
| V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
@@ -142,7 +130,6 @@ int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
if (ret)
return ret;
- mutex_init(&queue->mutex);
spin_lock_init(&queue->irqlock);
INIT_LIST_HEAD(&queue->irqqueue);
queue->flags = 0;
@@ -155,9 +142,7 @@ int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
*/
void uvcg_free_buffers(struct uvc_video_queue *queue)
{
- mutex_lock(&queue->mutex);
vb2_queue_release(&queue->queue);
- mutex_unlock(&queue->mutex);
}
/*
@@ -168,22 +153,14 @@ int uvcg_alloc_buffers(struct uvc_video_queue *queue,
{
int ret;
- mutex_lock(&queue->mutex);
ret = vb2_reqbufs(&queue->queue, rb);
- mutex_unlock(&queue->mutex);
return ret ? ret : rb->count;
}
int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_querybuf(&queue->queue, buf);
- mutex_unlock(&queue->mutex);
-
- return ret;
+ return vb2_querybuf(&queue->queue, buf);
}
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
@@ -191,18 +168,14 @@ int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
unsigned long flags;
int ret;
- mutex_lock(&queue->mutex);
ret = vb2_qbuf(&queue->queue, buf);
if (ret < 0)
- goto done;
+ return ret;
spin_lock_irqsave(&queue->irqlock, flags);
ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
queue->flags &= ~UVC_QUEUE_PAUSED;
spin_unlock_irqrestore(&queue->irqlock, flags);
-
-done:
- mutex_unlock(&queue->mutex);
return ret;
}
@@ -213,13 +186,7 @@ done:
int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
int nonblocking)
{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
- mutex_unlock(&queue->mutex);
-
- return ret;
+ return vb2_dqbuf(&queue->queue, buf, nonblocking);
}
/*
@@ -231,24 +198,12 @@ int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
- unsigned int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_poll(&queue->queue, file, wait);
- mutex_unlock(&queue->mutex);
-
- return ret;
+ return vb2_poll(&queue->queue, file, wait);
}
int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
- int ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_mmap(&queue->queue, vma);
- mutex_unlock(&queue->mutex);
-
- return ret;
+ return vb2_mmap(&queue->queue, vma);
}
#ifndef CONFIG_MMU
@@ -260,12 +215,7 @@ int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
unsigned long pgoff)
{
- unsigned long ret;
-
- mutex_lock(&queue->mutex);
- ret = vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
- mutex_unlock(&queue->mutex);
- return ret;
+ return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif
@@ -327,18 +277,17 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
unsigned long flags;
int ret = 0;
- mutex_lock(&queue->mutex);
if (enable) {
ret = vb2_streamon(&queue->queue, queue->queue.type);
if (ret < 0)
- goto done;
+ return ret;
queue->sequence = 0;
queue->buf_used = 0;
} else {
ret = vb2_streamoff(&queue->queue, queue->queue.type);
if (ret < 0)
- goto done;
+ return ret;
spin_lock_irqsave(&queue->irqlock, flags);
INIT_LIST_HEAD(&queue->irqqueue);
@@ -353,8 +302,6 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
spin_unlock_irqrestore(&queue->irqlock, flags);
}
-done:
- mutex_unlock(&queue->mutex);
return ret;
}
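
The uvc_queue.c hunks above stop wrapping every vb2 call in a driver-private mutex and instead hand a lock to videobuf2 (queue->queue.lock) together with the stock vb2_ops_wait_prepare()/vb2_ops_wait_finish() helpers, which drop and re-take that lock around blocking waits. A minimal userspace sketch of the same "framework-owned lock with wait_prepare/wait_finish callbacks" idea — names and the pthread mutex are my own, this is not the vb2 implementation:

/* sketch: drop a caller-supplied lock while blocking, re-take it after,
 * mirroring what vb2_ops_wait_prepare()/vb2_ops_wait_finish() do for
 * vb2_queue::lock */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct fake_queue {
	pthread_mutex_t *lock;		/* optional, supplied by the driver */
};

static void wait_prepare(struct fake_queue *q)
{
	if (q->lock)
		pthread_mutex_unlock(q->lock);	/* release before sleeping */
}

static void wait_finish(struct fake_queue *q)
{
	if (q->lock)
		pthread_mutex_lock(q->lock);	/* re-acquire after waking */
}

static void dequeue_blocking(struct fake_queue *q)
{
	wait_prepare(q);
	usleep(1000);			/* stand-in for waiting on a buffer */
	wait_finish(q);
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	struct fake_queue q = { .lock = &m };

	pthread_mutex_lock(&m);		/* the ioctl path enters locked */
	dequeue_blocking(&q);
	pthread_mutex_unlock(&m);
	printf("lock dropped across the blocking wait and re-taken\n");
	return 0;
}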
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 03919c724961..01ca9eab3481 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -41,7 +41,6 @@ struct uvc_buffer {
struct uvc_video_queue {
struct vb2_queue queue;
- struct mutex mutex; /* Protects queue */
unsigned int flags;
__u32 sequence;
@@ -57,7 +56,8 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
return vb2_is_streaming(&queue->queue);
}
-int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type);
+int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
+ struct mutex *lock);
void uvcg_free_buffers(struct uvc_video_queue *queue);
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 8b818fd027b3..f4ccbd56f4d2 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -14,7 +14,6 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
-#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -77,7 +76,8 @@ uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->bus_info, dev_name(&cdev->gadget->dev),
sizeof(cap->bus_info));
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -312,8 +312,10 @@ uvc_v4l2_release(struct file *file)
uvc_function_disconnect(uvc);
+ mutex_lock(&video->mutex);
uvcg_video_enable(video, 0);
uvcg_free_buffers(&video->queue);
+ mutex_unlock(&video->mutex);
file->private_data = NULL;
v4l2_fh_del(&handle->vfh);
@@ -357,7 +359,7 @@ struct v4l2_file_operations uvc_v4l2_fops = {
.owner = THIS_MODULE,
.open = uvc_v4l2_open,
.release = uvc_v4l2_release,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.mmap = uvc_v4l2_mmap,
.poll = uvc_v4l2_poll,
#ifndef CONFIG_MMU
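
The querycap hunk above starts filling device_caps and setting V4L2_CAP_DEVICE_CAPS, which V4L2 expects so userspace can tell the capabilities of this particular device node from the union of capabilities across all nodes the driver exposes. A small userspace check using the real VIDIOC_QUERYCAP ioctl; the /dev/video0 path is only an example:

/* sketch: read both capability masks reported by VIDIOC_QUERYCAP */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap = {0};
	int fd = open("/dev/video0", O_RDWR);	/* example node */

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		perror("VIDIOC_QUERYCAP");
		return 1;
	}
	printf("driver-wide caps: 0x%08x\n", cap.capabilities);
	if (cap.capabilities & V4L2_CAP_DEVICE_CAPS)
		printf("this node's caps: 0x%08x\n", cap.device_caps);
	close(fd);
	return 0;
}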
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 50a5e637ca35..3d0d5d94a62f 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -391,7 +391,8 @@ int uvcg_video_init(struct uvc_video *video)
video->imagesize = 320 * 240 * 2;
/* Initialize the video buffers queue. */
- uvcg_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ uvcg_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ &video->mutex);
return 0;
}
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index c30b7b572465..1194b09ae746 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -121,7 +121,7 @@ static struct usb_function *f_msg;
/*
* We _always_ have both ACM and mass storage functions.
*/
-static int __init acm_ms_do_config(struct usb_configuration *c)
+static int acm_ms_do_config(struct usb_configuration *c)
{
struct fsg_opts *opts;
int status;
@@ -174,7 +174,7 @@ static struct usb_configuration acm_ms_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init acm_ms_bind(struct usb_composite_dev *cdev)
+static int acm_ms_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct fsg_opts *opts;
@@ -249,7 +249,7 @@ fail_get_msg:
return status;
}
-static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
+static int acm_ms_unbind(struct usb_composite_dev *cdev)
{
usb_put_function(f_msg);
usb_put_function_instance(fi_msg);
@@ -258,13 +258,13 @@ static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver acm_ms_driver = {
+static struct usb_composite_driver acm_ms_driver = {
.name = "g_acm_ms",
.dev = &device_desc,
.max_speed = USB_SPEED_SUPER,
.strings = dev_strings,
.bind = acm_ms_bind,
- .unbind = __exit_p(acm_ms_unbind),
+ .unbind = acm_ms_unbind,
};
module_usb_composite_driver(acm_ms_driver);
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index f46a3956e43d..f289caf18a45 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -167,7 +167,7 @@ static const struct usb_descriptor_header *otg_desc[] = {
/*-------------------------------------------------------------------------*/
-static int __init audio_do_config(struct usb_configuration *c)
+static int audio_do_config(struct usb_configuration *c)
{
int status;
@@ -216,7 +216,7 @@ static struct usb_configuration audio_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init audio_bind(struct usb_composite_dev *cdev)
+static int audio_bind(struct usb_composite_dev *cdev)
{
#ifndef CONFIG_GADGET_UAC1
struct f_uac2_opts *uac2_opts;
@@ -276,7 +276,7 @@ fail:
return status;
}
-static int __exit audio_unbind(struct usb_composite_dev *cdev)
+static int audio_unbind(struct usb_composite_dev *cdev)
{
#ifdef CONFIG_GADGET_UAC1
if (!IS_ERR_OR_NULL(f_uac1))
@@ -292,13 +292,13 @@ static int __exit audio_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver audio_driver = {
+static struct usb_composite_driver audio_driver = {
.name = "g_audio",
.dev = &device_desc,
.strings = audio_strings,
.max_speed = USB_SPEED_HIGH,
.bind = audio_bind,
- .unbind = __exit_p(audio_unbind),
+ .unbind = audio_unbind,
};
module_usb_composite_driver(audio_driver);
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 2e85d9473478..afd3e37921a7 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -104,7 +104,7 @@ static struct usb_function_instance *fi_ecm;
/*
* We _always_ have both CDC ECM and CDC ACM functions.
*/
-static int __init cdc_do_config(struct usb_configuration *c)
+static int cdc_do_config(struct usb_configuration *c)
{
int status;
@@ -153,7 +153,7 @@ static struct usb_configuration cdc_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init cdc_bind(struct usb_composite_dev *cdev)
+static int cdc_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct f_ecm_opts *ecm_opts;
@@ -211,7 +211,7 @@ fail:
return status;
}
-static int __exit cdc_unbind(struct usb_composite_dev *cdev)
+static int cdc_unbind(struct usb_composite_dev *cdev)
{
usb_put_function(f_acm);
usb_put_function_instance(fi_serial);
@@ -222,13 +222,13 @@ static int __exit cdc_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver cdc_driver = {
+static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = cdc_bind,
- .unbind = __exit_p(cdc_unbind),
+ .unbind = cdc_unbind,
};
module_usb_composite_driver(cdc_driver);
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 633683a72a11..204b10b1a7e7 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -284,7 +284,7 @@ fail_1:
return -ENODEV;
}
-static int __init dbgp_bind(struct usb_gadget *gadget,
+static int dbgp_bind(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
int err, stp;
@@ -406,7 +406,7 @@ fail:
return err;
}
-static __refdata struct usb_gadget_driver dbgp_driver = {
+static struct usb_gadget_driver dbgp_driver = {
.function = "dbgp",
.max_speed = USB_SPEED_HIGH,
.bind = dbgp_bind,
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index c5fdc61cdc4a..a3323dca218f 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -222,7 +222,7 @@ static struct usb_function *f_rndis;
* the first one present. That's to make Microsoft's drivers happy,
* and to follow DOCSIS 1.0 (cable modem standard).
*/
-static int __init rndis_do_config(struct usb_configuration *c)
+static int rndis_do_config(struct usb_configuration *c)
{
int status;
@@ -264,7 +264,7 @@ MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
/*
* We _always_ have an ECM, CDC Subset, or EEM configuration.
*/
-static int __init eth_do_config(struct usb_configuration *c)
+static int eth_do_config(struct usb_configuration *c)
{
int status = 0;
@@ -318,7 +318,7 @@ static struct usb_configuration eth_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init eth_bind(struct usb_composite_dev *cdev)
+static int eth_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct f_eem_opts *eem_opts = NULL;
@@ -447,7 +447,7 @@ fail:
return status;
}
-static int __exit eth_unbind(struct usb_composite_dev *cdev)
+static int eth_unbind(struct usb_composite_dev *cdev)
{
if (has_rndis()) {
usb_put_function(f_rndis);
@@ -466,13 +466,13 @@ static int __exit eth_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver eth_driver = {
+static struct usb_composite_driver eth_driver = {
.name = "g_ether",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_SUPER,
.bind = eth_bind,
- .unbind = __exit_p(eth_unbind),
+ .unbind = eth_unbind,
};
module_usb_composite_driver(eth_driver);
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index b01b88e1b716..7b9ef7e257d2 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -163,7 +163,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev);
static int gfs_do_config(struct usb_configuration *c);
-static __refdata struct usb_composite_driver gfs_driver = {
+static struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
.strings = gfs_dev_strings,
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index e02a095294ac..da19c486b61e 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -118,7 +118,7 @@ static struct usb_gadget_strings *dev_strings[] = {
static struct usb_function_instance *fi_midi;
static struct usb_function *f_midi;
-static int __exit midi_unbind(struct usb_composite_dev *dev)
+static int midi_unbind(struct usb_composite_dev *dev)
{
usb_put_function(f_midi);
usb_put_function_instance(fi_midi);
@@ -133,7 +133,7 @@ static struct usb_configuration midi_config = {
.MaxPower = CONFIG_USB_GADGET_VBUS_DRAW,
};
-static int __init midi_bind_config(struct usb_configuration *c)
+static int midi_bind_config(struct usb_configuration *c)
{
int status;
@@ -150,7 +150,7 @@ static int __init midi_bind_config(struct usb_configuration *c)
return 0;
}
-static int __init midi_bind(struct usb_composite_dev *cdev)
+static int midi_bind(struct usb_composite_dev *cdev)
{
struct f_midi_opts *midi_opts;
int status;
@@ -185,13 +185,13 @@ put:
return status;
}
-static __refdata struct usb_composite_driver midi_driver = {
+static struct usb_composite_driver midi_driver = {
.name = (char *) longname,
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = midi_bind,
- .unbind = __exit_p(midi_unbind),
+ .unbind = midi_unbind,
};
module_usb_composite_driver(midi_driver);
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 614b06d80b41..2baa572686c6 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -106,7 +106,7 @@ static struct usb_gadget_strings *dev_strings[] = {
/****************************** Configurations ******************************/
-static int __init do_config(struct usb_configuration *c)
+static int do_config(struct usb_configuration *c)
{
struct hidg_func_node *e, *n;
int status = 0;
@@ -147,7 +147,7 @@ static struct usb_configuration config_driver = {
/****************************** Gadget Bind ******************************/
-static int __init hid_bind(struct usb_composite_dev *cdev)
+static int hid_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct list_head *tmp;
@@ -205,7 +205,7 @@ put:
return status;
}
-static int __exit hid_unbind(struct usb_composite_dev *cdev)
+static int hid_unbind(struct usb_composite_dev *cdev)
{
struct hidg_func_node *n;
@@ -216,7 +216,7 @@ static int __exit hid_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static int __init hidg_plat_driver_probe(struct platform_device *pdev)
+static int hidg_plat_driver_probe(struct platform_device *pdev)
{
struct hidg_func_descriptor *func = dev_get_platdata(&pdev->dev);
struct hidg_func_node *entry;
@@ -252,13 +252,13 @@ static int hidg_plat_driver_remove(struct platform_device *pdev)
/****************************** Some noise ******************************/
-static __refdata struct usb_composite_driver hidg_driver = {
+static struct usb_composite_driver hidg_driver = {
.name = "g_hid",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = hid_bind,
- .unbind = __exit_p(hid_unbind),
+ .unbind = hid_unbind,
};
static struct platform_driver hidg_plat_driver = {
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 200f9a584064..2030565c6789 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -26,6 +26,7 @@
#include <linux/poll.h>
#include <linux/mmu_context.h>
#include <linux/aio.h>
+#include <linux/uio.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
@@ -469,7 +470,7 @@ static void ep_user_copy_worker(struct work_struct *work)
ret = -EFAULT;
/* completing the iocb can drop the ctx and mm, don't touch mm after */
- aio_complete(iocb, ret, ret);
+ iocb->ki_complete(iocb, ret, ret);
kfree(priv->buf);
kfree(priv->to_free);
@@ -497,7 +498,8 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
kfree(priv);
iocb->private = NULL;
/* aio_complete() reports bytes-transferred _and_ faults */
- aio_complete(iocb, req->actual ? req->actual : req->status,
+
+ iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
req->status);
} else {
/* ep_copy_to_user() won't report both; we hide some faults */
@@ -697,8 +699,6 @@ static const struct file_operations ep_io_operations = {
.open = ep_open,
.release = ep_release,
.llseek = no_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.unlocked_ioctl = ep_ioctl,
.read_iter = ep_read_iter,
.write_iter = ep_write_iter,
@@ -1505,7 +1505,7 @@ static void destroy_ep_files (struct dev_data *dev)
list_del_init (&ep->epfiles);
dentry = ep->dentry;
ep->dentry = NULL;
- parent = dentry->d_parent->d_inode;
+ parent = d_inode(dentry->d_parent);
/* break link to controller */
if (ep->state == STATE_EP_ENABLED)
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index 8e27a8c96444..e7bfb081f111 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -130,7 +130,7 @@ static int msg_thread_exits(struct fsg_common *common)
return 0;
}
-static int __init msg_do_config(struct usb_configuration *c)
+static int msg_do_config(struct usb_configuration *c)
{
struct fsg_opts *opts;
int ret;
@@ -170,7 +170,7 @@ static struct usb_configuration msg_config_driver = {
/****************************** Gadget Bind ******************************/
-static int __init msg_bind(struct usb_composite_dev *cdev)
+static int msg_bind(struct usb_composite_dev *cdev)
{
static const struct fsg_operations ops = {
.thread_exits = msg_thread_exits,
@@ -248,7 +248,7 @@ static int msg_unbind(struct usb_composite_dev *cdev)
/****************************** Some noise ******************************/
-static __refdata struct usb_composite_driver msg_driver = {
+static struct usb_composite_driver msg_driver = {
.name = "g_mass_storage",
.dev = &msg_device_desc,
.max_speed = USB_SPEED_SUPER,
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index 39d27bb343b4..b21b51f0c9fa 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -149,7 +149,7 @@ static struct usb_function *f_acm_rndis;
static struct usb_function *f_rndis;
static struct usb_function *f_msg_rndis;
-static __init int rndis_do_config(struct usb_configuration *c)
+static int rndis_do_config(struct usb_configuration *c)
{
struct fsg_opts *fsg_opts;
int ret;
@@ -237,7 +237,7 @@ static struct usb_function *f_acm_multi;
static struct usb_function *f_ecm;
static struct usb_function *f_msg_multi;
-static __init int cdc_do_config(struct usb_configuration *c)
+static int cdc_do_config(struct usb_configuration *c)
{
struct fsg_opts *fsg_opts;
int ret;
@@ -466,7 +466,7 @@ fail:
return status;
}
-static int __exit multi_unbind(struct usb_composite_dev *cdev)
+static int multi_unbind(struct usb_composite_dev *cdev)
{
#ifdef CONFIG_USB_G_MULTI_CDC
usb_put_function(f_msg_multi);
@@ -497,13 +497,13 @@ static int __exit multi_unbind(struct usb_composite_dev *cdev)
/****************************** Some noise ******************************/
-static __refdata struct usb_composite_driver multi_driver = {
+static struct usb_composite_driver multi_driver = {
.name = "g_multi",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = multi_bind,
- .unbind = __exit_p(multi_unbind),
+ .unbind = multi_unbind,
.needs_serial = 1,
};
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index e90e23db2acb..6ce7421412e9 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -107,7 +107,7 @@ static struct usb_function *f_ncm;
/*-------------------------------------------------------------------------*/
-static int __init ncm_do_config(struct usb_configuration *c)
+static int ncm_do_config(struct usb_configuration *c)
{
int status;
@@ -143,7 +143,7 @@ static struct usb_configuration ncm_config_driver = {
/*-------------------------------------------------------------------------*/
-static int __init gncm_bind(struct usb_composite_dev *cdev)
+static int gncm_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct f_ncm_opts *ncm_opts;
@@ -186,7 +186,7 @@ fail:
return status;
}
-static int __exit gncm_unbind(struct usb_composite_dev *cdev)
+static int gncm_unbind(struct usb_composite_dev *cdev)
{
if (!IS_ERR_OR_NULL(f_ncm))
usb_put_function(f_ncm);
@@ -195,13 +195,13 @@ static int __exit gncm_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver ncm_driver = {
+static struct usb_composite_driver ncm_driver = {
.name = "g_ncm",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = gncm_bind,
- .unbind = __exit_p(gncm_unbind),
+ .unbind = gncm_unbind,
};
module_usb_composite_driver(ncm_driver);
diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
index 9b8fd701648c..4bb498a38a1c 100644
--- a/drivers/usb/gadget/legacy/nokia.c
+++ b/drivers/usb/gadget/legacy/nokia.c
@@ -118,7 +118,7 @@ static struct usb_function_instance *fi_obex1;
static struct usb_function_instance *fi_obex2;
static struct usb_function_instance *fi_phonet;
-static int __init nokia_bind_config(struct usb_configuration *c)
+static int nokia_bind_config(struct usb_configuration *c)
{
struct usb_function *f_acm;
struct usb_function *f_phonet = NULL;
@@ -224,7 +224,7 @@ err_get_acm:
return status;
}
-static int __init nokia_bind(struct usb_composite_dev *cdev)
+static int nokia_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
int status;
@@ -307,7 +307,7 @@ err_usb:
return status;
}
-static int __exit nokia_unbind(struct usb_composite_dev *cdev)
+static int nokia_unbind(struct usb_composite_dev *cdev)
{
if (!IS_ERR_OR_NULL(f_obex1_cfg2))
usb_put_function(f_obex1_cfg2);
@@ -338,13 +338,13 @@ static int __exit nokia_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver nokia_driver = {
+static struct usb_composite_driver nokia_driver = {
.name = "g_nokia",
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
.bind = nokia_bind,
- .unbind = __exit_p(nokia_unbind),
+ .unbind = nokia_unbind,
};
module_usb_composite_driver(nokia_driver);
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index d5b6ee725a2a..1ce7df1060a5 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -126,7 +126,7 @@ static struct usb_configuration printer_cfg_driver = {
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
};
-static int __init printer_do_config(struct usb_configuration *c)
+static int printer_do_config(struct usb_configuration *c)
{
struct usb_gadget *gadget = c->cdev->gadget;
int status = 0;
@@ -152,7 +152,7 @@ static int __init printer_do_config(struct usb_configuration *c)
return status;
}
-static int __init printer_bind(struct usb_composite_dev *cdev)
+static int printer_bind(struct usb_composite_dev *cdev)
{
struct f_printer_opts *opts;
int ret, len;
@@ -191,7 +191,7 @@ static int __init printer_bind(struct usb_composite_dev *cdev)
return ret;
}
-static int __exit printer_unbind(struct usb_composite_dev *cdev)
+static int printer_unbind(struct usb_composite_dev *cdev)
{
usb_put_function(f_printer);
usb_put_function_instance(fi_printer);
@@ -199,7 +199,7 @@ static int __exit printer_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver printer_driver = {
+static struct usb_composite_driver printer_driver = {
.name = shortname,
.dev = &device_desc,
.strings = dev_strings,
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index 1f5f978d35d5..8b7528f9b78e 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -174,7 +174,7 @@ out:
return ret;
}
-static int __init gs_bind(struct usb_composite_dev *cdev)
+static int gs_bind(struct usb_composite_dev *cdev)
{
int status;
@@ -230,7 +230,7 @@ static int gs_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver gserial_driver = {
+static struct usb_composite_driver gserial_driver = {
.name = "g_serial",
.dev = &device_desc,
.strings = dev_strings,
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 6e0a019aad54..f9b4882fce52 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -29,7 +29,7 @@
USB_GADGET_COMPOSITE_OPTIONS();
-static struct target_fabric_configfs *usbg_fabric_configfs;
+static const struct target_core_fabric_ops usbg_ops;
static inline struct f_uas *to_f_uas(struct usb_function *f)
{
@@ -1572,8 +1572,7 @@ static struct se_portal_group *usbg_make_tpg(
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn,
- &tpg->se_tpg, tpg,
+ ret = core_tpg_register(&usbg_ops, wwn, &tpg->se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
destroy_workqueue(tpg->workqueue);
@@ -1864,7 +1863,9 @@ static int usbg_check_stop_free(struct se_cmd *se_cmd)
return 1;
}
-static struct target_core_fabric_ops usbg_ops = {
+static const struct target_core_fabric_ops usbg_ops = {
+ .module = THIS_MODULE,
+ .name = "usb_gadget",
.get_fabric_name = usbg_get_fabric_name,
.get_fabric_proto_ident = usbg_get_fabric_proto_ident,
.tpg_get_wwn = usbg_get_fabric_wwn,
@@ -1906,46 +1907,9 @@ static struct target_core_fabric_ops usbg_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = usbg_make_nodeacl,
.fabric_drop_nodeacl = usbg_drop_nodeacl,
-};
-
-static int usbg_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget");
- if (IS_ERR(fabric)) {
- printk(KERN_ERR "target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
-
- fabric->tf_ops = usbg_ops;
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- printk(KERN_ERR "target_fabric_configfs_register() failed"
- " for usb-gadget\n");
- return ret;
- }
- usbg_fabric_configfs = fabric;
- return 0;
-};
-static void usbg_deregister_configfs(void)
-{
- if (!(usbg_fabric_configfs))
- return;
-
- target_fabric_configfs_deregister(usbg_fabric_configfs);
- usbg_fabric_configfs = NULL;
+ .tfc_wwn_attrs = usbg_wwn_attrs,
+ .tfc_tpg_base_attrs = usbg_base_attrs,
};
/* Start gadget.c code */
@@ -2433,7 +2397,7 @@ static int usb_target_bind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver usbg_driver = {
+static struct usb_composite_driver usbg_driver = {
.name = "g_target",
.dev = &usbg_device_desc,
.strings = usbg_strings,
@@ -2454,16 +2418,13 @@ static void usbg_detach(struct usbg_tpg *tpg)
static int __init usb_target_gadget_init(void)
{
- int ret;
-
- ret = usbg_register_configfs();
- return ret;
+ return target_register_template(&usbg_ops);
}
module_init(usb_target_gadget_init);
static void __exit usb_target_gadget_exit(void)
{
- usbg_deregister_configfs();
+ target_unregister_template(&usbg_ops);
}
module_exit(usb_target_gadget_exit);
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
index 04a3da20f742..72c976bf3530 100644
--- a/drivers/usb/gadget/legacy/webcam.c
+++ b/drivers/usb/gadget/legacy/webcam.c
@@ -334,7 +334,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
* USB configuration
*/
-static int __init
+static int
webcam_config_bind(struct usb_configuration *c)
{
int status = 0;
@@ -358,7 +358,7 @@ static struct usb_configuration webcam_config_driver = {
.MaxPower = CONFIG_USB_GADGET_VBUS_DRAW,
};
-static int /* __init_or_exit */
+static int
webcam_unbind(struct usb_composite_dev *cdev)
{
if (!IS_ERR_OR_NULL(f_uvc))
@@ -368,7 +368,7 @@ webcam_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static int __init
+static int
webcam_bind(struct usb_composite_dev *cdev)
{
struct f_uvc_opts *uvc_opts;
@@ -422,7 +422,7 @@ error:
* Driver
*/
-static __refdata struct usb_composite_driver webcam_driver = {
+static struct usb_composite_driver webcam_driver = {
.name = "g_webcam",
.dev = &webcam_device_descriptor,
.strings = webcam_device_strings,
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index 5ee95152493c..c986e8addb90 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -272,7 +272,7 @@ static struct usb_function_instance *func_inst_lb;
module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qlen, "depth of loopback queue");
-static int __init zero_bind(struct usb_composite_dev *cdev)
+static int zero_bind(struct usb_composite_dev *cdev)
{
struct f_ss_opts *ss_opts;
struct f_lb_opts *lb_opts;
@@ -400,7 +400,7 @@ static int zero_unbind(struct usb_composite_dev *cdev)
return 0;
}
-static __refdata struct usb_composite_driver zero_driver = {
+static struct usb_composite_driver zero_driver = {
.name = "zero",
.dev = &device_desc,
.strings = dev_strings,
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 2fbedca3c2b4..fc4226462f8f 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1942,7 +1942,7 @@ err_unprepare_fclk:
return retval;
}
-static int __exit at91udc_remove(struct platform_device *pdev)
+static int at91udc_remove(struct platform_device *pdev)
{
struct at91_udc *udc = platform_get_drvdata(pdev);
unsigned long flags;
@@ -2018,7 +2018,7 @@ static int at91udc_resume(struct platform_device *pdev)
#endif
static struct platform_driver at91_udc_driver = {
- .remove = __exit_p(at91udc_remove),
+ .remove = at91udc_remove,
.shutdown = at91udc_shutdown,
.suspend = at91udc_suspend,
.resume = at91udc_resume,
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 4c01953a0869..351d48550c33 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2186,7 +2186,7 @@ static int usba_udc_probe(struct platform_device *pdev)
return 0;
}
-static int __exit usba_udc_remove(struct platform_device *pdev)
+static int usba_udc_remove(struct platform_device *pdev)
{
struct usba_udc *udc;
int i;
@@ -2258,7 +2258,7 @@ static int usba_udc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
static struct platform_driver udc_driver = {
- .remove = __exit_p(usba_udc_remove),
+ .remove = usba_udc_remove,
.driver = {
.name = "atmel_usba_udc",
.pm = &usba_udc_pm_ops,
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 55fcb930f92e..c60022b46a48 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2525,7 +2525,7 @@ err_kfree:
/* Driver removal function
* Free resources and finish pending transactions
*/
-static int __exit fsl_udc_remove(struct platform_device *pdev)
+static int fsl_udc_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -2663,7 +2663,7 @@ static const struct platform_device_id fsl_udc_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
static struct platform_driver udc_driver = {
- .remove = __exit_p(fsl_udc_remove),
+ .remove = fsl_udc_remove,
/* Just for FSL i.mx SoC currently */
.id_table = fsl_udc_devtype,
/* these suspend and resume are not usb suspend and resume */
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index fb4df159d32d..3970f453de49 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1342,7 +1342,7 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
.udc_stop = fusb300_udc_stop,
};
-static int __exit fusb300_remove(struct platform_device *pdev)
+static int fusb300_remove(struct platform_device *pdev)
{
struct fusb300 *fusb300 = platform_get_drvdata(pdev);
@@ -1492,7 +1492,7 @@ clean_up:
}
static struct platform_driver fusb300_driver = {
- .remove = __exit_p(fusb300_remove),
+ .remove = fusb300_remove,
.driver = {
.name = (char *) udc_name,
},
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 8c7c83c93713..309706fe4bf0 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1528,7 +1528,7 @@ static const struct usb_gadget_ops m66592_gadget_ops = {
.pullup = m66592_pullup,
};
-static int __exit m66592_remove(struct platform_device *pdev)
+static int m66592_remove(struct platform_device *pdev)
{
struct m66592 *m66592 = platform_get_drvdata(pdev);
@@ -1695,7 +1695,7 @@ clean_up:
/*-------------------------------------------------------------------------*/
static struct platform_driver m66592_driver = {
- .remove = __exit_p(m66592_remove),
+ .remove = m66592_remove,
.driver = {
.name = (char *) udc_name,
},
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 2495fe9c95c5..0293f7169dee 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1820,7 +1820,7 @@ static const struct usb_gadget_ops r8a66597_gadget_ops = {
.set_selfpowered = r8a66597_set_selfpowered,
};
-static int __exit r8a66597_remove(struct platform_device *pdev)
+static int r8a66597_remove(struct platform_device *pdev)
{
struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
@@ -1974,7 +1974,7 @@ clean_up2:
/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
- .remove = __exit_p(r8a66597_remove),
+ .remove = r8a66597_remove,
.driver = {
.name = (char *) udc_name,
},
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index dd3e9fd31b80..1f24274477ab 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2071,8 +2071,8 @@ static int xudc_probe(struct platform_device *pdev)
/* Map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
udc->addr = devm_ioremap_resource(&pdev->dev, res);
- if (!udc->addr)
- return -ENOMEM;
+ if (IS_ERR(udc->addr))
+ return PTR_ERR(udc->addr);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
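
The udc-xilinx hunk above replaces a NULL test with IS_ERR()/PTR_ERR(), because devm_ioremap_resource() signals failure with an encoded error pointer, never NULL, so the old check could not trigger. A minimal userspace re-implementation of that error-pointer convention — the macro names mirror the kernel's, but this is only an illustration:

/* sketch: ERR_PTR/IS_ERR/PTR_ERR pack a small negative errno into a pointer */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long PTR_ERR(const void *p)  { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *map_registers(int fail)
{
	static int fake_regs;
	return fail ? ERR_PTR(-ENOMEM) : (void *)&fake_regs;
}

int main(void)
{
	void *addr = map_registers(1);

	if (IS_ERR(addr)) {		/* a NULL check would never fire here */
		printf("mapping failed: %ld\n", PTR_ERR(addr));
		return 1;
	}
	return 0;
}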
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 9db74ca7e5b9..275c92e53a59 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -88,13 +88,20 @@ static int ehci_msm_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get memory resource\n");
+ ret = -ENODEV;
goto put_hcd;
}
+
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto put_hcd;
+ }
/*
* OTG driver takes care of PHY initialization, clock management,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f5397a517c54..7d34cbfaf373 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2026,8 +2026,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
break;
case COMP_DEV_ERR:
case COMP_STALL:
+ frame->status = -EPROTO;
+ skip_td = true;
+ break;
case COMP_TX_ERR:
frame->status = -EPROTO;
+ if (event_trb != td->last_trb)
+ return 0;
skip_td = true;
break;
case COMP_STOP:
@@ -2640,7 +2645,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
xhci_halt(xhci);
hw_died:
spin_unlock(&xhci->lock);
- return -ESHUTDOWN;
+ return IRQ_HANDLED;
}
/*
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index dde3959b7a33..59c05653b2ea 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -14,6 +14,13 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xhci-hcd
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR xhci_hcd
+
#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __XHCI_TRACE_H
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8e421b89632d..ea75e8ccd3c1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1267,7 +1267,7 @@ union xhci_trb {
* since the command ring is 64-byte aligned.
* It must also be greater than 16.
*/
-#define TRBS_PER_SEGMENT 64
+#define TRBS_PER_SEGMENT 256
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
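
The TRBS_PER_SEGMENT bump from 64 to 256 keeps the constraints stated in the comment: with 16-byte TRBs a ring segment grows from 1024 to 4096 bytes, which is still a multiple of the 64-byte command-ring alignment and well above the minimum of 16 TRBs. A compile-time check of those constraints, with the constants copied from the hunk:

/* sketch: sanity-check the ring-segment constants at compile time (C11) */
#include <stdio.h>

#define TRBS_PER_SEGMENT	256
#define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT * 16)

_Static_assert(TRBS_PER_SEGMENT > 16, "need room for reserved command TRBs");
_Static_assert(TRB_SEGMENT_SIZE % 64 == 0, "command ring is 64-byte aligned");

int main(void)
{
	printf("segment size: %d bytes\n", TRB_SEGMENT_SIZE);	/* 4096 */
	return 0;
}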
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 82503a7ff6c8..cce22ff1c2eb 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -69,12 +69,6 @@
#define USB_DEVICE_ID_LD_HYBRID 0x2090 /* USB Product ID of Automotive Hybrid */
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0 /* USB Product ID of Heat control */
-#define USB_VENDOR_ID_VERNIER 0x08f7
-#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
-#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
-#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
-#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
-
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define USB_LD_MINOR_BASE 0
#else
@@ -115,10 +109,6 @@ static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
- { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
- { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
- { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
- { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ld_usb_table);
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 1e0e10dd6ba5..3af263cc0caa 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -94,7 +94,7 @@ struct isp1301 {
#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
-#if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE)
+#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
#include <linux/i2c/tps65010.h>
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 84ce2d74894c..9031750e7404 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
+ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 829604d11f3f..f5257af33ecf 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -61,7 +61,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
{ USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1),
.driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65),
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 71fd9da1d6e7..e3b7af8adfb7 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -62,10 +62,6 @@
#define ALCATEL_VENDOR_ID 0x11f7
#define ALCATEL_PRODUCT_ID 0x02df
-/* Samsung I330 phone cradle */
-#define SAMSUNG_VENDOR_ID 0x04e8
-#define SAMSUNG_PRODUCT_ID 0x8001
-
#define SIEMENS_VENDOR_ID 0x11f5
#define SIEMENS_PRODUCT_ID_SX1 0x0001
#define SIEMENS_PRODUCT_ID_X65 0x0003
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index bf2bd40e5f2a..60afb39eb73c 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -95,7 +95,7 @@ static const struct usb_device_id id_table[] = {
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
+ { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 9893d696fc97..f58caa9e6a27 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
}
static int uas_use_uas_driver(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id,
+ unsigned long *flags_ret)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_device *udev = interface_to_usbdev(intf);
@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf,
* this writing the following versions exist:
* ASM1051 - no uas support version
* ASM1051 - with broken (*) uas support
- * ASM1053 - with working uas support
+ * ASM1053 - with working uas support, but problems with large xfers
* ASM1153 - with working uas support
*
* Devices with these chips re-use a number of device-ids over the
@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
} else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
/* Possibly an ASM1051, disable uas */
flags |= US_FL_IGNORE_UAS;
+ } else {
+ /* ASM1053, these have issues with large transfers */
+ flags |= US_FL_MAX_SECTORS_240;
}
}
@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
return 0;
}
+ if (flags_ret)
+ *flags_ret = flags;
+
return 1;
}
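
The uas-detect change above hands the quirk flags it computed back to the caller through an optional out-parameter, so the probe path no longer has to re-derive them. A small standalone sketch of that "boolean result plus optional flags out-parameter" shape; the names and flag values are hypothetical:

/* sketch: a detection routine that optionally reports the flags it derived */
#include <stdbool.h>
#include <stdio.h>

#define FL_IGNORE_FAST		(1UL << 0)
#define FL_MAX_SECTORS_240	(1UL << 1)

static bool can_use_fast_driver(int streams, unsigned long *flags_ret)
{
	unsigned long flags = 0;

	if (streams == 32)
		flags |= FL_IGNORE_FAST;	/* known-problematic firmware */
	else
		flags |= FL_MAX_SECTORS_240;	/* usable, but cap transfers */

	if (flags & FL_IGNORE_FAST)
		return false;

	if (flags_ret)				/* caller may pass NULL */
		*flags_ret = flags;
	return true;
}

int main(void)
{
	unsigned long flags;

	if (can_use_fast_driver(64, &flags))
		printf("usable, flags=%#lx\n", flags);
	if (!can_use_fast_driver(32, NULL))	/* NULL: result only */
		printf("fall back to the generic driver\n");
	return 0;
}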
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 6cdabdc119a7..6d3122afeed3 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
static int uas_slave_alloc(struct scsi_device *sdev)
{
- sdev->hostdata = (void *)sdev->host->hostdata;
+ struct uas_dev_info *devinfo =
+ (struct uas_dev_info *)sdev->host->hostdata;
+
+ sdev->hostdata = devinfo;
/* USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ if (devinfo->flags & US_FL_MAX_SECTORS_64)
+ blk_queue_max_hw_sectors(sdev->request_queue, 64);
+ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
+ blk_queue_max_hw_sectors(sdev->request_queue, 240);
+
return 0;
}
@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long dev_flags;
- if (!uas_use_uas_driver(intf, id))
+ if (!uas_use_uas_driver(intf, id, &dev_flags))
return -ENODEV;
if (uas_switch_interface(udev, intf))
@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->udev = udev;
devinfo->resetting = 0;
devinfo->shutdown = 0;
- devinfo->flags = id->driver_info;
- usb_stor_adjust_quirks(udev, &devinfo->flags);
+ devinfo->flags = dev_flags;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
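
For scale, blk_queue_max_hw_sectors() takes its limit in 512-byte sectors, so the caps applied in uas_slave_alloc() above translate to 32 KiB (64 sectors) and 120 KiB (240 sectors) per command. A quick arithmetic check:

/* sketch: what the sector caps mean in bytes (block-layer sectors are 512 B) */
#include <stdio.h>

int main(void)
{
	const unsigned int sector = 512;
	const unsigned int caps[] = { 64, 240 };

	for (unsigned int i = 0; i < 2; i++)
		printf("%3u sectors -> %7u bytes (%u KiB) per command\n",
		       caps[i], caps[i] * sector, caps[i] * sector / 1024);
	return 0;
}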
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index d684b4b8108f..caf188800c67 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -766,6 +766,13 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_GO_SLOW ),
+/* Reported by Christian Schaller <cschalle@redhat.com> */
+UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000,
+ "LaCie",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_WP_DETECT ),
+
/* Submitted by Joel Bourquard <numlock@freesurf.ch>
* Some versions of this device need the SubClass and Protocol overrides
* while others don't.
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5600c33fcadb..6c10c888f35f 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
- US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
+ US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
+ US_FL_MAX_SECTORS_240);
p = quirks;
while (*p) {
@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'f':
f |= US_FL_NO_REPORT_OPCODES;
break;
+ case 'g':
+ f |= US_FL_MAX_SECTORS_240;
+ break;
case 'h':
f |= US_FL_CAPACITY_HEURISTICS;
break;
@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf,
/* If uas is enabled and this device can do uas then ignore it. */
#if IS_ENABLED(CONFIG_USB_UAS)
- if (uas_use_uas_driver(intf, id))
+ if (uas_use_uas_driver(intf, id, NULL))
return -ENXIO;
#endif
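
The new 'g' case extends usb-storage's per-character quirks= parser, where each letter sets one US_FL_* bit in the flags mask. A standalone sketch of that parsing style — flag names are shortened and only the letters visible in the hunk are handled:

/* sketch: per-letter quirk parsing into a bitmask, usb-storage style */
#include <stdio.h>

#define FL_NO_REPORT_OPCODES	(1UL << 0)	/* 'f' */
#define FL_MAX_SECTORS_240	(1UL << 1)	/* 'g' */
#define FL_CAPACITY_HEURISTICS	(1UL << 2)	/* 'h' */

static unsigned long parse_quirks(const char *p)
{
	unsigned long f = 0;

	for (; *p; p++) {
		switch (*p) {
		case 'f': f |= FL_NO_REPORT_OPCODES;   break;
		case 'g': f |= FL_MAX_SECTORS_240;     break;
		case 'h': f |= FL_CAPACITY_HEURISTICS; break;
		default:  break;		/* unknown letters are ignored */
		}
	}
	return f;
}

int main(void)
{
	printf("quirks \"gh\" -> %#lx\n", parse_quirks("gh"));
	return 0;
}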
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 14e27ab32456..7d092ddc8119 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -13,6 +13,11 @@ config VFIO_SPAPR_EEH
depends on EEH && VFIO_IOMMU_SPAPR_TCE
default n
+config VFIO_VIRQFD
+ tristate
+ depends on VFIO && EVENTFD
+ default n
+
menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
depends on IOMMU_API
@@ -27,3 +32,4 @@ menuconfig VFIO
If you don't know what to do here, say N.
source "drivers/vfio/pci/Kconfig"
+source "drivers/vfio/platform/Kconfig"
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 0b035b12600a..7b8a31f63fea 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,5 +1,9 @@
+vfio_virqfd-y := virqfd.o
+
obj-$(CONFIG_VFIO) += vfio.o
+obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
obj-$(CONFIG_VFIO_PCI) += pci/
+obj-$(CONFIG_VFIO_PLATFORM) += platform/
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index c6bb5da2d2a7..579d83bf5358 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -1,6 +1,7 @@
config VFIO_PCI
tristate "VFIO support for PCI devices"
depends on VFIO && PCI && EVENTFD
+ select VFIO_VIRQFD
help
Support for the PCI VFIO bus driver. This is required to make
use of PCI drivers using the VFIO framework.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index f8a186381ae8..e9851add6f4e 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -11,6 +11,8 @@
* Author: Tom Lyon, pugs@cisco.com
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
@@ -25,6 +27,7 @@
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
+#include <linux/vgaarb.h>
#include "vfio_pci_private.h"
@@ -32,13 +35,81 @@
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
+static char ids[1024] __initdata;
+module_param_string(ids, ids, sizeof(ids), 0);
+MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
+
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
"Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
+#ifdef CONFIG_VFIO_PCI_VGA
+static bool disable_vga;
+module_param(disable_vga, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
+#endif
+
+static bool disable_idle_d3;
+module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_idle_d3,
+ "Disable using the PCI D3 low power state for idle, unused devices");
+
static DEFINE_MUTEX(driver_lock);
+static inline bool vfio_vga_disabled(void)
+{
+#ifdef CONFIG_VFIO_PCI_VGA
+ return disable_vga;
+#else
+ return true;
+#endif
+}
+
+/*
+ * Our VGA arbiter participation is limited since we don't know anything
+ * about the device itself. However, if the device is the only VGA device
+ * downstream of a bridge and VFIO VGA support is disabled, then we can
+ * safely return legacy VGA IO and memory as not decoded since the user
+ * has no way to get to it and routing can be disabled externally at the
+ * bridge.
+ */
+static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
+{
+ struct vfio_pci_device *vdev = opaque;
+ struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
+ unsigned char max_busnr;
+ unsigned int decodes;
+
+ if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+
+ max_busnr = pci_bus_max_busnr(pdev->bus);
+ decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+
+ while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
+ if (tmp == pdev ||
+ pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
+ pci_is_root_bus(tmp->bus))
+ continue;
+
+ if (tmp->bus->number >= pdev->bus->number &&
+ tmp->bus->number <= max_busnr) {
+ pci_dev_put(tmp);
+ decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ break;
+ }
+ }
+
+ return decodes;
+}
+
+static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
+{
+ return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+}
+
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static int vfio_pci_enable(struct vfio_pci_device *vdev)
@@ -48,6 +119,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
u16 cmd;
u8 msix_pos;
+ pci_set_power_state(pdev, PCI_D0);
+
/* Don't allow our initial saved state to include busmaster */
pci_clear_master(pdev);
@@ -93,10 +166,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
} else
vdev->msix_bar = 0xFF;
-#ifdef CONFIG_VFIO_PCI_VGA
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
vdev->has_vga = true;
-#endif
return 0;
}
@@ -153,20 +224,17 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
* Try to reset the device. The success of this is dependent on
* being able to lock the device, which is not always possible.
*/
- if (vdev->reset_works) {
- int ret = pci_try_reset_function(pdev);
- if (ret)
- pr_warn("%s: Failed to reset device %s (%d)\n",
- __func__, dev_name(&pdev->dev), ret);
- else
- vdev->needs_reset = false;
- }
+ if (vdev->reset_works && !pci_try_reset_function(pdev))
+ vdev->needs_reset = false;
pci_restore_state(pdev);
out:
pci_disable_device(pdev);
vfio_pci_try_bus_reset(vdev);
+
+ if (!disable_idle_d3)
+ pci_set_power_state(pdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
@@ -839,8 +907,14 @@ static void vfio_pci_request(void *device_data, unsigned int count)
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
- dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+ if (!(count % 10))
+ dev_notice_ratelimited(&vdev->pdev->dev,
+ "Relaying device request to user (#%u)\n",
+ count);
eventfd_signal(vdev->req_trigger, 1);
+ } else if (count == 0) {
+ dev_warn(&vdev->pdev->dev,
+ "No device request channel registered, blocked until released by user\n");
}
mutex_unlock(&vdev->igate);
@@ -885,6 +959,27 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
iommu_group_put(group);
kfree(vdev);
+ return ret;
+ }
+
+ if (vfio_pci_is_vga(pdev)) {
+ vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
+ vga_set_legacy_decoding(pdev,
+ vfio_pci_set_vga_decode(vdev, false));
+ }
+
+ if (!disable_idle_d3) {
+ /*
+ * pci-core sets the device power state to an unknown value at
+ * bootup and after being removed from a driver. The only
+ * transition it allows from this unknown state is to D0, which
+ * typically happens when a driver calls pci_enable_device().
+ * We're not ready to enable the device yet, but we do want to
+ * be able to get to D3. Therefore first do a D0 transition
+ * before going to D3.
+ */
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_power_state(pdev, PCI_D3hot);
}
return ret;
@@ -895,10 +990,21 @@ static void vfio_pci_remove(struct pci_dev *pdev)
struct vfio_pci_device *vdev;
vdev = vfio_del_group_dev(&pdev->dev);
- if (vdev) {
- iommu_group_put(pdev->dev.iommu_group);
- kfree(vdev);
+ if (!vdev)
+ return;
+
+ iommu_group_put(pdev->dev.iommu_group);
+ kfree(vdev);
+
+ if (vfio_pci_is_vga(pdev)) {
+ vga_client_register(pdev, NULL, NULL, NULL);
+ vga_set_legacy_decoding(pdev,
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
}
+
+ if (!disable_idle_d3)
+ pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
@@ -1017,10 +1123,13 @@ static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
put_devs:
for (i = 0; i < devs.cur_index; i++) {
- if (!ret) {
- tmp = vfio_device_data(devs.devices[i]);
+ tmp = vfio_device_data(devs.devices[i]);
+ if (!ret)
tmp->needs_reset = false;
- }
+
+ if (!tmp->refcnt && !disable_idle_d3)
+ pci_set_power_state(tmp->pdev, PCI_D3hot);
+
vfio_device_put(devs.devices[i]);
}
@@ -1030,10 +1139,50 @@ put_devs:
static void __exit vfio_pci_cleanup(void)
{
pci_unregister_driver(&vfio_pci_driver);
- vfio_pci_virqfd_exit();
vfio_pci_uninit_perm_bits();
}
+static void __init vfio_pci_fill_ids(void)
+{
+ char *p, *id;
+ int rc;
+
+ /* no ids passed actually */
+ if (ids[0] == '\0')
+ return;
+
+ /* add ids specified in the module parameter */
+ p = ids;
+ while ((id = strsep(&p, ","))) {
+ unsigned int vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ int fields;
+
+ if (!strlen(id))
+ continue;
+
+ fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask);
+
+ if (fields < 2) {
+ pr_warn("invalid id string \"%s\"\n", id);
+ continue;
+ }
+
+ rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
+ subvendor, subdevice, class, class_mask, 0);
+ if (rc)
+ pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+ vendor, device, subvendor, subdevice,
+ class, class_mask, rc);
+ else
+ pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+ vendor, device, subvendor, subdevice,
+ class, class_mask);
+ }
+}
+
static int __init vfio_pci_init(void)
{
int ret;
@@ -1043,21 +1192,16 @@ static int __init vfio_pci_init(void)
if (ret)
return ret;
- /* Start the virqfd cleanup handler */
- ret = vfio_pci_virqfd_init();
- if (ret)
- goto out_virqfd;
-
/* Register and scan for devices */
ret = pci_register_driver(&vfio_pci_driver);
if (ret)
goto out_driver;
+ vfio_pci_fill_ids();
+
return 0;
out_driver:
- vfio_pci_virqfd_exit();
-out_virqfd:
vfio_pci_uninit_perm_bits();
return ret;
}
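
vfio_pci_fill_ids() above walks the new ids= module parameter with strsep() over commas and sscanf() over the colon-separated hex fields before handing each entry to pci_add_dynid(). A plain userspace version of just the parsing step; the sample ID strings and output format are my own:

/* sketch: parse "vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]" */
#define _DEFAULT_SOURCE		/* for strsep() */
#include <stdio.h>
#include <string.h>

#define PCI_ANY_ID	0xffffffffU

int main(void)
{
	char ids[] = "10de:13c2,8086:156f:0000:0000:0200ff:ffffff";
	char *p = ids, *id;

	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			     subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);
		if (fields < 2) {		/* need at least vendor:device */
			printf("invalid id string \"%s\"\n", id);
			continue;
		}
		printf("add %04x:%04x [%04x:%04x] class %#08x/%08x\n",
		       vendor, device, subvendor, subdevice, class, class_mask);
	}
	return 0;
}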
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 2027a27546ef..1f577b4ac126 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -19,230 +19,19 @@
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
-#include <linux/poll.h>
#include <linux/vfio.h>
#include <linux/wait.h>
-#include <linux/workqueue.h>
#include <linux/slab.h>
#include "vfio_pci_private.h"
/*
- * IRQfd - generic
- */
-struct virqfd {
- struct vfio_pci_device *vdev;
- struct eventfd_ctx *eventfd;
- int (*handler)(struct vfio_pci_device *, void *);
- void (*thread)(struct vfio_pci_device *, void *);
- void *data;
- struct work_struct inject;
- wait_queue_t wait;
- poll_table pt;
- struct work_struct shutdown;
- struct virqfd **pvirqfd;
-};
-
-static struct workqueue_struct *vfio_irqfd_cleanup_wq;
-
-int __init vfio_pci_virqfd_init(void)
-{
- vfio_irqfd_cleanup_wq =
- create_singlethread_workqueue("vfio-irqfd-cleanup");
- if (!vfio_irqfd_cleanup_wq)
- return -ENOMEM;
-
- return 0;
-}
-
-void vfio_pci_virqfd_exit(void)
-{
- destroy_workqueue(vfio_irqfd_cleanup_wq);
-}
-
-static void virqfd_deactivate(struct virqfd *virqfd)
-{
- queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
-}
-
-static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
- struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
- unsigned long flags = (unsigned long)key;
-
- if (flags & POLLIN) {
- /* An event has been signaled, call function */
- if ((!virqfd->handler ||
- virqfd->handler(virqfd->vdev, virqfd->data)) &&
- virqfd->thread)
- schedule_work(&virqfd->inject);
- }
-
- if (flags & POLLHUP) {
- unsigned long flags;
- spin_lock_irqsave(&virqfd->vdev->irqlock, flags);
-
- /*
- * The eventfd is closing, if the virqfd has not yet been
- * queued for release, as determined by testing whether the
- * vdev pointer to it is still valid, queue it now. As
- * with kvm irqfds, we know we won't race against the virqfd
- * going away because we hold wqh->lock to get here.
- */
- if (*(virqfd->pvirqfd) == virqfd) {
- *(virqfd->pvirqfd) = NULL;
- virqfd_deactivate(virqfd);
- }
-
- spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
- }
-
- return 0;
-}
-
-static void virqfd_ptable_queue_proc(struct file *file,
- wait_queue_head_t *wqh, poll_table *pt)
-{
- struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
- add_wait_queue(wqh, &virqfd->wait);
-}
-
-static void virqfd_shutdown(struct work_struct *work)
-{
- struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
- u64 cnt;
-
- eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
- flush_work(&virqfd->inject);
- eventfd_ctx_put(virqfd->eventfd);
-
- kfree(virqfd);
-}
-
-static void virqfd_inject(struct work_struct *work)
-{
- struct virqfd *virqfd = container_of(work, struct virqfd, inject);
- if (virqfd->thread)
- virqfd->thread(virqfd->vdev, virqfd->data);
-}
-
-static int virqfd_enable(struct vfio_pci_device *vdev,
- int (*handler)(struct vfio_pci_device *, void *),
- void (*thread)(struct vfio_pci_device *, void *),
- void *data, struct virqfd **pvirqfd, int fd)
-{
- struct fd irqfd;
- struct eventfd_ctx *ctx;
- struct virqfd *virqfd;
- int ret = 0;
- unsigned int events;
-
- virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
- if (!virqfd)
- return -ENOMEM;
-
- virqfd->pvirqfd = pvirqfd;
- virqfd->vdev = vdev;
- virqfd->handler = handler;
- virqfd->thread = thread;
- virqfd->data = data;
-
- INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
- INIT_WORK(&virqfd->inject, virqfd_inject);
-
- irqfd = fdget(fd);
- if (!irqfd.file) {
- ret = -EBADF;
- goto err_fd;
- }
-
- ctx = eventfd_ctx_fileget(irqfd.file);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto err_ctx;
- }
-
- virqfd->eventfd = ctx;
-
- /*
- * virqfds can be released by closing the eventfd or directly
- * through ioctl. These are both done through a workqueue, so
- * we update the pointer to the virqfd under lock to avoid
- * pushing multiple jobs to release the same virqfd.
- */
- spin_lock_irq(&vdev->irqlock);
-
- if (*pvirqfd) {
- spin_unlock_irq(&vdev->irqlock);
- ret = -EBUSY;
- goto err_busy;
- }
- *pvirqfd = virqfd;
-
- spin_unlock_irq(&vdev->irqlock);
-
- /*
- * Install our own custom wake-up handling so we are notified via
- * a callback whenever someone signals the underlying eventfd.
- */
- init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
- init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
-
- events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
-
- /*
- * Check if there was an event already pending on the eventfd
- * before we registered and trigger it as if we didn't miss it.
- */
- if (events & POLLIN) {
- if ((!handler || handler(vdev, data)) && thread)
- schedule_work(&virqfd->inject);
- }
-
- /*
- * Do not drop the file until the irqfd is fully initialized,
- * otherwise we might race against the POLLHUP.
- */
- fdput(irqfd);
-
- return 0;
-err_busy:
- eventfd_ctx_put(ctx);
-err_ctx:
- fdput(irqfd);
-err_fd:
- kfree(virqfd);
-
- return ret;
-}
-
-static void virqfd_disable(struct vfio_pci_device *vdev,
- struct virqfd **pvirqfd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&vdev->irqlock, flags);
-
- if (*pvirqfd) {
- virqfd_deactivate(*pvirqfd);
- *pvirqfd = NULL;
- }
-
- spin_unlock_irqrestore(&vdev->irqlock, flags);
-
- /*
- * Block until we know all outstanding shutdown jobs have completed.
- * Even if we don't queue the job, flush the wq to be sure it's
- * been released.
- */
- flush_workqueue(vfio_irqfd_cleanup_wq);
-}
-
-/*
* INTx
*/
-static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
+static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
+ struct vfio_pci_device *vdev = opaque;
+
if (likely(is_intx(vdev) && !vdev->virq_disabled))
eventfd_signal(vdev->ctx[0].trigger, 1);
}
@@ -285,9 +74,9 @@ void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
* a signal is necessary, which can then be handled via a work queue
* or directly depending on the caller.
*/
-static int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev,
- void *unused)
+static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
+ struct vfio_pci_device *vdev = opaque;
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
int ret = 0;
@@ -440,8 +229,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
vfio_intx_set_signal(vdev, -1);
- virqfd_disable(vdev, &vdev->ctx[0].unmask);
- virqfd_disable(vdev, &vdev->ctx[0].mask);
+ vfio_virqfd_disable(&vdev->ctx[0].unmask);
+ vfio_virqfd_disable(&vdev->ctx[0].mask);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
kfree(vdev->ctx);
@@ -605,8 +394,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
for (i = 0; i < vdev->num_ctx; i++) {
- virqfd_disable(vdev, &vdev->ctx[i].unmask);
- virqfd_disable(vdev, &vdev->ctx[i].mask);
+ vfio_virqfd_disable(&vdev->ctx[i].unmask);
+ vfio_virqfd_disable(&vdev->ctx[i].mask);
}
if (msix) {
@@ -639,11 +428,12 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
if (fd >= 0)
- return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
- vfio_send_intx_eventfd, NULL,
- &vdev->ctx[0].unmask, fd);
+ return vfio_virqfd_enable((void *) vdev,
+ vfio_pci_intx_unmask_handler,
+ vfio_send_intx_eventfd, NULL,
+ &vdev->ctx[0].unmask, fd);
- virqfd_disable(vdev, &vdev->ctx[0].unmask);
+ vfio_virqfd_disable(&vdev->ctx[0].unmask);
}
return 0;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index c9f9b323f152..ae0e1b4c1711 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -87,9 +87,6 @@ extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
extern int vfio_pci_init_perm_bits(void);
extern void vfio_pci_uninit_perm_bits(void);
-extern int vfio_pci_virqfd_init(void);
-extern void vfio_pci_virqfd_exit(void);
-
extern int vfio_config_init(struct vfio_pci_device *vdev);
extern void vfio_config_free(struct vfio_pci_device *vdev);
#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/platform/Kconfig b/drivers/vfio/platform/Kconfig
new file mode 100644
index 000000000000..9a4403e2a36c
--- /dev/null
+++ b/drivers/vfio/platform/Kconfig
@@ -0,0 +1,20 @@
+config VFIO_PLATFORM
+ tristate "VFIO support for platform devices"
+ depends on VFIO && EVENTFD && ARM
+ select VFIO_VIRQFD
+ help
+ Support for platform devices with VFIO. This is required to make
+ use of platform devices present on the system using the VFIO
+ framework.
+
+ If you don't know what to do here, say N.
+
+config VFIO_AMBA
+ tristate "VFIO support for AMBA devices"
+ depends on VFIO_PLATFORM && ARM_AMBA
+ help
+ Support for ARM AMBA devices with VFIO. This is required to make
+ use of ARM AMBA devices present on the system using the VFIO
+ framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/platform/Makefile b/drivers/vfio/platform/Makefile
new file mode 100644
index 000000000000..81de144c0eaa
--- /dev/null
+++ b/drivers/vfio/platform/Makefile
@@ -0,0 +1,8 @@
+
+vfio-platform-y := vfio_platform.o vfio_platform_common.o vfio_platform_irq.o
+
+obj-$(CONFIG_VFIO_PLATFORM) += vfio-platform.o
+
+vfio-amba-y := vfio_amba.o
+
+obj-$(CONFIG_VFIO_AMBA) += vfio-amba.o
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
new file mode 100644
index 000000000000..ff0331f72526
--- /dev/null
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2013 - Virtual Open Systems
+ * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vfio.h>
+#include <linux/amba/bus.h>
+
+#include "vfio_platform_private.h"
+
+#define DRIVER_VERSION "0.10"
+#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>"
+#define DRIVER_DESC "VFIO for AMBA devices - User Level meta-driver"
+
+/* probing devices from the AMBA bus */
+
+static struct resource *get_amba_resource(struct vfio_platform_device *vdev,
+ int i)
+{
+ struct amba_device *adev = (struct amba_device *) vdev->opaque;
+
+ if (i == 0)
+ return &adev->res;
+
+ return NULL;
+}
+
+static int get_amba_irq(struct vfio_platform_device *vdev, int i)
+{
+ struct amba_device *adev = (struct amba_device *) vdev->opaque;
+ int ret = 0;
+
+ if (i < AMBA_NR_IRQS)
+ ret = adev->irq[i];
+
+ /* zero is an unset IRQ for AMBA devices */
+ return ret ? ret : -ENXIO;
+}
+
+static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct vfio_platform_device *vdev;
+ int ret;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->name = kasprintf(GFP_KERNEL, "vfio-amba-%08x", adev->periphid);
+ if (!vdev->name) {
+ kfree(vdev);
+ return -ENOMEM;
+ }
+
+ vdev->opaque = (void *) adev;
+ vdev->flags = VFIO_DEVICE_FLAGS_AMBA;
+ vdev->get_resource = get_amba_resource;
+ vdev->get_irq = get_amba_irq;
+
+ ret = vfio_platform_probe_common(vdev, &adev->dev);
+ if (ret) {
+ kfree(vdev->name);
+ kfree(vdev);
+ }
+
+ return ret;
+}
+
+static int vfio_amba_remove(struct amba_device *adev)
+{
+ struct vfio_platform_device *vdev;
+
+ vdev = vfio_platform_remove_common(&adev->dev);
+ if (vdev) {
+ kfree(vdev->name);
+ kfree(vdev);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct amba_id pl330_ids[] = {
+ { 0, 0 },
+};
+
+MODULE_DEVICE_TABLE(amba, pl330_ids);
+
+static struct amba_driver vfio_amba_driver = {
+ .probe = vfio_amba_probe,
+ .remove = vfio_amba_remove,
+ .id_table = pl330_ids,
+ .drv = {
+ .name = "vfio-amba",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_amba_driver(vfio_amba_driver);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
new file mode 100644
index 000000000000..cef645c83996
--- /dev/null
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2013 - Virtual Open Systems
+ * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vfio.h>
+#include <linux/platform_device.h>
+
+#include "vfio_platform_private.h"
+
+#define DRIVER_VERSION "0.10"
+#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>"
+#define DRIVER_DESC "VFIO for platform devices - User Level meta-driver"
+
+/* probing devices from the linux platform bus */
+
+static struct resource *get_platform_resource(struct vfio_platform_device *vdev,
+ int num)
+{
+ struct platform_device *dev = (struct platform_device *) vdev->opaque;
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) {
+ if (!num)
+ return r;
+
+ num--;
+ }
+ }
+ return NULL;
+}
+
+static int get_platform_irq(struct vfio_platform_device *vdev, int i)
+{
+ struct platform_device *pdev = (struct platform_device *) vdev->opaque;
+
+ return platform_get_irq(pdev, i);
+}
+
+static int vfio_platform_probe(struct platform_device *pdev)
+{
+ struct vfio_platform_device *vdev;
+ int ret;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->opaque = (void *) pdev;
+ vdev->name = pdev->name;
+ vdev->flags = VFIO_DEVICE_FLAGS_PLATFORM;
+ vdev->get_resource = get_platform_resource;
+ vdev->get_irq = get_platform_irq;
+
+ ret = vfio_platform_probe_common(vdev, &pdev->dev);
+ if (ret)
+ kfree(vdev);
+
+ return ret;
+}
+
+static int vfio_platform_remove(struct platform_device *pdev)
+{
+ struct vfio_platform_device *vdev;
+
+ vdev = vfio_platform_remove_common(&pdev->dev);
+ if (vdev) {
+ kfree(vdev);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct platform_driver vfio_platform_driver = {
+ .probe = vfio_platform_probe,
+ .remove = vfio_platform_remove,
+ .driver = {
+ .name = "vfio-platform",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(vfio_platform_driver);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
new file mode 100644
index 000000000000..abcff7a1aa66
--- /dev/null
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (C) 2013 - Virtual Open Systems
+ * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+#include "vfio_platform_private.h"
+
+static DEFINE_MUTEX(driver_lock);
+
+static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
+{
+ int cnt = 0, i;
+
+ while (vdev->get_resource(vdev, cnt))
+ cnt++;
+
+ vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
+ GFP_KERNEL);
+ if (!vdev->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ struct resource *res =
+ vdev->get_resource(vdev, i);
+
+ if (!res)
+ goto err;
+
+ vdev->regions[i].addr = res->start;
+ vdev->regions[i].size = resource_size(res);
+ vdev->regions[i].flags = 0;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_MEM:
+ vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
+ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
+ if (!(res->flags & IORESOURCE_READONLY))
+ vdev->regions[i].flags |=
+ VFIO_REGION_INFO_FLAG_WRITE;
+
+ /*
+ * Only regions addressed with PAGE granularity may be
+ * MMAPed securely.
+ */
+ if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
+ !(vdev->regions[i].size & ~PAGE_MASK))
+ vdev->regions[i].flags |=
+ VFIO_REGION_INFO_FLAG_MMAP;
+
+ break;
+ case IORESOURCE_IO:
+ vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
+ break;
+ default:
+ goto err;
+ }
+ }
+
+ vdev->num_regions = cnt;
+
+ return 0;
+err:
+ kfree(vdev->regions);
+ return -EINVAL;
+}
+
+static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
+{
+ int i;
+
+ for (i = 0; i < vdev->num_regions; i++)
+ iounmap(vdev->regions[i].ioaddr);
+
+ vdev->num_regions = 0;
+ kfree(vdev->regions);
+}
+
+static void vfio_platform_release(void *device_data)
+{
+ struct vfio_platform_device *vdev = device_data;
+
+ mutex_lock(&driver_lock);
+
+ if (!(--vdev->refcnt)) {
+ vfio_platform_regions_cleanup(vdev);
+ vfio_platform_irq_cleanup(vdev);
+ }
+
+ mutex_unlock(&driver_lock);
+
+ module_put(THIS_MODULE);
+}
+
+static int vfio_platform_open(void *device_data)
+{
+ struct vfio_platform_device *vdev = device_data;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ mutex_lock(&driver_lock);
+
+ if (!vdev->refcnt) {
+ ret = vfio_platform_regions_init(vdev);
+ if (ret)
+ goto err_reg;
+
+ ret = vfio_platform_irq_init(vdev);
+ if (ret)
+ goto err_irq;
+ }
+
+ vdev->refcnt++;
+
+ mutex_unlock(&driver_lock);
+ return 0;
+
+err_irq:
+ vfio_platform_regions_cleanup(vdev);
+err_reg:
+ mutex_unlock(&driver_lock);
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static long vfio_platform_ioctl(void *device_data,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_platform_device *vdev = device_data;
+ unsigned long minsz;
+
+ if (cmd == VFIO_DEVICE_GET_INFO) {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = vdev->flags;
+ info.num_regions = vdev->num_regions;
+ info.num_irqs = vdev->num_irqs;
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_region_info info;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.index >= vdev->num_regions)
+ return -EINVAL;
+
+ /* map offset to the physical address */
+ info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
+ info.size = vdev->regions[info.index].size;
+ info.flags = vdev->regions[info.index].flags;
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.index >= vdev->num_irqs)
+ return -EINVAL;
+
+ info.flags = vdev->irqs[info.index].flags;
+ info.count = vdev->irqs[info.index].count;
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz)
+ return -EINVAL;
+
+ if (hdr.index >= vdev->num_irqs)
+ return -EINVAL;
+
+ if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
+ VFIO_IRQ_SET_ACTION_TYPE_MASK))
+ return -EINVAL;
+
+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+ size_t size;
+
+ if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
+ size = sizeof(uint8_t);
+ else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
+ size = sizeof(int32_t);
+ else
+ return -EINVAL;
+
+ if (hdr.argsz - minsz < size)
+ return -EINVAL;
+
+ data = memdup_user((void __user *)(arg + minsz), size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ mutex_lock(&vdev->igate);
+
+ ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
+ hdr.start, hdr.count, data);
+ mutex_unlock(&vdev->igate);
+ kfree(data);
+
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_RESET)
+ return -EINVAL;
+
+ return -ENOTTY;
+}
+
+static ssize_t vfio_platform_read_mmio(struct vfio_platform_region reg,
+ char __user *buf, size_t count,
+ loff_t off)
+{
+ unsigned int done = 0;
+
+ if (!reg.ioaddr) {
+ reg.ioaddr =
+ ioremap_nocache(reg.addr, reg.size);
+
+ if (!reg.ioaddr)
+ return -ENOMEM;
+ }
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(off % 4)) {
+ u32 val;
+
+ val = ioread32(reg.ioaddr + off);
+ if (copy_to_user(buf, &val, 4))
+ goto err;
+
+ filled = 4;
+ } else if (count >= 2 && !(off % 2)) {
+ u16 val;
+
+ val = ioread16(reg.ioaddr + off);
+ if (copy_to_user(buf, &val, 2))
+ goto err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ val = ioread8(reg.ioaddr + off);
+ if (copy_to_user(buf, &val, 1))
+ goto err;
+
+ filled = 1;
+ }
+
+
+ count -= filled;
+ done += filled;
+ off += filled;
+ buf += filled;
+ }
+
+ return done;
+err:
+ return -EFAULT;
+}
+
+static ssize_t vfio_platform_read(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_platform_device *vdev = device_data;
+ unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
+ loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
+
+ if (index >= vdev->num_regions)
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
+ return -EINVAL;
+
+ if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
+ return vfio_platform_read_mmio(vdev->regions[index],
+ buf, count, off);
+ else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
+ return -EINVAL; /* not implemented */
+
+ return -EINVAL;
+}
+
+static ssize_t vfio_platform_write_mmio(struct vfio_platform_region reg,
+ const char __user *buf, size_t count,
+ loff_t off)
+{
+ unsigned int done = 0;
+
+ if (!reg.ioaddr) {
+ reg.ioaddr =
+ ioremap_nocache(reg.addr, reg.size);
+
+ if (!reg.ioaddr)
+ return -ENOMEM;
+ }
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(off % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, 4))
+ goto err;
+ iowrite32(val, reg.ioaddr + off);
+
+ filled = 4;
+ } else if (count >= 2 && !(off % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, 2))
+ goto err;
+ iowrite16(val, reg.ioaddr + off);
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, 1))
+ goto err;
+ iowrite8(val, reg.ioaddr + off);
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ off += filled;
+ buf += filled;
+ }
+
+ return done;
+err:
+ return -EFAULT;
+}
+
+static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_platform_device *vdev = device_data;
+ unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
+ loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
+
+ if (index >= vdev->num_regions)
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
+ return -EINVAL;
+
+ if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
+ return vfio_platform_write_mmio(vdev->regions[index],
+ buf, count, off);
+ else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
+ return -EINVAL; /* not implemented */
+
+ return -EINVAL;
+}
+
+static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
+ struct vm_area_struct *vma)
+{
+ u64 req_len, pgoff, req_start;
+
+ req_len = vma->vm_end - vma->vm_start;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
+ if (region.size < PAGE_SIZE || req_start + req_len > region.size)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
+
+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ req_len, vma->vm_page_prot);
+}
+
+static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
+{
+ struct vfio_platform_device *vdev = device_data;
+ unsigned int index;
+
+ index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+ if (index >= vdev->num_regions)
+ return -EINVAL;
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+ if (vma->vm_end & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
+ && (vma->vm_flags & VM_READ))
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
+ && (vma->vm_flags & VM_WRITE))
+ return -EINVAL;
+
+ vma->vm_private_data = vdev;
+
+ if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
+ return vfio_platform_mmap_mmio(vdev->regions[index], vma);
+
+ else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
+ return -EINVAL; /* not implemented */
+
+ return -EINVAL;
+}
+
+static const struct vfio_device_ops vfio_platform_ops = {
+ .name = "vfio-platform",
+ .open = vfio_platform_open,
+ .release = vfio_platform_release,
+ .ioctl = vfio_platform_ioctl,
+ .read = vfio_platform_read,
+ .write = vfio_platform_write,
+ .mmap = vfio_platform_mmap,
+};
+
+int vfio_platform_probe_common(struct vfio_platform_device *vdev,
+ struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ if (!vdev)
+ return -EINVAL;
+
+ group = iommu_group_get(dev);
+ if (!group) {
+ pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
+ return -EINVAL;
+ }
+
+ ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
+ if (ret) {
+ iommu_group_put(group);
+ return ret;
+ }
+
+ mutex_init(&vdev->igate);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
+
+struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
+{
+ struct vfio_platform_device *vdev;
+
+ vdev = vfio_del_group_dev(dev);
+ if (vdev)
+ iommu_group_put(dev->iommu_group);
+
+ return vdev;
+}
+EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
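
As a quick illustration of the offset encoding used by the ioctl/read/write/mmap paths above: a region's file offset is just its index shifted by VFIO_PLATFORM_OFFSET_SHIFT, which is also what VFIO_DEVICE_GET_REGION_INFO reports back in info.offset. A standalone sketch, illustration only (a real device fd and region sizes would come from an actual VFIO session):

#include <stdint.h>
#include <stdio.h>

#define VFIO_PLATFORM_OFFSET_SHIFT	40

/* mirrors VFIO_PLATFORM_INDEX_TO_OFFSET() from the patch */
static uint64_t region_offset(unsigned int index)
{
	return (uint64_t)index << VFIO_PLATFORM_OFFSET_SHIFT;
}

int main(void)
{
	unsigned int index;

	for (index = 0; index < 3; index++)
		printf("region %u -> file offset %#llx\n",
		       index, (unsigned long long)region_offset(index));

	/*
	 * With a real device fd, this offset is what gets passed to
	 * pread()/pwrite() on the device, or as the offset argument of
	 * mmap() for regions that advertise VFIO_REGION_INFO_FLAG_MMAP.
	 */
	return 0;
}
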
diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
new file mode 100644
index 000000000000..88bba57b30a8
--- /dev/null
+++ b/drivers/vfio/platform/vfio_platform_irq.c
@@ -0,0 +1,336 @@
+/*
+ * VFIO platform devices interrupt handling
+ *
+ * Copyright (C) 2013 - Virtual Open Systems
+ * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/eventfd.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vfio.h>
+#include <linux/irq.h>
+
+#include "vfio_platform_private.h"
+
+static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_ctx->lock, flags);
+
+ if (!irq_ctx->masked) {
+ disable_irq_nosync(irq_ctx->hwirq);
+ irq_ctx->masked = true;
+ }
+
+ spin_unlock_irqrestore(&irq_ctx->lock, flags);
+}
+
+static int vfio_platform_mask_handler(void *opaque, void *unused)
+{
+ struct vfio_platform_irq *irq_ctx = opaque;
+
+ vfio_platform_mask(irq_ctx);
+
+ return 0;
+}
+
+static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags,
+ void *data)
+{
+ if (start != 0 || count != 1)
+ return -EINVAL;
+
+ if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+
+ if (fd >= 0)
+ return vfio_virqfd_enable((void *) &vdev->irqs[index],
+ vfio_platform_mask_handler,
+ NULL, NULL,
+ &vdev->irqs[index].mask, fd);
+
+ vfio_virqfd_disable(&vdev->irqs[index].mask);
+ return 0;
+ }
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_platform_mask(&vdev->irqs[index]);
+
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t mask = *(uint8_t *)data;
+
+ if (mask)
+ vfio_platform_mask(&vdev->irqs[index]);
+ }
+
+ return 0;
+}
+
+static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_ctx->lock, flags);
+
+ if (irq_ctx->masked) {
+ enable_irq(irq_ctx->hwirq);
+ irq_ctx->masked = false;
+ }
+
+ spin_unlock_irqrestore(&irq_ctx->lock, flags);
+}
+
+static int vfio_platform_unmask_handler(void *opaque, void *unused)
+{
+ struct vfio_platform_irq *irq_ctx = opaque;
+
+ vfio_platform_unmask(irq_ctx);
+
+ return 0;
+}
+
+static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags,
+ void *data)
+{
+ if (start != 0 || count != 1)
+ return -EINVAL;
+
+ if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+
+ if (fd >= 0)
+ return vfio_virqfd_enable((void *) &vdev->irqs[index],
+ vfio_platform_unmask_handler,
+ NULL, NULL,
+ &vdev->irqs[index].unmask,
+ fd);
+
+ vfio_virqfd_disable(&vdev->irqs[index].unmask);
+ return 0;
+ }
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_platform_unmask(&vdev->irqs[index]);
+
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t unmask = *(uint8_t *)data;
+
+ if (unmask)
+ vfio_platform_unmask(&vdev->irqs[index]);
+ }
+
+ return 0;
+}
+
+static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+{
+ struct vfio_platform_irq *irq_ctx = dev_id;
+ unsigned long flags;
+ int ret = IRQ_NONE;
+
+ spin_lock_irqsave(&irq_ctx->lock, flags);
+
+ if (!irq_ctx->masked) {
+ ret = IRQ_HANDLED;
+
+ /* automask maskable interrupts */
+ disable_irq_nosync(irq_ctx->hwirq);
+ irq_ctx->masked = true;
+ }
+
+ spin_unlock_irqrestore(&irq_ctx->lock, flags);
+
+ if (ret == IRQ_HANDLED)
+ eventfd_signal(irq_ctx->trigger, 1);
+
+ return ret;
+}
+
+static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
+{
+ struct vfio_platform_irq *irq_ctx = dev_id;
+
+ eventfd_signal(irq_ctx->trigger, 1);
+
+ return IRQ_HANDLED;
+}
+
+static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
+ int fd, irq_handler_t handler)
+{
+ struct vfio_platform_irq *irq = &vdev->irqs[index];
+ struct eventfd_ctx *trigger;
+ int ret;
+
+ if (irq->trigger) {
+ free_irq(irq->hwirq, irq);
+ kfree(irq->name);
+ eventfd_ctx_put(irq->trigger);
+ irq->trigger = NULL;
+ }
+
+ if (fd < 0) /* Disable only */
+ return 0;
+
+ irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
+ irq->hwirq, vdev->name);
+ if (!irq->name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ kfree(irq->name);
+ return PTR_ERR(trigger);
+ }
+
+ irq->trigger = trigger;
+
+ irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+ ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
+ if (ret) {
+ kfree(irq->name);
+ eventfd_ctx_put(trigger);
+ irq->trigger = NULL;
+ return ret;
+ }
+
+ if (!irq->masked)
+ enable_irq(irq->hwirq);
+
+ return 0;
+}
+
+static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags,
+ void *data)
+{
+ struct vfio_platform_irq *irq = &vdev->irqs[index];
+ irq_handler_t handler;
+
+ if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
+ handler = vfio_automasked_irq_handler;
+ else
+ handler = vfio_irq_handler;
+
+ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
+ return vfio_set_trigger(vdev, index, -1, handler);
+
+ if (start != 0 || count != 1)
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+
+ return vfio_set_trigger(vdev, index, fd, handler);
+ }
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ handler(irq->hwirq, irq);
+
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t trigger = *(uint8_t *)data;
+
+ if (trigger)
+ handler(irq->hwirq, irq);
+ }
+
+ return 0;
+}
+
+int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ uint32_t flags, unsigned index, unsigned start,
+ unsigned count, void *data)
+{
+ int (*func)(struct vfio_platform_device *vdev, unsigned index,
+ unsigned start, unsigned count, uint32_t flags,
+ void *data) = NULL;
+
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = vfio_platform_set_irq_mask;
+ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ func = vfio_platform_set_irq_unmask;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = vfio_platform_set_irq_trigger;
+ break;
+ }
+
+ if (!func)
+ return -ENOTTY;
+
+ return func(vdev, index, start, count, flags, data);
+}
+
+int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+{
+ int cnt = 0, i;
+
+ while (vdev->get_irq(vdev, cnt) >= 0)
+ cnt++;
+
+ vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL);
+ if (!vdev->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ int hwirq = vdev->get_irq(vdev, i);
+
+ if (hwirq < 0)
+ goto err;
+
+ spin_lock_init(&vdev->irqs[i].lock);
+
+ vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
+
+ if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
+ vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
+ | VFIO_IRQ_INFO_AUTOMASKED;
+
+ vdev->irqs[i].count = 1;
+ vdev->irqs[i].hwirq = hwirq;
+ vdev->irqs[i].masked = false;
+ }
+
+ vdev->num_irqs = cnt;
+
+ return 0;
+err:
+ kfree(vdev->irqs);
+ return -EINVAL;
+}
+
+void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
+{
+ int i;
+
+ for (i = 0; i < vdev->num_irqs; i++)
+ vfio_set_trigger(vdev, i, -1, NULL);
+
+ vdev->num_irqs = 0;
+ kfree(vdev->irqs);
+}
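
For context, a hedged userspace sketch of how the trigger path above is typically exercised: an eventfd is handed to the kernel with VFIO_DEVICE_SET_IRQS using DATA_EVENTFD and ACTION_TRIGGER, which lands in vfio_platform_set_irq_trigger(). The device fd is assumed to come from an existing VFIO group/container setup and is not shown here.

#include <linux/vfio.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Returns the eventfd (signalled once per interrupt) or -1 on error. */
static int wire_irq_eventfd(int device_fd, unsigned int index)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int32_t efd = eventfd(0, 0);

	if (efd < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = index;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));

	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set)) {
		close(efd);
		return -1;
	}

	return efd;
}

For level-triggered lines the interrupt is automasked on delivery by vfio_automasked_irq_handler(), so after servicing the event the consumer re-enables it with another VFIO_DEVICE_SET_IRQS call using VFIO_IRQ_SET_ACTION_UNMASK, or via the unmask eventfd handled by vfio_platform_set_irq_unmask() above.
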
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
new file mode 100644
index 000000000000..5d31e0473406
--- /dev/null
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2013 - Virtual Open Systems
+ * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef VFIO_PLATFORM_PRIVATE_H
+#define VFIO_PLATFORM_PRIVATE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#define VFIO_PLATFORM_OFFSET_SHIFT 40
+#define VFIO_PLATFORM_OFFSET_MASK (((u64)(1) << VFIO_PLATFORM_OFFSET_SHIFT) - 1)
+
+#define VFIO_PLATFORM_OFFSET_TO_INDEX(off) \
+ (off >> VFIO_PLATFORM_OFFSET_SHIFT)
+
+#define VFIO_PLATFORM_INDEX_TO_OFFSET(index) \
+ ((u64)(index) << VFIO_PLATFORM_OFFSET_SHIFT)
+
+struct vfio_platform_irq {
+ u32 flags;
+ u32 count;
+ int hwirq;
+ char *name;
+ struct eventfd_ctx *trigger;
+ bool masked;
+ spinlock_t lock;
+ struct virqfd *unmask;
+ struct virqfd *mask;
+};
+
+struct vfio_platform_region {
+ u64 addr;
+ resource_size_t size;
+ u32 flags;
+ u32 type;
+#define VFIO_PLATFORM_REGION_TYPE_MMIO 1
+#define VFIO_PLATFORM_REGION_TYPE_PIO 2
+ void __iomem *ioaddr;
+};
+
+struct vfio_platform_device {
+ struct vfio_platform_region *regions;
+ u32 num_regions;
+ struct vfio_platform_irq *irqs;
+ u32 num_irqs;
+ int refcnt;
+ struct mutex igate;
+
+ /*
+ * These fields should be filled by the bus specific binder
+ */
+ void *opaque;
+ const char *name;
+ uint32_t flags;
+ /* callbacks to discover device resources */
+ struct resource*
+ (*get_resource)(struct vfio_platform_device *vdev, int i);
+ int (*get_irq)(struct vfio_platform_device *vdev, int i);
+};
+
+extern int vfio_platform_probe_common(struct vfio_platform_device *vdev,
+ struct device *dev);
+extern struct vfio_platform_device *vfio_platform_remove_common
+ (struct device *dev);
+
+extern int vfio_platform_irq_init(struct vfio_platform_device *vdev);
+extern void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
+
+extern int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ uint32_t flags, unsigned index,
+ unsigned start, unsigned count,
+ void *data);
+
+#endif /* VFIO_PLATFORM_PRIVATE_H */
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 4cde85501444..e1278fe04b1e 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -234,22 +234,21 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
mutex_lock(&vfio.group_lock);
- minor = vfio_alloc_group_minor(group);
- if (minor < 0) {
- vfio_group_unlock_and_free(group);
- return ERR_PTR(minor);
- }
-
/* Did we race creating this group? */
list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
if (tmp->iommu_group == iommu_group) {
vfio_group_get(tmp);
- vfio_free_group_minor(minor);
vfio_group_unlock_and_free(group);
return tmp;
}
}
+ minor = vfio_alloc_group_minor(group);
+ if (minor < 0) {
+ vfio_group_unlock_and_free(group);
+ return ERR_PTR(minor);
+ }
+
dev = device_create(vfio.class, NULL,
MKDEV(MAJOR(vfio.group_devt), minor),
group, "%d", iommu_group_id(iommu_group));
@@ -711,6 +710,8 @@ void *vfio_del_group_dev(struct device *dev)
void *device_data = device->device_data;
struct vfio_unbound_dev *unbound;
unsigned int i = 0;
+ long ret;
+ bool interrupted = false;
/*
* The group exists so long as we have a device reference. Get
@@ -756,9 +757,22 @@ void *vfio_del_group_dev(struct device *dev)
vfio_device_put(device);
- } while (wait_event_interruptible_timeout(vfio.release_q,
- !vfio_dev_present(group, dev),
- HZ * 10) <= 0);
+ if (interrupted) {
+ ret = wait_event_timeout(vfio.release_q,
+ !vfio_dev_present(group, dev), HZ * 10);
+ } else {
+ ret = wait_event_interruptible_timeout(vfio.release_q,
+ !vfio_dev_present(group, dev), HZ * 10);
+ if (ret == -ERESTARTSYS) {
+ interrupted = true;
+ dev_warn(dev,
+ "Device is currently in use, task \"%s\" (%d) blocked until device is released",
+ current->comm, task_pid_nr(current));
+ }
+ }
+ } while (ret <= 0);
vfio_group_put(group);
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
new file mode 100644
index 000000000000..27c89cd5d70b
--- /dev/null
+++ b/drivers/vfio/virqfd.c
@@ -0,0 +1,226 @@
+/*
+ * VFIO generic eventfd code for IRQFD support.
+ * Derived from drivers/vfio/pci/vfio_pci_intrs.c
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vfio.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+#define DRIVER_DESC "IRQFD support for VFIO bus drivers"
+
+static struct workqueue_struct *vfio_irqfd_cleanup_wq;
+static DEFINE_SPINLOCK(virqfd_lock);
+
+static int __init vfio_virqfd_init(void)
+{
+ vfio_irqfd_cleanup_wq =
+ create_singlethread_workqueue("vfio-irqfd-cleanup");
+ if (!vfio_irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit vfio_virqfd_exit(void)
+{
+ destroy_workqueue(vfio_irqfd_cleanup_wq);
+}
+
+static void virqfd_deactivate(struct virqfd *virqfd)
+{
+ queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
+}
+
+static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
+ unsigned long flags = (unsigned long)key;
+
+ if (flags & POLLIN) {
+ /* An event has been signaled, call function */
+ if ((!virqfd->handler ||
+ virqfd->handler(virqfd->opaque, virqfd->data)) &&
+ virqfd->thread)
+ schedule_work(&virqfd->inject);
+ }
+
+ if (flags & POLLHUP) {
+ unsigned long flags;
+ spin_lock_irqsave(&virqfd_lock, flags);
+
+ /*
+ * The eventfd is closing, if the virqfd has not yet been
+ * queued for release, as determined by testing whether the
+ * virqfd pointer to it is still valid, queue it now. As
+ * with kvm irqfds, we know we won't race against the virqfd
+ * going away because we hold the lock to get here.
+ */
+ if (*(virqfd->pvirqfd) == virqfd) {
+ *(virqfd->pvirqfd) = NULL;
+ virqfd_deactivate(virqfd);
+ }
+
+ spin_unlock_irqrestore(&virqfd_lock, flags);
+ }
+
+ return 0;
+}
+
+static void virqfd_ptable_queue_proc(struct file *file,
+ wait_queue_head_t *wqh, poll_table *pt)
+{
+ struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
+ add_wait_queue(wqh, &virqfd->wait);
+}
+
+static void virqfd_shutdown(struct work_struct *work)
+{
+ struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
+ u64 cnt;
+
+ eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
+ flush_work(&virqfd->inject);
+ eventfd_ctx_put(virqfd->eventfd);
+
+ kfree(virqfd);
+}
+
+static void virqfd_inject(struct work_struct *work)
+{
+ struct virqfd *virqfd = container_of(work, struct virqfd, inject);
+ if (virqfd->thread)
+ virqfd->thread(virqfd->opaque, virqfd->data);
+}
+
+int vfio_virqfd_enable(void *opaque,
+ int (*handler)(void *, void *),
+ void (*thread)(void *, void *),
+ void *data, struct virqfd **pvirqfd, int fd)
+{
+ struct fd irqfd;
+ struct eventfd_ctx *ctx;
+ struct virqfd *virqfd;
+ int ret = 0;
+ unsigned int events;
+
+ virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
+ if (!virqfd)
+ return -ENOMEM;
+
+ virqfd->pvirqfd = pvirqfd;
+ virqfd->opaque = opaque;
+ virqfd->handler = handler;
+ virqfd->thread = thread;
+ virqfd->data = data;
+
+ INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
+ INIT_WORK(&virqfd->inject, virqfd_inject);
+
+ irqfd = fdget(fd);
+ if (!irqfd.file) {
+ ret = -EBADF;
+ goto err_fd;
+ }
+
+ ctx = eventfd_ctx_fileget(irqfd.file);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto err_ctx;
+ }
+
+ virqfd->eventfd = ctx;
+
+ /*
+ * virqfds can be released by closing the eventfd or directly
+ * through ioctl. These are both done through a workqueue, so
+ * we update the pointer to the virqfd under lock to avoid
+ * pushing multiple jobs to release the same virqfd.
+ */
+ spin_lock_irq(&virqfd_lock);
+
+ if (*pvirqfd) {
+ spin_unlock_irq(&virqfd_lock);
+ ret = -EBUSY;
+ goto err_busy;
+ }
+ *pvirqfd = virqfd;
+
+ spin_unlock_irq(&virqfd_lock);
+
+ /*
+ * Install our own custom wake-up handling so we are notified via
+ * a callback whenever someone signals the underlying eventfd.
+ */
+ init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
+ init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
+
+ events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
+
+ /*
+ * Check if there was an event already pending on the eventfd
+ * before we registered and trigger it as if we didn't miss it.
+ */
+ if (events & POLLIN) {
+ if ((!handler || handler(opaque, data)) && thread)
+ schedule_work(&virqfd->inject);
+ }
+
+ /*
+ * Do not drop the file until the irqfd is fully initialized,
+ * otherwise we might race against the POLLHUP.
+ */
+ fdput(irqfd);
+
+ return 0;
+err_busy:
+ eventfd_ctx_put(ctx);
+err_ctx:
+ fdput(irqfd);
+err_fd:
+ kfree(virqfd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
+
+void vfio_virqfd_disable(struct virqfd **pvirqfd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&virqfd_lock, flags);
+
+ if (*pvirqfd) {
+ virqfd_deactivate(*pvirqfd);
+ *pvirqfd = NULL;
+ }
+
+ spin_unlock_irqrestore(&virqfd_lock, flags);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed.
+ * Even if we don't queue the job, flush the wq to be sure it's
+ * been released.
+ */
+ flush_workqueue(vfio_irqfd_cleanup_wq);
+}
+EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
+
+module_init(vfio_virqfd_init);
+module_exit(vfio_virqfd_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
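
A minimal sketch of how a bus driver consumes the shared virqfd API added above, modelled on the vfio-pci INTx unmask wiring earlier in this patch. struct my_device and the my_* helpers are placeholder names for the illustration, not part of the patch:

/* placeholder device type for the sketch */
struct my_device {
	struct virqfd *unmask_virqfd;
	/* ... */
};

/*
 * Called from the eventfd wakeup; returning non-zero (with a thread
 * callback registered) causes the thread to run from a workqueue.
 */
static int my_unmask_handler(void *opaque, void *unused)
{
	struct my_device *mdev = opaque;

	return my_device_try_unmask(mdev);	/* placeholder helper */
}

static void my_unmask_thread(void *opaque, void *unused)
{
	struct my_device *mdev = opaque;

	my_device_unmask_slowpath(mdev);	/* placeholder helper */
}

static int my_wire_unmask_eventfd(struct my_device *mdev, int fd)
{
	if (fd < 0) {
		/* tear-down mirrors the vfio_virqfd_disable() usage above */
		vfio_virqfd_disable(&mdev->unmask_virqfd);
		return 0;
	}

	return vfio_virqfd_enable(mdev, my_unmask_handler, my_unmask_thread,
				  NULL, &mdev->unmask_virqfd, fd);
}
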
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2bbfc25e582c..7d137a43cc86 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -357,13 +357,13 @@ static void handle_tx(struct vhost_net *net)
iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
iov_iter_advance(&msg.msg_iter, hdr_size);
/* Sanity check */
- if (!iov_iter_count(&msg.msg_iter)) {
+ if (!msg_data_left(&msg)) {
vq_err(vq, "Unexpected header len for TX: "
"%zd expected %zd\n",
len, hdr_size);
break;
}
- len = iov_iter_count(&msg.msg_iter);
+ len = msg_data_left(&msg);
zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
&& (nvq->upend_idx + 1) % UIO_MAXIOV !=
@@ -390,7 +390,7 @@ static void handle_tx(struct vhost_net *net)
ubufs = NULL;
}
/* TODO: Check specific error and bomb out unless ENOBUFS? */
- err = sock->ops->sendmsg(NULL, sock, &msg, len);
+ err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
vhost_net_ubuf_put(ubufs);
@@ -566,7 +566,7 @@ static void handle_rx(struct vhost_net *net)
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
- err = sock->ops->recvmsg(NULL, sock, &msg,
+ err = sock->ops->recvmsg(sock, &msg,
1, MSG_DONTWAIT | MSG_TRUNC);
pr_debug("Discarded rx packet: len %zd\n", sock_len);
continue;
@@ -592,7 +592,7 @@ static void handle_rx(struct vhost_net *net)
*/
iov_iter_advance(&msg.msg_iter, vhost_hlen);
}
- err = sock->ops->recvmsg(NULL, sock, &msg,
+ err = sock->ops->recvmsg(sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
/* Userspace might have consumed the packet meanwhile:
* it's not supposed to do this usually, but might be hard
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 71df240a467a..5e19bb53b3a9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -131,6 +131,8 @@ struct vhost_scsi_tpg {
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count;
+ /* Used for enabling T10-PI with legacy devices */
+ int tv_fabric_prot_type;
/* list for vhost_scsi_list */
struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */
@@ -214,9 +216,7 @@ struct vhost_scsi {
int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
-
+static struct target_core_fabric_ops vhost_scsi_ops;
static struct workqueue_struct *vhost_scsi_workqueue;
/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -431,6 +431,14 @@ vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
port_nexus_ptr);
}
+static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ return tpg->tv_fabric_prot_type;
+}
+
static struct se_node_acl *
vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
@@ -1878,6 +1886,45 @@ static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
}
}
+static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tpg->tv_fabric_prot_type = val;
+
+ return count;
+}
+
+static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
+}
+TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
+ &vhost_scsi_tpg_attrib_fabric_prot_type.attr,
+ NULL,
+};
+
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
@@ -2155,7 +2202,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn,
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&vhost_scsi_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -2277,6 +2324,8 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
};
static struct target_core_fabric_ops vhost_scsi_ops = {
+ .module = THIS_MODULE,
+ .name = "vhost",
.get_fabric_name = vhost_scsi_get_fabric_name,
.get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident,
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
@@ -2289,6 +2338,7 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.tpg_check_demo_mode_cache = vhost_scsi_check_true,
.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
+ .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
.tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl,
.tpg_release_fabric_acl = vhost_scsi_release_fabric_acl,
.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
@@ -2320,70 +2370,20 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = vhost_scsi_make_nodeacl,
.fabric_drop_nodeacl = vhost_scsi_drop_nodeacl,
+
+ .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
+ .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
+ .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
};
-static int vhost_scsi_register_configfs(void)
+static int __init vhost_scsi_init(void)
{
- struct target_fabric_configfs *fabric;
- int ret;
+ int ret = -ENOMEM;
- pr_debug("vhost-scsi fabric module %s on %s/%s"
+ pr_debug("TCM_VHOST fabric module %s on %s/%s"
" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup fabric->tf_ops from our local vhost_scsi_ops
- */
- fabric->tf_ops = vhost_scsi_ops;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed"
- " for TCM_VHOST\n");
- return ret;
- }
- /*
- * Setup our local pointer to *fabric
- */
- vhost_scsi_fabric_configfs = fabric;
- pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
- return 0;
-};
-
-static void vhost_scsi_deregister_configfs(void)
-{
- if (!vhost_scsi_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
- vhost_scsi_fabric_configfs = NULL;
- pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
-};
-static int __init vhost_scsi_init(void)
-{
- int ret = -ENOMEM;
/*
* Use our own dedicated workqueue for submitting I/O into
* target core to avoid contention within system_wq.
@@ -2396,7 +2396,7 @@ static int __init vhost_scsi_init(void)
if (ret < 0)
goto out_destroy_workqueue;
- ret = vhost_scsi_register_configfs();
+ ret = target_register_template(&vhost_scsi_ops);
if (ret < 0)
goto out_vhost_scsi_deregister;
@@ -2412,7 +2412,7 @@ out:
static void vhost_scsi_exit(void)
{
- vhost_scsi_deregister_configfs();
+ target_unregister_template(&vhost_scsi_ops);
vhost_scsi_deregister();
destroy_workqueue(vhost_scsi_workqueue);
};
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 9a23698b6fe8..2da5862876d1 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -168,10 +168,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
struct device_node *nproot, *np;
int iset = 0;
- nproot = of_node_get(pdev->dev.parent->of_node);
- if (!nproot)
- return -ENODEV;
- nproot = of_find_node_by_name(nproot, "backlights");
+ nproot = of_get_child_by_name(pdev->dev.parent->of_node, "backlights");
if (!nproot) {
dev_err(&pdev->dev, "failed to find backlights node\n");
return -ENODEV;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index efb09046a8cf..2d9923a60076 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -408,6 +408,16 @@ config BACKLIGHT_PANDORA
If you have a Pandora console, say Y to enable the
backlight driver.
+config BACKLIGHT_SKY81452
+ tristate "Backlight driver for SKY81452"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_SKY81452
+ help
+ If you have a Skyworks SKY81452, say Y to enable the
+ backlight driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sky81452-backlight.
+
config BACKLIGHT_TPS65217
tristate "TPS65217 Backlight"
depends on BACKLIGHT_CLASS_DEVICE && MFD_TPS65217
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index fcd50b732165..d67073f9d421 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
+obj-$(CONFIG_BACKLIGHT_SKY81452) += sky81452-backlight.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index d4bd74bd5070..b1943e7735a1 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -165,6 +165,7 @@ static struct platform_device_id da9052_wled_ids[] = {
.name = "da9052-wled3",
.driver_data = DA9052_TYPE_WLED3,
},
+ { },
};
static struct platform_driver da9052_wled_driver = {
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
new file mode 100644
index 000000000000..052fa1bac03d
--- /dev/null
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -0,0 +1,353 @@
+/*
+ * sky81452-backlight.c SKY81452 backlight driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/platform_data/sky81452-backlight.h>
+#include <linux/slab.h>
+
+/* registers */
+#define SKY81452_REG0 0x00
+#define SKY81452_REG1 0x01
+#define SKY81452_REG2 0x02
+#define SKY81452_REG4 0x04
+#define SKY81452_REG5 0x05
+
+/* bit mask */
+#define SKY81452_CS 0xFF
+#define SKY81452_EN 0x3F
+#define SKY81452_IGPW 0x20
+#define SKY81452_PWMMD 0x10
+#define SKY81452_PHASE 0x08
+#define SKY81452_ILIM 0x04
+#define SKY81452_VSHRT 0x03
+#define SKY81452_OCP 0x80
+#define SKY81452_OTMP 0x40
+#define SKY81452_SHRT 0x3F
+#define SKY81452_OPN 0x3F
+
+#define SKY81452_DEFAULT_NAME "lcd-backlight"
+#define SKY81452_MAX_BRIGHTNESS (SKY81452_CS + 1)
+
+#define CTZ(b) __builtin_ctz(b)
+
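CTZ() (count trailing zeros) gives the shift needed to place a field value at the low bit of its mask, which is how the driver composes register writes below. A tiny illustration with an example mask of 0x30 (bits 5:4), not taken from the SKY81452 datasheet:

#include <assert.h>

#define CTZ(b) __builtin_ctz(b)

int main(void)
{
	unsigned int mask = 0x30;	/* example field mask, bits 5:4 */
	unsigned int val  = 0x2;	/* field value to insert */

	/* shift the value into the field position defined by the mask,
	 * the same idiom as value << CTZ(SKY81452_VSHRT) in the driver */
	assert((val << CTZ(mask)) == 0x20);
	return 0;
}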
+static int sky81452_bl_update_status(struct backlight_device *bd)
+{
+ const struct sky81452_bl_platform_data *pdata =
+ dev_get_platdata(bd->dev.parent);
+ const unsigned int brightness = (unsigned int)bd->props.brightness;
+ struct regmap *regmap = bl_get_data(bd);
+ int ret;
+
+ if (brightness > 0) {
+ ret = regmap_write(regmap, SKY81452_REG0, brightness - 1);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ return regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
+ pdata->enable << CTZ(SKY81452_EN));
+ }
+
+ return regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN, 0);
+}
+
+static const struct backlight_ops sky81452_bl_ops = {
+ .update_status = sky81452_bl_update_status,
+};
+
+static ssize_t sky81452_bl_store_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct regmap *regmap = bl_get_data(to_backlight_device(dev));
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &value);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ ret = regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
+ value << CTZ(SKY81452_EN));
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ return count;
+}
+
+static ssize_t sky81452_bl_show_open_short(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct regmap *regmap = bl_get_data(to_backlight_device(dev));
+ unsigned int reg, value = 0;
+ char tmp[3];
+ int i, ret;
+
+ reg = !strcmp(attr->attr.name, "open") ? SKY81452_REG5 : SKY81452_REG4;
+ ret = regmap_read(regmap, reg, &value);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ if (value & SKY81452_SHRT) {
+ *buf = 0;
+ for (i = 0; i < 6; i++) {
+ if (value & 0x01) {
+ sprintf(tmp, "%d ", i + 1);
+ strcat(buf, tmp);
+ }
+ value >>= 1;
+ }
+ strcat(buf, "\n");
+ } else {
+ strcpy(buf, "none\n");
+ }
+
+ return strlen(buf);
+}
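The open/short registers are decoded bit by bit into a space-separated list of channel numbers, with bit 0 mapping to channel 1; a status value of 0x05, for example, reports channels 1 and 3. A standalone sketch of the same decode with that example value:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int value = 0x05;	/* example status: bits 0 and 2 set */
	char buf[32] = "";
	int i;

	for (i = 0; i < 6; i++) {	/* six backlight channels */
		if (value & (1 << i)) {
			char tmp[4];
			sprintf(tmp, "%d ", i + 1);
			strcat(buf, tmp);
		}
	}
	printf("%s\n", buf[0] ? buf : "none");	/* prints "1 3 " */
	return 0;
}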
+
+static ssize_t sky81452_bl_show_fault(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct regmap *regmap = bl_get_data(to_backlight_device(dev));
+ unsigned int value = 0;
+ int ret;
+
+ ret = regmap_read(regmap, SKY81452_REG4, &value);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ *buf = 0;
+
+ if (value & SKY81452_OCP)
+ strcat(buf, "over-current ");
+
+ if (value & SKY81452_OTMP)
+ strcat(buf, "over-temperature");
+
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(enable, S_IWGRP | S_IWUSR, NULL, sky81452_bl_store_enable);
+static DEVICE_ATTR(open, S_IRUGO, sky81452_bl_show_open_short, NULL);
+static DEVICE_ATTR(short, S_IRUGO, sky81452_bl_show_open_short, NULL);
+static DEVICE_ATTR(fault, S_IRUGO, sky81452_bl_show_fault, NULL);
+
+static struct attribute *sky81452_bl_attribute[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_open.attr,
+ &dev_attr_short.attr,
+ &dev_attr_fault.attr,
+ NULL
+};
+
+static const struct attribute_group sky81452_bl_attr_group = {
+ .attrs = sky81452_bl_attribute,
+};
+
+#ifdef CONFIG_OF
+static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
+ struct device *dev)
+{
+ struct device_node *np = of_node_get(dev->of_node);
+ struct sky81452_bl_platform_data *pdata;
+ int num_entry;
+ unsigned int sources[6];
+ int ret;
+
+ if (!np) {
+ dev_err(dev, "backlight node not found.\n");
+ return ERR_PTR(-ENODATA);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ of_node_put(np);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ of_property_read_string(np, "name", &pdata->name);
+ pdata->ignore_pwm = of_property_read_bool(np, "skyworks,ignore-pwm");
+ pdata->dpwm_mode = of_property_read_bool(np, "skyworks,dpwm-mode");
+ pdata->phase_shift = of_property_read_bool(np, "skyworks,phase-shift");
+ pdata->gpio_enable = of_get_gpio(np, 0);
+
+ ret = of_property_count_u32_elems(np, "led-sources");
+ if (IS_ERR_VALUE(ret)) {
+ pdata->enable = SKY81452_EN >> CTZ(SKY81452_EN);
+ } else {
+ num_entry = ret;
+ if (num_entry > 6)
+ num_entry = 6;
+
+ ret = of_property_read_u32_array(np, "led-sources", sources,
+ num_entry);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "led-sources node is invalid.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata->enable = 0;
+ while (--num_entry)
+ pdata->enable |= (1 << sources[num_entry]);
+ }
+
+ ret = of_property_read_u32(np,
+ "skyworks,short-detection-threshold-volt",
+ &pdata->short_detection_threshold);
+ if (IS_ERR_VALUE(ret))
+ pdata->short_detection_threshold = 7;
+
+ ret = of_property_read_u32(np, "skyworks,current-limit-mA",
+ &pdata->boost_current_limit);
+ if (IS_ERR_VALUE(ret))
+ pdata->boost_current_limit = 2750;
+
+ of_node_put(np);
+ return pdata;
+}
+#else
+static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
+ struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
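When the optional DT properties are absent, the parse routine above falls back to its built-in defaults: all six channels enabled, a 7 V short-detection threshold and a 2750 mA boost current limit. A sketch of those defaults using a local stand-in structure (field names mirror the driver's platform data, but this is purely illustrative):

/* Illustrative only: the defaults sky81452_bl_parse_dt() chooses when
 * no matching DT properties are present. */
struct sky81452_bl_defaults {
	unsigned int enable;			/* bitmask of enabled channels */
	unsigned int short_detection_threshold;	/* volts */
	unsigned int boost_current_limit;	/* mA */
};

static const struct sky81452_bl_defaults sky81452_bl_example = {
	.enable				= 0x3F,	/* SKY81452_EN >> CTZ(SKY81452_EN) */
	.short_detection_threshold	= 7,
	.boost_current_limit		= 2750,
};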
+
+static int sky81452_bl_init_device(struct regmap *regmap,
+ struct sky81452_bl_platform_data *pdata)
+{
+ unsigned int value;
+
+ value = pdata->ignore_pwm ? SKY81452_IGPW : 0;
+ value |= pdata->dpwm_mode ? SKY81452_PWMMD : 0;
+ value |= pdata->phase_shift ? 0 : SKY81452_PHASE;
+
+ if (pdata->boost_current_limit == 2300)
+ value |= SKY81452_ILIM;
+ else if (pdata->boost_current_limit != 2750)
+ return -EINVAL;
+
+ if (pdata->short_detection_threshold < 4 ||
+ pdata->short_detection_threshold > 7)
+ return -EINVAL;
+ value |= (7 - pdata->short_detection_threshold) << CTZ(SKY81452_VSHRT);
+
+ return regmap_write(regmap, SKY81452_REG2, value);
+}
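A worked example of the REG2 value computed above: with ignore_pwm set, phase_shift set, a 2300 mA current limit and a 5 V short-detection threshold, the register becomes 0x20 | 0x04 | ((7 - 5) << 0) = 0x26, since SKY81452_VSHRT is 0x03 and its CTZ is therefore 0. A compact check of that arithmetic:

#include <assert.h>

#define SKY81452_IGPW	0x20
#define SKY81452_PWMMD	0x10
#define SKY81452_PHASE	0x08
#define SKY81452_ILIM	0x04
#define SKY81452_VSHRT	0x03
#define CTZ(b)		__builtin_ctz(b)

int main(void)
{
	unsigned int value = 0;

	value |= SKY81452_IGPW;			/* ignore_pwm = true */
	/* dpwm_mode = false, phase_shift = true: no PWMMD, no PHASE bit */
	value |= SKY81452_ILIM;			/* boost_current_limit == 2300 mA */
	value |= (7 - 5) << CTZ(SKY81452_VSHRT); /* 5 V threshold */

	assert(value == 0x26);
	return 0;
}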
+
+static int sky81452_bl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap = dev_get_drvdata(dev->parent);
+ struct sky81452_bl_platform_data *pdata = dev_get_platdata(dev);
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ const char *name;
+ int ret;
+
+ if (!pdata) {
+ pdata = sky81452_bl_parse_dt(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
+
+ if (gpio_is_valid(pdata->gpio_enable)) {
+ ret = devm_gpio_request_one(dev, pdata->gpio_enable,
+ GPIOF_OUT_INIT_HIGH, "sky81452-en");
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "failed to request GPIO. err=%d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = sky81452_bl_init_device(regmap, pdata);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "failed to initialize. err=%d\n", ret);
+ return ret;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = SKY81452_MAX_BRIGHTNESS;
+ name = pdata->name ? pdata->name : SKY81452_DEFAULT_NAME;
+ bd = devm_backlight_device_register(dev, name, dev, regmap,
+ &sky81452_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(dev, "failed to register. err=%ld\n", PTR_ERR(bd));
+ return PTR_ERR(bd);
+ }
+
+ platform_set_drvdata(pdev, bd);
+
+ ret = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "failed to create attribute. err=%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int sky81452_bl_remove(struct platform_device *pdev)
+{
+ const struct sky81452_bl_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct backlight_device *bd = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&bd->dev.kobj, &sky81452_bl_attr_group);
+
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = 0;
+ backlight_update_status(bd);
+
+ if (gpio_is_valid(pdata->gpio_enable))
+ gpio_set_value_cansleep(pdata->gpio_enable, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sky81452_bl_of_match[] = {
+ { .compatible = "skyworks,sky81452-backlight", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sky81452_bl_of_match);
+#endif
+
+static struct platform_driver sky81452_bl_driver = {
+ .driver = {
+ .name = "sky81452-backlight",
+ .of_match_table = of_match_ptr(sky81452_bl_of_match),
+ },
+ .probe = sky81452_bl_probe,
+ .remove = sky81452_bl_remove,
+};
+
+module_platform_driver(sky81452_bl_driver);
+
+MODULE_DESCRIPTION("Skyworks SKY81452 backlight driver");
+MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@skyworksinc.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index b3dd417b4719..109462303087 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -479,7 +479,7 @@ config FB_ATARI
config FB_OF
bool "Open Firmware frame buffer device support"
- depends on (FB = y) && (PPC64 || PPC_OF) && (!PPC_PSERIES || PCI)
+ depends on (FB = y) && PPC && (!PPC_PSERIES || PCI)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1333,7 +1333,7 @@ config FB_RADEON
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- select FB_MACMODES if PPC_OF
+ select FB_MACMODES if PPC
help
Choose this option if you want to use an ATI Radeon graphics card as
a framebuffer device. There are both PCI and AGP versions. You
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index aedf2fbf9bf6..0156954bf340 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -965,7 +965,7 @@ static void __iomem *aty128_find_mem_vbios(struct aty128fb_par *par)
/* fill in known card constants if pll_block is not available */
static void aty128_timings(struct aty128fb_par *par)
{
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
/* instead of a table lookup, assume OF has properly
* setup the PLL registers and use their values
* to set the XCLK values and reference divider values */
@@ -979,7 +979,7 @@ static void aty128_timings(struct aty128fb_par *par)
if (!par->constants.ref_clk)
par->constants.ref_clk = 2950;
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
x_mpll_ref_fb_div = aty_ld_pll(X_MPLL_REF_FB_DIV);
xclk_cntl = aty_ld_pll(XCLK_CNTL) & 0x7;
Nx = (x_mpll_ref_fb_div & 0x00ff00) >> 8;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 26d80a4486fb..01237c8fcdc6 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -74,7 +74,7 @@
#include <asm/io.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
#include <asm/pci-bridge.h>
#include "../macmodes.h"
@@ -83,7 +83,7 @@
#include <asm/btext.h>
#endif
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
@@ -418,7 +418,7 @@ static int radeon_find_mem_vbios(struct radeonfb_info *rinfo)
}
#endif
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
/*
* Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
* tree. Hopefully, ATI OF driver is kind enough to fill these
@@ -448,7 +448,7 @@ static int radeon_read_xtal_OF(struct radeonfb_info *rinfo)
return 0;
}
-#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
+#endif /* CONFIG_PPC || CONFIG_SPARC */
/*
* Read PLL infos from chip registers
@@ -653,7 +653,7 @@ static void radeon_get_pllinfo(struct radeonfb_info *rinfo)
rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK;
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
/*
* Retrieve PLL infos from Open Firmware first
*/
@@ -661,7 +661,7 @@ static void radeon_get_pllinfo(struct radeonfb_info *rinfo)
printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n");
goto found;
}
-#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
+#endif /* CONFIG_PPC || CONFIG_SPARC */
/*
* Check out if we have an X86 which gave us some PLL informations
@@ -1910,7 +1910,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
* I put the card's memory at 0 in card space and AGP at some random high
* local (0xe0000000 for now) that will be changed by XFree/DRI anyway
*/
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
#undef SET_MC_FB_FROM_APERTURE
static void fixup_memory_mappings(struct radeonfb_info *rinfo)
{
@@ -1984,7 +1984,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
((aper_base + aper_size - 1) & 0xffff0000) | (aper_base >> 16),
0xffff0000 | (agp_base >> 16));
}
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
static void radeon_identify_vram(struct radeonfb_info *rinfo)
@@ -2236,7 +2236,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
rinfo->family == CHIP_FAMILY_RS200)
rinfo->errata |= CHIP_ERRATA_PLL_DELAY;
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
/* On PPC, we obtain the OF device-node pointer to the firmware
* data for this chip
*/
@@ -2245,14 +2245,14 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n",
pci_name(rinfo->pdev));
-#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
-#ifdef CONFIG_PPC_OF
+#endif /* CONFIG_PPC || CONFIG_SPARC */
+#ifdef CONFIG_PPC
/* On PPC, the firmware sets up a memory mapping that tends
* to cause lockups when enabling the engine. We reconfigure
* the card internal memory mappings properly
*/
fixup_memory_mappings(rinfo);
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
/* Get VRAM size and type */
radeon_identify_vram(rinfo);
diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index bc078d50d8f1..f1ce229de78d 100644
--- a/drivers/video/fbdev/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
@@ -55,7 +55,7 @@ static char *radeon_get_mon_name(int type)
}
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
/*
* Try to find monitor information & EDID data out of the Open Firmware
* device-tree. This also contains some "hacks" to work around a few machine
@@ -160,7 +160,7 @@ static int radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_no,
}
return MT_NONE;
}
-#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
+#endif /* CONFIG_PPC || CONFIG_SPARC */
static int radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
@@ -499,11 +499,11 @@ void radeon_probe_screens(struct radeonfb_info *rinfo,
* Old single head cards
*/
if (!rinfo->has_CRTC2) {
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
if (rinfo->mon1_type == MT_NONE)
rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
&rinfo->mon1_EDID);
-#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
+#endif /* CONFIG_PPC || CONFIG_SPARC */
#ifdef CONFIG_FB_RADEON_I2C
if (rinfo->mon1_type == MT_NONE)
rinfo->mon1_type =
@@ -548,11 +548,11 @@ void radeon_probe_screens(struct radeonfb_info *rinfo,
/*
* Probe primary head (DVI or laptop internal panel)
*/
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
if (rinfo->mon1_type == MT_NONE)
rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
&rinfo->mon1_EDID);
-#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
+#endif /* CONFIG_PPC || CONFIG_SPARC */
#ifdef CONFIG_FB_RADEON_I2C
if (rinfo->mon1_type == MT_NONE)
rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi,
@@ -576,11 +576,11 @@ void radeon_probe_screens(struct radeonfb_info *rinfo,
/*
* Probe secondary head (mostly VGA, can be DVI)
*/
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
if (rinfo->mon2_type == MT_NONE)
rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1,
&rinfo->mon2_EDID);
-#endif /* CONFIG_PPC_OF || defined(CONFIG_SPARC) */
+#endif /* CONFIG_PPC || defined(CONFIG_SPARC) */
#ifdef CONFIG_FB_RADEON_I2C
if (rinfo->mon2_type == MT_NONE)
rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga,
@@ -653,7 +653,7 @@ void radeon_probe_screens(struct radeonfb_info *rinfo,
*/
static void radeon_fixup_panel_info(struct radeonfb_info *rinfo)
{
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
/*
* LCD Flat panels should use fixed dividers, we enforce that on
* PPC only for now...
@@ -676,7 +676,7 @@ static void radeon_fixup_panel_info(struct radeonfb_info *rinfo)
(rinfo->panel_info.post_divider << 16),
ppll_div_sel);
}
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
}
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index 46a12f1a93c3..1417542738fc 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -523,7 +523,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
OUTPLL(pllVCLK_ECP_CNTL, tmp);
/* X doesn't do that ... hrm, we do on mobility && Macs */
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
if (rinfo->is_mobility) {
tmp = INPLL(pllMCLK_CNTL);
tmp &= ~(MCLK_CNTL__FORCE_MCLKA |
@@ -541,7 +541,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
OUTPLL(pllMCLK_MISC, tmp);
radeon_msleep(15);
}
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
}
#ifdef CONFIG_PM
@@ -1288,7 +1288,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
radeon_pm_enable_dll_m10(rinfo);
radeon_pm_yclk_mclk_sync_m10(rinfo);
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
if (rinfo->of_node != NULL) {
int size;
@@ -1298,7 +1298,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
else
mrtable = default_mrtable;
}
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
/* Program the SDRAM */
sdram_mode_reg = mrtable[0];
@@ -1943,7 +1943,7 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
}
#endif
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC
#ifdef CONFIG_PPC_PMAC
static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
{
@@ -2512,7 +2512,7 @@ static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
}
#endif /* 0 */
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
static void radeonfb_whack_power_state(struct radeonfb_info *rinfo, pci_power_t state)
{
@@ -2793,7 +2793,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
return rc;
}
-#ifdef CONFIG_PPC_OF__disabled
+#ifdef CONFIG_PPC__disabled
static void radeonfb_early_resume(void *data)
{
struct radeonfb_info *rinfo = data;
@@ -2803,7 +2803,7 @@ static void radeonfb_early_resume(void *data)
radeonfb_pci_resume(rinfo->pdev);
rinfo->no_schedule = 0;
}
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC */
#endif /* CONFIG_PM */
diff --git a/drivers/video/fbdev/aty/radeonfb.h b/drivers/video/fbdev/aty/radeonfb.h
index cb846044f57c..039def41c920 100644
--- a/drivers/video/fbdev/aty/radeonfb.h
+++ b/drivers/video/fbdev/aty/radeonfb.h
@@ -20,7 +20,7 @@
#include <asm/io.h>
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
#include <asm/prom.h>
#endif
@@ -301,7 +301,7 @@ struct radeonfb_info {
unsigned long fb_local_base;
struct pci_dev *pdev;
-#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
+#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
struct device_node *of_node;
#endif
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 080fdd2a70f3..8d14b29aafea 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -315,7 +315,7 @@ static int controlfb_blank(int blank_mode, struct fb_info *info)
container_of(info, struct fb_info_control, info);
unsigned ctrl;
- ctrl = ld_le32(CNTRL_REG(p,ctrl));
+ ctrl = le32_to_cpup(CNTRL_REG(p,ctrl));
if (blank_mode > 0)
switch (blank_mode) {
case FB_BLANK_VSYNC_SUSPEND:
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 868facdec638..01ef1b953390 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -33,10 +33,6 @@
#include <video/edid.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
-#ifdef CONFIG_PPC_OF
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#endif
#include "../edid.h"
/*
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 42543362f163..807ee22ef229 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -415,7 +415,8 @@ static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
struct fb_info *info = hv_get_drvdata(hdev);
struct hvfb_par *par = info->par;
struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
- int t, ret = 0;
+ int ret = 0;
+ unsigned long t;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
@@ -488,7 +489,8 @@ static int synthvid_send_config(struct hv_device *hdev)
struct fb_info *info = hv_get_drvdata(hdev);
struct hvfb_par *par = info->par;
struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
- int t, ret = 0;
+ int ret = 0;
+ unsigned long t;
/* Send VRAM location */
memset(msg, 0, sizeof(struct synthvid_msg));
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index aae10ce74f14..9b167f7ef6c6 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1470,15 +1470,13 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long addr, size;
struct imstt_par *par;
struct fb_info *info;
-#ifdef CONFIG_PPC_OF
struct device_node *dp;
dp = pci_device_to_OF_node(pdev);
if(dp)
printk(KERN_INFO "%s: OF name %s\n",__func__, dp->name);
- else
+ else if (IS_ENABLED(CONFIG_OF))
printk(KERN_ERR "imsttfb: no OF node for pci device\n");
-#endif /* CONFIG_PPC_OF */
info = framebuffer_alloc(sizeof(struct imstt_par), &pdev->dev);
@@ -1501,11 +1499,9 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (pdev->device) {
case PCI_DEVICE_ID_IMS_TT128: /* IMS,tt128mbA */
par->ramdac = IBM;
-#ifdef CONFIG_PPC_OF
if (dp && ((strcmp(dp->name, "IMS,tt128mb8") == 0) ||
(strcmp(dp->name, "IMS,tt128mb8A") == 0)))
par->ramdac = TVP;
-#endif /* CONFIG_PPC_OF */
break;
case PCI_DEVICE_ID_IMS_TT3D: /* IMS,tt3d */
par->ramdac = TVP;
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 3b6a3c8c36e2..84d1d29e532c 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -183,7 +183,7 @@ static struct platform_device_id imxfb_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, imxfb_devtype);
-static struct of_device_id imxfb_of_dev_id[] = {
+static const struct of_device_id imxfb_of_dev_id[] = {
{
.compatible = "fsl,imx1-fb",
.data = &imxfb_devtype[IMX1_FB],
diff --git a/drivers/video/fbdev/nvidia/Makefile b/drivers/video/fbdev/nvidia/Makefile
index ca47432113e0..917d3eb05feb 100644
--- a/drivers/video/fbdev/nvidia/Makefile
+++ b/drivers/video/fbdev/nvidia/Makefile
@@ -5,9 +5,8 @@
obj-$(CONFIG_FB_NVIDIA) += nvidiafb.o
nvidiafb-y := nvidia.o nv_hw.o nv_setup.o \
- nv_accel.o
+ nv_accel.o nv_of.o
nvidiafb-$(CONFIG_FB_NVIDIA_I2C) += nv_i2c.o
nvidiafb-$(CONFIG_FB_NVIDIA_BACKLIGHT) += nv_backlight.o
-nvidiafb-$(CONFIG_PPC_OF) += nv_of.o
nvidiafb-objs := $(nvidiafb-y)
diff --git a/drivers/video/fbdev/nvidia/nv_of.c b/drivers/video/fbdev/nvidia/nv_of.c
index 3bc13df4b120..5f3e5179c25a 100644
--- a/drivers/video/fbdev/nvidia/nv_of.c
+++ b/drivers/video/fbdev/nvidia/nv_of.c
@@ -19,9 +19,6 @@
#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-
#include "nv_type.h"
#include "nv_local.h"
#include "nv_proto.h"
diff --git a/drivers/video/fbdev/nvidia/nv_proto.h b/drivers/video/fbdev/nvidia/nv_proto.h
index ff5c410355ea..878a5ce02299 100644
--- a/drivers/video/fbdev/nvidia/nv_proto.h
+++ b/drivers/video/fbdev/nvidia/nv_proto.h
@@ -42,16 +42,8 @@ int nvidia_probe_i2c_connector(struct fb_info *info, int conn,
#define nvidia_probe_i2c_connector(p, c, edid) (-1)
#endif
-#ifdef CONFIG_PPC_OF
int nvidia_probe_of_connector(struct fb_info *info, int conn,
u8 ** out_edid);
-#else
-static inline int nvidia_probe_of_connector(struct fb_info *info, int conn,
- u8 ** out_edid)
-{
- return -1;
-}
-#endif
/* in nv_accel.c */
extern void NVResetGraphics(struct fb_info *info);
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index def041204676..4273c6ee8cf6 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -24,10 +24,6 @@
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
-#ifdef CONFIG_PPC_OF
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index 0cdc97413020..a8ce920fa797 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -37,7 +37,7 @@ static const struct omap_video_timings dvic_default_timings = {
.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
struct panel_drv_data {
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
index 92919a74e715..d9048b3df495 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
@@ -114,12 +114,21 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
+static void tfp410_fix_timings(struct omap_video_timings *timings)
+{
+ timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+}
+
static void tfp410_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
+ tfp410_fix_timings(timings);
+
ddata->timings = *timings;
dssdev->panel.timings = *timings;
@@ -140,6 +149,8 @@ static int tfp410_check_timings(struct omap_dss_device *dssdev,
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
+ tfp410_fix_timings(timings);
+
return in->ops.dpi->check_timings(in, timings);
}
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
index 27d4fcfa1824..9974a37a11af 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
@@ -37,7 +37,7 @@ static struct omap_video_timings lb035q02_timings = {
.hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
struct panel_drv_data {
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
index 18b19b6e1ac2..eae263702964 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
@@ -54,7 +54,7 @@ static const struct omap_video_timings sharp_ls_timings = {
.hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index 337ccc5c0f5e..90cbc4c3406c 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -108,7 +108,7 @@ static const struct omap_video_timings acx565akm_panel_timings = {
.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
index fbba0b8ca871..9edc51133c59 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -58,7 +58,7 @@ static struct omap_video_timings td028ttec1_panel_timings = {
.data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
};
#define JBT_COMMAND 0x000
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
index 5aba76bca25a..79e4a029aab9 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
@@ -91,7 +91,7 @@ static const struct omap_video_timings tpo_td043_timings = {
.hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
.data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
diff --git a/drivers/video/fbdev/omap2/dss/core.c b/drivers/video/fbdev/omap2/dss/core.c
index d5d92124e019..16751755d433 100644
--- a/drivers/video/fbdev/omap2/dss/core.c
+++ b/drivers/video/fbdev/omap2/dss/core.c
@@ -179,10 +179,14 @@ static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
switch (v) {
case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
DSSDBG("suspending displays\n");
return dss_suspend_all_devices();
case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
DSSDBG("resuming displays\n");
return dss_resume_all_devices();
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index 31b743c70272..f4fc77d9d3bf 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -123,6 +123,9 @@ static struct {
struct regmap *syscon_pol;
u32 syscon_pol_offset;
+
+ /* DISPC_CONTROL & DISPC_CONFIG lock */
+ spinlock_t control_lock;
} dispc;
enum omap_color_component {
@@ -261,7 +264,16 @@ static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld)
static void mgr_fld_write(enum omap_channel channel,
enum mgr_reg_fields regfld, int val) {
const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG;
+ unsigned long flags;
+
+ if (need_lock)
+ spin_lock_irqsave(&dispc.control_lock, flags);
+
REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low);
+
+ if (need_lock)
+ spin_unlock_irqrestore(&dispc.control_lock, flags);
}
#define SR(reg) \
@@ -1126,6 +1138,7 @@ static void dispc_init_fifos(void)
int fifo;
u8 start, end;
u32 unit;
+ int i;
unit = dss_feat_get_buffer_size_unit();
@@ -1165,6 +1178,20 @@ static void dispc_init_fifos(void)
dispc.fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB;
dispc.fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX;
}
+
+ /*
+ * Set up default FIFO thresholds.
+ */
+ for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
+ u32 low, high;
+ const bool use_fifomerge = false;
+ const bool manual_update = false;
+
+ dispc_ovl_compute_fifo_thresholds(i, &low, &high,
+ use_fifomerge, manual_update);
+
+ dispc_ovl_set_fifo_threshold(i, low, high);
+ }
}
static u32 dispc_ovl_get_fifo_size(enum omap_plane plane)
@@ -1278,6 +1305,63 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
}
EXPORT_SYMBOL(dispc_ovl_compute_fifo_thresholds);
+static void dispc_ovl_set_mflag(enum omap_plane plane, bool enable)
+{
+ int bit;
+
+ if (plane == OMAP_DSS_GFX)
+ bit = 14;
+ else
+ bit = 23;
+
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
+}
+
+static void dispc_ovl_set_mflag_threshold(enum omap_plane plane,
+ int low, int high)
+{
+ dispc_write_reg(DISPC_OVL_MFLAG_THRESHOLD(plane),
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_init_mflag(void)
+{
+ int i;
+
+ /*
+ * HACK: NV12 color format and MFLAG seem to have problems working
+ * together: using two displays, and having an NV12 overlay on one of
+ * the displays will cause underflows/synclosts when MFLAG_CTRL=2.
+ * Changing MFLAG thresholds and PRELOAD to certain values seems to
+ * remove the errors, but there doesn't seem to be a clear logic on
+ * which values work and which do not.
+ *
+ * As a work-around, force MFLAG to be always on.
+ */
+ dispc_write_reg(DISPC_GLOBAL_MFLAG_ATTRIBUTE,
+ (1 << 0) | /* MFLAG_CTRL = force always on */
+ (0 << 2)); /* MFLAG_START = disable */
+
+ for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
+ u32 size = dispc_ovl_get_fifo_size(i);
+ u32 unit = dss_feat_get_buffer_size_unit();
+ u32 low, high;
+
+ dispc_ovl_set_mflag(i, true);
+
+ /*
+ * Simulation team suggests the thresholds below:
+ * HT = fifosize * 5 / 8;
+ * LT = fifosize * 4 / 8;
+ */
+
+ low = size * 4 / 8 / unit;
+ high = size * 5 / 8 / unit;
+
+ dispc_ovl_set_mflag_threshold(i, low, high);
+ }
+}
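With the suggested HT = 5/8 and LT = 4/8 ratios, a FIFO of, say, 1024 bytes and a 16-byte buffer unit (illustrative numbers, not taken from any particular SoC) gives low = 1024*4/8/16 = 32 and high = 1024*5/8/16 = 40, which the threshold register then packs with high in bits 31:16 and low in bits 15:0:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t size = 1024;	/* example FIFO size in bytes */
	uint32_t unit = 16;	/* example buffer size unit */
	uint32_t low  = size * 4 / 8 / unit;	/* LT = fifosize * 4 / 8 */
	uint32_t high = size * 5 / 8 / unit;	/* HT = fifosize * 5 / 8 */
	uint32_t reg  = (high << 16) | low;	/* MFLAG_THRESHOLD layout */

	assert(low == 32 && high == 40);
	assert(reg == 0x00280020);
	return 0;
}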
+
static void dispc_ovl_set_fir(enum omap_plane plane,
int hinc, int vinc,
enum omap_color_component color_comp)
@@ -2322,6 +2406,11 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
+ if (pclk == 0 || mgr_timings->pixelclock == 0) {
+ DSSERR("cannot calculate scaling settings: pclk is zero\n");
+ return -EINVAL;
+ }
+
if ((caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
return -EINVAL;
@@ -2441,7 +2530,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
unsigned long pclk = dispc_plane_pclk_rate(plane);
unsigned long lclk = dispc_plane_lclk_rate(plane);
- if (paddr == 0)
+ if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
return -EINVAL;
out_width = out_width == 0 ? width : out_width;
@@ -2915,7 +3004,7 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
{
u32 timing_h, timing_v, l;
- bool onoff, rf, ipc;
+ bool onoff, rf, ipc, vs, hs, de;
timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) |
FLD_VAL(hfp-1, dispc.feat->fp_start, 8) |
@@ -2927,6 +3016,39 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
+ switch (vsync_level) {
+ case OMAPDSS_SIG_ACTIVE_LOW:
+ vs = true;
+ break;
+ case OMAPDSS_SIG_ACTIVE_HIGH:
+ vs = false;
+ break;
+ default:
+ BUG();
+ }
+
+ switch (hsync_level) {
+ case OMAPDSS_SIG_ACTIVE_LOW:
+ hs = true;
+ break;
+ case OMAPDSS_SIG_ACTIVE_HIGH:
+ hs = false;
+ break;
+ default:
+ BUG();
+ }
+
+ switch (de_level) {
+ case OMAPDSS_SIG_ACTIVE_LOW:
+ de = true;
+ break;
+ case OMAPDSS_SIG_ACTIVE_HIGH:
+ de = false;
+ break;
+ default:
+ BUG();
+ }
+
switch (data_pclk_edge) {
case OMAPDSS_DRIVE_SIG_RISING_EDGE:
ipc = false;
@@ -2934,22 +3056,18 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
ipc = true;
break;
- case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES:
default:
BUG();
}
+ /* always use the 'rf' setting */
+ onoff = true;
+
switch (sync_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES:
- onoff = false;
- rf = false;
- break;
case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
- onoff = true;
rf = false;
break;
case OMAPDSS_DRIVE_SIG_RISING_EDGE:
- onoff = true;
rf = true;
break;
default:
@@ -2958,10 +3076,10 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
- FLD_VAL(de_level, 15, 15) |
+ FLD_VAL(de, 15, 15) |
FLD_VAL(ipc, 14, 14) |
- FLD_VAL(hsync_level, 13, 13) |
- FLD_VAL(vsync_level, 12, 12);
+ FLD_VAL(hs, 13, 13) |
+ FLD_VAL(vs, 12, 12);
dispc_write_reg(DISPC_POL_FREQ(channel), l);
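An example of the resulting DISPC_POL_FREQ encoding: for active-low hsync and vsync, active-high DE, a rising data-clock edge and a rising sync-clock edge, the code above yields onoff=1, rf=1, de=0, ipc=0, hs=1, vs=1, i.e. (1<<17)|(1<<16)|(1<<13)|(1<<12) = 0x33000. This is purely a worked illustration of the bit layout, not a recommended setting:

#include <assert.h>

int main(void)
{
	/* active-low hsync/vsync, active-high DE,
	 * rising data edge, rising sync edge */
	int onoff = 1, rf = 1, de = 0, ipc = 0, hs = 1, vs = 1;
	unsigned int l = (onoff << 17) | (rf << 16) | (de << 15) |
			 (ipc << 14) | (hs << 13) | (vs << 12);

	assert(l == 0x33000);
	return 0;
}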
@@ -3569,6 +3687,9 @@ static void _omap_dispc_initial_config(void)
if (dispc.feat->mstandby_workaround)
REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
+
+ if (dss_has_feature(FEAT_MFLAG))
+ dispc_init_mflag();
}
static const struct dispc_features omap24xx_dispc_feats __initconst = {
@@ -3770,6 +3891,8 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
dispc.pdev = pdev;
+ spin_lock_init(&dispc.control_lock);
+
r = dispc_init_features(dispc.pdev);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
index 2412a0dd0c13..ef5b9027985d 100644
--- a/drivers/video/fbdev/omap2/dss/display.c
+++ b/drivers/video/fbdev/omap2/dss/display.c
@@ -295,7 +295,7 @@ void videomode_to_omap_video_timings(const struct videomode *vm,
OMAPDSS_DRIVE_SIG_RISING_EDGE :
OMAPDSS_DRIVE_SIG_FALLING_EDGE;
- ovt->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ ovt->sync_pclk_edge = ovt->data_pclk_edge;
}
EXPORT_SYMBOL(videomode_to_omap_video_timings);
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index 5081f6fb1737..28b0bc11669d 100644
--- a/drivers/video/fbdev/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -4137,7 +4137,7 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
dss_mgr_set_timings(mgr, &dsi->timings);
diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index a6d10d4279f3..7f978b6a34e8 100644
--- a/drivers/video/fbdev/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -38,6 +38,7 @@
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
#include <video/omapdss.h>
@@ -1138,6 +1139,8 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
dss_debugfs_create_file("dss", dss_dump_regs);
+ pm_set_vt_switch(0);
+
return 0;
err_pll_init:
diff --git a/drivers/video/fbdev/omap2/dss/dss_features.c b/drivers/video/fbdev/omap2/dss/dss_features.c
index 376270b777f8..b0b6dfd657bf 100644
--- a/drivers/video/fbdev/omap2/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/dss/dss_features.c
@@ -440,7 +440,7 @@ static const struct dss_param_range omap3_dss_param_range[] = {
static const struct dss_param_range am43xx_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 200000000 },
- [FEAT_PARAM_DSS_PCD] = { 2, 255 },
+ [FEAT_PARAM_DSS_PCD] = { 1, 255 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
};
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
index a3cfe3d708f7..bfc0c4c297d6 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
@@ -55,7 +55,7 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
const unsigned ss_scl_low = 4700; /* ns */
const unsigned fs_scl_high = 600; /* ns */
const unsigned fs_scl_low = 1300; /* ns */
- const unsigned sda_hold = 300; /* ns */
+ const unsigned sda_hold = 1000; /* ns */
const unsigned sfr_div = 10;
unsigned long long sfr;
unsigned v;
diff --git a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
index 42b87f95267c..8b6f6d5fdd68 100644
--- a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
@@ -164,20 +164,15 @@ static void __init omapdss_walk_device(struct device_node *node, bool root)
pn = of_graph_get_remote_port_parent(n);
- if (!pn) {
- of_node_put(n);
+ if (!pn)
continue;
- }
if (!of_device_is_available(pn) || omapdss_list_contains(pn)) {
of_node_put(pn);
- of_node_put(n);
continue;
}
omapdss_walk_device(pn, false);
-
- of_node_put(n);
}
}
diff --git a/drivers/video/fbdev/omap2/dss/rfbi.c b/drivers/video/fbdev/omap2/dss/rfbi.c
index 28e694b11ff9..065effca9236 100644
--- a/drivers/video/fbdev/omap2/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/dss/rfbi.c
@@ -869,7 +869,7 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
dss_mgr_set_timings(mgr, &rfbi.timings);
}
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 22f07f88bc40..4f0cbb54d4db 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -2073,7 +2073,7 @@ static int omapfb_mode_to_timings(const char *mode_str,
} else {
timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
}
timings->pixelclock = PICOS2KHZ(var->pixclock) * 1000;
@@ -2223,7 +2223,7 @@ static void fb_videomode_to_omap_timings(struct fb_videomode *m,
} else {
t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
}
t->x_res = m->xres;
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 518d1fd38a81..377d3399a3ad 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -168,7 +168,7 @@ static int platinumfb_blank(int blank, struct fb_info *fb)
struct fb_info_platinum *info = (struct fb_info_platinum *) fb;
int ctrl;
- ctrl = ld_le32(&info->platinum_regs->ctrl.r) | 0x33;
+ ctrl = le32_to_cpup(&info->platinum_regs->ctrl.r) | 0x33;
if (blank)
--blank_mode;
if (blank & VESA_VSYNC_SUSPEND)
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 4bf3273d0433..77b99ed39ad0 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -1479,9 +1479,9 @@ static void pm3fb_remove(struct pci_dev *dev)
fb_dealloc_cmap(&info->cmap);
#ifdef CONFIG_MTRR
- if (par->mtrr_handle >= 0)
- mtrr_del(par->mtrr_handle, info->fix.smem_start,
- info->fix.smem_len);
+ if (par->mtrr_handle >= 0)
+ mtrr_del(par->mtrr_handle, info->fix.smem_start,
+ info->fix.smem_len);
#endif /* CONFIG_MTRR */
iounmap(info->screen_base);
release_mem_region(fix->smem_start, fix->smem_len);
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index da2431eda2fd..7245611ec963 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1285,7 +1285,7 @@ static int pxafb_smart_thread(void *arg)
mutex_unlock(&fbi->ctrlr_lock);
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(30 * HZ / 1000);
+ schedule_timeout(msecs_to_jiffies(30));
}
pr_debug("%s(): task ending\n", __func__);
@@ -1460,7 +1460,7 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)
#ifdef CONFIG_FB_PXA_SMARTPANEL
if (fbi->lccr0 & LCCR0_LCDT) {
wait_for_completion_timeout(&fbi->refresh_done,
- 200 * HZ / 1000);
+ msecs_to_jiffies(200));
return;
}
#endif
@@ -1472,7 +1472,7 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)
lcd_writel(fbi, LCCR0, lccr0);
lcd_writel(fbi, LCCR0, lccr0 | LCCR0_DIS);
- wait_for_completion_timeout(&fbi->disable_done, 200 * HZ / 1000);
+ wait_for_completion_timeout(&fbi->disable_done, msecs_to_jiffies(200));
/* disable LCD controller clock */
clk_disable_unprepare(fbi->clk);
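The old "N * HZ / 1000" form truncates on kernels where HZ is not a multiple of 1000, while msecs_to_jiffies() effectively rounds up to the next jiffy, so the timeout is never shorter than requested. A quick comparison assuming HZ = 250 (an illustrative value; the real one depends on the kernel configuration):

#include <assert.h>

int main(void)
{
	const unsigned int HZ = 250;	/* example CONFIG_HZ */
	unsigned int msecs = 30;

	unsigned int truncated  = msecs * HZ / 1000;		/* 7 jiffies = 28 ms */
	unsigned int rounded_up = (msecs * HZ + 999) / 1000;	/* roughly what msecs_to_jiffies() does: 8 jiffies = 32 ms */

	assert(truncated == 7 && rounded_up == 8);
	return 0;
}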
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index be73727c7227..294a80908c8c 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -44,10 +44,6 @@
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
-#ifdef CONFIG_PPC_OF
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/machdep.h>
#include <asm/backlight.h>
@@ -1735,7 +1731,6 @@ static int riva_set_fbinfo(struct fb_info *info)
return (rivafb_check_var(&info->var, info));
}
-#ifdef CONFIG_PPC_OF
static int riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
{
struct riva_par *par = info->par;
@@ -1766,9 +1761,8 @@ static int riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
NVTRACE_LEAVE();
return 0;
}
-#endif /* CONFIG_PPC_OF */
-#if defined(CONFIG_FB_RIVA_I2C) && !defined(CONFIG_PPC_OF)
+#if defined(CONFIG_FB_RIVA_I2C)
static int riva_get_EDID_i2c(struct fb_info *info)
{
struct riva_par *par = info->par;
@@ -1828,10 +1822,13 @@ static void riva_update_default_var(struct fb_var_screeninfo *var,
static void riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
{
NVTRACE_ENTER();
-#ifdef CONFIG_PPC_OF
- if (!riva_get_EDID_OF(info, pdev))
+ if (riva_get_EDID_OF(info, pdev)) {
+ NVTRACE_LEAVE();
+ return;
+ }
+ if (IS_ENABLED(CONFIG_OF))
printk(PFX "could not retrieve EDID from OF\n");
-#elif defined(CONFIG_FB_RIVA_I2C)
+#if defined(CONFIG_FB_RIVA_I2C)
if (!riva_get_EDID_i2c(info))
printk(PFX "could not retrieve EDID from DDC/I2C\n");
#endif
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index d3013cd9f976..82c0a8caa9b8 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1461,7 +1461,7 @@ overlay_rop3_store(struct device *dev, struct device_attribute *attr,
unsigned int rop3;
char *endp;
- rop3 = !!simple_strtoul(buf, &endp, 10);
+ rop3 = simple_strtoul(buf, &endp, 10);
if (isspace(*endp))
endp++;
@@ -2605,7 +2605,6 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch)
unsigned int max_size;
unsigned int i;
- mutex_init(&ch->open_lock);
ch->notify = sh_mobile_lcdc_display_notify;
/* Validate the format. */
@@ -2704,7 +2703,7 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
struct resource *res;
int num_channels;
int error;
- int i;
+ int irq, i;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
@@ -2712,8 +2711,8 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i = platform_get_irq(pdev, 0);
- if (!res || i < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
dev_err(&pdev->dev, "cannot get platform resources\n");
return -ENOENT;
}
@@ -2726,16 +2725,18 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->meram_dev = pdata->meram_dev;
+ for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
+ mutex_init(&priv->ch[i].open_lock);
platform_set_drvdata(pdev, priv);
- error = request_irq(i, sh_mobile_lcdc_irq, 0,
+ error = request_irq(irq, sh_mobile_lcdc_irq, 0,
dev_name(&pdev->dev), priv);
if (error) {
dev_err(&pdev->dev, "unable to request irq\n");
goto err1;
}
- priv->irq = i;
+ priv->irq = irq;
atomic_set(&priv->hw_usecnt, -1);
for (i = 0, num_channels = 0; i < ARRAY_SIZE(pdata->ch); i++) {
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index e8d4121783fb..d0a4e2f79a57 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1606,7 +1606,7 @@ static int sm501fb_start(struct sm501fb_info *info,
info->fbmem_len = resource_size(res);
/* clear framebuffer memory - avoids garbage data on unused fb */
- memset(info->fbmem, 0, info->fbmem_len);
+ memset_io(info->fbmem, 0, info->fbmem_len);
/* clear palette ram - undefined at power on */
for (k = 0; k < (256 * 3); k++)
diff --git a/drivers/video/fbdev/via/via_clock.c b/drivers/video/fbdev/via/via_clock.c
index db1e39277e32..bf269fa43977 100644
--- a/drivers/video/fbdev/via/via_clock.c
+++ b/drivers/video/fbdev/via/via_clock.c
@@ -30,7 +30,7 @@
#include "global.h"
#include "debug.h"
-const char *via_slap = "Please slap VIA Technologies to motivate them "
+static const char *via_slap = "Please slap VIA Technologies to motivate them "
"releasing full documentation for your platform!\n";
static inline u32 cle266_encode_pll(struct via_pll_config pll)
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index b546da5d8ea3..cab9f3f63a38 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -48,6 +48,16 @@ config VIRTIO_BALLOON
If unsure, say M.
+config VIRTIO_INPUT
+ tristate "Virtio input driver"
+ depends on VIRTIO
+ depends on INPUT
+ ---help---
+ This driver supports virtio input devices such as
+ keyboards, mice and tablets.
+
+ If unsure, say M.
+
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
depends on HAS_IOMEM
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index d85565b8ea46..41e30e3dc842 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
+obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 5ce2aa48fc6e..b1877d73fa56 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -278,12 +278,6 @@ static struct bus_type virtio_bus = {
.remove = virtio_dev_remove,
};
-bool virtio_device_is_legacy_only(struct virtio_device_id id)
-{
- return id.device == VIRTIO_ID_BALLOON;
-}
-EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only);
-
int register_virtio_driver(struct virtio_driver *driver)
{
/* Catch this early. */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6a356e344f82..82e80e034f25 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -214,8 +214,8 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
u16 tag, u64 val)
{
BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
- vb->stats[idx].tag = tag;
- vb->stats[idx].val = val;
+ vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
+ vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}
#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
@@ -283,18 +283,27 @@ static void virtballoon_changed(struct virtio_device *vdev)
static inline s64 towards_target(struct virtio_balloon *vb)
{
- __le32 v;
s64 target;
+ u32 num_pages;
- virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+ virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
+ &num_pages);
- target = le32_to_cpu(v);
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ num_pages = le32_to_cpu((__force __le32)num_pages);
+
+ target = num_pages;
return target - vb->num_pages;
}
static void update_balloon_size(struct virtio_balloon *vb)
{
- __le32 actual = cpu_to_le32(vb->num_pages);
+ u32 actual = vb->num_pages;
+
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ actual = (__force u32)cpu_to_le32(actual);
virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
&actual);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
new file mode 100644
index 000000000000..60e2a1677563
--- /dev/null
+++ b/drivers/virtio/virtio_input.c
@@ -0,0 +1,384 @@
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/input.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_input.h>
+
+struct virtio_input {
+ struct virtio_device *vdev;
+ struct input_dev *idev;
+ char name[64];
+ char serial[64];
+ char phys[64];
+ struct virtqueue *evt, *sts;
+ struct virtio_input_event evts[64];
+ spinlock_t lock;
+ bool ready;
+};
+
+static void virtinput_queue_evtbuf(struct virtio_input *vi,
+ struct virtio_input_event *evtbuf)
+{
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, evtbuf, sizeof(*evtbuf));
+ virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
+}
+
+static void virtinput_recv_events(struct virtqueue *vq)
+{
+ struct virtio_input *vi = vq->vdev->priv;
+ struct virtio_input_event *event;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ if (vi->ready) {
+ while ((event = virtqueue_get_buf(vi->evt, &len)) != NULL) {
+ spin_unlock_irqrestore(&vi->lock, flags);
+ input_event(vi->idev,
+ le16_to_cpu(event->type),
+ le16_to_cpu(event->code),
+ le32_to_cpu(event->value));
+ spin_lock_irqsave(&vi->lock, flags);
+ virtinput_queue_evtbuf(vi, event);
+ }
+ virtqueue_kick(vq);
+ }
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+/*
+ * On error we lose the status update, which isn't critical, as
+ * this is typically used for things like keyboard LEDs.
+ */
+static int virtinput_send_status(struct virtio_input *vi,
+ u16 type, u16 code, s32 value)
+{
+ struct virtio_input_event *stsbuf;
+ struct scatterlist sg[1];
+ unsigned long flags;
+ int rc;
+
+ stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC);
+ if (!stsbuf)
+ return -ENOMEM;
+
+ stsbuf->type = cpu_to_le16(type);
+ stsbuf->code = cpu_to_le16(code);
+ stsbuf->value = cpu_to_le32(value);
+ sg_init_one(sg, stsbuf, sizeof(*stsbuf));
+
+ spin_lock_irqsave(&vi->lock, flags);
+ if (vi->ready) {
+ rc = virtqueue_add_outbuf(vi->sts, sg, 1, stsbuf, GFP_ATOMIC);
+ virtqueue_kick(vi->sts);
+ } else {
+ rc = -ENODEV;
+ }
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ if (rc != 0)
+ kfree(stsbuf);
+ return rc;
+}
+
+static void virtinput_recv_status(struct virtqueue *vq)
+{
+ struct virtio_input *vi = vq->vdev->priv;
+ struct virtio_input_event *stsbuf;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ while ((stsbuf = virtqueue_get_buf(vi->sts, &len)) != NULL)
+ kfree(stsbuf);
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_status(struct input_dev *idev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct virtio_input *vi = input_get_drvdata(idev);
+
+ return virtinput_send_status(vi, type, code, value);
+}
+
+static u8 virtinput_cfg_select(struct virtio_input *vi,
+ u8 select, u8 subsel)
+{
+ u8 size;
+
+ virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
+ virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
+ virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+ return size;
+}
+
+static void virtinput_cfg_bits(struct virtio_input *vi, int select, int subsel,
+ unsigned long *bits, unsigned int bitcount)
+{
+ unsigned int bit;
+ u8 *virtio_bits;
+ u8 bytes;
+
+ bytes = virtinput_cfg_select(vi, select, subsel);
+ if (!bytes)
+ return;
+ if (bitcount > bytes * 8)
+ bitcount = bytes * 8;
+
+ /*
+ * Bitmap in virtio config space is a simple stream of bytes,
+ * with the first byte carrying bits 0-7, second bits 8-15 and
+ * so on.
+ */
+ virtio_bits = kzalloc(bytes, GFP_KERNEL);
+ if (!virtio_bits)
+ return;
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.bitmap),
+ virtio_bits, bytes);
+ for (bit = 0; bit < bitcount; bit++) {
+ if (virtio_bits[bit / 8] & (1 << (bit % 8)))
+ __set_bit(bit, bits);
+ }
+ kfree(virtio_bits);
+
+ if (select == VIRTIO_INPUT_CFG_EV_BITS)
+ __set_bit(subsel, vi->idev->evbit);
+}
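The config-space bitmap described in the comment above is a plain byte stream: byte 0 carries bits 0-7, byte 1 carries bits 8-15, and so on. A standalone sketch of the same unpacking into a word-based bitmap, using plain unsigned long words in place of the kernel's __set_bit():

#include <assert.h>
#include <limits.h>

int main(void)
{
	/* example config payload: bits 1, 8 and 17 set */
	unsigned char virtio_bits[] = { 0x02, 0x01, 0x02 };
	unsigned long bits[2] = { 0, 0 };
	unsigned int bit, bitcount = sizeof(virtio_bits) * 8;

	for (bit = 0; bit < bitcount; bit++)
		if (virtio_bits[bit / 8] & (1 << (bit % 8)))
			bits[bit / (sizeof(long) * CHAR_BIT)] |=
				1UL << (bit % (sizeof(long) * CHAR_BIT));

	assert(bits[0] == ((1UL << 1) | (1UL << 8) | (1UL << 17)));
	return 0;
}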
+
+static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
+{
+ u32 mi, ma, re, fu, fl;
+
+ virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+ input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
+ input_abs_set_res(vi->idev, abs, re);
+}
+
+static int virtinput_init_vqs(struct virtio_input *vi)
+{
+ struct virtqueue *vqs[2];
+ vq_callback_t *cbs[] = { virtinput_recv_events,
+ virtinput_recv_status };
+ static const char *names[] = { "events", "status" };
+ int err;
+
+ err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
+ if (err)
+ return err;
+ vi->evt = vqs[0];
+ vi->sts = vqs[1];
+
+ return 0;
+}
+
+static void virtinput_fill_evt(struct virtio_input *vi)
+{
+ unsigned long flags;
+ int i, size;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ size = virtqueue_get_vring_size(vi->evt);
+ if (size > ARRAY_SIZE(vi->evts))
+ size = ARRAY_SIZE(vi->evts);
+ for (i = 0; i < size; i++)
+ virtinput_queue_evtbuf(vi, &vi->evts[i]);
+ virtqueue_kick(vi->evt);
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_probe(struct virtio_device *vdev)
+{
+ struct virtio_input *vi;
+ unsigned long flags;
+ size_t size;
+ int abs, err;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vdev->priv = vi;
+ vi->vdev = vdev;
+ spin_lock_init(&vi->lock);
+
+ err = virtinput_init_vqs(vi);
+ if (err)
+ goto err_init_vq;
+
+ vi->idev = input_allocate_device();
+ if (!vi->idev) {
+ err = -ENOMEM;
+ goto err_input_alloc;
+ }
+ input_set_drvdata(vi->idev, vi);
+
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_NAME, 0);
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.string),
+ vi->name, min(size, sizeof(vi->name)));
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_SERIAL, 0);
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.string),
+ vi->serial, min(size, sizeof(vi->serial)));
+ snprintf(vi->phys, sizeof(vi->phys),
+ "virtio%d/input0", vdev->index);
+ vi->idev->name = vi->name;
+ vi->idev->phys = vi->phys;
+ vi->idev->uniq = vi->serial;
+
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
+ if (size >= sizeof(struct virtio_input_devids)) {
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.bustype, &vi->idev->id.bustype);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.vendor, &vi->idev->id.vendor);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.product, &vi->idev->id.product);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.version, &vi->idev->id.version);
+ } else {
+ vi->idev->id.bustype = BUS_VIRTUAL;
+ }
+
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_PROP_BITS, 0,
+ vi->idev->propbit, INPUT_PROP_CNT);
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REP);
+ if (size)
+ __set_bit(EV_REP, vi->idev->evbit);
+
+ vi->idev->dev.parent = &vdev->dev;
+ vi->idev->event = virtinput_status;
+
+ /* device -> kernel */
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_KEY,
+ vi->idev->keybit, KEY_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REL,
+ vi->idev->relbit, REL_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_ABS,
+ vi->idev->absbit, ABS_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_MSC,
+ vi->idev->mscbit, MSC_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SW,
+ vi->idev->swbit, SW_CNT);
+
+ /* kernel -> device */
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_LED,
+ vi->idev->ledbit, LED_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SND,
+ vi->idev->sndbit, SND_CNT);
+
+ if (test_bit(EV_ABS, vi->idev->evbit)) {
+ for (abs = 0; abs < ABS_CNT; abs++) {
+ if (!test_bit(abs, vi->idev->absbit))
+ continue;
+ virtinput_cfg_abs(vi, abs);
+ }
+ }
+
+ virtio_device_ready(vdev);
+ vi->ready = true;
+ err = input_register_device(vi->idev);
+ if (err)
+ goto err_input_register;
+
+ virtinput_fill_evt(vi);
+ return 0;
+
+err_input_register:
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+ input_free_device(vi->idev);
+err_input_alloc:
+ vdev->config->del_vqs(vdev);
+err_init_vq:
+ kfree(vi);
+ return err;
+}
+
+static void virtinput_remove(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ input_unregister_device(vi->idev);
+ vdev->config->del_vqs(vdev);
+ kfree(vi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtinput_freeze(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int virtinput_restore(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ int err;
+
+ err = virtinput_init_vqs(vi);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+ vi->ready = true;
+ virtinput_fill_evt(vi);
+ return 0;
+}
+#endif
+
+static unsigned int features[] = {
+ /* none */
+};
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_INPUT, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_input_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtinput_probe,
+ .remove = virtinput_remove,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtinput_freeze,
+ .restore = virtinput_restore,
+#endif
+};
+
+module_virtio_driver(virtio_input_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Virtio input device driver");
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6010d7ec0a0f..7a5e60dea6c5 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -581,14 +581,6 @@ static int virtio_mmio_probe(struct platform_device *pdev)
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
- /* Reject legacy-only IDs for version 2 devices */
- if (vm_dev->version == 2 &&
- virtio_device_is_legacy_only(vm_dev->vdev.id)) {
- dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
- vm_dev->vdev.id.device);
- return -ENODEV;
- }
-
if (vm_dev->version == 1)
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 2aa38e59db2e..e88e0997a889 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -20,6 +20,50 @@
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+static inline u16 vp_ioread16(u16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(u32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static void vp_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo, __le32 __iomem *hi)
+{
+ vp_iowrite32((u32)val, lo);
+ vp_iowrite32(val >> 32, hi);
+}
+
static void __iomem *map_capability(struct pci_dev *dev, int off,
size_t minlen,
u32 align,
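
A stand-alone sketch of what these "natural width" wrappers buy: because each wrapper takes a pointer of the matching width, handing it a field of a different size produces an incompatible-pointer-type diagnostic at build time rather than a silently mis-sized bus access. The struct and the ex_* helpers below are invented for illustration and use plain pointers instead of __iomem ones.

struct example_cfg {
	unsigned char  device_status;	/* 8-bit field  */
	unsigned short msix_config;	/* 16-bit field */
	unsigned int   device_feature;	/* 32-bit field */
};

static inline unsigned char ex_ioread8(unsigned char *addr)   { return *addr; }
static inline unsigned short ex_ioread16(unsigned short *addr) { return *addr; }
static inline unsigned int  ex_ioread32(unsigned int *addr)   { return *addr; }

unsigned int example_reads(struct example_cfg *cfg)
{
	unsigned int v = 0;

	v += ex_ioread8(&cfg->device_status);	/* widths match: fine */
	v += ex_ioread16(&cfg->msix_config);	/* widths match: fine */
	v += ex_ioread32(&cfg->device_feature);	/* widths match: fine */
	/*
	 * v += ex_ioread32(&cfg->msix_config);
	 *
	 * would pass an 'unsigned short *' where 'unsigned int *' is
	 * expected, so the wrong-width access is flagged while compiling
	 * instead of slipping through review.
	 */
	return v;
}
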
@@ -94,22 +138,16 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
return p;
}
-static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
-{
- iowrite32((u32)val, lo);
- iowrite32(val >> 32, hi);
-}
-
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u64 features;
- iowrite32(0, &vp_dev->common->device_feature_select);
- features = ioread32(&vp_dev->common->device_feature);
- iowrite32(1, &vp_dev->common->device_feature_select);
- features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
+ vp_iowrite32(0, &vp_dev->common->device_feature_select);
+ features = vp_ioread32(&vp_dev->common->device_feature);
+ vp_iowrite32(1, &vp_dev->common->device_feature_select);
+ features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
return features;
}
@@ -128,10 +166,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- iowrite32(0, &vp_dev->common->guest_feature_select);
- iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
- iowrite32(1, &vp_dev->common->guest_feature_select);
- iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+ vp_iowrite32(0, &vp_dev->common->guest_feature_select);
+ vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
+ vp_iowrite32(1, &vp_dev->common->guest_feature_select);
+ vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
return 0;
}
@@ -210,14 +248,14 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u32 vp_generation(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(&vp_dev->common->config_generation);
+ return vp_ioread8(&vp_dev->common->config_generation);
}
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(&vp_dev->common->device_status);
+ return vp_ioread8(&vp_dev->common->device_status);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
@@ -225,17 +263,17 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* We should never be setting status to 0. */
BUG_ON(status == 0);
- iowrite8(status, &vp_dev->common->device_status);
+ vp_iowrite8(status, &vp_dev->common->device_status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
- iowrite8(0, &vp_dev->common->device_status);
+ vp_iowrite8(0, &vp_dev->common->device_status);
/* Flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any. */
- ioread8(&vp_dev->common->device_status);
+ vp_ioread8(&vp_dev->common->device_status);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -243,10 +281,10 @@ static void vp_reset(struct virtio_device *vdev)
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
/* Setup the vector used for configuration events */
- iowrite16(vector, &vp_dev->common->msix_config);
+ vp_iowrite16(vector, &vp_dev->common->msix_config);
/* Verify we had enough resources to assign the vector */
/* Will also flush the write out to device */
- return ioread16(&vp_dev->common->msix_config);
+ return vp_ioread16(&vp_dev->common->msix_config);
}
static size_t vring_pci_size(u16 num)
@@ -286,15 +324,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
u16 num, off;
int err;
- if (index >= ioread16(&cfg->num_queues))
+ if (index >= vp_ioread16(&cfg->num_queues))
return ERR_PTR(-ENOENT);
/* Select the queue we're interested in */
- iowrite16(index, &cfg->queue_select);
+ vp_iowrite16(index, &cfg->queue_select);
/* Check if queue is either not available or already active. */
- num = ioread16(&cfg->queue_size);
- if (!num || ioread16(&cfg->queue_enable))
+ num = vp_ioread16(&cfg->queue_size);
+ if (!num || vp_ioread16(&cfg->queue_enable))
return ERR_PTR(-ENOENT);
if (num & (num - 1)) {
@@ -303,7 +341,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* get offset of notification word for this vq */
- off = ioread16(&cfg->queue_notify_off);
+ off = vp_ioread16(&cfg->queue_notify_off);
info->num = num;
info->msix_vector = msix_vec;
@@ -322,13 +360,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* activate the queue */
- iowrite16(num, &cfg->queue_size);
- iowrite64_twopart(virt_to_phys(info->queue),
- &cfg->queue_desc_lo, &cfg->queue_desc_hi);
- iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
- &cfg->queue_avail_lo, &cfg->queue_avail_hi);
- iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
- &cfg->queue_used_lo, &cfg->queue_used_hi);
+ vp_iowrite16(num, &cfg->queue_size);
+ vp_iowrite64_twopart(virt_to_phys(info->queue),
+ &cfg->queue_desc_lo, &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+ &cfg->queue_avail_lo, &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+ &cfg->queue_used_lo, &cfg->queue_used_hi);
if (vp_dev->notify_base) {
/* offset should not wrap */
@@ -357,8 +395,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- iowrite16(msix_vec, &cfg->queue_msix_vector);
- msix_vec = ioread16(&cfg->queue_msix_vector);
+ vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
+ msix_vec = vp_ioread16(&cfg->queue_msix_vector);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto err_assign_vector;
@@ -393,8 +431,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* this, there's no way to go back except reset.
*/
list_for_each_entry(vq, &vdev->vqs, list) {
- iowrite16(vq->index, &vp_dev->common->queue_select);
- iowrite16(1, &vp_dev->common->queue_enable);
+ vp_iowrite16(vq->index, &vp_dev->common->queue_select);
+ vp_iowrite16(1, &vp_dev->common->queue_enable);
}
return 0;
@@ -405,13 +443,13 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- iowrite16(vq->index, &vp_dev->common->queue_select);
+ vp_iowrite16(vq->index, &vp_dev->common->queue_select);
if (vp_dev->msix_enabled) {
- iowrite16(VIRTIO_MSI_NO_VECTOR,
- &vp_dev->common->queue_msix_vector);
+ vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
+ &vp_dev->common->queue_msix_vector);
/* Flush the write out to device */
- ioread16(&vp_dev->common->queue_msix_vector);
+ vp_ioread16(&vp_dev->common->queue_msix_vector);
}
if (!vp_dev->notify_base)
@@ -577,9 +615,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
}
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
- if (virtio_device_is_legacy_only(vp_dev->vdev.id))
- return -ENODEV;
-
/* check for a common config: if not, use legacy mode (bar 0). */
common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
IORESOURCE_IO | IORESOURCE_MEM);
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 53bf2c860ad3..a4621757a47f 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -166,7 +166,7 @@ static int mxc_w1_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id mxc_w1_dt_ids[] = {
+static const struct of_device_id mxc_w1_dt_ids[] = {
{ .compatible = "fsl,imx21-owire" },
{ /* sentinel */ }
};
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 03321d6a2684..e7d448963a24 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -72,7 +72,7 @@ struct hdq_data {
static int omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);
-static struct of_device_id omap_hdq_dt_ids[] = {
+static const struct of_device_id omap_hdq_dt_ids[] = {
{ .compatible = "ti,omap3-1w" },
{}
};
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index b99a932ad901..8f7848c62811 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -68,7 +68,7 @@ static u8 w1_gpio_read_bit(void *data)
}
#if defined(CONFIG_OF)
-static struct of_device_id w1_gpio_dt_ids[] = {
+static const struct of_device_id w1_gpio_dt_ids[] = {
{ .compatible = "w1-gpio" },
{}
};
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 16f202350997..e5e7c5505de7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -154,7 +154,7 @@ config ARM_SP805_WATCHDOG
config AT91RM9200_WATCHDOG
tristate "AT91RM9200 watchdog"
- depends on SOC_AT91RM9200
+ depends on SOC_AT91RM9200 && MFD_SYSCON
help
Watchdog timer embedded into AT91RM9200 chips. This will reboot your
system when the timeout is reached.
@@ -169,7 +169,6 @@ config AT91SAM9X_WATCHDOG
config CADENCE_WATCHDOG
tristate "Cadence Watchdog Timer"
- depends on ARM
select WATCHDOG_CORE
help
Say Y here if you want to include support for the watchdog
@@ -1190,6 +1189,7 @@ config OCTEON_WDT
tristate "Cavium OCTEON SOC family Watchdog Timer"
depends on CAVIUM_OCTEON_SOC
default y
+ select WATCHDOG_CORE
select EXPORT_UASM if OCTEON_WDT = m
help
Hardware driver for OCTEON's on chip watchdog timer.
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index d244112d5b6f..41cecb55766c 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -12,27 +12,32 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-st.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <mach/at91_st.h>
#define WDT_DEFAULT_TIME 5 /* seconds */
#define WDT_MAX_TIME 256 /* seconds */
static int wdt_time = WDT_DEFAULT_TIME;
static bool nowayout = WATCHDOG_NOWAYOUT;
+static struct regmap *regmap_st;
module_param(wdt_time, int, 0);
MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
@@ -50,12 +55,33 @@ static unsigned long at91wdt_busy;
/* ......................................................................... */
+static int at91rm9200_restart(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ /*
+ * Perform a hardware reset with the use of the Watchdog timer.
+ */
+ regmap_write(regmap_st, AT91_ST_WDMR,
+ AT91_ST_RSTEN | AT91_ST_EXTEN | 1);
+ regmap_write(regmap_st, AT91_ST_CR, AT91_ST_WDRST);
+
+ mdelay(2000);
+
+ pr_emerg("Unable to restart system\n");
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block at91rm9200_restart_nb = {
+ .notifier_call = at91rm9200_restart,
+ .priority = 192,
+};
+
/*
* Disable the watchdog.
*/
static inline void at91_wdt_stop(void)
{
- at91_st_write(AT91_ST_WDMR, AT91_ST_EXTEN);
+ regmap_write(regmap_st, AT91_ST_WDMR, AT91_ST_EXTEN);
}
/*
@@ -63,9 +89,9 @@ static inline void at91_wdt_stop(void)
*/
static inline void at91_wdt_start(void)
{
- at91_st_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN |
+ regmap_write(regmap_st, AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN |
(((65536 * wdt_time) >> 8) & AT91_ST_WDV));
- at91_st_write(AT91_ST_CR, AT91_ST_WDRST);
+ regmap_write(regmap_st, AT91_ST_CR, AT91_ST_WDRST);
}
/*
@@ -73,7 +99,7 @@ static inline void at91_wdt_start(void)
*/
static inline void at91_wdt_reload(void)
{
- at91_st_write(AT91_ST_CR, AT91_ST_WDRST);
+ regmap_write(regmap_st, AT91_ST_CR, AT91_ST_WDRST);
}
/* ......................................................................... */
@@ -203,16 +229,32 @@ static struct miscdevice at91wdt_miscdev = {
static int at91wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device *parent;
int res;
if (at91wdt_miscdev.parent)
return -EBUSY;
at91wdt_miscdev.parent = &pdev->dev;
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent\n");
+ return -ENODEV;
+ }
+
+ regmap_st = syscon_node_to_regmap(parent->of_node);
+ if (!regmap_st)
+ return -ENODEV;
+
res = misc_register(&at91wdt_miscdev);
if (res)
return res;
+ res = register_restart_handler(&at91rm9200_restart_nb);
+ if (res)
+ dev_warn(dev, "failed to register restart handler\n");
+
pr_info("AT91 Watchdog Timer enabled (%d seconds%s)\n",
wdt_time, nowayout ? ", nowayout" : "");
return 0;
@@ -220,8 +262,13 @@ static int at91wdt_probe(struct platform_device *pdev)
static int at91wdt_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int res;
+ res = unregister_restart_handler(&at91rm9200_restart_nb);
+ if (res)
+ dev_warn(dev, "failed to unregister restart handler\n");
+
res = misc_deregister(&at91wdt_miscdev);
if (!res)
at91wdt_miscdev.parent = NULL;
@@ -267,7 +314,7 @@ static struct platform_driver at91wdt_driver = {
.suspend = at91wdt_suspend,
.resume = at91wdt_resume,
.driver = {
- .name = "at91_wdt",
+ .name = "atmel_st_watchdog",
.of_match_table = at91_wdt_dt_ids,
},
};
@@ -296,4 +343,4 @@ module_exit(at91_wdt_exit);
MODULE_AUTHOR("Andrew Victor");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:at91_wdt");
+MODULE_ALIAS("platform:atmel_st_watchdog");
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 4e37db3539a4..22d8ae65772a 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -99,12 +99,14 @@ static int secure_register_read(struct bcm_kona_wdt *wdt, uint32_t offset)
static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
{
- int ctl_val, cur_val, ret;
+ int ctl_val, cur_val;
unsigned long flags;
struct bcm_kona_wdt *wdt = s->private;
- if (!wdt)
- return seq_puts(s, "No device pointer\n");
+ if (!wdt) {
+ seq_puts(s, "No device pointer\n");
+ return 0;
+ }
spin_lock_irqsave(&wdt->lock, flags);
ctl_val = secure_register_read(wdt, SECWDOG_CTRL_REG);
@@ -112,7 +114,7 @@ static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
spin_unlock_irqrestore(&wdt->lock, flags);
if (ctl_val < 0 || cur_val < 0) {
- ret = seq_puts(s, "Error accessing hardware\n");
+ seq_puts(s, "Error accessing hardware\n");
} else {
int ctl, cur, ctl_sec, cur_sec, res;
@@ -121,15 +123,18 @@ static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
cur = cur_val & SECWDOG_COUNT_MASK;
ctl_sec = TICKS_TO_SECS(ctl, wdt);
cur_sec = TICKS_TO_SECS(cur, wdt);
- ret = seq_printf(s, "Resolution: %d / %d\n"
- "Control: %d s / %d (%#x) ticks\n"
- "Current: %d s / %d (%#x) ticks\n"
- "Busy count: %lu\n", res,
- wdt->resolution, ctl_sec, ctl, ctl, cur_sec,
- cur, cur, wdt->busy_count);
+ seq_printf(s,
+ "Resolution: %d / %d\n"
+ "Control: %d s / %d (%#x) ticks\n"
+ "Current: %d s / %d (%#x) ticks\n"
+ "Busy count: %lu\n",
+ res, wdt->resolution,
+ ctl_sec, ctl, ctl,
+ cur_sec, cur, cur,
+ wdt->busy_count);
}
- return ret;
+ return 0;
}
static int bcm_kona_dbg_open(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 429494b6c822..a9a5210143ae 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -125,9 +125,7 @@ static int wdt_start(struct watchdog_device *dev)
ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
WARN_ON(ret != 0);
kfree(ebc_cmd);
- }
-
- if (MACHINE_IS_LPAR) {
+ } else {
ret = __diag288_lpar(WDT_FUNC_INIT,
dev->timeout, LPARWDT_RESTART);
}
@@ -136,7 +134,6 @@ static int wdt_start(struct watchdog_device *dev)
pr_err("The watchdog cannot be activated\n");
return ret;
}
- pr_info("The watchdog was activated\n");
return 0;
}
@@ -145,7 +142,6 @@ static int wdt_stop(struct watchdog_device *dev)
int ret;
ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
- pr_info("The watchdog was deactivated\n");
return ret;
}
@@ -177,10 +173,9 @@ static int wdt_ping(struct watchdog_device *dev)
ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
WARN_ON(ret != 0);
kfree(ebc_cmd);
- }
-
- if (MACHINE_IS_LPAR)
+ } else {
ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0);
+ }
if (ret)
pr_err("The watchdog timer cannot be started or reset\n");
@@ -202,7 +197,7 @@ static struct watchdog_ops wdt_ops = {
};
static struct watchdog_info wdt_info = {
- .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.firmware_version = 0,
.identity = "z Watchdog",
};
@@ -273,21 +268,16 @@ static int __init diag288_init(void)
watchdog_set_nowayout(&wdt_dev, nowayout_info);
if (MACHINE_IS_VM) {
- pr_info("The watchdog device driver detected a z/VM environment\n");
if (__diag288_vm(WDT_FUNC_INIT, 15,
ebc_begin, sizeof(ebc_begin)) != 0) {
pr_err("The watchdog cannot be initialized\n");
return -EINVAL;
}
- } else if (MACHINE_IS_LPAR) {
- pr_info("The watchdog device driver detected an LPAR environment\n");
+ } else {
if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) {
pr_err("The watchdog cannot be initialized\n");
return -EINVAL;
}
- } else {
- pr_err("Linux runs in an environment that does not support the diag288 watchdog\n");
- return -ENODEV;
}
if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) {
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 05ee0bf88ce9..3c3fd417ddeb 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -51,6 +51,7 @@
#define DRV_VERSION "1.11"
/* Includes */
+#include <linux/acpi.h> /* For ACPI support */
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
@@ -103,6 +104,8 @@ static struct { /* this is private data for the iTCO_wdt device */
struct platform_device *dev;
/* the PCI-device */
struct pci_dev *pdev;
+ /* whether or not the watchdog has been suspended */
+ bool suspended;
} iTCO_wdt_private;
/* module parameters */
@@ -571,12 +574,60 @@ static void iTCO_wdt_shutdown(struct platform_device *dev)
iTCO_wdt_stop(NULL);
}
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Suspend-to-idle requires this, because it stops the ticks and timekeeping, so
+ * the watchdog cannot be pinged while in that state. In ACPI sleep states the
+ * watchdog is stopped by the platform firmware.
+ */
+
+#ifdef CONFIG_ACPI
+static inline bool need_suspend(void)
+{
+ return acpi_target_system_state() == ACPI_STATE_S0;
+}
+#else
+static inline bool need_suspend(void) { return true; }
+#endif
+
+static int iTCO_wdt_suspend_noirq(struct device *dev)
+{
+ int ret = 0;
+
+ iTCO_wdt_private.suspended = false;
+ if (watchdog_active(&iTCO_wdt_watchdog_dev) && need_suspend()) {
+ ret = iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
+ if (!ret)
+ iTCO_wdt_private.suspended = true;
+ }
+ return ret;
+}
+
+static int iTCO_wdt_resume_noirq(struct device *dev)
+{
+ if (iTCO_wdt_private.suspended)
+ iTCO_wdt_start(&iTCO_wdt_watchdog_dev);
+
+ return 0;
+}
+
+static struct dev_pm_ops iTCO_wdt_pm = {
+ .suspend_noirq = iTCO_wdt_suspend_noirq,
+ .resume_noirq = iTCO_wdt_resume_noirq,
+};
+
+#define ITCO_WDT_PM_OPS (&iTCO_wdt_pm)
+#else
+#define ITCO_WDT_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver iTCO_wdt_driver = {
.probe = iTCO_wdt_probe,
.remove = iTCO_wdt_remove,
.shutdown = iTCO_wdt_shutdown,
.driver = {
.name = DRV_NAME,
+ .pm = ITCO_WDT_PM_OPS,
},
};
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 8453531545df..14521c8b3d5a 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -3,6 +3,8 @@
*
* Copyright (C) 2007, 2008, 2009, 2010 Cavium Networks
*
+ * Converted to use WATCHDOG_CORE by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
* Some parts derived from wdt.c
*
* (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
@@ -103,13 +105,10 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static unsigned long octeon_wdt_is_open;
-static char expect_close;
-
-static u32 __initdata nmi_stage1_insns[64];
+static u32 nmi_stage1_insns[64] __initdata;
/* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __initdata labels[5];
-static struct uasm_reloc __initdata relocs[5];
+static struct uasm_label labels[5] __initdata;
+static struct uasm_reloc relocs[5] __initdata;
enum lable_id {
label_enter_bootloader = 1
@@ -218,7 +217,8 @@ static void __init octeon_wdt_build_stage1(void)
pr_debug("\t.set pop\n");
if (len > 32)
- panic("NMI stage 1 handler exceeds 32 instructions, was %d\n", len);
+ panic("NMI stage 1 handler exceeds 32 instructions, was %d\n",
+ len);
}
static int cpu2core(int cpu)
@@ -294,6 +294,7 @@ static void octeon_wdt_write_hex(u64 value, int digits)
{
int d;
int v;
+
for (d = 0; d < digits; d++) {
v = (value >> ((digits - d - 1) * 4)) & 0xf;
if (v >= 10)
@@ -303,7 +304,7 @@ static void octeon_wdt_write_hex(u64 value, int digits)
}
}
-const char *reg_name[] = {
+static const char reg_name[][3] = {
"$0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
"a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -444,7 +445,7 @@ static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static void octeon_wdt_ping(void)
+static int octeon_wdt_ping(struct watchdog_device __always_unused *wdog)
{
int cpu;
int coreid;
@@ -457,10 +458,12 @@ static void octeon_wdt_ping(void)
!cpumask_test_cpu(cpu, &irq_enabled_cpus)) {
/* We have to enable the irq */
int irq = OCTEON_IRQ_WDOG0 + coreid;
+
enable_irq(irq);
cpumask_set_cpu(cpu, &irq_enabled_cpus);
}
}
+ return 0;
}
static void octeon_wdt_calc_parameters(int t)
@@ -489,7 +492,8 @@ static void octeon_wdt_calc_parameters(int t)
timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * timeout_sec) >> 8;
}
-static int octeon_wdt_set_heartbeat(int t)
+static int octeon_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int t)
{
int cpu;
int coreid;
@@ -509,158 +513,45 @@ static int octeon_wdt_set_heartbeat(int t)
cvmx_write_csr(CVMX_CIU_WDOGX(coreid), ciu_wdog.u64);
cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
}
- octeon_wdt_ping(); /* Get the irqs back on. */
+ octeon_wdt_ping(wdog); /* Get the irqs back on. */
return 0;
}
-/**
- * octeon_wdt_write:
- * @file: file handle to the watchdog
- * @buf: buffer to write (unused as data does not matter here
- * @count: count of bytes
- * @ppos: pointer to the position to write. No seeks allowed
- *
- * A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we we don't define content meaning.
- */
-
-static ssize_t octeon_wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- if (count) {
- if (!nowayout) {
- size_t i;
-
- /* In case it was set long ago */
- expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 1;
- }
- }
- octeon_wdt_ping();
- }
- return count;
-}
-
-/**
- * octeon_wdt_ioctl:
- * @file: file handle to the device
- * @cmd: watchdog command
- * @arg: argument pointer
- *
- * The watchdog API defines a common set of functions for all
- * watchdogs according to their available features. We only
- * actually usefully support querying capabilities and setting
- * the timeout.
- */
-
-static long octeon_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_heartbeat;
-
- static struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT|
- WDIOF_MAGICCLOSE|
- WDIOF_KEEPALIVEPING,
- .firmware_version = 1,
- .identity = "OCTEON",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_KEEPALIVE:
- octeon_wdt_ping();
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_heartbeat, p))
- return -EFAULT;
- if (octeon_wdt_set_heartbeat(new_heartbeat))
- return -EINVAL;
- /* Fall through. */
- case WDIOC_GETTIMEOUT:
- return put_user(heartbeat, p);
- default:
- return -ENOTTY;
- }
-}
-
-/**
- * octeon_wdt_open:
- * @inode: inode of device
- * @file: file handle to device
- *
- * The watchdog device has been opened. The watchdog device is single
- * open and on opening we do a ping to reset the counters.
- */
-
-static int octeon_wdt_open(struct inode *inode, struct file *file)
+static int octeon_wdt_start(struct watchdog_device *wdog)
{
- if (test_and_set_bit(0, &octeon_wdt_is_open))
- return -EBUSY;
- /*
- * Activate
- */
- octeon_wdt_ping();
+ octeon_wdt_ping(wdog);
do_coundown = 1;
- return nonseekable_open(inode, file);
+ return 0;
}
-/**
- * octeon_wdt_release:
- * @inode: inode to board
- * @file: file handle to board
- *
- * The watchdog has a configurable API. There is a religious dispute
- * between people who want their watchdog to be able to shut down and
- * those who want to be sure if the watchdog manager dies the machine
- * reboots. In the former case we disable the counters, in the latter
- * case you have to open it again very soon.
- */
-
-static int octeon_wdt_release(struct inode *inode, struct file *file)
+static int octeon_wdt_stop(struct watchdog_device *wdog)
{
- if (expect_close) {
- do_coundown = 0;
- octeon_wdt_ping();
- } else {
- pr_crit("WDT device closed unexpectedly. WDT will not stop!\n");
- }
- clear_bit(0, &octeon_wdt_is_open);
- expect_close = 0;
+ do_coundown = 0;
+ octeon_wdt_ping(wdog);
return 0;
}
-static const struct file_operations octeon_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = octeon_wdt_write,
- .unlocked_ioctl = octeon_wdt_ioctl,
- .open = octeon_wdt_open,
- .release = octeon_wdt_release,
+static struct notifier_block octeon_wdt_cpu_notifier = {
+ .notifier_call = octeon_wdt_cpu_callback,
};
-static struct miscdevice octeon_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &octeon_wdt_fops,
+static const struct watchdog_info octeon_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "OCTEON",
};
-static struct notifier_block octeon_wdt_cpu_notifier = {
- .notifier_call = octeon_wdt_cpu_callback,
+static const struct watchdog_ops octeon_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = octeon_wdt_start,
+ .stop = octeon_wdt_stop,
+ .ping = octeon_wdt_ping,
+ .set_timeout = octeon_wdt_set_timeout,
};
+static struct watchdog_device octeon_wdt = {
+ .info = &octeon_wdt_info,
+ .ops = &octeon_wdt_ops,
+};
/**
* Module/ driver initialization.
@@ -685,7 +576,8 @@ static int __init octeon_wdt_init(void)
max_timeout_sec = 6;
do {
max_timeout_sec--;
- timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * max_timeout_sec) >> 8;
+ timeout_cnt = ((octeon_get_io_clock_rate() >> 8) *
+ max_timeout_sec) >> 8;
} while (timeout_cnt > 65535);
BUG_ON(timeout_cnt == 0);
@@ -694,11 +586,15 @@ static int __init octeon_wdt_init(void)
pr_info("Initial granularity %d Sec\n", timeout_sec);
- ret = misc_register(&octeon_wdt_miscdev);
+ octeon_wdt.timeout = timeout_sec;
+ octeon_wdt.max_timeout = UINT_MAX;
+
+ watchdog_set_nowayout(&octeon_wdt, nowayout);
+
+ ret = watchdog_register_device(&octeon_wdt);
if (ret) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
- goto out;
+ pr_err("watchdog_register_device() failed: %d\n", ret);
+ return ret;
}
/* Build the NMI handler ... */
@@ -721,8 +617,7 @@ static int __init octeon_wdt_init(void)
__register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
cpu_notifier_register_done();
-out:
- return ret;
+ return 0;
}
/**
@@ -732,7 +627,7 @@ static void __exit octeon_wdt_cleanup(void)
{
int cpu;
- misc_deregister(&octeon_wdt_miscdev);
+ watchdog_unregister_device(&octeon_wdt);
cpu_notifier_register_begin();
__unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
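
The OCTEON conversion above follows the usual watchdog-core shape; a bare-bones sketch of that shape is shown below, with example_* placeholders rather than the OCTEON callbacks.

#include <linux/module.h>
#include <linux/watchdog.h>

static int example_start(struct watchdog_device *wdog) { return 0; }
static int example_stop(struct watchdog_device *wdog)  { return 0; }
static int example_ping(struct watchdog_device *wdog)  { return 0; }

static const struct watchdog_info example_info = {
	.options  = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
	.identity = "example",
};

static const struct watchdog_ops example_ops = {
	.owner = THIS_MODULE,
	.start = example_start,
	.stop  = example_stop,
	.ping  = example_ping,
};

static struct watchdog_device example_wdd = {
	.info    = &example_info,
	.ops     = &example_ops,
	.timeout = 30,
};

static int __init example_init(void)
{
	return watchdog_register_device(&example_wdd);
}

static void __exit example_exit(void)
{
	watchdog_unregister_device(&example_wdd);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
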
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 55e220150103..b9c6049c3e78 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -216,7 +216,7 @@ static struct platform_driver platform_wdt_driver = {
module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("PNX4008 Watchdog Driver");
module_param(heartbeat, uint, 0);
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index aa85618c4d03..aa03ca8f2d9b 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -20,9 +20,9 @@
#include <linux/reboot.h>
#include <linux/watchdog.h>
-#define WDT_RST 0x0
-#define WDT_EN 0x8
-#define WDT_BITE_TIME 0x24
+#define WDT_RST 0x38
+#define WDT_EN 0x40
+#define WDT_BITE_TIME 0x5C
struct qcom_wdt {
struct watchdog_device wdd;
@@ -117,6 +117,8 @@ static int qcom_wdt_probe(struct platform_device *pdev)
{
struct qcom_wdt *wdt;
struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+ u32 percpu_offset;
int ret;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -124,6 +126,14 @@ static int qcom_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* We use CPU0's DGT for the watchdog */
+ if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
+ percpu_offset = 0;
+
+ res->start += percpu_offset;
+ res->end += percpu_offset;
+
wdt->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -203,9 +213,8 @@ static int qcom_wdt_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_wdt_of_table[] = {
- { .compatible = "qcom,kpss-wdt-msm8960", },
- { .compatible = "qcom,kpss-wdt-apq8064", },
- { .compatible = "qcom,kpss-wdt-ipq8064", },
+ { .compatible = "qcom,kpss-timer" },
+ { .compatible = "qcom,scss-timer" },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index a62b1b6decf4..e7f0d5b60d3d 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -1,7 +1,7 @@
/*
* Watchdog driver for the RTC based watchdog in STMP3xxx and i.MX23/28
*
- * Author: Wolfram Sang <w.sang@pengutronix.de>
+ * Author: Wolfram Sang <kernel@pengutronix.de>
*
* Copyright (C) 2011-12 Wolfram Sang, Pengutronix
*
@@ -129,4 +129,4 @@ module_platform_driver(stmp3xxx_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 94d96809e686..7cd226da15fe 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -270,4 +270,14 @@ config XEN_EFI
def_bool y
depends on X86_64 && EFI
+config XEN_AUTO_XLATE
+ def_bool y
+ depends on ARM || ARM64 || XEN_PVHVM
+ help
+ Support for auto-translated physmap guests.
+
+config XEN_ACPI
+ def_bool y
+ depends on X86 && ACPI
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 2ccd3592d41f..e293bc507cbc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -13,7 +13,7 @@ CFLAGS_efi.o += -fshort-wchar
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
-dom0-$(CONFIG_ACPI) += acpi.o $(xen-pad-y)
+dom0-$(CONFIG_XEN_ACPI) += acpi.o $(xen-pad-y)
xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
@@ -37,6 +37,7 @@ obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
obj-$(CONFIG_XEN_EFI) += efi.o
obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
+obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 5db43fc100a4..7dd46312c180 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void evtchn_2l_resume(void)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
+ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+}
+
static const struct evtchn_ops evtchn_ops_2l = {
.max_channels = evtchn_2l_max_channels,
.nr_channels = evtchn_2l_max_channels,
@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
.mask = evtchn_2l_mask,
.unmask = evtchn_2l_unmask,
.handle_events = evtchn_2l_handle_events,
+ .resume = evtchn_2l_resume,
};
void __init xen_evtchn_2l_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 70fba973a107..2b8553bd8715 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
if (rc)
goto err;
- bind_evtchn_to_cpu(evtchn, 0);
info->evtchn = evtchn;
+ bind_evtchn_to_cpu(evtchn, 0);
rc = xen_evtchn_port_setup(info);
if (rc)
@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
mutex_unlock(&irq_mapping_update_lock);
- /* new event channels are always bound to cpu 0 */
- irq_set_affinity(irq, cpumask_of(0));
+ bind_evtchn_to_cpu(evtchn, info->cpu);
+ /* This will be deferred until interrupt is processed */
+ irq_set_affinity(irq, cpumask_of(info->cpu));
/* Unmask the event channel. */
enable_irq(irq);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index d5bb1a33d0a3..89274850741b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map)
return err;
}
-struct unmap_grant_pages_callback_data
-{
- struct completion completion;
- int result;
-};
-
-static void unmap_grant_callback(int result,
- struct gntab_unmap_queue_data *data)
-{
- struct unmap_grant_pages_callback_data* d = data->data;
-
- d->result = result;
- complete(&d->completion);
-}
-
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
int i, err = 0;
struct gntab_unmap_queue_data unmap_data;
- struct unmap_grant_pages_callback_data data;
-
- init_completion(&data.completion);
- unmap_data.data = &data;
- unmap_data.done= &unmap_grant_callback;
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
unmap_data.pages = map->pages + offset;
unmap_data.count = pages;
- gnttab_unmap_refs_async(&unmap_data);
-
- wait_for_completion(&data.completion);
- if (data.result)
- return data.result;
+ err = gnttab_unmap_refs_sync(&unmap_data);
+ if (err)
+ return err;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 17972fbacddc..b1c7170e5c9e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -123,6 +123,11 @@ struct gnttab_ops {
int (*query_foreign_access)(grant_ref_t ref);
};
+struct unmap_refs_callback_data {
+ struct completion completion;
+ int result;
+};
+
static struct gnttab_ops *gnttab_interface;
static int grant_table_version;
@@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
+static void unmap_refs_callback(int result,
+ struct gntab_unmap_queue_data *data)
+{
+ struct unmap_refs_callback_data *d = data->data;
+
+ d->result = result;
+ complete(&d->completion);
+}
+
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
+{
+ struct unmap_refs_callback_data data;
+
+ init_completion(&data.completion);
+ item->data = &data;
+ item->done = &unmap_refs_callback;
+ gnttab_unmap_refs_async(item);
+ wait_for_completion(&data.completion);
+
+ return data.result;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
+
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;
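
gnttab_unmap_refs_sync() above is the classic "wait on a completion filled in by the async callback" pattern. A user-space analogue with pthreads is sketched below; the names are invented and the asynchronous operation is faked by completing inline.

#include <pthread.h>
#include <stdio.h>

struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t	cond;
	int		done;
	int		result;
};

/* async callback: record the result and wake the waiter */
static void on_done(int result, void *data)
{
	struct waiter *w = data;

	pthread_mutex_lock(&w->lock);
	w->result = result;
	w->done = 1;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

/* stand-in for the asynchronous operation; here it completes immediately */
static void start_async_op(void (*cb)(int, void *), void *data)
{
	cb(0, data);
}

static int do_op_sync(void)
{
	struct waiter w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	start_async_op(on_done, &w);

	pthread_mutex_lock(&w.lock);
	while (!w.done)
		pthread_cond_wait(&w.cond, &w.lock);
	pthread_mutex_unlock(&w.lock);

	return w.result;
}

int main(void)
{
	printf("result = %d\n", do_op_sync());
	return 0;
}
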
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index bf1940706422..9e6a85104a20 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -131,6 +131,8 @@ static void do_suspend(void)
goto out_resume;
}
+ xen_arch_suspend();
+
si.cancelled = 1;
err = stop_machine(xen_suspend, &si, cpumask_of(0));
@@ -148,11 +150,12 @@ static void do_suspend(void)
si.cancelled = 1;
}
+ xen_arch_resume();
+
out_resume:
- if (!si.cancelled) {
- xen_arch_resume();
+ if (!si.cancelled)
xs_resume();
- } else
+ else
xs_suspend_cancel();
dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index 6ab6a79c38a5..a493c7315e94 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -393,14 +393,25 @@ static int bind_virq_for_mce(void)
static int __init xen_late_init_mcelog(void)
{
+ int ret;
+
/* Only DOM0 is responsible for MCE logging */
- if (xen_initial_domain()) {
- /* register character device /dev/mcelog for xen mcelog */
- if (misc_register(&xen_mce_chrdev_device))
- return -ENODEV;
- return bind_virq_for_mce();
- }
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ /* register character device /dev/mcelog for xen mcelog */
+ ret = misc_register(&xen_mce_chrdev_device);
+ if (ret)
+ return ret;
+
+ ret = bind_virq_for_mce();
+ if (ret)
+ goto deregister;
- return -ENODEV;
+ return 0;
+
+deregister:
+ misc_deregister(&xen_mce_chrdev_device);
+ return ret;
}
device_initcall(xen_late_init_mcelog);
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 95ee4302ffb8..7494dbeb4409 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -19,6 +19,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>
@@ -67,12 +68,22 @@ static int xen_add_device(struct device *dev)
#ifdef CONFIG_ACPI
handle = ACPI_HANDLE(&pci_dev->dev);
- if (!handle && pci_dev->bus->bridge)
- handle = ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
if (!handle && pci_dev->is_virtfn)
handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
+ if (!handle) {
+ /*
+ * This device was not listed in the ACPI namespace at
+ * all. Try to get the ACPI handle of a parent PCI bus.
+ */
+ struct pci_bus *pbus;
+ for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
+ handle = acpi_pci_get_bridge_handle(pbus);
+ if (handle)
+ break;
+ }
+ }
if (handle) {
acpi_status status;
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 0aac403d53fd..49e88f2ce7a1 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -132,6 +132,33 @@ static ssize_t __ref store_online(struct device *dev,
}
static DEVICE_ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);
+static struct attribute *pcpu_dev_attrs[] = {
+ &dev_attr_online.attr,
+ NULL
+};
+
+static umode_t pcpu_dev_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ /*
+ * Xen never offlines cpu0 due to several restrictions
+ * and assumptions, so don't expose a sysfs control for it:
+ * userspace must not be able to attempt to offline the BSP.
+ */
+ return dev->id ? attr->mode : 0;
+}
+
+static const struct attribute_group pcpu_dev_group = {
+ .attrs = pcpu_dev_attrs,
+ .is_visible = pcpu_dev_is_visible,
+};
+
+static const struct attribute_group *pcpu_dev_groups[] = {
+ &pcpu_dev_group,
+ NULL
+};
+
static bool xen_pcpu_online(uint32_t flags)
{
return !!(flags & XEN_PCPU_FLAGS_ONLINE);
@@ -181,9 +208,6 @@ static void unregister_and_remove_pcpu(struct pcpu *pcpu)
return;
dev = &pcpu->dev;
- if (dev->id)
- device_remove_file(dev, &dev_attr_online);
-
/* pcpu remove would be implicitly done */
device_unregister(dev);
}
@@ -200,6 +224,7 @@ static int register_pcpu(struct pcpu *pcpu)
dev->bus = &xen_pcpu_subsys;
dev->id = pcpu->cpu_id;
dev->release = pcpu_release;
+ dev->groups = pcpu_dev_groups;
err = device_register(dev);
if (err) {
@@ -207,19 +232,6 @@ static int register_pcpu(struct pcpu *pcpu)
return err;
}
- /*
- * Xen never offline cpu0 due to several restrictions
- * and assumptions. This basically doesn't add a sys control
- * to user, one cannot attempt to offline BSP.
- */
- if (dev->id) {
- err = device_create_file(dev, &dev_attr_online);
- if (err) {
- device_unregister(dev);
- return err;
- }
- }
-
return 0;
}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 59ac71c4a043..5a296161d843 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -159,6 +159,40 @@ static int traverse_pages(unsigned nelem, size_t size,
return ret;
}
+/*
+ * Similar to traverse_pages, but use each page as a "block" of
+ * data to be processed as one unit.
+ */
+static int traverse_pages_block(unsigned nelem, size_t size,
+ struct list_head *pos,
+ int (*fn)(void *data, int nr, void *state),
+ void *state)
+{
+ void *pagedata;
+ unsigned pageidx;
+ int ret = 0;
+
+ BUG_ON(size > PAGE_SIZE);
+
+ pageidx = PAGE_SIZE;
+
+ while (nelem) {
+ int nr = (PAGE_SIZE/size);
+ struct page *page;
+ if (nr > nelem)
+ nr = nelem;
+ pos = pos->next;
+ page = list_entry(pos, struct page, lru);
+ pagedata = page_address(page);
+ ret = (*fn)(pagedata, nr, state);
+ if (ret)
+ break;
+ nelem -= nr;
+ }
+
+ return ret;
+}
+
struct mmap_mfn_state {
unsigned long va;
struct vm_area_struct *vma;
@@ -274,39 +308,25 @@ struct mmap_batch_state {
/* auto translated dom0 note: if domU being created is PV, then mfn is
* mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
*/
-static int mmap_batch_fn(void *data, void *state)
+static int mmap_batch_fn(void *data, int nr, void *state)
{
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
struct vm_area_struct *vma = st->vma;
struct page **pages = vma->vm_private_data;
- struct page *cur_page = NULL;
+ struct page **cur_pages = NULL;
int ret;
if (xen_feature(XENFEAT_auto_translated_physmap))
- cur_page = pages[st->index++];
+ cur_pages = &pages[st->index];
- ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
- st->vma->vm_page_prot, st->domain,
- &cur_page);
+ BUG_ON(nr < 0);
+ ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
+ (int *)mfnp, st->vma->vm_page_prot,
+ st->domain, cur_pages);
- /* Store error code for second pass. */
- if (st->version == 1) {
- if (ret < 0) {
- /*
- * V1 encodes the error codes in the 32bit top nibble of the
- * mfn (with its known limitations vis-a-vis 64 bit callers).
- */
- *mfnp |= (ret == -ENOENT) ?
- PRIVCMD_MMAPBATCH_PAGED_ERROR :
- PRIVCMD_MMAPBATCH_MFN_ERROR;
- }
- } else { /* st->version == 2 */
- *((int *) mfnp) = ret;
- }
-
- /* And see if it affects the global_error. */
- if (ret < 0) {
+ /* Adjust the global_error? */
+ if (ret != nr) {
if (ret == -ENOENT)
st->global_error = -ENOENT;
else {
@@ -315,23 +335,35 @@ static int mmap_batch_fn(void *data, void *state)
st->global_error = 1;
}
}
- st->va += PAGE_SIZE;
+ st->va += PAGE_SIZE * nr;
+ st->index += nr;
return 0;
}
-static int mmap_return_errors(void *data, void *state)
+static int mmap_return_error(int err, struct mmap_batch_state *st)
{
- struct mmap_batch_state *st = state;
+ int ret;
if (st->version == 1) {
- xen_pfn_t mfnp = *((xen_pfn_t *) data);
- if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
- return __put_user(mfnp, st->user_mfn++);
- else
+ if (err) {
+ xen_pfn_t mfn;
+
+ ret = get_user(mfn, st->user_mfn);
+ if (ret < 0)
+ return ret;
+ /*
+ * V1 encodes the error codes in the 32bit top
+ * nibble of the mfn (with its known
+ * limitations vis-a-vis 64 bit callers).
+ */
+ mfn |= (err == -ENOENT) ?
+ PRIVCMD_MMAPBATCH_PAGED_ERROR :
+ PRIVCMD_MMAPBATCH_MFN_ERROR;
+ return __put_user(mfn, st->user_mfn++);
+ } else
st->user_mfn++;
} else { /* st->version == 2 */
- int err = *((int *) data);
if (err)
return __put_user(err, st->user_err++);
else
@@ -341,6 +373,21 @@ static int mmap_return_errors(void *data, void *state)
return 0;
}
+static int mmap_return_errors(void *data, int nr, void *state)
+{
+ struct mmap_batch_state *st = state;
+ int *errs = data;
+ int i;
+ int ret;
+
+ for (i = 0; i < nr; i++) {
+ ret = mmap_return_error(errs[i], st);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
* the vma with the page info to use later.
* Returns: 0 if success, otherwise -errno
@@ -472,8 +519,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
state.version = version;
/* mmap_batch_fn guarantees ret == 0 */
- BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
- &pagelist, mmap_batch_fn, &state));
+ BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_batch_fn, &state));
up_write(&mm->mmap_sem);
@@ -481,8 +528,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
/* Write back errors in second pass. */
state.user_mfn = (xen_pfn_t *)m.arr;
state.user_err = m.err;
- ret = traverse_pages(m.num, sizeof(xen_pfn_t),
- &pagelist, mmap_return_errors, &state);
+ ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_return_errors, &state);
} else
ret = 0;
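
The block-at-a-time idea behind traverse_pages_block() above, reduced to a plain-C loop over an array: the callback receives up to PAGE_SIZE/size elements per call instead of exactly one. The tiny "page" size and the data below are made up for illustration.

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 16	/* tiny "page" so the output stays short */

/* callback invoked once per block of up to EXAMPLE_PAGE_SIZE/size elements */
static int print_block(const int *data, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		printf("%d ", data[i]);
	printf("\n");
	return 0;
}

int main(void)
{
	int elems[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	unsigned int nelem = 10;
	unsigned int per_block = EXAMPLE_PAGE_SIZE / sizeof(int);	/* 4 */
	const int *pos = elems;

	while (nelem) {
		unsigned int nr = per_block < nelem ? per_block : nelem;

		if (print_block(pos, nr))
			break;
		pos += nr;
		nelem -= nr;
	}
	return 0;
}
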
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 810ad419e34c..4c549323c605 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -235,7 +235,7 @@ retry:
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+ xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
if (xen_io_tlb_start)
break;
order--;
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 8a65423bc696..c4211a31612d 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -397,13 +397,15 @@ static int __init xen_tmem_init(void)
#ifdef CONFIG_CLEANCACHE
BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
if (tmem_enabled && cleancache) {
- char *s = "";
- struct cleancache_ops *old_ops =
- cleancache_register_ops(&tmem_cleancache_ops);
- if (old_ops)
- s = " (WARNING: cleancache_ops overridden)";
- pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory%s\n",
- s);
+ int err;
+
+ err = cleancache_register_ops(&tmem_cleancache_ops);
+ if (err)
+ pr_warn("xen-tmem: failed to enable cleancache: %d\n",
+ err);
+ else
+ pr_info("cleancache enabled, RAM provided by "
+ "Xen Transcendent Memory\n");
}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e555845d61fa..39e7ef8d3957 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -193,13 +193,18 @@ static DEVICE_ATTR(target, S_IRUGO | S_IWUSR,
show_target, store_target);
-static struct device_attribute *balloon_attrs[] = {
- &dev_attr_target_kb,
- &dev_attr_target,
- &dev_attr_schedule_delay.attr,
- &dev_attr_max_schedule_delay.attr,
- &dev_attr_retry_count.attr,
- &dev_attr_max_retry_count.attr
+static struct attribute *balloon_attrs[] = {
+ &dev_attr_target_kb.attr,
+ &dev_attr_target.attr,
+ &dev_attr_schedule_delay.attr.attr,
+ &dev_attr_max_schedule_delay.attr.attr,
+ &dev_attr_retry_count.attr.attr,
+ &dev_attr_max_retry_count.attr.attr,
+ NULL
+};
+
+static const struct attribute_group balloon_group = {
+ .attrs = balloon_attrs
};
static struct attribute *balloon_info_attrs[] = {
@@ -214,6 +219,12 @@ static const struct attribute_group balloon_info_group = {
.attrs = balloon_info_attrs
};
+static const struct attribute_group *balloon_groups[] = {
+ &balloon_group,
+ &balloon_info_group,
+ NULL
+};
+
static struct bus_type balloon_subsys = {
.name = BALLOON_CLASS_NAME,
.dev_name = BALLOON_CLASS_NAME,
@@ -221,7 +232,7 @@ static struct bus_type balloon_subsys = {
static int register_balloon(struct device *dev)
{
- int i, error;
+ int error;
error = subsys_system_register(&balloon_subsys, NULL);
if (error)
@@ -229,6 +240,7 @@ static int register_balloon(struct device *dev)
dev->id = 0;
dev->bus = &balloon_subsys;
+ dev->groups = balloon_groups;
error = device_register(dev);
if (error) {
@@ -236,24 +248,7 @@ static int register_balloon(struct device *dev)
return error;
}
- for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
- error = device_create_file(dev, balloon_attrs[i]);
- if (error)
- goto fail;
- }
-
- error = sysfs_create_group(&dev->kobj, &balloon_info_group);
- if (error)
- goto fail;
-
return 0;
-
- fail:
- while (--i >= 0)
- device_remove_file(dev, balloon_attrs[i]);
- device_unregister(dev);
- bus_unregister(&balloon_subsys);
- return error;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 75fe3d466515..9c234209d8b5 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,8 +16,8 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-bool permissive;
-module_param(permissive, bool, 0644);
+bool xen_pcibk_permissive;
+module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
* xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
* This means that some fields may still be read-only because
* they have entries in the config_field list that intercept
* the write and do nothing. */
- if (dev_data->permissive || permissive) {
+ if (dev_data->permissive || xen_pcibk_permissive) {
switch (size) {
case 1:
err = pci_write_config_byte(dev, offset,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 2e1d73d1d5d0..62461a8ba1d6 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,7 +64,7 @@ struct config_field_entry {
void *data;
};
-extern bool permissive;
+extern bool xen_pcibk_permissive;
#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 2d7369391472..ad3d17d29c81 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -88,9 +88,15 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
pci_name(dev));
pci_set_master(dev);
+ } else if (dev->is_busmaster && !is_master_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: clear bus master\n",
+ pci_name(dev));
+ pci_clear_master(dev);
}
- if (value & PCI_COMMAND_INVALIDATE) {
+ if (!(cmd->val & PCI_COMMAND_INVALIDATE) &&
+ (value & PCI_COMMAND_INVALIDATE)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG
DRV_NAME ": %s: enable memory-write-invalidate\n",
@@ -101,11 +107,18 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
pci_name(dev), err);
value &= ~PCI_COMMAND_INVALIDATE;
}
+ } else if ((cmd->val & PCI_COMMAND_INVALIDATE) &&
+ !(value & PCI_COMMAND_INVALIDATE)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG
+ DRV_NAME ": %s: disable memory-write-invalidate\n",
+ pci_name(dev));
+ pci_clear_mwi(dev);
}
cmd->val = value;
- if (!permissive && (!dev_data || !dev_data->permissive))
+ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
return 0;
/* Only allow the guest to control certain bits. */
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index cc3cbb4435f8..258b7c325649 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -118,7 +118,7 @@ static void pcistub_device_release(struct kref *kref)
int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
&ppdev);
- if (err)
+ if (err && err != -ENOSYS)
dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
err);
}
@@ -402,7 +402,7 @@ static int pcistub_init_device(struct pci_dev *dev)
};
err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
- if (err)
+ if (err && err != -ENOSYS)
dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
err);
}
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index fe17c80ff4b7..98bc345f296e 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -113,7 +113,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
"Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
gnt_ref, remote_evtchn);
- err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr);
+ err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr);
if (err < 0) {
xenbus_dev_fatal(pdev->xdev, err,
"Error mapping other domain page in ours.");
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 42bd55a6c237..b7f51504f85a 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -31,6 +31,8 @@
* IN THE SOFTWARE.
*/
+#define pr_fmt(fmt) "xen-pvscsi: " fmt
+
#include <stdarg.h>
#include <linux/module.h>
@@ -69,9 +71,6 @@
#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>
-#define DPRINTK(_f, _a...) \
- pr_debug("(file=%s, line=%d) " _f, __FILE__ , __LINE__ , ## _a)
-
#define VSCSI_VERSION "v0.1"
#define VSCSI_NAMELEN 32
@@ -205,8 +204,7 @@ static LIST_HEAD(scsiback_free_pages);
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *scsiback_fabric_configfs;
+static const struct target_core_fabric_ops scsiback_ops;
static void scsiback_get(struct vscsibk_info *info)
{
@@ -271,7 +269,7 @@ static void scsiback_print_status(char *sense_buffer, int errors,
{
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
- pr_err("xen-pvscsi[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
+ pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
tpg->tport->tport_name, pending_req->v2p->lun,
pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
host_byte(errors), driver_byte(errors));
@@ -427,7 +425,7 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
BUG_ON(err);
for (i = 0; i < cnt; i++) {
if (unlikely(map[i].status != GNTST_okay)) {
- pr_err("xen-pvscsi: invalid buffer -- could not remap it\n");
+ pr_err("invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
err = -ENOMEM;
} else {
@@ -449,7 +447,7 @@ static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
for (i = 0; i < cnt; i++) {
if (get_free_page(pg + mapcount)) {
put_free_pages(pg, mapcount);
- pr_err("xen-pvscsi: no grant page\n");
+ pr_err("no grant page\n");
return -ENOMEM;
}
gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
@@ -492,7 +490,7 @@ static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
return 0;
if (nr_segments > VSCSIIF_SG_TABLESIZE) {
- DPRINTK("xen-pvscsi: invalid parameter nr_seg = %d\n",
+ pr_debug("invalid parameter nr_seg = %d\n",
ring_req->nr_segments);
return -EINVAL;
}
@@ -516,13 +514,12 @@ static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
nr_segments += n_segs;
}
if (nr_segments > SG_ALL) {
- DPRINTK("xen-pvscsi: invalid nr_seg = %d\n",
- nr_segments);
+ pr_debug("invalid nr_seg = %d\n", nr_segments);
return -EINVAL;
}
}
- /* free of (sgl) in fast_flush_area()*/
+ /* free of (sgl) in fast_flush_area() */
pending_req->sgl = kmalloc_array(nr_segments,
sizeof(struct scatterlist), GFP_KERNEL);
if (!pending_req->sgl)
@@ -679,7 +676,8 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
v2p = scsiback_do_translation(info, &vir);
if (!v2p) {
pending_req->v2p = NULL;
- DPRINTK("xen-pvscsi: doesn't exist.\n");
+ pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
+ vir.chn, vir.tgt, vir.lun);
return -ENODEV;
}
pending_req->v2p = v2p;
@@ -690,14 +688,14 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
(pending_req->sc_data_direction != DMA_NONE)) {
- DPRINTK("xen-pvscsi: invalid parameter data_dir = %d\n",
+ pr_debug("invalid parameter data_dir = %d\n",
pending_req->sc_data_direction);
return -EINVAL;
}
pending_req->cmd_len = ring_req->cmd_len;
if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
- DPRINTK("xen-pvscsi: invalid parameter cmd_len = %d\n",
+ pr_debug("invalid parameter cmd_len = %d\n",
pending_req->cmd_len);
return -EINVAL;
}
@@ -721,7 +719,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
rc = ring->rsp_prod_pvt;
- pr_warn("xen-pvscsi: Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
+ pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
info->domid, rp, rc, rp - rc);
info->ring_error = 1;
return 0;
@@ -772,7 +770,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
break;
default:
- pr_err_ratelimited("xen-pvscsi: invalid request\n");
+ pr_err_ratelimited("invalid request\n");
scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
0, pending_req);
kmem_cache_free(scsiback_cachep, pending_req);
@@ -810,7 +808,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
if (info->irq)
return -1;
- err = xenbus_map_ring_valloc(info->dev, ring_ref, &area);
+ err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area);
if (err)
return err;
@@ -874,14 +872,13 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
lunp = strrchr(phy, ':');
if (!lunp) {
- pr_err("xen-pvscsi: illegal format of physical device %s\n",
- phy);
+ pr_err("illegal format of physical device %s\n", phy);
return -EINVAL;
}
*lunp = 0;
lunp++;
if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
- pr_err("xen-pvscsi: lun number not valid: %s\n", lunp);
+ pr_err("lun number not valid: %s\n", lunp);
return -EINVAL;
}
@@ -909,7 +906,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
mutex_unlock(&scsiback_mutex);
if (!tpg) {
- pr_err("xen-pvscsi: %s:%d %s\n", phy, lun, error);
+ pr_err("%s:%d %s\n", phy, lun, error);
return -ENODEV;
}
@@ -926,7 +923,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
if ((entry->v.chn == v->chn) &&
(entry->v.tgt == v->tgt) &&
(entry->v.lun == v->lun)) {
- pr_warn("xen-pvscsi: Virtual ID is already used. Assignment was not performed.\n");
+ pr_warn("Virtual ID is already used. Assignment was not performed.\n");
err = -EEXIST;
goto out;
}
@@ -992,15 +989,15 @@ found:
}
static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
- char *phy, struct ids_tuple *vir)
+ char *phy, struct ids_tuple *vir, int try)
{
if (!scsiback_add_translation_entry(info, phy, vir)) {
if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateInitialised)) {
- pr_err("xen-pvscsi: xenbus_printf error %s\n", state);
+ pr_err("xenbus_printf error %s\n", state);
scsiback_del_translation_entry(info, vir);
}
- } else {
+ } else if (!try) {
xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateClosed);
}
@@ -1012,7 +1009,7 @@ static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state,
if (!scsiback_del_translation_entry(info, vir)) {
if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateClosed))
- pr_err("xen-pvscsi: xenbus_printf error %s\n", state);
+ pr_err("xenbus_printf error %s\n", state);
}
}
@@ -1060,10 +1057,19 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
switch (op) {
case VSCSIBACK_OP_ADD_OR_DEL_LUN:
- if (device_state == XenbusStateInitialising)
- scsiback_do_add_lun(info, state, phy, &vir);
- if (device_state == XenbusStateClosing)
+ switch (device_state) {
+ case XenbusStateInitialising:
+ scsiback_do_add_lun(info, state, phy, &vir, 0);
+ break;
+ case XenbusStateConnected:
+ scsiback_do_add_lun(info, state, phy, &vir, 1);
+ break;
+ case XenbusStateClosing:
scsiback_do_del_lun(info, state, &vir);
+ break;
+ default:
+ break;
+ }
break;
case VSCSIBACK_OP_UPDATEDEV_STATE:
@@ -1071,15 +1077,14 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
/* modify vscsi-devs/dev-x/state */
if (xenbus_printf(XBT_NIL, dev->nodename, state,
"%d", XenbusStateConnected)) {
- pr_err("xen-pvscsi: xenbus_printf error %s\n",
- str);
+ pr_err("xenbus_printf error %s\n", str);
scsiback_del_translation_entry(info, &vir);
xenbus_printf(XBT_NIL, dev->nodename, state,
"%d", XenbusStateClosed);
}
}
break;
- /*When it is necessary, processing is added here.*/
+ /* When it is necessary, processing is added here. */
default:
break;
}
@@ -1196,7 +1201,7 @@ static int scsiback_probe(struct xenbus_device *dev,
struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info),
GFP_KERNEL);
- DPRINTK("%p %d\n", dev, dev->otherend_id);
+ pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
if (!info) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure");
@@ -1227,7 +1232,7 @@ static int scsiback_probe(struct xenbus_device *dev,
return 0;
fail:
- pr_warn("xen-pvscsi: %s failed\n", __func__);
+ pr_warn("%s failed\n", __func__);
scsiback_remove(dev);
return err;
@@ -1432,7 +1437,7 @@ check_len:
}
snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]);
- pr_debug("xen-pvscsi: Allocated emulated Target %s Address: %s\n",
+ pr_debug("Allocated emulated Target %s Address: %s\n",
scsiback_dump_proto_id(tport), name);
return &tport->tport_wwn;
@@ -1443,7 +1448,7 @@ static void scsiback_drop_tport(struct se_wwn *wwn)
struct scsiback_tport *tport = container_of(wwn,
struct scsiback_tport, tport_wwn);
- pr_debug("xen-pvscsi: Deallocating emulated Target %s Address: %s\n",
+ pr_debug("Deallocating emulated Target %s Address: %s\n",
scsiback_dump_proto_id(tport), tport->tport_name);
kfree(tport);
@@ -1470,8 +1475,8 @@ static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
static int scsiback_check_stop_free(struct se_cmd *se_cmd)
{
/*
- * Do not release struct se_cmd's containing a valid TMR
- * pointer. These will be released directly in scsiback_device_action()
+ * Do not release struct se_cmd's containing a valid TMR pointer.
+ * These will be released directly in scsiback_device_action()
* with transport_generic_free_cmd().
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
@@ -1637,7 +1642,7 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
return -ENOMEM;
}
/*
- * Initialize the struct se_session pointer
+ * Initialize the struct se_session pointer
*/
tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
@@ -1705,7 +1710,7 @@ static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
return -EBUSY;
}
- pr_debug("xen-pvscsi: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
+ pr_debug("Removing I_T Nexus to emulated %s Initiator Port: %s\n",
scsiback_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
@@ -1751,7 +1756,7 @@ static ssize_t scsiback_tpg_store_nexus(struct se_portal_group *se_tpg,
unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
int ret;
/*
- * Shutdown the active I_T nexus if 'NULL' is passed..
+ * Shutdown the active I_T nexus if 'NULL' is passed.
*/
if (!strncmp(page, "NULL", 4)) {
ret = scsiback_drop_nexus(tpg);
@@ -1896,7 +1901,7 @@ scsiback_make_tpg(struct se_wwn *wwn,
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&scsiback_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&scsiback_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -1922,7 +1927,7 @@ static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
*/
scsiback_drop_nexus(tpg);
/*
- * Deregister the se_tpg from TCM..
+ * Deregister the se_tpg from TCM.
*/
core_tpg_deregister(se_tpg);
kfree(tpg);
@@ -1938,7 +1943,9 @@ static int scsiback_check_false(struct se_portal_group *se_tpg)
return 0;
}
-static struct target_core_fabric_ops scsiback_ops = {
+static const struct target_core_fabric_ops scsiback_ops = {
+ .module = THIS_MODULE,
+ .name = "xen-pvscsi",
.get_fabric_name = scsiback_get_fabric_name,
.get_fabric_proto_ident = scsiback_get_fabric_proto_ident,
.tpg_get_wwn = scsiback_get_fabric_wwn,
@@ -1985,62 +1992,10 @@ static struct target_core_fabric_ops scsiback_ops = {
.fabric_make_nodeacl = scsiback_make_nodeacl,
.fabric_drop_nodeacl = scsiback_drop_nodeacl,
#endif
-};
-
-static int scsiback_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
- VSCSI_VERSION, utsname()->sysname, utsname()->machine);
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "xen-pvscsi");
- if (IS_ERR(fabric))
- return PTR_ERR(fabric);
-
- /*
- * Setup fabric->tf_ops from our local scsiback_ops
- */
- fabric->tf_ops = scsiback_ops;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = scsiback_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = scsiback_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = scsiback_param_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- target_fabric_configfs_free(fabric);
- return ret;
- }
- /*
- * Setup our local pointer to *fabric
- */
- scsiback_fabric_configfs = fabric;
- pr_debug("xen-pvscsi: Set fabric -> scsiback_fabric_configfs\n");
- return 0;
-};
-static void scsiback_deregister_configfs(void)
-{
- if (!scsiback_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(scsiback_fabric_configfs);
- scsiback_fabric_configfs = NULL;
- pr_debug("xen-pvscsi: Cleared scsiback_fabric_configfs\n");
+ .tfc_wwn_attrs = scsiback_wwn_attrs,
+ .tfc_tpg_base_attrs = scsiback_tpg_attrs,
+ .tfc_tpg_param_attrs = scsiback_param_attrs,
};
static const struct xenbus_device_id scsiback_ids[] = {
@@ -2072,6 +2027,9 @@ static int __init scsiback_init(void)
if (!xen_domain())
return -ENODEV;
+ pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
+ VSCSI_VERSION, utsname()->sysname, utsname()->machine);
+
scsiback_cachep = kmem_cache_create("vscsiif_cache",
sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
if (!scsiback_cachep)
@@ -2081,7 +2039,7 @@ static int __init scsiback_init(void)
if (ret)
goto out_cache_destroy;
- ret = scsiback_register_configfs();
+ ret = target_register_template(&scsiback_ops);
if (ret)
goto out_unregister_xenbus;
@@ -2091,7 +2049,7 @@ out_unregister_xenbus:
xenbus_unregister_driver(&scsiback_driver);
out_cache_destroy:
kmem_cache_destroy(scsiback_cachep);
- pr_err("xen-pvscsi: %s: error %d\n", __func__, ret);
+ pr_err("%s: error %d\n", __func__, ret);
return ret;
}
@@ -2104,7 +2062,7 @@ static void __exit scsiback_exit(void)
BUG();
gnttab_free_pages(1, &page);
}
- scsiback_deregister_configfs();
+ target_unregister_template(&scsiback_ops);
xenbus_unregister_driver(&scsiback_driver);
kmem_cache_destroy(scsiback_cachep);
}
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index ca744102b666..96b2011d25f3 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -52,17 +52,25 @@
struct xenbus_map_node {
struct list_head next;
union {
- struct vm_struct *area; /* PV */
- struct page *page; /* HVM */
+ struct {
+ struct vm_struct *area;
+ } pv;
+ struct {
+ struct page *pages[XENBUS_MAX_RING_PAGES];
+ void *addr;
+ } hvm;
};
- grant_handle_t handle;
+ grant_handle_t handles[XENBUS_MAX_RING_PAGES];
+ unsigned int nr_handles;
};
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops {
- int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+ int (*map)(struct xenbus_device *dev,
+ grant_ref_t *gnt_refs, unsigned int nr_grefs,
+ void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr);
};
@@ -355,17 +363,39 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
/**
* xenbus_grant_ring
* @dev: xenbus device
- * @ring_mfn: mfn of ring to grant
-
- * Grant access to the given @ring_mfn to the peer of the given device. Return
- * a grant reference on success, or -errno on error. On error, the device will
- * switch to XenbusStateClosing, and the error will be saved in the store.
+ * @vaddr: starting virtual address of the ring
+ * @nr_pages: number of pages to be granted
+ * @grefs: grant reference array to be filled in
+ *
+ * Grant access to the given @vaddr to the peer of the given device.
+ * Then fill in @grefs with grant references. Return 0 on success, or
+ * -errno on error. On error, the device will switch to
+ * XenbusStateClosing, and the error will be saved in the store.
*/
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+ unsigned int nr_pages, grant_ref_t *grefs)
{
- int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
- if (err < 0)
- xenbus_dev_fatal(dev, err, "granting access to ring page");
+ int err;
+ int i, j;
+
+ for (i = 0; i < nr_pages; i++) {
+ unsigned long addr = (unsigned long)vaddr +
+ (PAGE_SIZE * i);
+ err = gnttab_grant_foreign_access(dev->otherend_id,
+ virt_to_mfn(addr), 0);
+ if (err < 0) {
+ xenbus_dev_fatal(dev, err,
+ "granting access to ring page");
+ goto fail;
+ }
+ grefs[i] = err;
+ }
+
+ return 0;
+
+fail:
+ for (j = 0; j < i; j++)
+ gnttab_end_foreign_access_ref(grefs[j], 0);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
@@ -419,62 +449,130 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
/**
* xenbus_map_ring_valloc
* @dev: xenbus device
- * @gnt_ref: grant reference
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant references
* @vaddr: pointer to address to be filled out by mapping
*
- * Based on Rusty Russell's skeleton driver's map_page.
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
- * page to that address, and sets *vaddr to that address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
+ * Map @nr_grefs pages of memory into this domain from another
+ * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
+ * pages of virtual address space, maps the pages to that address, and
+ * sets *vaddr to that address. Returns 0 on success, and GNTST_*
+ * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * error. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
*/
-int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+ unsigned int nr_grefs, void **vaddr)
{
- return ring_ops->map(dev, gnt_ref, vaddr);
+ return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
+ * long), e.g. 32-on-64. The caller is responsible for preparing the
+ * right array to feed into this function. */
+static int __xenbus_map_ring(struct xenbus_device *dev,
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ grant_handle_t *handles,
+ phys_addr_t *addrs,
+ unsigned int flags,
+ bool *leaked)
+{
+ struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+ int i, j;
+ int err = GNTST_okay;
+
+ if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ return -EINVAL;
+
+ for (i = 0; i < nr_grefs; i++) {
+ memset(&map[i], 0, sizeof(map[i]));
+ gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
+ dev->otherend_id);
+ handles[i] = INVALID_GRANT_HANDLE;
+ }
+
+ gnttab_batch_map(map, i);
+
+ for (i = 0; i < nr_grefs; i++) {
+ if (map[i].status != GNTST_okay) {
+ err = map[i].status;
+ xenbus_dev_fatal(dev, map[i].status,
+ "mapping in shared page %d from domain %d",
+ gnt_refs[i], dev->otherend_id);
+ goto fail;
+ } else
+ handles[i] = map[i].handle;
+ }
+
+ return GNTST_okay;
+
+ fail:
+ for (i = j = 0; i < nr_grefs; i++) {
+ if (handles[i] != INVALID_GRANT_HANDLE) {
+ memset(&unmap[j], 0, sizeof(unmap[j]));
+ gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+ GNTMAP_host_map, handles[i]);
+ j++;
+ }
+ }
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+ BUG();
+
+ *leaked = false;
+ for (i = 0; i < j; i++) {
+ if (unmap[i].status != GNTST_okay) {
+ *leaked = true;
+ break;
+ }
+ }
+
+ return err;
+}
+
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
- int gnt_ref, void **vaddr)
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct gnttab_map_grant_ref op = {
- .flags = GNTMAP_host_map | GNTMAP_contains_pte,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
struct xenbus_map_node *node;
struct vm_struct *area;
- pte_t *pte;
+ pte_t *ptes[XENBUS_MAX_RING_PAGES];
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+ int err = GNTST_okay;
+ int i;
+ bool leaked;
*vaddr = NULL;
+ if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ return -EINVAL;
+
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
- area = alloc_vm_area(PAGE_SIZE, &pte);
+ area = alloc_vm_area(PAGE_SIZE * nr_grefs, ptes);
if (!area) {
kfree(node);
return -ENOMEM;
}
- op.host_addr = arbitrary_virt_to_machine(pte).maddr;
+ for (i = 0; i < nr_grefs; i++)
+ phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
- gnttab_batch_map(&op, 1);
-
- if (op.status != GNTST_okay) {
- free_vm_area(area);
- kfree(node);
- xenbus_dev_fatal(dev, op.status,
- "mapping in shared page %d from domain %d",
- gnt_ref, dev->otherend_id);
- return op.status;
- }
+ err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+ phys_addrs,
+ GNTMAP_host_map | GNTMAP_contains_pte,
+ &leaked);
+ if (err)
+ goto failed;
- node->handle = op.handle;
- node->area = area;
+ node->nr_handles = nr_grefs;
+ node->pv.area = area;
spin_lock(&xenbus_valloc_lock);
list_add(&node->next, &xenbus_valloc_pages);
@@ -482,14 +580,33 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
*vaddr = area->addr;
return 0;
+
+failed:
+ if (!leaked)
+ free_vm_area(area);
+ else
+ pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+ kfree(node);
+ return err;
}
static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
- int gnt_ref, void **vaddr)
+ grant_ref_t *gnt_ref,
+ unsigned int nr_grefs,
+ void **vaddr)
{
struct xenbus_map_node *node;
+ int i;
int err;
void *addr;
+ bool leaked = false;
+ /* Why do we need two arrays? See comment of __xenbus_map_ring */
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+ unsigned long addrs[XENBUS_MAX_RING_PAGES];
+
+ if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ return -EINVAL;
*vaddr = NULL;
@@ -497,15 +614,32 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
if (!node)
return -ENOMEM;
- err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+ err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages,
+ false /* lowmem */);
if (err)
goto out_err;
- addr = pfn_to_kaddr(page_to_pfn(node->page));
+ for (i = 0; i < nr_grefs; i++) {
+ unsigned long pfn = page_to_pfn(node->hvm.pages[i]);
+ phys_addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
+ addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
+ }
+
+ err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
+ phys_addrs, GNTMAP_host_map, &leaked);
+ node->nr_handles = nr_grefs;
- err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
if (err)
- goto out_err_free_ballooned_pages;
+ goto out_free_ballooned_pages;
+
+ addr = vmap(node->hvm.pages, nr_grefs, VM_MAP | VM_IOREMAP,
+ PAGE_KERNEL);
+ if (!addr) {
+ err = -ENOMEM;
+ goto out_xenbus_unmap_ring;
+ }
+
+ node->hvm.addr = addr;
spin_lock(&xenbus_valloc_lock);
list_add(&node->next, &xenbus_valloc_pages);
@@ -514,8 +648,16 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
*vaddr = addr;
return 0;
- out_err_free_ballooned_pages:
- free_xenballooned_pages(1, &node->page);
+ out_xenbus_unmap_ring:
+ if (!leaked)
+ xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+ addrs);
+ else
+ pr_alert("leaking %p size %u page(s)",
+ addr, nr_grefs);
+ out_free_ballooned_pages:
+ if (!leaked)
+ free_xenballooned_pages(nr_grefs, node->hvm.pages);
out_err:
kfree(node);
return err;
@@ -525,35 +667,37 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
/**
* xenbus_map_ring
* @dev: xenbus device
- * @gnt_ref: grant reference
- * @handle: pointer to grant handle to be filled
- * @vaddr: address to be mapped to
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant references
+ * @handles: pointer to grant handles to be filled
+ * @vaddrs: addresses to be mapped to
+ * @leaked: set when a failed mapping cannot be cleaned up; caller should not free vaddrs
*
- * Map a page of memory into this domain from another domain's grant table.
+ * Map pages of memory into this domain from another domain's grant table.
* xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the page to the specified address.
+ * this yourself!). It only maps in the pages to the specified address.
* Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
+ * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the first error message will be saved in XenStore.
+ * Furthermore, if we fail to map the ring, the caller should check @leaked.
+ * If @leaked is not zero it means xenbus_map_ring failed to clean up, and the
+ * caller should not free the address space of @vaddrs.
*/
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
- grant_handle_t *handle, void *vaddr)
+int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+ unsigned int nr_grefs, grant_handle_t *handles,
+ unsigned long *vaddrs, bool *leaked)
{
- struct gnttab_map_grant_ref op;
-
- gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
- dev->otherend_id);
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+ int i;
- gnttab_batch_map(&op, 1);
+ if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ return -EINVAL;
- if (op.status != GNTST_okay) {
- xenbus_dev_fatal(dev, op.status,
- "mapping in shared page %d from domain %d",
- gnt_ref, dev->otherend_id);
- } else
- *handle = op.handle;
+ for (i = 0; i < nr_grefs; i++)
+ phys_addrs[i] = (unsigned long)vaddrs[i];
- return op.status;
+ return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
+ phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
@@ -579,14 +723,15 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- };
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
unsigned int level;
+ int i;
+ bool leaked = false;
+ int err;
spin_lock(&xenbus_valloc_lock);
list_for_each_entry(node, &xenbus_valloc_pages, next) {
- if (node->area->addr == vaddr) {
+ if (node->pv.area->addr == vaddr) {
list_del(&node->next);
goto found;
}
@@ -601,22 +746,41 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
return GNTST_bad_virt_addr;
}
- op.handle = node->handle;
- op.host_addr = arbitrary_virt_to_machine(
- lookup_address((unsigned long)vaddr, &level)).maddr;
+ for (i = 0; i < node->nr_handles; i++) {
+ unsigned long addr;
+
+ memset(&unmap[i], 0, sizeof(unmap[i]));
+ addr = (unsigned long)vaddr + (PAGE_SIZE * i);
+ unmap[i].host_addr = arbitrary_virt_to_machine(
+ lookup_address(addr, &level)).maddr;
+ unmap[i].dev_bus_addr = 0;
+ unmap[i].handle = node->handles[i];
+ }
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
BUG();
- if (op.status == GNTST_okay)
- free_vm_area(node->area);
+ err = GNTST_okay;
+ leaked = false;
+ for (i = 0; i < node->nr_handles; i++) {
+ if (unmap[i].status != GNTST_okay) {
+ leaked = true;
+ xenbus_dev_error(dev, unmap[i].status,
+ "unmapping page at handle %d error %d",
+ node->handles[i], unmap[i].status);
+ err = unmap[i].status;
+ break;
+ }
+ }
+
+ if (!leaked)
+ free_vm_area(node->pv.area);
else
- xenbus_dev_error(dev, op.status,
- "unmapping page at handle %d error %d",
- node->handle, op.status);
+ pr_alert("leaking VM area %p size %u page(s)",
+ node->pv.area, node->nr_handles);
kfree(node);
- return op.status;
+ return err;
}
static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
@@ -624,10 +788,12 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
int rv;
struct xenbus_map_node *node;
void *addr;
+ unsigned long addrs[XENBUS_MAX_RING_PAGES];
+ int i;
spin_lock(&xenbus_valloc_lock);
list_for_each_entry(node, &xenbus_valloc_pages, next) {
- addr = pfn_to_kaddr(page_to_pfn(node->page));
+ addr = node->hvm.addr;
if (addr == vaddr) {
list_del(&node->next);
goto found;
@@ -643,12 +809,16 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
return GNTST_bad_virt_addr;
}
- rv = xenbus_unmap_ring(dev, node->handle, addr);
+ for (i = 0; i < node->nr_handles; i++)
+ addrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(node->hvm.pages[i]));
+ rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+ addrs);
if (!rv)
- free_xenballooned_pages(1, &node->page);
+ vunmap(vaddr);
else
- WARN(1, "Leaking %p\n", vaddr);
+ WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
+ node->nr_handles);
kfree(node);
return rv;
@@ -657,29 +827,44 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
/**
* xenbus_unmap_ring
* @dev: xenbus device
- * @handle: grant handle
- * @vaddr: addr to unmap
+ * @handles: grant handle array
+ * @nr_handles: number of handles in the array
+ * @vaddrs: addresses to unmap
*
- * Unmap a page of memory in this domain that was imported from another domain.
+ * Unmap memory in this domain that was imported from another domain.
* Returns 0 on success and returns GNTST_* on error
* (see xen/include/interface/grant_table.h).
*/
int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t handle, void *vaddr)
+ grant_handle_t *handles, unsigned int nr_handles,
+ unsigned long *vaddrs)
{
- struct gnttab_unmap_grant_ref op;
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+ int i;
+ int err;
- gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
+ if (nr_handles > XENBUS_MAX_RING_PAGES)
+ return -EINVAL;
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ for (i = 0; i < nr_handles; i++)
+ gnttab_set_unmap_op(&unmap[i], vaddrs[i],
+ GNTMAP_host_map, handles[i]);
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
BUG();
- if (op.status != GNTST_okay)
- xenbus_dev_error(dev, op.status,
- "unmapping page at handle %d error %d",
- handle, op.status);
+ err = GNTST_okay;
+ for (i = 0; i < nr_handles; i++) {
+ if (unmap[i].status != GNTST_okay) {
+ xenbus_dev_error(dev, unmap[i].status,
+ "unmapping page at handle %d error %d",
+ handles[i], unmap[i].status);
+ err = unmap[i].status;
+ break;
+ }
+ }
- return op.status;
+ return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
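For illustration only (not part of the patch above): a minimal sketch of how a split driver could use the multi-page ring interface introduced in xenbus_client.c. The xenbus_grant_ring(), xenbus_map_ring_valloc() and xenbus_unmap_ring_vfree() signatures are taken from this diff; the driver-side names, the two-page ring size and the error handling are assumptions made up for the example.

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <xen/grant_table.h>
	#include <xen/xenbus.h>

	/* Frontend sketch: allocate a two-page ring and grant it to the backend. */
	#define MY_RING_PAGES 2		/* must not exceed XENBUS_MAX_RING_PAGES */

	static int my_front_setup_ring(struct xenbus_device *dev,
				       grant_ref_t grefs[MY_RING_PAGES])
	{
		void *ring = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(MY_RING_PAGES * PAGE_SIZE));
		int err;

		if (!ring)
			return -ENOMEM;
		/* Fills grefs[] with one grant reference per page of the ring. */
		err = xenbus_grant_ring(dev, ring, MY_RING_PAGES, grefs);
		if (err < 0)
			free_pages((unsigned long)ring,
				   get_order(MY_RING_PAGES * PAGE_SIZE));
		return err;
	}

	/* Backend sketch: map the references the frontend advertised in xenstore. */
	static int my_back_map_ring(struct xenbus_device *dev,
				    grant_ref_t *grefs, unsigned int nr_grefs)
	{
		void *vaddr;
		int err;

		err = xenbus_map_ring_valloc(dev, grefs, nr_grefs, &vaddr);
		if (err)
			return err;	/* xenbus_dev_fatal() was already called */
		/*
		 * ... use the ring at vaddr; tear it down later with
		 * xenbus_unmap_ring_vfree(dev, vaddr) ...
		 */
		return 0;
	}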
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 564b31584860..5390a674b5e3 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -57,6 +57,7 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
+#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/hvm.h>
@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
return err;
}
+static int xenbus_resume_cb(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int err = 0;
+
+ if (xen_hvm_domain()) {
+ uint64_t v;
+
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (!err && v)
+ xen_store_evtchn = v;
+ else
+ pr_warn("Cannot update xenstore event channel: %d\n",
+ err);
+ } else
+ xen_store_evtchn = xen_start_info->store_evtchn;
+
+ return err;
+}
+
+static struct notifier_block xenbus_resume_nb = {
+ .notifier_call = xenbus_resume_cb,
+};
+
static int __init xenbus_init(void)
{
int err = 0;
@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
goto out_error;
}
+ if ((xen_store_domain_type != XS_LOCAL) &&
+ (xen_store_domain_type != XS_UNKNOWN))
+ xen_resume_notifier_register(&xenbus_resume_nb);
+
#ifdef CONFIG_XEN_COMPAT_XENFS
/*
* Create xenfs mountpoint in /proc for compatibility with
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
new file mode 100644
index 000000000000..58a5389aec89
--- /dev/null
+++ b/drivers/xen/xlate_mmu.c
@@ -0,0 +1,143 @@
+/*
+ * MMU operations common to all auto-translated physmap guests.
+ *
+ * Copyright (C) 2015 Citrix Systems R&D Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/memory.h>
+
+/* map fgmfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+ unsigned int domid)
+{
+ int rc;
+ struct xen_add_to_physmap_range xatp = {
+ .domid = DOMID_SELF,
+ .foreign_domid = domid,
+ .size = 1,
+ .space = XENMAPSPACE_gmfn_foreign,
+ };
+ xen_ulong_t idx = fgmfn;
+ xen_pfn_t gpfn = lpfn;
+ int err = 0;
+
+ set_xen_guest_handle(xatp.idxs, &idx);
+ set_xen_guest_handle(xatp.gpfns, &gpfn);
+ set_xen_guest_handle(xatp.errs, &err);
+
+ rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+ return rc < 0 ? rc : err;
+}
+
+struct remap_data {
+ xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+ pgprot_t prot;
+ domid_t domid;
+ struct vm_area_struct *vma;
+ int index;
+ struct page **pages;
+ struct xen_remap_mfn_info *info;
+ int *err_ptr;
+ int mapped;
+};
+
+static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct remap_data *info = data;
+ struct page *page = info->pages[info->index++];
+ unsigned long pfn = page_to_pfn(page);
+ pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
+ int rc;
+
+ rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+ *info->err_ptr++ = rc;
+ if (!rc) {
+ set_pte_at(info->vma->vm_mm, addr, ptep, pte);
+ info->mapped++;
+ }
+ info->fgmfn++;
+
+ return 0;
+}
+
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages)
+{
+ int err;
+ struct remap_data data;
+ unsigned long range = nr << PAGE_SHIFT;
+
+ /* Kept here for the purpose of making sure code doesn't break
+ x86 PVOPS */
+ BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
+
+ data.fgmfn = mfn;
+ data.prot = prot;
+ data.domid = domid;
+ data.vma = vma;
+ data.pages = pages;
+ data.index = 0;
+ data.err_ptr = err_ptr;
+ data.mapped = 0;
+
+ err = apply_to_page_range(vma->vm_mm, addr, range,
+ remap_pte_fn, &data);
+ return err < 0 ? err : data.mapped;
+}
+EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
+
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct xen_remove_from_physmap xrp;
+ unsigned long pfn;
+
+ pfn = page_to_pfn(pages[i]);
+
+ xrp.domid = DOMID_SELF;
+ xrp.gpfn = pfn;
+ (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c
index cf38e159131a..08d90e25abf0 100644
--- a/firmware/ihex2fw.c
+++ b/firmware/ihex2fw.c
@@ -86,6 +86,7 @@ int main(int argc, char **argv)
case 'j':
include_jump = 1;
break;
+ default:
return usage();
}
}
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 8482f2d11606..31c010372660 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -247,7 +247,7 @@ static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name,
if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
return v9fs_remote_get_acl(dentry, name, buffer, size, type);
- acl = v9fs_get_cached_acl(dentry->d_inode, type);
+ acl = v9fs_get_cached_acl(d_inode(dentry), type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -285,7 +285,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
int retval;
struct posix_acl *acl;
struct v9fs_session_info *v9ses;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (strcmp(name, "") != 0)
return -EINVAL;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 099c7712631c..fb9ffcb43277 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -78,7 +78,6 @@ enum p9_cache_modes {
* @cache: cache mode of type &p9_cache_modes
* @cachetag: the tag of the cache associated with this session
* @fscache: session cookie associated with FS-Cache
- * @options: copy of options string given by user
* @uname: string user name to mount hierarchy as
* @aname: mount specifier for remote hierarchy
* @maxdata: maximum data to be sent/recvd per protocol message
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index b83ebfbf3fdc..5a0db6dec8d1 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -68,14 +68,10 @@ int v9fs_file_open(struct inode *inode, struct file *file);
void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
int v9fs_uflags2omode(int uflags, int extended);
-ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
-ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64);
void v9fs_blank_wstat(struct p9_wstat *wstat);
int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *);
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
int datasync);
-ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *,
- const char __user *, size_t, loff_t *, int);
int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode);
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode);
static inline void v9fs_invalidate_inode_attr(struct inode *inode)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index eb14e055ea83..e9e04376c52c 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -33,7 +33,7 @@
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -51,12 +51,11 @@
*/
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
- int retval;
- loff_t offset;
- char *buffer;
- struct inode *inode;
+ struct inode *inode = page->mapping->host;
+ struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
+ struct iov_iter to;
+ int retval, err;
- inode = page->mapping->host;
p9_debug(P9_DEBUG_VFS, "\n");
BUG_ON(!PageLocked(page));
@@ -65,16 +64,16 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
if (retval == 0)
return retval;
- buffer = kmap(page);
- offset = page_offset(page);
+ iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);
- retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
- if (retval < 0) {
+ retval = p9_client_read(fid, page_offset(page), &to, &err);
+ if (err) {
v9fs_uncache_page(inode, page);
+ retval = err;
goto done;
}
- memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
+ zero_user(page, retval, PAGE_SIZE - retval);
flush_dcache_page(page);
SetPageUptodate(page);
@@ -82,7 +81,6 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
retval = 0;
done:
- kunmap(page);
unlock_page(page);
return retval;
}
@@ -161,41 +159,32 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
static int v9fs_vfs_writepage_locked(struct page *page)
{
- char *buffer;
- int retval, len;
- loff_t offset, size;
- mm_segment_t old_fs;
- struct v9fs_inode *v9inode;
struct inode *inode = page->mapping->host;
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+ loff_t size = i_size_read(inode);
+ struct iov_iter from;
+ struct bio_vec bvec;
+ int err, len;
- v9inode = V9FS_I(inode);
- size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
- set_page_writeback(page);
-
- buffer = kmap(page);
- offset = page_offset(page);
+ bvec.bv_page = page;
+ bvec.bv_offset = 0;
+ bvec.bv_len = len;
+ iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len);
- old_fs = get_fs();
- set_fs(get_ds());
/* We should have writeback_fid always set */
BUG_ON(!v9inode->writeback_fid);
- retval = v9fs_file_write_internal(inode,
- v9inode->writeback_fid,
- (__force const char __user *)buffer,
- len, &offset, 0);
- if (retval > 0)
- retval = 0;
+ set_page_writeback(page);
+
+ p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err);
- set_fs(old_fs);
- kunmap(page);
end_page_writeback(page);
- return retval;
+ return err;
}
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -241,11 +230,8 @@ static int v9fs_launder_page(struct page *page)
/**
* v9fs_direct_IO - 9P address space operation for direct I/O
- * @rw: direction (read or write)
* @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
* @pos: offset in file to begin the operation
- * @nr_segs: size of iovec array
*
* The presence of v9fs_direct_IO() in the address space ops vector
* allows open() O_DIRECT flags which would have failed otherwise.
@@ -259,18 +245,23 @@ static int v9fs_launder_page(struct page *page)
*
*/
static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
- /*
- * FIXME
- * Now that we do caching with cache mode enabled, We need
- * to support direct IO
- */
- p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%pD) off/no(%lld/%lu) EINVAL\n",
- iocb->ki_filp,
- (long long)pos, iter->nr_segs);
-
- return -EINVAL;
+ struct file *file = iocb->ki_filp;
+ ssize_t n;
+ int err = 0;
+ if (iov_iter_rw(iter) == WRITE) {
+ n = p9_client_write(file->private_data, pos, iter, &err);
+ if (n) {
+ struct inode *inode = file_inode(file);
+ loff_t i_size = i_size_read(inode);
+ if (pos + n > i_size)
+ inode_add_bytes(inode, pos + n - i_size);
+ }
+ } else {
+ n = p9_client_read(file->private_data, pos, iter, &err);
+ }
+ return n ? n : err;
}
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index a345b2d659cc..bd456c668d39 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -53,7 +53,7 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
dentry, dentry);
/* Don't cache negative dentries */
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
return 1;
return 0;
}
@@ -83,7 +83,7 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (!inode)
goto out_valid;
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 4f1151088ebe..5cc00e56206e 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -33,6 +33,7 @@
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -115,6 +116,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
int buflen;
int reclen = 0;
struct p9_rdir *rdir;
+ struct kvec kvec;
p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
fid = file->private_data;
@@ -124,16 +126,23 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
rdir = v9fs_alloc_rdir_buf(file, buflen);
if (!rdir)
return -ENOMEM;
+ kvec.iov_base = rdir->buf;
+ kvec.iov_len = buflen;
while (1) {
if (rdir->tail == rdir->head) {
- err = v9fs_file_readn(file, rdir->buf, NULL,
- buflen, ctx->pos);
- if (err <= 0)
+ struct iov_iter to;
+ int n;
+ iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
+ n = p9_client_read(file->private_data, ctx->pos, &to,
+ &err);
+ if (err)
return err;
+ if (n == 0)
+ return 0;
rdir->head = 0;
- rdir->tail = err;
+ rdir->tail = n;
}
while (rdir->head < rdir->tail) {
p9stat_init(&st);
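For reference, not part of the patch: a minimal, self-contained sketch of the iov_iter based 9p client calling convention that these fs/9p hunks convert to. The iov_iter_kvec()/p9_client_read() usage mirrors the code added above in vfs_dir.c and vfs_addr.c; the helper name and buffer are hypothetical.

	#include <linux/uio.h>
	#include <net/9p/client.h>

	/* Read up to len bytes at offset through fid into a kernel buffer. */
	static ssize_t my_read_into_kbuf(struct p9_fid *fid, void *buf,
					 size_t len, loff_t offset)
	{
		struct kvec kvec = { .iov_base = buf, .iov_len = len };
		struct iov_iter to;
		int err = 0;
		ssize_t n;

		iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, len);
		/* Returns the number of bytes read; errors come back through err. */
		n = p9_client_read(fid, offset, &to, &err);
		return n ? n : err;
	}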
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index b40133796b87..1ef16bd8280b 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -36,6 +36,8 @@
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -149,7 +151,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
struct p9_flock flock;
struct p9_fid *fid;
- uint8_t status;
+ uint8_t status = P9_LOCK_ERROR;
int res = 0;
unsigned char fl_type;
@@ -194,7 +196,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
for (;;) {
res = p9_client_lock_dotl(fid, &flock, &status);
if (res < 0)
- break;
+ goto out_unlock;
if (status != P9_LOCK_BLOCKED)
break;
@@ -212,14 +214,16 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
case P9_LOCK_BLOCKED:
res = -EAGAIN;
break;
+ default:
+ WARN_ONCE(1, "unknown lock status code: %d\n", status);
+ /* fallthrough */
case P9_LOCK_ERROR:
case P9_LOCK_GRACE:
res = -ENOLCK;
break;
- default:
- BUG();
}
+out_unlock:
/*
* in case server returned error for lock request, revert
* it locally
@@ -285,6 +289,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
fl->fl_end = glock.start + glock.length - 1;
fl->fl_pid = glock.proc_id;
}
+ kfree(glock.client_id);
return res;
}
@@ -364,63 +369,6 @@ out_err:
}
/**
- * v9fs_fid_readn - read from a fid
- * @fid: fid to read
- * @data: data buffer to read data into
- * @udata: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
- *
- */
-ssize_t
-v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
- u64 offset)
-{
- int n, total, size;
-
- p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
- fid->fid, (long long unsigned)offset, count);
- n = 0;
- total = 0;
- size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
- do {
- n = p9_client_read(fid, data, udata, offset, count);
- if (n <= 0)
- break;
-
- if (data)
- data += n;
- if (udata)
- udata += n;
-
- offset += n;
- count -= n;
- total += n;
- } while (count > 0 && n == size);
-
- if (n < 0)
- total = n;
-
- return total;
-}
-
-/**
- * v9fs_file_readn - read from a file
- * @filp: file pointer to read
- * @data: data buffer to read data into
- * @udata: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
- *
- */
-ssize_t
-v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
- u64 offset)
-{
- return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
-}
-
-/**
* v9fs_file_read - read from a file
* @filp: file pointer to read
* @udata: user data buffer to read data into
@@ -430,69 +378,22 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
*/
static ssize_t
-v9fs_file_read(struct file *filp, char __user *udata, size_t count,
- loff_t * offset)
+v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- int ret;
- struct p9_fid *fid;
- size_t size;
-
- p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
- fid = filp->private_data;
+ struct p9_fid *fid = iocb->ki_filp->private_data;
+ int ret, err;
- size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
- if (count > size)
- ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
- else
- ret = p9_client_read(fid, NULL, udata, *offset, count);
+ p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
+ iov_iter_count(to), iocb->ki_pos);
- if (ret > 0)
- *offset += ret;
+ ret = p9_client_read(fid, iocb->ki_pos, to, &err);
+ if (!ret)
+ return err;
+ iocb->ki_pos += ret;
return ret;
}
-ssize_t
-v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
- const char __user *data, size_t count,
- loff_t *offset, int invalidate)
-{
- int n;
- loff_t i_size;
- size_t total = 0;
- loff_t origin = *offset;
- unsigned long pg_start, pg_end;
-
- p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
- data, (int)count, (int)*offset);
-
- do {
- n = p9_client_write(fid, NULL, data+total, origin+total, count);
- if (n <= 0)
- break;
- count -= n;
- total += n;
- } while (count > 0);
-
- if (invalidate && (total > 0)) {
- pg_start = origin >> PAGE_CACHE_SHIFT;
- pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
- if (inode->i_mapping && inode->i_mapping->nrpages)
- invalidate_inode_pages2_range(inode->i_mapping,
- pg_start, pg_end);
- *offset += total;
- i_size = i_size_read(inode);
- if (*offset > i_size) {
- inode_add_bytes(inode, *offset - i_size);
- i_size_write(inode, *offset);
- }
- }
- if (n < 0)
- return n;
-
- return total;
-}
-
/**
* v9fs_file_write - write to a file
* @filp: file pointer to write
@@ -502,35 +403,39 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
*
*/
static ssize_t
-v9fs_file_write(struct file *filp, const char __user * data,
- size_t count, loff_t *offset)
+v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- ssize_t retval = 0;
- loff_t origin = *offset;
-
-
- retval = generic_write_checks(filp, &origin, &count, 0);
- if (retval)
- goto out;
+ struct file *file = iocb->ki_filp;
+ ssize_t retval;
+ loff_t origin;
+ int err = 0;
- retval = -EINVAL;
- if ((ssize_t) count < 0)
- goto out;
- retval = 0;
- if (!count)
- goto out;
+ retval = generic_write_checks(iocb, from);
+ if (retval <= 0)
+ return retval;
- retval = v9fs_file_write_internal(file_inode(filp),
- filp->private_data,
- data, count, &origin, 1);
- /* update offset on successful write */
- if (retval > 0)
- *offset = origin;
-out:
- return retval;
+ origin = iocb->ki_pos;
+ retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
+ if (retval > 0) {
+ struct inode *inode = file_inode(file);
+ loff_t i_size;
+ unsigned long pg_start, pg_end;
+ pg_start = origin >> PAGE_CACHE_SHIFT;
+ pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
+ if (inode->i_mapping && inode->i_mapping->nrpages)
+ invalidate_inode_pages2_range(inode->i_mapping,
+ pg_start, pg_end);
+ iocb->ki_pos += retval;
+ i_size = i_size_read(inode);
+ if (iocb->ki_pos > i_size) {
+ inode_add_bytes(inode, iocb->ki_pos - i_size);
+ i_size_write(inode, iocb->ki_pos);
+ }
+ return retval;
+ }
+ return err;
}
-
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
int datasync)
{
@@ -657,44 +562,6 @@ out_unlock:
return VM_FAULT_NOPAGE;
}
-static ssize_t
-v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
- loff_t *offsetp)
-{
- loff_t size, offset;
- struct inode *inode;
- struct address_space *mapping;
-
- offset = *offsetp;
- mapping = filp->f_mapping;
- inode = mapping->host;
- if (!count)
- return 0;
- size = i_size_read(inode);
- if (offset < size)
- filemap_write_and_wait_range(mapping, offset,
- offset + count - 1);
-
- return v9fs_file_read(filp, udata, count, offsetp);
-}
-
-/**
- * v9fs_cached_file_read - read from a file
- * @filp: file pointer to read
- * @data: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
- *
- */
-static ssize_t
-v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
- loff_t *offset)
-{
- if (filp->f_flags & O_DIRECT)
- return v9fs_direct_read(filp, data, count, offset);
- return new_sync_read(filp, data, count, offset);
-}
-
/**
* v9fs_mmap_file_read - read from a file
* @filp: file pointer to read
@@ -704,84 +571,12 @@ v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
*
*/
static ssize_t
-v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
- loff_t *offset)
+v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
/* TODO: Check if there are dirty pages */
- return v9fs_file_read(filp, data, count, offset);
-}
-
-static ssize_t
-v9fs_direct_write(struct file *filp, const char __user * data,
- size_t count, loff_t *offsetp)
-{
- loff_t offset;
- ssize_t retval;
- struct inode *inode;
- struct address_space *mapping;
-
- offset = *offsetp;
- mapping = filp->f_mapping;
- inode = mapping->host;
- if (!count)
- return 0;
-
- mutex_lock(&inode->i_mutex);
- retval = filemap_write_and_wait_range(mapping, offset,
- offset + count - 1);
- if (retval)
- goto err_out;
- /*
- * After a write we want buffered reads to be sure to go to disk to get
- * the new data. We invalidate clean cached page from the region we're
- * about to write. We do this *before* the write so that if we fail
- * here we fall back to buffered write
- */
- if (mapping->nrpages) {
- pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
- pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
-
- retval = invalidate_inode_pages2_range(mapping,
- pg_start, pg_end);
- /*
- * If a page can not be invalidated, fall back
- * to buffered write.
- */
- if (retval) {
- if (retval == -EBUSY)
- goto buff_write;
- goto err_out;
- }
- }
- retval = v9fs_file_write(filp, data, count, offsetp);
-err_out:
- mutex_unlock(&inode->i_mutex);
- return retval;
-
-buff_write:
- mutex_unlock(&inode->i_mutex);
- return new_sync_write(filp, data, count, offsetp);
-}
-
-/**
- * v9fs_cached_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
- *
- */
-static ssize_t
-v9fs_cached_file_write(struct file *filp, const char __user * data,
- size_t count, loff_t *offset)
-{
-
- if (filp->f_flags & O_DIRECT)
- return v9fs_direct_write(filp, data, count, offset);
- return new_sync_write(filp, data, count, offset);
+ return v9fs_file_read_iter(iocb, to);
}
-
/**
* v9fs_mmap_file_write - write to a file
* @filp: file pointer to write
@@ -791,14 +586,13 @@ v9fs_cached_file_write(struct file *filp, const char __user * data,
*
*/
static ssize_t
-v9fs_mmap_file_write(struct file *filp, const char __user *data,
- size_t count, loff_t *offset)
+v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
/*
* TODO: invalidate mmaps on filp's inode between
* offset and offset+count
*/
- return v9fs_file_write(filp, data, count, offset);
+ return v9fs_file_write_iter(iocb, from);
}
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
@@ -843,8 +637,6 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
- .read = v9fs_cached_file_read,
- .write = v9fs_cached_file_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.open = v9fs_file_open,
@@ -856,8 +648,6 @@ const struct file_operations v9fs_cached_file_operations = {
const struct file_operations v9fs_cached_file_operations_dotl = {
.llseek = generic_file_llseek,
- .read = v9fs_cached_file_read,
- .write = v9fs_cached_file_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.open = v9fs_file_open,
@@ -870,8 +660,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = {
const struct file_operations v9fs_file_operations = {
.llseek = generic_file_llseek,
- .read = v9fs_file_read,
- .write = v9fs_file_write,
+ .read_iter = v9fs_file_read_iter,
+ .write_iter = v9fs_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
@@ -881,8 +671,8 @@ const struct file_operations v9fs_file_operations = {
const struct file_operations v9fs_file_operations_dotl = {
.llseek = generic_file_llseek,
- .read = v9fs_file_read,
- .write = v9fs_file_write,
+ .read_iter = v9fs_file_read_iter,
+ .write_iter = v9fs_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
@@ -893,8 +683,8 @@ const struct file_operations v9fs_file_operations_dotl = {
const struct file_operations v9fs_mmap_file_operations = {
.llseek = generic_file_llseek,
- .read = v9fs_mmap_file_read,
- .write = v9fs_mmap_file_write,
+ .read_iter = v9fs_mmap_file_read_iter,
+ .write_iter = v9fs_mmap_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
@@ -904,8 +694,8 @@ const struct file_operations v9fs_mmap_file_operations = {
const struct file_operations v9fs_mmap_file_operations_dotl = {
.llseek = generic_file_llseek,
- .read = v9fs_mmap_file_read,
- .write = v9fs_mmap_file_write,
+ .read_iter = v9fs_mmap_file_read_iter,
+ .write_iter = v9fs_mmap_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
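
[Editor's note] The hunks above drop the old .read/.write entries and keep only the iter-based hooks. As a rough sketch (not taken from this patch), a filesystem whose data path can use the generic helpers now needs no sync wrappers at all; example_fops is a made-up name and the generic_file_fsync entry is only an assumption for illustration:

	/* Hedged sketch: after this series the VFS synthesizes plain
	 * read(2)/write(2) from ->read_iter/->write_iter, so a simple
	 * filesystem only wires up the iterator-based hooks. */
	#include <linux/fs.h>

	static const struct file_operations example_fops = {
		.llseek		= generic_file_llseek,
		.read_iter	= generic_file_read_iter,
		.write_iter	= generic_file_write_iter,
		.mmap		= generic_file_mmap,
		.fsync		= generic_file_fsync,
	};
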
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3662f1d1d9cf..703342e309f5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -595,7 +595,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
dir, dentry, flags);
v9ses = v9fs_inode2v9ses(dir);
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
retval = PTR_ERR(dfid);
@@ -864,7 +864,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
}
/* Only creates */
- if (!(flags & O_CREAT) || dentry->d_inode)
+ if (!(flags & O_CREAT) || d_really_is_positive(dentry))
return finish_no_open(file, res);
err = 0;
@@ -881,7 +881,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
}
v9fs_invalidate_inode_attr(dir);
- v9inode = V9FS_I(dentry->d_inode);
+ v9inode = V9FS_I(d_inode(dentry));
mutex_lock(&v9inode->v_mutex);
if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
!v9inode->writeback_fid &&
@@ -908,7 +908,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
file->private_data = fid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- v9fs_cache_inode_set_cookie(dentry->d_inode, file);
+ v9fs_cache_inode_set_cookie(d_inode(dentry), file);
*opened |= FILE_CREATED;
out:
@@ -969,8 +969,8 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
p9_debug(P9_DEBUG_VFS, "\n");
retval = 0;
- old_inode = old_dentry->d_inode;
- new_inode = new_dentry->d_inode;
+ old_inode = d_inode(old_dentry);
+ new_inode = d_inode(new_dentry);
v9ses = v9fs_inode2v9ses(old_inode);
oldfid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(oldfid))
@@ -1061,7 +1061,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- generic_fillattr(dentry->d_inode, stat);
+ generic_fillattr(d_inode(dentry), stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
@@ -1072,8 +1072,8 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
if (IS_ERR(st))
return PTR_ERR(st);
- v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
- generic_fillattr(dentry->d_inode, stat);
+ v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb);
+ generic_fillattr(d_inode(dentry), stat);
p9stat_free(st);
kfree(st);
@@ -1095,7 +1095,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
struct p9_wstat wstat;
p9_debug(P9_DEBUG_VFS, "\n");
- retval = inode_change_ok(dentry->d_inode, iattr);
+ retval = inode_change_ok(d_inode(dentry), iattr);
if (retval)
return retval;
@@ -1128,20 +1128,20 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
/* Write all dirty data */
if (d_is_reg(dentry))
- filemap_write_and_wait(dentry->d_inode->i_mapping);
+ filemap_write_and_wait(d_inode(dentry)->i_mapping);
retval = p9_client_wstat(fid, &wstat);
if (retval < 0)
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(dentry->d_inode))
- truncate_setsize(dentry->d_inode, iattr->ia_size);
+ iattr->ia_size != i_size_read(d_inode(dentry)))
+ truncate_setsize(d_inode(dentry), iattr->ia_size);
- v9fs_invalidate_inode_attr(dentry->d_inode);
+ v9fs_invalidate_inode_attr(d_inode(dentry));
- setattr_copy(dentry->d_inode, iattr);
- mark_inode_dirty(dentry->d_inode);
+ setattr_copy(d_inode(dentry), iattr);
+ mark_inode_dirty(d_inode(dentry));
return 0;
}
@@ -1403,7 +1403,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name);
__putname(name);
if (!retval) {
- v9fs_refresh_inode(oldfid, old_dentry->d_inode);
+ v9fs_refresh_inode(oldfid, d_inode(old_dentry));
v9fs_invalidate_inode_attr(dir);
}
clunk_fid:
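
[Editor's note] The dentry->d_inode sweep above (and in the files that follow) replaces raw field access with the d_inode()/d_really_is_positive() accessors. The standalone program below uses simplified stand-in types, not the kernel definitions, purely to show why the accessor form reads better at call sites:

	#include <stdbool.h>
	#include <stdio.h>

	struct inode_demo  { unsigned long i_ino; };
	struct dentry_demo { struct inode_demo *d_inode; };

	/* Accessor hides the field; callers stop caring about layout. */
	static inline struct inode_demo *d_inode(const struct dentry_demo *d)
	{
		return d->d_inode;
	}

	/* Reads as a question rather than a pointer test. */
	static inline bool d_really_is_positive(const struct dentry_demo *d)
	{
		return d->d_inode != NULL;
	}

	int main(void)
	{
		struct inode_demo ino = { .i_ino = 7 };
		struct dentry_demo positive = { .d_inode = &ino };
		struct dentry_demo negative = { .d_inode = NULL };

		if (d_really_is_positive(&positive))
			printf("ino = %lu\n", d_inode(&positive)->i_ino);
		if (!d_really_is_positive(&negative))
			printf("negative dentry has no inode\n");
		return 0;
	}
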
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 6054c16b8fae..9861c7c951a6 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -265,7 +265,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
/* Only creates */
- if (!(flags & O_CREAT) || dentry->d_inode)
+ if (!(flags & O_CREAT) || d_really_is_positive(dentry))
return finish_no_open(file, res);
v9ses = v9fs_inode2v9ses(dir);
@@ -481,7 +481,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- generic_fillattr(dentry->d_inode, stat);
+ generic_fillattr(d_inode(dentry), stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
@@ -496,8 +496,8 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
if (IS_ERR(st))
return PTR_ERR(st);
- v9fs_stat2inode_dotl(st, dentry->d_inode);
- generic_fillattr(dentry->d_inode, stat);
+ v9fs_stat2inode_dotl(st, d_inode(dentry));
+ generic_fillattr(d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
@@ -557,7 +557,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
int retval;
struct p9_fid *fid;
struct p9_iattr_dotl p9attr;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
p9_debug(P9_DEBUG_VFS, "\n");
@@ -795,10 +795,10 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
if (IS_ERR(fid))
return PTR_ERR(fid);
- v9fs_refresh_inode_dotl(fid, old_dentry->d_inode);
+ v9fs_refresh_inode_dotl(fid, d_inode(old_dentry));
}
- ihold(old_dentry->d_inode);
- d_instantiate(dentry, old_dentry->d_inode);
+ ihold(d_inode(old_dentry));
+ d_instantiate(dentry, d_inode(old_dentry));
return err;
}
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 0afd0382822b..e99a338a4638 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -168,8 +168,8 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(st);
goto release_sb;
}
- root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode_dotl(st, root->d_inode);
+ d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
+ v9fs_stat2inode_dotl(st, d_inode(root));
kfree(st);
} else {
struct p9_wstat *st = NULL;
@@ -179,8 +179,8 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
goto release_sb;
}
- root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode(st, root->d_inode, sb);
+ d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
+ v9fs_stat2inode(st, d_inode(root), sb);
p9stat_free(st);
kfree(st);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index f95e01e058e4..0cf44b6cccd6 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
+#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -25,50 +26,34 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
void *buffer, size_t buffer_size)
{
ssize_t retval;
- int msize, read_count;
- u64 offset = 0, attr_size;
+ u64 attr_size;
struct p9_fid *attr_fid;
+ struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size};
+ struct iov_iter to;
+ int err;
+
+ iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size);
attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
if (IS_ERR(attr_fid)) {
retval = PTR_ERR(attr_fid);
p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n",
retval);
- attr_fid = NULL;
- goto error;
- }
- if (!buffer_size) {
- /* request to get the attr_size */
- retval = attr_size;
- goto error;
+ return retval;
}
if (attr_size > buffer_size) {
- retval = -ERANGE;
- goto error;
- }
- msize = attr_fid->clnt->msize;
- while (attr_size) {
- if (attr_size > (msize - P9_IOHDRSZ))
- read_count = msize - P9_IOHDRSZ;
+ if (!buffer_size) /* request to get the attr_size */
+ retval = attr_size;
else
- read_count = attr_size;
- read_count = p9_client_read(attr_fid, ((char *)buffer)+offset,
- NULL, offset, read_count);
- if (read_count < 0) {
- /* error in xattr read */
- retval = read_count;
- goto error;
- }
- offset += read_count;
- attr_size -= read_count;
+ retval = -ERANGE;
+ } else {
+ iov_iter_truncate(&to, attr_size);
+ retval = p9_client_read(attr_fid, 0, &to, &err);
+ if (err)
+ retval = err;
}
- /* Total read xattr bytes */
- retval = offset;
-error:
- if (attr_fid)
- p9_client_clunk(attr_fid);
+ p9_client_clunk(attr_fid);
return retval;
-
}
@@ -120,8 +105,11 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
const void *value, size_t value_len, int flags)
{
- u64 offset = 0;
- int retval, msize, write_count;
+ struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
+ struct iov_iter from;
+ int retval;
+
+ iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
name, value_len, flags);
@@ -135,29 +123,11 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
* On success fid points to xattr
*/
retval = p9_client_xattrcreate(fid, name, value_len, flags);
- if (retval < 0) {
+ if (retval < 0)
p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
retval);
- goto err;
- }
- msize = fid->clnt->msize;
- while (value_len) {
- if (value_len > (msize - P9_IOHDRSZ))
- write_count = msize - P9_IOHDRSZ;
- else
- write_count = value_len;
- write_count = p9_client_write(fid, ((char *)value)+offset,
- NULL, offset, write_count);
- if (write_count < 0) {
- /* error in xattr write */
- retval = write_count;
- goto err;
- }
- offset += write_count;
- value_len -= write_count;
- }
- retval = 0;
-err:
+ else
+ p9_client_write(fid, 0, &from, &retval);
p9_client_clunk(fid);
return retval;
}
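
[Editor's note] The xattr rework above replaces the hand-rolled msize loop with a kvec-backed iov_iter handed to p9_client_read()/p9_client_write(). A minimal sketch of the read side, using only the calls visible in this patch; example_read_into_buffer is a hypothetical helper name, not part of the series:

	#include <linux/uio.h>
	#include <net/9p/client.h>

	/* Wrap a kernel buffer in a single-kvec iov_iter and let the 9p
	 * client do the chunking internally instead of open-coding it. */
	static ssize_t example_read_into_buffer(struct p9_fid *fid,
						void *buf, size_t len)
	{
		struct kvec kvec = { .iov_base = buf, .iov_len = len };
		struct iov_iter to;
		int err = 0;
		ssize_t n;

		iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, len);
		n = p9_client_read(fid, 0, &to, &err);
		return err ? err : n;
	}
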
diff --git a/fs/Kconfig b/fs/Kconfig
index ec35851e5b71..011f43365d7b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -32,6 +32,7 @@ source "fs/gfs2/Kconfig"
source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
+source "fs/f2fs/Kconfig"
config FS_DAX
bool "Direct Access (DAX) support"
@@ -217,7 +218,6 @@ source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
-source "fs/f2fs/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 270c48148f79..2d0cbbd14cfc 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -27,9 +27,6 @@ config COMPAT_BINFMT_ELF
bool
depends on COMPAT && BINFMT_ELF
-config ARCH_BINFMT_ELF_RANDOMIZE_PIE
- bool
-
config ARCH_BINFMT_ELF_STATE
bool
diff --git a/fs/Makefile b/fs/Makefile
index a88ac4838c9e..cb92fd4c3172 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_HOSTFS) += hostfs/
obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_CACHEFILES) += cachefiles/
obj-$(CONFIG_DEBUG_FS) += debugfs/
+obj-$(CONFIG_TRACING) += tracefs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index f2ba88ab4aed..82d14cdf70f9 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -61,6 +61,7 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
kcalloc(size, sizeof(struct buffer_head *),
GFP_KERNEL);
if (!bh_fplus) {
+ ret = -ENOMEM;
adfs_error(sb, "not enough memory for"
" dir object %X (%d blocks)", id, size);
goto out;
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index 07c9edce5aa7..46c0d5671cd5 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -23,11 +23,9 @@
const struct file_operations adfs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.splice_read = generic_file_splice_read,
};
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index b9acadafa4a1..335055d828e4 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -298,7 +298,7 @@ out:
int
adfs_notify_change(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
unsigned int ia_valid = attr->ia_valid;
int error;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 9852bdf34d76..a19c31d3f369 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -316,7 +316,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di
dm = kmalloc(nzones * sizeof(*dm), GFP_KERNEL);
if (dm == NULL) {
adfs_error(sb, "not enough memory");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
for (zone = 0; zone < nzones; zone++, map_addr++) {
@@ -349,7 +349,7 @@ error_free:
brelse(dm[zone].dm_bh);
kfree(dm);
- return NULL;
+ return ERR_PTR(-EIO);
}
static inline unsigned long adfs_discsize(struct adfs_discrecord *dr, int block_bits)
@@ -370,6 +370,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
unsigned char *b_data;
struct adfs_sb_info *asb;
struct inode *root;
+ int ret = -EINVAL;
sb->s_flags |= MS_NODIRATIME;
@@ -391,6 +392,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
sb_set_blocksize(sb, BLOCK_SIZE);
if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
adfs_error(sb, "unable to read superblock");
+ ret = -EIO;
goto error;
}
@@ -400,6 +402,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
if (!silent)
printk("VFS: Can't find an adfs filesystem on dev "
"%s.\n", sb->s_id);
+ ret = -EINVAL;
goto error_free_bh;
}
@@ -412,6 +415,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
if (!silent)
printk("VPS: Can't find an adfs filesystem on dev "
"%s.\n", sb->s_id);
+ ret = -EINVAL;
goto error_free_bh;
}
@@ -421,11 +425,13 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
if (!bh) {
adfs_error(sb, "couldn't read superblock on "
"2nd try.");
+ ret = -EIO;
goto error;
}
b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
if (adfs_checkbblk(b_data)) {
adfs_error(sb, "disc record mismatch, very weird!");
+ ret = -EINVAL;
goto error_free_bh;
}
dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
@@ -433,6 +439,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
if (!silent)
printk(KERN_ERR "VFS: Unsupported blocksize on dev "
"%s.\n", sb->s_id);
+ ret = -EINVAL;
goto error;
}
@@ -447,10 +454,12 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb->s_size = adfs_discsize(dr, sb->s_blocksize_bits);
asb->s_version = dr->format_version;
asb->s_log2sharesize = dr->log2sharesize;
-
+
asb->s_map = adfs_read_map(sb, dr);
- if (!asb->s_map)
+ if (IS_ERR(asb->s_map)) {
+ ret = PTR_ERR(asb->s_map);
goto error_free_bh;
+ }
brelse(bh);
@@ -499,6 +508,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
brelse(asb->s_map[i].dm_bh);
kfree(asb->s_map);
adfs_error(sb, "get root inode failed\n");
+ ret = -EIO;
goto error;
}
return 0;
@@ -508,7 +518,7 @@ error_free_bh:
error:
sb->s_fs_info = NULL;
kfree(asb);
- return -EINVAL;
+ return ret;
}
static struct dentry *adfs_mount(struct file_system_type *fs_type,
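
[Editor's note] adfs_read_map() now reports why it failed by returning ERR_PTR(-ENOMEM) or ERR_PTR(-EIO) instead of NULL, and adfs_fill_super() propagates that code instead of a blanket -EINVAL. The standalone demo below re-implements the ERR_PTR()/IS_ERR()/PTR_ERR() convention in userspace purely for illustration; it is not the kernel's definition:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		/* top 4095 addresses are reserved for encoded errnos */
		return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
	}

	static void *read_map_demo(int fail)
	{
		static int map = 42;

		if (fail)
			return ERR_PTR(-ENOMEM);  /* caller learns *why* */
		return &map;
	}

	int main(void)
	{
		void *map = read_map_demo(1);

		if (IS_ERR(map))
			printf("read_map failed: %ld\n", PTR_ERR(map));
		return 0;	/* prints -12 (ENOMEM) on Linux */
	}
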
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index c8764bd7497d..cffe8370fb44 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -106,18 +106,22 @@ struct affs_sb_info {
spinlock_t work_lock; /* protects sb_work and work_queued */
};
-#define SF_INTL 0x0001 /* International filesystem. */
-#define SF_BM_VALID 0x0002 /* Bitmap is valid. */
-#define SF_IMMUTABLE 0x0004 /* Protection bits cannot be changed */
-#define SF_QUIET 0x0008 /* chmod errors will be not reported */
-#define SF_SETUID 0x0010 /* Ignore Amiga uid */
-#define SF_SETGID 0x0020 /* Ignore Amiga gid */
-#define SF_SETMODE 0x0040 /* Ignore Amiga protection bits */
-#define SF_MUFS 0x0100 /* Use MUFS uid/gid mapping */
-#define SF_OFS 0x0200 /* Old filesystem */
-#define SF_PREFIX 0x0400 /* Buffer for prefix is allocated */
-#define SF_VERBOSE 0x0800 /* Talk about fs when mounting */
-#define SF_NO_TRUNCATE 0x1000 /* Don't truncate filenames */
+#define AFFS_MOUNT_SF_INTL 0x0001 /* International filesystem. */
+#define AFFS_MOUNT_SF_BM_VALID 0x0002 /* Bitmap is valid. */
+#define AFFS_MOUNT_SF_IMMUTABLE 0x0004 /* Protection bits cannot be changed */
+#define AFFS_MOUNT_SF_QUIET 0x0008 /* chmod errors will be not reported */
+#define AFFS_MOUNT_SF_SETUID 0x0010 /* Ignore Amiga uid */
+#define AFFS_MOUNT_SF_SETGID 0x0020 /* Ignore Amiga gid */
+#define AFFS_MOUNT_SF_SETMODE 0x0040 /* Ignore Amiga protection bits */
+#define AFFS_MOUNT_SF_MUFS 0x0100 /* Use MUFS uid/gid mapping */
+#define AFFS_MOUNT_SF_OFS 0x0200 /* Old filesystem */
+#define AFFS_MOUNT_SF_PREFIX 0x0400 /* Buffer for prefix is allocated */
+#define AFFS_MOUNT_SF_VERBOSE 0x0800 /* Talk about fs when mounting */
+#define AFFS_MOUNT_SF_NO_TRUNCATE 0x1000 /* Don't truncate filenames */
+
+#define affs_clear_opt(o, opt) (o &= ~AFFS_MOUNT_##opt)
+#define affs_set_opt(o, opt) (o |= AFFS_MOUNT_##opt)
+#define affs_test_opt(o, opt) ((o) & AFFS_MOUNT_##opt)
/* short cut to get to the affs specific sb data */
static inline struct affs_sb_info *AFFS_SB(struct super_block *sb)
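
[Editor's note] The new AFFS_MOUNT_SF_* constants plus affs_set_opt()/affs_clear_opt()/affs_test_opt() rely on token pasting, so call sites keep the short SF_* spelling while the real flags get an unambiguous prefix. A standalone, runnable illustration of the same pattern (all DEMO_* names are invented):

	#include <stdio.h>

	#define DEMO_MOUNT_SF_INTL	0x0001
	#define DEMO_MOUNT_SF_OFS	0x0200

	/* The macro pastes the prefix on, exactly like affs_set_opt(). */
	#define demo_set_opt(o, opt)	((o) |= DEMO_MOUNT_##opt)
	#define demo_clear_opt(o, opt)	((o) &= ~DEMO_MOUNT_##opt)
	#define demo_test_opt(o, opt)	((o) & DEMO_MOUNT_##opt)

	int main(void)
	{
		unsigned long flags = 0;

		demo_set_opt(flags, SF_INTL);
		demo_set_opt(flags, SF_OFS);
		demo_clear_opt(flags, SF_OFS);

		printf("INTL=%d OFS=%d\n",
		       demo_test_opt(flags, SF_INTL) != 0,
		       demo_test_opt(flags, SF_OFS) != 0);
		return 0;	/* prints: INTL=1 OFS=0 */
	}
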
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 388da1ea815d..a8f463c028ce 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -138,7 +138,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino)
static int
affs_remove_link(struct dentry *dentry)
{
- struct inode *dir, *inode = dentry->d_inode;
+ struct inode *dir, *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
struct buffer_head *bh = NULL, *link_bh = NULL;
u32 link_ino, ino;
@@ -268,11 +268,11 @@ affs_remove_header(struct dentry *dentry)
struct buffer_head *bh = NULL;
int retval;
- dir = dentry->d_parent->d_inode;
+ dir = d_inode(dentry->d_parent);
sb = dir->i_sb;
retval = -ENOENT;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (!inode)
goto done;
@@ -471,9 +471,9 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...)
bool
affs_nofilenametruncate(const struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
- return AFFS_SB(inode->i_sb)->s_flags & SF_NO_TRUNCATE;
+ struct inode *inode = d_inode(dentry);
+ return affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_NO_TRUNCATE);
}
/* Check if the name is valid for a affs object. */
diff --git a/fs/affs/file.c b/fs/affs/file.c
index a91795e01a7f..659c579c4588 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -12,7 +12,7 @@
* affs regular file handling primitives
*/
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "affs.h"
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
@@ -389,8 +389,7 @@ static void affs_write_failed(struct address_space *mapping, loff_t to)
}
static ssize_t
-affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -398,15 +397,15 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
ssize_t ret;
- if (rw == WRITE) {
+ if (iov_iter_rw(iter) == WRITE) {
loff_t size = offset + count;
if (AFFS_I(inode)->mmu_private < size)
return 0;
}
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
- if (ret < 0 && (rw & WRITE))
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
+ if (ret < 0 && iov_iter_rw(iter) == WRITE)
affs_write_failed(mapping, offset + count);
return ret;
}
@@ -915,7 +914,7 @@ affs_truncate(struct inode *inode)
if (inode->i_size) {
AFFS_I(inode)->i_blkcnt = last_blk + 1;
AFFS_I(inode)->i_extcnt = ext + 1;
- if (AFFS_SB(sb)->s_flags & SF_OFS) {
+ if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
u32 tmp;
if (IS_ERR(bh)) {
@@ -969,9 +968,7 @@ int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
}
const struct file_operations affs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.open = affs_file_open,
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 6f34510449e8..a022f4accd76 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -66,23 +66,23 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
AFFS_I(inode)->i_lastalloc = 0;
AFFS_I(inode)->i_pa_cnt = 0;
- if (sbi->s_flags & SF_SETMODE)
+ if (affs_test_opt(sbi->s_flags, SF_SETMODE))
inode->i_mode = sbi->s_mode;
else
inode->i_mode = prot_to_mode(prot);
id = be16_to_cpu(tail->uid);
- if (id == 0 || sbi->s_flags & SF_SETUID)
+ if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETUID))
inode->i_uid = sbi->s_uid;
- else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
+ else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS))
i_uid_write(inode, 0);
else
i_uid_write(inode, id);
id = be16_to_cpu(tail->gid);
- if (id == 0 || sbi->s_flags & SF_SETGID)
+ if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETGID))
inode->i_gid = sbi->s_gid;
- else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
+ else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS))
i_gid_write(inode, 0);
else
i_gid_write(inode, id);
@@ -94,7 +94,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
/* fall through */
case ST_USERDIR:
if (be32_to_cpu(tail->stype) == ST_USERDIR ||
- sbi->s_flags & SF_SETMODE) {
+ affs_test_opt(sbi->s_flags, SF_SETMODE)) {
if (inode->i_mode & S_IRUSR)
inode->i_mode |= S_IXUSR;
if (inode->i_mode & S_IRGRP)
@@ -133,7 +133,8 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
}
if (tail->link_chain)
set_nlink(inode, 2);
- inode->i_mapping->a_ops = (sbi->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
+ inode->i_mapping->a_ops = affs_test_opt(sbi->s_flags, SF_OFS) ?
+ &affs_aops_ofs : &affs_aops;
inode->i_op = &affs_file_inode_operations;
inode->i_fop = &affs_file_operations;
break;
@@ -190,15 +191,15 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) {
uid = i_uid_read(inode);
gid = i_gid_read(inode);
- if (AFFS_SB(sb)->s_flags & SF_MUFS) {
+ if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_MUFS)) {
if (uid == 0 || uid == 0xFFFF)
uid = uid ^ ~0;
if (gid == 0 || gid == 0xFFFF)
gid = gid ^ ~0;
}
- if (!(AFFS_SB(sb)->s_flags & SF_SETUID))
+ if (!affs_test_opt(AFFS_SB(sb)->s_flags, SF_SETUID))
tail->uid = cpu_to_be16(uid);
- if (!(AFFS_SB(sb)->s_flags & SF_SETGID))
+ if (!affs_test_opt(AFFS_SB(sb)->s_flags, SF_SETGID))
tail->gid = cpu_to_be16(gid);
}
}
@@ -212,7 +213,7 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
int
affs_notify_change(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid);
@@ -221,11 +222,14 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr)
if (error)
goto out;
- if (((attr->ia_valid & ATTR_UID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETUID)) ||
- ((attr->ia_valid & ATTR_GID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETGID)) ||
+ if (((attr->ia_valid & ATTR_UID) &&
+ affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_SETUID)) ||
+ ((attr->ia_valid & ATTR_GID) &&
+ affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_SETGID)) ||
((attr->ia_valid & ATTR_MODE) &&
- (AFFS_SB(inode->i_sb)->s_flags & (SF_SETMODE | SF_IMMUTABLE)))) {
- if (!(AFFS_SB(inode->i_sb)->s_flags & SF_QUIET))
+ (AFFS_SB(inode->i_sb)->s_flags &
+ (AFFS_MOUNT_SF_SETMODE | AFFS_MOUNT_SF_IMMUTABLE)))) {
+ if (!affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_QUIET))
error = -EPERM;
goto out;
}
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index ffb7bd82c2a5..181e05b46e72 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -53,7 +53,8 @@ affs_intl_toupper(int ch)
static inline toupper_t
affs_get_toupper(struct super_block *sb)
{
- return AFFS_SB(sb)->s_flags & SF_INTL ? affs_intl_toupper : affs_toupper;
+ return affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL) ?
+ affs_intl_toupper : affs_toupper;
}
/*
@@ -250,7 +251,7 @@ int
affs_unlink(struct inode *dir, struct dentry *dentry)
{
pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino,
- dentry->d_inode->i_ino, dentry);
+ d_inode(dentry)->i_ino, dentry);
return affs_remove_header(dentry);
}
@@ -275,7 +276,8 @@ affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
inode->i_op = &affs_file_inode_operations;
inode->i_fop = &affs_file_operations;
- inode->i_mapping->a_ops = (AFFS_SB(sb)->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
+ inode->i_mapping->a_ops = affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS) ?
+ &affs_aops_ofs : &affs_aops;
error = affs_add_entry(dir, inode, dentry, ST_FILE);
if (error) {
clear_nlink(inode);
@@ -318,7 +320,7 @@ int
affs_rmdir(struct inode *dir, struct dentry *dentry)
{
pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino,
- dentry->d_inode->i_ino, dentry);
+ d_inode(dentry)->i_ino, dentry);
return affs_remove_header(dentry);
}
@@ -401,7 +403,7 @@ err:
int
affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
pr_debug("%s(%lu, %lu, \"%pd\")\n", __func__, inode->i_ino, dir->i_ino,
dentry);
@@ -428,13 +430,13 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry,
return retval;
/* Unlink destination if it already exists */
- if (new_dentry->d_inode) {
+ if (d_really_is_positive(new_dentry)) {
retval = affs_remove_header(new_dentry);
if (retval)
return retval;
}
- bh = affs_bread(sb, old_dentry->d_inode->i_ino);
+ bh = affs_bread(sb, d_inode(old_dentry)->i_ino);
if (!bh)
return -EIO;
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 4cf0e9113fb6..3f89c9e05b40 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -227,22 +227,22 @@ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved,
if (match_octal(&args[0], &option))
return 0;
*mode = option & 0777;
- *mount_opts |= SF_SETMODE;
+ affs_set_opt(*mount_opts, SF_SETMODE);
break;
case Opt_mufs:
- *mount_opts |= SF_MUFS;
+ affs_set_opt(*mount_opts, SF_MUFS);
break;
case Opt_notruncate:
- *mount_opts |= SF_NO_TRUNCATE;
+ affs_set_opt(*mount_opts, SF_NO_TRUNCATE);
break;
case Opt_prefix:
*prefix = match_strdup(&args[0]);
if (!*prefix)
return 0;
- *mount_opts |= SF_PREFIX;
+ affs_set_opt(*mount_opts, SF_PREFIX);
break;
case Opt_protect:
- *mount_opts |= SF_IMMUTABLE;
+ affs_set_opt(*mount_opts, SF_IMMUTABLE);
break;
case Opt_reserved:
if (match_int(&args[0], reserved))
@@ -258,7 +258,7 @@ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved,
*gid = make_kgid(current_user_ns(), option);
if (!gid_valid(*gid))
return 0;
- *mount_opts |= SF_SETGID;
+ affs_set_opt(*mount_opts, SF_SETGID);
break;
case Opt_setuid:
if (match_int(&args[0], &option))
@@ -266,10 +266,10 @@ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved,
*uid = make_kuid(current_user_ns(), option);
if (!uid_valid(*uid))
return 0;
- *mount_opts |= SF_SETUID;
+ affs_set_opt(*mount_opts, SF_SETUID);
break;
case Opt_verbose:
- *mount_opts |= SF_VERBOSE;
+ affs_set_opt(*mount_opts, SF_VERBOSE);
break;
case Opt_volume: {
char *vol = match_strdup(&args[0]);
@@ -435,30 +435,31 @@ got_root:
case MUFS_FS:
case MUFS_INTLFFS:
case MUFS_DCFFS:
- sbi->s_flags |= SF_MUFS;
+ affs_set_opt(sbi->s_flags, SF_MUFS);
/* fall thru */
case FS_INTLFFS:
case FS_DCFFS:
- sbi->s_flags |= SF_INTL;
+ affs_set_opt(sbi->s_flags, SF_INTL);
break;
case MUFS_FFS:
- sbi->s_flags |= SF_MUFS;
+ affs_set_opt(sbi->s_flags, SF_MUFS);
break;
case FS_FFS:
break;
case MUFS_OFS:
- sbi->s_flags |= SF_MUFS;
+ affs_set_opt(sbi->s_flags, SF_MUFS);
/* fall thru */
case FS_OFS:
- sbi->s_flags |= SF_OFS;
+ affs_set_opt(sbi->s_flags, SF_OFS);
sb->s_flags |= MS_NOEXEC;
break;
case MUFS_DCOFS:
case MUFS_INTLOFS:
- sbi->s_flags |= SF_MUFS;
+ affs_set_opt(sbi->s_flags, SF_MUFS);
case FS_DCOFS:
case FS_INTLOFS:
- sbi->s_flags |= SF_INTL | SF_OFS;
+ affs_set_opt(sbi->s_flags, SF_INTL);
+ affs_set_opt(sbi->s_flags, SF_OFS);
sb->s_flags |= MS_NOEXEC;
break;
default:
@@ -467,7 +468,7 @@ got_root:
return -EINVAL;
}
- if (mount_flags & SF_VERBOSE) {
+ if (affs_test_opt(mount_flags, SF_VERBOSE)) {
u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0];
pr_notice("Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n",
len > 31 ? 31 : len,
@@ -478,7 +479,7 @@ got_root:
sb->s_flags |= MS_NODEV | MS_NOSUID;
sbi->s_data_blksize = sb->s_blocksize;
- if (sbi->s_flags & SF_OFS)
+ if (affs_test_opt(sbi->s_flags, SF_OFS))
sbi->s_data_blksize -= 24;
tmp_flags = sb->s_flags;
@@ -493,7 +494,7 @@ got_root:
if (IS_ERR(root_inode))
return PTR_ERR(root_inode);
- if (AFFS_SB(sb)->s_flags & SF_INTL)
+ if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL))
sb->s_d_op = &affs_intl_dentry_operations;
else
sb->s_d_op = &affs_dentry_operations;
@@ -520,10 +521,14 @@ affs_remount(struct super_block *sb, int *flags, char *data)
int root_block;
unsigned long mount_flags;
int res = 0;
- char *new_opts = kstrdup(data, GFP_KERNEL);
+ char *new_opts;
char volume[32];
char *prefix = NULL;
+ new_opts = kstrdup(data, GFP_KERNEL);
+ if (!new_opts)
+ return -ENOMEM;
+
pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
sync_filesystem(sb);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 4ec35e9130e1..e10e17788f06 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -505,7 +505,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
_enter("{%x:%u},%p{%pd},",
vnode->fid.vid, vnode->fid.vnode, dentry, dentry);
- ASSERTCMP(dentry->d_inode, ==, NULL);
+ ASSERTCMP(d_inode(dentry), ==, NULL);
if (dentry->d_name.len >= AFSNAMEMAX) {
_leave(" = -ENAMETOOLONG");
@@ -563,8 +563,8 @@ success:
_leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }",
fid.vnode,
fid.unique,
- dentry->d_inode->i_ino,
- dentry->d_inode->i_generation);
+ d_inode(dentry)->i_ino,
+ d_inode(dentry)->i_generation);
return NULL;
}
@@ -586,9 +586,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- vnode = AFS_FS_I(dentry->d_inode);
+ vnode = AFS_FS_I(d_inode(dentry));
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
_enter("{v={%x:%u} n=%pd fl=%lx},",
vnode->fid.vid, vnode->fid.vnode, dentry,
vnode->flags);
@@ -601,7 +601,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
/* lock down the parent dentry so we can peer at it */
parent = dget_parent(dentry);
- dir = AFS_FS_I(parent->d_inode);
+ dir = AFS_FS_I(d_inode(parent));
/* validate the parent directory */
if (test_bit(AFS_VNODE_MODIFIED, &dir->flags))
@@ -623,9 +623,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
switch (ret) {
case 0:
/* the filename maps to something */
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
goto out_bad;
- if (is_bad_inode(dentry->d_inode)) {
+ if (is_bad_inode(d_inode(dentry))) {
printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
dentry);
goto out_bad;
@@ -647,7 +647,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
_debug("%pd: file deleted (uq %u -> %u I:%u)",
dentry, fid.unique,
vnode->fid.unique,
- dentry->d_inode->i_generation);
+ d_inode(dentry)->i_generation);
spin_lock(&vnode->lock);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
spin_unlock(&vnode->lock);
@@ -658,7 +658,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
case -ENOENT:
/* the filename is unknown */
_debug("%pd: dirent not found", dentry);
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
goto not_found;
goto out_valid;
@@ -703,9 +703,9 @@ static int afs_d_delete(const struct dentry *dentry)
if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
goto zap;
- if (dentry->d_inode &&
- (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags) ||
- test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(dentry->d_inode)->flags)))
+ if (d_really_is_positive(dentry) &&
+ (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(d_inode(dentry))->flags) ||
+ test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(d_inode(dentry))->flags)))
goto zap;
_leave(" = 0 [keep]");
@@ -814,8 +814,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
if (ret < 0)
goto rmdir_error;
- if (dentry->d_inode) {
- vnode = AFS_FS_I(dentry->d_inode);
+ if (d_really_is_positive(dentry)) {
+ vnode = AFS_FS_I(d_inode(dentry));
clear_nlink(&vnode->vfs_inode);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
afs_discard_callback_on_delete(vnode);
@@ -856,8 +856,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
goto error;
}
- if (dentry->d_inode) {
- vnode = AFS_FS_I(dentry->d_inode);
+ if (d_really_is_positive(dentry)) {
+ vnode = AFS_FS_I(d_inode(dentry));
/* make sure we have a callback promise on the victim */
ret = afs_validate(vnode, key);
@@ -869,7 +869,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (ret < 0)
goto remove_error;
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
/* if the file wasn't deleted due to excess hard links, the
* fileserver will break the callback promise on the file - if
* it had one - before it returns to us, and if it was deleted,
@@ -879,7 +879,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
* or it was outstanding on a different server, then it won't
* break it either...
*/
- vnode = AFS_FS_I(dentry->d_inode);
+ vnode = AFS_FS_I(d_inode(dentry));
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
_debug("AFS_VNODE_DELETED");
if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
@@ -977,7 +977,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
struct key *key;
int ret;
- vnode = AFS_FS_I(from->d_inode);
+ vnode = AFS_FS_I(d_inode(from));
dvnode = AFS_FS_I(dir);
_enter("{%x:%u},{%x:%u},{%pd}",
@@ -1089,7 +1089,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct key *key;
int ret;
- vnode = AFS_FS_I(old_dentry->d_inode);
+ vnode = AFS_FS_I(d_inode(old_dentry));
orig_dvnode = AFS_FS_I(old_dir);
new_dvnode = AFS_FS_I(new_dir);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 932ce07948b3..999bc3caec92 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -31,8 +31,6 @@ const struct file_operations afs_file_operations = {
.open = afs_open,
.release = afs_release,
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = afs_file_write,
.mmap = generic_file_readonly_mmap,
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 8a1d38ef0fc2..e06f5a23352a 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -379,7 +379,7 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
{
struct inode *inode;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
_enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
@@ -458,7 +458,7 @@ void afs_evict_inode(struct inode *inode)
*/
int afs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
struct key *key;
int ret;
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 0dd4dafee10b..91ea1aa0d8b3 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -22,9 +22,12 @@
int afs_abort_to_error(u32 abort_code)
{
switch (abort_code) {
+ /* low errno codes inserted into abort namespace */
case 13: return -EACCES;
case 27: return -EFBIG;
case 30: return -EROFS;
+
+ /* VICE "special error" codes; 101 - 111 */
case VSALVAGE: return -EIO;
case VNOVNODE: return -ENOENT;
case VNOVOL: return -ENOMEDIUM;
@@ -36,11 +39,18 @@ int afs_abort_to_error(u32 abort_code)
case VOVERQUOTA: return -EDQUOT;
case VBUSY: return -EBUSY;
case VMOVED: return -ENXIO;
- case 0x2f6df0a: return -EWOULDBLOCK;
+
+ /* Unified AFS error table; ET "uae" == 0x2f6df00 */
+ case 0x2f6df00: return -EPERM;
+ case 0x2f6df01: return -ENOENT;
+ case 0x2f6df04: return -EIO;
+ case 0x2f6df0a: return -EAGAIN;
+ case 0x2f6df0b: return -ENOMEM;
case 0x2f6df0c: return -EACCES;
case 0x2f6df0f: return -EBUSY;
case 0x2f6df10: return -EEXIST;
case 0x2f6df11: return -EXDEV;
+ case 0x2f6df12: return -ENODEV;
case 0x2f6df13: return -ENOTDIR;
case 0x2f6df14: return -EISDIR;
case 0x2f6df15: return -EINVAL;
@@ -54,8 +64,12 @@ int afs_abort_to_error(u32 abort_code)
case 0x2f6df23: return -ENAMETOOLONG;
case 0x2f6df24: return -ENOLCK;
case 0x2f6df26: return -ENOTEMPTY;
+ case 0x2f6df28: return -EWOULDBLOCK;
+ case 0x2f6df69: return -ENOTCONN;
+ case 0x2f6df6c: return -ETIMEDOUT;
case 0x2f6df78: return -EDQUOT;
+ /* RXKAD abort codes; from include/rxrpc/packet.h. ET "RXK" == 0x1260B00 */
case RXKADINCONSISTENCY: return -EPROTO;
case RXKADPACKETSHORT: return -EPROTO;
case RXKADLEVELFAIL: return -EKEYREJECTED;
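
[Editor's note] The expanded table above follows the "uae" base noted in the comment: Unified AFS error-table codes are 0x2f6df00 plus a small offset. A throwaway userspace check of that arithmetic for three of the entries:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int uae = 0x2f6df00;	/* ET "uae" base */

		printf("uae+0x0a = 0x%x\n", uae + 0x0a); /* 0x2f6df0a -> -EAGAIN      */
		printf("uae+0x28 = 0x%x\n", uae + 0x28); /* 0x2f6df28 -> -EWOULDBLOCK */
		printf("uae+0x78 = 0x%x\n", uae + 0x78); /* 0x2f6df78 -> -EDQUOT      */
		return 0;
	}
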
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 938c5ab06d5a..ccd0b212e82a 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -134,7 +134,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
_enter("{%pd}", mntpt);
- BUG_ON(!mntpt->d_inode);
+ BUG_ON(!d_inode(mntpt));
ret = -ENOMEM;
devname = (char *) get_zeroed_page(GFP_KERNEL);
@@ -145,7 +145,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
if (!options)
goto error_no_options;
- vnode = AFS_FS_I(mntpt->d_inode);
+ vnode = AFS_FS_I(d_inode(mntpt));
if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
/* if the directory is a pseudo directory, use the d_name */
static const char afs_root_cell[] = ":root.cell.";
@@ -169,14 +169,14 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
}
} else {
/* read the contents of the AFS special symlink */
- loff_t size = i_size_read(mntpt->d_inode);
+ loff_t size = i_size_read(d_inode(mntpt));
char *buf;
ret = -EINVAL;
if (size > PAGE_SIZE - 1)
goto error_no_page;
- page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
+ page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto error_no_page;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index dbc732e9a5c0..3a57a1b0fb51 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -770,15 +770,12 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
void afs_send_empty_reply(struct afs_call *call)
{
struct msghdr msg;
- struct kvec iov[1];
_enter("");
- iov[0].iov_base = NULL;
- iov[0].iov_len = 0;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0); /* WTF? */
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index c4861557e385..1fb4a5129f7d 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -529,7 +529,7 @@ static void afs_destroy_inode(struct inode *inode)
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct afs_volume_status vs;
- struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
struct key *key;
int ret;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c13cb08964ed..0714abcd7f32 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -14,7 +14,6 @@
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
-#include <linux/aio.h>
#include "internal.h"
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
diff --git a/fs/aio.c b/fs/aio.c
index a793f7023755..480440f4701f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -77,6 +77,11 @@ struct kioctx_cpu {
unsigned reqs_available;
};
+struct ctx_rq_wait {
+ struct completion comp;
+ atomic_t count;
+};
+
struct kioctx {
struct percpu_ref users;
atomic_t dead;
@@ -115,7 +120,7 @@ struct kioctx {
/*
* signals when all in-flight requests are done
*/
- struct completion *requests_done;
+ struct ctx_rq_wait *rq_wait;
struct {
/*
@@ -151,6 +156,38 @@ struct kioctx {
unsigned id;
};
+/*
+ * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
+ * cancelled or completed (this makes a certain amount of sense because
+ * successful cancellation - io_cancel() - does deliver the completion to
+ * userspace).
+ *
+ * And since most things don't implement kiocb cancellation and we'd really like
+ * kiocb completion to be lockless when possible, we use ki_cancel to
+ * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
+ * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
+ */
+#define KIOCB_CANCELLED ((void *) (~0ULL))
+
+struct aio_kiocb {
+ struct kiocb common;
+
+ struct kioctx *ki_ctx;
+ kiocb_cancel_fn *ki_cancel;
+
+ struct iocb __user *ki_user_iocb; /* user's aiocb */
+ __u64 ki_user_data; /* user's data for completion */
+
+ struct list_head ki_list; /* the aio core uses this
+ * for cancellation */
+
+ /*
+ * If the aio_resfd field of the userspace iocb is not zero,
+ * this is the underlying eventfd context to deliver events to.
+ */
+ struct eventfd_ctx *ki_eventfd;
+};
+
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr; /* current system wide number of aio requests */
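
[Editor's note] struct aio_kiocb above embeds the common struct kiocb as its first member, and aio_complete()/kiocb_set_cancel_fn() recover the wrapper with container_of(). The standalone demo below uses simplified *_demo types (not the kernel structs) to show that recovery step:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kiocb_demo { long ki_pos; };

	struct aio_kiocb_demo {
		struct kiocb_demo common;	/* embedded, first member */
		unsigned long long ki_user_data;
	};

	/* Given only the embedded kiocb, get back the aio wrapper,
	 * as aio_complete() does via container_of(). */
	static void complete_demo(struct kiocb_demo *kiocb)
	{
		struct aio_kiocb_demo *req =
			container_of(kiocb, struct aio_kiocb_demo, common);

		printf("user data = %llu\n", req->ki_user_data);
	}

	int main(void)
	{
		struct aio_kiocb_demo req = { .ki_user_data = 42 };

		complete_demo(&req.common);	/* prints: user data = 42 */
		return 0;
	}
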
@@ -220,7 +257,7 @@ static int __init aio_setup(void)
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
- kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
@@ -484,8 +521,9 @@ static int aio_setup_ring(struct kioctx *ctx)
#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
-void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
+void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
+ struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
struct kioctx *ctx = req->ki_ctx;
unsigned long flags;
@@ -500,7 +538,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
-static int kiocb_cancel(struct kiocb *kiocb)
+static int kiocb_cancel(struct aio_kiocb *kiocb)
{
kiocb_cancel_fn *old, *cancel;
@@ -518,7 +556,7 @@ static int kiocb_cancel(struct kiocb *kiocb)
cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
} while (cancel != old);
- return cancel(kiocb);
+ return cancel(&kiocb->common);
}
static void free_ioctx(struct work_struct *work)
@@ -539,8 +577,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
/* At this point we know that there are no any in-flight requests */
- if (ctx->requests_done)
- complete(ctx->requests_done);
+ if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
+ complete(&ctx->rq_wait->comp);
INIT_WORK(&ctx->free_work, free_ioctx);
schedule_work(&ctx->free_work);
@@ -554,13 +592,13 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
static void free_ioctx_users(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, users);
- struct kiocb *req;
+ struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
while (!list_empty(&ctx->active_reqs)) {
req = list_first_entry(&ctx->active_reqs,
- struct kiocb, ki_list);
+ struct aio_kiocb, ki_list);
list_del_init(&req->ki_list);
kiocb_cancel(req);
@@ -659,8 +697,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
nr_events *= 2;
/* Prevent overflows */
- if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
- (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
+ if (nr_events > (0x10000000U / sizeof(struct io_event))) {
pr_debug("ENOMEM: nr_events too high\n");
return ERR_PTR(-EINVAL);
}
@@ -751,7 +788,7 @@ err:
* the rapid destruction of the kioctx.
*/
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
- struct completion *requests_done)
+ struct ctx_rq_wait *wait)
{
struct kioctx_table *table;
@@ -781,27 +818,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
if (ctx->mmap_size)
vm_munmap(ctx->mmap_base, ctx->mmap_size);
- ctx->requests_done = requests_done;
+ ctx->rq_wait = wait;
percpu_ref_kill(&ctx->users);
return 0;
}
-/* wait_on_sync_kiocb:
- * Waits on the given sync kiocb to complete.
- */
-ssize_t wait_on_sync_kiocb(struct kiocb *req)
-{
- while (!req->ki_ctx) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (req->ki_ctx)
- break;
- io_schedule();
- }
- __set_current_state(TASK_RUNNING);
- return req->ki_user_data;
-}
-EXPORT_SYMBOL(wait_on_sync_kiocb);
-
/*
* exit_aio: called when the last user of mm goes away. At this point, there is
* no way for any new requests to be submited or any of the io_* syscalls to be
@@ -813,18 +834,24 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
void exit_aio(struct mm_struct *mm)
{
struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
- int i;
+ struct ctx_rq_wait wait;
+ int i, skipped;
if (!table)
return;
+ atomic_set(&wait.count, table->nr);
+ init_completion(&wait.comp);
+
+ skipped = 0;
for (i = 0; i < table->nr; ++i) {
struct kioctx *ctx = table->table[i];
- struct completion requests_done =
- COMPLETION_INITIALIZER_ONSTACK(requests_done);
- if (!ctx)
+ if (!ctx) {
+ skipped++;
continue;
+ }
+
/*
* We don't need to bother with munmap() here - exit_mmap(mm)
* is coming and it'll unmap everything. And we simply can't,
@@ -833,10 +860,12 @@ void exit_aio(struct mm_struct *mm)
* that it needs to unmap the area, just set it to 0.
*/
ctx->mmap_size = 0;
- kill_ioctx(mm, ctx, &requests_done);
+ kill_ioctx(mm, ctx, &wait);
+ }
+ if (!atomic_sub_and_test(skipped, &wait.count)) {
/* Wait until all IO for the context are done. */
- wait_for_completion(&requests_done);
+ wait_for_completion(&wait.comp);
}
RCU_INIT_POINTER(mm->ioctx_table, NULL);
@@ -956,9 +985,9 @@ static void user_refill_reqs_available(struct kioctx *ctx)
* Allocate a slot for an aio request.
* Returns NULL if no requests are free.
*/
-static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
- struct kiocb *req;
+ struct aio_kiocb *req;
if (!get_reqs_available(ctx)) {
user_refill_reqs_available(ctx);
@@ -979,10 +1008,10 @@ out_put:
return NULL;
}
-static void kiocb_free(struct kiocb *req)
+static void kiocb_free(struct aio_kiocb *req)
{
- if (req->ki_filp)
- fput(req->ki_filp);
+ if (req->common.ki_filp)
+ fput(req->common.ki_filp);
if (req->ki_eventfd != NULL)
eventfd_ctx_put(req->ki_eventfd);
kmem_cache_free(kiocb_cachep, req);
@@ -1018,8 +1047,9 @@ out:
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
-void aio_complete(struct kiocb *iocb, long res, long res2)
+static void aio_complete(struct kiocb *kiocb, long res, long res2)
{
+ struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring *ring;
struct io_event *ev_page, *event;
@@ -1033,13 +1063,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
* ref, no other paths have a way to get another ref
* - the sync task helpfully left a reference to itself in the iocb
*/
- if (is_sync_kiocb(iocb)) {
- iocb->ki_user_data = res;
- smp_wmb();
- iocb->ki_ctx = ERR_PTR(-EXDEV);
- wake_up_process(iocb->ki_obj.tsk);
- return;
- }
+ BUG_ON(is_sync_kiocb(kiocb));
if (iocb->ki_list.next) {
unsigned long flags;
@@ -1065,7 +1089,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
event = ev_page + pos % AIO_EVENTS_PER_PAGE;
- event->obj = (u64)(unsigned long)iocb->ki_obj.user;
+ event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
event->data = iocb->ki_user_data;
event->res = res;
event->res2 = res2;
@@ -1074,7 +1098,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
- ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
+ ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
res, res2);
/* after flagging the request as done, we
@@ -1121,7 +1145,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
percpu_ref_put(&ctx->reqs);
}
-EXPORT_SYMBOL(aio_complete);
/* aio_read_events_ring
* Pull an event off of the ioctx's event ring. Returns the number of
@@ -1321,15 +1344,17 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
struct kioctx *ioctx = lookup_ioctx(ctx);
if (likely(NULL != ioctx)) {
- struct completion requests_done =
- COMPLETION_INITIALIZER_ONSTACK(requests_done);
+ struct ctx_rq_wait wait;
int ret;
+ init_completion(&wait.comp);
+ atomic_set(&wait.count, 1);
+
/* Pass requests_done to kill_ioctx() where it can be set
* in a thread-safe way. If we try to set it here then we have
* a race condition if two io_destroy() called simultaneously.
*/
- ret = kill_ioctx(current->mm, ioctx, &requests_done);
+ ret = kill_ioctx(current->mm, ioctx, &wait);
percpu_ref_put(&ioctx->users);
/* Wait until all IO for the context are done. Otherwise kernel
@@ -1337,7 +1362,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
* is destroyed.
*/
if (!ret)
- wait_for_completion(&requests_done);
+ wait_for_completion(&wait.comp);
return ret;
}
@@ -1345,50 +1370,21 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
return -EINVAL;
}
-typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
- unsigned long, loff_t);
typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);
-static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
- int rw, char __user *buf,
- unsigned long *nr_segs,
- struct iovec **iovec,
- bool compat)
+static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len,
+ struct iovec **iovec,
+ bool compat,
+ struct iov_iter *iter)
{
- ssize_t ret;
-
- *nr_segs = kiocb->ki_nbytes;
-
#ifdef CONFIG_COMPAT
if (compat)
- ret = compat_rw_copy_check_uvector(rw,
+ return compat_import_iovec(rw,
(struct compat_iovec __user *)buf,
- *nr_segs, UIO_FASTIOV, *iovec, iovec);
- else
+ len, UIO_FASTIOV, iovec, iter);
#endif
- ret = rw_copy_check_uvector(rw,
- (struct iovec __user *)buf,
- *nr_segs, UIO_FASTIOV, *iovec, iovec);
- if (ret < 0)
- return ret;
-
- /* ki_nbytes now reflect bytes instead of segs */
- kiocb->ki_nbytes = ret;
- return 0;
-}
-
-static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
- int rw, char __user *buf,
- unsigned long *nr_segs,
- struct iovec *iovec)
-{
- if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
- return -EFAULT;
-
- iovec->iov_base = buf;
- iovec->iov_len = kiocb->ki_nbytes;
- *nr_segs = 1;
- return 0;
+ return import_iovec(rw, (struct iovec __user *)buf,
+ len, UIO_FASTIOV, iovec, iter);
}
/*
@@ -1396,14 +1392,12 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
* Performs the initial checks and io submission.
*/
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
- char __user *buf, bool compat)
+ char __user *buf, size_t len, bool compat)
{
struct file *file = req->ki_filp;
ssize_t ret;
- unsigned long nr_segs;
int rw;
fmode_t mode;
- aio_rw_op *rw_op;
rw_iter_op *iter_op;
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
@@ -1413,7 +1407,6 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
case IOCB_CMD_PREADV:
mode = FMODE_READ;
rw = READ;
- rw_op = file->f_op->aio_read;
iter_op = file->f_op->read_iter;
goto rw_common;
@@ -1421,51 +1414,40 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
case IOCB_CMD_PWRITEV:
mode = FMODE_WRITE;
rw = WRITE;
- rw_op = file->f_op->aio_write;
iter_op = file->f_op->write_iter;
goto rw_common;
rw_common:
if (unlikely(!(file->f_mode & mode)))
return -EBADF;
- if (!rw_op && !iter_op)
+ if (!iter_op)
return -EINVAL;
- ret = (opcode == IOCB_CMD_PREADV ||
- opcode == IOCB_CMD_PWRITEV)
- ? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
- &iovec, compat)
- : aio_setup_single_vector(req, rw, buf, &nr_segs,
- iovec);
+ if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
+ ret = aio_setup_vectored_rw(rw, buf, len,
+ &iovec, compat, &iter);
+ else {
+ ret = import_single_range(rw, buf, len, iovec, &iter);
+ iovec = NULL;
+ }
if (!ret)
- ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+ ret = rw_verify_area(rw, file, &req->ki_pos,
+ iov_iter_count(&iter));
if (ret < 0) {
- if (iovec != inline_vecs)
- kfree(iovec);
+ kfree(iovec);
return ret;
}
- req->ki_nbytes = ret;
-
- /* XXX: move/kill - rw_verify_area()? */
- /* This matches the pread()/pwrite() logic */
- if (req->ki_pos < 0) {
- ret = -EINVAL;
- break;
- }
+ len = ret;
if (rw == WRITE)
file_start_write(file);
- if (iter_op) {
- iov_iter_init(&iter, rw, iovec, nr_segs, req->ki_nbytes);
- ret = iter_op(req, &iter);
- } else {
- ret = rw_op(req, iovec, nr_segs, req->ki_pos);
- }
+ ret = iter_op(req, &iter);
if (rw == WRITE)
file_end_write(file);
+ kfree(iovec);
break;
case IOCB_CMD_FDSYNC:
@@ -1487,9 +1469,6 @@ rw_common:
return -EINVAL;
}
- if (iovec != inline_vecs)
- kfree(iovec);
-
if (ret != -EIOCBQUEUED) {
/*
* There's no easy way to restart the syscall since other AIO's
@@ -1508,7 +1487,7 @@ rw_common:
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb, bool compat)
{
- struct kiocb *req;
+ struct aio_kiocb *req;
ssize_t ret;
/* enforce forwards compatibility on users */
@@ -1531,11 +1510,14 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
if (unlikely(!req))
return -EAGAIN;
- req->ki_filp = fget(iocb->aio_fildes);
- if (unlikely(!req->ki_filp)) {
+ req->common.ki_filp = fget(iocb->aio_fildes);
+ if (unlikely(!req->common.ki_filp)) {
ret = -EBADF;
goto out_put_req;
}
+ req->common.ki_pos = iocb->aio_offset;
+ req->common.ki_complete = aio_complete;
+ req->common.ki_flags = iocb_flags(req->common.ki_filp);
if (iocb->aio_flags & IOCB_FLAG_RESFD) {
/*
@@ -1550,6 +1532,8 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
req->ki_eventfd = NULL;
goto out_put_req;
}
+
+ req->common.ki_flags |= IOCB_EVENTFD;
}
ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
@@ -1558,13 +1542,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
goto out_put_req;
}
- req->ki_obj.user = user_iocb;
+ req->ki_user_iocb = user_iocb;
req->ki_user_data = iocb->aio_data;
- req->ki_pos = iocb->aio_offset;
- req->ki_nbytes = iocb->aio_nbytes;
- ret = aio_run_iocb(req, iocb->aio_lio_opcode,
+ ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode,
(char __user *)(unsigned long)iocb->aio_buf,
+ iocb->aio_nbytes,
compat);
if (ret)
goto out_put_req;
@@ -1651,10 +1634,10 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
/* lookup_kiocb
* Finds a given iocb for cancellation.
*/
-static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
- u32 key)
+static struct aio_kiocb *
+lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key)
{
- struct list_head *pos;
+ struct aio_kiocb *kiocb;
assert_spin_locked(&ctx->ctx_lock);
@@ -1662,9 +1645,8 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
return NULL;
/* TODO: use a hash or array, this sucks. */
- list_for_each(pos, &ctx->active_reqs) {
- struct kiocb *kiocb = list_kiocb(pos);
- if (kiocb->ki_obj.user == iocb)
+ list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+ if (kiocb->ki_user_iocb == iocb)
return kiocb;
}
return NULL;
@@ -1684,7 +1666,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
struct io_event __user *, result)
{
struct kioctx *ctx;
- struct kiocb *kiocb;
+ struct aio_kiocb *kiocb;
u32 key;
int ret;
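
The fs/aio.c hunks above move the read/write paths onto iov_iter and route completions through the new aio_kiocb/ki_complete plumbing, while the user-visible io_setup()/io_submit()/io_getevents()/io_destroy() interface stays the same. As a point of reference, here is a minimal user-space sketch of that interface using the raw syscalls (no libaio); the file name and sizes are illustrative only:

/* Minimal Linux AIO user-space example using the raw syscalls (no libaio). */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <time.h>

static long io_setup(unsigned nr, aio_context_t *ctx)
{
	return syscall(__NR_io_setup, nr, ctx);
}
static long io_submit(aio_context_t ctx, long n, struct iocb **iocbpp)
{
	return syscall(__NR_io_submit, ctx, n, iocbpp);
}
static long io_getevents(aio_context_t ctx, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx, min_nr, nr, events, timeout);
}
static long io_destroy(aio_context_t ctx)
{
	return syscall(__NR_io_destroy, ctx);
}

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	static const char msg[] = "hello aio\n";
	int fd = open("/tmp/aio-example", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0 || io_setup(8, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PWRITE;	/* single-buffer path in aio_run_iocb() */
	cb.aio_buf = (unsigned long)msg;
	cb.aio_nbytes = sizeof(msg) - 1;
	cb.aio_offset = 0;

	if (io_submit(ctx, 1, cbs) != 1)
		return 1;
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return 1;
	printf("res=%lld\n", (long long)ev.res);

	/* io_destroy() waits for outstanding requests, per the wait.comp change above. */
	io_destroy(ctx);
	close(fd);
	return 0;
}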
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 8e98cf954bab..5b700ef1e59d 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -213,7 +213,7 @@ void autofs4_clean_ino(struct autofs_info *);
static inline int autofs_prepare_pipe(struct file *pipe)
{
- if (!pipe->f_op->write)
+ if (!(pipe->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
if (!S_ISFIFO(file_inode(pipe)->i_mode))
return -EINVAL;
@@ -235,12 +235,12 @@ static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi)
static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi)
{
- return sbi->sb->s_root->d_inode->i_ino;
+ return d_inode(sbi->sb->s_root)->i_ino;
}
static inline int simple_positive(struct dentry *dentry)
{
- return dentry->d_inode && !d_unhashed(dentry);
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
static inline void __autofs4_add_expiring(struct dentry *dentry)
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 11dd118f75e2..1cebc3c52fa5 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry,
return NULL;
}
- if (dentry->d_inode && d_is_symlink(dentry)) {
+ if (d_really_is_positive(dentry) && d_is_symlink(dentry)) {
DPRINTK("checking symlink %p %pd", dentry, dentry);
/*
* A symlink can't be "busy" in the usual sense so
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 1c55388ae633..a3ae0b2aeb5a 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -71,7 +71,7 @@ void autofs4_kill_sb(struct super_block *sb)
static int autofs4_show_options(struct seq_file *m, struct dentry *root)
{
struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
- struct inode *root_inode = root->d_sb->s_root->d_inode;
+ struct inode *root_inode = d_inode(root->d_sb->s_root);
if (!sbi)
return 0;
@@ -352,8 +352,8 @@ struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode)
inode->i_mode = mode;
if (sb->s_root) {
- inode->i_uid = sb->s_root->d_inode->i_uid;
- inode->i_gid = sb->s_root->d_inode->i_gid;
+ inode->i_uid = d_inode(sb->s_root)->i_uid;
+ inode->i_gid = d_inode(sb->s_root)->i_gid;
}
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_ino = get_next_ino();
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 7e44fdd03e2d..c6d7d3dbd52a 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -240,7 +240,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry,
spin_lock(&expiring->d_lock);
/* We've already been dentry_iput or unlinked */
- if (!expiring->d_inode)
+ if (d_really_is_negative(expiring))
goto next;
qstr = &expiring->d_name;
@@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
* having d_mountpoint() true, so there's no need to call back
* to the daemon.
*/
- if (dentry->d_inode && d_is_symlink(dentry)) {
+ if (d_really_is_positive(dentry) && d_is_symlink(dentry)) {
spin_unlock(&sbi->fs_lock);
goto done;
}
@@ -459,7 +459,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
return 0;
if (d_mountpoint(dentry))
return 0;
- inode = ACCESS_ONCE(dentry->d_inode);
+ inode = d_inode_rcu(dentry);
if (inode && S_ISLNK(inode->i_mode))
return -EISDIR;
if (list_empty(&dentry->d_subdirs))
@@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
* an incorrect ELOOP error return.
*/
if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
- (dentry->d_inode && d_is_symlink(dentry)))
+ (d_really_is_positive(dentry) && d_is_symlink(dentry)))
status = -EISDIR;
}
spin_unlock(&sbi->fs_lock);
@@ -625,8 +625,8 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
}
dput(ino->dentry);
- dentry->d_inode->i_size = 0;
- clear_nlink(dentry->d_inode);
+ d_inode(dentry)->i_size = 0;
+ clear_nlink(d_inode(dentry));
dir->i_mtime = CURRENT_TIME;
@@ -719,8 +719,8 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
atomic_dec(&p_ino->count);
}
dput(ino->dentry);
- dentry->d_inode->i_size = 0;
- clear_nlink(dentry->d_inode);
+ d_inode(dentry)->i_size = 0;
+ clear_nlink(d_inode(dentry));
if (dir->i_nlink)
drop_nlink(dir);
@@ -839,7 +839,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
*/
int is_autofs4_dentry(struct dentry *dentry)
{
- return dentry && dentry->d_inode &&
+ return dentry && d_really_is_positive(dentry) &&
dentry->d_op == &autofs4_dentry_operations &&
dentry->d_fsdata != NULL;
}
diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
index 1e8ea192be2b..de58cc7b8076 100644
--- a/fs/autofs4/symlink.c
+++ b/fs/autofs4/symlink.c
@@ -18,7 +18,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
struct autofs_info *ino = autofs4_dentry_ino(dentry);
if (ino && !autofs4_oz_mode(sbi))
ino->last_used = jiffies;
- nd_set_link(nd, dentry->d_inode->i_private);
+ nd_set_link(nd, d_inode(dentry)->i_private);
return NULL;
}
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 116fd38ee472..35b755e79c2d 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -70,7 +70,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
mutex_lock(&sbi->pipe_mutex);
while (bytes &&
- (wr = file->f_op->write(file,data,bytes,&file->f_pos)) > 0) {
+ (wr = __vfs_write(file,data,bytes,&file->f_pos)) > 0) {
data += wr;
bytes -= wr;
}
@@ -322,7 +322,7 @@ static int validate_request(struct autofs_wait_queue **wait,
* continue on and create a new request.
*/
if (!IS_ROOT(dentry)) {
- if (dentry->d_inode && d_unhashed(dentry)) {
+ if (d_really_is_positive(dentry) && d_unhashed(dentry)) {
struct dentry *parent = dentry->d_parent;
new = d_lookup(parent, &dentry->d_name);
if (new)
@@ -364,7 +364,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
if (pid == 0 || tgid == 0)
return -ENOENT;
- if (!dentry->d_inode) {
+ if (d_really_is_negative(dentry)) {
/*
* A wait for a negative dentry is invalid for certain
* cases. A direct or offset mount "always" has its mount
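
The autofs4 hunks above are part of a tree-wide conversion away from open-coded dentry->d_inode dereferences and towards the d_inode()/d_really_is_positive()/d_really_is_negative() accessors (the real definitions live in include/linux/dcache.h). A self-contained sketch of roughly what those helpers boil down to, using toy stand-in structs rather than the kernel's types:

#include <stdbool.h>
#include <stddef.h>

struct inode { unsigned long i_ino; };
struct dentry { struct inode *d_inode; };	/* toy stand-in for the VFS dentry */

/* Roughly what the accessors used above amount to (sketch, not verbatim). */
static inline struct inode *d_inode(const struct dentry *dentry)
{
	return dentry->d_inode;
}

static inline bool d_really_is_positive(const struct dentry *dentry)
{
	return dentry->d_inode != NULL;		/* dentry points at an inode */
}

static inline bool d_really_is_negative(const struct dentry *dentry)
{
	return dentry->d_inode == NULL;		/* negative dentry: no inode */
}

int main(void)
{
	struct inode ino = { .i_ino = 42 };
	struct dentry pos = { .d_inode = &ino };
	struct dentry neg = { .d_inode = NULL };

	return (d_inode(&pos) == &ino &&
		d_really_is_positive(&pos) &&
		d_really_is_negative(&neg)) ? 0 : 1;
}

The point of routing every access through one helper is that later VFS work can redefine what "the inode backing this dentry" means in a single place instead of in every filesystem.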
diff --git a/fs/befs/befs.h b/fs/befs/befs.h
index 3a7813ab8c95..1fead8d56a98 100644
--- a/fs/befs/befs.h
+++ b/fs/befs/befs.h
@@ -19,16 +19,16 @@ typedef u64 befs_blocknr_t;
* BeFS in memory structures
*/
-typedef struct befs_mount_options {
+struct befs_mount_options {
kgid_t gid;
kuid_t uid;
int use_gid;
int use_uid;
int debug;
char *iocharset;
-} befs_mount_options;
+};
-typedef struct befs_sb_info {
+struct befs_sb_info {
u32 magic1;
u32 block_size;
u32 block_shift;
@@ -52,12 +52,11 @@ typedef struct befs_sb_info {
befs_inode_addr indices;
u32 magic3;
- befs_mount_options mount_opts;
+ struct befs_mount_options mount_opts;
struct nls_table *nls;
+};
-} befs_sb_info;
-
-typedef struct befs_inode_info {
+struct befs_inode_info {
u32 i_flags;
u32 i_type;
@@ -71,8 +70,7 @@ typedef struct befs_inode_info {
} i_data;
struct inode vfs_inode;
-
-} befs_inode_info;
+};
enum befs_err {
BEFS_OK,
@@ -105,13 +103,13 @@ void befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *);
/* Gets a pointer to the private portion of the super_block
* structure from the public part
*/
-static inline befs_sb_info *
+static inline struct befs_sb_info *
BEFS_SB(const struct super_block *super)
{
- return (befs_sb_info *) super->s_fs_info;
+ return (struct befs_sb_info *) super->s_fs_info;
}
-static inline befs_inode_info *
+static inline struct befs_inode_info *
BEFS_I(const struct inode *inode)
{
return list_entry(inode, struct befs_inode_info, vfs_inode);
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index 1e8e0b8d8836..ebd50718659f 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -168,7 +168,7 @@ befs_count_blocks(struct super_block * sb, befs_data_stream * ds)
befs_blocknr_t blocks;
befs_blocknr_t datablocks; /* File data blocks */
befs_blocknr_t metablocks; /* FS metadata blocks */
- befs_sb_info *befs_sb = BEFS_SB(sb);
+ struct befs_sb_info *befs_sb = BEFS_SB(sb);
befs_debug(sb, "---> %s", __func__);
@@ -428,7 +428,7 @@ befs_find_brun_dblindirect(struct super_block *sb,
struct buffer_head *indir_block;
befs_block_run indir_run;
befs_disk_inode_addr *iaddr_array = NULL;
- befs_sb_info *befs_sb = BEFS_SB(sb);
+ struct befs_sb_info *befs_sb = BEFS_SB(sb);
befs_blocknr_t indir_start_blk =
data->max_indirect_range >> befs_sb->block_shift;
diff --git a/fs/befs/io.c b/fs/befs/io.c
index 0408a3d601d0..7a5b4ec21c56 100644
--- a/fs/befs/io.c
+++ b/fs/befs/io.c
@@ -28,7 +28,7 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
{
struct buffer_head *bh = NULL;
befs_blocknr_t block = 0;
- befs_sb_info *befs_sb = BEFS_SB(sb);
+ struct befs_sb_info *befs_sb = BEFS_SB(sb);
befs_debug(sb, "---> Enter %s "
"[%u, %hu, %hu]", __func__, iaddr.allocation_group,
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index e089f1985fca..7943533c3868 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -51,7 +51,7 @@ static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
static void befs_put_super(struct super_block *);
static int befs_remount(struct super_block *, int *, char *);
static int befs_statfs(struct dentry *, struct kstatfs *);
-static int parse_options(char *, befs_mount_options *);
+static int parse_options(char *, struct befs_mount_options *);
static const struct super_operations befs_sops = {
.alloc_inode = befs_alloc_inode, /* allocate a new inode */
@@ -304,9 +304,8 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
{
struct buffer_head *bh = NULL;
befs_inode *raw_inode = NULL;
-
- befs_sb_info *befs_sb = BEFS_SB(sb);
- befs_inode_info *befs_ino = NULL;
+ struct befs_sb_info *befs_sb = BEFS_SB(sb);
+ struct befs_inode_info *befs_ino = NULL;
struct inode *inode;
long ret = -EIO;
@@ -472,7 +471,7 @@ static void *
befs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct super_block *sb = dentry->d_sb;
- befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ struct befs_inode_info *befs_ino = BEFS_I(d_inode(dentry));
befs_data_stream *data = &befs_ino->i_data.ds;
befs_off_t len = data->size;
char *link;
@@ -502,7 +501,8 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd)
static void *
befs_fast_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ struct befs_inode_info *befs_ino = BEFS_I(d_inode(dentry));
+
nd_set_link(nd, befs_ino->i_data.symlink);
return NULL;
}
@@ -669,7 +669,7 @@ static const match_table_t befs_tokens = {
};
static int
-parse_options(char *options, befs_mount_options * opts)
+parse_options(char *options, struct befs_mount_options *opts)
{
char *p;
substring_t args[MAX_OPT_ARGS];
@@ -769,7 +769,7 @@ static int
befs_fill_super(struct super_block *sb, void *data, int silent)
{
struct buffer_head *bh;
- befs_sb_info *befs_sb;
+ struct befs_sb_info *befs_sb;
befs_super_block *disk_sb;
struct inode *root;
long ret = -EINVAL;
diff --git a/fs/befs/super.c b/fs/befs/super.c
index ca40f828f64d..aeafc4d84278 100644
--- a/fs/befs/super.c
+++ b/fs/befs/super.c
@@ -24,7 +24,7 @@
int
befs_load_sb(struct super_block *sb, befs_super_block * disk_sb)
{
- befs_sb_info *befs_sb = BEFS_SB(sb);
+ struct befs_sb_info *befs_sb = BEFS_SB(sb);
/* Check the byte order of the filesystem */
if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_LE)
@@ -59,7 +59,7 @@ befs_load_sb(struct super_block *sb, befs_super_block * disk_sb)
int
befs_check_sb(struct super_block *sb)
{
- befs_sb_info *befs_sb = BEFS_SB(sb);
+ struct befs_sb_info *befs_sb = BEFS_SB(sb);
/* Check magic headers of super block */
if ((befs_sb->magic1 != BEFS_SUPER_MAGIC1)
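
Beyond dropping the struct typedefs, the befs code above keeps the standard embedded-inode pattern: struct befs_inode_info embeds the VFS inode and BEFS_I() recovers the container via list_entry(), which is container_of() in disguise. A generic, self-contained sketch of that pattern with made-up names:

#include <stddef.h>
#include <stdio.h>

/* Generic container_of: recover the wrapper from a pointer to its member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vnode { int ino; };			/* stand-in for struct inode */

struct example_inode_info {
	unsigned int flags;
	struct vnode vfs_inode;			/* embedded "VFS" object */
};

static struct example_inode_info *EXAMPLE_I(struct vnode *v)
{
	return container_of(v, struct example_inode_info, vfs_inode);
}

int main(void)
{
	struct example_inode_info info = { .flags = 7, .vfs_inode = { .ino = 42 } };
	struct vnode *v = &info.vfs_inode;

	printf("flags=%u ino=%d\n", EXAMPLE_I(v)->flags, v->ino);
	return 0;
}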
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 08063ae0a17c..3ec6113146c0 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -86,7 +86,7 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
inode = new_inode(s);
if (!inode)
- return -ENOSPC;
+ return -ENOMEM;
mutex_lock(&info->bfs_lock);
ino = find_first_zero_bit(info->si_imap, info->si_lasti + 1);
if (ino > info->si_lasti) {
@@ -153,7 +153,7 @@ static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry,
static int bfs_link(struct dentry *old, struct inode *dir,
struct dentry *new)
{
- struct inode *inode = old->d_inode;
+ struct inode *inode = d_inode(old);
struct bfs_sb_info *info = BFS_SB(inode->i_sb);
int err;
@@ -176,7 +176,7 @@ static int bfs_link(struct dentry *old, struct inode *dir,
static int bfs_unlink(struct inode *dir, struct dentry *dentry)
{
int error = -ENOENT;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct buffer_head *bh;
struct bfs_dirent *de;
struct bfs_sb_info *info = BFS_SB(inode->i_sb);
@@ -216,7 +216,7 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
int error = -ENOENT;
old_bh = new_bh = NULL;
- old_inode = old_dentry->d_inode;
+ old_inode = d_inode(old_dentry);
if (S_ISDIR(old_inode->i_mode))
return -EINVAL;
@@ -231,7 +231,7 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto end_rename;
error = -EPERM;
- new_inode = new_dentry->d_inode;
+ new_inode = d_inode(new_dentry);
new_bh = bfs_find_entry(new_dir,
new_dentry->d_name.name,
new_dentry->d_name.len, &new_de);
@@ -293,7 +293,7 @@ static int bfs_add_entry(struct inode *dir, const unsigned char *name,
for (block = sblock; block <= eblock; block++) {
bh = sb_bread(dir->i_sb, block);
if (!bh)
- return -ENOSPC;
+ return -EIO;
for (off = 0; off < BFS_BSIZE; off += BFS_DIRENT_SIZE) {
de = (struct bfs_dirent *)(bh->b_data + off);
if (!de->ino) {
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index e7f88ace1a25..97f1b5160155 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -23,9 +23,7 @@
const struct file_operations bfs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 90bc079d9982..fdcb4d69f430 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -15,6 +15,7 @@
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
#include "bfs.h"
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 995986b8e36b..241ef68d2893 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -31,6 +31,7 @@
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
+#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
@@ -862,6 +863,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
+ unsigned long total_size = 0;
if (elf_ppnt->p_type != PT_LOAD)
continue;
@@ -909,25 +911,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
-#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
- /* Memory randomization might have been switched off
- * in runtime via sysctl or explicit setting of
- * personality flags.
- * If that is the case, retain the original non-zero
- * load_bias value in order to establish proper
- * non-randomized mappings.
- */
+ load_bias = ELF_ET_DYN_BASE - vaddr;
if (current->flags & PF_RANDOMIZE)
- load_bias = 0;
- else
- load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
-#else
- load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
-#endif
+ load_bias += arch_mmap_rnd();
+ load_bias = ELF_PAGESTART(load_bias);
+ total_size = total_mapping_size(elf_phdata,
+ loc->elf_ex.e_phnum);
+ if (!total_size) {
+ error = -EINVAL;
+ goto out_free_dentry;
+ }
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
- elf_prot, elf_flags, 0);
+ elf_prot, elf_flags, total_size);
if (BAD_ADDR(error)) {
retval = IS_ERR((void *)error) ?
PTR_ERR((void*)error) : -EINVAL;
@@ -1053,15 +1050,13 @@ static int load_elf_binary(struct linux_binprm *bprm)
current->mm->end_data = end_data;
current->mm->start_stack = bprm->p;
-#ifdef arch_randomize_brk
if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
current->mm->brk = current->mm->start_brk =
arch_randomize_brk(current->mm);
-#ifdef CONFIG_COMPAT_BRK
+#ifdef compat_brk_randomized
current->brk_randomized = 1;
#endif
}
-#endif
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
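
The binfmt_elf.c hunk above changes how the ET_DYN (PIE) load bias is computed: start from ELF_ET_DYN_BASE minus the first segment's virtual address, add arch_mmap_rnd() when randomization is enabled, page-align with ELF_PAGESTART, and map the whole image using total_mapping_size(). A self-contained sketch of just that arithmetic; the base constant and the randomness source below are placeholders, not the kernel's values:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		4096ULL
#define ELF_PAGESTART(v)	((v) & ~(PAGE_SIZE - 1))	/* round down to a page */
#define FAKE_ET_DYN_BASE	0x555555000000ULL		/* placeholder base address */

/* Placeholder for arch_mmap_rnd(): a page-aligned pseudo-random offset. */
static unsigned long long fake_mmap_rnd(void)
{
	return ((unsigned long long)rand() & 0x3ffff) << 12;
}

int main(void)
{
	unsigned long long first_vaddr = 0x400;	/* p_vaddr of the first PT_LOAD */
	int randomize = 1;			/* stands in for PF_RANDOMIZE */
	unsigned long long load_bias;

	load_bias = FAKE_ET_DYN_BASE - first_vaddr;
	if (randomize)
		load_bias += fake_mmap_rnd();
	load_bias = ELF_PAGESTART(load_bias);

	printf("load_bias           = %#llx\n", load_bias);
	printf("first segment maps at %#llx\n", load_bias + first_vaddr);
	return 0;
}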
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 97aff2879cda..78f005f37847 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -521,9 +522,8 @@ static int parse_command(const char __user *buffer, size_t count)
static void entry_status(Node *e, char *page)
{
- char *dp;
- char *status = "disabled";
- const char *flags = "flags: ";
+ char *dp = page;
+ const char *status = "disabled";
if (test_bit(Enabled, &e->flags))
status = "enabled";
@@ -533,12 +533,10 @@ static void entry_status(Node *e, char *page)
return;
}
- sprintf(page, "%s\ninterpreter %s\n", status, e->interpreter);
- dp = page + strlen(page);
+ dp += sprintf(dp, "%s\ninterpreter %s\n", status, e->interpreter);
/* print the special flags */
- sprintf(dp, "%s", flags);
- dp += strlen(flags);
+ dp += sprintf(dp, "flags: ");
if (e->flags & MISC_FMT_PRESERVE_ARGV0)
*dp++ = 'P';
if (e->flags & MISC_FMT_OPEN_BINARY)
@@ -550,21 +548,11 @@ static void entry_status(Node *e, char *page)
if (!test_bit(Magic, &e->flags)) {
sprintf(dp, "extension .%s\n", e->magic);
} else {
- int i;
-
- sprintf(dp, "offset %i\nmagic ", e->offset);
- dp = page + strlen(page);
- for (i = 0; i < e->size; i++) {
- sprintf(dp, "%02x", 0xff & (int) (e->magic[i]));
- dp += 2;
- }
+ dp += sprintf(dp, "offset %i\nmagic ", e->offset);
+ dp = bin2hex(dp, e->magic, e->size);
if (e->mask) {
- sprintf(dp, "\nmask ");
- dp += 6;
- for (i = 0; i < e->size; i++) {
- sprintf(dp, "%02x", 0xff & (int) (e->mask[i]));
- dp += 2;
- }
+ dp += sprintf(dp, "\nmask ");
+ dp = bin2hex(dp, e->mask, e->size);
}
*dp++ = '\n';
*dp = '\0';
@@ -603,7 +591,7 @@ static void kill_node(Node *e)
write_unlock(&entries_lock);
if (dentry) {
- drop_nlink(dentry->d_inode);
+ drop_nlink(d_inode(dentry));
d_drop(dentry);
dput(dentry);
simple_release_fs(&bm_mnt, &entry_count);
@@ -650,11 +638,11 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
case 3:
/* Delete this handler. */
root = dget(file->f_path.dentry->d_sb->s_root);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
kill_node(e);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(root);
break;
default:
@@ -687,14 +675,14 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
return PTR_ERR(e);
root = dget(sb->s_root);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
dentry = lookup_one_len(e->name, root, strlen(e->name));
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out;
err = -EEXIST;
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
goto out2;
inode = bm_get_inode(sb, S_IFREG | 0644);
@@ -723,7 +711,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
out2:
dput(dentry);
out:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(root);
if (err) {
@@ -766,12 +754,12 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
case 3:
/* Delete all handlers. */
root = dget(file->f_path.dentry->d_sb->s_root);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
while (!list_empty(&entries))
kill_node(list_entry(entries.next, Node, list));
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(root);
break;
default:
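
entry_status() above now emits the magic and mask bytes through the kernel's bin2hex() helper and accumulates the offset returned by sprintf() instead of re-running strlen() after every append. A user-space sketch of that style of helper, written here for illustration (it is not the kernel's lib/hexdump.c, but it follows the same convention of returning a pointer just past the last character written so calls can be chained):

#include <stdio.h>
#include <string.h>

/* Write the bytes of src as lowercase hex into dst; return the end pointer. */
static char *bin2hex_sketch(char *dst, const unsigned char *src, size_t count)
{
	static const char hex[] = "0123456789abcdef";

	while (count--) {
		*dst++ = hex[*src >> 4];
		*dst++ = hex[*src++ & 0x0f];
	}
	return dst;
}

int main(void)
{
	unsigned char magic[] = { 0x7f, 'E', 'L', 'F' };
	char page[64];
	char *dp = page;

	dp += sprintf(dp, "offset %i\nmagic ", 0);
	dp = bin2hex_sketch(dp, magic, sizeof(magic));
	*dp++ = '\n';
	*dp = '\0';
	fputs(page, stdout);
	return 0;
}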
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 975266be67d3..c7e4163ede87 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -27,7 +27,6 @@
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
-#include <linux/aio.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -147,15 +146,14 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
}
static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
- offset, blkdev_get_block,
- NULL, NULL, 0);
+ return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
+ blkdev_get_block, NULL, NULL,
+ DIO_SKIP_DIO_COUNT);
}
int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1598,9 +1596,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
+ struct inode *bd_inode = file->f_mapping->host;
+ loff_t size = i_size_read(bd_inode);
struct blk_plug plug;
ssize_t ret;
+ if (bdev_read_only(I_BDEV(bd_inode)))
+ return -EPERM;
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if (iocb->ki_pos >= size)
+ return -ENOSPC;
+
+ iov_iter_truncate(from, size - iocb->ki_pos);
+
blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
if (ret > 0) {
@@ -1660,8 +1671,6 @@ const struct file_operations def_blk_fops = {
.open = blkdev_open,
.release = blkdev_close,
.llseek = block_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = blkdev_read_iter,
.write_iter = blkdev_write_iter,
.mmap = generic_file_mmap,
@@ -1708,7 +1717,7 @@ struct block_device *lookup_bdev(const char *pathname)
if (error)
return ERR_PTR(error);
- inode = path.dentry->d_inode;
+ inode = d_backing_inode(path.dentry);
error = -ENOTBLK;
if (!S_ISBLK(inode->i_mode))
goto fail;
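
blkdev_write_iter() above now refuses writes to read-only block devices, returns -ENOSPC when the write starts at or beyond the device size, and uses iov_iter_truncate() so a write never runs past the end of the device. The clamping itself is plain interval arithmetic; a tiny self-contained sketch assuming a simple (pos, count, size) triple:

#include <stdio.h>
#include <errno.h>

/* Clamp a write of "count" bytes at "pos" to a device of "size" bytes.
 * Returns the number of bytes that may be written, or -ENOSPC. */
static long long clamp_write(long long pos, long long count, long long size)
{
	if (pos >= size)
		return -ENOSPC;			/* nothing left on the device */
	if (count > size - pos)
		count = size - pos;		/* like iov_iter_truncate() above */
	return count;
}

int main(void)
{
	printf("%lld\n", clamp_write(90, 20, 100));	/* 10 */
	printf("%lld\n", clamp_write(100, 20, 100));	/* -ENOSPC */
	return 0;
}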
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 4dabeb893b7c..df9932b00d08 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -87,7 +87,7 @@ BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
+__btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
int thresh)
{
struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -132,7 +132,7 @@ static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
- int flags,
+ unsigned int flags,
int max_active,
int thresh)
{
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index e386c29ef1f6..ec2ee477f8ba 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -66,7 +66,7 @@ BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
- int flags,
+ unsigned int flags,
int max_active,
int thresh);
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f55721ff9385..9de772ee0031 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1206,7 +1206,7 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
struct ulist *roots = NULL;
struct ulist_iterator uiter;
struct ulist_node *node;
- struct seq_list elem = {};
+ struct seq_list elem = SEQ_LIST_INIT(elem);
int ret = 0;
tmp = ulist_alloc(GFP_NOFS);
@@ -1610,7 +1610,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
struct ulist *roots = NULL;
struct ulist_node *ref_node = NULL;
struct ulist_node *root_node = NULL;
- struct seq_list tree_mod_seq_elem = {};
+ struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
struct ulist_iterator ref_uiter;
struct ulist_iterator root_uiter;
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index de5e4f2adfea..0ef5cc13fae2 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -66,7 +66,11 @@ struct btrfs_inode {
*/
struct btrfs_key location;
- /* Lock for counters */
+ /*
+ * Lock for counters and all fields used to determine if the inode is in
+ * the log or not (last_trans, last_sub_trans, last_log_commit,
+ * logged_trans).
+ */
spinlock_t lock;
/* the extent_tree has caches of all the extent mappings to disk */
@@ -250,6 +254,9 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode)
static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
{
+ int ret = 0;
+
+ spin_lock(&BTRFS_I(inode)->lock);
if (BTRFS_I(inode)->logged_trans == generation &&
BTRFS_I(inode)->last_sub_trans <=
BTRFS_I(inode)->last_log_commit &&
@@ -263,9 +270,10 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
*/
smp_mb();
if (list_empty(&BTRFS_I(inode)->extent_tree.modified_extents))
- return 1;
+ ret = 1;
}
- return 0;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ return ret;
}
#define BTRFS_DIO_ORIG_BIO_SUBMITTED 0x1
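
The btrfs_inode.h hunk makes btrfs_inode_in_log() take BTRFS_I(inode)->lock so that logged_trans, last_sub_trans and last_log_commit are read as one consistent snapshot instead of racing with writers. A generic sketch of that pattern using a pthread mutex in place of the kernel spinlock (the extent_tree.modified_extents check from the real function is omitted for brevity):

#include <pthread.h>
#include <stdbool.h>

struct logged_state {
	pthread_mutex_t lock;		/* guards all fields below */
	unsigned long long logged_trans;
	unsigned long long last_sub_trans;
	unsigned long long last_log_commit;
};

/* Evaluate the multi-field predicate under the same lock the writers hold,
 * so the fields cannot change out from under us mid-check. */
static bool in_log(struct logged_state *s, unsigned long long generation)
{
	bool ret = false;

	pthread_mutex_lock(&s->lock);
	if (s->logged_trans == generation &&
	    s->last_sub_trans <= s->last_log_commit)
		ret = true;
	pthread_mutex_unlock(&s->lock);
	return ret;
}

int main(void)
{
	struct logged_state s = { PTHREAD_MUTEX_INITIALIZER, 5, 1, 2 };

	return in_log(&s, 5) ? 0 : 1;
}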
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index d897ef803b3b..ce7dec88f4b8 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2990,8 +2990,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
(unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev);
- mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
- GFP_NOFS);
+ mapped_datav = kmalloc_array(bio->bi_vcnt,
+ sizeof(*mapped_datav), GFP_NOFS);
if (!mapped_datav)
goto leave;
cur_bytenr = dev_bytenr;
@@ -3241,8 +3241,5 @@ void btrfsic_unmount(struct btrfs_root *root,
mutex_unlock(&btrfsic_mutex);
- if (is_vmalloc_addr(state))
- vfree(state);
- else
- kfree(state);
+ kvfree(state);
}
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index e9df8862012c..ce62324c78e7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -622,7 +622,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->orig_bio = bio;
nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
- cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
+ cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
GFP_NOFS);
if (!cb->compressed_pages)
goto fail1;
@@ -750,7 +750,7 @@ static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
-static struct btrfs_compress_op *btrfs_compress_op[] = {
+static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zlib_compress,
&btrfs_lzo_compress,
};
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index d181f70caae0..13a4dc0436c9 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -77,7 +77,7 @@ struct btrfs_compress_op {
size_t srclen, size_t destlen);
};
-extern struct btrfs_compress_op btrfs_zlib_compress;
-extern struct btrfs_compress_op btrfs_lzo_compress;
+extern const struct btrfs_compress_op btrfs_zlib_compress;
+extern const struct btrfs_compress_op btrfs_lzo_compress;
#endif
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 6d67f32e648d..0f11ebc92f02 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -578,7 +578,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
if (!tree_mod_need_log(fs_info, eb))
return 0;
- tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
+ tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
if (!tm_list)
return -ENOMEM;
@@ -677,7 +677,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
if (log_removal && btrfs_header_level(old_root) > 0) {
nritems = btrfs_header_nritems(old_root);
- tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
+ tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
flags);
if (!tm_list) {
ret = -ENOMEM;
@@ -814,7 +814,7 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
return 0;
- tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
+ tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
GFP_NOFS);
if (!tm_list)
return -ENOMEM;
@@ -905,8 +905,7 @@ tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
return 0;
nritems = btrfs_header_nritems(eb);
- tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
- GFP_NOFS);
+ tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
if (!tm_list)
return -ENOMEM;
@@ -1073,7 +1072,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
ret = btrfs_dec_ref(trans, root, buf, 1);
BUG_ON(ret); /* -ENOMEM */
}
- clean_tree_block(trans, root, buf);
+ clean_tree_block(trans, root->fs_info, buf);
*last_ref = 1;
}
return 0;
@@ -1678,7 +1677,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
continue;
}
- cur = btrfs_find_tree_block(root, blocknr);
+ cur = btrfs_find_tree_block(root->fs_info, blocknr);
if (cur)
uptodate = btrfs_buffer_uptodate(cur, gen, 0);
else
@@ -1943,7 +1942,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
path->locks[level] = 0;
path->nodes[level] = NULL;
- clean_tree_block(trans, root, mid);
+ clean_tree_block(trans, root->fs_info, mid);
btrfs_tree_unlock(mid);
/* once for the path */
free_extent_buffer(mid);
@@ -1997,7 +1996,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (wret < 0 && wret != -ENOSPC)
ret = wret;
if (btrfs_header_nritems(right) == 0) {
- clean_tree_block(trans, root, right);
+ clean_tree_block(trans, root->fs_info, right);
btrfs_tree_unlock(right);
del_ptr(root, path, level + 1, pslot + 1);
root_sub_used(root, right->len);
@@ -2041,7 +2040,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
BUG_ON(wret == 1);
}
if (btrfs_header_nritems(mid) == 0) {
- clean_tree_block(trans, root, mid);
+ clean_tree_block(trans, root->fs_info, mid);
btrfs_tree_unlock(mid);
del_ptr(root, path, level + 1, pslot);
root_sub_used(root, mid->len);
@@ -2259,7 +2258,7 @@ static void reada_for_search(struct btrfs_root *root,
search = btrfs_node_blockptr(node, slot);
blocksize = root->nodesize;
- eb = btrfs_find_tree_block(root, search);
+ eb = btrfs_find_tree_block(root->fs_info, search);
if (eb) {
free_extent_buffer(eb);
return;
@@ -2319,7 +2318,7 @@ static noinline void reada_for_balance(struct btrfs_root *root,
if (slot > 0) {
block1 = btrfs_node_blockptr(parent, slot - 1);
gen = btrfs_node_ptr_generation(parent, slot - 1);
- eb = btrfs_find_tree_block(root, block1);
+ eb = btrfs_find_tree_block(root->fs_info, block1);
/*
* if we get -eagain from btrfs_buffer_uptodate, we
* don't want to return eagain here. That will loop
@@ -2332,7 +2331,7 @@ static noinline void reada_for_balance(struct btrfs_root *root,
if (slot + 1 < nritems) {
block2 = btrfs_node_blockptr(parent, slot + 1);
gen = btrfs_node_ptr_generation(parent, slot + 1);
- eb = btrfs_find_tree_block(root, block2);
+ eb = btrfs_find_tree_block(root->fs_info, block2);
if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
block2 = 0;
free_extent_buffer(eb);
@@ -2450,7 +2449,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
blocknr = btrfs_node_blockptr(b, slot);
gen = btrfs_node_ptr_generation(b, slot);
- tmp = btrfs_find_tree_block(root, blocknr);
+ tmp = btrfs_find_tree_block(root->fs_info, blocknr);
if (tmp) {
/* first we do an atomic uptodate check */
if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
@@ -3126,7 +3125,8 @@ again:
* higher levels
*
*/
-static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
+static void fixup_low_keys(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
struct btrfs_disk_key *key, int level)
{
int i;
@@ -3137,7 +3137,7 @@ static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
if (!path->nodes[i])
break;
t = path->nodes[i];
- tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
+ tree_mod_log_set_node_key(fs_info, t, tslot, 1);
btrfs_set_node_key(t, key, tslot);
btrfs_mark_buffer_dirty(path->nodes[i]);
if (tslot != 0)
@@ -3151,7 +3151,8 @@ static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
* This function isn't completely safe. It's the caller's responsibility
* that the new key won't break the order
*/
-void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
struct btrfs_key *new_key)
{
struct btrfs_disk_key disk_key;
@@ -3173,7 +3174,7 @@ void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_item_key(eb, &disk_key, slot);
btrfs_mark_buffer_dirty(eb);
if (slot == 0)
- fixup_low_keys(root, path, &disk_key, 1);
+ fixup_low_keys(fs_info, path, &disk_key, 1);
}
/*
@@ -3692,7 +3693,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (left_nritems)
btrfs_mark_buffer_dirty(left);
else
- clean_tree_block(trans, root, left);
+ clean_tree_block(trans, root->fs_info, left);
btrfs_mark_buffer_dirty(right);
@@ -3704,7 +3705,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (path->slots[0] >= left_nritems) {
path->slots[0] -= left_nritems;
if (btrfs_header_nritems(path->nodes[0]) == 0)
- clean_tree_block(trans, root, path->nodes[0]);
+ clean_tree_block(trans, root->fs_info, path->nodes[0]);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -3928,10 +3929,10 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
if (right_nritems)
btrfs_mark_buffer_dirty(right);
else
- clean_tree_block(trans, root, right);
+ clean_tree_block(trans, root->fs_info, right);
btrfs_item_key(right, &disk_key, 0);
- fixup_low_keys(root, path, &disk_key, 1);
+ fixup_low_keys(root->fs_info, path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
@@ -4168,6 +4169,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
int mid;
int slot;
struct extent_buffer *right;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
int wret;
int split;
@@ -4271,10 +4273,10 @@ again:
btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(right, root->root_key.objectid);
btrfs_set_header_level(right, 0);
- write_extent_buffer(right, root->fs_info->fsid,
+ write_extent_buffer(right, fs_info->fsid,
btrfs_header_fsid(), BTRFS_FSID_SIZE);
- write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
+ write_extent_buffer(right, fs_info->chunk_tree_uuid,
btrfs_header_chunk_tree_uuid(right),
BTRFS_UUID_SIZE);
@@ -4297,7 +4299,7 @@ again:
path->nodes[0] = right;
path->slots[0] = 0;
if (path->slots[1] == 0)
- fixup_low_keys(root, path, &disk_key, 1);
+ fixup_low_keys(fs_info, path, &disk_key, 1);
}
btrfs_mark_buffer_dirty(right);
return ret;
@@ -4615,7 +4617,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
- fixup_low_keys(root, path, &disk_key, 1);
+ fixup_low_keys(root->fs_info, path, &disk_key, 1);
}
item = btrfs_item_nr(slot);
@@ -4716,7 +4718,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- fixup_low_keys(root, path, &disk_key, 1);
+ fixup_low_keys(root->fs_info, path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
@@ -4888,7 +4890,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- fixup_low_keys(root, path, &disk_key, level + 1);
+ fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
}
btrfs_mark_buffer_dirty(parent);
}
@@ -4981,7 +4983,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_header_level(leaf, 0);
} else {
btrfs_set_path_blocking(path);
- clean_tree_block(trans, root, leaf);
+ clean_tree_block(trans, root->fs_info, leaf);
btrfs_del_leaf(trans, root, path, leaf);
}
} else {
@@ -4990,7 +4992,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- fixup_low_keys(root, path, &disk_key, 1);
+ fixup_low_keys(root->fs_info, path, &disk_key, 1);
}
/* delete the leaf if it is mostly empty */
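
Several ctree.c hunks above replace kzalloc(n * size, ...) with kcalloc(n, size, ...); the win is that kcalloc() checks the multiplication for overflow rather than quietly allocating a wrapped-around, undersized buffer. A user-space sketch of the same guard (calloc() already performs this check; the explicit test just makes it visible):

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Allocate n zeroed elements of "size" bytes, refusing overflowing requests. */
static void *calloc_checked(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;			/* n * size would wrap around */
	return calloc(n, size);			/* calloc() also checks: belt and braces */
}

int main(void)
{
	void *ok = calloc_checked(1024, sizeof(void *));
	void *bad = calloc_checked(SIZE_MAX / 2, 16);

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is NULL */
	free(ok);
	return 0;
}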
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f9c89cae39ee..6f364e1d8d3d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1061,6 +1061,12 @@ struct btrfs_block_group_item {
__le64 flags;
} __attribute__ ((__packed__));
+#define BTRFS_QGROUP_LEVEL_SHIFT 48
+static inline u64 btrfs_qgroup_level(u64 qgroupid)
+{
+ return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT;
+}
+
/*
* is subvolume quota turned on?
*/
@@ -1256,6 +1262,20 @@ struct btrfs_caching_control {
atomic_t count;
};
+struct btrfs_io_ctl {
+ void *cur, *orig;
+ struct page *page;
+ struct page **pages;
+ struct btrfs_root *root;
+ struct inode *inode;
+ unsigned long size;
+ int index;
+ int num_pages;
+ int entries;
+ int bitmaps;
+ unsigned check_crcs:1;
+};
+
struct btrfs_block_group_cache {
struct btrfs_key key;
struct btrfs_block_group_item item;
@@ -1321,6 +1341,9 @@ struct btrfs_block_group_cache {
/* For dirty block groups */
struct list_head dirty_list;
+ struct list_head io_list;
+
+ struct btrfs_io_ctl io_ctl;
};
/* delayed seq elem */
@@ -1329,6 +1352,8 @@ struct seq_list {
u64 seq;
};
+#define SEQ_LIST_INIT(name) { .list = LIST_HEAD_INIT((name).list), .seq = 0 }
+
enum btrfs_orphan_cleanup_state {
ORPHAN_CLEANUP_STARTED = 1,
ORPHAN_CLEANUP_DONE = 2,
@@ -1472,6 +1497,12 @@ struct btrfs_fs_info {
struct mutex chunk_mutex;
struct mutex volume_mutex;
+ /*
+ * this is taken to make sure we don't set block groups ro after
+ * the free space cache has been allocated on them
+ */
+ struct mutex ro_block_group_mutex;
+
/* this is used during read/modify/write to make sure
* no two ios are trying to mod the same stripe at the same
* time
@@ -1513,6 +1544,7 @@ struct btrfs_fs_info {
spinlock_t delayed_iput_lock;
struct list_head delayed_iputs;
+ struct rw_semaphore delayed_iput_sem;
/* this protects tree_mod_seq_list */
spinlock_t tree_mod_seq_lock;
@@ -3295,6 +3327,9 @@ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
}
/* extent-tree.c */
+
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes);
+
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
unsigned num_items)
{
@@ -3385,6 +3420,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset, int no_quota);
+int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
@@ -3417,7 +3454,7 @@ enum btrfs_reserve_flush_enum {
BTRFS_RESERVE_FLUSH_ALL,
};
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
@@ -3440,6 +3477,7 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
struct btrfs_block_rsv *rsv);
+void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
@@ -3486,7 +3524,8 @@ int btrfs_previous_item(struct btrfs_root *root,
int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid);
-void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
@@ -4180,7 +4219,8 @@ int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
static inline int is_fstree(u64 rootid)
{
if (rootid == BTRFS_FS_TREE_OBJECTID ||
- (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+ ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
+ !btrfs_qgroup_level(rootid)))
return 1;
return 0;
}
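
The new btrfs_qgroup_level() helper treats a 64-bit qgroupid as a level in the top 16 bits with the id in the low 48, and is_fstree() now rejects rootids that carry a non-zero level. A tiny self-contained sketch of that packing (the concrete values below are made up):

#include <stdint.h>
#include <stdio.h>

#define QGROUP_LEVEL_SHIFT	48

static inline uint64_t qgroup_level(uint64_t qgroupid)
{
	return qgroupid >> QGROUP_LEVEL_SHIFT;			/* top 16 bits */
}

static inline uint64_t qgroup_id(uint64_t qgroupid)
{
	return qgroupid & ((1ULL << QGROUP_LEVEL_SHIFT) - 1);	/* low 48 bits */
}

int main(void)
{
	uint64_t qid = (2ULL << QGROUP_LEVEL_SHIFT) | 100;	/* level 2, id 100 */

	printf("level=%llu id=%llu\n",
	       (unsigned long long)qgroup_level(qid),
	       (unsigned long long)qgroup_id(qid));
	return 0;
}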
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 82f0c7c95474..a2ae42720a6a 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1383,7 +1383,7 @@ out:
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
- struct btrfs_root *root, int nr)
+ struct btrfs_fs_info *fs_info, int nr)
{
struct btrfs_async_delayed_work *async_work;
@@ -1399,7 +1399,7 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
btrfs_async_run_delayed_root, NULL, NULL);
async_work->nr = nr;
- btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
+ btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
return 0;
}
@@ -1426,6 +1426,7 @@ static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
struct btrfs_delayed_root *delayed_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
delayed_root = btrfs_get_delayed_root(root);
@@ -1438,7 +1439,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
seq = atomic_read(&delayed_root->items_seq);
- ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
+ ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
if (ret)
return;
@@ -1447,7 +1448,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
return;
}
- btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
+ btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
@@ -1801,6 +1802,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+ BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
+
inode->i_version = btrfs_stack_inode_sequence(inode_item);
inode->i_rdev = 0;
*rdev = btrfs_stack_inode_rdev(inode_item);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 6d16bea94e1c..8f8ed7d20bac 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -489,11 +489,13 @@ update_existing_ref(struct btrfs_trans_handle *trans,
* existing and update must have the same bytenr
*/
static noinline void
-update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
+update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_node *existing,
struct btrfs_delayed_ref_node *update)
{
struct btrfs_delayed_ref_head *existing_ref;
struct btrfs_delayed_ref_head *ref;
+ int old_ref_mod;
existing_ref = btrfs_delayed_node_to_head(existing);
ref = btrfs_delayed_node_to_head(update);
@@ -541,7 +543,20 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
* only need the lock for this case cause we could be processing it
* currently, for refs we just added we know we're a-ok.
*/
+ old_ref_mod = existing_ref->total_ref_mod;
existing->ref_mod += update->ref_mod;
+ existing_ref->total_ref_mod += update->ref_mod;
+
+ /*
+ * If we are going from a positive ref mod to a negative or vice
+ * versa, we need to make sure to adjust pending_csums accordingly.
+ */
+ if (existing_ref->is_data) {
+ if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
+ delayed_refs->pending_csums -= existing->num_bytes;
+ if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
+ delayed_refs->pending_csums += existing->num_bytes;
+ }
spin_unlock(&existing_ref->lock);
}
@@ -605,6 +620,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
head_ref->is_data = is_data;
head_ref->ref_root = RB_ROOT;
head_ref->processing = 0;
+ head_ref->total_ref_mod = count_mod;
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
@@ -614,7 +630,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
- update_existing_head_ref(&existing->node, ref);
+ update_existing_head_ref(delayed_refs, &existing->node, ref);
/*
* we've updated the existing ref, free the newly
* allocated ref
@@ -622,6 +638,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
+ if (is_data && count_mod < 0)
+ delayed_refs->pending_csums += num_bytes;
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index a764e2340d48..5eb0892396d0 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -88,6 +88,14 @@ struct btrfs_delayed_ref_head {
struct rb_node href_node;
struct btrfs_delayed_extent_op *extent_op;
+
+ /*
+ * This is used to track the final ref_mod from all the refs associated
+ * with this head ref. It is not adjusted as delayed refs are run; it is
+ * only meant to track whether we need to do the csum accounting or not.
+ */
+ int total_ref_mod;
+
/*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
@@ -138,6 +146,8 @@ struct btrfs_delayed_ref_root {
/* total number of head nodes ready for processing */
unsigned long num_heads_ready;
+ u64 pending_csums;
+
/*
* set when the tree is flushing before a transaction commit,
* used by the throttling code to decide if new updates need
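
The delayed-ref hunks keep delayed_refs->pending_csums in step with the new total_ref_mod field: for data heads, bytes are added when the accumulated ref mod first goes negative (the extent will be freed, so its csums must be deleted) and removed when it crosses back to non-negative. The sign-crossing bookkeeping in isolation, with the surrounding locking and the is_data test left out:

#include <stdio.h>

struct delayed_refs { long long pending_csums; };

/* Apply a ref_mod delta to a data head and adjust pending_csums whenever
 * total_ref_mod crosses zero, mirroring the logic in the hunk above. */
static void apply_ref_mod(struct delayed_refs *dr, long long *total_ref_mod,
			  long long delta, long long num_bytes)
{
	long long old = *total_ref_mod;

	*total_ref_mod += delta;
	if (*total_ref_mod >= 0 && old < 0)
		dr->pending_csums -= num_bytes;	/* no longer a pending free */
	if (*total_ref_mod < 0 && old >= 0)
		dr->pending_csums += num_bytes;	/* extent will be freed: csums to delete */
}

int main(void)
{
	struct delayed_refs dr = { 0 };
	long long total = 0;

	apply_ref_mod(&dr, &total, -1, 4096);	/* goes negative: +4096 */
	apply_ref_mod(&dr, &total, +1, 4096);	/* back to zero:  -4096 */
	printf("pending_csums=%lld\n", dr.pending_csums);
	return 0;
}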
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 5ec03d999c37..0573848c7333 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -670,8 +670,8 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
srcdev = dev_replace->srcdev;
- args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
- div64_u64(btrfs_device_get_total_bytes(srcdev), 1000));
+ args->status.progress_1000 = div_u64(dev_replace->cursor_left,
+ div_u64(btrfs_device_get_total_bytes(srcdev), 1000));
break;
}
btrfs_dev_replace_unlock(dev_replace);
@@ -806,7 +806,7 @@ static int btrfs_dev_replace_kthread(void *data)
btrfs_dev_replace_status(fs_info, status_args);
progress = status_args->status.progress_1000;
kfree(status_args);
- do_div(progress, 10);
+ progress = div_u64(progress, 10);
printk_in_rcu(KERN_INFO
"BTRFS: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
dev_replace->srcdev->missing ? "<missing disk>" :
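
The dev-replace hunks switch the per-mille progress computation from div64_u64()/do_div() to div_u64(), which divides a 64-bit value by a 32-bit divisor; the arithmetic itself is unchanged. A sketch with plain C division standing in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Progress in tenths of a percent: cursor / (total / 1000). */
static uint64_t progress_1000(uint64_t cursor_left, uint64_t total_bytes)
{
	uint64_t slice = total_bytes / 1000;	/* div_u64(total, 1000) in the kernel */

	return slice ? cursor_left / slice : 0;
}

int main(void)
{
	uint64_t p = progress_1000(750ULL << 30, 1000ULL << 30);	/* 750 GiB of 1000 GiB */

	printf("%llu.%llu%%\n",
	       (unsigned long long)(p / 10), (unsigned long long)(p % 10));
	return 0;
}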
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 639f2663ed3f..2ef9a4b72d06 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -54,7 +54,7 @@
#include <asm/cpufeature.h>
#endif
-static struct extent_io_ops btree_extent_io_ops;
+static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -274,10 +274,11 @@ void btrfs_csum_final(u32 crc, char *result)
* compute the csum for a btree block, and either verify it or write it
* into the csum field of the block.
*/
-static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
+static int csum_tree_block(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *buf,
int verify)
{
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
char *result = NULL;
unsigned long len;
unsigned long cur_len;
@@ -302,7 +303,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
offset += cur_len;
}
if (csum_size > sizeof(inline_result)) {
- result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
+ result = kzalloc(csum_size, GFP_NOFS);
if (!result)
return 1;
} else {
@@ -321,7 +322,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
printk_ratelimited(KERN_WARNING
"BTRFS: %s checksum verify failed on %llu wanted %X found %X "
"level %d\n",
- root->fs_info->sb->s_id, buf->start,
+ fs_info->sb->s_id, buf->start,
val, found, btrfs_header_level(buf));
if (result != (char *)&inline_result)
kfree(result);
@@ -418,12 +419,6 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
if (memcmp(raw_disk_sb, result, csum_size))
ret = 1;
-
- if (ret && btrfs_super_generation(disk_sb) < 10) {
- printk(KERN_WARNING
- "BTRFS: super block crcs don't match, older mkfs detected\n");
- ret = 0;
- }
}
if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
@@ -501,7 +496,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
* we only fill in the checksum field in the first page of a multi-page block
*/
-static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
+static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
u64 start = page_offset(page);
u64 found_start;
@@ -513,14 +508,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
found_start = btrfs_header_bytenr(eb);
if (WARN_ON(found_start != start || !PageUptodate(page)))
return 0;
- csum_tree_block(root, eb, 0);
+ csum_tree_block(fs_info, eb, 0);
return 0;
}
-static int check_tree_block_fsid(struct btrfs_root *root,
+static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
u8 fsid[BTRFS_UUID_SIZE];
int ret = 1;
@@ -640,7 +635,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
ret = -EIO;
goto err;
}
- if (check_tree_block_fsid(root, eb)) {
+ if (check_tree_block_fsid(root->fs_info, eb)) {
printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n",
eb->fs_info->sb->s_id, eb->start);
ret = -EIO;
@@ -657,7 +652,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
eb, found_level);
- ret = csum_tree_block(root, eb, 1);
+ ret = csum_tree_block(root->fs_info, eb, 1);
if (ret) {
ret = -EIO;
goto err;
@@ -882,7 +877,7 @@ static int btree_csum_one_bio(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
root = BTRFS_I(bvec->bv_page->mapping->host)->root;
- ret = csum_dirty_buffer(root, bvec->bv_page);
+ ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
if (ret)
break;
}
@@ -1119,10 +1114,10 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
return 0;
}
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
+struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr)
{
- return find_extent_buffer(root->fs_info, bytenr);
+ return find_extent_buffer(fs_info, bytenr);
}
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
@@ -1165,11 +1160,10 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
}
-void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+void clean_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
struct extent_buffer *buf)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
if (btrfs_header_generation(buf) ==
fs_info->running_transaction->transid) {
btrfs_assert_tree_locked(buf);
@@ -2146,6 +2140,267 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
}
}
+static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
+{
+ mutex_init(&fs_info->scrub_lock);
+ atomic_set(&fs_info->scrubs_running, 0);
+ atomic_set(&fs_info->scrub_pause_req, 0);
+ atomic_set(&fs_info->scrubs_paused, 0);
+ atomic_set(&fs_info->scrub_cancel_req, 0);
+ init_waitqueue_head(&fs_info->scrub_pause_wait);
+ fs_info->scrub_workers_refcnt = 0;
+}
+
+static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
+{
+ spin_lock_init(&fs_info->balance_lock);
+ mutex_init(&fs_info->balance_mutex);
+ atomic_set(&fs_info->balance_running, 0);
+ atomic_set(&fs_info->balance_pause_req, 0);
+ atomic_set(&fs_info->balance_cancel_req, 0);
+ fs_info->balance_ctl = NULL;
+ init_waitqueue_head(&fs_info->balance_wait_q);
+}
+
+static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
+ struct btrfs_root *tree_root)
+{
+ fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+ set_nlink(fs_info->btree_inode, 1);
+ /*
+ * we set the i_size on the btree inode to the max possible int.
+ * the real end of the address space is determined by all of
+ * the devices in the system
+ */
+ fs_info->btree_inode->i_size = OFFSET_MAX;
+ fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+
+ RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
+ extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
+ fs_info->btree_inode->i_mapping);
+ BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
+ extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+
+ BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+
+ BTRFS_I(fs_info->btree_inode)->root = tree_root;
+ memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
+ sizeof(struct btrfs_key));
+ set_bit(BTRFS_INODE_DUMMY,
+ &BTRFS_I(fs_info->btree_inode)->runtime_flags);
+ btrfs_insert_inode_hash(fs_info->btree_inode);
+}
+
+static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
+{
+ fs_info->dev_replace.lock_owner = 0;
+ atomic_set(&fs_info->dev_replace.nesting_level, 0);
+ mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
+ mutex_init(&fs_info->dev_replace.lock_management_lock);
+ mutex_init(&fs_info->dev_replace.lock);
+ init_waitqueue_head(&fs_info->replace_wait);
+}
+
+static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
+{
+ spin_lock_init(&fs_info->qgroup_lock);
+ mutex_init(&fs_info->qgroup_ioctl_lock);
+ fs_info->qgroup_tree = RB_ROOT;
+ fs_info->qgroup_op_tree = RB_ROOT;
+ INIT_LIST_HEAD(&fs_info->dirty_qgroups);
+ fs_info->qgroup_seq = 1;
+ fs_info->quota_enabled = 0;
+ fs_info->pending_quota_state = 0;
+ fs_info->qgroup_ulist = NULL;
+ mutex_init(&fs_info->qgroup_rescan_lock);
+}
+
+static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
+ struct btrfs_fs_devices *fs_devices)
+{
+ int max_active = fs_info->thread_pool_size;
+ unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
+
+ fs_info->workers =
+ btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
+ max_active, 16);
+
+ fs_info->delalloc_workers =
+ btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
+
+ fs_info->flush_workers =
+ btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
+
+ fs_info->caching_workers =
+ btrfs_alloc_workqueue("cache", flags, max_active, 0);
+
+ /*
+ * a higher idle thresh on the submit workers makes it much more
+ * likely that bios will be sent down in a sane order to the
+ * devices
+ */
+ fs_info->submit_workers =
+ btrfs_alloc_workqueue("submit", flags,
+ min_t(u64, fs_devices->num_devices,
+ max_active), 64);
+
+ fs_info->fixup_workers =
+ btrfs_alloc_workqueue("fixup", flags, 1, 0);
+
+ /*
+ * endios are largely parallel and should have a very
+ * low idle thresh
+ */
+ fs_info->endio_workers =
+ btrfs_alloc_workqueue("endio", flags, max_active, 4);
+ fs_info->endio_meta_workers =
+ btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
+ fs_info->endio_meta_write_workers =
+ btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
+ fs_info->endio_raid56_workers =
+ btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
+ fs_info->endio_repair_workers =
+ btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
+ fs_info->rmw_workers =
+ btrfs_alloc_workqueue("rmw", flags, max_active, 2);
+ fs_info->endio_write_workers =
+ btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
+ fs_info->endio_freespace_worker =
+ btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
+ fs_info->delayed_workers =
+ btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
+ fs_info->readahead_workers =
+ btrfs_alloc_workqueue("readahead", flags, max_active, 2);
+ fs_info->qgroup_rescan_workers =
+ btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
+ fs_info->extent_workers =
+ btrfs_alloc_workqueue("extent-refs", flags,
+ min_t(u64, fs_devices->num_devices,
+ max_active), 8);
+
+ if (!(fs_info->workers && fs_info->delalloc_workers &&
+ fs_info->submit_workers && fs_info->flush_workers &&
+ fs_info->endio_workers && fs_info->endio_meta_workers &&
+ fs_info->endio_meta_write_workers &&
+ fs_info->endio_repair_workers &&
+ fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
+ fs_info->endio_freespace_worker && fs_info->rmw_workers &&
+ fs_info->caching_workers && fs_info->readahead_workers &&
+ fs_info->fixup_workers && fs_info->delayed_workers &&
+ fs_info->extent_workers &&
+ fs_info->qgroup_rescan_workers)) {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
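+
+/*
+ * All of the queues above are allocated in one go; if any single
+ * allocation fails we return -ENOMEM and rely on the caller (open_ctree
+ * jumps to its fail_sb_buffer label) to tear down whatever was created.
+ */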
+
+static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
+ struct btrfs_fs_devices *fs_devices)
+{
+ int ret;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *log_tree_root;
+ struct btrfs_super_block *disk_super = fs_info->super_copy;
+ u64 bytenr = btrfs_super_log_root(disk_super);
+
+ if (fs_devices->rw_devices == 0) {
+ printk(KERN_WARNING "BTRFS: log replay required "
+ "on RO media\n");
+ return -EIO;
+ }
+
+ log_tree_root = btrfs_alloc_root(fs_info);
+ if (!log_tree_root)
+ return -ENOMEM;
+
+ __setup_root(tree_root->nodesize, tree_root->sectorsize,
+ tree_root->stripesize, log_tree_root, fs_info,
+ BTRFS_TREE_LOG_OBJECTID);
+
+ log_tree_root->node = read_tree_block(tree_root, bytenr,
+ fs_info->generation + 1);
+ if (!log_tree_root->node ||
+ !extent_buffer_uptodate(log_tree_root->node)) {
+ printk(KERN_ERR "BTRFS: failed to read log tree\n");
+ free_extent_buffer(log_tree_root->node);
+ kfree(log_tree_root);
+ return -EIO;
+ }
+ /* returns with log_tree_root freed on success */
+ ret = btrfs_recover_log_trees(log_tree_root);
+ if (ret) {
+ btrfs_error(tree_root->fs_info, ret,
+ "Failed to recover log tree");
+ free_extent_buffer(log_tree_root->node);
+ kfree(log_tree_root);
+ return ret;
+ }
+
+ if (fs_info->sb->s_flags & MS_RDONLY) {
+ ret = btrfs_commit_super(tree_root);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
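+
+/*
+ * Note that btrfs_recover_log_trees() frees log_tree_root on success
+ * (see the comment above), so only the error paths here need to drop
+ * the node reference and kfree the root themselves.
+ */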
+
+static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
+ struct btrfs_root *tree_root)
+{
+ struct btrfs_root *root;
+ struct btrfs_key location;
+ int ret;
+
+ location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+ location.type = BTRFS_ROOT_ITEM_KEY;
+ location.offset = 0;
+
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->extent_root = root;
+
+ location.objectid = BTRFS_DEV_TREE_OBJECTID;
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->dev_root = root;
+ btrfs_init_devices_late(fs_info);
+
+ location.objectid = BTRFS_CSUM_TREE_OBJECTID;
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->csum_root = root;
+
+ location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (!IS_ERR(root)) {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->quota_enabled = 1;
+ fs_info->pending_quota_state = 1;
+ fs_info->quota_root = root;
+ }
+
+ location.objectid = BTRFS_UUID_TREE_OBJECTID;
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
+ if (ret != -ENOENT)
+ return ret;
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->uuid_root = root;
+ }
+
+ return 0;
+}
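+
+/*
+ * The extent, dev and csum roots are mandatory and any read error is
+ * returned to the caller. The quota root is optional: a failed lookup
+ * simply leaves quotas disabled. The UUID root tolerates -ENOENT since
+ * it may not exist yet; open_ctree() creates it later in that case.
+ */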
+
int open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options)
@@ -2160,21 +2415,12 @@ int open_ctree(struct super_block *sb,
struct btrfs_super_block *disk_super;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *tree_root;
- struct btrfs_root *extent_root;
- struct btrfs_root *csum_root;
struct btrfs_root *chunk_root;
- struct btrfs_root *dev_root;
- struct btrfs_root *quota_root;
- struct btrfs_root *uuid_root;
- struct btrfs_root *log_tree_root;
int ret;
int err = -EINVAL;
int num_backups_tried = 0;
int backup_index = 0;
int max_active;
- int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
- bool create_uuid_tree;
- bool check_uuid_tree;
tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
@@ -2241,11 +2487,12 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->qgroup_op_lock);
spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->unused_bgs_lock);
- mutex_init(&fs_info->unused_bg_unpin_mutex);
rwlock_init(&fs_info->tree_mod_log_lock);
+ mutex_init(&fs_info->unused_bg_unpin_mutex);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
seqlock_init(&fs_info->profiles_lock);
+ init_rwsem(&fs_info->delayed_iput_sem);
init_completion(&fs_info->kobj_unregister);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -2276,7 +2523,7 @@ int open_ctree(struct super_block *sb,
fs_info->free_chunk_space = 0;
fs_info->tree_mod_log = RB_ROOT;
fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
- fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
+ fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
spin_lock_init(&fs_info->reada_lock);
@@ -2294,55 +2541,18 @@ int open_ctree(struct super_block *sb,
}
btrfs_init_delayed_root(fs_info->delayed_root);
- mutex_init(&fs_info->scrub_lock);
- atomic_set(&fs_info->scrubs_running, 0);
- atomic_set(&fs_info->scrub_pause_req, 0);
- atomic_set(&fs_info->scrubs_paused, 0);
- atomic_set(&fs_info->scrub_cancel_req, 0);
- init_waitqueue_head(&fs_info->replace_wait);
- init_waitqueue_head(&fs_info->scrub_pause_wait);
- fs_info->scrub_workers_refcnt = 0;
+ btrfs_init_scrub(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
fs_info->check_integrity_print_mask = 0;
#endif
-
- spin_lock_init(&fs_info->balance_lock);
- mutex_init(&fs_info->balance_mutex);
- atomic_set(&fs_info->balance_running, 0);
- atomic_set(&fs_info->balance_pause_req, 0);
- atomic_set(&fs_info->balance_cancel_req, 0);
- fs_info->balance_ctl = NULL;
- init_waitqueue_head(&fs_info->balance_wait_q);
+ btrfs_init_balance(fs_info);
btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
sb->s_blocksize = 4096;
sb->s_blocksize_bits = blksize_bits(4096);
sb->s_bdi = &fs_info->bdi;
- fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
- set_nlink(fs_info->btree_inode, 1);
- /*
- * we set the i_size on the btree inode to the max possible int.
- * the real end of the address space is determined by all of
- * the devices in the system
- */
- fs_info->btree_inode->i_size = OFFSET_MAX;
- fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
-
- RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
- extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
- fs_info->btree_inode->i_mapping);
- BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
- extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
-
- BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
-
- BTRFS_I(fs_info->btree_inode)->root = tree_root;
- memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
- sizeof(struct btrfs_key));
- set_bit(BTRFS_INODE_DUMMY,
- &BTRFS_I(fs_info->btree_inode)->runtime_flags);
- btrfs_insert_inode_hash(fs_info->btree_inode);
+ btrfs_init_btree_inode(fs_info, tree_root);
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT;
@@ -2363,26 +2573,14 @@ int open_ctree(struct super_block *sb,
mutex_init(&fs_info->transaction_kthread_mutex);
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->volume_mutex);
+ mutex_init(&fs_info->ro_block_group_mutex);
init_rwsem(&fs_info->commit_root_sem);
init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
sema_init(&fs_info->uuid_tree_rescan_sem, 1);
- fs_info->dev_replace.lock_owner = 0;
- atomic_set(&fs_info->dev_replace.nesting_level, 0);
- mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
- mutex_init(&fs_info->dev_replace.lock_management_lock);
- mutex_init(&fs_info->dev_replace.lock);
- spin_lock_init(&fs_info->qgroup_lock);
- mutex_init(&fs_info->qgroup_ioctl_lock);
- fs_info->qgroup_tree = RB_ROOT;
- fs_info->qgroup_op_tree = RB_ROOT;
- INIT_LIST_HEAD(&fs_info->dirty_qgroups);
- fs_info->qgroup_seq = 1;
- fs_info->quota_enabled = 0;
- fs_info->pending_quota_state = 0;
- fs_info->qgroup_ulist = NULL;
- mutex_init(&fs_info->qgroup_rescan_lock);
+ btrfs_init_dev_replace_locks(fs_info);
+ btrfs_init_qgroup(fs_info);
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -2554,75 +2752,9 @@ int open_ctree(struct super_block *sb,
max_active = fs_info->thread_pool_size;
- fs_info->workers =
- btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
- max_active, 16);
-
- fs_info->delalloc_workers =
- btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
-
- fs_info->flush_workers =
- btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
-
- fs_info->caching_workers =
- btrfs_alloc_workqueue("cache", flags, max_active, 0);
-
- /*
- * a higher idle thresh on the submit workers makes it much more
- * likely that bios will be send down in a sane order to the
- * devices
- */
- fs_info->submit_workers =
- btrfs_alloc_workqueue("submit", flags,
- min_t(u64, fs_devices->num_devices,
- max_active), 64);
-
- fs_info->fixup_workers =
- btrfs_alloc_workqueue("fixup", flags, 1, 0);
-
- /*
- * endios are largely parallel and should have a very
- * low idle thresh
- */
- fs_info->endio_workers =
- btrfs_alloc_workqueue("endio", flags, max_active, 4);
- fs_info->endio_meta_workers =
- btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
- fs_info->endio_meta_write_workers =
- btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
- fs_info->endio_raid56_workers =
- btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
- fs_info->endio_repair_workers =
- btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
- fs_info->rmw_workers =
- btrfs_alloc_workqueue("rmw", flags, max_active, 2);
- fs_info->endio_write_workers =
- btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
- fs_info->endio_freespace_worker =
- btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
- fs_info->delayed_workers =
- btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
- fs_info->readahead_workers =
- btrfs_alloc_workqueue("readahead", flags, max_active, 2);
- fs_info->qgroup_rescan_workers =
- btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
- fs_info->extent_workers =
- btrfs_alloc_workqueue("extent-refs", flags,
- min_t(u64, fs_devices->num_devices,
- max_active), 8);
-
- if (!(fs_info->workers && fs_info->delalloc_workers &&
- fs_info->submit_workers && fs_info->flush_workers &&
- fs_info->endio_workers && fs_info->endio_meta_workers &&
- fs_info->endio_meta_write_workers &&
- fs_info->endio_repair_workers &&
- fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
- fs_info->endio_freespace_worker && fs_info->rmw_workers &&
- fs_info->caching_workers && fs_info->readahead_workers &&
- fs_info->fixup_workers && fs_info->delayed_workers &&
- fs_info->extent_workers &&
- fs_info->qgroup_rescan_workers)) {
- err = -ENOMEM;
+ ret = btrfs_init_workqueues(fs_info, fs_devices);
+ if (ret) {
+ err = ret;
goto fail_sb_buffer;
}
@@ -2688,7 +2820,7 @@ int open_ctree(struct super_block *sb,
* keep the device that is marked to be the target device for the
* dev_replace procedure
*/
- btrfs_close_extra_devices(fs_info, fs_devices, 0);
+ btrfs_close_extra_devices(fs_devices, 0);
if (!fs_devices->latest_bdev) {
printk(KERN_ERR "BTRFS: failed to read devices on %s\n",
@@ -2714,61 +2846,9 @@ retry_root_backup:
tree_root->commit_root = btrfs_root_node(tree_root);
btrfs_set_root_refs(&tree_root->root_item, 1);
- location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
- location.type = BTRFS_ROOT_ITEM_KEY;
- location.offset = 0;
-
- extent_root = btrfs_read_tree_root(tree_root, &location);
- if (IS_ERR(extent_root)) {
- ret = PTR_ERR(extent_root);
- goto recovery_tree_root;
- }
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &extent_root->state);
- fs_info->extent_root = extent_root;
-
- location.objectid = BTRFS_DEV_TREE_OBJECTID;
- dev_root = btrfs_read_tree_root(tree_root, &location);
- if (IS_ERR(dev_root)) {
- ret = PTR_ERR(dev_root);
- goto recovery_tree_root;
- }
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &dev_root->state);
- fs_info->dev_root = dev_root;
- btrfs_init_devices_late(fs_info);
-
- location.objectid = BTRFS_CSUM_TREE_OBJECTID;
- csum_root = btrfs_read_tree_root(tree_root, &location);
- if (IS_ERR(csum_root)) {
- ret = PTR_ERR(csum_root);
+ ret = btrfs_read_roots(fs_info, tree_root);
+ if (ret)
goto recovery_tree_root;
- }
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &csum_root->state);
- fs_info->csum_root = csum_root;
-
- location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
- quota_root = btrfs_read_tree_root(tree_root, &location);
- if (!IS_ERR(quota_root)) {
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &quota_root->state);
- fs_info->quota_enabled = 1;
- fs_info->pending_quota_state = 1;
- fs_info->quota_root = quota_root;
- }
-
- location.objectid = BTRFS_UUID_TREE_OBJECTID;
- uuid_root = btrfs_read_tree_root(tree_root, &location);
- if (IS_ERR(uuid_root)) {
- ret = PTR_ERR(uuid_root);
- if (ret != -ENOENT)
- goto recovery_tree_root;
- create_uuid_tree = true;
- check_uuid_tree = false;
- } else {
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &uuid_root->state);
- fs_info->uuid_root = uuid_root;
- create_uuid_tree = false;
- check_uuid_tree =
- generation != btrfs_super_uuid_tree_generation(disk_super);
- }
fs_info->generation = generation;
fs_info->last_trans_committed = generation;
@@ -2792,7 +2872,7 @@ retry_root_backup:
goto fail_block_groups;
}
- btrfs_close_extra_devices(fs_info, fs_devices, 1);
+ btrfs_close_extra_devices(fs_devices, 1);
ret = btrfs_sysfs_add_one(fs_info);
if (ret) {
@@ -2806,7 +2886,7 @@ retry_root_backup:
goto fail_sysfs;
}
- ret = btrfs_read_block_groups(extent_root);
+ ret = btrfs_read_block_groups(fs_info->extent_root);
if (ret) {
printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
goto fail_sysfs;
@@ -2864,48 +2944,11 @@ retry_root_backup:
/* do not make disk changes in broken FS */
if (btrfs_super_log_root(disk_super) != 0) {
- u64 bytenr = btrfs_super_log_root(disk_super);
-
- if (fs_devices->rw_devices == 0) {
- printk(KERN_WARNING "BTRFS: log replay required "
- "on RO media\n");
- err = -EIO;
- goto fail_qgroup;
- }
-
- log_tree_root = btrfs_alloc_root(fs_info);
- if (!log_tree_root) {
- err = -ENOMEM;
- goto fail_qgroup;
- }
-
- __setup_root(nodesize, sectorsize, stripesize,
- log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
-
- log_tree_root->node = read_tree_block(tree_root, bytenr,
- generation + 1);
- if (!log_tree_root->node ||
- !extent_buffer_uptodate(log_tree_root->node)) {
- printk(KERN_ERR "BTRFS: failed to read log tree\n");
- free_extent_buffer(log_tree_root->node);
- kfree(log_tree_root);
- goto fail_qgroup;
- }
- /* returns with log_tree_root freed on success */
- ret = btrfs_recover_log_trees(log_tree_root);
+ ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) {
- btrfs_error(tree_root->fs_info, ret,
- "Failed to recover log tree");
- free_extent_buffer(log_tree_root->node);
- kfree(log_tree_root);
+ err = ret;
goto fail_qgroup;
}
-
- if (sb->s_flags & MS_RDONLY) {
- ret = btrfs_commit_super(tree_root);
- if (ret)
- goto fail_qgroup;
- }
}
ret = btrfs_find_orphan_roots(tree_root);
@@ -2966,7 +3009,7 @@ retry_root_backup:
btrfs_qgroup_rescan_resume(fs_info);
- if (create_uuid_tree) {
+ if (!fs_info->uuid_root) {
pr_info("BTRFS: creating UUID tree\n");
ret = btrfs_create_uuid_tree(fs_info);
if (ret) {
@@ -2975,8 +3018,9 @@ retry_root_backup:
close_ctree(tree_root);
return ret;
}
- } else if (check_uuid_tree ||
- btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
+ } else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) ||
+ fs_info->generation !=
+ btrfs_super_uuid_tree_generation(disk_super)) {
pr_info("BTRFS: checking UUID tree\n");
ret = btrfs_check_uuid_tree(fs_info);
if (ret) {
@@ -3668,7 +3712,7 @@ void close_ctree(struct btrfs_root *root)
if (!(fs_info->sb->s_flags & MS_RDONLY)) {
ret = btrfs_commit_super(root);
if (ret)
- btrfs_err(root->fs_info, "commit super ret %d", ret);
+ btrfs_err(fs_info, "commit super ret %d", ret);
}
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
@@ -3680,10 +3724,10 @@ void close_ctree(struct btrfs_root *root)
fs_info->closing = 2;
smp_mb();
- btrfs_free_qgroup_config(root->fs_info);
+ btrfs_free_qgroup_config(fs_info);
if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
- btrfs_info(root->fs_info, "at unmount delalloc count %lld",
+ btrfs_info(fs_info, "at unmount delalloc count %lld",
percpu_counter_sum(&fs_info->delalloc_bytes));
}
@@ -3723,7 +3767,7 @@ void close_ctree(struct btrfs_root *root)
btrfs_free_stripe_hash_table(fs_info);
- btrfs_free_block_rsv(root, root->orphan_block_rsv);
+ __btrfs_free_block_rsv(root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
lock_chunks(root);
@@ -4134,7 +4178,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
while (start <= end) {
- eb = btrfs_find_tree_block(root, start);
+ eb = btrfs_find_tree_block(root->fs_info, start);
start += root->nodesize;
if (!eb)
continue;
@@ -4285,7 +4329,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
return 0;
}
-static struct extent_io_ops btree_extent_io_ops = {
+static const struct extent_io_ops btree_extent_io_ops = {
.readpage_end_io_hook = btree_readpage_end_io_hook,
.readpage_io_failed_hook = btree_io_failed_hook,
.submit_bio_hook = btree_submit_bio_hook,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 27d44c0fd236..d4cbfeeeedd4 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -52,7 +52,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr);
void clean_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf);
+ struct btrfs_fs_info *fs_info, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
@@ -61,7 +61,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_commit_super(struct btrfs_root *root);
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
+struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
struct btrfs_key *location);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 37d164540c3a..8d052209f473 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
static struct dentry *btrfs_get_parent(struct dentry *child)
{
- struct inode *dir = child->d_inode;
+ struct inode *dir = d_inode(child);
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -220,8 +220,8 @@ fail:
static int btrfs_get_name(struct dentry *parent, char *name,
struct dentry *child)
{
- struct inode *inode = child->d_inode;
- struct inode *dir = parent->d_inode;
+ struct inode *inode = d_inode(child);
+ struct inode *dir = d_inode(parent);
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_inode_ref *iref;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 8b353ad02f03..7effed6f2fa6 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2538,6 +2538,12 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
* list before we release it.
*/
if (btrfs_delayed_ref_is_head(ref)) {
+ if (locked_ref->is_data &&
+ locked_ref->total_ref_mod < 0) {
+ spin_lock(&delayed_refs->lock);
+ delayed_refs->pending_csums -= ref->num_bytes;
+ spin_unlock(&delayed_refs->lock);
+ }
btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
}
@@ -2561,8 +2567,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
spin_lock(&delayed_refs->lock);
avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
- avg = div64_u64(avg, 4);
- fs_info->avg_delayed_ref_runtime = avg;
+ fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
spin_unlock(&delayed_refs->lock);
}
return 0;
@@ -2624,7 +2629,26 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
* We don't ever fill up leaves all the way so multiply by 2 just to be
* closer to what we're really going to want to use.
*/
- return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+ return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+}
+
+/*
+ * Takes the number of bytes to be checksummed and figures out how many leaves it
+ * would require to store the csums for that many bytes.
+ */
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
+{
+ u64 csum_size;
+ u64 num_csums_per_leaf;
+ u64 num_csums;
+
+ csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+ num_csums_per_leaf = div64_u64(csum_size,
+ (u64)btrfs_super_csum_size(root->fs_info->super_copy));
+ num_csums = div64_u64(csum_bytes, root->sectorsize);
+ num_csums += num_csums_per_leaf - 1;
+ num_csums = div64_u64(num_csums, num_csums_per_leaf);
+ return num_csums;
}
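+
+/*
+ * Rough illustration (assuming a 4KiB sectorsize, 4-byte crc32c csums
+ * and 16KiB leaves): a leaf holds on the order of 4k csum entries, so
+ * one leaf of csums covers roughly 16MiB of data; the math above just
+ * rounds the csum count up to whole leaves.
+ */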
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
@@ -2632,7 +2656,9 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
{
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
- u64 num_bytes;
+ u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
+ u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
+ u64 num_bytes, num_dirty_bgs_bytes;
int ret = 0;
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
@@ -2640,17 +2666,22 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
if (num_heads > 1)
num_bytes += (num_heads - 1) * root->nodesize;
num_bytes <<= 1;
+ num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
+ num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
+ num_dirty_bgs);
global_rsv = &root->fs_info->global_block_rsv;
/*
* If we can't allocate any more chunks lets make sure we have _lots_ of
* wiggle room since running delayed refs can create more delayed refs.
*/
- if (global_rsv->space_info->full)
+ if (global_rsv->space_info->full) {
+ num_dirty_bgs_bytes <<= 1;
num_bytes <<= 1;
+ }
spin_lock(&global_rsv->lock);
- if (global_rsv->reserved <= num_bytes)
+ if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
ret = 1;
spin_unlock(&global_rsv->lock);
return ret;
@@ -3147,10 +3178,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(path);
fail:
- if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_release_path(path);
return ret;
}
@@ -3193,7 +3222,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
struct inode *inode = NULL;
u64 alloc_hint = 0;
int dcs = BTRFS_DC_ERROR;
- int num_pages = 0;
+ u64 num_pages = 0;
int retries = 0;
int ret = 0;
@@ -3267,15 +3296,14 @@ again:
if (ret)
goto out_put;
- ret = btrfs_truncate_free_space_cache(root, trans, inode);
+ ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
if (ret)
goto out_put;
}
spin_lock(&block_group->lock);
if (block_group->cached != BTRFS_CACHE_FINISHED ||
- !btrfs_test_opt(root, SPACE_CACHE) ||
- block_group->delalloc_bytes) {
+ !btrfs_test_opt(root, SPACE_CACHE)) {
/*
* don't bother trying to write stuff out _if_
* a) we're not cached,
@@ -3293,14 +3321,14 @@ again:
* taking up quite a bit since it's not folded into the other space
* cache.
*/
- num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
+ num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
if (!num_pages)
num_pages = 1;
num_pages *= 16;
num_pages *= PAGE_CACHE_SIZE;
- ret = btrfs_check_data_free_space(inode, num_pages);
+ ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
if (ret)
goto out_put;
@@ -3351,16 +3379,188 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
return 0;
}
+/*
+ * transaction commit does final block group cache writeback during a
+ * critical section where nothing is allowed to change the FS. This is
+ * required in order for the cache to actually match the block group,
+ * but can introduce a lot of latency into the commit.
+ *
+ * So, btrfs_start_dirty_block_groups is here to kick off block group
+ * cache IO. There's a chance we'll have to redo some of it if the
+ * block group changes again during the commit, but it greatly reduces
+ * the commit latency by getting rid of the easy block groups while
+ * we're still allowing others to join the commit.
+ */
+int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_block_group_cache *cache;
+ struct btrfs_transaction *cur_trans = trans->transaction;
+ int ret = 0;
+ int should_put;
+ struct btrfs_path *path = NULL;
+ LIST_HEAD(dirty);
+ struct list_head *io = &cur_trans->io_bgs;
+ int num_started = 0;
+ int loops = 0;
+
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ if (list_empty(&cur_trans->dirty_bgs)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ return 0;
+ }
+ list_splice_init(&cur_trans->dirty_bgs, &dirty);
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+
+again:
+ /*
+ * make sure all the block groups on our dirty list actually
+ * exist
+ */
+ btrfs_create_pending_block_groups(trans, root);
+
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ }
+
+ /*
+ * cache_write_mutex is here only to save us from balance or automatic
+ * removal of empty block groups deleting this block group while we are
+ * writing out the cache
+ */
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ while (!list_empty(&dirty)) {
+ cache = list_first_entry(&dirty,
+ struct btrfs_block_group_cache,
+ dirty_list);
+ /*
+ * this can happen if something re-dirties a block
+ * group that is already under IO. Just wait for it to
+ * finish and then do it all again
+ */
+ if (!list_empty(&cache->io_list)) {
+ list_del_init(&cache->io_list);
+ btrfs_wait_cache_io(root, trans, cache,
+ &cache->io_ctl, path,
+ cache->key.objectid);
+ btrfs_put_block_group(cache);
+ }
+
+ /*
+ * btrfs_wait_cache_io uses the cache->dirty_list to decide
+ * if it should update the cache_state. Don't delete
+ * until after we wait.
+ *
+ * Since we're not running in the commit critical section
+ * we need the dirty_bgs_lock to protect from update_block_group
+ */
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ list_del_init(&cache->dirty_list);
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+
+ should_put = 1;
+
+ cache_save_setup(cache, trans, path);
+
+ if (cache->disk_cache_state == BTRFS_DC_SETUP) {
+ cache->io_ctl.inode = NULL;
+ ret = btrfs_write_out_cache(root, trans, cache, path);
+ if (ret == 0 && cache->io_ctl.inode) {
+ num_started++;
+ should_put = 0;
+
+ /*
+ * the cache_write_mutex is protecting
+ * the io_list
+ */
+ list_add_tail(&cache->io_list, io);
+ } else {
+ /*
+ * if we failed to write the cache, the
+ * generation will be bad and life goes on
+ */
+ ret = 0;
+ }
+ }
+ if (!ret) {
+ ret = write_one_cache_group(trans, root, path, cache);
+ /*
+ * Our block group might still be attached to the list
+ * of new block groups in the transaction handle of some
+ * other task (struct btrfs_trans_handle->new_bgs). This
+ * means its block group item isn't yet in the extent
+ * tree. If this happens ignore the error, as we will
+ * try again later in the critical section of the
+ * transaction commit.
+ */
+ if (ret == -ENOENT) {
+ ret = 0;
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ if (list_empty(&cache->dirty_list)) {
+ list_add_tail(&cache->dirty_list,
+ &cur_trans->dirty_bgs);
+ btrfs_get_block_group(cache);
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ } else if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ }
+ }
+
+ /* if it's not on the io list, we need to put the block group */
+ if (should_put)
+ btrfs_put_block_group(cache);
+
+ if (ret)
+ break;
+
+ /*
+ * Avoid blocking other tasks for too long. It might even save
+ * us from writing caches for block groups that are going to be
+ * removed.
+ */
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ }
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+
+ /*
+ * go through delayed refs for all the stuff we've just kicked off
+ * and then loop back (just once)
+ */
+ ret = btrfs_run_delayed_refs(trans, root, 0);
+ if (!ret && loops == 0) {
+ loops++;
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ list_splice_init(&cur_trans->dirty_bgs, &dirty);
+ /*
+ * dirty_bgs_lock protects us from concurrent block group
+ * deletes too (not just cache_write_mutex).
+ */
+ if (!list_empty(&dirty)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ goto again;
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ }
+
+ btrfs_free_path(path);
+ return ret;
+}
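+
+/*
+ * The loop above makes at most two passes: the dirty list is spliced
+ * away under dirty_bgs_lock, each cache is written out here, outside
+ * the commit critical section, and after one round of delayed refs any
+ * block group that got re-dirtied is retried once via the "again"
+ * label. Whatever is still dirty at commit time is handled by
+ * btrfs_write_dirty_block_groups() below.
+ */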
+
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_block_group_cache *cache;
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
+ int should_put;
struct btrfs_path *path;
-
- if (list_empty(&cur_trans->dirty_bgs))
- return 0;
+ struct list_head *io = &cur_trans->io_bgs;
+ int num_started = 0;
path = btrfs_alloc_path();
if (!path)
@@ -3376,16 +3576,64 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
cache = list_first_entry(&cur_trans->dirty_bgs,
struct btrfs_block_group_cache,
dirty_list);
+
+ /*
+ * this can happen if cache_save_setup re-dirties a block
+ * group that is already under IO. Just wait for it to
+ * finish and then do it all again
+ */
+ if (!list_empty(&cache->io_list)) {
+ list_del_init(&cache->io_list);
+ btrfs_wait_cache_io(root, trans, cache,
+ &cache->io_ctl, path,
+ cache->key.objectid);
+ btrfs_put_block_group(cache);
+ }
+
+ /*
+ * don't remove from the dirty list until after we've waited
+ * on any pending IO
+ */
list_del_init(&cache->dirty_list);
- if (cache->disk_cache_state == BTRFS_DC_CLEAR)
- cache_save_setup(cache, trans, path);
- if (!ret)
- ret = btrfs_run_delayed_refs(trans, root,
- (unsigned long) -1);
- if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP)
- btrfs_write_out_cache(root, trans, cache, path);
+ should_put = 1;
+
+ cache_save_setup(cache, trans, path);
+
if (!ret)
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
+
+ if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
+ cache->io_ctl.inode = NULL;
+ ret = btrfs_write_out_cache(root, trans, cache, path);
+ if (ret == 0 && cache->io_ctl.inode) {
+ num_started++;
+ should_put = 0;
+ list_add_tail(&cache->io_list, io);
+ } else {
+ /*
+ * if we failed to write the cache, the
+ * generation will be bad and life goes on
+ */
+ ret = 0;
+ }
+ }
+ if (!ret) {
ret = write_one_cache_group(trans, root, path, cache);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
+ }
+
+ /* if it's not on the io list, we need to put the block group */
+ if (should_put)
+ btrfs_put_block_group(cache);
+ }
+
+ while (!list_empty(io)) {
+ cache = list_first_entry(io, struct btrfs_block_group_cache,
+ io_list);
+ list_del_init(&cache->io_list);
+ btrfs_wait_cache_io(root, trans, cache,
+ &cache->io_ctl, path, cache->key.objectid);
btrfs_put_block_group(cache);
}
@@ -3635,19 +3883,21 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
* This will check the space that the inode allocates from to make sure we have
* enough space for bytes.
*/
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
{
struct btrfs_space_info *data_sinfo;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
u64 used;
- int ret = 0, committed = 0, alloc_chunk = 1;
+ int ret = 0;
+ int need_commit = 2;
+ int have_pinned_space;
/* make sure bytes are sectorsize aligned */
bytes = ALIGN(bytes, root->sectorsize);
if (btrfs_is_free_space_inode(inode)) {
- committed = 1;
+ need_commit = 0;
ASSERT(current->journal_info);
}
@@ -3669,7 +3919,7 @@ again:
* if we don't have enough free bytes in this space then we need
* to alloc a new chunk.
*/
- if (!data_sinfo->full && alloc_chunk) {
+ if (!data_sinfo->full) {
u64 alloc_target;
data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
@@ -3697,8 +3947,10 @@ alloc:
if (ret < 0) {
if (ret != -ENOSPC)
return ret;
- else
+ else {
+ have_pinned_space = 1;
goto commit_trans;
+ }
}
if (!data_sinfo)
@@ -3709,26 +3961,39 @@ alloc:
/*
* If we don't have enough pinned space to deal with this
- * allocation don't bother committing the transaction.
+ * allocation, and no chunk was removed in the current transaction,
+ * don't bother committing the transaction.
*/
- if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
- bytes) < 0)
- committed = 1;
+ have_pinned_space = percpu_counter_compare(
+ &data_sinfo->total_bytes_pinned,
+ used + bytes - data_sinfo->total_bytes);
spin_unlock(&data_sinfo->lock);
/* commit the current transaction and try again */
commit_trans:
- if (!committed &&
+ if (need_commit &&
!atomic_read(&root->fs_info->open_ioctl_trans)) {
- committed = 1;
+ need_commit--;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans, root);
- if (ret)
- return ret;
- goto again;
+ if (have_pinned_space >= 0 ||
+ trans->transaction->have_free_bgs ||
+ need_commit > 0) {
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret)
+ return ret;
+ /*
+ * make sure that all running delayed iputs are
+ * done
+ */
+ down_write(&root->fs_info->delayed_iput_sem);
+ up_write(&root->fs_info->delayed_iput_sem);
+ goto again;
+ } else {
+ btrfs_end_transaction(trans, root);
+ }
}
trace_btrfs_space_reservation(root->fs_info,
@@ -3736,12 +4001,16 @@ commit_trans:
data_sinfo->flags, bytes, 1);
return -ENOSPC;
}
+ ret = btrfs_qgroup_reserve(root, write_bytes);
+ if (ret)
+ goto out;
data_sinfo->bytes_may_use += bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
data_sinfo->flags, bytes, 1);
+out:
spin_unlock(&data_sinfo->lock);
- return 0;
+ return ret;
}
/*
@@ -4298,8 +4567,13 @@ out:
static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
struct btrfs_fs_info *fs_info, u64 used)
{
- return (used >= div_factor_fine(space_info->total_bytes, 98) &&
- !btrfs_fs_closing(fs_info) &&
+ u64 thresh = div_factor_fine(space_info->total_bytes, 98);
+
+ /* If we're just plain full then async reclaim just slows us down. */
+ if (space_info->bytes_used >= thresh)
+ return 0;
+
+ return (used >= thresh && !btrfs_fs_closing(fs_info) &&
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
@@ -4354,10 +4628,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
if (!btrfs_need_do_async_reclaim(space_info, fs_info,
flush_state))
return;
- } while (flush_state <= COMMIT_TRANS);
-
- if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
- queue_work(system_unbound_wq, work);
+ } while (flush_state < COMMIT_TRANS);
}
void btrfs_init_async_reclaim_work(struct work_struct *work)
@@ -4700,6 +4971,11 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
kfree(rsv);
}
+void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
+{
+ kfree(rsv);
+}
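+
+/*
+ * Root-less variant of btrfs_free_block_rsv() that only frees the
+ * structure; close_ctree() switches to it for the orphan block rsv,
+ * presumably because releasing reserved space through a root is
+ * neither needed nor safe that late in teardown.
+ */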
+
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush)
@@ -4812,10 +5088,10 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
csum_size * 2;
- num_bytes += div64_u64(data_used + meta_used, 50);
+ num_bytes += div_u64(data_used + meta_used, 50);
if (num_bytes * 3 > meta_used)
- num_bytes = div64_u64(meta_used, 3);
+ num_bytes = div_u64(meta_used, 3);
return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
}
@@ -4998,8 +5274,6 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
u64 qgroup_reserved)
{
btrfs_block_rsv_release(root, rsv, (u64)-1);
- if (qgroup_reserved)
- btrfs_qgroup_free(root, qgroup_reserved);
}
/**
@@ -5066,30 +5340,18 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
int reserve)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 csum_size;
- int num_csums_per_leaf;
- int num_csums;
- int old_csums;
+ u64 old_csums, num_csums;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
BTRFS_I(inode)->csum_bytes == 0)
return 0;
- old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+ old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
if (reserve)
BTRFS_I(inode)->csum_bytes += num_bytes;
else
BTRFS_I(inode)->csum_bytes -= num_bytes;
- csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
- num_csums_per_leaf = (int)div64_u64(csum_size,
- sizeof(struct btrfs_csum_item) +
- sizeof(struct btrfs_disk_key));
- num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
- num_csums = num_csums + num_csums_per_leaf - 1;
- num_csums = num_csums / num_csums_per_leaf;
-
- old_csums = old_csums + num_csums_per_leaf - 1;
- old_csums = old_csums / num_csums_per_leaf;
+ num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
/* No change, no need to reserve more */
if (old_csums == num_csums)
@@ -5163,8 +5425,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
spin_unlock(&BTRFS_I(inode)->lock);
if (root->fs_info->quota_enabled) {
- ret = btrfs_qgroup_reserve(root, num_bytes +
- nr_extents * root->nodesize);
+ ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
if (ret)
goto out_fail;
}
@@ -5172,8 +5433,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
if (unlikely(ret)) {
if (root->fs_info->quota_enabled)
- btrfs_qgroup_free(root, num_bytes +
- nr_extents * root->nodesize);
+ btrfs_qgroup_free(root, nr_extents * root->nodesize);
goto out_fail;
}
@@ -5290,10 +5550,6 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
- if (root->fs_info->quota_enabled) {
- btrfs_qgroup_free(root, num_bytes +
- dropped * root->nodesize);
- }
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
@@ -5318,7 +5574,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
int ret;
- ret = btrfs_check_data_free_space(inode, num_bytes);
+ ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
if (ret)
return ret;
@@ -5390,14 +5646,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
if (!alloc && cache->cached == BTRFS_CACHE_NO)
cache_block_group(cache, 1);
- spin_lock(&trans->transaction->dirty_bgs_lock);
- if (list_empty(&cache->dirty_list)) {
- list_add_tail(&cache->dirty_list,
- &trans->transaction->dirty_bgs);
- btrfs_get_block_group(cache);
- }
- spin_unlock(&trans->transaction->dirty_bgs_lock);
-
byte_in_group = bytenr - cache->key.objectid;
WARN_ON(byte_in_group > cache->key.offset);
@@ -5446,6 +5694,16 @@ static int update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&info->unused_bgs_lock);
}
}
+
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ if (list_empty(&cache->dirty_list)) {
+ list_add_tail(&cache->dirty_list,
+ &trans->transaction->dirty_bgs);
+ trans->transaction->num_dirty_bgs++;
+ btrfs_get_block_group(cache);
+ }
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+
btrfs_put_block_group(cache);
total -= num_bytes;
bytenr += num_bytes;
@@ -6956,15 +7214,15 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
return -ENOSPC;
}
- if (btrfs_test_opt(root, DISCARD))
- ret = btrfs_discard_extent(root, start, len, NULL);
-
if (pin)
pin_down_extent(root, cache, start, len, 1);
else {
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
}
+
btrfs_put_block_group(cache);
trace_btrfs_reserved_extent_free(root, start, len);
@@ -7095,9 +7353,9 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
ins, size);
if (ret) {
+ btrfs_free_path(path);
btrfs_free_and_pin_reserved_extent(root, ins->objectid,
root->nodesize);
- btrfs_free_path(path);
return ret;
}
@@ -7217,7 +7475,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
- clean_tree_block(trans, root, buf);
+ clean_tree_block(trans, root->fs_info, buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
btrfs_set_lock_blocking(buf);
@@ -7311,7 +7569,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
* returns the key for the extent through ins, and a tree buffer for
* the first block of the extent through buf.
*
- * returns the tree buffer or NULL.
+ * returns the tree buffer or an ERR_PTR on error.
*/
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -7322,6 +7580,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_key ins;
struct btrfs_block_rsv *block_rsv;
struct extent_buffer *buf;
+ struct btrfs_delayed_extent_op *extent_op;
u64 flags = 0;
int ret;
u32 blocksize = root->nodesize;
@@ -7342,13 +7601,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
ret = btrfs_reserve_extent(root, blocksize, blocksize,
empty_size, hint, &ins, 0, 0);
- if (ret) {
- unuse_block_rsv(root->fs_info, block_rsv, blocksize);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto out_unuse;
buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
- BUG_ON(IS_ERR(buf)); /* -ENOMEM */
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto out_free_reserved;
+ }
if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent == 0)
@@ -7358,9 +7618,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
BUG_ON(parent > 0);
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
- struct btrfs_delayed_extent_op *extent_op;
extent_op = btrfs_alloc_delayed_extent_op();
- BUG_ON(!extent_op); /* -ENOMEM */
+ if (!extent_op) {
+ ret = -ENOMEM;
+ goto out_free_buf;
+ }
if (key)
memcpy(&extent_op->key, key, sizeof(extent_op->key));
else
@@ -7375,13 +7637,24 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
extent_op->level = level;
ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
- ins.objectid,
- ins.offset, parent, root_objectid,
- level, BTRFS_ADD_DELAYED_EXTENT,
- extent_op, 0);
- BUG_ON(ret); /* -ENOMEM */
+ ins.objectid, ins.offset,
+ parent, root_objectid, level,
+ BTRFS_ADD_DELAYED_EXTENT,
+ extent_op, 0);
+ if (ret)
+ goto out_free_delayed;
}
return buf;
+
+out_free_delayed:
+ btrfs_free_delayed_extent_op(extent_op);
+out_free_buf:
+ free_extent_buffer(buf);
+out_free_reserved:
+ btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
+out_unuse:
+ unuse_block_rsv(root->fs_info, block_rsv, blocksize);
+ return ERR_PTR(ret);
}
struct walk_control {
@@ -7815,7 +8088,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
blocksize = root->nodesize;
- next = btrfs_find_tree_block(root, bytenr);
+ next = btrfs_find_tree_block(root->fs_info, bytenr);
if (!next) {
next = btrfs_find_create_tree_block(root, bytenr);
if (!next)
@@ -8016,7 +8289,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
btrfs_set_lock_blocking(eb);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
}
- clean_tree_block(trans, root, eb);
+ clean_tree_block(trans, root->fs_info, eb);
}
if (eb == root->node) {
@@ -8533,10 +8806,30 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
BUG_ON(cache->ro);
+again:
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
+ /*
+ * we're not allowed to set block groups readonly after the dirty
+ * block groups cache has started writing. If it already started,
+ * back off and let this transaction commit
+ */
+ mutex_lock(&root->fs_info->ro_block_group_mutex);
+ if (trans->transaction->dirty_bg_run) {
+ u64 transid = trans->transid;
+
+ mutex_unlock(&root->fs_info->ro_block_group_mutex);
+ btrfs_end_transaction(trans, root);
+
+ ret = btrfs_wait_for_commit(root, transid);
+ if (ret)
+ return ret;
+ goto again;
+ }
+
ret = set_block_group_ro(cache, 0);
if (!ret)
goto out;
@@ -8551,6 +8844,7 @@ out:
alloc_flags = update_block_group_flags(root, cache->flags);
check_system_chunk(trans, root, alloc_flags);
}
+ mutex_unlock(&root->fs_info->ro_block_group_mutex);
btrfs_end_transaction(trans, root);
return ret;
@@ -8720,7 +9014,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
min_free <<= 1;
} else if (index == BTRFS_RAID_RAID0) {
dev_min = fs_devices->rw_devices;
- do_div(min_free, dev_min);
+ min_free = div64_u64(min_free, dev_min);
}
/* We need to do this so that we can look at pending chunks */
@@ -8992,6 +9286,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
INIT_LIST_HEAD(&cache->bg_list);
INIT_LIST_HEAD(&cache->ro_list);
INIT_LIST_HEAD(&cache->dirty_list);
+ INIT_LIST_HEAD(&cache->io_list);
btrfs_init_free_space_ctl(cache);
atomic_set(&cache->trimming, 0);
@@ -9355,7 +9650,38 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
goto out;
}
+ /*
+ * get the inode first so any iput calls done for the io_list
+ * aren't the final iput (no unlinks allowed now)
+ */
inode = lookup_free_space_inode(tree_root, block_group, path);
+
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ /*
+ * make sure our free space cache IO is done before removing the
+ * free space inode
+ */
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ if (!list_empty(&block_group->io_list)) {
+ list_del_init(&block_group->io_list);
+
+ WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
+
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+ btrfs_wait_cache_io(root, trans, block_group,
+ &block_group->io_ctl, path,
+ block_group->key.objectid);
+ btrfs_put_block_group(block_group);
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ }
+
+ if (!list_empty(&block_group->dirty_list)) {
+ list_del_init(&block_group->dirty_list);
+ btrfs_put_block_group(block_group);
+ }
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+
if (!IS_ERR(inode)) {
ret = btrfs_orphan_add(trans, inode);
if (ret) {
@@ -9448,18 +9774,29 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&trans->transaction->dirty_bgs_lock);
if (!list_empty(&block_group->dirty_list)) {
- list_del_init(&block_group->dirty_list);
- btrfs_put_block_group(block_group);
+ WARN_ON(1);
+ }
+ if (!list_empty(&block_group->io_list)) {
+ WARN_ON(1);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
-
btrfs_remove_free_space_cache(block_group);
spin_lock(&block_group->space_info->lock);
list_del_init(&block_group->ro_list);
+
+ if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ WARN_ON(block_group->space_info->total_bytes
+ < block_group->key.offset);
+ WARN_ON(block_group->space_info->bytes_readonly
+ < block_group->key.offset);
+ WARN_ON(block_group->space_info->disk_total
+ < block_group->key.offset * factor);
+ }
block_group->space_info->total_bytes -= block_group->key.offset;
block_group->space_info->bytes_readonly -= block_group->key.offset;
block_group->space_info->disk_total -= block_group->key.offset * factor;
+
spin_unlock(&block_group->space_info->lock);
memcpy(&key, &block_group->key, sizeof(key));
@@ -9647,8 +9984,18 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
/* Reset pinned so btrfs_put_block_group doesn't complain */
+ spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+
+ space_info->bytes_pinned -= block_group->pinned;
+ space_info->bytes_readonly += block_group->pinned;
+ percpu_counter_add(&space_info->total_bytes_pinned,
+ -block_group->pinned);
block_group->pinned = 0;
+ spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
+
/*
* Btrfs_remove_chunk will abort the transaction if things go
* horribly wrong.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d688cfe5d496..c32d226bfecc 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4514,8 +4514,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
em_len, flags);
- if (ret)
+ if (ret) {
+ if (ret == 1)
+ ret = 0;
goto out_free;
+ }
}
out_free:
free_extent_map(em);
@@ -4557,36 +4560,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
do {
index--;
page = eb->pages[index];
- if (page && mapped) {
+ if (!page)
+ continue;
+ if (mapped)
spin_lock(&page->mapping->private_lock);
+ /*
+ * We do this since we'll remove the pages after we've
+ * removed the eb from the radix tree, so we could race
+ * and have this page now attached to the new eb. So
+ * only clear page_private if it's still connected to
+ * this eb.
+ */
+ if (PagePrivate(page) &&
+ page->private == (unsigned long)eb) {
+ BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+ BUG_ON(PageDirty(page));
+ BUG_ON(PageWriteback(page));
/*
- * We do this since we'll remove the pages after we've
- * removed the eb from the radix tree, so we could race
- * and have this page now attached to the new eb. So
- * only clear page_private if it's still connected to
- * this eb.
+ * We need to make sure we haven't been attached
+ * to a new eb.
*/
- if (PagePrivate(page) &&
- page->private == (unsigned long)eb) {
- BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
- BUG_ON(PageDirty(page));
- BUG_ON(PageWriteback(page));
- /*
- * We need to make sure we haven't be attached
- * to a new eb.
- */
- ClearPagePrivate(page);
- set_page_private(page, 0);
- /* One for the page private */
- page_cache_release(page);
- }
- spin_unlock(&page->mapping->private_lock);
-
- }
- if (page) {
- /* One for when we alloced the page */
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ /* One for the page private */
page_cache_release(page);
}
+
+ if (mapped)
+ spin_unlock(&page->mapping->private_lock);
+
+ /* One for when we allocated the page */
+ page_cache_release(page);
} while (index != 0);
}
@@ -4768,6 +4772,25 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
start >> PAGE_CACHE_SHIFT);
if (eb && atomic_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
+ /*
+ * Lock our eb's refs_lock to avoid races with
+ * free_extent_buffer. When we get our eb it might be flagged
+ * with EXTENT_BUFFER_STALE and another task running
+ * free_extent_buffer might have seen that flag set,
+ * eb->refs == 2, that the buffer isn't under IO (dirty and
+ * writeback flags not set) and it's still in the tree (flag
+ * EXTENT_BUFFER_TREE_REF set), therefore being in the process
+ * of decrementing the extent buffer's reference count twice.
+ * So here we could race and increment the eb's reference count,
+ * clear its stale flag, mark it as dirty and drop our reference
+ * before the other task finishes executing free_extent_buffer,
+ * which would later result in an attempt to free an extent
+ * buffer that is dirty.
+ */
+ if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
+ spin_lock(&eb->refs_lock);
+ spin_unlock(&eb->refs_lock);
+ }
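+ /*
+ * The empty lock/unlock pair above is intentional: it simply
+ * serializes us against a free_extent_buffer() that may already
+ * hold refs_lock, so that it observes our incremented refcount
+ * before deciding whether the stale buffer can be released.
+ */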
mark_extent_buffer_accessed(eb, NULL);
return eb;
}
@@ -4867,6 +4890,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
mark_extent_buffer_accessed(exists, p);
goto free_eb;
}
+ exists = NULL;
/*
* Do this so attach doesn't complain and we need to
@@ -4930,12 +4954,12 @@ again:
return eb;
free_eb:
+ WARN_ON(!atomic_dec_and_test(&eb->refs));
for (i = 0; i < num_pages; i++) {
if (eb->pages[i])
unlock_page(eb->pages[i]);
}
- WARN_ON(!atomic_dec_and_test(&eb->refs));
btrfs_release_extent_buffer(eb);
return exists;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 695b0ccfb755..c668f36898d3 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -97,7 +97,7 @@ struct extent_io_tree {
u64 dirty_bytes;
int track_uptodate;
spinlock_t lock;
- struct extent_io_ops *ops;
+ const struct extent_io_ops *ops;
};
struct extent_state {
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 84a2d1868271..58ece6558430 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -185,8 +185,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
if (!dst) {
if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
- btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
- GFP_NOFS);
+ btrfs_bio->csum_allocated = kmalloc_array(nblocks,
+ csum_size, GFP_NOFS);
if (!btrfs_bio->csum_allocated) {
btrfs_free_path(path);
return -ENOMEM;
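A short sketch of what kmalloc_array() buys over the open-coded multiply; the overflow check shown is only an illustration of the helper's internal guard:

	/* preferred: fails with NULL if nblocks * csum_size would overflow */
	ptr = kmalloc_array(nblocks, csum_size, GFP_NOFS);

	/* roughly equivalent, open-coded: */
	if (csum_size && nblocks > SIZE_MAX / csum_size)
		ptr = NULL;			/* would wrap around */
	else
		ptr = kmalloc(nblocks * csum_size, GFP_NOFS);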
@@ -553,7 +553,7 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
btrfs_truncate_item(root, path, new_size, 0);
key->offset = end_byte;
- btrfs_set_item_key_safe(root, path, key);
+ btrfs_set_item_key_safe(root->fs_info, path, key);
} else {
BUG();
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 30982bbd31c3..b072e17479aa 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -24,7 +24,6 @@
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
-#include <linux/aio.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
@@ -32,6 +31,7 @@
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
+#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -273,11 +273,7 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
defrag = rb_entry(node, struct inode_defrag, rb_node);
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- if (need_resched()) {
- spin_unlock(&fs_info->defrag_inodes_lock);
- cond_resched();
- spin_lock(&fs_info->defrag_inodes_lock);
- }
+ cond_resched_lock(&fs_info->defrag_inodes_lock);
node = rb_first(&fs_info->defrag_inodes);
}
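A sketch of what cond_resched_lock() does, to show it is a drop-in for the removed open-coded sequence; this is the approximate behaviour of the generic helper, not btrfs code:

	/* cond_resched_lock(&lock) is roughly: */
	if (need_resched() || spin_needbreak(&lock)) {
		spin_unlock(&lock);
		cond_resched();
		spin_lock(&lock);
		/* and it returns 1 to report that the lock was dropped */
	}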
@@ -868,7 +864,7 @@ next_slot:
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = end;
- btrfs_set_item_key_safe(root, path, &new_key);
+ btrfs_set_item_key_safe(root->fs_info, path, &new_key);
extent_offset += end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
@@ -1126,7 +1122,7 @@ again:
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
- btrfs_set_item_key_safe(root, path, &new_key);
+ btrfs_set_item_key_safe(root->fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
@@ -1160,7 +1156,7 @@ again:
trans->transid);
path->slots[0]++;
new_key.offset = start;
- btrfs_set_item_key_safe(root, path, &new_key);
+ btrfs_set_item_key_safe(root->fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -1485,7 +1481,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
PAGE_CACHE_SIZE / (sizeof(struct page *)));
nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
nrptrs = max(nrptrs, 8);
- pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+ pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -1514,7 +1510,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
}
reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
- ret = btrfs_check_data_free_space(inode, reserve_bytes);
+ ret = btrfs_check_data_free_space(inode, reserve_bytes, write_bytes);
if (ret == -ENOSPC &&
(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
BTRFS_INODE_PREALLOC))) {
@@ -1635,8 +1631,8 @@ again:
btrfs_end_write_no_snapshoting(root);
if (only_release_metadata && copied > 0) {
- u64 lockstart = round_down(pos, root->sectorsize);
- u64 lockend = lockstart +
+ lockstart = round_down(pos, root->sectorsize);
+ lockend = lockstart +
(dirty_pages << PAGE_CACHE_SHIFT) - 1;
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
@@ -1739,27 +1735,19 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
u64 start_pos;
u64 end_pos;
ssize_t num_written = 0;
- ssize_t err = 0;
- size_t count = iov_iter_count(from);
bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
- loff_t pos = iocb->ki_pos;
+ ssize_t err;
+ loff_t pos;
+ size_t count;
mutex_lock(&inode->i_mutex);
-
- current->backing_dev_info = inode_to_bdi(inode);
- err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
- if (err) {
+ err = generic_write_checks(iocb, from);
+ if (err <= 0) {
mutex_unlock(&inode->i_mutex);
- goto out;
- }
-
- if (count == 0) {
- mutex_unlock(&inode->i_mutex);
- goto out;
+ return err;
}
- iov_iter_truncate(from, count);
-
+ current->backing_dev_info = inode_to_bdi(inode);
err = file_remove_suid(file);
if (err) {
mutex_unlock(&inode->i_mutex);
@@ -1786,6 +1774,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
*/
update_time_for_write(inode);
+ pos = iocb->ki_pos;
+ count = iov_iter_count(from);
start_pos = round_down(pos, root->sectorsize);
if (start_pos > i_size_read(inode)) {
/* Expand hole size to cover write data, preventing empty gap */
@@ -1800,7 +1790,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
if (sync)
atomic_inc(&BTRFS_I(inode)->sync_writers);
- if (file->f_flags & O_DIRECT) {
+ if (iocb->ki_flags & IOCB_DIRECT) {
num_written = __btrfs_direct_write(iocb, from, pos);
} else {
num_written = __btrfs_buffered_write(file, from, pos);
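A hedged sketch of the iocb-centric interface this hunk converts to; the return convention shown is inferred from the err <= 0 test above and from generic_write_checks() now taking the kiocb/iov_iter pair:

	/* truncates 'from' itself; returns bytes to write, 0, or -errno */
	ssize_t count = generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	/* O_DIRECT is likewise carried in the kiocb now */
	if (iocb->ki_flags & IOCB_DIRECT)
		num_written = __btrfs_direct_write(iocb, from, pos);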
@@ -1815,7 +1805,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
* otherwise subsequent syncs to a file that's been synced in this
* transaction will appear to have already occurred.
*/
+ spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->last_sub_trans = root->log_transid;
+ spin_unlock(&BTRFS_I(inode)->lock);
if (num_written > 0) {
err = generic_write_sync(file, pos, num_written);
if (err < 0)
@@ -1870,7 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
@@ -2168,7 +2160,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
u64 num_bytes;
key.offset = offset;
- btrfs_set_item_key_safe(root, path, &key);
+ btrfs_set_item_key_safe(root->fs_info, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@@ -2551,7 +2543,6 @@ static long btrfs_fallocate(struct file *file, int mode,
{
struct inode *inode = file_inode(file);
struct extent_state *cached_state = NULL;
- struct btrfs_root *root = BTRFS_I(inode)->root;
u64 cur_offset;
u64 last_byte;
u64 alloc_start;
@@ -2576,14 +2567,9 @@ static long btrfs_fallocate(struct file *file, int mode,
* Make sure we have enough space before we do the
* allocation.
*/
- ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
+ ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start, alloc_end - alloc_start);
if (ret)
return ret;
- if (root->fs_info->quota_enabled) {
- ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
- if (ret)
- goto out_reserve_fail;
- }
mutex_lock(&inode->i_mutex);
ret = inode_newsize_ok(inode, alloc_end);
@@ -2673,23 +2659,35 @@ static long btrfs_fallocate(struct file *file, int mode,
1 << inode->i_blkbits,
offset + len,
&alloc_hint);
-
- if (ret < 0) {
- free_extent_map(em);
- break;
- }
} else if (actual_end > inode->i_size &&
!(mode & FALLOC_FL_KEEP_SIZE)) {
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
/*
* We didn't need to allocate any more space, but we
* still extended the size of the file so we need to
- * update i_size.
+ * update i_size and the inode item.
*/
- inode->i_ctime = CURRENT_TIME;
- i_size_write(inode, actual_end);
- btrfs_ordered_update_i_size(inode, actual_end, NULL);
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ } else {
+ inode->i_ctime = CURRENT_TIME;
+ i_size_write(inode, actual_end);
+ btrfs_ordered_update_i_size(inode, actual_end,
+ NULL);
+ ret = btrfs_update_inode(trans, root, inode);
+ if (ret)
+ btrfs_end_transaction(trans, root);
+ else
+ ret = btrfs_end_transaction(trans,
+ root);
+ }
}
free_extent_map(em);
+ if (ret < 0)
+ break;
cur_offset = last_byte;
if (cur_offset >= alloc_end) {
@@ -2701,9 +2699,6 @@ static long btrfs_fallocate(struct file *file, int mode,
&cached_state, GFP_NOFS);
out:
mutex_unlock(&inode->i_mutex);
- if (root->fs_info->quota_enabled)
- btrfs_qgroup_free(root, alloc_end - alloc_start);
-out_reserve_fail:
/* Let go of our reservation. */
btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
return ret;
@@ -2806,8 +2801,6 @@ out:
const struct file_operations btrfs_file_operations = {
.llseek = btrfs_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.splice_read = generic_file_splice_read,
.write_iter = btrfs_file_write_iter,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index a71978578fa7..9dbe5b548fa6 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -85,7 +85,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
}
mapping_set_gfp_mask(inode->i_mapping,
- mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+ mapping_gfp_mask(inode->i_mapping) &
+ ~(__GFP_FS | __GFP_HIGHMEM));
return inode;
}
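A brief sketch of why clearing __GFP_HIGHMEM here matters for the io_ctl hunks below, which replace kmap()/kunmap() with page_address(); that connection is an editorial reading of this patch, while the helpers themselves are stock kernel API:

	/* lowmem pages keep a permanent kernel mapping, so: */
	void *addr = page_address(page);	/* valid, no temporary mapping */

	/* whereas a highmem page would require the costlier pair */
	addr = kmap(page);
	kunmap(page);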
@@ -170,13 +171,13 @@ static int __create_free_space_inode(struct btrfs_root *root,
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
key.offset = offset;
key.type = 0;
-
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(struct btrfs_free_space_header));
if (ret < 0) {
btrfs_release_path(path);
return ret;
}
+
leaf = path->nodes[0];
header = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_header);
@@ -225,9 +226,37 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
struct inode *inode)
{
int ret = 0;
+ struct btrfs_path *path = btrfs_alloc_path();
+
+ if (!path) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (block_group) {
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ if (!list_empty(&block_group->io_list)) {
+ list_del_init(&block_group->io_list);
+
+ btrfs_wait_cache_io(root, trans, block_group,
+ &block_group->io_ctl, path,
+ block_group->key.objectid);
+ btrfs_put_block_group(block_group);
+ }
+
+ /*
+ * now that we've truncated the cache away, it's no longer
+ * set up or written
+ */
+ spin_lock(&block_group->lock);
+ block_group->disk_cache_state = BTRFS_DC_CLEAR;
+ spin_unlock(&block_group->lock);
+ }
+ btrfs_free_path(path);
btrfs_i_size_write(inode, 0);
truncate_pagecache(inode, 0);
@@ -235,15 +264,23 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
/*
* We don't need an orphan item because truncating the free space cache
* will never be split across transactions.
+ * We don't need to check for -EAGAIN because we're a free space
+ * cache inode
*/
ret = btrfs_truncate_inode_items(trans, root, inode,
0, BTRFS_EXTENT_DATA_KEY);
if (ret) {
+ mutex_unlock(&trans->transaction->cache_write_mutex);
btrfs_abort_transaction(trans, root, ret);
return ret;
}
ret = btrfs_update_inode(trans, root, inode);
+
+ if (block_group)
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+
+fail:
if (ret)
btrfs_abort_transaction(trans, root, ret);
@@ -269,18 +306,7 @@ static int readahead_cache(struct inode *inode)
return 0;
}
-struct io_ctl {
- void *cur, *orig;
- struct page *page;
- struct page **pages;
- struct btrfs_root *root;
- unsigned long size;
- int index;
- int num_pages;
- unsigned check_crcs:1;
-};
-
-static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
+static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
struct btrfs_root *root, int write)
{
int num_pages;
@@ -296,45 +322,46 @@ static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
(num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
return -ENOSPC;
- memset(io_ctl, 0, sizeof(struct io_ctl));
+ memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
- io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+ io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
if (!io_ctl->pages)
return -ENOMEM;
io_ctl->num_pages = num_pages;
io_ctl->root = root;
io_ctl->check_crcs = check_crcs;
+ io_ctl->inode = inode;
return 0;
}
-static void io_ctl_free(struct io_ctl *io_ctl)
+static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
kfree(io_ctl->pages);
+ io_ctl->pages = NULL;
}
-static void io_ctl_unmap_page(struct io_ctl *io_ctl)
+static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
if (io_ctl->cur) {
- kunmap(io_ctl->page);
io_ctl->cur = NULL;
io_ctl->orig = NULL;
}
}
-static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
+static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
ASSERT(io_ctl->index < io_ctl->num_pages);
io_ctl->page = io_ctl->pages[io_ctl->index++];
- io_ctl->cur = kmap(io_ctl->page);
+ io_ctl->cur = page_address(io_ctl->page);
io_ctl->orig = io_ctl->cur;
io_ctl->size = PAGE_CACHE_SIZE;
if (clear)
memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}
-static void io_ctl_drop_pages(struct io_ctl *io_ctl)
+static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
int i;
@@ -349,7 +376,7 @@ static void io_ctl_drop_pages(struct io_ctl *io_ctl)
}
}
-static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
+static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
int uptodate)
{
struct page *page;
@@ -383,7 +410,7 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
return 0;
}
-static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
+static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
__le64 *val;
@@ -406,7 +433,7 @@ static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
io_ctl->cur += sizeof(u64);
}
-static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
+static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
__le64 *gen;
@@ -435,7 +462,7 @@ static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
return 0;
}
-static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
+static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
u32 *tmp;
u32 crc = ~(u32)0;
@@ -453,13 +480,12 @@ static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
PAGE_CACHE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
io_ctl_unmap_page(io_ctl);
- tmp = kmap(io_ctl->pages[0]);
+ tmp = page_address(io_ctl->pages[0]);
tmp += index;
*tmp = crc;
- kunmap(io_ctl->pages[0]);
}
-static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
+static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
u32 *tmp, val;
u32 crc = ~(u32)0;
@@ -473,10 +499,9 @@ static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
- tmp = kmap(io_ctl->pages[0]);
+ tmp = page_address(io_ctl->pages[0]);
tmp += index;
val = *tmp;
- kunmap(io_ctl->pages[0]);
io_ctl_map_page(io_ctl, 0);
crc = btrfs_csum_data(io_ctl->orig + offset, crc,
@@ -492,7 +517,7 @@ static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
return 0;
}
-static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
+static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
void *bitmap)
{
struct btrfs_free_space_entry *entry;
@@ -522,7 +547,7 @@ static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
return 0;
}
-static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
+static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
if (!io_ctl->cur)
return -ENOSPC;
@@ -545,7 +570,7 @@ static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
return 0;
}
-static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
+static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
/*
* If we're not on the boundary we know we've modified the page and we
@@ -562,7 +587,7 @@ static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
}
}
-static int io_ctl_read_entry(struct io_ctl *io_ctl,
+static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
struct btrfs_free_space *entry, u8 *type)
{
struct btrfs_free_space_entry *e;
@@ -589,7 +614,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
return 0;
}
-static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
+static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
struct btrfs_free_space *entry)
{
int ret;
@@ -648,7 +673,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
{
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
- struct io_ctl io_ctl;
+ struct btrfs_io_ctl io_ctl;
struct btrfs_key key;
struct btrfs_free_space *e, *n;
LIST_HEAD(bitmaps);
@@ -877,7 +902,7 @@ out:
}
static noinline_for_stack
-int write_cache_extent_entries(struct io_ctl *io_ctl,
+int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
struct btrfs_free_space_ctl *ctl,
struct btrfs_block_group_cache *block_group,
int *entries, int *bitmaps,
@@ -885,6 +910,7 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
{
int ret;
struct btrfs_free_cluster *cluster = NULL;
+ struct btrfs_free_cluster *cluster_locked = NULL;
struct rb_node *node = rb_first(&ctl->free_space_offset);
struct btrfs_trim_range *trim_entry;
@@ -896,6 +922,8 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
}
if (!node && cluster) {
+ cluster_locked = cluster;
+ spin_lock(&cluster_locked->lock);
node = rb_first(&cluster->root);
cluster = NULL;
}
@@ -919,9 +947,15 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
node = rb_next(node);
if (!node && cluster) {
node = rb_first(&cluster->root);
+ cluster_locked = cluster;
+ spin_lock(&cluster_locked->lock);
cluster = NULL;
}
}
+ if (cluster_locked) {
+ spin_unlock(&cluster_locked->lock);
+ cluster_locked = NULL;
+ }
/*
* Make sure we don't miss any range that was removed from our rbtree
@@ -939,6 +973,8 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
return 0;
fail:
+ if (cluster_locked)
+ spin_unlock(&cluster_locked->lock);
return -ENOSPC;
}
@@ -1000,7 +1036,7 @@ fail:
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
- struct io_ctl *io_ctl,
+ struct btrfs_io_ctl *io_ctl,
int *entries)
{
u64 start, extent_start, extent_end, len;
@@ -1050,7 +1086,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
}
static noinline_for_stack int
-write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
+write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
struct list_head *pos, *n;
int ret;
@@ -1083,10 +1119,7 @@ static int flush_dirty_cache(struct inode *inode)
}
static void noinline_for_stack
-cleanup_write_cache_enospc(struct inode *inode,
- struct io_ctl *io_ctl,
- struct extent_state **cached_state,
- struct list_head *bitmap_list)
+cleanup_bitmap_list(struct list_head *bitmap_list)
{
struct list_head *pos, *n;
@@ -1095,12 +1128,85 @@ cleanup_write_cache_enospc(struct inode *inode,
list_entry(pos, struct btrfs_free_space, list);
list_del_init(&entry->list);
}
+}
+
+static void noinline_for_stack
+cleanup_write_cache_enospc(struct inode *inode,
+ struct btrfs_io_ctl *io_ctl,
+ struct extent_state **cached_state,
+ struct list_head *bitmap_list)
+{
io_ctl_drop_pages(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
i_size_read(inode) - 1, cached_state,
GFP_NOFS);
}
+int btrfs_wait_cache_io(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_io_ctl *io_ctl,
+ struct btrfs_path *path, u64 offset)
+{
+ int ret;
+ struct inode *inode = io_ctl->inode;
+
+ if (!inode)
+ return 0;
+
+ if (block_group)
+ root = root->fs_info->tree_root;
+
+ /* Flush the dirty pages in the cache file. */
+ ret = flush_dirty_cache(inode);
+ if (ret)
+ goto out;
+
+ /* Update the cache item to tell everyone this cache file is valid. */
+ ret = update_cache_item(trans, root, inode, path, offset,
+ io_ctl->entries, io_ctl->bitmaps);
+out:
+ io_ctl_free(io_ctl);
+ if (ret) {
+ invalidate_inode_pages2(inode->i_mapping);
+ BTRFS_I(inode)->generation = 0;
+ if (block_group) {
+#ifdef DEBUG
+ btrfs_err(root->fs_info,
+ "failed to write free space cache for block group %llu",
+ block_group->key.objectid);
+#endif
+ }
+ }
+ btrfs_update_inode(trans, root, inode);
+
+ if (block_group) {
+ /* the dirty list is protected by the dirty_bgs_lock */
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+
+ /* the disk_cache_state is protected by the block group lock */
+ spin_lock(&block_group->lock);
+
+ /*
+ * only mark this as written if we didn't get put back on
+ * the dirty list while waiting for IO. Otherwise our
+ * cache state won't be right, and we won't get written again
+ */
+ if (!ret && list_empty(&block_group->dirty_list))
+ block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+ else if (ret)
+ block_group->disk_cache_state = BTRFS_DC_ERROR;
+
+ spin_unlock(&block_group->lock);
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
+ io_ctl->inode = NULL;
+ iput(inode);
+ }
+
+ return ret;
+
+}
+
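A sketch of the calling sequence the new helper enables, mirroring the btrfs_write_out_ino_cache() hunk later in this patch; the names are the ones introduced here:

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
				      &io_ctl, trans, path, offset);
	if (!ret)
		/* pages are under writeback; wait and update the item later */
		ret = btrfs_wait_cache_io(root, trans, block_group,
					  &io_ctl, path, offset);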
/**
* __btrfs_write_out_cache - write out cached info to an inode
* @root - the root the inode belongs to
@@ -1112,27 +1218,29 @@ cleanup_write_cache_enospc(struct inode *inode,
*
* This function writes out a free space cache struct to disk for quick recovery
* on mount. This will return 0 if it was successful in writing the cache out,
- * and -1 if it was not.
+ * or an errno if it was not.
*/
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_block_group_cache *block_group,
+ struct btrfs_io_ctl *io_ctl,
struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 offset)
{
struct extent_state *cached_state = NULL;
- struct io_ctl io_ctl;
LIST_HEAD(bitmap_list);
int entries = 0;
int bitmaps = 0;
int ret;
+ int must_iput = 0;
if (!i_size_read(inode))
- return -1;
+ return -EIO;
- ret = io_ctl_init(&io_ctl, inode, root, 1);
+ WARN_ON(io_ctl->pages);
+ ret = io_ctl_init(io_ctl, inode, root, 1);
if (ret)
- return -1;
+ return ret;
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
down_write(&block_group->data_rwsem);
@@ -1143,55 +1251,59 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
up_write(&block_group->data_rwsem);
BTRFS_I(inode)->generation = 0;
ret = 0;
+ must_iput = 1;
goto out;
}
spin_unlock(&block_group->lock);
}
/* Lock all pages first so we can lock the extent safely. */
- io_ctl_prepare_pages(&io_ctl, inode, 0);
+ ret = io_ctl_prepare_pages(io_ctl, inode, 0);
+ if (ret)
+ goto out;
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
0, &cached_state);
- io_ctl_set_generation(&io_ctl, trans->transid);
+ io_ctl_set_generation(io_ctl, trans->transid);
mutex_lock(&ctl->cache_writeout_mutex);
/* Write out the extent entries in the free space cache */
- ret = write_cache_extent_entries(&io_ctl, ctl,
+ spin_lock(&ctl->tree_lock);
+ ret = write_cache_extent_entries(io_ctl, ctl,
block_group, &entries, &bitmaps,
&bitmap_list);
- if (ret) {
- mutex_unlock(&ctl->cache_writeout_mutex);
- goto out_nospc;
- }
+ if (ret)
+ goto out_nospc_locked;
/*
* Some spaces that are freed in the current transaction are pinned,
* they will be added into free space cache after the transaction is
* committed, we shouldn't lose them.
+ *
+ * If this changes while we are working we'll get added back to
+ * the dirty list and redo it. No locking needed
*/
- ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
- if (ret) {
- mutex_unlock(&ctl->cache_writeout_mutex);
- goto out_nospc;
- }
+ ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
+ if (ret)
+ goto out_nospc_locked;
/*
* At last, we write out all the bitmaps and keep cache_writeout_mutex
* locked while doing it because a concurrent trim can be manipulating
* or freeing the bitmap.
*/
- ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+ ret = write_bitmap_entries(io_ctl, &bitmap_list);
+ spin_unlock(&ctl->tree_lock);
mutex_unlock(&ctl->cache_writeout_mutex);
if (ret)
goto out_nospc;
/* Zero out the rest of the pages just to make sure */
- io_ctl_zero_remaining_pages(&io_ctl);
+ io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
- ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+ ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
0, i_size_read(inode), &cached_state);
if (ret)
goto out_nospc;
@@ -1202,30 +1314,44 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
* Release the pages and unlock the extent, we will flush
* them out later
*/
- io_ctl_drop_pages(&io_ctl);
+ io_ctl_drop_pages(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
i_size_read(inode) - 1, &cached_state, GFP_NOFS);
- /* Flush the dirty pages in the cache file. */
- ret = flush_dirty_cache(inode);
+ /*
+ * at this point the pages are under IO and we're happy.
+ * The caller is responsible for waiting on them and updating
+ * the cache and the inode
+ */
+ io_ctl->entries = entries;
+ io_ctl->bitmaps = bitmaps;
+
+ ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
if (ret)
goto out;
- /* Update the cache item to tell everyone this cache file is valid. */
- ret = update_cache_item(trans, root, inode, path, offset,
- entries, bitmaps);
+ return 0;
+
out:
- io_ctl_free(&io_ctl);
+ io_ctl->inode = NULL;
+ io_ctl_free(io_ctl);
if (ret) {
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
}
btrfs_update_inode(trans, root, inode);
+ if (must_iput)
+ iput(inode);
return ret;
+out_nospc_locked:
+ cleanup_bitmap_list(&bitmap_list);
+ spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
+
out_nospc:
- cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
+ cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
up_write(&block_group->data_rwsem);
@@ -1241,7 +1367,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct inode *inode;
int ret = 0;
- enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN;
root = root->fs_info->tree_root;
@@ -1250,34 +1375,34 @@ int btrfs_write_out_cache(struct btrfs_root *root,
spin_unlock(&block_group->lock);
return 0;
}
-
- if (block_group->delalloc_bytes) {
- block_group->disk_cache_state = BTRFS_DC_WRITTEN;
- spin_unlock(&block_group->lock);
- return 0;
- }
spin_unlock(&block_group->lock);
inode = lookup_free_space_inode(root, block_group, path);
if (IS_ERR(inode))
return 0;
- ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+ ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
+ &block_group->io_ctl, trans,
path, block_group->key.objectid);
if (ret) {
- dcs = BTRFS_DC_ERROR;
- ret = 0;
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
#endif
+ spin_lock(&block_group->lock);
+ block_group->disk_cache_state = BTRFS_DC_ERROR;
+ spin_unlock(&block_group->lock);
+
+ block_group->io_ctl.inode = NULL;
+ iput(inode);
}
- spin_lock(&block_group->lock);
- block_group->disk_cache_state = dcs;
- spin_unlock(&block_group->lock);
- iput(inode);
+ /*
+ * if ret == 0 the caller is expected to call btrfs_wait_cache_io
+ * to wait for IO and put the inode
+ */
+
return ret;
}
@@ -1298,11 +1423,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
u64 offset)
{
u64 bitmap_start;
- u64 bytes_per_bitmap;
+ u32 bytes_per_bitmap;
bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
bitmap_start = offset - ctl->start;
- bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
+ bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
bitmap_start *= bytes_per_bitmap;
bitmap_start += ctl->start;
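A note on the helper choice, sketched: div_u64() takes a 32-bit divisor, which is why bytes_per_bitmap narrows to u32 in this and the following hunk:

	u64 n = bitmap_start;
	u32 d = bytes_per_bitmap;	/* BITS_PER_BITMAP * ctl->unit fits u32 */
	n = div_u64(n, d);		/* 64-by-32 divide                      */
	/* div64_u64() is only needed when the divisor itself may be 64-bit */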
@@ -1521,10 +1646,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
- u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
- int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+ u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+ u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
- max_bitmaps = max(max_bitmaps, 1);
+ max_bitmaps = max_t(u32, max_bitmaps, 1);
ASSERT(ctl->total_bitmaps <= max_bitmaps);
@@ -1537,7 +1662,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
max_bytes = MAX_CACHE_BYTES_PER_GIG;
else
max_bytes = MAX_CACHE_BYTES_PER_GIG *
- div64_u64(size, 1024 * 1024 * 1024);
+ div_u64(size, 1024 * 1024 * 1024);
/*
* we want to account for 1 more bitmap than what we have so we can make
@@ -1552,14 +1677,14 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
}
/*
- * we want the extent entry threshold to always be at most 1/2 the maxw
+ * we want the extent entry threshold to always be at most 1/2 the max
* bytes we can have, or whatever is less than that.
*/
extent_bytes = max_bytes - bitmap_bytes;
- extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
+ extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
ctl->extents_thresh =
- div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
+ div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -1673,7 +1798,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
*/
if (*bytes >= align) {
tmp = entry->offset - ctl->start + align - 1;
- do_div(tmp, align);
+ tmp = div64_u64(tmp, align);
tmp = tmp * align + ctl->start;
align_off = tmp - entry->offset;
} else {
@@ -2402,11 +2527,8 @@ static void __btrfs_remove_free_space_cache_locked(
} else {
free_bitmap(ctl, info);
}
- if (need_resched()) {
- spin_unlock(&ctl->tree_lock);
- cond_resched();
- spin_lock(&ctl->tree_lock);
- }
+
+ cond_resched_lock(&ctl->tree_lock);
}
}
@@ -2431,11 +2553,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
WARN_ON(cluster->block_group != block_group);
__btrfs_return_cluster_to_free_space(block_group, cluster);
- if (need_resched()) {
- spin_unlock(&ctl->tree_lock);
- cond_resched();
- spin_lock(&ctl->tree_lock);
- }
+
+ cond_resched_lock(&ctl->tree_lock);
}
__btrfs_remove_free_space_cache_locked(ctl);
spin_unlock(&ctl->tree_lock);
@@ -3346,13 +3465,29 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
{
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
int ret;
+ struct btrfs_io_ctl io_ctl;
+ bool release_metadata = true;
if (!btrfs_test_opt(root, INODE_MAP_CACHE))
return 0;
- ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
+ memset(&io_ctl, 0, sizeof(io_ctl));
+ ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
+ trans, path, 0);
+ if (!ret) {
+ /*
+ * At this point writepages() didn't error out, so our metadata
+ * reservation is released when the writeback finishes, at
+ * inode.c:btrfs_finish_ordered_io(), regardless of whether it
+ * finishes with or without an error.
+ */
+ release_metadata = false;
+ ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+ }
+
if (ret) {
- btrfs_delalloc_release_metadata(inode, inode->i_size);
+ if (release_metadata)
+ btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free ino cache for root %llu",
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 88b2238a0aed..a16a029ad3b1 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -48,6 +48,8 @@ struct btrfs_free_space_op {
struct btrfs_free_space *info);
};
+struct btrfs_io_ctl;
+
struct inode *lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_block_group_cache
*block_group, struct btrfs_path *path);
@@ -60,14 +62,19 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
struct inode *inode);
int load_free_space_cache(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group);
+int btrfs_wait_cache_io(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_io_ctl *io_ctl,
+ struct btrfs_path *path, u64 offset);
int btrfs_write_out_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
-
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
struct btrfs_path *path);
int create_free_ino_inode(struct btrfs_root *root,
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 74faea3a516e..f6a596d5a637 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -456,7 +456,7 @@ again:
}
if (i_size_read(inode) > 0) {
- ret = btrfs_truncate_free_space_cache(root, trans, inode);
+ ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
if (ret) {
if (ret != -ENOSPC)
btrfs_abort_transaction(trans, root, ret);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d2e732d7af52..8bb013672aee 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -32,7 +32,6 @@
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
-#include <linux/aio.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
@@ -43,6 +42,7 @@
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -59,6 +59,7 @@
#include "backref.h"
#include "hash.h"
#include "props.h"
+#include "qgroup.h"
struct btrfs_iget_args {
struct btrfs_key *location;
@@ -470,7 +471,7 @@ again:
*/
if (inode_need_compress(inode)) {
WARN_ON(pages);
- pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
goto cont;
@@ -752,7 +753,6 @@ retry:
}
goto out_free;
}
-
/*
* here we're doing allocation and writeback of the
* compressed pages
@@ -3110,6 +3110,8 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
if (empty)
return;
+ down_read(&fs_info->delayed_iput_sem);
+
spin_lock(&fs_info->delayed_iput_lock);
list_splice_init(&fs_info->delayed_iputs, &list);
spin_unlock(&fs_info->delayed_iput_lock);
@@ -3120,6 +3122,8 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
iput(delayed->inode);
kfree(delayed);
}
+
+ up_read(&root->fs_info->delayed_iput_sem);
}
/*
@@ -3628,25 +3632,28 @@ static void btrfs_read_locked_inode(struct inode *inode)
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
+ inode->i_version = btrfs_inode_sequence(leaf, inode_item);
+ inode->i_generation = BTRFS_I(inode)->generation;
+ inode->i_rdev = 0;
+ rdev = btrfs_inode_rdev(leaf, inode_item);
+
+ BTRFS_I(inode)->index_cnt = (u64)-1;
+ BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+
+cache_index:
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
* idea about which extents were modified before we were evicted from
* cache.
+ *
+ * This is required for both inode re-read from disk and delayed inode
+ * in delayed_nodes_tree.
*/
if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
- inode->i_version = btrfs_inode_sequence(leaf, inode_item);
- inode->i_generation = BTRFS_I(inode)->generation;
- inode->i_rdev = 0;
- rdev = btrfs_inode_rdev(leaf, inode_item);
-
- BTRFS_I(inode)->index_cnt = (u64)-1;
- BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
-cache_index:
path->slots[0]++;
if (inode->i_nlink != 1 ||
path->slots[0] >= btrfs_header_nritems(leaf))
@@ -4016,16 +4023,16 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int ret;
trans = __unlink_start_trans(dir);
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
+ btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
- ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
+ ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
dentry->d_name.name, dentry->d_name.len);
if (ret)
goto out;
@@ -4124,7 +4131,7 @@ out:
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
@@ -4151,7 +4158,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
goto out;
/* now the directory is empty */
- err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
+ err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
dentry->d_name.name, dentry->d_name.len);
if (!err)
btrfs_i_size_write(inode, 0);
@@ -4162,6 +4169,21 @@ out:
return err;
}
+static int truncate_space_check(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytes_deleted)
+{
+ int ret;
+
+ bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
+ ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
+ bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
+ if (!ret)
+ trans->bytes_reserved += bytes_deleted;
+ return ret;
+
+}
+
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
@@ -4197,9 +4219,21 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
int ret;
int err = 0;
u64 ino = btrfs_ino(inode);
+ u64 bytes_deleted = 0;
+ bool be_nice = 0;
+ bool should_throttle = 0;
+ bool should_end = 0;
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
+ /*
+ * for non-free space inodes and ref cows, we want to back off from
+ * time to time
+ */
+ if (!btrfs_is_free_space_inode(inode) &&
+ test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ be_nice = 1;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -4229,6 +4263,19 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
key.type = (u8)-1;
search_again:
+ /*
+ * with a 16K leaf size and 128MB extents, you can actually queue
+ * up a huge file in a single leaf. Most of the time that
+ * bytes_deleted is > 0, it will be huge by the time we get here
+ */
+ if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+ if (btrfs_should_end_transaction(trans, root)) {
+ err = -EAGAIN;
+ goto error;
+ }
+ }
+
+
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
@@ -4371,22 +4418,39 @@ delete:
} else {
break;
}
+ should_throttle = 0;
+
if (found_extent &&
(test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == root->fs_info->tree_root)) {
btrfs_set_path_blocking(path);
+ bytes_deleted += extent_num_bytes;
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset, 0);
BUG_ON(ret);
+ if (btrfs_should_throttle_delayed_refs(trans, root))
+ btrfs_async_run_delayed_refs(root,
+ trans->delayed_ref_updates * 2, 0);
+ if (be_nice) {
+ if (truncate_space_check(trans, root,
+ extent_num_bytes)) {
+ should_end = 1;
+ }
+ if (btrfs_should_throttle_delayed_refs(trans,
+ root)) {
+ should_throttle = 1;
+ }
+ }
}
if (found_type == BTRFS_INODE_ITEM_KEY)
break;
if (path->slots[0] == 0 ||
- path->slots[0] != pending_del_slot) {
+ path->slots[0] != pending_del_slot ||
+ should_throttle || should_end) {
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
@@ -4399,6 +4463,23 @@ delete:
pending_del_nr = 0;
}
btrfs_release_path(path);
+ if (should_throttle) {
+ unsigned long updates = trans->delayed_ref_updates;
+ if (updates) {
+ trans->delayed_ref_updates = 0;
+ ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+ if (ret && !err)
+ err = ret;
+ }
+ }
+ /*
+ * if we failed to refill our space rsv, bail out
+ * and let the transaction restart
+ */
+ if (should_end) {
+ err = -EAGAIN;
+ goto error;
+ }
goto search_again;
} else {
path->slots[0]--;
@@ -4415,7 +4496,18 @@ error:
if (last_size != (u64)-1 &&
root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
btrfs_ordered_update_i_size(inode, last_size, NULL);
+
btrfs_free_path(path);
+
+ if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+ unsigned long updates = trans->delayed_ref_updates;
+ if (updates) {
+ trans->delayed_ref_updates = 0;
+ ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+ if (ret && !err)
+ err = ret;
+ }
+ }
return err;
}
@@ -4826,7 +4918,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
int err;
@@ -4924,6 +5016,7 @@ void btrfs_evict_inode(struct inode *inode)
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
+ int steal_from_global = 0;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
int ret;
@@ -4991,9 +5084,20 @@ void btrfs_evict_inode(struct inode *inode)
* hard as possible to get this to work.
*/
if (ret)
- ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
+ steal_from_global++;
+ else
+ steal_from_global = 0;
+ ret = 0;
- if (ret) {
+ /*
+ * steal_from_global == 0: we reserved stuff, hooray!
+ * steal_from_global == 1: we didn't reserve stuff, boo!
+ * steal_from_global == 2: we've committed, still not a lot of
+ * room but maybe we'll have room in the global reserve this
+ * time.
+ * steal_from_global == 3: abandon all hope!
+ */
+ if (steal_from_global > 2) {
btrfs_warn(root->fs_info,
"Could not get space for a delete, will truncate on mount %d",
ret);
@@ -5009,10 +5113,40 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
+ /*
+ * We can't just steal from the global reserve, we need to make
+ * sure there is room to do it; if not, we need to commit and try
+ * again.
+ */
+ if (steal_from_global) {
+ if (!btrfs_check_space_for_delayed_refs(trans, root))
+ ret = btrfs_block_rsv_migrate(global_rsv, rsv,
+ min_size);
+ else
+ ret = -ENOSPC;
+ }
+
+ /*
+ * Couldn't steal from the global reserve, we have too much
+ * pending stuff built up, commit the transaction and try it
+ * again.
+ */
+ if (ret) {
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret) {
+ btrfs_orphan_del(NULL, inode);
+ btrfs_free_block_rsv(root, rsv);
+ goto no_delete;
+ }
+ continue;
+ } else {
+ steal_from_global = 0;
+ }
+
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
- if (ret != -ENOSPC)
+ if (ret != -ENOSPC && ret != -EAGAIN)
break;
trans->block_rsv = &root->fs_info->trans_block_rsv;
@@ -5416,10 +5550,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
static int btrfs_dentry_delete(const struct dentry *dentry)
{
struct btrfs_root *root;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (!inode && !IS_ROOT(dentry))
- inode = dentry->d_parent->d_inode;
+ inode = d_inode(dentry->d_parent);
if (inode) {
root = BTRFS_I(inode)->root;
@@ -6226,7 +6360,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
u64 index;
int err;
int drop_inode = 0;
@@ -8081,7 +8215,7 @@ free_ordered:
bio_endio(dio_bio, ret);
}
-static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
+static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
const struct iov_iter *iter, loff_t offset)
{
int seg;
@@ -8096,7 +8230,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
goto out;
/* If this is a write we don't need to check anymore */
- if (rw & WRITE)
+ if (iov_iter_rw(iter) == WRITE)
return 0;
/*
* Check to make sure we don't have duplicate iov_base's in this
@@ -8114,8 +8248,8 @@ out:
return retval;
}
-static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -8126,10 +8260,10 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
bool relock = false;
ssize_t ret;
- if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
+ if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
return 0;
- atomic_inc(&inode->i_dio_count);
+ inode_dio_begin(inode);
smp_mb__after_atomic();
/*
@@ -8144,7 +8278,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
filemap_fdatawrite_range(inode->i_mapping, offset,
offset + count - 1);
- if (rw & WRITE) {
+ if (iov_iter_rw(iter) == WRITE) {
/*
* If the write DIO is beyond the EOF, we need update
* the isize, but it is protected by i_mutex. So we can
@@ -8169,16 +8303,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
current->journal_info = &outstanding_extents;
} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
&BTRFS_I(inode)->runtime_flags)) {
- inode_dio_done(inode);
+ inode_dio_end(inode);
flags = DIO_LOCKING | DIO_SKIP_HOLES;
wakeup = false;
}
- ret = __blockdev_direct_IO(rw, iocb, inode,
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
- iter, offset, btrfs_get_blocks_direct, NULL,
- btrfs_submit_direct, flags);
- if (rw & WRITE) {
+ ret = __blockdev_direct_IO(iocb, inode,
+ BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+ iter, offset, btrfs_get_blocks_direct, NULL,
+ btrfs_submit_direct, flags);
+ if (iov_iter_rw(iter) == WRITE) {
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED)
btrfs_delalloc_release_space(inode, count);
@@ -8188,7 +8322,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
}
out:
if (wakeup)
- inode_dio_done(inode);
+ inode_dio_end(inode);
if (relock)
mutex_lock(&inode->i_mutex);
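A sketch of the two generic VFS substitutions made in this hunk:

	/* the data direction now comes from the iterator, not an rw flag */
	if (iov_iter_rw(iter) == WRITE)
		/* DIO write path */ ;
	else
		/* DIO read path */ ;

	/* and the raw i_dio_count handling is wrapped in named helpers */
	inode_dio_begin(inode);		/* was atomic_inc(&inode->i_dio_count) */
	inode_dio_end(inode);		/* was inode_dio_done(inode)            */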
@@ -8581,7 +8715,7 @@ static int btrfs_truncate(struct inode *inode)
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
- if (ret != -ENOSPC) {
+ if (ret != -ENOSPC && ret != -EAGAIN) {
err = ret;
break;
}
@@ -8875,7 +9009,7 @@ static int btrfs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
u64 delalloc_bytes;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
u32 blocksize = inode->i_sb->s_blocksize;
generic_fillattr(inode, stat);
@@ -8896,8 +9030,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
- struct inode *new_inode = new_dentry->d_inode;
- struct inode *old_inode = old_dentry->d_inode;
+ struct inode *new_inode = d_inode(new_dentry);
+ struct inode *old_inode = d_inode(old_dentry);
struct timespec ctime = CURRENT_TIME;
u64 index = 0;
u64 root_objectid;
@@ -9009,7 +9143,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry->d_name.len);
} else {
ret = __btrfs_unlink_inode(trans, root, old_dir,
- old_dentry->d_inode,
+ d_inode(old_dentry),
old_dentry->d_name.name,
old_dentry->d_name.len);
if (!ret)
@@ -9033,12 +9167,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, dest, new_dir,
- new_dentry->d_inode,
+ d_inode(new_dentry),
new_dentry->d_name.name,
new_dentry->d_name.len);
}
if (!ret && new_inode->i_nlink == 0)
- ret = btrfs_orphan_add(trans, new_dentry->d_inode);
+ ret = btrfs_orphan_add(trans, d_inode(new_dentry));
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out_fail;
@@ -9451,6 +9585,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
btrfs_end_transaction(trans, root);
break;
}
+
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 74609b931ba5..1c22c6518504 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -456,6 +456,13 @@ static noinline int create_subvol(struct inode *dir,
if (ret)
return ret;
+ /*
+ * Don't create a subvolume whose level is not zero, or qgroups will be
+ * screwed up since they assume the subvolume qgroup's level to be 0.
+ */
+ if (btrfs_qgroup_level(objectid))
+ return -ENOSPC;
+
btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
* The same as the snapshot creation, please see the comment
@@ -717,7 +724,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (ret)
goto fail;
- inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
+ inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto fail;
@@ -761,10 +768,10 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
int error;
- if (!victim->d_inode)
+ if (d_really_is_negative(victim))
return -ENOENT;
- BUG_ON(victim->d_parent->d_inode != dir);
+ BUG_ON(d_inode(victim->d_parent) != dir);
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
@@ -772,8 +779,8 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
return error;
if (IS_APPEND(dir))
return -EPERM;
- if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) ||
- IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
+ if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
+ IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
@@ -792,7 +799,7 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
- if (child->d_inode)
+ if (d_really_is_positive(child))
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
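A sketch of the dentry accessors this series converts to; all are stock VFS helpers:

	struct inode *inode = d_inode(dentry);	/* instead of dentry->d_inode */

	if (d_really_is_negative(dentry))	/* no inode attached yet */
		return -ENOENT;
	if (d_really_is_positive(child))	/* inode already attached */
		return -EEXIST;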
@@ -810,7 +817,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
u64 *async_transid, bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
- struct inode *dir = parent->dentry->d_inode;
+ struct inode *dir = d_inode(parent->dentry);
struct dentry *dentry;
int error;
@@ -824,7 +831,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
goto out_unlock;
error = -EEXIST;
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
goto out_dput;
error = btrfs_may_create(dir, dentry);
@@ -1564,7 +1571,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
goto out_free;
}
- do_div(new_size, root->sectorsize);
+ new_size = div_u64(new_size, root->sectorsize);
new_size *= root->sectorsize;
printk_in_rcu(KERN_INFO "BTRFS: new size for %s is %llu\n",
@@ -2294,7 +2301,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
{
struct dentry *parent = file->f_path.dentry;
struct dentry *dentry;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *dest = NULL;
@@ -2333,12 +2340,12 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto out_unlock_dir;
}
- if (!dentry->d_inode) {
+ if (d_really_is_negative(dentry)) {
err = -ENOENT;
goto out_dput;
}
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
dest = BTRFS_I(inode)->root;
if (!capable(CAP_SYS_ADMIN)) {
/*
@@ -2403,7 +2410,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
"Attempt to delete subvolume %llu during send",
dest->root_key.objectid);
err = -EPERM;
- goto out_dput;
+ goto out_unlock_inode;
}
d_invalidate(dentry);
@@ -2498,6 +2505,7 @@ out_up_write:
root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
}
+out_unlock_inode:
mutex_unlock(&inode->i_mutex);
if (!err) {
shrink_dcache_sb(root->fs_info->sb);
@@ -2897,6 +2905,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
if (src == dst)
return -EINVAL;
+ if (len == 0)
+ return 0;
+
btrfs_double_lock(src, loff, dst, dst_loff, len);
ret = extent_same_check_offsets(src, loff, len);
@@ -3039,7 +3050,7 @@ out:
static int check_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u64 disko)
{
- struct seq_list tree_mod_seq_elem = {};
+ struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
struct ulist *roots;
struct ulist_iterator uiter;
struct ulist_node *root_node = NULL;
@@ -3202,6 +3213,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
key.offset = off;
while (1) {
+ u64 next_key_min_offset = key.offset + 1;
+
/*
* note the key will change type as we walk through the
* tree.
@@ -3282,7 +3295,7 @@ process_slot:
} else if (key.offset >= off + len) {
break;
}
-
+ next_key_min_offset = key.offset + datal;
size = btrfs_item_size_nr(leaf, slot);
read_extent_buffer(leaf, buf,
btrfs_item_ptr_offset(leaf, slot),
@@ -3497,7 +3510,7 @@ process_slot:
break;
}
btrfs_release_path(path);
- key.offset++;
+ key.offset = next_key_min_offset;
}
ret = 0;
@@ -3626,6 +3639,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (off + len == src->i_size)
len = ALIGN(src->i_size, bs) - off;
+ if (len == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
/* verify the end result is block aligned */
if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
!IS_ALIGNED(destoff, bs))
@@ -4624,6 +4642,11 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
sa->src, sa->dst);
}
+ /* update qgroup status and info */
+ err = btrfs_run_qgroups(trans, root->fs_info);
+ if (err < 0)
+ btrfs_error(root->fs_info, ret,
+ "failed to update qgroup status and info\n");
err = btrfs_end_transaction(trans, root);
if (err && !ret)
ret = err;
@@ -4669,8 +4692,7 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
/* FIXME: check if the IDs really exist */
if (sa->create) {
- ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid,
- NULL);
+ ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid);
} else {
ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
}
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 617553cdb7d3..a2f051347731 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -434,7 +434,7 @@ out:
return ret;
}
-struct btrfs_compress_op btrfs_lzo_compress = {
+const struct btrfs_compress_op btrfs_lzo_compress = {
.alloc_workspace = lzo_alloc_workspace,
.free_workspace = lzo_free_workspace,
.compress_pages = lzo_compress_pages,
diff --git a/fs/btrfs/math.h b/fs/btrfs/math.h
index b7816cefbd13..1b10a3cd1195 100644
--- a/fs/btrfs/math.h
+++ b/fs/btrfs/math.h
@@ -28,8 +28,7 @@ static inline u64 div_factor(u64 num, int factor)
if (factor == 10)
return num;
num *= factor;
- do_div(num, 10);
- return num;
+ return div_u64(num, 10);
}
static inline u64 div_factor_fine(u64 num, int factor)
@@ -37,8 +36,7 @@ static inline u64 div_factor_fine(u64 num, int factor)
if (factor == 100)
return num;
num *= factor;
- do_div(num, 100);
- return num;
+ return div_u64(num, 100);
}
#endif
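A small worked sketch of the division helpers' differing conventions, which this math.h cleanup leans on:

	u64 num = 1234;

	u64 q1 = div_u64(num, 10);	/* quotient, 32-bit divisor          */
	u64 q2 = div64_u64(num, 10);	/* quotient, 64-bit divisor          */

	u32 r = do_div(num, 10);	/* divides num IN PLACE (num == 123) */
					/* and evaluates to the remainder, 4 */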
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 157cc54fc634..760c4a5e096b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -722,6 +722,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
int ret = 0;
+ int ret_wb = 0;
u64 end;
u64 orig_end;
struct btrfs_ordered_extent *ordered;
@@ -741,9 +742,14 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
if (ret)
return ret;
- ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
- if (ret)
- return ret;
+ /*
+ * If we have a writeback error don't return immediately. Wait first
+ * for any ordered extents that haven't completed yet. This is to make
+ * sure no one can dirty the same page ranges and call writepages()
+ * before the ordered extents complete - to avoid failures (-EEXIST)
+ * when adding the new ordered extents to the ordered tree.
+ */
+ ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
end = orig_end;
while (1) {
@@ -767,7 +773,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
break;
end--;
}
- return ret;
+ return ret_wb ? ret_wb : ret;
}
/*
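The btrfs_wait_ordered_range() change above records the filemap_fdatawait_range() error in ret_wb but keeps waiting for ordered extents before reporting it, so nobody can re-dirty and rewrite those page ranges in the meantime. A userspace sketch of that "remember the first error, finish the wait, then report" shape (the two wait helpers are made-up stand-ins):

#include <errno.h>
#include <stdio.h>

/* Made-up stand-ins for the writeback wait and the ordered-extent wait. */
static int wait_writeback(void)       { return -EIO; } /* pretend it failed */
static int wait_ordered_extents(void) { return 0; }

static int wait_range_sketch(void)
{
	/* Do not bail out on a writeback error ... */
	int ret_wb = wait_writeback();
	/* ... always drain the ordered extents first ... */
	int ret = wait_ordered_extents();

	/* ... and only then return whichever error happened first. */
	return ret_wb ? ret_wb : ret;
}

int main(void)
{
	printf("result: %d\n", wait_range_sketch()); /* prints -5 (-EIO) */
	return 0;
}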
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 129b1dd28527..dca137b04095 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -425,3 +425,5 @@ static const char *prop_compression_extract(struct inode *inode)
return NULL;
}
+
+
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 058c79eecbfb..3d6546581bb9 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -644,9 +644,8 @@ out:
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 qgroupid,
- u64 flags, u64 max_rfer, u64 max_excl,
- u64 rsv_rfer, u64 rsv_excl)
+ struct btrfs_root *root,
+ struct btrfs_qgroup *qgroup)
{
struct btrfs_path *path;
struct btrfs_key key;
@@ -657,7 +656,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
key.objectid = 0;
key.type = BTRFS_QGROUP_LIMIT_KEY;
- key.offset = qgroupid;
+ key.offset = qgroup->qgroupid;
path = btrfs_alloc_path();
if (!path)
@@ -673,11 +672,11 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
l = path->nodes[0];
slot = path->slots[0];
qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
- btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
- btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
- btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
- btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
- btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
+ btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
+ btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
+ btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
+ btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
+ btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
btrfs_mark_buffer_dirty(l);
@@ -967,6 +966,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
fs_info->pending_quota_state = 0;
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
spin_unlock(&fs_info->qgroup_lock);
btrfs_free_qgroup_config(fs_info);
@@ -982,7 +982,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
list_del(&quota_root->dirty_list);
btrfs_tree_lock(quota_root->node);
- clean_tree_block(trans, tree_root, quota_root->node);
+ clean_tree_block(trans, tree_root->fs_info, quota_root->node);
btrfs_tree_unlock(quota_root->node);
btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
@@ -1001,6 +1001,110 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
+/*
+ * The easy accounting: if we are adding/removing the only ref for an extent
+ * then this qgroup and all of the parent qgroups get their reference and
+ * exclusive counts adjusted.
+ *
+ * Caller should hold fs_info->qgroup_lock.
+ */
+static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
+ struct ulist *tmp, u64 ref_root,
+ u64 num_bytes, int sign)
+{
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup_list *glist;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ int ret = 0;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto out;
+
+ qgroup->rfer += sign * num_bytes;
+ qgroup->rfer_cmpr += sign * num_bytes;
+
+ WARN_ON(sign < 0 && qgroup->excl < num_bytes);
+ qgroup->excl += sign * num_bytes;
+ qgroup->excl_cmpr += sign * num_bytes;
+ if (sign > 0)
+ qgroup->reserved -= num_bytes;
+
+ qgroup_dirty(fs_info, qgroup);
+
+ /* Get all of the parent groups that contain this qgroup */
+ list_for_each_entry(glist, &qgroup->groups, next_group) {
+ ret = ulist_add(tmp, glist->group->qgroupid,
+ ptr_to_u64(glist->group), GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Iterate all of the parents and adjust their reference counts */
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(tmp, &uiter))) {
+ qgroup = u64_to_ptr(unode->aux);
+ qgroup->rfer += sign * num_bytes;
+ qgroup->rfer_cmpr += sign * num_bytes;
+ WARN_ON(sign < 0 && qgroup->excl < num_bytes);
+ qgroup->excl += sign * num_bytes;
+ if (sign > 0)
+ qgroup->reserved -= num_bytes;
+ qgroup->excl_cmpr += sign * num_bytes;
+ qgroup_dirty(fs_info, qgroup);
+
+ /* Add any parents of the parents */
+ list_for_each_entry(glist, &qgroup->groups, next_group) {
+ ret = ulist_add(tmp, glist->group->qgroupid,
+ ptr_to_u64(glist->group), GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+
+/*
+ * Quick path for updating qgroup with only excl refs.
+ *
+ * In that case, just updating all parents will be enough.
+ * Otherwise we need to do a full rescan.
+ * Caller should also hold fs_info->qgroup_lock.
+ *
+ * Return 0 for a quick update, return >0 if a full rescan is needed
+ * (and the INCONSISTENT flag is set).
+ * Return < 0 for other errors.
+ */
+static int quick_update_accounting(struct btrfs_fs_info *fs_info,
+ struct ulist *tmp, u64 src, u64 dst,
+ int sign)
+{
+ struct btrfs_qgroup *qgroup;
+ int ret = 1;
+ int err = 0;
+
+ qgroup = find_qgroup_rb(fs_info, src);
+ if (!qgroup)
+ goto out;
+ if (qgroup->excl == qgroup->rfer) {
+ ret = 0;
+ err = __qgroup_excl_accounting(fs_info, tmp, dst,
+ qgroup->excl, sign);
+ if (err < 0) {
+ ret = err;
+ goto out;
+ }
+ }
+out:
+ if (ret)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ return ret;
+}
+
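__qgroup_excl_accounting() above applies the same signed delta to a qgroup and to every ancestor group, using a ulist so each ancestor is visited only once. The sketch below shows that propagation over a tiny hard-coded parent chain, with a plain array playing the ulist's role (simplified: the reserved-bytes adjustment and the *_cmpr counters are left out):

#include <stdio.h>

#define MAX_QGROUPS 8

struct qgroup_sketch {
	long long rfer;
	long long excl;
	int parents[MAX_QGROUPS]; /* indices of parent qgroups */
	int nr_parents;
};

/* Apply sign * num_bytes to one qgroup and all of its ancestors,
 * visiting each ancestor once (the role the ulist plays in the patch). */
static void excl_accounting_sketch(struct qgroup_sketch *qg, int idx,
				   long long num_bytes, int sign)
{
	int queue[MAX_QGROUPS];
	int seen[MAX_QGROUPS] = {0};
	int head = 0, tail = 0, i;

	queue[tail++] = idx;
	seen[idx] = 1;
	while (head < tail) {
		struct qgroup_sketch *cur = &qg[queue[head++]];

		cur->rfer += sign * num_bytes;
		cur->excl += sign * num_bytes;
		for (i = 0; i < cur->nr_parents; i++) {
			int p = cur->parents[i];

			if (!seen[p]) {
				seen[p] = 1;
				queue[tail++] = p;
			}
		}
	}
}

int main(void)
{
	struct qgroup_sketch qg[3] = {
		{ .parents = {1}, .nr_parents = 1 }, /* child          */
		{ .parents = {2}, .nr_parents = 1 }, /* its parent     */
		{ .nr_parents = 0 },                 /* its grandparent */
	};

	excl_accounting_sketch(qg, 0, 4096, 1);
	printf("%lld %lld %lld\n", qg[0].excl, qg[1].excl, qg[2].excl);
	return 0;
}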
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
@@ -1008,8 +1112,17 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
+ struct ulist *tmp;
int ret = 0;
+ /* Check the level of src and dst first */
+ if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
+ return -EINVAL;
+
+ tmp = ulist_alloc(GFP_NOFS);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
quota_root = fs_info->quota_root;
if (!quota_root) {
@@ -1043,23 +1156,33 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
spin_lock(&fs_info->qgroup_lock);
ret = add_relation_rb(quota_root->fs_info, src, dst);
+ if (ret < 0) {
+ spin_unlock(&fs_info->qgroup_lock);
+ goto out;
+ }
+ ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
spin_unlock(&fs_info->qgroup_lock);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ ulist_free(tmp);
return ret;
}
-int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
+int __del_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
struct btrfs_root *quota_root;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
+ struct ulist *tmp;
int ret = 0;
int err;
- mutex_lock(&fs_info->qgroup_ioctl_lock);
+ tmp = ulist_alloc(GFP_NOFS);
+ if (!tmp)
+ return -ENOMEM;
+
quota_root = fs_info->quota_root;
if (!quota_root) {
ret = -EINVAL;
@@ -1088,14 +1211,27 @@ exist:
spin_lock(&fs_info->qgroup_lock);
del_relation_rb(fs_info, src, dst);
+ ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
spin_unlock(&fs_info->qgroup_lock);
out:
+ ulist_free(tmp);
+ return ret;
+}
+
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+{
+ int ret = 0;
+
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
+ ret = __del_qgroup_relation(trans, fs_info, src, dst);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+
return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
+ struct btrfs_fs_info *fs_info, u64 qgroupid)
{
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
@@ -1133,6 +1269,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
{
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup_list *list;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
@@ -1147,15 +1284,24 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
ret = -ENOENT;
goto out;
} else {
- /* check if there are no relations to this qgroup */
- if (!list_empty(&qgroup->groups) ||
- !list_empty(&qgroup->members)) {
+ /* check if there are no children of this qgroup */
+ if (!list_empty(&qgroup->members)) {
ret = -EBUSY;
goto out;
}
}
ret = del_qgroup_item(trans, quota_root, qgroupid);
+ while (!list_empty(&qgroup->groups)) {
+ list = list_first_entry(&qgroup->groups,
+ struct btrfs_qgroup_list, next_group);
+ ret = __del_qgroup_relation(trans, fs_info,
+ qgroupid,
+ list->group->qgroupid);
+ if (ret)
+ goto out;
+ }
+
spin_lock(&fs_info->qgroup_lock);
del_qgroup_rb(quota_root->fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
@@ -1184,23 +1330,27 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
ret = -ENOENT;
goto out;
}
- ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
- limit->flags, limit->max_rfer,
- limit->max_excl, limit->rsv_rfer,
- limit->rsv_excl);
+
+ spin_lock(&fs_info->qgroup_lock);
+ if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
+ qgroup->max_rfer = limit->max_rfer;
+ if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
+ qgroup->max_excl = limit->max_excl;
+ if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
+ qgroup->rsv_rfer = limit->rsv_rfer;
+ if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
+ qgroup->rsv_excl = limit->rsv_excl;
+ qgroup->lim_flags |= limit->flags;
+
+ spin_unlock(&fs_info->qgroup_lock);
+
+ ret = update_qgroup_limit_item(trans, quota_root, qgroup);
if (ret) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_info(fs_info, "unable to update quota limit for %llu",
qgroupid);
}
- spin_lock(&fs_info->qgroup_lock);
- qgroup->lim_flags = limit->flags;
- qgroup->max_rfer = limit->max_rfer;
- qgroup->max_excl = limit->max_excl;
- qgroup->rsv_rfer = limit->rsv_rfer;
- qgroup->rsv_excl = limit->rsv_excl;
- spin_unlock(&fs_info->qgroup_lock);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
@@ -1256,14 +1406,14 @@ static int comp_oper(struct btrfs_qgroup_operation *oper1,
return -1;
if (oper1->bytenr > oper2->bytenr)
return 1;
- if (oper1->seq < oper2->seq)
- return -1;
- if (oper1->seq > oper2->seq)
- return 1;
if (oper1->ref_root < oper2->ref_root)
return -1;
if (oper1->ref_root > oper2->ref_root)
return 1;
+ if (oper1->seq < oper2->seq)
+ return -1;
+ if (oper1->seq > oper2->seq)
+ return 1;
if (oper1->type < oper2->type)
return -1;
if (oper1->type > oper2->type)
@@ -1372,19 +1522,10 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
return 0;
}
-/*
- * The easy accounting, if we are adding/removing the only ref for an extent
- * then this qgroup and all of the parent qgroups get their refrence and
- * exclusive counts adjusted.
- */
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup_operation *oper)
{
- struct btrfs_qgroup *qgroup;
struct ulist *tmp;
- struct btrfs_qgroup_list *glist;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
int sign = 0;
int ret = 0;
@@ -1395,9 +1536,7 @@ static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->qgroup_lock);
if (!fs_info->quota_root)
goto out;
- qgroup = find_qgroup_rb(fs_info, oper->ref_root);
- if (!qgroup)
- goto out;
+
switch (oper->type) {
case BTRFS_QGROUP_OPER_ADD_EXCL:
sign = 1;
@@ -1408,43 +1547,8 @@ static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
default:
ASSERT(0);
}
- qgroup->rfer += sign * oper->num_bytes;
- qgroup->rfer_cmpr += sign * oper->num_bytes;
-
- WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
- qgroup->excl += sign * oper->num_bytes;
- qgroup->excl_cmpr += sign * oper->num_bytes;
-
- qgroup_dirty(fs_info, qgroup);
-
- /* Get all of the parent groups that contain this qgroup */
- list_for_each_entry(glist, &qgroup->groups, next_group) {
- ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
-
- /* Iterate all of the parents and adjust their reference counts */
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(tmp, &uiter))) {
- qgroup = u64_to_ptr(unode->aux);
- qgroup->rfer += sign * oper->num_bytes;
- qgroup->rfer_cmpr += sign * oper->num_bytes;
- WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
- qgroup->excl += sign * oper->num_bytes;
- qgroup->excl_cmpr += sign * oper->num_bytes;
- qgroup_dirty(fs_info, qgroup);
-
- /* Add any parents of the parents */
- list_for_each_entry(glist, &qgroup->groups, next_group) {
- ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
- }
- ret = 0;
+ ret = __qgroup_excl_accounting(fs_info, tmp, oper->ref_root,
+ oper->num_bytes, sign);
out:
spin_unlock(&fs_info->qgroup_lock);
ulist_free(tmp);
@@ -1845,7 +1949,7 @@ static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
struct ulist *roots = NULL;
struct ulist *qgroups, *tmp;
struct btrfs_qgroup *qgroup;
- struct seq_list elem = {};
+ struct seq_list elem = SEQ_LIST_INIT(elem);
u64 seq;
int old_roots = 0;
int new_roots = 0;
@@ -1967,7 +2071,7 @@ static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
int err;
struct btrfs_qgroup *qg;
u64 root_obj = 0;
- struct seq_list elem = {};
+ struct seq_list elem = SEQ_LIST_INIT(elem);
parents = ulist_alloc(GFP_NOFS);
if (!parents)
@@ -2156,6 +2260,10 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
if (ret)
fs_info->qgroup_flags |=
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ ret = update_qgroup_limit_item(trans, quota_root, qgroup);
+ if (ret)
+ fs_info->qgroup_flags |=
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
spin_lock(&fs_info->qgroup_lock);
}
if (fs_info->quota_enabled)
@@ -2219,6 +2327,11 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
ret = -EINVAL;
goto out;
}
+
+ if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
+ ret = -EINVAL;
+ goto out;
+ }
++i_qgroups;
}
}
@@ -2230,17 +2343,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
- ret = update_qgroup_limit_item(trans, quota_root, objectid,
- inherit->lim.flags,
- inherit->lim.max_rfer,
- inherit->lim.max_excl,
- inherit->lim.rsv_rfer,
- inherit->lim.rsv_excl);
- if (ret)
- goto out;
- }
-
if (srcid) {
struct btrfs_root *srcroot;
struct btrfs_key srckey;
@@ -2286,6 +2388,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
goto unlock;
}
+ if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
+ dstgroup->lim_flags = inherit->lim.flags;
+ dstgroup->max_rfer = inherit->lim.max_rfer;
+ dstgroup->max_excl = inherit->lim.max_excl;
+ dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
+ dstgroup->rsv_excl = inherit->lim.rsv_excl;
+
+ ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
+ if (ret) {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ btrfs_info(fs_info, "unable to update quota limit for %llu",
+ dstgroup->qgroupid);
+ goto unlock;
+ }
+ }
+
if (srcid) {
srcgroup = find_qgroup_rb(fs_info, srcid);
if (!srcgroup)
@@ -2302,6 +2420,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
dstgroup->excl_cmpr = level_size;
srcgroup->excl = level_size;
srcgroup->excl_cmpr = level_size;
+
+ /* inherit the limit info */
+ dstgroup->lim_flags = srcgroup->lim_flags;
+ dstgroup->max_rfer = srcgroup->max_rfer;
+ dstgroup->max_excl = srcgroup->max_excl;
+ dstgroup->rsv_rfer = srcgroup->rsv_rfer;
+ dstgroup->rsv_excl = srcgroup->rsv_excl;
+
qgroup_dirty(fs_info, dstgroup);
qgroup_dirty(fs_info, srcgroup);
}
@@ -2358,12 +2484,6 @@ out:
return ret;
}
-/*
- * reserve some space for a qgroup and all its parents. The reservation takes
- * place with start_transaction or dealloc_reserve, similar to ENOSPC
- * accounting. If not enough space is available, EDQUOT is returned.
- * We assume that the requested space is new for all qgroups.
- */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
struct btrfs_root *quota_root;
@@ -2513,7 +2633,7 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
/*
* returns < 0 on error, 0 when more leafs are to be scanned.
- * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
+ * returns 1 when done.
*/
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
@@ -2522,7 +2642,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
{
struct btrfs_key found;
struct ulist *roots = NULL;
- struct seq_list tree_mod_seq_elem = {};
+ struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
u64 num_bytes;
u64 seq;
int new_roots;
@@ -2618,6 +2738,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
struct ulist *tmp = NULL, *qgroups = NULL;
struct extent_buffer *scratch_leaf = NULL;
int err = -ENOMEM;
+ int ret = 0;
path = btrfs_alloc_path();
if (!path)
@@ -2660,7 +2781,7 @@ out:
mutex_lock(&fs_info->qgroup_rescan_lock);
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
- if (err == 2 &&
+ if (err > 0 &&
fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
} else if (err < 0) {
@@ -2668,13 +2789,33 @@ out:
}
mutex_unlock(&fs_info->qgroup_rescan_lock);
+ /*
+ * Only update the status item, since the previous part has already
+ * updated the qgroup info.
+ */
+ trans = btrfs_start_transaction(fs_info->quota_root, 1);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ btrfs_err(fs_info,
+ "fail to start transaction for status update: %d\n",
+ err);
+ goto done;
+ }
+ ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
+ if (ret < 0) {
+ err = ret;
+ btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
+ }
+ btrfs_end_transaction(trans, fs_info->quota_root);
+
if (err >= 0) {
btrfs_info(fs_info, "qgroup scan completed%s",
- err == 2 ? " (inconsistency flag cleared)" : "");
+ err > 0 ? " (inconsistency flag cleared)" : "");
} else {
btrfs_err(fs_info, "qgroup scan failed with %d", err);
}
+done:
complete_all(&fs_info->qgroup_rescan_completion);
}
@@ -2709,7 +2850,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
mutex_unlock(&fs_info->qgroup_rescan_lock);
goto err;
}
-
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 18cc68ca3090..c5242aa9a4b2 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -70,8 +70,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid,
- char *name);
+ struct btrfs_fs_info *fs_info, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 qgroupid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 5264858ed768..fa72068bd256 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -237,12 +237,8 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
}
x = cmpxchg(&info->stripe_hash_table, NULL, table);
- if (x) {
- if (is_vmalloc_addr(x))
- vfree(x);
- else
- kfree(x);
- }
+ if (x)
+ kvfree(x);
return 0;
}
@@ -453,10 +449,7 @@ void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
if (!info->stripe_hash_table)
return;
btrfs_clear_rbio_cache(info);
- if (is_vmalloc_addr(info->stripe_hash_table))
- vfree(info->stripe_hash_table);
- else
- kfree(info->stripe_hash_table);
+ kvfree(info->stripe_hash_table);
info->stripe_hash_table = NULL;
}
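The two hunks above lean on kvfree(), which accepts memory that came from either kmalloc() or vmalloc() and releases it through the right path, so the open-coded is_vmalloc_addr() checks can go. A kernel-style sketch of the allocate/free pairing this simplifies (illustrative only, hypothetical struct foo_table, and it only builds inside a kernel tree):

#include <linux/mm.h>      /* kvfree() */
#include <linux/slab.h>    /* kzalloc() */
#include <linux/vmalloc.h> /* vzalloc() */

/* Hypothetical table used only for illustration. */
struct foo_table { unsigned long entries[4096]; };

static struct foo_table *foo_table_alloc(void)
{
	struct foo_table *t;

	/* Try the cheap kmalloc-based allocation first and quietly fall
	 * back to vmalloc space, in the spirit of
	 * btrfs_alloc_stripe_hash_table(). */
	t = kzalloc(sizeof(*t), GFP_KERNEL | __GFP_NOWARN);
	if (!t)
		t = vzalloc(sizeof(*t));
	return t;
}

static void foo_table_free(struct foo_table *t)
{
	/* No is_vmalloc_addr() branching needed any more. */
	kvfree(t);
}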
@@ -1807,8 +1800,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
int err;
int i;
- pointers = kzalloc(rbio->real_stripes * sizeof(void *),
- GFP_NOFS);
+ pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
if (!pointers) {
err = -ENOMEM;
goto cleanup_io;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index d83085381bcc..74b24b01d574 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3027,7 +3027,7 @@ int prealloc_file_extent_cluster(struct inode *inode,
mutex_lock(&inode->i_mutex);
ret = btrfs_check_data_free_space(inode, cluster->end +
- 1 - cluster->start);
+ 1 - cluster->start, 0);
if (ret)
goto out;
@@ -3430,7 +3430,9 @@ static int block_use_full_backref(struct reloc_control *rc,
}
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
- struct inode *inode, u64 ino)
+ struct btrfs_block_group_cache *block_group,
+ struct inode *inode,
+ u64 ino)
{
struct btrfs_key key;
struct btrfs_root *root = fs_info->tree_root;
@@ -3463,7 +3465,7 @@ truncate:
goto out;
}
- ret = btrfs_truncate_free_space_cache(root, trans, inode);
+ ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
@@ -3509,6 +3511,7 @@ static int find_data_references(struct reloc_control *rc,
*/
if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
ret = delete_block_group_cache(rc->extent_root->fs_info,
+ rc->block_group,
NULL, ref_objectid);
if (ret != -ENOENT)
return ret;
@@ -4223,7 +4226,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
btrfs_free_path(path);
if (!IS_ERR(inode))
- ret = delete_block_group_cache(fs_info, inode, 0);
+ ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
else
ret = PTR_ERR(inode);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ec57687c9a4d..ab5811545a98 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -964,9 +964,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
* the statistics.
*/
- sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
- sizeof(*sblocks_for_recheck),
- GFP_NOFS);
+ sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
+ sizeof(*sblocks_for_recheck), GFP_NOFS);
if (!sblocks_for_recheck) {
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
@@ -2319,7 +2318,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
unsigned long *bitmap,
u64 start, u64 len)
{
- int offset;
+ u32 offset;
int nsectors;
int sectorsize = sparity->sctx->dev_root->sectorsize;
@@ -2329,7 +2328,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
}
start -= sparity->logic_start;
- offset = (int)do_div(start, sparity->stripe_len);
+ start = div_u64_rem(start, sparity->stripe_len, &offset);
offset /= sectorsize;
nsectors = (int)len / sectorsize;
@@ -2612,8 +2611,8 @@ static int get_raid56_logic_offset(u64 physical, int num,
int j = 0;
u64 stripe_nr;
u64 last_offset;
- int stripe_index;
- int rot;
+ u32 stripe_index;
+ u32 rot;
last_offset = (physical - map->stripes[num].physical) *
nr_data_stripes(map);
@@ -2624,12 +2623,11 @@ static int get_raid56_logic_offset(u64 physical, int num,
for (i = 0; i < nr_data_stripes(map); i++) {
*offset = last_offset + i * map->stripe_len;
- stripe_nr = *offset;
- do_div(stripe_nr, map->stripe_len);
- do_div(stripe_nr, nr_data_stripes(map));
+ stripe_nr = div_u64(*offset, map->stripe_len);
+ stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
/* Work out the disk rotation on this stripe-set */
- rot = do_div(stripe_nr, map->num_stripes);
+ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
/* calculate which stripe this data locates */
rot += i;
stripe_index = rot % map->num_stripes;
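The get_raid56_logic_offset() rework above makes the quotient/remainder split explicit: div_u64() keeps only the quotient and div_u64_rem() also hands back the 32-bit remainder, which is the disk rotation. A userspace sketch of that rotation math with plain division standing in for the kernel helpers (the stripe sizes and counts below are made-up examples):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for div_u64_rem(): returns the quotient and stores
 * the 32-bit remainder through the pointer. */
static inline uint64_t div_u64_rem_sketch(uint64_t dividend, uint32_t divisor,
					  uint32_t *remainder)
{
	*remainder = (uint32_t)(dividend % divisor);
	return dividend / divisor;
}

int main(void)
{
	uint64_t offset = 5 * 65536;   /* example logical offset in the chunk */
	uint32_t stripe_len = 65536;   /* example stripe length               */
	uint32_t nr_data_stripes = 3;  /* example RAID5: 3 data + 1 parity    */
	uint32_t num_stripes = 4;
	uint64_t stripe_nr;
	uint32_t rot;

	stripe_nr = offset / stripe_len;          /* div_u64() in the patch */
	stripe_nr = stripe_nr / nr_data_stripes;  /* div_u64() in the patch */
	/* div_u64_rem() in the patch: the remainder is the rotation. */
	stripe_nr = div_u64_rem_sketch(stripe_nr, num_stripes, &rot);
	printf("stripe_nr=%llu rot=%u\n", (unsigned long long)stripe_nr, rot);
	return 0;
}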
@@ -2995,10 +2993,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
int extent_mirror_num;
int stop_loop = 0;
- nstripes = length;
physical = map->stripes[num].physical;
offset = 0;
- do_div(nstripes, map->stripe_len);
+ nstripes = div_u64(length, map->stripe_len);
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
offset = map->stripe_len * num;
increment = map->stripe_len * map->num_stripes;
@@ -3563,7 +3560,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
int is_dev_replace)
{
int ret = 0;
- int flags = WQ_FREEZABLE | WQ_UNBOUND;
+ unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
int max_active = fs_info->thread_pool_size;
if (fs_info->scrub_workers_refcnt == 0) {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index d6033f540cc7..a1216f9b4917 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3067,48 +3067,6 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
return NULL;
}
-static int path_loop(struct send_ctx *sctx, struct fs_path *name,
- u64 ino, u64 gen, u64 *ancestor_ino)
-{
- int ret = 0;
- u64 parent_inode = 0;
- u64 parent_gen = 0;
- u64 start_ino = ino;
-
- *ancestor_ino = 0;
- while (ino != BTRFS_FIRST_FREE_OBJECTID) {
- fs_path_reset(name);
-
- if (is_waiting_for_rm(sctx, ino))
- break;
- if (is_waiting_for_move(sctx, ino)) {
- if (*ancestor_ino == 0)
- *ancestor_ino = ino;
- ret = get_first_ref(sctx->parent_root, ino,
- &parent_inode, &parent_gen, name);
- } else {
- ret = __get_cur_name_and_parent(sctx, ino, gen,
- &parent_inode,
- &parent_gen, name);
- if (ret > 0) {
- ret = 0;
- break;
- }
- }
- if (ret < 0)
- break;
- if (parent_inode == start_ino) {
- ret = 1;
- if (*ancestor_ino == 0)
- *ancestor_ino = ino;
- break;
- }
- ino = parent_inode;
- gen = parent_gen;
- }
- return ret;
-}
-
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
struct fs_path *from_path = NULL;
@@ -3120,7 +3078,6 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
struct waiting_dir_move *dm = NULL;
u64 rmdir_ino = 0;
int ret;
- u64 ancestor = 0;
name = fs_path_alloc();
from_path = fs_path_alloc();
@@ -3152,22 +3109,6 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
goto out;
sctx->send_progress = sctx->cur_ino + 1;
- ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
- if (ret) {
- LIST_HEAD(deleted_refs);
- ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
- ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
- &pm->update_refs, &deleted_refs,
- pm->is_orphan);
- if (ret < 0)
- goto out;
- if (rmdir_ino) {
- dm = get_waiting_dir_move(sctx, pm->ino);
- ASSERT(dm);
- dm->rmdir_ino = rmdir_ino;
- }
- goto out;
- }
fs_path_reset(name);
to_path = name;
name = NULL;
@@ -3610,10 +3551,27 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
if (ret < 0)
goto out;
if (ret) {
+ struct name_cache_entry *nce;
+
ret = orphanize_inode(sctx, ow_inode, ow_gen,
cur->full_path);
if (ret < 0)
goto out;
+ /*
+ * Make sure we clear our orphanized inode's
+ * name from the name cache. This is because the
+ * inode ow_inode might be an ancestor of some
+ * other inode that will be orphanized as well
+ * later and has an inode number greater than
+ * sctx->send_progress. We need to prevent
+ * future name lookups from using the old name
+ * and make them use the orphan name instead.
+ */
+ nce = name_cache_search(sctx, ow_inode, ow_gen);
+ if (nce) {
+ name_cache_delete(sctx, nce);
+ kfree(nce);
+ }
} else {
ret = send_unlink(sctx, cur->full_path);
if (ret < 0)
@@ -5852,19 +5810,20 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
ret = PTR_ERR(clone_root);
goto out;
}
- clone_sources_to_rollback = i + 1;
spin_lock(&clone_root->root_item_lock);
- clone_root->send_in_progress++;
- if (!btrfs_root_readonly(clone_root)) {
+ if (!btrfs_root_readonly(clone_root) ||
+ btrfs_root_dead(clone_root)) {
spin_unlock(&clone_root->root_item_lock);
srcu_read_unlock(&fs_info->subvol_srcu, index);
ret = -EPERM;
goto out;
}
+ clone_root->send_in_progress++;
spin_unlock(&clone_root->root_item_lock);
srcu_read_unlock(&fs_info->subvol_srcu, index);
sctx->clone_roots[i].root = clone_root;
+ clone_sources_to_rollback = i + 1;
}
vfree(clone_sources_tmp);
clone_sources_tmp = NULL;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 05fef198ff94..9e66f5e724db 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -901,6 +901,15 @@ find_root:
if (IS_ERR(new_root))
return ERR_CAST(new_root);
+ if (!(sb->s_flags & MS_RDONLY)) {
+ int ret;
+ down_read(&fs_info->cleanup_work_sem);
+ ret = btrfs_orphan_cleanup(new_root);
+ up_read(&fs_info->cleanup_work_sem);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
dir_id = btrfs_root_dirid(&new_root->root_item);
setup_root:
location.objectid = dir_id;
@@ -916,7 +925,7 @@ setup_root:
* a reference to the dentry. We will have already gotten a reference
* to the inode in btrfs_fill_super so we're good to go.
*/
- if (!new && sb->s_root->d_inode == inode) {
+ if (!new && d_inode(sb->s_root) == inode) {
iput(inode);
return dget(sb->s_root);
}
@@ -1221,7 +1230,7 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
root = mount_subtree(mnt, subvol_name);
- if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) {
+ if (!IS_ERR(root) && !is_subvolume_inode(d_inode(root))) {
struct super_block *s = root->d_sb;
dput(root);
root = ERR_PTR(-EINVAL);
@@ -1714,7 +1723,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
avail_space = device->total_bytes - device->bytes_used;
/* align with stripe_len */
- do_div(avail_space, BTRFS_STRIPE_LEN);
+ avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN);
avail_space *= BTRFS_STRIPE_LEN;
/*
@@ -1886,8 +1895,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
/* Mask in the root object ID too, to disambiguate subvols */
- buf->f_fsid.val[0] ^= BTRFS_I(dentry->d_inode)->root->objectid >> 32;
- buf->f_fsid.val[1] ^= BTRFS_I(dentry->d_inode)->root->objectid;
+ buf->f_fsid.val[0] ^= BTRFS_I(d_inode(dentry))->root->objectid >> 32;
+ buf->f_fsid.val[1] ^= BTRFS_I(d_inode(dentry))->root->objectid;
return 0;
}
@@ -1908,6 +1917,17 @@ static struct file_system_type btrfs_fs_type = {
};
MODULE_ALIAS_FS("btrfs");
+static int btrfs_control_open(struct inode *inode, struct file *file)
+{
+ /*
+ * The control file's private_data is used to hold the
+ * transaction when it is started and is used to keep
+ * track of whether a transaction is already in progress.
+ */
+ file->private_data = NULL;
+ return 0;
+}
+
/*
* used by btrfsctl to scan devices when no FS is mounted
*/
@@ -2009,6 +2029,7 @@ static const struct super_operations btrfs_super_ops = {
};
static const struct file_operations btrfs_ctl_fops = {
+ .open = btrfs_control_open,
.unlocked_ioctl = btrfs_control_ioctl,
.compat_ioctl = btrfs_control_ioctl,
.owner = THIS_MODULE,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 94edb0a2a026..e8a4c86d274d 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -459,7 +459,7 @@ static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj)
static char btrfs_unknown_feature_names[3][NUM_FEATURE_BITS][13];
static struct btrfs_feature_attr btrfs_feature_attrs[3][NUM_FEATURE_BITS];
-static u64 supported_feature_masks[3] = {
+static const u64 supported_feature_masks[3] = {
[FEAT_COMPAT] = BTRFS_FEATURE_COMPAT_SUPP,
[FEAT_COMPAT_RO] = BTRFS_FEATURE_COMPAT_RO_SUPP,
[FEAT_INCOMPAT] = BTRFS_FEATURE_INCOMPAT_SUPP,
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index f7dd298b3cf6..3a4bbed723fd 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -61,11 +61,23 @@ static struct btrfs_feature_attr btrfs_attr_##_name = { \
BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
/* convert from attribute */
-#define to_btrfs_feature_attr(a) \
- container_of(a, struct btrfs_feature_attr, kobj_attr)
-#define attr_to_btrfs_attr(a) container_of(a, struct kobj_attribute, attr)
-#define attr_to_btrfs_feature_attr(a) \
- to_btrfs_feature_attr(attr_to_btrfs_attr(a))
+static inline struct btrfs_feature_attr *
+to_btrfs_feature_attr(struct kobj_attribute *a)
+{
+ return container_of(a, struct btrfs_feature_attr, kobj_attr);
+}
+
+static inline struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr)
+{
+ return container_of(attr, struct kobj_attribute, attr);
+}
+
+static inline struct btrfs_feature_attr *
+attr_to_btrfs_feature_attr(struct attribute *attr)
+{
+ return to_btrfs_feature_attr(attr_to_btrfs_attr(attr));
+}
+
char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
extern const char * const btrfs_feature_set_names[3];
extern struct kobj_type space_info_ktype;
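Turning the container_of() wrappers above into static inline functions lets the compiler type-check the argument, which the macro versions silently skipped. A self-contained sketch of the same idea (hand-rolled container_of() and made-up struct names, for illustration only):

#include <stddef.h>
#include <stdio.h>

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attr_sketch { const char *name; };

struct feature_attr_sketch {
	struct attr_sketch kobj_attr;
	unsigned long long feature_bit;
};

/* The inline form only accepts a struct attr_sketch *, so passing anything
 * else is a compile-time error instead of a silent reinterpretation. */
static inline struct feature_attr_sketch *
to_feature_attr(struct attr_sketch *a)
{
	return container_of_sketch(a, struct feature_attr_sketch, kobj_attr);
}

int main(void)
{
	struct feature_attr_sketch fa = {
		.kobj_attr = { "compat_ro" },
		.feature_bit = 1,
	};
	struct attr_sketch *a = &fa.kobj_attr;

	printf("%s -> bit %llu\n", a->name, to_feature_attr(a)->feature_bit);
	return 0;
}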
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 73f299ebdabb..c32a7ba76bca 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -232,7 +232,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
init_dummy_trans(&trans);
test_msg("Qgroup basic add\n");
- ret = btrfs_create_qgroup(NULL, fs_info, 5, NULL);
+ ret = btrfs_create_qgroup(NULL, fs_info, 5);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
@@ -301,7 +301,7 @@ static int test_multiple_refs(struct btrfs_root *root)
test_msg("Qgroup multiple refs test\n");
/* We have 5 created already from the previous test */
- ret = btrfs_create_qgroup(NULL, fs_info, 256, NULL);
+ ret = btrfs_create_qgroup(NULL, fs_info, 256);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8be4278e25e8..5628e25250c0 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -35,7 +35,7 @@
#define BTRFS_ROOT_TRANS_TAG 0
-static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
+static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
[TRANS_STATE_BLOCKED] = (__TRANS_USERSPACE |
__TRANS_START),
@@ -64,6 +64,9 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
if (atomic_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
+ if (transaction->delayed_refs.pending_csums)
+ printk(KERN_ERR "pending csums is %llu\n",
+ transaction->delayed_refs.pending_csums);
while (!list_empty(&transaction->pending_chunks)) {
struct extent_map *em;
@@ -93,11 +96,8 @@ static void clear_btree_io_tree(struct extent_io_tree *tree)
*/
ASSERT(!waitqueue_active(&state->wq));
free_extent_state(state);
- if (need_resched()) {
- spin_unlock(&tree->lock);
- cond_resched();
- spin_lock(&tree->lock);
- }
+
+ cond_resched_lock(&tree->lock);
}
spin_unlock(&tree->lock);
}
@@ -222,10 +222,12 @@ loop:
atomic_set(&cur_trans->use_count, 2);
cur_trans->have_free_bgs = 0;
cur_trans->start_time = get_seconds();
+ cur_trans->dirty_bg_run = 0;
cur_trans->delayed_refs.href_root = RB_ROOT;
atomic_set(&cur_trans->delayed_refs.num_entries, 0);
cur_trans->delayed_refs.num_heads_ready = 0;
+ cur_trans->delayed_refs.pending_csums = 0;
cur_trans->delayed_refs.num_heads = 0;
cur_trans->delayed_refs.flushing = 0;
cur_trans->delayed_refs.run_delayed_start = 0;
@@ -250,6 +252,9 @@ loop:
INIT_LIST_HEAD(&cur_trans->switch_commits);
INIT_LIST_HEAD(&cur_trans->pending_ordered);
INIT_LIST_HEAD(&cur_trans->dirty_bgs);
+ INIT_LIST_HEAD(&cur_trans->io_bgs);
+ mutex_init(&cur_trans->cache_write_mutex);
+ cur_trans->num_dirty_bgs = 0;
spin_lock_init(&cur_trans->dirty_bgs_lock);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
@@ -721,7 +726,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (updates) {
- err = btrfs_run_delayed_refs(trans, root, updates);
+ err = btrfs_run_delayed_refs(trans, root, updates * 2);
if (err) /* Error code will also eval true */
return err;
}
@@ -1057,6 +1062,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
+ struct list_head *io_bgs = &trans->transaction->io_bgs;
struct list_head *next;
struct extent_buffer *eb;
int ret;
@@ -1110,7 +1116,7 @@ again:
return ret;
}
- while (!list_empty(dirty_bgs)) {
+ while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
ret = btrfs_write_dirty_block_groups(trans, root);
if (ret)
return ret;
@@ -1810,6 +1816,37 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret;
}
+ if (!cur_trans->dirty_bg_run) {
+ int run_it = 0;
+
+ /* this mutex is also taken before trying to set
+ * block groups readonly. We need to make sure
+ * that nobody has set a block group readonly
+ * after extents from that block group have been
+ * allocated for cache files. btrfs_set_block_group_ro
+ * will wait for the transaction to commit if it
+ * finds dirty_bg_run = 1
+ *
+ * The dirty_bg_run flag is also used to make sure only
+ * one process starts all the block group IO. It wouldn't
+ * hurt to have more than one go through, but there's no
+ * real advantage to it either.
+ */
+ mutex_lock(&root->fs_info->ro_block_group_mutex);
+ if (!cur_trans->dirty_bg_run) {
+ run_it = 1;
+ cur_trans->dirty_bg_run = 1;
+ }
+ mutex_unlock(&root->fs_info->ro_block_group_mutex);
+
+ if (run_it)
+ ret = btrfs_start_dirty_block_groups(trans, root);
+ }
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
+
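The dirty_bg_run block added above is the pattern the comment describes: check the flag under a mutex, set it, and let exactly one committer kick off the block group writeout. A userspace sketch with a pthread mutex (illustrative only; in the patch the flag lives in the transaction and the lock is ro_block_group_mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ro_mutex = PTHREAD_MUTEX_INITIALIZER;
static int dirty_bg_run; /* set once per "transaction" */

static void start_dirty_block_groups(void)
{
	printf("writing dirty block groups\n");
}

static void commit_step(void)
{
	int run_it = 0;

	/* Only the first caller that sees the flag clear does the work;
	 * everyone else observes dirty_bg_run == 1 and skips it. */
	pthread_mutex_lock(&ro_mutex);
	if (!dirty_bg_run) {
		run_it = 1;
		dirty_bg_run = 1;
	}
	pthread_mutex_unlock(&ro_mutex);

	if (run_it)
		start_dirty_block_groups();
}

int main(void)
{
	commit_step(); /* does the work  */
	commit_step(); /* skips the work */
	return 0;
}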
spin_lock(&root->fs_info->trans_lock);
list_splice(&trans->ordered, &cur_trans->pending_ordered);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
@@ -2003,6 +2040,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
assert_qgroups_uptodate(trans);
ASSERT(list_empty(&cur_trans->dirty_bgs));
+ ASSERT(list_empty(&cur_trans->io_bgs));
update_super_roots(root);
btrfs_set_super_log_root(root->fs_info->super_copy, 0);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 937050a2b68e..0b24755596ba 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -64,9 +64,19 @@ struct btrfs_transaction {
struct list_head pending_ordered;
struct list_head switch_commits;
struct list_head dirty_bgs;
+ struct list_head io_bgs;
+ u64 num_dirty_bgs;
+
+ /*
+ * we need to make sure block group deletion doesn't race with
+ * free space cache writeout. This mutex keeps them from stomping
+ * on each other
+ */
+ struct mutex cache_write_mutex;
spinlock_t dirty_bgs_lock;
struct btrfs_delayed_ref_root delayed_refs;
int aborted;
+ int dirty_bg_run;
};
#define __TRANS_FREEZABLE (1U << 0)
@@ -136,9 +146,11 @@ struct btrfs_pending_snapshot {
static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
struct inode *inode)
{
+ spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->last_trans = trans->transaction->transid;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
+ spin_unlock(&BTRFS_I(inode)->lock);
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c5b8ba37f88e..d04968374e9d 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -492,11 +492,19 @@ insert:
if (btrfs_inode_generation(eb, src_item) == 0) {
struct extent_buffer *dst_eb = path->nodes[0];
+ const u64 ino_size = btrfs_inode_size(eb, src_item);
+ /*
+ * For regular files an ino_size == 0 is used only when
+ * logging that an inode exists, as part of a directory
+ * fsync, and the inode wasn't fsynced before. In this
+ * case don't set the size of the inode in the fs/subvol
+ * tree, otherwise we would be throwing valid data away.
+ */
if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
- S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
+ S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
+ ino_size != 0) {
struct btrfs_map_token token;
- u64 ino_size = btrfs_inode_size(eb, src_item);
btrfs_init_map_token(&token);
btrfs_set_token_inode_size(dst_eb, dst_item,
@@ -1951,6 +1959,104 @@ out:
return ret;
}
+static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_root *log,
+ struct btrfs_path *path,
+ const u64 ino)
+{
+ struct btrfs_key search_key;
+ struct btrfs_path *log_path;
+ int i;
+ int nritems;
+ int ret;
+
+ log_path = btrfs_alloc_path();
+ if (!log_path)
+ return -ENOMEM;
+
+ search_key.objectid = ino;
+ search_key.type = BTRFS_XATTR_ITEM_KEY;
+ search_key.offset = 0;
+again:
+ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+process_leaf:
+ nritems = btrfs_header_nritems(path->nodes[0]);
+ for (i = path->slots[0]; i < nritems; i++) {
+ struct btrfs_key key;
+ struct btrfs_dir_item *di;
+ struct btrfs_dir_item *log_di;
+ u32 total_size;
+ u32 cur;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, i);
+ if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
+ ret = 0;
+ goto out;
+ }
+
+ di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
+ total_size = btrfs_item_size_nr(path->nodes[0], i);
+ cur = 0;
+ while (cur < total_size) {
+ u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
+ u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
+ u32 this_len = sizeof(*di) + name_len + data_len;
+ char *name;
+
+ name = kmalloc(name_len, GFP_NOFS);
+ if (!name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ read_extent_buffer(path->nodes[0], name,
+ (unsigned long)(di + 1), name_len);
+
+ log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
+ name, name_len, 0);
+ btrfs_release_path(log_path);
+ if (!log_di) {
+ /* Doesn't exist in log tree, so delete it. */
+ btrfs_release_path(path);
+ di = btrfs_lookup_xattr(trans, root, path, ino,
+ name, name_len, -1);
+ kfree(name);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ ASSERT(di);
+ ret = btrfs_delete_one_dir_name(trans, root,
+ path, di);
+ if (ret)
+ goto out;
+ btrfs_release_path(path);
+ search_key = key;
+ goto again;
+ }
+ kfree(name);
+ if (IS_ERR(log_di)) {
+ ret = PTR_ERR(log_di);
+ goto out;
+ }
+ cur += this_len;
+ di = (struct btrfs_dir_item *)((char *)di + this_len);
+ }
+ }
+ ret = btrfs_next_leaf(root, path);
+ if (ret > 0)
+ ret = 0;
+ else if (ret == 0)
+ goto process_leaf;
+out:
+ btrfs_free_path(log_path);
+ btrfs_release_path(path);
+ return ret;
+}
+
+
/*
* deletion replay happens before we copy any new directory items
* out of the log or out of backreferences from inodes. It
@@ -2104,6 +2210,10 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
inode_item = btrfs_item_ptr(eb, i,
struct btrfs_inode_item);
+ ret = replay_xattr_deletes(wc->trans, root, log,
+ path, key.objectid);
+ if (ret)
+ break;
mode = btrfs_inode_mode(eb, inode_item);
if (S_ISDIR(mode)) {
ret = replay_dir_deletes(wc->trans,
@@ -2230,7 +2340,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, root, next);
+ clean_tree_block(trans, root->fs_info,
+ next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
@@ -2308,7 +2419,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, root, next);
+ clean_tree_block(trans, root->fs_info,
+ next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
@@ -2384,7 +2496,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, log, next);
+ clean_tree_block(trans, log->fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
@@ -3020,6 +3132,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
struct btrfs_path *path,
struct btrfs_path *dst_path, int key_type,
+ struct btrfs_log_ctx *ctx,
u64 min_offset, u64 *last_offset_ret)
{
struct btrfs_key min_key;
@@ -3104,6 +3217,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
src = path->nodes[0];
nritems = btrfs_header_nritems(src);
for (i = path->slots[0]; i < nritems; i++) {
+ struct btrfs_dir_item *di;
+
btrfs_item_key_to_cpu(src, &min_key, i);
if (min_key.objectid != ino || min_key.type != key_type)
@@ -3114,6 +3229,37 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
err = ret;
goto done;
}
+
+ /*
+ * We must make sure that when we log a directory entry,
+ * the corresponding inode, after log replay, has a
+ * matching link count. For example:
+ *
+ * touch foo
+ * mkdir mydir
+ * sync
+ * ln foo mydir/bar
+ * xfs_io -c "fsync" mydir
+ * <crash>
+ * <mount fs and log replay>
+ *
+ * Would result in an fsync log that, when replayed, leaves
+ * our file inode with a link count of 1, but with two
+ * directory entries pointing to the same inode.
+ * After removing one of the names, it would not be
+ * possible to remove the other name, which always
+ * resulted in stale file handle errors, and it would not
+ * be possible to rmdir the parent directory, since
+ * its i_size could never decrement to the value
+ * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
+ */
+ di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
+ btrfs_dir_item_key_to_cpu(src, di, &tmp);
+ if (ctx &&
+ (btrfs_dir_transid(src, di) == trans->transid ||
+ btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
+ tmp.type != BTRFS_ROOT_ITEM_KEY)
+ ctx->log_new_dentries = true;
}
path->slots[0] = nritems;
@@ -3175,7 +3321,8 @@ done:
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
struct btrfs_path *path,
- struct btrfs_path *dst_path)
+ struct btrfs_path *dst_path,
+ struct btrfs_log_ctx *ctx)
{
u64 min_key;
u64 max_key;
@@ -3187,7 +3334,7 @@ again:
max_key = 0;
while (1) {
ret = log_dir_items(trans, root, inode, path,
- dst_path, key_type, min_key,
+ dst_path, key_type, ctx, min_key,
&max_key);
if (ret)
return ret;
@@ -3963,7 +4110,7 @@ static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
if (ret < 0) {
return ret;
} else if (ret > 0) {
- *size_ret = i_size_read(inode);
+ *size_ret = 0;
} else {
struct btrfs_inode_item *item;
@@ -4070,10 +4217,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
if (S_ISDIR(inode->i_mode)) {
int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
- if (inode_only == LOG_INODE_EXISTS) {
- max_key_type = BTRFS_INODE_EXTREF_KEY;
- max_key.type = max_key_type;
- }
+ if (inode_only == LOG_INODE_EXISTS)
+ max_key_type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino, max_key_type);
} else {
if (inode_only == LOG_INODE_EXISTS) {
@@ -4098,7 +4243,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags)) {
if (inode_only == LOG_INODE_EXISTS) {
- max_key.type = BTRFS_INODE_EXTREF_KEY;
+ max_key.type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino,
max_key.type);
} else {
@@ -4106,20 +4251,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
&BTRFS_I(inode)->runtime_flags);
clear_bit(BTRFS_INODE_COPY_EVERYTHING,
&BTRFS_I(inode)->runtime_flags);
- ret = btrfs_truncate_inode_items(trans, log,
- inode, 0, 0);
+ while (1) {
+ ret = btrfs_truncate_inode_items(trans,
+ log, inode, 0, 0);
+ if (ret != -EAGAIN)
+ break;
+ }
}
- } else if (test_bit(BTRFS_INODE_COPY_EVERYTHING,
- &BTRFS_I(inode)->runtime_flags) ||
+ } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+ &BTRFS_I(inode)->runtime_flags) ||
inode_only == LOG_INODE_EXISTS) {
- if (inode_only == LOG_INODE_ALL) {
- clear_bit(BTRFS_INODE_COPY_EVERYTHING,
- &BTRFS_I(inode)->runtime_flags);
+ if (inode_only == LOG_INODE_ALL)
fast_search = true;
- max_key.type = BTRFS_XATTR_ITEM_KEY;
- } else {
- max_key.type = BTRFS_INODE_EXTREF_KEY;
- }
+ max_key.type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino,
max_key.type);
} else {
@@ -4277,15 +4421,18 @@ log_extents:
}
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
- ret = log_directory_changes(trans, root, inode, path, dst_path);
+ ret = log_directory_changes(trans, root, inode, path, dst_path,
+ ctx);
if (ret) {
err = ret;
goto out_unlock;
}
}
+ spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->logged_trans = trans->transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
+ spin_unlock(&BTRFS_I(inode)->lock);
out_unlock:
if (unlikely(err))
btrfs_put_logged_extents(&logged_list);
@@ -4327,9 +4474,9 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
goto out;
if (!S_ISDIR(inode->i_mode)) {
- if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
+ if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
goto out;
- inode = parent->d_inode;
+ inode = d_inode(parent);
}
while (1) {
@@ -4355,7 +4502,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
break;
}
- if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
+ if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
break;
if (IS_ROOT(parent))
@@ -4364,7 +4511,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
parent = dget_parent(parent);
dput(old_parent);
old_parent = parent;
- inode = parent->d_inode;
+ inode = d_inode(parent);
}
dput(old_parent);
@@ -4372,6 +4519,181 @@ out:
return ret;
}
+struct btrfs_dir_list {
+ u64 ino;
+ struct list_head list;
+};
+
+/*
+ * Log the inodes of the new dentries of a directory. See log_dir_items() for
+ * details about why it is needed.
+ * This is a recursive operation - if an existing dentry corresponds to a
+ * directory, that directory's new entries are logged too (same behaviour as
+ * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
+ * the dentries point to we do not lock their i_mutex, otherwise lockdep
+ * complains about the following circular lock dependency / possible deadlock:
+ *
+ * CPU0 CPU1
+ * ---- ----
+ * lock(&type->i_mutex_dir_key#3/2);
+ * lock(sb_internal#2);
+ * lock(&type->i_mutex_dir_key#3/2);
+ * lock(&sb->s_type->i_mutex_key#14);
+ *
+ * Where sb_internal is the lock (a counter that works as a lock) acquired by
+ * sb_start_intwrite() in btrfs_start_transaction().
+ * Not locking i_mutex of the inodes is still safe because:
+ *
+ * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
+ * that while logging the inode new references (names) are added or removed
+ * from the inode, leaving the logged inode item with a link count that does
+ * not match the number of logged inode reference items. This is fine because
+ * at log replay time we compute the real number of links and correct the
+ * link count in the inode item (see replay_one_buffer() and
+ * link_to_fixup_dir());
+ *
+ * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
+ * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
+ * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
+ * has a size that doesn't match the sum of the lengths of all the logged
+ * names. This does not result in a problem because if a dir_item key is
+ * logged but its matching dir_index key is not logged, at log replay time we
+ * don't use it to replay the respective name (see replay_one_name()). On the
+ * other hand if only the dir_index key ends up being logged, the respective
+ * name is added to the fs/subvol tree with both the dir_item and dir_index
+ * keys created (see replay_one_name()).
+ * The directory's inode item with a wrong i_size is not a problem as well,
+ * since we don't use it at log replay time to set the i_size in the inode
+ * item of the fs/subvol tree (see overwrite_item()).
+ */
+static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *start_inode,
+ struct btrfs_log_ctx *ctx)
+{
+ struct btrfs_root *log = root->log_root;
+ struct btrfs_path *path;
+ LIST_HEAD(dir_list);
+ struct btrfs_dir_list *dir_elem;
+ int ret = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
+ if (!dir_elem) {
+ btrfs_free_path(path);
+ return -ENOMEM;
+ }
+ dir_elem->ino = btrfs_ino(start_inode);
+ list_add_tail(&dir_elem->list, &dir_list);
+
+ while (!list_empty(&dir_list)) {
+ struct extent_buffer *leaf;
+ struct btrfs_key min_key;
+ int nritems;
+ int i;
+
+ dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
+ list);
+ if (ret)
+ goto next_dir_inode;
+
+ min_key.objectid = dir_elem->ino;
+ min_key.type = BTRFS_DIR_ITEM_KEY;
+ min_key.offset = 0;
+again:
+ btrfs_release_path(path);
+ ret = btrfs_search_forward(log, &min_key, path, trans->transid);
+ if (ret < 0) {
+ goto next_dir_inode;
+ } else if (ret > 0) {
+ ret = 0;
+ goto next_dir_inode;
+ }
+
+process_leaf:
+ leaf = path->nodes[0];
+ nritems = btrfs_header_nritems(leaf);
+ for (i = path->slots[0]; i < nritems; i++) {
+ struct btrfs_dir_item *di;
+ struct btrfs_key di_key;
+ struct inode *di_inode;
+ struct btrfs_dir_list *new_dir_elem;
+ int log_mode = LOG_INODE_EXISTS;
+ int type;
+
+ btrfs_item_key_to_cpu(leaf, &min_key, i);
+ if (min_key.objectid != dir_elem->ino ||
+ min_key.type != BTRFS_DIR_ITEM_KEY)
+ goto next_dir_inode;
+
+ di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
+ type = btrfs_dir_type(leaf, di);
+ if (btrfs_dir_transid(leaf, di) < trans->transid &&
+ type != BTRFS_FT_DIR)
+ continue;
+ btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
+ if (di_key.type == BTRFS_ROOT_ITEM_KEY)
+ continue;
+
+ di_inode = btrfs_iget(root->fs_info->sb, &di_key,
+ root, NULL);
+ if (IS_ERR(di_inode)) {
+ ret = PTR_ERR(di_inode);
+ goto next_dir_inode;
+ }
+
+ if (btrfs_inode_in_log(di_inode, trans->transid)) {
+ iput(di_inode);
+ continue;
+ }
+
+ ctx->log_new_dentries = false;
+ if (type == BTRFS_FT_DIR)
+ log_mode = LOG_INODE_ALL;
+ btrfs_release_path(path);
+ ret = btrfs_log_inode(trans, root, di_inode,
+ log_mode, 0, LLONG_MAX, ctx);
+ iput(di_inode);
+ if (ret)
+ goto next_dir_inode;
+ if (ctx->log_new_dentries) {
+ new_dir_elem = kmalloc(sizeof(*new_dir_elem),
+ GFP_NOFS);
+ if (!new_dir_elem) {
+ ret = -ENOMEM;
+ goto next_dir_inode;
+ }
+ new_dir_elem->ino = di_key.objectid;
+ list_add_tail(&new_dir_elem->list, &dir_list);
+ }
+ break;
+ }
+ if (i == nritems) {
+ ret = btrfs_next_leaf(log, path);
+ if (ret < 0) {
+ goto next_dir_inode;
+ } else if (ret > 0) {
+ ret = 0;
+ goto next_dir_inode;
+ }
+ goto process_leaf;
+ }
+ if (min_key.offset < (u64)-1) {
+ min_key.offset++;
+ goto again;
+ }
+next_dir_inode:
+ list_del(&dir_elem->list);
+ kfree(dir_elem);
+ }
+
+ btrfs_free_path(path);
+ return ret;
+}
+
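log_new_dir_dentries() above avoids recursion by keeping a list of directory inodes still to be logged; processing one directory may append more directories to the tail, and the loop ends when the list drains. The sketch below reproduces that iterative traversal with a fixed-size queue and a hard-coded toy directory tree (illustrative only):

#include <stdio.h>

#define MAX_DIRS 16

/* Hard-coded "directory tree": dir i contains the listed child dirs. */
static const int children[4][2] = {
	{ 1, 2 },   /* dir 0 -> dirs 1 and 2 */
	{ 3, -1 },  /* dir 1 -> dir 3        */
	{ -1, -1 }, /* dir 2 -> nothing      */
	{ -1, -1 }, /* dir 3 -> nothing      */
};

int main(void)
{
	int queue[MAX_DIRS];
	int head = 0, tail = 0, i;

	queue[tail++] = 0; /* start_inode's directory */
	while (head < tail) {
		int dir = queue[head++];

		printf("logging directory %d\n", dir);
		/* "New dentries" that are themselves directories get queued,
		 * mirroring the dir_list handling in the patch. */
		for (i = 0; i < 2; i++)
			if (children[dir][i] >= 0)
				queue[tail++] = children[dir][i];
	}
	return 0;
}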
/*
* helper function around btrfs_log_inode to make sure newly created
* parent directories also end up in the log. A minimal inode and backref
@@ -4394,6 +4716,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
const struct dentry * const first_parent = parent;
const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans >
last_committed);
+ bool log_dentries = false;
+ struct inode *orig_inode = inode;
sb = inode->i_sb;
@@ -4449,11 +4773,14 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_trans;
}
+ if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
+ log_dentries = true;
+
while (1) {
- if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
+ if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
break;
- inode = parent->d_inode;
+ inode = d_inode(parent);
if (root != BTRFS_I(inode)->root)
break;
@@ -4485,7 +4812,10 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
dput(old_parent);
old_parent = parent;
}
- ret = 0;
+ if (log_dentries)
+ ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
+ else
+ ret = 0;
end_trans:
dput(old_parent);
if (ret < 0) {
@@ -4515,7 +4845,7 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
struct dentry *parent = dget_parent(dentry);
int ret;
- ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
+ ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
start, end, 0, ctx);
dput(parent);
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 154990c26dcb..6916a781ea02 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -29,6 +29,7 @@ struct btrfs_log_ctx {
int log_ret;
int log_transid;
int io_err;
+ bool log_new_dentries;
struct list_head list;
};
@@ -37,6 +38,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
ctx->log_ret = 0;
ctx->log_transid = 0;
ctx->io_err = 0;
+ ctx->log_new_dentries = false;
INIT_LIST_HEAD(&ctx->list);
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8222f6f74147..96aebf3bcd5b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -366,8 +366,8 @@ loop_lock:
btrfsic_submit_bio(cur->bi_rw, cur);
num_run++;
batch_run++;
- if (need_resched())
- cond_resched();
+
+ cond_resched();
/*
* we made progress, there is more work to do and the bdi
@@ -400,8 +400,7 @@ loop_lock:
* against it before looping
*/
last_waited = ioc->last_waited;
- if (need_resched())
- cond_resched();
+ cond_resched();
continue;
}
spin_lock(&device->io_lock);
@@ -609,8 +608,7 @@ error:
return ERR_PTR(-ENOMEM);
}
-void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
- struct btrfs_fs_devices *fs_devices, int step)
+void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
struct btrfs_device *device, *next;
struct btrfs_device *latest_dev = NULL;
@@ -1060,6 +1058,7 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
struct extent_map *em;
struct list_head *search_list = &trans->transaction->pending_chunks;
int ret = 0;
+ u64 physical_start = *start;
again:
list_for_each_entry(em, search_list, list) {
@@ -1070,9 +1069,9 @@ again:
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev != device)
continue;
- if (map->stripes[i].physical >= *start + len ||
+ if (map->stripes[i].physical >= physical_start + len ||
map->stripes[i].physical + em->orig_block_len <=
- *start)
+ physical_start)
continue;
*start = map->stripes[i].physical +
em->orig_block_len;
@@ -1136,11 +1135,11 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
-again:
+
max_hole_start = search_start;
max_hole_size = 0;
- hole_size = 0;
+again:
if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
ret = -ENOSPC;
goto out;
@@ -1195,8 +1194,14 @@ again:
*/
if (contains_pending_extent(trans, device,
&search_start,
- hole_size))
- hole_size = 0;
+ hole_size)) {
+ if (key.offset >= search_start) {
+ hole_size = key.offset - search_start;
+ } else {
+ WARN_ON_ONCE(1);
+ hole_size = 0;
+ }
+ }
if (hole_size > max_hole_size) {
max_hole_start = search_start;
@@ -1233,21 +1238,23 @@ next:
* allocated dev extents, and when shrinking the device,
* search_end may be smaller than search_start.
*/
- if (search_end > search_start)
+ if (search_end > search_start) {
hole_size = search_end - search_start;
- if (hole_size > max_hole_size) {
- max_hole_start = search_start;
- max_hole_size = hole_size;
- }
+ if (contains_pending_extent(trans, device, &search_start,
+ hole_size)) {
+ btrfs_release_path(path);
+ goto again;
+ }
- if (contains_pending_extent(trans, device, &search_start, hole_size)) {
- btrfs_release_path(path);
- goto again;
+ if (hole_size > max_hole_size) {
+ max_hole_start = search_start;
+ max_hole_size = hole_size;
+ }
}
/* See above. */
- if (hole_size < num_bytes)
+ if (max_hole_size < num_bytes)
ret = -ENOSPC;
else
ret = 0;
@@ -2487,8 +2494,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 chunk_tree, u64 chunk_objectid,
+ struct btrfs_root *root, u64 chunk_objectid,
u64 chunk_offset)
{
int ret;
@@ -2580,7 +2586,6 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
struct map_lookup *map;
u64 dev_extent_len = 0;
u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
- u64 chunk_tree = root->fs_info->chunk_root->objectid;
int i, ret = 0;
/* Just in case */
@@ -2634,8 +2639,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
}
}
- ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
- chunk_offset);
+ ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
@@ -2664,8 +2668,8 @@ out:
}
static int btrfs_relocate_chunk(struct btrfs_root *root,
- u64 chunk_tree, u64 chunk_objectid,
- u64 chunk_offset)
+ u64 chunk_objectid,
+ u64 chunk_offset)
{
struct btrfs_root *extent_root;
struct btrfs_trans_handle *trans;
@@ -2707,7 +2711,6 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
struct btrfs_chunk *chunk;
struct btrfs_key key;
struct btrfs_key found_key;
- u64 chunk_tree = chunk_root->root_key.objectid;
u64 chunk_type;
bool retried = false;
int failed = 0;
@@ -2744,7 +2747,7 @@ again:
btrfs_release_path(path);
if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
- ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
+ ret = btrfs_relocate_chunk(chunk_root,
found_key.objectid,
found_key.offset);
if (ret == -ENOSPC)
@@ -3022,7 +3025,7 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
stripe_offset = btrfs_stripe_offset(leaf, stripe);
stripe_length = btrfs_chunk_length(leaf, chunk);
- do_div(stripe_length, factor);
+ stripe_length = div_u64(stripe_length, factor);
if (stripe_offset < bargs->pend &&
stripe_offset + stripe_length > bargs->pstart)
@@ -3255,7 +3258,6 @@ again:
}
ret = btrfs_relocate_chunk(chunk_root,
- chunk_root->root_key.objectid,
found_key.objectid,
found_key.offset);
if (ret && ret != -ENOSPC)
@@ -3957,7 +3959,6 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
u64 length;
- u64 chunk_tree;
u64 chunk_objectid;
u64 chunk_offset;
int ret;
@@ -4027,13 +4028,11 @@ again:
break;
}
- chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
btrfs_release_path(path);
- ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
- chunk_offset);
+ ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
if (ret && ret != -ENOSPC)
goto done;
if (ret == -ENOSPC)
@@ -4131,7 +4130,7 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
return 0;
}
-static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
+static const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = {
.sub_stripes = 2,
.dev_stripes = 1,
@@ -4289,7 +4288,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
max_chunk_size);
- devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
+ devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
GFP_NOFS);
if (!devices_info)
return -ENOMEM;
@@ -4400,8 +4399,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
*/
if (stripe_size * data_stripes > max_chunk_size) {
u64 mask = (1ULL << 24) - 1;
- stripe_size = max_chunk_size;
- do_div(stripe_size, data_stripes);
+
+ stripe_size = div_u64(max_chunk_size, data_stripes);
/* bump the answer up to a 16MB boundary */
stripe_size = (stripe_size + mask) & ~mask;
@@ -4413,10 +4412,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
stripe_size = devices_info[ndevs-1].max_avail;
}
- do_div(stripe_size, dev_stripes);
+ stripe_size = div_u64(stripe_size, dev_stripes);
/* align to BTRFS_STRIPE_LEN */
- do_div(stripe_size, raid_stripe_len);
+ stripe_size = div_u64(stripe_size, raid_stripe_len);
stripe_size *= raid_stripe_len;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -4954,7 +4953,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 stripe_nr_orig;
u64 stripe_nr_end;
u64 stripe_len;
- int stripe_index;
+ u32 stripe_index;
int i;
int ret = 0;
int num_stripes;
@@ -4995,7 +4994,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
* stripe_nr counts the total number of stripes we have to stride
* to get to this block
*/
- do_div(stripe_nr, stripe_len);
+ stripe_nr = div64_u64(stripe_nr, stripe_len);
stripe_offset = stripe_nr * stripe_len;
BUG_ON(offset < stripe_offset);
@@ -5011,7 +5010,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
/* allow a write of a full stripe, but make sure we don't
* allow straddling of stripes
*/
- do_div(raid56_full_stripe_start, full_stripe_len);
+ raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
+ full_stripe_len);
raid56_full_stripe_start *= full_stripe_len;
}
@@ -5136,7 +5136,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
stripe_index = 0;
stripe_nr_orig = stripe_nr;
stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
- do_div(stripe_nr_end, map->stripe_len);
+ stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
stripe_end_offset = stripe_nr_end * map->stripe_len -
(offset + *length);
@@ -5144,7 +5144,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
if (rw & REQ_DISCARD)
num_stripes = min_t(u64, map->num_stripes,
stripe_nr_end - stripe_nr_orig);
- stripe_index = do_div(stripe_nr, map->num_stripes);
+ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
+ &stripe_index);
if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
mirror_num = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
@@ -5170,9 +5171,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
}
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
- int factor = map->num_stripes / map->sub_stripes;
+ u32 factor = map->num_stripes / map->sub_stripes;
- stripe_index = do_div(stripe_nr, factor);
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
stripe_index *= map->sub_stripes;
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
@@ -5198,8 +5199,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
mirror_num > 1)) {
/* push stripe_nr back to the start of the full stripe */
- stripe_nr = raid56_full_stripe_start;
- do_div(stripe_nr, stripe_len * nr_data_stripes(map));
+ stripe_nr = div_u64(raid56_full_stripe_start,
+ stripe_len * nr_data_stripes(map));
/* RAID[56] write or recovery. Return all stripes */
num_stripes = map->num_stripes;
@@ -5209,32 +5210,32 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
stripe_index = 0;
stripe_offset = 0;
} else {
- u64 tmp;
-
/*
* Mirror #0 or #1 means the original data block.
* Mirror #2 is RAID5 parity block.
* Mirror #3 is RAID6 Q block.
*/
- stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+ stripe_nr = div_u64_rem(stripe_nr,
+ nr_data_stripes(map), &stripe_index);
if (mirror_num > 1)
stripe_index = nr_data_stripes(map) +
mirror_num - 2;
/* We distribute the parity blocks across stripes */
- tmp = stripe_nr + stripe_index;
- stripe_index = do_div(tmp, map->num_stripes);
+ div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
+ &stripe_index);
if (!(rw & (REQ_WRITE | REQ_DISCARD |
REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
mirror_num = 1;
}
} else {
/*
- * after this do_div call, stripe_nr is the number of stripes
- * on this device we have to walk to find the data, and
- * stripe_index is the number of our device in the stripe array
+ * after this, stripe_nr is the number of stripes on this
+ * device we have to walk to find the data, and stripe_index is
+ * the number of our device in the stripe array
*/
- stripe_index = do_div(stripe_nr, map->num_stripes);
+ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
+ &stripe_index);
mirror_num = stripe_index + 1;
}
BUG_ON(stripe_index >= map->num_stripes);
@@ -5261,7 +5262,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
mirror_num > 1)) {
u64 tmp;
- int i, rot;
+ unsigned rot;
bbio->raid_map = (u64 *)((void *)bbio->stripes +
sizeof(struct btrfs_bio_stripe) *
@@ -5269,8 +5270,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
sizeof(int) * tgtdev_indexes);
/* Work out the disk rotation on this stripe-set */
- tmp = stripe_nr;
- rot = do_div(tmp, num_stripes);
+ div_u64_rem(stripe_nr, num_stripes, &rot);
/* Fill in the logical address of each stripe */
tmp = stripe_nr * nr_data_stripes(map);
@@ -5285,8 +5285,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
}
if (rw & REQ_DISCARD) {
- int factor = 0;
- int sub_stripes = 0;
+ u32 factor = 0;
+ u32 sub_stripes = 0;
u64 stripes_per_dev = 0;
u32 remaining_stripes = 0;
u32 last_stripe = 0;
@@ -5437,9 +5437,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
}
}
if (found) {
- u64 length = map->stripe_len;
-
- if (physical_of_found + length <=
+ if (physical_of_found + map->stripe_len <=
dev_replace->cursor_left) {
struct btrfs_bio_stripe *tgtdev_stripe =
bbio->stripes + num_stripes;
@@ -5535,15 +5533,15 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
rmap_len = map->stripe_len;
if (map->type & BTRFS_BLOCK_GROUP_RAID10)
- do_div(length, map->num_stripes / map->sub_stripes);
+ length = div_u64(length, map->num_stripes / map->sub_stripes);
else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
- do_div(length, map->num_stripes);
+ length = div_u64(length, map->num_stripes);
else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- do_div(length, nr_data_stripes(map));
+ length = div_u64(length, nr_data_stripes(map));
rmap_len = map->stripe_len * nr_data_stripes(map);
}
- buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
+ buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
BUG_ON(!buf); /* -ENOMEM */
for (i = 0; i < map->num_stripes; i++) {
@@ -5554,11 +5552,11 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
continue;
stripe_nr = physical - map->stripes[i].physical;
- do_div(stripe_nr, map->stripe_len);
+ stripe_nr = div_u64(stripe_nr, map->stripe_len);
if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
stripe_nr = stripe_nr * map->num_stripes + i;
- do_div(stripe_nr, map->sub_stripes);
+ stripe_nr = div_u64(stripe_nr, map->sub_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
stripe_nr = stripe_nr * map->num_stripes + i;
} /* else if RAID[56], multiply by nr_data_stripes().
@@ -5835,8 +5833,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
u64 length = 0;
u64 map_length;
int ret;
- int dev_nr = 0;
- int total_devs = 1;
+ int dev_nr;
+ int total_devs;
struct btrfs_bio *bbio = NULL;
length = bio->bi_iter.bi_size;
@@ -5877,11 +5875,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
BUG();
}
- while (dev_nr < total_devs) {
+ for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
bbio_error(bbio, first_bio, logical);
- dev_nr++;
continue;
}
@@ -5894,7 +5891,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
ret = breakup_stripe_bio(root, bbio, first_bio, dev,
dev_nr, rw, async_submit);
BUG_ON(ret);
- dev_nr++;
continue;
}
@@ -5909,7 +5905,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
submit_stripe_bio(root, bbio, bio,
bbio->stripes[dev_nr].physical, dev_nr, rw,
async_submit);
- dev_nr++;
}
btrfs_bio_counter_dec(root->fs_info);
return 0;
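
Many of the volumes.c changes above replace the in-place do_div() idiom with div_u64()/div_u64_rem(), and with div64_u64() where the divisor is also 64-bit. A rough userspace sketch of the semantic difference, using stand-ins for the kernel helpers:

#include <stdint.h>

/* Userspace stand-ins for the kernel helpers used above. */
static inline uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor,
				   uint32_t *remainder)
{
	*remainder = (uint32_t)(dividend % divisor);
	return dividend / divisor;
}

static inline uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	uint32_t rem;

	return div_u64_rem(dividend, divisor, &rem);
}

/* Old pattern:  rem = do_div(x, y);           -- x is updated in place
 * New pattern:  x = div_u64_rem(x, y, &rem);  -- quotient is returned  */
static void example(void)
{
	uint64_t stripe_nr = 1000;
	uint32_t stripe_index;

	stripe_nr = div_u64_rem(stripe_nr, 3, &stripe_index);
	/* stripe_nr == 333, stripe_index == 1 */
	(void)stripe_nr;
	(void)stripe_index;
}
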
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 83069dec6898..ebc31331a837 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -421,8 +421,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
-void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
- struct btrfs_fs_devices *fs_devices, int step);
+void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step);
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
char *device_path,
struct btrfs_device **device);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 883b93623bc5..6f518c90e1c1 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -261,7 +261,7 @@ out:
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct btrfs_key key, found_key;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -364,22 +364,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
/*
* Check if the attribute is in a supported namespace.
*
- * This applied after the check for the synthetic attributes in the system
+ * This is applied after the check for the synthetic attributes in the system
* namespace.
*/
-static bool btrfs_is_valid_xattr(const char *name)
+static int btrfs_is_valid_xattr(const char *name)
{
- return !strncmp(name, XATTR_SECURITY_PREFIX,
- XATTR_SECURITY_PREFIX_LEN) ||
- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
+ int len = strlen(name);
+ int prefixlen = 0;
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN))
+ prefixlen = XATTR_SECURITY_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ prefixlen = XATTR_USER_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ prefixlen = XATTR_BTRFS_PREFIX_LEN;
+ else
+ return -EOPNOTSUPP;
+
+ /*
+ * The name cannot consist of just the prefix.
+ */
+ if (len <= prefixlen)
+ return -EINVAL;
+
+ return 0;
}
ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
+ int ret;
+
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
@@ -388,15 +408,17 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_getxattr(dentry, name, buffer, size);
- if (!btrfs_is_valid_xattr(name))
- return -EOPNOTSUPP;
- return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+ ret = btrfs_is_valid_xattr(name);
+ if (ret)
+ return ret;
+ return __btrfs_getxattr(d_inode(dentry), name, buffer, size);
}
int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
- struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root;
+ int ret;
/*
* The permission on security.* and system.* is not checked
@@ -413,23 +435,25 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, size, flags);
- if (!btrfs_is_valid_xattr(name))
- return -EOPNOTSUPP;
+ ret = btrfs_is_valid_xattr(name);
+ if (ret)
+ return ret;
if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
- return btrfs_set_prop(dentry->d_inode, name,
+ return btrfs_set_prop(d_inode(dentry), name,
value, size, flags);
if (size == 0)
value = ""; /* empty EA, do not remove */
- return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size,
+ return __btrfs_setxattr(NULL, d_inode(dentry), name, value, size,
flags);
}
int btrfs_removexattr(struct dentry *dentry, const char *name)
{
- struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root;
+ int ret;
/*
* The permission on security.* and system.* is not checked
@@ -446,14 +470,15 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_removexattr(dentry, name);
- if (!btrfs_is_valid_xattr(name))
- return -EOPNOTSUPP;
+ ret = btrfs_is_valid_xattr(name);
+ if (ret)
+ return ret;
if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
- return btrfs_set_prop(dentry->d_inode, name,
+ return btrfs_set_prop(d_inode(dentry), name,
NULL, 0, XATTR_REPLACE);
- return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0,
+ return __btrfs_setxattr(NULL, d_inode(dentry), name, NULL, 0,
XATTR_REPLACE);
}
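
With the change above, btrfs_is_valid_xattr() distinguishes an unsupported namespace (-EOPNOTSUPP) from a name that is nothing but a supported prefix (-EINVAL). A standalone sketch of that check, with the prefix strings written out literally in place of the kernel's XATTR_*_PREFIX macros:

#include <errno.h>
#include <string.h>

/* Illustrative version of the namespace check: unsupported prefixes get
 * -EOPNOTSUPP, a bare prefix with no attribute name gets -EINVAL. */
static int xattr_name_valid(const char *name)
{
	static const char *prefixes[] = {
		"security.", "system.", "trusted.", "user.", "btrfs.",
	};
	size_t i;

	for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
		size_t plen = strlen(prefixes[i]);

		if (!strncmp(name, prefixes[i], plen))
			return strlen(name) > plen ? 0 : -EINVAL;
	}
	return -EOPNOTSUPP;
}
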
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index fb22fd8d8fb8..82990b8f872b 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -403,7 +403,7 @@ next:
return ret;
}
-struct btrfs_compress_op btrfs_zlib_compress = {
+const struct btrfs_compress_op btrfs_zlib_compress = {
.alloc_workspace = zlib_alloc_workspace,
.free_workspace = zlib_free_workspace,
.compress_pages = zlib_compress_pages,
diff --git a/fs/buffer.c b/fs/buffer.c
index 20805db2c987..c7a5602d01ee 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3243,8 +3243,8 @@ int try_to_free_buffers(struct page *page)
* to synchronise against __set_page_dirty_buffers and prevent the
* dirty bit from being lost.
*/
- if (ret)
- cancel_dirty_page(page, PAGE_CACHE_SIZE);
+ if (ret && TestClearPageDirty(page))
+ account_page_cleaned(page, mapping);
spin_unlock(&mapping->private_lock);
out:
if (buffers_to_free) {
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index fbb08e97438d..6af790fc3df8 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -123,11 +123,11 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
/* check parameters */
ret = -EOPNOTSUPP;
- if (!root->d_inode ||
- !root->d_inode->i_op->lookup ||
- !root->d_inode->i_op->mkdir ||
- !root->d_inode->i_op->setxattr ||
- !root->d_inode->i_op->getxattr ||
+ if (d_is_negative(root) ||
+ !d_backing_inode(root)->i_op->lookup ||
+ !d_backing_inode(root)->i_op->mkdir ||
+ !d_backing_inode(root)->i_op->setxattr ||
+ !d_backing_inode(root)->i_op->getxattr ||
!root->d_sb->s_op->statfs ||
!root->d_sb->s_op->sync_fs)
goto error_unsupported;
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 232426214fdd..afa023dded5b 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -441,12 +441,12 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
fscache_set_store_limit(&object->fscache, ni_size);
- oi_size = i_size_read(object->backer->d_inode);
+ oi_size = i_size_read(d_backing_inode(object->backer));
if (oi_size == ni_size)
return 0;
cachefiles_begin_secure(cache, &saved_cred);
- mutex_lock(&object->backer->d_inode->i_mutex);
+ mutex_lock(&d_inode(object->backer)->i_mutex);
/* if there's an extension to a partial page at the end of the backing
* file, we need to discard the partial page so that we pick up new
@@ -465,7 +465,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
ret = notify_change(object->backer, &newattrs, NULL);
truncate_failed:
- mutex_unlock(&object->backer->d_inode->i_mutex);
+ mutex_unlock(&d_inode(object->backer)->i_mutex);
cachefiles_end_secure(cache, saved_cred);
if (ret == -EIO) {
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 1e51714eb33e..ab857ab9f40d 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -286,13 +286,13 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
if (ret < 0) {
cachefiles_io_error(cache, "Unlink security error");
} else {
- ret = vfs_unlink(dir->d_inode, rep, NULL);
+ ret = vfs_unlink(d_inode(dir), rep, NULL);
if (preemptive)
cachefiles_mark_object_buried(cache, rep);
}
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
if (ret == -EIO)
cachefiles_io_error(cache, "Unlink failed");
@@ -303,7 +303,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
/* directories have to be moved to the graveyard */
_debug("move stale object to graveyard");
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
try_again:
/* first step is to make up a grave dentry in the graveyard */
@@ -355,7 +355,7 @@ try_again:
return -EIO;
}
- if (grave->d_inode) {
+ if (d_is_positive(grave)) {
unlock_rename(cache->graveyard, dir);
dput(grave);
grave = NULL;
@@ -387,8 +387,8 @@ try_again:
if (ret < 0) {
cachefiles_io_error(cache, "Rename security error %d", ret);
} else {
- ret = vfs_rename(dir->d_inode, rep,
- cache->graveyard->d_inode, grave, NULL, 0);
+ ret = vfs_rename(d_inode(dir), rep,
+ d_inode(cache->graveyard), grave, NULL, 0);
if (ret != 0 && ret != -ENOMEM)
cachefiles_io_error(cache,
"Rename failed with error %d", ret);
@@ -415,18 +415,18 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);
ASSERT(object->dentry);
- ASSERT(object->dentry->d_inode);
+ ASSERT(d_backing_inode(object->dentry));
ASSERT(object->dentry->d_parent);
dir = dget_parent(object->dentry);
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
/* object allocation for the same key preemptively deleted this
* object's file so that it could create its own file */
_debug("object preemptively buried");
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
ret = 0;
} else {
/* we need to check that our parent is _still_ our parent - it
@@ -438,7 +438,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
/* it got moved, presumably by cachefilesd culling it,
* so it's no longer in the key path and we can ignore
* it */
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
ret = 0;
}
}
@@ -473,7 +473,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
path.mnt = cache->mnt;
ASSERT(parent->dentry);
- ASSERT(parent->dentry->d_inode);
+ ASSERT(d_backing_inode(parent->dentry));
if (!(d_is_dir(parent->dentry))) {
// TODO: convert file to dir
@@ -497,7 +497,7 @@ lookup_again:
/* search the current directory for the element name */
_debug("lookup '%s'", name);
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
start = jiffies;
next = lookup_one_len(name, dir, nlen);
@@ -505,21 +505,21 @@ lookup_again:
if (IS_ERR(next))
goto lookup_error;
- _debug("next -> %p %s", next, next->d_inode ? "positive" : "negative");
+ _debug("next -> %p %s", next, d_backing_inode(next) ? "positive" : "negative");
if (!key)
- object->new = !next->d_inode;
+ object->new = !d_backing_inode(next);
/* if this element of the path doesn't exist, then the lookup phase
* failed, and we can release any readers in the certain knowledge that
* there's nothing for them to actually read */
- if (!next->d_inode)
+ if (d_is_negative(next))
fscache_object_lookup_negative(&object->fscache);
/* we need to create the object if it's negative */
if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
/* index objects and intervening tree levels must be subdirs */
- if (!next->d_inode) {
+ if (d_is_negative(next)) {
ret = cachefiles_has_space(cache, 1, 0);
if (ret < 0)
goto create_error;
@@ -529,26 +529,26 @@ lookup_again:
if (ret < 0)
goto create_error;
start = jiffies;
- ret = vfs_mkdir(dir->d_inode, next, 0);
+ ret = vfs_mkdir(d_inode(dir), next, 0);
cachefiles_hist(cachefiles_mkdir_histogram, start);
if (ret < 0)
goto create_error;
- ASSERT(next->d_inode);
+ ASSERT(d_backing_inode(next));
_debug("mkdir -> %p{%p{ino=%lu}}",
- next, next->d_inode, next->d_inode->i_ino);
+ next, d_backing_inode(next), d_backing_inode(next)->i_ino);
} else if (!d_can_lookup(next)) {
pr_err("inode %lu is not a directory\n",
- next->d_inode->i_ino);
+ d_backing_inode(next)->i_ino);
ret = -ENOBUFS;
goto error;
}
} else {
/* non-index objects start out life as files */
- if (!next->d_inode) {
+ if (d_is_negative(next)) {
ret = cachefiles_has_space(cache, 1, 0);
if (ret < 0)
goto create_error;
@@ -558,21 +558,21 @@ lookup_again:
if (ret < 0)
goto create_error;
start = jiffies;
- ret = vfs_create(dir->d_inode, next, S_IFREG, true);
+ ret = vfs_create(d_inode(dir), next, S_IFREG, true);
cachefiles_hist(cachefiles_create_histogram, start);
if (ret < 0)
goto create_error;
- ASSERT(next->d_inode);
+ ASSERT(d_backing_inode(next));
_debug("create -> %p{%p{ino=%lu}}",
- next, next->d_inode, next->d_inode->i_ino);
+ next, d_backing_inode(next), d_backing_inode(next)->i_ino);
} else if (!d_can_lookup(next) &&
!d_is_reg(next)
) {
pr_err("inode %lu is not a file or directory\n",
- next->d_inode->i_ino);
+ d_backing_inode(next)->i_ino);
ret = -ENOBUFS;
goto error;
}
@@ -581,7 +581,7 @@ lookup_again:
/* process the next component */
if (key) {
_debug("advance");
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(dir);
dir = next;
next = NULL;
@@ -617,7 +617,7 @@ lookup_again:
/* note that we're now using this object */
ret = cachefiles_mark_object_active(cache, object);
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(dir);
dir = NULL;
@@ -646,7 +646,7 @@ lookup_again:
const struct address_space_operations *aops;
ret = -EPERM;
- aops = object->dentry->d_inode->i_mapping->a_ops;
+ aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
if (!aops->bmap)
goto check_error;
@@ -659,7 +659,7 @@ lookup_again:
object->new = 0;
fscache_obtained_object(&object->fscache);
- _leave(" = 0 [%lu]", object->dentry->d_inode->i_ino);
+ _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
return 0;
create_error:
@@ -695,7 +695,7 @@ lookup_error:
cachefiles_io_error(cache, "Lookup failed");
next = NULL;
error:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(next);
error_out2:
dput(dir);
@@ -719,7 +719,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
_enter(",,%s", dirname);
/* search the current directory for the element name */
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock(&d_inode(dir)->i_mutex);
start = jiffies;
subdir = lookup_one_len(dirname, dir, strlen(dirname));
@@ -731,10 +731,10 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
}
_debug("subdir -> %p %s",
- subdir, subdir->d_inode ? "positive" : "negative");
+ subdir, d_backing_inode(subdir) ? "positive" : "negative");
/* we need to create the subdir if it doesn't exist yet */
- if (!subdir->d_inode) {
+ if (d_is_negative(subdir)) {
ret = cachefiles_has_space(cache, 1, 0);
if (ret < 0)
goto mkdir_error;
@@ -746,22 +746,22 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
ret = security_path_mkdir(&path, subdir, 0700);
if (ret < 0)
goto mkdir_error;
- ret = vfs_mkdir(dir->d_inode, subdir, 0700);
+ ret = vfs_mkdir(d_inode(dir), subdir, 0700);
if (ret < 0)
goto mkdir_error;
- ASSERT(subdir->d_inode);
+ ASSERT(d_backing_inode(subdir));
_debug("mkdir -> %p{%p{ino=%lu}}",
subdir,
- subdir->d_inode,
- subdir->d_inode->i_ino);
+ d_backing_inode(subdir),
+ d_backing_inode(subdir)->i_ino);
}
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
/* we need to make sure the subdir is a directory */
- ASSERT(subdir->d_inode);
+ ASSERT(d_backing_inode(subdir));
if (!d_can_lookup(subdir)) {
pr_err("%s is not a directory\n", dirname);
@@ -770,18 +770,18 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
}
ret = -EPERM;
- if (!subdir->d_inode->i_op->setxattr ||
- !subdir->d_inode->i_op->getxattr ||
- !subdir->d_inode->i_op->lookup ||
- !subdir->d_inode->i_op->mkdir ||
- !subdir->d_inode->i_op->create ||
- (!subdir->d_inode->i_op->rename &&
- !subdir->d_inode->i_op->rename2) ||
- !subdir->d_inode->i_op->rmdir ||
- !subdir->d_inode->i_op->unlink)
+ if (!d_backing_inode(subdir)->i_op->setxattr ||
+ !d_backing_inode(subdir)->i_op->getxattr ||
+ !d_backing_inode(subdir)->i_op->lookup ||
+ !d_backing_inode(subdir)->i_op->mkdir ||
+ !d_backing_inode(subdir)->i_op->create ||
+ (!d_backing_inode(subdir)->i_op->rename &&
+ !d_backing_inode(subdir)->i_op->rename2) ||
+ !d_backing_inode(subdir)->i_op->rmdir ||
+ !d_backing_inode(subdir)->i_op->unlink)
goto check_error;
- _leave(" = [%lu]", subdir->d_inode->i_ino);
+ _leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
return subdir;
check_error:
@@ -790,19 +790,19 @@ check_error:
return ERR_PTR(ret);
mkdir_error:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(subdir);
pr_err("mkdir %s failed with error %d\n", dirname, ret);
return ERR_PTR(ret);
lookup_error:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
ret = PTR_ERR(subdir);
pr_err("Lookup %s failed with error %d\n", dirname, ret);
return ERR_PTR(ret);
nomem_d_alloc:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
@@ -827,7 +827,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
// dir, filename);
/* look up the victim */
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
start = jiffies;
victim = lookup_one_len(filename, dir, strlen(filename));
@@ -836,13 +836,13 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
goto lookup_error;
//_debug("victim -> %p %s",
- // victim, victim->d_inode ? "positive" : "negative");
+ // victim, d_backing_inode(victim) ? "positive" : "negative");
/* if the object is no longer there then we probably retired the object
* at the netfs's request whilst the cull was in progress
*/
- if (!victim->d_inode) {
- mutex_unlock(&dir->d_inode->i_mutex);
+ if (d_is_negative(victim)) {
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(victim);
_leave(" = -ENOENT [absent]");
return ERR_PTR(-ENOENT);
@@ -871,13 +871,13 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
object_in_use:
read_unlock(&cache->active_lock);
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(victim);
//_leave(" = -EBUSY [in use]");
return ERR_PTR(-EBUSY);
lookup_error:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
ret = PTR_ERR(victim);
if (ret == -ENOENT) {
/* file or dir now absent - probably retired by netfs */
@@ -913,7 +913,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
return PTR_ERR(victim);
_debug("victim -> %p %s",
- victim, victim->d_inode ? "positive" : "negative");
+ victim, d_backing_inode(victim) ? "positive" : "negative");
/* okay... the victim is not being used so we can cull it
* - start by marking it as stale
@@ -936,7 +936,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
return 0;
error_unlock:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
error:
dput(victim);
if (ret == -ENOENT) {
@@ -971,7 +971,7 @@ int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
if (IS_ERR(victim))
return PTR_ERR(victim);
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(victim);
//_leave(" = 0");
return 0;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c6cd8d7a4eef..3cbb0e834694 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -74,12 +74,12 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
static int cachefiles_read_reissue(struct cachefiles_object *object,
struct cachefiles_one_read *monitor)
{
- struct address_space *bmapping = object->backer->d_inode->i_mapping;
+ struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
struct page *backpage = monitor->back_page, *backpage2;
int ret;
_enter("{ino=%lx},{%lx,%lx}",
- object->backer->d_inode->i_ino,
+ d_backing_inode(object->backer)->i_ino,
backpage->index, backpage->flags);
/* skip if the page was truncated away completely */
@@ -157,7 +157,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
object = container_of(op->op.object,
struct cachefiles_object, fscache);
- _enter("{ino=%lu}", object->backer->d_inode->i_ino);
+ _enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);
max = 8;
spin_lock_irq(&object->work_lock);
@@ -247,7 +247,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
/* attempt to get hold of the backing page */
- bmapping = object->backer->d_inode->i_mapping;
+ bmapping = d_backing_inode(object->backer)->i_mapping;
newpage = NULL;
for (;;) {
@@ -408,7 +408,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
if (!object->backer)
goto enobufs;
- inode = object->backer->d_inode;
+ inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
ASSERT(inode->i_mapping->a_ops->bmap);
ASSERT(inode->i_mapping->a_ops->readpages);
@@ -468,7 +468,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
struct list_head *list)
{
struct cachefiles_one_read *monitor = NULL;
- struct address_space *bmapping = object->backer->d_inode->i_mapping;
+ struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
int ret = 0;
@@ -705,7 +705,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
space = 0;
- inode = object->backer->d_inode;
+ inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
ASSERT(inode->i_mapping->a_ops->bmap);
ASSERT(inode->i_mapping->a_ops->readpages);
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
index 396c18ea2764..31bbc0528b11 100644
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -55,14 +55,14 @@ static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
{
int ret;
- ret = security_inode_mkdir(root->d_inode, root, 0);
+ ret = security_inode_mkdir(d_backing_inode(root), root, 0);
if (ret < 0) {
pr_err("Security denies permission to make dirs: error %d",
ret);
return ret;
}
- ret = security_inode_create(root->d_inode, root, 0);
+ ret = security_inode_create(d_backing_inode(root), root, 0);
if (ret < 0)
pr_err("Security denies permission to create files: error %d",
ret);
@@ -95,7 +95,7 @@ int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
/* use the cache root dir's security context as the basis with
* which create files */
- ret = set_create_files_as(new, root->d_inode);
+ ret = set_create_files_as(new, d_backing_inode(root));
if (ret < 0) {
abort_creds(new);
cachefiles_begin_secure(cache, _saved_cred);
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index a8a68745e11d..d31c1a72d8a5 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -33,7 +33,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
int ret;
ASSERT(dentry);
- ASSERT(dentry->d_inode);
+ ASSERT(d_backing_inode(dentry));
if (!object->fscache.cookie)
strcpy(type, "C3");
@@ -52,7 +52,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
if (ret != -EEXIST) {
pr_err("Can't set xattr on %pd [%lu] (err %d)\n",
- dentry, dentry->d_inode->i_ino,
+ dentry, d_backing_inode(dentry)->i_ino,
-ret);
goto error;
}
@@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
goto bad_type_length;
pr_err("Can't read xattr on %pd [%lu] (err %d)\n",
- dentry, dentry->d_inode->i_ino,
+ dentry, d_backing_inode(dentry)->i_ino,
-ret);
goto error;
}
@@ -84,14 +84,14 @@ error:
bad_type_length:
pr_err("Cache object %lu type xattr length incorrect\n",
- dentry->d_inode->i_ino);
+ d_backing_inode(dentry)->i_ino);
ret = -EIO;
goto error;
bad_type:
xtype[2] = 0;
pr_err("Cache object %pd [%lu] type %s not %s\n",
- dentry, dentry->d_inode->i_ino,
+ dentry, d_backing_inode(dentry)->i_ino,
xtype, type);
ret = -EIO;
goto error;
@@ -165,7 +165,7 @@ int cachefiles_check_auxdata(struct cachefiles_object *object)
int ret;
ASSERT(dentry);
- ASSERT(dentry->d_inode);
+ ASSERT(d_backing_inode(dentry));
ASSERT(object->fscache.cookie->def->check_aux);
auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
@@ -204,7 +204,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object,
_enter("%p,#%d", object, auxdata->len);
ASSERT(dentry);
- ASSERT(dentry->d_inode);
+ ASSERT(d_backing_inode(dentry));
auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, cachefiles_gfp);
if (!auxbuf) {
@@ -225,7 +225,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object,
cachefiles_io_error_obj(object,
"Can't read xattr on %lu (err %d)",
- dentry->d_inode->i_ino, -ret);
+ d_backing_inode(dentry)->i_ino, -ret);
goto error;
}
@@ -276,7 +276,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object,
cachefiles_io_error_obj(object,
"Can't update xattr on %lu"
" (error %d)",
- dentry->d_inode->i_ino, -ret);
+ d_backing_inode(dentry)->i_ino, -ret);
goto error;
}
}
@@ -291,7 +291,7 @@ error:
bad_type_length:
pr_err("Cache object %lu xattr length incorrect\n",
- dentry->d_inode->i_ino);
+ d_backing_inode(dentry)->i_ino);
ret = -EIO;
goto error;
@@ -316,7 +316,7 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
cachefiles_io_error(cache,
"Can't remove xattr from %lu"
" (error %d)",
- dentry->d_inode->i_ino, -ret);
+ d_backing_inode(dentry)->i_ino, -ret);
}
_leave(" = %d", ret);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fd5599d32362..e162bcd105ee 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1146,6 +1146,10 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
inode, page, (int)pos, (int)len);
r = ceph_update_writeable_page(file, pos, len, page);
+ if (r < 0)
+ page_cache_release(page);
+ else
+ *pagep = page;
} while (r == -EAGAIN);
return r;
@@ -1198,8 +1202,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
* intercept O_DIRECT reads and writes early, this function should
* never get called.
*/
-static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
- struct iov_iter *iter,
+static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos)
{
WARN_ON(1);
@@ -1535,19 +1538,27 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
- err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
- "inline_version", &inline_version,
- sizeof(inline_version),
- CEPH_OSD_CMPXATTR_OP_GT,
- CEPH_OSD_CMPXATTR_MODE_U64);
- if (err)
- goto out_put;
-
- err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
- "inline_version", &inline_version,
- sizeof(inline_version), 0, 0);
- if (err)
- goto out_put;
+ {
+ __le64 xattr_buf = cpu_to_le64(inline_version);
+ err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
+ "inline_version", &xattr_buf,
+ sizeof(xattr_buf),
+ CEPH_OSD_CMPXATTR_OP_GT,
+ CEPH_OSD_CMPXATTR_MODE_U64);
+ if (err)
+ goto out_put;
+ }
+
+ {
+ char xattr_buf[32];
+ int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
+ "%llu", inline_version);
+ err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
+ "inline_version",
+ xattr_buf, xattr_len, 0, 0);
+ if (err)
+ goto out_put;
+ }
ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 8172775428a0..be5ea6af8366 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -896,6 +896,18 @@ int ceph_is_any_caps(struct inode *inode)
return ret;
}
+static void drop_inode_snap_realm(struct ceph_inode_info *ci)
+{
+ struct ceph_snap_realm *realm = ci->i_snap_realm;
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_del_init(&ci->i_snap_realm_item);
+ ci->i_snap_realm_counter++;
+ ci->i_snap_realm = NULL;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
+ realm);
+}
+
/*
* Remove a cap. Take steps to deal with a racing iterate_session_caps.
*
@@ -946,15 +958,13 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
if (removed)
ceph_put_cap(mdsc, cap);
- if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
- struct ceph_snap_realm *realm = ci->i_snap_realm;
- spin_lock(&realm->inodes_with_caps_lock);
- list_del_init(&ci->i_snap_realm_item);
- ci->i_snap_realm_counter++;
- ci->i_snap_realm = NULL;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_put_snap_realm(mdsc, realm);
- }
+ /* when reconnect is denied, we remove session caps forcibly,
+ * so i_wr_ref can be non-zero. If there are ongoing writes,
+ * keep i_snap_realm.
+ */
+ if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
+ drop_inode_snap_realm(ci);
+
if (!__ceph_is_any_real_caps(ci))
__cap_delay_cancel(mdsc, ci);
}
@@ -1394,6 +1404,13 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
int was = ci->i_dirty_caps;
int dirty = 0;
+ if (!ci->i_auth_cap) {
+ pr_warn("__mark_dirty_caps %p %llx mask %s, "
+ "but no auth cap (session was closed?)\n",
+ inode, ceph_ino(inode), ceph_cap_string(mask));
+ return 0;
+ }
+
dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
ceph_cap_string(mask), ceph_cap_string(was),
ceph_cap_string(was | mask));
@@ -1404,7 +1421,6 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
ci->i_snap_realm->cached_context);
dout(" inode %p now dirty snapc %p auth cap %p\n",
&ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
- WARN_ON(!ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
@@ -1545,7 +1561,19 @@ retry_locked:
if (!mdsc->stopping && inode->i_nlink > 0) {
if (want) {
retain |= CEPH_CAP_ANY; /* be greedy */
+ } else if (S_ISDIR(inode->i_mode) &&
+ (issued & CEPH_CAP_FILE_SHARED) &&
+ __ceph_dir_is_complete(ci)) {
+ /*
+ * If a directory is complete, we want to keep the
+ * exclusive cap, so that the MDS does not end up
+ * revoking the shared cap on every create/unlink
+ * operation.
+ */
+ want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
+ retain |= want;
} else {
+
retain |= CEPH_CAP_ANY_SHARED;
/*
* keep RD only if we didn't have the file open RW,
@@ -2309,6 +2337,9 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
wake = 1;
}
}
+ /* see comment in __ceph_remove_cap() */
+ if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
+ drop_inode_snap_realm(ci);
}
spin_unlock(&ci->i_ceph_lock);
@@ -3391,7 +3422,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
int mds, int drop, int unless)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
struct ceph_mds_request_release *rel = *p;
struct ceph_dentry_info *di = ceph_dentry(dentry);
int force = 0;
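
The caps.c change above factors the realm release into drop_inode_snap_realm() and calls it from both __ceph_remove_cap() and ceph_put_cap_refs(), so the snap realm is only dropped once the inode holds no caps and no write references remain. A toy sketch of that "whichever path sees the last reference drops it" pattern; all names here are illustrative, not the ceph code:

struct fake_ci {
	int caps;
	int wr_ref;
	int has_realm;
};

/* The realm is dropped only when both conditions hold: no caps left and
 * no writes still in flight. Whichever release path observes that state
 * last performs the drop. */
static void maybe_drop_realm(struct fake_ci *ci)
{
	if (ci->caps == 0 && ci->wr_ref == 0 && ci->has_realm)
		ci->has_realm = 0;	/* drop_inode_snap_realm() */
}

static void remove_cap(struct fake_ci *ci)
{
	ci->caps--;
	maybe_drop_realm(ci);		/* __ceph_remove_cap() path */
}

static void put_wr_ref(struct fake_ci *ci)
{
	ci->wr_ref--;
	maybe_drop_realm(ci);		/* ceph_put_cap_refs() path */
}
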
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 1b2355109b9f..31f831471ed2 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -84,7 +84,7 @@ static int mdsc_show(struct seq_file *s, void *p)
path = NULL;
spin_lock(&req->r_dentry->d_lock);
seq_printf(s, " #%llx/%pd (%s)",
- ceph_ino(req->r_dentry->d_parent->d_inode),
+ ceph_ino(d_inode(req->r_dentry->d_parent)),
req->r_dentry,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 83e9976f7189..4248307fea90 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -49,9 +49,9 @@ int ceph_init_dentry(struct dentry *dentry)
goto out_unlock;
}
- if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
+ if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
d_set_d_op(dentry, &ceph_dentry_ops);
- else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
+ else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
else
d_set_d_op(dentry, &ceph_snap_dentry_ops);
@@ -77,7 +77,7 @@ struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
spin_lock(&dentry->d_lock);
if (!IS_ROOT(dentry)) {
- inode = dentry->d_parent->d_inode;
+ inode = d_inode(dentry->d_parent);
ihold(inode);
}
spin_unlock(&dentry->d_lock);
@@ -122,7 +122,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
{
struct ceph_file_info *fi = file->private_data;
struct dentry *parent = file->f_path.dentry;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct list_head *p;
struct dentry *dentry, *last;
struct ceph_dentry_info *di;
@@ -161,15 +161,15 @@ more:
}
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
if (di->lease_shared_gen == shared_gen &&
- !d_unhashed(dentry) && dentry->d_inode &&
- ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
- ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
+ !d_unhashed(dentry) && d_really_is_positive(dentry) &&
+ ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
+ ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
fpos_cmp(ctx->pos, di->offset) <= 0)
break;
dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry,
dentry, di->offset,
ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
- !dentry->d_inode ? " null" : "");
+ !d_inode(dentry) ? " null" : "");
spin_unlock(&dentry->d_lock);
p = p->prev;
dentry = list_entry(p, struct dentry, d_child);
@@ -189,11 +189,11 @@ more:
}
dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
- dentry, dentry, dentry->d_inode);
+ dentry, dentry, d_inode(dentry));
if (!dir_emit(ctx, dentry->d_name.name,
dentry->d_name.len,
- ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
- dentry->d_inode->i_mode >> 12)) {
+ ceph_translate_ino(dentry->d_sb, d_inode(dentry)->i_ino),
+ d_inode(dentry)->i_mode >> 12)) {
if (last) {
/* remember our position */
fi->dentry = last;
@@ -281,6 +281,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
/* can we use the dcache? */
spin_lock(&ci->i_ceph_lock);
if ((ctx->pos == 2 || fi->dentry) &&
+ ceph_test_mount_opt(fsc, DCACHE) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
__ceph_dir_is_complete_ordered(ci) &&
@@ -336,16 +337,23 @@ more:
ceph_mdsc_put_request(req);
return err;
}
- req->r_inode = inode;
- ihold(inode);
- req->r_dentry = dget(file->f_path.dentry);
/* hints to request -> mds selection code */
req->r_direct_mode = USE_AUTH_MDS;
req->r_direct_hash = ceph_frag_value(frag);
req->r_direct_is_hash = true;
- req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
+ if (fi->last_name) {
+ req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
+ if (!req->r_path2) {
+ ceph_mdsc_put_request(req);
+ return -ENOMEM;
+ }
+ }
req->r_readdir_offset = fi->next_offset;
req->r_args.readdir.frag = cpu_to_le32(frag);
+
+ req->r_inode = inode;
+ ihold(inode);
+ req->r_dentry = dget(file->f_path.dentry);
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err < 0) {
ceph_mdsc_put_request(req);
@@ -535,7 +543,7 @@ int ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
- struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */
+ struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
/* .snap dir? */
if (err == -ENOENT &&
@@ -571,8 +579,8 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
err = 0;
if (!req->r_reply_info.head->is_dentry) {
dout("ENOENT and no trace, dentry %p inode %p\n",
- dentry, dentry->d_inode);
- if (dentry->d_inode) {
+ dentry, d_inode(dentry));
+ if (d_really_is_positive(dentry)) {
d_drop(dentry);
err = -ENOENT;
} else {
@@ -619,7 +627,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(err);
/* can we conclude ENOENT locally? */
- if (dentry->d_inode == NULL) {
+ if (d_really_is_negative(dentry)) {
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
@@ -629,6 +637,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
fsc->mount_options->snapdir_name,
dentry->d_name.len) &&
!is_root_ceph_dentry(dir, dentry) &&
+ ceph_test_mount_opt(fsc, DCACHE) &&
__ceph_dir_is_complete(ci) &&
(__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
spin_unlock(&ci->i_ceph_lock);
@@ -725,7 +734,7 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
ceph_mdsc_put_request(req);
out:
if (!err)
- ceph_init_inode_acls(dentry->d_inode, &acls);
+ ceph_init_inode_acls(d_inode(dentry), &acls);
else
d_drop(dentry);
ceph_release_acls_info(&acls);
@@ -755,10 +764,15 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
err = PTR_ERR(req);
goto out;
}
- req->r_dentry = dget(dentry);
- req->r_num_caps = 2;
req->r_path2 = kstrdup(dest, GFP_NOFS);
+ if (!req->r_path2) {
+ err = -ENOMEM;
+ ceph_mdsc_put_request(req);
+ goto out;
+ }
req->r_locked_dir = dir;
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
@@ -821,7 +835,7 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
ceph_mdsc_put_request(req);
out:
if (!err)
- ceph_init_inode_acls(dentry->d_inode, &acls);
+ ceph_init_inode_acls(d_inode(dentry), &acls);
else
d_drop(dentry);
ceph_release_acls_info(&acls);
@@ -858,8 +872,8 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
if (err) {
d_drop(dentry);
} else if (!req->r_reply_info.head->is_dentry) {
- ihold(old_dentry->d_inode);
- d_instantiate(dentry, old_dentry->d_inode);
+ ihold(d_inode(old_dentry));
+ d_instantiate(dentry, d_inode(old_dentry));
}
ceph_mdsc_put_request(req);
return err;
@@ -892,7 +906,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_mds_request *req;
int err = -EROFS;
int op;
@@ -933,16 +947,20 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
+ int op = CEPH_MDS_OP_RENAME;
int err;
if (ceph_snap(old_dir) != ceph_snap(new_dir))
return -EXDEV;
- if (ceph_snap(old_dir) != CEPH_NOSNAP ||
- ceph_snap(new_dir) != CEPH_NOSNAP)
- return -EROFS;
+ if (ceph_snap(old_dir) != CEPH_NOSNAP) {
+ if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
+ op = CEPH_MDS_OP_RENAMESNAP;
+ else
+ return -EROFS;
+ }
dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
- req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
+ req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
ihold(old_dir);
@@ -957,8 +975,8 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_RDCACHE on source inode (mds will lock it) */
req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
- if (new_dentry->d_inode)
- req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
+ if (d_really_is_positive(new_dentry))
+ req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
err = ceph_mdsc_do_request(mdsc, old_dir, req);
if (!err && !req->r_reply_info.head->is_dentry) {
/*
@@ -1024,7 +1042,7 @@ static int dentry_lease_is_valid(struct dentry *dentry)
if (di->lease_renew_after &&
time_after(jiffies, di->lease_renew_after)) {
/* we should renew */
- dir = dentry->d_parent->d_inode;
+ dir = d_inode(dentry->d_parent);
session = ceph_get_mds_session(s);
seq = di->lease_seq;
di->lease_renew_after = 0;
@@ -1074,22 +1092,22 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
- dentry, dentry->d_inode, ceph_dentry(dentry)->offset);
+ dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
dir = ceph_get_dentry_parent_inode(dentry);
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
- dentry, dentry->d_inode);
+ dentry, d_inode(dentry));
valid = 1;
- } else if (dentry->d_inode &&
- ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
+ } else if (d_really_is_positive(dentry) &&
+ ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
valid = 1;
} else if (dentry_lease_is_valid(dentry) ||
dir_lease_is_valid(dir, dentry)) {
- if (dentry->d_inode)
- valid = ceph_is_any_caps(dentry->d_inode);
+ if (d_really_is_positive(dentry))
+ valid = ceph_is_any_caps(d_inode(dentry));
else
valid = 1;
}
@@ -1151,7 +1169,7 @@ static void ceph_d_prune(struct dentry *dentry)
* we hold d_lock, so d_parent is stable, and d_fsdata is never
* cleared until d_release
*/
- ceph_dir_clear_complete(dentry->d_parent->d_inode);
+ ceph_dir_clear_complete(d_inode(dentry->d_parent));
}
/*
@@ -1240,11 +1258,12 @@ static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
dout("dir_fsync %p wait on tid %llu (until %llu)\n",
inode, req->r_tid, last_tid);
if (req->r_timeout) {
- ret = wait_for_completion_timeout(
- &req->r_safe_completion, req->r_timeout);
- if (ret > 0)
+ unsigned long time_left = wait_for_completion_timeout(
+ &req->r_safe_completion,
+ req->r_timeout);
+ if (time_left > 0)
ret = 0;
- else if (ret == 0)
+ else
ret = -EIO; /* timed out */
} else {
wait_for_completion(&req->r_safe_completion);
@@ -1372,6 +1391,7 @@ const struct inode_operations ceph_snapdir_iops = {
.getattr = ceph_getattr,
.mkdir = ceph_mkdir,
.rmdir = ceph_unlink,
+ .rename = ceph_rename,
};
const struct dentry_operations ceph_dentry_ops = {
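The fs/ceph/dir.c hunks above are mostly the mechanical switch from raw dentry->d_inode dereferences to the d_inode(), d_really_is_positive() and d_really_is_negative() accessors, with two behavioural changes riding along: ceph_rename() now issues CEPH_MDS_OP_RENAMESNAP for a rename within the same snapped directory instead of returning -EROFS (and the snapdir inode ops gain a .rename entry), and ceph_dir_fsync() keeps the wait_for_completion_timeout() result in its own unsigned long rather than reusing the signed ret. As a rough, hedged sketch -- not the kernel's actual definitions, but what these accessors amount to on this kernel generation (the _sketch suffix is only to avoid pretending otherwise):

#include <linux/dcache.h>
#include <linux/fs.h>

static inline struct inode *d_inode_sketch(const struct dentry *dentry)
{
	return dentry->d_inode;			/* the dentry's own inode pointer */
}

static inline bool d_really_is_positive_sketch(const struct dentry *dentry)
{
	return dentry->d_inode != NULL;		/* dentry has an inode attached */
}

static inline bool d_really_is_negative_sketch(const struct dentry *dentry)
{
	return dentry->d_inode == NULL;		/* negative dentry */
}

Routing every dereference through one helper gives the VFS a single point to adjust if the dentry-to-inode mapping ever needs mediation; layered filesystems are the motivation usually cited for this accessor series.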
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 8d7d782f4382..fe02ae7f056a 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -136,8 +136,8 @@ static struct dentry *__get_parent(struct super_block *sb,
return ERR_CAST(req);
if (child) {
- req->r_inode = child->d_inode;
- ihold(child->d_inode);
+ req->r_inode = d_inode(child);
+ ihold(d_inode(child));
} else {
req->r_ino1 = (struct ceph_vino) {
.ino = ino,
@@ -164,7 +164,7 @@ static struct dentry *__get_parent(struct super_block *sb,
return ERR_PTR(err);
}
dout("__get_parent ino %llx parent %p ino %llx.%llx\n",
- child ? ceph_ino(child->d_inode) : ino,
+ child ? ceph_ino(d_inode(child)) : ino,
dentry, ceph_vinop(inode));
return dentry;
}
@@ -172,11 +172,11 @@ static struct dentry *__get_parent(struct super_block *sb,
static struct dentry *ceph_get_parent(struct dentry *child)
{
/* don't re-export snaps */
- if (ceph_snap(child->d_inode) != CEPH_NOSNAP)
+ if (ceph_snap(d_inode(child)) != CEPH_NOSNAP)
return ERR_PTR(-EINVAL);
dout("get_parent %p ino %llx.%llx\n",
- child, ceph_vinop(child->d_inode));
+ child, ceph_vinop(d_inode(child)));
return __get_parent(child->d_sb, child, 0);
}
@@ -209,32 +209,32 @@ static int ceph_get_name(struct dentry *parent, char *name,
struct ceph_mds_request *req;
int err;
- mdsc = ceph_inode_to_client(child->d_inode)->mdsc;
+ mdsc = ceph_inode_to_client(d_inode(child))->mdsc;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
USE_ANY_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
- req->r_inode = child->d_inode;
- ihold(child->d_inode);
- req->r_ino2 = ceph_vino(parent->d_inode);
- req->r_locked_dir = parent->d_inode;
+ req->r_inode = d_inode(child);
+ ihold(d_inode(child));
+ req->r_ino2 = ceph_vino(d_inode(parent));
+ req->r_locked_dir = d_inode(parent);
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
if (!err) {
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
memcpy(name, rinfo->dname, rinfo->dname_len);
name[rinfo->dname_len] = 0;
dout("get_name %p ino %llx.%llx name %s\n",
- child, ceph_vinop(child->d_inode), name);
+ child, ceph_vinop(d_inode(child)), name);
} else {
dout("get_name %p ino %llx.%llx err %d\n",
- child, ceph_vinop(child->d_inode), err);
+ child, ceph_vinop(d_inode(child)), err);
}
ceph_mdsc_put_request(req);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d533075a823d..3b6b522b4b31 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -7,7 +7,6 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
-#include <linux/aio.h>
#include <linux/falloc.h>
#include "super.h"
@@ -292,14 +291,14 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
}
if (err)
goto out_req;
- if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) {
+ if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
/* make vfs retry on splice, ENOENT, or symlink */
dout("atomic_open finish_no_open on dn %p\n", dn);
err = finish_no_open(file, dn);
} else {
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
- ceph_init_inode_acls(dentry->d_inode, &acls);
+ ceph_init_inode_acls(d_inode(dentry), &acls);
*opened |= FILE_CREATED;
}
err = finish_open(file, dentry, ceph_open, opened);
@@ -458,7 +457,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
if (ret < 0)
return ret;
- if (file->f_flags & O_DIRECT) {
+ if (iocb->ki_flags & IOCB_DIRECT) {
while (iov_iter_count(i)) {
size_t start;
ssize_t n;
@@ -808,7 +807,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *filp = iocb->ki_filp;
struct ceph_file_info *fi = filp->private_data;
- size_t len = iocb->ki_nbytes;
+ size_t len = iov_iter_count(to);
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
struct page *pinned_page = NULL;
@@ -829,7 +828,7 @@ again:
return ret;
if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
- (iocb->ki_filp->f_flags & O_DIRECT) ||
+ (iocb->ki_flags & IOCB_DIRECT) ||
(fi->flags & CEPH_F_SYNC)) {
dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
@@ -942,9 +941,9 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
- ssize_t count = iov_iter_count(from), written = 0;
+ ssize_t count, written = 0;
int err, want, got;
- loff_t pos = iocb->ki_pos;
+ loff_t pos;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@@ -954,14 +953,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
- err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
- if (err)
- goto out;
-
- if (count == 0)
+ err = generic_write_checks(iocb, from);
+ if (err <= 0)
goto out;
- iov_iter_truncate(from, count);
+ pos = iocb->ki_pos;
+ count = iov_iter_count(from);
err = file_remove_suid(file);
if (err)
goto out;
@@ -998,12 +995,12 @@ retry_snap:
inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
- (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
+ (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
struct iov_iter data;
mutex_unlock(&inode->i_mutex);
/* we might need to revert back to that point */
data = *from;
- if (file->f_flags & O_DIRECT)
+ if (iocb->ki_flags & IOCB_DIRECT)
written = ceph_sync_direct_write(iocb, &data, pos);
else
written = ceph_sync_write(iocb, &data, pos);
@@ -1332,8 +1329,6 @@ const struct file_operations ceph_file_fops = {
.open = ceph_open,
.release = ceph_release,
.llseek = ceph_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = ceph_read_iter,
.write_iter = ceph_write_iter,
.mmap = ceph_mmap,
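The fs/ceph/file.c changes track two VFS interface moves: the direct-I/O decision is read from iocb->ki_flags (IOCB_DIRECT) instead of file->f_flags & O_DIRECT, and generic_write_checks() now takes the kiocb and the iov_iter, returning the clamped byte count (or 0, or a negative error), after which the caller re-reads iocb->ki_pos and iov_iter_count(). The read path sizes itself from iov_iter_count(to) rather than the removed iocb->ki_nbytes, and the .read/.write entries disappear because ->read_iter/->write_iter cover the synchronous case. A hedged sketch of the new convention, with made-up foo_ names rather than real ceph functions:

#include <linux/fs.h>
#include <linux/uio.h>

/* Clamps the iterator, applies O_APPEND and rlimits; returns the remaining
 * byte count, 0 for nothing to do, or a negative error. */
static ssize_t foo_prepare_write(struct kiocb *iocb, struct iov_iter *from,
				 loff_t *pos, size_t *count)
{
	ssize_t ret = generic_write_checks(iocb, from);

	if (ret <= 0)
		return ret;

	*pos = iocb->ki_pos;		/* position after O_APPEND handling */
	*count = iov_iter_count(from);	/* length after clamping */
	return ret;
}

/* The per-call direct-I/O decision now reads the kiocb flags rather than
 * re-checking file->f_flags on every use. */
static bool foo_is_direct(const struct kiocb *iocb)
{
	return iocb->ki_flags & IOCB_DIRECT;
}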
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 119c43c80638..e876e1944519 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -940,7 +940,7 @@ static void update_dentry_lease(struct dentry *dentry,
dentry, duration, ttl);
/* make lease_rdcache_gen match directory */
- dir = dentry->d_parent->d_inode;
+ dir = d_inode(dentry->d_parent);
di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
if (duration == 0)
@@ -980,7 +980,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
{
struct dentry *realdn;
- BUG_ON(dn->d_inode);
+ BUG_ON(d_inode(dn));
/* dn must be unhashed */
if (!d_unhashed(dn))
@@ -998,13 +998,13 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
"inode %p ino %llx.%llx\n",
dn, d_count(dn),
realdn, d_count(realdn),
- realdn->d_inode, ceph_vinop(realdn->d_inode));
+ d_inode(realdn), ceph_vinop(d_inode(realdn)));
dput(dn);
dn = realdn;
} else {
BUG_ON(!ceph_dentry(dn));
dout("dn %p attached to %p ino %llx.%llx\n",
- dn, dn->d_inode, ceph_vinop(dn->d_inode));
+ dn, d_inode(dn), ceph_vinop(d_inode(dn)));
}
if ((!prehash || *prehash) && d_unhashed(dn))
d_rehash(dn);
@@ -1125,11 +1125,11 @@ retry_lookup:
dput(parent);
goto done;
}
- } else if (dn->d_inode &&
- (ceph_ino(dn->d_inode) != vino.ino ||
- ceph_snap(dn->d_inode) != vino.snap)) {
+ } else if (d_really_is_positive(dn) &&
+ (ceph_ino(d_inode(dn)) != vino.ino ||
+ ceph_snap(d_inode(dn)) != vino.snap)) {
dout(" dn %p points to wrong inode %p\n",
- dn, dn->d_inode);
+ dn, d_inode(dn));
d_delete(dn);
dput(dn);
goto retry_lookup;
@@ -1183,7 +1183,7 @@ retry_lookup:
BUG_ON(!dn);
BUG_ON(!dir);
- BUG_ON(dn->d_parent->d_inode != dir);
+ BUG_ON(d_inode(dn->d_parent) != dir);
BUG_ON(ceph_ino(dir) !=
le64_to_cpu(rinfo->diri.in->ino));
BUG_ON(ceph_snap(dir) !=
@@ -1235,7 +1235,7 @@ retry_lookup:
/* null dentry? */
if (!rinfo->head->is_target) {
dout("fill_trace null dentry\n");
- if (dn->d_inode) {
+ if (d_really_is_positive(dn)) {
ceph_dir_clear_ordered(dir);
dout("d_delete %p\n", dn);
d_delete(dn);
@@ -1252,7 +1252,7 @@ retry_lookup:
}
/* attach proper inode */
- if (!dn->d_inode) {
+ if (d_really_is_negative(dn)) {
ceph_dir_clear_ordered(dir);
ihold(in);
dn = splice_dentry(dn, in, &have_lease);
@@ -1261,9 +1261,9 @@ retry_lookup:
goto done;
}
req->r_dentry = dn; /* may have spliced */
- } else if (dn->d_inode && dn->d_inode != in) {
+ } else if (d_really_is_positive(dn) && d_inode(dn) != in) {
dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
- dn, dn->d_inode, ceph_vinop(dn->d_inode),
+ dn, d_inode(dn), ceph_vinop(d_inode(dn)),
ceph_vinop(in));
have_lease = false;
}
@@ -1363,7 +1363,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
return readdir_prepopulate_inodes_only(req, session);
if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
- snapdir = ceph_get_snapdir(parent->d_inode);
+ snapdir = ceph_get_snapdir(d_inode(parent));
parent = d_find_alias(snapdir);
dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
rinfo->dir_nr, parent);
@@ -1371,7 +1371,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
dout("readdir_prepopulate %d items under dn %p\n",
rinfo->dir_nr, parent);
if (rinfo->dir_dir)
- ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
+ ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
}
/* FIXME: release caps/leases if error occurs */
@@ -1405,11 +1405,11 @@ retry_lookup:
err = ret;
goto out;
}
- } else if (dn->d_inode &&
- (ceph_ino(dn->d_inode) != vino.ino ||
- ceph_snap(dn->d_inode) != vino.snap)) {
+ } else if (d_really_is_positive(dn) &&
+ (ceph_ino(d_inode(dn)) != vino.ino ||
+ ceph_snap(d_inode(dn)) != vino.snap)) {
dout(" dn %p points to wrong inode %p\n",
- dn, dn->d_inode);
+ dn, d_inode(dn));
d_delete(dn);
dput(dn);
goto retry_lookup;
@@ -1423,8 +1423,8 @@ retry_lookup:
}
/* inode */
- if (dn->d_inode) {
- in = dn->d_inode;
+ if (d_really_is_positive(dn)) {
+ in = d_inode(dn);
} else {
in = ceph_get_inode(parent->d_sb, vino);
if (IS_ERR(in)) {
@@ -1440,13 +1440,13 @@ retry_lookup:
req->r_request_started, -1,
&req->r_caps_reservation) < 0) {
pr_err("fill_inode badness on %p\n", in);
- if (!dn->d_inode)
+ if (d_really_is_negative(dn))
iput(in);
d_drop(dn);
goto next_item;
}
- if (!dn->d_inode) {
+ if (d_really_is_negative(dn)) {
struct dentry *realdn = splice_dentry(dn, in, NULL);
if (IS_ERR(realdn)) {
err = PTR_ERR(realdn);
@@ -1693,7 +1693,7 @@ retry:
*/
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
+ struct ceph_inode_info *ci = ceph_inode(d_inode(dentry));
nd_set_link(nd, ci->i_symlink);
return NULL;
}
@@ -1714,7 +1714,7 @@ static const struct inode_operations ceph_symlink_iops = {
*/
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
const unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
@@ -1990,7 +1990,7 @@ int ceph_permission(struct inode *inode, int mask)
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
int err;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 71c073f38e54..84f37f34f9aa 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -679,7 +679,7 @@ static struct dentry *get_nonsnap_parent(struct dentry *dentry)
* except to resplice to another snapdir, and either the old or new
* result is a valid result.
*/
- while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+ while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
dentry = dentry->d_parent;
return dentry;
}
@@ -716,20 +716,20 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
} else if (req->r_dentry) {
/* ignore race with rename; old or new d_parent is okay */
struct dentry *parent = req->r_dentry->d_parent;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
if (dir->i_sb != mdsc->fsc->sb) {
/* not this fs! */
- inode = req->r_dentry->d_inode;
+ inode = d_inode(req->r_dentry);
} else if (ceph_snap(dir) != CEPH_NOSNAP) {
/* direct snapped/virtual snapdir requests
* based on parent dir inode */
struct dentry *dn = get_nonsnap_parent(parent);
- inode = dn->d_inode;
+ inode = d_inode(dn);
dout("__choose_mds using nonsnap parent %p\n", inode);
} else {
/* dentry target */
- inode = req->r_dentry->d_inode;
+ inode = d_inode(req->r_dentry);
if (!inode || mode == USE_AUTH_MDS) {
/* dir + name */
inode = dir;
@@ -1021,6 +1021,33 @@ static void cleanup_cap_releases(struct ceph_mds_session *session)
spin_unlock(&session->s_cap_lock);
}
+static void cleanup_session_requests(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_mds_request *req;
+ struct rb_node *p;
+
+ dout("cleanup_session_requests mds%d\n", session->s_mds);
+ mutex_lock(&mdsc->mutex);
+ while (!list_empty(&session->s_unsafe)) {
+ req = list_first_entry(&session->s_unsafe,
+ struct ceph_mds_request, r_unsafe_item);
+ list_del_init(&req->r_unsafe_item);
+ pr_info(" dropping unsafe request %llu\n", req->r_tid);
+ __unregister_request(mdsc, req);
+ }
+ /* zero r_attempts, so kick_requests() will re-send requests */
+ p = rb_first(&mdsc->request_tree);
+ while (p) {
+ req = rb_entry(p, struct ceph_mds_request, r_node);
+ p = rb_next(p);
+ if (req->r_session &&
+ req->r_session->s_mds == session->s_mds)
+ req->r_attempts = 0;
+ }
+ mutex_unlock(&mdsc->mutex);
+}
+
/*
* Helper to safely iterate over all caps associated with a session, with
* special care taken to handle a racing __ceph_remove_cap().
@@ -1098,7 +1125,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
cap, ci, &ci->vfs_inode);
spin_lock(&ci->i_ceph_lock);
__ceph_remove_cap(cap, false);
- if (!__ceph_is_any_real_caps(ci)) {
+ if (!ci->i_auth_cap) {
struct ceph_mds_client *mdsc =
ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1120,13 +1147,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
mdsc->num_cap_flushing--;
drop = 1;
}
- if (drop && ci->i_wrbuffer_ref) {
- pr_info(" dropping dirty data for %p %lld\n",
- inode, ceph_ino(inode));
- ci->i_wrbuffer_ref = 0;
- ci->i_wrbuffer_ref_head = 0;
- drop++;
- }
spin_unlock(&mdsc->cap_dirty_lock);
}
spin_unlock(&ci->i_ceph_lock);
@@ -1712,7 +1732,7 @@ retry:
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
for (temp = dentry; !IS_ROOT(temp);) {
- struct inode *inode = temp->d_inode;
+ struct inode *inode = d_inode(temp);
if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
len++; /* slash only */
else if (stop_on_nosnap && inode &&
@@ -1736,7 +1756,7 @@ retry:
struct inode *inode;
spin_lock(&temp->d_lock);
- inode = temp->d_inode;
+ inode = d_inode(temp);
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
dout("build_path path+%d: %p SNAPDIR\n",
pos, temp);
@@ -1770,7 +1790,7 @@ retry:
goto retry;
}
- *base = ceph_ino(temp->d_inode);
+ *base = ceph_ino(d_inode(temp));
*plen = len;
dout("build_path on %p %d built %llx '%.*s'\n",
dentry, d_count(dentry), *base, len, path);
@@ -1783,8 +1803,8 @@ static int build_dentry_path(struct dentry *dentry,
{
char *path;
- if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
- *pino = ceph_ino(dentry->d_parent->d_inode);
+ if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
+ *pino = ceph_ino(d_inode(dentry->d_parent));
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
return 0;
@@ -1853,7 +1873,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
*/
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
- int mds)
+ int mds, bool drop_cap_releases)
{
struct ceph_msg *msg;
struct ceph_mds_request_head *head;
@@ -1925,7 +1945,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
releases = 0;
if (req->r_inode_drop)
releases += ceph_encode_inode_release(&p,
- req->r_inode ? req->r_inode : req->r_dentry->d_inode,
+ req->r_inode ? req->r_inode : d_inode(req->r_dentry),
mds, req->r_inode_drop, req->r_inode_unless, 0);
if (req->r_dentry_drop)
releases += ceph_encode_dentry_release(&p, req->r_dentry,
@@ -1935,8 +1955,14 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
if (req->r_old_inode_drop)
releases += ceph_encode_inode_release(&p,
- req->r_old_dentry->d_inode,
+ d_inode(req->r_old_dentry),
mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
+
+ if (drop_cap_releases) {
+ releases = 0;
+ p = msg->front.iov_base + req->r_request_release_offset;
+ }
+
head->num_releases = cpu_to_le16(releases);
/* time stamp */
@@ -1989,7 +2015,7 @@ static void complete_request(struct ceph_mds_client *mdsc,
*/
static int __prepare_send_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
- int mds)
+ int mds, bool drop_cap_releases)
{
struct ceph_mds_request_head *rhead;
struct ceph_msg *msg;
@@ -2048,7 +2074,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
ceph_msg_put(req->r_request);
req->r_request = NULL;
}
- msg = create_request_message(mdsc, req, mds);
+ msg = create_request_message(mdsc, req, mds, drop_cap_releases);
if (IS_ERR(msg)) {
req->r_err = PTR_ERR(msg);
complete_request(mdsc, req);
@@ -2132,7 +2158,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
if (req->r_request_started == 0) /* note request start time */
req->r_request_started = jiffies;
- err = __prepare_send_request(mdsc, req, mds);
+ err = __prepare_send_request(mdsc, req, mds, false);
if (!err) {
ceph_msg_get(req->r_request);
ceph_con_send(&session->s_con, req->r_request);
@@ -2590,6 +2616,7 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_CLOSE:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
pr_info("mds%d reconnect denied\n", session->s_mds);
+ cleanup_session_requests(mdsc, session);
remove_session_caps(session);
wake = 2; /* for good measure */
wake_up_all(&mdsc->session_close_wq);
@@ -2658,7 +2685,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
mutex_lock(&mdsc->mutex);
list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
- err = __prepare_send_request(mdsc, req, session->s_mds);
+ err = __prepare_send_request(mdsc, req, session->s_mds, true);
if (!err) {
ceph_msg_get(req->r_request);
ceph_con_send(&session->s_con, req->r_request);
@@ -2679,7 +2706,8 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
continue; /* only old requests */
if (req->r_session &&
req->r_session->s_mds == session->s_mds) {
- err = __prepare_send_request(mdsc, req, session->s_mds);
+ err = __prepare_send_request(mdsc, req,
+ session->s_mds, true);
if (!err) {
ceph_msg_get(req->r_request);
ceph_con_send(&session->s_con, req->r_request);
@@ -2864,7 +2892,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
spin_unlock(&session->s_cap_lock);
/* trim unused caps to reduce MDS's cache rejoin time */
- shrink_dcache_parent(mdsc->fsc->sb->s_root);
+ if (mdsc->fsc->sb->s_root)
+ shrink_dcache_parent(mdsc->fsc->sb->s_root);
ceph_con_close(&session->s_con);
ceph_con_open(&session->s_con,
@@ -3133,7 +3162,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
di->lease_renew_from &&
di->lease_renew_after == 0) {
unsigned long duration =
- le32_to_cpu(h->duration_ms) * HZ / 1000;
+ msecs_to_jiffies(le32_to_cpu(h->duration_ms));
di->lease_seq = seq;
dentry->d_time = di->lease_renew_from + duration;
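Beyond the accessor conversion, the fs/ceph/mds_client.c hunks add cleanup_session_requests(), which drops a session's unsafe requests and zeroes r_attempts so kick_requests() can resend after a denied reconnect; thread a drop_cap_releases flag through __prepare_send_request() and create_request_message() so replayed requests carry no cap releases; guard shrink_dcache_parent() against a NULL s_root; and replace the open-coded `duration_ms * HZ / 1000` with msecs_to_jiffies(). Only that last change lends itself to a tiny hedged sketch; the numbers in the comment assume HZ=250 (a 4 ms jiffy):

#include <linux/types.h>
#include <linux/jiffies.h>

static unsigned long lease_duration_jiffies(u32 duration_ms)
{
	/*
	 * duration_ms * HZ / 1000 truncates -- 3 ms becomes 0 jiffies -- and
	 * can overflow for large values; msecs_to_jiffies() rounds up and
	 * saturates instead, so a short lease never looks already expired.
	 */
	return msecs_to_jiffies(duration_ms);
}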
diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
index 51cc23e48111..89e6bc321df3 100644
--- a/fs/ceph/strings.c
+++ b/fs/ceph/strings.c
@@ -75,6 +75,7 @@ const char *ceph_mds_op_name(int op)
case CEPH_MDS_OP_LSSNAP: return "lssnap";
case CEPH_MDS_OP_MKSNAP: return "mksnap";
case CEPH_MDS_OP_RMSNAP: return "rmsnap";
+ case CEPH_MDS_OP_RENAMESNAP: return "renamesnap";
case CEPH_MDS_OP_SETFILELOCK: return "setfilelock";
case CEPH_MDS_OP_GETFILELOCK: return "getfilelock";
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a63997b8bcff..4e9905374078 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -44,7 +44,7 @@ static void ceph_put_super(struct super_block *s)
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
struct ceph_monmap *monmap = fsc->client->monc.monmap;
struct ceph_statfs st;
u64 fsid;
@@ -345,6 +345,11 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
fsopt->rsize = CEPH_RSIZE_DEFAULT;
fsopt->rasize = CEPH_RASIZE_DEFAULT;
fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+ if (!fsopt->snapdir_name) {
+ err = -ENOMEM;
+ goto out;
+ }
+
fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
@@ -406,31 +411,20 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
struct ceph_mount_options *fsopt = fsc->mount_options;
- struct ceph_options *opt = fsc->client->options;
-
- if (opt->flags & CEPH_OPT_FSID)
- seq_printf(m, ",fsid=%pU", &opt->fsid);
- if (opt->flags & CEPH_OPT_NOSHARE)
- seq_puts(m, ",noshare");
- if (opt->flags & CEPH_OPT_NOCRC)
- seq_puts(m, ",nocrc");
- if (opt->flags & CEPH_OPT_NOMSGAUTH)
- seq_puts(m, ",nocephx_require_signatures");
- if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
- seq_puts(m, ",notcp_nodelay");
-
- if (opt->name)
- seq_printf(m, ",name=%s", opt->name);
- if (opt->key)
- seq_puts(m, ",secret=<hidden>");
-
- if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
- seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
- if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
- seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
- if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
- seq_printf(m, ",osdkeepalivetimeout=%d",
- opt->osd_keepalive_timeout);
+ size_t pos;
+ int ret;
+
+ /* a comma between MNT/MS and client options */
+ seq_putc(m, ',');
+ pos = m->count;
+
+ ret = ceph_print_client_options(m, fsc->client);
+ if (ret)
+ return ret;
+
+ /* retract our comma if no client options */
+ if (m->count == pos)
+ m->count--;
if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
seq_puts(m, ",dirstat");
@@ -438,14 +432,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",norbytes");
if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
seq_puts(m, ",noasyncreaddir");
- if (fsopt->flags & CEPH_MOUNT_OPT_DCACHE)
- seq_puts(m, ",dcache");
- else
+ if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
seq_puts(m, ",nodcache");
if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
seq_puts(m, ",fsc");
- else
- seq_puts(m, ",nofsc");
#ifdef CONFIG_CEPH_FS_POSIX_ACL
if (fsopt->sb_flags & MS_POSIXACL)
@@ -477,6 +467,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
+
return 0;
}
@@ -730,6 +721,11 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
if (IS_ERR(req))
return ERR_CAST(req);
req->r_path1 = kstrdup(path, GFP_NOFS);
+ if (!req->r_path1) {
+ root = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
req->r_ino1.ino = CEPH_INO_ROOT;
req->r_ino1.snap = CEPH_NOSNAP;
req->r_started = started;
@@ -976,7 +972,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
if (IS_ERR(res))
goto out_splat;
dout("root %p inode %p ino %llx.%llx\n", res,
- res->d_inode, ceph_vinop(res->d_inode));
+ d_inode(res), ceph_vinop(d_inode(res)));
return res;
out_splat:
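The fs/ceph/super.c hunks check the kstrdup() results for snapdir_name and r_path1 and fail with -ENOMEM, delegate the shared libceph options to ceph_print_client_options(), and keep only filesystem-level flags in ceph_show_options(). The comma handling is the interesting detail: a separator is emitted up front, m->count is remembered, and the comma is retracted if the client printed nothing. A hedged sketch of that seq_file idiom, with a hypothetical print_client_options() callback standing in for the libceph helper:

#include <linux/seq_file.h>

static int show_options_sketch(struct seq_file *m,
			       int (*print_client_options)(struct seq_file *m))
{
	size_t pos;
	int ret;

	seq_putc(m, ',');		/* optimistic separator */
	pos = m->count;			/* remember where the options would start */

	ret = print_client_options(m);	/* may print nothing at all */
	if (ret)
		return ret;

	if (m->count == pos)		/* nothing was added ... */
		m->count--;		/* ... so drop the stray comma */
	return 0;
}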
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 04c8124ed30e..fa20e1318939 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -36,7 +36,8 @@
#define CEPH_MOUNT_OPT_DCACHE (1<<9) /* use dcache for readdir etc */
#define CEPH_MOUNT_OPT_FSCACHE (1<<10) /* use fscache */
-#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES)
+#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES | \
+ CEPH_MOUNT_OPT_DCACHE)
#define ceph_set_mount_opt(fsc, opt) \
(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
@@ -881,7 +882,6 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
/* file.c */
extern const struct file_operations ceph_file_fops;
-extern const struct address_space_operations ceph_aops;
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 5a492caf34cb..cd7ffad4041d 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -776,12 +776,12 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_getxattr(dentry, name, value, size);
- return __ceph_getxattr(dentry->d_inode, name, value, size);
+ return __ceph_getxattr(d_inode(dentry), name, value, size);
}
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
u32 vir_namelen = 0;
@@ -847,7 +847,7 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
const char *value, size_t size, int flags)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -877,16 +877,23 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
err = PTR_ERR(req);
goto out;
}
- req->r_inode = inode;
- ihold(inode);
- req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
- req->r_num_caps = 1;
+
req->r_args.setxattr.flags = cpu_to_le32(flags);
req->r_path2 = kstrdup(name, GFP_NOFS);
+ if (!req->r_path2) {
+ ceph_mdsc_put_request(req);
+ err = -ENOMEM;
+ goto out;
+ }
req->r_pagelist = pagelist;
pagelist = NULL;
+ req->r_inode = inode;
+ ihold(inode);
+ req->r_num_caps = 1;
+ req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
+
dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
@@ -901,7 +908,7 @@ out:
int __ceph_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_vxattr *vxattr;
struct ceph_inode_info *ci = ceph_inode(inode);
int issued;
@@ -995,7 +1002,7 @@ out:
int ceph_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- if (ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+ if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
return -EROFS;
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
@@ -1011,7 +1018,7 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_mds_request *req;
int err;
@@ -1019,12 +1026,14 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
+ req->r_path2 = kstrdup(name, GFP_NOFS);
+ if (!req->r_path2)
+ return -ENOMEM;
+
req->r_inode = inode;
ihold(inode);
- req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
req->r_num_caps = 1;
- req->r_path2 = kstrdup(name, GFP_NOFS);
-
+ req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
return err;
@@ -1032,7 +1041,7 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
int __ceph_removexattr(struct dentry *dentry, const char *name)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ceph_vxattr *vxattr;
struct ceph_inode_info *ci = ceph_inode(inode);
int issued;
@@ -1098,7 +1107,7 @@ out:
int ceph_removexattr(struct dentry *dentry, const char *name)
{
- if (ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+ if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
return -EROFS;
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
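The fs/ceph/xattr.c hunks again swap in d_inode() and, more substantively, check the kstrdup() of r_path2; in ceph_sync_setxattr() the allocation is now checked before the request takes its inode reference, so the error path is a plain put of the request with nothing to unwind. A hedged sketch of that ordering; foo_request and foo_fill_request are illustrative stand-ins, not ceph helpers:

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/fs.h>

struct foo_request {
	char *path;
	struct inode *inode;
};

/* Do fallible allocations before taking references, so failure needs no
 * unwinding beyond freeing the request itself. */
static int foo_fill_request(struct foo_request *req, struct inode *inode,
			    const char *name)
{
	req->path = kstrdup(name, GFP_NOFS);
	if (!req->path)
		return -ENOMEM;		/* no reference taken yet, nothing leaks */

	req->inode = inode;
	ihold(inode);			/* safe: no failure points after this */
	return 0;
}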
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index b8602f199815..430e0348c99e 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -301,7 +301,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
if (full_path == NULL)
goto cdda_exit;
- cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
+ cifs_sb = CIFS_SB(d_inode(mntpt)->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
mnt = ERR_CAST(tlink);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d72fe37f5420..f5089bde3635 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -607,7 +607,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
p = s = full_path;
do {
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
struct dentry *child;
if (!dir) {
@@ -906,8 +906,6 @@ const struct inode_operations cifs_symlink_inode_ops = {
};
const struct file_operations cifs_file_ops = {
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = cifs_loose_read_iter,
.write_iter = cifs_file_write_iter,
.open = cifs_open,
@@ -926,8 +924,6 @@ const struct file_operations cifs_file_ops = {
};
const struct file_operations cifs_file_strict_ops = {
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = cifs_strict_readv,
.write_iter = cifs_strict_writev,
.open = cifs_open,
@@ -947,8 +943,6 @@ const struct file_operations cifs_file_strict_ops = {
const struct file_operations cifs_file_direct_ops = {
/* BB reevaluate whether they can be done with directio, no cache */
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = cifs_user_readv,
.write_iter = cifs_user_writev,
.open = cifs_open,
@@ -967,8 +961,6 @@ const struct file_operations cifs_file_direct_ops = {
};
const struct file_operations cifs_file_nobrl_ops = {
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = cifs_loose_read_iter,
.write_iter = cifs_file_write_iter,
.open = cifs_open,
@@ -986,8 +978,6 @@ const struct file_operations cifs_file_nobrl_ops = {
};
const struct file_operations cifs_file_strict_nobrl_ops = {
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = cifs_strict_readv,
.write_iter = cifs_strict_writev,
.open = cifs_open,
@@ -1006,8 +996,6 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
const struct file_operations cifs_file_direct_nobrl_ops = {
/* BB reevaluate whether they can be done with directio, no cache */
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = cifs_user_readv,
.write_iter = cifs_user_writev,
.open = cifs_open,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index fa13d5e79f64..84650a51c7c4 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1898,7 +1898,7 @@ static void
cifs_writev_requeue(struct cifs_writedata *wdata)
{
int i, rc = 0;
- struct inode *inode = wdata->cfile->dentry->d_inode;
+ struct inode *inode = d_inode(wdata->cfile->dentry);
struct TCP_Server_Info *server;
unsigned int rest_len;
@@ -1981,7 +1981,7 @@ cifs_writev_complete(struct work_struct *work)
{
struct cifs_writedata *wdata = container_of(work,
struct cifs_writedata, work);
- struct inode *inode = wdata->cfile->dentry->d_inode;
+ struct inode *inode = d_inode(wdata->cfile->dentry);
int i = 0;
if (wdata->result == 0) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 480cf9c81d50..f3bfe08e177b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -773,8 +773,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
length = atomic_dec_return(&tcpSesAllocCount);
if (length > 0)
- mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
- GFP_KERNEL);
+ mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
}
static int
@@ -848,8 +847,7 @@ cifs_demultiplex_thread(void *p)
length = atomic_inc_return(&tcpSesAllocCount);
if (length > 1)
- mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
- GFP_KERNEL);
+ mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
set_freezable();
while (server->tcpStatus != CifsExiting) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index b72bc29cba23..338d56936f6a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -745,13 +745,13 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
goto lookup_out;
}
- if (direntry->d_inode != NULL) {
+ if (d_really_is_positive(direntry)) {
cifs_dbg(FYI, "non-NULL inode in lookup\n");
} else {
cifs_dbg(FYI, "NULL inode in lookup\n");
}
cifs_dbg(FYI, "Full path: %s inode = 0x%p\n",
- full_path, direntry->d_inode);
+ full_path, d_inode(direntry));
if (pTcon->unix_ext) {
rc = cifs_get_inode_info_unix(&newInode, full_path,
@@ -792,7 +792,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- if (direntry->d_inode) {
+ if (d_really_is_positive(direntry)) {
if (cifs_revalidate_dentry(direntry))
return 0;
else {
@@ -803,7 +803,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
* attributes will have been updated by
* cifs_revalidate_dentry().
*/
- if (IS_AUTOMOUNT(direntry->d_inode) &&
+ if (IS_AUTOMOUNT(d_inode(direntry)) &&
!(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
spin_lock(&direntry->d_lock);
direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ca30c391a894..cafbf10521d5 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -273,7 +273,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock)
{
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifsFileInfo *cfile;
struct cifs_fid_locks *fdlocks;
@@ -357,7 +357,7 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
- struct inode *inode = cifs_file->dentry->d_inode;
+ struct inode *inode = d_inode(cifs_file->dentry);
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
@@ -386,7 +386,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
if (list_empty(&cifsi->openFileList)) {
cifs_dbg(FYI, "closing last open instance for inode %p\n",
- cifs_file->dentry->d_inode);
+ d_inode(cifs_file->dentry));
/*
* In strict cache mode we need invalidate mapping on the last
* close because it may cause a error when we open this file
@@ -572,7 +572,7 @@ static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
@@ -620,7 +620,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
return rc;
}
- inode = cfile->dentry->d_inode;
+ inode = d_inode(cfile->dentry);
cifs_sb = CIFS_SB(inode->i_sb);
tcon = tlink_tcon(cfile->tlink);
server = tcon->ses->server;
@@ -874,7 +874,7 @@ cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
{
bool rc = false;
struct cifs_fid_locks *cur;
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
list_for_each_entry(cur, &cinode->llist, llist) {
rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
@@ -899,7 +899,7 @@ cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
{
int rc = 0;
struct cifsLockInfo *conf_lock;
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
bool exist;
@@ -927,7 +927,7 @@ cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
down_write(&cinode->lock_sem);
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
@@ -944,7 +944,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
bool wait)
{
struct cifsLockInfo *conf_lock;
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
bool exist;
int rc = 0;
@@ -1125,7 +1125,7 @@ struct lock_to_push {
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
- struct inode *inode = cfile->dentry->d_inode;
+ struct inode *inode = d_inode(cfile->dentry);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct file_lock *flock;
struct file_lock_context *flctx = inode->i_flctx;
@@ -1214,7 +1214,7 @@ static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
@@ -1382,7 +1382,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
unsigned int max_num, num, max_buf;
LOCKING_ANDX_RANGE *buf, *cur;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifsLockInfo *li, *tmp;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct list_head tmp_llist;
@@ -1488,7 +1488,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
- struct inode *inode = cfile->dentry->d_inode;
+ struct inode *inode = d_inode(cfile->dentry);
if (posix_lck) {
int posix_lock_type;
@@ -1643,7 +1643,7 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
struct TCP_Server_Info *server;
unsigned int xid;
struct dentry *dentry = open_file->dentry;
- struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
+ struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
struct cifs_io_parms io_parms;
cifs_sb = CIFS_SB(dentry->d_sb);
@@ -1676,7 +1676,7 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
break;
}
- len = min(server->ops->wp_retry_size(dentry->d_inode),
+ len = min(server->ops->wp_retry_size(d_inode(dentry)),
(unsigned int)write_size - total_written);
/* iov[0] is reserved for smb header */
iov[1].iov_base = (char *)write_data + total_written;
@@ -1696,9 +1696,9 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
return rc;
}
} else {
- spin_lock(&dentry->d_inode->i_lock);
+ spin_lock(&d_inode(dentry)->i_lock);
cifs_update_eof(cifsi, *offset, bytes_written);
- spin_unlock(&dentry->d_inode->i_lock);
+ spin_unlock(&d_inode(dentry)->i_lock);
*offset += bytes_written;
}
}
@@ -1706,12 +1706,12 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
cifs_stats_bytes_written(tcon, total_written);
if (total_written > 0) {
- spin_lock(&dentry->d_inode->i_lock);
- if (*offset > dentry->d_inode->i_size)
- i_size_write(dentry->d_inode, *offset);
- spin_unlock(&dentry->d_inode->i_lock);
+ spin_lock(&d_inode(dentry)->i_lock);
+ if (*offset > d_inode(dentry)->i_size)
+ i_size_write(d_inode(dentry), *offset);
+ spin_unlock(&d_inode(dentry)->i_lock);
}
- mark_inode_dirty_sync(dentry->d_inode);
+ mark_inode_dirty_sync(d_inode(dentry));
free_xid(xid);
return total_written;
}
@@ -2406,7 +2406,7 @@ cifs_uncached_writev_complete(struct work_struct *work)
{
struct cifs_writedata *wdata = container_of(work,
struct cifs_writedata, work);
- struct inode *inode = wdata->cfile->dentry->d_inode;
+ struct inode *inode = d_inode(wdata->cfile->dentry);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
spin_lock(&inode->i_lock);
@@ -2560,10 +2560,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
return rc;
}
-static ssize_t
-cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
+ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
- size_t len;
+ struct file *file = iocb->ki_filp;
ssize_t total_written = 0;
struct cifsFileInfo *open_file;
struct cifs_tcon *tcon;
@@ -2573,15 +2572,15 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
struct iov_iter saved_from;
int rc;
- len = iov_iter_count(from);
- rc = generic_write_checks(file, poffset, &len, 0);
- if (rc)
- return rc;
-
- if (!len)
- return 0;
+ /*
+ * BB - optimize the way when signing is disabled. We can drop this
+ * extra memory-to-memory copying and use iovec buffers for constructing
+ * write request.
+ */
- iov_iter_truncate(from, len);
+ rc = generic_write_checks(iocb, from);
+ if (rc <= 0)
+ return rc;
INIT_LIST_HEAD(&wdata_list);
cifs_sb = CIFS_FILE_SB(file);
@@ -2593,8 +2592,8 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
memcpy(&saved_from, from, sizeof(struct iov_iter));
- rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb,
- &wdata_list);
+ rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
+ open_file, cifs_sb, &wdata_list);
/*
* If at least one write was successfully sent, then discard any rc
@@ -2633,7 +2632,7 @@ restart_loop:
memcpy(&tmp_from, &saved_from,
sizeof(struct iov_iter));
iov_iter_advance(&tmp_from,
- wdata->offset - *poffset);
+ wdata->offset - iocb->ki_pos);
rc = cifs_write_from_iter(wdata->offset,
wdata->bytes, &tmp_from,
@@ -2650,34 +2649,13 @@ restart_loop:
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
- if (total_written > 0)
- *poffset += total_written;
+ if (unlikely(!total_written))
+ return rc;
+ iocb->ki_pos += total_written;
+ set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
cifs_stats_bytes_written(tcon, total_written);
- return total_written ? total_written : (ssize_t)rc;
-}
-
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
-{
- ssize_t written;
- struct inode *inode;
- loff_t pos = iocb->ki_pos;
-
- inode = file_inode(iocb->ki_filp);
-
- /*
- * BB - optimize the way when signing is disabled. We can drop this
- * extra memory-to-memory copying and use iovec buffers for constructing
- * write request.
- */
-
- written = cifs_iovec_write(iocb->ki_filp, from, &pos);
- if (written > 0) {
- set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
- iocb->ki_pos = pos;
- }
-
- return written;
+ return total_written;
}
static ssize_t
@@ -2688,8 +2666,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file->f_mapping->host;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
- ssize_t rc = -EACCES;
- loff_t lock_pos = iocb->ki_pos;
+ ssize_t rc;
/*
* We need to hold the sem to be sure nobody modifies lock list
@@ -2697,23 +2674,24 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
*/
down_read(&cinode->lock_sem);
mutex_lock(&inode->i_mutex);
- if (file->f_flags & O_APPEND)
- lock_pos = i_size_read(inode);
- if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from),
+
+ rc = generic_write_checks(iocb, from);
+ if (rc <= 0)
+ goto out;
+
+ if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
server->vals->exclusive_lock_type, NULL,
- CIFS_WRITE_OP)) {
+ CIFS_WRITE_OP))
rc = __generic_file_write_iter(iocb, from);
- mutex_unlock(&inode->i_mutex);
-
- if (rc > 0) {
- ssize_t err;
+ else
+ rc = -EACCES;
+out:
+ mutex_unlock(&inode->i_mutex);
- err = generic_write_sync(file, iocb->ki_pos - rc, rc);
- if (err < 0)
- rc = err;
- }
- } else {
- mutex_unlock(&inode->i_mutex);
+ if (rc > 0) {
+ ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+ if (err < 0)
+ rc = err;
}
up_read(&cinode->lock_sem);
return rc;
@@ -3816,7 +3794,7 @@ void cifs_oplock_break(struct work_struct *work)
{
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
- struct inode *inode = cfile->dentry->d_inode;
+ struct inode *inode = d_inode(cfile->dentry);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -3877,8 +3855,7 @@ void cifs_oplock_break(struct work_struct *work)
* Direct IO is not yet supported in the cached mode.
*/
static ssize_t
-cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
- loff_t pos)
+cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
/*
* FIXME
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 3e126d7bb2ea..55b58112d122 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1067,7 +1067,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
int rc;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
@@ -1196,7 +1196,7 @@ cifs_drop_nlink(struct inode *inode)
}
/*
- * If dentry->d_inode is null (usually meaning the cached dentry
+ * If d_inode(dentry) is null (usually meaning the cached dentry
* is a negative dentry) then we would attempt a standard SMB delete, but
* if that fails we can not attempt the fall back mechanisms on EACCESS
* but will return the EACCESS to the caller. Note that the VFS does not call
@@ -1207,7 +1207,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
int rc = 0;
unsigned int xid;
char *full_path = NULL;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct cifsInodeInfo *cifs_inode;
struct super_block *sb = dir->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -1551,13 +1551,13 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
cifs_put_tlink(tlink);
if (!rc) {
- spin_lock(&direntry->d_inode->i_lock);
- i_size_write(direntry->d_inode, 0);
- clear_nlink(direntry->d_inode);
- spin_unlock(&direntry->d_inode->i_lock);
+ spin_lock(&d_inode(direntry)->i_lock);
+ i_size_write(d_inode(direntry), 0);
+ clear_nlink(d_inode(direntry));
+ spin_unlock(&d_inode(direntry)->i_lock);
}
- cifsInode = CIFS_I(direntry->d_inode);
+ cifsInode = CIFS_I(d_inode(direntry));
/* force revalidate to go get info when needed */
cifsInode->time = 0;
@@ -1568,7 +1568,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
*/
cifsInode->time = 0;
- direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
+ d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
rmdir_exit:
@@ -1727,7 +1727,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
unlink_target:
/* Try unlinking the target dentry if it's not negative */
- if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
+ if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
if (d_is_dir(target_dentry))
tmprc = cifs_rmdir(target_dir, target_dentry);
else
@@ -1867,7 +1867,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
{
unsigned int xid;
int rc = 0;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct super_block *sb = dentry->d_sb;
char *full_path = NULL;
@@ -1919,7 +1919,7 @@ int cifs_revalidate_file(struct file *filp)
int cifs_revalidate_dentry(struct dentry *dentry)
{
int rc;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
rc = cifs_revalidate_dentry_attr(dentry);
if (rc)
@@ -1933,7 +1933,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int rc;
/*
@@ -2110,7 +2110,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
int rc;
unsigned int xid;
char *full_path = NULL;
- struct inode *inode = direntry->d_inode;
+ struct inode *inode = d_inode(direntry);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
@@ -2251,7 +2251,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
unsigned int xid;
kuid_t uid = INVALID_UID;
kgid_t gid = INVALID_GID;
- struct inode *inode = direntry->d_inode;
+ struct inode *inode = d_inode(direntry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
char *full_path = NULL;
@@ -2409,7 +2409,7 @@ cifs_setattr_exit:
int
cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
- struct inode *inode = direntry->d_inode;
+ struct inode *inode = d_inode(direntry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 2ec6037f61c7..252e672d5604 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -586,12 +586,12 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
* if source file is cached (oplocked) revalidate will not go to server
* until the file is closed or oplock broken so update nlinks locally
*/
- if (old_file->d_inode) {
- cifsInode = CIFS_I(old_file->d_inode);
+ if (d_really_is_positive(old_file)) {
+ cifsInode = CIFS_I(d_inode(old_file));
if (rc == 0) {
- spin_lock(&old_file->d_inode->i_lock);
- inc_nlink(old_file->d_inode);
- spin_unlock(&old_file->d_inode->i_lock);
+ spin_lock(&d_inode(old_file)->i_lock);
+ inc_nlink(d_inode(old_file));
+ spin_unlock(&d_inode(old_file)->i_lock);
/*
* parent dir timestamps will update from srv within a
@@ -629,7 +629,7 @@ cifs_hl_exit:
void *
cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
{
- struct inode *inode = direntry->d_inode;
+ struct inode *inode = d_inode(direntry);
int rc = -ENOMEM;
unsigned int xid;
char *full_path = NULL;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 337946355b29..8442b8b8e0be 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -473,7 +473,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
continue;
cifs_dbg(FYI, "file id match, oplock break\n");
- pCifsInode = CIFS_I(netfile->dentry->d_inode);
+ pCifsInode = CIFS_I(d_inode(netfile->dentry));
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
&pCifsInode->flags);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index c295338e0a98..b4a47237486b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -78,7 +78,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
{
struct dentry *dentry, *alias;
struct inode *inode;
- struct super_block *sb = parent->d_inode->i_sb;
+ struct super_block *sb = d_inode(parent)->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
@@ -88,7 +88,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
return;
if (dentry) {
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (inode) {
/*
* If we're generating inode numbers, then we don't
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index d2979036a4c7..7bfdd6066276 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -722,7 +722,7 @@ cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
static void
cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
cfile->fid.netfid = fid->netfid;
cifs_set_oplock_level(cinode, oplock);
cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 7198eac5dddd..2ab297dae5a7 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -95,7 +95,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
unsigned int max_num, num = 0, max_buf;
struct smb2_lock_element *buf, *cur;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifsLockInfo *li, *tmp;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct list_head tmp_llist;
@@ -231,7 +231,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
unsigned int xid;
unsigned int max_num, max_buf;
struct smb2_lock_element *buf;
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifs_fid_locks *fdlocks;
xid = get_xid();
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 22dfdf17d065..1c5907019045 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -453,7 +453,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
list_for_each(tmp, &tcon->openFileList) {
cfile = list_entry(tmp, struct cifsFileInfo, tlist);
- cinode = CIFS_I(cfile->dentry->d_inode);
+ cinode = CIFS_I(d_inode(cfile->dentry));
if (memcmp(cinode->lease_key, rsp->LeaseKey,
SMB2_LEASE_KEY_SIZE))
@@ -590,7 +590,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
continue;
cifs_dbg(FYI, "file id match, oplock break\n");
- cinode = CIFS_I(cfile->dentry->d_inode);
+ cinode = CIFS_I(d_inode(cfile->dentry));
if (!CIFS_CACHE_WRITE(cinode) &&
rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index eab05e1aa587..54daee5ad4c1 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -524,7 +524,7 @@ smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
cfile->fid.persistent_fid = fid->persistent_fid;
@@ -793,7 +793,7 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
* If extending file more than one page make sparse. Many Linux fs
* make files sparse by default when extending via ftruncate
*/
- inode = cfile->dentry->d_inode;
+ inode = d_inode(cfile->dentry);
if (!set_alloc && (size > inode->i_size + 8192)) {
__u8 set_sparse = 1;
@@ -1032,7 +1032,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode = cfile->dentry->d_inode;
+ inode = d_inode(cfile->dentry);
cifsi = CIFS_I(inode);
/* if file not oplocked can't be sure whether asking to extend size */
@@ -1083,7 +1083,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode = cfile->dentry->d_inode;
+ inode = d_inode(cfile->dentry);
cifsi = CIFS_I(inode);
/* Need to make file sparse, if not already, before freeing range. */
@@ -1115,7 +1115,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode = cfile->dentry->d_inode;
+ inode = d_inode(cfile->dentry);
cifsi = CIFS_I(inode);
/* if file not oplocked can't be sure whether asking to extend size */
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 72a4d10653d6..ff9e1f8b16a4 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -50,9 +50,9 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
if (direntry == NULL)
return -EIO;
- if (direntry->d_inode == NULL)
+ if (d_really_is_negative(direntry))
return -EIO;
- sb = direntry->d_inode->i_sb;
+ sb = d_inode(direntry)->i_sb;
if (sb == NULL)
return -EIO;
@@ -111,9 +111,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
if (direntry == NULL)
return -EIO;
- if (direntry->d_inode == NULL)
+ if (d_really_is_negative(direntry))
return -EIO;
- sb = direntry->d_inode->i_sb;
+ sb = d_inode(direntry)->i_sb;
if (sb == NULL)
return -EIO;
@@ -177,12 +177,12 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
memcpy(pacl, ea_value, value_size);
if (pTcon->ses->server->ops->set_acl)
rc = pTcon->ses->server->ops->set_acl(pacl,
- value_size, direntry->d_inode,
+ value_size, d_inode(direntry),
full_path, CIFS_ACL_DACL);
else
rc = -EOPNOTSUPP;
if (rc == 0) /* force revalidate of the inode */
- CIFS_I(direntry->d_inode)->time = 0;
+ CIFS_I(d_inode(direntry))->time = 0;
kfree(pacl);
}
#else
@@ -246,9 +246,9 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
if (direntry == NULL)
return -EIO;
- if (direntry->d_inode == NULL)
+ if (d_really_is_negative(direntry))
return -EIO;
- sb = direntry->d_inode->i_sb;
+ sb = d_inode(direntry)->i_sb;
if (sb == NULL)
return -EIO;
@@ -324,7 +324,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
goto get_ea_exit; /* rc already EOPNOTSUPP */
pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
- direntry->d_inode, full_path, &acllen);
+ d_inode(direntry), full_path, &acllen);
if (IS_ERR(pacl)) {
rc = PTR_ERR(pacl);
cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
@@ -382,9 +382,9 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
if (direntry == NULL)
return -EIO;
- if (direntry->d_inode == NULL)
+ if (d_really_is_negative(direntry))
return -EIO;
- sb = direntry->d_inode->i_sb;
+ sb = d_inode(direntry)->i_sb;
if (sb == NULL)
return -EIO;
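
The cifs xattr hunks above, like most of the hunks in this series, apply one mechanical substitution: open-coded dentry->d_inode dereferences become the d_inode() accessor, and NULL checks on d_inode become d_really_is_positive() / d_really_is_negative(). As a rough illustration of that accessor pattern, here is a minimal userspace sketch; it is not part of the patch, and the struct layouts and main() driver are simplified stand-ins rather than the real kernel definitions from include/linux/dcache.h.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct inode { unsigned long i_ino; };
struct dentry { struct inode *d_inode; };

/* analogous to d_inode(): a single accessor instead of open-coded ->d_inode */
static inline struct inode *d_inode(const struct dentry *dentry)
{
	return dentry->d_inode;
}

/* analogous to d_really_is_positive() / d_really_is_negative() */
static inline bool d_really_is_positive(const struct dentry *dentry)
{
	return dentry->d_inode != NULL;
}

static inline bool d_really_is_negative(const struct dentry *dentry)
{
	return dentry->d_inode == NULL;
}

int main(void)
{
	struct inode ino = { .i_ino = 42 };
	struct dentry positive = { .d_inode = &ino };
	struct dentry negative = { .d_inode = NULL };

	if (d_really_is_negative(&negative))
		printf("negative dentry: no inode attached\n");
	if (d_really_is_positive(&positive))
		printf("positive dentry: inode %lu\n", d_inode(&positive)->i_ino);
	return 0;
}

Funnelling every access through one helper is what lets later patches change how the inode is fetched (or add checks) in a single place instead of in every filesystem.
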
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 46ee6f238985..5bb630a769e0 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -94,8 +94,8 @@ static void coda_flag_children(struct dentry *parent, int flag)
spin_lock(&parent->d_lock);
list_for_each_entry(de, &parent->d_subdirs, d_child) {
/* don't know what to do with negative dentries */
- if (de->d_inode )
- coda_flag_inode(de->d_inode, flag);
+ if (d_inode(de) )
+ coda_flag_inode(d_inode(de), flag);
}
spin_unlock(&parent->d_lock);
return;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 60cb88c1dd2b..fda9f4311212 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -201,7 +201,7 @@ err_out:
static int coda_link(struct dentry *source_de, struct inode *dir_inode,
struct dentry *de)
{
- struct inode *inode = source_de->d_inode;
+ struct inode *inode = d_inode(source_de);
const char * name = de->d_name.name;
int len = de->d_name.len;
int error;
@@ -266,7 +266,7 @@ static int coda_unlink(struct inode *dir, struct dentry *de)
return error;
coda_dir_update_mtime(dir);
- drop_nlink(de->d_inode);
+ drop_nlink(d_inode(de));
return 0;
}
@@ -279,8 +279,8 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
if (!error) {
/* VFS may delete the child */
- if (de->d_inode)
- clear_nlink(de->d_inode);
+ if (d_really_is_positive(de))
+ clear_nlink(d_inode(de));
/* fix the link count of the parent */
coda_dir_drop_nlink(dir);
@@ -303,14 +303,14 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
coda_i2f(new_dir), old_length, new_length,
(const char *) old_name, (const char *)new_name);
if (!error) {
- if (new_dentry->d_inode) {
+ if (d_really_is_positive(new_dentry)) {
if (d_is_dir(new_dentry)) {
coda_dir_drop_nlink(old_dir);
coda_dir_inc_nlink(new_dir);
}
coda_dir_update_mtime(old_dir);
coda_dir_update_mtime(new_dir);
- coda_flag_inode(new_dentry->d_inode, C_VATTR);
+ coda_flag_inode(d_inode(new_dentry), C_VATTR);
} else {
coda_flag_inode(old_dir, C_VATTR);
coda_flag_inode(new_dir, C_VATTR);
@@ -449,13 +449,13 @@ static int coda_dentry_revalidate(struct dentry *de, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- inode = de->d_inode;
+ inode = d_inode(de);
if (!inode || is_root_inode(inode))
goto out;
if (is_bad_inode(inode))
goto bad;
- cii = ITOC(de->d_inode);
+ cii = ITOC(d_inode(de));
if (!(cii->c_flags & (C_PURGE | C_FLUSH)))
goto out;
@@ -487,11 +487,11 @@ static int coda_dentry_delete(const struct dentry * dentry)
{
int flags;
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
return 0;
- flags = (ITOC(dentry->d_inode)->c_flags) & C_PURGE;
- if (is_bad_inode(dentry->d_inode) || flags) {
+ flags = (ITOC(d_inode(dentry))->c_flags) & C_PURGE;
+ if (is_bad_inode(d_inode(dentry)) || flags) {
return 1;
}
return 0;
diff --git a/fs/coda/file.c b/fs/coda/file.c
index d244d743a232..1da3805f3ddc 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -27,19 +27,14 @@
#include "coda_int.h"
static ssize_t
-coda_file_read(struct file *coda_file, char __user *buf, size_t count, loff_t *ppos)
+coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct coda_file_info *cfi;
- struct file *host_file;
+ struct file *coda_file = iocb->ki_filp;
+ struct coda_file_info *cfi = CODA_FTOC(coda_file);
- cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
- host_file = cfi->cfi_container;
- if (!host_file->f_op->read)
- return -EINVAL;
-
- return host_file->f_op->read(host_file, buf, count, ppos);
+ return vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos);
}
static ssize_t
@@ -64,32 +59,25 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
}
static ssize_t
-coda_file_write(struct file *coda_file, const char __user *buf, size_t count, loff_t *ppos)
+coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct inode *host_inode, *coda_inode = file_inode(coda_file);
- struct coda_file_info *cfi;
+ struct file *coda_file = iocb->ki_filp;
+ struct inode *coda_inode = file_inode(coda_file);
+ struct coda_file_info *cfi = CODA_FTOC(coda_file);
struct file *host_file;
ssize_t ret;
- cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
- host_file = cfi->cfi_container;
-
- if (!host_file->f_op->write)
- return -EINVAL;
- host_inode = file_inode(host_file);
+ host_file = cfi->cfi_container;
file_start_write(host_file);
mutex_lock(&coda_inode->i_mutex);
-
- ret = host_file->f_op->write(host_file, buf, count, ppos);
-
- coda_inode->i_size = host_inode->i_size;
+ ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos);
+ coda_inode->i_size = file_inode(host_file)->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
mutex_unlock(&coda_inode->i_mutex);
file_end_write(host_file);
-
return ret;
}
@@ -231,8 +219,8 @@ int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
const struct file_operations coda_file_operations = {
.llseek = generic_file_llseek,
- .read = coda_file_read,
- .write = coda_file_write,
+ .read_iter = coda_file_read_iter,
+ .write_iter = coda_file_write_iter,
.mmap = coda_file_mmap,
.open = coda_open,
.release = coda_release,
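
The fs/coda/file.c hunk above switches Coda from the old ->read/->write methods to ->read_iter/->write_iter, forwarding the iterator to the host file with vfs_iter_read()/vfs_iter_write() and letting the file position travel in iocb->ki_pos. The following userspace analogy, which is not part of the patch, mirrors that shape with preadv() on a backing file descriptor; the wrapper_file struct, the helper names, and the file path are invented for illustration only.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

struct wrapper_file {
	int host_fd;   /* stands in for cfi->cfi_container */
	off_t pos;     /* stands in for iocb->ki_pos */
};

/* forward an iovec-based read to the backing fd and advance the position */
static ssize_t wrapper_read_iter(struct wrapper_file *w,
				 const struct iovec *iov, int iovcnt)
{
	ssize_t ret = preadv(w->host_fd, iov, iovcnt, w->pos);

	if (ret > 0)
		w->pos += ret;
	return ret;
}

int main(void)
{
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
	struct wrapper_file w = { .pos = 0 };
	ssize_t n;

	w.host_fd = open("/etc/hostname", O_RDONLY);   /* example path only */
	if (w.host_fd < 0)
		return 1;
	n = wrapper_read_iter(&w, &iov, 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("read %zd bytes, position now %lld\n%s", n, (long long)w.pos, buf);
	}
	close(w.host_fd);
	return 0;
}

The point of the conversion is the same in both worlds: the wrapper no longer cares whether the backing object implements a buffer-based or an iterator-based method, it just hands the whole I/O description down.
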
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 82ec68b59208..cac1390b87a3 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -257,15 +257,15 @@ static void coda_evict_inode(struct inode *inode)
int coda_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- int err = coda_revalidate_inode(dentry->d_inode);
+ int err = coda_revalidate_inode(d_inode(dentry));
if (!err)
- generic_fillattr(dentry->d_inode, stat);
+ generic_fillattr(d_inode(dentry), stat);
return err;
}
int coda_setattr(struct dentry *de, struct iattr *iattr)
{
- struct inode *inode = de->d_inode;
+ struct inode *inode = d_inode(de);
struct coda_vattr vattr;
int error;
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 4326d172fc27..f36a4040afb8 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -72,7 +72,7 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
if (error)
return error;
- target_inode = path.dentry->d_inode;
+ target_inode = d_inode(path.dentry);
/* return if it is not a Coda inode */
if (target_inode->i_sb != inode->i_sb) {
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 5bb6e27298a4..9b1ffaa0572e 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -820,8 +820,8 @@ int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out)
case CODA_FLUSH:
coda_cache_clear_all(sb);
shrink_dcache_sb(sb);
- if (sb->s_root->d_inode)
- coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
+ if (d_really_is_positive(sb->s_root))
+ coda_flag_inode(d_inode(sb->s_root), C_FLUSH);
break;
case CODA_PURGEUSER:
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index afec6450450f..6b8e2f091f5b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -570,6 +570,7 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
#define BNEPCONNDEL _IOW('B', 201, int)
#define BNEPGETCONNLIST _IOR('B', 210, int)
#define BNEPGETCONNINFO _IOR('B', 211, int)
+#define BNEPGETSUPPFEAT _IOR('B', 212, int)
#define CMTPCONNADD _IOW('C', 200, int)
#define CMTPCONNDEL _IOW('C', 201, int)
@@ -1247,6 +1248,7 @@ COMPATIBLE_IOCTL(BNEPCONNADD)
COMPATIBLE_IOCTL(BNEPCONNDEL)
COMPATIBLE_IOCTL(BNEPGETCONNLIST)
COMPATIBLE_IOCTL(BNEPGETCONNINFO)
+COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
COMPATIBLE_IOCTL(CMTPCONNADD)
COMPATIBLE_IOCTL(CMTPCONNDEL)
COMPATIBLE_IOCTL(CMTPGETCONNLIST)
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index cf0db005d2f5..c81ce7f200a6 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -289,7 +289,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
error = configfs_create(dentry, mode, init_dir);
if (!error) {
- inc_nlink(p->d_inode);
+ inc_nlink(d_inode(p));
item->ci_dentry = dentry;
} else {
struct configfs_dirent *sd = dentry->d_fsdata;
@@ -375,8 +375,8 @@ static void remove_dir(struct dentry * d)
list_del_init(&sd->s_sibling);
spin_unlock(&configfs_dirent_lock);
configfs_put(sd);
- if (d->d_inode)
- simple_rmdir(parent->d_inode,d);
+ if (d_really_is_positive(d))
+ simple_rmdir(d_inode(parent),d);
pr_debug(" o %pd removing done (%d)\n", d, d_count(d));
@@ -513,7 +513,7 @@ static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex
/* Abort if racing with mkdir() */
if (sd->s_type & CONFIGFS_USET_IN_MKDIR) {
if (wait_mutex)
- *wait_mutex = &sd->s_dentry->d_inode->i_mutex;
+ *wait_mutex = &d_inode(sd->s_dentry)->i_mutex;
return -EAGAIN;
}
@@ -624,13 +624,13 @@ static void detach_groups(struct config_group *group)
child = sd->s_dentry;
- mutex_lock(&child->d_inode->i_mutex);
+ mutex_lock(&d_inode(child)->i_mutex);
configfs_detach_group(sd->s_element);
- child->d_inode->i_flags |= S_DEAD;
+ d_inode(child)->i_flags |= S_DEAD;
dont_mount(child);
- mutex_unlock(&child->d_inode->i_mutex);
+ mutex_unlock(&d_inode(child)->i_mutex);
d_delete(child);
dput(child);
@@ -672,7 +672,7 @@ static int create_default_group(struct config_group *parent_group,
sd = child->d_fsdata;
sd->s_type |= CONFIGFS_USET_DEFAULT;
} else {
- BUG_ON(child->d_inode);
+ BUG_ON(d_inode(child));
d_drop(child);
dput(child);
}
@@ -818,11 +818,11 @@ static int configfs_attach_item(struct config_item *parent_item,
* the VFS may already have hit and used them. Thus,
* we must lock them as rmdir() would.
*/
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
configfs_remove_dir(item);
- dentry->d_inode->i_flags |= S_DEAD;
+ d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
d_delete(dentry);
}
}
@@ -858,16 +858,16 @@ static int configfs_attach_group(struct config_item *parent_item,
* We must also lock the inode to remove it safely in case of
* error, as rmdir() would.
*/
- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+ mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
ret = populate_groups(to_config_group(item));
if (ret) {
configfs_detach_item(item);
- dentry->d_inode->i_flags |= S_DEAD;
+ d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
}
configfs_adjust_dir_dirent_depth_after_populate(sd);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
if (ret)
d_delete(dentry);
}
@@ -1075,7 +1075,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys,
* subsystem is really registered, and so we need to lock out
* configfs_[un]register_subsystem().
*/
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
root_sd = root->d_fsdata;
@@ -1111,7 +1111,7 @@ int configfs_depend_item(struct configfs_subsystem *subsys,
out_unlock_dirent_lock:
spin_unlock(&configfs_dirent_lock);
out_unlock_fs:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
/*
* If we succeeded, the fs is pinned via other methods. If not,
@@ -1453,11 +1453,11 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
down_write(&configfs_rename_sem);
parent = item->parent->dentry;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
if (!IS_ERR(new_dentry)) {
- if (!new_dentry->d_inode) {
+ if (d_really_is_negative(new_dentry)) {
error = config_item_set_name(item, "%s", new_name);
if (!error) {
d_add(new_dentry, NULL);
@@ -1469,7 +1469,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
error = -EEXIST;
dput(new_dentry);
}
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
up_write(&configfs_rename_sem);
return error;
@@ -1482,7 +1482,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
struct configfs_dirent * parent_sd = dentry->d_fsdata;
int err;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
/*
* Fake invisibility if dir belongs to a group/default groups hierarchy
* being attached
@@ -1495,7 +1495,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
else
err = 0;
}
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
return err;
}
@@ -1505,11 +1505,11 @@ static int configfs_dir_close(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_path.dentry;
struct configfs_dirent * cursor = file->private_data;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
spin_lock(&configfs_dirent_lock);
list_del_init(&cursor->s_sibling);
spin_unlock(&configfs_dirent_lock);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
release_configfs_dirent(cursor);
@@ -1567,7 +1567,7 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
spin_lock(&configfs_dirent_lock);
dentry = next->s_dentry;
if (dentry)
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (inode)
ino = inode->i_ino;
spin_unlock(&configfs_dirent_lock);
@@ -1590,7 +1590,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry * dentry = file->f_path.dentry;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
switch (whence) {
case 1:
offset += file->f_pos;
@@ -1598,7 +1598,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
if (offset >= 0)
break;
default:
- mutex_unlock(&file_inode(file)->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -1624,7 +1624,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
spin_unlock(&configfs_dirent_lock);
}
}
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
return offset;
}
@@ -1654,7 +1654,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
sd = root->d_fsdata;
link_group(to_config_group(sd->s_element), group);
- mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&d_inode(root)->i_mutex, I_MUTEX_PARENT);
err = -ENOMEM;
dentry = d_alloc_name(root, group->cg_item.ci_name);
@@ -1664,7 +1664,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
err = configfs_attach_group(sd->s_element, &group->cg_item,
dentry);
if (err) {
- BUG_ON(dentry->d_inode);
+ BUG_ON(d_inode(dentry));
d_drop(dentry);
dput(dentry);
} else {
@@ -1674,7 +1674,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
}
}
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
if (err) {
unlink_group(group);
@@ -1695,9 +1695,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
return;
}
- mutex_lock_nested(&root->d_inode->i_mutex,
+ mutex_lock_nested(&d_inode(root)->i_mutex,
I_MUTEX_PARENT);
- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+ mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD);
mutex_lock(&configfs_symlink_mutex);
spin_lock(&configfs_dirent_lock);
if (configfs_detach_prep(dentry, NULL)) {
@@ -1706,13 +1706,13 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
spin_unlock(&configfs_dirent_lock);
mutex_unlock(&configfs_symlink_mutex);
configfs_detach_group(&group->cg_item);
- dentry->d_inode->i_flags |= S_DEAD;
+ d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
d_delete(dentry);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(dentry);
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 56d2cdc9ae0a..403269ffcdf3 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -326,10 +326,10 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
int error = 0;
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
CONFIGFS_ITEM_ATTR);
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
return error;
}
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 5423a6a6ecc8..8d89f5fd0331 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -56,7 +56,7 @@ static const struct inode_operations configfs_inode_operations ={
int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
struct configfs_dirent * sd = dentry->d_fsdata;
struct iattr * sd_iattr;
unsigned int ia_valid = iattr->ia_valid;
@@ -186,7 +186,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct in
if (!dentry)
return -ENOENT;
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
return -EEXIST;
sd = dentry->d_fsdata;
@@ -194,7 +194,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct in
if (!inode)
return -ENOMEM;
- p_inode = dentry->d_parent->d_inode;
+ p_inode = d_inode(dentry->d_parent);
p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
configfs_set_inode_lock_class(sd, inode);
@@ -236,11 +236,11 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
if (dentry) {
spin_lock(&dentry->d_lock);
- if (!d_unhashed(dentry) && dentry->d_inode) {
+ if (!d_unhashed(dentry) && d_really_is_positive(dentry)) {
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- simple_unlink(parent->d_inode, dentry);
+ simple_unlink(d_inode(parent), dentry);
} else
spin_unlock(&dentry->d_lock);
}
@@ -251,11 +251,11 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
struct configfs_dirent * sd;
struct configfs_dirent * parent_sd = dir->d_fsdata;
- if (dir->d_inode == NULL)
+ if (d_really_is_negative(dir))
/* no inode means this hasn't been made visible yet */
return;
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock(&d_inode(dir)->i_mutex);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;
@@ -268,5 +268,5 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
break;
}
}
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index da94e41bdbf6..537356742091 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -173,5 +173,5 @@ MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.2");
MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration.");
-module_init(configfs_init);
+core_initcall(configfs_init);
module_exit(configfs_exit);
diff --git a/fs/coredump.c b/fs/coredump.c
index f319926ddf8c..bbbe139ab280 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -657,7 +657,7 @@ void do_coredump(const siginfo_t *siginfo)
*/
if (!uid_eq(inode->i_uid, current_fsuid()))
goto close_fail;
- if (!cprm.file->f_op->write)
+ if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
goto close_fail;
if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
goto close_fail;
diff --git a/fs/dax.c b/fs/dax.c
index ed1619ec6537..6f65f00e58ec 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,9 +98,9 @@ static bool buffer_size_valid(struct buffer_head *bh)
return bh->b_state != 0;
}
-static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
- loff_t start, loff_t end, get_block_t get_block,
- struct buffer_head *bh)
+static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
+ loff_t start, loff_t end, get_block_t get_block,
+ struct buffer_head *bh)
{
ssize_t retval = 0;
loff_t pos = start;
@@ -109,7 +109,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
void *addr;
bool hole = false;
- if (rw != WRITE)
+ if (iov_iter_rw(iter) != WRITE)
end = min(end, i_size_read(inode));
while (pos < end) {
@@ -124,7 +124,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
bh->b_size = PAGE_ALIGN(end - pos);
bh->b_state = 0;
retval = get_block(inode, block, bh,
- rw == WRITE);
+ iov_iter_rw(iter) == WRITE);
if (retval)
break;
if (!buffer_size_valid(bh))
@@ -137,7 +137,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
bh->b_size -= done;
}
- hole = (rw != WRITE) && !buffer_written(bh);
+ hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
if (hole) {
addr = NULL;
size = bh->b_size - first;
@@ -154,7 +154,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
max = min(pos + size, end);
}
- if (rw == WRITE)
+ if (iov_iter_rw(iter) == WRITE)
len = copy_from_iter(addr, max - pos, iter);
else if (!hole)
len = copy_to_iter(addr, max - pos, iter);
@@ -173,7 +173,6 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
/**
* dax_do_io - Perform I/O to a DAX file
- * @rw: READ to read or WRITE to write
* @iocb: The control block for this I/O
* @inode: The file which the I/O is directed at
* @iter: The addresses to do I/O from or to
@@ -189,9 +188,9 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
* As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
* is in progress.
*/
-ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
- struct iov_iter *iter, loff_t pos,
- get_block_t get_block, dio_iodone_t end_io, int flags)
+ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
+ struct iov_iter *iter, loff_t pos, get_block_t get_block,
+ dio_iodone_t end_io, int flags)
{
struct buffer_head bh;
ssize_t retval = -EINVAL;
@@ -199,7 +198,7 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
memset(&bh, 0, sizeof(bh));
- if ((flags & DIO_LOCKING) && (rw == READ)) {
+ if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
struct address_space *mapping = inode->i_mapping;
mutex_lock(&inode->i_mutex);
retval = filemap_write_and_wait_range(mapping, pos, end - 1);
@@ -210,17 +209,17 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
}
/* Protects against truncate */
- atomic_inc(&inode->i_dio_count);
+ inode_dio_begin(inode);
- retval = dax_io(rw, inode, iter, pos, end, get_block, &bh);
+ retval = dax_io(inode, iter, pos, end, get_block, &bh);
- if ((flags & DIO_LOCKING) && (rw == READ))
+ if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
mutex_unlock(&inode->i_mutex);
if ((retval > 0) && end_io)
end_io(iocb, pos, retval, bh.b_private);
- inode_dio_done(inode);
+ inode_dio_end(inode);
out:
return retval;
}
@@ -464,6 +463,23 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
EXPORT_SYMBOL_GPL(dax_fault);
/**
+ * dax_pfn_mkwrite - handle first write to DAX page
+ * @vma: The virtual memory area where the fault occurred
+ * @vmf: The description of the fault
+ *
+ */
+int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+
+ sb_start_pagefault(sb);
+ file_update_time(vma->vm_file);
+ sb_end_pagefault(sb);
+ return VM_FAULT_NOPAGE;
+}
+EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
+
+/**
* dax_zero_page_range - zero a range within a page of a DAX file
* @inode: The file being truncated
* @from: The file offset that is being truncated to
diff --git a/fs/dcache.c b/fs/dcache.c
index c71e3732e53b..656ce522a218 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -269,6 +269,41 @@ static inline int dname_external(const struct dentry *dentry)
return dentry->d_name.name != dentry->d_iname;
}
+/*
+ * Make sure other CPUs see the inode attached before the type is set.
+ */
+static inline void __d_set_inode_and_type(struct dentry *dentry,
+ struct inode *inode,
+ unsigned type_flags)
+{
+ unsigned flags;
+
+ dentry->d_inode = inode;
+ smp_wmb();
+ flags = READ_ONCE(dentry->d_flags);
+ flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ flags |= type_flags;
+ WRITE_ONCE(dentry->d_flags, flags);
+}
+
+/*
+ * Ideally, we want to make sure that other CPUs see the flags cleared before
+ * the inode is detached, but this is really a violation of RCU principles
+ * since the ordering suggests we should always set inode before flags.
+ *
+ * We should instead replace or discard the entire dentry - but that sucks
+ * performancewise on mass deletion/rename.
+ */
+static inline void __d_clear_type_and_inode(struct dentry *dentry)
+{
+ unsigned flags = READ_ONCE(dentry->d_flags);
+
+ flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ WRITE_ONCE(dentry->d_flags, flags);
+ smp_wmb();
+ dentry->d_inode = NULL;
+}
+
static void dentry_free(struct dentry *dentry)
{
WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
@@ -311,7 +346,7 @@ static void dentry_iput(struct dentry * dentry)
{
struct inode *inode = dentry->d_inode;
if (inode) {
- dentry->d_inode = NULL;
+ __d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
@@ -335,8 +370,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
- __d_clear_type(dentry);
- dentry->d_inode = NULL;
+ __d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
dentry_rcuwalk_barrier(dentry);
spin_unlock(&dentry->d_lock);
@@ -1715,11 +1749,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
unsigned add_flags = d_flags_for_inode(inode);
spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
- dentry->d_flags |= add_flags;
if (inode)
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
- dentry->d_inode = inode;
+ __d_set_inode_and_type(dentry, inode, add_flags);
dentry_rcuwalk_barrier(dentry);
spin_unlock(&dentry->d_lock);
fsnotify_d_instantiate(dentry, inode);
@@ -1937,8 +1969,7 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
add_flags |= DCACHE_DISCONNECTED;
spin_lock(&tmp->d_lock);
- tmp->d_inode = inode;
- tmp->d_flags |= add_flags;
+ __d_set_inode_and_type(tmp, inode, add_flags);
hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
hlist_bl_lock(&tmp->d_sb->s_anon);
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
@@ -2690,7 +2721,7 @@ static int __d_unalias(struct inode *inode,
struct dentry *dentry, struct dentry *alias)
{
struct mutex *m1 = NULL, *m2 = NULL;
- int ret = -EBUSY;
+ int ret = -ESTALE;
/* If alias and dentry share a parent, then no extra locks required */
if (alias->d_parent == dentry->d_parent)
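
The new __d_set_inode_and_type() and __d_clear_type_and_inode() helpers above spell out an ordering requirement: the inode pointer must become visible before the type flags do, so a lockless RCU-walk reader that observes the flags also observes the inode. A small userspace sketch of that publication pattern follows, using C11 release/acquire in place of the kernel's smp_wmb() and READ_ONCE()/WRITE_ONCE(); it is illustrative only and every name in it is invented.

#include <stdatomic.h>
#include <stdio.h>

struct payload { int value; };

static struct payload *slot;      /* stands in for dentry->d_inode */
static _Atomic unsigned flags;    /* stands in for dentry->d_flags */
#define FLAG_HAS_PAYLOAD 0x1u

static void publish(struct payload *p)
{
	slot = p;                                     /* 1. attach the payload    */
	atomic_store_explicit(&flags, FLAG_HAS_PAYLOAD,
			      memory_order_release);  /* 2. then publish the flag */
}

static int read_if_published(int *out)
{
	unsigned f = atomic_load_explicit(&flags, memory_order_acquire);

	if (!(f & FLAG_HAS_PAYLOAD))
		return 0;
	*out = slot->value;   /* safe: the acquire pairs with the release above */
	return 1;
}

int main(void)
{
	static struct payload p = { .value = 7 };
	int v;

	publish(&p);
	if (read_if_published(&v))
		printf("reader observed published value %d\n", v);
	return 0;
}

Teardown is the awkward direction, as the comment in the hunk notes: clearing the flags before detaching the inode inverts the "pointer first, flags second" order, which is why the helper is documented as a compromise rather than a clean RCU publication.
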
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 517e64938438..830a7e76f5c6 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -45,7 +45,7 @@ const struct file_operations debugfs_file_operations = {
static void *debugfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- nd_set_link(nd, dentry->d_inode->i_private);
+ nd_set_link(nd, d_inode(dentry)->i_private);
return NULL;
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 61e72d44cf94..c1e7ffb0dab6 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -46,7 +46,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb)
static inline int debugfs_positive(struct dentry *dentry)
{
- return dentry->d_inode && !d_unhashed(dentry);
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
struct debugfs_mount_opts {
@@ -124,7 +124,7 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
static int debugfs_apply_options(struct super_block *sb)
{
struct debugfs_fs_info *fsi = sb->s_fs_info;
- struct inode *inode = sb->s_root->d_inode;
+ struct inode *inode = d_inode(sb->s_root);
struct debugfs_mount_opts *opts = &fsi->mount_opts;
inode->i_mode &= ~S_IALLUGO;
@@ -188,7 +188,7 @@ static struct vfsmount *debugfs_automount(struct path *path)
{
struct vfsmount *(*f)(void *);
f = (struct vfsmount *(*)(void *))path->dentry->d_fsdata;
- return f(path->dentry->d_inode->i_private);
+ return f(d_inode(path->dentry)->i_private);
}
static const struct dentry_operations debugfs_dops = {
@@ -270,20 +270,20 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
if (!parent)
parent = debugfs_mount->mnt_root;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry) && dentry->d_inode) {
+ if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
dput(dentry);
dentry = ERR_PTR(-EEXIST);
}
if (IS_ERR(dentry))
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return dentry;
}
static struct dentry *failed_creating(struct dentry *dentry)
{
- mutex_unlock(&dentry->d_parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry->d_parent)->i_mutex);
dput(dentry);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
return NULL;
@@ -291,7 +291,7 @@ static struct dentry *failed_creating(struct dentry *dentry)
static struct dentry *end_creating(struct dentry *dentry)
{
- mutex_unlock(&dentry->d_parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry->d_parent)->i_mutex);
return dentry;
}
@@ -344,7 +344,7 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode,
inode->i_fop = fops ? fops : &debugfs_file_operations;
inode->i_private = data;
d_instantiate(dentry, inode);
- fsnotify_create(dentry->d_parent->d_inode, dentry);
+ fsnotify_create(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_file);
@@ -384,7 +384,7 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
if (de)
- de->d_inode->i_size = file_size;
+ d_inode(de)->i_size = file_size;
return de;
}
EXPORT_SYMBOL_GPL(debugfs_create_file_size);
@@ -426,8 +426,8 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
- inc_nlink(dentry->d_parent->d_inode);
- fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
+ inc_nlink(d_inode(dentry->d_parent));
+ fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
@@ -524,10 +524,10 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
if (debugfs_positive(dentry)) {
dget(dentry);
- if (S_ISDIR(dentry->d_inode->i_mode))
- ret = simple_rmdir(parent->d_inode, dentry);
+ if (d_is_dir(dentry))
+ ret = simple_rmdir(d_inode(parent), dentry);
else
- simple_unlink(parent->d_inode, dentry);
+ simple_unlink(d_inode(parent), dentry);
if (!ret)
d_delete(dentry);
dput(dentry);
@@ -557,12 +557,12 @@ void debugfs_remove(struct dentry *dentry)
return;
parent = dentry->d_parent;
- if (!parent || !parent->d_inode)
+ if (!parent || d_really_is_negative(parent))
return;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
ret = __debugfs_remove(dentry, parent);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
if (!ret)
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
@@ -588,12 +588,12 @@ void debugfs_remove_recursive(struct dentry *dentry)
return;
parent = dentry->d_parent;
- if (!parent || !parent->d_inode)
+ if (!parent || d_really_is_negative(parent))
return;
parent = dentry;
down:
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
loop:
/*
* The parent->d_subdirs is protected by the d_lock. Outside that
@@ -608,7 +608,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
/* perhaps simple_empty(child) makes more sense */
if (!list_empty(&child->d_subdirs)) {
spin_unlock(&parent->d_lock);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
parent = child;
goto down;
}
@@ -629,10 +629,10 @@ void debugfs_remove_recursive(struct dentry *dentry)
}
spin_unlock(&parent->d_lock);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
child = parent;
parent = parent->d_parent;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
if (child != dentry)
/* go up */
@@ -640,7 +640,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
if (!__debugfs_remove(child, parent))
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
@@ -672,27 +672,27 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
- if (!old_dir->d_inode || !new_dir->d_inode)
+ if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
goto exit;
/* Source does not exist, cyclic rename, or mountpoint? */
- if (!old_dentry->d_inode || old_dentry == trap ||
+ if (d_really_is_negative(old_dentry) || old_dentry == trap ||
d_mountpoint(old_dentry))
goto exit;
dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
/* Lookup failed, cyclic rename or target exists? */
- if (IS_ERR(dentry) || dentry == trap || dentry->d_inode)
+ if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
goto exit;
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
- error = simple_rename(old_dir->d_inode, old_dentry, new_dir->d_inode,
+ error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
dentry);
if (error) {
fsnotify_oldname_free(old_name);
goto exit;
}
d_move(old_dentry, dentry);
- fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
+ fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name,
d_is_dir(old_dentry),
NULL, old_dentry);
fsnotify_oldname_free(old_name);
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index cfe8466f7fef..add566303c68 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -253,7 +253,7 @@ static int mknod_ptmx(struct super_block *sb)
if (!uid_valid(root_uid) || !gid_valid(root_gid))
return -EINVAL;
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
/* If we have already created ptmx node, return */
if (fsi->ptmx_dentry) {
@@ -290,7 +290,7 @@ static int mknod_ptmx(struct super_block *sb)
fsi->ptmx_dentry = dentry;
rc = 0;
out:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return rc;
}
@@ -298,7 +298,7 @@ static void update_ptmx_mode(struct pts_fs_info *fsi)
{
struct inode *inode;
if (fsi->ptmx_dentry) {
- inode = fsi->ptmx_dentry->d_inode;
+ inode = d_inode(fsi->ptmx_dentry);
inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
}
}
@@ -602,18 +602,18 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
sprintf(s, "%d", index);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
dentry = d_alloc_name(root, s);
if (dentry) {
d_add(dentry, inode);
- fsnotify_create(root->d_inode, dentry);
+ fsnotify_create(d_inode(root), dentry);
} else {
iput(inode);
inode = ERR_PTR(-ENOMEM);
}
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return inode;
}
@@ -658,7 +658,7 @@ void devpts_pty_kill(struct inode *inode)
BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
dentry = d_find_alias(inode);
@@ -667,7 +667,7 @@ void devpts_pty_kill(struct inode *inode)
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
dput(dentry); /* d_find_alias above */
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
}
static int __init init_devpts_fs(void)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index e181b6b2e297..745d2342651a 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,7 +37,6 @@
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
-#include <linux/aio.h>
/*
* How many user pages to map in one call to get_user_pages(). This determines
@@ -254,7 +253,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
if (dio->end_io && dio->result)
dio->end_io(dio->iocb, offset, transferred, dio->private);
- inode_dio_done(dio->inode);
+ if (!(dio->flags & DIO_SKIP_DIO_COUNT))
+ inode_dio_end(dio->inode);
+
if (is_async) {
if (dio->rw & WRITE) {
int err;
@@ -265,7 +266,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
ret = err;
}
- aio_complete(dio->iocb, ret, 0);
+ dio->iocb->ki_complete(dio->iocb, ret, 0);
}
kmem_cache_free(dio_cache, dio);
@@ -1056,7 +1057,7 @@ static inline int drop_refcount(struct dio *dio)
* operation. AIO can if it was a broken operation described above or
* in fact if all the bios race to complete before we get here. In
* that case dio_complete() translates the EIOCBQUEUED into the proper
- * return code that the caller will hand to aio_complete().
+ * return code that the caller will hand to ->complete().
*
* This is managed by the bio_lock instead of being an atomic_t so that
* completion paths can drop their ref and use the remaining count to
@@ -1094,10 +1095,10 @@ static inline int drop_refcount(struct dio *dio)
* for the whole file.
*/
static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+ dio_submit_t submit_io, int flags)
{
unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
@@ -1111,9 +1112,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct blk_plug plug;
unsigned long align = offset | iov_iter_alignment(iter);
- if (rw & WRITE)
- rw = WRITE_ODIRECT;
-
/*
* Avoid references to bdev if not absolutely needed to give
* the early prefetch in the caller enough time.
@@ -1128,7 +1126,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
/* watch out for a 0 len io from a tricksy fs */
- if (rw == READ && !iov_iter_count(iter))
+ if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
return 0;
dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1144,7 +1142,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
dio->flags = flags;
if (dio->flags & DIO_LOCKING) {
- if (rw == READ) {
+ if (iov_iter_rw(iter) == READ) {
struct address_space *mapping =
iocb->ki_filp->f_mapping;
@@ -1170,19 +1168,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (is_sync_kiocb(iocb))
dio->is_async = false;
else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
- (rw & WRITE) && end > i_size_read(inode))
+ iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
dio->is_async = false;
else
dio->is_async = true;
dio->inode = inode;
- dio->rw = rw;
+ dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
/*
* For AIO O_(D)SYNC writes we need to defer completions to a workqueue
* so that we can call ->fsync.
*/
- if (dio->is_async && (rw & WRITE) &&
+ if (dio->is_async && iov_iter_rw(iter) == WRITE &&
((iocb->ki_filp->f_flags & O_DSYNC) ||
IS_SYNC(iocb->ki_filp->f_mapping->host))) {
retval = dio_set_defer_completion(dio);
@@ -1199,7 +1197,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
/*
* Will be decremented at I/O completion time.
*/
- atomic_inc(&inode->i_dio_count);
+ if (!(dio->flags & DIO_SKIP_DIO_COUNT))
+ inode_dio_begin(inode);
retval = 0;
sdio.blkbits = blkbits;
@@ -1275,7 +1274,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
* we can let i_mutex go now that its achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
- if (rw == READ && (dio->flags & DIO_LOCKING))
+ if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
mutex_unlock(&dio->inode->i_mutex);
/*
@@ -1287,7 +1286,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
*/
BUG_ON(retval == -EIOCBQUEUED);
if (dio->is_async && retval == 0 && dio->result &&
- (rw == READ || dio->result == count))
+ (iov_iter_rw(iter) == READ || dio->result == count))
retval = -EIOCBQUEUED;
else
dio_await_completion(dio);
@@ -1301,11 +1300,11 @@ out:
return retval;
}
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block,
+ dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags)
{
/*
* The block device state is needed in the end to finally
@@ -1319,8 +1318,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
prefetch(bdev->bd_queue);
prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
- return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
- get_block, end_io, submit_io, flags);
+ return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+ end_io, submit_io, flags);
}
EXPORT_SYMBOL(__blockdev_direct_IO);
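
The dax and direct-io hunks above drop the explicit rw argument and derive the transfer direction from the iterator itself via iov_iter_rw(). The sketch below, which is not kernel code and uses invented names, shows the underlying idea: when the iterator carries its own direction, helpers can branch on it instead of threading a separate READ/WRITE flag through every call.

#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>

enum iter_dir { ITER_READ, ITER_WRITE };

struct my_iter {
	enum iter_dir dir;   /* stands in for the direction recorded in iov_iter */
	size_t count;
};

/* analogous in spirit to iov_iter_rw(): ask the iterator which way data flows */
static inline enum iter_dir my_iter_rw(const struct my_iter *iter)
{
	return iter->dir;
}

static ssize_t do_io(struct my_iter *iter)
{
	if (my_iter_rw(iter) == ITER_WRITE)
		printf("submitting %zu bytes as a write\n", iter->count);
	else
		printf("submitting %zu bytes as a read\n", iter->count);
	return (ssize_t)iter->count;
}

int main(void)
{
	struct my_iter rd = { .dir = ITER_READ,  .count = 4096 };
	struct my_iter wr = { .dir = ITER_WRITE, .count = 512  };

	do_io(&rd);
	do_io(&wr);
	return 0;
}

Removing the redundant argument also removes a class of bugs where the rw flag and the iterator could disagree, which is exactly what checks like "iov_iter_rw(iter) == WRITE" in the converted code rule out.
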
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 719e1ce1c609..97315f2f6816 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1326,7 +1326,7 @@ static int ecryptfs_read_headers_virt(char *page_virt,
if (rc)
goto out;
if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
- ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
+ ecryptfs_i_size_init(page_virt, d_inode(ecryptfs_dentry));
offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
&bytes_read);
@@ -1425,7 +1425,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
{
int rc;
char *page_virt;
- struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode;
+ struct inode *ecryptfs_inode = d_inode(ecryptfs_dentry);
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 4000f6b3a750..8db0b464483f 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -54,11 +54,11 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
struct inode *lower_inode =
- ecryptfs_inode_to_lower(dentry->d_inode);
+ ecryptfs_inode_to_lower(d_inode(dentry));
- fsstack_copy_attr_all(dentry->d_inode, lower_inode);
+ fsstack_copy_attr_all(d_inode(dentry), lower_inode);
}
return rc;
}
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index fd39bad6f1bd..72afcc629d7b 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -31,7 +31,6 @@
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/fs_stack.h>
-#include <linux/aio.h>
#include "ecryptfs_kernel.h"
/**
@@ -52,12 +51,6 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
struct file *file = iocb->ki_filp;
rc = generic_file_read_iter(iocb, to);
- /*
- * Even though this is a async interface, we need to wait
- * for IO to finish to update atime
- */
- if (-EIOCBQUEUED == rc)
- rc = wait_on_sync_kiocb(iocb);
if (rc >= 0) {
path = ecryptfs_dentry_to_lower_path(file->f_path.dentry);
touch_atime(path);
@@ -137,7 +130,7 @@ struct kmem_cache *ecryptfs_file_info_cache;
static int read_or_initialize_metadata(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_crypt_stat *crypt_stat;
int rc;
@@ -365,9 +358,7 @@ const struct file_operations ecryptfs_dir_fops = {
const struct file_operations ecryptfs_main_fops = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = ecryptfs_read_update_atime,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.iterate = ecryptfs_readdir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index b08b5187f662..fc850b55db67 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -41,13 +41,13 @@ static struct dentry *lock_parent(struct dentry *dentry)
struct dentry *dir;
dir = dget_parent(dentry);
- mutex_lock_nested(&(dir->d_inode->i_mutex), I_MUTEX_PARENT);
+ mutex_lock_nested(&(d_inode(dir)->i_mutex), I_MUTEX_PARENT);
return dir;
}
static void unlock_dir(struct dentry *dir)
{
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
dput(dir);
}
@@ -131,7 +131,7 @@ struct inode *ecryptfs_get_inode(struct inode *lower_inode,
static int ecryptfs_interpose(struct dentry *lower_dentry,
struct dentry *dentry, struct super_block *sb)
{
- struct inode *inode = ecryptfs_get_inode(lower_dentry->d_inode, sb);
+ struct inode *inode = ecryptfs_get_inode(d_inode(lower_dentry), sb);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -189,21 +189,21 @@ ecryptfs_do_create(struct inode *directory_inode,
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, true);
+ rc = vfs_create(d_inode(lower_dir_dentry), lower_dentry, mode, true);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
inode = ERR_PTR(rc);
goto out_lock;
}
- inode = __ecryptfs_get_inode(lower_dentry->d_inode,
+ inode = __ecryptfs_get_inode(d_inode(lower_dentry),
directory_inode->i_sb);
if (IS_ERR(inode)) {
- vfs_unlink(lower_dir_dentry->d_inode, lower_dentry, NULL);
+ vfs_unlink(d_inode(lower_dir_dentry), lower_dentry, NULL);
goto out_lock;
}
- fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
- fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
+ fsstack_copy_attr_times(directory_inode, d_inode(lower_dir_dentry));
+ fsstack_copy_inode_size(directory_inode, d_inode(lower_dir_dentry));
out_lock:
unlock_dir(lower_dir_dentry);
return inode;
@@ -332,7 +332,7 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry,
struct inode *dir_inode)
{
- struct inode *inode, *lower_inode = lower_dentry->d_inode;
+ struct inode *inode, *lower_inode = d_inode(lower_dentry);
struct ecryptfs_dentry_info *dentry_info;
struct vfsmount *lower_mnt;
int rc = 0;
@@ -347,14 +347,14 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
}
lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
- fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode);
+ fsstack_copy_attr_atime(dir_inode, d_inode(lower_dentry->d_parent));
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
dentry_info->lower_path.mnt = lower_mnt;
dentry_info->lower_path.dentry = lower_dentry;
- if (!lower_dentry->d_inode) {
+ if (d_really_is_negative(lower_dentry)) {
/* We want to add because we couldn't find in lower */
d_add(dentry, NULL);
return 0;
@@ -400,11 +400,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
int rc = 0;
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
- mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(lower_dir_dentry)->i_mutex);
lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
lower_dir_dentry,
ecryptfs_dentry->d_name.len);
- mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(lower_dir_dentry)->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
@@ -412,7 +412,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
ecryptfs_dentry);
goto out;
}
- if (lower_dentry->d_inode)
+ if (d_really_is_positive(lower_dentry))
goto interpose;
mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
@@ -429,11 +429,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
"filename; rc = [%d]\n", __func__, rc);
goto out;
}
- mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(lower_dir_dentry)->i_mutex);
lower_dentry = lookup_one_len(encrypted_and_encoded_name,
lower_dir_dentry,
encrypted_and_encoded_name_size);
- mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(lower_dir_dentry)->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
@@ -458,24 +458,24 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
u64 file_size_save;
int rc;
- file_size_save = i_size_read(old_dentry->d_inode);
+ file_size_save = i_size_read(d_inode(old_dentry));
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
dget(lower_old_dentry);
dget(lower_new_dentry);
lower_dir_dentry = lock_parent(lower_new_dentry);
- rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
+ rc = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
lower_new_dentry, NULL);
- if (rc || !lower_new_dentry->d_inode)
+ if (rc || d_really_is_negative(lower_new_dentry))
goto out_lock;
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
if (rc)
goto out_lock;
- fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
- fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
- set_nlink(old_dentry->d_inode,
- ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink);
- i_size_write(new_dentry->d_inode, file_size_save);
+ fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
+ fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry));
+ set_nlink(d_inode(old_dentry),
+ ecryptfs_inode_to_lower(d_inode(old_dentry))->i_nlink);
+ i_size_write(d_inode(new_dentry), file_size_save);
out_lock:
unlock_dir(lower_dir_dentry);
dput(lower_new_dentry);
@@ -485,7 +485,7 @@ out_lock:
static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
{
- return ecryptfs_do_unlink(dir, dentry, dentry->d_inode);
+ return ecryptfs_do_unlink(dir, dentry, d_inode(dentry));
}
static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
@@ -510,20 +510,20 @@ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
strlen(symname));
if (rc)
goto out_lock;
- rc = vfs_symlink(lower_dir_dentry->d_inode, lower_dentry,
+ rc = vfs_symlink(d_inode(lower_dir_dentry), lower_dentry,
encoded_symname);
kfree(encoded_symname);
- if (rc || !lower_dentry->d_inode)
+ if (rc || d_really_is_negative(lower_dentry))
goto out_lock;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out_lock;
- fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
- fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
+ fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
+ fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry));
out_lock:
unlock_dir(lower_dir_dentry);
dput(lower_dentry);
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
@@ -536,18 +536,18 @@ static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode);
- if (rc || !lower_dentry->d_inode)
+ rc = vfs_mkdir(d_inode(lower_dir_dentry), lower_dentry, mode);
+ if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
- fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
- fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
- set_nlink(dir, lower_dir_dentry->d_inode->i_nlink);
+ fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
+ fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry));
+ set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
out:
unlock_dir(lower_dir_dentry);
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
@@ -562,12 +562,12 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
dget(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
dget(lower_dentry);
- rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
+ rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
dput(lower_dentry);
- if (!rc && dentry->d_inode)
- clear_nlink(dentry->d_inode);
- fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
- set_nlink(dir, lower_dir_dentry->d_inode->i_nlink);
+ if (!rc && d_really_is_positive(dentry))
+ clear_nlink(d_inode(dentry));
+ fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
+ set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
unlock_dir(lower_dir_dentry);
if (!rc)
d_drop(dentry);
@@ -584,17 +584,17 @@ ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev);
- if (rc || !lower_dentry->d_inode)
+ rc = vfs_mknod(d_inode(lower_dir_dentry), lower_dentry, mode, dev);
+ if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
- fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
- fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
+ fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
+ fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry));
out:
unlock_dir(lower_dir_dentry);
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
@@ -617,7 +617,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dget(lower_new_dentry);
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
- target_inode = new_dentry->d_inode;
+ target_inode = d_inode(new_dentry);
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
/* source should not be ancestor of target */
if (trap == lower_old_dentry) {
@@ -629,17 +629,17 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
rc = -ENOTEMPTY;
goto out_lock;
}
- rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
- lower_new_dir_dentry->d_inode, lower_new_dentry,
+ rc = vfs_rename(d_inode(lower_old_dir_dentry), lower_old_dentry,
+ d_inode(lower_new_dir_dentry), lower_new_dentry,
NULL, 0);
if (rc)
goto out_lock;
if (target_inode)
fsstack_copy_attr_all(target_inode,
ecryptfs_inode_to_lower(target_inode));
- fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
+ fsstack_copy_attr_all(new_dir, d_inode(lower_new_dir_dentry));
if (new_dir != old_dir)
- fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
+ fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
out_lock:
unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
dput(lower_new_dir_dentry);
@@ -662,7 +662,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
return ERR_PTR(-ENOMEM);
old_fs = get_fs();
set_fs(get_ds());
- rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
+ rc = d_inode(lower_dentry)->i_op->readlink(lower_dentry,
(char __user *)lower_buf,
PATH_MAX);
set_fs(old_fs);
@@ -681,8 +681,8 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
char *buf = ecryptfs_readlink_lower(dentry, &len);
if (IS_ERR(buf))
goto out;
- fsstack_copy_attr_atime(dentry->d_inode,
- ecryptfs_dentry_to_lower(dentry)->d_inode);
+ fsstack_copy_attr_atime(d_inode(dentry),
+ d_inode(ecryptfs_dentry_to_lower(dentry)));
buf[len] = '\0';
out:
nd_set_link(nd, buf);
@@ -738,7 +738,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
struct iattr *lower_ia)
{
int rc = 0;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ecryptfs_crypt_stat *crypt_stat;
loff_t i_size = i_size_read(inode);
loff_t lower_size_before_truncate;
@@ -751,7 +751,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
rc = ecryptfs_get_lower_file(dentry, inode);
if (rc)
return rc;
- crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
+ crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
/* Switch on growing or shrinking file */
if (ia->ia_size > i_size) {
char zero[] = { 0x00 };
@@ -858,7 +858,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
struct iattr lower_ia = { .ia_valid = 0 };
int rc;
- rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
+ rc = ecryptfs_inode_newsize_ok(d_inode(dentry), new_length);
if (rc)
return rc;
@@ -866,9 +866,9 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
- mutex_lock(&lower_dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(lower_dentry)->i_mutex);
rc = notify_change(lower_dentry, &lower_ia, NULL);
- mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(lower_dentry)->i_mutex);
}
return rc;
}
@@ -900,10 +900,10 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
struct inode *lower_inode;
struct ecryptfs_crypt_stat *crypt_stat;
- crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
+ crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
ecryptfs_init_crypt_stat(crypt_stat);
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
lower_inode = ecryptfs_inode_to_lower(inode);
lower_dentry = ecryptfs_dentry_to_lower(dentry);
mutex_lock(&crypt_stat->cs_mutex);
@@ -967,9 +967,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
lower_ia.ia_valid &= ~ATTR_MODE;
- mutex_lock(&lower_dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(lower_dentry)->i_mutex);
rc = notify_change(lower_dentry, &lower_ia, NULL);
- mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(lower_dentry)->i_mutex);
out:
fsstack_copy_attr_all(inode, lower_inode);
return rc;
@@ -983,7 +983,7 @@ static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
- generic_fillattr(dentry->d_inode, stat);
+ generic_fillattr(d_inode(dentry), stat);
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
char *target;
size_t targetsiz;
@@ -1007,9 +1007,9 @@ static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat);
if (!rc) {
- fsstack_copy_attr_all(dentry->d_inode,
- ecryptfs_inode_to_lower(dentry->d_inode));
- generic_fillattr(dentry->d_inode, stat);
+ fsstack_copy_attr_all(d_inode(dentry),
+ ecryptfs_inode_to_lower(d_inode(dentry)));
+ generic_fillattr(d_inode(dentry), stat);
stat->blocks = lower_stat.blocks;
}
return rc;
@@ -1023,14 +1023,14 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
struct dentry *lower_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_inode->i_op->setxattr) {
+ if (!d_inode(lower_dentry)->i_op->setxattr) {
rc = -EOPNOTSUPP;
goto out;
}
rc = vfs_setxattr(lower_dentry, name, value, size, flags);
- if (!rc && dentry->d_inode)
- fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
+ if (!rc && d_really_is_positive(dentry))
+ fsstack_copy_attr_all(d_inode(dentry), d_inode(lower_dentry));
out:
return rc;
}
@@ -1041,14 +1041,14 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
{
int rc = 0;
- if (!lower_dentry->d_inode->i_op->getxattr) {
+ if (!d_inode(lower_dentry)->i_op->getxattr) {
rc = -EOPNOTSUPP;
goto out;
}
- mutex_lock(&lower_dentry->d_inode->i_mutex);
- rc = lower_dentry->d_inode->i_op->getxattr(lower_dentry, name, value,
+ mutex_lock(&d_inode(lower_dentry)->i_mutex);
+ rc = d_inode(lower_dentry)->i_op->getxattr(lower_dentry, name, value,
size);
- mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(lower_dentry)->i_mutex);
out:
return rc;
}
@@ -1068,13 +1068,13 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
struct dentry *lower_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_inode->i_op->listxattr) {
+ if (!d_inode(lower_dentry)->i_op->listxattr) {
rc = -EOPNOTSUPP;
goto out;
}
- mutex_lock(&lower_dentry->d_inode->i_mutex);
- rc = lower_dentry->d_inode->i_op->listxattr(lower_dentry, list, size);
- mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(lower_dentry)->i_mutex);
+ rc = d_inode(lower_dentry)->i_op->listxattr(lower_dentry, list, size);
+ mutex_unlock(&d_inode(lower_dentry)->i_mutex);
out:
return rc;
}
@@ -1085,13 +1085,13 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
struct dentry *lower_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_inode->i_op->removexattr) {
+ if (!d_inode(lower_dentry)->i_op->removexattr) {
rc = -EOPNOTSUPP;
goto out;
}
- mutex_lock(&lower_dentry->d_inode->i_mutex);
- rc = lower_dentry->d_inode->i_op->removexattr(lower_dentry, name);
- mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(lower_dentry)->i_mutex);
+ rc = d_inode(lower_dentry)->i_op->removexattr(lower_dentry, name);
+ mutex_unlock(&d_inode(lower_dentry)->i_mutex);
out:
return rc;
}
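
The hunks above replace open-coded ->d_inode dereferences with the dcache accessors used throughout the rest of this series. For orientation, those accessors are thin static inlines; the following is a sketch of their shape, not a verbatim quote of include/linux/dcache.h:

static inline struct inode *d_inode(const struct dentry *dentry)
{
        return dentry->d_inode;
}

static inline bool d_really_is_negative(const struct dentry *dentry)
{
        return dentry->d_inode == NULL;
}

static inline bool d_really_is_positive(const struct dentry *dentry)
{
        return dentry->d_inode != NULL;
}

The helpers keep the raw field access in one place and turn the NULL checks into named positive/negative tests, which is why the conversions read as d_really_is_negative(dentry) rather than !dentry->d_inode.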
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index f1ea610362c6..866bb18efefe 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -144,7 +144,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
/* Corresponding dput() and mntput() are done when the
* lower file is fput() when all eCryptfs files for the inode are
* released. */
- flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
+ flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
(*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
goto out;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index c095d3264259..4f4d0474bee9 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -546,11 +546,11 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out_free;
}
- if (check_ruid && !uid_eq(path.dentry->d_inode->i_uid, current_uid())) {
+ if (check_ruid && !uid_eq(d_inode(path.dentry)->i_uid, current_uid())) {
rc = -EPERM;
printk(KERN_ERR "Mount of device (uid: %d) not owned by "
"requested user (uid: %d)\n",
- i_uid_read(path.dentry->d_inode),
+ i_uid_read(d_inode(path.dentry)),
from_kuid(&init_user_ns, current_uid()));
goto out_free;
}
@@ -584,7 +584,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out_free;
}
- inode = ecryptfs_get_inode(path.dentry->d_inode, s);
+ inode = ecryptfs_get_inode(d_inode(path.dentry), s);
rc = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free;
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 4626976794e7..cf208522998e 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -420,7 +420,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
void *xattr_virt;
struct dentry *lower_dentry =
ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_path.dentry;
- struct inode *lower_inode = lower_dentry->d_inode;
+ struct inode *lower_inode = d_inode(lower_dentry);
int rc;
if (!lower_inode->i_op->getxattr || !lower_inode->i_op->setxattr) {
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 07ab49745e31..3381b9da9ee6 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -145,12 +145,12 @@ out:
static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
{
- struct efivar_entry *var = dentry->d_inode->i_private;
+ struct efivar_entry *var = d_inode(dentry)->i_private;
if (efivar_entry_delete(var))
return -EINVAL;
- drop_nlink(dentry->d_inode);
+ drop_nlink(d_inode(dentry));
dput(dentry);
return 0;
};
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index ddbce42548c9..86a2121828c3 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
int len, i;
int err = -ENOMEM;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return err;
@@ -144,7 +144,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
- inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0);
+ inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
if (!inode)
goto fail_name;
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index bbee8f063dfa..40ba9cc41bf7 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -111,9 +111,9 @@ struct dentry *efs_get_parent(struct dentry *child)
struct dentry *parent = ERR_PTR(-ENOENT);
efs_ino_t ino;
- ino = efs_find_entry(child->d_inode, "..", 2);
+ ino = efs_find_entry(d_inode(child), "..", 2);
if (ino)
- parent = d_obtain_alias(efs_iget(child->d_inode->i_sb, ino));
+ parent = d_obtain_alias(efs_iget(d_inode(child)->i_sb, ino));
return parent;
}
diff --git a/fs/exec.c b/fs/exec.c
index c7f9b733406d..1977c2a553ac 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -659,6 +659,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
if (stack_base > STACK_SIZE_MAX)
stack_base = STACK_SIZE_MAX;
+ /* Add space for stack randomization. */
+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+
/* Make sure we didn't let the argument array grow too large. */
if (vma->vm_end - vma->vm_start > stack_base)
return -ENOMEM;
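
For a feel of how much headroom the new line reserves before the "grew too large" check, a small standalone calculation; the mask below is an assumed stand-in for an arch-specific STACK_RND_MASK, not a value defined in this patch:

#include <stdio.h>

int main(void)
{
        unsigned long long stack_rnd_mask = 0x3fffffULL; /* assumed, arch-specific */
        unsigned int page_shift = 12;                    /* 4 KiB pages */

        /* Slack added to stack_base before the argument-array size check. */
        printf("randomization slack: %llu MiB\n",
               (stack_rnd_mask << page_shift) >> 20);
        return 0;
}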
@@ -926,10 +929,14 @@ static int de_thread(struct task_struct *tsk)
if (!thread_group_leader(tsk)) {
struct task_struct *leader = tsk->group_leader;
- sig->notify_count = -1; /* for exit_notify() */
for (;;) {
threadgroup_change_begin(tsk);
write_lock_irq(&tasklist_lock);
+ /*
+ * Do this under tasklist_lock to ensure that
+ * exit_notify() can't miss ->group_exit_task
+ */
+ sig->notify_count = -1;
if (likely(leader->exit_state))
break;
__set_current_state(TASK_KILLABLE);
@@ -1078,7 +1085,13 @@ int flush_old_exec(struct linux_binprm * bprm)
if (retval)
goto out;
+ /*
+ * Must be called _before_ exec_mmap() as bprm->mm is
+ * not visible until then. This also enables the update
+ * to be lockless.
+ */
set_mm_exe_file(bprm->mm, bprm->file);
+
/*
* Release all of the old mmap stuff
*/
@@ -1265,6 +1278,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
spin_unlock(&p->fs->lock);
}
+static void bprm_fill_uid(struct linux_binprm *bprm)
+{
+ struct inode *inode;
+ unsigned int mode;
+ kuid_t uid;
+ kgid_t gid;
+
+ /* clear any previous set[ug]id data from a previous binary */
+ bprm->cred->euid = current_euid();
+ bprm->cred->egid = current_egid();
+
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+ return;
+
+ if (task_no_new_privs(current))
+ return;
+
+ inode = file_inode(bprm->file);
+ mode = READ_ONCE(inode->i_mode);
+ if (!(mode & (S_ISUID|S_ISGID)))
+ return;
+
+ /* Be careful if suid/sgid is set */
+ mutex_lock(&inode->i_mutex);
+
+ /* reload atomically mode/uid/gid now that lock held */
+ mode = inode->i_mode;
+ uid = inode->i_uid;
+ gid = inode->i_gid;
+ mutex_unlock(&inode->i_mutex);
+
+ /* We ignore suid/sgid if there are no mappings for them in the ns */
+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
+ !kgid_has_mapping(bprm->cred->user_ns, gid))
+ return;
+
+ if (mode & S_ISUID) {
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+ bprm->cred->euid = uid;
+ }
+
+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+ bprm->cred->egid = gid;
+ }
+}
+
/*
* Fill the binprm structure from the inode.
* Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
@@ -1273,36 +1333,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
*/
int prepare_binprm(struct linux_binprm *bprm)
{
- struct inode *inode = file_inode(bprm->file);
- umode_t mode = inode->i_mode;
int retval;
-
- /* clear any previous set[ug]id data from a previous binary */
- bprm->cred->euid = current_euid();
- bprm->cred->egid = current_egid();
-
- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
- !task_no_new_privs(current) &&
- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
- /* Set-uid? */
- if (mode & S_ISUID) {
- bprm->per_clear |= PER_CLEAR_ON_SETID;
- bprm->cred->euid = inode->i_uid;
- }
-
- /* Set-gid? */
- /*
- * If setgid is set but no group execute bit then this
- * is a candidate for mandatory locking, not a setgid
- * executable.
- */
- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
- bprm->per_clear |= PER_CLEAR_ON_SETID;
- bprm->cred->egid = inode->i_gid;
- }
- }
+ bprm_fill_uid(bprm);
/* fill in binprm security blob */
retval = security_bprm_set_creds(bprm);
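
bprm_fill_uid() above keeps the long-standing rule that set-gid only triggers a credential switch when group-execute is also set (set-gid without group execute marks a mandatory-locking candidate, per the comment the refactor drops). A standalone illustration of that mode test, with a hypothetical helper name:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/* Hypothetical helper mirroring the S_ISGID|S_IXGRP test in bprm_fill_uid(). */
static bool sgid_selects_credential_switch(mode_t mode)
{
        return (mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP);
}

int main(void)
{
        printf("%d\n", sgid_selects_credential_switch(02755)); /* sgid + g+x -> 1 */
        printf("%d\n", sgid_selects_credential_switch(02645)); /* sgid, no g+x -> 0 */
        return 0;
}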
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index d7defd557601..4deb0b05b011 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -379,7 +379,7 @@ ino_t exofs_parent_ino(struct dentry *child)
struct exofs_dir_entry *de;
ino_t ino;
- de = exofs_dotdot(child->d_inode, &page);
+ de = exofs_dotdot(d_inode(child), &page);
if (!de)
return 0;
@@ -429,7 +429,7 @@ int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de,
int exofs_add_link(struct dentry *dentry, struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const unsigned char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned chunk_size = exofs_chunk_size(dir);
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 1a376b42d305..906de66e8e7e 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -67,8 +67,6 @@ static int exofs_flush(struct file *file, fl_owner_t id)
const struct file_operations exofs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index a198e94813fe..786e4cc8c889 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -963,8 +963,8 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset,
/* TODO: Should be easy enough to do properly */
-static ssize_t exofs_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
return 0;
}
@@ -1028,7 +1028,7 @@ static int _do_truncate(struct inode *inode, loff_t newsize)
*/
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
/* if we are about to modify an object, and it hasn't been
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 28907460e8fa..5ae25e431191 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -141,7 +141,7 @@ out_fail:
static int exofs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
inode->i_ctime = CURRENT_TIME;
inode_inc_link_count(inode);
@@ -191,7 +191,7 @@ out_dir:
static int exofs_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct exofs_dir_entry *de;
struct page *page;
int err = -ENOENT;
@@ -213,7 +213,7 @@ out:
static int exofs_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int err = -ENOTEMPTY;
if (exofs_empty_dir(inode)) {
@@ -230,8 +230,8 @@ static int exofs_rmdir(struct inode *dir, struct dentry *dentry)
static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct page *dir_page = NULL;
struct exofs_dir_entry *dir_de = NULL;
struct page *old_page;
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index fcc2e565f540..b795c567b5e1 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -958,7 +958,7 @@ static struct dentry *exofs_get_parent(struct dentry *child)
if (!ino)
return ERR_PTR(-ESTALE);
- return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino));
+ return d_obtain_alias(exofs_iget(d_inode(child)->i_sb, ino));
}
static struct inode *exofs_nfs_get_inode(struct super_block *sb,
diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c
index 832e2624b80b..6f6f3a4c1365 100644
--- a/fs/exofs/symlink.c
+++ b/fs/exofs/symlink.c
@@ -37,7 +37,7 @@
static void *exofs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct exofs_i_info *oi = exofs_i(dentry->d_inode);
+ struct exofs_i_info *oi = exofs_i(d_inode(dentry));
nd_set_link(nd, (char *)oi->i_data);
return NULL;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 6e1d4ab09d72..796b491e6978 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -486,7 +486,7 @@ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
*/
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned chunk_size = ext2_chunk_size(dir);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 678f9ab08c48..8d15febd0aa3 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -793,7 +793,6 @@ extern int ext2_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
extern const struct inode_operations ext2_file_inode_operations;
extern const struct file_operations ext2_file_operations;
-extern const struct file_operations ext2_dax_file_operations;
/* inode.c */
extern const struct address_space_operations ext2_aops;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index e31701713516..3a0a6c6406d0 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -39,6 +39,7 @@ static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
static const struct vm_operations_struct ext2_dax_vm_ops = {
.fault = ext2_dax_fault,
.page_mkwrite = ext2_dax_mkwrite,
+ .pfn_mkwrite = dax_pfn_mkwrite,
};
static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
@@ -92,8 +93,6 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
*/
const struct file_operations ext2_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.unlocked_ioctl = ext2_ioctl,
@@ -108,24 +107,6 @@ const struct file_operations ext2_file_operations = {
.splice_write = iter_file_splice_write,
};
-#ifdef CONFIG_FS_DAX
-const struct file_operations ext2_dax_file_operations = {
- .llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .unlocked_ioctl = ext2_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ext2_compat_ioctl,
-#endif
- .mmap = ext2_file_mmap,
- .open = dquot_file_open,
- .release = ext2_release_file,
- .fsync = ext2_fsync,
-};
-#endif
-
const struct inode_operations ext2_file_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
.setxattr = generic_setxattr,
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 6c14bb8322fa..5c04a0ddea80 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -278,7 +278,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
avefreeb = free_blocks / ngroups;
ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
- if ((parent == sb->s_root->d_inode) ||
+ if ((parent == d_inode(sb->s_root)) ||
(EXT2_I(parent)->i_flags & EXT2_TOPDIR_FL)) {
struct ext2_group_desc *best_desc = NULL;
int best_ndir = inodes_per_group;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 6434bc000125..f460ae36d5b7 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -31,7 +31,7 @@
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/namei.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"
@@ -851,8 +851,7 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
}
static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -861,12 +860,12 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
ssize_t ret;
if (IS_DAX(inode))
- ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block,
- NULL, DIO_LOCKING);
+ ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block, NULL,
+ DIO_LOCKING);
else
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+ ret = blockdev_direct_IO(iocb, inode, iter, offset,
ext2_get_block);
- if (ret < 0 && (rw & WRITE))
+ if (ret < 0 && iov_iter_rw(iter) == WRITE)
ext2_write_failed(mapping, offset + count);
return ret;
}
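
The same signature change recurs in ext3 and exofs in this series: ->direct_IO loses its rw argument and derives the transfer direction from the iterator via iov_iter_rw(). A hedged kernel-context sketch of the resulting shape, where demo_get_block() and demo_write_failed() stand in for a filesystem's own helpers:

static ssize_t demo_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t offset)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;

        ret = blockdev_direct_IO(iocb, inode, iter, offset, demo_get_block);
        /* On a failed write, trim blocks instantiated past i_size. */
        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                demo_write_failed(inode->i_mapping, offset + count);
        return ret;
}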
@@ -1388,10 +1387,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext2_file_inode_operations;
- if (test_opt(inode->i_sb, DAX)) {
- inode->i_mapping->a_ops = &ext2_aops;
- inode->i_fop = &ext2_dax_file_operations;
- } else if (test_opt(inode->i_sb, NOBH)) {
+ if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
} else {
@@ -1548,7 +1544,7 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
error = inode_change_ok(inode, iattr);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 148f6e3789ea..3e074a9ccbe6 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -79,10 +79,10 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, uns
struct dentry *ext2_get_parent(struct dentry *child)
{
struct qstr dotdot = QSTR_INIT("..", 2);
- unsigned long ino = ext2_inode_by_name(child->d_inode, &dotdot);
+ unsigned long ino = ext2_inode_by_name(d_inode(child), &dotdot);
if (!ino)
return ERR_PTR(-ENOENT);
- return d_obtain_alias(ext2_iget(child->d_inode->i_sb, ino));
+ return d_obtain_alias(ext2_iget(d_inode(child)->i_sb, ino));
}
/*
@@ -104,10 +104,7 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode
return PTR_ERR(inode);
inode->i_op = &ext2_file_inode_operations;
- if (test_opt(inode->i_sb, DAX)) {
- inode->i_mapping->a_ops = &ext2_aops;
- inode->i_fop = &ext2_dax_file_operations;
- } else if (test_opt(inode->i_sb, NOBH)) {
+ if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
} else {
@@ -125,10 +122,7 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
return PTR_ERR(inode);
inode->i_op = &ext2_file_inode_operations;
- if (test_opt(inode->i_sb, DAX)) {
- inode->i_mapping->a_ops = &ext2_aops;
- inode->i_fop = &ext2_dax_file_operations;
- } else if (test_opt(inode->i_sb, NOBH)) {
+ if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
} else {
@@ -214,7 +208,7 @@ out_fail:
static int ext2_link (struct dentry * old_dentry, struct inode * dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int err;
dquot_initialize(dir);
@@ -281,7 +275,7 @@ out_dir:
static int ext2_unlink(struct inode * dir, struct dentry *dentry)
{
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
struct ext2_dir_entry_2 * de;
struct page * page;
int err = -ENOENT;
@@ -305,7 +299,7 @@ out:
static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
{
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
int err = -ENOTEMPTY;
if (ext2_empty_dir(inode)) {
@@ -322,8 +316,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
struct inode * new_dir, struct dentry * new_dentry )
{
- struct inode * old_inode = old_dentry->d_inode;
- struct inode * new_inode = new_dentry->d_inode;
+ struct inode * old_inode = d_inode(old_dentry);
+ struct inode * new_inode = d_inode(new_dentry);
struct page * dir_page = NULL;
struct ext2_dir_entry_2 * dir_de = NULL;
struct page * old_page;
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index 565cf817bbf1..20608f17c2e5 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -23,7 +23,7 @@
static void *ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct ext2_inode_info *ei = EXT2_I(dentry->d_inode);
+ struct ext2_inode_info *ei = EXT2_I(d_inode(dentry));
nd_set_link(nd, (char *)ei->i_data);
return NULL;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 91426141c33a..0b6bfd3a398b 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -243,7 +243,7 @@ cleanup:
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct buffer_head *bh = NULL;
struct ext2_xattr_entry *entry;
char *end;
@@ -319,7 +319,7 @@ cleanup:
/*
* Inode operation listxattr()
*
- * dentry->d_inode->i_mutex: don't care
+ * d_inode(dentry)->i_mutex: don't care
*/
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index c0ebc4db8849..702fc6840246 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -28,7 +28,7 @@ ext2_xattr_security_get(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name,
+ return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name,
buffer, size);
}
@@ -38,7 +38,7 @@ ext2_xattr_security_set(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name,
+ return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name,
value, size, flags);
}
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 7e192574c001..42b6e9874bcc 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -32,7 +32,7 @@ ext2_xattr_trusted_get(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name,
+ return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name,
buffer, size);
}
@@ -42,7 +42,7 @@ ext2_xattr_trusted_set(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name,
+ return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index f470e44c4b8d..ecdc4605192c 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -36,7 +36,7 @@ ext2_xattr_user_get(struct dentry *dentry, const char *name,
return -EINVAL;
if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_USER,
+ return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_USER,
name, buffer, size);
}
@@ -49,7 +49,7 @@ ext2_xattr_user_set(struct dentry *dentry, const char *name,
if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_USER,
+ return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_USER,
name, value, size, flags);
}
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index a062fa1e1b11..3b8f650de22c 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -50,8 +50,6 @@ static int ext3_release_file (struct inode * inode, struct file * filp)
const struct file_operations ext3_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.unlocked_ioctl = ext3_ioctl,
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index a1b810230cc5..3ad242e5840e 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -210,7 +210,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
avefreeb = freeb / ngroups;
ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
- if ((parent == sb->s_root->d_inode) ||
+ if ((parent == d_inode(sb->s_root)) ||
(EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) {
int best_ndir = inodes_per_group;
int best_group = -1;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2c6ccc49ba27..2ee2dc4351d1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -27,7 +27,7 @@
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/namei.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "ext3.h"
#include "xattr.h"
#include "acl.h"
@@ -1820,8 +1820,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
* crashes then stale disk data _may_ be exposed inside the file. But current
* VFS code falls back into buffered path in that case so we are safe.
*/
-static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -1832,9 +1832,9 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
int retries = 0;
- trace_ext3_direct_IO_enter(inode, offset, count, rw);
+ trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
- if (rw == WRITE) {
+ if (iov_iter_rw(iter) == WRITE) {
loff_t final_size = offset + count;
if (final_size > inode->i_size) {
@@ -1856,12 +1856,12 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
}
retry:
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely((rw & WRITE) && ret < 0)) {
+ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + count;
@@ -1908,7 +1908,7 @@ retry:
ret = err;
}
out:
- trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
+ trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
return ret;
}
@@ -3240,7 +3240,7 @@ int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
*/
int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error, rc = 0;
const unsigned int ia_valid = attr->ia_valid;
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index f197736dccfa..4264b9bd0002 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1049,19 +1049,19 @@ struct dentry *ext3_get_parent(struct dentry *child)
struct ext3_dir_entry_2 * de;
struct buffer_head *bh;
- bh = ext3_find_entry(child->d_inode, &dotdot, &de);
+ bh = ext3_find_entry(d_inode(child), &dotdot, &de);
if (!bh)
return ERR_PTR(-ENOENT);
ino = le32_to_cpu(de->inode);
brelse(bh);
- if (!ext3_valid_inum(child->d_inode->i_sb, ino)) {
- ext3_error(child->d_inode->i_sb, "ext3_get_parent",
+ if (!ext3_valid_inum(d_inode(child)->i_sb, ino)) {
+ ext3_error(d_inode(child)->i_sb, "ext3_get_parent",
"bad inode number: %lu", ino);
return ERR_PTR(-EIO);
}
- return d_obtain_alias(ext3_iget(child->d_inode->i_sb, ino));
+ return d_obtain_alias(ext3_iget(d_inode(child)->i_sb, ino));
}
#define S_SHIFT 12
@@ -1243,7 +1243,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct ext3_dir_entry_2 *de,
struct buffer_head * bh)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned long offset = 0;
@@ -1330,7 +1330,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct buffer_head *bh)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct buffer_head *bh2;
@@ -1435,7 +1435,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
struct buffer_head * bh;
struct ext3_dir_entry_2 *de;
struct super_block * sb;
@@ -1489,7 +1489,7 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct dx_entry *entries, *at;
struct dx_hash_info hinfo;
struct buffer_head * bh;
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
struct super_block * sb = dir->i_sb;
struct ext3_dir_entry_2 *de;
int err;
@@ -2111,7 +2111,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
dquot_initialize(dir);
- dquot_initialize(dentry->d_inode);
+ dquot_initialize(d_inode(dentry));
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
@@ -2125,7 +2125,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
@@ -2173,7 +2173,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
dquot_initialize(dir);
- dquot_initialize(dentry->d_inode);
+ dquot_initialize(d_inode(dentry));
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
@@ -2187,7 +2187,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
if (!bh)
goto end_unlink;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
@@ -2328,7 +2328,7 @@ static int ext3_link (struct dentry * old_dentry,
struct inode * dir, struct dentry *dentry)
{
handle_t *handle;
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int err, retries = 0;
if (inode->i_nlink >= EXT3_LINK_MAX)
@@ -2391,8 +2391,8 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
/* Initialize quotas before so that eventual writes go
* in separate transaction */
- if (new_dentry->d_inode)
- dquot_initialize(new_dentry->d_inode);
+ if (d_really_is_positive(new_dentry))
+ dquot_initialize(d_inode(new_dentry));
handle = ext3_journal_start(old_dir, 2 *
EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
@@ -2409,12 +2409,12 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
* and merrily kill the link to whatever was created under the
* same name. Goodbye sticky bit ;-<
*/
- old_inode = old_dentry->d_inode;
+ old_inode = d_inode(old_dentry);
retval = -ENOENT;
if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
goto end_rename;
- new_inode = new_dentry->d_inode;
+ new_inode = d_inode(new_dentry);
new_bh = ext3_find_entry(new_dir, &new_dentry->d_name, &new_de);
if (new_bh) {
if (!new_inode) {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index d4dbf3c259b3..a9312f0a54e5 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -789,7 +789,7 @@ static const struct quotactl_ops ext3_qctl_operations = {
.quota_on = ext3_quota_on,
.quota_off = dquot_quota_off,
.quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
+ .get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
@@ -1170,7 +1170,7 @@ static int parse_options (char *options, struct super_block *sb,
return 0;
}
- journal_inode = path.dentry->d_inode;
+ journal_inode = d_inode(path.dentry);
if (!S_ISBLK(journal_inode->i_mode)) {
ext3_msg(sb, KERN_ERR, "error: journal path %s "
"is not a block device", journal_path);
@@ -2947,7 +2947,7 @@ static int ext3_write_info(struct super_block *sb, int type)
handle_t *handle;
/* Data block + inode block */
- handle = ext3_journal_start(sb->s_root->d_inode, 2);
+ handle = ext3_journal_start(d_inode(sb->s_root), 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit_info(sb, type);
@@ -2994,7 +2994,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
* When we journal data on quota file, we have to flush journal to see
* all updates to the file when we bypass pagecache...
*/
- if (ext3_should_journal_data(path->dentry->d_inode)) {
+ if (ext3_should_journal_data(d_inode(path->dentry))) {
/*
* We don't need to lock updates but journal_flush() could
* otherwise be livelocked...
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
index 6b01c3eab1f3..ea96df3c58db 100644
--- a/fs/ext3/symlink.c
+++ b/fs/ext3/symlink.c
@@ -23,7 +23,7 @@
static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
+ struct ext3_inode_info *ei = EXT3_I(d_inode(dentry));
nd_set_link(nd, (char*)ei->i_data);
return NULL;
}
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index c6874be6d58b..7cf36501ccf4 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -137,7 +137,7 @@ ext3_xattr_handler(int name_index)
/*
* Inode operation listxattr()
*
- * dentry->d_inode->i_mutex: don't care
+ * d_inode(dentry)->i_mutex: don't care
*/
ssize_t
ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
@@ -355,7 +355,7 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
static int
ext3_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct buffer_head *bh = NULL;
int error;
@@ -391,7 +391,7 @@ cleanup:
static int
ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ext3_xattr_ibody_header *header;
struct ext3_inode *raw_inode;
struct ext3_iloc iloc;
@@ -432,7 +432,7 @@ ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int i_error, b_error;
- down_read(&EXT3_I(dentry->d_inode)->xattr_sem);
+ down_read(&EXT3_I(d_inode(dentry))->xattr_sem);
i_error = ext3_xattr_ibody_list(dentry, buffer, buffer_size);
if (i_error < 0) {
b_error = 0;
@@ -445,7 +445,7 @@ ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
if (b_error < 0)
i_error = 0;
}
- up_read(&EXT3_I(dentry->d_inode)->xattr_sem);
+ up_read(&EXT3_I(d_inode(dentry))->xattr_sem);
return i_error + b_error;
}
@@ -546,8 +546,7 @@ ext3_xattr_set_entry(struct ext3_xattr_info *i, struct ext3_xattr_search *s)
free += EXT3_XATTR_LEN(name_len);
}
if (i->value) {
- if (free < EXT3_XATTR_SIZE(i->value_len) ||
- free < EXT3_XATTR_LEN(name_len) +
+ if (free < EXT3_XATTR_LEN(name_len) +
EXT3_XATTR_SIZE(i->value_len))
return -ENOSPC;
}
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index 722c2bf9645d..c9506d5e3b13 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -29,7 +29,7 @@ ext3_xattr_security_get(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
+ return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
name, buffer, size);
}
@@ -39,7 +39,7 @@ ext3_xattr_security_set(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
+ return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
name, value, size, flags);
}
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index d75727cc67fa..206cc66dc285 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -32,7 +32,7 @@ ext3_xattr_trusted_get(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_TRUSTED,
+ return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED,
name, buffer, size);
}
@@ -42,7 +42,7 @@ ext3_xattr_trusted_set(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_TRUSTED, name,
+ return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index 5612af3567e0..021508ad1616 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -34,7 +34,7 @@ ext3_xattr_user_get(struct dentry *dentry, const char *name, void *buffer,
return -EINVAL;
if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_USER,
+ return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_USER,
name, buffer, size);
}
@@ -46,7 +46,7 @@ ext3_xattr_user_set(struct dentry *dentry, const char *name,
return -EINVAL;
if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_USER,
+ return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_USER,
name, value, size, flags);
}
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index efea5d5c44ce..024f2284d3f6 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -64,6 +64,28 @@ config EXT4_FS_SECURITY
If you are not using a security module that requires using
extended attributes for file security labels, say N.
+config EXT4_ENCRYPTION
+ tristate "Ext4 Encryption"
+ depends on EXT4_FS
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_XTS
+ select CRYPTO_CTS
+ select CRYPTO_SHA256
+ select KEYS
+ select ENCRYPTED_KEYS
+ help
+ Enable encryption of ext4 files and directories. This
+ feature is similar to ecryptfs, but it is more memory
+ efficient since it avoids caching the encrypted and
+ decrypted pages in the page cache.
+
+config EXT4_FS_ENCRYPTION
+ bool
+ default y
+ depends on EXT4_ENCRYPTION
+
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 0310fec2ee3d..75285ea9aa05 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -8,7 +8,9 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
- xattr_trusted.o inline.o
+ xattr_trusted.o inline.o readpage.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o \
+ crypto_key.o crypto_fname.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index d40c8dbbb0d6..69b1e73026a5 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -4,11 +4,6 @@
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 83a6f497c4e0..955bf49a7945 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -14,7 +14,6 @@
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
@@ -641,8 +640,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
* fail EDQUOT for metadata, but we do account for it.
*/
if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
dquot_alloc_block_nofail(inode,
EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
}
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index b610779a958c..4a606afb171f 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -8,7 +8,6 @@
*/
#include <linux/buffer_head.h>
-#include <linux/jbd2.h>
#include "ext4.h"
unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 41eb9dcfac7e..3522340c7a99 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -16,7 +16,6 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#include "ext4.h"
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
new file mode 100644
index 000000000000..8ff15273ab0c
--- /dev/null
+++ b/fs/ext4/crypto.c
@@ -0,0 +1,558 @@
+/*
+ * linux/fs/ext4/crypto.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption functions for ext4
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ * Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ * Ildar Muslukhov, 2014
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <keys/user-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/crypto.h>
+#include <linux/ecryptfs.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock_types.h>
+
+#include "ext4_extents.h"
+#include "xattr.h"
+
+/* Encryption added and removed here! (L: */
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+ "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+ "Number of crypto contexts to preallocate");
+
+static mempool_t *ext4_bounce_page_pool;
+
+static LIST_HEAD(ext4_free_crypto_ctxs);
+static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
+
+/**
+ * ext4_release_crypto_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If there's a bounce page in the context, this frees that.
+ */
+void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
+{
+ unsigned long flags;
+
+ if (ctx->bounce_page) {
+ if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
+ __free_page(ctx->bounce_page);
+ else
+ mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
+ ctx->bounce_page = NULL;
+ }
+ ctx->control_page = NULL;
+ if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+ if (ctx->tfm)
+ crypto_free_tfm(ctx->tfm);
+ kfree(ctx);
+ } else {
+ spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
+ list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
+ spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
+ }
+}
+
+/**
+ * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
+ * @mask: The allocation mask.
+ *
+ * Return: An allocated and initialized encryption context on success. An error
+ * value or NULL otherwise.
+ */
+static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
+{
+ struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
+ mask);
+
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+ return ctx;
+}
+
+/**
+ * ext4_get_crypto_ctx() - Gets an encryption context
+ * @inode: The inode for which we are doing the crypto
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success; error
+ * value or NULL otherwise.
+ */
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+{
+ struct ext4_crypto_ctx *ctx = NULL;
+ int res = 0;
+ unsigned long flags;
+ struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;
+
+ if (!ext4_read_workqueue)
+ ext4_init_crypto();
+
+ /*
+ * We first try getting the ctx from a free list because in
+ * the common case the ctx will have an allocated and
+ * initialized crypto tfm, so it's probably a worthwhile
+ * optimization. For the bounce page, we first try getting it
+ * from the kernel allocator because that's just about as fast
+ * as getting it from a list and because a cache of free pages
+ * should generally be a "last resort" option for a filesystem
+ * to be able to do its job.
+ */
+ spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
+ ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
+ struct ext4_crypto_ctx, free_list);
+ if (ctx)
+ list_del(&ctx->free_list);
+ spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
+ if (!ctx) {
+ ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
+ if (IS_ERR(ctx)) {
+ res = PTR_ERR(ctx);
+ goto out;
+ }
+ ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ }
+
+ /* Allocate a new Crypto API context if we don't already have
+ * one or if it isn't the right mode. */
+ BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
+ if (ctx->tfm && (ctx->mode != key->mode)) {
+ crypto_free_tfm(ctx->tfm);
+ ctx->tfm = NULL;
+ ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
+ }
+ if (!ctx->tfm) {
+ switch (key->mode) {
+ case EXT4_ENCRYPTION_MODE_AES_256_XTS:
+ ctx->tfm = crypto_ablkcipher_tfm(
+ crypto_alloc_ablkcipher("xts(aes)", 0, 0));
+ break;
+ case EXT4_ENCRYPTION_MODE_AES_256_GCM:
+ /* TODO(mhalcrow): AEAD w/ gcm(aes);
+ * crypto_aead_setauthsize() */
+ ctx->tfm = ERR_PTR(-ENOTSUPP);
+ break;
+ default:
+ BUG();
+ }
+ if (IS_ERR_OR_NULL(ctx->tfm)) {
+ res = PTR_ERR(ctx->tfm);
+ ctx->tfm = NULL;
+ goto out;
+ }
+ ctx->mode = key->mode;
+ }
+ BUG_ON(key->size != ext4_encryption_key_size(key->mode));
+
+ /* There shouldn't be a bounce page attached to the crypto
+ * context at this point. */
+ BUG_ON(ctx->bounce_page);
+
+out:
+ if (res) {
+ if (!IS_ERR_OR_NULL(ctx))
+ ext4_release_crypto_ctx(ctx);
+ ctx = ERR_PTR(res);
+ }
+ return ctx;
+}
+
+struct workqueue_struct *ext4_read_workqueue;
+static DEFINE_MUTEX(crypto_init);
+
+/**
+ * ext4_exit_crypto() - Shutdown the ext4 encryption system
+ */
+void ext4_exit_crypto(void)
+{
+ struct ext4_crypto_ctx *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
+ if (pos->bounce_page) {
+ if (pos->flags &
+ EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
+ __free_page(pos->bounce_page);
+ } else {
+ mempool_free(pos->bounce_page,
+ ext4_bounce_page_pool);
+ }
+ }
+ if (pos->tfm)
+ crypto_free_tfm(pos->tfm);
+ kfree(pos);
+ }
+ INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
+ if (ext4_bounce_page_pool)
+ mempool_destroy(ext4_bounce_page_pool);
+ ext4_bounce_page_pool = NULL;
+ if (ext4_read_workqueue)
+ destroy_workqueue(ext4_read_workqueue);
+ ext4_read_workqueue = NULL;
+}
+
+/**
+ * ext4_init_crypto() - Set up for ext4 encryption.
+ *
+ * We only call this when we start accessing encrypted files, since it
+ * results in memory getting allocated that wouldn't otherwise be used.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int ext4_init_crypto(void)
+{
+ int i, res;
+
+ mutex_lock(&crypto_init);
+ if (ext4_read_workqueue)
+ goto already_initialized;
+ ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
+ if (!ext4_read_workqueue) {
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
+ struct ext4_crypto_ctx *ctx;
+
+ ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
+ if (IS_ERR(ctx)) {
+ res = PTR_ERR(ctx);
+ goto fail;
+ }
+ list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
+ }
+
+ ext4_bounce_page_pool =
+ mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+ if (!ext4_bounce_page_pool) {
+ res = -ENOMEM;
+ goto fail;
+ }
+already_initialized:
+ mutex_unlock(&crypto_init);
+ return 0;
+fail:
+ ext4_exit_crypto();
+ mutex_unlock(&crypto_init);
+ return res;
+}
+
+void ext4_restore_control_page(struct page *data_page)
+{
+ struct ext4_crypto_ctx *ctx =
+ (struct ext4_crypto_ctx *)page_private(data_page);
+
+ set_page_private(data_page, (unsigned long)NULL);
+ ClearPagePrivate(data_page);
+ unlock_page(data_page);
+ ext4_release_crypto_ctx(ctx);
+}
+
+/**
+ * ext4_crypt_complete() - The completion callback for page encryption
+ * @req: The asynchronous encryption request context
+ * @res: The result of the encryption operation
+ */
+static void ext4_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct ext4_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+typedef enum {
+ EXT4_DECRYPT = 0,
+ EXT4_ENCRYPT,
+} ext4_direction_t;
+
+static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
+ struct inode *inode,
+ ext4_direction_t rw,
+ pgoff_t index,
+ struct page *src_page,
+ struct page *dest_page)
+
+{
+ u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct scatterlist dst, src;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
+ int res = 0;
+
+ BUG_ON(!ctx->tfm);
+ BUG_ON(ctx->mode != ei->i_encryption_key.mode);
+
+ if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
+ printk_ratelimited(KERN_ERR
+ "%s: unsupported crypto algorithm: %d\n",
+ __func__, ctx->mode);
+ return -ENOTSUPP;
+ }
+
+ crypto_ablkcipher_clear_flags(atfm, ~0);
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+
+ res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
+ ei->i_encryption_key.size);
+ if (res) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_ablkcipher_setkey() failed\n",
+ __func__);
+ return res;
+ }
+ req = ablkcipher_request_alloc(atfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ ablkcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ ext4_crypt_complete, &ecr);
+
+ BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
+ memcpy(xts_tweak, &index, sizeof(index));
+ memset(&xts_tweak[sizeof(index)], 0,
+ EXT4_XTS_TWEAK_SIZE - sizeof(index));
+
+ sg_init_table(&dst, 1);
+ sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+ sg_init_table(&src, 1);
+ sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
+ ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+ xts_tweak);
+ if (rw == EXT4_DECRYPT)
+ res = crypto_ablkcipher_decrypt(req);
+ else
+ res = crypto_ablkcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ ablkcipher_request_free(req);
+ if (res) {
+ printk_ratelimited(
+ KERN_ERR
+ "%s: crypto_ablkcipher_encrypt() returned %d\n",
+ __func__, res);
+ return res;
+ }
+ return 0;
+}
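For reviewers unfamiliar with the tweak handling above: the per-page XTS tweak is just the page index copied into the low bytes of a 16-byte buffer, with the remainder zeroed. A minimal user-space sketch of that layout (EXT4_XTS_TWEAK_SIZE comes from ext4_crypto.h later in this patch; the 64-bit index type and little-endian byte order are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXT4_XTS_TWEAK_SIZE 16

/* Sketch of the tweak layout used by ext4_page_crypto(): the raw page
 * index occupies the first sizeof(index) bytes (native byte order, so
 * little-endian on x86); the rest of the 16-byte tweak is zeroed. */
static void build_xts_tweak(uint8_t tweak[EXT4_XTS_TWEAK_SIZE], uint64_t index)
{
        memcpy(tweak, &index, sizeof(index));
        memset(tweak + sizeof(index), 0, EXT4_XTS_TWEAK_SIZE - sizeof(index));
}

int main(void)
{
        uint8_t tweak[EXT4_XTS_TWEAK_SIZE];
        int i;

        build_xts_tweak(tweak, 42);
        for (i = 0; i < EXT4_XTS_TWEAK_SIZE; i++)
                printf("%02x", tweak[i]);
        printf("\n");
        return 0;
}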
+
+/**
+ * ext4_encrypt() - Encrypts a page
+ * @inode: The inode for which the encryption should take place
+ * @plaintext_page: The page to encrypt. Must be locked.
+ *
+ * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
+ * encryption context.
+ *
+ * Called on the page write path. The caller must call
+ * ext4_restore_control_page() on the returned ciphertext page to
+ * release the bounce buffer and the encryption context.
+ *
+ * Return: An allocated page with the encrypted content on success. Otherwise,
+ * an ERR_PTR()-encoded error value.
+ */
+struct page *ext4_encrypt(struct inode *inode,
+ struct page *plaintext_page)
+{
+ struct ext4_crypto_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ int err;
+
+ BUG_ON(!PageLocked(plaintext_page));
+
+ ctx = ext4_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ return (struct page *) ctx;
+
+ /* The encryption operation will require a bounce page. */
+ ciphertext_page = alloc_page(GFP_NOFS);
+ if (!ciphertext_page) {
+ /* This is a potential bottleneck, but at least we'll have
+ * forward progress. */
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS);
+ if (WARN_ON_ONCE(!ciphertext_page)) {
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS | __GFP_WAIT);
+ }
+ ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ }
+ ctx->bounce_page = ciphertext_page;
+ ctx->control_page = plaintext_page;
+ err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
+ plaintext_page, ciphertext_page);
+ if (err) {
+ ext4_release_crypto_ctx(ctx);
+ return ERR_PTR(err);
+ }
+ SetPagePrivate(ciphertext_page);
+ set_page_private(ciphertext_page, (unsigned long)ctx);
+ lock_page(ciphertext_page);
+ return ciphertext_page;
+}
+
+/**
+ * ext4_decrypt() - Decrypts a page in-place
+ * @ctx: The encryption context.
+ * @page: The page to decrypt. Must be locked.
+ *
+ * Decrypts page in-place using the ctx encryption context.
+ *
+ * Called from the read completion callback.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+
+ return ext4_page_crypto(ctx, page->mapping->host,
+ EXT4_DECRYPT, page->index, page, page);
+}
+
+/*
+ * Convenience function which takes care of allocating and
+ * deallocating the encryption context
+ */
+int ext4_decrypt_one(struct inode *inode, struct page *page)
+{
+ int ret;
+
+ struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
+
+ if (!ctx)
+ return -ENOMEM;
+ ret = ext4_decrypt(ctx, page);
+ ext4_release_crypto_ctx(ctx);
+ return ret;
+}
+
+int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+{
+ struct ext4_crypto_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ struct bio *bio;
+ ext4_lblk_t lblk = ex->ee_block;
+ ext4_fsblk_t pblk = ext4_ext_pblock(ex);
+ unsigned int len = ext4_ext_get_actual_len(ex);
+ int err = 0;
+
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+
+ ctx = ext4_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ciphertext_page = alloc_page(GFP_NOFS);
+ if (!ciphertext_page) {
+ /* This is a potential bottleneck, but at least we'll have
+ * forward progress. */
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS);
+ if (WARN_ON_ONCE(!ciphertext_page)) {
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS | __GFP_WAIT);
+ }
+ ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ }
+ ctx->bounce_page = ciphertext_page;
+
+ while (len--) {
+ err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
+ ZERO_PAGE(0), ciphertext_page);
+ if (err)
+ goto errout;
+
+ bio = bio_alloc(GFP_KERNEL, 1);
+ if (!bio) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ bio->bi_bdev = inode->i_sb->s_bdev;
+ bio->bi_iter.bi_sector = pblk;
+ err = bio_add_page(bio, ciphertext_page,
+ inode->i_sb->s_blocksize, 0);
+ if (err) {
+ bio_put(bio);
+ goto errout;
+ }
+ err = submit_bio_wait(WRITE, bio);
+ if (err)
+ goto errout;
+ }
+ err = 0;
+errout:
+ ext4_release_crypto_ctx(ctx);
+ return err;
+}
+
+bool ext4_valid_contents_enc_mode(uint32_t mode)
+{
+ return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+/**
+ * ext4_validate_encryption_key_size() - Validate the encryption key size
+ * @mode: The key mode.
+ * @size: The key size to validate.
+ *
+ * Return: The validated key size for @mode. Zero if invalid.
+ */
+uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
+{
+ if (size == ext4_encryption_key_size(mode))
+ return size;
+ return 0;
+}
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
new file mode 100644
index 000000000000..fded02f72299
--- /dev/null
+++ b/fs/ext4/crypto_fname.c
@@ -0,0 +1,719 @@
+/*
+ * linux/fs/ext4/crypto_fname.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains functions for filename crypto management in ext4
+ *
+ * Written by Uday Savagaonkar, 2014.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/crypto.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock_types.h>
+
+#include "ext4.h"
+#include "ext4_crypto.h"
+#include "xattr.h"
+
+/**
+ * ext4_dir_crypt_complete() - The completion callback for filename crypto
+ */
+static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct ext4_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+bool ext4_valid_filenames_enc_mode(uint32_t mode)
+{
+ return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+/**
+ * ext4_fname_encrypt() - encrypt a filename
+ *
+ * This function encrypts the input filename and returns the length of the
+ * ciphertext. Errors are returned as negative numbers. We trust the caller to
+ * have allocated sufficient memory for the oname string.
+ */
+static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct ext4_str *oname)
+{
+ u32 ciphertext_len;
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct crypto_ablkcipher *tfm = ctx->ctfm;
+ int res = 0;
+ char iv[EXT4_CRYPTO_BLOCK_SIZE];
+ struct scatterlist sg[1];
+ int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
+ char *workbuf;
+
+ if (iname->len <= 0 || iname->len > ctx->lim)
+ return -EIO;
+
+ ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
+ EXT4_CRYPTO_BLOCK_SIZE : iname->len;
+ ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
+ ciphertext_len = (ciphertext_len > ctx->lim)
+ ? ctx->lim : ciphertext_len;
+
+ /* Allocate request */
+ req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(
+ KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ ablkcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ ext4_dir_crypt_complete, &ecr);
+
+ /* Map the workpage */
+ workbuf = kmap(ctx->workpage);
+
+ /* Copy the input */
+ memcpy(workbuf, iname->name, iname->len);
+ if (iname->len < ciphertext_len)
+ memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
+
+ /* Initialize IV */
+ memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
+
+ /* Create encryption request */
+ sg_init_table(sg, 1);
+ sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
+ ablkcipher_request_set_crypt(req, sg, sg, ciphertext_len, iv);
+ res = crypto_ablkcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ if (res >= 0) {
+ /* Copy the result to output */
+ memcpy(oname->name, workbuf, ciphertext_len);
+ res = ciphertext_len;
+ }
+ kunmap(ctx->workpage);
+ ablkcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(
+ KERN_ERR "%s: Error (error code %d)\n", __func__, res);
+ }
+ oname->len = ciphertext_len;
+ return res;
+}
+
+/*
+ * ext4_fname_decrypt() - decrypt a filename
+ *
+ * This function decrypts the input filename and returns the length of the
+ * plaintext. Errors are returned as negative numbers. We trust the caller to
+ * have allocated sufficient memory for the oname string.
+ */
+static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_str *iname,
+ struct ext4_str *oname)
+{
+ struct ext4_str tmp_in[2], tmp_out[1];
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct scatterlist sg[1];
+ struct crypto_ablkcipher *tfm = ctx->ctfm;
+ int res = 0;
+ char iv[EXT4_CRYPTO_BLOCK_SIZE];
+ char *workbuf;
+
+ if (iname->len <= 0 || iname->len > ctx->lim)
+ return -EIO;
+
+ tmp_in[0].name = iname->name;
+ tmp_in[0].len = iname->len;
+ tmp_out[0].name = oname->name;
+
+ /* Allocate request */
+ req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(
+ KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ ablkcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ ext4_dir_crypt_complete, &ecr);
+
+ /* Map the workpage */
+ workbuf = kmap(ctx->workpage);
+
+ /* Copy the input */
+ memcpy(workbuf, iname->name, iname->len);
+
+ /* Initialize IV */
+ memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
+
+ /* Create encryption request */
+ sg_init_table(sg, 1);
+ sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
+ ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
+ res = crypto_ablkcipher_decrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ if (res >= 0) {
+ /* Copy the result to output */
+ memcpy(oname->name, workbuf, iname->len);
+ res = iname->len;
+ }
+ kunmap(ctx->workpage);
+ ablkcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(
+ KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
+ __func__, res);
+ return res;
+ }
+
+ oname->len = strnlen(oname->name, iname->len);
+ return oname->len;
+}
+
+static const char *lookup_table =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
+/**
+ * digest_encode() -
+ *
+ * Encodes the input digest using characters from the set [A-Za-z0-9+,].
+ * The encoded string is roughly 4/3 times the size of the input string.
+ */
+static int digest_encode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ char *cp = dst;
+
+ while (i < len) {
+ ac += (((unsigned char) src[i]) << bits);
+ bits += 8;
+ do {
+ *cp++ = lookup_table[ac & 0x3f];
+ ac >>= 6;
+ bits -= 6;
+ } while (bits >= 6);
+ i++;
+ }
+ if (bits)
+ *cp++ = lookup_table[ac & 0x3f];
+ return cp - dst;
+}
+
+static int digest_decode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ const char *p;
+ char *cp = dst;
+
+ while (i < len) {
+ p = strchr(lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+ ac += (p - lookup_table) << bits;
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = ac & 0xff;
+ ac >>= 8;
+ bits -= 8;
+ }
+ i++;
+ }
+ if (ac)
+ return -1;
+ return cp - dst;
+}
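A quick way to sanity-check digest_encode()/digest_decode() above is the length arithmetic: each input byte contributes 8 bits and each output character carries 6, so the encoded form is ceil(8n/6) characters. A small stand-alone check (the helper name is ours, not part of the patch):

#include <stdio.h>

/* Encoded length produced by digest_encode() for an n-byte input:
 * one output character per 6 bits, i.e. ceil(8 * n / 6). */
static int digest_encoded_len(int n)
{
        return (8 * n + 5) / 6;
}

int main(void)
{
        /* 24 bytes (hash + minor_hash + 16-byte ciphertext tail) -> 32 chars */
        printf("24 bytes -> %d chars\n", digest_encoded_len(24));
        /* 32 bytes (a full digest-sized name) -> 43 chars, the upper bound
         * later checked in ext4_fname_usr_to_hash() and ext4_fname_match() */
        printf("32 bytes -> %d chars\n", digest_encoded_len(32));
        return 0;
}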
+
+/**
+ * ext4_free_fname_crypto_ctx() -
+ *
+ * Frees up a crypto context.
+ */
+void ext4_free_fname_crypto_ctx(struct ext4_fname_crypto_ctx *ctx)
+{
+ if (ctx == NULL || IS_ERR(ctx))
+ return;
+
+ if (ctx->ctfm && !IS_ERR(ctx->ctfm))
+ crypto_free_ablkcipher(ctx->ctfm);
+ if (ctx->htfm && !IS_ERR(ctx->htfm))
+ crypto_free_hash(ctx->htfm);
+ if (ctx->workpage && !IS_ERR(ctx->workpage))
+ __free_page(ctx->workpage);
+ kfree(ctx);
+}
+
+/**
+ * ext4_put_fname_crypto_ctx() -
+ *
+ * Returns the crypto context to the free list. If the free list is above a
+ * threshold, completely frees up the context and returns the memory.
+ *
+ * TODO: Currently we directly free the crypto context. Eventually we should
+ * add code to return it to the free list. Such an approach will increase the
+ * efficiency of directory lookups.
+ */
+void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx)
+{
+ if (*ctx == NULL || IS_ERR(*ctx))
+ return;
+ ext4_free_fname_crypto_ctx(*ctx);
+ *ctx = NULL;
+}
+
+/**
+ * ext4_search_fname_crypto_ctx() - Look up a reusable crypto context for @key
+ * (currently a stub that always returns NULL)
+ */
+static struct ext4_fname_crypto_ctx *ext4_search_fname_crypto_ctx(
+ const struct ext4_encryption_key *key)
+{
+ return NULL;
+}
+
+/**
+ * ext4_alloc_fname_crypto_ctx() - Allocate a filename crypto context for @key
+ */
+struct ext4_fname_crypto_ctx *ext4_alloc_fname_crypto_ctx(
+ const struct ext4_encryption_key *key)
+{
+ struct ext4_fname_crypto_ctx *ctx;
+
+ ctx = kmalloc(sizeof(struct ext4_fname_crypto_ctx), GFP_NOFS);
+ if (ctx == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (key->mode == EXT4_ENCRYPTION_MODE_INVALID) {
+ /* This automatically sets the key mode to invalid, since the
+ * enum value for EXT4_ENCRYPTION_MODE_INVALID is zero */
+ memset(&ctx->key, 0, sizeof(ctx->key));
+ } else {
+ memcpy(&ctx->key, key, sizeof(struct ext4_encryption_key));
+ }
+ ctx->has_valid_key = (EXT4_ENCRYPTION_MODE_INVALID == key->mode)
+ ? 0 : 1;
+ ctx->ctfm_key_is_ready = 0;
+ ctx->ctfm = NULL;
+ ctx->htfm = NULL;
+ ctx->workpage = NULL;
+ return ctx;
+}
+
+/**
+ * ext4_get_fname_crypto_ctx() -
+ *
+ * Allocates a free crypto context and initializes it to hold
+ * the crypto material for the inode.
+ *
+ * Return: NULL if not encrypted. Error value on error. Valid pointer otherwise.
+ */
+struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
+ struct inode *inode, u32 max_ciphertext_len)
+{
+ struct ext4_fname_crypto_ctx *ctx;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int res;
+
+ /* Check if the crypto policy is set on the inode */
+ res = ext4_encrypted_inode(inode);
+ if (res == 0)
+ return NULL;
+
+ if (!ext4_has_encryption_key(inode))
+ ext4_generate_encryption_key(inode);
+
+ /* Get a crypto context based on the key.
+ * A new context is allocated if no context matches the requested key.
+ */
+ ctx = ext4_search_fname_crypto_ctx(&(ei->i_encryption_key));
+ if (ctx == NULL)
+ ctx = ext4_alloc_fname_crypto_ctx(&(ei->i_encryption_key));
+ if (IS_ERR(ctx))
+ return ctx;
+
+ ctx->flags = ei->i_crypt_policy_flags;
+ if (ctx->has_valid_key) {
+ if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
+ printk_once(KERN_WARNING
+ "ext4: unsupported key mode %d\n",
+ ctx->key.mode);
+ return ERR_PTR(-ENOKEY);
+ }
+
+ /* As a first cut, we allocate a new tfm on every call.
+ * Later, we will keep the tfm around in case the key gets
+ * re-used */
+ if (ctx->ctfm == NULL) {
+ ctx->ctfm = crypto_alloc_ablkcipher("cts(cbc(aes))",
+ 0, 0);
+ }
+ if (IS_ERR(ctx->ctfm)) {
+ res = PTR_ERR(ctx->ctfm);
+ printk(
+ KERN_DEBUG "%s: error (%d) allocating crypto tfm\n",
+ __func__, res);
+ ctx->ctfm = NULL;
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(res);
+ }
+ if (ctx->ctfm == NULL) {
+ printk(
+ KERN_DEBUG "%s: could not allocate crypto tfm\n",
+ __func__);
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (ctx->workpage == NULL)
+ ctx->workpage = alloc_page(GFP_NOFS);
+ if (IS_ERR(ctx->workpage)) {
+ res = PTR_ERR(ctx->workpage);
+ printk(
+ KERN_DEBUG "%s: error (%d) allocating work page\n",
+ __func__, res);
+ ctx->workpage = NULL;
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(res);
+ }
+ if (ctx->workpage == NULL) {
+ printk(
+ KERN_DEBUG "%s: could not allocate work page\n",
+ __func__);
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ ctx->lim = max_ciphertext_len;
+ crypto_ablkcipher_clear_flags(ctx->ctfm, ~0);
+ crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctx->ctfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+
+ /* If we are lucky, we will get a context that is already
+ * set up with the right key. Else, we will have to
+ * set the key */
+ if (!ctx->ctfm_key_is_ready) {
+ /* Since our crypto objectives for filename encryption
+ * are pretty weak, we directly use the inode master
+ * key */
+ res = crypto_ablkcipher_setkey(ctx->ctfm,
+ ctx->key.raw, ctx->key.size);
+ if (res) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-EIO);
+ }
+ ctx->ctfm_key_is_ready = 1;
+ } else {
+ /* In the current implementation, key should never be
+ * marked "ready" for a context that has just been
+ * allocated. So we should never reach here */
+ BUG();
+ }
+ }
+ if (ctx->htfm == NULL)
+ ctx->htfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->htfm)) {
+ res = PTR_ERR(ctx->htfm);
+ printk(KERN_DEBUG "%s: error (%d) allocating hash tfm\n",
+ __func__, res);
+ ctx->htfm = NULL;
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(res);
+ }
+ if (ctx->htfm == NULL) {
+ printk(KERN_DEBUG "%s: could not allocate hash tfm\n",
+ __func__);
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return ctx;
+}
+
+/**
+ * ext4_fname_crypto_round_up() -
+ *
+ * Return: @size rounded up to the next multiple of @blksize
+ */
+u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
+{
+ return ((size+blksize-1)/blksize)*blksize;
+}
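To make the padding policy concrete: the low two policy bits select 4-, 8-, 16- or 32-byte padding, the plaintext length is first raised to at least EXT4_CRYPTO_BLOCK_SIZE, and the result is rounded up with the helper above. A user-space rendering of that calculation, ignoring the ctx->lim clamp (constants mirror ext4_crypto.h; the 13-byte example name is arbitrary):

#include <stdio.h>

#define EXT4_CRYPTO_BLOCK_SIZE          16
#define EXT4_POLICY_FLAGS_PAD_MASK      0x03

/* Mirror of ext4_fname_crypto_round_up(). */
static unsigned round_up_to(unsigned size, unsigned blksize)
{
        return ((size + blksize - 1) / blksize) * blksize;
}

/* On-disk ciphertext length for a name of len bytes under the given
 * policy flags, as in ext4_fname_crypto_namelen_on_disk(). */
static unsigned namelen_on_disk(unsigned len, unsigned flags)
{
        unsigned padding = 4 << (flags & EXT4_POLICY_FLAGS_PAD_MASK);

        if (len < EXT4_CRYPTO_BLOCK_SIZE)
                len = EXT4_CRYPTO_BLOCK_SIZE;
        return round_up_to(len, padding);
}

int main(void)
{
        unsigned flags;

        for (flags = 0; flags <= EXT4_POLICY_FLAGS_PAD_MASK; flags++)
                printf("flags %u: 13-byte name stored as %u bytes\n",
                       flags, namelen_on_disk(13, flags));
        return 0;
}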
+
+/**
+ * ext4_fname_crypto_namelen_on_disk() -
+ */
+int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
+ u32 namelen)
+{
+ u32 ciphertext_len;
+ int padding;
+
+ if (ctx == NULL)
+ return -EIO;
+ padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
+ if (!(ctx->has_valid_key))
+ return -EACCES;
+ ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
+ EXT4_CRYPTO_BLOCK_SIZE : namelen;
+ ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
+ ciphertext_len = (ciphertext_len > ctx->lim)
+ ? ctx->lim : ciphertext_len;
+ return (int) ciphertext_len;
+}
+
+/**
+ * ext4_fname_crypto_alloc_buffer() -
+ *
+ * Allocates an output buffer that is sufficient for the crypto operation
+ * specified by the context and the direction.
+ */
+int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
+ u32 ilen, struct ext4_str *crypto_str)
+{
+ unsigned int olen;
+ int padding;
+
+ if (!ctx)
+ return -EIO;
+ padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
+ if (padding < EXT4_CRYPTO_BLOCK_SIZE)
+ padding = EXT4_CRYPTO_BLOCK_SIZE;
+ olen = ext4_fname_crypto_round_up(ilen, padding);
+ crypto_str->len = olen;
+ if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
+ olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
+ /* The allocated buffer can hold one extra character so the string
+ * can be null-terminated */
+ crypto_str->name = kmalloc(olen+1, GFP_NOFS);
+ if (!(crypto_str->name))
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * ext4_fname_crypto_free_buffer() -
+ *
+ * Frees the buffer allocated for crypto operation.
+ */
+void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
+{
+ if (!crypto_str)
+ return;
+ kfree(crypto_str->name);
+ crypto_str->name = NULL;
+}
+
+/**
+ * ext4_fname_disk_to_usr() - converts a filename from disk space to user space
+ */
+int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ struct dx_hash_info *hinfo,
+ const struct ext4_str *iname,
+ struct ext4_str *oname)
+{
+ char buf[24];
+ int ret;
+
+ if (ctx == NULL)
+ return -EIO;
+ if (iname->len < 3) {
+ /* Check for . and .. */
+ if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
+ oname->name[0] = '.';
+ oname->name[iname->len-1] = '.';
+ oname->len = iname->len;
+ return oname->len;
+ }
+ }
+ if (ctx->has_valid_key)
+ return ext4_fname_decrypt(ctx, iname, oname);
+
+ if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
+ ret = digest_encode(iname->name, iname->len, oname->name);
+ oname->len = ret;
+ return ret;
+ }
+ if (hinfo) {
+ memcpy(buf, &hinfo->hash, 4);
+ memcpy(buf+4, &hinfo->minor_hash, 4);
+ } else
+ memset(buf, 0, 8);
+ memcpy(buf + 8, iname->name + iname->len - 16, 16);
+ oname->name[0] = '_';
+ ret = digest_encode(buf, 24, oname->name+1);
+ oname->len = ret + 1;
+ return ret + 1;
+}
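The '_'-prefixed case above is where the 33- and 43-character limits seen later in ext4_fname_usr_to_hash() and ext4_fname_match() come from: a long name without a key is presented as '_' plus the encoding of a 24-byte buffer (hash, minor hash, last 16 ciphertext bytes), while a short ciphertext of at most 32 bytes encodes to at most 43 characters. A small stand-alone calculation (the struct below is only an illustration of the buf[24] packing, not a kernel type):

#include <stdint.h>
#include <stdio.h>

/* Illustration of the 24-byte buffer packed in _ext4_fname_disk_to_usr()
 * when no key is available and the ciphertext name is long. */
struct nokey_name_payload {
        uint32_t hash;          /* htree major hash */
        uint32_t minor_hash;    /* htree minor hash */
        uint8_t  tail[16];      /* last 16 bytes of the on-disk ciphertext */
};

static int encoded_len(int n)
{
        return (8 * n + 5) / 6; /* one output character per 6 bits */
}

int main(void)
{
        int payload = (int)sizeof(struct nokey_name_payload);   /* 24 */

        printf("long names:  1 + %d = %d characters\n",
               encoded_len(payload), 1 + encoded_len(payload)); /* 33 */
        printf("short names: up to %d characters\n",
               encoded_len(32));                                /* 43 */
        return 0;
}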
+
+int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ struct dx_hash_info *hinfo,
+ const struct ext4_dir_entry_2 *de,
+ struct ext4_str *oname)
+{
+ struct ext4_str iname = {.name = (unsigned char *) de->name,
+ .len = de->name_len };
+
+ return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname);
+}
+
+
+/**
+ * ext4_fname_usr_to_disk() - converts a filename from user space to disk space
+ */
+int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct ext4_str *oname)
+{
+ int res;
+
+ if (ctx == NULL)
+ return -EIO;
+ if (iname->len < 3) {
+ /* Check for . and .. */
+ if (iname->name[0] == '.' &&
+ iname->name[iname->len-1] == '.') {
+ oname->name[0] = '.';
+ oname->name[iname->len-1] = '.';
+ oname->len = iname->len;
+ return oname->len;
+ }
+ }
+ if (ctx->has_valid_key) {
+ res = ext4_fname_encrypt(ctx, iname, oname);
+ return res;
+ }
+ /* Without a proper key, a user is not allowed to modify the filenames
+ * in a directory. Consequently, a user space name cannot be mapped to
+ * a disk-space name */
+ return -EACCES;
+}
+
+/*
+ * Calculate the htree hash from a filename from user space
+ */
+int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct dx_hash_info *hinfo)
+{
+ struct ext4_str tmp;
+ int ret = 0;
+ char buf[EXT4_FNAME_CRYPTO_DIGEST_SIZE+1];
+
+ if (!ctx ||
+ ((iname->name[0] == '.') &&
+ ((iname->len == 1) ||
+ ((iname->name[1] == '.') && (iname->len == 2))))) {
+ ext4fs_dirhash(iname->name, iname->len, hinfo);
+ return 0;
+ }
+
+ if (!ctx->has_valid_key && iname->name[0] == '_') {
+ if (iname->len != 33)
+ return -ENOENT;
+ ret = digest_decode(iname->name+1, iname->len, buf);
+ if (ret != 24)
+ return -ENOENT;
+ memcpy(&hinfo->hash, buf, 4);
+ memcpy(&hinfo->minor_hash, buf + 4, 4);
+ return 0;
+ }
+
+ if (!ctx->has_valid_key && iname->name[0] != '_') {
+ if (iname->len > 43)
+ return -ENOENT;
+ ret = digest_decode(iname->name, iname->len, buf);
+ ext4fs_dirhash(buf, ret, hinfo);
+ return 0;
+ }
+
+ /* First encrypt the plaintext name */
+ ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp);
+ if (ret < 0)
+ return ret;
+
+ ret = ext4_fname_encrypt(ctx, iname, &tmp);
+ if (ret >= 0) {
+ ext4fs_dirhash(tmp.name, tmp.len, hinfo);
+ ret = 0;
+ }
+
+ ext4_fname_crypto_free_buffer(&tmp);
+ return ret;
+}
+
+int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
+ int len, const char * const name,
+ struct ext4_dir_entry_2 *de)
+{
+ int ret = -ENOENT;
+ int bigname = (*name == '_');
+
+ if (ctx->has_valid_key) {
+ if (cstr->name == NULL) {
+ struct qstr istr;
+
+ ret = ext4_fname_crypto_alloc_buffer(ctx, len, cstr);
+ if (ret < 0)
+ goto errout;
+ istr.name = name;
+ istr.len = len;
+ ret = ext4_fname_encrypt(ctx, &istr, cstr);
+ if (ret < 0)
+ goto errout;
+ }
+ } else {
+ if (cstr->name == NULL) {
+ cstr->name = kmalloc(32, GFP_KERNEL);
+ if (cstr->name == NULL)
+ return -ENOMEM;
+ if ((bigname && (len != 33)) ||
+ (!bigname && (len > 43)))
+ goto errout;
+ ret = digest_decode(name+bigname, len-bigname,
+ cstr->name);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto errout;
+ }
+ cstr->len = ret;
+ }
+ if (bigname) {
+ if (de->name_len < 16)
+ return 0;
+ ret = memcmp(de->name + de->name_len - 16,
+ cstr->name + 8, 16);
+ return (ret == 0) ? 1 : 0;
+ }
+ }
+ if (de->name_len != cstr->len)
+ return 0;
+ ret = memcmp(de->name, cstr->name, cstr->len);
+ return (ret == 0) ? 1 : 0;
+errout:
+ kfree(cstr->name);
+ cstr->name = NULL;
+ return ret;
+}
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
new file mode 100644
index 000000000000..52170d0b7c40
--- /dev/null
+++ b/fs/ext4/crypto_key.c
@@ -0,0 +1,166 @@
+/*
+ * linux/fs/ext4/crypto_key.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions for ext4
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <uapi/linux/keyctl.h>
+
+#include "ext4.h"
+#include "xattr.h"
+
+static void derive_crypt_complete(struct crypto_async_request *req, int rc)
+{
+ struct ext4_completion_result *ecr = req->data;
+
+ if (rc == -EINPROGRESS)
+ return;
+
+ ecr->res = rc;
+ complete(&ecr->completion);
+}
+
+/**
+ * ext4_derive_key_aes() - Derive a key using AES-128-ECB
+ * @deriving_key: Encryption key used for derivation.
+ * @source_key: Source key to which to apply derivation.
+ * @derived_key: Derived key.
+ *
+ * Return: Zero on success; non-zero otherwise.
+ */
+static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
+ char source_key[EXT4_AES_256_XTS_KEY_SIZE],
+ char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
+{
+ int res = 0;
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
+ 0);
+
+ if (IS_ERR(tfm)) {
+ res = PTR_ERR(tfm);
+ tfm = NULL;
+ goto out;
+ }
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ res = -ENOMEM;
+ goto out;
+ }
+ ablkcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ derive_crypt_complete, &ecr);
+ res = crypto_ablkcipher_setkey(tfm, deriving_key,
+ EXT4_AES_128_ECB_KEY_SIZE);
+ if (res < 0)
+ goto out;
+ sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
+ sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
+ ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
+ EXT4_AES_256_XTS_KEY_SIZE, NULL);
+ res = crypto_ablkcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+
+out:
+ if (req)
+ ablkcipher_request_free(req);
+ if (tfm)
+ crypto_free_ablkcipher(tfm);
+ return res;
+}
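For anyone wanting to reproduce the derivation outside the kernel: the derived key is simply the 64-byte master key encrypted with AES-128-ECB under the 16-byte per-inode nonce. A hedged user-space sketch using OpenSSL's EVP API (not part of the patch; error handling trimmed):

#include <openssl/evp.h>
#include <stdio.h>

/* AES-128-ECB-encrypt the 64-byte master key with the 16-byte nonce as
 * the key, mirroring ext4_derive_key_aes().  Returns 0 on success. */
static int derive_key_aes(const unsigned char nonce[16],
                          const unsigned char master[64],
                          unsigned char derived[64])
{
        EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
        int outl = 0, finl = 0, ok = 0;

        if (!ctx)
                return -1;
        if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, nonce, NULL) &&
            EVP_CIPHER_CTX_set_padding(ctx, 0) &&
            EVP_EncryptUpdate(ctx, derived, &outl, master, 64) &&
            EVP_EncryptFinal_ex(ctx, derived + outl, &finl))
                ok = (outl + finl == 64);
        EVP_CIPHER_CTX_free(ctx);
        return ok ? 0 : -1;
}

int main(void)
{
        unsigned char nonce[16] = { 0 }, master[64] = { 0 }, derived[64];

        if (derive_key_aes(nonce, master, derived) == 0)
                printf("first derived byte: %02x\n", derived[0]);
        return 0;
}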
+
+/**
+ * ext4_generate_encryption_key() - generates an encryption key
+ * @inode: The inode to generate the encryption key for.
+ */
+int ext4_generate_encryption_key(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
+ char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
+ (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1];
+ struct key *keyring_key = NULL;
+ struct ext4_encryption_key *master_key;
+ struct ext4_encryption_context ctx;
+ struct user_key_payload *ukp;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &ctx, sizeof(ctx));
+
+ if (res != sizeof(ctx)) {
+ if (res > 0)
+ res = -EINVAL;
+ goto out;
+ }
+ res = 0;
+
+ ei->i_crypt_policy_flags = ctx.flags;
+ if (S_ISREG(inode->i_mode))
+ crypt_key->mode = ctx.contents_encryption_mode;
+ else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ crypt_key->mode = ctx.filenames_encryption_mode;
+ else {
+ printk(KERN_ERR "ext4 crypto: Unsupported inode type.\n");
+ BUG();
+ }
+ crypt_key->size = ext4_encryption_key_size(crypt_key->mode);
+ BUG_ON(!crypt_key->size);
+ if (DUMMY_ENCRYPTION_ENABLED(sbi)) {
+ memset(crypt_key->raw, 0x42, EXT4_AES_256_XTS_KEY_SIZE);
+ goto out;
+ }
+ memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX,
+ EXT4_KEY_DESC_PREFIX_SIZE);
+ sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE,
+ "%*phN", EXT4_KEY_DESCRIPTOR_SIZE,
+ ctx.master_key_descriptor);
+ full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
+ (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0';
+ keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
+ if (IS_ERR(keyring_key)) {
+ res = PTR_ERR(keyring_key);
+ keyring_key = NULL;
+ goto out;
+ }
+ BUG_ON(keyring_key->type != &key_type_logon);
+ ukp = ((struct user_key_payload *)keyring_key->payload.data);
+ if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
+ res = -EINVAL;
+ goto out;
+ }
+ master_key = (struct ext4_encryption_key *)ukp->data;
+ BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE !=
+ EXT4_KEY_DERIVATION_NONCE_SIZE);
+ BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
+ res = ext4_derive_key_aes(ctx.nonce, master_key->raw, crypt_key->raw);
+out:
+ if (keyring_key)
+ key_put(keyring_key);
+ if (res < 0)
+ crypt_key->mode = EXT4_ENCRYPTION_MODE_INVALID;
+ return res;
+}
+
+int ext4_has_encryption_key(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
+
+ return (crypt_key->mode != EXT4_ENCRYPTION_MODE_INVALID);
+}
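The key description built above is the string user space must add a logon key under before encrypted files become usable: "ext4:" followed by the 8-byte master key descriptor in lowercase hex (which is what the kernel's "%*phN" format produces). A small sketch of the formatting; the add_key(2) step in the comment describes the assumed workflow rather than anything defined by this patch:

#include <stdint.h>
#include <stdio.h>

#define EXT4_KEY_DESC_PREFIX            "ext4:"
#define EXT4_KEY_DESCRIPTOR_SIZE        8

/* Build the description ext4_generate_encryption_key() passes to
 * request_key(): "ext4:" + 16 lowercase hex digits. */
static void build_key_desc(char out[32],
                           const uint8_t desc[EXT4_KEY_DESCRIPTOR_SIZE])
{
        int i, n;

        n = sprintf(out, "%s", EXT4_KEY_DESC_PREFIX);
        for (i = 0; i < EXT4_KEY_DESCRIPTOR_SIZE; i++)
                n += sprintf(out + n, "%02x", desc[i]);
}

int main(void)
{
        const uint8_t desc[EXT4_KEY_DESCRIPTOR_SIZE] =
                { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x23, 0x45, 0x67 };
        char full[32];

        build_key_desc(full, desc);
        /* A logon key would then be added under this name, e.g. via
         * add_key("logon", full, payload, payload_len,
         *         KEY_SPEC_SESSION_KEYRING). */
        printf("%s\n", full);
        return 0;
}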
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
new file mode 100644
index 000000000000..a6d6291aea16
--- /dev/null
+++ b/fs/ext4/crypto_policy.c
@@ -0,0 +1,198 @@
+/*
+ * linux/fs/ext4/crypto_policy.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption policy functions for ext4
+ *
+ * Written by Michael Halcrow, 2015.
+ */
+
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "ext4.h"
+#include "xattr.h"
+
+static int ext4_inode_has_encryption_context(struct inode *inode)
+{
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0);
+ return (res > 0);
+}
+
+/*
+ * check whether the policy is consistent with the encryption context
+ * for the inode
+ */
+static int ext4_is_encryption_context_consistent_with_policy(
+ struct inode *inode, const struct ext4_encryption_policy *policy)
+{
+ struct ext4_encryption_context ctx;
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+ sizeof(ctx));
+ if (res != sizeof(ctx))
+ return 0;
+ return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (ctx.flags ==
+ policy->flags) &&
+ (ctx.contents_encryption_mode ==
+ policy->contents_encryption_mode) &&
+ (ctx.filenames_encryption_mode ==
+ policy->filenames_encryption_mode));
+}
+
+static int ext4_create_encryption_context_from_policy(
+ struct inode *inode, const struct ext4_encryption_policy *policy)
+{
+ struct ext4_encryption_context ctx;
+ int res = 0;
+
+ ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
+ memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE);
+ if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid contents encryption mode %d\n", __func__,
+ policy->contents_encryption_mode);
+ return -EINVAL;
+ }
+ if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid filenames encryption mode %d\n", __func__,
+ policy->filenames_encryption_mode);
+ return -EINVAL;
+ }
+ if (policy->flags & ~EXT4_POLICY_FLAGS_VALID)
+ return -EINVAL;
+ ctx.contents_encryption_mode = policy->contents_encryption_mode;
+ ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+ ctx.flags = policy->flags;
+ BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
+ get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
+
+ res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+ sizeof(ctx), 0);
+ if (!res)
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ return res;
+}
+
+int ext4_process_policy(const struct ext4_encryption_policy *policy,
+ struct inode *inode)
+{
+ if (policy->version != 0)
+ return -EINVAL;
+
+ if (!ext4_inode_has_encryption_context(inode)) {
+ if (!ext4_empty_dir(inode))
+ return -ENOTEMPTY;
+ return ext4_create_encryption_context_from_policy(inode,
+ policy);
+ }
+
+ if (ext4_is_encryption_context_consistent_with_policy(inode, policy))
+ return 0;
+
+ printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
+ __func__);
+ return -EINVAL;
+}
+
+int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
+{
+ struct ext4_encryption_context ctx;
+
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &ctx, sizeof(ctx));
+ if (res != sizeof(ctx))
+ return -ENOENT;
+ if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+ policy->version = 0;
+ policy->contents_encryption_mode = ctx.contents_encryption_mode;
+ policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
+ policy->flags = ctx.flags;
+ memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE);
+ return 0;
+}
+
+int ext4_is_child_context_consistent_with_parent(struct inode *parent,
+ struct inode *child)
+{
+ struct ext4_encryption_context parent_ctx, child_ctx;
+ int res;
+
+ if ((parent == NULL) || (child == NULL)) {
+ pr_err("parent %p child %p\n", parent, child);
+ BUG_ON(1);
+ }
+ /* no restrictions if the parent directory is not encrypted */
+ if (!ext4_encrypted_inode(parent))
+ return 1;
+ res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &parent_ctx, sizeof(parent_ctx));
+ if (res != sizeof(parent_ctx))
+ return 0;
+ /* if the child directory is not encrypted, this is always a problem */
+ if (!ext4_encrypted_inode(child))
+ return 0;
+ res = ext4_xattr_get(child, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &child_ctx, sizeof(child_ctx));
+ if (res != sizeof(child_ctx))
+ return 0;
+ return (memcmp(parent_ctx.master_key_descriptor,
+ child_ctx.master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ctx.contents_encryption_mode ==
+ child_ctx.contents_encryption_mode) &&
+ (parent_ctx.filenames_encryption_mode ==
+ child_ctx.filenames_encryption_mode));
+}
+
+/**
+ * ext4_inherit_context() - Sets a child context from its parent
+ * @parent: Parent inode from which the context is inherited.
+ * @child: Child inode that inherits the context from @parent.
+ *
+ * Return: Zero on success, non-zero otherwise
+ */
+int ext4_inherit_context(struct inode *parent, struct inode *child)
+{
+ struct ext4_encryption_context ctx;
+ int res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &ctx, sizeof(ctx));
+
+ if (res != sizeof(ctx)) {
+ if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) {
+ ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
+ ctx.contents_encryption_mode =
+ EXT4_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.filenames_encryption_mode =
+ EXT4_ENCRYPTION_MODE_AES_256_CTS;
+ ctx.flags = 0;
+ memset(ctx.master_key_descriptor, 0x42,
+ EXT4_KEY_DESCRIPTOR_SIZE);
+ res = 0;
+ } else {
+ goto out;
+ }
+ }
+ get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
+ res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+ sizeof(ctx), 0);
+out:
+ if (!res)
+ ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT);
+ return res;
+}
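Taken together with the EXT4_IOC_SET_ENCRYPTION_POLICY ioctl defined in ext4.h below, the policy code can be exercised from user space roughly as follows. This is a hypothetical sketch: the struct and ioctl number are copied from the patch, the key descriptor value is made up, and the matching "ext4:" logon key must already be in the keyring for files created under the directory to be usable.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define EXT4_KEY_DESCRIPTOR_SIZE 8

struct ext4_encryption_policy {
        char version;
        char contents_encryption_mode;
        char filenames_encryption_mode;
        char flags;
        char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
} __attribute__((__packed__));

#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4
#define EXT4_IOC_SET_ENCRYPTION_POLICY \
        _IOR('f', 19, struct ext4_encryption_policy)

int main(int argc, char **argv)
{
        struct ext4_encryption_policy policy;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <empty-dir>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY | O_DIRECTORY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&policy, 0, sizeof(policy));
        policy.version = 0;
        policy.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
        policy.filenames_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS;
        policy.flags = 0;               /* 4-byte filename padding */
        memcpy(policy.master_key_descriptor,
               "\xde\xad\xbe\xef\x01\x23\x45\x67", EXT4_KEY_DESCRIPTOR_SIZE);
        if (ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy) < 0) {
                perror("EXT4_IOC_SET_ENCRYPTION_POLICY");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}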
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index c24143ea9c08..5665d82d2332 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -22,10 +22,8 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
-#include <linux/rbtree.h>
#include "ext4.h"
#include "xattr.h"
@@ -110,7 +108,10 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
int err;
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
+ struct buffer_head *bh = NULL;
int dir_has_error = 0;
+ struct ext4_fname_crypto_ctx *enc_ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
if (is_dx_dir(inode)) {
err = ext4_dx_readdir(file, ctx);
@@ -127,17 +128,28 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (ext4_has_inline_data(inode)) {
int has_inline_data = 1;
- int ret = ext4_read_inline_dir(file, ctx,
+ err = ext4_read_inline_dir(file, ctx,
&has_inline_data);
if (has_inline_data)
- return ret;
+ return err;
+ }
+
+ enc_ctx = ext4_get_fname_crypto_ctx(inode, EXT4_NAME_LEN);
+ if (IS_ERR(enc_ctx))
+ return PTR_ERR(enc_ctx);
+ if (enc_ctx) {
+ err = ext4_fname_crypto_alloc_buffer(enc_ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (err < 0) {
+ ext4_put_fname_crypto_ctx(&enc_ctx);
+ return err;
+ }
}
offset = ctx->pos & (sb->s_blocksize - 1);
while (ctx->pos < inode->i_size) {
struct ext4_map_blocks map;
- struct buffer_head *bh = NULL;
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
@@ -180,6 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
(unsigned long long)ctx->pos);
ctx->pos += sb->s_blocksize - offset;
brelse(bh);
+ bh = NULL;
continue;
}
set_buffer_verified(bh);
@@ -226,25 +239,44 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
if (le32_to_cpu(de->inode)) {
- if (!dir_emit(ctx, de->name,
- de->name_len,
- le32_to_cpu(de->inode),
- get_dtype(sb, de->file_type))) {
- brelse(bh);
- return 0;
+ if (enc_ctx == NULL) {
+ /* Directory is not encrypted */
+ if (!dir_emit(ctx, de->name,
+ de->name_len,
+ le32_to_cpu(de->inode),
+ get_dtype(sb, de->file_type)))
+ goto done;
+ } else {
+ /* Directory is encrypted */
+ err = ext4_fname_disk_to_usr(enc_ctx,
+ NULL, de, &fname_crypto_str);
+ if (err < 0)
+ goto errout;
+ if (!dir_emit(ctx,
+ fname_crypto_str.name, err,
+ le32_to_cpu(de->inode),
+ get_dtype(sb, de->file_type)))
+ goto done;
}
}
ctx->pos += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
}
- offset = 0;
+ if ((ctx->pos < inode->i_size) && !dir_relax(inode))
+ goto done;
brelse(bh);
- if (ctx->pos < inode->i_size) {
- if (!dir_relax(inode))
- return 0;
- }
+ bh = NULL;
+ offset = 0;
}
- return 0;
+done:
+ err = 0;
+errout:
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ext4_put_fname_crypto_ctx(&enc_ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+#endif
+ brelse(bh);
+ return err;
}
static inline int is_32bit_api(void)
@@ -384,10 +416,15 @@ void ext4_htree_free_dir_info(struct dir_private_info *p)
/*
* Given a directory entry, enter it into the fname rb tree.
+ *
+ * When filename encryption is enabled, the dirent will hold the
+ * encrypted filename, while the htree will hold the decrypted filename.
+ * The decrypted filename is passed in via the ent_name parameter.
*/
int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
- struct ext4_dir_entry_2 *dirent)
+ struct ext4_dir_entry_2 *dirent,
+ struct ext4_str *ent_name)
{
struct rb_node **p, *parent = NULL;
struct fname *fname, *new_fn;
@@ -398,17 +435,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
p = &info->root.rb_node;
/* Create and allocate the fname structure */
- len = sizeof(struct fname) + dirent->name_len + 1;
+ len = sizeof(struct fname) + ent_name->len + 1;
new_fn = kzalloc(len, GFP_KERNEL);
if (!new_fn)
return -ENOMEM;
new_fn->hash = hash;
new_fn->minor_hash = minor_hash;
new_fn->inode = le32_to_cpu(dirent->inode);
- new_fn->name_len = dirent->name_len;
+ new_fn->name_len = ent_name->len;
new_fn->file_type = dirent->file_type;
- memcpy(new_fn->name, dirent->name, dirent->name_len);
- new_fn->name[dirent->name_len] = 0;
+ memcpy(new_fn->name, ent_name->name, ent_name->len);
+ new_fn->name[ent_name->len] = 0;
while (*p) {
parent = *p;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f63c3d5805c4..9a83f149ac85 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -422,7 +422,7 @@ enum {
EXT4_INODE_DIRTY = 8,
EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
EXT4_INODE_NOCOMPR = 10, /* Don't compress */
- EXT4_INODE_ENCRYPT = 11, /* Compression error */
+ EXT4_INODE_ENCRYPT = 11, /* Encrypted file */
/* End compression flags --- maybe not all used */
EXT4_INODE_INDEX = 12, /* hash-indexed directory */
EXT4_INODE_IMAGIC = 13, /* AFS directory */
@@ -582,6 +582,15 @@ enum {
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
+/* Encryption algorithms */
+#define EXT4_ENCRYPTION_MODE_INVALID 0
+#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
+#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
+#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
+#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4
+
+#include "ext4_crypto.h"
+
/*
* ioctl commands
*/
@@ -603,6 +612,9 @@ enum {
#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
+#define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy)
+#define EXT4_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
+#define EXT4_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct ext4_encryption_policy)
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -899,6 +911,7 @@ struct ext4_inode_info {
/* on-disk additional length */
__u16 i_extra_isize;
+ char i_crypt_policy_flags;
/* Indicate the inline data space. */
u16 i_inline_off;
@@ -939,6 +952,11 @@ struct ext4_inode_info {
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
__u32 i_csum_seed;
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Encryption params */
+ struct ext4_encryption_key i_encryption_key;
+#endif
};
/*
@@ -1049,12 +1067,6 @@ extern void ext4_set_bits(void *bm, int cur, int len);
/* Metadata checksum algorithm codes */
#define EXT4_CRC32C_CHKSUM 1
-/* Encryption algorithms */
-#define EXT4_ENCRYPTION_MODE_INVALID 0
-#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
-#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
-#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
-
/*
* Structure of the super block
*/
@@ -1142,7 +1154,8 @@ struct ext4_super_block {
__le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
__u8 s_log_groups_per_flex; /* FLEX_BG group size */
__u8 s_checksum_type; /* metadata checksum algorithm used */
- __le16 s_reserved_pad;
+ __u8 s_encryption_level; /* versioning level for encryption */
+ __u8 s_reserved_pad; /* Padding to next 32bits */
__le64 s_kbytes_written; /* nr of lifetime kilobytes written */
__le32 s_snapshot_inum; /* Inode number of active snapshot */
__le32 s_snapshot_id; /* sequential ID of active snapshot */
@@ -1169,7 +1182,9 @@ struct ext4_super_block {
__le32 s_overhead_clusters; /* overhead blocks/clusters in fs */
__le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */
__u8 s_encrypt_algos[4]; /* Encryption algorithms in use */
- __le32 s_reserved[105]; /* Padding to the end of the block */
+ __u8 s_encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
+ __le32 s_lpf_ino; /* Location of the lost+found inode */
+ __le32 s_reserved[100]; /* Padding to the end of the block */
__le32 s_checksum; /* crc32c(superblock) */
};
@@ -1180,8 +1195,16 @@ struct ext4_super_block {
/*
* run-time mount flags
*/
-#define EXT4_MF_MNTDIR_SAMPLED 0x0001
-#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
+#define EXT4_MF_MNTDIR_SAMPLED 0x0001
+#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
+#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
+ EXT4_MF_TEST_DUMMY_ENCRYPTION))
+#else
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
+#endif
/* Number of quota types we support */
#define EXT4_MAXQUOTAS 2
@@ -1351,6 +1374,12 @@ struct ext4_sb_info {
struct ratelimit_state s_err_ratelimit_state;
struct ratelimit_state s_warning_ratelimit_state;
struct ratelimit_state s_msg_ratelimit_state;
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Encryption */
+ uint32_t s_file_encryption_mode;
+ uint32_t s_dir_encryption_mode;
+#endif
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1466,6 +1495,18 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_SB(sb) (sb)
#endif
+/*
+ * Returns true if the inode is encrypted
+ */
+static inline int ext4_encrypted_inode(struct inode *inode)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
+#else
+ return 0;
+#endif
+}
+
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
/*
@@ -1575,8 +1616,9 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
EXT4_FEATURE_INCOMPAT_EXTENTS| \
EXT4_FEATURE_INCOMPAT_64BIT| \
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
- EXT4_FEATURE_INCOMPAT_MMP | \
- EXT4_FEATURE_INCOMPAT_INLINE_DATA)
+ EXT4_FEATURE_INCOMPAT_MMP | \
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
+ EXT4_FEATURE_INCOMPAT_ENCRYPT)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -2001,6 +2043,102 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
+/* crypto_policy.c */
+int ext4_is_child_context_consistent_with_parent(struct inode *parent,
+ struct inode *child);
+int ext4_inherit_context(struct inode *parent, struct inode *child);
+void ext4_to_hex(char *dst, char *src, size_t src_size);
+int ext4_process_policy(const struct ext4_encryption_policy *policy,
+ struct inode *inode);
+int ext4_get_policy(struct inode *inode,
+ struct ext4_encryption_policy *policy);
+
+/* crypto.c */
+bool ext4_valid_contents_enc_mode(uint32_t mode);
+uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
+extern struct workqueue_struct *ext4_read_workqueue;
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
+void ext4_restore_control_page(struct page *data_page);
+struct page *ext4_encrypt(struct inode *inode,
+ struct page *plaintext_page);
+int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page);
+int ext4_decrypt_one(struct inode *inode, struct page *page);
+int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+int ext4_init_crypto(void);
+void ext4_exit_crypto(void);
+static inline int ext4_sb_has_crypto(struct super_block *sb)
+{
+ return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
+}
+#else
+static inline int ext4_init_crypto(void) { return 0; }
+static inline void ext4_exit_crypto(void) { }
+static inline int ext4_sb_has_crypto(struct super_block *sb)
+{
+ return 0;
+}
+#endif
+
+/* crypto_fname.c */
+bool ext4_valid_filenames_enc_mode(uint32_t mode);
+u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
+int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
+ u32 ilen, struct ext4_str *crypto_str);
+int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ struct dx_hash_info *hinfo,
+ const struct ext4_str *iname,
+ struct ext4_str *oname);
+int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ struct dx_hash_info *hinfo,
+ const struct ext4_dir_entry_2 *de,
+ struct ext4_str *oname);
+int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct ext4_str *oname);
+int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct dx_hash_info *hinfo);
+int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
+ u32 namelen);
+int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
+ int len, const char * const name,
+ struct ext4_dir_entry_2 *de);
+
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx);
+struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
+ u32 max_len);
+void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str);
+#else
+static inline
+void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx) { }
+static inline
+struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
+ u32 max_len)
+{
+ return NULL;
+}
+static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { }
+#endif
+
+
+/* crypto_key.c */
+int ext4_generate_encryption_key(struct inode *inode);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+int ext4_has_encryption_key(struct inode *inode);
+#else
+static inline int ext4_has_encryption_key(struct inode *inode)
+{
+ return 0;
+}
+#endif
+
+
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
struct file *,
@@ -2011,17 +2149,20 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
(de), (bh), (buf), (size), (offset)))
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
- __u32 minor_hash,
- struct ext4_dir_entry_2 *dirent);
+ __u32 minor_hash,
+ struct ext4_dir_entry_2 *dirent,
+ struct ext4_str *ent_name);
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
struct buffer_head *bh,
void *buf, int buf_size,
const char *name, int namelen,
struct ext4_dir_entry_2 **dest_de);
-void ext4_insert_dentry(struct inode *inode,
+int ext4_insert_dentry(struct inode *dir,
+ struct inode *inode,
struct ext4_dir_entry_2 *de,
int buf_size,
+ const struct qstr *iname,
const char *name, int namelen);
static inline void ext4_update_dx_flag(struct inode *inode)
{
@@ -2099,6 +2240,7 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
/* inode.c */
+int ext4_inode_is_fast_symlink(struct inode *inode);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_get_block_write(struct inode *inode, sector_t iblock,
@@ -2152,8 +2294,8 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
-extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset);
+extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset);
extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
extern void ext4_ind_truncate(handle_t *, struct inode *inode);
@@ -2189,6 +2331,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
void *entry_buf,
int buf_size,
int csum_size);
+extern int ext4_empty_dir(struct inode *inode);
/* resize.c */
extern int ext4_group_add(struct super_block *sb,
@@ -2593,7 +2736,6 @@ extern const struct file_operations ext4_dir_operations;
/* file.c */
extern const struct inode_operations ext4_file_inode_operations;
extern const struct file_operations ext4_file_operations;
-extern const struct file_operations ext4_dax_file_operations;
extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
/* inline.c */
@@ -2699,6 +2841,10 @@ static inline void ext4_set_de_type(struct super_block *sb,
de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
+/* readpages.c */
+extern int ext4_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages);
/* symlink.c */
extern const struct inode_operations ext4_symlink_inode_operations;
@@ -2743,7 +2889,6 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern int ext4_ext_calc_metadata_amount(struct inode *inode,
ext4_lblk_t lblocks);
-extern int ext4_extent_tree_init(handle_t *, struct inode *);
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
int num,
struct ext4_ext_path *path);
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
new file mode 100644
index 000000000000..d75159c101ce
--- /dev/null
+++ b/fs/ext4/ext4_crypto.h
@@ -0,0 +1,156 @@
+/*
+ * linux/fs/ext4/ext4_crypto.h
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption header content for ext4
+ *
+ * Written by Michael Halcrow, 2015.
+ */
+
+#ifndef _EXT4_CRYPTO_H
+#define _EXT4_CRYPTO_H
+
+#include <linux/fs.h>
+
+#define EXT4_KEY_DESCRIPTOR_SIZE 8
+
+/* Policy provided via an ioctl on the topmost directory */
+struct ext4_encryption_policy {
+ char version;
+ char contents_encryption_mode;
+ char filenames_encryption_mode;
+ char flags;
+ char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
+} __attribute__((__packed__));
+
+#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
+#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
+
+#define EXT4_POLICY_FLAGS_PAD_4 0x00
+#define EXT4_POLICY_FLAGS_PAD_8 0x01
+#define EXT4_POLICY_FLAGS_PAD_16 0x02
+#define EXT4_POLICY_FLAGS_PAD_32 0x03
+#define EXT4_POLICY_FLAGS_PAD_MASK 0x03
+#define EXT4_POLICY_FLAGS_VALID 0x03
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Reserved
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct ext4_encryption_context {
+ char format;
+ char contents_encryption_mode;
+ char filenames_encryption_mode;
+ char flags;
+ char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
+ char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
+} __attribute__((__packed__));
+
+/* Encryption parameters */
+#define EXT4_XTS_TWEAK_SIZE 16
+#define EXT4_AES_128_ECB_KEY_SIZE 16
+#define EXT4_AES_256_GCM_KEY_SIZE 32
+#define EXT4_AES_256_CBC_KEY_SIZE 32
+#define EXT4_AES_256_CTS_KEY_SIZE 32
+#define EXT4_AES_256_XTS_KEY_SIZE 64
+#define EXT4_MAX_KEY_SIZE 64
+
+#define EXT4_KEY_DESC_PREFIX "ext4:"
+#define EXT4_KEY_DESC_PREFIX_SIZE 5
+
+struct ext4_encryption_key {
+ uint32_t mode;
+ char raw[EXT4_MAX_KEY_SIZE];
+ uint32_t size;
+};
+
+#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002
+
+struct ext4_crypto_ctx {
+ struct crypto_tfm *tfm; /* Crypto API context */
+ struct page *bounce_page; /* Ciphertext page on write path */
+ struct page *control_page; /* Original page on write path */
+ struct bio *bio; /* The bio for this context */
+ struct work_struct work; /* Work queue for read complete path */
+ struct list_head free_list; /* Free list */
+ int flags; /* Flags */
+ int mode; /* Encryption mode for tfm */
+};
+
+struct ext4_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
+ struct ext4_completion_result ecr = { \
+ COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+static inline int ext4_encryption_key_size(int mode)
+{
+ switch (mode) {
+ case EXT4_ENCRYPTION_MODE_AES_256_XTS:
+ return EXT4_AES_256_XTS_KEY_SIZE;
+ case EXT4_ENCRYPTION_MODE_AES_256_GCM:
+ return EXT4_AES_256_GCM_KEY_SIZE;
+ case EXT4_ENCRYPTION_MODE_AES_256_CBC:
+ return EXT4_AES_256_CBC_KEY_SIZE;
+ case EXT4_ENCRYPTION_MODE_AES_256_CTS:
+ return EXT4_AES_256_CTS_KEY_SIZE;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+#define EXT4_FNAME_NUM_SCATTER_ENTRIES 4
+#define EXT4_CRYPTO_BLOCK_SIZE 16
+#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32
+
+struct ext4_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct ext4_fname_crypto_ctx {
+ u32 lim;
+ char tmp_buf[EXT4_CRYPTO_BLOCK_SIZE];
+ struct crypto_ablkcipher *ctfm;
+ struct crypto_hash *htfm;
+ struct page *workpage;
+ struct ext4_encryption_key key;
+ unsigned flags : 8;
+ unsigned has_valid_key : 1;
+ unsigned ctfm_key_is_ready : 1;
+};
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct ext4_encrypted_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __attribute__((__packed__));
+
+/**
+ * This function is used to calculate the disk space required to
+ * store an encrypted symlink target of length l.
+ */
+static inline u32 encrypted_symlink_data_len(u32 l)
+{
+ if (l < EXT4_CRYPTO_BLOCK_SIZE)
+ l = EXT4_CRYPTO_BLOCK_SIZE;
+ return (l + sizeof(struct ext4_encrypted_symlink_data) - 1);
+}
+
+#endif /* _EXT4_CRYPTO_H */
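As a worked example of encrypted_symlink_data_len(): the stored blob is the 2-byte length header plus the ciphertext, and the ciphertext is never shorter than one EXT4_CRYPTO_BLOCK_SIZE block. A stand-alone version of the same arithmetic (the uint16_t stand-in for __le16 is a user-space assumption):

#include <stdint.h>
#include <stdio.h>

#define EXT4_CRYPTO_BLOCK_SIZE 16

/* User-space stand-in for struct ext4_encrypted_symlink_data. */
struct encrypted_symlink_data {
        uint16_t len;
        char encrypted_path[1];
} __attribute__((__packed__));

/* Mirror of encrypted_symlink_data_len(): at least one crypto block of
 * ciphertext, plus the header (the [1] array already accounts for the
 * first path byte, hence the "- 1"). */
static unsigned symlink_data_len(unsigned l)
{
        if (l < EXT4_CRYPTO_BLOCK_SIZE)
                l = EXT4_CRYPTO_BLOCK_SIZE;
        return l + sizeof(struct encrypted_symlink_data) - 1;
}

int main(void)
{
        printf("5-byte target  -> %u bytes\n", symlink_data_len(5));   /* 18 */
        printf("40-byte target -> %u bytes\n", symlink_data_len(40));  /* 42 */
        return 0;
}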
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 3445035c7e01..d41843181818 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
ext4_put_nojournal(handle);
return 0;
}
+
+ if (!handle->h_transaction) {
+ err = jbd2_journal_stop(handle);
+ return handle->h_err ? handle->h_err : err;
+ }
+
sb = handle->h_transaction->t_journal->j_private;
err = handle->h_err;
rc = jbd2_journal_stop(handle);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index bed43081720f..e003a1e81dc3 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -377,7 +377,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
ext4_lblk_t last = lblock + len - 1;
- if (lblock > last)
+ if (len == 0 || lblock > last)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
@@ -1717,12 +1717,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
{
unsigned short ext1_ee_len, ext2_ee_len;
- /*
- * Make sure that both extents are initialized. We don't merge
- * unwritten extents so that we can be sure that end_io code has
- * the extent that was written properly split out and conversion to
- * initialized is trivial.
- */
if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
return 0;
@@ -3128,6 +3122,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
+ if (ext4_encrypted_inode(inode))
+ return ext4_encrypted_zeroout(inode, ex);
+
ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
if (ret > 0)
ret = 0;
@@ -4535,19 +4532,7 @@ got_allocated_blocks:
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
- if (map_from_cluster) {
- if (reserved_clusters) {
- /*
- * We have clusters reserved for this range.
- * But since we are not doing actual allocation
- * and are simply using blocks from previously
- * allocated cluster, we should release the
- * reservation and not claim quota.
- */
- ext4_da_update_reserve_space(inode,
- reserved_clusters, 0);
- }
- } else {
+ if (!map_from_cluster) {
BUG_ON(allocated_clusters < reserved_clusters);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -4803,12 +4788,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
else
max_blocks -= lblk;
- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
- EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
- EXT4_EX_NOCACHE;
- if (mode & FALLOC_FL_KEEP_SIZE)
- flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
-
mutex_lock(&inode->i_mutex);
/*
@@ -4825,15 +4804,28 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out_mutex;
- /*
- * If we have a partial block after EOF we have to allocate
- * the entire block.
- */
- if (partial_end)
- max_blocks += 1;
}
+ flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+
+ /* Preallocate the range including the unaligned edges */
+ if (partial_begin || partial_end) {
+ ret = ext4_alloc_file_blocks(file,
+ round_down(offset, 1 << blkbits) >> blkbits,
+ (round_up((offset + len), 1 << blkbits) -
+ round_down(offset, 1 << blkbits)) >> blkbits,
+ new_size, flags, mode);
+ if (ret)
+ goto out_mutex;
+
+ }
+
+ /* Zero range excluding the unaligned edges */
if (max_blocks > 0) {
+ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+ EXT4_EX_NOCACHE);
/* Now release the pages and zero block aligned part of pages*/
truncate_pagecache_range(inode, start, end - 1);
@@ -4847,19 +4839,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
flags, mode);
if (ret)
goto out_dio;
- /*
- * Remove entire range from the extent status tree.
- *
- * ext4_es_remove_extent(inode, lblk, max_blocks) is
- * NOT sufficient. I'm not sure why this is the case,
- * but let's be conservative and remove the extent
- * status tree for the entire inode. There should be
- * no outstanding delalloc extents thanks to the
- * filemap_write_and_wait_range() call above.
- */
- ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
- if (ret)
- goto out_dio;
}
if (!partial_begin && !partial_end)
goto out_dio;
@@ -4922,6 +4901,20 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
ext4_lblk_t lblk;
unsigned int blkbits = inode->i_blkbits;
+ /*
+ * Encrypted inodes can't handle collapse range or insert
+ * range since we would need to re-encrypt blocks with a
+ * different IV or XTS tweak (which are based on the logical
+ * block number).
+ *
+ * XXX It's not clear why zero range isn't working, but we'll
+ * leave it disabled for encrypted inodes for now. This is a
+ * bug we should fix....
+ */
+ if (ext4_encrypted_inode(inode) &&
+ (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)))
+ return -EOPNOTSUPP;
+
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
@@ -4934,13 +4927,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (ret)
return ret;
- /*
- * currently supporting (pre)allocate mode for extent-based
- * files _only_
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return -EOPNOTSUPP;
-
if (mode & FALLOC_FL_COLLAPSE_RANGE)
return ext4_collapse_range(inode, offset, len);
@@ -4962,6 +4948,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
mutex_lock(&inode->i_mutex);
+ /*
+ * We only support preallocation for extent-based files
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
@@ -5402,6 +5396,14 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
loff_t new_size, ioffset;
int ret;
+ /*
+ * We need to test this early because xfstests assumes that a
+ * collapse range of (0, 1) will return EOPNOTSUPP if the file
+ * system does not support collapse range.
+ */
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ return -EOPNOTSUPP;
+
/* Collapse range works only on fs block size aligned offsets. */
if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
len & (EXT4_CLUSTER_SIZE(sb) - 1))
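The ext4_fallocate() hunk above rejects collapse range (and, for now, zero range) on encrypted inodes because the per-block XTS tweak is derived from the logical block number, so moving blocks would require re-encrypting them. A userspace sketch of the resulting behaviour, assuming a hypothetical file inside an encrypted directory and a 4KiB block size:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; the file is assumed to be encrypted. */
	int fd = open("/mnt/encdir/data.bin", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Collapsing the first block should now fail early. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096) < 0)
		printf("collapse range: %s (EOPNOTSUPP expected)\n",
		       strerror(errno));
	close(fd);
	return 0;
}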
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index e04d45733976..26724aeece73 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -9,12 +9,10 @@
*
* Ext4 extents status tree core functions.
*/
-#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
-#include "extents_status.h"
#include <trace/events/ext4.h>
@@ -705,6 +703,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
BUG_ON(end < lblk);
+ if ((status & EXTENT_STATUS_DELAYED) &&
+ (status & EXTENT_STATUS_WRITTEN)) {
+ ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
+ " delayed and written which can potentially "
+ " cause data loss.\n", lblk, len);
+ WARN_ON(1);
+ }
+
newes.es_lblk = lblk;
newes.es_len = len;
ext4_es_store_pblock_status(&newes, pblk, status);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 33a09da16c9c..0613c256c344 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -20,12 +20,11 @@
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
-#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
+#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -95,11 +94,9 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file_inode(iocb->ki_filp);
struct mutex *aio_mutex = NULL;
struct blk_plug plug;
- int o_direct = io_is_direct(file);
+ int o_direct = iocb->ki_flags & IOCB_DIRECT;
int overwrite = 0;
- size_t length = iov_iter_count(from);
ssize_t ret;
- loff_t pos = iocb->ki_pos;
/*
* Unaligned direct AIO must be serialized; see comment above
@@ -108,16 +105,17 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (o_direct &&
ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
!is_sync_kiocb(iocb) &&
- (file->f_flags & O_APPEND ||
- ext4_unaligned_aio(inode, from, pos))) {
+ (iocb->ki_flags & IOCB_APPEND ||
+ ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
aio_mutex = ext4_aio_mutex(inode);
mutex_lock(aio_mutex);
ext4_unwritten_wait(inode);
}
mutex_lock(&inode->i_mutex);
- if (file->f_flags & O_APPEND)
- iocb->ki_pos = pos = i_size_read(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out;
/*
* If we have encountered a bitmap-format file, the size limit
@@ -126,22 +124,19 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- if ((pos > sbi->s_bitmap_maxbytes) ||
- (pos == sbi->s_bitmap_maxbytes && length > 0)) {
- mutex_unlock(&inode->i_mutex);
+ if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
ret = -EFBIG;
- goto errout;
+ goto out;
}
-
- if (pos + length > sbi->s_bitmap_maxbytes)
- iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
+ iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
}
iocb->private = &overwrite;
if (o_direct) {
+ size_t length = iov_iter_count(from);
+ loff_t pos = iocb->ki_pos;
blk_start_plug(&plug);
-
/* check whether we do a DIO overwrite or not */
if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
!file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
@@ -185,7 +180,12 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (o_direct)
blk_finish_plug(&plug);
-errout:
+ if (aio_mutex)
+ mutex_unlock(aio_mutex);
+ return ret;
+
+out:
+ mutex_unlock(&inode->i_mutex);
if (aio_mutex)
mutex_unlock(aio_mutex);
return ret;
@@ -206,6 +206,7 @@ static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
static const struct vm_operations_struct ext4_dax_vm_ops = {
.fault = ext4_dax_fault,
.page_mkwrite = ext4_dax_mkwrite,
+ .pfn_mkwrite = dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
@@ -219,6 +220,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct inode *inode = file->f_mapping->host;
+
+ if (ext4_encrypted_inode(inode)) {
+ int err = ext4_generate_encryption_key(inode);
+ if (err)
+ return 0;
+ }
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
@@ -236,6 +244,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
+ int ret;
if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
!(sb->s_flags & MS_RDONLY))) {
@@ -274,11 +283,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* writing and the journal is present
*/
if (filp->f_mode & FMODE_WRITE) {
- int ret = ext4_inode_attach_jinode(inode);
+ ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
return ret;
}
- return dquot_file_open(inode, filp);
+ ret = dquot_file_open(inode, filp);
+ if (!ret && ext4_encrypted_inode(inode)) {
+ ret = ext4_generate_encryption_key(inode);
+ if (ret)
+ ret = -EACCES;
+ }
+ return ret;
}
/*
@@ -607,8 +622,6 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
const struct file_operations ext4_file_operations = {
.llseek = ext4_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = ext4_file_write_iter,
.unlocked_ioctl = ext4_ioctl,
@@ -624,26 +637,6 @@ const struct file_operations ext4_file_operations = {
.fallocate = ext4_fallocate,
};
-#ifdef CONFIG_FS_DAX
-const struct file_operations ext4_dax_file_operations = {
- .llseek = ext4_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
- .read_iter = generic_file_read_iter,
- .write_iter = ext4_file_write_iter,
- .unlocked_ioctl = ext4_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ext4_compat_ioctl,
-#endif
- .mmap = ext4_file_mmap,
- .open = ext4_file_open,
- .release = ext4_release_file,
- .fsync = ext4_sync_file,
- /* Splice not yet supported with DAX */
- .fallocate = ext4_fallocate,
-};
-#endif
-
const struct inode_operations ext4_file_inode_operations = {
.setattr = ext4_setattr,
.getattr = ext4_getattr,
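With the ext4_file_open() and ext4_file_mmap() changes above, the per-inode encryption key must be set up before an encrypted regular file can be used: a failing ext4_generate_encryption_key() turns into -EACCES at open time. A userspace sketch (not part of the patch) of that failure mode when no matching "ext4:"-prefixed key is in the keyring; the path is hypothetical:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/encdir/secret.txt", O_RDONLY);

	if (fd < 0) {
		/* EACCES here indicates the encryption key could not be set up. */
		printf("open failed: %s\n", strerror(errno));
		return 1;
	}
	printf("key available, file opened\n");
	close(fd);
	return 0;
}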
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index a8bc47f75fa0..8850254136ae 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
-#include <linux/jbd2.h>
#include <linux/blkdev.h>
#include "ext4.h"
@@ -56,7 +55,7 @@ static int ext4_sync_parent(struct inode *inode)
dentry = d_find_any_alias(inode);
if (!dentry)
break;
- next = igrab(dentry->d_parent->d_inode);
+ next = igrab(d_inode(dentry->d_parent));
dput(dentry);
if (!next)
break;
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index 3d586f02883e..e026aa941fd5 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -10,7 +10,6 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/cryptohash.h>
#include "ext4.h"
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index ac644c31ca67..1eaa6cb96cd0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -14,7 +14,6 @@
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
@@ -444,7 +443,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
if (S_ISDIR(mode) &&
- ((parent == sb->s_root->d_inode) ||
+ ((parent == d_inode(sb->s_root)) ||
(ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
int best_ndir = inodes_per_group;
int ret = -1;
@@ -997,6 +996,12 @@ got:
ei->i_block_group = group;
ei->i_last_alloc_group = ~0;
+ /* If the directory is encrypted, then we should encrypt the inode. */
+ if ((S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) &&
+ (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(sbi)))
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+
ext4_set_inode_flags(inode);
if (IS_DIRSYNC(inode))
ext4_handle_sync(handle);
@@ -1029,11 +1034,28 @@ got:
ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
-
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if ((sbi->s_file_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID) &&
+ (sbi->s_dir_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID)) {
+ ei->i_inline_off = 0;
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA))
+ ext4_set_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA);
+ } else {
+ /* Inline data and encryption are incompatible,
+ * so turn off inline data since encryption is enabled. */
+ ei->i_inline_off = 1;
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA))
+ ext4_clear_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA);
+ }
+#else
ei->i_inline_off = 0;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
-
+#endif
ret = inode;
err = dquot_alloc_inode(inode);
if (err)
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 45fe924f82bc..958824019509 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -20,9 +20,9 @@
* (sct@redhat.com), 1993, 1998
*/
-#include <linux/aio.h>
#include "ext4_jbd2.h"
#include "truncate.h"
+#include <linux/uio.h>
#include <trace/events/ext4.h>
@@ -642,8 +642,8 @@ out:
* crashes then stale disk data _may_ be exposed inside the file. But current
* VFS code falls back into buffered path in that case so we are safe.
*/
-ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -654,7 +654,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
int retries = 0;
- if (rw == WRITE) {
+ if (iov_iter_rw(iter) == WRITE) {
loff_t final_size = offset + count;
if (final_size > inode->i_size) {
@@ -676,37 +676,38 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
}
retry:
- if (rw == READ && ext4_should_dioread_nolock(inode)) {
+ if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
/*
* Nolock dioread optimization may be dynamically disabled
* via ext4_inode_block_unlocked_dio(). Check inode's state
* while holding extra i_dio_count ref.
*/
- atomic_inc(&inode->i_dio_count);
+ inode_dio_begin(inode);
smp_mb();
if (unlikely(ext4_test_inode_state(inode,
EXT4_STATE_DIOREAD_LOCK))) {
- inode_dio_done(inode);
+ inode_dio_end(inode);
goto locked;
}
if (IS_DAX(inode))
- ret = dax_do_io(rw, iocb, inode, iter, offset,
+ ret = dax_do_io(iocb, inode, iter, offset,
ext4_get_block, NULL, 0);
else
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iter, offset,
- ext4_get_block, NULL, NULL, 0);
- inode_dio_done(inode);
+ ret = __blockdev_direct_IO(iocb, inode,
+ inode->i_sb->s_bdev, iter,
+ offset, ext4_get_block, NULL,
+ NULL, 0);
+ inode_dio_end(inode);
} else {
locked:
if (IS_DAX(inode))
- ret = dax_do_io(rw, iocb, inode, iter, offset,
+ ret = dax_do_io(iocb, inode, iter, offset,
ext4_get_block, NULL, DIO_LOCKING);
else
- ret = blockdev_direct_IO(rw, iocb, inode, iter,
- offset, ext4_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset,
+ ext4_get_block);
- if (unlikely((rw & WRITE) && ret < 0)) {
+ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + count;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 4b143febf21f..095c7a258d97 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -11,11 +11,13 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+#include <linux/fiemap.h>
+
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "truncate.h"
-#include <linux/fiemap.h>
#define EXT4_XATTR_SYSTEM_DATA "data"
#define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -972,7 +974,7 @@ void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
offset = 0;
while ((void *)de < dlimit) {
de_len = ext4_rec_len_from_disk(de->rec_len, inline_size);
- trace_printk("de: off %u rlen %u name %*.s nlen %u ino %u\n",
+ trace_printk("de: off %u rlen %u name %.*s nlen %u ino %u\n",
offset, de_len, de->name_len, de->name,
de->name_len, le32_to_cpu(de->inode));
if (ext4_check_dir_entry(dir, NULL, de, bh,
@@ -998,7 +1000,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
struct ext4_iloc *iloc,
void *inline_start, int inline_size)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
int err;
@@ -1014,7 +1016,8 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err)
return err;
- ext4_insert_dentry(inode, de, inline_size, name, namelen);
+ ext4_insert_dentry(dir, inode, de, inline_size, &dentry->d_name,
+ name, namelen);
ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
@@ -1251,7 +1254,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
int ret, inline_size;
void *inline_start;
struct ext4_iloc iloc;
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
ret = ext4_get_inode_loc(dir, &iloc);
if (ret)
@@ -1327,6 +1330,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
struct ext4_iloc iloc;
void *dir_buf = NULL;
struct ext4_dir_entry_2 fake;
+ struct ext4_str tmp_str;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -1398,8 +1402,10 @@ int htree_inlinedir_to_tree(struct file *dir_file,
continue;
if (de->inode == 0)
continue;
- err = ext4_htree_store_dirent(dir_file,
- hinfo->hash, hinfo->minor_hash, de);
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file, hinfo->hash,
+ hinfo->minor_hash, de, &tmp_str);
if (err) {
count = err;
goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5cb9a212b86f..0554b0b5957b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -20,7 +20,6 @@
#include <linux/fs.h>
#include <linux/time.h>
-#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
@@ -36,8 +35,6 @@
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
-#include <linux/ratelimit.h>
-#include <linux/aio.h>
#include <linux/bitops.h>
#include "ext4_jbd2.h"
@@ -141,7 +138,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
/*
* Test whether an inode is a fast symlink.
*/
-static int ext4_inode_is_fast_symlink(struct inode *inode)
+int ext4_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
@@ -534,6 +531,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+ !(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
@@ -638,6 +636,7 @@ found:
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+ !(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
@@ -888,6 +887,95 @@ int do_journal_get_write_access(handle_t *handle,
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block)
+{
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned to = from + len;
+ struct inode *inode = page->mapping->host;
+ unsigned block_start, block_end;
+ sector_t block;
+ int err = 0;
+ unsigned blocksize = inode->i_sb->s_blocksize;
+ unsigned bbits;
+ struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
+ bool decrypt = false;
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(from > PAGE_CACHE_SIZE);
+ BUG_ON(to > PAGE_CACHE_SIZE);
+ BUG_ON(from > to);
+
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize, 0);
+ head = page_buffers(page);
+ bbits = ilog2(blocksize);
+ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+
+ for (bh = head, block_start = 0; bh != head || !block_start;
+ block++, block_start = block_end, bh = bh->b_this_page) {
+ block_end = block_start + blocksize;
+ if (block_end <= from || block_start >= to) {
+ if (PageUptodate(page)) {
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+ }
+ continue;
+ }
+ if (buffer_new(bh))
+ clear_buffer_new(bh);
+ if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
+ err = get_block(inode, block, bh, 1);
+ if (err)
+ break;
+ if (buffer_new(bh)) {
+ unmap_underlying_metadata(bh->b_bdev,
+ bh->b_blocknr);
+ if (PageUptodate(page)) {
+ clear_buffer_new(bh);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ continue;
+ }
+ if (block_end > to || block_start < from)
+ zero_user_segments(page, to, block_end,
+ block_start, from);
+ continue;
+ }
+ }
+ if (PageUptodate(page)) {
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+ continue;
+ }
+ if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+ !buffer_unwritten(bh) &&
+ (block_start < from || block_end > to)) {
+ ll_rw_block(READ, 1, &bh);
+ *wait_bh++ = bh;
+ decrypt = ext4_encrypted_inode(inode) &&
+ S_ISREG(inode->i_mode);
+ }
+ }
+ /*
+ * If we issued read requests, let them complete.
+ */
+ while (wait_bh > wait) {
+ wait_on_buffer(*--wait_bh);
+ if (!buffer_uptodate(*wait_bh))
+ err = -EIO;
+ }
+ if (unlikely(err))
+ page_zero_new_buffers(page, from, to);
+ else if (decrypt)
+ err = ext4_decrypt_one(inode, page);
+ return err;
+}
+#endif
+
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -950,11 +1038,19 @@ retry_journal:
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ext4_should_dioread_nolock(inode))
+ ret = ext4_block_write_begin(page, pos, len,
+ ext4_get_block_write);
+ else
+ ret = ext4_block_write_begin(page, pos, len,
+ ext4_get_block);
+#else
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len, ext4_get_block_write);
else
ret = __block_write_begin(page, pos, len, ext4_get_block);
-
+#endif
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, page_buffers(page),
from, to, NULL,
@@ -2576,7 +2672,12 @@ retry_journal:
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ret = ext4_block_write_begin(page, pos, len,
+ ext4_da_get_block_prep);
+#else
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
+#endif
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
@@ -2822,7 +2923,7 @@ static int ext4_readpage(struct file *file, struct page *page)
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
- return mpage_readpage(page, ext4_get_block);
+ return ext4_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
@@ -2837,7 +2938,7 @@ ext4_readpages(struct file *file, struct address_space *mapping,
if (ext4_has_inline_data(inode))
return 0;
- return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
+ return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
@@ -2953,8 +3054,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
* if the machine crashes during the write.
*
*/
-static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -2967,8 +3068,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
ext4_io_end_t *io_end = NULL;
/* Use the old path for reads and writes beyond i_size. */
- if (rw != WRITE || final_size > inode->i_size)
- return ext4_ind_direct_IO(rw, iocb, iter, offset);
+ if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
+ return ext4_ind_direct_IO(iocb, iter, offset);
BUG_ON(iocb->private == NULL);
@@ -2977,8 +3078,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
* conversion. This also disallows race between truncate() and
* overwrite DIO as i_dio_count needs to be incremented under i_mutex.
*/
- if (rw == WRITE)
- atomic_inc(&inode->i_dio_count);
+ if (iov_iter_rw(iter) == WRITE)
+ inode_dio_begin(inode);
/* If we do a overwrite dio, i_mutex locking can be released */
overwrite = *((int *)iocb->private);
@@ -3034,11 +3135,14 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#endif
if (IS_DAX(inode))
- ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func,
+ ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
ext4_end_io_dio, dio_flags);
else
- ret = __blockdev_direct_IO(rw, iocb, inode,
+ ret = __blockdev_direct_IO(iocb, inode,
inode->i_sb->s_bdev, iter, offset,
get_block_func,
ext4_end_io_dio, NULL, dio_flags);
@@ -3079,8 +3183,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
}
retake_lock:
- if (rw == WRITE)
- inode_dio_done(inode);
+ if (iov_iter_rw(iter) == WRITE)
+ inode_dio_end(inode);
/* take i_mutex locking again if we do an overwrite dio */
if (overwrite) {
up_read(&EXT4_I(inode)->i_data_sem);
@@ -3090,14 +3194,19 @@ retake_lock:
return ret;
}
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+ return 0;
+#endif
+
/*
* If we are doing data journalling we don't support O_DIRECT
*/
@@ -3108,12 +3217,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
if (ext4_has_inline_data(inode))
return 0;
- trace_ext4_direct_IO_enter(inode, offset, count, rw);
+ trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
+ ret = ext4_ext_direct_IO(iocb, iter, offset);
else
- ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
- trace_ext4_direct_IO_exit(inode, offset, count, rw, ret);
+ ret = ext4_ind_direct_IO(iocb, iter, offset);
+ trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
return ret;
}
@@ -3262,6 +3371,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
+ if (S_ISREG(inode->i_mode) &&
+ ext4_encrypted_inode(inode)) {
+ /* We expect the key to be set. */
+ BUG_ON(!ext4_has_encryption_key(inode));
+ BUG_ON(blocksize != PAGE_CACHE_SIZE);
+ WARN_ON_ONCE(ext4_decrypt_one(inode, page));
+ }
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
@@ -4091,16 +4207,14 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
- if (test_opt(inode->i_sb, DAX))
- inode->i_fop = &ext4_dax_file_operations;
- else
- inode->i_fop = &ext4_file_operations;
+ inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
- if (ext4_inode_is_fast_symlink(inode)) {
+ if (ext4_inode_is_fast_symlink(inode) &&
+ !ext4_encrypted_inode(inode)) {
inode->i_op = &ext4_fast_symlink_inode_operations;
nd_terminate_link(ei->i_data, inode->i_size,
sizeof(ei->i_data) - 1);
@@ -4231,7 +4345,7 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
int inode_size = EXT4_INODE_SIZE(sb);
oi.orig_ino = orig_ino;
- ino = orig_ino & ~(inodes_per_block - 1);
+ ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
if (ino == orig_ino)
continue;
@@ -4525,7 +4639,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
*/
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error, rc = 0;
int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
@@ -4673,7 +4787,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct inode *inode;
unsigned long long delalloc_blocks;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
generic_fillattr(inode, stat);
/*
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index f58a0d106726..2cb9e178d1c5 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -8,12 +8,12 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/capability.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/file.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -196,6 +196,16 @@ journal_err_out:
return err;
}
+static int uuid_is_zero(__u8 u[16])
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ if (u[i])
+ return 0;
+ return 1;
+}
+
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -615,7 +625,78 @@ resizefs_out:
}
case EXT4_IOC_PRECACHE_EXTENTS:
return ext4_ext_precache(inode);
+ case EXT4_IOC_SET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_encryption_policy policy;
+ int err = 0;
+
+ if (copy_from_user(&policy,
+ (struct ext4_encryption_policy __user *)arg,
+ sizeof(policy))) {
+ err = -EFAULT;
+ goto encryption_policy_out;
+ }
+ err = ext4_process_policy(&policy, inode);
+encryption_policy_out:
+ return err;
+#else
+ return -EOPNOTSUPP;
+#endif
+ }
+ case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
+ int err, err2;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ handle_t *handle;
+
+ if (!ext4_sb_has_crypto(sb))
+ return -EOPNOTSUPP;
+ if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+ handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto pwsalt_err_exit;
+ }
+ err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+ if (err)
+ goto pwsalt_err_journal;
+ generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+ err = ext4_handle_dirty_metadata(handle, NULL,
+ sbi->s_sbh);
+ pwsalt_err_journal:
+ err2 = ext4_journal_stop(handle);
+ if (err2 && !err)
+ err = err2;
+ pwsalt_err_exit:
+ mnt_drop_write_file(filp);
+ if (err)
+ return err;
+ }
+ if (copy_to_user((void *) arg, sbi->s_es->s_encrypt_pw_salt,
+ 16))
+ return -EFAULT;
+ return 0;
+ }
+ case EXT4_IOC_GET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_encryption_policy policy;
+ int err = 0;
+
+ if (!ext4_encrypted_inode(inode))
+ return -ENOENT;
+ err = ext4_get_policy(inode, &policy);
+ if (err)
+ return err;
+ if (copy_to_user((void *)arg, &policy, sizeof(policy)))
+ return -EFAULT;
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+ }
default:
return -ENOTTY;
}
@@ -680,6 +761,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FITRIM:
case EXT4_IOC_RESIZE_FS:
case EXT4_IOC_PRECACHE_EXTENTS:
+ case EXT4_IOC_SET_ENCRYPTION_POLICY:
+ case EXT4_IOC_GET_ENCRYPTION_PWSALT:
+ case EXT4_IOC_GET_ENCRYPTION_POLICY:
break;
default:
return -ENOIOCTLCMD;
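The ioctl hunk above wires up three encryption ioctls; EXT4_IOC_GET_ENCRYPTION_PWSALT returns the 16-byte per-filesystem password salt, generating and journalling it on first use. A userspace sketch of a caller follows; the ioctl number is copied from the ext4.h hunk elsewhere in this series and should be treated as an assumption, as is the mount point:

#include <fcntl.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef EXT4_IOC_GET_ENCRYPTION_PWSALT
#define EXT4_IOC_GET_ENCRYPTION_PWSALT	_IOW('f', 20, __u8[16])
#endif

int main(void)
{
	unsigned char salt[16];
	int i, fd = open("/mnt/ext4", O_RDONLY);

	if (fd < 0 || ioctl(fd, EXT4_IOC_GET_ENCRYPTION_PWSALT, salt) < 0) {
		perror("EXT4_IOC_GET_ENCRYPTION_PWSALT");
		return 1;
	}
	for (i = 0; i < 16; i++)
		printf("%02x", salt[i]);
	printf("\n");
	close(fd);
	return 0;
}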
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 3cb267aee802..b52374e42102 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -475,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
owner[0] = i_uid_read(inode);
owner[1] = i_gid_read(inode);
- tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
+ tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
S_IFREG, NULL, goal, owner);
if (IS_ERR(tmp_inode)) {
retval = PTR_ERR(tmp_inode);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 28fe71a2904c..814f3beb4369 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/jbd2.h>
#include <linux/time.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
@@ -254,8 +253,9 @@ static struct dx_frame *dx_probe(const struct qstr *d_name,
struct dx_hash_info *hinfo,
struct dx_frame *frame);
static void dx_release(struct dx_frame *frames);
-static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
- struct dx_hash_info *hinfo, struct dx_map_entry map[]);
+static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+ unsigned blocksize, struct dx_hash_info *hinfo,
+ struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
struct dx_map_entry *offsets, int count, unsigned blocksize);
@@ -586,8 +586,10 @@ struct stats
unsigned bcount;
};
-static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
- int size, int show_names)
+static struct stats dx_show_leaf(struct inode *dir,
+ struct dx_hash_info *hinfo,
+ struct ext4_dir_entry_2 *de,
+ int size, int show_names)
{
unsigned names = 0, space = 0;
char *base = (char *) de;
@@ -600,12 +602,73 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_ent
{
if (show_names)
{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ int len;
+ char *name;
+ struct ext4_str fname_crypto_str
+ = {.name = NULL, .len = 0};
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ int res;
+
+ name = de->name;
+ len = de->name_len;
+ ctx = ext4_get_fname_crypto_ctx(dir,
+ EXT4_NAME_LEN);
+ if (IS_ERR(ctx)) {
+ printk(KERN_WARNING "Error acquiring"
+ " crypto ctxt--skipping crypto\n");
+ ctx = NULL;
+ }
+ if (ctx == NULL) {
+ /* Directory is not encrypted */
+ ext4fs_dirhash(de->name,
+ de->name_len, &h);
+ printk("%*.s:(U)%x.%u ", len,
+ name, h.hash,
+ (unsigned) ((char *) de
+ - base));
+ } else {
+ /* Directory is encrypted */
+ res = ext4_fname_crypto_alloc_buffer(
+ ctx, de->name_len,
+ &fname_crypto_str);
+ if (res < 0) {
+ printk(KERN_WARNING "Error "
+ "allocating crypto "
+ "buffer--skipping "
+ "crypto\n");
+ ext4_put_fname_crypto_ctx(&ctx);
+ ctx = NULL;
+ }
+ res = ext4_fname_disk_to_usr(ctx, NULL, de,
+ &fname_crypto_str);
+ if (res < 0) {
+ printk(KERN_WARNING "Error "
+ "converting filename "
+ "from disk to usr"
+ "\n");
+ name = "??";
+ len = 2;
+ } else {
+ name = fname_crypto_str.name;
+ len = fname_crypto_str.len;
+ }
+ ext4fs_dirhash(de->name, de->name_len,
+ &h);
+ printk("%*.s:(E)%x.%u ", len, name,
+ h.hash, (unsigned) ((char *) de
+ - base));
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(
+ &fname_crypto_str);
+ }
+#else
int len = de->name_len;
char *name = de->name;
- while (len--) printk("%c", *name++);
ext4fs_dirhash(de->name, de->name_len, &h);
- printk(":%x.%u ", h.hash,
+ printk("%*.s:%x.%u ", len, name, h.hash,
(unsigned) ((char *) de - base));
+#endif
}
space += EXT4_DIR_REC_LEN(de->name_len);
names++;
@@ -623,7 +686,6 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
unsigned count = dx_get_count(entries), names = 0, space = 0, i;
unsigned bcount = 0;
struct buffer_head *bh;
- int err;
printk("%i indexed blocks...\n", count);
for (i = 0; i < count; i++, entries++)
{
@@ -637,7 +699,8 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
continue;
stats = levels?
dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
- dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
+ dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *)
+ bh->b_data, blocksize, 0);
names += stats.names;
space += stats.space;
bcount += stats.bcount;
@@ -687,8 +750,28 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
if (hinfo->hash_version <= DX_HASH_TEA)
hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (d_name) {
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ int res;
+
+ /* Check if the directory is encrypted */
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx)) {
+ ret_err = ERR_PTR(PTR_ERR(ctx));
+ goto fail;
+ }
+ res = ext4_fname_usr_to_hash(ctx, d_name, hinfo);
+ if (res < 0) {
+ ret_err = ERR_PTR(res);
+ goto fail;
+ }
+ ext4_put_fname_crypto_ctx(&ctx);
+ }
+#else
if (d_name)
ext4fs_dirhash(d_name->name, d_name->len, hinfo);
+#endif
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
@@ -773,6 +856,7 @@ fail:
brelse(frame->bh);
frame--;
}
+
if (ret_err == ERR_PTR(ERR_BAD_DX_DIR))
ext4_warning(dir->i_sb,
"Corrupt dir inode %lu, running e2fsck is "
@@ -878,6 +962,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *top;
int err = 0, count = 0;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}, tmp_str;
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
@@ -889,6 +975,24 @@ static int htree_dirblock_to_tree(struct file *dir_file,
top = (struct ext4_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Check if the directory is encrypted */
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ brelse(bh);
+ return err;
+ }
+ if (ctx != NULL) {
+ err = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (err < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ brelse(bh);
+ return err;
+ }
+ }
+#endif
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
bh->b_data, bh->b_size,
@@ -904,14 +1008,37 @@ static int htree_dirblock_to_tree(struct file *dir_file,
continue;
if (de->inode == 0)
continue;
- if ((err = ext4_htree_store_dirent(dir_file,
- hinfo->hash, hinfo->minor_hash, de)) != 0) {
- brelse(bh);
- return err;
+ if (ctx == NULL) {
+ /* Directory is not encrypted */
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file,
+ hinfo->hash, hinfo->minor_hash, de,
+ &tmp_str);
+ } else {
+ /* Directory is encrypted */
+ err = ext4_fname_disk_to_usr(ctx, hinfo, de,
+ &fname_crypto_str);
+ if (err < 0) {
+ count = err;
+ goto errout;
+ }
+ err = ext4_htree_store_dirent(dir_file,
+ hinfo->hash, hinfo->minor_hash, de,
+ &fname_crypto_str);
+ }
+ if (err != 0) {
+ count = err;
+ goto errout;
}
count++;
}
+errout:
brelse(bh);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+#endif
return count;
}
@@ -935,6 +1062,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
int count = 0;
int ret, err;
__u32 hashval;
+ struct ext4_str tmp_str;
dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
start_hash, start_minor_hash));
@@ -970,14 +1098,22 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
/* Add '.' and '..' from the htree header */
if (!start_hash && !start_minor_hash) {
de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
- if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file, 0, 0,
+ de, &tmp_str);
+ if (err != 0)
goto errout;
count++;
}
if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
de = ext4_next_entry(de, dir->i_sb->s_blocksize);
- if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file, 2, 0,
+ de, &tmp_str);
+ if (err != 0)
goto errout;
count++;
}
@@ -1035,8 +1171,8 @@ static inline int search_dirblock(struct buffer_head *bh,
* Create map of hash values, offsets, and sizes, stored at end of block.
* Returns number of entries mapped.
*/
-static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
- struct dx_hash_info *hinfo,
+static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+ unsigned blocksize, struct dx_hash_info *hinfo,
struct dx_map_entry *map_tail)
{
int count = 0;
@@ -1106,57 +1242,89 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
* `len <= EXT4_NAME_LEN' is guaranteed by caller.
* `de != NULL' is guaranteed by caller.
*/
-static inline int ext4_match (int len, const char * const name,
- struct ext4_dir_entry_2 * de)
+static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx,
+ struct ext4_str *fname_crypto_str,
+ int len, const char * const name,
+ struct ext4_dir_entry_2 *de)
{
- if (len != de->name_len)
- return 0;
+ int res;
+
if (!de->inode)
return 0;
- return !memcmp(name, de->name, len);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ctx)
+ return ext4_fname_match(ctx, fname_crypto_str, len, name, de);
+#endif
+ if (len != de->name_len)
+ return 0;
+ res = memcmp(name, de->name, len);
+ return (res == 0) ? 1 : 0;
}
/*
* Returns 0 if not found, -1 on failure, and 1 on success
*/
-int search_dir(struct buffer_head *bh,
- char *search_buf,
- int buf_size,
- struct inode *dir,
- const struct qstr *d_name,
- unsigned int offset,
- struct ext4_dir_entry_2 **res_dir)
+int search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ struct inode *dir, const struct qstr *d_name,
+ unsigned int offset, struct ext4_dir_entry_2 **res_dir)
{
struct ext4_dir_entry_2 * de;
char * dlimit;
int de_len;
const char *name = d_name->name;
int namelen = d_name->len;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ int res;
+
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return -1;
de = (struct ext4_dir_entry_2 *)search_buf;
dlimit = search_buf + buf_size;
while ((char *) de < dlimit) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
+ if ((char *) de + de->name_len <= dlimit) {
+ res = ext4_match(ctx, &fname_crypto_str, namelen,
+ name, de);
+ if (res < 0) {
+ res = -1;
+ goto return_result;
+ }
+ if (res > 0) {
+ /* found a match - just to be sure, do
+ * a full check */
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ bh->b_data,
+ bh->b_size, offset)) {
+ res = -1;
+ goto return_result;
+ }
+ *res_dir = de;
+ res = 1;
+ goto return_result;
+ }
- if ((char *) de + namelen <= dlimit &&
- ext4_match (namelen, name, de)) {
- /* found a match - just to be sure, do a full check */
- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
- bh->b_size, offset))
- return -1;
- *res_dir = de;
- return 1;
}
/* prevent looping on a bad block */
de_len = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
- if (de_len <= 0)
- return -1;
+ if (de_len <= 0) {
+ res = -1;
+ goto return_result;
+ }
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
- return 0;
+
+ res = 0;
+return_result:
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return res;
}
static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
@@ -1345,6 +1513,9 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
ext4_lblk_t block;
int retval;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ *res_dir = NULL;
+#endif
frame = dx_probe(d_name, dir, &hinfo, frames);
if (IS_ERR(frame))
return (struct buffer_head *) frame;
@@ -1417,6 +1588,18 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
ino);
return ERR_PTR(-EIO);
}
+ if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) &&
+ !ext4_is_child_context_consistent_with_parent(dir,
+ inode)) {
+ iput(inode);
+ ext4_warning(inode->i_sb,
+ "Inconsistent encryption contexts: %lu/%lu\n",
+ (unsigned long) dir->i_ino,
+ (unsigned long) inode->i_ino);
+ return ERR_PTR(-EPERM);
+ }
}
return d_splice_alias(inode, dentry);
}
@@ -1429,7 +1612,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
struct ext4_dir_entry_2 * de;
struct buffer_head *bh;
- bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
+ bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL);
if (IS_ERR(bh))
return (struct dentry *) bh;
if (!bh)
@@ -1437,13 +1620,13 @@ struct dentry *ext4_get_parent(struct dentry *child)
ino = le32_to_cpu(de->inode);
brelse(bh);
- if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
- EXT4_ERROR_INODE(child->d_inode,
+ if (!ext4_valid_inum(d_inode(child)->i_sb, ino)) {
+ EXT4_ERROR_INODE(d_inode(child),
"bad parent inode number: %u", ino);
return ERR_PTR(-EIO);
}
- return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
+ return d_obtain_alias(ext4_iget_normal(d_inode(child)->i_sb, ino));
}
/*
@@ -1541,7 +1724,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
/* create map in the end of data2 block */
map = (struct dx_map_entry *) (data2 + blocksize);
- count = dx_make_map((struct ext4_dir_entry_2 *) data1,
+ count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1,
blocksize, hinfo, map);
map -= count;
dx_sort_map(map, count);
@@ -1564,7 +1747,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
hash2, split, count-split));
/* Fancy dance to stay within two buffers */
- de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
+ de2 = dx_move_dirents(data1, data2, map + split, count - split,
+ blocksize);
de = dx_pack_dirents(data1, blocksize);
de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
(char *) de,
@@ -1580,8 +1764,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
initialize_dirent_tail(t, blocksize);
}
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
+ dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data1,
+ blocksize, 1));
+ dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
+ blocksize, 1));
/* Which block gets the new entry? */
if (hinfo->hash >= hash2) {
@@ -1618,15 +1804,40 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
int nlen, rlen;
unsigned int offset = 0;
char *top;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ int res;
+
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return -1;
+
+ if (ctx != NULL) {
+ /* Calculate record length needed to store the entry */
+ res = ext4_fname_crypto_namelen_on_disk(ctx, namelen);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return res;
+ }
+ reclen = EXT4_DIR_REC_LEN(res);
+ }
de = (struct ext4_dir_entry_2 *)buf;
top = buf + buf_size - reclen;
while ((char *) de <= top) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
- buf, buf_size, offset))
- return -EIO;
- if (ext4_match(namelen, name, de))
- return -EEXIST;
+ buf, buf_size, offset)) {
+ res = -EIO;
+ goto return_result;
+ }
+ /* Provide crypto context and crypto buffer to ext4 match */
+ res = ext4_match(ctx, &fname_crypto_str, namelen, name, de);
+ if (res < 0)
+ goto return_result;
+ if (res > 0) {
+ res = -EEXIST;
+ goto return_result;
+ }
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
if ((de->inode ? rlen - nlen : rlen) >= reclen)
@@ -1634,26 +1845,62 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
offset += rlen;
}
- if ((char *) de > top)
- return -ENOSPC;
- *dest_de = de;
- return 0;
+ if ((char *) de > top)
+ res = -ENOSPC;
+ else {
+ *dest_de = de;
+ res = 0;
+ }
+return_result:
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return res;
}
-void ext4_insert_dentry(struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int buf_size,
- const char *name, int namelen)
+int ext4_insert_dentry(struct inode *dir,
+ struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+ const struct qstr *iname,
+ const char *name, int namelen)
{
int nlen, rlen;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ struct ext4_str tmp_str;
+ int res;
+
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return -EIO;
+ /* By default, the input name will be written to the disk */
+ tmp_str.name = (unsigned char *)name;
+ tmp_str.len = namelen;
+ if (ctx != NULL) {
+ /* Directory is encrypted */
+ res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return -ENOMEM;
+ }
+ res = ext4_fname_usr_to_disk(ctx, iname, &fname_crypto_str);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return res;
+ }
+ tmp_str.name = fname_crypto_str.name;
+ tmp_str.len = fname_crypto_str.len;
+ }
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
if (de->inode) {
struct ext4_dir_entry_2 *de1 =
- (struct ext4_dir_entry_2 *)((char *)de + nlen);
+ (struct ext4_dir_entry_2 *)((char *)de + nlen);
de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
de = de1;
@@ -1661,9 +1908,14 @@ void ext4_insert_dentry(struct inode *inode,
de->file_type = EXT4_FT_UNKNOWN;
de->inode = cpu_to_le32(inode->i_ino);
ext4_set_de_type(inode->i_sb, de, inode->i_mode);
- de->name_len = namelen;
- memcpy(de->name, name, namelen);
+ de->name_len = tmp_str.len;
+
+ memcpy(de->name, tmp_str.name, tmp_str.len);
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return 0;
}
+
/*
* Add a new entry into a directory (leaf) block. If de is non-NULL,
* it points to a directory entry which is guaranteed to be large
@@ -1676,7 +1928,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct ext4_dir_entry_2 *de,
struct buffer_head *bh)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned int blocksize = dir->i_sb->s_blocksize;
@@ -1700,8 +1952,12 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
return err;
}
- /* By now the buffer is marked for journaling */
- ext4_insert_dentry(inode, de, blocksize, name, namelen);
+ /* By now the buffer is marked for journaling. Due to crypto operations,
+ * the following function call may fail */
+ err = ext4_insert_dentry(dir, inode, de, blocksize, &dentry->d_name,
+ name, namelen);
+ if (err < 0)
+ return err;
/*
* XXX shouldn't update any times until successful
@@ -1732,9 +1988,14 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct buffer_head *bh)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ int res;
+#else
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
+#endif
struct buffer_head *bh2;
struct dx_root *root;
struct dx_frame frames[2], *frame;
@@ -1748,7 +2009,13 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct dx_hash_info hinfo;
ext4_lblk_t block;
struct fake_dirent *fde;
- int csum_size = 0;
+ int csum_size = 0;
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+#endif
if (ext4_has_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
@@ -1815,7 +2082,18 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ res = ext4_fname_usr_to_hash(ctx, &dentry->d_name, &hinfo);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_mark_inode_dirty(handle, dir);
+ brelse(bh);
+ return res;
+ }
+ ext4_put_fname_crypto_ctx(&ctx);
+#else
ext4fs_dirhash(name, namelen, &hinfo);
+#endif
memset(frames, 0, sizeof(frames));
frame = frames;
frame->entries = entries;
@@ -1864,8 +2142,8 @@ out_frames:
static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
- struct buffer_head *bh;
+ struct inode *dir = d_inode(dentry->d_parent);
+ struct buffer_head *bh = NULL;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
struct super_block *sb;
@@ -1889,14 +2167,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return retval;
if (retval == 1) {
retval = 0;
- return retval;
+ goto out;
}
}
if (is_dx(dir)) {
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
- return retval;
+ goto out;
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
@@ -1908,14 +2186,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return PTR_ERR(bh);
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
- if (retval != -ENOSPC) {
- brelse(bh);
- return retval;
- }
+ if (retval != -ENOSPC)
+ goto out;
if (blocks == 1 && !dx_fallback &&
- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
- return make_indexed_dir(handle, dentry, inode, bh);
+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ retval = make_indexed_dir(handle, dentry, inode, bh);
+ bh = NULL; /* make_indexed_dir releases bh */
+ goto out;
+ }
brelse(bh);
}
bh = ext4_append(handle, dir, &block);
@@ -1931,6 +2210,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
+out:
brelse(bh);
if (retval == 0)
ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -1947,7 +2227,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct dx_entry *entries, *at;
struct dx_hash_info hinfo;
struct buffer_head *bh;
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
struct super_block *sb = dir->i_sb;
struct ext4_dir_entry_2 *de;
int err;
@@ -2235,12 +2515,22 @@ retry:
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext4_file_inode_operations;
- if (test_opt(inode->i_sb, DAX))
- inode->i_fop = &ext4_dax_file_operations;
- else
- inode->i_fop = &ext4_file_operations;
+ inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
- err = ext4_add_nondir(handle, dentry, inode);
+ err = 0;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (!err && (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)))) {
+ err = ext4_inherit_context(dir, inode);
+ if (err) {
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ }
+ }
+#endif
+ if (!err)
+ err = ext4_add_nondir(handle, dentry, inode);
if (!err && IS_DIRSYNC(dir))
ext4_handle_sync(handle);
}
@@ -2302,10 +2592,7 @@ retry:
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext4_file_inode_operations;
- if (test_opt(inode->i_sb, DAX))
- inode->i_fop = &ext4_dax_file_operations;
- else
- inode->i_fop = &ext4_file_operations;
+ inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
d_tmpfile(dentry, inode);
err = ext4_orphan_add(handle, inode);
@@ -2424,6 +2711,14 @@ retry:
err = ext4_init_new_dir(handle, dir, inode);
if (err)
goto out_clear_inode;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) {
+ err = ext4_inherit_context(dir, inode);
+ if (err)
+ goto out_clear_inode;
+ }
+#endif
err = ext4_mark_inode_dirty(handle, inode);
if (!err)
err = ext4_add_entry(handle, dentry, inode);
@@ -2456,7 +2751,7 @@ out_stop:
/*
* routine to check that the specified directory is empty (for rmdir)
*/
-static int empty_dir(struct inode *inode)
+int ext4_empty_dir(struct inode *inode)
{
unsigned int offset;
struct buffer_head *bh;
@@ -2708,7 +3003,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
dquot_initialize(dir);
- dquot_initialize(dentry->d_inode);
+ dquot_initialize(d_inode(dentry));
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -2717,14 +3012,14 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
if (!bh)
goto end_rmdir;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_rmdir;
retval = -ENOTEMPTY;
- if (!empty_dir(inode))
+ if (!ext4_empty_dir(inode))
goto end_rmdir;
handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -2777,7 +3072,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
dquot_initialize(dir);
- dquot_initialize(dentry->d_inode);
+ dquot_initialize(d_inode(dentry));
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -2786,7 +3081,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (!bh)
goto end_unlink;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
@@ -2834,16 +3129,25 @@ static int ext4_symlink(struct inode *dir,
{
handle_t *handle;
struct inode *inode;
- int l, err, retries = 0;
+ int err, len = strlen(symname);
int credits;
-
- l = strlen(symname)+1;
- if (l > dir->i_sb->s_blocksize)
+ bool encryption_required;
+ struct ext4_str disk_link;
+ struct ext4_encrypted_symlink_data *sd = NULL;
+
+ disk_link.len = len + 1;
+ disk_link.name = (char *) symname;
+
+ encryption_required = (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)));
+ if (encryption_required)
+ disk_link.len = encrypted_symlink_data_len(len) + 1;
+ if (disk_link.len > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
dquot_initialize(dir);
- if (l > EXT4_N_BLOCKS * 4) {
+ if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
/*
* For non-fast symlinks, we just allocate inode and put it on
* orphan list in the first transaction => we need bitmap,
@@ -2862,16 +3166,49 @@ static int ext4_symlink(struct inode *dir,
credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
}
-retry:
+
inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
&dentry->d_name, 0, NULL,
EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_stop;
+ if (IS_ERR(inode)) {
+ if (handle)
+ ext4_journal_stop(handle);
+ return PTR_ERR(inode);
+ }
- if (l > EXT4_N_BLOCKS * 4) {
+ if (encryption_required) {
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct qstr istr;
+ struct ext4_str ostr;
+
+ sd = kzalloc(disk_link.len, GFP_NOFS);
+ if (!sd) {
+ err = -ENOMEM;
+ goto err_drop_inode;
+ }
+ err = ext4_inherit_context(dir, inode);
+ if (err)
+ goto err_drop_inode;
+ ctx = ext4_get_fname_crypto_ctx(inode,
+ inode->i_sb->s_blocksize);
+ if (IS_ERR_OR_NULL(ctx)) {
+ /* We just set the policy, so ctx should not be NULL */
+ err = (ctx == NULL) ? -EIO : PTR_ERR(ctx);
+ goto err_drop_inode;
+ }
+ istr.name = (const unsigned char *) symname;
+ istr.len = len;
+ ostr.name = sd->encrypted_path;
+ err = ext4_fname_usr_to_disk(ctx, &istr, &ostr);
+ ext4_put_fname_crypto_ctx(&ctx);
+ if (err < 0)
+ goto err_drop_inode;
+ sd->len = cpu_to_le16(ostr.len);
+ disk_link.name = (char *) sd;
+ }
+
+ if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
inode->i_op = &ext4_symlink_inode_operations;
ext4_set_aops(inode);
/*
@@ -2887,9 +3224,10 @@ retry:
drop_nlink(inode);
err = ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
+ handle = NULL;
if (err)
goto err_drop_inode;
- err = __page_symlink(inode, symname, l, 1);
+ err = __page_symlink(inode, disk_link.name, disk_link.len, 1);
if (err)
goto err_drop_inode;
/*
@@ -2901,34 +3239,37 @@ retry:
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
+ handle = NULL;
goto err_drop_inode;
}
set_nlink(inode, 1);
err = ext4_orphan_del(handle, inode);
- if (err) {
- ext4_journal_stop(handle);
- clear_nlink(inode);
+ if (err)
goto err_drop_inode;
- }
} else {
/* clear the extent format for fast symlink */
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
- inode->i_op = &ext4_fast_symlink_inode_operations;
- memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
- inode->i_size = l-1;
+ inode->i_op = encryption_required ?
+ &ext4_symlink_inode_operations :
+ &ext4_fast_symlink_inode_operations;
+ memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name,
+ disk_link.len);
+ inode->i_size = disk_link.len - 1;
}
EXT4_I(inode)->i_disksize = inode->i_size;
err = ext4_add_nondir(handle, dentry, inode);
if (!err && IS_DIRSYNC(dir))
ext4_handle_sync(handle);
-out_stop:
if (handle)
ext4_journal_stop(handle);
- if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
+ kfree(sd);
return err;
err_drop_inode:
+ if (handle)
+ ext4_journal_stop(handle);
+ kfree(sd);
+ clear_nlink(inode);
unlock_new_inode(inode);
iput(inode);
return err;
@@ -2938,12 +3279,14 @@ static int ext4_link(struct dentry *old_dentry,
struct inode *dir, struct dentry *dentry)
{
handle_t *handle;
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int err, retries = 0;
if (inode->i_nlink >= EXT4_LINK_MAX)
return -EMLINK;
-
+ if (ext4_encrypted_inode(dir) &&
+ !ext4_is_child_context_consistent_with_parent(dir, inode))
+ return -EPERM;
dquot_initialize(dir);
retry:
@@ -3210,12 +3553,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ext4_renament old = {
.dir = old_dir,
.dentry = old_dentry,
- .inode = old_dentry->d_inode,
+ .inode = d_inode(old_dentry),
};
struct ext4_renament new = {
.dir = new_dir,
.dentry = new_dentry,
- .inode = new_dentry->d_inode,
+ .inode = d_inode(new_dentry),
};
int force_reread;
int retval;
@@ -3244,6 +3587,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
goto end_rename;
+ if ((old.dir != new.dir) &&
+ ext4_encrypted_inode(new.dir) &&
+ !ext4_is_child_context_consistent_with_parent(new.dir,
+ old.inode)) {
+ retval = -EPERM;
+ goto end_rename;
+ }
+
new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
@@ -3264,12 +3615,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
if (!(flags & RENAME_WHITEOUT)) {
handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+ handle = NULL;
+ goto end_rename;
+ }
} else {
whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
- if (IS_ERR(whiteout))
- return PTR_ERR(whiteout);
+ if (IS_ERR(whiteout)) {
+ retval = PTR_ERR(whiteout);
+ whiteout = NULL;
+ goto end_rename;
+ }
}
if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
@@ -3278,7 +3635,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (S_ISDIR(old.inode->i_mode)) {
if (new.inode) {
retval = -ENOTEMPTY;
- if (!empty_dir(new.inode))
+ if (!ext4_empty_dir(new.inode))
goto end_rename;
} else {
retval = -EMLINK;
@@ -3352,8 +3709,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_dec_count(handle, old.dir);
if (new.inode) {
- /* checked empty_dir above, can't have another parent,
- * ext4_dec_count() won't work for many-linked dirs */
+ /* checked ext4_empty_dir above, can't have another
+ * parent, ext4_dec_count() won't work for many-linked
+ * dirs */
clear_nlink(new.inode);
} else {
ext4_inc_count(handle, new.dir);
@@ -3391,12 +3749,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ext4_renament old = {
.dir = old_dir,
.dentry = old_dentry,
- .inode = old_dentry->d_inode,
+ .inode = d_inode(old_dentry),
};
struct ext4_renament new = {
.dir = new_dir,
.dentry = new_dentry,
- .inode = new_dentry->d_inode,
+ .inode = d_inode(new_dentry),
};
u8 new_file_type;
int retval;
@@ -3433,8 +3791,11 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
handle = ext4_journal_start(old.dir, EXT4_HT_DIR,
(2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+ handle = NULL;
+ goto end_rename;
+ }
if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
ext4_handle_sync(handle);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index b24a2541a9ba..5765f88b3904 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/time.h>
-#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
@@ -18,14 +17,12 @@
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
-#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/ratelimit.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -69,6 +66,10 @@ static void ext4_finish_bio(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct page *data_page = NULL;
+ struct ext4_crypto_ctx *ctx = NULL;
+#endif
struct buffer_head *bh, *head;
unsigned bio_start = bvec->bv_offset;
unsigned bio_end = bio_start + bvec->bv_len;
@@ -78,6 +79,15 @@ static void ext4_finish_bio(struct bio *bio)
if (!page)
continue;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (!page->mapping) {
+ /* The bounce data pages are unmapped. */
+ data_page = page;
+ ctx = (struct ext4_crypto_ctx *)page_private(data_page);
+ page = ctx->control_page;
+ }
+#endif
+
if (error) {
SetPageError(page);
set_bit(AS_EIO, &page->mapping->flags);
@@ -102,8 +112,13 @@ static void ext4_finish_bio(struct bio *bio)
} while ((bh = bh->b_this_page) != head);
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
local_irq_restore(flags);
- if (!under_io)
+ if (!under_io) {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ctx)
+ ext4_restore_control_page(data_page);
+#endif
end_page_writeback(page);
+ }
}
}
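The hunk above teaches ext4_finish_bio() that an encrypted write may complete on a bounce page rather than on the page-cache page: bounce pages have no mapping, their page_private() holds the crypto context, and that context points back at the control page whose writeback must be ended. A small stand-alone sketch of that indirection follows; struct pg and struct crypto_ctx are simplified stand-ins for struct page and struct ext4_crypto_ctx, not the kernel definitions. The real completion path additionally hands the bounce page back via ext4_restore_control_page().

#include <stdio.h>

struct pg { int has_mapping; void *private; int under_writeback; };
struct crypto_ctx { struct pg *control_page; };

/* Completion path: if the bio carried an unmapped bounce page, follow
 * its private pointer to the crypto context and finish writeback on
 * the original (control) page instead. */
static void finish_one_page(struct pg *page)
{
	struct crypto_ctx *ctx = NULL;

	if (!page->has_mapping) {               /* bounce pages are unmapped */
		ctx = page->private;
		page = ctx->control_page;       /* redirect to the real page */
	}
	page->under_writeback = 0;              /* end_page_writeback() stand-in */
	if (ctx)
		printf("bounce page done, control page writeback ended\n");
}

int main(void)
{
	struct pg control = { .has_mapping = 1, .under_writeback = 1 };
	struct crypto_ctx ctx = { .control_page = &control };
	struct pg bounce = { .has_mapping = 0, .private = &ctx };

	finish_one_page(&bounce);
	return control.under_writeback;         /* 0 on success */
}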
@@ -378,6 +393,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
static int io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
+ struct page *page,
struct buffer_head *bh)
{
int ret;
@@ -391,7 +407,7 @@ submit_and_retry:
if (ret)
return ret;
}
- ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+ ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
goto submit_and_retry;
io->io_next_block++;
@@ -404,6 +420,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
struct writeback_control *wbc,
bool keep_towrite)
{
+ struct page *data_page = NULL;
struct inode *inode = page->mapping->host;
unsigned block_start, blocksize;
struct buffer_head *bh, *head;
@@ -463,19 +480,29 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
set_buffer_async_write(bh);
} while ((bh = bh->b_this_page) != head);
- /* Now submit buffers to write */
bh = head = page_buffers(page);
+
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ data_page = ext4_encrypt(inode, page);
+ if (IS_ERR(data_page)) {
+ ret = PTR_ERR(data_page);
+ data_page = NULL;
+ goto out;
+ }
+ }
+
+ /* Now submit buffers to write */
do {
if (!buffer_async_write(bh))
continue;
- ret = io_submit_add_bh(io, inode, bh);
+ ret = io_submit_add_bh(io, inode,
+ data_page ? data_page : page, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
* we can do but mark the page as dirty, and
* better luck next time.
*/
- redirty_page_for_writepage(wbc, page);
break;
}
nr_submitted++;
@@ -484,6 +511,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
/* Error stopped previous loop? Clean up buffers... */
if (ret) {
+ out:
+ if (data_page)
+ ext4_restore_control_page(data_page);
+ printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+ redirty_page_for_writepage(wbc, page);
do {
clear_buffer_async_write(bh);
bh = bh->b_this_page;
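On the submission side, ext4_bio_write_page() above is the mirror image: for regular files on an encrypted inode the page is first encrypted into a bounce page via ext4_encrypt(), the bounce page (not the original) is what gets added to the bio, and on failure the bounce page is restored and the original page redirtied. A rough user-space sketch of that page-selection step, with encrypt_page() as a hypothetical stand-in for ext4_encrypt() and a placeholder XOR instead of real encryption:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stand-in for ext4_encrypt(): returns a freshly allocated bounce
 * buffer holding the (here: trivially transformed) page contents. */
static unsigned char *encrypt_page(const unsigned char *page)
{
	unsigned char *bounce = malloc(PAGE_SIZE);
	size_t i;

	if (!bounce)
		return NULL;
	for (i = 0; i < PAGE_SIZE; i++)
		bounce[i] = page[i] ^ 0xAA;     /* placeholder transform */
	return bounce;
}

/* Pick which buffer goes into the bio: the bounce page when the inode
 * is encrypted, the original page otherwise. */
static int write_page(const unsigned char *page, int encrypted,
		      const unsigned char **to_submit, unsigned char **bounce)
{
	*bounce = NULL;
	if (encrypted) {
		*bounce = encrypt_page(page);
		if (!*bounce)
			return -ENOMEM;         /* caller would redirty the page */
		*to_submit = *bounce;
	} else {
		*to_submit = page;
	}
	return 0;
}

int main(void)
{
	unsigned char page[PAGE_SIZE];
	const unsigned char *submit;
	unsigned char *bounce;

	memset(page, 0x5A, sizeof(page));
	if (write_page(page, 1, &submit, &bounce))
		return 1;
	printf("submitting %s page\n", submit == page ? "original" : "bounce");
	free(bounce);
	return 0;
}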
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
new file mode 100644
index 000000000000..171b9ac4b45e
--- /dev/null
+++ b/fs/ext4/readpage.c
@@ -0,0 +1,328 @@
+/*
+ * linux/fs/ext4/readpage.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This was originally taken from fs/mpage.c
+ *
+ * The ext4_mpage_readpages() function here is intended to replace
+ * mpage_readpages() in the general case, not just for encrypted
+ * files. It has some limitations (see below), where it will fall
+ * back to block_read_full_page(), but these limitations should only
+ * be hit when page_size != block_size.
+ *
+ * This will allow us to attach a callback function to support ext4
+ * encryption.
+ *
+ * If anything unusual happens, such as:
+ *
+ * - encountering a page which has buffers
+ * - encountering a page which has a non-hole after a hole
+ * - encountering a page with non-contiguous blocks
+ *
+ * then this code just gives up and calls the buffer_head-based read function.
+ * It does handle a page which has holes at the end - that is a common case:
+ * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <linux/gfp.h>
+#include <linux/bio.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/prefetch.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
+#include <linux/cleancache.h>
+
+#include "ext4.h"
+
+/*
+ * Call ext4_decrypt on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_crypto_ctx *ctx =
+ container_of(work, struct ext4_crypto_ctx, work);
+ struct bio *bio = ctx->bio;
+ struct bio_vec *bv;
+ int i;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+
+ int ret = ext4_decrypt(ctx, page);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ ext4_release_crypto_ctx(ctx);
+ bio_put(bio);
+#else
+ BUG();
+#endif
+}
+
+static inline bool ext4_bio_encrypted(struct bio *bio)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ return unlikely(bio->bi_private != NULL);
+#else
+ return false;
+#endif
+}
+
+/*
+ * I/O completion handler for multipage BIOs.
+ *
+ * The mpage code never puts partial pages into a BIO (except for end-of-file).
+ * If a page does not map to a contiguous run of blocks then it simply falls
+ * back to block_read_full_page().
+ *
+ * Why is this? If a page's completion depends on a number of different BIOs
+ * which can complete in any order (or at the same time) then determining the
+ * status of that page is hard. See end_buffer_async_read() for the details.
+ * There is no point in duplicating all that complexity.
+ */
+static void mpage_end_io(struct bio *bio, int err)
+{
+ struct bio_vec *bv;
+ int i;
+
+ if (ext4_bio_encrypted(bio)) {
+ struct ext4_crypto_ctx *ctx = bio->bi_private;
+
+ if (err) {
+ ext4_release_crypto_ctx(ctx);
+ } else {
+ INIT_WORK(&ctx->work, completion_pages);
+ ctx->bio = bio;
+ queue_work(ext4_read_workqueue, &ctx->work);
+ return;
+ }
+ }
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+
+ if (!err) {
+ SetPageUptodate(page);
+ } else {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ }
+ unlock_page(page);
+ }
+
+ bio_put(bio);
+}
+
+int ext4_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages)
+{
+ struct bio *bio = NULL;
+ unsigned page_idx;
+ sector_t last_block_in_bio = 0;
+
+ struct inode *inode = mapping->host;
+ const unsigned blkbits = inode->i_blkbits;
+ const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+ const unsigned blocksize = 1 << blkbits;
+ sector_t block_in_file;
+ sector_t last_block;
+ sector_t last_block_in_file;
+ sector_t blocks[MAX_BUF_PER_PAGE];
+ unsigned page_block;
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ int length;
+ unsigned relative_block = 0;
+ struct ext4_map_blocks map;
+
+ map.m_pblk = 0;
+ map.m_lblk = 0;
+ map.m_len = 0;
+ map.m_flags = 0;
+
+ for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
+ int fully_mapped = 1;
+ unsigned first_hole = blocks_per_page;
+
+ prefetchw(&page->flags);
+ if (pages) {
+ page = list_entry(pages->prev, struct page, lru);
+ list_del(&page->lru);
+ if (add_to_page_cache_lru(page, mapping,
+ page->index, GFP_KERNEL))
+ goto next_page;
+ }
+
+ if (page_has_buffers(page))
+ goto confused;
+
+ block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+ last_block = block_in_file + nr_pages * blocks_per_page;
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ if (last_block > last_block_in_file)
+ last_block = last_block_in_file;
+ page_block = 0;
+
+ /*
+ * Map blocks using the previous result first.
+ */
+ if ((map.m_flags & EXT4_MAP_MAPPED) &&
+ block_in_file > map.m_lblk &&
+ block_in_file < (map.m_lblk + map.m_len)) {
+ unsigned map_offset = block_in_file - map.m_lblk;
+ unsigned last = map.m_len - map_offset;
+
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == last) {
+ /* needed? */
+ map.m_flags &= ~EXT4_MAP_MAPPED;
+ break;
+ }
+ if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map.m_pblk + map_offset +
+ relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ }
+
+ /*
+ * Then do more ext4_map_blocks() calls until we are
+ * done with this page.
+ */
+ while (page_block < blocks_per_page) {
+ if (block_in_file < last_block) {
+ map.m_lblk = block_in_file;
+ map.m_len = last_block - block_in_file;
+
+ if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
+ set_error_page:
+ SetPageError(page);
+ zero_user_segment(page, 0,
+ PAGE_CACHE_SIZE);
+ unlock_page(page);
+ goto next_page;
+ }
+ }
+ if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
+ fully_mapped = 0;
+ if (first_hole == blocks_per_page)
+ first_hole = page_block;
+ page_block++;
+ block_in_file++;
+ continue;
+ }
+ if (first_hole != blocks_per_page)
+ goto confused; /* hole -> non-hole */
+
+ /* Contiguous blocks? */
+ if (page_block && blocks[page_block-1] != map.m_pblk-1)
+ goto confused;
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == map.m_len) {
+ /* needed? */
+ map.m_flags &= ~EXT4_MAP_MAPPED;
+ break;
+ } else if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map.m_pblk+relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ }
+ if (first_hole != blocks_per_page) {
+ zero_user_segment(page, first_hole << blkbits,
+ PAGE_CACHE_SIZE);
+ if (first_hole == 0) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ goto next_page;
+ }
+ } else if (fully_mapped) {
+ SetPageMappedToDisk(page);
+ }
+ if (fully_mapped && blocks_per_page == 1 &&
+ !PageUptodate(page) && cleancache_get_page(page) == 0) {
+ SetPageUptodate(page);
+ goto confused;
+ }
+
+ /*
+ * This page will go to BIO. Do we need to send this
+ * BIO off first?
+ */
+ if (bio && (last_block_in_bio != blocks[0] - 1)) {
+ submit_and_realloc:
+ submit_bio(READ, bio);
+ bio = NULL;
+ }
+ if (bio == NULL) {
+ struct ext4_crypto_ctx *ctx = NULL;
+
+ if (ext4_encrypted_inode(inode) &&
+ S_ISREG(inode->i_mode)) {
+ ctx = ext4_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ goto set_error_page;
+ }
+ bio = bio_alloc(GFP_KERNEL,
+ min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+ if (!bio) {
+ if (ctx)
+ ext4_release_crypto_ctx(ctx);
+ goto set_error_page;
+ }
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ bio->bi_end_io = mpage_end_io;
+ bio->bi_private = ctx;
+ }
+
+ length = first_hole << blkbits;
+ if (bio_add_page(bio, page, length, 0) < length)
+ goto submit_and_realloc;
+
+ if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
+ (relative_block == map.m_len)) ||
+ (first_hole != blocks_per_page)) {
+ submit_bio(READ, bio);
+ bio = NULL;
+ } else
+ last_block_in_bio = blocks[blocks_per_page - 1];
+ goto next_page;
+ confused:
+ if (bio) {
+ submit_bio(READ, bio);
+ bio = NULL;
+ }
+ if (!PageUptodate(page))
+ block_read_full_page(page, ext4_get_block);
+ else
+ unlock_page(page);
+ next_page:
+ if (pages)
+ page_cache_release(page);
+ }
+ BUG_ON(pages && !list_empty(pages));
+ if (bio)
+ submit_bio(READ, bio);
+ return 0;
+}
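One detail worth noting in ext4_mpage_readpages() above: it caches the last ext4_map_blocks() result in map and reuses it while the next block still falls inside that extent, so a readahead window that sits in one large extent costs a single mapping call. The toy model below shows the effect of that reuse; map_blocks() is a hypothetical stand-in that hands back a 64-block extent per call. Running it reports 64 blocks mapped with one map call.

#include <stdio.h>

struct map { unsigned long lblk, len, pblk; int mapped; };

static int map_calls;

/* Hypothetical stand-in for ext4_map_blocks(): pretend every request
 * is answered by one 64-block extent starting at the asked block. */
static void map_blocks(struct map *m, unsigned long lblk)
{
	map_calls++;
	m->lblk = lblk;
	m->len = 64;
	m->pblk = 1000 + lblk;
	m->mapped = 1;
}

int main(void)
{
	struct map m = { 0 };
	unsigned long blocks_per_page = 4, nr_pages = 16, page, blk;
	unsigned long block_in_file = 0;

	for (page = 0; page < nr_pages; page++) {
		for (blk = 0; blk < blocks_per_page; blk++, block_in_file++) {
			/* Reuse the previous result if it still covers this block. */
			if (m.mapped && block_in_file >= m.lblk &&
			    block_in_file < m.lblk + m.len)
				continue;
			map_blocks(&m, block_in_file);
		}
	}
	printf("%lu blocks mapped with %d map calls\n",
	       nr_pages * blocks_per_page, map_calls);
	return 0;
}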
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 8a8ec6293b19..cf0c472047e3 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1432,12 +1432,15 @@ static int ext4_flex_group_add(struct super_block *sb,
goto exit;
/*
* We will always be modifying at least the superblock and GDT
- * block. If we are adding a group past the last current GDT block,
+ * blocks. If we are adding a group past the last current GDT block,
* we will also modify the inode and the dindirect block. If we
* are adding a group with superblock/GDT backups we will also
* modify each of the reserved GDT dindirect blocks.
*/
- credit = flex_gd->count * 4 + reserved_gdb;
+ credit = 3; /* sb, resize inode, resize inode dindirect */
+ /* GDT blocks */
+ credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
+ credit += reserved_gdb; /* Reserved GDT dindirect blocks */
handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
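The resize.c hunk replaces the blanket flex_gd->count * 4 + reserved_gdb estimate with an itemized credit count: 3 for the superblock, resize inode and its dindirect block, 1 + DIV_ROUND_UP(count, EXT4_DESC_PER_BLOCK(sb)) for the GDT blocks, and one per reserved GDT dindirect block. A worked example with purely illustrative numbers (5 new groups, 128 descriptors per block, 2 reserved GDT blocks); with these inputs the old formula reserves 22 credits where 7 suffice.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values only. */
	unsigned count = 5, desc_per_block = 128, reserved_gdb = 2;

	unsigned old_credit = count * 4 + reserved_gdb;
	unsigned new_credit = 3                                       /* sb, resize inode, dindirect */
			    + 1 + DIV_ROUND_UP(count, desc_per_block) /* GDT blocks */
			    + reserved_gdb;                           /* reserved GDT dindirect blocks */

	printf("old estimate: %u credits, itemized estimate: %u credits\n",
	       old_credit, new_credit);
	return 0;
}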
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e061e66c8280..ca9d4a2fed41 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
-#include <linux/jbd2.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
@@ -295,6 +294,8 @@ static void __save_error_info(struct super_block *sb, const char *func,
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+ if (bdev_read_only(sb->s_bdev))
+ return;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
es->s_last_error_time = cpu_to_le32(get_seconds());
strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
@@ -323,22 +324,6 @@ static void save_error_info(struct super_block *sb, const char *func,
ext4_commit_super(sb, 1);
}
-/*
- * The del_gendisk() function uninitializes the disk-specific data
- * structures, including the bdi structure, without telling anyone
- * else. Once this happens, any attempt to call mark_buffer_dirty()
- * (for example, by ext4_commit_super), will cause a kernel OOPS.
- * This is a kludge to prevent these oops until we can put in a proper
- * hook in del_gendisk() to inform the VFS and file system layers.
- */
-static int block_device_ejected(struct super_block *sb)
-{
- struct inode *bd_inode = sb->s_bdev->bd_inode;
- struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
-
- return bdi->dev == NULL;
-}
-
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
struct super_block *sb = journal->j_private;
@@ -893,6 +878,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
atomic_set(&ei->i_ioend_count, 0);
atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
+#endif
return &ei->vfs_inode;
}
@@ -1076,7 +1064,7 @@ static const struct quotactl_ops ext4_qctl_operations = {
.quota_on = ext4_quota_on,
.quota_off = ext4_quota_off,
.quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
+ .get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
@@ -1120,7 +1108,7 @@ enum {
Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
- Opt_data_err_abort, Opt_data_err_ignore,
+ Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
@@ -1211,6 +1199,7 @@ static const match_table_t tokens = {
{Opt_init_itable, "init_itable"},
{Opt_noinit_itable, "noinit_itable"},
{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
+ {Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_removed, "check=none"}, /* mount option from ext2/3 */
{Opt_removed, "nocheck"}, /* mount option from ext2/3 */
{Opt_removed, "reservation"}, /* mount option from ext2/3 */
@@ -1412,6 +1401,7 @@ static const struct mount_opts {
{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
{Opt_max_dir_size_kb, 0, MOPT_GTE0},
+ {Opt_test_dummy_encryption, 0, MOPT_GTE0},
{Opt_err, 0, 0}
};
@@ -1568,7 +1558,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
return -1;
}
- journal_inode = path.dentry->d_inode;
+ journal_inode = d_inode(path.dentry);
if (!S_ISBLK(journal_inode->i_mode)) {
ext4_msg(sb, KERN_ERR, "error: journal path %s "
"is not a block device", journal_path);
@@ -1588,6 +1578,15 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
}
*journal_ioprio =
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
+ } else if (token == Opt_test_dummy_encryption) {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
+ ext4_msg(sb, KERN_WARNING,
+ "Test dummy encryption mode enabled");
+#else
+ ext4_msg(sb, KERN_WARNING,
+ "Test dummy encryption mount option ignored");
+#endif
} else if (m->flags & MOPT_DATAJ) {
if (is_remount) {
if (!sbi->s_journal)
@@ -2685,11 +2684,13 @@ static struct attribute *ext4_attrs[] = {
EXT4_INFO_ATTR(lazy_itable_init);
EXT4_INFO_ATTR(batched_discard);
EXT4_INFO_ATTR(meta_bg_resize);
+EXT4_INFO_ATTR(encryption);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
+ ATTR_LIST(encryption),
NULL,
};
@@ -3448,6 +3449,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_bdev->bd_part)
sbi->s_sectors_written_start =
part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Modes of operations for file and directory encryption. */
+ sbi->s_file_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
+ sbi->s_dir_encryption_mode = EXT4_ENCRYPTION_MODE_INVALID;
+#endif
/* Cleanup superblock name */
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
@@ -3692,6 +3698,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
}
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT) &&
+ es->s_encryption_level) {
+ ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
+ es->s_encryption_level);
+ goto failed_mount;
+ }
+
if (sb->s_blocksize != blocksize) {
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
@@ -4054,6 +4067,13 @@ no_journal:
}
}
+ if (unlikely(sbi->s_mount_flags & EXT4_MF_TEST_DUMMY_ENCRYPTION) &&
+ !(sb->s_flags & MS_RDONLY) &&
+ !EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT)) {
+ EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
+ ext4_commit_super(sb, 1);
+ }
+
/*
* Get the # of file system overhead blocks from the
* superblock if present.
@@ -4570,7 +4590,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
int error = 0;
- if (!sbh || block_device_ejected(sb))
+ if (!sbh)
return error;
if (buffer_write_io_error(sbh)) {
/*
@@ -5199,7 +5219,7 @@ static int ext4_write_info(struct super_block *sb, int type)
handle_t *handle;
/* Data block + inode block */
- handle = ext4_journal_start(sb->s_root->d_inode, EXT4_HT_QUOTA, 2);
+ handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit_info(sb, type);
@@ -5247,7 +5267,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
* all updates to the file when we bypass pagecache...
*/
if (EXT4_SB(sb)->s_journal &&
- ext4_should_journal_data(path->dentry->d_inode)) {
+ ext4_should_journal_data(d_inode(path->dentry))) {
/*
* We don't need to lock updates but journal_flush() could
* otherwise be livelocked...
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index ff3711932018..187b78920314 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -18,22 +18,115 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/namei.h>
#include "ext4.h"
#include "xattr.h"
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct ext4_inode_info *ei = EXT4_I(dentry->d_inode);
+ struct page *cpage = NULL;
+ char *caddr, *paddr = NULL;
+ struct ext4_str cstr, pstr;
+ struct inode *inode = d_inode(dentry);
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_encrypted_symlink_data *sd;
+ loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
+ int res;
+ u32 plen, max_size = inode->i_sb->s_blocksize;
+
+ if (!ext4_encrypted_inode(inode))
+ return page_follow_link_light(dentry, nd);
+
+ ctx = ext4_get_fname_crypto_ctx(inode, inode->i_sb->s_blocksize);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ if (ext4_inode_is_fast_symlink(inode)) {
+ caddr = (char *) EXT4_I(inode)->i_data;
+ max_size = sizeof(EXT4_I(inode)->i_data);
+ } else {
+ cpage = read_mapping_page(inode->i_mapping, 0, NULL);
+ if (IS_ERR(cpage)) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return cpage;
+ }
+ caddr = kmap(cpage);
+ caddr[size] = 0;
+ }
+
+ /* Symlink is encrypted */
+ sd = (struct ext4_encrypted_symlink_data *)caddr;
+ cstr.name = sd->encrypted_path;
+ cstr.len = le16_to_cpu(sd->len);
+ if ((cstr.len +
+ sizeof(struct ext4_encrypted_symlink_data) - 1) >
+ max_size) {
+ /* Symlink data on the disk is corrupted */
+ res = -EIO;
+ goto errout;
+ }
+ plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ?
+ EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len;
+ paddr = kmalloc(plen + 1, GFP_NOFS);
+ if (!paddr) {
+ res = -ENOMEM;
+ goto errout;
+ }
+ pstr.name = paddr;
+ res = _ext4_fname_disk_to_usr(ctx, NULL, &cstr, &pstr);
+ if (res < 0)
+ goto errout;
+ /* Null-terminate the name */
+ if (res <= plen)
+ paddr[res] = '\0';
+ nd_set_link(nd, paddr);
+ ext4_put_fname_crypto_ctx(&ctx);
+ if (cpage) {
+ kunmap(cpage);
+ page_cache_release(cpage);
+ }
+ return NULL;
+errout:
+ ext4_put_fname_crypto_ctx(&ctx);
+ if (cpage) {
+ kunmap(cpage);
+ page_cache_release(cpage);
+ }
+ kfree(paddr);
+ return ERR_PTR(res);
+}
+
+static void ext4_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ struct page *page = cookie;
+
+ if (!page) {
+ kfree(nd_get_link(nd));
+ } else {
+ kunmap(page);
+ page_cache_release(page);
+ }
+}
+#endif
+
+static void *ext4_follow_fast_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct ext4_inode_info *ei = EXT4_I(d_inode(dentry));
nd_set_link(nd, (char *) ei->i_data);
return NULL;
}
const struct inode_operations ext4_symlink_inode_operations = {
.readlink = generic_readlink,
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ .follow_link = ext4_follow_link,
+ .put_link = ext4_put_link,
+#else
.follow_link = page_follow_link_light,
.put_link = page_put_link,
+#endif
.setattr = ext4_setattr,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
@@ -43,7 +136,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
const struct inode_operations ext4_fast_symlink_inode_operations = {
.readlink = generic_readlink,
- .follow_link = ext4_follow_link,
+ .follow_link = ext4_follow_fast_link,
.setattr = ext4_setattr,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
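ext4_follow_link() above reads the on-disk ext4_encrypted_symlink_data (a length field followed by the encrypted target), rejects any length that could not fit in the space the inode actually provides, decrypts into a freshly allocated buffer and NUL-terminates it before nd_set_link(). The sketch below isolates the bounds-check-and-terminate part in plain user-space C; struct enc_symlink and decode_symlink() are simplified illustrations (the memcpy stands in for the decryption step), not the ext4 on-disk format.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified model: a 16-bit length followed by the encrypted bytes. */
struct enc_symlink {
	uint16_t len;
	char encrypted_path[];
};

static char *decode_symlink(const struct enc_symlink *sd, size_t max_size)
{
	size_t clen = sd->len;
	char *out;

	/* Refuse anything larger than the buffer the record was read from. */
	if (clen + offsetof(struct enc_symlink, encrypted_path) > max_size)
		return NULL;                    /* corrupted symlink data */

	out = malloc(clen + 1);
	if (!out)
		return NULL;
	memcpy(out, sd->encrypted_path, clen);  /* real code decrypts here */
	out[clen] = '\0';                       /* always NUL-terminate */
	return out;
}

int main(void)
{
	size_t max_size = 64;
	struct enc_symlink *sd = calloc(1, max_size);
	char *target;

	if (!sd)
		return 1;
	sd->len = 11;
	memcpy(sd->encrypted_path, "target/file", 11);
	target = decode_symlink(sd, max_size);
	printf("%s\n", target ? target : "(corrupt)");
	free(target);
	free(sd);
	return 0;
}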
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 1e09fc77395c..16e28c08d1e8 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -55,7 +55,6 @@
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
-#include <linux/rwsem.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
@@ -179,7 +178,7 @@ ext4_xattr_handler(int name_index)
/*
* Inode operation listxattr()
*
- * dentry->d_inode->i_mutex: don't care
+ * d_inode(dentry)->i_mutex: don't care
*/
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
@@ -424,7 +423,7 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct buffer_head *bh = NULL;
int error;
struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
@@ -461,7 +460,7 @@ cleanup:
static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ext4_xattr_ibody_header *header;
struct ext4_inode *raw_inode;
struct ext4_iloc iloc;
@@ -502,7 +501,7 @@ ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int ret, ret2;
- down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
+ down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
if (ret < 0)
goto errout;
@@ -515,7 +514,7 @@ ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
goto errout;
ret += ret2;
errout:
- up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
+ up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
return ret;
}
@@ -639,8 +638,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
free += EXT4_XATTR_LEN(name_len);
}
if (i->value) {
- if (free < EXT4_XATTR_SIZE(i->value_len) ||
- free < EXT4_XATTR_LEN(name_len) +
+ if (free < EXT4_XATTR_LEN(name_len) +
EXT4_XATTR_SIZE(i->value_len))
return -ENOSPC;
}
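The last xattr.c hunk drops the weaker first clause of the free-space test: EXT4_XATTR_LEN(name_len) + EXT4_XATTR_SIZE(value_len) is never smaller than EXT4_XATTR_SIZE(value_len) alone, so the combined comparison already covers it. A toy illustration with simplified macros (the real EXT4_XATTR_LEN() folds in sizeof(struct ext4_xattr_entry) and 4-byte rounding; the constants below are made up):

#include <stdio.h>

/* Simplified stand-ins for the ext4 macros: pad to 4 bytes and add a
 * fixed entry-header size (illustrative values only). */
#define XATTR_PAD4(x)     (((x) + 3) & ~3u)
#define XATTR_ENTRY_HDR   16u
#define XATTR_LEN(nlen)   (XATTR_ENTRY_HDR + XATTR_PAD4(nlen))
#define XATTR_SIZE(vlen)  XATTR_PAD4(vlen)

int main(void)
{
	unsigned free_bytes = 40, name_len = 9, value_len = 21;
	unsigned need = XATTR_LEN(name_len) + XATTR_SIZE(value_len);

	/* The removed clause also tested free < XATTR_SIZE(value_len),
	 * which this stronger combined test already implies. */
	printf("need %u bytes, have %u: %s\n", need, free_bytes,
	       free_bytes < need ? "ENOSPC" : "fits");
	return 0;
}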
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 29bedf5589f6..ddc0957760ba 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -23,6 +23,7 @@
#define EXT4_XATTR_INDEX_SECURITY 6
#define EXT4_XATTR_INDEX_SYSTEM 7
#define EXT4_XATTR_INDEX_RICHACL 8
+#define EXT4_XATTR_INDEX_ENCRYPTION 9
struct ext4_xattr_header {
__le32 h_magic; /* magic number for identification */
@@ -98,6 +99,8 @@ extern const struct xattr_handler ext4_xattr_user_handler;
extern const struct xattr_handler ext4_xattr_trusted_handler;
extern const struct xattr_handler ext4_xattr_security_handler;
+#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
+
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index d2a200624af5..95d90e0560f0 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -33,7 +33,7 @@ ext4_xattr_security_get(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_SECURITY,
+ return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY,
name, buffer, size);
}
@@ -43,7 +43,7 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_SECURITY,
+ return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY,
name, value, size, flags);
}
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index 95f1f4ab59a4..891ee2ddfbd6 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -36,7 +36,7 @@ ext4_xattr_trusted_get(struct dentry *dentry, const char *name, void *buffer,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED,
+ return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED,
name, buffer, size);
}
@@ -46,7 +46,7 @@ ext4_xattr_trusted_set(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED,
+ return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED,
name, value, size, flags);
}
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index 0edb7611ffbe..6ed932b3c043 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -37,7 +37,7 @@ ext4_xattr_user_get(struct dentry *dentry, const char *name,
return -EINVAL;
if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_USER,
+ return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_USER,
name, buffer, size);
}
@@ -49,7 +49,7 @@ ext4_xattr_user_set(struct dentry *dentry, const char *name,
return -EINVAL;
if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_USER,
+ return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_USER,
name, value, size, flags);
}
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 94e2d2ffabe1..05f0f663f14c 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -1,5 +1,5 @@
config F2FS_FS
- tristate "F2FS filesystem support (EXPERIMENTAL)"
+ tristate "F2FS filesystem support"
depends on BLOCK
help
F2FS is based on Log-structured File System (LFS), which supports
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 742202779bd5..4320ffab3495 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -351,13 +351,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
*acl = f2fs_acl_clone(p, GFP_NOFS);
if (!*acl)
- return -ENOMEM;
+ goto no_mem;
ret = f2fs_acl_create_masq(*acl, mode);
- if (ret < 0) {
- posix_acl_release(*acl);
- return -ENOMEM;
- }
+ if (ret < 0)
+ goto no_mem_clone;
if (ret == 0) {
posix_acl_release(*acl);
@@ -378,6 +376,12 @@ no_acl:
*default_acl = NULL;
*acl = NULL;
return 0;
+
+no_mem_clone:
+ posix_acl_release(*acl);
+no_mem:
+ posix_acl_release(p);
+ return -ENOMEM;
}
int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 7f794b72b3b7..a5e17a2a0781 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -276,7 +276,7 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- if (f2fs_write_meta_page(page, &wbc)) {
+ if (mapping->a_ops->writepage(page, &wbc)) {
unlock_page(page);
break;
}
@@ -464,20 +464,19 @@ static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
void recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
- block_t start_blk, orphan_blkaddr, i, j;
+ block_t start_blk, orphan_blocks, i, j;
if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
return;
set_sbi_flag(sbi, SBI_POR_DOING);
- start_blk = __start_cp_addr(sbi) + 1 +
- le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
- orphan_blkaddr = __start_sum_addr(sbi) - 1;
+ start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
+ orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
- ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
+ ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP);
- for (i = 0; i < orphan_blkaddr; i++) {
+ for (i = 0; i < orphan_blocks; i++) {
struct page *page = get_meta_page(sbi, start_blk + i);
struct f2fs_orphan_block *orphan_blk;
@@ -615,7 +614,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
unsigned long blk_size = sbi->blocksize;
unsigned long long cp1_version = 0, cp2_version = 0;
unsigned long long cp_start_blk_no;
- unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
+ unsigned int cp_blks = 1 + __cp_payload(sbi);
block_t cp_blk_no;
int i;
@@ -796,6 +795,7 @@ retry:
* writing back dentry pages in the freeing inode.
*/
f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ cond_resched();
}
goto retry;
}
@@ -884,7 +884,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0;
void *kaddr;
int i;
- int cp_payload_blks = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
+ int cp_payload_blks = __cp_payload(sbi);
/*
* This avoids conducting wrong roll-forward operations and uses
@@ -1048,17 +1048,18 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long long ckpt_ver;
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
-
mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
- cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT)
+ (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC))
goto out;
if (unlikely(f2fs_cp_error(sbi)))
goto out;
if (f2fs_readonly(sbi->sb))
goto out;
+
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
+
if (block_operations(sbi))
goto out;
@@ -1085,6 +1086,10 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
+
+ if (cpc->reason == CP_RECOVERY)
+ f2fs_msg(sbi->sb, KERN_NOTICE,
+ "checkpoint: version = %llx", ckpt_ver);
out:
mutex_unlock(&sbi->cp_mutex);
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
@@ -1103,14 +1108,9 @@ void init_ino_entry_info(struct f2fs_sb_info *sbi)
im->ino_num = 0;
}
- /*
- * considering 512 blocks in a segment 8 blocks are needed for cp
- * and log segment summaries. Remaining blocks are used to keep
- * orphan entries with the limitation one reserved segment
- * for cp pack we can have max 1020*504 orphan entries
- */
sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
- NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
+ NR_CURSEG_TYPE - __cp_payload(sbi)) *
+ F2FS_ORPHANS_PER_BLOCK;
}
int __init create_checkpoint_caches(void)
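The new max_orphans formula also subtracts the checkpoint payload blocks: out of one segment, F2FS_CP_PACKS blocks go to the checkpoint packs, NR_CURSEG_TYPE to the current-segment summaries and __cp_payload(sbi) to the extra payload, and only the remainder can hold orphan entries. Plugging in the figures quoted by the comment this patch removes (512 blocks per segment, 8 blocks for packs plus summaries, 1020 orphan entries per block) and a hypothetical 2-block payload, the limit drops from 514080 to 512040 entries.

#include <stdio.h>

int main(void)
{
	/* Figures from the removed comment; cp_payload is a hypothetical
	 * example value. */
	unsigned blocks_per_seg = 512, cp_packs = 2, nr_curseg = 6;
	unsigned orphans_per_block = 1020, cp_payload = 2;

	unsigned old_max = (blocks_per_seg - cp_packs - nr_curseg)
				* orphans_per_block;
	unsigned new_max = (blocks_per_seg - cp_packs - nr_curseg - cp_payload)
				* orphans_per_block;

	printf("old: %u orphan entries, new: %u\n", old_max, new_max);
	return 0;
}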
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 985ed023a750..1e1aae669fa8 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -12,12 +12,12 @@
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
-#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
+#include <linux/uio.h>
#include "f2fs.h"
#include "node.h"
@@ -25,6 +25,9 @@
#include "trace.h"
#include <trace/events/f2fs.h>
+static struct kmem_cache *extent_tree_slab;
+static struct kmem_cache *extent_node_slab;
+
static void f2fs_read_end_io(struct bio *bio, int err)
{
struct bio_vec *bvec;
@@ -197,7 +200,7 @@ alloc_new:
* ->node_page
* update block addresses in the node page
*/
-static void __set_data_blkaddr(struct dnode_of_data *dn)
+void set_data_blkaddr(struct dnode_of_data *dn)
{
struct f2fs_node *rn;
__le32 *addr_array;
@@ -226,7 +229,7 @@ int reserve_new_block(struct dnode_of_data *dn)
trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
dn->data_blkaddr = NEW_ADDR;
- __set_data_blkaddr(dn);
+ set_data_blkaddr(dn);
mark_inode_dirty(dn->inode);
sync_inode_page(dn);
return 0;
@@ -248,73 +251,62 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
return err;
}
-static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
- struct buffer_head *bh_result)
+static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs,
+ struct extent_info *ei, struct buffer_head *bh_result)
+{
+ unsigned int blkbits = sb->s_blocksize_bits;
+ size_t max_size = bh_result->b_size;
+ size_t mapped_size;
+
+ clear_buffer_new(bh_result);
+ map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs);
+ mapped_size = (ei->fofs + ei->len - pgofs) << blkbits;
+ bh_result->b_size = min(max_size, mapped_size);
+}
+
+static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
pgoff_t start_fofs, end_fofs;
block_t start_blkaddr;
- if (is_inode_flag_set(fi, FI_NO_EXTENT))
- return 0;
-
- read_lock(&fi->ext.ext_lock);
+ read_lock(&fi->ext_lock);
if (fi->ext.len == 0) {
- read_unlock(&fi->ext.ext_lock);
- return 0;
+ read_unlock(&fi->ext_lock);
+ return false;
}
stat_inc_total_hit(inode->i_sb);
start_fofs = fi->ext.fofs;
end_fofs = fi->ext.fofs + fi->ext.len - 1;
- start_blkaddr = fi->ext.blk_addr;
+ start_blkaddr = fi->ext.blk;
if (pgofs >= start_fofs && pgofs <= end_fofs) {
- unsigned int blkbits = inode->i_sb->s_blocksize_bits;
- size_t count;
-
- set_buffer_new(bh_result);
- map_bh(bh_result, inode->i_sb,
- start_blkaddr + pgofs - start_fofs);
- count = end_fofs - pgofs + 1;
- if (count < (UINT_MAX >> blkbits))
- bh_result->b_size = (count << blkbits);
- else
- bh_result->b_size = UINT_MAX;
-
+ *ei = fi->ext;
stat_inc_read_hit(inode->i_sb);
- read_unlock(&fi->ext.ext_lock);
- return 1;
+ read_unlock(&fi->ext_lock);
+ return true;
}
- read_unlock(&fi->ext.ext_lock);
- return 0;
+ read_unlock(&fi->ext_lock);
+ return false;
}
-void update_extent_cache(struct dnode_of_data *dn)
+static bool update_extent_info(struct inode *inode, pgoff_t fofs,
+ block_t blkaddr)
{
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
- pgoff_t fofs, start_fofs, end_fofs;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ pgoff_t start_fofs, end_fofs;
block_t start_blkaddr, end_blkaddr;
int need_update = true;
- f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
- /* Update the page address in the parent node */
- __set_data_blkaddr(dn);
-
- if (is_inode_flag_set(fi, FI_NO_EXTENT))
- return;
-
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
- dn->ofs_in_node;
-
- write_lock(&fi->ext.ext_lock);
+ write_lock(&fi->ext_lock);
start_fofs = fi->ext.fofs;
end_fofs = fi->ext.fofs + fi->ext.len - 1;
- start_blkaddr = fi->ext.blk_addr;
- end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
+ start_blkaddr = fi->ext.blk;
+ end_blkaddr = fi->ext.blk + fi->ext.len - 1;
/* Drop and initialize the matched extent */
if (fi->ext.len == 1 && fofs == start_fofs)
@@ -322,24 +314,24 @@ void update_extent_cache(struct dnode_of_data *dn)
/* Initial extent */
if (fi->ext.len == 0) {
- if (dn->data_blkaddr != NULL_ADDR) {
+ if (blkaddr != NULL_ADDR) {
fi->ext.fofs = fofs;
- fi->ext.blk_addr = dn->data_blkaddr;
+ fi->ext.blk = blkaddr;
fi->ext.len = 1;
}
goto end_update;
}
/* Front merge */
- if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
+ if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
fi->ext.fofs--;
- fi->ext.blk_addr--;
+ fi->ext.blk--;
fi->ext.len++;
goto end_update;
}
/* Back merge */
- if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
+ if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
fi->ext.len++;
goto end_update;
}
@@ -351,8 +343,7 @@ void update_extent_cache(struct dnode_of_data *dn)
fi->ext.len = fofs - start_fofs;
} else {
fi->ext.fofs = fofs + 1;
- fi->ext.blk_addr = start_blkaddr +
- fofs - start_fofs + 1;
+ fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
fi->ext.len -= fofs - start_fofs + 1;
}
} else {
@@ -366,27 +357,583 @@ void update_extent_cache(struct dnode_of_data *dn)
need_update = true;
}
end_update:
- write_unlock(&fi->ext.ext_lock);
- if (need_update)
- sync_inode_page(dn);
+ write_unlock(&fi->ext_lock);
+ return need_update;
+}
+
+static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_info *ei,
+ struct rb_node *parent, struct rb_node **p)
+{
+ struct extent_node *en;
+
+ en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
+ if (!en)
+ return NULL;
+
+ en->ei = *ei;
+ INIT_LIST_HEAD(&en->list);
+
+ rb_link_node(&en->rb_node, parent, p);
+ rb_insert_color(&en->rb_node, &et->root);
+ et->count++;
+ atomic_inc(&sbi->total_ext_node);
+ return en;
+}
+
+static void __detach_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+{
+ rb_erase(&en->rb_node, &et->root);
+ et->count--;
+ atomic_dec(&sbi->total_ext_node);
+
+ if (et->cached_en == en)
+ et->cached_en = NULL;
+}
+
+static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
+ nid_t ino)
+{
+ struct extent_tree *et;
+
+ down_read(&sbi->extent_tree_lock);
+ et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+ if (!et) {
+ up_read(&sbi->extent_tree_lock);
+ return NULL;
+ }
+ atomic_inc(&et->refcount);
+ up_read(&sbi->extent_tree_lock);
+
+ return et;
+}
+
+static struct extent_tree *__grab_extent_tree(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et;
+ nid_t ino = inode->i_ino;
+
+ down_write(&sbi->extent_tree_lock);
+ et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+ if (!et) {
+ et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
+ f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
+ memset(et, 0, sizeof(struct extent_tree));
+ et->ino = ino;
+ et->root = RB_ROOT;
+ et->cached_en = NULL;
+ rwlock_init(&et->lock);
+ atomic_set(&et->refcount, 0);
+ et->count = 0;
+ sbi->total_ext_tree++;
+ }
+ atomic_inc(&et->refcount);
+ up_write(&sbi->extent_tree_lock);
+
+ return et;
+}
+
+static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
+ unsigned int fofs)
+{
+ struct rb_node *node = et->root.rb_node;
+ struct extent_node *en;
+
+ if (et->cached_en) {
+ struct extent_info *cei = &et->cached_en->ei;
+
+ if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
+ return et->cached_en;
+ }
+
+ while (node) {
+ en = rb_entry(node, struct extent_node, rb_node);
+
+ if (fofs < en->ei.fofs) {
+ node = node->rb_left;
+ } else if (fofs >= en->ei.fofs + en->ei.len) {
+ node = node->rb_right;
+ } else {
+ et->cached_en = en;
+ return en;
+ }
+ }
+ return NULL;
+}
+
+static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+{
+ struct extent_node *prev;
+ struct rb_node *node;
+
+ node = rb_prev(&en->rb_node);
+ if (!node)
+ return NULL;
+
+ prev = rb_entry(node, struct extent_node, rb_node);
+ if (__is_back_mergeable(&en->ei, &prev->ei)) {
+ en->ei.fofs = prev->ei.fofs;
+ en->ei.blk = prev->ei.blk;
+ en->ei.len += prev->ei.len;
+ __detach_extent_node(sbi, et, prev);
+ return prev;
+ }
+ return NULL;
+}
+
+static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+{
+ struct extent_node *next;
+ struct rb_node *node;
+
+ node = rb_next(&en->rb_node);
+ if (!node)
+ return NULL;
+
+ next = rb_entry(node, struct extent_node, rb_node);
+ if (__is_front_mergeable(&en->ei, &next->ei)) {
+ en->ei.len += next->ei.len;
+ __detach_extent_node(sbi, et, next);
+ return next;
+ }
+ return NULL;
+}
+
+static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_info *ei,
+ struct extent_node **den)
+{
+ struct rb_node **p = &et->root.rb_node;
+ struct rb_node *parent = NULL;
+ struct extent_node *en;
+
+ while (*p) {
+ parent = *p;
+ en = rb_entry(parent, struct extent_node, rb_node);
+
+ if (ei->fofs < en->ei.fofs) {
+ if (__is_front_mergeable(ei, &en->ei)) {
+ f2fs_bug_on(sbi, !den);
+ en->ei.fofs = ei->fofs;
+ en->ei.blk = ei->blk;
+ en->ei.len += ei->len;
+ *den = __try_back_merge(sbi, et, en);
+ return en;
+ }
+ p = &(*p)->rb_left;
+ } else if (ei->fofs >= en->ei.fofs + en->ei.len) {
+ if (__is_back_mergeable(ei, &en->ei)) {
+ f2fs_bug_on(sbi, !den);
+ en->ei.len += ei->len;
+ *den = __try_front_merge(sbi, et, en);
+ return en;
+ }
+ p = &(*p)->rb_right;
+ } else {
+ f2fs_bug_on(sbi, 1);
+ }
+ }
+
+ return __attach_extent_node(sbi, et, ei, parent, p);
+}
+
+static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, bool free_all)
+{
+ struct rb_node *node, *next;
+ struct extent_node *en;
+ unsigned int count = et->count;
+
+ node = rb_first(&et->root);
+ while (node) {
+ next = rb_next(node);
+ en = rb_entry(node, struct extent_node, rb_node);
+
+ if (free_all) {
+ spin_lock(&sbi->extent_lock);
+ if (!list_empty(&en->list))
+ list_del_init(&en->list);
+ spin_unlock(&sbi->extent_lock);
+ }
+
+ if (free_all || list_empty(&en->list)) {
+ __detach_extent_node(sbi, et, en);
+ kmem_cache_free(extent_node_slab, en);
+ }
+ node = next;
+ }
+
+ return count - et->count;
+}
+
+static void f2fs_init_extent_tree(struct inode *inode,
+ struct f2fs_extent *i_ext)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et;
+ struct extent_node *en;
+ struct extent_info ei;
+
+ if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
+ return;
+
+ et = __grab_extent_tree(inode);
+
+ write_lock(&et->lock);
+ if (et->count)
+ goto out;
+
+ set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
+ le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+
+ en = __insert_extent_tree(sbi, et, &ei, NULL);
+ if (en) {
+ et->cached_en = en;
+
+ spin_lock(&sbi->extent_lock);
+ list_add_tail(&en->list, &sbi->extent_list);
+ spin_unlock(&sbi->extent_lock);
+ }
+out:
+ write_unlock(&et->lock);
+ atomic_dec(&et->refcount);
+}
+
+static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et;
+ struct extent_node *en;
+
+ trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+
+ et = __find_extent_tree(sbi, inode->i_ino);
+ if (!et)
+ return false;
+
+ read_lock(&et->lock);
+ en = __lookup_extent_tree(et, pgofs);
+ if (en) {
+ *ei = en->ei;
+ spin_lock(&sbi->extent_lock);
+ if (!list_empty(&en->list))
+ list_move_tail(&en->list, &sbi->extent_list);
+ spin_unlock(&sbi->extent_lock);
+ stat_inc_read_hit(sbi->sb);
+ }
+ stat_inc_total_hit(sbi->sb);
+ read_unlock(&et->lock);
+
+ trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);
+
+ atomic_dec(&et->refcount);
+ return en ? true : false;
+}
+
+static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
+ block_t blkaddr)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et;
+ struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
+ struct extent_node *den = NULL;
+ struct extent_info ei, dei;
+ unsigned int endofs;
+
+ trace_f2fs_update_extent_tree(inode, fofs, blkaddr);
+
+ et = __grab_extent_tree(inode);
+
+ write_lock(&et->lock);
+
+ /* 1. lookup and remove existing extent info in cache */
+ en = __lookup_extent_tree(et, fofs);
+ if (!en)
+ goto update_extent;
+
+ dei = en->ei;
+ __detach_extent_node(sbi, et, en);
+
+ /* 2. if the extent can be split further, split and insert the remaining parts */
+ if (dei.len > 1) {
+ /* insert left part of split extent into cache */
+ if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+ set_extent_info(&ei, dei.fofs, dei.blk,
+ fofs - dei.fofs);
+ en1 = __insert_extent_tree(sbi, et, &ei, NULL);
+ }
+
+ /* insert right part of split extent into cache */
+ endofs = dei.fofs + dei.len - 1;
+ if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
+ set_extent_info(&ei, fofs + 1,
+ fofs - dei.fofs + dei.blk, endofs - fofs);
+ en2 = __insert_extent_tree(sbi, et, &ei, NULL);
+ }
+ }
+
+update_extent:
+ /* 3. update extent in extent cache */
+ if (blkaddr) {
+ set_extent_info(&ei, fofs, blkaddr, 1);
+ en3 = __insert_extent_tree(sbi, et, &ei, &den);
+ }
+
+ /* 4. update in global extent list */
+ spin_lock(&sbi->extent_lock);
+ if (en && !list_empty(&en->list))
+ list_del(&en->list);
+ /*
+	 * en1 and en2 are split from en; they become smaller and smaller
+	 * fragments after several splits. So if their length is smaller than
+	 * F2FS_MIN_EXTENT_LEN, we do not add them to the extent tree.
+ */
+ if (en1)
+ list_add_tail(&en1->list, &sbi->extent_list);
+ if (en2)
+ list_add_tail(&en2->list, &sbi->extent_list);
+ if (en3) {
+ if (list_empty(&en3->list))
+ list_add_tail(&en3->list, &sbi->extent_list);
+ else
+ list_move_tail(&en3->list, &sbi->extent_list);
+ }
+ if (den && !list_empty(&den->list))
+ list_del(&den->list);
+ spin_unlock(&sbi->extent_lock);
+
+ /* 5. release extent node */
+ if (en)
+ kmem_cache_free(extent_node_slab, en);
+ if (den)
+ kmem_cache_free(extent_node_slab, den);
+
+ write_unlock(&et->lock);
+ atomic_dec(&et->refcount);
+}
+
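+/*
+ * Before an inode is evicted, copy the cached extent node (or the first
+ * node in the rb-tree) back into the legacy in-inode extent so that one
+ * extent survives on disk; clear the on-disk extent if the tree is empty.
+ */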
+void f2fs_preserve_extent_tree(struct inode *inode)
+{
+ struct extent_tree *et;
+ struct extent_info *ext = &F2FS_I(inode)->ext;
+ bool sync = false;
+
+ if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
+ return;
+
+ et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
+ if (!et) {
+ if (ext->len) {
+ ext->len = 0;
+ update_inode_page(inode);
+ }
+ return;
+ }
+
+ read_lock(&et->lock);
+ if (et->count) {
+ struct extent_node *en;
+
+ if (et->cached_en) {
+ en = et->cached_en;
+ } else {
+ struct rb_node *node = rb_first(&et->root);
+
+ if (!node)
+ node = rb_last(&et->root);
+ en = rb_entry(node, struct extent_node, rb_node);
+ }
+
+ if (__is_extent_same(ext, &en->ei))
+ goto out;
+
+ *ext = en->ei;
+ sync = true;
+ } else if (ext->len) {
+ ext->len = 0;
+ sync = true;
+ }
+out:
+ read_unlock(&et->lock);
+ atomic_dec(&et->refcount);
+
+ if (sync)
+ update_inode_page(inode);
+}
+
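+/*
+ * Shrinker path (skipped while enough memory is available): detach up to
+ * @nr_shrink extent nodes from the global LRU, free those detached nodes
+ * under each tree's write lock, and finally drop any extent trees that are
+ * now empty and unreferenced.
+ */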
+void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+ struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
+ struct extent_node *en, *tmp;
+ unsigned long ino = F2FS_ROOT_INO(sbi);
+ struct radix_tree_iter iter;
+ void **slot;
+ unsigned int found;
+ unsigned int node_cnt = 0, tree_cnt = 0;
+
+ if (!test_opt(sbi, EXTENT_CACHE))
+ return;
+
+ if (available_free_memory(sbi, EXTENT_CACHE))
+ return;
+
+ spin_lock(&sbi->extent_lock);
+ list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
+ if (!nr_shrink--)
+ break;
+ list_del_init(&en->list);
+ }
+ spin_unlock(&sbi->extent_lock);
+
+ down_read(&sbi->extent_tree_lock);
+ while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
+ (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+ unsigned i;
+
+ ino = treevec[found - 1]->ino + 1;
+ for (i = 0; i < found; i++) {
+ struct extent_tree *et = treevec[i];
+
+ atomic_inc(&et->refcount);
+ write_lock(&et->lock);
+ node_cnt += __free_extent_tree(sbi, et, false);
+ write_unlock(&et->lock);
+ atomic_dec(&et->refcount);
+ }
+ }
+ up_read(&sbi->extent_tree_lock);
+
+ down_write(&sbi->extent_tree_lock);
+ radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
+ F2FS_ROOT_INO(sbi)) {
+ struct extent_tree *et = (struct extent_tree *)*slot;
+
+ if (!atomic_read(&et->refcount) && !et->count) {
+ radix_tree_delete(&sbi->extent_tree_root, et->ino);
+ kmem_cache_free(extent_tree_slab, et);
+ sbi->total_ext_tree--;
+ tree_cnt++;
+ }
+ }
+ up_write(&sbi->extent_tree_lock);
+
+ trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+}
+
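+/*
+ * Free every extent node belonging to the inode and remove its extent tree
+ * from the radix tree, typically on final inode eviction.
+ */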
+void f2fs_destroy_extent_tree(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et;
+ unsigned int node_cnt = 0;
+
+ if (!test_opt(sbi, EXTENT_CACHE))
+ return;
+
+ et = __find_extent_tree(sbi, inode->i_ino);
+ if (!et)
+ goto out;
+
+ /* free all extent info belong to this extent tree */
+ write_lock(&et->lock);
+ node_cnt = __free_extent_tree(sbi, et, true);
+ write_unlock(&et->lock);
+
+ atomic_dec(&et->refcount);
+
+ /* try to find and delete extent tree entry in radix tree */
+ down_write(&sbi->extent_tree_lock);
+ et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
+ if (!et) {
+ up_write(&sbi->extent_tree_lock);
+ goto out;
+ }
+ f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+ radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+ kmem_cache_free(extent_tree_slab, et);
+ sbi->total_ext_tree--;
+ up_write(&sbi->extent_tree_lock);
+out:
+ trace_f2fs_destroy_extent_tree(inode, node_cnt);
return;
}
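+/*
+ * Initialize both extent caches for an inode: seed the rb-tree cache when
+ * the extent_cache mount option is enabled, and always mirror the on-disk
+ * extent into the legacy single in-inode extent.
+ */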
+void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
+{
+ if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
+ f2fs_init_extent_tree(inode, i_ext);
+
+ write_lock(&F2FS_I(inode)->ext_lock);
+ get_extent_info(&F2FS_I(inode)->ext, *i_ext);
+ write_unlock(&F2FS_I(inode)->ext_lock);
+}
+
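+/*
+ * Cache lookup dispatcher: skip inodes marked FI_NO_EXTENT, use the rb-tree
+ * cache when the extent_cache mount option is enabled, and otherwise fall
+ * back to the legacy single in-inode extent.
+ */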
+static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei)
+{
+ if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+ return false;
+
+ if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
+ return f2fs_lookup_extent_tree(inode, pgofs, ei);
+
+ return lookup_extent_info(inode, pgofs, ei);
+}
+
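+/*
+ * Cache update dispatcher: compute the file offset of the updated block and
+ * route the update to either the rb-tree cache or the legacy in-inode
+ * extent, syncing the inode page in the latter case when it changed.
+ */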
+void f2fs_update_extent_cache(struct dnode_of_data *dn)
+{
+ struct f2fs_inode_info *fi = F2FS_I(dn->inode);
+ pgoff_t fofs;
+
+ f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
+
+ if (is_inode_flag_set(fi, FI_NO_EXTENT))
+ return;
+
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+ dn->ofs_in_node;
+
+ if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
+ return f2fs_update_extent_tree(dn->inode, fofs,
+ dn->data_blkaddr);
+
+ if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
+ sync_inode_page(dn);
+}
+
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
+ struct extent_info ei;
int err;
struct f2fs_io_info fio = {
.type = DATA,
.rw = sync ? READ_SYNC : READA,
};
+ /*
+ * If sync is false, it needs to check its block allocation.
+	 * This is needed and triggered by two flows:
+ * gc and truncate_partial_data_page.
+ */
+ if (!sync)
+ goto search;
+
page = find_get_page(mapping, index);
if (page && PageUptodate(page))
return page;
f2fs_put_page(page, 0);
+search:
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ goto got_it;
+ }
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
@@ -401,6 +948,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
if (unlikely(dn.data_blkaddr == NEW_ADDR))
return ERR_PTR(-EINVAL);
+got_it:
page = grab_cache_page(mapping, index);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -435,6 +983,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
+ struct extent_info ei;
int err;
struct f2fs_io_info fio = {
.type = DATA,
@@ -445,6 +994,11 @@ repeat:
if (!page)
return ERR_PTR(-ENOMEM);
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ goto got_it;
+ }
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err) {
@@ -458,6 +1012,7 @@ repeat:
return ERR_PTR(-ENOENT);
}
+got_it:
if (PageUptodate(page))
return page;
@@ -569,19 +1124,26 @@ static int __allocate_data_block(struct dnode_of_data *dn)
if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
return -EPERM;
+
+ dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ if (dn->data_blkaddr == NEW_ADDR)
+ goto alloc;
+
if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
return -ENOSPC;
+alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
seg = CURSEG_DIRECT_IO;
- allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);
+ allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+ &sum, seg);
/* direct IO doesn't use extent cache to maximize the performance */
- __set_data_blkaddr(dn);
+ set_data_blkaddr(dn);
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
@@ -615,7 +1177,10 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
while (dn.ofs_in_node < end_offset && len) {
- if (dn.data_blkaddr == NULL_ADDR) {
+ block_t blkaddr;
+
+ blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
if (__allocate_data_block(&dn))
goto sync_out;
allocated = true;
@@ -659,13 +1224,16 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
pgoff_t pgofs, end_offset;
int err = 0, ofs = 1;
+ struct extent_info ei;
bool allocated = false;
/* Get the page offset from the block offset(iblock) */
pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
- if (check_extent_cache(inode, pgofs, bh_result))
+ if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+ f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result);
goto out;
+ }
if (create)
f2fs_lock_op(F2FS_I_SB(inode));
@@ -682,7 +1250,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
goto put_out;
if (dn.data_blkaddr != NULL_ADDR) {
- set_buffer_new(bh_result);
+ clear_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
} else if (create) {
err = __allocate_data_block(&dn);
@@ -727,6 +1295,7 @@ get_next:
if (err)
goto sync_out;
allocated = true;
+ set_buffer_new(bh_result);
blkaddr = dn.data_blkaddr;
}
/* Give more consecutive addresses for the readahead */
@@ -813,8 +1382,10 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
fio->blk_addr = dn.data_blkaddr;
/* This page is already truncated */
- if (fio->blk_addr == NULL_ADDR)
+ if (fio->blk_addr == NULL_ADDR) {
+ ClearPageUptodate(page);
goto out_writepage;
+ }
set_page_writeback(page);
@@ -827,10 +1398,15 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
need_inplace_update(inode))) {
rewrite_data_page(page, fio);
set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+ trace_f2fs_do_write_data_page(page, IPU);
} else {
write_data_page(page, &dn, fio);
- update_extent_cache(&dn);
+ set_data_blkaddr(&dn);
+ f2fs_update_extent_cache(&dn);
+ trace_f2fs_do_write_data_page(page, OPU);
set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ if (page->index == 0)
+ set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
}
out_writepage:
f2fs_put_dnode(&dn);
@@ -909,6 +1485,8 @@ done:
clear_cold_data(page);
out:
inode_dec_dirty_pages(inode);
+ if (err)
+ ClearPageUptodate(page);
unlock_page(page);
if (need_balance_fs)
f2fs_balance_fs(sbi);
@@ -950,6 +1528,10 @@ static int f2fs_write_data_pages(struct address_space *mapping,
available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
+ /* during POR, we don't need to trigger writepage at all. */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto skip_write;
+
diff = nr_pages_to_write(sbi, DATA, wbc);
if (!S_ISDIR(inode->i_mode)) {
@@ -1118,12 +1700,12 @@ static int f2fs_write_end(struct file *file,
return copied;
}
-static int check_direct_IO(struct inode *inode, int rw,
- struct iov_iter *iter, loff_t offset)
+static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
+ loff_t offset)
{
unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
- if (rw == READ)
+ if (iov_iter_rw(iter) == READ)
return 0;
if (offset & blocksize_mask)
@@ -1135,8 +1717,8 @@ static int check_direct_IO(struct inode *inode, int rw,
return 0;
}
-static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -1151,19 +1733,19 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
return err;
}
- if (check_direct_IO(inode, rw, iter, offset))
+ if (check_direct_IO(inode, iter, offset))
return 0;
- trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+ trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
- if (rw & WRITE)
+ if (iov_iter_rw(iter) == WRITE)
__allocate_data_blocks(inode, offset, count);
- err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
- if (err < 0 && (rw & WRITE))
+ err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
+ if (err < 0 && iov_iter_rw(iter) == WRITE)
f2fs_write_failed(mapping, offset + count);
- trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
+ trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
return err;
}
@@ -1236,6 +1818,37 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, get_data_block);
}
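+/*
+ * Set up the per-superblock extent cache: the radix tree of per-inode
+ * extent trees, the global LRU of extent nodes, and their locks/counters.
+ */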
+void init_extent_cache_info(struct f2fs_sb_info *sbi)
+{
+ INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+ init_rwsem(&sbi->extent_tree_lock);
+ INIT_LIST_HEAD(&sbi->extent_list);
+ spin_lock_init(&sbi->extent_lock);
+ sbi->total_ext_tree = 0;
+ atomic_set(&sbi->total_ext_node, 0);
+}
+
+int __init create_extent_cache(void)
+{
+ extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
+ sizeof(struct extent_tree));
+ if (!extent_tree_slab)
+ return -ENOMEM;
+ extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
+ sizeof(struct extent_node));
+ if (!extent_node_slab) {
+ kmem_cache_destroy(extent_tree_slab);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void destroy_extent_cache(void)
+{
+ kmem_cache_destroy(extent_node_slab);
+ kmem_cache_destroy(extent_tree_slab);
+}
+
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index e671373cc8ab..f5388f37217e 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -35,6 +35,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
/* validation check of the segment numbers */
si->hit_ext = sbi->read_hit_ext;
si->total_ext = sbi->total_hit_ext;
+ si->ext_tree = sbi->total_ext_tree;
+ si->ext_node = atomic_read(&sbi->total_ext_node);
si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
si->ndirty_dirs = sbi->n_dirty_dirs;
@@ -185,6 +187,9 @@ get_cache:
si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry);
for (i = 0; i <= UPDATE_INO; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
+ si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree);
+ si->cache_mem += atomic_read(&sbi->total_ext_node) *
+ sizeof(struct extent_node);
si->page_mem = 0;
npages = NODE_MAPPING(sbi)->nrpages;
@@ -260,13 +265,20 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, "CP calls: %d\n", si->cp_count);
seq_printf(s, "GC calls: %d (BG: %d)\n",
si->call_count, si->bg_gc);
- seq_printf(s, " - data segments : %d\n", si->data_segs);
- seq_printf(s, " - node segments : %d\n", si->node_segs);
- seq_printf(s, "Try to move %d blocks\n", si->tot_blks);
- seq_printf(s, " - data blocks : %d\n", si->data_blks);
- seq_printf(s, " - node blocks : %d\n", si->node_blks);
+ seq_printf(s, " - data segments : %d (%d)\n",
+ si->data_segs, si->bg_data_segs);
+ seq_printf(s, " - node segments : %d (%d)\n",
+ si->node_segs, si->bg_node_segs);
+ seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
+ si->bg_data_blks + si->bg_node_blks);
+ seq_printf(s, " - data blocks : %d (%d)\n", si->data_blks,
+ si->bg_data_blks);
+ seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks,
+ si->bg_node_blks);
seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
si->hit_ext, si->total_ext);
+ seq_printf(s, "\nExtent Tree Count: %d\n", si->ext_tree);
+ seq_printf(s, "\nExtent Node Count: %d\n", si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
seq_printf(s, " - inmem: %4d, wb: %4d\n",
si->inmem_pages, si->wb_pages);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index b74097a7f6d9..3a3302ab7871 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -59,9 +59,8 @@ static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFLNK >> S_SHIFT] = F2FS_FT_SYMLINK,
};
-void set_de_type(struct f2fs_dir_entry *de, struct inode *inode)
+void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
{
- umode_t mode = inode->i_mode;
de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
@@ -127,22 +126,19 @@ struct f2fs_dir_entry *find_target_dentry(struct qstr *name, int *max_slots,
*max_slots = 0;
while (bit_pos < d->max) {
if (!test_bit_le(bit_pos, d->bitmap)) {
- if (bit_pos == 0)
- max_len = 1;
- else if (!test_bit_le(bit_pos - 1, d->bitmap))
- max_len++;
bit_pos++;
+ max_len++;
continue;
}
+
de = &d->dentry[bit_pos];
if (early_match_name(name->len, namehash, de) &&
!memcmp(d->filename[bit_pos], name->name, name->len))
goto found;
- if (max_slots && *max_slots >= 0 && max_len > *max_slots) {
+ if (max_slots && max_len > *max_slots)
*max_slots = max_len;
- max_len = 0;
- }
+ max_len = 0;
/* remain bug on condition */
if (unlikely(!de->name_len))
@@ -219,14 +215,14 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
unsigned int max_depth;
unsigned int level;
+ *res_page = NULL;
+
if (f2fs_has_inline_dentry(dir))
return find_in_inline_dir(dir, child, res_page);
if (npages == 0)
return NULL;
- *res_page = NULL;
-
name_hash = f2fs_dentry_hash(child);
max_depth = F2FS_I(dir)->i_current_depth;
@@ -285,7 +281,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
lock_page(page);
f2fs_wait_on_page_writeback(page, type);
de->ino = cpu_to_le32(inode->i_ino);
- set_de_type(de, inode);
+ set_de_type(de, inode->i_mode);
f2fs_dentry_kunmap(dir, page);
set_page_dirty(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
@@ -331,14 +327,14 @@ void do_make_empty_dir(struct inode *inode, struct inode *parent,
de->hash_code = 0;
de->ino = cpu_to_le32(inode->i_ino);
memcpy(d->filename[0], ".", 1);
- set_de_type(de, inode);
+ set_de_type(de, inode->i_mode);
de = &d->dentry[1];
de->hash_code = 0;
de->name_len = cpu_to_le16(2);
de->ino = cpu_to_le32(parent->i_ino);
memcpy(d->filename[1], "..", 2);
- set_de_type(de, inode);
+ set_de_type(de, parent->i_mode);
test_and_set_bit_le(0, (void *)d->bitmap);
test_and_set_bit_le(1, (void *)d->bitmap);
@@ -435,7 +431,7 @@ error:
void update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth)
{
- if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
+ if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
if (S_ISDIR(inode->i_mode)) {
inc_nlink(dir);
set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
@@ -450,7 +446,7 @@ void update_parent_metadata(struct inode *dir, struct inode *inode,
set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
}
- if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
+ if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
}
@@ -474,30 +470,47 @@ next:
goto next;
}
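+/*
+ * Fill the directory entry at @bit_pos with the given name, hash, inode
+ * number and mode-derived type, and mark all slots it occupies in the
+ * dentry bitmap.
+ */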
+void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
+ const struct qstr *name, f2fs_hash_t name_hash,
+ unsigned int bit_pos)
+{
+ struct f2fs_dir_entry *de;
+ int slots = GET_DENTRY_SLOTS(name->len);
+ int i;
+
+ de = &d->dentry[bit_pos];
+ de->hash_code = name_hash;
+ de->name_len = cpu_to_le16(name->len);
+ memcpy(d->filename[bit_pos], name->name, name->len);
+ de->ino = cpu_to_le32(ino);
+ set_de_type(de, mode);
+ for (i = 0; i < slots; i++)
+ test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
+}
+
/*
* Caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
*/
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
- struct inode *inode)
+ struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
unsigned int level;
unsigned int current_depth;
unsigned long bidx, block;
f2fs_hash_t dentry_hash;
- struct f2fs_dir_entry *de;
unsigned int nbucket, nblock;
size_t namelen = name->len;
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
+ struct f2fs_dentry_ptr d;
int slots = GET_DENTRY_SLOTS(namelen);
- struct page *page;
+ struct page *page = NULL;
int err = 0;
- int i;
if (f2fs_has_inline_dentry(dir)) {
- err = f2fs_add_inline_entry(dir, name, inode);
+ err = f2fs_add_inline_entry(dir, name, inode, ino, mode);
if (!err || err != -EAGAIN)
return err;
else
@@ -547,30 +560,31 @@ start:
add_dentry:
f2fs_wait_on_page_writeback(dentry_page, DATA);
- down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, name, NULL);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto fail;
+ if (inode) {
+ down_write(&F2FS_I(inode)->i_sem);
+ page = init_inode_metadata(inode, dir, name, NULL);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto fail;
+ }
}
- de = &dentry_blk->dentry[bit_pos];
- de->hash_code = dentry_hash;
- de->name_len = cpu_to_le16(namelen);
- memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
- de->ino = cpu_to_le32(inode->i_ino);
- set_de_type(de, inode);
- for (i = 0; i < slots; i++)
- test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+
+ make_dentry_ptr(&d, (void *)dentry_blk, 1);
+ f2fs_update_dentry(ino, mode, &d, name, dentry_hash, bit_pos);
+
set_page_dirty(dentry_page);
- /* we don't need to mark_inode_dirty now */
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
- f2fs_put_page(page, 1);
+ if (inode) {
+ /* we don't need to mark_inode_dirty now */
+ F2FS_I(inode)->i_pino = dir->i_ino;
+ update_inode(inode, page);
+ f2fs_put_page(page, 1);
+ }
update_parent_metadata(dir, inode, current_depth);
fail:
- up_write(&F2FS_I(inode)->i_sem);
+ if (inode)
+ up_write(&F2FS_I(inode)->i_sem);
if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
update_inode_page(dir);
@@ -669,6 +683,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
if (bit_pos == NR_DENTRY_IN_BLOCK) {
truncate_hole(dir, page->index, page->index + 1);
clear_page_dirty_for_io(page);
+ ClearPagePrivate(page);
ClearPageUptodate(page);
inode_dec_dirty_pages(dir);
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 7fa3313ab0e2..8de34ab6d5b1 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -50,6 +50,7 @@
#define F2FS_MOUNT_FLUSH_MERGE 0x00000400
#define F2FS_MOUNT_NOBARRIER 0x00000800
#define F2FS_MOUNT_FASTBOOT 0x00001000
+#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -102,6 +103,7 @@ enum {
CP_UMOUNT,
CP_FASTBOOT,
CP_SYNC,
+ CP_RECOVERY,
CP_DISCARD,
};
@@ -216,6 +218,15 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
+/*
+ * Should be the same as XFS_IOC_GOINGDOWN.
+ * Flags for the going-down operation used by FS_IOC_GOINGDOWN.
+ */
+#define F2FS_IOC_SHUTDOWN _IOR('X', 125, __u32) /* Shutdown */
+#define F2FS_GOING_DOWN_FULLSYNC 0x0 /* going down with full sync */
+#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */
+#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */
+
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
* ioctl commands in 32 bit emulation
@@ -273,14 +284,34 @@ enum {
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
+/* vector size for gang look-up in the radix tree of the extent cache */
+#define EXT_TREE_VEC_SIZE 64
+
/* for in-memory extent cache entry */
-#define F2FS_MIN_EXTENT_LEN 16 /* minimum extent length */
+#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
+
+/* number of extent info entries in the extent cache we try to shrink */
+#define EXTENT_CACHE_SHRINK_NUMBER 128
struct extent_info {
- rwlock_t ext_lock; /* rwlock for consistency */
- unsigned int fofs; /* start offset in a file */
- u32 blk_addr; /* start block address of the extent */
- unsigned int len; /* length of the extent */
+ unsigned int fofs; /* start offset in a file */
+ u32 blk; /* start block address of the extent */
+ unsigned int len; /* length of the extent */
+};
+
+struct extent_node {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+ struct list_head list; /* node in global extent list of sbi */
+ struct extent_info ei; /* extent info */
+};
+
+struct extent_tree {
+ nid_t ino; /* inode number */
+ struct rb_root root; /* root of extent info rb-tree */
+ struct extent_node *cached_en; /* recently accessed extent node */
+ rwlock_t lock; /* protect extent info rb-tree */
+ atomic_t refcount; /* reference count of rb-tree */
+	unsigned int count;			/* # of extent nodes in rb-tree */
};
/*
@@ -309,6 +340,7 @@ struct f2fs_inode_info {
nid_t i_xattr_nid; /* node id that contains xattrs */
unsigned long long xattr_ver; /* cp version of xattr modification */
struct extent_info ext; /* in-memory extent cache entry */
+ rwlock_t ext_lock; /* rwlock for single extent cache */
struct inode_entry *dirty_dir; /* the pointer of dirty dir */
struct radix_tree_root inmem_root; /* radix tree for inmem pages */
@@ -319,21 +351,51 @@ struct f2fs_inode_info {
static inline void get_extent_info(struct extent_info *ext,
struct f2fs_extent i_ext)
{
- write_lock(&ext->ext_lock);
ext->fofs = le32_to_cpu(i_ext.fofs);
- ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
+ ext->blk = le32_to_cpu(i_ext.blk);
ext->len = le32_to_cpu(i_ext.len);
- write_unlock(&ext->ext_lock);
}
static inline void set_raw_extent(struct extent_info *ext,
struct f2fs_extent *i_ext)
{
- read_lock(&ext->ext_lock);
i_ext->fofs = cpu_to_le32(ext->fofs);
- i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
+ i_ext->blk = cpu_to_le32(ext->blk);
i_ext->len = cpu_to_le32(ext->len);
- read_unlock(&ext->ext_lock);
+}
+
+static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
+ u32 blk, unsigned int len)
+{
+ ei->fofs = fofs;
+ ei->blk = blk;
+ ei->len = len;
+}
+
+static inline bool __is_extent_same(struct extent_info *ei1,
+ struct extent_info *ei2)
+{
+ return (ei1->fofs == ei2->fofs && ei1->blk == ei2->blk &&
+ ei1->len == ei2->len);
+}
+
+static inline bool __is_extent_mergeable(struct extent_info *back,
+ struct extent_info *front)
+{
+ return (back->fofs + back->len == front->fofs &&
+ back->blk + back->len == front->blk);
+}
+
+static inline bool __is_back_mergeable(struct extent_info *cur,
+ struct extent_info *back)
+{
+ return __is_extent_mergeable(back, cur);
+}
+
+static inline bool __is_front_mergeable(struct extent_info *cur,
+ struct extent_info *front)
+{
+ return __is_extent_mergeable(cur, front);
}
struct f2fs_nm_info {
@@ -502,6 +564,10 @@ enum page_type {
META,
NR_PAGE_TYPE,
META_FLUSH,
+ INMEM, /* the below types are used by tracepoints only. */
+ INMEM_DROP,
+ IPU,
+ OPU,
};
struct f2fs_io_info {
@@ -571,6 +637,14 @@ struct f2fs_sb_info {
struct list_head dir_inode_list; /* dir inode list */
spinlock_t dir_inode_lock; /* for dir inode list lock */
+ /* for extent tree cache */
+ struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+ struct rw_semaphore extent_tree_lock; /* locking extent radix tree */
+ struct list_head extent_list; /* lru list for shrinker */
+ spinlock_t extent_lock; /* locking extent lru list */
+ int total_ext_tree; /* extent tree count */
+ atomic_t total_ext_node; /* extent info count */
+
/* basic filesystem units */
unsigned int log_sectors_per_block; /* log2 sectors per block */
unsigned int log_blocksize; /* log2 block size */
@@ -920,12 +994,17 @@ static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
return 0;
}
+static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
+{
+ return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
+}
+
static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
int offset;
- if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload) > 0) {
+ if (__cp_payload(sbi) > 0) {
if (flag == NAT_BITMAP)
return &ckpt->sit_nat_version_bitmap;
else
@@ -1166,8 +1245,10 @@ enum {
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
FI_VOLATILE_FILE, /* indicate volatile file */
+ FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
+ FI_INLINE_DOTS, /* indicate inline dot dentries */
};
static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
@@ -1204,6 +1285,8 @@ static inline void get_inline_info(struct f2fs_inode_info *fi,
set_inode_flag(fi, FI_INLINE_DENTRY);
if (ri->i_inline & F2FS_DATA_EXIST)
set_inode_flag(fi, FI_DATA_EXIST);
+ if (ri->i_inline & F2FS_INLINE_DOTS)
+ set_inode_flag(fi, FI_INLINE_DOTS);
}
static inline void set_raw_inline(struct f2fs_inode_info *fi,
@@ -1219,6 +1302,8 @@ static inline void set_raw_inline(struct f2fs_inode_info *fi,
ri->i_inline |= F2FS_INLINE_DENTRY;
if (is_inode_flag_set(fi, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
+ if (is_inode_flag_set(fi, FI_INLINE_DOTS))
+ ri->i_inline |= F2FS_INLINE_DOTS;
}
static inline int f2fs_has_inline_xattr(struct inode *inode)
@@ -1264,6 +1349,11 @@ static inline int f2fs_exist_data(struct inode *inode)
return is_inode_flag_set(F2FS_I(inode), FI_DATA_EXIST);
}
+static inline int f2fs_has_inline_dots(struct inode *inode)
+{
+ return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DOTS);
+}
+
static inline bool f2fs_is_atomic_file(struct inode *inode)
{
return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE);
@@ -1274,6 +1364,11 @@ static inline bool f2fs_is_volatile_file(struct inode *inode)
return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE);
}
+static inline bool f2fs_is_first_block_written(struct inode *inode)
+{
+ return is_inode_flag_set(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+}
+
static inline bool f2fs_is_drop_cache(struct inode *inode)
{
return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE);
@@ -1290,12 +1385,6 @@ static inline int f2fs_has_inline_dentry(struct inode *inode)
return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DENTRY);
}
-static inline void *inline_dentry_addr(struct page *page)
-{
- struct f2fs_inode *ri = F2FS_INODE(page);
- return (void *)&(ri->i_addr[1]);
-}
-
static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
{
if (!f2fs_has_inline_dentry(dir))
@@ -1363,7 +1452,7 @@ struct dentry *f2fs_get_parent(struct dentry *child);
* dir.c
*/
extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
-void set_de_type(struct f2fs_dir_entry *, struct inode *);
+void set_de_type(struct f2fs_dir_entry *, umode_t);
struct f2fs_dir_entry *find_target_dentry(struct qstr *, int *,
struct f2fs_dentry_ptr *);
bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
@@ -1382,7 +1471,10 @@ ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
int update_dent_inode(struct inode *, const struct qstr *);
-int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
+void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
+ const struct qstr *, f2fs_hash_t , unsigned int);
+int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
+ umode_t);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
struct inode *);
int f2fs_do_tmpfile(struct inode *, struct inode *);
@@ -1391,8 +1483,8 @@ bool f2fs_empty_dir(struct inode *);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
- return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
- inode);
+ return __f2fs_add_link(d_inode(dentry->d_parent), &dentry->d_name,
+ inode, inode->i_ino, inode->i_mode);
}
/*
@@ -1519,14 +1611,22 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *,
struct f2fs_io_info *);
void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *,
struct f2fs_io_info *);
+void set_data_blkaddr(struct dnode_of_data *);
int reserve_new_block(struct dnode_of_data *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-void update_extent_cache(struct dnode_of_data *);
+void f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
+void f2fs_destroy_extent_tree(struct inode *);
+void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *);
+void f2fs_update_extent_cache(struct dnode_of_data *);
+void f2fs_preserve_extent_tree(struct inode *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct page *, struct f2fs_io_info *);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
+void init_extent_cache_info(struct f2fs_sb_info *);
+int __init create_extent_cache(void);
+void destroy_extent_cache(void);
void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
int f2fs_release_page(struct page *, gfp_t);
@@ -1554,7 +1654,7 @@ struct f2fs_stat_info {
struct f2fs_sb_info *sbi;
int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
int main_area_segs, main_area_sections, main_area_zones;
- int hit_ext, total_ext;
+ int hit_ext, total_ext, ext_tree, ext_node;
int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
int nats, dirty_nats, sits, dirty_sits, fnids;
int total_count, utilization;
@@ -1566,7 +1666,9 @@ struct f2fs_stat_info {
int dirty_count, node_pages, meta_pages;
int prefree_count, call_count, cp_count;
int tot_segs, node_segs, data_segs, free_segs, free_secs;
+ int bg_node_segs, bg_data_segs;
int tot_blks, data_blks, node_blks;
+ int bg_data_blks, bg_node_blks;
int curseg[NR_CURSEG_TYPE];
int cursec[NR_CURSEG_TYPE];
int curzone[NR_CURSEG_TYPE];
@@ -1615,31 +1717,36 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi) \
(atomic_inc(&(sbi)->inplace_count))
-#define stat_inc_seg_count(sbi, type) \
+#define stat_inc_seg_count(sbi, type, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
(si)->tot_segs++; \
- if (type == SUM_TYPE_DATA) \
+ if (type == SUM_TYPE_DATA) { \
si->data_segs++; \
- else \
+ si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
+ } else { \
si->node_segs++; \
+ si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
+ } \
} while (0)
#define stat_inc_tot_blk_count(si, blks) \
(si->tot_blks += (blks))
-#define stat_inc_data_blk_count(sbi, blks) \
+#define stat_inc_data_blk_count(sbi, blks, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->data_blks += (blks); \
+ si->bg_data_blks += (gc_type == BG_GC) ? (blks) : 0; \
} while (0)
-#define stat_inc_node_blk_count(sbi, blks) \
+#define stat_inc_node_blk_count(sbi, blks, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->node_blks += (blks); \
+ si->bg_node_blks += (gc_type == BG_GC) ? (blks) : 0; \
} while (0)
int f2fs_build_stats(struct f2fs_sb_info *);
@@ -1661,10 +1768,10 @@ void f2fs_destroy_root_stats(void);
#define stat_inc_seg_type(sbi, curseg)
#define stat_inc_block_count(sbi, curseg)
#define stat_inc_inplace_blocks(sbi)
-#define stat_inc_seg_count(si, type)
+#define stat_inc_seg_count(sbi, type, gc_type)
#define stat_inc_tot_blk_count(si, blks)
-#define stat_inc_data_blk_count(si, blks)
-#define stat_inc_node_blk_count(sbi, blks)
+#define stat_inc_data_blk_count(sbi, blks, gc_type)
+#define stat_inc_node_blk_count(sbi, blks, gc_type)
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
@@ -1688,6 +1795,7 @@ extern struct kmem_cache *inode_entry_slab;
*/
bool f2fs_may_inline(struct inode *);
void read_inline_data(struct page *, struct page *);
+bool truncate_inline_inode(struct page *, u64);
int f2fs_read_inline_data(struct inode *, struct page *);
int f2fs_convert_inline_page(struct dnode_of_data *, struct page *);
int f2fs_convert_inline_inode(struct inode *);
@@ -1697,7 +1805,8 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *, struct qstr *,
struct page **);
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *, struct page **);
int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
-int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *);
+int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *,
+ nid_t, umode_t);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
struct inode *, struct inode *);
bool f2fs_empty_inline_dir(struct inode *);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 98dac27bc3f7..2b52e48d7482 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -241,6 +241,8 @@ go_write:
* will be used only for fsynced inodes after checkpoint.
*/
try_to_fix_pino(inode);
+ clear_inode_flag(fi, FI_APPEND_WRITE);
+ clear_inode_flag(fi, FI_UPDATE_WRITE);
goto out;
}
sync_nodes:
@@ -433,8 +435,12 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
continue;
dn->data_blkaddr = NULL_ADDR;
- update_extent_cache(dn);
+ set_data_blkaddr(dn);
+ f2fs_update_extent_cache(dn);
invalidate_blocks(sbi, blkaddr);
+ if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
+ clear_inode_flag(F2FS_I(dn->inode),
+ FI_FIRST_BLOCK_WRITTEN);
nr_free++;
}
if (nr_free) {
@@ -454,15 +460,16 @@ void truncate_data_blocks(struct dnode_of_data *dn)
truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
-static int truncate_partial_data_page(struct inode *inode, u64 from)
+static int truncate_partial_data_page(struct inode *inode, u64 from,
+ bool force)
{
unsigned offset = from & (PAGE_CACHE_SIZE - 1);
struct page *page;
- if (!offset)
+ if (!offset && !force)
return 0;
- page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
+ page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, force);
if (IS_ERR(page))
return 0;
@@ -473,7 +480,8 @@ static int truncate_partial_data_page(struct inode *inode, u64 from)
f2fs_wait_on_page_writeback(page, DATA);
zero_user(page, offset, PAGE_CACHE_SIZE - offset);
- set_page_dirty(page);
+ if (!force)
+ set_page_dirty(page);
out:
f2fs_put_page(page, 1);
return 0;
@@ -487,6 +495,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
pgoff_t free_from;
int count = 0, err = 0;
struct page *ipage;
+ bool truncate_page = false;
trace_f2fs_truncate_blocks_enter(inode, from);
@@ -502,7 +511,10 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
}
if (f2fs_has_inline_data(inode)) {
+ if (truncate_inline_inode(ipage, from))
+ set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
+ truncate_page = true;
goto out;
}
@@ -533,7 +545,7 @@ out:
/* lastly zero out the first data page */
if (!err)
- err = truncate_partial_data_page(inode, from);
+ err = truncate_partial_data_page(inode, from, truncate_page);
trace_f2fs_truncate_blocks_exit(inode, err);
return err;
@@ -562,7 +574,7 @@ void f2fs_truncate(struct inode *inode)
int f2fs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
generic_fillattr(inode, stat);
stat->blocks <<= 3;
return 0;
@@ -601,7 +613,7 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
@@ -997,6 +1009,9 @@ static int f2fs_ioc_release_volatile_write(struct file *filp)
if (!f2fs_is_volatile_file(inode))
return 0;
+ if (!f2fs_is_first_block_written(inode))
+ return truncate_partial_data_page(inode, 0, true);
+
punch_hole(inode, 0, F2FS_BLKSIZE);
return 0;
}
@@ -1029,6 +1044,41 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
return ret;
}
+static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct super_block *sb = sbi->sb;
+ __u32 in;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(in, (__u32 __user *)arg))
+ return -EFAULT;
+
+ switch (in) {
+ case F2FS_GOING_DOWN_FULLSYNC:
+ sb = freeze_bdev(sb->s_bdev);
+ if (sb && !IS_ERR(sb)) {
+ f2fs_stop_checkpoint(sbi);
+ thaw_bdev(sb->s_bdev, sb);
+ }
+ break;
+ case F2FS_GOING_DOWN_METASYNC:
+ /* do checkpoint only */
+ f2fs_sync_fs(sb, 1);
+ f2fs_stop_checkpoint(sbi);
+ break;
+ case F2FS_GOING_DOWN_NOSYNC:
+ f2fs_stop_checkpoint(sbi);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1078,6 +1128,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_release_volatile_write(filp);
case F2FS_IOC_ABORT_VOLATILE_WRITE:
return f2fs_ioc_abort_volatile_write(filp);
+ case F2FS_IOC_SHUTDOWN:
+ return f2fs_ioc_shutdown(filp, arg);
case FITRIM:
return f2fs_ioc_fitrim(filp, arg);
default:
@@ -1104,8 +1156,6 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
const struct file_operations f2fs_file_operations = {
.llseek = f2fs_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.open = generic_file_open,
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 76adbc3641f1..ed58211fe79b 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -435,7 +435,7 @@ next_step:
set_page_dirty(node_page);
}
f2fs_put_page(node_page, 1);
- stat_inc_node_blk_count(sbi, 1);
+ stat_inc_node_blk_count(sbi, 1, gc_type);
}
if (initial) {
@@ -622,7 +622,7 @@ next_step:
if (IS_ERR(data_page))
continue;
move_data_page(inode, data_page, gc_type);
- stat_inc_data_blk_count(sbi, 1);
+ stat_inc_data_blk_count(sbi, 1, gc_type);
}
}
@@ -680,7 +680,7 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
}
blk_finish_plug(&plug);
- stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
+ stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
stat_inc_call_count(sbi->stat_info);
f2fs_put_page(sum_page, 1);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 1484c00133cd..8140e4f0e538 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -21,7 +21,7 @@ bool f2fs_may_inline(struct inode *inode)
if (f2fs_is_atomic_file(inode))
return false;
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
return false;
if (i_size_read(inode) > MAX_INLINE_DATA)
@@ -50,10 +50,19 @@ void read_inline_data(struct page *page, struct page *ipage)
SetPageUptodate(page);
}
-static void truncate_inline_data(struct page *ipage)
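+/*
+ * Zero the inline data area of @ipage from @from to the end.  Returns false
+ * and does nothing when @from is at or beyond MAX_INLINE_DATA.
+ */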
+bool truncate_inline_inode(struct page *ipage, u64 from)
{
+ void *addr;
+
+ if (from >= MAX_INLINE_DATA)
+ return false;
+
+ addr = inline_data_addr(ipage);
+
f2fs_wait_on_page_writeback(ipage, NODE);
- memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA);
+ memset(addr + from, 0, MAX_INLINE_DATA - from);
+
+ return true;
}
int f2fs_read_inline_data(struct inode *inode, struct page *page)
@@ -122,7 +131,8 @@ no_update:
set_page_writeback(page);
fio.blk_addr = dn->data_blkaddr;
write_data_page(page, dn, &fio);
- update_extent_cache(dn);
+ set_data_blkaddr(dn);
+ f2fs_update_extent_cache(dn);
f2fs_wait_on_page_writeback(page, DATA);
if (dirty)
inode_dec_dirty_pages(dn->inode);
@@ -131,7 +141,7 @@ no_update:
set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
- truncate_inline_data(dn->inode_page);
+ truncate_inline_inode(dn->inode_page, 0);
clear_out:
stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode);
@@ -245,7 +255,7 @@ process_inline:
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- truncate_inline_data(ipage);
+ truncate_inline_inode(ipage, 0);
f2fs_clear_inline_inode(inode);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
@@ -363,7 +373,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
- truncate_inline_data(ipage);
+ truncate_inline_inode(ipage, 0);
stat_dec_inline_dir(dir);
clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
@@ -380,21 +390,18 @@ out:
}
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
- struct inode *inode)
+ struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos;
f2fs_hash_t name_hash;
- struct f2fs_dir_entry *de;
size_t namelen = name->len;
struct f2fs_inline_dentry *dentry_blk = NULL;
+ struct f2fs_dentry_ptr d;
int slots = GET_DENTRY_SLOTS(namelen);
- struct page *page;
+ struct page *page = NULL;
int err = 0;
- int i;
-
- name_hash = f2fs_dentry_hash(name);
ipage = get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage))
@@ -410,32 +417,34 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
goto out;
}
- down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, name, ipage);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto fail;
+ if (inode) {
+ down_write(&F2FS_I(inode)->i_sem);
+ page = init_inode_metadata(inode, dir, name, ipage);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto fail;
+ }
}
f2fs_wait_on_page_writeback(ipage, NODE);
- de = &dentry_blk->dentry[bit_pos];
- de->hash_code = name_hash;
- de->name_len = cpu_to_le16(namelen);
- memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
- de->ino = cpu_to_le32(inode->i_ino);
- set_de_type(de, inode);
- for (i = 0; i < slots; i++)
- test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+
+ name_hash = f2fs_dentry_hash(name);
+ make_dentry_ptr(&d, (void *)dentry_blk, 2);
+ f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);
+
set_page_dirty(ipage);
/* we don't need to mark_inode_dirty now */
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
- f2fs_put_page(page, 1);
+ if (inode) {
+ F2FS_I(inode)->i_pino = dir->i_ino;
+ update_inode(inode, page);
+ f2fs_put_page(page, 1);
+ }
update_parent_metadata(dir, inode, 0);
fail:
- up_write(&F2FS_I(inode)->i_sem);
+ if (inode)
+ up_write(&F2FS_I(inode)->i_sem);
if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
update_inode(dir, ipage);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 2d002e3738a7..e622ec95409e 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -51,6 +51,15 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
}
}
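+/* Return true if the inode's first data block has a valid on-disk address. */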
+static bool __written_first_block(struct f2fs_inode *ri)
+{
+ block_t addr = le32_to_cpu(ri->i_addr[0]);
+
+ if (addr != NEW_ADDR && addr != NULL_ADDR)
+ return true;
+ return false;
+}
+
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
@@ -130,7 +139,8 @@ static int do_read_inode(struct inode *inode)
fi->i_pino = le32_to_cpu(ri->i_pino);
fi->i_dir_level = ri->i_dir_level;
- get_extent_info(&fi->ext, ri->i_ext);
+ f2fs_init_extent_cache(inode, &ri->i_ext);
+
get_inline_info(fi, ri);
/* check data exist */
@@ -140,6 +150,9 @@ static int do_read_inode(struct inode *inode)
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
+ if (__written_first_block(ri))
+ set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+
f2fs_put_page(node_page, 1);
stat_inc_inline_inode(inode);
@@ -220,7 +233,11 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_links = cpu_to_le32(inode->i_nlink);
ri->i_size = cpu_to_le64(i_size_read(inode));
ri->i_blocks = cpu_to_le64(inode->i_blocks);
+
+ read_lock(&F2FS_I(inode)->ext_lock);
set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
+ read_unlock(&F2FS_I(inode)->ext_lock);
+
set_raw_inline(F2FS_I(inode), ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
@@ -328,6 +345,12 @@ void f2fs_evict_inode(struct inode *inode)
no_delete:
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
+
+ /* update extent info in inode */
+ if (inode->i_nlink)
+ f2fs_preserve_extent_tree(inode);
+ f2fs_destroy_extent_tree(inode);
+
invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index e79639a9787a..658e8079aaf9 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/dcache.h>
+#include <linux/namei.h>
#include "f2fs.h"
#include "node.h"
@@ -150,7 +151,7 @@ out:
static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
int err;
@@ -181,10 +182,48 @@ out:
struct dentry *f2fs_get_parent(struct dentry *child)
{
struct qstr dotdot = QSTR_INIT("..", 2);
- unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);
+ unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot);
if (!ino)
return ERR_PTR(-ENOENT);
- return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
+ return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino));
+}
+
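+/*
+ * Add any missing "." and ".." entries to a directory flagged with
+ * FI_INLINE_DOTS, then clear the flag once both entries are present.
+ */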
+static int __recover_dot_dentries(struct inode *dir, nid_t pino)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct qstr dot = QSTR_INIT(".", 1);
+ struct qstr dotdot = QSTR_INIT("..", 2);
+ struct f2fs_dir_entry *de;
+ struct page *page;
+ int err = 0;
+
+ f2fs_lock_op(sbi);
+
+ de = f2fs_find_entry(dir, &dot, &page);
+ if (de) {
+ f2fs_dentry_kunmap(dir, page);
+ f2fs_put_page(page, 0);
+ } else {
+ err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
+ if (err)
+ goto out;
+ }
+
+ de = f2fs_find_entry(dir, &dotdot, &page);
+ if (de) {
+ f2fs_dentry_kunmap(dir, page);
+ f2fs_put_page(page, 0);
+ } else {
+ err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
+ }
+out:
+ if (!err) {
+ clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
+ mark_inode_dirty(dir);
+ }
+
+ f2fs_unlock_op(sbi);
+ return err;
}
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
@@ -206,6 +245,16 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
inode = f2fs_iget(dir->i_sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
+
+ if (f2fs_has_inline_dots(inode)) {
+ int err;
+
+ err = __recover_dot_dentries(inode, dir->i_ino);
+ if (err) {
+ iget_failed(inode);
+ return ERR_PTR(err);
+ }
+ }
}
return d_splice_alias(inode, dentry);
@@ -214,7 +263,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct f2fs_dir_entry *de;
struct page *page;
int err = -ENOENT;
@@ -247,6 +296,21 @@ fail:
return err;
}
+static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct page *page = page_follow_link_light(dentry, nd);
+
+ if (IS_ERR_OR_NULL(page))
+ return page;
+
+ /* this is broken symlink case */
+ if (*nd_get_link(nd) == 0) {
+ page_put_link(dentry, nd, page);
+ return ERR_PTR(-ENOENT);
+ }
+ return page;
+}
+
static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
@@ -276,6 +340,17 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ /*
+	 * Let's flush the symlink data in order to avoid a broken symlink as
+	 * much as possible. fsync would be the best way, but there is no way
+	 * to get a file descriptor here in order to flush that.
+	 *
+	 * Note that it needs dir->fsync to make this recoverable.
+ * If the symlink path is stored into inline_data, there is no
+ * performance regression.
+ */
+ filemap_write_and_wait_range(inode->i_mapping, 0, symlen - 1);
+
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
return err;
@@ -326,7 +401,7 @@ out_fail:
static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (f2fs_empty_dir(inode))
return f2fs_unlink(dir, dentry);
return -ENOTEMPTY;
@@ -374,8 +449,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct page *old_dir_page;
struct page *old_page, *new_page;
struct f2fs_dir_entry *old_dir_entry = NULL;
@@ -501,8 +576,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct page *old_dir_page, *new_dir_page;
struct page *old_page, *new_page;
struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
@@ -693,6 +768,8 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
f2fs_unlock_op(sbi);
alloc_nid_done(sbi, inode->i_ino);
+
+ stat_inc_inline_inode(inode);
d_tmpfile(dentry, inode);
unlock_new_inode(inode);
return 0;
@@ -729,7 +806,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
const struct inode_operations f2fs_symlink_inode_operations = {
.readlink = generic_readlink,
- .follow_link = page_follow_link_light,
+ .follow_link = f2fs_follow_link,
.put_link = page_put_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 97bd9d3db882..8ab0cf1930bd 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -41,7 +41,9 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
/* only uses low memory */
avail_ram = val.totalram - val.totalhigh;
- /* give 25%, 25%, 50%, 50% memory for each components respectively */
+ /*
+	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
+ */
if (type == FREE_NIDS) {
mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
PAGE_CACHE_SHIFT;
@@ -62,6 +64,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
mem_size += (sbi->im[i].ino_num *
sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ } else if (type == EXTENT_CACHE) {
+ mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
+ atomic_read(&sbi->total_ext_node) *
+ sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else {
if (sbi->sb->s_bdi->dirty_exceeded)
return false;
@@ -494,7 +501,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
/* if inline_data is set, should not report any block indices */
if (f2fs_has_inline_data(dn->inode) && index) {
- err = -EINVAL;
+ err = -ENOENT;
f2fs_put_page(npage[0], 1);
goto release_out;
}
@@ -995,6 +1002,7 @@ static int read_node_page(struct page *page, int rw)
get_node_info(sbi, page->index, &ni);
if (unlikely(ni.blk_addr == NULL_ADDR)) {
+ ClearPageUptodate(page);
f2fs_put_page(page, 1);
return -ENOENT;
}
@@ -1306,6 +1314,7 @@ static int f2fs_write_node_page(struct page *page,
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
+ ClearPageUptodate(page);
dec_page_count(sbi, F2FS_DIRTY_NODES);
unlock_page(page);
return 0;
@@ -1821,6 +1830,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
struct f2fs_nat_block *nat_blk;
struct nat_entry *ne, *cur;
struct page *page = NULL;
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
/*
* there are two steps to flush nat entries:
@@ -1874,7 +1884,9 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, set->entry_cnt);
+ down_write(&nm_i->nat_tree_lock);
radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+ up_write(&nm_i->nat_tree_lock);
kmem_cache_free(nat_entry_set_slab, set);
}
@@ -1902,6 +1914,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
remove_nats_in_journal(sbi);
+ down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_set(nm_i,
set_idx, SETVEC_SIZE, setvec))) {
unsigned idx;
@@ -1910,6 +1923,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
__adjust_nat_entry_set(setvec[idx], &sets,
MAX_NAT_JENTRIES(sum));
}
+ up_write(&nm_i->nat_tree_lock);
/* flush dirty nats in nat entry set */
list_for_each_entry_safe(set, tmp, &sets, set_list)
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index f405bbf2435a..c56026f1725c 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -120,6 +120,7 @@ enum mem_type {
NAT_ENTRIES, /* indicates the cached nat entry */
DIRTY_DENTS, /* indicates dirty dentry pages */
INO_ENTRIES, /* indicates inode entries */
+ EXTENT_CACHE, /* indicates extent cache */
BASE_CHECK, /* check kernel status */
};
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 41afb9534bbd..8d8ea99f2156 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -93,10 +93,9 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
}
retry:
de = f2fs_find_entry(dir, &name, &page);
- if (de && inode->i_ino == le32_to_cpu(de->ino)) {
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_unmap_put;
- }
+
if (de) {
einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
@@ -115,7 +114,7 @@ retry:
iput(einode);
goto retry;
}
- err = __f2fs_add_link(dir, &name, inode);
+ err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
if (err)
goto out_err;
@@ -187,11 +186,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
goto next;
entry = get_fsync_inode(head, ino_of_node(page));
- if (entry) {
- if (IS_INODE(page) && is_dent_dnode(page))
- set_inode_flag(F2FS_I(entry->inode),
- FI_INC_LINK);
- } else {
+ if (!entry) {
if (IS_INODE(page) && is_dent_dnode(page)) {
err = recover_inode_page(sbi, page);
if (err)
@@ -212,8 +207,10 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
if (IS_ERR(entry->inode)) {
err = PTR_ERR(entry->inode);
kmem_cache_free(fsync_entry_slab, entry);
- if (err == -ENOENT)
+ if (err == -ENOENT) {
+ err = 0;
goto next;
+ }
break;
}
list_add_tail(&entry->list, head);
@@ -256,6 +253,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
struct f2fs_summary_block *sum_node;
struct f2fs_summary sum;
struct page *sum_page, *node_page;
+ struct dnode_of_data tdn = *dn;
nid_t ino, nid;
struct inode *inode;
unsigned int offset;
@@ -283,17 +281,15 @@ got_it:
/* Use the locked dnode page and inode */
nid = le32_to_cpu(sum.nid);
if (dn->inode->i_ino == nid) {
- struct dnode_of_data tdn = *dn;
tdn.nid = nid;
+ if (!dn->inode_page_locked)
+ lock_page(dn->inode_page);
tdn.node_page = dn->inode_page;
tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
- truncate_data_blocks_range(&tdn, 1);
- return 0;
+ goto truncate_out;
} else if (dn->nid == nid) {
- struct dnode_of_data tdn = *dn;
tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
- truncate_data_blocks_range(&tdn, 1);
- return 0;
+ goto truncate_out;
}
/* Get the node page */
@@ -317,18 +313,33 @@ got_it:
bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
le16_to_cpu(sum.ofs_in_node);
- if (ino != dn->inode->i_ino) {
- truncate_hole(inode, bidx, bidx + 1);
+ /*
+ * if inode page is locked, unlock temporarily, but its reference
+ * count keeps alive.
+ */
+ if (ino == dn->inode->i_ino && dn->inode_page_locked)
+ unlock_page(dn->inode_page);
+
+ set_new_dnode(&tdn, inode, NULL, NULL, 0);
+ if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
+ goto out;
+
+ if (tdn.data_blkaddr == blkaddr)
+ truncate_data_blocks_range(&tdn, 1);
+
+ f2fs_put_dnode(&tdn);
+out:
+ if (ino != dn->inode->i_ino)
iput(inode);
- } else {
- struct dnode_of_data tdn;
- set_new_dnode(&tdn, inode, dn->inode_page, NULL, 0);
- if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
- return 0;
- if (tdn.data_blkaddr != NULL_ADDR)
- truncate_data_blocks_range(&tdn, 1);
- f2fs_put_page(tdn.node_page, 1);
- }
+ else if (dn->inode_page_locked)
+ lock_page(dn->inode_page);
+ return 0;
+
+truncate_out:
+ if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
+ truncate_data_blocks_range(&tdn, 1);
+ if (dn->inode->i_ino == nid && !dn->inode_page_locked)
+ unlock_page(dn->inode_page);
return 0;
}
@@ -384,7 +395,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
src = datablock_addr(dn.node_page, dn.ofs_in_node);
dest = datablock_addr(page, dn.ofs_in_node);
- if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
+ if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR &&
+ dest >= MAIN_BLKADDR(sbi) && dest < MAX_BLKADDR(sbi)) {
+
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
/* We should not get -ENOSPC */
@@ -401,14 +414,13 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
/* write dummy data page */
recover_data_page(sbi, NULL, &sum, src, dest);
dn.data_blkaddr = dest;
- update_extent_cache(&dn);
+ set_data_blkaddr(&dn);
+ f2fs_update_extent_cache(&dn);
recovered++;
}
dn.ofs_in_node++;
}
- /* write node page in place */
- set_summary(&sum, dn.nid, 0, 0);
if (IS_INODE(dn.node_page))
sync_inode_page(&dn);
@@ -552,7 +564,7 @@ out:
mutex_unlock(&sbi->cp_mutex);
} else if (need_writecp) {
struct cp_control cpc = {
- .reason = CP_SYNC,
+ .reason = CP_RECOVERY,
};
mutex_unlock(&sbi->cp_mutex);
write_checkpoint(sbi, &cpc);
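
For reference, the range check added to do_recover_data() above only trusts destination block addresses that fall inside the main area. A minimal sketch of that test, assuming the MAIN_BLKADDR()/MAX_BLKADDR() macros used in the hunk (the helper name is illustrative, not part of the patch):

/* kernel-context sketch; needs the local f2fs.h for f2fs_sb_info/block_t */
static inline bool blkaddr_in_main_area(struct f2fs_sb_info *sbi, block_t addr)
{
	/* mirrors: dest >= MAIN_BLKADDR(sbi) && dest < MAX_BLKADDR(sbi) */
	return addr >= MAIN_BLKADDR(sbi) && addr < MAX_BLKADDR(sbi);
}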
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index daee4ab913da..f939660941bb 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -205,6 +205,8 @@ retry:
list_add_tail(&new->list, &fi->inmem_pages);
inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
mutex_unlock(&fi->inmem_lock);
+
+ trace_f2fs_register_inmem_page(page, INMEM);
}
void commit_inmem_pages(struct inode *inode, bool abort)
@@ -238,11 +240,13 @@ void commit_inmem_pages(struct inode *inode, bool abort)
f2fs_wait_on_page_writeback(cur->page, DATA);
if (clear_page_dirty_for_io(cur->page))
inode_dec_dirty_pages(inode);
+ trace_f2fs_commit_inmem_page(cur->page, INMEM);
do_write_data_page(cur->page, &fio);
submit_bio = true;
}
f2fs_put_page(cur->page, 1);
} else {
+ trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
put_page(cur->page);
}
radix_tree_delete(&fi->inmem_root, cur->page->index);
@@ -277,6 +281,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
+ /* try to shrink extent cache when there is no enough memory */
+ f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+
/* check the # of cached NAT entries and prefree segments */
if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
excess_prefree_segs(sbi) ||
@@ -549,7 +556,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
- if (end - start < cpc->trim_minlen)
+ if (force && end - start < cpc->trim_minlen)
continue;
__add_discard_entry(sbi, cpc, start, end);
@@ -1164,6 +1171,7 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
+ mutex_lock(&sit_i->sentry_lock);
/* direct_io'ed data is aligned to the segment for better performance */
if (direct_io && curseg->next_blkoff)
@@ -1178,7 +1186,6 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
*/
__add_sum_entry(sbi, type, sum);
- mutex_lock(&sit_i->sentry_lock);
__refresh_next_blkoff(sbi, curseg);
stat_inc_block_count(sbi, curseg);
@@ -1730,6 +1737,9 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
+ if (!sit_i->dirty_sentries)
+ goto out;
+
/*
* add and account sit entries of dirty bitmap in sit entry
* set temporarily
@@ -1744,9 +1754,6 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
remove_sits_in_journal(sbi);
- if (!sit_i->dirty_sentries)
- goto out;
-
/*
* there are two steps to flush sit entries:
* #1, flush sit entries to journal in current cold data summary block.
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 7fd35111cf62..85d7fa7514b2 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -336,7 +336,8 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
clear_bit(segno, free_i->free_segmap);
free_i->free_segments++;
- next = find_next_bit(free_i->free_segmap, MAIN_SEGS(sbi), start_segno);
+ next = find_next_bit(free_i->free_segmap,
+ start_segno + sbi->segs_per_sec, start_segno);
if (next >= start_segno + sbi->segs_per_sec) {
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index f2fe666a6ea9..b2dd1b01f076 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -57,6 +57,8 @@ enum {
Opt_flush_merge,
Opt_nobarrier,
Opt_fastboot,
+ Opt_extent_cache,
+ Opt_noinline_data,
Opt_err,
};
@@ -78,6 +80,8 @@ static match_table_t f2fs_tokens = {
{Opt_flush_merge, "flush_merge"},
{Opt_nobarrier, "nobarrier"},
{Opt_fastboot, "fastboot"},
+ {Opt_extent_cache, "extent_cache"},
+ {Opt_noinline_data, "noinline_data"},
{Opt_err, NULL},
};
@@ -367,6 +371,12 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_fastboot:
set_opt(sbi, FASTBOOT);
break;
+ case Opt_extent_cache:
+ set_opt(sbi, EXTENT_CACHE);
+ break;
+ case Opt_noinline_data:
+ clear_opt(sbi, INLINE_DATA);
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -392,7 +402,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
atomic_set(&fi->dirty_pages, 0);
fi->i_current_depth = 1;
fi->i_advise = 0;
- rwlock_init(&fi->ext.ext_lock);
+ rwlock_init(&fi->ext_lock);
init_rwsem(&fi->i_sem);
INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS);
INIT_LIST_HEAD(&fi->inmem_pages);
@@ -591,6 +601,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",disable_ext_identify");
if (test_opt(sbi, INLINE_DATA))
seq_puts(seq, ",inline_data");
+ else
+ seq_puts(seq, ",noinline_data");
if (test_opt(sbi, INLINE_DENTRY))
seq_puts(seq, ",inline_dentry");
if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
@@ -599,6 +611,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",nobarrier");
if (test_opt(sbi, FASTBOOT))
seq_puts(seq, ",fastboot");
+ if (test_opt(sbi, EXTENT_CACHE))
+ seq_puts(seq, ",extent_cache");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
return 0;
@@ -959,7 +973,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
struct buffer_head *raw_super_buf;
struct inode *root;
long err = -EINVAL;
- bool retry = true;
+ bool retry = true, need_fsck = false;
char *options = NULL;
int i;
@@ -984,6 +998,7 @@ try_onemore:
sbi->active_logs = NR_CURSEG_TYPE;
set_opt(sbi, BG_GC);
+ set_opt(sbi, INLINE_DATA);
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -1072,6 +1087,8 @@ try_onemore:
INIT_LIST_HEAD(&sbi->dir_inode_list);
spin_lock_init(&sbi->dir_inode_lock);
+ init_extent_cache_info(sbi);
+
init_ino_entry_info(sbi);
/* setup f2fs internal modules */
@@ -1146,9 +1163,6 @@ try_onemore:
if (err)
goto free_proc;
- if (!retry)
- set_sbi_flag(sbi, SBI_NEED_FSCK);
-
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
/*
@@ -1160,8 +1174,13 @@ try_onemore:
err = -EROFS;
goto free_kobj;
}
+
+ if (need_fsck)
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+
err = recover_fsync_data(sbi);
if (err) {
+ need_fsck = true;
f2fs_msg(sb, KERN_ERR,
"Cannot recover all fsync data errno=%ld", err);
goto free_kobj;
@@ -1212,7 +1231,7 @@ free_sbi:
/* give only one another chance */
if (retry) {
- retry = 0;
+ retry = false;
shrink_dcache_sb(sb);
goto try_onemore;
}
@@ -1278,10 +1297,13 @@ static int __init init_f2fs_fs(void)
err = create_checkpoint_caches();
if (err)
goto free_segment_manager_caches;
+ err = create_extent_cache();
+ if (err)
+ goto free_checkpoint_caches;
f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
if (!f2fs_kset) {
err = -ENOMEM;
- goto free_checkpoint_caches;
+ goto free_extent_cache;
}
err = register_filesystem(&f2fs_fs_type);
if (err)
@@ -1292,6 +1314,8 @@ static int __init init_f2fs_fs(void)
free_kset:
kset_unregister(f2fs_kset);
+free_extent_cache:
+ destroy_extent_cache();
free_checkpoint_caches:
destroy_checkpoint_caches();
free_segment_manager_caches:
@@ -1309,6 +1333,7 @@ static void __exit exit_f2fs_fs(void)
remove_proc_entry("fs/f2fs", NULL);
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
+ destroy_extent_cache();
destroy_checkpoint_caches();
destroy_segment_manager_caches();
destroy_node_manager_caches();
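
The two mount options wired up above, "extent_cache" and "noinline_data", are selected through the normal option string. A hedged userspace sketch of enabling them (device and mount point are placeholders):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* placeholders: adjust the device and mount point for your system;
	 * the option string uses the tokens added to f2fs_tokens above */
	if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
		  "extent_cache,noinline_data") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}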
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 5072bf9ae0ef..9757f65a05bc 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -83,7 +83,7 @@ static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name,
}
if (strcmp(name, "") == 0)
return -EINVAL;
- return f2fs_getxattr(dentry->d_inode, type, name, buffer, size, NULL);
+ return f2fs_getxattr(d_inode(dentry), type, name, buffer, size, NULL);
}
static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
@@ -108,7 +108,7 @@ static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
if (strcmp(name, "") == 0)
return -EINVAL;
- return f2fs_setxattr(dentry->d_inode, type, name,
+ return f2fs_setxattr(d_inode(dentry), type, name,
value, size, NULL, flags);
}
@@ -130,19 +130,20 @@ static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list,
static int f2fs_xattr_advise_get(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (strcmp(name, "") != 0)
return -EINVAL;
- *((char *)buffer) = F2FS_I(inode)->i_advise;
+ if (buffer)
+ *((char *)buffer) = F2FS_I(inode)->i_advise;
return sizeof(char);
}
static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (strcmp(name, "") != 0)
return -EINVAL;
@@ -152,6 +153,7 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
return -EINVAL;
F2FS_I(inode)->i_advise |= *(char *)value;
+ mark_inode_dirty(inode);
return 0;
}
@@ -442,7 +444,7 @@ cleanup:
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct f2fs_xattr_entry *entry;
void *base_addr;
int error = 0;
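
The dentry->d_inode dereferences replaced here, and in the fat, fuse and gfs2 hunks below, all follow the same accessor pattern. A minimal sketch (the wrapper name is hypothetical):

#include <linux/dcache.h>
#include <linux/fs.h>

static inline struct inode *example_dentry_inode(struct dentry *dentry)
{
	return d_inode(dentry);		/* previously: dentry->d_inode */
}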
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 91ad9e1c9441..93fc62232ec2 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -8,9 +8,7 @@
* May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
*/
-#include <linux/fs.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include "fat.h"
/* this must be > 0. */
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index c5d6bb939d19..4afc4d9d2e41 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -13,13 +13,9 @@
* Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de>
*/
-#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
-#include <linux/kernel.h>
#include "fat.h"
/*
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 64e295e8ff38..be5e15323bab 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -2,11 +2,8 @@
#define _FAT_H
#include <linux/buffer_head.h>
-#include <linux/string.h>
#include <linux/nls.h>
-#include <linux/fs.h>
#include <linux/hash.h>
-#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/msdos_fs.h>
@@ -66,7 +63,7 @@ struct msdos_sb_info {
unsigned short sec_per_clus; /* sectors/cluster */
unsigned short cluster_bits; /* log2(cluster_size) */
unsigned int cluster_size; /* cluster size */
- unsigned char fats, fat_bits; /* number of FATs, FAT bits (12 or 16) */
+ unsigned char fats, fat_bits; /* number of FATs, FAT bits (12,16 or 32) */
unsigned short fat_start;
unsigned long fat_length; /* FAT start & length (sec.) */
unsigned long dir_start;
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 260705c58062..8226557130a2 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -3,9 +3,6 @@
* Released under GPL v2.
*/
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/msdos_fs.h>
#include <linux/blkdev.h>
#include "fat.h"
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 8429c68e3057..442d50a0e33e 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -10,10 +10,6 @@
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/mount.h>
-#include <linux/time.h>
-#include <linux/buffer_head.h>
-#include <linux/writeback.h>
-#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
@@ -170,8 +166,6 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
const struct file_operations fat_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
@@ -311,7 +305,7 @@ void fat_truncate_blocks(struct inode *inode, loff_t offset)
int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
generic_fillattr(inode, stat);
stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size;
@@ -383,7 +377,7 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
int fat_setattr(struct dentry *dentry, struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
unsigned int ia_valid;
int error;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 497c7c5263c7..c06774658345 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -11,21 +11,12 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
-#include <linux/buffer_head.h>
-#include <linux/mount.h>
-#include <linux/aio.h>
#include <linux/vfs.h>
+#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/uio.h>
-#include <linux/writeback.h>
-#include <linux/log2.h>
-#include <linux/hash.h>
#include <linux/blkdev.h>
#include <asm/unaligned.h>
#include "fat.h"
@@ -246,8 +237,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
return err;
}
-static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter,
+static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
@@ -256,7 +246,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
- if (rw == WRITE) {
+ if (iov_iter_rw(iter) == WRITE) {
/*
* FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
* so we need to update the ->mmu_private to block boundary.
@@ -275,8 +265,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
* FAT need to use the DIO_LOCKING for avoiding the race
* condition of fat_get_block() and ->truncate().
*/
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
- if (ret < 0 && (rw & WRITE))
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block);
+ if (ret < 0 && iov_iter_rw(iter) == WRITE)
fat_write_failed(mapping, offset + count);
return ret;
@@ -1280,8 +1270,7 @@ out:
static int fat_read_root(struct inode *inode)
{
- struct super_block *sb = inode->i_sb;
- struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int error;
MSDOS_I(inode)->i_pos = MSDOS_ROOT_INO;
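
fat_direct_IO() above also reflects the tree-wide ->direct_IO change: the separate 'int rw' argument is gone and the direction is taken from the iterator via iov_iter_rw(). A rough sketch of the new shape (illustrative names, not FAT code):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				 loff_t offset)
{
	if (iov_iter_rw(iter) == WRITE) {
		/* write-side preparation would go here */
	}
	/* a real implementation would now call blockdev_direct_IO(iocb,
	 * inode, iter, offset, get_block), as fat_direct_IO() does above */
	return 0;
}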
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index d8da2d2e30ae..c4589e981760 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -6,10 +6,6 @@
* and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru)
*/
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/buffer_head.h>
-#include <linux/time.h>
#include "fat.h"
/*
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index a783b0e1272a..b7e2b33aa793 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -7,8 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/time.h>
-#include <linux/buffer_head.h>
#include "fat.h"
/* Characters that are undesirable in an MS-DOS file name */
@@ -310,7 +308,7 @@ out:
static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
{
struct super_block *sb = dir->i_sb;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct fat_slot_info sinfo;
int err;
@@ -404,7 +402,7 @@ out:
/***** Unlink a file */
static int msdos_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
struct fat_slot_info sinfo;
int err;
@@ -442,8 +440,8 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
int err, old_attrs, is_dir, update_dotdot, corrupt = 0;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
- old_inode = old_dentry->d_inode;
- new_inode = new_dentry->d_inode;
+ old_inode = d_inode(old_dentry);
+ new_inode = d_inode(new_dentry);
err = fat_scan(old_dir, old_name, &old_sinfo);
if (err) {
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index b8b92c2f9683..7092584f424a 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -16,10 +16,8 @@
*/
#include <linux/module.h>
-#include <linux/jiffies.h>
#include <linux/ctype.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/namei.h>
#include "fat.h"
@@ -35,7 +33,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
{
int ret = 1;
spin_lock(&dentry->d_lock);
- if (dentry->d_time != dentry->d_parent->d_inode->i_version)
+ if (dentry->d_time != d_inode(dentry->d_parent)->i_version)
ret = 0;
spin_unlock(&dentry->d_lock);
return ret;
@@ -47,7 +45,7 @@ static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
/* This is not negative dentry. Always valid. */
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
return 1;
return vfat_revalidate_shortname(dentry);
}
@@ -67,7 +65,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
* positive dentry isn't good idea. So it's unsupported like
* rename("filename", "FILENAME") for now.
*/
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
return 1;
/*
@@ -803,7 +801,7 @@ out:
static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
@@ -834,7 +832,7 @@ out:
static int vfat_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
@@ -917,8 +915,8 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
struct super_block *sb = old_dir->i_sb;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
- old_inode = old_dentry->d_inode;
- new_inode = new_dentry->d_inode;
+ old_inode = d_inode(old_dentry);
+ new_inode = d_inode(new_dentry);
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
if (err)
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index 93e14933dcb6..eb192656fba2 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -266,7 +266,7 @@ struct inode *fat_rebuild_parent(struct super_block *sb, int parent_logstart)
* Find the parent for a directory that is not currently connected to
* the filesystem root.
*
- * On entry, the caller holds child_dir->d_inode->i_mutex.
+ * On entry, the caller holds d_inode(child_dir)->i_mutex.
*/
static struct dentry *fat_get_parent(struct dentry *child_dir)
{
@@ -276,7 +276,7 @@ static struct dentry *fat_get_parent(struct dentry *child_dir)
struct inode *parent_inode = NULL;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- if (!fat_get_dotdot_entry(child_dir->d_inode, &bh, &de)) {
+ if (!fat_get_dotdot_entry(d_inode(child_dir), &bh, &de)) {
int parent_logstart = fat_get_start(sbi, de);
parent_inode = fat_dget(sb, parent_logstart);
if (!parent_inode && sbi->options.nfs == FAT_NFS_NOSTALE_RO)
diff --git a/fs/file.c b/fs/file.c
index ee738ea028fa..93c5f89c248b 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -638,8 +638,7 @@ static struct file *__fget(unsigned int fd, fmode_t mask)
file = fcheck_files(files, fd);
if (file) {
/* File object ref couldn't be taken */
- if ((file->f_mode & mask) ||
- !atomic_long_inc_not_zero(&file->f_count))
+ if ((file->f_mode & mask) || !get_file_rcu(file))
file = NULL;
}
rcu_read_unlock();
diff --git a/fs/file_table.c b/fs/file_table.c
index 3f85411b03ce..294174dcc226 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -168,10 +168,10 @@ struct file *alloc_file(struct path *path, fmode_t mode,
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
if ((mode & FMODE_READ) &&
- likely(fop->read || fop->aio_read || fop->read_iter))
+ likely(fop->read || fop->read_iter))
mode |= FMODE_CAN_READ;
if ((mode & FMODE_WRITE) &&
- likely(fop->write || fop->aio_write || fop->write_iter))
+ likely(fop->write || fop->write_iter))
mode |= FMODE_CAN_WRITE;
file->f_mode = mode;
file->f_op = fop;
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index c36aeaf92e41..8b9229e2ca5c 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -76,7 +76,7 @@ const struct address_space_operations vxfs_immed_aops = {
static void *
vxfs_immed_follow_link(struct dentry *dp, struct nameidata *np)
{
- struct vxfs_inode_info *vip = VXFS_INO(dp->d_inode);
+ struct vxfs_inode_info *vip = VXFS_INO(d_inode(dp));
nd_set_link(np, vip->vii_immed.vi_immed);
return NULL;
}
diff --git a/fs/fs_pin.c b/fs/fs_pin.c
index b06c98796afb..611b5408f6ec 100644
--- a/fs/fs_pin.c
+++ b/fs/fs_pin.c
@@ -9,8 +9,8 @@ static DEFINE_SPINLOCK(pin_lock);
void pin_remove(struct fs_pin *pin)
{
spin_lock(&pin_lock);
- hlist_del(&pin->m_list);
- hlist_del(&pin->s_list);
+ hlist_del_init(&pin->m_list);
+ hlist_del_init(&pin->s_list);
spin_unlock(&pin_lock);
spin_lock_irq(&pin->wait.lock);
pin->done = 1;
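
A note on the hlist_del_init() switch in pin_remove(): plain hlist_del() poisons the node's pointers, while hlist_del_init() re-initialises them, so the node can later be tested with hlist_unhashed() or unlinked again safely. A small illustrative fragment (not fs_pin code):

#include <linux/list.h>

struct example_pin {
	struct hlist_node link;
};

static bool example_remove(struct example_pin *p)
{
	hlist_del_init(&p->link);		/* unhashed, not poisoned */
	return hlist_unhashed(&p->link);	/* true; valid after the _init variant */
}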
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 205e0d5d5307..f863ac6647ac 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -244,7 +244,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
return 0;
parent = fuse_control_sb->s_root;
- inc_nlink(parent->d_inode);
+ inc_nlink(d_inode(parent));
sprintf(name, "%u", fc->dev);
parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2,
&simple_dir_inode_operations,
@@ -283,11 +283,11 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
for (i = fc->ctl_ndents - 1; i >= 0; i--) {
struct dentry *dentry = fc->ctl_dentry[i];
- dentry->d_inode->i_private = NULL;
+ d_inode(dentry)->i_private = NULL;
d_drop(dentry);
dput(dentry);
}
- drop_nlink(fuse_control_sb->s_root->d_inode);
+ drop_nlink(d_inode(fuse_control_sb->s_root));
}
static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 28d0c7abba1c..e5bbf748b698 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -38,7 +38,6 @@
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/aio.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/list.h>
@@ -48,6 +47,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
+#include <linux/uio.h>
#include "fuse_i.h"
@@ -88,32 +88,23 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
* FUSE file.
*/
-static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
+ struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
loff_t pos = 0;
- struct iovec iov = { .iov_base = buf, .iov_len = count };
- struct fuse_io_priv io = { .async = 0, .file = file };
- struct iov_iter ii;
- iov_iter_init(&ii, READ, &iov, 1, count);
- return fuse_direct_io(&io, &ii, &pos, FUSE_DIO_CUSE);
+ return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
}
-static ssize_t cuse_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
+ struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
loff_t pos = 0;
- struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
- struct fuse_io_priv io = { .async = 0, .file = file };
- struct iov_iter ii;
- iov_iter_init(&ii, WRITE, &iov, 1, count);
-
/*
* No locking or generic_write_checks(), the server is
* responsible for locking and sanity checks.
*/
- return fuse_direct_io(&io, &ii, &pos,
+ return fuse_direct_io(&io, from, &pos,
FUSE_DIO_WRITE | FUSE_DIO_CUSE);
}
@@ -186,8 +177,8 @@ static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations cuse_frontend_fops = {
.owner = THIS_MODULE,
- .read = cuse_read,
- .write = cuse_write,
+ .read_iter = cuse_read_iter,
+ .write_iter = cuse_write_iter,
.open = cuse_open,
.release = cuse_release,
.unlocked_ioctl = cuse_file_ioctl,
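
cuse_frontend_fops above moves from .read/.write wrappers to .read_iter/.write_iter taking an iov_iter directly. The minimal shape of such a conversion, with illustrative stubs (not CUSE code):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uio.h>

static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;			/* stub: a real driver fills 'to' */
}

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	size_t len = iov_iter_count(from);

	iov_iter_advance(from, len);	/* stub: pretend it was all consumed */
	return len;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.read_iter	= example_read_iter,
	.write_iter	= example_write_iter,
};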
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 39706c57ad3c..c8b68ab2e574 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,7 +19,6 @@
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
-#include <linux/aio.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -711,28 +710,26 @@ struct fuse_copy_state {
struct fuse_conn *fc;
int write;
struct fuse_req *req;
- const struct iovec *iov;
+ struct iov_iter *iter;
struct pipe_buffer *pipebufs;
struct pipe_buffer *currbuf;
struct pipe_inode_info *pipe;
unsigned long nr_segs;
- unsigned long seglen;
- unsigned long addr;
struct page *pg;
unsigned len;
unsigned offset;
unsigned move_pages:1;
};
-static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
+static void fuse_copy_init(struct fuse_copy_state *cs,
+ struct fuse_conn *fc,
int write,
- const struct iovec *iov, unsigned long nr_segs)
+ struct iov_iter *iter)
{
memset(cs, 0, sizeof(*cs));
cs->fc = fc;
cs->write = write;
- cs->iov = iov;
- cs->nr_segs = nr_segs;
+ cs->iter = iter;
}
/* Unmap and put previous page of userspace buffer */
@@ -800,22 +797,16 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
cs->nr_segs++;
}
} else {
- if (!cs->seglen) {
- BUG_ON(!cs->nr_segs);
- cs->seglen = cs->iov[0].iov_len;
- cs->addr = (unsigned long) cs->iov[0].iov_base;
- cs->iov++;
- cs->nr_segs--;
- }
- err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
+ size_t off;
+ err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
if (err < 0)
return err;
- BUG_ON(err != 1);
+ BUG_ON(!err);
+ cs->len = err;
+ cs->offset = off;
cs->pg = page;
- cs->offset = cs->addr % PAGE_SIZE;
- cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
- cs->seglen -= cs->len;
- cs->addr += cs->len;
+ cs->offset = off;
+ iov_iter_advance(cs->iter, err);
}
return lock_request(cs->fc, cs->req);
@@ -1364,8 +1355,7 @@ static int fuse_dev_open(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
struct fuse_copy_state cs;
struct file *file = iocb->ki_filp;
@@ -1373,9 +1363,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
if (!fc)
return -EPERM;
- fuse_copy_init(&cs, fc, 1, iov, nr_segs);
+ if (!iter_is_iovec(to))
+ return -EINVAL;
- return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
+ fuse_copy_init(&cs, fc, 1, to);
+
+ return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
}
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
@@ -1395,7 +1388,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (!bufs)
return -ENOMEM;
- fuse_copy_init(&cs, fc, 1, NULL, 0);
+ fuse_copy_init(&cs, fc, 1, NULL);
cs.pipebufs = bufs;
cs.pipe = pipe;
ret = fuse_dev_do_read(fc, in, &cs, len);
@@ -1971,17 +1964,19 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
return err;
}
-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
struct fuse_copy_state cs;
struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
if (!fc)
return -EPERM;
- fuse_copy_init(&cs, fc, 0, iov, nr_segs);
+ if (!iter_is_iovec(from))
+ return -EINVAL;
+
+ fuse_copy_init(&cs, fc, 0, from);
- return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
+ return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
}
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
@@ -2044,8 +2039,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
}
pipe_unlock(pipe);
- fuse_copy_init(&cs, fc, 0, NULL, nbuf);
+ fuse_copy_init(&cs, fc, 0, NULL);
cs.pipebufs = bufs;
+ cs.nr_segs = nbuf;
cs.pipe = pipe;
if (flags & SPLICE_F_MOVE)
@@ -2233,11 +2229,9 @@ const struct file_operations fuse_dev_operations = {
.owner = THIS_MODULE,
.open = fuse_dev_open,
.llseek = no_llseek,
- .read = do_sync_read,
- .aio_read = fuse_dev_read,
+ .read_iter = fuse_dev_read,
.splice_read = fuse_dev_splice_read,
- .write = do_sync_write,
- .aio_write = fuse_dev_write,
+ .write_iter = fuse_dev_write,
.splice_write = fuse_dev_splice_write,
.poll = fuse_dev_poll,
.release = fuse_dev_release,
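
fuse_copy_fill() above now pins user pages straight from the iov_iter instead of walking an iovec array by hand. A minimal sketch of that pattern using the same iov_iter_get_pages()/iov_iter_advance() calls (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/uio.h>

static int example_pin_one_page(struct iov_iter *iter, struct page **page,
				size_t *offset, size_t *len)
{
	ssize_t bytes = iov_iter_get_pages(iter, page, PAGE_SIZE, 1, offset);

	if (bytes < 0)
		return bytes;			/* nothing could be pinned */
	*len = bytes;
	iov_iter_advance(iter, bytes);		/* consume what was pinned */
	return 0;
}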
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 1545b711ddcf..0572bca49f15 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -192,7 +192,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
struct fuse_inode *fi;
int ret;
- inode = ACCESS_ONCE(entry->d_inode);
+ inode = d_inode_rcu(entry);
if (inode && is_bad_inode(inode))
goto invalid;
else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
@@ -220,7 +220,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
attr_version = fuse_get_attr_version(fc);
parent = dget_parent(entry);
- fuse_lookup_init(fc, &args, get_node_id(parent->d_inode),
+ fuse_lookup_init(fc, &args, get_node_id(d_inode(parent)),
&entry->d_name, &outarg);
ret = fuse_simple_request(fc, &args);
dput(parent);
@@ -254,7 +254,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
return -ECHILD;
} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
parent = dget_parent(entry);
- fuse_advise_use_readdirplus(parent->d_inode);
+ fuse_advise_use_readdirplus(d_inode(parent));
dput(parent);
}
}
@@ -487,7 +487,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
entry = res;
}
- if (!(flags & O_CREAT) || entry->d_inode)
+ if (!(flags & O_CREAT) || d_really_is_positive(entry))
goto no_open;
/* Only creates */
@@ -653,7 +653,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
args.in.args[0].value = entry->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
@@ -689,7 +689,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
args.in.args[0].value = entry->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
- clear_nlink(entry->d_inode);
+ clear_nlink(d_inode(entry));
fuse_invalidate_attr(dir);
fuse_invalidate_entry_cache(entry);
} else if (err == -EINTR)
@@ -721,12 +721,12 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
err = fuse_simple_request(fc, &args);
if (!err) {
/* ctime changes */
- fuse_invalidate_attr(oldent->d_inode);
- fuse_update_ctime(oldent->d_inode);
+ fuse_invalidate_attr(d_inode(oldent));
+ fuse_update_ctime(d_inode(oldent));
if (flags & RENAME_EXCHANGE) {
- fuse_invalidate_attr(newent->d_inode);
- fuse_update_ctime(newent->d_inode);
+ fuse_invalidate_attr(d_inode(newent));
+ fuse_update_ctime(d_inode(newent));
}
fuse_invalidate_attr(olddir);
@@ -734,10 +734,10 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
fuse_invalidate_attr(newdir);
/* newent will end up negative */
- if (!(flags & RENAME_EXCHANGE) && newent->d_inode) {
- fuse_invalidate_attr(newent->d_inode);
+ if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) {
+ fuse_invalidate_attr(d_inode(newent));
fuse_invalidate_entry_cache(newent);
- fuse_update_ctime(newent->d_inode);
+ fuse_update_ctime(d_inode(newent));
}
} else if (err == -EINTR) {
/* If request was interrupted, DEITY only knows if the
@@ -746,7 +746,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
directory), then there can be inconsistency between
the dcache and the real filesystem. Tough luck. */
fuse_invalidate_entry(oldent);
- if (newent->d_inode)
+ if (d_really_is_positive(newent))
fuse_invalidate_entry(newent);
}
@@ -788,7 +788,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
{
int err;
struct fuse_link_in inarg;
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
@@ -961,9 +961,9 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
fuse_invalidate_attr(parent);
fuse_invalidate_entry(entry);
- if (child_nodeid != 0 && entry->d_inode) {
- mutex_lock(&entry->d_inode->i_mutex);
- if (get_node_id(entry->d_inode) != child_nodeid) {
+ if (child_nodeid != 0 && d_really_is_positive(entry)) {
+ mutex_lock(&d_inode(entry)->i_mutex);
+ if (get_node_id(d_inode(entry)) != child_nodeid) {
err = -ENOENT;
goto badentry;
}
@@ -977,13 +977,13 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
err = -ENOTEMPTY;
goto badentry;
}
- entry->d_inode->i_flags |= S_DEAD;
+ d_inode(entry)->i_flags |= S_DEAD;
}
dont_mount(entry);
- clear_nlink(entry->d_inode);
+ clear_nlink(d_inode(entry));
err = 0;
badentry:
- mutex_unlock(&entry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(entry)->i_mutex);
if (!err)
d_delete(entry);
} else {
@@ -1169,7 +1169,7 @@ static int fuse_direntplus_link(struct file *file,
struct qstr name = QSTR_INIT(dirent->name, dirent->namelen);
struct dentry *dentry;
struct dentry *alias;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct fuse_conn *fc;
struct inode *inode;
@@ -1205,7 +1205,7 @@ static int fuse_direntplus_link(struct file *file,
name.hash = full_name_hash(name.name, name.len);
dentry = d_lookup(parent, &name);
if (dentry) {
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (!inode) {
d_drop(dentry);
} else if (get_node_id(inode) != o->nodeid ||
@@ -1367,7 +1367,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
static char *read_link(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
char *link;
@@ -1712,7 +1712,7 @@ error:
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
if (!fuse_allow_current_process(get_fuse_conn(inode)))
return -EACCES;
@@ -1726,7 +1726,7 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
struct kstat *stat)
{
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
if (!fuse_allow_current_process(fc))
@@ -1738,7 +1738,7 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
static int fuse_setxattr(struct dentry *entry, const char *name,
const void *value, size_t size, int flags)
{
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_setxattr_in inarg;
@@ -1774,7 +1774,7 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
void *value, size_t size)
{
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_getxattr_in inarg;
@@ -1815,7 +1815,7 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_getxattr_in inarg;
@@ -1857,7 +1857,7 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
static int fuse_removexattr(struct dentry *entry, const char *name)
{
- struct inode *inode = entry->d_inode;
+ struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
int err;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c01ec3bdcfd8..5ef05b5c4cff 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -15,8 +15,8 @@
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
-#include <linux/aio.h>
#include <linux/falloc.h>
+#include <linux/uio.h>
static const struct file_operations fuse_direct_io_file_operations;
@@ -528,6 +528,17 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
}
}
+static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
+{
+ if (io->err)
+ return io->err;
+
+ if (io->bytes >= 0 && io->write)
+ return -EIO;
+
+ return io->bytes < 0 ? io->size : io->bytes;
+}
+
/**
* In case of short read, the caller sets 'pos' to the position of
* actual end of fuse request in IO request. Otherwise, if bytes_requested
@@ -546,6 +557,7 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
*/
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
+ bool is_sync = is_sync_kiocb(io->iocb);
int left;
spin_lock(&io->lock);
@@ -555,30 +567,24 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
io->bytes = pos;
left = --io->reqs;
+ if (!left && is_sync)
+ complete(io->done);
spin_unlock(&io->lock);
- if (!left) {
- long res;
+ if (!left && !is_sync) {
+ ssize_t res = fuse_get_res_by_io(io);
- if (io->err)
- res = io->err;
- else if (io->bytes >= 0 && io->write)
- res = -EIO;
- else {
- res = io->bytes < 0 ? io->size : io->bytes;
-
- if (!is_sync_kiocb(io->iocb)) {
- struct inode *inode = file_inode(io->iocb->ki_filp);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_inode *fi = get_fuse_inode(inode);
+ if (res >= 0) {
+ struct inode *inode = file_inode(io->iocb->ki_filp);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
- spin_unlock(&fc->lock);
- }
+ spin_lock(&fc->lock);
+ fi->attr_version = ++fc->attr_version;
+ spin_unlock(&fc->lock);
}
- aio_complete(io->iocb, res, 0);
+ io->iocb->ki_complete(io->iocb, res, 0);
kfree(io);
}
}
@@ -1139,13 +1145,11 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
- size_t count = iov_iter_count(from);
ssize_t written = 0;
ssize_t written_buffered = 0;
struct inode *inode = mapping->host;
ssize_t err;
loff_t endbyte = 0;
- loff_t pos = iocb->ki_pos;
if (get_fuse_conn(inode)->writeback_cache) {
/* Update size (EOF optimization) and mode (SUID clearing) */
@@ -1161,14 +1165,10 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
- err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
- if (err)
+ err = generic_write_checks(iocb, from);
+ if (err <= 0)
goto out;
- if (count == 0)
- goto out;
-
- iov_iter_truncate(from, count);
err = file_remove_suid(file);
if (err)
goto out;
@@ -1177,7 +1177,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (err)
goto out;
- if (file->f_flags & O_DIRECT) {
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ loff_t pos = iocb->ki_pos;
written = generic_file_direct_write(iocb, from, pos);
if (written < 0 || !iov_iter_count(from))
goto out;
@@ -1203,9 +1204,9 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
written += written_buffered;
iocb->ki_pos = pos + written_buffered;
} else {
- written = fuse_perform_write(file, mapping, from, pos);
+ written = fuse_perform_write(file, mapping, from, iocb->ki_pos);
if (written >= 0)
- iocb->ki_pos = pos + written;
+ iocb->ki_pos += written;
}
out:
current->backing_dev_info = NULL;
@@ -1395,55 +1396,30 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
return res;
}
-static ssize_t fuse_direct_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct fuse_io_priv io = { .async = 0, .file = file };
- struct iovec iov = { .iov_base = buf, .iov_len = count };
- struct iov_iter ii;
- iov_iter_init(&ii, READ, &iov, 1, count);
- return __fuse_direct_read(&io, &ii, ppos);
-}
-
-static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
- struct iov_iter *iter,
- loff_t *ppos)
+static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct file *file = io->file;
- struct inode *inode = file_inode(file);
- size_t count = iov_iter_count(iter);
- ssize_t res;
-
-
- res = generic_write_checks(file, ppos, &count, 0);
- if (!res) {
- iov_iter_truncate(iter, count);
- res = fuse_direct_io(io, iter, ppos, FUSE_DIO_WRITE);
- }
-
- fuse_invalidate_attr(inode);
-
- return res;
+ struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
+ return __fuse_direct_read(&io, to, &iocb->ki_pos);
}
-static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- ssize_t res;
struct fuse_io_priv io = { .async = 0, .file = file };
- struct iov_iter ii;
- iov_iter_init(&ii, WRITE, &iov, 1, count);
+ ssize_t res;
if (is_bad_inode(inode))
return -EIO;
/* Don't allow parallel writes to the same file */
mutex_lock(&inode->i_mutex);
- res = __fuse_direct_write(&io, &ii, ppos);
+ res = generic_write_checks(iocb, from);
if (res > 0)
- fuse_write_update_size(inode, *ppos);
+ res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
+ fuse_invalidate_attr(inode);
+ if (res > 0)
+ fuse_write_update_size(inode, iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
return res;
@@ -2798,9 +2774,9 @@ static inline loff_t fuse_round_up(loff_t off)
}
static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
+ DECLARE_COMPLETION_ONSTACK(wait);
ssize_t ret = 0;
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
@@ -2815,15 +2791,15 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
inode = file->f_mapping->host;
i_size = i_size_read(inode);
- if ((rw == READ) && (offset > i_size))
+ if ((iov_iter_rw(iter) == READ) && (offset > i_size))
return 0;
/* optimization for short read */
- if (async_dio && rw != WRITE && offset + count > i_size) {
+ if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
if (offset >= i_size)
return 0;
- count = min_t(loff_t, count, fuse_round_up(i_size - offset));
- iov_iter_truncate(iter, count);
+ iov_iter_truncate(iter, fuse_round_up(i_size - offset));
+ count = iov_iter_count(iter);
}
io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
@@ -2834,7 +2810,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
io->bytes = -1;
io->size = 0;
io->offset = offset;
- io->write = (rw == WRITE);
+ io->write = (iov_iter_rw(iter) == WRITE);
io->err = 0;
io->file = file;
/*
@@ -2849,13 +2825,19 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
* to wait on real async I/O requests, so we must submit this request
* synchronously.
*/
- if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
+ if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+ iov_iter_rw(iter) == WRITE)
io->async = false;
- if (rw == WRITE)
- ret = __fuse_direct_write(io, iter, &pos);
- else
+ if (io->async && is_sync_kiocb(iocb))
+ io->done = &wait;
+
+ if (iov_iter_rw(iter) == WRITE) {
+ ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
+ fuse_invalidate_attr(inode);
+ } else {
ret = __fuse_direct_read(io, iter, &pos);
+ }
if (io->async) {
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
@@ -2864,12 +2846,13 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
if (!is_sync_kiocb(iocb))
return -EIOCBQUEUED;
- ret = wait_on_sync_kiocb(iocb);
- } else {
- kfree(io);
+ wait_for_completion(&wait);
+ ret = fuse_get_res_by_io(io);
}
- if (rw == WRITE) {
+ kfree(io);
+
+ if (iov_iter_rw(iter) == WRITE) {
if (ret > 0)
fuse_write_update_size(inode, pos);
else if (ret < 0 && offset + count > i_size)
@@ -2957,9 +2940,7 @@ out:
static const struct file_operations fuse_file_operations = {
.llseek = fuse_file_llseek,
- .read = new_sync_read,
.read_iter = fuse_file_read_iter,
- .write = new_sync_write,
.write_iter = fuse_file_write_iter,
.mmap = fuse_file_mmap,
.open = fuse_open,
@@ -2977,8 +2958,8 @@ static const struct file_operations fuse_file_operations = {
static const struct file_operations fuse_direct_io_file_operations = {
.llseek = fuse_file_llseek,
- .read = fuse_direct_read,
- .write = fuse_direct_write,
+ .read_iter = fuse_direct_read_iter,
+ .write_iter = fuse_direct_write_iter,
.mmap = fuse_direct_mmap,
.open = fuse_open,
.flush = fuse_flush,
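
fuse_direct_IO() above now waits for its own asynchronous requests with an on-stack completion rather than the removed wait_on_sync_kiocb(). The generic shape of that pattern, sketched with a hypothetical submit function:

#include <linux/completion.h>

void example_submit_io(struct completion *done);	/* hypothetical producer */

static int example_wait_for_async_io(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	example_submit_io(&done);	/* the worker finishes by calling
					 * complete(&done) */
	wait_for_completion(&done);
	return 0;
}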
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1cdfb07c1376..7354dc142a50 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -263,6 +263,7 @@ struct fuse_io_priv {
int err;
struct kiocb *iocb;
struct file *file;
+ struct completion *done;
};
/**
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e8799c11424b..082ac1c97f39 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -421,7 +421,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
memset(&outarg, 0, sizeof(outarg));
args.in.numargs = 0;
args.in.h.opcode = FUSE_STATFS;
- args.in.h.nodeid = get_node_id(dentry->d_inode);
+ args.in.h.nodeid = get_node_id(d_inode(dentry));
args.out.numargs = 1;
args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
@@ -740,7 +740,7 @@ static struct dentry *fuse_fh_to_parent(struct super_block *sb,
static struct dentry *fuse_get_parent(struct dentry *child)
{
- struct inode *child_inode = child->d_inode;
+ struct inode *child_inode = d_inode(child);
struct fuse_conn *fc = get_fuse_conn(child_inode);
struct inode *inode;
struct dentry *parent;
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 7b3143064af1..1be3b061c05c 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -110,11 +110,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS);
if (error)
goto out;
-
- if (acl)
- set_cached_acl(inode, type, acl);
- else
- forget_cached_acl(inode, type);
+ set_cached_acl(inode, type, acl);
out:
kfree(data);
return error;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 4ad4f94edebe..5551fea0afd7 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -20,7 +20,7 @@
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include <trace/events/writeback.h>
#include "gfs2.h"
@@ -671,12 +671,12 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (alloc_required) {
struct gfs2_alloc_parms ap = { .aflags = 0, };
- error = gfs2_quota_lock_check(ip);
+ requested = data_blocks + ind_blocks;
+ ap.target = requested;
+ error = gfs2_quota_lock_check(ip, &ap);
if (error)
goto out_unlock;
- requested = data_blocks + ind_blocks;
- ap.target = requested;
error = gfs2_inplace_reserve(ip, &ap);
if (error)
goto out_qunlock;
@@ -1016,13 +1016,12 @@ out:
/**
* gfs2_ok_for_dio - check that dio is valid on this file
* @ip: The inode
- * @rw: READ or WRITE
* @offset: The offset at which we are reading or writing
*
* Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
* 1 (to accept the i/o request)
*/
-static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
+static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
/*
* Should we return an error here? I can't see that O_DIRECT for
@@ -1039,8 +1038,8 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
-static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -1061,7 +1060,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
rv = gfs2_glock_nq(&gh);
if (rv)
return rv;
- rv = gfs2_ok_for_dio(ip, rw, offset);
+ rv = gfs2_ok_for_dio(ip, offset);
if (rv != 1)
goto out; /* dio not valid, fall back to buffered i/o */
@@ -1091,13 +1090,12 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
rv = filemap_write_and_wait_range(mapping, lstart, end);
if (rv)
goto out;
- if (rw == WRITE)
+ if (iov_iter_rw(iter) == WRITE)
truncate_inode_pages_range(mapping, lstart, end);
}
- rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
- iter, offset,
- gfs2_get_block_direct, NULL, NULL, 0);
+ rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+ offset, gfs2_get_block_direct, NULL, NULL, 0);
out:
gfs2_glock_dq(&gh);
gfs2_holder_uninit(&gh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index f0b945ab853e..61296ecbd0e2 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1224,7 +1224,7 @@ static int do_grow(struct inode *inode, u64 size)
if (gfs2_is_stuffed(ip) &&
(size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
- error = gfs2_quota_lock_check(ip);
+ error = gfs2_quota_lock_check(ip, &ap);
if (error)
return error;
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 589f4ea9381c..30822b148f3e 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -48,9 +48,9 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
parent = dget_parent(dentry);
- sdp = GFS2_SB(parent->d_inode);
- dip = GFS2_I(parent->d_inode);
- inode = dentry->d_inode;
+ sdp = GFS2_SB(d_inode(parent));
+ dip = GFS2_I(d_inode(parent));
+ inode = d_inode(dentry);
if (inode) {
if (is_bad_inode(inode))
@@ -68,7 +68,7 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
goto fail;
}
- error = gfs2_dir_check(parent->d_inode, &dentry->d_name, ip);
+ error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip);
switch (error) {
case 0:
if (!inode)
@@ -113,10 +113,10 @@ static int gfs2_dentry_delete(const struct dentry *dentry)
{
struct gfs2_inode *ginode;
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
return 0;
- ginode = GFS2_I(dentry->d_inode);
+ ginode = GFS2_I(d_inode(dentry));
if (!ginode->i_iopen_gh.gh_gl)
return 0;
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index c41d255b6a7b..5d15e9498b48 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -49,7 +49,7 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
*len = GFS2_SMALL_FH_SIZE;
- if (!parent || inode == sb->s_root->d_inode)
+ if (!parent || inode == d_inode(sb->s_root))
return *len;
ip = GFS2_I(parent);
@@ -88,8 +88,8 @@ static int get_name_filldir(struct dir_context *ctx, const char *name,
static int gfs2_get_name(struct dentry *parent, char *name,
struct dentry *child)
{
- struct inode *dir = parent->d_inode;
- struct inode *inode = child->d_inode;
+ struct inode *dir = d_inode(parent);
+ struct inode *inode = d_inode(child);
struct gfs2_inode *dip, *ip;
struct get_name_filldir gnfd = {
.ctx.actor = get_name_filldir,
@@ -128,7 +128,7 @@ static int gfs2_get_name(struct dentry *parent, char *name,
static struct dentry *gfs2_get_parent(struct dentry *child)
{
- return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
+ return d_obtain_alias(gfs2_lookupi(d_inode(child), &gfs2_qdotdot, 1));
}
static struct dentry *gfs2_get_dentry(struct super_block *sb,
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 3e32bb8e2d7e..31892871ea87 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -25,7 +25,6 @@
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
-#include <linux/aio.h>
#include <linux/delay.h>
#include "gfs2.h"
@@ -429,11 +428,11 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out_unlock;
- ret = gfs2_quota_lock_check(ip);
- if (ret)
- goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
ap.target = data_blocks + ind_blocks;
+ ret = gfs2_quota_lock_check(ip, &ap);
+ if (ret)
+ goto out_unlock;
ret = gfs2_inplace_reserve(ip, &ap);
if (ret)
goto out_quota_unlock;
@@ -710,7 +709,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
- if (file->f_flags & O_APPEND) {
+ if (iocb->ki_flags & IOCB_APPEND) {
struct gfs2_holder gh;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
@@ -765,22 +764,30 @@ out:
brelse(dibh);
return error;
}
-
-static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
- unsigned int *data_blocks, unsigned int *ind_blocks)
+/**
+ * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
+ * blocks, determine how many bytes can be written.
+ * @ip: The inode in question.
+ * @len: Max cap of bytes. What we return in *len must be <= this.
+ * @data_blocks: Compute and return the number of data blocks needed
+ * @ind_blocks: Compute and return the number of indirect blocks needed
+ * @max_blocks: The total blocks available to work with.
+ *
+ * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
+ */
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
+ unsigned int *data_blocks, unsigned int *ind_blocks,
+ unsigned int max_blocks)
{
+ loff_t max = *len;
const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- unsigned int max_blocks = ip->i_rgd->rd_free_clone;
unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
for (tmp = max_data; tmp > sdp->sd_diptrs;) {
tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
max_data -= tmp;
}
- /* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
- so it might end up with fewer data blocks */
- if (max_data <= *data_blocks)
- return;
+
*data_blocks = max_data;
*ind_blocks = max_blocks - max_data;
*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
@@ -797,7 +804,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
- loff_t bytes, max_bytes;
+ loff_t bytes, max_bytes, max_blks = UINT_MAX;
int error;
const loff_t pos = offset;
const loff_t count = len;
@@ -819,6 +826,9 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
gfs2_size_hint(file, offset, len);
+ gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
+ ap.min_target = data_blocks + ind_blocks;
+
while (len > 0) {
if (len < bytes)
bytes = len;
@@ -827,27 +837,41 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
offset += bytes;
continue;
}
- error = gfs2_quota_lock_check(ip);
+
+ /* We need to determine how many bytes we can actually
+ * fallocate without exceeding quota or going over the
+ * end of the fs. We start off optimistically by assuming
+ * we can write max_bytes */
+ max_bytes = (len > max_chunk_size) ? max_chunk_size : len;
+
+ /* Since max_bytes is most likely a theoretical max, we
+ * calculate a more realistic 'bytes' to serve as a good
+ * starting point for the number of bytes we may be able
+ * to write */
+ gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+ ap.target = data_blocks + ind_blocks;
+
+ error = gfs2_quota_lock_check(ip, &ap);
if (error)
return error;
-retry:
- gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+ /* ap.allowed tells us how many blocks quota will allow
+ * us to write. Check if this reduces max_blks */
+ if (ap.allowed && ap.allowed < max_blks)
+ max_blks = ap.allowed;
- ap.target = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip, &ap);
- if (error) {
- if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
- bytes >>= 1;
- bytes &= bsize_mask;
- if (bytes == 0)
- bytes = sdp->sd_sb.sb_bsize;
- goto retry;
- }
+ if (error)
goto out_qunlock;
- }
- max_bytes = bytes;
- calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
- &max_bytes, &data_blocks, &ind_blocks);
+
+ /* check if the selected rgrp limits our max_blks further */
+ if (ap.allowed && ap.allowed < max_blks)
+ max_blks = ap.allowed;
+
+ /* Almost done. Calculate bytes that can be written using
+ * max_blks. We also recompute max_bytes, data_blocks and
+ * ind_blocks */
+ calc_max_reserv(ip, &max_bytes, &data_blocks,
+ &ind_blocks, max_blks);
rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
@@ -931,6 +955,22 @@ out_uninit:
return ret;
}
+static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ int error;
+ struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
+
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ return (ssize_t)error;
+
+ gfs2_size_hint(out, *ppos, len);
+
+ return iter_file_splice_write(pipe, out, ppos, len, flags);
+}
+
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
/**
@@ -1065,9 +1105,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
const struct file_operations gfs2_file_fops = {
.llseek = gfs2_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
@@ -1077,7 +1115,7 @@ const struct file_operations gfs2_file_fops = {
.lock = gfs2_lock,
.flock = gfs2_flock,
.splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
+ .splice_write = gfs2_file_splice_write,
.setlease = simple_nosetlease,
.fallocate = gfs2_fallocate,
};
@@ -1097,9 +1135,7 @@ const struct file_operations gfs2_dir_fops = {
const struct file_operations gfs2_file_fops_nolock = {
.llseek = gfs2_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
@@ -1107,7 +1143,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.release = gfs2_release,
.fsync = gfs2_fsync,
.splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
+ .splice_write = gfs2_file_splice_write,
.setlease = generic_setlease,
.fallocate = gfs2_fallocate,
};
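
The reworked __gfs2_fallocate() loop above no longer halves the request and retries on -ENOSPC; it asks quota and the selected resource group how many blocks they will permit and clamps the chunk to the smaller answer before computing the writable byte count. A simplified, self-contained sketch of that clamping arithmetic follows; the block-size math and the example numbers are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct gfs2_alloc_parms. */
struct alloc_parms {
	uint64_t target;     /* blocks we would like             */
	uint32_t min_target; /* smallest acceptable allocation   */
	uint64_t allowed;    /* blocks the last check will allow */
};

/* Clamp the running limit by whatever a check reported as 'allowed'. */
static uint64_t clamp_blks(uint64_t max_blks, uint64_t allowed)
{
	return (allowed && allowed < max_blks) ? allowed : max_blks;
}

int main(void)
{
	const unsigned bsize_shift = 12;        /* 4 KiB blocks, for example */
	uint64_t max_blks = UINT32_MAX;         /* start off optimistic      */
	struct alloc_parms ap = { .target = 1024, .min_target = 8 };

	ap.allowed = 600;                       /* from the quota check        */
	max_blks = clamp_blks(max_blks, ap.allowed);
	ap.allowed = 500;                       /* from the rgrp reservation   */
	max_blks = clamp_blks(max_blks, ap.allowed);

	printf("chunk limited to %llu blocks (%llu bytes)\n",
	       (unsigned long long)max_blks,
	       (unsigned long long)(max_blks << bsize_shift));
	return 0;
}
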
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f42dffba056a..0fa8062f85a7 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -2047,34 +2047,41 @@ static const struct file_operations gfs2_sbstats_fops = {
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
- sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
- if (!sdp->debugfs_dir)
- return -ENOMEM;
- sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
- S_IFREG | S_IRUGO,
- sdp->debugfs_dir, sdp,
- &gfs2_glocks_fops);
- if (!sdp->debugfs_dentry_glocks)
+ struct dentry *dent;
+
+ dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
+ if (IS_ERR_OR_NULL(dent))
+ goto fail;
+ sdp->debugfs_dir = dent;
+
+ dent = debugfs_create_file("glocks",
+ S_IFREG | S_IRUGO,
+ sdp->debugfs_dir, sdp,
+ &gfs2_glocks_fops);
+ if (IS_ERR_OR_NULL(dent))
goto fail;
+ sdp->debugfs_dentry_glocks = dent;
- sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
- S_IFREG | S_IRUGO,
- sdp->debugfs_dir, sdp,
- &gfs2_glstats_fops);
- if (!sdp->debugfs_dentry_glstats)
+ dent = debugfs_create_file("glstats",
+ S_IFREG | S_IRUGO,
+ sdp->debugfs_dir, sdp,
+ &gfs2_glstats_fops);
+ if (IS_ERR_OR_NULL(dent))
goto fail;
+ sdp->debugfs_dentry_glstats = dent;
- sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
- S_IFREG | S_IRUGO,
- sdp->debugfs_dir, sdp,
- &gfs2_sbstats_fops);
- if (!sdp->debugfs_dentry_sbstats)
+ dent = debugfs_create_file("sbstats",
+ S_IFREG | S_IRUGO,
+ sdp->debugfs_dir, sdp,
+ &gfs2_sbstats_fops);
+ if (IS_ERR_OR_NULL(dent))
goto fail;
+ sdp->debugfs_dentry_sbstats = dent;
return 0;
fail:
gfs2_delete_debugfs_file(sdp);
- return -ENOMEM;
+ return dent ? PTR_ERR(dent) : -ENOMEM;
}
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
@@ -2100,6 +2107,8 @@ void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
int gfs2_register_debugfs(void)
{
gfs2_root = debugfs_create_dir("gfs2", NULL);
+ if (IS_ERR(gfs2_root))
+ return PTR_ERR(gfs2_root);
return gfs2_root ? 0 : -ENOMEM;
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 7a2dbbc0d634..58b75abf6ab2 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -301,8 +301,10 @@ struct gfs2_blkreserv {
* to the allocation code.
*/
struct gfs2_alloc_parms {
- u32 target;
+ u64 target;
+ u32 min_target;
u32 aflags;
+ u64 allowed;
};
enum {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 73c72253faac..1b3ca7a2e3fc 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -295,7 +295,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
(name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
- dir == sb->s_root->d_inode)) {
+ dir == d_inode(sb->s_root))) {
igrab(dir);
return dir;
}
@@ -382,7 +382,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
struct gfs2_alloc_parms ap = { .target = *dblocks, .aflags = flags, };
int error;
- error = gfs2_quota_lock_check(ip);
+ error = gfs2_quota_lock_check(ip, &ap);
if (error)
goto out;
@@ -525,7 +525,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
int error;
if (da->nr_blocks) {
- error = gfs2_quota_lock_check(dip);
+ error = gfs2_quota_lock_check(dip, &ap);
if (error)
goto fail_quota_locks;
@@ -687,7 +687,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
}
gfs2_set_inode_flags(inode);
- if ((GFS2_I(sdp->sd_root_dir->d_inode) == dip) ||
+ if ((GFS2_I(d_inode(sdp->sd_root_dir)) == dip) ||
(dip->i_diskflags & GFS2_DIF_TOPDIR))
aflags |= GFS2_AF_ORLOV;
@@ -888,7 +888,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
{
struct gfs2_inode *dip = GFS2_I(dir);
struct gfs2_sbd *sdp = GFS2_SB(dir);
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder ghs[2];
struct buffer_head *dibh;
@@ -953,7 +953,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (da.nr_blocks) {
struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
- error = gfs2_quota_lock_check(dip);
+ error = gfs2_quota_lock_check(dip, &ap);
if (error)
goto out_gunlock;
@@ -1055,7 +1055,7 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
static int gfs2_unlink_inode(struct gfs2_inode *dip,
const struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
int error;
@@ -1091,7 +1091,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
{
struct gfs2_inode *dip = GFS2_I(dir);
struct gfs2_sbd *sdp = GFS2_SB(dir);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder ghs[3];
struct gfs2_rgrpd *rgd;
@@ -1241,7 +1241,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
return PTR_ERR(d);
if (d != NULL)
dentry = d;
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
if (!(*opened & FILE_OPENED))
return finish_no_open(file, d);
dput(d);
@@ -1282,7 +1282,7 @@ static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
error = -EINVAL;
break;
}
- if (dir == sb->s_root->d_inode) {
+ if (dir == d_inode(sb->s_root)) {
error = 0;
break;
}
@@ -1321,7 +1321,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
{
struct gfs2_inode *odip = GFS2_I(odir);
struct gfs2_inode *ndip = GFS2_I(ndir);
- struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
+ struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
struct gfs2_inode *nip = NULL;
struct gfs2_sbd *sdp = GFS2_SB(odir);
struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
@@ -1332,8 +1332,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
unsigned int x;
int error;
- if (ndentry->d_inode) {
- nip = GFS2_I(ndentry->d_inode);
+ if (d_really_is_positive(ndentry)) {
+ nip = GFS2_I(d_inode(ndentry));
if (ip == nip)
return 0;
}
@@ -1457,7 +1457,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
/* Check out the dir to be renamed */
if (dir_rename) {
- error = gfs2_permission(odentry->d_inode, MAY_WRITE);
+ error = gfs2_permission(d_inode(odentry), MAY_WRITE);
if (error)
goto out_gunlock;
}
@@ -1470,7 +1470,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (da.nr_blocks) {
struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
- error = gfs2_quota_lock_check(ndip);
+ error = gfs2_quota_lock_check(ndip, &ap);
if (error)
goto out_gunlock;
@@ -1550,7 +1550,7 @@ out:
static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
struct gfs2_holder i_gh;
struct buffer_head *dibh;
unsigned int size;
@@ -1669,6 +1669,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
kuid_t ouid, nuid;
kgid_t ogid, ngid;
int error;
+ struct gfs2_alloc_parms ap;
ouid = inode->i_uid;
ogid = inode->i_gid;
@@ -1696,9 +1697,11 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (error)
goto out;
+ ap.target = gfs2_get_inode_blocks(&ip->i_inode);
+
if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
!gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
- error = gfs2_quota_check(ip, nuid, ngid);
+ error = gfs2_quota_check(ip, nuid, ngid, &ap);
if (error)
goto out_gunlock_q;
}
@@ -1713,9 +1716,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
!gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
- u64 blocks = gfs2_get_inode_blocks(&ip->i_inode);
- gfs2_quota_change(ip, -blocks, ouid, ogid);
- gfs2_quota_change(ip, blocks, nuid, ngid);
+ gfs2_quota_change(ip, -ap.target, ouid, ogid);
+ gfs2_quota_change(ip, ap.target, nuid, ngid);
}
out_end_trans:
@@ -1740,7 +1742,7 @@ out:
static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder i_gh;
int error;
@@ -1796,7 +1798,7 @@ out:
static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int error;
@@ -1819,7 +1821,7 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
static int gfs2_setxattr(struct dentry *dentry, const char *name,
const void *data, size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int ret;
@@ -1839,7 +1841,7 @@ static int gfs2_setxattr(struct dentry *dentry, const char *name,
static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
void *data, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int ret;
@@ -1860,7 +1862,7 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
static int gfs2_removexattr(struct dentry *dentry, const char *name)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int ret;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index efc8e254787c..35b49f44c72f 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -647,7 +647,7 @@ out_unlock:
static int init_journal(struct gfs2_sbd *sdp, int undo)
{
- struct inode *master = sdp->sd_master_dir->d_inode;
+ struct inode *master = d_inode(sdp->sd_master_dir);
struct gfs2_holder ji_gh;
struct gfs2_inode *ip;
int jindex = 1;
@@ -782,7 +782,7 @@ static struct lock_class_key gfs2_quota_imutex_key;
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
int error = 0;
- struct inode *master = sdp->sd_master_dir->d_inode;
+ struct inode *master = d_inode(sdp->sd_master_dir);
if (undo)
goto fail_qinode;
@@ -848,7 +848,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
char buf[30];
int error = 0;
struct gfs2_inode *ip;
- struct inode *master = sdp->sd_master_dir->d_inode;
+ struct inode *master = d_inode(sdp->sd_master_dir);
if (sdp->sd_args.ar_spectator)
return 0;
@@ -1357,7 +1357,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
return ERR_PTR(error);
}
s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
- path.dentry->d_inode->i_sb->s_bdev);
+ d_inode(path.dentry)->i_sb->s_bdev);
path_put(&path);
if (IS_ERR(s)) {
pr_warn("gfs2 mount does not exist\n");
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 3aa17d4d1cfc..e3065cb9ab08 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -923,6 +923,9 @@ restart:
if (error)
return error;
+ if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
+ force_refresh = FORCE;
+
qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
@@ -974,11 +977,8 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
sizeof(struct gfs2_quota_data *), sort_qd, NULL);
for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
- int force = NO_FORCE;
qd = ip->i_res->rs_qa_qd[x];
- if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
- force = FORCE;
- error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
+ error = do_glock(qd, NO_FORCE, &ip->i_res->rs_qa_qd_ghs[x]);
if (error)
break;
}
@@ -1094,14 +1094,33 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
return 0;
}
-int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
+/**
+ * gfs2_quota_check - check if allocating new blocks will exceed quota
+ * @ip: The inode for which this check is being performed
+ * @uid: The uid to check against
+ * @gid: The gid to check against
+ * @ap: The allocation parameters. ap->target contains the requested
+ * blocks. ap->min_target, if set, contains the minimum blks
+ * requested.
+ *
+ * Returns: 0 on success.
+ * min_req = ap->min_target ? ap->min_target : ap->target;
+ * quota must allow at least min_req blks for success and
+ * ap->allowed is set to the number of blocks allowed
+ *
+ * -EDQUOT otherwise, quota violation. ap->allowed is set to number
+ * of blocks available.
+ */
+int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
+ struct gfs2_alloc_parms *ap)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qd;
- s64 value;
+ s64 value, warn, limit;
unsigned int x;
int error = 0;
+ ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
return 0;
@@ -1115,30 +1134,37 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
qid_eq(qd->qd_id, make_kqid_gid(gid))))
continue;
+ warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
+ limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
spin_lock(&qd_lock);
value += qd->qd_change;
spin_unlock(&qd_lock);
- if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
- print_message(qd, "exceeded");
- quota_send_warning(qd->qd_id,
- sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
-
- error = -EDQUOT;
- break;
- } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
- (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
+ if (limit > 0 && (limit - value) < ap->allowed)
+ ap->allowed = limit - value;
+ /* If we can't meet the target */
+ if (limit && limit < (value + (s64)ap->target)) {
+ /* If no min_target specified or we don't meet
+ * min_target, return -EDQUOT */
+ if (!ap->min_target || ap->min_target > ap->allowed) {
+ print_message(qd, "exceeded");
+ quota_send_warning(qd->qd_id,
+ sdp->sd_vfs->s_dev,
+ QUOTA_NL_BHARDWARN);
+ error = -EDQUOT;
+ break;
+ }
+ } else if (warn && warn < value &&
time_after_eq(jiffies, qd->qd_last_warn +
- gfs2_tune_get(sdp,
- gt_quota_warn_period) * HZ)) {
+ gfs2_tune_get(sdp, gt_quota_warn_period)
+ * HZ)) {
quota_send_warning(qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
error = print_message(qd, "warning");
qd->qd_last_warn = jiffies;
}
}
-
return error;
}
@@ -1468,32 +1494,34 @@ int gfs2_quotad(void *data)
return 0;
}
-static int gfs2_quota_get_xstate(struct super_block *sb,
- struct fs_quota_stat *fqs)
+static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
- memset(fqs, 0, sizeof(struct fs_quota_stat));
- fqs->qs_version = FS_QSTAT_VERSION;
+ memset(state, 0, sizeof(*state));
switch (sdp->sd_args.ar_quota) {
case GFS2_QUOTA_ON:
- fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
+ state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
+ state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
/*FALLTHRU*/
case GFS2_QUOTA_ACCOUNT:
- fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
+ state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
+ QCI_SYSFILE;
+ state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
+ QCI_SYSFILE;
break;
case GFS2_QUOTA_OFF:
break;
}
-
if (sdp->sd_quota_inode) {
- fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
- fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
+ state->s_state[USRQUOTA].ino =
+ GFS2_I(sdp->sd_quota_inode)->i_no_addr;
+ state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
}
- fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
- fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
- fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
+ state->s_state[USRQUOTA].nextents = 1; /* unsupported */
+ state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
+ state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
return 0;
}
@@ -1638,7 +1666,7 @@ out_put:
const struct quotactl_ops gfs2_quotactl_ops = {
.quota_sync = gfs2_quota_sync,
- .get_xstate = gfs2_quota_get_xstate,
+ .get_state = gfs2_quota_get_state,
.get_dqblk = gfs2_get_dqblk,
.set_dqblk = gfs2_set_dqblk,
};
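
gfs2_quota_check() above now reports how many blocks quota will still allow (ap->allowed) and only fails when even ap->min_target cannot be met. The core arithmetic, reduced to a standalone function, is sketched here; the field names mirror the patch, everything else is illustrative.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct alloc_parms {
	uint64_t target;
	uint32_t min_target;
	uint64_t allowed;
};

/*
 * Simplified version of the new check: 'value' is the usage already
 * charged to this id, 'limit' the hard limit (0 means unlimited).
 */
static int quota_check(int64_t value, int64_t limit, struct alloc_parms *ap)
{
	int64_t allowed = INT64_MAX;	/* assume we are permitted a whole lot */

	if (limit > 0 && limit - value < allowed)
		allowed = (limit > value) ? limit - value : 0;
	ap->allowed = (uint64_t)allowed;

	/* Can't meet the full target... */
	if (limit && limit < value + (int64_t)ap->target) {
		/* ...and can't even meet the minimum: quota violation. */
		if (!ap->min_target || (int64_t)ap->min_target > allowed)
			return -EDQUOT;
	}
	return 0;
}

int main(void)
{
	struct alloc_parms ap = { .target = 100, .min_target = 10 };

	/* 950 of 1000 blocks used: full target fails, min_target still fits. */
	int ret = quota_check(950, 1000, &ap);

	printf("ret=%d allowed=%llu\n", ret, (unsigned long long)ap.allowed);
	return 0;
}
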
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 55d506eb3c4a..ad04b3acae2b 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -24,7 +24,8 @@ extern void gfs2_quota_unhold(struct gfs2_inode *ip);
extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
extern void gfs2_quota_unlock(struct gfs2_inode *ip);
-extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
+ struct gfs2_alloc_parms *ap);
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
kuid_t uid, kgid_t gid);
@@ -37,7 +38,8 @@ extern int gfs2_quotad(void *data);
extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
-static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
+static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
+ struct gfs2_alloc_parms *ap)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
int ret;
@@ -48,7 +50,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
return ret;
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
- ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
+ ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap);
if (ret)
gfs2_quota_unlock(ip);
return ret;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 9150207f365c..6af2396a317c 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1946,10 +1946,18 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
* @ip: the inode to reserve space for
* @ap: the allocation parameters
*
- * Returns: errno
+ * We try our best to find an rgrp that has at least ap->target blocks
+ * available. After a couple of passes (loops == 2), the prospects of finding
+ * such an rgrp diminish. At this stage, we return the first rgrp that has
+ * at least ap->min_target blocks available. Either way, we set ap->allowed to
+ * the number of blocks available in the chosen rgrp.
+ *
+ * Returns: 0 on success,
+ * -ENOMEM if a suitable rgrp can't be found
+ * errno otherwise
*/
-int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
@@ -2012,7 +2020,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
/* Skip unuseable resource groups */
if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
GFS2_RDF_ERROR)) ||
- (ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
+ (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb)
@@ -2027,11 +2035,13 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
goto check_rgrp;
/* If rgrp has enough free space, use it */
- if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
+ if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
+ (loops == 2 && ap->min_target &&
+ rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
ip->i_rgd = rs->rs_rbm.rgd;
+ ap->allowed = ip->i_rgd->rd_free_clone;
return 0;
}
-
check_rgrp:
/* Check for unlinked inodes which can be reclaimed */
if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
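
gfs2_inplace_reserve() above relaxes its requirement on the final pass: after two loops over the resource groups it settles for one that satisfies ap->min_target rather than the full ap->target, and records what that rgrp can give in ap->allowed. The selection policy, boiled down to a standalone routine, might look like the sketch below; the free-block array stands in for the rgrp list and the numbers are made up.

#include <stdint.h>
#include <stdio.h>

/* Pick an entry from 'free' that can satisfy the request.
 * Passes 0/1 insist on the full target; the final pass accepts min_target. */
static int pick_rgrp(const uint32_t *free, int n,
		     uint32_t target, uint32_t min_target, uint64_t *allowed)
{
	for (int loops = 0; loops <= 2; loops++) {
		for (int i = 0; i < n; i++) {
			if (free[i] >= target ||
			    (loops == 2 && min_target &&
			     free[i] >= min_target)) {
				*allowed = free[i];
				return i;
			}
		}
	}
	return -1;	/* nothing suitable found */
}

int main(void)
{
	uint32_t free[] = { 40, 25, 60 };
	uint64_t allowed = 0;

	/* A full target of 100 is impossible, but 8 blocks minimum is fine. */
	int idx = pick_rgrp(free, 3, 100, 8, &allowed);

	printf("picked rgrp %d, allowed=%llu\n",
	       idx, (unsigned long long)allowed);
	return 0;
}
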
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index b104f4af3afd..68972ecfbb01 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -41,7 +41,8 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
#define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
+ struct gfs2_alloc_parms *ap);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 1666382b198d..859c6edbf81a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1171,7 +1171,7 @@ static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *s
static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct super_block *sb = dentry->d_inode->i_sb;
+ struct super_block *sb = d_inode(dentry)->i_sb;
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_statfs_change_host sc;
int error;
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 0b81f783f787..4c096fa9e2a1 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -420,7 +420,7 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
struct gfs2_ea_request er;
struct gfs2_holder i_gh;
int error;
@@ -586,7 +586,7 @@ out:
static int gfs2_xattr_get(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
struct gfs2_ea_location el;
int error;
@@ -732,7 +732,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
return error;
- error = gfs2_quota_lock_check(ip);
+ error = gfs2_quota_lock_check(ip, &ap);
if (error)
return error;
@@ -1230,7 +1230,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name,
static int gfs2_xattr_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
- return __gfs2_xattr_set(dentry->d_inode, name, value,
+ return __gfs2_xattr_set(d_inode(dentry), name, value,
size, flags, type);
}
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index e057ec542a6a..8d931b157bbe 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -16,7 +16,7 @@
int hfs_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
hfs_cat_rec rec;
struct hfs_cat_file *file;
@@ -59,7 +59,7 @@ out:
ssize_t hfs_getxattr(struct dentry *dentry, const char *name,
void *value, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
hfs_cat_rec rec;
struct hfs_cat_file *file;
@@ -105,7 +105,7 @@ out:
ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
return -EOPNOTSUPP;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 145566851e7a..70788e03820a 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -197,7 +197,7 @@ static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
inode = hfs_new_inode(dir, &dentry->d_name, mode);
if (!inode)
- return -ENOSPC;
+ return -ENOMEM;
res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
@@ -226,7 +226,7 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode);
if (!inode)
- return -ENOSPC;
+ return -ENOMEM;
res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
@@ -253,7 +253,7 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
*/
static int hfs_remove(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int res;
if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
@@ -285,18 +285,18 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
int res;
/* Unlink destination if it already exists */
- if (new_dentry->d_inode) {
+ if (d_really_is_positive(new_dentry)) {
res = hfs_remove(new_dir, new_dentry);
if (res)
return res;
}
- res = hfs_cat_move(old_dentry->d_inode->i_ino,
+ res = hfs_cat_move(d_inode(old_dentry)->i_ino,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name);
if (!res)
hfs_cat_build_key(old_dir->i_sb,
- (btree_key *)&HFS_I(old_dentry->d_inode)->cat_key,
+ (btree_key *)&HFS_I(d_inode(old_dentry))->cat_key,
new_dir->i_ino, &new_dentry->d_name);
return res;
}
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index d0929bc81782..b99ebddb10cb 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -14,7 +14,7 @@
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "hfs_fs.h"
#include "btree.h"
@@ -124,8 +124,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
return res ? try_to_free_buffers(page) : 0;
}
-static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -133,13 +133,13 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely((rw & WRITE) && ret < 0)) {
+ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + count;
@@ -600,7 +600,7 @@ static int hfs_file_release(struct inode *inode, struct file *file)
int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
int error;
@@ -674,9 +674,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
static const struct file_operations hfs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 91b91fd3a901..2875961fdc10 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -21,7 +21,7 @@ static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if(!inode)
return 1;
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index c1422d91cd36..528e38b5af7f 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -118,9 +118,7 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd,
int b, e;
int res;
- if (!rec_found)
- BUG();
-
+ BUG_ON(!rec_found);
b = 0;
e = bnode->num_recs - 1;
res = -ENOENT;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 7892e6fddb66..022974ab6e3c 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -350,10 +350,11 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
&fd.search_key->cat.name.unicode,
off + 2, len);
fd.search_key->key_len = cpu_to_be16(6 + len);
- } else
+ } else {
err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
if (unlikely(err))
goto out;
+ }
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index f0235c1640af..d0f39dcbb58e 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -81,7 +81,7 @@ again:
HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
create_date ||
entry.file.create_date ==
- HFSPLUS_I(sb->s_root->d_inode)->
+ HFSPLUS_I(d_inode(sb->s_root))->
create_date) &&
HFSPLUS_SB(sb)->hidden_dir) {
struct qstr str;
@@ -296,8 +296,8 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
struct dentry *dst_dentry)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb);
- struct inode *inode = src_dentry->d_inode;
- struct inode *src_dir = src_dentry->d_parent->d_inode;
+ struct inode *inode = d_inode(src_dentry);
+ struct inode *src_dir = d_inode(src_dentry->d_parent);
struct qstr str;
char name[32];
u32 cnid, id;
@@ -353,7 +353,7 @@ out:
static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct qstr str;
char name[32];
u32 cnid;
@@ -410,7 +410,7 @@ out:
static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int res;
if (inode->i_size != 2)
@@ -434,7 +434,7 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
- int res = -ENOSPC;
+ int res = -ENOMEM;
mutex_lock(&sbi->vh_mutex);
inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO);
@@ -476,7 +476,7 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
- int res = -ENOSPC;
+ int res = -ENOMEM;
mutex_lock(&sbi->vh_mutex);
inode = hfsplus_new_inode(dir->i_sb, mode);
@@ -529,7 +529,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
int res;
/* Unlink destination if it already exists */
- if (new_dentry->d_inode) {
+ if (d_really_is_positive(new_dentry)) {
if (d_is_dir(new_dentry))
res = hfsplus_rmdir(new_dir, new_dentry);
else
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 0cf786f2d046..6dd107d7421e 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -14,7 +14,7 @@
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
@@ -122,8 +122,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
return res ? try_to_free_buffers(page) : 0;
}
-static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -131,14 +131,13 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
- hfsplus_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely((rw & WRITE) && ret < 0)) {
+ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + count;
@@ -244,7 +243,7 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
error = inode_change_ok(inode, attr);
@@ -254,6 +253,12 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
+ if (attr->ia_size > inode->i_size) {
+ error = generic_cont_expand_simple(inode,
+ attr->ia_size);
+ if (error)
+ return error;
+ }
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
}
@@ -341,9 +346,7 @@ static const struct inode_operations hfsplus_file_inode_operations = {
static const struct file_operations hfsplus_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index d3ff5cc317d7..0624ce4e0702 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -26,7 +26,7 @@
static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
{
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
struct hfsplus_vh *vh = sbi->s_vhdr;
struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
@@ -76,7 +76,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
{
struct inode *inode = file_inode(file);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
- unsigned int flags;
+ unsigned int flags, new_fl = 0;
int err = 0;
err = mnt_want_write_file(file);
@@ -110,14 +110,12 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
}
if (flags & FS_IMMUTABLE_FL)
- inode->i_flags |= S_IMMUTABLE;
- else
- inode->i_flags &= ~S_IMMUTABLE;
+ new_fl |= S_IMMUTABLE;
if (flags & FS_APPEND_FL)
- inode->i_flags |= S_APPEND;
- else
- inode->i_flags &= ~S_APPEND;
+ new_fl |= S_APPEND;
+
+ inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);
if (flags & FS_NODUMP_FL)
hip->userflags |= HFSPLUS_FLG_NODUMP;
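
The ioctl.c hunk above collects the new S_IMMUTABLE/S_APPEND bits into new_fl and applies them with a single inode_set_flags() call instead of toggling inode->i_flags bit by bit. The underlying idea is a masked update; a small standalone sketch follows, with arbitrary flag values chosen only for illustration.

#include <stdio.h>

#define FL_IMMUTABLE 0x1
#define FL_APPEND    0x2
#define FL_OTHER     0x4	/* some unrelated bit that must be preserved */

/* Replace only the bits covered by 'mask', leave everything else alone. */
static unsigned int set_flags(unsigned int cur, unsigned int new_fl,
			      unsigned int mask)
{
	return (cur & ~mask) | (new_fl & mask);
}

int main(void)
{
	unsigned int i_flags = FL_OTHER | FL_IMMUTABLE;
	unsigned int new_fl = 0;
	int want_append = 1, want_immutable = 0;

	if (want_immutable)
		new_fl |= FL_IMMUTABLE;
	if (want_append)
		new_fl |= FL_APPEND;

	i_flags = set_flags(i_flags, new_fl, FL_IMMUTABLE | FL_APPEND);
	printf("flags = %#x\n", i_flags);	/* FL_OTHER | FL_APPEND */
	return 0;
}
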
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index d98094a9f476..416b1dbafe51 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -44,7 +44,7 @@ static int strcmp_xattr_acl(const char *name)
return -1;
}
-static inline int is_known_namespace(const char *name)
+static bool is_known_namespace(const char *name)
{
if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
@@ -424,6 +424,28 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len)
return len;
}
+int hfsplus_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags,
+ const char *prefix, size_t prefixlen)
+{
+ char *xattr_name;
+ int res;
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
+ GFP_KERNEL);
+ if (!xattr_name)
+ return -ENOMEM;
+ strcpy(xattr_name, prefix);
+ strcpy(xattr_name + prefixlen, name);
+ res = __hfsplus_setxattr(d_inode(dentry), xattr_name, value, size,
+ flags);
+ kfree(xattr_name);
+ return res;
+}
+
static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
void *value, size_t size)
{
@@ -560,6 +582,30 @@ failed_getxattr_init:
return res;
}
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ void *value, size_t size,
+ const char *prefix, size_t prefixlen)
+{
+ int res;
+ char *xattr_name;
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
+ GFP_KERNEL);
+ if (!xattr_name)
+ return -ENOMEM;
+
+ strcpy(xattr_name, prefix);
+ strcpy(xattr_name + prefixlen, name);
+
+ res = __hfsplus_getxattr(d_inode(dentry), xattr_name, value, size);
+ kfree(xattr_name);
+ return res;
+
+}
+
static inline int can_list(const char *xattr_name)
{
if (!xattr_name)
@@ -574,7 +620,7 @@ static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
char *buffer, size_t size)
{
ssize_t res = 0;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
u16 entry_type;
u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
@@ -642,7 +688,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
ssize_t err;
ssize_t res = 0;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
u16 key_len = 0;
struct hfsplus_attr_key attr_key;
@@ -806,9 +852,6 @@ end_removexattr:
static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
- char *xattr_name;
- int res;
-
if (!strcmp(name, ""))
return -EINVAL;
@@ -818,24 +861,19 @@ static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
*/
if (is_known_namespace(name))
return -EOPNOTSUPP;
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
- res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
- kfree(xattr_name);
- return res;
+ /*
+ * osx is the namespace we use to indicate an unprefixed
+ * attribute on the filesystem (like the ones that OS X
+ * creates), so we pass the name through unmodified (after
+ * ensuring it doesn't conflict with another namespace).
+ */
+ return __hfsplus_getxattr(d_inode(dentry), name, buffer, size);
}
static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags, int type)
{
- char *xattr_name;
- int res;
-
if (!strcmp(name, ""))
return -EINVAL;
@@ -845,16 +883,14 @@ static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
*/
if (is_known_namespace(name))
return -EOPNOTSUPP;
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
- + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
- strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
- res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
- kfree(xattr_name);
- return res;
+ /*
+ * osx is the namespace we use to indicate an unprefixed
+ * attribute on the filesystem (like the ones that OS X
+ * creates), so we pass the name through unmodified (after
+ * ensuring it doesn't conflict with another namespace).
+ */
+ return __hfsplus_setxattr(d_inode(dentry), name, buffer, size, flags);
}
static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
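
The xattr.c hunks above fold the per-namespace kmalloc/strcpy boilerplate into one hfsplus_setxattr()/hfsplus_getxattr() pair that takes the prefix and its length. The name-building step is just two copies into a single buffer; a userspace sketch of that helper is shown below, with the buffer size and function names chosen for illustration only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "<prefix><name>" in one heap buffer, as the new helpers do. */
static char *build_xattr_name(const char *prefix, size_t prefixlen,
			      const char *name, size_t maxlen)
{
	char *xattr_name;

	if (prefixlen + strlen(name) + 1 > maxlen)
		return NULL;

	xattr_name = malloc(maxlen);
	if (!xattr_name)
		return NULL;

	strcpy(xattr_name, prefix);
	strcpy(xattr_name + prefixlen, name);
	return xattr_name;
}

int main(void)
{
	/* e.g. the "security." namespace handler delegating to the helper */
	char *full = build_xattr_name("security.", strlen("security."),
				      "selinux", 256);

	if (full) {
		printf("%s\n", full);	/* -> security.selinux */
		free(full);
	}
	return 0;
}
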
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index 288530cf80b5..f9b0955b3d28 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -21,22 +21,16 @@ extern const struct xattr_handler *hfsplus_xattr_handlers[];
int __hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags);
-static inline int hfsplus_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
-{
- return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags);
-}
+int hfsplus_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags,
+ const char *prefix, size_t prefixlen);
ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
- void *value, size_t size);
-
-static inline ssize_t hfsplus_getxattr(struct dentry *dentry,
- const char *name,
- void *value,
- size_t size)
-{
- return __hfsplus_getxattr(dentry->d_inode, name, value, size);
-}
+ void *value, size_t size);
+
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ void *value, size_t size,
+ const char *prefix, size_t prefixlen);
ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index 6ec5e107691f..aacff00a9ff9 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -16,43 +16,17 @@
static int hfsplus_security_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
- char *xattr_name;
- int res;
-
- if (!strcmp(name, ""))
- return -EINVAL;
-
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
- GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_SECURITY_PREFIX);
- strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
-
- res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
- kfree(xattr_name);
- return res;
+ return hfsplus_getxattr(dentry, name, buffer, size,
+ XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN);
}
static int hfsplus_security_setxattr(struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags, int type)
{
- char *xattr_name;
- int res;
-
- if (!strcmp(name, ""))
- return -EINVAL;
-
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
- GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_SECURITY_PREFIX);
- strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
-
- res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
- kfree(xattr_name);
- return res;
+ return hfsplus_setxattr(dentry, name, buffer, size, flags,
+ XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN);
}
static size_t hfsplus_security_listxattr(struct dentry *dentry, char *list,
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index 3c5f27e4746a..bcf65089b7f7 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -14,43 +14,16 @@
static int hfsplus_trusted_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
- char *xattr_name;
- int res;
-
- if (!strcmp(name, ""))
- return -EINVAL;
-
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
- GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
- strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
-
- res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
- kfree(xattr_name);
- return res;
+ return hfsplus_getxattr(dentry, name, buffer, size,
+ XATTR_TRUSTED_PREFIX,
+ XATTR_TRUSTED_PREFIX_LEN);
}
static int hfsplus_trusted_setxattr(struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags, int type)
{
- char *xattr_name;
- int res;
-
- if (!strcmp(name, ""))
- return -EINVAL;
-
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
- GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
- strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
-
- res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
- kfree(xattr_name);
- return res;
+ return hfsplus_setxattr(dentry, name, buffer, size, flags,
+ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}
static size_t hfsplus_trusted_listxattr(struct dentry *dentry, char *list,
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index 2b625a538b64..5aa0e6dc4a1e 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -14,43 +14,16 @@
static int hfsplus_user_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
- char *xattr_name;
- int res;
- if (!strcmp(name, ""))
- return -EINVAL;
-
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
- GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_USER_PREFIX);
- strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
-
- res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
- kfree(xattr_name);
- return res;
+ return hfsplus_getxattr(dentry, name, buffer, size,
+ XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
static int hfsplus_user_setxattr(struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags, int type)
{
- char *xattr_name;
- int res;
-
- if (!strcmp(name, ""))
- return -EINVAL;
-
- xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
- GFP_KERNEL);
- if (!xattr_name)
- return -ENOMEM;
- strcpy(xattr_name, XATTR_USER_PREFIX);
- strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
-
- res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
- kfree(xattr_name);
- return res;
+ return hfsplus_setxattr(dentry, name, buffer, size, flags,
+ XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
static size_t hfsplus_user_listxattr(struct dentry *dentry, char *list,
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 4fcd40d6f308..91e19f9dffe5 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -66,7 +66,8 @@ extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
extern int access_file(char *path, int r, int w, int x);
extern int open_file(char *path, int r, int w, int append);
extern void *open_dir(char *path, int *err_out);
-extern char *read_dir(void *stream, unsigned long long *pos,
+extern void seek_dir(void *stream, unsigned long long pos);
+extern char *read_dir(void *stream, unsigned long long *pos_out,
unsigned long long *ino_out, int *len_out,
unsigned int *type_out);
extern void close_file(void *stream);
@@ -77,8 +78,7 @@ extern int write_file(int fd, unsigned long long *offset, const char *buf,
int len);
extern int lseek_file(int fd, long long offset, int whence);
extern int fsync_file(int fd, int datasync);
-extern int file_create(char *name, int ur, int uw, int ux, int gr,
- int gw, int gx, int or, int ow, int ox);
+extern int file_create(char *name, int mode);
extern int set_attr(const char *file, struct hostfs_iattr *attrs, int fd);
extern int make_symlink(const char *from, const char *to);
extern int unlink_file(const char *file);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fd62cae0fdcb..07d8d8f52faf 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -24,6 +24,7 @@ struct hostfs_inode_info {
int fd;
fmode_t mode;
struct inode vfs_inode;
+ struct mutex open_mutex;
};
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
@@ -92,16 +93,22 @@ static char *__dentry_name(struct dentry *dentry, char *name)
__putname(name);
return NULL;
}
+
+ /*
+ * This function relies on the fact that dentry_path_raw() will place
+ * the path name at the end of the provided buffer.
+ */
+ BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
+
strlcpy(name, root, PATH_MAX);
if (len > p - name) {
__putname(name);
return NULL;
}
- if (p > name + len) {
- char *s = name + len;
- while ((*s++ = *p++) != '\0')
- ;
- }
+
+ if (p > name + len)
+ strcpy(name + len, p);
+
return name;
}
@@ -135,21 +142,19 @@ static char *follow_link(char *link)
int len, n;
char *name, *resolved, *end;
- len = 64;
- while (1) {
+ name = __getname();
+ if (!name) {
n = -ENOMEM;
- name = kmalloc(len, GFP_KERNEL);
- if (name == NULL)
- goto out;
-
- n = hostfs_do_readlink(link, name, len);
- if (n < len)
- break;
- len *= 2;
- kfree(name);
+ goto out_free;
}
+
+ n = hostfs_do_readlink(link, name, PATH_MAX);
if (n < 0)
goto out_free;
+ else if (n == PATH_MAX) {
+ n = -E2BIG;
+ goto out_free;
+ }
if (*name == '/')
return name;
@@ -168,13 +173,12 @@ static char *follow_link(char *link)
}
sprintf(resolved, "%s%s", link, name);
- kfree(name);
+ __putname(name);
kfree(link);
return resolved;
out_free:
- kfree(name);
- out:
+ __putname(name);
return ERR_PTR(n);
}
@@ -225,6 +229,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
hi->fd = -1;
hi->mode = 0;
inode_init_once(&hi->vfs_inode);
+ mutex_init(&hi->open_mutex);
return &hi->vfs_inode;
}
@@ -257,6 +262,9 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
if (strlen(root_path) > offset)
seq_printf(seq, ",%s", root_path + offset);
+ if (append)
+ seq_puts(seq, ",append");
+
return 0;
}
@@ -284,6 +292,7 @@ static int hostfs_readdir(struct file *file, struct dir_context *ctx)
if (dir == NULL)
return -error;
next = ctx->pos;
+ seek_dir(dir, next);
while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) {
if (!dir_emit(ctx, name, len, ino, type))
break;
@@ -293,13 +302,12 @@ static int hostfs_readdir(struct file *file, struct dir_context *ctx)
return 0;
}
-static int hostfs_file_open(struct inode *ino, struct file *file)
+static int hostfs_open(struct inode *ino, struct file *file)
{
- static DEFINE_MUTEX(open_mutex);
char *name;
- fmode_t mode = 0;
+ fmode_t mode;
int err;
- int r = 0, w = 0, fd;
+ int r, w, fd;
mode = file->f_mode & (FMODE_READ | FMODE_WRITE);
if ((mode & HOSTFS_I(ino)->mode) == mode)
@@ -308,12 +316,12 @@ static int hostfs_file_open(struct inode *ino, struct file *file)
mode |= HOSTFS_I(ino)->mode;
retry:
+ r = w = 0;
+
if (mode & FMODE_READ)
r = 1;
if (mode & FMODE_WRITE)
- w = 1;
- if (w)
- r = 1;
+ r = w = 1;
name = dentry_name(file->f_path.dentry);
if (name == NULL)
@@ -324,15 +332,16 @@ retry:
if (fd < 0)
return fd;
- mutex_lock(&open_mutex);
+ mutex_lock(&HOSTFS_I(ino)->open_mutex);
/* somebody else had handled it first? */
if ((mode & HOSTFS_I(ino)->mode) == mode) {
- mutex_unlock(&open_mutex);
+ mutex_unlock(&HOSTFS_I(ino)->open_mutex);
+ close_file(&fd);
return 0;
}
if ((mode | HOSTFS_I(ino)->mode) != mode) {
mode |= HOSTFS_I(ino)->mode;
- mutex_unlock(&open_mutex);
+ mutex_unlock(&HOSTFS_I(ino)->open_mutex);
close_file(&fd);
goto retry;
}
@@ -342,12 +351,12 @@ retry:
err = replace_file(fd, HOSTFS_I(ino)->fd);
close_file(&fd);
if (err < 0) {
- mutex_unlock(&open_mutex);
+ mutex_unlock(&HOSTFS_I(ino)->open_mutex);
return err;
}
}
HOSTFS_I(ino)->mode = mode;
- mutex_unlock(&open_mutex);
+ mutex_unlock(&HOSTFS_I(ino)->open_mutex);
return 0;
}
@@ -378,13 +387,11 @@ static int hostfs_fsync(struct file *file, loff_t start, loff_t end,
static const struct file_operations hostfs_file_fops = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.splice_read = generic_file_splice_read,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .write = new_sync_write,
.mmap = generic_file_mmap,
- .open = hostfs_file_open,
+ .open = hostfs_open,
.release = hostfs_file_release,
.fsync = hostfs_fsync,
};
@@ -393,6 +400,8 @@ static const struct file_operations hostfs_dir_fops = {
.llseek = generic_file_llseek,
.iterate = hostfs_readdir,
.read = generic_read_dir,
+ .open = hostfs_open,
+ .fsync = hostfs_fsync,
};
static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -400,7 +409,7 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
char *buffer;
- unsigned long long base;
+ loff_t base = page_offset(page);
int count = PAGE_CACHE_SIZE;
int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
int err;
@@ -409,7 +418,6 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
count = inode->i_size & (PAGE_CACHE_SIZE-1);
buffer = kmap(page);
- base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT;
err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
if (err != count) {
@@ -434,26 +442,29 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
static int hostfs_readpage(struct file *file, struct page *page)
{
char *buffer;
- long long start;
- int err = 0;
+ loff_t start = page_offset(page);
+ int bytes_read, ret = 0;
- start = (long long) page->index << PAGE_CACHE_SHIFT;
buffer = kmap(page);
- err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
+ bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
PAGE_CACHE_SIZE);
- if (err < 0)
+ if (bytes_read < 0) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ ret = bytes_read;
goto out;
+ }
- memset(&buffer[err], 0, PAGE_CACHE_SIZE - err);
+ memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read);
- flush_dcache_page(page);
+ ClearPageError(page);
SetPageUptodate(page);
- if (PageError(page)) ClearPageError(page);
- err = 0;
+
out:
+ flush_dcache_page(page);
kunmap(page);
unlock_page(page);
- return err;
+ return ret;
}
static int hostfs_write_begin(struct file *file, struct address_space *mapping,
@@ -530,11 +541,13 @@ static int read_name(struct inode *ino, char *name)
init_special_inode(ino, st.mode & S_IFMT, rdev);
ino->i_op = &hostfs_iops;
break;
-
- default:
+ case S_IFREG:
ino->i_op = &hostfs_iops;
ino->i_fop = &hostfs_file_fops;
ino->i_mapping->a_ops = &hostfs_aops;
+ break;
+ default:
+ return -EIO;
}
ino->i_ino = st.ino;
@@ -568,10 +581,7 @@ static int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (name == NULL)
goto out_put;
- fd = file_create(name,
- mode & S_IRUSR, mode & S_IWUSR, mode & S_IXUSR,
- mode & S_IRGRP, mode & S_IWGRP, mode & S_IXGRP,
- mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH);
+ fd = file_create(name, mode & 0777);
if (fd < 0)
error = fd;
else
@@ -797,7 +807,7 @@ static int hostfs_permission(struct inode *ino, int desired)
static int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hostfs_iattr attrs;
char *name;
int err;
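
The hostfs hunks above replace the function-static open_mutex in hostfs_open() with a per-inode mutex (initialized in the inode constructor alongside the other fields), so concurrent opens of different files no longer serialize on one global lock. Below is an editor's user-space sketch of that pattern, assuming pthreads; struct host_inode and widen_open_mode() are made-up illustrative names, not the real hostfs structures.

/* Illustrative sketch only: per-object mutex guarding a cached open mode. */
#include <pthread.h>
#include <stdio.h>

struct host_inode {
        int open_mode;                  /* access modes the cached host fd already covers */
        pthread_mutex_t open_mutex;     /* per-inode now; was one global static mutex */
};

static void host_inode_init(struct host_inode *hi)
{
        hi->open_mode = 0;
        pthread_mutex_init(&hi->open_mutex, NULL);
}

static void widen_open_mode(struct host_inode *hi, int wanted)
{
        pthread_mutex_lock(&hi->open_mutex);
        if ((wanted & hi->open_mode) != wanted) {
                /* hostfs reopens a wider host fd and replace_file()s it here */
                hi->open_mode |= wanted;
        }
        pthread_mutex_unlock(&hi->open_mutex);
}

int main(void)
{
        struct host_inode hi;

        host_inode_init(&hi);
        widen_open_mode(&hi, 1);        /* read */
        widen_open_mode(&hi, 3);        /* read + write */
        printf("cached open mode: %d\n", hi.open_mode);  /* prints 3 */
        return 0;
}

Because the mutex now lives in the per-inode info, the mutex_init() added at the top of this file's diff sets it up once per inode rather than sharing one lock across the whole filesystem.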
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 9765dab95cbd..9c1e0f019880 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -97,21 +97,27 @@ void *open_dir(char *path, int *err_out)
return dir;
}
-char *read_dir(void *stream, unsigned long long *pos,
+void seek_dir(void *stream, unsigned long long pos)
+{
+ DIR *dir = stream;
+
+ seekdir(dir, pos);
+}
+
+char *read_dir(void *stream, unsigned long long *pos_out,
unsigned long long *ino_out, int *len_out,
unsigned int *type_out)
{
DIR *dir = stream;
struct dirent *ent;
- seekdir(dir, *pos);
ent = readdir(dir);
if (ent == NULL)
return NULL;
*len_out = strlen(ent->d_name);
*ino_out = ent->d_ino;
*type_out = ent->d_type;
- *pos = telldir(dir);
+ *pos_out = ent->d_off;
return ent->d_name;
}
@@ -175,21 +181,10 @@ void close_dir(void *stream)
closedir(stream);
}
-int file_create(char *name, int ur, int uw, int ux, int gr,
- int gw, int gx, int or, int ow, int ox)
+int file_create(char *name, int mode)
{
- int mode, fd;
-
- mode = 0;
- mode |= ur ? S_IRUSR : 0;
- mode |= uw ? S_IWUSR : 0;
- mode |= ux ? S_IXUSR : 0;
- mode |= gr ? S_IRGRP : 0;
- mode |= gw ? S_IWGRP : 0;
- mode |= gx ? S_IXGRP : 0;
- mode |= or ? S_IROTH : 0;
- mode |= ow ? S_IWOTH : 0;
- mode |= ox ? S_IXOTH : 0;
+ int fd;
+
fd = open64(name, O_CREAT | O_RDWR, mode);
if (fd < 0)
return -errno;
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 7f54e5f76cec..6d8cfe9b52d6 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -197,9 +197,7 @@ const struct address_space_operations hpfs_aops = {
const struct file_operations hpfs_file_ops =
{
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.release = hpfs_file_release,
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 7ce4b74234a1..933c73780813 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -257,7 +257,7 @@ void hpfs_write_inode_nolock(struct inode *i)
int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error = -EINVAL;
hpfs_lock(inode->i_sb);
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index bdbc2c3080a4..a0872f239f04 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -359,7 +359,7 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
dnode_secno dno;
int r;
int rep = 0;
@@ -433,7 +433,7 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
dnode_secno dno;
int n_items = 0;
int err;
@@ -522,8 +522,8 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned old_len = old_dentry->d_name.len;
const unsigned char *new_name = new_dentry->d_name.name;
unsigned new_len = new_dentry->d_name.len;
- struct inode *i = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *i = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct quad_buffer_head qbh, qbh1;
struct hpfs_dirent *dep, *nde;
struct hpfs_dirent de;
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 043ac9d77262..fa2bd5366ecf 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -153,9 +153,9 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
return ERR_PTR(-ENOENT);
parent = HPPFS_I(ino)->proc_dentry;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
proc_dentry = lookup_one_len(name->name, parent, name->len);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
if (IS_ERR(proc_dentry))
return proc_dentry;
@@ -637,25 +637,25 @@ static const struct super_operations hppfs_sbops = {
static int hppfs_readlink(struct dentry *dentry, char __user *buffer,
int buflen)
{
- struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
- return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer,
+ struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry;
+ return d_inode(proc_dentry)->i_op->readlink(proc_dentry, buffer,
buflen);
}
static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+ struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry;
- return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd);
+ return d_inode(proc_dentry)->i_op->follow_link(proc_dentry, nd);
}
static void hppfs_put_link(struct dentry *dentry, struct nameidata *nd,
void *cookie)
{
- struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+ struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry;
- if (proc_dentry->d_inode->i_op->put_link)
- proc_dentry->d_inode->i_op->put_link(proc_dentry, nd, cookie);
+ if (d_inode(proc_dentry)->i_op->put_link)
+ d_inode(proc_dentry)->i_op->put_link(proc_dentry, nd, cookie);
}
static const struct inode_operations hppfs_dir_iops = {
@@ -670,7 +670,7 @@ static const struct inode_operations hppfs_link_iops = {
static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
{
- struct inode *proc_ino = dentry->d_inode;
+ struct inode *proc_ino = d_inode(dentry);
struct inode *inode = new_inode(sb);
if (!inode) {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c274aca8e8dc..87724c1d7be6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -34,6 +34,7 @@
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
@@ -47,9 +48,10 @@ struct hugetlbfs_config {
kuid_t uid;
kgid_t gid;
umode_t mode;
- long nr_blocks;
+ long max_hpages;
long nr_inodes;
struct hstate *hstate;
+ long min_hpages;
};
struct hugetlbfs_inode_info {
@@ -67,7 +69,7 @@ int sysctl_hugetlb_shm_group;
enum {
Opt_size, Opt_nr_inodes,
Opt_mode, Opt_uid, Opt_gid,
- Opt_pagesize,
+ Opt_pagesize, Opt_min_size,
Opt_err,
};
@@ -78,6 +80,7 @@ static const match_table_t tokens = {
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_pagesize, "pagesize=%s"},
+ {Opt_min_size, "min_size=%s"},
{Opt_err, NULL},
};
@@ -179,42 +182,33 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
#endif
-static int
+static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
- char __user *buf, unsigned long count,
- unsigned long size)
+ struct iov_iter *to, unsigned long size)
{
- char *kaddr;
- unsigned long left, copied = 0;
+ size_t copied = 0;
int i, chunksize;
- if (size > count)
- size = count;
-
/* Find which 4k chunk and offset with in that chunk */
i = offset >> PAGE_CACHE_SHIFT;
offset = offset & ~PAGE_CACHE_MASK;
while (size) {
+ size_t n;
chunksize = PAGE_CACHE_SIZE;
if (offset)
chunksize -= offset;
if (chunksize > size)
chunksize = size;
- kaddr = kmap(&page[i]);
- left = __copy_to_user(buf, kaddr + offset, chunksize);
- kunmap(&page[i]);
- if (left) {
- copied += (chunksize - left);
- break;
- }
+ n = copy_page_to_iter(&page[i], offset, chunksize, to);
+ copied += n;
+ if (n != chunksize)
+ return copied;
offset = 0;
size -= chunksize;
- buf += chunksize;
- copied += chunksize;
i++;
}
- return copied ? copied : -EFAULT;
+ return copied;
}
/*
@@ -222,39 +216,34 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
* data. It's *very* similar to do_generic_mapping_read(), but we can't use that
* since it has PAGE_CACHE_SIZE assumptions.
*/
-static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
- size_t len, loff_t *ppos)
+static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct hstate *h = hstate_file(filp);
- struct address_space *mapping = filp->f_mapping;
+ struct file *file = iocb->ki_filp;
+ struct hstate *h = hstate_file(file);
+ struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- unsigned long index = *ppos >> huge_page_shift(h);
- unsigned long offset = *ppos & ~huge_page_mask(h);
+ unsigned long index = iocb->ki_pos >> huge_page_shift(h);
+ unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
unsigned long end_index;
loff_t isize;
ssize_t retval = 0;
- /* validate length */
- if (len == 0)
- goto out;
-
- for (;;) {
+ while (iov_iter_count(to)) {
struct page *page;
- unsigned long nr, ret;
- int ra;
+ size_t nr, copied;
/* nr is the maximum number of bytes to copy from this page */
nr = huge_page_size(h);
isize = i_size_read(inode);
if (!isize)
- goto out;
+ break;
end_index = (isize - 1) >> huge_page_shift(h);
- if (index >= end_index) {
- if (index > end_index)
- goto out;
+ if (index > end_index)
+ break;
+ if (index == end_index) {
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
if (nr <= offset)
- goto out;
+ break;
}
nr = nr - offset;
@@ -265,39 +254,27 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
* We have a HOLE, zero out the user-buffer for the
* length of the hole or request.
*/
- ret = len < nr ? len : nr;
- if (clear_user(buf, ret))
- ra = -EFAULT;
- else
- ra = 0;
+ copied = iov_iter_zero(nr, to);
} else {
unlock_page(page);
/*
* We have the page, copy it to user space buffer.
*/
- ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
- ret = ra;
+ copied = hugetlbfs_read_actor(page, offset, to, nr);
page_cache_release(page);
}
- if (ra < 0) {
- if (retval == 0)
- retval = ra;
- goto out;
+ offset += copied;
+ retval += copied;
+ if (copied != nr && iov_iter_count(to)) {
+ if (!retval)
+ retval = -EFAULT;
+ break;
}
-
- offset += ret;
- retval += ret;
- len -= ret;
index += offset >> huge_page_shift(h);
offset &= ~huge_page_mask(h);
-
- /* short read or no more work */
- if ((ret != nr) || (len == 0))
- break;
}
-out:
- *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
+ iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
return retval;
}
@@ -319,7 +296,7 @@ static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
static void truncate_huge_page(struct page *page)
{
- cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
+ ClearPageDirty(page);
ClearPageUptodate(page);
delete_from_page_cache(page);
}
@@ -416,7 +393,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct hstate *h = hstate_inode(inode);
int error;
unsigned int ia_valid = attr->ia_valid;
@@ -610,7 +587,7 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
- struct hstate *h = hstate_inode(dentry->d_inode);
+ struct hstate *h = hstate_inode(d_inode(dentry));
buf->f_type = HUGETLBFS_MAGIC;
buf->f_bsize = huge_page_size(h);
@@ -721,7 +698,7 @@ static void init_once(void *foo)
}
const struct file_operations hugetlbfs_file_operations = {
- .read = hugetlbfs_read,
+ .read_iter = hugetlbfs_read_iter,
.mmap = hugetlbfs_file_mmap,
.fsync = noop_fsync,
.get_unmapped_area = hugetlb_get_unmapped_area,
@@ -754,14 +731,38 @@ static const struct super_operations hugetlbfs_ops = {
.show_options = generic_show_options,
};
+enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };
+
+/*
+ * Convert size option passed from command line to number of huge pages
+ * in the pool specified by hstate. Size option could be in bytes
+ * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
+ */
+static long long
+hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
+ int val_type)
+{
+ if (val_type == NO_SIZE)
+ return -1;
+
+ if (val_type == SIZE_PERCENT) {
+ size_opt <<= huge_page_shift(h);
+ size_opt *= h->max_huge_pages;
+ do_div(size_opt, 100);
+ }
+
+ size_opt >>= huge_page_shift(h);
+ return size_opt;
+}
+
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
char *p, *rest;
substring_t args[MAX_OPT_ARGS];
int option;
- unsigned long long size = 0;
- enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;
+ unsigned long long max_size_opt = 0, min_size_opt = 0;
+ int max_val_type = NO_SIZE, min_val_type = NO_SIZE;
if (!options)
return 0;
@@ -799,10 +800,10 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
/* memparse() will accept a K/M/G without a digit */
if (!isdigit(*args[0].from))
goto bad_val;
- size = memparse(args[0].from, &rest);
- setsize = SIZE_STD;
+ max_size_opt = memparse(args[0].from, &rest);
+ max_val_type = SIZE_STD;
if (*rest == '%')
- setsize = SIZE_PERCENT;
+ max_val_type = SIZE_PERCENT;
break;
}
@@ -825,6 +826,17 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
break;
}
+ case Opt_min_size: {
+ /* memparse() will accept a K/M/G without a digit */
+ if (!isdigit(*args[0].from))
+ goto bad_val;
+ min_size_opt = memparse(args[0].from, &rest);
+ min_val_type = SIZE_STD;
+ if (*rest == '%')
+ min_val_type = SIZE_PERCENT;
+ break;
+ }
+
default:
pr_err("Bad mount option: \"%s\"\n", p);
return -EINVAL;
@@ -832,15 +844,22 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
}
}
- /* Do size after hstate is set up */
- if (setsize > NO_SIZE) {
- struct hstate *h = pconfig->hstate;
- if (setsize == SIZE_PERCENT) {
- size <<= huge_page_shift(h);
- size *= h->max_huge_pages;
- do_div(size, 100);
- }
- pconfig->nr_blocks = (size >> huge_page_shift(h));
+ /*
+ * Use huge page pool size (in hstate) to convert the size
+ * options to number of huge pages. If NO_SIZE, -1 is returned.
+ */
+ pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
+ max_size_opt, max_val_type);
+ pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
+ min_size_opt, min_val_type);
+
+ /*
+ * If max_size was specified, then min_size must be smaller
+ */
+ if (max_val_type > NO_SIZE &&
+ pconfig->min_hpages > pconfig->max_hpages) {
+ pr_err("minimum size can not be greater than maximum size\n");
+ return -EINVAL;
}
return 0;
@@ -859,12 +878,13 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
save_mount_options(sb, data);
- config.nr_blocks = -1; /* No limit on size by default */
+ config.max_hpages = -1; /* No limit on size by default */
config.nr_inodes = -1; /* No limit on number of inodes by default */
config.uid = current_fsuid();
config.gid = current_fsgid();
config.mode = 0755;
config.hstate = &default_hstate;
+ config.min_hpages = -1; /* No default minimum size */
ret = hugetlbfs_parse_options(data, &config);
if (ret)
return ret;
@@ -878,8 +898,15 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
sbinfo->max_inodes = config.nr_inodes;
sbinfo->free_inodes = config.nr_inodes;
sbinfo->spool = NULL;
- if (config.nr_blocks != -1) {
- sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
+ /*
+ * Allocate and initialize subpool if maximum or minimum size is
+ * specified. Any needed reservations (for minimum size) are taken
+ * when the subpool is created.
+ */
+ if (config.max_hpages != -1 || config.min_hpages != -1) {
+ sbinfo->spool = hugepage_new_subpool(config.hstate,
+ config.max_hpages,
+ config.min_hpages);
if (!sbinfo->spool)
goto out_free;
}
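
The hugetlbfs changes above factor the size/min_size parsing into hugetlbfs_size_to_hpages(), which accepts either a byte count or a percentage of the pool and returns a number of huge pages. Here is an editor's stand-alone sketch of that conversion; hpage_shift and pool_pages stand in for huge_page_shift(h) and h->max_huge_pages, and all names are illustrative rather than the kernel API.

/* Illustrative sketch only: size option (bytes or percent) -> huge pages. */
#include <stdio.h>

enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };

static long long size_to_hpages(unsigned int hpage_shift,
                                unsigned long long pool_pages,
                                unsigned long long size_opt, int val_type)
{
        if (val_type == NO_SIZE)
                return -1;
        if (val_type == SIZE_PERCENT) {
                size_opt <<= hpage_shift;       /* percent of the pool, via bytes, as the patch does */
                size_opt *= pool_pages;
                size_opt /= 100;
        }
        return size_opt >> hpage_shift;         /* bytes -> huge pages */
}

int main(void)
{
        /* 2 MiB huge pages, pool of 512 pages: "size=64M" and "min_size=25%" */
        printf("%lld\n", size_to_hpages(21, 512, 64ULL << 20, SIZE_STD));   /* 32  */
        printf("%lld\n", size_to_hpages(21, 512, 25, SIZE_PERCENT));        /* 128 */
        return 0;
}

hugetlbfs_fill_super() then creates the subpool whenever either a maximum or a minimum was given, so min_size reservations are taken at mount time rather than lazily.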
diff --git a/fs/inode.c b/fs/inode.c
index f00b16f45507..ea37cd17b53f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1587,7 +1587,7 @@ static int update_time(struct inode *inode, struct timespec *time, int flags)
void touch_atime(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = d_inode(path->dentry);
struct timespec now;
if (inode->i_flags & S_NOATIME)
@@ -1639,7 +1639,7 @@ EXPORT_SYMBOL(touch_atime);
*/
int should_remove_suid(struct dentry *dentry)
{
- umode_t mode = dentry->d_inode->i_mode;
+ umode_t mode = d_inode(dentry)->i_mode;
int kill = 0;
/* suid always must be killed */
@@ -1675,7 +1675,7 @@ static int __remove_suid(struct dentry *dentry, int kill)
int file_remove_suid(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int killsuid;
int killpriv;
int error = 0;
@@ -1946,20 +1946,6 @@ void inode_dio_wait(struct inode *inode)
EXPORT_SYMBOL(inode_dio_wait);
/*
- * inode_dio_done - signal finish of a direct I/O requests
- * @inode: inode the direct I/O happens on
- *
- * This is called once we've finished processing a direct I/O request,
- * and is used to wake up callers waiting for direct I/O to be quiesced.
- */
-void inode_dio_done(struct inode *inode)
-{
- if (atomic_dec_and_test(&inode->i_dio_count))
- wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
-}
-EXPORT_SYMBOL(inode_dio_done);
-
-/*
* inode_set_flags - atomically set some inode flags
*
* Note: the caller should be holding i_mutex, or else be sure that
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 12088d8de3fa..0c5f721b4e91 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -44,7 +44,7 @@ static struct dentry *isofs_export_get_parent(struct dentry *child)
{
unsigned long parent_block = 0;
unsigned long parent_offset = 0;
- struct inode *child_inode = child->d_inode;
+ struct inode *child_inode = d_inode(child);
struct iso_inode_info *e_child_inode = ISOFS_I(child_inode);
struct iso_directory_record *de = NULL;
struct buffer_head * bh = NULL;
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index b5128c6e63ad..a9079d035ae5 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -842,15 +842,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
{
jbd2_journal_revoke_header_t *header;
int offset, max;
+ int csum_size = 0;
+ __u32 rcount;
int record_len = 4;
header = (jbd2_journal_revoke_header_t *) bh->b_data;
offset = sizeof(jbd2_journal_revoke_header_t);
- max = be32_to_cpu(header->r_count);
+ rcount = be32_to_cpu(header->r_count);
if (!jbd2_revoke_block_csum_verify(journal, header))
return -EINVAL;
+ if (jbd2_journal_has_csum_v2or3(journal))
+ csum_size = sizeof(struct jbd2_journal_revoke_tail);
+ if (rcount > journal->j_blocksize - csum_size)
+ return -EINVAL;
+ max = rcount;
+
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
record_len = 8;
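
The recovery hunk above stops trusting the on-disk r_count field directly: the value is read into rcount, clamped against the journal block size (minus the checksum tail), and only then used as the scan limit. An editor's small sketch of that validate-before-loop pattern follows, using illustrative structures rather than the real jbd2 on-disk layout.

/* Illustrative sketch only: bound an on-disk count before using it as a loop limit. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>  /* ntohl(): big-endian on-disk value -> host order */

struct revoke_header { uint32_t r_count_be; };

static int scan_revoke_block(const unsigned char *blk, size_t blocksize,
                             size_t csum_size)
{
        struct revoke_header hdr;
        uint32_t rcount;

        memcpy(&hdr, blk, sizeof(hdr));
        rcount = ntohl(hdr.r_count_be);

        /* Reject counts that would walk past the end of the block. */
        if (rcount > blocksize - csum_size)
                return -1;

        for (size_t off = sizeof(hdr); off < rcount; off += 8)
                ;       /* parse one revoke record per iteration */
        return 0;
}

int main(void)
{
        unsigned char blk[4096] = { 0 };
        uint32_t bad = htonl(1U << 30);

        memcpy(blk, &bad, sizeof(bad));
        printf("%d\n", scan_revoke_block(blk, sizeof(blk), 4));  /* -1: rejected */
        return 0;
}

With the count bounded, max can be used as the scan limit without any risk of reading past the end of the buffer.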
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index c6cbaef2bda1..14214da80eb8 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -577,7 +577,7 @@ static void write_one_revoke_record(journal_t *journal,
{
int csum_size = 0;
struct buffer_head *descriptor;
- int offset;
+ int sz, offset;
journal_header_t *header;
/* If we are already aborting, this all becomes a noop. We
@@ -594,9 +594,14 @@ static void write_one_revoke_record(journal_t *journal,
if (jbd2_journal_has_csum_v2or3(journal))
csum_size = sizeof(struct jbd2_journal_revoke_tail);
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+ sz = 8;
+ else
+ sz = 4;
+
/* Make sure we have a descriptor with space left for the record */
if (descriptor) {
- if (offset >= journal->j_blocksize - csum_size) {
+ if (offset + sz > journal->j_blocksize - csum_size) {
flush_descriptor(journal, descriptor, offset, write_op);
descriptor = NULL;
}
@@ -619,16 +624,13 @@ static void write_one_revoke_record(journal_t *journal,
*descriptorp = descriptor;
}
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
* ((__be64 *)(&descriptor->b_data[offset])) =
cpu_to_be64(record->blocknr);
- offset += 8;
-
- } else {
+ else
* ((__be32 *)(&descriptor->b_data[offset])) =
cpu_to_be32(record->blocknr);
- offset += 4;
- }
+ offset += sz;
*offsetp = offset;
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 5f09370c90a8..ff2f2e6ad311 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
int result;
int wanted;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
return -EROFS;
journal = transaction->t_journal;
@@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
tid_t tid;
int need_to_start, ret;
- WARN_ON(!transaction);
/* If we've had an abort of any type, don't even think about
* actually doing the restart! */
if (is_handle_aborted(handle))
@@ -785,7 +783,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
int need_copy = 0;
unsigned long start_lock, time_lock;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
return -EROFS;
journal = transaction->t_journal;
@@ -1051,7 +1048,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
int err;
jbd_debug(5, "journal_head %p\n", jh);
- WARN_ON(!transaction);
err = -EROFS;
if (is_handle_aborted(handle))
goto out;
@@ -1266,7 +1262,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int ret = 0;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
return -EROFS;
journal = transaction->t_journal;
@@ -1397,7 +1392,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
int err = 0;
int was_modified = 0;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
return -EROFS;
journal = transaction->t_journal;
@@ -1530,8 +1524,22 @@ int jbd2_journal_stop(handle_t *handle)
tid_t tid;
pid_t pid;
- if (!transaction)
- goto free_and_exit;
+ if (!transaction) {
+ /*
+ * Handle is already detached from the transaction so
+ * there is nothing to do other than decrease a refcount,
+ * or free the handle if refcount drops to zero
+ */
+ if (--handle->h_ref > 0) {
+ jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+ handle->h_ref);
+ return err;
+ } else {
+ if (handle->h_rsv_handle)
+ jbd2_free_handle(handle->h_rsv_handle);
+ goto free_and_exit;
+ }
+ }
journal = transaction->t_journal;
J_ASSERT(journal_current_handle() == handle);
@@ -2373,7 +2381,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
- WARN_ON(!transaction);
if (is_handle_aborted(handle))
return -EROFS;
journal = transaction->t_journal;
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index f21b6fb5e4c4..1ba5c97943b8 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -224,14 +224,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb);
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
- struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode);
+ struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(d_inode(dentry));
int ret;
uint32_t now = get_seconds();
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, dead_f, now);
if (dead_f->inocache)
- set_nlink(dentry->d_inode, dead_f->inocache->pino_nlink);
+ set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
if (!ret)
dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
return ret;
@@ -241,8 +241,8 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_inode->i_sb);
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(d_inode(old_dentry)->i_sb);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry));
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
int ret;
uint8_t type;
@@ -256,7 +256,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
return -EPERM;
/* XXX: This is ugly */
- type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+ type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
now = get_seconds();
@@ -264,11 +264,11 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
if (!ret) {
mutex_lock(&f->sem);
- set_nlink(old_dentry->d_inode, ++f->inocache->pino_nlink);
+ set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
mutex_unlock(&f->sem);
- d_instantiate(dentry, old_dentry->d_inode);
+ d_instantiate(dentry, d_inode(old_dentry));
dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
- ihold(old_dentry->d_inode);
+ ihold(d_inode(old_dentry));
}
return ret;
}
@@ -585,7 +585,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb);
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry));
struct jffs2_full_dirent *fd;
int ret;
uint32_t now = get_seconds();
@@ -599,7 +599,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
dentry->d_name.len, f, now);
if (!ret) {
dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
- clear_nlink(dentry->d_inode);
+ clear_nlink(d_inode(dentry));
drop_nlink(dir_i);
}
return ret;
@@ -770,8 +770,8 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
* the VFS can't check whether the victim is empty. The filesystem
* needs to do that for itself.
*/
- if (new_dentry->d_inode) {
- victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
+ if (d_really_is_positive(new_dentry)) {
+ victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
if (d_is_dir(new_dentry)) {
struct jffs2_full_dirent *fd;
@@ -794,12 +794,12 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
/* Make a hard link */
/* XXX: This is ugly */
- type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+ type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
now = get_seconds();
ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
- old_dentry->d_inode->i_ino, type,
+ d_inode(old_dentry)->i_ino, type,
new_dentry->d_name.name, new_dentry->d_name.len, now);
if (ret)
@@ -808,9 +808,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
if (victim_f) {
/* There was a victim. Kill it off nicely */
if (d_is_dir(new_dentry))
- clear_nlink(new_dentry->d_inode);
+ clear_nlink(d_inode(new_dentry));
else
- drop_nlink(new_dentry->d_inode);
+ drop_nlink(d_inode(new_dentry));
/* Don't oops if the victim was a dirent pointing to an
inode which didn't exist. */
if (victim_f->inocache) {
@@ -836,9 +836,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
if (ret) {
/* Oh shit. We really ought to make a single node which can do both atomically */
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry));
mutex_lock(&f->sem);
- inc_nlink(old_dentry->d_inode);
+ inc_nlink(d_inode(old_dentry));
if (f->inocache && !d_is_dir(old_dentry))
f->inocache->pino_nlink++;
mutex_unlock(&f->sem);
@@ -846,8 +846,8 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
__func__, ret);
/* Might as well let the VFS know */
- d_instantiate(new_dentry, old_dentry->d_inode);
- ihold(old_dentry->d_inode);
+ d_instantiate(new_dentry, d_inode(old_dentry));
+ ihold(d_inode(old_dentry));
new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
return ret;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 64989ca9ba90..f509f62e12f6 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -51,9 +51,7 @@ const struct file_operations jffs2_file_operations =
{
.llseek = generic_file_llseek,
.open = generic_file_open,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.unlocked_ioctl=jffs2_ioctl,
.mmap = generic_file_readonly_mmap,
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 601afd1afddf..fe5ea080b4ec 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -190,7 +190,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int rc;
rc = inode_change_ok(inode, iattr);
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index aca97f35b292..d4b43fb7adb1 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -54,7 +54,7 @@ static int jffs2_security_getxattr(struct dentry *dentry, const char *name,
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY,
+ return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY,
name, buffer, size);
}
@@ -64,7 +64,7 @@ static int jffs2_security_setxattr(struct dentry *dentry, const char *name,
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY,
+ return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY,
name, buffer, size, flags);
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 3d76f28a2ba9..d86c5e3176a1 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -140,14 +140,14 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
BUG_ON(!d_is_dir(child));
- f = JFFS2_INODE_INFO(child->d_inode);
+ f = JFFS2_INODE_INFO(d_inode(child));
pino = f->inocache->pino_nlink;
JFFS2_DEBUG("Parent of directory ino #%u is #%u\n",
f->inocache->ino, pino);
- return d_obtain_alias(jffs2_iget(child->d_inode->i_sb, pino));
+ return d_obtain_alias(jffs2_iget(d_inode(child)->i_sb, pino));
}
static const struct export_operations jffs2_export_ops = {
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index c7c77b0dfccd..1fefa25d0fa5 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -31,7 +31,7 @@ const struct inode_operations jffs2_symlink_inode_operations =
static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry));
char *p = (char *)f->target;
/*
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index d72817ac51f6..f092fee5be50 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -195,7 +195,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
/* unchecked xdatum is chained with c->xattr_unchecked */
list_del_init(&xd->xindex);
- dbg_xattr("success on verfying xdatum (xid=%u, version=%u)\n",
+ dbg_xattr("success on verifying xdatum (xid=%u, version=%u)\n",
xd->xid, xd->version);
return 0;
@@ -960,7 +960,7 @@ static const struct xattr_handler *xprefix_to_handler(int xprefix) {
ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_inode_cache *ic = f->inocache;
@@ -1266,7 +1266,6 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
if (rc) {
JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n",
__func__, rc, totlen);
- rc = rc ? rc : -EBADFD;
goto out;
}
rc = save_xattr_ref(c, ref);
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index 1c868194c504..ceaf9c693225 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -21,7 +21,7 @@ static int jffs2_trusted_getxattr(struct dentry *dentry, const char *name,
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED,
+ return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED,
name, buffer, size);
}
@@ -30,7 +30,7 @@ static int jffs2_trusted_setxattr(struct dentry *dentry, const char *name,
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED,
+ return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED,
name, buffer, size, flags);
}
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index 916b5c966039..a71391eba514 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -21,7 +21,7 @@ static int jffs2_user_getxattr(struct dentry *dentry, const char *name,
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_USER,
+ return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_USER,
name, buffer, size);
}
@@ -30,7 +30,7 @@ static int jffs2_user_setxattr(struct dentry *dentry, const char *name,
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_USER,
+ return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_USER,
name, buffer, size, flags);
}
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 10815f8dfd8b..e98d39d75cf4 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -100,7 +100,7 @@ static int jfs_release(struct inode *inode, struct file *file)
int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int rc;
rc = inode_change_ok(inode, iattr);
@@ -151,8 +151,6 @@ const struct inode_operations jfs_file_inode_operations = {
const struct file_operations jfs_file_operations = {
.open = jfs_open,
.llseek = generic_file_llseek,
- .write = new_sync_write,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index bd3df1ca3c9b..070dc4b33544 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -22,8 +22,8 @@
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
+#include <linux/uio.h>
#include <linux/writeback.h>
-#include <linux/aio.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
@@ -330,8 +330,8 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, jfs_get_block);
}
-static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -339,13 +339,13 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely((rw & WRITE) && ret < 0)) {
+ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + count;
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 49ba7ff1bbb9..16a0922beb59 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -183,30 +183,23 @@ static inline void remove_metapage(struct page *page, struct metapage *mp)
#endif
-static void init_once(void *foo)
-{
- struct metapage *mp = (struct metapage *)foo;
-
- mp->lid = 0;
- mp->lsn = 0;
- mp->flag = 0;
- mp->data = NULL;
- mp->clsn = 0;
- mp->log = NULL;
- set_bit(META_free, &mp->flag);
- init_waitqueue_head(&mp->wait);
-}
-
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
- return mempool_alloc(metapage_mempool, gfp_mask);
+ struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);
+
+ if (mp) {
+ mp->lid = 0;
+ mp->lsn = 0;
+ mp->data = NULL;
+ mp->clsn = 0;
+ mp->log = NULL;
+ init_waitqueue_head(&mp->wait);
+ }
+ return mp;
}
static inline void free_metapage(struct metapage *mp)
{
- mp->flag = 0;
- set_bit(META_free, &mp->flag);
-
mempool_free(mp, metapage_mempool);
}
@@ -216,7 +209,7 @@ int __init metapage_init(void)
* Allocate the metapage structures
*/
metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
- 0, 0, init_once);
+ 0, 0, NULL);
if (metapage_cache == NULL)
return -ENOMEM;
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index a78beda85f68..337e9e51ac06 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -48,7 +48,6 @@ struct metapage {
/* metapage flag */
#define META_locked 0
-#define META_free 1
#define META_dirty 2
#define META_sync 3
#define META_discard 4
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 38fdc533f4ec..66db7bc0ed10 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -346,7 +346,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
{
int rc;
tid_t tid; /* transaction id */
- struct inode *ip = dentry->d_inode;
+ struct inode *ip = d_inode(dentry);
ino_t ino;
struct component_name dname;
struct inode *iplist[2];
@@ -472,7 +472,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
{
int rc;
tid_t tid; /* transaction id */
- struct inode *ip = dentry->d_inode;
+ struct inode *ip = d_inode(dentry);
ino_t ino;
struct component_name dname; /* object name */
struct inode *iplist[2];
@@ -791,7 +791,7 @@ static int jfs_link(struct dentry *old_dentry,
{
int rc;
tid_t tid;
- struct inode *ip = old_dentry->d_inode;
+ struct inode *ip = d_inode(old_dentry);
ino_t ino;
struct component_name dname;
struct btstack btstack;
@@ -879,7 +879,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
struct component_name dname;
int ssize; /* source pathname size */
struct btstack btstack;
- struct inode *ip = dentry->d_inode;
+ struct inode *ip = d_inode(dentry);
unchar *i_fastsymlink;
s64 xlen = 0;
int bmask = 0, xsize;
@@ -1086,8 +1086,8 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dquot_initialize(old_dir);
dquot_initialize(new_dir);
- old_ip = old_dentry->d_inode;
- new_ip = new_dentry->d_inode;
+ old_ip = d_inode(old_dentry);
+ new_ip = d_inode(new_dentry);
if ((rc = get_UCSname(&old_dname, old_dentry)))
goto out1;
@@ -1500,9 +1500,9 @@ struct dentry *jfs_get_parent(struct dentry *dentry)
unsigned long parent_ino;
parent_ino =
- le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot);
+ le32_to_cpu(JFS_IP(d_inode(dentry))->i_dtroot.header.idotdot);
- return d_obtain_alias(jfs_iget(dentry->d_inode->i_sb, parent_ino));
+ return d_obtain_alias(jfs_iget(d_inode(dentry)->i_sb, parent_ino));
}
const struct inode_operations jfs_dir_inode_operations = {
@@ -1578,7 +1578,7 @@ static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags)
* positive dentry isn't good idea. So it's unsupported like
* rename("filename", "FILENAME") for now.
*/
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
return 1;
/*
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 5d30c56ae075..4cd9798f4948 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -102,7 +102,7 @@ void jfs_error(struct super_block *sb, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- pr_err("ERROR: (device %s): %pf: %pV\n",
+ pr_err("ERROR: (device %s): %ps: %pV\n",
sb->s_id, __builtin_return_address(0), &vaf);
va_end(args);
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index 205b946d8e0d..80f42bcc4ef1 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -24,7 +24,7 @@
static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- char *s = JFS_IP(dentry->d_inode)->i_inline;
+ char *s = JFS_IP(d_inode(dentry))->i_inline;
nd_set_link(nd, s);
return NULL;
}
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 46325d5c34fc..48b15a6e5558 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -849,7 +849,7 @@ int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t value_len, int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct jfs_inode_info *ji = JFS_IP(inode);
int rc;
tid_t tid;
@@ -872,7 +872,7 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
tid = txBegin(inode->i_sb, 0);
mutex_lock(&ji->commit_mutex);
- rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len,
+ rc = __jfs_setxattr(tid, d_inode(dentry), name, value, value_len,
flags);
if (!rc)
rc = txCommit(tid, 1, &inode, 0);
@@ -959,7 +959,7 @@ ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
return -EOPNOTSUPP;
}
- err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);
+ err = __jfs_getxattr(d_inode(dentry), name, data, buf_size);
return err;
}
@@ -976,7 +976,7 @@ static inline int can_list(struct jfs_ea *ea)
ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
char *buffer;
ssize_t size = 0;
int xattr_size;
@@ -1029,7 +1029,7 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
int jfs_removexattr(struct dentry *dentry, const char *name)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct jfs_inode_info *ji = JFS_IP(inode);
int rc;
tid_t tid;
@@ -1047,7 +1047,7 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
tid = txBegin(inode->i_sb, 0);
mutex_lock(&ji->commit_mutex);
- rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
+ rc = __jfs_setxattr(tid, d_inode(dentry), name, NULL, 0, XATTR_REPLACE);
if (!rc)
rc = txCommit(tid, 1, &inode, 0);
txEnd(tid);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 6acc9648f986..fffca9517321 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -444,7 +444,7 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
/* Always perform fresh lookup for negatives */
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
goto out_bad_unlocked;
kn = dentry->d_fsdata;
@@ -518,7 +518,14 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
if (!kn)
goto err_out1;
- ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
+ /*
+ * If the ino of the sysfs entry created for a kmem cache gets
+ * allocated from an ida layer, which is accounted to the memcg that
+ * owns the cache, the memcg will get pinned forever. So do not account
+ * ino ida allocations.
+ */
+ ret = ida_simple_get(&root->ino_ida, 1, 0,
+ GFP_KERNEL | __GFP_NOACCOUNT);
if (ret < 0)
goto err_out2;
kn->ino = ret;
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 9000874a945b..2da8493a380b 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -111,7 +111,7 @@ int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct kernfs_node *kn = dentry->d_fsdata;
int error;
@@ -172,11 +172,11 @@ int kernfs_iop_setxattr(struct dentry *dentry, const char *name,
if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
- error = security_inode_setsecurity(dentry->d_inode, suffix,
+ error = security_inode_setsecurity(d_inode(dentry), suffix,
value, size, flags);
if (error)
return error;
- error = security_inode_getsecctx(dentry->d_inode,
+ error = security_inode_getsecctx(d_inode(dentry),
&secdata, &secdata_len);
if (error)
return error;
@@ -271,7 +271,7 @@ int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct kernfs_node *kn = dentry->d_fsdata;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
mutex_lock(&kernfs_mutex);
kernfs_refresh_inode(kn, inode);
diff --git a/fs/libfs.c b/fs/libfs.c
index 0ab65122ee45..cb1fb4b9b637 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -22,13 +22,13 @@
static inline int simple_positive(struct dentry *dentry)
{
- return dentry->d_inode && !d_unhashed(dentry);
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
generic_fillattr(inode, stat);
stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
return 0;
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(dcache_dir_close);
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *dentry = file->f_path.dentry;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
switch (whence) {
case 1:
offset += file->f_pos;
@@ -102,7 +102,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
if (offset >= 0)
break;
default:
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -129,7 +129,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
spin_unlock(&dentry->d_lock);
}
}
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
return offset;
}
EXPORT_SYMBOL(dcache_dir_lseek);
@@ -169,7 +169,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
spin_unlock(&next->d_lock);
spin_unlock(&dentry->d_lock);
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
- next->d_inode->i_ino, dt_type(next->d_inode)))
+ d_inode(next)->i_ino, dt_type(d_inode(next))))
return 0;
spin_lock(&dentry->d_lock);
spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
@@ -270,7 +270,7 @@ EXPORT_SYMBOL(simple_open);
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
inc_nlink(inode);
@@ -304,7 +304,7 @@ EXPORT_SYMBOL(simple_empty);
int simple_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
drop_nlink(inode);
@@ -318,7 +318,7 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
if (!simple_empty(dentry))
return -ENOTEMPTY;
- drop_nlink(dentry->d_inode);
+ drop_nlink(d_inode(dentry));
simple_unlink(dir, dentry);
drop_nlink(dir);
return 0;
@@ -328,16 +328,16 @@ EXPORT_SYMBOL(simple_rmdir);
int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int they_are_dirs = d_is_dir(old_dentry);
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
- if (new_dentry->d_inode) {
+ if (d_really_is_positive(new_dentry)) {
simple_unlink(new_dir, new_dentry);
if (they_are_dirs) {
- drop_nlink(new_dentry->d_inode);
+ drop_nlink(d_inode(new_dentry));
drop_nlink(old_dir);
}
} else if (they_are_dirs) {
@@ -368,7 +368,7 @@ EXPORT_SYMBOL(simple_rename);
*/
int simple_setattr(struct dentry *dentry, struct iattr *iattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
error = inode_change_ok(inode, iattr);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 665ef5a05183..a563ddbc19e6 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -31,7 +31,7 @@
static struct hlist_head nlm_files[FILE_NRHASH];
static DEFINE_MUTEX(nlm_file_mutex);
-#ifdef NFSD_DEBUG
+#ifdef CONFIG_SUNRPC_DEBUG
static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
{
u32 *fhp = (u32*)f->data;
diff --git a/fs/locks.c b/fs/locks.c
index 40bc384728c0..653faabb07f4 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -203,11 +203,11 @@ static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
static struct file_lock_context *
-locks_get_lock_context(struct inode *inode)
+locks_get_lock_context(struct inode *inode, int type)
{
struct file_lock_context *new;
- if (likely(inode->i_flctx))
+ if (likely(inode->i_flctx) || type == F_UNLCK)
goto out;
new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
@@ -223,14 +223,7 @@ locks_get_lock_context(struct inode *inode)
* Assign the pointer if it's not already assigned. If it is, then
* free the context we just allocated.
*/
- spin_lock(&inode->i_lock);
- if (likely(!inode->i_flctx)) {
- inode->i_flctx = new;
- new = NULL;
- }
- spin_unlock(&inode->i_lock);
-
- if (new)
+ if (cmpxchg(&inode->i_flctx, NULL, new))
kmem_cache_free(flctx_cache, new);
out:
return inode->i_flctx;
@@ -276,8 +269,10 @@ void locks_release_private(struct file_lock *fl)
}
if (fl->fl_lmops) {
- if (fl->fl_lmops->lm_put_owner)
- fl->fl_lmops->lm_put_owner(fl);
+ if (fl->fl_lmops->lm_put_owner) {
+ fl->fl_lmops->lm_put_owner(fl->fl_owner);
+ fl->fl_owner = NULL;
+ }
fl->fl_lmops = NULL;
}
}
@@ -333,7 +328,7 @@ void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
if (fl->fl_lmops) {
if (fl->fl_lmops->lm_get_owner)
- fl->fl_lmops->lm_get_owner(new, fl);
+ fl->fl_lmops->lm_get_owner(fl->fl_owner);
}
}
EXPORT_SYMBOL(locks_copy_conflock);
@@ -592,11 +587,15 @@ posix_owner_key(struct file_lock *fl)
static void locks_insert_global_blocked(struct file_lock *waiter)
{
+ lockdep_assert_held(&blocked_lock_lock);
+
hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}
static void locks_delete_global_blocked(struct file_lock *waiter)
{
+ lockdep_assert_held(&blocked_lock_lock);
+
hash_del(&waiter->fl_link);
}
@@ -730,7 +729,7 @@ static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
/* POSIX locks owned by the same process do not conflict with
* each other.
*/
- if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
+ if (posix_same_owner(caller_fl, sys_fl))
return (0);
/* Check whether they overlap */
@@ -748,7 +747,7 @@ static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
/* FLOCK locks referring to the same filp do not conflict with
* each other.
*/
- if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
+ if (caller_fl->fl_file == sys_fl->fl_file)
return (0);
if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
return 0;
@@ -838,6 +837,8 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
{
int i = 0;
+ lockdep_assert_held(&blocked_lock_lock);
+
/*
* This deadlock detector can't reasonably detect deadlocks with
* FL_OFDLCK locks, since they aren't owned by a process, per se.
@@ -871,9 +872,12 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
bool found = false;
LIST_HEAD(dispose);
- ctx = locks_get_lock_context(inode);
- if (!ctx)
- return -ENOMEM;
+ ctx = locks_get_lock_context(inode, request->fl_type);
+ if (!ctx) {
+ if (request->fl_type != F_UNLCK)
+ return -ENOMEM;
+ return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
+ }
if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
new_fl = locks_alloc_lock();
@@ -939,9 +943,9 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
bool added = false;
LIST_HEAD(dispose);
- ctx = locks_get_lock_context(inode);
+ ctx = locks_get_lock_context(inode, request->fl_type);
if (!ctx)
- return -ENOMEM;
+ return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
/*
* We may need two file_lock structures for this operation,
@@ -964,8 +968,6 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
*/
if (request->fl_type != F_UNLCK) {
list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
- if (!IS_POSIX(fl))
- continue;
if (!posix_locks_conflict(request, fl))
continue;
if (conflock)
@@ -1605,7 +1607,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
lease = *flp;
trace_generic_add_lease(inode, lease);
- ctx = locks_get_lock_context(inode);
+ /* Note that arg is never F_UNLCK here */
+ ctx = locks_get_lock_context(inode, arg);
if (!ctx)
return -ENOMEM;
@@ -2555,15 +2558,10 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
: (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
}
if (inode) {
-#ifdef WE_CAN_BREAK_LSLK_NOW
- seq_printf(f, "%d %s:%ld ", fl_pid,
- inode->i_sb->s_id, inode->i_ino);
-#else
- /* userspace relies on this representation of dev_t ;-( */
+ /* userspace relies on this representation of dev_t */
seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
-#endif
} else {
seq_printf(f, "%d <none>:0 ", fl_pid);
}
@@ -2592,6 +2590,44 @@ static int locks_show(struct seq_file *f, void *v)
return 0;
}
+static void __show_fd_locks(struct seq_file *f,
+ struct list_head *head, int *id,
+ struct file *filp, struct files_struct *files)
+{
+ struct file_lock *fl;
+
+ list_for_each_entry(fl, head, fl_list) {
+
+ if (filp != fl->fl_file)
+ continue;
+ if (fl->fl_owner != files &&
+ fl->fl_owner != filp)
+ continue;
+
+ (*id)++;
+ seq_puts(f, "lock:\t");
+ lock_get_status(f, fl, *id, "");
+ }
+}
+
+void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files)
+{
+ struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx;
+ int id = 0;
+
+ ctx = inode->i_flctx;
+ if (!ctx)
+ return;
+
+ spin_lock(&ctx->flc_lock);
+ __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
+ __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
+ __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
+ spin_unlock(&ctx->flc_lock);
+}
+
static void *locks_start(struct seq_file *f, loff_t *pos)
__acquires(&blocked_lock_lock)
{
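
locks_get_lock_context() above drops the i_lock-protected publish in favour of a single cmpxchg(): allocate a context speculatively, try to install it, and free the allocation if another task won the race. Below is an editor's user-space sketch of the same lock-free lazy initialization, using C11 atomics in place of the kernel's cmpxchg(); get_ctx() and shared_ctx are illustrative names only.

/* Illustrative sketch only: allocate speculatively, publish with one CAS. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { int dummy; };

static _Atomic(struct ctx *) shared_ctx;

static struct ctx *get_ctx(void)
{
        struct ctx *cur = atomic_load(&shared_ctx);
        struct ctx *new;

        if (cur)                        /* fast path: already published */
                return cur;

        new = calloc(1, sizeof(*new));
        if (!new)
                return NULL;

        /* Install if still NULL; otherwise someone beat us, free our copy. */
        if (!atomic_compare_exchange_strong(&shared_ctx, &cur, new))
                free(new);

        return atomic_load(&shared_ctx);
}

int main(void)
{
        printf("%p\n", (void *)get_ctx());
        printf("%p\n", (void *)get_ctx());      /* same pointer: allocated once */
        return 0;
}

The patch additionally skips the allocation entirely for F_UNLCK requests, since unlocking on an inode that has no lock context is a no-op.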
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 6bdc347008f5..4cf38f118549 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -213,7 +213,7 @@ static void abort_transaction(struct inode *inode, struct logfs_transaction *ta)
static int logfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct logfs_super *super = logfs_super(dir->i_sb);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct logfs_transaction *ta;
struct page *page;
pgoff_t index;
@@ -271,7 +271,7 @@ static inline int logfs_empty_dir(struct inode *dir)
static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (!logfs_empty_dir(inode))
return -ENOTEMPTY;
@@ -537,7 +537,7 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
static int logfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
ihold(inode);
@@ -607,7 +607,7 @@ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
/* 2. write target dd */
mutex_lock(&super->s_dirop_mutex);
logfs_add_transaction(new_dir, ta);
- err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode);
+ err = logfs_write_dir(new_dir, new_dentry, d_inode(old_dentry));
if (!err)
err = write_inode(new_dir);
@@ -658,8 +658,8 @@ static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct logfs_super *super = logfs_super(old_dir->i_sb);
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
int isdir = S_ISDIR(old_inode->i_mode);
struct logfs_disk_dentry dd;
struct logfs_transaction *ta;
@@ -719,7 +719,7 @@ out:
static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- if (new_dentry->d_inode)
+ if (d_really_is_positive(new_dentry))
return logfs_rename_target(old_dir, old_dentry,
new_dir, new_dentry);
return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry);
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 8538752df2f6..1a6f0167b16a 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -241,7 +241,7 @@ int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
static int logfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int err = 0;
err = inode_change_ok(inode, attr);
@@ -271,8 +271,6 @@ const struct file_operations logfs_reg_fops = {
.llseek = generic_file_llseek,
.mmap = generic_file_readonly_mmap,
.open = generic_file_open,
- .read = new_sync_read,
- .write = new_sync_write,
};
const struct address_space_operations logfs_reg_aops = {
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index dfaf6fa9b7b5..118e4e7bc935 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -156,7 +156,7 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
- struct inode * dir = dentry->d_parent->d_inode;
+ struct inode * dir = d_inode(dentry->d_parent);
struct super_block * sb = dir->i_sb;
struct minix_sb_info * sbi = minix_sb(sb);
unsigned long n;
@@ -203,7 +203,7 @@ found:
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct super_block * sb = dir->i_sb;
diff --git a/fs/minix/file.c b/fs/minix/file.c
index a967de085ac0..94f0eb9a6e2c 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -14,9 +14,7 @@
*/
const struct file_operations minix_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
@@ -25,7 +23,7 @@ const struct file_operations minix_file_operations = {
static int minix_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
error = inode_change_ok(inode, attr);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 3f57af196a7d..1182d1e26a9c 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -626,8 +626,8 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
struct super_block *sb = dentry->d_sb;
- generic_fillattr(dentry->d_inode, stat);
- if (INODE_VERSION(dentry->d_inode) == MINIX_V1)
+ generic_fillattr(d_inode(dentry), stat);
+ if (INODE_VERSION(d_inode(dentry)) == MINIX_V1)
stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
else
stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index cd950e2331b6..a795a11e50c7 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -104,7 +104,7 @@ out_fail:
static int minix_link(struct dentry * old_dentry, struct inode * dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
@@ -151,7 +151,7 @@ out_dir:
static int minix_unlink(struct inode * dir, struct dentry *dentry)
{
int err = -ENOENT;
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
struct page * page;
struct minix_dir_entry * de;
@@ -171,7 +171,7 @@ end_unlink:
static int minix_rmdir(struct inode * dir, struct dentry *dentry)
{
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
int err = -ENOTEMPTY;
if (minix_empty_dir(inode)) {
@@ -187,8 +187,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
struct inode * new_dir, struct dentry *new_dentry)
{
- struct inode * old_inode = old_dentry->d_inode;
- struct inode * new_inode = new_dentry->d_inode;
+ struct inode * old_inode = d_inode(old_dentry);
+ struct inode * new_inode = d_inode(new_dentry);
struct page * dir_page = NULL;
struct minix_dir_entry * dir_de = NULL;
struct page * old_page;
diff --git a/fs/namei.c b/fs/namei.c
index c83145af4bfc..fe30d3be43a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -119,15 +119,14 @@
* PATH_MAX includes the nul terminator --RR.
*/
-#define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename))
+#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
{
- struct filename *result, *err;
- int len;
- long max;
+ struct filename *result;
char *kname;
+ int len;
result = audit_reusename(filename);
if (result)
@@ -136,22 +135,18 @@ getname_flags(const char __user *filename, int flags, int *empty)
result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
- result->refcnt = 1;
/*
* First, try to embed the struct filename inside the names_cache
* allocation
*/
- kname = (char *)result + sizeof(*result);
+ kname = (char *)result->iname;
result->name = kname;
- result->separate = false;
- max = EMBEDDED_NAME_MAX;
-recopy:
- len = strncpy_from_user(kname, filename, max);
+ len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
if (unlikely(len < 0)) {
- err = ERR_PTR(len);
- goto error;
+ __putname(result);
+ return ERR_PTR(len);
}
/*
@@ -160,43 +155,49 @@ recopy:
* names_cache allocation for the pathname, and re-do the copy from
* userland.
*/
- if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) {
+ if (unlikely(len == EMBEDDED_NAME_MAX)) {
+ const size_t size = offsetof(struct filename, iname[1]);
kname = (char *)result;
- result = kzalloc(sizeof(*result), GFP_KERNEL);
- if (!result) {
- err = ERR_PTR(-ENOMEM);
- result = (struct filename *)kname;
- goto error;
+ /*
+ * size is chosen in such a way that we can guarantee that
+ * result->iname[0] is within the same object and that
+ * kname can't be equal to result->iname, no matter what.
+ */
+ result = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!result)) {
+ __putname(kname);
+ return ERR_PTR(-ENOMEM);
}
result->name = kname;
- result->separate = true;
- result->refcnt = 1;
- max = PATH_MAX;
- goto recopy;
+ len = strncpy_from_user(kname, filename, PATH_MAX);
+ if (unlikely(len < 0)) {
+ __putname(kname);
+ kfree(result);
+ return ERR_PTR(len);
+ }
+ if (unlikely(len == PATH_MAX)) {
+ __putname(kname);
+ kfree(result);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
}
+ result->refcnt = 1;
/* The empty path is special. */
if (unlikely(!len)) {
if (empty)
*empty = 1;
- err = ERR_PTR(-ENOENT);
- if (!(flags & LOOKUP_EMPTY))
- goto error;
+ if (!(flags & LOOKUP_EMPTY)) {
+ putname(result);
+ return ERR_PTR(-ENOENT);
+ }
}
- err = ERR_PTR(-ENAMETOOLONG);
- if (unlikely(len >= PATH_MAX))
- goto error;
-
result->uptr = filename;
result->aname = NULL;
audit_getname(result);
return result;
-
-error:
- putname(result);
- return err;
}
struct filename *
@@ -216,8 +217,7 @@ getname_kernel(const char * filename)
return ERR_PTR(-ENOMEM);
if (len <= EMBEDDED_NAME_MAX) {
- result->name = (char *)(result) + sizeof(*result);
- result->separate = false;
+ result->name = (char *)result->iname;
} else if (len <= PATH_MAX) {
struct filename *tmp;
@@ -227,7 +227,6 @@ getname_kernel(const char * filename)
return ERR_PTR(-ENOMEM);
}
tmp->name = (char *)result;
- tmp->separate = true;
result = tmp;
} else {
__putname(result);
@@ -249,7 +248,7 @@ void putname(struct filename *name)
if (--name->refcnt > 0)
return;
- if (name->separate) {
+ if (name->name != name->iname) {
__putname(name->name);
kfree(name);
} else
@@ -1416,6 +1415,7 @@ static int lookup_fast(struct nameidata *nd,
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
+ bool negative;
dentry = __d_lookup_rcu(parent, &nd->last, &seq);
if (!dentry)
goto unlazy;
@@ -1425,8 +1425,11 @@ static int lookup_fast(struct nameidata *nd,
* the dentry name information from lookup.
*/
*inode = dentry->d_inode;
+ negative = d_is_negative(dentry);
if (read_seqcount_retry(&dentry->d_seq, seq))
return -ECHILD;
+ if (negative)
+ return -ENOENT;
/*
* This sequence count validates that the parent had no
@@ -1473,6 +1476,10 @@ unlazy:
goto need_lookup;
}
+ if (unlikely(d_is_negative(dentry))) {
+ dput(dentry);
+ return -ENOENT;
+ }
path->mnt = mnt;
path->dentry = dentry;
err = follow_managed(path, nd->flags);
@@ -1584,14 +1591,15 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
goto out_err;
inode = path->dentry->d_inode;
+ err = -ENOENT;
+ if (d_is_negative(path->dentry))
+ goto out_path_put;
}
- err = -ENOENT;
- if (!inode || d_is_negative(path->dentry))
- goto out_path_put;
if (should_follow_link(path->dentry, follow)) {
if (nd->flags & LOOKUP_RCU) {
- if (unlikely(unlazy_walk(nd, path->dentry))) {
+ if (unlikely(nd->path.mnt != path->mnt ||
+ unlazy_walk(nd, path->dentry))) {
err = -ECHILD;
goto out_err;
}
@@ -1851,10 +1859,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
return err;
}
-static int path_init(int dfd, const char *name, unsigned int flags,
+static int path_init(int dfd, const struct filename *name, unsigned int flags,
struct nameidata *nd)
{
int retval = 0;
+ const char *s = name->name;
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
@@ -1863,7 +1872,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
- if (*name) {
+ if (*s) {
if (!d_can_lookup(root))
return -ENOTDIR;
retval = inode_permission(inode, MAY_EXEC);
@@ -1885,7 +1894,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->root.mnt = NULL;
nd->m_seq = read_seqbegin(&mount_lock);
- if (*name=='/') {
+ if (*s == '/') {
if (flags & LOOKUP_RCU) {
rcu_read_lock();
nd->seq = set_root_rcu(nd);
@@ -1919,7 +1928,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
dentry = f.file->f_path.dentry;
- if (*name) {
+ if (*s) {
if (!d_can_lookup(dentry)) {
fdput(f);
return -ENOTDIR;
@@ -1949,7 +1958,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
return -ECHILD;
done:
current->total_link_count = 0;
- return link_path_walk(name, nd);
+ return link_path_walk(s, nd);
}
static void path_cleanup(struct nameidata *nd)
@@ -1972,7 +1981,7 @@ static inline int lookup_last(struct nameidata *nd, struct path *path)
}
/* Returns 0 and nd will be valid on success; Returns error otherwise. */
-static int path_lookupat(int dfd, const char *name,
+static int path_lookupat(int dfd, const struct filename *name,
unsigned int flags, struct nameidata *nd)
{
struct path path;
@@ -2027,31 +2036,17 @@ static int path_lookupat(int dfd, const char *name,
static int filename_lookup(int dfd, struct filename *name,
unsigned int flags, struct nameidata *nd)
{
- int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd);
+ int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
if (unlikely(retval == -ECHILD))
- retval = path_lookupat(dfd, name->name, flags, nd);
+ retval = path_lookupat(dfd, name, flags, nd);
if (unlikely(retval == -ESTALE))
- retval = path_lookupat(dfd, name->name,
- flags | LOOKUP_REVAL, nd);
+ retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
if (likely(!retval))
audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
return retval;
}
-static int do_path_lookup(int dfd, const char *name,
- unsigned int flags, struct nameidata *nd)
-{
- struct filename *filename = getname_kernel(name);
- int retval = PTR_ERR(filename);
-
- if (!IS_ERR(filename)) {
- retval = filename_lookup(dfd, filename, flags, nd);
- putname(filename);
- }
- return retval;
-}
-
/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
@@ -2089,9 +2084,15 @@ out:
int kern_path(const char *name, unsigned int flags, struct path *path)
{
struct nameidata nd;
- int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
- if (!res)
- *path = nd.path;
+ struct filename *filename = getname_kernel(name);
+ int res = PTR_ERR(filename);
+
+ if (!IS_ERR(filename)) {
+ res = filename_lookup(AT_FDCWD, filename, flags, &nd);
+ putname(filename);
+ if (!res)
+ *path = nd.path;
+ }
return res;
}
EXPORT_SYMBOL(kern_path);
@@ -2108,15 +2109,22 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
const char *name, unsigned int flags,
struct path *path)
{
- struct nameidata nd;
- int err;
- nd.root.dentry = dentry;
- nd.root.mnt = mnt;
+ struct filename *filename = getname_kernel(name);
+ int err = PTR_ERR(filename);
+
BUG_ON(flags & LOOKUP_PARENT);
- /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
- err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
- if (!err)
- *path = nd.path;
+
+ /* the first argument of filename_lookup() is ignored with LOOKUP_ROOT */
+ if (!IS_ERR(filename)) {
+ struct nameidata nd;
+ nd.root.dentry = dentry;
+ nd.root.mnt = mnt;
+ err = filename_lookup(AT_FDCWD, filename,
+ flags | LOOKUP_ROOT, &nd);
+ if (!err)
+ *path = nd.path;
+ putname(filename);
+ }
return err;
}
EXPORT_SYMBOL(vfs_path_lookup);
@@ -2138,9 +2146,7 @@ static struct dentry *lookup_hash(struct nameidata *nd)
* @len: maximum length @len should be interpreted to
*
* Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code. Also note that by using this function the
- * nameidata argument is passed to the filesystem methods and a filesystem
- * using this helper needs to be prepared for that.
+ * not be called by generic code.
*/
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
@@ -2313,7 +2319,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
mutex_unlock(&dir->d_inode->i_mutex);
done:
- if (!dentry->d_inode || d_is_negative(dentry)) {
+ if (d_is_negative(dentry)) {
error = -ENOENT;
dput(dentry);
goto out;
@@ -2341,7 +2347,8 @@ out:
* Returns 0 and "path" will be valid on success; Returns error otherwise.
*/
static int
-path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
+path_mountpoint(int dfd, const struct filename *name, struct path *path,
+ unsigned int flags)
{
struct nameidata nd;
int err;
@@ -2370,20 +2377,20 @@ out:
}
static int
-filename_mountpoint(int dfd, struct filename *s, struct path *path,
+filename_mountpoint(int dfd, struct filename *name, struct path *path,
unsigned int flags)
{
int error;
- if (IS_ERR(s))
- return PTR_ERR(s);
- error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+ error = path_mountpoint(dfd, name, path, flags | LOOKUP_RCU);
if (unlikely(error == -ECHILD))
- error = path_mountpoint(dfd, s->name, path, flags);
+ error = path_mountpoint(dfd, name, path, flags);
if (unlikely(error == -ESTALE))
- error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
+ error = path_mountpoint(dfd, name, path, flags | LOOKUP_REVAL);
if (likely(!error))
- audit_inode(s, path->dentry, 0);
- putname(s);
+ audit_inode(name, path->dentry, 0);
+ putname(name);
return error;
}
@@ -3037,17 +3044,17 @@ retry_lookup:
BUG_ON(nd->flags & LOOKUP_RCU);
inode = path->dentry->d_inode;
-finish_lookup:
- /* we _can_ be in RCU mode here */
error = -ENOENT;
- if (!inode || d_is_negative(path->dentry)) {
+ if (d_is_negative(path->dentry)) {
path_to_nameidata(path, nd);
goto out;
}
-
+finish_lookup:
+ /* we _can_ be in RCU mode here */
if (should_follow_link(path->dentry, !symlink_ok)) {
if (nd->flags & LOOKUP_RCU) {
- if (unlikely(unlazy_walk(nd, path->dentry))) {
+ if (unlikely(nd->path.mnt != path->mnt ||
+ unlazy_walk(nd, path->dentry))) {
error = -ECHILD;
goto out;
}
@@ -3079,7 +3086,7 @@ finish_open:
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
goto out;
- if (!S_ISREG(nd->inode->i_mode))
+ if (!d_is_reg(nd->path.dentry))
will_truncate = false;
if (will_truncate) {
@@ -3156,7 +3163,7 @@ static int do_tmpfile(int dfd, struct filename *pathname,
static const struct qstr name = QSTR_INIT("/", 1);
struct dentry *dentry, *child;
struct inode *dir;
- int error = path_lookupat(dfd, pathname->name,
+ int error = path_lookupat(dfd, pathname,
flags | LOOKUP_DIRECTORY, nd);
if (unlikely(error))
return error;
@@ -3226,10 +3233,10 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(file->f_flags & __O_TMPFILE)) {
error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
- goto out;
+ goto out2;
}
- error = path_init(dfd, pathname->name, flags, nd);
+ error = path_init(dfd, pathname, flags, nd);
if (unlikely(error))
goto out;
@@ -3256,6 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
}
out:
path_cleanup(nd);
+out2:
if (!(opened & FILE_OPENED)) {
BUG_ON(!error);
put_filp(file);
diff --git a/fs/namespace.c b/fs/namespace.c
index 82ef1405260e..1b9e11167bae 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
*/
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
- struct mount *p, *res;
- res = p = __lookup_mnt(mnt, dentry);
+ struct mount *p, *res = NULL;
+ p = __lookup_mnt(mnt, dentry);
if (!p)
goto out;
+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+ res = p;
hlist_for_each_entry_continue(p, mnt_hash) {
if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
break;
- res = p;
+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+ res = p;
}
out:
return res;
@@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
/*
* vfsmount lock must be held for write
*/
-static void detach_mnt(struct mount *mnt, struct path *old_path)
+static void unhash_mnt(struct mount *mnt)
{
- old_path->dentry = mnt->mnt_mountpoint;
- old_path->mnt = &mnt->mnt_parent->mnt;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
@@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
/*
* vfsmount lock must be held for write
*/
+static void detach_mnt(struct mount *mnt, struct path *old_path)
+{
+ old_path->dentry = mnt->mnt_mountpoint;
+ old_path->mnt = &mnt->mnt_parent->mnt;
+ unhash_mnt(mnt);
+}
+
+/*
+ * vfsmount lock must be held for write
+ */
+static void umount_mnt(struct mount *mnt)
+{
+ /* old mountpoint will be dropped when we can do that */
+ mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
+ unhash_mnt(mnt);
+}
+
+/*
+ * vfsmount lock must be held for write
+ */
void mnt_set_mountpoint(struct mount *mnt,
struct mountpoint *mp,
struct mount *child_mnt)
@@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt)
rcu_read_unlock();
list_del(&mnt->mnt_instance);
+
+ if (unlikely(!list_empty(&mnt->mnt_mounts))) {
+ struct mount *p, *tmp;
+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
+ umount_mnt(p);
+ }
+ }
unlock_mount_hash();
if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
@@ -1298,17 +1326,15 @@ static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static void namespace_unlock(void)
{
- struct hlist_head head = unmounted;
+ struct hlist_head head;
- if (likely(hlist_empty(&head))) {
- up_write(&namespace_sem);
- return;
- }
+ hlist_move_list(&unmounted, &head);
- head.first->pprev = &head.first;
- INIT_HLIST_HEAD(&unmounted);
up_write(&namespace_sem);
+ if (likely(hlist_empty(&head)))
+ return;
+
synchronize_rcu();
group_pin_kill(&head);
@@ -1319,49 +1345,63 @@ static inline void namespace_lock(void)
down_write(&namespace_sem);
}
+enum umount_tree_flags {
+ UMOUNT_SYNC = 1,
+ UMOUNT_PROPAGATE = 2,
+ UMOUNT_CONNECTED = 4,
+};
/*
* mount_lock must be held
* namespace_sem must be held for write
- * how = 0 => just this tree, don't propagate
- * how = 1 => propagate; we know that nobody else has reference to any victims
- * how = 2 => lazy umount
*/
-void umount_tree(struct mount *mnt, int how)
+static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
- HLIST_HEAD(tmp_list);
+ LIST_HEAD(tmp_list);
struct mount *p;
+ if (how & UMOUNT_PROPAGATE)
+ propagate_mount_unlock(mnt);
+
+ /* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
- hlist_del_init_rcu(&p->mnt_hash);
- hlist_add_head(&p->mnt_hash, &tmp_list);
+ p->mnt.mnt_flags |= MNT_UMOUNT;
+ list_move(&p->mnt_list, &tmp_list);
}
- hlist_for_each_entry(p, &tmp_list, mnt_hash)
+ /* Hide the mounts from mnt_mounts */
+ list_for_each_entry(p, &tmp_list, mnt_list) {
list_del_init(&p->mnt_child);
+ }
- if (how)
+ /* Add propagated mounts to the tmp_list */
+ if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);
- while (!hlist_empty(&tmp_list)) {
- p = hlist_entry(tmp_list.first, struct mount, mnt_hash);
- hlist_del_init_rcu(&p->mnt_hash);
+ while (!list_empty(&tmp_list)) {
+ bool disconnect;
+ p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
p->mnt_ns = NULL;
- if (how < 2)
+ if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
+ disconnect = !(((how & UMOUNT_CONNECTED) &&
+ mnt_has_parent(p) &&
+ (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
+ IS_MNT_LOCKED_AND_LAZY(p));
+
+ pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
+ disconnect ? &unmounted : NULL);
if (mnt_has_parent(p)) {
- hlist_del_init(&p->mnt_mp_list);
- put_mountpoint(p->mnt_mp);
mnt_add_count(p->mnt_parent, -1);
- /* old mountpoint will be dropped when we can do that */
- p->mnt_ex_mountpoint = p->mnt_mountpoint;
- p->mnt_mountpoint = p->mnt.mnt_root;
- p->mnt_parent = p;
- p->mnt_mp = NULL;
+ if (!disconnect) {
+ /* Don't forget about p */
+ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
+ } else {
+ umount_mnt(p);
+ }
}
change_mnt_propagation(p, MS_PRIVATE);
}
@@ -1447,14 +1487,14 @@ static int do_umount(struct mount *mnt, int flags)
if (flags & MNT_DETACH) {
if (!list_empty(&mnt->mnt_list))
- umount_tree(mnt, 2);
+ umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
if (!list_empty(&mnt->mnt_list))
- umount_tree(mnt, 1);
+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
}
@@ -1480,13 +1520,20 @@ void __detach_mounts(struct dentry *dentry)
namespace_lock();
mp = lookup_mountpoint(dentry);
- if (!mp)
+ if (IS_ERR_OR_NULL(mp))
goto out_unlock;
lock_mount_hash();
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
- umount_tree(mnt, 2);
+ if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
+ struct mount *p, *tmp;
+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
+ hlist_add_head(&p->mnt_umount.s_list, &unmounted);
+ umount_mnt(p);
+ }
+ }
+ else
+ umount_tree(mnt, UMOUNT_CONNECTED);
}
unlock_mount_hash();
put_mountpoint(mp);
@@ -1648,7 +1695,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
out:
if (res) {
lock_mount_hash();
- umount_tree(res, 0);
+ umount_tree(res, UMOUNT_SYNC);
unlock_mount_hash();
}
return q;
@@ -1660,8 +1707,11 @@ struct vfsmount *collect_mounts(struct path *path)
{
struct mount *tree;
namespace_lock();
- tree = copy_tree(real_mount(path->mnt), path->dentry,
- CL_COPY_ALL | CL_PRIVATE);
+ if (!check_mnt(real_mount(path->mnt)))
+ tree = ERR_PTR(-EINVAL);
+ else
+ tree = copy_tree(real_mount(path->mnt), path->dentry,
+ CL_COPY_ALL | CL_PRIVATE);
namespace_unlock();
if (IS_ERR(tree))
return ERR_CAST(tree);
@@ -1672,7 +1722,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
{
namespace_lock();
lock_mount_hash();
- umount_tree(real_mount(mnt), 0);
+ umount_tree(real_mount(mnt), UMOUNT_SYNC);
unlock_mount_hash();
namespace_unlock();
}
@@ -1855,7 +1905,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
out_cleanup_ids:
while (!hlist_empty(&tree_list)) {
child = hlist_entry(tree_list.first, struct mount, mnt_hash);
- umount_tree(child, 0);
+ umount_tree(child, UMOUNT_SYNC);
}
unlock_mount_hash();
cleanup_group_ids(source_mnt, NULL);
@@ -2035,7 +2085,7 @@ static int do_loopback(struct path *path, const char *old_name,
err = graft_tree(mnt, parent, mp);
if (err) {
lock_mount_hash();
- umount_tree(mnt, 0);
+ umount_tree(mnt, UMOUNT_SYNC);
unlock_mount_hash();
}
out2:
@@ -2406,7 +2456,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
while (!list_empty(&graveyard)) {
mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
touch_mnt_namespace(mnt->mnt_ns);
- umount_tree(mnt, 1);
+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
unlock_mount_hash();
namespace_unlock();
@@ -2477,7 +2527,7 @@ static void shrink_submounts(struct mount *mnt)
m = list_first_entry(&graveyard, struct mount,
mnt_expire);
touch_mnt_namespace(m->mnt_ns);
- umount_tree(m, 1);
+ umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
}
}
@@ -3129,6 +3179,12 @@ bool fs_fully_visible(struct file_system_type *type)
if (mnt->mnt.mnt_sb->s_type != type)
continue;
+ /* This mount is not fully visible if its root directory
+ * is not the root directory of the filesystem.
+ */
+ if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+ continue;
+
/* This mount is not fully visible if there are any child mounts
* that cover anything except for empty directories.
*/
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index e7ca827d7694..80021c709af9 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -127,7 +127,7 @@ static inline int ncp_case_sensitive(const struct inode *i)
static int
ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
{
- struct inode *inode = ACCESS_ONCE(dentry->d_inode);
+ struct inode *inode = d_inode_rcu(dentry);
if (!inode)
return 0;
@@ -162,7 +162,7 @@ ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
if (len != name->len)
return 1;
- pinode = ACCESS_ONCE(parent->d_inode);
+ pinode = d_inode_rcu(parent);
if (!pinode)
return 1;
@@ -180,7 +180,7 @@ ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
static int
ncp_delete_dentry(const struct dentry * dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (inode) {
if (is_bad_inode(inode))
@@ -224,7 +224,7 @@ ncp_force_unlink(struct inode *dir, struct dentry* dentry)
memset(&info, 0, sizeof(info));
/* remove the Read-Only flag on the NW server */
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
old_nwattr = NCP_FINFO(inode)->nwattr;
info.attributes = old_nwattr & ~(aRONLY|aDELETEINHIBIT|aRENAMEINHIBIT);
@@ -254,7 +254,7 @@ ncp_force_rename(struct inode *old_dir, struct dentry* old_dentry, char *_old_na
{
struct nw_modify_dos_info info;
int res=0x90,res2;
- struct inode *old_inode = old_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
__le32 old_nwattr = NCP_FINFO(old_inode)->nwattr;
__le32 new_nwattr = 0; /* shut compiler warning */
int old_nwattr_changed = 0;
@@ -268,8 +268,8 @@ ncp_force_rename(struct inode *old_dir, struct dentry* old_dentry, char *_old_na
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info);
if (!res2)
old_nwattr_changed = 1;
- if (new_dentry && new_dentry->d_inode) {
- new_nwattr = NCP_FINFO(new_dentry->d_inode)->nwattr;
+ if (new_dentry && d_really_is_positive(new_dentry)) {
+ new_nwattr = NCP_FINFO(d_inode(new_dentry))->nwattr;
info.attributes = new_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info);
if (!res2)
@@ -324,9 +324,9 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
parent = dget_parent(dentry);
- dir = parent->d_inode;
+ dir = d_inode(parent);
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
goto finished;
server = NCP_SERVER(dir);
@@ -367,7 +367,7 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
* what we remember, it's not valid any more.
*/
if (!res) {
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
mutex_lock(&inode->i_mutex);
if (finfo.i.dirEntNum == NCP_FINFO(inode)->dirEntNum) {
@@ -388,7 +388,7 @@ finished:
static time_t ncp_obtain_mtime(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ncp_server *server = NCP_SERVER(inode);
struct nw_info_struct i;
@@ -404,7 +404,7 @@ static time_t ncp_obtain_mtime(struct dentry *dentry)
static inline void
ncp_invalidate_dircache_entries(struct dentry *parent)
{
- struct ncp_server *server = NCP_SERVER(parent->d_inode);
+ struct ncp_server *server = NCP_SERVER(d_inode(parent));
struct dentry *dentry;
spin_lock(&parent->d_lock);
@@ -418,7 +418,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
static int ncp_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct page *page = NULL;
struct ncp_server *server = NCP_SERVER(inode);
union ncp_dir_cache *cache = NULL;
@@ -491,13 +491,13 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
goto invalid_cache;
}
spin_unlock(&dentry->d_lock);
- if (!dent->d_inode) {
+ if (d_really_is_negative(dent)) {
dput(dent);
goto invalid_cache;
}
over = !dir_emit(ctx, dent->d_name.name,
dent->d_name.len,
- dent->d_inode->i_ino, DT_UNKNOWN);
+ d_inode(dent)->i_ino, DT_UNKNOWN);
dput(dent);
if (over)
goto finished;
@@ -571,7 +571,7 @@ static void ncp_d_prune(struct dentry *dentry)
{
if (!dentry->d_fsdata) /* not referenced from page cache */
return;
- NCP_FINFO(dentry->d_parent->d_inode)->flags &= ~NCPI_DIR_CACHE;
+ NCP_FINFO(d_inode(dentry->d_parent))->flags &= ~NCPI_DIR_CACHE;
}
static int
@@ -580,7 +580,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
int inval_childs)
{
struct dentry *newdent, *dentry = file->f_path.dentry;
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
struct ncp_cache_control ctl = *ctrl;
struct qstr qname;
int valid = 0;
@@ -621,7 +621,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
dentry_update_name_case(newdent, &qname);
}
- if (!newdent->d_inode) {
+ if (d_really_is_negative(newdent)) {
struct inode *inode;
entry->opened = 0;
@@ -637,7 +637,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
spin_unlock(&dentry->d_lock);
}
} else {
- struct inode *inode = newdent->d_inode;
+ struct inode *inode = d_inode(newdent);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
ncp_update_inode2(inode, entry);
@@ -659,10 +659,10 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
ctl.cache = kmap(ctl.page);
}
if (ctl.cache) {
- if (newdent->d_inode) {
+ if (d_really_is_positive(newdent)) {
newdent->d_fsdata = newdent;
ctl.cache->dentry[ctl.idx] = newdent;
- ino = newdent->d_inode->i_ino;
+ ino = d_inode(newdent)->i_ino;
ncp_new_dentry(newdent);
}
valid = 1;
@@ -807,7 +807,7 @@ int ncp_conn_logged_in(struct super_block *sb)
}
dent = sb->s_root;
if (dent) {
- struct inode* ino = dent->d_inode;
+ struct inode* ino = d_inode(dent);
if (ino) {
ncp_update_known_namespace(server, volNumber, NULL);
NCP_FINFO(ino)->volNumber = volNumber;
@@ -815,7 +815,7 @@ int ncp_conn_logged_in(struct super_block *sb)
NCP_FINFO(ino)->DosDirNum = DosDirNum;
result = 0;
} else {
- ncp_dbg(1, "sb->s_root->d_inode == NULL!\n");
+ ncp_dbg(1, "d_inode(sb->s_root) == NULL!\n");
}
} else {
ncp_dbg(1, "sb->s_root == NULL!\n");
@@ -1055,7 +1055,7 @@ out:
static int ncp_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ncp_server *server;
int error;
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 1dd7007f974d..011324ce9df2 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -98,30 +98,24 @@ out:
}
static ssize_t
-ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ncp_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
size_t already_read = 0;
- off_t pos;
+ off_t pos = iocb->ki_pos;
size_t bufsize;
int error;
- void* freepage;
+ void *freepage;
size_t freelen;
ncp_dbg(1, "enter %pD2\n", file);
- pos = *ppos;
-
- if ((ssize_t) count < 0) {
- return -EINVAL;
- }
- if (!count)
+ if (!iov_iter_count(to))
return 0;
if (pos > inode->i_sb->s_maxbytes)
return 0;
- if (pos + count > inode->i_sb->s_maxbytes) {
- count = inode->i_sb->s_maxbytes - pos;
- }
+ iov_iter_truncate(to, inode->i_sb->s_maxbytes - pos);
error = ncp_make_open(inode, O_RDONLY);
if (error) {
@@ -138,31 +132,29 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
goto outrel;
error = 0;
/* First read in as much as possible for each bufsize. */
- while (already_read < count) {
+ while (iov_iter_count(to)) {
int read_this_time;
- size_t to_read = min_t(unsigned int,
+ size_t to_read = min_t(size_t,
bufsize - (pos % bufsize),
- count - already_read);
+ iov_iter_count(to));
error = ncp_read_bounce(NCP_SERVER(inode),
NCP_FINFO(inode)->file_handle,
- pos, to_read, buf, &read_this_time,
+ pos, to_read, to, &read_this_time,
freepage, freelen);
if (error) {
error = -EIO; /* NW errno -> Linux errno */
break;
}
pos += read_this_time;
- buf += read_this_time;
already_read += read_this_time;
- if (read_this_time != to_read) {
+ if (read_this_time != to_read)
break;
- }
}
vfree(freepage);
- *ppos = pos;
+ iocb->ki_pos = pos;
file_accessed(file);
@@ -173,42 +165,21 @@ outrel:
}
static ssize_t
-ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
size_t already_written = 0;
- off_t pos;
size_t bufsize;
int errno;
- void* bouncebuffer;
+ void *bouncebuffer;
+ off_t pos;
ncp_dbg(1, "enter %pD2\n", file);
- if ((ssize_t) count < 0)
- return -EINVAL;
- pos = *ppos;
- if (file->f_flags & O_APPEND) {
- pos = i_size_read(inode);
- }
+ errno = generic_write_checks(iocb, from);
+ if (errno <= 0)
+ return errno;
- if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
- if (pos >= MAX_NON_LFS) {
- return -EFBIG;
- }
- if (count > MAX_NON_LFS - (u32)pos) {
- count = MAX_NON_LFS - (u32)pos;
- }
- }
- if (pos >= inode->i_sb->s_maxbytes) {
- if (count || pos > inode->i_sb->s_maxbytes) {
- return -EFBIG;
- }
- }
- if (pos + count > inode->i_sb->s_maxbytes) {
- count = inode->i_sb->s_maxbytes - pos;
- }
-
- if (!count)
- return 0;
errno = ncp_make_open(inode, O_WRONLY);
if (errno) {
ncp_dbg(1, "open failed, error=%d\n", errno);
@@ -216,8 +187,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
}
bufsize = NCP_SERVER(inode)->buffer_size;
- already_written = 0;
-
errno = file_update_time(file);
if (errno)
goto outrel;
@@ -227,13 +196,14 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
errno = -EIO; /* -ENOMEM */
goto outrel;
}
- while (already_written < count) {
+ pos = iocb->ki_pos;
+ while (iov_iter_count(from)) {
int written_this_time;
- size_t to_write = min_t(unsigned int,
+ size_t to_write = min_t(size_t,
bufsize - (pos % bufsize),
- count - already_written);
+ iov_iter_count(from));
- if (copy_from_user(bouncebuffer, buf, to_write)) {
+ if (copy_from_iter(bouncebuffer, to_write, from) != to_write) {
errno = -EFAULT;
break;
}
@@ -244,16 +214,14 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
break;
}
pos += written_this_time;
- buf += written_this_time;
already_written += written_this_time;
- if (written_this_time != to_write) {
+ if (written_this_time != to_write)
break;
- }
}
vfree(bouncebuffer);
- *ppos = pos;
+ iocb->ki_pos = pos;
if (pos > i_size_read(inode)) {
mutex_lock(&inode->i_mutex);
@@ -277,8 +245,8 @@ static int ncp_release(struct inode *inode, struct file *file) {
const struct file_operations ncp_file_operations =
{
.llseek = generic_file_llseek,
- .read = ncp_file_read,
- .write = ncp_file_write,
+ .read_iter = ncp_file_read_iter,
+ .write_iter = ncp_file_write_iter,
.unlocked_ioctl = ncp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ncp_compat_ioctl,
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 01a9e16e9782..9605a2f63549 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -812,7 +812,7 @@ static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf)
if (!d) {
goto dflt;
}
- i = d->d_inode;
+ i = d_inode(d);
if (!i) {
goto dflt;
}
@@ -865,7 +865,7 @@ dflt:;
int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int result = 0;
__le32 info_mask;
struct nw_modify_dos_info info;
@@ -878,7 +878,7 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
goto out;
result = -EPERM;
- if (IS_DEADDIR(dentry->d_inode))
+ if (IS_DEADDIR(d_inode(dentry)))
goto out;
/* ageing the dentry to force validation */
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index cf7e043a9447..79b113048eac 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -376,7 +376,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
struct dentry* dentry = inode->i_sb->s_root;
if (dentry) {
- struct inode* s_inode = dentry->d_inode;
+ struct inode* s_inode = d_inode(dentry);
if (s_inode) {
sr.volNumber = NCP_FINFO(s_inode)->volNumber;
@@ -384,7 +384,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
sr.namespace = server->name_space[sr.volNumber];
result = 0;
} else
- ncp_dbg(1, "s_root->d_inode==NULL\n");
+ ncp_dbg(1, "d_inode(s_root)==NULL\n");
} else
ncp_dbg(1, "s_root==NULL\n");
} else {
@@ -431,7 +431,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
if (result == 0) {
dentry = inode->i_sb->s_root;
if (dentry) {
- struct inode* s_inode = dentry->d_inode;
+ struct inode* s_inode = d_inode(dentry);
if (s_inode) {
NCP_FINFO(s_inode)->volNumber = vnum;
@@ -439,7 +439,7 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
NCP_FINFO(s_inode)->DosDirNum = dosde;
server->root_setuped = 1;
} else {
- ncp_dbg(1, "s_root->d_inode==NULL\n");
+ ncp_dbg(1, "d_inode(s_root)==NULL\n");
result = -EIO;
}
} else {
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 482387532f54..88dbbc9fcf4d 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -727,7 +727,7 @@ int
ncp_del_file_or_subdir2(struct ncp_server *server,
struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
__u8 volnum;
__le32 dirent;
@@ -1001,8 +1001,8 @@ out:
*/
int
ncp_read_bounce(struct ncp_server *server, const char *file_id,
- __u32 offset, __u16 to_read, char __user *target, int *bytes_read,
- void* bounce, __u32 bufsize)
+ __u32 offset, __u16 to_read, struct iov_iter *to,
+ int *bytes_read, void *bounce, __u32 bufsize)
{
int result;
@@ -1025,7 +1025,7 @@ ncp_read_bounce(struct ncp_server *server, const char *file_id,
(offset & 1);
*bytes_read = len;
result = 0;
- if (copy_to_user(target, source, len))
+ if (copy_to_iter(source, len, to) != len)
result = -EFAULT;
}
}
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 250e443a07f3..5233fbc1747a 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -53,7 +53,7 @@ static inline int ncp_read_bounce_size(__u32 size) {
return sizeof(struct ncp_reply_header) + 2 + 2 + size + 8;
};
int ncp_read_bounce(struct ncp_server *, const char *, __u32, __u16,
- char __user *, int *, void* bounce, __u32 bouncelen);
+ struct iov_iter *, int *, void *bounce, __u32 bouncelen);
int ncp_read_kernel(struct ncp_server *, const char *, __u32, __u16,
char *, int *);
int ncp_write_kernel(struct ncp_server *, const char *, __u32, __u16,
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index 1a63bfdb4a65..421b6f91e8ec 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -156,7 +156,7 @@ int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname) {
goto failfree;
}
- inode=dentry->d_inode;
+ inode=d_inode(dentry);
if (ncp_make_open(inode, O_WRONLY))
goto failfree;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index c7abc10279af..f31fd0dd92c6 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,6 +1,6 @@
config NFS_FS
tristate "NFS client support"
- depends on INET && FILE_LOCKING
+ depends on INET && FILE_LOCKING && MULTIUSER
select LOCKD
select SUNRPC
select NFS_ACL_SUPPORT if NFS_V3_ACL
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 1e987acf20c9..8664417955a2 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -22,7 +22,7 @@ nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
obj-$(CONFIG_NFS_V4) += nfsv4.o
CFLAGS_nfs4trace.o += -I$(src)
nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
- delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
+ delegation.o nfs4idmap.o callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o nfs4getroot.o nfs4client.o nfs4session.o \
dns_resolve.o nfs4trace.o
nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 1cac3c175d18..d2554fe140a3 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -890,6 +890,7 @@ static struct pnfs_layoutdriver_type blocklayout_type = {
.free_deviceid_node = bl_free_deviceid_node,
.pg_read_ops = &bl_pg_read_ops,
.pg_write_ops = &bl_pg_write_ops,
+ .sync = pnfs_generic_sync,
};
static int __init nfs4blocklayout_init(void)
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 5aed4f98df41..e535599a0719 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -33,7 +33,7 @@ bl_free_deviceid_node(struct nfs4_deviceid_node *d)
container_of(d, struct pnfs_block_dev, node);
bl_free_device(dev);
- kfree(dev);
+ kfree_rcu(dev, node.rcu);
}
static int
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 351be9205bf8..8d129bb7355a 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp)
if (try_to_freeze())
continue;
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
if (!list_empty(&serv->sv_cb_list)) {
req = list_first_entry(&serv->sv_cb_list,
@@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp)
error);
} else {
spin_unlock_bh(&serv->sv_cb_lock);
- /* schedule_timeout to game the hung task watchdog */
- schedule_timeout(60 * HZ);
+ schedule();
finish_wait(&serv->sv_cb_waitq, &wq);
}
+ flush_signals(current);
}
return 0;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 19874151e95c..892aefff3630 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -31,7 +31,6 @@
#include <linux/lockd/bind.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
-#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/in6.h>
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index a6ad68865880..029d688a969f 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -378,7 +378,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
if (freeme == NULL)
goto out;
}
- list_add_rcu(&delegation->super_list, &server->delegations);
+ list_add_tail_rcu(&delegation->super_list, &server->delegations);
rcu_assign_pointer(nfsi->delegation, delegation);
delegation = NULL;
@@ -514,7 +514,7 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
delegation = nfs_inode_detach_delegation(inode);
if (delegation != NULL)
- nfs_do_return_delegation(inode, delegation, 0);
+ nfs_do_return_delegation(inode, delegation, 1);
}
/**
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c19e16f0b2d0..b2c8b31b2be7 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -416,15 +416,14 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
{
struct nfs_inode *nfsi;
- if (dentry->d_inode == NULL)
- goto different;
+ if (d_really_is_negative(dentry))
+ return 0;
- nfsi = NFS_I(dentry->d_inode);
+ nfsi = NFS_I(d_inode(dentry));
if (entry->fattr->fileid == nfsi->fileid)
return 1;
if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
return 1;
-different:
return 0;
}
@@ -473,7 +472,7 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
struct qstr filename = QSTR_INIT(entry->name, entry->len);
struct dentry *dentry;
struct dentry *alias;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct inode *inode;
int status;
@@ -497,9 +496,9 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
goto out;
if (nfs_same_file(dentry, entry)) {
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- status = nfs_refresh_inode(dentry->d_inode, entry->fattr);
+ status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
if (!status)
- nfs_setsecurity(dentry->d_inode, entry->fattr, entry->label);
+ nfs_setsecurity(d_inode(dentry), entry->fattr, entry->label);
goto out;
} else {
d_invalidate(dentry);
@@ -544,6 +543,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
if (scratch == NULL)
return -ENOMEM;
+ if (buflen == 0)
+ goto out_nopages;
+
xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
@@ -565,6 +567,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
break;
} while (!entry->eof);
+out_nopages:
if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
array = nfs_readdir_get_array(page);
if (!IS_ERR(array)) {
@@ -870,7 +873,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
struct nfs_open_dir_context *dir_ctx = file->private_data;
@@ -1118,15 +1121,15 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU) {
parent = ACCESS_ONCE(dentry->d_parent);
- dir = ACCESS_ONCE(parent->d_inode);
+ dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
} else {
parent = dget_parent(dentry);
- dir = parent->d_inode;
+ dir = d_inode(parent);
}
nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (!inode) {
if (nfs_neg_need_reval(dir, dentry, flags)) {
@@ -1242,7 +1245,7 @@ out_error:
}
/*
- * A weaker form of d_revalidate for revalidating just the dentry->d_inode
+ * A weaker form of d_revalidate for revalidating just the d_inode(dentry)
* when we don't really care about the dentry name. This is called when a
* pathwalk ends on a dentry that was not found via a normal lookup in the
* parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals).
@@ -1253,7 +1256,7 @@ out_error:
static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
{
int error;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
/*
* I believe we can only get a negative dentry here in the case of a
@@ -1287,7 +1290,7 @@ static int nfs_dentry_delete(const struct dentry *dentry)
dentry, dentry->d_flags);
/* Unhash any dentry with a stale inode */
- if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
+ if (d_really_is_positive(dentry) && NFS_STALE(d_inode(dentry)))
return 1;
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
@@ -1491,7 +1494,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
int err;
/* Expect a negative dentry */
- BUG_ON(dentry->d_inode);
+ BUG_ON(d_inode(dentry));
dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n",
dir->i_sb->s_id, dir->i_ino, dentry);
@@ -1587,7 +1590,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
goto no_open;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
/* We can't create new files in nfs_open_revalidate(), so we
* optimize away revalidation of negative dentries.
@@ -1598,12 +1601,12 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU) {
parent = ACCESS_ONCE(dentry->d_parent);
- dir = ACCESS_ONCE(parent->d_inode);
+ dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
} else {
parent = dget_parent(dentry);
- dir = parent->d_inode;
+ dir = d_inode(parent);
}
if (!nfs_neg_need_reval(dir, dentry, flags))
ret = 1;
@@ -1643,14 +1646,14 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct nfs4_label *label)
{
struct dentry *parent = dget_parent(dentry);
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct inode *inode;
int error = -EACCES;
d_drop(dentry);
/* We may have been initialized further down */
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
goto out;
if (fhandle->size == 0) {
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, NULL);
@@ -1768,7 +1771,7 @@ EXPORT_SYMBOL_GPL(nfs_mkdir);
static void nfs_dentry_handle_enoent(struct dentry *dentry)
{
- if (dentry->d_inode != NULL && !d_unhashed(dentry))
+ if (d_really_is_positive(dentry) && !d_unhashed(dentry))
d_delete(dentry);
}
@@ -1780,13 +1783,13 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
dir->i_sb->s_id, dir->i_ino, dentry);
trace_nfs_rmdir_enter(dir, dentry);
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
nfs_wait_on_sillyrename(dentry);
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
case 0:
- clear_nlink(dentry->d_inode);
+ clear_nlink(d_inode(dentry));
break;
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
@@ -1808,8 +1811,8 @@ EXPORT_SYMBOL_GPL(nfs_rmdir);
*/
static int nfs_safe_remove(struct dentry *dentry)
{
- struct inode *dir = dentry->d_parent->d_inode;
- struct inode *inode = dentry->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
+ struct inode *inode = d_inode(dentry);
int error = -EBUSY;
dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);
@@ -1853,7 +1856,7 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
if (d_count(dentry) > 1) {
spin_unlock(&dentry->d_lock);
/* Start asynchronous writeout of the inode */
- write_inode_now(dentry->d_inode, 0);
+ write_inode_now(d_inode(dentry), 0);
error = nfs_sillyrename(dir, dentry);
goto out;
}
@@ -1931,7 +1934,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
* No big deal if we can't add this page to the page cache here.
* READLINK will get the missing page from the server if needed.
*/
- if (!add_to_page_cache_lru(page, dentry->d_inode->i_mapping, 0,
+ if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
GFP_KERNEL)) {
SetPageUptodate(page);
unlock_page(page);
@@ -1950,7 +1953,7 @@ EXPORT_SYMBOL_GPL(nfs_symlink);
int
nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int error;
dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
@@ -1997,8 +2000,8 @@ EXPORT_SYMBOL_GPL(nfs_link);
int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct dentry *dentry = NULL, *rehash = NULL;
struct rpc_task *task;
int error = -EBUSY;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e907c8cf732e..38678d9a5cc4 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
int i;
ssize_t count;
- WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
-
- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
- }
-
- /* update the dreq->count by finding the minimum agreed count from all
- * mirrors */
- count = dreq->mirrors[0].count;
+ if (dreq->mirror_count == 1) {
+ dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
+ dreq->count += hdr->good_bytes;
+ } else {
+ /* mirrored writes */
+ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
+ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
+ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
+ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
+ }
+ /* update the dreq->count by finding the minimum agreed count from all
+ * mirrors */
+ count = dreq->mirrors[0].count;
- for (i = 1; i < dreq->mirror_count; i++)
- count = min(count, dreq->mirrors[i].count);
+ for (i = 1; i < dreq->mirror_count; i++)
+ count = min(count, dreq->mirrors[i].count);
- dreq->count = count;
+ dreq->count = count;
+ }
}
/*
@@ -240,7 +243,6 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
/**
* nfs_direct_IO - NFS address space operation for direct I/O
- * @rw: direction (read or write)
* @iocb: target I/O control block
* @iov: array of vectors that define I/O buffer
* @pos: offset in file to begin the operation
@@ -251,7 +253,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
* shunt off direct read and write requests before the VFS gets them,
* so this method is only ever called for swap.
*/
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -259,18 +261,11 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t
if (!IS_SWAPFILE(inode))
return 0;
-#ifndef CONFIG_NFS_SWAP
- dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
- iocb->ki_filp, (long long) pos, iter->nr_segs);
+ VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
- return -EINVAL;
-#else
- VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
-
- if (rw == READ)
+ if (iov_iter_rw(iter) == READ)
return nfs_file_direct_read(iocb, iter, pos);
- return nfs_file_direct_write(iocb, iter, pos);
-#endif /* CONFIG_NFS_SWAP */
+ return nfs_file_direct_write(iocb, iter);
}
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -387,13 +382,13 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
if (write)
nfs_zap_mapping(inode, inode->i_mapping);
- inode_dio_done(inode);
+ inode_dio_end(inode);
if (dreq->iocb) {
long res = (long) dreq->error;
if (!res)
res = (long) dreq->count;
- aio_complete(dreq->iocb, res, 0);
+ dreq->iocb->ki_complete(dreq->iocb, res, 0);
}
complete_all(&dreq->completion);
@@ -404,8 +399,8 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
static void nfs_direct_readpage_release(struct nfs_page *req)
{
dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
- req->wb_context->dentry->d_inode->i_sb->s_id,
- (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+ d_inode(req->wb_context->dentry)->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
req->wb_bytes,
(long long)req_offset(req));
nfs_release_request(req);
@@ -487,7 +482,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
&nfs_direct_read_completion_ops);
get_dreq(dreq);
desc.pg_dreq = dreq;
- atomic_inc(&inode->i_dio_count);
+ inode_dio_begin(inode);
while (iov_iter_count(iter)) {
struct page **pagevec;
@@ -539,7 +534,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- inode_dio_done(inode);
+ inode_dio_end(inode);
nfs_direct_req_release(dreq);
return result < 0 ? result : -EIO;
}
@@ -873,7 +868,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
get_dreq(dreq);
- atomic_inc(&inode->i_dio_count);
+ inode_dio_begin(inode);
NFS_I(inode)->write_io += iov_iter_count(iter);
while (iov_iter_count(iter)) {
@@ -929,7 +924,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- inode_dio_done(inode);
+ inode_dio_end(inode);
nfs_direct_req_release(dreq);
return result < 0 ? result : -EIO;
}
@@ -960,8 +955,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
* Note that O_APPEND is not supported for NFS direct writes, as there
* is no atomic O_APPEND write facility in the NFS protocol.
*/
-ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
- loff_t pos)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t result = -EINVAL;
struct file *file = iocb->ki_filp;
@@ -969,25 +963,16 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
struct inode *inode = mapping->host;
struct nfs_direct_req *dreq;
struct nfs_lock_context *l_ctx;
- loff_t end;
- size_t count = iov_iter_count(iter);
- end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-
- nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
+ loff_t pos, end;
dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
- file, count, (long long) pos);
+ file, iov_iter_count(iter), (long long) iocb->ki_pos);
- result = generic_write_checks(file, &pos, &count, 0);
- if (result)
- goto out;
+ nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES,
+ iov_iter_count(iter));
- result = -EINVAL;
- if ((ssize_t) count < 0)
- goto out;
- result = 0;
- if (!count)
- goto out;
+ pos = iocb->ki_pos;
+ end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;
mutex_lock(&inode->i_mutex);
@@ -1002,7 +987,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
goto out_unlock;
}
- task_io_account_write(count);
+ task_io_account_write(iov_iter_count(iter));
result = -ENOMEM;
dreq = nfs_direct_req_alloc();
@@ -1010,7 +995,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
goto out_unlock;
dreq->inode = inode;
- dreq->bytes_left = count;
+ dreq->bytes_left = iov_iter_count(iter);
dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
@@ -1041,6 +1026,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
if (i_size_read(inode) < iocb->ki_pos)
i_size_write(inode, iocb->ki_pos);
spin_unlock(&inode->i_lock);
+ generic_write_sync(file, pos, result);
}
}
nfs_direct_req_release(dreq);
@@ -1050,7 +1036,6 @@ out_release:
nfs_direct_req_release(dreq);
out_unlock:
mutex_unlock(&inode->i_mutex);
-out:
return result;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index e679d24c39d3..8b8d83a526ce 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -26,7 +26,6 @@
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
-#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/swap.h>
@@ -171,7 +170,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t result;
- if (iocb->ki_filp->f_flags & O_DIRECT)
+ if (iocb->ki_flags & IOCB_DIRECT)
return nfs_file_direct_read(iocb, to, iocb->ki_pos);
dprintk("NFS: read(%pD2, %zu@%lu)\n",
@@ -281,6 +280,7 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
trace_nfs_fsync_enter(inode);
+ nfs_inode_dio_wait(inode);
do {
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret != 0)
@@ -675,17 +675,20 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
unsigned long written = 0;
ssize_t result;
size_t count = iov_iter_count(from);
- loff_t pos = iocb->ki_pos;
result = nfs_key_timeout_notify(file, inode);
if (result)
return result;
- if (file->f_flags & O_DIRECT)
- return nfs_file_direct_write(iocb, from, pos);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ result = generic_write_checks(iocb, from);
+ if (result <= 0)
+ return result;
+ return nfs_file_direct_write(iocb, from);
+ }
dprintk("NFS: write(%pD2, %zu@%Ld)\n",
- file, count, (long long) pos);
+ file, count, (long long) iocb->ki_pos);
result = -EBUSY;
if (IS_SWAPFILE(inode))
@@ -693,7 +696,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
/*
* O_APPEND implies that we must revalidate the file length.
*/
- if (file->f_flags & O_APPEND) {
+ if (iocb->ki_flags & IOCB_APPEND) {
result = nfs_revalidate_file_size(inode, file);
if (result)
goto out;
@@ -780,7 +783,7 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
* Flush all pending writes before doing anything
* with locks..
*/
- nfs_sync_mapping(filp->f_mapping);
+ vfs_fsync(filp, 0);
l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
if (!IS_ERR(l_ctx)) {
@@ -927,8 +930,6 @@ EXPORT_SYMBOL_GPL(nfs_flock);
const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
.mmap = nfs_file_mmap,
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 91e88a7ecef0..a46bf6de9ce4 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -258,7 +258,8 @@ filelayout_set_layoutcommit(struct nfs_pgio_header *hdr)
hdr->res.verf->committed != NFS_DATA_SYNC)
return;
- pnfs_set_layoutcommit(hdr);
+ pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
+ hdr->mds_offset + hdr->res.count);
dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
@@ -373,7 +374,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
}
if (data->verf.committed == NFS_UNSTABLE)
- pnfs_commit_set_layoutcommit(data);
+ pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
return 0;
}
@@ -1086,7 +1087,7 @@ filelayout_alloc_deviceid_node(struct nfs_server *server,
}
static void
-filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
+filelayout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
}
@@ -1137,7 +1138,8 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
.alloc_deviceid_node = filelayout_alloc_deviceid_node,
- .free_deviceid_node = filelayout_free_deveiceid_node,
+ .free_deviceid_node = filelayout_free_deviceid_node,
+ .sync = pnfs_nfs_generic_sync,
};
static int __init nfs4filelayout_init(void)
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index 4f372e224603..4946ef40ba87 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -55,7 +55,7 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
nfs4_pnfs_ds_put(ds);
}
kfree(dsaddr->stripe_indices);
- kfree(dsaddr);
+ kfree_rcu(dsaddr, id_node.rcu);
}
/* Decode opaque device data and return the result */
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 315cc68945b9..7d05089e52d6 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -11,10 +11,10 @@
#include <linux/module.h>
#include <linux/sunrpc/metrics.h>
-#include <linux/nfs_idmap.h>
#include "flexfilelayout.h"
#include "../nfs4session.h"
+#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
@@ -891,7 +891,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
static void
ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
- pnfs_set_layoutcommit(hdr);
+ pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
+ hdr->mds_offset + hdr->res.count);
dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
@@ -1074,7 +1075,7 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
}
if (data->verf.committed == NFS_UNSTABLE)
- pnfs_commit_set_layoutcommit(data);
+ pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
return 0;
}
@@ -1414,7 +1415,7 @@ ff_layout_get_ds_info(struct inode *inode)
}
static void
-ff_layout_free_deveiceid_node(struct nfs4_deviceid_node *d)
+ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
id_node));
@@ -1498,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
.pg_read_ops = &ff_layout_pg_read_ops,
.pg_write_ops = &ff_layout_pg_write_ops,
.get_ds_info = ff_layout_get_ds_info,
- .free_deviceid_node = ff_layout_free_deveiceid_node,
+ .free_deviceid_node = ff_layout_free_deviceid_node,
.mark_request_commit = pnfs_layout_mark_request_commit,
.clear_request_commit = pnfs_generic_clear_request_commit,
.scan_commit_lists = pnfs_generic_scan_commit_lists,
@@ -1508,6 +1509,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
.write_pagelist = ff_layout_write_pagelist,
.alloc_deviceid_node = ff_layout_alloc_deviceid_node,
.encode_layoutreturn = ff_layout_encode_layoutreturn,
+ .sync = pnfs_nfs_generic_sync,
};
static int __init nfs4flexfilelayout_init(void)
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e2c01f204a95..77a2d026aa12 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -30,7 +30,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
nfs4_pnfs_ds_put(mirror_ds->ds);
- kfree(mirror_ds);
+ kfree_rcu(mirror_ds, id_node.rcu);
}
/* Decode opaque device data and construct new_ds using it */
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 9ac3846cb59e..a608ffd28acc 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -56,11 +56,11 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
* This again causes shrink_dcache_for_umount_subtree() to
* Oops, since the test for IS_ROOT() will fail.
*/
- spin_lock(&sb->s_root->d_inode->i_lock);
+ spin_lock(&d_inode(sb->s_root)->i_lock);
spin_lock(&sb->s_root->d_lock);
hlist_del_init(&sb->s_root->d_u.d_alias);
spin_unlock(&sb->s_root->d_lock);
- spin_unlock(&sb->s_root->d_inode->i_lock);
+ spin_unlock(&d_inode(sb->s_root)->i_lock);
}
return 0;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d42dff6d5e98..f734562c6d24 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -133,6 +133,13 @@ void nfs_evict_inode(struct inode *inode)
nfs_clear_inode(inode);
}
+int nfs_sync_inode(struct inode *inode)
+{
+ nfs_inode_dio_wait(inode);
+ return nfs_wb_all(inode);
+}
+EXPORT_SYMBOL_GPL(nfs_sync_inode);
+
/**
* nfs_sync_mapping - helper to flush all mmapped dirty data to disk
*/
@@ -192,7 +199,6 @@ void nfs_zap_caches(struct inode *inode)
nfs_zap_caches_locked(inode);
spin_unlock(&inode->i_lock);
}
-EXPORT_SYMBOL_GPL(nfs_zap_caches);
void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
{
@@ -495,7 +501,7 @@ EXPORT_SYMBOL_GPL(nfs_fhget);
int
nfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
int error = -ENOMEM;
@@ -525,10 +531,8 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
trace_nfs_setattr_enter(inode);
/* Write all dirty data */
- if (S_ISREG(inode->i_mode)) {
- nfs_inode_dio_wait(inode);
- nfs_wb_all(inode);
- }
+ if (S_ISREG(inode->i_mode))
+ nfs_sync_inode(inode);
fattr = nfs_alloc_fattr();
if (fattr == NULL)
@@ -621,7 +625,7 @@ static void nfs_request_parent_use_readdirplus(struct dentry *dentry)
struct dentry *parent;
parent = dget_parent(dentry);
- nfs_force_use_readdirplus(parent->d_inode);
+ nfs_force_use_readdirplus(d_inode(parent));
dput(parent);
}
@@ -637,15 +641,16 @@ static bool nfs_need_revalidate_inode(struct inode *inode)
int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
int err = 0;
trace_nfs_getattr_enter(inode);
/* Flush out writes to the server in order to update c/mtime. */
if (S_ISREG(inode->i_mode)) {
- nfs_inode_dio_wait(inode);
- err = filemap_write_and_wait(inode->i_mapping);
+ mutex_lock(&inode->i_mutex);
+ err = nfs_sync_inode(inode);
+ mutex_unlock(&inode->i_mutex);
if (err)
goto out;
}
@@ -708,7 +713,7 @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context
struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
{
struct nfs_lock_context *res, *new = NULL;
- struct inode *inode = ctx->dentry->d_inode;
+ struct inode *inode = d_inode(ctx->dentry);
spin_lock(&inode->i_lock);
res = __nfs_find_lock_context(ctx);
@@ -736,7 +741,7 @@ EXPORT_SYMBOL_GPL(nfs_get_lock_context);
void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
{
struct nfs_open_context *ctx = l_ctx->open_context;
- struct inode *inode = ctx->dentry->d_inode;
+ struct inode *inode = d_inode(ctx->dentry);
if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
return;
@@ -763,7 +768,7 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
return;
if (!is_sync)
return;
- inode = ctx->dentry->d_inode;
+ inode = d_inode(ctx->dentry);
if (!list_empty(&NFS_I(inode)->open_files))
return;
server = NFS_SERVER(inode);
@@ -810,7 +815,7 @@ EXPORT_SYMBOL_GPL(get_nfs_open_context);
static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
{
- struct inode *inode = ctx->dentry->d_inode;
+ struct inode *inode = d_inode(ctx->dentry);
struct super_block *sb = ctx->dentry->d_sb;
if (!list_empty(&ctx->list)) {
@@ -842,7 +847,7 @@ EXPORT_SYMBOL_GPL(put_nfs_open_context);
*/
void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
{
- struct inode *inode = ctx->dentry->d_inode;
+ struct inode *inode = d_inode(ctx->dentry);
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
@@ -885,7 +890,7 @@ static void nfs_file_clear_open_context(struct file *filp)
struct nfs_open_context *ctx = nfs_file_open_context(filp);
if (ctx) {
- struct inode *inode = ctx->dentry->d_inode;
+ struct inode *inode = d_inode(ctx->dentry);
filp->private_data = NULL;
spin_lock(&inode->i_lock);
@@ -1588,6 +1593,19 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
}
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
+
+static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
+ struct nfs_fattr *fattr)
+{
+ bool ret1 = true, ret2 = true;
+
+ if (fattr->valid & NFS_ATTR_FATTR_FILEID)
+ ret1 = (nfsi->fileid == fattr->fileid);
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
+ return ret1 || ret2;
+}
+
/*
* Many nfs protocol calls return the new file attributes after
* an operation. Here we update the inode to reflect the state
@@ -1614,7 +1632,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) {
+ if (!nfs_fileid_valid(nfsi, fattr)) {
printk(KERN_ERR "NFS: server %s error: fileid changed\n"
"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
NFS_SERVER(inode)->nfs_client->cl_hostname,
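
[Editor's note] The new nfs_fileid_valid() helper relaxes the "fileid changed" sanity check in nfs_update_inode(): the attributes are accepted if either the fileid or, where the server supplies one, the mounted-on fileid matches the cached value, which matters when crossing into submounts and referrals. A simplified, compilable model of the predicate (constants and names here are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ATTR_FILEID            0x1
#define ATTR_MOUNTED_ON_FILEID 0x2

/* Accept the attributes if either supplied fileid matches the cached one. */
static bool fileid_valid(uint64_t cached, unsigned valid,
			 uint64_t fileid, uint64_t mounted_on_fileid)
{
	bool ok1 = true, ok2 = true;

	if (valid & ATTR_FILEID)
		ok1 = (cached == fileid);
	if (valid & ATTR_MOUNTED_ON_FILEID)
		ok2 = (cached == mounted_on_fileid);
	return ok1 || ok2;
}

int main(void)
{
	/* fileid differs but the mounted-on fileid matches: still accepted. */
	printf("%d\n", fileid_valid(42, ATTR_FILEID | ATTR_MOUNTED_ON_FILEID, 7, 42));
	return 0;
}
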
@@ -1819,7 +1837,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
struct inode *nfs_alloc_inode(struct super_block *sb)
{
struct nfs_inode *nfsi;
- nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
+ nfsi = kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
if (!nfsi)
return NULL;
nfsi->flags = 0UL;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index b5a0afc3ee10..c8162c660c44 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -139,7 +139,7 @@ EXPORT_SYMBOL_GPL(nfs_path);
struct vfsmount *nfs_d_automount(struct path *path)
{
struct vfsmount *mnt;
- struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
+ struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
struct nfs_fh *fh = NULL;
struct nfs_fattr *fattr = NULL;
@@ -180,16 +180,16 @@ out_nofree:
static int
nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- if (NFS_FH(dentry->d_inode)->size != 0)
+ if (NFS_FH(d_inode(dentry))->size != 0)
return nfs_getattr(mnt, dentry, stat);
- generic_fillattr(dentry->d_inode, stat);
+ generic_fillattr(d_inode(dentry), stat);
return 0;
}
static int
nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
{
- if (NFS_FH(dentry->d_inode)->size != 0)
+ if (NFS_FH(d_inode(dentry))->size != 0)
return nfs_setattr(dentry, attr);
return -EACCES;
}
@@ -279,7 +279,7 @@ struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
struct dentry *parent = dget_parent(dentry);
/* Look it up again to get its attributes */
- err = server->nfs_client->rpc_ops->lookup(parent->d_inode, &dentry->d_name, fh, fattr, NULL);
+ err = server->nfs_client->rpc_ops->lookup(d_inode(parent), &dentry->d_name, fh, fattr, NULL);
dput(parent);
if (err != 0)
return ERR_PTR(err);
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 658e586ca438..1ebe2fc7cda2 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -279,7 +279,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
ssize_t
nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
ssize_t result = 0;
int error;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 1f11d2533ee4..cb28cceefebe 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -120,7 +120,7 @@ static int
nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct nfs3_sattrargs arg = {
.fh = NFS_FH(inode),
.sattr = sattr,
@@ -386,13 +386,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
* not sure this buys us anything (and I'd have
* to revamp the NFSv3 XDR code) */
status = nfs3_proc_setattr(dentry, data->res.fattr, sattr);
- nfs_post_op_update_inode(dentry->d_inode, data->res.fattr);
+ nfs_post_op_update_inode(d_inode(dentry), data->res.fattr);
dprintk("NFS reply setattr (post-create): %d\n", status);
if (status != 0)
goto out_release_acls;
}
- status = nfs3_proc_setacls(dentry->d_inode, acl, default_acl);
+ status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
out_release_acls:
posix_acl_release(acl);
@@ -570,7 +570,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
if (status != 0)
goto out_release_acls;
- status = nfs3_proc_setacls(dentry->d_inode, acl, default_acl);
+ status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
out_release_acls:
posix_acl_release(acl);
@@ -623,7 +623,7 @@ static int
nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
__be32 *verf = NFS_I(dir)->cookieverf;
struct nfs3_readdirargs arg = {
.fh = NFS_FH(dir),
@@ -715,7 +715,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
if (status != 0)
goto out_release_acls;
- status = nfs3_proc_setacls(dentry->d_inode, acl, default_acl);
+ status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
out_release_acls:
posix_acl_release(acl);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index cb170722769c..3a9e75235f30 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -36,13 +36,16 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
loff_t offset, loff_t len)
{
struct inode *inode = file_inode(filep);
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs42_falloc_args args = {
.falloc_fh = NFS_FH(inode),
.falloc_offset = offset,
.falloc_length = len,
+ .falloc_bitmask = server->cache_consistency_bitmask,
+ };
+ struct nfs42_falloc_res res = {
+ .falloc_server = server,
};
- struct nfs42_falloc_res res;
- struct nfs_server *server = NFS_SERVER(inode);
int status;
msg->rpc_argp = &args;
@@ -52,8 +55,17 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
if (status)
return status;
- return nfs4_call_sync(server->client, server, msg,
- &args.seq_args, &res.seq_res, 0);
+ res.falloc_fattr = nfs_alloc_fattr();
+ if (!res.falloc_fattr)
+ return -ENOMEM;
+
+ status = nfs4_call_sync(server->client, server, msg,
+ &args.seq_args, &res.seq_res, 0);
+ if (status == 0)
+ status = nfs_post_op_update_inode(inode, res.falloc_fattr);
+
+ kfree(res.falloc_fattr);
+ return status;
}
static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
@@ -84,9 +96,13 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
return -EOPNOTSUPP;
+ mutex_lock(&inode->i_mutex);
+
err = nfs42_proc_fallocate(&msg, filep, offset, len);
if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
+
+ mutex_unlock(&inode->i_mutex);
return err;
}
@@ -101,9 +117,16 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
return -EOPNOTSUPP;
+ nfs_wb_all(inode);
+ mutex_lock(&inode->i_mutex);
+
err = nfs42_proc_fallocate(&msg, filep, offset, len);
+ if (err == 0)
+ truncate_pagecache_range(inode, offset, (offset + len) -1);
if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
+
+ mutex_unlock(&inode->i_mutex);
return err;
}
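
[Editor's note] Worth noting in the DEALLOCATE path above: dirty pages are flushed first, i_mutex is held across the call, and on success the cached pages over the punched range are dropped so subsequent reads cannot return stale data. truncate_pagecache_range() takes an inclusive end offset, which is why the last byte is computed as (offset + len) - 1; a trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
	long long offset = 4096, len = 8192;

	/* Inclusive end of the punched range: the last affected byte. */
	printf("punch bytes [%lld, %lld]\n", offset, offset + len - 1);	/* 4096..12287 */
	return 0;
}
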
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 038a7e1521fa..1a25b27248f2 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -25,16 +25,20 @@
#define NFS4_enc_allocate_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_allocate_maxsz)
+ encode_allocate_maxsz + \
+ encode_getattr_maxsz)
#define NFS4_dec_allocate_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- decode_allocate_maxsz)
+ decode_allocate_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_deallocate_maxsz)
+ encode_deallocate_maxsz + \
+ encode_getattr_maxsz)
#define NFS4_dec_deallocate_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- decode_deallocate_maxsz)
+ decode_deallocate_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_seek_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_seek_maxsz)
@@ -92,6 +96,7 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->falloc_fh, &hdr);
encode_allocate(xdr, args, &hdr);
+ encode_getfattr(xdr, args->falloc_bitmask, &hdr);
encode_nops(&hdr);
}
@@ -110,6 +115,7 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->falloc_fh, &hdr);
encode_deallocate(xdr, args, &hdr);
+ encode_getfattr(xdr, args->falloc_bitmask, &hdr);
encode_nops(&hdr);
}
@@ -183,6 +189,9 @@ static int nfs4_xdr_dec_allocate(struct rpc_rqst *rqstp,
if (status)
goto out;
status = decode_allocate(xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->falloc_fattr, res->falloc_server);
out:
return status;
}
@@ -207,6 +216,9 @@ static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp,
if (status)
goto out;
status = decode_deallocate(xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->falloc_fattr, res->falloc_server);
out:
return status;
}
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 86d6214ea022..e42be52a8c18 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -4,7 +4,6 @@
*/
#include <linux/module.h>
#include <linux/nfs_fs.h>
-#include <linux/nfs_idmap.h>
#include <linux/nfs_mount.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/auth.h>
@@ -15,6 +14,7 @@
#include "callback.h"
#include "delegation.h"
#include "nfs4session.h"
+#include "nfs4idmap.h"
#include "pnfs.h"
#include "netns.h"
@@ -1130,7 +1130,7 @@ error:
*/
static int nfs_probe_destination(struct nfs_server *server)
{
- struct inode *inode = server->super->s_root->d_inode;
+ struct inode *inode = d_inode(server->super->s_root);
struct nfs_fattr *fattr;
int error;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 8b46389c4c5b..f58c17b3b480 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -10,6 +10,8 @@
#include "fscache.h"
#include "pnfs.h"
+#include "nfstrace.h"
+
#ifdef CONFIG_NFS_V4_2
#include "nfs42.h"
#endif
@@ -46,7 +48,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
openflags &= ~(O_CREAT|O_EXCL);
parent = dget_parent(dentry);
- dir = parent->d_inode;
+ dir = d_inode(parent);
ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
err = PTR_ERR(ctx);
@@ -57,7 +59,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (openflags & O_TRUNC) {
attr.ia_valid |= ATTR_SIZE;
attr.ia_size = 0;
- nfs_wb_all(inode);
+ nfs_sync_inode(inode);
}
inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened);
@@ -74,7 +76,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
goto out_drop;
}
}
- if (inode != dentry->d_inode)
+ if (inode != d_inode(dentry))
goto out_drop;
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -100,6 +102,9 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
int ret;
struct inode *inode = file_inode(file);
+ trace_nfs_fsync_enter(inode);
+
+ nfs_inode_dio_wait(inode);
do {
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret != 0)
@@ -107,7 +112,7 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
mutex_lock(&inode->i_mutex);
ret = nfs_file_fsync_commit(file, start, end, datasync);
if (!ret)
- ret = pnfs_layoutcommit_inode(inode, true);
+ ret = pnfs_sync_inode(inode, !!datasync);
mutex_unlock(&inode->i_mutex);
/*
* If nfs_file_fsync_commit detected a server reboot, then
@@ -118,6 +123,7 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
end = LLONG_MAX;
} while (ret == -EAGAIN);
+ trace_nfs_fsync_exit(inode, ret);
return ret;
}
@@ -152,15 +158,9 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t
if (ret < 0)
return ret;
- mutex_lock(&inode->i_mutex);
if (mode & FALLOC_FL_PUNCH_HOLE)
- ret = nfs42_proc_deallocate(filep, offset, len);
- else
- ret = nfs42_proc_allocate(filep, offset, len);
- mutex_unlock(&inode->i_mutex);
-
- nfs_zap_caches(inode);
- return ret;
+ return nfs42_proc_deallocate(filep, offset, len);
+ return nfs42_proc_allocate(filep, offset, len);
}
#endif /* CONFIG_NFS_V4_2 */
@@ -170,8 +170,6 @@ const struct file_operations nfs4_file_operations = {
#else
.llseek = nfs_file_llseek,
#endif
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
.mmap = nfs_file_mmap,
diff --git a/fs/nfs/idmap.c b/fs/nfs/nfs4idmap.c
index 857e2a99acc8..2e1737c40a29 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -36,7 +36,6 @@
#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
-#include <linux/nfs_idmap.h>
#include <net/net_namespace.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/nfs_fs.h>
@@ -49,6 +48,7 @@
#include "internal.h"
#include "netns.h"
+#include "nfs4idmap.h"
#include "nfs4trace.h"
#define NFS_UINT_MAXLEN 11
diff --git a/include/linux/nfs_idmap.h b/fs/nfs/nfs4idmap.h
index 333844e38f66..de44d7330ab3 100644
--- a/include/linux/nfs_idmap.h
+++ b/fs/nfs/nfs4idmap.h
@@ -1,5 +1,5 @@
/*
- * include/linux/nfs_idmap.h
+ * fs/nfs/nfs4idmap.h
*
* UID and GID to name mapping for clients.
*
@@ -46,19 +46,8 @@ struct nfs_server;
struct nfs_fattr;
struct nfs4_string;
-#if IS_ENABLED(CONFIG_NFS_V4)
int nfs_idmap_init(void);
void nfs_idmap_quit(void);
-#else
-static inline int nfs_idmap_init(void)
-{
- return 0;
-}
-
-static inline void nfs_idmap_quit(void)
-{}
-#endif
-
int nfs_idmap_new(struct nfs_client *);
void nfs_idmap_delete(struct nfs_client *);
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 3d83cb1fdc70..f592672373cb 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -375,7 +375,7 @@ static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *
dprintk("%s: getting locations for %pd2\n",
__func__, dentry);
- err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
+ err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page);
dput(parent);
if (err != 0 ||
fs_locations->nlocations <= 0 ||
@@ -396,7 +396,7 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
{
rpc_authflavor_t flavor = server->client->cl_auth->au_flavor;
struct dentry *parent = dget_parent(dentry);
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct qstr *name = &dentry->d_name;
struct rpc_clnt *client;
struct vfsmount *mnt;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 627f37c44456..55e1e3af23a3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,6 +38,7 @@
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
@@ -51,7 +52,6 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
-#include <linux/nfs_idmap.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
@@ -63,6 +63,7 @@
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
+#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
@@ -185,7 +186,8 @@ const u32 nfs4_fattr_bitmap[3] = {
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
| FATTR4_WORD1_TIME_METADATA
- | FATTR4_WORD1_TIME_MODIFY,
+ | FATTR4_WORD1_TIME_MODIFY
+ | FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
FATTR4_WORD2_SECURITY_LABEL
#endif
@@ -293,7 +295,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
- p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
+ p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
}
*p++ = xdr_one; /* next */
@@ -305,7 +307,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
- p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
+ p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
readdir->pgbase = (char *)p - (char *)start;
readdir->count -= readdir->pgbase;
@@ -1004,7 +1006,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
gfp_t gfp_mask)
{
struct dentry *parent = dget_parent(dentry);
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
struct nfs4_opendata *p;
@@ -1057,7 +1059,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
- p->o_arg.fh = NFS_FH(dentry->d_inode);
+ p->o_arg.fh = NFS_FH(d_inode(dentry));
}
if (attrs != NULL && attrs->ia_valid != 0) {
__u32 verf[2];
@@ -1794,7 +1796,7 @@ static const struct rpc_call_ops nfs4_open_confirm_ops = {
*/
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
- struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
+ struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
@@ -1951,7 +1953,7 @@ static const struct rpc_call_ops nfs4_open_ops = {
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
- struct inode *dir = data->dir->d_inode;
+ struct inode *dir = d_inode(data->dir);
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
@@ -1998,7 +2000,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
- struct inode *dir = data->dir->d_inode;
+ struct inode *dir = d_inode(data->dir);
struct nfs_openres *o_res = &data->o_res;
int status;
@@ -2067,7 +2069,7 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
*/
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
- struct inode *dir = data->dir->d_inode;
+ struct inode *dir = d_inode(data->dir);
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
@@ -2314,7 +2316,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
dentry = opendata->dentry;
- if (dentry->d_inode == NULL) {
+ if (d_really_is_negative(dentry)) {
/* FIXME: Is this d_drop() ever needed? */
d_drop(dentry);
dentry = d_add_unique(dentry, igrab(state->inode));
@@ -2325,7 +2327,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
ctx->dentry = dget(dentry);
}
nfs_set_verifier(dentry,
- nfs_save_change_attribute(opendata->dir->d_inode));
+ nfs_save_change_attribute(d_inode(opendata->dir)));
}
ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
@@ -2333,7 +2335,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
goto out;
ctx->state = state;
- if (dentry->d_inode == state->inode) {
+ if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
nfs4_schedule_stateid_recovery(server, state);
@@ -2374,10 +2376,10 @@ static int _nfs4_do_open(struct inode *dir,
status = nfs4_recover_expired_lease(server);
if (status != 0)
goto err_put_state_owner;
- if (dentry->d_inode != NULL)
- nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
+ if (d_really_is_positive(dentry))
+ nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
status = -ENOMEM;
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
claim = NFS4_OPEN_CLAIM_FH;
opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
label, claim, GFP_KERNEL);
@@ -2400,8 +2402,8 @@ static int _nfs4_do_open(struct inode *dir,
}
opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
}
- if (dentry->d_inode != NULL)
- opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
+ if (d_really_is_positive(dentry))
+ opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
if (status != 0)
@@ -3095,16 +3097,13 @@ int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info,
bool auth_probe)
{
- int status;
+ int status = 0;
- switch (auth_probe) {
- case false:
+ if (!auth_probe)
status = nfs4_lookup_root(server, fhandle, info);
- if (status != -NFS4ERR_WRONGSEC)
- break;
- default:
+
+ if (auth_probe || status == NFS4ERR_WRONGSEC)
status = nfs4_do_find_root_sec(server, fhandle, info);
- }
if (status == 0)
status = nfs4_server_capabilities(server, fhandle);
@@ -3254,7 +3253,7 @@ static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct rpc_cred *cred = NULL;
struct nfs4_state *state = NULL;
struct nfs4_label *label = NULL;
@@ -3871,13 +3870,13 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
struct nfs4_readdir_arg args = {
.fh = NFS_FH(dir),
.pages = pages,
.pgbase = 0,
.count = count,
- .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
+ .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
.plus = plus,
};
struct nfs4_readdir_res res;
@@ -3914,8 +3913,8 @@ static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
do {
err = _nfs4_proc_readdir(dentry, cred, cookie,
pages, count, plus);
- trace_nfs4_readdir(dentry->d_inode, err);
- err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), err,
+ trace_nfs4_readdir(d_inode(dentry), err);
+ err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
&exception);
} while (exception.retry);
return err;
@@ -4830,7 +4829,7 @@ nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
struct nfs4_label ilabel, *olabel = NULL;
struct nfs_fattr fattr;
struct rpc_cred *cred;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int status;
if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
@@ -5606,6 +5605,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
+ get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
@@ -5670,7 +5670,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
data->rpc_status = task->tk_status;
switch (task->tk_status) {
case 0:
- renew_lease(NFS_SERVER(data->ctx->dentry->d_inode),
+ renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
data->timestamp);
if (data->arg.new_lock) {
data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
@@ -5718,6 +5718,7 @@ static void nfs4_lock_release(void *calldata)
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
+ fput(data->fl.fl_file);
kfree(data);
dprintk("%s: done!\n", __func__);
}
@@ -6112,7 +6113,7 @@ static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
if (strcmp(key, "") != 0)
return -EINVAL;
- return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
+ return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
}
static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
@@ -6121,7 +6122,7 @@ static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
if (strcmp(key, "") != 0)
return -EINVAL;
- return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
+ return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
}
static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
@@ -6130,7 +6131,7 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
{
size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
- if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
+ if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
return 0;
if (list && len <= list_len)
@@ -6158,7 +6159,7 @@ static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key,
void *buf, size_t buflen, int type)
{
if (security_ismaclabel(key))
- return nfs4_get_security_label(dentry->d_inode, buf, buflen);
+ return nfs4_get_security_label(d_inode(dentry), buf, buflen);
return -EOPNOTSUPP;
}
@@ -6168,10 +6169,10 @@ static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list,
{
size_t len = 0;
- if (nfs_server_capable(dentry->d_inode, NFS_CAP_SECURITY_LABEL)) {
- len = security_inode_listsecurity(dentry->d_inode, NULL, 0);
+ if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
+ len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
if (list && len <= list_len)
- security_inode_listsecurity(dentry->d_inode, list, len);
+ security_inode_listsecurity(d_inode(dentry), list, len);
}
return len;
}
@@ -7944,6 +7945,8 @@ _nfs4_proc_getdeviceinfo(struct nfs_server *server,
{
struct nfs4_getdeviceinfo_args args = {
.pdev = pdev,
+ .notify_types = NOTIFY_DEVICEID4_CHANGE |
+ NOTIFY_DEVICEID4_DELETE,
};
struct nfs4_getdeviceinfo_res res = {
.pdev = pdev,
@@ -7958,6 +7961,11 @@ _nfs4_proc_getdeviceinfo(struct nfs_server *server,
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+ if (res.notification & ~args.notify_types)
+ dprintk("%s: unsupported notification\n", __func__);
+ if (res.notification != args.notify_types)
+ pdev->nocache = 1;
+
dprintk("<-- %s status=%d\n", __func__, status);
return status;
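
[Editor's note] The GETDEVICEINFO change above requests CHANGE and DELETE deviceid notifications and, if the server does not grant exactly that set, flags the device so it is not kept in the long-lived deviceid cache. A small standalone illustration of the bitmask handling (the bit values here are stand-ins; only the masking logic is the point):

#include <stdio.h>

#define NOTIFY_CHANGE (1u << 1)
#define NOTIFY_DELETE (1u << 2)

int main(void)
{
	unsigned requested = NOTIFY_CHANGE | NOTIFY_DELETE;
	unsigned granted   = NOTIFY_CHANGE;	/* server grants only CHANGE */
	int nocache = 0;

	if (granted & ~requested)
		printf("unsupported notification\n");	/* server offered an unrequested type */
	if (granted != requested)
		nocache = 1;				/* cannot rely on cache invalidation */
	printf("nocache=%d\n", nocache);		/* prints nocache=1 */
	return 0;
}
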
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f95e3b58bbc3..2782cfca2265 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -42,7 +42,6 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
-#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
@@ -57,6 +56,7 @@
#include "callback.h"
#include "delegation.h"
#include "internal.h"
+#include "nfs4idmap.h"
#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
@@ -1902,7 +1902,7 @@ static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred)
goto out;
}
- inode = server->super->s_root->d_inode;
+ inode = d_inode(server->super->s_root);
result = nfs4_proc_get_locations(inode, locations, page, cred);
if (result) {
dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
@@ -2021,7 +2021,7 @@ restart:
rcu_read_unlock();
- inode = server->super->s_root->d_inode;
+ inode = d_inode(server->super->s_root);
status = nfs4_proc_fsid_present(inode, cred);
if (status != -NFS4ERR_MOVED)
goto restart; /* wasn't this one */
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 75090feeafad..6fb7cb6b3f4b 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -3,12 +3,12 @@
*/
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/nfs_idmap.h>
#include <linux/nfs4_mount.h>
#include <linux/nfs_fs.h>
#include "delegation.h"
#include "internal.h"
#include "nfs4_fs.h"
+#include "nfs4idmap.h"
#include "dns_resolve.h"
#include "pnfs.h"
#include "nfs.h"
@@ -91,10 +91,11 @@ static void nfs4_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
- pnfs_return_layout(inode);
- pnfs_destroy_layout(NFS_I(inode));
/* If we are holding a delegation, return it! */
nfs_inode_return_delegation_noreclaim(inode);
+ /* Note that above delegreturn would trigger pnfs return-on-close */
+ pnfs_return_layout(inode);
+ pnfs_destroy_layout(NFS_I(inode));
/* First call standard NFS clear_inode() code */
nfs_clear_inode(inode);
}
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index b6ebe7e445f6..0fbd3ab1be22 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -6,10 +6,10 @@
* Copyright (c) 2006 Trond Myklebust <Trond.Myklebust@netapp.com>
*/
#include <linux/sysctl.h>
-#include <linux/nfs_idmap.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
+#include "nfs4idmap.h"
#include "callback.h"
static const int nfs_set_port_min = 0;
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 1c32adbe728d..470af1a78bec 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -418,7 +418,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
__entry->fileid = 0;
__entry->fhandle = 0;
}
- __entry->dir = NFS_FILEID(ctx->dentry->d_parent->d_inode);
+ __entry->dir = NFS_FILEID(d_inode(ctx->dentry->d_parent));
__assign_str(name, ctx->dentry->d_name.name);
),
@@ -1110,7 +1110,7 @@ TRACE_EVENT(nfs4_layoutget,
),
TP_fast_assign(
- const struct inode *inode = ctx->dentry->d_inode;
+ const struct inode *inode = d_inode(ctx->dentry);
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 5c399ec41079..0aea97841d30 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,10 +52,10 @@
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
-#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
#include "internal.h"
+#include "nfs4idmap.h"
#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
@@ -1920,7 +1920,7 @@ encode_getdeviceinfo(struct xdr_stream *xdr,
p = reserve_space(xdr, 4 + 4);
*p++ = cpu_to_be32(1); /* bitmap length */
- *p++ = cpu_to_be32(NOTIFY_DEVICEID4_CHANGE | NOTIFY_DEVICEID4_DELETE);
+ *p++ = cpu_to_be32(args->notify_types);
}
static void
@@ -5753,8 +5753,9 @@ out_overflow:
#if defined(CONFIG_NFS_V4_1)
static int decode_getdeviceinfo(struct xdr_stream *xdr,
- struct pnfs_device *pdev)
+ struct nfs4_getdeviceinfo_res *res)
{
+ struct pnfs_device *pdev = res->pdev;
__be32 *p;
uint32_t len, type;
int status;
@@ -5802,12 +5803,7 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
if (unlikely(!p))
goto out_overflow;
- if (be32_to_cpup(p++) &
- ~(NOTIFY_DEVICEID4_CHANGE | NOTIFY_DEVICEID4_DELETE)) {
- dprintk("%s: unsupported notification\n",
- __func__);
- }
-
+ res->notification = be32_to_cpup(p++);
for (i = 1; i < len; i++) {
if (be32_to_cpup(p++)) {
dprintk("%s: unsupported notification\n",
@@ -7061,7 +7057,7 @@ static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status != 0)
goto out;
- status = decode_getdeviceinfo(xdr, res->pdev);
+ status = decode_getdeviceinfo(xdr, res);
out:
return status;
}
@@ -7365,6 +7361,11 @@ nfs4_stat_to_errno(int stat)
.p_name = #proc, \
}
+#define STUB(proc) \
+[NFSPROC4_CLNT_##proc] = { \
+ .p_name = #proc, \
+}
+
struct rpc_procinfo nfs4_procedures[] = {
PROC(READ, enc_read, dec_read),
PROC(WRITE, enc_write, dec_write),
@@ -7417,6 +7418,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
+ STUB(GETDEVICELIST),
PROC(BIND_CONN_TO_SESSION,
enc_bind_conn_to_session, dec_bind_conn_to_session),
PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
diff --git a/fs/nfs/nfstrace.c b/fs/nfs/nfstrace.c
index 4eb0aead69b6..c74f7af23d77 100644
--- a/fs/nfs/nfstrace.c
+++ b/fs/nfs/nfstrace.c
@@ -7,3 +7,6 @@
#define CREATE_TRACE_POINTS
#include "nfstrace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_exit);
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 24e1d7403c0b..5aaed363556a 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -57,7 +57,7 @@ objio_free_deviceid_node(struct nfs4_deviceid_node *d)
dprintk("%s: free od=%p\n", __func__, de->od.od);
osduld_put_device(de->od.od);
- kfree(de);
+ kfree_rcu(d, rcu);
}
struct objio_segment {
@@ -637,6 +637,8 @@ static struct pnfs_layoutdriver_type objlayout_type = {
.pg_read_ops = &objio_pg_read_ops,
.pg_write_ops = &objio_pg_write_ops,
+ .sync = pnfs_generic_sync,
+
.free_deviceid_node = objio_free_deviceid_node,
.encode_layoutcommit = objlayout_encode_layoutcommit,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d57190a0d533..282b39369510 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -938,7 +938,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
if (prev) {
if (!nfs_match_open_context(req->wb_context, prev->wb_context))
return false;
- flctx = req->wb_context->dentry->d_inode->i_flctx;
+ flctx = d_inode(req->wb_context->dentry)->i_flctx;
if (flctx != NULL &&
!(list_empty_careful(&flctx->flc_posix) &&
list_empty_careful(&flctx->flc_flock)) &&
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 4f802b02fbb9..230606243be6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1090,6 +1090,7 @@ bool pnfs_roc(struct inode *ino)
pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
+ pnfs_layoutcommit_inode(ino, true);
return true;
out_noroc:
@@ -1104,8 +1105,10 @@ out_noroc:
}
}
spin_unlock(&ino->i_lock);
- if (layoutreturn)
+ if (layoutreturn) {
+ pnfs_layoutcommit_inode(ino, true);
pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+ }
return false;
}
@@ -1841,7 +1844,8 @@ void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
if (!hdr->pnfs_error) {
- pnfs_set_layoutcommit(hdr);
+ pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
+ hdr->mds_offset + hdr->res.count);
hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
} else
pnfs_ld_handle_write_error(hdr);
@@ -1902,7 +1906,6 @@ static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
pnfs_put_lseg(hdr->lseg);
nfs_pgio_header_free(hdr);
}
-EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
@@ -2032,7 +2035,6 @@ static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
pnfs_put_lseg(hdr->lseg);
nfs_pgio_header_free(hdr);
}
-EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
@@ -2099,64 +2101,34 @@ void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
-pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
+pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
+ loff_t end_pos)
{
- struct inode *inode = hdr->inode;
struct nfs_inode *nfsi = NFS_I(inode);
- loff_t end_pos = hdr->mds_offset + hdr->res.count;
bool mark_as_dirty = false;
spin_lock(&inode->i_lock);
if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
- mark_as_dirty = true;
- dprintk("%s: Set layoutcommit for inode %lu ",
- __func__, inode->i_ino);
- }
- if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
- /* references matched in nfs4_layoutcommit_release */
- pnfs_get_lseg(hdr->lseg);
- }
- if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
- spin_unlock(&inode->i_lock);
- dprintk("%s: lseg %p end_pos %llu\n",
- __func__, hdr->lseg, nfsi->layout->plh_lwb);
-
- /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
- * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
- if (mark_as_dirty)
- mark_inode_dirty_sync(inode);
-}
-EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
-
-void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data)
-{
- struct inode *inode = data->inode;
- struct nfs_inode *nfsi = NFS_I(inode);
- bool mark_as_dirty = false;
-
- spin_lock(&inode->i_lock);
- if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
mark_as_dirty = true;
dprintk("%s: Set layoutcommit for inode %lu ",
__func__, inode->i_ino);
- }
- if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) {
+ } else if (end_pos > nfsi->layout->plh_lwb)
+ nfsi->layout->plh_lwb = end_pos;
+ if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
/* references matched in nfs4_layoutcommit_release */
- pnfs_get_lseg(data->lseg);
+ pnfs_get_lseg(lseg);
}
- if (data->lwb > nfsi->layout->plh_lwb)
- nfsi->layout->plh_lwb = data->lwb;
spin_unlock(&inode->i_lock);
dprintk("%s: lseg %p end_pos %llu\n",
- __func__, data->lseg, nfsi->layout->plh_lwb);
+ __func__, lseg, nfsi->layout->plh_lwb);
/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
* will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
if (mark_as_dirty)
mark_inode_dirty_sync(inode);
}
-EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit);
+EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
@@ -2216,7 +2188,6 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
pnfs_list_write_lseg(inode, &data->lseg_list);
end_pos = nfsi->layout->plh_lwb;
- nfsi->layout->plh_lwb = 0;
nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
spin_unlock(&inode->i_lock);
@@ -2233,11 +2204,11 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
status = ld->prepare_layoutcommit(&data->args);
if (status) {
spin_lock(&inode->i_lock);
- if (end_pos < nfsi->layout->plh_lwb)
+ set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
+ if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
spin_unlock(&inode->i_lock);
put_rpccred(data->cred);
- set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
goto clear_layoutcommitting;
}
}
@@ -2258,6 +2229,13 @@ clear_layoutcommitting:
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
+int
+pnfs_generic_sync(struct inode *inode, bool datasync)
+{
+ return pnfs_layoutcommit_inode(inode, true);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_sync);
+
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
struct nfs4_threshold *thp;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 635f0865671c..1e6308f82fc3 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -155,6 +155,8 @@ struct pnfs_layoutdriver_type {
int how,
struct nfs_commit_info *cinfo);
+ int (*sync)(struct inode *inode, bool datasync);
+
/*
* Return PNFS_ATTEMPTED to indicate the layout code has attempted
* I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS
@@ -203,6 +205,7 @@ struct pnfs_device {
struct page **pages;
unsigned int pgbase;
unsigned int pglen; /* reply buffer length */
+ unsigned char nocache : 1;/* May not be cached */
};
#define NFS4_PNFS_GETDEVLIST_MAXNUM 16
@@ -263,10 +266,11 @@ bool pnfs_roc(struct inode *ino);
void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task);
-void pnfs_set_layoutcommit(struct nfs_pgio_header *);
-void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data);
+void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
+int pnfs_generic_sync(struct inode *inode, bool datasync);
+int pnfs_nfs_generic_sync(struct inode *inode, bool datasync);
int _pnfs_return_layout(struct inode *);
int pnfs_commit_and_return_layout(struct inode *);
void pnfs_ld_write_done(struct nfs_pgio_header *);
@@ -291,6 +295,7 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
enum {
NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */
NFS_DEVICEID_UNAVAILABLE, /* device temporarily unavailable */
+ NFS_DEVICEID_NOCACHE, /* device may not be cached */
};
/* pnfs_dev.c */
@@ -302,6 +307,7 @@ struct nfs4_deviceid_node {
unsigned long flags;
unsigned long timestamp_unavailable;
struct nfs4_deviceid deviceid;
+ struct rcu_head rcu;
atomic_t ref;
};
@@ -426,7 +432,7 @@ static inline bool
pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
+ struct inode *inode = d_inode(req->wb_context->dentry);
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (lseg == NULL || ld->mark_request_commit == NULL)
@@ -438,7 +444,7 @@ pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
static inline bool
pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
+ struct inode *inode = d_inode(req->wb_context->dentry);
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (ld == NULL || ld->clear_request_commit == NULL)
@@ -486,6 +492,14 @@ pnfs_ld_read_whole_page(struct inode *inode)
return NFS_SERVER(inode)->pnfs_curr_ld->flags & PNFS_READ_WHOLE_PAGE;
}
+static inline int
+pnfs_sync_inode(struct inode *inode, bool datasync)
+{
+ if (!pnfs_enabled_sb(NFS_SERVER(inode)))
+ return 0;
+ return NFS_SERVER(inode)->pnfs_curr_ld->sync(inode, datasync);
+}
+
static inline bool
pnfs_layoutcommit_outstanding(struct inode *inode)
{
@@ -568,6 +582,12 @@ pnfs_ld_read_whole_page(struct inode *inode)
return false;
}
+static inline int
+pnfs_sync_inode(struct inode *inode, bool datasync)
+{
+ return 0;
+}
+
static inline bool
pnfs_roc(struct inode *ino)
{
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index aa2ec0015183..2961fcd7a2df 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -149,6 +149,8 @@ nfs4_get_device_info(struct nfs_server *server,
*/
d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
gfp_flags);
+ if (d && pdev->nocache)
+ set_bit(NFS_DEVICEID_NOCACHE, &d->flags);
out_free_pages:
for (i = 0; i < max_pages; i++)
@@ -175,8 +177,8 @@ __nfs4_find_get_deviceid(struct nfs_server *server,
rcu_read_lock();
d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
hash);
- if (d != NULL)
- atomic_inc(&d->ref);
+ if (d != NULL && !atomic_inc_not_zero(&d->ref))
+ d = NULL;
rcu_read_unlock();
return d;
}
@@ -235,12 +237,11 @@ nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
return;
}
hlist_del_init_rcu(&d->node);
+ clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
spin_unlock(&nfs4_deviceid_lock);
- synchronize_rcu();
/* balance the initial ref set in pnfs_insert_deviceid */
- if (atomic_dec_and_test(&d->ref))
- d->ld->free_deviceid_node(d);
+ nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
@@ -271,6 +272,11 @@ EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
+ if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
+ if (atomic_add_unless(&d->ref, -1, 2))
+ return false;
+ nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
+ }
if (!atomic_dec_and_test(&d->ref))
return false;
d->ld->free_deviceid_node(d);
@@ -314,6 +320,7 @@ _deviceid_purge_client(const struct nfs_client *clp, long hash)
if (d->nfs_client == clp && atomic_read(&d->ref)) {
hlist_del_init_rcu(&d->node);
hlist_add_head(&d->tmpnode, &tmp);
+ clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
}
rcu_read_unlock();
spin_unlock(&nfs4_deviceid_lock);
@@ -321,12 +328,10 @@ _deviceid_purge_client(const struct nfs_client *clp, long hash)
if (hlist_empty(&tmp))
return;
- synchronize_rcu();
while (!hlist_empty(&tmp)) {
d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
hlist_del(&d->tmpnode);
- if (atomic_dec_and_test(&d->ref))
- d->ld->free_deviceid_node(d);
+ nfs4_put_deviceid_node(d);
}
}
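
[Editor's note, not part of the patch] The pnfs_dev.c hunks above switch the deviceid lookup from a plain atomic_inc() to atomic_inc_not_zero() and drop the synchronize_rcu() calls in favour of the rcu_head added to nfs4_deviceid_node, so a node whose last reference has already been dropped cannot be handed out again from the RCU-protected hash. A userspace sketch of the "take a reference only if it is still live" idiom, using C11 atomics as an analogue (this is not the kernel's atomic API):

/* Sketch: bump the refcount only while it is non-zero. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	atomic_int ref;
};

/* Returns true and bumps the count only if the count was non-zero. */
static bool get_if_live(struct node *n)
{
	int old = atomic_load(&n->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&n->ref, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* already dying; caller must not use it */
}

int main(void)
{
	struct node live = { .ref = 1 };
	struct node dead = { .ref = 0 };

	printf("live: %s (ref=%d)\n", get_if_live(&live) ? "got ref" : "miss",
	       atomic_load(&live.ref));
	printf("dead: %s (ref=%d)\n", get_if_live(&dead) ? "got ref" : "miss",
	       atomic_load(&dead.ref));
	return 0;
}
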
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 54e36b38fb5f..f37e25b6311c 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -561,7 +561,7 @@ static bool load_v3_ds_connect(void)
return(get_v3_ds_connect != NULL);
}
-void __exit nfs4_pnfs_v3_ds_connect_unload(void)
+void nfs4_pnfs_v3_ds_connect_unload(void)
{
if (get_v3_ds_connect) {
symbol_put(nfs3_set_ds_client);
@@ -868,3 +868,13 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
nfs_request_add_commit_list(req, list, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
+
+int
+pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
+{
+ if (datasync)
+ return 0;
+ return pnfs_layoutcommit_inode(inode, true);
+}
+EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);
+
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index c63189acd052..b417bbcd9704 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -118,7 +118,7 @@ static int
nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct nfs_sattrargs arg = {
.fh = NFS_FH(inode),
.sattr = sattr
@@ -487,7 +487,7 @@ static int
nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
struct nfs_readdirargs arg = {
.fh = NFS_FH(dir),
.cookie = cookie,
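
[Editor's note, not part of the patch] Most of the mechanical churn in this series, here and in the nfsd files below, replaces direct dentry->d_inode dereferences with the d_inode() and d_really_is_positive()/d_really_is_negative() accessors. A toy sketch of the accessor idea follows; the helper bodies are illustrative (the real definitions live in include/linux/dcache.h), the point is that the "is this dentry positive?" question is asked through one named helper instead of raw field access.

/* Toy illustration of wrapping a raw pointer field behind accessors. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct inode { unsigned long i_ino; };
struct dentry { struct inode *d_inode; };

static inline struct inode *d_inode(const struct dentry *dentry)
{
	return dentry->d_inode;
}

static inline bool d_really_is_positive(const struct dentry *dentry)
{
	return dentry->d_inode != NULL;
}

int main(void)
{
	struct inode ino = { .i_ino = 42 };
	struct dentry positive = { .d_inode = &ino };
	struct dentry negative = { .d_inode = NULL };

	if (d_really_is_positive(&positive))
		printf("positive dentry, ino=%lu\n", d_inode(&positive)->i_ino);
	if (!d_really_is_positive(&negative))
		printf("negative dentry, no inode to touch\n");
	return 0;
}
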
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 568ecf0a880f..ae0ff7a11b40 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -117,15 +117,15 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
static void nfs_readpage_release(struct nfs_page *req)
{
- struct inode *d_inode = req->wb_context->dentry->d_inode;
+ struct inode *inode = d_inode(req->wb_context->dentry);
- dprintk("NFS: read done (%s/%llu %d@%lld)\n", d_inode->i_sb->s_id,
- (unsigned long long)NFS_FILEID(d_inode), req->wb_bytes,
+ dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
(long long)req_offset(req));
if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
if (PageUptodate(req->wb_page))
- nfs_readpage_to_fscache(d_inode, req->wb_page, 0);
+ nfs_readpage_to_fscache(inode, req->wb_page, 0);
unlock_page(req->wb_page);
}
@@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page)
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
page, PAGE_CACHE_SIZE, page_file_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
- nfs_inc_stats(inode, NFSIOS_READPAGES);
+ nfs_add_stats(inode, NFSIOS_READPAGES, 1);
/*
* Try to flush any pending writes to the file..
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 322b2de02988..f175b833b6ba 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -43,7 +43,6 @@
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/in6.h>
@@ -433,7 +432,7 @@ int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct nfs_server *server = NFS_SB(dentry->d_sb);
unsigned char blockbits;
unsigned long blockres;
- struct nfs_fh *fh = NFS_FH(dentry->d_inode);
+ struct nfs_fh *fh = NFS_FH(d_inode(dentry));
struct nfs_fsstat res;
int error = -ENOMEM;
@@ -447,7 +446,7 @@ int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
pd_dentry = dget_parent(dentry);
if (pd_dentry != NULL) {
- nfs_zap_caches(pd_dentry->d_inode);
+ nfs_zap_caches(d_inode(pd_dentry));
dput(pd_dentry);
}
}
@@ -2193,7 +2192,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
data->version != nfss->nfs_client->rpc_ops->version ||
data->minorversion != nfss->nfs_client->cl_minorversion ||
data->retrans != nfss->client->cl_timeout->to_retries ||
- data->selected_flavor != nfss->client->cl_auth->au_flavor ||
+ !nfs_auth_info_match(&data->auth_info, nfss->client->cl_auth->au_flavor) ||
data->acregmin != nfss->acregmin / HZ ||
data->acregmax != nfss->acregmax / HZ ||
data->acdirmin != nfss->acdirmin / HZ ||
@@ -2241,7 +2240,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
data->wsize = nfss->wsize;
data->retrans = nfss->client->cl_timeout->to_retries;
data->selected_flavor = nfss->client->cl_auth->au_flavor;
- data->auth_info = nfss->auth_info;
data->acregmin = nfss->acregmin / HZ;
data->acregmax = nfss->acregmax / HZ;
data->acdirmin = nfss->acdirmin / HZ;
@@ -2526,7 +2524,7 @@ int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
struct nfs_mount_info *mount_info)
{
/* clone any lsm security options from the parent to the new sb */
- if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
+ if (d_inode(mntroot)->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
return -ESTALE;
return security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
}
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 05c9e02f4153..2d56200655fe 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -45,7 +45,7 @@ error:
static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct page *page;
void *err;
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index de54129336c6..fa538b2ba251 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -143,7 +143,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
nfs_free_dname(data);
ret = nfs_copy_dname(alias, data);
spin_lock(&alias->d_lock);
- if (ret == 0 && alias->d_inode != NULL &&
+ if (ret == 0 && d_really_is_positive(alias) &&
!(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
devname_garbage = alias->d_fsdata;
alias->d_fsdata = data;
@@ -190,7 +190,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
parent = dget_parent(dentry);
if (parent == NULL)
goto out_free;
- dir = parent->d_inode;
+ dir = d_inode(parent);
/* Non-exclusive lock protects against concurrent lookup() calls */
spin_lock(&dir->i_lock);
if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
@@ -210,21 +210,21 @@ out_free:
void nfs_wait_on_sillyrename(struct dentry *dentry)
{
- struct nfs_inode *nfsi = NFS_I(dentry->d_inode);
+ struct nfs_inode *nfsi = NFS_I(d_inode(dentry));
wait_event(nfsi->waitqueue, atomic_read(&nfsi->silly_count) <= 1);
}
void nfs_block_sillyrename(struct dentry *dentry)
{
- struct nfs_inode *nfsi = NFS_I(dentry->d_inode);
+ struct nfs_inode *nfsi = NFS_I(d_inode(dentry));
wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
}
void nfs_unblock_sillyrename(struct dentry *dentry)
{
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
struct nfs_inode *nfsi = NFS_I(dir);
struct nfs_unlinkdata *data;
@@ -367,8 +367,8 @@ static void nfs_async_rename_release(void *calldata)
struct nfs_renamedata *data = calldata;
struct super_block *sb = data->old_dir->i_sb;
- if (data->old_dentry->d_inode)
- nfs_mark_for_revalidate(data->old_dentry->d_inode);
+ if (d_really_is_positive(data->old_dentry))
+ nfs_mark_for_revalidate(d_inode(data->old_dentry));
dput(data->old_dentry);
dput(data->new_dentry);
@@ -529,10 +529,10 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
goto out;
- fileid = NFS_FILEID(dentry->d_inode);
+ fileid = NFS_FILEID(d_inode(dentry));
/* Return delegation in anticipation of the rename */
- NFS_PROTO(dentry->d_inode)->return_delegation(dentry->d_inode);
+ NFS_PROTO(d_inode(dentry))->return_delegation(d_inode(dentry));
sdentry = NULL;
do {
@@ -554,7 +554,7 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
*/
if (IS_ERR(sdentry))
goto out;
- } while (sdentry->d_inode != NULL); /* need negative lookup */
+ } while (d_inode(sdentry) != NULL); /* need negative lookup */
/* queue unlink first. Can't do this from rpc_release as it
* has to allocate memory
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 849ed784d6ac..dfc19f1575a1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
int ret;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
- nfs_inc_stats(inode, NFSIOS_WRITEPAGES);
+ nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
nfs_pageio_cond_complete(pgio, page_file_index(page));
ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
@@ -702,7 +702,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
+ struct inode *inode = d_inode(req->wb_context->dentry);
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *head;
@@ -861,7 +861,7 @@ static void
nfs_clear_request_commit(struct nfs_page *req)
{
if (test_bit(PG_CLEAN, &req->wb_flags)) {
- struct inode *inode = req->wb_context->dentry->d_inode;
+ struct inode *inode = d_inode(req->wb_context->dentry);
struct nfs_commit_info cinfo;
nfs_init_cinfo_from_inode(&cinfo, inode);
@@ -1591,7 +1591,7 @@ void nfs_init_commit(struct nfs_commit_data *data,
struct nfs_commit_info *cinfo)
{
struct nfs_page *first = nfs_list_entry(head->next);
- struct inode *inode = first->wb_context->dentry->d_inode;
+ struct inode *inode = d_inode(first->wb_context->dentry);
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
@@ -1690,7 +1690,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
dprintk("NFS: commit (%s/%llu %d@%lld)",
req->wb_context->dentry->d_sb->s_id,
- (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+ (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
req->wb_bytes,
(long long)req_offset(req));
if (status < 0) {
@@ -1840,18 +1840,20 @@ EXPORT_SYMBOL_GPL(nfs_write_inode);
*/
int nfs_wb_all(struct inode *inode)
{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = LONG_MAX,
- .range_start = 0,
- .range_end = LLONG_MAX,
- };
int ret;
trace_nfs_writeback_inode_enter(inode);
- ret = sync_inode(inode, &wbc);
+ ret = filemap_write_and_wait(inode->i_mapping);
+ if (ret)
+ goto out;
+ ret = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (ret < 0)
+ goto out;
+ pnfs_sync_inode(inode, true);
+ ret = 0;
+out:
trace_nfs_writeback_inode_exit(inode, ret);
return ret;
}
@@ -1876,11 +1878,6 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
* request from the inode / page_private pointer and
* release it */
nfs_inode_remove_request(req);
- /*
- * In case nfs_inode_remove_request has marked the
- * page as being dirty
- */
- cancel_dirty_page(page, PAGE_CACHE_SIZE);
nfs_unlock_and_release_request(req);
}
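
[Editor's note, not part of the patch] The rewritten nfs_wb_all() above replaces the sync_inode()/writeback_control path with an explicit sequence: flush and wait on the page cache, send a synchronous COMMIT, then let pNFS issue its layoutcommit via pnfs_sync_inode(), whose result is not propagated. A hedged sketch of that ordering with placeholder step functions (none of these names are kernel APIs):

/* Sketch of "do the steps in order, bail out on the first failure". */
#include <stdio.h>

static int flush_and_wait(void)   { puts("1. flush dirty pages, wait"); return 0; }
static int commit_to_server(void) { puts("2. synchronous COMMIT");      return 0; }
static int layoutcommit(void)     { puts("3. pNFS layoutcommit");       return 0; }

static int writeback_all(void)
{
	int ret;

	ret = flush_and_wait();
	if (ret)
		goto out;
	ret = commit_to_server();
	if (ret < 0)
		goto out;
	layoutcommit();		/* best effort; result not propagated */
	ret = 0;
out:
	return ret;
}

int main(void)
{
	printf("writeback_all() -> %d\n", writeback_all());
	return 0;
}
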
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 683bf718aead..a0b77fc1bd39 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -6,6 +6,7 @@ config NFSD
select SUNRPC
select EXPORTFS
select NFS_ACL_SUPPORT if NFSD_V2_ACL
+ depends on MULTIUSER
help
Choose Y here if you want to allow other computers to access
files residing on this system using Sun's Network File System
@@ -107,7 +108,7 @@ config NFSD_V4_SECURITY_LABEL
config NFSD_FAULT_INJECTION
bool "NFS server manual fault injection"
- depends on NFSD_V4 && DEBUG_KERNEL
+ depends on NFSD_V4 && DEBUG_KERNEL && DEBUG_FS
help
This option enables support for manually injecting faults
into the NFS server. This is intended to be used for
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 03d647bf195d..cdefaa331a07 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
}
const struct nfsd4_layout_ops bl_layout_ops = {
+ /*
+ * Pretend that we send notification to the client. This is a blatant
+ * lie to force recent Linux clients to cache our device IDs.
+ * We rarely ever change the device ID, so the harm of leaking deviceids
+ * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
+ * in this regard, but I filed errata 4119 for this a while ago, and
+ * hopefully the Linux client will eventually start caching deviceids
+ * without this again.
+ */
+ .notify_types =
+ NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
.proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo,
.encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo,
.proc_layoutget = nfsd4_block_proc_layoutget,
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c3e3b6e55ae2..f79521a59747 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -599,7 +599,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out4;
}
- err = check_export(exp.ex_path.dentry->d_inode, &exp.ex_flags,
+ err = check_export(d_inode(exp.ex_path.dentry), &exp.ex_flags,
exp.ex_uuid);
if (err)
goto out4;
@@ -691,8 +691,7 @@ static int svc_export_match(struct cache_head *a, struct cache_head *b)
struct svc_export *orig = container_of(a, struct svc_export, h);
struct svc_export *new = container_of(b, struct svc_export, h);
return orig->ex_client == new->ex_client &&
- orig->ex_path.dentry == new->ex_path.dentry &&
- orig->ex_path.mnt == new->ex_path.mnt;
+ path_equal(&orig->ex_path, &new->ex_path);
}
static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
@@ -891,7 +890,7 @@ exp_rootfh(struct net *net, struct auth_domain *clp, char *name,
printk("nfsd: exp_rootfh path not found %s", name);
return err;
}
- inode = path.dentry->d_inode;
+ inode = d_inode(path.dentry);
dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
name, path.dentry, clp->name,
@@ -1159,6 +1158,7 @@ static struct flags {
{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
{ NFSEXP_V4ROOT, {"v4root", ""}},
+ { NFSEXP_PNFS, {"pnfs", ""}},
{ 0, {"", ""}}
};
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index ac54ea60b3f6..d54701f6dc78 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -42,7 +42,7 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp,
if (nfserr)
RETURN_STATUS(nfserr);
- inode = fh->fh_dentry->d_inode;
+ inode = d_inode(fh->fh_dentry);
if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
RETURN_STATUS(nfserr_inval);
@@ -103,7 +103,7 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
if (nfserr)
goto out;
- inode = fh->fh_dentry->d_inode;
+ inode = d_inode(fh->fh_dentry);
if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
error = -EOPNOTSUPP;
goto out_errno;
@@ -266,9 +266,9 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
* nfsd_dispatch actually ensures the following cannot happen.
* However, it seems fragile to depend on that.
*/
- if (dentry == NULL || dentry->d_inode == NULL)
+ if (dentry == NULL || d_really_is_negative(dentry))
return 0;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
*p++ = htonl(resp->mask);
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 34cbbab6abd7..882b1a14bc3e 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -39,7 +39,7 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp,
if (nfserr)
RETURN_STATUS(nfserr);
- inode = fh->fh_dentry->d_inode;
+ inode = d_inode(fh->fh_dentry);
if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
RETURN_STATUS(nfserr_inval);
@@ -94,7 +94,7 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
if (nfserr)
goto out;
- inode = fh->fh_dentry->d_inode;
+ inode = d_inode(fh->fh_dentry);
if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
error = -EOPNOTSUPP;
goto out_errno;
@@ -174,8 +174,8 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
struct dentry *dentry = resp->fh.fh_dentry;
p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
- if (resp->status == 0 && dentry && dentry->d_inode) {
- struct inode *inode = dentry->d_inode;
+ if (resp->status == 0 && dentry && d_really_is_positive(dentry)) {
+ struct inode *inode = d_inode(dentry);
struct kvec *head = rqstp->rq_res.head;
unsigned int base;
int n;
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 12f2aab4f614..7b755b7f785c 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -166,7 +166,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
rqstp->rq_vec, argp->vlen,
&resp->count);
if (nfserr == 0) {
- struct inode *inode = resp->fh.fh_dentry->d_inode;
+ struct inode *inode = d_inode(resp->fh.fh_dentry);
resp->eof = (argp->offset + resp->count) >= inode->i_size;
}
@@ -551,7 +551,7 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
* different read/write sizes for file systems known to have
* problems with large blocks */
if (nfserr == 0) {
- struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb;
+ struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb;
/* Note that we don't care for remote fs's here */
if (sb->s_magic == MSDOS_SUPER_MAGIC) {
@@ -587,7 +587,7 @@ nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
nfserr = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP);
if (nfserr == 0) {
- struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb;
+ struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb;
/* Note that we don't care for remote fs's here */
switch (sb->s_magic) {
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 39c5eb3ad33a..e4b2b4322553 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -146,7 +146,7 @@ static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp)
default:
case FSIDSOURCE_DEV:
p = xdr_encode_hyper(p, (u64)huge_encode_dev
- (fhp->fh_dentry->d_inode->i_sb->s_dev));
+ (d_inode(fhp->fh_dentry)->i_sb->s_dev));
break;
case FSIDSOURCE_FSID:
p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid);
@@ -203,14 +203,14 @@ static __be32 *
encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
{
struct dentry *dentry = fhp->fh_dentry;
- if (dentry && dentry->d_inode) {
+ if (dentry && d_really_is_positive(dentry)) {
__be32 err;
struct kstat stat;
err = fh_getattr(fhp, &stat);
if (!err) {
*p++ = xdr_one; /* attributes follow */
- lease_get_mtime(dentry->d_inode, &stat.mtime);
+ lease_get_mtime(d_inode(dentry), &stat.mtime);
return encode_fattr3(rqstp, p, fhp, &stat);
}
}
@@ -233,7 +233,7 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
{
struct dentry *dentry = fhp->fh_dentry;
- if (dentry && dentry->d_inode && fhp->fh_post_saved) {
+ if (dentry && d_really_is_positive(dentry) && fhp->fh_post_saved) {
if (fhp->fh_pre_saved) {
*p++ = xdr_one;
p = xdr_encode_hyper(p, (u64) fhp->fh_pre_size);
@@ -260,11 +260,11 @@ void fill_post_wcc(struct svc_fh *fhp)
printk("nfsd: inode locked twice during operation.\n");
err = fh_getattr(fhp, &fhp->fh_post_attr);
- fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
+ fhp->fh_post_change = d_inode(fhp->fh_dentry)->i_version;
if (err) {
fhp->fh_post_saved = 0;
/* Grab the ctime anyway - set_change_info might use it */
- fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime;
+ fhp->fh_post_attr.ctime = d_inode(fhp->fh_dentry)->i_ctime;
} else
fhp->fh_post_saved = 1;
}
@@ -628,7 +628,7 @@ nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_attrstat *resp)
{
if (resp->status == 0) {
- lease_get_mtime(resp->fh.fh_dentry->d_inode,
+ lease_get_mtime(d_inode(resp->fh.fh_dentry),
&resp->stat.mtime);
p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat);
}
@@ -828,7 +828,7 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
return rv;
if (d_mountpoint(dchild))
goto out;
- if (!dchild->d_inode)
+ if (d_really_is_negative(dchild))
goto out;
rv = fh_compose(fhp, exp, dchild, &cd->fh);
out:
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 59fd76651781..67242bf7c6cc 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -139,7 +139,7 @@ int
nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
struct nfs4_acl **acl)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error = 0;
struct posix_acl *pacl = NULL, *dpacl = NULL;
unsigned int flags = 0;
@@ -499,43 +499,13 @@ static inline void add_to_mask(struct posix_acl_state *state, struct posix_ace_s
state->mask.allow |= astate->allow;
}
-/*
- * Certain bits (SYNCHRONIZE, DELETE, WRITE_OWNER, READ/WRITE_NAMED_ATTRS,
- * READ_ATTRIBUTES, READ_ACL) are currently unenforceable and don't translate
- * to traditional read/write/execute permissions.
- *
- * It's problematic to reject acls that use certain mode bits, because it
- * places the burden on users to learn the rules about which bits one
- * particular server sets, without giving the user a lot of help--we return an
- * error that could mean any number of different things. To make matters
- * worse, the problematic bits might be introduced by some application that's
- * automatically mapping from some other acl model.
- *
- * So wherever possible we accept anything, possibly erring on the side of
- * denying more permissions than necessary.
- *
- * However we do reject *explicit* DENY's of a few bits representing
- * permissions we could never deny:
- */
-
-static inline int check_deny(u32 mask, int isowner)
-{
- if (mask & (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL))
- return -EINVAL;
- if (!isowner)
- return 0;
- if (mask & (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL))
- return -EINVAL;
- return 0;
-}
-
static struct posix_acl *
posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
{
struct posix_acl_entry *pace;
struct posix_acl *pacl;
int nace;
- int i, error = 0;
+ int i;
/*
* ACLs with no ACEs are treated differently in the inheritable
@@ -560,17 +530,11 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
pace = pacl->a_entries;
pace->e_tag = ACL_USER_OBJ;
- error = check_deny(state->owner.deny, 1);
- if (error)
- goto out_err;
low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags);
for (i=0; i < state->users->n; i++) {
pace++;
pace->e_tag = ACL_USER;
- error = check_deny(state->users->aces[i].perms.deny, 0);
- if (error)
- goto out_err;
low_mode_from_nfs4(state->users->aces[i].perms.allow,
&pace->e_perm, flags);
pace->e_uid = state->users->aces[i].uid;
@@ -579,18 +543,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
pace++;
pace->e_tag = ACL_GROUP_OBJ;
- error = check_deny(state->group.deny, 0);
- if (error)
- goto out_err;
low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags);
add_to_mask(state, &state->group);
for (i=0; i < state->groups->n; i++) {
pace++;
pace->e_tag = ACL_GROUP;
- error = check_deny(state->groups->aces[i].perms.deny, 0);
- if (error)
- goto out_err;
low_mode_from_nfs4(state->groups->aces[i].perms.allow,
&pace->e_perm, flags);
pace->e_gid = state->groups->aces[i].gid;
@@ -605,15 +563,9 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
pace++;
pace->e_tag = ACL_OTHER;
- error = check_deny(state->other.deny, 0);
- if (error)
- goto out_err;
low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags);
return pacl;
-out_err:
- posix_acl_release(pacl);
- return ERR_PTR(error);
}
static inline void allow_bits(struct posix_ace_state *astate, u32 mask)
@@ -828,7 +780,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
return error;
dentry = fhp->fh_dentry;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
return nfserr_attrnotsupp;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 58277859a467..5694cfb7a47b 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -224,7 +224,7 @@ static int nfs_cb_stat_to_errno(int status)
}
static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
- enum nfsstat4 *status)
+ int *status)
{
__be32 *p;
u32 op;
@@ -235,7 +235,7 @@ static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
op = be32_to_cpup(p++);
if (unlikely(op != expected))
goto out_unexpected;
- *status = be32_to_cpup(p);
+ *status = nfs_cb_stat_to_errno(be32_to_cpup(p));
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -446,22 +446,16 @@ out_overflow:
static int decode_cb_sequence4res(struct xdr_stream *xdr,
struct nfsd4_callback *cb)
{
- enum nfsstat4 nfserr;
int status;
if (cb->cb_minorversion == 0)
return 0;
- status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
- if (unlikely(status))
- goto out;
- if (unlikely(nfserr != NFS4_OK))
- goto out_default;
- status = decode_cb_sequence4resok(xdr, cb);
-out:
- return status;
-out_default:
- return nfs_cb_stat_to_errno(nfserr);
+ status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status);
+ if (unlikely(status || cb->cb_status))
+ return status;
+
+ return decode_cb_sequence4resok(xdr, cb);
}
/*
@@ -524,26 +518,19 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
struct nfsd4_callback *cb)
{
struct nfs4_cb_compound_hdr hdr;
- enum nfsstat4 nfserr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
- goto out;
+ return status;
if (cb != NULL) {
status = decode_cb_sequence4res(xdr, cb);
- if (unlikely(status))
- goto out;
+ if (unlikely(status || cb->cb_status))
+ return status;
}
- status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
- if (unlikely(status))
- goto out;
- if (unlikely(nfserr != NFS4_OK))
- status = nfs_cb_stat_to_errno(nfserr);
-out:
- return status;
+ return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
}
#ifdef CONFIG_NFSD_PNFS
@@ -621,24 +608,18 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
struct nfsd4_callback *cb)
{
struct nfs4_cb_compound_hdr hdr;
- enum nfsstat4 nfserr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
- goto out;
+ return status;
+
if (cb) {
status = decode_cb_sequence4res(xdr, cb);
- if (unlikely(status))
- goto out;
+ if (unlikely(status || cb->cb_status))
+ return status;
}
- status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr);
- if (unlikely(status))
- goto out;
- if (unlikely(nfserr != NFS4_OK))
- status = nfs_cb_stat_to_errno(nfserr);
-out:
- return status;
+ return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
}
#endif /* CONFIG_NFSD_PNFS */
@@ -898,13 +879,6 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
if (!nfsd41_cb_get_slot(clp, task))
return;
}
- spin_lock(&clp->cl_lock);
- if (list_empty(&cb->cb_per_client)) {
- /* This is the first call, not a restart */
- cb->cb_done = false;
- list_add(&cb->cb_per_client, &clp->cl_callbacks);
- }
- spin_unlock(&clp->cl_lock);
rpc_call_start(task);
}
@@ -918,22 +892,33 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
if (clp->cl_minorversion) {
/* No need for lock, access serialized in nfsd4_cb_prepare */
- ++clp->cl_cb_session->se_cb_seq_nr;
+ if (!task->tk_status)
+ ++clp->cl_cb_session->se_cb_seq_nr;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
clp->cl_cb_session->se_cb_seq_nr);
}
- if (clp->cl_cb_client != task->tk_client) {
- /* We're shutting down or changing cl_cb_client; leave
- * it to nfsd4_process_cb_update to restart the call if
- * necessary. */
+ /*
+ * If the backchannel connection was shut down while this
+ * task was queued, we need to resubmit it after setting up
+ * a new backchannel connection.
+ *
+ * Note that if we lost our callback connection permanently
+ * the submission code will error out, so we don't need to
+ * handle that case here.
+ */
+ if (task->tk_flags & RPC_TASK_KILLED) {
+ task->tk_status = 0;
+ cb->cb_need_restart = true;
return;
}
- if (cb->cb_done)
- return;
+ if (cb->cb_status) {
+ WARN_ON_ONCE(task->tk_status);
+ task->tk_status = cb->cb_status;
+ }
switch (cb->cb_ops->done(cb, task)) {
case 0:
@@ -949,21 +934,17 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
default:
BUG();
}
- cb->cb_done = true;
}
static void nfsd4_cb_release(void *calldata)
{
struct nfsd4_callback *cb = calldata;
- struct nfs4_client *clp = cb->cb_clp;
-
- if (cb->cb_done) {
- spin_lock(&clp->cl_lock);
- list_del(&cb->cb_per_client);
- spin_unlock(&clp->cl_lock);
+ if (cb->cb_need_restart)
+ nfsd4_run_cb(cb);
+ else
cb->cb_ops->release(cb);
- }
+
}
static const struct rpc_call_ops nfsd4_cb_ops = {
@@ -1058,9 +1039,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
nfsd4_mark_cb_down(clp, err);
return;
}
- /* Yay, the callback channel's back! Restart any callbacks: */
- list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
- queue_work(callback_wq, &cb->cb_work);
}
static void
@@ -1071,8 +1049,12 @@ nfsd4_run_cb_work(struct work_struct *work)
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
- if (cb->cb_ops && cb->cb_ops->prepare)
- cb->cb_ops->prepare(cb);
+ if (cb->cb_need_restart) {
+ cb->cb_need_restart = false;
+ } else {
+ if (cb->cb_ops && cb->cb_ops->prepare)
+ cb->cb_ops->prepare(cb);
+ }
if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
nfsd4_process_cb_update(cb);
@@ -1084,6 +1066,15 @@ nfsd4_run_cb_work(struct work_struct *work)
cb->cb_ops->release(cb);
return;
}
+
+ /*
+ * Don't send probe messages for 4.1 or later.
+ */
+ if (!cb->cb_ops && clp->cl_minorversion) {
+ clp->cl_cb_state = NFSD4_CB_UP;
+ return;
+ }
+
cb->cb_msg.rpc_cred = clp->cl_cb_cred;
rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
@@ -1098,8 +1089,8 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_msg.rpc_resp = cb;
cb->cb_ops = ops;
INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
- INIT_LIST_HEAD(&cb->cb_per_client);
- cb->cb_done = true;
+ cb->cb_status = 0;
+ cb->cb_need_restart = false;
}
void nfsd4_run_cb(struct nfsd4_callback *cb)
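
[Editor's note, not part of the patch] The nfs4callback.c changes above drop the per-client callback list and instead record the decoded NFS status in cb_status and a cb_need_restart flag: when the RPC task is killed because the backchannel went down, the done handler marks the callback for restart and the release handler requeues it instead of freeing it. A small sketch of that "retry via the release path" shape; the struct and helpers below are illustrative, not the nfsd types.

/* Sketch: completion handler decides whether to release or resubmit. */
#include <stdbool.h>
#include <stdio.h>

struct callback {
	bool need_restart;
	int  status;
};

static void run_cb(struct callback *cb)
{
	printf("resubmitting callback\n");
	cb->need_restart = false;
}

static void cb_done(struct callback *cb, bool task_killed, int task_status)
{
	if (task_killed) {
		/* transport went away while queued: retry later */
		cb->need_restart = true;
		return;
	}
	cb->status = task_status;
}

static void cb_release(struct callback *cb)
{
	if (cb->need_restart)
		run_cb(cb);		/* queue it again */
	else
		printf("releasing callback, status=%d\n", cb->status);
}

int main(void)
{
	struct callback killed = { 0 }, done = { 0 };

	cb_done(&killed, true, 0);
	cb_release(&killed);		/* resubmitted */

	cb_done(&done, false, 0);
	cb_release(&done);		/* released */
	return 0;
}
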
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 92b9d97aff4f..864e2003e8de 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -52,7 +52,7 @@
static inline void
nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
{
- struct inode *inode = resfh->fh_dentry->d_inode;
+ struct inode *inode = d_inode(resfh->fh_dentry);
int status;
mutex_lock(&inode->i_mutex);
@@ -110,7 +110,7 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* in current environment or not.
*/
if (bmval[0] & FATTR4_WORD0_ACL) {
- if (!IS_POSIXACL(dentry->d_inode))
+ if (!IS_POSIXACL(d_inode(dentry)))
return nfserr_attrnotsupp;
}
@@ -209,7 +209,7 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
{
- umode_t mode = fh->fh_dentry->d_inode->i_mode;
+ umode_t mode = d_inode(fh->fh_dentry)->i_mode;
if (S_ISREG(mode))
return nfs_ok;
@@ -470,7 +470,7 @@ out:
fh_put(resfh);
kfree(resfh);
}
- nfsd4_cleanup_open_state(cstate, open, status);
+ nfsd4_cleanup_open_state(cstate, open);
nfsd4_bump_seqid(cstate, status);
return status;
}
@@ -881,7 +881,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&exp, &dentry);
if (err)
return err;
- if (dentry->d_inode == NULL) {
+ if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
} else
@@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
return status;
}
+ if (!file)
+ return nfserr_bad_stateid;
status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
fallocate->falloc_offset,
@@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
return status;
}
+ if (!file)
+ return nfserr_bad_stateid;
switch (seek->seek_whence) {
case NFS4_CONTENT_DATA:
@@ -1308,7 +1312,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
goto out_put_stid;
- nfserr = ops->proc_layoutget(current_fh->fh_dentry->d_inode,
+ nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry),
current_fh, lgp);
if (nfserr)
goto out_put_stid;
@@ -1342,7 +1346,7 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type);
if (!ops)
goto out;
- inode = current_fh->fh_dentry->d_inode;
+ inode = d_inode(current_fh->fh_dentry);
nfserr = nfserr_inval;
if (new_size <= seg->offset) {
@@ -1815,7 +1819,7 @@ static inline u32 nfsd4_getattr_rsize(struct svc_rqst *rqstp,
bmap0 &= ~FATTR4_WORD0_FILEHANDLE;
}
if (bmap2 & FATTR4_WORD2_SECURITY_LABEL) {
- ret += NFSD4_MAX_SEC_LABEL_LEN + 12;
+ ret += NFS4_MAXLABELLEN + 12;
bmap2 &= ~FATTR4_WORD2_SECURITY_LABEL;
}
/*
@@ -2282,13 +2286,13 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_func = (nfsd4op_func)nfsd4_allocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_ALLOCATE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_DEALLOCATE] = {
.op_func = (nfsd4op_func)nfsd4_deallocate,
.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
.op_name = "OP_DEALLOCATE",
- .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
[OP_SEEK] = {
.op_func = (nfsd4op_func)nfsd4_seek,
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 1c307f02baa8..d88ea7b9a85c 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -192,14 +192,14 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
dir = nn->rec_file->f_path.dentry;
/* lock the parent */
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock(&d_inode(dir)->i_mutex);
dentry = lookup_one_len(dname, dir, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
goto out_unlock;
}
- if (dentry->d_inode)
+ if (d_really_is_positive(dentry))
/*
* In the 4.1 case, where we're called from
* reclaim_complete(), records from the previous reboot
@@ -209,11 +209,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
* as well be forgiving and just succeed silently.
*/
goto out_put;
- status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU);
+ status = vfs_mkdir(d_inode(dir), dentry, S_IRWXU);
out_put:
dput(dentry);
out_unlock:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
if (status == 0) {
if (nn->in_grace) {
crp = nfs4_client_to_reclaim(dname, nn);
@@ -285,7 +285,7 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
}
status = iterate_dir(nn->rec_file, &ctx.ctx);
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
while (!list_empty(&ctx.names)) {
struct name_list *entry;
entry = list_entry(ctx.names.next, struct name_list, list);
@@ -302,7 +302,7 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
list_del(&entry->list);
kfree(entry);
}
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
nfs4_reset_creds(original_cred);
return status;
}
@@ -316,20 +316,20 @@ nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn)
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
dir = nn->rec_file->f_path.dentry;
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name, dir, namlen);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
goto out_unlock;
}
status = -ENOENT;
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
goto out;
- status = vfs_rmdir(dir->d_inode, dentry);
+ status = vfs_rmdir(d_inode(dir), dentry);
out:
dput(dentry);
out_unlock:
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
return status;
}
@@ -385,7 +385,7 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
if (nfs4_has_reclaimed_state(child->d_name.name, nn))
return 0;
- status = vfs_rmdir(parent->d_inode, child);
+ status = vfs_rmdir(d_inode(parent), child);
if (status)
printk("failed to remove client recovery directory %pd\n",
child);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8ba1d888f1e6..039f9c8a95e8 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -94,6 +94,7 @@ static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
+static struct kmem_cache *odstate_slab;
static void free_session(struct nfsd4_session *);
@@ -281,6 +282,7 @@ put_nfs4_file(struct nfs4_file *fi)
if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
+ WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
@@ -471,6 +473,86 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
__nfs4_file_put_access(fp, O_RDONLY);
}
+/*
+ * Allocate a new open/delegation state counter. This is needed for
+ * pNFS for proper return on close semantics.
+ *
+ * Note that we only allocate it for pNFS-enabled exports, otherwise
+ * all pointers to struct nfs4_clnt_odstate are always NULL.
+ */
+static struct nfs4_clnt_odstate *
+alloc_clnt_odstate(struct nfs4_client *clp)
+{
+ struct nfs4_clnt_odstate *co;
+
+ co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
+ if (co) {
+ co->co_client = clp;
+ atomic_set(&co->co_odcount, 1);
+ }
+ return co;
+}
+
+static void
+hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
+{
+ struct nfs4_file *fp = co->co_file;
+
+ lockdep_assert_held(&fp->fi_lock);
+ list_add(&co->co_perfile, &fp->fi_clnt_odstate);
+}
+
+static inline void
+get_clnt_odstate(struct nfs4_clnt_odstate *co)
+{
+ if (co)
+ atomic_inc(&co->co_odcount);
+}
+
+static void
+put_clnt_odstate(struct nfs4_clnt_odstate *co)
+{
+ struct nfs4_file *fp;
+
+ if (!co)
+ return;
+
+ fp = co->co_file;
+ if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
+ list_del(&co->co_perfile);
+ spin_unlock(&fp->fi_lock);
+
+ nfsd4_return_all_file_layouts(co->co_client, fp);
+ kmem_cache_free(odstate_slab, co);
+ }
+}
+
+static struct nfs4_clnt_odstate *
+find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
+{
+ struct nfs4_clnt_odstate *co;
+ struct nfs4_client *cl;
+
+ if (!new)
+ return NULL;
+
+ cl = new->co_client;
+
+ spin_lock(&fp->fi_lock);
+ list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
+ if (co->co_client == cl) {
+ get_clnt_odstate(co);
+ goto out;
+ }
+ }
+ co = new;
+ co->co_file = fp;
+ hash_clnt_odstate_locked(new);
+out:
+ spin_unlock(&fp->fi_lock);
+ return co;
+}
+
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
struct kmem_cache *slab)
{
@@ -606,7 +688,8 @@ static void block_delegations(struct knfsd_fh *fh)
}
static struct nfs4_delegation *
-alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
+alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
+ struct nfs4_clnt_odstate *odstate)
{
struct nfs4_delegation *dp;
long n;
@@ -631,6 +714,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
INIT_LIST_HEAD(&dp->dl_perfile);
INIT_LIST_HEAD(&dp->dl_perclnt);
INIT_LIST_HEAD(&dp->dl_recall_lru);
+ dp->dl_clnt_odstate = odstate;
+ get_clnt_odstate(odstate);
dp->dl_type = NFS4_OPEN_DELEGATE_READ;
dp->dl_retries = 1;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
@@ -714,6 +799,7 @@ static void destroy_delegation(struct nfs4_delegation *dp)
spin_lock(&state_lock);
unhash_delegation_locked(dp);
spin_unlock(&state_lock);
+ put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
@@ -724,6 +810,7 @@ static void revoke_delegation(struct nfs4_delegation *dp)
WARN_ON(!list_empty(&dp->dl_recall_lru));
+ put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
if (clp->cl_minorversion == 0)
@@ -933,6 +1020,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
struct nfs4_ol_stateid *stp = openlockstateid(stid);
+ put_clnt_odstate(stp->st_clnt_odstate);
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
@@ -1139,7 +1227,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
return sid->sequence % SESSION_HASH_SIZE;
}
-#ifdef NFSD_DEBUG
+#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
@@ -1538,7 +1626,6 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
INIT_LIST_HEAD(&clp->cl_lru);
- INIT_LIST_HEAD(&clp->cl_callbacks);
INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&clp->cl_lo_states);
@@ -1634,6 +1721,7 @@ __destroy_client(struct nfs4_client *clp)
while (!list_empty(&reaplist)) {
dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
+ put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
@@ -3057,6 +3145,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
spin_lock_init(&fp->fi_lock);
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
+ INIT_LIST_HEAD(&fp->fi_clnt_odstate);
fh_copy_shallow(&fp->fi_fhandle, fh);
fp->fi_deleg_file = NULL;
fp->fi_had_conflict = false;
@@ -3073,6 +3162,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
void
nfsd4_free_slabs(void)
{
+ kmem_cache_destroy(odstate_slab);
kmem_cache_destroy(openowner_slab);
kmem_cache_destroy(lockowner_slab);
kmem_cache_destroy(file_slab);
@@ -3103,8 +3193,14 @@ nfsd4_init_slabs(void)
sizeof(struct nfs4_delegation), 0, 0, NULL);
if (deleg_slab == NULL)
goto out_free_stateid_slab;
+ odstate_slab = kmem_cache_create("nfsd4_odstate",
+ sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
+ if (odstate_slab == NULL)
+ goto out_free_deleg_slab;
return 0;
+out_free_deleg_slab:
+ kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
kmem_cache_destroy(stateid_slab);
out_free_file_slab:
@@ -3581,6 +3677,14 @@ alloc_stateid:
open->op_stp = nfs4_alloc_open_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
+
+ if (nfsd4_has_session(cstate) &&
+ (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
+ open->op_odstate = alloc_clnt_odstate(clp);
+ if (!open->op_odstate)
+ return nfserr_jukebox;
+ }
+
return nfs_ok;
}
@@ -3869,7 +3973,7 @@ out_fput:
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
- struct nfs4_file *fp)
+ struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
int status;
struct nfs4_delegation *dp;
@@ -3877,7 +3981,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
if (fp->fi_had_conflict)
return ERR_PTR(-EAGAIN);
- dp = alloc_init_deleg(clp, fh);
+ dp = alloc_init_deleg(clp, fh, odstate);
if (!dp)
return ERR_PTR(-ENOMEM);
@@ -3903,6 +4007,7 @@ out_unlock:
spin_unlock(&state_lock);
out:
if (status) {
+ put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_stid(&dp->dl_stid);
return ERR_PTR(status);
}
@@ -3980,7 +4085,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
default:
goto out_no_deleg;
}
- dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
+ dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
if (IS_ERR(dp))
goto out_no_deleg;
@@ -4049,7 +4154,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfserr_bad_stateid;
if (nfsd4_is_deleg_cur(open))
goto out;
- status = nfserr_jukebox;
}
/*
@@ -4070,6 +4174,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
release_open_stateid(stp);
goto out;
}
+
+ stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
+ open->op_odstate);
+ if (stp->st_clnt_odstate == open->op_odstate)
+ open->op_odstate = NULL;
}
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4118,7 +4227,7 @@ out:
}
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
- struct nfsd4_open *open, __be32 status)
+ struct nfsd4_open *open)
{
if (open->op_openowner) {
struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
@@ -4130,6 +4239,8 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
nfs4_put_stid(&open->op_stp->st_stid);
+ if (open->op_odstate)
+ kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
@@ -4386,10 +4497,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
return nfserr_old_stateid;
}
+static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
+{
+ if (ols->st_stateowner->so_is_open_owner &&
+ !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
+ return nfserr_bad_stateid;
+ return nfs_ok;
+}
+
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
struct nfs4_stid *s;
- struct nfs4_ol_stateid *ols;
__be32 status = nfserr_bad_stateid;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
@@ -4419,13 +4537,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
- ols = openlockstateid(s);
- if (ols->st_stateowner->so_is_open_owner
- && !(openowner(ols->st_stateowner)->oo_flags
- & NFS4_OO_CONFIRMED))
- status = nfserr_bad_stateid;
- else
- status = nfs_ok;
+ status = nfsd4_check_openowner_confirmed(openlockstateid(s));
break;
default:
printk("unknown stateid type %x\n", s->sc_type);
@@ -4473,7 +4585,7 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
struct svc_fh *current_fh = &cstate->current_fh;
- struct inode *ino = current_fh->fh_dentry->d_inode;
+ struct inode *ino = d_inode(current_fh->fh_dentry);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct file *file = NULL;
__be32 status;
@@ -4517,8 +4629,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
status = nfs4_check_fh(current_fh, stp);
if (status)
goto out;
- if (stp->st_stateowner->so_is_open_owner
- && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
+ status = nfsd4_check_openowner_confirmed(stp);
+ if (status)
goto out;
status = nfs4_check_openmode(stp, flags);
if (status)
@@ -4853,9 +4965,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
- nfsd4_return_all_file_layouts(stp->st_stateowner->so_client,
- stp->st_stid.sc_file);
-
nfsd4_close_open_stateid(stp);
/* put reference from nfs4_preprocess_seqid_op */
@@ -4932,20 +5041,22 @@ nfs4_transform_lock_offset(struct file_lock *lock)
lock->fl_end = OFFSET_MAX;
}
-static void nfsd4_fl_get_owner(struct file_lock *dst, struct file_lock *src)
+static fl_owner_t
+nfsd4_fl_get_owner(fl_owner_t owner)
{
- struct nfs4_lockowner *lo = (struct nfs4_lockowner *)src->fl_owner;
- dst->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lo->lo_owner));
+ struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
+
+ nfs4_get_stateowner(&lo->lo_owner);
+ return owner;
}
-static void nfsd4_fl_put_owner(struct file_lock *fl)
+static void
+nfsd4_fl_put_owner(fl_owner_t owner)
{
- struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
+ struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
- if (lo) {
+ if (lo)
nfs4_put_stateowner(&lo->lo_owner);
- fl->fl_owner = NULL;
- }
}
static const struct lock_manager_operations nfsd_posix_mng_ops = {
@@ -5169,7 +5280,7 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_file *fi = ost->st_stid.sc_file;
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *cl = oo->oo_owner.so_client;
- struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
+ struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
struct nfs4_lockowner *lo;
unsigned int strhashval;
@@ -6487,6 +6598,7 @@ nfs4_state_shutdown_net(struct net *net)
list_for_each_safe(pos, next, &reaplist) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
+ put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
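
[Editor's note, not part of the patch] The nfs4state.c additions introduce a per-(client, file) open/delegation state counter for pNFS return-on-close: an open either finds an existing nfs4_clnt_odstate for the client under fi_lock and takes a reference, or hashes the freshly preallocated one, and the last put triggers nfsd4_return_all_file_layouts(). A compact userspace sketch of the find-or-insert-with-refcount shape (all names and the locking are simplified and illustrative):

/* Sketch: find an existing per-owner counter, or insert the caller's
 * preallocated one; the real code does this under fp->fi_lock. */
#include <stdio.h>
#include <stdlib.h>

struct counter {
	int owner_id;
	int refs;
	struct counter *next;
};

struct file_state {
	struct counter *head;	/* protected by a lock in real code */
};

static struct counter *find_or_insert(struct file_state *fs, struct counter *new)
{
	struct counter *c;

	for (c = fs->head; c; c = c->next) {
		if (c->owner_id == new->owner_id) {
			c->refs++;	/* reuse the existing counter */
			return c;
		}
	}
	new->next = fs->head;		/* first open by this owner */
	fs->head = new;
	return new;
}

int main(void)
{
	struct file_state fs = { .head = NULL };
	struct counter *a = calloc(1, sizeof(*a));
	struct counter *b = calloc(1, sizeof(*b));

	a->owner_id = 1; a->refs = 1;
	b->owner_id = 1; b->refs = 1;

	struct counter *first  = find_or_insert(&fs, a);
	struct counter *second = find_or_insert(&fs, b);

	printf("same counter reused: %s, refs=%d\n",
	       first == second ? "yes" : "no", first->refs);
	/* the unused preallocation (b) is freed by the caller, as in the patch */
	free(b);
	free(a);
	return 0;
}
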
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 5fb7e78169a6..158badf945df 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -424,7 +424,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
len += 4;
dummy32 = be32_to_cpup(p++);
READ_BUF(dummy32);
- if (dummy32 > NFSD4_MAX_SEC_LABEL_LEN)
+ if (dummy32 > NFS4_MAXLABELLEN)
return nfserr_badlabel;
len += (XDR_QUADLEN(dummy32) << 2);
READMEM(buf, dummy32);
@@ -2020,7 +2020,7 @@ static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
* dentries/path components in an array.
*/
for (;;) {
- if (cur.dentry == root->dentry && cur.mnt == root->mnt)
+ if (path_equal(&cur, root))
break;
if (cur.dentry == cur.mnt->mnt_root) {
if (follow_up(&cur))
@@ -2292,7 +2292,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
- err = security_inode_getsecctx(dentry->d_inode,
+ err = security_inode_getsecctx(d_inode(dentry),
&context, &contextlen);
contextsupport = (err == 0);
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
@@ -2384,7 +2384,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
- p = encode_change(p, &stat, dentry->d_inode);
+ p = encode_change(p, &stat, d_inode(dentry));
}
if (bmval0 & FATTR4_WORD0_SIZE) {
p = xdr_reserve_space(xdr, 8);
@@ -2807,7 +2807,7 @@ nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
dentry = lookup_one_len(name, cd->rd_fhp->fh_dentry, namlen);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
- if (!dentry->d_inode) {
+ if (d_really_is_negative(dentry)) {
/*
* nfsd_buffered_readdir drops the i_mutex between
* readdir and calling this callback, leaving a window
@@ -3324,7 +3324,7 @@ static __be32 nfsd4_encode_splice_read(
}
eof = (read->rd_offset + maxcount >=
- read->rd_fhp->fh_dentry->d_inode->i_size);
+ d_inode(read->rd_fhp->fh_dentry)->i_size);
*(p++) = htonl(eof);
*(p++) = htonl(maxcount);
@@ -3401,7 +3401,7 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3));
eof = (read->rd_offset + maxcount >=
- read->rd_fhp->fh_dentry->d_inode->i_size);
+ d_inode(read->rd_fhp->fh_dentry)->i_size);
tmp = htonl(eof);
write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4);
@@ -3422,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
unsigned long maxcount;
struct xdr_stream *xdr = &resp->xdr;
struct file *file = read->rd_filp;
+ struct svc_fh *fhp = read->rd_fhp;
int starting_len = xdr->buf->len;
struct raparms *ra;
__be32 *p;
@@ -3445,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
maxcount = min_t(unsigned long, maxcount, read->rd_length);
- if (!read->rd_filp) {
+ if (read->rd_filp)
+ err = nfsd_permission(resp->rqstp, fhp->fh_export,
+ fhp->fh_dentry,
+ NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
+ else
err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
&file, &ra);
- if (err)
- goto err_truncate;
- }
+ if (err)
+ goto err_truncate;
if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
err = nfsd4_encode_splice_read(resp, read, file, maxcount);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index aa47d75ddb26..9690cb4dd588 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1250,15 +1250,15 @@ static int __init init_nfsd(void)
int retval;
printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
- retval = register_cld_notifier();
- if (retval)
- return retval;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
- goto out_unregister_notifier;
- retval = nfsd4_init_slabs();
+ return retval;
+ retval = register_cld_notifier();
if (retval)
goto out_unregister_pernet;
+ retval = nfsd4_init_slabs();
+ if (retval)
+ goto out_unregister_notifier;
retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
@@ -1290,10 +1290,10 @@ out_exit_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
-out_unregister_pernet:
- unregister_pernet_subsys(&nfsd_net_ops);
out_unregister_notifier:
unregister_cld_notifier();
+out_unregister_pernet:
+ unregister_pernet_subsys(&nfsd_net_ops);
return retval;
}
@@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void)
nfsd4_exit_pnfs();
nfsd_fault_inject_cleanup();
unregister_filesystem(&nfsd_fs_type);
- unregister_pernet_subsys(&nfsd_net_ops);
unregister_cld_notifier();
+ unregister_pernet_subsys(&nfsd_net_ops);
}
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
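
[Editor's note, not part of the patch] The nfsctl.c hunks above reorder module init so register_pernet_subsys() runs first and the error and exit paths unwind in strict reverse order of setup. A generic sketch of that goto-based unwinding shape; the step names below are placeholders, not the nfsd functions.

/* Sketch: init in order A, B, C; unwind in reverse on failure. */
#include <stdio.h>

static int setup_a(void) { puts("setup A"); return 0; }
static int setup_b(void) { puts("setup B"); return 0; }
static int setup_c(void) { puts("setup C"); return -1; }  /* pretend C fails */

static void teardown_a(void) { puts("teardown A"); }
static void teardown_b(void) { puts("teardown B"); }

static int module_init_example(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;
	ret = setup_b();
	if (ret)
		goto out_teardown_a;
	ret = setup_c();
	if (ret)
		goto out_teardown_b;
	return 0;

out_teardown_b:
	teardown_b();
out_teardown_a:
	teardown_a();
	return ret;
}

int main(void)
{
	printf("init -> %d\n", module_init_example());
	return 0;
}
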
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 565c4da1a9eb..cf980523898b 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -24,7 +24,7 @@
#include "export.h"
#undef ifdebug
-#ifdef NFSD_DEBUG
+#ifdef CONFIG_SUNRPC_DEBUG
# define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag)
#else
# define ifdebug(flag) if (0)
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index e9fa966fc37f..350041a40fe5 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -38,7 +38,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
/* make sure parents give x permission to user */
int err;
parent = dget_parent(tdentry);
- err = inode_permission(parent->d_inode, MAY_EXEC);
+ err = inode_permission(d_inode(parent), MAY_EXEC);
if (err < 0) {
dput(parent);
break;
@@ -340,7 +340,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
if (error)
goto out;
- error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
+ error = nfsd_mode_check(rqstp, d_inode(dentry)->i_mode, type);
if (error)
goto out;
@@ -412,8 +412,8 @@ static inline void _fh_update_old(struct dentry *dentry,
struct svc_export *exp,
struct knfsd_fh *fh)
{
- fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino);
- fh->ofh_generation = dentry->d_inode->i_generation;
+ fh->ofh_ino = ino_t_to_u32(d_inode(dentry)->i_ino);
+ fh->ofh_generation = d_inode(dentry)->i_generation;
if (d_is_dir(dentry) ||
(exp->ex_flags & NFSEXP_NOSUBTREECHECK))
fh->ofh_dirino = 0;
@@ -426,7 +426,7 @@ static bool is_root_export(struct svc_export *exp)
static struct super_block *exp_sb(struct svc_export *exp)
{
- return exp->ex_path.dentry->d_inode->i_sb;
+ return d_inode(exp->ex_path.dentry)->i_sb;
}
static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp)
@@ -520,12 +520,12 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
*
*/
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
dev_t ex_dev = exp_sb(exp)->s_dev;
dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %pd2, ino=%ld)\n",
MAJOR(ex_dev), MINOR(ex_dev),
- (long) exp->ex_path.dentry->d_inode->i_ino,
+ (long) d_inode(exp->ex_path.dentry)->i_ino,
dentry,
(inode ? inode->i_ino : 0));
@@ -558,7 +558,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
fhp->fh_handle.ofh_dev = old_encode_dev(ex_dev);
fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev;
fhp->fh_handle.ofh_xino =
- ino_t_to_u32(exp->ex_path.dentry->d_inode->i_ino);
+ ino_t_to_u32(d_inode(exp->ex_path.dentry)->i_ino);
fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry));
if (inode)
_fh_update_old(dentry, exp, &fhp->fh_handle);
@@ -570,7 +570,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
mk_fsid(fhp->fh_handle.fh_fsid_type,
fhp->fh_handle.fh_fsid,
ex_dev,
- exp->ex_path.dentry->d_inode->i_ino,
+ d_inode(exp->ex_path.dentry)->i_ino,
exp->ex_fsid, exp->ex_uuid);
if (inode)
@@ -597,7 +597,7 @@ fh_update(struct svc_fh *fhp)
goto out_bad;
dentry = fhp->fh_dentry;
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
goto out_negative;
if (fhp->fh_handle.fh_version != 1) {
_fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle);
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index f22920442172..1e90dad4926b 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -225,7 +225,7 @@ fill_pre_wcc(struct svc_fh *fhp)
{
struct inode *inode;
- inode = fhp->fh_dentry->d_inode;
+ inode = d_inode(fhp->fh_dentry);
if (!fhp->fh_pre_saved) {
fhp->fh_pre_mtime = inode->i_mtime;
fhp->fh_pre_ctime = inode->i_ctime;
@@ -264,7 +264,7 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
return;
}
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
mutex_lock_nested(&inode->i_mutex, subclass);
fill_pre_wcc(fhp);
fhp->fh_locked = 1;
@@ -284,7 +284,7 @@ fh_unlock(struct svc_fh *fhp)
{
if (fhp->fh_locked) {
fill_post_wcc(fhp);
- mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(fhp->fh_dentry)->i_mutex);
fhp->fh_locked = 0;
}
}
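
The bulk of the hunks in this series replace open-coded dentry->d_inode dereferences with the d_inode(), d_really_is_negative() and d_really_is_positive() helpers, so callers no longer touch the field directly. A minimal sketch of the accessor idea, with simplified stand-in types rather than the kernel's real struct dentry:

	#include <stdbool.h>
	#include <stddef.h>

	struct inode  { unsigned long i_ino; };
	struct dentry { struct inode *d_inode; };	/* stand-in, not the real layout */

	/* Accessors keep callers away from the raw field. */
	static inline struct inode *d_inode(const struct dentry *dentry)
	{
		return dentry->d_inode;
	}

	static inline bool d_really_is_negative(const struct dentry *dentry)
	{
		return d_inode(dentry) == NULL;		/* no inode attached */
	}

	static inline bool d_really_is_positive(const struct dentry *dentry)
	{
		return d_inode(dentry) != NULL;
	}
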
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index b8680738f588..aecbcd34d336 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -223,7 +223,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
}
fh_init(newfhp, NFS_FHSIZE);
nfserr = fh_compose(newfhp, dirfhp->fh_export, dchild, dirfhp);
- if (!nfserr && !dchild->d_inode)
+ if (!nfserr && d_really_is_negative(dchild))
nfserr = nfserr_noent;
dput(dchild);
if (nfserr) {
@@ -241,7 +241,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
}
}
- inode = newfhp->fh_dentry->d_inode;
+ inode = d_inode(newfhp->fh_dentry);
/* Unfudge the mode bits */
if (attr->ia_valid & ATTR_MODE) {
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 412d7061f9e5..79d964aa8079 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -187,7 +187,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
*p++ = htonl((u32) stat->ino);
*p++ = htonl((u32) stat->atime.tv_sec);
*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
- lease_get_mtime(dentry->d_inode, &time);
+ lease_get_mtime(d_inode(dentry), &time);
*p++ = htonl((u32) time.tv_sec);
*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
*p++ = htonl((u32) stat->ctime.tv_sec);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 4f3bfeb11766..dbc4f85a5008 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -63,12 +63,12 @@ typedef struct {
struct nfsd4_callback {
struct nfs4_client *cb_clp;
- struct list_head cb_per_client;
u32 cb_minorversion;
struct rpc_message cb_msg;
struct nfsd4_callback_ops *cb_ops;
struct work_struct cb_work;
- bool cb_done;
+ int cb_status;
+ bool cb_need_restart;
};
struct nfsd4_callback_ops {
@@ -126,6 +126,7 @@ struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
+ struct nfs4_clnt_odstate *dl_clnt_odstate;
u32 dl_type;
time_t dl_time;
/* For recall: */
@@ -332,7 +333,6 @@ struct nfs4_client {
int cl_cb_state;
struct nfsd4_callback cl_cb_null;
struct nfsd4_session *cl_cb_session;
- struct list_head cl_callbacks; /* list of in-progress callbacks */
/* for all client information that callback code might need: */
spinlock_t cl_lock;
@@ -465,6 +465,17 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
}
/*
+ * Per-client state indicating the number of opens and outstanding delegations
+ * on a file from a particular client. 'od' stands for 'open & delegation'
+ */
+struct nfs4_clnt_odstate {
+ struct nfs4_client *co_client;
+ struct nfs4_file *co_file;
+ struct list_head co_perfile;
+ atomic_t co_odcount;
+};
+
+/*
* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
*
* These objects are global. nfsd keeps one instance of a nfs4_file per
@@ -485,6 +496,7 @@ struct nfs4_file {
struct list_head fi_delegations;
struct rcu_head fi_rcu;
};
+ struct list_head fi_clnt_odstate;
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
/*
@@ -526,6 +538,7 @@ struct nfs4_ol_stateid {
struct list_head st_perstateowner;
struct list_head st_locks;
struct nfs4_stateowner * st_stateowner;
+ struct nfs4_clnt_odstate * st_clnt_odstate;
unsigned char st_access_bmap;
unsigned char st_deny_bmap;
struct nfs4_ol_stateid * st_openstp;
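
state.h introduces nfs4_clnt_odstate, a per-(client, file) object that tracks opens and outstanding delegations through the atomic co_odcount field and hangs off the file's new fi_clnt_odstate list; open stateids and delegations gain pointers to it. The get/put helpers are not part of this hunk, so the following is only a guess at the general shape of such a refcounting scheme, using C11 atomics in place of the kernel's atomic_t and made-up odstate_* names:

	#include <stdatomic.h>
	#include <stdlib.h>

	/* Hypothetical illustration of a shared, refcounted per-(client, file) object. */
	struct odstate_sketch {
		atomic_int refcount;
		/* back pointers to client and file would live here */
	};

	static struct odstate_sketch *odstate_alloc(void)
	{
		struct odstate_sketch *co = calloc(1, sizeof(*co));

		if (co)
			atomic_init(&co->refcount, 1);
		return co;
	}

	static struct odstate_sketch *odstate_get(struct odstate_sketch *co)
	{
		if (co)
			atomic_fetch_add(&co->refcount, 1);
		return co;
	}

	static void odstate_put(struct odstate_sketch *co)
	{
		/* last reference: in nfsd this would also unlink from fi_clnt_odstate */
		if (co && atomic_fetch_sub(&co->refcount, 1) == 1)
			free(co);
	}

	int main(void)
	{
		struct odstate_sketch *co = odstate_alloc();

		odstate_get(co);	/* e.g. a second open takes a reference */
		odstate_put(co);
		odstate_put(co);	/* last put frees the object */
		return 0;
	}
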
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 368526582429..84d770be056e 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -174,7 +174,7 @@ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
return 1;
if (!(exp->ex_flags & NFSEXP_V4ROOT))
return 0;
- return dentry->d_inode != NULL;
+ return d_inode(dentry) != NULL;
}
__be32
@@ -270,7 +270,7 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
* dentry may be negative, it may need to be updated.
*/
err = fh_compose(resfh, exp, dentry, fhp);
- if (!err && !dentry->d_inode)
+ if (!err && d_really_is_negative(dentry))
err = nfserr_noent;
out:
dput(dentry);
@@ -284,7 +284,7 @@ out:
static int
commit_metadata(struct svc_fh *fhp)
{
- struct inode *inode = fhp->fh_dentry->d_inode;
+ struct inode *inode = d_inode(fhp->fh_dentry);
const struct export_operations *export_ops = inode->i_sb->s_export_op;
if (!EX_ISSYNC(fhp->fh_export))
@@ -364,7 +364,7 @@ static __be32
nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct iattr *iap)
{
- struct inode *inode = fhp->fh_dentry->d_inode;
+ struct inode *inode = d_inode(fhp->fh_dentry);
int host_err;
if (iap->ia_size < inode->i_size) {
@@ -426,7 +426,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
}
dentry = fhp->fh_dentry;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
/* Ignore any mode updates on symlinks */
if (S_ISLNK(inode->i_mode))
@@ -495,7 +495,7 @@ out:
*/
int nfsd4_is_junction(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (inode == NULL)
return 0;
@@ -521,9 +521,9 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
dentry = fhp->fh_dentry;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
host_error = security_inode_setsecctx(dentry, label->data, label->len);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
return nfserrno(host_error);
}
#else
@@ -706,7 +706,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = fhp->fh_dentry;
- inode = path.dentry->d_inode;
+ inode = d_inode(path.dentry);
/* Disallow write access to files with the append-only bit set
* or any access when mandatory locking enabled
@@ -1211,7 +1211,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
dentry = fhp->fh_dentry;
- dirp = dentry->d_inode;
+ dirp = d_inode(dentry);
err = nfserr_notdir;
if (!dirp->i_op->lookup)
@@ -1250,7 +1250,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
* Make sure the child dentry is still negative ...
*/
err = nfserr_exist;
- if (dchild->d_inode) {
+ if (d_really_is_positive(dchild)) {
dprintk("nfsd_create: dentry %pd/%pd not negative!\n",
dentry, dchild);
goto out;
@@ -1353,7 +1353,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
dentry = fhp->fh_dentry;
- dirp = dentry->d_inode;
+ dirp = d_inode(dentry);
/* Get all the sanity checks out of the way before
* we lock the parent. */
@@ -1376,7 +1376,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out_nfserr;
/* If file doesn't exist, check for permissions to create one */
- if (!dchild->d_inode) {
+ if (d_really_is_negative(dchild)) {
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
@@ -1397,7 +1397,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
v_atime = verifier[1]&0x7fffffff;
}
- if (dchild->d_inode) {
+ if (d_really_is_positive(dchild)) {
err = 0;
switch (createmode) {
@@ -1420,17 +1420,17 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
break;
case NFS3_CREATE_EXCLUSIVE:
- if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
- && dchild->d_inode->i_atime.tv_sec == v_atime
- && dchild->d_inode->i_size == 0 ) {
+ if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime
+ && d_inode(dchild)->i_atime.tv_sec == v_atime
+ && d_inode(dchild)->i_size == 0 ) {
if (created)
*created = 1;
break;
}
case NFS4_CREATE_EXCLUSIVE4_1:
- if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
- && dchild->d_inode->i_atime.tv_sec == v_atime
- && dchild->d_inode->i_size == 0 ) {
+ if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime
+ && d_inode(dchild)->i_atime.tv_sec == v_atime
+ && d_inode(dchild)->i_size == 0 ) {
if (created)
*created = 1;
goto set_attr;
@@ -1513,7 +1513,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = fhp->fh_dentry;
- inode = path.dentry->d_inode;
+ inode = d_inode(path.dentry);
err = nfserr_inval;
if (!inode->i_op->readlink)
@@ -1576,7 +1576,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (IS_ERR(dnew))
goto out_nfserr;
- host_err = vfs_symlink(dentry->d_inode, dnew, path);
+ host_err = vfs_symlink(d_inode(dentry), dnew, path);
err = nfserrno(host_err);
if (!err)
err = nfserrno(commit_metadata(fhp));
@@ -1632,7 +1632,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
fh_lock_nested(ffhp, I_MUTEX_PARENT);
ddir = ffhp->fh_dentry;
- dirp = ddir->d_inode;
+ dirp = d_inode(ddir);
dnew = lookup_one_len(name, ddir, len);
host_err = PTR_ERR(dnew);
@@ -1642,7 +1642,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
dold = tfhp->fh_dentry;
err = nfserr_noent;
- if (!dold->d_inode)
+ if (d_really_is_negative(dold))
goto out_dput;
host_err = vfs_link(dold, dirp, dnew, NULL);
if (!host_err) {
@@ -1689,10 +1689,10 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
goto out;
fdentry = ffhp->fh_dentry;
- fdir = fdentry->d_inode;
+ fdir = d_inode(fdentry);
tdentry = tfhp->fh_dentry;
- tdir = tdentry->d_inode;
+ tdir = d_inode(tdentry);
err = nfserr_perm;
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
@@ -1717,7 +1717,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
goto out_nfserr;
host_err = -ENOENT;
- if (!odentry->d_inode)
+ if (d_really_is_negative(odentry))
goto out_dput_old;
host_err = -EINVAL;
if (odentry == trap)
@@ -1790,21 +1790,21 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
fh_lock_nested(fhp, I_MUTEX_PARENT);
dentry = fhp->fh_dentry;
- dirp = dentry->d_inode;
+ dirp = d_inode(dentry);
rdentry = lookup_one_len(fname, dentry, flen);
host_err = PTR_ERR(rdentry);
if (IS_ERR(rdentry))
goto out_nfserr;
- if (!rdentry->d_inode) {
+ if (d_really_is_negative(rdentry)) {
dput(rdentry);
err = nfserr_noent;
goto out;
}
if (!type)
- type = rdentry->d_inode->i_mode & S_IFMT;
+ type = d_inode(rdentry)->i_mode & S_IFMT;
if (type != S_IFDIR)
host_err = vfs_unlink(dirp, rdentry, NULL);
@@ -2015,7 +2015,7 @@ __be32
nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
struct dentry *dentry, int acc)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int err;
if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 0bda93e58e1b..2f8c092be2b3 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -40,7 +40,6 @@
#include "state.h"
#include "nfsd.h"
-#define NFSD4_MAX_SEC_LABEL_LEN 2048
#define NFSD4_MAX_TAGLEN 128
#define XDR_LEN(n) (((n) + 3) & ~3)
@@ -248,6 +247,7 @@ struct nfsd4_open {
struct nfs4_openowner *op_openowner; /* used during processing */
struct nfs4_file *op_file; /* used during processing */
struct nfs4_ol_stateid *op_stp; /* used during processing */
+ struct nfs4_clnt_odstate *op_odstate; /* used during processing */
struct nfs4_acl *op_acl;
struct xdr_netobj op_label;
};
@@ -632,7 +632,7 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{
BUG_ON(!fhp->fh_pre_saved);
cinfo->atomic = fhp->fh_post_saved;
- cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
+ cinfo->change_supported = IS_I_VERSION(d_inode(fhp->fh_dentry));
cinfo->before_change = fhp->fh_pre_change;
cinfo->after_change = fhp->fh_post_change;
@@ -683,7 +683,7 @@ extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
struct svc_fh *current_fh, struct nfsd4_open *open);
extern void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate);
extern void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
- struct nfsd4_open *open, __be32 status);
+ struct nfsd4_open *open);
extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
extern __be32 nfsd4_close(struct svc_rqst *rqstp,
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 741fd02e0444..8df0f3b7839b 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -405,13 +405,14 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
static int nilfs_palloc_count_desc_blocks(struct inode *inode,
unsigned long *desc_blocks)
{
- unsigned long blknum;
+ __u64 blknum;
int ret;
ret = nilfs_bmap_last_key(NILFS_I(inode)->i_bmap, &blknum);
if (likely(!ret))
*desc_blocks = DIV_ROUND_UP(
- blknum, NILFS_MDT(inode)->mi_blocks_per_desc_block);
+ (unsigned long)blknum,
+ NILFS_MDT(inode)->mi_blocks_per_desc_block);
return ret;
}
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index aadbd0b5e3e8..27f75bcbeb30 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -152,9 +152,7 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
*
* %-EEXIST - A record associated with @key already exist.
*/
-int nilfs_bmap_insert(struct nilfs_bmap *bmap,
- unsigned long key,
- unsigned long rec)
+int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec)
{
int ret;
@@ -191,19 +189,47 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
return bmap->b_ops->bop_delete(bmap, key);
}
-int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key)
+/**
+ * nilfs_bmap_seek_key - seek a valid entry and return its key
+ * @bmap: bmap struct
+ * @start: start key number
+ * @keyp: place to store valid key
+ *
+ * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap
+ * starting from @start, and stores it to @keyp if found.
+ *
+ * Return Value: On success, 0 is returned. On error, one of the following
+ * negative error codes is returned.
+ *
+ * %-EIO - I/O error.
+ *
+ * %-ENOMEM - Insufficient amount of memory available.
+ *
+ * %-ENOENT - No valid entry was found
+ */
+int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp)
{
- __u64 lastkey;
int ret;
down_read(&bmap->b_sem);
- ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
+ ret = bmap->b_ops->bop_seek_key(bmap, start, keyp);
+ up_read(&bmap->b_sem);
+
+ if (ret < 0)
+ ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+ return ret;
+}
+
+int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp)
+{
+ int ret;
+
+ down_read(&bmap->b_sem);
+ ret = bmap->b_ops->bop_last_key(bmap, keyp);
up_read(&bmap->b_sem);
if (ret < 0)
ret = nilfs_bmap_convert_error(bmap, __func__, ret);
- else
- *key = lastkey;
return ret;
}
@@ -224,7 +250,7 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key)
*
* %-ENOENT - A record associated with @key does not exist.
*/
-int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key)
+int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key)
{
int ret;
@@ -235,7 +261,7 @@ int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key)
return nilfs_bmap_convert_error(bmap, __func__, ret);
}
-static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key)
+static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key)
{
__u64 lastkey;
int ret;
@@ -276,7 +302,7 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key)
*
* %-ENOMEM - Insufficient amount of memory available.
*/
-int nilfs_bmap_truncate(struct nilfs_bmap *bmap, unsigned long key)
+int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key)
{
int ret;
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index b89e68076adc..bfa817ce40b3 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -76,8 +76,10 @@ struct nilfs_bmap_operations {
union nilfs_binfo *);
int (*bop_mark)(struct nilfs_bmap *, __u64, int);
- /* The following functions are internal use only. */
+ int (*bop_seek_key)(const struct nilfs_bmap *, __u64, __u64 *);
int (*bop_last_key)(const struct nilfs_bmap *, __u64 *);
+
+ /* The following functions are internal use only. */
int (*bop_check_insert)(const struct nilfs_bmap *, __u64);
int (*bop_check_delete)(struct nilfs_bmap *, __u64);
int (*bop_gather_data)(struct nilfs_bmap *, __u64 *, __u64 *, int);
@@ -153,10 +155,11 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *);
int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *);
void nilfs_bmap_write(struct nilfs_bmap *, struct nilfs_inode *);
int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned);
-int nilfs_bmap_insert(struct nilfs_bmap *, unsigned long, unsigned long);
-int nilfs_bmap_delete(struct nilfs_bmap *, unsigned long);
-int nilfs_bmap_last_key(struct nilfs_bmap *, unsigned long *);
-int nilfs_bmap_truncate(struct nilfs_bmap *, unsigned long);
+int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec);
+int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key);
+int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp);
+int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp);
+int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key);
void nilfs_bmap_clear(struct nilfs_bmap *);
int nilfs_bmap_propagate(struct nilfs_bmap *, struct buffer_head *);
void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *, struct list_head *);
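
nilfs_bmap_seek_key() is the new bmap entry point: it returns the first valid key at or after @start through the per-bmap bop_seek_key operation and reports -ENOENT when nothing remains; the surrounding hunks also widen the bmap key arguments from unsigned long to __u64. A compact sketch of the seek semantics over a toy sparse mapping:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NKEYS 16

	/* Toy sparse map: nonzero means "a record exists at this key". */
	static const uint8_t present[NKEYS] = { [2] = 1, [7] = 1, [11] = 1 };

	/* Return 0 and store the first present key >= start, or -ENOENT. */
	static int seek_key(uint64_t start, uint64_t *keyp)
	{
		for (uint64_t key = start; key < NKEYS; key++) {
			if (present[key]) {
				*keyp = key;
				return 0;
			}
		}
		return -ENOENT;
	}

	int main(void)
	{
		uint64_t key;

		if (!seek_key(3, &key))
			printf("first key >= 3 is %llu\n", (unsigned long long)key);	/* 7 */
		return 0;
	}
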
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index ecdbae19a766..919fd5bb14a8 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
nchildren = nilfs_btree_node_get_nchildren(node);
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
- level > NILFS_BTREE_LEVEL_MAX ||
+ level >= NILFS_BTREE_LEVEL_MAX ||
nchildren < 0 ||
nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
@@ -633,6 +633,44 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
return 0;
}
+/**
+ * nilfs_btree_get_next_key - get next valid key from btree path array
+ * @btree: bmap struct of btree
+ * @path: array of nilfs_btree_path struct
+ * @minlevel: start level
+ * @nextkey: place to store the next valid key
+ *
+ * Return Value: If a next key was found, 0 is returned. Otherwise,
+ * -ENOENT is returned.
+ */
+static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree,
+ const struct nilfs_btree_path *path,
+ int minlevel, __u64 *nextkey)
+{
+ struct nilfs_btree_node *node;
+ int maxlevel = nilfs_btree_height(btree) - 1;
+ int index, next_adj, level;
+
+ /* Next index is already set to bp_index for leaf nodes. */
+ next_adj = 0;
+ for (level = minlevel; level <= maxlevel; level++) {
+ if (level == maxlevel)
+ node = nilfs_btree_get_root(btree);
+ else
+ node = nilfs_btree_get_nonroot_node(path, level);
+
+ index = path[level].bp_index + next_adj;
+ if (index < nilfs_btree_node_get_nchildren(node)) {
+ /* Next key is in this node */
+ *nextkey = nilfs_btree_node_get_key(node, index);
+ return 0;
+ }
+ /* For non-leaf nodes, next index is stored at bp_index + 1. */
+ next_adj = 1;
+ }
+ return -ENOENT;
+}
+
static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
__u64 key, int level, __u64 *ptrp)
{
@@ -1563,6 +1601,27 @@ out:
return ret;
}
+static int nilfs_btree_seek_key(const struct nilfs_bmap *btree, __u64 start,
+ __u64 *keyp)
+{
+ struct nilfs_btree_path *path;
+ const int minlevel = NILFS_BTREE_LEVEL_NODE_MIN;
+ int ret;
+
+ path = nilfs_btree_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = nilfs_btree_do_lookup(btree, path, start, NULL, minlevel, 0);
+ if (!ret)
+ *keyp = start;
+ else if (ret == -ENOENT)
+ ret = nilfs_btree_get_next_key(btree, path, minlevel, keyp);
+
+ nilfs_btree_free_path(path);
+ return ret;
+}
+
static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp)
{
struct nilfs_btree_path *path;
@@ -2298,7 +2357,9 @@ static const struct nilfs_bmap_operations nilfs_btree_ops = {
.bop_assign = nilfs_btree_assign,
.bop_mark = nilfs_btree_mark,
+ .bop_seek_key = nilfs_btree_seek_key,
.bop_last_key = nilfs_btree_last_key,
+
.bop_check_insert = NULL,
.bop_check_delete = nilfs_btree_check_delete,
.bop_gather_data = nilfs_btree_gather_data,
@@ -2318,7 +2379,9 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
.bop_assign = nilfs_btree_assign_gc,
.bop_mark = NULL,
+ .bop_seek_key = NULL,
.bop_last_key = NULL,
+
.bop_check_insert = NULL,
.bop_check_delete = NULL,
.bop_gather_data = NULL,
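
nilfs_btree_seek_key() reuses the lookup path: when the exact key is missing, nilfs_btree_get_next_key() walks the saved path from the leaf toward the root, taking bp_index at the leaf and bp_index + 1 at the upper levels, and returns the first key that still falls inside a node. A simplified walk over per-level cursors, with made-up names and a tiny fixed tree:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAXLEVEL 3

	/* Toy path state: key array, child count and cursor index per level. */
	struct level_sketch {
		uint64_t keys[4];
		int nchildren;
		int bp_index;
	};

	/*
	 * At the leaf the cursor already names the next candidate slot;
	 * at upper levels the next candidate is cursor + 1.
	 */
	static int next_key_sketch(const struct level_sketch *path, uint64_t *nextkey)
	{
		int next_adj = 0;

		for (int level = 0; level < MAXLEVEL; level++) {
			int index = path[level].bp_index + next_adj;

			if (index < path[level].nchildren) {
				*nextkey = path[level].keys[index];
				return 0;
			}
			next_adj = 1;
		}
		return -ENOENT;
	}

	int main(void)
	{
		const struct level_sketch path[MAXLEVEL] = {
			{ { 10, 12, 14, 0 }, 3, 3 },	/* leaf: cursor past the end   */
			{ { 10, 20, 0, 0 },  2, 0 },	/* parent: slot 1 holds key 20 */
			{ { 10, 0, 0, 0 },   1, 0 },	/* root                        */
		};
		uint64_t key;

		if (!next_key_sketch(path, &key))
			printf("next key: %llu\n", (unsigned long long)key);	/* 20 */
		return 0;
	}
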
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 0d58075f34e2..b6596cab9e99 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -53,6 +53,13 @@ nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}
+static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile,
+ unsigned long blkoff)
+{
+ return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff
+ + 1 - NILFS_MDT(cpfile)->mi_first_entry_offset;
+}
+
static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
__u64 curr,
@@ -146,6 +153,44 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
create, nilfs_cpfile_block_init, bhp);
}
+/**
+ * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile
+ * @cpfile: inode of cpfile
+ * @start_cno: start checkpoint number (inclusive)
+ * @end_cno: end checkpoint number (inclusive)
+ * @cnop: place to store the next checkpoint number
+ * @bhp: place to store a pointer to buffer_head struct
+ *
+ * Return Value: On success, it returns 0. On error, the following negative
+ * error code is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ *
+ * %-EIO - I/O error
+ *
+ * %-ENOENT - no block exists in the range.
+ */
+static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
+ __u64 start_cno, __u64 end_cno,
+ __u64 *cnop,
+ struct buffer_head **bhp)
+{
+ unsigned long start, end, blkoff;
+ int ret;
+
+ if (unlikely(start_cno > end_cno))
+ return -ENOENT;
+
+ start = nilfs_cpfile_get_blkoff(cpfile, start_cno);
+ end = nilfs_cpfile_get_blkoff(cpfile, end_cno);
+
+ ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp);
+ if (!ret)
+ *cnop = (blkoff == start) ? start_cno :
+ nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff);
+ return ret;
+}
+
static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
__u64 cno)
{
@@ -403,14 +448,15 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
return -ENOENT; /* checkpoint number 0 is invalid */
down_read(&NILFS_MDT(cpfile)->mi_sem);
- for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
- ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
- ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
+ for (n = 0; n < nci; cno += ncps) {
+ ret = nilfs_cpfile_find_checkpoint_block(
+ cpfile, cno, cur_cno - 1, &cno, &bh);
if (ret < 0) {
- if (ret != -ENOENT)
- goto out;
- continue; /* skip hole */
+ if (likely(ret == -ENOENT))
+ break;
+ goto out;
}
+ ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
kaddr = kmap_atomic(bh->b_page);
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
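
nilfs_cpfile_do_get_cpinfo() now skips holes with nilfs_cpfile_find_checkpoint_block(), which converts the checkpoint range to a block-offset range, asks nilfs_mdt_find_block() for the first existing block, and, when that block is not the starting one, recovers the first checkpoint number stored in it via nilfs_cpfile_first_checkpoint_in_block(). That helper is the inverse of the usual cno-to-block mapping; a toy version with illustrative parameters (4 checkpoints per block, one reserved header slot):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative values only; the real counts come from the mdt inode. */
	#define CP_PER_BLOCK		4ULL
	#define FIRST_ENTRY_OFFSET	1ULL

	/* First checkpoint number whose entry lives in block @blkoff. */
	static uint64_t first_cno_in_block(unsigned long blkoff)
	{
		return CP_PER_BLOCK * blkoff + 1 - FIRST_ENTRY_OFFSET;
	}

	/* Block offset holding checkpoint @cno (forward direction, for comparison). */
	static unsigned long blkoff_of_cno(uint64_t cno)
	{
		return (unsigned long)((cno + FIRST_ENTRY_OFFSET - 1) / CP_PER_BLOCK);
	}

	int main(void)
	{
		for (unsigned long b = 0; b < 3; b++)
			printf("block %lu starts at cno %llu\n",
			       b, (unsigned long long)first_cno_in_block(b));
		printf("cno 6 lives in block %lu\n", blkoff_of_cno(6));	/* block 1 */
		return 0;
	}
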
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 197a63e9d102..0ee0bed3649b 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -435,7 +435,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
*/
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const unsigned char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned chunk_size = nilfs_chunk_size(dir);
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 82f4865e86dd..ebf89fd8ac1a 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -173,6 +173,21 @@ static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
return ret;
}
+static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
+ __u64 *keyp)
+{
+ __u64 key;
+
+ for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
+ if (nilfs_direct_get_ptr(direct, key) !=
+ NILFS_BMAP_INVALID_PTR) {
+ *keyp = key;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
__u64 key, lastkey;
@@ -355,7 +370,9 @@ static const struct nilfs_bmap_operations nilfs_direct_ops = {
.bop_assign = nilfs_direct_assign,
.bop_mark = NULL,
+ .bop_seek_key = nilfs_direct_seek_key,
.bop_last_key = nilfs_direct_last_key,
+
.bop_check_insert = nilfs_direct_check_insert,
.bop_check_delete = NULL,
.bop_gather_data = nilfs_direct_gather_data,
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index a8c728acb7a8..54575e3cc1a2 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -143,8 +143,6 @@ static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
*/
const struct file_operations nilfs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.unlocked_ioctl = nilfs_ioctl,
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 8b5969538f39..258d9fe2521a 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -26,7 +26,7 @@
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
@@ -106,7 +106,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
if (unlikely(err))
goto out;
- err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
+ err = nilfs_bmap_insert(ii->i_bmap, blkoff,
(unsigned long)bh_result);
if (unlikely(err != 0)) {
if (err == -EEXIST) {
@@ -305,8 +305,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
}
static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -314,18 +313,17 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
ssize_t size;
- if (rw == WRITE)
+ if (iov_iter_rw(iter) == WRITE)
return 0;
/* Needs synchronization with the cleaner */
- size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
- nilfs_get_block);
+ size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely((rw & WRITE) && size < 0)) {
+ if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + count;
@@ -443,21 +441,20 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
void nilfs_set_inode_flags(struct inode *inode)
{
unsigned int flags = NILFS_I(inode)->i_flags;
+ unsigned int new_fl = 0;
- inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
- S_DIRSYNC);
if (flags & FS_SYNC_FL)
- inode->i_flags |= S_SYNC;
+ new_fl |= S_SYNC;
if (flags & FS_APPEND_FL)
- inode->i_flags |= S_APPEND;
+ new_fl |= S_APPEND;
if (flags & FS_IMMUTABLE_FL)
- inode->i_flags |= S_IMMUTABLE;
+ new_fl |= S_IMMUTABLE;
if (flags & FS_NOATIME_FL)
- inode->i_flags |= S_NOATIME;
+ new_fl |= S_NOATIME;
if (flags & FS_DIRSYNC_FL)
- inode->i_flags |= S_DIRSYNC;
- mapping_set_gfp_mask(inode->i_mapping,
- mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+ new_fl |= S_DIRSYNC;
+ inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
+ S_NOATIME | S_DIRSYNC);
}
int nilfs_read_inode_common(struct inode *inode,
@@ -542,6 +539,8 @@ static int __nilfs_read_inode(struct super_block *sb,
brelse(bh);
up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
nilfs_set_inode_flags(inode);
+ mapping_set_gfp_mask(inode->i_mapping,
+ mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
return 0;
failed_unmap:
@@ -714,7 +713,7 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
unsigned long from)
{
- unsigned long b;
+ __u64 b;
int ret;
if (!test_bit(NILFS_I_BMAP, &ii->i_state))
@@ -729,7 +728,7 @@ repeat:
if (b < from)
return;
- b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
+ b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
ret = nilfs_bmap_truncate(ii->i_bmap, b);
nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
if (!ret || (ret == -ENOMEM &&
@@ -836,7 +835,7 @@ void nilfs_evict_inode(struct inode *inode)
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct nilfs_transaction_info ti;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
int err;
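
nilfs_set_inode_flags() now collects the wanted S_* bits in new_fl and hands them to inode_set_flags() together with a mask, so only the covered bits are replaced, and the GFP-mask adjustment moves to __nilfs_read_inode(). A sketch of the accumulate-then-apply-with-mask idea using plain integers (the flag values below are illustrative, not the kernel constants):

	#include <stdio.h>

	#define FL_SYNC		0x1u
	#define FL_APPEND	0x2u
	#define FL_IMMUTABLE	0x4u

	#define S_SYNC		0x10u
	#define S_APPEND	0x20u
	#define S_IMMUTABLE	0x40u
	#define S_MASK		(S_SYNC | S_APPEND | S_IMMUTABLE)

	/* Replace only the bits covered by @mask, leave everything else alone. */
	static unsigned int set_flags_masked(unsigned int i_flags,
					     unsigned int new_fl, unsigned int mask)
	{
		return (i_flags & ~mask) | (new_fl & mask);
	}

	static unsigned int map_fs_flags(unsigned int fs_flags)
	{
		unsigned int new_fl = 0;

		if (fs_flags & FL_SYNC)
			new_fl |= S_SYNC;
		if (fs_flags & FL_APPEND)
			new_fl |= S_APPEND;
		if (fs_flags & FL_IMMUTABLE)
			new_fl |= S_IMMUTABLE;
		return new_fl;
	}

	int main(void)
	{
		unsigned int i_flags = 0x100u | S_APPEND;	/* unrelated bit + stale S_APPEND */
		unsigned int new_fl = map_fs_flags(FL_SYNC);

		i_flags = set_flags_masked(i_flags, new_fl, S_MASK);
		printf("i_flags = 0x%x\n", i_flags);	/* 0x110: unrelated bit kept, S_SYNC set */
		return 0;
	}
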
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 892cf5ffdb8e..dee34d990281 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -261,6 +261,60 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
}
/**
+ * nilfs_mdt_find_block - find and get a buffer on meta data file.
+ * @inode: inode of the meta data file
+ * @start: start block offset (inclusive)
+ * @end: end block offset (inclusive)
+ * @blkoff: block offset
+ * @out_bh: place to store a pointer to buffer_head struct
+ *
+ * nilfs_mdt_find_block() looks up an existing block in the range of
+ * [@start, @end] and stores a pointer to the buffer head of the block in
+ * @out_bh, and the block offset in @blkoff, respectively. @out_bh and
+ * @blkoff are substituted only when zero is returned.
+ *
+ * Return Value: On success, it returns 0. On error, the following negative
+ * error code is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ *
+ * %-EIO - I/O error
+ *
+ * %-ENOENT - no block was found in the range
+ */
+int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
+ unsigned long end, unsigned long *blkoff,
+ struct buffer_head **out_bh)
+{
+ __u64 next;
+ int ret;
+
+ if (unlikely(start > end))
+ return -ENOENT;
+
+ ret = nilfs_mdt_read_block(inode, start, true, out_bh);
+ if (!ret) {
+ *blkoff = start;
+ goto out;
+ }
+ if (unlikely(ret != -ENOENT || start == ULONG_MAX))
+ goto out;
+
+ ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next);
+ if (!ret) {
+ if (next <= end) {
+ ret = nilfs_mdt_read_block(inode, next, true, out_bh);
+ if (!ret)
+ *blkoff = next;
+ } else {
+ ret = -ENOENT;
+ }
+ }
+out:
+ return ret;
+}
+
+/**
* nilfs_mdt_delete_block - make a hole on the meta data file.
* @inode: inode of the meta data file
* @block: block offset
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index ab172e8549c5..fe529a87a208 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -78,6 +78,9 @@ int nilfs_mdt_get_block(struct inode *, unsigned long, int,
void (*init_block)(struct inode *,
struct buffer_head *, void *),
struct buffer_head **);
+int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
+ unsigned long end, unsigned long *blkoff,
+ struct buffer_head **out_bh);
int nilfs_mdt_delete_block(struct inode *, unsigned long);
int nilfs_mdt_forget_block(struct inode *, unsigned long);
int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
@@ -111,7 +114,10 @@ static inline __u64 nilfs_mdt_cno(struct inode *inode)
return ((struct the_nilfs *)inode->i_sb->s_fs_info)->ns_cno;
}
-#define nilfs_mdt_bgl_lock(inode, bg) \
- (&NILFS_MDT(inode)->mi_bgl->locks[(bg) & (NR_BG_LOCKS-1)].lock)
+static inline spinlock_t *
+nilfs_mdt_bgl_lock(struct inode *inode, unsigned int block_group)
+{
+ return bgl_lock_ptr(NILFS_MDT(inode)->mi_bgl, block_group);
+}
#endif /* _NILFS_MDT_H */
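
nilfs_mdt_find_block() first tries to read the @start block directly and only falls back to nilfs_bmap_seek_key() when that block is a hole, reading the next allocated block if it still lies within [@start, @end]. The same control flow, reduced to a toy sparse store:

	#include <errno.h>
	#include <stdio.h>

	#define NBLOCKS 16

	/* Toy sparse meta-data file: nonzero entries are "allocated" blocks. */
	static const int allocated[NBLOCKS] = { [1] = 1, [5] = 1, [9] = 1 };

	static int read_block(unsigned long blk)
	{
		return (blk < NBLOCKS && allocated[blk]) ? 0 : -ENOENT;
	}

	static int seek_next(unsigned long start, unsigned long *next)
	{
		for (unsigned long b = start; b < NBLOCKS; b++) {
			if (allocated[b]) {
				*next = b;
				return 0;
			}
		}
		return -ENOENT;
	}

	/* Find the first allocated block in [start, end] and store it in *blkoff. */
	static int find_block(unsigned long start, unsigned long end,
			      unsigned long *blkoff)
	{
		unsigned long next;
		int ret;

		if (start > end)
			return -ENOENT;
		if (!read_block(start)) {		/* fast path: start exists */
			*blkoff = start;
			return 0;
		}
		ret = seek_next(start + 1, &next);	/* otherwise skip the hole */
		if (ret)
			return ret;
		if (next > end)
			return -ENOENT;
		ret = read_block(next);
		if (!ret)
			*blkoff = next;
		return ret;
	}

	int main(void)
	{
		unsigned long blk;

		if (!find_block(2, 10, &blk))
			printf("first block in [2,10]: %lu\n", blk);	/* 5 */
		return 0;
	}
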
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 0f84b257932c..22180836ec22 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -192,7 +192,7 @@ out_fail:
static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct nilfs_transaction_info ti;
int err;
@@ -283,7 +283,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
if (!de)
goto out;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
err = -EIO;
if (le64_to_cpu(de->inode) != inode->i_ino)
goto out;
@@ -318,7 +318,7 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
if (!err) {
nilfs_mark_inode_dirty(dir);
- nilfs_mark_inode_dirty(dentry->d_inode);
+ nilfs_mark_inode_dirty(d_inode(dentry));
err = nilfs_transaction_commit(dir->i_sb);
} else
nilfs_transaction_abort(dir->i_sb);
@@ -328,7 +328,7 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct nilfs_transaction_info ti;
int err;
@@ -358,8 +358,8 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct page *dir_page = NULL;
struct nilfs_dir_entry *dir_de = NULL;
struct page *old_page;
@@ -453,13 +453,13 @@ static struct dentry *nilfs_get_parent(struct dentry *child)
struct qstr dotdot = QSTR_INIT("..", 2);
struct nilfs_root *root;
- ino = nilfs_inode_by_name(child->d_inode, &dotdot);
+ ino = nilfs_inode_by_name(d_inode(child), &dotdot);
if (!ino)
return ERR_PTR(-ENOENT);
- root = NILFS_I(child->d_inode)->i_root;
+ root = NILFS_I(d_inode(child))->i_root;
- inode = nilfs_iget(child->d_inode->i_sb, root, ino);
+ inode = nilfs_iget(d_inode(child)->i_sb, root, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 700ecbcca55d..45d650addd56 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -89,18 +89,16 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
void nilfs_forget_buffer(struct buffer_head *bh)
{
struct page *page = bh->b_page;
+ const unsigned long clear_bits =
+ (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
+ 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
+ 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
lock_buffer(bh);
- clear_buffer_nilfs_volatile(bh);
- clear_buffer_nilfs_checked(bh);
- clear_buffer_nilfs_redirected(bh);
- clear_buffer_async_write(bh);
- clear_buffer_dirty(bh);
+ set_mask_bits(&bh->b_state, clear_bits, 0);
if (nilfs_page_buffers_clean(page))
__nilfs_clear_page_dirty(page);
- clear_buffer_uptodate(bh);
- clear_buffer_mapped(bh);
bh->b_blocknr = -1;
ClearPageUptodate(page);
ClearPageMappedToDisk(page);
@@ -421,6 +419,10 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
+ const unsigned long clear_bits =
+ (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
+ 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
+ 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
bh = head = page_buffers(page);
do {
@@ -430,13 +432,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
"discard block %llu, size %zu",
(u64)bh->b_blocknr, bh->b_size);
}
- clear_buffer_async_write(bh);
- clear_buffer_dirty(bh);
- clear_buffer_nilfs_volatile(bh);
- clear_buffer_nilfs_checked(bh);
- clear_buffer_nilfs_redirected(bh);
- clear_buffer_uptodate(bh);
- clear_buffer_mapped(bh);
+ set_mask_bits(&bh->b_state, clear_bits, 0);
unlock_buffer(bh);
} while (bh = bh->b_this_page, bh != head);
}
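
Both hunks above collapse a run of clear_buffer_*() calls into a single set_mask_bits() on bh->b_state, so all of the listed BH_* bits change in one atomic update of the state word. A userspace approximation of such a helper's contract, built on a C11 compare-and-swap loop (this mirrors the semantics, not the kernel's exact implementation):

	#include <stdatomic.h>
	#include <stdio.h>

	/*
	 * Atomically compute (old & ~clear) | set on *state, retrying if
	 * another thread changed the word in between.
	 */
	static unsigned long set_mask_bits_sketch(_Atomic unsigned long *state,
						  unsigned long clear,
						  unsigned long set)
	{
		unsigned long old = atomic_load(state);

		while (!atomic_compare_exchange_weak(state, &old,
						     (old & ~clear) | set))
			;	/* old was reloaded by the failed CAS; retry */
		return old;
	}

	int main(void)
	{
		_Atomic unsigned long b_state = 0xffUL;

		set_mask_bits_sketch(&b_state, 0x0fUL, 0x100UL);
		printf("b_state = 0x%lx\n", atomic_load(&b_state));	/* 0x1f0 */
		return 0;
	}
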
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0c3f303baf32..c6abbad9b8e3 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -24,6 +24,7 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
+#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
@@ -1588,7 +1589,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
- set_buffer_async_write(bh);
if (bh->b_page != bd_page) {
if (bd_page) {
lock_page(bd_page);
@@ -1688,7 +1688,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
list_for_each_entry(segbuf, logs, sb_list) {
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
- clear_buffer_async_write(bh);
if (bh->b_page != bd_page) {
if (bd_page)
end_page_writeback(bd_page);
@@ -1768,7 +1767,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
b_assoc_buffers) {
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
- clear_buffer_async_write(bh);
if (bh->b_page != bd_page) {
if (bd_page)
end_page_writeback(bd_page);
@@ -1788,12 +1786,13 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
*/
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
- clear_buffer_async_write(bh);
- clear_buffer_delay(bh);
- clear_buffer_nilfs_volatile(bh);
- clear_buffer_nilfs_redirected(bh);
+ const unsigned long set_bits = (1 << BH_Uptodate);
+ const unsigned long clear_bits =
+ (1 << BH_Dirty | 1 << BH_Async_Write |
+ 1 << BH_Delay | 1 << BH_NILFS_Volatile |
+ 1 << BH_NILFS_Redirected);
+
+ set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 5bc2a1cf73c3..f47585bfeb01 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -610,7 +610,7 @@ static int nilfs_unfreeze(struct super_block *sb)
static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root;
+ struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;
struct the_nilfs *nilfs = root->nilfs;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
unsigned long long blocks;
@@ -681,7 +681,7 @@ static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct super_block *sb = dentry->d_sb;
struct the_nilfs *nilfs = sb->s_fs_info;
- struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root;
+ struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;
if (!nilfs_test_opt(nilfs, BARRIER))
seq_puts(seq, ",nobarrier");
@@ -1020,7 +1020,7 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
struct dentry *dentry;
int ret;
- if (cno < 0 || cno > nilfs->ns_cno)
+ if (cno > nilfs->ns_cno)
return false;
if (cno >= nilfs_last_cno(nilfs))
@@ -1190,7 +1190,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~MS_RDONLY;
- root = NILFS_I(sb->s_root->d_inode)->i_root;
+ root = NILFS_I(d_inode(sb->s_root))->i_root;
err = nilfs_attach_log_writer(sb, root);
if (err)
goto restore_opts;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index af1b24fa899d..99521e7c492b 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -13,7 +13,7 @@ static const struct file_operations ns_file_operations = {
static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
const struct proc_ns_operations *ns_ops = dentry->d_fsdata;
return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]",
@@ -22,7 +22,7 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
static void ns_prune_dentry(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (inode) {
struct ns_common *ns = inode->i_private;
atomic_long_set(&ns->stashed, 0);
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 36ae529511c4..2ff263e6d363 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -8,7 +8,7 @@ ntfs-y := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
ntfs-$(CONFIG_NTFS_RW) += bitmap.o lcnalloc.o logfile.o quota.o usnjrnl.o
-ccflags-y := -DNTFS_VERSION=\"2.1.31\"
+ccflags-y := -DNTFS_VERSION=\"2.1.32\"
ccflags-$(CONFIG_NTFS_DEBUG) += -DDEBUG
ccflags-$(CONFIG_NTFS_RW) += -DNTFS_RW
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 1da9b2d184dc..7bb487e663b4 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
/*
* file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
+ * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
@@ -28,7 +28,6 @@
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>
-#include <linux/aio.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -329,62 +328,166 @@ err_out:
return err;
}
-/**
- * ntfs_fault_in_pages_readable -
- *
- * Fault a number of userspace pages into pagetables.
- *
- * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
- * with more than two userspace pages as well as handling the single page case
- * elegantly.
- *
- * If you find this difficult to understand, then think of the while loop being
- * the following code, except that we do without the integer variable ret:
- *
- * do {
- * ret = __get_user(c, uaddr);
- * uaddr += PAGE_SIZE;
- * } while (!ret && uaddr < end);
- *
- * Note, the final __get_user() may well run out-of-bounds of the user buffer,
- * but _not_ out-of-bounds of the page the user buffer belongs to, and since
- * this is only a read and not a write, and since it is still in the same page,
- * it should not matter and this makes the code much simpler.
- */
-static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
- int bytes)
-{
- const char __user *end;
- volatile char c;
-
- /* Set @end to the first byte outside the last page we care about. */
- end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
-
- while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
- ;
-}
-
-/**
- * ntfs_fault_in_pages_readable_iovec -
- *
- * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
- */
-static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
- size_t iov_ofs, int bytes)
+static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
+ struct iov_iter *from)
{
- do {
- const char __user *buf;
- unsigned len;
+ loff_t pos;
+ s64 end, ll;
+ ssize_t err;
+ unsigned long flags;
+ struct file *file = iocb->ki_filp;
+ struct inode *vi = file_inode(file);
+ ntfs_inode *base_ni, *ni = NTFS_I(vi);
+ ntfs_volume *vol = ni->vol;
- buf = iov->iov_base + iov_ofs;
- len = iov->iov_len - iov_ofs;
- if (len > bytes)
- len = bytes;
- ntfs_fault_in_pages_readable(buf, len);
- bytes -= len;
- iov++;
- iov_ofs = 0;
- } while (bytes);
+ ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+ "0x%llx, count 0x%zx.", vi->i_ino,
+ (unsigned)le32_to_cpu(ni->type),
+ (unsigned long long)iocb->ki_pos,
+ iov_iter_count(from));
+ err = generic_write_checks(iocb, from);
+ if (unlikely(err <= 0))
+ goto out;
+ /*
+ * All checks have passed. Before we start doing any writing we want
+ * to abort any totally illegal writes.
+ */
+ BUG_ON(NInoMstProtected(ni));
+ BUG_ON(ni->type != AT_DATA);
+ /* If file is encrypted, deny access, just like NT4. */
+ if (NInoEncrypted(ni)) {
+ /* Only $DATA attributes can be encrypted. */
+ /*
+ * Reminder for later: Encrypted files are _always_
+ * non-resident so that the content can always be encrypted.
+ */
+ ntfs_debug("Denying write access to encrypted file.");
+ err = -EACCES;
+ goto out;
+ }
+ if (NInoCompressed(ni)) {
+ /* Only unnamed $DATA attribute can be compressed. */
+ BUG_ON(ni->name_len);
+ /*
+ * Reminder for later: If resident, the data is not actually
+ * compressed. Only on the switch to non-resident does
+ * compression kick in. This is in contrast to encrypted files
+ * (see above).
+ */
+ ntfs_error(vi->i_sb, "Writing to compressed files is not "
+ "implemented yet. Sorry.");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ base_ni = ni;
+ if (NInoAttr(ni))
+ base_ni = ni->ext.base_ntfs_ino;
+ err = file_remove_suid(file);
+ if (unlikely(err))
+ goto out;
+ /*
+ * Our ->update_time method always succeeds thus file_update_time()
+ * cannot fail either so there is no need to check the return code.
+ */
+ file_update_time(file);
+ pos = iocb->ki_pos;
+ /* The first byte after the last cluster being written to. */
+ end = (pos + iov_iter_count(from) + vol->cluster_size_mask) &
+ ~(u64)vol->cluster_size_mask;
+ /*
+ * If the write goes beyond the allocated size, extend the allocation
+ * to cover the whole of the write, rounded up to the nearest cluster.
+ */
+ read_lock_irqsave(&ni->size_lock, flags);
+ ll = ni->allocated_size;
+ read_unlock_irqrestore(&ni->size_lock, flags);
+ if (end > ll) {
+ /*
+ * Extend the allocation without changing the data size.
+ *
+ * Note we ensure the allocation is big enough to at least
+ * write some data but we do not require the allocation to be
+ * complete, i.e. it may be partial.
+ */
+ ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
+ if (likely(ll >= 0)) {
+ BUG_ON(pos >= ll);
+ /* If the extension was partial truncate the write. */
+ if (end > ll) {
+ ntfs_debug("Truncating write to inode 0x%lx, "
+ "attribute type 0x%x, because "
+ "the allocation was only "
+ "partially extended.",
+ vi->i_ino, (unsigned)
+ le32_to_cpu(ni->type));
+ iov_iter_truncate(from, ll - pos);
+ }
+ } else {
+ err = ll;
+ read_lock_irqsave(&ni->size_lock, flags);
+ ll = ni->allocated_size;
+ read_unlock_irqrestore(&ni->size_lock, flags);
+ /* Perform a partial write if possible or fail. */
+ if (pos < ll) {
+ ntfs_debug("Truncating write to inode 0x%lx "
+ "attribute type 0x%x, because "
+ "extending the allocation "
+ "failed (error %d).",
+ vi->i_ino, (unsigned)
+ le32_to_cpu(ni->type),
+ (int)-err);
+ iov_iter_truncate(from, ll - pos);
+ } else {
+ if (err != -ENOSPC)
+ ntfs_error(vi->i_sb, "Cannot perform "
+ "write to inode "
+ "0x%lx, attribute "
+ "type 0x%x, because "
+ "extending the "
+ "allocation failed "
+ "(error %ld).",
+ vi->i_ino, (unsigned)
+ le32_to_cpu(ni->type),
+ (long)-err);
+ else
+ ntfs_debug("Cannot perform write to "
+ "inode 0x%lx, "
+ "attribute type 0x%x, "
+ "because there is not "
+ "space left.",
+ vi->i_ino, (unsigned)
+ le32_to_cpu(ni->type));
+ goto out;
+ }
+ }
+ }
+ /*
+ * If the write starts beyond the initialized size, extend it up to the
+ * beginning of the write and initialize all non-sparse space between
+ * the old initialized size and the new one. This automatically also
+ * increments the vfs inode->i_size to keep it above or equal to the
+ * initialized_size.
+ */
+ read_lock_irqsave(&ni->size_lock, flags);
+ ll = ni->initialized_size;
+ read_unlock_irqrestore(&ni->size_lock, flags);
+ if (pos > ll) {
+ /*
+ * Wait for ongoing direct i/o to complete before proceeding.
+ * New direct i/o cannot start as we hold i_mutex.
+ */
+ inode_dio_wait(vi);
+ err = ntfs_attr_extend_initialized(ni, pos);
+ if (unlikely(err < 0))
+ ntfs_error(vi->i_sb, "Cannot perform write to inode "
+ "0x%lx, attribute type 0x%x, because "
+ "extending the initialized size "
+ "failed (error %d).", vi->i_ino,
+ (unsigned)le32_to_cpu(ni->type),
+ (int)-err);
+ }
+out:
+ return err;
}
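
ntfs_prepare_file_for_write() now carries the checks that used to live in ntfs_file_buffered_write(): it calls generic_write_checks(), extends the allocation to the cluster-rounded end of the write, and, when the extension is only partial but still reaches past @pos, shrinks the request with iov_iter_truncate() instead of failing outright. The clamp itself is simple arithmetic; a toy version with plain byte counts:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Given a write of @count bytes at @pos and an allocation that could
	 * only be extended up to @alloc_end, return how many bytes may still
	 * be written (0 means the write must fail instead of being truncated).
	 */
	static uint64_t clamp_write(uint64_t pos, uint64_t count, uint64_t alloc_end)
	{
		if (pos >= alloc_end)
			return 0;			/* nothing usable was allocated */
		if (pos + count > alloc_end)
			count = alloc_end - pos;	/* partial extension: truncate */
		return count;
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)clamp_write(4096, 8192, 8192));	/* 4096 */
		printf("%llu\n", (unsigned long long)clamp_write(8192, 4096, 8192));	/* 0    */
		return 0;
	}
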
/**
@@ -421,8 +524,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
goto err_out;
}
}
- err = add_to_page_cache_lru(*cached_page, mapping, index,
- GFP_KERNEL);
+ err = add_to_page_cache_lru(*cached_page, mapping,
+ index, GFP_KERNEL);
if (unlikely(err)) {
if (err == -EEXIST)
continue;
@@ -1268,180 +1371,6 @@ rl_not_mapped_enoent:
return err;
}
-/*
- * Copy as much as we can into the pages and return the number of bytes which
- * were successfully copied. If a fault is encountered then clear the pages
- * out to (ofs + bytes) and return the number of bytes which were copied.
- */
-static inline size_t ntfs_copy_from_user(struct page **pages,
- unsigned nr_pages, unsigned ofs, const char __user *buf,
- size_t bytes)
-{
- struct page **last_page = pages + nr_pages;
- char *addr;
- size_t total = 0;
- unsigned len;
- int left;
-
- do {
- len = PAGE_CACHE_SIZE - ofs;
- if (len > bytes)
- len = bytes;
- addr = kmap_atomic(*pages);
- left = __copy_from_user_inatomic(addr + ofs, buf, len);
- kunmap_atomic(addr);
- if (unlikely(left)) {
- /* Do it the slow way. */
- addr = kmap(*pages);
- left = __copy_from_user(addr + ofs, buf, len);
- kunmap(*pages);
- if (unlikely(left))
- goto err_out;
- }
- total += len;
- bytes -= len;
- if (!bytes)
- break;
- buf += len;
- ofs = 0;
- } while (++pages < last_page);
-out:
- return total;
-err_out:
- total += len - left;
- /* Zero the rest of the target like __copy_from_user(). */
- while (++pages < last_page) {
- bytes -= len;
- if (!bytes)
- break;
- len = PAGE_CACHE_SIZE;
- if (len > bytes)
- len = bytes;
- zero_user(*pages, 0, len);
- }
- goto out;
-}
-
-static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
- const struct iovec *iov, size_t iov_ofs, size_t bytes)
-{
- size_t total = 0;
-
- while (1) {
- const char __user *buf = iov->iov_base + iov_ofs;
- unsigned len;
- size_t left;
-
- len = iov->iov_len - iov_ofs;
- if (len > bytes)
- len = bytes;
- left = __copy_from_user_inatomic(vaddr, buf, len);
- total += len;
- bytes -= len;
- vaddr += len;
- if (unlikely(left)) {
- total -= left;
- break;
- }
- if (!bytes)
- break;
- iov++;
- iov_ofs = 0;
- }
- return total;
-}
-
-static inline void ntfs_set_next_iovec(const struct iovec **iovp,
- size_t *iov_ofsp, size_t bytes)
-{
- const struct iovec *iov = *iovp;
- size_t iov_ofs = *iov_ofsp;
-
- while (bytes) {
- unsigned len;
-
- len = iov->iov_len - iov_ofs;
- if (len > bytes)
- len = bytes;
- bytes -= len;
- iov_ofs += len;
- if (iov->iov_len == iov_ofs) {
- iov++;
- iov_ofs = 0;
- }
- }
- *iovp = iov;
- *iov_ofsp = iov_ofs;
-}
-
-/*
- * This has the same side-effects and return value as ntfs_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
- * single-segment behaviour.
- *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
- * atomic and when not atomic. This is ok because it calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error. And on many architectures
- * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
- * makes no difference at all on those architectures.
- */
-static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
- unsigned nr_pages, unsigned ofs, const struct iovec **iov,
- size_t *iov_ofs, size_t bytes)
-{
- struct page **last_page = pages + nr_pages;
- char *addr;
- size_t copied, len, total = 0;
-
- do {
- len = PAGE_CACHE_SIZE - ofs;
- if (len > bytes)
- len = bytes;
- addr = kmap_atomic(*pages);
- copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
- *iov, *iov_ofs, len);
- kunmap_atomic(addr);
- if (unlikely(copied != len)) {
- /* Do it the slow way. */
- addr = kmap(*pages);
- copied = __ntfs_copy_from_user_iovec_inatomic(addr +
- ofs, *iov, *iov_ofs, len);
- if (unlikely(copied != len))
- goto err_out;
- kunmap(*pages);
- }
- total += len;
- ntfs_set_next_iovec(iov, iov_ofs, len);
- bytes -= len;
- if (!bytes)
- break;
- ofs = 0;
- } while (++pages < last_page);
-out:
- return total;
-err_out:
- BUG_ON(copied > len);
- /* Zero the rest of the target like __copy_from_user(). */
- memset(addr + ofs + copied, 0, len - copied);
- kunmap(*pages);
- total += copied;
- ntfs_set_next_iovec(iov, iov_ofs, copied);
- while (++pages < last_page) {
- bytes -= len;
- if (!bytes)
- break;
- len = PAGE_CACHE_SIZE;
- if (len > bytes)
- len = bytes;
- zero_user(*pages, 0, len);
- }
- goto out;
-}
-
static inline void ntfs_flush_dcache_pages(struct page **pages,
unsigned nr_pages)
{
@@ -1762,86 +1691,83 @@ err_out:
return err;
}
-static void ntfs_write_failed(struct address_space *mapping, loff_t to)
+/*
+ * Copy as much as we can into the pages and return the number of bytes which
+ * were successfully copied. If a fault is encountered then clear the pages
+ * out to (ofs + bytes) and return the number of bytes which were copied.
+ */
+static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
+ unsigned ofs, struct iov_iter *i, size_t bytes)
{
- struct inode *inode = mapping->host;
+ struct page **last_page = pages + nr_pages;
+ size_t total = 0;
+ struct iov_iter data = *i;
+ unsigned len, copied;
- if (to > inode->i_size) {
- truncate_pagecache(inode, inode->i_size);
- ntfs_truncate_vfs(inode);
- }
+ do {
+ len = PAGE_CACHE_SIZE - ofs;
+ if (len > bytes)
+ len = bytes;
+ copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+ len);
+ total += copied;
+ bytes -= copied;
+ if (!bytes)
+ break;
+ iov_iter_advance(&data, copied);
+ if (copied < len)
+ goto err;
+ ofs = 0;
+ } while (++pages < last_page);
+out:
+ return total;
+err:
+ /* Zero the rest of the target like __copy_from_user(). */
+ len = PAGE_CACHE_SIZE - copied;
+ do {
+ if (len > bytes)
+ len = bytes;
+ zero_user(*pages, copied, len);
+ bytes -= len;
+ copied = 0;
+ len = PAGE_CACHE_SIZE;
+ } while (++pages < last_page);
+ goto out;
}
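
ntfs_copy_from_user_iter() replaces the old iovec walkers with iov_iter_copy_from_user_atomic(); on a short copy it falls through to the err label and zero-fills the rest of the target pages, preserving the previous helpers' "clear out to ofs + bytes" behaviour. A flat-buffer sketch of that copy-then-zero pattern, without the page-by-page bookkeeping:

	#include <stdio.h>
	#include <string.h>

	/*
	 * Copy @bytes from @src into @dst; if the source runs short
	 * (src_len < bytes), zero the rest of the requested range, the way
	 * the helper above zeroes the remaining target pages after a fault.
	 */
	static size_t copy_or_zero(char *dst, size_t bytes,
				   const char *src, size_t src_len)
	{
		size_t copied = src_len < bytes ? src_len : bytes;

		memcpy(dst, src, copied);
		memset(dst + copied, 0, bytes - copied);	/* zero-fill the tail */
		return copied;
	}

	int main(void)
	{
		char dst[24];
		size_t n = copy_or_zero(dst, sizeof(dst), "short source", 12);

		printf("copied %zu of %zu bytes, rest zeroed\n", n, sizeof(dst));
		return 0;
	}
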
/**
- * ntfs_file_buffered_write -
- *
- * Locking: The vfs is holding ->i_mutex on the inode.
+ * ntfs_perform_write - perform buffered write to a file
+ * @file: file to write to
+ * @i: iov_iter with data to write
+ * @pos: byte offset in file at which to begin writing to
*/
-static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
- const struct iovec *iov, unsigned long nr_segs,
- loff_t pos, loff_t *ppos, size_t count)
+static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
+ loff_t pos)
{
- struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *vi = mapping->host;
ntfs_inode *ni = NTFS_I(vi);
ntfs_volume *vol = ni->vol;
struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
struct page *cached_page = NULL;
- char __user *buf = NULL;
- s64 end, ll;
VCN last_vcn;
LCN lcn;
- unsigned long flags;
- size_t bytes, iov_ofs = 0; /* Offset in the current iovec. */
- ssize_t status, written;
+ size_t bytes;
+ ssize_t status, written = 0;
unsigned nr_pages;
- int err;
- ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
- "pos 0x%llx, count 0x%lx.",
- vi->i_ino, (unsigned)le32_to_cpu(ni->type),
- (unsigned long long)pos, (unsigned long)count);
- if (unlikely(!count))
- return 0;
- BUG_ON(NInoMstProtected(ni));
- /*
- * If the attribute is not an index root and it is encrypted or
- * compressed, we cannot write to it yet. Note we need to check for
- * AT_INDEX_ALLOCATION since this is the type of both directory and
- * index inodes.
- */
- if (ni->type != AT_INDEX_ALLOCATION) {
- /* If file is encrypted, deny access, just like NT4. */
- if (NInoEncrypted(ni)) {
- /*
- * Reminder for later: Encrypted files are _always_
- * non-resident so that the content can always be
- * encrypted.
- */
- ntfs_debug("Denying write access to encrypted file.");
- return -EACCES;
- }
- if (NInoCompressed(ni)) {
- /* Only unnamed $DATA attribute can be compressed. */
- BUG_ON(ni->type != AT_DATA);
- BUG_ON(ni->name_len);
- /*
- * Reminder for later: If resident, the data is not
- * actually compressed. Only on the switch to non-
- * resident does compression kick in. This is in
- * contrast to encrypted files (see above).
- */
- ntfs_error(vi->i_sb, "Writing to compressed files is "
- "not implemented yet. Sorry.");
- return -EOPNOTSUPP;
- }
- }
+ ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+ "0x%llx, count 0x%lx.", vi->i_ino,
+ (unsigned)le32_to_cpu(ni->type),
+ (unsigned long long)pos,
+ (unsigned long)iov_iter_count(i));
/*
* If a previous ntfs_truncate() failed, repeat it and abort if it
* fails again.
*/
if (unlikely(NInoTruncateFailed(ni))) {
+ int err;
+
inode_dio_wait(vi);
err = ntfs_truncate(vi);
if (err || NInoTruncateFailed(ni)) {
@@ -1855,81 +1781,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
return err;
}
}
- /* The first byte after the write. */
- end = pos + count;
- /*
- * If the write goes beyond the allocated size, extend the allocation
- * to cover the whole of the write, rounded up to the nearest cluster.
- */
- read_lock_irqsave(&ni->size_lock, flags);
- ll = ni->allocated_size;
- read_unlock_irqrestore(&ni->size_lock, flags);
- if (end > ll) {
- /* Extend the allocation without changing the data size. */
- ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
- if (likely(ll >= 0)) {
- BUG_ON(pos >= ll);
- /* If the extension was partial truncate the write. */
- if (end > ll) {
- ntfs_debug("Truncating write to inode 0x%lx, "
- "attribute type 0x%x, because "
- "the allocation was only "
- "partially extended.",
- vi->i_ino, (unsigned)
- le32_to_cpu(ni->type));
- end = ll;
- count = ll - pos;
- }
- } else {
- err = ll;
- read_lock_irqsave(&ni->size_lock, flags);
- ll = ni->allocated_size;
- read_unlock_irqrestore(&ni->size_lock, flags);
- /* Perform a partial write if possible or fail. */
- if (pos < ll) {
- ntfs_debug("Truncating write to inode 0x%lx, "
- "attribute type 0x%x, because "
- "extending the allocation "
- "failed (error code %i).",
- vi->i_ino, (unsigned)
- le32_to_cpu(ni->type), err);
- end = ll;
- count = ll - pos;
- } else {
- ntfs_error(vol->sb, "Cannot perform write to "
- "inode 0x%lx, attribute type "
- "0x%x, because extending the "
- "allocation failed (error "
- "code %i).", vi->i_ino,
- (unsigned)
- le32_to_cpu(ni->type), err);
- return err;
- }
- }
- }
- written = 0;
- /*
- * If the write starts beyond the initialized size, extend it up to the
- * beginning of the write and initialize all non-sparse space between
- * the old initialized size and the new one. This automatically also
- * increments the vfs inode->i_size to keep it above or equal to the
- * initialized_size.
- */
- read_lock_irqsave(&ni->size_lock, flags);
- ll = ni->initialized_size;
- read_unlock_irqrestore(&ni->size_lock, flags);
- if (pos > ll) {
- err = ntfs_attr_extend_initialized(ni, pos);
- if (err < 0) {
- ntfs_error(vol->sb, "Cannot perform write to inode "
- "0x%lx, attribute type 0x%x, because "
- "extending the initialized size "
- "failed (error code %i).", vi->i_ino,
- (unsigned)le32_to_cpu(ni->type), err);
- status = err;
- goto err_out;
- }
- }
/*
* Determine the number of pages per cluster for non-resident
* attributes.
@@ -1937,10 +1788,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
nr_pages = 1;
if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
- /* Finally, perform the actual write. */
last_vcn = -1;
- if (likely(nr_segs == 1))
- buf = iov->iov_base;
do {
VCN vcn;
pgoff_t idx, start_idx;
@@ -1965,10 +1813,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
vol->cluster_size_bits, false);
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE)) {
- status = -EIO;
if (lcn == LCN_ENOMEM)
status = -ENOMEM;
- else
+ else {
+ status = -EIO;
ntfs_error(vol->sb, "Cannot "
"perform write to "
"inode 0x%lx, "
@@ -1977,6 +1825,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
"is corrupt.",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
+ }
break;
}
if (lcn == LCN_HOLE) {
@@ -1989,8 +1838,9 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
}
}
}
- if (bytes > count)
- bytes = count;
+ if (bytes > iov_iter_count(i))
+ bytes = iov_iter_count(i);
+again:
/*
* Bring in the user page(s) that we will copy from _first_.
* Otherwise there is a nasty deadlock on copying from the same
@@ -1999,10 +1849,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
* pages being swapped out between us bringing them into memory
* and doing the actual copying.
*/
- if (likely(nr_segs == 1))
- ntfs_fault_in_pages_readable(buf, bytes);
- else
- ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
+ if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) {
+ status = -EFAULT;
+ break;
+ }
/* Get and lock @do_pages starting at index @start_idx. */
status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
pages, &cached_page);
@@ -2018,56 +1868,57 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
status = ntfs_prepare_pages_for_non_resident_write(
pages, do_pages, pos, bytes);
if (unlikely(status)) {
- loff_t i_size;
-
do {
unlock_page(pages[--do_pages]);
page_cache_release(pages[do_pages]);
} while (do_pages);
- /*
- * The write preparation may have instantiated
- * allocated space outside i_size. Trim this
- * off again. We can ignore any errors in this
- * case as we will just be waisting a bit of
- * allocated space, which is not a disaster.
- */
- i_size = i_size_read(vi);
- if (pos + bytes > i_size) {
- ntfs_write_failed(mapping, pos + bytes);
- }
break;
}
}
u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
- if (likely(nr_segs == 1)) {
- copied = ntfs_copy_from_user(pages + u, do_pages - u,
- ofs, buf, bytes);
- buf += copied;
- } else
- copied = ntfs_copy_from_user_iovec(pages + u,
- do_pages - u, ofs, &iov, &iov_ofs,
- bytes);
+ copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
+ i, bytes);
ntfs_flush_dcache_pages(pages + u, do_pages - u);
- status = ntfs_commit_pages_after_write(pages, do_pages, pos,
- bytes);
- if (likely(!status)) {
- written += copied;
- count -= copied;
- pos += copied;
- if (unlikely(copied != bytes))
- status = -EFAULT;
+ status = 0;
+ if (likely(copied == bytes)) {
+ status = ntfs_commit_pages_after_write(pages, do_pages,
+ pos, bytes);
+ if (!status)
+ status = bytes;
}
do {
unlock_page(pages[--do_pages]);
page_cache_release(pages[do_pages]);
} while (do_pages);
- if (unlikely(status))
+ if (unlikely(status < 0))
break;
- balance_dirty_pages_ratelimited(mapping);
+ copied = status;
cond_resched();
- } while (count);
-err_out:
- *ppos = pos;
+ if (unlikely(!copied)) {
+ size_t sc;
+
+ /*
+ * We failed to copy anything. Fall back to single
+ * segment length write.
+ *
+ * This is needed to avoid possible livelock in the
+ * case that all segments in the iov cannot be copied
+ * at once without a pagefault.
+ */
+ sc = iov_iter_single_seg_count(i);
+ if (bytes > sc)
+ bytes = sc;
+ goto again;
+ }
+ iov_iter_advance(i, copied);
+ pos += copied;
+ written += copied;
+ balance_dirty_pages_ratelimited(mapping);
+ if (fatal_signal_pending(current)) {
+ status = -EINTR;
+ break;
+ }
+ } while (iov_iter_count(i));
if (cached_page)
page_cache_release(cached_page);
ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
@@ -2077,63 +1928,36 @@ err_out:
}
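The "if (unlikely(!copied))" fallback in ntfs_perform_write() above retries with a single-segment length to guarantee forward progress. A self-contained sketch of that shape, with try_copy(), write_loop(), SEG_LEN and MAX_ATOMIC invented to simulate an atomic copy that fails whenever it is asked for more than one segment at once (SEG_LEN plays the role of iov_iter_single_seg_count()):

/* Invented names: try_copy(), write_loop(), SEG_LEN, MAX_ATOMIC. */
#include <stdio.h>
#include <string.h>

#define SEG_LEN 8	/* pretend every iovec segment is 8 bytes */
#define MAX_ATOMIC 8	/* pretend larger atomic copies always fault */

static char sink[64];

/* Stand-in for the atomic copy: anything bigger than one segment "faults". */
static size_t try_copy(const char *src, size_t dst_off, size_t want)
{
	if (want > MAX_ATOMIC)
		return 0;
	memcpy(sink + dst_off, src, want);
	return want;
}

static size_t write_loop(const char *src, size_t count)
{
	size_t written = 0;

	while (count) {
		size_t chunk = count;
		size_t copied;

		for (;;) {
			copied = try_copy(src + written, written, chunk);
			if (copied)
				break;
			/*
			 * No progress at all: shrink to a single segment so
			 * the retry can succeed, instead of re-issuing the
			 * same oversized request forever.
			 */
			if (chunk > SEG_LEN)
				chunk = SEG_LEN;
			else
				return written;	/* would otherwise livelock */
		}
		written += copied;
		count -= copied;
	}
	return written;
}

int main(void)
{
	char src[40];

	memset(src, 'x', sizeof(src));
	printf("wrote %zu of %zu bytes\n",
	       write_loop(src, sizeof(src)), sizeof(src));
	return 0;
}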
/**
- * ntfs_file_aio_write_nolock -
+ * ntfs_file_write_iter - simple wrapper for ntfs_perform_write()
+ * @iocb: IO state structure
+ * @from: iov_iter with data to write
+ *
+ * Basically the same as generic_file_write_iter() except that it ends up
+ * calling ntfs_perform_write() instead of generic_perform_write() and that
+ * O_DIRECT is not implemented.
*/
-static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
- const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
+static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- loff_t pos;
- size_t count; /* after file limit checks */
- ssize_t written, err;
+ struct inode *vi = file_inode(file);
+ ssize_t written = 0;
+ ssize_t err;
- count = iov_length(iov, nr_segs);
- pos = *ppos;
+ mutex_lock(&vi->i_mutex);
/* We can write back this queue in page reclaim. */
- current->backing_dev_info = inode_to_bdi(inode);
- written = 0;
- err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
- if (err)
- goto out;
- if (!count)
- goto out;
- err = file_remove_suid(file);
- if (err)
- goto out;
- err = file_update_time(file);
- if (err)
- goto out;
- written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
- count);
-out:
+ current->backing_dev_info = inode_to_bdi(vi);
+ err = ntfs_prepare_file_for_write(iocb, from);
+ if (iov_iter_count(from) && !err)
+ written = ntfs_perform_write(file, from, iocb->ki_pos);
current->backing_dev_info = NULL;
- return written ? written : err;
-}
-
-/**
- * ntfs_file_aio_write -
- */
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
-{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- ssize_t ret;
-
- BUG_ON(iocb->ki_pos != pos);
-
- mutex_lock(&inode->i_mutex);
- ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
- mutex_unlock(&inode->i_mutex);
- if (ret > 0) {
- int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ mutex_unlock(&vi->i_mutex);
+ if (likely(written > 0)) {
+ err = generic_write_sync(file, iocb->ki_pos, written);
if (err < 0)
- ret = err;
+ written = 0;
}
- return ret;
+ iocb->ki_pos += written;
+ return written ? written : err;
}
/**
@@ -2197,37 +2021,15 @@ static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
#endif /* NTFS_RW */
const struct file_operations ntfs_file_ops = {
- .llseek = generic_file_llseek, /* Seek inside file. */
- .read = new_sync_read, /* Read from file. */
- .read_iter = generic_file_read_iter, /* Async read from file. */
+ .llseek = generic_file_llseek,
+ .read_iter = generic_file_read_iter,
#ifdef NTFS_RW
- .write = do_sync_write, /* Write to file. */
- .aio_write = ntfs_file_aio_write, /* Async write to file. */
- /*.release = ,*/ /* Last file is closed. See
- fs/ext2/file.c::
- ext2_release_file() for
- how to use this to discard
- preallocated space for
- write opened files. */
- .fsync = ntfs_file_fsync, /* Sync a file to disk. */
- /*.aio_fsync = ,*/ /* Sync all outstanding async
- i/o operations on a
- kiocb. */
+ .write_iter = ntfs_file_write_iter,
+ .fsync = ntfs_file_fsync,
#endif /* NTFS_RW */
- /*.ioctl = ,*/ /* Perform function on the
- mounted filesystem. */
- .mmap = generic_file_mmap, /* Mmap file. */
- .open = ntfs_file_open, /* Open file. */
- .splice_read = generic_file_splice_read /* Zero-copy data send with
- the data source being on
- the ntfs partition. We do
- not need to care about the
- data destination. */
- /*.sendpage = ,*/ /* Zero-copy data send with
- the data destination being
- on the ntfs partition. We
- do not need to care about
- the data source. */
+ .mmap = generic_file_mmap,
+ .open = ntfs_file_open,
+ .splice_read = generic_file_splice_read,
};
const struct inode_operations ntfs_file_inode_ops = {
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 898b9949d363..d284f07eda77 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -28,7 +28,6 @@
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/log2.h>
-#include <linux/aio.h>
#include "aops.h"
#include "attrib.h"
@@ -2890,7 +2889,7 @@ void ntfs_truncate_vfs(struct inode *vi) {
*/
int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *vi = dentry->d_inode;
+ struct inode *vi = d_inode(dentry);
int err;
unsigned int ia_valid = attr->ia_valid;
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index b3973c2fd190..0f35b80d17fe 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -292,14 +292,14 @@ const struct inode_operations ntfs_dir_inode_ops = {
* The code is based on the ext3 ->get_parent() implementation found in
* fs/ext3/namei.c::ext3_get_parent().
*
- * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down.
+ * Note: ntfs_get_parent() is called with @d_inode(child_dent)->i_mutex down.
*
* Return the dentry of the parent directory on success or the error code on
* error (IS_ERR() is true).
*/
static struct dentry *ntfs_get_parent(struct dentry *child_dent)
{
- struct inode *vi = child_dent->d_inode;
+ struct inode *vi = d_inode(child_dent);
ntfs_inode *ni = NTFS_I(vi);
MFT_RECORD *mrec;
ntfs_attr_search_ctx *ctx;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 044158bd22be..2d7f76e52c37 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -3370,7 +3370,7 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
ret = ocfs2_get_right_path(et, left_path, &right_path);
if (ret) {
mlog_errno(ret);
- goto out;
+ return ret;
}
right_el = path_leaf_el(right_path);
@@ -3453,8 +3453,7 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
subtree_index);
}
out:
- if (right_path)
- ocfs2_free_path(right_path);
+ ocfs2_free_path(right_path);
return ret;
}
@@ -3536,7 +3535,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
ret = ocfs2_get_left_path(et, right_path, &left_path);
if (ret) {
mlog_errno(ret);
- goto out;
+ return ret;
}
left_el = path_leaf_el(left_path);
@@ -3647,8 +3646,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
right_path, subtree_index);
}
out:
- if (left_path)
- ocfs2_free_path(left_path);
+ ocfs2_free_path(left_path);
return ret;
}
@@ -4334,17 +4332,17 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
} else if (path->p_tree_depth > 0) {
status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
if (status)
- goto out;
+ goto exit;
if (left_cpos != 0) {
left_path = ocfs2_new_path_from_path(path);
if (!left_path)
- goto out;
+ goto exit;
status = ocfs2_find_path(et->et_ci, left_path,
left_cpos);
if (status)
- goto out;
+ goto free_left_path;
new_el = path_leaf_el(left_path);
@@ -4361,7 +4359,7 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
le16_to_cpu(new_el->l_next_free_rec),
le16_to_cpu(new_el->l_count));
status = -EINVAL;
- goto out;
+ goto free_left_path;
}
rec = &new_el->l_recs[
le16_to_cpu(new_el->l_next_free_rec) - 1];
@@ -4388,18 +4386,18 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
path->p_tree_depth > 0) {
status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
if (status)
- goto out;
+ goto free_left_path;
if (right_cpos == 0)
- goto out;
+ goto free_left_path;
right_path = ocfs2_new_path_from_path(path);
if (!right_path)
- goto out;
+ goto free_left_path;
status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
if (status)
- goto out;
+ goto free_right_path;
new_el = path_leaf_el(right_path);
rec = &new_el->l_recs[0];
@@ -4413,7 +4411,7 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
(unsigned long long)le64_to_cpu(eb->h_blkno),
le16_to_cpu(new_el->l_next_free_rec));
status = -EINVAL;
- goto out;
+ goto free_right_path;
}
rec = &new_el->l_recs[1];
}
@@ -4430,12 +4428,11 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
ret = contig_type;
}
-out:
- if (left_path)
- ocfs2_free_path(left_path);
- if (right_path)
- ocfs2_free_path(right_path);
-
+free_right_path:
+ ocfs2_free_path(right_path);
+free_left_path:
+ ocfs2_free_path(left_path);
+exit:
return ret;
}
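The relabelled error paths above follow the usual unwind-by-label idiom: each label releases exactly what was acquired before the corresponding failure point, so no "if (ptr)" tests are needed at a single catch-all label (ocfs2_free_path() appears to tolerate a NULL path, which is why the success path can fall through both labels). A generic sketch of the idiom with invented names, not ocfs2 code:

/* Invented example; do_work() and fail_late are not ocfs2 code. */
#include <stdio.h>
#include <stdlib.h>

static int do_work(int fail_late)
{
	int ret = -1;
	char *a, *b;

	a = malloc(32);
	if (!a)
		return -1;		/* nothing acquired yet */

	b = malloc(64);
	if (!b)
		goto free_a;		/* only a needs unwinding */

	if (fail_late)
		goto free_b;		/* both need unwinding */

	ret = 0;			/* success shares the same tail */
free_b:
	free(b);
free_a:
	free(a);
	return ret;
}

int main(void)
{
	printf("do_work(0) = %d, do_work(1) = %d\n", do_work(0), do_work(1));
	return 0;
}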
@@ -6858,13 +6855,13 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
if (pages == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
- goto out;
+ return ret;
}
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (ret) {
mlog_errno(ret);
- goto out;
+ goto free_pages;
}
}
@@ -6996,9 +6993,8 @@ out_commit:
out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
- if (pages)
- kfree(pages);
-
+free_pages:
+ kfree(pages);
return ret;
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 44db1808cdb5..f906a250da6a 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -29,6 +29,7 @@
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
+#include <linux/uio.h>
#include <cluster/masklog.h>
@@ -663,6 +664,117 @@ static int ocfs2_is_overwrite(struct ocfs2_super *osb,
return 0;
}
+static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
+ struct inode *inode, loff_t offset,
+ u64 zero_len, int cluster_align)
+{
+ u32 p_cpos = 0;
+ u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
+ unsigned int num_clusters = 0;
+ unsigned int ext_flags = 0;
+ int ret = 0;
+
+ if (offset <= i_size_read(inode) || cluster_align)
+ return 0;
+
+ ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
+ &ext_flags);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+ u64 s = i_size_read(inode);
+ sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+ (do_div(s, osb->s_clustersize) >> 9);
+
+ ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
+ zero_len >> 9, GFP_NOFS, false);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+ return ret;
+}
+
+static int ocfs2_direct_IO_extend_no_holes(struct ocfs2_super *osb,
+ struct inode *inode, loff_t offset)
+{
+ u64 zero_start, zero_len, total_zero_len;
+ u32 p_cpos = 0, clusters_to_add;
+ u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
+ unsigned int num_clusters = 0;
+ unsigned int ext_flags = 0;
+ u32 size_div, offset_div;
+ int ret = 0;
+
+ {
+ u64 o = offset;
+ u64 s = i_size_read(inode);
+
+ offset_div = do_div(o, osb->s_clustersize);
+ size_div = do_div(s, osb->s_clustersize);
+ }
+
+ if (offset <= i_size_read(inode))
+ return 0;
+
+ clusters_to_add = ocfs2_bytes_to_clusters(inode->i_sb, offset) -
+ ocfs2_bytes_to_clusters(inode->i_sb, i_size_read(inode));
+ total_zero_len = offset - i_size_read(inode);
+ if (clusters_to_add)
+ total_zero_len -= offset_div;
+
+ /* Allocate clusters to fill out holes, and this is only needed
+ * when we add more than one cluster. Otherwise the cluster will
+ * be allocated during direct IO */
+ if (clusters_to_add > 1) {
+ ret = ocfs2_extend_allocation(inode,
+ OCFS2_I(inode)->ip_clusters,
+ clusters_to_add - 1, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ while (total_zero_len) {
+ ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
+ &ext_flags);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ zero_start = ocfs2_clusters_to_bytes(osb->sb, p_cpos) +
+ size_div;
+ zero_len = ocfs2_clusters_to_bytes(osb->sb, num_clusters) -
+ size_div;
+ zero_len = min(total_zero_len, zero_len);
+
+ if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+ ret = blkdev_issue_zeroout(osb->sb->s_bdev,
+ zero_start >> 9, zero_len >> 9,
+ GFP_NOFS, false);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ total_zero_len -= zero_len;
+ v_cpos += ocfs2_bytes_to_clusters(osb->sb, zero_len + size_div);
+
+ /* Only on the first iteration can the cluster be unaligned,
+ * so set size_div to 0 for the rest */
+ size_div = 0;
+ }
+
+out:
+ return ret;
+}
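Both helpers above take scratch copies of offset and i_size before calling do_div(), because the kernel's do_div(n, base) divides n in place and hands back the remainder. A user-space approximation of those semantics; my_do_div is an invented stand-in for the real arch-specific macro, and the statement expression needs GCC or Clang:

/* my_do_div() is an invented approximation; needs GCC/Clang extensions. */
#include <stdio.h>
#include <stdint.h>

#define my_do_div(n, base) ({				\
	uint32_t _rem = (uint32_t)((n) % (base));	\
	(n) /= (base);					\
	_rem;						\
})

int main(void)
{
	uint64_t offset = 1048576 + 1234;	/* some byte offset */
	uint64_t o = offset;			/* scratch copy: do_div clobbers it */
	uint32_t clustersize = 65536;
	uint32_t off_in_cluster = my_do_div(o, clustersize);

	/* o now holds offset / clustersize; offset itself is untouched. */
	printf("offset %llu -> cluster %llu + %u bytes\n",
	       (unsigned long long)offset, (unsigned long long)o,
	       off_in_cluster);
	return 0;
}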
+
static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
struct iov_iter *iter,
loff_t offset)
@@ -677,8 +789,8 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
struct buffer_head *di_bh = NULL;
size_t count = iter->count;
journal_t *journal = osb->journal->j_journal;
- u32 zero_len;
- int cluster_align;
+ u64 zero_len_head, zero_len_tail;
+ int cluster_align_head, cluster_align_tail;
loff_t final_size = offset + count;
int append_write = offset >= i_size_read(inode) ? 1 : 0;
unsigned int num_clusters = 0;
@@ -686,9 +798,16 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
{
u64 o = offset;
+ u64 s = i_size_read(inode);
+
+ zero_len_head = do_div(o, 1 << osb->s_clustersize_bits);
+ cluster_align_head = !zero_len_head;
- zero_len = do_div(o, 1 << osb->s_clustersize_bits);
- cluster_align = !zero_len;
+ zero_len_tail = osb->s_clustersize -
+ do_div(s, osb->s_clustersize);
+ if ((offset - i_size_read(inode)) < zero_len_tail)
+ zero_len_tail = offset - i_size_read(inode);
+ cluster_align_tail = !zero_len_tail;
}
/*
@@ -706,21 +825,23 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
}
if (append_write) {
- ret = ocfs2_inode_lock(inode, &di_bh, 1);
+ ret = ocfs2_inode_lock(inode, NULL, 1);
if (ret < 0) {
mlog_errno(ret);
goto clean_orphan;
}
+ /* zero out the tail of the previously allocated cluster
+ * that has not been zeroed yet */
if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
- ret = ocfs2_zero_extend(inode, di_bh, offset);
+ ret = ocfs2_direct_IO_zero_extend(osb, inode, offset,
+ zero_len_tail, cluster_align_tail);
else
- ret = ocfs2_extend_no_holes(inode, di_bh, offset,
+ ret = ocfs2_direct_IO_extend_no_holes(osb, inode,
offset);
if (ret < 0) {
mlog_errno(ret);
ocfs2_inode_unlock(inode, 1);
- brelse(di_bh);
goto clean_orphan;
}
@@ -728,19 +849,15 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
if (is_overwrite < 0) {
mlog_errno(is_overwrite);
ocfs2_inode_unlock(inode, 1);
- brelse(di_bh);
goto clean_orphan;
}
ocfs2_inode_unlock(inode, 1);
- brelse(di_bh);
- di_bh = NULL;
}
- written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
- iter, offset,
- ocfs2_direct_IO_get_blocks,
- ocfs2_dio_end_io, NULL, 0);
+ written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+ offset, ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
if (unlikely(written < 0)) {
loff_t i_size = i_size_read(inode);
@@ -771,15 +888,23 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
if (ret < 0)
mlog_errno(ret);
}
- } else if (written < 0 && append_write && !is_overwrite &&
- !cluster_align) {
+ } else if (written > 0 && append_write && !is_overwrite &&
+ !cluster_align_head) {
+ /* zeroing out the allocated cluster head */
u32 p_cpos = 0;
u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);
+ ret = ocfs2_inode_lock(inode, NULL, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto clean_orphan;
+ }
+
ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
&num_clusters, &ext_flags);
if (ret < 0) {
mlog_errno(ret);
+ ocfs2_inode_unlock(inode, 0);
goto clean_orphan;
}
@@ -787,9 +912,11 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
ret = blkdev_issue_zeroout(osb->sb->s_bdev,
p_cpos << (osb->s_clustersize_bits - 9),
- zero_len >> 9, GFP_KERNEL, false);
+ zero_len_head >> 9, GFP_NOFS, false);
if (ret < 0)
mlog_errno(ret);
+
+ ocfs2_inode_unlock(inode, 0);
}
clean_orphan:
@@ -818,9 +945,7 @@ out:
return ret;
}
-static ssize_t ocfs2_direct_IO(int rw,
- struct kiocb *iocb,
- struct iov_iter *iter,
+static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
@@ -842,12 +967,11 @@ static ssize_t ocfs2_direct_IO(int rw,
if (i_size_read(inode) <= offset && !full_coherency)
return 0;
- if (rw == READ)
- return __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev,
- iter, offset,
- ocfs2_direct_IO_get_blocks,
- ocfs2_dio_end_io, NULL, 0);
+ if (iov_iter_rw(iter) == READ)
+ return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+ iter, offset,
+ ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
else
return ocfs2_direct_IO_write(iocb, iter, offset);
}
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 6cae155d54df..dd59599b022d 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,7 +22,7 @@
#ifndef OCFS2_AOPS_H
#define OCFS2_AOPS_H
-#include <linux/aio.h>
+#include <linux/fs.h>
handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
struct page *page,
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 2260fb9e6508..7fdc25a4d8c0 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -196,13 +196,14 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
} \
} while (0)
-#define mlog_errno(st) do { \
+#define mlog_errno(st) ({ \
int _st = (st); \
if (_st != -ERESTARTSYS && _st != -EINTR && \
_st != AOP_TRUNCATED_PAGE && _st != -ENOSPC && \
_st != -EDQUOT) \
mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
-} while (0)
+ _st; \
+})
#define mlog_bug_on_msg(cond, fmt, args...) do { \
if (cond) { \
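Turning mlog_errno() into a GNU C statement expression makes the macro evaluate to the status it just logged, so callers can use it inside an expression (for instance, return the logged status directly), which the old do { } while (0) form could not. A self-contained sketch with an invented log_err() macro; statement expressions require GCC or Clang:

/* log_err() is an invented stand-in; needs GCC/Clang statement expressions. */
#include <stdio.h>
#include <errno.h>

#define log_err(st) ({						\
	int _st = (st);						\
	if (_st)						\
		fprintf(stderr, "status = %d\n", _st);		\
	_st;							\
})

static int check(int value)
{
	if (value < 0)
		return log_err(-EINVAL);	/* log and return in one step */
	return 0;
}

int main(void)
{
	return check(-1) == -EINVAL ? 0 : 1;
}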
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 4fda7a5f3088..290373024d9d 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -42,8 +42,8 @@
void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
unsigned long gen =
- OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
- BUG_ON(dentry->d_inode);
+ OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
+ BUG_ON(d_inode(dentry));
dentry->d_fsdata = (void *)gen;
}
@@ -57,7 +57,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
osb = OCFS2_SB(dentry->d_sb);
trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
@@ -71,7 +71,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
unsigned long gen = (unsigned long) dentry->d_fsdata;
unsigned long pgen;
spin_lock(&dentry->d_lock);
- pgen = OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
+ pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
spin_unlock(&dentry->d_lock);
trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
dentry->d_name.name,
@@ -146,7 +146,7 @@ static int ocfs2_match_dentry(struct dentry *dentry,
if (skip_unhashed && d_unhashed(dentry))
return 0;
- parent = dentry->d_parent->d_inode;
+ parent = d_inode(dentry->d_parent);
/* Negative parent dentry? */
if (!parent)
return 0;
@@ -243,7 +243,7 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
if (!inode)
return 0;
- if (!dentry->d_inode && dentry->d_fsdata) {
+ if (d_really_is_negative(dentry) && dentry->d_fsdata) {
/* Converting a negative dentry to positive
Clear dentry->d_fsdata */
dentry->d_fsdata = dl = NULL;
@@ -446,7 +446,7 @@ void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
/*
* Move within the same directory, so the actual lock info won't
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index b08050bd3f2e..ccd4dcfc3645 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -18,7 +18,7 @@
*
* linux/fs/minix/dir.c
*
- * Copyright (C) 1991, 1992 Linux Torvalds
+ * Copyright (C) 1991, 1992 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
@@ -2047,22 +2047,19 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
const char *name,
int namelen)
{
- int ret;
+ int ret = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
trace_ocfs2_check_dir_for_entry(
(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
- ret = -EEXIST;
- if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0)
- goto bail;
+ if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
+ ret = -EEXIST;
+ mlog_errno(ret);
+ }
- ret = 0;
-bail:
ocfs2_free_dir_lookup_result(&lookup);
- if (ret)
- mlog_errno(ret);
return ret;
}
diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h
index f0344b75b14d..3d8639f38973 100644
--- a/fs/ocfs2/dir.h
+++ b/fs/ocfs2/dir.h
@@ -72,7 +72,7 @@ static inline int ocfs2_add_entry(handle_t *handle,
struct buffer_head *parent_fe_bh,
struct ocfs2_dir_lookup_result *lookup)
{
- return __ocfs2_add_entry(handle, dentry->d_parent->d_inode,
+ return __ocfs2_add_entry(handle, d_inode(dentry->d_parent),
dentry->d_name.name, dentry->d_name.len,
inode, blkno, parent_fe_bh, lookup);
}
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a6944b25fd5b..fdf4b41d0609 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -757,6 +757,19 @@ lookup:
if (tmpres) {
spin_unlock(&dlm->spinlock);
spin_lock(&tmpres->spinlock);
+
+ /*
+ * Right after dlm spinlock was released, dlm_thread could have
+ * purged the lockres. Check if lockres got unhashed. If so
+ * start over.
+ */
+ if (hlist_unhashed(&tmpres->hash_node)) {
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
+ }
+
/* Wait on the thread that is mastering the resource */
if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
__dlm_wait_on_lockres(tmpres);
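The hunk above is an instance of the lookup / drop-lock / re-lock / recheck pattern: once dlm->spinlock is dropped, the resource found in the hash may be purged before its own spinlock is taken, so the code detects that and retries the lookup. A small user-space sketch of the same shape with invented types (struct res, table_lookup(), get_res(); the unhashed flag stands in for hlist_unhashed()):

/* Invented types/names: struct res, table_lookup(), get_res(). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct res {
	pthread_mutex_t lock;
	int refs;
	bool unhashed;		/* stands in for hlist_unhashed() */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct res the_res = {
	.lock = PTHREAD_MUTEX_INITIALIZER, .refs = 1, .unhashed = true,
};
static int lookups;

static struct res *table_lookup(void)
{
	lookups++;
	/* Pretend the purge completed and the resource was re-hashed
	 * by the time we look a second time. */
	if (lookups == 2)
		the_res.unhashed = false;
	return &the_res;
}

static struct res *get_res(void)
{
	struct res *r;

again:
	pthread_mutex_lock(&table_lock);
	r = table_lookup();
	r->refs++;
	pthread_mutex_unlock(&table_lock);

	pthread_mutex_lock(&r->lock);
	if (r->unhashed) {
		/* Purged between the two locks: drop it and start over. */
		pthread_mutex_unlock(&r->lock);
		r->refs--;
		goto again;
	}
	return r;		/* returned with r->lock held */
}

int main(void)
{
	struct res *r = get_res();

	printf("lookup succeeded after %d attempt(s)\n", lookups);
	pthread_mutex_unlock(&r->lock);
	return 0;
}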
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 061ba6a91bf2..b5cf27dcb18a 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -208,7 +208,7 @@ static int dlmfs_file_release(struct inode *inode,
static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
{
int error;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
attr->ia_valid &= ~ATTR_SIZE;
error = inode_change_ok(inode, attr);
@@ -549,7 +549,7 @@ static int dlmfs_unlink(struct inode *dir,
struct dentry *dentry)
{
int status;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
mlog(0, "unlink inode %lu\n", inode->i_ino);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 11849a44dc5a..8b23aa2f52dd 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1391,6 +1391,11 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
int noqueue_attempted = 0;
int dlm_locked = 0;
+ if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
+ mlog_errno(-EINVAL);
+ return -EINVAL;
+ }
+
ocfs2_init_mask_waiter(&mw);
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 29651167190d..827fc9809bc2 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -82,7 +82,6 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
}
status = ocfs2_test_inode_bit(osb, blkno, &set);
- trace_ocfs2_get_dentry_test_bit(status, set);
if (status < 0) {
if (status == -EINVAL) {
/*
@@ -96,6 +95,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
goto unlock_nfs_sync;
}
+ trace_ocfs2_get_dentry_test_bit(status, set);
/* If the inode allocator bit is clear, this inode must be stale */
if (!set) {
status = -ESTALE;
@@ -147,7 +147,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
int status;
u64 blkno;
struct dentry *parent;
- struct inode *dir = child->d_inode;
+ struct inode *dir = d_inode(child);
trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index ba1790e52ff2..d8b670cbd909 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1126,7 +1126,7 @@ out:
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
int status = 0, size_change;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
struct ocfs2_super *osb = OCFS2_SB(sb);
struct buffer_head *bh = NULL;
@@ -1275,8 +1275,8 @@ int ocfs2_getattr(struct vfsmount *mnt,
struct dentry *dentry,
struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
- struct super_block *sb = dentry->d_inode->i_sb;
+ struct inode *inode = d_inode(dentry);
+ struct super_block *sb = d_inode(dentry)->i_sb;
struct ocfs2_super *osb = sb->s_fs_info;
int err;
@@ -2106,7 +2106,7 @@ out:
}
static int ocfs2_prepare_inode_for_write(struct file *file,
- loff_t *ppos,
+ loff_t pos,
size_t count,
int appending,
int *direct_io,
@@ -2114,8 +2114,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
{
int ret = 0, meta_level = 0;
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
- loff_t saved_pos = 0, end;
+ struct inode *inode = d_inode(dentry);
+ loff_t end;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
@@ -2155,23 +2155,16 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
}
}
- /* work on a copy of ppos until we're sure that we won't have
- * to recalculate it due to relocking. */
- if (appending)
- saved_pos = i_size_read(inode);
- else
- saved_pos = *ppos;
-
- end = saved_pos + count;
+ end = pos + count;
- ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
+ ret = ocfs2_check_range_for_refcount(inode, pos, count);
if (ret == 1) {
ocfs2_inode_unlock(inode, meta_level);
meta_level = -1;
ret = ocfs2_prepare_inode_for_refcount(inode,
file,
- saved_pos,
+ pos,
count,
&meta_level);
if (has_refcount)
@@ -2227,7 +2220,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
* caller will have to retake some cluster
* locks and initiate the io as buffered.
*/
- ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
+ ret = ocfs2_check_range_for_holes(inode, pos, count);
if (ret == 1) {
/*
* Fallback to old way if the feature bit is not set.
@@ -2242,12 +2235,9 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
break;
}
- if (appending)
- *ppos = saved_pos;
-
out_unlock:
trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
- saved_pos, appending, count,
+ pos, appending, count,
direct_io, has_refcount);
if (meta_level >= 0)
@@ -2260,19 +2250,20 @@ out:
static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
- int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
+ int direct_io, appending, rw_level, have_alloc_sem = 0;
int can_do_direct, has_refcount = 0;
ssize_t written = 0;
- size_t count = iov_iter_count(from);
- loff_t old_size, *ppos = &iocb->ki_pos;
+ ssize_t ret;
+ size_t count = iov_iter_count(from), orig_count;
+ loff_t old_size;
u32 old_clusters;
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- struct address_space *mapping = file->f_mapping;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
int unaligned_dio = 0;
+ int dropped_dio = 0;
trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2280,11 +2271,11 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
file->f_path.dentry->d_name.name,
(unsigned int)from->nr_segs); /* GRRRRR */
- if (iocb->ki_nbytes == 0)
+ if (count == 0)
return 0;
- appending = file->f_flags & O_APPEND ? 1 : 0;
- direct_io = file->f_flags & O_DIRECT ? 1 : 0;
+ appending = iocb->ki_flags & IOCB_APPEND ? 1 : 0;
+ direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
mutex_lock(&inode->i_mutex);
@@ -2329,9 +2320,17 @@ relock:
ocfs2_inode_unlock(inode, 1);
}
+ orig_count = iov_iter_count(from);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0) {
+ if (ret)
+ mlog_errno(ret);
+ goto out;
+ }
+ count = ret;
+
can_do_direct = direct_io;
- ret = ocfs2_prepare_inode_for_write(file, ppos,
- iocb->ki_nbytes, appending,
+ ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, appending,
&can_do_direct, &has_refcount);
if (ret < 0) {
mlog_errno(ret);
@@ -2339,8 +2338,7 @@ relock:
}
if (direct_io && !is_sync_kiocb(iocb))
- unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_nbytes,
- *ppos);
+ unaligned_dio = ocfs2_is_io_unaligned(inode, count, iocb->ki_pos);
/*
* We can't complete the direct I/O as requested, fall back to
@@ -2353,6 +2351,9 @@ relock:
rw_level = -1;
direct_io = 0;
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ iov_iter_reexpand(from, orig_count);
+ dropped_dio = 1;
goto relock;
}
@@ -2376,74 +2377,15 @@ relock:
/* communicate with ocfs2_dio_end_io */
ocfs2_iocb_set_rw_locked(iocb, rw_level);
- ret = generic_write_checks(file, ppos, &count,
- S_ISBLK(inode->i_mode));
- if (ret)
- goto out_dio;
-
- iov_iter_truncate(from, count);
- if (direct_io) {
- loff_t endbyte;
- ssize_t written_buffered;
- written = generic_file_direct_write(iocb, from, *ppos);
- if (written < 0 || written == count) {
- ret = written;
- goto out_dio;
- }
-
- /*
- * for completing the rest of the request.
- */
- count -= written;
- written_buffered = generic_perform_write(file, from, *ppos);
- /*
- * If generic_file_buffered_write() returned a synchronous error
- * then we want to return the number of bytes which were
- * direct-written, or the error code if that was zero. Note
- * that this differs from normal direct-io semantics, which
- * will return -EFOO even if some bytes were written.
- */
- if (written_buffered < 0) {
- ret = written_buffered;
- goto out_dio;
- }
-
- /* We need to ensure that the page cache pages are written to
- * disk and invalidated to preserve the expected O_DIRECT
- * semantics.
- */
- endbyte = *ppos + written_buffered - 1;
- ret = filemap_write_and_wait_range(file->f_mapping, *ppos,
- endbyte);
- if (ret == 0) {
- iocb->ki_pos = *ppos + written_buffered;
- written += written_buffered;
- invalidate_mapping_pages(mapping,
- *ppos >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
- } else {
- /*
- * We don't know how much we wrote, so just return
- * the number of bytes which were direct-written
- */
- }
- } else {
- current->backing_dev_info = inode_to_bdi(inode);
- written = generic_perform_write(file, from, *ppos);
- if (likely(written >= 0))
- iocb->ki_pos = *ppos + written;
- current->backing_dev_info = NULL;
- }
-
-out_dio:
+ written = __generic_file_write_iter(iocb, from);
/* buffered aio wouldn't have proper lock coverage today */
- BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
+ BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
if (unlikely(written <= 0))
goto no_sync;
- if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
- ((file->f_flags & O_DIRECT) && !direct_io)) {
+ if (((file->f_flags & O_DSYNC) && !direct_io) ||
+ IS_SYNC(inode) || dropped_dio) {
ret = filemap_fdatawrite_range(file->f_mapping,
iocb->ki_pos - written,
iocb->ki_pos - 1);
@@ -2554,7 +2496,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
* buffered reads protect themselves in ->readpage(). O_DIRECT reads
* need locks to protect pending reads from racing with truncate.
*/
- if (filp->f_flags & O_DIRECT) {
+ if (iocb->ki_flags & IOCB_DIRECT) {
have_alloc_sem = 1;
ocfs2_iocb_set_sem_locked(iocb);
@@ -2588,7 +2530,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
trace_generic_file_aio_read_ret(ret);
/* buffered aio wouldn't have proper lock coverage today */
- BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
+ BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
/* see ocfs2_file_write_iter */
if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
@@ -2683,8 +2625,6 @@ const struct inode_operations ocfs2_special_file_iops = {
*/
const struct file_operations ocfs2_fops = {
.llseek = ocfs2_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.mmap = ocfs2_mmap,
.fsync = ocfs2_sync_file,
.release = ocfs2_file_release,
@@ -2731,8 +2671,6 @@ const struct file_operations ocfs2_dops = {
*/
const struct file_operations ocfs2_fops_no_plocks = {
.llseek = ocfs2_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.mmap = ocfs2_mmap,
.fsync = ocfs2_sync_file,
.release = ocfs2_file_release,
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 3025c0da6b8a..b254416dc8d9 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -624,7 +624,7 @@ static int ocfs2_remove_inode(struct inode *inode,
ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
le16_to_cpu(di->i_suballoc_slot));
if (!inode_alloc_inode) {
- status = -EEXIST;
+ status = -ENOENT;
mlog_errno(status);
goto bail;
}
@@ -742,7 +742,7 @@ static int ocfs2_wipe_inode(struct inode *inode,
ORPHAN_DIR_SYSTEM_INODE,
orphaned_slot);
if (!orphan_dir_inode) {
- status = -EEXIST;
+ status = -ENOENT;
mlog_errno(status);
goto bail;
}
@@ -1209,7 +1209,7 @@ int ocfs2_drop_inode(struct inode *inode)
*/
int ocfs2_inode_revalidate(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int status = 0;
trace_ocfs2_inode_revalidate(inode,
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 044013455621..857bbbcd39f3 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -666,7 +666,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
ocfs2_local_alloc_count_bits(alloc)) {
ocfs2_error(osb->sb, "local alloc inode %llu says it has "
- "%u free bits, but a count shows %u",
+ "%u used bits, but a count shows %u",
(unsigned long long)le64_to_cpu(alloc->i_blkno),
le32_to_cpu(alloc->id1.bitmap1.i_used),
ocfs2_local_alloc_count_bits(alloc));
@@ -839,7 +839,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
u32 *numbits,
struct ocfs2_alloc_reservation *resv)
{
- int numfound, bitoff, left, startoff, lastzero;
+ int numfound = 0, bitoff, left, startoff, lastzero;
int local_resv = 0;
struct ocfs2_alloc_reservation r;
void *bitmap = NULL;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index b5c3a5ea3ee6..176fe6afd94e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -689,8 +689,8 @@ static int ocfs2_link(struct dentry *old_dentry,
struct dentry *dentry)
{
handle_t *handle;
- struct inode *inode = old_dentry->d_inode;
- struct inode *old_dir = old_dentry->d_parent->d_inode;
+ struct inode *inode = d_inode(old_dentry);
+ struct inode *old_dir = d_inode(old_dentry->d_parent);
int err;
struct buffer_head *fe_bh = NULL;
struct buffer_head *old_dir_bh = NULL;
@@ -879,7 +879,7 @@ static int ocfs2_unlink(struct inode *dir,
int status;
int child_locked = 0;
bool is_unlinkable = false;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct inode *orphan_dir = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
u64 blkno;
@@ -898,7 +898,7 @@ static int ocfs2_unlink(struct inode *dir,
dquot_initialize(dir);
- BUG_ON(dentry->d_parent->d_inode != dir);
+ BUG_ON(d_inode(dentry->d_parent) != dir);
if (inode == osb->root_inode)
return -EPERM;
@@ -1209,8 +1209,8 @@ static int ocfs2_rename(struct inode *old_dir,
{
int status = 0, rename_lock = 0, parents_locked = 0, target_exists = 0;
int old_child_locked = 0, new_child_locked = 0, update_dot_dot = 0;
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct inode *orphan_dir = NULL;
struct ocfs2_dinode *newfe = NULL;
char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
@@ -1454,7 +1454,7 @@ static int ocfs2_rename(struct inode *old_dir,
should_add_orphan = true;
}
} else {
- BUG_ON(new_dentry->d_parent->d_inode != new_dir);
+ BUG_ON(d_inode(new_dentry->d_parent) != new_dir);
status = ocfs2_check_dir_for_entry(new_dir,
new_dentry->d_name.name,
@@ -2322,10 +2322,10 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
trace_ocfs2_orphan_del(
(unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
- name, namelen);
+ name, strlen(name));
/* find it's spot in the orphan directory */
- status = ocfs2_find_entry(name, namelen, orphan_dir_inode,
+ status = ocfs2_find_entry(name, strlen(name), orphan_dir_inode,
&lookup);
if (status) {
mlog_errno(status);
@@ -2808,7 +2808,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
ORPHAN_DIR_SYSTEM_INODE,
osb->slot_num);
if (!orphan_dir_inode) {
- status = -EEXIST;
+ status = -ENOENT;
mlog_errno(status);
goto leave;
}
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index ee541f92dab4..d8c6af101f3f 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4194,7 +4194,7 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
bool preserve)
{
int ret;
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct buffer_head *new_bh = NULL;
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
@@ -4263,7 +4263,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry, bool preserve)
{
int error;
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct buffer_head *old_bh = NULL;
struct inode *new_orphan_inode = NULL;
struct posix_acl *default_acl, *acl;
@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
error = posix_acl_create(dir, &mode, &default_acl, &acl);
if (error) {
mlog_errno(error);
- goto out;
+ return error;
}
error = ocfs2_create_inode_in_orphan(dir, mode,
@@ -4357,7 +4357,7 @@ out:
/* copied from may_create in VFS. */
static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
{
- if (child->d_inode)
+ if (d_really_is_positive(child))
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
@@ -4375,7 +4375,7 @@ static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry, bool preserve)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int error;
if (!inode)
@@ -4463,7 +4463,7 @@ int ocfs2_reflink_ioctl(struct inode *inode,
}
error = ocfs2_vfs_reflink(old_path.dentry,
- new_path.dentry->d_inode,
+ d_inode(new_path.dentry),
new_dentry, preserve);
out_dput:
done_path_create(&new_path, new_dentry);
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index d5493e361a38..e78a203d44c8 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -427,7 +427,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
if (!si) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ return status;
}
si->si_extended = ocfs2_uses_extended_slot_map(osb);
@@ -452,7 +452,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
osb->slot_info = (struct ocfs2_slot_info *)si;
bail:
- if (status < 0 && si)
+ if (status < 0)
__ocfs2_free_slot_info(si);
return status;
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 1724d43d3da1..220cae7bbdbc 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -295,7 +295,7 @@ static int o2cb_cluster_check(void)
set_bit(node_num, netmap);
if (!memcmp(hbmap, netmap, sizeof(hbmap)))
return 0;
- if (i < O2CB_MAP_STABILIZE_COUNT)
+ if (i < O2CB_MAP_STABILIZE_COUNT - 1)
msleep(1000);
}
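The "- 1" above keeps o2cb_cluster_check() from sleeping one extra second after its final comparison. The general retry-loop shape, with check() and ATTEMPTS invented for the example:

/* Invented names: check(), ATTEMPTS. */
#include <stdio.h>
#include <unistd.h>

#define ATTEMPTS 5

static int check(int attempt)
{
	return attempt == 3;	/* pretend the condition stabilizes late */
}

int main(void)
{
	int i;

	for (i = 0; i < ATTEMPTS; i++) {
		if (check(i)) {
			printf("stable after %d attempt(s)\n", i + 1);
			return 0;
		}
		if (i < ATTEMPTS - 1)	/* no pointless sleep after the last try */
			sleep(1);
	}
	printf("never stabilized\n");
	return 1;
}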
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 720aa389e0ea..2768eb1da2b8 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -1004,10 +1004,8 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
BUG_ON(conn == NULL);
lc = kzalloc(sizeof(struct ocfs2_live_connection), GFP_KERNEL);
- if (!lc) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!lc)
+ return -ENOMEM;
init_waitqueue_head(&lc->oc_wait);
init_completion(&lc->oc_sync_wait);
@@ -1063,7 +1061,7 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
}
out:
- if (rc && lc)
+ if (rc)
kfree(lc);
return rc;
}
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 0cb889a17ae1..4479029630bb 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -2499,6 +2499,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
+ ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh,
+ start_bit, count);
goto bail;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26675185b886..403c5660b306 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2069,6 +2069,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
+ memcpy(sb->s_uuid, di->id2.i_super.s_uuid,
+ sizeof(di->id2.i_super.s_uuid));
osb->osb_dx_mask = (1 << (cbits - bbits)) - 1;
@@ -2333,7 +2335,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog_errno(status);
goto bail;
}
- cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
+ cleancache_init_shared_fs(sb);
bail:
return status;
@@ -2563,22 +2565,22 @@ static void ocfs2_handle_error(struct super_block *sb)
ocfs2_set_ro_flag(osb, 0);
}
-static char error_buf[1024];
-
-void __ocfs2_error(struct super_block *sb,
- const char *function,
- const char *fmt, ...)
+void __ocfs2_error(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- vsnprintf(error_buf, sizeof(error_buf), fmt, args);
- va_end(args);
+ vaf.fmt = fmt;
+ vaf.va = &args;
/* Not using mlog here because we want to show the actual
* function the error came from. */
- printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %s\n",
- sb->s_id, function, error_buf);
+ printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV\n",
+ sb->s_id, function, &vaf);
+
+ va_end(args);
ocfs2_handle_error(sb);
}
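The rewrite above drops the shared static error_buf and instead hands the caller's format and va_list through to printk via %pV/struct va_format, so nothing is formatted into a common buffer first. The user-space analogue is simply forwarding the va_list to vfprintf(); report_error() below is an invented name:

/* report_error() is an invented name; plain user-space analogue. */
#include <stdarg.h>
#include <stdio.h>

static void report_error(const char *device, const char *function,
			 const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "ERROR (device %s): %s: ", device, function);
	vfprintf(stderr, fmt, args);	/* no intermediate static buffer */
	fputc('\n', stderr);
	va_end(args);
}

int main(void)
{
	report_error("sda1", __func__, "bad status %d at block %lu", -5, 42UL);
	return 0;
}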
@@ -2586,18 +2588,21 @@ void __ocfs2_error(struct super_block *sb,
/* Handle critical errors. This is intentionally more drastic than
* ocfs2_handle_error, so we only use for things like journal errors,
* etc. */
-void __ocfs2_abort(struct super_block* sb,
- const char *function,
+void __ocfs2_abort(struct super_block *sb, const char *function,
const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- vsnprintf(error_buf, sizeof(error_buf), fmt, args);
- va_end(args);
- printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n",
- sb->s_id, function, error_buf);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV\n",
+ sb->s_id, function, &vaf);
+
+ va_end(args);
/* We don't have the cluster support yet to go straight to
* hard readonly in here. Until then, we want to keep
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 85b190dc132f..d03bfbf3d27d 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1020,7 +1020,7 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
int ret = 0, i_ret = 0, b_ret = 0;
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di = NULL;
- struct ocfs2_inode_info *oi = OCFS2_I(dentry->d_inode);
+ struct ocfs2_inode_info *oi = OCFS2_I(d_inode(dentry));
if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))
return -EOPNOTSUPP;
@@ -1028,7 +1028,7 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
return ret;
- ret = ocfs2_inode_lock(dentry->d_inode, &di_bh, 0);
+ ret = ocfs2_inode_lock(d_inode(dentry), &di_bh, 0);
if (ret < 0) {
mlog_errno(ret);
return ret;
@@ -1037,7 +1037,7 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
di = (struct ocfs2_dinode *)di_bh->b_data;
down_read(&oi->ip_xattr_sem);
- i_ret = ocfs2_xattr_ibody_list(dentry->d_inode, di, buffer, size);
+ i_ret = ocfs2_xattr_ibody_list(d_inode(dentry), di, buffer, size);
if (i_ret < 0)
b_ret = 0;
else {
@@ -1045,13 +1045,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
buffer += i_ret;
size -= i_ret;
}
- b_ret = ocfs2_xattr_block_list(dentry->d_inode, di,
+ b_ret = ocfs2_xattr_block_list(d_inode(dentry), di,
buffer, size);
if (b_ret < 0)
i_ret = 0;
}
up_read(&oi->ip_xattr_sem);
- ocfs2_inode_unlock(dentry->d_inode, 0);
+ ocfs2_inode_unlock(d_inode(dentry), 0);
brelse(di_bh);
@@ -1238,6 +1238,10 @@ static int ocfs2_xattr_block_get(struct inode *inode,
i,
&block_off,
&name_offset);
+ if (ret) {
+ mlog_errno(ret);
+ goto cleanup;
+ }
xs->base = bucket_block(xs->bucket, block_off);
}
if (ocfs2_xattr_is_local(xs->here)) {
@@ -5665,6 +5669,10 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
i, &xv, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
args->ref_ci,
@@ -7249,7 +7257,7 @@ static int ocfs2_xattr_security_get(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
+ return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY,
name, buffer, size);
}
@@ -7259,7 +7267,7 @@ static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
+ return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY,
name, value, size, flags);
}
@@ -7339,7 +7347,7 @@ static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
+ return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED,
name, buffer, size);
}
@@ -7349,7 +7357,7 @@ static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
+ return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED,
name, value, size, flags);
}
@@ -7391,7 +7399,7 @@ static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name,
return -EINVAL;
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return -EOPNOTSUPP;
- return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_USER, name,
+ return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_USER, name,
buffer, size);
}
@@ -7405,7 +7413,7 @@ static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return -EOPNOTSUPP;
- return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_USER,
+ return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_USER,
name, value, size, flags);
}
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 1b8e9e8405b2..f833bf8d5792 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -110,7 +110,7 @@ int omfs_make_empty(struct inode *inode, struct super_block *sb)
static int omfs_add_link(struct dentry *dentry, struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct omfs_inode *oi;
@@ -155,7 +155,7 @@ out:
static int omfs_delete_entry(struct dentry *dentry)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
struct inode *dirty;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
@@ -237,7 +237,7 @@ static int omfs_dir_is_empty(struct inode *inode)
static int omfs_remove(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int ret;
@@ -373,8 +373,8 @@ static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx,
static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode *new_inode = new_dentry->d_inode;
- struct inode *old_inode = old_dentry->d_inode;
+ struct inode *new_inode = d_inode(new_dentry);
+ struct inode *old_inode = d_inode(old_dentry);
int err;
if (new_inode) {
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 902e88527fce..d9e26cfbb793 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -337,8 +337,6 @@ static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
const struct file_operations omfs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
@@ -348,7 +346,7 @@ const struct file_operations omfs_file_operations = {
static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
error = inode_change_ok(inode, attr);
diff --git a/fs/open.c b/fs/open.c
index 33f9cbf2610b..98e5a52dc68c 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -231,8 +231,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
return -EINVAL;
/* Return error if mode is not supported */
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
+ if (mode & ~FALLOC_FL_SUPPORTED_MASK)
return -EOPNOTSUPP;
/* Punch hole and zero range are mutually exclusive */
@@ -250,6 +249,11 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
(mode & ~FALLOC_FL_COLLAPSE_RANGE))
return -EINVAL;
+ /* Insert range should only be used exclusively. */
+ if ((mode & FALLOC_FL_INSERT_RANGE) &&
+ (mode & ~FALLOC_FL_INSERT_RANGE))
+ return -EINVAL;
+
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
@@ -570,6 +574,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
+retry_deleg:
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
if (!uid_valid(uid))
@@ -586,7 +591,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |=
ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
-retry_deleg:
mutex_lock(&inode->i_mutex);
error = security_path_chown(path, uid, gid);
if (!error)
@@ -734,10 +738,10 @@ static int do_dentry_open(struct file *f,
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(inode);
if ((f->f_mode & FMODE_READ) &&
- likely(f->f_op->read || f->f_op->aio_read || f->f_op->read_iter))
+ likely(f->f_op->read || f->f_op->read_iter))
f->f_mode |= FMODE_CAN_READ;
if ((f->f_mode & FMODE_WRITE) &&
- likely(f->f_op->write || f->f_op->aio_write || f->f_op->write_iter))
+ likely(f->f_op->write || f->f_op->write_iter))
f->f_mode |= FMODE_CAN_WRITE;
f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
@@ -988,9 +992,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
return ERR_PTR(err);
if (flags & O_CREAT)
return ERR_PTR(-EINVAL);
- if (!filename && (flags & O_DIRECTORY))
- if (!dentry->d_inode->i_op->lookup)
- return ERR_PTR(-ENOTDIR);
return do_file_open_root(dentry, mnt, filename, &op);
}
EXPORT_SYMBOL(file_open_root);
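
The vfs_fallocate() hunks above collapse the open-coded mode mask into FALLOC_FL_SUPPORTED_MASK and add an exclusivity check for FALLOC_FL_INSERT_RANGE. A standalone sketch of that (mode & FLAG) && (mode & ~FLAG) validation pattern follows; the flag values are illustrative, not the kernel's.

#include <stdio.h>

#define FL_KEEP_SIZE      0x01
#define FL_PUNCH_HOLE     0x02
#define FL_COLLAPSE_RANGE 0x08
#define FL_ZERO_RANGE     0x10
#define FL_INSERT_RANGE   0x20
#define FL_SUPPORTED_MASK (FL_KEEP_SIZE | FL_PUNCH_HOLE | \
			   FL_COLLAPSE_RANGE | FL_ZERO_RANGE | \
			   FL_INSERT_RANGE)

static int check_mode(unsigned int mode)
{
	if (mode & ~FL_SUPPORTED_MASK)
		return -1;		/* unknown flag */
	if ((mode & FL_INSERT_RANGE) && (mode & ~FL_INSERT_RANGE))
		return -1;		/* insert range must be used alone */
	return 0;
}

int main(void)
{
	printf("%d\n", check_mode(FL_INSERT_RANGE));			/* 0  */
	printf("%d\n", check_mode(FL_INSERT_RANGE | FL_KEEP_SIZE));	/* -1 */
	printf("%d\n", check_mode(FL_PUNCH_HOLE | FL_KEEP_SIZE));	/* 0  */
	return 0;
}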
diff --git a/fs/pipe.c b/fs/pipe.c
index 21981e58e2a6..8865f7963700 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -21,7 +21,6 @@
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
-#include <linux/aio.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
@@ -628,7 +627,7 @@ static struct vfsmount *pipe_mnt __read_mostly;
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
- dentry->d_inode->i_ino);
+ d_inode(dentry)->i_ino);
}
static const struct dentry_operations pipefs_dentry_operations = {
@@ -947,9 +946,7 @@ err:
const struct file_operations pipefifo_fops = {
.open = fifo_open,
.llseek = no_llseek,
- .read = new_sync_read,
.read_iter = pipe_read,
- .write = new_sync_write,
.write_iter = pipe_write,
.poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
diff --git a/fs/pnode.c b/fs/pnode.c
index 260ac8f898a4..6367e1e435c6 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
}
/*
+ * Clear MNT_LOCKED when it can be shown to be safe.
+ *
+ * mount_lock lock must be held for write
+ */
+void propagate_mount_unlock(struct mount *mnt)
+{
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m, *child;
+
+ BUG_ON(parent == mnt);
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
+ if (child)
+ child->mnt.mnt_flags &= ~MNT_LOCKED;
+ }
+}
+
+/*
+ * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
+ */
+static void mark_umount_candidates(struct mount *mnt)
+{
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
+
+ BUG_ON(parent == mnt);
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ struct mount *child = __lookup_mnt_last(&m->mnt,
+ mnt->mnt_mountpoint);
+ if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
+ SET_MNT_MARK(child);
+ }
+ }
+}
+
+/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
@@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt)
struct mount *child = __lookup_mnt_last(&m->mnt,
mnt->mnt_mountpoint);
/*
- * umount the child only if the child has no
- * other children
+ * umount the child only if the child has no children
+ * and the child is marked safe to unmount.
*/
- if (child && list_empty(&child->mnt_mounts)) {
+ if (!child || !IS_MNT_MARKED(child))
+ continue;
+ CLEAR_MNT_MARK(child);
+ if (list_empty(&child->mnt_mounts)) {
list_del_init(&child->mnt_child);
- hlist_del_init_rcu(&child->mnt_hash);
- hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
+ child->mnt.mnt_flags |= MNT_UMOUNT;
+ list_move_tail(&child->mnt_list, &mnt->mnt_list);
}
}
}
@@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt)
*
* vfsmount lock must be held for write
*/
-int propagate_umount(struct hlist_head *list)
+int propagate_umount(struct list_head *list)
{
struct mount *mnt;
- hlist_for_each_entry(mnt, list, mnt_hash)
+ list_for_each_entry_reverse(mnt, list, mnt_list)
+ mark_umount_candidates(mnt);
+
+ list_for_each_entry(mnt, list, mnt_list)
__propagate_umount(mnt);
return 0;
}
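
propagate_umount() above switches from a single hash-list walk to two passes over a regular list: a reverse pass that marks unmount candidates, then a forward pass that acts only on marked entries. The sketch below is a heavily simplified stand-in for that two-pass structure (the real marking rule also consults MNT_LOCKED on the child and existing marks on the parent).

#include <stdbool.h>
#include <stdio.h>

struct fake_mount {
	const char *name;
	bool locked;		/* stand-in for MNT_LOCKED */
	bool marked;		/* stand-in for MNT_MARKED */
	bool has_children;
};

static void mark_candidates(struct fake_mount *m, int n)
{
	for (int i = n - 1; i >= 0; i--)	/* reverse pass: mark */
		if (!m[i].locked)
			m[i].marked = true;
}

static void do_umount_pass(struct fake_mount *m, int n)
{
	for (int i = 0; i < n; i++) {		/* forward pass: act */
		if (!m[i].marked)
			continue;
		m[i].marked = false;
		if (!m[i].has_children)
			printf("umount %s\n", m[i].name);
	}
}

int main(void)
{
	struct fake_mount mounts[] = {
		{ "a", false, false, false },
		{ "b", true,  false, false },	/* locked: never marked */
		{ "c", false, false, true  },	/* marked, but has children */
	};

	mark_candidates(mounts, 3);
	do_umount_pass(mounts, 3);		/* prints "umount a" */
	return 0;
}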
diff --git a/fs/pnode.h b/fs/pnode.h
index 4a246358b031..7114ce6e6b9e 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -19,6 +19,9 @@
#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
+#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
+#define IS_MNT_LOCKED_AND_LAZY(m) \
+ (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
#define CL_EXPIRE 0x01
#define CL_SLAVE 0x02
@@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt)
void change_mnt_propagation(struct mount *, int);
int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
struct hlist_head *);
-int propagate_umount(struct hlist_head *);
+int propagate_umount(struct list_head *);
int propagate_mount_busy(struct mount *, int);
+void propagate_mount_unlock(struct mount *);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);
unsigned int mnt_get_count(struct mount *mnt);
void mnt_set_mountpoint(struct mount *, struct mountpoint *,
struct mount *);
-void umount_tree(struct mount *, int);
struct mount *copy_tree(struct mount *, struct dentry *, int);
bool is_path_reachable(struct mount *, struct dentry *,
const struct path *root);
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 3a48bb789c9f..84bb65b83570 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -774,12 +774,12 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name,
struct posix_acl *acl;
int error;
- if (!IS_POSIXACL(dentry->d_inode))
+ if (!IS_POSIXACL(d_backing_inode(dentry)))
return -EOPNOTSUPP;
if (d_is_symlink(dentry))
return -EOPNOTSUPP;
- acl = get_acl(dentry->d_inode, type);
+ acl = get_acl(d_backing_inode(dentry), type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -795,7 +795,7 @@ static int
posix_acl_xattr_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct posix_acl *acl = NULL;
int ret;
@@ -834,7 +834,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size,
const char *xname;
size_t size;
- if (!IS_POSIXACL(dentry->d_inode))
+ if (!IS_POSIXACL(d_backing_inode(dentry)))
return -EOPNOTSUPP;
if (d_is_symlink(dentry))
return -EOPNOTSUPP;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 1295a00ca316..fd02a9ebfc30 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -99,8 +99,8 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
buf = m->buf + m->count;
/* Ignore error for now */
- string_escape_str(tcomm, &buf, m->size - m->count,
- ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
+ buf += string_escape_str(tcomm, buf, m->size - m->count,
+ ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
m->count = buf - m->buf;
seq_putc(m, '\n');
@@ -188,6 +188,24 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
from_kgid_munged(user_ns, GROUP_AT(group_info, g)));
put_cred(cred);
+#ifdef CONFIG_PID_NS
+ seq_puts(m, "\nNStgid:");
+ for (g = ns->level; g <= pid->level; g++)
+ seq_printf(m, "\t%d",
+ task_tgid_nr_ns(p, pid->numbers[g].ns));
+ seq_puts(m, "\nNSpid:");
+ for (g = ns->level; g <= pid->level; g++)
+ seq_printf(m, "\t%d",
+ task_pid_nr_ns(p, pid->numbers[g].ns));
+ seq_puts(m, "\nNSpgid:");
+ for (g = ns->level; g <= pid->level; g++)
+ seq_printf(m, "\t%d",
+ task_pgrp_nr_ns(p, pid->numbers[g].ns));
+ seq_puts(m, "\nNSsid:");
+ for (g = ns->level; g <= pid->level; g++)
+ seq_printf(m, "\t%d",
+ task_session_nr_ns(p, pid->numbers[g].ns));
+#endif
seq_putc(m, '\n');
}
@@ -614,7 +632,9 @@ static int children_seq_show(struct seq_file *seq, void *v)
pid_t pid;
pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
- return seq_printf(seq, "%d ", pid);
+ seq_printf(seq, "%d ", pid);
+
+ return 0;
}
static void *children_seq_start(struct seq_file *seq, loff_t *pos)
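
The task_state() hunk above adds NStgid/NSpid/NSpgid/NSsid lines to /proc/<pid>/status, one value per PID-namespace level from the reader's namespace down to the task's own. A small userspace reader, only useful on a kernel carrying this change, might look like:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "NSpid:", 6) == 0) {
			/* several tab-separated values if run in a nested pid ns */
			fputs(line, stdout);
			break;
		}
	}
	fclose(f);
	return 0;
}

On older kernels the line is simply absent and the program prints nothing.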
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3f3d7aeb0712..093ca14f5701 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -169,7 +169,7 @@ static int get_task_root(struct task_struct *task, struct path *root)
static int proc_cwd_link(struct dentry *dentry, struct path *path)
{
- struct task_struct *task = get_proc_task(dentry->d_inode);
+ struct task_struct *task = get_proc_task(d_inode(dentry));
int result = -ENOENT;
if (task) {
@@ -186,7 +186,7 @@ static int proc_cwd_link(struct dentry *dentry, struct path *path)
static int proc_root_link(struct dentry *dentry, struct path *path)
{
- struct task_struct *task = get_proc_task(dentry->d_inode);
+ struct task_struct *task = get_proc_task(d_inode(dentry));
int result = -ENOENT;
if (task) {
@@ -238,13 +238,15 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
wchan = get_wchan(task);
- if (lookup_symbol_name(wchan, symname) < 0)
+ if (lookup_symbol_name(wchan, symname) < 0) {
if (!ptrace_may_access(task, PTRACE_MODE_READ))
return 0;
- else
- return seq_printf(m, "%lu", wchan);
- else
- return seq_printf(m, "%s", symname);
+ seq_printf(m, "%lu", wchan);
+ } else {
+ seq_printf(m, "%s", symname);
+ }
+
+ return 0;
}
#endif /* CONFIG_KALLSYMS */
@@ -309,10 +311,12 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- return seq_printf(m, "%llu %llu %lu\n",
- (unsigned long long)task->se.sum_exec_runtime,
- (unsigned long long)task->sched_info.run_delay,
- task->sched_info.pcount);
+ seq_printf(m, "%llu %llu %lu\n",
+ (unsigned long long)task->se.sum_exec_runtime,
+ (unsigned long long)task->sched_info.run_delay,
+ task->sched_info.pcount);
+
+ return 0;
}
#endif
@@ -387,7 +391,9 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
points = oom_badness(task, NULL, NULL, totalpages) *
1000 / totalpages;
read_unlock(&tasklist_lock);
- return seq_printf(m, "%lu\n", points);
+ seq_printf(m, "%lu\n", points);
+
+ return 0;
}
struct limit_names {
@@ -432,15 +438,15 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
* print the file header
*/
seq_printf(m, "%-25s %-20s %-20s %-10s\n",
- "Limit", "Soft Limit", "Hard Limit", "Units");
+ "Limit", "Soft Limit", "Hard Limit", "Units");
for (i = 0; i < RLIM_NLIMITS; i++) {
if (rlim[i].rlim_cur == RLIM_INFINITY)
seq_printf(m, "%-25s %-20s ",
- lnames[i].name, "unlimited");
+ lnames[i].name, "unlimited");
else
seq_printf(m, "%-25s %-20lu ",
- lnames[i].name, rlim[i].rlim_cur);
+ lnames[i].name, rlim[i].rlim_cur);
if (rlim[i].rlim_max == RLIM_INFINITY)
seq_printf(m, "%-20s ", "unlimited");
@@ -462,7 +468,9 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
{
long nr;
unsigned long args[6], sp, pc;
- int res = lock_trace(task);
+ int res;
+
+ res = lock_trace(task);
if (res)
return res;
@@ -477,7 +485,8 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
args[0], args[1], args[2], args[3], args[4], args[5],
sp, pc);
unlock_trace(task);
- return res;
+
+ return 0;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
@@ -505,7 +514,7 @@ static int proc_fd_access_allowed(struct inode *inode)
int proc_setattr(struct dentry *dentry, struct iattr *attr)
{
int error;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (attr->ia_valid & ATTR_MODE)
return -EPERM;
@@ -1353,7 +1362,7 @@ static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
struct mm_struct *mm;
struct file *exe_file;
- task = get_proc_task(dentry->d_inode);
+ task = get_proc_task(d_inode(dentry));
if (!task)
return -ENOENT;
mm = get_task_mm(task);
@@ -1373,7 +1382,7 @@ static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct path path;
int error = -EACCES;
@@ -1418,7 +1427,7 @@ static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
{
int error = -EACCES;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct path path;
/* Are we allowed to snoop on the tasks file descriptors? */
@@ -1488,7 +1497,7 @@ out_unlock:
int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct task_struct *task;
const struct cred *cred;
struct pid_namespace *pid = dentry->d_sb->s_fs_info;
@@ -1545,7 +1554,7 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
task = get_proc_task(inode);
if (task) {
@@ -1579,7 +1588,7 @@ int pid_delete_dentry(const struct dentry *dentry)
* If so, then don't put the dentry on the lru list,
* kill it immediately.
*/
- return proc_inode_is_dead(dentry->d_inode);
+ return proc_inode_is_dead(d_inode(dentry));
}
const struct dentry_operations pid_dentry_operations =
@@ -1617,12 +1626,12 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
child = d_alloc(dir, &qname);
if (!child)
goto end_instantiate;
- if (instantiate(dir->d_inode, child, task, ptr) < 0) {
+ if (instantiate(d_inode(dir), child, task, ptr) < 0) {
dput(child);
goto end_instantiate;
}
}
- inode = child->d_inode;
+ inode = d_inode(child);
ino = inode->i_ino;
type = inode->i_mode >> 12;
dput(child);
@@ -1665,7 +1674,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out_notask;
}
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
task = get_proc_task(inode);
if (!task)
goto out_notask;
@@ -1718,7 +1727,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
int rc;
rc = -ENOENT;
- task = get_proc_task(dentry->d_inode);
+ task = get_proc_task(d_inode(dentry));
if (!task)
goto out;
@@ -2002,12 +2011,13 @@ static int show_timer(struct seq_file *m, void *v)
notify = timer->it_sigev_notify;
seq_printf(m, "ID: %d\n", timer->it_id);
- seq_printf(m, "signal: %d/%p\n", timer->sigq->info.si_signo,
- timer->sigq->info.si_value.sival_ptr);
+ seq_printf(m, "signal: %d/%p\n",
+ timer->sigq->info.si_signo,
+ timer->sigq->info.si_value.sival_ptr);
seq_printf(m, "notify: %s/%s.%d\n",
- nstr[notify & ~SIGEV_THREAD_ID],
- (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
- pid_nr_ns(timer->it_pid, tp->ns));
+ nstr[notify & ~SIGEV_THREAD_ID],
+ (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
+ pid_nr_ns(timer->it_pid, tp->ns));
seq_printf(m, "ClockID: %d\n", timer->it_clock);
return 0;
@@ -2352,21 +2362,23 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
unlock_task_sighand(task, &flags);
}
- result = seq_printf(m,
- "rchar: %llu\n"
- "wchar: %llu\n"
- "syscr: %llu\n"
- "syscw: %llu\n"
- "read_bytes: %llu\n"
- "write_bytes: %llu\n"
- "cancelled_write_bytes: %llu\n",
- (unsigned long long)acct.rchar,
- (unsigned long long)acct.wchar,
- (unsigned long long)acct.syscr,
- (unsigned long long)acct.syscw,
- (unsigned long long)acct.read_bytes,
- (unsigned long long)acct.write_bytes,
- (unsigned long long)acct.cancelled_write_bytes);
+ seq_printf(m,
+ "rchar: %llu\n"
+ "wchar: %llu\n"
+ "syscr: %llu\n"
+ "syscw: %llu\n"
+ "read_bytes: %llu\n"
+ "write_bytes: %llu\n"
+ "cancelled_write_bytes: %llu\n",
+ (unsigned long long)acct.rchar,
+ (unsigned long long)acct.wchar,
+ (unsigned long long)acct.syscr,
+ (unsigned long long)acct.syscw,
+ (unsigned long long)acct.read_bytes,
+ (unsigned long long)acct.write_bytes,
+ (unsigned long long)acct.cancelled_write_bytes);
+ result = 0;
+
out_unlock:
mutex_unlock(&task->signal->cred_guard_mutex);
return result;
@@ -2851,13 +2863,13 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
return 0;
if (pos == TGID_OFFSET - 2) {
- struct inode *inode = ns->proc_self->d_inode;
+ struct inode *inode = d_inode(ns->proc_self);
if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK))
return 0;
ctx->pos = pos = pos + 1;
}
if (pos == TGID_OFFSET - 1) {
- struct inode *inode = ns->proc_thread_self->d_inode;
+ struct inode *inode = d_inode(ns->proc_thread_self);
if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK))
return 0;
ctx->pos = pos = pos + 1;
@@ -3176,7 +3188,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct task_struct *p = get_proc_task(inode);
generic_fillattr(inode, stat);
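
Several show routines above stop returning the value of seq_printf() (which no longer reports anything useful) and return 0 instead, with overflow detected separately — see the seq_has_overflowed() call in the fs/proc/fd.c hunk below. A standalone sketch of that convention, using simplified stand-in types rather than the kernel's seq_file API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct buf { char data[32]; size_t count; bool overflowed; };

static void buf_printf(struct buf *b, const char *s)
{
	size_t len = strlen(s);

	if (b->count + len >= sizeof(b->data)) {
		b->overflowed = true;		/* record, don't return it */
		return;
	}
	memcpy(b->data + b->count, s, len + 1);
	b->count += len;
}

static int show(struct buf *b)
{
	buf_printf(b, "hello ");
	buf_printf(b, "world\n");
	return 0;				/* success/failure only */
}

int main(void)
{
	struct buf b = { .count = 0, .overflowed = false };

	show(&b);
	if (!b.overflowed)
		fputs(b.data, stdout);
	return 0;
}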
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 8e5ad83b629a..6e5fcd00733e 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -8,6 +8,7 @@
#include <linux/security.h>
#include <linux/file.h>
#include <linux/seq_file.h>
+#include <linux/fs.h>
#include <linux/proc_fs.h>
@@ -48,17 +49,23 @@ static int seq_show(struct seq_file *m, void *v)
put_files_struct(files);
}
- if (!ret) {
- seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\n",
- (long long)file->f_pos, f_flags,
- real_mount(file->f_path.mnt)->mnt_id);
- if (file->f_op->show_fdinfo)
- file->f_op->show_fdinfo(m, file);
- ret = seq_has_overflowed(m);
- fput(file);
- }
+ if (ret)
+ return ret;
- return ret;
+ seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\n",
+ (long long)file->f_pos, f_flags,
+ real_mount(file->f_path.mnt)->mnt_id);
+
+ show_fd_locks(m, file, files);
+ if (seq_has_overflowed(m))
+ goto out;
+
+ if (file->f_op->show_fdinfo)
+ file->f_op->show_fdinfo(m, file);
+
+out:
+ fput(file);
+ return 0;
}
static int seq_fdinfo_open(struct inode *inode, struct file *file)
@@ -84,7 +91,7 @@ static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
task = get_proc_task(inode);
fd = proc_fd(inode);
@@ -144,14 +151,14 @@ static int proc_fd_link(struct dentry *dentry, struct path *path)
struct task_struct *task;
int ret = -ENOENT;
- task = get_proc_task(dentry->d_inode);
+ task = get_proc_task(d_inode(dentry));
if (task) {
files = get_files_struct(task);
put_task_struct(task);
}
if (files) {
- int fd = proc_fd(dentry->d_inode);
+ int fd = proc_fd(d_inode(dentry));
struct file *fd_file;
spin_lock(&files->file_lock);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index be65b2082135..df6327a2b865 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -101,7 +101,7 @@ static bool pde_subdir_insert(struct proc_dir_entry *dir,
static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct proc_dir_entry *de = PDE(inode);
int error;
@@ -120,7 +120,7 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct proc_dir_entry *de = PDE(inode);
if (de && de->nlink)
set_nlink(inode, de->nlink);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 7697b6621cfd..8272aaba1bb0 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -396,7 +396,7 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct proc_dir_entry *pde = PDE(dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(d_inode(dentry));
if (unlikely(!use_pde(pde)))
return ERR_PTR(-EINVAL);
nd_set_link(nd, pde->data);
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index c9eac4563fa8..e512642dbbdc 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -32,7 +32,7 @@ static const struct proc_ns_operations *ns_entries[] = {
static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
struct task_struct *task;
struct path ns_path;
@@ -53,7 +53,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
struct task_struct *task;
char name[50];
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 1bde894bc624..350984a19c83 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -142,7 +142,7 @@ static struct dentry *proc_tgid_net_lookup(struct inode *dir,
static int proc_tgid_net_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct net *net;
net = get_proc_task_net(inode);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f92d5dd578a4..fea2561d773b 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -604,7 +604,7 @@ static bool proc_sys_fill_cache(struct file *file,
return false;
}
}
- inode = child->d_inode;
+ inode = d_inode(child);
ino = inode->i_ino;
type = inode->i_mode >> 12;
dput(child);
@@ -710,7 +710,7 @@ static int proc_sys_permission(struct inode *inode, int mask)
static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
@@ -727,7 +727,7 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ctl_table_header *head = grab_header(inode);
struct ctl_table *table = PROC_I(inode)->sysctl_entry;
@@ -773,12 +773,12 @@ static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
- return !PROC_I(dentry->d_inode)->sysctl->unregistering;
+ return !PROC_I(d_inode(dentry))->sysctl->unregistering;
}
static int proc_sys_delete(const struct dentry *dentry)
{
- return !!PROC_I(dentry->d_inode)->sysctl->unregistering;
+ return !!PROC_I(d_inode(dentry))->sysctl->unregistering;
}
static int sysctl_is_seen(struct ctl_table_header *p)
@@ -805,7 +805,7 @@ static int proc_sys_compare(const struct dentry *parent, const struct dentry *de
/* Although proc doesn't have negative dentries, rcu-walk means
* that inode here can be NULL */
/* AV: can it, indeed? */
- inode = ACCESS_ONCE(dentry->d_inode);
+ inode = d_inode_rcu(dentry);
if (!inode)
return 1;
if (name->len != len)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index e74ac9f1a2c0..b7fa4bfe896a 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -195,7 +195,7 @@ void __init proc_root_init(void)
static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat
)
{
- generic_fillattr(dentry->d_inode, stat);
+ generic_fillattr(d_inode(dentry), stat);
stat->nlink = proc_root.nlink + nr_processes();
return 0;
}
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 4348bb8907c2..6195b4a7c3b1 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -46,7 +46,7 @@ static unsigned self_inum;
int proc_setup_self(struct super_block *s)
{
- struct inode *root_inode = s->s_root->d_inode;
+ struct inode *root_inode = d_inode(s->s_root);
struct pid_namespace *ns = s->s_fs_info;
struct dentry *self;
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index 59075b509df3..a8371993b4fb 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -47,7 +47,7 @@ static unsigned thread_self_inum;
int proc_setup_thread_self(struct super_block *s)
{
- struct inode *root_inode = s->s_root->d_inode;
+ struct inode *root_inode = d_inode(s->s_root);
struct pid_namespace *ns = s->s_fs_info;
struct dentry *thread_self;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index b32ce53d24ee..dc43b5f29305 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -190,7 +190,7 @@ static const struct file_operations pstore_file_operations = {
*/
static int pstore_unlink(struct inode *dir, struct dentry *dentry)
{
- struct pstore_private *p = dentry->d_inode->i_private;
+ struct pstore_private *p = d_inode(dentry)->i_private;
int err;
err = pstore_check_syslog_permissions(p);
@@ -199,7 +199,7 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
if (p->psi->erase)
p->psi->erase(p->type, p->id, p->count,
- dentry->d_inode->i_ctime, p->psi);
+ d_inode(dentry)->i_ctime, p->psi);
else
return -EPERM;
@@ -364,6 +364,9 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
case PSTORE_TYPE_PMSG:
scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id);
break;
+ case PSTORE_TYPE_PPC_OPAL:
+ sprintf(name, "powerpc-opal-%s-%lld", psname, id);
+ break;
case PSTORE_TYPE_UNKNOWN:
scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id);
break;
@@ -373,7 +376,7 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
break;
}
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
dentry = d_alloc_name(root, name);
if (!dentry)
@@ -393,12 +396,12 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
list_add(&private->list, &allpstore);
spin_unlock_irqrestore(&allpstore_lock, flags);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return 0;
fail_lockedalloc:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
kfree(private);
fail_alloc:
iput(inode);
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 39d1373128e9..44a549beeafa 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -539,6 +539,9 @@ static int ramoops_probe(struct platform_device *pdev)
mem_address = pdata->mem_address;
record_size = pdata->record_size;
dump_oops = pdata->dump_oops;
+ ramoops_console_size = pdata->console_size;
+ ramoops_pmsg_size = pdata->pmsg_size;
+ ramoops_ftrace_size = pdata->ftrace_size;
pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n",
cxt->size, (unsigned long long)cxt->phys_addr,
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 44e73923670d..32d2e1a9774c 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -182,7 +182,7 @@ static const char *qnx6_checkroot(struct super_block *s)
static char match_root[2][3] = {".\0\0", "..\0"};
int i, error = 0;
struct qnx6_dir_entry *dir_entry;
- struct inode *root = s->s_root->d_inode;
+ struct inode *root = d_inode(s->s_root);
struct address_space *mapping = root->i_mapping;
struct page *page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 0ccd4ba3a246..20d1f74561cf 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -900,14 +900,17 @@ static inline struct dquot **i_dquot(struct inode *inode)
static int dqinit_needed(struct inode *inode, int type)
{
+ struct dquot * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
return 0;
+
+ dquots = i_dquot(inode);
if (type != -1)
- return !i_dquot(inode)[type];
+ return !dquots[type];
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (!i_dquot(inode)[cnt])
+ if (!dquots[cnt])
return 1;
return 0;
}
@@ -970,12 +973,13 @@ static void add_dquot_ref(struct super_block *sb, int type)
static void remove_inode_dquot_ref(struct inode *inode, int type,
struct list_head *tofree_head)
{
- struct dquot *dquot = i_dquot(inode)[type];
+ struct dquot **dquots = i_dquot(inode);
+ struct dquot *dquot = dquots[type];
- i_dquot(inode)[type] = NULL;
if (!dquot)
return;
+ dquots[type] = NULL;
if (list_empty(&dquot->dq_free)) {
/*
* The inode still has reference to dquot so it can't be in the
@@ -1159,8 +1163,8 @@ static int need_print_warning(struct dquot_warn *warn)
return uid_eq(current_fsuid(), warn->w_dq_id.uid);
case GRPQUOTA:
return in_group_p(warn->w_dq_id.gid);
- case PRJQUOTA: /* Never taken... Just make gcc happy */
- return 0;
+ case PRJQUOTA:
+ return 1;
}
return 0;
}
@@ -1389,16 +1393,21 @@ static int dquot_active(const struct inode *inode)
static void __dquot_initialize(struct inode *inode, int type)
{
int cnt, init_needed = 0;
- struct dquot *got[MAXQUOTAS];
+ struct dquot **dquots, *got[MAXQUOTAS];
struct super_block *sb = inode->i_sb;
qsize_t rsv;
if (!dquot_active(inode))
return;
+ dquots = i_dquot(inode);
+
/* First get references to structures we might need. */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
struct kqid qid;
+ kprojid_t projid;
+ int rc;
+
got[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
@@ -1407,8 +1416,12 @@ static void __dquot_initialize(struct inode *inode, int type)
* we check it without locking here to avoid unnecessary
* dqget()/dqput() calls.
*/
- if (i_dquot(inode)[cnt])
+ if (dquots[cnt])
+ continue;
+
+ if (!sb_has_quota_active(sb, cnt))
continue;
+
init_needed = 1;
switch (cnt) {
@@ -1418,6 +1431,12 @@ static void __dquot_initialize(struct inode *inode, int type)
case GRPQUOTA:
qid = make_kqid_gid(inode->i_gid);
break;
+ case PRJQUOTA:
+ rc = inode->i_sb->dq_op->get_projid(inode, &projid);
+ if (rc)
+ continue;
+ qid = make_kqid_projid(projid);
+ break;
}
got[cnt] = dqget(sb, qid);
}
@@ -1438,8 +1457,8 @@ static void __dquot_initialize(struct inode *inode, int type)
/* We could race with quotaon or dqget() could have failed */
if (!got[cnt])
continue;
- if (!i_dquot(inode)[cnt]) {
- i_dquot(inode)[cnt] = got[cnt];
+ if (!dquots[cnt]) {
+ dquots[cnt] = got[cnt];
got[cnt] = NULL;
/*
* Make quota reservation system happy if someone
@@ -1447,7 +1466,7 @@ static void __dquot_initialize(struct inode *inode, int type)
*/
rsv = inode_get_rsv_space(inode);
if (unlikely(rsv))
- dquot_resv_space(i_dquot(inode)[cnt], rsv);
+ dquot_resv_space(dquots[cnt], rsv);
}
}
out_err:
@@ -1473,12 +1492,13 @@ EXPORT_SYMBOL(dquot_initialize);
static void __dquot_drop(struct inode *inode)
{
int cnt;
+ struct dquot **dquots = i_dquot(inode);
struct dquot *put[MAXQUOTAS];
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- put[cnt] = i_dquot(inode)[cnt];
- i_dquot(inode)[cnt] = NULL;
+ put[cnt] = dquots[cnt];
+ dquots[cnt] = NULL;
}
spin_unlock(&dq_data_lock);
dqput_all(put);
@@ -1486,6 +1506,7 @@ static void __dquot_drop(struct inode *inode)
void dquot_drop(struct inode *inode)
{
+ struct dquot * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
@@ -1498,8 +1519,9 @@ void dquot_drop(struct inode *inode)
* must assure that nobody can come after the DQUOT_DROP and
* add quota pointers back anyway.
*/
+ dquots = i_dquot(inode);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (i_dquot(inode)[cnt])
+ if (dquots[cnt])
break;
}
@@ -1600,8 +1622,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot **dquots = i_dquot(inode);
int reserve = flags & DQUOT_SPACE_RESERVE;
+ struct dquot **dquots;
if (!dquot_active(inode)) {
inode_incr_space(inode, number, reserve);
@@ -1611,6 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warn[cnt].w_type = QUOTA_NL_NOWARN;
+ dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1652,13 +1675,14 @@ int dquot_alloc_inode(struct inode *inode)
{
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot * const *dquots = i_dquot(inode);
+ struct dquot * const *dquots;
if (!dquot_active(inode))
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warn[cnt].w_type = QUOTA_NL_NOWARN;
+ dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1690,6 +1714,7 @@ EXPORT_SYMBOL(dquot_alloc_inode);
*/
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
+ struct dquot **dquots;
int cnt, index;
if (!dquot_active(inode)) {
@@ -1697,18 +1722,18 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
return 0;
}
+ dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&dq_data_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (i_dquot(inode)[cnt])
- dquot_claim_reserved_space(i_dquot(inode)[cnt],
- number);
+ if (dquots[cnt])
+ dquot_claim_reserved_space(dquots[cnt], number);
}
/* Update inode bytes */
inode_claim_rsv_space(inode, number);
spin_unlock(&dq_data_lock);
- mark_all_dquot_dirty(i_dquot(inode));
+ mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
return 0;
}
@@ -1719,6 +1744,7 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
*/
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
+ struct dquot **dquots;
int cnt, index;
if (!dquot_active(inode)) {
@@ -1726,18 +1752,18 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
return;
}
+ dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&dq_data_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (i_dquot(inode)[cnt])
- dquot_reclaim_reserved_space(i_dquot(inode)[cnt],
- number);
+ if (dquots[cnt])
+ dquot_reclaim_reserved_space(dquots[cnt], number);
}
/* Update inode bytes */
inode_reclaim_rsv_space(inode, number);
spin_unlock(&dq_data_lock);
- mark_all_dquot_dirty(i_dquot(inode));
+ mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
return;
}
@@ -1750,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot **dquots = i_dquot(inode);
+ struct dquot **dquots;
int reserve = flags & DQUOT_SPACE_RESERVE, index;
if (!dquot_active(inode)) {
@@ -1758,6 +1784,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
return;
}
+ dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1793,12 +1820,13 @@ void dquot_free_inode(struct inode *inode)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot * const *dquots = i_dquot(inode);
+ struct dquot * const *dquots;
int index;
if (!dquot_active(inode))
return;
+ dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -2161,7 +2189,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
error = -EROFS;
goto out_fmt;
}
- if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
+ if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
+ (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
error = -EINVAL;
goto out_fmt;
}
@@ -2299,7 +2328,7 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
if (path->dentry->d_sb != sb)
error = -EXDEV;
else
- error = vfs_load_quota_inode(path->dentry->d_inode, type,
+ error = vfs_load_quota_inode(d_inode(path->dentry), type,
format_id, DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED);
return error;
@@ -2363,20 +2392,20 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
struct dentry *dentry;
int error;
- mutex_lock(&sb->s_root->d_inode->i_mutex);
+ mutex_lock(&d_inode(sb->s_root)->i_mutex);
dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
- mutex_unlock(&sb->s_root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(sb->s_root)->i_mutex);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!dentry->d_inode) {
+ if (d_really_is_negative(dentry)) {
error = -ENOENT;
goto out;
}
error = security_quota_on(dentry);
if (!error)
- error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
+ error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
out:
@@ -2614,55 +2643,73 @@ out:
EXPORT_SYMBOL(dquot_set_dqblk);
/* Generic routine for getting common part of quota file information */
-int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
+int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
struct mem_dqinfo *mi;
+ struct qc_type_state *tstate;
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int type;
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
- if (!sb_has_quota_active(sb, type)) {
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
- return -ESRCH;
+ memset(state, 0, sizeof(*state));
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (!sb_has_quota_active(sb, type))
+ continue;
+ tstate = state->s_state + type;
+ mi = sb_dqopt(sb)->info + type;
+ tstate->flags = QCI_ACCT_ENABLED;
+ spin_lock(&dq_data_lock);
+ if (mi->dqi_flags & DQF_SYS_FILE)
+ tstate->flags |= QCI_SYSFILE;
+ if (mi->dqi_flags & DQF_ROOT_SQUASH)
+ tstate->flags |= QCI_ROOT_SQUASH;
+ if (sb_has_quota_limits_enabled(sb, type))
+ tstate->flags |= QCI_LIMITS_ENFORCED;
+ tstate->spc_timelimit = mi->dqi_bgrace;
+ tstate->ino_timelimit = mi->dqi_igrace;
+ tstate->ino = dqopt->files[type]->i_ino;
+ tstate->blocks = dqopt->files[type]->i_blocks;
+ tstate->nextents = 1; /* We don't know... */
+ spin_unlock(&dq_data_lock);
}
- mi = sb_dqopt(sb)->info + type;
- spin_lock(&dq_data_lock);
- ii->dqi_bgrace = mi->dqi_bgrace;
- ii->dqi_igrace = mi->dqi_igrace;
- ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK;
- ii->dqi_valid = IIF_ALL;
- spin_unlock(&dq_data_lock);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
-EXPORT_SYMBOL(dquot_get_dqinfo);
+EXPORT_SYMBOL(dquot_get_state);
/* Generic routine for setting common part of quota file information */
-int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
+int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
struct mem_dqinfo *mi;
int err = 0;
+ if ((ii->i_fieldmask & QC_WARNS_MASK) ||
+ (ii->i_fieldmask & QC_RT_SPC_TIMER))
+ return -EINVAL;
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!sb_has_quota_active(sb, type)) {
err = -ESRCH;
goto out;
}
mi = sb_dqopt(sb)->info + type;
- if (ii->dqi_valid & IIF_FLAGS) {
- if (ii->dqi_flags & ~DQF_SETINFO_MASK ||
- (ii->dqi_flags & DQF_ROOT_SQUASH &&
+ if (ii->i_fieldmask & QC_FLAGS) {
+ if ((ii->i_flags & QCI_ROOT_SQUASH &&
mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) {
err = -EINVAL;
goto out;
}
}
spin_lock(&dq_data_lock);
- if (ii->dqi_valid & IIF_BGRACE)
- mi->dqi_bgrace = ii->dqi_bgrace;
- if (ii->dqi_valid & IIF_IGRACE)
- mi->dqi_igrace = ii->dqi_igrace;
- if (ii->dqi_valid & IIF_FLAGS)
- mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) |
- (ii->dqi_flags & DQF_SETINFO_MASK);
+ if (ii->i_fieldmask & QC_SPC_TIMER)
+ mi->dqi_bgrace = ii->i_spc_timelimit;
+ if (ii->i_fieldmask & QC_INO_TIMER)
+ mi->dqi_igrace = ii->i_ino_timelimit;
+ if (ii->i_fieldmask & QC_FLAGS) {
+ if (ii->i_flags & QCI_ROOT_SQUASH)
+ mi->dqi_flags |= DQF_ROOT_SQUASH;
+ else
+ mi->dqi_flags &= ~DQF_ROOT_SQUASH;
+ }
spin_unlock(&dq_data_lock);
mark_info_dirty(sb, type);
/* Force write to disk */
@@ -2677,7 +2724,7 @@ const struct quotactl_ops dquot_quotactl_ops = {
.quota_on = dquot_quota_on,
.quota_off = dquot_quota_off,
.quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
+ .get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
@@ -2688,7 +2735,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = {
.quota_enable = dquot_quota_enable,
.quota_disable = dquot_quota_disable,
.quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
+ .get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index d14a799c7785..86ded7375c21 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -118,13 +118,30 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
- struct if_dqinfo info;
+ struct qc_state state;
+ struct qc_type_state *tstate;
+ struct if_dqinfo uinfo;
int ret;
- if (!sb->s_qcop->get_info)
+ /* This checks whether qc_state has enough entries... */
+ BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
+ if (!sb->s_qcop->get_state)
return -ENOSYS;
- ret = sb->s_qcop->get_info(sb, type, &info);
- if (!ret && copy_to_user(addr, &info, sizeof(info)))
+ ret = sb->s_qcop->get_state(sb, &state);
+ if (ret)
+ return ret;
+ tstate = state.s_state + type;
+ if (!(tstate->flags & QCI_ACCT_ENABLED))
+ return -ESRCH;
+ memset(&uinfo, 0, sizeof(uinfo));
+ uinfo.dqi_bgrace = tstate->spc_timelimit;
+ uinfo.dqi_igrace = tstate->ino_timelimit;
+ if (tstate->flags & QCI_SYSFILE)
+ uinfo.dqi_flags |= DQF_SYS_FILE;
+ if (tstate->flags & QCI_ROOT_SQUASH)
+ uinfo.dqi_flags |= DQF_ROOT_SQUASH;
+ uinfo.dqi_valid = IIF_ALL;
+ if (!ret && copy_to_user(addr, &uinfo, sizeof(uinfo)))
return -EFAULT;
return ret;
}
@@ -132,12 +149,31 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
struct if_dqinfo info;
+ struct qc_info qinfo;
if (copy_from_user(&info, addr, sizeof(info)))
return -EFAULT;
if (!sb->s_qcop->set_info)
return -ENOSYS;
- return sb->s_qcop->set_info(sb, type, &info);
+ if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
+ return -EINVAL;
+ memset(&qinfo, 0, sizeof(qinfo));
+ if (info.dqi_valid & IIF_FLAGS) {
+ if (info.dqi_flags & ~DQF_SETINFO_MASK)
+ return -EINVAL;
+ if (info.dqi_flags & DQF_ROOT_SQUASH)
+ qinfo.i_flags |= QCI_ROOT_SQUASH;
+ qinfo.i_fieldmask |= QC_FLAGS;
+ }
+ if (info.dqi_valid & IIF_BGRACE) {
+ qinfo.i_spc_timelimit = info.dqi_bgrace;
+ qinfo.i_fieldmask |= QC_SPC_TIMER;
+ }
+ if (info.dqi_valid & IIF_IGRACE) {
+ qinfo.i_ino_timelimit = info.dqi_igrace;
+ qinfo.i_fieldmask |= QC_INO_TIMER;
+ }
+ return sb->s_qcop->set_info(sb, type, &qinfo);
}
static inline qsize_t qbtos(qsize_t blocks)
@@ -252,25 +288,149 @@ static int quota_disable(struct super_block *sb, void __user *addr)
return sb->s_qcop->quota_disable(sb, flags);
}
+static int quota_state_to_flags(struct qc_state *state)
+{
+ int flags = 0;
+
+ if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
+ flags |= FS_QUOTA_UDQ_ACCT;
+ if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
+ flags |= FS_QUOTA_UDQ_ENFD;
+ if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
+ flags |= FS_QUOTA_GDQ_ACCT;
+ if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
+ flags |= FS_QUOTA_GDQ_ENFD;
+ if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
+ flags |= FS_QUOTA_PDQ_ACCT;
+ if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
+ flags |= FS_QUOTA_PDQ_ENFD;
+ return flags;
+}
+
+static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
+{
+ int type;
+ struct qc_state state;
+ int ret;
+
+ ret = sb->s_qcop->get_state(sb, &state);
+ if (ret < 0)
+ return ret;
+
+ memset(fqs, 0, sizeof(*fqs));
+ fqs->qs_version = FS_QSTAT_VERSION;
+ fqs->qs_flags = quota_state_to_flags(&state);
+ /* No quota enabled? */
+ if (!fqs->qs_flags)
+ return -ENOSYS;
+ fqs->qs_incoredqs = state.s_incoredqs;
+ /*
+ * GETXSTATE quotactl has space for just one set of time limits so
+ * report them for the first enabled quota type
+ */
+ for (type = 0; type < XQM_MAXQUOTAS; type++)
+ if (state.s_state[type].flags & QCI_ACCT_ENABLED)
+ break;
+ BUG_ON(type == XQM_MAXQUOTAS);
+ fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
+ fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
+ fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
+ fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
+ fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
+ if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
+ fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
+ fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
+ fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
+ }
+ if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
+ fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
+ fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
+ fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
+ }
+ if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
+ /*
+ * Q_XGETQSTAT doesn't have room for both group and project
+ * quotas. So, allow the project quota values to be copied out
+ * only if there is no group quota information available.
+ */
+ if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
+ fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
+ fqs->qs_gquota.qfs_nblks =
+ state.s_state[PRJQUOTA].blocks;
+ fqs->qs_gquota.qfs_nextents =
+ state.s_state[PRJQUOTA].nextents;
+ }
+ }
+ return 0;
+}
+
static int quota_getxstate(struct super_block *sb, void __user *addr)
{
struct fs_quota_stat fqs;
int ret;
- if (!sb->s_qcop->get_xstate)
+ if (!sb->s_qcop->get_state)
return -ENOSYS;
- ret = sb->s_qcop->get_xstate(sb, &fqs);
+ ret = quota_getstate(sb, &fqs);
if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
return -EFAULT;
return ret;
}
+static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
+{
+ int type;
+ struct qc_state state;
+ int ret;
+
+ ret = sb->s_qcop->get_state(sb, &state);
+ if (ret < 0)
+ return ret;
+
+ memset(fqs, 0, sizeof(*fqs));
+ fqs->qs_version = FS_QSTAT_VERSION;
+ fqs->qs_flags = quota_state_to_flags(&state);
+ /* No quota enabled? */
+ if (!fqs->qs_flags)
+ return -ENOSYS;
+ fqs->qs_incoredqs = state.s_incoredqs;
+ /*
+ * GETXSTATV quotactl has space for just one set of time limits so
+ * report them for the first enabled quota type
+ */
+ for (type = 0; type < XQM_MAXQUOTAS; type++)
+ if (state.s_state[type].flags & QCI_ACCT_ENABLED)
+ break;
+ BUG_ON(type == XQM_MAXQUOTAS);
+ fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
+ fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
+ fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
+ fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
+ fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
+ if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
+ fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
+ fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
+ fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
+ }
+ if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
+ fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
+ fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
+ fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
+ }
+ if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
+ fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
+ fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
+ fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
+ }
+ return 0;
+}
+
static int quota_getxstatev(struct super_block *sb, void __user *addr)
{
struct fs_quota_statv fqs;
int ret;
- if (!sb->s_qcop->get_xstatev)
+ if (!sb->s_qcop->get_state)
return -ENOSYS;
memset(&fqs, 0, sizeof(fqs));
@@ -284,7 +444,7 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
default:
return -EINVAL;
}
- ret = sb->s_qcop->get_xstatev(sb, &fqs);
+ ret = quota_getstatev(sb, &fqs);
if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
return -EFAULT;
return ret;
@@ -357,6 +517,30 @@ static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
dst->d_fieldmask |= QC_RT_SPACE;
}
+static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
+ struct fs_disk_quota *src)
+{
+ memset(dst, 0, sizeof(*dst));
+ dst->i_spc_timelimit = src->d_btimer;
+ dst->i_ino_timelimit = src->d_itimer;
+ dst->i_rt_spc_timelimit = src->d_rtbtimer;
+ dst->i_ino_warnlimit = src->d_iwarns;
+ dst->i_spc_warnlimit = src->d_bwarns;
+ dst->i_rt_spc_warnlimit = src->d_rtbwarns;
+ if (src->d_fieldmask & FS_DQ_BWARNS)
+ dst->i_fieldmask |= QC_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_IWARNS)
+ dst->i_fieldmask |= QC_INO_WARNS;
+ if (src->d_fieldmask & FS_DQ_RTBWARNS)
+ dst->i_fieldmask |= QC_RT_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_BTIMER)
+ dst->i_fieldmask |= QC_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_ITIMER)
+ dst->i_fieldmask |= QC_INO_TIMER;
+ if (src->d_fieldmask & FS_DQ_RTBTIMER)
+ dst->i_fieldmask |= QC_RT_SPC_TIMER;
+}
+
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
@@ -371,6 +555,21 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
qid = make_kqid(current_user_ns(), type, id);
if (!qid_valid(qid))
return -EINVAL;
+ /* Are we actually setting timer / warning limits for all users? */
+ if (from_kqid(&init_user_ns, qid) == 0 &&
+ fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
+ struct qc_info qinfo;
+ int ret;
+
+ if (!sb->s_qcop->set_info)
+ return -EINVAL;
+ copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
+ ret = sb->s_qcop->set_info(sb, type, &qinfo);
+ if (ret)
+ return ret;
+ /* These are already done */
+ fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
+ }
copy_from_xfs_dqblk(&qdq, &fdq);
return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}
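
fs/quota/quota.c above rebuilds the Q_GETINFO and Q_XGETQSTAT[V] replies from the single ->get_state() callback while leaving the userspace ABI unchanged. A userspace sketch of that unchanged interface follows; the device path is an assumption and must name a filesystem with user quotas enabled.

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
	struct dqinfo info;

	if (quotactl(QCMD(Q_GETINFO, USRQUOTA), "/dev/sda1", 0,
		     (caddr_t)&info) != 0) {
		perror("quotactl(Q_GETINFO)");
		return 1;
	}
	printf("block grace: %llu s, inode grace: %llu s, flags: 0x%x\n",
	       (unsigned long long)info.dqi_bgrace,
	       (unsigned long long)info.dqi_igrace, info.dqi_flags);
	return 0;
}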
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index d65877fbe8f4..58efb83dec1c 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -349,6 +349,13 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
struct dquot *dquot)
{
int tmp = QT_TREEOFF;
+
+#ifdef __QUOTA_QT_PARANOIA
+ if (info->dqi_blocks <= QT_TREEOFF) {
+ quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
+ return -EIO;
+ }
+#endif
return do_insert_tree(info, dquot, &tmp, 0);
}
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 9cb10d7197f7..2aa012a68e90 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -117,12 +117,16 @@ static int v2_read_file_info(struct super_block *sb, int type)
qinfo = info->dqi_priv;
if (version == 0) {
/* limits are stored as unsigned 32-bit data */
- info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ info->dqi_max_spc_limit = 0xffffffffLL << QUOTABLOCK_BITS;
info->dqi_max_ino_limit = 0xffffffff;
} else {
- /* used space is stored as unsigned 64-bit value in bytes */
- info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
- info->dqi_max_ino_limit = 0xffffffffffffffffULL;
+ /*
+ * Used space is stored as unsigned 64-bit value in bytes but
+ * quota core supports only signed 64-bit values so use that
+ * as a limit
+ */
+ info->dqi_max_spc_limit = 0x7fffffffffffffffLL; /* 2^63-1 */
+ info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
}
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h
index f1966b42c2fd..4e95430093d9 100644
--- a/fs/quota/quotaio_v2.h
+++ b/fs/quota/quotaio_v2.h
@@ -13,12 +13,14 @@
*/
#define V2_INITQMAGICS {\
0xd9c01f11, /* USRQUOTA */\
- 0xd9c01927 /* GRPQUOTA */\
+ 0xd9c01927, /* GRPQUOTA */\
+ 0xd9c03f14, /* PRJQUOTA */\
}
#define V2_INITQVERSIONS {\
1, /* USRQUOTA */\
- 1 /* GRPQUOTA */\
+ 1, /* GRPQUOTA */\
+ 1, /* PRJQUOTA */\
}
/* First generic header */
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 4f56de822d2f..183a212694bf 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -31,9 +31,7 @@
#include "internal.h"
const struct file_operations ramfs_file_operations = {
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = noop_fsync,
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index f6ab41b39612..ba1323a94924 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -44,9 +44,7 @@ const struct file_operations ramfs_file_operations = {
.mmap_capabilities = ramfs_mmap_capabilities,
.mmap = ramfs_nommu_mmap,
.get_unmapped_area = ramfs_nommu_get_unmapped_area,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.fsync = noop_fsync,
.splice_read = generic_file_splice_read,
@@ -165,7 +163,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
*/
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
unsigned int old_ia_valid = ia->ia_valid;
int ret = 0;
diff --git a/fs/read_write.c b/fs/read_write.c
index 8e1b68786d66..819ef3faf1bb 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -9,7 +9,6 @@
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
-#include <linux/aio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
@@ -23,13 +22,10 @@
#include <asm/unistd.h>
typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
-typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
- unsigned long, loff_t);
typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);
const struct file_operations generic_ro_fops = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
.mmap = generic_file_readonly_mmap,
.splice_read = generic_file_splice_read,
@@ -343,13 +339,10 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos)
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = iov_iter_count(iter);
iter->type |= READ;
ret = file->f_op->read_iter(&kiocb, iter);
- if (ret == -EIOCBQUEUED)
- ret = wait_on_sync_kiocb(&kiocb);
-
+ BUG_ON(ret == -EIOCBQUEUED);
if (ret > 0)
*ppos = kiocb.ki_pos;
return ret;
@@ -366,13 +359,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = iov_iter_count(iter);
iter->type |= WRITE;
ret = file->f_op->write_iter(&kiocb, iter);
- if (ret == -EIOCBQUEUED)
- ret = wait_on_sync_kiocb(&kiocb);
-
+ BUG_ON(ret == -EIOCBQUEUED);
if (ret > 0)
*ppos = kiocb.ki_pos;
return ret;
@@ -418,26 +408,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}
-ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
-{
- struct iovec iov = { .iov_base = buf, .iov_len = len };
- struct kiocb kiocb;
- ssize_t ret;
-
- init_sync_kiocb(&kiocb, filp);
- kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = len;
-
- ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&kiocb);
- *ppos = kiocb.ki_pos;
- return ret;
-}
-
-EXPORT_SYMBOL(do_sync_read);
-
-ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
+static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
struct iovec iov = { .iov_base = buf, .iov_len = len };
struct kiocb kiocb;
@@ -446,34 +417,25 @@ ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *p
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = len;
iov_iter_init(&iter, READ, &iov, 1, len);
ret = filp->f_op->read_iter(&kiocb, &iter);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&kiocb);
+ BUG_ON(ret == -EIOCBQUEUED);
*ppos = kiocb.ki_pos;
return ret;
}
-EXPORT_SYMBOL(new_sync_read);
-
ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
loff_t *pos)
{
- ssize_t ret;
-
if (file->f_op->read)
- ret = file->f_op->read(file, buf, count, pos);
- else if (file->f_op->aio_read)
- ret = do_sync_read(file, buf, count, pos);
+ return file->f_op->read(file, buf, count, pos);
else if (file->f_op->read_iter)
- ret = new_sync_read(file, buf, count, pos);
+ return new_sync_read(file, buf, count, pos);
else
- ret = -EINVAL;
-
- return ret;
+ return -EINVAL;
}
+EXPORT_SYMBOL(__vfs_read);
ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
@@ -502,26 +464,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
EXPORT_SYMBOL(vfs_read);
-ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
-{
- struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
- struct kiocb kiocb;
- ssize_t ret;
-
- init_sync_kiocb(&kiocb, filp);
- kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = len;
-
- ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&kiocb);
- *ppos = kiocb.ki_pos;
- return ret;
-}
-
-EXPORT_SYMBOL(do_sync_write);
-
-ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
+static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
struct kiocb kiocb;
@@ -530,17 +473,26 @@ ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, lo
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = len;
iov_iter_init(&iter, WRITE, &iov, 1, len);
ret = filp->f_op->write_iter(&kiocb, &iter);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&kiocb);
- *ppos = kiocb.ki_pos;
+ BUG_ON(ret == -EIOCBQUEUED);
+ if (ret > 0)
+ *ppos = kiocb.ki_pos;
return ret;
}
-EXPORT_SYMBOL(new_sync_write);
+ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
+ loff_t *pos)
+{
+ if (file->f_op->write)
+ return file->f_op->write(file, p, count, pos);
+ else if (file->f_op->write_iter)
+ return new_sync_write(file, p, count, pos);
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL(__vfs_write);
ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
@@ -556,12 +508,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
p = (__force const char __user *)buf;
if (count > MAX_RW_COUNT)
count = MAX_RW_COUNT;
- if (file->f_op->write)
- ret = file->f_op->write(file, p, count, pos);
- else if (file->f_op->aio_write)
- ret = do_sync_write(file, p, count, pos);
- else
- ret = new_sync_write(file, p, count, pos);
+ ret = __vfs_write(file, p, count, pos);
set_fs(old_fs);
if (ret > 0) {
fsnotify_modify(file);
@@ -588,12 +535,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
if (ret >= 0) {
count = ret;
file_start_write(file);
- if (file->f_op->write)
- ret = file->f_op->write(file, buf, count, pos);
- else if (file->f_op->aio_write)
- ret = do_sync_write(file, buf, count, pos);
- else
- ret = new_sync_write(file, buf, count, pos);
+ ret = __vfs_write(file, buf, count, pos);
if (ret > 0) {
fsnotify_modify(file);
add_wchar(current, ret);
@@ -710,60 +652,32 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
}
EXPORT_SYMBOL(iov_shorten);
-static ssize_t do_iter_readv_writev(struct file *filp, int rw, const struct iovec *iov,
- unsigned long nr_segs, size_t len, loff_t *ppos, iter_fn_t fn)
-{
- struct kiocb kiocb;
- struct iov_iter iter;
- ssize_t ret;
-
- init_sync_kiocb(&kiocb, filp);
- kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = len;
-
- iov_iter_init(&iter, rw, iov, nr_segs, len);
- ret = fn(&kiocb, &iter);
- if (ret == -EIOCBQUEUED)
- ret = wait_on_sync_kiocb(&kiocb);
- *ppos = kiocb.ki_pos;
- return ret;
-}
-
-static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
- unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
+static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
+ loff_t *ppos, iter_fn_t fn)
{
struct kiocb kiocb;
ssize_t ret;
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = *ppos;
- kiocb.ki_nbytes = len;
- ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
- if (ret == -EIOCBQUEUED)
- ret = wait_on_sync_kiocb(&kiocb);
+ ret = fn(&kiocb, iter);
+ BUG_ON(ret == -EIOCBQUEUED);
*ppos = kiocb.ki_pos;
return ret;
}
/* Do it by hand, with file-ops */
-static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
+static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
+ loff_t *ppos, io_fn_t fn)
{
- struct iovec *vector = iov;
ssize_t ret = 0;
- while (nr_segs > 0) {
- void __user *base;
- size_t len;
+ while (iov_iter_count(iter)) {
+ struct iovec iovec = iov_iter_iovec(iter);
ssize_t nr;
- base = vector->iov_base;
- len = vector->iov_len;
- vector++;
- nr_segs--;
-
- nr = fn(filp, base, len, ppos);
+ nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos);
if (nr < 0) {
if (!ret)
@@ -771,8 +685,9 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
break;
}
ret += nr;
- if (nr != len)
+ if (nr != iovec.iov_len)
break;
+ iov_iter_advance(iter, nr);
}
return ret;
@@ -863,48 +778,42 @@ static ssize_t do_readv_writev(int type, struct file *file,
size_t tot_len;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
+ struct iov_iter iter;
ssize_t ret;
io_fn_t fn;
- iov_fn_t fnv;
iter_fn_t iter_fn;
- ret = rw_copy_check_uvector(type, uvector, nr_segs,
- ARRAY_SIZE(iovstack), iovstack, &iov);
- if (ret <= 0)
- goto out;
+ ret = import_iovec(type, uvector, nr_segs,
+ ARRAY_SIZE(iovstack), &iov, &iter);
+ if (ret < 0)
+ return ret;
- tot_len = ret;
+ tot_len = iov_iter_count(&iter);
+ if (!tot_len)
+ goto out;
ret = rw_verify_area(type, file, pos, tot_len);
if (ret < 0)
goto out;
- fnv = NULL;
if (type == READ) {
fn = file->f_op->read;
- fnv = file->f_op->aio_read;
iter_fn = file->f_op->read_iter;
} else {
fn = (io_fn_t)file->f_op->write;
- fnv = file->f_op->aio_write;
iter_fn = file->f_op->write_iter;
file_start_write(file);
}
if (iter_fn)
- ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
- pos, iter_fn);
- else if (fnv)
- ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
- pos, fnv);
+ ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
else
- ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
+ ret = do_loop_readv_writev(file, &iter, pos, fn);
if (type != READ)
file_end_write(file);
out:
- if (iov != iovstack)
- kfree(iov);
+ kfree(iov);
if ((ret + (type == READ)) > 0) {
if (type == READ)
fsnotify_access(file);
@@ -1043,48 +952,42 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
compat_ssize_t tot_len;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
+ struct iov_iter iter;
ssize_t ret;
io_fn_t fn;
- iov_fn_t fnv;
iter_fn_t iter_fn;
- ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
- UIO_FASTIOV, iovstack, &iov);
- if (ret <= 0)
- goto out;
+ ret = compat_import_iovec(type, uvector, nr_segs,
+ UIO_FASTIOV, &iov, &iter);
+ if (ret < 0)
+ return ret;
- tot_len = ret;
+ tot_len = iov_iter_count(&iter);
+ if (!tot_len)
+ goto out;
ret = rw_verify_area(type, file, pos, tot_len);
if (ret < 0)
goto out;
- fnv = NULL;
if (type == READ) {
fn = file->f_op->read;
- fnv = file->f_op->aio_read;
iter_fn = file->f_op->read_iter;
} else {
fn = (io_fn_t)file->f_op->write;
- fnv = file->f_op->aio_write;
iter_fn = file->f_op->write_iter;
file_start_write(file);
}
if (iter_fn)
- ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
- pos, iter_fn);
- else if (fnv)
- ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
- pos, fnv);
+ ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
else
- ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
+ ret = do_loop_readv_writev(file, &iter, pos, fn);
if (type != READ)
file_end_write(file);
out:
- if (iov != iovstack)
- kfree(iov);
+ kfree(iov);
if ((ret + (type == READ)) > 0) {
if (type == READ)
fsnotify_access(file);
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 0a7dc941aaf4..4a024e2ceb9f 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -53,8 +53,8 @@ static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *deh)
{
struct dentry *privroot = REISERFS_SB(dir->i_sb)->priv_root;
- return (privroot->d_inode &&
- deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
+ return (d_really_is_positive(privroot) &&
+ deh->deh_objectid == INODE_PKEY(d_inode(privroot))->k_objectid);
}
int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 751dd3f4346b..96a1bcf33db4 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -243,8 +243,6 @@ drop_write_lock:
}
const struct file_operations reiserfs_file_operations = {
- .read = new_sync_read,
- .write = new_sync_write,
.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = reiserfs_compat_ioctl,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index e72401e1f995..f6f2fbad9777 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -18,7 +18,7 @@
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/swap.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
@@ -3278,22 +3278,22 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
* We thank Mingming Cao for helping us understand in great detail what
* to do in this section of the code.
*/
-static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter, loff_t offset)
+static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+ ret = blockdev_direct_IO(iocb, inode, iter, offset,
reiserfs_get_blocks_direct_io);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely((rw & WRITE) && ret < 0)) {
+ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + count;
@@ -3308,7 +3308,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
unsigned int ia_valid;
int error;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index cd11358b10c7..b55a074653d7 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -400,7 +400,7 @@ struct dentry *reiserfs_get_parent(struct dentry *child)
struct inode *inode = NULL;
struct reiserfs_dir_entry de;
INITIALIZE_PATH(path_to_entry);
- struct inode *dir = child->d_inode;
+ struct inode *dir = d_inode(child);
if (dir->i_nlink == 0) {
return ERR_PTR(-ENOENT);
@@ -917,7 +917,7 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
goto end_rmdir;
}
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
reiserfs_update_inode_transaction(inode);
reiserfs_update_inode_transaction(dir);
@@ -987,7 +987,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
dquot_initialize(dir);
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
/*
* in this transaction we can be doing at max two balancings and
@@ -1174,7 +1174,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int retval;
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct reiserfs_transaction_handle th;
/*
* We need blocks for transaction + update of quotas for
@@ -1311,8 +1311,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dquot_initialize(old_dir);
dquot_initialize(new_dir);
- old_inode = old_dentry->d_inode;
- new_dentry_inode = new_dentry->d_inode;
+ old_inode = d_inode(old_dentry);
+ new_dentry_inode = d_inode(new_dentry);
/*
* make sure that oldname still exists and points to an object we
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index bb79cddf0a1f..2adcde137c3f 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -910,7 +910,6 @@ do { \
if (!(cond)) \
reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \
__FILE__ ":%i:%s: " format "\n", \
- in_interrupt() ? -1 : task_pid_nr(current), \
__LINE__, __func__ , ##args); \
} while (0)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 71fbbe3e2dab..0111ad0466ed 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -805,7 +805,7 @@ static const struct quotactl_ops reiserfs_qctl_operations = {
.quota_on = reiserfs_quota_on,
.quota_off = dquot_quota_off,
.quota_sync = dquot_quota_sync,
- .get_info = dquot_get_dqinfo,
+ .get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk,
@@ -1687,7 +1687,7 @@ static __u32 find_hash_out(struct super_block *s)
__u32 hash = DEFAULT_HASH;
__u32 deh_hashval, teahash, r5hash, yurahash;
- inode = s->s_root->d_inode;
+ inode = d_inode(s->s_root);
make_cpu_key(&key, inode, ~0, TYPE_DIRENTRY, 3);
retval = search_by_entry_key(s, &key, &path, &de);
@@ -2347,7 +2347,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
err = -EXDEV;
goto out;
}
- inode = path->dentry->d_inode;
+ inode = d_inode(path->dentry);
/*
* We must not pack tails for quota files on reiserfs for quota
* IO to work
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 4e781e697c90..e87f9b52bf06 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -87,9 +87,9 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry)
BUG_ON(!mutex_is_locked(&dir->i_mutex));
- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+ mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD);
error = dir->i_op->unlink(dir, dentry);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
if (!error)
d_delete(dentry);
@@ -102,11 +102,11 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
BUG_ON(!mutex_is_locked(&dir->i_mutex));
- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+ mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_CHILD);
error = dir->i_op->rmdir(dir, dentry);
if (!error)
- dentry->d_inode->i_flags |= S_DEAD;
- mutex_unlock(&dentry->d_inode->i_mutex);
+ d_inode(dentry)->i_flags |= S_DEAD;
+ mutex_unlock(&d_inode(dentry)->i_mutex);
if (!error)
d_delete(dentry);
@@ -120,26 +120,26 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags)
struct dentry *privroot = REISERFS_SB(sb)->priv_root;
struct dentry *xaroot;
- if (!privroot->d_inode)
+ if (d_really_is_negative(privroot))
return ERR_PTR(-ENODATA);
- mutex_lock_nested(&privroot->d_inode->i_mutex, I_MUTEX_XATTR);
+ mutex_lock_nested(&d_inode(privroot)->i_mutex, I_MUTEX_XATTR);
xaroot = dget(REISERFS_SB(sb)->xattr_root);
if (!xaroot)
xaroot = ERR_PTR(-ENODATA);
- else if (!xaroot->d_inode) {
+ else if (d_really_is_negative(xaroot)) {
int err = -ENODATA;
if (xattr_may_create(flags))
- err = xattr_mkdir(privroot->d_inode, xaroot, 0700);
+ err = xattr_mkdir(d_inode(privroot), xaroot, 0700);
if (err) {
dput(xaroot);
xaroot = ERR_PTR(err);
}
}
- mutex_unlock(&privroot->d_inode->i_mutex);
+ mutex_unlock(&d_inode(privroot)->i_mutex);
return xaroot;
}
@@ -156,21 +156,21 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags)
le32_to_cpu(INODE_PKEY(inode)->k_objectid),
inode->i_generation);
- mutex_lock_nested(&xaroot->d_inode->i_mutex, I_MUTEX_XATTR);
+ mutex_lock_nested(&d_inode(xaroot)->i_mutex, I_MUTEX_XATTR);
xadir = lookup_one_len(namebuf, xaroot, strlen(namebuf));
- if (!IS_ERR(xadir) && !xadir->d_inode) {
+ if (!IS_ERR(xadir) && d_really_is_negative(xadir)) {
int err = -ENODATA;
if (xattr_may_create(flags))
- err = xattr_mkdir(xaroot->d_inode, xadir, 0700);
+ err = xattr_mkdir(d_inode(xaroot), xadir, 0700);
if (err) {
dput(xadir);
xadir = ERR_PTR(err);
}
}
- mutex_unlock(&xaroot->d_inode->i_mutex);
+ mutex_unlock(&d_inode(xaroot)->i_mutex);
dput(xaroot);
return xadir;
}
@@ -195,7 +195,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
container_of(ctx, struct reiserfs_dentry_buf, ctx);
struct dentry *dentry;
- WARN_ON_ONCE(!mutex_is_locked(&dbuf->xadir->d_inode->i_mutex));
+ WARN_ON_ONCE(!mutex_is_locked(&d_inode(dbuf->xadir)->i_mutex));
if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
return -ENOSPC;
@@ -207,7 +207,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
dentry = lookup_one_len(name, dbuf->xadir, namelen);
if (IS_ERR(dentry)) {
return PTR_ERR(dentry);
- } else if (!dentry->d_inode) {
+ } else if (d_really_is_negative(dentry)) {
/* A directory entry exists, but no file? */
reiserfs_error(dentry->d_sb, "xattr-20003",
"Corrupted directory: xattr %pd listed but "
@@ -249,16 +249,16 @@ static int reiserfs_for_each_xattr(struct inode *inode,
if (IS_ERR(dir)) {
err = PTR_ERR(dir);
goto out;
- } else if (!dir->d_inode) {
+ } else if (d_really_is_negative(dir)) {
err = 0;
goto out_dir;
}
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_XATTR);
buf.xadir = dir;
while (1) {
- err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
+ err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
if (err)
break;
if (!buf.count)
@@ -276,7 +276,7 @@ static int reiserfs_for_each_xattr(struct inode *inode,
break;
buf.count = 0;
}
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir)->i_mutex);
cleanup_dentry_buf(&buf);
@@ -298,13 +298,13 @@ static int reiserfs_for_each_xattr(struct inode *inode,
if (!err) {
int jerror;
- mutex_lock_nested(&dir->d_parent->d_inode->i_mutex,
+ mutex_lock_nested(&d_inode(dir->d_parent)->i_mutex,
I_MUTEX_XATTR);
err = action(dir, data);
reiserfs_write_lock(inode->i_sb);
jerror = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
- mutex_unlock(&dir->d_parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dir->d_parent)->i_mutex);
err = jerror ?: err;
}
}
@@ -319,7 +319,7 @@ out:
static int delete_one_xattr(struct dentry *dentry, void *data)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
/* This is the xattr dir, handle specially. */
if (d_is_dir(dentry))
@@ -384,27 +384,27 @@ static struct dentry *xattr_lookup(struct inode *inode, const char *name,
if (IS_ERR(xadir))
return ERR_CAST(xadir);
- mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR);
+ mutex_lock_nested(&d_inode(xadir)->i_mutex, I_MUTEX_XATTR);
xafile = lookup_one_len(name, xadir, strlen(name));
if (IS_ERR(xafile)) {
err = PTR_ERR(xafile);
goto out;
}
- if (xafile->d_inode && (flags & XATTR_CREATE))
+ if (d_really_is_positive(xafile) && (flags & XATTR_CREATE))
err = -EEXIST;
- if (!xafile->d_inode) {
+ if (d_really_is_negative(xafile)) {
err = -ENODATA;
if (xattr_may_create(flags))
- err = xattr_create(xadir->d_inode, xafile,
+ err = xattr_create(d_inode(xadir), xafile,
0700|S_IFREG);
}
if (err)
dput(xafile);
out:
- mutex_unlock(&xadir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(xadir)->i_mutex);
dput(xadir);
if (err)
return ERR_PTR(err);
@@ -469,21 +469,21 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
if (IS_ERR(xadir))
return PTR_ERR(xadir);
- mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR);
+ mutex_lock_nested(&d_inode(xadir)->i_mutex, I_MUTEX_XATTR);
dentry = lookup_one_len(name, xadir, strlen(name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto out_dput;
}
- if (dentry->d_inode) {
- err = xattr_unlink(xadir->d_inode, dentry);
+ if (d_really_is_positive(dentry)) {
+ err = xattr_unlink(d_inode(xadir), dentry);
update_ctime(inode);
}
dput(dentry);
out_dput:
- mutex_unlock(&xadir->d_inode->i_mutex);
+ mutex_unlock(&d_inode(xadir)->i_mutex);
dput(xadir);
return err;
}
@@ -533,7 +533,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
else
chunk = buffer_size - buffer_pos;
- page = reiserfs_get_page(dentry->d_inode, file_pos);
+ page = reiserfs_get_page(d_inode(dentry), file_pos);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto out_unlock;
@@ -573,18 +573,18 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
}
new_size = buffer_size + sizeof(struct reiserfs_xattr_header);
- if (!err && new_size < i_size_read(dentry->d_inode)) {
+ if (!err && new_size < i_size_read(d_inode(dentry))) {
struct iattr newattrs = {
.ia_ctime = current_fs_time(inode->i_sb),
.ia_size = new_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
- inode_dio_wait(dentry->d_inode);
+ mutex_lock_nested(&d_inode(dentry)->i_mutex, I_MUTEX_XATTR);
+ inode_dio_wait(d_inode(dentry));
err = reiserfs_setattr(dentry, &newattrs);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
} else
update_ctime(inode);
out_unlock:
@@ -657,7 +657,7 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
down_read(&REISERFS_I(inode)->i_xattr_sem);
- isize = i_size_read(dentry->d_inode);
+ isize = i_size_read(d_inode(dentry));
/* Just return the size needed */
if (buffer == NULL) {
@@ -680,7 +680,7 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
else
chunk = isize - file_pos;
- page = reiserfs_get_page(dentry->d_inode, file_pos);
+ page = reiserfs_get_page(d_inode(dentry), file_pos);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto out_unlock;
@@ -775,7 +775,7 @@ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
- if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
return -EOPNOTSUPP;
return handler->get(dentry, name, buffer, size, handler->flags);
@@ -784,7 +784,7 @@ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
/*
* Inode operation setxattr()
*
- * dentry->d_inode->i_mutex down
+ * d_inode(dentry)->i_mutex down
*/
int
reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
@@ -794,7 +794,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
- if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
return -EOPNOTSUPP;
return handler->set(dentry, name, value, size, flags, handler->flags);
@@ -803,7 +803,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
/*
* Inode operation removexattr()
*
- * dentry->d_inode->i_mutex down
+ * d_inode(dentry)->i_mutex down
*/
int reiserfs_removexattr(struct dentry *dentry, const char *name)
{
@@ -811,7 +811,7 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name)
handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
- if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
return -EOPNOTSUPP;
return handler->set(dentry, name, NULL, 0, XATTR_REPLACE, handler->flags);
@@ -875,14 +875,14 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
.size = buffer ? size : 0,
};
- if (!dentry->d_inode)
+ if (d_really_is_negative(dentry))
return -EINVAL;
if (!dentry->d_sb->s_xattr ||
- get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
return -EOPNOTSUPP;
- dir = open_xa_dir(dentry->d_inode, XATTR_REPLACE);
+ dir = open_xa_dir(d_inode(dentry), XATTR_REPLACE);
if (IS_ERR(dir)) {
err = PTR_ERR(dir);
if (err == -ENODATA)
@@ -890,9 +890,9 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
goto out;
}
- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
- err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
- mutex_unlock(&dir->d_inode->i_mutex);
+ mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_XATTR);
+ err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
+ mutex_unlock(&d_inode(dir)->i_mutex);
if (!err)
err = buf.pos;
@@ -905,12 +905,12 @@ out:
static int create_privroot(struct dentry *dentry)
{
int err;
- struct inode *inode = dentry->d_parent->d_inode;
+ struct inode *inode = d_inode(dentry->d_parent);
WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
err = xattr_mkdir(inode, dentry, 0700);
- if (err || !dentry->d_inode) {
+ if (err || d_really_is_negative(dentry)) {
reiserfs_warning(dentry->d_sb, "jdm-20006",
"xattrs/ACLs enabled and couldn't "
"find/create .reiserfs_priv. "
@@ -918,7 +918,7 @@ static int create_privroot(struct dentry *dentry)
return -EOPNOTSUPP;
}
- dentry->d_inode->i_flags |= S_PRIVATE;
+ d_inode(dentry)->i_flags |= S_PRIVATE;
reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr "
"storage.\n", PRIVROOT_NAME);
@@ -997,17 +997,17 @@ int reiserfs_lookup_privroot(struct super_block *s)
int err = 0;
/* If we don't have the privroot located yet - go find it */
- mutex_lock(&s->s_root->d_inode->i_mutex);
+ mutex_lock(&d_inode(s->s_root)->i_mutex);
dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
d_set_d_op(dentry, &xattr_lookup_poison_ops);
- if (dentry->d_inode)
- dentry->d_inode->i_flags |= S_PRIVATE;
+ if (d_really_is_positive(dentry))
+ d_inode(dentry)->i_flags |= S_PRIVATE;
} else
err = PTR_ERR(dentry);
- mutex_unlock(&s->s_root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(s->s_root)->i_mutex);
return err;
}
@@ -1026,15 +1026,15 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
if (err)
goto error;
- if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
- mutex_lock(&s->s_root->d_inode->i_mutex);
+ if (d_really_is_negative(privroot) && !(mount_flags & MS_RDONLY)) {
+ mutex_lock(&d_inode(s->s_root)->i_mutex);
err = create_privroot(REISERFS_SB(s)->priv_root);
- mutex_unlock(&s->s_root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(s->s_root)->i_mutex);
}
- if (privroot->d_inode) {
+ if (d_really_is_positive(privroot)) {
s->s_xattr = reiserfs_xattr_handlers;
- mutex_lock(&privroot->d_inode->i_mutex);
+ mutex_lock(&d_inode(privroot)->i_mutex);
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
@@ -1045,7 +1045,7 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
else
err = PTR_ERR(dentry);
}
- mutex_unlock(&privroot->d_inode->i_mutex);
+ mutex_unlock(&d_inode(privroot)->i_mutex);
}
error:
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index f620e9678dd5..15dde6262c00 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -78,7 +78,7 @@ static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode)
if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) {
nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
- if (!REISERFS_SB(inode->i_sb)->xattr_root->d_inode)
+ if (d_really_is_negative(REISERFS_SB(inode->i_sb)->xattr_root))
nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
}
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index e7f8939a4cb5..9a3b0616f283 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -15,10 +15,10 @@ security_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
return -EINVAL;
- if (IS_PRIVATE(dentry->d_inode))
+ if (IS_PRIVATE(d_inode(dentry)))
return -EPERM;
- return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
+ return reiserfs_xattr_get(d_inode(dentry), name, buffer, size);
}
static int
@@ -28,10 +28,10 @@ security_set(struct dentry *dentry, const char *name, const void *buffer,
if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
return -EINVAL;
- if (IS_PRIVATE(dentry->d_inode))
+ if (IS_PRIVATE(d_inode(dentry)))
return -EPERM;
- return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
+ return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags);
}
static size_t security_list(struct dentry *dentry, char *list, size_t list_len,
@@ -39,7 +39,7 @@ static size_t security_list(struct dentry *dentry, char *list, size_t list_len,
{
const size_t len = namelen + 1;
- if (IS_PRIVATE(dentry->d_inode))
+ if (IS_PRIVATE(d_inode(dentry)))
return 0;
if (list && len <= list_len) {
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 5eeb0c48ba46..e4f1343714e0 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -14,10 +14,10 @@ trusted_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode))
+ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
return -EPERM;
- return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
+ return reiserfs_xattr_get(d_inode(dentry), name, buffer, size);
}
static int
@@ -27,10 +27,10 @@ trusted_set(struct dentry *dentry, const char *name, const void *buffer,
if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode))
+ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
return -EPERM;
- return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
+ return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags);
}
static size_t trusted_list(struct dentry *dentry, char *list, size_t list_size,
@@ -38,7 +38,7 @@ static size_t trusted_list(struct dentry *dentry, char *list, size_t list_size,
{
const size_t len = name_len + 1;
- if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode))
+ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
return 0;
if (list && len <= list_size) {
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index e50eab046471..d0b08d3e5689 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -15,7 +15,7 @@ user_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
return -EINVAL;
if (!reiserfs_xattrs_user(dentry->d_sb))
return -EOPNOTSUPP;
- return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
+ return reiserfs_xattr_get(d_inode(dentry), name, buffer, size);
}
static int
@@ -27,7 +27,7 @@ user_set(struct dentry *dentry, const char *name, const void *buffer,
if (!reiserfs_xattrs_user(dentry->d_sb))
return -EOPNOTSUPP;
- return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
+ return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags);
}
static size_t user_list(struct dentry *dentry, char *list, size_t list_size,
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index 7da9e2153953..1118a0dc6b45 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -81,7 +81,6 @@ static unsigned romfs_mmap_capabilities(struct file *file)
const struct file_operations romfs_ro_fops = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
.splice_read = generic_file_splice_read,
.mmap = romfs_mmap,
diff --git a/fs/splice.c b/fs/splice.c
index 7968da96bebb..bfe62ae40f40 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -32,7 +32,6 @@
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
-#include <linux/aio.h>
#include "internal.h"
/*
@@ -524,6 +523,9 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
loff_t isize, left;
int ret;
+ if (IS_DAX(in->f_mapping->host))
+ return default_file_splice_read(in, ppos, pipe, len, flags);
+
isize = i_size_read(in->f_mapping->host);
if (unlikely(*ppos >= isize))
return 0;
@@ -1159,7 +1161,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
long ret, bytes;
umode_t i_mode;
size_t len;
- int i, flags;
+ int i, flags, more;
/*
* We require the input being a regular file, as we don't want to
@@ -1202,6 +1204,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* Don't block on output, we have to drain the direct pipe.
*/
sd->flags &= ~SPLICE_F_NONBLOCK;
+ more = sd->flags & SPLICE_F_MORE;
while (len) {
size_t read_len;
@@ -1215,6 +1218,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
sd->total_len = read_len;
/*
+ * If more data is pending, set SPLICE_F_MORE.
+ * If this is the last of the data and SPLICE_F_MORE was
+ * not set initially, clear it.
+ */
+ if (read_len < len)
+ sd->flags |= SPLICE_F_MORE;
+ else if (!more)
+ sd->flags &= ~SPLICE_F_MORE;
+ /*
* NOTE: nonblocking mode only applies to the input. We
* must not do the output in nonblocking mode as then we
* could get stuck data in the internal pipe:
@@ -1534,34 +1546,29 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
- ssize_t count;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
- ret = rw_copy_check_uvector(READ, uiov, nr_segs,
- ARRAY_SIZE(iovstack), iovstack, &iov);
- if (ret <= 0)
- goto out;
-
- count = ret;
- iov_iter_init(&iter, READ, iov, nr_segs, count);
+ ret = import_iovec(READ, uiov, nr_segs,
+ ARRAY_SIZE(iovstack), &iov, &iter);
+ if (ret < 0)
+ return ret;
+ sd.total_len = iov_iter_count(&iter);
sd.len = 0;
- sd.total_len = count;
sd.flags = flags;
sd.u.data = &iter;
sd.pos = 0;
- pipe_lock(pipe);
- ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
- pipe_unlock(pipe);
-
-out:
- if (iov != iovstack)
- kfree(iov);
+ if (sd.total_len) {
+ pipe_lock(pipe);
+ ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
+ pipe_unlock(pipe);
+ }
+ kfree(iov);
return ret;
}
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 5e1101ff276f..8073b6532cf0 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -110,7 +110,7 @@ static struct dentry *squashfs_fh_to_parent(struct super_block *sb,
static struct dentry *squashfs_get_parent(struct dentry *child)
{
- struct inode *inode = child->d_inode;
+ struct inode *inode = d_inode(child);
unsigned int parent_ino = squashfs_i(inode)->parent;
return squashfs_export_iget(inode->i_sb, parent_ino);
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index 92fcde7b4d61..e5e0ddf5b143 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -39,7 +39,7 @@ static const struct xattr_handler *squashfs_xattr_handler(int);
ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
size_t buffer_size)
{
- struct inode *inode = d->d_inode;
+ struct inode *inode = d_inode(d);
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
@@ -229,7 +229,7 @@ static int squashfs_user_get(struct dentry *d, const char *name, void *buffer,
if (name[0] == '\0')
return -EINVAL;
- return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_USER, name,
+ return squashfs_xattr_get(d_inode(d), SQUASHFS_XATTR_USER, name,
buffer, size);
}
@@ -259,7 +259,7 @@ static int squashfs_trusted_get(struct dentry *d, const char *name,
if (name[0] == '\0')
return -EINVAL;
- return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_TRUSTED, name,
+ return squashfs_xattr_get(d_inode(d), SQUASHFS_XATTR_TRUSTED, name,
buffer, size);
}
@@ -286,7 +286,7 @@ static int squashfs_security_get(struct dentry *d, const char *name,
if (name[0] == '\0')
return -EINVAL;
- return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_SECURITY, name,
+ return squashfs_xattr_get(d_inode(d), SQUASHFS_XATTR_SECURITY, name,
buffer, size);
}
diff --git a/fs/stat.c b/fs/stat.c
index ae0c3cef9927..cccc1aab9a8b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(generic_fillattr);
*/
int vfs_getattr_nosec(struct path *path, struct kstat *stat)
{
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = d_backing_inode(path->dentry);
if (inode->i_op->getattr)
return inode->i_op->getattr(path->mnt, path->dentry, stat);
@@ -66,7 +66,7 @@ int vfs_getattr(struct path *path, struct kstat *stat)
{
int retval;
- retval = security_inode_getattr(path->mnt, path->dentry);
+ retval = security_inode_getattr(path);
if (retval)
return retval;
return vfs_getattr_nosec(path, stat);
@@ -326,7 +326,7 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
retry:
error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
if (!error) {
- struct inode *inode = path.dentry->d_inode;
+ struct inode *inode = d_backing_inode(path.dentry);
error = empty ? -ENOENT : -EINVAL;
if (inode->i_op->readlink) {
diff --git a/fs/super.c b/fs/super.c
index 2b7dc90ccdbb..928c20f47af9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -224,7 +224,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
s->s_maxbytes = MAX_NON_LFS;
s->s_op = &default_op;
s->s_time_gran = 1000000000;
- s->cleancache_poolid = -1;
+ s->cleancache_poolid = CLEANCACHE_NO_POOL;
s->s_shrink.seeks = DEFAULT_SEEKS;
s->s_shrink.scan_objects = super_cache_scan;
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index d42291d08215..8f3555f00c54 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -132,7 +132,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
{
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
- struct inode * dir = dentry->d_parent->d_inode;
+ struct inode * dir = d_inode(dentry->d_parent);
unsigned long start, n;
unsigned long npages = dir_pages(dir);
struct page *page = NULL;
@@ -176,7 +176,7 @@ found:
int sysv_add_link(struct dentry *dentry, struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct page *page = NULL;
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index b00811c75b24..82ddc09061e2 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -21,9 +21,7 @@
*/
const struct file_operations sysv_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
@@ -32,7 +30,7 @@ const struct file_operations sysv_file_operations = {
static int sysv_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
error = inode_change_ok(inode, attr);
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 66bc316927e8..2fde40acf024 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -443,7 +443,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size)
int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
struct super_block *s = dentry->d_sb;
- generic_fillattr(dentry->d_inode, stat);
+ generic_fillattr(d_inode(dentry), stat);
stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
stat->blksize = s->s_blocksize;
return 0;
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 731b2bbcaab3..11e83ed0b4bf 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -118,7 +118,7 @@ out_fail:
static int sysv_link(struct dentry * old_dentry, struct inode * dir,
struct dentry * dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
@@ -166,7 +166,7 @@ out_dir:
static int sysv_unlink(struct inode * dir, struct dentry * dentry)
{
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
struct page * page;
struct sysv_dir_entry * de;
int err = -ENOENT;
@@ -187,7 +187,7 @@ out:
static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int err = -ENOTEMPTY;
if (sysv_empty_dir(inode)) {
@@ -208,8 +208,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
struct inode * new_dir, struct dentry * new_dentry)
{
- struct inode * old_inode = old_dentry->d_inode;
- struct inode * new_inode = new_dentry->d_inode;
+ struct inode * old_inode = d_inode(old_dentry);
+ struct inode * new_inode = d_inode(new_dentry);
struct page * dir_page = NULL;
struct sysv_dir_entry * dir_de = NULL;
struct page * old_page;
diff --git a/fs/sysv/symlink.c b/fs/sysv/symlink.c
index 00d2f8a43e4e..d3fa0d703314 100644
--- a/fs/sysv/symlink.c
+++ b/fs/sysv/symlink.c
@@ -10,7 +10,7 @@
static void *sysv_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- nd_set_link(nd, (char *)SYSV_I(dentry->d_inode)->i_data);
+ nd_set_link(nd, (char *)SYSV_I(d_inode(dentry))->i_data);
return NULL;
}
diff --git a/fs/tracefs/Makefile b/fs/tracefs/Makefile
new file mode 100644
index 000000000000..82fa35b656c4
--- /dev/null
+++ b/fs/tracefs/Makefile
@@ -0,0 +1,4 @@
+tracefs-objs := inode.o
+
+obj-$(CONFIG_TRACING) += tracefs.o
+
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
new file mode 100644
index 000000000000..d92bdf3b079a
--- /dev/null
+++ b/fs/tracefs/inode.c
@@ -0,0 +1,650 @@
+/*
+ * inode.c - part of tracefs, a pseudo file system for activating tracing
+ *
+ * Based on debugfs by: Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * tracefs is the file system that is used by the tracing infrastructure.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/kobject.h>
+#include <linux/namei.h>
+#include <linux/tracefs.h>
+#include <linux/fsnotify.h>
+#include <linux/seq_file.h>
+#include <linux/parser.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
+
+#define TRACEFS_DEFAULT_MODE 0700
+
+static struct vfsmount *tracefs_mount;
+static int tracefs_mount_count;
+static bool tracefs_registered;
+
+static ssize_t default_read_file(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t default_write_file(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+static const struct file_operations tracefs_file_operations = {
+ .read = default_read_file,
+ .write = default_write_file,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static struct tracefs_dir_ops {
+ int (*mkdir)(const char *name);
+ int (*rmdir)(const char *name);
+} tracefs_ops;
+
+static char *get_dname(struct dentry *dentry)
+{
+ const char *dname;
+ char *name;
+ int len = dentry->d_name.len;
+
+ dname = dentry->d_name.name;
+ name = kmalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ return NULL;
+ memcpy(name, dname, len);
+ name[len] = 0;
+ return name;
+}
+
+static int tracefs_syscall_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
+{
+ char *name;
+ int ret;
+
+ name = get_dname(dentry);
+ if (!name)
+ return -ENOMEM;
+
+ /*
+ * The mkdir call can call the generic functions that create
+ * the files within the tracefs system. It is up to the individual
+ * mkdir routine to handle races.
+ */
+ mutex_unlock(&inode->i_mutex);
+ ret = tracefs_ops.mkdir(name);
+ mutex_lock(&inode->i_mutex);
+
+ kfree(name);
+
+ return ret;
+}
+
+static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry)
+{
+ char *name;
+ int ret;
+
+ name = get_dname(dentry);
+ if (!name)
+ return -ENOMEM;
+
+ /*
+ * The rmdir call can call the generic functions that remove
+ * the files within the tracefs system. It is up to the individual
+ * rmdir routine to handle races.
+ * This time we need to unlock not only the parent (inode) but
+ * also the directory that is being deleted.
+ */
+ mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ ret = tracefs_ops.rmdir(name);
+
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock(&dentry->d_inode->i_mutex);
+
+ kfree(name);
+
+ return ret;
+}
+
+static const struct inode_operations tracefs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = tracefs_syscall_mkdir,
+ .rmdir = tracefs_syscall_rmdir,
+};
+
+static struct inode *tracefs_get_inode(struct super_block *sb)
+{
+ struct inode *inode = new_inode(sb);
+ if (inode) {
+ inode->i_ino = get_next_ino();
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ }
+ return inode;
+}
+
+struct tracefs_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+};
+
+enum {
+ Opt_uid,
+ Opt_gid,
+ Opt_mode,
+ Opt_err
+};
+
+static const match_table_t tokens = {
+ {Opt_uid, "uid=%u"},
+ {Opt_gid, "gid=%u"},
+ {Opt_mode, "mode=%o"},
+ {Opt_err, NULL}
+};
+
+struct tracefs_fs_info {
+ struct tracefs_mount_opts mount_opts;
+};
+
+static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+{
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+ int token;
+ kuid_t uid;
+ kgid_t gid;
+ char *p;
+
+ opts->mode = TRACEFS_DEFAULT_MODE;
+
+ while ((p = strsep(&data, ",")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_uid:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ uid = make_kuid(current_user_ns(), option);
+ if (!uid_valid(uid))
+ return -EINVAL;
+ opts->uid = uid;
+ break;
+ case Opt_gid:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ gid = make_kgid(current_user_ns(), option);
+ if (!gid_valid(gid))
+ return -EINVAL;
+ opts->gid = gid;
+ break;
+ case Opt_mode:
+ if (match_octal(&args[0], &option))
+ return -EINVAL;
+ opts->mode = option & S_IALLUGO;
+ break;
+ /*
+ * We might like to report bad mount options here,
+ * but traditionally tracefs has ignored all mount options.
+ */
+ }
+ }
+
+ return 0;
+}
+
+static int tracefs_apply_options(struct super_block *sb)
+{
+ struct tracefs_fs_info *fsi = sb->s_fs_info;
+ struct inode *inode = sb->s_root->d_inode;
+ struct tracefs_mount_opts *opts = &fsi->mount_opts;
+
+ inode->i_mode &= ~S_IALLUGO;
+ inode->i_mode |= opts->mode;
+
+ inode->i_uid = opts->uid;
+ inode->i_gid = opts->gid;
+
+ return 0;
+}
+
+static int tracefs_remount(struct super_block *sb, int *flags, char *data)
+{
+ int err;
+ struct tracefs_fs_info *fsi = sb->s_fs_info;
+
+ sync_filesystem(sb);
+ err = tracefs_parse_options(data, &fsi->mount_opts);
+ if (err)
+ goto fail;
+
+ tracefs_apply_options(sb);
+
+fail:
+ return err;
+}
+
+static int tracefs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct tracefs_fs_info *fsi = root->d_sb->s_fs_info;
+ struct tracefs_mount_opts *opts = &fsi->mount_opts;
+
+ if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, opts->uid));
+ if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, opts->gid));
+ if (opts->mode != TRACEFS_DEFAULT_MODE)
+ seq_printf(m, ",mode=%o", opts->mode);
+
+ return 0;
+}
+
+static const struct super_operations tracefs_super_operations = {
+ .statfs = simple_statfs,
+ .remount_fs = tracefs_remount,
+ .show_options = tracefs_show_options,
+};
+
+static int trace_fill_super(struct super_block *sb, void *data, int silent)
+{
+ static struct tree_descr trace_files[] = {{""}};
+ struct tracefs_fs_info *fsi;
+ int err;
+
+ save_mount_options(sb, data);
+
+ fsi = kzalloc(sizeof(struct tracefs_fs_info), GFP_KERNEL);
+ sb->s_fs_info = fsi;
+ if (!fsi) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ err = tracefs_parse_options(data, &fsi->mount_opts);
+ if (err)
+ goto fail;
+
+ err = simple_fill_super(sb, TRACEFS_MAGIC, trace_files);
+ if (err)
+ goto fail;
+
+ sb->s_op = &tracefs_super_operations;
+
+ tracefs_apply_options(sb);
+
+ return 0;
+
+fail:
+ kfree(fsi);
+ sb->s_fs_info = NULL;
+ return err;
+}
+
+static struct dentry *trace_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ return mount_single(fs_type, flags, data, trace_fill_super);
+}
+
+static struct file_system_type trace_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "tracefs",
+ .mount = trace_mount,
+ .kill_sb = kill_litter_super,
+};
+MODULE_ALIAS_FS("tracefs");
+
+static struct dentry *start_creating(const char *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+ int error;
+
+ pr_debug("tracefs: creating file '%s'\n",name);
+
+ error = simple_pin_fs(&trace_fs_type, &tracefs_mount,
+ &tracefs_mount_count);
+ if (error)
+ return ERR_PTR(error);
+
+ /* If the parent is not specified, we create it in the root.
+ * We need the root dentry to do this, which is in the super
+ * block. A pointer to that is in the struct vfsmount that we
+ * have around.
+ */
+ if (!parent)
+ parent = tracefs_mount->mnt_root;
+
+ mutex_lock(&parent->d_inode->i_mutex);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (!IS_ERR(dentry) && dentry->d_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-EEXIST);
+ }
+ if (IS_ERR(dentry))
+ mutex_unlock(&parent->d_inode->i_mutex);
+ return dentry;
+}
+
+static struct dentry *failed_creating(struct dentry *dentry)
+{
+ mutex_unlock(&dentry->d_parent->d_inode->i_mutex);
+ dput(dentry);
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+ return NULL;
+}
+
+static struct dentry *end_creating(struct dentry *dentry)
+{
+ mutex_unlock(&dentry->d_parent->d_inode->i_mutex);
+ return dentry;
+}
+
+/**
+ * tracefs_create_file - create a file in the tracefs filesystem
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is NULL, then the
+ * file will be created in the root of the tracefs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+ * the open() call.
+ * @fops: a pointer to a struct file_operations that should be used for
+ * this file.
+ *
+ * This is the basic "create a file" function for tracefs. It allows for a
+ * wide range of flexibility in creating a file, or a directory (if you want
+ * to create a directory, the tracefs_create_dir() function should be
+ * used instead.)
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the tracefs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded;
+ * you are responsible here). If an error occurs, %NULL will be returned.
+ *
+ * If tracefs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ */
+struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ if (!(mode & S_IFMT))
+ mode |= S_IFREG;
+ BUG_ON(!S_ISREG(mode));
+ dentry = start_creating(name, parent);
+
+ if (IS_ERR(dentry))
+ return NULL;
+
+ inode = tracefs_get_inode(dentry->d_sb);
+ if (unlikely(!inode))
+ return failed_creating(dentry);
+
+ inode->i_mode = mode;
+ inode->i_fop = fops ? fops : &tracefs_file_operations;
+ inode->i_private = data;
+ d_instantiate(dentry, inode);
+ fsnotify_create(dentry->d_parent->d_inode, dentry);
+ return end_creating(dentry);
+}
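/*
 * A minimal usage sketch for tracefs_create_file() (illustrative only, not
 * part of this patch; my_counter, my_read, my_fops and my_init are made-up
 * names). It exposes a read-only value as a file in the tracefs root.
 */
static unsigned long my_counter;

static ssize_t my_read(struct file *file, char __user *buf,
		       size_t count, loff_t *ppos)
{
	char tmp[32];
	int len = scnprintf(tmp, sizeof(tmp), "%lu\n", my_counter);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct file_operations my_fops = {
	.open	= simple_open,
	.read	= my_read,
	.llseek	= default_llseek,
};

static struct dentry *my_file;

static int __init my_init(void)
{
	/* A NULL parent places the file in the tracefs root. */
	my_file = tracefs_create_file("my_counter", 0444, NULL, NULL, &my_fops);
	return my_file ? 0 : -ENOMEM;
}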
+
+static struct dentry *__create_dir(const char *name, struct dentry *parent,
+ const struct inode_operations *ops)
+{
+ struct dentry *dentry = start_creating(name, parent);
+ struct inode *inode;
+
+ if (IS_ERR(dentry))
+ return NULL;
+
+ inode = tracefs_get_inode(dentry->d_sb);
+ if (unlikely(!inode))
+ return failed_creating(dentry);
+
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ inode->i_op = ops;
+ inode->i_fop = &simple_dir_operations;
+
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ d_instantiate(dentry, inode);
+ inc_nlink(dentry->d_parent->d_inode);
+ fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
+ return end_creating(dentry);
+}
+
+/**
+ * tracefs_create_dir - create a directory in the tracefs filesystem
+ * @name: a pointer to a string containing the name of the directory to
+ * create.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is NULL, then the
+ * directory will be created in the root of the tracefs filesystem.
+ *
+ * This function creates a directory in tracefs with the given name.
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the tracefs_remove() function when the
+ * directory is to be removed. If an error occurs, %NULL will be returned.
+ *
+ * If tracing is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ */
+struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
+{
+ return __create_dir(name, parent, &simple_dir_inode_operations);
+}
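/*
 * Illustrative sketch (not part of this patch): create a directory in the
 * tracefs root and place a file inside it. "my_dir", "my_value" and the
 * my_fops from the previous sketch are hypothetical names.
 */
static struct dentry *my_dir;

static int __init my_dir_init(void)
{
	my_dir = tracefs_create_dir("my_dir", NULL);
	if (!my_dir)
		return -ENOMEM;

	if (!tracefs_create_file("my_value", 0644, my_dir, NULL, &my_fops)) {
		tracefs_remove_recursive(my_dir);
		return -ENOMEM;
	}
	return 0;
}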
+
+/**
+ * tracefs_create_instance_dir - create the tracing instances directory
+ * @name: The name of the instances directory to create
+ * @parent: The parent directory in which the instances directory will exist
+ * @mkdir: The function to call when a mkdir is performed.
+ * @rmdir: The function to call when a rmdir is performed.
+ *
+ * Only one instances directory is allowed.
+ *
+ * The instances directory is special as it allows for mkdir and rmdir
+ * to be done by userspace. When a mkdir or rmdir is performed, the inode
+ * locks are released and the methods passed in (@mkdir and @rmdir) are
+ * called without locks and with the name of the directory being created
+ * or removed within the instances directory.
+ *
+ * Returns the dentry of the instances directory.
+ */
+struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
+ int (*mkdir)(const char *name),
+ int (*rmdir)(const char *name))
+{
+ struct dentry *dentry;
+
+ /* Only allow one instance of the instances directory. */
+ if (WARN_ON(tracefs_ops.mkdir || tracefs_ops.rmdir))
+ return NULL;
+
+ dentry = __create_dir(name, parent, &tracefs_dir_inode_operations);
+ if (!dentry)
+ return NULL;
+
+ tracefs_ops.mkdir = mkdir;
+ tracefs_ops.rmdir = rmdir;
+
+ return dentry;
+}
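/*
 * Illustrative sketch (not part of this patch): let userspace mkdir/rmdir
 * inside an "instances" directory. my_instance_mkdir()/my_instance_rmdir()
 * are hypothetical callbacks; they run without the VFS inode locks held,
 * as described above.
 */
static int my_instance_mkdir(const char *name)
{
	pr_info("creating instance '%s'\n", name);
	return 0;
}

static int my_instance_rmdir(const char *name)
{
	pr_info("removing instance '%s'\n", name);
	return 0;
}

static struct dentry *my_instances;

static void my_instances_init(struct dentry *parent)
{
	my_instances = tracefs_create_instance_dir("instances", parent,
						   my_instance_mkdir,
						   my_instance_rmdir);
	WARN_ON(!my_instances);
}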
+
+static inline int tracefs_positive(struct dentry *dentry)
+{
+ return dentry->d_inode && !d_unhashed(dentry);
+}
+
+static int __tracefs_remove(struct dentry *dentry, struct dentry *parent)
+{
+ int ret = 0;
+
+ if (tracefs_positive(dentry)) {
+ if (dentry->d_inode) {
+ dget(dentry);
+ switch (dentry->d_inode->i_mode & S_IFMT) {
+ case S_IFDIR:
+ ret = simple_rmdir(parent->d_inode, dentry);
+ break;
+ default:
+ simple_unlink(parent->d_inode, dentry);
+ break;
+ }
+ if (!ret)
+ d_delete(dentry);
+ dput(dentry);
+ }
+ }
+ return ret;
+}
+
+/**
+ * tracefs_remove - removes a file or directory from the tracefs filesystem
+ * @dentry: a pointer to the dentry of the file or directory to be
+ * removed.
+ *
+ * This function removes a file or directory in tracefs that was previously
+ * created with a call to another tracefs function (like
+ * tracefs_create_file() or variants thereof.)
+ */
+void tracefs_remove(struct dentry *dentry)
+{
+ struct dentry *parent;
+ int ret;
+
+ if (IS_ERR_OR_NULL(dentry))
+ return;
+
+ parent = dentry->d_parent;
+ if (!parent || !parent->d_inode)
+ return;
+
+ mutex_lock(&parent->d_inode->i_mutex);
+ ret = __tracefs_remove(dentry, parent);
+ mutex_unlock(&parent->d_inode->i_mutex);
+ if (!ret)
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+}
+
+/**
+ * tracefs_remove_recursive - recursively removes a directory
+ * @dentry: a pointer to the dentry of the directory to be removed.
+ *
+ * This function recursively removes a directory tree in tracefs that
+ * was previously created with a call to another tracefs function
+ * (like tracefs_create_file() or variants thereof.)
+ */
+void tracefs_remove_recursive(struct dentry *dentry)
+{
+ struct dentry *child, *parent;
+
+ if (IS_ERR_OR_NULL(dentry))
+ return;
+
+ parent = dentry->d_parent;
+ if (!parent || !parent->d_inode)
+ return;
+
+ parent = dentry;
+ down:
+ mutex_lock(&parent->d_inode->i_mutex);
+ loop:
+ /*
+ * The parent->d_subdirs is protected by the d_lock. Outside that
+ * lock, the child can be unlinked and set to be freed, which can
+ * use the d_u.d_child as the rcu head and corrupt this list.
+ */
+ spin_lock(&parent->d_lock);
+ list_for_each_entry(child, &parent->d_subdirs, d_child) {
+ if (!tracefs_positive(child))
+ continue;
+
+ /* perhaps simple_empty(child) makes more sense */
+ if (!list_empty(&child->d_subdirs)) {
+ spin_unlock(&parent->d_lock);
+ mutex_unlock(&parent->d_inode->i_mutex);
+ parent = child;
+ goto down;
+ }
+
+ spin_unlock(&parent->d_lock);
+
+ if (!__tracefs_remove(child, parent))
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+
+ /*
+ * The parent->d_lock protects against the child being unlinked
+ * from d_subdirs. When releasing the parent->d_lock we can
+ * no longer trust that the next pointer is valid.
+ * Restart the loop. We'll skip this one with the
+ * tracefs_positive() check.
+ */
+ goto loop;
+ }
+ spin_unlock(&parent->d_lock);
+
+ mutex_unlock(&parent->d_inode->i_mutex);
+ child = parent;
+ parent = parent->d_parent;
+ mutex_lock(&parent->d_inode->i_mutex);
+
+ if (child != dentry)
+ /* go up */
+ goto loop;
+
+ if (!__tracefs_remove(child, parent))
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+ mutex_unlock(&parent->d_inode->i_mutex);
+}
+
+/**
+ * tracefs_initialized - Tells whether tracefs has been registered
+ */
+bool tracefs_initialized(void)
+{
+ return tracefs_registered;
+}
+
+static struct kobject *trace_kobj;
+
+static int __init tracefs_init(void)
+{
+ int retval;
+
+ trace_kobj = kobject_create_and_add("tracing", kernel_kobj);
+ if (!trace_kobj)
+ return -EINVAL;
+
+ retval = register_filesystem(&trace_fs_type);
+ if (!retval)
+ tracefs_registered = true;
+
+ return retval;
+}
+core_initcall(tracefs_init);
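
For context only (this is not part of the commit), the instance-directory hooks added above could be wired up by a tracer roughly as follows. The my_tracer_*() helpers and callback names are hypothetical, and the NULL parent is assumed to mean the tracefs root as in debugfs; only tracefs_create_instance_dir() and tracefs_remove_recursive() are taken from the code above.

#include <linux/tracefs.h>	/* assumed to declare the API shown above */

/* Hypothetical per-instance bookkeeping; not part of tracefs. */
static int my_tracer_alloc_instance(const char *name);
static int my_tracer_free_instance(const char *name);

static int my_instance_mkdir(const char *name)
{
	/* Called without the tracefs inode locks held, per the comment
	 * on tracefs_create_instance_dir() above. */
	return my_tracer_alloc_instance(name);
}

static int my_instance_rmdir(const char *name)
{
	return my_tracer_free_instance(name);
}

static struct dentry *instances;

static int __init my_tracer_init(void)
{
	/* A second caller would trip the WARN_ON() and get NULL back,
	 * since only one instances directory is allowed. */
	instances = tracefs_create_instance_dir("instances", NULL,
						my_instance_mkdir,
						my_instance_rmdir);
	return instances ? 0 : -ENOMEM;
}

static void __exit my_tracer_exit(void)
{
	/* Tears down the directory and anything created underneath it. */
	tracefs_remove_recursive(instances);
}
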
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index eb997e9c4ab0..11a11b32a2a9 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -509,7 +509,7 @@ again:
c->bi.nospace_rp = 1;
smp_wmb();
} else
- ubifs_err("cannot budget space, error %d", err);
+ ubifs_err(c, "cannot budget space, error %d", err);
return err;
}
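
From here on the commit threads a struct ubifs_info pointer through every UBIFS message helper. The helper bodies are not shown in these hunks; a minimal sketch of what a context-aware printer might look like, assuming c->vi carries the UBI volume identifiers, is:

/* Illustrative sketch only; the real definition is outside these hunks. */
void ubifs_err(const struct ubifs_info *c, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* Naming the volume lets messages from concurrent UBIFS mounts
	 * be told apart in the kernel log. */
	pr_err("UBIFS error (ubi%d:%d pid %d): %pV\n",
	       c->vi.ubi_num, c->vi.vol_id, current->pid, &vaf);
	va_end(args);
}

The plain pr_err() fall-backs added in compress.c and debug.c below cover the early-init and global-debugfs paths where no such c exists yet.
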
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 26b69b2d4a45..63f56619991d 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -225,7 +225,7 @@ out_cancel:
out_up:
up_write(&c->commit_sem);
out:
- ubifs_err("commit failed, error %d", err);
+ ubifs_err(c, "commit failed, error %d", err);
spin_lock(&c->cs_lock);
c->cmt_state = COMMIT_BROKEN;
wake_up(&c->cmt_wq);
@@ -289,7 +289,7 @@ int ubifs_bg_thread(void *info)
int err;
struct ubifs_info *c = info;
- ubifs_msg("background thread \"%s\" started, PID %d",
+ ubifs_msg(c, "background thread \"%s\" started, PID %d",
c->bgt_name, current->pid);
set_freezable();
@@ -324,7 +324,7 @@ int ubifs_bg_thread(void *info)
cond_resched();
}
- ubifs_msg("background thread \"%s\" stops", c->bgt_name);
+ ubifs_msg(c, "background thread \"%s\" stops", c->bgt_name);
return 0;
}
@@ -712,13 +712,13 @@ out:
return 0;
out_dump:
- ubifs_err("dumping index node (iip=%d)", i->iip);
+ ubifs_err(c, "dumping index node (iip=%d)", i->iip);
ubifs_dump_node(c, idx);
list_del(&i->list);
kfree(i);
if (!list_empty(&list)) {
i = list_entry(list.prev, struct idx_node, list);
- ubifs_err("dumping parent index node");
+ ubifs_err(c, "dumping parent index node");
ubifs_dump_node(c, &i->idx);
}
out_free:
@@ -727,7 +727,7 @@ out_free:
list_del(&i->list);
kfree(i);
}
- ubifs_err("failed, error %d", err);
+ ubifs_err(c, "failed, error %d", err);
if (err > 0)
err = -EINVAL;
return err;
diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c
index 2bfa0953335d..565cb56d7225 100644
--- a/fs/ubifs/compress.c
+++ b/fs/ubifs/compress.c
@@ -92,8 +92,8 @@ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
* Note, if the input buffer was not compressed, it is copied to the output
* buffer and %UBIFS_COMPR_NONE is returned in @compr_type.
*/
-void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
- int *compr_type)
+void ubifs_compress(const struct ubifs_info *c, const void *in_buf,
+ int in_len, void *out_buf, int *out_len, int *compr_type)
{
int err;
struct ubifs_compressor *compr = ubifs_compressors[*compr_type];
@@ -112,9 +112,9 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
if (compr->comp_mutex)
mutex_unlock(compr->comp_mutex);
if (unlikely(err)) {
- ubifs_warn("cannot compress %d bytes, compressor %s, error %d, leave data uncompressed",
+ ubifs_warn(c, "cannot compress %d bytes, compressor %s, error %d, leave data uncompressed",
in_len, compr->name, err);
- goto no_compr;
+ goto no_compr;
}
/*
@@ -144,21 +144,21 @@ no_compr:
* The length of the uncompressed data is returned in @out_len. This functions
* returns %0 on success or a negative error code on failure.
*/
-int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
- int *out_len, int compr_type)
+int ubifs_decompress(const struct ubifs_info *c, const void *in_buf,
+ int in_len, void *out_buf, int *out_len, int compr_type)
{
int err;
struct ubifs_compressor *compr;
if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) {
- ubifs_err("invalid compression type %d", compr_type);
+ ubifs_err(c, "invalid compression type %d", compr_type);
return -EINVAL;
}
compr = ubifs_compressors[compr_type];
if (unlikely(!compr->capi_name)) {
- ubifs_err("%s compression is not compiled in", compr->name);
+ ubifs_err(c, "%s compression is not compiled in", compr->name);
return -EINVAL;
}
@@ -175,7 +175,7 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
if (compr->decomp_mutex)
mutex_unlock(compr->decomp_mutex);
if (err)
- ubifs_err("cannot decompress %d bytes, compressor %s, error %d",
+ ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d",
in_len, compr->name, err);
return err;
@@ -193,8 +193,8 @@ static int __init compr_init(struct ubifs_compressor *compr)
if (compr->capi_name) {
compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0);
if (IS_ERR(compr->cc)) {
- ubifs_err("cannot initialize compressor %s, error %ld",
- compr->name, PTR_ERR(compr->cc));
+ pr_err("UBIFS error (pid %d): cannot initialize compressor %s, error %ld",
+ current->pid, compr->name, PTR_ERR(compr->cc));
return PTR_ERR(compr->cc);
}
}
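
Taken together with the kernel-doc fragments above, the new compress/decompress signatures can be exercised as in the following sketch (not from the commit). The buffer sizing and the LZO compressor choice are assumptions, and the input is assumed to fit in one UBIFS block, as it would for a single data node.

static int example_roundtrip(const struct ubifs_info *c,
			     const void *data, int len)
{
	void *packed, *unpacked;
	int packed_len = len + UBIFS_BLOCK_SIZE;	/* generous headroom */
	int unpacked_len = UBIFS_BLOCK_SIZE;
	int compr_type = UBIFS_COMPR_LZO;		/* assumed default */
	int err = -ENOMEM;

	packed = kmalloc(packed_len, GFP_NOFS);
	unpacked = kmalloc(UBIFS_BLOCK_SIZE, GFP_NOFS);
	if (!packed || !unpacked)
		goto out;

	/* May fall back to UBIFS_COMPR_NONE and plain-copy the input;
	 * compr_type and packed_len are updated accordingly. */
	ubifs_compress(c, data, len, packed, &packed_len, &compr_type);

	/* compr_type must be handed back exactly as set above. */
	err = ubifs_decompress(c, packed, packed_len, unpacked,
			       &unpacked_len, compr_type);
out:
	kfree(unpacked);
	kfree(packed);
	return err;
}
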
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 4cfb3e82c56f..4c46a9865fa7 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -746,7 +746,7 @@ void ubifs_dump_lprops(struct ubifs_info *c)
for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
err = ubifs_read_one_lp(c, lnum, &lp);
if (err) {
- ubifs_err("cannot read lprops for LEB %d", lnum);
+ ubifs_err(c, "cannot read lprops for LEB %d", lnum);
continue;
}
@@ -819,13 +819,13 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
- ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
+ ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum);
return;
}
sleb = ubifs_scan(c, lnum, 0, buf, 0);
if (IS_ERR(sleb)) {
- ubifs_err("scan error %d", (int)PTR_ERR(sleb));
+ ubifs_err(c, "scan error %d", (int)PTR_ERR(sleb));
goto out;
}
@@ -1032,7 +1032,7 @@ int dbg_check_space_info(struct ubifs_info *c)
spin_unlock(&c->space_lock);
if (free != d->saved_free) {
- ubifs_err("free space changed from %lld to %lld",
+ ubifs_err(c, "free space changed from %lld to %lld",
d->saved_free, free);
goto out;
}
@@ -1040,15 +1040,15 @@ int dbg_check_space_info(struct ubifs_info *c)
return 0;
out:
- ubifs_msg("saved lprops statistics dump");
+ ubifs_msg(c, "saved lprops statistics dump");
ubifs_dump_lstats(&d->saved_lst);
- ubifs_msg("saved budgeting info dump");
+ ubifs_msg(c, "saved budgeting info dump");
ubifs_dump_budg(c, &d->saved_bi);
- ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
- ubifs_msg("current lprops statistics dump");
+ ubifs_msg(c, "saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
+ ubifs_msg(c, "current lprops statistics dump");
ubifs_get_lp_stats(c, &lst);
ubifs_dump_lstats(&lst);
- ubifs_msg("current budgeting info dump");
+ ubifs_msg(c, "current budgeting info dump");
ubifs_dump_budg(c, &c->bi);
dump_stack();
return -EINVAL;
@@ -1077,9 +1077,9 @@ int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode)
mutex_lock(&ui->ui_mutex);
spin_lock(&ui->ui_lock);
if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
- ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode is clean",
+ ubifs_err(c, "ui_size is %lld, synced_i_size is %lld, but inode is clean",
ui->ui_size, ui->synced_i_size);
- ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
+ ubifs_err(c, "i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
inode->i_mode, i_size_read(inode));
dump_stack();
err = -EINVAL;
@@ -1140,7 +1140,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
kfree(pdent);
if (i_size_read(dir) != size) {
- ubifs_err("directory inode %lu has size %llu, but calculated size is %llu",
+ ubifs_err(c, "directory inode %lu has size %llu, but calculated size is %llu",
dir->i_ino, (unsigned long long)i_size_read(dir),
(unsigned long long)size);
ubifs_dump_inode(c, dir);
@@ -1148,7 +1148,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
return -EINVAL;
}
if (dir->i_nlink != nlink) {
- ubifs_err("directory inode %lu has nlink %u, but calculated nlink is %u",
+ ubifs_err(c, "directory inode %lu has nlink %u, but calculated nlink is %u",
dir->i_ino, dir->i_nlink, nlink);
ubifs_dump_inode(c, dir);
dump_stack();
@@ -1207,10 +1207,10 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
err = 1;
key_read(c, &dent1->key, &key);
if (keys_cmp(c, &zbr1->key, &key)) {
- ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum,
+ ubifs_err(c, "1st entry at %d:%d has key %s", zbr1->lnum,
zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
DBG_KEY_BUF_LEN));
- ubifs_err("but it should have key %s according to tnc",
+ ubifs_err(c, "but it should have key %s according to tnc",
dbg_snprintf_key(c, &zbr1->key, key_buf,
DBG_KEY_BUF_LEN));
ubifs_dump_node(c, dent1);
@@ -1219,10 +1219,10 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
key_read(c, &dent2->key, &key);
if (keys_cmp(c, &zbr2->key, &key)) {
- ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum,
+ ubifs_err(c, "2nd entry at %d:%d has key %s", zbr1->lnum,
zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
DBG_KEY_BUF_LEN));
- ubifs_err("but it should have key %s according to tnc",
+ ubifs_err(c, "but it should have key %s according to tnc",
dbg_snprintf_key(c, &zbr2->key, key_buf,
DBG_KEY_BUF_LEN));
ubifs_dump_node(c, dent2);
@@ -1238,14 +1238,14 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
goto out_free;
}
if (cmp == 0 && nlen1 == nlen2)
- ubifs_err("2 xent/dent nodes with the same name");
+ ubifs_err(c, "2 xent/dent nodes with the same name");
else
- ubifs_err("bad order of colliding key %s",
+ ubifs_err(c, "bad order of colliding key %s",
dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
- ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
+ ubifs_msg(c, "first node at %d:%d\n", zbr1->lnum, zbr1->offs);
ubifs_dump_node(c, dent1);
- ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
+ ubifs_msg(c, "second node at %d:%d\n", zbr2->lnum, zbr2->offs);
ubifs_dump_node(c, dent2);
out_free:
@@ -1447,11 +1447,11 @@ static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
return 0;
out:
- ubifs_err("failed, error %d", err);
- ubifs_msg("dump of the znode");
+ ubifs_err(c, "failed, error %d", err);
+ ubifs_msg(c, "dump of the znode");
ubifs_dump_znode(c, znode);
if (zp) {
- ubifs_msg("dump of the parent znode");
+ ubifs_msg(c, "dump of the parent znode");
ubifs_dump_znode(c, zp);
}
dump_stack();
@@ -1518,9 +1518,9 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
if (err < 0)
return err;
if (err) {
- ubifs_msg("first znode");
+ ubifs_msg(c, "first znode");
ubifs_dump_znode(c, prev);
- ubifs_msg("second znode");
+ ubifs_msg(c, "second znode");
ubifs_dump_znode(c, znode);
return -EINVAL;
}
@@ -1529,13 +1529,13 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
if (extra) {
if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
- ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
+ ubifs_err(c, "incorrect clean_zn_cnt %ld, calculated %ld",
atomic_long_read(&c->clean_zn_cnt),
clean_cnt);
return -EINVAL;
}
if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
- ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
+ ubifs_err(c, "incorrect dirty_zn_cnt %ld, calculated %ld",
atomic_long_read(&c->dirty_zn_cnt),
dirty_cnt);
return -EINVAL;
@@ -1608,7 +1608,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
if (znode_cb) {
err = znode_cb(c, znode, priv);
if (err) {
- ubifs_err("znode checking function returned error %d",
+ ubifs_err(c, "znode checking function returned error %d",
err);
ubifs_dump_znode(c, znode);
goto out_dump;
@@ -1619,7 +1619,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
zbr = &znode->zbranch[idx];
err = leaf_cb(c, zbr, priv);
if (err) {
- ubifs_err("leaf checking function returned error %d, for leaf at LEB %d:%d",
+ ubifs_err(c, "leaf checking function returned error %d, for leaf at LEB %d:%d",
err, zbr->lnum, zbr->offs);
goto out_dump;
}
@@ -1675,7 +1675,7 @@ out_dump:
zbr = &znode->parent->zbranch[znode->iip];
else
zbr = &c->zroot;
- ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
+ ubifs_msg(c, "dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
ubifs_dump_znode(c, znode);
out_unlock:
mutex_unlock(&c->tnc_mutex);
@@ -1722,12 +1722,12 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
err = dbg_walk_index(c, NULL, add_size, &calc);
if (err) {
- ubifs_err("error %d while walking the index", err);
+ ubifs_err(c, "error %d while walking the index", err);
return err;
}
if (calc != idx_size) {
- ubifs_err("index size check failed: calculated size is %lld, should be %lld",
+ ubifs_err(c, "index size check failed: calculated size is %lld, should be %lld",
calc, idx_size);
dump_stack();
return -EINVAL;
@@ -1814,7 +1814,7 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,
}
if (inum > c->highest_inum) {
- ubifs_err("too high inode number, max. is %lu",
+ ubifs_err(c, "too high inode number, max. is %lu",
(unsigned long)c->highest_inum);
return ERR_PTR(-EINVAL);
}
@@ -1921,17 +1921,17 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
ino_key_init(c, &key, inum);
err = ubifs_lookup_level0(c, &key, &znode, &n);
if (!err) {
- ubifs_err("inode %lu not found in index", (unsigned long)inum);
+ ubifs_err(c, "inode %lu not found in index", (unsigned long)inum);
return ERR_PTR(-ENOENT);
} else if (err < 0) {
- ubifs_err("error %d while looking up inode %lu",
+ ubifs_err(c, "error %d while looking up inode %lu",
err, (unsigned long)inum);
return ERR_PTR(err);
}
zbr = &znode->zbranch[n];
if (zbr->len < UBIFS_INO_NODE_SZ) {
- ubifs_err("bad node %lu node length %d",
+ ubifs_err(c, "bad node %lu node length %d",
(unsigned long)inum, zbr->len);
return ERR_PTR(-EINVAL);
}
@@ -1942,7 +1942,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
err = ubifs_tnc_read_node(c, zbr, ino);
if (err) {
- ubifs_err("cannot read inode node at LEB %d:%d, error %d",
+ ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d",
zbr->lnum, zbr->offs, err);
kfree(ino);
return ERR_PTR(err);
@@ -1951,7 +1951,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
fscki = add_inode(c, fsckd, ino);
kfree(ino);
if (IS_ERR(fscki)) {
- ubifs_err("error %ld while adding inode %lu node",
+ ubifs_err(c, "error %ld while adding inode %lu node",
PTR_ERR(fscki), (unsigned long)inum);
return fscki;
}
@@ -1985,7 +1985,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
struct fsck_inode *fscki;
if (zbr->len < UBIFS_CH_SZ) {
- ubifs_err("bad leaf length %d (LEB %d:%d)",
+ ubifs_err(c, "bad leaf length %d (LEB %d:%d)",
zbr->len, zbr->lnum, zbr->offs);
return -EINVAL;
}
@@ -1996,7 +1996,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
err = ubifs_tnc_read_node(c, zbr, node);
if (err) {
- ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
+ ubifs_err(c, "cannot read leaf node at LEB %d:%d, error %d",
zbr->lnum, zbr->offs, err);
goto out_free;
}
@@ -2006,7 +2006,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
fscki = add_inode(c, priv, node);
if (IS_ERR(fscki)) {
err = PTR_ERR(fscki);
- ubifs_err("error %d while adding inode node", err);
+ ubifs_err(c, "error %d while adding inode node", err);
goto out_dump;
}
goto out;
@@ -2014,7 +2014,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
type != UBIFS_DATA_KEY) {
- ubifs_err("unexpected node type %d at LEB %d:%d",
+ ubifs_err(c, "unexpected node type %d at LEB %d:%d",
type, zbr->lnum, zbr->offs);
err = -EINVAL;
goto out_free;
@@ -2022,7 +2022,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
ch = node;
if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
- ubifs_err("too high sequence number, max. is %llu",
+ ubifs_err(c, "too high sequence number, max. is %llu",
c->max_sqnum);
err = -EINVAL;
goto out_dump;
@@ -2042,7 +2042,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
fscki = read_add_inode(c, priv, inum);
if (IS_ERR(fscki)) {
err = PTR_ERR(fscki);
- ubifs_err("error %d while processing data node and trying to find inode node %lu",
+ ubifs_err(c, "error %d while processing data node and trying to find inode node %lu",
err, (unsigned long)inum);
goto out_dump;
}
@@ -2052,7 +2052,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
blk_offs <<= UBIFS_BLOCK_SHIFT;
blk_offs += le32_to_cpu(dn->size);
if (blk_offs > fscki->size) {
- ubifs_err("data node at LEB %d:%d is not within inode size %lld",
+ ubifs_err(c, "data node at LEB %d:%d is not within inode size %lld",
zbr->lnum, zbr->offs, fscki->size);
err = -EINVAL;
goto out_dump;
@@ -2076,7 +2076,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
fscki = read_add_inode(c, priv, inum);
if (IS_ERR(fscki)) {
err = PTR_ERR(fscki);
- ubifs_err("error %d while processing entry node and trying to find inode node %lu",
+ ubifs_err(c, "error %d while processing entry node and trying to find inode node %lu",
err, (unsigned long)inum);
goto out_dump;
}
@@ -2088,7 +2088,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
fscki1 = read_add_inode(c, priv, inum);
if (IS_ERR(fscki1)) {
err = PTR_ERR(fscki1);
- ubifs_err("error %d while processing entry node and trying to find parent inode node %lu",
+ ubifs_err(c, "error %d while processing entry node and trying to find parent inode node %lu",
err, (unsigned long)inum);
goto out_dump;
}
@@ -2111,7 +2111,7 @@ out:
return 0;
out_dump:
- ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
+ ubifs_msg(c, "dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
ubifs_dump_node(c, node);
out_free:
kfree(node);
@@ -2162,52 +2162,52 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
*/
if (fscki->inum != UBIFS_ROOT_INO &&
fscki->references != 1) {
- ubifs_err("directory inode %lu has %d direntries which refer it, but should be 1",
+ ubifs_err(c, "directory inode %lu has %d direntries which refer it, but should be 1",
(unsigned long)fscki->inum,
fscki->references);
goto out_dump;
}
if (fscki->inum == UBIFS_ROOT_INO &&
fscki->references != 0) {
- ubifs_err("root inode %lu has non-zero (%d) direntries which refer it",
+ ubifs_err(c, "root inode %lu has non-zero (%d) direntries which refer it",
(unsigned long)fscki->inum,
fscki->references);
goto out_dump;
}
if (fscki->calc_sz != fscki->size) {
- ubifs_err("directory inode %lu size is %lld, but calculated size is %lld",
+ ubifs_err(c, "directory inode %lu size is %lld, but calculated size is %lld",
(unsigned long)fscki->inum,
fscki->size, fscki->calc_sz);
goto out_dump;
}
if (fscki->calc_cnt != fscki->nlink) {
- ubifs_err("directory inode %lu nlink is %d, but calculated nlink is %d",
+ ubifs_err(c, "directory inode %lu nlink is %d, but calculated nlink is %d",
(unsigned long)fscki->inum,
fscki->nlink, fscki->calc_cnt);
goto out_dump;
}
} else {
if (fscki->references != fscki->nlink) {
- ubifs_err("inode %lu nlink is %d, but calculated nlink is %d",
+ ubifs_err(c, "inode %lu nlink is %d, but calculated nlink is %d",
(unsigned long)fscki->inum,
fscki->nlink, fscki->references);
goto out_dump;
}
}
if (fscki->xattr_sz != fscki->calc_xsz) {
- ubifs_err("inode %lu has xattr size %u, but calculated size is %lld",
+ ubifs_err(c, "inode %lu has xattr size %u, but calculated size is %lld",
(unsigned long)fscki->inum, fscki->xattr_sz,
fscki->calc_xsz);
goto out_dump;
}
if (fscki->xattr_cnt != fscki->calc_xcnt) {
- ubifs_err("inode %lu has %u xattrs, but calculated count is %lld",
+ ubifs_err(c, "inode %lu has %u xattrs, but calculated count is %lld",
(unsigned long)fscki->inum,
fscki->xattr_cnt, fscki->calc_xcnt);
goto out_dump;
}
if (fscki->xattr_nms != fscki->calc_xnms) {
- ubifs_err("inode %lu has xattr names' size %u, but calculated names' size is %lld",
+ ubifs_err(c, "inode %lu has xattr names' size %u, but calculated names' size is %lld",
(unsigned long)fscki->inum, fscki->xattr_nms,
fscki->calc_xnms);
goto out_dump;
@@ -2221,11 +2221,11 @@ out_dump:
ino_key_init(c, &key, fscki->inum);
err = ubifs_lookup_level0(c, &key, &znode, &n);
if (!err) {
- ubifs_err("inode %lu not found in index",
+ ubifs_err(c, "inode %lu not found in index",
(unsigned long)fscki->inum);
return -ENOENT;
} else if (err < 0) {
- ubifs_err("error %d while looking up inode %lu",
+ ubifs_err(c, "error %d while looking up inode %lu",
err, (unsigned long)fscki->inum);
return err;
}
@@ -2237,13 +2237,13 @@ out_dump:
err = ubifs_tnc_read_node(c, zbr, ino);
if (err) {
- ubifs_err("cannot read inode node at LEB %d:%d, error %d",
+ ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d",
zbr->lnum, zbr->offs, err);
kfree(ino);
return err;
}
- ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
+ ubifs_msg(c, "dump of the inode %lu sitting in LEB %d:%d",
(unsigned long)fscki->inum, zbr->lnum, zbr->offs);
ubifs_dump_node(c, ino);
kfree(ino);
@@ -2284,7 +2284,7 @@ int dbg_check_filesystem(struct ubifs_info *c)
return 0;
out_free:
- ubifs_err("file-system check failed with error %d", err);
+ ubifs_err(c, "file-system check failed with error %d", err);
dump_stack();
free_inodes(&fsckd);
return err;
@@ -2315,12 +2315,12 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
sb = container_of(cur->next, struct ubifs_scan_node, list);
if (sa->type != UBIFS_DATA_NODE) {
- ubifs_err("bad node type %d", sa->type);
+ ubifs_err(c, "bad node type %d", sa->type);
ubifs_dump_node(c, sa->node);
return -EINVAL;
}
if (sb->type != UBIFS_DATA_NODE) {
- ubifs_err("bad node type %d", sb->type);
+ ubifs_err(c, "bad node type %d", sb->type);
ubifs_dump_node(c, sb->node);
return -EINVAL;
}
@@ -2331,7 +2331,7 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
if (inuma < inumb)
continue;
if (inuma > inumb) {
- ubifs_err("larger inum %lu goes before inum %lu",
+ ubifs_err(c, "larger inum %lu goes before inum %lu",
(unsigned long)inuma, (unsigned long)inumb);
goto error_dump;
}
@@ -2340,11 +2340,11 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
blkb = key_block(c, &sb->key);
if (blka > blkb) {
- ubifs_err("larger block %u goes before %u", blka, blkb);
+ ubifs_err(c, "larger block %u goes before %u", blka, blkb);
goto error_dump;
}
if (blka == blkb) {
- ubifs_err("two data nodes for the same block");
+ ubifs_err(c, "two data nodes for the same block");
goto error_dump;
}
}
@@ -2383,19 +2383,19 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
sa->type != UBIFS_XENT_NODE) {
- ubifs_err("bad node type %d", sa->type);
+ ubifs_err(c, "bad node type %d", sa->type);
ubifs_dump_node(c, sa->node);
return -EINVAL;
}
if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
sa->type != UBIFS_XENT_NODE) {
- ubifs_err("bad node type %d", sb->type);
+ ubifs_err(c, "bad node type %d", sb->type);
ubifs_dump_node(c, sb->node);
return -EINVAL;
}
if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
- ubifs_err("non-inode node goes before inode node");
+ ubifs_err(c, "non-inode node goes before inode node");
goto error_dump;
}
@@ -2405,7 +2405,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
/* Inode nodes are sorted in descending size order */
if (sa->len < sb->len) {
- ubifs_err("smaller inode node goes first");
+ ubifs_err(c, "smaller inode node goes first");
goto error_dump;
}
continue;
@@ -2421,7 +2421,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
if (inuma < inumb)
continue;
if (inuma > inumb) {
- ubifs_err("larger inum %lu goes before inum %lu",
+ ubifs_err(c, "larger inum %lu goes before inum %lu",
(unsigned long)inuma, (unsigned long)inumb);
goto error_dump;
}
@@ -2430,7 +2430,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
hashb = key_block(c, &sb->key);
if (hasha > hashb) {
- ubifs_err("larger hash %u goes before %u",
+ ubifs_err(c, "larger hash %u goes before %u",
hasha, hashb);
goto error_dump;
}
@@ -2439,9 +2439,9 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
return 0;
error_dump:
- ubifs_msg("dumping first node");
+ ubifs_msg(c, "dumping first node");
ubifs_dump_node(c, sa->node);
- ubifs_msg("dumping second node");
+ ubifs_msg(c, "dumping second node");
ubifs_dump_node(c, sb->node);
return -EINVAL;
return 0;
@@ -2470,13 +2470,13 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
delay = prandom_u32() % 60000;
d->pc_timeout = jiffies;
d->pc_timeout += msecs_to_jiffies(delay);
- ubifs_warn("failing after %lums", delay);
+ ubifs_warn(c, "failing after %lums", delay);
} else {
d->pc_delay = 2;
delay = prandom_u32() % 10000;
/* Fail within 10000 operations */
d->pc_cnt_max = delay;
- ubifs_warn("failing after %lu calls", delay);
+ ubifs_warn(c, "failing after %lu calls", delay);
}
}
@@ -2494,55 +2494,55 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
return 0;
if (chance(19, 20))
return 0;
- ubifs_warn("failing in super block LEB %d", lnum);
+ ubifs_warn(c, "failing in super block LEB %d", lnum);
} else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
if (chance(19, 20))
return 0;
- ubifs_warn("failing in master LEB %d", lnum);
+ ubifs_warn(c, "failing in master LEB %d", lnum);
} else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
if (write && chance(99, 100))
return 0;
if (chance(399, 400))
return 0;
- ubifs_warn("failing in log LEB %d", lnum);
+ ubifs_warn(c, "failing in log LEB %d", lnum);
} else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
if (write && chance(7, 8))
return 0;
if (chance(19, 20))
return 0;
- ubifs_warn("failing in LPT LEB %d", lnum);
+ ubifs_warn(c, "failing in LPT LEB %d", lnum);
} else if (lnum >= c->orph_first && lnum <= c->orph_last) {
if (write && chance(1, 2))
return 0;
if (chance(9, 10))
return 0;
- ubifs_warn("failing in orphan LEB %d", lnum);
+ ubifs_warn(c, "failing in orphan LEB %d", lnum);
} else if (lnum == c->ihead_lnum) {
if (chance(99, 100))
return 0;
- ubifs_warn("failing in index head LEB %d", lnum);
+ ubifs_warn(c, "failing in index head LEB %d", lnum);
} else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
if (chance(9, 10))
return 0;
- ubifs_warn("failing in GC head LEB %d", lnum);
+ ubifs_warn(c, "failing in GC head LEB %d", lnum);
} else if (write && !RB_EMPTY_ROOT(&c->buds) &&
!ubifs_search_bud(c, lnum)) {
if (chance(19, 20))
return 0;
- ubifs_warn("failing in non-bud LEB %d", lnum);
+ ubifs_warn(c, "failing in non-bud LEB %d", lnum);
} else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
c->cmt_state == COMMIT_RUNNING_REQUIRED) {
if (chance(999, 1000))
return 0;
- ubifs_warn("failing in bud LEB %d commit running", lnum);
+ ubifs_warn(c, "failing in bud LEB %d commit running", lnum);
} else {
if (chance(9999, 10000))
return 0;
- ubifs_warn("failing in bud LEB %d commit not running", lnum);
+ ubifs_warn(c, "failing in bud LEB %d commit not running", lnum);
}
d->pc_happened = 1;
- ubifs_warn("========== Power cut emulated ==========");
+ ubifs_warn(c, "========== Power cut emulated ==========");
dump_stack();
return 1;
}
@@ -2557,7 +2557,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
/* Corruption span max to end of write unit */
to = min(len, ALIGN(from + 1, c->max_write_size));
- ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
+ ubifs_warn(c, "filled bytes %u-%u with %s", from, to - 1,
ffs ? "0xFFs" : "random data");
if (ffs)
@@ -2579,7 +2579,7 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
failing = power_cut_emulated(c, lnum, 1);
if (failing) {
len = corrupt_data(c, buf, len);
- ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
+ ubifs_warn(c, "actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
len, lnum, offs);
}
err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
@@ -2909,7 +2909,7 @@ out_remove:
debugfs_remove_recursive(d->dfs_dir);
out:
err = dent ? PTR_ERR(dent) : -ENODEV;
- ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+ ubifs_err(c, "cannot create \"%s\" debugfs file or directory, error %d\n",
fname, err);
return err;
}
@@ -3063,8 +3063,8 @@ out_remove:
debugfs_remove_recursive(dfs_rootdir);
out:
err = dent ? PTR_ERR(dent) : -ENODEV;
- ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
- fname, err);
+ pr_err("UBIFS error (pid %d): cannot create \"%s\" debugfs file or directory, error %d\n",
+ current->pid, fname, err);
return err;
}
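
The power_cut_emulated() hunks above lean on a chance(n, m) helper whose definition lies outside these hunks. The call sites imply something along the lines of the sketch below, where critical areas (superblock, master, log) are made to fail far less often than ordinary buds; the exact shape of chance() is an assumption.

/* Assumed shape of the helper: true roughly n times out of every m calls. */
static inline int chance(unsigned int n, unsigned int m)
{
	return prandom_u32() % m < n;
}

/* Condensed illustration of how the call sites above use it. */
static int maybe_fail_master(struct ubifs_info *c, int lnum)
{
	if (chance(19, 20))
		return 0;	/* 19 times out of 20, do not inject a failure */
	ubifs_warn(c, "failing in master LEB %d", lnum);
	return 1;		/* pretend the power was cut at this write */
}
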
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 0fa6c803992e..27060fc855d4 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -146,12 +146,12 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
if (c->highest_inum >= INUM_WARN_WATERMARK) {
if (c->highest_inum >= INUM_WATERMARK) {
spin_unlock(&c->cnt_lock);
- ubifs_err("out of inode numbers");
+ ubifs_err(c, "out of inode numbers");
make_bad_inode(inode);
iput(inode);
return ERR_PTR(-EINVAL);
}
- ubifs_warn("running out of inode numbers (current %lu, max %d)",
+ ubifs_warn(c, "running out of inode numbers (current %lu, max %u)",
(unsigned long)c->highest_inum, INUM_WATERMARK);
}
@@ -222,7 +222,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
* checking.
*/
err = PTR_ERR(inode);
- ubifs_err("dead directory entry '%pd', error %d",
+ ubifs_err(c, "dead directory entry '%pd', error %d",
dentry, err);
ubifs_ro_mode(c, err);
goto out;
@@ -272,7 +272,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err)
- goto out_cancel;
+ goto out_inode;
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
@@ -292,11 +292,12 @@ out_cancel:
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
mutex_unlock(&dir_ui->ui_mutex);
+out_inode:
make_bad_inode(inode);
iput(inode);
out_budg:
ubifs_release_budget(c, &req);
- ubifs_err("cannot create regular file, error %d", err);
+ ubifs_err(c, "cannot create regular file, error %d", err);
return err;
}
@@ -449,7 +450,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
out:
if (err != -ENOENT) {
- ubifs_err("cannot find next direntry, error %d", err);
+ ubifs_err(c, "cannot find next direntry, error %d", err);
return err;
}
@@ -498,7 +499,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct ubifs_info *c = dir->i_sb->s_fs_info;
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_inode *dir_ui = ubifs_inode(dir);
int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
@@ -553,7 +554,7 @@ out_cancel:
static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
{
struct ubifs_info *c = dir->i_sb->s_fs_info;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ubifs_inode *dir_ui = ubifs_inode(dir);
int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
int err, budgeted = 1;
@@ -645,7 +646,7 @@ static int check_dir_empty(struct ubifs_info *c, struct inode *dir)
static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct ubifs_info *c = dir->i_sb->s_fs_info;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
int err, budgeted = 1;
struct ubifs_inode *dir_ui = ubifs_inode(dir);
@@ -661,7 +662,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_ino, dir->i_ino);
ubifs_assert(mutex_is_locked(&dir->i_mutex));
ubifs_assert(mutex_is_locked(&inode->i_mutex));
- err = check_dir_empty(c, dentry->d_inode);
+ err = check_dir_empty(c, d_inode(dentry));
if (err)
return err;
@@ -732,7 +733,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err)
- goto out_cancel;
+ goto out_inode;
mutex_lock(&dir_ui->ui_mutex);
insert_inode_hash(inode);
@@ -743,7 +744,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
dir->i_mtime = dir->i_ctime = inode->i_ctime;
err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
if (err) {
- ubifs_err("cannot create directory, error %d", err);
+ ubifs_err(c, "cannot create directory, error %d", err);
goto out_cancel;
}
mutex_unlock(&dir_ui->ui_mutex);
@@ -757,6 +758,7 @@ out_cancel:
dir_ui->ui_size = dir->i_size;
drop_nlink(dir);
mutex_unlock(&dir_ui->ui_mutex);
+out_inode:
make_bad_inode(inode);
iput(inode);
out_budg:
@@ -816,7 +818,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err)
- goto out_cancel;
+ goto out_inode;
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
@@ -836,6 +838,7 @@ out_cancel:
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
mutex_unlock(&dir_ui->ui_mutex);
+out_inode:
make_bad_inode(inode);
iput(inode);
out_budg:
@@ -896,7 +899,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err)
- goto out_cancel;
+ goto out_inode;
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
@@ -967,8 +970,8 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct ubifs_info *c = old_dir->i_sb->s_fs_info;
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct ubifs_inode *old_inode_ui = ubifs_inode(old_inode);
int err, release, sync = 0, move = (new_dir != old_dir);
int is_dir = S_ISDIR(old_inode->i_mode);
@@ -1133,7 +1136,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
loff_t size;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ubifs_inode *ui = ubifs_inode(inode);
mutex_lock(&ui->ui_mutex);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index e627c0acf626..35efc103c39c 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -50,7 +50,6 @@
*/
#include "ubifs.h"
-#include <linux/aio.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>
@@ -80,7 +79,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
out_len = UBIFS_BLOCK_SIZE;
- err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
+ err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
le16_to_cpu(dn->compr_type));
if (err || len != out_len)
goto dump;
@@ -96,7 +95,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
return 0;
dump:
- ubifs_err("bad data node (block %u, inode %lu)",
+ ubifs_err(c, "bad data node (block %u, inode %lu)",
block, inode->i_ino);
ubifs_dump_node(c, dn);
return -EINVAL;
@@ -161,13 +160,14 @@ static int do_readpage(struct page *page)
addr += UBIFS_BLOCK_SIZE;
}
if (err) {
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
if (err == -ENOENT) {
/* Not found, so it must be a hole */
SetPageChecked(page);
dbg_gen("hole");
goto out_free;
}
- ubifs_err("cannot read page %lu of inode %lu, error %d",
+ ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
page->index, inode->i_ino, err);
goto error;
}
@@ -650,7 +650,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
out_len = UBIFS_BLOCK_SIZE;
- err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
+ err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
le16_to_cpu(dn->compr_type));
if (err || len != out_len)
goto out_err;
@@ -698,7 +698,7 @@ out_err:
SetPageError(page);
flush_dcache_page(page);
kunmap(page);
- ubifs_err("bad data node (block %u, inode %lu)",
+ ubifs_err(c, "bad data node (block %u, inode %lu)",
page_block, inode->i_ino);
return -EINVAL;
}
@@ -802,7 +802,7 @@ out_free:
return ret;
out_warn:
- ubifs_warn("ignoring error %d and skipping bulk-read", err);
+ ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
goto out_free;
out_bu_off:
@@ -930,7 +930,7 @@ static int do_writepage(struct page *page, int len)
}
if (err) {
SetPageError(page);
- ubifs_err("cannot write page %lu of inode %lu, error %d",
+ ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
page->index, inode->i_ino, err);
ubifs_ro_mode(c, err);
}
@@ -1257,7 +1257,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
int err;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ubifs_info *c = inode->i_sb->s_fs_info;
dbg_gen("ino %lu, mode %#x, ia_valid %#x",
@@ -1302,7 +1302,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);
+ struct ubifs_inode *ui = ubifs_inode(d_inode(dentry));
nd_set_link(nd, ui->data);
return NULL;
@@ -1485,7 +1485,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
err = ubifs_budget_space(c, &req);
if (unlikely(err)) {
if (err == -ENOSPC)
- ubifs_warn("out of space for mmapped file (inode number %lu)",
+ ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
inode->i_ino);
return VM_FAULT_SIGBUS;
}
@@ -1581,8 +1581,6 @@ const struct inode_operations ubifs_symlink_inode_operations = {
const struct file_operations ubifs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = generic_file_read_iter,
.write_iter = ubifs_write_iter,
.mmap = ubifs_file_mmap,
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index fb08b0c514b6..97be41215332 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -85,7 +85,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
c->ro_error = 1;
c->no_chk_data_crc = 0;
c->vfs_sb->s_flags |= MS_RDONLY;
- ubifs_warn("switched to read-only mode, error %d", err);
+ ubifs_warn(c, "switched to read-only mode, error %d", err);
dump_stack();
}
}
@@ -107,7 +107,7 @@ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
* @even_ebadmsg is true.
*/
if (err && (err != -EBADMSG || even_ebadmsg)) {
- ubifs_err("reading %d bytes from LEB %d:%d failed, error %d",
+ ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
len, lnum, offs, err);
dump_stack();
}
@@ -127,7 +127,7 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
else
err = dbg_leb_write(c, lnum, buf, offs, len);
if (err) {
- ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
+ ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
len, lnum, offs, err);
ubifs_ro_mode(c, err);
dump_stack();
@@ -147,7 +147,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
else
err = dbg_leb_change(c, lnum, buf, len);
if (err) {
- ubifs_err("changing %d bytes in LEB %d failed, error %d",
+ ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
len, lnum, err);
ubifs_ro_mode(c, err);
dump_stack();
@@ -167,7 +167,7 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
else
err = dbg_leb_unmap(c, lnum);
if (err) {
- ubifs_err("unmap LEB %d failed, error %d", lnum, err);
+ ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
ubifs_ro_mode(c, err);
dump_stack();
}
@@ -186,7 +186,7 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum)
else
err = dbg_leb_map(c, lnum);
if (err) {
- ubifs_err("mapping LEB %d failed, error %d", lnum, err);
+ ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
ubifs_ro_mode(c, err);
dump_stack();
}
@@ -199,7 +199,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
err = ubi_is_mapped(c->ubi, lnum);
if (err < 0) {
- ubifs_err("ubi_is_mapped failed for LEB %d, error %d",
+ ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
lnum, err);
dump_stack();
}
@@ -247,7 +247,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
magic = le32_to_cpu(ch->magic);
if (magic != UBIFS_NODE_MAGIC) {
if (!quiet)
- ubifs_err("bad magic %#08x, expected %#08x",
+ ubifs_err(c, "bad magic %#08x, expected %#08x",
magic, UBIFS_NODE_MAGIC);
err = -EUCLEAN;
goto out;
@@ -256,7 +256,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
type = ch->node_type;
if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
if (!quiet)
- ubifs_err("bad node type %d", type);
+ ubifs_err(c, "bad node type %d", type);
goto out;
}
@@ -279,7 +279,7 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
node_crc = le32_to_cpu(ch->crc);
if (crc != node_crc) {
if (!quiet)
- ubifs_err("bad CRC: calculated %#08x, read %#08x",
+ ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
crc, node_crc);
err = -EUCLEAN;
goto out;
@@ -289,10 +289,10 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
out_len:
if (!quiet)
- ubifs_err("bad node length %d", node_len);
+ ubifs_err(c, "bad node length %d", node_len);
out:
if (!quiet) {
- ubifs_err("bad node at LEB %d:%d", lnum, offs);
+ ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
ubifs_dump_node(c, buf);
dump_stack();
}
@@ -355,11 +355,11 @@ static unsigned long long next_sqnum(struct ubifs_info *c)
if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
if (sqnum >= SQNUM_WATERMARK) {
- ubifs_err("sequence number overflow %llu, end of life",
+ ubifs_err(c, "sequence number overflow %llu, end of life",
sqnum);
ubifs_ro_mode(c, -EINVAL);
}
- ubifs_warn("running out of sequence numbers, end of life soon");
+ ubifs_warn(c, "running out of sequence numbers, end of life soon");
}
return sqnum;
@@ -636,7 +636,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
err = ubifs_wbuf_sync_nolock(wbuf);
mutex_unlock(&wbuf->io_mutex);
if (err) {
- ubifs_err("cannot sync write-buffer, error %d", err);
+ ubifs_err(c, "cannot sync write-buffer, error %d", err);
ubifs_ro_mode(c, err);
goto out_timers;
}
@@ -833,7 +833,7 @@ exit:
return 0;
out:
- ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
+ ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
len, wbuf->lnum, wbuf->offs, err);
ubifs_dump_node(c, buf);
dump_stack();
@@ -932,27 +932,27 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
}
if (type != ch->node_type) {
- ubifs_err("bad node type (%d but expected %d)",
+ ubifs_err(c, "bad node type (%d but expected %d)",
ch->node_type, type);
goto out;
}
err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
if (err) {
- ubifs_err("expected node type %d", type);
+ ubifs_err(c, "expected node type %d", type);
return err;
}
rlen = le32_to_cpu(ch->len);
if (rlen != len) {
- ubifs_err("bad node length %d, expected %d", rlen, len);
+ ubifs_err(c, "bad node length %d, expected %d", rlen, len);
goto out;
}
return 0;
out:
- ubifs_err("bad node at LEB %d:%d", lnum, offs);
+ ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
ubifs_dump_node(c, buf);
dump_stack();
return -EINVAL;
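
The ubifs_check_node() hunks above show the validation order, magic, node type, length, then CRC, without the surrounding code. A condensed sketch of that sequence follows; the CRC coverage (everything past the 8-byte magic/crc prefix) and the simplified length bound are assumptions, since those parts lie outside the hunks shown.

/* Condensed sketch of the checks visible above; not the real function. */
static int check_common_header(const struct ubifs_info *c, const void *buf,
			       int lnum, int offs)
{
	const struct ubifs_ch *ch = buf;
	int node_len = le32_to_cpu(ch->len);
	uint32_t crc, node_crc;

	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
		ubifs_err(c, "bad magic %#08x, expected %#08x",
			  le32_to_cpu(ch->magic), UBIFS_NODE_MAGIC);
		return -EUCLEAN;
	}

	if (ch->node_type >= UBIFS_NODE_TYPES_CNT) {
		ubifs_err(c, "bad node type %d", ch->node_type);
		return -EINVAL;
	}

	if (node_len < UBIFS_CH_SZ || node_len > c->leb_size) {
		ubifs_err(c, "bad node length %d", node_len);
		return -EINVAL;
	}

	/* Assumed: CRC over the node body, skipping the magic/crc prefix. */
	crc = crc32(UBIFS_CRC32_INIT, (const u8 *)buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		ubifs_err(c, "bad CRC: calculated %#08x, read %#08x, LEB %d:%d",
			  crc, node_crc, lnum, offs);
		return -EUCLEAN;
	}

	return 0;
}
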
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 648b143606cc..3c7b29de0ca7 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -138,7 +138,7 @@ static int setflags(struct inode *inode, int flags)
return err;
out_unlock:
- ubifs_err("can't modify inode %lu attributes", inode->i_ino);
+ ubifs_err(c, "can't modify inode %lu attributes", inode->i_ino);
mutex_unlock(&ui->ui_mutex);
ubifs_release_budget(c, &req);
return err;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index f6ac3f29323c..0b9da5b6e0f9 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -363,11 +363,11 @@ again:
* This should not happen unless the journal size limitations
* are too tough.
*/
- ubifs_err("stuck in space allocation");
+ ubifs_err(c, "stuck in space allocation");
err = -ENOSPC;
goto out;
} else if (cmt_retries > 32)
- ubifs_warn("too many space allocation re-tries (%d)",
+ ubifs_warn(c, "too many space allocation re-tries (%d)",
cmt_retries);
dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
@@ -380,7 +380,7 @@ again:
goto again;
out:
- ubifs_err("cannot reserve %d bytes in jhead %d, error %d",
+ ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
len, jhead, err);
if (err == -ENOSPC) {
/* This are some budgeting problems, print useful information */
@@ -731,7 +731,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
compr_type = ui->compr_type;
out_len = dlen - UBIFS_DATA_NODE_SZ;
- ubifs_compress(buf, len, &data->data, &out_len, &compr_type);
+ ubifs_compress(c, buf, len, &data->data, &out_len, &compr_type);
ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -930,8 +930,8 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
union ubifs_key key;
struct ubifs_dent_node *dent, *dent2;
int err, dlen1, dlen2, ilen, lnum, offs, len;
- const struct inode *old_inode = old_dentry->d_inode;
- const struct inode *new_inode = new_dentry->d_inode;
+ const struct inode *old_inode = d_inode(old_dentry);
+ const struct inode *new_inode = d_inode(new_dentry);
int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
int last_reference = !!(new_inode && new_inode->i_nlink == 0);
int move = (old_dir != new_dir);
@@ -1100,7 +1100,8 @@ out_free:
* This function is used when an inode is truncated and the last data node of
* the inode has to be re-compressed and re-written.
*/
-static int recomp_data_node(struct ubifs_data_node *dn, int *new_len)
+static int recomp_data_node(const struct ubifs_info *c,
+ struct ubifs_data_node *dn, int *new_len)
{
void *buf;
int err, len, compr_type, out_len;
@@ -1112,11 +1113,11 @@ static int recomp_data_node(struct ubifs_data_node *dn, int *new_len)
len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
compr_type = le16_to_cpu(dn->compr_type);
- err = ubifs_decompress(&dn->data, len, buf, &out_len, compr_type);
+ err = ubifs_decompress(c, &dn->data, len, buf, &out_len, compr_type);
if (err)
goto out;
- ubifs_compress(buf, *new_len, &dn->data, &out_len, &compr_type);
+ ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
dn->compr_type = cpu_to_le16(compr_type);
dn->size = cpu_to_le32(*new_len);
@@ -1191,7 +1192,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
int compr_type = le16_to_cpu(dn->compr_type);
if (compr_type != UBIFS_COMPR_NONE) {
- err = recomp_data_node(dn, &dlen);
+ err = recomp_data_node(c, dn, &dlen);
if (err)
goto out_free;
} else {
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index c14628fbeee2..8c795e6392b1 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -696,7 +696,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
destroy_done_tree(&done_tree);
vfree(buf);
if (write_lnum == c->lhead_lnum) {
- ubifs_err("log is too full");
+ ubifs_err(c, "log is too full");
return -EINVAL;
}
/* Unmap remaining LEBs */
@@ -743,7 +743,7 @@ static int dbg_check_bud_bytes(struct ubifs_info *c)
bud_bytes += c->leb_size - bud->start;
if (c->bud_bytes != bud_bytes) {
- ubifs_err("bad bud_bytes %lld, calculated %lld",
+ ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
c->bud_bytes, bud_bytes);
err = -EINVAL;
}
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index 46190a7c42a6..a0011aa3a779 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -682,7 +682,7 @@ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
out:
ubifs_release_lprops(c);
if (err)
- ubifs_err("cannot change properties of LEB %d, error %d",
+ ubifs_err(c, "cannot change properties of LEB %d, error %d",
lnum, err);
return err;
}
@@ -721,7 +721,7 @@ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
out:
ubifs_release_lprops(c);
if (err)
- ubifs_err("cannot update properties of LEB %d, error %d",
+ ubifs_err(c, "cannot update properties of LEB %d, error %d",
lnum, err);
return err;
}
@@ -746,7 +746,7 @@ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp)
lpp = ubifs_lpt_lookup(c, lnum);
if (IS_ERR(lpp)) {
err = PTR_ERR(lpp);
- ubifs_err("cannot read properties of LEB %d, error %d",
+ ubifs_err(c, "cannot read properties of LEB %d, error %d",
lnum, err);
goto out;
}
@@ -873,13 +873,13 @@ int dbg_check_cats(struct ubifs_info *c)
list_for_each_entry(lprops, &c->empty_list, list) {
if (lprops->free != c->leb_size) {
- ubifs_err("non-empty LEB %d on empty list (free %d dirty %d flags %d)",
+ ubifs_err(c, "non-empty LEB %d on empty list (free %d dirty %d flags %d)",
lprops->lnum, lprops->free, lprops->dirty,
lprops->flags);
return -EINVAL;
}
if (lprops->flags & LPROPS_TAKEN) {
- ubifs_err("taken LEB %d on empty list (free %d dirty %d flags %d)",
+ ubifs_err(c, "taken LEB %d on empty list (free %d dirty %d flags %d)",
lprops->lnum, lprops->free, lprops->dirty,
lprops->flags);
return -EINVAL;
@@ -889,13 +889,13 @@ int dbg_check_cats(struct ubifs_info *c)
i = 0;
list_for_each_entry(lprops, &c->freeable_list, list) {
if (lprops->free + lprops->dirty != c->leb_size) {
- ubifs_err("non-freeable LEB %d on freeable list (free %d dirty %d flags %d)",
+ ubifs_err(c, "non-freeable LEB %d on freeable list (free %d dirty %d flags %d)",
lprops->lnum, lprops->free, lprops->dirty,
lprops->flags);
return -EINVAL;
}
if (lprops->flags & LPROPS_TAKEN) {
- ubifs_err("taken LEB %d on freeable list (free %d dirty %d flags %d)",
+ ubifs_err(c, "taken LEB %d on freeable list (free %d dirty %d flags %d)",
lprops->lnum, lprops->free, lprops->dirty,
lprops->flags);
return -EINVAL;
@@ -903,7 +903,7 @@ int dbg_check_cats(struct ubifs_info *c)
i += 1;
}
if (i != c->freeable_cnt) {
- ubifs_err("freeable list count %d expected %d", i,
+ ubifs_err(c, "freeable list count %d expected %d", i,
c->freeable_cnt);
return -EINVAL;
}
@@ -912,26 +912,26 @@ int dbg_check_cats(struct ubifs_info *c)
list_for_each(pos, &c->idx_gc)
i += 1;
if (i != c->idx_gc_cnt) {
- ubifs_err("idx_gc list count %d expected %d", i,
+ ubifs_err(c, "idx_gc list count %d expected %d", i,
c->idx_gc_cnt);
return -EINVAL;
}
list_for_each_entry(lprops, &c->frdi_idx_list, list) {
if (lprops->free + lprops->dirty != c->leb_size) {
- ubifs_err("non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+ ubifs_err(c, "non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)",
lprops->lnum, lprops->free, lprops->dirty,
lprops->flags);
return -EINVAL;
}
if (lprops->flags & LPROPS_TAKEN) {
- ubifs_err("taken LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+ ubifs_err(c, "taken LEB %d on frdi_idx list (free %d dirty %d flags %d)",
lprops->lnum, lprops->free, lprops->dirty,
lprops->flags);
return -EINVAL;
}
if (!(lprops->flags & LPROPS_INDEX)) {
- ubifs_err("non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+ ubifs_err(c, "non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)",
lprops->lnum, lprops->free, lprops->dirty,
lprops->flags);
return -EINVAL;
@@ -944,15 +944,15 @@ int dbg_check_cats(struct ubifs_info *c)
for (i = 0; i < heap->cnt; i++) {
lprops = heap->arr[i];
if (!lprops) {
- ubifs_err("null ptr in LPT heap cat %d", cat);
+ ubifs_err(c, "null ptr in LPT heap cat %d", cat);
return -EINVAL;
}
if (lprops->hpos != i) {
- ubifs_err("bad ptr in LPT heap cat %d", cat);
+ ubifs_err(c, "bad ptr in LPT heap cat %d", cat);
return -EINVAL;
}
if (lprops->flags & LPROPS_TAKEN) {
- ubifs_err("taken LEB in LPT heap cat %d", cat);
+ ubifs_err(c, "taken LEB in LPT heap cat %d", cat);
return -EINVAL;
}
}
@@ -988,7 +988,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
goto out;
}
if (lprops != lp) {
- ubifs_err("lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
+ ubifs_err(c, "lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
(size_t)lprops, (size_t)lp, lprops->lnum,
lp->lnum);
err = 4;
@@ -1008,7 +1008,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
}
out:
if (err) {
- ubifs_err("failed cat %d hpos %d err %d", cat, i, err);
+ ubifs_err(c, "failed cat %d hpos %d err %d", cat, i, err);
dump_stack();
ubifs_dump_heap(c, heap, cat);
}
@@ -1039,7 +1039,7 @@ static int scan_check_cb(struct ubifs_info *c,
if (cat != LPROPS_UNCAT) {
cat = ubifs_categorize_lprops(c, lp);
if (cat != (lp->flags & LPROPS_CAT_MASK)) {
- ubifs_err("bad LEB category %d expected %d",
+ ubifs_err(c, "bad LEB category %d expected %d",
(lp->flags & LPROPS_CAT_MASK), cat);
return -EINVAL;
}
@@ -1074,7 +1074,7 @@ static int scan_check_cb(struct ubifs_info *c,
}
}
if (!found) {
- ubifs_err("bad LPT list (category %d)", cat);
+ ubifs_err(c, "bad LPT list (category %d)", cat);
return -EINVAL;
}
}
@@ -1086,7 +1086,7 @@ static int scan_check_cb(struct ubifs_info *c,
if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) ||
lp != heap->arr[lp->hpos]) {
- ubifs_err("bad LPT heap (category %d)", cat);
+ ubifs_err(c, "bad LPT heap (category %d)", cat);
return -EINVAL;
}
}
@@ -1133,7 +1133,7 @@ static int scan_check_cb(struct ubifs_info *c,
is_idx = (snod->type == UBIFS_IDX_NODE) ? 1 : 0;
if (is_idx && snod->type != UBIFS_IDX_NODE) {
- ubifs_err("indexing node in data LEB %d:%d",
+ ubifs_err(c, "indexing node in data LEB %d:%d",
lnum, snod->offs);
goto out_destroy;
}
@@ -1159,7 +1159,7 @@ static int scan_check_cb(struct ubifs_info *c,
if (free > c->leb_size || free < 0 || dirty > c->leb_size ||
dirty < 0) {
- ubifs_err("bad calculated accounting for LEB %d: free %d, dirty %d",
+ ubifs_err(c, "bad calculated accounting for LEB %d: free %d, dirty %d",
lnum, free, dirty);
goto out_destroy;
}
@@ -1206,13 +1206,13 @@ static int scan_check_cb(struct ubifs_info *c,
/* Free but not unmapped LEB, it's fine */
is_idx = 0;
else {
- ubifs_err("indexing node without indexing flag");
+ ubifs_err(c, "indexing node without indexing flag");
goto out_print;
}
}
if (!is_idx && (lp->flags & LPROPS_INDEX)) {
- ubifs_err("data node with indexing flag");
+ ubifs_err(c, "data node with indexing flag");
goto out_print;
}
@@ -1241,7 +1241,7 @@ static int scan_check_cb(struct ubifs_info *c,
return LPT_SCAN_CONTINUE;
out_print:
- ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d",
+ ubifs_err(c, "bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d",
lnum, lp->free, lp->dirty, lp->flags, free, dirty);
ubifs_dump_leb(c, lnum);
out_destroy:
@@ -1293,11 +1293,11 @@ int dbg_check_lprops(struct ubifs_info *c)
lst.total_free != c->lst.total_free ||
lst.total_dirty != c->lst.total_dirty ||
lst.total_used != c->lst.total_used) {
- ubifs_err("bad overall accounting");
- ubifs_err("calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
+ ubifs_err(c, "bad overall accounting");
+ ubifs_err(c, "calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
lst.empty_lebs, lst.idx_lebs, lst.total_free,
lst.total_dirty, lst.total_used);
- ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
+ ubifs_err(c, "read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free,
c->lst.total_dirty, c->lst.total_used);
err = -EINVAL;
@@ -1306,10 +1306,10 @@ int dbg_check_lprops(struct ubifs_info *c)
if (lst.total_dead != c->lst.total_dead ||
lst.total_dark != c->lst.total_dark) {
- ubifs_err("bad dead/dark space accounting");
- ubifs_err("calculated: total_dead %lld, total_dark %lld",
+ ubifs_err(c, "bad dead/dark space accounting");
+ ubifs_err(c, "calculated: total_dead %lld, total_dark %lld",
lst.total_dead, lst.total_dark);
- ubifs_err("read from lprops: total_dead %lld, total_dark %lld",
+ ubifs_err(c, "read from lprops: total_dead %lld, total_dark %lld",
c->lst.total_dead, c->lst.total_dark);
err = -EINVAL;
goto out;
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 421bd0a80424..dc9f27e9d61b 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -145,13 +145,13 @@ int ubifs_calc_lpt_geom(struct ubifs_info *c)
sz = c->lpt_sz * 2; /* Must have at least 2 times the size */
lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size);
if (lebs_needed > c->lpt_lebs) {
- ubifs_err("too few LPT LEBs");
+ ubifs_err(c, "too few LPT LEBs");
return -EINVAL;
}
/* Verify that ltab fits in a single LEB (since ltab is a single node */
if (c->ltab_sz > c->leb_size) {
- ubifs_err("LPT ltab too big");
+ ubifs_err(c, "LPT ltab too big");
return -EINVAL;
}
@@ -213,7 +213,7 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs,
continue;
}
if (c->ltab_sz > c->leb_size) {
- ubifs_err("LPT ltab too big");
+ ubifs_err(c, "LPT ltab too big");
return -EINVAL;
}
*main_lebs = c->main_lebs;
@@ -911,7 +911,7 @@ static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode,
*
* This function returns %0 on success and a negative error code on failure.
*/
-static int check_lpt_crc(void *buf, int len)
+static int check_lpt_crc(const struct ubifs_info *c, void *buf, int len)
{
int pos = 0;
uint8_t *addr = buf;
@@ -921,8 +921,8 @@ static int check_lpt_crc(void *buf, int len)
calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
len - UBIFS_LPT_CRC_BYTES);
if (crc != calc_crc) {
- ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc,
- calc_crc);
+ ubifs_err(c, "invalid crc in LPT node: crc %hx calc %hx",
+ crc, calc_crc);
dump_stack();
return -EINVAL;
}
@@ -938,14 +938,15 @@ static int check_lpt_crc(void *buf, int len)
*
* This function returns %0 on success and a negative error code on failure.
*/
-static int check_lpt_type(uint8_t **addr, int *pos, int type)
+static int check_lpt_type(const struct ubifs_info *c, uint8_t **addr,
+ int *pos, int type)
{
int node_type;
node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS);
if (node_type != type) {
- ubifs_err("invalid type (%d) in LPT node type %d", node_type,
- type);
+ ubifs_err(c, "invalid type (%d) in LPT node type %d",
+ node_type, type);
dump_stack();
return -EINVAL;
}
@@ -966,7 +967,7 @@ static int unpack_pnode(const struct ubifs_info *c, void *buf,
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
int i, pos = 0, err;
- err = check_lpt_type(&addr, &pos, UBIFS_LPT_PNODE);
+ err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_PNODE);
if (err)
return err;
if (c->big_lpt)
@@ -985,7 +986,7 @@ static int unpack_pnode(const struct ubifs_info *c, void *buf,
lprops->flags = 0;
lprops->flags |= ubifs_categorize_lprops(c, lprops);
}
- err = check_lpt_crc(buf, c->pnode_sz);
+ err = check_lpt_crc(c, buf, c->pnode_sz);
return err;
}
@@ -1003,7 +1004,7 @@ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
int i, pos = 0, err;
- err = check_lpt_type(&addr, &pos, UBIFS_LPT_NNODE);
+ err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_NNODE);
if (err)
return err;
if (c->big_lpt)
@@ -1019,7 +1020,7 @@ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos,
c->lpt_offs_bits);
}
- err = check_lpt_crc(buf, c->nnode_sz);
+ err = check_lpt_crc(c, buf, c->nnode_sz);
return err;
}
@@ -1035,7 +1036,7 @@ static int unpack_ltab(const struct ubifs_info *c, void *buf)
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
int i, pos = 0, err;
- err = check_lpt_type(&addr, &pos, UBIFS_LPT_LTAB);
+ err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LTAB);
if (err)
return err;
for (i = 0; i < c->lpt_lebs; i++) {
@@ -1051,7 +1052,7 @@ static int unpack_ltab(const struct ubifs_info *c, void *buf)
c->ltab[i].tgc = 0;
c->ltab[i].cmt = 0;
}
- err = check_lpt_crc(buf, c->ltab_sz);
+ err = check_lpt_crc(c, buf, c->ltab_sz);
return err;
}
@@ -1067,7 +1068,7 @@ static int unpack_lsave(const struct ubifs_info *c, void *buf)
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
int i, pos = 0, err;
- err = check_lpt_type(&addr, &pos, UBIFS_LPT_LSAVE);
+ err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LSAVE);
if (err)
return err;
for (i = 0; i < c->lsave_cnt; i++) {
@@ -1077,7 +1078,7 @@ static int unpack_lsave(const struct ubifs_info *c, void *buf)
return -EINVAL;
c->lsave[i] = lnum;
}
- err = check_lpt_crc(buf, c->lsave_sz);
+ err = check_lpt_crc(c, buf, c->lsave_sz);
return err;
}
@@ -1243,7 +1244,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
return 0;
out:
- ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs);
+ ubifs_err(c, "error %d reading nnode at %d:%d", err, lnum, offs);
dump_stack();
kfree(nnode);
return err;
@@ -1308,10 +1309,10 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
return 0;
out:
- ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
+ ubifs_err(c, "error %d reading pnode at %d:%d", err, lnum, offs);
ubifs_dump_pnode(c, pnode, parent, iip);
dump_stack();
- ubifs_err("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
+ ubifs_err(c, "calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
kfree(pnode);
return err;
}
@@ -2095,7 +2096,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
int i;
if (pnode->num != col) {
- ubifs_err("pnode num %d expected %d parent num %d iip %d",
+ ubifs_err(c, "pnode num %d expected %d parent num %d iip %d",
pnode->num, col, pnode->parent->num, pnode->iip);
return -EINVAL;
}
@@ -2110,13 +2111,13 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
if (lnum >= c->leb_cnt)
continue;
if (lprops->lnum != lnum) {
- ubifs_err("bad LEB number %d expected %d",
+ ubifs_err(c, "bad LEB number %d expected %d",
lprops->lnum, lnum);
return -EINVAL;
}
if (lprops->flags & LPROPS_TAKEN) {
if (cat != LPROPS_UNCAT) {
- ubifs_err("LEB %d taken but not uncat %d",
+ ubifs_err(c, "LEB %d taken but not uncat %d",
lprops->lnum, cat);
return -EINVAL;
}
@@ -2129,7 +2130,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
case LPROPS_FRDI_IDX:
break;
default:
- ubifs_err("LEB %d index but cat %d",
+ ubifs_err(c, "LEB %d index but cat %d",
lprops->lnum, cat);
return -EINVAL;
}
@@ -2142,7 +2143,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
case LPROPS_FREEABLE:
break;
default:
- ubifs_err("LEB %d not index but cat %d",
+ ubifs_err(c, "LEB %d not index but cat %d",
lprops->lnum, cat);
return -EINVAL;
}
@@ -2183,14 +2184,14 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
break;
}
if (!found) {
- ubifs_err("LEB %d cat %d not found in cat heap/list",
+ ubifs_err(c, "LEB %d cat %d not found in cat heap/list",
lprops->lnum, cat);
return -EINVAL;
}
switch (cat) {
case LPROPS_EMPTY:
if (lprops->free != c->leb_size) {
- ubifs_err("LEB %d cat %d free %d dirty %d",
+ ubifs_err(c, "LEB %d cat %d free %d dirty %d",
lprops->lnum, cat, lprops->free,
lprops->dirty);
return -EINVAL;
@@ -2199,7 +2200,7 @@ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
case LPROPS_FREEABLE:
case LPROPS_FRDI_IDX:
if (lprops->free + lprops->dirty != c->leb_size) {
- ubifs_err("LEB %d cat %d free %d dirty %d",
+ ubifs_err(c, "LEB %d cat %d free %d dirty %d",
lprops->lnum, cat, lprops->free,
lprops->dirty);
return -EINVAL;
@@ -2236,7 +2237,7 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
/* cnode is a nnode */
num = calc_nnode_num(row, col);
if (cnode->num != num) {
- ubifs_err("nnode num %d expected %d parent num %d iip %d",
+ ubifs_err(c, "nnode num %d expected %d parent num %d iip %d",
cnode->num, num,
(nnode ? nnode->num : 0), cnode->iip);
return -EINVAL;
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index d9c02928e992..ce89bdc3eb02 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -319,7 +319,7 @@ static int layout_cnodes(struct ubifs_info *c)
return 0;
no_space:
- ubifs_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
+ ubifs_err(c, "LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
lnum, offs, len, done_ltab, done_lsave);
ubifs_dump_lpt_info(c);
ubifs_dump_lpt_lebs(c);
@@ -543,7 +543,7 @@ static int write_cnodes(struct ubifs_info *c)
return 0;
no_space:
- ubifs_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
+ ubifs_err(c, "LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
lnum, offs, len, done_ltab, done_lsave);
ubifs_dump_lpt_info(c);
ubifs_dump_lpt_lebs(c);
@@ -1638,7 +1638,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
- ubifs_err("cannot allocate memory for ltab checking");
+ ubifs_err(c, "cannot allocate memory for ltab checking");
return 0;
}
@@ -1660,18 +1660,18 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
continue;
}
if (!dbg_is_all_ff(p, len)) {
- ubifs_err("invalid empty space in LEB %d at %d",
+ ubifs_err(c, "invalid empty space in LEB %d at %d",
lnum, c->leb_size - len);
err = -EINVAL;
}
i = lnum - c->lpt_first;
if (len != c->ltab[i].free) {
- ubifs_err("invalid free space in LEB %d (free %d, expected %d)",
+ ubifs_err(c, "invalid free space in LEB %d (free %d, expected %d)",
lnum, len, c->ltab[i].free);
err = -EINVAL;
}
if (dirty != c->ltab[i].dirty) {
- ubifs_err("invalid dirty space in LEB %d (dirty %d, expected %d)",
+ ubifs_err(c, "invalid dirty space in LEB %d (dirty %d, expected %d)",
lnum, dirty, c->ltab[i].dirty);
err = -EINVAL;
}
@@ -1725,7 +1725,7 @@ int dbg_check_ltab(struct ubifs_info *c)
for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
err = dbg_check_ltab_lnum(c, lnum);
if (err) {
- ubifs_err("failed at LEB %d", lnum);
+ ubifs_err(c, "failed at LEB %d", lnum);
return err;
}
}
@@ -1757,7 +1757,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c)
free += c->leb_size;
}
if (free < c->lpt_sz) {
- ubifs_err("LPT space error: free %lld lpt_sz %lld",
+ ubifs_err(c, "LPT space error: free %lld lpt_sz %lld",
free, c->lpt_sz);
ubifs_dump_lpt_info(c);
ubifs_dump_lpt_lebs(c);
@@ -1797,12 +1797,12 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
d->chk_lpt_lebs = 0;
d->chk_lpt_wastage = 0;
if (c->dirty_pn_cnt > c->pnode_cnt) {
- ubifs_err("dirty pnodes %d exceed max %d",
+ ubifs_err(c, "dirty pnodes %d exceed max %d",
c->dirty_pn_cnt, c->pnode_cnt);
err = -EINVAL;
}
if (c->dirty_nn_cnt > c->nnode_cnt) {
- ubifs_err("dirty nnodes %d exceed max %d",
+ ubifs_err(c, "dirty nnodes %d exceed max %d",
c->dirty_nn_cnt, c->nnode_cnt);
err = -EINVAL;
}
@@ -1820,22 +1820,22 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
chk_lpt_sz *= d->chk_lpt_lebs;
chk_lpt_sz += len - c->nhead_offs;
if (d->chk_lpt_sz != chk_lpt_sz) {
- ubifs_err("LPT wrote %lld but space used was %lld",
+ ubifs_err(c, "LPT wrote %lld but space used was %lld",
d->chk_lpt_sz, chk_lpt_sz);
err = -EINVAL;
}
if (d->chk_lpt_sz > c->lpt_sz) {
- ubifs_err("LPT wrote %lld but lpt_sz is %lld",
+ ubifs_err(c, "LPT wrote %lld but lpt_sz is %lld",
d->chk_lpt_sz, c->lpt_sz);
err = -EINVAL;
}
if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) {
- ubifs_err("LPT layout size %lld but wrote %lld",
+ ubifs_err(c, "LPT layout size %lld but wrote %lld",
d->chk_lpt_sz, d->chk_lpt_sz2);
err = -EINVAL;
}
if (d->chk_lpt_sz2 && d->new_nhead_offs != len) {
- ubifs_err("LPT new nhead offs: expected %d was %d",
+ ubifs_err(c, "LPT new nhead offs: expected %d was %d",
d->new_nhead_offs, len);
err = -EINVAL;
}
@@ -1845,7 +1845,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
if (c->big_lpt)
lpt_sz += c->lsave_sz;
if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) {
- ubifs_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
+ ubifs_err(c, "LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz);
err = -EINVAL;
}
@@ -1887,7 +1887,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
- ubifs_err("cannot allocate memory to dump LPT");
+ ubifs_err(c, "cannot allocate memory to dump LPT");
return;
}
@@ -1962,7 +1962,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
pr_err("LEB %d:%d, lsave len\n", lnum, offs);
break;
default:
- ubifs_err("LPT node type %d not recognized", node_type);
+ ubifs_err(c, "LPT node type %d not recognized", node_type);
goto out;
}
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 1a4bb9e8b3b8..c6a5e39e2ba5 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -82,7 +82,7 @@ out:
return -EUCLEAN;
out_dump:
- ubifs_err("unexpected node type %d master LEB %d:%d",
+ ubifs_err(c, "unexpected node type %d master LEB %d:%d",
snod->type, lnum, snod->offs);
ubifs_scan_destroy(sleb);
return -EINVAL;
@@ -240,7 +240,7 @@ static int validate_master(const struct ubifs_info *c)
return 0;
out:
- ubifs_err("bad master node at offset %d error %d", c->mst_offs, err);
+ ubifs_err(c, "bad master node at offset %d error %d", c->mst_offs, err);
ubifs_dump_node(c, c->mst_node);
return -EINVAL;
}
@@ -316,7 +316,7 @@ int ubifs_read_master(struct ubifs_info *c)
if (c->leb_cnt < old_leb_cnt ||
c->leb_cnt < UBIFS_MIN_LEB_CNT) {
- ubifs_err("bad leb_cnt on master node");
+ ubifs_err(c, "bad leb_cnt on master node");
ubifs_dump_node(c, c->mst_node);
return -EINVAL;
}
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 4409f486ecef..caf2d123e9ee 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -88,7 +88,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
else if (inum > o->inum)
p = &(*p)->rb_right;
else {
- ubifs_err("orphaned twice");
+ ubifs_err(c, "orphaned twice");
spin_unlock(&c->orphan_lock);
kfree(orphan);
return 0;
@@ -155,7 +155,7 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
}
}
spin_unlock(&c->orphan_lock);
- ubifs_err("missing orphan ino %lu", (unsigned long)inum);
+ ubifs_err(c, "missing orphan ino %lu", (unsigned long)inum);
dump_stack();
}
@@ -287,7 +287,7 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
* We limit the number of orphans so that this should
* never happen.
*/
- ubifs_err("out of space in orphan area");
+ ubifs_err(c, "out of space in orphan area");
return -EINVAL;
}
}
@@ -397,7 +397,7 @@ static int consolidate(struct ubifs_info *c)
* We limit the number of orphans so that this should
* never happen.
*/
- ubifs_err("out of space in orphan area");
+ ubifs_err(c, "out of space in orphan area");
err = -EINVAL;
}
spin_unlock(&c->orphan_lock);
@@ -569,7 +569,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
list_for_each_entry(snod, &sleb->nodes, list) {
if (snod->type != UBIFS_ORPH_NODE) {
- ubifs_err("invalid node type %d in orphan area at %d:%d",
+ ubifs_err(c, "invalid node type %d in orphan area at %d:%d",
snod->type, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
return -EINVAL;
@@ -596,7 +596,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
* number. That makes this orphan node, out of date.
*/
if (!first) {
- ubifs_err("out of order commit number %llu in orphan node at %d:%d",
+ ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d",
cmt_no, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
return -EINVAL;
@@ -831,20 +831,20 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
if (inum != ci->last_ino) {
/* Lowest node type is the inode node, so it comes first */
if (key_type(c, &zbr->key) != UBIFS_INO_KEY)
- ubifs_err("found orphan node ino %lu, type %d",
+ ubifs_err(c, "found orphan node ino %lu, type %d",
(unsigned long)inum, key_type(c, &zbr->key));
ci->last_ino = inum;
ci->tot_inos += 1;
err = ubifs_tnc_read_node(c, zbr, ci->node);
if (err) {
- ubifs_err("node read failed, error %d", err);
+ ubifs_err(c, "node read failed, error %d", err);
return err;
}
if (ci->node->nlink == 0)
/* Must be recorded as an orphan */
if (!dbg_find_check_orphan(&ci->root, inum) &&
!dbg_find_orphan(c, inum)) {
- ubifs_err("missing orphan, ino %lu",
+ ubifs_err(c, "missing orphan, ino %lu",
(unsigned long)inum);
ci->missing += 1;
}
@@ -887,7 +887,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci)
buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
- ubifs_err("cannot allocate memory to check orphans");
+ ubifs_err(c, "cannot allocate memory to check orphans");
return 0;
}
@@ -925,7 +925,7 @@ static int dbg_check_orphans(struct ubifs_info *c)
ci.root = RB_ROOT;
ci.node = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
if (!ci.node) {
- ubifs_err("out of memory");
+ ubifs_err(c, "out of memory");
return -ENOMEM;
}
@@ -935,12 +935,12 @@ static int dbg_check_orphans(struct ubifs_info *c)
err = dbg_walk_index(c, &dbg_orphan_check, NULL, &ci);
if (err) {
- ubifs_err("cannot scan TNC, error %d", err);
+ ubifs_err(c, "cannot scan TNC, error %d", err);
goto out;
}
if (ci.missing) {
- ubifs_err("%lu missing orphan(s)", ci.missing);
+ ubifs_err(c, "%lu missing orphan(s)", ci.missing);
err = -EINVAL;
goto out;
}
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index c640938f62f0..695fc71d5244 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -305,7 +305,7 @@ int ubifs_recover_master_node(struct ubifs_info *c)
mst = mst2;
}
- ubifs_msg("recovered master node from LEB %d",
+ ubifs_msg(c, "recovered master node from LEB %d",
(mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));
memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
@@ -360,13 +360,13 @@ int ubifs_recover_master_node(struct ubifs_info *c)
out_err:
err = -EINVAL;
out_free:
- ubifs_err("failed to recover master node");
+ ubifs_err(c, "failed to recover master node");
if (mst1) {
- ubifs_err("dumping first master node");
+ ubifs_err(c, "dumping first master node");
ubifs_dump_node(c, mst1);
}
if (mst2) {
- ubifs_err("dumping second master node");
+ ubifs_err(c, "dumping second master node");
ubifs_dump_node(c, mst2);
}
vfree(buf2);
@@ -682,7 +682,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
ret, lnum, offs);
break;
} else {
- ubifs_err("unexpected return value %d", ret);
+ ubifs_err(c, "unexpected return value %d", ret);
err = -EINVAL;
goto error;
}
@@ -702,7 +702,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
* See header comment for this file for more
* explanations about the reasons we have this check.
*/
- ubifs_err("corrupt empty space LEB %d:%d, corruption starts at %d",
+ ubifs_err(c, "corrupt empty space LEB %d:%d, corruption starts at %d",
lnum, offs, corruption);
/* Make sure we dump interesting non-0xFF data */
offs += corruption;
@@ -788,13 +788,13 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
corrupted_rescan:
/* Re-scan the corrupted data with verbose messages */
- ubifs_err("corruption %d", ret);
+ ubifs_err(c, "corruption %d", ret);
ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
corrupted:
ubifs_scanned_corruption(c, lnum, offs, buf);
err = -EUCLEAN;
error:
- ubifs_err("LEB %d scanning failed", lnum);
+ ubifs_err(c, "LEB %d scanning failed", lnum);
ubifs_scan_destroy(sleb);
return ERR_PTR(err);
}
@@ -826,15 +826,15 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
goto out_free;
ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
if (ret != SCANNED_A_NODE) {
- ubifs_err("Not a valid node");
+ ubifs_err(c, "Not a valid node");
goto out_err;
}
if (cs_node->ch.node_type != UBIFS_CS_NODE) {
- ubifs_err("Node a CS node, type is %d", cs_node->ch.node_type);
+ ubifs_err(c, "Node a CS node, type is %d", cs_node->ch.node_type);
goto out_err;
}
if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
- ubifs_err("CS node cmt_no %llu != current cmt_no %llu",
+ ubifs_err(c, "CS node cmt_no %llu != current cmt_no %llu",
(unsigned long long)le64_to_cpu(cs_node->cmt_no),
c->cmt_no);
goto out_err;
@@ -847,7 +847,7 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
out_err:
err = -EINVAL;
out_free:
- ubifs_err("failed to get CS sqnum");
+ ubifs_err(c, "failed to get CS sqnum");
kfree(cs_node);
return err;
}
@@ -899,7 +899,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
}
}
if (snod->sqnum > cs_sqnum) {
- ubifs_err("unrecoverable log corruption in LEB %d",
+ ubifs_err(c, "unrecoverable log corruption in LEB %d",
lnum);
ubifs_scan_destroy(sleb);
return ERR_PTR(-EUCLEAN);
@@ -975,11 +975,8 @@ int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
return err;
dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);
- err = recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
- if (err)
- return err;
- return 0;
+ return recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
}
/**
@@ -1004,10 +1001,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c,
if (len == 0) {
/* Nothing to read, just unmap it */
- err = ubifs_leb_unmap(c, lnum);
- if (err)
- return err;
- return 0;
+ return ubifs_leb_unmap(c, lnum);
}
err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
@@ -1043,7 +1037,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c,
}
if (ret == SCANNED_EMPTY_SPACE) {
- ubifs_err("unexpected empty space at %d:%d",
+ ubifs_err(c, "unexpected empty space at %d:%d",
lnum, offs);
return -EUCLEAN;
}
@@ -1137,7 +1131,7 @@ static int grab_empty_leb(struct ubifs_info *c)
*/
lnum = ubifs_find_free_leb_for_idx(c);
if (lnum < 0) {
- ubifs_err("could not find an empty LEB");
+ ubifs_err(c, "could not find an empty LEB");
ubifs_dump_lprops(c);
ubifs_dump_budg(c, &c->bi);
return lnum;
@@ -1217,7 +1211,7 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
}
mutex_unlock(&wbuf->io_mutex);
if (err < 0) {
- ubifs_err("GC failed, error %d", err);
+ ubifs_err(c, "GC failed, error %d", err);
if (err == -EAGAIN)
err = -EINVAL;
return err;
@@ -1464,7 +1458,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
return 0;
out:
- ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
+ ubifs_warn(c, "inode %lu failed to fix size %lld -> %lld error %d",
(unsigned long)e->inum, e->i_size, e->d_size, err);
return err;
}
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 9b40a1c5e160..3ca4540130b5 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -458,13 +458,13 @@ int ubifs_validate_entry(struct ubifs_info *c,
nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 ||
strnlen(dent->name, nlen) != nlen ||
le64_to_cpu(dent->inum) > MAX_INUM) {
- ubifs_err("bad %s node", key_type == UBIFS_DENT_KEY ?
+ ubifs_err(c, "bad %s node", key_type == UBIFS_DENT_KEY ?
"directory entry" : "extended attribute entry");
return -EINVAL;
}
if (key_type != UBIFS_DENT_KEY && key_type != UBIFS_XENT_KEY) {
- ubifs_err("bad key type %d", key_type);
+ ubifs_err(c, "bad key type %d", key_type);
return -EINVAL;
}
@@ -589,7 +589,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
cond_resched();
if (snod->sqnum >= SQNUM_WATERMARK) {
- ubifs_err("file system's life ended");
+ ubifs_err(c, "file system's life ended");
goto out_dump;
}
@@ -647,7 +647,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
if (old_size < 0 || old_size > c->max_inode_sz ||
new_size < 0 || new_size > c->max_inode_sz ||
old_size <= new_size) {
- ubifs_err("bad truncation node");
+ ubifs_err(c, "bad truncation node");
goto out_dump;
}
@@ -662,7 +662,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
break;
}
default:
- ubifs_err("unexpected node type %d in bud LEB %d:%d",
+ ubifs_err(c, "unexpected node type %d in bud LEB %d:%d",
snod->type, lnum, snod->offs);
err = -EINVAL;
goto out_dump;
@@ -685,7 +685,7 @@ out:
return err;
out_dump:
- ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs);
+ ubifs_err(c, "bad node is at LEB %d:%d", lnum, snod->offs);
ubifs_dump_node(c, snod->node);
ubifs_scan_destroy(sleb);
return -EINVAL;
@@ -805,7 +805,7 @@ static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref)
if (bud) {
if (bud->jhead == jhead && bud->start <= offs)
return 1;
- ubifs_err("bud at LEB %d:%d was already referred", lnum, offs);
+ ubifs_err(c, "bud at LEB %d:%d was already referred", lnum, offs);
return -EINVAL;
}
@@ -861,12 +861,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
* numbers.
*/
if (snod->type != UBIFS_CS_NODE) {
- ubifs_err("first log node at LEB %d:%d is not CS node",
+ ubifs_err(c, "first log node at LEB %d:%d is not CS node",
lnum, offs);
goto out_dump;
}
if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
- ubifs_err("first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
+ ubifs_err(c, "first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
lnum, offs,
(unsigned long long)le64_to_cpu(node->cmt_no),
c->cmt_no);
@@ -891,7 +891,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
/* Make sure the first node sits at offset zero of the LEB */
if (snod->offs != 0) {
- ubifs_err("first node is not at zero offset");
+ ubifs_err(c, "first node is not at zero offset");
goto out_dump;
}
@@ -899,12 +899,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
cond_resched();
if (snod->sqnum >= SQNUM_WATERMARK) {
- ubifs_err("file system's life ended");
+ ubifs_err(c, "file system's life ended");
goto out_dump;
}
if (snod->sqnum < c->cs_sqnum) {
- ubifs_err("bad sqnum %llu, commit sqnum %llu",
+ ubifs_err(c, "bad sqnum %llu, commit sqnum %llu",
snod->sqnum, c->cs_sqnum);
goto out_dump;
}
@@ -934,12 +934,12 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
case UBIFS_CS_NODE:
/* Make sure it sits at the beginning of LEB */
if (snod->offs != 0) {
- ubifs_err("unexpected node in log");
+ ubifs_err(c, "unexpected node in log");
goto out_dump;
}
break;
default:
- ubifs_err("unexpected node in log");
+ ubifs_err(c, "unexpected node in log");
goto out_dump;
}
}
@@ -955,7 +955,7 @@ out:
return err;
out_dump:
- ubifs_err("log error detected while replaying the log at LEB %d:%d",
+ ubifs_err(c, "log error detected while replaying the log at LEB %d:%d",
lnum, offs + snod->offs);
ubifs_dump_node(c, snod->node);
ubifs_scan_destroy(sleb);
@@ -1017,7 +1017,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
return free; /* Error code */
if (c->ihead_offs != c->leb_size - free) {
- ubifs_err("bad index head LEB %d:%d", c->ihead_lnum,
+ ubifs_err(c, "bad index head LEB %d:%d", c->ihead_lnum,
c->ihead_offs);
return -EINVAL;
}
@@ -1040,7 +1040,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
* someting went wrong and we cannot proceed mounting
* the file-system.
*/
- ubifs_err("no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted",
+ ubifs_err(c, "no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted",
lnum, 0);
err = -EINVAL;
}
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 79c6dbbc0e04..f4fbc7b6b794 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -335,7 +335,7 @@ static int create_default_filesystem(struct ubifs_info *c)
if (err)
return err;
- ubifs_msg("default file-system created");
+ ubifs_msg(c, "default file-system created");
return 0;
}
@@ -365,13 +365,13 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
}
if (le32_to_cpu(sup->min_io_size) != c->min_io_size) {
- ubifs_err("min. I/O unit mismatch: %d in superblock, %d real",
+ ubifs_err(c, "min. I/O unit mismatch: %d in superblock, %d real",
le32_to_cpu(sup->min_io_size), c->min_io_size);
goto failed;
}
if (le32_to_cpu(sup->leb_size) != c->leb_size) {
- ubifs_err("LEB size mismatch: %d in superblock, %d real",
+ ubifs_err(c, "LEB size mismatch: %d in superblock, %d real",
le32_to_cpu(sup->leb_size), c->leb_size);
goto failed;
}
@@ -393,33 +393,33 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6;
if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) {
- ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, %d minimum required",
+ ubifs_err(c, "bad LEB count: %d in superblock, %d on UBI volume, %d minimum required",
c->leb_cnt, c->vi.size, min_leb_cnt);
goto failed;
}
if (c->max_leb_cnt < c->leb_cnt) {
- ubifs_err("max. LEB count %d less than LEB count %d",
+ ubifs_err(c, "max. LEB count %d less than LEB count %d",
c->max_leb_cnt, c->leb_cnt);
goto failed;
}
if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
- ubifs_err("too few main LEBs count %d, must be at least %d",
+ ubifs_err(c, "too few main LEBs count %d, must be at least %d",
c->main_lebs, UBIFS_MIN_MAIN_LEBS);
goto failed;
}
max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS;
if (c->max_bud_bytes < max_bytes) {
- ubifs_err("too small journal (%lld bytes), must be at least %lld bytes",
+ ubifs_err(c, "too small journal (%lld bytes), must be at least %lld bytes",
c->max_bud_bytes, max_bytes);
goto failed;
}
max_bytes = (long long)c->leb_size * c->main_lebs;
if (c->max_bud_bytes > max_bytes) {
- ubifs_err("too large journal size (%lld bytes), only %lld bytes available in the main area",
+ ubifs_err(c, "too large journal size (%lld bytes), only %lld bytes available in the main area",
c->max_bud_bytes, max_bytes);
goto failed;
}
@@ -468,7 +468,7 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
return 0;
failed:
- ubifs_err("bad superblock, error %d", err);
+ ubifs_err(c, "bad superblock, error %d", err);
ubifs_dump_node(c, sup);
return -EINVAL;
}
@@ -549,12 +549,12 @@ int ubifs_read_superblock(struct ubifs_info *c)
ubifs_assert(!c->ro_media || c->ro_mount);
if (!c->ro_mount ||
c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
- ubifs_err("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
+ ubifs_err(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
c->fmt_version, c->ro_compat_version,
UBIFS_FORMAT_VERSION,
UBIFS_RO_COMPAT_VERSION);
if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) {
- ubifs_msg("only R/O mounting is possible");
+ ubifs_msg(c, "only R/O mounting is possible");
err = -EROFS;
} else
err = -EINVAL;
@@ -570,7 +570,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
}
if (c->fmt_version < 3) {
- ubifs_err("on-flash format version %d is not supported",
+ ubifs_err(c, "on-flash format version %d is not supported",
c->fmt_version);
err = -EINVAL;
goto out;
@@ -595,7 +595,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
c->key_len = UBIFS_SK_LEN;
break;
default:
- ubifs_err("unsupported key format");
+ ubifs_err(c, "unsupported key format");
err = -EINVAL;
goto out;
}
@@ -785,7 +785,7 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
ubifs_assert(c->space_fixup);
ubifs_assert(!c->ro_mount);
- ubifs_msg("start fixing up free space");
+ ubifs_msg(c, "start fixing up free space");
err = fixup_free_space(c);
if (err)
@@ -804,6 +804,6 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
if (err)
return err;
- ubifs_msg("free space fixup complete");
+ ubifs_msg(c, "free space fixup complete");
return err;
}
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 89adbc4d08ac..aab87340d3de 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -100,7 +100,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
if (pad_len < 0 ||
offs + node_len + pad_len > c->leb_size) {
if (!quiet) {
- ubifs_err("bad pad node at LEB %d:%d",
+ ubifs_err(c, "bad pad node at LEB %d:%d",
lnum, offs);
ubifs_dump_node(c, pad);
}
@@ -110,7 +110,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
/* Make the node pads to 8-byte boundary */
if ((node_len + pad_len) & 7) {
if (!quiet)
- ubifs_err("bad padding length %d - %d",
+ ubifs_err(c, "bad padding length %d - %d",
offs, offs + node_len + pad_len);
return SCANNED_A_BAD_PAD_NODE;
}
@@ -152,7 +152,7 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0);
if (err && err != -EBADMSG) {
- ubifs_err("cannot read %d bytes from LEB %d:%d, error %d",
+ ubifs_err(c, "cannot read %d bytes from LEB %d:%d, error %d",
c->leb_size - offs, lnum, offs, err);
kfree(sleb);
return ERR_PTR(err);
@@ -240,11 +240,11 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
{
int len;
- ubifs_err("corruption at LEB %d:%d", lnum, offs);
+ ubifs_err(c, "corruption at LEB %d:%d", lnum, offs);
len = c->leb_size - offs;
if (len > 8192)
len = 8192;
- ubifs_err("first %d bytes from LEB %d:%d", len, lnum, offs);
+ ubifs_err(c, "first %d bytes from LEB %d:%d", len, lnum, offs);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1);
}
@@ -299,16 +299,16 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
switch (ret) {
case SCANNED_GARBAGE:
- ubifs_err("garbage");
+ ubifs_err(c, "garbage");
goto corrupted;
case SCANNED_A_NODE:
break;
case SCANNED_A_CORRUPT_NODE:
case SCANNED_A_BAD_PAD_NODE:
- ubifs_err("bad node");
+ ubifs_err(c, "bad node");
goto corrupted;
default:
- ubifs_err("unknown");
+ ubifs_err(c, "unknown");
err = -EINVAL;
goto error;
}
@@ -325,7 +325,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
if (offs % c->min_io_size) {
if (!quiet)
- ubifs_err("empty space starts at non-aligned offset %d",
+ ubifs_err(c, "empty space starts at non-aligned offset %d",
offs);
goto corrupted;
}
@@ -338,7 +338,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
for (; len; offs++, buf++, len--)
if (*(uint8_t *)buf != 0xff) {
if (!quiet)
- ubifs_err("corrupt empty space at LEB %d:%d",
+ ubifs_err(c, "corrupt empty space at LEB %d:%d",
lnum, offs);
goto corrupted;
}
@@ -348,14 +348,14 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
corrupted:
if (!quiet) {
ubifs_scanned_corruption(c, lnum, offs, buf);
- ubifs_err("LEB %d scanning failed", lnum);
+ ubifs_err(c, "LEB %d scanning failed", lnum);
}
err = -EUCLEAN;
ubifs_scan_destroy(sleb);
return ERR_PTR(err);
error:
- ubifs_err("LEB %d scanning failed, error %d", lnum, err);
+ ubifs_err(c, "LEB %d scanning failed, error %d", lnum, err);
ubifs_scan_destroy(sleb);
return ERR_PTR(err);
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 93e946561c5c..75e6f04bb795 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -70,13 +70,13 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
const struct ubifs_inode *ui = ubifs_inode(inode);
if (inode->i_size > c->max_inode_sz) {
- ubifs_err("inode is too large (%lld)",
+ ubifs_err(c, "inode is too large (%lld)",
(long long)inode->i_size);
return 1;
}
if (ui->compr_type >= UBIFS_COMPR_TYPES_CNT) {
- ubifs_err("unknown compression type %d", ui->compr_type);
+ ubifs_err(c, "unknown compression type %d", ui->compr_type);
return 2;
}
@@ -90,7 +90,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
return 5;
if (!ubifs_compr_present(ui->compr_type)) {
- ubifs_warn("inode %lu uses '%s' compression, but it was not compiled in",
+ ubifs_warn(c, "inode %lu uses '%s' compression, but it was not compiled in",
inode->i_ino, ubifs_compr_name(ui->compr_type));
}
@@ -242,14 +242,14 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
return inode;
out_invalid:
- ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err);
+ ubifs_err(c, "inode %lu validation failed, error %d", inode->i_ino, err);
ubifs_dump_node(c, ino);
ubifs_dump_inode(c, inode);
err = -EINVAL;
out_ino:
kfree(ino);
out:
- ubifs_err("failed to read inode %lu, error %d", inode->i_ino, err);
+ ubifs_err(c, "failed to read inode %lu, error %d", inode->i_ino, err);
iget_failed(inode);
return ERR_PTR(err);
}
@@ -319,7 +319,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (inode->i_nlink) {
err = ubifs_jnl_write_inode(c, inode);
if (err)
- ubifs_err("can't write inode %lu, error %d",
+ ubifs_err(c, "can't write inode %lu, error %d",
inode->i_ino, err);
else
err = dbg_check_inode_size(c, inode, ui->ui_size);
@@ -363,7 +363,7 @@ static void ubifs_evict_inode(struct inode *inode)
* Worst case we have a lost orphan inode wasting space, so a
* simple error message is OK here.
*/
- ubifs_err("can't delete inode %lu, error %d",
+ ubifs_err(c, "can't delete inode %lu, error %d",
inode->i_ino, err);
out:
@@ -492,17 +492,17 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
static int init_constants_early(struct ubifs_info *c)
{
if (c->vi.corrupted) {
- ubifs_warn("UBI volume is corrupted - read-only mode");
+ ubifs_warn(c, "UBI volume is corrupted - read-only mode");
c->ro_media = 1;
}
if (c->di.ro_mode) {
- ubifs_msg("read-only UBI device");
+ ubifs_msg(c, "read-only UBI device");
c->ro_media = 1;
}
if (c->vi.vol_type == UBI_STATIC_VOLUME) {
- ubifs_msg("static UBI volume - read-only mode");
+ ubifs_msg(c, "static UBI volume - read-only mode");
c->ro_media = 1;
}
@@ -516,19 +516,19 @@ static int init_constants_early(struct ubifs_info *c)
c->max_write_shift = fls(c->max_write_size) - 1;
if (c->leb_size < UBIFS_MIN_LEB_SZ) {
- ubifs_err("too small LEBs (%d bytes), min. is %d bytes",
+ ubifs_err(c, "too small LEBs (%d bytes), min. is %d bytes",
c->leb_size, UBIFS_MIN_LEB_SZ);
return -EINVAL;
}
if (c->leb_cnt < UBIFS_MIN_LEB_CNT) {
- ubifs_err("too few LEBs (%d), min. is %d",
+ ubifs_err(c, "too few LEBs (%d), min. is %d",
c->leb_cnt, UBIFS_MIN_LEB_CNT);
return -EINVAL;
}
if (!is_power_of_2(c->min_io_size)) {
- ubifs_err("bad min. I/O size %d", c->min_io_size);
+ ubifs_err(c, "bad min. I/O size %d", c->min_io_size);
return -EINVAL;
}
@@ -539,7 +539,7 @@ static int init_constants_early(struct ubifs_info *c)
if (c->max_write_size < c->min_io_size ||
c->max_write_size % c->min_io_size ||
!is_power_of_2(c->max_write_size)) {
- ubifs_err("bad write buffer size %d for %d min. I/O unit",
+ ubifs_err(c, "bad write buffer size %d for %d min. I/O unit",
c->max_write_size, c->min_io_size);
return -EINVAL;
}
@@ -665,7 +665,7 @@ static int init_constants_sb(struct ubifs_info *c)
tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt;
tmp = ALIGN(tmp, c->min_io_size);
if (tmp > c->leb_size) {
- ubifs_err("too small LEB size %d, at least %d needed",
+ ubifs_err(c, "too small LEB size %d, at least %d needed",
c->leb_size, tmp);
return -EINVAL;
}
@@ -680,7 +680,7 @@ static int init_constants_sb(struct ubifs_info *c)
tmp /= c->leb_size;
tmp += 1;
if (c->log_lebs < tmp) {
- ubifs_err("too small log %d LEBs, required min. %d LEBs",
+ ubifs_err(c, "too small log %d LEBs, required min. %d LEBs",
c->log_lebs, tmp);
return -EINVAL;
}
@@ -772,7 +772,7 @@ static int take_gc_lnum(struct ubifs_info *c)
int err;
if (c->gc_lnum == -1) {
- ubifs_err("no LEB for GC");
+ ubifs_err(c, "no LEB for GC");
return -EINVAL;
}
@@ -857,7 +857,7 @@ static void free_orphans(struct ubifs_info *c)
orph = list_entry(c->orph_list.next, struct ubifs_orphan, list);
list_del(&orph->list);
kfree(orph);
- ubifs_err("orphan list not empty at unmount");
+ ubifs_err(c, "orphan list not empty at unmount");
}
vfree(c->orph_buf);
@@ -954,7 +954,8 @@ static const match_table_t tokens = {
*/
static int parse_standard_option(const char *option)
{
- ubifs_msg("parse %s", option);
+
+ pr_notice("UBIFS: parse %s\n", option);
if (!strcmp(option, "sync"))
return MS_SYNCHRONOUS;
return 0;
@@ -1026,7 +1027,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
else if (!strcmp(name, "zlib"))
c->mount_opts.compr_type = UBIFS_COMPR_ZLIB;
else {
- ubifs_err("unknown compressor \"%s\"", name);
+ ubifs_err(c, "unknown compressor \"%s\"", name); //FIXME: is c ready?
kfree(name);
return -EINVAL;
}
@@ -1042,7 +1043,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
flag = parse_standard_option(p);
if (!flag) {
- ubifs_err("unrecognized mount option \"%s\" or missing value",
+ ubifs_err(c, "unrecognized mount option \"%s\" or missing value",
p);
return -EINVAL;
}
@@ -1105,7 +1106,7 @@ again:
}
/* Just disable bulk-read */
- ubifs_warn("cannot allocate %d bytes of memory for bulk-read, disabling it",
+ ubifs_warn(c, "cannot allocate %d bytes of memory for bulk-read, disabling it",
c->max_bu_buf_len);
c->mount_opts.bulk_read = 1;
c->bulk_read = 0;
@@ -1124,7 +1125,7 @@ static int check_free_space(struct ubifs_info *c)
{
ubifs_assert(c->dark_wm > 0);
if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
- ubifs_err("insufficient free space to mount in R/W mode");
+ ubifs_err(c, "insufficient free space to mount in R/W mode");
ubifs_dump_budg(c, &c->bi);
ubifs_dump_lprops(c);
return -ENOSPC;
@@ -1166,14 +1167,14 @@ static int mount_ubifs(struct ubifs_info *c)
* This UBI volume is empty, and read-only, or the file system
* is mounted read-only - we cannot format it.
*/
- ubifs_err("can't format empty UBI volume: read-only %s",
+ ubifs_err(c, "can't format empty UBI volume: read-only %s",
c->ro_media ? "UBI volume" : "mount");
err = -EROFS;
goto out_free;
}
if (c->ro_media && !c->ro_mount) {
- ubifs_err("cannot mount read-write - read-only media");
+ ubifs_err(c, "cannot mount read-write - read-only media");
err = -EROFS;
goto out_free;
}
@@ -1221,7 +1222,7 @@ static int mount_ubifs(struct ubifs_info *c)
* or overridden by mount options is actually compiled in.
*/
if (!ubifs_compr_present(c->default_compr)) {
- ubifs_err("'compressor \"%s\" is not compiled in",
+ ubifs_err(c, "'compressor \"%s\" is not compiled in",
ubifs_compr_name(c->default_compr));
err = -ENOTSUPP;
goto out_free;
@@ -1250,7 +1251,7 @@ static int mount_ubifs(struct ubifs_info *c)
if (IS_ERR(c->bgt)) {
err = PTR_ERR(c->bgt);
c->bgt = NULL;
- ubifs_err("cannot spawn \"%s\", error %d",
+ ubifs_err(c, "cannot spawn \"%s\", error %d",
c->bgt_name, err);
goto out_wbufs;
}
@@ -1264,7 +1265,7 @@ static int mount_ubifs(struct ubifs_info *c)
init_constants_master(c);
if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
- ubifs_msg("recovery needed");
+ ubifs_msg(c, "recovery needed");
c->need_recovery = 1;
}
@@ -1284,7 +1285,7 @@ static int mount_ubifs(struct ubifs_info *c)
goto out_lpt;
}
- if (!c->ro_mount) {
+ if (!c->ro_mount && !c->need_recovery) {
/*
* Set the "dirty" flag so that if we reboot uncleanly we
* will notice this immediately on the next mount.
@@ -1373,10 +1374,10 @@ static int mount_ubifs(struct ubifs_info *c)
if (c->need_recovery) {
if (c->ro_mount)
- ubifs_msg("recovery deferred");
+ ubifs_msg(c, "recovery deferred");
else {
c->need_recovery = 0;
- ubifs_msg("recovery completed");
+ ubifs_msg(c, "recovery completed");
/*
* GC LEB has to be empty and taken at this point. But
* the journal head LEBs may also be accounted as
@@ -1397,20 +1398,20 @@ static int mount_ubifs(struct ubifs_info *c)
c->mounting = 0;
- ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s",
+ ubifs_msg(c, "UBIFS: mounted UBI device %d, volume %d, name \"%s\"%s",
c->vi.ubi_num, c->vi.vol_id, c->vi.name,
c->ro_mount ? ", R/O mode" : "");
x = (long long)c->main_lebs * c->leb_size;
y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
- ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
+ ubifs_msg(c, "LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
c->leb_size, c->leb_size >> 10, c->min_io_size,
c->max_write_size);
- ubifs_msg("FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)",
+ ubifs_msg(c, "FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)",
x, x >> 20, c->main_lebs,
y, y >> 20, c->log_lebs + c->max_bud_cnt);
- ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
+ ubifs_msg(c, "reserved for root: %llu bytes (%llu KiB)",
c->report_rp_size, c->report_rp_size >> 10);
- ubifs_msg("media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s",
+ ubifs_msg(c, "media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s",
c->fmt_version, c->ro_compat_version,
UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid,
c->big_lpt ? ", big LPT model" : ", small LPT model");
@@ -1543,8 +1544,8 @@ static int ubifs_remount_rw(struct ubifs_info *c)
int err, lnum;
if (c->rw_incompat) {
- ubifs_err("the file-system is not R/W-compatible");
- ubifs_msg("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
+ ubifs_err(c, "the file-system is not R/W-compatible");
+ ubifs_msg(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
c->fmt_version, c->ro_compat_version,
UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
return -EROFS;
@@ -1581,7 +1582,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
}
if (c->need_recovery) {
- ubifs_msg("completing deferred recovery");
+ ubifs_msg(c, "completing deferred recovery");
err = ubifs_write_rcvrd_mst_node(c);
if (err)
goto out;
@@ -1630,7 +1631,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
if (IS_ERR(c->bgt)) {
err = PTR_ERR(c->bgt);
c->bgt = NULL;
- ubifs_err("cannot spawn \"%s\", error %d",
+ ubifs_err(c, "cannot spawn \"%s\", error %d",
c->bgt_name, err);
goto out;
}
@@ -1664,7 +1665,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
if (c->need_recovery) {
c->need_recovery = 0;
- ubifs_msg("deferred recovery completed");
+ ubifs_msg(c, "deferred recovery completed");
} else {
/*
* Do not run the debugging space check if the were doing
@@ -1752,8 +1753,7 @@ static void ubifs_put_super(struct super_block *sb)
int i;
struct ubifs_info *c = sb->s_fs_info;
- ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num,
- c->vi.vol_id);
+ ubifs_msg(c, "un-mount UBI device %d", c->vi.ubi_num);
/*
* The following asserts are only valid if there has not been a failure
@@ -1809,7 +1809,7 @@ static void ubifs_put_super(struct super_block *sb)
* next mount, so we just print a message and
* continue to unmount normally.
*/
- ubifs_err("failed to write master node, error %d",
+ ubifs_err(c, "failed to write master node, error %d",
err);
} else {
for (i = 0; i < c->jhead_cnt; i++)
@@ -1834,17 +1834,17 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
err = ubifs_parse_options(c, data, 1);
if (err) {
- ubifs_err("invalid or unknown remount parameter");
+ ubifs_err(c, "invalid or unknown remount parameter");
return err;
}
if (c->ro_mount && !(*flags & MS_RDONLY)) {
if (c->ro_error) {
- ubifs_msg("cannot re-mount R/W due to prior errors");
+ ubifs_msg(c, "cannot re-mount R/W due to prior errors");
return -EROFS;
}
if (c->ro_media) {
- ubifs_msg("cannot re-mount R/W - UBI volume is R/O");
+ ubifs_msg(c, "cannot re-mount R/W - UBI volume is R/O");
return -EROFS;
}
err = ubifs_remount_rw(c);
@@ -1852,7 +1852,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
return err;
} else if (!c->ro_mount && (*flags & MS_RDONLY)) {
if (c->ro_error) {
- ubifs_msg("cannot re-mount R/O due to prior errors");
+ ubifs_msg(c, "cannot re-mount R/O due to prior errors");
return -EROFS;
}
ubifs_remount_ro(c);
@@ -2104,8 +2104,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
*/
ubi = open_ubi(name, UBI_READONLY);
if (IS_ERR(ubi)) {
- ubifs_err("cannot open \"%s\", error %d",
- name, (int)PTR_ERR(ubi));
+ pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
+ current->pid, name, (int)PTR_ERR(ubi));
return ERR_CAST(ubi);
}
@@ -2233,8 +2233,8 @@ static int __init ubifs_init(void)
* UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
*/
if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
- ubifs_err("VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
- (unsigned int)PAGE_CACHE_SIZE);
+ pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
+ current->pid, (unsigned int)PAGE_CACHE_SIZE);
return -EINVAL;
}
@@ -2257,7 +2257,8 @@ static int __init ubifs_init(void)
err = register_filesystem(&ubifs_fs_type);
if (err) {
- ubifs_err("cannot register file system, error %d", err);
+ pr_err("UBIFS error (pid %d): cannot register file system, error %d",
+ current->pid, err);
goto out_dbg;
}
return 0;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 6793db0754f6..957f5757f374 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -98,7 +98,7 @@ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
else if (offs > o->offs)
p = &(*p)->rb_right;
else {
- ubifs_err("old idx added twice!");
+ ubifs_err(c, "old idx added twice!");
kfree(old_idx);
return 0;
}
@@ -447,7 +447,7 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type,
err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
if (err) {
- ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
+ ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d",
type, lnum, offs, err);
return err;
}
@@ -1684,27 +1684,27 @@ static int validate_data_node(struct ubifs_info *c, void *buf,
int err, len;
if (ch->node_type != UBIFS_DATA_NODE) {
- ubifs_err("bad node type (%d but expected %d)",
+ ubifs_err(c, "bad node type (%d but expected %d)",
ch->node_type, UBIFS_DATA_NODE);
goto out_err;
}
err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
if (err) {
- ubifs_err("expected node type %d", UBIFS_DATA_NODE);
+ ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE);
goto out;
}
len = le32_to_cpu(ch->len);
if (len != zbr->len) {
- ubifs_err("bad node length %d, expected %d", len, zbr->len);
+ ubifs_err(c, "bad node length %d, expected %d", len, zbr->len);
goto out_err;
}
/* Make sure the key of the read node is correct */
key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
if (!keys_eq(c, &zbr->key, &key1)) {
- ubifs_err("bad key in node at LEB %d:%d",
+ ubifs_err(c, "bad key in node at LEB %d:%d",
zbr->lnum, zbr->offs);
dbg_tnck(&zbr->key, "looked for key ");
dbg_tnck(&key1, "found node's key ");
@@ -1716,7 +1716,7 @@ static int validate_data_node(struct ubifs_info *c, void *buf,
out_err:
err = -EINVAL;
out:
- ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
+ ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs);
ubifs_dump_node(c, buf);
dump_stack();
return err;
@@ -1741,7 +1741,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
len = bu->zbranch[bu->cnt - 1].offs;
len += bu->zbranch[bu->cnt - 1].len - offs;
if (len > bu->buf_len) {
- ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
+ ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len);
return -EINVAL;
}
@@ -1757,7 +1757,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
return -EAGAIN;
if (err && err != -EBADMSG) {
- ubifs_err("failed to read from LEB %d:%d, error %d",
+ ubifs_err(c, "failed to read from LEB %d:%d, error %d",
lnum, offs, err);
dump_stack();
dbg_tnck(&bu->key, "key ");
@@ -3313,7 +3313,7 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
out_dump:
block = key_block(c, key);
- ubifs_err("inode %lu has size %lld, but there are data at offset %lld",
+ ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld",
(unsigned long)inode->i_ino, size,
((loff_t)block) << UBIFS_BLOCK_SHIFT);
mutex_unlock(&c->tnc_mutex);
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 7a205e046776..b45345d701e7 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -53,7 +53,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
br->offs = cpu_to_le32(zbr->offs);
br->len = cpu_to_le32(zbr->len);
if (!zbr->lnum || !zbr->len) {
- ubifs_err("bad ref in znode");
+ ubifs_err(c, "bad ref in znode");
ubifs_dump_znode(c, znode);
if (zbr->znode)
ubifs_dump_znode(c, zbr->znode);
@@ -384,7 +384,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
* Do not print scary warnings if the debugging
* option which forces in-the-gaps is enabled.
*/
- ubifs_warn("out of space");
+ ubifs_warn(c, "out of space");
ubifs_dump_budg(c, &c->bi);
ubifs_dump_lprops(c);
}
@@ -441,7 +441,7 @@ static int layout_in_empty_space(struct ubifs_info *c)
/* Determine the index node position */
if (lnum == -1) {
if (c->ileb_nxt >= c->ileb_cnt) {
- ubifs_err("out of space");
+ ubifs_err(c, "out of space");
return -ENOSPC;
}
lnum = c->ilebs[c->ileb_nxt++];
@@ -855,7 +855,7 @@ static int write_index(struct ubifs_info *c)
br->offs = cpu_to_le32(zbr->offs);
br->len = cpu_to_le32(zbr->len);
if (!zbr->lnum || !zbr->len) {
- ubifs_err("bad ref in znode");
+ ubifs_err(c, "bad ref in znode");
ubifs_dump_znode(c, znode);
if (zbr->znode)
ubifs_dump_znode(c, zbr->znode);
@@ -875,7 +875,7 @@ static int write_index(struct ubifs_info *c)
if (lnum != znode->lnum || offs != znode->offs ||
len != znode->len) {
- ubifs_err("inconsistent znode posn");
+ ubifs_err(c, "inconsistent znode posn");
return -EINVAL;
}
@@ -973,7 +973,7 @@ static int write_index(struct ubifs_info *c)
if (lnum != c->dbg->new_ihead_lnum ||
buf_offs != c->dbg->new_ihead_offs) {
- ubifs_err("inconsistent ihead");
+ ubifs_err(c, "inconsistent ihead");
return -EINVAL;
}
diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c
index f6bf8995c7b1..93f5b7859e6f 100644
--- a/fs/ubifs/tnc_misc.c
+++ b/fs/ubifs/tnc_misc.c
@@ -293,9 +293,9 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
lnum, offs, znode->level, znode->child_cnt);
if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) {
- ubifs_err("current fanout %d, branch count %d",
+ ubifs_err(c, "current fanout %d, branch count %d",
c->fanout, znode->child_cnt);
- ubifs_err("max levels %d, znode level %d",
+ ubifs_err(c, "max levels %d, znode level %d",
UBIFS_MAX_LEVELS, znode->level);
err = 1;
goto out_dump;
@@ -316,7 +316,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
if (zbr->lnum < c->main_first ||
zbr->lnum >= c->leb_cnt || zbr->offs < 0 ||
zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) {
- ubifs_err("bad branch %d", i);
+ ubifs_err(c, "bad branch %d", i);
err = 2;
goto out_dump;
}
@@ -328,7 +328,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
case UBIFS_XENT_KEY:
break;
default:
- ubifs_err("bad key type at slot %d: %d",
+ ubifs_err(c, "bad key type at slot %d: %d",
i, key_type(c, &zbr->key));
err = 3;
goto out_dump;
@@ -340,17 +340,17 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
type = key_type(c, &zbr->key);
if (c->ranges[type].max_len == 0) {
if (zbr->len != c->ranges[type].len) {
- ubifs_err("bad target node (type %d) length (%d)",
+ ubifs_err(c, "bad target node (type %d) length (%d)",
type, zbr->len);
- ubifs_err("have to be %d", c->ranges[type].len);
+ ubifs_err(c, "have to be %d", c->ranges[type].len);
err = 4;
goto out_dump;
}
} else if (zbr->len < c->ranges[type].min_len ||
zbr->len > c->ranges[type].max_len) {
- ubifs_err("bad target node (type %d) length (%d)",
+ ubifs_err(c, "bad target node (type %d) length (%d)",
type, zbr->len);
- ubifs_err("have to be in range of %d-%d",
+ ubifs_err(c, "have to be in range of %d-%d",
c->ranges[type].min_len,
c->ranges[type].max_len);
err = 5;
@@ -370,12 +370,12 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
cmp = keys_cmp(c, key1, key2);
if (cmp > 0) {
- ubifs_err("bad key order (keys %d and %d)", i, i + 1);
+ ubifs_err(c, "bad key order (keys %d and %d)", i, i + 1);
err = 6;
goto out_dump;
} else if (cmp == 0 && !is_hash_key(c, key1)) {
/* These can only be keys with colliding hash */
- ubifs_err("keys %d and %d are not hashed but equivalent",
+ ubifs_err(c, "keys %d and %d are not hashed but equivalent",
i, i + 1);
err = 7;
goto out_dump;
@@ -386,7 +386,7 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
return 0;
out_dump:
- ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err);
+ ubifs_err(c, "bad indexing node at LEB %d:%d, error %d", lnum, offs, err);
ubifs_dump_node(c, idx);
kfree(idx);
return -EINVAL;
@@ -482,7 +482,7 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
/* Make sure the key of the read node is correct */
key_read(c, node + UBIFS_KEY_OFFSET, &key1);
if (!keys_eq(c, key, &key1)) {
- ubifs_err("bad key in node at LEB %d:%d",
+ ubifs_err(c, "bad key in node at LEB %d:%d",
zbr->lnum, zbr->offs);
dbg_tnck(key, "looked for key ");
dbg_tnck(&key1, "but found node's key ");
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index bc04b9c69891..de759022f3d6 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -43,15 +43,19 @@
#define UBIFS_VERSION 1
/* Normal UBIFS messages */
-#define ubifs_msg(fmt, ...) pr_notice("UBIFS: " fmt "\n", ##__VA_ARGS__)
+#define ubifs_msg(c, fmt, ...) \
+ pr_notice("UBIFS (ubi%d:%d): " fmt "\n", \
+ (c)->vi.ubi_num, (c)->vi.vol_id, ##__VA_ARGS__)
/* UBIFS error messages */
-#define ubifs_err(fmt, ...) \
- pr_err("UBIFS error (pid %d): %s: " fmt "\n", current->pid, \
+#define ubifs_err(c, fmt, ...) \
+ pr_err("UBIFS error (ubi%d:%d pid %d): %s: " fmt "\n", \
+ (c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \
__func__, ##__VA_ARGS__)
/* UBIFS warning messages */
-#define ubifs_warn(fmt, ...) \
- pr_warn("UBIFS warning (pid %d): %s: " fmt "\n", \
- current->pid, __func__, ##__VA_ARGS__)
+#define ubifs_warn(c, fmt, ...) \
+ pr_warn("UBIFS warning (ubi%d:%d pid %d): %s: " fmt "\n", \
+ (c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \
+ __func__, ##__VA_ARGS__)
/*
* A variant of 'ubifs_err()' which takes the UBIFS file-sytem description
* object as an argument.
@@ -59,7 +63,7 @@
#define ubifs_errc(c, fmt, ...) \
do { \
if (!(c)->probing) \
- ubifs_err(fmt, ##__VA_ARGS__); \
+ ubifs_err(c, fmt, ##__VA_ARGS__); \
} while (0)
/* UBIFS file system VFS magic number */
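/*
 * Editor's sketch (user-space model, not kernel code): the macro change above
 * makes every UBIFS message carry the UBI device and volume numbers taken from
 * the struct ubifs_info, which is why every ubifs_msg/err/warn call site in
 * this series gains a 'c' argument. The stand-in below drops the pid and uses
 * printf() in place of pr_err(), but shows the resulting output format.
 */
#include <stdio.h>

struct ubifs_info_model { int ubi_num; int vol_id; };	/* stands in for (c)->vi.* */

#define model_ubifs_err(c, fmt, ...) \
	printf("UBIFS error (ubi%d:%d): %s: " fmt "\n", \
	       (c)->ubi_num, (c)->vol_id, __func__, ##__VA_ARGS__)

int main(void)
{
	struct ubifs_info_model c = { .ubi_num = 0, .vol_id = 1 };

	/* prints: UBIFS error (ubi0:1): main: bad branch 3 */
	model_ubifs_err(&c, "bad branch %d", 3);
	return 0;
}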
@@ -158,7 +162,7 @@
#define WORST_COMPR_FACTOR 2
/*
- * How much memory is needed for a buffer where we comress a data node.
+ * How much memory is needed for a buffer where we compress a data node.
*/
#define COMPRESSED_DATA_NODE_BUF_SZ \
(UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR)
@@ -664,7 +668,7 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
* @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes
* fields
* @softlimit: soft write-buffer timeout interval
- * @delta: hard and soft timeouts delta (the timer expire inteval is @softlimit
+ * @delta: hard and soft timeouts delta (the timer expire interval is @softlimit
* and @softlimit + @delta)
* @timer: write-buffer timer
* @no_timer: non-zero if this write-buffer does not have a timer
@@ -930,9 +934,9 @@ struct ubifs_orphan {
/**
* struct ubifs_mount_opts - UBIFS-specific mount options information.
* @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast)
- * @bulk_read: enable/disable bulk-reads (%0 default, %1 disabe, %2 enable)
+ * @bulk_read: enable/disable bulk-reads (%0 default, %1 disable, %2 enable)
* @chk_data_crc: enable/disable CRC data checking when reading data nodes
- * (%0 default, %1 disabe, %2 enable)
+ * (%0 default, %1 disable, %2 enable)
* @override_compr: override default compressor (%0 - do not override and use
* superblock compressor, %1 - override and use compressor
* specified in @compr_type)
@@ -962,9 +966,9 @@ struct ubifs_mount_opts {
* optimization)
* @nospace_rp: the same as @nospace, but additionally means that even reserved
* pool is full
- * @page_budget: budget for a page (constant, nenver changed after mount)
- * @inode_budget: budget for an inode (constant, nenver changed after mount)
- * @dent_budget: budget for a directory entry (constant, nenver changed after
+ * @page_budget: budget for a page (constant, never changed after mount)
+ * @inode_budget: budget for an inode (constant, never changed after mount)
+ * @dent_budget: budget for a directory entry (constant, never changed after
* mount)
*/
struct ubifs_budg_info {
@@ -1787,10 +1791,10 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/* compressor.c */
int __init ubifs_compressors_init(void);
void ubifs_compressors_exit(void);
-void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
- int *compr_type);
-int ubifs_decompress(const void *buf, int len, void *out, int *out_len,
- int compr_type);
+void ubifs_compress(const struct ubifs_info *c, const void *in_buf, int in_len,
+ void *out_buf, int *out_len, int *compr_type);
+int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len,
+ void *out, int *out_len, int compr_type);
#include "debug.h"
#include "misc.h"
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index a92be244a6fb..96f3448b6eb4 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -108,7 +108,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
.dirtied_ino_d = ALIGN(host_ui->data_len, 8) };
if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE) {
- ubifs_err("inode %lu already has too many xattrs (%d), cannot create more",
+ ubifs_err(c, "inode %lu already has too many xattrs (%d), cannot create more",
host->i_ino, host_ui->xattr_cnt);
return -ENOSPC;
}
@@ -120,7 +120,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
*/
names_len = host_ui->xattr_names + host_ui->xattr_cnt + nm->len + 1;
if (names_len > XATTR_LIST_MAX) {
- ubifs_err("cannot add one more xattr name to inode %lu, total names length would become %d, max. is %d",
+ ubifs_err(c, "cannot add one more xattr name to inode %lu, total names length would become %d, max. is %d",
host->i_ino, names_len, XATTR_LIST_MAX);
return -ENOSPC;
}
@@ -288,13 +288,13 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum)
inode = ubifs_iget(c->vfs_sb, inum);
if (IS_ERR(inode)) {
- ubifs_err("dead extended attribute entry, error %d",
+ ubifs_err(c, "dead extended attribute entry, error %d",
(int)PTR_ERR(inode));
return inode;
}
if (ubifs_inode(inode)->xattr)
return inode;
- ubifs_err("corrupt extended attribute entry");
+ ubifs_err(c, "corrupt extended attribute entry");
iput(inode);
return ERR_PTR(-EINVAL);
}
@@ -364,15 +364,15 @@ int ubifs_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
- name, dentry->d_inode->i_ino, dentry, size);
+ name, d_inode(dentry)->i_ino, dentry, size);
- return setxattr(dentry->d_inode, name, value, size, flags);
+ return setxattr(d_inode(dentry), name, value, size, flags);
}
ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
size_t size)
{
- struct inode *inode, *host = dentry->d_inode;
+ struct inode *inode, *host = d_inode(dentry);
struct ubifs_info *c = host->i_sb->s_fs_info;
struct qstr nm = QSTR_INIT(name, strlen(name));
struct ubifs_inode *ui;
@@ -412,7 +412,7 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
if (buf) {
/* If @buf is %NULL we are supposed to return the length */
if (ui->data_len > size) {
- ubifs_err("buffer size %zd, xattr len %d",
+ ubifs_err(c, "buffer size %zd, xattr len %d",
size, ui->data_len);
err = -ERANGE;
goto out_iput;
@@ -432,7 +432,7 @@ out_unlock:
ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
union ubifs_key key;
- struct inode *host = dentry->d_inode;
+ struct inode *host = d_inode(dentry);
struct ubifs_info *c = host->i_sb->s_fs_info;
struct ubifs_inode *host_ui = ubifs_inode(host);
struct ubifs_dent_node *xent, *pxent = NULL;
@@ -485,7 +485,7 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
kfree(pxent);
if (err != -ENOENT) {
- ubifs_err("cannot find next direntry, error %d", err);
+ ubifs_err(c, "cannot find next direntry, error %d", err);
return err;
}
@@ -535,7 +535,7 @@ out_cancel:
int ubifs_removexattr(struct dentry *dentry, const char *name)
{
- struct inode *inode, *host = dentry->d_inode;
+ struct inode *inode, *host = d_inode(dentry);
struct ubifs_info *c = host->i_sb->s_fs_info;
struct qstr nm = QSTR_INIT(name, strlen(name));
struct ubifs_dent_node *xent;
@@ -657,8 +657,10 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
&init_xattrs, 0);
mutex_unlock(&inode->i_mutex);
- if (err)
- ubifs_err("cannot initialize security for inode %lu, error %d",
+ if (err) {
+ struct ubifs_info *c = dentry->i_sb->s_fs_info;
+ ubifs_err(c, "cannot initialize security for inode %lu, error %d",
inode->i_ino, err);
+ }
return err;
}
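/*
 * Editor's note (illustration only, not part of the patch): the
 * dentry->d_inode dereferences converted in the hunks above and in the
 * UDF/UFS hunks below use the d_inode() accessor from linux/dcache.h, which
 * is essentially a trivial inline wrapper. The miniature model below only
 * shows the call-site change; the real types live in the dcache headers.
 */
#include <stddef.h>

struct model_inode  { unsigned long i_ino; };
struct model_dentry { struct model_inode *d_inode; };

static inline struct model_inode *model_d_inode(const struct model_dentry *dentry)
{
	return dentry->d_inode;
}

static unsigned long example_ino(const struct model_dentry *dentry)
{
	/* before: dentry->d_inode->i_ino   after: d_inode(dentry)->i_ino */
	return model_d_inode(dentry)->i_ino;
}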
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1ba2baaf4367..6d6a96b4e73f 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -21,7 +21,6 @@
#include "udfdecl.h"
-#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include "udf_i.h"
@@ -63,15 +62,14 @@ static int __load_block_bitmap(struct super_block *sb,
block_group, nr_groups);
}
- if (bitmap->s_block_bitmap[block_group]) {
+ if (bitmap->s_block_bitmap[block_group])
return block_group;
- } else {
- retval = read_block_bitmap(sb, bitmap, block_group,
- block_group);
- if (retval < 0)
- return retval;
- return block_group;
- }
+
+ retval = read_block_bitmap(sb, bitmap, block_group, block_group);
+ if (retval < 0)
+ return retval;
+
+ return block_group;
}
static inline int load_block_bitmap(struct super_block *sb,
@@ -358,7 +356,6 @@ static void udf_table_free_blocks(struct super_block *sb,
struct kernel_lb_addr eloc;
struct extent_position oepos, epos;
int8_t etype;
- int i;
struct udf_inode_info *iinfo;
mutex_lock(&sbi->s_alloc_mutex);
@@ -425,7 +422,6 @@ static void udf_table_free_blocks(struct super_block *sb,
}
if (epos.bh != oepos.bh) {
- i = -1;
oepos.block = epos.block;
brelse(oepos.bh);
get_bh(epos.bh);
@@ -762,7 +758,7 @@ inline int udf_prealloc_blocks(struct super_block *sb,
uint32_t block_count)
{
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
- sector_t allocated;
+ int allocated;
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
allocated = udf_bitmap_prealloc_blocks(sb,
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 05e90edd1992..541a12b5792d 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -30,7 +30,6 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include "udf_i.h"
#include "udf_sb.h"
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 3e44f575fb9c..c763fda257bf 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -16,7 +16,6 @@
#include <linux/fs.h>
#include <linux/string.h>
-#include <linux/buffer_head.h>
struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
struct udf_fileident_bh *fibh,
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 08f3555fbeac..7a95b8fed302 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -33,8 +33,7 @@
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
-#include <linux/buffer_head.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -100,8 +99,7 @@ static int udf_adinicb_write_begin(struct file *file,
return 0;
}
-static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter,
+static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
/* Fallback to buffered I/O. */
@@ -121,21 +119,21 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t retval;
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- int err, pos;
- size_t count = iocb->ki_nbytes;
struct udf_inode_info *iinfo = UDF_I(inode);
+ int err;
mutex_lock(&inode->i_mutex);
+
+ retval = generic_write_checks(iocb, from);
+ if (retval <= 0)
+ goto out;
+
down_write(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
- if (file->f_flags & O_APPEND)
- pos = inode->i_size;
- else
- pos = iocb->ki_pos;
+ loff_t end = iocb->ki_pos + iov_iter_count(from);
if (inode->i_sb->s_blocksize <
- (udf_file_entry_alloc_offset(inode) +
- pos + count)) {
+ (udf_file_entry_alloc_offset(inode) + end)) {
err = udf_expand_file_adinicb(inode);
if (err) {
mutex_unlock(&inode->i_mutex);
@@ -143,16 +141,14 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
return err;
}
} else {
- if (pos + count > inode->i_size)
- iinfo->i_lenAlloc = pos + count;
- else
- iinfo->i_lenAlloc = inode->i_size;
+ iinfo->i_lenAlloc = max(end, inode->i_size);
up_write(&iinfo->i_data_sem);
}
} else
up_write(&iinfo->i_data_sem);
retval = __generic_file_write_iter(iocb, from);
+out:
mutex_unlock(&inode->i_mutex);
if (retval > 0) {
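/*
 * Editor's sketch (skeleton with assumed naming, not the real UDF function):
 * the ->write_iter pattern the hunk above switches to. generic_write_checks()
 * handles O_APPEND positioning and limit/permission clamping and returns <= 0
 * when there is nothing to write, so the filesystem only has to look at the
 * already-adjusted iocb->ki_pos plus the remaining iov_iter length when
 * deciding whether the in-ICB inode must be expanded. Dirtying/sync of the
 * inode after a successful write is omitted here.
 */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/* fs-specific preparation sees the final write window:
	 * loff_t end = iocb->ki_pos + iov_iter_count(from); */

	ret = __generic_file_write_iter(iocb, from);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}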
@@ -240,12 +236,10 @@ static int udf_release_file(struct inode *inode, struct file *filp)
}
const struct file_operations udf_file_operations = {
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
.unlocked_ioctl = udf_ioctl,
.open = generic_file_open,
.mmap = generic_file_mmap,
- .write = new_sync_write,
.write_iter = udf_file_write_iter,
.release = udf_release_file,
.fsync = generic_file_fsync,
@@ -255,7 +249,7 @@ const struct file_operations udf_file_operations = {
static int udf_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
error = inode_change_ok(inode, attr);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index a445d599098d..6afac3d561ac 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -33,12 +33,11 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -215,8 +214,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
-static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
- struct iov_iter *iter,
+static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
@@ -225,8 +223,8 @@ static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
- if (unlikely(ret < 0 && (rw & WRITE)))
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
+ if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
udf_write_failed(mapping, offset + count);
return ret;
}
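/*
 * Editor's sketch (consolidated from the hunk above, not new behaviour): the
 * ->direct_IO prototype loses the explicit 'rw' argument; the data direction
 * now travels with the iov_iter and is queried with iov_iter_rw(), and
 * blockdev_direct_IO() drops the parameter as well. The resulting method
 * reads in one piece roughly like this:
 */
static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				 loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);	/* capture before the iter advances */
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
		udf_write_failed(mapping, offset + count);
	return ret;
}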
@@ -1637,7 +1635,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
if (!bh) {
udf_debug("getblk failure\n");
- return -ENOMEM;
+ return -EIO;
}
lock_buffer(bh);
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index c175b4dabc14..71d1c25f360d 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/string.h>
-#include <linux/buffer_head.h>
#include <linux/crc-itu-t.h>
#include "udf_i.h"
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 33b246b82c98..5c03f0dfb98b 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/crc-itu-t.h>
#include <linux/exportfs.h>
@@ -552,7 +551,7 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
@@ -569,8 +568,8 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- mark_inode_dirty(dir);
+ dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+ mark_inode_dirty(dir);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
@@ -683,6 +682,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
inc_nlink(dir);
+ dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
mark_inode_dirty(dir);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
@@ -767,7 +767,7 @@ static int empty_dir(struct inode *dir)
static int udf_rmdir(struct inode *dir, struct dentry *dentry)
{
int retval;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi, cfi;
struct kernel_lb_addr tloc;
@@ -809,7 +809,7 @@ out:
static int udf_unlink(struct inode *dir, struct dentry *dentry)
{
int retval;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi;
struct fileIdentDesc cfi;
@@ -999,7 +999,7 @@ out_no_entry:
static int udf_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
@@ -1024,6 +1024,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
inc_nlink(inode);
inode->i_ctime = current_fs_time(inode->i_sb);
mark_inode_dirty(inode);
+ dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+ mark_inode_dirty(dir);
ihold(inode);
d_instantiate(dentry, inode);
@@ -1036,8 +1038,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct udf_fileident_bh ofibh, nfibh;
struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL;
struct fileIdentDesc ocfi, ncfi;
@@ -1127,7 +1129,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
inode_dec_link_count(new_inode);
}
old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb);
+ new_dir->i_ctime = new_dir->i_mtime = current_fs_time(new_dir->i_sb);
mark_inode_dirty(old_dir);
+ mark_inode_dirty(new_dir);
if (dir_fi) {
dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
@@ -1175,7 +1179,7 @@ static struct dentry *udf_get_parent(struct dentry *child)
struct fileIdentDesc cfi;
struct udf_fileident_bh fibh;
- if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
+ if (!udf_find_entry(d_inode(child), &dotdot, &fibh, &cfi))
return ERR_PTR(-EACCES);
if (fibh.sbh != fibh.ebh)
@@ -1183,7 +1187,7 @@ static struct dentry *udf_get_parent(struct dentry *child)
brelse(fibh.sbh);
tloc = lelb_to_cpu(cfi.icb.extLocation);
- inode = udf_iget(child->d_inode->i_sb, &tloc);
+ inode = udf_iget(d_inode(child)->i_sb, &tloc);
if (IS_ERR(inode))
return ERR_CAST(inode);
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index d6caf01a2097..5f861ed287c3 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -24,7 +24,6 @@
#include <linux/fs.h>
#include <linux/string.h>
-#include <linux/buffer_head.h>
#include <linux/mutex.h>
uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
diff --git a/fs/udf/super.c b/fs/udf/super.c
index f169411c4ea0..6299f341967b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -48,7 +48,6 @@
#include <linux/stat.h>
#include <linux/cdrom.h>
#include <linux/nls.h>
-#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index ac10ca939f26..8dfbc4025e2f 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -27,7 +27,6 @@
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/pagemap.h>
-#include <linux/buffer_head.h>
#include "udf_i.h"
static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 8a9657d7f7c6..42b8c57795cb 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -22,7 +22,6 @@
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/buffer_head.h>
#include "udf_i.h"
#include "udf_sb.h"
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 0ecc2cebed8f..1bfe8cabff0f 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -311,7 +311,7 @@ found:
*/
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
const unsigned char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct super_block *sb = dir->i_sb;
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index c84ec010a676..042ddbf110cc 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -35,9 +35,7 @@
const struct file_operations ufs_file_operations = {
.llseek = generic_file_llseek,
- .read = new_sync_read,
.read_iter = generic_file_read_iter,
- .write = new_sync_write,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.open = generic_file_open,
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index fd65deb4b5f0..e491a93a7e9a 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -165,7 +165,7 @@ out_fail:
static int ufs_link (struct dentry * old_dentry, struct inode * dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int error;
lock_ufs(dir->i_sb);
@@ -222,7 +222,7 @@ out_fail:
static int ufs_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
struct ufs_dir_entry *de;
struct page *page;
int err = -ENOENT;
@@ -244,7 +244,7 @@ out:
static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
{
- struct inode * inode = dentry->d_inode;
+ struct inode * inode = d_inode(dentry);
int err= -ENOTEMPTY;
lock_ufs(dir->i_sb);
@@ -263,8 +263,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode *old_inode = old_dentry->d_inode;
- struct inode *new_inode = new_dentry->d_inode;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
struct page *dir_page = NULL;
struct ufs_dir_entry * dir_de = NULL;
struct page *old_page;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 8092d3759a5e..b3bc3e7ae79d 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -144,10 +144,10 @@ static struct dentry *ufs_get_parent(struct dentry *child)
struct qstr dot_dot = QSTR_INIT("..", 2);
ino_t ino;
- ino = ufs_inode_by_name(child->d_inode, &dot_dot);
+ ino = ufs_inode_by_name(d_inode(child), &dot_dot);
if (!ino)
return ERR_PTR(-ENOENT);
- return d_obtain_alias(ufs_iget(child->d_inode->i_sb, ino));
+ return d_obtain_alias(ufs_iget(d_inode(child)->i_sb, ino));
}
static const struct export_operations ufs_export_ops = {
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
index d283628b4778..5b537e2fdda3 100644
--- a/fs/ufs/symlink.c
+++ b/fs/ufs/symlink.c
@@ -34,7 +34,7 @@
static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct ufs_inode_info *p = UFS_I(dentry->d_inode);
+ struct ufs_inode_info *p = UFS_I(d_inode(dentry));
nd_set_link(nd, (char*)p->i_u1.i_symlink);
return NULL;
}
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index f04f89fbd4d9..21154704c168 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -492,7 +492,7 @@ out:
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
unsigned int ia_valid = attr->ia_valid;
int error;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index a6fbf4472017..516162be1398 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -260,6 +260,7 @@ xfs_alloc_fix_len(
rlen = rlen - (k - args->mod);
else
rlen = rlen - args->prod + (args->mod - k);
+ /* casts to (int) catch length underflows */
if ((int)rlen < (int)args->minlen)
return;
ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
@@ -286,7 +287,8 @@ xfs_alloc_fix_minleft(
if (diff >= 0)
return 1;
args->len += diff; /* shrink the allocated space */
- if (args->len >= args->minlen)
+ /* casts to (int) catch length underflows */
+ if ((int)args->len >= (int)args->minlen)
return 1;
args->agbno = NULLAGBLOCK;
return 0;
@@ -315,6 +317,9 @@ xfs_alloc_fixup_trees(
xfs_agblock_t nfbno2; /* second new free startblock */
xfs_extlen_t nflen1=0; /* first new free length */
xfs_extlen_t nflen2=0; /* second new free length */
+ struct xfs_mount *mp;
+
+ mp = cnt_cur->bc_mp;
/*
* Look up the record in the by-size tree if necessary.
@@ -323,13 +328,13 @@ xfs_alloc_fixup_trees(
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
} else {
if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
}
/*
* Look up the record in the by-block tree if necessary.
@@ -338,13 +343,13 @@ xfs_alloc_fixup_trees(
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
} else {
if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
}
#ifdef DEBUG
@@ -355,7 +360,7 @@ xfs_alloc_fixup_trees(
bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
bnoblock->bb_numrecs == cntblock->bb_numrecs);
}
#endif
@@ -386,25 +391,25 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_btree_delete(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
/*
* Add new by-size btree entry(s).
*/
if (nfbno1 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 0);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
}
if (nfbno2 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 0);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
}
/*
* Fix up the by-block btree entry(s).
@@ -415,7 +420,7 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
} else {
/*
* Update the by-block entry to start later|be shorter.
@@ -429,10 +434,10 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 0);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
if ((error = xfs_btree_insert(bno_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
}
return 0;
}
@@ -682,7 +687,7 @@ xfs_alloc_ag_vextent_exact(
error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ASSERT(fbno <= args->agbno);
/*
@@ -783,7 +788,7 @@ xfs_alloc_find_best_extent(
error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
/*
@@ -946,7 +951,7 @@ restart:
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
&ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
if (ltlen >= args->minlen)
break;
if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
@@ -966,7 +971,7 @@ restart:
*/
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
xfs_alloc_compute_aligned(args, ltbno, ltlen,
&ltbnoa, &ltlena);
if (ltlena < args->minlen)
@@ -999,7 +1004,7 @@ restart:
cnt_cur->bc_ptrs[0] = besti;
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->len = blen;
if (!xfs_alloc_fix_minleft(args)) {
@@ -1088,7 +1093,7 @@ restart:
if (bno_cur_lt) {
if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
xfs_alloc_compute_aligned(args, ltbno, ltlen,
&ltbnoa, &ltlena);
if (ltlena >= args->minlen)
@@ -1104,7 +1109,7 @@ restart:
if (bno_cur_gt) {
if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
xfs_alloc_compute_aligned(args, gtbno, gtlen,
&gtbnoa, &gtlena);
if (gtlena >= args->minlen)
@@ -1303,7 +1308,7 @@ restart:
error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen);
@@ -1342,7 +1347,7 @@ restart:
* This can't happen in the second case above.
*/
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
- XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
+ XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen), error0);
if (rlen < args->maxlen) {
xfs_agblock_t bestfbno;
@@ -1362,13 +1367,13 @@ restart:
if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
if (flen < bestrlen)
break;
xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
- XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
+ XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen),
error0);
if (rlen > bestrlen) {
@@ -1383,7 +1388,7 @@ restart:
if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
rlen = bestrlen;
rbno = bestrbno;
flen = bestflen;
@@ -1408,7 +1413,7 @@ restart:
if (!xfs_alloc_fix_minleft(args))
goto out_nominleft;
rlen = args->len;
- XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
/*
* Allocate and initialize a cursor for the by-block tree.
*/
@@ -1422,7 +1427,7 @@ restart:
cnt_cur = bno_cur = NULL;
args->len = rlen;
args->agbno = rbno;
- XFS_WANT_CORRUPTED_GOTO(
+ XFS_WANT_CORRUPTED_GOTO(args->mp,
args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
@@ -1467,7 +1472,7 @@ xfs_alloc_ag_vextent_small(
if (i) {
if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
}
/*
* Nothing in the btree, try the freelist. Make sure
@@ -1493,7 +1498,7 @@ xfs_alloc_ag_vextent_small(
}
args->len = 1;
args->agbno = fbno;
- XFS_WANT_CORRUPTED_GOTO(
+ XFS_WANT_CORRUPTED_GOTO(args->mp,
args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
@@ -1579,7 +1584,7 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* It's not contiguous, though.
*/
@@ -1591,7 +1596,8 @@ xfs_free_ag_extent(
* space was invalid, it's (partly) already free.
* Very bad.
*/
- XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ ltbno + ltlen <= bno, error0);
}
}
/*
@@ -1606,7 +1612,7 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* It's not contiguous, though.
*/
@@ -1618,7 +1624,7 @@ xfs_free_ag_extent(
* space was invalid, it's (partly) already free.
* Very bad.
*/
- XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
}
}
/*
@@ -1635,31 +1641,31 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* Delete the old by-size entry on the right.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* Delete the old by-block entry for the right block.
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* Move the by-block cursor back to the left neighbor.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
#ifdef DEBUG
/*
* Check that this is the right record: delete didn't
@@ -1672,7 +1678,7 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(
+ XFS_WANT_CORRUPTED_GOTO(mp,
i == 1 && xxbno == ltbno && xxlen == ltlen,
error0);
}
@@ -1695,17 +1701,17 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* Back up the by-block cursor to the left neighbor, and
* update its length.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
nbno = ltbno;
nlen = len + ltlen;
if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
@@ -1721,10 +1727,10 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* Update the starting block and length of the right
* neighbor in the by-block tree.
@@ -1743,7 +1749,7 @@ xfs_free_ag_extent(
nlen = len;
if ((error = xfs_btree_insert(bno_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
bno_cur = NULL;
@@ -1752,10 +1758,10 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
if ((error = xfs_btree_insert(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
cnt_cur = NULL;
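/*
 * Editor's sketch (general shape only, named as a stand-in rather than the
 * kernel's real macro): XFS_WANT_CORRUPTED_RETURN/GOTO now take the mount
 * pointer so a failed verification can report which filesystem hit the
 * corruption instead of logging an anonymous message. That is why every call
 * site in this file gains an mp, args->mp or cnt_cur->bc_mp argument. The
 * stand-in below relies, like the real macro users, on a local 'error'
 * variable and a goto label in the enclosing function.
 */
#define EXAMPLE_WANT_CORRUPTED_GOTO(mp, expr, label)			\
	do {								\
		if (!(expr)) {						\
			xfs_alert((mp), "corrupt metadata: %s", #expr);	\
			error = -EFSCORRUPTED;				\
			goto label;					\
		}							\
	} while (0)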
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 15105dbc9e28..04e79d57bca6 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -86,8 +86,83 @@ STATIC void xfs_attr3_leaf_moveents(struct xfs_da_args *args,
int move_count);
STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
+/*
+ * attr3 block 'firstused' conversion helpers.
+ *
+ * firstused refers to the offset of the first used byte of the nameval region
+ * of an attr leaf block. The region starts at the tail of the block and expands
+ * backwards towards the middle. As such, firstused is initialized to the block
+ * size for an empty leaf block and is reduced from there.
+ *
+ * The attr3 block size is pegged to the fsb size and the maximum fsb is 64k.
+ * The in-core firstused field is 32-bit and thus supports the maximum fsb size.
+ * The on-disk field is only 16-bit, however, and overflows at 64k. Since this
+ * only occurs at exactly 64k, we use zero as a magic on-disk value to represent
+ * the attr block size. The following helpers manage the conversion between the
+ * in-core and on-disk formats.
+ */
+
+static void
+xfs_attr3_leaf_firstused_from_disk(
+ struct xfs_da_geometry *geo,
+ struct xfs_attr3_icleaf_hdr *to,
+ struct xfs_attr_leafblock *from)
+{
+ struct xfs_attr3_leaf_hdr *hdr3;
+
+ if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
+ hdr3 = (struct xfs_attr3_leaf_hdr *) from;
+ to->firstused = be16_to_cpu(hdr3->firstused);
+ } else {
+ to->firstused = be16_to_cpu(from->hdr.firstused);
+ }
+
+ /*
+ * Convert from the magic fsb size value to actual blocksize. This
+ * should only occur for empty blocks when the block size overflows
+ * 16-bits.
+ */
+ if (to->firstused == XFS_ATTR3_LEAF_NULLOFF) {
+ ASSERT(!to->count && !to->usedbytes);
+ ASSERT(geo->blksize > USHRT_MAX);
+ to->firstused = geo->blksize;
+ }
+}
+
+static void
+xfs_attr3_leaf_firstused_to_disk(
+ struct xfs_da_geometry *geo,
+ struct xfs_attr_leafblock *to,
+ struct xfs_attr3_icleaf_hdr *from)
+{
+ struct xfs_attr3_leaf_hdr *hdr3;
+ uint32_t firstused;
+
+ /* magic value should only be seen on disk */
+ ASSERT(from->firstused != XFS_ATTR3_LEAF_NULLOFF);
+
+ /*
+ * Scale down the 32-bit in-core firstused value to the 16-bit on-disk
+ * value. This only overflows at the max supported value of 64k. Use the
+ * magic on-disk value to represent block size in this case.
+ */
+ firstused = from->firstused;
+ if (firstused > USHRT_MAX) {
+ ASSERT(from->firstused == geo->blksize);
+ firstused = XFS_ATTR3_LEAF_NULLOFF;
+ }
+
+ if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
+ hdr3 = (struct xfs_attr3_leaf_hdr *) to;
+ hdr3->firstused = cpu_to_be16(firstused);
+ } else {
+ to->hdr.firstused = cpu_to_be16(firstused);
+ }
+}
+
void
xfs_attr3_leaf_hdr_from_disk(
+ struct xfs_da_geometry *geo,
struct xfs_attr3_icleaf_hdr *to,
struct xfs_attr_leafblock *from)
{
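/*
 * Editor's sketch (user-space model, not the kernel code): the essence of the
 * firstused helpers added above. An empty 64k attr block has firstused equal
 * to the block size (65536), which does not fit in the 16-bit on-disk field,
 * so 0 is written as the on-disk magic meaning "block size". EXAMPLE_NULLOFF
 * stands in for XFS_ATTR3_LEAF_NULLOFF.
 */
#include <assert.h>
#include <limits.h>
#include <stdint.h>

#define EXAMPLE_NULLOFF	0

static uint16_t firstused_to_disk(uint32_t firstused, uint32_t blksize)
{
	if (firstused > USHRT_MAX) {
		assert(firstused == blksize);	/* only an empty 64k block overflows */
		return EXAMPLE_NULLOFF;
	}
	return (uint16_t)firstused;
}

static uint32_t firstused_from_disk(uint16_t ondisk, uint32_t blksize)
{
	if (ondisk == EXAMPLE_NULLOFF)
		return blksize;			/* empty block: first used byte is the end */
	return ondisk;
}

int main(void)
{
	uint32_t blksize = 65536;		/* 64k fsb, the only case that overflows */

	assert(firstused_from_disk(firstused_to_disk(blksize, blksize), blksize) == blksize);
	assert(firstused_from_disk(firstused_to_disk(4000, blksize), blksize) == 4000);
	return 0;
}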
@@ -104,7 +179,7 @@ xfs_attr3_leaf_hdr_from_disk(
to->magic = be16_to_cpu(hdr3->info.hdr.magic);
to->count = be16_to_cpu(hdr3->count);
to->usedbytes = be16_to_cpu(hdr3->usedbytes);
- to->firstused = be16_to_cpu(hdr3->firstused);
+ xfs_attr3_leaf_firstused_from_disk(geo, to, from);
to->holes = hdr3->holes;
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
@@ -118,7 +193,7 @@ xfs_attr3_leaf_hdr_from_disk(
to->magic = be16_to_cpu(from->hdr.info.magic);
to->count = be16_to_cpu(from->hdr.count);
to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
- to->firstused = be16_to_cpu(from->hdr.firstused);
+ xfs_attr3_leaf_firstused_from_disk(geo, to, from);
to->holes = from->hdr.holes;
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
@@ -129,10 +204,11 @@ xfs_attr3_leaf_hdr_from_disk(
void
xfs_attr3_leaf_hdr_to_disk(
+ struct xfs_da_geometry *geo,
struct xfs_attr_leafblock *to,
struct xfs_attr3_icleaf_hdr *from)
{
- int i;
+ int i;
ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC ||
from->magic == XFS_ATTR3_LEAF_MAGIC);
@@ -145,7 +221,7 @@ xfs_attr3_leaf_hdr_to_disk(
hdr3->info.hdr.magic = cpu_to_be16(from->magic);
hdr3->count = cpu_to_be16(from->count);
hdr3->usedbytes = cpu_to_be16(from->usedbytes);
- hdr3->firstused = cpu_to_be16(from->firstused);
+ xfs_attr3_leaf_firstused_to_disk(geo, to, from);
hdr3->holes = from->holes;
hdr3->pad1 = 0;
@@ -160,7 +236,7 @@ xfs_attr3_leaf_hdr_to_disk(
to->hdr.info.magic = cpu_to_be16(from->magic);
to->hdr.count = cpu_to_be16(from->count);
to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
- to->hdr.firstused = cpu_to_be16(from->firstused);
+ xfs_attr3_leaf_firstused_to_disk(geo, to, from);
to->hdr.holes = from->holes;
to->hdr.pad1 = 0;
@@ -178,7 +254,7 @@ xfs_attr3_leaf_verify(
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr3_icleaf_hdr ichdr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
@@ -757,9 +833,10 @@ xfs_attr_shortform_allfit(
struct xfs_attr3_icleaf_hdr leafhdr;
int bytes;
int i;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
entry = xfs_attr3_leaf_entryp(leaf);
bytes = sizeof(struct xfs_attr_sf_hdr);
@@ -812,7 +889,7 @@ xfs_attr3_leaf_to_shortform(
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entry = xfs_attr3_leaf_entryp(leaf);
/* XXX (dgc): buffer is about to be marked stale - why zero it? */
@@ -923,7 +1000,7 @@ xfs_attr3_leaf_to_node(
btree = dp->d_ops->node_tree_p(node);
leaf = bp2->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&icleafhdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
/* both on-disk, don't endian-flip twice */
@@ -988,7 +1065,7 @@ xfs_attr3_leaf_create(
}
ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base;
- xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
+ xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1);
*bpp = bp;
@@ -1073,7 +1150,7 @@ xfs_attr3_leaf_add(
trace_xfs_attr_leaf_add(args);
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(args->index >= 0 && args->index <= ichdr.count);
entsize = xfs_attr_leaf_newentsize(args, NULL);
@@ -1126,7 +1203,7 @@ xfs_attr3_leaf_add(
tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0);
out_log_hdr:
- xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
+ xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, &leaf->hdr,
xfs_attr3_leaf_hdr_size(leaf)));
@@ -1294,7 +1371,7 @@ xfs_attr3_leaf_compact(
ichdr_dst->freemap[0].base;
/* write the header back to initialise the underlying buffer */
- xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
+ xfs_attr3_leaf_hdr_to_disk(args->geo, leaf_dst, ichdr_dst);
/*
* Copy all entry's in the same (sorted) order,
@@ -1344,9 +1421,10 @@ xfs_attr_leaf_order(
{
struct xfs_attr3_icleaf_hdr ichdr1;
struct xfs_attr3_icleaf_hdr ichdr2;
+ struct xfs_mount *mp = leaf1_bp->b_target->bt_mount;
- xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1_bp->b_addr);
- xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2_bp->b_addr);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr1, leaf1_bp->b_addr);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr2, leaf2_bp->b_addr);
return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2);
}
@@ -1388,8 +1466,8 @@ xfs_attr3_leaf_rebalance(
ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
- xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
+ xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr1, leaf1);
+ xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, leaf2);
ASSERT(ichdr2.count == 0);
args = state->args;
@@ -1490,8 +1568,8 @@ xfs_attr3_leaf_rebalance(
ichdr1.count, count);
}
- xfs_attr3_leaf_hdr_to_disk(leaf1, &ichdr1);
- xfs_attr3_leaf_hdr_to_disk(leaf2, &ichdr2);
+ xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf1, &ichdr1);
+ xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf2, &ichdr2);
xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1);
xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1);
@@ -1684,7 +1762,7 @@ xfs_attr3_leaf_toosmall(
*/
blk = &state->path.blk[ state->path.active-1 ];
leaf = blk->bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr, leaf);
bytes = xfs_attr3_leaf_hdr_size(leaf) +
ichdr.count * sizeof(xfs_attr_leaf_entry_t) +
ichdr.usedbytes;
@@ -1740,7 +1818,7 @@ xfs_attr3_leaf_toosmall(
if (error)
return error;
- xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr);
+ xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, bp->b_addr);
bytes = state->args->geo->blksize -
(state->args->geo->blksize >> 2) -
@@ -1805,7 +1883,7 @@ xfs_attr3_leaf_remove(
trace_xfs_attr_leaf_remove(args);
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(ichdr.count > 0 && ichdr.count < args->geo->blksize / 8);
ASSERT(args->index >= 0 && args->index < ichdr.count);
@@ -1918,12 +1996,11 @@ xfs_attr3_leaf_remove(
tmp = be16_to_cpu(entry->nameidx);
}
ichdr.firstused = tmp;
- if (!ichdr.firstused)
- ichdr.firstused = tmp - XFS_ATTR_LEAF_NAME_ALIGN;
+ ASSERT(ichdr.firstused != 0);
} else {
ichdr.holes = 1; /* mark as needing compaction */
}
- xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
+ xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, &leaf->hdr,
xfs_attr3_leaf_hdr_size(leaf)));
@@ -1957,8 +2034,8 @@ xfs_attr3_leaf_unbalance(
drop_leaf = drop_blk->bp->b_addr;
save_leaf = save_blk->bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&drophdr, drop_leaf);
- xfs_attr3_leaf_hdr_from_disk(&savehdr, save_leaf);
+ xfs_attr3_leaf_hdr_from_disk(state->args->geo, &drophdr, drop_leaf);
+ xfs_attr3_leaf_hdr_from_disk(state->args->geo, &savehdr, save_leaf);
entry = xfs_attr3_leaf_entryp(drop_leaf);
/*
@@ -2012,7 +2089,7 @@ xfs_attr3_leaf_unbalance(
tmphdr.firstused = state->args->geo->blksize;
/* write the header to the temp buffer to initialise it */
- xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);
+ xfs_attr3_leaf_hdr_to_disk(state->args->geo, tmp_leaf, &tmphdr);
if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
drop_blk->bp, &drophdr)) {
@@ -2039,7 +2116,7 @@ xfs_attr3_leaf_unbalance(
kmem_free(tmp_leaf);
}
- xfs_attr3_leaf_hdr_to_disk(save_leaf, &savehdr);
+ xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr);
xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
state->args->geo->blksize - 1);
@@ -2085,7 +2162,7 @@ xfs_attr3_leaf_lookup_int(
trace_xfs_attr_leaf_lookup(args);
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
ASSERT(ichdr.count < args->geo->blksize / 8);
@@ -2190,7 +2267,7 @@ xfs_attr3_leaf_getvalue(
int valuelen;
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(ichdr.count < args->geo->blksize / 8);
ASSERT(args->index < ichdr.count);
@@ -2391,8 +2468,9 @@ xfs_attr_leaf_lasthash(
{
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_attr_leaf_entry *entries;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, bp->b_addr);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, bp->b_addr);
entries = xfs_attr3_leaf_entryp(bp->b_addr);
if (count)
*count = ichdr.count;
@@ -2486,7 +2564,7 @@ xfs_attr3_leaf_clearflag(
ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
#ifdef DEBUG
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(args->index < ichdr.count);
ASSERT(args->index >= 0);
@@ -2550,7 +2628,7 @@ xfs_attr3_leaf_setflag(
leaf = bp->b_addr;
#ifdef DEBUG
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(args->index < ichdr.count);
ASSERT(args->index >= 0);
#endif
@@ -2629,11 +2707,11 @@ xfs_attr3_leaf_flipflags(
entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2];
#ifdef DEBUG
- xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr1, leaf1);
ASSERT(args->index < ichdr1.count);
ASSERT(args->index >= 0);
- xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
+ xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr2, leaf2);
ASSERT(args->index2 < ichdr2.count);
ASSERT(args->index2 >= 0);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index e2929da7c3ba..025c4b820c03 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -100,9 +100,11 @@ int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int *local);
int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
struct xfs_buf **bpp);
-void xfs_attr3_leaf_hdr_from_disk(struct xfs_attr3_icleaf_hdr *to,
+void xfs_attr3_leaf_hdr_from_disk(struct xfs_da_geometry *geo,
+ struct xfs_attr3_icleaf_hdr *to,
struct xfs_attr_leafblock *from);
-void xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to,
+void xfs_attr3_leaf_hdr_to_disk(struct xfs_da_geometry *geo,
+ struct xfs_attr_leafblock *to,
struct xfs_attr3_icleaf_hdr *from);
#endif /* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 61ec015dca16..aeffeaaac0ec 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -244,30 +244,6 @@ xfs_bmap_forkoff_reset(
}
}
-/*
- * Debug/sanity checking code
- */
-
-STATIC int
-xfs_bmap_sanity_check(
- struct xfs_mount *mp,
- struct xfs_buf *bp,
- int level)
-{
- struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
-
- if (block->bb_magic != cpu_to_be32(XFS_BMAP_CRC_MAGIC) &&
- block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC))
- return 0;
-
- if (be16_to_cpu(block->bb_level) != level ||
- be16_to_cpu(block->bb_numrecs) == 0 ||
- be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
- return 0;
-
- return 1;
-}
-
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
@@ -410,9 +386,6 @@ xfs_bmap_check_leaf_extents(
goto error_norelse;
}
block = XFS_BUF_TO_BLOCK(bp);
- XFS_WANT_CORRUPTED_GOTO(
- xfs_bmap_sanity_check(mp, bp, level),
- error0);
if (level == 0)
break;
@@ -424,7 +397,8 @@ xfs_bmap_check_leaf_extents(
xfs_check_block(block, mp, 0, 0);
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ XFS_FSB_SANITY_CHECK(mp, bno), error0);
if (bp_release) {
bp_release = 0;
xfs_trans_brelse(NULL, bp);
@@ -1029,7 +1003,7 @@ xfs_bmap_add_attrfork_btree(
if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
goto error0;
/* must be at least one entry */
- XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
goto error0;
if (stat == 0) {
@@ -1311,14 +1285,12 @@ xfs_bmap_read_extents(
if (error)
return error;
block = XFS_BUF_TO_BLOCK(bp);
- XFS_WANT_CORRUPTED_GOTO(
- xfs_bmap_sanity_check(mp, bp, level),
- error0);
if (level == 0)
break;
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ XFS_FSB_SANITY_CHECK(mp, bno), error0);
xfs_trans_brelse(tp, bp);
}
/*
@@ -1345,9 +1317,6 @@ xfs_bmap_read_extents(
XFS_ERRLEVEL_LOW, ip->i_mount, block);
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(
- xfs_bmap_sanity_check(mp, bp, 0),
- error0);
/*
* Read-ahead the next leaf block, if any.
*/
@@ -1755,7 +1724,9 @@ xfs_bmap_add_extent_delay_real(
xfs_filblks_t temp=0; /* value for da_new calculations */
xfs_filblks_t temp2=0;/* value for da_new calculations */
int tmp_rval; /* partial logging flags */
+ struct xfs_mount *mp;
+ mp = bma->tp ? bma->tp->t_mountp : NULL;
ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
ASSERT(bma->idx >= 0);
@@ -1866,15 +1837,15 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_btree_delete(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_btree_decrement(bma->cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount +
@@ -1907,7 +1878,7 @@ xfs_bmap_add_extent_delay_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount +
@@ -1938,7 +1909,7 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
new->br_startblock,
PREV.br_blockcount +
@@ -1968,12 +1939,12 @@ xfs_bmap_add_extent_delay_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
break;
@@ -2001,7 +1972,7 @@ xfs_bmap_add_extent_delay_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount +
@@ -2038,12 +2009,12 @@ xfs_bmap_add_extent_delay_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
@@ -2084,7 +2055,7 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, new->br_startoff,
new->br_startblock,
new->br_blockcount +
@@ -2122,12 +2093,12 @@ xfs_bmap_add_extent_delay_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
@@ -2191,12 +2162,12 @@ xfs_bmap_add_extent_delay_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
@@ -2212,9 +2183,8 @@ xfs_bmap_add_extent_delay_real(
diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_private.b.allocated : 0));
if (diff > 0) {
- error = xfs_icsb_modify_counters(bma->ip->i_mount,
- XFS_SBS_FDBLOCKS,
- -((int64_t)diff), 0);
+ error = xfs_mod_fdblocks(bma->ip->i_mount,
+ -((int64_t)diff), false);
ASSERT(!error);
if (error)
goto done;
@@ -2265,9 +2235,8 @@ xfs_bmap_add_extent_delay_real(
temp += bma->cur->bc_private.b.allocated;
ASSERT(temp <= da_old);
if (temp < da_old)
- xfs_icsb_modify_counters(bma->ip->i_mount,
- XFS_SBS_FDBLOCKS,
- (int64_t)(da_old - temp), 0);
+ xfs_mod_fdblocks(bma->ip->i_mount,
+ (int64_t)(da_old - temp), false);
}
/* clear out the allocated field, done with it now in any case. */
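/*
 * Editor's sketch (generic illustration with made-up names, not the XFS
 * implementation): the two hunks above replace
 * xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, delta, 0) with
 * xfs_mod_fdblocks(mp, delta, false). The new helper applies a signed delta
 * to the free-block count and fails with -ENOSPC if that would take the count
 * below what the caller may consume; the bool selects whether the reserved
 * pool is fair game. The model below treats the reserve as a simple floor,
 * which is a simplification of the real accounting.
 */
#include <linux/percpu_counter.h>
#include <linux/errno.h>

#define EXAMPLE_RESERVED_BLOCKS	1024	/* hypothetical reserve size */

static int example_mod_fdblocks(struct percpu_counter *fdblocks,
				int64_t delta, bool rsvd)
{
	int64_t floor = rsvd ? 0 : EXAMPLE_RESERVED_BLOCKS;

	percpu_counter_add(fdblocks, delta);
	if (percpu_counter_compare(fdblocks, floor) >= 0)
		return 0;

	/* the change would eat into blocks we may not use: back it out */
	percpu_counter_add(fdblocks, -delta);
	return -ENOSPC;
}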
@@ -2309,6 +2278,7 @@ xfs_bmap_add_extent_unwritten_real(
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
+ struct xfs_mount *mp = tp->t_mountp;
*logflagsp = 0;
@@ -2421,19 +2391,19 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_startblock,
RIGHT.br_blockcount, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount + PREV.br_blockcount +
@@ -2464,13 +2434,13 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount + PREV.br_blockcount,
@@ -2499,13 +2469,13 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_startblock,
RIGHT.br_blockcount, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur, new->br_startoff,
new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount,
@@ -2532,7 +2502,7 @@ xfs_bmap_add_extent_unwritten_real(
new->br_startblock, new->br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur, new->br_startoff,
new->br_startblock, new->br_blockcount,
newext)))
@@ -2569,7 +2539,7 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur,
PREV.br_startoff + new->br_blockcount,
PREV.br_startblock + new->br_blockcount,
@@ -2611,7 +2581,7 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur,
PREV.br_startoff + new->br_blockcount,
PREV.br_startblock + new->br_blockcount,
@@ -2621,7 +2591,7 @@ xfs_bmap_add_extent_unwritten_real(
cur->bc_rec.b = *new;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
break;
@@ -2651,7 +2621,7 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock,
PREV.br_blockcount, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
PREV.br_startblock,
PREV.br_blockcount - new->br_blockcount,
@@ -2689,7 +2659,7 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
PREV.br_startblock,
PREV.br_blockcount - new->br_blockcount,
@@ -2699,11 +2669,11 @@ xfs_bmap_add_extent_unwritten_real(
new->br_startblock, new->br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
cur->bc_rec.b.br_state = XFS_EXT_NORM;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
break;
@@ -2737,7 +2707,7 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
/* new right extent - oldext */
if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
r[1].br_startblock, r[1].br_blockcount,
@@ -2749,7 +2719,7 @@ xfs_bmap_add_extent_unwritten_real(
new->br_startoff - PREV.br_startoff;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
/*
* Reset the cursor to the position of the new extent
* we are about to insert as we can't trust it after
@@ -2759,12 +2729,12 @@ xfs_bmap_add_extent_unwritten_real(
new->br_startblock, new->br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
/* new middle extent - newext */
cur->bc_rec.b.br_state = new->br_state;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
break;
@@ -2944,8 +2914,8 @@ xfs_bmap_add_extent_hole_delay(
}
if (oldlen != newlen) {
ASSERT(oldlen > newlen);
- xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
- (int64_t)(oldlen - newlen), 0);
+ xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
+ false);
/*
* Nothing to do for disk quota accounting here.
*/
@@ -2968,7 +2938,9 @@ xfs_bmap_add_extent_hole_real(
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int rval=0; /* return value (logging flags) */
int state; /* state bits, accessed thru macros */
+ struct xfs_mount *mp;
+ mp = bma->tp ? bma->tp->t_mountp : NULL;
ifp = XFS_IFORK_PTR(bma->ip, whichfork);
ASSERT(bma->idx >= 0);
@@ -3056,15 +3028,15 @@ xfs_bmap_add_extent_hole_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_btree_delete(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_btree_decrement(bma->cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, left.br_startoff,
left.br_startblock,
left.br_blockcount +
@@ -3097,7 +3069,7 @@ xfs_bmap_add_extent_hole_real(
&i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, left.br_startoff,
left.br_startblock,
left.br_blockcount +
@@ -3131,7 +3103,7 @@ xfs_bmap_add_extent_hole_real(
right.br_blockcount, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
error = xfs_bmbt_update(bma->cur, new->br_startoff,
new->br_startblock,
new->br_blockcount +
@@ -3161,12 +3133,12 @@ xfs_bmap_add_extent_hole_real(
new->br_blockcount, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
bma->cur->bc_rec.b.br_state = new->br_state;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
break;
}
@@ -4160,18 +4132,15 @@ xfs_bmapi_reserve_delalloc(
ASSERT(indlen > 0);
if (rt) {
- error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
- -((int64_t)extsz), 0);
+ error = xfs_mod_frextents(mp, -((int64_t)extsz));
} else {
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- -((int64_t)alen), 0);
+ error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
}
if (error)
goto out_unreserve_quota;
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- -((int64_t)indlen), 0);
+ error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
if (error)
goto out_unreserve_blocks;
@@ -4198,9 +4167,9 @@ xfs_bmapi_reserve_delalloc(
out_unreserve_blocks:
if (rt)
- xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
+ xfs_mod_frextents(mp, extsz);
else
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
+ xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
@@ -4801,7 +4770,7 @@ xfs_bmap_del_extent(
got.br_startblock, got.br_blockcount,
&i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
da_old = da_new = 0;
} else {
@@ -4835,7 +4804,7 @@ xfs_bmap_del_extent(
}
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
break;
case 2:
@@ -4935,7 +4904,8 @@ xfs_bmap_del_extent(
got.br_startblock,
temp, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ i == 1, done);
/*
* Update the btree record back
* to the original value.
@@ -4956,7 +4926,7 @@ xfs_bmap_del_extent(
error = -ENOSPC;
goto done;
}
- XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
} else
flags |= xfs_ilog_fext(whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork,
@@ -5012,10 +4982,8 @@ xfs_bmap_del_extent(
* Nothing to do for disk quota accounting here.
*/
ASSERT(da_old >= da_new);
- if (da_old > da_new) {
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- (int64_t)(da_old - da_new), 0);
- }
+ if (da_old > da_new)
+ xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
*logflagsp = flags;
return error;
@@ -5284,14 +5252,13 @@ xfs_bunmapi(
rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
do_div(rtexts, mp->m_sb.sb_rextsize);
- xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
- (int64_t)rtexts, 0);
+ xfs_mod_frextents(mp, (int64_t)rtexts);
(void)xfs_trans_reserve_quota_nblks(NULL,
ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_RTBLKS);
} else {
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- (int64_t)del.br_blockcount, 0);
+ xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount,
+ false);
(void)xfs_trans_reserve_quota_nblks(NULL,
ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_REGBLKS);
@@ -5453,6 +5420,7 @@ xfs_bmse_merge(
struct xfs_bmbt_irec left;
xfs_filblks_t blockcount;
int error, i;
+ struct xfs_mount *mp = ip->i_mount;
xfs_bmbt_get_all(gotp, &got);
xfs_bmbt_get_all(leftp, &left);
@@ -5487,19 +5455,19 @@ xfs_bmse_merge(
got.br_blockcount, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
error = xfs_btree_delete(cur, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
/* lookup and update size of the previous extent */
error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
left.br_blockcount, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
left.br_blockcount = blockcount;
@@ -5518,50 +5486,92 @@ xfs_bmse_shift_one(
int *current_ext,
struct xfs_bmbt_rec_host *gotp,
struct xfs_btree_cur *cur,
- int *logflags)
+ int *logflags,
+ enum shift_direction direction)
{
struct xfs_ifork *ifp;
+ struct xfs_mount *mp;
xfs_fileoff_t startoff;
- struct xfs_bmbt_rec_host *leftp;
+ struct xfs_bmbt_rec_host *adj_irecp;
struct xfs_bmbt_irec got;
- struct xfs_bmbt_irec left;
+ struct xfs_bmbt_irec adj_irec;
int error;
int i;
+ int total_extents;
+ mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
+ total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
xfs_bmbt_get_all(gotp, &got);
- startoff = got.br_startoff - offset_shift_fsb;
/* delalloc extents should be prevented by caller */
- XFS_WANT_CORRUPTED_RETURN(!isnullstartblock(got.br_startblock));
+ XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
- /*
- * Check for merge if we've got an extent to the left, otherwise make
- * sure there's enough room at the start of the file for the shift.
- */
- if (*current_ext) {
- /* grab the left extent and check for a large enough hole */
- leftp = xfs_iext_get_ext(ifp, *current_ext - 1);
- xfs_bmbt_get_all(leftp, &left);
+ if (direction == SHIFT_LEFT) {
+ startoff = got.br_startoff - offset_shift_fsb;
+
+ /*
+ * Check for merge if we've got an extent to the left,
+ * otherwise make sure there's enough room at the start
+ * of the file for the shift.
+ */
+ if (!*current_ext) {
+ if (got.br_startoff < offset_shift_fsb)
+ return -EINVAL;
+ goto update_current_ext;
+ }
+ /*
+ * grab the left extent and check for a large
+ * enough hole.
+ */
+ adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
+ xfs_bmbt_get_all(adj_irecp, &adj_irec);
- if (startoff < left.br_startoff + left.br_blockcount)
+ if (startoff <
+ adj_irec.br_startoff + adj_irec.br_blockcount)
return -EINVAL;
/* check whether to merge the extent or shift it down */
- if (xfs_bmse_can_merge(&left, &got, offset_shift_fsb)) {
+ if (xfs_bmse_can_merge(&adj_irec, &got,
+ offset_shift_fsb)) {
return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
- *current_ext, gotp, leftp, cur,
- logflags);
+ *current_ext, gotp, adj_irecp,
+ cur, logflags);
}
- } else if (got.br_startoff < offset_shift_fsb)
- return -EINVAL;
-
+ } else {
+ startoff = got.br_startoff + offset_shift_fsb;
+ /* nothing to move if this is the last extent */
+ if (*current_ext >= (total_extents - 1))
+ goto update_current_ext;
+ /*
+			 * If this is not the last extent in the file, make sure there
+			 * is enough room between the current extent and the next extent
+			 * to accommodate the shift.
+ */
+ adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
+ xfs_bmbt_get_all(adj_irecp, &adj_irec);
+ if (startoff + got.br_blockcount > adj_irec.br_startoff)
+ return -EINVAL;
+ /*
+ * Unlike a left shift (which involves a hole punch),
+ * a right shift does not modify extent neighbors
+ * in any way. We should never find mergeable extents
+		 * in this scenario. Check anyway and warn if we
+ * encounter two extents that could be one.
+ */
+ if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
+ WARN_ON_ONCE(1);
+ }
/*
* Increment the extent index for the next iteration, update the start
* offset of the in-core extent and update the btree if applicable.
*/
- (*current_ext)++;
+update_current_ext:
+ if (direction == SHIFT_LEFT)
+ (*current_ext)++;
+ else
+ (*current_ext)--;
xfs_bmbt_set_startoff(gotp, startoff);
*logflags |= XFS_ILOG_CORE;
if (!cur) {
@@ -5573,18 +5583,18 @@ xfs_bmse_shift_one(
got.br_blockcount, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
got.br_startoff = startoff;
return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
- got.br_blockcount, got.br_state);
+ got.br_blockcount, got.br_state);
}
/*
- * Shift extent records to the left to cover a hole.
+ * Shift extent records to the left/right to cover/create a hole.
*
* The maximum number of extents to be shifted in a single operation is
- * @num_exts. @start_fsb specifies the file offset to start the shift and the
+ * @num_exts. @stop_fsb specifies the file offset at which to stop the shift and the
* file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
* is the length by which each extent is shifted. If there is no hole to shift
* the extents into, this will be considered invalid operation and we abort
@@ -5594,12 +5604,13 @@ int
xfs_bmap_shift_extents(
struct xfs_trans *tp,
struct xfs_inode *ip,
- xfs_fileoff_t start_fsb,
+ xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
int *done,
- xfs_fileoff_t *next_fsb,
+ xfs_fileoff_t stop_fsb,
xfs_fsblock_t *firstblock,
struct xfs_bmap_free *flist,
+ enum shift_direction direction,
int num_exts)
{
struct xfs_btree_cur *cur = NULL;
@@ -5609,10 +5620,11 @@ xfs_bmap_shift_extents(
struct xfs_ifork *ifp;
xfs_extnum_t nexts = 0;
xfs_extnum_t current_ext;
+ xfs_extnum_t total_extents;
+ xfs_extnum_t stop_extent;
int error = 0;
int whichfork = XFS_DATA_FORK;
int logflags = 0;
- int total_extents;
if (unlikely(XFS_TEST_ERROR(
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -5628,6 +5640,8 @@ xfs_bmap_shift_extents(
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
+ ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
ifp = XFS_IFORK_PTR(ip, whichfork);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -5645,43 +5659,83 @@ xfs_bmap_shift_extents(
}
/*
+ * There may be delalloc extents in the data fork before the range we
+ * are collapsing out, so we cannot use the count of real extents here.
+ * Instead we have to calculate it from the incore fork.
+ */
+ total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+ if (total_extents == 0) {
+ *done = 1;
+ goto del_cursor;
+ }
+
+ /*
+	 * In the case of the first right shift, we need to initialize next_fsb.
+ */
+ if (*next_fsb == NULLFSBLOCK) {
+ gotp = xfs_iext_get_ext(ifp, total_extents - 1);
+ xfs_bmbt_get_all(gotp, &got);
+ *next_fsb = got.br_startoff;
+ if (stop_fsb > *next_fsb) {
+ *done = 1;
+ goto del_cursor;
+ }
+ }
+
+	/* Look up the extent index at which we have to stop */
+ if (direction == SHIFT_RIGHT) {
+ gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
+ /* Make stop_extent exclusive of shift range */
+ stop_extent--;
+ } else
+ stop_extent = total_extents;
+
+ /*
* Look up the extent index for the fsb where we start shifting. We can
* henceforth iterate with current_ext as extent list changes are locked
* out via ilock.
*
* gotp can be null in 2 cases: 1) if there are no extents or 2)
- * start_fsb lies in a hole beyond which there are no extents. Either
+ * *next_fsb lies in a hole beyond which there are no extents. Either
* way, we are done.
*/
- gotp = xfs_iext_bno_to_ext(ifp, start_fsb, &current_ext);
+ gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
if (!gotp) {
*done = 1;
goto del_cursor;
}
- /*
- * There may be delalloc extents in the data fork before the range we
- * are collapsing out, so we cannot use the count of real extents here.
- * Instead we have to calculate it from the incore fork.
- */
- total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
- while (nexts++ < num_exts && current_ext < total_extents) {
+ /* some sanity checking before we finally start shifting extents */
+ if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
+ (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
+ error = -EIO;
+ goto del_cursor;
+ }
+
+ while (nexts++ < num_exts) {
error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
- &current_ext, gotp, cur, &logflags);
+ &current_ext, gotp, cur, &logflags,
+ direction);
if (error)
goto del_cursor;
+ /*
+ * If there was an extent merge during the shift, the extent
+		 * count can change. Update the total and grab the next record.
+ */
+ if (direction == SHIFT_LEFT) {
+ total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+ stop_extent = total_extents;
+ }
- /* update total extent count and grab the next record */
- total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
- if (current_ext >= total_extents)
+ if (current_ext == stop_extent) {
+ *done = 1;
+ *next_fsb = NULLFSBLOCK;
break;
+ }
gotp = xfs_iext_get_ext(ifp, current_ext);
}
- /* Check if we are done */
- if (current_ext == total_extents) {
- *done = 1;
- } else if (next_fsb) {
+ if (!*done) {
xfs_bmbt_get_all(gotp, &got);
*next_fsb = got.br_startoff;
}
@@ -5696,3 +5750,189 @@ del_cursor:
return error;
}
+
+/*
+ * Splits an extent into two extents at the split_fsb block such that split_fsb
+ * becomes the first block of the new extent. @current_ext is the target extent
+ * to be split. @split_fsb is the block at which the extent is split.
+ * If split_fsb lies in a hole or at the first block of an extent, just return 0.
+ */
+STATIC int
+xfs_bmap_split_extent_at(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ xfs_fileoff_t split_fsb,
+ xfs_fsblock_t *firstfsb,
+ struct xfs_bmap_free *free_list)
+{
+ int whichfork = XFS_DATA_FORK;
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_bmbt_rec_host *gotp;
+ struct xfs_bmbt_irec got;
+ struct xfs_bmbt_irec new; /* split extent */
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp;
+ xfs_fsblock_t gotblkcnt; /* new block count for got */
+ xfs_extnum_t current_ext;
+ int error = 0;
+ int logflags = 0;
+ int i = 0;
+
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
+ XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ /* Read in all the extents */
+ error = xfs_iread_extents(tp, ip, whichfork);
+ if (error)
+ return error;
+ }
+
+ /*
+ * gotp can be null in 2 cases: 1) if there are no extents
+ * or 2) split_fsb lies in a hole beyond which there are
+ * no extents. Either way, we are done.
+ */
+ gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
+ if (!gotp)
+ return 0;
+
+ xfs_bmbt_get_all(gotp, &got);
+
+ /*
+	 * Check if split_fsb lies in a hole or at the start boundary
+	 * offset of the extent.
+ */
+ if (got.br_startoff >= split_fsb)
+ return 0;
+
+ gotblkcnt = split_fsb - got.br_startoff;
+ new.br_startoff = split_fsb;
+ new.br_startblock = got.br_startblock + gotblkcnt;
+ new.br_blockcount = got.br_blockcount - gotblkcnt;
+ new.br_state = got.br_state;
+
+ if (ifp->if_flags & XFS_IFBROOT) {
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ cur->bc_private.b.firstblock = *firstfsb;
+ cur->bc_private.b.flist = free_list;
+ cur->bc_private.b.flags = 0;
+ error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+ got.br_startblock,
+ got.br_blockcount,
+ &i);
+ if (error)
+ goto del_cursor;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
+ }
+
+ xfs_bmbt_set_blockcount(gotp, gotblkcnt);
+ got.br_blockcount = gotblkcnt;
+
+ logflags = XFS_ILOG_CORE;
+ if (cur) {
+ error = xfs_bmbt_update(cur, got.br_startoff,
+ got.br_startblock,
+ got.br_blockcount,
+ got.br_state);
+ if (error)
+ goto del_cursor;
+ } else
+ logflags |= XFS_ILOG_DEXT;
+
+ /* Add new extent */
+ current_ext++;
+ xfs_iext_insert(ip, current_ext, 1, &new, 0);
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+
+ if (cur) {
+ error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
+ new.br_startblock, new.br_blockcount,
+ &i);
+ if (error)
+ goto del_cursor;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
+ cur->bc_rec.b.br_state = new.br_state;
+
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto del_cursor;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
+ }
+
+ /*
+ * Convert to a btree if necessary.
+ */
+ if (xfs_bmap_needs_btree(ip, whichfork)) {
+ int tmp_logflags; /* partial log flag return val */
+
+ ASSERT(cur == NULL);
+ error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list,
+ &cur, 0, &tmp_logflags, whichfork);
+ logflags |= tmp_logflags;
+ }
+
+del_cursor:
+ if (cur) {
+ cur->bc_private.b.allocated = 0;
+ xfs_btree_del_cursor(cur,
+ error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ }
+
+ if (logflags)
+ xfs_trans_log_inode(tp, ip, logflags);
+ return error;
+}
+
+int
+xfs_bmap_split_extent(
+ struct xfs_inode *ip,
+ xfs_fileoff_t split_fsb)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ struct xfs_bmap_free free_list;
+ xfs_fsblock_t firstfsb;
+ int committed;
+ int error;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+ XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ xfs_bmap_init(&free_list, &firstfsb);
+
+ error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
+ &firstfsb, &free_list);
+ if (error)
+ goto out;
+
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
+ if (error)
+ goto out;
+
+ return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+
+out:
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ return error;
+}
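
The split arithmetic in xfs_bmap_split_extent_at() is terse, so here is a minimal stand-alone sketch of the same calculation (illustrative only; the struct and function names below are not part of the patch):

	/* Mirrors the got/new arithmetic above; illustrative types only. */
	struct irec { unsigned long long startoff, startblock, blockcount; };

	static void split_at(struct irec *got, unsigned long long split_fsb,
			     struct irec *new)
	{
		unsigned long long gotblkcnt = split_fsb - got->startoff;

		new->startoff = split_fsb;                     /* new extent begins at the split block */
		new->startblock = got->startblock + gotblkcnt; /* stays physically contiguous */
		new->blockcount = got->blockcount - gotblkcnt; /* remainder of the original extent */
		got->blockcount = gotblkcnt;                   /* left part keeps the head */
	}

For example, splitting (startoff 100, startblock 5000, blockcount 40) at split_fsb 110 trims got to (100, 5000, 10) and yields new as (110, 5010, 30).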
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index b9d8a499d2c4..6aaa0c1c7200 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -166,6 +166,11 @@ static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
*/
#define XFS_BMAP_MAX_SHIFT_EXTENTS 1
+enum shift_direction {
+ SHIFT_LEFT = 0,
+ SHIFT_RIGHT,
+};
+
#ifdef DEBUG
void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
int whichfork, unsigned long caller_ip);
@@ -211,8 +216,10 @@ int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
xfs_extnum_t num);
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
- xfs_fileoff_t start_fsb, xfs_fileoff_t offset_shift_fsb,
- int *done, xfs_fileoff_t *next_fsb, xfs_fsblock_t *firstblock,
- struct xfs_bmap_free *flist, int num_exts);
+ xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
+ int *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
+ struct xfs_bmap_free *flist, enum shift_direction direction,
+ int num_exts);
+int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
#endif /* __XFS_BMAP_H__ */
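
Given the reworked prototype above, a caller is expected to drive xfs_bmap_shift_extents() in a loop until *done is set, passing SHIFT_LEFT or SHIFT_RIGHT. A rough sketch of such a loop follows; transaction setup, quota handling and error paths are elided, and the surrounding declarations (mp, ip, tp, offset, shift_fsb) are assumed rather than shown:

	int done = 0, error = 0;
	xfs_fileoff_t next_fsb = NULLFSBLOCK;	/* SHIFT_RIGHT: callee initialises it */
	xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fsblock_t first_block;
	struct xfs_bmap_free free_list;

	while (!error && !done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
					       &done, stop_fsb, &first_block,
					       &free_list, SHIFT_RIGHT,
					       XFS_BMAP_MAX_SHIFT_EXTENTS);
		/* commit free_list/tp and start a fresh transaction each iteration */
	}

The real caller is xfs_shift_file_space() in fs/xfs/xfs_bmap_util.c, which appears later in this series.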
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 81cad433df85..c72283dd8d44 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -168,7 +168,7 @@ xfs_btree_check_lptr(
xfs_fsblock_t bno, /* btree block disk address */
int level) /* btree block level */
{
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
level > 0 &&
bno != NULLFSBLOCK &&
XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
@@ -187,7 +187,7 @@ xfs_btree_check_sptr(
{
xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks;
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
level > 0 &&
bno != NULLAGBLOCK &&
bno != 0 &&
@@ -1825,7 +1825,7 @@ xfs_btree_lookup(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
@@ -2285,7 +2285,7 @@ xfs_btree_rshift(
if (error)
goto error0;
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
error = xfs_btree_increment(tcur, level, &i);
if (error)
@@ -3138,7 +3138,7 @@ xfs_btree_insert(
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
level++;
/*
@@ -3582,15 +3582,15 @@ xfs_btree_delrec(
* Actually any entry but the first would suffice.
*/
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
/* Grab a pointer to the block. */
right = xfs_btree_get_block(tcur, level, &rbp);
@@ -3634,12 +3634,12 @@ xfs_btree_delrec(
rrecs = xfs_btree_get_numrecs(right);
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
}
}
@@ -3653,13 +3653,13 @@ xfs_btree_delrec(
* previous block.
*/
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
/* Grab a pointer to the block. */
left = xfs_btree_get_block(tcur, level, &lbp);
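
The bulk of this hunk simply threads a struct xfs_mount into the corruption-check macros so the report can be attributed to a specific mount. The macro bodies live in fs/xfs/xfs_error.h and are not part of this diff; they are expected to have roughly the following shape (sketch, not the authoritative definition):

	#define XFS_WANT_CORRUPTED_GOTO(mp, x, l)				\
		{								\
			int fs_is_ok = (x);					\
			ASSERT(fs_is_ok);					\
			if (!fs_is_ok) {					\
				XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO",	\
						 XFS_ERRLEVEL_LOW, (mp));	\
				error = -EFSCORRUPTED;				\
				goto l;						\
			}							\
		}

	#define XFS_WANT_CORRUPTED_RETURN(mp, x)				\
		{								\
			int fs_is_ok = (x);					\
			ASSERT(fs_is_ok);					\
			if (!fs_is_ok) {					\
				XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN",	\
						 XFS_ERRLEVEL_LOW, (mp));	\
				return -EFSCORRUPTED;				\
			}							\
		}

Both expand in a context where a local 'error' variable or a direct return is available, which is why the call sites above need no other change.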
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 9cb0115c6bd1..2385f8cd08ab 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -538,12 +538,12 @@ xfs_da3_root_split(
oldroot = blk1->bp->b_addr;
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
- struct xfs_da3_icnode_hdr nodehdr;
+ struct xfs_da3_icnode_hdr icnodehdr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
+ dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
btree = dp->d_ops->node_tree_p(oldroot);
- size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
- level = nodehdr.level;
+ size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
+ level = icnodehdr.level;
/*
* we are about to copy oldroot to bp, so set up the type
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 0a49b0286372..74bcbabfa523 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -725,7 +725,13 @@ struct xfs_attr3_icleaf_hdr {
__uint16_t magic;
__uint16_t count;
__uint16_t usedbytes;
- __uint16_t firstused;
+ /*
+ * firstused is 32-bit here instead of 16-bit like the on-disk variant
+	 * to support a maximum fsb size of 64k without overflow issues throughout
+ * the attr code. Instead, the overflow condition is handled on
+ * conversion to/from disk.
+ */
+ __uint32_t firstused;
__u8 holes;
struct {
__uint16_t base;
@@ -734,6 +740,12 @@ struct xfs_attr3_icleaf_hdr {
};
/*
+ * Special value to represent fs block size in the leaf header firstused field.
+ * Only used when the block size overflows the 2 bytes available on disk.
+ */
+#define XFS_ATTR3_LEAF_NULLOFF 0
+
+/*
* Flags used in the leaf_entry[i].flags field.
* NOTE: the INCOMPLETE bit must not collide with the flags bits specified
* on the system call, they are "or"ed together for various operations.
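
The widened in-core firstused field and the XFS_ATTR3_LEAF_NULLOFF sentinel only make sense together with the to/from-disk conversion helpers, which are not shown in this hunk. Under the assumption that the encode side clamps a block-sized value to the sentinel, it might look roughly like this (sketch only; the helper name is hypothetical):

	/* Sketch: the real conversion helpers live in the attr leaf code. */
	static __uint16_t firstused_to_disk(__uint32_t firstused)
	{
		if (firstused > USHRT_MAX) {
			/* only possible when firstused equals a 64k block size */
			return XFS_ATTR3_LEAF_NULLOFF;
		}
		return (__uint16_t)firstused;
	}

The decode direction does the inverse, expanding the sentinel back to the filesystem block size.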
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 5ff31be9b1cd..de1ea16f5748 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -89,7 +89,7 @@ __xfs_dir3_data_check(
* so just ensure that the count falls somewhere inside the
* block right now.
*/
- XFS_WANT_CORRUPTED_RETURN(be32_to_cpu(btp->count) <
+ XFS_WANT_CORRUPTED_RETURN(mp, be32_to_cpu(btp->count) <
((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry));
break;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
@@ -107,21 +107,21 @@ __xfs_dir3_data_check(
bf = ops->data_bestfree_p(hdr);
count = lastfree = freeseen = 0;
if (!bf[0].length) {
- XFS_WANT_CORRUPTED_RETURN(!bf[0].offset);
+ XFS_WANT_CORRUPTED_RETURN(mp, !bf[0].offset);
freeseen |= 1 << 0;
}
if (!bf[1].length) {
- XFS_WANT_CORRUPTED_RETURN(!bf[1].offset);
+ XFS_WANT_CORRUPTED_RETURN(mp, !bf[1].offset);
freeseen |= 1 << 1;
}
if (!bf[2].length) {
- XFS_WANT_CORRUPTED_RETURN(!bf[2].offset);
+ XFS_WANT_CORRUPTED_RETURN(mp, !bf[2].offset);
freeseen |= 1 << 2;
}
- XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[0].length) >=
+ XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[0].length) >=
be16_to_cpu(bf[1].length));
- XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[1].length) >=
+ XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[1].length) >=
be16_to_cpu(bf[2].length));
/*
* Loop over the data/unused entries.
@@ -134,18 +134,18 @@ __xfs_dir3_data_check(
* doesn't need to be there.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- XFS_WANT_CORRUPTED_RETURN(lastfree == 0);
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp, lastfree == 0);
+ XFS_WANT_CORRUPTED_RETURN(mp,
be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
(char *)dup - (char *)hdr);
dfp = xfs_dir2_data_freefind(hdr, bf, dup);
if (dfp) {
i = (int)(dfp - bf);
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
(freeseen & (1 << i)) == 0);
freeseen |= 1 << i;
} else {
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
be16_to_cpu(dup->length) <=
be16_to_cpu(bf[2].length));
}
@@ -160,13 +160,13 @@ __xfs_dir3_data_check(
* The linear search is crude but this is DEBUG code.
*/
dep = (xfs_dir2_data_entry_t *)p;
- XFS_WANT_CORRUPTED_RETURN(dep->namelen != 0);
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp, dep->namelen != 0);
+ XFS_WANT_CORRUPTED_RETURN(mp,
!xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)));
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
be16_to_cpu(*ops->data_entry_tag_p(dep)) ==
(char *)dep - (char *)hdr);
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX);
count++;
lastfree = 0;
@@ -183,14 +183,15 @@ __xfs_dir3_data_check(
be32_to_cpu(lep[i].hashval) == hash)
break;
}
- XFS_WANT_CORRUPTED_RETURN(i < be32_to_cpu(btp->count));
+ XFS_WANT_CORRUPTED_RETURN(mp,
+ i < be32_to_cpu(btp->count));
}
p += ops->data_entsize(dep->namelen);
}
/*
* Need to have seen all the entries and all the bestfree slots.
*/
- XFS_WANT_CORRUPTED_RETURN(freeseen == 7);
+ XFS_WANT_CORRUPTED_RETURN(mp, freeseen == 7);
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
@@ -198,13 +199,13 @@ __xfs_dir3_data_check(
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
if (i > 0)
- XFS_WANT_CORRUPTED_RETURN(
+ XFS_WANT_CORRUPTED_RETURN(mp,
be32_to_cpu(lep[i].hashval) >=
be32_to_cpu(lep[i - 1].hashval));
}
- XFS_WANT_CORRUPTED_RETURN(count ==
+ XFS_WANT_CORRUPTED_RETURN(mp, count ==
be32_to_cpu(btp->count) - be32_to_cpu(btp->stale));
- XFS_WANT_CORRUPTED_RETURN(stale == be32_to_cpu(btp->stale));
+ XFS_WANT_CORRUPTED_RETURN(mp, stale == be32_to_cpu(btp->stale));
}
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 8eb718979383..4daaa662337b 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -264,68 +264,6 @@ typedef struct xfs_dsb {
/* must be padded to 64 bit alignment */
} xfs_dsb_t;
-/*
- * Sequence number values for the fields.
- */
-typedef enum {
- XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
- XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
- XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
- XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
- XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
- XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
- XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
- XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
- XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
- XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
- XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
- XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
- XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT,
- XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT,
- XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD,
- XFS_SBS_PQUOTINO, XFS_SBS_LSN,
- XFS_SBS_FIELDCOUNT
-} xfs_sb_field_t;
-
-/*
- * Mask values, defined based on the xfs_sb_field_t values.
- * Only define the ones we're using.
- */
-#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x)
-#define XFS_SB_UUID XFS_SB_MVAL(UUID)
-#define XFS_SB_FNAME XFS_SB_MVAL(FNAME)
-#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO)
-#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO)
-#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO)
-#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM)
-#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO)
-#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO)
-#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS)
-#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN)
-#define XFS_SB_UNIT XFS_SB_MVAL(UNIT)
-#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH)
-#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
-#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
-#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
-#define XFS_SB_FEATURES2 (XFS_SB_MVAL(FEATURES2) | \
- XFS_SB_MVAL(BAD_FEATURES2))
-#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
-#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
-#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
-#define XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT)
-#define XFS_SB_CRC XFS_SB_MVAL(CRC)
-#define XFS_SB_PQUOTINO XFS_SB_MVAL(PQUOTINO)
-#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT)
-#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1)
-#define XFS_SB_MOD_BITS \
- (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
- XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
- XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
- XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
- XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \
- XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \
- XFS_SB_PQUOTINO)
-
/*
* Misc. Flags - warning - these will be cleared by xfs_repair unless
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 116ef1ddb3e3..07349a183a11 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -376,7 +376,8 @@ xfs_ialloc_ag_alloc(
*/
newlen = args.mp->m_ialloc_inos;
if (args.mp->m_maxicount &&
- args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
+ percpu_counter_read(&args.mp->m_icount) + newlen >
+ args.mp->m_maxicount)
return -ENOSPC;
args.minlen = args.maxlen = args.mp->m_ialloc_blks;
/*
@@ -700,7 +701,7 @@ xfs_ialloc_next_rec(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
}
return 0;
@@ -724,7 +725,7 @@ xfs_ialloc_get_rec(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
}
return 0;
@@ -783,12 +784,12 @@ xfs_dialloc_ag_inobt(
error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
error = xfs_inobt_get_rec(cur, &rec, &j);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(j == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0);
if (rec.ir_freecount > 0) {
/*
@@ -944,19 +945,19 @@ newino:
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
for (;;) {
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
if (rec.ir_freecount > 0)
break;
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
}
alloc_inode:
@@ -1016,7 +1017,7 @@ xfs_dialloc_ag_finobt_near(
error = xfs_inobt_get_rec(lcur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1);
/*
* See if we've landed in the parent inode record. The finobt
@@ -1039,10 +1040,10 @@ xfs_dialloc_ag_finobt_near(
error = xfs_inobt_get_rec(rcur, &rrec, &j);
if (error)
goto error_rcur;
- XFS_WANT_CORRUPTED_GOTO(j == 1, error_rcur);
+ XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur);
}
- XFS_WANT_CORRUPTED_GOTO(i == 1 || j == 1, error_rcur);
+ XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur);
if (i == 1 && j == 1) {
/*
* Both the left and right records are valid. Choose the closer
@@ -1095,7 +1096,7 @@ xfs_dialloc_ag_finobt_newino(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
return 0;
}
}
@@ -1106,12 +1107,12 @@ xfs_dialloc_ag_finobt_newino(
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
return 0;
}
@@ -1133,19 +1134,19 @@ xfs_dialloc_ag_update_inobt(
error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
rec.ir_free &= ~XFS_INOBT_MASK(offset);
rec.ir_freecount--;
- XFS_WANT_CORRUPTED_RETURN((rec.ir_free == frec->ir_free) &&
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) &&
(rec.ir_freecount == frec->ir_freecount));
return xfs_inobt_update(cur, &rec);
@@ -1340,7 +1341,8 @@ xfs_dialloc(
* inode.
*/
if (mp->m_maxicount &&
- mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) {
+ percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos >
+ mp->m_maxicount) {
noroom = 1;
okalloc = 0;
}
@@ -1475,14 +1477,14 @@ xfs_difree_inobt(
__func__, error);
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error) {
xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
__func__, error);
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
/*
* Get the offset in the inode chunk.
*/
@@ -1592,7 +1594,7 @@ xfs_difree_finobt(
* freed an inode in a previously fully allocated chunk. If not,
* something is out of sync.
*/
- XFS_WANT_CORRUPTED_GOTO(ibtrec->ir_freecount == 1, error);
+ XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error);
error = xfs_inobt_insert_rec(cur, ibtrec->ir_freecount,
ibtrec->ir_free, &i);
@@ -1613,12 +1615,12 @@ xfs_difree_finobt(
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
rec.ir_free |= XFS_INOBT_MASK(offset);
rec.ir_freecount++;
- XFS_WANT_CORRUPTED_GOTO((rec.ir_free == ibtrec->ir_free) &&
+ XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) &&
(rec.ir_freecount == ibtrec->ir_freecount),
error);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index b0a5fe95a3e2..dc4bfc5d88fc 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -111,14 +111,6 @@ xfs_mount_validate_sb(
bool check_inprogress,
bool check_version)
{
-
- /*
- * If the log device and data device have the
- * same device number, the log is internal.
- * Consequently, the sb_logstart should be non-zero. If
- * we have a zero sb_logstart in this case, we may be trying to mount
- * a volume filesystem in a non-volume manner.
- */
if (sbp->sb_magicnum != XFS_SB_MAGIC) {
xfs_warn(mp, "bad magic number");
return -EWRONGFS;
@@ -743,17 +735,15 @@ xfs_initialize_perag_data(
btree += pag->pagf_btreeblks;
xfs_perag_put(pag);
}
- /*
- * Overwrite incore superblock counters with just-read data
- */
+
+ /* Overwrite incore superblock counters with just-read data */
spin_lock(&mp->m_sb_lock);
sbp->sb_ifree = ifree;
sbp->sb_icount = ialloc;
sbp->sb_fdblocks = bfree + bfreelst + btree;
spin_unlock(&mp->m_sb_lock);
- /* Fixup the per-cpu counters as well. */
- xfs_icsb_reinit_counters(mp);
+ xfs_reinit_percpu_counters(mp);
return 0;
}
@@ -771,6 +761,10 @@ xfs_log_sb(
struct xfs_mount *mp = tp->t_mountp;
struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0);
+ mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
+ mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
+ mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
+
xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
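
Throughout this series the old xfs_mod_incore_sb()/xfs_icsb_modify_counters() call sites collapse onto per-counter helpers. From the call sites alone, the calling convention is as follows (declarations reproduced for reference; the bodies live in fs/xfs/xfs_mount.c and are not shown here):

	int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool rsvd);
	int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

	/* e.g. return a delalloc over-reservation without touching the reserve pool */
	xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);

The final bool selects whether the caller may dip into the reserved block pool when free space runs low.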
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3a9b7a1b8704..a56960dd1684 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -31,7 +31,6 @@
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
-#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
@@ -1233,6 +1232,117 @@ xfs_vm_releasepage(
return try_to_free_buffers(page);
}
+/*
+ * When we map a DIO buffer, we may need to attach an ioend that describes the
+ * type of write IO we are doing. This passes to the completion function the
+ * operations it needs to perform. If the mapping is for an overwrite wholly
+ * within the EOF then we don't need an ioend and so we don't allocate one.
+ * This avoids the unnecessary overhead of allocating and freeing ioends for
+ * workloads that don't require transactions on IO completion.
+ *
+ * If we get multiple mappings in a single IO, we might be mapping different
+ * types. But because the direct IO can only have a single private pointer, we
+ * need to ensure that:
+ *
+ * a) i) the ioend spans the entire region of unwritten mappings; or
+ * ii) the ioend spans all the mappings that cross or are beyond EOF; and
+ * b) if it contains unwritten extents, it is *permanently* marked as such
+ *
+ * We could do this by chaining ioends like buffered IO does, but we only
+ * actually get one IO completion callback from the direct IO, and that spans
+ * the entire IO regardless of how many mappings and IOs are needed to complete
+ * the DIO. There is only going to be one reference to the ioend and its life
+ * cycle is constrained by the DIO completion code. Hence we don't need
+ * reference counting here.
+ */
+static void
+xfs_map_direct(
+ struct inode *inode,
+ struct buffer_head *bh_result,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
+{
+ struct xfs_ioend *ioend;
+ xfs_off_t size = bh_result->b_size;
+ int type;
+
+ if (ISUNWRITTEN(imap))
+ type = XFS_IO_UNWRITTEN;
+ else
+ type = XFS_IO_OVERWRITE;
+
+ trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);
+
+ if (bh_result->b_private) {
+ ioend = bh_result->b_private;
+ ASSERT(ioend->io_size > 0);
+ ASSERT(offset >= ioend->io_offset);
+ if (offset + size > ioend->io_offset + ioend->io_size)
+ ioend->io_size = offset - ioend->io_offset + size;
+
+ if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
+ ioend->io_type = XFS_IO_UNWRITTEN;
+
+ trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
+ ioend->io_size, ioend->io_type,
+ imap);
+ } else if (type == XFS_IO_UNWRITTEN ||
+ offset + size > i_size_read(inode)) {
+ ioend = xfs_alloc_ioend(inode, type);
+ ioend->io_offset = offset;
+ ioend->io_size = size;
+
+ bh_result->b_private = ioend;
+ set_buffer_defer_completion(bh_result);
+
+ trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
+ imap);
+ } else {
+ trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
+ imap);
+ }
+}
+
+/*
+ * If this is O_DIRECT or the mpage code calling, tell them how large the mapping
+ * is, so that we can avoid repeated get_blocks calls.
+ *
+ * If the mapping spans EOF, then we have to break the mapping up as the mapping
+ * for blocks beyond EOF must be marked new so that sub block regions can be
+ * correctly zeroed. We can't do this for mappings within EOF unless the mapping
+ * was just allocated or is unwritten, otherwise the callers would overwrite
+ * existing data with zeros. Hence we have to split the mapping into a range up
+ * to and including EOF, and a second mapping for beyond EOF.
+ */
+static void
+xfs_map_trim_size(
+ struct inode *inode,
+ sector_t iblock,
+ struct buffer_head *bh_result,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset,
+ ssize_t size)
+{
+ xfs_off_t mapping_size;
+
+ mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
+ mapping_size <<= inode->i_blkbits;
+
+ ASSERT(mapping_size > 0);
+ if (mapping_size > size)
+ mapping_size = size;
+ if (offset < i_size_read(inode) &&
+ offset + mapping_size >= i_size_read(inode)) {
+ /* limit mapping to block that spans EOF */
+ mapping_size = roundup_64(i_size_read(inode) - offset,
+ 1 << inode->i_blkbits);
+ }
+ if (mapping_size > LONG_MAX)
+ mapping_size = LONG_MAX;
+
+ bh_result->b_size = mapping_size;
+}
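
The EOF clamp in xfs_map_trim_size() is easier to follow with concrete numbers; here is a stand-alone rendition of just that branch, using plain integer types instead of the kernel ones (illustrative only, not part of the patch):

	static long long trim_to_eof(long long offset, long long mapping_size,
				     long long isize, unsigned int blkbits)
	{
		long long blksize = 1LL << blkbits;

		/* only clamp mappings that start inside EOF and extend across it */
		if (offset < isize && offset + mapping_size >= isize)
			mapping_size = ((isize - offset) + blksize - 1) & ~(blksize - 1);
		return mapping_size;
	}

With 4k blocks (blkbits = 12), offset = 8192, isize = 10000 and an 8k mapping, this returns 4096: the mapping is cut back to the single block that spans EOF so sub-block zeroing works correctly.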
+
STATIC int
__xfs_get_blocks(
struct inode *inode,
@@ -1321,31 +1431,37 @@ __xfs_get_blocks(
xfs_iunlock(ip, lockmode);
}
-
- trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+ trace_xfs_get_blocks_alloc(ip, offset, size,
+ ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
+ : XFS_IO_DELALLOC, &imap);
} else if (nimaps) {
- trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+ trace_xfs_get_blocks_found(ip, offset, size,
+ ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
+ : XFS_IO_OVERWRITE, &imap);
xfs_iunlock(ip, lockmode);
} else {
trace_xfs_get_blocks_notfound(ip, offset, size);
goto out_unlock;
}
+ /* trim mapping down to size requested */
+ if (direct || size > (1 << inode->i_blkbits))
+ xfs_map_trim_size(inode, iblock, bh_result,
+ &imap, offset, size);
+
+ /*
+ * For unwritten extents do not report a disk address in the buffered
+ * read case (treat as if we're reading into a hole).
+ */
if (imap.br_startblock != HOLESTARTBLOCK &&
- imap.br_startblock != DELAYSTARTBLOCK) {
- /*
- * For unwritten extents do not report a disk address on
- * the read case (treat as if we're reading into a hole).
- */
- if (create || !ISUNWRITTEN(&imap))
- xfs_map_buffer(inode, bh_result, &imap, offset);
- if (create && ISUNWRITTEN(&imap)) {
- if (direct) {
- bh_result->b_private = inode;
- set_buffer_defer_completion(bh_result);
- }
+ imap.br_startblock != DELAYSTARTBLOCK &&
+ (create || !ISUNWRITTEN(&imap))) {
+ xfs_map_buffer(inode, bh_result, &imap, offset);
+ if (ISUNWRITTEN(&imap))
set_buffer_unwritten(bh_result);
- }
+ /* direct IO needs special help */
+ if (create && direct)
+ xfs_map_direct(inode, bh_result, &imap, offset);
}
/*
@@ -1378,39 +1494,6 @@ __xfs_get_blocks(
}
}
- /*
- * If this is O_DIRECT or the mpage code calling tell them how large
- * the mapping is, so that we can avoid repeated get_blocks calls.
- *
- * If the mapping spans EOF, then we have to break the mapping up as the
- * mapping for blocks beyond EOF must be marked new so that sub block
- * regions can be correctly zeroed. We can't do this for mappings within
- * EOF unless the mapping was just allocated or is unwritten, otherwise
- * the callers would overwrite existing data with zeros. Hence we have
- * to split the mapping into a range up to and including EOF, and a
- * second mapping for beyond EOF.
- */
- if (direct || size > (1 << inode->i_blkbits)) {
- xfs_off_t mapping_size;
-
- mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
- mapping_size <<= inode->i_blkbits;
-
- ASSERT(mapping_size > 0);
- if (mapping_size > size)
- mapping_size = size;
- if (offset < i_size_read(inode) &&
- offset + mapping_size >= i_size_read(inode)) {
- /* limit mapping to block that spans EOF */
- mapping_size = roundup_64(i_size_read(inode) - offset,
- 1 << inode->i_blkbits);
- }
- if (mapping_size > LONG_MAX)
- mapping_size = LONG_MAX;
-
- bh_result->b_size = mapping_size;
- }
-
return 0;
out_unlock:
@@ -1441,9 +1524,11 @@ xfs_get_blocks_direct(
/*
* Complete a direct I/O write request.
*
- * If the private argument is non-NULL __xfs_get_blocks signals us that we
- * need to issue a transaction to convert the range from unwritten to written
- * extents.
+ * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
+ * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
+ * wholly within the EOF and so there is nothing for us to do. Note that in this
+ * case the completion can be called in interrupt context, whereas if we have an
+ * ioend we will always be called in task context (i.e. from a workqueue).
*/
STATIC void
xfs_end_io_direct_write(
@@ -1455,48 +1540,75 @@ xfs_end_io_direct_write(
struct inode *inode = file_inode(iocb->ki_filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ioend *ioend = private;
- if (XFS_FORCED_SHUTDOWN(mp))
+ trace_xfs_gbmap_direct_endio(ip, offset, size,
+ ioend ? ioend->io_type : 0, NULL);
+
+ if (!ioend) {
+ ASSERT(offset + size <= i_size_read(inode));
return;
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ goto out_end_io;
/*
- * While the generic direct I/O code updates the inode size, it does
- * so only after the end_io handler is called, which means our
- * end_io handler thinks the on-disk size is outside the in-core
- * size. To prevent this just update it a little bit earlier here.
+	 * DIO completion end_io functions are only called on writes if more
+	 * than 0 bytes were written.
*/
+ ASSERT(size > 0);
+
+ /*
+ * The ioend only maps whole blocks, while the IO may be sector aligned.
+ * Hence the ioend offset/size may not match the IO offset/size exactly.
+ * Because we don't map overwrites within EOF into the ioend, the offset
+ * may not match, but only if the endio spans EOF. Either way, write
+ * the IO sizes into the ioend so that completion processing does the
+ * right thing.
+ */
+ ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
+ ioend->io_size = size;
+ ioend->io_offset = offset;
+
+ /*
+ * The ioend tells us whether we are doing unwritten extent conversion
+ * or an append transaction that updates the on-disk file size. These
+ * cases are the only cases where we should *potentially* be needing
+ * to update the VFS inode size.
+ *
+ * We need to update the in-core inode size here so that we don't end up
+ * with the on-disk inode size being outside the in-core inode size. We
+ * have no other method of updating EOF for AIO, so always do it here
+ * if necessary.
+ *
+ * We need to lock the test/set EOF update as we can be racing with
+ * other IO completions here to update the EOF. Failing to serialise
+ * here can result in EOF moving backwards and Bad Things Happen when
+ * that occurs.
+ */
+ spin_lock(&ip->i_flags_lock);
if (offset + size > i_size_read(inode))
i_size_write(inode, offset + size);
+ spin_unlock(&ip->i_flags_lock);
/*
- * For direct I/O we do not know if we need to allocate blocks or not,
- * so we can't preallocate an append transaction, as that results in
- * nested reservations and log space deadlocks. Hence allocate the
- * transaction here. While this is sub-optimal and can block IO
- * completion for some time, we're stuck with doing it this way until
- * we can pass the ioend to the direct IO allocation callbacks and
- * avoid nesting that way.
+ * If we are doing an append IO that needs to update the EOF on disk,
+ * do the transaction reserve now so we can use common end io
+	 * processing. Stashing the error (if there is one) in the ioend lets
+	 * the ioend processing pass it on where possible, as we can't return
+	 * it from here.
*/
- if (private && size > 0) {
- xfs_iomap_write_unwritten(ip, offset, size);
- } else if (offset + size > ip->i_d.di_size) {
- struct xfs_trans *tp;
- int error;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return;
- }
+ if (ioend->io_type == XFS_IO_OVERWRITE)
+ ioend->io_error = xfs_setfilesize_trans_alloc(ioend);
- xfs_setfilesize(ip, tp, offset, size);
- }
+out_end_io:
+ xfs_end_io(&ioend->io_work);
+ return;
}
STATIC ssize_t
xfs_vm_direct_IO(
- int rw,
struct kiocb *iocb,
struct iov_iter *iter,
loff_t offset)
@@ -1504,15 +1616,14 @@ xfs_vm_direct_IO(
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
- if (rw & WRITE) {
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
- offset, xfs_get_blocks_direct,
+ if (iov_iter_rw(iter) == WRITE) {
+ return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+ xfs_get_blocks_direct,
xfs_end_io_direct_write, NULL,
DIO_ASYNC_EXTEND);
}
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
- offset, xfs_get_blocks_direct,
- NULL, NULL, 0);
+ return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+ xfs_get_blocks_direct, NULL, NULL, 0);
}
/*
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 83af4c149635..f9c1c64782d3 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -132,9 +132,10 @@ xfs_attr3_leaf_inactive(
int size;
int tmp;
int i;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
/*
* Count the number of "remote" value extents.
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index a43d370d2c58..65fb37a18e92 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -225,6 +225,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
int error, i;
struct xfs_buf *bp;
struct xfs_inode *dp = context->dp;
+ struct xfs_mount *mp = dp->i_mount;
trace_xfs_attr_node_list(context);
@@ -256,7 +257,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
+ &leafhdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
if (cursor->hashval > be32_to_cpu(
entries[leafhdr.count - 1].hashval)) {
@@ -340,7 +342,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
xfs_trans_brelse(NULL, bp);
return error;
}
- xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
if (context->seen_enough || leafhdr.forw == 0)
break;
cursor->blkno = leafhdr.forw;
@@ -368,11 +370,12 @@ xfs_attr3_leaf_list_int(
struct xfs_attr_leaf_entry *entry;
int retval;
int i;
+ struct xfs_mount *mp = context->dp->i_mount;
trace_xfs_attr_list_leaf(context);
leaf = bp->b_addr;
- xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
cursor = context->cursor;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 22a5dcb70b32..a52bbd3abc7d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1376,22 +1376,19 @@ out:
}
/*
- * xfs_collapse_file_space()
- * This routine frees disk space and shift extent for the given file.
- * The first thing we do is to free data blocks in the specified range
- * by calling xfs_free_file_space(). It would also sync dirty data
- * and invalidate page cache over the region on which collapse range
- * is working. And Shift extent records to the left to cover a hole.
- * RETURNS:
- * 0 on success
- * errno on error
- *
+ * @next_fsb will keep track of the extent currently undergoing the shift.
+ * @stop_fsb will keep track of the extent at which we have to stop.
+ * If we are shifting left, we will start with the block at (offset + len) and
+ * shift each extent up to and including the last extent.
+ * If we are shifting right, we will start with the last extent inside the file
+ * space and continue until we reach the block corresponding to offset.
*/
-int
-xfs_collapse_file_space(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t len)
+static int
+xfs_shift_file_space(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len,
+ enum shift_direction direction)
{
int done = 0;
struct xfs_mount *mp = ip->i_mount;
@@ -1400,21 +1397,26 @@ xfs_collapse_file_space(
struct xfs_bmap_free free_list;
xfs_fsblock_t first_block;
int committed;
- xfs_fileoff_t start_fsb;
+ xfs_fileoff_t stop_fsb;
xfs_fileoff_t next_fsb;
xfs_fileoff_t shift_fsb;
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
- trace_xfs_collapse_file_space(ip);
+ if (direction == SHIFT_LEFT) {
+ next_fsb = XFS_B_TO_FSB(mp, offset + len);
+ stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
+ } else {
+ /*
+		 * If this is a right shift, delegate the initialisation of
+		 * next_fsb to xfs_bmap_shift_extents(), as it runs with the
+		 * ilock held.
+ next_fsb = NULLFSBLOCK;
+ stop_fsb = XFS_B_TO_FSB(mp, offset);
+ }
- next_fsb = XFS_B_TO_FSB(mp, offset + len);
shift_fsb = XFS_B_TO_FSB(mp, len);
- error = xfs_free_file_space(ip, offset, len);
- if (error)
- return error;
-
/*
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
* into the accessible region of the file.
@@ -1427,20 +1429,28 @@ xfs_collapse_file_space(
/*
* Writeback and invalidate cache for the remainder of the file as we're
- * about to shift down every extent from the collapse range to EOF. The
- * free of the collapse range above might have already done some of
- * this, but we shouldn't rely on it to do anything outside of the range
- * that was freed.
+ * about to shift down every extent from offset to EOF.
*/
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- offset + len, -1);
+ offset, -1);
if (error)
return error;
error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
- (offset + len) >> PAGE_CACHE_SHIFT, -1);
+ offset >> PAGE_CACHE_SHIFT, -1);
if (error)
return error;
+ /*
+	 * The extent shifting code works at extent granularity. So, if
+	 * stop_fsb is not the starting block of an extent, we need to split
+	 * the extent at stop_fsb.
+ */
+ if (direction == SHIFT_RIGHT) {
+ error = xfs_bmap_split_extent(ip, stop_fsb);
+ if (error)
+ return error;
+ }
+
while (!error && !done) {
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
/*
@@ -1464,7 +1474,7 @@ xfs_collapse_file_space(
if (error)
goto out;
- xfs_trans_ijoin(tp, ip, 0);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_bmap_init(&free_list, &first_block);
@@ -1472,10 +1482,9 @@ xfs_collapse_file_space(
* We are using the write transaction in which max 2 bmbt
* updates are allowed
*/
- start_fsb = next_fsb;
- error = xfs_bmap_shift_extents(tp, ip, start_fsb, shift_fsb,
- &done, &next_fsb, &first_block, &free_list,
- XFS_BMAP_MAX_SHIFT_EXTENTS);
+ error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
+ &done, stop_fsb, &first_block, &free_list,
+ direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
if (error)
goto out;
@@ -1484,18 +1493,70 @@ xfs_collapse_file_space(
goto out;
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
return error;
out:
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
+ * xfs_collapse_file_space()
+ * This routine frees disk space and shifts extents for the given file.
+ * The first thing we do is free data blocks in the specified range by
+ * calling xfs_free_file_space(), which also syncs dirty data and
+ * invalidates the page cache over the region on which the collapse range
+ * is working. Extent records are then shifted to the left to cover the hole.
+ * RETURNS:
+ * 0 on success
+ * errno on error
+ *
+ */
+int
+xfs_collapse_file_space(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len)
+{
+ int error;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ trace_xfs_collapse_file_space(ip);
+
+ error = xfs_free_file_space(ip, offset, len);
+ if (error)
+ return error;
+
+ return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
+}
+
+/*
+ * xfs_insert_file_space()
+ * This routine creates a hole by shifting extents for the given file.
+ * The first thing we do is sync dirty data and invalidate the page cache
+ * over the region on which the insert range is working. Then we split an
+ * extent into two at the given offset by calling xfs_bmap_split_extent,
+ * and shift all extent records lying between [offset, last allocated
+ * extent] to the right to make room for the hole.
+ * RETURNS:
+ * 0 on success
+ * errno on error
+ */
+int
+xfs_insert_file_space(
+ struct xfs_inode *ip,
+ loff_t offset,
+ loff_t len)
+{
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ trace_xfs_insert_file_space(ip);
+
+ return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
+}
+
+/*
* We need to check that the format of the data fork in the temporary inode is
* valid for the target inode before doing the swap. This is not a problem with
* attr1 because of the fixed fork offset, but attr2 has a dynamically sized
@@ -1599,13 +1660,6 @@ xfs_swap_extent_flush(
/* Verify O_DIRECT for ftmp */
if (VFS_I(ip)->i_mapping->nrpages)
return -EINVAL;
-
- /*
- * Don't try to swap extents on mmap()d files because we can't lock
- * out races against page faults safely.
- */
- if (mapping_mapped(VFS_I(ip)->i_mapping))
- return -EBUSY;
return 0;
}
@@ -1633,13 +1687,14 @@ xfs_swap_extents(
}
/*
- * Lock up the inodes against other IO and truncate to begin with.
- * Then we can ensure the inodes are flushed and have no page cache
- * safely. Once we have done this we can take the ilocks and do the rest
- * of the checks.
+ * Lock the inodes against other IO, page faults and truncate to
+	 * begin with. Then we can safely ensure the inodes are flushed and
+	 * have no page cache. Once we have done this we can take the ilocks and
+ * do the rest of the checks.
*/
- lock_flags = XFS_IOLOCK_EXCL;
+ lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
+ xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
/* Verify that both files have the same format */
if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
@@ -1666,8 +1721,16 @@ xfs_swap_extents(
xfs_trans_cancel(tp, 0);
goto out_unlock;
}
+
+ /*
+	 * Lock and join the inodes to the transaction so that transaction commit
+ * or cancel will unlock the inodes from this point onwards.
+ */
xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
lock_flags |= XFS_ILOCK_EXCL;
+ xfs_trans_ijoin(tp, ip, lock_flags);
+ xfs_trans_ijoin(tp, tip, lock_flags);
+
/* Verify all data are being swapped */
if (sxp->sx_offset != 0 ||
@@ -1720,9 +1783,6 @@ xfs_swap_extents(
goto out_trans_cancel;
}
- xfs_trans_ijoin(tp, ip, lock_flags);
- xfs_trans_ijoin(tp, tip, lock_flags);
-
/*
* Before we've swapped the forks, lets set the owners of the forks
* appropriately. We have to do this as we are demand paging the btree
@@ -1856,5 +1916,5 @@ out_unlock:
out_trans_cancel:
xfs_trans_cancel(tp, 0);
- goto out_unlock;
+ goto out;
}
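The xfs_collapse_file_space() and xfs_insert_file_space() helpers above back the FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE modes of fallocate(2). A minimal userspace sketch of driving them follows; it is not part of the patch and assumes a 4096-byte filesystem block size and headers new enough to define FALLOC_FL_INSERT_RANGE.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const off_t blksz = 4096;       /* assumed filesystem block size */
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file on xfs>\n", argv[0]);
                return 1;
        }

        fd = open(argv[1], O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Remove one block at offset blksz and shift the rest of the file left. */
        if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, blksz, blksz) < 0)
                perror("collapse");

        /* Open a one-block hole at the same offset, shifting extents right. */
        if (fallocate(fd, FALLOC_FL_INSERT_RANGE, blksz, blksz) < 0)
                perror("insert");

        close(fd);
        return 0;
}

Both modes require offset and len to be multiples of the filesystem block size, and insert range additionally requires the offset to lie below EOF, matching the checks added to xfs_file_fallocate() further down.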
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 736429a72a12..af97d9a1dfb4 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -63,6 +63,8 @@ int xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len);
int xfs_collapse_file_space(struct xfs_inode *, xfs_off_t offset,
xfs_off_t len);
+int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset,
+ xfs_off_t len);
/* EOF block manipulation functions */
bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 507d96a57ac7..092d652bc03d 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -537,9 +537,9 @@ xfs_buf_item_push(
/* has a previous flush failed due to IO errors? */
if ((bp->b_flags & XBF_WRITE_FAIL) &&
- ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
+ ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
xfs_warn(bp->b_target->bt_mount,
-"Detected failing async write on buffer block 0x%llx. Retrying async write.",
+"Failing async write on buffer block 0x%llx. Retrying async write.",
(long long)bp->b_bn);
}
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 799e5a2d334d..e85a9519a5ae 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -84,7 +84,7 @@ xfs_trim_extents(
error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
if (error)
goto out_del_cursor;
- XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_del_cursor);
ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
/*
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 3ee186ac1093..338e50bbfd1e 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -131,7 +131,7 @@ xfs_error_report(
{
if (level <= xfs_error_level) {
xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
- "Internal error %s at line %d of file %s. Caller %pF",
+ "Internal error %s at line %d of file %s. Caller %pS",
tag, linenum, filename, ra);
xfs_stack_trace();
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 279a76e52791..c0394ed126fc 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -40,25 +40,25 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
/*
* Macros to set EFSCORRUPTED & return/branch.
*/
-#define XFS_WANT_CORRUPTED_GOTO(x,l) \
+#define XFS_WANT_CORRUPTED_GOTO(mp, x, l) \
{ \
int fs_is_ok = (x); \
ASSERT(fs_is_ok); \
if (unlikely(!fs_is_ok)) { \
XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \
- XFS_ERRLEVEL_LOW, NULL); \
+ XFS_ERRLEVEL_LOW, mp); \
error = -EFSCORRUPTED; \
goto l; \
} \
}
-#define XFS_WANT_CORRUPTED_RETURN(x) \
+#define XFS_WANT_CORRUPTED_RETURN(mp, x) \
{ \
int fs_is_ok = (x); \
ASSERT(fs_is_ok); \
if (unlikely(!fs_is_ok)) { \
XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \
- XFS_ERRLEVEL_LOW, NULL); \
+ XFS_ERRLEVEL_LOW, mp); \
return -EFSCORRUPTED; \
} \
}
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index b97359ba2648..652cd3c5b58c 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -215,7 +215,7 @@ xfs_fs_get_parent(
int error;
struct xfs_inode *cip;
- error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL);
+ error = xfs_lookup(XFS_I(d_inode(child)), &xfs_name_dotdot, &cip, NULL);
if (unlikely(error))
return ERR_PTR(error);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a2e1cb8a568b..8121e75352ee 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -38,7 +38,6 @@
#include "xfs_icache.h"
#include "xfs_pnfs.h"
-#include <linux/aio.h>
#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
@@ -280,7 +279,7 @@ xfs_file_read_iter(
XFS_STATS_INC(xs_read_calls);
- if (unlikely(file->f_flags & O_DIRECT))
+ if (unlikely(iocb->ki_flags & IOCB_DIRECT))
ioflags |= XFS_IO_ISDIRECT;
if (file->f_mode & FMODE_NOCMTIME)
ioflags |= XFS_IO_INVIS;
@@ -545,21 +544,22 @@ xfs_zero_eof(
*/
STATIC ssize_t
xfs_file_aio_write_checks(
- struct file *file,
- loff_t *pos,
- size_t *count,
+ struct kiocb *iocb,
+ struct iov_iter *from,
int *iolock)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
- int error = 0;
+ ssize_t error = 0;
+ size_t count = iov_iter_count(from);
restart:
- error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
- if (error)
+ error = generic_write_checks(iocb, from);
+ if (error <= 0)
return error;
- error = xfs_break_layouts(inode, iolock);
+ error = xfs_break_layouts(inode, iolock, true);
if (error)
return error;
@@ -569,20 +569,42 @@ restart:
* write. If zeroing is needed and we are currently holding the
* iolock shared, we need to update it to exclusive which implies
* having to redo all checks before.
+ *
+ * We need to serialise against EOF updates that occur in IO
+ * completions here. We want to make sure that nobody is changing the
+ * size while we do this check until we have placed an IO barrier (i.e.
+ * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
+ * The spinlock effectively forms a memory barrier once we have the
+ * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
+ * and hence be able to correctly determine if we need to run zeroing.
*/
- if (*pos > i_size_read(inode)) {
+ spin_lock(&ip->i_flags_lock);
+ if (iocb->ki_pos > i_size_read(inode)) {
bool zero = false;
+ spin_unlock(&ip->i_flags_lock);
if (*iolock == XFS_IOLOCK_SHARED) {
xfs_rw_iunlock(ip, *iolock);
*iolock = XFS_IOLOCK_EXCL;
xfs_rw_ilock(ip, *iolock);
+ iov_iter_reexpand(from, count);
+
+ /*
+ * We now have an IO submission barrier in place, but
+ * AIO can do EOF updates during IO completion and hence
+ * we now need to wait for all of them to drain. Non-AIO
+ * DIO will have drained before we are given the
+ * XFS_IOLOCK_EXCL, and so for most cases this wait is a
+ * no-op.
+ */
+ inode_dio_wait(inode);
goto restart;
}
- error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
+ error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
if (error)
return error;
- }
+ } else
+ spin_unlock(&ip->i_flags_lock);
/*
* Updating the timestamps will grab the ilock again from
@@ -644,6 +666,8 @@ xfs_file_dio_aio_write(
int iolock;
size_t count = iov_iter_count(from);
loff_t pos = iocb->ki_pos;
+ loff_t end;
+ struct iov_iter data;
struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
@@ -679,14 +703,16 @@ xfs_file_dio_aio_write(
xfs_rw_ilock(ip, iolock);
}
- ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
+ ret = xfs_file_aio_write_checks(iocb, from, &iolock);
if (ret)
goto out;
- iov_iter_truncate(from, count);
+ count = iov_iter_count(from);
+ pos = iocb->ki_pos;
+ end = pos + count - 1;
if (mapping->nrpages) {
ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- pos, pos + count - 1);
+ pos, end);
if (ret)
goto out;
/*
@@ -696,7 +722,7 @@ xfs_file_dio_aio_write(
*/
ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
pos >> PAGE_CACHE_SHIFT,
- (pos + count - 1) >> PAGE_CACHE_SHIFT);
+ end >> PAGE_CACHE_SHIFT);
WARN_ON_ONCE(ret);
ret = 0;
}
@@ -713,8 +739,22 @@ xfs_file_dio_aio_write(
}
trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
- ret = generic_file_direct_write(iocb, from, pos);
+ data = *from;
+ ret = mapping->a_ops->direct_IO(iocb, &data, pos);
+
+ /* see generic_file_direct_write() for why this is necessary */
+ if (mapping->nrpages) {
+ invalidate_inode_pages2_range(mapping,
+ pos >> PAGE_CACHE_SHIFT,
+ end >> PAGE_CACHE_SHIFT);
+ }
+
+ if (ret > 0) {
+ pos += ret;
+ iov_iter_advance(from, ret);
+ iocb->ki_pos = pos;
+ }
out:
xfs_rw_iunlock(ip, iolock);
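The rewritten direct IO write path above is what O_DIRECT writers land in. As a reminder of the userspace side of that contract, here is a hedged sketch assuming a 4096-byte logical block size (the real alignment requirement depends on the device and filesystem).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const size_t align = 4096;      /* assumed logical block size */
        void *buf;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        if (posix_memalign(&buf, align, align))
                return 1;
        memset(buf, 'x', align);

        fd = open(argv[1], O_WRONLY | O_CREAT | O_DIRECT, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Buffer address, file offset and length are all block aligned. */
        if (pwrite(fd, buf, align, 0) != (ssize_t)align)
                perror("pwrite");

        close(fd);
        free(buf);
        return 0;
}

A misaligned buffer, offset or length typically makes the write fail with EINVAL rather than silently falling back to buffered IO.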
@@ -735,24 +775,22 @@ xfs_file_buffered_aio_write(
ssize_t ret;
int enospc = 0;
int iolock = XFS_IOLOCK_EXCL;
- loff_t pos = iocb->ki_pos;
- size_t count = iov_iter_count(from);
xfs_rw_ilock(ip, iolock);
- ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
+ ret = xfs_file_aio_write_checks(iocb, from, &iolock);
if (ret)
goto out;
- iov_iter_truncate(from, count);
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
write_retry:
- trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
- ret = generic_perform_write(file, from, pos);
+ trace_xfs_file_buffered_write(ip, iov_iter_count(from),
+ iocb->ki_pos, 0);
+ ret = generic_perform_write(file, from, iocb->ki_pos);
if (likely(ret >= 0))
- iocb->ki_pos = pos + ret;
+ iocb->ki_pos += ret;
/*
* If we hit a space limit, try to free up some lingering preallocated
@@ -804,7 +842,7 @@ xfs_file_write_iter(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- if (unlikely(file->f_flags & O_DIRECT))
+ if (unlikely(iocb->ki_flags & IOCB_DIRECT))
ret = xfs_file_dio_aio_write(iocb, from);
else
ret = xfs_file_buffered_aio_write(iocb, from);
@@ -822,6 +860,11 @@ xfs_file_write_iter(
return ret;
}
+#define XFS_FALLOC_FL_SUPPORTED \
+ (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE)
+
STATIC long
xfs_file_fallocate(
struct file *file,
@@ -835,18 +878,21 @@ xfs_file_fallocate(
enum xfs_prealloc_flags flags = 0;
uint iolock = XFS_IOLOCK_EXCL;
loff_t new_size = 0;
+ bool do_file_insert = 0;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
+ if (mode & ~XFS_FALLOC_FL_SUPPORTED)
return -EOPNOTSUPP;
xfs_ilock(ip, iolock);
- error = xfs_break_layouts(inode, &iolock);
+ error = xfs_break_layouts(inode, &iolock, false);
if (error)
goto out_unlock;
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ iolock |= XFS_MMAPLOCK_EXCL;
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
error = xfs_free_file_space(ip, offset, len);
if (error)
@@ -873,6 +919,27 @@ xfs_file_fallocate(
error = xfs_collapse_file_space(ip, offset, len);
if (error)
goto out_unlock;
+ } else if (mode & FALLOC_FL_INSERT_RANGE) {
+ unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+
+ new_size = i_size_read(inode) + len;
+ if (offset & blksize_mask || len & blksize_mask) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* check the new inode size does not wrap through zero */
+ if (new_size > inode->i_sb->s_maxbytes) {
+ error = -EFBIG;
+ goto out_unlock;
+ }
+
+ /* Offset should be less than i_size */
+ if (offset >= i_size_read(inode)) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+ do_file_insert = 1;
} else {
flags |= XFS_PREALLOC_SET;
@@ -907,8 +974,19 @@ xfs_file_fallocate(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size;
error = xfs_setattr_size(ip, &iattr);
+ if (error)
+ goto out_unlock;
}
+ /*
+ * Perform hole insertion now that the file size has been
+ * updated so that if we crash during the operation we don't
+	 * leave shifted extents past EOF and hence lose access to
+ * the data that is contained within them.
+ */
+ if (do_file_insert)
+ error = xfs_insert_file_space(ip, offset, len);
+
out_unlock:
xfs_iunlock(ip, iolock);
return error;
@@ -997,20 +1075,6 @@ xfs_file_mmap(
}
/*
- * mmap()d file has taken write protection fault and is being made
- * writable. We can set the page state up correctly for a writable
- * page, which means we can do correct delalloc accounting (ENOSPC
- * checking!) and unwritten extent mapping.
- */
-STATIC int
-xfs_vm_page_mkwrite(
- struct vm_area_struct *vma,
- struct vm_fault *vmf)
-{
- return block_page_mkwrite(vma, vmf, xfs_get_blocks);
-}
-
-/*
* This type is designed to indicate the type of offset we would like
* to search from page cache for xfs_seek_hole_data().
*/
@@ -1385,10 +1449,57 @@ xfs_file_llseek(
}
}
+/*
+ * Locking for serialisation of IO during page faults. This results in a lock
+ * ordering of:
+ *
+ * mmap_sem (MM)
+ * i_mmap_lock (XFS - truncate serialisation)
+ * page_lock (MM)
+ * i_lock (XFS - extent map serialisation)
+ */
+STATIC int
+xfs_filemap_fault(
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct xfs_inode *ip = XFS_I(vma->vm_file->f_mapping->host);
+ int error;
+
+ trace_xfs_filemap_fault(ip);
+
+ xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+ error = filemap_fault(vma, vmf);
+ xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+
+ return error;
+}
+
+/*
+ * mmap()d file has taken write protection fault and is being made writable. We
+ * can set the page state up correctly for a writable page, which means we can
+ * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
+ * mapping.
+ */
+STATIC int
+xfs_filemap_page_mkwrite(
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct xfs_inode *ip = XFS_I(vma->vm_file->f_mapping->host);
+ int error;
+
+ trace_xfs_filemap_page_mkwrite(ip);
+
+ xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+ error = block_page_mkwrite(vma, vmf, xfs_get_blocks);
+ xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+
+ return error;
+}
+
const struct file_operations xfs_file_operations = {
.llseek = xfs_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = xfs_file_read_iter,
.write_iter = xfs_file_write_iter,
.splice_read = xfs_file_splice_read,
@@ -1417,7 +1528,7 @@ const struct file_operations xfs_dir_file_operations = {
};
static const struct vm_operations_struct xfs_file_vm_ops = {
- .fault = filemap_fault,
+ .fault = xfs_filemap_fault,
.map_pages = filemap_map_pages,
- .page_mkwrite = xfs_vm_page_mkwrite,
+ .page_mkwrite = xfs_filemap_page_mkwrite,
};
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index a2e86e8a0fea..da82f1cb4b9b 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -294,7 +294,7 @@ xfs_filestream_get_parent(
if (!parent)
goto out_dput;
- dir = igrab(parent->d_inode);
+ dir = igrab(d_inode(parent));
dput(parent);
out_dput:
@@ -322,7 +322,7 @@ xfs_filestream_lookup_ag(
pip = xfs_filestream_get_parent(ip);
if (!pip)
- goto out;
+ return NULLAGNUMBER;
mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino);
if (mru) {
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 74efe5b760dc..cb7e8a29dfb6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -637,12 +637,13 @@ xfs_fs_counts(
xfs_mount_t *mp,
xfs_fsop_counts_t *cnt)
{
- xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+ cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
+ cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
+ cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
+ XFS_ALLOC_SET_ASIDE(mp);
+
spin_lock(&mp->m_sb_lock);
- cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
cnt->freertx = mp->m_sb.sb_frextents;
- cnt->freeino = mp->m_sb.sb_ifree;
- cnt->allocino = mp->m_sb.sb_icount;
spin_unlock(&mp->m_sb_lock);
return 0;
}
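xfs_fs_counts() now reads the generic per-cpu counters instead of folding the old in-core superblock counters first. The idea, sketched below in plain C with hypothetical names (this is not the kernel implementation), is that updates touch only a local shard while a read sums the shards and tolerates a small transient error.

#include <stdint.h>
#include <stdio.h>

#define NR_SHARDS 4                     /* stands in for the number of CPUs */

static int64_t shard[NR_SHARDS];

static void counter_add(int cpu, int64_t delta)
{
        shard[cpu] += delta;            /* the kernel does this as a preempt-safe percpu op */
}

/*
 * Approximate read: sum the shards and clamp at zero, in the spirit of
 * percpu_counter_read_positive(). It may race with concurrent updates.
 */
static int64_t counter_read_positive(void)
{
        int64_t sum = 0;
        int i;

        for (i = 0; i < NR_SHARDS; i++)
                sum += shard[i];
        return sum > 0 ? sum : 0;
}

int main(void)
{
        counter_add(0, 100);
        counter_add(1, -30);
        printf("%lld\n", (long long)counter_read_positive());
        return 0;
}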
@@ -692,14 +693,9 @@ xfs_reserve_blocks(
* what to do. This means that the amount of free space can
* change while we do this, so we need to retry if we end up
* trying to reserve more space than is available.
- *
- * We also use the xfs_mod_incore_sb() interface so that we
- * don't have to care about whether per cpu counter are
- * enabled, disabled or even compiled in....
*/
retry:
spin_lock(&mp->m_sb_lock);
- xfs_icsb_sync_counters_locked(mp, 0);
/*
* If our previous reservation was larger than the current value,
@@ -716,7 +712,8 @@ retry:
} else {
__int64_t free;
- free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ free = percpu_counter_sum(&mp->m_fdblocks) -
+ XFS_ALLOC_SET_ASIDE(mp);
if (!free)
goto out; /* ENOSPC and fdblks_delta = 0 */
@@ -755,8 +752,7 @@ out:
* the extra reserve blocks from the reserve.....
*/
int error;
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- fdblks_delta, 0);
+ error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
if (error == -ENOSPC)
goto retry;
}
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 9771b7ef62ed..76a9f2783282 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -439,11 +439,11 @@ again:
*ipp = ip;
/*
- * If we have a real type for an on-disk inode, we can set ops(&unlock)
+ * If we have a real type for an on-disk inode, we can setup the inode
* now. If it's a new inode being created, xfs_ialloc will handle it.
*/
if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
- xfs_setup_inode(ip);
+ xfs_setup_existing_inode(ip);
return 0;
out_error_or_again:
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6163767aa856..d6ebc85192b7 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -117,24 +117,34 @@ xfs_ilock_attr_map_shared(
}
/*
- * The xfs inode contains 2 locks: a multi-reader lock called the
- * i_iolock and a multi-reader lock called the i_lock. This routine
- * allows either or both of the locks to be obtained.
+ * The xfs inode contains 3 multi-reader locks: the i_iolock, the i_mmap_lock and
+ * the i_lock. This routine allows various combinations of the locks to be
+ * obtained.
*
- * The 2 locks should always be ordered so that the IO lock is
- * obtained first in order to prevent deadlock.
+ * The 3 locks should always be ordered so that the IO lock is obtained first,
+ * the mmap lock second and the ilock last in order to prevent deadlock.
*
- * ip -- the inode being locked
- * lock_flags -- this parameter indicates the inode's locks
- * to be locked. It can be:
- * XFS_IOLOCK_SHARED,
- * XFS_IOLOCK_EXCL,
- * XFS_ILOCK_SHARED,
- * XFS_ILOCK_EXCL,
- * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
- * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
- * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
- * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
+ * Basic locking order:
+ *
+ * i_iolock -> i_mmap_lock -> page_lock -> i_ilock
+ *
+ * mmap_sem locking order:
+ *
+ * i_iolock -> page lock -> mmap_sem
+ * mmap_sem -> i_mmap_lock -> page_lock
+ *
+ * The difference in mmap_sem locking order means that we cannot hold the
+ * i_mmap_lock over syscall based read(2)/write(2) IO. These IO paths can
+ * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
+ * in get_user_pages() to map the user pages into the kernel address space for
+ * direct IO. Similarly the i_iolock cannot be taken inside a page fault because
+ * page faults already hold the mmap_sem.
+ *
+ * Hence to serialise fully against both syscall and mmap based IO, we need to
+ * take both the i_iolock and the i_mmap_lock. These locks should *only* both
+ * be taken in places where we need to invalidate the page cache in a
+ * race-free manner (e.g. truncate, hole punch and other extent manipulation
+ * functions).
*/
void
xfs_ilock(
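The comment above fixes the ordering as i_iolock, then i_mmaplock, then i_lock, with page faults taking only the latter two underneath mmap_sem. A minimal pthread sketch of that rule follows; the names are made up and it is illustrative only, not part of the patch.

#include <pthread.h>

/* Stand-ins for i_iolock, i_mmaplock and i_lock. */
static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t mmaplock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t ilock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Every path that needs more than one lock takes them in the same order
 * and drops them in reverse, so no cycle of waiters can form.
 */
static void punch_hole_like_path(void)
{
        pthread_rwlock_wrlock(&iolock);
        pthread_rwlock_wrlock(&mmaplock);
        pthread_rwlock_wrlock(&ilock);

        /* ... invalidate the page cache and modify the extent map ... */

        pthread_rwlock_unlock(&ilock);
        pthread_rwlock_unlock(&mmaplock);
        pthread_rwlock_unlock(&iolock);
}

/* A page-fault-like path never takes iolock; it nests under mmap_sem. */
static void page_fault_like_path(void)
{
        pthread_rwlock_rdlock(&mmaplock);
        pthread_rwlock_rdlock(&ilock);
        /* ... map the faulting page ... */
        pthread_rwlock_unlock(&ilock);
        pthread_rwlock_unlock(&mmaplock);
}

int main(void)
{
        punch_hole_like_path();
        page_fault_like_path();
        return 0;
}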
@@ -150,6 +160,8 @@ xfs_ilock(
*/
ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+ ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
+ (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
@@ -159,6 +171,11 @@ xfs_ilock(
else if (lock_flags & XFS_IOLOCK_SHARED)
mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
+ if (lock_flags & XFS_MMAPLOCK_EXCL)
+ mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
+ else if (lock_flags & XFS_MMAPLOCK_SHARED)
+ mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
+
if (lock_flags & XFS_ILOCK_EXCL)
mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
else if (lock_flags & XFS_ILOCK_SHARED)
@@ -191,6 +208,8 @@ xfs_ilock_nowait(
*/
ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+ ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
+ (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
@@ -202,21 +221,35 @@ xfs_ilock_nowait(
if (!mrtryaccess(&ip->i_iolock))
goto out;
}
+
+ if (lock_flags & XFS_MMAPLOCK_EXCL) {
+ if (!mrtryupdate(&ip->i_mmaplock))
+ goto out_undo_iolock;
+ } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
+ if (!mrtryaccess(&ip->i_mmaplock))
+ goto out_undo_iolock;
+ }
+
if (lock_flags & XFS_ILOCK_EXCL) {
if (!mrtryupdate(&ip->i_lock))
- goto out_undo_iolock;
+ goto out_undo_mmaplock;
} else if (lock_flags & XFS_ILOCK_SHARED) {
if (!mrtryaccess(&ip->i_lock))
- goto out_undo_iolock;
+ goto out_undo_mmaplock;
}
return 1;
- out_undo_iolock:
+out_undo_mmaplock:
+ if (lock_flags & XFS_MMAPLOCK_EXCL)
+ mrunlock_excl(&ip->i_mmaplock);
+ else if (lock_flags & XFS_MMAPLOCK_SHARED)
+ mrunlock_shared(&ip->i_mmaplock);
+out_undo_iolock:
if (lock_flags & XFS_IOLOCK_EXCL)
mrunlock_excl(&ip->i_iolock);
else if (lock_flags & XFS_IOLOCK_SHARED)
mrunlock_shared(&ip->i_iolock);
- out:
+out:
return 0;
}
@@ -244,6 +277,8 @@ xfs_iunlock(
*/
ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+ ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
+ (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
@@ -254,6 +289,11 @@ xfs_iunlock(
else if (lock_flags & XFS_IOLOCK_SHARED)
mrunlock_shared(&ip->i_iolock);
+ if (lock_flags & XFS_MMAPLOCK_EXCL)
+ mrunlock_excl(&ip->i_mmaplock);
+ else if (lock_flags & XFS_MMAPLOCK_SHARED)
+ mrunlock_shared(&ip->i_mmaplock);
+
if (lock_flags & XFS_ILOCK_EXCL)
mrunlock_excl(&ip->i_lock);
else if (lock_flags & XFS_ILOCK_SHARED)
@@ -271,11 +311,14 @@ xfs_ilock_demote(
xfs_inode_t *ip,
uint lock_flags)
{
- ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
- ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
+ ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
+ ASSERT((lock_flags &
+ ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
if (lock_flags & XFS_ILOCK_EXCL)
mrdemote(&ip->i_lock);
+ if (lock_flags & XFS_MMAPLOCK_EXCL)
+ mrdemote(&ip->i_mmaplock);
if (lock_flags & XFS_IOLOCK_EXCL)
mrdemote(&ip->i_iolock);
@@ -294,6 +337,12 @@ xfs_isilocked(
return rwsem_is_locked(&ip->i_lock.mr_lock);
}
+ if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
+ if (!(lock_flags & XFS_MMAPLOCK_SHARED))
+ return !!ip->i_mmaplock.mr_writer;
+ return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
+ }
+
if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
if (!(lock_flags & XFS_IOLOCK_SHARED))
return !!ip->i_iolock.mr_writer;
@@ -314,14 +363,27 @@ int xfs_lock_delays;
#endif
/*
- * Bump the subclass so xfs_lock_inodes() acquires each lock with
- * a different value
+ * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
+ * value. This shouldn't be called for page fault locking, but we also need to
+ * ensure we don't overrun the number of lockdep subclasses for the iolock or
+ * mmaplock as that is limited to 12 by the mmap lock lockdep annotations.
*/
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
- if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
+ if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
+ ASSERT(subclass + XFS_LOCK_INUMORDER <
+ (1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT)));
lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
+ }
+
+ if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
+ ASSERT(subclass + XFS_LOCK_INUMORDER <
+ (1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT)));
+ lock_mode |= (subclass + XFS_LOCK_INUMORDER) <<
+ XFS_MMAPLOCK_SHIFT;
+ }
+
if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
@@ -329,15 +391,14 @@ xfs_lock_inumorder(int lock_mode, int subclass)
}
/*
- * The following routine will lock n inodes in exclusive mode.
- * We assume the caller calls us with the inodes in i_ino order.
+ * The following routine will lock n inodes in exclusive mode. We assume the
+ * caller calls us with the inodes in i_ino order.
*
- * We need to detect deadlock where an inode that we lock
- * is in the AIL and we start waiting for another inode that is locked
- * by a thread in a long running transaction (such as truncate). This can
- * result in deadlock since the long running trans might need to wait
- * for the inode we just locked in order to push the tail and free space
- * in the log.
+ * We need to detect deadlock where an inode that we lock is in the AIL and we
+ * start waiting for another inode that is locked by a thread in a long running
+ * transaction (such as truncate). This can result in deadlock since the long
+ * running trans might need to wait for the inode we just locked in order to
+ * push the tail and free space in the log.
*/
void
xfs_lock_inodes(
@@ -348,30 +409,27 @@ xfs_lock_inodes(
int attempts = 0, i, j, try_lock;
xfs_log_item_t *lp;
- ASSERT(ips && (inodes >= 2)); /* we need at least two */
+ /* currently supports between 2 and 5 inodes */
+ ASSERT(ips && inodes >= 2 && inodes <= 5);
try_lock = 0;
i = 0;
-
again:
for (; i < inodes; i++) {
ASSERT(ips[i]);
- if (i && (ips[i] == ips[i-1])) /* Already locked */
+ if (i && (ips[i] == ips[i - 1])) /* Already locked */
continue;
/*
- * If try_lock is not set yet, make sure all locked inodes
- * are not in the AIL.
- * If any are, set try_lock to be used later.
+ * If try_lock is not set yet, make sure all locked inodes are
+ * not in the AIL. If any are, set try_lock to be used later.
*/
-
if (!try_lock) {
for (j = (i - 1); j >= 0 && !try_lock; j--) {
lp = (xfs_log_item_t *)ips[j]->i_itemp;
- if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+ if (lp && (lp->li_flags & XFS_LI_IN_AIL))
try_lock++;
- }
}
}
@@ -381,51 +439,42 @@ again:
* we can't get any, we must release all we have
* and try again.
*/
+ if (!try_lock) {
+ xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
+ continue;
+ }
+
+ /* try_lock means we have an inode locked that is in the AIL. */
+ ASSERT(i != 0);
+ if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
+ continue;
- if (try_lock) {
- /* try_lock must be 0 if i is 0. */
+ /*
+ * Unlock all previous guys and try again. xfs_iunlock will try
+ * to push the tail if the inode is in the AIL.
+ */
+ attempts++;
+ for (j = i - 1; j >= 0; j--) {
/*
- * try_lock means we have an inode locked
- * that is in the AIL.
+ * Check to see if we've already unlocked this one. Not
+ * the first one going back, and the inode ptr is the
+ * same.
*/
- ASSERT(i != 0);
- if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
- attempts++;
-
- /*
- * Unlock all previous guys and try again.
- * xfs_iunlock will try to push the tail
- * if the inode is in the AIL.
- */
-
- for(j = i - 1; j >= 0; j--) {
-
- /*
- * Check to see if we've already
- * unlocked this one.
- * Not the first one going back,
- * and the inode ptr is the same.
- */
- if ((j != (i - 1)) && ips[j] ==
- ips[j+1])
- continue;
-
- xfs_iunlock(ips[j], lock_mode);
- }
+ if (j != (i - 1) && ips[j] == ips[j + 1])
+ continue;
+
+ xfs_iunlock(ips[j], lock_mode);
+ }
- if ((attempts % 5) == 0) {
- delay(1); /* Don't just spin the CPU */
+ if ((attempts % 5) == 0) {
+ delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
- xfs_lock_delays++;
+ xfs_lock_delays++;
#endif
- }
- i = 0;
- try_lock = 0;
- goto again;
- }
- } else {
- xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
}
+ i = 0;
+ try_lock = 0;
+ goto again;
}
#ifdef DEBUG
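The restructured xfs_lock_inodes() loop above blocks on each lock only until an AIL-resident inode is held; after that it switches to trylock and, on failure, drops everything and retries, pausing every few attempts. A simplified userspace sketch of that drop-and-retry pattern (it trylocks unconditionally, unlike the kernel code, and the names are made up):

#include <pthread.h>
#include <unistd.h>

#define NR_LOCKS 3

static pthread_mutex_t locks[NR_LOCKS] = {
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
};

/*
 * Lock everything in index order. If any trylock fails, release what is
 * already held, back off briefly and start over instead of blocking while
 * holding locks, which is how lock-order deadlocks are built.
 */
static void lock_all(void)
{
        int i, attempts = 0;

again:
        for (i = 0; i < NR_LOCKS; i++) {
                if (pthread_mutex_trylock(&locks[i]) == 0)
                        continue;
                while (--i >= 0)
                        pthread_mutex_unlock(&locks[i]);
                if ((++attempts % 5) == 0)
                        usleep(1000);   /* don't just spin the CPU */
                goto again;
        }
}

static void unlock_all(void)
{
        int i;

        for (i = NR_LOCKS - 1; i >= 0; i--)
                pthread_mutex_unlock(&locks[i]);
}

int main(void)
{
        lock_all();
        unlock_all();
        return 0;
}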
@@ -440,10 +489,10 @@ again:
}
/*
- * xfs_lock_two_inodes() can only be used to lock one type of lock
- * at a time - the iolock or the ilock, but not both at once. If
- * we lock both at once, lockdep will report false positives saying
- * we have violated locking orders.
+ * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
+ * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
+ * lock more than one at a time, lockdep will report false positives saying we
+ * have violated locking orders.
*/
void
xfs_lock_two_inodes(
@@ -455,8 +504,12 @@ xfs_lock_two_inodes(
int attempts = 0;
xfs_log_item_t *lp;
- if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
- ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
+ if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
+ ASSERT(!(lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
+ ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ } else if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
+ ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+
ASSERT(ip0->i_ino != ip1->i_ino);
if (ip0->i_ino > ip1->i_ino) {
@@ -818,7 +871,7 @@ xfs_ialloc(
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, flags);
- /* now that we have an i_mode we can setup inode ops and unlock */
+ /* now that we have an i_mode we can setup the inode structure */
xfs_setup_inode(ip);
*ipp = ip;
@@ -1235,12 +1288,14 @@ xfs_create(
xfs_trans_cancel(tp, cancel_flags);
out_release_inode:
/*
- * Wait until after the current transaction is aborted to
- * release the inode. This prevents recursive transactions
- * and deadlocks from xfs_inactive.
+ * Wait until after the current transaction is aborted to finish the
+ * setup of the inode and release the inode. This prevents recursive
+ * transactions and deadlocks from xfs_inactive.
*/
- if (ip)
+ if (ip) {
+ xfs_finish_inode_setup(ip);
IRELE(ip);
+ }
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -1345,12 +1400,14 @@ xfs_create_tmpfile(
xfs_trans_cancel(tp, cancel_flags);
out_release_inode:
/*
- * Wait until after the current transaction is aborted to
- * release the inode. This prevents recursive transactions
- * and deadlocks from xfs_inactive.
+ * Wait until after the current transaction is aborted to finish the
+ * setup of the inode and release the inode. This prevents recursive
+ * transactions and deadlocks from xfs_inactive.
*/
- if (ip)
+ if (ip) {
+ xfs_finish_inode_setup(ip);
IRELE(ip);
+ }
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -2611,19 +2668,22 @@ xfs_remove(
/*
* Enter all inodes for a rename transaction into a sorted array.
*/
+#define __XFS_SORT_INODES 5
STATIC void
xfs_sort_for_rename(
- xfs_inode_t *dp1, /* in: old (source) directory inode */
- xfs_inode_t *dp2, /* in: new (target) directory inode */
- xfs_inode_t *ip1, /* in: inode of old entry */
- xfs_inode_t *ip2, /* in: inode of new entry, if it
- already exists, NULL otherwise. */
- xfs_inode_t **i_tab,/* out: array of inode returned, sorted */
- int *num_inodes) /* out: number of inodes in array */
+ struct xfs_inode *dp1, /* in: old (source) directory inode */
+ struct xfs_inode *dp2, /* in: new (target) directory inode */
+ struct xfs_inode *ip1, /* in: inode of old entry */
+ struct xfs_inode *ip2, /* in: inode of new entry */
+ struct xfs_inode *wip, /* in: whiteout inode */
+ struct xfs_inode **i_tab,/* out: sorted array of inodes */
+ int *num_inodes) /* in/out: inodes in array */
{
- xfs_inode_t *temp;
int i, j;
+ ASSERT(*num_inodes == __XFS_SORT_INODES);
+ memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
+
/*
* i_tab contains a list of pointers to inodes. We initialize
* the table here & we'll sort it. We will then use it to
@@ -2631,25 +2691,24 @@ xfs_sort_for_rename(
*
* Note that the table may contain duplicates. e.g., dp1 == dp2.
*/
- i_tab[0] = dp1;
- i_tab[1] = dp2;
- i_tab[2] = ip1;
- if (ip2) {
- *num_inodes = 4;
- i_tab[3] = ip2;
- } else {
- *num_inodes = 3;
- i_tab[3] = NULL;
- }
+ i = 0;
+ i_tab[i++] = dp1;
+ i_tab[i++] = dp2;
+ i_tab[i++] = ip1;
+ if (ip2)
+ i_tab[i++] = ip2;
+ if (wip)
+ i_tab[i++] = wip;
+ *num_inodes = i;
/*
* Sort the elements via bubble sort. (Remember, there are at
- * most 4 elements to sort, so this is adequate.)
+ * most 5 elements to sort, so this is adequate.)
*/
for (i = 0; i < *num_inodes; i++) {
for (j = 1; j < *num_inodes; j++) {
if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
- temp = i_tab[j];
+ struct xfs_inode *temp = i_tab[j];
i_tab[j] = i_tab[j-1];
i_tab[j-1] = temp;
}
@@ -2657,6 +2716,31 @@ xfs_sort_for_rename(
}
}
+static int
+xfs_finish_rename(
+ struct xfs_trans *tp,
+ struct xfs_bmap_free *free_list)
+{
+ int committed = 0;
+ int error;
+
+ /*
+ * If this is a synchronous mount, make sure that the rename transaction
+ * goes to disk before returning to the user.
+ */
+ if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
+ xfs_trans_set_sync(tp);
+
+ error = xfs_bmap_finish(&tp, free_list, &committed);
+ if (error) {
+ xfs_bmap_cancel(free_list);
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+ return error;
+ }
+
+ return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+}
+
/*
* xfs_cross_rename()
*
@@ -2685,14 +2769,14 @@ xfs_cross_rename(
ip2->i_ino,
first_block, free_list, spaceres);
if (error)
- goto out;
+ goto out_trans_abort;
/* Swap inode number for dirent in second parent */
error = xfs_dir_replace(tp, dp2, name2,
ip1->i_ino,
first_block, free_list, spaceres);
if (error)
- goto out;
+ goto out_trans_abort;
/*
* If we're renaming one or more directories across different parents,
@@ -2707,16 +2791,16 @@ xfs_cross_rename(
dp1->i_ino, first_block,
free_list, spaceres);
if (error)
- goto out;
+ goto out_trans_abort;
/* transfer ip2 ".." reference to dp1 */
if (!S_ISDIR(ip1->i_d.di_mode)) {
error = xfs_droplink(tp, dp2);
if (error)
- goto out;
+ goto out_trans_abort;
error = xfs_bumplink(tp, dp1);
if (error)
- goto out;
+ goto out_trans_abort;
}
/*
@@ -2734,16 +2818,16 @@ xfs_cross_rename(
dp2->i_ino, first_block,
free_list, spaceres);
if (error)
- goto out;
+ goto out_trans_abort;
/* transfer ip1 ".." reference to dp2 */
if (!S_ISDIR(ip2->i_d.di_mode)) {
error = xfs_droplink(tp, dp1);
if (error)
- goto out;
+ goto out_trans_abort;
error = xfs_bumplink(tp, dp2);
if (error)
- goto out;
+ goto out_trans_abort;
}
/*
@@ -2771,66 +2855,108 @@ xfs_cross_rename(
}
xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
-out:
+ return xfs_finish_rename(tp, free_list);
+
+out_trans_abort:
+ xfs_bmap_cancel(free_list);
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
return error;
}
/*
+ * xfs_rename_alloc_whiteout()
+ *
+ * Return a referenced, unlinked, unlocked inode that can be used as a
+ * whiteout in a rename transaction. We use a tmpfile inode here so that if we
+ * crash between allocating the inode and linking it into the rename
+ * transaction, recovery will free the inode and we won't leak it.
+ */
+static int
+xfs_rename_alloc_whiteout(
+ struct xfs_inode *dp,
+ struct xfs_inode **wip)
+{
+ struct xfs_inode *tmpfile;
+ int error;
+
+ error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
+ if (error)
+ return error;
+
+ /* Satisfy xfs_bumplink that this is a real tmpfile */
+ xfs_finish_inode_setup(tmpfile);
+ VFS_I(tmpfile)->i_state |= I_LINKABLE;
+
+ *wip = tmpfile;
+ return 0;
+}
+
+/*
* xfs_rename
*/
int
xfs_rename(
- xfs_inode_t *src_dp,
- struct xfs_name *src_name,
- xfs_inode_t *src_ip,
- xfs_inode_t *target_dp,
- struct xfs_name *target_name,
- xfs_inode_t *target_ip,
- unsigned int flags)
+ struct xfs_inode *src_dp,
+ struct xfs_name *src_name,
+ struct xfs_inode *src_ip,
+ struct xfs_inode *target_dp,
+ struct xfs_name *target_name,
+ struct xfs_inode *target_ip,
+ unsigned int flags)
{
- xfs_trans_t *tp = NULL;
- xfs_mount_t *mp = src_dp->i_mount;
- int new_parent; /* moving to a new dir */
- int src_is_directory; /* src_name is a directory */
- int error;
- xfs_bmap_free_t free_list;
- xfs_fsblock_t first_block;
- int cancel_flags;
- int committed;
- xfs_inode_t *inodes[4];
- int spaceres;
- int num_inodes;
+ struct xfs_mount *mp = src_dp->i_mount;
+ struct xfs_trans *tp;
+ struct xfs_bmap_free free_list;
+ xfs_fsblock_t first_block;
+ struct xfs_inode *wip = NULL; /* whiteout inode */
+ struct xfs_inode *inodes[__XFS_SORT_INODES];
+ int num_inodes = __XFS_SORT_INODES;
+ bool new_parent = (src_dp != target_dp);
+ bool src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
+ int cancel_flags = 0;
+ int spaceres;
+ int error;
trace_xfs_rename(src_dp, target_dp, src_name, target_name);
- new_parent = (src_dp != target_dp);
- src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
+ if ((flags & RENAME_EXCHANGE) && !target_ip)
+ return -EINVAL;
- xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
+ /*
+ * If we are doing a whiteout operation, allocate the whiteout inode
+ * we will be placing at the target and ensure the type is set
+ * appropriately.
+ */
+ if (flags & RENAME_WHITEOUT) {
+ ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
+ error = xfs_rename_alloc_whiteout(target_dp, &wip);
+ if (error)
+ return error;
+
+ /* setup target dirent info as whiteout */
+ src_name->type = XFS_DIR3_FT_CHRDEV;
+ }
+
+ xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
inodes, &num_inodes);
- xfs_bmap_init(&free_list, &first_block);
tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
- cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
if (error == -ENOSPC) {
spaceres = 0;
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
}
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto std_return;
- }
+ if (error)
+ goto out_trans_cancel;
+ cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
/*
* Attach the dquots to the inodes
*/
error = xfs_qm_vop_rename_dqattach(inodes);
- if (error) {
- xfs_trans_cancel(tp, cancel_flags);
- goto std_return;
- }
+ if (error)
+ goto out_trans_cancel;
/*
* Lock all the participating inodes. Depending upon whether
@@ -2851,6 +2977,8 @@ xfs_rename(
xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
if (target_ip)
xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
+ if (wip)
+ xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
/*
* If we are using project inheritance, we only allow renames
@@ -2860,24 +2988,16 @@ xfs_rename(
if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
(xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
error = -EXDEV;
- goto error_return;
+ goto out_trans_cancel;
}
- /*
- * Handle RENAME_EXCHANGE flags
- */
- if (flags & RENAME_EXCHANGE) {
- if (target_ip == NULL) {
- error = -EINVAL;
- goto error_return;
- }
- error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
- target_dp, target_name, target_ip,
- &free_list, &first_block, spaceres);
- if (error)
- goto abort_return;
- goto finish_rename;
- }
+ xfs_bmap_init(&free_list, &first_block);
+
+ /* RENAME_EXCHANGE is unique from here on. */
+ if (flags & RENAME_EXCHANGE)
+ return xfs_cross_rename(tp, src_dp, src_name, src_ip,
+ target_dp, target_name, target_ip,
+ &free_list, &first_block, spaceres);
/*
* Set up the target.
@@ -2890,7 +3010,7 @@ xfs_rename(
if (!spaceres) {
error = xfs_dir_canenter(tp, target_dp, target_name);
if (error)
- goto error_return;
+ goto out_trans_cancel;
}
/*
* If target does not exist and the rename crosses
@@ -2901,9 +3021,9 @@ xfs_rename(
src_ip->i_ino, &first_block,
&free_list, spaceres);
if (error == -ENOSPC)
- goto error_return;
+ goto out_bmap_cancel;
if (error)
- goto abort_return;
+ goto out_trans_abort;
xfs_trans_ichgtime(tp, target_dp,
XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -2911,7 +3031,7 @@ xfs_rename(
if (new_parent && src_is_directory) {
error = xfs_bumplink(tp, target_dp);
if (error)
- goto abort_return;
+ goto out_trans_abort;
}
} else { /* target_ip != NULL */
/*
@@ -2926,7 +3046,7 @@ xfs_rename(
if (!(xfs_dir_isempty(target_ip)) ||
(target_ip->i_d.di_nlink > 2)) {
error = -EEXIST;
- goto error_return;
+ goto out_trans_cancel;
}
}
@@ -2943,7 +3063,7 @@ xfs_rename(
src_ip->i_ino,
&first_block, &free_list, spaceres);
if (error)
- goto abort_return;
+ goto out_trans_abort;
xfs_trans_ichgtime(tp, target_dp,
XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -2954,7 +3074,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, target_ip);
if (error)
- goto abort_return;
+ goto out_trans_abort;
if (src_is_directory) {
/*
@@ -2962,7 +3082,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, target_ip);
if (error)
- goto abort_return;
+ goto out_trans_abort;
}
} /* target_ip != NULL */
@@ -2979,7 +3099,7 @@ xfs_rename(
&first_block, &free_list, spaceres);
ASSERT(error != -EEXIST);
if (error)
- goto abort_return;
+ goto out_trans_abort;
}
/*
@@ -3005,49 +3125,67 @@ xfs_rename(
*/
error = xfs_droplink(tp, src_dp);
if (error)
- goto abort_return;
+ goto out_trans_abort;
}
- error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
+ /*
+ * For whiteouts, we only need to update the source dirent with the
+ * inode number of the whiteout inode rather than removing it
+ * altogether.
+ */
+ if (wip) {
+ error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
&first_block, &free_list, spaceres);
+ } else
+ error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
+ &first_block, &free_list, spaceres);
if (error)
- goto abort_return;
-
- xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
- if (new_parent)
- xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
+ goto out_trans_abort;
-finish_rename:
/*
- * If this is a synchronous mount, make sure that the
- * rename transaction goes to disk before returning to
- * the user.
+ * For whiteouts, we need to bump the link count on the whiteout inode.
+ * This means that failures all the way up to this point leave the inode
+ * on the unlinked list and so cleanup is a simple matter of dropping
+ * the remaining reference to it. If we fail here after bumping the link
+ * count, we're shutting down the filesystem so we'll never see the
+ * intermediate state on disk.
*/
- if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
- xfs_trans_set_sync(tp);
- }
+ if (wip) {
+ ASSERT(wip->i_d.di_nlink == 0);
+ error = xfs_bumplink(tp, wip);
+ if (error)
+ goto out_trans_abort;
+ error = xfs_iunlink_remove(tp, wip);
+ if (error)
+ goto out_trans_abort;
+ xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
- error = xfs_bmap_finish(&tp, &free_list, &committed);
- if (error) {
- xfs_bmap_cancel(&free_list);
- xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
- XFS_TRANS_ABORT));
- goto std_return;
+ /*
+ * Now we have a real link, clear the "I'm a tmpfile" state
+ * flag from the inode so it doesn't accidentally get misused in
+ * future.
+ */
+ VFS_I(wip)->i_state &= ~I_LINKABLE;
}
- /*
- * trans_commit will unlock src_ip, target_ip & decrement
- * the vnode references.
- */
- return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
+ if (new_parent)
+ xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
- abort_return:
+ error = xfs_finish_rename(tp, &free_list);
+ if (wip)
+ IRELE(wip);
+ return error;
+
+out_trans_abort:
cancel_flags |= XFS_TRANS_ABORT;
- error_return:
+out_bmap_cancel:
xfs_bmap_cancel(&free_list);
+out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
- std_return:
+ if (wip)
+ IRELE(wip);
return error;
}
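The RENAME_WHITEOUT support added to xfs_rename() above is reached from userspace through renameat2(2). A small sketch follows; it is illustrative only and assumes kernel headers that define SYS_renameat2 (the RENAME_WHITEOUT value is copied from <linux/fs.h> as a fallback).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT (1 << 2)        /* from <linux/fs.h> */
#endif

int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
                return 1;
        }

        /*
         * Move src to dst and leave a whiteout in src's place. Overlay-style
         * filesystems use the whiteout to mark the name as deleted in the
         * upper layer.
         */
        if (syscall(SYS_renameat2, AT_FDCWD, argv[1],
                    AT_FDCWD, argv[2], RENAME_WHITEOUT) < 0) {
                perror("renameat2");
                return 1;
        }
        return 0;
}

On success dst holds the renamed inode and src is replaced by a 0:0 character device, which is what the tmpfile allocated by xfs_rename_alloc_whiteout() becomes once its link count is bumped.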
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index a1cd55f3f351..8f22d20368d8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -56,6 +56,7 @@ typedef struct xfs_inode {
struct xfs_inode_log_item *i_itemp; /* logging information */
mrlock_t i_lock; /* inode lock */
mrlock_t i_iolock; /* inode IO lock */
+ mrlock_t i_mmaplock; /* inode mmap IO lock */
atomic_t i_pincount; /* inode pin count */
spinlock_t i_flags_lock; /* inode i_flags lock */
/* Miscellaneous state. */
@@ -263,15 +264,20 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
#define XFS_IOLOCK_SHARED (1<<1)
#define XFS_ILOCK_EXCL (1<<2)
#define XFS_ILOCK_SHARED (1<<3)
+#define XFS_MMAPLOCK_EXCL (1<<4)
+#define XFS_MMAPLOCK_SHARED (1<<5)
#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
- | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
+ | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \
+ | XFS_MMAPLOCK_EXCL | XFS_MMAPLOCK_SHARED)
#define XFS_LOCK_FLAGS \
{ XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \
{ XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \
{ XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \
- { XFS_ILOCK_SHARED, "ILOCK_SHARED" }
+ { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \
+ { XFS_MMAPLOCK_EXCL, "MMAPLOCK_EXCL" }, \
+ { XFS_MMAPLOCK_SHARED, "MMAPLOCK_SHARED" }
/*
@@ -302,17 +308,26 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
#define XFS_IOLOCK_SHIFT 16
#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
+#define XFS_MMAPLOCK_SHIFT 20
+
#define XFS_ILOCK_SHIFT 24
#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
-#define XFS_IOLOCK_DEP_MASK 0x00ff0000
+#define XFS_IOLOCK_DEP_MASK 0x000f0000
+#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
#define XFS_ILOCK_DEP_MASK 0xff000000
-#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | XFS_ILOCK_DEP_MASK)
+#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | \
+ XFS_MMAPLOCK_DEP_MASK | \
+ XFS_ILOCK_DEP_MASK)
-#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
-#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
+#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) \
+ >> XFS_IOLOCK_SHIFT)
+#define XFS_MMAPLOCK_DEP(flags) (((flags) & XFS_MMAPLOCK_DEP_MASK) \
+ >> XFS_MMAPLOCK_SHIFT)
+#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) \
+ >> XFS_ILOCK_SHIFT)
/*
* For multiple groups support: if S_ISGID bit is set in the parent
@@ -391,6 +406,28 @@ int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
+/* from xfs_iops.c */
+/*
+ * When setting up a newly allocated inode, we need to call
+ * xfs_finish_inode_setup() once the inode is fully instantiated at
+ * the VFS level to prevent the rest of the world seeing the inode
+ * before we've completed instantiation. Otherwise we can do it
+ * the moment the inode lookup is complete.
+ */
+extern void xfs_setup_inode(struct xfs_inode *ip);
+static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
+{
+ xfs_iflags_clear(ip, XFS_INEW);
+ barrier();
+ unlock_new_inode(VFS_I(ip));
+}
+
+static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
+{
+ xfs_setup_inode(ip);
+ xfs_finish_inode_setup(ip);
+}
+
#define IHOLD(ip) \
do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index ac4feae45eb3..87f67c6b654c 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -82,7 +82,7 @@ xfs_find_handle(
error = user_lpath((const char __user *)hreq->path, &path);
if (error)
return error;
- inode = path.dentry->d_inode;
+ inode = d_inode(path.dentry);
}
ip = XFS_I(inode);
@@ -210,7 +210,7 @@ xfs_open_by_handle(
dentry = xfs_handlereq_to_dentry(parfilp, hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
/* Restrict xfs_open_by_handle to directories & regular files. */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
@@ -303,7 +303,7 @@ xfs_readlink_by_handle(
goto out_dput;
}
- error = xfs_readlink(XFS_I(dentry->d_inode), link);
+ error = xfs_readlink(XFS_I(d_inode(dentry)), link);
if (error)
goto out_kfree;
error = readlink_copy(hreq->ohandle, olen, link);
@@ -376,7 +376,7 @@ xfs_fssetdm_by_handle(
return PTR_ERR(dentry);
}
- if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
+ if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
error = -EPERM;
goto out;
}
@@ -386,7 +386,7 @@ xfs_fssetdm_by_handle(
goto out;
}
- error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
+ error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
fsd.fsd_dmstate);
out:
@@ -429,7 +429,7 @@ xfs_attrlist_by_handle(
goto out_dput;
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
+ error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
al_hreq.flags, cursor);
if (error)
goto out_kfree;
@@ -559,7 +559,7 @@ xfs_attrmulti_by_handle(
switch (ops[i].am_opcode) {
case ATTR_OP_GET:
ops[i].am_error = xfs_attrmulti_attr_get(
- dentry->d_inode, attr_name,
+ d_inode(dentry), attr_name,
ops[i].am_attrvalue, &ops[i].am_length,
ops[i].am_flags);
break;
@@ -568,7 +568,7 @@ xfs_attrmulti_by_handle(
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_set(
- dentry->d_inode, attr_name,
+ d_inode(dentry), attr_name,
ops[i].am_attrvalue, ops[i].am_length,
ops[i].am_flags);
mnt_drop_write_file(parfilp);
@@ -578,7 +578,7 @@ xfs_attrmulti_by_handle(
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_remove(
- dentry->d_inode, attr_name,
+ d_inode(dentry), attr_name,
ops[i].am_flags);
mnt_drop_write_file(parfilp);
break;
@@ -631,7 +631,7 @@ xfs_ioc_space(
if (filp->f_flags & O_DSYNC)
flags |= XFS_PREALLOC_SYNC;
- if (ioflags & XFS_IO_INVIS)
+ if (ioflags & XFS_IO_INVIS)
flags |= XFS_PREALLOC_INVISIBLE;
error = mnt_want_write_file(filp);
@@ -639,10 +639,13 @@ xfs_ioc_space(
return error;
xfs_ilock(ip, iolock);
- error = xfs_break_layouts(inode, &iolock);
+ error = xfs_break_layouts(inode, &iolock, false);
if (error)
goto out_unlock;
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ iolock |= XFS_MMAPLOCK_EXCL;
+
switch (bf->l_whence) {
case 0: /*SEEK_SET*/
break;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index bfc7c7c8a0c8..b88bdc85dd3d 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -375,7 +375,7 @@ xfs_compat_attrlist_by_handle(
goto out_dput;
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
+ error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
al_hreq.flags, cursor);
if (error)
goto out_kfree;
@@ -445,7 +445,7 @@ xfs_compat_attrmulti_by_handle(
switch (ops[i].am_opcode) {
case ATTR_OP_GET:
ops[i].am_error = xfs_attrmulti_attr_get(
- dentry->d_inode, attr_name,
+ d_inode(dentry), attr_name,
compat_ptr(ops[i].am_attrvalue),
&ops[i].am_length, ops[i].am_flags);
break;
@@ -454,7 +454,7 @@ xfs_compat_attrmulti_by_handle(
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_set(
- dentry->d_inode, attr_name,
+ d_inode(dentry), attr_name,
compat_ptr(ops[i].am_attrvalue),
ops[i].am_length, ops[i].am_flags);
mnt_drop_write_file(parfilp);
@@ -464,7 +464,7 @@ xfs_compat_attrmulti_by_handle(
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_remove(
- dentry->d_inode, attr_name,
+ d_inode(dentry), attr_name,
ops[i].am_flags);
mnt_drop_write_file(parfilp);
break;
@@ -504,7 +504,7 @@ xfs_compat_fssetdm_by_handle(
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
+ if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
error = -EPERM;
goto out;
}
@@ -514,7 +514,7 @@ xfs_compat_fssetdm_by_handle(
goto out;
}
- error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
+ error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
fsd.fsd_dmstate);
out:
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index ccb1dd0d509e..38e633bad8c2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -460,8 +460,7 @@ xfs_iomap_prealloc_size(
alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
alloc_blocks);
- xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
- freesp = mp->m_sb.sb_fdblocks;
+ freesp = percpu_counter_read_positive(&mp->m_fdblocks);
if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
shift = 2;
if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
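
The substitution above trades the old synchronised counter read for percpu_counter_read_positive(), a cheap and possibly slightly stale sum of the per-CPU counter, which is adequate for this preallocation heuristic; paths that report values to userspace (statfs later in this series) use the exact but more expensive percpu_counter_sum() instead. A hedged sketch of the two access patterns, using only calls that appear elsewhere in this patch:

    /* fast, approximate - adequate for sizing speculative preallocation */
    freesp = percpu_counter_read_positive(&mp->m_fdblocks);

    /* slow, exact - used where the value is reported to userspace */
    fdblocks = percpu_counter_sum(&mp->m_fdblocks);
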
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index e53a90331422..f4cd7204e236 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -187,6 +187,8 @@ xfs_generic_create(
else
d_instantiate(dentry, inode);
+ xfs_finish_inode_setup(ip);
+
out_free_acl:
if (default_acl)
posix_acl_release(default_acl);
@@ -195,6 +197,7 @@ xfs_generic_create(
return error;
out_cleanup_inode:
+ xfs_finish_inode_setup(ip);
if (!tmpfile)
xfs_cleanup_inode(dir, inode, dentry);
iput(inode);
@@ -301,7 +304,7 @@ xfs_vn_link(
struct inode *dir,
struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
struct xfs_name name;
int error;
@@ -326,7 +329,7 @@ xfs_vn_unlink(
xfs_dentry_to_name(&name, dentry, 0);
- error = xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode));
+ error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
if (error)
return error;
@@ -367,9 +370,11 @@ xfs_vn_symlink(
goto out_cleanup_inode;
d_instantiate(dentry, inode);
+ xfs_finish_inode_setup(cip);
return 0;
out_cleanup_inode:
+ xfs_finish_inode_setup(cip);
xfs_cleanup_inode(dir, inode, dentry);
iput(inode);
out:
@@ -384,22 +389,22 @@ xfs_vn_rename(
struct dentry *ndentry,
unsigned int flags)
{
- struct inode *new_inode = ndentry->d_inode;
+ struct inode *new_inode = d_inode(ndentry);
int omode = 0;
struct xfs_name oname;
struct xfs_name nname;
- if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
/* if we are exchanging files, we need to set i_mode of both files */
if (flags & RENAME_EXCHANGE)
- omode = ndentry->d_inode->i_mode;
+ omode = d_inode(ndentry)->i_mode;
xfs_dentry_to_name(&oname, odentry, omode);
- xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode);
+ xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
- return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
+ return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
XFS_I(ndir), &nname,
new_inode ? XFS_I(new_inode) : NULL, flags);
}
@@ -421,7 +426,7 @@ xfs_vn_follow_link(
if (!link)
goto out_err;
- error = xfs_readlink(XFS_I(dentry->d_inode), link);
+ error = xfs_readlink(XFS_I(d_inode(dentry)), link);
if (unlikely(error))
goto out_kfree;
@@ -441,7 +446,7 @@ xfs_vn_getattr(
struct dentry *dentry,
struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
@@ -766,6 +771,7 @@ xfs_setattr_size(
return error;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
ASSERT(S_ISREG(ip->i_d.di_mode));
ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
@@ -829,55 +835,27 @@ xfs_setattr_size(
inode_dio_wait(inode);
/*
- * Do all the page cache truncate work outside the transaction context
- * as the "lock" order is page lock->log space reservation. i.e.
- * locking pages inside the transaction can ABBA deadlock with
- * writeback. We have to do the VFS inode size update before we truncate
- * the pagecache, however, to avoid racing with page faults beyond the
- * new EOF they are not serialised against truncate operations except by
- * page locks and size updates.
+ * We've already locked out new page faults, so now we can safely remove
+ * pages from the page cache knowing they won't get refaulted until we
+ * drop the XFS_MMAPLOCK_EXCL lock after the extent manipulations are
+ * complete. The truncate_setsize() call also cleans partial EOF page
+ * PTEs on extending truncates and hence ensures sub-page block size
+ * filesystems are correctly handled, too.
*
- * Hence we are in a situation where a truncate can fail with ENOMEM
- * from xfs_trans_reserve(), but having already truncated the in-memory
- * version of the file (i.e. made user visible changes). There's not
- * much we can do about this, except to hope that the caller sees ENOMEM
- * and retries the truncate operation.
+ * We have to do all the page cache truncate work outside the
+ * transaction context as the "lock" order is page lock->log space
+ * reservation as defined by extent allocation in the writeback path.
+ * Hence a truncate can fail with ENOMEM from xfs_trans_reserve(), but
+ * having already truncated the in-memory version of the file (i.e. made
+ * user visible changes). There's not much we can do about this, except
+ * to hope that the caller sees ENOMEM and retries the truncate
+ * operation.
*/
error = block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
if (error)
return error;
truncate_setsize(inode, newsize);
- /*
- * The "we can't serialise against page faults" pain gets worse.
- *
- * If the file is mapped then we have to clean the page at the old EOF
- * when extending the file. Extending the file can expose changes the
- * underlying page mapping (e.g. from beyond EOF to a hole or
- * unwritten), and so on the next attempt to write to that page we need
- * to remap it for write. i.e. we need .page_mkwrite() to be called.
- * Hence we need to clean the page to clean the pte and so a new write
- * fault will be triggered appropriately.
- *
- * If we do it before we change the inode size, then we can race with a
- * page fault that maps the page with exactly the same problem. If we do
- * it after we change the file size, then a new page fault can come in
- * and allocate space before we've run the rest of the truncate
- * transaction. That's kinda grotesque, but it's better than have data
- * over a hole, and so that's the lesser evil that has been chosen here.
- *
- * The real solution, however, is to have some mechanism for locking out
- * page faults while a truncate is in progress.
- */
- if (newsize > oldsize && mapping_mapped(VFS_I(ip)->i_mapping)) {
- error = filemap_write_and_wait_range(
- VFS_I(ip)->i_mapping,
- round_down(oldsize, PAGE_CACHE_SIZE),
- round_up(oldsize, PAGE_CACHE_SIZE) - 1);
- if (error)
- return error;
- }
-
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
if (error)
@@ -968,16 +946,20 @@ xfs_vn_setattr(
struct dentry *dentry,
struct iattr *iattr)
{
- struct xfs_inode *ip = XFS_I(dentry->d_inode);
+ struct xfs_inode *ip = XFS_I(d_inode(dentry));
int error;
if (iattr->ia_valid & ATTR_SIZE) {
uint iolock = XFS_IOLOCK_EXCL;
xfs_ilock(ip, iolock);
- error = xfs_break_layouts(dentry->d_inode, &iolock);
- if (!error)
+ error = xfs_break_layouts(d_inode(dentry), &iolock, true);
+ if (!error) {
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ iolock |= XFS_MMAPLOCK_EXCL;
+
error = xfs_setattr_size(ip, iattr);
+ }
xfs_iunlock(ip, iolock);
} else {
error = xfs_setattr_nonsize(ip, iattr, 0);
@@ -1228,16 +1210,12 @@ xfs_diflags_to_iflags(
}
/*
- * Initialize the Linux inode, set up the operation vectors and
- * unlock the inode.
+ * Initialize the Linux inode and set up the operation vectors.
*
- * When reading existing inodes from disk this is called directly
- * from xfs_iget, when creating a new inode it is called from
- * xfs_ialloc after setting up the inode.
- *
- * We are always called with an uninitialised linux inode here.
- * We need to initialise the necessary fields and take a reference
- * on it.
+ * When reading existing inodes from disk this is called directly from xfs_iget;
+ * when creating a new inode it is called from xfs_ialloc after setting up the
+ * inode. These callers have different criteria for clearing XFS_INEW, so leave
+ * it up to the caller to deal with unlocking the inode appropriately.
*/
void
xfs_setup_inode(
@@ -1324,9 +1302,4 @@ xfs_setup_inode(
inode_has_no_xattr(inode);
cache_no_acl(inode);
}
-
- xfs_iflags_clear(ip, XFS_INEW);
- barrier();
-
- unlock_new_inode(inode);
}
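
The xfs_setattr_size()/xfs_vn_setattr() hunks above establish the serialisation the new comment describes: truncate takes XFS_MMAPLOCK_EXCL before touching the page cache, so page faults (assumed to take XFS_MMAPLOCK_SHARED in the xfs_file.c changes elsewhere in this series, not shown in this excerpt) cannot refault pages until the extent manipulation is complete. A compact, illustrative restatement of the truncate-side ordering:

    xfs_ilock(ip, XFS_IOLOCK_EXCL);
    /* break pNFS layouts, possibly dropping/retaking i_mutex and the iolock */
    xfs_ilock(ip, XFS_MMAPLOCK_EXCL);      /* lock out new page faults */
    truncate_setsize(inode, newsize);      /* safe: no refaults until unlock */
    /* ... transactional extent manipulation under the ILOCK ... */
    xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
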
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index ea7a98e9cb70..a0f84abb0d09 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -25,8 +25,6 @@ extern const struct file_operations xfs_dir_file_operations;
extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
-extern void xfs_setup_inode(struct xfs_inode *);
-
/*
* Internal setattr interfaces.
*/
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 82e314258f73..80429891dc9b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -229,7 +229,7 @@ xfs_bulkstat_grab_ichunk(
error = xfs_inobt_get_rec(cur, irec, &stat);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(stat == 1);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);
/* Check if the record contains the inode in request */
if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index c31d2c2eadc4..7c7842c85a08 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -116,15 +116,6 @@ typedef __uint64_t __psunsigned_t;
#undef XFS_NATIVE_HOST
#endif
-/*
- * Feature macros (disable/enable)
- */
-#ifdef CONFIG_SMP
-#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
-#else
-#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
-#endif
-
#define irix_sgid_inherit xfs_params.sgid_inherit.val
#define irix_symlink_mode xfs_params.symlink_mode.val
#define xfs_panic_mask xfs_params.panic_mask.val
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a5a945fc3bdc..4f5784f85a5b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -4463,10 +4463,10 @@ xlog_do_recover(
xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
ASSERT(xfs_sb_good_version(sbp));
+ xfs_reinit_percpu_counters(log->l_mp);
+
xfs_buf_relse(bp);
- /* We've re-read the superblock so re-initialize per-cpu counters */
- xfs_icsb_reinit_counters(log->l_mp);
xlog_recover_check_summary(log);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 4fa80e63eea2..2ce7ee3b4ec1 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -43,18 +43,6 @@
#include "xfs_sysfs.h"
-#ifdef HAVE_PERCPU_SB
-STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
- int);
-STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
- int);
-STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
-#else
-
-#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
-#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
-#endif
-
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
@@ -347,8 +335,7 @@ reread:
goto reread;
}
- /* Initialize per-cpu counters */
- xfs_icsb_reinit_counters(mp);
+ xfs_reinit_percpu_counters(mp);
/* no need to be quiet anymore, so reset the buf ops */
bp->b_ops = &xfs_sb_buf_ops;
@@ -1087,8 +1074,6 @@ xfs_log_sbcount(xfs_mount_t *mp)
if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
return 0;
- xfs_icsb_sync_counters(mp, 0);
-
/*
* we don't need to do this if we are updating the superblock
* counters on every modification.
@@ -1099,253 +1084,136 @@ xfs_log_sbcount(xfs_mount_t *mp)
return xfs_sync_sb(mp, true);
}
-/*
- * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
- * a delta to a specified field in the in-core superblock. Simply
- * switch on the field indicated and apply the delta to that field.
- * Fields are not allowed to dip below zero, so if the delta would
- * do this do not apply it and return EINVAL.
- *
- * The m_sb_lock must be held when this routine is called.
- */
-STATIC int
-xfs_mod_incore_sb_unlocked(
- xfs_mount_t *mp,
- xfs_sb_field_t field,
- int64_t delta,
- int rsvd)
+int
+xfs_mod_icount(
+ struct xfs_mount *mp,
+ int64_t delta)
{
- int scounter; /* short counter for 32 bit fields */
- long long lcounter; /* long counter for 64 bit fields */
- long long res_used, rem;
-
- /*
- * With the in-core superblock spin lock held, switch
- * on the indicated field. Apply the delta to the
- * proper field. If the fields value would dip below
- * 0, then do not apply the delta and return EINVAL.
- */
- switch (field) {
- case XFS_SBS_ICOUNT:
- lcounter = (long long)mp->m_sb.sb_icount;
- lcounter += delta;
- if (lcounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_icount = lcounter;
- return 0;
- case XFS_SBS_IFREE:
- lcounter = (long long)mp->m_sb.sb_ifree;
- lcounter += delta;
- if (lcounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_ifree = lcounter;
- return 0;
- case XFS_SBS_FDBLOCKS:
- lcounter = (long long)
- mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
- res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
-
- if (delta > 0) { /* Putting blocks back */
- if (res_used > delta) {
- mp->m_resblks_avail += delta;
- } else {
- rem = delta - res_used;
- mp->m_resblks_avail = mp->m_resblks;
- lcounter += rem;
- }
- } else { /* Taking blocks away */
- lcounter += delta;
- if (lcounter >= 0) {
- mp->m_sb.sb_fdblocks = lcounter +
- XFS_ALLOC_SET_ASIDE(mp);
- return 0;
- }
-
- /*
- * We are out of blocks, use any available reserved
- * blocks if were allowed to.
- */
- if (!rsvd)
- return -ENOSPC;
-
- lcounter = (long long)mp->m_resblks_avail + delta;
- if (lcounter >= 0) {
- mp->m_resblks_avail = lcounter;
- return 0;
- }
- printk_once(KERN_WARNING
- "Filesystem \"%s\": reserve blocks depleted! "
- "Consider increasing reserve pool size.",
- mp->m_fsname);
- return -ENOSPC;
- }
-
- mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
- return 0;
- case XFS_SBS_FREXTENTS:
- lcounter = (long long)mp->m_sb.sb_frextents;
- lcounter += delta;
- if (lcounter < 0) {
- return -ENOSPC;
- }
- mp->m_sb.sb_frextents = lcounter;
- return 0;
- case XFS_SBS_DBLOCKS:
- lcounter = (long long)mp->m_sb.sb_dblocks;
- lcounter += delta;
- if (lcounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_dblocks = lcounter;
- return 0;
- case XFS_SBS_AGCOUNT:
- scounter = mp->m_sb.sb_agcount;
- scounter += delta;
- if (scounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_agcount = scounter;
- return 0;
- case XFS_SBS_IMAX_PCT:
- scounter = mp->m_sb.sb_imax_pct;
- scounter += delta;
- if (scounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_imax_pct = scounter;
- return 0;
- case XFS_SBS_REXTSIZE:
- scounter = mp->m_sb.sb_rextsize;
- scounter += delta;
- if (scounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_rextsize = scounter;
- return 0;
- case XFS_SBS_RBMBLOCKS:
- scounter = mp->m_sb.sb_rbmblocks;
- scounter += delta;
- if (scounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_rbmblocks = scounter;
- return 0;
- case XFS_SBS_RBLOCKS:
- lcounter = (long long)mp->m_sb.sb_rblocks;
- lcounter += delta;
- if (lcounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_rblocks = lcounter;
- return 0;
- case XFS_SBS_REXTENTS:
- lcounter = (long long)mp->m_sb.sb_rextents;
- lcounter += delta;
- if (lcounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_rextents = lcounter;
- return 0;
- case XFS_SBS_REXTSLOG:
- scounter = mp->m_sb.sb_rextslog;
- scounter += delta;
- if (scounter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- mp->m_sb.sb_rextslog = scounter;
- return 0;
- default:
+ /* deltas are +/-64, hence the large batch size of 128. */
+ __percpu_counter_add(&mp->m_icount, delta, 128);
+ if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
ASSERT(0);
+ percpu_counter_add(&mp->m_icount, -delta);
return -EINVAL;
}
+ return 0;
}
-/*
- * xfs_mod_incore_sb() is used to change a field in the in-core
- * superblock structure by the specified delta. This modification
- * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked()
- * routine to do the work.
- */
int
-xfs_mod_incore_sb(
+xfs_mod_ifree(
struct xfs_mount *mp,
- xfs_sb_field_t field,
- int64_t delta,
- int rsvd)
+ int64_t delta)
{
- int status;
-
-#ifdef HAVE_PERCPU_SB
- ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
-#endif
- spin_lock(&mp->m_sb_lock);
- status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
- spin_unlock(&mp->m_sb_lock);
-
- return status;
+ percpu_counter_add(&mp->m_ifree, delta);
+ if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
+ ASSERT(0);
+ percpu_counter_add(&mp->m_ifree, -delta);
+ return -EINVAL;
+ }
+ return 0;
}
-/*
- * Change more than one field in the in-core superblock structure at a time.
- *
- * The fields and changes to those fields are specified in the array of
- * xfs_mod_sb structures passed in. Either all of the specified deltas
- * will be applied or none of them will. If any modified field dips below 0,
- * then all modifications will be backed out and EINVAL will be returned.
- *
- * Note that this function may not be used for the superblock values that
- * are tracked with the in-memory per-cpu counters - a direct call to
- * xfs_icsb_modify_counters is required for these.
- */
int
-xfs_mod_incore_sb_batch(
+xfs_mod_fdblocks(
struct xfs_mount *mp,
- xfs_mod_sb_t *msb,
- uint nmsb,
- int rsvd)
+ int64_t delta,
+ bool rsvd)
{
- xfs_mod_sb_t *msbp;
- int error = 0;
+ int64_t lcounter;
+ long long res_used;
+ s32 batch;
+
+ if (delta > 0) {
+ /*
+ * If the reserve pool is depleted, put blocks back into it
+ * first. Most of the time the pool is full.
+ */
+ if (likely(mp->m_resblks == mp->m_resblks_avail)) {
+ percpu_counter_add(&mp->m_fdblocks, delta);
+ return 0;
+ }
+
+ spin_lock(&mp->m_sb_lock);
+ res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
+
+ if (res_used > delta) {
+ mp->m_resblks_avail += delta;
+ } else {
+ delta -= res_used;
+ mp->m_resblks_avail = mp->m_resblks;
+ percpu_counter_add(&mp->m_fdblocks, delta);
+ }
+ spin_unlock(&mp->m_sb_lock);
+ return 0;
+ }
/*
- * Loop through the array of mod structures and apply each individually.
- * If any fail, then back out all those which have already been applied.
- * Do all of this within the scope of the m_sb_lock so that all of the
- * changes will be atomic.
+ * When taking blocks away, we need to be more accurate the closer
+ * we are to zero.
+ *
+ * batch size is set to a maximum of 1024 blocks - if we are
+ * allocating or freeing extents larger than this then we aren't
+ * going to be hammering the counter lock so a lock per update
+ * is not a problem.
+ *
+ * If the counter has a value of less than 2 * max batch size,
+ * then make everything serialise as we are really close to
+ * ENOSPC.
+ */
+#define __BATCH 1024
+ if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0)
+ batch = 1;
+ else
+ batch = __BATCH;
+#undef __BATCH
+
+ __percpu_counter_add(&mp->m_fdblocks, delta, batch);
+ if (percpu_counter_compare(&mp->m_fdblocks,
+ XFS_ALLOC_SET_ASIDE(mp)) >= 0) {
+ /* we had space! */
+ return 0;
+ }
+
+ /*
+ * lock up the sb for dipping into reserves before releasing the space
+ * that took us to ENOSPC.
*/
spin_lock(&mp->m_sb_lock);
- for (msbp = msb; msbp < (msb + nmsb); msbp++) {
- ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
- msbp->msb_field > XFS_SBS_FDBLOCKS);
+ percpu_counter_add(&mp->m_fdblocks, -delta);
+ if (!rsvd)
+ goto fdblocks_enospc;
- error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
- msbp->msb_delta, rsvd);
- if (error)
- goto unwind;
+ lcounter = (long long)mp->m_resblks_avail + delta;
+ if (lcounter >= 0) {
+ mp->m_resblks_avail = lcounter;
+ spin_unlock(&mp->m_sb_lock);
+ return 0;
}
+ printk_once(KERN_WARNING
+ "Filesystem \"%s\": reserve blocks depleted! "
+ "Consider increasing reserve pool size.",
+ mp->m_fsname);
+fdblocks_enospc:
spin_unlock(&mp->m_sb_lock);
- return 0;
+ return -ENOSPC;
+}
-unwind:
- while (--msbp >= msb) {
- error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
- -msbp->msb_delta, rsvd);
- ASSERT(error == 0);
- }
+int
+xfs_mod_frextents(
+ struct xfs_mount *mp,
+ int64_t delta)
+{
+ int64_t lcounter;
+ int ret = 0;
+
+ spin_lock(&mp->m_sb_lock);
+ lcounter = mp->m_sb.sb_frextents + delta;
+ if (lcounter < 0)
+ ret = -ENOSPC;
+ else
+ mp->m_sb.sb_frextents = lcounter;
spin_unlock(&mp->m_sb_lock);
- return error;
+ return ret;
}
/*
@@ -1407,573 +1275,3 @@ xfs_dev_is_read_only(
}
return 0;
}
-
-#ifdef HAVE_PERCPU_SB
-/*
- * Per-cpu incore superblock counters
- *
- * Simple concept, difficult implementation
- *
- * Basically, replace the incore superblock counters with a distributed per cpu
- * counter for contended fields (e.g. free block count).
- *
- * Difficulties arise in that the incore sb is used for ENOSPC checking, and
- * hence needs to be accurately read when we are running low on space. Hence
- * there is a method to enable and disable the per-cpu counters based on how
- * much "stuff" is available in them.
- *
- * Basically, a counter is enabled if there is enough free resource to justify
- * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
- * ENOSPC), then we disable the counters to synchronise all callers and
- * re-distribute the available resources.
- *
- * If, once we redistributed the available resources, we still get a failure,
- * we disable the per-cpu counter and go through the slow path.
- *
- * The slow path is the current xfs_mod_incore_sb() function. This means that
- * when we disable a per-cpu counter, we need to drain its resources back to
- * the global superblock. We do this after disabling the counter to prevent
- * more threads from queueing up on the counter.
- *
- * Essentially, this means that we still need a lock in the fast path to enable
- * synchronisation between the global counters and the per-cpu counters. This
- * is not a problem because the lock will be local to a CPU almost all the time
- * and have little contention except when we get to ENOSPC conditions.
- *
- * Basically, this lock becomes a barrier that enables us to lock out the fast
- * path while we do things like enabling and disabling counters and
- * synchronising the counters.
- *
- * Locking rules:
- *
- * 1. m_sb_lock before picking up per-cpu locks
- * 2. per-cpu locks always picked up via for_each_online_cpu() order
- * 3. accurate counter sync requires m_sb_lock + per cpu locks
- * 4. modifying per-cpu counters requires holding per-cpu lock
- * 5. modifying global counters requires holding m_sb_lock
- * 6. enabling or disabling a counter requires holding the m_sb_lock
- * and _none_ of the per-cpu locks.
- *
- * Disabled counters are only ever re-enabled by a balance operation
- * that results in more free resources per CPU than a given threshold.
- * To ensure counters don't remain disabled, they are rebalanced when
- * the global resource goes above a higher threshold (i.e. some hysteresis
- * is present to prevent thrashing).
- */
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * hot-plug CPU notifier support.
- *
- * We need a notifier per filesystem as we need to be able to identify
- * the filesystem to balance the counters out. This is achieved by
- * having a notifier block embedded in the xfs_mount_t and doing pointer
- * magic to get the mount pointer from the notifier block address.
- */
-STATIC int
-xfs_icsb_cpu_notify(
- struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- xfs_icsb_cnts_t *cntp;
- xfs_mount_t *mp;
-
- mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
- cntp = (xfs_icsb_cnts_t *)
- per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- /* Easy Case - initialize the area and locks, and
- * then rebalance when online does everything else for us. */
- memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
- break;
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- xfs_icsb_lock(mp);
- xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
- xfs_icsb_unlock(mp);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- /* Disable all the counters, then fold the dead cpu's
- * count into the total on the global superblock and
- * re-enable the counters. */
- xfs_icsb_lock(mp);
- spin_lock(&mp->m_sb_lock);
- xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
- xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
- xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
-
- mp->m_sb.sb_icount += cntp->icsb_icount;
- mp->m_sb.sb_ifree += cntp->icsb_ifree;
- mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
-
- memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
-
- xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
- xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
- xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
- spin_unlock(&mp->m_sb_lock);
- xfs_icsb_unlock(mp);
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-int
-xfs_icsb_init_counters(
- xfs_mount_t *mp)
-{
- xfs_icsb_cnts_t *cntp;
- int i;
-
- mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
- if (mp->m_sb_cnts == NULL)
- return -ENOMEM;
-
- for_each_online_cpu(i) {
- cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
- memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
- }
-
- mutex_init(&mp->m_icsb_mutex);
-
- /*
- * start with all counters disabled so that the
- * initial balance kicks us off correctly
- */
- mp->m_icsb_counters = -1;
-
-#ifdef CONFIG_HOTPLUG_CPU
- mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
- mp->m_icsb_notifier.priority = 0;
- register_hotcpu_notifier(&mp->m_icsb_notifier);
-#endif /* CONFIG_HOTPLUG_CPU */
-
- return 0;
-}
-
-void
-xfs_icsb_reinit_counters(
- xfs_mount_t *mp)
-{
- xfs_icsb_lock(mp);
- /*
- * start with all counters disabled so that the
- * initial balance kicks us off correctly
- */
- mp->m_icsb_counters = -1;
- xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
- xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
- xfs_icsb_unlock(mp);
-}
-
-void
-xfs_icsb_destroy_counters(
- xfs_mount_t *mp)
-{
- if (mp->m_sb_cnts) {
- unregister_hotcpu_notifier(&mp->m_icsb_notifier);
- free_percpu(mp->m_sb_cnts);
- }
- mutex_destroy(&mp->m_icsb_mutex);
-}
-
-STATIC void
-xfs_icsb_lock_cntr(
- xfs_icsb_cnts_t *icsbp)
-{
- while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
- ndelay(1000);
- }
-}
-
-STATIC void
-xfs_icsb_unlock_cntr(
- xfs_icsb_cnts_t *icsbp)
-{
- clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
-}
-
-
-STATIC void
-xfs_icsb_lock_all_counters(
- xfs_mount_t *mp)
-{
- xfs_icsb_cnts_t *cntp;
- int i;
-
- for_each_online_cpu(i) {
- cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
- xfs_icsb_lock_cntr(cntp);
- }
-}
-
-STATIC void
-xfs_icsb_unlock_all_counters(
- xfs_mount_t *mp)
-{
- xfs_icsb_cnts_t *cntp;
- int i;
-
- for_each_online_cpu(i) {
- cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
- xfs_icsb_unlock_cntr(cntp);
- }
-}
-
-STATIC void
-xfs_icsb_count(
- xfs_mount_t *mp,
- xfs_icsb_cnts_t *cnt,
- int flags)
-{
- xfs_icsb_cnts_t *cntp;
- int i;
-
- memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
-
- if (!(flags & XFS_ICSB_LAZY_COUNT))
- xfs_icsb_lock_all_counters(mp);
-
- for_each_online_cpu(i) {
- cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
- cnt->icsb_icount += cntp->icsb_icount;
- cnt->icsb_ifree += cntp->icsb_ifree;
- cnt->icsb_fdblocks += cntp->icsb_fdblocks;
- }
-
- if (!(flags & XFS_ICSB_LAZY_COUNT))
- xfs_icsb_unlock_all_counters(mp);
-}
-
-STATIC int
-xfs_icsb_counter_disabled(
- xfs_mount_t *mp,
- xfs_sb_field_t field)
-{
- ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
- return test_bit(field, &mp->m_icsb_counters);
-}
-
-STATIC void
-xfs_icsb_disable_counter(
- xfs_mount_t *mp,
- xfs_sb_field_t field)
-{
- xfs_icsb_cnts_t cnt;
-
- ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
-
- /*
- * If we are already disabled, then there is nothing to do
- * here. We check before locking all the counters to avoid
- * the expensive lock operation when being called in the
- * slow path and the counter is already disabled. This is
- * safe because the only time we set or clear this state is under
- * the m_icsb_mutex.
- */
- if (xfs_icsb_counter_disabled(mp, field))
- return;
-
- xfs_icsb_lock_all_counters(mp);
- if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
- /* drain back to superblock */
-
- xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
- switch(field) {
- case XFS_SBS_ICOUNT:
- mp->m_sb.sb_icount = cnt.icsb_icount;
- break;
- case XFS_SBS_IFREE:
- mp->m_sb.sb_ifree = cnt.icsb_ifree;
- break;
- case XFS_SBS_FDBLOCKS:
- mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
- break;
- default:
- BUG();
- }
- }
-
- xfs_icsb_unlock_all_counters(mp);
-}
-
-STATIC void
-xfs_icsb_enable_counter(
- xfs_mount_t *mp,
- xfs_sb_field_t field,
- uint64_t count,
- uint64_t resid)
-{
- xfs_icsb_cnts_t *cntp;
- int i;
-
- ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
-
- xfs_icsb_lock_all_counters(mp);
- for_each_online_cpu(i) {
- cntp = per_cpu_ptr(mp->m_sb_cnts, i);
- switch (field) {
- case XFS_SBS_ICOUNT:
- cntp->icsb_icount = count + resid;
- break;
- case XFS_SBS_IFREE:
- cntp->icsb_ifree = count + resid;
- break;
- case XFS_SBS_FDBLOCKS:
- cntp->icsb_fdblocks = count + resid;
- break;
- default:
- BUG();
- break;
- }
- resid = 0;
- }
- clear_bit(field, &mp->m_icsb_counters);
- xfs_icsb_unlock_all_counters(mp);
-}
-
-void
-xfs_icsb_sync_counters_locked(
- xfs_mount_t *mp,
- int flags)
-{
- xfs_icsb_cnts_t cnt;
-
- xfs_icsb_count(mp, &cnt, flags);
-
- if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
- mp->m_sb.sb_icount = cnt.icsb_icount;
- if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
- mp->m_sb.sb_ifree = cnt.icsb_ifree;
- if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
- mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-}
-
-/*
- * Accurate update of per-cpu counters to incore superblock
- */
-void
-xfs_icsb_sync_counters(
- xfs_mount_t *mp,
- int flags)
-{
- spin_lock(&mp->m_sb_lock);
- xfs_icsb_sync_counters_locked(mp, flags);
- spin_unlock(&mp->m_sb_lock);
-}
-
-/*
- * Balance and enable/disable counters as necessary.
- *
- * Thresholds for re-enabling counters are somewhat magic. inode counts are
- * chosen to be the same number as single on disk allocation chunk per CPU, and
- * free blocks is something far enough zero that we aren't going thrash when we
- * get near ENOSPC. We also need to supply a minimum we require per cpu to
- * prevent looping endlessly when xfs_alloc_space asks for more than will
- * be distributed to a single CPU but each CPU has enough blocks to be
- * reenabled.
- *
- * Note that we can be called when counters are already disabled.
- * xfs_icsb_disable_counter() optimises the counter locking in this case to
- * prevent locking every per-cpu counter needlessly.
- */
-
-#define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64
-#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
- (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
-STATIC void
-xfs_icsb_balance_counter_locked(
- xfs_mount_t *mp,
- xfs_sb_field_t field,
- int min_per_cpu)
-{
- uint64_t count, resid;
- int weight = num_online_cpus();
- uint64_t min = (uint64_t)min_per_cpu;
-
- /* disable counter and sync counter */
- xfs_icsb_disable_counter(mp, field);
-
- /* update counters - first CPU gets residual*/
- switch (field) {
- case XFS_SBS_ICOUNT:
- count = mp->m_sb.sb_icount;
- resid = do_div(count, weight);
- if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
- return;
- break;
- case XFS_SBS_IFREE:
- count = mp->m_sb.sb_ifree;
- resid = do_div(count, weight);
- if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
- return;
- break;
- case XFS_SBS_FDBLOCKS:
- count = mp->m_sb.sb_fdblocks;
- resid = do_div(count, weight);
- if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
- return;
- break;
- default:
- BUG();
- count = resid = 0; /* quiet, gcc */
- break;
- }
-
- xfs_icsb_enable_counter(mp, field, count, resid);
-}
-
-STATIC void
-xfs_icsb_balance_counter(
- xfs_mount_t *mp,
- xfs_sb_field_t fields,
- int min_per_cpu)
-{
- spin_lock(&mp->m_sb_lock);
- xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
- spin_unlock(&mp->m_sb_lock);
-}
-
-int
-xfs_icsb_modify_counters(
- xfs_mount_t *mp,
- xfs_sb_field_t field,
- int64_t delta,
- int rsvd)
-{
- xfs_icsb_cnts_t *icsbp;
- long long lcounter; /* long counter for 64 bit fields */
- int ret = 0;
-
- might_sleep();
-again:
- preempt_disable();
- icsbp = this_cpu_ptr(mp->m_sb_cnts);
-
- /*
- * if the counter is disabled, go to slow path
- */
- if (unlikely(xfs_icsb_counter_disabled(mp, field)))
- goto slow_path;
- xfs_icsb_lock_cntr(icsbp);
- if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
- xfs_icsb_unlock_cntr(icsbp);
- goto slow_path;
- }
-
- switch (field) {
- case XFS_SBS_ICOUNT:
- lcounter = icsbp->icsb_icount;
- lcounter += delta;
- if (unlikely(lcounter < 0))
- goto balance_counter;
- icsbp->icsb_icount = lcounter;
- break;
-
- case XFS_SBS_IFREE:
- lcounter = icsbp->icsb_ifree;
- lcounter += delta;
- if (unlikely(lcounter < 0))
- goto balance_counter;
- icsbp->icsb_ifree = lcounter;
- break;
-
- case XFS_SBS_FDBLOCKS:
- BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
-
- lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
- lcounter += delta;
- if (unlikely(lcounter < 0))
- goto balance_counter;
- icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
- break;
- default:
- BUG();
- break;
- }
- xfs_icsb_unlock_cntr(icsbp);
- preempt_enable();
- return 0;
-
-slow_path:
- preempt_enable();
-
- /*
- * serialise with a mutex so we don't burn lots of cpu on
- * the superblock lock. We still need to hold the superblock
- * lock, however, when we modify the global structures.
- */
- xfs_icsb_lock(mp);
-
- /*
- * Now running atomically.
- *
- * If the counter is enabled, someone has beaten us to rebalancing.
- * Drop the lock and try again in the fast path....
- */
- if (!(xfs_icsb_counter_disabled(mp, field))) {
- xfs_icsb_unlock(mp);
- goto again;
- }
-
- /*
- * The counter is currently disabled. Because we are
- * running atomically here, we know a rebalance cannot
- * be in progress. Hence we can go straight to operating
- * on the global superblock. We do not call xfs_mod_incore_sb()
- * here even though we need to get the m_sb_lock. Doing so
- * will cause us to re-enter this function and deadlock.
- * Hence we get the m_sb_lock ourselves and then call
- * xfs_mod_incore_sb_unlocked() as the unlocked path operates
- * directly on the global counters.
- */
- spin_lock(&mp->m_sb_lock);
- ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
- spin_unlock(&mp->m_sb_lock);
-
- /*
- * Now that we've modified the global superblock, we
- * may be able to re-enable the distributed counters
- * (e.g. lots of space just got freed). After that
- * we are done.
- */
- if (ret != -ENOSPC)
- xfs_icsb_balance_counter(mp, field, 0);
- xfs_icsb_unlock(mp);
- return ret;
-
-balance_counter:
- xfs_icsb_unlock_cntr(icsbp);
- preempt_enable();
-
- /*
- * We may have multiple threads here if multiple per-cpu
- * counters run dry at the same time. This will mean we can
- * do more balances than strictly necessary but it is not
- * the common slowpath case.
- */
- xfs_icsb_lock(mp);
-
- /*
- * running atomically.
- *
- * This will leave the counter in the correct state for future
- * accesses. After the rebalance, we simply try again and our retry
- * will either succeed through the fast path or slow path without
- * another balance operation being required.
- */
- xfs_icsb_balance_counter(mp, field, delta);
- xfs_icsb_unlock(mp);
- goto again;
-}
-
-#endif
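
A hedged worked example of the free-block counter batching added in xfs_mod_fdblocks() earlier in this file (the numbers are the ones in the code, the commentary is illustrative): while more than 2 * 1024 blocks remain free, each CPU may drift by up to the 1024-block batch before folding into the global count, so typical allocations never contend on the shared counter; below that threshold the batch drops to 1 and every modification serialises, keeping the percpu_counter_compare() check against XFS_ALLOC_SET_ASIDE(mp) accurate exactly when ENOSPC decisions matter.

    if (percpu_counter_compare(&mp->m_fdblocks, 2 * 1024) < 0)
        batch = 1;      /* near ENOSPC: fold every change, stay exact */
    else
        batch = 1024;   /* plenty of space: cheap per-CPU drift is fine */

    __percpu_counter_add(&mp->m_fdblocks, delta, batch);
    if (percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp)) < 0) {
        /* undo the subtraction and fall back to the reserve pool under m_sb_lock */
    }
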
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 0d8abd6364d9..8c995a2ccb6f 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -18,8 +18,6 @@
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__
-#ifdef __KERNEL__
-
struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
@@ -29,44 +27,6 @@ struct xfs_quotainfo;
struct xfs_dir_ops;
struct xfs_da_geometry;
-#ifdef HAVE_PERCPU_SB
-
-/*
- * Valid per-cpu incore superblock counters. Note that if you add new counters,
- * you may need to define new counter disabled bit field descriptors as there
- * are more possible fields in the superblock that can fit in a bitfield on a
- * 32 bit platform. The XFS_SBS_* values for the current current counters just
- * fit.
- */
-typedef struct xfs_icsb_cnts {
- uint64_t icsb_fdblocks;
- uint64_t icsb_ifree;
- uint64_t icsb_icount;
- unsigned long icsb_flags;
-} xfs_icsb_cnts_t;
-
-#define XFS_ICSB_FLAG_LOCK (1 << 0) /* counter lock bit */
-
-#define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */
-
-extern int xfs_icsb_init_counters(struct xfs_mount *);
-extern void xfs_icsb_reinit_counters(struct xfs_mount *);
-extern void xfs_icsb_destroy_counters(struct xfs_mount *);
-extern void xfs_icsb_sync_counters(struct xfs_mount *, int);
-extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
-extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t,
- int64_t, int);
-
-#else
-#define xfs_icsb_init_counters(mp) (0)
-#define xfs_icsb_destroy_counters(mp) do { } while (0)
-#define xfs_icsb_reinit_counters(mp) do { } while (0)
-#define xfs_icsb_sync_counters(mp, flags) do { } while (0)
-#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
-#define xfs_icsb_modify_counters(mp, field, delta, rsvd) \
- xfs_mod_incore_sb(mp, field, delta, rsvd)
-#endif
-
/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
XFS_LOWSP_1_PCNT = 0,
@@ -81,8 +41,13 @@ typedef struct xfs_mount {
struct super_block *m_super;
xfs_tid_t m_tid; /* next unused tid for fs */
struct xfs_ail *m_ail; /* fs active log item list */
- xfs_sb_t m_sb; /* copy of fs superblock */
+
+ struct xfs_sb m_sb; /* copy of fs superblock */
spinlock_t m_sb_lock; /* sb counter lock */
+ struct percpu_counter m_icount; /* allocated inodes counter */
+ struct percpu_counter m_ifree; /* free inodes counter */
+ struct percpu_counter m_fdblocks; /* free block counter */
+
struct xfs_buf *m_sb_bp; /* buffer for superblock */
char *m_fsname; /* filesystem name */
int m_fsname_len; /* strlen of fs name */
@@ -152,12 +117,6 @@ typedef struct xfs_mount {
const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
uint m_chsize; /* size of next field */
atomic_t m_active_trans; /* number trans frozen */
-#ifdef HAVE_PERCPU_SB
- xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
- unsigned long m_icsb_counters; /* disabled per-cpu counters */
- struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
- struct mutex m_icsb_mutex; /* balancer sync lock */
-#endif
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct delayed_work m_reclaim_work; /* background inode reclaim */
struct delayed_work m_eofblocks_work; /* background eof blocks
@@ -301,35 +260,6 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
}
/*
- * Per-cpu superblock locking functions
- */
-#ifdef HAVE_PERCPU_SB
-static inline void
-xfs_icsb_lock(xfs_mount_t *mp)
-{
- mutex_lock(&mp->m_icsb_mutex);
-}
-
-static inline void
-xfs_icsb_unlock(xfs_mount_t *mp)
-{
- mutex_unlock(&mp->m_icsb_mutex);
-}
-#else
-#define xfs_icsb_lock(mp)
-#define xfs_icsb_unlock(mp)
-#endif
-
-/*
- * This structure is for use by the xfs_mod_incore_sb_batch() routine.
- * xfs_growfs can specify a few fields which are more than int limit
- */
-typedef struct xfs_mod_sb {
- xfs_sb_field_t msb_field; /* Field to modify, see below */
- int64_t msb_delta; /* Change to make to specified field */
-} xfs_mod_sb_t;
-
-/*
* Per-ag incore structure, copies of information in agf and agi, to improve the
* performance of allocation group selection.
*/
@@ -383,11 +313,14 @@ extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
xfs_agnumber_t *maxagi);
-
extern void xfs_unmountfs(xfs_mount_t *);
-extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
-extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
- uint, int);
+
+extern int xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
+extern int xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
+extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
+ bool reserved);
+extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
+
extern int xfs_mount_log_sb(xfs_mount_t *);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int xfs_readsb(xfs_mount_t *, int);
@@ -399,6 +332,4 @@ extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
extern void xfs_set_low_space_thresholds(struct xfs_mount *);
-#endif /* __KERNEL__ */
-
#endif /* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 30ecca3037e3..f8a674d7f092 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -437,7 +437,7 @@ xfs_mru_cache_insert(
if (!mru || !mru->lists)
return -EINVAL;
- if (radix_tree_preload(GFP_KERNEL))
+ if (radix_tree_preload(GFP_NOFS))
return -ENOMEM;
INIT_LIST_HEAD(&elem->list_node);
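
A brief, hedged note on the flag change above: radix_tree_preload(GFP_KERNEL) can enter direct reclaim and recurse back into the filesystem, which is unsafe if the caller already holds filesystem locks or is inside a transaction; GFP_NOFS forbids that recursion. Illustrative pattern only:

    /* from a context that may already hold fs locks or a transaction */
    if (radix_tree_preload(GFP_NOFS))   /* no fs-level reclaim, no re-entry */
        return -ENOMEM;
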
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 365dd57ea760..981a657eca39 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -31,7 +31,8 @@
int
xfs_break_layouts(
struct inode *inode,
- uint *iolock)
+ uint *iolock,
+ bool with_imutex)
{
struct xfs_inode *ip = XFS_I(inode);
int error;
@@ -40,8 +41,12 @@ xfs_break_layouts(
while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
xfs_iunlock(ip, *iolock);
+ if (with_imutex && (*iolock & XFS_IOLOCK_EXCL))
+ mutex_unlock(&inode->i_mutex);
error = break_layout(inode, true);
*iolock = XFS_IOLOCK_EXCL;
+ if (with_imutex)
+ mutex_lock(&inode->i_mutex);
xfs_ilock(ip, *iolock);
}
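
The new with_imutex argument above lets the one caller that already holds i_mutex (xfs_vn_setattr() in this series) drop it, together with the IOLOCK, before blocking in break_layout(), and then retake i_mutex before the IOLOCK so the established lock order is preserved. An illustrative restatement of the retry loop body:

    xfs_iunlock(ip, *iolock);
    if (with_imutex && (*iolock & XFS_IOLOCK_EXCL))
        mutex_unlock(&inode->i_mutex);   /* drop before we may sleep */
    error = break_layout(inode, true);   /* waits for the layout recall */
    *iolock = XFS_IOLOCK_EXCL;
    if (with_imutex)
        mutex_lock(&inode->i_mutex);     /* i_mutex first ... */
    xfs_ilock(ip, *iolock);              /* ... then the IOLOCK */
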
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
index b7fbfce660f6..8147ac108820 100644
--- a/fs/xfs/xfs_pnfs.h
+++ b/fs/xfs/xfs_pnfs.h
@@ -8,9 +8,10 @@ int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
struct iattr *iattr);
-int xfs_break_layouts(struct inode *inode, uint *iolock);
+int xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex);
#else
-static inline int xfs_break_layouts(struct inode *inode, uint *iolock)
+static inline int
+xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex)
{
return 0;
}
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index fbbb9e62e274..5538468c7f63 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -719,6 +719,7 @@ xfs_qm_qino_alloc(
xfs_trans_t *tp;
int error;
int committed;
+ bool need_alloc = true;
*ip = NULL;
/*
@@ -747,6 +748,7 @@ xfs_qm_qino_alloc(
return error;
mp->m_sb.sb_gquotino = NULLFSINO;
mp->m_sb.sb_pquotino = NULLFSINO;
+ need_alloc = false;
}
}
@@ -758,7 +760,7 @@ xfs_qm_qino_alloc(
return error;
}
- if (!*ip) {
+ if (need_alloc) {
error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
&committed);
if (error) {
@@ -794,11 +796,14 @@ xfs_qm_qino_alloc(
spin_unlock(&mp->m_sb_lock);
xfs_log_sb(tp);
- if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error) {
+ ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_alert(mp, "%s failed (error %d)!", __func__, error);
- return error;
}
- return 0;
+ if (need_alloc)
+ xfs_finish_inode_setup(*ip);
+ return error;
}
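
The need_alloc flag above distinguishes a quota inode that was freshly allocated from one that was merely looked up when sb_gquotino/sb_pquotino are being reused; only the former still has XFS_INEW set and therefore needs xfs_finish_inode_setup() after the transaction commits. A compact, illustrative outline:

    bool need_alloc = true;

    /* reuse path: quota inode already exists, need_alloc = false */

    if (need_alloc)
        error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
    /* ... log the superblock and commit the transaction ... */
    if (need_alloc)
        xfs_finish_inode_setup(*ip);   /* clear XFS_INEW, unlock_new_inode() */
    return error;
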
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 0d4d3590cf85..996a04064894 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -168,10 +168,6 @@ extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
uint, struct qc_dqblk *);
extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
struct qc_dqblk *);
-extern int xfs_qm_scall_getqstat(struct xfs_mount *,
- struct fs_quota_stat *);
-extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
- struct fs_quota_statv *);
extern int xfs_qm_scall_quotaon(struct xfs_mount *, uint);
extern int xfs_qm_scall_quotaoff(struct xfs_mount *, uint);
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 9b965db45800..9a25c9275fb3 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -38,7 +38,6 @@
STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
uint);
-STATIC uint xfs_qm_export_flags(uint);
/*
* Turn off quota accounting and/or enforcement for all udquots and/or
@@ -389,159 +388,6 @@ xfs_qm_scall_quotaon(
return 0;
}
-
-/*
- * Return quota status information, such as uquota-off, enforcements, etc.
- * for Q_XGETQSTAT command.
- */
-int
-xfs_qm_scall_getqstat(
- struct xfs_mount *mp,
- struct fs_quota_stat *out)
-{
- struct xfs_quotainfo *q = mp->m_quotainfo;
- struct xfs_inode *uip = NULL;
- struct xfs_inode *gip = NULL;
- struct xfs_inode *pip = NULL;
- bool tempuqip = false;
- bool tempgqip = false;
- bool temppqip = false;
-
- memset(out, 0, sizeof(fs_quota_stat_t));
-
- out->qs_version = FS_QSTAT_VERSION;
- out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
- (XFS_ALL_QUOTA_ACCT|
- XFS_ALL_QUOTA_ENFD));
- uip = q->qi_uquotaip;
- gip = q->qi_gquotaip;
- pip = q->qi_pquotaip;
- if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip) == 0)
- tempuqip = true;
- }
- if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip) == 0)
- tempgqip = true;
- }
- /*
- * Q_XGETQSTAT doesn't have room for both group and project quotas.
- * So, allow the project quota values to be copied out only if
- * there is no group quota information available.
- */
- if (!gip) {
- if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
- 0, 0, &pip) == 0)
- temppqip = true;
- }
- } else
- pip = NULL;
- if (uip) {
- out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
- out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
- out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
- if (tempuqip)
- IRELE(uip);
- }
-
- if (gip) {
- out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
- out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
- out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
- if (tempgqip)
- IRELE(gip);
- }
- if (pip) {
- out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
- out->qs_gquota.qfs_nblks = pip->i_d.di_nblocks;
- out->qs_gquota.qfs_nextents = pip->i_d.di_nextents;
- if (temppqip)
- IRELE(pip);
- }
- out->qs_incoredqs = q->qi_dquots;
- out->qs_btimelimit = q->qi_btimelimit;
- out->qs_itimelimit = q->qi_itimelimit;
- out->qs_rtbtimelimit = q->qi_rtbtimelimit;
- out->qs_bwarnlimit = q->qi_bwarnlimit;
- out->qs_iwarnlimit = q->qi_iwarnlimit;
-
- return 0;
-}
-
-/*
- * Return quota status information, such as uquota-off, enforcements, etc.
- * for Q_XGETQSTATV command, to support separate project quota field.
- */
-int
-xfs_qm_scall_getqstatv(
- struct xfs_mount *mp,
- struct fs_quota_statv *out)
-{
- struct xfs_quotainfo *q = mp->m_quotainfo;
- struct xfs_inode *uip = NULL;
- struct xfs_inode *gip = NULL;
- struct xfs_inode *pip = NULL;
- bool tempuqip = false;
- bool tempgqip = false;
- bool temppqip = false;
-
- out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
- (XFS_ALL_QUOTA_ACCT|
- XFS_ALL_QUOTA_ENFD));
- out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
- out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
- out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;
-
- uip = q->qi_uquotaip;
- gip = q->qi_gquotaip;
- pip = q->qi_pquotaip;
- if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip) == 0)
- tempuqip = true;
- }
- if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip) == 0)
- tempgqip = true;
- }
- if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
- 0, 0, &pip) == 0)
- temppqip = true;
- }
- if (uip) {
- out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
- out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
- if (tempuqip)
- IRELE(uip);
- }
-
- if (gip) {
- out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
- out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
- if (tempgqip)
- IRELE(gip);
- }
- if (pip) {
- out->qs_pquota.qfs_nblks = pip->i_d.di_nblocks;
- out->qs_pquota.qfs_nextents = pip->i_d.di_nextents;
- if (temppqip)
- IRELE(pip);
- }
- out->qs_incoredqs = q->qi_dquots;
- out->qs_btimelimit = q->qi_btimelimit;
- out->qs_itimelimit = q->qi_itimelimit;
- out->qs_rtbtimelimit = q->qi_rtbtimelimit;
- out->qs_bwarnlimit = q->qi_bwarnlimit;
- out->qs_iwarnlimit = q->qi_iwarnlimit;
-
- return 0;
-}
-
#define XFS_QC_MASK \
(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
@@ -873,28 +719,6 @@ out_put:
return error;
}
-STATIC uint
-xfs_qm_export_flags(
- uint flags)
-{
- uint uflags;
-
- uflags = 0;
- if (flags & XFS_UQUOTA_ACCT)
- uflags |= FS_QUOTA_UDQ_ACCT;
- if (flags & XFS_GQUOTA_ACCT)
- uflags |= FS_QUOTA_GDQ_ACCT;
- if (flags & XFS_PQUOTA_ACCT)
- uflags |= FS_QUOTA_PDQ_ACCT;
- if (flags & XFS_UQUOTA_ENFD)
- uflags |= FS_QUOTA_UDQ_ENFD;
- if (flags & XFS_GQUOTA_ENFD)
- uflags |= FS_QUOTA_GDQ_ENFD;
- if (flags & XFS_PQUOTA_ENFD)
- uflags |= FS_QUOTA_PDQ_ENFD;
- return uflags;
-}
-
STATIC int
xfs_dqrele_inode(
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 6923905ab33d..7795e0d01382 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -23,10 +23,81 @@
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
+#include "xfs_trace.h"
+#include "xfs_icache.h"
#include "xfs_qm.h"
#include <linux/quota.h>
+static void
+xfs_qm_fill_state(
+ struct qc_type_state *tstate,
+ struct xfs_mount *mp,
+ struct xfs_inode *ip,
+ xfs_ino_t ino)
+{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ bool tempqip = false;
+
+ tstate->ino = ino;
+ if (!ip && ino == NULLFSINO)
+ return;
+ if (!ip) {
+ if (xfs_iget(mp, NULL, ino, 0, 0, &ip))
+ return;
+ tempqip = true;
+ }
+ tstate->flags |= QCI_SYSFILE;
+ tstate->blocks = ip->i_d.di_nblocks;
+ tstate->nextents = ip->i_d.di_nextents;
+ tstate->spc_timelimit = q->qi_btimelimit;
+ tstate->ino_timelimit = q->qi_itimelimit;
+ tstate->rt_spc_timelimit = q->qi_rtbtimelimit;
+ tstate->spc_warnlimit = q->qi_bwarnlimit;
+ tstate->ino_warnlimit = q->qi_iwarnlimit;
+ tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
+ if (tempqip)
+ IRELE(ip);
+}
+
+/*
+ * Return quota status information, such as enforcements, quota file inode
+ * numbers etc.
+ */
+static int
+xfs_fs_get_quota_state(
+ struct super_block *sb,
+ struct qc_state *state)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+
+ memset(state, 0, sizeof(*state));
+ if (!XFS_IS_QUOTA_RUNNING(mp))
+ return 0;
+ state->s_incoredqs = q->qi_dquots;
+ if (XFS_IS_UQUOTA_RUNNING(mp))
+ state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED;
+ if (XFS_IS_UQUOTA_ENFORCED(mp))
+ state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
+ if (XFS_IS_GQUOTA_RUNNING(mp))
+ state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED;
+ if (XFS_IS_GQUOTA_ENFORCED(mp))
+ state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
+ if (XFS_IS_PQUOTA_RUNNING(mp))
+ state->s_state[PRJQUOTA].flags |= QCI_ACCT_ENABLED;
+ if (XFS_IS_PQUOTA_ENFORCED(mp))
+ state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED;
+
+ xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip,
+ mp->m_sb.sb_uquotino);
+ xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp, q->qi_gquotaip,
+ mp->m_sb.sb_gquotino);
+ xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp, q->qi_pquotaip,
+ mp->m_sb.sb_pquotino);
+ return 0;
+}
+
STATIC int
xfs_quota_type(int type)
{
@@ -40,28 +111,40 @@ xfs_quota_type(int type)
}
}
-STATIC int
-xfs_fs_get_xstate(
+#define XFS_QC_SETINFO_MASK (QC_TIMER_MASK | QC_WARNS_MASK)
+
+/*
+ * Adjust quota timers & warnings
+ */
+static int
+xfs_fs_set_info(
struct super_block *sb,
- struct fs_quota_stat *fqs)
+ int type,
+ struct qc_info *info)
{
- struct xfs_mount *mp = XFS_M(sb);
+ struct xfs_mount *mp = XFS_M(sb);
+ struct qc_dqblk newlim;
+ if (sb->s_flags & MS_RDONLY)
+ return -EROFS;
if (!XFS_IS_QUOTA_RUNNING(mp))
return -ENOSYS;
- return xfs_qm_scall_getqstat(mp, fqs);
-}
+ if (!XFS_IS_QUOTA_ON(mp))
+ return -ESRCH;
+ if (info->i_fieldmask & ~XFS_QC_SETINFO_MASK)
+ return -EINVAL;
+ if ((info->i_fieldmask & XFS_QC_SETINFO_MASK) == 0)
+ return 0;
-STATIC int
-xfs_fs_get_xstatev(
- struct super_block *sb,
- struct fs_quota_statv *fqs)
-{
- struct xfs_mount *mp = XFS_M(sb);
+ newlim.d_fieldmask = info->i_fieldmask;
+ newlim.d_spc_timer = info->i_spc_timelimit;
+ newlim.d_ino_timer = info->i_ino_timelimit;
+ newlim.d_rt_spc_timer = info->i_rt_spc_timelimit;
+ newlim.d_ino_warns = info->i_ino_warnlimit;
+ newlim.d_spc_warns = info->i_spc_warnlimit;
+ newlim.d_rt_spc_warns = info->i_rt_spc_warnlimit;
- if (!XFS_IS_QUOTA_RUNNING(mp))
- return -ENOSYS;
- return xfs_qm_scall_getqstatv(mp, fqs);
+ return xfs_qm_scall_setqlim(mp, 0, xfs_quota_type(type), &newlim);
}
static unsigned int
@@ -178,8 +261,8 @@ xfs_fs_set_dqblk(
}
const struct quotactl_ops xfs_quotactl_operations = {
- .get_xstatev = xfs_fs_get_xstatev,
- .get_xstate = xfs_fs_get_xstate,
+ .get_state = xfs_fs_get_quota_state,
+ .set_info = xfs_fs_set_info,
.quota_enable = xfs_quota_enable,
.quota_disable = xfs_quota_disable,
.rm_xquota = xfs_fs_rm_xquota,
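
Roughly how the replacement above maps onto the removed XFS-specific calls, stated as a hedged summary rather than anything from the patch: the status previously built by xfs_qm_scall_getqstat()/getqstatv() for ->get_xstate/->get_xstatev is now assembled per quota type into struct qc_state by xfs_fs_get_quota_state(), while timer and warning-limit updates arrive through ->set_info and are translated by xfs_fs_set_info() into a qc_dqblk for the existing xfs_qm_scall_setqlim(). An illustrative fragment of that translation (remaining fields are filled analogously):

    struct qc_dqblk newlim;

    newlim.d_fieldmask = info->i_fieldmask;   /* QC_TIMER_MASK | QC_WARNS_MASK */
    newlim.d_spc_timer = info->i_spc_timelimit;
    newlim.d_ino_timer = info->i_ino_timelimit;
    newlim.d_spc_warns = info->i_spc_warnlimit;
    return xfs_qm_scall_setqlim(mp, 0, xfs_quota_type(type), &newlim);
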
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8fcc4ccc5c79..858e1e62bbaa 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -109,8 +109,6 @@ static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
-#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
-#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
#define MNTOPT_DISCARD "discard" /* Discard unused blocks */
#define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */
@@ -361,28 +359,10 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_GQUOTA_ENFD;
- } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
- xfs_warn(mp,
- "delaylog is the default now, option is deprecated.");
- } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
- xfs_warn(mp,
- "nodelaylog support has been removed, option is deprecated.");
} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
mp->m_flags |= XFS_MOUNT_DISCARD;
} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
mp->m_flags &= ~XFS_MOUNT_DISCARD;
- } else if (!strcmp(this_char, "ihashsize")) {
- xfs_warn(mp,
- "ihashsize no longer used, option is deprecated.");
- } else if (!strcmp(this_char, "osyncisdsync")) {
- xfs_warn(mp,
- "osyncisdsync has no effect, option is deprecated.");
- } else if (!strcmp(this_char, "osyncisosync")) {
- xfs_warn(mp,
- "osyncisosync has no effect, option is deprecated.");
- } else if (!strcmp(this_char, "irixsgid")) {
- xfs_warn(mp,
- "irixsgid is now a sysctl(2) variable, option is deprecated.");
} else {
xfs_warn(mp, "unknown mount option [%s].", this_char);
return -EINVAL;
@@ -986,6 +966,8 @@ xfs_fs_inode_init_once(
atomic_set(&ip->i_pincount, 0);
spin_lock_init(&ip->i_flags_lock);
+ mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
+ "xfsino", ip->i_ino);
mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
"xfsino", ip->i_ino);
}
@@ -1033,23 +1015,6 @@ xfs_free_fsname(
kfree(mp->m_logname);
}
-STATIC void
-xfs_fs_put_super(
- struct super_block *sb)
-{
- struct xfs_mount *mp = XFS_M(sb);
-
- xfs_filestream_unmount(mp);
- xfs_unmountfs(mp);
-
- xfs_freesb(mp);
- xfs_icsb_destroy_counters(mp);
- xfs_destroy_mount_workqueues(mp);
- xfs_close_devices(mp);
- xfs_free_fsname(mp);
- kfree(mp);
-}
-
STATIC int
xfs_fs_sync_fs(
struct super_block *sb,
@@ -1083,8 +1048,11 @@ xfs_fs_statfs(
{
struct xfs_mount *mp = XFS_M(dentry->d_sb);
xfs_sb_t *sbp = &mp->m_sb;
- struct xfs_inode *ip = XFS_I(dentry->d_inode);
+ struct xfs_inode *ip = XFS_I(d_inode(dentry));
__uint64_t fakeinos, id;
+ __uint64_t icount;
+ __uint64_t ifree;
+ __uint64_t fdblocks;
xfs_extlen_t lsize;
__int64_t ffree;
@@ -1095,17 +1063,21 @@ xfs_fs_statfs(
statp->f_fsid.val[0] = (u32)id;
statp->f_fsid.val[1] = (u32)(id >> 32);
- xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+ icount = percpu_counter_sum(&mp->m_icount);
+ ifree = percpu_counter_sum(&mp->m_ifree);
+ fdblocks = percpu_counter_sum(&mp->m_fdblocks);
spin_lock(&mp->m_sb_lock);
statp->f_bsize = sbp->sb_blocksize;
lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
statp->f_blocks = sbp->sb_dblocks - lsize;
- statp->f_bfree = statp->f_bavail =
- sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ spin_unlock(&mp->m_sb_lock);
+
+ statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ statp->f_bavail = statp->f_bfree;
+
fakeinos = statp->f_bfree << sbp->sb_inopblog;
- statp->f_files =
- MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
+ statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
if (mp->m_maxicount)
statp->f_files = min_t(typeof(statp->f_files),
statp->f_files,
@@ -1117,10 +1089,9 @@ xfs_fs_statfs(
sbp->sb_icount);
/* make sure statp->f_ffree does not underflow */
- ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+ ffree = statp->f_files - (icount - ifree);
statp->f_ffree = max_t(__int64_t, ffree, 0);
- spin_unlock(&mp->m_sb_lock);
if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
@@ -1256,6 +1227,12 @@ xfs_fs_remount(
/* ro -> rw */
if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
+ xfs_warn(mp,
+ "ro->rw transition prohibited on norecovery mount");
+ return -EINVAL;
+ }
+
mp->m_flags &= ~XFS_MOUNT_RDONLY;
/*
@@ -1401,6 +1378,51 @@ xfs_finish_flags(
return 0;
}
+static int
+xfs_init_percpu_counters(
+ struct xfs_mount *mp)
+{
+ int error;
+
+ error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
+ if (error)
+ return -ENOMEM;
+
+ error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
+ if (error)
+ goto free_icount;
+
+ error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
+ if (error)
+ goto free_ifree;
+
+ return 0;
+
+free_ifree:
+ percpu_counter_destroy(&mp->m_ifree);
+free_icount:
+ percpu_counter_destroy(&mp->m_icount);
+ return -ENOMEM;
+}
+
+void
+xfs_reinit_percpu_counters(
+ struct xfs_mount *mp)
+{
+ percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
+ percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
+ percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
+}
+
+static void
+xfs_destroy_percpu_counters(
+ struct xfs_mount *mp)
+{
+ percpu_counter_destroy(&mp->m_icount);
+ percpu_counter_destroy(&mp->m_ifree);
+ percpu_counter_destroy(&mp->m_fdblocks);
+}
+
STATIC int
xfs_fs_fill_super(
struct super_block *sb,
@@ -1449,7 +1471,7 @@ xfs_fs_fill_super(
if (error)
goto out_close_devices;
- error = xfs_icsb_init_counters(mp);
+ error = xfs_init_percpu_counters(mp);
if (error)
goto out_destroy_workqueues;
@@ -1507,7 +1529,7 @@ xfs_fs_fill_super(
out_free_sb:
xfs_freesb(mp);
out_destroy_counters:
- xfs_icsb_destroy_counters(mp);
+ xfs_destroy_percpu_counters(mp);
out_destroy_workqueues:
xfs_destroy_mount_workqueues(mp);
out_close_devices:
@@ -1524,6 +1546,24 @@ out_destroy_workqueues:
goto out_free_sb;
}
+STATIC void
+xfs_fs_put_super(
+ struct super_block *sb)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+
+ xfs_notice(mp, "Unmounting Filesystem");
+ xfs_filestream_unmount(mp);
+ xfs_unmountfs(mp);
+
+ xfs_freesb(mp);
+ xfs_destroy_percpu_counters(mp);
+ xfs_destroy_mount_workqueues(mp);
+ xfs_close_devices(mp);
+ xfs_free_fsname(mp);
+ kfree(mp);
+}
+
STATIC struct dentry *
xfs_fs_mount(
struct file_system_type *fs_type,
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 2b830c2f322e..499058fea303 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -72,6 +72,8 @@ extern const struct export_operations xfs_export_operations;
extern const struct xattr_handler *xfs_xattr_handlers[];
extern const struct quotactl_ops xfs_quotactl_operations;
+extern void xfs_reinit_percpu_counters(struct xfs_mount *mp);
+
#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info))
#endif /* __XFS_SUPER_H__ */
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 25791df6f638..3df411eadb86 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -177,7 +177,7 @@ xfs_symlink(
int pathlen;
struct xfs_bmap_free free_list;
xfs_fsblock_t first_block;
- bool unlock_dp_on_error = false;
+ bool unlock_dp_on_error = false;
uint cancel_flags;
int committed;
xfs_fileoff_t first_fsb;
@@ -221,7 +221,7 @@ xfs_symlink(
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
- goto std_return;
+ return error;
tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
@@ -241,7 +241,7 @@ xfs_symlink(
}
if (error) {
cancel_flags = 0;
- goto error_return;
+ goto out_trans_cancel;
}
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
@@ -252,7 +252,7 @@ xfs_symlink(
*/
if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
error = -EPERM;
- goto error_return;
+ goto out_trans_cancel;
}
/*
@@ -261,7 +261,7 @@ xfs_symlink(
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
pdqp, resblks, 1, 0);
if (error)
- goto error_return;
+ goto out_trans_cancel;
/*
* Check for ability to enter directory entry, if no space reserved.
@@ -269,7 +269,7 @@ xfs_symlink(
if (!resblks) {
error = xfs_dir_canenter(tp, dp, link_name);
if (error)
- goto error_return;
+ goto out_trans_cancel;
}
/*
* Initialize the bmap freelist prior to calling either
@@ -282,15 +282,14 @@ xfs_symlink(
*/
error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
prid, resblks > 0, &ip, NULL);
- if (error) {
- if (error == -ENOSPC)
- goto error_return;
- goto error1;
- }
+ if (error)
+ goto out_trans_cancel;
/*
- * An error after we've joined dp to the transaction will result in the
- * transaction cancel unlocking dp so don't do it explicitly in the
+ * Now we join the directory inode to the transaction. We do not do it
+ * earlier because xfs_dir_ialloc might commit the previous transaction
+ * (and release all the locks). An error from here on will result in
+ * the transaction cancel unlocking dp so don't do it explicitly in the
* error path.
*/
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
@@ -330,7 +329,7 @@ xfs_symlink(
XFS_BMAPI_METADATA, &first_block, resblks,
mval, &nmaps, &free_list);
if (error)
- goto error2;
+ goto out_bmap_cancel;
if (resblks)
resblks -= fs_blocks;
@@ -348,7 +347,7 @@ xfs_symlink(
BTOBB(byte_cnt), 0);
if (!bp) {
error = -ENOMEM;
- goto error2;
+ goto out_bmap_cancel;
}
bp->b_ops = &xfs_symlink_buf_ops;
@@ -378,7 +377,7 @@ xfs_symlink(
error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
&first_block, &free_list, resblks);
if (error)
- goto error2;
+ goto out_bmap_cancel;
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
@@ -392,10 +391,13 @@ xfs_symlink(
}
error = xfs_bmap_finish(&tp, &free_list, &committed);
- if (error) {
- goto error2;
- }
+ if (error)
+ goto out_bmap_cancel;
+
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
+ goto out_release_inode;
+
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
@@ -403,20 +405,28 @@ xfs_symlink(
*ipp = ip;
return 0;
- error2:
- IRELE(ip);
- error1:
+out_bmap_cancel:
xfs_bmap_cancel(&free_list);
cancel_flags |= XFS_TRANS_ABORT;
- error_return:
+out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
+out_release_inode:
+ /*
+ * Wait until after the current transaction is aborted to finish the
+ * setup of the inode and release the inode. This prevents recursive
+ * transactions and deadlocks from xfs_inactive.
+ */
+ if (ip) {
+ xfs_finish_inode_setup(ip);
+ IRELE(ip);
+ }
+
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- std_return:
return error;
}
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 51372e34d988..615781bf4ee5 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -115,7 +115,7 @@ DECLARE_EVENT_CLASS(xfs_perag_class,
__entry->refcount = refcount;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d agno %u refcount %d caller %pf",
+ TP_printk("dev %d:%d agno %u refcount %d caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->refcount,
@@ -239,7 +239,7 @@ TRACE_EVENT(xfs_iext_insert,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
- "offset %lld block %lld count %lld flag %d caller %pf",
+ "offset %lld block %lld count %lld flag %d caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
@@ -283,7 +283,7 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
- "offset %lld block %lld count %lld flag %d caller %pf",
+ "offset %lld block %lld count %lld flag %d caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
@@ -329,7 +329,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx nblks 0x%x hold %d pincount %d "
- "lock %d flags %s caller %pf",
+ "lock %d flags %s caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->nblks,
@@ -402,7 +402,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
- "lock %d flags %s caller %pf",
+ "lock %d flags %s caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->buffer_length,
@@ -447,7 +447,7 @@ TRACE_EVENT(xfs_buf_ioerror,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
- "lock %d error %d flags %s caller %pf",
+ "lock %d error %d flags %s caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->buffer_length,
@@ -613,7 +613,7 @@ DECLARE_EVENT_CLASS(xfs_lock_class,
__entry->lock_flags = lock_flags;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
+ TP_printk("dev %d:%d ino 0x%llx flags %s caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
@@ -664,6 +664,7 @@ DEFINE_INODE_EVENT(xfs_alloc_file_space);
DEFINE_INODE_EVENT(xfs_free_file_space);
DEFINE_INODE_EVENT(xfs_zero_file_space);
DEFINE_INODE_EVENT(xfs_collapse_file_space);
+DEFINE_INODE_EVENT(xfs_insert_file_space);
DEFINE_INODE_EVENT(xfs_readdir);
#ifdef CONFIG_XFS_POSIX_ACL
DEFINE_INODE_EVENT(xfs_get_acl);
@@ -685,6 +686,9 @@ DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
+DEFINE_INODE_EVENT(xfs_filemap_fault);
+DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite);
+
DECLARE_EVENT_CLASS(xfs_iref_class,
TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
TP_ARGS(ip, caller_ip),
@@ -702,7 +706,7 @@ DECLARE_EVENT_CLASS(xfs_iref_class,
__entry->pincount = atomic_read(&ip->i_pincount);
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
+ TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->count,
@@ -1217,6 +1221,11 @@ DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
+DEFINE_IOMAP_EVENT(xfs_gbmap_direct);
+DEFINE_IOMAP_EVENT(xfs_gbmap_direct_new);
+DEFINE_IOMAP_EVENT(xfs_gbmap_direct_update);
+DEFINE_IOMAP_EVENT(xfs_gbmap_direct_none);
+DEFINE_IOMAP_EVENT(xfs_gbmap_direct_endio);
DECLARE_EVENT_CLASS(xfs_simple_io_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1333,7 +1342,7 @@ TRACE_EVENT(xfs_bunmap,
__entry->flags = flags;
),
TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
- "flags %s caller %pf",
+ "flags %s caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->size,
@@ -1466,7 +1475,7 @@ TRACE_EVENT(xfs_agf,
),
TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
"levels b %u c %u flfirst %u fllast %u flcount %u "
- "freeblks %u longest %u caller %pf",
+ "freeblks %u longest %u caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index eb90cd59a0ec..220ef2c906b2 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -173,7 +173,7 @@ xfs_trans_reserve(
uint rtextents)
{
int error = 0;
- int rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+ bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
/* Mark this thread as being in a transaction */
current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
@@ -184,8 +184,7 @@ xfs_trans_reserve(
* fail if the count would go below zero.
*/
if (blocks > 0) {
- error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
- -((int64_t)blocks), rsvd);
+ error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
if (error != 0) {
current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
return -ENOSPC;
@@ -236,8 +235,7 @@ xfs_trans_reserve(
* fail if the count would go below zero.
*/
if (rtextents > 0) {
- error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
- -((int64_t)rtextents), rsvd);
+ error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
if (error) {
error = -ENOSPC;
goto undo_log;
@@ -268,8 +266,7 @@ undo_log:
undo_blocks:
if (blocks > 0) {
- xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
- (int64_t)blocks, rsvd);
+ xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
tp->t_blk_res = 0;
}
@@ -488,6 +485,54 @@ xfs_trans_apply_sb_deltas(
sizeof(sbp->sb_frextents) - 1);
}
+STATIC int
+xfs_sb_mod8(
+ uint8_t *field,
+ int8_t delta)
+{
+ int8_t counter = *field;
+
+ counter += delta;
+ if (counter < 0) {
+ ASSERT(0);
+ return -EINVAL;
+ }
+ *field = counter;
+ return 0;
+}
+
+STATIC int
+xfs_sb_mod32(
+ uint32_t *field,
+ int32_t delta)
+{
+ int32_t counter = *field;
+
+ counter += delta;
+ if (counter < 0) {
+ ASSERT(0);
+ return -EINVAL;
+ }
+ *field = counter;
+ return 0;
+}
+
+STATIC int
+xfs_sb_mod64(
+ uint64_t *field,
+ int64_t delta)
+{
+ int64_t counter = *field;
+
+ counter += delta;
+ if (counter < 0) {
+ ASSERT(0);
+ return -EINVAL;
+ }
+ *field = counter;
+ return 0;
+}
+
/*
* xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
* and apply superblock counter changes to the in-core superblock. The
@@ -495,13 +540,6 @@ xfs_trans_apply_sb_deltas(
* applied to the in-core superblock. The idea is that that has already been
* done.
*
- * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
- * However, we have to ensure that we only modify each superblock field only
- * once because the application of the delta values may not be atomic. That can
- * lead to ENOSPC races occurring if we have two separate modifcations of the
- * free space counter to put back the entire reservation and then take away
- * what we used.
- *
* If we are not logging superblock counters, then the inode allocated/free and
* used block counts are not updated in the on disk superblock. In this case,
* XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
@@ -509,21 +547,15 @@ xfs_trans_apply_sb_deltas(
*/
void
xfs_trans_unreserve_and_mod_sb(
- xfs_trans_t *tp)
+ struct xfs_trans *tp)
{
- xfs_mod_sb_t msb[9]; /* If you add cases, add entries */
- xfs_mod_sb_t *msbp;
- xfs_mount_t *mp = tp->t_mountp;
- /* REFERENCED */
- int error;
- int rsvd;
- int64_t blkdelta = 0;
- int64_t rtxdelta = 0;
- int64_t idelta = 0;
- int64_t ifreedelta = 0;
-
- msbp = msb;
- rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+ struct xfs_mount *mp = tp->t_mountp;
+ bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+ int64_t blkdelta = 0;
+ int64_t rtxdelta = 0;
+ int64_t idelta = 0;
+ int64_t ifreedelta = 0;
+ int error;
/* calculate deltas */
if (tp->t_blk_res > 0)
@@ -547,97 +579,115 @@ xfs_trans_unreserve_and_mod_sb(
/* apply the per-cpu counters */
if (blkdelta) {
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- blkdelta, rsvd);
+ error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
if (error)
goto out;
}
if (idelta) {
- error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
- idelta, rsvd);
+ error = xfs_mod_icount(mp, idelta);
if (error)
goto out_undo_fdblocks;
}
if (ifreedelta) {
- error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
- ifreedelta, rsvd);
+ error = xfs_mod_ifree(mp, ifreedelta);
if (error)
goto out_undo_icount;
}
+ if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
+ return;
+
/* apply remaining deltas */
- if (rtxdelta != 0) {
- msbp->msb_field = XFS_SBS_FREXTENTS;
- msbp->msb_delta = rtxdelta;
- msbp++;
+ spin_lock(&mp->m_sb_lock);
+ if (rtxdelta) {
+ error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
+ if (error)
+ goto out_undo_ifree;
}
- if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
- if (tp->t_dblocks_delta != 0) {
- msbp->msb_field = XFS_SBS_DBLOCKS;
- msbp->msb_delta = tp->t_dblocks_delta;
- msbp++;
- }
- if (tp->t_agcount_delta != 0) {
- msbp->msb_field = XFS_SBS_AGCOUNT;
- msbp->msb_delta = tp->t_agcount_delta;
- msbp++;
- }
- if (tp->t_imaxpct_delta != 0) {
- msbp->msb_field = XFS_SBS_IMAX_PCT;
- msbp->msb_delta = tp->t_imaxpct_delta;
- msbp++;
- }
- if (tp->t_rextsize_delta != 0) {
- msbp->msb_field = XFS_SBS_REXTSIZE;
- msbp->msb_delta = tp->t_rextsize_delta;
- msbp++;
- }
- if (tp->t_rbmblocks_delta != 0) {
- msbp->msb_field = XFS_SBS_RBMBLOCKS;
- msbp->msb_delta = tp->t_rbmblocks_delta;
- msbp++;
- }
- if (tp->t_rblocks_delta != 0) {
- msbp->msb_field = XFS_SBS_RBLOCKS;
- msbp->msb_delta = tp->t_rblocks_delta;
- msbp++;
- }
- if (tp->t_rextents_delta != 0) {
- msbp->msb_field = XFS_SBS_REXTENTS;
- msbp->msb_delta = tp->t_rextents_delta;
- msbp++;
- }
- if (tp->t_rextslog_delta != 0) {
- msbp->msb_field = XFS_SBS_REXTSLOG;
- msbp->msb_delta = tp->t_rextslog_delta;
- msbp++;
- }
+ if (tp->t_dblocks_delta != 0) {
+ error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
+ if (error)
+ goto out_undo_frextents;
}
-
- /*
- * If we need to change anything, do it.
- */
- if (msbp > msb) {
- error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
- (uint)(msbp - msb), rsvd);
+ if (tp->t_agcount_delta != 0) {
+ error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
if (error)
- goto out_undo_ifreecount;
+ goto out_undo_dblocks;
}
-
+ if (tp->t_imaxpct_delta != 0) {
+ error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
+ if (error)
+ goto out_undo_agcount;
+ }
+ if (tp->t_rextsize_delta != 0) {
+ error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
+ tp->t_rextsize_delta);
+ if (error)
+ goto out_undo_imaxpct;
+ }
+ if (tp->t_rbmblocks_delta != 0) {
+ error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
+ tp->t_rbmblocks_delta);
+ if (error)
+ goto out_undo_rextsize;
+ }
+ if (tp->t_rblocks_delta != 0) {
+ error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
+ if (error)
+ goto out_undo_rbmblocks;
+ }
+ if (tp->t_rextents_delta != 0) {
+ error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
+ tp->t_rextents_delta);
+ if (error)
+ goto out_undo_rblocks;
+ }
+ if (tp->t_rextslog_delta != 0) {
+ error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
+ tp->t_rextslog_delta);
+ if (error)
+ goto out_undo_rextents;
+ }
+ spin_unlock(&mp->m_sb_lock);
return;
-out_undo_ifreecount:
+out_undo_rextents:
+ if (tp->t_rextents_delta)
+ xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
+out_undo_rblocks:
+ if (tp->t_rblocks_delta)
+ xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
+out_undo_rbmblocks:
+ if (tp->t_rbmblocks_delta)
+ xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
+out_undo_rextsize:
+ if (tp->t_rextsize_delta)
+ xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
+out_undo_imaxpct:
+ if (tp->t_imaxpct_delta)
+ xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
+out_undo_agcount:
+ if (tp->t_agcount_delta)
+ xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
+out_undo_dblocks:
+ if (tp->t_dblocks_delta)
+ xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
+out_undo_frextents:
+ if (rtxdelta)
+ xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
+out_undo_ifree:
+ spin_unlock(&mp->m_sb_lock);
if (ifreedelta)
- xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
+ xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
if (idelta)
- xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
+ xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
if (blkdelta)
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
+ xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
ASSERT(error == 0);
return;
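
Editorial note: the rewritten xfs_trans_unreserve_and_mod_sb() above applies each superblock delta under m_sb_lock and, if one application fails, unwinds the already-applied deltas through a chain of goto labels. The same apply/undo pattern in miniature — an illustrative sketch with hypothetical names, not kernel code:

#include <linux/errno.h>

struct two_counters {
	long a;
	long b;
};

static int apply_delta(long *field, long delta)
{
	if (*field + delta < 0)
		return -EINVAL;		/* refuse to drive the counter negative */
	*field += delta;
	return 0;
}

static int apply_two_deltas(struct two_counters *c, long d1, long d2)
{
	int error;

	error = apply_delta(&c->a, d1);
	if (error)
		return error;
	error = apply_delta(&c->b, d2);
	if (error)
		goto undo_a;
	return 0;

undo_a:
	apply_delta(&c->a, -d1);	/* roll back the delta already applied */
	return error;
}
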
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 69f6e475de97..c036815183cb 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -35,7 +35,7 @@ static int
xfs_xattr_get(struct dentry *dentry, const char *name,
void *value, size_t size, int xflags)
{
- struct xfs_inode *ip = XFS_I(dentry->d_inode);
+ struct xfs_inode *ip = XFS_I(d_inode(dentry));
int error, asize = size;
if (strcmp(name, "") == 0)
@@ -57,7 +57,7 @@ static int
xfs_xattr_set(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags, int xflags)
{
- struct xfs_inode *ip = XFS_I(dentry->d_inode);
+ struct xfs_inode *ip = XFS_I(d_inode(dentry));
if (strcmp(name, "") == 0)
return -EINVAL;
@@ -197,7 +197,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
{
struct xfs_attr_list_context context;
struct attrlist_cursor_kern cursor = { 0 };
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int error;
/*
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 61e32ec1fc4d..8de4fa90e8c4 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -252,6 +252,7 @@ struct acpi_device_pnp {
#define acpi_device_bid(d) ((d)->pnp.bus_id)
#define acpi_device_adr(d) ((d)->pnp.bus_address)
const char *acpi_device_hid(struct acpi_device *device);
+#define acpi_device_uid(d) ((d)->pnp.unique_id)
#define acpi_device_name(d) ((d)->pnp.device_name)
#define acpi_device_class(d) ((d)->pnp.device_class)
@@ -386,7 +387,8 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode)
static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
{
- return fwnode ? container_of(fwnode, struct acpi_device, fwnode) : NULL;
+ return is_acpi_node(fwnode) ?
+ container_of(fwnode, struct acpi_device, fwnode) : NULL;
}
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index 444671e9c65d..dd86c5fc102d 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -3,11 +3,15 @@
#include <linux/io.h>
+#include <asm/acpi.h>
+
+#ifndef acpi_os_ioremap
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
return ioremap_cache(phys, size);
}
+#endif
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index d56f5d722138..08ef57bc8d63 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20150204
+#define ACPI_CA_VERSION 0x20150410
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -431,13 +431,13 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
- acpi_find_root_pointer(acpi_size * rsdp_address))
-
+ acpi_find_root_pointer(acpi_physical_address *
+ rsdp_address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table_header(acpi_string signature,
- u32 instance,
- struct acpi_table_header
- *out_table_header))
+ acpi_get_table_header(acpi_string signature,
+ u32 instance,
+ struct acpi_table_header
+ *out_table_header))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table(acpi_string signature, u32 instance,
struct acpi_table_header
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index f06d75e5fa54..cafdeb50fbdf 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -73,6 +73,7 @@
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
+#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */
#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
@@ -845,7 +846,8 @@ struct acpi_lpit_header {
enum acpi_lpit_type {
ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00,
- ACPI_LPIT_TYPE_SIMPLE_IO = 0x01
+ ACPI_LPIT_TYPE_SIMPLE_IO = 0x01,
+ ACPI_LPIT_TYPE_RESERVED = 0x02 /* 2 and above are reserved */
};
/* Masks for Flags field above */
@@ -935,6 +937,21 @@ struct acpi_table_mchi {
/*******************************************************************************
*
+ * MSDM - Microsoft Data Management table
+ *
+ * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
+ * November 29, 2011. Copyright 2011 Microsoft
+ *
+ ******************************************************************************/
+
+/* Basic MSDM table is only the common ACPI header */
+
+struct acpi_table_msdm {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/*******************************************************************************
+ *
* MTMR - MID Timer Table
* Version 1
*
@@ -959,10 +976,9 @@ struct acpi_mtmr_entry {
/*******************************************************************************
*
* SLIC - Software Licensing Description Table
- * Version 1
*
- * Conforms to "OEM Activation 2.0 for Windows Vista Operating Systems",
- * Copyright 2006
+ * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
+ * November 29, 2011. Copyright 2011 Microsoft
*
******************************************************************************/
@@ -972,52 +988,6 @@ struct acpi_table_slic {
struct acpi_table_header header; /* Common ACPI table header */
};
-/* Common SLIC subtable header */
-
-struct acpi_slic_header {
- u32 type;
- u32 length;
-};
-
-/* Values for Type field above */
-
-enum acpi_slic_type {
- ACPI_SLIC_TYPE_PUBLIC_KEY = 0,
- ACPI_SLIC_TYPE_WINDOWS_MARKER = 1,
- ACPI_SLIC_TYPE_RESERVED = 2 /* 2 and greater are reserved */
-};
-
-/*
- * SLIC Subtables, correspond to Type in struct acpi_slic_header
- */
-
-/* 0: Public Key Structure */
-
-struct acpi_slic_key {
- struct acpi_slic_header header;
- u8 key_type;
- u8 version;
- u16 reserved;
- u32 algorithm;
- char magic[4];
- u32 bit_length;
- u32 exponent;
- u8 modulus[128];
-};
-
-/* 1: Windows Marker Structure */
-
-struct acpi_slic_marker {
- struct acpi_slic_header header;
- u32 version;
- char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
- char windows_flag[8];
- u32 slic_version;
- u8 reserved[16];
- u8 signature[128];
-};
-
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b034f1068dfe..1c3002e1db20 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -124,7 +124,6 @@
#ifndef ACPI_USE_SYSTEM_INTTYPES
typedef unsigned char u8;
-typedef unsigned char u8;
typedef unsigned short u16;
typedef short s16;
typedef COMPILER_DEPENDENT_UINT64 u64;
@@ -199,9 +198,29 @@ typedef int s32;
typedef s32 acpi_native_int;
typedef u32 acpi_size;
+
+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
+
+/*
+ * OSPMs can define this to shrink the size of the structures for 32-bit
+ * non-PAE environments. The ASL compiler may always define this to generate
+ * 32-bit OSPM-compliant tables.
+ */
typedef u32 acpi_io_address;
typedef u32 acpi_physical_address;
+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
+/*
+ * It has been reported that, after some calculations, physical addresses can
+ * wrap past the 32-bit boundary on 32-bit PAE environments.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
+ */
+typedef u64 acpi_io_address;
+typedef u64 acpi_physical_address;
+
+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
#define ACPI_MAX_PTR ACPI_UINT32_MAX
#define ACPI_SIZE_MAX ACPI_UINT32_MAX
@@ -713,33 +732,32 @@ typedef u32 acpi_event_type;
* The encoding of acpi_event_status is illustrated below.
* Note that a set bit (1) indicates the property is TRUE
* (e.g. if bit 0 is set then the event is enabled).
- * +-------------+-+-+-+-+
- * | Bits 31:4 |3|2|1|0|
- * +-------------+-+-+-+-+
- * | | | | |
- * | | | | +- Enabled?
- * | | | +--- Enabled for wake?
- * | | +----- Set?
- * | +------- Has a handler?
- * +------------- <Reserved>
+ * +-------------+-+-+-+-+-+
+ * | Bits 31:5 |4|3|2|1|0|
+ * +-------------+-+-+-+-+-+
+ * | | | | | |
+ * | | | | | +- Enabled?
+ * | | | | +--- Enabled for wake?
+ * | | | +----- Status bit set?
+ * | | +------- Enable bit set?
+ * | +--------- Has a handler?
+ * +--------------- <Reserved>
*/
typedef u32 acpi_event_status;
#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00
#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01
#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02
-#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
-#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x08
+#define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04
+#define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08
+#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10
+#define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET
/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
#define ACPI_GPE_CONDITIONAL_ENABLE 2
-#define ACPI_GPE_SAVE_MASK 4
-
-#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
-#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
/*
* GPE info flags - Per GPE
@@ -1251,6 +1269,7 @@ struct acpi_memory_list {
#define ACPI_OSI_WIN_VISTA_SP2 0x0A
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
+#define ACPI_OSI_WIN_10 0x0D
/* Definitions of file IO */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ad74dc51d5b7..ecdf9405dd3a 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -76,6 +76,7 @@
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
#define ACPI_SINGLE_THREADED
+#define ACPI_32BIT_PHYSICAL_ADDRESS
#endif
/* acpi_exec configuration. Multithreaded with full AML debugger */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index b95dc32a6e6b..4188a4d3b597 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,7 +196,7 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
- u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
+ phys_cpuid_t phys_id; /* CPU hardware ID such as APIC ID for x86 */
u32 id; /* CPU logical ID allocated by OS */
u32 pblk;
int performance_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int phys_id, u32 acpi_id);
+phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_pdc.c */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 3378dcf4c31e..940d5ec122c9 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -39,6 +39,10 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
debug_dma_unmap_page(dev, addr, size, dir, true);
}
+/*
+ * dma_map_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
@@ -51,6 +55,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ BUG_ON(ents < 0);
debug_dma_map_sg(dev, sg, nents, ents, dir);
return ents;
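
Editorial note: the added comment pins down the dma_map_sg_attrs() contract — failure is reported as 0, never as a negative errno. A caller therefore tests for zero, roughly as in this sketch, which assumes a driver already has dev, sglist, and count in scope:

	int nents;

	nents = dma_map_sg(dev, sglist, count, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;		/* 0 means the mapping failed */

	/* ... program the hardware with the nents mapped segments ... */

	dma_unmap_sg(dev, sglist, count, DMA_TO_DEVICE);
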
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 383ade1a211b..9bb0d11729c9 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of.h>
-#include <linux/pinctrl/pinctrl.h>
#ifdef CONFIG_GPIOLIB
@@ -139,53 +138,6 @@ static inline void gpio_unexport(unsigned gpio)
gpiod_unexport(gpio_to_desc(gpio));
}
-#ifdef CONFIG_PINCTRL
-
-/**
- * struct gpio_pin_range - pin range controlled by a gpio chip
- * @head: list for maintaining set of pin ranges, used internally
- * @pctldev: pinctrl device which handles corresponding pins
- * @range: actual range of pins controlled by a gpio controller
- */
-
-struct gpio_pin_range {
- struct list_head node;
- struct pinctrl_dev *pctldev;
- struct pinctrl_gpio_range range;
-};
-
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins);
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
-
-#else
-
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- return 0;
-}
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- return 0;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-}
-
-#endif /* CONFIG_PINCTRL */
-
#else /* !CONFIG_GPIOLIB */
static inline bool gpio_is_valid(int number)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 4d46085c1b90..39f1d6a2b04d 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -6,6 +6,12 @@
#include <linux/mm_types.h>
#include <linux/bug.h>
+#include <linux/errno.h>
+
+#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
+ CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
+#endif
/*
* On almost all architectures and configurations, 0 can be used as the
@@ -691,6 +697,30 @@ static inline int pmd_protnone(pmd_t pmd)
#endif /* CONFIG_MMU */
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index 9fa1f653ed3b..c9ccafa0d99a 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -17,7 +17,9 @@
#define __NR_seccomp_read_32 __NR_read
#define __NR_seccomp_write_32 __NR_write
#define __NR_seccomp_exit_32 __NR_exit
+#ifndef __NR_seccomp_sigreturn_32
#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+#endif
#endif /* CONFIG_COMPAT && ! already defined */
#define __NR_seccomp_read __NR_read
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ac78910d7416..8bd374d3cf21 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -124,7 +124,10 @@
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
- VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+ VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
+ VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
+ *(_ftrace_enum_map) \
+ VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif
@@ -150,6 +153,14 @@
#define TRACE_SYSCALLS()
#endif
+#ifdef CONFIG_SERIAL_EARLYCON
+#define EARLYCON_TABLE() STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__earlycon_table) = .; \
+ *(__earlycon_table) \
+ *(__earlycon_table_end)
+#else
+#define EARLYCON_TABLE()
+#endif
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
@@ -167,6 +178,7 @@
#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
+#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
#define KERNEL_DTB() \
@@ -401,7 +413,7 @@
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot) \
- *(.text) \
+ *(.text .text.fixup) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
@@ -501,8 +513,10 @@
CLKSRC_OF_TABLES() \
IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
+ CPUIDLE_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
+ EARLYCON_TABLE() \
EARLYCON_OF_TABLES()
#define INIT_TEXT \
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 623a59c1ff5a..0ecb7688af71 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -137,7 +137,7 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
-int crypto_unregister_instance(struct crypto_alg *alg);
+int crypto_unregister_instance(struct crypto_instance *inst);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask);
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 178525e5f430..018afb264ac2 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -58,8 +58,9 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES];
+ struct scatterlist sg[ALG_MAX_PAGES + 1];
struct page *pages[ALG_MAX_PAGES];
+ unsigned int npages;
};
int af_alg_register_type(const struct af_alg_type *type);
@@ -70,6 +71,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index a16fb10142bf..6e28ea5be9f1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -103,8 +103,7 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
* This function fills the caller-allocated buffer with random numbers using the
* random number generator referenced by the cipher handle.
*
- * Return: > 0 function was successful and returns the number of generated
- * bytes; < 0 if an error occurred
+ * Return: 0 if the function was successful; < 0 if an error occurred
*/
static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
u8 *rdata, unsigned int dlen)
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 190f8a0e0242..dd7905a3c22e 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -65,20 +65,20 @@
#define SHA512_H7 0x5be0cd19137e2179ULL
struct sha1_state {
- u64 count;
u32 state[SHA1_DIGEST_SIZE / 4];
+ u64 count;
u8 buffer[SHA1_BLOCK_SIZE];
};
struct sha256_state {
- u64 count;
u32 state[SHA256_DIGEST_SIZE / 4];
+ u64 count;
u8 buf[SHA256_BLOCK_SIZE];
};
struct sha512_state {
- u64 count[2];
u64 state[SHA512_DIGEST_SIZE / 8];
+ u64 count[2];
u8 buf[SHA512_BLOCK_SIZE];
};
@@ -87,9 +87,18 @@ struct shash_desc;
extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+
+extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
#endif
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
new file mode 100644
index 000000000000..d0df431f9a97
--- /dev/null
+++ b/include/crypto/sha1_base.h
@@ -0,0 +1,106 @@
+/*
+ * sha1_base.h - core logic for SHA-1 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
+
+static inline int sha1_base_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha1_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha1_block_fn *block_fn)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA1_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ blocks = len / SHA1_BLOCK_SIZE;
+ len %= SHA1_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA1_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha1_base_do_finalize(struct shash_desc *desc,
+ sha1_block_fn *block_fn)
+{
+ const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buffer, 1);
+
+ return 0;
+}
+
+static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
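
Editorial note: the new sha1_base.h (and the sha256/sha512 counterparts below) factor the block-buffering and padding boilerplate out of individual SHA drivers, so an implementation only supplies its block transform. A rough sketch of the intended use; my_sha1_transform and the glue functions are hypothetical names:

#include <crypto/sha1_base.h>

static void my_sha1_transform(struct sha1_state *sst, u8 const *src, int blocks)
{
	/* arch-specific: consume 'blocks' 64-byte blocks, updating sst->state */
}

static int my_sha1_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	return sha1_base_do_update(desc, data, len, my_sha1_transform);
}

static int my_sha1_final(struct shash_desc *desc, u8 *out)
{
	sha1_base_do_finalize(desc, my_sha1_transform);
	return sha1_base_finish(desc, out);
}
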
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
new file mode 100644
index 000000000000..d1f2195bb7de
--- /dev/null
+++ b/include/crypto/sha256_base.h
@@ -0,0 +1,128 @@
+/*
+ * sha256_base.h - core logic for SHA-256 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha224_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA256_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA256_BLOCK_SIZE;
+ len %= SHA256_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA256_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
new file mode 100644
index 000000000000..6c5341e005ea
--- /dev/null
+++ b/include/crypto/sha512_base.h
@@ -0,0 +1,131 @@
+/*
+ * sha512_base.h - core logic for SHA-512 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha384_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA384_H0;
+ sctx->state[1] = SHA384_H1;
+ sctx->state[2] = SHA384_H2;
+ sctx->state[3] = SHA384_H3;
+ sctx->state[4] = SHA384_H4;
+ sctx->state[5] = SHA384_H5;
+ sctx->state[6] = SHA384_H6;
+ sctx->state[7] = SHA384_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA512_H0;
+ sctx->state[1] = SHA512_H1;
+ sctx->state[2] = SHA512_H2;
+ sctx->state[3] = SHA512_H3;
+ sctx->state[4] = SHA512_H4;
+ sctx->state[5] = SHA512_H5;
+ sctx->state[6] = SHA512_H6;
+ sctx->state[7] = SHA512_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha512_block_fn *block_fn)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->count[0] += len;
+ if (sctx->count[0] < len)
+ sctx->count[1]++;
+
+ if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA512_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA512_BLOCK_SIZE;
+ len %= SHA512_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA512_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha512_base_do_finalize(struct shash_desc *desc,
+ sha512_block_fn *block_fn)
+{
+ const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *digest = (__be64 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
+ put_unaligned_be64(sctx->state[i], digest++);
+
+ *sctx = (struct sha512_state){};
+ return 0;
+}
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 5a4f49005169..de13bfc35634 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -38,17 +38,18 @@ struct dw_hdmi_curr_ctrl {
u16 curr[DW_HDMI_RES_MAX];
};
-struct dw_hdmi_sym_term {
+struct dw_hdmi_phy_config {
unsigned long mpixelclock;
u16 sym_ctr; /*clock symbol and transmitter control*/
u16 term; /*transmission termination value*/
+ u16 vlev_ctr; /* voltage level control */
};
struct dw_hdmi_plat_data {
enum dw_hdmi_devtype dev_type;
const struct dw_hdmi_mpll_config *mpll_cfg;
const struct dw_hdmi_curr_ctrl *cur_ctr;
- const struct dw_hdmi_sym_term *sym_term;
+ const struct dw_hdmi_phy_config *phy_config;
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
};
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e928625a9da0..62c40777c009 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,9 @@ struct dma_buf_attachment;
* PRIME: used in the prime code.
* This is the category used by the DRM_DEBUG_PRIME() macro.
*
+ * ATOMIC: used in the atomic code.
+ * This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ *
* Enabling verbose debug messages is done through the drm.debug parameter,
* each category being enabled by a bit.
*
@@ -121,6 +124,7 @@ struct dma_buf_attachment;
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
+#define DRM_UT_ATOMIC 0x10
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
@@ -207,6 +211,11 @@ void drm_err(const char *format, ...);
if (unlikely(drm_debug & DRM_UT_PRIME)) \
drm_ut_debug_printk(__func__, fmt, ##args); \
} while (0)
+#define DRM_DEBUG_ATOMIC(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
/*@}*/
@@ -244,7 +253,6 @@ struct drm_ioctl_desc {
unsigned int cmd;
int flags;
drm_ioctl_t *func;
- unsigned int cmd_drv;
const char *name;
};
@@ -253,8 +261,13 @@ struct drm_ioctl_desc {
* ioctl, for use by drm_ioctl().
*/
-#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
+ .cmd = DRM_IOCTL_##ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
/* Event queued up for userspace to read */
struct drm_pending_event {
@@ -922,6 +935,7 @@ extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 51168a8b723a..c157103492b0 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -75,4 +75,28 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+#define for_each_connector_in_state(state, connector, connector_state, __i) \
+ for ((__i) = 0; \
+ (connector) = (state)->connectors[__i], \
+ (connector_state) = (state)->connector_states[__i], \
+ (__i) < (state)->num_connector; \
+ (__i)++) \
+ if (connector)
+
+#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
+ for ((__i) = 0; \
+ (crtc) = (state)->crtcs[__i], \
+ (crtc_state) = (state)->crtc_states[__i], \
+ (__i) < (state)->dev->mode_config.num_crtc; \
+ (__i)++) \
+ if (crtc_state)
+
+#define for_each_plane_in_state(state, plane, plane_state, __i) \
+ for ((__i) = 0; \
+ (plane) = (state)->planes[__i], \
+ (plane_state) = (state)->plane_states[__i], \
+ (__i) < (state)->dev->mode_config.num_total_plane; \
+ (__i)++) \
+ if (plane_state)
+
#endif /* DRM_ATOMIC_H_ */
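
A short sketch of the new iterators in a driver's commit path. Note that the macros assign the object and its state in the loop condition and end in an if(), so a plain block can be used as the body; my_commit_crtc() is a hypothetical driver helper.

static void my_commit_crtcs(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->mode_changed)
			my_commit_crtc(crtc, crtc_state);
	}
}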
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 8039d54a7441..d665781eb542 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -43,9 +43,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
-void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
- struct drm_atomic_state *state);
-void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
@@ -87,20 +87,34 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
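
A sketch of the intended use of the new __drm_atomic_helper_*() variants: a driver that wraps drm_crtc_state in its own structure duplicates the common part with the helper and copies only its private fields itself. struct my_crtc_state and my_hw_field are hypothetical.

struct my_crtc_state {
	struct drm_crtc_state base;
	u32 my_hw_field;
};

static struct drm_crtc_state *
my_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct my_crtc_state *old =
		container_of(crtc->state, struct my_crtc_state, base);
	struct my_crtc_state *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);
	new->my_hw_field = old->my_hw_field;

	return &new->base;
}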
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 920e21a8f3fd..ca71c03143d1 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -53,7 +53,6 @@ struct fence;
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
-#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
#define DRM_MODE_OBJECT_ANY 0
struct drm_mode_object {
@@ -202,6 +201,7 @@ struct drm_framebuffer {
const struct drm_framebuffer_funcs *funcs;
unsigned int pitches[4];
unsigned int offsets[4];
+ uint64_t modifier[4];
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
@@ -466,7 +466,7 @@ struct drm_crtc {
int framedur_ns, linedur_ns, pixeldur_ns;
/* if you are using the helper */
- void *helper_private;
+ const void *helper_private;
struct drm_object_properties properties;
@@ -596,7 +596,7 @@ struct drm_encoder {
struct drm_crtc *crtc;
struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
- void *helper_private;
+ const void *helper_private;
};
/* should we poll this connector for connects and disconnects */
@@ -700,7 +700,7 @@ struct drm_connector {
/* requested DPMS state */
int dpms;
- void *helper_private;
+ const void *helper_private;
/* forced on connector */
struct drm_cmdline_mode cmdline_mode;
@@ -829,6 +829,7 @@ enum drm_plane_type {
* @possible_crtcs: pipes this plane can be bound to
* @format_types: array of formats supported by this plane
* @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
* @crtc: currently bound CRTC
* @fb: currently bound fb
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
@@ -849,6 +850,7 @@ struct drm_plane {
uint32_t possible_crtcs;
uint32_t *format_types;
uint32_t format_count;
+ bool format_default;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
@@ -861,7 +863,7 @@ struct drm_plane {
enum drm_plane_type type;
- void *helper_private;
+ const void *helper_private;
struct drm_plane_state *state;
};
@@ -912,7 +914,7 @@ struct drm_bridge {
};
/**
- * struct struct drm_atomic_state - the global state object for atomic updates
+ * struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
@@ -972,7 +974,7 @@ struct drm_mode_set {
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
- * @atomic_check: check whether a give atomic state update is possible
+ * @atomic_check: check whether a given atomic state update is possible
* @atomic_commit: commit an atomic state update previously verified with
* atomic_check()
*
@@ -1155,6 +1157,9 @@ struct drm_mode_config {
/* whether async page flip is supported or not */
bool async_page_flip;
+ /* whether the driver supports fb modifiers */
+ bool allow_fb_modifiers;
+
/* cursor size */
uint32_t cursor_width, cursor_height;
};
@@ -1259,6 +1264,8 @@ extern int drm_plane_init(struct drm_device *dev,
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
+extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
+ u32 format);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c250a22b39ab..c8fc187061de 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -89,6 +89,7 @@ struct drm_crtc_helper_funcs {
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
+ /* Actually set the mode for atomic helpers, optional */
void (*mode_set_nofb)(struct drm_crtc *crtc);
/* Move the crtc on the current fb to the given position *optional* */
@@ -119,7 +120,7 @@ struct drm_crtc_helper_funcs {
* @mode_fixup: try to fixup proposed mode for this connector
* @prepare: part of the disable sequence, called before the CRTC modeset
* @commit: called after the CRTC modeset
- * @mode_set: set this mode
+ * @mode_set: set this mode, optional for atomic helpers
* @get_crtc: return CRTC that the encoder is currently attached to
* @detect: connection status detection
* @disable: disable encoder when not in use (overrides DPMS off)
@@ -196,19 +197,19 @@ extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
- crtc->helper_private = (void *)funcs;
+ crtc->helper_private = funcs;
}
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
- encoder->helper_private = (void *)funcs;
+ encoder->helper_private = funcs;
}
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
- connector->helper_private = (void *)funcs;
+ connector->helper_private = funcs;
}
extern void drm_helper_resume_force_mode(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 7e25030a6aa2..523f04c90dea 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -42,6 +42,8 @@
* 1.2 formally includes both eDP and DPI definitions.
*/
+#define DP_AUX_MAX_PAYLOAD_BYTES 16
+
#define DP_AUX_I2C_WRITE 0x0
#define DP_AUX_I2C_READ 0x1
#define DP_AUX_I2C_STATUS 0x2
@@ -92,6 +94,15 @@
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
+#define DP_RECEIVE_PORT_0_CAP_0 0x008
+# define DP_LOCAL_EDID_PRESENT (1 << 1)
+# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+
+#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
+
+#define DP_RECEIVE_PORT_1_CAP_0 0x00a
+#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
+
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
@@ -101,8 +112,19 @@
# define DP_I2C_SPEED_1M 0x20
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
+# define DP_FRAMING_CHANGE_CAP (1 << 1)
+# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
+
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+#define DP_ADAPTER_CAP 0x00f /* 1.2 */
+# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
+# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
+
+#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
+# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
+
/* Multiple stream transport */
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
@@ -110,10 +132,56 @@
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
+#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
+
+/* AV_SYNC_DATA_BLOCK 1.2 */
+#define DP_AV_GRANULARITY 0x023
+# define DP_AG_FACTOR_MASK (0xf << 0)
+# define DP_AG_FACTOR_3MS (0 << 0)
+# define DP_AG_FACTOR_2MS (1 << 0)
+# define DP_AG_FACTOR_1MS (2 << 0)
+# define DP_AG_FACTOR_500US (3 << 0)
+# define DP_AG_FACTOR_200US (4 << 0)
+# define DP_AG_FACTOR_100US (5 << 0)
+# define DP_AG_FACTOR_10US (6 << 0)
+# define DP_AG_FACTOR_1US (7 << 0)
+# define DP_VG_FACTOR_MASK (0xf << 4)
+# define DP_VG_FACTOR_3MS (0 << 4)
+# define DP_VG_FACTOR_2MS (1 << 4)
+# define DP_VG_FACTOR_1MS (2 << 4)
+# define DP_VG_FACTOR_500US (3 << 4)
+# define DP_VG_FACTOR_200US (4 << 4)
+# define DP_VG_FACTOR_100US (5 << 4)
+
+#define DP_AUD_DEC_LAT0 0x024
+#define DP_AUD_DEC_LAT1 0x025
+
+#define DP_AUD_PP_LAT0 0x026
+#define DP_AUD_PP_LAT1 0x027
+
+#define DP_VID_INTER_LAT 0x028
+
+#define DP_VID_PROG_LAT 0x029
+
+#define DP_REP_LAT 0x02a
+
+#define DP_AUD_DEL_INS0 0x02b
+#define DP_AUD_DEL_INS1 0x02c
+#define DP_AUD_DEL_INS2 0x02d
+/* End of AV_SYNC_DATA_BLOCK */
+
+#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
+# define DP_ALPM_CAP (1 << 0)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
+
#define DP_GUID 0x030 /* 1.2 */
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
+# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
@@ -153,6 +221,7 @@
/* link configuration */
#define DP_LINK_BW_SET 0x100
+# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
@@ -168,11 +237,12 @@
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
-# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
-# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
-# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
-# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
-# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
+# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
@@ -215,17 +285,63 @@
/* bitmask as for DP_I2C_SPEED_CAP */
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
+# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
+# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
+
+#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
+#define DP_LINK_QUAL_LANE1_SET 0x10c
+#define DP_LINK_QUAL_LANE2_SET 0x10d
+#define DP_LINK_QUAL_LANE3_SET 0x10e
+# define DP_LINK_QUAL_PATTERN_DISABLE 0
+# define DP_LINK_QUAL_PATTERN_D10_2 1
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
+# define DP_LINK_QUAL_PATTERN_PRBS7 3
+# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
+# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
+# define DP_LINK_QUAL_PATTERN_MASK 7
+
+#define DP_TRAINING_LANE0_1_SET2 0x10f
+#define DP_TRAINING_LANE2_3_SET2 0x110
+# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
+# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
+# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
+# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
+#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
+#define DP_AUDIO_DELAY1 0x113
+#define DP_AUDIO_DELAY2 0x114
+
+#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
+# define DP_LINK_RATE_SET_SHIFT 0
+# define DP_LINK_RATE_SET_MASK (7 << 0)
+
+#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
+# define DP_ALPM_ENABLE (1 << 0)
+# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
+# define DP_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
+# define DP_PWR_NOT_NEEDED (1 << 0)
+
+#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
+
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
+# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
+# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
@@ -332,6 +448,49 @@
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
+#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
+# define DP_EDP_11 0x00
+# define DP_EDP_12 0x01
+# define DP_EDP_13 0x02
+# define DP_EDP_14 0x03
+
+#define DP_EDP_GENERAL_CAP_1 0x701
+
+#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
+
+#define DP_EDP_GENERAL_CAP_2 0x703
+
+#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
+
+#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
+
+#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
+
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
+
+#define DP_EDP_PWMGEN_BIT_COUNT 0x724
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
+
+#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
+
+#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
+
+#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
+#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+
+#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
+#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
@@ -350,6 +509,7 @@
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
@@ -363,6 +523,9 @@
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
+#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
+# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -519,6 +682,9 @@ struct drm_dp_aux_msg {
* transactions. The drm_dp_aux_register_i2c_bus() function registers an
* I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
* should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
+ * The I2C adapter uses long transfers by default; if a partial response is
+ * received, the adapter will drop down to the size given by the partial
+ * response for this transaction only.
*
* Note that the aux helper code assumes that the .transfer() function
* only modifies the reply field of the drm_dp_aux_msg structure. The
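
A minimal sketch of probing one of the new eDP 1.4 fields, assuming the existing drm_dp_dpcd_readb() helper; DP_PSR2_IS_SUPPORTED comes from the hunk above and my_sink_has_psr2() is a hypothetical driver function.

static bool my_sink_has_psr2(struct drm_dp_aux *aux)
{
	u8 psr_version = 0;

	if (drm_dp_dpcd_readb(aux, DP_PSR_SUPPORT, &psr_version) != 1)
		return false;

	return psr_version == DP_PSR2_IS_SUPPORTED;
}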
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 00c1da927245..a2507817be41 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -486,6 +486,8 @@ int drm_dp_calc_pbn_mode(int clock, int bpp);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 87d85e81d3a7..799050198323 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -215,6 +215,8 @@ struct detailed_timing {
#define DRM_ELD_VER 0
# define DRM_ELD_VER_SHIFT 3
# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 21b944c456f6..0dfd94def593 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -44,6 +44,25 @@ struct drm_fb_helper_crtc {
int x, y;
};
+/**
+ * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
+ * @fb_width: fbdev width
+ * @fb_height: fbdev height
+ * @surface_width: scanout buffer width
+ * @surface_height: scanout buffer height
+ * @surface_bpp: scanout buffer bpp
+ * @surface_depth: scanout buffer depth
+ *
+ * Note that the scanout surface width/height may be larger than the fbdev
+ * width/height. In case of multiple displays, the scanout surface is sized
+ * according to the largest width/height (so it is large enough for all CRTCs
+ * to scanout). But the fbdev width/height is sized to the minimum width/
+ * height of all the displays. This ensures that fbcon fits on the smallest
+ * of the attached displays.
+ *
+ * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
+ * rather than the surface size.
+ */
struct drm_fb_helper_surface_size {
u32 fb_width;
u32 fb_height;
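
A sketch of an fbdev probe callback honouring the split documented in the new comment: the scanout buffer is sized from surface_width/surface_height, while drm_fb_helper_fill_var() gets fb_width/fb_height. The my_fb_create()/my_fb_info_create() helpers are hypothetical and error handling is omitted.

static int my_fbdev_probe(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;
	struct drm_framebuffer *fb;

	/* Scanout buffer: large enough for every CRTC. */
	fb = my_fb_create(helper->dev, sizes->surface_width,
			  sizes->surface_height, sizes->surface_bpp);

	info = my_fb_info_create(helper, fb);

	/* fbcon view: clipped to the smallest attached display. */
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width,
			       sizes->fb_height);
	return 0;
}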
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 1e6ae1458f7a..7a592d7e398b 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -149,14 +149,16 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
- if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev;
+
+ if (!obj)
+ return;
- mutex_lock(&dev->struct_mutex);
- if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
- drm_gem_object_free(&obj->refcount);
+ dev = obj->dev;
+ if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
- }
+ else
+ might_lock(&dev->struct_mutex);
}
int drm_gem_handle_create(struct drm_file *file_priv,
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index d92f6dd1fb11..0616188c7801 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -92,7 +92,7 @@ enum drm_mode_status {
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
#define CRTC_NO_VSCAN (1 << 3) /* don't adjust doublescan */
-#define CRTC_STEREO_DOUBLE_ONLY (CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
+#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 1fbcc96063a7..13ff44b28893 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -29,6 +29,7 @@
struct drm_connector;
struct drm_device;
struct drm_panel;
+struct display_timing;
/**
* struct drm_panel_funcs - perform operations on a given panel
@@ -38,6 +39,8 @@ struct drm_panel;
* @enable: enable panel (turn on back light, etc.)
* @get_modes: add modes to the connector that the panel is attached to and
* return the number of modes added
+ * @get_timings: copy display timings into the provided array and return
+ * the number of display timings available
*
* The .prepare() function is typically called before the display controller
* starts to transmit video data. Panel drivers can use this to turn the panel
@@ -68,6 +71,8 @@ struct drm_panel_funcs {
int (*prepare)(struct drm_panel *panel);
int (*enable)(struct drm_panel *panel);
int (*get_modes)(struct drm_panel *panel);
+ int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
+ struct display_timing *timings);
};
struct drm_panel {
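
A sketch of a panel driver implementing the new .get_timings() hook as described in the kerneldoc above: copy up to num_timings entries and return how many are available. The my_panel_timing table and its values are hypothetical.

static const struct display_timing my_panel_timing = {
	.pixelclock = { 66000000, 70000000, 74000000 },
	.hactive = { 1280, 1280, 1280 },
	/* remaining timing_entry fields omitted */
};

static int my_panel_get_timings(struct drm_panel *panel,
				unsigned int num_timings,
				struct display_timing *timings)
{
	if (timings && num_timings >= 1)
		timings[0] = my_panel_timing;

	return 1;	/* one timing available */
}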
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 2dd405c9be78..45c39a37f924 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -186,6 +186,7 @@
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 31c11d36fae6..96e16283afb9 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -59,9 +59,11 @@ extern int drm_crtc_init(struct drm_device *dev,
*/
struct drm_plane_helper_funcs {
int (*prepare_fb)(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state);
void (*cleanup_fb)(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state);
int (*atomic_check)(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -74,7 +76,7 @@ struct drm_plane_helper_funcs {
static inline void drm_plane_helper_add(struct drm_plane *plane,
const struct drm_plane_helper_funcs *funcs)
{
- plane->helper_private = (void *)funcs;
+ plane->helper_private = funcs;
}
extern int drm_plane_helper_check_update(struct drm_plane *plane,
@@ -98,10 +100,6 @@ extern int drm_primary_helper_update(struct drm_plane *plane,
extern int drm_primary_helper_disable(struct drm_plane *plane);
extern void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
-extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
- const uint32_t *formats,
- int num_formats);
-
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index d016dc57f007..613372375ada 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -208,40 +208,41 @@
#define INTEL_VLV_D_IDS(info) \
INTEL_VGA_DEVICE(0x0155, info)
-#define _INTEL_BDW_M(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-#define _INTEL_BDW_D(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-
-#define _INTEL_BDW_M_IDS(gt, info) \
- _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
- _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
-
-#define _INTEL_BDW_D_IDS(gt, info) \
- _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
- _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
-
-#define INTEL_BDW_GT12M_IDS(info) \
- _INTEL_BDW_M_IDS(1, info), \
- _INTEL_BDW_M_IDS(2, info)
+#define INTEL_BDW_GT12M_IDS(info) \
+ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
+ INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
+ INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
+ INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
#define INTEL_BDW_GT12D_IDS(info) \
- _INTEL_BDW_D_IDS(1, info), \
- _INTEL_BDW_D_IDS(2, info)
+ INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
+ INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
+ INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
+ INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
#define INTEL_BDW_GT3M_IDS(info) \
- _INTEL_BDW_M_IDS(3, info)
+ INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x162E, info) /* ULX */
#define INTEL_BDW_GT3D_IDS(info) \
- _INTEL_BDW_D_IDS(3, info)
+ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
#define INTEL_BDW_RSVDM_IDS(info) \
- _INTEL_BDW_M_IDS(4, info)
+ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x163E, info) /* ULX */
#define INTEL_BDW_RSVDD_IDS(info) \
- _INTEL_BDW_D_IDS(4, info)
+ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_M_IDS(info) \
INTEL_BDW_GT12M_IDS(info), \
@@ -259,21 +260,31 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
-#define INTEL_SKL_IDS(info) \
- INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+#define INTEL_SKL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
+
+#define INTEL_SKL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
- INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
- INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+
+#define INTEL_SKL_IDS(info) \
+ INTEL_SKL_GT1_IDS(info), \
+ INTEL_SKL_GT2_IDS(info), \
+ INTEL_SKL_GT3_IDS(info)
+
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index 961b9c130ea9..aab088d30199 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -282,4 +282,65 @@
*/
#define NR_CLKS_DMC 21
+/*
+ * CMU ISP
+ */
+
+/* Dividers */
+
+#define CLK_DIV_ISP1 1
+#define CLK_DIV_ISP0 2
+#define CLK_DIV_MCUISP1 3
+#define CLK_DIV_MCUISP0 4
+#define CLK_DIV_MPWM 5
+
+/* Gates */
+
+#define CLK_UART_ISP 8
+#define CLK_WDT_ISP 9
+#define CLK_PWM_ISP 10
+#define CLK_I2C1_ISP 11
+#define CLK_I2C0_ISP 12
+#define CLK_MPWM_ISP 13
+#define CLK_MCUCTL_ISP 14
+#define CLK_PPMUISPX 15
+#define CLK_PPMUISPMX 16
+#define CLK_QE_LITE1 17
+#define CLK_QE_LITE0 18
+#define CLK_QE_FD 19
+#define CLK_QE_DRC 20
+#define CLK_QE_ISP 21
+#define CLK_CSIS1 22
+#define CLK_SMMU_LITE1 23
+#define CLK_SMMU_LITE0 24
+#define CLK_SMMU_FD 25
+#define CLK_SMMU_DRC 26
+#define CLK_SMMU_ISP 27
+#define CLK_GICISP 28
+#define CLK_CSIS0 29
+#define CLK_MCUISP 30
+#define CLK_LITE1 31
+#define CLK_LITE0 32
+#define CLK_FD 33
+#define CLK_DRC 34
+#define CLK_ISP 35
+#define CLK_QE_ISPCX 36
+#define CLK_QE_SCALERP 37
+#define CLK_QE_SCALERC 38
+#define CLK_SMMU_SCALERP 39
+#define CLK_SMMU_SCALERC 40
+#define CLK_SCALERP 41
+#define CLK_SCALERC 42
+#define CLK_SPI1_ISP 43
+#define CLK_SPI0_ISP 44
+#define CLK_SMMU_ISPCX 45
+#define CLK_ASYNCAXIM 46
+#define CLK_SCLK_MPWM_ISP 47
+
+/*
+ * Total number of clocks of CMU_ISP.
+ * NOTE: Must be equal to last clock ID increased by one.
+ */
+#define NR_CLKS_ISP 48
+
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
new file mode 100644
index 000000000000..5bd80d5ecd0f
--- /dev/null
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -0,0 +1,1403 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H
+#define _DT_BINDINGS_CLOCK_EXYNOS5433_H
+
+/* CMU_TOP */
+#define CLK_FOUT_ISP_PLL 1
+#define CLK_FOUT_AUD_PLL 2
+
+#define CLK_MOUT_AUD_PLL 10
+#define CLK_MOUT_ISP_PLL 11
+#define CLK_MOUT_AUD_PLL_USER_T 12
+#define CLK_MOUT_MPHY_PLL_USER 13
+#define CLK_MOUT_MFC_PLL_USER 14
+#define CLK_MOUT_BUS_PLL_USER 15
+#define CLK_MOUT_ACLK_HEVC_400 16
+#define CLK_MOUT_ACLK_CAM1_333 17
+#define CLK_MOUT_ACLK_CAM1_552_B 18
+#define CLK_MOUT_ACLK_CAM1_552_A 19
+#define CLK_MOUT_ACLK_ISP_DIS_400 20
+#define CLK_MOUT_ACLK_ISP_400 21
+#define CLK_MOUT_ACLK_BUS0_400 22
+#define CLK_MOUT_ACLK_MSCL_400_B 23
+#define CLK_MOUT_ACLK_MSCL_400_A 24
+#define CLK_MOUT_ACLK_GSCL_333 25
+#define CLK_MOUT_ACLK_G2D_400_B 26
+#define CLK_MOUT_ACLK_G2D_400_A 27
+#define CLK_MOUT_SCLK_JPEG_C 28
+#define CLK_MOUT_SCLK_JPEG_B 29
+#define CLK_MOUT_SCLK_JPEG_A 30
+#define CLK_MOUT_SCLK_MMC2_B 31
+#define CLK_MOUT_SCLK_MMC2_A 32
+#define CLK_MOUT_SCLK_MMC1_B 33
+#define CLK_MOUT_SCLK_MMC1_A 34
+#define CLK_MOUT_SCLK_MMC0_D 35
+#define CLK_MOUT_SCLK_MMC0_C 36
+#define CLK_MOUT_SCLK_MMC0_B 37
+#define CLK_MOUT_SCLK_MMC0_A 38
+#define CLK_MOUT_SCLK_SPI4 39
+#define CLK_MOUT_SCLK_SPI3 40
+#define CLK_MOUT_SCLK_UART2 41
+#define CLK_MOUT_SCLK_UART1 42
+#define CLK_MOUT_SCLK_UART0 43
+#define CLK_MOUT_SCLK_SPI2 44
+#define CLK_MOUT_SCLK_SPI1 45
+#define CLK_MOUT_SCLK_SPI0 46
+#define CLK_MOUT_ACLK_MFC_400_C 47
+#define CLK_MOUT_ACLK_MFC_400_B 48
+#define CLK_MOUT_ACLK_MFC_400_A 49
+#define CLK_MOUT_SCLK_ISP_SENSOR2 50
+#define CLK_MOUT_SCLK_ISP_SENSOR1 51
+#define CLK_MOUT_SCLK_ISP_SENSOR0 52
+#define CLK_MOUT_SCLK_ISP_UART 53
+#define CLK_MOUT_SCLK_ISP_SPI1 54
+#define CLK_MOUT_SCLK_ISP_SPI0 55
+#define CLK_MOUT_SCLK_PCIE_100 56
+#define CLK_MOUT_SCLK_UFSUNIPRO 57
+#define CLK_MOUT_SCLK_USBHOST30 58
+#define CLK_MOUT_SCLK_USBDRD30 59
+#define CLK_MOUT_SCLK_SLIMBUS 60
+#define CLK_MOUT_SCLK_SPDIF 61
+#define CLK_MOUT_SCLK_AUDIO1 62
+#define CLK_MOUT_SCLK_AUDIO0 63
+#define CLK_MOUT_SCLK_HDMI_SPDIF 64
+
+#define CLK_DIV_ACLK_FSYS_200 100
+#define CLK_DIV_ACLK_IMEM_SSSX_266 101
+#define CLK_DIV_ACLK_IMEM_200 102
+#define CLK_DIV_ACLK_IMEM_266 103
+#define CLK_DIV_ACLK_PERIC_66_B 104
+#define CLK_DIV_ACLK_PERIC_66_A 105
+#define CLK_DIV_ACLK_PERIS_66_B 106
+#define CLK_DIV_ACLK_PERIS_66_A 107
+#define CLK_DIV_SCLK_MMC1_B 108
+#define CLK_DIV_SCLK_MMC1_A 109
+#define CLK_DIV_SCLK_MMC0_B 110
+#define CLK_DIV_SCLK_MMC0_A 111
+#define CLK_DIV_SCLK_MMC2_B 112
+#define CLK_DIV_SCLK_MMC2_A 113
+#define CLK_DIV_SCLK_SPI1_B 114
+#define CLK_DIV_SCLK_SPI1_A 115
+#define CLK_DIV_SCLK_SPI0_B 116
+#define CLK_DIV_SCLK_SPI0_A 117
+#define CLK_DIV_SCLK_SPI2_B 118
+#define CLK_DIV_SCLK_SPI2_A 119
+#define CLK_DIV_SCLK_UART2 120
+#define CLK_DIV_SCLK_UART1 121
+#define CLK_DIV_SCLK_UART0 122
+#define CLK_DIV_SCLK_SPI4_B 123
+#define CLK_DIV_SCLK_SPI4_A 124
+#define CLK_DIV_SCLK_SPI3_B 125
+#define CLK_DIV_SCLK_SPI3_A 126
+#define CLK_DIV_SCLK_I2S1 127
+#define CLK_DIV_SCLK_PCM1 128
+#define CLK_DIV_SCLK_AUDIO1 129
+#define CLK_DIV_SCLK_AUDIO0 130
+#define CLK_DIV_ACLK_GSCL_111 131
+#define CLK_DIV_ACLK_GSCL_333 132
+#define CLK_DIV_ACLK_HEVC_400 133
+#define CLK_DIV_ACLK_MFC_400 134
+#define CLK_DIV_ACLK_G2D_266 135
+#define CLK_DIV_ACLK_G2D_400 136
+#define CLK_DIV_ACLK_G3D_400 137
+#define CLK_DIV_ACLK_BUS0_400 138
+#define CLK_DIV_ACLK_BUS1_400 139
+#define CLK_DIV_SCLK_PCIE_100 140
+#define CLK_DIV_SCLK_USBHOST30 141
+#define CLK_DIV_SCLK_UFSUNIPRO 142
+#define CLK_DIV_SCLK_USBDRD30 143
+#define CLK_DIV_SCLK_JPEG 144
+#define CLK_DIV_ACLK_MSCL_400 145
+#define CLK_DIV_ACLK_ISP_DIS_400 146
+#define CLK_DIV_ACLK_ISP_400 147
+#define CLK_DIV_ACLK_CAM0_333 148
+#define CLK_DIV_ACLK_CAM0_400 149
+#define CLK_DIV_ACLK_CAM0_552 150
+#define CLK_DIV_ACLK_CAM1_333 151
+#define CLK_DIV_ACLK_CAM1_400 152
+#define CLK_DIV_ACLK_CAM1_552 153
+#define CLK_DIV_SCLK_ISP_UART 154
+#define CLK_DIV_SCLK_ISP_SPI1_B 155
+#define CLK_DIV_SCLK_ISP_SPI1_A 156
+#define CLK_DIV_SCLK_ISP_SPI0_B 157
+#define CLK_DIV_SCLK_ISP_SPI0_A 158
+#define CLK_DIV_SCLK_ISP_SENSOR2_B 159
+#define CLK_DIV_SCLK_ISP_SENSOR2_A 160
+#define CLK_DIV_SCLK_ISP_SENSOR1_B 161
+#define CLK_DIV_SCLK_ISP_SENSOR1_A 162
+#define CLK_DIV_SCLK_ISP_SENSOR0_B 163
+#define CLK_DIV_SCLK_ISP_SENSOR0_A 164
+
+#define CLK_ACLK_PERIC_66 200
+#define CLK_ACLK_PERIS_66 201
+#define CLK_ACLK_FSYS_200 202
+#define CLK_SCLK_MMC2_FSYS 203
+#define CLK_SCLK_MMC1_FSYS 204
+#define CLK_SCLK_MMC0_FSYS 205
+#define CLK_SCLK_SPI4_PERIC 206
+#define CLK_SCLK_SPI3_PERIC 207
+#define CLK_SCLK_UART2_PERIC 208
+#define CLK_SCLK_UART1_PERIC 209
+#define CLK_SCLK_UART0_PERIC 210
+#define CLK_SCLK_SPI2_PERIC 211
+#define CLK_SCLK_SPI1_PERIC 212
+#define CLK_SCLK_SPI0_PERIC 213
+#define CLK_SCLK_SPDIF_PERIC 214
+#define CLK_SCLK_I2S1_PERIC 215
+#define CLK_SCLK_PCM1_PERIC 216
+#define CLK_SCLK_SLIMBUS 217
+#define CLK_SCLK_AUDIO1 218
+#define CLK_SCLK_AUDIO0 219
+#define CLK_ACLK_G2D_266 220
+#define CLK_ACLK_G2D_400 221
+#define CLK_ACLK_G3D_400 222
+#define CLK_ACLK_IMEM_SSX_266 223
+#define CLK_ACLK_BUS0_400 224
+#define CLK_ACLK_BUS1_400 225
+#define CLK_ACLK_IMEM_200 226
+#define CLK_ACLK_IMEM_266 227
+#define CLK_SCLK_PCIE_100_FSYS 228
+#define CLK_SCLK_UFSUNIPRO_FSYS 229
+#define CLK_SCLK_USBHOST30_FSYS 230
+#define CLK_SCLK_USBDRD30_FSYS 231
+#define CLK_ACLK_GSCL_111 232
+#define CLK_ACLK_GSCL_333 233
+#define CLK_SCLK_JPEG_MSCL 234
+#define CLK_ACLK_MSCL_400 235
+#define CLK_ACLK_MFC_400 236
+#define CLK_ACLK_HEVC_400 237
+#define CLK_ACLK_ISP_DIS_400 238
+#define CLK_ACLK_ISP_400 239
+#define CLK_ACLK_CAM0_333 240
+#define CLK_ACLK_CAM0_400 241
+#define CLK_ACLK_CAM0_552 242
+#define CLK_ACLK_CAM1_333 243
+#define CLK_ACLK_CAM1_400 244
+#define CLK_ACLK_CAM1_552 245
+#define CLK_SCLK_ISP_SENSOR2 246
+#define CLK_SCLK_ISP_SENSOR1 247
+#define CLK_SCLK_ISP_SENSOR0 248
+#define CLK_SCLK_ISP_MCTADC_CAM1 249
+#define CLK_SCLK_ISP_UART_CAM1 250
+#define CLK_SCLK_ISP_SPI1_CAM1 251
+#define CLK_SCLK_ISP_SPI0_CAM1 252
+#define CLK_SCLK_HDMI_SPDIF_DISP 253
+
+#define TOP_NR_CLK 254
+
+/* CMU_CPIF */
+#define CLK_FOUT_MPHY_PLL 1
+
+#define CLK_MOUT_MPHY_PLL 2
+
+#define CLK_DIV_SCLK_MPHY 10
+
+#define CLK_SCLK_MPHY_PLL 11
+#define CLK_SCLK_UFS_MPHY 11
+
+#define CPIF_NR_CLK 12
+
+/* CMU_MIF */
+#define CLK_FOUT_MEM0_PLL 1
+#define CLK_FOUT_MEM1_PLL 2
+#define CLK_FOUT_BUS_PLL 3
+#define CLK_FOUT_MFC_PLL 4
+#define CLK_DOUT_MFC_PLL 5
+#define CLK_DOUT_BUS_PLL 6
+#define CLK_DOUT_MEM1_PLL 7
+#define CLK_DOUT_MEM0_PLL 8
+
+#define CLK_MOUT_MFC_PLL_DIV2 10
+#define CLK_MOUT_BUS_PLL_DIV2 11
+#define CLK_MOUT_MEM1_PLL_DIV2 12
+#define CLK_MOUT_MEM0_PLL_DIV2 13
+#define CLK_MOUT_MFC_PLL 14
+#define CLK_MOUT_BUS_PLL 15
+#define CLK_MOUT_MEM1_PLL 16
+#define CLK_MOUT_MEM0_PLL 17
+#define CLK_MOUT_CLK2X_PHY_C 18
+#define CLK_MOUT_CLK2X_PHY_B 19
+#define CLK_MOUT_CLK2X_PHY_A 20
+#define CLK_MOUT_CLKM_PHY_C 21
+#define CLK_MOUT_CLKM_PHY_B 22
+#define CLK_MOUT_CLKM_PHY_A 23
+#define CLK_MOUT_ACLK_MIFNM_200 24
+#define CLK_MOUT_ACLK_MIFNM_400 25
+#define CLK_MOUT_ACLK_DISP_333_B 26
+#define CLK_MOUT_ACLK_DISP_333_A 27
+#define CLK_MOUT_SCLK_DECON_VCLK_C 28
+#define CLK_MOUT_SCLK_DECON_VCLK_B 29
+#define CLK_MOUT_SCLK_DECON_VCLK_A 30
+#define CLK_MOUT_SCLK_DECON_ECLK_C 31
+#define CLK_MOUT_SCLK_DECON_ECLK_B 32
+#define CLK_MOUT_SCLK_DECON_ECLK_A 33
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_C 34
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_B 35
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_A 36
+#define CLK_MOUT_SCLK_DSD_C 37
+#define CLK_MOUT_SCLK_DSD_B 38
+#define CLK_MOUT_SCLK_DSD_A 39
+#define CLK_MOUT_SCLK_DSIM0_C 40
+#define CLK_MOUT_SCLK_DSIM0_B 41
+#define CLK_MOUT_SCLK_DSIM0_A 42
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_C 46
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_B 47
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_A 48
+#define CLK_MOUT_SCLK_DSIM1_C 49
+#define CLK_MOUT_SCLK_DSIM1_B 50
+#define CLK_MOUT_SCLK_DSIM1_A 51
+
+#define CLK_DIV_SCLK_HPM_MIF 55
+#define CLK_DIV_ACLK_DREX1 56
+#define CLK_DIV_ACLK_DREX0 57
+#define CLK_DIV_CLK2XPHY 58
+#define CLK_DIV_ACLK_MIF_266 59
+#define CLK_DIV_ACLK_MIFND_133 60
+#define CLK_DIV_ACLK_MIF_133 61
+#define CLK_DIV_ACLK_MIFNM_200 62
+#define CLK_DIV_ACLK_MIF_200 63
+#define CLK_DIV_ACLK_MIF_400 64
+#define CLK_DIV_ACLK_BUS2_400 65
+#define CLK_DIV_ACLK_DISP_333 66
+#define CLK_DIV_ACLK_CPIF_200 67
+#define CLK_DIV_SCLK_DSIM1 68
+#define CLK_DIV_SCLK_DECON_TV_VCLK 69
+#define CLK_DIV_SCLK_DSIM0 70
+#define CLK_DIV_SCLK_DSD 71
+#define CLK_DIV_SCLK_DECON_TV_ECLK 72
+#define CLK_DIV_SCLK_DECON_VCLK 73
+#define CLK_DIV_SCLK_DECON_ECLK 74
+#define CLK_DIV_MIF_PRE 75
+
+#define CLK_CLK2X_PHY1 80
+#define CLK_CLK2X_PHY0 81
+#define CLK_CLKM_PHY1 82
+#define CLK_CLKM_PHY0 83
+#define CLK_RCLK_DREX1 84
+#define CLK_RCLK_DREX0 85
+#define CLK_ACLK_DREX1_TZ 86
+#define CLK_ACLK_DREX0_TZ 87
+#define CLK_ACLK_DREX1_PEREV 88
+#define CLK_ACLK_DREX0_PEREV 89
+#define CLK_ACLK_DREX1_MEMIF 90
+#define CLK_ACLK_DREX0_MEMIF 91
+#define CLK_ACLK_DREX1_SCH 92
+#define CLK_ACLK_DREX0_SCH 93
+#define CLK_ACLK_DREX1_BUSIF 94
+#define CLK_ACLK_DREX0_BUSIF 95
+#define CLK_ACLK_DREX1_BUSIF_RD 96
+#define CLK_ACLK_DREX0_BUSIF_RD 97
+#define CLK_ACLK_DREX1 98
+#define CLK_ACLK_DREX0 99
+#define CLK_ACLK_ASYNCAXIM_ATLAS_CCIX 100
+#define CLK_ACLK_ASYNCAXIS_ATLAS_MIF 101
+#define CLK_ACLK_ASYNCAXIM_ATLAS_MIF 102
+#define CLK_ACLK_ASYNCAXIS_MIF_IMEM 103
+#define CLK_ACLK_ASYNCAXIS_NOC_P_CCI 104
+#define CLK_ACLK_ASYNCAXIM_NOC_P_CCI 105
+#define CLK_ACLK_ASYNCAXIS_CP1 106
+#define CLK_ACLK_ASYNCAXIM_CP1 107
+#define CLK_ACLK_ASYNCAXIS_CP0 108
+#define CLK_ACLK_ASYNCAXIM_CP0 109
+#define CLK_ACLK_ASYNCAXIS_DREX1_3 110
+#define CLK_ACLK_ASYNCAXIM_DREX1_3 111
+#define CLK_ACLK_ASYNCAXIS_DREX1_1 112
+#define CLK_ACLK_ASYNCAXIM_DREX1_1 113
+#define CLK_ACLK_ASYNCAXIS_DREX1_0 114
+#define CLK_ACLK_ASYNCAXIM_DREX1_0 115
+#define CLK_ACLK_ASYNCAXIS_DREX0_3 116
+#define CLK_ACLK_ASYNCAXIM_DREX0_3 117
+#define CLK_ACLK_ASYNCAXIS_DREX0_1 118
+#define CLK_ACLK_ASYNCAXIM_DREX0_1 119
+#define CLK_ACLK_ASYNCAXIS_DREX0_0 120
+#define CLK_ACLK_ASYNCAXIM_DREX0_0 121
+#define CLK_ACLK_AHB2APB_MIF2P 122
+#define CLK_ACLK_AHB2APB_MIF1P 123
+#define CLK_ACLK_AHB2APB_MIF0P 124
+#define CLK_ACLK_IXIU_CCI 125
+#define CLK_ACLK_XIU_MIFSFRX 126
+#define CLK_ACLK_MIFNP_133 127
+#define CLK_ACLK_MIFNM_200 128
+#define CLK_ACLK_MIFND_133 129
+#define CLK_ACLK_MIFND_400 130
+#define CLK_ACLK_CCI 131
+#define CLK_ACLK_MIFND_266 132
+#define CLK_ACLK_PPMU_DREX1S3 133
+#define CLK_ACLK_PPMU_DREX1S1 134
+#define CLK_ACLK_PPMU_DREX1S0 135
+#define CLK_ACLK_PPMU_DREX0S3 136
+#define CLK_ACLK_PPMU_DREX0S1 137
+#define CLK_ACLK_PPMU_DREX0S0 138
+#define CLK_ACLK_BTS_APOLLO 139
+#define CLK_ACLK_BTS_ATLAS 140
+#define CLK_ACLK_ACE_SEL_APOLL 141
+#define CLK_ACLK_ACE_SEL_ATLAS 142
+#define CLK_ACLK_AXIDS_CCI_MIFSFRX 143
+#define CLK_ACLK_AXIUS_ATLAS_CCI 144
+#define CLK_ACLK_AXISYNCDNS_CCI 145
+#define CLK_ACLK_AXISYNCDN_CCI 146
+#define CLK_ACLK_AXISYNCDN_NOC_D 147
+#define CLK_ACLK_ASYNCACEM_APOLLO_CCI 148
+#define CLK_ACLK_ASYNCACEM_ATLAS_CCI 149
+#define CLK_ACLK_ASYNCAPBS_MIF_CSSYS 150
+#define CLK_ACLK_BUS2_400 151
+#define CLK_ACLK_DISP_333 152
+#define CLK_ACLK_CPIF_200 153
+#define CLK_PCLK_PPMU_DREX1S3 154
+#define CLK_PCLK_PPMU_DREX1S1 155
+#define CLK_PCLK_PPMU_DREX1S0 156
+#define CLK_PCLK_PPMU_DREX0S3 157
+#define CLK_PCLK_PPMU_DREX0S1 158
+#define CLK_PCLK_PPMU_DREX0S0 159
+#define CLK_PCLK_BTS_APOLLO 160
+#define CLK_PCLK_BTS_ATLAS 161
+#define CLK_PCLK_ASYNCAXI_NOC_P_CCI 162
+#define CLK_PCLK_ASYNCAXI_CP1 163
+#define CLK_PCLK_ASYNCAXI_CP0 164
+#define CLK_PCLK_ASYNCAXI_DREX1_3 165
+#define CLK_PCLK_ASYNCAXI_DREX1_1 166
+#define CLK_PCLK_ASYNCAXI_DREX1_0 167
+#define CLK_PCLK_ASYNCAXI_DREX0_3 168
+#define CLK_PCLK_ASYNCAXI_DREX0_1 169
+#define CLK_PCLK_ASYNCAXI_DREX0_0 170
+#define CLK_PCLK_MIFSRVND_133 171
+#define CLK_PCLK_PMU_MIF 172
+#define CLK_PCLK_SYSREG_MIF 173
+#define CLK_PCLK_GPIO_ALIVE 174
+#define CLK_PCLK_ABB 175
+#define CLK_PCLK_PMU_APBIF 176
+#define CLK_PCLK_DDR_PHY1 177
+#define CLK_PCLK_DREX1 178
+#define CLK_PCLK_DDR_PHY0 179
+#define CLK_PCLK_DREX0 180
+#define CLK_PCLK_DREX0_TZ 181
+#define CLK_PCLK_DREX1_TZ 182
+#define CLK_PCLK_MONOTONIC_CNT 183
+#define CLK_PCLK_RTC 184
+#define CLK_SCLK_DSIM1_DISP 185
+#define CLK_SCLK_DECON_TV_VCLK_DISP 186
+#define CLK_SCLK_FREQ_DET_BUS_PLL 187
+#define CLK_SCLK_FREQ_DET_MFC_PLL 188
+#define CLK_SCLK_FREQ_DET_MEM0_PLL 189
+#define CLK_SCLK_FREQ_DET_MEM1_PLL 190
+#define CLK_SCLK_DSIM0_DISP 191
+#define CLK_SCLK_DSD_DISP 192
+#define CLK_SCLK_DECON_TV_ECLK_DISP 193
+#define CLK_SCLK_DECON_VCLK_DISP 194
+#define CLK_SCLK_DECON_ECLK_DISP 195
+#define CLK_SCLK_HPM_MIF 196
+#define CLK_SCLK_MFC_PLL 197
+#define CLK_SCLK_BUS_PLL 198
+#define CLK_SCLK_BUS_PLL_APOLLO 199
+#define CLK_SCLK_BUS_PLL_ATLAS 200
+
+#define MIF_NR_CLK 201
+
+/* CMU_PERIC */
+#define CLK_PCLK_SPI2 1
+#define CLK_PCLK_SPI1 2
+#define CLK_PCLK_SPI0 3
+#define CLK_PCLK_UART2 4
+#define CLK_PCLK_UART1 5
+#define CLK_PCLK_UART0 6
+#define CLK_PCLK_HSI2C3 7
+#define CLK_PCLK_HSI2C2 8
+#define CLK_PCLK_HSI2C1 9
+#define CLK_PCLK_HSI2C0 10
+#define CLK_PCLK_I2C7 11
+#define CLK_PCLK_I2C6 12
+#define CLK_PCLK_I2C5 13
+#define CLK_PCLK_I2C4 14
+#define CLK_PCLK_I2C3 15
+#define CLK_PCLK_I2C2 16
+#define CLK_PCLK_I2C1 17
+#define CLK_PCLK_I2C0 18
+#define CLK_PCLK_SPI4 19
+#define CLK_PCLK_SPI3 20
+#define CLK_PCLK_HSI2C11 21
+#define CLK_PCLK_HSI2C10 22
+#define CLK_PCLK_HSI2C9 23
+#define CLK_PCLK_HSI2C8 24
+#define CLK_PCLK_HSI2C7 25
+#define CLK_PCLK_HSI2C6 26
+#define CLK_PCLK_HSI2C5 27
+#define CLK_PCLK_HSI2C4 28
+#define CLK_SCLK_SPI4 29
+#define CLK_SCLK_SPI3 30
+#define CLK_SCLK_SPI2 31
+#define CLK_SCLK_SPI1 32
+#define CLK_SCLK_SPI0 33
+#define CLK_SCLK_UART2 34
+#define CLK_SCLK_UART1 35
+#define CLK_SCLK_UART0 36
+#define CLK_ACLK_AHB2APB_PERIC2P 37
+#define CLK_ACLK_AHB2APB_PERIC1P 38
+#define CLK_ACLK_AHB2APB_PERIC0P 39
+#define CLK_ACLK_PERICNP_66 40
+#define CLK_PCLK_SCI 41
+#define CLK_PCLK_GPIO_FINGER 42
+#define CLK_PCLK_GPIO_ESE 43
+#define CLK_PCLK_PWM 44
+#define CLK_PCLK_SPDIF 45
+#define CLK_PCLK_PCM1 46
+#define CLK_PCLK_I2S1 47
+#define CLK_PCLK_ADCIF 48
+#define CLK_PCLK_GPIO_TOUCH 49
+#define CLK_PCLK_GPIO_NFC 50
+#define CLK_PCLK_GPIO_PERIC 51
+#define CLK_PCLK_PMU_PERIC 52
+#define CLK_PCLK_SYSREG_PERIC 53
+#define CLK_SCLK_IOCLK_SPI4 54
+#define CLK_SCLK_IOCLK_SPI3 55
+#define CLK_SCLK_SCI 56
+#define CLK_SCLK_SC_IN 57
+#define CLK_SCLK_PWM 58
+#define CLK_SCLK_IOCLK_SPI2 59
+#define CLK_SCLK_IOCLK_SPI1 60
+#define CLK_SCLK_IOCLK_SPI0 61
+#define CLK_SCLK_IOCLK_I2S1_BCLK 62
+#define CLK_SCLK_SPDIF 63
+#define CLK_SCLK_PCM1 64
+#define CLK_SCLK_I2S1 65
+
+#define CLK_DIV_SCLK_SCI 70
+#define CLK_DIV_SCLK_SC_IN 71
+
+#define PERIC_NR_CLK 72
+
+/* CMU_PERIS */
+#define CLK_PCLK_HPM_APBIF 1
+#define CLK_PCLK_TMU1_APBIF 2
+#define CLK_PCLK_TMU0_APBIF 3
+#define CLK_PCLK_PMU_PERIS 4
+#define CLK_PCLK_SYSREG_PERIS 5
+#define CLK_PCLK_CMU_TOP_APBIF 6
+#define CLK_PCLK_WDT_APOLLO 7
+#define CLK_PCLK_WDT_ATLAS 8
+#define CLK_PCLK_MCT 9
+#define CLK_PCLK_HDMI_CEC 10
+#define CLK_ACLK_AHB2APB_PERIS1P 11
+#define CLK_ACLK_AHB2APB_PERIS0P 12
+#define CLK_ACLK_PERISNP_66 13
+#define CLK_PCLK_TZPC12 14
+#define CLK_PCLK_TZPC11 15
+#define CLK_PCLK_TZPC10 16
+#define CLK_PCLK_TZPC9 17
+#define CLK_PCLK_TZPC8 18
+#define CLK_PCLK_TZPC7 19
+#define CLK_PCLK_TZPC6 20
+#define CLK_PCLK_TZPC5 21
+#define CLK_PCLK_TZPC4 22
+#define CLK_PCLK_TZPC3 23
+#define CLK_PCLK_TZPC2 24
+#define CLK_PCLK_TZPC1 25
+#define CLK_PCLK_TZPC0 26
+#define CLK_PCLK_SECKEY_APBIF 27
+#define CLK_PCLK_CHIPID_APBIF 28
+#define CLK_PCLK_TOPRTC 29
+#define CLK_PCLK_CUSTOM_EFUSE_APBIF 30
+#define CLK_PCLK_ANTIRBK_CNT_APBIF 31
+#define CLK_PCLK_OTP_CON_APBIF 32
+#define CLK_SCLK_ASV_TB 33
+#define CLK_SCLK_TMU1 34
+#define CLK_SCLK_TMU0 35
+#define CLK_SCLK_SECKEY 36
+#define CLK_SCLK_CHIPID 37
+#define CLK_SCLK_TOPRTC 38
+#define CLK_SCLK_CUSTOM_EFUSE 39
+#define CLK_SCLK_ANTIRBK_CNT 40
+#define CLK_SCLK_OTP_CON 41
+
+#define PERIS_NR_CLK 42
+
+/* CMU_FSYS */
+#define CLK_MOUT_ACLK_FSYS_200_USER 1
+#define CLK_MOUT_SCLK_MMC2_USER 2
+#define CLK_MOUT_SCLK_MMC1_USER 3
+#define CLK_MOUT_SCLK_MMC0_USER 4
+#define CLK_MOUT_SCLK_UFS_MPHY_USER 5
+#define CLK_MOUT_SCLK_PCIE_100_USER 6
+#define CLK_MOUT_SCLK_UFSUNIPRO_USER 7
+#define CLK_MOUT_SCLK_USBHOST30_USER 8
+#define CLK_MOUT_SCLK_USBDRD30_USER 9
+#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_USER 10
+#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_USER 11
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_HSIC1_USER 12
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_CLK48MOHCI_USER 13
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHYCLOCK_USER 14
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHY_FREECLK_USER 15
+#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_USER 16
+#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_USER 17
+#define CLK_MOUT_PHYCLK_UFS_RX1_SYMBOL_USER 18
+#define CLK_MOUT_PHYCLK_UFS_RX0_SYMBOL_USER 19
+#define CLK_MOUT_PHYCLK_UFS_TX1_SYMBOL_USER 20
+#define CLK_MOUT_PHYCLK_UFS_TX0_SYMBOL_USER 21
+#define CLK_MOUT_PHYCLK_LLI_MPHY_TO_UFS_USER 22
+#define CLK_MOUT_SCLK_MPHY 23
+
+#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY 25
+#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY 26
+#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY 27
+#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY 28
+#define CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY 29
+#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY 30
+#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY 31
+#define CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY 32
+#define CLK_PHYCLK_UFS_TX0_SYMBOL_PHY 33
+#define CLK_PHYCLK_UFS_RX0_SYMBOL_PHY 34
+#define CLK_PHYCLK_UFS_TX1_SYMBOL_PHY 35
+#define CLK_PHYCLK_UFS_RX1_SYMBOL_PHY 36
+#define CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY 37
+
+#define CLK_ACLK_PCIE 50
+#define CLK_ACLK_PDMA1 51
+#define CLK_ACLK_TSI 52
+#define CLK_ACLK_MMC2 53
+#define CLK_ACLK_MMC1 54
+#define CLK_ACLK_MMC0 55
+#define CLK_ACLK_UFS 56
+#define CLK_ACLK_USBHOST20 57
+#define CLK_ACLK_USBHOST30 58
+#define CLK_ACLK_USBDRD30 59
+#define CLK_ACLK_PDMA0 60
+#define CLK_SCLK_MMC2 61
+#define CLK_SCLK_MMC1 62
+#define CLK_SCLK_MMC0 63
+#define CLK_PDMA1 64
+#define CLK_PDMA0 65
+#define CLK_ACLK_XIU_FSYSPX 66
+#define CLK_ACLK_AHB_USBLINKH1 67
+#define CLK_ACLK_SMMU_PDMA1 68
+#define CLK_ACLK_BTS_PCIE 69
+#define CLK_ACLK_AXIUS_PDMA1 70
+#define CLK_ACLK_SMMU_PDMA0 71
+#define CLK_ACLK_BTS_UFS 72
+#define CLK_ACLK_BTS_USBHOST30 73
+#define CLK_ACLK_BTS_USBDRD30 74
+#define CLK_ACLK_AXIUS_PDMA0 75
+#define CLK_ACLK_AXIUS_USBHS 76
+#define CLK_ACLK_AXIUS_FSYSSX 77
+#define CLK_ACLK_AHB2APB_FSYSP 78
+#define CLK_ACLK_AHB2AXI_USBHS 79
+#define CLK_ACLK_AHB_USBLINKH0 80
+#define CLK_ACLK_AHB_USBHS 81
+#define CLK_ACLK_AHB_FSYSH 82
+#define CLK_ACLK_XIU_FSYSX 83
+#define CLK_ACLK_XIU_FSYSSX 84
+#define CLK_ACLK_FSYSNP_200 85
+#define CLK_ACLK_FSYSND_200 86
+#define CLK_PCLK_PCIE_CTRL 87
+#define CLK_PCLK_SMMU_PDMA1 88
+#define CLK_PCLK_PCIE_PHY 89
+#define CLK_PCLK_BTS_PCIE 90
+#define CLK_PCLK_SMMU_PDMA0 91
+#define CLK_PCLK_BTS_UFS 92
+#define CLK_PCLK_BTS_USBHOST30 93
+#define CLK_PCLK_BTS_USBDRD30 94
+#define CLK_PCLK_GPIO_FSYS 95
+#define CLK_PCLK_PMU_FSYS 96
+#define CLK_PCLK_SYSREG_FSYS 97
+#define CLK_SCLK_PCIE_100 98
+#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK 99
+#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK 100
+#define CLK_PHYCLK_UFS_RX1_SYMBOL 101
+#define CLK_PHYCLK_UFS_RX0_SYMBOL 102
+#define CLK_PHYCLK_UFS_TX1_SYMBOL 103
+#define CLK_PHYCLK_UFS_TX0_SYMBOL 104
+#define CLK_PHYCLK_USBHOST20_PHY_HSIC1 105
+#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI 106
+#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK 107
+#define CLK_PHYCLK_USBHOST20_PHY_FREECLK 108
+#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 109
+#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK 110
+#define CLK_SCLK_MPHY 111
+#define CLK_SCLK_UFSUNIPRO 112
+#define CLK_SCLK_USBHOST30 113
+#define CLK_SCLK_USBDRD30 114
+
+#define FSYS_NR_CLK 115
+
+/* CMU_G2D */
+#define CLK_MUX_ACLK_G2D_266_USER 1
+#define CLK_MUX_ACLK_G2D_400_USER 2
+
+#define CLK_DIV_PCLK_G2D 3
+
+#define CLK_ACLK_SMMU_MDMA1 4
+#define CLK_ACLK_BTS_MDMA1 5
+#define CLK_ACLK_BTS_G2D 6
+#define CLK_ACLK_ALB_G2D 7
+#define CLK_ACLK_AXIUS_G2DX 8
+#define CLK_ACLK_ASYNCAXI_SYSX 9
+#define CLK_ACLK_AHB2APB_G2D1P 10
+#define CLK_ACLK_AHB2APB_G2D0P 11
+#define CLK_ACLK_XIU_G2DX 12
+#define CLK_ACLK_G2DNP_133 13
+#define CLK_ACLK_G2DND_400 14
+#define CLK_ACLK_MDMA1 15
+#define CLK_ACLK_G2D 16
+#define CLK_ACLK_SMMU_G2D 17
+#define CLK_PCLK_SMMU_MDMA1 18
+#define CLK_PCLK_BTS_MDMA1 19
+#define CLK_PCLK_BTS_G2D 20
+#define CLK_PCLK_ALB_G2D 21
+#define CLK_PCLK_ASYNCAXI_SYSX 22
+#define CLK_PCLK_PMU_G2D 23
+#define CLK_PCLK_SYSREG_G2D 24
+#define CLK_PCLK_G2D 25
+#define CLK_PCLK_SMMU_G2D 26
+
+#define G2D_NR_CLK 27
+
+/* CMU_DISP */
+#define CLK_FOUT_DISP_PLL 1
+
+#define CLK_MOUT_DISP_PLL 2
+#define CLK_MOUT_SCLK_DSIM1_USER 3
+#define CLK_MOUT_SCLK_DSIM0_USER 4
+#define CLK_MOUT_SCLK_DSD_USER 5
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_USER 6
+#define CLK_MOUT_SCLK_DECON_VCLK_USER 7
+#define CLK_MOUT_SCLK_DECON_ECLK_USER 8
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_USER 9
+#define CLK_MOUT_ACLK_DISP_333_USER 10
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_BITCLKDIV8_USER 11
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_RXCLKESC0_USER 12
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER 13
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER 14
+#define CLK_MOUT_PHYCLK_HDMIPHY_TMDS_CLKO_USER 15
+#define CLK_MOUT_PHYCLK_HDMIPHY_PIXEL_CLKO_USER 16
+#define CLK_MOUT_SCLK_DSIM0 17
+#define CLK_MOUT_SCLK_DECON_TV_ECLK 18
+#define CLK_MOUT_SCLK_DECON_VCLK 19
+#define CLK_MOUT_SCLK_DECON_ECLK 20
+#define CLK_MOUT_SCLK_DSIM1_B_DISP 21
+#define CLK_MOUT_SCLK_DSIM1_A_DISP 22
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_C_DISP 23
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_B_DISP 24
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_A_DISP 25
+
+#define CLK_DIV_SCLK_DSIM1_DISP 30
+#define CLK_DIV_SCLK_DECON_TV_VCLK_DISP 31
+#define CLK_DIV_SCLK_DSIM0_DISP 32
+#define CLK_DIV_SCLK_DECON_TV_ECLK_DISP 33
+#define CLK_DIV_SCLK_DECON_VCLK_DISP 34
+#define CLK_DIV_SCLK_DECON_ECLK_DISP 35
+#define CLK_DIV_PCLK_DISP 36
+
+#define CLK_ACLK_DECON_TV 40
+#define CLK_ACLK_DECON 41
+#define CLK_ACLK_SMMU_TV1X 42
+#define CLK_ACLK_SMMU_TV0X 43
+#define CLK_ACLK_SMMU_DECON1X 44
+#define CLK_ACLK_SMMU_DECON0X 45
+#define CLK_ACLK_BTS_DECON_TV_M3 46
+#define CLK_ACLK_BTS_DECON_TV_M2 47
+#define CLK_ACLK_BTS_DECON_TV_M1 48
+#define CLK_ACLK_BTS_DECON_TV_M0 49
+#define CLK_ACLK_BTS_DECON_NM4 50
+#define CLK_ACLK_BTS_DECON_NM3 51
+#define CLK_ACLK_BTS_DECON_NM2 52
+#define CLK_ACLK_BTS_DECON_NM1 53
+#define CLK_ACLK_BTS_DECON_NM0 54
+#define CLK_ACLK_AHB2APB_DISPSFR2P 55
+#define CLK_ACLK_AHB2APB_DISPSFR1P 56
+#define CLK_ACLK_AHB2APB_DISPSFR0P 57
+#define CLK_ACLK_AHB_DISPH 58
+#define CLK_ACLK_XIU_TV1X 59
+#define CLK_ACLK_XIU_TV0X 60
+#define CLK_ACLK_XIU_DECON1X 61
+#define CLK_ACLK_XIU_DECON0X 62
+#define CLK_ACLK_XIU_DISP1X 63
+#define CLK_ACLK_XIU_DISPNP_100 64
+#define CLK_ACLK_DISP1ND_333 65
+#define CLK_ACLK_DISP0ND_333 66
+#define CLK_PCLK_SMMU_TV1X 67
+#define CLK_PCLK_SMMU_TV0X 68
+#define CLK_PCLK_SMMU_DECON1X 69
+#define CLK_PCLK_SMMU_DECON0X 70
+#define CLK_PCLK_BTS_DECON_TV_M3 71
+#define CLK_PCLK_BTS_DECON_TV_M2 72
+#define CLK_PCLK_BTS_DECON_TV_M1 73
+#define CLK_PCLK_BTS_DECON_TV_M0 74
+#define CLK_PCLK_BTS_DECONM4 75
+#define CLK_PCLK_BTS_DECONM3 76
+#define CLK_PCLK_BTS_DECONM2 77
+#define CLK_PCLK_BTS_DECONM1 78
+#define CLK_PCLK_BTS_DECONM0 79
+#define CLK_PCLK_MIC1 80
+#define CLK_PCLK_PMU_DISP 81
+#define CLK_PCLK_SYSREG_DISP 82
+#define CLK_PCLK_HDMIPHY 83
+#define CLK_PCLK_HDMI 84
+#define CLK_PCLK_MIC0 85
+#define CLK_PCLK_DSIM1 86
+#define CLK_PCLK_DSIM0 87
+#define CLK_PCLK_DECON_TV 88
+#define CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8 89
+#define CLK_PHYCLK_MIPIDPHY1_RXCLKESC0 90
+#define CLK_SCLK_RGB_TV_VCLK_TO_DSIM1 91
+#define CLK_SCLK_RGB_TV_VCLK_TO_MIC1 92
+#define CLK_SCLK_DSIM1 93
+#define CLK_SCLK_DECON_TV_VCLK 94
+#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8 95
+#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0 96
+#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO 97
+#define CLK_PHYCLK_HDMI_PIXEL 98
+#define CLK_SCLK_RGB_VCLK_TO_SMIES 99
+#define CLK_SCLK_FREQ_DET_DISP_PLL 100
+#define CLK_SCLK_RGB_VCLK_TO_DSIM0 101
+#define CLK_SCLK_RGB_VCLK_TO_MIC0 102
+#define CLK_SCLK_DSD 103
+#define CLK_SCLK_HDMI_SPDIF 104
+#define CLK_SCLK_DSIM0 105
+#define CLK_SCLK_DECON_TV_ECLK 106
+#define CLK_SCLK_DECON_VCLK 107
+#define CLK_SCLK_DECON_ECLK 108
+#define CLK_SCLK_RGB_VCLK 109
+#define CLK_SCLK_RGB_TV_VCLK 110
+
+#define DISP_NR_CLK 111
+
+/* CMU_AUD */
+#define CLK_MOUT_AUD_PLL_USER 1
+#define CLK_MOUT_SCLK_AUD_PCM 2
+#define CLK_MOUT_SCLK_AUD_I2S 3
+
+#define CLK_DIV_ATCLK_AUD 4
+#define CLK_DIV_PCLK_DBG_AUD 5
+#define CLK_DIV_ACLK_AUD 6
+#define CLK_DIV_AUD_CA5 7
+#define CLK_DIV_SCLK_AUD_SLIMBUS 8
+#define CLK_DIV_SCLK_AUD_UART 9
+#define CLK_DIV_SCLK_AUD_PCM 10
+#define CLK_DIV_SCLK_AUD_I2S 11
+
+#define CLK_ACLK_INTR_CTRL 12
+#define CLK_ACLK_AXIDS2_LPASSP 13
+#define CLK_ACLK_AXIDS1_LPASSP 14
+#define CLK_ACLK_AXI2APB1_LPASSP 15
+#define CLK_ACLK_AXI2APH_LPASSP 16
+#define CLK_ACLK_SMMU_LPASSX 17
+#define CLK_ACLK_AXIDS0_LPASSP 18
+#define CLK_ACLK_AXI2APB0_LPASSP 19
+#define CLK_ACLK_XIU_LPASSX 20
+#define CLK_ACLK_AUDNP_133 21
+#define CLK_ACLK_AUDND_133 22
+#define CLK_ACLK_SRAMC 23
+#define CLK_ACLK_DMAC 24
+#define CLK_PCLK_WDT1 25
+#define CLK_PCLK_WDT0 26
+#define CLK_PCLK_SFR1 27
+#define CLK_PCLK_SMMU_LPASSX 28
+#define CLK_PCLK_GPIO_AUD 29
+#define CLK_PCLK_PMU_AUD 30
+#define CLK_PCLK_SYSREG_AUD 31
+#define CLK_PCLK_AUD_SLIMBUS 32
+#define CLK_PCLK_AUD_UART 33
+#define CLK_PCLK_AUD_PCM 34
+#define CLK_PCLK_AUD_I2S 35
+#define CLK_PCLK_TIMER 36
+#define CLK_PCLK_SFR0_CTRL 37
+#define CLK_ATCLK_AUD 38
+#define CLK_PCLK_DBG_AUD 39
+#define CLK_SCLK_AUD_CA5 40
+#define CLK_SCLK_JTAG_TCK 41
+#define CLK_SCLK_SLIMBUS_CLKIN 42
+#define CLK_SCLK_AUD_SLIMBUS 43
+#define CLK_SCLK_AUD_UART 44
+#define CLK_SCLK_AUD_PCM 45
+#define CLK_SCLK_I2S_BCLK 46
+#define CLK_SCLK_AUD_I2S 47
+
+#define AUD_NR_CLK 48
+
+/* CMU_BUS{0|1|2} */
+#define CLK_DIV_PCLK_BUS_133 1
+
+#define CLK_ACLK_AHB2APB_BUSP 2
+#define CLK_ACLK_BUSNP_133 3
+#define CLK_ACLK_BUSND_400 4
+#define CLK_PCLK_BUSSRVND_133 5
+#define CLK_PCLK_PMU_BUS 6
+#define CLK_PCLK_SYSREG_BUS 7
+
+#define CLK_MOUT_ACLK_BUS2_400_USER 8 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
+
+#define BUSx_NR_CLK 11
+
+/* CMU_G3D */
+#define CLK_FOUT_G3D_PLL 1
+
+#define CLK_MOUT_ACLK_G3D_400 2
+#define CLK_MOUT_G3D_PLL 3
+
+#define CLK_DIV_SCLK_HPM_G3D 4
+#define CLK_DIV_PCLK_G3D 5
+#define CLK_DIV_ACLK_G3D 6
+#define CLK_ACLK_BTS_G3D1 7
+#define CLK_ACLK_BTS_G3D0 8
+#define CLK_ACLK_ASYNCAPBS_G3D 9
+#define CLK_ACLK_ASYNCAPBM_G3D 10
+#define CLK_ACLK_AHB2APB_G3DP 11
+#define CLK_ACLK_G3DNP_150 12
+#define CLK_ACLK_G3DND_600 13
+#define CLK_ACLK_G3D 14
+#define CLK_PCLK_BTS_G3D1 15
+#define CLK_PCLK_BTS_G3D0 16
+#define CLK_PCLK_PMU_G3D 17
+#define CLK_PCLK_SYSREG_G3D 18
+#define CLK_SCLK_HPM_G3D 19
+
+#define G3D_NR_CLK 20
+
+/* CMU_GSCL */
+#define CLK_MOUT_ACLK_GSCL_111_USER 1
+#define CLK_MOUT_ACLK_GSCL_333_USER 2
+
+#define CLK_ACLK_BTS_GSCL2 3
+#define CLK_ACLK_BTS_GSCL1 4
+#define CLK_ACLK_BTS_GSCL0 5
+#define CLK_ACLK_AHB2APB_GSCLP 6
+#define CLK_ACLK_XIU_GSCLX 7
+#define CLK_ACLK_GSCLNP_111 8
+#define CLK_ACLK_GSCLRTND_333 9
+#define CLK_ACLK_GSCLBEND_333 10
+#define CLK_ACLK_GSD 11
+#define CLK_ACLK_GSCL2 12
+#define CLK_ACLK_GSCL1 13
+#define CLK_ACLK_GSCL0 14
+#define CLK_ACLK_SMMU_GSCL0 15
+#define CLK_ACLK_SMMU_GSCL1 16
+#define CLK_ACLK_SMMU_GSCL2 17
+#define CLK_PCLK_BTS_GSCL2 18
+#define CLK_PCLK_BTS_GSCL1 19
+#define CLK_PCLK_BTS_GSCL0 20
+#define CLK_PCLK_PMU_GSCL 21
+#define CLK_PCLK_SYSREG_GSCL 22
+#define CLK_PCLK_GSCL2 23
+#define CLK_PCLK_GSCL1 24
+#define CLK_PCLK_GSCL0 25
+#define CLK_PCLK_SMMU_GSCL0 26
+#define CLK_PCLK_SMMU_GSCL1 27
+#define CLK_PCLK_SMMU_GSCL2 28
+
+#define GSCL_NR_CLK 29
+
+/* CMU_APOLLO */
+#define CLK_FOUT_APOLLO_PLL 1
+
+#define CLK_MOUT_APOLLO_PLL 2
+#define CLK_MOUT_BUS_PLL_APOLLO_USER 3
+#define CLK_MOUT_APOLLO 4
+
+#define CLK_DIV_CNTCLK_APOLLO 5
+#define CLK_DIV_PCLK_DBG_APOLLO 6
+#define CLK_DIV_ATCLK_APOLLO 7
+#define CLK_DIV_PCLK_APOLLO 8
+#define CLK_DIV_ACLK_APOLLO 9
+#define CLK_DIV_APOLLO2 10
+#define CLK_DIV_APOLLO1 11
+#define CLK_DIV_SCLK_HPM_APOLLO 12
+#define CLK_DIV_APOLLO_PLL 13
+
+#define CLK_ACLK_ATBDS_APOLLO_3 14
+#define CLK_ACLK_ATBDS_APOLLO_2 15
+#define CLK_ACLK_ATBDS_APOLLO_1 16
+#define CLK_ACLK_ATBDS_APOLLO_0 17
+#define CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS 18
+#define CLK_ACLK_ASATBSLV_APOLLO_2_CSSYS 19
+#define CLK_ACLK_ASATBSLV_APOLLO_1_CSSYS 20
+#define CLK_ACLK_ASATBSLV_APOLLO_0_CSSYS 21
+#define CLK_ACLK_ASYNCACES_APOLLO_CCI 22
+#define CLK_ACLK_AHB2APB_APOLLOP 23
+#define CLK_ACLK_APOLLONP_200 24
+#define CLK_PCLK_ASAPBMST_CSSYS_APOLLO 25
+#define CLK_PCLK_PMU_APOLLO 26
+#define CLK_PCLK_SYSREG_APOLLO 27
+#define CLK_CNTCLK_APOLLO 28
+#define CLK_SCLK_HPM_APOLLO 29
+#define CLK_SCLK_APOLLO 30
+
+#define APOLLO_NR_CLK 31
+
+/* CMU_ATLAS */
+#define CLK_FOUT_ATLAS_PLL 1
+
+#define CLK_MOUT_ATLAS_PLL 2
+#define CLK_MOUT_BUS_PLL_ATLAS_USER 3
+#define CLK_MOUT_ATLAS 4
+
+#define CLK_DIV_CNTCLK_ATLAS 5
+#define CLK_DIV_PCLK_DBG_ATLAS 6
+#define CLK_DIV_ATCLK_ATLASO 7
+#define CLK_DIV_PCLK_ATLAS 8
+#define CLK_DIV_ACLK_ATLAS 9
+#define CLK_DIV_ATLAS2 10
+#define CLK_DIV_ATLAS1 11
+#define CLK_DIV_SCLK_HPM_ATLAS 12
+#define CLK_DIV_ATLAS_PLL 13
+
+#define CLK_ACLK_ATB_AUD_CSSYS 14
+#define CLK_ACLK_ATB_APOLLO3_CSSYS 15
+#define CLK_ACLK_ATB_APOLLO2_CSSYS 16
+#define CLK_ACLK_ATB_APOLLO1_CSSYS 17
+#define CLK_ACLK_ATB_APOLLO0_CSSYS 18
+#define CLK_ACLK_ASYNCAHBS_CSSYS_SSS 19
+#define CLK_ACLK_ASYNCAXIS_CSSYS_CCIX 20
+#define CLK_ACLK_ASYNCACES_ATLAS_CCI 21
+#define CLK_ACLK_AHB2APB_ATLASP 22
+#define CLK_ACLK_ATLASNP_200 23
+#define CLK_PCLK_ASYNCAPB_AUD_CSSYS 24
+#define CLK_PCLK_ASYNCAPB_ISP_CSSYS 25
+#define CLK_PCLK_ASYNCAPB_APOLLO_CSSYS 26
+#define CLK_PCLK_PMU_ATLAS 27
+#define CLK_PCLK_SYSREG_ATLAS 28
+#define CLK_PCLK_SECJTAG 29
+#define CLK_CNTCLK_ATLAS 30
+#define CLK_SCLK_FREQ_DET_ATLAS_PLL 31
+#define CLK_SCLK_HPM_ATLAS 32
+#define CLK_TRACECLK 33
+#define CLK_CTMCLK 34
+#define CLK_HCLK_CSSYS 35
+#define CLK_PCLK_DBG_CSSYS 36
+#define CLK_PCLK_DBG 37
+#define CLK_ATCLK 38
+#define CLK_SCLK_ATLAS 39
+
+#define ATLAS_NR_CLK 40
+
+/* CMU_MSCL */
+#define CLK_MOUT_SCLK_JPEG_USER 1
+#define CLK_MOUT_ACLK_MSCL_400_USER 2
+#define CLK_MOUT_SCLK_JPEG 3
+
+#define CLK_DIV_PCLK_MSCL 4
+
+#define CLK_ACLK_BTS_JPEG 5
+#define CLK_ACLK_BTS_M2MSCALER1 6
+#define CLK_ACLK_BTS_M2MSCALER0 7
+#define CLK_ACLK_AHB2APB_MSCL0P 8
+#define CLK_ACLK_XIU_MSCLX 9
+#define CLK_ACLK_MSCLNP_100 10
+#define CLK_ACLK_MSCLND_400 11
+#define CLK_ACLK_JPEG 12
+#define CLK_ACLK_M2MSCALER1 13
+#define CLK_ACLK_M2MSCALER0 14
+#define CLK_ACLK_SMMU_M2MSCALER0 15
+#define CLK_ACLK_SMMU_M2MSCALER1 16
+#define CLK_ACLK_SMMU_JPEG 17
+#define CLK_PCLK_BTS_JPEG 18
+#define CLK_PCLK_BTS_M2MSCALER1 19
+#define CLK_PCLK_BTS_M2MSCALER0 20
+#define CLK_PCLK_PMU_MSCL 21
+#define CLK_PCLK_SYSREG_MSCL 22
+#define CLK_PCLK_JPEG 23
+#define CLK_PCLK_M2MSCALER1 24
+#define CLK_PCLK_M2MSCALER0 25
+#define CLK_PCLK_SMMU_M2MSCALER0 26
+#define CLK_PCLK_SMMU_M2MSCALER1 27
+#define CLK_PCLK_SMMU_JPEG 28
+#define CLK_SCLK_JPEG 29
+
+#define MSCL_NR_CLK 30
+
+/* CMU_MFC */
+#define CLK_MOUT_ACLK_MFC_400_USER 1
+
+#define CLK_DIV_PCLK_MFC 2
+
+#define CLK_ACLK_BTS_MFC_1 3
+#define CLK_ACLK_BTS_MFC_0 4
+#define CLK_ACLK_AHB2APB_MFCP 5
+#define CLK_ACLK_XIU_MFCX 6
+#define CLK_ACLK_MFCNP_100 7
+#define CLK_ACLK_MFCND_400 8
+#define CLK_ACLK_MFC 9
+#define CLK_ACLK_SMMU_MFC_1 10
+#define CLK_ACLK_SMMU_MFC_0 11
+#define CLK_PCLK_BTS_MFC_1 12
+#define CLK_PCLK_BTS_MFC_0 13
+#define CLK_PCLK_PMU_MFC 14
+#define CLK_PCLK_SYSREG_MFC 15
+#define CLK_PCLK_MFC 16
+#define CLK_PCLK_SMMU_MFC_1 17
+#define CLK_PCLK_SMMU_MFC_0 18
+
+#define MFC_NR_CLK 19
+
+/* CMU_HEVC */
+#define CLK_MOUT_ACLK_HEVC_400_USER 1
+
+#define CLK_DIV_PCLK_HEVC 2
+
+#define CLK_ACLK_BTS_HEVC_1 3
+#define CLK_ACLK_BTS_HEVC_0 4
+#define CLK_ACLK_AHB2APB_HEVCP 5
+#define CLK_ACLK_XIU_HEVCX 6
+#define CLK_ACLK_HEVCNP_100 7
+#define CLK_ACLK_HEVCND_400 8
+#define CLK_ACLK_HEVC 9
+#define CLK_ACLK_SMMU_HEVC_1 10
+#define CLK_ACLK_SMMU_HEVC_0 11
+#define CLK_PCLK_BTS_HEVC_1 12
+#define CLK_PCLK_BTS_HEVC_0 13
+#define CLK_PCLK_PMU_HEVC 14
+#define CLK_PCLK_SYSREG_HEVC 15
+#define CLK_PCLK_HEVC 16
+#define CLK_PCLK_SMMU_HEVC_1 17
+#define CLK_PCLK_SMMU_HEVC_0 18
+
+#define HEVC_NR_CLK 19
+
+/* CMU_ISP */
+#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
+#define CLK_MOUT_ACLK_ISP_400_USER 2
+
+#define CLK_DIV_PCLK_ISP_DIS 3
+#define CLK_DIV_PCLK_ISP 4
+#define CLK_DIV_ACLK_ISP_D_200 5
+#define CLK_DIV_ACLK_ISP_C_200 6
+
+#define CLK_ACLK_ISP_D_GLUE 7
+#define CLK_ACLK_SCALERP 8
+#define CLK_ACLK_3DNR 9
+#define CLK_ACLK_DIS 10
+#define CLK_ACLK_SCALERC 11
+#define CLK_ACLK_DRC 12
+#define CLK_ACLK_ISP 13
+#define CLK_ACLK_AXIUS_SCALERP 14
+#define CLK_ACLK_AXIUS_SCALERC 15
+#define CLK_ACLK_AXIUS_DRC 16
+#define CLK_ACLK_ASYNCAHBM_ISP2P 17
+#define CLK_ACLK_ASYNCAHBM_ISP1P 18
+#define CLK_ACLK_ASYNCAXIS_DIS1 19
+#define CLK_ACLK_ASYNCAXIS_DIS0 20
+#define CLK_ACLK_ASYNCAXIM_DIS1 21
+#define CLK_ACLK_ASYNCAXIM_DIS0 22
+#define CLK_ACLK_ASYNCAXIM_ISP2P 23
+#define CLK_ACLK_ASYNCAXIM_ISP1P 24
+#define CLK_ACLK_AHB2APB_ISP2P 25
+#define CLK_ACLK_AHB2APB_ISP1P 26
+#define CLK_ACLK_AXI2APB_ISP2P 27
+#define CLK_ACLK_AXI2APB_ISP1P 28
+#define CLK_ACLK_XIU_ISPEX1 29
+#define CLK_ACLK_XIU_ISPEX0 30
+#define CLK_ACLK_ISPND_400 31
+#define CLK_ACLK_SMMU_SCALERP 32
+#define CLK_ACLK_SMMU_3DNR 33
+#define CLK_ACLK_SMMU_DIS1 34
+#define CLK_ACLK_SMMU_DIS0 35
+#define CLK_ACLK_SMMU_SCALERC 36
+#define CLK_ACLK_SMMU_DRC 37
+#define CLK_ACLK_SMMU_ISP 38
+#define CLK_ACLK_BTS_SCALERP 39
+#define CLK_ACLK_BTS_3DR 40
+#define CLK_ACLK_BTS_DIS1 41
+#define CLK_ACLK_BTS_DIS0 42
+#define CLK_ACLK_BTS_SCALERC 43
+#define CLK_ACLK_BTS_DRC 44
+#define CLK_ACLK_BTS_ISP 45
+#define CLK_PCLK_SMMU_SCALERP 46
+#define CLK_PCLK_SMMU_3DNR 47
+#define CLK_PCLK_SMMU_DIS1 48
+#define CLK_PCLK_SMMU_DIS0 49
+#define CLK_PCLK_SMMU_SCALERC 50
+#define CLK_PCLK_SMMU_DRC 51
+#define CLK_PCLK_SMMU_ISP 52
+#define CLK_PCLK_BTS_SCALERP 53
+#define CLK_PCLK_BTS_3DNR 54
+#define CLK_PCLK_BTS_DIS1 55
+#define CLK_PCLK_BTS_DIS0 56
+#define CLK_PCLK_BTS_SCALERC 57
+#define CLK_PCLK_BTS_DRC 58
+#define CLK_PCLK_BTS_ISP 59
+#define CLK_PCLK_ASYNCAXI_DIS1 60
+#define CLK_PCLK_ASYNCAXI_DIS0 61
+#define CLK_PCLK_PMU_ISP 62
+#define CLK_PCLK_SYSREG_ISP 63
+#define CLK_PCLK_CMU_ISP_LOCAL 64
+#define CLK_PCLK_SCALERP 65
+#define CLK_PCLK_3DNR 66
+#define CLK_PCLK_DIS_CORE 67
+#define CLK_PCLK_DIS 68
+#define CLK_PCLK_SCALERC 69
+#define CLK_PCLK_DRC 70
+#define CLK_PCLK_ISP 71
+#define CLK_SCLK_PIXELASYNCS_DIS 72
+#define CLK_SCLK_PIXELASYNCM_DIS 73
+#define CLK_SCLK_PIXELASYNCS_SCALERP 74
+#define CLK_SCLK_PIXELASYNCM_ISPD 75
+#define CLK_SCLK_PIXELASYNCS_ISPC 76
+#define CLK_SCLK_PIXELASYNCM_ISPC 77
+
+#define ISP_NR_CLK 78
+
+/* CMU_CAM0 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
+
+#define CLK_MOUT_ACLK_CAM0_333_USER 3
+#define CLK_MOUT_ACLK_CAM0_400_USER 4
+#define CLK_MOUT_ACLK_CAM0_552_USER 5
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S4_USER 6
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2A_USER 7
+#define CLK_MOUT_ACLK_LITE_D_B 8
+#define CLK_MOUT_ACLK_LITE_D_A 9
+#define CLK_MOUT_ACLK_LITE_B_B 10
+#define CLK_MOUT_ACLK_LITE_B_A 11
+#define CLK_MOUT_ACLK_LITE_A_B 12
+#define CLK_MOUT_ACLK_LITE_A_A 13
+#define CLK_MOUT_ACLK_CAM0_400 14
+#define CLK_MOUT_ACLK_CSIS1_B 15
+#define CLK_MOUT_ACLK_CSIS1_A 16
+#define CLK_MOUT_ACLK_CSIS0_B 17
+#define CLK_MOUT_ACLK_CSIS0_A 18
+#define CLK_MOUT_ACLK_3AA1_B 19
+#define CLK_MOUT_ACLK_3AA1_A 20
+#define CLK_MOUT_ACLK_3AA0_B 21
+#define CLK_MOUT_ACLK_3AA0_A 22
+#define CLK_MOUT_SCLK_LITE_FREECNT_C 23
+#define CLK_MOUT_SCLK_LITE_FREECNT_B 24
+#define CLK_MOUT_SCLK_LITE_FREECNT_A 25
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B 26
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A 27
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B 28
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A 29
+
+#define CLK_DIV_PCLK_CAM0_50 30
+#define CLK_DIV_ACLK_CAM0_200 31
+#define CLK_DIV_ACLK_CAM0_BUS_400 32
+#define CLK_DIV_PCLK_LITE_D 33
+#define CLK_DIV_ACLK_LITE_D 34
+#define CLK_DIV_PCLK_LITE_B 35
+#define CLK_DIV_ACLK_LITE_B 36
+#define CLK_DIV_PCLK_LITE_A 37
+#define CLK_DIV_ACLK_LITE_A 38
+#define CLK_DIV_ACLK_CSIS1 39
+#define CLK_DIV_ACLK_CSIS0 40
+#define CLK_DIV_PCLK_3AA1 41
+#define CLK_DIV_ACLK_3AA1 42
+#define CLK_DIV_PCLK_3AA0 43
+#define CLK_DIV_ACLK_3AA0 44
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C 45
+#define CLK_DIV_PCLK_PIXELASYNC_LITE_C 46
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C_INIT 47
+
+#define CLK_ACLK_CSIS1 50
+#define CLK_ACLK_CSIS0 51
+#define CLK_ACLK_3AA1 52
+#define CLK_ACLK_3AA0 53
+#define CLK_ACLK_LITE_D 54
+#define CLK_ACLK_LITE_B 55
+#define CLK_ACLK_LITE_A 56
+#define CLK_ACLK_AHBSYNCDN 57
+#define CLK_ACLK_AXIUS_LITE_D 58
+#define CLK_ACLK_AXIUS_LITE_B 59
+#define CLK_ACLK_AXIUS_LITE_A 60
+#define CLK_ACLK_ASYNCAPBM_3AA1 61
+#define CLK_ACLK_ASYNCAPBS_3AA1 62
+#define CLK_ACLK_ASYNCAPBM_3AA0 63
+#define CLK_ACLK_ASYNCAPBS_3AA0 64
+#define CLK_ACLK_ASYNCAPBM_LITE_D 65
+#define CLK_ACLK_ASYNCAPBS_LITE_D 66
+#define CLK_ACLK_ASYNCAPBM_LITE_B 67
+#define CLK_ACLK_ASYNCAPBS_LITE_B 68
+#define CLK_ACLK_ASYNCAPBM_LITE_A 69
+#define CLK_ACLK_ASYNCAPBS_LITE_A 70
+#define CLK_ACLK_ASYNCAXIM_ISP0P 71
+#define CLK_ACLK_ASYNCAXIM_3AA1 72
+#define CLK_ACLK_ASYNCAXIS_3AA1 73
+#define CLK_ACLK_ASYNCAXIM_3AA0 74
+#define CLK_ACLK_ASYNCAXIS_3AA0 75
+#define CLK_ACLK_ASYNCAXIM_LITE_D 76
+#define CLK_ACLK_ASYNCAXIS_LITE_D 77
+#define CLK_ACLK_ASYNCAXIM_LITE_B 78
+#define CLK_ACLK_ASYNCAXIS_LITE_B 79
+#define CLK_ACLK_ASYNCAXIM_LITE_A 80
+#define CLK_ACLK_ASYNCAXIS_LITE_A 81
+#define CLK_ACLK_AHB2APB_ISPSFRP 82
+#define CLK_ACLK_AXI2APB_ISP0P 83
+#define CLK_ACLK_AXI2AHB_ISP0P 84
+#define CLK_ACLK_XIU_IS0X 85
+#define CLK_ACLK_XIU_ISP0EX 86
+#define CLK_ACLK_CAM0NP_276 87
+#define CLK_ACLK_CAM0ND_400 88
+#define CLK_ACLK_SMMU_3AA1 89
+#define CLK_ACLK_SMMU_3AA0 90
+#define CLK_ACLK_SMMU_LITE_D 91
+#define CLK_ACLK_SMMU_LITE_B 92
+#define CLK_ACLK_SMMU_LITE_A 93
+#define CLK_ACLK_BTS_3AA1 94
+#define CLK_ACLK_BTS_3AA0 95
+#define CLK_ACLK_BTS_LITE_D 96
+#define CLK_ACLK_BTS_LITE_B 97
+#define CLK_ACLK_BTS_LITE_A 98
+#define CLK_PCLK_SMMU_3AA1 99
+#define CLK_PCLK_SMMU_3AA0 100
+#define CLK_PCLK_SMMU_LITE_D 101
+#define CLK_PCLK_SMMU_LITE_B 102
+#define CLK_PCLK_SMMU_LITE_A 103
+#define CLK_PCLK_BTS_3AA1 104
+#define CLK_PCLK_BTS_3AA0 105
+#define CLK_PCLK_BTS_LITE_D 106
+#define CLK_PCLK_BTS_LITE_B 107
+#define CLK_PCLK_BTS_LITE_A 108
+#define CLK_PCLK_ASYNCAXI_CAM1 109
+#define CLK_PCLK_ASYNCAXI_3AA1 110
+#define CLK_PCLK_ASYNCAXI_3AA0 111
+#define CLK_PCLK_ASYNCAXI_LITE_D 112
+#define CLK_PCLK_ASYNCAXI_LITE_B 113
+#define CLK_PCLK_ASYNCAXI_LITE_A 114
+#define CLK_PCLK_PMU_CAM0 115
+#define CLK_PCLK_SYSREG_CAM0 116
+#define CLK_PCLK_CMU_CAM0_LOCAL 117
+#define CLK_PCLK_CSIS1 118
+#define CLK_PCLK_CSIS0 119
+#define CLK_PCLK_3AA1 120
+#define CLK_PCLK_3AA0 121
+#define CLK_PCLK_LITE_D 122
+#define CLK_PCLK_LITE_B 123
+#define CLK_PCLK_LITE_A 124
+#define CLK_PHYCLK_RXBYTECLKHS0_S4 125
+#define CLK_PHYCLK_RXBYTECLKHS0_S2A 126
+#define CLK_SCLK_LITE_FREECNT 127
+#define CLK_SCLK_PIXELASYNCM_3AA1 128
+#define CLK_SCLK_PIXELASYNCM_3AA0 129
+#define CLK_SCLK_PIXELASYNCS_3AA0 130
+#define CLK_SCLK_PIXELASYNCM_LITE_C 131
+#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
+#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
+
+#define CAM0_NR_CLK 134
+
+/* CMU_CAM1 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
+
+#define CLK_MOUT_SCLK_ISP_UART_USER 2
+#define CLK_MOUT_SCLK_ISP_SPI1_USER 3
+#define CLK_MOUT_SCLK_ISP_SPI0_USER 4
+#define CLK_MOUT_ACLK_CAM1_333_USER 5
+#define CLK_MOUT_ACLK_CAM1_400_USER 6
+#define CLK_MOUT_ACLK_CAM1_552_USER 7
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER 8
+#define CLK_MOUT_ACLK_CSIS2_B 9
+#define CLK_MOUT_ACLK_CSIS2_A 10
+#define CLK_MOUT_ACLK_FD_B 11
+#define CLK_MOUT_ACLK_FD_A 12
+#define CLK_MOUT_ACLK_LITE_C_B 13
+#define CLK_MOUT_ACLK_LITE_C_A 14
+
+#define CLK_DIV_SCLK_ISP_WPWM 15
+#define CLK_DIV_PCLK_CAM1_83 16
+#define CLK_DIV_PCLK_CAM1_166 17
+#define CLK_DIV_PCLK_DBG_CAM1 18
+#define CLK_DIV_ATCLK_CAM1 19
+#define CLK_DIV_ACLK_CSIS2 20
+#define CLK_DIV_PCLK_FD 21
+#define CLK_DIV_ACLK_FD 22
+#define CLK_DIV_PCLK_LITE_C 23
+#define CLK_DIV_ACLK_LITE_C 24
+
+#define CLK_ACLK_ISP_GIC 25
+#define CLK_ACLK_FD 26
+#define CLK_ACLK_LITE_C 27
+#define CLK_ACLK_CSIS2 28
+#define CLK_ACLK_ASYNCAPBM_FD 29
+#define CLK_ACLK_ASYNCAPBS_FD 30
+#define CLK_ACLK_ASYNCAPBM_LITE_C 31
+#define CLK_ACLK_ASYNCAPBS_LITE_C 32
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H2 33
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H1 34
+#define CLK_ACLK_ASYNCAXIM_CA5 35
+#define CLK_ACLK_ASYNCAXIS_CA5 36
+#define CLK_ACLK_ASYNCAXIS_ISPX2 37
+#define CLK_ACLK_ASYNCAXIS_ISPX1 38
+#define CLK_ACLK_ASYNCAXIS_ISPX0 39
+#define CLK_ACLK_ASYNCAXIM_ISPEX 40
+#define CLK_ACLK_ASYNCAXIM_ISP3P 41
+#define CLK_ACLK_ASYNCAXIS_ISP3P 42
+#define CLK_ACLK_ASYNCAXIM_FD 43
+#define CLK_ACLK_ASYNCAXIS_FD 44
+#define CLK_ACLK_ASYNCAXIM_LITE_C 45
+#define CLK_ACLK_ASYNCAXIS_LITE_C 46
+#define CLK_ACLK_AHB2APB_ISP5P 47
+#define CLK_ACLK_AHB2APB_ISP3P 48
+#define CLK_ACLK_AXI2APB_ISP3P 49
+#define CLK_ACLK_AHB_SFRISP2H 50
+#define CLK_ACLK_AXI_ISP_HX_R 51
+#define CLK_ACLK_AXI_ISP_CX_R 52
+#define CLK_ACLK_AXI_ISP_HX 53
+#define CLK_ACLK_AXI_ISP_CX 54
+#define CLK_ACLK_XIU_ISPX 55
+#define CLK_ACLK_XIU_ISPEX 56
+#define CLK_ACLK_CAM1NP_333 57
+#define CLK_ACLK_CAM1ND_400 58
+#define CLK_ACLK_SMMU_ISPCPU 59
+#define CLK_ACLK_SMMU_FD 60
+#define CLK_ACLK_SMMU_LITE_C 61
+#define CLK_ACLK_BTS_ISP3P 62
+#define CLK_ACLK_BTS_FD 63
+#define CLK_ACLK_BTS_LITE_C 64
+#define CLK_ACLK_AHBDN_SFRISP2H 65
+#define CLK_ACLK_AHBDN_ISP5P 66
+#define CLK_ACLK_AXIUS_ISP3P 67
+#define CLK_ACLK_AXIUS_FD 68
+#define CLK_ACLK_AXIUS_LITE_C 69
+#define CLK_PCLK_SMMU_ISPCPU 70
+#define CLK_PCLK_SMMU_FD 71
+#define CLK_PCLK_SMMU_LITE_C 72
+#define CLK_PCLK_BTS_ISP3P 73
+#define CLK_PCLK_BTS_FD 74
+#define CLK_PCLK_BTS_LITE_C 75
+#define CLK_PCLK_ASYNCAXIM_CA5 76
+#define CLK_PCLK_ASYNCAXIM_ISPEX 77
+#define CLK_PCLK_ASYNCAXIM_ISP3P 78
+#define CLK_PCLK_ASYNCAXIM_FD 79
+#define CLK_PCLK_ASYNCAXIM_LITE_C 80
+#define CLK_PCLK_PMU_CAM1 81
+#define CLK_PCLK_SYSREG_CAM1 82
+#define CLK_PCLK_CMU_CAM1_LOCAL 83
+#define CLK_PCLK_ISP_MCTADC 84
+#define CLK_PCLK_ISP_WDT 85
+#define CLK_PCLK_ISP_PWM 86
+#define CLK_PCLK_ISP_UART 87
+#define CLK_PCLK_ISP_MCUCTL 88
+#define CLK_PCLK_ISP_SPI1 89
+#define CLK_PCLK_ISP_SPI0 90
+#define CLK_PCLK_ISP_I2C2 91
+#define CLK_PCLK_ISP_I2C1 92
+#define CLK_PCLK_ISP_I2C0 93
+#define CLK_PCLK_ISP_MPWM 94
+#define CLK_PCLK_FD 95
+#define CLK_PCLK_LITE_C 96
+#define CLK_PCLK_CSIS2 97
+#define CLK_SCLK_ISP_I2C2 98
+#define CLK_SCLK_ISP_I2C1 99
+#define CLK_SCLK_ISP_I2C0 100
+#define CLK_SCLK_ISP_PWM 101
+#define CLK_PHYCLK_RXBYTECLKHS0_S2B 102
+#define CLK_SCLK_LITE_C_FREECNT 103
+#define CLK_SCLK_PIXELASYNCM_FD 104
+#define CLK_SCLK_ISP_MCTADC 105
+#define CLK_SCLK_ISP_UART 106
+#define CLK_SCLK_ISP_SPI1 107
+#define CLK_SCLK_ISP_SPI0 108
+#define CLK_SCLK_ISP_MPWM 109
+#define CLK_PCLK_DBG_ISP 110
+#define CLK_ATCLK_ISP 111
+#define CLK_SCLK_ISP_CA5 112
+
+#define CAM1_NR_CLK 113
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
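As a hedged sketch of how the Exynos5433 indices above are meant to be consumed: a board or SoC dts would include this header and hand the IDs to the matching CMU node. The cmu_disp label, the consumer node and the clock-names strings below are illustrative assumptions, not part of this patch.

    #include <dt-bindings/clock/exynos5433.h>

    &display_controller {    /* hypothetical consumer node */
            clocks = <&cmu_disp CLK_SCLK_DECON_VCLK>,
                     <&cmu_disp CLK_SCLK_DECON_ECLK>;
            clock-names = "vclk", "eclk";
    };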
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index b690cdba163b..8780868458a0 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -248,6 +248,9 @@
#define IMX6QDL_PLL6_BYPASS 235
#define IMX6QDL_PLL7_BYPASS 236
#define IMX6QDL_CLK_GPT_3M 237
-#define IMX6QDL_CLK_END 238
+#define IMX6QDL_CLK_VIDEO_27M 238
+#define IMX6QDL_CLK_MIPI_CORE_CFG 239
+#define IMX6QDL_CLK_MIPI_IPG 240
+#define IMX6QDL_CLK_END 241
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/pistachio-clk.h b/include/dt-bindings/clock/pistachio-clk.h
new file mode 100644
index 000000000000..039f83facb68
--- /dev/null
+++ b/include/dt-bindings/clock/pistachio-clk.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_PISTACHIO_H
+#define _DT_BINDINGS_CLOCK_PISTACHIO_H
+
+/* PLLs */
+#define CLK_MIPS_PLL 0
+#define CLK_AUDIO_PLL 1
+#define CLK_RPU_V_PLL 2
+#define CLK_RPU_L_PLL 3
+#define CLK_SYS_PLL 4
+#define CLK_WIFI_PLL 5
+#define CLK_BT_PLL 6
+
+/* Fixed-factor clocks */
+#define CLK_WIFI_DIV4 16
+#define CLK_WIFI_DIV8 17
+
+/* Gate clocks */
+#define CLK_MIPS 32
+#define CLK_AUDIO_IN 33
+#define CLK_AUDIO 34
+#define CLK_I2S 35
+#define CLK_SPDIF 36
+#define CLK_AUDIO_DAC 37
+#define CLK_RPU_V 38
+#define CLK_RPU_L 39
+#define CLK_RPU_SLEEP 40
+#define CLK_WIFI_PLL_GATE 41
+#define CLK_RPU_CORE 42
+#define CLK_WIFI_ADC 43
+#define CLK_WIFI_DAC 44
+#define CLK_USB_PHY 45
+#define CLK_ENET_IN 46
+#define CLK_ENET 47
+#define CLK_UART0 48
+#define CLK_UART1 49
+#define CLK_PERIPH_SYS 50
+#define CLK_SPI0 51
+#define CLK_SPI1 52
+#define CLK_EVENT_TIMER 53
+#define CLK_AUX_ADC_INTERNAL 54
+#define CLK_AUX_ADC 55
+#define CLK_SD_HOST 56
+#define CLK_BT 57
+#define CLK_BT_DIV4 58
+#define CLK_BT_DIV8 59
+#define CLK_BT_1MHZ 60
+
+/* Divider clocks */
+#define CLK_MIPS_INTERNAL_DIV 64
+#define CLK_MIPS_DIV 65
+#define CLK_AUDIO_DIV 66
+#define CLK_I2S_DIV 67
+#define CLK_SPDIF_DIV 68
+#define CLK_AUDIO_DAC_DIV 69
+#define CLK_RPU_V_DIV 70
+#define CLK_RPU_L_DIV 71
+#define CLK_RPU_SLEEP_DIV 72
+#define CLK_RPU_CORE_DIV 73
+#define CLK_USB_PHY_DIV 74
+#define CLK_ENET_DIV 75
+#define CLK_UART0_INTERNAL_DIV 76
+#define CLK_UART0_DIV 77
+#define CLK_UART1_INTERNAL_DIV 78
+#define CLK_UART1_DIV 79
+#define CLK_SYS_INTERNAL_DIV 80
+#define CLK_SPI0_INTERNAL_DIV 81
+#define CLK_SPI0_DIV 82
+#define CLK_SPI1_INTERNAL_DIV 83
+#define CLK_SPI1_DIV 84
+#define CLK_EVENT_TIMER_INTERNAL_DIV 85
+#define CLK_EVENT_TIMER_DIV 86
+#define CLK_AUX_ADC_INTERNAL_DIV 87
+#define CLK_AUX_ADC_DIV 88
+#define CLK_SD_HOST_DIV 89
+#define CLK_BT_DIV 90
+#define CLK_BT_DIV4_DIV 91
+#define CLK_BT_DIV8_DIV 92
+#define CLK_BT_1MHZ_INTERNAL_DIV 93
+#define CLK_BT_1MHZ_DIV 94
+
+/* Mux clocks */
+#define CLK_AUDIO_REF_MUX 96
+#define CLK_MIPS_PLL_MUX 97
+#define CLK_AUDIO_PLL_MUX 98
+#define CLK_AUDIO_MUX 99
+#define CLK_RPU_V_PLL_MUX 100
+#define CLK_RPU_L_PLL_MUX 101
+#define CLK_RPU_L_MUX 102
+#define CLK_WIFI_PLL_MUX 103
+#define CLK_WIFI_DIV4_MUX 104
+#define CLK_WIFI_DIV8_MUX 105
+#define CLK_RPU_CORE_MUX 106
+#define CLK_SYS_PLL_MUX 107
+#define CLK_ENET_MUX 108
+#define CLK_EVENT_TIMER_MUX 109
+#define CLK_SD_HOST_MUX 110
+#define CLK_BT_PLL_MUX 111
+#define CLK_DEBUG_MUX 112
+
+#define CLK_NR_CLKS 113
+
+/* Peripheral gate clocks */
+#define PERIPH_CLK_SYS 0
+#define PERIPH_CLK_SYS_BUS 1
+#define PERIPH_CLK_DDR 2
+#define PERIPH_CLK_ROM 3
+#define PERIPH_CLK_COUNTER_FAST 4
+#define PERIPH_CLK_COUNTER_SLOW 5
+#define PERIPH_CLK_IR 6
+#define PERIPH_CLK_WD 7
+#define PERIPH_CLK_PDM 8
+#define PERIPH_CLK_PWM 9
+#define PERIPH_CLK_I2C0 10
+#define PERIPH_CLK_I2C1 11
+#define PERIPH_CLK_I2C2 12
+#define PERIPH_CLK_I2C3 13
+
+/* Peripheral divider clocks */
+#define PERIPH_CLK_ROM_DIV 32
+#define PERIPH_CLK_COUNTER_FAST_DIV 33
+#define PERIPH_CLK_COUNTER_SLOW_PRE_DIV 34
+#define PERIPH_CLK_COUNTER_SLOW_DIV 35
+#define PERIPH_CLK_IR_PRE_DIV 36
+#define PERIPH_CLK_IR_DIV 37
+#define PERIPH_CLK_WD_PRE_DIV 38
+#define PERIPH_CLK_WD_DIV 39
+#define PERIPH_CLK_PDM_PRE_DIV 40
+#define PERIPH_CLK_PDM_DIV 41
+#define PERIPH_CLK_PWM_PRE_DIV 42
+#define PERIPH_CLK_PWM_DIV 43
+#define PERIPH_CLK_I2C0_PRE_DIV 44
+#define PERIPH_CLK_I2C0_DIV 45
+#define PERIPH_CLK_I2C1_PRE_DIV 46
+#define PERIPH_CLK_I2C1_DIV 47
+#define PERIPH_CLK_I2C2_PRE_DIV 48
+#define PERIPH_CLK_I2C2_DIV 49
+#define PERIPH_CLK_I2C3_PRE_DIV 50
+#define PERIPH_CLK_I2C3_DIV 51
+
+#define PERIPH_CLK_NR_CLKS 52
+
+/* System gate clocks */
+#define SYS_CLK_I2C0 0
+#define SYS_CLK_I2C1 1
+#define SYS_CLK_I2C2 2
+#define SYS_CLK_I2C3 3
+#define SYS_CLK_I2S_IN 4
+#define SYS_CLK_PAUD_OUT 5
+#define SYS_CLK_SPDIF_OUT 6
+#define SYS_CLK_SPI0_MASTER 7
+#define SYS_CLK_SPI0_SLAVE 8
+#define SYS_CLK_PWM 9
+#define SYS_CLK_UART0 10
+#define SYS_CLK_UART1 11
+#define SYS_CLK_SPI1 12
+#define SYS_CLK_MDC 13
+#define SYS_CLK_SD_HOST 14
+#define SYS_CLK_ENET 15
+#define SYS_CLK_IR 16
+#define SYS_CLK_WD 17
+#define SYS_CLK_TIMER 18
+#define SYS_CLK_I2S_OUT 24
+#define SYS_CLK_SPDIF_IN 25
+#define SYS_CLK_EVENT_TIMER 26
+#define SYS_CLK_HASH 27
+
+#define SYS_CLK_NR_CLKS 28
+
+/* Gates for external input clocks */
+#define EXT_CLK_AUDIO_IN 0
+#define EXT_CLK_ENET_IN 1
+
+#define EXT_CLK_NR_CLKS 2
+
+#endif /* _DT_BINDINGS_CLOCK_PISTACHIO_H */
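For the Pistachio IDs, note that the gate, peripheral and system blocks are separate index spaces, so a consumer references each through its own controller. A minimal sketch, assuming clk_core and cr_periph as the controller labels (both are assumptions here):

    #include <dt-bindings/clock/pistachio-clk.h>

    &uart0 {
            /* clk_core and cr_periph labels are assumed controller nodes */
            clocks = <&clk_core CLK_UART0>, <&cr_periph SYS_CLK_UART0>;
            clock-names = "uart", "sys";
    };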
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index 04fb29ae30e6..ebd63fd05649 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -288,5 +288,6 @@
#define UBI32_CORE2_CLK_SRC 278
#define UBI32_CORE1_CLK 279
#define UBI32_CORE2_CLK 280
+#define EBI2_AON_CLK 281
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h
new file mode 100644
index 000000000000..e430f644dd6c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8916_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8916_H
+
+#define GPLL0 0
+#define GPLL0_VOTE 1
+#define BIMC_PLL 2
+#define BIMC_PLL_VOTE 3
+#define GPLL1 4
+#define GPLL1_VOTE 5
+#define GPLL2 6
+#define GPLL2_VOTE 7
+#define PCNOC_BFDCD_CLK_SRC 8
+#define SYSTEM_NOC_BFDCD_CLK_SRC 9
+#define CAMSS_AHB_CLK_SRC 10
+#define APSS_AHB_CLK_SRC 11
+#define CSI0_CLK_SRC 12
+#define CSI1_CLK_SRC 13
+#define GFX3D_CLK_SRC 14
+#define VFE0_CLK_SRC 15
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27
+#define BLSP1_UART1_APPS_CLK_SRC 28
+#define BLSP1_UART2_APPS_CLK_SRC 29
+#define CCI_CLK_SRC 30
+#define CAMSS_GP0_CLK_SRC 31
+#define CAMSS_GP1_CLK_SRC 32
+#define JPEG0_CLK_SRC 33
+#define MCLK0_CLK_SRC 34
+#define MCLK1_CLK_SRC 35
+#define CSI0PHYTIMER_CLK_SRC 36
+#define CSI1PHYTIMER_CLK_SRC 37
+#define CPP_CLK_SRC 38
+#define CRYPTO_CLK_SRC 39
+#define GP1_CLK_SRC 40
+#define GP2_CLK_SRC 41
+#define GP3_CLK_SRC 42
+#define BYTE0_CLK_SRC 43
+#define ESC0_CLK_SRC 44
+#define MDP_CLK_SRC 45
+#define PCLK0_CLK_SRC 46
+#define VSYNC_CLK_SRC 47
+#define PDM2_CLK_SRC 48
+#define SDCC1_APPS_CLK_SRC 49
+#define SDCC2_APPS_CLK_SRC 50
+#define APSS_TCU_CLK_SRC 51
+#define USB_HS_SYSTEM_CLK_SRC 52
+#define VCODEC0_CLK_SRC 53
+#define GCC_BLSP1_AHB_CLK 54
+#define GCC_BLSP1_SLEEP_CLK 55
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67
+#define GCC_BLSP1_UART1_APPS_CLK 68
+#define GCC_BLSP1_UART2_APPS_CLK 69
+#define GCC_BOOT_ROM_AHB_CLK 70
+#define GCC_CAMSS_CCI_AHB_CLK 71
+#define GCC_CAMSS_CCI_CLK 72
+#define GCC_CAMSS_CSI0_AHB_CLK 73
+#define GCC_CAMSS_CSI0_CLK 74
+#define GCC_CAMSS_CSI0PHY_CLK 75
+#define GCC_CAMSS_CSI0PIX_CLK 76
+#define GCC_CAMSS_CSI0RDI_CLK 77
+#define GCC_CAMSS_CSI1_AHB_CLK 78
+#define GCC_CAMSS_CSI1_CLK 79
+#define GCC_CAMSS_CSI1PHY_CLK 80
+#define GCC_CAMSS_CSI1PIX_CLK 81
+#define GCC_CAMSS_CSI1RDI_CLK 82
+#define GCC_CAMSS_CSI_VFE0_CLK 83
+#define GCC_CAMSS_GP0_CLK 84
+#define GCC_CAMSS_GP1_CLK 85
+#define GCC_CAMSS_ISPIF_AHB_CLK 86
+#define GCC_CAMSS_JPEG0_CLK 87
+#define GCC_CAMSS_JPEG_AHB_CLK 88
+#define GCC_CAMSS_JPEG_AXI_CLK 89
+#define GCC_CAMSS_MCLK0_CLK 90
+#define GCC_CAMSS_MCLK1_CLK 91
+#define GCC_CAMSS_MICRO_AHB_CLK 92
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 93
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 94
+#define GCC_CAMSS_AHB_CLK 95
+#define GCC_CAMSS_TOP_AHB_CLK 96
+#define GCC_CAMSS_CPP_AHB_CLK 97
+#define GCC_CAMSS_CPP_CLK 98
+#define GCC_CAMSS_VFE0_CLK 99
+#define GCC_CAMSS_VFE_AHB_CLK 100
+#define GCC_CAMSS_VFE_AXI_CLK 101
+#define GCC_CRYPTO_AHB_CLK 102
+#define GCC_CRYPTO_AXI_CLK 103
+#define GCC_CRYPTO_CLK 104
+#define GCC_OXILI_GMEM_CLK 105
+#define GCC_GP1_CLK 106
+#define GCC_GP2_CLK 107
+#define GCC_GP3_CLK 108
+#define GCC_MDSS_AHB_CLK 109
+#define GCC_MDSS_AXI_CLK 110
+#define GCC_MDSS_BYTE0_CLK 111
+#define GCC_MDSS_ESC0_CLK 112
+#define GCC_MDSS_MDP_CLK 113
+#define GCC_MDSS_PCLK0_CLK 114
+#define GCC_MDSS_VSYNC_CLK 115
+#define GCC_MSS_CFG_AHB_CLK 116
+#define GCC_OXILI_AHB_CLK 117
+#define GCC_OXILI_GFX3D_CLK 118
+#define GCC_PDM2_CLK 119
+#define GCC_PDM_AHB_CLK 120
+#define GCC_PRNG_AHB_CLK 121
+#define GCC_SDCC1_AHB_CLK 122
+#define GCC_SDCC1_APPS_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_GTCU_AHB_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_SMMU_CFG_CLK 129
+#define GCC_VENUS_TBU_CLK 130
+#define GCC_VFE_TBU_CLK 131
+#define GCC_USB2A_PHY_SLEEP_CLK 132
+#define GCC_USB_HS_AHB_CLK 133
+#define GCC_USB_HS_SYSTEM_CLK 134
+#define GCC_VENUS0_AHB_CLK 135
+#define GCC_VENUS0_AXI_CLK 136
+#define GCC_VENUS0_VCODEC0_CLK 137
+
+#endif
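A short usage sketch for the MSM8916 GCC IDs, following the usual Qualcomm GCC pattern; the gcc label, the consumer node and the clock-names strings are assumptions for illustration:

    #include <dt-bindings/clock/qcom,gcc-msm8916.h>

    &blsp1_uart2 {
            /* "core"/"iface" naming follows common qcom usage; treat it
             * as an assumption here */
            clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
            clock-names = "core", "iface";
    };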
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
new file mode 100644
index 000000000000..9a4b4c9ca44a
--- /dev/null
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A73A4_H__
+#define __DT_BINDINGS_CLOCK_R8A73A4_H__
+
+/* CPG */
+#define R8A73A4_CLK_MAIN 0
+#define R8A73A4_CLK_PLL0 1
+#define R8A73A4_CLK_PLL1 2
+#define R8A73A4_CLK_PLL2 3
+#define R8A73A4_CLK_PLL2S 4
+#define R8A73A4_CLK_PLL2H 5
+#define R8A73A4_CLK_Z 6
+#define R8A73A4_CLK_Z2 7
+#define R8A73A4_CLK_I 8
+#define R8A73A4_CLK_M3 9
+#define R8A73A4_CLK_B 10
+#define R8A73A4_CLK_M1 11
+#define R8A73A4_CLK_M2 12
+#define R8A73A4_CLK_ZX 13
+#define R8A73A4_CLK_ZS 14
+#define R8A73A4_CLK_HP 15
+
+/* MSTP2 */
+#define R8A73A4_CLK_DMAC 18
+#define R8A73A4_CLK_SCIFB3 17
+#define R8A73A4_CLK_SCIFB2 16
+#define R8A73A4_CLK_SCIFB1 7
+#define R8A73A4_CLK_SCIFB0 6
+#define R8A73A4_CLK_SCIFA0 4
+#define R8A73A4_CLK_SCIFA1 3
+
+/* MSTP3 */
+#define R8A73A4_CLK_CMT1 29
+#define R8A73A4_CLK_IIC1 23
+#define R8A73A4_CLK_IIC0 18
+#define R8A73A4_CLK_IIC7 17
+#define R8A73A4_CLK_IIC6 16
+#define R8A73A4_CLK_MMCIF0 15
+#define R8A73A4_CLK_SDHI0 14
+#define R8A73A4_CLK_SDHI1 13
+#define R8A73A4_CLK_SDHI2 12
+#define R8A73A4_CLK_MMCIF1 5
+#define R8A73A4_CLK_IIC2 0
+
+/* MSTP4 */
+#define R8A73A4_CLK_IIC3 11
+#define R8A73A4_CLK_IIC4 10
+#define R8A73A4_CLK_IIC5 9
+
+/* MSTP5 */
+#define R8A73A4_CLK_THERMAL 22
+#define R8A73A4_CLK_IIC8 15
+
+#endif /* __DT_BINDINGS_CLOCK_R8A73A4_H__ */
diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h
new file mode 100644
index 000000000000..f6b07c5399de
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7778-clock.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7778_H__
+#define __DT_BINDINGS_CLOCK_R8A7778_H__
+
+/* CPG */
+#define R8A7778_CLK_PLLA 0
+#define R8A7778_CLK_PLLB 1
+#define R8A7778_CLK_B 2
+#define R8A7778_CLK_OUT 3
+#define R8A7778_CLK_P 4
+#define R8A7778_CLK_S 5
+#define R8A7778_CLK_S1 6
+
+/* MSTP0 */
+#define R8A7778_CLK_I2C0 30
+#define R8A7778_CLK_I2C1 29
+#define R8A7778_CLK_I2C2 28
+#define R8A7778_CLK_I2C3 27
+#define R8A7778_CLK_SCIF0 26
+#define R8A7778_CLK_SCIF1 25
+#define R8A7778_CLK_SCIF2 24
+#define R8A7778_CLK_SCIF3 23
+#define R8A7778_CLK_SCIF4 22
+#define R8A7778_CLK_SCIF5 21
+#define R8A7778_CLK_TMU0 16
+#define R8A7778_CLK_TMU1 15
+#define R8A7778_CLK_TMU2 14
+#define R8A7778_CLK_SSI0 12
+#define R8A7778_CLK_SSI1 11
+#define R8A7778_CLK_SSI2 10
+#define R8A7778_CLK_SSI3 9
+#define R8A7778_CLK_SRU 8
+#define R8A7778_CLK_HSPI 7
+
+/* MSTP1 */
+#define R8A7778_CLK_ETHER 14
+#define R8A7778_CLK_VIN0 10
+#define R8A7778_CLK_VIN1 9
+#define R8A7778_CLK_USB 0
+
+/* MSTP3 */
+#define R8A7778_CLK_MMC 31
+#define R8A7778_CLK_SDHI0 23
+#define R8A7778_CLK_SDHI1 22
+#define R8A7778_CLK_SDHI2 21
+#define R8A7778_CLK_SSI4 11
+#define R8A7778_CLK_SSI5 10
+#define R8A7778_CLK_SSI6 9
+#define R8A7778_CLK_SSI7 8
+#define R8A7778_CLK_SSI8 7
+
+/* MSTP5 */
+#define R8A7778_CLK_SRU_SRC0 31
+#define R8A7778_CLK_SRU_SRC1 30
+#define R8A7778_CLK_SRU_SRC2 29
+#define R8A7778_CLK_SRU_SRC3 28
+#define R8A7778_CLK_SRU_SRC4 27
+#define R8A7778_CLK_SRU_SRC5 26
+#define R8A7778_CLK_SRU_SRC6 25
+#define R8A7778_CLK_SRU_SRC7 24
+#define R8A7778_CLK_SRU_SRC8 23
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7778_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index 91940271cf83..3f2c6b198d4a 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -21,6 +21,8 @@
#define R8A7790_CLK_SD0 7
#define R8A7790_CLK_SD1 8
#define R8A7790_CLK_Z 9
+#define R8A7790_CLK_RCAN 10
+#define R8A7790_CLK_ADSP 11
/* MSTP0 */
#define R8A7790_CLK_MSIOF0 0
@@ -80,6 +82,7 @@
/* MSTP5 */
#define R8A7790_CLK_AUDIO_DMAC1 1
#define R8A7790_CLK_AUDIO_DMAC0 2
+#define R8A7790_CLK_ADSP_MOD 6
#define R8A7790_CLK_THERMAL 22
#define R8A7790_CLK_PWM 23
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index f096f3f6c16a..8fc5dc8faeea 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -20,6 +20,8 @@
#define R8A7791_CLK_SDH 6
#define R8A7791_CLK_SD0 7
#define R8A7791_CLK_Z 8
+#define R8A7791_CLK_RCAN 9
+#define R8A7791_CLK_ADSP 10
/* MSTP0 */
#define R8A7791_CLK_MSIOF0 0
@@ -71,6 +73,7 @@
/* MSTP5 */
#define R8A7791_CLK_AUDIO_DMAC1 1
#define R8A7791_CLK_AUDIO_DMAC0 2
+#define R8A7791_CLK_ADSP_MOD 6
#define R8A7791_CLK_THERMAL 22
#define R8A7791_CLK_PWM 23
diff --git a/include/dt-bindings/clock/sh73a0-clock.h b/include/dt-bindings/clock/sh73a0-clock.h
index 1dd3eb2b7d90..53369568c24c 100644
--- a/include/dt-bindings/clock/sh73a0-clock.h
+++ b/include/dt-bindings/clock/sh73a0-clock.h
@@ -76,4 +76,7 @@
#define SH73A0_CLK_IIC4 10
#define SH73A0_CLK_KEYSC 3
+/* MSTP5 */
+#define SH73A0_CLK_INTCA0 8
+
#endif
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index ae2eb17a1658..a2156090563f 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -297,7 +297,7 @@
#define TEGRA124_CLK_PLL_C4 270
#define TEGRA124_CLK_PLL_DP 271
#define TEGRA124_CLK_PLL_E_MUX 272
-#define TEGRA124_CLK_PLLD_DSI 273
+#define TEGRA124_CLK_PLL_D_DSI_OUT 273
/* 274 */
/* 275 */
/* 276 */
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
new file mode 100644
index 000000000000..df017fdfb44e
--- /dev/null
+++ b/include/dt-bindings/dma/jz4780-dma.h
@@ -0,0 +1,49 @@
+#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
+#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
+
+/*
+ * Request type numbers for the JZ4780 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define JZ4780_DMA_I2S1_TX 0x4
+#define JZ4780_DMA_I2S1_RX 0x5
+#define JZ4780_DMA_I2S0_TX 0x6
+#define JZ4780_DMA_I2S0_RX 0x7
+#define JZ4780_DMA_AUTO 0x8
+#define JZ4780_DMA_SADC_RX 0x9
+#define JZ4780_DMA_UART4_TX 0xc
+#define JZ4780_DMA_UART4_RX 0xd
+#define JZ4780_DMA_UART3_TX 0xe
+#define JZ4780_DMA_UART3_RX 0xf
+#define JZ4780_DMA_UART2_TX 0x10
+#define JZ4780_DMA_UART2_RX 0x11
+#define JZ4780_DMA_UART1_TX 0x12
+#define JZ4780_DMA_UART1_RX 0x13
+#define JZ4780_DMA_UART0_TX 0x14
+#define JZ4780_DMA_UART0_RX 0x15
+#define JZ4780_DMA_SSI0_TX 0x16
+#define JZ4780_DMA_SSI0_RX 0x17
+#define JZ4780_DMA_SSI1_TX 0x18
+#define JZ4780_DMA_SSI1_RX 0x19
+#define JZ4780_DMA_MSC0_TX 0x1a
+#define JZ4780_DMA_MSC0_RX 0x1b
+#define JZ4780_DMA_MSC1_TX 0x1c
+#define JZ4780_DMA_MSC1_RX 0x1d
+#define JZ4780_DMA_MSC2_TX 0x1e
+#define JZ4780_DMA_MSC2_RX 0x1f
+#define JZ4780_DMA_PCM0_TX 0x20
+#define JZ4780_DMA_PCM0_RX 0x21
+#define JZ4780_DMA_SMB0_TX 0x24
+#define JZ4780_DMA_SMB0_RX 0x25
+#define JZ4780_DMA_SMB1_TX 0x26
+#define JZ4780_DMA_SMB1_RX 0x27
+#define JZ4780_DMA_SMB2_TX 0x28
+#define JZ4780_DMA_SMB2_RX 0x29
+#define JZ4780_DMA_SMB3_TX 0x2a
+#define JZ4780_DMA_SMB3_RX 0x2b
+#define JZ4780_DMA_SMB4_TX 0x2c
+#define JZ4780_DMA_SMB4_RX 0x2d
+#define JZ4780_DMA_DES_TX 0x2e
+#define JZ4780_DMA_DES_RX 0x2f
+
+#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
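Since these values are the DRTn request types, a DMA client selects one per direction in its dmas specifier. A hedged example follows; the dma controller label and the second specifier cell (a channel mask, with 0xffffffff read as "any channel") are assumptions rather than content of this header:

    #include <dt-bindings/dma/jz4780-dma.h>

    &uart0 {
            dmas = <&dma JZ4780_DMA_UART0_TX 0xffffffff>,
                   <&dma JZ4780_DMA_UART0_RX 0xffffffff>;
            dma-names = "tx", "rx";
    };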
diff --git a/include/dt-bindings/gpio/meson8b-gpio.h b/include/dt-bindings/gpio/meson8b-gpio.h
new file mode 100644
index 000000000000..c38cb20d7182
--- /dev/null
+++ b/include/dt-bindings/gpio/meson8b-gpio.h
@@ -0,0 +1,32 @@
+/*
+ * GPIO definitions for Amlogic Meson8b SoCs
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON8B_GPIO_H
+#define _DT_BINDINGS_MESON8B_GPIO_H
+
+#include <dt-bindings/gpio/meson8-gpio.h>
+
+/* GPIO Bank DIF */
+#define DIF_0_P 120
+#define DIF_0_N 121
+#define DIF_1_P 122
+#define DIF_1_N 123
+#define DIF_2_P 124
+#define DIF_2_N 125
+#define DIF_3_P 126
+#define DIF_3_N 127
+#define DIF_4_P 128
+#define DIF_4_N 129
+
+#endif /* _DT_BINDINGS_MESON8B_GPIO_H */
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
new file mode 100644
index 000000000000..79fcef72ef57
--- /dev/null
+++ b/include/dt-bindings/leds/common.h
@@ -0,0 +1,21 @@
+/*
+ * This header provides macros for the common LEDs device tree bindings.
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_LEDS_H__
+#define __DT_BINDINGS_LEDS_H__
+
+/* External trigger type */
+#define LEDS_TRIG_TYPE_EDGE 0
+#define LEDS_TRIG_TYPE_LEVEL 1
+
+/* Boost modes */
+#define LEDS_BOOST_OFF 0
+#define LEDS_BOOST_ADAPTIVE 1
+#define LEDS_BOOST_FIXED 2
+
+#endif /* __DT_BINDINGS_LEDS_H__ */
diff --git a/include/dt-bindings/media/omap3-isp.h b/include/dt-bindings/media/omap3-isp.h
new file mode 100644
index 000000000000..b18c60e468c7
--- /dev/null
+++ b/include/dt-bindings/media/omap3-isp.h
@@ -0,0 +1,22 @@
+/*
+ * include/dt-bindings/media/omap3-isp.h
+ *
+ * Copyright (C) 2015 Sakari Ailus
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_OMAP3_ISP_H__
+#define __DT_BINDINGS_OMAP3_ISP_H__
+
+#define OMAP3ISP_PHY_TYPE_COMPLEX_IO 0
+#define OMAP3ISP_PHY_TYPE_CSIPHY 1
+
+#endif /* __DT_BINDINGS_OMAP3_ISP_H__ */
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
new file mode 100644
index 000000000000..6298fec00685
--- /dev/null
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+#define __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+
+/*
+ * Video format codes as defined in "AXI4-Stream Video IP and System Design
+ * Guide".
+ */
+#define XVIP_VF_YUV_422 0
+#define XVIP_VF_YUV_444 1
+#define XVIP_VF_RBG 2
+#define XVIP_VF_YUV_420 3
+#define XVIP_VF_YUVA_422 4
+#define XVIP_VF_YUVA_444 5
+#define XVIP_VF_RGBA 6
+#define XVIP_VF_YUVA_420 7
+#define XVIP_VF_YUVD_422 8
+#define XVIP_VF_YUVD_444 9
+#define XVIP_VF_RGBD 10
+#define XVIP_VF_YUVD_420 11
+#define XVIP_VF_MONO_SENSOR 12
+#define XVIP_VF_CUSTOM2 13
+#define XVIP_VF_CUSTOM3 14
+#define XVIP_VF_CUSTOM4 15
+
+#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */
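A minimal sketch of how a Xilinx video IP node might pick one of these AXI4-Stream format codes; the node label and the xlnx,video-format property name are assumptions based on the Xilinx video pipeline bindings:

    #include <dt-bindings/media/xilinx-vip.h>

    &v_tpg_0 {
            /* hypothetical test-pattern-generator node */
            xlnx,video-format = <XVIP_VF_YUV_422>;
    };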
diff --git a/include/linux/mfd/arizona/gpio.h b/include/dt-bindings/mfd/arizona.h
index d2146bb74f89..c7af7c7ef793 100644
--- a/include/linux/mfd/arizona/gpio.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -1,7 +1,7 @@
/*
- * GPIO configuration for Arizona devices
+ * Device Tree defines for Arizona devices
*
- * Copyright 2013 Wolfson Microelectronics. PLC.
+ * Copyright 2015 Cirrus Logic Inc.
*
* Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
*
@@ -10,9 +10,10 @@
* published by the Free Software Foundation.
*/
-#ifndef _ARIZONA_GPIO_H
-#define _ARIZONA_GPIO_H
+#ifndef _DT_BINDINGS_MFD_ARIZONA_H
+#define _DT_BINDINGS_MFD_ARIZONA_H
+/* GPIO Function Definitions */
#define ARIZONA_GP_FN_TXLRCLK 0x00
#define ARIZONA_GP_FN_GPIO 0x01
#define ARIZONA_GP_FN_IRQ1 0x02
@@ -61,36 +62,32 @@
#define ARIZONA_GP_FN_SYSCLK_ENA_STATUS 0x4B
#define ARIZONA_GP_FN_ASYNCCLK_ENA_STATUS 0x4C
-#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
-#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
-#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
-#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
-#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
-#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
-#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
-#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
-#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
-#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
-#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
-#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
-#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
-#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
-#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
-#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
-#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
-#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
-#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
-#define ARIZONA_GPN_FN_MASK 0x007F /* GPN_DB */
-#define ARIZONA_GPN_FN_SHIFT 0 /* GPN_DB */
-#define ARIZONA_GPN_FN_WIDTH 7 /* GPN_DB */
+/* GPIO Configuration Bits */
+#define ARIZONA_GPN_DIR 0x8000
+#define ARIZONA_GPN_PU 0x4000
+#define ARIZONA_GPN_PD 0x2000
+#define ARIZONA_GPN_LVL 0x0800
+#define ARIZONA_GPN_POL 0x0400
+#define ARIZONA_GPN_OP_CFG 0x0200
+#define ARIZONA_GPN_DB 0x0100
+
+/* Provide some defines for the most common configs */
+#define ARIZONA_GP_DEFAULT 0xffffffff
+#define ARIZONA_GP_OUTPUT (ARIZONA_GP_FN_GPIO)
+#define ARIZONA_GP_INPUT (ARIZONA_GP_FN_GPIO | \
+ ARIZONA_GPN_DIR)
+
+#define ARIZONA_32KZ_MCLK1 1
+#define ARIZONA_32KZ_MCLK2 2
+#define ARIZONA_32KZ_NONE 3
+
+#define ARIZONA_DMIC_MICVDD 0
+#define ARIZONA_DMIC_MICBIAS1 1
+#define ARIZONA_DMIC_MICBIAS2 2
+#define ARIZONA_DMIC_MICBIAS3 3
+
+#define ARIZONA_INMODE_DIFF 0
+#define ARIZONA_INMODE_SE 1
+#define ARIZONA_INMODE_DMIC 2
#endif
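The "common configs" defines above are intended to be dropped straight into per-pin default values. A hedged sketch, assuming the wlf,gpio-defaults property from the Arizona MFD binding and a five-GPIO part; the codec label and the chosen values are illustrative only:

    #include <dt-bindings/mfd/arizona.h>

    &wm5102 {
            /* one cell per GPIO; ARIZONA_GP_DEFAULT leaves a pin untouched */
            wlf,gpio-defaults = <
                    ARIZONA_GP_DEFAULT
                    ARIZONA_GP_INPUT
                    ARIZONA_GP_OUTPUT
                    ARIZONA_GP_DEFAULT
                    ARIZONA_GP_DEFAULT
            >;
    };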
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
index 388a6f3d6165..13a9d4bf2662 100644
--- a/include/dt-bindings/mfd/qcom-rpm.h
+++ b/include/dt-bindings/mfd/qcom-rpm.h
@@ -141,6 +141,12 @@
#define QCOM_RPM_SYS_FABRIC_MODE 131
#define QCOM_RPM_USB_OTG_SWITCH 132
#define QCOM_RPM_VDDMIN_GPIO 133
+#define QCOM_RPM_NSS_FABRIC_0_CLK 134
+#define QCOM_RPM_NSS_FABRIC_1_CLK 135
+#define QCOM_RPM_SMB208_S1a 136
+#define QCOM_RPM_SMB208_S1b 137
+#define QCOM_RPM_SMB208_S2a 138
+#define QCOM_RPM_SMB208_S2b 139
/*
* Constants used to select force mode for regulators.
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
new file mode 100644
index 000000000000..1198f4541327
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_MT65XX_H
+#define _DT_BINDINGS_PINCTRL_MT65XX_H
+
+#define MTK_PIN_NO(x) ((x) << 8)
+#define MTK_GET_PIN_NO(x) ((x) >> 8)
+#define MTK_GET_PIN_FUNC(x) ((x) & 0xf)
+
+#define MTK_PUPD_SET_R1R0_00 100
+#define MTK_PUPD_SET_R1R0_01 101
+#define MTK_PUPD_SET_R1R0_10 102
+#define MTK_PUPD_SET_R1R0_11 103
+
+#define MTK_DRIVE_2mA 2
+#define MTK_DRIVE_4mA 4
+#define MTK_DRIVE_6mA 6
+#define MTK_DRIVE_8mA 8
+#define MTK_DRIVE_10mA 10
+#define MTK_DRIVE_12mA 12
+#define MTK_DRIVE_14mA 14
+#define MTK_DRIVE_16mA 16
+#define MTK_DRIVE_20mA 20
+#define MTK_DRIVE_24mA 24
+#define MTK_DRIVE_28mA 28
+#define MTK_DRIVE_32mA 32
+
+#endif /* _DT_BINDINGS_PINCTRL_MT65XX_H */
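MTK_PIN_NO() packs the pin number into bits 8 and up, leaving the low 4 bits for the mux function, so a raw pinmux cell can be built as (MTK_PIN_NO(n) | func). In practice the per-SoC pinfunc headers wrap this in named macros; the pin numbers, function value 1 and node layout below are arbitrary examples, not taken from a real board:

    #include <dt-bindings/pinctrl/mt65xx.h>

    &pio {
            i2c0_pins: i2c0 {
                    pins_cmd_dat {
                            pinmux = <(MTK_PIN_NO(45) | 1)>,
                                     <(MTK_PIN_NO(46) | 1)>;
                            bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
                    };
            };
    };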
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index fa74d7cc960c..aafa76cb569d 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -48,6 +48,14 @@
#define PM8058_GPIO_L5 6
#define PM8058_GPIO_L2 7
+/*
+ * Note: PM8916 GPIO1 and GPIO2 support only the
+ * L2 (1.15V) and L5 (1.8V) options.
+ */
+#define PM8916_GPIO_VPH 0
+#define PM8916_GPIO_L2 2
+#define PM8916_GPIO_L5 3
+
#define PM8917_GPIO_VPH 0
#define PM8917_GPIO_S4 2
#define PM8917_GPIO_L15 3
@@ -115,6 +123,13 @@
#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1
#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO1_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO1_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+#define PM8916_GPIO2_DIV_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO2_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2
+#define PM8916_GPIO3_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO4_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+
#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2
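A hedged example of selecting one of the PM8916 GPIO supplies via the qcom,pmic-gpio power-source property; the node names and the "gpio3"/"normal" settings are illustrative assumptions:

    #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>

    &pm8916_gpios {
            led_gpio: led-gpio {
                    /* power-source picks one of the supplies listed above */
                    pins = "gpio3";
                    function = "normal";
                    power-source = <PM8916_GPIO_L2>;
            };
    };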
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index d2c7dabe3223..c10205491f8d 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -10,6 +10,10 @@
#define PM8841_MPP_VPH 0
#define PM8841_MPP_S3 2
+#define PM8916_MPP_VPH 0
+#define PM8916_MPP_L2 2
+#define PM8916_MPP_L5 3
+
#define PM8941_MPP_VPH 0
#define PM8941_MPP_L1 1
#define PM8941_MPP_S3 2
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8916.h b/include/dt-bindings/reset/qcom,gcc-msm8916.h
new file mode 100644
index 000000000000..3d90410f09c7
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8916.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8916_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8916_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_IMEM_BCR 9
+#define GCC_SMMU_BCR 10
+#define GCC_APSS_TCU_BCR 11
+#define GCC_SMMU_XPU_BCR 12
+#define GCC_PCNOC_TBU_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_CRYPTO_BCR 16
+#define GCC_SEC_CTRL_BCR 17
+#define GCC_AUDIO_CORE_BCR 18
+#define GCC_ULT_AUDIO_BCR 19
+#define GCC_DEHR_BCR 20
+#define GCC_SYSTEM_NOC_BCR 21
+#define GCC_PCNOC_BCR 22
+#define GCC_TCSR_BCR 23
+#define GCC_QDSS_BCR 24
+#define GCC_DCD_BCR 25
+#define GCC_MSG_RAM_BCR 26
+#define GCC_MPM_BCR 27
+#define GCC_SPMI_BCR 28
+#define GCC_SPDM_BCR 29
+#define GCC_MM_SPDM_BCR 30
+#define GCC_BIMC_BCR 31
+#define GCC_RBCPR_BCR 32
+#define GCC_TLMM_BCR 33
+#define GCC_USB_HS_BCR 34
+#define GCC_USB2A_PHY_BCR 35
+#define GCC_SDCC1_BCR 36
+#define GCC_SDCC2_BCR 37
+#define GCC_PDM_BCR 38
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 39
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49
+#define GCC_MMSS_BCR 50
+#define GCC_VENUS0_BCR 51
+#define GCC_MDSS_BCR 52
+#define GCC_CAMSS_PHY0_BCR 53
+#define GCC_CAMSS_CSI0_BCR 54
+#define GCC_CAMSS_CSI0PHY_BCR 55
+#define GCC_CAMSS_CSI0RDI_BCR 56
+#define GCC_CAMSS_CSI0PIX_BCR 57
+#define GCC_CAMSS_PHY1_BCR 58
+#define GCC_CAMSS_CSI1_BCR 59
+#define GCC_CAMSS_CSI1PHY_BCR 60
+#define GCC_CAMSS_CSI1RDI_BCR 61
+#define GCC_CAMSS_CSI1PIX_BCR 62
+#define GCC_CAMSS_ISPIF_BCR 63
+#define GCC_CAMSS_CCI_BCR 64
+#define GCC_CAMSS_MCLK0_BCR 65
+#define GCC_CAMSS_MCLK1_BCR 66
+#define GCC_CAMSS_GP0_BCR 67
+#define GCC_CAMSS_GP1_BCR 68
+#define GCC_CAMSS_TOP_BCR 69
+#define GCC_CAMSS_MICRO_BCR 70
+#define GCC_CAMSS_JPEG_BCR 71
+#define GCC_CAMSS_VFE_BCR 72
+#define GCC_CAMSS_CSI_VFE0_BCR 73
+#define GCC_OXILI_BCR 74
+#define GCC_GMEM_BCR 75
+#define GCC_CAMSS_AHB_BCR 76
+#define GCC_MDP_TBU_BCR 77
+#define GCC_GFX_TBU_BCR 78
+#define GCC_GFX_TCU_BCR 79
+#define GCC_MSS_TBU_AXI_BCR 80
+#define GCC_MSS_TBU_GSS_AXI_BCR 81
+#define GCC_MSS_TBU_Q6_AXI_BCR 82
+#define GCC_GTCU_AHB_BCR 83
+#define GCC_SMMU_CFG_BCR 84
+#define GCC_VFE_TBU_BCR 85
+#define GCC_VENUS_TBU_BCR 86
+#define GCC_JPEG_TBU_BCR 87
+#define GCC_PRONTO_TBU_BCR 88
+#define GCC_SMMU_CATS_BCR 89
+
+#endif
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
index 220f14338895..ee884168989f 100644
--- a/include/linux/a.out.h
+++ b/include/linux/a.out.h
@@ -4,44 +4,6 @@
#include <uapi/linux/a.out.h>
#ifndef __ASSEMBLY__
-#if defined (M_OLDSUN2)
-#else
-#endif
-#if defined (M_68010)
-#else
-#endif
-#if defined (M_68020)
-#else
-#endif
-#if defined (M_SPARC)
-#else
-#endif
-#if !defined (N_MAGIC)
-#endif
-#if !defined (N_BADMAG)
-#endif
-#if !defined (N_TXTOFF)
-#endif
-#if !defined (N_DATOFF)
-#endif
-#if !defined (N_TRELOFF)
-#endif
-#if !defined (N_DRELOFF)
-#endif
-#if !defined (N_SYMOFF)
-#endif
-#if !defined (N_STROFF)
-#endif
-#if !defined (N_TXTADDR)
-#endif
-#if defined(vax) || defined(hp300) || defined(pyr)
-#endif
-#ifdef sony
-#endif /* Sony. */
-#ifdef is68k
-#endif
-#if defined(m68k) && defined(PORTAR)
-#endif
#ifdef linux
#include <asm/page.h>
#if defined(__i386__) || defined(__mc68000__)
@@ -51,34 +13,5 @@
#endif
#endif
#endif
-#ifndef N_DATADDR
-#endif
-#if !defined (N_BSSADDR)
-#endif
-#if !defined (N_NLIST_DECLARED)
-#endif /* no N_NLIST_DECLARED. */
-#if !defined (N_UNDF)
-#endif
-#if !defined (N_ABS)
-#endif
-#if !defined (N_TEXT)
-#endif
-#if !defined (N_DATA)
-#endif
-#if !defined (N_BSS)
-#endif
-#if !defined (N_FN)
-#endif
-#if !defined (N_EXT)
-#endif
-#if !defined (N_TYPE)
-#endif
-#if !defined (N_STAB)
-#endif
-#if !defined (N_RELOCATION_INFO_DECLARED)
-#ifdef NS32K
-#else
-#endif
-#endif /* no N_RELOCATION_INFO_DECLARED. */
#endif /*__ASSEMBLY__ */
#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 24c7aa8b1d20..e4da5e35e29c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -53,10 +53,16 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
return adev ? adev->handle : NULL;
}
-#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
-#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
+#define ACPI_COMPANION(dev) acpi_node((dev)->fwnode)
+#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
+ acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return is_acpi_node(dev->fwnode);
+}
+
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
@@ -73,6 +79,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOAPIC,
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
+ ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -146,9 +153,14 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
+#ifndef PHYS_CPUID_INVALID
+typedef u32 phys_cpuid_t;
+#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
+#endif
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -471,6 +483,11 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return false;
+}
+
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
return NULL;
diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h
new file mode 100644
index 000000000000..f10c87265855
--- /dev/null
+++ b/include/linux/acpi_irq.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_ACPI_IRQ_H
+#define _LINUX_ACPI_IRQ_H
+
+#include <linux/irq.h>
+
+#ifndef acpi_irq_init
+static inline void acpi_irq_init(void) { }
+#endif
+
+#endif /* _LINUX_ACPI_IRQ_H */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index d9c92daa3944..9eb42dbc5582 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -1,86 +1,23 @@
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H
-#include <linux/list.h>
-#include <linux/workqueue.h>
#include <linux/aio_abi.h>
-#include <linux/uio.h>
-#include <linux/rcupdate.h>
-
-#include <linux/atomic.h>
struct kioctx;
struct kiocb;
+struct mm_struct;
#define KIOCB_KEY 0
-/*
- * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
- * cancelled or completed (this makes a certain amount of sense because
- * successful cancellation - io_cancel() - does deliver the completion to
- * userspace).
- *
- * And since most things don't implement kiocb cancellation and we'd really like
- * kiocb completion to be lockless when possible, we use ki_cancel to
- * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
- * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
- */
-#define KIOCB_CANCELLED ((void *) (~0ULL))
-
typedef int (kiocb_cancel_fn)(struct kiocb *);
-struct kiocb {
- struct file *ki_filp;
- struct kioctx *ki_ctx; /* NULL for sync ops */
- kiocb_cancel_fn *ki_cancel;
- void *private;
-
- union {
- void __user *user;
- struct task_struct *tsk;
- } ki_obj;
-
- __u64 ki_user_data; /* user's data for completion */
- loff_t ki_pos;
- size_t ki_nbytes; /* copy of iocb->aio_nbytes */
-
- struct list_head ki_list; /* the aio core uses this
- * for cancellation */
-
- /*
- * If the aio_resfd field of the userspace iocb is not zero,
- * this is the underlying eventfd context to deliver events to.
- */
- struct eventfd_ctx *ki_eventfd;
-};
-
-static inline bool is_sync_kiocb(struct kiocb *kiocb)
-{
- return kiocb->ki_ctx == NULL;
-}
-
-static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
-{
- *kiocb = (struct kiocb) {
- .ki_ctx = NULL,
- .ki_filp = filp,
- .ki_obj.tsk = current,
- };
-}
-
/* prototypes */
#ifdef CONFIG_AIO
-extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern void aio_complete(struct kiocb *iocb, long res, long res2);
-struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
-static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
-struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user * __user *iocbpp,
@@ -89,11 +26,6 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-static inline struct kiocb *list_kiocb(struct list_head *h)
-{
- return list_entry(h, struct kiocb, ki_list);
-}
-
/* for sysctl: */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index 79d6edf446d5..521ec1f2e6bc 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -24,16 +24,22 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <asm/arm-cci.h>
+
struct device_node;
#ifdef CONFIG_ARM_CCI
extern bool cci_probed(void);
+#else
+static inline bool cci_probed(void) { return false; }
+#endif
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
extern int cci_ace_get_port(struct device_node *dn);
extern int cci_disable_port_by_cpu(u64 mpidr);
extern int __cci_control_port_by_device(struct device_node *dn, bool enable);
extern int __cci_control_port_by_index(u32 port, bool enable);
#else
-static inline bool cci_probed(void) { return false; }
static inline int cci_ace_get_port(struct device_node *dn)
{
return -ENODEV;
@@ -49,6 +55,7 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
return -ENODEV;
}
#endif
+
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \
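
A sketch of the calling pattern the split stubs support: cci_probed() is available whenever the header is included, while the port-control calls degrade to -ENODEV when CONFIG_ARM_CCI400_PORT_CTRL is off. The mpidr variable is a placeholder from the caller.

if (cci_probed()) {
	/* Returns -ENODEV when the CCI-400 port control code is not built. */
	if (cci_disable_port_by_cpu(mpidr) < 0)
		pr_warn("CCI: could not disable port for CPU %llx\n",
			(unsigned long long)mpidr);
}
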
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 179b38ffd351..388574ea38ed 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -60,12 +60,15 @@ struct dma_chan_ref {
* dependency chain
* @ASYNC_TX_FENCE: specify that the next operation in the dependency
* chain uses this operation's result as an input
+ * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
+ * input data. Required for the RMW (read-modify-write) case.
*/
enum async_tx_flags {
ASYNC_TX_XOR_ZERO_DST = (1 << 0),
ASYNC_TX_XOR_DROP_DST = (1 << 1),
ASYNC_TX_ACK = (1 << 2),
ASYNC_TX_FENCE = (1 << 3),
+ ASYNC_TX_PQ_XOR_DST = (1 << 4),
};
/**
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index ee59ffe99922..b12b07e75929 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -1,7 +1,4 @@
/*
- * Copyright (C) 2005, Broadcom Corporation
- * Copyright (C) 2006, Felix Fietkau <nbd@openwrt.org>
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -14,8 +11,24 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#ifdef CONFIG_BCM47XX
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
+#else
+static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+ return -ENOTSUPP;
+};
+static inline int bcm47xx_nvram_getenv(const char *name, char *val,
+ size_t val_len)
+{
+ return -ENOTSUPP;
+};
+static inline int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ return -ENOTSUPP;
+};
+#endif
#endif /* __BCM47XX_NVRAM_H */
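
With the header now generic and stubbed, callers can use the NVRAM helpers unconditionally; a rough sketch (the variable name is only an example):

char buf[32];

if (bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
	pr_info("NVRAM boardtype: %s\n", buf);
else
	pr_info("NVRAM not available (stub returns -ENOTSUPP)\n");
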
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 994739da827f..e34f906647d3 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -434,6 +434,27 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
return bcma_find_core_unit(bus, coreid, 0);
}
+#ifdef CONFIG_BCMA_HOST_PCI
+extern void bcma_host_pci_up(struct bcma_bus *bus);
+extern void bcma_host_pci_down(struct bcma_bus *bus);
+extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable);
+#else
+static inline void bcma_host_pci_up(struct bcma_bus *bus)
+{
+}
+static inline void bcma_host_pci_down(struct bcma_bus *bus)
+{
+}
+static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable)
+{
+ if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+ return -ENOTSUPP;
+ return 0;
+}
+#endif
+
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
extern int bcma_core_enable(struct bcma_device *core, u32 flags);
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index db6fa217f98b..6cceedf65ca2 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -663,14 +663,6 @@ struct bcma_drv_cc_b {
#define bcma_cc_maskset32(cc, offset, mask, set) \
bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
-extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
-extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
-
-extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
-extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
-
-void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
-
extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
@@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
/* PMU support */
-extern void bcma_pmu_init(struct bcma_drv_cc *cc);
-extern void bcma_pmu_early_init(struct bcma_drv_cc *cc);
-
extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
u32 value);
extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,
diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h
index 4dd1f33e36a2..4354d4ea6713 100644
--- a/include/linux/bcma/bcma_driver_gmac_cmn.h
+++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
@@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn {
#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val)
#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
-#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
-extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
-#else
-static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
-#endif
-
#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index 0b3b32aeeb8a..8eea7f9e33b4 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -39,21 +39,6 @@ struct bcma_drv_mips {
u8 early_setup_done:1;
};
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
-extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
-
-extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
-#else
-static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
-static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
-
-static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
-{
- return 0;
-}
-#endif
-
extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 3f809ae372c4..5ba6918ca20b 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -238,13 +238,13 @@ struct bcma_drv_pci {
#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
-extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
-extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
-extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
- struct bcma_device *core, bool enable);
-extern void bcma_core_pci_up(struct bcma_bus *bus);
-extern void bcma_core_pci_down(struct bcma_bus *bus);
+#ifdef CONFIG_BCMA_DRIVER_PCI
extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
+#else
+static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+}
+#endif
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h
index 5988b05781c3..31e6d17ab798 100644
--- a/include/linux/bcma/bcma_driver_pcie2.h
+++ b/include/linux/bcma/bcma_driver_pcie2.h
@@ -143,6 +143,8 @@
struct bcma_drv_pcie2 {
struct bcma_device *core;
+
+ u16 reqsize;
};
#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset)
@@ -153,6 +155,4 @@ struct bcma_drv_pcie2 {
#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set)
#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask)
-void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
-
#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index dbfbf4990005..ea17cca9e685 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -172,12 +172,8 @@ extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-#define BITMAP_LAST_WORD_MASK(nbits) \
-( \
- ((nbits) % BITS_PER_LONG) ? \
- (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
-)
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
@@ -287,16 +283,16 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_empty(src, nbits);
+
+ return find_first_bit(src, nbits) == nbits;
}
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_full(src, nbits);
+
+ return find_first_zero_bit(src, nbits) == nbits;
}
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
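
For reference, a quick check (assuming a 64-bit long) that the branch-free mask definitions match the old conditional ones:

/* BITMAP_FIRST_WORD_MASK(3)  == ~0UL << 3          == 0xfffffffffffffff8 */
/* BITMAP_LAST_WORD_MASK(3)   == ~0UL >> (-3 & 63)  == ~0UL >> 61 == 0x7  */
/* BITMAP_LAST_WORD_MASK(64)  == ~0UL >> (-64 & 63) == ~0UL >> 0  == ~0UL */
/* i.e. the nbits % BITS_PER_LONG == 0 case still yields an all-ones mask. */
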
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 5d858e02997f..297f5bda4fdf 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -218,9 +218,9 @@ static inline unsigned long __ffs64(u64 word)
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
- * @size: The maximum size to search
+ * @size: The number of bits to search
*
- * Returns the bit number of the first set bit, or size.
+ * Returns the bit number of the last set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 7aec86127335..2056a99b92f8 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,7 +13,7 @@ struct blk_mq_cpu_notifier {
};
struct blk_mq_ctxmap {
- unsigned int map_size;
+ unsigned int size;
unsigned int bits_per_word;
struct blk_align_bitmap *map;
};
@@ -164,6 +164,8 @@ enum {
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
@@ -218,6 +220,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv);
@@ -227,7 +230,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request.
+ * size to get back to the original request, add request size to get the PDU.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
@@ -235,7 +238,7 @@ static inline struct request *blk_mq_rq_from_pdu(void *pdu)
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
- return (void *) rq + sizeof(*rq);
+ return rq + 1;
}
#define queue_for_each_hw_ctx(q, hctx, i) \
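
The PDU helpers are plain pointer arithmetic around the request; a sketch, where struct my_cmd stands in for a driver's per-command data:

struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* rq + 1: PDU follows the request */
struct request *back = blk_mq_rq_from_pdu(cmd);	/* back == rq */
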
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a1b25e35ea5f..b7299febc4b4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -220,7 +220,7 @@ enum rq_flag_bits {
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bbfceb756452..d5cda067115a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -32,23 +32,19 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
- struct bpf_map_ops *ops;
+ const struct bpf_map_ops *ops;
struct work_struct work;
};
struct bpf_map_type_list {
struct list_head list_node;
- struct bpf_map_ops *ops;
+ const struct bpf_map_ops *ops;
enum bpf_map_type type;
};
-void bpf_register_map_type(struct bpf_map_type_list *tl);
-void bpf_map_put(struct bpf_map *map);
-struct bpf_map *bpf_map_get(struct fd f);
-
/* function argument constraints */
enum bpf_arg_type {
- ARG_ANYTHING = 0, /* any argument is ok */
+ ARG_DONTCARE = 0, /* unused argument in helper function */
/* the following constraints used to prototype
* bpf_map_lookup/update/delete_elem() functions
@@ -62,6 +58,9 @@ enum bpf_arg_type {
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+
+ ARG_PTR_TO_CTX, /* pointer to context */
+ ARG_ANYTHING, /* any (initialized) argument is ok */
};
/* type of values returned from helper functions */
@@ -105,41 +104,61 @@ struct bpf_verifier_ops {
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+
+ u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
+ struct bpf_insn *insn);
};
struct bpf_prog_type_list {
struct list_head list_node;
- struct bpf_verifier_ops *ops;
+ const struct bpf_verifier_ops *ops;
enum bpf_prog_type type;
};
-void bpf_register_prog_type(struct bpf_prog_type_list *tl);
-
struct bpf_prog;
struct bpf_prog_aux {
atomic_t refcnt;
- bool is_gpl_compatible;
- enum bpf_prog_type prog_type;
- struct bpf_verifier_ops *ops;
- struct bpf_map **used_maps;
u32 used_map_cnt;
+ const struct bpf_verifier_ops *ops;
+ struct bpf_map **used_maps;
struct bpf_prog *prog;
struct work_struct work;
};
#ifdef CONFIG_BPF_SYSCALL
-void bpf_prog_put(struct bpf_prog *prog);
-#else
-static inline void bpf_prog_put(struct bpf_prog *prog) {}
-#endif
+void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+void bpf_register_map_type(struct bpf_map_type_list *tl);
+
struct bpf_prog *bpf_prog_get(u32 ufd);
+void bpf_prog_put(struct bpf_prog *prog);
+
+struct bpf_map *bpf_map_get(struct fd f);
+void bpf_map_put(struct bpf_map *map);
+
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+#else
+static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_prog_put(struct bpf_prog *prog)
+{
+}
+#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
-extern struct bpf_func_proto bpf_map_lookup_elem_proto;
-extern struct bpf_func_proto bpf_map_update_elem_proto;
-extern struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern const struct bpf_func_proto bpf_map_update_elem_proto;
+extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+
+extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
+extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
#endif /* _LINUX_BPF_H */
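
Because the !CONFIG_BPF_SYSCALL stub of bpf_prog_get() now returns ERR_PTR(-EOPNOTSUPP), callers can keep a single error path; a sketch with a placeholder ufd:

struct bpf_prog *prog = bpf_prog_get(ufd);

if (IS_ERR(prog))
	return PTR_ERR(prog);
/* ... use the program ... */
bpf_prog_put(prog);
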
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 7ccd928cc1f2..ae2982c0f7a6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -11,6 +11,7 @@
#define PHY_ID_BCM5421 0x002060e0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
+#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
@@ -19,6 +20,7 @@
#define PHY_ID_BCM7425 0x03625e60
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7439 0x600d8480
+#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
#define PHY_BCM_OUI_MASK 0xfffffc00
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index c05ff0f9f9a5..c3a9c8fc60fa 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -61,6 +61,8 @@ struct can_priv {
char tx_led_trig_name[CAN_LED_NAME_SZ];
struct led_trigger *rx_led_trig;
char rx_led_trig_name[CAN_LED_NAME_SZ];
+ struct led_trigger *rxtx_led_trig;
+ char rxtx_led_trig_name[CAN_LED_NAME_SZ];
#endif
};
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
index e0475c5cbb92..146de4506d21 100644
--- a/include/linux/can/led.h
+++ b/include/linux/can/led.h
@@ -21,8 +21,10 @@ enum can_led_event {
#ifdef CONFIG_CAN_LEDS
-/* keep space for interface name + "-tx"/"-rx" suffix and null terminator */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 4)
+/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
+ * suffix and null terminator
+ */
+#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
void can_led_event(struct net_device *netdev, enum can_led_event event);
void devm_can_led_init(struct net_device *netdev);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index cc00d15c6107..b6a52a4b457a 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -44,16 +44,11 @@ static inline void can_skb_reserve(struct sk_buff *skb)
skb_reserve(skb, sizeof(struct can_skb_priv));
}
-static inline void can_skb_destructor(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
if (sk) {
sock_hold(sk);
- skb->destructor = can_skb_destructor;
+ skb->destructor = sock_efree;
skb->sk = sk;
}
}
diff --git a/include/linux/capability.h b/include/linux/capability.h
index aa93e5ef594c..af9f0b9e80e6 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -205,6 +205,7 @@ static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
cap_intersect(permitted, __cap_nfsd_set));
}
+#ifdef CONFIG_MULTIUSER
extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
@@ -213,6 +214,34 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
+#else
+static inline bool has_capability(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool has_capability_noaudit(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability_noaudit(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool capable(int cap)
+{
+ return true;
+}
+static inline bool ns_capable(struct user_namespace *ns, int cap)
+{
+ return true;
+}
+#endif /* CONFIG_MULTIUSER */
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
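
The usual permission-check idiom keeps compiling and simply always succeeds when CONFIG_MULTIUSER is disabled; for example:

if (!capable(CAP_NET_ADMIN))
	return -EPERM;	/* never taken on !CONFIG_MULTIUSER kernels */
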
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 71e05bbf8ceb..4763ad64e832 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -50,6 +50,19 @@
#define CEPH_FEATURE_MDS_INLINE_DATA (1ULL<<40)
#define CEPH_FEATURE_CRUSH_TUNABLES3 (1ULL<<41)
#define CEPH_FEATURE_OSD_PRIMARY_AFFINITY (1ULL<<41) /* overlap w/ tunables3 */
+#define CEPH_FEATURE_MSGR_KEEPALIVE2 (1ULL<<42)
+#define CEPH_FEATURE_OSD_POOLRESEND (1ULL<<43)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 (1ULL<<44)
+#define CEPH_FEATURE_OSD_SET_ALLOC_HINT (1ULL<<45)
+#define CEPH_FEATURE_OSD_FADVISE_FLAGS (1ULL<<46)
+#define CEPH_FEATURE_OSD_REPOP (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_OBJECT_DIGEST (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_TRANSACTION_MAY_LAYOUT (1ULL<<46) /* overlap w/ fadvise */
+#define CEPH_FEATURE_MDS_QUOTA (1ULL<<47)
+#define CEPH_FEATURE_CRUSH_V4 (1ULL<<48) /* straw2 buckets */
+#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
+// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
+#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -93,7 +106,8 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
- CEPH_FEATURE_OSD_PRIMARY_AFFINITY)
+ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
+ CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 31eb03d0c766..d7d072a25c27 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -323,6 +323,7 @@ enum {
CEPH_MDS_OP_MKSNAP = 0x01400,
CEPH_MDS_OP_RMSNAP = 0x01401,
CEPH_MDS_OP_LSSNAP = 0x00402,
+ CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
extern const char *ceph_mds_op_name(int op);
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
index 1df086d7882d..29cf897cc5cd 100644
--- a/include/linux/ceph/debugfs.h
+++ b/include/linux/ceph/debugfs.h
@@ -7,13 +7,7 @@
#define CEPH_DEFINE_SHOW_FUNC(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
- struct seq_file *sf; \
- int ret; \
- \
- ret = single_open(file, name, NULL); \
- sf = file->private_data; \
- sf->private = inode->i_private; \
- return ret; \
+ return single_open(file, name, inode->i_private); \
} \
\
static const struct file_operations name##_fops = { \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 16fff9608848..30f92cefaa72 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -135,6 +135,7 @@ struct ceph_client {
struct dentry *debugfs_dir;
struct dentry *debugfs_monmap;
struct dentry *debugfs_osdmap;
+ struct dentry *debugfs_options;
#endif
};
@@ -191,6 +192,7 @@ extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
void *private);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 561ea896c657..e55c08bc3a96 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -175,13 +175,12 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
__u8 version;
if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
- pr_warning("incomplete pg encoding");
-
+ pr_warn("incomplete pg encoding\n");
return -EINVAL;
}
version = ceph_decode_8(p);
if (version > 1) {
- pr_warning("do not understand pg encoding %d > 1",
+ pr_warn("do not understand pg encoding %d > 1\n",
(int)version);
return -EINVAL;
}
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index 4ce9056b31a8..bda5ec0b4b4d 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -5,6 +5,10 @@
#include <linux/exportfs.h>
#include <linux/mm.h>
+#define CLEANCACHE_NO_POOL -1
+#define CLEANCACHE_NO_BACKEND -2
+#define CLEANCACHE_NO_BACKEND_SHARED -3
+
#define CLEANCACHE_KEY_MAX 6
/*
@@ -33,10 +37,9 @@ struct cleancache_ops {
void (*invalidate_fs)(int);
};
-extern struct cleancache_ops *
- cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(char *, struct super_block *);
+extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);
extern void __cleancache_put_page(struct page *);
extern void __cleancache_invalidate_page(struct address_space *, struct page *);
@@ -78,10 +81,10 @@ static inline void cleancache_init_fs(struct super_block *sb)
__cleancache_init_fs(sb);
}
-static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
+static inline void cleancache_init_shared_fs(struct super_block *sb)
{
if (cleancache_enabled)
- __cleancache_init_shared_fs(uuid, sb);
+ __cleancache_init_shared_fs(sb);
}
static inline int cleancache_get_page(struct page *page)
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 5591ea71a8d1..df695313f975 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -541,7 +541,7 @@ struct clk_gpio {
extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpio,
+ const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags);
void of_gpio_clk_gate_setup(struct device_node *node);
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index c8e3b3d1eded..7669f7618f39 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -20,10 +20,10 @@
extern void __iomem *at91_pmc_base;
#define at91_pmc_read(field) \
- __raw_readl(at91_pmc_base + field)
+ readl_relaxed(at91_pmc_base + field)
#define at91_pmc_write(field, value) \
- __raw_writel(value, at91_pmc_base + field)
+ writel_relaxed(value, at91_pmc_base + field)
#else
.extern at91_pmc_base
#endif
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 9f8a14041dd5..63a8159c4e64 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
+void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 67844003493d..79b76e13d904 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -215,14 +215,14 @@ struct ti_dt_clk {
.node_name = name, \
}
-/* Maximum number of clock memmaps */
-#define CLK_MAX_MEMMAPS 4
-
/* Static memmap indices */
enum {
TI_CLKM_CM = 0,
+ TI_CLKM_CM2,
TI_CLKM_PRM,
TI_CLKM_SCRM,
+ TI_CLKM_CTRL,
+ CLK_MAX_MEMMAPS
};
typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 135509821c39..d27d0152271f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -253,4 +253,10 @@ extern void clocksource_of_init(void);
static inline void clocksource_of_init(void) {}
#endif
+#ifdef CONFIG_ACPI
+void acpi_generic_timer_init(void);
+#else
+static inline void acpi_generic_timer_init(void) { }
+#endif
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9384ba66e975..f7ef093ec49a 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -16,16 +16,16 @@
struct cma;
extern unsigned long totalcma_pages;
-extern phys_addr_t cma_get_base(struct cma *cma);
-extern unsigned long cma_get_size(struct cma *cma);
+extern phys_addr_t cma_get_base(const struct cma *cma);
+extern unsigned long cma_get_size(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t base,
- phys_addr_t size, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
-extern bool cma_release(struct cma *cma, struct page *pages, int count);
+extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index a014559e4a49..aa8f61cf3a19 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -34,6 +34,7 @@ extern int sysctl_compaction_handler(struct ctl_table *table, int write,
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cdf13ca7cac3..371e560d13cf 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -9,10 +9,24 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-
/* Optimization barrier */
+
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version is used, for example, to prevent dead store elimination
+ * on @ptr, where gcc and llvm may behave differently than with a
+ * normal barrier(): while gcc is fine with a normal barrier(), llvm
+ * needs an explicit input variable to be assumed clobbered. The issue
+ * is as follows: while the inline asm might access any memory it
+ * wants, the compiler could have fit all of @ptr into registers
+ * instead, and since @ptr never escaped from them, it proved that the
+ * inline asm wasn't touching any of it. This version works well with
+ * both compilers, i.e. we're telling the compiler that the inline asm
+ * absolutely may see the contents of @ptr.
+ * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
/*
* This macro obfuscates arithmetic on a variable address so that gcc
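
A sketch of the intended use of barrier_data(): marking a cleared buffer as potentially read, so the memset() cannot be elided as a dead store. The buffer name is illustrative.

char key[32];

memset(key, 0, sizeof(key));
barrier_data(key);	/* tells gcc *and* llvm the stores may be observed */
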
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index ba147a1727e6..0c9a2f2c2802 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -13,9 +13,12 @@
/* Intel ECC compiler doesn't support gcc specific asm stmts.
* It uses intrinsics to do the equivalent things.
*/
+#undef barrier_data
#undef RELOC_HIDE
#undef OPTIMIZER_HIDE_VAR
+#define barrier_data(ptr) barrier()
+
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
__ptr = (unsigned long) (ptr); \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 0e41ca0e5927..867722591be2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define barrier() __memory_barrier()
#endif
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
diff --git a/include/linux/console.h b/include/linux/console.h
index 7571a16bd653..9f50fb413c11 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -123,7 +123,7 @@ struct console {
struct tty_driver *(*device)(struct console *, int *);
void (*unblank)(void);
int (*setup)(struct console *, char *);
- int (*early_setup)(void);
+ int (*match)(struct console *, char *name, int idx, char *options);
short flags;
short index;
int cflag;
@@ -141,7 +141,6 @@ extern int console_set_on_cmdline;
extern struct console *early_console;
extern int add_preferred_console(char *name, int idx, char *options);
-extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
extern struct console *console_drivers;
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 37b81bd51ec0..2821838256b4 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,6 +10,8 @@
#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
extern void __context_tracking_task_switch(struct task_struct *prev,
@@ -35,7 +37,8 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
- context_tracking_user_exit();
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_exit(prev_ctx);
return prev_ctx;
}
@@ -43,8 +46,8 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
if (context_tracking_is_enabled()) {
- if (prev_ctx == IN_USER)
- context_tracking_user_enter();
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_enter(prev_ctx);
}
}
@@ -78,10 +81,16 @@ static inline void guest_enter(void)
vtime_guest_enter(current);
else
current->flags |= PF_VCPU;
+
+ if (context_tracking_is_enabled())
+ context_tracking_enter(CONTEXT_GUEST);
}
static inline void guest_exit(void)
{
+ if (context_tracking_is_enabled())
+ context_tracking_exit(CONTEXT_GUEST);
+
if (vtime_accounting_enabled())
vtime_guest_exit(current);
else
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 97a81225d037..6b7b96a32b75 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -13,8 +13,9 @@ struct context_tracking {
*/
bool active;
enum ctx_state {
- IN_KERNEL = 0,
- IN_USER,
+ CONTEXT_KERNEL = 0,
+ CONTEXT_USER,
+ CONTEXT_GUEST,
} state;
};
@@ -34,11 +35,13 @@ static inline bool context_tracking_cpu_is_enabled(void)
static inline bool context_tracking_in_user(void)
{
- return __this_cpu_read(context_tracking.state) == IN_USER;
+ return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
+static inline bool context_tracking_is_enabled(void) { return false; }
+static inline bool context_tracking_cpu_is_enabled(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif
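
With the states renamed and a guest state added, the exception paths save and restore whatever context was active; roughly:

enum ctx_state prev_ctx;

prev_ctx = exception_enter();	/* leaves CONTEXT_USER or CONTEXT_GUEST */
/* ... handle the exception in CONTEXT_KERNEL ... */
exception_exit(prev_ctx);	/* re-enter the saved context */
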
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4260e8594bd7..c0fb6b1b4712 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -73,6 +73,7 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
+ CPU_PRI_SMPBOOT = 9,
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
CPU_PRI_WORKQUEUE_DOWN = -5,
@@ -95,6 +96,10 @@ enum {
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
+#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
+ * idle loop. */
+#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
+ * perhaps due to preemption. */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
@@ -161,6 +166,7 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb)
}
#endif
+void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
@@ -208,6 +214,10 @@ static inline void cpu_notifier_register_done(void)
{
}
+static inline void smpboot_thread_init(void)
+{
+}
+
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -271,4 +281,14 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
+DECLARE_PER_CPU(bool, cpu_dead_idle);
+
+int cpu_report_state(int cpu);
+int cpu_check_up_prepare(int cpu);
+void cpu_set_state_online(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+bool cpu_wait_death(unsigned int cpu, int seconds);
+bool cpu_report_death(void);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 086549a665e2..27e285b92b5f 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -11,6 +11,7 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
+/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
/**
@@ -289,11 +290,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpumask: the cpumask pointer
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
- *
- * No static inline type checking - see Subtlety (1) above.
*/
-#define cpumask_test_cpu(cpu, cpumask) \
- test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
+static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+{
+ return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
+}
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -609,9 +610,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*/
static inline size_t cpumask_size(void)
{
- /* FIXME: Once all cpumask assignments are eliminated, this
- * can be nr_cpumask_bits */
- return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+ return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
/*
@@ -768,7 +767,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#else /* NR_CPUS > BITS_PER_LONG */
@@ -776,7 +775,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#endif /* NR_CPUS > BITS_PER_LONG */
@@ -797,32 +796,18 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}
-/*
- *
- * From here down, all obsolete. Use cpumask_ variants!
- *
- */
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
#if NR_CPUS <= BITS_PER_LONG
-
#define CPU_MASK_ALL \
(cpumask_t) { { \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
-
#else
-
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
-
-#endif
+#endif /* NR_CPUS > BITS_PER_LONG */
#define CPU_MASK_NONE \
(cpumask_t) { { \
@@ -834,143 +819,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
-#if NR_CPUS == 1
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#define any_online_cpu(mask) 0
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#else /* NR_CPUS > 1 */
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-
-#define first_cpu(src) __first_cpu(&(src))
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu((cpu), (mask)), \
- (cpu) < NR_CPUS; )
-#endif /* SMP */
-
-#if NR_CPUS <= 64
-
-#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define for_each_cpu_mask_nr(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = __next_cpu_nr((cpu), &(mask)), \
- (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
- set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
- clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits)
-{
- bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits)
-{
- bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
- return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
- __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
-{
- return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
-{
- return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
- __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-
#endif /* __LINUX_CPUMASK_H */
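
A rough sketch of the effect of the two cpumask changes: cpumask_test_cpu() is now type-checked, and cpumask_size() follows nr_cpumask_bits instead of always covering NR_CPUS:

struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);	/* nr_cpumask_bits-sized */

if (mask && cpumask_test_cpu(0, cpu_online_mask))	/* must be a struct cpumask * now */
	cpumask_set_cpu(0, mask);
kfree(mask);
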
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 2fb2ca2127ed..8b6c083e68a7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -62,9 +62,27 @@ do { \
groups_free(group_info); \
} while (0)
-extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
+#ifdef CONFIG_MULTIUSER
+extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
+
+extern int in_group_p(kgid_t);
+extern int in_egroup_p(kgid_t);
+#else
+static inline void groups_free(struct group_info *group_info)
+{
+}
+
+static inline int in_group_p(kgid_t grp)
+{
+ return 1;
+}
+static inline int in_egroup_p(kgid_t grp)
+{
+ return 1;
+}
+#endif
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
@@ -74,9 +92,6 @@ extern bool may_setgroups(void);
#define GROUP_AT(gi, i) \
((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
-extern int in_group_p(kgid_t);
-extern int in_egroup_p(kgid_t);
-
/*
* The security context of a task
*
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 4fad5f8ee01d..48a1a7d100f1 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -96,13 +96,15 @@ struct crush_rule {
* uniform O(1) poor poor
* list O(n) optimal poor
* tree O(log n) good good
- * straw O(n) optimal optimal
+ * straw O(n) better better
+ * straw2 O(n) optimal optimal
*/
enum {
CRUSH_BUCKET_UNIFORM = 1,
CRUSH_BUCKET_LIST = 2,
CRUSH_BUCKET_TREE = 3,
- CRUSH_BUCKET_STRAW = 4
+ CRUSH_BUCKET_STRAW = 4,
+ CRUSH_BUCKET_STRAW2 = 5,
};
extern const char *crush_bucket_alg_name(int alg);
@@ -149,6 +151,11 @@ struct crush_bucket_straw {
__u32 *straws; /* 16-bit fixed point */
};
+struct crush_bucket_straw2 {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+};
+
/*
@@ -189,6 +196,7 @@ extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
+extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
extern void crush_destroy(struct crush_map *map);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index fb5ef16d6a12..10df5d2d093a 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -95,6 +95,12 @@
#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
/*
+ * Mark a cipher as a service implementation only usable by another
+ * cipher and never by a normal user of the kernel crypto API
+ */
+#define CRYPTO_ALG_INTERNAL 0x00002000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d8358799c594..df334cbacc6d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -404,26 +404,11 @@ static inline bool d_mountpoint(const struct dentry *dentry)
/*
* Directory cache entry type accessor functions.
*/
-static inline void __d_set_type(struct dentry *dentry, unsigned type)
-{
- dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
-}
-
-static inline void __d_clear_type(struct dentry *dentry)
-{
- __d_set_type(dentry, DCACHE_MISS_TYPE);
-}
-
-static inline void d_set_type(struct dentry *dentry, unsigned type)
-{
- spin_lock(&dentry->d_lock);
- __d_set_type(dentry, type);
- spin_unlock(&dentry->d_lock);
-}
-
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
- return dentry->d_flags & DCACHE_ENTRY_TYPE;
+ unsigned type = READ_ONCE(dentry->d_flags);
+ smp_rmb();
+ return type & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
@@ -482,6 +467,44 @@ static inline bool d_is_positive(const struct dentry *dentry)
return !d_is_negative(dentry);
}
+/**
+ * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents either an absent name or a name that
+ * doesn't map to an inode (i.e. ->d_inode is NULL). The dentry could represent
+ * a true miss, a whiteout that isn't represented by a 0,0 chardev, or a
+ * fallthrough marker in an opaque directory.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode. (3) The dentry may have something attached to ->d_lower and the
+ * type field of the flags may be set to something other than miss or whiteout.
+ */
+static inline bool d_really_is_negative(const struct dentry *dentry)
+{
+ return dentry->d_inode == NULL;
+}
+
+/**
+ * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents a name that maps to an inode
+ * (i.e. ->d_inode is not NULL). The dentry might still represent a whiteout if
+ * that is represented on medium as a 0,0 chardev.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode.
+ */
+static inline bool d_really_is_positive(const struct dentry *dentry)
+{
+ return dentry->d_inode != NULL;
+}
+
extern void d_set_fallthru(struct dentry *dentry);
static inline bool d_is_fallthru(const struct dentry *dentry)
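
As the kerneldoc above notes, these helpers are for a filesystem inspecting its own dentries, where fallthrough or whiteout entries can make d_is_negative() and d_really_is_negative() disagree; a sketch:

if (d_really_is_negative(dentry))	/* ->d_inode is NULL */
	return -ENOENT;
inode = d_inode(dentry);
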
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 439ff698000a..221025423e6c 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -43,6 +43,7 @@ enum dccp_state {
DCCP_CLOSING = TCP_CLOSING,
DCCP_TIME_WAIT = TCP_TIME_WAIT,
DCCP_CLOSED = TCP_CLOSE,
+ DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
DCCP_PARTOPEN = TCP_MAX_STATES,
DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
DCCP_MAX_STATES
@@ -57,6 +58,7 @@ enum {
DCCPF_CLOSING = TCPF_CLOSING,
DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
DCCPF_CLOSED = TCPF_CLOSE,
+ DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
};
@@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk)
return NULL;
}
-extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+extern void dccp_syn_ack_timeout(const struct request_sock *req);
#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 602fbbfcfeed..0a83a1e648b0 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -91,7 +91,7 @@ struct devfreq_event_desc {
const char *name;
void *driver_data;
- struct devfreq_event_ops *ops;
+ const struct devfreq_event_ops *ops;
};
#if defined(CONFIG_PM_DEVFREQ_EVENT)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index fd23978d93fe..51cc1deb7af3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -605,9 +605,4 @@ static inline unsigned long to_bytes(sector_t n)
return (n << SECTOR_SHIFT);
}
-/*-----------------------------------------------------------------
- * Helper for block layer and dm core operations
- *---------------------------------------------------------------*/
-int dm_underlying_device_busy(struct request_queue *q);
-
#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index f3f2c7e38060..6558af90c8fe 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -38,6 +38,7 @@ struct class;
struct subsys_private;
struct bus_type;
struct device_node;
+struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
@@ -650,14 +651,6 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
-struct acpi_device;
-
-struct acpi_dev_node {
-#ifdef CONFIG_ACPI
- struct acpi_device *companion;
-#endif
-};
-
/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
@@ -703,7 +696,7 @@ struct acpi_dev_node {
* @cma_area: Contiguous memory area for dma allocations
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
- * @acpi_node: Associated ACPI device node.
+ * @fwnode: Associated device node supplied by platform firmware.
* @devt: For creating the sysfs "dev".
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
@@ -779,7 +772,7 @@ struct device {
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
- struct acpi_dev_node acpi_node; /* associated ACPI device node */
+ struct fwnode_handle *fwnode; /* firmware device node */
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
@@ -954,6 +947,9 @@ extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
+extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+
/*
* Root device objects for grouping under /sys/devices
*/
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 694e1fe1c4b4..2f0b431b73e0 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -163,6 +163,33 @@ struct dma_buf_attachment {
};
/**
+ * struct dma_buf_export_info - holds information needed to export a dma_buf
+ * @exp_name: name of the exporting module - useful for debugging.
+ * @ops: Attach allocator-defined dma buf ops to the new buffer
+ * @size: Size of the buffer
+ * @flags: mode flags for the file
+ * @resv: reservation-object, NULL to allocate default one
+ * @priv: Attach private data of allocator to this buffer
+ *
+ * This structure holds the information required to export the buffer. Used
+ * with dma_buf_export() only.
+ */
+struct dma_buf_export_info {
+ const char *exp_name;
+ const struct dma_buf_ops *ops;
+ size_t size;
+ int flags;
+ struct reservation_object *resv;
+ void *priv;
+};
+
+/**
+ * helper macro for exporters; zeros and fills in most common values
+ */
+#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
+ struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME }
+
+/**
* get_dma_buf - convenience wrapper for get_file.
* @dmabuf: [in] pointer to dma_buf
*
@@ -181,12 +208,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
-struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
- size_t size, int flags, const char *,
- struct reservation_object *);
-
-#define dma_buf_export(priv, ops, size, flags, resv) \
- dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
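
A sketch of the new exporter calling convention; my_dmabuf_ops, size and buf are placeholders for an exporter's own ops table and private state:

DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dmabuf;

exp_info.ops = &my_dmabuf_ops;
exp_info.size = size;
exp_info.flags = O_RDWR;
exp_info.priv = buf;

dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf))
	return PTR_ERR(dmabuf);
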
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c3007cb4bfa6..ac07ff090919 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -34,6 +34,10 @@ struct dma_map_ops {
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
+ /*
+ * map_sg returns 0 on error and a value > 0 on success.
+ * It should never return a value < 0.
+ */
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
new file mode 100644
index 000000000000..234393a6997b
--- /dev/null
+++ b/include/linux/dma/hsu.h
@@ -0,0 +1,48 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DMA_HSU_H
+#define _DMA_HSU_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_data/dma-hsu.h>
+
+struct hsu_dma;
+
+/**
+ * struct hsu_dma_chip - representation of HSU DMA hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @length: I/O space length
+ * @offset: offset of the I/O space where registers are located
+ * @hsu: struct hsu_dma that is filled in by ->probe()
+ * @pdata: platform data for the DMA controller if provided
+ */
+struct hsu_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ unsigned int length;
+ unsigned int offset;
+ struct hsu_dma *hsu;
+ struct hsu_dma_platform_data *pdata;
+};
+
+/* Export to the internal users */
+irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
+
+/* Export to the platform drivers */
+int hsu_dma_probe(struct hsu_dma_chip *chip);
+int hsu_dma_remove(struct hsu_dma_chip *chip);
+
+#endif /* _DMA_HSU_H */
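A rough sketch, under assumptions, of how a platform driver could fill struct hsu_dma_chip before calling hsu_dma_probe(); the resource handling is simplified and the offset value is a placeholder that depends on the hardware register layout.

#include <linux/dma/hsu.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int my_hsu_platform_probe(struct platform_device *pdev)
{
	struct hsu_dma_chip *chip;
	struct resource *mem;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->dev = &pdev->dev;
	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;
	chip->length = resource_size(mem);
	chip->offset = 0;	/* placeholder: depends on the SoC layout */

	return hsu_dma_probe(chip);	/* fills chip->hsu on success */
}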
diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f276ed0..34b98f276ed0 100644
--- a/include/linux/amba/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b6997a0cb528..ad419757241f 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -574,7 +570,6 @@ struct dma_tx_state {
* @copy_align: alignment shift for memcpy operations
* @xor_align: alignment shift for xor operations
* @pq_align: alignment shift for pq operations
- * @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @src_addr_widths: bit mask of src addr widths the device supports
@@ -625,7 +620,6 @@ struct dma_device {
u8 copy_align;
u8 xor_align;
u8 pq_align;
- u8 fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -826,12 +820,6 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
return dmaengine_check_align(dev->pq_align, off1, off2, len);
}
-static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
- size_t off2, size_t len)
-{
- return dmaengine_check_align(dev->fill_align, off1, off2, len);
-}
-
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
@@ -1098,7 +1086,6 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1116,27 +1103,4 @@ static inline struct dma_chan
return __dma_request_channel(mask, fn, fn_param);
}
-
-/* --- Helper iov-locking functions --- */
-
-struct dma_page_list {
- char __user *base_address;
- int nr_pages;
- struct page **pages;
-};
-
-struct dma_pinned_list {
- int nr_iovecs;
- struct dma_page_list page_list[0];
-};
-
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
-void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
-
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, struct page *page,
- unsigned int offset, size_t len);
-
#endif /* DMAENGINE_H */
diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h
new file mode 100644
index 000000000000..b5f0bda9472e
--- /dev/null
+++ b/include/linux/elf-randomize.h
@@ -0,0 +1,22 @@
+#ifndef _ELF_RANDOMIZE_H
+#define _ELF_RANDOMIZE_H
+
+struct mm_struct;
+
+#ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE
+static inline unsigned long arch_mmap_rnd(void) { return 0; }
+# if defined(arch_randomize_brk) && defined(CONFIG_COMPAT_BRK)
+# define compat_brk_randomized
+# endif
+# ifndef arch_randomize_brk
+# define arch_randomize_brk(mm) (mm->brk)
+# endif
+#else
+extern unsigned long arch_mmap_rnd(void);
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+# ifdef CONFIG_COMPAT_BRK
+# define compat_brk_randomized
+# endif
+#endif
+
+#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 1d869d185a0d..606563ef8a72 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops;
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
-int eth_rebuild_header(struct sk_buff *skb);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index a23556c32703..591f8c3ef410 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -153,7 +153,7 @@ struct f2fs_orphan_block {
*/
struct f2fs_extent {
__le32 fofs; /* start file offset of the extent */
- __le32 blk_addr; /* start block address of the extent */
+ __le32 blk; /* start block address of the extent */
__le32 len; /* length of the extent */
} __packed;
@@ -178,6 +178,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
F2FS_INLINE_XATTR_ADDRS - 1))
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 31591686ac2d..996111000a8c 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -21,4 +21,10 @@ struct space_resv {
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE)
+
#endif /* _FALLOC_H_ */
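As a small illustration of the new mask, a filesystem's fallocate handler would typically start by rejecting unknown mode bits; the helper name below is a placeholder.

#include <linux/errno.h>
#include <linux/falloc.h>

/* Sketch only: refuse any fallocate mode bits outside the supported set. */
static int my_check_fallocate_mode(int mode)
{
	if (mode & ~FALLOC_FL_SUPPORTED_MASK)
		return -EOPNOTSUPP;
	return 0;
}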
diff --git a/include/linux/filter.h b/include/linux/filter.h
index caac2087a4d5..fa11b3a367be 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -145,8 +145,6 @@ struct bpf_prog_aux;
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
-#define BPF_PSEUDO_MAP_FD 1
-
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
@@ -310,9 +308,11 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
bool jited; /* Is our filter JIT'ed? */
+ bool gpl_compatible; /* Is our filter GPL compatible? */
u32 len; /* Number of filter blocks */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
/* Instructions for interpreter */
@@ -454,6 +454,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
BPF_ANCILLARY(VLAN_TAG_PRESENT);
BPF_ANCILLARY(PAY_OFFSET);
BPF_ANCILLARY(RANDOM);
+ BPF_ANCILLARY(VLAN_TPID);
}
/* Fallthrough. */
default:
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index 3089d7382325..d4686fe1cac7 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -1,6 +1,8 @@
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
+#include <linux/math64.h>
+
/*
* Simplistic fixed-point arithmetics.
* Hmm, I'm probably duplicating some code :(
@@ -29,59 +31,126 @@
#include <linux/types.h>
-/* The type representing fixed-point values */
-typedef s16 fixp_t;
+static const s32 sin_table[] = {
+ 0x00000000, 0x023be165, 0x04779632, 0x06b2f1d2, 0x08edc7b6, 0x0b27eb5c,
+ 0x0d61304d, 0x0f996a26, 0x11d06c96, 0x14060b67, 0x163a1a7d, 0x186c6ddd,
+ 0x1a9cd9ac, 0x1ccb3236, 0x1ef74bf2, 0x2120fb82, 0x234815ba, 0x256c6f9e,
+ 0x278dde6e, 0x29ac379f, 0x2bc750e8, 0x2ddf003f, 0x2ff31bdd, 0x32037a44,
+ 0x340ff241, 0x36185aee, 0x381c8bb5, 0x3a1c5c56, 0x3c17a4e7, 0x3e0e3ddb,
+ 0x3fffffff, 0x41ecc483, 0x43d464fa, 0x45b6bb5d, 0x4793a20f, 0x496af3e1,
+ 0x4b3c8c11, 0x4d084650, 0x4ecdfec6, 0x508d9210, 0x5246dd48, 0x53f9be04,
+ 0x55a6125a, 0x574bb8e5, 0x58ea90c2, 0x5a827999, 0x5c135399, 0x5d9cff82,
+ 0x5f1f5ea0, 0x609a52d1, 0x620dbe8a, 0x637984d3, 0x64dd894f, 0x6639b039,
+ 0x678dde6d, 0x68d9f963, 0x6a1de735, 0x6b598ea1, 0x6c8cd70a, 0x6db7a879,
+ 0x6ed9eba0, 0x6ff389de, 0x71046d3c, 0x720c8074, 0x730baeec, 0x7401e4bf,
+ 0x74ef0ebb, 0x75d31a5f, 0x76adf5e5, 0x777f903b, 0x7847d908, 0x7906c0af,
+ 0x79bc384c, 0x7a6831b8, 0x7b0a9f8c, 0x7ba3751c, 0x7c32a67c, 0x7cb82884,
+ 0x7d33f0c8, 0x7da5f5a3, 0x7e0e2e31, 0x7e6c924f, 0x7ec11aa3, 0x7f0bc095,
+ 0x7f4c7e52, 0x7f834ecf, 0x7fb02dc4, 0x7fd317b3, 0x7fec09e1, 0x7ffb025e,
+ 0x7fffffff
+};
-#define FRAC_N 8
-#define FRAC_MASK ((1<<FRAC_N)-1)
+/**
+ * __fixp_sin32() returns the sin of an angle in degrees
+ *
+ * @degrees: angle, in degrees, from 0 to 360.
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 __fixp_sin32(int degrees)
+{
+ s32 ret;
+ bool negative = false;
-/* Not to be used directly. Use fixp_{cos,sin} */
-static const fixp_t cos_table[46] = {
- 0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8,
- 0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD,
- 0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1,
- 0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078,
- 0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035,
- 0x002C, 0x0023, 0x001A, 0x0011, 0x0008, 0x0000
-};
+ if (degrees > 180) {
+ negative = true;
+ degrees -= 180;
+ }
+ if (degrees > 90)
+ degrees = 180 - degrees;
+ ret = sin_table[degrees];
-/* a: 123 -> 123.0 */
-static inline fixp_t fixp_new(s16 a)
-{
- return a<<FRAC_N;
+ return negative ? -ret : ret;
}
-/* a: 0xFFFF -> -1.0
- 0x8000 -> 1.0
- 0x0000 -> 0.0
-*/
-static inline fixp_t fixp_new16(s16 a)
+/**
+ * fixp_sin32() returns the sin of an angle in degrees
+ *
+ * @degrees: angle, in degrees. The angle can be positive or negative
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 fixp_sin32(int degrees)
{
- return ((s32)a)>>(16-FRAC_N);
+ degrees = (degrees % 360 + 360) % 360;
+
+ return __fixp_sin32(degrees);
}
-static inline fixp_t fixp_cos(unsigned int degrees)
+/* cos(x) = sin(x + 90 degrees) */
+#define fixp_cos32(v) fixp_sin32((v) + 90)
+
+/*
+ * 16 bits variants
+ *
+ * The returned value ranges from -0x7fff to 0x7fff
+ */
+
+#define fixp_sin16(v) (fixp_sin32(v) >> 16)
+#define fixp_cos16(v) (fixp_cos32(v) >> 16)
+
+/**
+ * fixp_sin32_rad() - calculates the sin of an angle in radians
+ *
+ * @radians: angle, in radians
+ * @twopi: value to be used for 2*pi
+ *
+ * Provides a variant for cases where just 360
+ * values are not enough. This function uses linear
+ * interpolation over the wider range of values given by
+ * the twopi argument.
+ *
+ * Experimental tests gave a maximum difference of
+ * 0.000038 between the value calculated by sin() and
+ * the one produced by this function, when twopi is
+ * equal to 360000. That seems to be enough precision
+ * for practical purposes.
+ *
+ * Please notice that too-high values of twopi could cause
+ * overflows, so the routine will not allow values of twopi
+ * bigger than 2^18.
+ */
+static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
{
- int quadrant = (degrees / 90) & 3;
- unsigned int i = degrees % 90;
+ int degrees;
+ s32 v1, v2, dx, dy;
+ s64 tmp;
- if (quadrant == 1 || quadrant == 3)
- i = 90 - i;
+ /*
+ * Avoid too large values for twopi, as we don't want overflows.
+ */
+ BUG_ON(twopi > 1 << 18);
- i >>= 1;
+ degrees = (radians * 360) / twopi;
+ tmp = radians - (degrees * twopi) / 360;
- return (quadrant == 1 || quadrant == 2)? -cos_table[i] : cos_table[i];
-}
+ degrees = (degrees % 360 + 360) % 360;
+ v1 = __fixp_sin32(degrees);
-static inline fixp_t fixp_sin(unsigned int degrees)
-{
- return -fixp_cos(degrees + 90);
-}
+ v2 = fixp_sin32(degrees + 1);
-static inline fixp_t fixp_mult(fixp_t a, fixp_t b)
-{
- return ((s32)(a*b))>>FRAC_N;
+ dx = twopi / 360;
+ dy = v2 - v1;
+
+ tmp *= dy;
+
+ return v1 + div_s64(tmp, dx);
}
+/* cos(x) = sin(x + pi/2 radians) */
+
+#define fixp_cos32_rad(rad, twopi) \
+ fixp_sin32_rad(rad + twopi / 4, twopi)
+
#endif
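A brief usage sketch of the new fixed-point helpers; the values in the comments follow from the table and the interpolation above (twopi = 360000 as in the kernel-doc), and my_fixp_demo() itself is only illustrative.

#include <linux/fixp-arith.h>
#include <linux/printk.h>

static void my_fixp_demo(void)
{
	s32 s_deg = fixp_sin32(90);			/* 0x7fffffff */
	s32 s_rad = fixp_sin32_rad(90000, 360000);	/* also 0x7fffffff: 90000/360000 of a turn */
	s32 c16   = fixp_cos16(0);			/* 0x7fff */

	pr_info("sin32(90)=%d sin32_rad(90000,360000)=%d cos16(0)=%d\n",
		s_deg, s_rad, c16);
}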
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 52cc4492cb3a..35ec87e490b1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -314,6 +314,33 @@ struct page;
struct address_space;
struct writeback_control;
+#define IOCB_EVENTFD (1 << 0)
+#define IOCB_APPEND (1 << 1)
+#define IOCB_DIRECT (1 << 2)
+
+struct kiocb {
+ struct file *ki_filp;
+ loff_t ki_pos;
+ void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void *private;
+ int ki_flags;
+};
+
+static inline bool is_sync_kiocb(struct kiocb *kiocb)
+{
+ return kiocb->ki_complete == NULL;
+}
+
+static inline int iocb_flags(struct file *file);
+
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+{
+ *kiocb = (struct kiocb) {
+ .ki_filp = filp,
+ .ki_flags = iocb_flags(filp),
+ };
+}
+
/*
* "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
@@ -361,7 +388,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
/*
* migrate the contents of a page to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -848,6 +875,7 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
@@ -893,8 +921,8 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
- void (*lm_get_owner)(struct file_lock *, struct file_lock *);
- void (*lm_put_owner)(struct file_lock *);
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
@@ -1019,6 +1047,9 @@ extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);
+struct files_struct;
+extern void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1155,6 +1186,10 @@ static inline int lease_modify(struct file_lock *fl, int arg,
{
return -EINVAL;
}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
#endif /* !CONFIG_FILE_LOCKING */
@@ -1540,8 +1575,6 @@ struct file_operations {
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
@@ -1617,6 +1650,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec **ret_pointer);
extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
+extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -1786,7 +1820,7 @@ struct super_operations {
#define I_SYNC (1 << __I_SYNC)
#define I_REFERENCED (1 << 8)
#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << I_DIO_WAKEUP)
+#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
#define __I_DIRTY_TIME_EXPIRED 12
@@ -2145,7 +2179,7 @@ struct filename {
const __user char *uptr; /* original userland pointer */
struct audit_names *aname;
int refcnt;
- bool separate; /* should "name" be freed? */
+ const char iname[];
};
extern long vfs_truncate(struct path *, loff_t);
@@ -2545,16 +2579,12 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
-extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos);
@@ -2592,12 +2622,13 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
-ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *,
- loff_t, get_block_t, dio_iodone_t, int flags);
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ get_block_t, dio_iodone_t, int flags);
int dax_clear_blocks(struct inode *, sector_t block, long size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
#ifdef CONFIG_BLOCK
@@ -2613,27 +2644,56 @@ enum {
/* filesystem can handle aio writes beyond i_size */
DIO_ASYNC_EXTEND = 0x04,
+
+ /* inode/fs/bdev does not need truncate protection */
+ DIO_SKIP_DIO_COUNT = 0x08,
};
void dio_end_io(struct bio *bio, int error);
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags);
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block,
+ dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags);
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
- struct inode *inode, struct iov_iter *iter, loff_t offset,
- get_block_t get_block)
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+ struct inode *inode,
+ struct iov_iter *iter, loff_t offset,
+ get_block_t get_block)
{
- return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+ return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
offset, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
+
+/*
+ * inode_dio_begin - signal start of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called before starting a direct I/O request, and is used to
+ * account the request against the inode so that callers of inode_dio_wait()
+ * can tell when all direct I/O has quiesced.
+ */
+static inline void inode_dio_begin(struct inode *inode)
+{
+ atomic_inc(&inode->i_dio_count);
+}
+
+/*
+ * inode_dio_end - signal finish of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+static inline void inode_dio_end(struct inode *inode)
+{
+ if (atomic_dec_and_test(&inode->i_dio_count))
+ wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -2662,7 +2722,6 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
-extern int vfs_readdir(struct file *, filldir_t, void *);
extern int iterate_dir(struct file *, struct dir_context *);
extern int vfs_stat(const char __user *, struct kstat *);
@@ -2760,6 +2819,16 @@ static inline bool io_is_direct(struct file *filp)
return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
}
+static inline int iocb_flags(struct file *file)
+{
+ int res = 0;
+ if (file->f_flags & O_APPEND)
+ res |= IOCB_APPEND;
+ if (io_is_direct(file))
+ res |= IOCB_DIRECT;
+ return res;
+}
+
static inline ino_t parent_ino(struct dentry *dentry)
{
ino_t res;
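A hedged sketch of how the relocated kiocb is set up for a synchronous operation with the helpers above; my_setup_sync_kiocb() is illustrative only.

#include <linux/fs.h>

/* Sketch only: prepare a synchronous kiocb for an I/O at *ppos.
 * is_sync_kiocb() stays true because ki_complete remains NULL, and
 * init_sync_kiocb() copies iocb_flags(filp) into ki_flags. */
static void my_setup_sync_kiocb(struct kiocb *kiocb, struct file *filp,
				loff_t *ppos)
{
	init_sync_kiocb(kiocb, filp);
	kiocb->ki_pos = *ppos;
}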
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index 9dc4e0384bfb..3886b3bffd7f 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -13,6 +13,8 @@ struct vfsmount;
static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
{
init_waitqueue_head(&p->wait);
+ INIT_HLIST_NODE(&p->s_list);
+ INIT_HLIST_NODE(&p->m_list);
p->kill = kill;
}
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c674ee8f7fca..f9ecf63d47f1 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -13,6 +13,7 @@ struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
+struct bpf_prog;
struct trace_print_flags {
unsigned long mask;
@@ -45,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
const char *ftrace_print_array_seq(struct trace_seq *p,
- const void *buf, int buf_len,
+ const void *buf, int count,
size_t el_size);
struct trace_iterator;
@@ -202,7 +203,7 @@ enum trace_reg {
struct ftrace_event_call;
struct ftrace_event_class {
- char *system;
+ const char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
@@ -252,6 +253,7 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED_BIT,
TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
+ TRACE_EVENT_FL_KPROBE_BIT,
};
/*
@@ -265,6 +267,7 @@ enum {
* it is best to clear the buffers that used it).
* USE_CALL_FILTER - For ftrace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
+ * KPROBE - Event is a kprobe
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -274,6 +277,7 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
+ TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
};
struct ftrace_event_call {
@@ -285,7 +289,7 @@ struct ftrace_event_call {
struct tracepoint *tp;
};
struct trace_event event;
- const char *print_fmt;
+ char *print_fmt;
struct event_filter *filter;
void *mod;
void *data;
@@ -303,6 +307,7 @@ struct ftrace_event_call {
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
struct hlist_head __percpu *perf_events;
+ struct bpf_prog *prog;
int (*perf_perm)(struct ftrace_event_call *,
struct perf_event *);
@@ -548,6 +553,15 @@ event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
event_triggers_post_call(file, tt);
}
+#ifdef CONFIG_BPF_SYSCALL
+unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
+#else
+static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
+{
+ return 1;
+}
+#endif
+
enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
new file mode 100644
index 000000000000..0408545bce42
--- /dev/null
+++ b/include/linux/fwnode.h
@@ -0,0 +1,27 @@
+/*
+ * fwnode.h - Firmware device node object handle type definition.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_FWNODE_H_
+#define _LINUX_FWNODE_H_
+
+enum fwnode_type {
+ FWNODE_INVALID = 0,
+ FWNODE_OF,
+ FWNODE_ACPI,
+ FWNODE_PDATA,
+};
+
+struct fwnode_handle {
+ enum fwnode_type type;
+ struct fwnode_handle *secondary;
+};
+
+#endif
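A minimal sketch of dispatching on the new fwnode_type, walking the primary/secondary pair; my_fwnode_find_type() is a hypothetical helper, not something this patch adds.

#include <linux/err.h>
#include <linux/fwnode.h>

static struct fwnode_handle *my_fwnode_find_type(struct fwnode_handle *fwnode,
						 enum fwnode_type type)
{
	if (fwnode && fwnode->type == type)
		return fwnode;
	if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary) &&
	    fwnode->secondary->type == type)
		return fwnode->secondary;
	return NULL;
}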
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 51bd1e72a917..15928f0647e4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,7 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
+#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
@@ -57,8 +58,10 @@ struct vm_area_struct;
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. This modifier is deprecated and no new
- * users should be added.
+ * cannot handle allocation failures. New users should be evaluated carefully
+ * (and the flag should be used only when there is no reasonable failure policy)
+ * but it is definitely preferable to use the flag rather than open-coding an
+ * endless loop around the allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
@@ -85,6 +88,7 @@ struct vm_area_struct;
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
@@ -117,16 +121,6 @@ struct vm_area_struct;
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
-/*
- * GFP_THISNODE does not perform any reclaim, you most likely want to
- * use __GFP_THISNODE to allocate from a given node without fallback!
- */
-#ifdef CONFIG_NUMA
-#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
-#else
-#define GFP_THISNODE ((__force gfp_t)0)
-#endif
-
/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 45afc2dee560..3a7c9ffd5ab9 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -16,6 +16,15 @@ struct device;
*/
struct gpio_desc;
+/**
+ * struct gpio_descs - struct containing an array of descriptors that can be
+ * obtained using gpiod_get_array()
+ */
+struct gpio_descs {
+ unsigned int ndescs;
+ struct gpio_desc *desc[];
+};
+
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
@@ -34,6 +43,9 @@ enum gpiod_flags {
#ifdef CONFIG_GPIOLIB
+/* Return the number of GPIOs associated with a device / function */
+int gpiod_count(struct device *dev, const char *con_id);
+
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
@@ -49,7 +61,14 @@ struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
void gpiod_put(struct gpio_desc *desc);
+void gpiod_put_array(struct gpio_descs *descs);
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -64,7 +83,14 @@ struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
+struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
@@ -110,9 +136,15 @@ struct fwnode_handle;
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname);
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
+ const char *con_id,
struct fwnode_handle *child);
#else /* CONFIG_GPIOLIB */
+static inline int gpiod_count(struct device *dev, const char *con_id)
+{
+ return 0;
+}
+
static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
@@ -142,6 +174,20 @@ __gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
+static inline struct gpio_descs *__must_check
+gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void gpiod_put(struct gpio_desc *desc)
{
might_sleep();
@@ -150,6 +196,14 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void gpiod_put_array(struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline struct gpio_desc *__must_check
__devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -181,6 +235,20 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
might_sleep();
@@ -189,6 +257,15 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void devm_gpiod_put_array(struct device *dev,
+ struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
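An illustrative consumer of the new GPIO array API; the "reset" function name and the loop body are placeholders.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_assert_all_resets(struct device *dev)
{
	struct gpio_descs *descs;
	unsigned int i;

	/* One call acquires every GPIO mapped to the "reset" function. */
	descs = devm_gpiod_get_array(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(descs))
		return PTR_ERR(descs);

	for (i = 0; i < descs->ndescs; i++)
		gpiod_set_value(descs->desc[i], 1);

	return 0;
}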
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c497c62889d1..f1b36593ec9f 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/pinctrl/pinctrl.h>
struct device;
struct gpio_desc;
@@ -173,6 +174,53 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
#endif /* CONFIG_GPIOLIB_IRQCHIP */
+#ifdef CONFIG_PINCTRL
+
+/**
+ * struct gpio_pin_range - pin range controlled by a gpio chip
+ * @node: list node for maintaining the set of pin ranges, used internally
+ * @pctldev: pinctrl device which handles corresponding pins
+ * @range: actual range of pins controlled by a gpio controller
+ */
+
+struct gpio_pin_range {
+ struct list_head node;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range range;
+};
+
+int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins);
+int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group);
+void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+
+#else
+
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins)
+{
+ return 0;
+}
+static inline int
+gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ return 0;
+}
+
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_PINCTRL */
+
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label);
void gpiochip_free_own_desc(struct gpio_desc *desc);
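A short sketch of registering a GPIO-to-pin-controller mapping with the helpers now stubbed out for !CONFIG_PINCTRL; the controller name and the offsets are placeholders.

#include <linux/gpio/driver.h>

/* Sketch only: map GPIOs 0..31 of this chip onto pins 32..63 of a pin
 * controller registered as "my-pinctrl" (placeholder name). */
static int my_gpiochip_map_pins(struct gpio_chip *chip)
{
	return gpiochip_add_pin_range(chip, "my-pinctrl", 0, 32, 32);
}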
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 0408421d885f..0042bf330b99 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -74,7 +74,7 @@ struct sensor_hub_pending {
* @usage: Usage id for this hub device instance.
* @start_collection_index: Starting index for a phy type collection
* @end_collection_index: Last index for a phy type collection
- * @mutex: synchronizing mutex.
+ * @mutex_ptr: synchronizing mutex pointer.
* @pending: Holds information of pending sync read request.
*/
struct hid_sensor_hub_device {
@@ -84,7 +84,7 @@ struct hid_sensor_hub_device {
u32 usage;
int start_collection_index;
int end_collection_index;
- struct mutex mutex;
+ struct mutex *mutex_ptr;
struct sensor_hub_pending pending;
};
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 176b43670e5d..f17980de2662 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -815,6 +815,8 @@ void hid_disconnect(struct hid_device *hid);
const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id);
s32 hid_snto32(__u32 value, unsigned n);
+__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
+ unsigned offset, unsigned n);
/**
* hid_device_io_start - enable HID input during probe, remove
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 464f33814a94..d2ba7d334039 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -135,6 +135,7 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 3ec06300d535..5dd60c2e120f 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -135,9 +135,9 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
* @device: Driver model representation of the device
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
- * e_handler: Callback for handling port events (RX Wake High/Low)
- * pclaimed: Keeps tracks if the clients claimed its associated HSI port
- * nb: Notifier block for port events
+ * @e_handler: Callback for handling port events (RX Wake High/Low)
+ * @pclaimed: Keeps track of whether the client claimed its associated HSI port
+ * @nb: Notifier block for port events
*/
struct hsi_client {
struct device device;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7b5785032049..205026175c42 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -22,7 +22,13 @@ struct mmu_gather;
struct hugepage_subpool {
spinlock_t lock;
long count;
- long max_hpages, used_hpages;
+ long max_hpages; /* Maximum huge pages or -1 if no maximum. */
+ long used_hpages; /* Used count against maximum, includes */
+ /* both alloced and reserved pages. */
+ struct hstate *hstate;
+ long min_hpages; /* Minimum huge pages or -1 if no minimum. */
+ long rsv_hpages; /* Pages reserved against global pool to */
+ /* sasitfy minimum size. */
};
struct resv_map {
@@ -38,11 +44,10 @@ extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
-struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
+ long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-int PageHuge(struct page *page);
-
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -79,7 +84,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
-bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -109,11 +113,6 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
#else /* !CONFIG_HUGETLB_PAGE */
-static inline int PageHuge(struct page *page)
-{
- return 0;
-}
-
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
#define putback_active_hugepage(p) do {} while (0)
-#define is_hugepage_active(x) false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index eb7b414d232b..4f7d8f4b1e9a 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -50,10 +50,14 @@ struct hwrng {
struct completion cleanup_done;
};
+struct device;
+
/** Register a new Hardware Random Number Generator driver. */
extern int hwrng_register(struct hwrng *rng);
+extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
+extern void devm_hwrng_unregister(struct device *dev, struct hwrng *rng);
/** Feed random bits into the pool. */
extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
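A minimal sketch of the new device-managed registration path; my_rng_read(), the driver name, and the probe function are assumptions for illustration.

#include <linux/hw_random.h>
#include <linux/platform_device.h>

static int my_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* Placeholder: a real driver returns the number of bytes put in @data. */
	return 0;
}

static struct hwrng my_rng = {
	.name = "my-rng",
	.read = my_rng_read,
};

static int my_rng_probe(struct platform_device *pdev)
{
	/* Unregistration now happens automatically when the device goes away. */
	return devm_hwrng_register(&pdev->dev, &my_rng);
}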
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5a2ba674795e..902c37aef67e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -646,12 +646,13 @@ struct hv_input_signal_event_buffer {
};
struct vmbus_channel {
+ /* Unique channel id */
+ int id;
+
struct list_head listentry;
struct hv_device *device_obj;
- struct work_struct work;
-
enum vmbus_channel_state state;
struct vmbus_channel_offer_channel offermsg;
@@ -672,7 +673,6 @@ struct vmbus_channel {
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
spinlock_t inbound_lock;
- struct workqueue_struct *controlwq;
struct vmbus_close_msg close_msg;
@@ -758,6 +758,9 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
+
+ int num_sc;
+ int next_oc;
};
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
@@ -861,6 +864,14 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
+extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ enum vmbus_packet_type type,
+ u32 flags,
+ bool kick_q);
+
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount,
@@ -868,6 +879,15 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
+extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid,
+ u32 flags,
+ bool kick_q);
+
extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *mpb,
void *buffer,
@@ -1107,6 +1127,16 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
}
/*
+ * NetworkDirect. This is the guest RDMA service.
+ * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
+ */
+#define HV_ND_GUID \
+ .guid = { \
+ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \
+ 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \
+ }
+
+/*
* Common header for Hyper-V ICs
*/
@@ -1213,6 +1243,7 @@ void hv_kvp_onchannelcallback(void *);
int hv_vss_init(struct hv_util_service *);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *);
+void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
extern struct resource hyperv_mmio;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f17da50402a4..e83a738a3b87 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -253,10 +253,10 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
- I2C_SLAVE_REQ_READ_START,
- I2C_SLAVE_REQ_READ_END,
- I2C_SLAVE_REQ_WRITE_START,
- I2C_SLAVE_REQ_WRITE_END,
+ I2C_SLAVE_READ_REQUESTED,
+ I2C_SLAVE_WRITE_REQUESTED,
+ I2C_SLAVE_READ_PROCESSED,
+ I2C_SLAVE_WRITE_RECEIVED,
I2C_SLAVE_STOP,
};
@@ -278,7 +278,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
- * @acpi_node: ACPI device node
+ * @fwnode: device node supplied by the platform firmware
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@@ -299,7 +299,7 @@ struct i2c_board_info {
void *platform_data;
struct dev_archdata *archdata;
struct device_node *of_node;
- struct acpi_dev_node acpi_node;
+ struct fwnode_handle *fwnode;
int irq;
};
@@ -435,8 +435,8 @@ struct i2c_bus_recovery_info {
void (*set_scl)(struct i2c_adapter *, int val);
int (*get_sda)(struct i2c_adapter *);
- void (*prepare_recovery)(struct i2c_bus_recovery_info *bri);
- void (*unprepare_recovery)(struct i2c_bus_recovery_info *bri);
+ void (*prepare_recovery)(struct i2c_adapter *);
+ void (*unprepare_recovery)(struct i2c_adapter *);
/* gpio recovery */
int scl_gpio;
@@ -449,6 +449,48 @@ int i2c_recover_bus(struct i2c_adapter *adap);
int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
int i2c_generic_scl_recovery(struct i2c_adapter *adap);
+/**
+ * struct i2c_adapter_quirks - describe flaws of an i2c adapter
+ * @flags: see I2C_AQ_* for possible flags and read below
+ * @max_num_msgs: maximum number of messages per transfer
+ * @max_write_len: maximum length of a write message
+ * @max_read_len: maximum length of a read message
+ * @max_comb_1st_msg_len: maximum length of the first msg in a combined message
+ * @max_comb_2nd_msg_len: maximum length of the second msg in a combined message
+ *
+ * Note about combined messages: Some I2C controllers can only send one message
+ * per transfer, plus something called combined message or write-then-read.
+ * This is (usually) a small write message followed by a read message and
+ * barely enough to access register-based devices like EEPROMs. There is a flag
+ * to support this mode. It implies max_num_msgs = 2 and does the length checks
+ * with max_comb_*_len because combined message mode usually has its own
+ * limitations. Because of HW implementations, some controllers can actually do
+ * write-then-anything or other variants. To support that, write-then-read has
+ * been broken out into smaller bits like write-first and read-second which can
+ * be combined as needed.
+ */
+
+struct i2c_adapter_quirks {
+ u64 flags;
+ int max_num_msgs;
+ u16 max_write_len;
+ u16 max_read_len;
+ u16 max_comb_1st_msg_len;
+ u16 max_comb_2nd_msg_len;
+};
+
+/* enforce max_num_msgs = 2 and use max_comb_*_len for length checks */
+#define I2C_AQ_COMB BIT(0)
+/* first combined message must be write */
+#define I2C_AQ_COMB_WRITE_FIRST BIT(1)
+/* second combined message must be read */
+#define I2C_AQ_COMB_READ_SECOND BIT(2)
+/* both combined messages must have the same target address */
+#define I2C_AQ_COMB_SAME_ADDR BIT(3)
+/* convenience macro for the typical write-then-read case */
+#define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \
+ I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
+
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
* with the access algorithms necessary to access it.
@@ -474,6 +516,7 @@ struct i2c_adapter {
struct list_head userspace_clients;
struct i2c_bus_recovery_info *bus_recovery_info;
+ const struct i2c_adapter_quirks *quirks;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
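An illustrative use of the new quirk table for an adapter limited to a single write-then-read transfer; the length limits are placeholders.

#include <linux/i2c.h>

static const struct i2c_adapter_quirks my_i2c_quirks = {
	.flags = I2C_AQ_COMB_WRITE_THEN_READ,
	.max_comb_1st_msg_len = 32,	/* placeholder write limit */
	.max_comb_2nd_msg_len = 64,	/* placeholder read limit */
};

static void my_i2c_apply_quirks(struct i2c_adapter *adap)
{
	/* Lets the I2C core check transfers against these limits. */
	adap->quirks = &my_i2c_quirks;
}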
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 6e82d888287c..8872ca103d06 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -28,7 +28,9 @@
#include <asm/byteorder.h>
#define IEEE802154_MTU 127
-#define IEEE802154_MIN_PSDU_LEN 5
+#define IEEE802154_ACK_PSDU_LEN 5
+#define IEEE802154_MIN_PSDU_LEN 9
+#define IEEE802154_FCS_LEN 2
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
@@ -38,6 +40,7 @@
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
+#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
#define IEEE802154_MAX_CHANNEL 26
#define IEEE802154_MAX_PAGE 31
@@ -204,11 +207,18 @@ enum {
/**
* ieee802154_is_valid_psdu_len - check if psdu len is valid
+ * available lengths:
+ * 0-4 Reserved
+ * 5 MPDU (Acknowledgment)
+ * 6-8 Reserved
+ * 9-127 MPDU
+ *
* @len: psdu len with (MHR + payload + MFR)
*/
static inline bool ieee802154_is_valid_psdu_len(const u8 len)
{
- return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
+ return (len == IEEE802154_ACK_PSDU_LEN ||
+ (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
}
/**
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index a57bca2ea97e..dad8b00beed2 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -44,6 +44,7 @@ struct br_ip_list {
#define BR_PROMISC BIT(7)
#define BR_PROXYARP BIT(8)
#define BR_LEARNING_SYNC BIT(9)
+#define BR_PROXYARP_WIFI BIT(10)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 119130e9298b..da4929927f69 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -14,5 +14,6 @@ struct ifla_vf_info {
__u32 linkstate;
__u32 min_tx_rate;
__u32 max_tx_rate;
+ __u32 rss_query_en;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index aff7ad8a4ea3..66a7d7600f43 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <uapi/linux/if_pppox.h>
static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
@@ -32,6 +33,7 @@ struct pppoe_opt {
struct pppoe_addr pa; /* what this socket is bound to*/
struct sockaddr_pppox relay; /* what socket data will be
relayed to (PPPoE relaying) */
+ struct work_struct padt_work;/* Work item for handling PADT */
};
struct pptp_opt {
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index b11b28a30b9e..920e4457ce6e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -561,4 +561,71 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
skb->protocol = htons(ETH_P_802_2);
}
+/**
+ * skb_vlan_tagged - check if skb is vlan tagged.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged, regardless of whether it is hardware
+ * accelerated or not.
+ */
+static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+{
+ if (!skb_vlan_tag_present(skb) &&
+ likely(skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ return true;
+}
+
+/**
+ * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * of whether it is hardware accelerated or not.
+ */
+static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+
+ if (!skb_vlan_tag_present(skb)) {
+ struct vlan_ethhdr *veh;
+
+ if (likely(protocol != htons(ETH_P_8021Q) &&
+ protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ }
+
+ if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ return false;
+
+ return true;
+}
+
+/**
+ * vlan_features_check - drop unsafe features for skb with multiple tags.
+ * @skb: skbuff to query
+ * @features: features to be checked
+ *
+ * Returns features without unsafe ones if the skb has multiple tags.
+ */
+static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (skb_vlan_tagged_multi(skb))
+ features = netdev_intersect_features(features,
+ NETIF_F_SG |
+ NETIF_F_HIGHDMA |
+ NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ return features;
+}
+
#endif /* !(_LINUX_IF_VLAN_H_) */
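A hedged sketch of a driver ndo_features_check built on the new helper; my_features_check() is a placeholder name.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Sketch only: drop features the hardware cannot apply once the skb
 * carries more than one VLAN tag. */
static netdev_features_t my_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	return vlan_features_check(skb, features);
}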
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 46da02410a09..ac48b10c9395 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -11,33 +11,34 @@ struct sk_buff;
struct netlink_callback;
struct inet_diag_handler {
- void (*dump)(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct inet_diag_req_v2 *r,
- struct nlattr *bc);
-
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req);
-
- void (*idiag_get_info)(struct sock *sk,
- struct inet_diag_msg *r,
- void *info);
- __u16 idiag_type;
+ void (*dump)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
+
+ int (*dump_one)(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
+
+ void (*idiag_get_info)(struct sock *sk,
+ struct inet_diag_msg *r,
+ void *info);
+ __u16 idiag_type;
};
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh);
+ struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
+ u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
- struct netlink_callback *cb, struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req);
+ struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a65208a8fe18..796ef9645827 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -115,10 +115,19 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
-#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
+#define ecap_pss(e) ((e >> 35) & 0x1f)
+#define ecap_eafs(e) ((e >> 34) & 0x1)
+#define ecap_nwfs(e) ((e >> 33) & 0x1)
+#define ecap_srs(e) ((e >> 31) & 0x1)
+#define ecap_ers(e) ((e >> 30) & 0x1)
+#define ecap_prs(e) ((e >> 29) & 0x1)
+#define ecap_pasid(e) ((e >> 28) & 0x1)
+#define ecap_dis(e) ((e >> 27) & 0x1)
+#define ecap_nest(e) ((e >> 26) & 0x1)
+#define ecap_mts(e) ((e >> 25) & 0x1)
+#define ecap_ecs(e) ((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) \
- (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
@@ -180,6 +189,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_RTT (((u64)1) << 11)
+
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
diff --git a/include/linux/io.h b/include/linux/io.h
index fa02e55e5a2e..986f2bffea1e 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -38,6 +38,14 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
}
#endif
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+void __init ioremap_huge_init(void);
+int arch_ioremap_pud_supported(void);
+int arch_ioremap_pmd_supported(void);
+#else
+static inline void ioremap_huge_init(void) { }
+#endif
+
/*
* Managed iomap interface
*/
@@ -64,6 +72,8 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size);
+void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
+ resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
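A hedged sketch of how a driver might use the newly exported devm_ioremap_wc() helper in its probe path; foo_probe and the use of the first MEM resource are assumptions for illustration, not taken from any real driver.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Write-combining mapping, released automatically on unbind. */
	base = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* ... program the device through "base" ... */
	return 0;
}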
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
new file mode 100644
index 000000000000..bbced83b32ee
--- /dev/null
+++ b/include/linux/iommu-common.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_IOMMU_COMMON_H
+#define _LINUX_IOMMU_COMMON_H
+
+#include <linux/spinlock_types.h>
+#include <linux/device.h>
+#include <asm/page.h>
+
+#define IOMMU_POOL_HASHBITS 4
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+};
+
+struct iommu_map_table {
+ unsigned long table_map_base;
+ unsigned long table_shift;
+ unsigned long nr_pools;
+ void (*lazy_flush)(struct iommu_map_table *);
+ unsigned long poolsize;
+ struct iommu_pool pools[IOMMU_NR_POOLS];
+ u32 flags;
+#define IOMMU_HAS_LARGE_POOL 0x00000001
+#define IOMMU_NO_SPAN_BOUND 0x00000002
+#define IOMMU_NEED_FLUSH 0x00000004
+ struct iommu_pool large_pool;
+ unsigned long *map;
+};
+
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+ unsigned long num_entries,
+ u32 table_shift,
+ void (*lazy_flush)(struct iommu_map_table *),
+ bool large_pool, u32 npools,
+ bool skip_span_boundary_check);
+
+extern unsigned long iommu_tbl_range_alloc(struct device *dev,
+ struct iommu_map_table *iommu,
+ unsigned long npages,
+ unsigned long *handle,
+ unsigned long mask,
+ unsigned int align_order);
+
+extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
+ u64 dma_addr, unsigned long npages,
+ unsigned long entry);
+
+#endif
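A hypothetical sketch of how an architecture might wire up the shared pool allocator declared above; the foo_* names, the PAGE_SHIFT granularity and the pool parameters (default pool count, no lazy flush, no large pool) are assumptions, and error handling is elided.

#include <linux/iommu-common.h>

static struct iommu_map_table foo_tbl;

static void foo_iommu_setup(unsigned long num_entries, unsigned long *bitmap)
{
	foo_tbl.table_map_base = 0;	/* DMA offset of entry 0 */
	foo_tbl.map = bitmap;		/* one bit per table entry */

	/* Split the table into hashed pools; no lazy-flush hook, no large
	 * pool, default pool count, honour segment boundaries. */
	iommu_tbl_pool_init(&foo_tbl, num_entries, PAGE_SHIFT,
			    NULL, false, 0, false);
}

static unsigned long foo_map(struct device *dev, unsigned long npages,
			     unsigned long *hint)
{
	/* Returns the index of the first of npages contiguous free entries;
	 * *hint caches the last position to speed up repeated allocations. */
	return iommu_tbl_range_alloc(dev, &foo_tbl, npages, hint, ~0UL, 0);
}

static void foo_unmap(u64 dma_addr, unsigned long npages, unsigned long entry)
{
	iommu_tbl_range_free(&foo_tbl, dma_addr, npages, entry);
}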
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 38daa453f2e5..0546b8710ce3 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -51,9 +51,33 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+/* Domain feature flags */
+#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
+#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
+ implementation */
+#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+
+/*
+ * These are the possible domain types
+ *
+ * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
+ * devices
+ * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
+ * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
+ * for VMs
+ * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
+ * This flag allows IOMMU drivers to implement
+ * certain optimizations for these domains
+ */
+#define IOMMU_DOMAIN_BLOCKED (0U)
+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API)
+
struct iommu_domain {
+ unsigned type;
const struct iommu_ops *ops;
- void *priv;
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -113,8 +137,11 @@ enum iommu_attr {
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
- int (*domain_init)(struct iommu_domain *domain);
- void (*domain_destroy)(struct iommu_domain *domain);
+
+ /* Domain allocation and freeing by the iommu driver */
+ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+ void (*domain_free)(struct iommu_domain *);
+
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
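A hedged sketch of the driver side of the new callbacks: instead of initializing a core-allocated domain, the driver now allocates (and frees) a private structure that embeds struct iommu_domain. The foo_* names are placeholders, and the assumption that the core fills in type and ops after the call reflects the intent of this change rather than code shown in this hunk.

#include <linux/iommu.h>
#include <linux/slab.h>

struct foo_domain {
	/* driver-private page-table state would live here */
	struct iommu_domain domain;	/* embedded generic domain */
};

static struct iommu_domain *foo_domain_alloc(unsigned type)
{
	struct foo_domain *fd;

	/* This sketch only handles classic unmanaged (VM-style) domains. */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return NULL;

	return &fd->domain;
}

static void foo_domain_free(struct iommu_domain *domain)
{
	kfree(container_of(domain, struct foo_domain, domain));
}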
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 2c5250222278..388e3ae94f7a 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -196,10 +196,8 @@ extern struct resource * __request_region(struct resource *,
/* Compatibility cruft */
#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
-#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
-extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -207,12 +205,6 @@ extern int release_mem_region_adjustable(struct resource *, resource_size_t,
resource_size_t);
#endif
-static inline int __deprecated check_region(resource_size_t s,
- resource_size_t n)
-{
- return __check_region(&ioport_resource, s, n);
-}
-
/* Wrappers for managed devices */
struct device;
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 4d5169f5d7d1..82806c60aa42 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -53,6 +53,10 @@ struct ipv6_devconf {
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
+ struct ipv6_stable_secret {
+ bool initialized;
+ struct in6_addr secret;
+ } stable_secret;
void *sysctl;
};
diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h
new file mode 100644
index 000000000000..de3419ed3937
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-acpi.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARM_GIC_ACPI_H_
+#define ARM_GIC_ACPI_H_
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hard-code the sizes here: the MADT does not carry the region sizes
+ * (unlike FDT), but that is fine because they can be inferred from the
+ * GIC specification.
+ */
+#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+
+struct acpi_table_header;
+
+int gic_v2_acpi_init(struct acpi_table_header *table);
+void acpi_gic_init(void);
+#else
+static inline void acpi_gic_init(void) { }
+#endif
+
+#endif /* ARM_GIC_ACPI_H_ */
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 36ec4ae74634..9de976b4f9a7 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -95,8 +95,6 @@
struct device_node;
-extern struct irq_chip gic_arch_extn;
-
void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 3ea2e4754c40..9b1ad3734911 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -165,6 +165,8 @@
#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
#define GIC_VPE_PEND_SWINT1_SHF 5
#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
+#define GIC_VPE_PEND_FDC_SHF 6
+#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
/* GIC_VPE_RMASK Masks */
#define GIC_VPE_RMASK_WD_SHF 0
@@ -179,6 +181,8 @@
#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
#define GIC_VPE_RMASK_SWINT1_SHF 5
#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
+#define GIC_VPE_RMASK_FDC_SHF 6
+#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
/* GIC_VPE_SMASK Masks */
#define GIC_VPE_SMASK_WD_SHF 0
@@ -193,6 +197,8 @@
#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
#define GIC_VPE_SMASK_SWINT1_SHF 5
#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
+#define GIC_VPE_SMASK_FDC_SHF 6
+#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
@@ -247,4 +253,5 @@ extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
+extern int gic_get_c0_fdc_int(void);
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 47cb09edec1a..348c6f47e4cc 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
}
-/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
- a += JHASH_INITVAL;
- b += JHASH_INITVAL;
+ a += initval;
+ b += initval;
c += initval;
__jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
return c;
}
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
- return jhash_3words(a, b, 0, initval);
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
- return jhash_3words(a, 0, 0, initval);
+ return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */
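A userspace sketch of the wrapper change only: the point is that each fixed-length helper now folds the word count into the seed, so jhash_2words(a, b, s) no longer feeds the mixer the same inputs as jhash_3words(a, b, 0, s). The mix() stand-in below is deliberately not the real __jhash_final(); only the wrapper arithmetic mirrors the header.

#include <stdio.h>
#include <stdint.h>

#define JHASH_INITVAL 0xdeadbeef	/* constant used by the kernel header */

/* Stand-in mixer; NOT the real __jhash_final(). */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t c)
{
	return a ^ (b * 0x9e3779b1u) ^ (c * 0x85ebca6bu);
}

static uint32_t __jhash_nwords(uint32_t a, uint32_t b, uint32_t c,
			       uint32_t initval)
{
	return mix(a + initval, b + initval, c + initval);
}

static uint32_t jhash_3words(uint32_t a, uint32_t b, uint32_t c,
			     uint32_t initval)
{
	return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
}

static uint32_t jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
{
	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}

int main(void)
{
	/* Before this change jhash_2words(1, 2, 0) was defined as
	 * jhash_3words(1, 2, 0, 0); the length-dependent seed now feeds
	 * the mixer different values for the two calls. */
	printf("2words=%#x 3words=%#x\n", jhash_2words(1, 2, 0),
	       jhash_3words(1, 2, 0, 0));
	return 0;
}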
diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h
new file mode 100644
index 000000000000..e7f1cc7a2284
--- /dev/null
+++ b/include/linux/jz4780-nemc.h
@@ -0,0 +1,43 @@
+/*
+ * JZ4780 NAND/external memory controller (NEMC)
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_JZ4780_NEMC_H__
+#define __LINUX_JZ4780_NEMC_H__
+
+#include <linux/types.h>
+
+struct device;
+
+/*
+ * Number of NEMC banks. Note that there are actually 6, but they are numbered
+ * from 1.
+ */
+#define JZ4780_NEMC_NUM_BANKS 7
+
+/**
+ * enum jz4780_nemc_bank_type - device types which can be connected to a bank
+ * @JZ4780_NEMC_BANK_SRAM: SRAM
+ * @JZ4780_NEMC_BANK_NAND: NAND
+ */
+enum jz4780_nemc_bank_type {
+ JZ4780_NEMC_BANK_SRAM,
+ JZ4780_NEMC_BANK_NAND,
+};
+
+extern unsigned int jz4780_nemc_num_banks(struct device *dev);
+
+extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
+ enum jz4780_nemc_bank_type type);
+extern void jz4780_nemc_assert(struct device *dev, unsigned int bank,
+ bool assert);
+
+#endif /* __LINUX_JZ4780_NEMC_H__ */
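A hypothetical consumer-side sketch built only on the declarations above; the foo_nand_attach() helper, the bank-range check and the assert/de-assert pattern are illustrative assumptions.

#include <linux/errno.h>
#include <linux/jz4780-nemc.h>

static int foo_nand_attach(struct device *nemc_dev, unsigned int bank)
{
	/* Banks are numbered 1..6; index 0 is unused. */
	if (bank == 0 || bank >= JZ4780_NEMC_NUM_BANKS)
		return -EINVAL;

	/* Tell the NEMC that this bank drives a NAND chip. */
	jz4780_nemc_set_type(nemc_dev, bank, JZ4780_NEMC_BANK_NAND);

	/* Assert the bank around an access, then release it. */
	jz4780_nemc_assert(nemc_dev, bank, true);
	/* ... issue NAND command/address/data cycles ... */
	jz4780_nemc_assert(nemc_dev, bank, false);

	return 0;
}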
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5bb074431eb0..5486d777b706 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void kasan_kmalloc_large(const void *ptr, size_t size);
void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
void kasan_krealloc(const void *object, size_t new_size);
@@ -71,6 +72,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size) {}
static inline void kasan_krealloc(const void *object, size_t new_size) {}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index be342b94c640..b33c7797eb57 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -23,14 +23,6 @@
#define ___config_enabled(__ignored, val, ...) val
/*
- * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
- *
- */
-#define IS_ENABLED(option) \
- (config_enabled(option) || config_enabled(option##_MODULE))
-
-/*
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
@@ -43,4 +35,20 @@
*/
#define IS_MODULE(option) config_enabled(option##_MODULE)
+/*
+ * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
+ * code can call a function defined in code compiled based on CONFIG_FOO.
+ * This is similar to IS_ENABLED(), but returns false when invoked from
+ * built-in code when CONFIG_FOO is set to 'm'.
+ */
+#define IS_REACHABLE(option) (config_enabled(option) || \
+ (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+
+/*
+ * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
+ * 0 otherwise.
+ */
+#define IS_ENABLED(option) \
+ (IS_BUILTIN(option) || IS_MODULE(option))
+
#endif /* __LINUX_KCONFIG_H */
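A standalone sketch of the reordered macros; the placeholder machinery from the top of the header is reconstructed here so the snippet builds on its own, and CONFIG_FOO is a stand-in defined to 1 the way a '=y' option would be.

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg)		_config_enabled(cfg)
#define _config_enabled(value)		__config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk)	___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define IS_BUILTIN(option)	config_enabled(option)
#define IS_MODULE(option)	config_enabled(option##_MODULE)
#define IS_REACHABLE(option)	(config_enabled(option) || \
		(config_enabled(option##_MODULE) && config_enabled(MODULE)))
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_FOO 1		/* pretend CONFIG_FOO=y */

int main(void)
{
	/* Built-in option: both enabled and reachable from built-in code;
	 * for a '=m' option compiled into the kernel image, IS_ENABLED()
	 * would still be 1 while IS_REACHABLE() would be 0. */
	printf("IS_ENABLED=%d IS_REACHABLE=%d\n",
	       IS_ENABLED(CONFIG_FOO), IS_REACHABLE(CONFIG_FOO));
	return 0;
}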
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6d630d31ef3..3a5b48e52a9e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -103,6 +103,18 @@
(((__x) - ((__d) / 2)) / (__d)); \
} \
)
+/*
+ * Same as above but for u64 dividends. divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
/*
* Multiplies an integer by a fraction, while avoiding unnecessary
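A userspace sketch of the same round-to-nearest rule, with do_div() replaced by plain 64-bit division since the divisor is known to fit in 32 bits.

#include <stdio.h>
#include <stdint.h>

static uint64_t div_round_closest_ull(uint64_t x, uint32_t divisor)
{
	/* Add half the divisor before truncating, so 7/2 -> 4, 5/4 -> 1. */
	return (x + divisor / 2) / divisor;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)div_round_closest_ull(7, 2));  /* 4 */
	printf("%llu\n", (unsigned long long)div_round_closest_ull(10, 4)); /* 3: 2.5 rounds up */
	printf("%llu\n", (unsigned long long)div_round_closest_ull(9, 4));  /* 2: 2.25 rounds down */
	return 0;
}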
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e60a745ac198..e804306ef5e8 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -40,6 +40,10 @@
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
#endif
+#ifndef KEXEC_CONTROL_MEMORY_GFP
+#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#endif
+
#ifndef KEXEC_CONTROL_PAGE_SIZE
#error KEXEC_CONTROL_PAGE_SIZE not defined
#endif
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 3be6bb18562d..7ae216a39c9e 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -35,18 +35,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
-/*
- * A KSM page is one of those write-protected "shared pages" or "merged pages"
- * which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
- * anon_vma, but to that page's node of the stable tree.
- */
-static inline int PageKsm(struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
-}
-
static inline struct stable_node *page_stable_node(struct page *page)
{
return PageKsm(page) ? page_rmapping(page) : NULL;
@@ -87,11 +75,6 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
-static inline int PageKsm(struct page *page)
-{
- return 0;
-}
-
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 82af5d0b996e..ad45054309a0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -779,7 +779,8 @@ static inline void kvm_guest_enter(void)
 * one time slice). Let's treat guest mode as a quiescent state, just like
* we do with user-mode execution.
*/
- rcu_virt_note_context_switch(smp_processor_id());
+ if (!context_tracking_cpu_is_enabled())
+ rcu_virt_note_context_switch(smp_processor_id());
}
static inline void kvm_guest_exit(void)
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 5ba2facd7a51..e97966d1fb8d 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -13,7 +13,6 @@
#define __LINUX_FLASH_LEDS_H_INCLUDED
#include <linux/leds.h>
-#include <uapi/linux/v4l2-controls.h>
struct device_node;
struct led_classdev_flash;
@@ -33,7 +32,7 @@ struct led_classdev_flash;
#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8)
#define LED_NUM_FLASH_FAULTS 9
-#define LED_FLASH_MAX_SYSFS_GROUPS 7
+#define LED_FLASH_SYSFS_GROUPS_SIZE 5
struct led_flash_ops {
/* set flash brightness */
@@ -81,21 +80,7 @@ struct led_classdev_flash {
struct led_flash_setting timeout;
/* LED Flash class sysfs groups */
- const struct attribute_group *sysfs_groups[LED_FLASH_MAX_SYSFS_GROUPS];
-
- /* LEDs available for flash strobe synchronization */
- struct led_classdev_flash **sync_leds;
-
- /* Number of LEDs available for flash strobe synchronization */
- int num_sync_leds;
-
- /*
- * The identifier of the sub-led to synchronize the flash strobe with.
- * Identifiers start from 1, which reflects the first element from the
- * sync_leds array. 0 means that the flash strobe should not be
- * synchronized.
- */
- u32 sync_led_id;
+ const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
};
static inline struct led_classdev_flash *lcdev_to_flcdev(
diff --git a/include/linux/leds.h b/include/linux/leds.h
index f70f84f35674..9a2b000094cf 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -47,7 +47,6 @@ struct led_classdev {
#define SET_BRIGHTNESS_ASYNC (1 << 21)
#define SET_BRIGHTNESS_SYNC (1 << 22)
#define LED_DEV_CAP_FLASH (1 << 23)
-#define LED_DEV_CAP_SYNC_STROBE (1 << 24)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
@@ -105,7 +104,11 @@ struct led_classdev {
extern int led_classdev_register(struct device *parent,
struct led_classdev *led_cdev);
+extern int devm_led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev);
extern void led_classdev_unregister(struct led_classdev *led_cdev);
+extern void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
extern void led_classdev_suspend(struct led_classdev *led_cdev);
extern void led_classdev_resume(struct led_classdev *led_cdev);
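A hypothetical probe using the new device-managed registration; the foo names and the single green status LED are placeholders.

#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static void foo_brightness_set(struct led_classdev *cdev,
			       enum led_brightness value)
{
	/* write "value" to the hardware here */
}

static int foo_led_probe(struct platform_device *pdev)
{
	struct led_classdev *cdev;

	cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->name = "foo:green:status";
	cdev->brightness_set = foo_brightness_set;

	/* Unregistered automatically when the device is unbound, so no
	 * explicit led_classdev_unregister() in the remove path. */
	return devm_led_classdev_register(&pdev->dev, cdev);
}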
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 9962c6bb1311..6db19f35f7c5 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -61,8 +61,8 @@ struct lguest_data {
u32 tsc_khz;
/* Fields initialized by the Guest at boot: */
- /* Instruction range to suppress interrupts even if enabled */
- unsigned long noirq_start, noirq_end;
+ /* Instruction to suppress interrupts even if enabled */
+ unsigned long noirq_iret;
/* Address above which page tables are all identical. */
unsigned long kernel_address;
/* The vector to try to use for system calls (0x40 or 0x80). */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 8dad4a307bb8..28aeae46f355 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -205,6 +205,7 @@ enum {
ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
+ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -309,6 +310,12 @@ enum {
*/
ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
+ * be a spurious PHY event, so ignore the first PHY event that
+ * occurs within 10s after the policy change.
+ */
+ ATA_TMOUT_SPURIOUS_PHY = 10000,
+
/* ATA bus states */
BUS_UNKNOWN = 0,
BUS_DMA = 1,
@@ -788,6 +795,8 @@ struct ata_link {
struct ata_eh_context eh_context;
struct ata_device device[ATA_MAX_DEVICES];
+
+ unsigned long last_lpm_change; /* when last LPM change happened */
};
#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
@@ -1201,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 95023fd8b00d..ee6dbb39a809 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -123,10 +123,10 @@ struct klp_patch {
enum klp_state state;
};
-extern int klp_register_patch(struct klp_patch *);
-extern int klp_unregister_patch(struct klp_patch *);
-extern int klp_enable_patch(struct klp_patch *);
-extern int klp_disable_patch(struct klp_patch *);
+int klp_register_patch(struct klp_patch *);
+int klp_unregister_patch(struct klp_patch *);
+int klp_enable_patch(struct klp_patch *);
+int klp_disable_patch(struct klp_patch *);
#endif /* CONFIG_LIVEPATCH */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 74ab23176e9b..066ba4157541 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -531,8 +531,13 @@ do { \
# define might_lock_read(lock) do { } while (0)
#endif
-#ifdef CONFIG_PROVE_RCU
+#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
+#else
+static inline void
+lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+{
+}
#endif
#endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e8cc45307f8f..9497ec7c77ea 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -365,6 +365,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
#define __initdata_memblock
#endif
+#ifdef CONFIG_MEMTEST
+extern void early_memtest(phys_addr_t start, phys_addr_t end);
+#else
+static inline void early_memtest(phys_addr_t start, phys_addr_t end)
+{
+}
+#endif
+
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 72dff5fb0d0c..6c8918114804 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
if (!memcg_kmem_enabled())
return true;
+ if (gfp & __GFP_NOACCOUNT)
+ return true;
/*
* __GFP_NOFAIL allocations will move on even if charging is not
* possible. Therefore we don't even try, and have this allocation
@@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (!memcg_kmem_enabled())
return cachep;
+ if (gfp & __GFP_NOACCOUNT)
+ return cachep;
if (gfp & __GFP_NOFAIL)
return cachep;
if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8f1a41951df9..6ffa0ac7f7d6 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -192,6 +192,9 @@ extern void get_page_bootmem(unsigned long ingo, struct page *page,
void get_online_mems(void);
void put_online_mems(void);
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -231,6 +234,9 @@ static inline int try_online_node(int nid)
static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}
+static inline void mem_hotplug_begin(void) {}
+static inline void mem_hotplug_done(void) {}
+
#endif /* ! CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 39ed62ab5b8a..69b6951e8fd2 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -29,14 +29,15 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int nid);
-extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
extern void mempool_free(void *element, mempool_t *pool);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
- * a slab that is passed in through pool_data.
+ * a slab cache that is passed in through pool_data.
+ * Note: the slab cache must not have a ctor function.
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
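A hypothetical usage sketch combining the slab helpers declared above with the new mempool_resize() signature; the foo names and pool sizes are placeholders.

#include <linux/mempool.h>
#include <linux/slab.h>

struct foo_req { int x; };

static struct kmem_cache *foo_cache;
static mempool_t *foo_pool;

static int foo_init(void)
{
	foo_cache = kmem_cache_create("foo_req", sizeof(struct foo_req),
				      0, 0, NULL /* no ctor, as required */);
	if (!foo_cache)
		return -ENOMEM;

	foo_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
				  foo_cache);
	if (!foo_pool) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}

	/* Resizing no longer takes a gfp_mask; it sleeps with GFP_KERNEL. */
	return mempool_resize(foo_pool, 32);
}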
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 910e3aa1e965..16a498f48169 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -24,6 +24,7 @@ enum arizona_type {
WM5102 = 1,
WM5110 = 2,
WM8997 = 3,
+ WM8280 = 4,
};
#define ARIZONA_IRQ_GP1 0
@@ -126,7 +127,7 @@ struct arizona {
struct regmap_irq_chip_data *aod_irq_chip;
struct regmap_irq_chip_data *irq_chip;
- bool hpdet_magic;
+ bool hpdet_clamp;
unsigned int hp_ena;
struct mutex clk_lock;
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 4578c72c9b86..1789cb0f4f17 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -11,31 +11,26 @@
#ifndef _ARIZONA_PDATA_H
#define _ARIZONA_PDATA_H
-#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
+#include <dt-bindings/mfd/arizona.h>
+
#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
-#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
-#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
-#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
-#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
-#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
-#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
@@ -45,23 +40,10 @@
#define ARIZONA_MAX_GPIO 5
-#define ARIZONA_32KZ_MCLK1 1
-#define ARIZONA_32KZ_MCLK2 2
-#define ARIZONA_32KZ_NONE 3
-
#define ARIZONA_MAX_INPUT 4
-#define ARIZONA_DMIC_MICVDD 0
-#define ARIZONA_DMIC_MICBIAS1 1
-#define ARIZONA_DMIC_MICBIAS2 2
-#define ARIZONA_DMIC_MICBIAS3 3
-
#define ARIZONA_MAX_MICBIAS 3
-#define ARIZONA_INMODE_DIFF 0
-#define ARIZONA_INMODE_SE 1
-#define ARIZONA_INMODE_DMIC 2
-
#define ARIZONA_MAX_OUTPUT 6
#define ARIZONA_MAX_AIF 3
@@ -112,7 +94,7 @@ struct arizona_pdata {
int gpio_base;
/** Pin state for GPIO pins */
- int gpio_defaults[ARIZONA_MAX_GPIO];
+ unsigned int gpio_defaults[ARIZONA_MAX_GPIO];
/**
* Maximum number of channels clocks will be generated for,
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 0e166b92f5b4..324a34683971 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -16,6 +16,7 @@
#ifndef __LINUX_MFD_CROS_EC_H
#define __LINUX_MFD_CROS_EC_H
+#include <linux/cdev.h>
#include <linux/notifier.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/mutex.h>
@@ -38,20 +39,20 @@ enum {
/*
* @version: Command version number (often 0)
* @command: Command to send (EC_CMD_...)
- * @outdata: Outgoing data to EC
* @outsize: Outgoing length in bytes
- * @indata: Where to put the incoming data from EC
* @insize: Max number of bytes to accept from EC
* @result: EC's response to the command (separate from communication failure)
+ * @outdata: Outgoing data to EC
+ * @indata: Where to put the incoming data from EC
*/
struct cros_ec_command {
uint32_t version;
uint32_t command;
- uint8_t *outdata;
uint32_t outsize;
- uint8_t *indata;
uint32_t insize;
uint32_t result;
+ uint8_t outdata[EC_PROTO2_MAX_PARAM_SIZE];
+ uint8_t indata[EC_PROTO2_MAX_PARAM_SIZE];
};
/**
@@ -59,9 +60,17 @@ struct cros_ec_command {
*
* @ec_name: name of EC device (e.g. 'chromeos-ec')
* @phys_name: name of physical comms layer (e.g. 'i2c-4')
- * @dev: Device pointer
+ * @dev: Device pointer for physical comms device
+ * @vdev: Device pointer for virtual comms device
+ * @cdev: Character device structure for virtual comms device
* @was_wake_device: true if this device was set to wake the system from
* sleep at the last suspend
+ * @cmd_readmem: direct read of the EC memory-mapped region, if supported
+ * @offset is within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: number of bytes to read. zero means "read a string" (including
+ * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
+ * Caller must ensure that the buffer is large enough for the result when
+ * reading a string.
*
* @priv: Private data
* @irq: Interrupt to use
@@ -90,8 +99,12 @@ struct cros_ec_device {
const char *ec_name;
const char *phys_name;
struct device *dev;
+ struct device *vdev;
+ struct cdev cdev;
bool was_wake_device;
struct class *cros_class;
+ int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest);
/* These are used to implement the platform-specific interface */
void *priv;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 955dd990beaf..51633ea6f910 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -87,6 +87,7 @@ enum max77693_pmic_reg {
/* MAX77693 ITORCH register */
#define TORCH_IOUT1_SHIFT 0
#define TORCH_IOUT2_SHIFT 4
+#define TORCH_IOUT_MASK(x) (0xf << (x))
#define TORCH_IOUT_MIN 15625
#define TORCH_IOUT_MAX 250000
#define TORCH_IOUT_STEP 15625
@@ -113,8 +114,8 @@ enum max77693_pmic_reg {
#define FLASH_EN_FLASH 0x1
#define FLASH_EN_TORCH 0x2
#define FLASH_EN_ON 0x3
-#define FLASH_EN_SHIFT(x) (6 - ((x) - 1) * 2)
-#define TORCH_EN_SHIFT(x) (2 - ((x) - 1) * 2)
+#define FLASH_EN_SHIFT(x) (6 - (x) * 2)
+#define TORCH_EN_SHIFT(x) (2 - (x) * 2)
/* MAX77693 MAX_FLASH1 register */
#define MAX_FLASH1_MAX_FL_EN 0x80
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 09a4dedaeea8..d450f687301b 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -81,19 +81,6 @@ enum max77693_led_boost_mode {
MAX77693_LED_BOOST_FIXED,
};
-struct max77693_led_platform_data {
- u32 fleds[2];
- u32 iout_torch[2];
- u32 iout_flash[2];
- u32 trigger[2];
- u32 trigger_type[2];
- u32 num_leds;
- u32 boost_mode;
- u32 flash_timeout;
- u32 boost_vout;
- u32 low_vsys;
-};
-
/* MAX77693 */
struct max77693_platform_data {
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
new file mode 100644
index 000000000000..7178ace8379e
--- /dev/null
+++ b/include/linux/mfd/max77843-private.h
@@ -0,0 +1,454 @@
+/*
+ * Common variables for the Maxim MAX77843 driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MAX77843_PRIVATE_H_
+#define __MAX77843_PRIVATE_H_
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_TOPSYS (0xCC >> 1)
+#define I2C_ADDR_CHG (0xD2 >> 1)
+#define I2C_ADDR_FG (0x6C >> 1)
+#define I2C_ADDR_MUIC (0x4A >> 1)
+
+/* Topsys, Haptic and LED registers */
+enum max77843_sys_reg {
+ MAX77843_SYS_REG_PMICID = 0x00,
+ MAX77843_SYS_REG_PMICREV = 0x01,
+ MAX77843_SYS_REG_MAINCTRL1 = 0x02,
+ MAX77843_SYS_REG_INTSRC = 0x22,
+ MAX77843_SYS_REG_INTSRCMASK = 0x23,
+ MAX77843_SYS_REG_SYSINTSRC = 0x24,
+ MAX77843_SYS_REG_SYSINTMASK = 0x26,
+ MAX77843_SYS_REG_TOPSYS_STAT = 0x28,
+ MAX77843_SYS_REG_SAFEOUTCTRL = 0xC6,
+
+ MAX77843_SYS_REG_END,
+};
+
+enum max77843_haptic_reg {
+ MAX77843_HAP_REG_MCONFIG = 0x10,
+
+ MAX77843_HAP_REG_END,
+};
+
+enum max77843_led_reg {
+ MAX77843_LED_REG_LEDEN = 0x30,
+ MAX77843_LED_REG_LED0BRT = 0x31,
+ MAX77843_LED_REG_LED1BRT = 0x32,
+ MAX77843_LED_REG_LED2BRT = 0x33,
+ MAX77843_LED_REG_LED3BRT = 0x34,
+ MAX77843_LED_REG_LEDBLNK = 0x38,
+ MAX77843_LED_REG_LEDRAMP = 0x36,
+
+ MAX77843_LED_REG_END,
+};
+
+/* Charger registers */
+enum max77843_charger_reg {
+ MAX77843_CHG_REG_CHG_INT = 0xB0,
+ MAX77843_CHG_REG_CHG_INT_MASK = 0xB1,
+ MAX77843_CHG_REG_CHG_INT_OK = 0xB2,
+ MAX77843_CHG_REG_CHG_DTLS_00 = 0xB3,
+ MAX77843_CHG_REG_CHG_DTLS_01 = 0xB4,
+ MAX77843_CHG_REG_CHG_DTLS_02 = 0xB5,
+ MAX77843_CHG_REG_CHG_CNFG_00 = 0xB7,
+ MAX77843_CHG_REG_CHG_CNFG_01 = 0xB8,
+ MAX77843_CHG_REG_CHG_CNFG_02 = 0xB9,
+ MAX77843_CHG_REG_CHG_CNFG_03 = 0xBA,
+ MAX77843_CHG_REG_CHG_CNFG_04 = 0xBB,
+ MAX77843_CHG_REG_CHG_CNFG_06 = 0xBD,
+ MAX77843_CHG_REG_CHG_CNFG_07 = 0xBE,
+ MAX77843_CHG_REG_CHG_CNFG_09 = 0xC0,
+ MAX77843_CHG_REG_CHG_CNFG_10 = 0xC1,
+ MAX77843_CHG_REG_CHG_CNFG_11 = 0xC2,
+ MAX77843_CHG_REG_CHG_CNFG_12 = 0xC3,
+
+ MAX77843_CHG_REG_END,
+};
+
+/* Fuel gauge registers */
+enum max77843_fuelgauge {
+ MAX77843_FG_REG_STATUS = 0x00,
+ MAX77843_FG_REG_VALRT_TH = 0x01,
+ MAX77843_FG_REG_TALRT_TH = 0x02,
+ MAX77843_FG_REG_SALRT_TH = 0x03,
+ MAX77843_FG_RATE_AT_RATE = 0x04,
+ MAX77843_FG_REG_REMCAP_REP = 0x05,
+ MAX77843_FG_REG_SOCREP = 0x06,
+ MAX77843_FG_REG_AGE = 0x07,
+ MAX77843_FG_REG_TEMP = 0x08,
+ MAX77843_FG_REG_VCELL = 0x09,
+ MAX77843_FG_REG_CURRENT = 0x0A,
+ MAX77843_FG_REG_AVG_CURRENT = 0x0B,
+ MAX77843_FG_REG_SOCMIX = 0x0D,
+ MAX77843_FG_REG_SOCAV = 0x0E,
+ MAX77843_FG_REG_REMCAP_MIX = 0x0F,
+ MAX77843_FG_REG_FULLCAP = 0x10,
+ MAX77843_FG_REG_AVG_TEMP = 0x16,
+ MAX77843_FG_REG_CYCLES = 0x17,
+ MAX77843_FG_REG_AVG_VCELL = 0x19,
+ MAX77843_FG_REG_CONFIG = 0x1D,
+ MAX77843_FG_REG_REMCAP_AV = 0x1F,
+ MAX77843_FG_REG_FULLCAP_NOM = 0x23,
+ MAX77843_FG_REG_MISCCFG = 0x2B,
+ MAX77843_FG_REG_RCOMP = 0x38,
+ MAX77843_FG_REG_FSTAT = 0x3D,
+ MAX77843_FG_REG_DQACC = 0x45,
+ MAX77843_FG_REG_DPACC = 0x46,
+ MAX77843_FG_REG_OCV = 0xEE,
+ MAX77843_FG_REG_VFOCV = 0xFB,
+ MAX77843_FG_SOCVF = 0xFF,
+
+ MAX77843_FG_END,
+};
+
+/* MUIC registers */
+enum max77843_muic_reg {
+ MAX77843_MUIC_REG_ID = 0x00,
+ MAX77843_MUIC_REG_INT1 = 0x01,
+ MAX77843_MUIC_REG_INT2 = 0x02,
+ MAX77843_MUIC_REG_INT3 = 0x03,
+ MAX77843_MUIC_REG_STATUS1 = 0x04,
+ MAX77843_MUIC_REG_STATUS2 = 0x05,
+ MAX77843_MUIC_REG_STATUS3 = 0x06,
+ MAX77843_MUIC_REG_INTMASK1 = 0x07,
+ MAX77843_MUIC_REG_INTMASK2 = 0x08,
+ MAX77843_MUIC_REG_INTMASK3 = 0x09,
+ MAX77843_MUIC_REG_CDETCTRL1 = 0x0A,
+ MAX77843_MUIC_REG_CDETCTRL2 = 0x0B,
+ MAX77843_MUIC_REG_CONTROL1 = 0x0C,
+ MAX77843_MUIC_REG_CONTROL2 = 0x0D,
+ MAX77843_MUIC_REG_CONTROL3 = 0x0E,
+ MAX77843_MUIC_REG_CONTROL4 = 0x16,
+ MAX77843_MUIC_REG_HVCONTROL1 = 0x17,
+ MAX77843_MUIC_REG_HVCONTROL2 = 0x18,
+
+ MAX77843_MUIC_REG_END,
+};
+
+enum max77843_irq {
+ /* Topsys: SYSTEM */
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSUVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSOVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TSHDN_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TM_INT,
+
+ /* Charger: CHG_INT */
+ MAX77843_CHG_IRQ_CHG_INT_BYP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BATP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BAT_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHG_I,
+ MAX77843_CHG_IRQ_CHG_INT_WCIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHGIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_AICL_I,
+
+ MAX77843_IRQ_NUM,
+};
+
+enum max77843_irq_muic {
+ /* MUIC: INT1 */
+ MAX77843_MUIC_IRQ_INT1_ADC,
+ MAX77843_MUIC_IRQ_INT1_ADCERROR,
+ MAX77843_MUIC_IRQ_INT1_ADC1K,
+
+ /* MUIC: INT2 */
+ MAX77843_MUIC_IRQ_INT2_CHGTYP,
+ MAX77843_MUIC_IRQ_INT2_CHGDETRUN,
+ MAX77843_MUIC_IRQ_INT2_DCDTMR,
+ MAX77843_MUIC_IRQ_INT2_DXOVP,
+ MAX77843_MUIC_IRQ_INT2_VBVOLT,
+
+ /* MUIC: INT3 */
+ MAX77843_MUIC_IRQ_INT3_VBADC,
+ MAX77843_MUIC_IRQ_INT3_VDNMON,
+ MAX77843_MUIC_IRQ_INT3_DNRES,
+ MAX77843_MUIC_IRQ_INT3_MPNACK,
+ MAX77843_MUIC_IRQ_INT3_MRXBUFOW,
+ MAX77843_MUIC_IRQ_INT3_MRXTRF,
+ MAX77843_MUIC_IRQ_INT3_MRXPERR,
+ MAX77843_MUIC_IRQ_INT3_MRXRDY,
+
+ MAX77843_MUIC_IRQ_NUM,
+};
+
+/* MAX77843 interrupts */
+#define MAX77843_SYS_IRQ_SYSUVLO_INT BIT(0)
+#define MAX77843_SYS_IRQ_SYSOVLO_INT BIT(1)
+#define MAX77843_SYS_IRQ_TSHDN_INT BIT(2)
+#define MAX77843_SYS_IRQ_TM_INT BIT(3)
+
+/* MAX77843 MAINCTRL1 register */
+#define MAINCTRL1_BIASEN_SHIFT 7
+#define MAX77843_MAINCTRL1_BIASEN_MASK BIT(MAINCTRL1_BIASEN_SHIFT)
+
+/* MAX77843 MCONFIG register */
+#define MCONFIG_MODE_SHIFT 7
+#define MCONFIG_MEN_SHIFT 6
+#define MCONFIG_PDIV_SHIFT 0
+
+#define MAX77843_MCONFIG_MODE_MASK BIT(MCONFIG_MODE_SHIFT)
+#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
+#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
+
+/* MAX77843 charger interrupts */
+#define MAX77843_CHG_BYP_I BIT(0)
+#define MAX77843_CHG_BATP_I BIT(2)
+#define MAX77843_CHG_BAT_I BIT(3)
+#define MAX77843_CHG_CHG_I BIT(4)
+#define MAX77843_CHG_WCIN_I BIT(5)
+#define MAX77843_CHG_CHGIN_I BIT(6)
+#define MAX77843_CHG_AICL_I BIT(7)
+
+/* MAX77843 CHG_INT_OK register */
+#define MAX77843_CHG_BYP_OK BIT(0)
+#define MAX77843_CHG_BATP_OK BIT(2)
+#define MAX77843_CHG_BAT_OK BIT(3)
+#define MAX77843_CHG_CHG_OK BIT(4)
+#define MAX77843_CHG_WCIN_OK BIT(5)
+#define MAX77843_CHG_CHGIN_OK BIT(6)
+#define MAX77843_CHG_AICL_OK BIT(7)
+
+/* MAX77843 CHG_DETAILS_00 register */
+#define MAX77843_CHG_BAT_DTLS BIT(0)
+
+/* MAX77843 CHG_DETAILS_01 register */
+#define MAX77843_CHG_DTLS_MASK 0x0f
+#define MAX77843_CHG_PQ_MODE 0x00
+#define MAX77843_CHG_CC_MODE 0x01
+#define MAX77843_CHG_CV_MODE 0x02
+#define MAX77843_CHG_TO_MODE 0x03
+#define MAX77843_CHG_DO_MODE 0x04
+#define MAX77843_CHG_HT_MODE 0x05
+#define MAX77843_CHG_TF_MODE 0x06
+#define MAX77843_CHG_TS_MODE 0x07
+#define MAX77843_CHG_OFF_MODE 0x08
+
+#define MAX77843_CHG_BAT_DTLS_MASK 0xf0
+#define MAX77843_CHG_NO_BAT (0x00 << 4)
+#define MAX77843_CHG_LOW_VOLT_BAT (0x01 << 4)
+#define MAX77843_CHG_LONG_BAT_TIME (0x02 << 4)
+#define MAX77843_CHG_OK_BAT (0x03 << 4)
+#define MAX77843_CHG_OK_LOW_VOLT_BAT (0x04 << 4)
+#define MAX77843_CHG_OVER_VOLT_BAT (0x05 << 4)
+#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4)
+
+/* MAX77843 CHG_CNFG_00 register */
+#define MAX77843_CHG_DISABLE 0x00
+#define MAX77843_CHG_ENABLE 0x05
+#define MAX77843_CHG_MASK 0x01
+#define MAX77843_CHG_BUCK_MASK 0x04
+
+/* MAX77843 CHG_CNFG_01 register */
+#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00
+#define MAX77843_CHG_RESTART_THRESHOLD_150 0x10
+#define MAX77843_CHG_RESTART_THRESHOLD_200 0x20
+#define MAX77843_CHG_RESTART_THRESHOLD_DISABLE 0x30
+
+/* MAX77843 CHG_CNFG_02 register */
+#define MAX77843_CHG_FAST_CHG_CURRENT_MIN 100000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MAX 3150000
+#define MAX77843_CHG_FAST_CHG_CURRENT_STEP 50000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MASK 0x3f
+#define MAX77843_CHG_OTG_ILIMIT_500 (0x00 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_900 (0x01 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1200 (0x02 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1500 (0x03 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_MASK 0xc0
+
+/* MAX77843 CHG_CNFG_03 register */
+#define MAX77843_CHG_TOP_OFF_CURRENT_MIN 125000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MAX 650000
+#define MAX77843_CHG_TOP_OFF_CURRENT_STEP 75000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MASK 0x07
+
+/* MAX77843 CHG_CNFG_06 register */
+#define MAX77843_CHG_WRITE_CAP_BLOCK 0x10
+#define MAX77843_CHG_WRITE_CAP_UNBLOCK 0x0C
+
+/* MAX77843_CHG_CNFG_09_register */
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MIN 100000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MAX 4000000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_REF 3367000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_STEP 33000
+
+#define MAX77843_MUIC_ADC BIT(0)
+#define MAX77843_MUIC_ADCERROR BIT(2)
+#define MAX77843_MUIC_ADC1K BIT(3)
+
+#define MAX77843_MUIC_CHGTYP BIT(0)
+#define MAX77843_MUIC_CHGDETRUN BIT(1)
+#define MAX77843_MUIC_DCDTMR BIT(2)
+#define MAX77843_MUIC_DXOVP BIT(3)
+#define MAX77843_MUIC_VBVOLT BIT(4)
+
+#define MAX77843_MUIC_VBADC BIT(0)
+#define MAX77843_MUIC_VDNMON BIT(1)
+#define MAX77843_MUIC_DNRES BIT(2)
+#define MAX77843_MUIC_MPNACK BIT(3)
+#define MAX77843_MUIC_MRXBUFOW BIT(4)
+#define MAX77843_MUIC_MRXTRF BIT(5)
+#define MAX77843_MUIC_MRXPERR BIT(6)
+#define MAX77843_MUIC_MRXRDY BIT(7)
+
+/* MAX77843 INTSRCMASK register */
+#define MAX77843_INTSRCMASK_CHGR 0
+#define MAX77843_INTSRCMASK_SYS 1
+#define MAX77843_INTSRCMASK_FG 2
+#define MAX77843_INTSRCMASK_MUIC 3
+
+#define MAX77843_INTSRCMASK_CHGR_MASK BIT(MAX77843_INTSRCMASK_CHGR)
+#define MAX77843_INTSRCMASK_SYS_MASK BIT(MAX77843_INTSRCMASK_SYS)
+#define MAX77843_INTSRCMASK_FG_MASK BIT(MAX77843_INTSRCMASK_FG)
+#define MAX77843_INTSRCMASK_MUIC_MASK BIT(MAX77843_INTSRCMASK_MUIC)
+
+#define MAX77843_INTSRC_MASK_MASK \
+ (MAX77843_INTSRCMASK_MUIC_MASK | MAX77843_INTSRCMASK_FG_MASK | \
+ MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
+
+/* MAX77843 STATUS register */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCERROR_SHIFT 6
+#define STATUS1_ADC1K_SHIFT 7
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DXOVP_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS3_VBADC_SHIFT 0
+#define STATUS3_VDNMON_SHIFT 4
+#define STATUS3_DNRES_SHIFT 5
+#define STATUS3_MPNACK_SHIFT 6
+
+#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT)
+
+/* MAX77843 CONTROL register */
+#define CONTROL1_COMP1SW_SHIFT 0
+#define CONTROL1_COMP2SW_SHIFT 3
+#define CONTROL1_IDBEN_SHIFT 7
+#define CONTROL2_LOWPWR_SHIFT 0
+#define CONTROL2_ADCEN_SHIFT 1
+#define CONTROL2_CPEN_SHIFT 2
+#define CONTROL2_ACC_DET_SHIFT 5
+#define CONTROL2_USBCPINT_SHIFT 6
+#define CONTROL2_RCPS_SHIFT 7
+#define CONTROL3_JIGSET_SHIFT 0
+#define CONTROL4_ADCDBSET_SHIFT 0
+#define CONTROL4_USBAUTO_SHIFT 4
+#define CONTROL4_FCTAUTO_SHIFT 5
+#define CONTROL4_ADCMODE_SHIFT 6
+
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT)
+
+/* MAX77843 switch port */
+#define COM_OPEN 0
+#define COM_USB 1
+#define COM_AUDIO 2
+#define COM_UART 3
+#define COM_AUX_USB 4
+#define COM_AUX_UART 5
+
+#define CONTROL1_COM_SW \
+ ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
+ MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
+
+#define CONTROL1_SW_OPEN \
+ ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
+ COM_OPEN << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_USB \
+ ((COM_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUDIO \
+ ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_UART \
+ ((COM_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_UART << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_USB \
+ ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_UART \
+ ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+
+#define MAX77843_DISABLE 0
+#define MAX77843_ENABLE 1
+
+#define CONTROL4_AUTO_DISABLE \
+ ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+#define CONTROL4_AUTO_ENABLE \
+ ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+
+/* MAX77843 SAFEOUT LDO Control register */
+#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
+#define SAFEOUTCTRL_SAFEOUT2_SHIFT 2
+#define SAFEOUTCTRL_ENSAFEOUT1_SHIFT 6
+#define SAFEOUTCTRL_ENSAFEOUT2_SHIFT 7
+
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT2_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
+
+struct max77843 {
+ struct device *dev;
+
+ struct i2c_client *i2c;
+ struct i2c_client *i2c_chg;
+ struct i2c_client *i2c_fuel;
+ struct i2c_client *i2c_muic;
+
+ struct regmap *regmap;
+ struct regmap *regmap_chg;
+ struct regmap *regmap_fuel;
+ struct regmap *regmap_muic;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_data_chg;
+ struct regmap_irq_chip_data *irq_data_fuel;
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+};
+#endif /* __MAX77843_PRIVATE_H_ */
diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h
index f097e89134cb..9e85ac06da89 100644
--- a/include/linux/mfd/menelaus.h
+++ b/include/linux/mfd/menelaus.h
@@ -24,7 +24,6 @@ extern int menelaus_set_vaux(unsigned int mV);
extern int menelaus_set_vdcdc(int dcdc, unsigned int mV);
extern int menelaus_set_slot_sel(int enable);
extern int menelaus_get_slot_pin_states(void);
-extern int menelaus_set_vcore_sw(unsigned int mV);
extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV);
#define EN_VPLL_SLEEP (1 << 7)
@@ -38,10 +37,4 @@ extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV);
extern int menelaus_set_regulator_sleep(int enable, u32 val);
-#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_MENELAUS)
-#define omap_has_menelaus() 1
-#else
-#define omap_has_menelaus() 0
-#endif
-
#endif
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
new file mode 100644
index 000000000000..cf5265b0d1c1
--- /dev/null
+++ b/include/linux/mfd/mt6397/core.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_CORE_H__
+#define __MFD_MT6397_CORE_H__
+
+enum mt6397_irq_numbers {
+ MT6397_IRQ_SPKL_AB = 0,
+ MT6397_IRQ_SPKR_AB,
+ MT6397_IRQ_SPKL,
+ MT6397_IRQ_SPKR,
+ MT6397_IRQ_BAT_L,
+ MT6397_IRQ_BAT_H,
+ MT6397_IRQ_FG_BAT_L,
+ MT6397_IRQ_FG_BAT_H,
+ MT6397_IRQ_WATCHDOG,
+ MT6397_IRQ_PWRKEY,
+ MT6397_IRQ_THR_L,
+ MT6397_IRQ_THR_H,
+ MT6397_IRQ_VBATON_UNDET,
+ MT6397_IRQ_BVALID_DET,
+ MT6397_IRQ_CHRDET,
+ MT6397_IRQ_OV,
+ MT6397_IRQ_LDO,
+ MT6397_IRQ_HOMEKEY,
+ MT6397_IRQ_ACCDET,
+ MT6397_IRQ_AUDIO,
+ MT6397_IRQ_RTC,
+ MT6397_IRQ_PWRKEY_RSTB,
+ MT6397_IRQ_HDMI_SIFM,
+ MT6397_IRQ_HDMI_CEC,
+ MT6397_IRQ_VCA15,
+ MT6397_IRQ_VSRMCA15,
+ MT6397_IRQ_VCORE,
+ MT6397_IRQ_VGPU,
+ MT6397_IRQ_VIO18,
+ MT6397_IRQ_VPCA7,
+ MT6397_IRQ_VSRMCA7,
+ MT6397_IRQ_VDRM,
+ MT6397_IRQ_NR,
+};
+
+struct mt6397_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct irq_domain *irq_domain;
+ struct mutex irqlock;
+ u16 irq_masks_cur[2];
+ u16 irq_masks_cache[2];
+};
+
+#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h
new file mode 100644
index 000000000000..f23a0a60a877
--- /dev/null
+++ b/include/linux/mfd/mt6397/registers.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_REGISTERS_H__
+#define __MFD_MT6397_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6397_CID 0x0100
+#define MT6397_TOP_CKPDN 0x0102
+#define MT6397_TOP_CKPDN_SET 0x0104
+#define MT6397_TOP_CKPDN_CLR 0x0106
+#define MT6397_TOP_CKPDN2 0x0108
+#define MT6397_TOP_CKPDN2_SET 0x010A
+#define MT6397_TOP_CKPDN2_CLR 0x010C
+#define MT6397_TOP_GPIO_CKPDN 0x010E
+#define MT6397_TOP_RST_CON 0x0114
+#define MT6397_WRP_CKPDN 0x011A
+#define MT6397_WRP_RST_CON 0x0120
+#define MT6397_TOP_RST_MISC 0x0126
+#define MT6397_TOP_CKCON1 0x0128
+#define MT6397_TOP_CKCON2 0x012A
+#define MT6397_TOP_CKTST1 0x012C
+#define MT6397_TOP_CKTST2 0x012E
+#define MT6397_OC_DEG_EN 0x0130
+#define MT6397_OC_CTL0 0x0132
+#define MT6397_OC_CTL1 0x0134
+#define MT6397_OC_CTL2 0x0136
+#define MT6397_INT_RSV 0x0138
+#define MT6397_TEST_CON0 0x013A
+#define MT6397_TEST_CON1 0x013C
+#define MT6397_STATUS0 0x013E
+#define MT6397_STATUS1 0x0140
+#define MT6397_PGSTATUS 0x0142
+#define MT6397_CHRSTATUS 0x0144
+#define MT6397_OCSTATUS0 0x0146
+#define MT6397_OCSTATUS1 0x0148
+#define MT6397_OCSTATUS2 0x014A
+#define MT6397_HDMI_PAD_IE 0x014C
+#define MT6397_TEST_OUT_L 0x014E
+#define MT6397_TEST_OUT_H 0x0150
+#define MT6397_TDSEL_CON 0x0152
+#define MT6397_RDSEL_CON 0x0154
+#define MT6397_GPIO_SMT_CON0 0x0156
+#define MT6397_GPIO_SMT_CON1 0x0158
+#define MT6397_GPIO_SMT_CON2 0x015A
+#define MT6397_GPIO_SMT_CON3 0x015C
+#define MT6397_DRV_CON0 0x015E
+#define MT6397_DRV_CON1 0x0160
+#define MT6397_DRV_CON2 0x0162
+#define MT6397_DRV_CON3 0x0164
+#define MT6397_DRV_CON4 0x0166
+#define MT6397_DRV_CON5 0x0168
+#define MT6397_DRV_CON6 0x016A
+#define MT6397_DRV_CON7 0x016C
+#define MT6397_DRV_CON8 0x016E
+#define MT6397_DRV_CON9 0x0170
+#define MT6397_DRV_CON10 0x0172
+#define MT6397_DRV_CON11 0x0174
+#define MT6397_DRV_CON12 0x0176
+#define MT6397_INT_CON0 0x0178
+#define MT6397_INT_CON1 0x017E
+#define MT6397_INT_STATUS0 0x0184
+#define MT6397_INT_STATUS1 0x0186
+#define MT6397_FQMTR_CON0 0x0188
+#define MT6397_FQMTR_CON1 0x018A
+#define MT6397_FQMTR_CON2 0x018C
+#define MT6397_EFUSE_DOUT_0_15 0x01C4
+#define MT6397_EFUSE_DOUT_16_31 0x01C6
+#define MT6397_EFUSE_DOUT_32_47 0x01C8
+#define MT6397_EFUSE_DOUT_48_63 0x01CA
+#define MT6397_SPI_CON 0x01CC
+#define MT6397_TOP_CKPDN3 0x01CE
+#define MT6397_TOP_CKCON3 0x01D4
+#define MT6397_EFUSE_DOUT_64_79 0x01D6
+#define MT6397_EFUSE_DOUT_80_95 0x01D8
+#define MT6397_EFUSE_DOUT_96_111 0x01DA
+#define MT6397_EFUSE_DOUT_112_127 0x01DC
+#define MT6397_EFUSE_DOUT_128_143 0x01DE
+#define MT6397_EFUSE_DOUT_144_159 0x01E0
+#define MT6397_EFUSE_DOUT_160_175 0x01E2
+#define MT6397_EFUSE_DOUT_176_191 0x01E4
+#define MT6397_EFUSE_DOUT_192_207 0x01E6
+#define MT6397_EFUSE_DOUT_208_223 0x01E8
+#define MT6397_EFUSE_DOUT_224_239 0x01EA
+#define MT6397_EFUSE_DOUT_240_255 0x01EC
+#define MT6397_EFUSE_DOUT_256_271 0x01EE
+#define MT6397_EFUSE_DOUT_272_287 0x01F0
+#define MT6397_EFUSE_DOUT_288_300 0x01F2
+#define MT6397_EFUSE_DOUT_304_319 0x01F4
+#define MT6397_BUCK_CON0 0x0200
+#define MT6397_BUCK_CON1 0x0202
+#define MT6397_BUCK_CON2 0x0204
+#define MT6397_BUCK_CON3 0x0206
+#define MT6397_BUCK_CON4 0x0208
+#define MT6397_BUCK_CON5 0x020A
+#define MT6397_BUCK_CON6 0x020C
+#define MT6397_BUCK_CON7 0x020E
+#define MT6397_BUCK_CON8 0x0210
+#define MT6397_BUCK_CON9 0x0212
+#define MT6397_VCA15_CON0 0x0214
+#define MT6397_VCA15_CON1 0x0216
+#define MT6397_VCA15_CON2 0x0218
+#define MT6397_VCA15_CON3 0x021A
+#define MT6397_VCA15_CON4 0x021C
+#define MT6397_VCA15_CON5 0x021E
+#define MT6397_VCA15_CON6 0x0220
+#define MT6397_VCA15_CON7 0x0222
+#define MT6397_VCA15_CON8 0x0224
+#define MT6397_VCA15_CON9 0x0226
+#define MT6397_VCA15_CON10 0x0228
+#define MT6397_VCA15_CON11 0x022A
+#define MT6397_VCA15_CON12 0x022C
+#define MT6397_VCA15_CON13 0x022E
+#define MT6397_VCA15_CON14 0x0230
+#define MT6397_VCA15_CON15 0x0232
+#define MT6397_VCA15_CON16 0x0234
+#define MT6397_VCA15_CON17 0x0236
+#define MT6397_VCA15_CON18 0x0238
+#define MT6397_VSRMCA15_CON0 0x023A
+#define MT6397_VSRMCA15_CON1 0x023C
+#define MT6397_VSRMCA15_CON2 0x023E
+#define MT6397_VSRMCA15_CON3 0x0240
+#define MT6397_VSRMCA15_CON4 0x0242
+#define MT6397_VSRMCA15_CON5 0x0244
+#define MT6397_VSRMCA15_CON6 0x0246
+#define MT6397_VSRMCA15_CON7 0x0248
+#define MT6397_VSRMCA15_CON8 0x024A
+#define MT6397_VSRMCA15_CON9 0x024C
+#define MT6397_VSRMCA15_CON10 0x024E
+#define MT6397_VSRMCA15_CON11 0x0250
+#define MT6397_VSRMCA15_CON12 0x0252
+#define MT6397_VSRMCA15_CON13 0x0254
+#define MT6397_VSRMCA15_CON14 0x0256
+#define MT6397_VSRMCA15_CON15 0x0258
+#define MT6397_VSRMCA15_CON16 0x025A
+#define MT6397_VSRMCA15_CON17 0x025C
+#define MT6397_VSRMCA15_CON18 0x025E
+#define MT6397_VSRMCA15_CON19 0x0260
+#define MT6397_VSRMCA15_CON20 0x0262
+#define MT6397_VSRMCA15_CON21 0x0264
+#define MT6397_VCORE_CON0 0x0266
+#define MT6397_VCORE_CON1 0x0268
+#define MT6397_VCORE_CON2 0x026A
+#define MT6397_VCORE_CON3 0x026C
+#define MT6397_VCORE_CON4 0x026E
+#define MT6397_VCORE_CON5 0x0270
+#define MT6397_VCORE_CON6 0x0272
+#define MT6397_VCORE_CON7 0x0274
+#define MT6397_VCORE_CON8 0x0276
+#define MT6397_VCORE_CON9 0x0278
+#define MT6397_VCORE_CON10 0x027A
+#define MT6397_VCORE_CON11 0x027C
+#define MT6397_VCORE_CON12 0x027E
+#define MT6397_VCORE_CON13 0x0280
+#define MT6397_VCORE_CON14 0x0282
+#define MT6397_VCORE_CON15 0x0284
+#define MT6397_VCORE_CON16 0x0286
+#define MT6397_VCORE_CON17 0x0288
+#define MT6397_VCORE_CON18 0x028A
+#define MT6397_VGPU_CON0 0x028C
+#define MT6397_VGPU_CON1 0x028E
+#define MT6397_VGPU_CON2 0x0290
+#define MT6397_VGPU_CON3 0x0292
+#define MT6397_VGPU_CON4 0x0294
+#define MT6397_VGPU_CON5 0x0296
+#define MT6397_VGPU_CON6 0x0298
+#define MT6397_VGPU_CON7 0x029A
+#define MT6397_VGPU_CON8 0x029C
+#define MT6397_VGPU_CON9 0x029E
+#define MT6397_VGPU_CON10 0x02A0
+#define MT6397_VGPU_CON11 0x02A2
+#define MT6397_VGPU_CON12 0x02A4
+#define MT6397_VGPU_CON13 0x02A6
+#define MT6397_VGPU_CON14 0x02A8
+#define MT6397_VGPU_CON15 0x02AA
+#define MT6397_VGPU_CON16 0x02AC
+#define MT6397_VGPU_CON17 0x02AE
+#define MT6397_VGPU_CON18 0x02B0
+#define MT6397_VIO18_CON0 0x0300
+#define MT6397_VIO18_CON1 0x0302
+#define MT6397_VIO18_CON2 0x0304
+#define MT6397_VIO18_CON3 0x0306
+#define MT6397_VIO18_CON4 0x0308
+#define MT6397_VIO18_CON5 0x030A
+#define MT6397_VIO18_CON6 0x030C
+#define MT6397_VIO18_CON7 0x030E
+#define MT6397_VIO18_CON8 0x0310
+#define MT6397_VIO18_CON9 0x0312
+#define MT6397_VIO18_CON10 0x0314
+#define MT6397_VIO18_CON11 0x0316
+#define MT6397_VIO18_CON12 0x0318
+#define MT6397_VIO18_CON13 0x031A
+#define MT6397_VIO18_CON14 0x031C
+#define MT6397_VIO18_CON15 0x031E
+#define MT6397_VIO18_CON16 0x0320
+#define MT6397_VIO18_CON17 0x0322
+#define MT6397_VIO18_CON18 0x0324
+#define MT6397_VPCA7_CON0 0x0326
+#define MT6397_VPCA7_CON1 0x0328
+#define MT6397_VPCA7_CON2 0x032A
+#define MT6397_VPCA7_CON3 0x032C
+#define MT6397_VPCA7_CON4 0x032E
+#define MT6397_VPCA7_CON5 0x0330
+#define MT6397_VPCA7_CON6 0x0332
+#define MT6397_VPCA7_CON7 0x0334
+#define MT6397_VPCA7_CON8 0x0336
+#define MT6397_VPCA7_CON9 0x0338
+#define MT6397_VPCA7_CON10 0x033A
+#define MT6397_VPCA7_CON11 0x033C
+#define MT6397_VPCA7_CON12 0x033E
+#define MT6397_VPCA7_CON13 0x0340
+#define MT6397_VPCA7_CON14 0x0342
+#define MT6397_VPCA7_CON15 0x0344
+#define MT6397_VPCA7_CON16 0x0346
+#define MT6397_VPCA7_CON17 0x0348
+#define MT6397_VPCA7_CON18 0x034A
+#define MT6397_VSRMCA7_CON0 0x034C
+#define MT6397_VSRMCA7_CON1 0x034E
+#define MT6397_VSRMCA7_CON2 0x0350
+#define MT6397_VSRMCA7_CON3 0x0352
+#define MT6397_VSRMCA7_CON4 0x0354
+#define MT6397_VSRMCA7_CON5 0x0356
+#define MT6397_VSRMCA7_CON6 0x0358
+#define MT6397_VSRMCA7_CON7 0x035A
+#define MT6397_VSRMCA7_CON8 0x035C
+#define MT6397_VSRMCA7_CON9 0x035E
+#define MT6397_VSRMCA7_CON10 0x0360
+#define MT6397_VSRMCA7_CON11 0x0362
+#define MT6397_VSRMCA7_CON12 0x0364
+#define MT6397_VSRMCA7_CON13 0x0366
+#define MT6397_VSRMCA7_CON14 0x0368
+#define MT6397_VSRMCA7_CON15 0x036A
+#define MT6397_VSRMCA7_CON16 0x036C
+#define MT6397_VSRMCA7_CON17 0x036E
+#define MT6397_VSRMCA7_CON18 0x0370
+#define MT6397_VSRMCA7_CON19 0x0372
+#define MT6397_VSRMCA7_CON20 0x0374
+#define MT6397_VSRMCA7_CON21 0x0376
+#define MT6397_VDRM_CON0 0x0378
+#define MT6397_VDRM_CON1 0x037A
+#define MT6397_VDRM_CON2 0x037C
+#define MT6397_VDRM_CON3 0x037E
+#define MT6397_VDRM_CON4 0x0380
+#define MT6397_VDRM_CON5 0x0382
+#define MT6397_VDRM_CON6 0x0384
+#define MT6397_VDRM_CON7 0x0386
+#define MT6397_VDRM_CON8 0x0388
+#define MT6397_VDRM_CON9 0x038A
+#define MT6397_VDRM_CON10 0x038C
+#define MT6397_VDRM_CON11 0x038E
+#define MT6397_VDRM_CON12 0x0390
+#define MT6397_VDRM_CON13 0x0392
+#define MT6397_VDRM_CON14 0x0394
+#define MT6397_VDRM_CON15 0x0396
+#define MT6397_VDRM_CON16 0x0398
+#define MT6397_VDRM_CON17 0x039A
+#define MT6397_VDRM_CON18 0x039C
+#define MT6397_BUCK_K_CON0 0x039E
+#define MT6397_BUCK_K_CON1 0x03A0
+#define MT6397_ANALDO_CON0 0x0400
+#define MT6397_ANALDO_CON1 0x0402
+#define MT6397_ANALDO_CON2 0x0404
+#define MT6397_ANALDO_CON3 0x0406
+#define MT6397_ANALDO_CON4 0x0408
+#define MT6397_ANALDO_CON5 0x040A
+#define MT6397_ANALDO_CON6 0x040C
+#define MT6397_ANALDO_CON7 0x040E
+#define MT6397_DIGLDO_CON0 0x0410
+#define MT6397_DIGLDO_CON1 0x0412
+#define MT6397_DIGLDO_CON2 0x0414
+#define MT6397_DIGLDO_CON3 0x0416
+#define MT6397_DIGLDO_CON4 0x0418
+#define MT6397_DIGLDO_CON5 0x041A
+#define MT6397_DIGLDO_CON6 0x041C
+#define MT6397_DIGLDO_CON7 0x041E
+#define MT6397_DIGLDO_CON8 0x0420
+#define MT6397_DIGLDO_CON9 0x0422
+#define MT6397_DIGLDO_CON10 0x0424
+#define MT6397_DIGLDO_CON11 0x0426
+#define MT6397_DIGLDO_CON12 0x0428
+#define MT6397_DIGLDO_CON13 0x042A
+#define MT6397_DIGLDO_CON14 0x042C
+#define MT6397_DIGLDO_CON15 0x042E
+#define MT6397_DIGLDO_CON16 0x0430
+#define MT6397_DIGLDO_CON17 0x0432
+#define MT6397_DIGLDO_CON18 0x0434
+#define MT6397_DIGLDO_CON19 0x0436
+#define MT6397_DIGLDO_CON20 0x0438
+#define MT6397_DIGLDO_CON21 0x043A
+#define MT6397_DIGLDO_CON22 0x043C
+#define MT6397_DIGLDO_CON23 0x043E
+#define MT6397_DIGLDO_CON24 0x0440
+#define MT6397_DIGLDO_CON25 0x0442
+#define MT6397_DIGLDO_CON26 0x0444
+#define MT6397_DIGLDO_CON27 0x0446
+#define MT6397_DIGLDO_CON28 0x0448
+#define MT6397_DIGLDO_CON29 0x044A
+#define MT6397_DIGLDO_CON30 0x044C
+#define MT6397_DIGLDO_CON31 0x044E
+#define MT6397_DIGLDO_CON32 0x0450
+#define MT6397_DIGLDO_CON33 0x045A
+#define MT6397_SPK_CON0 0x0600
+#define MT6397_SPK_CON1 0x0602
+#define MT6397_SPK_CON2 0x0604
+#define MT6397_SPK_CON3 0x0606
+#define MT6397_SPK_CON4 0x0608
+#define MT6397_SPK_CON5 0x060A
+#define MT6397_SPK_CON6 0x060C
+#define MT6397_SPK_CON7 0x060E
+#define MT6397_SPK_CON8 0x0610
+#define MT6397_SPK_CON9 0x0612
+#define MT6397_SPK_CON10 0x0614
+#define MT6397_SPK_CON11 0x0616
+#define MT6397_AUDDAC_CON0 0x0700
+#define MT6397_AUDBUF_CFG0 0x0702
+#define MT6397_AUDBUF_CFG1 0x0704
+#define MT6397_AUDBUF_CFG2 0x0706
+#define MT6397_AUDBUF_CFG3 0x0708
+#define MT6397_AUDBUF_CFG4 0x070A
+#define MT6397_IBIASDIST_CFG0 0x070C
+#define MT6397_AUDACCDEPOP_CFG0 0x070E
+#define MT6397_AUD_IV_CFG0 0x0710
+#define MT6397_AUDCLKGEN_CFG0 0x0712
+#define MT6397_AUDLDO_CFG0 0x0714
+#define MT6397_AUDLDO_CFG1 0x0716
+#define MT6397_AUDNVREGGLB_CFG0 0x0718
+#define MT6397_AUD_NCP0 0x071A
+#define MT6397_AUDPREAMP_CON0 0x071C
+#define MT6397_AUDADC_CON0 0x071E
+#define MT6397_AUDADC_CON1 0x0720
+#define MT6397_AUDADC_CON2 0x0722
+#define MT6397_AUDADC_CON3 0x0724
+#define MT6397_AUDADC_CON4 0x0726
+#define MT6397_AUDADC_CON5 0x0728
+#define MT6397_AUDADC_CON6 0x072A
+#define MT6397_AUDDIGMI_CON0 0x072C
+#define MT6397_AUDLSBUF_CON0 0x072E
+#define MT6397_AUDLSBUF_CON1 0x0730
+#define MT6397_AUDENCSPARE_CON0 0x0732
+#define MT6397_AUDENCCLKSQ_CON0 0x0734
+#define MT6397_AUDPREAMPGAIN_CON0 0x0736
+#define MT6397_ZCD_CON0 0x0738
+#define MT6397_ZCD_CON1 0x073A
+#define MT6397_ZCD_CON2 0x073C
+#define MT6397_ZCD_CON3 0x073E
+#define MT6397_ZCD_CON4 0x0740
+#define MT6397_ZCD_CON5 0x0742
+#define MT6397_NCP_CLKDIV_CON0 0x0744
+#define MT6397_NCP_CLKDIV_CON1 0x0746
+
+#endif /* __MFD_MT6397_REGISTERS_H__ */
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index fb09312d854b..441b6ee72691 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -156,6 +156,9 @@ enum rk808_reg {
#define BUCK2_RATE_MASK (3 << 3)
#define MASK_ALL 0xff
+#define BUCK_UV_ACT_MASK 0x0f
+#define BUCK_UV_ACT_DISABLE 0
+
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 0c12628e91c6..ff843e7ca23d 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -28,74 +28,72 @@
#define MAX_RW_REG_CNT 1024
-/* PCI Operation Register Address */
#define RTSX_HCBAR 0x00
#define RTSX_HCBCTLR 0x04
+#define STOP_CMD (0x01 << 28)
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
#define RTSX_HDBAR 0x08
+#define SG_INT 0x04
+#define SG_END 0x02
+#define SG_VALID 0x01
+#define SG_NO_OP 0x00
+#define SG_TRANS_DATA (0x02 << 4)
+#define SG_LINK_DESC (0x03 << 4)
#define RTSX_HDBCTLR 0x0C
+#define SDMA_MODE 0x00
+#define ADMA_MODE (0x02 << 26)
+#define STOP_DMA (0x01 << 28)
+#define TRIG_DMA (0x01 << 31)
+
#define RTSX_HAIMR 0x10
-#define RTSX_BIPR 0x14
-#define RTSX_BIER 0x18
+#define HAIMR_TRANS_START (0x01 << 31)
+#define HAIMR_READ 0x00
+#define HAIMR_WRITE (0x01 << 30)
+#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ)
+#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE)
+#define HAIMR_TRANS_END (HAIMR_TRANS_START)
-/* Host command buffer control register */
-#define STOP_CMD (0x01 << 28)
-
-/* Host data buffer control register */
-#define SDMA_MODE 0x00
-#define ADMA_MODE (0x02 << 26)
-#define STOP_DMA (0x01 << 28)
-#define TRIG_DMA (0x01 << 31)
-
-/* Host access internal memory register */
-#define HAIMR_TRANS_START (0x01 << 31)
-#define HAIMR_READ 0x00
-#define HAIMR_WRITE (0x01 << 30)
-#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ)
-#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE)
-#define HAIMR_TRANS_END (HAIMR_TRANS_START)
-
-/* Bus interrupt pending register */
-#define CMD_DONE_INT (1 << 31)
-#define DATA_DONE_INT (1 << 30)
-#define TRANS_OK_INT (1 << 29)
-#define TRANS_FAIL_INT (1 << 28)
-#define XD_INT (1 << 27)
-#define MS_INT (1 << 26)
-#define SD_INT (1 << 25)
-#define GPIO0_INT (1 << 24)
-#define OC_INT (1 << 23)
-#define SD_WRITE_PROTECT (1 << 19)
-#define XD_EXIST (1 << 18)
-#define MS_EXIST (1 << 17)
-#define SD_EXIST (1 << 16)
-#define DELINK_INT GPIO0_INT
-#define MS_OC_INT (1 << 23)
-#define SD_OC_INT (1 << 22)
+#define RTSX_BIPR 0x14
+#define CMD_DONE_INT (1 << 31)
+#define DATA_DONE_INT (1 << 30)
+#define TRANS_OK_INT (1 << 29)
+#define TRANS_FAIL_INT (1 << 28)
+#define XD_INT (1 << 27)
+#define MS_INT (1 << 26)
+#define SD_INT (1 << 25)
+#define GPIO0_INT (1 << 24)
+#define OC_INT (1 << 23)
+#define SD_WRITE_PROTECT (1 << 19)
+#define XD_EXIST (1 << 18)
+#define MS_EXIST (1 << 17)
+#define SD_EXIST (1 << 16)
+#define DELINK_INT GPIO0_INT
+#define MS_OC_INT (1 << 23)
+#define SD_OC_INT (1 << 22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | \
CARD_INT | GPIO0_INT | OC_INT)
-
#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
-/* Bus interrupt enable register */
-#define CMD_DONE_INT_EN (1 << 31)
-#define DATA_DONE_INT_EN (1 << 30)
-#define TRANS_OK_INT_EN (1 << 29)
-#define TRANS_FAIL_INT_EN (1 << 28)
-#define XD_INT_EN (1 << 27)
-#define MS_INT_EN (1 << 26)
-#define SD_INT_EN (1 << 25)
-#define GPIO0_INT_EN (1 << 24)
-#define OC_INT_EN (1 << 23)
-#define DELINK_INT_EN GPIO0_INT_EN
-#define MS_OC_INT_EN (1 << 23)
-#define SD_OC_INT_EN (1 << 22)
-
-#define READ_REG_CMD 0
-#define WRITE_REG_CMD 1
-#define CHECK_REG_CMD 2
+#define RTSX_BIER 0x18
+#define CMD_DONE_INT_EN (1 << 31)
+#define DATA_DONE_INT_EN (1 << 30)
+#define TRANS_OK_INT_EN (1 << 29)
+#define TRANS_FAIL_INT_EN (1 << 28)
+#define XD_INT_EN (1 << 27)
+#define MS_INT_EN (1 << 26)
+#define SD_INT_EN (1 << 25)
+#define GPIO0_INT_EN (1 << 24)
+#define OC_INT_EN (1 << 23)
+#define DELINK_INT_EN GPIO0_INT_EN
+#define MS_OC_INT_EN (1 << 23)
+#define SD_OC_INT_EN (1 << 22)
+
/*
* macros for easy use
@@ -125,423 +123,68 @@
#define rtsx_pci_write_config_dword(pcr, where, val) \
pci_write_config_dword((pcr)->pci, where, val)
-#define STATE_TRANS_NONE 0
-#define STATE_TRANS_CMD 1
-#define STATE_TRANS_BUF 2
-#define STATE_TRANS_SG 3
-
-#define TRANS_NOT_READY 0
-#define TRANS_RESULT_OK 1
-#define TRANS_RESULT_FAIL 2
-#define TRANS_NO_DEVICE 3
-
-#define RTSX_RESV_BUF_LEN 4096
-#define HOST_CMDS_BUF_LEN 1024
-#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
-#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8)
-#define MAX_SG_ITEM_LEN 0x80000
-
-#define HOST_TO_DEVICE 0
-#define DEVICE_TO_HOST 1
-
-#define RTSX_PHASE_MAX 32
-#define RX_TUNING_CNT 3
-
-/* SG descriptor */
-#define SG_INT 0x04
-#define SG_END 0x02
-#define SG_VALID 0x01
-
-#define SG_NO_OP 0x00
-#define SG_TRANS_DATA (0x02 << 4)
-#define SG_LINK_DESC (0x03 << 4)
-
-/* Output voltage */
-#define OUTPUT_3V3 0
-#define OUTPUT_1V8 1
-
-/* Card Clock Enable Register */
-#define SD_CLK_EN 0x04
-#define MS_CLK_EN 0x08
-
-/* Card Select Register */
-#define SD_MOD_SEL 2
-#define MS_MOD_SEL 3
-
-/* Card Output Enable Register */
-#define SD_OUTPUT_EN 0x04
-#define MS_OUTPUT_EN 0x08
-
-/* CARD_SHARE_MODE */
-#define CARD_SHARE_MASK 0x0F
-#define CARD_SHARE_MULTI_LUN 0x00
-#define CARD_SHARE_NORMAL 0x00
-#define CARD_SHARE_48_SD 0x04
-#define CARD_SHARE_48_MS 0x08
-/* CARD_SHARE_MODE for barossa */
-#define CARD_SHARE_BAROSSA_SD 0x01
-#define CARD_SHARE_BAROSSA_MS 0x02
-
-/* CARD_DRIVE_SEL */
-#define MS_DRIVE_8mA (0x01 << 6)
-#define MMC_DRIVE_8mA (0x01 << 4)
-#define XD_DRIVE_8mA (0x01 << 2)
-#define GPIO_DRIVE_8mA 0x01
-#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
- XD_DRIVE_8mA | GPIO_DRIVE_8mA)
-#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
- XD_DRIVE_8mA)
-#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define STATE_TRANS_NONE 0
+#define STATE_TRANS_CMD 1
+#define STATE_TRANS_BUF 2
+#define STATE_TRANS_SG 3
-/* SD30_DRIVE_SEL */
-#define DRIVER_TYPE_A 0x05
-#define DRIVER_TYPE_B 0x03
-#define DRIVER_TYPE_C 0x02
-#define DRIVER_TYPE_D 0x01
-#define CFG_DRIVER_TYPE_A 0x02
-#define CFG_DRIVER_TYPE_B 0x03
-#define CFG_DRIVER_TYPE_C 0x01
-#define CFG_DRIVER_TYPE_D 0x00
-
-/* FPDCTL */
-#define SSC_POWER_DOWN 0x01
-#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
-
-/* CLK_CTL */
-#define CHANGE_CLK 0x01
-
-/* LDO_CTL */
-#define BPP_ASIC_1V7 0x00
-#define BPP_ASIC_1V8 0x01
-#define BPP_ASIC_1V9 0x02
-#define BPP_ASIC_2V0 0x03
-#define BPP_ASIC_2V7 0x04
-#define BPP_ASIC_2V8 0x05
-#define BPP_ASIC_3V2 0x06
-#define BPP_ASIC_3V3 0x07
-#define BPP_REG_TUNED18 0x07
-#define BPP_TUNED18_SHIFT_8402 5
-#define BPP_TUNED18_SHIFT_8411 4
-#define BPP_PAD_MASK 0x04
-#define BPP_PAD_3V3 0x04
-#define BPP_PAD_1V8 0x00
-#define BPP_LDO_POWB 0x03
-#define BPP_LDO_ON 0x00
-#define BPP_LDO_SUSPEND 0x02
-#define BPP_LDO_OFF 0x03
-
-/* CD_PAD_CTL */
-#define CD_DISABLE_MASK 0x07
-#define MS_CD_DISABLE 0x04
-#define SD_CD_DISABLE 0x02
-#define XD_CD_DISABLE 0x01
-#define CD_DISABLE 0x07
-#define CD_ENABLE 0x00
-#define MS_CD_EN_ONLY 0x03
-#define SD_CD_EN_ONLY 0x05
-#define XD_CD_EN_ONLY 0x06
-#define FORCE_CD_LOW_MASK 0x38
-#define FORCE_CD_XD_LOW 0x08
-#define FORCE_CD_SD_LOW 0x10
-#define FORCE_CD_MS_LOW 0x20
-#define CD_AUTO_DISABLE 0x40
-
-/* SD_STAT1 */
-#define SD_CRC7_ERR 0x80
-#define SD_CRC16_ERR 0x40
-#define SD_CRC_WRITE_ERR 0x20
-#define SD_CRC_WRITE_ERR_MASK 0x1C
-#define GET_CRC_TIME_OUT 0x02
-#define SD_TUNING_COMPARE_ERR 0x01
-
-/* SD_STAT2 */
-#define SD_RSP_80CLK_TIMEOUT 0x01
-
-/* SD_BUS_STAT */
-#define SD_CLK_TOGGLE_EN 0x80
-#define SD_CLK_FORCE_STOP 0x40
-#define SD_DAT3_STATUS 0x10
-#define SD_DAT2_STATUS 0x08
-#define SD_DAT1_STATUS 0x04
-#define SD_DAT0_STATUS 0x02
-#define SD_CMD_STATUS 0x01
-
-/* SD_PAD_CTL */
-#define SD_IO_USING_1V8 0x80
-#define SD_IO_USING_3V3 0x7F
-#define TYPE_A_DRIVING 0x00
-#define TYPE_B_DRIVING 0x01
-#define TYPE_C_DRIVING 0x02
-#define TYPE_D_DRIVING 0x03
-
-/* SD_SAMPLE_POINT_CTL */
-#define DDR_FIX_RX_DAT 0x00
-#define DDR_VAR_RX_DAT 0x80
-#define DDR_FIX_RX_DAT_EDGE 0x00
-#define DDR_FIX_RX_DAT_14_DELAY 0x40
-#define DDR_FIX_RX_CMD 0x00
-#define DDR_VAR_RX_CMD 0x20
-#define DDR_FIX_RX_CMD_POS_EDGE 0x00
-#define DDR_FIX_RX_CMD_14_DELAY 0x10
-#define SD20_RX_POS_EDGE 0x00
-#define SD20_RX_14_DELAY 0x08
-#define SD20_RX_SEL_MASK 0x08
+#define TRANS_NOT_READY 0
+#define TRANS_RESULT_OK 1
+#define TRANS_RESULT_FAIL 2
+#define TRANS_NO_DEVICE 3
-/* SD_PUSH_POINT_CTL */
-#define DDR_FIX_TX_CMD_DAT 0x00
-#define DDR_VAR_TX_CMD_DAT 0x80
-#define DDR_FIX_TX_DAT_14_TSU 0x00
-#define DDR_FIX_TX_DAT_12_TSU 0x40
-#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
-#define DDR_FIX_TX_CMD_14_AHEAD 0x20
-#define SD20_TX_NEG_EDGE 0x00
-#define SD20_TX_14_AHEAD 0x10
-#define SD20_TX_SEL_MASK 0x10
-#define DDR_VAR_SDCLK_POL_SWAP 0x01
-
-/* SD_TRANSFER */
-#define SD_TRANSFER_START 0x80
-#define SD_TRANSFER_END 0x40
-#define SD_STAT_IDLE 0x20
-#define SD_TRANSFER_ERR 0x10
-/* SD Transfer Mode definition */
-#define SD_TM_NORMAL_WRITE 0x00
-#define SD_TM_AUTO_WRITE_3 0x01
-#define SD_TM_AUTO_WRITE_4 0x02
-#define SD_TM_AUTO_READ_3 0x05
-#define SD_TM_AUTO_READ_4 0x06
-#define SD_TM_CMD_RSP 0x08
-#define SD_TM_AUTO_WRITE_1 0x09
-#define SD_TM_AUTO_WRITE_2 0x0A
-#define SD_TM_NORMAL_READ 0x0C
-#define SD_TM_AUTO_READ_1 0x0D
-#define SD_TM_AUTO_READ_2 0x0E
-#define SD_TM_AUTO_TUNING 0x0F
-
-/* SD_VPTX_CTL / SD_VPRX_CTL */
-#define PHASE_CHANGE 0x80
-#define PHASE_NOT_RESET 0x40
-
-/* SD_DCMPS_TX_CTL / SD_DCMPS_RX_CTL */
-#define DCMPS_CHANGE 0x80
-#define DCMPS_CHANGE_DONE 0x40
-#define DCMPS_ERROR 0x20
-#define DCMPS_CURRENT_PHASE 0x1F
-
-/* SD Configure 1 Register */
-#define SD_CLK_DIVIDE_0 0x00
-#define SD_CLK_DIVIDE_256 0xC0
-#define SD_CLK_DIVIDE_128 0x80
-#define SD_BUS_WIDTH_1BIT 0x00
-#define SD_BUS_WIDTH_4BIT 0x01
-#define SD_BUS_WIDTH_8BIT 0x02
-#define SD_ASYNC_FIFO_NOT_RST 0x10
-#define SD_20_MODE 0x00
-#define SD_DDR_MODE 0x04
-#define SD_30_MODE 0x08
-
-#define SD_CLK_DIVIDE_MASK 0xC0
-
-/* SD_CMD_STATE */
-#define SD_CMD_IDLE 0x80
-
-/* SD_DATA_STATE */
-#define SD_DATA_IDLE 0x80
-
-/* DCM_DRP_CTL */
-#define DCM_RESET 0x08
-#define DCM_LOCKED 0x04
-#define DCM_208M 0x00
-#define DCM_TX 0x01
-#define DCM_RX 0x02
-
-/* DCM_DRP_TRIG */
-#define DRP_START 0x80
-#define DRP_DONE 0x40
-
-/* DCM_DRP_CFG */
-#define DRP_WRITE 0x80
-#define DRP_READ 0x00
-#define DCM_WRITE_ADDRESS_50 0x50
-#define DCM_WRITE_ADDRESS_51 0x51
-#define DCM_READ_ADDRESS_00 0x00
-#define DCM_READ_ADDRESS_51 0x51
-
-/* IRQSTAT0 */
-#define DMA_DONE_INT 0x80
-#define SUSPEND_INT 0x40
-#define LINK_RDY_INT 0x20
-#define LINK_DOWN_INT 0x10
-
-/* DMACTL */
-#define DMA_RST 0x80
-#define DMA_BUSY 0x04
-#define DMA_DIR_TO_CARD 0x00
-#define DMA_DIR_FROM_CARD 0x02
-#define DMA_EN 0x01
-#define DMA_128 (0 << 4)
-#define DMA_256 (1 << 4)
-#define DMA_512 (2 << 4)
-#define DMA_1024 (3 << 4)
-#define DMA_PACK_SIZE_MASK 0x30
-
-/* SSC_CTL1 */
-#define SSC_RSTB 0x80
-#define SSC_8X_EN 0x40
-#define SSC_FIX_FRAC 0x20
-#define SSC_SEL_1M 0x00
-#define SSC_SEL_2M 0x08
-#define SSC_SEL_4M 0x10
-#define SSC_SEL_8M 0x18
-
-/* SSC_CTL2 */
-#define SSC_DEPTH_MASK 0x07
-#define SSC_DEPTH_DISALBE 0x00
-#define SSC_DEPTH_4M 0x01
-#define SSC_DEPTH_2M 0x02
-#define SSC_DEPTH_1M 0x03
-#define SSC_DEPTH_500K 0x04
-#define SSC_DEPTH_250K 0x05
-
-/* System Clock Control Register */
-#define CLK_LOW_FREQ 0x01
-
-/* System Clock Divider Register */
-#define CLK_DIV_1 0x01
-#define CLK_DIV_2 0x02
-#define CLK_DIV_4 0x03
-#define CLK_DIV_8 0x04
-
-/* MS_CFG */
-#define SAMPLE_TIME_RISING 0x00
-#define SAMPLE_TIME_FALLING 0x80
-#define PUSH_TIME_DEFAULT 0x00
-#define PUSH_TIME_ODD 0x40
-#define NO_EXTEND_TOGGLE 0x00
-#define EXTEND_TOGGLE_CHK 0x20
-#define MS_BUS_WIDTH_1 0x00
-#define MS_BUS_WIDTH_4 0x10
-#define MS_BUS_WIDTH_8 0x18
-#define MS_2K_SECTOR_MODE 0x04
-#define MS_512_SECTOR_MODE 0x00
-#define MS_TOGGLE_TIMEOUT_EN 0x00
-#define MS_TOGGLE_TIMEOUT_DISEN 0x01
-#define MS_NO_CHECK_INT 0x02
+#define RTSX_RESV_BUF_LEN 4096
+#define HOST_CMDS_BUF_LEN 1024
+#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
+#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8)
+#define MAX_SG_ITEM_LEN 0x80000
+#define HOST_TO_DEVICE 0
+#define DEVICE_TO_HOST 1
-/* MS_TRANS_CFG */
-#define WAIT_INT 0x80
-#define NO_WAIT_INT 0x00
-#define NO_AUTO_READ_INT_REG 0x00
-#define AUTO_READ_INT_REG 0x40
-#define MS_CRC16_ERR 0x20
-#define MS_RDY_TIMEOUT 0x10
-#define MS_INT_CMDNK 0x08
-#define MS_INT_BREQ 0x04
-#define MS_INT_ERR 0x02
-#define MS_INT_CED 0x01
-
-/* MS_TRANSFER */
-#define MS_TRANSFER_START 0x80
-#define MS_TRANSFER_END 0x40
-#define MS_TRANSFER_ERR 0x20
-#define MS_BS_STATE 0x10
-#define MS_TM_READ_BYTES 0x00
-#define MS_TM_NORMAL_READ 0x01
-#define MS_TM_WRITE_BYTES 0x04
-#define MS_TM_NORMAL_WRITE 0x05
-#define MS_TM_AUTO_READ 0x08
-#define MS_TM_AUTO_WRITE 0x0C
-
-/* SD Configure 2 Register */
-#define SD_CALCULATE_CRC7 0x00
-#define SD_NO_CALCULATE_CRC7 0x80
-#define SD_CHECK_CRC16 0x00
-#define SD_NO_CHECK_CRC16 0x40
-#define SD_NO_CHECK_WAIT_CRC_TO 0x20
-#define SD_WAIT_BUSY_END 0x08
-#define SD_NO_WAIT_BUSY_END 0x00
-#define SD_CHECK_CRC7 0x00
-#define SD_NO_CHECK_CRC7 0x04
-#define SD_RSP_LEN_0 0x00
-#define SD_RSP_LEN_6 0x01
-#define SD_RSP_LEN_17 0x02
-/* SD/MMC Response Type Definition */
-#define SD_RSP_TYPE_R0 0x04
-#define SD_RSP_TYPE_R1 0x01
-#define SD_RSP_TYPE_R1b 0x09
-#define SD_RSP_TYPE_R2 0x02
-#define SD_RSP_TYPE_R3 0x05
-#define SD_RSP_TYPE_R4 0x05
-#define SD_RSP_TYPE_R5 0x01
-#define SD_RSP_TYPE_R6 0x01
-#define SD_RSP_TYPE_R7 0x01
-
-/* SD_CONFIGURE3 */
-#define SD_RSP_80CLK_TIMEOUT_EN 0x01
-
-/* Card Transfer Reset Register */
-#define SPI_STOP 0x01
-#define XD_STOP 0x02
-#define SD_STOP 0x04
-#define MS_STOP 0x08
-#define SPI_CLR_ERR 0x10
-#define XD_CLR_ERR 0x20
-#define SD_CLR_ERR 0x40
-#define MS_CLR_ERR 0x80
-
-/* Card Data Source Register */
-#define PINGPONG_BUFFER 0x01
-#define RING_BUFFER 0x00
-
-/* Card Power Control Register */
-#define PMOS_STRG_MASK 0x10
-#define PMOS_STRG_800mA 0x10
-#define PMOS_STRG_400mA 0x00
-#define SD_POWER_OFF 0x03
-#define SD_PARTIAL_POWER_ON 0x01
-#define SD_POWER_ON 0x00
-#define SD_POWER_MASK 0x03
-#define MS_POWER_OFF 0x0C
-#define MS_PARTIAL_POWER_ON 0x04
-#define MS_POWER_ON 0x00
-#define MS_POWER_MASK 0x0C
-#define BPP_POWER_OFF 0x0F
-#define BPP_POWER_5_PERCENT_ON 0x0E
-#define BPP_POWER_10_PERCENT_ON 0x0C
-#define BPP_POWER_15_PERCENT_ON 0x08
-#define BPP_POWER_ON 0x00
-#define BPP_POWER_MASK 0x0F
-#define SD_VCC_PARTIAL_POWER_ON 0x02
-#define SD_VCC_POWER_ON 0x00
-
-/* PWR_GATE_CTRL */
-#define PWR_GATE_EN 0x01
-#define LDO3318_PWR_MASK 0x06
-#define LDO_ON 0x00
-#define LDO_SUSPEND 0x04
-#define LDO_OFF 0x06
-
-/* CARD_CLK_SOURCE */
-#define CRC_FIX_CLK (0x00 << 0)
-#define CRC_VAR_CLK0 (0x01 << 0)
-#define CRC_VAR_CLK1 (0x02 << 0)
-#define SD30_FIX_CLK (0x00 << 2)
-#define SD30_VAR_CLK0 (0x01 << 2)
-#define SD30_VAR_CLK1 (0x02 << 2)
-#define SAMPLE_FIX_CLK (0x00 << 4)
-#define SAMPLE_VAR_CLK0 (0x01 << 4)
-#define SAMPLE_VAR_CLK1 (0x02 << 4)
-
-/* HOST_SLEEP_STATE */
-#define HOST_ENTER_S1 1
-#define HOST_ENTER_S3 2
+#define OUTPUT_3V3 0
+#define OUTPUT_1V8 1
+
+#define RTSX_PHASE_MAX 32
+#define RX_TUNING_CNT 3
#define MS_CFG 0xFD40
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
#define MS_TPC 0xFD41
#define MS_TRANS_CFG 0xFD42
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
#define MS_TRANSFER 0xFD43
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
#define MS_INT_REG 0xFD44
#define MS_BYTE_CNT 0xFD45
#define MS_SECTOR_CNT_L 0xFD46
@@ -549,14 +192,90 @@
#define MS_DBUS_H 0xFD48
#define SD_CFG1 0xFDA0
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_BUS_WIDTH_1BIT 0x00
+#define SD_BUS_WIDTH_4BIT 0x01
+#define SD_BUS_WIDTH_8BIT 0x02
+#define SD_ASYNC_FIFO_NOT_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+#define SD_CLK_DIVIDE_MASK 0xC0
#define SD_CFG2 0xFDA1
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_NO_CHECK_WAIT_CRC_TO 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+#define SD_RSP_TYPE_R0 0x04
+#define SD_RSP_TYPE_R1 0x01
+#define SD_RSP_TYPE_R1b 0x09
+#define SD_RSP_TYPE_R2 0x02
+#define SD_RSP_TYPE_R3 0x05
+#define SD_RSP_TYPE_R4 0x05
+#define SD_RSP_TYPE_R5 0x01
+#define SD_RSP_TYPE_R6 0x01
+#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD_RSP_80CLK_TIMEOUT_EN 0x01
+
#define SD_STAT1 0xFDA3
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
#define SD_STAT2 0xFDA4
+#define SD_RSP_80CLK_TIMEOUT 0x01
+
#define SD_BUS_STAT 0xFDA5
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
#define SD_PAD_CTL 0xFDA6
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
#define SD_PUSH_POINT_CTL 0xFDA8
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
#define SD_CMD0 0xFDA9
#define SD_CMD_START 0x40
#define SD_CMD1 0xFDAA
@@ -569,60 +288,203 @@
#define SD_BLOCK_CNT_L 0xFDB1
#define SD_BLOCK_CNT_H 0xFDB2
#define SD_TRANSFER 0xFDB3
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
#define SD_CMD_STATE 0xFDB5
+#define SD_CMD_IDLE 0x80
+
#define SD_DATA_STATE 0xFDB6
+#define SD_DATA_IDLE 0x80
#define SRCTL 0xFC13
-#define DCM_DRP_CTL 0xFC23
-#define DCM_DRP_TRIG 0xFC24
-#define DCM_DRP_CFG 0xFC25
-#define DCM_DRP_WR_DATA_L 0xFC26
-#define DCM_DRP_WR_DATA_H 0xFC27
-#define DCM_DRP_RD_DATA_L 0xFC28
-#define DCM_DRP_RD_DATA_H 0xFC29
+#define DCM_DRP_CTL 0xFC23
+#define DCM_RESET 0x08
+#define DCM_LOCKED 0x04
+#define DCM_208M 0x00
+#define DCM_TX 0x01
+#define DCM_RX 0x02
+#define DCM_DRP_TRIG 0xFC24
+#define DRP_START 0x80
+#define DRP_DONE 0x40
+#define DCM_DRP_CFG 0xFC25
+#define DRP_WRITE 0x80
+#define DRP_READ 0x00
+#define DCM_WRITE_ADDRESS_50 0x50
+#define DCM_WRITE_ADDRESS_51 0x51
+#define DCM_READ_ADDRESS_00 0x00
+#define DCM_READ_ADDRESS_51 0x51
+#define DCM_DRP_WR_DATA_L 0xFC26
+#define DCM_DRP_WR_DATA_H 0xFC27
+#define DCM_DRP_RD_DATA_L 0xFC28
+#define DCM_DRP_RD_DATA_H 0xFC29
#define SD_VPCLK0_CTL 0xFC2A
#define SD_VPCLK1_CTL 0xFC2B
#define SD_DCMPS0_CTL 0xFC2C
#define SD_DCMPS1_CTL 0xFC2D
#define SD_VPTX_CTL SD_VPCLK0_CTL
#define SD_VPRX_CTL SD_VPCLK1_CTL
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL
#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL
+#define DCMPS_CHANGE 0x80
+#define DCMPS_CHANGE_DONE 0x40
+#define DCMPS_ERROR 0x20
+#define DCMPS_CURRENT_PHASE 0x1F
#define CARD_CLK_SOURCE 0xFC2E
-
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
#define CARD_PWR_CTL 0xFD50
+#define PMOS_STRG_MASK 0x10
+#define PMOS_STRG_800mA 0x10
+#define PMOS_STRG_400mA 0x00
+#define SD_POWER_OFF 0x03
+#define SD_PARTIAL_POWER_ON 0x01
+#define SD_POWER_ON 0x00
+#define SD_POWER_MASK 0x03
+#define MS_POWER_OFF 0x0C
+#define MS_PARTIAL_POWER_ON 0x04
+#define MS_POWER_ON 0x00
+#define MS_POWER_MASK 0x0C
+#define BPP_POWER_OFF 0x0F
+#define BPP_POWER_5_PERCENT_ON 0x0E
+#define BPP_POWER_10_PERCENT_ON 0x0C
+#define BPP_POWER_15_PERCENT_ON 0x08
+#define BPP_POWER_ON 0x00
+#define BPP_POWER_MASK 0x0F
+#define SD_VCC_PARTIAL_POWER_ON 0x02
+#define SD_VCC_POWER_ON 0x00
#define CARD_CLK_SWITCH 0xFD51
#define RTL8411B_PACKAGE_MODE 0xFD51
#define CARD_SHARE_MODE 0xFD52
+#define CARD_SHARE_MASK 0x0F
+#define CARD_SHARE_MULTI_LUN 0x00
+#define CARD_SHARE_NORMAL 0x00
+#define CARD_SHARE_48_SD 0x04
+#define CARD_SHARE_48_MS 0x08
+#define CARD_SHARE_BAROSSA_SD 0x01
+#define CARD_SHARE_BAROSSA_MS 0x02
#define CARD_DRIVE_SEL 0xFD53
+#define MS_DRIVE_8mA (0x01 << 6)
+#define MMC_DRIVE_8mA (0x01 << 4)
+#define XD_DRIVE_8mA (0x01 << 2)
+#define GPIO_DRIVE_8mA 0x01
+#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA)
+#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+
#define CARD_STOP 0xFD54
+#define SPI_STOP 0x01
+#define XD_STOP 0x02
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define SPI_CLR_ERR 0x10
+#define XD_CLR_ERR 0x20
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
#define CARD_OE 0xFD55
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
#define CARD_AUTO_BLINK 0xFD56
#define CARD_GPIO_DIR 0xFD57
#define CARD_GPIO 0xFD58
#define CARD_DATA_SOURCE 0xFD5B
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
#define SD30_CLK_DRIVE_SEL 0xFD5A
+#define DRIVER_TYPE_A 0x05
+#define DRIVER_TYPE_B 0x03
+#define DRIVER_TYPE_C 0x02
+#define DRIVER_TYPE_D 0x01
#define CARD_SELECT 0xFD5C
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
#define SD30_DRIVE_SEL 0xFD5E
+#define CFG_DRIVER_TYPE_A 0x02
+#define CFG_DRIVER_TYPE_B 0x03
+#define CFG_DRIVER_TYPE_C 0x01
+#define CFG_DRIVER_TYPE_D 0x00
#define SD30_CMD_DRIVE_SEL 0xFD5E
#define SD30_DAT_DRIVE_SEL 0xFD5F
#define CARD_CLK_EN 0xFD69
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
-
+#define CD_DISABLE_MASK 0x07
+#define MS_CD_DISABLE 0x04
+#define SD_CD_DISABLE 0x02
+#define XD_CD_DISABLE 0x01
+#define CD_DISABLE 0x07
+#define CD_ENABLE 0x00
+#define MS_CD_EN_ONLY 0x03
+#define SD_CD_EN_ONLY 0x05
+#define XD_CD_EN_ONLY 0x06
+#define FORCE_CD_LOW_MASK 0x38
+#define FORCE_CD_XD_LOW 0x08
+#define FORCE_CD_SD_LOW 0x10
+#define FORCE_CD_MS_LOW 0x20
+#define CD_AUTO_DISABLE 0x40
#define FPDCTL 0xFC00
+#define SSC_POWER_DOWN 0x01
+#define SD_OC_POWER_DOWN 0x02
+#define ALL_POWER_DOWN 0x07
+#define OC_POWER_DOWN 0x06
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
+#define CHANGE_CLK 0x01
+#define CLK_LOW_FREQ 0x01
+
#define CLK_DIV 0xFC03
+#define CLK_DIV_1 0x01
+#define CLK_DIV_2 0x02
+#define CLK_DIV_4 0x03
+#define CLK_DIV_8 0x04
#define CLK_SEL 0xFC04
#define SSC_DIV_N_0 0xFC0F
#define SSC_DIV_N_1 0xFC10
#define SSC_CTL1 0xFC11
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
#define SSC_CTL2 0xFC12
-
+#define SSC_DEPTH_MASK 0x07
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_4M 0x01
+#define SSC_DEPTH_2M 0x02
+#define SSC_DEPTH_1M 0x03
+#define SSC_DEPTH_500K 0x04
+#define SSC_DEPTH_250K 0x05
#define RCCTL 0xFC14
#define FPGA_PULL_CTL 0xFC1D
@@ -630,6 +492,24 @@
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
+#define BPP_ASIC_1V7 0x00
+#define BPP_ASIC_1V8 0x01
+#define BPP_ASIC_1V9 0x02
+#define BPP_ASIC_2V0 0x03
+#define BPP_ASIC_2V7 0x04
+#define BPP_ASIC_2V8 0x05
+#define BPP_ASIC_3V2 0x06
+#define BPP_ASIC_3V3 0x07
+#define BPP_REG_TUNED18 0x07
+#define BPP_TUNED18_SHIFT_8402 5
+#define BPP_TUNED18_SHIFT_8411 4
+#define BPP_PAD_MASK 0x04
+#define BPP_PAD_3V3 0x04
+#define BPP_PAD_1V8 0x00
+#define BPP_LDO_POWB 0x03
+#define BPP_LDO_ON 0x00
+#define BPP_LDO_SUSPEND 0x02
+#define BPP_LDO_OFF 0x03
#define SYS_VER 0xFC32
#define CARD_PULL_CTL1 0xFD60
@@ -642,6 +522,10 @@
/* PCI Express Related Registers */
#define IRQEN0 0xFE20
#define IRQSTAT0 0xFE21
+#define DMA_DONE_INT 0x80
+#define SUSPEND_INT 0x40
+#define LINK_RDY_INT 0x20
+#define LINK_DOWN_INT 0x10
#define IRQEN1 0xFE22
#define IRQSTAT1 0xFE23
#define TLPRIEN 0xFE24
@@ -653,6 +537,16 @@
#define DMATC2 0xFE2A
#define DMATC3 0xFE2B
#define DMACTL 0xFE2C
+#define DMA_RST 0x80
+#define DMA_BUSY 0x04
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 4)
+#define DMA_256 (1 << 4)
+#define DMA_512 (2 << 4)
+#define DMA_1024 (3 << 4)
+#define DMA_PACK_SIZE_MASK 0x30
#define BCTL 0xFE2D
#define RBBC0 0xFE2E
#define RBBC1 0xFE2F
@@ -678,14 +572,21 @@
#define MSGTXDATA2 0xFE46
#define MSGTXDATA3 0xFE47
#define MSGTXCTL 0xFE48
-#define PETXCFG 0xFE49
#define LTR_CTL 0xFE4A
#define OBFF_CFG 0xFE4C
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
+#define PCLK_CTL 0xFE55
+#define PCLK_MODE_SEL 0x20
#define PME_FORCE_CTL 0xFE56
+
#define ASPM_FORCE_CTL 0xFE57
+#define FORCE_ASPM_CTL0 0x10
+#define FORCE_ASPM_VAL_MASK 0x03
+#define FORCE_ASPM_L1_EN 0x02
+#define FORCE_ASPM_L0_EN 0x01
+#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
#define FUNC_FORCE_CTL 0xFE59
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -693,19 +594,36 @@
#define RESET_LOAD_REG 0xFE5E
#define EFUSE_CONTENT 0xFE5F
#define HOST_SLEEP_STATE 0xFE60
-#define SDIO_CFG 0xFE70
+#define HOST_ENTER_S1 1
+#define HOST_ENTER_S3 2
+#define SDIO_CFG 0xFE70
+#define PM_EVENT_DEBUG 0xFE71
+#define PME_DEBUG_0 0x08
#define NFTS_TX_CTRL 0xFE72
#define PWR_GATE_CTRL 0xFE75
+#define PWR_GATE_EN 0x01
+#define LDO3318_PWR_MASK 0x06
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x04
+#define LDO_OFF 0x06
#define PWD_SUSPEND_EN 0xFE76
#define LDO_PWR_SEL 0xFE78
+#define L1SUB_CONFIG1 0xFE8D
+#define L1SUB_CONFIG2 0xFE8E
+#define L1SUB_AUTO_CFG 0x02
+#define L1SUB_CONFIG3 0xFE8F
+
#define DUMMY_REG_RESET_0 0xFE90
#define AUTOLOAD_CFG_BASE 0xFF00
+#define PETXCFG 0xFF03
#define PM_CTRL1 0xFF44
+#define CD_RESUME_EN_MASK 0xF0
+
#define PM_CTRL2 0xFF45
#define PM_CTRL3 0xFF46
#define SDIO_SEND_PME_EN 0x80
@@ -726,18 +644,125 @@
#define IMAGE_FLAG_ADDR0 0xCE80
#define IMAGE_FLAG_ADDR1 0xCE81
+#define RREF_CFG 0xFF6C
+#define RREF_VBGSEL_MASK 0x38
+#define RREF_VBGSEL_1V25 0x28
+
+#define OOBS_CONFIG 0xFF6E
+#define OOBS_AUTOK_DIS 0x80
+#define OOBS_VAL_MASK 0x1F
+
+#define LDO_DV18_CFG 0xFF70
+#define LDO_DV18_SR_MASK 0xC0
+#define LDO_DV18_SR_DF 0x40
+
+#define LDO_CONFIG2 0xFF71
+#define LDO_D3318_MASK 0x07
+#define LDO_D3318_33V 0x07
+#define LDO_D3318_18V 0x02
+
+#define LDO_VCC_CFG0 0xFF72
+#define LDO_VCC_LMTVTH_MASK 0x30
+#define LDO_VCC_LMTVTH_2A 0x10
+
+#define LDO_VCC_CFG1 0xFF73
+#define LDO_VCC_REF_TUNE_MASK 0x30
+#define LDO_VCC_REF_1V2 0x20
+#define LDO_VCC_TUNE_MASK 0x07
+#define LDO_VCC_1V8 0x04
+#define LDO_VCC_3V3 0x07
+#define LDO_VCC_LMT_EN 0x08
+
+#define LDO_VIO_CFG 0xFF75
+#define LDO_VIO_SR_MASK 0xC0
+#define LDO_VIO_SR_DF 0x40
+#define LDO_VIO_REF_TUNE_MASK 0x30
+#define LDO_VIO_REF_1V2 0x20
+#define LDO_VIO_TUNE_MASK 0x07
+#define LDO_VIO_1V7 0x03
+#define LDO_VIO_1V8 0x04
+#define LDO_VIO_3V3 0x07
+
+#define LDO_DV12S_CFG 0xFF76
+#define LDO_REF12_TUNE_MASK 0x18
+#define LDO_REF12_TUNE_DF 0x10
+#define LDO_D12_TUNE_MASK 0x07
+#define LDO_D12_TUNE_DF 0x04
+
+#define LDO_AV12S_CFG 0xFF77
+#define LDO_AV12S_TUNE_MASK 0x07
+#define LDO_AV12S_TUNE_DF 0x04
+
+#define SD40_LDO_CTL1 0xFE7D
+#define SD40_VIO_TUNE_MASK 0x70
+#define SD40_VIO_TUNE_1V7 0x30
+#define SD_VIO_LDO_1V8 0x40
+#define SD_VIO_LDO_3V3 0x70
+
/* Phy register */
#define PHY_PCR 0x00
+#define PHY_PCR_FORCE_CODE 0xB000
+#define PHY_PCR_OOBS_CALI_50 0x0800
+#define PHY_PCR_OOBS_VCM_08 0x0200
+#define PHY_PCR_OOBS_SEN_90 0x0040
+#define PHY_PCR_RSSI_EN 0x0002
+#define PHY_PCR_RX10K 0x0001
+
#define PHY_RCR0 0x01
#define PHY_RCR1 0x02
+#define PHY_RCR1_ADP_TIME_4 0x0400
+#define PHY_RCR1_VCO_COARSE 0x001F
+#define PHY_SSCCR2 0x02
+#define PHY_SSCCR2_PLL_NCODE 0x0A00
+#define PHY_SSCCR2_TIME0 0x001C
+#define PHY_SSCCR2_TIME2_WIDTH 0x0003
+
#define PHY_RCR2 0x03
+#define PHY_RCR2_EMPHASE_EN 0x8000
+#define PHY_RCR2_NADJR 0x4000
+#define PHY_RCR2_CDR_SR_2 0x0100
+#define PHY_RCR2_FREQSEL_12 0x0040
+#define PHY_RCR2_CDR_SC_12P 0x0010
+#define PHY_RCR2_CALIB_LATE 0x0002
+#define PHY_SSCCR3 0x03
+#define PHY_SSCCR3_STEP_IN 0x2740
+#define PHY_SSCCR3_CHECK_DELAY 0x0008
+#define _PHY_ANA03 0x03
+#define _PHY_ANA03_TIMER_MAX 0x2700
+#define _PHY_ANA03_OOBS_DEB_EN 0x0040
+#define _PHY_CMU_DEBUG_EN 0x0008
+
#define PHY_RTCR 0x04
#define PHY_RDR 0x05
+#define PHY_RDR_RXDSEL_1_9 0x4000
+#define PHY_SSC_AUTO_PWD 0x0600
#define PHY_TCR0 0x06
#define PHY_TCR1 0x07
#define PHY_TUNE 0x08
+#define PHY_TUNE_TUNEREF_1_0 0x4000
+#define PHY_TUNE_VBGSEL_1252 0x0C00
+#define PHY_TUNE_SDBUS_33 0x0200
+#define PHY_TUNE_TUNED18 0x01C0
+#define PHY_TUNE_TUNED12 0X0020
+#define PHY_TUNE_TUNEA12 0x0004
+#define PHY_TUNE_VOLTAGE_MASK 0xFC3F
+#define PHY_TUNE_VOLTAGE_3V3 0x03C0
+#define PHY_TUNE_D18_1V8 0x0100
+#define PHY_TUNE_D18_1V7 0x0080
+#define PHY_ANA08 0x08
+#define PHY_ANA08_RX_EQ_DCGAIN 0x5000
+#define PHY_ANA08_SEL_RX_EN 0x0400
+#define PHY_ANA08_RX_EQ_VAL 0x03C0
+#define PHY_ANA08_SCP 0x0020
+#define PHY_ANA08_SEL_IPI 0x0004
+
#define PHY_IMR 0x09
#define PHY_BPCR 0x0A
+#define PHY_BPCR_IBRXSEL 0x0400
+#define PHY_BPCR_IBTXSEL 0x0100
+#define PHY_BPCR_IB_FILTER 0x0080
+#define PHY_BPCR_CMIRROR_EN 0x0040
+
#define PHY_BIST 0x0B
#define PHY_RAW_L 0x0C
#define PHY_RAW_H 0x0D
@@ -745,6 +770,7 @@
#define PHY_HOST_CLK_CTRL 0x0F
#define PHY_DMR 0x10
#define PHY_BACR 0x11
+#define PHY_BACR_BASIC_MASK 0xFFF3
#define PHY_IER 0x12
#define PHY_BCSR 0x13
#define PHY_BPR 0x14
@@ -752,80 +778,70 @@
#define PHY_BPNR 0x16
#define PHY_BRNR2 0x17
#define PHY_BENR 0x18
-#define PHY_REG_REV 0x19
+#define PHY_REV 0x19
+#define PHY_REV_RESV 0xE000
+#define PHY_REV_RXIDLE_LATCHED 0x1000
+#define PHY_REV_P1_EN 0x0800
+#define PHY_REV_RXIDLE_EN 0x0400
+#define PHY_REV_CLKREQ_TX_EN 0x0200
+#define PHY_REV_CLKREQ_RX_EN 0x0100
+#define PHY_REV_CLKREQ_DT_1_0 0x0040
+#define PHY_REV_STOP_CLKRD 0x0020
+#define PHY_REV_RX_PWST 0x0008
+#define PHY_REV_STOP_CLKWR 0x0004
+#define _PHY_REV0 0x19
+#define _PHY_REV0_FILTER_OUT 0x3800
+#define _PHY_REV0_CDR_BYPASS_PFD 0x0100
+#define _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002
+
#define PHY_FLD0 0x1A
+#define PHY_ANA1A 0x1A
+#define PHY_ANA1A_TXR_LOOPBACK 0x2000
+#define PHY_ANA1A_RXT_BIST 0x0500
+#define PHY_ANA1A_TXR_BIST 0x0040
+#define PHY_ANA1A_REV 0x0006
#define PHY_FLD1 0x1B
#define PHY_FLD2 0x1C
#define PHY_FLD3 0x1D
+#define PHY_FLD3_TIMER_4 0x0800
+#define PHY_FLD3_TIMER_6 0x0020
+#define PHY_FLD3_RXDELINK 0x0004
+#define PHY_ANA1D 0x1D
+#define PHY_ANA1D_DEBUG_ADDR 0x0004
+#define _PHY_FLD0 0x1D
+#define _PHY_FLD0_CLK_REQ_20C 0x8000
+#define _PHY_FLD0_RX_IDLE_EN 0x1000
+#define _PHY_FLD0_BIT_ERR_RSTN 0x0800
+#define _PHY_FLD0_BER_COUNT 0x01E0
+#define _PHY_FLD0_BER_TIMER 0x001E
+#define _PHY_FLD0_CHECK_EN 0x0001
+
#define PHY_FLD4 0x1E
+#define PHY_FLD4_FLDEN_SEL 0x4000
+#define PHY_FLD4_REQ_REF 0x2000
+#define PHY_FLD4_RXAMP_OFF 0x1000
+#define PHY_FLD4_REQ_ADDA 0x0800
+#define PHY_FLD4_BER_COUNT 0x00E0
+#define PHY_FLD4_BER_TIMER 0x000A
+#define PHY_FLD4_BER_CHK_EN 0x0001
+#define PHY_DIG1E 0x1E
+#define PHY_DIG1E_REV 0x4000
+#define PHY_DIG1E_D0_X_D1 0x1000
+#define PHY_DIG1E_RX_ON_HOST 0x0800
+#define PHY_DIG1E_RCLK_REF_HOST 0x0400
+#define PHY_DIG1E_RCLK_TX_EN_KEEP 0x0040
+#define PHY_DIG1E_RCLK_TX_TERM_KEEP 0x0020
+#define PHY_DIG1E_RCLK_RX_EIDLE_ON 0x0010
+#define PHY_DIG1E_TX_TERM_KEEP 0x0008
+#define PHY_DIG1E_RX_TERM_KEEP 0x0004
+#define PHY_DIG1E_TX_EN_KEEP 0x0002
+#define PHY_DIG1E_RX_EN_KEEP 0x0001
#define PHY_DUM_REG 0x1F
-#define LCTLR 0x80
-#define LCTLR_EXT_SYNC 0x80
-#define LCTLR_COMMON_CLOCK_CFG 0x40
-#define LCTLR_RETRAIN_LINK 0x20
-#define LCTLR_LINK_DISABLE 0x10
-#define LCTLR_RCB 0x08
-#define LCTLR_RESERVED 0x04
-#define LCTLR_ASPM_CTL_MASK 0x03
-
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
-/* Phy bits */
-#define PHY_PCR_FORCE_CODE 0xB000
-#define PHY_PCR_OOBS_CALI_50 0x0800
-#define PHY_PCR_OOBS_VCM_08 0x0200
-#define PHY_PCR_OOBS_SEN_90 0x0040
-#define PHY_PCR_RSSI_EN 0x0002
-
-#define PHY_RCR1_ADP_TIME 0x0100
-#define PHY_RCR1_VCO_COARSE 0x001F
-
-#define PHY_RCR2_EMPHASE_EN 0x8000
-#define PHY_RCR2_NADJR 0x4000
-#define PHY_RCR2_CDR_CP_10 0x0400
-#define PHY_RCR2_CDR_SR_2 0x0100
-#define PHY_RCR2_FREQSEL_12 0x0040
-#define PHY_RCR2_CPADJEN 0x0020
-#define PHY_RCR2_CDR_SC_8 0x0008
-#define PHY_RCR2_CALIB_LATE 0x0002
-
-#define PHY_RDR_RXDSEL_1_9 0x4000
-
-#define PHY_TUNE_TUNEREF_1_0 0x4000
-#define PHY_TUNE_VBGSEL_1252 0x0C00
-#define PHY_TUNE_SDBUS_33 0x0200
-#define PHY_TUNE_TUNED18 0x01C0
-#define PHY_TUNE_TUNED12 0X0020
-
-#define PHY_BPCR_IBRXSEL 0x0400
-#define PHY_BPCR_IBTXSEL 0x0100
-#define PHY_BPCR_IB_FILTER 0x0080
-#define PHY_BPCR_CMIRROR_EN 0x0040
-
-#define PHY_REG_REV_RESV 0xE000
-#define PHY_REG_REV_RXIDLE_LATCHED 0x1000
-#define PHY_REG_REV_P1_EN 0x0800
-#define PHY_REG_REV_RXIDLE_EN 0x0400
-#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040
-#define PHY_REG_REV_STOP_CLKRD 0x0020
-#define PHY_REG_REV_RX_PWST 0x0008
-#define PHY_REG_REV_STOP_CLKWR 0x0004
-
-#define PHY_FLD3_TIMER_4 0x7800
-#define PHY_FLD3_TIMER_6 0x00E0
-#define PHY_FLD3_RXDELINK 0x0004
-
-#define PHY_FLD4_FLDEN_SEL 0x4000
-#define PHY_FLD4_REQ_REF 0x2000
-#define PHY_FLD4_RXAMP_OFF 0x1000
-#define PHY_FLD4_REQ_ADDA 0x0800
-#define PHY_FLD4_BER_COUNT 0x00E0
-#define PHY_FLD4_BER_TIMER 0x000A
-#define PHY_FLD4_BER_CHK_EN 0x0001
-
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
struct rtsx_pcr;
@@ -835,6 +851,8 @@ struct pcr_handle {
};
struct pcr_ops {
+ int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val);
+ int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val);
int (*extra_init_hw)(struct rtsx_pcr *pcr);
int (*optimize_phy)(struct rtsx_pcr *pcr);
int (*turn_on_led)(struct rtsx_pcr *pcr);
@@ -856,6 +874,7 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
struct rtsx_pcr {
struct pci_dev *pci;
unsigned int id;
+ int pcie_cap;
/* pci resources */
unsigned long addr;
@@ -928,6 +947,8 @@ struct rtsx_pcr {
const struct pcr_ops *ops;
enum PDEV_STAT state;
+ u16 reg_pm_ctrl3;
+
int num_slots;
struct rtsx_slot *slots;
};
@@ -935,6 +956,10 @@ struct rtsx_pcr {
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
+#define is_version(pcr, pid, ver) \
+ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define pcr_dbg(pcr, fmt, arg...) \
+ dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
#define SDR104_PHASE(val) ((val) & 0xFF)
#define SDR50_PHASE(val) (((val) >> 8) & 0xFF)
@@ -1004,4 +1029,17 @@ static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
}
+static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr,
+ u16 mask, u16 append)
+{
+ int err;
+ u16 val;
+
+ err = rtsx_pci_read_phy_register(pcr, addr, &val);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append);
+}
+
#endif
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 3fdb7cfbffb3..75115384f3fc 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -58,13 +58,7 @@ enum sec_device_type {
* @irq_base: Base IRQ number for device, required for IRQs
* @irq: Generic IRQ number for device
* @irq_data: Runtime data structure for IRQ controller
- * @ono: Power onoff IRQ number for s5m87xx
* @wakeup: Whether or not this is a wakeup device
- * @wtsr_smpl: Whether or not to enable in RTC driver the Watchdog
- * Timer Software Reset (registers set to default value
- * after PWRHOLD falling) and Sudden Momentary Power Loss
- * (PMIC will enter power on sequence after short drop in
- * VBATT voltage).
*/
struct sec_pmic_dev {
struct device *dev;
@@ -77,9 +71,7 @@ struct sec_pmic_dev {
int irq;
struct regmap_irq_chip_data *irq_data;
- int ono;
bool wakeup;
- bool wtsr_smpl;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
@@ -95,7 +87,6 @@ struct sec_platform_data {
int irq_base;
int (*cfg_pmic_irq)(void);
- int ono;
bool wakeup;
bool buck_voltage_lock;
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index f35af7361b60..667aa40486dd 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -74,8 +74,8 @@ enum s2mps11_irq {
S2MPS11_IRQ_MRB,
S2MPS11_IRQ_RTC60S,
- S2MPS11_IRQ_RTCA0,
S2MPS11_IRQ_RTCA1,
+ S2MPS11_IRQ_RTCA0,
S2MPS11_IRQ_SMPL,
S2MPS11_IRQ_RTC1S,
S2MPS11_IRQ_WTSR,
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index b6401e7661c7..29c30ac36020 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -105,6 +105,8 @@ enum s2mps_rtc_reg {
#define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT)
#define S2MPS_RTC_WUDR_SHIFT 4
#define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT)
+#define S2MPS13_RTC_AUDR_SHIFT 1
+#define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT)
#define S2MPS_RTC_RUDR_SHIFT 0
#define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT)
#define RTC_TCON_SHIFT 1
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
new file mode 100644
index 000000000000..b0925fa3e9ef
--- /dev/null
+++ b/include/linux/mfd/sky81452.h
@@ -0,0 +1,31 @@
+/*
+ * sky81452.h SKY81452 MFD driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_H
+#define _SKY81452_H
+
+#include <linux/platform_data/sky81452-backlight.h>
+#include <linux/regulator/machine.h>
+
+struct sky81452_platform_data {
+ struct sky81452_bl_platform_data *bl_pdata;
+ struct regulator_init_data *regulator_init_data;
+};
+
+#endif
diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h
new file mode 100644
index 000000000000..8acf1ec1fa32
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-st.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * System Timer (ST) - System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H
+#define _LINUX_MFD_SYSCON_ATMEL_ST_H
+
+#include <linux/bitops.h>
+
+#define AT91_ST_CR 0x00 /* Control Register */
+#define AT91_ST_WDRST BIT(0) /* Watchdog Timer Restart */
+
+#define AT91_ST_PIMR 0x04 /* Period Interval Mode Register */
+#define AT91_ST_PIV 0xffff /* Period Interval Value */
+
+#define AT91_ST_WDMR 0x08 /* Watchdog Mode Register */
+#define AT91_ST_WDV 0xffff /* Watchdog Counter Value */
+#define AT91_ST_RSTEN BIT(16) /* Reset Enable */
+#define AT91_ST_EXTEN BIT(17) /* External Signal Assertion Enable */
+
+#define AT91_ST_RTMR 0x0c /* Real-time Mode Register */
+#define AT91_ST_RTPRES 0xffff /* Real-time Prescalar Value */
+
+#define AT91_ST_SR 0x10 /* Status Register */
+#define AT91_ST_PITS BIT(0) /* Period Interval Timer Status */
+#define AT91_ST_WDOVF BIT(1) /* Watchdog Overflow */
+#define AT91_ST_RTTINC BIT(2) /* Real-time Timer Increment */
+#define AT91_ST_ALMS BIT(3) /* Alarm Status */
+
+#define AT91_ST_IER 0x14 /* Interrupt Enable Register */
+#define AT91_ST_IDR 0x18 /* Interrupt Disable Register */
+#define AT91_ST_IMR 0x1c /* Interrupt Mask Register */
+
+#define AT91_ST_RTAR 0x20 /* Real-time Alarm Register */
+#define AT91_ST_ALMV 0xfffff /* Alarm Value */
+
+#define AT91_ST_CRTR 0x24 /* Current Real-time Register */
+#define AT91_ST_CRTV 0xfffff /* Current Real-Time Value */
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_ST_H */
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index c877cad61a13..d16f4c82c568 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -207,6 +207,7 @@
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI1 (0x1 << 6)
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI0 (0x2 << 6)
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI1 (0x3 << 6)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_SHIFT 4
#define IMX6Q_GPR3_MIPI_MUX_CTL_MASK (0x3 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI0 (0x0 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI1 (0x1 << 4)
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index c203c9c56776..468c31a27fcf 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -140,36 +140,13 @@ extern int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val);
#define TC_KPD_DEBOUNCE_PERIOD 0xA3
#define TC_KPD_SETTLE_TIME 0xA3
-/**
- * struct tc35893_platform_data - data structure for platform specific data
- * @keymap_data: matrix scan code table for keycodes
- * @krow: mask for available rows, value is 0xFF
- * @kcol: mask for available columns, value is 0xFF
- * @debounce_period: platform specific debounce time
- * @settle_time: platform specific settle down time
- * @irqtype: type of interrupt, falling or rising edge
- * @enable_wakeup: specifies if keypad event can wake up system from sleep
- * @no_autorepeat: flag for auto repetition
- */
-struct tc3589x_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_period;
- u8 settle_time;
- unsigned long irqtype;
- bool enable_wakeup;
- bool no_autorepeat;
-};
/**
* struct tc3589x_platform_data - TC3589x platform data
* @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
- * @keypad: keypad-specific platform data
*/
struct tc3589x_platform_data {
unsigned int block;
- const struct tc3589x_keypad_platform_data *keypad;
};
#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 3f4e994ace2b..1fd50dcfe47c 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -128,6 +128,7 @@
/* Sequencer Status */
#define SEQ_STATUS BIT(5)
+#define CHARGE_STEP 0x11
#define ADC_CLK 3000000
#define TOTAL_STEPS 16
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 605812820e48..24b86d538e88 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -111,6 +111,8 @@ struct dma_chan;
* data for the MMC controller
*/
struct tmio_mmc_data {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
unsigned int hclk;
unsigned long capabilities;
unsigned long capabilities2;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 78baed5f2952..cac1c0904d5f 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -69,7 +69,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
-extern bool migrate_ratelimited(int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
@@ -80,10 +79,6 @@ static inline int migrate_misplaced_page(struct page *page,
{
return -EAGAIN; /* can't migrate now */
}
-static inline bool migrate_ratelimited(int node)
-{
- return false;
-}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index ee80dd7d9f60..819077c32690 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -52,6 +52,7 @@
#define MISC_DYNAMIC_MINOR 255
struct device;
+struct attribute_group;
struct miscdevice {
int minor;
@@ -60,6 +61,7 @@ struct miscdevice {
struct list_head list;
struct device *parent;
struct device *this_device;
+ const struct attribute_group **groups;
const char *nodename;
umode_t mode;
};
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 7b6d4e9ff603..f62e7cf227c6 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -68,6 +68,8 @@ enum {
MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
MLX4_CMD_SET_ICM_SIZE = 0xffd,
MLX4_CMD_ACCESS_REG = 0x3b,
+ MLX4_CMD_ALLOCATE_VPP = 0x80,
+ MLX4_CMD_SET_VPORT_QOS = 0x81,
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
@@ -163,6 +165,9 @@ enum {
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
+
+ /* Update and read QCN parameters */
+ MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
};
enum {
@@ -183,7 +188,14 @@ enum {
};
enum {
- /* set port opcode modifiers */
+ /* Set port opcode modifiers */
+ MLX4_SET_PORT_IB_OPCODE = 0x0,
+ MLX4_SET_PORT_ETH_OPCODE = 0x1,
+ MLX4_SET_PORT_BEACON_OPCODE = 0x4,
+};
+
+enum {
+ /* Set port Ethernet input modifiers */
MLX4_SET_PORT_GENERAL = 0x0,
MLX4_SET_PORT_RQP_CALC = 0x1,
MLX4_SET_PORT_MAC_TABLE = 0x2,
@@ -233,6 +245,16 @@ struct mlx4_config_dev_params {
u8 rx_csum_flags_port_2;
};
+enum mlx4_en_congestion_control_algorithm {
+ MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
+};
+
+enum mlx4_en_congestion_control_opmod {
+ MLX4_CONGESTION_CONTROL_GET_PARAMS,
+ MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+ MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
@@ -281,6 +303,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+ int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index e4ebff7e9d02..83e80ab94500 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -49,8 +49,6 @@
#define MSIX_LEGACY_SZ 4
#define MIN_MSIX_P_PORT 5
-#define MLX4_NUM_UP 8
-#define MLX4_NUM_TC 8
#define MLX4_MAX_100M_UNITS_VAL 255 /*
* work around: can't set values
* greater then this value when
@@ -174,6 +172,7 @@ enum {
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
+ MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52,
MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
@@ -203,7 +202,14 @@ enum {
MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
- MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21
+ MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21,
+ MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22,
+ MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23,
+ MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24,
+ MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25,
+ MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
+ MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
+ MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
};
enum {
@@ -449,6 +455,21 @@ enum mlx4_module_id {
MLX4_MODULE_ID_QSFP28 = 0x11,
};
+enum { /* rl */
+ MLX4_QP_RATE_LIMIT_NONE = 0,
+ MLX4_QP_RATE_LIMIT_KBS = 1,
+ MLX4_QP_RATE_LIMIT_MBS = 2,
+ MLX4_QP_RATE_LIMIT_GBS = 3
+};
+
+struct mlx4_rate_limit_caps {
+ u16 num_rates; /* Number of different rates */
+ u8 min_unit;
+ u16 min_val;
+ u8 max_unit;
+ u16 max_val;
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
@@ -564,6 +585,7 @@ struct mlx4_caps {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
u32 vf_caps;
+ struct mlx4_rate_limit_caps rl_caps;
};
struct mlx4_buf_list {
@@ -982,6 +1004,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev)
return dev->flags & MLX4_FLAG_SLAVE;
}
+static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+ return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf, gfp_t gfp);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -1282,14 +1309,13 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
-void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
- u8 *pg, u16 *ratelimit);
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
+ u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
@@ -1319,6 +1345,10 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
+ int port);
+__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port);
+void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port);
int mlx4_flow_attach(struct mlx4_dev *dev,
struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
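A small, purely illustrative helper combining the additions above -- the mlx4_is_eth() inline, the MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT capability bit and the new rl_caps field -- assuming the FLAG2 bit is reported in dev->caps.flags2 like the other FLAG2 capabilities:

static bool example_port_supports_qp_rate_limit(struct mlx4_dev *dev, int port)
{
	if (!mlx4_is_eth(dev, port))
		return false;
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
		return false;
	/* the firmware must report at least one supported rate */
	return dev->caps.rl_caps.num_rates > 0;
}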
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 551f85456c11..6fed539e5456 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -207,14 +207,17 @@ struct mlx4_qp_context {
__be32 msn;
__be16 rq_wqe_counter;
__be16 sq_wqe_counter;
- u32 reserved3[2];
+ u32 reserved3;
+ __be16 rate_limit_params;
+ u8 reserved4;
+ u8 qos_vport;
__be32 param3;
__be32 nummmcpeers_basemkey;
u8 log_page_size;
- u8 reserved4[2];
+ u8 reserved5[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
- u32 reserved5[10];
+ u32 reserved6[10];
};
struct mlx4_update_qp_context {
@@ -229,6 +232,8 @@ struct mlx4_update_qp_context {
enum {
MLX4_UPD_QP_MASK_PM_STATE = 32,
MLX4_UPD_QP_MASK_VSD = 33,
+ MLX4_UPD_QP_MASK_QOS_VPP = 34,
+ MLX4_UPD_QP_MASK_RATE_LIMIT = 35,
};
enum {
@@ -428,7 +433,9 @@ struct mlx4_wqe_inline_seg {
enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_SMAC = 1 << 0,
MLX4_UPDATE_QP_VSD = 1 << 1,
- MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
+ MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
+ MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
};
enum mlx4_update_qp_params_flags {
@@ -437,7 +444,10 @@ enum mlx4_update_qp_params_flags {
struct mlx4_update_qp_params {
u8 smac_index;
+ u8 qos_vport;
u32 flags;
+ u16 rate_unit;
+ u16 rate_val;
};
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
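As a sketch of how the new rate-limit fields might be used: fill rate_unit/rate_val in mlx4_update_qp_params and pass MLX4_UPDATE_QP_RATE_LIMIT. The tail of the mlx4_update_qp() prototype is cut off by the hunk above, so the (attr, params) argument order here is an assumption:

static int example_limit_qp(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_update_qp_params params = {
		.rate_unit = MLX4_QP_RATE_LIMIT_MBS,
		.rate_val  = 100,		/* illustrative: 100 Mb/s */
	};

	return mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_RATE_LIMIT, &params);
}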
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
index 2826a4b6071e..68cd08f02c2f 100644
--- a/include/linux/mlx5/cmd.h
+++ b/include/linux/mlx5/cmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index f6b17ac601bd..2695ced222df 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -137,14 +137,15 @@ enum {
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
void __iomem *uar_page,
- spinlock_t *doorbell_lock)
+ spinlock_t *doorbell_lock,
+ u32 cons_index)
{
__be32 doorbell[2];
u32 sn;
u32 ci;
sn = cq->arm_sn & 3;
- ci = cq->cons_index & 0xffffff;
+ ci = cons_index & 0xffffff;
*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
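Callers of mlx5_cq_arm() now pass the consumer index explicitly instead of the function reading cq->cons_index itself; a minimal sketch of an updated call site (the surrounding names are placeholders):

static void example_arm_cq(struct mlx5_core_cq *mcq, u32 cmd,
			   void __iomem *uar_page, spinlock_t *doorbell_lock)
{
	/* the extra argument is a snapshot of the consumer index */
	mlx5_cq_arm(mcq, cmd, uar_page, doorbell_lock, mcq->cons_index);
}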
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4e5bd813bb9a..abf65c790421 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index 163a818411e7..afc78a3f4462 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 166d9315fe4b..9a90e7523dc2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -232,6 +232,9 @@ struct mlx5_cmd_stats {
};
struct mlx5_cmd {
+ void *cmd_alloc_buf;
+ dma_addr_t alloc_dma;
+ int alloc_size;
void *cmd_buf;
dma_addr_t dma;
u16 cmdif_rev;
@@ -407,7 +410,7 @@ struct mlx5_core_srq {
struct mlx5_eq_table {
void __iomem *update_ci;
void __iomem *update_arm_ci;
- struct list_head *comp_eq_head;
+ struct list_head comp_eqs_list;
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
@@ -722,6 +725,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -777,14 +781,22 @@ enum {
MAX_MR_CACHE_ENTRIES = 16,
};
+enum {
+ MLX5_INTERFACE_PROTOCOL_IB = 0,
+ MLX5_INTERFACE_PROTOCOL_ETH = 1,
+};
+
struct mlx5_interface {
void * (*add)(struct mlx5_core_dev *dev);
void (*remove)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
+ void * (*get_dev)(void *context);
+ int protocol;
struct list_head list;
};
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
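A hypothetical mlx5 sub-driver using the new per-protocol interface fields; the callback bodies are placeholders, only the registration/lookup pattern follows the declarations above:

static void *example_add(struct mlx5_core_dev *dev)
{
	return dev;		/* context returned to the other callbacks */
}

static void example_remove(struct mlx5_core_dev *dev, void *ctx)
{
}

static void *example_get_dev(void *ctx)
{
	return ctx;
}

static struct mlx5_interface example_intf = {
	.add      = example_add,
	.remove   = example_remove,
	.get_dev  = example_get_dev,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
};

/* registration:	mlx5_register_interface(&example_intf);
 * peer lookup:	mlx5_get_protocol_dev(mdev, MLX5_INTERFACE_PROTOCOL_ETH); */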
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 5f48b8f592c5..cb3ad17edd1f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 61f7a342d1bf..310b5f7fd6ae 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index e1a363a33663..f43ed054a3e0 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47a93928b90f..0755b9fd03a7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -251,6 +251,9 @@ struct vm_operations_struct {
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
+ int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
*/
@@ -494,15 +497,6 @@ static inline int page_count(struct page *page)
return atomic_read(&compound_head(page)->_count);
}
-#ifdef CONFIG_HUGETLB_PAGE
-extern int PageHeadHuge(struct page *page_head);
-#else /* CONFIG_HUGETLB_PAGE */
-static inline int PageHeadHuge(struct page *page_head)
-{
- return 0;
-}
-#endif /* CONFIG_HUGETLB_PAGE */
-
static inline bool __compound_tail_refcounted(struct page *page)
{
return !PageSlab(page) && !PageHeadHuge(page);
@@ -571,53 +565,6 @@ static inline void init_page_count(struct page *page)
atomic_set(&page->_count, 1);
}
-/*
- * PageBuddy() indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
- * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
- * -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
- * efficiently by most CPU architectures.
- */
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-
-static inline int PageBuddy(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBuddy(page), page);
- atomic_set(&page->_mapcount, -1);
-}
-
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-
-static inline int PageBalloon(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBalloon(page), page);
- atomic_set(&page->_mapcount, -1);
-}
-
void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
@@ -1006,34 +953,10 @@ void page_address_init(void);
#define page_address_init() do { } while(0)
#endif
-/*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
- *
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
- *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
- *
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
- */
-#define PAGE_MAPPING_ANON 1
-#define PAGE_MAPPING_KSM 2
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
-
+extern void *page_rmapping(struct page *page);
+extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-static inline void *page_rmapping(struct page *page)
-{
- return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
-}
-
extern struct address_space *__page_file_mapping(struct page *);
static inline
@@ -1045,11 +968,6 @@ struct address_space *page_file_mapping(struct page *page)
return page->mapping;
}
-static inline int PageAnon(struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
-}
-
/*
* Return the pagecache index of the passed page. Regular pagecache pages
* use ->index whereas swapcache pages use ->private
@@ -1294,9 +1212,11 @@ int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_cleaned(struct page *page, struct address_space *mapping);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
+
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
/* Is the vma a continuation of the stack vma above it? */
@@ -1973,10 +1893,10 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
- if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
- return unmapped_area(info);
- else
+ if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
return unmapped_area_topdown(info);
+ else
+ return unmapped_area(info);
}
/* truncate.c */
@@ -2109,7 +2029,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
* and return without waiting upon it */
-#define FOLL_MLOCK 0x40 /* mark page as mlocked */
+#define FOLL_POPULATE 0x40 /* fault in page */
#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
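A minimal sketch of a driver wiring up the new ->pfn_mkwrite hook for a VM_PFNMAP/VM_MIXEDMAP mapping; the body and the return-value policy (0 = let the write fault proceed) are assumptions for illustration, and a real driver would also provide ->fault:

static int example_pfn_mkwrite(struct vm_area_struct *vma,
			       struct vm_fault *vmf)
{
	/* e.g. mark the backing storage dirty before the PTE is made
	 * writable; nothing to do in this sketch */
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.pfn_mkwrite	= example_pfn_mkwrite,
};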
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 199a03aab8dc..8d37e26a1007 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -364,7 +364,9 @@ struct mm_struct {
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
atomic_long_t nr_ptes; /* PTE page table pages */
+#if CONFIG_PGTABLE_LEVELS > 2
atomic_long_t nr_pmds; /* PMD page table pages */
+#endif
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -427,7 +429,7 @@ struct mm_struct {
#endif
/* store ref to file /proc/<pid>/exe symlink points to */
- struct file *exe_file;
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index a6cf4c063e4e..19f0175c0afa 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -512,8 +512,18 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
-extern int mmc_register_driver(struct device_driver *);
-extern void mmc_unregister_driver(struct device_driver *);
+/*
+ * MMC device driver (e.g., Flash card, I/O card...)
+ */
+struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *);
+ void (*remove)(struct mmc_card *);
+ void (*shutdown)(struct mmc_card *);
+};
+
+extern int mmc_register_driver(struct mmc_driver *);
+extern void mmc_unregister_driver(struct mmc_driver *);
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
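With this change an MMC card driver describes itself through struct mmc_driver rather than a bare struct device_driver; a minimal, hypothetical driver skeleton:

static int example_probe(struct mmc_card *card)
{
	return 0;		/* claim the card */
}

static void example_remove(struct mmc_card *card)
{
}

static struct mmc_driver example_mmc_driver = {
	.drv	= { .name = "example_mmc" },
	.probe	= example_probe,
	.remove	= example_remove,
};

/* module init/exit would call mmc_register_driver(&example_mmc_driver)
 * and mmc_unregister_driver(&example_mmc_driver) respectively. */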
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 996807963716..83430f2ea757 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -33,6 +33,8 @@
#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_VENDOR_ID_INTEL 0x0089
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index da77e5e2041d..95d6f0314a7d 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -7,14 +7,4 @@
#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
-struct sh_mobile_sdhi_info {
- int dma_slave_tx;
- int dma_slave_rx;
- unsigned long tmio_flags;
- unsigned long tmio_caps;
- unsigned long tmio_caps2;
- u32 tmio_ocr_mask; /* available MMC voltages */
- unsigned int cd_gpio;
-};
-
#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2782df47101e..54d74f6eb233 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -842,16 +842,16 @@ static inline int populated_zone(struct zone *zone)
extern int movable_zone;
+#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
return movable_zone == ZONE_HIGHMEM;
-#elif defined(CONFIG_HIGHMEM)
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#else
- return 0;
+ return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
+#endif
static inline int is_highmem_idx(enum zone_type idx)
{
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e530533b94be..3bfd56778c29 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -546,6 +546,14 @@ struct amba_id {
void *data;
};
+/**
+ * struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus
+ * @type: Device type identifier.
+ */
+struct mips_cdmm_device_id {
+ __u8 type;
+};
+
/*
* Match x86 CPUs for CPU specific drivers.
* See documentation of "x86_match_cpu" for details.
diff --git a/include/linux/module.h b/include/linux/module.h
index b03485bcb82a..c883b86ea964 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -338,6 +338,8 @@ struct module {
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call **trace_events;
unsigned int num_trace_events;
+ struct trace_enum_map **trace_enums;
+ unsigned int num_trace_enums;
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
unsigned int num_ftrace_callsites;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index c2c561dc0114..f822c3c11377 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -61,6 +61,7 @@ struct mnt_namespace;
#define MNT_DOOMED 0x1000000
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
+#define MNT_UMOUNT 0x8000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
@@ -92,6 +93,6 @@ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
-extern dev_t name_to_dev_t(char *name);
+extern dev_t name_to_dev_t(const char *name);
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 5f487d776411..29975c73a953 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -77,7 +77,7 @@
/* ensure we never evaluate anything shorted than an unsigned long
* to zero, and ensure we'll never miss the end of an comparison (bjd) */
-#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1))/ sizeof(unsigned long))
+#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
@@ -181,7 +181,7 @@ static inline int map_bankwidth_supported(int w)
}
}
-#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG )
+#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)
typedef union {
unsigned long x[MAX_MAP_LONGS];
@@ -264,20 +264,22 @@ void unregister_mtd_chip_driver(struct mtd_chip_driver *);
struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);
-#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
-#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
+#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
+#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
#define INVALIDATE_CACHED_RANGE(map, from, size) \
- do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
+ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
{
int i;
- for (i=0; i<map_words(map); i++) {
+
+ for (i = 0; i < map_words(map); i++) {
if (val1.x[i] != val2.x[i])
return 0;
}
+
return 1;
}
@@ -286,9 +288,9 @@ static inline map_word map_word_and(struct map_info *map, map_word val1, map_wor
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & val2.x[i];
- }
+
return r;
}
@@ -297,9 +299,9 @@ static inline map_word map_word_clr(struct map_info *map, map_word val1, map_wor
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & ~val2.x[i];
- }
+
return r;
}
@@ -308,22 +310,33 @@ static inline map_word map_word_or(struct map_info *map, map_word val1, map_word
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] | val2.x[i];
- }
+
return r;
}
-#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
+static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
+{
+ int i;
+
+ for (i = 0; i < map_words(map); i++) {
+ if ((val1.x[i] & val2.x[i]) != val3.x[i])
+ return 0;
+ }
+
+ return 1;
+}
static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
{
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++) {
if (val1.x[i] & val2.x[i])
return 1;
}
+
return 0;
}
@@ -355,14 +368,16 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
if (map_bankwidth_is_large(map)) {
char *dest = (char *)&orig;
+
memcpy(dest+start, buf, len);
} else {
- for (i=start; i < start+len; i++) {
+ for (i = start; i < start+len; i++) {
int bitpos;
+
#ifdef __LITTLE_ENDIAN
- bitpos = i*8;
+ bitpos = i * 8;
#else /* __BIG_ENDIAN */
- bitpos = (map_bankwidth(map)-1-i)*8;
+ bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
orig.x[0] &= ~(0xff << bitpos);
orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
@@ -384,9 +399,10 @@ static inline map_word map_word_ff(struct map_info *map)
if (map_bankwidth(map) < MAP_FF_LIMIT) {
int bw = 8 * map_bankwidth(map);
+
r.x[0] = (1UL << bw) - 1;
} else {
- for (i=0; i<map_words(map); i++)
+ for (i = 0; i < map_words(map); i++)
r.x[i] = ~0UL;
}
return r;
@@ -407,7 +423,7 @@ static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
r.x[0] = __raw_readq(map->virt + ofs);
#endif
else if (map_bankwidth_is_large(map))
- memcpy_fromio(r.x, map->virt+ofs, map->bankwidth);
+ memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
else
BUG();
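map_word_andequal() is now a proper inline rather than a macro built on map_word_and()/map_word_equal(); callers are unchanged, as in this small illustrative status check:

static int example_status_ready(struct map_info *map, map_word status,
				map_word mask, map_word expected)
{
	/* true when (status & mask) matches 'expected' in every word */
	return map_word_andequal(map, status, mask, expected);
}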
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 4720b86ee73d..e5409524bb0a 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -155,6 +155,8 @@ enum spi_nor_option_flags {
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
* at the offset @offs
+ * @lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
* @priv: the private data
*/
struct spi_nor {
@@ -189,6 +191,9 @@ struct spi_nor {
size_t len, size_t *retlen, const u_char *write_buf);
int (*erase)(struct spi_nor *nor, loff_t offs);
+ int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+
void *priv;
};
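A sketch of a SPI NOR driver populating the new flash_lock/flash_unlock hooks before handing the device to the core; the hook bodies are placeholders for programming the chip's block-protection bits:

static int example_flash_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	/* set the block-protection bits covering [ofs, ofs + len) */
	return 0;
}

static int example_flash_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	/* clear the block-protection bits covering [ofs, ofs + len) */
	return 0;
}

static void example_setup_nor(struct spi_nor *nor)
{
	nor->flash_lock   = example_flash_lock;
	nor->flash_unlock = example_flash_unlock;
}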
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
deleted file mode 100644
index f62f78aef4ac..000000000000
--- a/include/linux/nbd.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * 1999 Copyright (C) Pavel Machek, pavel@ucw.cz. This code is GPL.
- * 1999/11/04 Copyright (C) 1999 VMware, Inc. (Regis "HPReg" Duchesne)
- * Made nbd_end_request() use the io_request_lock
- * 2001 Copyright (C) Steven Whitehouse
- * New nbd_end_request() for compatibility with new linux block
- * layer code.
- * 2003/06/24 Louis D. Langholtz <ldl@aros.net>
- * Removed unneeded blksize_bits field from nbd_device struct.
- * Cleanup PARANOIA usage & code.
- * 2004/02/19 Paul Clements
- * Removed PARANOIA, plus various cleanup and comments
- */
-#ifndef LINUX_NBD_H
-#define LINUX_NBD_H
-
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <uapi/linux/nbd.h>
-
-struct request;
-
-struct nbd_device {
- int flags;
- int harderror; /* Code of hard error */
- struct socket * sock; /* If == NULL, device is not ready, yet */
- int magic;
-
- spinlock_t queue_lock;
- struct list_head queue_head; /* Requests waiting result */
- struct request *active_req;
- wait_queue_head_t active_wq;
- struct list_head waiting_queue; /* Requests to be sent */
- wait_queue_head_t waiting_wq;
-
- struct mutex tx_lock;
- struct gendisk *disk;
- int blksize;
- u64 bytesize;
- pid_t pid; /* pid of nbd-client, if attached */
- int xmit_timeout;
- int disconnect; /* a disconnect has been requested by user */
-};
-
-#endif
diff --git a/include/linux/net.h b/include/linux/net.h
index 17d83393afcc..738ea48be889 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -120,7 +120,6 @@ struct socket {
struct vm_area_struct;
struct page;
-struct kiocb;
struct sockaddr;
struct msghdr;
struct module;
@@ -162,8 +161,8 @@ struct proto_ops {
int (*compat_getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
#endif
- int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len);
+ int (*sendmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len);
/* Notes for implementing recvmsg:
* ===============================
* msg->msg_namelen should get updated by the recvmsg handlers
@@ -172,9 +171,8 @@ struct proto_ops {
* handlers can assume that msg.msg_name is either NULL or has
* a minimum size of sizeof(struct sockaddr_storage).
*/
- int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len,
- int flags);
+ int (*recvmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
ssize_t (*sendpage) (struct socket *sock, struct page *page,
@@ -213,7 +211,7 @@ int sock_create(int family, int type, int proto, struct socket **res);
int sock_create_kern(int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
void sock_release(struct socket *sock);
-int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
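With the kiocb argument dropped, a protocol's proto_ops handlers shrink to the socket and msghdr; an illustrative stub pair (the ops table is deliberately incomplete, the remaining mandatory callbacks are omitted):

static int example_sendmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len)
{
	return -EOPNOTSUPP;	/* placeholder */
}

static int example_recvmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len, int flags)
{
	return -EOPNOTSUPP;	/* placeholder */
}

static const struct proto_ops example_proto_ops = {
	.sendmsg = example_sendmsg,
	.recvmsg = example_recvmsg,
	/* family/owner and the other callbacks omitted in this sketch */
};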
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 278738873703..05b9a694e213 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -25,7 +25,6 @@
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
-#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
@@ -60,6 +59,7 @@ struct phy_device;
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
+struct mpls_dev;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -261,7 +261,6 @@ struct header_ops {
unsigned short type, const void *daddr,
const void *saddr, unsigned int len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
- int (*rebuild)(struct sk_buff *skb);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
@@ -588,6 +587,7 @@ struct netdev_queue {
#ifdef CONFIG_BQL
struct dql dql;
#endif
+ unsigned long tx_maxrate;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -795,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
* struct net_device *dev);
* Called when a packet needs to be transmitted.
- * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
+ * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
+ * the queue before that can happen; it's for obsolete devices and weird
+ * corner cases, but the stack really does a non-trivial amount
+ * of useless work if you return NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required can not be NULL.
*
@@ -875,6 +878,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
+ *
+ * Enable or disable the VF ability to query its RSS Redirection Table and
+ * Hash Key. This is needed since on some devices VF share this information
+ * with PF and querying it may adduce a theoretical security risk.
+ * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
* Called to setup 'tc' number of traffic classes in the net device. This
@@ -968,7 +976,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- * struct net_device *dev, u32 filter_mask)
+ * struct net_device *dev, u32 filter_mask,
+ * int nlflags)
* int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags);
*
@@ -1026,15 +1035,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* be otherwise expressed by feature flags. The check is called with
* the set of features that the stack has calculated and it returns
* those the driver believes to be appropriate.
- *
- * int (*ndo_switch_parent_id_get)(struct net_device *dev,
- * struct netdev_phys_item_id *psid);
- * Called to get an ID of the switch chip this port is part of.
- * If driver implements this, it indicates that it represents a port
- * of a switch chip.
- * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
- * Called to notify switch device port of bridge port STP
- * state change.
+ * int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ * int queue_index, u32 maxrate);
+ * Called when a user wants to set a max-rate limitation of specific
+ * TX queue.
+ * int (*ndo_get_iflink)(const struct net_device *dev);
+ * Called to get the iflink value of this device.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1099,6 +1105,9 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_set_vf_rss_query_en)(
+ struct net_device *dev,
+ int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1164,7 +1173,8 @@ struct net_device_ops {
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask);
+ u32 filter_mask,
+ int nlflags);
int (*ndo_bridge_dellink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags);
@@ -1172,6 +1182,8 @@ struct net_device_ops {
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+ int (*ndo_get_phys_port_name)(struct net_device *dev,
+ char *name, size_t len);
void (*ndo_add_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
__be16 port);
@@ -1191,12 +1203,10 @@ struct net_device_ops {
netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-#ifdef CONFIG_NET_SWITCHDEV
- int (*ndo_switch_parent_id_get)(struct net_device *dev,
- struct netdev_phys_item_id *psid);
- int (*ndo_switch_port_stp_update)(struct net_device *dev,
- u8 state);
-#endif
+ int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ int queue_index,
+ u32 maxrate);
+ int (*ndo_get_iflink)(const struct net_device *dev);
};
/**
@@ -1305,6 +1315,8 @@ enum netdev_priv_flags {
* @base_addr: Device I/O address
* @irq: Device IRQ number
*
+ * @carrier_changes: Stats to monitor carrier on<->off transitions
+ *
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
* @napi_list: List entry, that is used for polling napi devices
@@ -1328,7 +1340,7 @@ enum netdev_priv_flags {
* @mpls_features: Mask of features inheritable by MPLS
*
* @ifindex: interface index
- * @iflink: unique device identifier
+ * @group: The group, that the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
@@ -1338,8 +1350,6 @@ enum netdev_priv_flags {
* @tx_dropped: Dropped packets by core network,
* do not use this in drivers
*
- * @carrier_changes: Stats to monitor carrier on<->off transitions
- *
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
* see <net/iw_handler.h> for details.
@@ -1348,8 +1358,7 @@ enum netdev_priv_flags {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @ethtool_ops: Management operations
- * @fwd_ops: Management operations
- * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc
+ * @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
@@ -1383,14 +1392,14 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
- * @uc: unicast mac addresses
- * @mc: multicast mac addresses
- * @dev_addrs: list of device hw addresses
- * @queues_kset: Group of all Kobjects in the Tx and RX queues
* @uc_promisc: Counter, that indicates, that promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
+ * @uc: unicast mac addresses
+ * @mc: multicast mac addresses
+ * @dev_addrs: list of device hw addresses
+ * @queues_kset: Group of all Kobjects in the Tx and RX queues
* @promiscuity: Number of times, the NIC is told to work in
* Promiscuous mode, if it becomes 0 the NIC will
* exit from working in Promiscuous mode
@@ -1420,6 +1429,12 @@ enum netdev_priv_flags {
* @ingress_queue: XXX: need comments on this one
* @broadcast: hw bcast address
*
+ * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
+ * indexed by RX queue number. Assigned by driver.
+ * This must only be set if the ndo_rx_flow_steer
+ * operation is defined
+ * @index_hlist: Device index hash chain
+ *
* @_tx: Array of TX queues
* @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
* @real_num_tx_queues: Number of TX queues currently active in device
@@ -1429,11 +1444,6 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
- * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
- * indexed by RX queue number. Assigned by driver.
- * This must only be set if the ndo_rx_flow_steer
- * operation is defined
- *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1441,7 +1451,6 @@ enum netdev_priv_flags {
*
* @pcpu_refcnt: Number of references to this device
* @todo_list: Delayed register/unregister
- * @index_hlist: Device index hash chain
* @link_watch_list: XXX: need comments on this one
*
* @reg_state: Register/unregister state machine
@@ -1489,9 +1498,6 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
- * @group: The group, that the device belongs to
- * @pm_qos_req: Power Management QoS object
- *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1509,6 +1515,8 @@ struct net_device {
unsigned long base_addr;
int irq;
+ atomic_t carrier_changes;
+
/*
* Some hardware also needs these fields (state,dev_list,
* napi_list,unreg_list,close_list) but they are not
@@ -1542,22 +1550,22 @@ struct net_device {
netdev_features_t mpls_features;
int ifindex;
- int iflink;
+ int group;
struct net_device_stats stats;
atomic_long_t rx_dropped;
atomic_long_t tx_dropped;
- atomic_t carrier_changes;
-
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def * wireless_handlers;
struct iw_public_data * wireless_data;
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
- const struct forwarding_accel_ops *fwd_ops;
+#ifdef CONFIG_NET_SWITCHDEV
+ const struct swdev_ops *swdev_ops;
+#endif
const struct header_ops *header_ops;
@@ -1588,6 +1596,8 @@ struct net_device {
unsigned short dev_id;
unsigned short dev_port;
spinlock_t addr_list_lock;
+ unsigned char name_assign_type;
+ bool uc_promisc;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
@@ -1595,10 +1605,6 @@ struct net_device {
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
-
- unsigned char name_assign_type;
-
- bool uc_promisc;
unsigned int promiscuity;
unsigned int allmulti;
@@ -1621,6 +1627,9 @@ struct net_device {
void *ax25_ptr;
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
+#if IS_ENABLED(CONFIG_MPLS_ROUTING)
+ struct mpls_dev __rcu *mpls_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -1645,7 +1654,10 @@ struct net_device {
struct netdev_queue __rcu *ingress_queue;
unsigned char broadcast[MAX_ADDR_LEN];
-
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
+ struct hlist_node index_hlist;
/*
* Cache lines mostly used on transmit path
@@ -1656,13 +1668,11 @@ struct net_device {
struct Qdisc *qdisc;
unsigned long tx_queue_len;
spinlock_t tx_global_lock;
+ int watchdog_timeo;
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_maps;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rx_cpu_rmap;
-#endif
/* These may be needed for future network-power-down code. */
@@ -1672,13 +1682,11 @@ struct net_device {
*/
unsigned long trans_start;
- int watchdog_timeo;
struct timer_list watchdog_timer;
int __percpu *pcpu_refcnt;
struct list_head todo_list;
- struct hlist_node index_hlist;
struct list_head link_watch_list;
enum { NETREG_UNINITIALIZED=0,
@@ -1702,9 +1710,7 @@ struct net_device {
struct netpoll_info __rcu *npinfo;
#endif
-#ifdef CONFIG_NET_NS
- struct net *nd_net;
-#endif
+ possible_net_t nd_net;
/* mid-layer private */
union {
@@ -1745,8 +1751,6 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
- int group;
- struct pm_qos_request pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1844,10 +1848,7 @@ struct net *dev_net(const struct net_device *dev)
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
-#ifdef CONFIG_NET_NS
- release_net(dev->nd_net);
- dev->nd_net = hold_net(net);
-#endif
+ write_pnet(&dev->nd_net, net);
}
static inline bool netdev_uses_dsa(struct net_device *dev)
@@ -2023,10 +2024,10 @@ struct pcpu_sw_netstats {
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
if (pcpu_stats) { \
- int i; \
- for_each_possible_cpu(i) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
- stat = per_cpu_ptr(pcpu_stats, i); \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
@@ -2159,6 +2160,7 @@ void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
+int dev_get_iflink(const struct net_device *dev);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2167,9 +2169,14 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
+int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
-int dev_loopback_xmit(struct sk_buff *newskb);
-int dev_queue_xmit(struct sk_buff *skb);
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return dev_queue_xmit_sk(skb->sk, skb);
+}
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2409,15 +2416,6 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
-static inline int dev_rebuild_header(struct sk_buff *skb)
-{
- const struct net_device *dev = skb->dev;
-
- if (!dev->header_ops || !dev->header_ops->rebuild)
- return 0;
- return dev->header_ops->rebuild(skb);
-}
-
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2939,7 +2937,11 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
-int netif_receive_skb(struct sk_buff *skb);
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
+static inline int netif_receive_skb(struct sk_buff *skb)
+{
+ return netif_receive_skb_sk(skb->sk, skb);
+}
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -2975,6 +2977,8 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+int dev_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3679,6 +3683,9 @@ void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
@@ -3709,7 +3716,7 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
+static inline bool netif_needs_gso(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
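An illustrative driver filling in the two net_device_ops callbacks introduced above; returning dev->ifindex from ndo_get_iflink assumes the device has no lower device, and the rate programming is left as a comment:

static int example_set_tx_maxrate(struct net_device *dev, int queue_index,
				  u32 maxrate)
{
	/* program the hardware rate limiter for this TX queue */
	return 0;
}

static int example_get_iflink(const struct net_device *dev)
{
	return dev->ifindex;	/* no lower device in this sketch */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_set_tx_maxrate	= example_set_tx_maxrate,
	.ndo_get_iflink		= example_get_iflink,
	/* ndo_open/ndo_stop/ndo_start_xmit etc. omitted */
};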
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2517ece98820..63560d0a8dfe 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -44,11 +44,39 @@ int netfilter_init(void);
struct sk_buff;
struct nf_hook_ops;
+
+struct sock;
+
+struct nf_hook_state {
+ unsigned int hook;
+ int thresh;
+ u_int8_t pf;
+ struct net_device *in;
+ struct net_device *out;
+ struct sock *sk;
+ int (*okfn)(struct sock *, struct sk_buff *);
+};
+
+static inline void nf_hook_state_init(struct nf_hook_state *p,
+ unsigned int hook,
+ int thresh, u_int8_t pf,
+ struct net_device *indev,
+ struct net_device *outdev,
+ struct sock *sk,
+ int (*okfn)(struct sock *, struct sk_buff *))
+{
+ p->hook = hook;
+ p->thresh = thresh;
+ p->pf = pf;
+ p->in = indev;
+ p->out = outdev;
+ p->sk = sk;
+ p->okfn = okfn;
+}
+
typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *));
+ const struct nf_hook_state *state);
struct nf_hook_ops {
struct list_head list;
@@ -118,9 +146,7 @@ static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
}
#endif
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh);
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
* nf_hook_thresh - call a netfilter hook
@@ -130,21 +156,29 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
* value indicates the packet has been consumed by the hook.
*/
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh)
+ int (*okfn)(struct sock *, struct sk_buff *),
+ int thresh)
{
- if (nf_hooks_active(pf, hook))
- return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+ if (nf_hooks_active(pf, hook)) {
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, hook, thresh, pf,
+ indev, outdev, sk, okfn);
+ return nf_hook_slow(skb, &state);
+ }
return 1;
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
{
- return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN);
+ return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
}
/* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -165,35 +199,36 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
*/
static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *), int thresh)
+NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in,
+ struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), int thresh)
{
- int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh);
+ int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
if (ret == 1)
- ret = okfn(skb);
+ ret = okfn(sk, skb);
return ret;
}
static inline int
-NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *), bool cond)
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), bool cond)
{
int ret;
if (!cond ||
- ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
- ret = okfn(skb);
+ ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
+ ret = okfn(sk, skb);
return ret;
}
static inline int
-NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
+NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ int (*okfn)(struct sock *, struct sk_buff *))
{
- return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN);
+ return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
}
/* Call setsockopt() */
@@ -293,19 +328,21 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
}
#else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
-#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
+#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
+#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh)
+ int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
{
- return okfn(skb);
+ return okfn(sk, skb);
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
{
return 1;
}
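A netfilter hook function ported to the new prototype: the in/out devices (and now the socket) are read from the nf_hook_state instead of being passed individually. The body is a placeholder:

static unsigned int example_hookfn(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	if (state->in)
		pr_debug("packet arrived on %s\n", state->in->name);
	return NF_ACCEPT;
}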
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index f1606fa6132d..34b172301558 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -483,7 +483,7 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
if (!__nested)
return -EMSGSIZE;
- ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+ ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
if (!ret)
ipset_nest_end(skb, __nested);
return ret;
@@ -497,8 +497,7 @@ static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
if (!__nested)
return -EMSGSIZE;
- ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
- sizeof(struct in6_addr), ipaddrptr);
+ ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
if (!ret)
ipset_nest_end(skb, __nested);
return ret;
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index cfb7191e6efa..c22a7fb8d0df 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net,
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index c755e4971fa3..f2fdb5a52070 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -2,7 +2,7 @@
#define __LINUX_BRIDGE_NETFILTER_H
#include <uapi/linux/netfilter_bridge.h>
-
+#include <linux/skbuff.h>
enum nf_br_hook_priorities {
NF_BR_PRI_FIRST = INT_MIN,
@@ -17,110 +17,60 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_BRIDGED 0x04
#define BRNF_NF_BRIDGE_PREROUTING 0x08
-#define BRNF_8021Q 0x10
-#define BRNF_PPPoE 0x20
-static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
+static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
- switch (skb->protocol) {
- case __cpu_to_be16(ETH_P_8021Q):
- return VLAN_HLEN;
- case __cpu_to_be16(ETH_P_PPP_SES):
+ if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
return PPPOE_SES_HLEN;
- default:
- return 0;
- }
+ return 0;
}
-static inline void nf_bridge_update_protocol(struct sk_buff *skb)
-{
- if (skb->nf_bridge->mask & BRNF_8021Q)
- skb->protocol = htons(ETH_P_8021Q);
- else if (skb->nf_bridge->mask & BRNF_PPPoE)
- skb->protocol = htons(ETH_P_PPP_SES);
-}
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
-/* Fill in the header for fragmented IP packets handled by
- * the IPv4 connection tracking code.
- *
- * Only used in br_forward.c
- */
-static inline int nf_bridge_copy_header(struct sk_buff *skb)
+static inline void br_drop_fake_rtable(struct sk_buff *skb)
{
- int err;
- unsigned int header_size;
-
- nf_bridge_update_protocol(skb);
- header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
- err = skb_cow_head(skb, header_size);
- if (err)
- return err;
-
- skb_copy_to_linear_data_offset(skb, -header_size,
- skb->nf_bridge->data, header_size);
- __skb_push(skb, nf_bridge_encap_header_len(skb));
- return 0;
-}
+ struct dst_entry *dst = skb_dst(skb);
-static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
-{
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
- return nf_bridge_copy_header(skb);
- return 0;
+ if (dst && (dst->flags & DST_FAKE_RTABLE))
+ skb_dst_drop(skb);
}
-static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
+static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
- return PPPOE_SES_HLEN;
- return 0;
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
-int br_handle_frame_finish(struct sk_buff *skb);
-/* Only used in br_device.c */
-static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
-
- skb_pull(skb, ETH_HLEN);
- nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
- skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
- skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
- skb->dev = nf_bridge->physindev;
- return br_handle_frame_finish(skb);
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
-/* This is called by the IP fragmenting code and it ensures there is
- * enough room for the encapsulating header (if there is one). */
-static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
+static inline struct net_device *
+nf_bridge_get_physindev(const struct sk_buff *skb)
{
- if (skb->nf_bridge)
- return nf_bridge_encap_header_len(skb);
- return 0;
+ return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
}
-struct bridge_skb_cb {
- union {
- __be32 ipv4;
- } daddr;
-};
-
-static inline void br_drop_fake_rtable(struct sk_buff *skb)
+static inline struct net_device *
+nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
-
- if (dst && (dst->flags & DST_FAKE_RTABLE))
- skb_dst_drop(skb);
+ return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
-
#else
-#define nf_bridge_maybe_copy_header(skb) (0)
-#define nf_bridge_pad(skb) (0)
#define br_drop_fake_rtable(skb) do { } while (0)
#endif /* CONFIG_BRIDGE_NETFILTER */
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 34e7a2b7f867..f1bd3962e6b6 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -12,9 +12,10 @@
#ifndef __LINUX_BRIDGE_EFF_H
#define __LINUX_BRIDGE_EFF_H
+#include <linux/if.h>
+#include <linux/if_ether.h>
#include <uapi/linux/netfilter_bridge/ebtables.h>
-
/* return values for match() functions */
#define EBT_MATCH 0
#define EBT_NOMATCH 1
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 901e84db847d..4073510da485 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -65,8 +65,7 @@ struct ipt_error {
extern void *ipt_alloc_initial_table(const struct xt_table *);
extern unsigned int ipt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 610208b18c05..b40d2b635778 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net,
extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
/* Check for an extension */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 02fc86d2348e..6835c1279df7 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -134,7 +134,7 @@ struct netlink_callback {
struct netlink_notify {
struct net *net;
- int portid;
+ u32 portid;
int protocol;
};
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index ed43cb74b11d..32201c269890 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -16,6 +16,13 @@
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
+enum nfs4_acl_whotype {
+ NFS4_ACL_WHO_NAMED = 0,
+ NFS4_ACL_WHO_OWNER,
+ NFS4_ACL_WHO_GROUP,
+ NFS4_ACL_WHO_EVERYONE,
+};
+
struct nfs4_ace {
uint32_t type;
uint32_t flag;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index b01ccf371fdc..b95f914ce083 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -447,13 +447,12 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
struct iov_iter *iter,
loff_t pos);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- struct iov_iter *iter,
- loff_t pos);
+ struct iov_iter *iter);
/*
* linux/fs/nfs/dir.c
@@ -512,6 +511,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
* Try to write back everything synchronously (but check the
* return value!)
*/
+extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 4cb3eaa89cf7..93ab6071bbe9 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -255,11 +255,13 @@ struct nfs4_layoutget {
struct nfs4_getdeviceinfo_args {
struct nfs4_sequence_args seq_args;
struct pnfs_device *pdev;
+ __u32 notify_types;
};
struct nfs4_getdeviceinfo_res {
struct nfs4_sequence_res seq_res;
struct pnfs_device *pdev;
+ __u32 notification;
};
struct nfs4_layoutcommit_args {
@@ -1271,11 +1273,15 @@ struct nfs42_falloc_args {
nfs4_stateid falloc_stateid;
u64 falloc_offset;
u64 falloc_length;
+ const u32 *falloc_bitmask;
};
struct nfs42_falloc_res {
struct nfs4_sequence_res seq_res;
unsigned int status;
+
+ struct nfs_fattr *falloc_fattr;
+ const struct nfs_server *falloc_server;
};
struct nfs42_seek_args {
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ff3fea3194c6..9abb763e4b86 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -460,7 +460,7 @@ struct nilfs_btree_node {
/* level */
#define NILFS_BTREE_LEVEL_DATA 0
#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
-#define NILFS_BTREE_LEVEL_MAX 14
+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
/**
* struct nilfs_palloc_group_desc - block group descriptor
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 9b2022ab4d85..3d46fb4708e0 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -25,16 +25,11 @@ static inline void touch_nmi_watchdog(void)
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
-extern void watchdog_enable_hardlockup_detector(bool val);
-extern bool watchdog_hardlockup_detector_is_enabled(void);
+extern void hardlockup_detector_disable(void);
#else
-static inline void watchdog_enable_hardlockup_detector(bool val)
+static inline void hardlockup_detector_disable(void)
{
}
-static inline bool watchdog_hardlockup_detector_is_enabled(void)
-{
- return true;
-}
#endif
/*
@@ -68,12 +63,20 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+extern int nmi_watchdog_enabled;
+extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern int sysctl_softlockup_all_cpu_backtrace;
struct ctl_table;
-extern int proc_dowatchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
+extern int proc_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_nmi_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_soft_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_watchdog_thresh(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 0adad4a5419b..8dbd05e70f09 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -117,8 +117,9 @@ struct nvme_ns {
unsigned ns_id;
int lba_shift;
- int ms;
- int pi_type;
+ u16 ms;
+ bool ext;
+ u8 pi_type;
u64 mode_select_num_blocks;
u32 mode_select_block_len;
};
diff --git a/include/linux/of.h b/include/linux/of.h
index dfde07e77a63..ddeaae6d2083 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -305,6 +305,7 @@ extern int of_property_read_string_helper(struct device_node *np,
extern int of_device_is_compatible(const struct device_node *device,
const char *);
extern bool of_device_is_available(const struct device_node *device);
+extern bool of_device_is_big_endian(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
@@ -332,6 +333,7 @@ extern int of_count_phandle_with_args(const struct device_node *np,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
+extern int of_alias_get_highest_id(const char *stem);
extern int of_machine_is_compatible(const char *compat);
@@ -466,6 +468,11 @@ static inline bool of_device_is_available(const struct device_node *device)
return false;
}
+static inline bool of_device_is_big_endian(const struct device_node *device)
+{
+ return false;
+}
+
static inline struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp)
@@ -594,6 +601,11 @@ static inline int of_alias_get_id(struct device_node *np, const char *stem)
return -ENOSYS;
}
+static inline int of_alias_get_highest_id(const char *stem)
+{
+ return -ENOSYS;
+}
+
static inline int of_machine_is_compatible(const char *compat)
{
return 0;
@@ -616,6 +628,38 @@ static inline const char *of_prop_next_string(struct property *prop,
return NULL;
}
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return 0;
+}
+
+static inline int of_node_test_and_set_flag(struct device_node *n,
+ unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline int of_property_check_flag(struct property *p, unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_property_set_flag(struct property *p, unsigned long flag)
+{
+}
+
+static inline void of_property_clear_flag(struct property *p, unsigned long flag)
+{
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0ff360d5b3b3..587ee507965d 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -33,6 +33,8 @@ extern void *of_fdt_get_property(const void *blob,
extern int of_fdt_is_compatible(const void *blob,
unsigned long node,
const char *compat);
+extern bool of_fdt_is_big_endian(const void *blob,
+ unsigned long node);
extern int of_fdt_match(const void *blob, unsigned long node,
const char *const *compat);
extern void of_fdt_unflatten_tree(unsigned long *blob,
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index befef42e015b..7bc92e050608 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -14,6 +14,8 @@
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
+#include <linux/types.h>
+
/**
* struct of_endpoint - the OF graph endpoint data structure
* @port: identifier (value of reg property) of a port this endpoint belongs to
@@ -26,9 +28,21 @@ struct of_endpoint {
const struct device_node *local_node;
};
+/**
+ * for_each_endpoint_of_node - iterate over every endpoint in a device node
+ * @parent: parent device node containing ports and endpoints
+ * @child: loop variable pointing to the current endpoint node
+ *
+ * When breaking out of the loop, of_node_put(child) has to be called manually.
+ */
+#define for_each_endpoint_of_node(parent, child) \
+ for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \
+ child = of_graph_get_next_endpoint(parent, child))
+
#ifdef CONFIG_OF
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
+struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
struct device_node *of_graph_get_remote_port_parent(
@@ -42,6 +56,12 @@ static inline int of_graph_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
+static inline struct device_node *of_graph_get_port_by_id(
+ struct device_node *node, u32 id)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_next_endpoint(
const struct device_node *parent,
struct device_node *previous)
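/*
 * Illustrative usage sketch (not part of this diff) for the new
 * for_each_endpoint_of_node() iterator. find_endpoint_by_port() is
 * hypothetical; the point is the reference handling noted in the
 * kernel-doc above: returning (or breaking) from inside the loop keeps
 * the reference on the endpoint node, so the caller must of_node_put() it.
 */
static struct device_node *find_endpoint_by_port(struct device_node *parent,
                                                 unsigned int wanted_port)
{
    struct device_node *ep;
    struct of_endpoint info;

    for_each_endpoint_of_node(parent, ep) {
        if (of_graph_parse_endpoint(ep, &info))
            continue;
        if (info.port == wanted_port)
            return ep;  /* still holds a reference; caller puts it */
    }
    return NULL;
}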
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index bfec136a6d1e..d884929a7747 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -37,8 +37,6 @@ extern int of_irq_parse_one(struct device_node *device, int index,
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern int of_irq_to_resource_table(struct device_node *dev,
- struct resource *res, int nr_irqs);
extern void of_irq_init(const struct of_device_id *matches);
@@ -46,6 +44,8 @@ extern void of_irq_init(const struct of_device_id *matches);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
+extern int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -59,6 +59,11 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name)
{
return 0;
}
+static inline int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_OF)
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index d449018d0726..8f2237eb3485 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -24,6 +24,7 @@ struct phy_device *of_phy_attach(struct net_device *dev,
phy_interface_t iface);
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
#else /* CONFIG_OF */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
@@ -60,6 +61,12 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
}
+
+static inline int of_mdio_parse_addr(struct device *dev,
+ const struct device_node *np)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_OF */
#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 34597c8c1a4c..9cd72aab76fe 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -9,8 +9,11 @@
#ifdef CONFIG_OF_NET
#include <linux/of.h>
+
+struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
+extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
{
@@ -21,6 +24,11 @@ static inline const void *of_get_mac_address(struct device_node *np)
{
return NULL;
}
+
+static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
+{
+ return NULL;
+}
#endif
#endif /* __LINUX_OF_NET_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index c2080eebbb47..7dee00143afd 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -163,7 +163,8 @@ extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
extern int gpmc_calc_divider(unsigned int sync_clk);
-extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t);
+extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
+ const struct gpmc_settings *s);
extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
extern void gpmc_cs_free(int cs);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index d5771bed59c9..44b2f6f7bbd8 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -66,7 +66,8 @@ extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask);
+ int order, const nodemask_t *nodemask,
+ struct mem_cgroup *memcg);
extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
unsigned long totalpages, const nodemask_t *nodemask,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5ed7bdaf22d5..f34e040b34e9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -289,6 +289,47 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
+/*
+ * On an anonymous page mapped into a user virtual memory area,
+ * page->mapping points to its anon_vma, not to a struct address_space;
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ *
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to user virtual address space into which the page is mapped.
+ */
+#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+
+static inline int PageAnon(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+#ifdef CONFIG_KSM
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
+ */
+static inline int PageKsm(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+#else
+TESTPAGEFLAG_FALSE(Ksm)
+#endif
+
u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
@@ -328,8 +369,6 @@ static inline void SetPageUptodate(struct page *page)
CLEARPAGEFLAG(Uptodate, uptodate)
-extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);
@@ -428,6 +467,21 @@ static inline void ClearPageCompound(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
+#ifdef CONFIG_HUGETLB_PAGE
+int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page);
+bool page_huge_active(struct page *page);
+#else
+TESTPAGEFLAG_FALSE(Huge)
+TESTPAGEFLAG_FALSE(HeadHuge)
+
+static inline bool page_huge_active(struct page *page)
+{
+ return 0;
+}
+#endif
+
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -482,6 +536,53 @@ static inline int PageTransTail(struct page *page)
#endif
/*
+ * PageBuddy() indicates that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
+ * -2 so that an underflow of the page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
+ */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
+static inline int PageBuddy(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBuddy(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+
+static inline int PageBalloon(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBalloon(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
+/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
*/
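/*
 * Illustrative sketch (not part of this diff): how the mapping-encoded and
 * mapcount-encoded page states declared above compose. The helper
 * example_classify_page() is hypothetical and for documentation only.
 */
static const char *example_classify_page(struct page *page)
{
    if (PageBuddy(page))
        return "free (buddy allocator)";
    if (PageBalloon(page))
        return "balloon-inflated";
    if (PageKsm(page))
        return "KSM-merged anonymous";
    if (PageAnon(page))
        return "anonymous";
    return "file-backed or other";
}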
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e63112fb55be..353db8dc4c6e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1178,6 +1178,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
@@ -1673,13 +1674,25 @@ int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
+int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
+
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
#else
+static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
+static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
@@ -1690,6 +1703,8 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
+static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{ return 0; }
#endif
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 38cff8f6716d..2f7b9a40f627 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2541,10 +2541,6 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
-#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
-#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
-#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
-#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2b621982938d..61992cf2e977 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
+#include <linux/cgroup.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -118,10 +119,19 @@ struct hw_perf_event {
struct hrtimer hrtimer;
};
struct { /* tracepoint */
- struct task_struct *tp_target;
/* for tp_event->class */
struct list_head tp_list;
};
+ struct { /* intel_cqm */
+ int cqm_state;
+ int cqm_rmid;
+ struct list_head cqm_events_entry;
+ struct list_head cqm_groups_entry;
+ struct list_head cqm_group_entry;
+ };
+ struct { /* itrace */
+ int itrace_started;
+ };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
@@ -129,12 +139,12 @@ struct hw_perf_event {
* problem hw_breakpoint has with context
 * creation and event initialization.
*/
- struct task_struct *bp_target;
struct arch_hw_breakpoint info;
struct list_head bp_list;
};
#endif
};
+ struct task_struct *target;
int state;
local64_t prev_count;
u64 sample_period;
@@ -166,6 +176,11 @@ struct perf_event;
* pmu::capabilities flags
*/
#define PERF_PMU_CAP_NO_INTERRUPT 0x01
+#define PERF_PMU_CAP_NO_NMI 0x02
+#define PERF_PMU_CAP_AUX_NO_SG 0x04
+#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
+#define PERF_PMU_CAP_EXCLUSIVE 0x10
+#define PERF_PMU_CAP_ITRACE 0x20
/**
* struct pmu - generic performance monitoring unit
@@ -186,6 +201,7 @@ struct pmu {
int * __percpu pmu_disable_count;
struct perf_cpu_context * __percpu pmu_cpu_context;
+ atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -262,9 +278,32 @@ struct pmu {
int (*event_idx) (struct perf_event *event); /*optional */
/*
- * flush branch stack on context-switches (needed in cpu-wide mode)
+ * context-switches callback
+ */
+ void (*sched_task) (struct perf_event_context *ctx,
+ bool sched_in);
+ /*
+ * PMU specific data size
+ */
+ size_t task_ctx_size;
+
+
+ /*
+ * Return the count value for a counter.
+ */
+ u64 (*count) (struct perf_event *event); /*optional*/
+
+ /*
+ * Set up pmu-private data structures for an AUX area
*/
- void (*flush_branch_stack) (void);
+ void *(*setup_aux) (int cpu, void **pages,
+ int nr_pages, bool overwrite);
+ /* optional */
+
+ /*
+ * Free pmu-private AUX data structures
+ */
+ void (*free_aux) (void *aux); /* optional */
};
/**
@@ -300,6 +339,7 @@ struct swevent_hlist {
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
+#define PERF_ATTACH_TASK_DATA 0x08
struct perf_cgroup;
struct ring_buffer;
@@ -438,6 +478,7 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+ u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
@@ -504,7 +545,7 @@ struct perf_event_context {
u64 generation;
int pin_count;
int nr_cgroups; /* cgroup evts */
- int nr_branch_stack; /* branch_stack evt */
+ void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
struct delayed_work orphans_remove;
@@ -536,12 +577,52 @@ struct perf_output_handle {
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
- void *addr;
+ union {
+ void *addr;
+ unsigned long head;
+ };
int page;
};
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info __percpu *info;
+};
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+ return container_of(task_css(task, perf_event_cgrp_id),
+ struct perf_cgroup, css);
+}
+#endif /* CONFIG_CGROUP_PERF */
+
#ifdef CONFIG_PERF_EVENTS
+extern void *perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event);
+extern void perf_aux_output_end(struct perf_output_handle *handle,
+ unsigned long size, bool truncated);
+extern int perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size);
+extern void *perf_get_aux(struct perf_output_handle *handle);
+
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -558,6 +639,8 @@ extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
+extern void perf_sched_cb_dec(struct pmu *pmu);
+extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
@@ -731,6 +814,11 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
__perf_event_task_sched_out(prev, next);
}
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+ return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -800,6 +888,16 @@ static inline bool has_branch_stack(struct perf_event *event)
return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}
+static inline bool needs_branch_stack(struct perf_event *event)
+{
+ return event->attr.branch_sample_type != 0;
+}
+
+static inline bool has_aux(struct perf_event *event)
+{
+ return event->pmu->setup_aux;
+}
+
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
@@ -815,6 +913,17 @@ extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
+static inline void *
+perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event) { return NULL; }
+static inline void
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ bool truncated) { }
+static inline int
+perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size) { return -EINVAL; }
+static inline void *
+perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) { }
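/*
 * Illustrative sketch (not part of this diff): a PMU driver wiring up the
 * new AUX-area callbacks (setup_aux/free_aux) declared above. The
 * example_aux_buf bookkeeping is hypothetical; real drivers typically build
 * scatterlists or contiguous mappings here. Assumes <linux/slab.h>.
 */
struct example_aux_buf {
    void **pages;
    int nr_pages;
    bool snapshot;
};

static void *example_setup_aux(int cpu, void **pages, int nr_pages,
                               bool overwrite)
{
    struct example_aux_buf *buf;

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf)
        return NULL;

    buf->pages = pages;
    buf->nr_pages = nr_pages;
    buf->snapshot = overwrite;
    return buf;
}

static void example_free_aux(void *aux)
{
    kfree(aux);
}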
diff --git a/include/linux/personality.h b/include/linux/personality.h
index 646c0a7d50fa..aeb7892b2468 100644
--- a/include/linux/personality.h
+++ b/include/linux/personality.h
@@ -3,52 +3,14 @@
#include <uapi/linux/personality.h>
-
-/*
- * Handling of different ABIs (personalities).
- */
-
-struct exec_domain;
-struct pt_regs;
-
-extern int register_exec_domain(struct exec_domain *);
-extern int unregister_exec_domain(struct exec_domain *);
-extern int __set_personality(unsigned int);
-
-
-/*
- * Description of an execution domain.
- *
- * The first two members are refernced from assembly source
- * and should stay where they are unless explicitly needed.
- */
-typedef void (*handler_t)(int, struct pt_regs *);
-
-struct exec_domain {
- const char *name; /* name of the execdomain */
- handler_t handler; /* handler for syscalls */
- unsigned char pers_low; /* lowest personality */
- unsigned char pers_high; /* highest personality */
- unsigned long *signal_map; /* signal mapping */
- unsigned long *signal_invmap; /* reverse signal mapping */
- struct map_segment *err_map; /* error mapping */
- struct map_segment *socktype_map; /* socket type mapping */
- struct map_segment *sockopt_map; /* socket option mapping */
- struct map_segment *af_map; /* address family mapping */
- struct module *module; /* module context of the ed. */
- struct exec_domain *next; /* linked list (internal) */
-};
-
/*
* Return the base personality without flags.
*/
#define personality(pers) (pers & PER_MASK)
-
/*
* Change personality of the currently running process.
*/
-#define set_personality(pers) \
- ((current->personality == (pers)) ? 0 : __set_personality(pers))
+#define set_personality(pers) (current->personality = (pers))
#endif /* _LINUX_PERSONALITY_H */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 7e75bfe37cc7..fe5732d53eda 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
+extern int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status)
@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
+static inline int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
new file mode 100644
index 000000000000..8a1f6a4920b2
--- /dev/null
+++ b/include/linux/platform_data/dma-hsu.h
@@ -0,0 +1,25 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PLATFORM_DATA_DMA_HSU_H
+#define _PLATFORM_DATA_DMA_HSU_H
+
+#include <linux/device.h>
+
+struct hsu_dma_slave {
+ struct device *dma_dev;
+ int chan_id;
+};
+
+struct hsu_dma_platform_data {
+ unsigned short nr_channels;
+};
+
+#endif /* _PLATFORM_DATA_DMA_HSU_H */
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index eabac4e2fc99..2d08816720f6 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -48,6 +48,9 @@ struct sdma_script_start_addrs {
s32 ssish_2_mcu_addr;
s32 hdmi_dma_addr;
/* End of v2 array */
+ s32 zcanfd_2_mcu_addr;
+ s32 zqspi_2_mcu_addr;
+ /* End of v3 array */
};
/**
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
index 2312d197dfb7..89fd34727a24 100644
--- a/include/linux/platform_data/i2c-davinci.h
+++ b/include/linux/platform_data/i2c-davinci.h
@@ -18,6 +18,7 @@ struct davinci_i2c_platform_data {
unsigned int bus_delay; /* post-transaction delay (usec) */
unsigned int sda_pin; /* GPIO pin ID to use for SDA */
unsigned int scl_pin; /* GPIO pin ID to use for SCL */
+	bool has_pfunc;	/* chip has an ICPFUNC register */
};
/* for board setup code */
diff --git a/include/linux/platform_data/mmc-msm_sdcc.h b/include/linux/platform_data/mmc-msm_sdcc.h
deleted file mode 100644
index 55aa873c9396..000000000000
--- a/include/linux/platform_data/mmc-msm_sdcc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __MMC_MSM_SDCC_H
-#define __MMC_MSM_SDCC_H
-
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-
-struct msm_mmc_gpio {
- unsigned no;
- const char *name;
-};
-
-struct msm_mmc_gpio_data {
- struct msm_mmc_gpio *gpio;
- u8 size;
-};
-
-struct msm_mmc_platform_data {
- unsigned int ocr_mask; /* available voltages */
- u32 (*translate_vdd)(struct device *, unsigned int);
- unsigned int (*status)(struct device *);
- int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
- struct msm_mmc_gpio_data *gpio_data;
- void (*init_card)(struct mmc_card *card);
-};
-
-#endif
diff --git a/include/linux/platform_data/msm_serial_hs.h b/include/linux/platform_data/msm_serial_hs.h
deleted file mode 100644
index 98a2046f8b31..000000000000
--- a/include/linux/platform_data/msm_serial_hs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- * Author: Nick Pelly <npelly@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
-#define __ASM_ARCH_MSM_SERIAL_HS_H
-
-#include <linux/serial_core.h>
-
-/* API to request the uart clock off or on for low power management
- * Clients should call request_clock_off() when no uart data is expected,
- * and must call request_clock_on() before any further uart data can be
- * received. */
-extern void msm_hs_request_clock_off(struct uart_port *uport);
-extern void msm_hs_request_clock_on(struct uart_port *uport);
-
-/**
- * struct msm_serial_hs_platform_data
- * @rx_wakeup_irq: Rx activity irq
- * @rx_to_inject: extra character to be inserted to Rx tty on wakeup
- * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character
- * @exit_lpm_cb: function called before every Tx transaction
- *
- * This is an optional structure required for UART Rx GPIO IRQ based
- * wakeup from low power state. UART wakeup can be triggered by RX activity
- * (using a wakeup GPIO on the UART RX pin). This should only be used if
- * there is not a wakeup GPIO on the UART CTS, and the first RX byte is
- * known (eg., with the Bluetooth Texas Instruments HCILL protocol),
- * since the first RX byte will always be lost. RTS will be asserted even
- * while the UART is clocked off in this mode of operation.
- */
-struct msm_serial_hs_platform_data {
- int rx_wakeup_irq;
- unsigned char inject_rx_on_wakeup;
- char rx_to_inject;
- void (*exit_lpm_cb)(struct uart_port *);
-};
-
-#endif
diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h
new file mode 100644
index 000000000000..d6ed28679bb2
--- /dev/null
+++ b/include/linux/platform_data/nxp-nci.h
@@ -0,0 +1,27 @@
+/*
+ * Generic platform data for the NXP NCI NFC chips.
+ *
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NXP_NCI_H_
+#define _NXP_NCI_H_
+
+struct nxp_nci_nfc_platform_data {
+ unsigned int gpio_en;
+ unsigned int gpio_fw;
+ unsigned int irq;
+};
+
+#endif /* _NXP_NCI_H_ */
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
index 3cc2e3c40914..a938eba2f18e 100644
--- a/include/linux/platform_data/serial-imx.h
+++ b/include/linux/platform_data/serial-imx.h
@@ -20,14 +20,9 @@
#define ASMARM_ARCH_UART_H
#define IMXUART_HAVE_RTSCTS (1<<0)
-#define IMXUART_IRDA (1<<1)
struct imxuart_platform_data {
unsigned int flags;
- void (*irda_enable)(int enable);
- unsigned int irda_inv_rx:1;
- unsigned int irda_inv_tx:1;
- unsigned short transceiver_delay;
};
#endif
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index a947ab8b441a..533d9807e543 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -5,8 +5,6 @@
#ifndef __LINUX_PLATFORM_DATA_SI5351_H__
#define __LINUX_PLATFORM_DATA_SI5351_H__
-struct clk;
-
/**
* enum si5351_pll_src - Si5351 pll clock source
* @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
@@ -107,8 +105,6 @@ struct si5351_clkout_config {
* @clkout: array of clkout configuration
*/
struct si5351_platform_data {
- struct clk *clk_xtal;
- struct clk *clk_clkin;
enum si5351_pll_src pll_src[2];
struct si5351_clkout_config clkout[8];
};
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
new file mode 100644
index 000000000000..1231e9bb00f1
--- /dev/null
+++ b/include/linux/platform_data/sky81452-backlight.h
@@ -0,0 +1,46 @@
+/*
+ * sky81452.h SKY81452 backlight driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_BACKLIGHT_H
+#define _SKY81452_BACKLIGHT_H
+
+/**
+ * struct sky81452_bl_platform_data
+ * @name: backlight driver name.
+ *	If it is not defined, the default name is lcd-backlight.
+ * @gpio_enable: GPIO number which controls the EN pin
+ * @enable: Enable mask for current sink channels 1, 2, 3, 4, 5 and 6.
+ * @ignore_pwm: true if DPWMI should be ignored.
+ * @dpwm_mode: true for DPWM dimming mode, otherwise analog dimming mode.
+ * @phase_shift: true for phase shift mode.
+ * @short_detection_threshold: It should be one of 4, 5, 6 and 7 V.
+ * @boost_current_limit: It should be either 2300 or 2750 mA.
+ */
+struct sky81452_bl_platform_data {
+ const char *name;
+ int gpio_enable;
+ unsigned int enable;
+ bool ignore_pwm;
+ bool dpwm_mode;
+ bool phase_shift;
+ unsigned int short_detection_threshold;
+ unsigned int boost_current_limit;
+};
+
+#endif
diff --git a/include/linux/platform_data/tpm_stm_st33.h b/include/linux/platform_data/st33zp24.h
index ff75310c0f47..817dfdb37885 100644
--- a/include/linux/platform_data/tpm_stm_st33.h
+++ b/include/linux/platform_data/st33zp24.h
@@ -1,6 +1,6 @@
/*
- * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
- * Copyright (C) 2009, 2010 STMicroelectronics
+ * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,20 +14,9 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * STMicroelectronics version 1.2.0, Copyright (C) 2010
- * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
- * This is free software, and you are welcome to redistribute it
- * under certain conditions.
- *
- * @Author: Christophe RICARD tpmsupport@st.com
- *
- * @File: stm_st33_tpm.h
- *
- * @Date: 09/15/2010
*/
-#ifndef __STM_ST33_TPM_H__
-#define __STM_ST33_TPM_H__
+#ifndef __ST33ZP24_H__
+#define __ST33ZP24_H__
#define TPM_ST33_I2C "st33zp24-i2c"
#define TPM_ST33_SPI "st33zp24-spi"
@@ -36,4 +25,4 @@ struct st33zp24_platform_data {
int io_lpcpd;
};
-#endif /* __STM_ST33_TPM_H__ */
+#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index ae4882ca4a64..58f1e75ba105 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -59,7 +59,7 @@ extern int platform_add_devices(struct platform_device **, int);
struct platform_device_info {
struct device *parent;
- struct acpi_dev_node acpi_node;
+ struct fwnode_handle *fwnode;
const char *name;
int id;
diff --git a/include/linux/resume-trace.h b/include/linux/pm-trace.h
index f31db2368782..ecbde7a5548e 100644
--- a/include/linux/resume-trace.h
+++ b/include/linux/pm-trace.h
@@ -1,8 +1,8 @@
-#ifndef RESUME_TRACE_H
-#define RESUME_TRACE_H
+#ifndef PM_TRACE_H
+#define PM_TRACE_H
#ifdef CONFIG_PM_TRACE
-#include <asm/resume-trace.h>
+#include <asm/pm-trace.h>
#include <linux/types.h>
extern int pm_trace_enabled;
@@ -14,7 +14,7 @@ static inline int pm_trace_is_enabled(void)
struct device;
extern void set_trace_device(struct device *);
-extern void generate_resume_trace(const void *tracedata, unsigned int user);
+extern void generate_pm_trace(const void *tracedata, unsigned int user);
extern int show_trace_dev_match(char *buf, size_t size);
#define TRACE_DEVICE(dev) do { \
@@ -28,6 +28,7 @@ static inline int pm_trace_is_enabled(void) { return 0; }
#define TRACE_DEVICE(dev) do { } while (0)
#define TRACE_RESUME(dev) do { } while (0)
+#define TRACE_SUSPEND(dev) do { } while (0)
#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e2f1be6dd9dd..2d29c64f8fb1 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -603,10 +603,18 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions along with
* subsystem-level and driver-level callbacks.
+ *
+ * @detach: Called when removing a device from the domain.
+ * @activate: Called before executing probe routines for bus types and drivers.
+ * @sync: Called after successful driver probe.
+ * @dismiss: Called after unsuccessful driver probe and after driver removal.
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
void (*detach)(struct device *dev, bool power_off);
+ int (*activate)(struct device *dev);
+ void (*sync)(struct device *dev);
+ void (*dismiss)(struct device *dev);
};
/*
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 080e778118ba..681ccb053f72 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -127,7 +127,7 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
@@ -163,9 +163,9 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 6512e9cbc6d5..5df733b8f704 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -510,4 +510,16 @@ static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
#endif /* CONFIG_PNP */
+/**
+ * module_pnp_driver() - Helper macro for registering a PnP driver
+ * @__pnp_driver: pnp_driver struct
+ *
+ * Helper macro for PnP drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_pnp_driver(__pnp_driver) \
+ module_driver(__pnp_driver, pnp_register_driver, \
+ pnp_unregister_driver)
+
#endif /* _LINUX_PNP_H */
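/*
 * Illustrative usage sketch (not part of this diff) for module_pnp_driver().
 * The driver, id table and probe routine are hypothetical; the macro only
 * removes the module_init()/module_exit() boilerplate around
 * pnp_register_driver()/pnp_unregister_driver().
 */
static const struct pnp_device_id example_pnp_ids[] = {
    { .id = "PNP0000" },
    { }
};
MODULE_DEVICE_TABLE(pnp, example_pnp_ids);

static int example_pnp_probe(struct pnp_dev *dev,
                             const struct pnp_device_id *id)
{
    return 0;
}

static struct pnp_driver example_pnp_driver = {
    .name     = "example-pnp",
    .id_table = example_pnp_ids,
    .probe    = example_pnp_probe,
};
module_pnp_driver(example_pnp_driver);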
diff --git a/include/linux/printk.h b/include/linux/printk.h
index baa3f97d8ce8..9b30871c9149 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -255,6 +255,11 @@ extern asmlinkage void dump_stack(void) __cold;
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/*
+ * Like KERN_CONT, pr_cont() should only be used when continuing
+ * a line with no newline ('\n') enclosed. Otherwise it defaults
+ * back to KERN_DEFAULT.
+ */
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
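/*
 * Illustrative sketch (not part of this diff) of the pr_cont() rule noted
 * above: continuation printks only make sense while the preceding message
 * has not yet been terminated with '\n'. example_dump_ids() is hypothetical.
 */
static void example_dump_ids(const int *ids, int n)
{
    int i;

    pr_info("ids:");            /* no trailing newline yet */
    for (i = 0; i < n; i++)
        pr_cont(" %d", ids[i]); /* continues the same line */
    pr_cont("\n");              /* terminate the line once */
}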
diff --git a/include/linux/property.h b/include/linux/property.h
index a6a3d98bd7e9..de8bdf417a35 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -13,6 +13,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/fwnode.h>
#include <linux/types.h>
struct device;
@@ -40,16 +41,6 @@ int device_property_read_string_array(struct device *dev, const char *propname,
int device_property_read_string(struct device *dev, const char *propname,
const char **val);
-enum fwnode_type {
- FWNODE_INVALID = 0,
- FWNODE_OF,
- FWNODE_ACPI,
-};
-
-struct fwnode_handle {
- enum fwnode_type type;
-};
-
bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
const char *propname, u8 *val,
@@ -140,4 +131,37 @@ static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
return fwnode_property_read_u64_array(fwnode, propname, val, 1);
}
+/**
+ * struct property_entry - "Built-in" device property representation.
+ * @name: Name of the property.
+ * @type: Type of the property.
+ * @nval: Number of items of type @type making up the value.
+ * @value: Value of the property (an array of @nval items of type @type).
+ */
+struct property_entry {
+ const char *name;
+ enum dev_prop_type type;
+ size_t nval;
+ union {
+ void *raw_data;
+ u8 *u8_data;
+ u16 *u16_data;
+ u32 *u32_data;
+ u64 *u64_data;
+ const char **str;
+ } value;
+};
+
+/**
+ * struct property_set - Collection of "built-in" device properties.
+ * @fwnode: Handle to be pointed to by the fwnode field of struct device.
+ * @properties: Array of properties terminated with a null entry.
+ */
+struct property_set {
+ struct fwnode_handle fwnode;
+ struct property_entry *properties;
+};
+
+void device_add_property_set(struct device *dev, struct property_set *pset);
+
#endif /* _LINUX_PROPERTY_H_ */
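/*
 * Illustrative sketch (not part of this diff): building a property_set from
 * property_entry items and attaching it with device_add_property_set().
 * The property names and values are made up, and DEV_PROP_U32/DEV_PROP_STRING
 * are assumed to be the enum dev_prop_type values declared earlier in this
 * header.
 */
static u32 example_clock_frequency = 400000;
static const char *example_label[] = { "demo-device" };

static struct property_entry example_props[] = {
    {
        .name  = "clock-frequency",
        .type  = DEV_PROP_U32,
        .nval  = 1,
        .value = { .u32_data = &example_clock_frequency },
    },
    {
        .name  = "label",
        .type  = DEV_PROP_STRING,
        .nval  = 1,
        .value = { .str = example_label },
    },
    { } /* null entry terminates the array */
};

static struct property_set example_pset = {
    .properties = example_props,
};

/* In platform setup code: device_add_property_set(dev, &example_pset); */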
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 8884f6e507f7..8e7a25b068b0 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -40,6 +40,7 @@ enum pstore_type_id {
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
PSTORE_TYPE_PMSG = 7,
+ PSTORE_TYPE_PPC_OPAL = 8,
PSTORE_TYPE_UNKNOWN = 255
};
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d8ff3fb84ba..b8b73066d137 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -64,11 +64,11 @@ struct ptp_clock_request {
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
*
- * @gettime: Reads the current time from the hardware clock.
- * parameter ts: Holds the result.
+ * @gettime64: Reads the current time from the hardware clock.
+ * parameter ts: Holds the result.
*
- * @settime: Set the current time on the hardware clock.
- * parameter ts: Time value to set.
+ * @settime64: Set the current time on the hardware clock.
+ * parameter ts: Time value to set.
*
* @enable: Request driver to enable or disable an ancillary feature.
* parameter request: Desired resource to enable or disable.
@@ -104,8 +104,8 @@ struct ptp_clock_info {
struct ptp_pin_desc *pin_config;
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
- int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
- int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
+ int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
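/*
 * Illustrative sketch (not part of this diff): a clock driver filling in the
 * renamed 64-bit callbacks. example_phc_gettime64() and the ktime helpers
 * used here are assumptions made for the sake of the example.
 */
static int example_phc_gettime64(struct ptp_clock_info *ptp,
                                 struct timespec64 *ts)
{
    *ts = ktime_to_timespec64(ktime_get_real());
    return 0;
}

static struct ptp_clock_info example_phc_info = {
    .owner     = THIS_MODULE,
    .name      = "example-phc",
    .gettime64 = example_phc_gettime64,
};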
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
new file mode 100644
index 000000000000..d7a974d5f57c
--- /dev/null
+++ b/include/linux/qcom_scm.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
+extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+
+extern void qcom_scm_cpu_power_down(u32 flags);
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 qcom_scm_get_version(void);
+
+#endif
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d534e8ed308a..b2505acfd3c0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -50,6 +50,7 @@
#undef USRQUOTA
#undef GRPQUOTA
+#undef PRJQUOTA
enum quota_type {
USRQUOTA = 0, /* element used for user quotas */
GRPQUOTA = 1, /* element used for group quotas */
@@ -319,6 +320,7 @@ struct dquot_operations {
/* get reserved quota for delayed alloc, value returned is managed by
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
+ int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
};
struct path;
@@ -344,7 +346,10 @@ struct qc_dqblk {
int d_rt_spc_warns; /* # warnings issued wrt RT space */
};
-/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+/*
+ * Field specifiers for ->set_dqblk() in struct qc_dqblk and also for
+ * ->set_info() in struct qc_info
+ */
#define QC_INO_SOFT (1<<0)
#define QC_INO_HARD (1<<1)
#define QC_SPC_SOFT (1<<2)
@@ -365,6 +370,51 @@ struct qc_dqblk {
#define QC_INO_COUNT (1<<13)
#define QC_RT_SPACE (1<<14)
#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+#define QC_FLAGS (1<<15)
+
+#define QCI_SYSFILE (1 << 0) /* Quota file is hidden from userspace */
+#define QCI_ROOT_SQUASH (1 << 1) /* Root squash turned on */
+#define QCI_ACCT_ENABLED (1 << 2) /* Quota accounting enabled */
+#define QCI_LIMITS_ENFORCED (1 << 3) /* Quota limits enforced */
+
+/* Structures for communicating via ->get_state */
+struct qc_type_state {
+ unsigned int flags; /* Flags QCI_* */
+ unsigned int spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int rt_spc_timelimit; /* Ditto for real-time space */
+ unsigned int spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int ino_warnlimit; /* Ditto for inodes */
+ unsigned int rt_spc_warnlimit; /* Ditto for real-time space */
+ unsigned long long ino; /* Inode number of quota file */
+ blkcnt_t blocks; /* Number of 512-byte blocks in the file */
+ blkcnt_t nextents; /* Number of extents in the file */
+};
+
+struct qc_state {
+ unsigned int s_incoredqs; /* Number of dquots in core */
+ /*
+ * Per quota type information. The array should really have
+ * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
+ * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
+ * supports project quotas, this can be changed to MAXQUOTAS
+ */
+ struct qc_type_state s_state[XQM_MAXQUOTAS];
+};
+
+/* Structure for communicating via ->set_info */
+struct qc_info {
+ int i_fieldmask; /* mask of fields to change in ->set_info() */
+ unsigned int i_flags; /* Flags QCI_* */
+ unsigned int i_spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int i_ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int i_rt_spc_timelimit;/* Ditto for real-time space */
+ unsigned int i_spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int i_ino_warnlimit; /* Limit for number of inode warnings */
+ unsigned int i_rt_spc_warnlimit; /* Ditto for real-time space */
+};
/* Operations handling requests from userspace */
struct quotactl_ops {
@@ -373,12 +423,10 @@ struct quotactl_ops {
int (*quota_enable)(struct super_block *, unsigned int);
int (*quota_disable)(struct super_block *, unsigned int);
int (*quota_sync)(struct super_block *, int);
- int (*get_info)(struct super_block *, int, struct if_dqinfo *);
- int (*set_info)(struct super_block *, int, struct if_dqinfo *);
+ int (*set_info)(struct super_block *, int, struct qc_info *);
int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
- int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
- int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
+ int (*get_state)(struct super_block *, struct qc_state *);
int (*rm_xquota)(struct super_block *, unsigned int);
};
@@ -389,7 +437,19 @@ struct quota_format_type {
struct quota_format_type *qf_next;
};
-/* Quota state flags - they actually come in two flavors - for users and groups */
+/**
+ * Quota state flags - they actually come in two flavors - for users and groups.
+ *
+ * Actual typed flags layout:
+ * USRQUOTA GRPQUOTA
+ * DQUOT_USAGE_ENABLED 0x0001 0x0002
+ * DQUOT_LIMITS_ENABLED 0x0004 0x0008
+ * DQUOT_SUSPENDED 0x0010 0x0020
+ *
+ * Following bits are used for non-typed flags:
+ * DQUOT_QUOTA_SYS_FILE 0x0040
+ * DQUOT_NEGATIVE_USAGE 0x0080
+ */
enum {
_DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */
_DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */
@@ -398,9 +458,9 @@ enum {
* memory to turn them on */
_DQUOT_STATE_FLAGS
};
-#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED)
-#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED)
-#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED)
+#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED * MAXQUOTAS)
+#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED * MAXQUOTAS)
+#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED * MAXQUOTAS)
#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \
DQUOT_SUSPENDED)
/* Other quota flags */
@@ -414,15 +474,21 @@ enum {
*/
#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
/* Allow negative quota usage */
-
static inline unsigned int dquot_state_flag(unsigned int flags, int type)
{
- return flags << _DQUOT_STATE_FLAGS * type;
+ return flags << type;
}
static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
{
- return (flags >> _DQUOT_STATE_FLAGS * type) & DQUOT_STATE_FLAGS;
+ return (flags >> type) & DQUOT_STATE_FLAGS;
+}
+
+/* Bitmap of quota types where flag is set in flags */
+static __always_inline unsigned dquot_state_types(unsigned flags, unsigned flag)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(flag);
+ return (flags / flag) & ((1 << MAXQUOTAS) - 1);
}
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
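/*
 * Illustrative worked example (not part of this diff) for the flag layout
 * documented above, assuming MAXQUOTAS == 2 as the 0x0001..0x0020 table
 * implies. example_quota_flag_layout() exists only to show the arithmetic.
 */
static inline void example_quota_flag_layout(void)
{
    unsigned int flags = 0;

    /* Enable usage tracking and mark suspension for group quota: */
    flags |= dquot_state_flag(DQUOT_USAGE_ENABLED, GRPQUOTA); /* 0x0002 */
    flags |= dquot_state_flag(DQUOT_SUSPENDED, GRPQUOTA);     /* 0x0020 */

    /* Bitmap of quota types with DQUOT_SUSPENDED set: only bit GRPQUOTA. */
    WARN_ON(dquot_state_types(flags, DQUOT_SUSPENDED) != (1 << GRPQUOTA));
}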
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index df73258cca47..77ca6601ff25 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -95,8 +95,8 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
int dquot_quota_off(struct super_block *sb, int type);
int dquot_writeback_dquots(struct super_block *sb, int type);
int dquot_quota_sync(struct super_block *sb, int type);
-int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
-int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
+int dquot_get_state(struct super_block *sb, struct qc_state *state);
+int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
int dquot_get_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
int dquot_set_dqblk(struct super_block *sb, struct kqid id,
@@ -134,10 +134,7 @@ static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
static inline unsigned sb_any_quota_suspended(struct super_block *sb)
{
- unsigned type, tmsk = 0;
- for (type = 0; type < MAXQUOTAS; type++)
- tmsk |= sb_has_quota_suspended(sb, type) << type;
- return tmsk;
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_SUSPENDED);
}
/* Does kernel know about any quota information for given sb + type? */
@@ -149,10 +146,7 @@ static inline bool sb_has_quota_loaded(struct super_block *sb, int type)
static inline unsigned sb_any_quota_loaded(struct super_block *sb)
{
- unsigned type, tmsk = 0;
- for (type = 0; type < MAXQUOTAS; type++)
- tmsk |= sb_has_quota_loaded(sb, type) << type;
- return tmsk;
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_USAGE_ENABLED);
}
static inline bool sb_has_quota_active(struct super_block *sb, int type)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 73069cb6c54a..a7a06d1dcf9c 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -72,6 +72,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
+ void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
int prefer; /* Has special performance attribute */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 78097491cd99..573a5afd5ed8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,6 +48,26 @@
extern int rcu_expedited; /* for sysctl */
+#ifdef CONFIG_TINY_RCU
+/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
+{
+ return false;
+}
+
+static inline void rcu_expedite_gp(void)
+{
+}
+
+static inline void rcu_unexpedite_gp(void)
+{
+}
+#else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_expedited(void); /* Internal RCU use. */
+void rcu_expedite_gp(void);
+void rcu_unexpedite_gp(void);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
@@ -195,6 +215,15 @@ void call_rcu_sched(struct rcu_head *head,
void synchronize_sched(void);
+/*
+ * Structure allowing asynchronous waiting on RCU.
+ */
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+void wakeme_after_rcu(struct rcu_head *head);
+
/**
* call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
* @head: structure to be used for queueing the RCU updates.
@@ -258,6 +287,7 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
+void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
@@ -266,6 +296,8 @@ void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
+int rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -720,7 +752,7 @@ static inline void rcu_preempt_sleep_check(void)
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
+ __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
@@ -730,7 +762,7 @@ static inline void rcu_preempt_sleep_check(void)
* This is the RCU-bh counterpart to rcu_dereference_check().
*/
#define rcu_dereference_bh_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
+ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -740,7 +772,7 @@ static inline void rcu_preempt_sleep_check(void)
* This is the RCU-sched counterpart to rcu_dereference_check().
*/
#define rcu_dereference_sched_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu)
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
@@ -933,9 +965,9 @@ static inline void rcu_read_unlock(void)
{
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
- rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
+ rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
/**
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 67fc8fcdc4b0..a7ff409f386d 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -70,7 +70,8 @@ void ctrl_alt_del(void);
#define POWEROFF_CMD_PATH_LEN 256
extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
-extern int orderly_poweroff(bool force);
+extern void orderly_poweroff(bool force);
+extern void orderly_reboot(void);
/*
* Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9e7e745dac55..78b8a9b9d40a 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -404,6 +404,7 @@ enum rproc_crash_type {
* @table_ptr: pointer to the resource table in effect
* @cached_table: copy of the resource table
* @table_csum: checksum of the resource table
+ * @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
struct klist_node node;
@@ -435,6 +436,7 @@ struct rproc {
struct resource_table *table_ptr;
struct resource_table *cached_table;
u32 table_csum;
+ bool has_iommu;
};
/* we currently support only two vrings per rvdev */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index d438eeb08bff..dbcbcc59aa92 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,14 +1,13 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper by Josh Triplett, Paul E. McKenney
- * and Jonathan Walpole:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
* Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,9 +18,12 @@
#define _LINUX_RHASHTABLE_H
#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
/*
* The end of the chain is marked with a special nulls marks which has
@@ -42,6 +44,9 @@
#define RHT_HASH_BITS 27
#define RHT_BASE_SHIFT RHT_HASH_BITS
+/* Base bits plus 1 bit for nulls marker */
+#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+
struct rhash_head {
struct rhash_head __rcu *next;
};
@@ -49,20 +54,43 @@ struct rhash_head {
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
+ * @rehash: Current bucket being rehashed
+ * @hash_rnd: Random seed to fold into hash
* @locks_mask: Mask to apply before accessing locks[]
* @locks: Array of spinlocks protecting individual buckets
+ * @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
+ * @future_tbl: Table under construction during rehashing
* @buckets: size * hash buckets
*/
struct bucket_table {
- size_t size;
+ unsigned int size;
+ unsigned int rehash;
+ u32 hash_rnd;
unsigned int locks_mask;
spinlock_t *locks;
+ struct list_head walkers;
+ struct rcu_head rcu;
+
+ struct bucket_table __rcu *future_tbl;
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
struct rhashtable;
@@ -72,60 +100,62 @@ struct rhashtable;
* @key_len: Length of key
* @key_offset: Offset of key in struct to be hashed
* @head_offset: Offset of rhash_head in struct to be hashed
- * @hash_rnd: Seed to use while hashing
- * @max_shift: Maximum number of shifts while expanding
- * @min_shift: Minimum number of shifts while shrinking
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
* @nulls_base: Base value to generate nulls marker
+ * @insecure_elasticity: Set to true to disable chain length checks
+ * @automatic_shrinking: Enable automatic shrinking of tables
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
- * @hashfn: Function to hash key
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
*/
struct rhashtable_params {
size_t nelem_hint;
size_t key_len;
size_t key_offset;
size_t head_offset;
- u32 hash_rnd;
- size_t max_shift;
- size_t min_shift;
+ unsigned int max_size;
+ unsigned int min_size;
u32 nulls_base;
+ bool insecure_elasticity;
+ bool automatic_shrinking;
size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
};
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @future_tbl: Table under construction during expansion/shrinking
* @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
+ * @key_len: Key length for hashfn
+ * @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
- * @walkers: List of active walkers
- * @being_destroyed: True if table is set up for destruction
+ * @lock: Spin lock to protect walker list
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- struct bucket_table __rcu *future_tbl;
atomic_t nelems;
- atomic_t shift;
+ unsigned int key_len;
+ unsigned int elasticity;
struct rhashtable_params p;
struct work_struct run_work;
struct mutex mutex;
- struct list_head walkers;
- bool being_destroyed;
+ spinlock_t lock;
};
/**
* struct rhashtable_walker - Hash table walker
* @list: List entry on list of walkers
- * @resize: Resize event occured
+ * @tbl: The table that we were walking over
*/
struct rhashtable_walker {
struct list_head list;
- bool resize;
+ struct bucket_table *tbl;
};
/**
@@ -162,6 +192,119 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
return ((unsigned long) ptr) >> 1;
}
+static inline void *rht_obj(const struct rhashtable *ht,
+ const struct rhash_head *he)
+{
+ return (char *)he - ht->p.head_offset;
+}
+
+static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ unsigned int hash;
+
+ /* params must be equal to ht->p if it isn't constant. */
+ if (!__builtin_constant_p(params.key_len))
+ hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ else if (params.key_len) {
+ unsigned int key_len = params.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else if (key_len & (sizeof(u32) - 1))
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash2(key, key_len / sizeof(u32),
+ tbl->hash_rnd);
+ } else {
+ unsigned int key_len = ht->p.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ }
+
+ return rht_bucket_index(tbl, hash);
+}
+
+static inline unsigned int rht_head_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const struct rhash_head *he, const struct rhashtable_params params)
+{
+ const char *ptr = rht_obj(ht, he);
+
+ return likely(params.obj_hashfn) ?
+ rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
+ ht->p.key_len,
+ tbl->hash_rnd)) :
+ rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_75(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Expand table when exceeding 75% load */
+ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_shrink_below_30(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Shrink table beneath 30% load */
+ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+ tbl->size > ht->p.min_size;
+}
+
+/**
+ * rht_grow_above_100 - returns true if nelems > table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_100(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ return atomic_read(&ht->nelems) > tbl->size &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets, which may both contain
+ * entries that link to the same bucket of the old table during resizing.
+ * This simplifies the locking, as locking the bucket in both tables
+ * during a resize always guarantees protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return &tbl->locks[hash & tbl->locks_mask];
+}
+
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -178,23 +321,13 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
}
#endif /* CONFIG_PROVE_LOCKING */
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
-
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-
-int rhashtable_expand(struct rhashtable *ht);
-int rhashtable_shrink(struct rhashtable *ht);
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
-void *rhashtable_lookup(struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
- bool (*compare)(void *, void *), void *arg);
-
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
- struct rhash_head *obj,
- bool (*compare)(void *, void *),
- void *arg);
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -202,6 +335,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg);
void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \
@@ -352,4 +488,316 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ const char *ptr = obj;
+
+ return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, inlined version
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ const struct bucket_table *tbl;
+ struct rhash_head *he;
+ unsigned int hash;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+ hash = rht_key_hashfn(ht, tbl, key, params);
+ rht_for_each_rcu(he, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ rcu_read_unlock();
+ return rht_obj(ht, he);
+ }
+
+ /* Ensure we see any new tables. */
+ smp_rmb();
+
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(tbl))
+ goto restart;
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead */
+static inline int __rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ struct bucket_table *tbl, *new_tbl;
+ struct rhash_head *head;
+ spinlock_t *lock;
+ unsigned int elasticity;
+ unsigned int hash;
+ int err;
+
+restart:
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* All insertions must grab the oldest table containing
+ * the hashed bucket that is yet to be rehashed.
+ */
+ for (;;) {
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
+
+ if (tbl->rehash <= hash)
+ break;
+
+ spin_unlock_bh(lock);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ }
+
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(new_tbl)) {
+ err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ if (err == -EAGAIN)
+ goto slow_path;
+ goto out;
+ }
+
+ if (unlikely(rht_grow_above_100(ht, tbl))) {
+slow_path:
+ spin_unlock_bh(lock);
+ err = rhashtable_insert_rehash(ht);
+ rcu_read_unlock();
+ if (err)
+ return err;
+
+ goto restart;
+ }
+
+ err = -EEXIST;
+ elasticity = ht->elasticity;
+ rht_for_each(head, tbl, hash) {
+ if (key &&
+ unlikely(!(params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head)))))
+ goto out;
+ if (!--elasticity)
+ goto slow_path;
+ }
+
+ err = 0;
+
+ head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+ RCU_INIT_POINTER(obj->next, head);
+
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+
+ atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
+
+out:
+ spin_unlock_bh(lock);
+ rcu_read_unlock();
+
+ return err;
+}
+
+/**
+ * rhashtable_insert_fast - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Takes a per-bucket spinlock to protect against concurrent mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the table grows
+ * beyond 75% load (see rht_grow_above_75()), subject to the max_size
+ * limit given to rhashtable_init().
+ */
+static inline int rhashtable_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_insert_fast(ht, NULL, obj, params);
+}
+
+/**
+ * rhashtable_lookup_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the table grows
+ * beyond 75% load (see rht_grow_above_75()), subject to the max_size
+ * limit given to rhashtable_init().
+ */
+static inline int rhashtable_lookup_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
+ params);
+}
+
+/**
+ * rhashtable_lookup_insert_key - search and insert object to hash table
+ * with explicit key
+ * @ht: hash table
+ * @key: key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the table grows
+ * beyond 75% load (see rht_grow_above_75()), subject to the max_size
+ * limit given to rhashtable_init().
+ *
+ * Returns zero on success.
+ */
+static inline int rhashtable_lookup_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ return __rhashtable_insert_fast(ht, key, obj, params);
+}
+
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
+ struct rhashtable *ht, struct bucket_table *tbl,
+ struct rhash_head *obj, const struct rhashtable_params params)
+{
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ spinlock_t * lock;
+ unsigned int hash;
+ int err = -ENOENT;
+
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rcu_assign_pointer(*pprev, obj->next);
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(lock);
+
+ return err;
+}
+
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain to find the entry. Removal is thus considerably
+ * slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table if automatic_shrinking was enabled
+ * in rhashtable_init() and the table drops below 30% load.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct bucket_table *tbl;
+ int err;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Because we have already taken (and released) the bucket
+ * lock in old_tbl, if we find that future_tbl is not yet
+ * visible then that guarantees that the entry is still in
+ * the old tbl if it exists.
+ */
+ while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+ ;
+
+ if (err)
+ goto out;
+
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
#endif /* _LINUX_RHASHTABLE_H */
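
The rework above replaces the old rhashtable_insert()/rhashtable_lookup() entry points with inlined fast paths that take the rhashtable_params by value. A hedged usage sketch for the fixed-key case; struct test_obj, test_params and test_rht() are illustrative names, not part of the header:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

struct test_obj {
	int			key;
	struct rhash_head	node;	/* linkage owned by the table */
};

static const struct rhashtable_params test_params = {
	.key_len	= sizeof(int),
	.key_offset	= offsetof(struct test_obj, key),
	.head_offset	= offsetof(struct test_obj, node),
	.automatic_shrinking = true,
};

static int test_rht(void)
{
	struct rhashtable ht;
	struct test_obj *obj;
	int key = 42, err;

	err = rhashtable_init(&ht, &test_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = key;

	/* test_params is a compile-time constant, so rht_key_hashfn()
	 * resolves to jhash2 here (key_len is a multiple of 4). */
	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
	if (!err) {
		if (rhashtable_lookup_fast(&ht, &key, test_params) != obj)
			pr_warn("unexpected lookup result\n");
		err = rhashtable_remove_fast(&ht, &obj->node, test_params);
	}

	kfree(obj);
	rhashtable_destroy(&ht);
	return err;
}
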
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c4c559a45dc8..c89c53a113a8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -105,14 +105,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}
-static inline struct anon_vma *page_anon_vma(struct page *page)
-{
- if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
- PAGE_MAPPING_ANON)
- return NULL;
- return page_rmapping(page);
-}
-
static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 5db76a32fcab..7b8e260c4a27 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -77,7 +77,20 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
return rtnl_dereference(dev->ingress_queue);
}
-extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+
+#ifdef CONFIG_NET_CLS_ACT
+void net_inc_ingress_queue(void);
+void net_dec_ingress_queue(void);
+#else
+static inline void net_inc_ingress_queue(void)
+{
+}
+
+static inline void net_dec_ingress_queue(void)
+{
+}
+#endif
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
@@ -109,5 +122,5 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
- u32 flags, u32 mask);
+ u32 flags, u32 mask, int nlflags);
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f74d4cc3a3e5..26a2e6122734 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -125,7 +125,6 @@ struct sched_attr {
u64 sched_period;
};
-struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
@@ -176,14 +175,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
-/* Notifier for when a task gets migrated to a new CPU */
-struct task_migration_notifier {
- struct task_struct *task;
- int from_cpu;
- int to_cpu;
-};
-extern void register_task_migration_notifier(struct notifier_block *n);
-
extern unsigned long get_parent_ip(unsigned long addr);
extern void dump_cpu_task(int cpu);
@@ -2311,11 +2302,6 @@ extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
-/*
- * The default (Linux) execution domain.
- */
-extern struct exec_domain default_exec_domain;
-
union thread_union {
struct thread_info thread_info;
unsigned long stack[THREAD_SIZE/sizeof(long)];
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 6341f5be6e24..a30b172df6e1 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
+extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
return p->normal_prio;
}
-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
+static inline int rt_mutex_get_effective_prio(struct task_struct *task,
+ int newprio)
{
- return 0;
+ return newprio;
}
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
diff --git a/include/linux/security.h b/include/linux/security.h
index a1b7dbd127ff..18264ea9e314 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1556,7 +1556,7 @@ struct security_operations {
int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
int (*inode_permission) (struct inode *inode, int mask);
int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
+ int (*inode_getattr) (const struct path *path);
int (*inode_setxattr) (struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
@@ -1716,7 +1716,6 @@ struct security_operations {
int (*tun_dev_attach_queue) (void *security);
int (*tun_dev_attach) (struct sock *sk, void *security);
int (*tun_dev_open) (void *security);
- void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1843,7 +1842,7 @@ int security_inode_readlink(struct dentry *dentry);
int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
+int security_inode_getattr(const struct path *path);
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -2259,8 +2258,7 @@ static inline int security_inode_setattr(struct dentry *dentry,
return 0;
}
-static inline int security_inode_getattr(struct vfsmount *mnt,
- struct dentry *dentry)
+static inline int security_inode_getattr(const struct path *path)
{
return 0;
}
@@ -2735,8 +2733,6 @@ int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
-
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
@@ -2928,11 +2924,6 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
-
-static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index a8efa235b7c1..78097e7a330a 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -60,6 +60,20 @@ enum {
};
struct uart_8250_dma;
+struct uart_8250_port;
+
+/**
+ * 8250 core driver operations
+ *
+ * @setup_irq() Set up irq handling. The universal 8250 driver links this
+ * port to the irq chain. Other drivers may @request_irq().
+ * @release_irq() Undo irq handling. The universal 8250 driver unlinks
+ * the port from the irq chain.
+ */
+struct uart_8250_ops {
+ int (*setup_irq)(struct uart_8250_port *);
+ void (*release_irq)(struct uart_8250_port *);
+};
/*
* This should be used by drivers which want to register
@@ -88,6 +102,8 @@ struct uart_8250_port {
unsigned char canary; /* non-zero during system sleep
* if no_console_suspend
*/
+ unsigned char probe;
+#define UART_PROBE_RSA (1 << 0)
/*
* Some bits in registers are cleared on a read, so they must
@@ -100,6 +116,7 @@ struct uart_8250_port {
unsigned char msr_saved_flags;
struct uart_8250_dma *dma;
+ const struct uart_8250_ops *ops;
/* 8250 specific callbacks */
int (*dl_read)(struct uart_8250_port *);
@@ -118,11 +135,8 @@ void serial8250_resume_port(int line);
extern int early_serial_setup(struct uart_port *port);
-extern int serial8250_find_port(struct uart_port *p);
-extern int serial8250_find_port_for_earlycon(void);
extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
extern void serial8250_early_out(struct uart_port *port, int offset, int value);
-extern int setup_early_serial8250_console(char *cmdline);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern int serial8250_do_startup(struct uart_port *port);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d10965f0d8a4..025dad9dcde4 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -235,7 +235,9 @@ struct uart_port {
const struct uart_ops *ops;
unsigned int custom_divisor;
unsigned int line; /* port index */
+ unsigned int minor;
resource_size_t mapbase; /* for ioremap */
+ resource_size_t mapsize;
struct device *dev; /* parent device */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
@@ -336,24 +338,29 @@ struct earlycon_device {
char options[16]; /* e.g., 115200n8 */
unsigned int baud;
};
-int setup_earlycon(char *buf, const char *match,
- int (*setup)(struct earlycon_device *, const char *));
+struct earlycon_id {
+ char name[16];
+ int (*setup)(struct earlycon_device *, const char *options);
+} __aligned(32);
+
+extern int setup_earlycon(char *buf);
extern int of_setup_earlycon(unsigned long addr,
int (*setup)(struct earlycon_device *, const char *));
-#define EARLYCON_DECLARE(name, func) \
-static int __init name ## _setup_earlycon(char *buf) \
-{ \
- return setup_earlycon(buf, __stringify(name), func); \
-} \
-early_param("earlycon", name ## _setup_earlycon);
+#define EARLYCON_DECLARE(_name, func) \
+ static const struct earlycon_id __earlycon_##_name \
+ __used __section(__earlycon_table) \
+ = { .name = __stringify(_name), \
+ .setup = func }
#define OF_EARLYCON_DECLARE(name, compat, fn) \
_OF_DECLARE(earlycon, name, compat, fn, void *)
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
+int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+ char **options);
void uart_parse_options(char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
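
With the EARLYCON_DECLARE() rework above, a driver no longer registers an early_param handler; it emits an earlycon_id entry into the __earlycon_table section and setup_earlycon() matches it against the earlycon= string. A hedged sketch of a driver-side declaration, assuming a trivial UART whose TX register sits at offset 0 of the mapped region; all myuart_* names are illustrative:

#include <linux/console.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/serial_core.h>

static struct earlycon_device *myuart_dev;	/* stashed by the setup hook */

static void myuart_putc(struct uart_port *port, int c)
{
	writeb(c, port->membase);	/* assumption: TX register at offset 0 */
}

static void myuart_early_write(struct console *con, const char *s,
			       unsigned int n)
{
	uart_console_write(&myuart_dev->port, s, n, myuart_putc);
}

static int __init myuart_early_setup(struct earlycon_device *device,
				     const char *options)
{
	if (!device->port.membase)
		return -ENODEV;

	myuart_dev = device;
	device->con->write = myuart_early_write;
	return 0;
}
EARLYCON_DECLARE(myuart, myuart_early_setup);
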
diff --git a/include/linux/serial_mfd.h b/include/linux/serial_mfd.h
deleted file mode 100644
index 2b071e0b034d..000000000000
--- a/include/linux/serial_mfd.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _SERIAL_MFD_H_
-#define _SERIAL_MFD_H_
-
-/* HW register offset definition */
-#define UART_FOR 0x08
-#define UART_PS 0x0C
-#define UART_MUL 0x0D
-#define UART_DIV 0x0E
-
-#define HSU_GBL_IEN 0x0
-#define HSU_GBL_IST 0x4
-
-#define HSU_GBL_INT_BIT_PORT0 0x0
-#define HSU_GBL_INT_BIT_PORT1 0x1
-#define HSU_GBL_INT_BIT_PORT2 0x2
-#define HSU_GBL_INT_BIT_IRI 0x3
-#define HSU_GBL_INT_BIT_HDLC 0x4
-#define HSU_GBL_INT_BIT_DMA 0x5
-
-#define HSU_GBL_ISR 0x8
-#define HSU_GBL_DMASR 0x400
-#define HSU_GBL_DMAISR 0x404
-
-#define HSU_PORT_REG_OFFSET 0x80
-#define HSU_PORT0_REG_OFFSET 0x80
-#define HSU_PORT1_REG_OFFSET 0x100
-#define HSU_PORT2_REG_OFFSET 0x180
-#define HSU_PORT_REG_LENGTH 0x80
-
-#define HSU_DMA_CHANS_REG_OFFSET 0x500
-#define HSU_DMA_CHANS_REG_LENGTH 0x40
-
-#define HSU_CH_SR 0x0 /* channel status reg */
-#define HSU_CH_CR 0x4 /* control reg */
-#define HSU_CH_DCR 0x8 /* descriptor control reg */
-#define HSU_CH_BSR 0x10 /* max fifo buffer size reg */
-#define HSU_CH_MOTSR 0x14 /* minimum ocp transfer size */
-#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */
-#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */
-#define HSU_CH_D1SAR 0x28
-#define HSU_CH_D1TSR 0x2C
-#define HSU_CH_D2SAR 0x30
-#define HSU_CH_D2TSR 0x34
-#define HSU_CH_D3SAR 0x38
-#define HSU_CH_D3TSR 0x3C
-
-#endif
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index abdf1f229dc3..dd0ba502ccb3 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -69,6 +69,7 @@ struct shdma_chan {
int id; /* Raw id of this channel */
int irq; /* Channel IRQ */
int slave_id; /* Client ID for slave DMA */
+ int real_slave_id; /* argument passed to filter function */
int hw_req; /* DMA request line for slave DMA - same
* as MID/RID, used with DT */
enum shdma_pm_state pm_state;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f54d6659713a..66e374d62f64 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -166,10 +166,16 @@ struct nf_conntrack {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
atomic_t use;
+ enum {
+ BRNF_PROTO_UNCHANGED,
+ BRNF_PROTO_8021Q,
+ BRNF_PROTO_PPPOE
+ } orig_proto;
+ bool pkt_otherhost;
unsigned int mask;
struct net_device *physindev;
struct net_device *physoutdev;
- unsigned long data[32 / sizeof(unsigned long)];
+ char neigh_header[8];
};
#endif
@@ -492,7 +498,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
- * @dropcount: total number of sk_receive_queue overflows
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
@@ -641,7 +646,6 @@ struct sk_buff {
#endif
union {
__u32 mark;
- __u32 dropcount;
__u32 reserved_tailroom;
};
@@ -769,6 +773,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
@@ -870,8 +875,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
void skb_abort_seq_read(struct skb_seq_state *st);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- unsigned int to, struct ts_config *config,
- struct ts_state *state);
+ unsigned int to, struct ts_config *config);
/*
* Packet hash types specify the type of hash in skb_set_hash.
@@ -3013,6 +3017,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
*/
#define CHECKSUM_BREAK 76
+/* Unset checksum-complete
+ *
+ * Unsetting checksum-complete is appropriate when a packet is being
+ * modified (uncompressed, for instance) and the checksum-complete
+ * value has been invalidated.
+ */
+static inline void skb_checksum_complete_unset(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
/* Validate (init) checksum based on checksum complete.
*
* Return values:
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 76f1feeabd38..ffd24c830151 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -18,7 +18,7 @@
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
+ * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index be91db2a7017..c4414074bd88 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -18,7 +18,7 @@ struct call_single_data {
struct llist_node llist;
smp_call_func_t func;
void *info;
- u16 flags;
+ unsigned int flags;
};
/* total number of cpus in this system (may exceed NR_CPUS) */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 13e929679550..d600afb21926 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -47,6 +47,5 @@ struct smp_hotplug_thread {
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_thread_schedule(void);
#endif
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 46cca4c06848..083ac388098e 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -19,8 +19,8 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-int sock_diag_check_cookie(void *sk, __u32 *cookie);
-void sock_diag_save_cookie(void *sk, __u32 *cookie);
+int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
+void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5c19cba34dce..5bf59c8493b7 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -51,6 +51,7 @@ struct msghdr {
void *msg_control; /* ancillary data */
__kernel_size_t msg_controllen; /* ancillary data buffer length */
unsigned int msg_flags; /* flags on received message */
+ struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
struct user_msghdr {
@@ -138,6 +139,11 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
+static inline size_t msg_data_left(struct msghdr *msg)
+{
+ return iov_iter_count(&msg->msg_iter);
+}
+
/* "Socket"-level control message types: */
#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
@@ -181,6 +187,7 @@ struct ucred {
#define AF_WANPIPE 25 /* Wanpipe API Sockets */
#define AF_LLC 26 /* Linux LLC */
#define AF_IB 27 /* Native InfiniBand address */
+#define AF_MPLS 28 /* MPLS */
#define AF_CAN 29 /* Controller Area Network */
#define AF_TIPC 30 /* TIPC sockets */
#define AF_BLUETOOTH 31 /* Bluetooth sockets */
@@ -226,6 +233,7 @@ struct ucred {
#define PF_WANPIPE AF_WANPIPE
#define PF_LLC AF_LLC
#define PF_IB AF_IB
+#define PF_MPLS AF_MPLS
#define PF_CAN AF_CAN
#define PF_TIPC AF_TIPC
#define PF_BLUETOOTH AF_BLUETOOTH
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index cd519a11c2c6..b63fe6f5fdc8 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -22,6 +22,7 @@ struct at86rf230_platform_data {
int rstn;
int slp_tr;
int dig2;
+ u8 xtal_trim;
};
#endif
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
index 85b8ee67e937..e741e8baad92 100644
--- a/include/linux/spi/cc2520.h
+++ b/include/linux/spi/cc2520.h
@@ -21,6 +21,7 @@ struct cc2520_platform_data {
int sfd;
int reset;
int vreg;
+ bool amplified;
};
#endif
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 9cfd9623fb03..bdeb4567b71e 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -182,7 +182,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
* lockdep_is_held() calls.
*/
#define srcu_dereference_check(p, sp, c) \
- __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
+ __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 669045ab73f3..0a34489a46b6 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -7,8 +7,6 @@ struct task_struct;
struct pt_regs;
#ifdef CONFIG_STACKTRACE
-struct task_struct;
-
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index cd63851b57f2..7f484a239f53 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -114,6 +114,8 @@ struct plat_stmmacenet_data {
int maxmtu;
int multicast_filter_bins;
int unicast_filter_entries;
+ int tx_fifo_size;
+ int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
void *(*setup)(struct platform_device *pdev);
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 657571817260..71f711db4500 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -10,7 +10,7 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
-void string_get_size(u64 size, enum string_size_units units,
+void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len);
#define UNESCAPE_SPACE 0x01
@@ -47,22 +47,22 @@ static inline int string_unescape_any_inplace(char *buf)
#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
#define ESCAPE_HEX 0x20
-int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
+int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *esc);
static inline int string_escape_mem_any_np(const char *src, size_t isz,
- char **dst, size_t osz, const char *esc)
+ char *dst, size_t osz, const char *esc)
{
return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
}
-static inline int string_escape_str(const char *src, char **dst, size_t sz,
+static inline int string_escape_str(const char *src, char *dst, size_t sz,
unsigned int flags, const char *esc)
{
return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
}
-static inline int string_escape_str_any_np(const char *src, char **dst,
+static inline int string_escape_str_any_np(const char *src, char *dst,
size_t sz, const char *esc)
{
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
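
The string_get_size() change earlier in this file's diff adds a blk_size argument, so callers can pass a capacity expressed in device blocks rather than pre-multiplying into bytes. A hedged sketch of a caller; the 3907029168-sector figure is illustrative:

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void report_capacity(void)
{
	char buf[16];
	u64 nr_sectors = 3907029168ULL;		/* illustrative: 512-byte sectors */

	/* size is given in blocks, blk_size in bytes per block */
	string_get_size(nr_sectors, 512, STRING_UNITS_10, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);		/* roughly "2.00 TB" */
}
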
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index aadc6a04e1ac..807371357160 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -142,12 +142,18 @@ typedef __be32 rpc_fraghdr;
(RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4))
/*
- * RFC1833/RFC3530 rpcbind (v3+) well-known netid's.
+ * Well-known netids. See:
+ *
+ * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
*/
#define RPCBIND_NETID_UDP "udp"
#define RPCBIND_NETID_TCP "tcp"
+#define RPCBIND_NETID_RDMA "rdma"
+#define RPCBIND_NETID_SCTP "sctp"
#define RPCBIND_NETID_UDP6 "udp6"
#define RPCBIND_NETID_TCP6 "tcp6"
+#define RPCBIND_NETID_RDMA6 "rdma6"
+#define RPCBIND_NETID_SCTP6 "sctp6"
#define RPCBIND_NETID_LOCAL "local"
/*
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 64a0a0a97b23..c984c85981ea 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -41,11 +41,6 @@
#define _LINUX_SUNRPC_XPRTRDMA_H
/*
- * rpcbind (v3+) RDMA netid.
- */
-#define RPCBIND_NETID_RDMA "rdma"
-
-/*
* Constants. Max RPC/NFS header is big enough to account for
* additional marshaling buffers passed down by Linux client.
*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7067eca501e2..cee108cbe2d5 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -307,7 +307,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_page(struct page *page);
+extern void deactivate_file_page(struct page *page);
extern void swap_setup(void);
extern void add_page_to_unevictable_list(struct page *page);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b7361f831226..795d5fea5697 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -212,4 +212,7 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
#endif /* CONFIG_SYSCTL */
+int sysctl_max_threads(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 1a7adb411647..3b2911502a8c 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -58,6 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
struct tcp_fastopen_cookie {
s8 len;
u8 val[TCP_FASTOPEN_COOKIE_MAX];
+ bool exp; /* In RFC6994 experimental option format */
};
/* This defines a selective acknowledgement block. */
@@ -111,7 +112,7 @@ struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
- struct sock *listener; /* needed for TFO */
+ bool tfo_listener;
u32 rcv_isn;
u32 snt_isn;
u32 snt_synack; /* synack sent time */
@@ -144,11 +145,19 @@ struct tcp_sock {
* read the code and the spec side by side (and laugh ...)
* See RFC793 and RFC1122. The RFC writes these in capitals.
*/
+ u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were received.
+ */
u32 rcv_nxt; /* What we want to receive next */
u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
u32 snd_nxt; /* Next sequence we send */
+ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
+ */
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
@@ -188,6 +197,7 @@ struct tcp_sock {
u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
+ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
@@ -236,7 +246,6 @@ struct tcp_sock {
u32 lost_out; /* Lost packets */
u32 sacked_out; /* SACK'd packets */
u32 fackets_out; /* FACK'd packets */
- u32 tso_deferred;
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
new file mode 100644
index 000000000000..5b727a17beee
--- /dev/null
+++ b/include/linux/tracefs.h
@@ -0,0 +1,45 @@
+/*
+ * tracefs.h - a pseudo file system for activating tracing
+ *
+ * Based on debugfs by: 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * tracefs is the file system that is used by the tracing infrastructure.
+ *
+ */
+
+#ifndef _TRACEFS_H_
+#define _TRACEFS_H_
+
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <linux/types.h>
+
+struct file_operations;
+
+#ifdef CONFIG_TRACING
+
+struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+struct dentry *tracefs_create_dir(const char *name, struct dentry *parent);
+
+void tracefs_remove(struct dentry *dentry);
+void tracefs_remove_recursive(struct dentry *dentry);
+
+struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
+ int (*mkdir)(const char *name),
+ int (*rmdir)(const char *name));
+
+bool tracefs_initialized(void);
+
+#endif /* CONFIG_TRACING */
+
+#endif
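
A hedged sketch of registering a file through the tracefs API declared above; a NULL parent is assumed to mean the tracefs root (as with debugfs), and the my_* names are illustrative:

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/tracefs.h>

static ssize_t my_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	static const char msg[] = "hello from tracefs\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations my_fops = {
	.read = my_read,
};

static int __init my_tracefs_init(void)
{
	struct dentry *d;

	d = tracefs_create_file("my_file", 0444, NULL, NULL, &my_fops);

	return d ? 0 : -ENOMEM;
}
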
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index c72851328ca9..a5f7f3ecafa3 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -36,6 +36,12 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+struct trace_enum_map {
+ const char *system;
+ const char *enum_string;
+ unsigned long enum_value;
+};
+
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
@@ -87,6 +93,8 @@ extern void syscall_unregfunc(void);
#define PARAMS(args...) args
+#define TRACE_DEFINE_ENUM(x)
+
#endif /* _LINUX_TRACEPOINT_H */
/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 358a337af598..d76631f615c2 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -339,6 +339,7 @@ struct tty_file_private {
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
+#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
@@ -462,7 +463,6 @@ extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
extern void no_tty(void);
-extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
extern void tty_buffer_init(struct tty_port *port);
@@ -491,6 +491,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
+extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
diff --git a/include/linux/types.h b/include/linux/types.h
index 6747247e3f9f..59698be03490 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -146,12 +146,6 @@ typedef u64 dma_addr_t;
typedef u32 dma_addr_t;
#endif /* dma_addr_t */
-#ifdef __CHECKER__
-#else
-#endif
-#ifdef __CHECK_ENDIAN__
-#else
-#endif
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 247cfdcc4b08..87c094961bd5 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
-static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
return (num + net_hash_mix(net)) & mask;
}
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index 2d1f9b627f91..03835522dfcb 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -29,6 +29,7 @@ typedef struct {
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
+#ifdef CONFIG_MULTIUSER
static inline uid_t __kuid_val(kuid_t uid)
{
return uid.val;
@@ -38,6 +39,17 @@ static inline gid_t __kgid_val(kgid_t gid)
{
return gid.val;
}
+#else
+static inline uid_t __kuid_val(kuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __kgid_val(kgid_t gid)
+{
+ return 0;
+}
+#endif
#define GLOBAL_ROOT_UID KUIDT_INIT(0)
#define GLOBAL_ROOT_GID KGIDT_INIT(0)
@@ -97,12 +109,12 @@ static inline bool gid_lte(kgid_t left, kgid_t right)
static inline bool uid_valid(kuid_t uid)
{
- return !uid_eq(uid, INVALID_UID);
+ return __kuid_val(uid) != (uid_t) -1;
}
static inline bool gid_valid(kgid_t gid)
{
- return !gid_eq(gid, INVALID_GID);
+ return __kgid_val(gid) != (gid_t) -1;
}
#ifdef CONFIG_USER_NS
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 71880299ed48..8b01e1c3c614 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,6 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -111,6 +112,14 @@ static inline bool iter_is_iovec(struct iov_iter *i)
}
/*
+ * Get one of READ or WRITE out of iter->type without any other flags OR'd in
+ * with it.
+ *
+ * The ?: is just for type safety.
+ */
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+
+/*
* Cap the iov_iter by given limit; note that the second argument is
* *not* the new size - it's upper limit for such. Passing it a value
* greater than the amount of data in iov_iter is fine - it'll just do
@@ -139,4 +148,18 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+int import_iovec(int type, const struct iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+
+#ifdef CONFIG_COMPAT
+struct compat_iovec;
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+#endif
+
+int import_single_range(int type, void __user *buf, size_t len,
+ struct iovec *iov, struct iov_iter *i);
+
#endif
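Note: the new import_single_range()/import_iovec() helpers plus iov_iter_rw() let read/write paths build an iov_iter straight from user memory. A minimal sketch under the prototypes declared above; the buffer and function names are hypothetical, not from this patch:

    #include <linux/fs.h>        /* READ / WRITE */
    #include <linux/uio.h>

    /* Wrap a single user buffer in an iov_iter as the destination of a read. */
    static int demo_fill_iter(void __user *ubuf, size_t len, struct iovec *iov,
                              struct iov_iter *iter)
    {
            int ret = import_single_range(READ, ubuf, len, iov, iter);

            if (ret)
                    return ret;
            /* iov_iter_rw() recovers READ/WRITE from iter->type (READ here). */
            return iov_iter_rw(iter) == READ ? 0 : -EINVAL;
    }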
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index a7f2604c5f25..7f5f78bd15ad 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -77,6 +77,8 @@
/* Cannot handle ATA_12 or ATA_16 CDBs */ \
US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
+ US_FLAG(MAX_SECTORS_240, 0x08000000) \
+ /* Sets max_sectors to 240 */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
new file mode 100644
index 000000000000..f9b2ce58039b
--- /dev/null
+++ b/include/linux/util_macros.h
@@ -0,0 +1,40 @@
+#ifndef _LINUX_HELPER_MACROS_H_
+#define _LINUX_HELPER_MACROS_H_
+
+#define __find_closest(x, a, as, op) \
+({ \
+ typeof(as) __fc_i, __fc_as = (as) - 1; \
+ typeof(x) __fc_x = (x); \
+ typeof(*a) const *__fc_a = (a); \
+ for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
+ if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \
+ __fc_a[__fc_i + 1], 2)) \
+ break; \
+ } \
+ (__fc_i); \
+})
+
+/**
+ * find_closest - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in ascending order.
+ * @as: Size of 'a'.
+ *
+ * Returns the index of the element closest to 'x'.
+ */
+#define find_closest(x, a, as) __find_closest(x, a, as, <=)
+
+/**
+ * find_closest_descending - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in descending order.
+ * @as: Size of 'a'.
+ *
+ * Similar to find_closest() but 'a' is expected to be sorted in descending
+ * order.
+ */
+#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+
+#endif
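Note: find_closest() returns an index into the array, not a value. A short sketch of how a driver might map a requested setting onto a table of supported rates; the table is made up for illustration:

    #include <linux/kernel.h>        /* DIV_ROUND_CLOSEST(), ARRAY_SIZE() */
    #include <linux/util_macros.h>

    /* Hypothetical table of supported update rates, ascending order. */
    static const int demo_rates[] = { 10, 25, 50, 100, 200 };

    static int demo_pick_rate(int requested)
    {
            int idx = find_closest(requested, demo_rates, ARRAY_SIZE(demo_rates));

            return demo_rates[idx];  /* e.g. requested = 60 -> 50 */
    }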
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 049b2f497bc7..ddb440975382 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -14,6 +14,8 @@
#include <linux/iommu.h>
#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
#include <uapi/linux/vfio.h>
/**
@@ -110,4 +112,27 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
return -ENOTTY;
}
#endif /* CONFIG_EEH */
+
+/*
+ * IRQfd - generic
+ */
+struct virqfd {
+ void *opaque;
+ struct eventfd_ctx *eventfd;
+ int (*handler)(void *, void *);
+ void (*thread)(void *, void *);
+ void *data;
+ struct work_struct inject;
+ wait_queue_t wait;
+ poll_table pt;
+ struct work_struct shutdown;
+ struct virqfd **pvirqfd;
+};
+
+extern int vfio_virqfd_enable(void *opaque,
+ int (*handler)(void *, void *),
+ void (*thread)(void *, void *),
+ void *data, struct virqfd **pvirqfd, int fd);
+extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
+
#endif /* VFIO_H */
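Note: vfio_virqfd_enable() wires an eventfd to a handler that runs when the eventfd is signalled. A rough consumer sketch under the prototypes above; the device structure and handler behaviour are hypothetical:

    #include <linux/vfio.h>

    struct demo_dev {
            struct virqfd *req_trigger;     /* managed by the virqfd core */
    };

    /* Called from the eventfd wakeup path; must not sleep. */
    static int demo_virqfd_handler(void *opaque, void *data)
    {
            /* react to the signalled eventfd, e.g. latch a request flag */
            return 0;
    }

    static int demo_enable(struct demo_dev *d, int eventfd)
    {
            return vfio_virqfd_enable(d, demo_virqfd_handler, NULL, NULL,
                                      &d->req_trigger, eventfd);
    }

    static void demo_disable(struct demo_dev *d)
    {
            vfio_virqfd_disable(&d->req_trigger);
    }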
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index c37bd4d06739..8c3b412d84df 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,13 @@ struct pci_dev;
* out of the arbitration process (and can be safe to take
* interrupts at any time.
*/
+#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes) { };
+#endif
/**
* vga_get - acquire & locks VGA resources
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 28f0e65b9a11..8f4d4bfa6d46 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -108,8 +108,6 @@ struct virtio_device {
void *priv;
};
-bool virtio_device_is_legacy_only(struct virtio_device_id id);
-
static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
return container_of(_dev, struct virtio_device, dev);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index ca3ed78e5ec7..1e306f727edc 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -298,13 +298,6 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
} \
} while(0)
-static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
-{
- u8 ret;
- vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
-}
-
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
unsigned int offset,
@@ -326,7 +319,6 @@ static inline void __virtio_cread_many(struct virtio_device *vdev,
} while (gen != old);
}
-
static inline void virtio_cread_bytes(struct virtio_device *vdev,
unsigned int offset,
void *buf, size_t len)
@@ -334,6 +326,13 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev,
__virtio_cread_many(vdev, offset, buf, len, 1);
}
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+ u8 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
static inline void virtio_cwrite8(struct virtio_device *vdev,
unsigned int offset, u8 val)
{
@@ -374,7 +373,6 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
unsigned int offset)
{
u64 ret;
- vdev->config->get(vdev, offset, &ret, sizeof(ret));
__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
return virtio64_to_cpu(vdev, (__force __virtio64)ret);
}
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 67e06fe18c03..8e50888a6d59 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -21,19 +21,20 @@
* actually quite cheap.
*/
-#ifdef CONFIG_SMP
static inline void virtio_mb(bool weak_barriers)
{
+#ifdef CONFIG_SMP
if (weak_barriers)
smp_mb();
else
+#endif
mb();
}
static inline void virtio_rmb(bool weak_barriers)
{
if (weak_barriers)
- smp_rmb();
+ dma_rmb();
else
rmb();
}
@@ -41,26 +42,10 @@ static inline void virtio_rmb(bool weak_barriers)
static inline void virtio_wmb(bool weak_barriers)
{
if (weak_barriers)
- smp_wmb();
+ dma_wmb();
else
wmb();
}
-#else
-static inline void virtio_mb(bool weak_barriers)
-{
- mb();
-}
-
-static inline void virtio_rmb(bool weak_barriers)
-{
- rmb();
-}
-
-static inline void virtio_wmb(bool weak_barriers)
-{
- wmb();
-}
-#endif
struct virtio_device;
struct virtqueue;
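Note: with the SMP-only variants removed, the barrier choice is made at run time: weak_barriers selects dma_wmb()/dma_rmb(), otherwise a full wmb()/rmb() is used. A hedged sketch of the publish-then-notify pattern these barriers order; the ring layout and notify hook are hypothetical:

    #include <linux/virtio_ring.h>

    /* Hypothetical producer: expose a new descriptor index to the device. */
    static void demo_publish(u16 *avail_idx_ptr, u16 new_idx, bool weak_barriers)
    {
            *avail_idx_ptr = new_idx;       /* write the ring update first ...   */
            virtio_wmb(weak_barriers);      /* ... then order it before notify   */
            /* demo_notify_device();           hypothetical doorbell write       */
    }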
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 395b70e0eccf..a746bf5216f8 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -137,4 +137,12 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+void watchdog_nmi_disable_all(void);
+void watchdog_nmi_enable_all(void);
+#else
+static inline void watchdog_nmi_disable_all(void) {}
+static inline void watchdog_nmi_enable_all(void) {}
+#endif
+
#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index a9c723be1acf..95704cd4cfab 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -26,28 +26,6 @@
#include <linux/err.h>
-/* Reference clock values */
-enum {
- WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */
- WL12XX_REFCLOCK_26 = 1, /* 26 MHz */
- WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */
- WL12XX_REFCLOCK_52 = 3, /* 52 MHz */
- WL12XX_REFCLOCK_38_XTAL = 4, /* 38.4 MHz, XTAL */
- WL12XX_REFCLOCK_26_XTAL = 5, /* 26 MHz, XTAL */
-};
-
-/* TCXO clock values */
-enum {
- WL12XX_TCXOCLOCK_19_2 = 0, /* 19.2MHz */
- WL12XX_TCXOCLOCK_26 = 1, /* 26 MHz */
- WL12XX_TCXOCLOCK_38_4 = 2, /* 38.4MHz */
- WL12XX_TCXOCLOCK_52 = 3, /* 52 MHz */
- WL12XX_TCXOCLOCK_16_368 = 4, /* 16.368 MHz */
- WL12XX_TCXOCLOCK_32_736 = 5, /* 32.736 MHz */
- WL12XX_TCXOCLOCK_16_8 = 6, /* 16.8 MHz */
- WL12XX_TCXOCLOCK_33_6 = 7, /* 33.6 MHz */
-};
-
struct wl1251_platform_data {
int power_gpio;
/* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
@@ -55,23 +33,8 @@ struct wl1251_platform_data {
bool use_eeprom;
};
-struct wl12xx_platform_data {
- int irq;
- int board_ref_clock;
- int board_tcxo_clock;
- unsigned long platform_quirks;
- bool pwr_in_suspend;
-};
-
-/* Platform does not support level trigger interrupts */
-#define WL12XX_PLATFORM_QUIRK_EDGE_IRQ BIT(0)
-
#ifdef CONFIG_WILINK_PLATFORM_DATA
-int wl12xx_set_platform_data(const struct wl12xx_platform_data *data);
-
-struct wl12xx_platform_data *wl12xx_get_platform_data(void);
-
int wl1251_set_platform_data(const struct wl1251_platform_data *data);
struct wl1251_platform_data *wl1251_get_platform_data(void);
@@ -79,18 +42,6 @@ struct wl1251_platform_data *wl1251_get_platform_data(void);
#else
static inline
-int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
-{
- return -ENOSYS;
-}
-
-static inline
-struct wl12xx_platform_data *wl12xx_get_platform_data(void)
-{
- return ERR_PTR(-ENODATA);
-}
-
-static inline
int wl1251_set_platform_data(const struct wl1251_platform_data *data)
{
return -ENOSYS;
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 3283c6a55425..1338190b5478 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -47,5 +47,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
+unsigned long zs_compact(struct zs_pool *pool);
#endif
diff --git a/include/media/adv7604.h b/include/media/adv7604.h
index aa1c4477722d..9ecf353160c1 100644
--- a/include/media/adv7604.h
+++ b/include/media/adv7604.h
@@ -47,16 +47,16 @@ enum adv7604_bus_order {
};
/* Input Color Space (IO register 0x02, [7:4]) */
-enum adv7604_inp_color_space {
- ADV7604_INP_COLOR_SPACE_LIM_RGB = 0,
- ADV7604_INP_COLOR_SPACE_FULL_RGB = 1,
- ADV7604_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
- ADV7604_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
- ADV7604_INP_COLOR_SPACE_XVYCC_601 = 4,
- ADV7604_INP_COLOR_SPACE_XVYCC_709 = 5,
- ADV7604_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
- ADV7604_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
- ADV7604_INP_COLOR_SPACE_AUTO = 0xf,
+enum adv76xx_inp_color_space {
+ ADV76XX_INP_COLOR_SPACE_LIM_RGB = 0,
+ ADV76XX_INP_COLOR_SPACE_FULL_RGB = 1,
+ ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
+ ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
+ ADV76XX_INP_COLOR_SPACE_XVYCC_601 = 4,
+ ADV76XX_INP_COLOR_SPACE_XVYCC_709 = 5,
+ ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
+ ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
+ ADV76XX_INP_COLOR_SPACE_AUTO = 0xf,
};
/* Select output format (IO register 0x03, [4:2]) */
@@ -66,38 +66,39 @@ enum adv7604_op_format_mode_sel {
ADV7604_OP_FORMAT_MODE2 = 0x08,
};
-enum adv7604_drive_strength {
- ADV7604_DR_STR_MEDIUM_LOW = 1,
- ADV7604_DR_STR_MEDIUM_HIGH = 2,
- ADV7604_DR_STR_HIGH = 3,
+enum adv76xx_drive_strength {
+ ADV76XX_DR_STR_MEDIUM_LOW = 1,
+ ADV76XX_DR_STR_MEDIUM_HIGH = 2,
+ ADV76XX_DR_STR_HIGH = 3,
};
-enum adv7604_int1_config {
- ADV7604_INT1_CONFIG_OPEN_DRAIN,
- ADV7604_INT1_CONFIG_ACTIVE_LOW,
- ADV7604_INT1_CONFIG_ACTIVE_HIGH,
- ADV7604_INT1_CONFIG_DISABLED,
+/* INT1 Configuration (IO register 0x40, [1:0]) */
+enum adv76xx_int1_config {
+ ADV76XX_INT1_CONFIG_OPEN_DRAIN,
+ ADV76XX_INT1_CONFIG_ACTIVE_LOW,
+ ADV76XX_INT1_CONFIG_ACTIVE_HIGH,
+ ADV76XX_INT1_CONFIG_DISABLED,
};
-enum adv7604_page {
- ADV7604_PAGE_IO,
+enum adv76xx_page {
+ ADV76XX_PAGE_IO,
ADV7604_PAGE_AVLINK,
- ADV7604_PAGE_CEC,
- ADV7604_PAGE_INFOFRAME,
+ ADV76XX_PAGE_CEC,
+ ADV76XX_PAGE_INFOFRAME,
ADV7604_PAGE_ESDP,
ADV7604_PAGE_DPP,
- ADV7604_PAGE_AFE,
- ADV7604_PAGE_REP,
- ADV7604_PAGE_EDID,
- ADV7604_PAGE_HDMI,
- ADV7604_PAGE_TEST,
- ADV7604_PAGE_CP,
+ ADV76XX_PAGE_AFE,
+ ADV76XX_PAGE_REP,
+ ADV76XX_PAGE_EDID,
+ ADV76XX_PAGE_HDMI,
+ ADV76XX_PAGE_TEST,
+ ADV76XX_PAGE_CP,
ADV7604_PAGE_VDP,
- ADV7604_PAGE_MAX,
+ ADV76XX_PAGE_MAX,
};
/* Platform dependent definition */
-struct adv7604_platform_data {
+struct adv76xx_platform_data {
/* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */
unsigned disable_pwrdnb:1;
@@ -116,7 +117,7 @@ struct adv7604_platform_data {
enum adv7604_op_format_mode_sel op_format_mode_sel;
/* Configuration of the INT1 pin */
- enum adv7604_int1_config int1_config;
+ enum adv76xx_int1_config int1_config;
/* IO register 0x02 */
unsigned alt_gamma:1;
@@ -134,9 +135,9 @@ struct adv7604_platform_data {
unsigned inv_llc_pol:1;
/* IO register 0x14 */
- enum adv7604_drive_strength dr_str_data;
- enum adv7604_drive_strength dr_str_clk;
- enum adv7604_drive_strength dr_str_sync;
+ enum adv76xx_drive_strength dr_str_data;
+ enum adv76xx_drive_strength dr_str_clk;
+ enum adv76xx_drive_strength dr_str_sync;
/* IO register 0x30 */
unsigned output_bus_lsb_to_msb:1;
@@ -145,11 +146,11 @@ struct adv7604_platform_data {
unsigned hdmi_free_run_mode;
/* i2c addresses: 0 == use default */
- u8 i2c_addresses[ADV7604_PAGE_MAX];
+ u8 i2c_addresses[ADV76XX_PAGE_MAX];
};
-enum adv7604_pad {
- ADV7604_PAD_HDMI_PORT_A = 0,
+enum adv76xx_pad {
+ ADV76XX_PAD_HDMI_PORT_A = 0,
ADV7604_PAD_HDMI_PORT_B = 1,
ADV7604_PAD_HDMI_PORT_C = 2,
ADV7604_PAD_HDMI_PORT_D = 3,
@@ -158,7 +159,7 @@ enum adv7604_pad {
/* The source pad is either 1 (ADV7611) or 6 (ADV7604) */
ADV7604_PAD_SOURCE = 6,
ADV7611_PAD_SOURCE = 1,
- ADV7604_PAD_MAX = 7,
+ ADV76XX_PAD_MAX = 7,
};
#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000)
@@ -166,7 +167,7 @@ enum adv7604_pad {
#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
/* notify events */
-#define ADV7604_HOTPLUG 1
-#define ADV7604_FMT_CHANGE 2
+#define ADV76XX_HOTPLUG 1
+#define ADV76XX_FMT_CHANGE 2
#endif
diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h
index 288772e6900a..28bcd71cdd26 100644
--- a/include/media/davinci/vpfe_capture.h
+++ b/include/media/davinci/vpfe_capture.h
@@ -102,7 +102,7 @@ struct vpfe_config {
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
- struct video_device *video_dev;
+ struct video_device video_dev;
/* sub devices */
struct v4l2_subdev **sd;
/* vpfe cfg */
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index e00459185d20..0c003d817493 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -44,6 +44,15 @@ struct media_pad {
unsigned long flags; /* Pad flags (MEDIA_PAD_FL_*) */
};
+/**
+ * struct media_entity_operations - Media entity operations
+ * @link_setup: Notify the entity of link changes. The operation can
+ * return an error, in which case link setup will be
+ * cancelled. Optional.
+ * @link_validate: Return whether a link is valid from the entity point of
+ * view. The media_entity_pipeline_start() function
+ * validates all links by calling this operation. Optional.
+ */
struct media_entity_operations {
int (*link_setup)(struct media_entity *entity,
const struct media_pad *local,
@@ -87,17 +96,7 @@ struct media_entity {
struct {
u32 major;
u32 minor;
- } v4l;
- struct {
- u32 major;
- u32 minor;
- } fb;
- struct {
- u32 card;
- u32 device;
- u32 subdevice;
- } alsa;
- int dvb;
+ } dev;
/* Sub-device specifications */
/* Nothing needed yet */
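Note: the new kernel-doc above describes the two optional callbacks. A hedged sketch of an entity providing link_setup (the conflict check is hypothetical; the full callback signature is assumed from the in-tree definition):

    #include <media/media-entity.h>

    /* Hypothetical entity: vet link changes before the core applies them. */
    static int demo_link_setup(struct media_entity *entity,
                               const struct media_pad *local,
                               const struct media_pad *remote, u32 flags)
    {
            if (!(flags & MEDIA_LNK_FL_ENABLED))
                    return 0;       /* disabling a link is always allowed */
            /* ... inspect local/remote pads, return -EBUSY on conflict ... */
            return 0;
    }

    static const struct media_entity_operations demo_entity_ops = {
            .link_setup = demo_link_setup,
    };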
diff --git a/include/media/mt9p031.h b/include/media/mt9p031.h
index b1e63f2b72bd..1ba361205af1 100644
--- a/include/media/mt9p031.h
+++ b/include/media/mt9p031.h
@@ -5,12 +5,10 @@ struct v4l2_subdev;
/*
* struct mt9p031_platform_data - MT9P031 platform data
- * @reset: Chip reset GPIO (set to -1 if not used)
* @ext_freq: Input clock frequency
* @target_freq: Pixel clock frequency
*/
struct mt9p031_platform_data {
- int reset;
int ext_freq;
int target_freq;
};
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index 398279dd1922..048f8f9117ef 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -45,7 +45,7 @@ enum {
};
/**
- * struct isp_parallel_platform_data - Parallel interface platform data
+ * struct isp_parallel_cfg - Parallel interface configuration
* @data_lane_shift: Data lane shifter
* ISP_LANE_SHIFT_0 - CAMEXT[13:0] -> CAM[13:0]
* ISP_LANE_SHIFT_2 - CAMEXT[13:2] -> CAM[11:0]
@@ -62,7 +62,7 @@ enum {
* @data_pol: Data polarity
* 0 - Normal, 1 - One's complement
*/
-struct isp_parallel_platform_data {
+struct isp_parallel_cfg {
unsigned int data_lane_shift:2;
unsigned int clk_pol:1;
unsigned int hs_pol:1;
@@ -105,7 +105,7 @@ struct isp_csiphy_lanes_cfg {
};
/**
- * struct isp_ccp2_platform_data - CCP2 interface platform data
+ * struct isp_ccp2_cfg - CCP2 interface configuration
* @strobe_clk_pol: Strobe/clock polarity
* 0 - Non Inverted, 1 - Inverted
* @crc: Enable the cyclic redundancy check
@@ -117,7 +117,7 @@ struct isp_csiphy_lanes_cfg {
* ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
* @vpclk_div: Video port output clock control
*/
-struct isp_ccp2_platform_data {
+struct isp_ccp2_cfg {
unsigned int strobe_clk_pol:1;
unsigned int crc:1;
unsigned int ccp2_mode:1;
@@ -127,39 +127,31 @@ struct isp_ccp2_platform_data {
};
/**
- * struct isp_csi2_platform_data - CSI2 interface platform data
+ * struct isp_csi2_cfg - CSI2 interface configuration
* @crc: Enable the cyclic redundancy check
- * @vpclk_div: Video port output clock control
*/
-struct isp_csi2_platform_data {
+struct isp_csi2_cfg {
unsigned crc:1;
- unsigned vpclk_div:2;
struct isp_csiphy_lanes_cfg lanecfg;
};
-struct isp_subdev_i2c_board_info {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
-};
-
-struct isp_v4l2_subdevs_group {
- struct isp_subdev_i2c_board_info *subdevs;
+struct isp_bus_cfg {
enum isp_interface_type interface;
union {
- struct isp_parallel_platform_data parallel;
- struct isp_ccp2_platform_data ccp2;
- struct isp_csi2_platform_data csi2;
+ struct isp_parallel_cfg parallel;
+ struct isp_ccp2_cfg ccp2;
+ struct isp_csi2_cfg csi2;
} bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
};
-struct isp_platform_xclk {
- const char *dev_id;
- const char *con_id;
+struct isp_platform_subdev {
+ struct i2c_board_info *board_info;
+ int i2c_adapter_id;
+ struct isp_bus_cfg *bus;
};
struct isp_platform_data {
- struct isp_platform_xclk xclks[2];
- struct isp_v4l2_subdevs_group *subdevs;
+ struct isp_platform_subdev *subdevs;
void (*set_constraints)(struct isp_device *isp, bool enable);
};
diff --git a/include/media/ov2659.h b/include/media/ov2659.h
new file mode 100644
index 000000000000..4216adc1ede2
--- /dev/null
+++ b/include/media/ov2659.h
@@ -0,0 +1,34 @@
+/*
+ * Omnivision OV2659 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2015 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef OV2659_H
+#define OV2659_H
+
+/**
+ * struct ov2659_platform_data - ov2659 driver platform data
+ * @link_frequency: target pixel clock frequency
+ */
+struct ov2659_platform_data {
+ s64 link_frequency;
+};
+
+#endif /* OV2659_H */
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index 944ecdf3530f..92766f77a5de 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -178,8 +178,8 @@ struct saa7146_use_ops {
};
/* from saa7146_fops.c */
-int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev, char *name, int type);
-int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev);
+int saa7146_register_device(struct video_device *vid, struct saa7146_dev *dev, char *name, int type);
+int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev);
void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h
index 0b36cc138304..3ef6e3d5ed6c 100644
--- a/include/media/v4l2-clk.h
+++ b/include/media/v4l2-clk.h
@@ -22,14 +22,15 @@
struct module;
struct device;
+struct clk;
struct v4l2_clk {
struct list_head list;
const struct v4l2_clk_ops *ops;
const char *dev_id;
- const char *id;
int enable;
struct mutex lock; /* Protect the enable count */
atomic_t use_count;
+ struct clk *clk;
void *priv;
};
@@ -43,7 +44,7 @@ struct v4l2_clk_ops {
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
const char *dev_name,
- const char *name, void *priv);
+ void *priv);
void v4l2_clk_unregister(struct v4l2_clk *clk);
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id);
void v4l2_clk_put(struct v4l2_clk *clk);
@@ -55,14 +56,13 @@ int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate);
struct module;
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
- const char *id, unsigned long rate, struct module *owner);
+ unsigned long rate, struct module *owner);
void v4l2_clk_unregister_fixed(struct v4l2_clk *clk);
static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id,
- const char *id,
unsigned long rate)
{
- return __v4l2_clk_register_fixed(dev_id, id, rate, THIS_MODULE);
+ return __v4l2_clk_register_fixed(dev_id, rate, THIS_MODULE);
}
#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 3e4fddfc840c..acbcd2f5fe7f 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -65,7 +65,6 @@ struct v4l2_file_operations {
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
- long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index ffb69da3ce9e..9c581578783f 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -58,8 +58,6 @@ struct v4l2_device {
struct v4l2_ctrl_handler *ctrl_handler;
/* Device's priority state */
struct v4l2_prio_state prio;
- /* BKL replacement mutex. Temporary solution only. */
- struct mutex ioctl_lock;
/* Keep track of the references to this struct. */
struct kref ref;
/* Release function that is called when the ref count goes to 0. */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 8537983b9b22..8fbbd76d78e8 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -23,12 +23,6 @@ struct v4l2_ioctl_ops {
/* VIDIOC_QUERYCAP handler */
int (*vidioc_querycap)(struct file *file, void *fh, struct v4l2_capability *cap);
- /* Priority handling */
- int (*vidioc_g_priority) (struct file *file, void *fh,
- enum v4l2_priority *p);
- int (*vidioc_s_priority) (struct file *file, void *fh,
- enum v4l2_priority p);
-
/* VIDIOC_ENUM_FMT handlers */
int (*vidioc_enum_fmt_vid_cap) (struct file *file, void *fh,
struct v4l2_fmtdesc *f);
diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
index 70fa7b7b0487..f831c9c225b6 100644
--- a/include/media/v4l2-of.h
+++ b/include/media/v4l2-of.h
@@ -29,12 +29,15 @@ struct device_node;
* @data_lanes: an array of physical data lane indexes
* @clock_lane: physical lane index of the clock lane
* @num_data_lanes: number of data lanes
+ * @lane_polarities: polarity of the lanes. The order is the same of
+ * the physical lanes.
*/
struct v4l2_of_bus_mipi_csi2 {
unsigned int flags;
unsigned char data_lanes[4];
unsigned char clock_lane;
unsigned short num_data_lanes;
+ bool lane_polarities[5];
};
/**
@@ -66,9 +69,26 @@ struct v4l2_of_endpoint {
struct list_head head;
};
+/**
+ * struct v4l2_of_link - a link between two endpoints
+ * @local_node: pointer to device_node of this endpoint
+ * @local_port: identifier of the port this endpoint belongs to
+ * @remote_node: pointer to device_node of the remote endpoint
+ * @remote_port: identifier of the port the remote endpoint belongs to
+ */
+struct v4l2_of_link {
+ struct device_node *local_node;
+ unsigned int local_port;
+ struct device_node *remote_node;
+ unsigned int remote_port;
+};
+
#ifdef CONFIG_OF
int v4l2_of_parse_endpoint(const struct device_node *node,
struct v4l2_of_endpoint *endpoint);
+int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link);
+void v4l2_of_put_link(struct v4l2_of_link *link);
#else /* CONFIG_OF */
static inline int v4l2_of_parse_endpoint(const struct device_node *node,
@@ -77,6 +97,16 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
+static inline int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link)
+{
+ return -ENOSYS;
+}
+
+static inline void v4l2_of_put_link(struct v4l2_of_link *link)
+{
+}
+
#endif /* CONFIG_OF */
#endif /* _V4L2_OF_H */
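Note: v4l2_of_parse_link() fills the new struct v4l2_of_link from an endpoint node and takes device_node references that v4l2_of_put_link() drops. A minimal sketch under the prototypes above; the caller is hypothetical:

    #include <linux/printk.h>
    #include <media/v4l2-of.h>

    static int demo_print_link(const struct device_node *ep)
    {
            struct v4l2_of_link link;
            int ret = v4l2_of_parse_link(ep, &link);

            if (ret)
                    return ret;             /* -ENOSYS without CONFIG_OF */
            pr_info("port %u -> remote port %u\n",
                    link.local_port, link.remote_port);
            v4l2_of_put_link(&link);        /* release the node references */
            return 0;
    }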
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5beeb8744fd1..2f0a345a7fed 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -332,8 +332,6 @@ struct v4l2_subdev_video_ops {
struct v4l2_subdev_frame_interval *interval);
int (*s_frame_interval)(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *interval);
- int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
- int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival);
int (*s_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*g_dv_timings)(struct v4l2_subdev *sd,
@@ -482,6 +480,18 @@ struct v4l2_subdev_ir_ops {
struct v4l2_subdev_ir_parameters *params);
};
+/*
+ * Used for storing subdev pad information. This structure only needs
+ * to be passed to the pad op if the 'which' field of the main argument
+ * is set to V4L2_SUBDEV_FORMAT_TRY. For V4L2_SUBDEV_FORMAT_ACTIVE it is
+ * safe to pass NULL.
+ */
+struct v4l2_subdev_pad_config {
+ struct v4l2_mbus_framefmt try_fmt;
+ struct v4l2_rect try_crop;
+ struct v4l2_rect try_compose;
+};
+
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
* @get_frame_desc: get the current low level media bus frame parameters.
@@ -489,21 +499,26 @@ struct v4l2_subdev_ir_ops {
* may be adjusted by the subdev driver to device capabilities.
*/
struct v4l2_subdev_pad_ops {
- int (*enum_mbus_code)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*enum_mbus_code)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code);
int (*enum_frame_size)(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse);
int (*enum_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_interval_enum *fie);
- int (*get_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*get_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format);
- int (*set_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*set_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format);
- int (*get_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*get_selection)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
- int (*set_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*set_selection)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
@@ -625,11 +640,7 @@ struct v4l2_subdev {
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- struct {
- struct v4l2_mbus_framefmt try_fmt;
- struct v4l2_rect try_crop;
- struct v4l2_rect try_compose;
- } *pad;
+ struct v4l2_subdev_pad_config *pad;
#endif
};
@@ -639,17 +650,17 @@ struct v4l2_subdev_fh {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
#define __V4L2_SUBDEV_MK_GET_TRY(rtype, fun_name, field_name) \
static inline struct rtype * \
- v4l2_subdev_get_try_##fun_name(struct v4l2_subdev_fh *fh, \
- unsigned int pad) \
+ fun_name(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_pad_config *cfg, \
+ unsigned int pad) \
{ \
- BUG_ON(pad >= vdev_to_v4l2_subdev( \
- fh->vfh.vdev)->entity.num_pads); \
- return &fh->pad[pad].field_name; \
+ BUG_ON(pad >= sd->entity.num_pads); \
+ return &cfg[pad].field_name; \
}
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, format, try_fmt)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_crop)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, compose, try_compose)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, v4l2_subdev_get_try_format, try_fmt)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_crop, try_crop)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_compose, try_compose)
#endif
extern const struct v4l2_file_operations v4l2_subdev_fops;
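Note: pad operations now receive a struct v4l2_subdev_pad_config * instead of the file handle, and the try-format accessors take (sd, cfg, pad). A hedged sketch of a get_fmt handler after this change; the driver state side is hypothetical:

    #include <media/v4l2-subdev.h>

    static int demo_get_fmt(struct v4l2_subdev *sd,
                            struct v4l2_subdev_pad_config *cfg,
                            struct v4l2_subdev_format *fmt)
    {
            if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
                    /* cfg is the per-file-handle pad array passed by the core */
                    fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
                    return 0;
            }
            /* V4L2_SUBDEV_FORMAT_ACTIVE: fill from driver state instead */
            /* fmt->format = demo->active_fmt[fmt->pad];   hypothetical */
            return 0;
    }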
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bd2cec2d6c3d..a5790fd5d125 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -134,17 +134,6 @@ enum vb2_io_modes {
};
/**
- * enum vb2_fileio_flags - flags for selecting a mode of the file io emulator,
- * by default the 'streaming' style is used by the file io emulator
- * @VB2_FILEIO_READ_ONCE: report EOF after reading the first buffer
- * @VB2_FILEIO_WRITE_IMMEDIATELY: queue buffer after each write() call
- */
-enum vb2_fileio_flags {
- VB2_FILEIO_READ_ONCE = (1 << 0),
- VB2_FILEIO_WRITE_IMMEDIATELY = (1 << 1),
-};
-
-/**
* enum vb2_buffer_state - current video buffer state
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
@@ -346,7 +335,9 @@ struct v4l2_fh;
*
* @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h
* @io_modes: supported io methods (see vb2_io_modes enum)
- * @io_flags: additional io flags (see vb2_fileio_flags enum)
+ * @fileio_read_once: report EOF after reading the first buffer
+ * @fileio_write_immediately: queue buffer after each write() call
+ * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -396,7 +387,10 @@ struct v4l2_fh;
struct vb2_queue {
enum v4l2_buf_type type;
unsigned int io_modes;
- unsigned int io_flags;
+ unsigned fileio_read_once:1;
+ unsigned fileio_write_immediately:1;
+ unsigned allow_zero_bytesused:1;
+
struct mutex *lock;
struct v4l2_fh *owner;
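Note: the io_flags field is replaced by individual bitfields, so drivers set them directly at queue init. A hedged sketch of the conversion; the surrounding queue setup is abbreviated:

    #include <media/videobuf2-core.h>

    static void demo_queue_init(struct vb2_queue *q)
    {
            q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            q->io_modes = VB2_MMAP | VB2_READ;
            /* formerly: q->io_flags = VB2_FILEIO_READ_ONCE; */
            q->fileio_read_once = 1;    /* report EOF after the first buffer */
    }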
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 6fab66c5c5af..c6b97e58cf84 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -211,6 +211,8 @@ struct p9_dirent {
char d_name[256];
};
+struct iov_iter;
+
int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
const char *name);
@@ -236,10 +238,8 @@ int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
-int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
- u64 offset, u32 count);
-int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
- u64 offset, u32 count);
+int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
+int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
struct p9_dirent *dirent);
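Note: p9_client_read()/p9_client_write() now take an iov_iter plus an error pointer instead of raw kernel/user buffers. A hedged sketch of a read into a single user buffer using import_single_range() from the uio.h change above; the wrapper is hypothetical:

    #include <linux/fs.h>        /* READ */
    #include <linux/uio.h>
    #include <net/9p/client.h>

    static int demo_p9_read(struct p9_fid *fid, u64 offset,
                            void __user *ubuf, size_t len)
    {
            struct iovec iov;
            struct iov_iter to;
            int err = 0;
            int n;

            n = import_single_range(READ, ubuf, len, &iov, &to);
            if (n)
                    return n;
            n = p9_client_read(fid, offset, &to, &err);  /* bytes copied */
            return err ? err : n;
    }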
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 2a25dec30211..5122b5e40f78 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -61,7 +61,7 @@ struct p9_trans_module {
int (*cancel) (struct p9_client *, struct p9_req_t *req);
int (*cancelled)(struct p9_client *, struct p9_req_t *req);
int (*zc_request)(struct p9_client *, struct p9_req_t *,
- char *, char *, int , int, int, int);
+ struct iov_iter *, struct iov_iter *, int , int, int);
};
void v9fs_register_trans(struct p9_trans_module *m);
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 0d87674fb775..172632dd9930 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -100,8 +100,8 @@ struct vsock_transport {
/* DGRAM. */
int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
- int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
- struct msghdr *msg, size_t len, int flags);
+ int (*dgram_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len, int flags);
int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
struct msghdr *, size_t len);
bool (*dgram_allow)(u32 cid, u32 port);
diff --git a/include/net/arp.h b/include/net/arp.h
index 73c49864076b..5e0f891d476c 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -9,28 +9,17 @@
extern struct neigh_table arp_tbl;
-static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
+static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
{
+ u32 key = *(const u32 *)pkey;
u32 val = key ^ hash32_ptr(dev);
- return val * hash_rnd;
+ return val * hash_rnd[0];
}
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
- struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
- struct neighbour *n;
- u32 hash_val;
-
- hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- if (n->dev == dev && *(u32 *)n->primary_key == key)
- return n;
- }
-
- return NULL;
+ return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
@@ -47,7 +36,6 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
}
void arp_init(void);
-int arp_find(unsigned char *haddr, struct sk_buff *skb);
int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
void arp_send(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index bf0396e9a5d3..16a923a3a43a 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <net/neighbour.h>
#define AX25_T1CLAMPLO 1
#define AX25_T1CLAMPHI (30 * HZ)
@@ -366,9 +367,7 @@ int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
struct net_device *);
/* ax25_ip.c */
-int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
- const void *, const void *, unsigned int);
-int ax25_rebuild_header(struct sk_buff *);
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e00455aab18c..7dba80546f16 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -245,10 +245,10 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
-int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags);
-int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags);
+int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
@@ -269,29 +269,34 @@ struct l2cap_ctrl {
__u16 reqseq;
__u16 txseq;
__u8 retries;
+ __le16 psm;
+ bdaddr_t bdaddr;
+ struct l2cap_chan *chan;
};
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
-
-struct hci_req_ctrl {
- bool start;
- u8 event;
- hci_req_complete_t complete;
+typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb);
+
+struct req_ctrl {
+ bool start;
+ u8 event;
+ hci_req_complete_t complete;
+ hci_req_complete_skb_t complete_skb;
};
struct bt_skb_cb {
__u8 pkt_type;
- __u8 incoming;
+ __u8 force_active;
__u16 opcode;
__u16 expect;
- __u8 force_active;
- struct l2cap_chan *chan;
- struct l2cap_ctrl control;
- struct hci_req_ctrl req;
- bdaddr_t bdaddr;
- __le16 psm;
+ __u8 incoming:1;
+ union {
+ struct l2cap_ctrl l2cap;
+ struct req_ctrl req;
+ };
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -339,6 +344,11 @@ out:
int bt_to_errno(__u16 code);
+void hci_sock_set_flag(struct sock *sk, int nr);
+void hci_sock_clear_flag(struct sock *sk, int nr);
+int hci_sock_test_flag(struct sock *sk, int nr);
+unsigned short hci_sock_get_channel(struct sock *sk);
+
int hci_sock_init(void);
void hci_sock_cleanup(void);
@@ -358,6 +368,9 @@ void l2cap_exit(void);
int sco_init(void);
void sco_exit(void);
+int mgmt_init(void);
+void mgmt_exit(void);
+
void bt_sock_reclassify_lock(struct sock *sk, int proto);
#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 8e54f825153c..d95da83cb1b0 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -160,6 +160,14 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+
+ /* When this quirk is set, LE scan and BR/EDR inquiry is done
+ * simultaneously, otherwise it's interleaved.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
};
/* HCI device flags */
@@ -179,13 +187,14 @@ enum {
HCI_RESET,
};
-/* BR/EDR and/or LE controller flags: the flags defined here should represent
- * states configured via debugfs for debugging and testing purposes only.
- */
+/* HCI socket flags */
enum {
- HCI_DUT_MODE,
- HCI_FORCE_BREDR_SMP,
- HCI_FORCE_STATIC_ADDR,
+ HCI_SOCK_TRUSTED,
+ HCI_MGMT_INDEX_EVENTS,
+ HCI_MGMT_UNCONF_INDEX_EVENTS,
+ HCI_MGMT_EXT_INDEX_EVENTS,
+ HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_OOB_DATA_EVENTS,
};
/*
@@ -217,6 +226,8 @@ enum {
HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
+ HCI_ADVERTISING_CONNECTABLE,
+ HCI_ADVERTISING_INSTANCE,
HCI_CONNECTABLE,
HCI_DISCOVERABLE,
HCI_LIMITED_DISCOVERABLE,
@@ -225,13 +236,13 @@ enum {
HCI_FAST_CONNECTABLE,
HCI_BREDR_ENABLED,
HCI_LE_SCAN_INTERRUPTED,
-};
-/* A mask for the flags that are supposed to remain when a reset happens
- * or the HCI device is closed.
- */
-#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
- BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
+ HCI_DUT_MODE,
+ HCI_FORCE_BREDR_SMP,
+ HCI_FORCE_STATIC_ADDR,
+
+ __HCI_NUM_FLAGS,
+};
/* HCI timeouts */
#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
@@ -363,6 +374,7 @@ enum {
/* LE features */
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
+#define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_EXT_SCAN_POLICY 0x80
@@ -452,9 +464,16 @@ enum {
#define EIR_NAME_COMPLETE 0x09 /* complete local name */
#define EIR_TX_POWER 0x0A /* transmit power level */
#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
-#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */
-#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */
+#define EIR_SSP_HASH_C192 0x0E /* Simple Pairing Hash C-192 */
+#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */
#define EIR_DEVICE_ID 0x10 /* device ID */
+#define EIR_APPEARANCE 0x19 /* Device appearance */
+#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */
+#define EIR_LE_ROLE 0x1C /* LE role */
+#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */
+#define EIR_SSP_RAND_R256 0x1E /* Simple Pairing Rand R-256 */
+#define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */
+#define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */
/* Low Energy Advertising Flags */
#define LE_AD_LIMITED 0x01 /* Limited Discoverable */
@@ -1358,6 +1377,11 @@ struct hci_cp_le_conn_update {
__le16 max_ce_len;
} __packed;
+#define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016
+struct hci_cp_le_read_remote_features {
+ __le16 handle;
+} __packed;
+
#define HCI_OP_LE_START_ENC 0x2019
struct hci_cp_le_start_enc {
__le16 handle;
@@ -1850,6 +1874,13 @@ struct hci_ev_le_conn_update_complete {
__le16 supervision_timeout;
} __packed;
+#define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04
+struct hci_ev_le_remote_feat_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 features[8];
+} __packed;
+
#define HCI_EV_LE_LTK_REQ 0x05
struct hci_ev_le_ltk_req {
__le16 handle;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 52863c3e0b13..a056c2bfeb81 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -76,6 +76,7 @@ struct discovery_state {
u8 last_adv_data[HCI_MAX_AD_LENGTH];
u8 last_adv_data_len;
bool report_invalid_rssi;
+ bool result_filtering;
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
@@ -108,7 +109,7 @@ struct bt_uuid {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 master;
+ u8 type;
u8 val[16];
};
@@ -154,6 +155,17 @@ struct oob_data {
u8 rand256[16];
};
+struct adv_info {
+ struct delayed_work timeout_exp;
+ __u8 instance;
+ __u32 flags;
+ __u16 timeout;
+ __u16 adv_data_len;
+ __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u16 scan_rsp_len;
+ __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+};
+
#define HCI_MAX_SHORT_NAME_LENGTH 10
/* Default LE RPA expiry time, 15 minutes */
@@ -173,7 +185,6 @@ struct amp_assoc {
#define HCI_MAX_PAGES 3
-#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
struct mutex lock;
@@ -314,14 +325,13 @@ struct hci_dev {
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
- struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
- struct sk_buff *reassembly[NUM_REASSEMBLY];
struct mutex req_lock;
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
+ struct sk_buff *req_skb;
void *smp_data;
void *smp_bredr_data;
@@ -352,8 +362,7 @@ struct hci_dev {
struct rfkill *rfkill;
- unsigned long dbg_flags;
- unsigned long dev_flags;
+ DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
struct delayed_work le_scan_disable;
struct delayed_work le_scan_restart;
@@ -364,6 +373,8 @@ struct hci_dev {
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
__u8 scan_rsp_data_len;
+ struct adv_info adv_instance;
+
__u8 irk[16];
__u32 rpa_timeout;
struct delayed_work rpa_expired;
@@ -373,6 +384,7 @@ struct hci_dev {
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*setup)(struct hci_dev *hdev);
+ int (*shutdown)(struct hci_dev *hdev);
int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
void (*hw_error)(struct hci_dev *hdev, u8 code);
@@ -498,19 +510,29 @@ struct hci_conn_params {
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
-extern rwlock_t hci_cb_list_lock;
+extern struct mutex hci_cb_list_lock;
+
+#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
+
+#define hci_dev_clear_volatile_flags(hdev) \
+ do { \
+ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
+ hci_dev_clear_flag(hdev, HCI_LE_ADV); \
+ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ } while (0)
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
int l2cap_disconn_ind(struct hci_conn *hcon);
-void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
/* ----- Inquiry cache ----- */
@@ -529,6 +551,7 @@ static inline void discovery_init(struct hci_dev *hdev)
static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
{
+ hdev->discovery.result_filtering = false;
hdev->discovery.report_invalid_rssi = true;
hdev->discovery.rssi = HCI_RSSI_INVALID;
hdev->discovery.uuid_count = 0;
@@ -538,6 +561,11 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
hdev->discovery.scan_duration = 0;
}
+static inline void adv_info_init(struct hci_dev *hdev)
+{
+ memset(&hdev->adv_instance, 0, sizeof(struct adv_info));
+}
+
bool hci_discovery_active(struct hci_dev *hdev);
void hci_discovery_set_state(struct hci_dev *hdev, int state);
@@ -584,7 +612,6 @@ enum {
HCI_CONN_SC_ENABLED,
HCI_CONN_AES_CCM,
HCI_CONN_POWER_SAVE,
- HCI_CONN_REMOTE_OOB,
HCI_CONN_FLUSH_KEY,
HCI_CONN_ENCRYPT,
HCI_CONN_AUTH,
@@ -600,14 +627,14 @@ enum {
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}
@@ -969,6 +996,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
void hci_smp_irks_clear(struct hci_dev *hdev);
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 bdaddr_type);
@@ -981,7 +1010,6 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
@@ -1025,10 +1053,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
-#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
- !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
- test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
+#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
+ !hci_dev_test_flag(dev, HCI_AUTO_OFF))
+#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_SC_ENABLED))
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1050,28 +1078,6 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
}
-static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
-{
- switch (conn->type) {
- case ACL_LINK:
- case LE_LINK:
- l2cap_connect_cfm(conn, status);
- break;
-
- case SCO_LINK:
- case ESCO_LINK:
- sco_connect_cfm(conn, status);
- break;
-
- default:
- BT_ERR("unknown link type %d", conn->type);
- break;
- }
-
- if (conn->connect_cfm_cb)
- conn->connect_cfm_cb(conn, status);
-}
-
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
if (conn->type != ACL_LINK && conn->type != LE_LINK)
@@ -1080,91 +1086,69 @@ static inline int hci_proto_disconn_ind(struct hci_conn *conn)
return l2cap_disconn_ind(conn);
}
-static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
-{
- switch (conn->type) {
- case ACL_LINK:
- case LE_LINK:
- l2cap_disconn_cfm(conn, reason);
- break;
-
- case SCO_LINK:
- case ESCO_LINK:
- sco_disconn_cfm(conn, reason);
- break;
-
- /* L2CAP would be handled for BREDR chan */
- case AMP_LINK:
- break;
+/* ----- HCI callbacks ----- */
+struct hci_cb {
+ struct list_head list;
- default:
- BT_ERR("unknown link type %d", conn->type);
- break;
- }
+ char *name;
- if (conn->disconn_cfm_cb)
- conn->disconn_cfm_cb(conn, reason);
-}
+ void (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+ __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+};
-static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
+static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
- __u8 encrypt;
-
- if (conn->type != ACL_LINK && conn->type != LE_LINK)
- return;
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
- return;
+ struct hci_cb *cb;
- encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
- l2cap_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->connect_cfm)
+ cb->connect_cfm(conn, status);
+ }
+ mutex_unlock(&hci_cb_list_lock);
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
}
-static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
- __u8 encrypt)
+static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- if (conn->type != ACL_LINK && conn->type != LE_LINK)
- return;
+ struct hci_cb *cb;
- l2cap_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->disconn_cfm)
+ cb->disconn_cfm(conn, reason);
+ }
+ mutex_unlock(&hci_cb_list_lock);
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
}
-/* ----- HCI callbacks ----- */
-struct hci_cb {
- struct list_head list;
-
- char *name;
-
- void (*security_cfm) (struct hci_conn *conn, __u8 status,
- __u8 encrypt);
- void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
- void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
-};
-
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
__u8 encrypt;
- hci_proto_auth_cfm(conn, status);
-
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
@@ -1178,26 +1162,27 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
if (conn->pending_sec_level > conn->sec_level)
conn->sec_level = conn->pending_sec_level;
- hci_proto_encrypt_cfm(conn, status, encrypt);
-
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
@@ -1205,12 +1190,12 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
{
struct hci_cb *cb;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
}
static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
@@ -1296,8 +1281,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-bool hci_req_pending(struct hci_dev *hdev);
-
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1312,11 +1295,35 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+ int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
+#define HCI_MGMT_VAR_LEN BIT(0)
+#define HCI_MGMT_NO_HDEV BIT(1)
+#define HCI_MGMT_UNTRUSTED BIT(2)
+#define HCI_MGMT_UNCONFIGURED BIT(3)
+
+struct hci_mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ size_t data_len;
+ unsigned long flags;
+};
+
+struct hci_mgmt_chan {
+ struct list_head list;
+ unsigned short channel;
+ size_t handler_count;
+ const struct hci_mgmt_handler *handlers;
+ void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
+};
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
+
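As a hedged illustration of the channel registration interface added above, a subsystem could declare a handler table and channel roughly as follows; the channel number, command size and handler body are made-up placeholders, not values defined by this patch:

/* Illustrative sketch only: registering a hypothetical HCI management
 * channel through the interface declared above.
 */
static int example_cmd(struct sock *sk, struct hci_dev *hdev,
		       void *data, u16 data_len)
{
	/* parse 'data' (data_len bytes) and send a reply on 'sk' */
	return 0;
}

static const struct hci_mgmt_handler example_handlers[] = {
	{ .func = example_cmd, .data_len = 4, .flags = HCI_MGMT_VAR_LEN },
};

static struct hci_mgmt_chan example_chan = {
	.channel	= 42,	/* hypothetical HCI channel number */
	.handler_count	= ARRAY_SIZE(example_handlers),
	.handlers	= example_handlers,
};

/* hci_mgmt_chan_register(&example_chan) at module init,
 * hci_mgmt_chan_unregister(&example_chan) at module exit.
 */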
/* Management interface */
#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
@@ -1336,7 +1343,6 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
@@ -1382,9 +1388,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
- u8 *rand192, u8 *hash256, u8 *rand256,
- u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index e218a30f2061..b831242d48a4 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -43,6 +43,8 @@
#define MGMT_STATUS_CANCELLED 0x10
#define MGMT_STATUS_INVALID_INDEX 0x11
#define MGMT_STATUS_RFKILLED 0x12
+#define MGMT_STATUS_ALREADY_PAIRED 0x13
+#define MGMT_STATUS_PERMISSION_DENIED 0x14
struct mgmt_hdr {
__le16 opcode;
@@ -98,6 +100,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_DEBUG_KEYS 0x00001000
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
+#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -503,6 +506,71 @@ struct mgmt_cp_start_service_discovery {
} __packed;
#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
+#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B
+struct mgmt_cp_read_local_oob_ext_data {
+ __u8 type;
+} __packed;
+#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1
+struct mgmt_rp_read_local_oob_ext_data {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C
+#define MGMT_READ_EXT_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_ext_index_list {
+ __le16 num_controllers;
+ struct {
+ __le16 index;
+ __u8 type;
+ __u8 bus;
+ } entry[0];
+} __packed;
+
+#define MGMT_OP_READ_ADV_FEATURES 0x003D
+#define MGMT_READ_ADV_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_features {
+ __le32 supported_flags;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+ __u8 max_instances;
+ __u8 num_instances;
+ __u8 instance[0];
+} __packed;
+
+#define MGMT_OP_ADD_ADVERTISING 0x003E
+struct mgmt_cp_add_advertising {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[0];
+} __packed;
+#define MGMT_ADD_ADVERTISING_SIZE 11
+struct mgmt_rp_add_advertising {
+ __u8 instance;
+} __packed;
+
+#define MGMT_ADV_FLAG_CONNECTABLE BIT(0)
+#define MGMT_ADV_FLAG_DISCOV BIT(1)
+#define MGMT_ADV_FLAG_LIMITED_DISCOV BIT(2)
+#define MGMT_ADV_FLAG_MANAGED_FLAGS BIT(3)
+#define MGMT_ADV_FLAG_TX_POWER BIT(4)
+#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
+#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+
+#define MGMT_OP_REMOVE_ADVERTISING 0x003F
+struct mgmt_cp_remove_advertising {
+ __u8 instance;
+} __packed;
+#define MGMT_REMOVE_ADVERTISING_SIZE 1
+struct mgmt_rp_remove_advertising {
+ __u8 instance;
+} __packed;
+
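A hedged sketch of how a caller could lay out an Add Advertising command for the new opcode; the instance number, flag choice and zeroed duration/timeout are illustrative assumptions only:

/* Illustrative only: fill an MGMT_OP_ADD_ADVERTISING payload into 'buf',
 * which must hold MGMT_ADD_ADVERTISING_SIZE + adv_len bytes.
 */
static size_t example_fill_add_adv(u8 *buf, const u8 *adv, u8 adv_len)
{
	struct mgmt_cp_add_advertising *cp = (void *)buf;

	cp->instance = 1;
	cp->flags = cpu_to_le32(MGMT_ADV_FLAG_CONNECTABLE);
	cp->duration = cpu_to_le16(0);	/* left at 0 in this sketch */
	cp->timeout = cpu_to_le16(0);	/* left at 0 in this sketch */
	cp->adv_data_len = adv_len;
	cp->scan_rsp_len = 0;
	memcpy(cp->data, adv, adv_len);	/* adv data first, then scan rsp */

	return MGMT_ADD_ADVERTISING_SIZE + adv_len;
}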
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -647,9 +715,14 @@ struct mgmt_ev_new_irk {
struct mgmt_irk_info irk;
} __packed;
+#define MGMT_CSRK_LOCAL_UNAUTHENTICATED 0x00
+#define MGMT_CSRK_REMOTE_UNAUTHENTICATED 0x01
+#define MGMT_CSRK_LOCAL_AUTHENTICATED 0x02
+#define MGMT_CSRK_REMOTE_AUTHENTICATED 0x03
+
struct mgmt_csrk_info {
struct mgmt_addr_info addr;
- __u8 master;
+ __u8 type;
__u8 val[16];
} __packed;
@@ -685,3 +758,29 @@ struct mgmt_ev_new_conn_param {
#define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e
#define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f
+
+struct mgmt_ev_ext_index {
+ __u8 type;
+ __u8 bus;
+} __packed;
+
+#define MGMT_EV_EXT_INDEX_ADDED 0x0020
+
+#define MGMT_EV_EXT_INDEX_REMOVED 0x0021
+
+#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022
+struct mgmt_ev_local_oob_data_updated {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_EV_ADVERTISING_ADDED 0x0023
+struct mgmt_ev_advertising_added {
+ __u8 instance;
+} __packed;
+
+#define MGMT_EV_ADVERTISING_REMOVED 0x0024
+struct mgmt_ev_advertising_removed {
+ __u8 instance;
+} __packed;
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index f04cdbb7848e..c2a40a172fcd 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -82,6 +82,13 @@ typedef enum {
AD_TRANSMIT /* tx Machine */
} tx_states_t;
+/* churn machine states(43.4.17 in the 802.3ad standard) */
+typedef enum {
+ AD_CHURN_MONITOR, /* monitoring for churn */
+ AD_CHURN, /* churn detected (error) */
+ AD_NO_CHURN /* no churn (no error) */
+} churn_state_t;
+
/* rx indication types */
typedef enum {
AD_TYPE_LACPDU = 1, /* type lacpdu */
@@ -229,6 +236,12 @@ typedef struct port {
u16 sm_mux_timer_counter; /* state machine mux timer counter */
tx_states_t sm_tx_state; /* state machine tx state */
u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
+ u16 sm_churn_actor_timer_counter;
+ u16 sm_churn_partner_timer_counter;
+ u32 churn_actor_count;
+ u32 churn_partner_count;
+ churn_state_t sm_churn_actor_state;
+ churn_state_t sm_churn_partner_state;
struct slave *slave; /* pointer to the bond slave that this port belongs to */
struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
@@ -262,6 +275,22 @@ struct ad_slave_info {
u16 id;
};
+static inline const char *bond_3ad_churn_desc(churn_state_t state)
+{
+ static const char *const churn_description[] = {
+ "monitoring",
+ "churned",
+ "none",
+ "unknown"
+ };
+ int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
+
+ if (state >= max_size)
+ state = max_size - 1;
+
+ return churn_description[state];
+}
+
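Since bond_3ad_churn_desc() clamps any out-of-range value to the trailing "unknown" entry, callers can pass either churn state field directly. A hedged caller-side sketch (the proc-style printer is hypothetical):

/* Illustrative only: report both churn states of a port. */
static void example_print_churn(struct seq_file *seq, const struct port *port)
{
	seq_printf(seq, "Actor Churn State: %s\n",
		   bond_3ad_churn_desc(port->sm_churn_actor_state));
	seq_printf(seq, "Partner Churn State: %s\n",
		   bond_3ad_churn_desc(port->sm_churn_partner_state));
}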
/* ========== AD Exported functions to the main bonding code ========== */
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
void bond_3ad_bind_slave(struct slave *slave);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index fda6feeb6c1f..78ed135e9dea 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -30,13 +30,6 @@
#include <net/bond_alb.h>
#include <net/bond_options.h>
-#define DRV_VERSION "3.7.1"
-#define DRV_RELDATE "April 27, 2011"
-#define DRV_NAME "bonding"
-#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
-
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
-
#define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 64e09e1e8099..f8d6813cd5b2 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -215,6 +215,39 @@ enum ieee80211_rate_flags {
};
/**
+ * enum ieee80211_bss_type - BSS type filter
+ *
+ * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS
+ * @IEEE80211_BSS_TYPE_PBSS: Personal BSS
+ * @IEEE80211_BSS_TYPE_IBSS: Independent BSS
+ * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS
+ * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type
+ */
+enum ieee80211_bss_type {
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_BSS_TYPE_PBSS,
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_BSS_TYPE_MBSS,
+ IEEE80211_BSS_TYPE_ANY
+};
+
+/**
+ * enum ieee80211_privacy - BSS privacy filter
+ *
+ * @IEEE80211_PRIVACY_ON: privacy bit set
+ * @IEEE80211_PRIVACY_OFF: privacy bit clear
+ * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting
+ */
+enum ieee80211_privacy {
+ IEEE80211_PRIVACY_ON,
+ IEEE80211_PRIVACY_OFF,
+ IEEE80211_PRIVACY_ANY
+};
+
+#define IEEE80211_PRIVACY(x) \
+ ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF)
+
+/**
* struct ieee80211_rate - bitrate definition
*
* This structure describes a bitrate that an 802.11 PHY can
@@ -2423,6 +2456,7 @@ struct cfg80211_ops {
struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params);
@@ -3183,10 +3217,8 @@ struct wiphy {
const struct ieee80211_ht_cap *ht_capa_mod_mask;
const struct ieee80211_vht_cap *vht_capa_mod_mask;
-#ifdef CONFIG_NET_NS
/* the network namespace this phy lives in currently */
- struct net *_net;
-#endif
+ possible_net_t _net;
#ifdef CONFIG_CFG80211_WEXT
const struct iw_handler_def *wext;
@@ -4012,14 +4044,16 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
const u8 *ssid, size_t ssid_len,
- u16 capa_mask, u16 capa_val);
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy);
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *ssid, size_t ssid_len)
{
return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len,
- WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY_ANY);
}
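With the capability mask/value pair replaced by explicit filters, a lookup that used to pass WLAN_CAPABILITY_* bits now names the BSS type and privacy directly, and IEEE80211_PRIVACY() maps a boolean onto the new enum. A hedged sketch of an updated caller (function name and arguments are assumptions for the example):

/* Illustrative only: look up an infrastructure BSS with the new filters. */
static void example_lookup(struct wiphy *wiphy, struct ieee80211_channel *chan,
			   const u8 *bssid, const u8 *ssid, size_t ssid_len,
			   bool privacy)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
			       IEEE80211_BSS_TYPE_ESS,
			       IEEE80211_PRIVACY(privacy));
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* drop the reference again */
}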
/**
@@ -4260,6 +4294,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
int approxlen);
struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
int vendor_event_idx,
@@ -4314,6 +4349,7 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
/**
* cfg80211_vendor_event_alloc - allocate vendor-specific event skb
* @wiphy: the wiphy
+ * @wdev: the wireless device
* @event_idx: index of the vendor event in the wiphy's vendor_events
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
@@ -4322,16 +4358,20 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
* This function allocates and pre-fills an skb for an event on the
* vendor-specific multicast group.
*
+ * If wdev != NULL, both the ifindex and identifier of the specified
+ * wireless device are added to the event message before the vendor data
+ * attribute.
+ *
* When done filling the skb, call cfg80211_vendor_event() with the
* skb to send the event.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
-cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen,
- int event_idx, gfp_t gfp)
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int approxlen, int event_idx, gfp_t gfp)
{
- return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
+ return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
NL80211_ATTR_VENDOR_DATA,
event_idx, approxlen, gfp);
}
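A hedged sketch of a driver emitting a vendor event with the extra wdev argument; the event index and payload handling are placeholders:

/* Illustrative only: allocate and send a vendor event bound to a wdev. */
static void example_send_vendor_event(struct wiphy *wiphy,
				      struct wireless_dev *wdev)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_event_alloc(wiphy, wdev, 16 /* approxlen */,
					  0 /* event_idx */, GFP_KERNEL);
	if (!skb)
		return;

	/* ... put vendor data attributes into skb here ... */

	cfg80211_vendor_event(skb, GFP_KERNEL);
}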
@@ -4432,7 +4472,7 @@ static inline int cfg80211_testmode_reply(struct sk_buff *skb)
static inline struct sk_buff *
cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
{
- return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE,
+ return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE,
NL80211_ATTR_TESTDATA, -1,
approxlen, gfp);
}
@@ -4862,6 +4902,17 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
bool ieee80211_operating_class_to_band(u8 operating_class,
enum ieee80211_band *band);
+/**
+ * ieee80211_chandef_to_operating_class - convert chandef to operation class
+ *
+ * @chandef: the chandef to convert
+ * @op_class: a pointer to the resulting operating class
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
+ u8 *op_class);
+
/*
* cfg80211_tdls_oper_request - request userspace to perform TDLS operation
* @dev: the device on which the operation is requested
@@ -4950,6 +5001,64 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
u8 *buf, unsigned int bufsize);
/**
+ * ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC)
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @after_ric: array of IE types that come after the RIC element
+ * @n_after_ric: size of the @after_ric array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if not, the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids,
+ const u8 *after_ric, int n_after_ric,
+ size_t offset);
+
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed, this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly, if not the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset);
+
+/**
* cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
* @wdev: the wireless device reporting the wakeup
* @wakeup: the wakeup report
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index eeda67652766..6ea16c84293b 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -30,11 +30,13 @@ struct wpan_phy_cca;
struct cfg802154_ops {
struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
const char *name,
+ unsigned char name_assign_type,
int type);
void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
struct net_device *dev);
int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
const char *name,
+ unsigned char name_assign_type,
enum nl802154_iftype type,
__le64 extended_addr);
int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
diff --git a/include/net/codel.h b/include/net/codel.h
index aeee28081245..1e18005f7f65 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -120,11 +120,13 @@ static inline u32 codel_time_to_us(codel_time_t val)
* struct codel_params - contains codel parameters
* @target: target queue size (in time units)
* @interval: width of moving time window
+ * @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
*/
struct codel_params {
codel_time_t target;
codel_time_t interval;
+ u32 mtu;
bool ecn;
};
@@ -166,10 +168,12 @@ struct codel_stats {
u32 ecn_mark;
};
-static void codel_params_init(struct codel_params *params)
+static void codel_params_init(struct codel_params *params,
+ const struct Qdisc *sch)
{
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
+ params->mtu = psched_mtu(qdisc_dev(sch));
params->ecn = false;
}
@@ -180,7 +184,7 @@ static void codel_vars_init(struct codel_vars *vars)
static void codel_stats_init(struct codel_stats *stats)
{
- stats->maxpacket = 256;
+ stats->maxpacket = 0;
}
/*
@@ -234,7 +238,7 @@ static bool codel_should_drop(const struct sk_buff *skb,
stats->maxpacket = qdisc_pkt_len(skb);
if (codel_time_before(vars->ldelay, params->target) ||
- sch->qstats.backlog <= stats->maxpacket) {
+ sch->qstats.backlog <= params->mtu) {
/* went below - stay below for at least interval */
vars->first_above_time = 0;
return false;
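The two codel hunks above change the init signature (so the device MTU can be cached) and make the standing-queue test compare the backlog against that MTU rather than against the largest packet seen so far. A hedged sketch of a qdisc init path after this change; the private-data struct and function names are invented for the example:

/* Illustrative only: qdisc init using the updated codel helpers. */
struct example_codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
};

static int example_codel_init(struct Qdisc *sch)
{
	struct example_codel_sched_data *q = qdisc_priv(sch);

	codel_params_init(&q->params, sch);	/* caches psched_mtu(qdisc_dev(sch)) */
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);

	return 0;
}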
diff --git a/include/net/compat.h b/include/net/compat.h
index 42a9c8431177..48103cf94e97 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,7 +40,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
-ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 597b88a94332..207d9ba1f92c 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops {
int (*ieee_setets) (struct net_device *, struct ieee_ets *);
int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
+ int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *);
int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_getapp) (struct net_device *, struct dcb_app *);
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index fac4e3f4a6d3..d0424269313f 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -18,10 +18,11 @@ struct dn_neigh {
void dn_neigh_init(void);
void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct sk_buff *skb);
+int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb);
void dn_neigh_pointopoint_hello(struct sk_buff *skb);
int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
extern struct neigh_table dn_neigh_table;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index ed3c34bbb67a..fbca63ba8f73 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -72,6 +72,7 @@ struct dsa_platform_data {
* to the root switch chip of the tree.
*/
struct device *netdev;
+ struct net_device *of_netdev;
/*
* Info structs describing each of the switch chips
@@ -128,6 +129,11 @@ struct dsa_switch {
int index;
/*
+ * Tagging protocol understood by this switch
+ */
+ enum dsa_tag_protocol tag_protocol;
+
+ /*
* Configuration data for this switch.
*/
struct dsa_chip_data *pd;
@@ -165,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
}
+static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+{
+ return ds->phys_port_mask & (1 << p) && ds->ports[p];
+}
+
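A hedged sketch of walking only the initialized ports with the new helper, assuming the existing DSA_MAX_PORTS bound on the per-switch port array:

/* Illustrative only: visit every port that has a slave netdev attached. */
static void example_for_each_port(struct dsa_switch *ds)
{
	int port;

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (!dsa_is_port_initialized(ds, port))
			continue;

		/* operate on ds->ports[port] here */
	}
}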
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
{
struct dsa_switch_tree *dst = ds->dst;
@@ -275,6 +286,22 @@ struct dsa_switch_driver {
int (*get_regs_len)(struct dsa_switch *ds, int port);
void (*get_regs)(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *p);
+
+ /*
+ * Bridge integration
+ */
+ int (*port_join_bridge)(struct dsa_switch *ds, int port,
+ u32 br_port_mask);
+ int (*port_leave_bridge)(struct dsa_switch *ds, int port,
+ u32 br_port_mask);
+ int (*port_stp_update)(struct dsa_switch *ds, int port,
+ u8 state);
+ int (*fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_getnext)(struct dsa_switch *ds, int port,
+ unsigned char *addr, bool *is_static);
};
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 1f99a1de0e4f..d64253914a6a 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -12,7 +12,6 @@ struct sock;
struct dst_ops {
unsigned short family;
- __be16 protocol;
unsigned int gc_thresh;
int (*gc)(struct dst_ops *ops);
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index e584de16e4c3..6d67383a5114 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -58,7 +58,7 @@ struct fib_rules_ops {
struct sk_buff *,
struct fib_rule_hdr *,
struct nlattr **);
- void (*delete)(struct fib_rule *);
+ int (*delete)(struct fib_rule *);
int (*compare)(struct fib_rule *,
struct fib_rule_hdr *,
struct nlattr **);
@@ -95,17 +95,10 @@ static inline void fib_rule_get(struct fib_rule *rule)
atomic_inc(&rule->refcnt);
}
-static inline void fib_rule_put_rcu(struct rcu_head *head)
-{
- struct fib_rule *rule = container_of(head, struct fib_rule, rcu);
- release_net(rule->fr_net);
- kfree(rule);
-}
-
static inline void fib_rule_put(struct fib_rule *rule)
{
if (atomic_dec_and_test(&rule->refcnt))
- call_rcu(&rule->rcu, fib_rule_put_rcu);
+ kfree_rcu(rule, rcu);
}
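The replacement of the open-coded call_rcu() callback with kfree_rcu() is the generic pattern for objects whose RCU callback only frees them. A hedged, standalone sketch of the same pattern on a made-up structure:

/* Illustrative only: free-after-grace-period on the last reference drop. */
struct example_obj {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	/* ... payload ... */
};

static inline void example_obj_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		kfree_rcu(obj, rcu);	/* freed after an RCU grace period */
}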
static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 0574abd3db86..a9af1cc8c1bc 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -92,9 +92,7 @@ struct genl_info {
struct genlmsghdr * genlhdr;
void * userhdr;
struct nlattr ** attrs;
-#ifdef CONFIG_NET_NS
- struct net * _net;
-#endif
+ possible_net_t _net;
void * user_ptr[2];
struct sock * dst_sk;
};
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 98e5f9578f86..1c8b6820b694 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -41,18 +41,18 @@ enum {
struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
-
+
/* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
__u32 valid_lft;
__u32 prefered_lft;
atomic_t refcnt;
spinlock_t lock;
- spinlock_t state_lock;
int state;
__u32 flags;
__u8 dad_probes;
+ __u8 stable_privacy_retry;
__u16 scope;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 74af137304be..6d539e4e5ba7 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -28,8 +28,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req);
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
const __be16 rport,
const struct in6_addr *raddr,
const struct in6_addr *laddr,
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 9201afe083fa..7ff588ca6817 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -38,8 +38,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
return jhash_3words(lhash, fhash, ports, initval);
}
-int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
-
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
* we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b2828a06a5a6..4a92423eefa5 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -21,12 +21,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size);
+int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
-int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int flags);
+int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
void inet_sock_destruct(struct sock *sk);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5976bdecf58b..48a815823587 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -126,6 +126,8 @@ struct inet_connection_sock {
/* Information on the current probe. */
int probe_size;
+
+ u32 probe_timestamp;
} icsk_mtup;
u32 icsk_ca_priv[16];
u32 icsk_user_timeout;
@@ -254,8 +256,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
-struct request_sock *inet_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
+struct request_sock *inet_csk_search_req(struct sock *sk,
const __be16 rport,
const __be32 raddr,
const __be32 laddr);
@@ -278,18 +279,10 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
-static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
- struct request_sock *req)
-{
- if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
- inet_csk_delete_keepalive_timer(sk);
-}
-
static inline void inet_csk_reqsk_queue_added(struct sock *sk,
const unsigned long timeout)
{
- if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
- inet_csk_reset_keepalive_timer(sk, timeout);
+ reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}
static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
@@ -307,26 +300,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
}
-static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
- struct request_sock *req,
- struct request_sock **prev)
-{
- reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
-}
-
-static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
- struct request_sock *req,
- struct request_sock **prev)
-{
- inet_csk_reqsk_queue_unlink(sk, req, prev);
- inet_csk_reqsk_queue_removed(sk, req);
- reqsk_free(req);
-}
-
-void inet_csk_reqsk_queue_prune(struct sock *parent,
- const unsigned long interval,
- const unsigned long timeout,
- const unsigned long max_rto);
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index dd1950a7e273..73fe0f9525d9 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -76,9 +76,7 @@ struct inet_ehash_bucket {
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
struct inet_bind_bucket {
-#ifdef CONFIG_NET_NS
- struct net *ib_net;
-#endif
+ possible_net_t ib_net;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
@@ -223,8 +221,8 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
-static inline int inet_bhashfn(struct net *net, const __u16 lport,
- const int bhash_size)
+static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
+ const u32 bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
@@ -233,7 +231,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum);
/* These can have wildcards, don't try too hard. */
-static inline int inet_lhashfn(struct net *net, const unsigned short num)
+static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}
@@ -251,6 +249,7 @@ void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
@@ -385,13 +384,32 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
iph->daddr, dport, inet_iif(skb));
}
+u32 sk_ehashfn(const struct sock *sk);
+u32 inet6_ehashfn(const struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport);
+
+static inline void sk_daddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_daddr = addr; /* alias of inet_daddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
+#endif
+}
+
+static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
+#endif
+}
+
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
- struct inet_timewait_sock **),
- int (*hash)(struct sock *sk,
- struct inet_timewait_sock *twp));
+ struct inet_timewait_sock **));
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index eb16c7beed1e..b6c3737da4e9 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -27,6 +27,7 @@
#include <net/sock.h>
#include <net/request_sock.h>
#include <net/netns/hash.h>
+#include <net/tcp_states.h>
/** struct ip_options - IP Options
*
@@ -77,6 +78,10 @@ struct inet_request_sock {
#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
#define ir_iif req.__req_common.skc_bound_dev_if
+#define ir_cookie req.__req_common.skc_cookie
+#define ireq_net req.__req_common.skc_net
+#define ireq_state req.__req_common.skc_state
+#define ireq_family req.__req_common.skc_family
kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
@@ -88,11 +93,11 @@ struct inet_request_sock {
acked : 1,
no_srccheck: 1;
kmemcheck_bitfield_end(flags);
+ u32 ir_mark;
union {
struct ip_options_rcu *opt;
struct sk_buff *pktopts;
};
- u32 ir_mark;
};
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -100,13 +105,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
return (struct inet_request_sock *)sk;
}
-static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
- if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
+ if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
return skb->mark;
- } else {
- return sk->sk_mark;
- }
+
+ return sk->sk_mark;
}
struct inet_cork {
@@ -239,18 +243,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr,
initval);
}
-static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
-{
- struct request_sock *req = reqsk_alloc(ops);
- struct inet_request_sock *ireq = inet_rsk(req);
-
- if (req != NULL) {
- kmemcheck_annotate_bitfield(ireq, flags);
- ireq->opt = NULL;
- }
-
- return req;
-}
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener);
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 6c566034e26d..360c4802288d 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -31,67 +31,14 @@
struct inet_hashinfo;
-#define INET_TWDR_RECYCLE_SLOTS_LOG 5
-#define INET_TWDR_RECYCLE_SLOTS (1 << INET_TWDR_RECYCLE_SLOTS_LOG)
-
-/*
- * If time > 4sec, it is "slow" path, no recycling is required,
- * so that we select tick to get range about 4 seconds.
- */
-#if HZ <= 16 || HZ > 4096
-# error Unsupported: HZ <= 16 or HZ > 4096
-#elif HZ <= 32
-# define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 64
-# define INET_TWDR_RECYCLE_TICK (6 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 128
-# define INET_TWDR_RECYCLE_TICK (7 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 256
-# define INET_TWDR_RECYCLE_TICK (8 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 512
-# define INET_TWDR_RECYCLE_TICK (9 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 1024
-# define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 2048
-# define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#else
-# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#endif
-
-static inline u32 inet_tw_time_stamp(void)
-{
- return jiffies;
-}
-
-/* TIME_WAIT reaping mechanism. */
-#define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
-
-#define INET_TWDR_TWKILL_QUOTA 100
-
struct inet_timewait_death_row {
- /* Short-time timewait calendar */
- int twcal_hand;
- unsigned long twcal_jiffie;
- struct timer_list twcal_timer;
- struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS];
-
- spinlock_t death_lock;
- int tw_count;
- int period;
- u32 thread_slots;
- struct work_struct twkill_work;
- struct timer_list tw_timer;
- int slot;
- struct hlist_head cells[INET_TWDR_TWKILL_SLOTS];
- struct inet_hashinfo *hashinfo;
+ atomic_t tw_count;
+
+ struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
int sysctl_tw_recycle;
int sysctl_max_tw_buckets;
};
-void inet_twdr_hangman(unsigned long data);
-void inet_twdr_twkill_work(struct work_struct *work);
-void inet_twdr_twcal_tick(unsigned long data);
-
struct inet_bind_bucket;
/*
@@ -122,6 +69,7 @@ struct inet_timewait_sock {
#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
#define tw_dport __tw_common.skc_dport
#define tw_num __tw_common.skc_num
+#define tw_cookie __tw_common.skc_cookie
int tw_timeout;
volatile unsigned char tw_substate;
@@ -132,52 +80,18 @@ struct inet_timewait_sock {
__be16 tw_sport;
kmemcheck_bitfield_begin(flags);
/* And these are ours. */
- unsigned int tw_pad0 : 1, /* 1 bit hole */
+ unsigned int tw_kill : 1,
tw_transparent : 1,
tw_flowlabel : 20,
tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
kmemcheck_bitfield_end(flags);
- u32 tw_ttd;
+ struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
- struct hlist_node tw_death_node;
+ struct inet_timewait_death_row *tw_dr;
};
#define tw_tclass tw_tos
-static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
-{
- return !hlist_unhashed(&tw->tw_death_node);
-}
-
-static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
-{
- tw->tw_death_node.pprev = NULL;
-}
-
-static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
- __hlist_del(&tw->tw_death_node);
- inet_twsk_dead_node_init(tw);
-}
-
-static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
- if (inet_twsk_dead_hashed(tw)) {
- __inet_twsk_del_dead_node(tw);
- return 1;
- }
- return 0;
-}
-
-#define inet_twsk_for_each(tw, node, head) \
- hlist_nulls_for_each_entry(tw, node, head, tw_node)
-
-#define inet_twsk_for_each_inmate(tw, jail) \
- hlist_for_each_entry(tw, jail, tw_death_node)
-
-#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
- hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
-
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
@@ -192,16 +106,14 @@ int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ struct inet_timewait_death_row *dr,
const int state);
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo);
-void inet_twsk_schedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr,
- const int timeo, const int timewait_len);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 80479abddf73..d5332ddcea3f 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -19,6 +19,7 @@ struct inetpeer_addr_base {
union {
__be32 a4;
__be32 a6[4];
+ struct in6_addr in6;
};
};
@@ -151,7 +152,7 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
{
struct inetpeer_addr daddr;
- *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
+ daddr.addr.in6 = *v6daddr;
daddr.family = AF_INET6;
return inet_getpeer(base, &daddr, create);
}
diff --git a/include/net/ip.h b/include/net/ip.h
index 6cc1eafb153a..d14af7edd197 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -108,7 +108,8 @@ int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
int ip_do_nat(struct sk_buff *skb);
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct sk_buff *skb);
@@ -318,9 +319,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
}
u32 ip_idents_reserve(u32 hash, int segs);
-void __ip_select_ident(struct iphdr *iph, int segs);
+void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
-static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+ struct sock *sk, int segs)
{
struct iphdr *iph = ip_hdr(skb);
@@ -337,13 +339,14 @@ static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, in
iph->id = 0;
}
} else {
- __ip_select_ident(iph, segs);
+ __ip_select_ident(net, iph, segs);
}
}
-static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
+static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
+ struct sock *sk)
{
- ip_select_ident_segs(skb, sk, 1);
+ ip_select_ident_segs(net, skb, sk, 1);
}
static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index eda131d179d9..5e192068e6cb 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -170,7 +170,8 @@ static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
return rt->rt6i_flags & RTF_ANYCAST;
}
-int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip6_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 76c091b53dae..b8529aa1dae7 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -71,14 +71,16 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
+int ip6_tnl_get_iflink(const struct net_device *dev);
-static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+ struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
int pkt_len, err;
pkt_len = skb->len;
- err = ip6_local_out(skb);
+ err = ip6_local_out_sk(sk, skb);
if (net_xmit_eval(err) == 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5bd120e4bc0a..54271ed0ed45 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -136,7 +136,7 @@ struct fib_result {
u32 tclassid;
struct fib_info *fi;
struct fib_table *table;
- struct list_head *fa_head;
+ struct hlist_head *fa_head;
};
struct fib_result_nl {
@@ -185,7 +185,9 @@ struct fib_table {
u32 tb_id;
int tb_default;
int tb_num_default;
- unsigned long tb_data[0];
+ struct rcu_head rcu;
+ unsigned long *tb_data;
+ unsigned long __data[0];
};
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
@@ -195,10 +197,10 @@ int fib_table_delete(struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
int fib_table_flush(struct fib_table *table);
+struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
-
-
#ifndef CONFIG_IP_MULTIPLE_TABLES
#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
@@ -206,12 +208,16 @@ void fib_free_table(struct fib_table *tb);
static inline struct fib_table *fib_get_table(struct net *net, u32 id)
{
+ struct hlist_node *tb_hlist;
struct hlist_head *ptr;
ptr = id == RT_TABLE_LOCAL ?
&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];
- return hlist_entry(ptr->first, struct fib_table, tb_hlist);
+
+ tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));
+
+ return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
}
static inline struct fib_table *fib_new_table(struct net *net, u32 id)
@@ -222,14 +228,13 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
struct fib_result *res)
{
+ struct fib_table *tb;
int err = -ENETUNREACH;
rcu_read_lock();
- if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res,
- FIB_LOOKUP_NOREF) ||
- !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res,
- FIB_LOOKUP_NOREF))
+ tb = fib_get_table(net, RT_TABLE_MAIN);
+ if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
err = 0;
rcu_read_unlock();
@@ -249,28 +254,29 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
static inline int fib_lookup(struct net *net, struct flowi4 *flp,
struct fib_result *res)
{
- if (!net->ipv4.fib_has_custom_rules) {
- int err = -ENETUNREACH;
-
- rcu_read_lock();
-
- res->tclassid = 0;
- if ((net->ipv4.fib_local &&
- !fib_table_lookup(net->ipv4.fib_local, flp, res,
- FIB_LOOKUP_NOREF)) ||
- (net->ipv4.fib_main &&
- !fib_table_lookup(net->ipv4.fib_main, flp, res,
- FIB_LOOKUP_NOREF)) ||
- (net->ipv4.fib_default &&
- !fib_table_lookup(net->ipv4.fib_default, flp, res,
- FIB_LOOKUP_NOREF)))
- err = 0;
-
- rcu_read_unlock();
-
- return err;
+ struct fib_table *tb;
+ int err;
+
+ if (net->ipv4.fib_has_custom_rules)
+ return __fib_lookup(net, flp, res);
+
+ rcu_read_lock();
+
+ res->tclassid = 0;
+
+ for (err = 0; !err; err = -ENETUNREACH) {
+ tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+ if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+ break;
+
+ tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+ if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+ break;
}
- return __fib_lookup(net, flp, res);
+
+ rcu_read_unlock();
+
+ return err;
}
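The for (err = 0; !err; err = -ENETUNREACH) construct above runs its body exactly once: err stays 0 when either lookup breaks out, and only becomes -ENETUNREACH when the body falls through without a match. A small standalone C sketch of that idiom (the flags are stand-ins for the table lookups):

#include <errno.h>
#include <stdio.h>

/* Illustrative only: the single-pass for() idiom used in fib_lookup(). */
static int example_lookup(int main_hit, int default_hit)
{
	int err;

	for (err = 0; !err; err = -ENETUNREACH) {
		if (main_hit)
			break;		/* err stays 0 */
		if (default_hit)
			break;		/* err stays 0 */
	}				/* fell through: err = -ENETUNREACH */

	return err;
}

int main(void)
{
	printf("%d %d\n", example_lookup(1, 0), example_lookup(0, 0));
	return 0;
}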
#endif /* CONFIG_IP_MULTIPLE_TABLES */
@@ -294,6 +300,8 @@ static inline int fib_num_tclassid_users(struct net *net)
return 0;
}
#endif
+int fib_unmerge(struct net *net);
+void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
@@ -304,7 +312,7 @@ void fib_select_multipath(struct fib_result *res);
/* Exported by fib_trie.c */
void fib_trie_init(void);
-struct fib_table *fib_trie_table(u32 id);
+struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 2c47061a6954..d8214cb88bbc 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -142,6 +142,7 @@ int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
+int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 615b20b58545..4e3731ee4eac 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -47,13 +47,13 @@ static inline struct net *skb_net(const struct sk_buff *skb)
* Start with the most likely hit
* End with BUG
*/
- if (likely(skb->dev && skb->dev->nd_net))
+ if (likely(skb->dev && dev_net(skb->dev)))
return dev_net(skb->dev);
if (skb_dst(skb) && skb_dst(skb)->dev)
return dev_net(skb_dst(skb)->dev);
WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
__func__, __LINE__);
- if (likely(skb->sk && skb->sk->sk_net))
+ if (likely(skb->sk && sock_net(skb->sk)))
return sock_net(skb->sk);
pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
__func__, __LINE__);
@@ -71,11 +71,11 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
#ifdef CONFIG_NET_NS
#ifdef CONFIG_IP_VS_DEBUG
/* Start with the most likely hit */
- if (likely(skb->sk && skb->sk->sk_net))
+ if (likely(skb->sk && sock_net(skb->sk)))
return sock_net(skb->sk);
WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
__func__, __LINE__);
- if (likely(skb->dev && skb->dev->nd_net))
+ if (likely(skb->dev && dev_net(skb->dev)))
return dev_net(skb->dev);
pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
__func__, __LINE__);
@@ -365,15 +365,15 @@ struct ip_vs_seq {
/* counters per cpu */
struct ip_vs_counters {
- __u32 conns; /* connections scheduled */
- __u32 inpkts; /* incoming packets */
- __u32 outpkts; /* outgoing packets */
+ __u64 conns; /* connections scheduled */
+ __u64 inpkts; /* incoming packets */
+ __u64 outpkts; /* outgoing packets */
__u64 inbytes; /* incoming bytes */
__u64 outbytes; /* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
- struct ip_vs_counters ustats;
+ struct ip_vs_counters cnt;
struct u64_stats_sync syncp;
};
@@ -383,23 +383,40 @@ struct ip_vs_estimator {
u64 last_inbytes;
u64 last_outbytes;
- u32 last_conns;
- u32 last_inpkts;
- u32 last_outpkts;
-
- u32 cps;
- u32 inpps;
- u32 outpps;
- u32 inbps;
- u32 outbps;
+ u64 last_conns;
+ u64 last_inpkts;
+ u64 last_outpkts;
+
+ u64 cps;
+ u64 inpps;
+ u64 outpps;
+ u64 inbps;
+ u64 outbps;
+};
+
+/*
+ * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
+ */
+struct ip_vs_kstats {
+ u64 conns; /* connections scheduled */
+ u64 inpkts; /* incoming packets */
+ u64 outpkts; /* outgoing packets */
+ u64 inbytes; /* incoming bytes */
+ u64 outbytes; /* outgoing bytes */
+
+ u64 cps; /* current connection rate */
+ u64 inpps; /* current in packet rate */
+ u64 outpps; /* current out packet rate */
+ u64 inbps; /* current in byte rate */
+ u64 outbps; /* current out byte rate */
};
struct ip_vs_stats {
- struct ip_vs_stats_user ustats; /* statistics */
+ struct ip_vs_kstats kstats; /* kernel statistics */
struct ip_vs_estimator est; /* estimator */
struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */
spinlock_t lock; /* spin lock */
- struct ip_vs_stats_user ustats0; /* reset values */
+ struct ip_vs_kstats kstats0; /* reset values */
};
struct dst_entry;
@@ -924,6 +941,7 @@ struct netns_ipvs {
int sysctl_nat_icmp_send;
int sysctl_pmtu_disc;
int sysctl_backup_only;
+ int sysctl_conn_reuse_mode;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -1042,6 +1060,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
ipvs->sysctl_backup_only;
}
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_conn_reuse_mode;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1109,6 +1132,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
#endif
/* IPVS core functions
@@ -1388,8 +1416,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
- struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4c9fe224d73b..eec8ad3c9843 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -47,8 +47,6 @@
#define NEXTHDR_MAX 255
-
-
#define IPV6_DEFAULT_HOPLIMIT 64
#define IPV6_DEFAULT_MCASTHOPS 1
@@ -671,8 +669,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
-void ipv6_proxy_select_ident(struct sk_buff *skb);
+void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
+ struct rt6_info *rt);
+void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -768,7 +767,7 @@ static inline u8 ip6_tclass(__be32 flowinfo)
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
-int ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb);
/*
* upper-layer output functions
@@ -826,6 +825,7 @@ int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);
int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct sk_buff *skb);
/*
@@ -940,4 +940,8 @@ int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
#endif /* _NET_IPV6_H */
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index a830b01baba4..8f81bbbc38fc 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -519,6 +519,17 @@ iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends,
return stream;
}
+static inline char *
+iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, int event_len)
+{
+ char *res = iwe_stream_add_event(info, stream, ends, iwe, event_len);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
+
/*------------------------------------------------------------------*/
/*
* Wrapper to add an short Wireless Event containing a pointer to a
@@ -545,6 +556,17 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
return stream;
}
+static inline char *
+iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, char *extra)
+{
+ char *res = iwe_stream_add_point(info, stream, ends, iwe, extra);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
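The _check variants return ERR_PTR(-E2BIG) instead of silently handing back the unchanged stream pointer, so scan-dump code can abort and report the overflow. A hedged caller sketch (helper name and event length are assumptions):

/* Illustrative only: add one event, propagating buffer exhaustion. */
static int example_add_entry(struct iw_request_info *info, char **current_ev,
			     char *end_buf, struct iw_event *iwe)
{
	char *next;

	next = iwe_stream_add_event_check(info, *current_ev, end_buf,
					  iwe, IW_EV_ADDR_LEN);
	if (IS_ERR(next))
		return PTR_ERR(next);	/* -E2BIG: buffer full */

	*current_ev = next;
	return 0;
}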
+
/*------------------------------------------------------------------*/
/*
* Wrapper to add a value to a Wireless Event in a stream of events.
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d52914b75331..8e3668b44c29 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -84,6 +84,39 @@
*
*/
+/**
+ * DOC: mac80211 software tx queueing
+ *
+ * mac80211 provides an optional intermediate queueing implementation designed
+ * to allow the driver to keep hardware queues short and provide some fairness
+ * between different stations/interfaces.
+ * In this model, the driver pulls data frames from the mac80211 queue instead
+ * of letting mac80211 push them via drv_tx().
+ * Other frames (e.g. control or management) are still pushed using drv_tx().
+ *
+ * Drivers indicate that they use this model by implementing the .wake_tx_queue
+ * driver operation.
+ *
+ * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a
+ * single per-vif queue for multicast data frames.
+ *
+ * The driver is expected to initialize its private per-queue data for stations
+ * and interfaces in the .add_interface and .sta_add ops.
+ *
+ * The driver can't access the queue directly. To dequeue a frame, it calls
+ * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it
+ * calls the .wake_tx_queue driver op.
+ *
+ * For AP powersave TIM handling, the driver only needs to indicate if it has
+ * buffered packets in the driver specific data structures by calling
+ * ieee80211_sta_set_buffered(). For frames buffered in the ieee80211_txq
+ * struct, mac80211 sets the appropriate TIM PVB bits and calls
+ * .release_buffered_frames().
+ * In that callback the driver is therefore expected to release its own
+ * buffered frames and afterwards also frames from the ieee80211_txq (obtained
+ * via the usual ieee80211_tx_dequeue).
+ */
+
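A hedged driver-side sketch of the model described above: the wake_tx_queue op pulls frames with ieee80211_tx_dequeue(). Real drivers would usually defer to their own scheduler; the inline drain and the hardware-queue helper below are placeholders:

/* Illustrative only: minimal wake_tx_queue handling. */
static void example_wake_tx_queue(struct ieee80211_hw *hw,
				  struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	while ((skb = ieee80211_tx_dequeue(hw, txq)))
		example_hw_queue_frame(hw, skb);	/* hypothetical helper */
}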
struct device;
/**
@@ -301,17 +334,86 @@ enum ieee80211_bss_change {
#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
/**
- * enum ieee80211_rssi_event - RSSI threshold event
- * An indicator for when RSSI goes below/above a certain threshold.
- * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver.
- * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver.
+ * enum ieee80211_event_type - event to be notified to the low level driver
+ * @RSSI_EVENT: AP's rssi crossed a threshold set by the driver.
+ * @MLME_EVENT: event related to MLME
*/
-enum ieee80211_rssi_event {
+enum ieee80211_event_type {
+ RSSI_EVENT,
+ MLME_EVENT,
+};
+
+/**
+ * enum ieee80211_rssi_event_data - relevant when event type is %RSSI_EVENT
+ * @RSSI_EVENT_HIGH: AP's rssi went above the threshold set by the driver.
+ * @RSSI_EVENT_LOW: AP's rssi went below the threshold set by the driver.
+ */
+enum ieee80211_rssi_event_data {
RSSI_EVENT_HIGH,
RSSI_EVENT_LOW,
};
/**
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * @data: See &enum ieee80211_rssi_event_data
+ */
+struct ieee80211_rssi_event {
+ enum ieee80211_rssi_event_data data;
+};
+
+/**
+ * enum ieee80211_mlme_event_data - relevant when event type is %MLME_EVENT
+ * @AUTH_EVENT: the MLME operation is authentication
+ * @ASSOC_EVENT: the MLME operation is association
+ * @DEAUTH_RX_EVENT: deauth received.
+ * @DEAUTH_TX_EVENT: deauth sent.
+ */
+enum ieee80211_mlme_event_data {
+ AUTH_EVENT,
+ ASSOC_EVENT,
+ DEAUTH_RX_EVENT,
+ DEAUTH_TX_EVENT,
+};
+
+/**
+ * enum ieee80211_mlme_event_status - relevant when event type is %MLME_EVENT
+ * @MLME_SUCCESS: the MLME operation completed successfully.
+ * @MLME_DENIED: the MLME operation was denied by the peer.
+ * @MLME_TIMEOUT: the MLME operation timed out.
+ */
+enum ieee80211_mlme_event_status {
+ MLME_SUCCESS,
+ MLME_DENIED,
+ MLME_TIMEOUT,
+};
+
+/**
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * @data: See &enum ieee80211_mlme_event_data
+ * @status: See &enum ieee80211_mlme_event_status
+ * @reason: the reason code if applicable
+ */
+struct ieee80211_mlme_event {
+ enum ieee80211_mlme_event_data data;
+ enum ieee80211_mlme_event_status status;
+ u16 reason;
+};
+
+/**
+ * struct ieee80211_event - event to be sent to the driver
+ * @type: The event itself. See &enum ieee80211_event_type.
+ * @rssi: relevant if &type is %RSSI_EVENT
+ * @mlme: relevant if &type is %MLME_EVENT
+ */
+struct ieee80211_event {
+ enum ieee80211_event_type type;
+ union {
+ struct ieee80211_rssi_event rssi;
+ struct ieee80211_mlme_event mlme;
+ } u;
+};
+
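A sketch of how a driver's .event_callback might consume this union; my_start_roam_scan() and my_note_auth_timeout() are invented reactions, not mac80211 API:

#include <net/mac80211.h>

void my_start_roam_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void my_note_auth_timeout(struct ieee80211_hw *hw, struct ieee80211_vif *vif);

static void my_event_callback(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      const struct ieee80211_event *event)
{
	switch (event->type) {
	case RSSI_EVENT:
		if (event->u.rssi.data == RSSI_EVENT_LOW)
			my_start_roam_scan(hw, vif);
		break;
	case MLME_EVENT:
		if (event->u.mlme.data == AUTH_EVENT &&
		    event->u.mlme.status == MLME_TIMEOUT)
			my_note_auth_timeout(hw, vif);
		break;
	}
}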
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -337,12 +439,15 @@ enum ieee80211_rssi_event {
* HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
* only come from a beacon, but might not become valid until after
* association when a beacon is received (which is notified with the
- * %BSS_CHANGED_DTIM flag.)
+ * %BSS_CHANGED_DTIM flag.) See also the important note on @sync_dtim_count.
* @sync_device_ts: the device timestamp corresponding to the sync_tsf,
* the driver/device can use this to calculate synchronisation
- * (see @sync_tsf)
+ * (see @sync_tsf). See also the important note on @sync_dtim_count.
* @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY
* is requested, see @sync_tsf/@sync_device_ts.
+ * IMPORTANT: These three sync_* parameters may already be out of sync
+ * by the time the driver uses them. The synchronized view is currently
+ * only guaranteed in certain callbacks.
* @beacon_int: beacon interval
* @assoc_capability: capabilities taken from assoc resp
* @basic_rates: bitmap of basic rates, each bit stands for an
@@ -1234,6 +1339,7 @@ enum ieee80211_vif_flags {
* monitor interface (if that is requested.)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *).
+ * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1245,6 +1351,8 @@ struct ieee80211_vif {
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
+ struct ieee80211_txq *txq;
+
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
u32 driver_flags;
@@ -1279,6 +1387,19 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
/**
+ * ieee80211_vif_to_wdev - return a wdev struct from a vif
+ * @vif: the vif to get the wdev for
+ *
+ * This can be used by mac80211 drivers with direct cfg80211 APIs
+ * (like the vendor commands) that need to get the wdev for a vif.
+ *
+ * Note that this function may return %NULL if the given vif isn't
+ * associated with a wdev that mac80211 knows about (e.g. monitor or
+ * AP_VLAN interfaces).
+ */
+struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
+
+/**
* enum ieee80211_key_flags - key flags
*
* These flags are used for communication about keys between the driver
@@ -1472,7 +1593,8 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @wme: indicates whether the STA supports QoS/WME.
+ * @wme: indicates whether the STA supports QoS/WME (if the local device
+ * does; otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
@@ -1488,6 +1610,8 @@ struct ieee80211_sta_rates {
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
+ * @mfp: indicates whether the STA uses management frame protection or not.
+ * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1504,6 +1628,9 @@ struct ieee80211_sta {
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
+ bool mfp;
+
+ struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1533,6 +1660,29 @@ struct ieee80211_tx_control {
};
/**
+ * struct ieee80211_txq - Software intermediate tx queue
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @sta: station table entry, %NULL for per-vif queue
+ * @tid: the TID for this queue (unused for per-vif queue)
+ * @ac: the AC for this queue
+ * @drv_priv: data area for driver use, will always be aligned to
+ * sizeof(void *).
+ *
+ * The driver can obtain packets from this queue by calling
+ * ieee80211_tx_dequeue().
+ */
+struct ieee80211_txq {
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ u8 tid;
+ u8 ac;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
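As with the other drv_priv areas, the per-queue driver data is sized up front. A sketch assuming a made-up struct my_txq:

#include <net/mac80211.h>

/* struct my_txq is a hypothetical driver structure living in txq->drv_priv. */
struct my_txq {
	struct list_head sched_list;	/* e.g. a driver scheduling hook */
	int deficit;
};

static inline struct my_txq *to_my_txq(struct ieee80211_txq *txq)
{
	return (struct my_txq *)txq->drv_priv;
}

/* Called from the driver's probe path, before ieee80211_register_hw(). */
static void my_setup_txqs(struct ieee80211_hw *hw)
{
	hw->txq_data_size = sizeof(struct my_txq);
	hw->txq_ac_max_pending = 128;	/* arbitrary example limit */
}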
+/**
* enum ieee80211_hw_flags - hardware flags
*
* These flags are used to indicate hardware capabilities to
@@ -1756,6 +1906,8 @@ enum ieee80211_hw_flags {
* within &struct ieee80211_sta.
* @chanctx_data_size: size (in bytes) of the drv_priv data area
* within &struct ieee80211_chanctx_conf.
+ * @txq_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_txq.
*
* @max_rates: maximum number of alternate rate retry stages the hw
* can handle.
@@ -1804,6 +1956,9 @@ enum ieee80211_hw_flags {
* @n_cipher_schemes: a size of an array of cipher schemes definitions.
* @cipher_schemes: a pointer to an array of cipher scheme definitions
* supported by HW.
+ *
+ * @txq_ac_max_pending: maximum number of frames per AC pending in all txq
+ * entries for a vif.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -1816,6 +1971,7 @@ struct ieee80211_hw {
int vif_data_size;
int sta_data_size;
int chanctx_data_size;
+ int txq_data_size;
u16 queues;
u16 max_listen_interval;
s8 max_signal;
@@ -1832,6 +1988,7 @@ struct ieee80211_hw {
u8 uapsd_max_sp_len;
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
+ int txq_ac_max_pending;
};
/**
@@ -2844,8 +3001,9 @@ enum ieee80211_reconfig_type {
* @set_bitrate_mask: Set a mask of rates to be used for rate control selection
* when transmitting a frame. Currently only legacy rates are handled.
* The callback can sleep.
- * @rssi_callback: Notify driver when the average RSSI goes above/below
- * thresholds that were registered previously. The callback can sleep.
+ * @event_callback: Notify driver about any event in mac80211. See
+ * &enum ieee80211_event_type for the different types.
+ * The callback can sleep.
*
* @release_buffered_frames: Release buffered frames according to the given
* parameters. In the case where the driver buffers some frames for
@@ -2993,6 +3151,8 @@ enum ieee80211_reconfig_type {
* response template is provided, together with the location of the
* switch-timing IE within the template. The skb can only be used within
* the function call.
+ *
+ * @wake_tx_queue: Called when new packets have been added to the queue.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3141,9 +3301,9 @@ struct ieee80211_ops {
bool (*tx_frames_pending)(struct ieee80211_hw *hw);
int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
- void (*rssi_callback)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event);
+ void (*event_callback)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event);
void (*allow_buffered_frames)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -3224,6 +3384,9 @@ struct ieee80211_ops {
void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_tdls_ch_sw_params *params);
+
+ void (*wake_tx_queue)(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
};
/**
@@ -4343,13 +4506,33 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
* haven't been re-added to the driver yet.
* @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
* interfaces, even if they haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
*/
enum ieee80211_interface_iteration_flags {
IEEE80211_IFACE_ITER_NORMAL = 0,
IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
+ IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
};
/**
+ * ieee80211_iterate_interfaces - iterate interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware and calls the callback for them. This includes active as well as
+ * inactive interfaces. This function allows the iterator function to sleep.
+ * Will iterate over a new interface during add_interface().
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
* ieee80211_iterate_active_interfaces - iterate active interfaces
*
* This function iterates over the interfaces associated with a given
@@ -4364,11 +4547,16 @@ enum ieee80211_interface_iteration_flags {
* @iterator: the iterator function to call
* @data: first argument of the iterator function
*/
-void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
- u32 iter_flags,
- void (*iterator)(void *data, u8 *mac,
- struct ieee80211_vif *vif),
- void *data);
+static inline void
+ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
+{
+ ieee80211_iterate_interfaces(hw,
+ iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+ iterator, data);
+}
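With the wrapper above, iterating active interfaces is just the general iterator plus IEEE80211_IFACE_ITER_ACTIVE. A small usage sketch, counting station interfaces that are currently up:

#include <net/mac80211.h>

static void my_count_sta_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	int *count = data;

	if (vif->type == NL80211_IFTYPE_STATION)
		(*count)++;
}

static int my_count_station_vifs(struct ieee80211_hw *hw)
{
	int count = 0;

	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_NORMAL,
					    my_count_sta_iter, &count);
	return count;
}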
/**
* ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
@@ -5194,30 +5382,13 @@ int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid);
void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
/**
- * ieee80211_ie_split - split an IE buffer according to ordering
- *
- * @ies: the IE buffer
- * @ielen: the length of the IE buffer
- * @ids: an array with element IDs that are allowed before
- * the split
- * @n_ids: the size of the element ID array
- * @offset: offset where to start splitting in the buffer
- *
- * This function splits an IE buffer by updating the @offset
- * variable to point to the location where the buffer should be
- * split.
+ * ieee80211_tx_dequeue - dequeue a packet from a software tx queue
*
- * It assumes that the given IE buffer is well-formed, this
- * has to be guaranteed by the caller!
- *
- * It also assumes that the IEs in the buffer are ordered
- * correctly, if not the result of using this function will not
- * be ordered correctly either, i.e. it does no reordering.
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
*
- * The function returns the offset where the next part of the
- * buffer starts, which may be @ielen if the entire (remainder)
- * of the buffer should be used.
+ * Returns the skb if successful, %NULL if no frame was available.
*/
-size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids, size_t offset);
+struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 850647811749..7df28a4c23f9 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -19,6 +19,7 @@
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
+#include <linux/unaligned/memmove.h>
#include <net/cfg802154.h>
@@ -212,7 +213,7 @@ struct ieee802154_ops {
int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed);
- int (*set_txpower)(struct ieee802154_hw *hw, int db);
+ int (*set_txpower)(struct ieee802154_hw *hw, s8 dbm);
int (*set_lbt)(struct ieee802154_hw *hw, bool on);
int (*set_cca_mode)(struct ieee802154_hw *hw,
const struct wpan_phy_cca *cca);
@@ -233,9 +234,7 @@ struct ieee802154_ops {
*/
static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
{
- __le64 tmp = (__force __le64)swab64p(be64_src);
-
- memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+ __put_unaligned_memmove64(swab64p(be64_src), le64_dst);
}
/**
@@ -245,24 +244,112 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
*/
static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
{
- __be64 tmp = (__force __be64)swab64p(le64_src);
-
- memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+ __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
}
-/* Basic interface to register ieee802154 hwice */
+/**
+ * ieee802154_alloc_hw - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac802154 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee802154_hw; the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
struct ieee802154_hw *
ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
+
+/**
+ * ieee802154_free_hw - free hardware descriptor
+ *
+ * This function frees everything that was allocated, including the
+ * private data for the driver. You must call ieee802154_unregister_hw()
+ * before calling this function.
+ *
+ * @hw: the hardware to free
+ */
void ieee802154_free_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_register_hw - Register hardware device
+ *
+ * You must call this function before any other functions in
+ * mac802154. Note that before the hardware can be registered, you
+ * need to fill in the contained wpan_phy's information.
+ *
+ * @hw: the device to register as returned by ieee802154_alloc_hw()
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
int ieee802154_register_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_unregister_hw - Unregister a hardware device
+ *
+ * This function instructs mac802154 to free allocated resources
+ * and unregister netdevices from the networking subsystem.
+ *
+ * @hw: the hardware to unregister
+ */
void ieee802154_unregister_hw(struct ieee802154_hw *hw);
+/**
+ * ieee802154_rx - receive frame
+ *
+ * Use this function to hand received frames to mac802154. The receive
+ * buffer in @skb must start with an IEEE 802.15.4 header. If a paged
+ * @skb is used, the driver is advised to put the IEEE 802.15.4 header
+ * of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware device must be synchronized against each other.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ */
void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
+
+/**
+ * ieee802154_rx_irqsafe - receive frame
+ *
+ * Like ieee802154_rx() but can be called in IRQ context
+ * (internally defers to a tasklet.)
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ * @lqi: link quality indicator
+ */
void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi);
-
+/**
+ * ieee802154_wake_queue - wake ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_wake_queue.
+ */
void ieee802154_wake_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_stop_queue - stop ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_stop_queue.
+ */
void ieee802154_stop_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_xmit_complete - frame transmission complete
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @ifs_handling: indicate interframe space handling
+ */
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
bool ifs_handling);
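A rough sketch of the usual bring-up sequence these helpers describe; struct my_radio and my_radio_ops stand in for a real driver's data and callbacks:

#include <linux/errno.h>
#include <net/mac802154.h>

struct my_radio {
	int current_channel;
};

static int my_radio_probe(const struct ieee802154_ops *my_radio_ops)
{
	struct ieee802154_hw *hw;
	struct my_radio *priv;
	int ret;

	hw = ieee802154_alloc_hw(sizeof(*priv), my_radio_ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;		/* driver-private area */
	priv->current_channel = 11;

	/* Fill in hw->phy (supported channels, transmit power, ...) here,
	 * then register with the stack. */
	ret = ieee802154_register_hw(hw);
	if (ret)
		ieee802154_free_hw(hw);

	return ret;
}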
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 6bbda34d5e59..b3a7751251b4 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -156,24 +156,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
{
- struct neigh_hash_table *nht;
- const u32 *p32 = pkey;
- struct neighbour *n;
- u32 hash_val;
-
- nht = rcu_dereference_bh(nd_tbl.nht);
- hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- u32 *n32 = (u32 *) n->primary_key;
- if (n->dev == dev &&
- ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
- (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0)
- return n;
- }
-
- return NULL;
+ return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
}
static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 76f708486aae..bd33e66f49aa 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -42,6 +42,7 @@ enum {
NEIGH_VAR_MCAST_PROBES,
NEIGH_VAR_UCAST_PROBES,
NEIGH_VAR_APP_PROBES,
+ NEIGH_VAR_MCAST_REPROBES,
NEIGH_VAR_RETRANS_TIME,
NEIGH_VAR_BASE_REACHABLE_TIME,
NEIGH_VAR_DELAY_PROBE_TIME,
@@ -65,9 +66,7 @@ enum {
};
struct neigh_parms {
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
struct net_device *dev;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
@@ -167,9 +166,7 @@ struct neigh_ops {
struct pneigh_entry {
struct pneigh_entry *next;
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
struct net_device *dev;
u8 flags;
u8 key[0];
@@ -193,9 +190,11 @@ struct neigh_table {
int family;
int entry_size;
int key_len;
+ __be16 protocol;
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd);
+ bool (*key_eq)(const struct neighbour *, const void *pkey);
int (*constructor)(struct neighbour *);
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
@@ -224,6 +223,7 @@ enum {
NEIGH_ND_TABLE = 1,
NEIGH_DN_TABLE = 2,
NEIGH_NR_TABLES,
+ NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
};
static inline int neigh_parms_family(struct neigh_parms *p)
@@ -246,6 +246,57 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
+
+static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
+{
+ return *(const u16 *)n->primary_key == *(const u16 *)pkey;
+}
+
+static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
+{
+ return *(const u32 *)n->primary_key == *(const u32 *)pkey;
+}
+
+static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
+{
+ const u32 *n32 = (const u32 *)n->primary_key;
+ const u32 *p32 = pkey;
+
+ return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+ (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
+}
+
+static inline struct neighbour *___neigh_lookup_noref(
+ struct neigh_table *tbl,
+ bool (*key_eq)(const struct neighbour *n, const void *pkey),
+ __u32 (*hash)(const void *pkey,
+ const struct net_device *dev,
+ __u32 *hash_rnd),
+ const void *pkey,
+ struct net_device *dev)
+{
+ struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
+ struct neighbour *n;
+ u32 hash_val;
+
+ hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
+ if (n->dev == dev && key_eq(n, pkey))
+ return n;
+ }
+
+ return NULL;
+}
+
+static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev)
+{
+ return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
+}
+
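A sketch of how a caller is expected to use the generic lookup under RCU-BH protection, assuming a table whose .hash and .key_eq (here neigh_key_eq32 for a 32-bit key) were filled in before neigh_table_init():

#include <net/neighbour.h>

static struct neighbour *my_lookup(struct neigh_table *my_tbl,
				   const __be32 *addr,
				   struct net_device *dev)
{
	struct neighbour *n;

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(my_tbl, addr, dev);
	if (n && !atomic_inc_not_zero(&n->refcnt))
		n = NULL;	/* entry is being freed, treat as a miss */
	rcu_read_unlock_bh();

	return n;
}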
void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -268,7 +319,6 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
-int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
@@ -306,6 +356,7 @@ void neigh_for_each(struct neigh_table *tbl,
void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *));
+int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
void pneigh_for_each(struct neigh_table *tbl,
void (*cb)(struct pneigh_entry *));
@@ -459,4 +510,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
memcpy(dst, n->ha, dev->addr_len);
} while (read_seqretry(&n->ha_lock, seq));
}
+
+
#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 36faf4990c4b..f733656404de 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -26,6 +26,7 @@
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
+#include <net/netns/mpls.h>
#include <linux/ns_common.h>
struct user_namespace;
@@ -48,13 +49,10 @@ struct net {
atomic_t count; /* To decided when the network
* namespace should be shut down.
*/
-#ifdef NETNS_REFCNT_DEBUG
- atomic_t use_count; /* To track references we
- * destroy on demand
- */
-#endif
spinlock_t rules_mod_lock;
+ atomic64_t cookie_gen;
+
struct list_head list; /* list of network namespaces */
struct list_head cleanup_list; /* namespaces on death row */
struct list_head exit_list; /* Use only net_mutex */
@@ -130,6 +128,9 @@ struct net {
#if IS_ENABLED(CONFIG_IP_VS)
struct netns_ipvs *ipvs;
#endif
+#if IS_ENABLED(CONFIG_MPLS)
+ struct netns_mpls mpls;
+#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
};
@@ -230,48 +231,27 @@ int net_eq(const struct net *net1, const struct net *net2)
#endif
-#ifdef NETNS_REFCNT_DEBUG
-static inline struct net *hold_net(struct net *net)
-{
- if (net)
- atomic_inc(&net->use_count);
- return net;
-}
-
-static inline void release_net(struct net *net)
-{
- if (net)
- atomic_dec(&net->use_count);
-}
-#else
-static inline struct net *hold_net(struct net *net)
-{
- return net;
-}
-
-static inline void release_net(struct net *net)
-{
-}
-#endif
-
+typedef struct {
#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+} possible_net_t;
-static inline void write_pnet(struct net **pnet, struct net *net)
+static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
- *pnet = net;
+#ifdef CONFIG_NET_NS
+ pnet->net = net;
+#endif
}
-static inline struct net *read_pnet(struct net * const *pnet)
+static inline struct net *read_pnet(const possible_net_t *pnet)
{
- return *pnet;
-}
-
+#ifdef CONFIG_NET_NS
+ return pnet->net;
#else
-
-#define write_pnet(pnet, net) do { (void)(net);} while (0)
-#define read_pnet(pnet) (&init_net)
-
+ return &init_net;
#endif
+}
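A sketch of how possible_net_t is meant to be embedded and accessed; struct my_entry is hypothetical:

#include <net/net_namespace.h>

struct my_entry {
	possible_net_t net;
	int id;
};

static void my_entry_init(struct my_entry *e, struct net *net)
{
	write_pnet(&e->net, net);	/* stores nothing without CONFIG_NET_NS */
}

static bool my_entry_in_net(const struct my_entry *e, struct net *net)
{
	return net_eq(read_pnet(&e->net), net);	/* &init_net without NET_NS */
}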
#define for_each_net(VAR) \
list_for_each_entry(VAR, &net_namespace_list, list)
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 03e928a55229..77862c3645f0 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -5,18 +5,14 @@
#include <net/ip.h>
#include <net/icmp.h>
-static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
-{
- icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
-}
-
+void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
void nf_send_reset(struct sk_buff *oldskb, int hook);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int ttl);
+ __u8 protocol, int ttl);
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 23216d48abf9..0ea4fa37db16 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -3,15 +3,8 @@
#include <linux/icmpv6.h>
-static inline void
-nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
- unsigned int hooknum)
-{
- if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
- skb_in->dev = net->loopback_dev;
-
- icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
-}
+void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
+ unsigned int hooknum);
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
@@ -20,7 +13,7 @@ const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
unsigned int *otcplen, int hook);
struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int hoplimit);
+ __u8 protocol, int hoplimit);
void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 74f271a172dd..095433b8a8b0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -95,9 +95,8 @@ struct nf_conn {
/* Timer function; drops refcnt when it goes off. */
struct timer_list timeout;
-#ifdef CONFIG_NET_NS
- struct net *ct_net;
-#endif
+ possible_net_t ct_net;
+
/* all members below initialized via memset */
u8 __nfct_init_offset[0];
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 340c013795a4..a3127325f624 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -44,40 +44,32 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
unsigned int hooknum);
unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
@@ -85,40 +77,32 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
unsigned int hooknum, unsigned int hdrlen);
unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 84a53d780306..d81d584157e1 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -12,12 +12,8 @@ struct nf_queue_entry {
unsigned int id;
struct nf_hook_ops *elem;
- u_int8_t pf;
+ struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
- unsigned int hook;
- struct net_device *indev;
- struct net_device *outdev;
- int (*okfn)(struct sk_buff *);
/* extra space to store route keys */
};
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index decb9a095ae7..e6bcf55dcf20 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1,6 +1,7 @@
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H
+#include <linux/module.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@@ -26,40 +27,53 @@ struct nft_pktinfo {
static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
pkt->skb = skb;
- pkt->in = pkt->xt.in = in;
- pkt->out = pkt->xt.out = out;
+ pkt->in = pkt->xt.in = state->in;
+ pkt->out = pkt->xt.out = state->out;
pkt->ops = ops;
pkt->xt.hooknum = ops->hooknum;
pkt->xt.family = ops->pf;
}
+/**
+ * struct nft_verdict - nf_tables verdict
+ *
+ * @code: nf_tables/netfilter verdict code
+ * @chain: destination chain for NFT_JUMP/NFT_GOTO
+ */
+struct nft_verdict {
+ u32 code;
+ struct nft_chain *chain;
+};
+
struct nft_data {
union {
- u32 data[4];
- struct {
- u32 verdict;
- struct nft_chain *chain;
- };
+ u32 data[4];
+ struct nft_verdict verdict;
};
} __attribute__((aligned(__alignof__(u64))));
-static inline int nft_data_cmp(const struct nft_data *d1,
- const struct nft_data *d2,
- unsigned int len)
-{
- return memcmp(d1->data, d2->data, len);
-}
+/**
+ * struct nft_regs - nf_tables register set
+ *
+ * @data: data registers
+ * @verdict: verdict register
+ *
+ * The first four data registers alias to the verdict register.
+ */
+struct nft_regs {
+ union {
+ u32 data[20];
+ struct nft_verdict verdict;
+ };
+};
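A sketch of an expression ->eval() under this register layout; struct nft_my_expr and its dreg field are invented for the example:

#include <net/netfilter/nf_tables.h>

struct nft_my_expr {
	enum nft_registers dreg;
};

static void nft_my_expr_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_my_expr *priv = nft_expr_priv(expr);

	if (pkt->skb->len > 1500) {
		regs->verdict.code = NFT_BREAK;	/* stop evaluating this rule */
		return;
	}

	/* store a value for a later expression to consume */
	regs->data[priv->dreg] = pkt->skb->len;
}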
-static inline void nft_data_copy(struct nft_data *dst,
- const struct nft_data *src)
+static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
+ unsigned int len)
{
- BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
- *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
- *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+ memcpy(dst, src, len);
}
static inline void nft_data_debug(const struct nft_data *data)
@@ -97,7 +111,8 @@ struct nft_data_desc {
unsigned int len;
};
-int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+int nft_data_init(const struct nft_ctx *ctx,
+ struct nft_data *data, unsigned int size,
struct nft_data_desc *desc, const struct nlattr *nla);
void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
@@ -113,12 +128,14 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
}
-int nft_validate_input_register(enum nft_registers reg);
-int nft_validate_output_register(enum nft_registers reg);
-int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type);
+unsigned int nft_parse_register(const struct nlattr *attr);
+int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
+int nft_validate_register_load(enum nft_registers reg, unsigned int len);
+int nft_validate_register_store(const struct nft_ctx *ctx,
+ enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
/**
* struct nft_userdata - user defined data associated with an object
@@ -138,19 +155,15 @@ struct nft_userdata {
/**
* struct nft_set_elem - generic representation of set elements
*
- * @cookie: implementation specific element cookie
* @key: element key
- * @data: element data (maps only)
- * @flags: element flags (end of interval)
- *
- * The cookie can be used to store a handle to the element for subsequent
- * removal.
+ * @priv: element private data and extensions
*/
struct nft_set_elem {
- void *cookie;
- struct nft_data key;
- struct nft_data data;
- u32 flags;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key;
+ void *priv;
};
struct nft_set;
@@ -202,11 +215,16 @@ struct nft_set_estimate {
enum nft_set_class class;
};
+struct nft_set_ext;
+struct nft_expr;
+
/**
* struct nft_set_ops - nf_tables set operations
*
* @lookup: look up an element within the set
* @insert: insert new element into set
+ * @activate: activate new element in the next generation
+ * @deactivate: deactivate element in the next generation
* @remove: remove element from set
* @walk: iterate over all set elements
* @privsize: function to return size of set private data
@@ -214,16 +232,28 @@ struct nft_set_estimate {
* @destroy: destroy private data of set instance
* @list: nf_tables_set_ops list node
* @owner: module reference
+ * @elemsize: element private size
* @features: features supported by the implementation
*/
struct nft_set_ops {
bool (*lookup)(const struct nft_set *set,
- const struct nft_data *key,
- struct nft_data *data);
- int (*get)(const struct nft_set *set,
- struct nft_set_elem *elem);
+ const u32 *key,
+ const struct nft_set_ext **ext);
+ bool (*update)(struct nft_set *set,
+ const u32 *key,
+ void *(*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *),
+ const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_set_ext **ext);
+
int (*insert)(const struct nft_set *set,
const struct nft_set_elem *elem);
+ void (*activate)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void * (*deactivate)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
void (*remove)(const struct nft_set *set,
const struct nft_set_elem *elem);
void (*walk)(const struct nft_ctx *ctx,
@@ -241,6 +271,7 @@ struct nft_set_ops {
struct list_head list;
struct module *owner;
+ unsigned int elemsize;
u32 features;
};
@@ -257,8 +288,12 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @dtype: data type (verdict or numeric type defined by userspace)
* @size: maximum set size
* @nelems: number of elements
+ * @ndeact: number of deactivated elements queued for removal
+ * @timeout: default timeout value in msecs
+ * @gc_int: garbage collection interval in msecs
* @policy: set parameterization (see enum nft_set_policies)
* @ops: set ops
+ * @pnet: network namespace
* @flags: set flags
* @klen: key length
* @dlen: data length
@@ -271,10 +306,14 @@ struct nft_set {
u32 ktype;
u32 dtype;
u32 size;
- u32 nelems;
+ atomic_t nelems;
+ u32 ndeact;
+ u64 timeout;
+ u32 gc_int;
u16 policy;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
+ possible_net_t pnet;
u16 flags;
u8 klen;
u8 dlen;
@@ -287,16 +326,27 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline struct nft_set *nft_set_container_of(const void *priv)
+{
+ return (void *)priv - offsetof(struct nft_set, data);
+}
+
struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
const struct nlattr *nla);
struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
const struct nlattr *nla);
+static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
+{
+ return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
+}
+
/**
* struct nft_set_binding - nf_tables set binding
*
* @list: set bindings list node
* @chain: chain containing the rule bound to the set
+ * @flags: set action flags
*
* A set binding contains all information necessary for validation
* of new elements added to a bound set.
@@ -304,6 +354,7 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
struct nft_set_binding {
struct list_head list;
const struct nft_chain *chain;
+ u32 flags;
};
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -311,6 +362,215 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
+/**
+ * enum nft_set_extensions - set extension type IDs
+ *
+ * @NFT_SET_EXT_KEY: element key
+ * @NFT_SET_EXT_DATA: mapping data
+ * @NFT_SET_EXT_FLAGS: element flags
+ * @NFT_SET_EXT_TIMEOUT: element timeout
+ * @NFT_SET_EXT_EXPIRATION: element expiration time
+ * @NFT_SET_EXT_USERDATA: user data associated with the element
+ * @NFT_SET_EXT_EXPR: expression associated with the element
+ * @NFT_SET_EXT_NUM: number of extension types
+ */
+enum nft_set_extensions {
+ NFT_SET_EXT_KEY,
+ NFT_SET_EXT_DATA,
+ NFT_SET_EXT_FLAGS,
+ NFT_SET_EXT_TIMEOUT,
+ NFT_SET_EXT_EXPIRATION,
+ NFT_SET_EXT_USERDATA,
+ NFT_SET_EXT_EXPR,
+ NFT_SET_EXT_NUM
+};
+
+/**
+ * struct nft_set_ext_type - set extension type
+ *
+ * @len: fixed part length of the extension
+ * @align: alignment requirements of the extension
+ */
+struct nft_set_ext_type {
+ u8 len;
+ u8 align;
+};
+
+extern const struct nft_set_ext_type nft_set_ext_types[];
+
+/**
+ * struct nft_set_ext_tmpl - set extension template
+ *
+ * @len: length of extension area
+ * @offset: offsets of individual extension types
+ */
+struct nft_set_ext_tmpl {
+ u16 len;
+ u8 offset[NFT_SET_EXT_NUM];
+};
+
+/**
+ * struct nft_set_ext - set extensions
+ *
+ * @genmask: generation mask
+ * @offset: offsets of individual extension types
+ * @data: beginning of extension data
+ */
+struct nft_set_ext {
+ u8 genmask;
+ u8 offset[NFT_SET_EXT_NUM];
+ char data[0];
+};
+
+static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+{
+ memset(tmpl, 0, sizeof(*tmpl));
+ tmpl->len = sizeof(struct nft_set_ext);
+}
+
+static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+ unsigned int len)
+{
+ tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
+ BUG_ON(tmpl->len > U8_MAX);
+ tmpl->offset[id] = tmpl->len;
+ tmpl->len += nft_set_ext_types[id].len + len;
+}
+
+static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+{
+ nft_set_ext_add_length(tmpl, id, 0);
+}
+
+static inline void nft_set_ext_init(struct nft_set_ext *ext,
+ const struct nft_set_ext_tmpl *tmpl)
+{
+ memcpy(ext->offset, tmpl->offset, sizeof(ext->offset));
+}
+
+static inline bool __nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return !!ext->offset[id];
+}
+
+static inline bool nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return ext && __nft_set_ext_exists(ext, id);
+}
+
+static inline void *nft_set_ext(const struct nft_set_ext *ext, u8 id)
+{
+ return (void *)ext + ext->offset[id];
+}
+
+static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_KEY);
+}
+
+static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_DATA);
+}
+
+static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
+}
+
+static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
+}
+
+static inline unsigned long *nft_set_ext_expiration(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+}
+
+static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_USERDATA);
+}
+
+static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPR);
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+ return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
+ time_is_before_eq_jiffies(*nft_set_ext_expiration(ext));
+}
+
+static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
+ void *elem)
+{
+ return elem + set->ops->elemsize;
+}
+
+void *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const u32 *key, const u32 *data,
+ u64 timeout, gfp_t gfp);
+void nft_set_elem_destroy(const struct nft_set *set, void *elem);
+
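A rough sketch of what nft_set_elem_init() does with these helpers internally: build a template, allocate the element plus extension area, then initialise the offsets (allocation details simplified):

#include <linux/slab.h>
#include <net/netfilter/nf_tables.h>

static void *my_elem_alloc(const struct nft_set *set, u64 timeout)
{
	struct nft_set_ext_tmpl tmpl;
	struct nft_set_ext *ext;
	void *elem;

	nft_set_ext_prepare(&tmpl);
	nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
	if (timeout)
		nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);

	elem = kzalloc(set->ops->elemsize + tmpl.len, GFP_KERNEL);
	if (!elem)
		return NULL;

	ext = nft_set_elem_ext(set, elem);
	nft_set_ext_init(ext, &tmpl);
	if (timeout)
		*nft_set_ext_timeout(ext) = timeout;

	return elem;
}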
+/**
+ * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
+ *
+ * @rcu: rcu head
+ * @set: set the elements belong to
+ * @cnt: count of elements
+ */
+struct nft_set_gc_batch_head {
+ struct rcu_head rcu;
+ const struct nft_set *set;
+ unsigned int cnt;
+};
+
+#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \
+ sizeof(struct nft_set_gc_batch_head)) / \
+ sizeof(void *))
+
+/**
+ * struct nft_set_gc_batch - nf_tables set garbage collection batch
+ *
+ * @head: GC batch head
+ * @elems: garbage collection elements
+ */
+struct nft_set_gc_batch {
+ struct nft_set_gc_batch_head head;
+ void *elems[NFT_SET_GC_BATCH_SIZE];
+};
+
+struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+ gfp_t gfp);
+void nft_set_gc_batch_release(struct rcu_head *rcu);
+
+static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
+{
+ if (gcb != NULL)
+ call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
+}
+
+static inline struct nft_set_gc_batch *
+nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
+ gfp_t gfp)
+{
+ if (gcb != NULL) {
+ if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
+ return gcb;
+ nft_set_gc_batch_complete(gcb);
+ }
+ return nft_set_gc_batch_alloc(set, gfp);
+}
+
+static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
+ void *elem)
+{
+ gcb->elems[gcb->head.cnt++] = elem;
+}
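A sketch of the intended GC pattern built from these helpers: batch up elements and let nft_set_gc_batch_complete() release them after an RCU grace period; my_gc_elements() is illustrative only:

#include <net/netfilter/nf_tables.h>

static void my_gc_elements(const struct nft_set *set, void **elems,
			   unsigned int n)
{
	struct nft_set_gc_batch *gcb = NULL;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* allocates a new batch whenever the current one fills up */
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;
		nft_set_gc_batch_add(gcb, elems[i]);
	}

	nft_set_gc_batch_complete(gcb);		/* safe to call with NULL */
}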
/**
* struct nft_expr_type - nf_tables expression type
@@ -323,6 +583,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
* @policy: netlink attribute policy
* @maxattr: highest netlink attribute number
* @family: address family for AF-specific types
+ * @flags: expression type flags
*/
struct nft_expr_type {
const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
@@ -334,8 +595,11 @@ struct nft_expr_type {
const struct nla_policy *policy;
unsigned int maxattr;
u8 family;
+ u8 flags;
};
+#define NFT_EXPR_STATEFUL 0x1
+
/**
* struct nft_expr_ops - nf_tables expression operations
*
@@ -351,7 +615,7 @@ struct nft_expr_type {
struct nft_expr;
struct nft_expr_ops {
void (*eval)(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
unsigned int size;
@@ -389,6 +653,18 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
return (void *)expr->data;
}
+struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
+ const struct nlattr *nla);
+void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
+int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ const struct nft_expr *expr);
+
+static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+{
+ __module_get(src->ops->type->owner);
+ memcpy(dst, src, src->ops->size);
+}
+
/**
* struct nft_rule - nf_tables rule
*
@@ -409,74 +685,6 @@ struct nft_rule {
__attribute__((aligned(__alignof__(struct nft_expr))));
};
-/**
- * struct nft_trans - nf_tables object update in transaction
- *
- * @list: used internally
- * @msg_type: message type
- * @ctx: transaction context
- * @data: internal information related to the transaction
- */
-struct nft_trans {
- struct list_head list;
- int msg_type;
- struct nft_ctx ctx;
- char data[0];
-};
-
-struct nft_trans_rule {
- struct nft_rule *rule;
-};
-
-#define nft_trans_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->rule)
-
-struct nft_trans_set {
- struct nft_set *set;
- u32 set_id;
-};
-
-#define nft_trans_set(trans) \
- (((struct nft_trans_set *)trans->data)->set)
-#define nft_trans_set_id(trans) \
- (((struct nft_trans_set *)trans->data)->set_id)
-
-struct nft_trans_chain {
- bool update;
- char name[NFT_CHAIN_MAXNAMELEN];
- struct nft_stats __percpu *stats;
- u8 policy;
-};
-
-#define nft_trans_chain_update(trans) \
- (((struct nft_trans_chain *)trans->data)->update)
-#define nft_trans_chain_name(trans) \
- (((struct nft_trans_chain *)trans->data)->name)
-#define nft_trans_chain_stats(trans) \
- (((struct nft_trans_chain *)trans->data)->stats)
-#define nft_trans_chain_policy(trans) \
- (((struct nft_trans_chain *)trans->data)->policy)
-
-struct nft_trans_table {
- bool update;
- bool enable;
-};
-
-#define nft_trans_table_update(trans) \
- (((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans) \
- (((struct nft_trans_table *)trans->data)->enable)
-
-struct nft_trans_elem {
- struct nft_set *set;
- struct nft_set_elem elem;
-};
-
-#define nft_trans_elem_set(trans) \
- (((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem(trans) \
- (((struct nft_trans_elem *)trans->data)->elem)
-
static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
{
return (struct nft_expr *)&rule->data[0];
@@ -517,7 +725,6 @@ enum nft_chain_flags {
*
* @rules: list of rules in the chain
* @list: used internally
- * @net: net namespace that this chain belongs to
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
@@ -528,7 +735,6 @@ enum nft_chain_flags {
struct nft_chain {
struct list_head rules;
struct list_head list;
- struct net *net;
struct nft_table *table;
u64 handle;
u32 use;
@@ -544,6 +750,25 @@ enum nft_chain_type {
NFT_CHAIN_T_MAX
};
+/**
+ * struct nf_chain_type - nf_tables chain type info
+ *
+ * @name: name of the type
+ * @type: numeric identifier
+ * @family: address family
+ * @owner: module owner
+ * @hook_mask: mask of valid hooks
+ * @hooks: hookfn overrides
+ */
+struct nf_chain_type {
+ const char *name;
+ enum nft_chain_type type;
+ int family;
+ struct module *owner;
+ unsigned int hook_mask;
+ nf_hookfn *hooks[NF_MAX_HOOKS];
+};
+
int nft_chain_validate_dependency(const struct nft_chain *chain,
enum nft_chain_type type);
int nft_chain_validate_hooks(const struct nft_chain *chain,
@@ -561,6 +786,7 @@ struct nft_stats {
* struct nft_base_chain - nf_tables base chain
*
* @ops: netfilter hook ops
+ * @pnet: net namespace that this chain belongs to
* @type: chain type
* @policy: default policy
* @stats: per-cpu chain stats
@@ -568,6 +794,7 @@ struct nft_stats {
*/
struct nft_base_chain {
struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
+ possible_net_t pnet;
const struct nf_chain_type *type;
u8 policy;
struct nft_stats __percpu *stats;
@@ -600,7 +827,7 @@ struct nft_table {
u64 hgenerator;
u32 use;
u16 flags;
- char name[];
+ char name[NFT_TABLE_MAXNAMELEN];
};
/**
@@ -630,25 +857,6 @@ struct nft_af_info {
int nft_register_afinfo(struct net *, struct nft_af_info *);
void nft_unregister_afinfo(struct nft_af_info *);
-/**
- * struct nf_chain_type - nf_tables chain type info
- *
- * @name: name of the type
- * @type: numeric identifier
- * @family: address family
- * @owner: module owner
- * @hook_mask: mask of valid hooks
- * @hooks: hookfn overrides
- */
-struct nf_chain_type {
- const char *name;
- enum nft_chain_type type;
- int family;
- struct module *owner;
- unsigned int hook_mask;
- nf_hookfn *hooks[NF_MAX_HOOKS];
-};
-
int nft_register_chain_type(const struct nf_chain_type *);
void nft_unregister_chain_type(const struct nf_chain_type *);
@@ -673,4 +881,153 @@ void nft_unregister_expr(struct nft_expr_type *);
#define MODULE_ALIAS_NFT_SET() \
MODULE_ALIAS("nft-set")
+/*
+ * The gencursor defines two generations, the currently active and the
+ * next one. Objects contain a bitmask of 2 bits specifying the generations
+ * they're active in. A set bit means they're inactive in the generation
+ * represented by that bit.
+ *
+ * New objects start out as inactive in the current and active in the
+ * next generation. When committing the ruleset the bitmask is cleared,
+ * meaning they're active in all generations. When removing an object,
+ * it is set inactive in the next generation. After committing the ruleset,
+ * the objects are removed.
+ */
+static inline unsigned int nft_gencursor_next(const struct net *net)
+{
+ return net->nft.gencursor + 1 == 1 ? 1 : 0;
+}
+
+static inline u8 nft_genmask_next(const struct net *net)
+{
+ return 1 << nft_gencursor_next(net);
+}
+
+static inline u8 nft_genmask_cur(const struct net *net)
+{
+ /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */
+ return 1 << ACCESS_ONCE(net->nft.gencursor);
+}
+
+#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
+
+/*
+ * Set element transaction helpers
+ */
+
+static inline bool nft_set_elem_active(const struct nft_set_ext *ext,
+ u8 genmask)
+{
+ return !(ext->genmask & genmask);
+}
+
+static inline void nft_set_elem_change_active(const struct nft_set *set,
+ struct nft_set_ext *ext)
+{
+ ext->genmask ^= nft_genmask_next(read_pnet(&set->pnet));
+}
+
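A sketch of how a set backend's lookup path can combine these helpers to skip elements that are not visible in the current generation:

#include <net/netfilter/nf_tables.h>

static bool my_elem_visible(const struct net *net,
			    const struct nft_set_ext *ext)
{
	u8 genmask = nft_genmask_cur(net);

	/* a set bit in ext->genmask means "inactive in that generation" */
	return nft_set_elem_active(ext, genmask) &&
	       !nft_set_elem_expired(ext);
}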
+/*
+ * We use a free bit in the genmask field to indicate the element
+ * is busy, meaning it is currently being processed either by
+ * the netlink API or GC.
+ *
+ * Even though the genmask is only a single byte wide, this works
+ * because the extension structure is fully constant once initialized,
+ * so there are no non-atomic write accesses unless it is already
+ * marked busy.
+ */
+#define NFT_SET_ELEM_BUSY_MASK (1 << 2)
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT 2
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#else
+#error
+#endif
+
+static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
+
+static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
+
+/**
+ * struct nft_trans - nf_tables object update in transaction
+ *
+ * @list: used internally
+ * @msg_type: message type
+ * @ctx: transaction context
+ * @data: internal information related to the transaction
+ */
+struct nft_trans {
+ struct list_head list;
+ int msg_type;
+ struct nft_ctx ctx;
+ char data[0];
+};
+
+struct nft_trans_rule {
+ struct nft_rule *rule;
+};
+
+#define nft_trans_rule(trans) \
+ (((struct nft_trans_rule *)trans->data)->rule)
+
+struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
+};
+
+#define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
+
+struct nft_trans_chain {
+ bool update;
+ char name[NFT_CHAIN_MAXNAMELEN];
+ struct nft_stats __percpu *stats;
+ u8 policy;
+};
+
+#define nft_trans_chain_update(trans) \
+ (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans) \
+ (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans) \
+ (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans) \
+ (((struct nft_trans_chain *)trans->data)->policy)
+
+struct nft_trans_table {
+ bool update;
+ bool enable;
+};
+
+#define nft_trans_table_update(trans) \
+ (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_table_enable(trans) \
+ (((struct nft_trans_table *)trans->data)->enable)
+
+struct nft_trans_elem {
+ struct nft_set *set;
+ struct nft_set_elem elem;
+};
+
+#define nft_trans_elem_set(trans) \
+ (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem)
+
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a75fc8e27cd6..c6f400cfaac8 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -31,6 +31,9 @@ void nft_cmp_module_exit(void);
int nft_lookup_module_init(void);
void nft_lookup_module_exit(void);
+int nft_dynset_module_init(void);
+void nft_dynset_module_exit(void);
+
int nft_bitwise_module_init(void);
void nft_bitwise_module_exit(void);
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index cba143fbd2e4..2df7f96902ee 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -8,12 +8,11 @@ static inline void
nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
struct iphdr *ip;
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
ip = ip_hdr(pkt->skb);
pkt->tprot = ip->protocol;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 74d976137658..97db2e3a5e65 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -8,13 +8,12 @@ static inline int
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
int protohdr, thoff = 0;
unsigned short frag_off;
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
/* If malformed, drop it */
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index 0ee47c3e2e31..711887a09e91 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -26,11 +26,11 @@ int nft_meta_set_dump(struct sk_buff *skb,
const struct nft_expr *expr);
void nft_meta_get_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
void nft_meta_set_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e010ee8da41d..2a5dbcc90d1c 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
+#include <linux/in6.h>
/* ========================================================================
* Netlink Messages and Attributes Interface (As Seen On TV)
@@ -105,6 +106,8 @@
* nla_put_string(skb, type, str) add string attribute to skb
* nla_put_flag(skb, type) add flag attribute to skb
* nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ * nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
+ * nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
*
* Nested Attributes Construction:
* nla_nest_start(skb, type) start a nested attribute
@@ -957,6 +960,32 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
}
/**
+ * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv4 address
+ */
+static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
+ __be32 addr)
+{
+ return nla_put_be32(skb, attrtype, addr);
+}
+
+/**
+ * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv6 address
+ */
+static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
+ const struct in6_addr *addr)
+{
+ return nla_put(skb, attrtype, sizeof(*addr), addr);
+}
+
+/**
* nla_get_u32 - return payload of u32 attribute
* @nla: u32 netlink attribute
*/
@@ -1099,6 +1128,27 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla)
}
/**
+ * nla_get_in_addr - return payload of IPv4 address attribute
+ * @nla: IPv4 address netlink attribute
+ */
+static inline __be32 nla_get_in_addr(const struct nlattr *nla)
+{
+ return *(__be32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_in6_addr - return payload of IPv6 address attribute
+ * @nla: IPv6 address netlink attribute
+ */
+static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
+{
+ struct in6_addr tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+ return tmp;
+}
+
+/**
* nla_nest_start - Start a new level of nested attributes
* @skb: socket buffer to add attributes to
* @attrtype: attribute type of container
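A hedged sketch of how the new address helpers pair up on the fill and parse sides; MYATTR_ADDR and the surrounding message-building context are invented for illustration:

#include <net/netlink.h>

#define MYATTR_ADDR 1			/* hypothetical attribute type */

/* Fill side: emit an IPv4 address as a netlink attribute. */
static int example_fill_addr(struct sk_buff *skb, __be32 addr)
{
	return nla_put_in_addr(skb, MYATTR_ADDR, addr);
}

/* Parse side: read the address back from a validated attribute. */
static __be32 example_parse_addr(const struct nlattr *nla)
{
	return nla_get_in_addr(nla);
}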
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 0931618c0f7f..70e158551704 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -38,11 +38,9 @@ static inline void *net_generic(const struct net *net, int id)
rcu_read_lock();
ng = rcu_dereference(net->gen);
- BUG_ON(id == 0 || id > ng->len);
ptr = ng->ptr[id - 1];
rcu_read_unlock();
- BUG_ON(!ptr);
return ptr;
}
#endif
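With the BUG_ON() sanity checks gone, net_generic() now trusts its caller to pass an id that was successfully registered; a hedged sketch of the usual lookup pattern (my_pernet_id and the state struct are illustrative):

#include <net/netns/generic.h>

static int my_pernet_id;		/* hypothetical; normally assigned at pernet registration */

struct my_pernet_state {		/* hypothetical per-namespace state */
	int counter;
};

static struct my_pernet_state *my_state(const struct net *net)
{
	/* id must be valid here: the old runtime checks were removed */
	return net_generic(net, my_pernet_id);
}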
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index c06ac58ca107..69a6715d9f3f 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -5,7 +5,7 @@
struct net;
-static inline unsigned int net_hash_mix(struct net *net)
+static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
/*
@@ -13,7 +13,7 @@ static inline unsigned int net_hash_mix(struct net *net)
* always zeroed
*/
- return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT);
+ return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
#else
return 0;
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index dbe225478adb..614a49be68a9 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -7,6 +7,7 @@
#include <linux/uidgid.h>
#include <net/inet_frag.h>
+#include <linux/rcupdate.h>
struct tcpm_hash_bucket;
struct ctl_table_header;
@@ -38,21 +39,21 @@ struct netns_ipv4 {
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules;
- struct fib_table *fib_local;
- struct fib_table *fib_main;
- struct fib_table *fib_default;
+ struct fib_table __rcu *fib_local;
+ struct fib_table __rcu *fib_main;
+ struct fib_table __rcu *fib_default;
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
int fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
+ bool fib_offload_disabled;
struct sock *fibnl;
struct sock * __percpu *icmp_sk;
+ struct sock *mc_autojoin_sk;
struct inet_peer_base *peers;
- struct tcpm_hash_bucket *tcp_metrics_hash;
- unsigned int tcp_metrics_hash_log;
struct sock * __percpu *tcp_sk;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
@@ -84,6 +85,8 @@ struct netns_ipv4 {
int sysctl_tcp_fwmark_accept;
int sysctl_tcp_mtu_probing;
int sysctl_tcp_base_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
struct ping_group_range ping_group_range;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 69ae41f2098c..d2527bf81142 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -32,6 +32,8 @@ struct netns_sysctl_ipv6 {
int icmpv6_time;
int anycast_src_echo_reply;
int fwmark_reflect;
+ int idgen_retries;
+ int idgen_delay;
};
struct netns_ipv6 {
@@ -67,6 +69,7 @@ struct netns_ipv6 {
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
+ struct sock *mc_autojoin_sk;
#ifdef CONFIG_IPV6_MROUTE
#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
struct mr6_table *mrt6;
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
new file mode 100644
index 000000000000..d29203651c01
--- /dev/null
+++ b/include/net/netns/mpls.h
@@ -0,0 +1,17 @@
+/*
+ * mpls in net namespaces
+ */
+
+#ifndef __NETNS_MPLS_H__
+#define __NETNS_MPLS_H__
+
+struct mpls_route;
+struct ctl_table_header;
+
+struct netns_mpls {
+ size_t platform_labels;
+ struct mpls_route __rcu * __rcu *platform_label;
+ struct ctl_table_header *ctl;
+};
+
+#endif /* __NETNS_MPLS_H__ */
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
index c24060ee411e..4d6597ad6067 100644
--- a/include/net/netns/x_tables.h
+++ b/include/net/netns/x_tables.h
@@ -9,6 +9,7 @@ struct ebt_table;
struct netns_xt {
struct list_head tables[NFPROTO_NUMPROTO];
bool notrack_deprecated_warning;
+ bool clusterip_deprecated_warning;
#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
struct ebt_table *broute_table;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index ab672b537dd4..020a814bc8ed 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -83,6 +83,10 @@ struct nfc_hci_pipe {
};
#define NFC_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
#define NFC_HCI_MAX_PIPES 127
struct nfc_hci_init_data {
u8 gate_count;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index ff87f8611fa3..d4dcc7199fd7 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -71,6 +71,7 @@ struct nci_ops {
int (*close)(struct nci_dev *ndev);
int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
int (*setup)(struct nci_dev *ndev);
+ int (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
int (*discover_se)(struct nci_dev *ndev);
int (*disable_se)(struct nci_dev *ndev, u32 se_idx);
@@ -137,6 +138,10 @@ struct nci_conn_info {
#define NCI_HCI_INVALID_HOST 0x80
#define NCI_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
#define NCI_HCI_MAX_PIPES 127
struct nci_hci_gate {
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 73190e65d5c1..7ac029c07546 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -157,7 +157,7 @@ struct nfc_evt_transaction {
u32 aid_len;
u8 aid[NFC_MAX_AID_LENGTH];
u8 params_len;
- u8 params[NFC_MAX_PARAMS_LENGTH];
+ u8 params[0];
} __packed;
struct nfc_genl_data {
diff --git a/include/net/ping.h b/include/net/ping.h
index cc16d413f681..ac80cb45e630 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -75,12 +75,11 @@ void ping_err(struct sk_buff *skb, int offset, u32 info);
int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
-int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len);
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
-int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
+int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
bool ping_rcv(struct sk_buff *skb);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 7f830ff67f08..9f4265ce8892 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -39,8 +39,7 @@ struct request_sock_ops {
void (*send_reset)(struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req);
- void (*syn_ack_timeout)(struct sock *sk,
- struct request_sock *req);
+ void (*syn_ack_timeout)(const struct request_sock *req);
};
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
@@ -49,7 +48,11 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
*/
struct request_sock {
struct sock_common __req_common;
+#define rsk_refcnt __req_common.skc_refcnt
+#define rsk_hash __req_common.skc_hash
+
struct request_sock *dl_next;
+ struct sock *rsk_listener;
u16 mss;
u8 num_retrans; /* number of retransmits */
u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
@@ -58,32 +61,56 @@ struct request_sock {
u32 window_clamp; /* window clamp at creation time */
u32 rcv_wnd; /* rcv_wnd offered first time */
u32 ts_recent;
- unsigned long expires;
+ struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
u32 secid;
u32 peer_secid;
};
-static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
{
struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
- if (req != NULL)
+ if (req) {
req->rsk_ops = ops;
-
+ sock_hold(sk_listener);
+ req->rsk_listener = sk_listener;
+
+ /* Following is temporary. It is coupled with debugging
+ * helpers in reqsk_put() & reqsk_free()
+ */
+ atomic_set(&req->rsk_refcnt, 0);
+ }
return req;
}
-static inline void __reqsk_free(struct request_sock *req)
+static inline struct request_sock *inet_reqsk(struct sock *sk)
{
- kmem_cache_free(req->rsk_ops->slab, req);
+ return (struct request_sock *)sk;
+}
+
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+ return (struct sock *)req;
}
static inline void reqsk_free(struct request_sock *req)
{
+ /* temporary debugging */
+ WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
+
req->rsk_ops->destructor(req);
- __reqsk_free(req);
+ if (req->rsk_listener)
+ sock_put(req->rsk_listener);
+ kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_put(struct request_sock *req)
+{
+ if (atomic_dec_and_test(&req->rsk_refcnt))
+ reqsk_free(req);
}
extern int sysctl_max_syn_backlog;
@@ -93,12 +120,16 @@ extern int sysctl_max_syn_backlog;
* @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
*/
struct listen_sock {
- u8 max_qlen_log;
+ int qlen_inc; /* protected by listener lock */
+ int young_inc;/* protected by listener lock */
+
+ /* following fields can be updated by timer */
+ atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
+ atomic_t young_dec;
+
+ u8 max_qlen_log ____cacheline_aligned_in_smp;
u8 synflood_warned;
/* 2 bytes hole, try to use */
- int qlen;
- int qlen_young;
- int clock_hand;
u32 hash_rnd;
u32 nr_table_entries;
struct request_sock *syn_table[0];
@@ -142,18 +173,11 @@ struct fastopen_queue {
* %syn_wait_lock is necessary only to avoid proc interface having to grab the main
* lock sock while browsing the listening hash (otherwise it's deadlock prone).
*
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
*/
struct request_sock_queue {
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
- rwlock_t syn_wait_lock;
u8 rskq_defer_accept;
- /* 3 bytes hole, try to pack */
struct listen_sock *listen_opt;
struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
* enabled on this listener. Check
@@ -161,6 +185,9 @@ struct request_sock_queue {
* to determine if TFO is enabled
* right at this moment.
*/
+
+ /* temporary alignment, our goal is to get rid of this lock */
+ spinlock_t syn_wait_lock ____cacheline_aligned_in_smp;
};
int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -185,15 +212,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
return queue->rskq_accept_head == NULL;
}
-static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
- struct request_sock *req,
- struct request_sock **prev_req)
-{
- write_lock(&queue->syn_wait_lock);
- *prev_req = req->dl_next;
- write_unlock(&queue->syn_wait_lock);
-}
-
static inline void reqsk_queue_add(struct request_sock_queue *queue,
struct request_sock *req,
struct sock *parent,
@@ -224,57 +242,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
return req;
}
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
- struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+ const struct request_sock *req)
{
struct listen_sock *lopt = queue->listen_opt;
if (req->num_timeout == 0)
- --lopt->qlen_young;
-
- return --lopt->qlen;
+ atomic_inc(&lopt->young_dec);
+ atomic_inc(&lopt->qlen_dec);
}
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
struct listen_sock *lopt = queue->listen_opt;
- const int prev_qlen = lopt->qlen;
- lopt->qlen_young++;
- lopt->qlen++;
- return prev_qlen;
+ lopt->young_inc++;
+ lopt->qlen_inc++;
}
-static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+static inline int listen_sock_qlen(const struct listen_sock *lopt)
{
- return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+ return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
}
-static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+static inline int listen_sock_young(const struct listen_sock *lopt)
{
- return queue->listen_opt->qlen_young;
+ return lopt->young_inc - atomic_read(&lopt->young_dec);
}
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
- return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+ const struct listen_sock *lopt = queue->listen_opt;
+
+ return lopt ? listen_sock_qlen(lopt) : 0;
}
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
- u32 hash, struct request_sock *req,
- unsigned long timeout)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
- struct listen_sock *lopt = queue->listen_opt;
-
- req->expires = jiffies + timeout;
- req->num_retrans = 0;
- req->num_timeout = 0;
- req->sk = NULL;
- req->dl_next = lopt->syn_table[hash];
+ return listen_sock_young(queue->listen_opt);
+}
- write_lock(&queue->syn_wait_lock);
- lopt->syn_table[hash] = req;
- write_unlock(&queue->syn_wait_lock);
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+ return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
}
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ u32 hash, struct request_sock *req,
+ unsigned long timeout);
+
#endif /* _REQUEST_SOCK_H */
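A hedged reading of the reworked bookkeeping above: the effective lengths are always qlen_inc - qlen_dec and young_inc - young_dec, so the listener keeps its lock-protected increments while the timer only bumps the atomic decrement side. The helpers below are illustrative only:

#include <net/request_sock.h>

/* Drop one pending request: retire it from the accounting, then release
 * the reference; reqsk_put() frees it once rsk_refcnt reaches zero.
 */
static void example_drop_req(struct request_sock_queue *queue,
			     struct request_sock *req)
{
	reqsk_queue_removed(queue, req);	/* bumps the atomic _dec counters */
	reqsk_put(req);
}

/* Same computation reqsk_queue_is_full() performs. */
static int example_queue_full(const struct request_sock_queue *queue)
{
	return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
}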
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 6c6d5393fc34..343d922d15c2 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -137,7 +137,7 @@ void rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-struct net_device *rtnl_create_link(struct net *net, char *ifname,
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
struct nlattr *tb[]);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c605d305c577..6d778efcfdfd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -213,7 +213,7 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto*);
+ bool (*destroy)(struct tcf_proto*, bool);
unsigned long (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
@@ -399,7 +399,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
-void tcf_destroy(struct tcf_proto *tp);
+bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
/* Reset all TX qdiscs greater then index of a device. */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 856f01cb51dd..c56a438c3a1e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -166,6 +166,9 @@ void sctp_remaddr_proc_exit(struct net *net);
*/
extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
+extern long sysctl_sctp_mem[3];
+extern int sysctl_sctp_rmem[3];
+extern int sysctl_sctp_wmem[3];
/*
* Section: Macros, externs, and inlines
diff --git a/include/net/sock.h b/include/net/sock.h
index e4079c28e6b8..3a4898ec8c67 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -57,7 +57,6 @@
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
-#include <linux/aio.h>
#include <linux/sched.h>
#include <linux/filter.h>
@@ -67,6 +66,7 @@
#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
+#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
struct cgroup;
@@ -190,15 +190,15 @@ struct sock_common {
struct hlist_nulls_node skc_portaddr_node;
};
struct proto *skc_prot;
-#ifdef CONFIG_NET_NS
- struct net *skc_net;
-#endif
+ possible_net_t skc_net;
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr skc_v6_daddr;
struct in6_addr skc_v6_rcv_saddr;
#endif
+ atomic64_t skc_cookie;
+
/*
* fields between dontcopy_begin/dontcopy_end
* are not copied in sock_copy()
@@ -329,6 +329,7 @@ struct sock {
#define sk_net __sk_common.skc_net
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+#define sk_cookie __sk_common.skc_cookie
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
@@ -403,8 +404,8 @@ struct sock {
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
- unsigned short sk_ack_backlog;
- unsigned short sk_max_ack_backlog;
+ u32 sk_ack_backlog;
+ u32 sk_max_ack_backlog;
__u32 sk_priority;
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
__u32 sk_cgrp_prioidx;
@@ -958,10 +959,9 @@ struct proto {
int (*compat_ioctl)(struct sock *sk,
unsigned int cmd, unsigned long arg);
#endif
- int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
- int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg,
+ int (*sendmsg)(struct sock *sk, struct msghdr *msg,
+ size_t len);
+ int (*recvmsg)(struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags,
int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
@@ -1562,9 +1562,8 @@ int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
-int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
-int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
- int);
+int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
@@ -1576,8 +1575,8 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
*/
int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags);
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
@@ -1626,7 +1625,7 @@ static inline void sock_put(struct sock *sk)
sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
- * (TCP_TIMEWAIT, ESTABLISHED...)
+ * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
*/
void sock_gen_put(struct sock *sk);
@@ -2080,6 +2079,29 @@ static inline int sock_intr_errno(long timeo)
return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
+struct sock_skb_cb {
+ u32 dropcount;
+};
+
+/* Store sock_skb_cb at the end of skb->cb[] so protocol families
+ * using skb->cb[] would keep using it directly and utilize its
+ * alignment guarantee.
+ */
+#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+ sizeof(struct sock_skb_cb)))
+
+#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
+ SOCK_SKB_CB_OFFSET))
+
+#define sock_skb_cb_check_size(size) \
+ BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
+
+static inline void
+sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
+{
+ SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2182,7 +2204,7 @@ static inline void sk_change_net(struct sock *sk, struct net *net)
if (!net_eq(current_net, net)) {
put_net(current_net);
- sock_net_set(sk, hold_net(net));
+ sock_net_set(sk, net);
}
}
@@ -2198,6 +2220,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
return NULL;
}
+/* This helper checks if a socket is a full socket,
+ * ie _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
+{
+ return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
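A hedged sketch of how the additions above are meant to be combined in a receive path; my_proto_skb_cb is an invented protocol control block, not a real one:

#include <net/sock.h>

struct my_proto_skb_cb {		/* hypothetical protocol cb state */
	u32 flags;
};

static void example_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* compile-time check that the protocol cb leaves the shared tail free */
	sock_skb_cb_check_size(sizeof(struct my_proto_skb_cb));

	/* request and timewait minisocks are not full sockets */
	if (!sk_fullsock(sk))
		return;

	/* record the current drop counter in the cb tail area */
	sock_skb_set_dropcount(sk, skb);
}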
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index cfcdac2e5d25..d2e69ee3019a 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -1,6 +1,7 @@
/*
* include/net/switchdev.h - Switch device API
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,6 +14,35 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
+struct fib_info;
+
+/**
+ * struct switchdev_ops - switchdev operations
+ *
+ * @swdev_parent_id_get: Called to get an ID of the switch chip this port
+ * is part of. If driver implements this, it indicates that it
+ * represents a port of a switch chip.
+ *
+ * @swdev_port_stp_update: Called to notify switch device port of bridge
+ * port STP state change.
+ *
+ * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device.
+ *
+ * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device.
+ */
+struct swdev_ops {
+ int (*swdev_parent_id_get)(struct net_device *dev,
+ struct netdev_phys_item_id *psid);
+ int (*swdev_port_stp_update)(struct net_device *dev, u8 state);
+ int (*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst,
+ int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 nlflags,
+ u32 tb_id);
+ int (*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst,
+ int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id);
+};
+
enum netdev_switch_notifier_type {
NETDEV_SWITCH_FDB_ADD = 1,
NETDEV_SWITCH_FDB_DEL,
@@ -51,6 +81,12 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
+int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id);
+void netdev_switch_fib_ipv4_abort(struct fib_info *fi);
+
#else
static inline int netdev_switch_parent_id_get(struct net_device *dev,
@@ -109,6 +145,25 @@ static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *
return 0;
}
+static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type,
+ u32 nlflags, u32 tb_id)
+{
+ return 0;
+}
+
+static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id)
+{
+ return 0;
+}
+
+static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+{
+}
+
#endif
#endif /* _LINUX_SWITCHDEV_H_ */
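A hedged sketch of a switch driver wiring up the new ops structure; the example_* callbacks and their bodies are placeholders, not a real driver:

#include <net/switchdev.h>

static int example_parent_id_get(struct net_device *dev,
				 struct netdev_phys_item_id *psid)
{
	/* fill psid from device-specific state */
	return 0;
}

static int example_stp_update(struct net_device *dev, u8 state)
{
	/* push the bridge port STP state into hardware */
	return 0;
}

static const struct swdev_ops example_swdev_ops = {
	.swdev_parent_id_get	= example_parent_id_get,
	.swdev_port_stp_update	= example_stp_update,
	/* .swdev_fib_ipv4_add / .swdev_fib_ipv4_del omitted in this sketch */
};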
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
index 86a070ffc930..a152e9858b2c 100644
--- a/include/net/tc_act/tc_bpf.h
+++ b/include/net/tc_act/tc_bpf.h
@@ -16,8 +16,12 @@
struct tcf_bpf {
struct tcf_common common;
struct bpf_prog *filter;
+ union {
+ u32 bpf_fd;
+ u16 bpf_num_ops;
+ };
struct sock_filter *bpf_ops;
- u16 bpf_num_ops;
+ const char *bpf_name;
};
#define to_bpf(a) \
container_of(a->priv, struct tcf_bpf, common)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8d6b983d5099..6d204f3f9df8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_MIN_MSS 88U
/* The least MTU to use for probing */
-#define TCP_BASE_MSS 512
+#define TCP_BASE_MSS 1024
+
+/* probing interval, default to 10 minutes as per RFC4821 */
+#define TCP_PROBE_INTERVAL 600
+
+/* Specify interval when tcp mtu probing will stop */
+#define TCP_PROBE_THRESHOLD 8
/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3
@@ -173,6 +179,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
* experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -188,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
+#define TCPOLEN_FASTOPEN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
/* But this is what stacks really send out. */
@@ -349,8 +357,7 @@ void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size);
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
void tcp_release_cb(struct sock *sk);
@@ -401,8 +408,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req, struct request_sock **prev,
- bool fastopen);
+ struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
@@ -429,9 +435,9 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+void tcp_syn_ack_timeout(const struct request_sock *req);
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
@@ -443,6 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
+void tcp_req_err(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(struct sock *sk,
struct request_sock *req,
@@ -524,8 +531,6 @@ int tcp_write_wakeup(struct sock *);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
-bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
- const char *proto);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
@@ -571,7 +576,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -799,6 +804,8 @@ enum tcp_ca_ack_event_flags {
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
+union tcp_cc_info;
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -824,7 +831,8 @@ struct tcp_congestion_ops {
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
/* get info for inet_diag (optional) */
- void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+ size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info);
char name[TCP_CA_NAME_MAX];
struct module *owner;
@@ -1132,31 +1140,6 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk->sk_rcvbuf);
}
-static inline void tcp_openreq_init(struct request_sock *req,
- struct tcp_options_received *rx_opt,
- struct sk_buff *skb, struct sock *sk)
-{
- struct inet_request_sock *ireq = inet_rsk(req);
-
- req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
- req->cookie_ts = 0;
- tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
- tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- tcp_rsk(req)->snt_synack = tcp_time_stamp;
- tcp_rsk(req)->last_oow_ack_time = 0;
- req->mss = rx_opt->mss_clamp;
- req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
- ireq->tstamp_ok = rx_opt->tstamp_ok;
- ireq->sack_ok = rx_opt->sack_ok;
- ireq->snd_wscale = rx_opt->snd_wscale;
- ireq->wscale_ok = rx_opt->wscale_ok;
- ireq->acked = 0;
- ireq->ecn_ok = 0;
- ireq->ir_rmt_port = tcp_hdr(skb)->source;
- ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
- ireq->ir_mark = inet_request_mark(sk, skb);
-}
-
extern void tcp_openreq_init_rwin(struct request_sock *req,
struct sock *sk, struct dst_entry *dst);
@@ -1236,36 +1219,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
return true;
}
-/* Return true if we're currently rate-limiting out-of-window ACKs and
- * thus shouldn't send a dupack right now. We rate-limit dupacks in
- * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
- * attacks that send repeated SYNs or ACKs for the same connection. To
- * do this, we do not send a duplicate SYNACK or ACK if the remote
- * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
- */
-static inline bool tcp_oow_rate_limited(struct net *net,
- const struct sk_buff *skb,
- int mib_idx, u32 *last_oow_ack_time)
-{
- /* Data packets without SYNs are not likely part of an ACK loop. */
- if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
- !tcp_hdr(skb)->syn)
- goto not_rate_limited;
-
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
- NET_INC_STATS_BH(net, mib_idx);
- return true; /* rate-limited: don't send yet! */
- }
- }
-
- *last_oow_ack_time = tcp_time_stamp;
-
-not_rate_limited:
- return false; /* not rate-limited: go ahead, send dupack now! */
-}
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ int mib_idx, u32 *last_oow_ack_time);
static inline void tcp_mib_init(struct net *net)
{
@@ -1344,15 +1299,14 @@ struct tcp_md5sig_pool {
};
/* - functions */
-int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- const struct sock *sk, const struct request_sock *req,
- const struct sk_buff *skb);
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
int family);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
- struct sock *addr_sk);
+ const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1388,7 +1342,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie, int *syn_loss,
unsigned long *last_syn_loss);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
- struct tcp_fastopen_cookie *cookie, bool syn_lost);
+ struct tcp_fastopen_cookie *cookie, bool syn_lost,
+ u16 try_exp);
struct tcp_fastopen_request {
/* Fast Open cookie. Size 0 means a cookie request */
struct tcp_fastopen_cookie cookie;
@@ -1663,28 +1618,26 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
- struct sock *addr_sk);
- int (*calc_md5_hash) (char *location,
- struct tcp_md5sig_key *md5,
- const struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
- int (*md5_parse) (struct sock *sk,
- char __user *optval,
- int optlen);
+ const struct sock *addr_sk);
+ int (*calc_md5_hash)(char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
+ int (*md5_parse)(struct sock *sk,
+ char __user *optval,
+ int optlen);
#endif
};
struct tcp_request_sock_ops {
u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
- struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
- struct request_sock *req);
- int (*calc_md5_hash) (char *location,
- struct tcp_md5sig_key *md5,
- const struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
+ struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk,
+ const struct sock *addr_sk);
+ int (*calc_md5_hash) (char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
#endif
void (*init_req)(struct request_sock *req, struct sock *sk,
struct sk_buff *skb);
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index b0b645988bd8..50e78a74d0df 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -25,6 +25,7 @@ enum {
TCP_LAST_ACK,
TCP_LISTEN,
TCP_CLOSING, /* Now a valid state */
+ TCP_NEW_SYN_RECV,
TCP_MAX_STATES /* Leave at the end! */
};
@@ -44,7 +45,8 @@ enum {
TCPF_CLOSE_WAIT = (1 << 8),
TCPF_LAST_ACK = (1 << 9),
TCPF_LISTEN = (1 << 10),
- TCPF_CLOSING = (1 << 11)
+ TCPF_CLOSING = (1 << 11),
+ TCPF_NEW_SYN_RECV = (1 << 12),
};
#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 07f9b70962f6..6d4ed18e1427 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -194,6 +194,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*)(const struct sock *, const struct sock *),
unsigned int hash2_nulladdr);
+u32 udp_flow_hashrnd(void);
+
static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
int min, int max, bool use_eth)
{
@@ -205,12 +207,19 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
}
hash = skb_get_hash(skb);
- if (unlikely(!hash) && use_eth) {
- /* Can't find a normal hash, caller has indicated an Ethernet
- * packet so use that to compute a hash.
- */
- hash = jhash(skb->data, 2 * ETH_ALEN,
- (__force u32) skb->protocol);
+ if (unlikely(!hash)) {
+ if (use_eth) {
+ /* Can't find a normal hash, caller has indicated an
+ * Ethernet packet so use that to compute a hash.
+ */
+ hash = jhash(skb->data, 2 * ETH_ALEN,
+ (__force u32) skb->protocol);
+ } else {
+ /* Can't derive any sort of hash for the packet, set
+ * to some consistent random value.
+ */
+ hash = udp_flow_hashrnd();
+ }
}
/* Since this is being sent on the wire obfuscate hash a bit
@@ -229,8 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
void udp_err(struct sk_buff *, u32);
-int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
+int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
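A hedged sketch of a tunnel transmit path picking a source port with the reworked helper; passing 0 for both bounds is assumed here to select the stack's default ephemeral range:

#include <net/udp.h>

static __be16 example_pick_sport(struct net *net, struct sk_buff *skb)
{
	/* use_eth = true: if the skb carries no flow hash, hash the
	 * Ethernet header rather than falling back to udp_flow_hashrnd().
	 */
	return udp_flow_src_port(net, skb, 0, 0, true);
}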
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 1a20d33d56bc..c491c1221606 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -77,13 +77,14 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
struct udp_tunnel_sock_cfg *sock_cfg);
/* Transmit the skb using UDP encapsulation. */
-int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
bool xnet, bool nocheck);
#if IS_ENABLED(CONFIG_IPV6)
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be16 src_port,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index c73e7abbbaa5..0082b5d33d7d 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -131,7 +131,7 @@ struct vxlan_sock {
#define VXLAN_F_GBP 0x800
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
-/* Flags that are used in the receive patch. These flags must match in
+/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
*/
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
@@ -145,7 +145,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
void vxlan_sock_release(struct vxlan_sock *vs);
-int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
bool xnet, u32 vxflags);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dc4865e90fe4..36ac102c97c7 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -126,9 +126,7 @@ struct xfrm_state_walk {
/* Full description of state of transformer. */
struct xfrm_state {
-#ifdef CONFIG_NET_NS
- struct net *xs_net;
-#endif
+ possible_net_t xs_net;
union {
struct hlist_node gclist;
struct hlist_node bydst;
@@ -334,7 +332,7 @@ struct xfrm_state_afinfo {
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
int (*output)(struct sock *sk, struct sk_buff *skb);
- int (*output_finish)(struct sk_buff *skb);
+ int (*output_finish)(struct sock *sk, struct sk_buff *skb);
int (*extract_input)(struct xfrm_state *x,
struct sk_buff *skb);
int (*extract_output)(struct xfrm_state *x,
@@ -522,9 +520,7 @@ struct xfrm_policy_queue {
};
struct xfrm_policy {
-#ifdef CONFIG_NET_NS
- struct net *xp_net;
-#endif
+ possible_net_t xp_net;
struct hlist_node bydst;
struct hlist_node byidx;
@@ -1029,7 +1025,7 @@ xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
case AF_INET:
return addr->a4 == 0;
case AF_INET6:
- return ipv6_addr_any((struct in6_addr *)&addr->a6);
+ return ipv6_addr_any(&addr->in6);
}
return 0;
}
@@ -1242,8 +1238,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
break;
case AF_INET6:
- *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
- *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
+ saddr->in6 = fl->u.ip6.saddr;
+ daddr->in6 = fl->u.ip6.daddr;
break;
}
}
@@ -1507,7 +1503,7 @@ int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
int xfrm_output_resume(struct sk_buff *skb, int err);
-int xfrm_output(struct sk_buff *skb);
+int xfrm_output(struct sock *sk, struct sk_buff *skb);
int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_extract_header(struct sk_buff *skb);
@@ -1528,7 +1524,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_output(struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1553,7 +1549,7 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_output(struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ce55906b54a0..ac54c27a2bfd 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
}
/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
-static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
{
if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
struct sockaddr_in *out_in = (struct sockaddr_in *)out;
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
out_in->sin6_family = AF_INET6;
memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
}
- return 0;
}
static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30647d5..39ed2d2fbd51 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,7 +105,8 @@ enum ib_cm_data_size {
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
IB_CM_SIDR_REP_INFO_LENGTH = 72,
- IB_CM_COMPARE_SIZE = 64
+ /* compare done u32 at a time */
+ IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
};
struct ib_cm_id;
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
struct ib_cm_compare_data {
- u8 data[IB_CM_COMPARE_SIZE];
- u8 mask[IB_CM_COMPARE_SIZE];
+ u32 data[IB_CM_COMPARE_SIZE];
+ u32 mask[IB_CM_COMPARE_SIZE];
};
/**
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index 928b2775e992..fda31673a562 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_remote_info_cb - Process remote connecting peer address info, which
+ * the port mapper has received from the connecting peer
+ *
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Stores the IPv4/IPv6 address info in a hash table
+ */
+int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *);
+
+/**
* iwpm_mapping_error_cb - Process port mapper notification for error
*
* @skb:
@@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_get_remote_info - Get the remote connecting peer address info
+ *
+ * @mapped_loc_addr: Mapped local address of the listening peer
+ * @mapped_rem_addr: Mapped remote address of the connecting peer
+ * @remote_addr: To store the remote address of the connecting peer
+ * @nl_client: The index of the netlink client
+ *
+ * The remote address info is retrieved and provided to the client in
+ * the remote_addr. After that it is removed from the hash table
+ */
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr, u8 nl_client);
+
+/**
* iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
* info in a hash table
* @local_addr: Local ip/tcp address
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index f2902ef7ab75..4dce116bfd80 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -47,7 +47,8 @@ struct rxrpc_header {
#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */
#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection security response (CLNT->SRVR) */
#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */
-#define RXRPC_N_PACKET_TYPES 9 /* number of packet types (incl type 0) */
+#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
+#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
uint8_t flags; /* packet flags */
#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 183eaab7c380..96e3f56519e7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -36,5 +36,6 @@
for sequential scan */
#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
+#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
#endif
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 007a0bc01b74..784bc2c0929f 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -135,6 +135,7 @@ enum fc_vport_state {
#define FC_PORTSPEED_40GBIT 0x100
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
+#define FC_PORTSPEED_25GBIT 0x800
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index d315a08d6c6d..0e9d75b49bed 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -608,7 +608,9 @@ struct ac97_quirk {
int type; /* quirk type above */
};
-int snd_ac97_tune_hardware(struct snd_ac97 *ac97, struct ac97_quirk *quirk, const char *override);
+int snd_ac97_tune_hardware(struct snd_ac97 *ac97,
+ const struct ac97_quirk *quirk,
+ const char *override);
int snd_ac97_set_rate(struct snd_ac97 *ac97, int reg, unsigned int rate);
/*
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index f48089d364c5..fa1d05512c09 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -70,7 +70,7 @@ struct snd_compr_runtime {
* @device: device pointer
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
- * @next_track: has userspace signall next track transistion, true when set
+ * @next_track: has userspace signal next track transition, true when set
* @private_data: pointer to DSP private data
*/
struct snd_compr_stream {
@@ -95,7 +95,7 @@ struct snd_compr_stream {
* and the stream properties
* @get_params: retrieve the codec parameters, mandatory
* @set_metadata: Set the metadata values for a stream
- * @get_metadata: retreives the requested metadata values from stream
+ * @get_metadata: retrieves the requested metadata values from stream
* @trigger: Trigger operations like start, pause, resume, drain, stop.
* This callback is mandatory
* @pointer: Retrieve current h/w pointer information. Mandatory
diff --git a/include/sound/control.h b/include/sound/control.h
index 75f3054023f7..95aad6d3fd1a 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -227,7 +227,7 @@ snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
* Add a virtual slave control to the given master.
* Unlike snd_ctl_add_slave(), the element added via this function
* is supposed to have volatile values, and get callback is called
- * at each time quried from the master.
+ * at each time queried from the master.
*
* When the control peeks the hardware values directly and the value
* can be changed by other means than the put callback of the element,
diff --git a/include/sound/core.h b/include/sound/core.h
index da5748289968..b12931f513f4 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -278,7 +278,8 @@ int snd_device_new(struct snd_card *card, enum snd_device_type type,
void *device_data, struct snd_device_ops *ops);
int snd_device_register(struct snd_card *card, void *device_data);
int snd_device_register_all(struct snd_card *card);
-int snd_device_disconnect_all(struct snd_card *card);
+void snd_device_disconnect(struct snd_card *card, void *device_data);
+void snd_device_disconnect_all(struct snd_card *card);
void snd_device_free(struct snd_card *card, void *device_data);
void snd_device_free_all(struct snd_card *card);
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 26f406e0f673..3a8fca9409a7 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -1,5 +1,5 @@
/*
- * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+ * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 0de95ccb92cf..5bd134651f5e 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -41,7 +41,8 @@
#define EMUPAGESIZE 4096
#define MAXREQVOICES 8
-#define MAXPAGES 8192
+#define MAXPAGES0 4096 /* 32 bit mode */
+#define MAXPAGES1 8192 /* 31 bit mode */
#define RESERVED 0
#define NUM_MIDI 16
#define NUM_G 64 /* use all channels */
@@ -50,8 +51,7 @@
/* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
#define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
-#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
- /* See ALSA bug #1276 - rlrevell */
+#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
#define TMEMSIZE 256*1024
#define TMEMSIZEREG 4
@@ -466,8 +466,11 @@
#define MAPB 0x0d /* Cache map B */
-#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
-#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
+#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
+#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
+
+#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
+#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
/* 0x0e, 0x0f: Not used */
@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
unsigned short model; /* subsystem id */
unsigned int card_type; /* EMU10K1_CARD_* */
unsigned int ecard_ctrl; /* ecard control bits */
+ unsigned int address_mode; /* address mode */
unsigned long dma_mask; /* PCI DMA mask */
unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
new file mode 100644
index 000000000000..53a18b3635e2
--- /dev/null
+++ b/include/sound/hda_regmap.h
@@ -0,0 +1,217 @@
+/*
+ * HD-audio regmap helpers
+ */
+
+#ifndef __SOUND_HDA_REGMAP_H
+#define __SOUND_HDA_REGMAP_H
+
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/hdaudio.h>
+
+int snd_hdac_regmap_init(struct hdac_device *codec);
+void snd_hdac_regmap_exit(struct hdac_device *codec);
+int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
+ unsigned int verb);
+int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int *val);
+int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int val);
+int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int mask, unsigned int val);
+
+/**
+ * snd_hdac_regmap_encode_verb - encode the verb to a pseudo register
+ * @nid: widget NID
+ * @verb: codec verb
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_verb(nid, verb) \
+ (((verb) << 8) | 0x80000 | ((unsigned int)(nid) << 20))
+
+/**
+ * snd_hdac_regmap_encode_amp - encode the AMP verb to a pseudo register
+ * @nid: widget NID
+ * @ch: channel (left = 0, right = 1)
+ * @dir: direction (#HDA_INPUT, #HDA_OUTPUT)
+ * @idx: input index value
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_amp(nid, ch, dir, idx) \
+ (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
+ ((ch) ? AC_AMP_GET_RIGHT : AC_AMP_GET_LEFT) | \
+ ((dir) == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT) | \
+ (idx))
+
+/**
+ * snd_hdac_regmap_encode_amp_stereo - encode a pseudo register for stereo AMPs
+ * @nid: widget NID
+ * @dir: direction (#HDA_INPUT, #HDA_OUTPUT)
+ * @idx: input index value
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_amp_stereo(nid, dir, idx) \
+ (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
+ AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT | /* both bits set! */ \
+ ((dir) == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT) | \
+ (idx))
+
+/**
+ * snd_hdac_regmap_write - Write a verb with caching
+ * @nid: codec NID
+ * @verb: verb to write
+ * @val: value to write
+ *
+ * For writing an amp value, use snd_hdac_regmap_update_amp().
+ */
+static inline int
+snd_hdac_regmap_write(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_write_raw(codec, cmd, val);
+}
+
+/**
+ * snd_hdac_regmap_update - Update a verb value with caching
+ * @nid: codec NID
+ * @verb: verb to update
+ * @mask: bit mask to update
+ * @val: value to update
+ *
+ * For updating an amp value, use snd_hdac_regmap_update_amp().
+ */
+static inline int
+snd_hdac_regmap_update(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int mask,
+ unsigned int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_read - Read a verb with caching
+ * @nid: codec NID
+ * @verb: verb to read
+ * @val: pointer to store the value
+ *
+ * For reading an amp value, use snd_hdac_regmap_get_amp().
+ */
+static inline int
+snd_hdac_regmap_read(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int *val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_read_raw(codec, cmd, val);
+}
+
+/**
+ * snd_hdac_regmap_get_amp - Read AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP value
+ * @ch: channel (left=0 or right=1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ *
+ * Read the AMP value. The volume is between 0 and 0x7f; 0x80 is the mute bit.
+ * Returns the value or a negative error.
+ */
+static inline int
+snd_hdac_regmap_get_amp(struct hdac_device *codec, hda_nid_t nid,
+ int ch, int dir, int idx)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+ int err, val;
+
+ err = snd_hdac_regmap_read_raw(codec, cmd, &val);
+ return err < 0 ? err : val;
+}
+
+/**
+ * snd_hdac_regmap_update_amp - update the AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to update the AMP value
+ * @ch: channel (left=0 or right=1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the AMP value with a bit mask.
+ * Returns 0 if the value is unchanged, 1 if changed, or a negative error.
+ */
+static inline int
+snd_hdac_regmap_update_amp(struct hdac_device *codec, hda_nid_t nid,
+ int ch, int dir, int idx, int mask, int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_get_amp_stereo - Read stereo AMP values
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP values
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ *
+ * Read stereo AMP values. The lower byte is left, the upper byte is right.
+ * Returns the value or a negative error code.
+ */
+static inline int
+snd_hdac_regmap_get_amp_stereo(struct hdac_device *codec, hda_nid_t nid,
+ int dir, int idx)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp_stereo(nid, dir, idx);
+ int err, val;
+
+ err = snd_hdac_regmap_read_raw(codec, cmd, &val);
+ return err < 0 ? err : val;
+}
+
+/**
+ * snd_hdac_regmap_update_amp_stereo - update the stereo AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to update the AMP values
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the stereo AMP value with a bit mask.
+ * The lower byte is left, the upper byte is right.
+ * Returns 0 if the value is unchanged, 1 if changed, or a negative error.
+ */
+static inline int
+snd_hdac_regmap_update_amp_stereo(struct hdac_device *codec, hda_nid_t nid,
+ int dir, int idx, int mask, int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp_stereo(nid, dir, idx);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_sync_node - sync the widget node attributes
+ * @codec: HD-audio codec
+ * @nid: NID to sync
+ */
+static inline void
+snd_hdac_regmap_sync_node(struct hdac_device *codec, hda_nid_t nid)
+{
+ regcache_mark_dirty(codec->regmap);
+ regcache_sync_region(codec->regmap, nid << 20, ((nid + 1) << 20) - 1);
+}
+
+#endif /* __SOUND_HDA_REGMAP_H */
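
A minimal sketch of how a codec driver might combine these helpers, assuming a valid codec with codec->regmap already set up and the usual AC_VERB_ and AC_PINCTL_ constants from <sound/hda_verbs.h>; the NID and pin-control value below are hypothetical:

#include <sound/hdaudio.h>
#include <sound/hda_regmap.h>

/* hedged example: mute the output amps of a hypothetical pin widget 0x14 */
static int example_mute_pin(struct hdac_device *codec)
{
        hda_nid_t nid = 0x14;
        int ch, err;

        /* cached verb write: enable the pin output */
        err = snd_hdac_regmap_write(codec, nid, AC_VERB_SET_PIN_WIDGET_CONTROL,
                                    AC_PINCTL_OUT_EN);
        if (err < 0)
                return err;

        /* set the mute bit (0x80) on the left and right output amps */
        for (ch = 0; ch < 2; ch++) {
                err = snd_hdac_regmap_update_amp(codec, nid, ch, HDA_OUTPUT,
                                                 0, 0x80, 0x80);
                if (err < 0)
                        return err;
        }
        return 0;
}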
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
new file mode 100644
index 000000000000..2a8aa9dfb83d
--- /dev/null
+++ b/include/sound/hdaudio.h
@@ -0,0 +1,247 @@
+/*
+ * HD-audio core stuff
+ */
+
+#ifndef __SOUND_HDAUDIO_H
+#define __SOUND_HDAUDIO_H
+
+#include <linux/device.h>
+#include <sound/hda_verbs.h>
+
+/* codec node id */
+typedef u16 hda_nid_t;
+
+struct hdac_bus;
+struct hdac_device;
+struct hdac_driver;
+struct hdac_widget_tree;
+
+/*
+ * exported bus type
+ */
+extern struct bus_type snd_hda_bus_type;
+
+/*
+ * generic arrays
+ */
+struct snd_array {
+ unsigned int used;
+ unsigned int alloced;
+ unsigned int elem_size;
+ unsigned int alloc_align;
+ void *list;
+};
+
+/*
+ * HD-audio codec base device
+ */
+struct hdac_device {
+ struct device dev;
+ int type;
+ struct hdac_bus *bus;
+ unsigned int addr; /* codec address */
+ struct list_head list; /* list entry for bus codec_list */
+
+ hda_nid_t afg; /* AFG node id */
+ hda_nid_t mfg; /* MFG node id */
+
+ /* ids */
+ unsigned int vendor_id;
+ unsigned int subsystem_id;
+ unsigned int revision_id;
+ unsigned int afg_function_id;
+ unsigned int mfg_function_id;
+ unsigned int afg_unsol:1;
+ unsigned int mfg_unsol:1;
+
+ unsigned int power_caps; /* FG power caps */
+
+ const char *vendor_name; /* codec vendor name */
+ const char *chip_name; /* codec chip name */
+
+ /* verb exec op override */
+ int (*exec_verb)(struct hdac_device *dev, unsigned int cmd,
+ unsigned int flags, unsigned int *res);
+
+ /* widgets */
+ unsigned int num_nodes;
+ hda_nid_t start_nid, end_nid;
+
+ /* misc flags */
+ atomic_t in_pm; /* suspend/resume being performed */
+
+ /* sysfs */
+ struct hdac_widget_tree *widgets;
+
+ /* regmap */
+ struct regmap *regmap;
+ struct snd_array vendor_verbs;
+ bool lazy_cache:1; /* don't wake up for writes */
+ bool caps_overwriting:1; /* caps overwrite in progress */
+ bool cache_coef:1; /* cache COEF read/write too */
+};
+
+/* device/driver type used for matching */
+enum {
+ HDA_DEV_CORE,
+ HDA_DEV_LEGACY,
+};
+
+/* direction */
+enum {
+ HDA_INPUT, HDA_OUTPUT
+};
+
+#define dev_to_hdac_dev(_dev) container_of(_dev, struct hdac_device, dev)
+
+int snd_hdac_device_init(struct hdac_device *dev, struct hdac_bus *bus,
+ const char *name, unsigned int addr);
+void snd_hdac_device_exit(struct hdac_device *dev);
+int snd_hdac_device_register(struct hdac_device *codec);
+void snd_hdac_device_unregister(struct hdac_device *codec);
+
+int snd_hdac_refresh_widgets(struct hdac_device *codec);
+
+unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm);
+int snd_hdac_exec_verb(struct hdac_device *codec, unsigned int cmd,
+ unsigned int flags, unsigned int *res);
+int snd_hdac_read(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm, unsigned int *res);
+int _snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid, int parm,
+ unsigned int *res);
+int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
+ int parm);
+int snd_hdac_override_parm(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int parm, unsigned int val);
+int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *conn_list, int max_conns);
+int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *start_id);
+
+/**
+ * snd_hdac_read_parm - read a codec parameter
+ * @codec: the codec object
+ * @nid: NID to read a parameter
+ * @parm: parameter to read
+ *
+ * Returns -1 for error. If you need to distinguish the error more
+ * strictly, use _snd_hdac_read_parm() directly.
+ */
+static inline int snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid,
+ int parm)
+{
+ unsigned int val;
+
+ return _snd_hdac_read_parm(codec, nid, parm, &val) < 0 ? -1 : val;
+}
+
+#ifdef CONFIG_PM
+void snd_hdac_power_up(struct hdac_device *codec);
+void snd_hdac_power_down(struct hdac_device *codec);
+void snd_hdac_power_up_pm(struct hdac_device *codec);
+void snd_hdac_power_down_pm(struct hdac_device *codec);
+#else
+static inline void snd_hdac_power_up(struct hdac_device *codec) {}
+static inline void snd_hdac_power_down(struct hdac_device *codec) {}
+static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
+static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+#endif
+
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_driver {
+ struct device_driver driver;
+ int type;
+ int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
+ void (*unsol_event)(struct hdac_device *dev, unsigned int event);
+};
+
+#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
+
+/*
+ * HD-audio bus base driver
+ */
+struct hdac_bus_ops {
+ /* send a single command */
+ int (*command)(struct hdac_bus *bus, unsigned int cmd);
+ /* get a response from the last command */
+ int (*get_response)(struct hdac_bus *bus, unsigned int addr,
+ unsigned int *res);
+};
+
+#define HDA_UNSOL_QUEUE_SIZE 64
+
+struct hdac_bus {
+ struct device *dev;
+ const struct hdac_bus_ops *ops;
+
+ /* codec linked list */
+ struct list_head codec_list;
+ unsigned int num_codecs;
+
+ /* link caddr -> codec */
+ struct hdac_device *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
+
+ /* unsolicited event queue */
+ u32 unsol_queue[HDA_UNSOL_QUEUE_SIZE * 2]; /* ring buffer */
+ unsigned int unsol_rp, unsol_wp;
+ struct work_struct unsol_work;
+
+ /* bit flags of powered codecs */
+ unsigned long codec_powered;
+
+ /* flags */
+ bool sync_write:1; /* sync after verb write */
+
+ /* locks */
+ struct mutex cmd_mutex;
+};
+
+int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
+ const struct hdac_bus_ops *ops);
+void snd_hdac_bus_exit(struct hdac_bus *bus);
+int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res);
+int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res);
+void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
+
+int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec);
+void snd_hdac_bus_remove_device(struct hdac_bus *bus,
+ struct hdac_device *codec);
+
+static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
+{
+ set_bit(codec->addr, &codec->bus->codec_powered);
+}
+
+static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
+{
+ clear_bit(codec->addr, &codec->bus->codec_powered);
+}
+
+/*
+ * generic array helpers
+ */
+void *snd_array_new(struct snd_array *array);
+void snd_array_free(struct snd_array *array);
+static inline void snd_array_init(struct snd_array *array, unsigned int size,
+ unsigned int align)
+{
+ array->elem_size = size;
+ array->alloc_align = align;
+}
+
+static inline void *snd_array_elem(struct snd_array *array, unsigned int idx)
+{
+ return array->list + idx * array->elem_size;
+}
+
+static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
+{
+ return (unsigned long)(ptr - array->list) / array->elem_size;
+}
+
+#endif /* __SOUND_HDAUDIO_H */
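
The snd_array helpers above implement a simple growing array of fixed-size elements. A hedged usage sketch (the element type and field names are made up for illustration; note that snd_array_init() only sets the element size and alignment, so the struct itself must start out zeroed):

#include <linux/errno.h>
#include <linux/printk.h>
#include <sound/hdaudio.h>

struct example_item {
        hda_nid_t nid;
        unsigned int caps;
};

/* append one element; snd_array_new() grows the backing storage as needed */
static int example_collect(struct snd_array *array, hda_nid_t nid,
                           unsigned int caps)
{
        struct example_item *item = snd_array_new(array);

        if (!item)
                return -ENOMEM;
        item->nid = nid;
        item->caps = caps;
        return 0;
}

static void example_use(void)
{
        struct snd_array array = {};    /* zero-initialized container */
        unsigned int i;

        snd_array_init(&array, sizeof(struct example_item), 16);
        example_collect(&array, 0x02, 0);
        for (i = 0; i < array.used; i++) {
                struct example_item *item = snd_array_elem(&array, i);

                pr_debug("nid 0x%x caps 0x%x\n", item->nid, item->caps);
        }
        snd_array_free(&array); /* releases the backing storage */
}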
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c0ddb7e69c28..0cb7f3f5df7b 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -60,6 +60,9 @@ struct snd_pcm_hardware {
struct snd_pcm_substream;
+struct snd_pcm_audio_tstamp_config; /* definitions further down */
+struct snd_pcm_audio_tstamp_report;
+
struct snd_pcm_ops {
int (*open)(struct snd_pcm_substream *substream);
int (*close)(struct snd_pcm_substream *substream);
@@ -71,8 +74,10 @@ struct snd_pcm_ops {
int (*prepare)(struct snd_pcm_substream *substream);
int (*trigger)(struct snd_pcm_substream *substream, int cmd);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream);
- int (*wall_clock)(struct snd_pcm_substream *substream,
- struct timespec *audio_ts);
+ int (*get_time_info)(struct snd_pcm_substream *substream,
+ struct timespec *system_ts, struct timespec *audio_ts,
+ struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+ struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*copy)(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos,
void __user *buf, snd_pcm_uframes_t count);
@@ -281,6 +286,58 @@ struct snd_pcm_hw_constraint_ranges {
struct snd_pcm_hwptr_log;
+/*
+ * userspace-provided audio timestamp config to the kernel;
+ * the structure is for internal use only and is filled by a dedicated unpack routine
+ */
+struct snd_pcm_audio_tstamp_config {
+ /* 5 of max 16 bits used */
+ u32 type_requested:4;
+ u32 report_delay:1; /* add total delay to A/D or D/A */
+};
+
+static inline void snd_pcm_unpack_audio_tstamp_config(__u32 data,
+ struct snd_pcm_audio_tstamp_config *config)
+{
+ config->type_requested = data & 0xF;
+ config->report_delay = (data >> 4) & 1;
+}
+
+/*
+ * kernel-provided audio timestamp report to user space;
+ * the structure is for internal use only and is read by a dedicated pack routine
+ */
+struct snd_pcm_audio_tstamp_report {
+ /* 6 of max 16 bits used for bit-fields */
+
+ /* for backwards compatibility */
+ u32 valid:1;
+
+ /* actual type if hardware could not support requested timestamp */
+ u32 actual_type:4;
+
+ /* accuracy represented in ns units */
+ u32 accuracy_report:1; /* 0 if accuracy unknown, 1 if accuracy field is valid */
+ u32 accuracy; /* up to 4.29s, will be packed in separate field */
+};
+
+static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy,
+ const struct snd_pcm_audio_tstamp_report *report)
+{
+ u32 tmp;
+
+ tmp = report->accuracy_report;
+ tmp <<= 4;
+ tmp |= report->actual_type;
+ tmp <<= 1;
+ tmp |= report->valid;
+
+ *data &= 0xffff; /* zero-clear MSBs */
+ *data |= (tmp << 16);
+ *accuracy = report->accuracy;
+}
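
A hedged sketch of how the two helpers above mirror each other across the 32-bit words exchanged with user space (the concrete values are arbitrary):

/* example only: round-trip the bit-fields through the packed words */
static void example_tstamp_roundtrip(void)
{
        struct snd_pcm_audio_tstamp_config cfg;
        struct snd_pcm_audio_tstamp_report rep = {
                .valid = 1,
                .actual_type = 2,       /* arbitrary type value */
                .accuracy_report = 1,
                .accuracy = 1000,       /* 1 us, expressed in ns */
        };
        __u32 data = 0x13;              /* type_requested = 3, report_delay = 1 */
        __u32 accuracy;

        snd_pcm_unpack_audio_tstamp_config(data, &cfg);
        /* now cfg.type_requested == 3 and cfg.report_delay == 1 */

        snd_pcm_pack_audio_tstamp_report(&data, &accuracy, &rep);
        /* the report lands in the upper 16 bits of data; accuracy == 1000 */
}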
+
+
struct snd_pcm_runtime {
/* -- Status -- */
struct snd_pcm_substream *trigger_master;
@@ -361,6 +418,11 @@ struct snd_pcm_runtime {
struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
+ /* -- audio timestamp config -- */
+ struct snd_pcm_audio_tstamp_config audio_tstamp_config;
+ struct snd_pcm_audio_tstamp_report audio_tstamp_report;
+ struct timespec driver_tstamp;
+
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
/* -- OSS things -- */
struct snd_pcm_oss_runtime oss;
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 3c45f3924ba7..c704357775fc 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -366,4 +366,11 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
return snd_pcm_format_physical_width(params_format(p));
}
+static inline void
+params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
+{
+ snd_mask_set(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT),
+ (__force int)fmt);
+}
+
#endif /* __SOUND_PCM_PARAMS_H */
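
For instance, the new params_set_format() can be paired with snd_mask_none() to force a single sample format, e.g. in a machine driver's hw_params fixup; a hedged sketch (the format choice is arbitrary):

#include <sound/pcm_params.h>

/* example only: restrict the FORMAT mask to S16_LE */
static void example_force_s16(struct snd_pcm_hw_params *params)
{
        snd_mask_none(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT));
        params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
}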
diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h
index bd311197a3b5..b7d60510819b 100644
--- a/include/sound/rt5670.h
+++ b/include/sound/rt5670.h
@@ -14,6 +14,7 @@
struct rt5670_platform_data {
int jd_mode;
bool in2_diff;
+ bool dev_gpio;
bool dmic_en;
unsigned int dmic1_data_pin;
diff --git a/include/sound/seq_device.h b/include/sound/seq_device.h
index 2b5f24cc7548..ddc0d504cf39 100644
--- a/include/sound/seq_device.h
+++ b/include/sound/seq_device.h
@@ -25,29 +25,26 @@
* registered device information
*/
-#define ID_LEN 32
-
-/* status flag */
-#define SNDRV_SEQ_DEVICE_FREE 0
-#define SNDRV_SEQ_DEVICE_REGISTERED 1
-
struct snd_seq_device {
/* device info */
struct snd_card *card; /* sound card */
int device; /* device number */
- char id[ID_LEN]; /* driver id */
+ const char *id; /* driver id */
char name[80]; /* device name */
int argsize; /* size of the argument */
void *driver_data; /* private data for driver */
- int status; /* flag - read only */
void *private_data; /* private data for the caller */
void (*private_free)(struct snd_seq_device *device);
- struct list_head list; /* link to next device */
+ struct device dev;
};
+#define to_seq_dev(_dev) \
+ container_of(_dev, struct snd_seq_device, dev)
+
+/* sequencer driver */
/* driver operators
- * init_device:
+ * probe:
* Initialize the device with given parameters.
* Typically,
* 1. call snd_hwdep_new
@@ -55,25 +52,40 @@ struct snd_seq_device {
* 3. call snd_hwdep_register
* 4. store the instance to dev->driver_data pointer.
*
- * free_device:
+ * remove:
* Release the private data.
* Typically, call snd_device_free(dev->card, dev->driver_data)
*/
-struct snd_seq_dev_ops {
- int (*init_device)(struct snd_seq_device *dev);
- int (*free_device)(struct snd_seq_device *dev);
+struct snd_seq_driver {
+ struct device_driver driver;
+ char *id;
+ int argsize;
};
+#define to_seq_drv(_drv) \
+ container_of(_drv, struct snd_seq_driver, driver)
+
/*
* prototypes
*/
+#ifdef CONFIG_MODULES
void snd_seq_device_load_drivers(void);
-int snd_seq_device_new(struct snd_card *card, int device, char *id, int argsize, struct snd_seq_device **result);
-int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, int argsize);
-int snd_seq_device_unregister_driver(char *id);
+#else
+#define snd_seq_device_load_drivers()
+#endif
+int snd_seq_device_new(struct snd_card *card, int device, const char *id,
+ int argsize, struct snd_seq_device **result);
#define SNDRV_SEQ_DEVICE_ARGPTR(dev) (void *)((char *)(dev) + sizeof(struct snd_seq_device))
+int __must_check __snd_seq_driver_register(struct snd_seq_driver *drv,
+ struct module *mod);
+#define snd_seq_driver_register(drv) \
+ __snd_seq_driver_register(drv, THIS_MODULE)
+void snd_seq_driver_unregister(struct snd_seq_driver *drv);
+
+#define module_snd_seq_driver(drv) \
+ module_driver(drv, snd_seq_driver_register, snd_seq_driver_unregister)
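
With this driver-model based interface, a sequencer client driver boils down to a probe/remove pair plus a one-line registration; a hedged sketch with hypothetical names and id string:

#include <linux/device.h>
#include <linux/module.h>
#include <sound/seq_device.h>

static int example_seq_probe(struct device *dev)
{
        struct snd_seq_device *sdev = to_seq_dev(dev);

        /* create the real device here and stash it in sdev->driver_data */
        dev_info(dev, "probing %s\n", sdev->name);
        return 0;
}

static int example_seq_remove(struct device *dev)
{
        /* release whatever was allocated in probe */
        return 0;
}

static struct snd_seq_driver example_seq_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .probe = example_seq_probe,
                .remove = example_seq_remove,
        },
        .id = "example-seq",    /* hypothetical id string */
        .argsize = 0,
};

module_snd_seq_driver(example_seq_driver);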
/*
* id strings for generic devices
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 18a2ac58b88f..feb58d455560 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -99,13 +99,9 @@ int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp,
int snd_seq_event_port_detach(int client, int port);
#ifdef CONFIG_MODULES
-void snd_seq_autoload_lock(void);
-void snd_seq_autoload_unlock(void);
void snd_seq_autoload_init(void);
-#define snd_seq_autoload_exit() snd_seq_autoload_lock()
+void snd_seq_autoload_exit(void);
#else
-#define snd_seq_autoload_lock()
-#define snd_seq_autoload_unlock()
#define snd_seq_autoload_init()
#define snd_seq_autoload_exit()
#endif
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index 1255ddb1d3e2..b9b4f289fe6b 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -16,7 +16,6 @@
struct asoc_simple_dai {
const char *name;
- unsigned int fmt;
unsigned int sysclk;
int slots;
int slot_width;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 8d7416e46861..1065095c6973 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -287,7 +287,7 @@ struct device;
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
#define SOC_DAPM_ENUM(xname, xenum) \
@@ -378,6 +378,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
const struct snd_soc_pcm_stream *params,
+ unsigned int num_params,
struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
@@ -440,7 +441,6 @@ void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list);
-struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
@@ -531,6 +531,8 @@ struct snd_soc_dapm_widget {
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
const struct snd_soc_pcm_stream *params; /* params for dai links */
+ unsigned int num_params; /* number of params for dai links */
+ unsigned int params_select; /* currently selected param for dai link */
/* dapm control */
int reg; /* negative reg = no direct dapm */
@@ -586,8 +588,6 @@ struct snd_soc_dapm_update {
/* DAPM context */
struct snd_soc_dapm_context {
enum snd_soc_bias_level bias_level;
- enum snd_soc_bias_level suspend_bias_level;
- struct delayed_work delayed_work;
unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
/* Go to BIAS_OFF in suspend if the DAPM context is idle */
unsigned int suspend_bias_off:1;
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 98f2ade0266e..806059052bfc 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -135,7 +135,7 @@ void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream,
/* internal use only */
int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
-int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
+void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
int soc_dpcm_runtime_update(struct snd_soc_card *);
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 0d1ade195628..f6226914acfe 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -387,8 +387,20 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
int snd_soc_register_card(struct snd_soc_card *card);
int snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
+#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
int snd_soc_resume(struct device *dev);
+#else
+static inline int snd_soc_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int snd_soc_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
int snd_soc_poweroff(struct device *dev);
int snd_soc_register_platform(struct device *dev,
const struct snd_soc_platform_driver *platform_drv);
@@ -450,8 +462,10 @@ int soc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
/* Jack reporting */
-int snd_soc_jack_new(struct snd_soc_codec *codec, const char *id, int type,
- struct snd_soc_jack *jack);
+int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins,
+ unsigned int num_pins);
+
void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_pin *pins);
@@ -659,7 +673,7 @@ struct snd_soc_jack_gpio {
struct snd_soc_jack {
struct mutex mutex;
struct snd_jack *jack;
- struct snd_soc_codec *codec;
+ struct snd_soc_card *card;
struct list_head pins;
int status;
struct blocking_notifier_head notifier;
@@ -941,6 +955,7 @@ struct snd_soc_dai_link {
int be_id; /* optional ID for machine driver BE identification */
const struct snd_soc_pcm_stream *params;
+ unsigned int num_params;
unsigned int dai_fmt; /* format to set on init */
@@ -954,6 +969,9 @@ struct snd_soc_dai_link {
unsigned int symmetric_channels:1;
unsigned int symmetric_samplebits:1;
+ /* Mark this PCM as using non-atomic ops */
+ bool nonatomic;
+
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int no_pcm:1;
@@ -1071,11 +1089,16 @@ struct snd_soc_card {
/*
* Card-specific routes and widgets.
+ * Note: of_dapm_xxx is for Device Tree; otherwise for driver built-in.
*/
const struct snd_soc_dapm_widget *dapm_widgets;
int num_dapm_widgets;
const struct snd_soc_dapm_route *dapm_routes;
int num_dapm_routes;
+ const struct snd_soc_dapm_widget *of_dapm_widgets;
+ int num_of_dapm_widgets;
+ const struct snd_soc_dapm_route *of_dapm_routes;
+ int num_of_dapm_routes;
bool fully_routed;
struct work_struct deferred_resume_work;
@@ -1258,6 +1281,19 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
return component->dapm_ptr;
}
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
+ * @kcontrol: The kcontrol
+ *
+ * This function must only be used on DAPM contexts that are known to be part of
+ * a CODEC (e.g. in a CODEC driver). Otherwise the behavior is undefined.
+ */
+static inline struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(
+ struct snd_kcontrol *kcontrol)
+{
+ return snd_soc_dapm_to_codec(snd_soc_dapm_kcontrol_dapm(kcontrol));
+}
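
The helper lets a CODEC driver's DAPM kcontrol callbacks reach their own codec instance; a hedged sketch of a put handler (the register address is hypothetical):

/* example only: a DAPM kcontrol put handler resolving its CODEC */
static int example_dapm_put(struct snd_kcontrol *kcontrol,
                            struct snd_ctl_elem_value *ucontrol)
{
        struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);

        /* write the user-supplied value to a hypothetical codec register */
        return snd_soc_write(codec, 0x10, ucontrol->value.integer.value[0]);
}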
+
/* codec IO */
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -1469,7 +1505,7 @@ static inline struct snd_soc_codec *snd_soc_kcontrol_codec(
}
/**
- * snd_soc_kcontrol_platform() - Returns the platform that registerd the control
+ * snd_soc_kcontrol_platform() - Returns the platform that registered the control
* @kcontrol: The control for which to get the platform
*
* Note: This function will only work correctly if the control has been
diff --git a/include/sound/spear_dma.h b/include/sound/spear_dma.h
index 65aca51fe255..e290de4e7e82 100644
--- a/include/sound/spear_dma.h
+++ b/include/sound/spear_dma.h
@@ -1,7 +1,7 @@
/*
* linux/spear_dma.h
*
-* Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+* Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index d3583d3ee193..54e7af301888 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -20,6 +20,8 @@
#define ISCSIT_MIN_TAGS 16
#define ISCSIT_EXTRA_TAGS 8
#define ISCSIT_TCP_BACKLOG 256
+#define ISCSI_RX_THREAD_NAME "iscsi_trx"
+#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -60,6 +62,7 @@
#define TA_CACHE_CORE_NPS 0
/* T10 protection information disabled by default */
#define TA_DEFAULT_T10_PI 0
+#define TA_DEFAULT_FABRIC_PROT_TYPE 0
#define ISCSI_IOV_DATA_BUFFER 5
@@ -600,8 +603,11 @@ struct iscsi_conn {
struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
struct iscsi_session *sess;
- /* Pointer to thread_set in use for this conn's threads */
- struct iscsi_thread_set *thread_set;
+ int bitmap_id;
+ int rx_thread_active;
+ struct task_struct *rx_thread;
+ int tx_thread_active;
+ struct task_struct *tx_thread;
/* list_head for session connection list */
struct list_head conn_list;
} ____cacheline_aligned;
@@ -767,6 +773,7 @@ struct iscsi_tpg_attrib {
u32 demo_mode_discovery;
u32 default_erl;
u8 t10_pi;
+ u32 fabric_prot_type;
struct iscsi_portal_group *tpg;
};
@@ -871,10 +878,10 @@ struct iscsit_global {
/* Unique identifier used for the authentication daemon */
u32 auth_id;
u32 inactive_ts;
- /* Thread Set bitmap count */
- int ts_bitmap_count;
+#define ISCSIT_BITMAP_BITS 262144
/* Thread Set bitmap pointer */
unsigned long *ts_bitmap;
+ spinlock_t ts_bitmap_lock;
/* Used for iSCSI discovery session authentication */
struct iscsi_node_acl discovery_acl;
struct iscsi_portal_group *discovery_tpg;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 672150b6aaf5..480e9f82dfea 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -165,10 +165,8 @@ enum se_cmd_flags_table {
SCF_SEND_DELAYED_TAS = 0x00004000,
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
- SCF_ACK_KREF = 0x00040000,
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
- SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -520,11 +518,11 @@ struct se_cmd {
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
struct kref cmd_kref;
- struct target_core_fabric_ops *se_tfo;
+ const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
u32, enum dma_data_direction);
- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -591,6 +589,7 @@ struct se_node_acl {
bool acl_stop:1;
u32 queue_depth;
u32 acl_index;
+ enum target_prot_type saved_prot_type;
#define MAX_ACL_TAG_SIZE 64
char acl_tag[MAX_ACL_TAG_SIZE];
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
@@ -616,6 +615,7 @@ struct se_session {
unsigned sess_tearing_down:1;
u64 sess_bin_isid;
enum target_prot_op sup_prot_ops;
+ enum target_prot_type sess_prot_type;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
@@ -890,7 +890,7 @@ struct se_portal_group {
/* List of TCM sessions associated wth this TPG */
struct list_head tpg_sess_list;
/* Pointer to $FABRIC_MOD dependent code */
- struct target_core_fabric_ops *se_tpg_tfo;
+ const struct target_core_fabric_ops *se_tpg_tfo;
struct se_wwn *se_tpg_wwn;
struct config_group tpg_group;
struct config_group *tpg_default_groups[7];
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
index e0801386e4dc..25bb04c4209e 100644
--- a/include/target/target_core_configfs.h
+++ b/include/target/target_core_configfs.h
@@ -5,12 +5,6 @@
#define TARGET_CORE_NAME_MAX_LEN 64
#define TARGET_FABRIC_NAME_SIZE 32
-extern struct target_fabric_configfs *target_fabric_configfs_init(
- struct module *, const char *);
-extern void target_fabric_configfs_free(struct target_fabric_configfs *);
-extern int target_fabric_configfs_register(struct target_fabric_configfs *);
-extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
-
struct target_fabric_configfs_template {
struct config_item_type tfc_discovery_cit;
struct config_item_type tfc_wwn_cit;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 22a4e98eec80..17c7f5ac7ea0 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -2,6 +2,8 @@
#define TARGET_CORE_FABRIC_H
struct target_core_fabric_ops {
+ struct module *module;
+ const char *name;
struct configfs_subsystem *tf_subsys;
char *(*get_fabric_name)(void);
u8 (*get_fabric_proto_ident)(struct se_portal_group *);
@@ -27,6 +29,14 @@ struct target_core_fabric_ops {
* inquiry response
*/
int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
+ /*
+ * Optionally used as a configfs tunable to determine when
+ * target-core should signal the PROTECT=1 feature bit for
+ * backends that don't support T10-PI, so that either fabric
+ * HW offload or target-core emulation performs the associated
+ * WRITE_STRIP and READ_INSERT operations.
+ */
+ int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
struct se_node_acl *(*tpg_alloc_fabric_acl)(
struct se_portal_group *);
void (*tpg_release_fabric_acl)(struct se_portal_group *,
@@ -82,8 +92,23 @@ struct target_core_fabric_ops {
struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
struct config_group *, const char *);
void (*fabric_drop_nodeacl)(struct se_node_acl *);
+
+ struct configfs_attribute **tfc_discovery_attrs;
+ struct configfs_attribute **tfc_wwn_attrs;
+ struct configfs_attribute **tfc_tpg_base_attrs;
+ struct configfs_attribute **tfc_tpg_np_base_attrs;
+ struct configfs_attribute **tfc_tpg_attrib_attrs;
+ struct configfs_attribute **tfc_tpg_auth_attrs;
+ struct configfs_attribute **tfc_tpg_param_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_base_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_param_attrs;
};
+int target_register_template(const struct target_core_fabric_ops *fo);
+void target_unregister_template(const struct target_core_fabric_ops *fo);
+
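
Under the new interface a fabric module supplies a const ops table (including the configfs attribute arrays above) and registers it once; a hedged sketch with hypothetical names, showing only the fields visible in this header (a real table must also fill in the remaining mandatory callbacks):

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static char *example_get_fabric_name(void)
{
        return "example";
}

static const struct target_core_fabric_ops example_ops = {
        .module                 = THIS_MODULE,
        .name                   = "example",
        .get_fabric_name        = example_get_fabric_name,
        /* ... remaining callbacks and tfc_*_attrs arrays go here ... */
};

static int __init example_init(void)
{
        return target_register_template(&example_ops);
}
module_init(example_init);

static void __exit example_exit(void)
{
        target_unregister_template(&example_ops);
}
module_exit(example_exit);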
struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
@@ -95,13 +120,15 @@ void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void target_get_session(struct se_session *);
void target_put_session(struct se_session *);
+ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
-void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
+void transport_init_se_cmd(struct se_cmd *,
+ const struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
@@ -153,8 +180,8 @@ int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
struct se_node_acl *, const char *);
-int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
- struct se_portal_group *, void *, int);
+int core_tpg_register(const struct target_core_fabric_ops *,
+ struct se_wwn *, struct se_portal_group *, void *, int);
int core_tpg_deregister(struct se_portal_group *);
/* SAS helpers */
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h
index b32a14905cfa..7a0649c09e79 100644
--- a/include/target/target_core_fabric_configfs.h
+++ b/include/target/target_core_fabric_configfs.h
@@ -90,6 +90,11 @@ static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \
_fabric##_tpg_store_##_name);
+#define TF_TPG_BASE_ATTR_RO(_fabric, _name) \
+static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ _fabric##_tpg_show_##_name);
+
CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
#define TF_WWN_ATTR(_fabric, _name, _mode) \
static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index a0666362c111..633ee9ee9778 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -6,76 +6,95 @@
#include <linux/tracepoint.h>
+#define P9_MSG_T \
+ EM( P9_TLERROR, "P9_TLERROR" ) \
+ EM( P9_RLERROR, "P9_RLERROR" ) \
+ EM( P9_TSTATFS, "P9_TSTATFS" ) \
+ EM( P9_RSTATFS, "P9_RSTATFS" ) \
+ EM( P9_TLOPEN, "P9_TLOPEN" ) \
+ EM( P9_RLOPEN, "P9_RLOPEN" ) \
+ EM( P9_TLCREATE, "P9_TLCREATE" ) \
+ EM( P9_RLCREATE, "P9_RLCREATE" ) \
+ EM( P9_TSYMLINK, "P9_TSYMLINK" ) \
+ EM( P9_RSYMLINK, "P9_RSYMLINK" ) \
+ EM( P9_TMKNOD, "P9_TMKNOD" ) \
+ EM( P9_RMKNOD, "P9_RMKNOD" ) \
+ EM( P9_TRENAME, "P9_TRENAME" ) \
+ EM( P9_RRENAME, "P9_RRENAME" ) \
+ EM( P9_TREADLINK, "P9_TREADLINK" ) \
+ EM( P9_RREADLINK, "P9_RREADLINK" ) \
+ EM( P9_TGETATTR, "P9_TGETATTR" ) \
+ EM( P9_RGETATTR, "P9_RGETATTR" ) \
+ EM( P9_TSETATTR, "P9_TSETATTR" ) \
+ EM( P9_RSETATTR, "P9_RSETATTR" ) \
+ EM( P9_TXATTRWALK, "P9_TXATTRWALK" ) \
+ EM( P9_RXATTRWALK, "P9_RXATTRWALK" ) \
+ EM( P9_TXATTRCREATE, "P9_TXATTRCREATE" ) \
+ EM( P9_RXATTRCREATE, "P9_RXATTRCREATE" ) \
+ EM( P9_TREADDIR, "P9_TREADDIR" ) \
+ EM( P9_RREADDIR, "P9_RREADDIR" ) \
+ EM( P9_TFSYNC, "P9_TFSYNC" ) \
+ EM( P9_RFSYNC, "P9_RFSYNC" ) \
+ EM( P9_TLOCK, "P9_TLOCK" ) \
+ EM( P9_RLOCK, "P9_RLOCK" ) \
+ EM( P9_TGETLOCK, "P9_TGETLOCK" ) \
+ EM( P9_RGETLOCK, "P9_RGETLOCK" ) \
+ EM( P9_TLINK, "P9_TLINK" ) \
+ EM( P9_RLINK, "P9_RLINK" ) \
+ EM( P9_TMKDIR, "P9_TMKDIR" ) \
+ EM( P9_RMKDIR, "P9_RMKDIR" ) \
+ EM( P9_TRENAMEAT, "P9_TRENAMEAT" ) \
+ EM( P9_RRENAMEAT, "P9_RRENAMEAT" ) \
+ EM( P9_TUNLINKAT, "P9_TUNLINKAT" ) \
+ EM( P9_RUNLINKAT, "P9_RUNLINKAT" ) \
+ EM( P9_TVERSION, "P9_TVERSION" ) \
+ EM( P9_RVERSION, "P9_RVERSION" ) \
+ EM( P9_TAUTH, "P9_TAUTH" ) \
+ EM( P9_RAUTH, "P9_RAUTH" ) \
+ EM( P9_TATTACH, "P9_TATTACH" ) \
+ EM( P9_RATTACH, "P9_RATTACH" ) \
+ EM( P9_TERROR, "P9_TERROR" ) \
+ EM( P9_RERROR, "P9_RERROR" ) \
+ EM( P9_TFLUSH, "P9_TFLUSH" ) \
+ EM( P9_RFLUSH, "P9_RFLUSH" ) \
+ EM( P9_TWALK, "P9_TWALK" ) \
+ EM( P9_RWALK, "P9_RWALK" ) \
+ EM( P9_TOPEN, "P9_TOPEN" ) \
+ EM( P9_ROPEN, "P9_ROPEN" ) \
+ EM( P9_TCREATE, "P9_TCREATE" ) \
+ EM( P9_RCREATE, "P9_RCREATE" ) \
+ EM( P9_TREAD, "P9_TREAD" ) \
+ EM( P9_RREAD, "P9_RREAD" ) \
+ EM( P9_TWRITE, "P9_TWRITE" ) \
+ EM( P9_RWRITE, "P9_RWRITE" ) \
+ EM( P9_TCLUNK, "P9_TCLUNK" ) \
+ EM( P9_RCLUNK, "P9_RCLUNK" ) \
+ EM( P9_TREMOVE, "P9_TREMOVE" ) \
+ EM( P9_RREMOVE, "P9_RREMOVE" ) \
+ EM( P9_TSTAT, "P9_TSTAT" ) \
+ EM( P9_RSTAT, "P9_RSTAT" ) \
+ EM( P9_TWSTAT, "P9_TWSTAT" ) \
+ EMe(P9_RWSTAT, "P9_RWSTAT" )
+
+/* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+P9_MSG_T
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
#define show_9p_op(type) \
- __print_symbolic(type, \
- { P9_TLERROR, "P9_TLERROR" }, \
- { P9_RLERROR, "P9_RLERROR" }, \
- { P9_TSTATFS, "P9_TSTATFS" }, \
- { P9_RSTATFS, "P9_RSTATFS" }, \
- { P9_TLOPEN, "P9_TLOPEN" }, \
- { P9_RLOPEN, "P9_RLOPEN" }, \
- { P9_TLCREATE, "P9_TLCREATE" }, \
- { P9_RLCREATE, "P9_RLCREATE" }, \
- { P9_TSYMLINK, "P9_TSYMLINK" }, \
- { P9_RSYMLINK, "P9_RSYMLINK" }, \
- { P9_TMKNOD, "P9_TMKNOD" }, \
- { P9_RMKNOD, "P9_RMKNOD" }, \
- { P9_TRENAME, "P9_TRENAME" }, \
- { P9_RRENAME, "P9_RRENAME" }, \
- { P9_TREADLINK, "P9_TREADLINK" }, \
- { P9_RREADLINK, "P9_RREADLINK" }, \
- { P9_TGETATTR, "P9_TGETATTR" }, \
- { P9_RGETATTR, "P9_RGETATTR" }, \
- { P9_TSETATTR, "P9_TSETATTR" }, \
- { P9_RSETATTR, "P9_RSETATTR" }, \
- { P9_TXATTRWALK, "P9_TXATTRWALK" }, \
- { P9_RXATTRWALK, "P9_RXATTRWALK" }, \
- { P9_TXATTRCREATE, "P9_TXATTRCREATE" }, \
- { P9_RXATTRCREATE, "P9_RXATTRCREATE" }, \
- { P9_TREADDIR, "P9_TREADDIR" }, \
- { P9_RREADDIR, "P9_RREADDIR" }, \
- { P9_TFSYNC, "P9_TFSYNC" }, \
- { P9_RFSYNC, "P9_RFSYNC" }, \
- { P9_TLOCK, "P9_TLOCK" }, \
- { P9_RLOCK, "P9_RLOCK" }, \
- { P9_TGETLOCK, "P9_TGETLOCK" }, \
- { P9_RGETLOCK, "P9_RGETLOCK" }, \
- { P9_TLINK, "P9_TLINK" }, \
- { P9_RLINK, "P9_RLINK" }, \
- { P9_TMKDIR, "P9_TMKDIR" }, \
- { P9_RMKDIR, "P9_RMKDIR" }, \
- { P9_TRENAMEAT, "P9_TRENAMEAT" }, \
- { P9_RRENAMEAT, "P9_RRENAMEAT" }, \
- { P9_TUNLINKAT, "P9_TUNLINKAT" }, \
- { P9_RUNLINKAT, "P9_RUNLINKAT" }, \
- { P9_TVERSION, "P9_TVERSION" }, \
- { P9_RVERSION, "P9_RVERSION" }, \
- { P9_TAUTH, "P9_TAUTH" }, \
- { P9_RAUTH, "P9_RAUTH" }, \
- { P9_TATTACH, "P9_TATTACH" }, \
- { P9_RATTACH, "P9_RATTACH" }, \
- { P9_TERROR, "P9_TERROR" }, \
- { P9_RERROR, "P9_RERROR" }, \
- { P9_TFLUSH, "P9_TFLUSH" }, \
- { P9_RFLUSH, "P9_RFLUSH" }, \
- { P9_TWALK, "P9_TWALK" }, \
- { P9_RWALK, "P9_RWALK" }, \
- { P9_TOPEN, "P9_TOPEN" }, \
- { P9_ROPEN, "P9_ROPEN" }, \
- { P9_TCREATE, "P9_TCREATE" }, \
- { P9_RCREATE, "P9_RCREATE" }, \
- { P9_TREAD, "P9_TREAD" }, \
- { P9_RREAD, "P9_RREAD" }, \
- { P9_TWRITE, "P9_TWRITE" }, \
- { P9_RWRITE, "P9_RWRITE" }, \
- { P9_TCLUNK, "P9_TCLUNK" }, \
- { P9_RCLUNK, "P9_RCLUNK" }, \
- { P9_TREMOVE, "P9_TREMOVE" }, \
- { P9_RREMOVE, "P9_RREMOVE" }, \
- { P9_TSTAT, "P9_TSTAT" }, \
- { P9_RSTAT, "P9_RSTAT" }, \
- { P9_TWSTAT, "P9_TWSTAT" }, \
- { P9_RWSTAT, "P9_RWSTAT" })
+ __print_symbolic(type, P9_MSG_T)
TRACE_EVENT(9p_client_req,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 1faecea101f3..7f79cf459591 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -407,10 +407,10 @@ TRACE_EVENT(btrfs_sync_file,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
__entry->ino = inode->i_ino;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
__entry->datasync = datasync;
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
@@ -962,7 +962,7 @@ TRACE_EVENT(alloc_extent_state,
__entry->ip = IP
),
- TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+ TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
show_gfp_flags(__entry->mask), (void *)__entry->ip)
);
@@ -982,7 +982,7 @@ TRACE_EVENT(free_extent_state,
__entry->ip = IP
),
- TP_printk(" state=%p; caller = %pF", __entry->state,
+ TP_printk(" state=%p; caller = %pS", __entry->state,
(void *)__entry->ip)
);
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
new file mode 100644
index 000000000000..758607226bfd
--- /dev/null
+++ b/include/trace/events/clk.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM clk
+
+#if !defined(_TRACE_CLK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CLK_H
+
+#include <linux/tracepoint.h>
+
+struct clk_core;
+
+DECLARE_EVENT_CLASS(clk,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(clk, clk_enable,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_enable_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_disable,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_disable_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_prepare,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_prepare_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_unprepare,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_unprepare_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DECLARE_EVENT_CLASS(clk_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field(unsigned long, rate )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->rate = rate;
+ ),
+
+ TP_printk("%s %lu", __get_str(name), (unsigned long)__entry->rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_rate_complete,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DECLARE_EVENT_CLASS(clk_parent,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __string( pname, parent->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __assign_str(pname, parent->name);
+ ),
+
+ TP_printk("%s %s", __get_str(name), __get_str(pname))
+);
+
+DEFINE_EVENT(clk_parent, clk_set_parent,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent)
+);
+
+DEFINE_EVENT(clk_parent, clk_set_parent_complete,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent)
+);
+
+DECLARE_EVENT_CLASS(clk_phase,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field( int, phase )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->phase = phase;
+ ),
+
+ TP_printk("%s %d", __get_str(name), (int)__entry->phase)
+);
+
+DEFINE_EVENT(clk_phase, clk_set_phase,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase)
+);
+
+DEFINE_EVENT(clk_phase, clk_set_phase_complete,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase)
+);
+
+#endif /* _TRACE_CLK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
new file mode 100644
index 000000000000..d7cd961720a7
--- /dev/null
+++ b/include/trace/events/cma.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cma
+
+#if !defined(_TRACE_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CMA_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cma_alloc,
+
+ TP_PROTO(unsigned long pfn, const struct page *page,
+ unsigned int count, unsigned int align),
+
+ TP_ARGS(pfn, page, count, align),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned int, count)
+ __field(unsigned int, align)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ __entry->align = align;
+ ),
+
+ TP_printk("pfn=%lx page=%p count=%u align=%u",
+ __entry->pfn,
+ __entry->page,
+ __entry->count,
+ __entry->align)
+);
+
+TRACE_EVENT(cma_release,
+
+ TP_PROTO(unsigned long pfn, const struct page *page,
+ unsigned int count),
+
+ TP_ARGS(pfn, page, count),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ ),
+
+ TP_printk("pfn=%lx page=%p count=%u",
+ __entry->pfn,
+ __entry->page,
+ __entry->count)
+);
+
+#endif /* _TRACE_CMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
index 6797b9de90ed..fc733d28117a 100644
--- a/include/trace/events/ext3.h
+++ b/include/trace/events/ext3.h
@@ -144,7 +144,7 @@ TRACE_EVENT(ext3_mark_inode_dirty,
__entry->ip = IP;
),
- TP_printk("dev %d,%d ino %lu caller %pF",
+ TP_printk("dev %d,%d ino %lu caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (void *)__entry->ip)
);
@@ -439,10 +439,10 @@ TRACE_EVENT(ext3_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
),
TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
@@ -710,9 +710,9 @@ TRACE_EVENT(ext3_unlink_enter,
TP_fast_assign(
__entry->parent = parent->i_ino;
- __entry->ino = dentry->d_inode->i_ino;
- __entry->size = dentry->d_inode->i_size;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
+ __entry->size = d_inode(dentry)->i_size;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu size %lld parent %ld",
@@ -734,8 +734,8 @@ TRACE_EVENT(ext3_unlink_exit,
),
TP_fast_assign(
- __entry->ino = dentry->d_inode->i_ino;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
__entry->ret = ret;
),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 6e5abd6d38a2..08ec3dd27630 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -240,7 +240,7 @@ TRACE_EVENT(ext4_mark_inode_dirty,
__entry->ip = IP;
),
- TP_printk("dev %d,%d ino %lu caller %pF",
+ TP_printk("dev %d,%d ino %lu caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (void *)__entry->ip)
);
@@ -872,10 +872,10 @@ TRACE_EVENT(ext4_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
),
TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
@@ -1453,10 +1453,10 @@ TRACE_EVENT(ext4_unlink_enter,
),
TP_fast_assign(
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->parent = parent->i_ino;
- __entry->size = dentry->d_inode->i_size;
+ __entry->size = d_inode(dentry)->i_size;
),
TP_printk("dev %d,%d ino %lu size %lld parent %lu",
@@ -1477,8 +1477,8 @@ TRACE_EVENT(ext4_unlink_exit,
),
TP_fast_assign(
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->ret = ret;
),
@@ -1762,7 +1762,7 @@ TRACE_EVENT(ext4_journal_start,
__entry->rsv_blocks = rsv_blocks;
),
- TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF",
+ TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
);
@@ -1784,7 +1784,7 @@ TRACE_EVENT(ext4_journal_start_reserved,
__entry->blocks = blocks;
),
- TP_printk("dev %d,%d blocks, %d caller %pF",
+ TP_printk("dev %d,%d blocks, %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->blocks, (void *)__entry->ip)
);
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 5422dbfaf97d..e202dec22e1d 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -9,12 +9,46 @@
#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev)
#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino
+TRACE_DEFINE_ENUM(NODE);
+TRACE_DEFINE_ENUM(DATA);
+TRACE_DEFINE_ENUM(META);
+TRACE_DEFINE_ENUM(META_FLUSH);
+TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
+TRACE_DEFINE_ENUM(CURSEG_WARM_DATA);
+TRACE_DEFINE_ENUM(CURSEG_COLD_DATA);
+TRACE_DEFINE_ENUM(CURSEG_HOT_NODE);
+TRACE_DEFINE_ENUM(CURSEG_WARM_NODE);
+TRACE_DEFINE_ENUM(CURSEG_COLD_NODE);
+TRACE_DEFINE_ENUM(NO_CHECK_TYPE);
+TRACE_DEFINE_ENUM(GC_GREEDY);
+TRACE_DEFINE_ENUM(GC_CB);
+TRACE_DEFINE_ENUM(FG_GC);
+TRACE_DEFINE_ENUM(BG_GC);
+TRACE_DEFINE_ENUM(LFS);
+TRACE_DEFINE_ENUM(SSR);
+TRACE_DEFINE_ENUM(__REQ_RAHEAD);
+TRACE_DEFINE_ENUM(__REQ_WRITE);
+TRACE_DEFINE_ENUM(__REQ_SYNC);
+TRACE_DEFINE_ENUM(__REQ_NOIDLE);
+TRACE_DEFINE_ENUM(__REQ_FLUSH);
+TRACE_DEFINE_ENUM(__REQ_FUA);
+TRACE_DEFINE_ENUM(__REQ_PRIO);
+TRACE_DEFINE_ENUM(__REQ_META);
+TRACE_DEFINE_ENUM(CP_UMOUNT);
+TRACE_DEFINE_ENUM(CP_FASTBOOT);
+TRACE_DEFINE_ENUM(CP_SYNC);
+TRACE_DEFINE_ENUM(CP_DISCARD);
+
#define show_block_type(type) \
__print_symbolic(type, \
{ NODE, "NODE" }, \
{ DATA, "DATA" }, \
{ META, "META" }, \
- { META_FLUSH, "META_FLUSH" })
+ { META_FLUSH, "META_FLUSH" }, \
+ { INMEM, "INMEM" }, \
+ { INMEM_DROP, "INMEM_DROP" }, \
+ { IPU, "IN-PLACE" }, \
+ { OPU, "OUT-OF-PLACE" })
#define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA))
#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
@@ -74,6 +108,7 @@
{ CP_UMOUNT, "Umount" }, \
{ CP_FASTBOOT, "Fastboot" }, \
{ CP_SYNC, "Sync" }, \
+ { CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" })
struct victim_sel_policy;
@@ -854,6 +889,13 @@ DEFINE_EVENT(f2fs__page, f2fs_writepage,
TP_ARGS(page, type)
);
+DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
DEFINE_EVENT(f2fs__page, f2fs_readpage,
TP_PROTO(struct page *page, int type),
@@ -875,6 +917,20 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
TP_ARGS(page, type)
);
+DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
TRACE_EVENT(f2fs_writepages,
TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type),
@@ -1011,6 +1067,140 @@ TRACE_EVENT(f2fs_issue_flush,
__entry->nobarrier ? "skip (nobarrier)" : "issue",
__entry->flush_merge ? " with flush_merge" : "")
);
+
+TRACE_EVENT(f2fs_lookup_extent_tree_start,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs),
+
+ TP_ARGS(inode, pgofs),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u",
+ show_dev_ino(__entry),
+ __entry->pgofs)
+);
+
+TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs,
+ struct extent_node *en),
+
+ TP_ARGS(inode, pgofs, en),
+
+ TP_CONDITION(en),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(unsigned int, fofs)
+ __field(u32, blk)
+ __field(unsigned int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->fofs = en->ei.fofs;
+ __entry->blk = en->ei.blk;
+ __entry->len = en->ei.len;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ "ext_info(fofs: %u, blk: %u, len: %u)",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->fofs,
+ __entry->blk,
+ __entry->len)
+);
+
+TRACE_EVENT(f2fs_update_extent_tree,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr),
+
+ TP_ARGS(inode, pgofs, blkaddr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(u32, blk)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->blk = blkaddr;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, blkaddr = %u",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->blk)
+);
+
+TRACE_EVENT(f2fs_shrink_extent_tree,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
+ unsigned int tree_cnt),
+
+ TP_ARGS(sbi, node_cnt, tree_cnt),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, node_cnt)
+ __field(unsigned int, tree_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->node_cnt = node_cnt;
+ __entry->tree_cnt = tree_cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
+ show_dev(__entry),
+ __entry->node_cnt,
+ __entry->tree_cnt)
+);
+
+TRACE_EVENT(f2fs_destroy_extent_tree,
+
+ TP_PROTO(struct inode *inode, unsigned int node_cnt),
+
+ TP_ARGS(inode, node_cnt),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, node_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->node_cnt = node_cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u",
+ show_dev_ino(__entry),
+ __entry->node_cnt)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 0421f49a20f7..42febb6bc1d5 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -18,14 +18,14 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_ARGS(page),
TP_STRUCT__entry(
- __field(struct page *, page)
+ __field(unsigned long, pfn)
__field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->i_ino = page->mapping->host->i_ino;
__entry->index = page->index;
if (page->mapping->host->i_sb)
@@ -37,8 +37,8 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->index << PAGE_SHIFT)
);
diff --git a/include/trace/events/intel-sst.h b/include/trace/events/intel-sst.h
index 76c72d3f1902..edc24e6dea1b 100644
--- a/include/trace/events/intel-sst.h
+++ b/include/trace/events/intel-sst.h
@@ -1,6 +1,13 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel-sst
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR intel_sst
+
#if !defined(_TRACE_INTEL_SST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_SST_H
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 3608bebd3d9c..ff8f6c091a15 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -9,19 +9,34 @@
struct irqaction;
struct softirq_action;
-#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
+#define SOFTIRQ_NAME_LIST \
+ softirq_name(HI) \
+ softirq_name(TIMER) \
+ softirq_name(NET_TX) \
+ softirq_name(NET_RX) \
+ softirq_name(BLOCK) \
+ softirq_name(BLOCK_IOPOLL) \
+ softirq_name(TASKLET) \
+ softirq_name(SCHED) \
+ softirq_name(HRTIMER) \
+ softirq_name_end(RCU)
+
+#undef softirq_name
+#undef softirq_name_end
+
+#define softirq_name(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ);
+#define softirq_name_end(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ);
+
+SOFTIRQ_NAME_LIST
+
+#undef softirq_name
+#undef softirq_name_end
+
+#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq },
+#define softirq_name_end(sirq) { sirq##_SOFTIRQ, #sirq }
+
#define show_softirq_name(val) \
- __print_symbolic(val, \
- softirq_name(HI), \
- softirq_name(TIMER), \
- softirq_name(NET_TX), \
- softirq_name(NET_RX), \
- softirq_name(BLOCK), \
- softirq_name(BLOCK_IOPOLL), \
- softirq_name(TASKLET), \
- softirq_name(SCHED), \
- softirq_name(HRTIMER), \
- softirq_name(RCU))
+ __print_symbolic(val, SOFTIRQ_NAME_LIST)
/**
* irq_handler_entry - called immediately before the irq action handler
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 4ad10baecd4d..81ea59812117 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -154,18 +154,18 @@ TRACE_EVENT(mm_page_free,
TP_ARGS(page, order),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->order = order;
),
TP_printk("page=%p pfn=%lu order=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->order)
);
@@ -176,18 +176,18 @@ TRACE_EVENT(mm_page_free_batched,
TP_ARGS(page, cold),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( int, cold )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->cold = cold;
),
TP_printk("page=%p pfn=%lu order=0 cold=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->cold)
);
@@ -199,22 +199,22 @@ TRACE_EVENT(mm_page_alloc,
TP_ARGS(page, order, gfp_flags, migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
__field( gfp_t, gfp_flags )
__field( int, migratetype )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->gfp_flags = gfp_flags;
__entry->migratetype = migratetype;
),
TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
- __entry->page,
- __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+ __entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
show_gfp_flags(__entry->gfp_flags))
@@ -227,20 +227,20 @@ DECLARE_EVENT_CLASS(mm_page,
TP_ARGS(page, order, migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
__field( int, migratetype )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->migratetype = migratetype;
),
TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
- __entry->page,
- __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+ __entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
__entry->order == 0)
@@ -260,7 +260,7 @@ DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
TP_ARGS(page, order, migratetype),
TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
- __entry->page, page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn), __entry->pfn,
__entry->order, __entry->migratetype)
);
@@ -275,7 +275,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
alloc_migratetype, fallback_migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( int, alloc_order )
__field( int, fallback_order )
__field( int, alloc_migratetype )
@@ -284,7 +284,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->alloc_order = alloc_order;
__entry->fallback_order = fallback_order;
__entry->alloc_migratetype = alloc_migratetype;
@@ -294,8 +294,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
),
TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->alloc_order,
__entry->fallback_order,
pageblock_order,
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index dd2b5467d905..539b25a76111 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -7,18 +7,40 @@
#include <linux/tracepoint.h>
#define MIGRATE_MODE \
- {MIGRATE_ASYNC, "MIGRATE_ASYNC"}, \
- {MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT"}, \
- {MIGRATE_SYNC, "MIGRATE_SYNC"}
+ EM( MIGRATE_ASYNC, "MIGRATE_ASYNC") \
+ EM( MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT") \
+ EMe(MIGRATE_SYNC, "MIGRATE_SYNC")
+
#define MIGRATE_REASON \
- {MR_COMPACTION, "compaction"}, \
- {MR_MEMORY_FAILURE, "memory_failure"}, \
- {MR_MEMORY_HOTPLUG, "memory_hotplug"}, \
- {MR_SYSCALL, "syscall_or_cpuset"}, \
- {MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \
- {MR_NUMA_MISPLACED, "numa_misplaced"}, \
- {MR_CMA, "cma"}
+ EM( MR_COMPACTION, "compaction") \
+ EM( MR_MEMORY_FAILURE, "memory_failure") \
+ EM( MR_MEMORY_HOTPLUG, "memory_hotplug") \
+ EM( MR_SYSCALL, "syscall_or_cpuset") \
+ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
+ EM( MR_NUMA_MISPLACED, "numa_misplaced") \
+ EMe(MR_CMA, "cma")
+
+/*
+ * First define the enums in the above macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+MIGRATE_MODE
+MIGRATE_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
TRACE_EVENT(mm_migrate_pages,
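
The EM()/EMe() two-pass expansion used in this hunk is the same pattern applied in sunrpc.h, tlb.h, v4l2.h and writeback.h below (and, with softirq_name()/softirq_name_end(), in irq.h above): the list is expanded once with the macros defined as TRACE_DEFINE_ENUM() so the numeric enum values can be resolved in the exported event format, and once as { value, "string" } pairs for __print_symbolic(); the EMe()/_end variant exists only so the last entry does not emit a trailing comma. A reduced sketch with illustrative names (MY_LIST, MY_A, MY_B are not from the patch):

    #define MY_LIST \
        EM( MY_A, "a")  \
        EMe(MY_B, "b")

    /* pass 1: make the numeric enum values visible to userspace tools */
    #undef EM
    #undef EMe
    #define EM(a, b)    TRACE_DEFINE_ENUM(a);
    #define EMe(a, b)   TRACE_DEFINE_ENUM(a);

    MY_LIST

    /* pass 2: the same list becomes the symbol table for __print_symbolic() */
    #undef EM
    #undef EMe
    #define EM(a, b)    {a, b},
    #define EMe(a, b)   {a, b}

    #define show_my_val(val)    __print_symbolic(val, MY_LIST)
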
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 81c4c183d348..28c45997e451 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
__assign_str(name, mod->name);
),
- TP_printk("%s call_site=%pf refcnt=%d",
+ TP_printk("%s call_site=%ps refcnt=%d",
__get_str(name), (void *)__entry->ip, __entry->refcnt)
);
@@ -121,7 +121,7 @@ TRACE_EVENT(module_request,
__assign_str(name, name);
),
- TP_printk("%s wait=%d call_site=%pf",
+ TP_printk("%s wait=%d call_site=%ps",
__get_str(name), (int)__entry->wait, (void *)__entry->ip)
);
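
The %pf to %ps switch here (and %pF to %pS in random.h below) matters only on architectures with function descriptors (ia64, ppc64 ABIv1, parisc): the %pf/%pF specifiers expect a pointer to a function descriptor, while these events record a plain code address taken from _RET_IP_ or __builtin_return_address(), for which %ps/%pS is the correct form. A one-line sketch of the distinction (my_log_caller is illustrative):

    static void my_log_caller(unsigned long ip)     /* ip = _RET_IP_ of the caller */
    {
        pr_info("call_site=%ps\n", (void *)ip);     /* raw text address: use %ps/%pS */
    }
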
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index 805af6db41cc..4684de344c5d 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -22,7 +22,7 @@ TRACE_EVENT(add_device_randomness,
__entry->IP = IP;
),
- TP_printk("bytes %d caller %pF",
+ TP_printk("bytes %d caller %pS",
__entry->bytes, (void *)__entry->IP)
);
@@ -43,7 +43,7 @@ DECLARE_EVENT_CLASS(random__mix_pool_bytes,
__entry->IP = IP;
),
- TP_printk("%s pool: bytes %d caller %pF",
+ TP_printk("%s pool: bytes %d caller %pS",
__entry->pool_name, __entry->bytes, (void *)__entry->IP)
);
@@ -82,7 +82,7 @@ TRACE_EVENT(credit_entropy_bits,
),
TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
- "caller %pF", __entry->pool_name, __entry->bits,
+ "caller %pS", __entry->pool_name, __entry->bits,
__entry->entropy_count, __entry->entropy_total,
(void *)__entry->IP)
);
@@ -207,7 +207,7 @@ DECLARE_EVENT_CLASS(random__get_random_bytes,
__entry->IP = IP;
),
- TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+ TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
);
DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
@@ -242,7 +242,7 @@ DECLARE_EVENT_CLASS(random__extract_entropy,
__entry->IP = IP;
),
- TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
__entry->pool_name, __entry->nbytes, __entry->entropy_count,
(void *)__entry->IP)
);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index b9c1dc6c825a..fd1a02cb3c82 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -179,27 +179,53 @@ DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
);
+/*
+ * First define the enums in the below macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+#define RPC_SHOW_SOCKET \
+ EM( SS_FREE, "FREE" ) \
+ EM( SS_UNCONNECTED, "UNCONNECTED" ) \
+ EM( SS_CONNECTING, "CONNECTING," ) \
+ EM( SS_CONNECTED, "CONNECTED," ) \
+ EMe(SS_DISCONNECTING, "DISCONNECTING" )
+
#define rpc_show_socket_state(state) \
- __print_symbolic(state, \
- { SS_FREE, "FREE" }, \
- { SS_UNCONNECTED, "UNCONNECTED" }, \
- { SS_CONNECTING, "CONNECTING," }, \
- { SS_CONNECTED, "CONNECTED," }, \
- { SS_DISCONNECTING, "DISCONNECTING" })
+ __print_symbolic(state, RPC_SHOW_SOCKET)
+
+RPC_SHOW_SOCKET
+
+#define RPC_SHOW_SOCK \
+ EM( TCP_ESTABLISHED, "ESTABLISHED" ) \
+ EM( TCP_SYN_SENT, "SYN_SENT" ) \
+ EM( TCP_SYN_RECV, "SYN_RECV" ) \
+ EM( TCP_FIN_WAIT1, "FIN_WAIT1" ) \
+ EM( TCP_FIN_WAIT2, "FIN_WAIT2" ) \
+ EM( TCP_TIME_WAIT, "TIME_WAIT" ) \
+ EM( TCP_CLOSE, "CLOSE" ) \
+ EM( TCP_CLOSE_WAIT, "CLOSE_WAIT" ) \
+ EM( TCP_LAST_ACK, "LAST_ACK" ) \
+ EM( TCP_LISTEN, "LISTEN" ) \
+ EMe( TCP_CLOSING, "CLOSING" )
#define rpc_show_sock_state(state) \
- __print_symbolic(state, \
- { TCP_ESTABLISHED, "ESTABLISHED" }, \
- { TCP_SYN_SENT, "SYN_SENT" }, \
- { TCP_SYN_RECV, "SYN_RECV" }, \
- { TCP_FIN_WAIT1, "FIN_WAIT1" }, \
- { TCP_FIN_WAIT2, "FIN_WAIT2" }, \
- { TCP_TIME_WAIT, "TIME_WAIT" }, \
- { TCP_CLOSE, "CLOSE" }, \
- { TCP_CLOSE_WAIT, "CLOSE_WAIT" }, \
- { TCP_LAST_ACK, "LAST_ACK" }, \
- { TCP_LISTEN, "LISTEN" }, \
- { TCP_CLOSING, "CLOSING" })
+ __print_symbolic(state, RPC_SHOW_SOCK)
+
+RPC_SHOW_SOCK
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
DECLARE_EVENT_CLASS(xs_socket_event,
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 0e7635765153..4250f364a6ca 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -7,11 +7,31 @@
#include <linux/mm_types.h>
#include <linux/tracepoint.h>
-#define TLB_FLUSH_REASON \
- { TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" }, \
- { TLB_REMOTE_SHOOTDOWN, "remote shootdown" }, \
- { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
- { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
+#define TLB_FLUSH_REASON \
+ EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \
+ EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \
+ EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \
+ EMe( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" )
+
+/*
+ * First define the enums in TLB_FLUSH_REASON to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) TRACE_DEFINE_ENUM(a);
+#define EMe(a,b) TRACE_DEFINE_ENUM(a);
+
+TLB_FLUSH_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) { a, b },
+#define EMe(a,b) { a, b }
TRACE_EVENT_CONDITION(tlb_flush,
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index b9bb1f204693..20112170ff11 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -6,33 +6,58 @@
#include <linux/tracepoint.h>
-#define show_type(type) \
- __print_symbolic(type, \
- { V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" }, \
- { V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" }, \
- { V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" }, \
- { V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" }, \
- { V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" }, \
- { V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" }, \
- { V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" }, \
- { V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" },\
- { V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" },\
- { V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" }, \
- { V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" }, \
- { V4L2_BUF_TYPE_PRIVATE, "PRIVATE" })
+/* Enums require being exported to userspace, for user tool parsing */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+#define show_type(type) \
+ __print_symbolic(type, SHOW_TYPE)
+
+#define SHOW_TYPE \
+ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" ) \
+ EM( V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \
+ EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \
+ EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" )
+
+SHOW_TYPE
#define show_field(field) \
- __print_symbolic(field, \
- { V4L2_FIELD_ANY, "ANY" }, \
- { V4L2_FIELD_NONE, "NONE" }, \
- { V4L2_FIELD_TOP, "TOP" }, \
- { V4L2_FIELD_BOTTOM, "BOTTOM" }, \
- { V4L2_FIELD_INTERLACED, "INTERLACED" }, \
- { V4L2_FIELD_SEQ_TB, "SEQ_TB" }, \
- { V4L2_FIELD_SEQ_BT, "SEQ_BT" }, \
- { V4L2_FIELD_ALTERNATE, "ALTERNATE" }, \
- { V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" }, \
- { V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" })
+ __print_symbolic(field, SHOW_FIELD)
+
+#define SHOW_FIELD \
+ EM( V4L2_FIELD_ANY, "ANY" ) \
+ EM( V4L2_FIELD_NONE, "NONE" ) \
+ EM( V4L2_FIELD_TOP, "TOP" ) \
+ EM( V4L2_FIELD_BOTTOM, "BOTTOM" ) \
+ EM( V4L2_FIELD_INTERLACED, "INTERLACED" ) \
+ EM( V4L2_FIELD_SEQ_TB, "SEQ_TB" ) \
+ EM( V4L2_FIELD_SEQ_BT, "SEQ_BT" ) \
+ EM( V4L2_FIELD_ALTERNATE, "ALTERNATE" ) \
+ EM( V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" ) \
+ EMe( V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" )
+
+SHOW_FIELD
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+/* V4L2_TC_TYPE_* are macros, not enums; they do not need processing */
#define show_timecode_type(type) \
__print_symbolic(type, \
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 69590b6ffc09..f66476b96264 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -336,18 +336,18 @@ TRACE_EVENT(mm_vmscan_writepage,
TP_ARGS(page, reclaim_flags),
TP_STRUCT__entry(
- __field(struct page *, page)
+ __field(unsigned long, pfn)
__field(int, reclaim_flags)
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->reclaim_flags = reclaim_flags;
),
TP_printk("page=%p pfn=%lu flags=%s",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
show_reclaim_flags(__entry->reclaim_flags))
);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 5a14ead59696..880dd7437172 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -23,15 +23,32 @@
{I_REFERENCED, "I_REFERENCED"} \
)
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a,b) TRACE_DEFINE_ENUM(a);
+#define EMe(a,b) TRACE_DEFINE_ENUM(a);
+
#define WB_WORK_REASON \
- {WB_REASON_BACKGROUND, "background"}, \
- {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
- {WB_REASON_SYNC, "sync"}, \
- {WB_REASON_PERIODIC, "periodic"}, \
- {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
- {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
- {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
- {WB_REASON_FORKER_THREAD, "forker_thread"}
+ EM( WB_REASON_BACKGROUND, "background") \
+ EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \
+ EM( WB_REASON_SYNC, "sync") \
+ EM( WB_REASON_PERIODIC, "periodic") \
+ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
+ EM( WB_REASON_FREE_MORE_MEM, "free_more_memory") \
+ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
+ EMe(WB_REASON_FORKER_THREAD, "forker_thread")
+
+WB_WORK_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) { a, b },
+#define EMe(a,b) { a, b }
struct wb_writeback_work;
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index d06b6da5c1e3..bce990f5a35d 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -224,7 +224,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
TP_printk("pmdp %p", __entry->pmdp)
);
-#if PAGETABLE_LEVELS >= 4
+#if CONFIG_PGTABLE_LEVELS >= 4
TRACE_EVENT(xen_mmu_set_pud,
TP_PROTO(pud_t *pudp, pud_t pudval),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 41bf65f04dd9..37d4b10b111d 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -18,6 +18,34 @@
#include <linux/ftrace_event.h>
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+#define __app__(x, y) str__##x##y
+#define __app(x, y) __app__(x, y)
+
+#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
+
+#define TRACE_MAKE_SYSTEM_STR() \
+ static const char TRACE_SYSTEM_STRING[] = \
+ __stringify(TRACE_SYSTEM)
+
+TRACE_MAKE_SYSTEM_STR();
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a) \
+ static struct trace_enum_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .enum_string = #a, \
+ .enum_value = a \
+ }; \
+ static struct trace_enum_map __used \
+ __attribute__((section("_ftrace_enum_map"))) \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
* handlers for events. That is, if all events have the same
@@ -105,7 +133,6 @@
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
/*
* Stage 2 of the trace events.
*
@@ -122,6 +149,9 @@
* The size of an array is also encoded, in the higher 16 bits of <item>.
*/
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a)
+
#undef __field
#define __field(type, item)
@@ -539,7 +569,7 @@ static inline notrace int ftrace_get_offsets_##call( \
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
- * static const char print_fmt_<call>[] = <TP_printk>;
+ * static char print_fmt_<call>[] = <TP_printk>;
*
* static struct ftrace_event_class __used event_class_<template> = {
* .system = "<system>",
@@ -690,9 +720,9 @@ static inline void ftrace_test_probe_##call(void) \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static const char print_fmt_##call[] = print; \
+static char print_fmt_##call[] = print; \
static struct ftrace_event_class __used __refdata event_class_##call = { \
- .system = __stringify(TRACE_SYSTEM), \
+ .system = TRACE_SYSTEM_STRING, \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\
.raw_init = trace_event_raw_init, \
@@ -719,7 +749,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
-static const char print_fmt_##call[] = print; \
+static char print_fmt_##call[] = print; \
\
static struct ftrace_event_call __used event_##call = { \
.class = &event_class_##template, \
@@ -735,6 +765,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef TRACE_SYSTEM_VAR
#ifdef CONFIG_PERF_EVENTS
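
With this, every TRACE_DEFINE_ENUM(x) seen while an event header is included in stage 1 emits a struct trace_enum_map carrying the system name, the enum's spelling and its value, plus a pointer to it placed in the _ftrace_enum_map section, which the tracing core can walk to substitute real numbers for enum names in the exported print formats; stage 2 then redefines the macro to nothing so the same header can be re-included. A schematic of roughly what one invocation produces (the identifier spellings and MY_VAL are illustrative, not the exact token pasting):

    static struct trace_enum_map __used __initdata __my_map_MY_VAL = {
        .system      = TRACE_SYSTEM_STRING,  /* the stringified TRACE_SYSTEM */
        .enum_string = "MY_VAL",
        .enum_value  = MY_VAL,
    };
    static struct trace_enum_map __used
        __attribute__((section("_ftrace_enum_map")))
        *my_map_MY_VAL_ptr = &__my_map_MY_VAL;
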
diff --git a/include/uapi/asm-generic/errno.h b/include/uapi/asm-generic/errno.h
index 1e1ea6e6e7a5..88e0914cf2d9 100644
--- a/include/uapi/asm-generic/errno.h
+++ b/include/uapi/asm-generic/errno.h
@@ -6,7 +6,16 @@
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
-#define ENOSYS 38 /* Function not implemented */
+
+/*
+ * This error code is special: arch syscall entry code will return
+ * -ENOSYS if users try to call a syscall that doesn't exist. To keep
+ * failures of syscalls that really do exist distinguishable from
+ * failures due to attempts to use a nonexistent syscall, syscall
+ * implementations should refrain from returning -ENOSYS.
+ */
+#define ENOSYS 38 /* Invalid system call number */
+
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
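
The reworded ENOSYS comment is guidance for syscall authors: -ENOSYS now specifically means "this system call number does not exist", so a syscall that is present but cannot honour a particular request should return a different error. A hedged sketch of the convention (my_syscall, MY_VALID_FLAGS and my_feature_available() are illustrative):

    SYSCALL_DEFINE2(my_syscall, unsigned int, cmd, unsigned int, flags)
    {
        if (flags & ~MY_VALID_FLAGS)
            return -EINVAL;         /* bad request, not -ENOSYS */
        if (!my_feature_available())
            return -EOPNOTSUPP;     /* missing feature, still not -ENOSYS */
        return 0;
    }
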
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 01b2d6d0e355..ff6ef62d084b 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -630,6 +630,7 @@ struct drm_gem_open {
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index a284f11a8ef5..07735822a28f 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -129,4 +129,82 @@
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as the vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
+#define DRM_FORMAT_MOD_VENDOR_NV 0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+/* add more to the end as needed */
+
+#define fourcc_mod_code(vendor, val) \
+ ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles are
+ * 2Kb) in row-major layout. Within the tile, bytes are laid out row-major,
+ * with a platform-dependent stride. On top of that, the memory can apply a
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * This format is highly platform-specific and not useful for cross-driver
+ * sharing. It exists because, on a given platform, it uniquely identifies the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles are
+ * 2Kb) in row-major layout. Within the tile, bytes are laid out in OWORD
+ * (16 byte) chunks column-major, with a platform-dependent height. On top of
+ * that, the memory can apply a platform-dependent swizzling of some higher
+ * address bits into bit6.
+ *
+ * This format is highly platform-specific and not useful for cross-driver
+ * sharing. It exists because, on a given platform, it uniquely identifies the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major layout.
+ * Within the tile, pixels are laid out in 16 256-byte units / sub-tiles, which
+ * are arranged in four groups (two wide, two high) with column-major layout.
+ * Each group therefore consists of four 256-byte units, which are also laid
+ * out as 2x2 column-major.
+ * 256-byte units are made out of four 64-byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64-byte blocks of pixels contain four pixel rows of 16 bytes, where the
+ * width in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
#endif /* DRM_FOURCC_H */
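
A format modifier is a 64-bit token: the vendor id sits in the top 8 bits and the remaining 56 bits are vendor-defined, so the I915_FORMAT_MOD_X_TILED value above is vendor 0x01 with value 1. A small sketch of composing and decomposing such a token on top of the definitions above (MY_EXAMPLE_MOD and the helpers are illustrative):

    /* Hypothetical token: vendor SAMSUNG (0x04), vendor-defined value 7 */
    #define MY_EXAMPLE_MOD  fourcc_mod_code(SAMSUNG, 7)

    static inline __u8 my_mod_vendor(__u64 mod)
    {
        return mod >> 56;                       /* upper 8 bits */
    }

    static inline __u64 my_mod_value(__u64 mod)
    {
        return mod & 0x00ffffffffffffffULL;     /* lower 56 bits */
    }
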
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index ca788e01dab2..dbeba949462a 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -336,6 +336,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
@@ -356,10 +357,18 @@ struct drm_mode_fb_cmd2 {
* So it would consist of Y as offsets[0] and UV as
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
+ *
+ * To accommodate tiled, compressed, etc. formats, a per-plane
+ * modifier can be specified. The default value of zero
+ * indicates the "native" format as specified by the fourcc.
+ * Any other value is a vendor-specific modifier token. This allows,
+ * for example, a different tiling/swizzling pattern on different
+ * planes. See the discussion of DRM_FORMAT_MOD_xxx above.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
+ __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
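
Userspace opts into the new field by setting DRM_MODE_FB_MODIFIERS in flags (support is advertised through the DRM_CAP_ADDFB2_MODIFIERS capability added earlier in this series); with the flag clear the modifier[] array is ignored and zero keeps the fourcc's native layout. A hedged userspace sketch for a single-plane X-tiled framebuffer, assuming libdrm's drmIoctl() and that fd, handle, width, height and stride already exist; error handling is omitted:

    struct drm_mode_fb_cmd2 f = {0};

    f.width        = width;
    f.height       = height;
    f.pixel_format = DRM_FORMAT_XRGB8888;
    f.flags        = DRM_MODE_FB_MODIFIERS;     /* modifier[] is valid */
    f.handles[0]   = handle;
    f.pitches[0]   = stride;
    f.offsets[0]   = 0;
    f.modifier[0]  = I915_FORMAT_MOD_X_TILED;   /* per-plane layout token */

    drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f);
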
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6eed16b92a24..551b6737f5df 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -270,7 +270,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
-#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
@@ -347,6 +347,9 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION 30
#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
typedef struct drm_i915_getparam {
int param;
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 0d7608dc1a34..5507eead5863 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -39,6 +39,7 @@
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 50d0fb41a3bf..871e73f99a4d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1034,6 +1034,10 @@ struct drm_radeon_cs {
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
+#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
+#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
+#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
+#define RADEON_INFO_READ_REG 0x24
struct drm_radeon_info {
uint32_t request;
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index c15d781ecc0f..5391780c2b05 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -36,7 +36,8 @@ struct drm_tegra_gem_create {
struct drm_tegra_gem_mmap {
__u32 handle;
- __u32 offset;
+ __u32 pad;
+ __u64 offset;
};
struct drm_tegra_syncpt_read {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 38df23435ebb..1a0006a76b00 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -431,6 +431,7 @@ header-y += virtio_blk.h
header-y += virtio_config.h
header-y += virtio_console.h
header-y += virtio_ids.h
+header-y += virtio_input.h
header-y += virtio_net.h
header-y += virtio_pci.h
header-y += virtio_ring.h
@@ -447,5 +448,6 @@ header-y += wireless.h
header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
+header-y += xilinx-v4l2-controls.h
header-y += zorro.h
header-y += zorro_ids.h
diff --git a/include/uapi/linux/am437x-vpfe.h b/include/uapi/linux/am437x-vpfe.h
index 9b03033f9cd6..d75774317b9b 100644
--- a/include/uapi/linux/am437x-vpfe.h
+++ b/include/uapi/linux/am437x-vpfe.h
@@ -21,6 +21,8 @@
#ifndef AM437X_VPFE_USER_H
#define AM437X_VPFE_USER_H
+#include <linux/videodev2.h>
+
enum vpfe_ccdc_data_size {
VPFE_CCDC_DATA_16BITS = 0,
VPFE_CCDC_DATA_15BITS,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 45da7ec7d274..a9ebdf5701e8 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -118,8 +118,13 @@ enum bpf_map_type {
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
+ BPF_PROG_TYPE_KPROBE,
+ BPF_PROG_TYPE_SCHED_CLS,
+ BPF_PROG_TYPE_SCHED_ACT,
};
+#define BPF_PSEUDO_MAP_FD 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -151,6 +156,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
+ __u32 kern_version; /* checked when prog_type=kprobe */
};
} __attribute__((aligned(8)));
@@ -162,7 +168,64 @@ enum bpf_func_id {
BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
+ BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */
+ BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */
+ BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
+ BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
+ BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
+
+ /**
+ * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
+ * @skb: pointer to skb
+ * @offset: offset within packet from skb->mac_header
+ * @from: pointer where to copy bytes from
+ * @len: number of bytes to store into packet
+ * @flags: bit 0 - if true, recompute skb->csum
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_skb_store_bytes,
+
+ /**
+ * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where IP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_l3_csum_replace,
+
+ /**
+ * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where TCP/UDP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * bit 4 - is pseudo header
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_l4_csum_replace,
__BPF_FUNC_MAX_ID,
};
+/* user accessible mirror of in-kernel sk_buff.
+ * new fields can only be added to the end of this structure
+ */
+struct __sk_buff {
+ __u32 len;
+ __u32 pkt_type;
+ __u32 mark;
+ __u32 queue_mapping;
+ __u32 protocol;
+ __u32 vlan_present;
+ __u32 vlan_tci;
+ __u32 vlan_proto;
+ __u32 priority;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
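
struct __sk_buff is the stable, verifier-mediated view an eBPF socket or tc program gets of the kernel's sk_buff: only the listed fields can be accessed, loads are rewritten to the real offsets, and new fields may only be appended. A hedged restricted-C sketch of a socket filter using it (the section attribute is a loader convention and my_filter is illustrative; a socket filter's return value is the number of bytes to keep, 0 meaning drop):

    #include <linux/bpf.h>

    __attribute__((section("socket"), used))
    int my_filter(struct __sk_buff *skb)
    {
        /* keep at most 1500 bytes of every packet */
        return skb->len < 1500 ? skb->len : 1500;
    }

The program would be loaded with bpf(BPF_PROG_LOAD, ...) as BPF_PROG_TYPE_SOCKET_FILTER; the new kern_version field only has to be filled in for BPF_PROG_TYPE_KPROBE programs, since kprobe attachment points are not a stable ABI.
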
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 78ec76fd89a6..8735f1080385 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -57,6 +57,7 @@ enum {
CAN_RAW_LOOPBACK, /* local loopback (default:on) */
CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
+ CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
};
#endif /* !_UAPI_CAN_RAW_H */
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index e711f20dc522..6497d7933d5b 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -78,6 +78,70 @@ struct ieee_maxrate {
__u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
};
+enum dcbnl_cndd_states {
+ DCB_CNDD_RESET = 0,
+ DCB_CNDD_EDGE,
+ DCB_CNDD_INTERIOR,
+ DCB_CNDD_INTERIOR_READY,
+};
+
+/* This structure contains the IEEE 802.1Qau QCN managed object.
+ *
+ *@rpg_enable: enable QCN RP
+ *@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port
+ *@rpg_time_reset: time between rate increases if no CNMs received.
+ * given in u-seconds
+ *@rpg_byte_reset: transmitted data between rate increases if no CNMs received.
+ * given in Bytes
+ *@rpg_threshold: The number of times rpByteStage or rpTimeStage can count
+ * before RP rate control state machine advances states
+ *@rpg_max_rate: the maximum rate, in Mbits per second,
+ * at which an RP can transmit
+ *@rpg_ai_rate: The rate, in Mbits per second,
+ * used to increase rpTargetRate in the RPR_ACTIVE_INCREASE
+ *@rpg_hai_rate: The rate, in Mbits per second,
+ * used to increase rpTargetRate in the RPR_HYPER_INCREASE state
+ *@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate.
+ * rpgGd is given as log2(Gd), where Gd may only be powers of 2
+ *@rpg_min_dec_fac: The minimum factor by which the current transmit rate
+ * can be changed by reception of a CNM.
+ * value is given as percentage (1-100)
+ *@rpg_min_rate: The minimum value, in bits per second, for rate to limit
+ *@cndd_state_machine: The state of the congestion notification domain
+ * defense state machine, as defined by IEEE 802.1Qau
+ * section 32.1.1. In the interior ready state,
+ * the QCN capable hardware may add CN-TAG TLV to the
+ * outgoing traffic, to specifically identify outgoing
+ * flows.
+ */
+
+struct ieee_qcn {
+ __u8 rpg_enable[IEEE_8021QAZ_MAX_TCS];
+ __u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_gd[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS];
+};
+
+/* This structure contains the IEEE 802.1Qau QCN statistics.
+ *
+ *@rppp_rp_centiseconds: the number of RP-centiseconds accumulated
+ * by RPs at this priority level on this Port
+ *@rppp_created_rps: number of active RPs(flows) that react to CNMs
+ */
+
+struct ieee_qcn_stats {
+ __u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS];
+ __u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS];
+};
+
/* This structure contains the IEEE 802.1Qaz PFC managed object
*
* @pfc_cap: Indicates the number of traffic classes on the local device
@@ -334,6 +398,8 @@ enum ieee_attrs {
DCB_ATTR_IEEE_PEER_PFC,
DCB_ATTR_IEEE_PEER_APP,
DCB_ATTR_IEEE_MAXRATE,
+ DCB_ATTR_IEEE_QCN,
+ DCB_ATTR_IEEE_QCN_STATS,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 889f3a5b7b18..eac8c3641f39 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 30
+#define DM_VERSION_MINOR 31
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2014-12-22)"
+#define DM_VERSION_EXTRA "-ioctl (2015-3-12)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index d1197ae3723c..3e445a760f14 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -41,4 +41,21 @@
*/
#define FALLOC_FL_ZERO_RANGE 0x10
+/*
+ * FALLOC_FL_INSERT_RANGE is used to insert space within the file size without
+ * overwriting any existing data. The contents of the file beyond offset are
+ * shifted to the right by len bytes to create a hole. As such, this
+ * operation will increase the size of the file by len bytes.
+ *
+ * Different filesystems may implement different limitations on the granularity
+ * of the operation. Most will limit operations to filesystem block size
+ * boundaries, but this boundary may be larger or smaller depending on
+ * the filesystem and/or the configuration of the filesystem or file.
+ *
+ * Attempting to insert space using this flag at or beyond the end of
+ * the file is considered an illegal operation - just use ftruncate(2) or
+ * fallocate(2) with mode 0 for such operations.
+ */
+#define FALLOC_FL_INSERT_RANGE 0x20
+
#endif /* _UAPI_FALLOC_H_ */
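
A userspace sketch of the new mode: offset and len generally must be multiples of the filesystem block size, and offset has to fall strictly inside the file, since inserting at or beyond EOF is rejected as described above. fd and blksz are assumed to exist; error handling is minimal:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Insert one block-sized hole at 'off'; data at >= off shifts right. */
    static int insert_hole(int fd, off_t off, off_t blksz)
    {
        if (fallocate(fd, FALLOC_FL_INSERT_RANGE, off, blksz) < 0) {
            perror("fallocate(FALLOC_FL_INSERT_RANGE)");
            return -1;
        }
        return 0;   /* file size grew by blksz */
    }
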
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 47785d5ecf17..c97340e43dd6 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -77,9 +77,13 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_VLAN_TAG_PRESENT 48
#define SKF_AD_PAY_OFFSET 52
#define SKF_AD_RANDOM 56
-#define SKF_AD_MAX 60
-#define SKF_NET_OFF (-0x100000)
-#define SKF_LL_OFF (-0x200000)
+#define SKF_AD_VLAN_TPID 60
+#define SKF_AD_MAX 64
+#define SKF_NET_OFF (-0x100000)
+#define SKF_LL_OFF (-0x200000)
+
+#define BPF_NET_OFF SKF_NET_OFF
+#define BPF_LL_OFF SKF_LL_OFF
#endif /* _UAPI__LINUX_FILTER_H__ */
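
The new SKF_AD_VLAN_TPID ancillary offset is read from a classic BPF program with an absolute load at SKF_AD_OFF + SKF_AD_VLAN_TPID, and BPF_NET_OFF/BPF_LL_OFF are simply aliases of the existing special offsets for code shared with eBPF. A hedged userspace sketch of a filter that only accepts 802.1ad-tagged frames, assuming sock is an open packet socket and that the extension yields the TPID in host byte order:

    #include <linux/filter.h>
    #include <sys/socket.h>

    struct sock_filter insns[] = {
        /* A = ancillary vlan_tpid */
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TPID),
        /* accept if A == 0x88a8 (802.1ad), otherwise drop */
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x88a8, 0, 1),
        BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
        BPF_STMT(BPF_RET | BPF_K, 0),
    };
    struct sock_fprog prog = {
        .len    = 4,
        .filter = insns,
    };

    /* setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)); */
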
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index c303588bb767..d2947c52dc67 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -25,6 +25,7 @@ enum {
FOU_CMD_UNSPEC,
FOU_CMD_ADD,
FOU_CMD_DEL,
+ FOU_CMD_GET,
__FOU_CMD_MAX,
};
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index dea10a87dfd1..4318ab1635ce 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -50,6 +50,8 @@ enum {
#define IFA_F_PERMANENT 0x80
#define IFA_F_MANAGETEMPADDR 0x100
#define IFA_F_NOPREFIXROUTE 0x200
+#define IFA_F_MCAUTOJOIN 0x400
+#define IFA_F_STABLE_PRIVACY 0x800
struct ifa_cacheinfo {
__u32 ifa_prefered;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index dfd0bb22e554..d9cd19214b98 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -147,6 +147,7 @@ enum {
IFLA_CARRIER_CHANGES,
IFLA_PHYS_SWITCH_ID,
IFLA_LINK_NETNSID,
+ IFLA_PHYS_PORT_NAME,
__IFLA_MAX
};
@@ -215,6 +216,7 @@ enum {
enum in6_addr_gen_mode {
IN6_ADDR_GEN_MODE_EUI64,
IN6_ADDR_GEN_MODE_NONE,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY,
};
/* Bridge section */
@@ -224,6 +226,9 @@ enum {
IFLA_BR_FORWARD_DELAY,
IFLA_BR_HELLO_TIME,
IFLA_BR_MAX_AGE,
+ IFLA_BR_AGEING_TIME,
+ IFLA_BR_STP_STATE,
+ IFLA_BR_PRIORITY,
__IFLA_BR_MAX,
};
@@ -247,6 +252,7 @@ enum {
IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
IFLA_BRPORT_PROXYARP, /* proxy ARP */
IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
+ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +465,9 @@ enum {
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */
+ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query
+ * on/off switch
+ */
__IFLA_VF_MAX,
};
@@ -503,6 +512,11 @@ struct ifla_vf_link_state {
__u32 link_state;
};
+struct ifla_vf_rss_query_en {
+ __u32 vf;
+ __u32 setting;
+};
+
/* VF ports management section
*
* Nested layout of set/get msg is:
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index da2d668b8cf1..053bd102fbe0 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -99,6 +99,7 @@ struct tpacket_auxdata {
#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */
#define TP_STATUS_BLK_TMO (1 << 5)
#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
+#define TP_STATUS_CSUM_VALID (1 << 7)
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index d65c0a09efd3..c7093c75bdd6 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -143,4 +143,8 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+union tcp_cc_info {
+ struct tcpvegas_info vegas;
+ struct tcp_dctcp_info dctcp;
+};
#endif /* _UAPI_INET_DIAG_H_ */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 8038e9aa3b95..731417c025f6 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -369,7 +369,8 @@ struct input_keymap_entry {
#define KEY_MSDOS 151
#define KEY_COFFEE 152 /* AL Terminal Lock/Screensaver */
#define KEY_SCREENLOCK KEY_COFFEE
-#define KEY_DIRECTION 153
+#define KEY_ROTATE_DISPLAY 153 /* Display orientation for e.g. tablets */
+#define KEY_DIRECTION KEY_ROTATE_DISPLAY
#define KEY_CYCLEWINDOWS 154
#define KEY_MAIL 155
#define KEY_BOOKMARKS 156 /* AC Bookmarks */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index cabe95d5b461..3199243f2028 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -358,6 +358,8 @@ enum {
IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */
+ IPVS_SVC_ATTR_STATS64, /* nested attribute for service stats */
+
__IPVS_SVC_ATTR_MAX,
};
@@ -387,6 +389,8 @@ enum {
IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */
+ IPVS_DEST_ATTR_STATS64, /* nested attribute for dest stats */
+
__IPVS_DEST_ATTR_MAX,
};
@@ -410,7 +414,8 @@ enum {
/*
* Attributes used to describe service or destination entry statistics
*
- * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
+ * Used inside nested attributes IPVS_SVC_ATTR_STATS, IPVS_DEST_ATTR_STATS,
+ * IPVS_SVC_ATTR_STATS64 and IPVS_DEST_ATTR_STATS64.
*/
enum {
IPVS_STATS_ATTR_UNSPEC = 0,
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 437a6a4b125a..5efa54ae567c 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -170,6 +170,7 @@ enum {
DEVCONF_ACCEPT_RA_FROM_LOCAL,
DEVCONF_USE_OPTIMISTIC,
DEVCONF_ACCEPT_RA_MTU,
+ DEVCONF_STABLE_SECRET,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f574d7be7631..4b60056776d1 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -813,6 +813,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_MIPS_MSA 112
#define KVM_CAP_S390_INJECT_IRQ 113
#define KVM_CAP_S390_IRQ_STATE 114
+#define KVM_CAP_PPC_HWRNG 115
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 7d664ea85ebd..7b1425a6b370 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -58,6 +58,8 @@
#define STACK_END_MAGIC 0x57AC6E9D
+#define TRACEFS_MAGIC 0x74726163
+
#define V9FS_MAGIC 0x01021997
#define BDEVFS_MAGIC 0x62646576
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 23b40908be30..190d491d5b13 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -33,22 +33,32 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x100e */
+/* RGB - next is 0x1018 */
+#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004
+#define MEDIA_BUS_FMT_RGB565_1X16 0x1017
#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005
#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
+#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
+#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
+#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
+#define MEDIA_BUS_FMT_GBR888_1X24 0x1014
#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
+#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
+#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
+#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
-/* YUV (including grey) - next is 0x2024 */
+/* YUV (including grey) - next is 0x2026 */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -65,6 +75,10 @@
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
#define MEDIA_BUS_FMT_Y12_1X12 0x2013
+#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
+#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
+#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
+#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
@@ -74,16 +88,14 @@
#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b
#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d
#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
-#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
-#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
-#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
-#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
-#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
-#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
+#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
+#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
/* Bayer - next is 0x3019 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index d847c760e8f0..4e816be3de39 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -50,7 +50,14 @@ struct media_device_info {
#define MEDIA_ENT_T_DEVNODE_V4L (MEDIA_ENT_T_DEVNODE + 1)
#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2)
#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_T_DEVNODE + 3)
-#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_T_DEVNODE + 4)
+#define MEDIA_ENT_T_DEVNODE_DVB_FE (MEDIA_ENT_T_DEVNODE + 4)
+#define MEDIA_ENT_T_DEVNODE_DVB_DEMUX (MEDIA_ENT_T_DEVNODE + 5)
+#define MEDIA_ENT_T_DEVNODE_DVB_DVR (MEDIA_ENT_T_DEVNODE + 6)
+#define MEDIA_ENT_T_DEVNODE_DVB_CA (MEDIA_ENT_T_DEVNODE + 7)
+#define MEDIA_ENT_T_DEVNODE_DVB_NET (MEDIA_ENT_T_DEVNODE + 8)
+
+/* Legacy symbol. Use it to avoid userspace compilation breakages */
+#define MEDIA_ENT_T_DEVNODE_DVB MEDIA_ENT_T_DEVNODE_DVB_FE
#define MEDIA_ENT_T_V4L2_SUBDEV (2 << MEDIA_ENT_TYPE_SHIFT)
#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR (MEDIA_ENT_T_V4L2_SUBDEV + 1)
@@ -59,6 +66,8 @@ struct media_device_info {
/* A converter of analogue video to its digital representation. */
#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER (MEDIA_ENT_T_V4L2_SUBDEV + 4)
+#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER (MEDIA_ENT_T_V4L2_SUBDEV + 5)
+
#define MEDIA_ENT_FL_DEFAULT (1 << 0)
struct media_entity_desc {
@@ -78,17 +87,48 @@ struct media_entity_desc {
struct {
__u32 major;
__u32 minor;
- } v4l;
- struct {
- __u32 major;
- __u32 minor;
- } fb;
+ } dev;
+
+#if 1
+ /*
+ * TODO: this shouldn't have been added without
+ * actual drivers that use this. When the first real driver
+ * appears that sets this information, special attention
+ * should be given whether this information is 1) enough, and
+ * 2) can deal with udev rules that rename devices. The struct
+ * dev would not be sufficient for this since that does not
+ * contain the subdevice information. In addition, struct dev
+ * can only refer to a single device, and not to multiple (e.g.
+ * pcm and mixer devices).
+ *
+ * So for now mark this as a to do.
+ */
struct {
__u32 card;
__u32 device;
__u32 subdevice;
} alsa;
+#endif
+
+#if 1
+ /*
+ * DEPRECATED: previous node specifications. Kept just to
+ * avoid breaking compilation, but media_entity_desc.dev
+ * should be used instead. In particular, alsa and dvb
+ * fields below are wrong: for all devnodes, there should
+ * be just major/minor inside the struct, as this is enough
+ * to represent any devnode, no matter what type.
+ */
+ struct {
+ __u32 major;
+ __u32 minor;
+ } v4l;
+ struct {
+ __u32 major;
+ __u32 minor;
+ } fb;
int dvb;
+#endif
/* Sub-device specifications */
/* Nothing needed yet */
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index bc9abfe88c9a..139d4dd1cab8 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -31,4 +31,14 @@ struct mpls_label {
#define MPLS_LS_TTL_MASK 0x000000FF
#define MPLS_LS_TTL_SHIFT 0
+/* Reserved labels */
+#define MPLS_LABEL_IPV4NULL 0 /* RFC3032 */
+#define MPLS_LABEL_RTALERT 1 /* RFC3032 */
+#define MPLS_LABEL_IPV6NULL 2 /* RFC3032 */
+#define MPLS_LABEL_IMPLNULL 3 /* RFC3032 */
+#define MPLS_LABEL_ENTROPY 7 /* RFC6790 */
+#define MPLS_LABEL_GAL 13 /* RFC5586 */
+#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
+#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
+
#endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 3873a35509aa..2e35c61bbdd1 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -126,6 +126,7 @@ enum {
NDTPA_PROXY_QLEN, /* u32 */
NDTPA_LOCKTIME, /* u64, msecs */
NDTPA_QUEUE_LENBYTES, /* u32 */
+ NDTPA_MCAST_REPROBES, /* u32 */
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 832bc46db78b..5fa1cd04762e 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1,19 +1,49 @@
#ifndef _LINUX_NF_TABLES_H
#define _LINUX_NF_TABLES_H
+#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
+/**
+ * enum nft_registers - nf_tables registers
+ *
+ * nf_tables used to have five registers: a verdict register and four data
+ * registers of size 16. The data registers have been changed to 16 registers
+ * of size 4. For compatibility reasons, the NFT_REG_[1-4] registers still
+ * map to areas of size 16, the 4 byte registers are addressed using
+ * NFT_REG32_00 - NFT_REG32_15.
+ */
enum nft_registers {
NFT_REG_VERDICT,
NFT_REG_1,
NFT_REG_2,
NFT_REG_3,
NFT_REG_4,
- __NFT_REG_MAX
+ __NFT_REG_MAX,
+
+ NFT_REG32_00 = 8,
+ NFT_REG32_01,
+ NFT_REG32_02,
+ NFT_REG32_03,
+ NFT_REG32_04,
+ NFT_REG32_05,
+ NFT_REG32_06,
+ NFT_REG32_07,
+ NFT_REG32_08,
+ NFT_REG32_09,
+ NFT_REG32_10,
+ NFT_REG32_11,
+ NFT_REG32_12,
+ NFT_REG32_13,
+ NFT_REG32_14,
+ NFT_REG32_15,
};
#define NFT_REG_MAX (__NFT_REG_MAX - 1)
+#define NFT_REG_SIZE 16
+#define NFT_REG32_SIZE 4
+
/**
* enum nft_verdicts - nf_tables internal verdicts
*
@@ -207,12 +237,16 @@ enum nft_rule_compat_attributes {
* @NFT_SET_CONSTANT: set contents may not change while bound
* @NFT_SET_INTERVAL: set contains intervals
* @NFT_SET_MAP: set is used as a dictionary
+ * @NFT_SET_TIMEOUT: set uses timeouts
+ * @NFT_SET_EVAL: set contains expressions for evaluation
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
NFT_SET_CONSTANT = 0x2,
NFT_SET_INTERVAL = 0x4,
NFT_SET_MAP = 0x8,
+ NFT_SET_TIMEOUT = 0x10,
+ NFT_SET_EVAL = 0x20,
};
/**
@@ -251,6 +285,8 @@ enum nft_set_desc_attributes {
* @NFTA_SET_POLICY: selection policy (NLA_U32)
* @NFTA_SET_DESC: set description (NLA_NESTED)
* @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
+ * @NFTA_SET_TIMEOUT: default timeout value (NLA_U64)
+ * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -264,6 +300,8 @@ enum nft_set_attributes {
NFTA_SET_POLICY,
NFTA_SET_DESC,
NFTA_SET_ID,
+ NFTA_SET_TIMEOUT,
+ NFTA_SET_GC_INTERVAL,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -283,12 +321,20 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
* @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
* @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ * @NFTA_SET_ELEM_TIMEOUT: timeout value (NLA_U64)
+ * @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64)
+ * @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
+ * @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
NFTA_SET_ELEM_KEY,
NFTA_SET_ELEM_DATA,
NFTA_SET_ELEM_FLAGS,
+ NFTA_SET_ELEM_TIMEOUT,
+ NFTA_SET_ELEM_EXPIRATION,
+ NFTA_SET_ELEM_USERDATA,
+ NFTA_SET_ELEM_EXPR,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -346,6 +392,9 @@ enum nft_data_attributes {
};
#define NFTA_DATA_MAX (__NFTA_DATA_MAX - 1)
+/* Maximum length of a value */
+#define NFT_DATA_VALUE_MAXLEN 64
+
/**
* enum nft_verdict_attributes - nf_tables verdict netlink attributes
*
@@ -504,6 +553,35 @@ enum nft_lookup_attributes {
};
#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1)
+enum nft_dynset_ops {
+ NFT_DYNSET_OP_ADD,
+ NFT_DYNSET_OP_UPDATE,
+};
+
+/**
+ * enum nft_dynset_attributes - dynset expression attributes
+ *
+ * @NFTA_DYNSET_SET_NAME: name of the set to add data to (NLA_STRING)
+ * @NFTA_DYNSET_SET_ID: unique identifier of the set in the transaction (NLA_U32)
+ * @NFTA_DYNSET_OP: operation (NLA_U32)
+ * @NFTA_DYNSET_SREG_KEY: source register of the key (NLA_U32)
+ * @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
+ * @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
+ * @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ */
+enum nft_dynset_attributes {
+ NFTA_DYNSET_UNSPEC,
+ NFTA_DYNSET_SET_NAME,
+ NFTA_DYNSET_SET_ID,
+ NFTA_DYNSET_OP,
+ NFTA_DYNSET_SREG_KEY,
+ NFTA_DYNSET_SREG_DATA,
+ NFTA_DYNSET_TIMEOUT,
+ NFTA_DYNSET_EXPR,
+ __NFTA_DYNSET_MAX,
+};
+#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
+
/**
* enum nft_payload_bases - nf_tables payload expression offset bases
*
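
The enum nft_registers comment at the top of this hunk describes the new layout: each legacy 128-bit register aliases four of the new 32-bit ones, so NFT_REG_1 covers NFT_REG32_00..03, NFT_REG_2 covers NFT_REG32_04..07, and so on. A small sketch of a conversion helper one could build on these constants (nft_reg_to_reg32 is illustrative, not part of the header):

    /* Map a legacy 128-bit register (NFT_REG_1..4) to its first 32-bit alias. */
    static inline unsigned int nft_reg_to_reg32(unsigned int reg)
    {
        return NFT_REG32_00 +
               (reg - NFT_REG_1) * (NFT_REG_SIZE / NFT_REG32_SIZE);
    }
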
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index ba993360dbe9..773dfe8924c7 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -12,9 +12,7 @@
#ifndef _UAPI__LINUX_BRIDGE_EFF_H
#define _UAPI__LINUX_BRIDGE_EFF_H
-#include <linux/if.h>
#include <linux/netfilter_bridge.h>
-#include <linux/if_ether.h>
#define EBT_TABLE_MAXNAMELEN 32
#define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 35f5f4c6c260..adc0aff83fbb 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -162,13 +162,6 @@
*/
#define NFS4_MAX_BACK_CHANNEL_OPS 2
-enum nfs4_acl_whotype {
- NFS4_ACL_WHO_NAMED = 0,
- NFS4_ACL_WHO_OWNER,
- NFS4_ACL_WHO_GROUP,
- NFS4_ACL_WHO_EVERYONE,
-};
-
#endif /* _UAPI_LINUX_NFS4_H */
/*
diff --git a/include/uapi/linux/nfs_idmap.h b/include/uapi/linux/nfs_idmap.h
index 8d4b1c7b24d4..038e36c96669 100644
--- a/include/uapi/linux/nfs_idmap.h
+++ b/include/uapi/linux/nfs_idmap.h
@@ -1,5 +1,5 @@
/*
- * include/linux/nfs_idmap.h
+ * include/uapi/linux/nfs_idmap.h
*
* UID and GID to name mapping for clients.
*
diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
index 0bf130a1c58d..28ec6c9c421a 100644
--- a/include/uapi/linux/nfsd/debug.h
+++ b/include/uapi/linux/nfsd/debug.h
@@ -12,14 +12,6 @@
#include <linux/sunrpc/debug.h>
/*
- * Enable debugging for nfsd.
- * Requires RPC_DEBUG.
- */
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define NFSD_DEBUG 1
-#endif
-
-/*
* knfsd debug flags
*/
#define NFSDDBG_SOCK 0x0001
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index d3bd6ffec041..0df7bd5d2fb1 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -21,6 +21,9 @@
/*
* Export flags.
+ *
+ * Please update the expflags[] array in fs/nfsd/export.c when adding
+ * a new flag.
*/
#define NFSEXP_READONLY 0x0001
#define NFSEXP_INSECURE_PORT 0x0002
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 68b294e83944..241220c43e86 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -25,6 +25,19 @@
*
*/
+/*
+ * This header file defines the userspace API to the wireless stack. Please
+ * be careful not to break things - i.e. don't move anything around or change it
+ * unless you can demonstrate that it breaks neither API nor ABI.
+ *
+ * Additions to the API should be accompanied by actual implementations in
+ * an upstream driver, so that example implementations exist in case there
+ * are ever concerns about the precise semantics of the API or changes are
+ * needed, and to ensure that code for dead (no longer implemented) API
+ * can actually be identified and removed.
+ * Nonetheless, semantics should also be documented carefully in this file.
+ */
+
#include <linux/types.h>
#define NL80211_GENL_NAME "nl80211"
@@ -1684,6 +1697,10 @@ enum nl80211_commands {
* If set during scheduled scan start then the new scan req will be
* owned by the netlink socket that created it and the scheduled scan will
* be stopped when the socket is closed.
+ * If set during configuration of regulatory indoor operation then the
+ * regulatory indoor configuration will be owned by the netlink socket
+ * that configured the indoor setting, and the indoor operation will be
+ * cleared when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1737,8 +1754,12 @@ enum nl80211_commands {
* should be contained in the result as the sum of the respective counters
* over all channels.
*
- * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before a scheduled scan (or a
- * WoWLAN net-detect scan) is started, u32 in seconds.
+ * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a
+ * scheduled scan (or a WoWLAN net-detect scan) is started, u32
+ * in seconds.
+ *
+ * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device
+ * is operating in an indoor environment.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2107,6 +2128,8 @@ enum nl80211_attrs {
NL80211_ATTR_SCHED_SCAN_DELAY,
+ NL80211_ATTR_REG_INDOOR,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3092,7 +3115,8 @@ enum nl80211_mesh_power_mode {
*
* @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've
* established peering with for longer than this time (in seconds), then
- * remove it from the STA's list of peers. Default is 30 minutes.
+ * remove it from the STA's list of peers. You may set this to 0 to disable
+ * the removal of the STA. Default is 30 minutes.
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
@@ -3694,6 +3718,8 @@ struct nl80211_pattern_support {
* @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put
* the chip into a special state -- works best with chips that have
* support for low-power operation already (flag)
+ * Note that this mode is incompatible with all of the others, if
+ * any others are even supported by the device.
* @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect
* is detected is implementation-specific (flag)
* @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed
@@ -4327,11 +4353,13 @@ enum nl80211_feature_flags {
/**
* enum nl80211_ext_feature_index - bit index of extended features.
+ * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
enum nl80211_ext_feature_index {
+ NL80211_EXT_FEATURE_VHT_IBSS,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9b79abbd1ab8..309211b3eb67 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -152,21 +152,42 @@ enum perf_event_sample_format {
* The branch types can be combined, however BRANCH_ANY covers all types
* of branches and therefore it supersedes all the other types.
*/
+enum perf_branch_sample_type_shift {
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
+
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
+
+ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
+};
+
enum perf_branch_sample_type {
- PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */
- PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */
- PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */
-
- PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */
- PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */
- PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
- PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */
- PERF_SAMPLE_BRANCH_ABORT_TX = 1U << 7, /* transaction aborts */
- PERF_SAMPLE_BRANCH_IN_TX = 1U << 8, /* in transaction */
- PERF_SAMPLE_BRANCH_NO_TX = 1U << 9, /* not in transaction */
- PERF_SAMPLE_BRANCH_COND = 1U << 10, /* conditional branches */
-
- PERF_SAMPLE_BRANCH_MAX = 1U << 11, /* non-ABI */
+ PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+ PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+
+ PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+
+ PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+
+ PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
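The point of splitting out the *_SHIFT enum above is that every flag is, by construction, 1U shifted by its named shift; a compile-time check along these lines (not part of the header, just a sanity sketch) documents the invariant.

/* Sketch: C11 compile-time checks that the two enums stay in sync. */
#include <linux/perf_event.h>

_Static_assert(PERF_SAMPLE_BRANCH_ANY ==
	       (1U << PERF_SAMPLE_BRANCH_ANY_SHIFT), "flag/shift mismatch");
_Static_assert(PERF_SAMPLE_BRANCH_CALL_STACK ==
	       (1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT), "flag/shift mismatch");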
#define PERF_SAMPLE_BRANCH_PLM_ALL \
@@ -240,6 +261,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
+#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -305,7 +327,8 @@ struct perf_event_attr {
exclude_callchain_user : 1, /* exclude user callchains */
mmap2 : 1, /* include mmap with inode data */
comm_exec : 1, /* flag comm events that are due to an exec */
- __reserved_1 : 39;
+ use_clockid : 1, /* use @clockid for time fields */
+ __reserved_1 : 38;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -334,8 +357,7 @@ struct perf_event_attr {
*/
__u32 sample_stack_user;
- /* Align to u64. */
- __u32 __reserved_2;
+ __s32 clockid;
/*
* Defines set of regs to dump for each sample
* state captured on:
@@ -345,6 +367,12 @@ struct perf_event_attr {
* See asm/perf_regs.h for details.
*/
__u64 sample_regs_intr;
+
+ /*
+ * Wakeup watermark for AUX area
+ */
+ __u32 aux_watermark;
+ __u32 __reserved_2; /* align to __u64 */
};
#define perf_flags(attr) (*(&(attr)->read_format + 1))
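The new use_clockid bit and the clockid field that replaces __reserved_2 let userspace pick the clock used for the event's time fields. A hedged sketch of a caller, using the conventional raw-syscall wrapper (glibc provides no stub) and an arbitrary hardware event:

/* Sketch: request CLOCK_MONOTONIC_RAW timestamps for a cycles counter. */
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_cycles_monotonic_raw(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.use_clockid = 1;			/* new bit above */
	attr.clockid = CLOCK_MONOTONIC_RAW;	/* lands in the old __reserved_2 slot */

	/* pid 0 = calling task, cpu -1 = any CPU, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}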
@@ -360,6 +388,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -500,9 +529,30 @@ struct perf_event_mmap_page {
* In this case the kernel will not over-write unread data.
*
* See perf_output_put_handle() for the data ordering.
+ *
+ * data_{offset,size} indicate the location and size of the perf record
+ * buffer within the mmapped area.
*/
__u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */
+ __u64 data_offset; /* where the buffer starts */
+ __u64 data_size; /* data buffer size */
+
+ /*
+ * The AUX area is defined by the aux_{offset,size} fields, which should be
+ * set by userspace so that
+ *
+ * aux_offset >= data_offset + data_size
+ *
+ * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
+ *
+ * The ring buffer pointers aux_{head,tail} have the same semantics as
+ * data_{head,tail}, and the same ordering rules apply.
+ */
+ __u64 aux_head;
+ __u64 aux_tail;
+ __u64 aux_offset;
+ __u64 aux_size;
};
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
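Per the comment in the hunk above, userspace picks aux_offset and aux_size in the control page before mmap()ing the AUX region from the same perf fd. A minimal sketch, with the page count left as an illustrative parameter:

/* Sketch: map an AUX area immediately after the data buffer. */
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/perf_event.h>

static void *map_aux_area(int perf_fd, struct perf_event_mmap_page *ctrl,
			  size_t aux_pages)
{
	long page = sysconf(_SC_PAGESIZE);

	/* must satisfy aux_offset >= data_offset + data_size */
	ctrl->aux_offset = ctrl->data_offset + ctrl->data_size;
	ctrl->aux_size = (unsigned long long)aux_pages * page;

	return mmap(NULL, ctrl->aux_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, perf_fd, ctrl->aux_offset);
}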
@@ -725,6 +775,31 @@ enum perf_event_type {
*/
PERF_RECORD_MMAP2 = 10,
+ /*
+ * Records that new data landed in the AUX buffer part.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 aux_offset;
+ * u64 aux_size;
+ * u64 flags;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX = 11,
+
+ /*
+ * Indicates that instruction trace has started
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * };
+ */
+ PERF_RECORD_ITRACE_START = 12,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -742,6 +817,12 @@ enum perf_callchain_context {
PERF_CONTEXT_MAX = (__u64)-4095,
};
+/**
+ * PERF_RECORD_AUX::flags bits
+ */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 25731dfb3fcc..bf08e76bf505 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -397,6 +397,8 @@ enum {
TCA_BPF_CLASSID,
TCA_BPF_OPS_LEN,
TCA_BPF_OPS,
+ TCA_BPF_FD,
+ TCA_BPF_NAME,
__TCA_BPF_MAX,
};
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 1f49b8341c99..9c95b2c1c88a 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -36,11 +36,12 @@
#include <linux/errno.h>
#include <linux/types.h>
-#define __DQUOT_VERSION__ "dquot_6.5.2"
+#define __DQUOT_VERSION__ "dquot_6.6.0"
-#define MAXQUOTAS 2
+#define MAXQUOTAS 3
#define USRQUOTA 0 /* element used for user quotas */
#define GRPQUOTA 1 /* element used for group quotas */
+#define PRJQUOTA 2 /* element used for project quotas */
/*
* Definitions for the default names of the quotas files.
@@ -48,6 +49,7 @@
#define INITQFNAMES { \
"user", /* USRQUOTA */ \
"group", /* GRPQUOTA */ \
+ "project", /* PRJQUOTA */ \
"undefined", \
};
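With PRJQUOTA as a third quota type, the existing quotactl() interface can address project quotas the same way it addresses user and group quotas. A hedged sketch (the block device path and project id are placeholders, and PRJQUOTA requires headers at least as new as this patch; whether a given filesystem honours it is up to that filesystem):

/* Sketch: read one project quota record,
 * e.g. print_project_quota("/dev/sda1", 42). */
#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

static int print_project_quota(const char *blkdev, int proj_id)
{
	struct dqblk dq;

	if (quotactl(QCMD(Q_GETQUOTA, PRJQUOTA), blkdev, proj_id,
		     (caddr_t)&dq) < 0)
		return -1;

	printf("project %d: %llu bytes in use, block hard limit %llu\n",
	       proj_id, (unsigned long long)dq.dqb_curspace,
	       (unsigned long long)dq.dqb_bhardlimit);
	return 0;
}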
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 49f4210d4394..2ae6131e69a5 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -78,6 +78,12 @@
#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */
#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */
#define MD_DISK_REMOVED 3 /* disk is in sync with the raid set */
+#define MD_DISK_CLUSTER_ADD 4 /* Initiate a disk add across the cluster
+ * For clustered environments only.
+ */
+#define MD_DISK_CANDIDATE 5 /* disk is added as spare (local) until confirmed
+ * For clustered environments only.
+ */
#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" in RAID1 config.
* read requests will only be sent here in
@@ -101,6 +107,7 @@ typedef struct mdp_device_descriptor_s {
#define MD_SB_CLEAN 0
#define MD_SB_ERRORS 1
+#define MD_SB_CLUSTERED 5 /* MD is clustered */
#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */
/*
diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
index 74e7c60c4716..1cb8aa6850b5 100644
--- a/include/uapi/linux/raid/md_u.h
+++ b/include/uapi/linux/raid/md_u.h
@@ -62,6 +62,7 @@
#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
+#define CLUSTERED_DISK_NACK _IO (MD_MAJOR, 0x35)
/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 5cc5d66bf519..974db03f7b1a 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -134,6 +134,8 @@ enum {
RTM_NEWNSID = 88,
#define RTM_NEWNSID RTM_NEWNSID
+ RTM_DELNSID = 89,
+#define RTM_DELNSID RTM_DELNSID
RTM_GETNSID = 90,
#define RTM_GETNSID RTM_GETNSID
@@ -303,6 +305,9 @@ enum rtattr_type_t {
RTA_TABLE,
RTA_MARK,
RTA_MFC_STATS,
+ RTA_VIA,
+ RTA_NEWDST,
+ RTA_PREF,
__RTA_MAX
};
@@ -332,6 +337,7 @@ struct rtnexthop {
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
+#define RTNH_F_EXTERNAL 8 /* Route installed externally */
/* Macros to handle hexthops */
@@ -344,6 +350,12 @@ struct rtnexthop {
#define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len))
#define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0)))
+/* RTA_VIA */
+struct rtvia {
+ __kernel_sa_family_t rtvia_family;
+ __u8 rtvia_addr[0];
+};
+
/* RTM_CACHEINFO */
struct rta_cacheinfo {
@@ -623,6 +635,10 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV6_NETCONF RTNLGRP_IPV6_NETCONF
RTNLGRP_MDB,
#define RTNLGRP_MDB RTNLGRP_MDB
+ RTNLGRP_MPLS_ROUTE,
+#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE
+ RTNLGRP_NSID,
+#define RTNLGRP_NSID RTNLGRP_NSID
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 00adb01fa5f3..e9b4cb0cd7ed 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -242,25 +242,6 @@
#define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */
/*
- * Intel MID on-chip HSU (High Speed UART) defined bits
- */
-#define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */
-#define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */
-#define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */
-#define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */
-
-#define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */
-#define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */
-#define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */
-#define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */
-
-#define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */
-#define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */
-
-#define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */
-#define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */
-
-/*
* These register definitions are for the 16C950
*/
#define UART_ASR 0x01 /* Additional Status Register */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b483d1909d3e..b67f99d3c520 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include <linux/uio.h>
-#define TCMU_VERSION "1.0"
+#define TCMU_VERSION "2.0"
/*
* Ring Design
@@ -39,9 +39,13 @@
* should process the next packet the same way, and so on.
*/
-#define TCMU_MAILBOX_VERSION 1
+#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
+/* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */
+#define xstr(s) str(s)
+#define str(s) #s
+
struct tcmu_mailbox {
__u16 version;
__u16 flags;
@@ -64,31 +68,36 @@ enum tcmu_opcode {
* Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
*/
struct tcmu_cmd_entry_hdr {
- __u32 len_op;
+ __u32 len_op;
+ __u16 cmd_id;
+ __u8 kflags;
+#define TCMU_UFLAG_UNKNOWN_OP 0x1
+ __u8 uflags;
+
} __packed;
#define TCMU_OP_MASK 0x7
-static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr)
+static inline enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op)
{
- return hdr->len_op & TCMU_OP_MASK;
+ return len_op & TCMU_OP_MASK;
}
-static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op)
+static inline void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op)
{
- hdr->len_op &= ~TCMU_OP_MASK;
- hdr->len_op |= (op & TCMU_OP_MASK);
+ *len_op &= ~TCMU_OP_MASK;
+ *len_op |= (op & TCMU_OP_MASK);
}
-static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr)
+static inline __u32 tcmu_hdr_get_len(__u32 len_op)
{
- return hdr->len_op & ~TCMU_OP_MASK;
+ return len_op & ~TCMU_OP_MASK;
}
-static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
+static inline void tcmu_hdr_set_len(__u32 *len_op, __u32 len)
{
- hdr->len_op &= TCMU_OP_MASK;
- hdr->len_op |= len;
+ *len_op &= TCMU_OP_MASK;
+ *len_op |= len;
}
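With mailbox version 2 these helpers take the raw len_op word rather than a header pointer, and cmd_id moves into the header itself. A hedged sketch of consuming one command entry; the ring walk is omitted and process_scsi_cmd() is a hypothetical user-supplied handler, not part of the uapi.

/* Sketch: decode a single v2 command entry. */
#include <linux/target_core_user.h>

/* hypothetical handler, assumed to be provided elsewhere */
extern void process_scsi_cmd(__u16 cmd_id, struct tcmu_cmd_entry *ent, __u32 len);

static void handle_entry(struct tcmu_cmd_entry *ent)
{
	__u32 len_op = ent->hdr.len_op;		/* read once, pass by value */

	if (tcmu_hdr_get_op(len_op) != TCMU_OP_CMD)
		return;				/* e.g. a TCMU_OP_PAD entry, skip it */

	/* length is 8-byte aligned and includes the header itself */
	process_scsi_cmd(ent->hdr.cmd_id, ent, tcmu_hdr_get_len(len_op));
}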
/* Currently the same as SCSI_SENSE_BUFFERSIZE */
@@ -97,13 +106,14 @@ static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
struct tcmu_cmd_entry {
struct tcmu_cmd_entry_hdr hdr;
- uint16_t cmd_id;
- uint16_t __pad1;
-
union {
struct {
+ uint32_t iov_cnt;
+ uint32_t iov_bidi_cnt;
+ uint32_t iov_dif_cnt;
uint64_t cdb_off;
- uint64_t iov_cnt;
+ uint64_t __pad1;
+ uint64_t __pad2;
struct iovec iov[0];
} req;
struct {
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 5288bd77e63b..07f17cc70bb3 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -24,6 +24,8 @@ enum {
TCA_ACT_BPF_PARMS,
TCA_ACT_BPF_OPS_LEN,
TCA_ACT_BPF_OPS,
+ TCA_ACT_BPF_FD,
+ TCA_ACT_BPF_NAME,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 3b9718328d8b..faa72f4fa547 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -112,6 +112,7 @@ enum {
#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
#define TCP_TIMESTAMP 24
#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
+#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */
struct tcp_repair_opt {
__u32 opt_code;
@@ -189,6 +190,8 @@ struct tcp_info {
__u64 tcpi_pacing_rate;
__u64 tcpi_max_pacing_rate;
+ __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+ __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
};
/* for TCP_MD5SIG socket option */
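The two new counters ride along in the existing TCP_INFO getsockopt; nothing changes on the caller side beyond using headers that already carry the fields. A small sketch using the uapi struct directly:

/* Sketch: print the new per-connection byte counters for a socket. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <linux/tcp.h>		/* uapi struct tcp_info with the new fields */

static void print_tcp_bytes(int sock)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	/* older kernels return a shorter struct; check the reported length */
	if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0 &&
	    len >= sizeof(ti))
		printf("acked=%llu received=%llu\n",
		       (unsigned long long)ti.tcpi_bytes_acked,
		       (unsigned long long)ti.tcpi_bytes_received);
}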
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 8d723824ad69..d4c8f142ba63 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -83,11 +83,20 @@ enum {
TIPC_NLA_BEARER_NAME, /* string */
TIPC_NLA_BEARER_PROP, /* nest */
TIPC_NLA_BEARER_DOMAIN, /* u32 */
+ TIPC_NLA_BEARER_UDP_OPTS, /* nest */
__TIPC_NLA_BEARER_MAX,
TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1
};
+enum {
+ TIPC_NLA_UDP_UNSPEC,
+ TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */
+ TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */
+
+ __TIPC_NLA_UDP_MAX,
+ TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
+};
/* Socket info */
enum {
TIPC_NLA_SOCK_UNSPEC,
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6c8f159e416e..c039f1d68a09 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -48,14 +48,15 @@
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 480, 1, 0, \
13500000, 19, 62, 57, 4, 3, 15, 4, 3, 16, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_720X480P59_94 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 480, 0, 0, \
27000000, 16, 62, 60, 9, 6, 30, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
/* Note: these are the nominal timings, for HDMI links this format is typically
@@ -64,14 +65,15 @@
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 576, 1, 0, \
13500000, 12, 63, 69, 2, 3, 19, 2, 3, 20, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_720X576P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 576, 0, 0, \
27000000, 12, 64, 68, 5, 5, 39, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P24 { \
@@ -88,7 +90,7 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 2420, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P30 { \
@@ -96,7 +98,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P50 { \
@@ -104,7 +107,7 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 440, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P60 { \
@@ -112,7 +115,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 110, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P24 { \
@@ -120,7 +124,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P25 { \
@@ -128,7 +133,7 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P30 { \
@@ -136,7 +141,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080I50 { \
@@ -144,7 +150,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 2, 5, 15, 2, 5, 16, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P50 { \
@@ -152,7 +159,7 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080I60 { \
@@ -161,7 +168,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 88, 44, 148, 2, 5, 15, 2, 5, 16, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P60 { \
@@ -170,77 +178,83 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index e0a7e3da498a..dbce2b554e02 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -69,12 +69,14 @@ struct v4l2_subdev_crop {
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
__u32 index;
__u32 code;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
/**
@@ -82,6 +84,7 @@ struct v4l2_subdev_mbus_code_enum {
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
@@ -91,7 +94,8 @@ struct v4l2_subdev_frame_size_enum {
__u32 max_width;
__u32 min_height;
__u32 max_height;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
/**
@@ -113,6 +117,7 @@ struct v4l2_subdev_frame_interval {
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_frame_interval_enum {
__u32 index;
@@ -121,7 +126,8 @@ struct v4l2_subdev_frame_interval_enum {
__u32 width;
__u32 height;
struct v4l2_fract interval;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
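The new which field selects whether an enumeration describes the TRY or the ACTIVE configuration, mirroring struct v4l2_subdev_format. A hedged sketch of enumerating active media bus codes; the device node path is a placeholder.

/* Sketch: list the mbus codes of pad 0 for the active configuration. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static void list_active_codes(const char *node)	/* e.g. "/dev/v4l-subdev0" */
{
	struct v4l2_subdev_mbus_code_enum mbus;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return;

	for (unsigned int i = 0; ; i++) {
		memset(&mbus, 0, sizeof(mbus));
		mbus.pad = 0;
		mbus.index = i;
		mbus.which = V4L2_SUBDEV_FORMAT_ACTIVE;	/* new field */
		if (ioctl(fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &mbus) < 0)
			break;
		printf("code 0x%08x\n", mbus.code);
	}
	close(fd);
}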
/**
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 82889c30f4f5..b57b750c222f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -160,6 +160,8 @@ struct vfio_device_info {
__u32 flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
+#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
+#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fbdc3602ee27..fa376f7666ba 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -268,9 +268,10 @@ enum v4l2_ycbcr_encoding {
enum v4l2_quantization {
/*
- * The default for R'G'B' quantization is always full range. For
- * Y'CbCr the quantization is always limited range, except for
- * SYCC, XV601, XV709 or JPEG: those are full range.
+ * The default for R'G'B' quantization is always full range, except
+ * for the BT2020 colorspace. For Y'CbCr the quantization is always
+ * limited range, except for COLORSPACE_JPEG, SYCC, XV601 or XV709:
+ * those are full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -1187,6 +1188,12 @@ struct v4l2_bt_timings {
exactly the same number of half-lines. Whether half-lines can be detected
or used depends on the hardware. */
#define V4L2_DV_FL_HALF_LINE (1 << 3)
+/* If set, then this is a Consumer Electronics (CE) video format. Such formats
+ * differ from other formats (commonly called IT formats) in that if RGB
+ * encoding is used then by default the RGB values use limited range (i.e.
+ * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
+ * except for the 640x480 format are CE formats. */
+#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4)
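As the comment says, the practical effect of the flag is on the default RGB quantization range. A small hedged helper illustrating that reading of it (a sketch, not an exhaustive statement of the V4L2 defaulting rules):

/* Sketch: default RGB range suggested by the CE-video flag. */
#include <linux/videodev2.h>

static enum v4l2_quantization
default_rgb_range(const struct v4l2_bt_timings *bt)
{
	/* CE formats default to limited-range RGB (16-235), IT formats to full */
	return (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
		V4L2_QUANTIZATION_LIM_RANGE : V4L2_QUANTIZATION_FULL_RANGE;
}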
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -1456,6 +1463,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
+#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -1841,8 +1849,8 @@ struct v4l2_mpeg_vbi_fmt_ivtv {
*/
struct v4l2_plane_pix_format {
__u32 sizeimage;
- __u16 bytesperline;
- __u16 reserved[7];
+ __u32 bytesperline;
+ __u16 reserved[6];
} __attribute__ ((packed));
/**
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 4b0488f20b2e..984169a819ee 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -25,6 +25,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
+#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -38,9 +39,9 @@
struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
- __le32 num_pages;
+ __u32 num_pages;
/* Number of pages we've actually got in balloon. */
- __le32 actual;
+ __u32 actual;
};
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
@@ -51,9 +52,32 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_NR 6
+/*
+ * Memory statistics structure.
+ * The driver fills an array of these structures and passes them to the device.
+ *
+ * NOTE: the fields are laid out in a way that would make the compiler add
+ * padding between and after them, so we have to use compiler-specific
+ * attributes to pack the structure and disable this padding. That also often
+ * causes the compiler to generate suboptimal code.
+ *
+ * We maintain this statistics structure format for backwards compatibility,
+ * but don't follow this example.
+ *
+ * If implementing a similar structure, do something like the below instead:
+ * struct virtio_balloon_stat {
+ * __virtio16 tag;
+ * __u8 reserved[6];
+ * __virtio64 val;
+ * };
+ *
+ * In other words, add explicit reserved fields to align field and
+ * structure boundaries at field size, avoiding compiler padding
+ * without the packed attribute.
+ */
struct virtio_balloon_stat {
- __u16 tag;
- __u64 val;
+ __virtio16 tag;
+ __virtio64 val;
} __attribute__((packed));
#endif /* _LINUX_VIRTIO_BALLOON_H */
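To make the advice in that comment concrete, here is a small standalone illustration (not uapi code) of the two layouts it contrasts:

/* Sketch: the packed legacy entry is 10 bytes; the recommended layout
 * with an explicit 6-byte reserved field is a naturally aligned 16. */
#include <stdint.h>

struct stat_packed {			/* legacy layout, needs packing */
	uint16_t tag;
	uint64_t val;
} __attribute__((packed));

struct stat_recommended {		/* layout suggested by the comment */
	uint16_t tag;
	uint8_t  reserved[6];
	uint64_t val;
};

_Static_assert(sizeof(struct stat_packed) == 10, "packed size");
_Static_assert(sizeof(struct stat_recommended) == 16, "aligned size");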
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 284fc3a05f7b..5f60aa4be50a 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -39,5 +39,6 @@
#define VIRTIO_ID_9P 9 /* 9p virtio console */
#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_INPUT 18 /* virtio input */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_input.h b/include/uapi/linux/virtio_input.h
new file mode 100644
index 000000000000..a7fe5c8fb135
--- /dev/null
+++ b/include/uapi/linux/virtio_input.h
@@ -0,0 +1,76 @@
+#ifndef _LINUX_VIRTIO_INPUT_H
+#define _LINUX_VIRTIO_INPUT_H
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#include <linux/types.h>
+
+enum virtio_input_config_select {
+ VIRTIO_INPUT_CFG_UNSET = 0x00,
+ VIRTIO_INPUT_CFG_ID_NAME = 0x01,
+ VIRTIO_INPUT_CFG_ID_SERIAL = 0x02,
+ VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03,
+ VIRTIO_INPUT_CFG_PROP_BITS = 0x10,
+ VIRTIO_INPUT_CFG_EV_BITS = 0x11,
+ VIRTIO_INPUT_CFG_ABS_INFO = 0x12,
+};
+
+struct virtio_input_absinfo {
+ __u32 min;
+ __u32 max;
+ __u32 fuzz;
+ __u32 flat;
+ __u32 res;
+};
+
+struct virtio_input_devids {
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+};
+
+struct virtio_input_config {
+ __u8 select;
+ __u8 subsel;
+ __u8 size;
+ __u8 reserved[5];
+ union {
+ char string[128];
+ __u8 bitmap[128];
+ struct virtio_input_absinfo abs;
+ struct virtio_input_devids ids;
+ } u;
+};
+
+struct virtio_input_event {
+ __le16 type;
+ __le16 code;
+ __le32 value;
+};
+
+#endif /* _LINUX_VIRTIO_INPUT_H */
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a3318f31e8e7..915980ac68df 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
-/* Assuming a given event_idx value from the other size, if
+/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
* should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
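For context (the hunk only shows the corrected comment and the signature), the body elided here performs the usual wrap-safe 16-bit comparison, along these lines:

/* Sketch of the elided body: true iff event_idx was passed while moving
 * the index from old to new_idx, using modulo-2^16 arithmetic. */
#include <linux/types.h>

static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}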
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 02d5125a5ee8..2cd9e608d0d1 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_XFRM_H
#define _LINUX_XFRM_H
+#include <linux/in6.h>
#include <linux/types.h>
/* All of the structures in this file may not change size as they are
@@ -13,6 +14,7 @@
typedef union {
__be32 a4;
__be32 a6[4];
+ struct in6_addr in6;
} xfrm_address_t;
/* Ident of a specific xfrm_state. It is used on input to lookup
diff --git a/include/uapi/linux/xilinx-v4l2-controls.h b/include/uapi/linux/xilinx-v4l2-controls.h
new file mode 100644
index 000000000000..fb495b91e800
--- /dev/null
+++ b/include/uapi/linux/xilinx-v4l2-controls.h
@@ -0,0 +1,73 @@
+/*
+ * Xilinx Controls Header
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_XILINX_V4L2_CONTROLS_H__
+#define __UAPI_XILINX_V4L2_CONTROLS_H__
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_XILINX_OFFSET 0xc000
+#define V4L2_CID_XILINX_BASE (V4L2_CID_USER_BASE + V4L2_CID_XILINX_OFFSET)
+
+/*
+ * Private Controls for Xilinx Video IPs
+ */
+
+/*
+ * Xilinx TPG Video IP
+ */
+
+#define V4L2_CID_XILINX_TPG (V4L2_CID_USER_BASE + 0xc000)
+
+/* Draw cross hairs */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIRS (V4L2_CID_XILINX_TPG + 1)
+/* Enable a moving box */
+#define V4L2_CID_XILINX_TPG_MOVING_BOX (V4L2_CID_XILINX_TPG + 2)
+/* Mask out a color component */
+#define V4L2_CID_XILINX_TPG_COLOR_MASK (V4L2_CID_XILINX_TPG + 3)
+/* Enable a stuck pixel feature */
+#define V4L2_CID_XILINX_TPG_STUCK_PIXEL (V4L2_CID_XILINX_TPG + 4)
+/* Enable a noisy output */
+#define V4L2_CID_XILINX_TPG_NOISE (V4L2_CID_XILINX_TPG + 5)
+/* Enable the motion feature */
+#define V4L2_CID_XILINX_TPG_MOTION (V4L2_CID_XILINX_TPG + 6)
+/* Configure the motion speed of moving patterns */
+#define V4L2_CID_XILINX_TPG_MOTION_SPEED (V4L2_CID_XILINX_TPG + 7)
+/* The row of the horizontal cross hair location */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW (V4L2_CID_XILINX_TPG + 8)
+/* The column of the vertical cross hair location */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN (V4L2_CID_XILINX_TPG + 9)
+/* Set starting point of sine wave for horizontal component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_HOR_START (V4L2_CID_XILINX_TPG + 10)
+/* Set speed of the horizontal component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED (V4L2_CID_XILINX_TPG + 11)
+/* Set starting point of sine wave for vertical component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_VER_START (V4L2_CID_XILINX_TPG + 12)
+/* Set speed of the vertical component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED (V4L2_CID_XILINX_TPG + 13)
+/* Moving box size */
+#define V4L2_CID_XILINX_TPG_BOX_SIZE (V4L2_CID_XILINX_TPG + 14)
+/* Moving box color */
+#define V4L2_CID_XILINX_TPG_BOX_COLOR (V4L2_CID_XILINX_TPG + 15)
+/* Upper limit count of generated stuck pixels */
+#define V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH (V4L2_CID_XILINX_TPG + 16)
+/* Noise level */
+#define V4L2_CID_XILINX_TPG_NOISE_GAIN (V4L2_CID_XILINX_TPG + 17)
+
+#endif /* __UAPI_XILINX_V4L2_CONTROLS_H__ */
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index de69170a30ce..6e4bb4270ca2 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -37,6 +37,7 @@ enum {
RDMA_NL_IWPM_ADD_MAPPING,
RDMA_NL_IWPM_QUERY_MAPPING,
RDMA_NL_IWPM_REMOVE_MAPPING,
+ RDMA_NL_IWPM_REMOTE_INFO,
RDMA_NL_IWPM_HANDLE_ERR,
RDMA_NL_IWPM_MAPINFO,
RDMA_NL_IWPM_MAPINFO_NUM,
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index 09c8a00ea503..5a5fa4956ebd 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -22,6 +22,7 @@
#ifndef _UAPI__SOUND_ASEQUENCER_H
#define _UAPI__SOUND_ASEQUENCER_H
+#include <sound/asound.h>
/** version of the sequencer */
#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION (1, 0, 1)
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 0e88e7a0f0eb..a45be6bdcf5b 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -25,6 +25,9 @@
#include <linux/types.h>
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
/*
* protocol version
@@ -140,7 +143,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 12)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 13)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -267,10 +270,17 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */
#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */
#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */
-#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* (Deprecated) has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_HAS_LINK_ATIME 0x01000000 /* report hardware link audio time, reset on startup */
+#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */
+#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
+#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
+
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
+
+
typedef int __bitwise snd_pcm_state_t;
#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
#define SNDRV_PCM_STATE_SETUP ((__force snd_pcm_state_t) 1) /* stream has a setup */
@@ -408,6 +418,22 @@ struct snd_pcm_channel_info {
unsigned int step; /* samples distance in bits */
};
+enum {
+ /*
+ * first definition for backwards compatibility only,
+ * maps to wallclock/link time for HDAudio playback and DEFAULT/DMA time for everything else
+ */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT = 0,
+
+ /* timestamp definitions */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT = 1, /* DMA time, reported as per hw_ptr */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK = 2, /* link time reported by sample or wallclock counter, reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ABSOLUTE = 3, /* link time reported by sample or wallclock counter, not reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ESTIMATED = 4, /* link time estimated indirectly */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED = 5, /* link time synchronized with system time */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LAST = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED
+};
+
struct snd_pcm_status {
snd_pcm_state_t state; /* stream state */
struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
@@ -419,9 +445,11 @@ struct snd_pcm_status {
snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */
snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */
snd_pcm_state_t suspended_state; /* suspended stream state */
- __u32 reserved_alignment; /* must be filled with zero */
- struct timespec audio_tstamp; /* from sample counter or wall clock */
- unsigned char reserved[56-sizeof(struct timespec)]; /* must be filled with zero */
+ __u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */
+ struct timespec audio_tstamp; /* sample counter, wall clock, PHC or on-demand sync'ed */
+ struct timespec driver_tstamp; /* useful in case reference system tstamp is reported with delay */
+ __u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
+ unsigned char reserved[52-2*sizeof(struct timespec)]; /* must be filled with zero */
};
struct snd_pcm_mmap_status {
@@ -534,6 +562,7 @@ enum {
#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t)
#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22)
#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr)
+#define SNDRV_PCM_IOCTL_STATUS_EXT _IOWR('A', 0x24, struct snd_pcm_status)
#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info)
#define SNDRV_PCM_IOCTL_PREPARE _IO('A', 0x40)
#define SNDRV_PCM_IOCTL_RESET _IO('A', 0x41)
@@ -835,7 +864,7 @@ struct snd_ctl_elem_id {
snd_ctl_elem_iface_t iface; /* interface identifier */
unsigned int device; /* device/client number */
unsigned int subdevice; /* subdevice (substream) number */
- unsigned char name[44]; /* ASCII name of item */
+ unsigned char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* ASCII name of item */
unsigned int index; /* index of item */
};
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 22ed8cb7800b..e00d8cbfc628 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -75,7 +75,7 @@ struct snd_compr_tstamp {
/**
* struct snd_compr_avail - avail descriptor
* @avail: Number of bytes available in ring buffer for writing/reading
- * @tstamp: timestamp infomation
+ * @tstamp: timestamp information
*/
struct snd_compr_avail {
__u64 avail;
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index d1bbaf78457a..ec1535bb6aed 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -23,8 +23,7 @@
#define _UAPI__SOUND_EMU10K1_H
#include <linux/types.h>
-
-
+#include <sound/asound.h>
/*
* ---- FX8010 ----
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index b357f1a5e29c..5737332d38f2 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -20,6 +20,12 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
/* Maximum channels is 64; even in 56-channel mode you have 64 playbacks to the matrix */
#define HDSPM_MAX_CHANNELS 64
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 73390c120cad..85dedca3dcfb 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -39,7 +39,7 @@ struct ipu_di_signal_cfg {
struct videomode mode;
- u32 pixel_fmt;
+ u32 bus_format;
u32 v_to_h_sync;
#define IPU_DI_CLKMODE_SYNC (1 << 0)
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index c8ed15daad02..f001a356fd98 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -129,14 +129,13 @@ enum omap_rfbi_te_mode {
};
enum omap_dss_signal_level {
- OMAPDSS_SIG_ACTIVE_HIGH = 0,
- OMAPDSS_SIG_ACTIVE_LOW = 1,
+ OMAPDSS_SIG_ACTIVE_LOW,
+ OMAPDSS_SIG_ACTIVE_HIGH,
};
enum omap_dss_signal_edge {
- OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- OMAPDSS_DRIVE_SIG_RISING_EDGE,
OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ OMAPDSS_DRIVE_SIG_RISING_EDGE,
};
enum omap_dss_venc_type {
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index a20e4a3a8b15..0530e5a4c6b1 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -289,6 +289,11 @@
#define VIDISD14C_ALPHA1_B_LIMIT 0xf
#define VIDISD14C_ALPHA1_B(_x) ((_x) << 0)
+#define VIDW_ALPHA 0x021c
+#define VIDW_ALPHA_R(_x) ((_x) << 16)
+#define VIDW_ALPHA_G(_x) ((_x) << 8)
+#define VIDW_ALPHA_B(_x) ((_x) << 0)
+
/* Video buffer addresses */
#define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8))
#define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8))
@@ -436,6 +441,12 @@
#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+/* Display port clock control */
+#define DP_MIE_CLKCON 0x27c
+#define DP_MIE_CLK_DISABLE 0x0
+#define DP_MIE_CLK_DP_ENABLE 0x2
+#define DP_MIE_CLK_MIE_ENABLE 0x3
+
/* Notes on per-window bpp settings
*
* Value Win0 Win1 Win2 Win3 Win 4
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 143ca5ffab7a..4478f4b4aae2 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
/* Perform a batch of grant map/copy operations. Retry every batch slot
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index f68719f405af..a48378958062 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -67,7 +67,7 @@
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
-#define __HYPERVISOR_acm_op 27
+#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
@@ -75,7 +75,11 @@
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
+#define __HYPERVISOR_sysctl 35
+#define __HYPERVISOR_domctl 36
+#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
+#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 83338210ee04..0ce4f32017ea 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled);
void xen_timer_resume(void);
void xen_arch_resume(void);
+void xen_arch_suspend(void);
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
@@ -27,13 +28,58 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
+
+/*
+ * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: Array of GFNs to map
+ * @nr: Number of entries in the GFN array
+ * @err_ptr: Returns per-GFN error status.
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * @gfn and @err_ptr may point to the same buffer, the GFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages);
+
+/* xen_remap_domain_mfn_range() - map a range of foreign frames
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: First GFN to map.
+ * @nr: Number of frames to map
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t mfn, int nr,
+ xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages);
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages);
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b0f1c9e5d687..289c0b5f08fe 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -46,6 +46,10 @@
#include <xen/interface/io/xenbus.h>
#include <xen/interface/io/xs_wire.h>
+#define XENBUS_MAX_RING_PAGE_ORDER 4
+#define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER)
+#define INVALID_GRANT_HANDLE (~0U)
+
/* Register callback to watch this node. */
struct xenbus_watch
{
@@ -199,15 +203,19 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
const char *pathfmt, ...);
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
-int xenbus_map_ring_valloc(struct xenbus_device *dev,
- int gnt_ref, void **vaddr);
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
- grant_handle_t *handle, void *vaddr);
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+ unsigned int nr_pages, grant_ref_t *grefs);
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+ unsigned int nr_grefs, void **vaddr);
+int xenbus_map_ring(struct xenbus_device *dev,
+ grant_ref_t *gnt_refs, unsigned int nr_grefs,
+ grant_handle_t *handles, unsigned long *vaddrs,
+ bool *leaked);
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t handle, void *vaddr);
+ grant_handle_t *handles, unsigned int nr_handles,
+ unsigned long *vaddrs);
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
int xenbus_free_evtchn(struct xenbus_device *dev, int port);
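The ring-grant helpers now take arrays, so a frontend can share a multi-page ring in one call. A hedged kernel-side sketch; the page count, allocation strategy and error handling are illustrative and not taken from any particular driver.

/* Sketch: grant a 4-page ring to the backend with the array-based API. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

static int grant_front_ring(struct xenbus_device *dev, void **ring_out,
			    grant_ref_t grefs[XENBUS_MAX_RING_PAGES])
{
	unsigned int nr_pages = 4;	/* must be <= XENBUS_MAX_RING_PAGES */
	unsigned int order = get_order(nr_pages * PAGE_SIZE);
	void *ring = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	int err;

	if (!ring)
		return -ENOMEM;

	err = xenbus_grant_ring(dev, ring, nr_pages, grefs);
	if (err < 0) {
		free_pages((unsigned long)ring, order);
		return err;
	}

	*ring_out = ring;
	return 0;
}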
diff --git a/init/Kconfig b/init/Kconfig
index f5dbc6d4261b..dc24dec60232 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -394,6 +394,7 @@ endchoice
config BSD_PROCESS_ACCT
bool "BSD Process Accounting"
+ depends on MULTIUSER
help
If you say Y here, a user level program will be able to instruct the
kernel (via a special system call) to write process accounting
@@ -420,6 +421,7 @@ config BSD_PROCESS_ACCT_V3
config TASKSTATS
bool "Export task/process statistics through netlink"
depends on NET
+ depends on MULTIUSER
default n
help
Export selected statistics for tasks/processes through the
@@ -791,6 +793,19 @@ config RCU_NOCB_CPU_ALL
endchoice
+config RCU_EXPEDITE_BOOT
+ bool
+ default n
+ help
+ This option enables expedited grace periods at boot time,
+ as if rcu_expedite_gp() had been invoked early in boot.
+ The corresponding rcu_unexpedite_gp() is invoked from
+ rcu_end_inkernel_boot(), which is intended to be invoked
+ at the end of the kernel-only boot sequence, just before
+ init is exec'ed.
+
+ Accept the default if unsure.
+
endmenu # "RCU Subsystem"
config BUILD_BIN2C
@@ -1032,12 +1047,6 @@ config MEMCG_KMEM
the kmem extension can use it to guarantee that no group of processes
will ever exhaust kernel resources alone.
- WARNING: Current implementation lacks reclaim support. That means
- allocation attempts will fail when close to the limit even if there
- are plenty of kmem available for reclaim. That makes this option
- unusable in real life so DO NOT SELECT IT unless for development
- purposes.
-
config CGROUP_HUGETLB
bool "HugeTLB Resource Controller for Control Groups"
depends on HUGETLB_PAGE
@@ -1147,6 +1156,7 @@ config CHECKPOINT_RESTORE
menuconfig NAMESPACES
bool "Namespaces support" if EXPERT
+ depends on MULTIUSER
default !EXPERT
help
Provides the way to make tasks work with different objects using
@@ -1343,11 +1353,25 @@ menuconfig EXPERT
config UID16
bool "Enable 16-bit UID system calls" if EXPERT
- depends on HAVE_UID16
+ depends on HAVE_UID16 && MULTIUSER
default y
help
This enables the legacy 16-bit UID syscall wrappers.
+config MULTIUSER
+ bool "Multiple users, groups and capabilities support" if EXPERT
+ default y
+ help
+ This option enables support for non-root users, groups and
+ capabilities.
+
+ If you say N here, all processes will run with UID 0, GID 0, and all
+ possible capabilities. Saying N here also compiles out support for
+ system calls related to UIDs, GIDs, and capabilities, such as setuid,
+ setgid, and capset.
+
+ If unsure, say Y here.
+
config SGETMASK_SYSCALL
bool "sgetmask/ssetmask syscalls support" if EXPERT
def_bool PARISC || MN10300 || BLACKFIN || M68K || PPC || MIPS || X86 || SPARC || CRIS || MICROBLAZE || SUPERH
@@ -1513,7 +1537,7 @@ config EVENTFD
# syscall, maps, verifier
config BPF_SYSCALL
- bool "Enable bpf() system call" if EXPERT
+ bool "Enable bpf() system call"
select ANON_INODES
select BPF
default n
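The MULTIUSER help text above implies that, with the option disabled, the capability checks collapse into compile-time constants. A plausible shape of the stubs, presumably added to the capability/cred headers elsewhere in this series and not shown in this hunk, would be:

#ifndef CONFIG_MULTIUSER
/* every task runs as UID 0 with full capabilities, so checks are trivial */
static inline bool capable(int cap)
{
        return true;
}
static inline bool ns_capable(struct user_namespace *ns, int cap)
{
        return true;
}
#endif /* !CONFIG_MULTIUSER */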
diff --git a/init/do_mounts.c b/init/do_mounts.c
index eb410083e8e0..a95bbdb2a502 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -207,7 +207,7 @@ done:
* bangs.
*/
-dev_t name_to_dev_t(char *name)
+dev_t name_to_dev_t(const char *name)
{
char s[32];
char *p;
@@ -225,9 +225,11 @@ dev_t name_to_dev_t(char *name)
#endif
if (strncmp(name, "/dev/", 5) != 0) {
- unsigned maj, min;
+ unsigned maj, min, offset;
+ char dummy;
- if (sscanf(name, "%u:%u", &maj, &min) == 2) {
+ if ((sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) ||
+ (sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset, &dummy) == 3)) {
res = MKDEV(maj, min);
if (maj != MAJOR(res) || min != MINOR(res))
goto fail;
@@ -286,6 +288,7 @@ fail:
done:
return res;
}
+EXPORT_SYMBOL_GPL(name_to_dev_t);
static int __init root_dev_setup(char *line)
{
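A minimal userspace illustration of the dummy "%c" trick used in the hunk above (inputs are examples, not from the patch): a clean "major:minor" string yields exactly two conversions, while trailing garbage bumps the count to three so the fast path is skipped.

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned maj, min;
        char dummy;

        /* well-formed: %c finds nothing to match, count stays at 2 */
        assert(sscanf("8:1", "%u:%u%c", &maj, &min, &dummy) == 2);

        /* trailing junk: %c consumes 'j', count becomes 3, value is rejected */
        assert(sscanf("8:1junk", "%u:%u%c", &maj, &min, &dummy) == 3);

        return 0;
}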
diff --git a/init/main.c b/init/main.c
index 4a6974e67839..2115055faeac 100644
--- a/init/main.c
+++ b/init/main.c
@@ -80,6 +80,7 @@
#include <linux/list.h>
#include <linux/integrity.h>
#include <linux/proc_ns.h>
+#include <linux/io.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -90,7 +91,7 @@
static int kernel_init(void *);
extern void init_IRQ(void);
-extern void fork_init(unsigned long);
+extern void fork_init(void);
extern void radix_tree_init(void);
#ifndef CONFIG_DEBUG_RODATA
static inline void mark_rodata_ro(void) { }
@@ -143,7 +144,7 @@ EXPORT_SYMBOL_GPL(static_key_initialized);
* rely on the BIOS and skip the reset operation.
*
* This is useful if kernel is booting in an unreliable environment.
- * For ex. kdump situaiton where previous kernel has crashed, BIOS has been
+ * For ex. kdump situation where previous kernel has crashed, BIOS has been
* skipped and devices will be in unknown state.
*/
unsigned int reset_devices;
@@ -384,6 +385,7 @@ static noinline void __init_refok rest_init(void)
int pid;
rcu_scheduler_starting();
+ smpboot_thread_init();
/*
* We need to spawn init first so that it obtains pid 1, however
* the init task will end up wanting to create kthreads, which, if
@@ -484,6 +486,7 @@ static void __init mm_init(void)
percpu_init_late();
pgtable_init();
vmalloc_init();
+ ioremap_huge_init();
}
asmlinkage __visible void __init start_kernel(void)
@@ -642,7 +645,7 @@ asmlinkage __visible void __init start_kernel(void)
#endif
thread_info_cache_init();
cred_init();
- fork_init(totalram_pages);
+ fork_init();
proc_caches_init();
buffer_init();
key_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 7635a1cf99f3..3aaea7ffd077 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -466,7 +466,7 @@ out_unlock:
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
dir->i_size -= DIRENT_SIZE;
@@ -770,7 +770,7 @@ static struct file *do_open(struct path *path, int oflag)
if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
return ERR_PTR(-EINVAL);
acc = oflag2acc[oflag & O_ACCMODE];
- if (inode_permission(path->dentry->d_inode, acc))
+ if (inode_permission(d_inode(path->dentry), acc))
return ERR_PTR(-EACCES);
return dentry_open(path, oflag, current_cred());
}
@@ -802,7 +802,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
ro = mnt_want_write(mnt); /* we'll drop it in any case */
error = 0;
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
path.dentry = lookup_one_len(name->name, root, strlen(name->name));
if (IS_ERR(path.dentry)) {
error = PTR_ERR(path.dentry);
@@ -811,7 +811,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
path.mnt = mntget(mnt);
if (oflag & O_CREAT) {
- if (path.dentry->d_inode) { /* entry already exists */
+ if (d_really_is_positive(path.dentry)) { /* entry already exists */
audit_inode(name, path.dentry, 0);
if (oflag & O_EXCL) {
error = -EEXIST;
@@ -824,12 +824,12 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
goto out;
}
audit_inode_parent_hidden(name, root);
- filp = do_create(ipc_ns, root->d_inode,
+ filp = do_create(ipc_ns, d_inode(root),
&path, oflag, mode,
u_attr ? &attr : NULL);
}
} else {
- if (!path.dentry->d_inode) {
+ if (d_really_is_negative(path.dentry)) {
error = -ENOENT;
goto out;
}
@@ -848,7 +848,7 @@ out_putfd:
put_unused_fd(fd);
fd = error;
}
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
if (!ro)
mnt_drop_write(mnt);
out_putname:
@@ -873,7 +873,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
err = mnt_want_write(mnt);
if (err)
goto out_name;
- mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&d_inode(mnt->mnt_root)->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name->name, mnt->mnt_root,
strlen(name->name));
if (IS_ERR(dentry)) {
@@ -881,17 +881,17 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
goto out_unlock;
}
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if (!inode) {
err = -ENOENT;
} else {
ihold(inode);
- err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL);
+ err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
}
dput(dentry);
out_unlock:
- mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(mnt->mnt_root)->i_mutex);
if (inode)
iput(inode);
mnt_drop_write(mnt);
diff --git a/ipc/msg.c b/ipc/msg.c
index a7261d5cbc89..2b6fdbb9e0e9 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -1015,22 +1015,24 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
struct user_namespace *user_ns = seq_user_ns(s);
struct msg_queue *msq = it;
- return seq_printf(s,
- "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
- msq->q_perm.key,
- msq->q_perm.id,
- msq->q_perm.mode,
- msq->q_cbytes,
- msq->q_qnum,
- msq->q_lspid,
- msq->q_lrpid,
- from_kuid_munged(user_ns, msq->q_perm.uid),
- from_kgid_munged(user_ns, msq->q_perm.gid),
- from_kuid_munged(user_ns, msq->q_perm.cuid),
- from_kgid_munged(user_ns, msq->q_perm.cgid),
- msq->q_stime,
- msq->q_rtime,
- msq->q_ctime);
+ seq_printf(s,
+ "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
+ msq->q_perm.key,
+ msq->q_perm.id,
+ msq->q_perm.mode,
+ msq->q_cbytes,
+ msq->q_qnum,
+ msq->q_lspid,
+ msq->q_lrpid,
+ from_kuid_munged(user_ns, msq->q_perm.uid),
+ from_kgid_munged(user_ns, msq->q_perm.gid),
+ from_kuid_munged(user_ns, msq->q_perm.cuid),
+ from_kgid_munged(user_ns, msq->q_perm.cgid),
+ msq->q_stime,
+ msq->q_rtime,
+ msq->q_ctime);
+
+ return 0;
}
#endif
diff --git a/ipc/sem.c b/ipc/sem.c
index 92842113c6a9..d1a6edd17eba 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2170,17 +2170,19 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
sem_otime = get_semotime(sma);
- return seq_printf(s,
- "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
- sma->sem_perm.key,
- sma->sem_perm.id,
- sma->sem_perm.mode,
- sma->sem_nsems,
- from_kuid_munged(user_ns, sma->sem_perm.uid),
- from_kgid_munged(user_ns, sma->sem_perm.gid),
- from_kuid_munged(user_ns, sma->sem_perm.cuid),
- from_kgid_munged(user_ns, sma->sem_perm.cgid),
- sem_otime,
- sma->sem_ctime);
+ seq_printf(s,
+ "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
+ sma->sem_perm.key,
+ sma->sem_perm.id,
+ sma->sem_perm.mode,
+ sma->sem_nsems,
+ from_kuid_munged(user_ns, sma->sem_perm.uid),
+ from_kgid_munged(user_ns, sma->sem_perm.gid),
+ from_kuid_munged(user_ns, sma->sem_perm.cuid),
+ from_kgid_munged(user_ns, sma->sem_perm.cgid),
+ sem_otime,
+ sma->sem_ctime);
+
+ return 0;
}
#endif
diff --git a/ipc/shm.c b/ipc/shm.c
index 19633b4a2350..6d767071c367 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1132,7 +1132,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
path = shp->shm_file->f_path;
path_get(&path);
shp->shm_nattch++;
- size = i_size_read(path.dentry->d_inode);
+ size = i_size_read(d_inode(path.dentry));
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
@@ -1342,25 +1342,27 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
#define SIZE_SPEC "%21lu"
#endif
- return seq_printf(s,
- "%10d %10d %4o " SIZE_SPEC " %5u %5u "
- "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
- SIZE_SPEC " " SIZE_SPEC "\n",
- shp->shm_perm.key,
- shp->shm_perm.id,
- shp->shm_perm.mode,
- shp->shm_segsz,
- shp->shm_cprid,
- shp->shm_lprid,
- shp->shm_nattch,
- from_kuid_munged(user_ns, shp->shm_perm.uid),
- from_kgid_munged(user_ns, shp->shm_perm.gid),
- from_kuid_munged(user_ns, shp->shm_perm.cuid),
- from_kgid_munged(user_ns, shp->shm_perm.cgid),
- shp->shm_atim,
- shp->shm_dtim,
- shp->shm_ctim,
- rss * PAGE_SIZE,
- swp * PAGE_SIZE);
+ seq_printf(s,
+ "%10d %10d %4o " SIZE_SPEC " %5u %5u "
+ "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
+ SIZE_SPEC " " SIZE_SPEC "\n",
+ shp->shm_perm.key,
+ shp->shm_perm.id,
+ shp->shm_perm.mode,
+ shp->shm_segsz,
+ shp->shm_cprid,
+ shp->shm_lprid,
+ shp->shm_nattch,
+ from_kuid_munged(user_ns, shp->shm_perm.uid),
+ from_kgid_munged(user_ns, shp->shm_perm.gid),
+ from_kuid_munged(user_ns, shp->shm_perm.cuid),
+ from_kgid_munged(user_ns, shp->shm_perm.cgid),
+ shp->shm_atim,
+ shp->shm_dtim,
+ shp->shm_ctim,
+ rss * PAGE_SIZE,
+ swp * PAGE_SIZE);
+
+ return 0;
}
#endif
diff --git a/ipc/util.c b/ipc/util.c
index 106bed0378ab..ff3323ef8d8b 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -837,8 +837,10 @@ static int sysvipc_proc_show(struct seq_file *s, void *it)
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
- if (it == SEQ_START_TOKEN)
- return seq_puts(s, iface->header);
+ if (it == SEQ_START_TOKEN) {
+ seq_puts(s, iface->header);
+ return 0;
+ }
return iface->show(s, it);
}
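All of the seq_file conversions above follow the same tree-wide pattern: seq_printf()/seq_puts() no longer return a useful value, so the ->show() methods just emit their output and return 0. A hedged template of the resulting shape ("sample_proc_show" is an illustrative name, not from the patch):

static int sample_proc_show(struct seq_file *s, void *v)
{
        /* formatting errors are no longer reported through the return value */
        seq_printf(s, "%10d %10d\n", 1, 2);

        /*
         * The seq_file core detects a too-small buffer on its own and calls
         * ->show() again with a larger one, so returning 0 is sufficient.
         */
        return 0;
}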
diff --git a/kernel/Makefile b/kernel/Makefile
index 1408b3353a3c..60c302cfb4d3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,9 @@ obj-y = fork.o exec_domain.o panic.o \
extable.o params.o \
kthread.o sys_ni.o nsproxy.o \
notifier.o ksysfs.o cred.o reboot.o \
- async.o range.o groups.o smpboot.o
+ async.o range.o smpboot.o
+
+obj-$(CONFIG_MULTIUSER) += groups.o
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
@@ -195,9 +197,9 @@ x509.genkey:
@echo >>x509.genkey "x509_extensions = myexts"
@echo >>x509.genkey
@echo >>x509.genkey "[ req_distinguished_name ]"
- @echo >>x509.genkey "O = Magrathea"
- @echo >>x509.genkey "CN = Glacier signing key"
- @echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2"
+ @echo >>x509.genkey "#O = Unspecified company"
+ @echo >>x509.genkey "CN = Build time autogenerated kernel key"
+ @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company"
@echo >>x509.genkey
@echo >>x509.genkey "[ myexts ]"
@echo >>x509.genkey "basicConstraints=critical,CA:FALSE"
diff --git a/kernel/acct.c b/kernel/acct.c
index e6c10d1a4058..74963d192c5d 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -213,7 +213,7 @@ static int acct_on(struct filename *pathname)
return -EACCES;
}
- if (!file->f_op->write) {
+ if (!(file->f_mode & FMODE_CAN_WRITE)) {
kfree(acct);
filp_close(file, NULL);
return -EIO;
diff --git a/kernel/audit.c b/kernel/audit.c
index 72ab759a0b43..1c13e4267de6 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -43,6 +43,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/file.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/atomic.h>
@@ -107,6 +108,7 @@ static u32 audit_rate_limit;
* When set to zero, this means unlimited. */
static u32 audit_backlog_limit = 64;
#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
+static u32 audit_backlog_wait_time_master = AUDIT_BACKLOG_WAIT_TIME;
static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
static u32 audit_backlog_wait_overflow = 0;
@@ -338,13 +340,13 @@ static int audit_set_backlog_limit(u32 limit)
static int audit_set_backlog_wait_time(u32 timeout)
{
return audit_do_config_change("audit_backlog_wait_time",
- &audit_backlog_wait_time, timeout);
+ &audit_backlog_wait_time_master, timeout);
}
static int audit_set_enabled(u32 state)
{
int rc;
- if (state < AUDIT_OFF || state > AUDIT_LOCKED)
+ if (state > AUDIT_LOCKED)
return -EINVAL;
rc = audit_do_config_change("audit_enabled", &audit_enabled, state);
@@ -663,7 +665,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
case AUDIT_MAKE_EQUIV:
/* Only support auditd and auditctl in initial pid namespace
* for now. */
- if ((task_active_pid_ns(current) != &init_pid_ns))
+ if (task_active_pid_ns(current) != &init_pid_ns)
return -EPERM;
if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
@@ -834,7 +836,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
s.lost = atomic_read(&audit_lost);
s.backlog = skb_queue_len(&audit_skb_queue);
s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
- s.backlog_wait_time = audit_backlog_wait_time;
+ s.backlog_wait_time = audit_backlog_wait_time_master;
audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
break;
}
@@ -877,8 +879,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) {
if (sizeof(s) > (size_t)nlh->nlmsg_len)
return -EINVAL;
- if (s.backlog_wait_time < 0 ||
- s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME)
+ if (s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME)
return -EINVAL;
err = audit_set_backlog_wait_time(s.backlog_wait_time);
if (err < 0)
@@ -1385,7 +1386,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
return NULL;
}
- audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
+ if (!reserve)
+ audit_backlog_wait_time = audit_backlog_wait_time_master;
ab = audit_buffer_alloc(ctx, gfp_mask, type);
if (!ab) {
@@ -1759,7 +1761,7 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
} else
audit_log_format(ab, " name=(null)");
- if (n->ino != (unsigned long)-1) {
+ if (n->ino != (unsigned long)-1)
audit_log_format(ab, " inode=%lu"
" dev=%02x:%02x mode=%#ho"
" ouid=%u ogid=%u rdev=%02x:%02x",
@@ -1771,7 +1773,6 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
from_kgid(&init_user_ns, n->gid),
MAJOR(n->rdev),
MINOR(n->rdev));
- }
if (n->osid != 0) {
char *ctx = NULL;
u32 len;
@@ -1838,11 +1839,29 @@ error_path:
}
EXPORT_SYMBOL(audit_log_task_context);
+void audit_log_d_path_exe(struct audit_buffer *ab,
+ struct mm_struct *mm)
+{
+ struct file *exe_file;
+
+ if (!mm)
+ goto out_null;
+
+ exe_file = get_mm_exe_file(mm);
+ if (!exe_file)
+ goto out_null;
+
+ audit_log_d_path(ab, " exe=", &exe_file->f_path);
+ fput(exe_file);
+ return;
+out_null:
+ audit_log_format(ab, " exe=(null)");
+}
+
void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
{
const struct cred *cred;
char comm[sizeof(tsk->comm)];
- struct mm_struct *mm = tsk->mm;
char *tty;
if (!ab)
@@ -1878,13 +1897,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, get_task_comm(comm, tsk));
- if (mm) {
- down_read(&mm->mmap_sem);
- if (mm->exe_file)
- audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
- up_read(&mm->mmap_sem);
- } else
- audit_log_format(ab, " exe=(null)");
+ audit_log_d_path_exe(ab, tsk->mm);
audit_log_task_context(ab);
}
EXPORT_SYMBOL(audit_log_task_info);
@@ -1915,7 +1928,7 @@ void audit_log_link_denied(const char *operation, struct path *link)
/* Generate AUDIT_PATH record with object. */
name->type = AUDIT_TYPE_NORMAL;
- audit_copy_inode(name, link->dentry, link->dentry->d_inode);
+ audit_copy_inode(name, link->dentry, d_backing_inode(link->dentry));
audit_log_name(current->audit_context, name, link, 0, NULL);
out:
kfree(name);
diff --git a/kernel/audit.h b/kernel/audit.h
index 1caa0d345d90..d641f9bb3ed0 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -257,6 +257,9 @@ extern struct list_head audit_filter_list[];
extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
+extern void audit_log_d_path_exe(struct audit_buffer *ab,
+ struct mm_struct *mm);
+
/* audit watch functions */
#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 2e0c97427b33..b0f9877273fc 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -37,6 +37,7 @@ struct audit_chunk {
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
+static struct task_struct *prune_thread;
/*
* One struct chunk is attached to each inode of interest.
@@ -576,7 +577,7 @@ int audit_remove_tree_rule(struct audit_krule *rule)
static int compare_root(struct vfsmount *mnt, void *arg)
{
- return mnt->mnt_root->d_inode == arg;
+ return d_backing_inode(mnt->mnt_root) == arg;
}
void audit_trim_trees(void)
@@ -648,7 +649,58 @@ void audit_put_tree(struct audit_tree *tree)
static int tag_mount(struct vfsmount *mnt, void *arg)
{
- return tag_chunk(mnt->mnt_root->d_inode, arg);
+ return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
+}
+
+/*
+ * That gets run when evict_chunk() ends up needing to kill audit_tree.
+ * Runs from a separate thread.
+ */
+static int prune_tree_thread(void *unused)
+{
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (list_empty(&prune_list))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+
+ mutex_lock(&audit_cmd_mutex);
+ mutex_lock(&audit_filter_mutex);
+
+ while (!list_empty(&prune_list)) {
+ struct audit_tree *victim;
+
+ victim = list_entry(prune_list.next,
+ struct audit_tree, list);
+ list_del_init(&victim->list);
+
+ mutex_unlock(&audit_filter_mutex);
+
+ prune_one(victim);
+
+ mutex_lock(&audit_filter_mutex);
+ }
+
+ mutex_unlock(&audit_filter_mutex);
+ mutex_unlock(&audit_cmd_mutex);
+ }
+ return 0;
+}
+
+static int audit_launch_prune(void)
+{
+ if (prune_thread)
+ return 0;
+ prune_thread = kthread_create(prune_tree_thread, NULL,
+ "audit_prune_tree");
+ if (IS_ERR(prune_thread)) {
+ pr_err("cannot start thread audit_prune_tree");
+ prune_thread = NULL;
+ return -ENOMEM;
+ } else {
+ wake_up_process(prune_thread);
+ return 0;
+ }
}
/* called with audit_filter_mutex */
@@ -674,6 +726,12 @@ int audit_add_tree_rule(struct audit_krule *rule)
/* do not set rule->tree yet */
mutex_unlock(&audit_filter_mutex);
+ if (unlikely(!prune_thread)) {
+ err = audit_launch_prune();
+ if (err)
+ goto Err;
+ }
+
err = kern_path(tree->pathname, 0, &path);
if (err)
goto Err;
@@ -811,36 +869,10 @@ int audit_tag_tree(char *old, char *new)
return failed;
}
-/*
- * That gets run when evict_chunk() ends up needing to kill audit_tree.
- * Runs from a separate thread.
- */
-static int prune_tree_thread(void *unused)
-{
- mutex_lock(&audit_cmd_mutex);
- mutex_lock(&audit_filter_mutex);
-
- while (!list_empty(&prune_list)) {
- struct audit_tree *victim;
-
- victim = list_entry(prune_list.next, struct audit_tree, list);
- list_del_init(&victim->list);
-
- mutex_unlock(&audit_filter_mutex);
-
- prune_one(victim);
-
- mutex_lock(&audit_filter_mutex);
- }
-
- mutex_unlock(&audit_filter_mutex);
- mutex_unlock(&audit_cmd_mutex);
- return 0;
-}
static void audit_schedule_prune(void)
{
- kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
+ wake_up_process(prune_thread);
}
/*
@@ -907,9 +939,9 @@ static void evict_chunk(struct audit_chunk *chunk)
for (n = 0; n < chunk->count; n++)
list_del_init(&chunk->owners[n].list);
spin_unlock(&hash_lock);
+ mutex_unlock(&audit_filter_mutex);
if (need_prune)
audit_schedule_prune();
- mutex_unlock(&audit_filter_mutex);
}
static int audit_tree_handle_event(struct fsnotify_group *group,
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index ad9c1682f616..6e30024d9aac 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -146,7 +146,7 @@ int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
/* Initialize a parent watch entry. */
static struct audit_parent *audit_init_parent(struct path *path)
{
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = d_backing_inode(path->dentry);
struct audit_parent *parent;
int ret;
@@ -361,11 +361,11 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
struct dentry *d = kern_path_locked(watch->path, parent);
if (IS_ERR(d))
return PTR_ERR(d);
- mutex_unlock(&parent->dentry->d_inode->i_mutex);
- if (d->d_inode) {
+ mutex_unlock(&d_backing_inode(parent->dentry)->i_mutex);
+ if (d_is_positive(d)) {
/* update watch filter fields */
- watch->dev = d->d_inode->i_sb->s_dev;
- watch->ino = d->d_inode->i_ino;
+ watch->dev = d_backing_inode(d)->i_sb->s_dev;
+ watch->ino = d_backing_inode(d)->i_ino;
}
dput(d);
return 0;
@@ -426,7 +426,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
return ret;
/* either find an old parent or attach a new one */
- parent = audit_find_parent(parent_path.dentry->d_inode);
+ parent = audit_find_parent(d_backing_inode(parent_path.dentry));
if (!parent) {
parent = audit_init_parent(&parent_path);
if (IS_ERR(parent)) {
@@ -482,7 +482,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
switch (data_type) {
case (FSNOTIFY_EVENT_PATH):
- inode = ((struct path *)data)->dentry->d_inode;
+ inode = d_backing_inode(((struct path *)data)->dentry);
break;
case (FSNOTIFY_EVENT_INODE):
inode = (struct inode *)data;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index dc4ae70a7413..9fb9d1cb83ce 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1629,7 +1629,7 @@ retry:
rcu_read_lock();
seq = read_seqbegin(&rename_lock);
for(;;) {
- struct inode *inode = d->d_inode;
+ struct inode *inode = d_backing_inode(d);
if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) {
struct audit_chunk *chunk;
chunk = audit_tree_lookup(inode);
@@ -1754,7 +1754,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags)
{
struct audit_context *context = current->audit_context;
- const struct inode *inode = dentry->d_inode;
+ const struct inode *inode = d_backing_inode(dentry);
struct audit_names *n;
bool parent = flags & AUDIT_INODE_PARENT;
@@ -1853,7 +1853,7 @@ void __audit_inode_child(const struct inode *parent,
const unsigned char type)
{
struct audit_context *context = current->audit_context;
- const struct inode *inode = dentry->d_inode;
+ const struct inode *inode = d_backing_inode(dentry);
const char *dname = dentry->d_name.name;
struct audit_names *n, *found_parent = NULL, *found_child = NULL;
@@ -2361,7 +2361,6 @@ static void audit_log_task(struct audit_buffer *ab)
kuid_t auid, uid;
kgid_t gid;
unsigned int sessionid;
- struct mm_struct *mm = current->mm;
char comm[sizeof(current->comm)];
auid = audit_get_loginuid(current);
@@ -2376,13 +2375,7 @@ static void audit_log_task(struct audit_buffer *ab)
audit_log_task_context(ab);
audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
audit_log_untrustedstring(ab, get_task_comm(comm, current));
- if (mm) {
- down_read(&mm->mmap_sem);
- if (mm->exe_file)
- audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
- up_read(&mm->mmap_sem);
- } else
- audit_log_format(ab, " exe=(null)");
+ audit_log_d_path_exe(ab, current->mm);
}
/**
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index a5ae60f0b0a2..e6983be12bd3 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,5 +1,2 @@
obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o
-ifdef CONFIG_TEST_BPF
-obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
-endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 9eb4d8a7cd87..8a6616583f38 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -134,7 +134,7 @@ static void array_map_free(struct bpf_map *map)
kvfree(array);
}
-static struct bpf_map_ops array_ops = {
+static const struct bpf_map_ops array_ops = {
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
@@ -143,14 +143,14 @@ static struct bpf_map_ops array_ops = {
.map_delete_elem = array_map_delete_elem,
};
-static struct bpf_map_type_list tl = {
+static struct bpf_map_type_list array_type __read_mostly = {
.ops = &array_ops,
.type = BPF_MAP_TYPE_ARRAY,
};
static int __init register_array_map(void)
{
- bpf_register_map_type(&tl);
+ bpf_register_map_type(&array_type);
return 0;
}
late_initcall(register_array_map);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index a64e7a207d2b..54f0e7fcd0e2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -357,8 +357,8 @@ select_insn:
ALU64_MOD_X:
if (unlikely(SRC == 0))
return 0;
- tmp = DST;
- DST = do_div(tmp, SRC);
+ div64_u64_rem(DST, SRC, &tmp);
+ DST = tmp;
CONT;
ALU_MOD_X:
if (unlikely(SRC == 0))
@@ -367,8 +367,8 @@ select_insn:
DST = do_div(tmp, (u32) SRC);
CONT;
ALU64_MOD_K:
- tmp = DST;
- DST = do_div(tmp, IMM);
+ div64_u64_rem(DST, IMM, &tmp);
+ DST = tmp;
CONT;
ALU_MOD_K:
tmp = (u32) DST;
@@ -377,7 +377,7 @@ select_insn:
ALU64_DIV_X:
if (unlikely(SRC == 0))
return 0;
- do_div(DST, SRC);
+ DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
if (unlikely(SRC == 0))
@@ -387,7 +387,7 @@ select_insn:
DST = (u32) tmp;
CONT;
ALU64_DIV_K:
- do_div(DST, IMM);
+ DST = div64_u64(DST, IMM);
CONT;
ALU_DIV_K:
tmp = (u32) DST;
@@ -656,6 +656,14 @@ void bpf_prog_free(struct bpf_prog *fp)
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
+/* Weak definitions of helper functions in case we don't have bpf syscall. */
+const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
+const struct bpf_func_proto bpf_map_update_elem_proto __weak;
+const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
+
+const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
+const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
+
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
* skb_copy_bits(), so provide a weak definition of it for NET-less config.
*/
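Some context for the arithmetic change above: do_div() expects a 32-bit divisor and rewrites its first argument to the quotient while returning the remainder, so feeding it a 64-bit SRC/IMM silently truncated the divisor. div64_u64() and div64_u64_rem() keep the full 64-bit divisor. A small hedged demonstration (values are illustrative):

#include <linux/math64.h>

static void div_demo(void)
{
        u64 dst = 10;
        u64 src = 0x100000000ULL;       /* divisor that does not fit in 32 bits */
        u64 quot, rem;

        quot = div64_u64(dst, src);     /* 0: correct 64-bit division */
        div64_u64_rem(dst, src, &rem);  /* rem == 10 */

        /*
         * The old do_div(dst, SRC) path would have used (u32)src == 0 as the
         * divisor, a divide-by-zero that the SRC != 0 check cannot catch.
         */
        (void)quot;
        (void)rem;
}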
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b3ba43674310..83c209d9b17a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -345,7 +345,7 @@ static void htab_map_free(struct bpf_map *map)
kfree(htab);
}
-static struct bpf_map_ops htab_ops = {
+static const struct bpf_map_ops htab_ops = {
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -354,14 +354,14 @@ static struct bpf_map_ops htab_ops = {
.map_delete_elem = htab_map_delete_elem,
};
-static struct bpf_map_type_list tl = {
+static struct bpf_map_type_list htab_type __read_mostly = {
.ops = &htab_ops,
.type = BPF_MAP_TYPE_HASH,
};
static int __init register_htab_map(void)
{
- bpf_register_map_type(&tl);
+ bpf_register_map_type(&htab_type);
return 0;
}
late_initcall(register_htab_map);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 9e3414d85459..bd7f5988ed9c 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -11,6 +11,8 @@
*/
#include <linux/bpf.h>
#include <linux/rcupdate.h>
+#include <linux/random.h>
+#include <linux/smp.h>
/* If kernel subsystem is allowing eBPF programs to call this function,
* inside its own verifier_ops->get_func_proto() callback it should return
@@ -41,7 +43,7 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return (unsigned long) value;
}
-struct bpf_func_proto bpf_map_lookup_elem_proto = {
+const struct bpf_func_proto bpf_map_lookup_elem_proto = {
.func = bpf_map_lookup_elem,
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
@@ -60,7 +62,7 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return map->ops->map_update_elem(map, key, value, r4);
}
-struct bpf_func_proto bpf_map_update_elem_proto = {
+const struct bpf_func_proto bpf_map_update_elem_proto = {
.func = bpf_map_update_elem,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -80,10 +82,32 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return map->ops->map_delete_elem(map, key);
}
-struct bpf_func_proto bpf_map_delete_elem_proto = {
+const struct bpf_func_proto bpf_map_delete_elem_proto = {
.func = bpf_map_delete_elem,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_MAP_KEY,
};
+
+static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ return prandom_u32();
+}
+
+const struct bpf_func_proto bpf_get_prandom_u32_proto = {
+ .func = bpf_get_prandom_u32,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+};
+
+static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ return raw_smp_processor_id();
+}
+
+const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
+ .func = bpf_get_smp_processor_id,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+};
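The two new protos above only become reachable once a program type hands them out from its ->get_func_proto() hook. A hedged sketch of such a hook ("sample_func_proto" is illustrative; the real hooks for socket filters and kprobes live in other files of this series):

static const struct bpf_func_proto *
sample_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
        default:
                return NULL;    /* anything else is rejected by the verifier */
        }
}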
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 536edc2be307..3bae6c591914 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -16,6 +16,7 @@
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
+#include <linux/version.h>
static LIST_HEAD(bpf_map_types);
@@ -354,10 +355,11 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
list_for_each_entry(tl, &bpf_prog_types, list_node) {
if (tl->type == type) {
prog->aux->ops = tl->ops;
- prog->aux->prog_type = type;
+ prog->type = type;
return 0;
}
}
+
return -EINVAL;
}
@@ -418,6 +420,7 @@ void bpf_prog_put(struct bpf_prog *prog)
bpf_prog_free(prog);
}
}
+EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
@@ -465,9 +468,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
fdput(f);
return prog;
}
+EXPORT_SYMBOL_GPL(bpf_prog_get);
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD log_buf
+#define BPF_PROG_LOAD_LAST_FIELD kern_version
static int bpf_prog_load(union bpf_attr *attr)
{
@@ -492,6 +496,10 @@ static int bpf_prog_load(union bpf_attr *attr)
if (attr->insn_cnt >= BPF_MAXINSNS)
return -EINVAL;
+ if (type == BPF_PROG_TYPE_KPROBE &&
+ attr->kern_version != LINUX_VERSION_CODE)
+ return -EINVAL;
+
/* plain bpf_prog allocation */
prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
if (!prog)
@@ -508,7 +516,7 @@ static int bpf_prog_load(union bpf_attr *attr)
prog->jited = false;
atomic_set(&prog->aux->refcnt, 1);
- prog->aux->is_gpl_compatible = is_gpl;
+ prog->gpl_compatible = is_gpl;
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
@@ -516,8 +524,7 @@ static int bpf_prog_load(union bpf_attr *attr)
goto free_prog;
/* run eBPF verifier */
- err = bpf_check(prog, attr);
-
+ err = bpf_check(&prog, attr);
if (err < 0)
goto free_used_maps;
@@ -528,7 +535,6 @@ static int bpf_prog_load(union bpf_attr *attr)
bpf_prog_select_runtime(prog);
err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
-
if (err < 0)
/* failed to allocate fd */
goto free_used_maps;
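From the loader's side, the new kern_version check above means a BPF_PROG_TYPE_KPROBE program must declare the kernel version it was built against or the load fails with -EINVAL. A hedged userspace sketch (field names follow union bpf_attr; error handling trimmed, "load_kprobe_prog" is an illustrative name):

#include <linux/bpf.h>
#include <linux/version.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int load_kprobe_prog(const struct bpf_insn *insns, int insn_cnt,
                            const char *license)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type    = BPF_PROG_TYPE_KPROBE;
        attr.insns        = (unsigned long)insns;
        attr.insn_cnt     = insn_cnt;
        attr.license      = (unsigned long)license;
        attr.kern_version = LINUX_VERSION_CODE; /* must match the running kernel */

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}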
diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c
deleted file mode 100644
index 0ceae1e6e8b5..000000000000
--- a/kernel/bpf/test_stub.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/bpf.h>
-
-/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC
- * to be used by user space verifier testsuite
- */
-struct bpf_context {
- u64 arg1;
- u64 arg2;
-};
-
-static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id)
-{
- switch (func_id) {
- case BPF_FUNC_map_lookup_elem:
- return &bpf_map_lookup_elem_proto;
- case BPF_FUNC_map_update_elem:
- return &bpf_map_update_elem_proto;
- case BPF_FUNC_map_delete_elem:
- return &bpf_map_delete_elem_proto;
- default:
- return NULL;
- }
-}
-
-static const struct bpf_context_access {
- int size;
- enum bpf_access_type type;
-} test_ctx_access[] = {
- [offsetof(struct bpf_context, arg1)] = {
- FIELD_SIZEOF(struct bpf_context, arg1),
- BPF_READ
- },
- [offsetof(struct bpf_context, arg2)] = {
- FIELD_SIZEOF(struct bpf_context, arg2),
- BPF_READ
- },
-};
-
-static bool test_is_valid_access(int off, int size, enum bpf_access_type type)
-{
- const struct bpf_context_access *access;
-
- if (off < 0 || off >= ARRAY_SIZE(test_ctx_access))
- return false;
-
- access = &test_ctx_access[off];
- if (access->size == size && (access->type & type))
- return true;
-
- return false;
-}
-
-static struct bpf_verifier_ops test_ops = {
- .get_func_proto = test_func_proto,
- .is_valid_access = test_is_valid_access,
-};
-
-static struct bpf_prog_type_list tl_prog = {
- .ops = &test_ops,
- .type = BPF_PROG_TYPE_UNSPEC,
-};
-
-static int __init register_test_ops(void)
-{
- bpf_register_prog_type(&tl_prog);
- return 0;
-}
-late_initcall(register_test_ops);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a28e09c7825d..47dcd3aa6e23 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
enum bpf_reg_type expected_type;
int err = 0;
- if (arg_type == ARG_ANYTHING)
+ if (arg_type == ARG_DONTCARE)
return 0;
if (reg->type == NOT_INIT) {
@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
return -EACCES;
}
+ if (arg_type == ARG_ANYTHING)
+ return 0;
+
if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE) {
expected_type = PTR_TO_STACK;
@@ -770,6 +773,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
expected_type = CONST_IMM;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
+ } else if (arg_type == ARG_PTR_TO_CTX) {
+ expected_type = PTR_TO_CTX;
} else {
verbose("unsupported arg_type %d\n", arg_type);
return -EFAULT;
@@ -852,7 +857,7 @@ static int check_call(struct verifier_env *env, int func_id)
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
- if (!env->prog->aux->is_gpl_compatible && fn->gpl_only) {
+ if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose("cannot call GPL only function from proprietary program\n");
return -EINVAL;
}
@@ -1172,6 +1177,18 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
return 0;
}
+static bool may_access_skb(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* verify safety of LD_ABS|LD_IND instructions:
* - they can only appear in the programs where ctx == skb
* - since they are wrappers of function calls, they scratch R1-R5 registers,
@@ -1194,8 +1211,8 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
struct reg_state *reg;
int i, err;
- if (env->prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
- verbose("BPF_LD_ABS|IND instructions are only allowed in socket filters\n");
+ if (!may_access_skb(env->prog->type)) {
+ verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
return -EINVAL;
}
@@ -1380,7 +1397,8 @@ peek_stack:
/* tell verifier to check for equivalent states
* after every call and jump
*/
- env->explored_states[t + 1] = STATE_LIST_MARK;
+ if (t + 1 < insn_cnt)
+ env->explored_states[t + 1] = STATE_LIST_MARK;
} else {
/* conditional jump with two edges */
ret = push_insn(t, t + 1, FALLTHROUGH, env);
@@ -1606,11 +1624,10 @@ static int do_check(struct verifier_env *env)
return err;
} else if (class == BPF_LDX) {
- if (BPF_MODE(insn->code) != BPF_MEM ||
- insn->imm != 0) {
- verbose("BPF_LDX uses reserved fields\n");
- return -EINVAL;
- }
+ enum bpf_reg_type src_reg_type;
+
+ /* check for reserved fields is already done */
+
/* check src operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
@@ -1620,6 +1637,8 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
+ src_reg_type = regs[insn->src_reg].type;
+
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
@@ -1629,6 +1648,32 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
+ if (BPF_SIZE(insn->code) != BPF_W) {
+ insn_idx++;
+ continue;
+ }
+
+ if (insn->imm == 0) {
+ /* saw a valid insn
+ * dst_reg = *(u32 *)(src_reg + off)
+ * use reserved 'imm' field to mark this insn
+ */
+ insn->imm = src_reg_type;
+
+ } else if (src_reg_type != insn->imm &&
+ (src_reg_type == PTR_TO_CTX ||
+ insn->imm == PTR_TO_CTX)) {
+ /* An abusive program is trying to use the same insn
+ * dst_reg = *(u32*) (src_reg + off)
+ * with different pointer types:
+ * src_reg == ctx in one branch and
+ * src_reg == stack|map in some other branch.
+ * Reject it.
+ */
+ verbose("same insn cannot be used with different pointers\n");
+ return -EINVAL;
+ }
+
} else if (class == BPF_STX) {
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn);
@@ -1776,6 +1821,13 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
int i, j;
for (i = 0; i < insn_cnt; i++, insn++) {
+ if (BPF_CLASS(insn->code) == BPF_LDX &&
+ (BPF_MODE(insn->code) != BPF_MEM ||
+ insn->imm != 0)) {
+ verbose("BPF_LDX uses reserved fields\n");
+ return -EINVAL;
+ }
+
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
@@ -1867,6 +1919,92 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
insn->src_reg = 0;
}
+static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
+{
+ struct bpf_insn *insn = prog->insnsi;
+ int insn_cnt = prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (BPF_CLASS(insn->code) != BPF_JMP ||
+ BPF_OP(insn->code) == BPF_CALL ||
+ BPF_OP(insn->code) == BPF_EXIT)
+ continue;
+
+ /* adjust offset of jmps if necessary */
+ if (i < pos && i + insn->off + 1 > pos)
+ insn->off += delta;
+ else if (i > pos && i + insn->off + 1 < pos)
+ insn->off -= delta;
+ }
+}
+
+/* convert load instructions that access fields of 'struct __sk_buff'
+ * into sequence of instructions that access fields of 'struct sk_buff'
+ */
+static int convert_ctx_accesses(struct verifier_env *env)
+{
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ struct bpf_insn insn_buf[16];
+ struct bpf_prog *new_prog;
+ u32 cnt;
+ int i;
+
+ if (!env->prog->aux->ops->convert_ctx_access)
+ return 0;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (insn->code != (BPF_LDX | BPF_MEM | BPF_W))
+ continue;
+
+ if (insn->imm != PTR_TO_CTX) {
+ /* clear internal mark */
+ insn->imm = 0;
+ continue;
+ }
+
+ cnt = env->prog->aux->ops->
+ convert_ctx_access(insn->dst_reg, insn->src_reg,
+ insn->off, insn_buf);
+ if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+ verbose("bpf verifier is misconfigured\n");
+ return -EINVAL;
+ }
+
+ if (cnt == 1) {
+ memcpy(insn, insn_buf, sizeof(*insn));
+ continue;
+ }
+
+ /* several new insns need to be inserted. Make room for them */
+ insn_cnt += cnt - 1;
+ new_prog = bpf_prog_realloc(env->prog,
+ bpf_prog_size(insn_cnt),
+ GFP_USER);
+ if (!new_prog)
+ return -ENOMEM;
+
+ new_prog->len = insn_cnt;
+
+ memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
+ sizeof(*insn) * (insn_cnt - i - cnt));
+
+ /* copy substitute insns in place of load instruction */
+ memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
+
+ /* adjust branches in the whole program */
+ adjust_branches(new_prog, i, cnt - 1);
+
+ /* keep walking new program and skip insns we just inserted */
+ env->prog = new_prog;
+ insn = new_prog->insnsi + i + cnt - 1;
+ i += cnt - 1;
+ }
+
+ return 0;
+}
+
static void free_states(struct verifier_env *env)
{
struct verifier_state_list *sl, *sln;
@@ -1889,13 +2027,13 @@ static void free_states(struct verifier_env *env)
kfree(env->explored_states);
}
-int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
+int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
char __user *log_ubuf = NULL;
struct verifier_env *env;
int ret = -EINVAL;
- if (prog->len <= 0 || prog->len > BPF_MAXINSNS)
+ if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
return -E2BIG;
/* 'struct verifier_env' can be global, but since it's not small,
@@ -1905,7 +2043,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
if (!env)
return -ENOMEM;
- env->prog = prog;
+ env->prog = *prog;
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
@@ -1937,7 +2075,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
if (ret < 0)
goto skip_full_check;
- env->explored_states = kcalloc(prog->len,
+ env->explored_states = kcalloc(env->prog->len,
sizeof(struct verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
@@ -1954,6 +2092,10 @@ skip_full_check:
while (pop_stack(env, NULL) >= 0);
free_states(env);
+ if (ret == 0)
+ /* program is valid, convert *(u32*)(ctx + off) accesses */
+ ret = convert_ctx_accesses(env);
+
if (log_level && log_len >= log_size - 1) {
BUG_ON(log_len >= log_size);
/* verifier log exceeded user supplied buffer */
@@ -1969,18 +2111,18 @@ skip_full_check:
if (ret == 0 && env->used_map_cnt) {
/* if program passed verifier, update used_maps in bpf_prog_info */
- prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
- sizeof(env->used_maps[0]),
- GFP_KERNEL);
+ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
+ sizeof(env->used_maps[0]),
+ GFP_KERNEL);
- if (!prog->aux->used_maps) {
+ if (!env->prog->aux->used_maps) {
ret = -ENOMEM;
goto free_log_buf;
}
- memcpy(prog->aux->used_maps, env->used_maps,
+ memcpy(env->prog->aux->used_maps, env->used_maps,
sizeof(env->used_maps[0]) * env->used_map_cnt);
- prog->aux->used_map_cnt = env->used_map_cnt;
+ env->prog->aux->used_map_cnt = env->used_map_cnt;
/* program is valid. Convert pseudo bpf_ld_imm64 into generic
* bpf_ld_imm64 instructions
@@ -1992,11 +2134,12 @@ free_log_buf:
if (log_level)
vfree(log_buf);
free_env:
- if (!prog->aux->used_maps)
+ if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_bpf_prog_info() will release them.
*/
release_maps(env);
+ *prog = env->prog;
kfree(env);
mutex_unlock(&bpf_verifier_lock);
return ret;
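convert_ctx_accesses() above relies on each program type supplying a ->convert_ctx_access() callback that rewrites one 32-bit context load into native instructions. A hedged, heavily simplified sketch of such a callback (the real socket-filter version lives in net/core/filter.c and handles many more fields):

static u32 sample_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
                                     struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;

        switch (ctx_off) {
        case offsetof(struct __sk_buff, len):
                /* dst_reg = *(u32 *)(src_reg + offsetof(struct sk_buff, len)) */
                *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
                                      offsetof(struct sk_buff, len));
                break;
        default:
                return 0;       /* verifier treats 0 as "misconfigured" */
        }

        return insn - insn_buf;
}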
diff --git a/kernel/capability.c b/kernel/capability.c
index 989f5bfc57dc..45432b54d5c6 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -35,6 +35,7 @@ static int __init file_caps_disable(char *str)
}
__setup("no_file_caps", file_caps_disable);
+#ifdef CONFIG_MULTIUSER
/*
* More recent versions of libcap are available from:
*
@@ -386,6 +387,24 @@ bool ns_capable(struct user_namespace *ns, int cap)
}
EXPORT_SYMBOL(ns_capable);
+
+/**
+ * capable - Determine if the current task has a superior capability in effect
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+bool capable(int cap)
+{
+ return ns_capable(&init_user_ns, cap);
+}
+EXPORT_SYMBOL(capable);
+#endif /* CONFIG_MULTIUSER */
+
/**
* file_ns_capable - Determine if the file's opener had a capability in effect
* @file: The file we want to check
@@ -412,22 +431,6 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
EXPORT_SYMBOL(file_ns_capable);
/**
- * capable - Determine if the current task has a superior capability in effect
- * @cap: The capability to be tested for
- *
- * Return true if the current task has the given superior capability currently
- * available for use, false if not.
- *
- * This sets PF_SUPERPRIV on the task if the capability is available on the
- * assumption that it's about to be used.
- */
-bool capable(int cap)
-{
- return ns_capable(&init_user_ns, cap);
-}
-EXPORT_SYMBOL(capable);
-
-/**
* capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
* @inode: The inode in question
* @cap: The capability in question
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a220fdb66568..469dd547770c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4196,7 +4196,9 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
- return seq_printf(s, "%d\n", *(int *)v);
+ seq_printf(s, "%d\n", *(int *)v);
+
+ return 0;
}
static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
@@ -5451,7 +5453,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- return idr_find(&ss->css_idr, id);
+ return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
}
#ifdef CONFIG_CGROUP_DEBUG
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 937ecdfdf258..72d59a1a6eb6 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -39,15 +39,15 @@ void context_tracking_cpu_set(int cpu)
}
/**
- * context_tracking_user_enter - Inform the context tracking that the CPU is going to
- * enter userspace mode.
+ * context_tracking_enter - Inform the context tracking that the CPU is going
+ * to enter user or guest space mode.
*
* This function must be called right before we switch from the kernel
- * to userspace, when it's guaranteed the remaining kernel instructions
- * to execute won't use any RCU read side critical section because this
- * function sets RCU in extended quiescent state.
+ * to user or guest space, when it's guaranteed the remaining kernel
+ * instructions to execute won't use any RCU read side critical section
+ * because this function sets RCU in extended quiescent state.
*/
-void context_tracking_user_enter(void)
+void context_tracking_enter(enum ctx_state state)
{
unsigned long flags;
@@ -75,9 +75,8 @@ void context_tracking_user_enter(void)
WARN_ON_ONCE(!current->mm);
local_irq_save(flags);
- if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+ if ( __this_cpu_read(context_tracking.state) != state) {
if (__this_cpu_read(context_tracking.active)) {
- trace_user_enter(0);
/*
* At this stage, only low level arch entry code remains and
* then we'll run in userspace. We can assume there won't be
@@ -85,7 +84,10 @@ void context_tracking_user_enter(void)
* user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
* on the tick.
*/
- vtime_user_enter(current);
+ if (state == CONTEXT_USER) {
+ trace_user_enter(0);
+ vtime_user_enter(current);
+ }
rcu_user_enter();
}
/*
@@ -101,24 +103,32 @@ void context_tracking_user_enter(void)
* OTOH we can spare the calls to vtime and RCU when context_tracking.active
* is false because we know that CPU is not tickless.
*/
- __this_cpu_write(context_tracking.state, IN_USER);
+ __this_cpu_write(context_tracking.state, state);
}
local_irq_restore(flags);
}
+NOKPROBE_SYMBOL(context_tracking_enter);
+EXPORT_SYMBOL_GPL(context_tracking_enter);
+
+void context_tracking_user_enter(void)
+{
+ context_tracking_enter(CONTEXT_USER);
+}
NOKPROBE_SYMBOL(context_tracking_user_enter);
/**
- * context_tracking_user_exit - Inform the context tracking that the CPU is
- * exiting userspace mode and entering the kernel.
+ * context_tracking_exit - Inform the context tracking that the CPU is
+ * exiting user or guest mode and entering the kernel.
*
- * This function must be called after we entered the kernel from userspace
- * before any use of RCU read side critical section. This potentially include
- * any high level kernel code like syscalls, exceptions, signal handling, etc...
+ * This function must be called after we entered the kernel from user or
+ * guest space before any use of RCU read side critical section. This
+ * potentially includes any high level kernel code like syscalls, exceptions,
+ * signal handling, etc...
*
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
-void context_tracking_user_exit(void)
+void context_tracking_exit(enum ctx_state state)
{
unsigned long flags;
@@ -129,20 +139,29 @@ void context_tracking_user_exit(void)
return;
local_irq_save(flags);
- if (__this_cpu_read(context_tracking.state) == IN_USER) {
+ if (__this_cpu_read(context_tracking.state) == state) {
if (__this_cpu_read(context_tracking.active)) {
/*
* We are going to run code that may use RCU. Inform
* RCU core about that (ie: we may need the tick again).
*/
rcu_user_exit();
- vtime_user_exit(current);
- trace_user_exit(0);
+ if (state == CONTEXT_USER) {
+ vtime_user_exit(current);
+ trace_user_exit(0);
+ }
}
- __this_cpu_write(context_tracking.state, IN_KERNEL);
+ __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
}
local_irq_restore(flags);
}
+NOKPROBE_SYMBOL(context_tracking_exit);
+EXPORT_SYMBOL_GPL(context_tracking_exit);
+
+void context_tracking_user_exit(void)
+{
+ context_tracking_exit(CONTEXT_USER);
+}
NOKPROBE_SYMBOL(context_tracking_user_exit);
/**
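A hedged sketch of the intended second consumer of the generalized hooks above: a guest_enter()-style wrapper can now share the same entry points, getting the RCU extended quiescent state without the user-space vtime/trace hooks. This assumes a CONTEXT_GUEST value exists next to CONTEXT_USER/CONTEXT_KERNEL in this series; the "my_" names are illustrative.

static inline void my_guest_enter(void)
{
        if (context_tracking_is_enabled())
                context_tracking_enter(CONTEXT_GUEST);
}

static inline void my_guest_exit(void)
{
        if (context_tracking_is_enabled())
                context_tracking_exit(CONTEXT_GUEST);
}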
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 82eea9c5af61..94bbe4695232 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -411,8 +411,10 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
*
* Wait for the stop thread to go away.
*/
- while (!idle_cpu(cpu))
+ while (!per_cpu(cpu_dead_idle, cpu))
cpu_relax();
+ smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
+ per_cpu(cpu_dead_idle, cpu) = false;
hotplug_cpu__broadcast_tick_pull(cpu);
/* This actually kills the CPU. */
@@ -451,6 +453,37 @@ out:
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
+/*
+ * Unpark per-CPU smpboot kthreads at CPU-online time.
+ */
+static int smpboot_thread_call(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+
+ case CPU_ONLINE:
+ smpboot_unpark_threads(cpu);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block smpboot_thread_notifier = {
+ .notifier_call = smpboot_thread_call,
+ .priority = CPU_PRI_SMPBOOT,
+};
+
+void __cpuinit smpboot_thread_init(void)
+{
+ register_cpu_notifier(&smpboot_thread_notifier);
+}
+
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
@@ -490,9 +523,6 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
goto out_notify;
BUG_ON(!cpu_online(cpu));
- /* Wake the per cpu threads */
- smpboot_unpark_threads(cpu);
-
/* Now call notifier in preparation. */
cpu_notify(CPU_ONLINE | mod, hcpu);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c68f0721df10..ee14e3a35a29 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2453,20 +2453,12 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
- * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
- * set, yes, we can always allocate. If node is in our task's mems_allowed,
- * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest
- * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
- * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
- * flag, yes.
+ * If we're in interrupt, yes, we can always allocate. If @node is set in
+ * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
+ * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
+ * yes. If current has access to memory reserves due to TIF_MEMDIE, yes.
* Otherwise, no.
*
- * The __GFP_THISNODE placement logic is really handled elsewhere,
- * by forcibly using a zonelist starting at a specified node, and by
- * (in get_page_from_freelist()) refusing to consider the zones for
- * any node on the zonelist except the first. By the time any such
- * calls get to this routine, we should just shut up and say 'yes'.
- *
* GFP_USER allocations are marked with the __GFP_HARDWALL bit,
* and do not allow allocations outside the current tasks cpuset
* unless the task has been OOM killed as is marked TIF_MEMDIE.
@@ -2502,7 +2494,7 @@ int __cpuset_node_allowed(int node, gfp_t gfp_mask)
int allowed; /* is allocation in zone z allowed? */
unsigned long flags;
- if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
+ if (in_interrupt())
return 1;
if (node_isset(node, current->mems_allowed))
return 1;
diff --git a/kernel/cred.c b/kernel/cred.c
index e0573a43c7df..ec1c07667ec1 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -29,6 +29,9 @@
static struct kmem_cache *cred_jar;
+/* init to 2 - one for init_task, one to ensure it is never freed */
+struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+
/*
* The initial credentials for the initial task
*/
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2fabc0627165..1a3bf48743ce 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -34,14 +34,16 @@
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
+#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
-#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
#include "internal.h"
@@ -153,7 +155,7 @@ enum event_type_t {
*/
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
-static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
+static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@@ -327,6 +329,11 @@ static inline u64 perf_clock(void)
return local_clock();
}
+static inline u64 perf_event_clock(struct perf_event *event)
+{
+ return event->clock();
+}
+
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
@@ -351,32 +358,6 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
#ifdef CONFIG_CGROUP_PERF
-/*
- * perf_cgroup_info keeps track of time_enabled for a cgroup.
- * This is a per-cpu dynamically allocated data structure.
- */
-struct perf_cgroup_info {
- u64 time;
- u64 timestamp;
-};
-
-struct perf_cgroup {
- struct cgroup_subsys_state css;
- struct perf_cgroup_info __percpu *info;
-};
-
-/*
- * Must ensure cgroup is pinned (css_get) before calling
- * this function. In other words, we cannot call this function
- * if there is no cgroup event for the current CPU context.
- */
-static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
-{
- return container_of(task_css(task, perf_event_cgrp_id),
- struct perf_cgroup, css);
-}
-
static inline bool
perf_cgroup_match(struct perf_event *event)
{
@@ -905,6 +886,15 @@ static void get_ctx(struct perf_event_context *ctx)
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
+static void free_ctx(struct rcu_head *head)
+{
+ struct perf_event_context *ctx;
+
+ ctx = container_of(head, struct perf_event_context, rcu_head);
+ kfree(ctx->task_ctx_data);
+ kfree(ctx);
+}
+
static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
@@ -912,7 +902,7 @@ static void put_ctx(struct perf_event_context *ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task)
put_task_struct(ctx->task);
- kfree_rcu(ctx, rcu_head);
+ call_rcu(&ctx->rcu_head, free_ctx);
}
}
@@ -923,10 +913,30 @@ static void put_ctx(struct perf_event_context *ctx)
* Those places that change perf_event::ctx will hold both
* perf_event_ctx::mutex of the 'old' and 'new' ctx value.
*
- * Lock ordering is by mutex address. There is one other site where
- * perf_event_context::mutex nests and that is put_event(). But remember that
- * that is a parent<->child context relation, and migration does not affect
- * children, therefore these two orderings should not interact.
+ * Lock ordering is by mutex address. There are two other sites where
+ * perf_event_context::mutex nests and those are:
+ *
+ * - perf_event_exit_task_context() [ child , 0 ]
+ * __perf_event_exit_task()
+ * sync_child_event()
+ * put_event() [ parent, 1 ]
+ *
+ * - perf_event_init_context() [ parent, 0 ]
+ * inherit_task_group()
+ * inherit_group()
+ * inherit_event()
+ * perf_event_alloc()
+ * perf_init_event()
+ * perf_try_init_event() [ child , 1 ]
+ *
+ * While it appears there is an obvious deadlock here -- the parent and child
+ * nesting levels are inverted between the two -- this is in fact safe because
+ * life-time rules separate them: an exiting task cannot fork, and a spawning
+ * task cannot (yet) exit.
+ *
+ * But remember that these are parent<->child context relations, and
+ * migration does not affect children, therefore these two orderings should not
+ * interact.
*
* The change in perf_event::ctx does not affect children (as claimed above)
* because the sys_perf_event_open() case will install a new event and break
@@ -1239,9 +1249,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
if (is_cgroup_event(event))
ctx->nr_cgroups++;
- if (has_branch_stack(event))
- ctx->nr_branch_stack++;
-
list_add_rcu(&event->event_entry, &ctx->event_list);
ctx->nr_events++;
if (event->attr.inherit_stat)
@@ -1408,9 +1415,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
cpuctx->cgrp = NULL;
}
- if (has_branch_stack(event))
- ctx->nr_branch_stack--;
-
ctx->nr_events--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
@@ -1847,6 +1851,7 @@ static void perf_set_shadow_time(struct perf_event *event,
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
+static void perf_log_itrace_start(struct perf_event *event);
static int
event_sched_in(struct perf_event *event,
@@ -1881,6 +1886,12 @@ event_sched_in(struct perf_event *event,
perf_pmu_disable(event->pmu);
+ event->tstamp_running += tstamp - event->tstamp_stopped;
+
+ perf_set_shadow_time(event, ctx, tstamp);
+
+ perf_log_itrace_start(event);
+
if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
@@ -1888,10 +1899,6 @@ event_sched_in(struct perf_event *event,
goto out;
}
- event->tstamp_running += tstamp - event->tstamp_stopped;
-
- perf_set_shadow_time(event, ctx, tstamp);
-
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
@@ -2559,6 +2566,9 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
next->perf_event_ctxp[ctxn] = ctx;
ctx->task = next;
next_ctx->task = task;
+
+ swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
+
do_switch = 0;
perf_event_sync_stat(ctx, next_ctx);
@@ -2577,6 +2587,56 @@ unlock:
}
}
+void perf_sched_cb_dec(struct pmu *pmu)
+{
+ this_cpu_dec(perf_sched_cb_usages);
+}
+
+void perf_sched_cb_inc(struct pmu *pmu)
+{
+ this_cpu_inc(perf_sched_cb_usages);
+}
+
+/*
+ * This function provides the context switch callback to the lower code
+ * layer. It is invoked ONLY when the context switch callback is enabled.
+ */
+static void perf_pmu_sched_task(struct task_struct *prev,
+ struct task_struct *next,
+ bool sched_in)
+{
+ struct perf_cpu_context *cpuctx;
+ struct pmu *pmu;
+ unsigned long flags;
+
+ if (prev == next)
+ return;
+
+ local_irq_save(flags);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ if (pmu->sched_task) {
+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+
+ perf_pmu_disable(pmu);
+
+ pmu->sched_task(cpuctx->task_ctx, sched_in);
+
+ perf_pmu_enable(pmu);
+
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+ }
+ }
+
+ rcu_read_unlock();
+
+ local_irq_restore(flags);
+}
+
#define for_each_task_context_nr(ctxn) \
for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
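The hunk above replaces the branch-stack-specific flush with a generic, refcounted context-switch callback. A minimal sketch of how a PMU driver might hook into it; the example_* helpers and needs_ctx_flush() are hypothetical, only perf_sched_cb_inc()/dec() and the ->sched_task() signature come from this patch:

	/* bumped from ->add()/->del() while any event needs the callback */
	static void example_pmu_add(struct perf_event *event, int flags)
	{
		if (needs_ctx_flush(event))
			perf_sched_cb_inc(event->ctx->pmu);
		/* ... program the counter ... */
	}

	static void example_pmu_del(struct perf_event *event, int flags)
	{
		if (needs_ctx_flush(event))
			perf_sched_cb_dec(event->ctx->pmu);
		/* ... stop the counter ... */
	}

	/* invoked by perf_pmu_sched_task() on both sched-out and sched-in
	 * while the per-cpu counter is non-zero; ctx may be NULL */
	static void example_sched_task(struct perf_event_context *ctx, bool sched_in)
	{
		if (sched_in)
			example_restore_state(ctx);
		else
			example_save_state(ctx);
	}

	static struct pmu example_pmu = {
		.add		= example_pmu_add,
		.del		= example_pmu_del,
		.sched_task	= example_sched_task,
	};

Unlike the removed perf_branch_stack_sched_in(), the callback fires on sched-out as well as sched-in, so per-task hardware state can be saved and not just flushed.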
@@ -2596,6 +2656,9 @@ void __perf_event_task_sched_out(struct task_struct *task,
{
int ctxn;
+ if (__this_cpu_read(perf_sched_cb_usages))
+ perf_pmu_sched_task(task, next, false);
+
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
@@ -2755,64 +2818,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
}
/*
- * When sampling the branck stack in system-wide, it may be necessary
- * to flush the stack on context switch. This happens when the branch
- * stack does not tag its entries with the pid of the current task.
- * Otherwise it becomes impossible to associate a branch entry with a
- * task. This ambiguity is more likely to appear when the branch stack
- * supports priv level filtering and the user sets it to monitor only
- * at the user level (which could be a useful measurement in system-wide
- * mode). In that case, the risk is high of having a branch stack with
- * branch from multiple tasks. Flushing may mean dropping the existing
- * entries or stashing them somewhere in the PMU specific code layer.
- *
- * This function provides the context switch callback to the lower code
- * layer. It is invoked ONLY when there is at least one system-wide context
- * with at least one active event using taken branch sampling.
- */
-static void perf_branch_stack_sched_in(struct task_struct *prev,
- struct task_struct *task)
-{
- struct perf_cpu_context *cpuctx;
- struct pmu *pmu;
- unsigned long flags;
-
- /* no need to flush branch stack if not changing task */
- if (prev == task)
- return;
-
- local_irq_save(flags);
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
- /*
- * check if the context has at least one
- * event using PERF_SAMPLE_BRANCH_STACK
- */
- if (cpuctx->ctx.nr_branch_stack > 0
- && pmu->flush_branch_stack) {
-
- perf_ctx_lock(cpuctx, cpuctx->task_ctx);
-
- perf_pmu_disable(pmu);
-
- pmu->flush_branch_stack();
-
- perf_pmu_enable(pmu);
-
- perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
- }
- }
-
- rcu_read_unlock();
-
- local_irq_restore(flags);
-}
-
-/*
* Called from scheduler to add the events of the current task
* with interrupts disabled.
*
@@ -2844,9 +2849,8 @@ void __perf_event_task_sched_in(struct task_struct *prev,
if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
- /* check for system-wide branch_stack events */
- if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
- perf_branch_stack_sched_in(prev, task);
+ if (__this_cpu_read(perf_sched_cb_usages))
+ perf_pmu_sched_task(prev, task, true);
}
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -3220,7 +3224,10 @@ static void __perf_event_read(void *info)
static inline u64 perf_event_count(struct perf_event *event)
{
- return local64_read(&event->count) + atomic64_read(&event->child_count);
+ if (event->pmu->count)
+ return event->pmu->count(event);
+
+ return __perf_event_count(event);
}
static u64 perf_event_read(struct perf_event *event)
@@ -3321,12 +3328,15 @@ errout:
* Returns a matching context with refcount and pincount.
*/
static struct perf_event_context *
-find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+find_get_context(struct pmu *pmu, struct task_struct *task,
+ struct perf_event *event)
{
struct perf_event_context *ctx, *clone_ctx = NULL;
struct perf_cpu_context *cpuctx;
+ void *task_ctx_data = NULL;
unsigned long flags;
int ctxn, err;
+ int cpu = event->cpu;
if (!task) {
/* Must be root to operate on a CPU event: */
@@ -3354,11 +3364,24 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
if (ctxn < 0)
goto errout;
+ if (event->attach_state & PERF_ATTACH_TASK_DATA) {
+ task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
+ if (!task_ctx_data) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ }
+
retry:
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
clone_ctx = unclone_ctx(ctx);
++ctx->pin_count;
+
+ if (task_ctx_data && !ctx->task_ctx_data) {
+ ctx->task_ctx_data = task_ctx_data;
+ task_ctx_data = NULL;
+ }
raw_spin_unlock_irqrestore(&ctx->lock, flags);
if (clone_ctx)
@@ -3369,6 +3392,11 @@ retry:
if (!ctx)
goto errout;
+ if (task_ctx_data) {
+ ctx->task_ctx_data = task_ctx_data;
+ task_ctx_data = NULL;
+ }
+
err = 0;
mutex_lock(&task->perf_event_mutex);
/*
@@ -3395,13 +3423,16 @@ retry:
}
}
+ kfree(task_ctx_data);
return ctx;
errout:
+ kfree(task_ctx_data);
return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);
+static void perf_event_free_bpf_prog(struct perf_event *event);
static void free_event_rcu(struct rcu_head *head)
{
@@ -3411,10 +3442,10 @@ static void free_event_rcu(struct rcu_head *head)
if (event->ns)
put_pid_ns(event->ns);
perf_event_free_filter(event);
+ perf_event_free_bpf_prog(event);
kfree(event);
}
-static void ring_buffer_put(struct ring_buffer *rb);
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb);
@@ -3423,10 +3454,6 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
if (event->parent)
return;
- if (has_branch_stack(event)) {
- if (!(event->attach_state & PERF_ATTACH_TASK))
- atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
- }
if (is_cgroup_event(event))
atomic_dec(&per_cpu(perf_cgroup_events, cpu));
}
@@ -3454,6 +3481,91 @@ static void unaccount_event(struct perf_event *event)
unaccount_event_cpu(event, event->cpu);
}
+/*
+ * The following implement mutual exclusion of events on "exclusive" pmus
+ * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
+ * at a time, so we disallow creating events that might conflict, namely:
+ *
+ * 1) cpu-wide events in the presence of per-task events,
+ * 2) per-task events in the presence of cpu-wide events,
+ * 3) two matching events on the same context.
+ *
+ * The former two cases are handled in the allocation path (perf_event_alloc(),
+ * __free_event()), the latter -- before the first perf_install_in_context().
+ */
+static int exclusive_event_init(struct perf_event *event)
+{
+ struct pmu *pmu = event->pmu;
+
+ if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+ return 0;
+
+ /*
+ * Prevent co-existence of per-task and cpu-wide events on the
+ * same exclusive pmu.
+ *
+ * Negative pmu::exclusive_cnt means there are cpu-wide
+ * events on this "exclusive" pmu, positive means there are
+ * per-task events.
+ *
+ * Since this is called in perf_event_alloc() path, event::ctx
+ * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
+ * to mean "per-task event", because unlike other attach states it
+ * never gets cleared.
+ */
+ if (event->attach_state & PERF_ATTACH_TASK) {
+ if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
+ return -EBUSY;
+ } else {
+ if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void exclusive_event_destroy(struct perf_event *event)
+{
+ struct pmu *pmu = event->pmu;
+
+ if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+ return;
+
+ /* see comment in exclusive_event_init() */
+ if (event->attach_state & PERF_ATTACH_TASK)
+ atomic_dec(&pmu->exclusive_cnt);
+ else
+ atomic_inc(&pmu->exclusive_cnt);
+}
+
+static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
+{
+ if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
+ (e1->cpu == e2->cpu ||
+ e1->cpu == -1 ||
+ e2->cpu == -1))
+ return true;
+ return false;
+}
+
+/* Called under the same ctx::mutex as perf_install_in_context() */
+static bool exclusive_event_installable(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ struct perf_event *iter_event;
+ struct pmu *pmu = event->pmu;
+
+ if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+ return true;
+
+ list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
+ if (exclusive_event_match(iter_event, event))
+ return false;
+ }
+
+ return true;
+}
+
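For a driver, opting into this scheme is just the capability bit; the helpers above then fail conflicting event creation with -EBUSY. A sketch (hypothetical pmu, other callbacks elided):

	static struct pmu example_exclusive_pmu = {
		/*
		 * One event at a time: exclusive_event_init() rejects mixing
		 * cpu-wide and per-task events, exclusive_event_installable()
		 * rejects a second matching event in the same context.
		 */
		.capabilities	= PERF_PMU_CAP_EXCLUSIVE,
		.event_init	= example_event_init,
		.add		= example_add,
		.del		= example_del,
	};

Note that a later hunk in sys_perf_event_open() also refuses to place an event from an exclusive pmu into a group.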
static void __free_event(struct perf_event *event)
{
if (!event->parent) {
@@ -3467,8 +3579,10 @@ static void __free_event(struct perf_event *event)
if (event->ctx)
put_ctx(event->ctx);
- if (event->pmu)
+ if (event->pmu) {
+ exclusive_event_destroy(event);
module_put(event->pmu->module);
+ }
call_rcu(&event->rcu_head, free_event_rcu);
}
@@ -3563,9 +3677,6 @@ static void perf_remove_from_owner(struct perf_event *event)
}
}
-/*
- * Called when the last reference to the file is gone.
- */
static void put_event(struct perf_event *event)
{
struct perf_event_context *ctx;
@@ -3603,6 +3714,9 @@ int perf_event_release_kernel(struct perf_event *event)
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+/*
+ * Called when the last reference to the file is gone.
+ */
static int perf_release(struct inode *inode, struct file *file)
{
put_event(file->private_data);
@@ -3927,6 +4041,7 @@ static inline int perf_fget_light(int fd, struct fd *p)
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
+static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
@@ -3980,6 +4095,9 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
case PERF_EVENT_IOC_SET_FILTER:
return perf_event_set_filter(event, (void __user *)arg);
+ case PERF_EVENT_IOC_SET_BPF:
+ return perf_event_set_bpf_prog(event, arg);
+
default:
return -ENOTTY;
}
@@ -4096,6 +4214,8 @@ static void perf_event_init_userpage(struct perf_event *event)
/* Allow new userspace to detect that bit 0 is deprecated */
userpg->cap_bit0_is_deprecated = 1;
userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+ userpg->data_offset = PAGE_SIZE;
+ userpg->data_size = perf_data_size(rb);
unlock:
rcu_read_unlock();
@@ -4263,7 +4383,7 @@ static void rb_free_rcu(struct rcu_head *rcu_head)
rb_free(rb);
}
-static struct ring_buffer *ring_buffer_get(struct perf_event *event)
+struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
struct ring_buffer *rb;
@@ -4278,7 +4398,7 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
return rb;
}
-static void ring_buffer_put(struct ring_buffer *rb)
+void ring_buffer_put(struct ring_buffer *rb)
{
if (!atomic_dec_and_test(&rb->refcount))
return;
@@ -4295,6 +4415,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
atomic_inc(&event->mmap_count);
atomic_inc(&event->rb->mmap_count);
+ if (vma->vm_pgoff)
+ atomic_inc(&event->rb->aux_mmap_count);
+
if (event->pmu->event_mapped)
event->pmu->event_mapped(event);
}
@@ -4319,6 +4442,20 @@ static void perf_mmap_close(struct vm_area_struct *vma)
if (event->pmu->event_unmapped)
event->pmu->event_unmapped(event);
+ /*
+ * rb->aux_mmap_count will always drop before rb->mmap_count and
+ * event->mmap_count, so it is ok to use event->mmap_mutex to
+ * serialize with perf_mmap here.
+ */
+ if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
+ atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+ atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+ vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
+
+ rb_free_aux(rb);
+ mutex_unlock(&event->mmap_mutex);
+ }
+
atomic_dec(&rb->mmap_count);
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
@@ -4392,7 +4529,7 @@ out_put:
static const struct vm_operations_struct perf_mmap_vmops = {
.open = perf_mmap_open,
- .close = perf_mmap_close,
+	.close		= perf_mmap_close, /* non mergeable */
.fault = perf_mmap_fault,
.page_mkwrite = perf_mmap_fault,
};
@@ -4403,10 +4540,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
- struct ring_buffer *rb;
+ struct ring_buffer *rb = NULL;
unsigned long vma_size;
unsigned long nr_pages;
- long user_extra, extra;
+ long user_extra = 0, extra = 0;
int ret = 0, flags = 0;
/*
@@ -4421,7 +4558,66 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma_size = vma->vm_end - vma->vm_start;
- nr_pages = (vma_size / PAGE_SIZE) - 1;
+
+ if (vma->vm_pgoff == 0) {
+ nr_pages = (vma_size / PAGE_SIZE) - 1;
+ } else {
+ /*
+ * AUX area mapping: if rb->aux_nr_pages != 0, it's already
+ * mapped, all subsequent mappings should have the same size
+ * and offset. Must be above the normal perf buffer.
+ */
+ u64 aux_offset, aux_size;
+
+ if (!event->rb)
+ return -EINVAL;
+
+ nr_pages = vma_size / PAGE_SIZE;
+
+ mutex_lock(&event->mmap_mutex);
+ ret = -EINVAL;
+
+ rb = event->rb;
+ if (!rb)
+ goto aux_unlock;
+
+ aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
+ aux_size = ACCESS_ONCE(rb->user_page->aux_size);
+
+ if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
+ goto aux_unlock;
+
+ if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
+ goto aux_unlock;
+
+ /* already mapped with a different offset */
+ if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
+ goto aux_unlock;
+
+ if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
+ goto aux_unlock;
+
+ /* already mapped with a different size */
+ if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
+ goto aux_unlock;
+
+ if (!is_power_of_2(nr_pages))
+ goto aux_unlock;
+
+ if (!atomic_inc_not_zero(&rb->mmap_count))
+ goto aux_unlock;
+
+ if (rb_has_aux(rb)) {
+ atomic_inc(&rb->aux_mmap_count);
+ ret = 0;
+ goto unlock;
+ }
+
+ atomic_set(&rb->aux_mmap_count, 1);
+ user_extra = nr_pages;
+
+ goto accounting;
+ }
/*
* If we have rb pages ensure they're a power-of-two number, so we
@@ -4433,9 +4629,6 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_size != PAGE_SIZE * (1 + nr_pages))
return -EINVAL;
- if (vma->vm_pgoff != 0)
- return -EINVAL;
-
WARN_ON_ONCE(event->ctx->parent_ctx);
again:
mutex_lock(&event->mmap_mutex);
@@ -4459,6 +4652,8 @@ again:
}
user_extra = nr_pages + 1;
+
+accounting:
user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
/*
@@ -4468,7 +4663,6 @@ again:
user_locked = atomic_long_read(&user->locked_vm) + user_extra;
- extra = 0;
if (user_locked > user_lock_limit)
extra = user_locked - user_lock_limit;
@@ -4482,35 +4676,46 @@ again:
goto unlock;
}
- WARN_ON(event->rb);
+ WARN_ON(!rb && event->rb);
if (vma->vm_flags & VM_WRITE)
flags |= RING_BUFFER_WRITABLE;
- rb = rb_alloc(nr_pages,
- event->attr.watermark ? event->attr.wakeup_watermark : 0,
- event->cpu, flags);
-
if (!rb) {
- ret = -ENOMEM;
- goto unlock;
- }
+ rb = rb_alloc(nr_pages,
+ event->attr.watermark ? event->attr.wakeup_watermark : 0,
+ event->cpu, flags);
- atomic_set(&rb->mmap_count, 1);
- rb->mmap_locked = extra;
- rb->mmap_user = get_current_user();
+ if (!rb) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
- atomic_long_add(user_extra, &user->locked_vm);
- vma->vm_mm->pinned_vm += extra;
+ atomic_set(&rb->mmap_count, 1);
+ rb->mmap_user = get_current_user();
+ rb->mmap_locked = extra;
- ring_buffer_attach(event, rb);
+ ring_buffer_attach(event, rb);
- perf_event_init_userpage(event);
- perf_event_update_userpage(event);
+ perf_event_init_userpage(event);
+ perf_event_update_userpage(event);
+ } else {
+ ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+ event->attr.aux_watermark, flags);
+ if (!ret)
+ rb->aux_mmap_locked = extra;
+ }
unlock:
- if (!ret)
+ if (!ret) {
+ atomic_long_add(user_extra, &user->locked_vm);
+ vma->vm_mm->pinned_vm += extra;
+
atomic_inc(&event->mmap_count);
+ } else if (rb) {
+ atomic_dec(&rb->mmap_count);
+ }
+aux_unlock:
mutex_unlock(&event->mmap_mutex);
/*
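From user space the AUX area is a second mmap() of the same event fd: the first mapping exposes data_offset/data_size and accepts aux_offset/aux_size through the user page, and the second mapping must match those values exactly. A hedged sketch of the sequence the checks above expect (fd comes from an earlier perf_event_open(), the aux_offset/aux_size fields are uapi additions made elsewhere in this series, error handling omitted):

	long page_size = sysconf(_SC_PAGESIZE);

	/* user page plus a power-of-two number of data pages, as before */
	size_t data_len = (64 + 1) * page_size;
	struct perf_event_mmap_page *up =
		mmap(NULL, data_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* publish where the AUX buffer goes: above the data pages, with a
	 * power-of-two page count */
	up->aux_offset = data_len;
	up->aux_size   = 128 * page_size;

	/* mapping without PROT_WRITE would select overwrite mode */
	void *aux = mmap(NULL, up->aux_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, up->aux_offset);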
@@ -4766,7 +4971,7 @@ static void __perf_event_header__init_id(struct perf_event_header *header,
}
if (sample_type & PERF_SAMPLE_TIME)
- data->time = perf_clock();
+ data->time = perf_event_clock(event);
if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
data->id = primary_event_id(event);
@@ -5344,6 +5549,8 @@ static void perf_event_task_output(struct perf_event *event,
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
+ task_event->event_id.time = perf_event_clock(event);
+
perf_output_put(&handle, task_event->event_id);
perf_event__output_id_sample(event, &handle, &sample);
@@ -5377,7 +5584,7 @@ static void perf_event_task(struct task_struct *task,
/* .ppid */
/* .tid */
/* .ptid */
- .time = perf_clock(),
+ /* .time */
},
};
@@ -5732,6 +5939,40 @@ void perf_event_mmap(struct vm_area_struct *vma)
perf_event_mmap_event(&mmap_event);
}
+void perf_event_aux_event(struct perf_event *event, unsigned long head,
+ unsigned long size, u64 flags)
+{
+ struct perf_output_handle handle;
+ struct perf_sample_data sample;
+ struct perf_aux_event {
+ struct perf_event_header header;
+ u64 offset;
+ u64 size;
+ u64 flags;
+ } rec = {
+ .header = {
+ .type = PERF_RECORD_AUX,
+ .misc = 0,
+ .size = sizeof(rec),
+ },
+ .offset = head,
+ .size = size,
+ .flags = flags,
+ };
+ int ret;
+
+ perf_event_header__init_id(&rec.header, &sample, event);
+ ret = perf_output_begin(&handle, event, rec.header.size);
+
+ if (ret)
+ return;
+
+ perf_output_put(&handle, rec);
+ perf_event__output_id_sample(event, &handle, &sample);
+
+ perf_output_end(&handle);
+}
+
/*
* IRQ throttle logging
*/
@@ -5753,7 +5994,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
.misc = 0,
.size = sizeof(throttle_event),
},
- .time = perf_clock(),
+ .time = perf_event_clock(event),
.id = primary_event_id(event),
.stream_id = event->id,
};
@@ -5773,6 +6014,44 @@ static void perf_log_throttle(struct perf_event *event, int enable)
perf_output_end(&handle);
}
+static void perf_log_itrace_start(struct perf_event *event)
+{
+ struct perf_output_handle handle;
+ struct perf_sample_data sample;
+ struct perf_aux_event {
+ struct perf_event_header header;
+ u32 pid;
+ u32 tid;
+ } rec;
+ int ret;
+
+ if (event->parent)
+ event = event->parent;
+
+ if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
+ event->hw.itrace_started)
+ return;
+
+ event->hw.itrace_started = 1;
+
+ rec.header.type = PERF_RECORD_ITRACE_START;
+ rec.header.misc = 0;
+ rec.header.size = sizeof(rec);
+ rec.pid = perf_event_pid(event, current);
+ rec.tid = perf_event_tid(event, current);
+
+ perf_event_header__init_id(&rec.header, &sample, event);
+ ret = perf_output_begin(&handle, event, rec.header.size);
+
+ if (ret)
+ return;
+
+ perf_output_put(&handle, rec);
+ perf_event__output_id_sample(event, &handle, &sample);
+
+ perf_output_end(&handle);
+}
+
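On the consumer side both new record types are a fixed header plus the fields initialised above, followed by the usual sample_id trailer when ID sampling is enabled; a hedged sketch of the layouts a tool would parse:

	struct aux_record {			/* PERF_RECORD_AUX */
		struct perf_event_header header;
		__u64	offset;			/* position within the AUX buffer */
		__u64	size;
		__u64	flags;			/* PERF_AUX_FLAG_TRUNCATED, _OVERWRITE */
	};

	struct itrace_start_record {		/* PERF_RECORD_ITRACE_START */
		struct perf_event_header header;
		__u32	pid;
		__u32	tid;
	};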
/*
* Generic event overflow handling, sampling.
*/
@@ -6133,6 +6412,7 @@ static int perf_swevent_add(struct perf_event *event, int flags)
}
hlist_add_head_rcu(&event->hlist_entry, head);
+ perf_event_update_userpage(event);
return 0;
}
@@ -6296,6 +6576,8 @@ static int perf_swevent_init(struct perf_event *event)
static struct pmu perf_swevent = {
.task_ctx_nr = perf_sw_context,
+ .capabilities = PERF_PMU_CAP_NO_NMI,
+
.event_init = perf_swevent_init,
.add = perf_swevent_add,
.del = perf_swevent_del,
@@ -6449,6 +6731,49 @@ static void perf_event_free_filter(struct perf_event *event)
ftrace_profile_free_filter(event);
}
+static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
+{
+ struct bpf_prog *prog;
+
+ if (event->attr.type != PERF_TYPE_TRACEPOINT)
+ return -EINVAL;
+
+ if (event->tp_event->prog)
+ return -EEXIST;
+
+ if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE))
+ /* bpf programs can only be attached to kprobes */
+ return -EINVAL;
+
+ prog = bpf_prog_get(prog_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ if (prog->type != BPF_PROG_TYPE_KPROBE) {
+ /* valid fd, but invalid bpf program type */
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
+
+ event->tp_event->prog = prog;
+
+ return 0;
+}
+
+static void perf_event_free_bpf_prog(struct perf_event *event)
+{
+ struct bpf_prog *prog;
+
+ if (!event->tp_event)
+ return;
+
+ prog = event->tp_event->prog;
+ if (prog) {
+ event->tp_event->prog = NULL;
+ bpf_prog_put(prog);
+ }
+}
+
#else
static inline void perf_tp_register(void)
@@ -6464,6 +6789,14 @@ static void perf_event_free_filter(struct perf_event *event)
{
}
+static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
+{
+ return -ENOENT;
+}
+
+static void perf_event_free_bpf_prog(struct perf_event *event)
+{
+}
#endif /* CONFIG_EVENT_TRACING */
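On the user-space side the attach point is a single ioctl on the event fd; a hedged sketch, assuming perf_fd was opened on a kprobe-backed tracepoint event and prog_fd came from bpf(BPF_PROG_LOAD, ...) with BPF_PROG_TYPE_KPROBE:

	/*
	 * Fails with -EINVAL for non-tracepoint events, non-kprobe
	 * tracepoints or a wrong program type, and with -EEXIST when a
	 * program is already attached.
	 */
	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
		err(1, "PERF_EVENT_IOC_SET_BPF");

The program reference is dropped again in free_event_rcu() via perf_event_free_bpf_prog(), so closing the event fd is enough to detach.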
#ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -6602,6 +6935,7 @@ static int cpu_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
cpu_clock_event_start(event, flags);
+ perf_event_update_userpage(event);
return 0;
}
@@ -6638,6 +6972,8 @@ static int cpu_clock_event_init(struct perf_event *event)
static struct pmu perf_cpu_clock = {
.task_ctx_nr = perf_sw_context,
+ .capabilities = PERF_PMU_CAP_NO_NMI,
+
.event_init = cpu_clock_event_init,
.add = cpu_clock_event_add,
.del = cpu_clock_event_del,
@@ -6676,6 +7012,7 @@ static int task_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
task_clock_event_start(event, flags);
+ perf_event_update_userpage(event);
return 0;
}
@@ -6716,6 +7053,8 @@ static int task_clock_event_init(struct perf_event *event)
static struct pmu perf_task_clock = {
.task_ctx_nr = perf_sw_context,
+ .capabilities = PERF_PMU_CAP_NO_NMI,
+
.event_init = task_clock_event_init,
.add = task_clock_event_add,
.del = task_clock_event_del,
@@ -6993,6 +7332,7 @@ got_cpu_context:
pmu->event_idx = perf_event_idx_default;
list_add_rcu(&pmu->entry, &pmus);
+ atomic_set(&pmu->exclusive_cnt, 0);
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
@@ -7037,12 +7377,28 @@ EXPORT_SYMBOL_GPL(perf_pmu_unregister);
static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
{
+ struct perf_event_context *ctx = NULL;
int ret;
if (!try_module_get(pmu->module))
return -ENODEV;
+
+ if (event->group_leader != event) {
+ /*
+ * This ctx->mutex can nest when we're called through
+ * inheritance. See the perf_event_ctx_lock_nested() comment.
+ */
+ ctx = perf_event_ctx_lock_nested(event->group_leader,
+ SINGLE_DEPTH_NESTING);
+ BUG_ON(!ctx);
+ }
+
event->pmu = pmu;
ret = pmu->event_init(event);
+
+ if (ctx)
+ perf_event_ctx_unlock(event->group_leader, ctx);
+
if (ret)
module_put(pmu->module);
@@ -7089,10 +7445,6 @@ static void account_event_cpu(struct perf_event *event, int cpu)
if (event->parent)
return;
- if (has_branch_stack(event)) {
- if (!(event->attach_state & PERF_ATTACH_TASK))
- atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
- }
if (is_cgroup_event(event))
atomic_inc(&per_cpu(perf_cgroup_events, cpu));
}
@@ -7131,7 +7483,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
- void *context)
+ void *context, int cgroup_fd)
{
struct pmu *pmu;
struct perf_event *event;
@@ -7186,18 +7538,18 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (task) {
event->attach_state = PERF_ATTACH_TASK;
-
- if (attr->type == PERF_TYPE_TRACEPOINT)
- event->hw.tp_target = task;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
- * hw_breakpoint is a bit difficult here..
+ * XXX pmu::event_init needs to know what task to account to
+ * and we cannot use the ctx information because we need the
+ * pmu before we get a ctx.
*/
- else if (attr->type == PERF_TYPE_BREAKPOINT)
- event->hw.bp_target = task;
-#endif
+ event->hw.target = task;
}
+ event->clock = &local_clock;
+ if (parent_event)
+ event->clock = parent_event->clock;
+
if (!overflow_handler && parent_event) {
overflow_handler = parent_event->overflow_handler;
context = parent_event->overflow_handler_context;
@@ -7224,6 +7576,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto err_ns;
+ if (!has_branch_stack(event))
+ event->attr.branch_sample_type = 0;
+
+ if (cgroup_fd != -1) {
+ err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
+ if (err)
+ goto err_ns;
+ }
+
pmu = perf_init_event(event);
if (!pmu)
goto err_ns;
@@ -7232,21 +7593,30 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
goto err_ns;
}
+ err = exclusive_event_init(event);
+ if (err)
+ goto err_pmu;
+
if (!event->parent) {
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers();
if (err)
- goto err_pmu;
+ goto err_per_task;
}
}
return event;
+err_per_task:
+ exclusive_event_destroy(event);
+
err_pmu:
if (event->destroy)
event->destroy(event);
module_put(pmu->module);
err_ns:
+ if (is_cgroup_event(event))
+ perf_detach_cgroup(event);
if (event->ns)
put_pid_ns(event->ns);
kfree(event);
@@ -7409,6 +7779,19 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
if (output_event->cpu == -1 && output_event->ctx != event->ctx)
goto out;
+ /*
+ * Mixing clocks in the same buffer is trouble you don't need.
+ */
+ if (output_event->clock != event->clock)
+ goto out;
+
+ /*
+ * If both events generate aux data, they must be on the same PMU
+ */
+ if (has_aux(event) && has_aux(output_event) &&
+ event->pmu != output_event->pmu)
+ goto out;
+
set:
mutex_lock(&event->mmap_mutex);
/* Can't redirect output if we've got an active mmap() */
@@ -7441,6 +7824,43 @@ static void mutex_lock_double(struct mutex *a, struct mutex *b)
mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
+static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
+{
+ bool nmi_safe = false;
+
+ switch (clk_id) {
+ case CLOCK_MONOTONIC:
+ event->clock = &ktime_get_mono_fast_ns;
+ nmi_safe = true;
+ break;
+
+ case CLOCK_MONOTONIC_RAW:
+ event->clock = &ktime_get_raw_fast_ns;
+ nmi_safe = true;
+ break;
+
+ case CLOCK_REALTIME:
+ event->clock = &ktime_get_real_ns;
+ break;
+
+ case CLOCK_BOOTTIME:
+ event->clock = &ktime_get_boot_ns;
+ break;
+
+ case CLOCK_TAI:
+ event->clock = &ktime_get_tai_ns;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
+ return -EINVAL;
+
+ return 0;
+}
+
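User space selects the clock at open time through two attr fields added elsewhere in this series; a minimal sketch:

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_TASK_CLOCK,
		.size		= sizeof(attr),
		.sample_type	= PERF_SAMPLE_TIME,
		.use_clockid	= 1,
		.clockid	= CLOCK_MONOTONIC_RAW,	/* NMI-safe, always accepted */
	};

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

CLOCK_REALTIME, CLOCK_BOOTTIME and CLOCK_TAI are only accepted on pmus that advertise PERF_PMU_CAP_NO_NMI, and later hunks require every event in a group, and any events sharing a ring buffer, to use the same clock.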
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -7465,6 +7885,7 @@ SYSCALL_DEFINE5(perf_event_open,
int move_group = 0;
int err;
int f_flags = O_RDWR;
+ int cgroup_fd = -1;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
@@ -7530,21 +7951,16 @@ SYSCALL_DEFINE5(perf_event_open,
get_online_cpus();
+ if (flags & PERF_FLAG_PID_CGROUP)
+ cgroup_fd = pid;
+
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
- NULL, NULL);
+ NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_cpus;
}
- if (flags & PERF_FLAG_PID_CGROUP) {
- err = perf_cgroup_connect(pid, event, &attr, group_leader);
- if (err) {
- __free_event(event);
- goto err_cpus;
- }
- }
-
if (is_sampling_event(event)) {
if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
err = -ENOTSUPP;
@@ -7560,6 +7976,12 @@ SYSCALL_DEFINE5(perf_event_open,
*/
pmu = event->pmu;
+ if (attr.use_clockid) {
+ err = perf_event_set_clock(event, attr.clockid);
+ if (err)
+ goto err_alloc;
+ }
+
if (group_leader &&
(is_software_event(event) != is_software_event(group_leader))) {
if (is_software_event(event)) {
@@ -7586,12 +8008,17 @@ SYSCALL_DEFINE5(perf_event_open,
/*
* Get the target context (task or percpu):
*/
- ctx = find_get_context(pmu, task, event->cpu);
+ ctx = find_get_context(pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
}
+ if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
+ err = -EBUSY;
+ goto err_context;
+ }
+
if (task) {
put_task_struct(task);
task = NULL;
@@ -7609,6 +8036,11 @@ SYSCALL_DEFINE5(perf_event_open,
*/
if (group_leader->group_leader != group_leader)
goto err_context;
+
+ /* All events in a group should have the same clock */
+ if (group_leader->clock != event->clock)
+ goto err_context;
+
/*
* Do not allow to attach to a group in a different
* task or CPU context:
@@ -7709,6 +8141,13 @@ SYSCALL_DEFINE5(perf_event_open,
get_ctx(ctx);
}
+ if (!exclusive_event_installable(event, ctx)) {
+ err = -EBUSY;
+ mutex_unlock(&ctx->mutex);
+ fput(event_file);
+ goto err_context;
+ }
+
perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
@@ -7781,7 +8220,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
*/
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
- overflow_handler, context);
+ overflow_handler, context, -1);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
@@ -7792,7 +8231,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
account_event(event);
- ctx = find_get_context(event->pmu, task, cpu);
+ ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_free;
@@ -7800,6 +8239,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
+ if (!exclusive_event_installable(event, ctx)) {
+ mutex_unlock(&ctx->mutex);
+ perf_unpin_context(ctx);
+ put_ctx(ctx);
+ err = -EBUSY;
+ goto err_free;
+ }
+
perf_install_in_context(ctx, event, cpu);
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
@@ -8142,7 +8589,7 @@ inherit_event(struct perf_event *parent_event,
parent_event->cpu,
child,
group_leader, parent_event,
- NULL, NULL);
+ NULL, NULL, -1);
if (IS_ERR(child_event))
return child_event;
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 9803a6600d49..92ce5f4ccc26 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
*/
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
- struct task_struct *tsk = bp->hw.bp_target;
+ struct task_struct *tsk = bp->hw.target;
struct perf_event *iter;
int count = 0;
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
- if (iter->hw.bp_target == tsk &&
+ if (iter->hw.target == tsk &&
find_slot_idx(iter) == type &&
(iter->cpu < 0 || cpu == iter->cpu))
count += hw_breakpoint_weight(iter);
@@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
int nr;
nr = info->cpu_pinned;
- if (!bp->hw.bp_target)
+ if (!bp->hw.target)
nr += max_task_bp_pinned(cpu, type);
else
nr += task_bp_pinned(cpu, bp, type);
@@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
weight = -weight;
/* Pinned counter cpu profiling */
- if (!bp->hw.bp_target) {
+ if (!bp->hw.target) {
get_bp_info(bp->cpu, type)->cpu_pinned += weight;
return;
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 569b218782ad..9f6ce9ba4a04 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -27,6 +27,7 @@ struct ring_buffer {
local_t lost; /* nr records lost */
long watermark; /* wakeup watermark */
+ long aux_watermark;
/* poll crap */
spinlock_t event_lock;
struct list_head event_list;
@@ -35,6 +36,20 @@ struct ring_buffer {
unsigned long mmap_locked;
struct user_struct *mmap_user;
+ /* AUX area */
+ local_t aux_head;
+ local_t aux_nest;
+ local_t aux_wakeup;
+ unsigned long aux_pgoff;
+ int aux_nr_pages;
+ int aux_overwrite;
+ atomic_t aux_mmap_count;
+ unsigned long aux_mmap_locked;
+ void (*free_aux)(void *);
+ atomic_t aux_refcount;
+ void **aux_pages;
+ void *aux_priv;
+
struct perf_event_mmap_page *user_page;
void *data_pages[0];
};
@@ -43,6 +58,19 @@ extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
+extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+ pgoff_t pgoff, int nr_pages, long watermark, int flags);
+extern void rb_free_aux(struct ring_buffer *rb);
+extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
+extern void ring_buffer_put(struct ring_buffer *rb);
+
+static inline bool rb_has_aux(struct ring_buffer *rb)
+{
+ return !!rb->aux_nr_pages;
+}
+
+void perf_event_aux_event(struct perf_event *event, unsigned long head,
+ unsigned long size, u64 flags);
extern void
perf_event_header__init_id(struct perf_event_header *header,
@@ -81,6 +109,11 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
+static inline unsigned long perf_aux_size(struct ring_buffer *rb)
+{
+ return rb->aux_nr_pages << PAGE_SHIFT;
+}
+
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
static inline unsigned long \
func_name(struct perf_output_handle *handle, \
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index eadb95ce7aac..232f00f273cb 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -243,14 +243,317 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
spin_lock_init(&rb->event_lock);
}
+/*
+ * This is called before hardware starts writing to the AUX area to
+ * obtain an output handle and make sure there's room in the buffer.
+ * When the capture completes, call perf_aux_output_end() to commit
+ * the recorded data to the buffer.
+ *
+ * The ordering is similar to that of perf_output_{begin,end}, with
+ * the exception of (B), which should be taken care of by the pmu
+ * driver, since ordering rules will differ depending on hardware.
+ */
+void *perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event)
+{
+ struct perf_event *output_event = event;
+ unsigned long aux_head, aux_tail;
+ struct ring_buffer *rb;
+
+ if (output_event->parent)
+ output_event = output_event->parent;
+
+ /*
+ * Since this will typically be open across pmu::add/pmu::del, we
+ * grab ring_buffer's refcount instead of holding rcu read lock
+ * to make sure it doesn't disappear under us.
+ */
+ rb = ring_buffer_get(output_event);
+ if (!rb)
+ return NULL;
+
+ if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
+ goto err;
+
+ /*
+ * Nesting is not supported for AUX area, make sure nested
+ * writers are caught early
+ */
+ if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
+ goto err_put;
+
+ aux_head = local_read(&rb->aux_head);
+
+ handle->rb = rb;
+ handle->event = event;
+ handle->head = aux_head;
+ handle->size = 0;
+
+ /*
+ * In overwrite mode, AUX data stores do not depend on aux_tail,
+ * therefore (A) control dependency barrier does not exist. The
+ * (B) <-> (C) ordering is still observed by the pmu driver.
+ */
+ if (!rb->aux_overwrite) {
+ aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
+ handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
+ if (aux_head - aux_tail < perf_aux_size(rb))
+ handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
+
+ /*
+ * handle->size computation depends on aux_tail load; this forms a
+ * control dependency barrier separating aux_tail load from aux data
+ * store that will be enabled on successful return
+ */
+ if (!handle->size) { /* A, matches D */
+ event->pending_disable = 1;
+ perf_output_wakeup(handle);
+ local_set(&rb->aux_nest, 0);
+ goto err_put;
+ }
+ }
+
+ return handle->rb->aux_priv;
+
+err_put:
+ rb_free_aux(rb);
+
+err:
+ ring_buffer_put(rb);
+ handle->event = NULL;
+
+ return NULL;
+}
+
+/*
+ * Commit the data written by hardware into the ring buffer by adjusting
+ * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
+ * pmu driver's responsibility to observe ordering rules of the hardware,
+ * so that all the data is externally visible before this is called.
+ */
+void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ bool truncated)
+{
+ struct ring_buffer *rb = handle->rb;
+ unsigned long aux_head;
+ u64 flags = 0;
+
+ if (truncated)
+ flags |= PERF_AUX_FLAG_TRUNCATED;
+
+ /* in overwrite mode, driver provides aux_head via handle */
+ if (rb->aux_overwrite) {
+ flags |= PERF_AUX_FLAG_OVERWRITE;
+
+ aux_head = handle->head;
+ local_set(&rb->aux_head, aux_head);
+ } else {
+ aux_head = local_read(&rb->aux_head);
+ local_add(size, &rb->aux_head);
+ }
+
+ if (size || flags) {
+ /*
+ * Only send RECORD_AUX if we have something useful to communicate
+ */
+
+ perf_event_aux_event(handle->event, aux_head, size, flags);
+ }
+
+ aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
+
+ if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
+ perf_output_wakeup(handle);
+ local_add(rb->aux_watermark, &rb->aux_wakeup);
+ }
+ handle->event = NULL;
+
+ local_set(&rb->aux_nest, 0);
+ rb_free_aux(rb);
+ ring_buffer_put(rb);
+}
+
+/*
+ * Skip over a given number of bytes in the AUX buffer, due to, for example,
+ * hardware's alignment constraints.
+ */
+int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
+{
+ struct ring_buffer *rb = handle->rb;
+ unsigned long aux_head;
+
+ if (size > handle->size)
+ return -ENOSPC;
+
+ local_add(size, &rb->aux_head);
+
+ aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
+ if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
+ perf_output_wakeup(handle);
+ local_add(rb->aux_watermark, &rb->aux_wakeup);
+ handle->wakeup = local_read(&rb->aux_wakeup) +
+ rb->aux_watermark;
+ }
+
+ handle->head = aux_head;
+ handle->size -= size;
+
+ return 0;
+}
+
+void *perf_get_aux(struct perf_output_handle *handle)
+{
+ /* this is only valid between perf_aux_output_begin and *_end */
+ if (!handle->event)
+ return NULL;
+
+ return handle->rb->aux_priv;
+}
+
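A pmu driver is expected to bracket each hardware run with these helpers from its ->start()/->stop() path; a rough sketch with hypothetical example_hw_* hooks:

	static DEFINE_PER_CPU(struct perf_output_handle, example_handle);

	static void example_pmu_start(struct perf_event *event, int flags)
	{
		struct perf_output_handle *handle = this_cpu_ptr(&example_handle);
		void *buf;

		/* buf is the aux_priv cookie returned by ->setup_aux() */
		buf = perf_aux_output_begin(handle, event);
		if (!buf)
			return;	/* no AUX mapping or no room: event was disabled */

		example_hw_start(buf, handle->head, handle->size);
	}

	static void example_pmu_stop(struct perf_event *event, int flags)
	{
		struct perf_output_handle *handle = this_cpu_ptr(&example_handle);
		unsigned long written = example_hw_stop();

		/* posts PERF_RECORD_AUX and advances aux_head */
		perf_aux_output_end(handle, written, false);
	}

perf_aux_output_skip() slots in between the two when the hardware has to pad the buffer for alignment.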
+#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
+
+static struct page *rb_alloc_aux_page(int node, int order)
+{
+ struct page *page;
+
+ if (order > MAX_ORDER)
+ order = MAX_ORDER;
+
+ do {
+ page = alloc_pages_node(node, PERF_AUX_GFP, order);
+ } while (!page && order--);
+
+ if (page && order) {
+ /*
+ * Communicate the allocation size to the driver
+ */
+ split_page(page, order);
+ SetPagePrivate(page);
+ set_page_private(page, order);
+ }
+
+ return page;
+}
+
+static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+{
+ struct page *page = virt_to_page(rb->aux_pages[idx]);
+
+ ClearPagePrivate(page);
+ page->mapping = NULL;
+ __free_page(page);
+}
+
+int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+ pgoff_t pgoff, int nr_pages, long watermark, int flags)
+{
+ bool overwrite = !(flags & RING_BUFFER_WRITABLE);
+ int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
+ int ret = -ENOMEM, max_order = 0;
+
+ if (!has_aux(event))
+ return -ENOTSUPP;
+
+ if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
+ /*
+ * We need to start with the max_order that fits in nr_pages,
+ * not the other way around, hence ilog2() and not get_order.
+ */
+ max_order = ilog2(nr_pages);
+
+ /*
+ * PMU requests more than one contiguous chunks of memory
+ * for SW double buffering
+ */
+ if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
+ !overwrite) {
+ if (!max_order)
+ return -EINVAL;
+
+ max_order--;
+ }
+ }
+
+ rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
+ if (!rb->aux_pages)
+ return -ENOMEM;
+
+ rb->free_aux = event->pmu->free_aux;
+ for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
+ struct page *page;
+ int last, order;
+
+ order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
+ page = rb_alloc_aux_page(node, order);
+ if (!page)
+ goto out;
+
+ for (last = rb->aux_nr_pages + (1 << page_private(page));
+ last > rb->aux_nr_pages; rb->aux_nr_pages++)
+ rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
+ }
+
+ rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
+ overwrite);
+ if (!rb->aux_priv)
+ goto out;
+
+ ret = 0;
+
+ /*
+ * aux_pages (and pmu driver's private data, aux_priv) will be
+ * referenced in both producer's and consumer's contexts, thus
+ * we keep a refcount here to make sure either of the two can
+ * reference them safely.
+ */
+ atomic_set(&rb->aux_refcount, 1);
+
+ rb->aux_overwrite = overwrite;
+ rb->aux_watermark = watermark;
+
+ if (!rb->aux_watermark && !rb->aux_overwrite)
+ rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);
+
+out:
+ if (!ret)
+ rb->aux_pgoff = pgoff;
+ else
+ rb_free_aux(rb);
+
+ return ret;
+}
+
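The setup_aux/free_aux callbacks used here are the driver's half of the contract; a minimal sketch (struct example_buf and example_build_tables() are hypothetical):

	static void *example_setup_aux(int cpu, void **pages, int nr_pages,
				       bool overwrite)
	{
		struct example_buf *buf;

		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			return NULL;

		/*
		 * With PERF_PMU_CAP_AUX_NO_SG the entries of pages[] come from
		 * high-order allocations; page_private() of the head page
		 * carries the order, as set in rb_alloc_aux_page() above.
		 */
		example_build_tables(buf, pages, nr_pages, overwrite);

		return buf;	/* stored in rb->aux_priv, see perf_get_aux() */
	}

	static void example_free_aux(void *aux)
	{
		kfree(aux);	/* called from __rb_free_aux() below */
	}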
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+ int pg;
+
+ if (rb->aux_priv) {
+ rb->free_aux(rb->aux_priv);
+ rb->free_aux = NULL;
+ rb->aux_priv = NULL;
+ }
+
+ for (pg = 0; pg < rb->aux_nr_pages; pg++)
+ rb_free_aux_page(rb, pg);
+
+ kfree(rb->aux_pages);
+ rb->aux_nr_pages = 0;
+}
+
+void rb_free_aux(struct ring_buffer *rb)
+{
+ if (atomic_dec_and_test(&rb->aux_refcount))
+ __rb_free_aux(rb);
+}
+
#ifndef CONFIG_PERF_USE_VMALLOC
/*
* Back perf_mmap() with regular GFP_KERNEL-0 pages.
*/
-struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+static struct page *
+__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
if (pgoff > rb->nr_pages)
return NULL;
@@ -340,8 +643,8 @@ static int data_page_nr(struct ring_buffer *rb)
return rb->nr_pages << page_order(rb);
}
-struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+static struct page *
+__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
/* The '>' counts in the user page. */
if (pgoff > data_page_nr(rb))
@@ -416,3 +719,19 @@ fail:
}
#endif
+
+struct page *
+perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+{
+ if (rb->aux_nr_pages) {
+ /* above AUX space */
+ if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
+ return NULL;
+
+ /* AUX space */
+ if (pgoff >= rb->aux_pgoff)
+ return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
+ }
+
+ return __perf_mmap_to_page(rb, pgoff);
+}
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 83d4382f5699..6873bb3e6b7e 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -20,145 +20,10 @@
#include <linux/types.h>
#include <linux/fs_struct.h>
-
-static void default_handler(int, struct pt_regs *);
-
-static struct exec_domain *exec_domains = &default_exec_domain;
-static DEFINE_RWLOCK(exec_domains_lock);
-
-
-static unsigned long ident_map[32] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31
-};
-
-struct exec_domain default_exec_domain = {
- .name = "Linux", /* name */
- .handler = default_handler, /* lcall7 causes a seg fault. */
- .pers_low = 0, /* PER_LINUX personality. */
- .pers_high = 0, /* PER_LINUX personality. */
- .signal_map = ident_map, /* Identity map signals. */
- .signal_invmap = ident_map, /* - both ways. */
-};
-
-
-static void
-default_handler(int segment, struct pt_regs *regp)
-{
- set_personality(0);
-
- if (current_thread_info()->exec_domain->handler != default_handler)
- current_thread_info()->exec_domain->handler(segment, regp);
- else
- send_sig(SIGSEGV, current, 1);
-}
-
-static struct exec_domain *
-lookup_exec_domain(unsigned int personality)
-{
- unsigned int pers = personality(personality);
- struct exec_domain *ep;
-
- read_lock(&exec_domains_lock);
- for (ep = exec_domains; ep; ep = ep->next) {
- if (pers >= ep->pers_low && pers <= ep->pers_high)
- if (try_module_get(ep->module))
- goto out;
- }
-
-#ifdef CONFIG_MODULES
- read_unlock(&exec_domains_lock);
- request_module("personality-%d", pers);
- read_lock(&exec_domains_lock);
-
- for (ep = exec_domains; ep; ep = ep->next) {
- if (pers >= ep->pers_low && pers <= ep->pers_high)
- if (try_module_get(ep->module))
- goto out;
- }
-#endif
-
- ep = &default_exec_domain;
-out:
- read_unlock(&exec_domains_lock);
- return ep;
-}
-
-int
-register_exec_domain(struct exec_domain *ep)
-{
- struct exec_domain *tmp;
- int err = -EBUSY;
-
- if (ep == NULL)
- return -EINVAL;
-
- if (ep->next != NULL)
- return -EBUSY;
-
- write_lock(&exec_domains_lock);
- for (tmp = exec_domains; tmp; tmp = tmp->next) {
- if (tmp == ep)
- goto out;
- }
-
- ep->next = exec_domains;
- exec_domains = ep;
- err = 0;
-
-out:
- write_unlock(&exec_domains_lock);
- return err;
-}
-EXPORT_SYMBOL(register_exec_domain);
-
-int
-unregister_exec_domain(struct exec_domain *ep)
-{
- struct exec_domain **epp;
-
- epp = &exec_domains;
- write_lock(&exec_domains_lock);
- for (epp = &exec_domains; *epp; epp = &(*epp)->next) {
- if (ep == *epp)
- goto unregister;
- }
- write_unlock(&exec_domains_lock);
- return -EINVAL;
-
-unregister:
- *epp = ep->next;
- ep->next = NULL;
- write_unlock(&exec_domains_lock);
- return 0;
-}
-EXPORT_SYMBOL(unregister_exec_domain);
-
-int __set_personality(unsigned int personality)
-{
- struct exec_domain *oep = current_thread_info()->exec_domain;
-
- current_thread_info()->exec_domain = lookup_exec_domain(personality);
- current->personality = personality;
- module_put(oep->module);
-
- return 0;
-}
-EXPORT_SYMBOL(__set_personality);
-
#ifdef CONFIG_PROC_FS
static int execdomains_proc_show(struct seq_file *m, void *v)
{
- struct exec_domain *ep;
-
- read_lock(&exec_domains_lock);
- for (ep = exec_domains; ep; ep = ep->next)
- seq_printf(m, "%d-%d\t%-16s\t[%s]\n",
- ep->pers_low, ep->pers_high, ep->name,
- module_name(ep->module));
- read_unlock(&exec_domains_lock);
+ seq_puts(m, "0-0\tLinux \t[kernel]\n");
return 0;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index feff10bbb307..22fcc05dec40 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -756,8 +756,6 @@ void do_exit(long code)
cgroup_exit(tsk);
- module_put(task_thread_info(tsk)->exec_domain->module);
-
/*
* FIXME: do that only when needed, using sched_exit tracepoint
*/
diff --git a/kernel/fork.c b/kernel/fork.c
index cf65139615a0..03c1eaaa6ef5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -74,6 +74,7 @@
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
+#include <linux/sysctl.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -88,6 +89,16 @@
#include <trace/events/task.h>
/*
+ * Minimum number of threads to boot the kernel
+ */
+#define MIN_THREADS 20
+
+/*
+ * Maximum number of threads
+ */
+#define MAX_THREADS FUTEX_TID_MASK
+
+/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
unsigned long total_forks; /* Handle normal Linux uptimes. */
@@ -253,7 +264,30 @@ EXPORT_SYMBOL_GPL(__put_task_struct);
void __init __weak arch_task_cache_init(void) { }
-void __init fork_init(unsigned long mempages)
+/*
+ * set_max_threads - set the global max_threads limit, capped so that
+ * thread stacks can consume at most one eighth of available memory.
+ */
+static void set_max_threads(unsigned int max_threads_suggested)
+{
+ u64 threads;
+
+ /*
+ * The number of threads shall be limited such that the thread
+ * structures may only consume a small part of the available memory.
+ */
+ if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
+ threads = MAX_THREADS;
+ else
+ threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
+ (u64) THREAD_SIZE * 8UL);
+
+ if (threads > max_threads_suggested)
+ threads = max_threads_suggested;
+
+ max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
+}
+
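To make the new sizing rule concrete (numbers assumed for illustration): with 4 GiB of RAM (totalram_pages = 2^20 at a 4 KiB page size) and THREAD_SIZE = 16 KiB, the pre-clamp value works out as

	threads = (totalram_pages * PAGE_SIZE) / (THREAD_SIZE * 8)
	        = (2^20 * 2^12) / (2^14 * 8)
	        = 32768

so thread stacks can pin at most one eighth of memory; the result is then clamped to [MIN_THREADS, MAX_THREADS].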
+void __init fork_init(void)
{
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
@@ -268,18 +302,7 @@ void __init fork_init(unsigned long mempages)
/* do the arch specific task caches init */
arch_task_cache_init();
- /*
- * The default maximum number of threads is set to a safe
- * value: the thread structures can take up at most half
- * of memory.
- */
- max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
-
- /*
- * we need to allow at least 20 threads to boot a system
- */
- if (max_threads < 20)
- max_threads = 20;
+ set_max_threads(MAX_THREADS);
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
@@ -380,6 +403,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
*/
down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+ /* No ordering required: file already has been exposed. */
+ RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
+
mm->total_vm = oldmm->total_vm;
mm->shared_vm = oldmm->shared_vm;
mm->exec_vm = oldmm->exec_vm;
@@ -505,7 +531,13 @@ static inline void mm_free_pgd(struct mm_struct *mm)
pgd_free(mm, mm->pgd);
}
#else
-#define dup_mmap(mm, oldmm) (0)
+static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ down_write(&oldmm->mmap_sem);
+ RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
+ up_write(&oldmm->mmap_sem);
+ return 0;
+}
#define mm_alloc_pgd(mm) (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
@@ -674,34 +706,53 @@ void mmput(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(mmput);
+/**
+ * set_mm_exe_file - change a reference to the mm's executable file
+ *
+ * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
+ *
+ * Main users are mmput() and sys_execve(). Callers prevent concurrent
+ * invocations: in mmput() nobody alive left, in execve task is single
+ * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
+ * mm->exe_file, but does so without using set_mm_exe_file() in order
+ * to do avoid the need for any locks.
+ */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
+ struct file *old_exe_file;
+
+ /*
+ * It is safe to dereference the exe_file without RCU as
+ * this function is only called if nobody else can access
+ * this mm -- see comment above for justification.
+ */
+ old_exe_file = rcu_dereference_raw(mm->exe_file);
+
if (new_exe_file)
get_file(new_exe_file);
- if (mm->exe_file)
- fput(mm->exe_file);
- mm->exe_file = new_exe_file;
+ rcu_assign_pointer(mm->exe_file, new_exe_file);
+ if (old_exe_file)
+ fput(old_exe_file);
}
+/**
+ * get_mm_exe_file - acquire a reference to the mm's executable file
+ *
+ * Returns %NULL if mm has no associated executable file.
+ * User must release file via fput().
+ */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
struct file *exe_file;
- /* We need mmap_sem to protect against races with removal of exe_file */
- down_read(&mm->mmap_sem);
- exe_file = mm->exe_file;
- if (exe_file)
- get_file(exe_file);
- up_read(&mm->mmap_sem);
+ rcu_read_lock();
+ exe_file = rcu_dereference(mm->exe_file);
+ if (exe_file && !get_file_rcu(exe_file))
+ exe_file = NULL;
+ rcu_read_unlock();
return exe_file;
}
-
-static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
-{
- /* It's safe to write the exe_file pointer without exe_file_lock because
- * this is called during fork when the task is not yet in /proc */
- newmm->exe_file = get_mm_exe_file(oldmm);
-}
+EXPORT_SYMBOL(get_mm_exe_file);
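Callers keep the old contract -- a private reference or NULL -- and simply stop paying for mmap_sem; a sketch of the expected usage (hypothetical caller):

	struct file *exe_file;

	exe_file = get_mm_exe_file(mm);		/* own reference, or NULL */
	if (exe_file) {
		/* ... inspect exe_file->f_path ... */
		fput(exe_file);
	}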
/**
* get_task_mm - acquire a reference to the task's mm
@@ -864,8 +915,6 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
if (!mm_init(mm, tsk))
goto fail_nomem;
- dup_mm_exe_file(oldmm, mm);
-
err = dup_mmap(mm, oldmm);
if (err)
goto free_pt;
@@ -1279,9 +1328,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
- if (!try_module_get(task_thread_info(p)->exec_domain->module))
- goto bad_fork_cleanup_count;
-
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
p->flags |= PF_FORKNOEXEC;
@@ -1406,10 +1452,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
- retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns_for_children);
- if (!pid)
+ if (IS_ERR(pid)) {
+ retval = PTR_ERR(pid);
goto bad_fork_cleanup_io;
+ }
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1590,7 +1637,6 @@ bad_fork_cleanup_threadgroup_lock:
if (clone_flags & CLONE_THREAD)
threadgroup_change_end(current);
delayacct_tsk_free(p);
- module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
exit_creds(p);
@@ -2004,3 +2050,26 @@ int unshare_files(struct files_struct **displaced)
task_unlock(task);
return 0;
}
+
+int sysctl_max_threads(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table t;
+ int ret;
+ int threads = max_threads;
+ int min = MIN_THREADS;
+ int max = MAX_THREADS;
+
+ t = *table;
+ t.data = &threads;
+ t.extra1 = &min;
+ t.extra2 = &max;
+
+ ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+ if (ret || !write)
+ return ret;
+
+ set_max_threads(threads);
+
+ return 0;
+}
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index b358a802fd18..a744098e4eb7 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/sched.h>
#include "gcov.h"
static int gcov_events_enabled;
@@ -107,8 +108,10 @@ void gcov_enable_events(void)
gcov_events_enabled = 1;
/* Perform event callback for previously registered entries. */
- while ((info = gcov_info_next(info)))
+ while ((info = gcov_info_next(info))) {
gcov_event(GCOV_ADD, info);
+ cond_resched();
+ }
mutex_unlock(&gcov_lock);
}
diff --git a/kernel/groups.c b/kernel/groups.c
index 664411f171b5..74d431d25251 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -9,9 +9,6 @@
#include <linux/user_namespace.h>
#include <asm/uaccess.h>
-/* init to 2 - one for init_task, one to ensure it is never freed */
-struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
-
struct group_info *groups_alloc(int gidsetsize)
{
struct group_info *group_info;
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 06db12434d72..e0f90c2b57aa 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -169,7 +169,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
return;
rcu_read_lock();
- do_each_thread(g, t) {
+ for_each_process_thread(g, t) {
if (!max_count--)
goto unlock;
if (!--batch_count) {
@@ -180,7 +180,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
if (t->state == TASK_UNINTERRUPTIBLE)
check_hung_task(t, timeout);
- } while_each_thread(g, t);
+ }
unlock:
rcu_read_unlock();
}
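For context, the replacement iterator is used under RCU just as the old do_each_thread/while_each_thread pair was; a minimal sketch, with g and t being struct task_struct pointers:

	rcu_read_lock();
	for_each_process_thread(g, t) {
		/* inspect t here; no sleeping while the RCU read lock is held */
	}
	rcu_read_unlock();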
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
index 988dc58e8847..2feb6feca0cc 100644
--- a/kernel/irq/dummychip.c
+++ b/kernel/irq/dummychip.c
@@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = {
.irq_ack = noop,
.irq_mask = noop,
.irq_unmask = noop,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
EXPORT_SYMBOL_GPL(dummy_irq_chip);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 38c25b1f2fd5..7a36fdcca5bf 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
do {
unsigned long pfn, epfn, addr, eaddr;
- pages = kimage_alloc_pages(GFP_KERNEL, order);
+ pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
if (!pages)
break;
pfn = page_to_pfn(pages);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 3f9f1d6b4c2e..284e2691e380 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -335,32 +335,20 @@ unlock:
rcu_read_unlock();
}
-static int klp_disable_func(struct klp_func *func)
+static void klp_disable_func(struct klp_func *func)
{
struct klp_ops *ops;
- int ret;
-
- if (WARN_ON(func->state != KLP_ENABLED))
- return -EINVAL;
- if (WARN_ON(!func->old_addr))
- return -EINVAL;
+ WARN_ON(func->state != KLP_ENABLED);
+ WARN_ON(!func->old_addr);
ops = klp_find_ops(func->old_addr);
if (WARN_ON(!ops))
- return -EINVAL;
+ return;
if (list_is_singular(&ops->func_stack)) {
- ret = unregister_ftrace_function(&ops->fops);
- if (ret) {
- pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
- func->old_name, ret);
- return ret;
- }
-
- ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
- if (ret)
- pr_warn("function unregister succeeded but failed to clear the filter\n");
+ WARN_ON(unregister_ftrace_function(&ops->fops));
+ WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
list_del_rcu(&func->stack_node);
list_del(&ops->node);
@@ -370,8 +358,6 @@ static int klp_disable_func(struct klp_func *func)
}
func->state = KLP_DISABLED;
-
- return 0;
}
static int klp_enable_func(struct klp_func *func)
@@ -432,23 +418,15 @@ err:
return ret;
}
-static int klp_disable_object(struct klp_object *obj)
+static void klp_disable_object(struct klp_object *obj)
{
struct klp_func *func;
- int ret;
- for (func = obj->funcs; func->old_name; func++) {
- if (func->state != KLP_ENABLED)
- continue;
-
- ret = klp_disable_func(func);
- if (ret)
- return ret;
- }
+ for (func = obj->funcs; func->old_name; func++)
+ if (func->state == KLP_ENABLED)
+ klp_disable_func(func);
obj->state = KLP_DISABLED;
-
- return 0;
}
static int klp_enable_object(struct klp_object *obj)
@@ -464,22 +442,19 @@ static int klp_enable_object(struct klp_object *obj)
for (func = obj->funcs; func->old_name; func++) {
ret = klp_enable_func(func);
- if (ret)
- goto unregister;
+ if (ret) {
+ klp_disable_object(obj);
+ return ret;
+ }
}
obj->state = KLP_ENABLED;
return 0;
-
-unregister:
- WARN_ON(klp_disable_object(obj));
- return ret;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
- int ret;
/* enforce stacking: only the last enabled patch can be disabled */
if (!list_is_last(&patch->list, &klp_patches) &&
@@ -489,12 +464,8 @@ static int __klp_disable_patch(struct klp_patch *patch)
pr_notice("disabling patch '%s'\n", patch->mod->name);
for (obj = patch->objs; obj->funcs; obj++) {
- if (obj->state != KLP_ENABLED)
- continue;
-
- ret = klp_disable_object(obj);
- if (ret)
- return ret;
+ if (obj->state == KLP_ENABLED)
+ klp_disable_object(obj);
}
patch->state = KLP_DISABLED;
@@ -553,8 +524,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
pr_notice("enabling patch '%s'\n", patch->mod->name);
for (obj = patch->objs; obj->funcs; obj++) {
- klp_find_object_module(obj);
-
if (!klp_is_object_loaded(obj))
continue;
@@ -945,7 +914,6 @@ static void klp_module_notify_going(struct klp_patch *patch,
{
struct module *pmod = patch->mod;
struct module *mod = obj->mod;
- int ret;
if (patch->state == KLP_DISABLED)
goto disabled;
@@ -953,10 +921,7 @@ static void klp_module_notify_going(struct klp_patch *patch,
pr_notice("reverting patch '%s' on unloading module '%s'\n",
pmod->name, mod->name);
- ret = klp_disable_object(obj);
- if (ret)
- pr_warn("failed to revert patch '%s' on module '%s' (%d)\n",
- pmod->name, mod->name, ret);
+ klp_disable_object(obj);
disabled:
klp_free_object_loaded(obj);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ba77ab5f64dd..a0831e1b99f4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -551,7 +551,21 @@ static void print_lockdep_cache(struct lockdep_map *lock)
static void print_lock(struct held_lock *hlock)
{
- print_lock_name(hlock_class(hlock));
+ /*
+ * We can be called locklessly through debug_show_all_locks() so be
+ * extra careful, the hlock might have been released and cleared.
+ */
+ unsigned int class_idx = hlock->class_idx;
+
+ /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
+ barrier();
+
+ if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
+ printk("<RELEASED>\n");
+ return;
+ }
+
+ print_lock_name(lock_classes + class_idx - 1);
printk(", at: ");
print_ip_sym(hlock->acquire_ip);
}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b73279367087..b025295f4966 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
}
/*
- * Called by sched_setscheduler() to check whether the priority change
- * is overruled by a possible priority boosting.
+ * Called by sched_setscheduler() to get the priority which will be
+ * effective after the change.
*/
-int rt_mutex_check_prio(struct task_struct *task, int newprio)
+int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
{
if (!task_has_pi_waiters(task))
- return 0;
+ return newprio;
- return task_top_pi_waiter(task)->task->prio <= newprio;
+ if (task_top_pi_waiter(task)->task->prio <= newprio)
+ return task_top_pi_waiter(task)->task->prio;
+ return newprio;
}
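A minimal standalone sketch of the new contract (hypothetical helper and values; in the kernel a numerically lower prio means a higher priority):

	static int effective_prio(int newprio, int top_waiter_prio, int has_pi_waiters)
	{
		if (!has_pi_waiters)
			return newprio;
		/* keep the boost when the top waiter outranks the requested prio */
		return top_waiter_prio <= newprio ? top_waiter_prio : newprio;
	}
	/* effective_prio(120, 10, 1) == 10;  effective_prio(5, 10, 1) == 5 */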
/*
diff --git a/kernel/module.c b/kernel/module.c
index ec53f594e9c9..42a1d2afb217 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -387,9 +387,9 @@ static bool check_symbol(const struct symsearch *syms,
pr_warn("Symbol %s is marked as UNUSED, however this module is "
"using it.\n", fsa->name);
pr_warn("This symbol will go away in the future.\n");
- pr_warn("Please evalute if this is the right api to use and if "
- "it really is, submit a report the linux kernel "
- "mailinglist together with submitting your code for "
+ pr_warn("Please evaluate if this is the right api to use and "
+ "if it really is, submit a report to the linux kernel "
+ "mailing list together with submitting your code for "
"inclusion.\n");
}
#endif
@@ -2511,7 +2511,8 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
return err;
/* Suck in entire file: we'll want most of it. */
- info->hdr = vmalloc(info->len);
+ info->hdr = __vmalloc(info->len,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
if (!info->hdr)
return -ENOMEM;
@@ -2770,6 +2771,9 @@ static int find_module_sections(struct module *mod, struct load_info *info)
mod->trace_events = section_objs(info, "_ftrace_events",
sizeof(*mod->trace_events),
&mod->num_trace_events);
+ mod->trace_enums = section_objs(info, "_ftrace_enum_map",
+ sizeof(*mod->trace_enums),
+ &mod->num_trace_enums);
#endif
#ifdef CONFIG_TRACING
mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
diff --git a/kernel/params.c b/kernel/params.c
index 728e05b167de..a22d6a759b1a 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -173,9 +173,9 @@ static char *next_arg(char *args, char **param, char **val)
if (args[i-1] == '"')
args[i-1] = '\0';
}
- if (quoted && args[i-1] == '"')
- args[i-1] = '\0';
}
+ if (quoted && args[i-1] == '"')
+ args[i-1] = '\0';
if (args[i]) {
args[i] = '\0';
diff --git a/kernel/pid.c b/kernel/pid.c
index cd36a5e0d173..4fd07d5b7baf 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -182,7 +182,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
spin_unlock_irq(&pidmap_lock);
kfree(page);
if (unlikely(!map->page))
- break;
+ return -ENOMEM;
}
if (likely(atomic_read(&map->nr_free))) {
for ( ; ; ) {
@@ -210,7 +210,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
}
pid = mk_pid(pid_ns, map, offset);
}
- return -1;
+ return -EAGAIN;
}
int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
@@ -301,17 +301,20 @@ struct pid *alloc_pid(struct pid_namespace *ns)
int i, nr;
struct pid_namespace *tmp;
struct upid *upid;
+ int retval = -ENOMEM;
pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
if (!pid)
- goto out;
+ return ERR_PTR(retval);
tmp = ns;
pid->level = ns->level;
for (i = ns->level; i >= 0; i--) {
nr = alloc_pidmap(tmp);
- if (nr < 0)
+ if (IS_ERR_VALUE(nr)) {
+ retval = nr;
goto out_free;
+ }
pid->numbers[i].nr = nr;
pid->numbers[i].ns = tmp;
@@ -339,7 +342,6 @@ struct pid *alloc_pid(struct pid_namespace *ns)
}
spin_unlock_irq(&pidmap_lock);
-out:
return pid;
out_unlock:
@@ -351,8 +353,7 @@ out_free:
free_pidmap(pid->numbers + i);
kmem_cache_free(ns->pid_cachep, pid);
- pid = NULL;
- goto out;
+ return ERR_PTR(retval);
}
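With this change alloc_pid() follows the usual ERR_PTR convention; a minimal caller-side sketch, mirroring the copy_process() hunk earlier in this diff:

	pid = alloc_pid(ns);
	if (IS_ERR(pid))
		return PTR_ERR(pid);	/* -ENOMEM or -EAGAIN instead of a bare NULL */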
void disable_pid_allocation(struct pid_namespace *ns)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 9a59d042ea84..86e8157a450f 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -11,7 +11,7 @@
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
-#include <linux/resume-trace.h>
+#include <linux/pm-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index b7d6b3a721b1..8d7a1ef72758 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -28,6 +28,7 @@
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>
+#include <linux/moduleparam.h>
#include "power.h"
@@ -233,12 +234,20 @@ static bool platform_suspend_again(suspend_state_t state)
suspend_ops->suspend_again() : false;
}
+#ifdef CONFIG_PM_DEBUG
+static unsigned int pm_test_delay = 5;
+module_param(pm_test_delay, uint, 0644);
+MODULE_PARM_DESC(pm_test_delay,
+ "Number of seconds to wait before resuming from suspend test");
+#endif
+
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
if (pm_test_level == level) {
- printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
- mdelay(5000);
+ printk(KERN_INFO "suspend debug: Waiting for %d second(s).\n",
+ pm_test_delay);
+ mdelay(pm_test_delay * 1000);
return 1;
}
#endif /* !CONFIG_PM_DEBUG */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index bb0635bd74f2..c099b082cd02 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -32,7 +32,6 @@
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
-#include <linux/aio.h>
#include <linux/syscalls.h>
#include <linux/kexec.h>
#include <linux/kdb.h>
@@ -46,6 +45,7 @@
#include <linux/irq_work.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
@@ -521,7 +521,7 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
int i;
int level = default_message_loglevel;
int facility = 1; /* LOG_USER */
- size_t len = iocb->ki_nbytes;
+ size_t len = iov_iter_count(from);
ssize_t ret = len;
if (len > LOG_LINE_MAX)
@@ -2017,24 +2017,6 @@ int add_preferred_console(char *name, int idx, char *options)
return __add_preferred_console(name, idx, options, NULL);
}
-int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
-{
- struct console_cmdline *c;
- int i;
-
- for (i = 0, c = console_cmdline;
- i < MAX_CMDLINECONSOLES && c->name[0];
- i++, c++)
- if (strcmp(c->name, name) == 0 && c->index == idx) {
- strlcpy(c->name, name_new, sizeof(c->name));
- c->options = options;
- c->index = idx_new;
- return i;
- }
- /* not found */
- return -1;
-}
-
bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);
@@ -2436,9 +2418,6 @@ void register_console(struct console *newcon)
if (preferred_console < 0 || bcon || !console_drivers)
preferred_console = selected_console;
- if (newcon->early_setup)
- newcon->early_setup();
-
/*
* See if we want to use this console driver. If we
* didn't select a console we take the first one
@@ -2464,23 +2443,27 @@ void register_console(struct console *newcon)
for (i = 0, c = console_cmdline;
i < MAX_CMDLINECONSOLES && c->name[0];
i++, c++) {
- BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
- if (strcmp(c->name, newcon->name) != 0)
- continue;
- if (newcon->index >= 0 &&
- newcon->index != c->index)
- continue;
- if (newcon->index < 0)
- newcon->index = c->index;
+ if (!newcon->match ||
+ newcon->match(newcon, c->name, c->index, c->options) != 0) {
+ /* default matching */
+ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
+ if (strcmp(c->name, newcon->name) != 0)
+ continue;
+ if (newcon->index >= 0 &&
+ newcon->index != c->index)
+ continue;
+ if (newcon->index < 0)
+ newcon->index = c->index;
- if (_braille_register_console(newcon, c))
- return;
+ if (_braille_register_console(newcon, c))
+ return;
+
+ if (newcon->setup &&
+ newcon->setup(newcon, c->options) != 0)
+ break;
+ }
- if (newcon->setup &&
- newcon->setup(newcon, console_cmdline[i].options) != 0)
- break;
newcon->flags |= CON_ENABLED;
- newcon->index = c->index;
if (i == selected_console) {
newcon->flags |= CON_CONSDEV;
preferred_console = selected_console;
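A driver-supplied match() hook would be shaped roughly as below (signature inferred from the call site above; the "myser" name and the myser_setup() helper are hypothetical). Returning 0 claims the console_cmdline entry; any non-zero value falls back to the default name/index matching.

	static int myser_match(struct console *co, char *name, int idx, char *options)
	{
		if (strcmp(name, "myser") != 0)
			return -ENODEV;			/* not ours */
		co->index = idx;
		return myser_setup(co, options);	/* 0 on success */
	}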
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 227fec36b12a..c8e0e050a36a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -456,8 +456,6 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
- bool dead = false;
-
if (!valid_signal(data))
return -EIO;
@@ -467,18 +465,19 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
write_lock_irq(&tasklist_lock);
/*
- * This child can be already killed. Make sure de_thread() or
- * our sub-thread doing do_wait() didn't do release_task() yet.
+ * We rely on ptrace_freeze_traced(). It can't be killed and
+ * untraced by another thread, it can't be a zombie.
*/
- if (child->ptrace) {
- child->exit_code = data;
- dead = __ptrace_detach(current, child);
- }
+ WARN_ON(!child->ptrace || child->exit_state);
+ /*
+ * tasklist_lock avoids the race with wait_task_stopped(), see
+ * the comment in ptrace_resume().
+ */
+ child->exit_code = data;
+ __ptrace_detach(current, child);
write_unlock_irq(&tasklist_lock);
proc_ptrace_connector(child, PTRACE_DETACH);
- if (unlikely(dead))
- release_task(child);
return 0;
}
@@ -697,6 +696,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
static int ptrace_resume(struct task_struct *child, long request,
unsigned long data)
{
+ bool need_siglock;
+
if (!valid_signal(data))
return -EIO;
@@ -724,8 +725,26 @@ static int ptrace_resume(struct task_struct *child, long request,
user_disable_single_step(child);
}
+ /*
+ * Change ->exit_code and ->state under siglock to avoid the race
+ * with wait_task_stopped() in between; a non-zero ->exit_code will
+ * wrongly look like another report from tracee.
+ *
+ * Note that we need siglock even if ->exit_code == data and/or this
+ * status was not reported yet, the new status must not be cleared by
+ * wait_task_stopped() after resume.
+ *
+ * If data == 0 we do not care if wait_task_stopped() reports the old
+ * status and clears the code too; this can't race with the tracee, it
+ * takes siglock after resume.
+ */
+ need_siglock = data && !thread_group_empty(current);
+ if (need_siglock)
+ spin_lock_irq(&child->sighand->siglock);
child->exit_code = data;
wake_up_state(child, __TASK_TRACED);
+ if (need_siglock)
+ spin_unlock_irq(&child->sighand->siglock);
return 0;
}
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 30d42aa55d83..8dbe27611ec3 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -853,6 +853,8 @@ rcu_torture_fqs(void *arg)
static int
rcu_torture_writer(void *arg)
{
+ bool can_expedite = !rcu_gp_is_expedited();
+ int expediting = 0;
unsigned long gp_snap;
bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
bool gp_sync1 = gp_sync;
@@ -865,9 +867,15 @@ rcu_torture_writer(void *arg)
int nsynctypes = 0;
VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
+ pr_alert("%s" TORTURE_FLAG
+ " Grace periods expedited from boot/sysfs for %s,\n",
+ torture_type, cur_ops->name);
+ pr_alert("%s" TORTURE_FLAG
+ " Testing of dynamic grace-period expediting diabled.\n",
+ torture_type);
/* Initialize synctype[] array. If none set, take default. */
- if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync)
+ if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
synctype[nsynctypes++] = RTWS_COND_GET;
@@ -949,9 +957,26 @@ rcu_torture_writer(void *arg)
}
}
rcutorture_record_progress(++rcu_torture_current_version);
+ /* Cycle through nesting levels of rcu_expedite_gp() calls. */
+ if (can_expedite &&
+ !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
+ WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
+ if (expediting >= 0)
+ rcu_expedite_gp();
+ else
+ rcu_unexpedite_gp();
+ if (++expediting > 3)
+ expediting = -expediting;
+ }
rcu_torture_writer_state = RTWS_STUTTER;
stutter_wait("rcu_torture_writer");
} while (!torture_must_stop());
+ /* Reset expediting back to unexpedited. */
+ if (expediting > 0)
+ expediting = -expediting;
+ while (can_expedite && expediting++ < 0)
+ rcu_unexpedite_gp();
+ WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
rcu_torture_writer_state = RTWS_STOPPING;
torture_kthread_stopping("rcu_torture_writer");
return 0;
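The random gate above is compact; a standalone sketch of its behaviour (plain C, outside the kernel):

	static int should_step(unsigned long rand, int expediting)
	{
		/*
		 * expediting == 0: the mask is all ones, so this fires only when
		 * (rand & 0xff) == 0, i.e. roughly once per 256 passes.
		 * expediting != 0: the mask is 0, so it fires on every pass,
		 * stepping through the expedite/unexpedite nesting cycle.
		 */
		return !(rand & 0xff & (!!expediting - 1));
	}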
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 445bf8ffe3fb..cad76e76b4e7 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -402,23 +402,6 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
}
EXPORT_SYMBOL_GPL(call_srcu);
-struct rcu_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-/*
- * Awaken the corresponding synchronize_srcu() instance now that a
- * grace period has elapsed.
- */
-static void wakeme_after_rcu(struct rcu_head *head)
-{
- struct rcu_synchronize *rcu;
-
- rcu = container_of(head, struct rcu_synchronize, head);
- complete(&rcu->completion);
-}
-
static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);
@@ -507,7 +490,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
*/
void synchronize_srcu(struct srcu_struct *sp)
{
- __synchronize_srcu(sp, rcu_expedited
+ __synchronize_srcu(sp, rcu_gp_is_expedited()
? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
: SYNCHRONIZE_SRCU_TRYCOUNT);
}
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index cc9ceca7bde1..069742d61c68 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -103,8 +103,7 @@ EXPORT_SYMBOL(__rcu_is_watching);
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
RCU_TRACE(reset_cpu_stall_ticks(rcp));
- if (rcp->rcucblist != NULL &&
- rcp->donetail != rcp->curtail) {
+ if (rcp->donetail != rcp->curtail) {
rcp->donetail = rcp->curtail;
return 1;
}
@@ -169,17 +168,6 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
unsigned long flags;
RCU_TRACE(int cb_count = 0);
- /* If no RCU callbacks ready to invoke, just return. */
- if (&rcp->rcucblist == rcp->donetail) {
- RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
- !!ACCESS_ONCE(rcp->rcucblist),
- need_resched(),
- is_idle_task(current),
- false));
- return;
- }
-
/* Move the ready-to-invoke callbacks to a local list. */
local_irq_save(flags);
RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 48d640ca1a05..8cf7304b2867 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -91,8 +91,10 @@ static const char *tp_##sname##_varname __used __tracepoint_string = sname##_var
#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
.level = { &sname##_state.node[0] }, \
+ .rda = &sname##_data, \
.call = cr, \
.fqs_state = RCU_GP_IDLE, \
.gpnum = 0UL - 300UL, \
@@ -101,11 +103,9 @@ struct rcu_state sname##_state = { \
.orphan_nxttail = &sname##_state.orphan_nxtlist, \
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
- .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
.name = RCU_STATE_NAME(sname), \
.abbr = sabbr, \
-}; \
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
+}
RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
@@ -152,6 +152,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;
+static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -160,6 +162,15 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
module_param(kthread_prio, int, 0644);
+/* Delay in jiffies for grace-period initialization delays, debug only. */
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
+static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
+module_param(gp_init_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+static const int gp_init_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */
+
/*
* Track the rcutorture test sequence number and the update version
* number within a given test. The rcutorture_testseq is incremented
@@ -173,6 +184,17 @@ unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;
/*
+ * Compute the mask of online CPUs for the specified rcu_node structure.
+ * This will not be stable unless the rcu_node structure's ->lock is
+ * held, but the bit corresponding to the current CPU will be stable
+ * in most contexts.
+ */
+unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
+{
+ return ACCESS_ONCE(rnp->qsmaskinitnext);
+}
+
+/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
* permit this function to be invoked without holding the root rcu_node
* structure's ->lock, but of course results can be subject to change.
@@ -292,10 +314,10 @@ void rcu_note_context_switch(void)
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
/*
- * Register a quiesecent state for all RCU flavors. If there is an
+ * Register a quiescent state for all RCU flavors. If there is an
* emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
* dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desparate need of a quiescent state, which will normally
+ * RCU flavors in desperate need of a quiescent state, which will normally
* be none of them). Either way, do a lightweight quiescent state for
* all RCU flavors.
*/
@@ -410,6 +432,15 @@ void rcu_bh_force_quiescent_state(void)
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
/*
+ * Force a quiescent state for RCU-sched.
+ */
+void rcu_sched_force_quiescent_state(void)
+{
+ force_quiescent_state(&rcu_sched_state);
+}
+EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
+
+/*
* Show the state of the grace-period kthreads.
*/
void show_rcu_gp_kthreads(void)
@@ -483,15 +514,6 @@ void rcutorture_record_progress(unsigned long vernum)
EXPORT_SYMBOL_GPL(rcutorture_record_progress);
/*
- * Force a quiescent state for RCU-sched.
- */
-void rcu_sched_force_quiescent_state(void)
-{
- force_quiescent_state(&rcu_sched_state);
-}
-EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
-
-/*
* Does the CPU have callbacks ready to be invoked?
*/
static int
@@ -954,7 +976,7 @@ bool rcu_lockdep_current_cpu_online(void)
preempt_disable();
rdp = this_cpu_ptr(&rcu_sched_data);
rnp = rdp->mynode;
- ret = (rdp->grpmask & rnp->qsmaskinit) ||
+ ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
!rcu_scheduler_fully_active;
preempt_enable();
return ret;
@@ -1196,9 +1218,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
} else {
j = jiffies;
gpa = ACCESS_ONCE(rsp->gp_activity);
- pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
+ pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
rsp->name, j - gpa, j, gpa,
- jiffies_till_next_fqs);
+ jiffies_till_next_fqs,
+ rcu_get_root(rsp)->qsmask);
/* In this case, the current CPU might be at fault. */
sched_show_task(current);
}
@@ -1328,20 +1351,30 @@ void rcu_cpu_stall_reset(void)
}
/*
- * Initialize the specified rcu_data structure's callback list to empty.
+ * Initialize the specified rcu_data structure's default callback list
+ * to empty. The default callback list is the one that is not used by
+ * no-callbacks CPUs.
*/
-static void init_callback_list(struct rcu_data *rdp)
+static void init_default_callback_list(struct rcu_data *rdp)
{
int i;
- if (init_nocb_callback_list(rdp))
- return;
rdp->nxtlist = NULL;
for (i = 0; i < RCU_NEXT_SIZE; i++)
rdp->nxttail[i] = &rdp->nxtlist;
}
/*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+static void init_callback_list(struct rcu_data *rdp)
+{
+ if (init_nocb_callback_list(rdp))
+ return;
+ init_default_callback_list(rdp);
+}
+
+/*
* Determine the value that ->completed will have at the end of the
* next subsequent grace period. This is used to tag callbacks so that
* a CPU can invoke callbacks in a timely fashion even if that CPU has
@@ -1703,11 +1736,11 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static int rcu_gp_init(struct rcu_state *rsp)
{
+ unsigned long oldmask;
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
ACCESS_ONCE(rsp->gp_activity) = jiffies;
- rcu_bind_gp_kthread();
raw_spin_lock_irq(&rnp->lock);
smp_mb__after_unlock_lock();
if (!ACCESS_ONCE(rsp->gp_flags)) {
@@ -1733,9 +1766,54 @@ static int rcu_gp_init(struct rcu_state *rsp)
trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
raw_spin_unlock_irq(&rnp->lock);
- /* Exclude any concurrent CPU-hotplug operations. */
- mutex_lock(&rsp->onoff_mutex);
- smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
+ /*
+ * Apply per-leaf buffered online and offline operations to the
+ * rcu_node tree. Note that this new grace period need not wait
+ * for subsequent online CPUs, and that quiescent-state forcing
+ * will handle subsequent offline CPUs.
+ */
+ rcu_for_each_leaf_node(rsp, rnp) {
+ raw_spin_lock_irq(&rnp->lock);
+ smp_mb__after_unlock_lock();
+ if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
+ !rnp->wait_blkd_tasks) {
+ /* Nothing to do on this leaf rcu_node structure. */
+ raw_spin_unlock_irq(&rnp->lock);
+ continue;
+ }
+
+ /* Record old state, apply changes to ->qsmaskinit field. */
+ oldmask = rnp->qsmaskinit;
+ rnp->qsmaskinit = rnp->qsmaskinitnext;
+
+ /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
+ if (!oldmask != !rnp->qsmaskinit) {
+ if (!oldmask) /* First online CPU for this rcu_node. */
+ rcu_init_new_rnp(rnp);
+ else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
+ rnp->wait_blkd_tasks = true;
+ else /* Last offline CPU and can propagate. */
+ rcu_cleanup_dead_rnp(rnp);
+ }
+
+ /*
+ * If all waited-on tasks from prior grace period are
+ * done, and if all this rcu_node structure's CPUs are
+ * still offline, propagate up the rcu_node tree and
+ * clear ->wait_blkd_tasks. Otherwise, if one of this
+ * rcu_node structure's CPUs has since come back online,
+ * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
+ * checks for this, so just call it unconditionally).
+ */
+ if (rnp->wait_blkd_tasks &&
+ (!rcu_preempt_has_tasks(rnp) ||
+ rnp->qsmaskinit)) {
+ rnp->wait_blkd_tasks = false;
+ rcu_cleanup_dead_rnp(rnp);
+ }
+
+ raw_spin_unlock_irq(&rnp->lock);
+ }
/*
* Set the quiescent-state-needed bits in all the rcu_node
@@ -1757,8 +1835,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
- WARN_ON_ONCE(rnp->completed != rsp->completed);
- ACCESS_ONCE(rnp->completed) = rsp->completed;
+ if (WARN_ON_ONCE(rnp->completed != rsp->completed))
+ ACCESS_ONCE(rnp->completed) = rsp->completed;
if (rnp == rdp->mynode)
(void)__note_gp_changes(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
@@ -1768,9 +1846,11 @@ static int rcu_gp_init(struct rcu_state *rsp)
raw_spin_unlock_irq(&rnp->lock);
cond_resched_rcu_qs();
ACCESS_ONCE(rsp->gp_activity) = jiffies;
+ if (gp_init_delay > 0 &&
+ !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
+ schedule_timeout_uninterruptible(gp_init_delay);
}
- mutex_unlock(&rsp->onoff_mutex);
return 1;
}
@@ -1798,7 +1878,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
fqs_state = RCU_FORCE_QS;
} else {
/* Handle dyntick-idle and offline CPUs. */
- isidle = false;
+ isidle = true;
force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
}
/* Clear flag to prevent immediate re-entry. */
@@ -1852,6 +1932,8 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
rcu_for_each_node_breadth_first(rsp, rnp) {
raw_spin_lock_irq(&rnp->lock);
smp_mb__after_unlock_lock();
+ WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+ WARN_ON_ONCE(rnp->qsmask);
ACCESS_ONCE(rnp->completed) = rsp->gpnum;
rdp = this_cpu_ptr(rsp->rda);
if (rnp == rdp->mynode)
@@ -1895,6 +1977,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
struct rcu_state *rsp = arg;
struct rcu_node *rnp = rcu_get_root(rsp);
+ rcu_bind_gp_kthread();
for (;;) {
/* Handle grace-period start. */
@@ -2062,25 +2145,32 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
* Similar to rcu_report_qs_rdp(), for which it is a helper function.
* Allows quiescent states for a group of CPUs to be reported at one go
* to the specified rcu_node structure, though all the CPUs in the group
- * must be represented by the same rcu_node structure (which need not be
- * a leaf rcu_node structure, though it often will be). That structure's
- * lock must be held upon entry, and it is released before return.
+ * must be represented by the same rcu_node structure (which need not be a
+ * leaf rcu_node structure, though it often will be). The gps parameter
+ * is the grace-period snapshot, which means that the quiescent states
+ * are valid only if rnp->gpnum is equal to gps. That structure's lock
+ * must be held upon entry, and it is released before return.
*/
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
- struct rcu_node *rnp, unsigned long flags)
+ struct rcu_node *rnp, unsigned long gps, unsigned long flags)
__releases(rnp->lock)
{
+ unsigned long oldmask = 0;
struct rcu_node *rnp_c;
/* Walk up the rcu_node hierarchy. */
for (;;) {
- if (!(rnp->qsmask & mask)) {
+ if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
- /* Our bit has already been cleared, so done. */
+ /*
+ * Our bit has already been cleared, or the
+ * relevant grace period is already over, so done.
+ */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
+ WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
rnp->qsmask &= ~mask;
trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
mask, rnp->qsmask, rnp->level,
@@ -2104,7 +2194,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
rnp = rnp->parent;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
- WARN_ON_ONCE(rnp_c->qsmask);
+ oldmask = rnp_c->qsmask;
}
/*
@@ -2116,6 +2206,46 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
}
/*
+ * Record a quiescent state for all tasks that were previously queued
+ * on the specified rcu_node structure and that were blocking the current
+ * RCU grace period. The caller must hold the specified rnp->lock with
+ * irqs disabled, and this lock is released upon return, but irqs remain
+ * disabled.
+ */
+static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+ struct rcu_node *rnp, unsigned long flags)
+ __releases(rnp->lock)
+{
+ unsigned long gps;
+ unsigned long mask;
+ struct rcu_node *rnp_p;
+
+ if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
+ rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ return; /* Still need more quiescent states! */
+ }
+
+ rnp_p = rnp->parent;
+ if (rnp_p == NULL) {
+ /*
+ * Only one rcu_node structure in the tree, so don't
+ * try to report up to its nonexistent parent!
+ */
+ rcu_report_qs_rsp(rsp, flags);
+ return;
+ }
+
+ /* Report up the rest of the hierarchy, tracking current ->gpnum. */
+ gps = rnp->gpnum;
+ mask = rnp->grpmask;
+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+ raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
+ smp_mb__after_unlock_lock();
+ rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
+}
+
+/*
* Record a quiescent state for the specified CPU to that CPU's rcu_data
* structure. This must be either called from the specified CPU, or
* called when the specified CPU is known to be offline (and when it is
@@ -2163,7 +2293,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
*/
needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
- rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+ /* ^^^ Released rnp->lock */
if (needwake)
rcu_gp_kthread_wake(rsp);
}
@@ -2256,8 +2387,12 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
}
- /* Finally, initialize the rcu_data structure's list to empty. */
+ /*
+ * Finally, initialize the rcu_data structure's list to empty and
+ * disallow further callbacks on this CPU.
+ */
init_callback_list(rdp);
+ rdp->nxttail[RCU_NEXT_TAIL] = NULL;
}
/*
@@ -2355,6 +2490,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
smp_mb__after_unlock_lock(); /* GP memory ordering. */
rnp->qsmaskinit &= ~mask;
+ rnp->qsmask &= ~mask;
if (rnp->qsmaskinit) {
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
return;
@@ -2364,6 +2500,26 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
}
/*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function. We now remove it from the rcu_node tree's ->qsmaskinit
+ * bit masks.
+ */
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
+
+ /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+ mask = rdp->grpmask;
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
+ rnp->qsmaskinitnext &= ~mask;
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
* The CPU has been completely removed, and some other CPU is reporting
* this fact from process context. Do the remainder of the cleanup,
* including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2379,29 +2535,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
/* Adjust any no-longer-needed kthreads. */
rcu_boost_kthread_setaffinity(rnp, -1);
- /* Exclude any attempts to start a new grace period. */
- mutex_lock(&rsp->onoff_mutex);
- raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
-
/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
+ raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
rcu_adopt_orphan_cbs(rsp, flags);
raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
- /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
- raw_spin_lock_irqsave(&rnp->lock, flags);
- smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
- rnp->qsmaskinit &= ~rdp->grpmask;
- if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
- rcu_cleanup_dead_rnp(rnp);
- rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
cpu, rdp->qlen, rdp->nxtlist);
- init_callback_list(rdp);
- /* Disallow further callbacks on this CPU. */
- rdp->nxttail[RCU_NEXT_TAIL] = NULL;
- mutex_unlock(&rsp->onoff_mutex);
}
#else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -2414,6 +2556,10 @@ static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
{
}
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+}
+
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
}
@@ -2589,26 +2735,47 @@ static void force_qs_rnp(struct rcu_state *rsp,
return;
}
if (rnp->qsmask == 0) {
- rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
- continue;
+ if (rcu_state_p == &rcu_sched_state ||
+ rsp != rcu_state_p ||
+ rcu_preempt_blocked_readers_cgp(rnp)) {
+ /*
+ * No point in scanning bits because they
+ * are all zero. But we might need to
+ * priority-boost blocked readers.
+ */
+ rcu_initiate_boost(rnp, flags);
+ /* rcu_initiate_boost() releases rnp->lock */
+ continue;
+ }
+ if (rnp->parent &&
+ (rnp->parent->qsmask & rnp->grpmask)) {
+ /*
+ * Race between grace-period
+ * initialization and task exiting RCU
+ * read-side critical section: Report.
+ */
+ rcu_report_unblock_qs_rnp(rsp, rnp, flags);
+ /* rcu_report_unblock_qs_rnp() rlses ->lock */
+ continue;
+ }
}
cpu = rnp->grplo;
bit = 1;
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
if ((rnp->qsmask & bit) != 0) {
- if ((rnp->qsmaskinit & bit) != 0)
- *isidle = false;
+ if ((rnp->qsmaskinit & bit) == 0)
+ *isidle = false; /* Pending hotplug. */
if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
mask |= bit;
}
}
if (mask != 0) {
-
- /* rcu_report_qs_rnp() releases rnp->lock. */
- rcu_report_qs_rnp(mask, rsp, rnp, flags);
- continue;
+ /* Idle/offline CPUs, report (releases rnp->lock). */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+ } else {
+ /* Nothing to do here, so just drop the lock. */
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
}
@@ -2741,7 +2908,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
* If called from an extended quiescent state, invoke the RCU
* core in order to force a re-evaluation of RCU's idleness.
*/
- if (!rcu_is_watching() && cpu_online(smp_processor_id()))
+ if (!rcu_is_watching())
invoke_rcu_core();
/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
@@ -2827,11 +2994,22 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
if (cpu != -1)
rdp = per_cpu_ptr(rsp->rda, cpu);
- offline = !__call_rcu_nocb(rdp, head, lazy, flags);
- WARN_ON_ONCE(offline);
- /* _call_rcu() is illegal on offline CPU; leak the callback. */
- local_irq_restore(flags);
- return;
+ if (likely(rdp->mynode)) {
+ /* Post-boot, so this should be for a no-CBs CPU. */
+ offline = !__call_rcu_nocb(rdp, head, lazy, flags);
+ WARN_ON_ONCE(offline);
+ /* Offline CPU, _call_rcu() illegal, leak callback. */
+ local_irq_restore(flags);
+ return;
+ }
+ /*
+ * Very early boot, before rcu_init(). Initialize if needed
+ * and then drop through to queue the callback.
+ */
+ BUG_ON(cpu != -1);
+ WARN_ON_ONCE(!rcu_is_watching());
+ if (!likely(rdp->nxtlist))
+ init_default_callback_list(rdp);
}
ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
if (lazy)
@@ -2954,7 +3132,7 @@ void synchronize_sched(void)
"Illegal synchronize_sched() in RCU-sched read-side critical section");
if (rcu_blocking_is_gp())
return;
- if (rcu_expedited)
+ if (rcu_gp_is_expedited())
synchronize_sched_expedited();
else
wait_rcu_gp(call_rcu_sched);
@@ -2981,7 +3159,7 @@ void synchronize_rcu_bh(void)
"Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
if (rcu_blocking_is_gp())
return;
- if (rcu_expedited)
+ if (rcu_gp_is_expedited())
synchronize_rcu_bh_expedited();
else
wait_rcu_gp(call_rcu_bh);
@@ -3518,6 +3696,28 @@ void rcu_barrier_sched(void)
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
/*
+ * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
+ * first CPU in a given leaf rcu_node structure coming online. The caller
+ * must hold the corresponding leaf rcu_node ->lock with interrupts
+ * disabled.
+ */
+static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
+{
+ long mask;
+ struct rcu_node *rnp = rnp_leaf;
+
+ for (;;) {
+ mask = rnp->grpmask;
+ rnp = rnp->parent;
+ if (rnp == NULL)
+ return;
+ raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
+ rnp->qsmaskinit |= mask;
+ raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+ }
+}
+
+/*
* Do boot-time initialization of a CPU's per-CPU RCU data.
*/
static void __init
@@ -3553,49 +3753,37 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
- /* Exclude new grace periods. */
- mutex_lock(&rsp->onoff_mutex);
-
/* Set up local state, ensuring consistent view of global state. */
raw_spin_lock_irqsave(&rnp->lock, flags);
rdp->beenonline = 1; /* We have now been online. */
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;
- init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
+ if (!rdp->nxtlist)
+ init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
rcu_sysidle_init_percpu_data(rdp->dynticks);
atomic_set(&rdp->dynticks->dynticks,
(atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- /* Add CPU to rcu_node bitmasks. */
+ /*
+ * Add CPU to leaf rcu_node pending-online bitmask. Any needed
+ * propagation up the rcu_node tree will happen at the beginning
+ * of the next grace period.
+ */
rnp = rdp->mynode;
mask = rdp->grpmask;
- do {
- /* Exclude any attempts to start a new GP on small systems. */
- raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rnp->qsmaskinit |= mask;
- mask = rnp->grpmask;
- if (rnp == rdp->mynode) {
- /*
- * If there is a grace period in progress, we will
- * set up to wait for it next time we run the
- * RCU core code.
- */
- rdp->gpnum = rnp->completed;
- rdp->completed = rnp->completed;
- rdp->passed_quiesce = 0;
- rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
- rdp->qs_pending = 0;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
- }
- raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
- rnp = rnp->parent;
- } while (rnp != NULL && !(rnp->qsmaskinit & mask));
- local_irq_restore(flags);
-
- mutex_unlock(&rsp->onoff_mutex);
+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+ smp_mb__after_unlock_lock();
+ rnp->qsmaskinitnext |= mask;
+ rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
+ rdp->completed = rnp->completed;
+ rdp->passed_quiesce = false;
+ rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+ rdp->qs_pending = false;
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
static void rcu_prepare_cpu(int cpu)
@@ -3609,15 +3797,14 @@ static void rcu_prepare_cpu(int cpu)
/*
* Handle CPU online/offline notification events.
*/
-static int rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+int rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
struct rcu_state *rsp;
- trace_rcu_utilization(TPS("Start CPU hotplug"));
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
@@ -3637,6 +3824,11 @@ static int rcu_cpu_notify(struct notifier_block *self,
for_each_rcu_flavor(rsp)
rcu_cleanup_dying_cpu(rsp);
break;
+ case CPU_DYING_IDLE:
+ for_each_rcu_flavor(rsp) {
+ rcu_cleanup_dying_idle_cpu(cpu, rsp);
+ }
+ break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
case CPU_UP_CANCELED:
@@ -3649,7 +3841,6 @@ static int rcu_cpu_notify(struct notifier_block *self,
default:
break;
}
- trace_rcu_utilization(TPS("End CPU hotplug"));
return NOTIFY_OK;
}
@@ -3660,11 +3851,12 @@ static int rcu_pm_notify(struct notifier_block *self,
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
- rcu_expedited = 1;
+ rcu_expedite_gp();
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
- rcu_expedited = 0;
+ if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
+ rcu_unexpedite_gp();
break;
default:
break;
@@ -3734,30 +3926,26 @@ void rcu_scheduler_starting(void)
* Compute the per-level fanout, either using the exact fanout specified
* or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
*/
-#ifdef CONFIG_RCU_FANOUT_EXACT
-static void __init rcu_init_levelspread(struct rcu_state *rsp)
-{
- int i;
-
- rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
- for (i = rcu_num_lvls - 2; i >= 0; i--)
- rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-}
-#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
- int ccur;
- int cprv;
int i;
- cprv = nr_cpu_ids;
- for (i = rcu_num_lvls - 1; i >= 0; i--) {
- ccur = rsp->levelcnt[i];
- rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
- cprv = ccur;
+ if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) {
+ rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+ for (i = rcu_num_lvls - 2; i >= 0; i--)
+ rsp->levelspread[i] = CONFIG_RCU_FANOUT;
+ } else {
+ int ccur;
+ int cprv;
+
+ cprv = nr_cpu_ids;
+ for (i = rcu_num_lvls - 1; i >= 0; i--) {
+ ccur = rsp->levelcnt[i];
+ rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+ cprv = ccur;
+ }
}
}
-#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
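A standalone worked example of the balanced branch (plain C, hypothetical geometry: one root and four leaf rcu_node structures covering nr_cpu_ids = 64):

	#include <stdio.h>

	int main(void)
	{
		int levelcnt[2] = { 1, 4 };	/* rcu_node count per level */
		int levelspread[2];
		int cprv = 64;			/* assumed nr_cpu_ids */
		int i;

		for (i = 1; i >= 0; i--) {
			int ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
		printf("root fanout %d, leaf fanout %d\n",
		       levelspread[0], levelspread[1]);	/* prints 4 and 16 */
		return 0;
	}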
/*
* Helper function for rcu_init() that initializes one rcu_state structure.
@@ -3833,7 +4021,6 @@ static void __init rcu_init_one(struct rcu_state *rsp,
}
}
- rsp->rda = rda;
init_waitqueue_head(&rsp->gp_wq);
rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) {
@@ -3926,6 +4113,8 @@ void __init rcu_init(void)
{
int cpu;
+ rcu_early_boot_tests();
+
rcu_bootup_announce();
rcu_init_geometry();
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
@@ -3942,8 +4131,6 @@ void __init rcu_init(void)
pm_notifier(rcu_pm_notify, 0);
for_each_online_cpu(cpu)
rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-
- rcu_early_boot_tests();
}
#include "tree_plugin.h"
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 119de399eb2f..a69d3dab2ec4 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -141,12 +141,20 @@ struct rcu_node {
/* complete (only for PREEMPT_RCU). */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask & expmask. */
+ /* Initialized from ->qsmaskinitnext at the */
+ /* beginning of each grace period. */
+ unsigned long qsmaskinitnext;
+ /* Online CPUs for next grace period. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
/* Only one bit will be set in this mask. */
int grplo; /* lowest-numbered CPU or group here. */
int grphi; /* highest-numbered CPU or group here. */
u8 grpnum; /* CPU/group number for next level up. */
u8 level; /* root is at level 0. */
+ bool wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
+ /* exit RCU read-side critical sections */
+ /* before propagating offline up the */
+ /* rcu_node tree? */
struct rcu_node *parent;
struct list_head blkd_tasks;
/* Tasks blocked in RCU read-side critical */
@@ -448,8 +456,6 @@ struct rcu_state {
long qlen; /* Total number of callbacks. */
/* End of fields guarded by orphan_lock. */
- struct mutex onoff_mutex; /* Coordinate hotplug & GPs. */
-
struct mutex barrier_mutex; /* Guards barrier fields. */
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
struct completion barrier_completion; /* Wake at barrier end. */
@@ -559,6 +565,7 @@ static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0a571e9a0f1d..8c0ec0f5a027 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -58,38 +58,33 @@ static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
*/
static void __init rcu_bootup_announce_oddness(void)
{
-#ifdef CONFIG_RCU_TRACE
- pr_info("\tRCU debugfs-based tracing is enabled.\n");
-#endif
-#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
- pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
- CONFIG_RCU_FANOUT);
-#endif
-#ifdef CONFIG_RCU_FANOUT_EXACT
- pr_info("\tHierarchical RCU autobalancing is disabled.\n");
-#endif
-#ifdef CONFIG_RCU_FAST_NO_HZ
- pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
-#endif
-#ifdef CONFIG_PROVE_RCU
- pr_info("\tRCU lockdep checking is enabled.\n");
-#endif
-#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
- pr_info("\tRCU torture testing starts during boot.\n");
-#endif
-#if defined(CONFIG_RCU_CPU_STALL_INFO)
- pr_info("\tAdditional per-CPU info printed with stalls.\n");
-#endif
-#if NUM_RCU_LVL_4 != 0
- pr_info("\tFour-level hierarchy is enabled.\n");
-#endif
+ if (IS_ENABLED(CONFIG_RCU_TRACE))
+ pr_info("\tRCU debugfs-based tracing is enabled.\n");
+ if ((IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) ||
+ (!IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32))
+ pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
+ CONFIG_RCU_FANOUT);
+ if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT))
+ pr_info("\tHierarchical RCU autobalancing is disabled.\n");
+ if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
+ pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
+ if (IS_ENABLED(CONFIG_PROVE_RCU))
+ pr_info("\tRCU lockdep checking is enabled.\n");
+ if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
+ pr_info("\tRCU torture testing starts during boot.\n");
+ if (IS_ENABLED(CONFIG_RCU_CPU_STALL_INFO))
+ pr_info("\tAdditional per-CPU info printed with stalls.\n");
+ if (NUM_RCU_LVL_4 != 0)
+ pr_info("\tFour-level hierarchy is enabled.\n");
+ if (CONFIG_RCU_FANOUT_LEAF != 16)
+ pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
+ CONFIG_RCU_FANOUT_LEAF);
if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
if (nr_cpu_ids != NR_CPUS)
pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-#ifdef CONFIG_RCU_BOOST
- pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
-#endif
+ if (IS_ENABLED(CONFIG_RCU_BOOST))
+ pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
}
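For reference, the IS_ENABLED() form keeps both branches visible to the compiler, so they are always parsed and type-checked, while constant folding still discards the dead one; a minimal sketch with a hypothetical Kconfig symbol:

	if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE))	/* hypothetical option */
		pr_info("\tExample feature is enabled.\n");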
#ifdef CONFIG_PREEMPT_RCU
@@ -180,7 +175,7 @@ static void rcu_preempt_note_context_switch(void)
* But first, note that the current CPU must still be
* on line!
*/
- WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
+ WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
@@ -233,43 +228,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
}
/*
- * Record a quiescent state for all tasks that were previously queued
- * on the specified rcu_node structure and that were blocking the current
- * RCU grace period. The caller must hold the specified rnp->lock with
- * irqs disabled, and this lock is released upon return, but irqs remain
- * disabled.
- */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
- __releases(rnp->lock)
-{
- unsigned long mask;
- struct rcu_node *rnp_p;
-
- if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return; /* Still need more quiescent states! */
- }
-
- rnp_p = rnp->parent;
- if (rnp_p == NULL) {
- /*
- * Either there is only one rcu_node in the tree,
- * or tasks were kicked up to root rcu_node due to
- * CPUs going offline.
- */
- rcu_report_qs_rsp(&rcu_preempt_state, flags);
- return;
- }
-
- /* Report up the rest of the hierarchy. */
- mask = rnp->grpmask;
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
- smp_mb__after_unlock_lock();
- rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
-}
-
-/*
* Advance a ->blkd_tasks-list pointer to the next entry, instead
* returning NULL if at the end of the list.
*/
@@ -300,7 +258,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
*/
void rcu_read_unlock_special(struct task_struct *t)
{
- bool empty;
bool empty_exp;
bool empty_norm;
bool empty_exp_now;
@@ -334,7 +291,13 @@ void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
- if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
+ if (in_irq() || in_serving_softirq()) {
+ lockdep_rcu_suspicious(__FILE__, __LINE__,
+ "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
+ pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
+ t->rcu_read_unlock_special.s,
+ t->rcu_read_unlock_special.b.blocked,
+ t->rcu_read_unlock_special.b.need_qs);
local_irq_restore(flags);
return;
}
@@ -356,7 +319,6 @@ void rcu_read_unlock_special(struct task_struct *t)
break;
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
- empty = !rcu_preempt_has_tasks(rnp);
empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
empty_exp = !rcu_preempted_readers_exp(rnp);
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -377,14 +339,6 @@ void rcu_read_unlock_special(struct task_struct *t)
#endif /* #ifdef CONFIG_RCU_BOOST */
/*
- * If this was the last task on the list, go see if we
- * need to propagate ->qsmaskinit bit clearing up the
- * rcu_node tree.
- */
- if (!empty && !rcu_preempt_has_tasks(rnp))
- rcu_cleanup_dead_rnp(rnp);
-
- /*
* If this was the last task on the current list, and if
* we aren't waiting on any CPUs, report the quiescent state.
* Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
@@ -399,7 +353,8 @@ void rcu_read_unlock_special(struct task_struct *t)
rnp->grplo,
rnp->grphi,
!!rnp->gp_tasks);
- rcu_report_unblock_qs_rnp(rnp, flags);
+ rcu_report_unblock_qs_rnp(&rcu_preempt_state,
+ rnp, flags);
} else {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
@@ -520,10 +475,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
WARN_ON_ONCE(rnp->qsmask);
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
/*
* Check for a quiescent state from the current CPU. When a task blocks,
* the task is recorded in the corresponding CPU's rcu_node structure,
@@ -585,7 +536,7 @@ void synchronize_rcu(void)
"Illegal synchronize_rcu() in RCU read-side critical section");
if (!rcu_scheduler_active)
return;
- if (rcu_expedited)
+ if (rcu_gp_is_expedited())
synchronize_rcu_expedited();
else
wait_rcu_gp(call_rcu);
@@ -630,9 +581,6 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
* recursively up the tree. (Calm down, calm down, we do the recursion
* iteratively!)
*
- * Most callers will set the "wake" flag, but the task initiating the
- * expedited grace period need not wake itself.
- *
* Caller must hold sync_rcu_preempt_exp_mutex.
*/
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
@@ -667,29 +615,85 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
/*
* Snapshot the tasks blocking the newly started preemptible-RCU expedited
- * grace period for the specified rcu_node structure. If there are no such
- * tasks, report it up the rcu_node hierarchy.
+ * grace period for the specified rcu_node structure, phase 1. If there
+ * are such tasks, set the ->expmask bits up the rcu_node tree and also
+ * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
+ * that work is needed here.
*
- * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
- * CPU hotplug operations.
+ * Caller must hold sync_rcu_preempt_exp_mutex.
*/
static void
-sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
+sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
{
unsigned long flags;
- int must_wait = 0;
+ unsigned long mask;
+ struct rcu_node *rnp_up;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
+ WARN_ON_ONCE(rnp->expmask);
+ WARN_ON_ONCE(rnp->exp_tasks);
if (!rcu_preempt_has_tasks(rnp)) {
+ /* No blocked tasks, nothing to do. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- } else {
+ return;
+ }
+ /* Call for Phase 2 and propagate ->expmask bits up the tree. */
+ rnp->expmask = 1;
+ rnp_up = rnp;
+ while (rnp_up->parent) {
+ mask = rnp_up->grpmask;
+ rnp_up = rnp_up->parent;
+ if (rnp_up->expmask & mask)
+ break;
+ raw_spin_lock(&rnp_up->lock); /* irqs already off */
+ smp_mb__after_unlock_lock();
+ rnp_up->expmask |= mask;
+ raw_spin_unlock(&rnp_up->lock); /* irqs still off */
+ }
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Snapshot the tasks blocking the newly started preemptible-RCU expedited
+ * grace period for the specified rcu_node structure, phase 2. If the
+ * leaf rcu_node structure has its ->expmask field set, check for tasks.
+ * If there are some, clear ->expmask and set ->exp_tasks accordingly,
+ * then initiate RCU priority boosting. Otherwise, clear ->expmask and
+ * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
+ * enabling rcu_read_unlock_special() to do the bit-clearing.
+ *
+ * Caller must hold sync_rcu_preempt_exp_mutex.
+ */
+static void
+sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb__after_unlock_lock();
+ if (!rnp->expmask) {
+ /* Phase 1 didn't do anything, so Phase 2 doesn't either. */
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+
+ /* Phase 1 is over. */
+ rnp->expmask = 0;
+
+ /*
+ * If there are still blocked tasks, set up ->exp_tasks so that
+ * rcu_read_unlock_special() will wake us and then boost them.
+ */
+ if (rcu_preempt_has_tasks(rnp)) {
rnp->exp_tasks = rnp->blkd_tasks.next;
rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
- must_wait = 1;
+ return;
}
- if (!must_wait)
- rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
+
+ /* No longer any blocked tasks, so undo bit setting. */
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ rcu_report_exp_rnp(rsp, rnp, false);
}
/**
@@ -706,7 +710,6 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
*/
void synchronize_rcu_expedited(void)
{
- unsigned long flags;
struct rcu_node *rnp;
struct rcu_state *rsp = &rcu_preempt_state;
unsigned long snap;
@@ -757,19 +760,16 @@ void synchronize_rcu_expedited(void)
/* force all RCU readers onto ->blkd_tasks lists. */
synchronize_sched_expedited();
- /* Initialize ->expmask for all non-leaf rcu_node structures. */
- rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
- raw_spin_lock_irqsave(&rnp->lock, flags);
- smp_mb__after_unlock_lock();
- rnp->expmask = rnp->qsmaskinit;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
- /* Snapshot current state of ->blkd_tasks lists. */
+ /*
+ * Snapshot current state of ->blkd_tasks lists into ->expmask.
+ * Phase 1 sets bits and phase 2 permits rcu_read_unlock_special()
+ * to start clearing them. Doing this in one phase leads to
+ * strange races between setting and clearing bits, so just say "no"!
+ */
+ rcu_for_each_leaf_node(rsp, rnp)
+ sync_rcu_preempt_exp_init1(rsp, rnp);
rcu_for_each_leaf_node(rsp, rnp)
- sync_rcu_preempt_exp_init(rsp, rnp);
- if (NUM_RCU_NODES > 1)
- sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
+ sync_rcu_preempt_exp_init2(rsp, rnp);
put_online_cpus();
@@ -859,8 +859,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-
/*
* Because there is no preemptible RCU, there can be no readers blocked.
*/
@@ -869,8 +867,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
return false;
}
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
/*
* Because preemptible RCU does not exist, we never have to check for
* tasks blocked within RCU read-side critical sections.
@@ -1170,7 +1166,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
* Returns zero if all is well, a negated errno otherwise.
*/
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
- struct rcu_node *rnp)
+ struct rcu_node *rnp)
{
int rnp_index = rnp - &rsp->node[0];
unsigned long flags;
@@ -1180,7 +1176,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
if (&rcu_preempt_state != rsp)
return 0;
- if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
+ if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
return 0;
rsp->boost = 1;
@@ -1273,7 +1269,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
struct task_struct *t = rnp->boost_kthread_task;
- unsigned long mask = rnp->qsmaskinit;
+ unsigned long mask = rcu_rnp_online_cpus(rnp);
cpumask_var_t cm;
int cpu;
@@ -1945,7 +1941,8 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
rhp = ACCESS_ONCE(rdp->nocb_follower_head);
/* Having no rcuo kthread but CBs after scheduler starts is bad! */
- if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
+ if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp &&
+ rcu_scheduler_fully_active) {
/* RCU callback enqueued before CPU first came online??? */
pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
cpu, rhp->func);
@@ -2392,18 +2389,8 @@ void __init rcu_init_nohz(void)
pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
for_each_rcu_flavor(rsp) {
- for_each_cpu(cpu, rcu_nocb_mask) {
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-
- /*
- * If there are early callbacks, they will need
- * to be moved to the nocb lists.
- */
- WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
- &rdp->nxtlist &&
- rdp->nxttail[RCU_NEXT_TAIL] != NULL);
- init_nocb_callback_list(rdp);
- }
+ for_each_cpu(cpu, rcu_nocb_mask)
+ init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
rcu_organize_nocb_kthreads(rsp);
}
}
@@ -2540,6 +2527,16 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
if (!rcu_is_nocb_cpu(rdp->cpu))
return false;
+ /* If there are early-boot callbacks, move them to nocb lists. */
+ if (rdp->nxtlist) {
+ rdp->nocb_head = rdp->nxtlist;
+ rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
+ atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
+ atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
+ rdp->nxtlist = NULL;
+ rdp->qlen = 0;
+ rdp->qlen_lazy = 0;
+ }
rdp->nxttail[RCU_NEXT_TAIL] = NULL;
return true;
}
@@ -2763,7 +2760,8 @@ static void rcu_sysidle_exit(int irq)
/*
* Check to see if the current CPU is idle. Note that usermode execution
- * does not count as idle. The caller must have disabled interrupts.
+ * does not count as idle. The caller must have disabled interrupts,
+ * and must be running on tick_do_timer_cpu.
*/
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
unsigned long *maxj)
@@ -2784,8 +2782,8 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
if (!*isidle || rdp->rsp != rcu_state_p ||
cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
return;
- if (rcu_gp_in_progress(rdp->rsp))
- WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+ /* Verify affinity of current kthread. */
+ WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
/* Pick up current idle and NMI-nesting counter and check. */
cur = atomic_read(&rdtp->dynticks_idle);
@@ -3068,11 +3066,10 @@ static void rcu_bind_gp_kthread(void)
return;
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
cpu = tick_do_timer_cpu;
- if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
+ if (cpu >= 0 && cpu < nr_cpu_ids)
set_cpus_allowed_ptr(current, cpumask_of(cpu));
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
- if (!is_housekeeping_cpu(raw_smp_processor_id()))
- housekeeping_affine(current);
+ housekeeping_affine(current);
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
}
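
The two-phase expedited setup above splits the old single-pass sync_rcu_preempt_exp_init() so that the ->expmask bits are fully set (phase 1) before anything is allowed to start clearing them (phase 2). The core of phase 1 is the walk from a leaf toward the root that stops at the first ancestor already carrying the bit. A minimal user-space sketch of that upward walk, using POSIX threads; the node layout and names are illustrative, not kernel symbols:

    #include <pthread.h>

    struct node {
        pthread_mutex_t lock;
        struct node *parent;
        unsigned long expmask;   /* child bits that still need attention */
        unsigned long grpmask;   /* this node's bit in its parent's expmask */
    };

    /* Mark a leaf and propagate its bit upward, stopping at the first
     * ancestor that already has the bit set (someone else got there first). */
    static void mark_and_propagate(struct node *leaf)
    {
        struct node *np = leaf;
        unsigned long mask;

        pthread_mutex_lock(&leaf->lock);
        leaf->expmask = 1;
        while (np->parent) {
            mask = np->grpmask;
            np = np->parent;
            pthread_mutex_lock(&np->lock);
            if (np->expmask & mask) {
                pthread_mutex_unlock(&np->lock);
                break;                  /* already marked further up */
            }
            np->expmask |= mask;
            pthread_mutex_unlock(&np->lock);
        }
        pthread_mutex_unlock(&leaf->lock);
    }

Because every caller locks child before parent, the lock ordering stays consistent and the walk cannot deadlock; the early break keeps the cost proportional to the unmarked part of the path.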
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index fbb6240509ea..f92361efd0f5 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -283,8 +283,8 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
seq_puts(m, "\n");
level = rnp->level;
}
- seq_printf(m, "%lx/%lx %c%c>%c %d:%d ^%d ",
- rnp->qsmask, rnp->qsmaskinit,
+ seq_printf(m, "%lx/%lx->%lx %c%c>%c %d:%d ^%d ",
+ rnp->qsmask, rnp->qsmaskinit, rnp->qsmaskinitnext,
".G"[rnp->gp_tasks != NULL],
".E"[rnp->exp_tasks != NULL],
".T"[!list_empty(&rnp->blkd_tasks)],
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index e0d31a345ee6..1f133350da01 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -62,6 +62,63 @@ MODULE_ALIAS("rcupdate");
module_param(rcu_expedited, int, 0);
+#ifndef CONFIG_TINY_RCU
+
+static atomic_t rcu_expedited_nesting =
+ ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
+
+/*
+ * Should normal grace-period primitives be expedited? Intended for
+ * use within RCU. Note that this function takes the rcu_expedited
+ * sysfs/boot variable into account as well as the rcu_expedite_gp()
+ * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
+ * returns false is a -really- bad idea.
+ */
+bool rcu_gp_is_expedited(void)
+{
+ return rcu_expedited || atomic_read(&rcu_expedited_nesting);
+}
+EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
+
+/**
+ * rcu_expedite_gp - Expedite future RCU grace periods
+ *
+ * After a call to this function, future calls to synchronize_rcu() and
+ * friends act as if the corresponding synchronize_rcu_expedited() function
+ * had instead been called.
+ */
+void rcu_expedite_gp(void)
+{
+ atomic_inc(&rcu_expedited_nesting);
+}
+EXPORT_SYMBOL_GPL(rcu_expedite_gp);
+
+/**
+ * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
+ *
+ * Undo a prior call to rcu_expedite_gp(). If all prior calls to
+ * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
+ * and if the rcu_expedited sysfs/boot parameter is not set, then all
+ * subsequent calls to synchronize_rcu() and friends will return to
+ * their normal non-expedited behavior.
+ */
+void rcu_unexpedite_gp(void)
+{
+ atomic_dec(&rcu_expedited_nesting);
+}
+EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
+
+#endif /* #ifndef CONFIG_TINY_RCU */
+
+/*
+ * Inform RCU of the end of the in-kernel boot sequence.
+ */
+void rcu_end_inkernel_boot(void)
+{
+ if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
+ rcu_unexpedite_gp();
+}
+
#ifdef CONFIG_PREEMPT_RCU
/*
@@ -199,16 +256,13 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-struct rcu_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-/*
- * Awaken the corresponding synchronize_rcu() instance now that a
- * grace period has elapsed.
+/**
+ * wakeme_after_rcu() - Callback function to awaken a task after grace period
+ * @head: Pointer to rcu_head member within rcu_synchronize structure
+ *
+ * Awaken the corresponding task now that a grace period has elapsed.
*/
-static void wakeme_after_rcu(struct rcu_head *head)
+void wakeme_after_rcu(struct rcu_head *head)
{
struct rcu_synchronize *rcu;
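
The rcu_expedite_gp()/rcu_unexpedite_gp() pair added above is just a nesting counter consulted alongside the existing rcu_expedited knob; note the comment's warning that looping on rcu_unexpedite_gp() until rcu_gp_is_expedited() returns false is a bad idea precisely because the sysfs/boot knob also feeds the result. A small user-space sketch of the same idea, assuming C11 atomics; none of these names are kernel symbols:

    #include <stdatomic.h>
    #include <stdbool.h>

    static int expedited_knob;                                  /* boot/sysfs knob analogue */
    static atomic_int expedited_nesting = ATOMIC_VAR_INIT(1);   /* start expedited during "boot" */

    static bool gp_is_expedited(void)
    {
        return expedited_knob || atomic_load(&expedited_nesting);
    }

    static void expedite_gp(void)
    {
        atomic_fetch_add(&expedited_nesting, 1);
    }

    static void unexpedite_gp(void)
    {
        atomic_fetch_sub(&expedited_nesting, 1);
    }

    static void end_inkernel_boot(void)
    {
        unexpedite_gp();        /* drop the boot-time reference */
    }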
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 5925f5ae8dff..d20c85d9f8c0 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -387,8 +387,9 @@ void ctrl_alt_del(void)
}
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
+static const char reboot_cmd[] = "/sbin/reboot";
-static int __orderly_poweroff(bool force)
+static int run_cmd(const char *cmd)
{
char **argv;
static char *envp[] = {
@@ -397,8 +398,7 @@ static int __orderly_poweroff(bool force)
NULL
};
int ret;
-
- argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
+ argv = argv_split(GFP_KERNEL, cmd, NULL);
if (argv) {
ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
argv_free(argv);
@@ -406,8 +406,33 @@ static int __orderly_poweroff(bool force)
ret = -ENOMEM;
}
+ return ret;
+}
+
+static int __orderly_reboot(void)
+{
+ int ret;
+
+ ret = run_cmd(reboot_cmd);
+
+ if (ret) {
+ pr_warn("Failed to start orderly reboot: forcing the issue\n");
+ emergency_sync();
+ kernel_restart(NULL);
+ }
+
+ return ret;
+}
+
+static int __orderly_poweroff(bool force)
+{
+ int ret;
+
+ ret = run_cmd(poweroff_cmd);
+
if (ret && force) {
pr_warn("Failed to start orderly shutdown: forcing the issue\n");
+
/*
* I guess this should try to kick off some daemon to sync and
* poweroff asap. Or not even bother syncing if we're doing an
@@ -436,15 +461,33 @@ static DECLARE_WORK(poweroff_work, poweroff_work_func);
* This may be called from any context to trigger a system shutdown.
* If the orderly shutdown fails, it will force an immediate shutdown.
*/
-int orderly_poweroff(bool force)
+void orderly_poweroff(bool force)
{
if (force) /* do not override the pending "true" */
poweroff_force = true;
schedule_work(&poweroff_work);
- return 0;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
+static void reboot_work_func(struct work_struct *work)
+{
+ __orderly_reboot();
+}
+
+static DECLARE_WORK(reboot_work, reboot_work_func);
+
+/**
+ * orderly_reboot - Trigger an orderly system reboot
+ *
+ * This may be called from any context to trigger a system reboot.
+ * If the orderly reboot fails, it will force an immediate reboot.
+ */
+void orderly_reboot(void)
+{
+ schedule_work(&reboot_work);
+}
+EXPORT_SYMBOL_GPL(orderly_reboot);
+
static int __init reboot_setup(char *str)
{
for (;;) {
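
__orderly_reboot() and __orderly_poweroff() now share run_cmd(): try the user-space helper first, and only fall back to the forced in-kernel path when the helper cannot be started. A rough user-space analogue of that try-then-force shape, using fork()/execv(); paths and names are illustrative:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/wait.h>

    static int run_cmd(char *const argv[])
    {
        pid_t pid = fork();
        int status;

        if (pid < 0)
            return -1;
        if (pid == 0) {
            execv(argv[0], argv);
            _exit(127);                         /* exec failed */
        }
        if (waitpid(pid, &status, 0) < 0)
            return -1;
        return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? 0 : -1;
    }

    static void forced_restart(void)            /* emergency_sync()+kernel_restart() stand-in */
    {
        fprintf(stderr, "orderly reboot failed, forcing the issue\n");
    }

    static void orderly_reboot_demo(void)
    {
        char *argv[] = { "/sbin/reboot", NULL };

        if (run_cmd(argv))
            forced_restart();
    }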
diff --git a/kernel/relay.c b/kernel/relay.c
index 5a56d3c8dc03..e9dbaeb8fd65 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -407,7 +407,7 @@ static inline void relay_set_buf_dentry(struct rchan_buf *buf,
struct dentry *dentry)
{
buf->dentry = dentry;
- buf->dentry->d_inode->i_size = buf->early_bytes;
+ d_inode(buf->dentry)->i_size = buf->early_bytes;
}
static struct dentry *relay_create_buf_file(struct rchan *chan,
@@ -733,7 +733,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
buf->padding[old_subbuf] = buf->prev_padding;
buf->subbufs_produced++;
if (buf->dentry)
- buf->dentry->d_inode->i_size +=
+ d_inode(buf->dentry)->i_size +=
buf->chan->subbuf_size -
buf->padding[old_subbuf];
else
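
The relay changes are part of the tree-wide switch from open-coded dentry->d_inode dereferences to the d_inode() accessor, so the dentry-to-inode lookup is funneled through one helper. The pattern, with stand-in types rather than the real VFS structures:

    struct inode  { long long i_size; };
    struct dentry { struct inode *d_inode; };

    /* One accessor instead of scattered ->d_inode dereferences: if the
     * representation ever changes, only this helper has to follow. */
    static inline struct inode *d_inode(const struct dentry *dentry)
    {
        return dentry->d_inode;
    }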
diff --git a/kernel/resource.c b/kernel/resource.c
index 19f2357dfda3..90552aab5f2d 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1034,8 +1034,6 @@ resource_size_t resource_alignment(struct resource *res)
*
* request_region creates a new busy region.
*
- * check_region returns non-zero if the area is already busy.
- *
* release_region releases a matching busy region.
*/
@@ -1098,36 +1096,6 @@ struct resource * __request_region(struct resource *parent,
EXPORT_SYMBOL(__request_region);
/**
- * __check_region - check if a resource region is busy or free
- * @parent: parent resource descriptor
- * @start: resource start address
- * @n: resource region size
- *
- * Returns 0 if the region is free at the moment it is checked,
- * returns %-EBUSY if the region is busy.
- *
- * NOTE:
- * This function is deprecated because its use is racy.
- * Even if it returns 0, a subsequent call to request_region()
- * may fail because another driver etc. just allocated the region.
- * Do NOT use it. It will be removed from the kernel.
- */
-int __check_region(struct resource *parent, resource_size_t start,
- resource_size_t n)
-{
- struct resource * res;
-
- res = __request_region(parent, start, n, "check-region", 0);
- if (!res)
- return -EBUSY;
-
- release_resource(res);
- free_resource(res);
- return 0;
-}
-EXPORT_SYMBOL(__check_region);
-
-/**
* __release_region - release a previously reserved resource region
* @parent: parent resource descriptor
* @start: resource start address
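
__check_region() is removed because "check, then request" is inherently racy: another driver can claim the region between the two steps. The only safe interface is a single call that checks and claims under the same lock and reports failure, which is what __request_region() already provides. A small user-space illustration of the difference, with a mutex-protected table standing in for the resource tree:

    #include <pthread.h>
    #include <stdbool.h>

    #define NREGIONS 16

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool busy[NREGIONS];

    /* Check and claim in one critical section; -1 plays the role of -EBUSY. */
    static int try_request_region(int idx)
    {
        int ret = -1;

        pthread_mutex_lock(&table_lock);
        if (!busy[idx]) {
            busy[idx] = true;
            ret = 0;
        }
        pthread_mutex_unlock(&table_lock);
        return ret;
    }

    static void release_region_demo(int idx)
    {
        pthread_mutex_lock(&table_lock);
        busy[idx] = false;
        pthread_mutex_unlock(&table_lock);
    }

A separate "is it free?" helper built on the same lock would still be useless, because its answer can already be stale by the time the caller acts on it.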
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2f7937ee9e3a..57bd333bc4ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq_clock_skip_update(rq, true);
}
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
- struct task_migration_notifier tmn;
-
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
- tmn.task = p;
- tmn.from_cpu = task_cpu(p);
- tmn.to_cpu = new_cpu;
-
- atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}
__set_task_cpu(p, new_cpu);
@@ -2853,7 +2838,7 @@ asmlinkage __visible void __sched schedule_user(void)
* we find a better solution.
*
* NB: There are buggy callers of this function. Ideally we
- * should warn if prev_state != IN_USER, but that will trigger
+ * should warn if prev_state != CONTEXT_USER, but that will trigger
* too frequently to make sense yet.
*/
enum ctx_state prev_state = exception_enter();
@@ -3315,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p,
/* Actually do priority change: must hold pi & rq lock. */
static void __setscheduler(struct rq *rq, struct task_struct *p,
- const struct sched_attr *attr)
+ const struct sched_attr *attr, bool keep_boost)
{
__setscheduler_params(p, attr);
/*
- * If we get here, there was no pi waiters boosting the
- * task. It is safe to use the normal prio.
+ * Keep a potential priority boosting if called from
+ * sched_setscheduler().
*/
- p->prio = normal_prio(p);
+ if (keep_boost)
+ p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+ else
+ p->prio = normal_prio(p);
if (dl_prio(p->prio))
p->sched_class = &dl_sched_class;
@@ -3423,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
MAX_RT_PRIO - 1 - attr->sched_priority;
int retval, oldprio, oldpolicy = -1, queued, running;
- int policy = attr->sched_policy;
+ int new_effective_prio, policy = attr->sched_policy;
unsigned long flags;
const struct sched_class *prev_class;
struct rq *rq;
@@ -3605,15 +3593,14 @@ change:
oldprio = p->prio;
/*
- * Special case for priority boosted tasks.
- *
- * If the new priority is lower or equal (user space view)
- * than the current (boosted) priority, we just store the new
+ * Take priority boosted tasks into account. If the new
+ * effective priority is unchanged, we just store the new
* normal parameters and do not touch the scheduler class and
* the runqueue. This will be done when the task deboost
* itself.
*/
- if (rt_mutex_check_prio(p, newprio)) {
+ new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+ if (new_effective_prio == oldprio) {
__setscheduler_params(p, attr);
task_rq_unlock(rq, p, &flags);
return 0;
@@ -3627,7 +3614,7 @@ change:
put_prev_task(rq, p);
prev_class = p->sched_class;
- __setscheduler(rq, p, attr);
+ __setscheduler(rq, p, attr, true);
if (running)
p->sched_class->set_curr_task(rq);
@@ -7012,27 +6999,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
unsigned long flags;
long cpu = (long)hcpu;
struct dl_bw *dl_b;
+ bool overflow;
+ int cpus;
- switch (action & ~CPU_TASKS_FROZEN) {
+ switch (action) {
case CPU_DOWN_PREPARE:
- /* explicitly allow suspend */
- if (!(action & CPU_TASKS_FROZEN)) {
- bool overflow;
- int cpus;
-
- rcu_read_lock_sched();
- dl_b = dl_bw_of(cpu);
+ rcu_read_lock_sched();
+ dl_b = dl_bw_of(cpu);
- raw_spin_lock_irqsave(&dl_b->lock, flags);
- cpus = dl_bw_cpus(cpu);
- overflow = __dl_overflow(dl_b, cpus, 0, 0);
- raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+ raw_spin_lock_irqsave(&dl_b->lock, flags);
+ cpus = dl_bw_cpus(cpu);
+ overflow = __dl_overflow(dl_b, cpus, 0, 0);
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
- rcu_read_unlock_sched();
+ rcu_read_unlock_sched();
- if (overflow)
- return notifier_from_errno(-EBUSY);
- }
+ if (overflow)
+ return notifier_from_errno(-EBUSY);
cpuset_update_active_cpus(false);
break;
case CPU_DOWN_PREPARE_FROZEN:
@@ -7361,7 +7344,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
queued = task_on_rq_queued(p);
if (queued)
dequeue_task(rq, p, 0);
- __setscheduler(rq, p, &attr);
+ __setscheduler(rq, p, &attr, false);
if (queued) {
enqueue_task(rq, p, 0);
resched_curr(rq);
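
The scheduler change replaces the old "is the new priority lower or equal?" test with an explicit effective-priority computation: with priority inheritance, the priority a task should actually run at is the stronger of its own normal priority and that of its top rt_mutex waiter, and sched_setscheduler() must not silently drop an active boost. In the kernel's convention a smaller number means higher priority, so the rule reduces to a min(); an illustrative helper, not the rtmutex API:

    /* Effective priority under PI: keep the boost if the top waiter is
     * more urgent (numerically smaller) than the task's own priority. */
    static int effective_prio(int normal_prio, int top_waiter_prio, int has_waiter)
    {
        if (has_waiter && top_waiter_prio < normal_prio)
            return top_waiter_prio;
        return normal_prio;
    }

__sched_setscheduler() then compares this effective value against the current prio and skips the requeue entirely when nothing would change.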
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 4d207d2abcbd..fefcb1fa5160 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void)
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
- unsigned int broadcast;
bool reflect;
/*
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void)
goto exit_idle;
}
- broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
-
- /*
- * Tell the time framework to switch to a broadcast timer
- * because our local timer will be shutdown. If a local timer
- * is used from another cpu as a broadcast timer, this call may
- * fail if it is not available
- */
- if (broadcast && tick_broadcast_enter())
- goto use_default;
-
/* Take note of the planned idle state. */
idle_set_state(this_rq(), &drv->states[next_state]);
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void)
/* The cpu is no longer idle or about to enter idle. */
idle_set_state(this_rq(), NULL);
- if (broadcast)
- tick_broadcast_exit();
+ if (entered_state == -EBUSY)
+ goto use_default;
/*
* Give the governor an opportunity to reflect on the outcome
@@ -209,6 +197,8 @@ use_default:
goto exit_idle;
}
+DEFINE_PER_CPU(bool, cpu_dead_idle);
+
/*
* Generic idle loop implementation
*
@@ -233,8 +223,13 @@ static void cpu_idle_loop(void)
check_pgt_cache();
rmb();
- if (cpu_is_offline(smp_processor_id()))
+ if (cpu_is_offline(smp_processor_id())) {
+ rcu_cpu_notify(NULL, CPU_DYING_IDLE,
+ (void *)(long)smp_processor_id());
+ smp_mb(); /* all activity before dead. */
+ this_cpu_write(cpu_dead_idle, true);
arch_cpu_idle_dead();
+ }
local_irq_disable();
arch_cpu_idle_enter();
diff --git a/kernel/signal.c b/kernel/signal.c
index a390499943e4..d51c5ddd855c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2992,11 +2992,9 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
- (task_pid_vnr(current) != pid)) {
- /* We used to allow any < 0 si_code */
- WARN_ON_ONCE(info->si_code < 0);
+ (task_pid_vnr(current) != pid))
return -EPERM;
- }
+
info->si_signo = sig;
/* POSIX.1b doesn't mention process groups. */
@@ -3041,12 +3039,10 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
- if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
- (task_pid_vnr(current) != pid)) {
- /* We used to allow any < 0 si_code */
- WARN_ON_ONCE(info->si_code < 0);
+ if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+ (task_pid_vnr(current) != pid))
return -EPERM;
- }
+
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
diff --git a/kernel/smp.c b/kernel/smp.c
index f38a1e692259..07854477c164 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -19,7 +19,7 @@
enum {
CSD_FLAG_LOCK = 0x01,
- CSD_FLAG_WAIT = 0x02,
+ CSD_FLAG_SYNCHRONOUS = 0x02,
};
struct call_function_data {
@@ -107,7 +107,7 @@ void __init call_function_init(void)
*/
static void csd_lock_wait(struct call_single_data *csd)
{
- while (csd->flags & CSD_FLAG_LOCK)
+ while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK)
cpu_relax();
}
@@ -121,19 +121,17 @@ static void csd_lock(struct call_single_data *csd)
* to ->flags with any subsequent assignments to other
* fields of the specified call_single_data structure:
*/
- smp_mb();
+ smp_wmb();
}
static void csd_unlock(struct call_single_data *csd)
{
- WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
+ WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
/*
* ensure we're all done before releasing data:
*/
- smp_mb();
-
- csd->flags &= ~CSD_FLAG_LOCK;
+ smp_store_release(&csd->flags, 0);
}
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
@@ -144,13 +142,16 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
* ->func, ->info, and ->flags set.
*/
static int generic_exec_single(int cpu, struct call_single_data *csd,
- smp_call_func_t func, void *info, int wait)
+ smp_call_func_t func, void *info)
{
- struct call_single_data csd_stack = { .flags = 0 };
- unsigned long flags;
-
-
if (cpu == smp_processor_id()) {
+ unsigned long flags;
+
+ /*
+ * We can unlock early even for the synchronous on-stack case,
+ * since we're doing this from the same CPU.
+ */
+ csd_unlock(csd);
local_irq_save(flags);
func(info);
local_irq_restore(flags);
@@ -158,24 +159,14 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
}
- if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+ if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
+ csd_unlock(csd);
return -ENXIO;
-
-
- if (!csd) {
- csd = &csd_stack;
- if (!wait)
- csd = this_cpu_ptr(&csd_data);
}
- csd_lock(csd);
-
csd->func = func;
csd->info = info;
- if (wait)
- csd->flags |= CSD_FLAG_WAIT;
-
/*
* The list addition should be visible before sending the IPI
* handler locks the list to pull the entry off it because of
@@ -190,9 +181,6 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
arch_send_call_function_single_ipi(cpu);
- if (wait)
- csd_lock_wait(csd);
-
return 0;
}
@@ -250,8 +238,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
}
llist_for_each_entry_safe(csd, csd_next, entry, llist) {
- csd->func(csd->info);
- csd_unlock(csd);
+ smp_call_func_t func = csd->func;
+ void *info = csd->info;
+
+ /* Do we wait until *after* callback? */
+ if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+ func(info);
+ csd_unlock(csd);
+ } else {
+ csd_unlock(csd);
+ func(info);
+ }
}
/*
@@ -274,6 +271,8 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
int wait)
{
+ struct call_single_data *csd;
+ struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
int this_cpu;
int err;
@@ -292,7 +291,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
&& !oops_in_progress);
- err = generic_exec_single(cpu, NULL, func, info, wait);
+ csd = &csd_stack;
+ if (!wait) {
+ csd = this_cpu_ptr(&csd_data);
+ csd_lock(csd);
+ }
+
+ err = generic_exec_single(cpu, csd, func, info);
+
+ if (wait)
+ csd_lock_wait(csd);
put_cpu();
@@ -321,7 +329,15 @@ int smp_call_function_single_async(int cpu, struct call_single_data *csd)
int err = 0;
preempt_disable();
- err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
+
+ /* We could deadlock if we have to wait here with interrupts disabled! */
+ if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
+ csd_lock_wait(csd);
+
+ csd->flags = CSD_FLAG_LOCK;
+ smp_wmb();
+
+ err = generic_exec_single(cpu, csd, csd->func, csd->info);
preempt_enable();
return err;
@@ -433,6 +449,8 @@ void smp_call_function_many(const struct cpumask *mask,
struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
csd_lock(csd);
+ if (wait)
+ csd->flags |= CSD_FLAG_SYNCHRONOUS;
csd->func = func;
csd->info = info;
llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
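
The smp.c rework hinges on two things visible above: csd_lock_wait()/csd_unlock() become an acquire/release pair so the waiter is guaranteed to see everything the callback wrote, and the flush loop orders the unlock differently for synchronous and asynchronous requests. A user-space sketch with C11 atomics in place of smp_load_acquire()/smp_store_release(); field and flag names mirror the kernel's, but this is not kernel code:

    #include <stdatomic.h>

    enum { CSD_FLAG_LOCK = 0x01, CSD_FLAG_SYNCHRONOUS = 0x02 };

    struct call_single_data {
        atomic_uint flags;
        void (*func)(void *info);
        void *info;
    };

    static void csd_lock_wait(struct call_single_data *csd)
    {
        /* Acquire pairs with the release in csd_unlock(): once the LOCK
         * bit reads clear, the callback's writes are visible here. */
        while (atomic_load_explicit(&csd->flags, memory_order_acquire) & CSD_FLAG_LOCK)
            ;       /* cpu_relax() in the kernel */
    }

    static void csd_unlock(struct call_single_data *csd)
    {
        atomic_store_explicit(&csd->flags, 0, memory_order_release);
    }

    /* Synchronous: run the callback before unlocking, so the waiter cannot
     * return early. Asynchronous: unlock first, so the owner may reuse or
     * free the descriptor while the callback runs. */
    static void flush_one(struct call_single_data *csd)
    {
        void (*func)(void *) = csd->func;
        void *info = csd->info;

        if (atomic_load(&csd->flags) & CSD_FLAG_SYNCHRONOUS) {
            func(info);
            csd_unlock(csd);
        } else {
            csd_unlock(csd);
            func(info);
        }
    }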
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 40190f28db35..c697f73d82d6 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -4,6 +4,7 @@
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -314,3 +315,158 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
+
+static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
+
+/*
+ * Called to poll specified CPU's state, for example, when waiting for
+ * a CPU to come online.
+ */
+int cpu_report_state(int cpu)
+{
+ return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+}
+
+/*
+ * If CPU has died properly, set its state to CPU_UP_PREPARE and
+ * return success. Otherwise, return -EBUSY if the CPU died after
+ * cpu_wait_death() timed out. And yet otherwise again, return -EAGAIN
+ * if cpu_wait_death() timed out and the CPU still hasn't gotten around
+ * to dying. In the latter two cases, the CPU might not be set up
+ * properly, but it is up to the arch-specific code to decide.
+ * Finally, -EIO indicates an unanticipated problem.
+ *
+ * Note that it is permissible to omit this call entirely, as is
+ * done in architectures that do no CPU-hotplug error checking.
+ */
+int cpu_check_up_prepare(int cpu)
+{
+ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
+ return 0;
+ }
+
+ switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {
+
+ case CPU_POST_DEAD:
+
+ /* The CPU died properly, so just start it up again. */
+ atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
+ return 0;
+
+ case CPU_DEAD_FROZEN:
+
+ /*
+ * Timeout during CPU death, so let caller know.
+ * The outgoing CPU completed its processing, but after
+ * cpu_wait_death() timed out and reported the error. The
+ * caller is free to proceed, in which case the state
+ * will be reset properly by cpu_set_state_online().
+ * Proceeding despite this -EBUSY return makes sense
+ * for systems where the outgoing CPUs take themselves
+ * offline, with no post-death manipulation required from
+ * a surviving CPU.
+ */
+ return -EBUSY;
+
+ case CPU_BROKEN:
+
+ /*
+ * The most likely reason we got here is that there was
+ * a timeout during CPU death, and the outgoing CPU never
+ * did complete its processing. This could happen on
+ * a virtualized system if the outgoing VCPU gets preempted
+ * for more than five seconds, and the user attempts to
+ * immediately online that same CPU. Trying again later
+ * might return -EBUSY above, hence -EAGAIN.
+ */
+ return -EAGAIN;
+
+ default:
+
+ /* Should not happen. Famous last words. */
+ return -EIO;
+ }
+}
+
+/*
+ * Mark the specified CPU online.
+ *
+ * Note that it is permissible to omit this call entirely, as is
+ * done in architectures that do no CPU-hotplug error checking.
+ */
+void cpu_set_state_online(int cpu)
+{
+ (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Wait for the specified CPU to exit the idle loop and die.
+ */
+bool cpu_wait_death(unsigned int cpu, int seconds)
+{
+ int jf_left = seconds * HZ;
+ int oldstate;
+ bool ret = true;
+ int sleep_jf = 1;
+
+ might_sleep();
+
+ /* The outgoing CPU will normally get done quite quickly. */
+ if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
+ goto update_state;
+ udelay(5);
+
+ /* But if the outgoing CPU dawdles, wait increasingly long times. */
+ while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
+ schedule_timeout_uninterruptible(sleep_jf);
+ jf_left -= sleep_jf;
+ if (jf_left <= 0)
+ break;
+ sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
+ }
+update_state:
+ oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+ if (oldstate == CPU_DEAD) {
+ /* Outgoing CPU died normally, update state. */
+ smp_mb(); /* atomic_read() before update. */
+ atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
+ } else {
+ /* Outgoing CPU still hasn't died, set state accordingly. */
+ if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+ oldstate, CPU_BROKEN) != oldstate)
+ goto update_state;
+ ret = false;
+ }
+ return ret;
+}
+
+/*
+ * Called by the outgoing CPU to report its successful death. Return
+ * false if this report follows the surviving CPU's timing out.
+ *
+ * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
+ * timed out. This approach allows architectures to omit calls to
+ * cpu_check_up_prepare() and cpu_set_state_online() without defeating
+ * the next cpu_wait_death()'s polling loop.
+ */
+bool cpu_report_death(void)
+{
+ int oldstate;
+ int newstate;
+ int cpu = smp_processor_id();
+
+ do {
+ oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+ if (oldstate != CPU_BROKEN)
+ newstate = CPU_DEAD;
+ else
+ newstate = CPU_DEAD_FROZEN;
+ } while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+ oldstate, newstate) != oldstate);
+ return newstate == CPU_DEAD;
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
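
cpu_wait_death()/cpu_report_death() coordinate through a single per-CPU atomic state word: the surviving CPU polls with increasing sleeps and marks the state CPU_BROKEN if it gives up, while the dying CPU advances the state with a cmpxchg loop so a late death after a timeout becomes CPU_DEAD_FROZEN rather than CPU_DEAD. The dying side's loop, as a user-space sketch with one global state standing in for the per-CPU variable; enum values and names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum cpu_state { STATE_ONLINE, STATE_BROKEN, STATE_DEAD, STATE_DEAD_FROZEN, STATE_POST_DEAD };

    static _Atomic int hotplug_state = ATOMIC_VAR_INIT(STATE_ONLINE);

    /* Returns false if the waiter had already timed out and marked us broken. */
    static bool report_death(void)
    {
        int oldstate, newstate;

        do {
            oldstate = atomic_load(&hotplug_state);
            newstate = (oldstate == STATE_BROKEN) ? STATE_DEAD_FROZEN : STATE_DEAD;
        } while (!atomic_compare_exchange_strong(&hotplug_state, &oldstate, newstate));

        return newstate == STATE_DEAD;
    }

The cmpxchg loop is what lets the two sides race safely: whichever write lands first decides whether this counts as a normal death or a late one.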
diff --git a/kernel/sys.c b/kernel/sys.c
index a03d9cd23ed7..a4e372b798a5 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -325,6 +325,7 @@ out_unlock:
* SMP: There are not races, the GIDs are checked only by filesystem
* operations (as far as semantic preservation is concerned).
*/
+#ifdef CONFIG_MULTIUSER
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
struct user_namespace *ns = current_user_ns();
@@ -815,6 +816,7 @@ change_okay:
commit_creds(new);
return old_fsgid;
}
+#endif /* CONFIG_MULTIUSER */
/**
* sys_getpid - return the thread group id of the current process
@@ -1647,14 +1649,13 @@ SYSCALL_DEFINE1(umask, int, mask)
return mask;
}
-static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
+static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
struct fd exe;
+ struct file *old_exe, *exe_file;
struct inode *inode;
int err;
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
exe = fdget(fd);
if (!exe.file)
return -EBADF;
@@ -1678,15 +1679,22 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
/*
* Forbid mm->exe_file change if old file still mapped.
*/
+ exe_file = get_mm_exe_file(mm);
err = -EBUSY;
- if (mm->exe_file) {
+ if (exe_file) {
struct vm_area_struct *vma;
- for (vma = mm->mmap; vma; vma = vma->vm_next)
- if (vma->vm_file &&
- path_equal(&vma->vm_file->f_path,
- &mm->exe_file->f_path))
- goto exit;
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (!vma->vm_file)
+ continue;
+ if (path_equal(&vma->vm_file->f_path,
+ &exe_file->f_path))
+ goto exit_err;
+ }
+
+ up_read(&mm->mmap_sem);
+ fput(exe_file);
}
/*
@@ -1700,10 +1708,18 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
goto exit;
err = 0;
- set_mm_exe_file(mm, exe.file); /* this grabs a reference to exe.file */
+ /* set the new file, lockless */
+ get_file(exe.file);
+ old_exe = xchg(&mm->exe_file, exe.file);
+ if (old_exe)
+ fput(old_exe);
exit:
fdput(exe);
return err;
+exit_err:
+ up_read(&mm->mmap_sem);
+ fput(exe_file);
+ goto exit;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
@@ -1838,10 +1854,9 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
}
- down_write(&mm->mmap_sem);
if (prctl_map.exe_fd != (u32)-1)
- error = prctl_set_mm_exe_file_locked(mm, prctl_map.exe_fd);
- downgrade_write(&mm->mmap_sem);
+ error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
+ down_read(&mm->mmap_sem);
if (error)
goto out;
@@ -1907,12 +1922,8 @@ static int prctl_set_mm(int opt, unsigned long addr,
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (opt == PR_SET_MM_EXE_FILE) {
- down_write(&mm->mmap_sem);
- error = prctl_set_mm_exe_file_locked(mm, (unsigned int)addr);
- up_write(&mm->mmap_sem);
- return error;
- }
+ if (opt == PR_SET_MM_EXE_FILE)
+ return prctl_set_mm_exe_file(mm, (unsigned int)addr);
if (addr >= TASK_SIZE || addr < mmap_min_addr)
return -EINVAL;
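
prctl_set_mm_exe_file() no longer needs mmap_sem held for write to swap mm->exe_file: the new file gets its own reference and is published with xchg(), and the old file's reference is dropped afterwards. The pointer-swap part of that pattern in user-space C11, with a heap string standing in for struct file; note that the kernel side is only safe because readers take their own reference via get_mm_exe_file(), which this sketch does not model:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    static _Atomic(char *) exe_path;    /* stands in for mm->exe_file */

    static void set_exe_path(const char *new_path)
    {
        char *newp = strdup(new_path);                  /* "get_file(exe.file)" analogue */
        char *oldp = atomic_exchange(&exe_path, newp);

        free(oldp);                                     /* "fput(old_exe)"; free(NULL) is a no-op */
    }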
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 5adcb0ae3a58..7995ef5868d8 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -159,6 +159,20 @@ cond_syscall(sys_uselib);
cond_syscall(sys_fadvise64);
cond_syscall(sys_fadvise64_64);
cond_syscall(sys_madvise);
+cond_syscall(sys_setuid);
+cond_syscall(sys_setregid);
+cond_syscall(sys_setgid);
+cond_syscall(sys_setreuid);
+cond_syscall(sys_setresuid);
+cond_syscall(sys_getresuid);
+cond_syscall(sys_setresgid);
+cond_syscall(sys_getresgid);
+cond_syscall(sys_setgroups);
+cond_syscall(sys_getgroups);
+cond_syscall(sys_setfsuid);
+cond_syscall(sys_setfsgid);
+cond_syscall(sys_capget);
+cond_syscall(sys_capset);
/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);
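
The new cond_syscall() entries make the CONFIG_MULTIUSER syscalls optional: when the real implementation is compiled out, a weak fallback is linked in and the syscall simply fails. Conceptually this is the weak-symbol trick below, shown with the GCC/Clang attribute for illustration; the kernel spells it through the cond_syscall() macro and its sys_ni_syscall() stub:

    #include <errno.h>

    /* Weak default: if a strong sys_foo() exists elsewhere, the linker
     * picks that one; otherwise callers get this -ENOSYS stub. */
    __attribute__((weak)) long sys_foo(void)
    {
        return -ENOSYS;
    }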
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ce410bb9f2e1..2082b1a88fb9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
+#include <linux/aio.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -92,11 +93,9 @@
#include <linux/nmi.h>
#endif
-
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
-extern int max_threads;
extern int suid_dumpable;
#ifdef CONFIG_COREDUMP
extern int core_uses_pid;
@@ -709,10 +708,10 @@ static struct ctl_table kern_table[] = {
#endif
{
.procname = "threads-max",
- .data = &max_threads,
+ .data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = sysctl_max_threads,
},
{
.procname = "random",
@@ -846,7 +845,7 @@ static struct ctl_table kern_table[] = {
.data = &watchdog_user_enabled,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = proc_dowatchdog,
+ .proc_handler = proc_watchdog,
.extra1 = &zero,
.extra2 = &one,
},
@@ -855,11 +854,33 @@ static struct ctl_table kern_table[] = {
.data = &watchdog_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dowatchdog,
+ .proc_handler = proc_watchdog_thresh,
.extra1 = &zero,
.extra2 = &sixty,
},
{
+ .procname = "nmi_watchdog",
+ .data = &nmi_watchdog_enabled,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = proc_nmi_watchdog,
+ .extra1 = &zero,
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+ .extra2 = &one,
+#else
+ .extra2 = &zero,
+#endif
+ },
+ {
+ .procname = "soft_watchdog",
+ .data = &soft_watchdog_enabled,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = proc_soft_watchdog,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
.procname = "softlockup_panic",
.data = &softlockup_panic,
.maxlen = sizeof(int),
@@ -879,15 +900,6 @@ static struct ctl_table kern_table[] = {
.extra2 = &one,
},
#endif /* CONFIG_SMP */
- {
- .procname = "nmi_watchdog",
- .data = &watchdog_user_enabled,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = proc_dowatchdog,
- .extra1 = &zero,
- .extra2 = &one,
- },
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
{
@@ -1321,6 +1333,15 @@ static struct ctl_table vm_table[] = {
.extra1 = &min_extfrag_threshold,
.extra2 = &max_extfrag_threshold,
},
+ {
+ .procname = "compact_unevictable_allowed",
+ .data = &sysctl_compact_unevictable_allowed,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
#endif /* CONFIG_COMPACTION */
{
@@ -1960,7 +1981,15 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
int write, void *data)
{
if (write) {
- *valp = *negp ? -*lvalp : *lvalp;
+ if (*negp) {
+ if (*lvalp > (unsigned long) INT_MAX + 1)
+ return -EINVAL;
+ *valp = -*lvalp;
+ } else {
+ if (*lvalp > (unsigned long) INT_MAX)
+ return -EINVAL;
+ *valp = *lvalp;
+ }
} else {
int val = *valp;
if (val < 0) {
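
do_proc_dointvec_conv() previously truncated silently when a value written to a proc file did not fit in an int; the new code rejects out-of-range magnitudes, taking care to allow one extra on the negative side so INT_MIN itself remains writable. The same check as a standalone helper (illustrative names, -1 in place of -EINVAL):

    #include <limits.h>

    static int store_int(int *valp, unsigned long magnitude, int negative)
    {
        if (negative) {
            if (magnitude > (unsigned long)INT_MAX + 1)
                return -1;
            *valp = (int)-(long long)magnitude;     /* covers INT_MIN .. 0 */
            return 0;
        }
        if (magnitude > (unsigned long)INT_MAX)
            return -1;
        *valp = (int)magnitude;
        return 0;
    }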
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 25d942d1da27..637a09461c1d 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
/* Transition with new state-specific callbacks */
switch (state) {
case CLOCK_EVT_STATE_DETACHED:
- /*
- * This is an internal state, which is guaranteed to go from
- * SHUTDOWN to DETACHED. No driver interaction required.
- */
- return 0;
+ /* The clockevent device is getting replaced. Shut it down. */
case CLOCK_EVT_STATE_SHUTDOWN:
return dev->set_state_shutdown(dev);
@@ -440,7 +436,7 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
mutex_unlock(&clockevents_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(clockevents_unbind);
+EXPORT_SYMBOL_GPL(clockevents_unbind_device);
/* Sanity check of state transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a5da09c899dd..3b9a48ae153a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -432,6 +432,14 @@ config UPROBE_EVENT
This option is required if you plan to use perf-probe subcommand
of perf tools on user space applications.
+config BPF_EVENTS
+ depends on BPF_SYSCALL
+ depends on KPROBE_EVENT
+ bool
+ default y
+ help
+ This allows the user to attach BPF programs to kprobe events.
+
config PROBE_EVENTS
def_bool n
@@ -599,6 +607,34 @@ config RING_BUFFER_STARTUP_TEST
If unsure, say N
+config TRACE_ENUM_MAP_FILE
+ bool "Show enum mappings for trace events"
+ depends on TRACING
+ help
+ The "print fmt" of the trace events will show the enum names instead
+ of their values. This can cause problems for user space tools that
+ use this string to parse the raw data as user space does not know
+ how to convert the string to its value.
+
+ To fix this, there's a special macro in the kernel that can be used
+ to convert the enum into its value. If this macro is used, then the
+ print fmt strings will have the enums converted to their values.
+
+ If something does not get converted properly, this option can be
+ used to show what enums the kernel tried to convert.
+
+ This option is for debugging the enum conversions. A file is created
+ in the tracing directory called "enum_map" that will show the enum
+ names matched with their values and what trace event system they
+ belong to.
+
+ Normally, the mapping of the strings to values will be freed after
+ boot up or module load. With this option, they will not be freed, as
+ they are needed for the "enum_map" file. Enabling this option will
+ increase the memory footprint of the running kernel.
+
+ If unsure, say N
+
endif # FTRACE
endif # TRACING_SUPPORT
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 98f26588255e..9b1044e936a6 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
+obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
obj-$(CONFIG_TRACEPOINTS) += power-traces.o
ifeq ($(CONFIG_PM),y)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
new file mode 100644
index 000000000000..2d56ce501632
--- /dev/null
+++ b/kernel/trace/bpf_trace.c
@@ -0,0 +1,222 @@
+/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include "trace.h"
+
+static DEFINE_PER_CPU(int, bpf_prog_active);
+
+/**
+ * trace_call_bpf - invoke BPF program
+ * @prog: BPF program
+ * @ctx: opaque context pointer
+ *
+ * kprobe handlers execute BPF programs via this helper.
+ * Can be used from static tracepoints in the future.
+ *
+ * Return: BPF programs always return an integer which is interpreted by
+ * kprobe handler as:
+ * 0 - return from kprobe (event is filtered out)
+ * 1 - store kprobe event into ring buffer
+ * Other values are reserved and currently alias to 1
+ */
+unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
+{
+ unsigned int ret;
+
+ if (in_nmi()) /* not supported yet */
+ return 1;
+
+ preempt_disable();
+
+ if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
+ /*
+ * since some bpf program is already running on this cpu,
+ * don't call into another bpf program (same or different)
+ * and don't send kprobe event into ring-buffer,
+ * so return zero here
+ */
+ ret = 0;
+ goto out;
+ }
+
+ rcu_read_lock();
+ ret = BPF_PROG_RUN(prog, ctx);
+ rcu_read_unlock();
+
+ out:
+ __this_cpu_dec(bpf_prog_active);
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(trace_call_bpf);
+
+static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ void *dst = (void *) (long) r1;
+ int size = (int) r2;
+ void *unsafe_ptr = (void *) (long) r3;
+
+ return probe_kernel_read(dst, unsafe_ptr, size);
+}
+
+static const struct bpf_func_proto bpf_probe_read_proto = {
+ .func = bpf_probe_read,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ /* NMI safe access to clock monotonic */
+ return ktime_get_mono_fast_ns();
+}
+
+static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
+ .func = bpf_ktime_get_ns,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+};
+
+/*
+ * limited trace_printk()
+ * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
+ */
+static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
+{
+ char *fmt = (char *) (long) r1;
+ int mod[3] = {};
+ int fmt_cnt = 0;
+ int i;
+
+ /*
+ * bpf_check()->check_func_arg()->check_stack_boundary()
+ * guarantees that fmt points to bpf program stack,
+ * fmt_size bytes of it were initialized and fmt_size > 0
+ */
+ if (fmt[--fmt_size] != 0)
+ return -EINVAL;
+
+ /* check format string for allowed specifiers */
+ for (i = 0; i < fmt_size; i++) {
+ if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
+ return -EINVAL;
+
+ if (fmt[i] != '%')
+ continue;
+
+ if (fmt_cnt >= 3)
+ return -EINVAL;
+
+ /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
+ i++;
+ if (fmt[i] == 'l') {
+ mod[fmt_cnt]++;
+ i++;
+ } else if (fmt[i] == 'p') {
+ mod[fmt_cnt]++;
+ i++;
+ if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+ return -EINVAL;
+ fmt_cnt++;
+ continue;
+ }
+
+ if (fmt[i] == 'l') {
+ mod[fmt_cnt]++;
+ i++;
+ }
+
+ if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
+ return -EINVAL;
+ fmt_cnt++;
+ }
+
+ return __trace_printk(1/* fake ip will not be printed */, fmt,
+ mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
+ mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
+ mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
+}
+
+static const struct bpf_func_proto bpf_trace_printk_proto = {
+ .func = bpf_trace_printk,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+};
+
+static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
+{
+ switch (func_id) {
+ case BPF_FUNC_map_lookup_elem:
+ return &bpf_map_lookup_elem_proto;
+ case BPF_FUNC_map_update_elem:
+ return &bpf_map_update_elem_proto;
+ case BPF_FUNC_map_delete_elem:
+ return &bpf_map_delete_elem_proto;
+ case BPF_FUNC_probe_read:
+ return &bpf_probe_read_proto;
+ case BPF_FUNC_ktime_get_ns:
+ return &bpf_ktime_get_ns_proto;
+
+ case BPF_FUNC_trace_printk:
+ /*
+ * this program might be calling bpf_trace_printk,
+ * so allocate per-cpu printk buffers
+ */
+ trace_printk_init_buffers();
+
+ return &bpf_trace_printk_proto;
+ default:
+ return NULL;
+ }
+}
+
+/* bpf+kprobe programs can access fields of 'struct pt_regs' */
+static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+{
+ /* check bounds */
+ if (off < 0 || off >= sizeof(struct pt_regs))
+ return false;
+
+ /* only read is allowed */
+ if (type != BPF_READ)
+ return false;
+
+ /* disallow misaligned access */
+ if (off % size != 0)
+ return false;
+
+ return true;
+}
+
+static struct bpf_verifier_ops kprobe_prog_ops = {
+ .get_func_proto = kprobe_prog_func_proto,
+ .is_valid_access = kprobe_prog_is_valid_access,
+};
+
+static struct bpf_prog_type_list kprobe_tl = {
+ .ops = &kprobe_prog_ops,
+ .type = BPF_PROG_TYPE_KPROBE,
+};
+
+static int __init register_kprobe_prog_ops(void)
+{
+ bpf_register_prog_type(&kprobe_tl);
+ return 0;
+}
+late_initcall(register_kprobe_prog_ops);
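
bpf_trace_printk() cannot hand an arbitrary format string to __trace_printk(), so it whitelists the conversions first: at most three of %d/%u/%x with an optional l or ll length modifier, plus bare %p, in a printable-ASCII string. A simplified standalone version of that check (it drops the kernel's extra handling after %p and does not record argument widths the way the real helper must):

    #include <stdbool.h>
    #include <ctype.h>

    static bool printk_fmt_ok(const char *fmt, int fmt_size)
    {
        int cnt = 0;

        if (fmt_size <= 0 || fmt[fmt_size - 1] != '\0')
            return false;               /* must be NUL-terminated inside the buffer */

        for (int i = 0; i < fmt_size - 1; i++) {
            unsigned char c = (unsigned char)fmt[i];

            if (c >= 0x80 || (!isprint(c) && !isspace(c)))
                return false;           /* printable ASCII only */
            if (c != '%')
                continue;
            if (++cnt > 3)
                return false;           /* at most three conversions */
            i++;
            if (fmt[i] == 'p')          /* bare %p */
                continue;
            if (fmt[i] == 'l')          /* optional l or ll */
                i++;
            if (fmt[i] == 'l')
                i++;
            if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
                return false;
        }
        return true;
    }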
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4f228024055b..02bece4a99ea 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -18,7 +18,7 @@
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
-#include <linux/debugfs.h>
+#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
@@ -249,6 +249,19 @@ static void update_function_graph_func(void);
static inline void update_function_graph_func(void) { }
#endif
+
+static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
+{
+ /*
+ * If this is a dynamic ops or we force list func,
+ * then it needs to call the list anyway.
+ */
+ if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
+ return ftrace_ops_list_func;
+
+ return ftrace_ops_get_func(ops);
+}
+
static void update_ftrace_function(void)
{
ftrace_func_t func;
@@ -270,7 +283,7 @@ static void update_ftrace_function(void)
* then have the mcount trampoline call the function directly.
*/
} else if (ftrace_ops_list->next == &ftrace_list_end) {
- func = ftrace_ops_get_func(ftrace_ops_list);
+ func = ftrace_ops_get_list_func(ftrace_ops_list);
} else {
/* Just use the default ftrace_ops */
@@ -1008,7 +1021,7 @@ static struct tracer_stat function_stats __initdata = {
.stat_show = function_stat_show
};
-static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
struct ftrace_profile_stat *stat;
struct dentry *entry;
@@ -1044,15 +1057,15 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
}
}
- entry = debugfs_create_file("function_profile_enabled", 0644,
+ entry = tracefs_create_file("function_profile_enabled", 0644,
d_tracer, NULL, &ftrace_profile_fops);
if (!entry)
- pr_warning("Could not create debugfs "
+ pr_warning("Could not create tracefs "
"'function_profile_enabled' entry\n");
}
#else /* CONFIG_FUNCTION_PROFILER */
-static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
@@ -4712,7 +4725,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
mutex_unlock(&ftrace_lock);
}
-static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
+static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{
trace_create_file("available_filter_functions", 0444,
@@ -5020,7 +5033,7 @@ static int __init ftrace_nodyn_init(void)
}
core_initcall(ftrace_nodyn_init);
-static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
/* Keep as macros so we do not need to define the commands */
@@ -5209,13 +5222,6 @@ static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
/*
- * If this is a dynamic ops or we force list func,
- * then it needs to call the list anyway.
- */
- if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
- return ftrace_ops_list_func;
-
- /*
* If the func handles its own recursion, call it directly.
* Otherwise call the recursion protected function that
* will call the ftrace ops function.
@@ -5473,7 +5479,7 @@ static const struct file_operations ftrace_pid_fops = {
.release = ftrace_pid_release,
};
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_tracefs(void)
{
struct dentry *d_tracer;
@@ -5481,16 +5487,16 @@ static __init int ftrace_init_debugfs(void)
if (IS_ERR(d_tracer))
return 0;
- ftrace_init_dyn_debugfs(d_tracer);
+ ftrace_init_dyn_tracefs(d_tracer);
trace_create_file("set_ftrace_pid", 0644, d_tracer,
NULL, &ftrace_pid_fops);
- ftrace_profile_debugfs(d_tracer);
+ ftrace_profile_tracefs(d_tracer);
return 0;
}
-fs_initcall(ftrace_init_debugfs);
+fs_initcall(ftrace_init_tracefs);
/**
* ftrace_kill - kill ftrace
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5040d44fe5a3..0315d43176d8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
static __always_inline int trace_recursive_lock(void)
{
- unsigned int val = this_cpu_read(current_context);
+ unsigned int val = __this_cpu_read(current_context);
int bit;
if (in_interrupt()) {
@@ -2696,18 +2696,14 @@ static __always_inline int trace_recursive_lock(void)
return 1;
val |= (1 << bit);
- this_cpu_write(current_context, val);
+ __this_cpu_write(current_context, val);
return 0;
}
static __always_inline void trace_recursive_unlock(void)
{
- unsigned int val = this_cpu_read(current_context);
-
- val--;
- val &= this_cpu_read(current_context);
- this_cpu_write(current_context, val);
+ __this_cpu_and(current_context, __this_cpu_read(current_context) - 1);
}
#else
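
The rewritten trace_recursive_unlock() is the classic clear-the-lowest-set-bit idiom: for non-zero x, x & (x - 1) drops exactly the least significant 1 bit, which here corresponds to the most recently taken lock level, since the lock side hands lower bit numbers to more deeply nested contexts. In isolation:

    /* Equivalent to the removed "val--; val &= original;" sequence. */
    static unsigned int clear_lowest_set_bit(unsigned int x)
    {
        return x & (x - 1);
    }

Switching from this_cpu_*() to the lighter __this_cpu_*() forms also sheds their extra protection, which is sufficient here because this path already runs with preemption disabled.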
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 62c6506d663f..05330494a0df 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -20,6 +20,7 @@
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
+#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
@@ -31,6 +32,7 @@
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
+#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
@@ -123,6 +125,42 @@ enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
+#ifdef CONFIG_TRACE_ENUM_MAP_FILE
+/* Map of enums to their values, for "enum_map" file */
+struct trace_enum_map_head {
+ struct module *mod;
+ unsigned long length;
+};
+
+union trace_enum_map_item;
+
+struct trace_enum_map_tail {
+ /*
+ * "end" is first and points to NULL as it must be different
+ * than "mod" or "enum_string"
+ */
+ union trace_enum_map_item *next;
+ const char *end; /* points to NULL */
+};
+
+static DEFINE_MUTEX(trace_enum_mutex);
+
+/*
+ * The trace_enum_maps are saved in an array with two extra elements,
+ * one at the beginning, and one at the end. The beginning item contains
+ * the count of the saved maps (head.length), and the module they
+ * belong to if not built in (head.mod). The ending item contains a
+ * pointer to the next array of saved enum_map items.
+ */
+union trace_enum_map_item {
+ struct trace_enum_map map;
+ struct trace_enum_map_head head;
+ struct trace_enum_map_tail tail;
+};
+
+static union trace_enum_map_item *trace_enum_maps;
+#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
+
static int tracing_set_tracer(struct trace_array *tr, const char *buf);
#define MAX_TRACER_SIZE 100
@@ -3908,6 +3946,182 @@ static const struct file_operations tracing_saved_cmdlines_size_fops = {
.write = tracing_saved_cmdlines_size_write,
};
+#ifdef CONFIG_TRACE_ENUM_MAP_FILE
+static union trace_enum_map_item *
+update_enum_map(union trace_enum_map_item *ptr)
+{
+ if (!ptr->map.enum_string) {
+ if (ptr->tail.next) {
+ ptr = ptr->tail.next;
+ /* Set ptr to the next real item (skip head) */
+ ptr++;
+ } else
+ return NULL;
+ }
+ return ptr;
+}
+
+static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ union trace_enum_map_item *ptr = v;
+
+ /*
+ * Paranoid! If ptr points to end, we don't want to increment past it.
+ * This really should never happen.
+ */
+ ptr = update_enum_map(ptr);
+ if (WARN_ON_ONCE(!ptr))
+ return NULL;
+
+ ptr++;
+
+ (*pos)++;
+
+ ptr = update_enum_map(ptr);
+
+ return ptr;
+}
+
+static void *enum_map_start(struct seq_file *m, loff_t *pos)
+{
+ union trace_enum_map_item *v;
+ loff_t l = 0;
+
+ mutex_lock(&trace_enum_mutex);
+
+ v = trace_enum_maps;
+ if (v)
+ v++;
+
+ while (v && l < *pos) {
+ v = enum_map_next(m, v, &l);
+ }
+
+ return v;
+}
+
+static void enum_map_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&trace_enum_mutex);
+}
+
+static int enum_map_show(struct seq_file *m, void *v)
+{
+ union trace_enum_map_item *ptr = v;
+
+ seq_printf(m, "%s %ld (%s)\n",
+ ptr->map.enum_string, ptr->map.enum_value,
+ ptr->map.system);
+
+ return 0;
+}
+
+static const struct seq_operations tracing_enum_map_seq_ops = {
+ .start = enum_map_start,
+ .next = enum_map_next,
+ .stop = enum_map_stop,
+ .show = enum_map_show,
+};
+
+static int tracing_enum_map_open(struct inode *inode, struct file *filp)
+{
+ if (tracing_disabled)
+ return -ENODEV;
+
+ return seq_open(filp, &tracing_enum_map_seq_ops);
+}
+
+static const struct file_operations tracing_enum_map_fops = {
+ .open = tracing_enum_map_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static inline union trace_enum_map_item *
+trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
+{
+ /* Return tail of array given the head */
+ return ptr + ptr->head.length + 1;
+}
+
+static void
+trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
+ int len)
+{
+ struct trace_enum_map **stop;
+ struct trace_enum_map **map;
+ union trace_enum_map_item *map_array;
+ union trace_enum_map_item *ptr;
+
+ stop = start + len;
+
+ /*
+ * The trace_enum_maps contains the map plus a head and tail item,
+ * where the head holds the module and length of array, and the
+ * tail holds a pointer to the next list.
+ */
+ map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
+ if (!map_array) {
+ pr_warning("Unable to allocate trace enum mapping\n");
+ return;
+ }
+
+ mutex_lock(&trace_enum_mutex);
+
+ if (!trace_enum_maps)
+ trace_enum_maps = map_array;
+ else {
+ ptr = trace_enum_maps;
+ for (;;) {
+ ptr = trace_enum_jmp_to_tail(ptr);
+ if (!ptr->tail.next)
+ break;
+ ptr = ptr->tail.next;
+
+ }
+ ptr->tail.next = map_array;
+ }
+ map_array->head.mod = mod;
+ map_array->head.length = len;
+ map_array++;
+
+ for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
+ map_array->map = **map;
+ map_array++;
+ }
+ memset(map_array, 0, sizeof(*map_array));
+
+ mutex_unlock(&trace_enum_mutex);
+}
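
[Editorial sketch, not part of the patch.] trace_insert_enum_map_file() stores each batch of maps in a flat array of union items with one extra element at each end: the head records the owning module and the count, the tail records a pointer to the next batch and keeps a NULL in the slot that aliases enum_string, which is how update_enum_map() recognizes it. trace_enum_jmp_to_tail() then hops from a head to its tail with plain pointer arithmetic. The userspace sketch below mirrors that layout with made-up names (map/head/tail, make_batch); it is an analogy, not the kernel structures.

#include <stdio.h>
#include <stdlib.h>

union item;

/* Stand-ins for trace_enum_map / _head / _tail (module pointer omitted). */
struct map  { const char *system; const char *name; long value; };
struct head { unsigned long length; };
struct tail { union item *next; const char *end; /* stays NULL */ };

union item {
	struct map  map;
	struct head head;
	struct tail tail;
};

/* Given a batch's head item, jump to its tail item. */
static union item *jmp_to_tail(union item *head)
{
	return head + head->head.length + 1;
}

/* Allocate head + len maps + zeroed tail, like the kmalloc(len + 2) above. */
static union item *make_batch(const struct map *maps, unsigned long len)
{
	union item *batch = calloc(len + 2, sizeof(*batch));
	unsigned long i;

	if (!batch)
		return NULL;
	batch[0].head.length = len;
	for (i = 0; i < len; i++)
		batch[i + 1].map = maps[i];
	/* batch[len + 1] is the tail: next == NULL, end == NULL */
	return batch;
}

int main(void)
{
	const struct map a[] = { { "core", "FOO", 1 }, { "core", "BAR", 2 } };
	const struct map b[] = { { "mod",  "BAZ", 3 } };
	union item *first  = make_batch(a, 2);
	union item *second = make_batch(b, 1);
	union item *ptr;

	if (!first || !second)
		return 1;

	/* Chain the second batch off the first batch's tail. */
	jmp_to_tail(first)->tail.next = second;

	/* Walk all maps, skipping heads and hopping across tails. */
	for (ptr = first + 1; ptr; ptr++) {
		if (!ptr->map.name) {             /* tail item: its 'end' slot is NULL */
			if (!ptr->tail.next)
				break;            /* no further batch */
			ptr = ptr->tail.next;     /* next batch's head... */
			continue;                 /* ...ptr++ lands on its first map */
		}
		printf("%s: %s = %ld\n", ptr->map.system, ptr->map.name, ptr->map.value);
	}

	free(first);
	free(second);
	return 0;
}
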
+
+static void trace_create_enum_file(struct dentry *d_tracer)
+{
+ trace_create_file("enum_map", 0444, d_tracer,
+ NULL, &tracing_enum_map_fops);
+}
+
+#else /* CONFIG_TRACE_ENUM_MAP_FILE */
+static inline void trace_create_enum_file(struct dentry *d_tracer) { }
+static inline void trace_insert_enum_map_file(struct module *mod,
+ struct trace_enum_map **start, int len) { }
+#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
+
+static void trace_insert_enum_map(struct module *mod,
+ struct trace_enum_map **start, int len)
+{
+ struct trace_enum_map **map;
+
+ if (len <= 0)
+ return;
+
+ map = start;
+
+ trace_event_enum_update(map, len);
+
+ trace_insert_enum_map_file(mod, start, len);
+}
+
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -4105,9 +4319,24 @@ static void tracing_set_nop(struct trace_array *tr)
tr->current_trace = &nop_trace;
}
-static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+static void update_tracer_options(struct trace_array *tr, struct tracer *t)
{
static struct trace_option_dentry *topts;
+
+ /* Only enable if the directory has been created already. */
+ if (!tr->dir)
+ return;
+
+ /* Currently, only the top instance has options */
+ if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
+ return;
+
+ destroy_trace_option_files(topts);
+ topts = create_trace_option_files(tr, t);
+}
+
+static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+{
struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
bool had_max_tr;
@@ -4172,11 +4401,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
free_snapshot(tr);
}
#endif
- /* Currently, only the top instance has options */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
- destroy_trace_option_files(topts);
- topts = create_trace_option_files(tr, t);
- }
+ update_tracer_options(tr, t);
#ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr && !had_max_tr) {
@@ -5817,6 +6042,14 @@ static inline __init int register_snapshot_cmd(void) { return 0; }
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
+ if (WARN_ON(!tr->dir))
+ return ERR_PTR(-ENODEV);
+
+ /* Top directory uses NULL as the parent */
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+ return NULL;
+
+ /* All sub buffers have a descriptor */
return tr->dir;
}
@@ -5831,10 +6064,10 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
if (IS_ERR(d_tracer))
return NULL;
- tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
+ tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
WARN_ONCE(!tr->percpu_dir,
- "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
+ "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
return tr->percpu_dir;
}
@@ -5846,12 +6079,12 @@ trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
if (ret) /* See tracing_get_cpu() */
- ret->d_inode->i_cdev = (void *)(cpu + 1);
+ d_inode(ret)->i_cdev = (void *)(cpu + 1);
return ret;
}
static void
-tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
+tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
struct dentry *d_cpu;
@@ -5861,9 +6094,9 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
return;
snprintf(cpu_dir, 30, "cpu%ld", cpu);
- d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
+ d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
if (!d_cpu) {
- pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
+ pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
return;
}
@@ -6015,9 +6248,9 @@ struct dentry *trace_create_file(const char *name,
{
struct dentry *ret;
- ret = debugfs_create_file(name, mode, parent, data, fops);
+ ret = tracefs_create_file(name, mode, parent, data, fops);
if (!ret)
- pr_warning("Could not create debugfs '%s' entry\n", name);
+ pr_warning("Could not create tracefs '%s' entry\n", name);
return ret;
}
@@ -6034,9 +6267,9 @@ static struct dentry *trace_options_init_dentry(struct trace_array *tr)
if (IS_ERR(d_tracer))
return NULL;
- tr->options = debugfs_create_dir("options", d_tracer);
+ tr->options = tracefs_create_dir("options", d_tracer);
if (!tr->options) {
- pr_warning("Could not create debugfs directory 'options'\n");
+ pr_warning("Could not create tracefs directory 'options'\n");
return NULL;
}
@@ -6105,7 +6338,7 @@ destroy_trace_option_files(struct trace_option_dentry *topts)
return;
for (cnt = 0; topts[cnt].opt; cnt++)
- debugfs_remove(topts[cnt].entry);
+ tracefs_remove(topts[cnt].entry);
kfree(topts);
}
@@ -6194,7 +6427,7 @@ static const struct file_operations rb_simple_fops = {
struct dentry *trace_instance_dir;
static void
-init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
+init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
@@ -6271,7 +6504,7 @@ static void free_trace_buffers(struct trace_array *tr)
#endif
}
-static int new_instance_create(const char *name)
+static int instance_mkdir(const char *name)
{
struct trace_array *tr;
int ret;
@@ -6310,17 +6543,17 @@ static int new_instance_create(const char *name)
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
- tr->dir = debugfs_create_dir(name, trace_instance_dir);
+ tr->dir = tracefs_create_dir(name, trace_instance_dir);
if (!tr->dir)
goto out_free_tr;
ret = event_trace_add_tracer(tr->dir, tr);
if (ret) {
- debugfs_remove_recursive(tr->dir);
+ tracefs_remove_recursive(tr->dir);
goto out_free_tr;
}
- init_tracer_debugfs(tr, tr->dir);
+ init_tracer_tracefs(tr, tr->dir);
list_add(&tr->list, &ftrace_trace_arrays);
@@ -6341,7 +6574,7 @@ static int new_instance_create(const char *name)
}
-static int instance_delete(const char *name)
+static int instance_rmdir(const char *name)
{
struct trace_array *tr;
int found = 0;
@@ -6382,82 +6615,17 @@ static int instance_delete(const char *name)
return ret;
}
-static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
-{
- struct dentry *parent;
- int ret;
-
- /* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
- if (WARN_ON_ONCE(parent != trace_instance_dir))
- return -ENOENT;
-
- /*
- * The inode mutex is locked, but debugfs_create_dir() will also
- * take the mutex. As the instances directory can not be destroyed
- * or changed in any other way, it is safe to unlock it, and
- * let the dentry try. If two users try to make the same dir at
- * the same time, then the new_instance_create() will determine the
- * winner.
- */
- mutex_unlock(&inode->i_mutex);
-
- ret = new_instance_create(dentry->d_iname);
-
- mutex_lock(&inode->i_mutex);
-
- return ret;
-}
-
-static int instance_rmdir(struct inode *inode, struct dentry *dentry)
-{
- struct dentry *parent;
- int ret;
-
- /* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
- if (WARN_ON_ONCE(parent != trace_instance_dir))
- return -ENOENT;
-
- /* The caller did a dget() on dentry */
- mutex_unlock(&dentry->d_inode->i_mutex);
-
- /*
- * The inode mutex is locked, but debugfs_create_dir() will also
- * take the mutex. As the instances directory can not be destroyed
- * or changed in any other way, it is safe to unlock it, and
- * let the dentry try. If two users try to make the same dir at
- * the same time, then the instance_delete() will determine the
- * winner.
- */
- mutex_unlock(&inode->i_mutex);
-
- ret = instance_delete(dentry->d_iname);
-
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
- mutex_lock(&dentry->d_inode->i_mutex);
-
- return ret;
-}
-
-static const struct inode_operations instance_dir_inode_operations = {
- .lookup = simple_lookup,
- .mkdir = instance_mkdir,
- .rmdir = instance_rmdir,
-};
-
static __init void create_trace_instances(struct dentry *d_tracer)
{
- trace_instance_dir = debugfs_create_dir("instances", d_tracer);
+ trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
+ instance_mkdir,
+ instance_rmdir);
if (WARN_ON(!trace_instance_dir))
return;
-
- /* Hijack the dir inode operations, to allow mkdir */
- trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
static void
-init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
+init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
int cpu;
@@ -6511,10 +6679,32 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
#endif
for_each_tracing_cpu(cpu)
- tracing_init_debugfs_percpu(tr, cpu);
+ tracing_init_tracefs_percpu(tr, cpu);
}
+static struct vfsmount *trace_automount(void *ignore)
+{
+ struct vfsmount *mnt;
+ struct file_system_type *type;
+
+ /*
+ * To maintain backward compatibility for tools that mount
+ * debugfs to get to the tracing facility, tracefs is automatically
+ * mounted to the debugfs/tracing directory.
+ */
+ type = get_fs_type("tracefs");
+ if (!type)
+ return NULL;
+ mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
+ put_filesystem(type);
+ if (IS_ERR(mnt))
+ return NULL;
+ mntget(mnt);
+
+ return mnt;
+}
+
/**
* tracing_init_dentry - initialize top level trace array
*
@@ -6526,23 +6716,112 @@ struct dentry *tracing_init_dentry(void)
{
struct trace_array *tr = &global_trace;
+ /* The top level trace array uses NULL as parent */
if (tr->dir)
- return tr->dir;
+ return NULL;
if (WARN_ON(!debugfs_initialized()))
return ERR_PTR(-ENODEV);
- tr->dir = debugfs_create_dir("tracing", NULL);
-
+ /*
+ * As there may still be users that expect the tracing
+ * files to exist in debugfs/tracing, we must automount
+ * the tracefs file system there, so older tools still
+ * work with the newer kernel.
+ */
+ tr->dir = debugfs_create_automount("tracing", NULL,
+ trace_automount, NULL);
if (!tr->dir) {
pr_warn_once("Could not create debugfs directory 'tracing'\n");
return ERR_PTR(-ENOMEM);
}
- return tr->dir;
+ return NULL;
+}
+
+extern struct trace_enum_map *__start_ftrace_enum_maps[];
+extern struct trace_enum_map *__stop_ftrace_enum_maps[];
+
+static void __init trace_enum_init(void)
+{
+ int len;
+
+ len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
+ trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
+}
+
+#ifdef CONFIG_MODULES
+static void trace_module_add_enums(struct module *mod)
+{
+ if (!mod->num_trace_enums)
+ return;
+
+ /*
+ * Modules with bad taint do not have events created, do
+ * not bother with enums either.
+ */
+ if (trace_module_has_bad_taint(mod))
+ return;
+
+ trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}
-static __init int tracer_init_debugfs(void)
+#ifdef CONFIG_TRACE_ENUM_MAP_FILE
+static void trace_module_remove_enums(struct module *mod)
+{
+ union trace_enum_map_item *map;
+ union trace_enum_map_item **last = &trace_enum_maps;
+
+ if (!mod->num_trace_enums)
+ return;
+
+ mutex_lock(&trace_enum_mutex);
+
+ map = trace_enum_maps;
+
+ while (map) {
+ if (map->head.mod == mod)
+ break;
+ map = trace_enum_jmp_to_tail(map);
+ last = &map->tail.next;
+ map = map->tail.next;
+ }
+ if (!map)
+ goto out;
+
+ *last = trace_enum_jmp_to_tail(map)->tail.next;
+ kfree(map);
+ out:
+ mutex_unlock(&trace_enum_mutex);
+}
+#else
+static inline void trace_module_remove_enums(struct module *mod) { }
+#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
+
+static int trace_module_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+
+ switch (val) {
+ case MODULE_STATE_COMING:
+ trace_module_add_enums(mod);
+ break;
+ case MODULE_STATE_GOING:
+ trace_module_remove_enums(mod);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block trace_module_nb = {
+ .notifier_call = trace_module_notify,
+ .priority = 0,
+};
+#endif /* CONFIG_MODULES */
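
[Editorial sketch, not part of the patch.] trace_module_remove_enums() walks the chain of saved batches while keeping 'last' as a pointer to the link that referenced the current batch, so the departing module's batch can be unlinked with one assignment whether it is the first batch (trace_enum_maps itself) or any later one. The same pointer-to-pointer idiom on an ordinary singly linked list, with hypothetical names, looks like this:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int owner;            /* stands in for the owning module */
	struct node *next;
};

/* Unlink the first node owned by 'owner'. Because '*last' may be the list
 * head itself, no special case is needed for removing the first node.
 */
static void remove_owner(struct node **headp, int owner)
{
	struct node **last = headp;
	struct node *n = *headp;

	while (n) {
		if (n->owner == owner)
			break;
		last = &n->next;
		n = n->next;
	}
	if (!n)
		return;            /* nothing from this owner */

	*last = n->next;           /* single assignment unlinks it */
	free(n);
}

int main(void)
{
	struct node *head = NULL;
	int owners[] = { 3, 2, 1 };
	size_t i;

	/* Build 1 -> 2 -> 3 */
	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->owner = owners[i];
		n->next = head;
		head = n;
	}

	remove_owner(&head, 1);    /* removes the current head */
	remove_owner(&head, 3);    /* removes the tail */

	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->owner);   /* prints: 2 */

	while (head) {
		struct node *n = head;

		head = head->next;
		free(n);
	}
	return 0;
}
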
+
+static __init int tracer_init_tracefs(void)
{
struct dentry *d_tracer;
@@ -6552,7 +6831,7 @@ static __init int tracer_init_debugfs(void)
if (IS_ERR(d_tracer))
return 0;
- init_tracer_debugfs(&global_trace, d_tracer);
+ init_tracer_tracefs(&global_trace, d_tracer);
trace_create_file("tracing_thresh", 0644, d_tracer,
&global_trace, &tracing_thresh_fops);
@@ -6566,6 +6845,14 @@ static __init int tracer_init_debugfs(void)
trace_create_file("saved_cmdlines_size", 0644, d_tracer,
NULL, &tracing_saved_cmdlines_size_fops);
+ trace_enum_init();
+
+ trace_create_enum_file(d_tracer);
+
+#ifdef CONFIG_MODULES
+ register_module_notifier(&trace_module_nb);
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -6575,6 +6862,10 @@ static __init int tracer_init_debugfs(void)
create_trace_options_dir(&global_trace);
+ /* If the tracer was started via cmdline, create options for it here */
+ if (global_trace.current_trace != &nop_trace)
+ update_tracer_options(&global_trace, global_trace.current_trace);
+
return 0;
}
@@ -6888,7 +7179,7 @@ void __init trace_init(void)
tracepoint_printk = 0;
}
tracer_alloc_buffers();
- trace_event_init();
+ trace_event_init();
}
__init static int clear_boot_tracer(void)
@@ -6910,5 +7201,5 @@ __init static int clear_boot_tracer(void)
return 0;
}
-fs_initcall(tracer_init_debugfs);
+fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dd8205a35760..d2612016de94 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -334,7 +334,7 @@ struct tracer_flags {
/**
- * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * struct tracer - a specific tracer and its callbacks to interact with tracefs
* @name: the name chosen to select it on the available_tracers file
* @init: called when one switches to this tracer (echo name > current_tracer)
* @reset: called when one switches to another tracer
@@ -1309,8 +1309,10 @@ static inline void init_ftrace_syscalls(void) { }
#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
+void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
+static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif
extern struct trace_iterator *tracepoint_print_iter;
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e2d027ac66a2..ee7b94a4810a 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -223,7 +223,7 @@ FTRACE_ENTRY(bprint, bprint_entry,
__dynamic_array( u32, buf )
),
- F_printk("%pf: %s",
+ F_printk("%ps: %s",
(void *)__entry->ip, __entry->fmt),
FILTER_OTHER
@@ -238,7 +238,7 @@ FTRACE_ENTRY(print, print_entry,
__dynamic_array( char, buf )
),
- F_printk("%pf: %s",
+ F_printk("%ps: %s",
(void *)__entry->ip, __entry->buf),
FILTER_OTHER
@@ -253,7 +253,7 @@ FTRACE_ENTRY(bputs, bputs_entry,
__field( const char *, str )
),
- F_printk("%pf: %s",
+ F_printk("%ps: %s",
(void *)__entry->ip, __entry->str),
FILTER_OTHER
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index db54dda10ccc..c4de47fc5cca 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -13,7 +13,7 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
-#include <linux/debugfs.h>
+#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
@@ -480,7 +480,7 @@ static void remove_subsystem(struct ftrace_subsystem_dir *dir)
return;
if (!--dir->nr_events) {
- debugfs_remove_recursive(dir->entry);
+ tracefs_remove_recursive(dir->entry);
list_del(&dir->list);
__put_system_dir(dir);
}
@@ -494,12 +494,12 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
if (dir) {
spin_lock(&dir->d_lock); /* probably unneeded */
list_for_each_entry(child, &dir->d_subdirs, d_child) {
- if (child->d_inode) /* probably unneeded */
- child->d_inode->i_private = NULL;
+ if (d_really_is_positive(child)) /* probably unneeded */
+ d_inode(child)->i_private = NULL;
}
spin_unlock(&dir->d_lock);
- debugfs_remove_recursive(dir);
+ tracefs_remove_recursive(dir);
}
list_del(&file->list);
@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
char *event = NULL, *sub = NULL, *match;
+ int ret;
/*
* The buf format can be <subsystem>:<event-name>
@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
event = NULL;
}
- return __ftrace_set_clr_event(tr, match, sub, event, set);
+ ret = __ftrace_set_clr_event(tr, match, sub, event, set);
+
+ /* Put back the colon to allow this to be called again */
+ if (buf)
+ *(buf - 1) = ':';
+
+ return ret;
}
/**
@@ -1526,7 +1533,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
} else
__get_system(system);
- dir->entry = debugfs_create_dir(name, parent);
+ dir->entry = tracefs_create_dir(name, parent);
if (!dir->entry) {
pr_warn("Failed to create system directory %s\n", name);
__put_system(system);
@@ -1539,12 +1546,12 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
dir->subsystem = system;
file->system = dir;
- entry = debugfs_create_file("filter", 0644, dir->entry, dir,
+ entry = tracefs_create_file("filter", 0644, dir->entry, dir,
&ftrace_subsystem_filter_fops);
if (!entry) {
kfree(system->filter);
system->filter = NULL;
- pr_warn("Could not create debugfs '%s/filter' entry\n", name);
+ pr_warn("Could not create tracefs '%s/filter' entry\n", name);
}
trace_create_file("enable", 0644, dir->entry, dir,
@@ -1585,9 +1592,9 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
d_events = parent;
name = ftrace_event_name(call);
- file->dir = debugfs_create_dir(name, d_events);
+ file->dir = tracefs_create_dir(name, d_events);
if (!file->dir) {
- pr_warn("Could not create debugfs '%s' directory\n", name);
+ pr_warn("Could not create tracefs '%s' directory\n", name);
return -1;
}
@@ -1704,6 +1711,131 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
return 0;
}
+static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
+{
+ int rlen;
+ int elen;
+
+ /* Find the length of the enum value as a string */
+ elen = snprintf(ptr, 0, "%ld", map->enum_value);
+ /* Make sure there's enough room to replace the string with the value */
+ if (len < elen)
+ return NULL;
+
+ snprintf(ptr, elen + 1, "%ld", map->enum_value);
+
+ /* Get the rest of the string of ptr */
+ rlen = strlen(ptr + len);
+ memmove(ptr + elen, ptr + len, rlen);
+ /* Make sure we end the new string */
+ ptr[elen + rlen] = 0;
+
+ return ptr + elen;
+}
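
[Editorial sketch, not part of the patch.] enum_replace() rewrites the enum name inside print_fmt in place: snprintf() with a zero size first measures how many characters the decimal value needs, the digits are written over the start of the name, and memmove() pulls the remainder of the format string up behind them; the "len < elen" check guards against a value that would not fit in the space the name occupied. The standalone version below reorders the steps slightly (digits go into a scratch buffer first and the tail is moved before they are copied in) so the tail can never be clobbered; replace_token_with_value and MY_ENUM_STATE are made-up names.

#include <stdio.h>
#include <string.h>

/*
 * Replace the token of length 'len' at 'ptr' (inside a larger NUL-terminated
 * buffer) with the decimal form of 'value', pulling the rest of the string
 * left. Returns a pointer just past the inserted digits, or NULL when the
 * digits would need more room than the token occupied.
 */
static char *replace_token_with_value(char *ptr, size_t len, long value)
{
	char num[24];                                   /* scratch for the digits */
	int elen = snprintf(num, sizeof(num), "%ld", value);
	size_t rlen;

	if (elen < 0 || (size_t)elen > len)
		return NULL;

	rlen = strlen(ptr + len);                       /* tail after the token */
	memmove(ptr + elen, ptr + len, rlen + 1);       /* shift tail + NUL left */
	memcpy(ptr, num, elen);                         /* drop the digits in */
	return ptr + elen;
}

int main(void)
{
	char fmt[] = "state == MY_ENUM_STATE ? \"on\" : \"off\"";
	char *tok = strstr(fmt, "MY_ENUM_STATE");

	if (tok && replace_token_with_value(tok, strlen("MY_ENUM_STATE"), 4))
		printf("%s\n", fmt);    /* state == 4 ? "on" : "off" */
	return 0;
}
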
+
+static void update_event_printk(struct ftrace_event_call *call,
+ struct trace_enum_map *map)
+{
+ char *ptr;
+ int quote = 0;
+ int len = strlen(map->enum_string);
+
+ for (ptr = call->print_fmt; *ptr; ptr++) {
+ if (*ptr == '\\') {
+ ptr++;
+ /* paranoid */
+ if (!*ptr)
+ break;
+ continue;
+ }
+ if (*ptr == '"') {
+ quote ^= 1;
+ continue;
+ }
+ if (quote)
+ continue;
+ if (isdigit(*ptr)) {
+ /* skip numbers */
+ do {
+ ptr++;
+ /* Check for alpha chars like ULL */
+ } while (isalnum(*ptr));
+ if (!*ptr)
+ break;
+ /*
+ * A number must have some kind of delimiter after
+ * it, and we can ignore that too.
+ */
+ continue;
+ }
+ if (isalpha(*ptr) || *ptr == '_') {
+ if (strncmp(map->enum_string, ptr, len) == 0 &&
+ !isalnum(ptr[len]) && ptr[len] != '_') {
+ ptr = enum_replace(ptr, map, len);
+ /* Hmm, enum string smaller than value */
+ if (WARN_ON_ONCE(!ptr))
+ return;
+ /*
+ * No need to decrement here, as enum_replace()
+ * returns the pointer to the character past
+ * the enum, and two enums can not be placed
+ * back to back without something in between.
+ * We can skip that something in between.
+ */
+ continue;
+ }
+ skip_more:
+ do {
+ ptr++;
+ } while (isalnum(*ptr) || *ptr == '_');
+ if (!*ptr)
+ break;
+ /*
+ * If what comes after this variable is a '.' or
+ * '->' then we can continue to ignore that string.
+ */
+ if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
+ ptr += *ptr == '.' ? 1 : 2;
+ if (!*ptr)
+ break;
+ goto skip_more;
+ }
+ /*
+ * Once again, we can skip the delimiter that came
+ * after the string.
+ */
+ continue;
+ }
+ }
+}
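
[Editorial sketch, not part of the patch.] update_event_printk() only compares tokens that could actually be enum names: escaped characters are stepped over, anything between double quotes is ignored, numeric literals (with suffixes such as ULL) are skipped, and member names following '.' or "->" are skipped because they are field accesses. The userspace sketch below applies the same scanning rules but, instead of comparing against one enum name, simply lists every token that would be compared; the sample format string and the HDMI_ACTIVE name are invented for the demo.

#include <stdio.h>
#include <ctype.h>

/* List every bare C identifier outside double quotes in fmt. */
static void list_candidates(const char *fmt)
{
	const char *p = fmt;
	int quote = 0;

	while (*p) {
		if (*p == '\\') {                     /* skip escaped character */
			if (!*++p)
				break;
			p++;
			continue;
		}
		if (*p == '"') {                      /* toggle in-string state */
			quote ^= 1;
			p++;
			continue;
		}
		if (quote || (!isalpha((unsigned char)*p) && *p != '_')) {
			if (isdigit((unsigned char)*p))
				while (isalnum((unsigned char)*p))
					p++;          /* number plus suffix */
			else
				p++;
			continue;
		}

		/* Start of an identifier: this is what gets compared. */
		const char *start = p;
		while (isalnum((unsigned char)*p) || *p == '_')
			p++;
		printf("candidate: %.*s\n", (int)(p - start), start);

		/* Member names after '.' or "->" are never enum candidates. */
		while (*p == '.' || (p[0] == '-' && p[1] == '>')) {
			p += (*p == '.') ? 1 : 2;
			while (isalnum((unsigned char)*p) || *p == '_')
				p++;
		}
	}
}

int main(void)
{
	list_candidates("REC->state == HDMI_ACTIVE ? \"active\" : \"off 0x%x\"");
	/* prints REC and HDMI_ACTIVE; state, active, off, 0x%x are all skipped */
	return 0;
}
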
+
+void trace_event_enum_update(struct trace_enum_map **map, int len)
+{
+ struct ftrace_event_call *call, *p;
+ const char *last_system = NULL;
+ int last_i;
+ int i;
+
+ down_write(&trace_event_sem);
+ list_for_each_entry_safe(call, p, &ftrace_events, list) {
+ /* events are usually grouped together with systems */
+ if (!last_system || call->class->system != last_system) {
+ last_i = 0;
+ last_system = call->class->system;
+ }
+
+ for (i = last_i; i < len; i++) {
+ if (call->class->system == map[i]->system) {
+ /* Save the first system if need be */
+ if (!last_i)
+ last_i = i;
+ update_event_printk(call, map[i]);
+ }
+ }
+ }
+ up_write(&trace_event_sem);
+}
+
static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
struct trace_array *tr)
@@ -1915,7 +2047,7 @@ static int trace_module_notify(struct notifier_block *self,
static struct notifier_block trace_module_nb = {
.notifier_call = trace_module_notify,
- .priority = 0,
+ .priority = 1, /* higher than trace.c module notify */
};
#endif /* CONFIG_MODULES */
@@ -2228,7 +2360,7 @@ static inline int register_event_cmds(void) { return 0; }
/*
* The top level array has already had its ftrace_event_file
* descriptors created in order to allow for early events to
- * be recorded. This function is called after the debugfs has been
+ * be recorded. This function is called after the tracefs has been
* initialized, and we now have to create the files associated
* to the events.
*/
@@ -2311,16 +2443,16 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
struct dentry *d_events;
struct dentry *entry;
- entry = debugfs_create_file("set_event", 0644, parent,
+ entry = tracefs_create_file("set_event", 0644, parent,
tr, &ftrace_set_event_fops);
if (!entry) {
- pr_warn("Could not create debugfs 'set_event' entry\n");
+ pr_warn("Could not create tracefs 'set_event' entry\n");
return -ENOMEM;
}
- d_events = debugfs_create_dir("events", parent);
+ d_events = tracefs_create_dir("events", parent);
if (!d_events) {
- pr_warn("Could not create debugfs 'events' directory\n");
+ pr_warn("Could not create tracefs 'events' directory\n");
return -ENOMEM;
}
@@ -2412,7 +2544,7 @@ int event_trace_del_tracer(struct trace_array *tr)
down_write(&trace_event_sem);
__trace_remove_event_dirs(tr);
- debugfs_remove_recursive(tr->event_dir);
+ tracefs_remove_recursive(tr->event_dir);
up_write(&trace_event_sem);
tr->event_dir = NULL;
@@ -2534,10 +2666,10 @@ static __init int event_trace_init(void)
if (IS_ERR(d_tracer))
return 0;
- entry = debugfs_create_file("available_events", 0444, d_tracer,
+ entry = tracefs_create_file("available_events", 0444, d_tracer,
tr, &ftrace_avail_fops);
if (!entry)
- pr_warn("Could not create debugfs 'available_events' entry\n");
+ pr_warn("Could not create tracefs 'available_events' entry\n");
if (trace_define_common_fields())
pr_warn("tracing: Failed to allocate common fields");
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 12e2b99be862..174a6a71146c 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -177,7 +177,7 @@ struct ftrace_event_call __used event_##call = { \
}, \
.event.type = etype, \
.print_fmt = print, \
- .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
+ .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
}; \
struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 2d25ad1526bb..a51e79688455 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -6,7 +6,6 @@
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
*/
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
@@ -151,7 +150,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
* The curr_ret_stack is initialized to -1 and get increased
* in this function. So it can be less than -1 only if it was
* filtered out via ftrace_graph_notrace_addr() which can be
- * set from set_graph_notrace file in debugfs by user.
+ * set from set_graph_notrace file in tracefs by user.
*/
if (current->curr_ret_stack < -1)
return -EBUSY;
@@ -1309,15 +1308,19 @@ void graph_trace_open(struct trace_iterator *iter)
{
/* pid and depth on the last trace processed */
struct fgraph_data *data;
+ gfp_t gfpflags;
int cpu;
iter->private = NULL;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ /* We can be called in atomic context via ftrace_dump() */
+ gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+
+ data = kzalloc(sizeof(*data), gfpflags);
if (!data)
goto out_err;
- data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+ data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
if (!data->cpu_data)
goto out_err_free;
@@ -1432,7 +1435,7 @@ static const struct file_operations graph_depth_fops = {
.llseek = generic_file_llseek,
};
-static __init int init_graph_debugfs(void)
+static __init int init_graph_tracefs(void)
{
struct dentry *d_tracer;
@@ -1445,7 +1448,7 @@ static __init int init_graph_debugfs(void)
return 0;
}
-fs_initcall(init_graph_debugfs);
+fs_initcall(init_graph_tracefs);
static __init int init_graph_trace(void)
{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d73f565b4e06..d0ce590f06e1 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -250,7 +250,7 @@ DEFINE_FETCH_symbol(string_size)
#define fetch_file_offset_string_size NULL
/* Fetch type information table */
-const struct fetch_type kprobes_fetch_type_table[] = {
+static const struct fetch_type kprobes_fetch_type_table[] = {
/* Special types */
[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
sizeof(u32), 1, "__data_loc char[]"),
@@ -760,7 +760,8 @@ static int create_trace_kprobe(int argc, char **argv)
/* Parse fetch argument */
ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
- is_return, true);
+ is_return, true,
+ kprobes_fetch_type_table);
if (ret) {
pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
@@ -1134,11 +1135,15 @@ static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
struct ftrace_event_call *call = &tk->tp.call;
+ struct bpf_prog *prog = call->prog;
struct kprobe_trace_entry_head *entry;
struct hlist_head *head;
int size, __size, dsize;
int rctx;
+ if (prog && !trace_call_bpf(prog, regs))
+ return;
+
head = this_cpu_ptr(call->perf_events);
if (hlist_empty(head))
return;
@@ -1165,11 +1170,15 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct pt_regs *regs)
{
struct ftrace_event_call *call = &tk->tp.call;
+ struct bpf_prog *prog = call->prog;
struct kretprobe_trace_entry_head *entry;
struct hlist_head *head;
int size, __size, dsize;
int rctx;
+ if (prog && !trace_call_bpf(prog, regs))
+ return;
+
head = this_cpu_ptr(call->perf_events);
if (hlist_empty(head))
return;
@@ -1286,7 +1295,7 @@ static int register_kprobe_event(struct trace_kprobe *tk)
kfree(call->print_fmt);
return -ENODEV;
}
- call->flags = 0;
+ call->flags = TRACE_EVENT_FL_KPROBE;
call->class->reg = kprobe_register;
call->data = tk;
ret = trace_add_event_call(call);
@@ -1310,7 +1319,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
return ret;
}
-/* Make a debugfs interface for controlling probe points */
+/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
struct dentry *d_tracer;
@@ -1323,20 +1332,20 @@ static __init int init_kprobe_trace(void)
if (IS_ERR(d_tracer))
return 0;
- entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
+ entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
NULL, &kprobe_events_ops);
/* Event list interface */
if (!entry)
- pr_warning("Could not create debugfs "
+ pr_warning("Could not create tracefs "
"'kprobe_events' entry\n");
/* Profile interface */
- entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
+ entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
NULL, &kprobe_profile_ops);
if (!entry)
- pr_warning("Could not create debugfs "
+ pr_warning("Could not create tracefs "
"'kprobe_profile' entry\n");
return 0;
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 692bf7184c8c..25a086bcb700 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
EXPORT_SYMBOL(ftrace_print_hex_seq);
const char *
-ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
+ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
size_t el_size)
{
const char *ret = trace_seq_buffer_ptr(p);
const char *prefix = "";
void *ptr = (void *)buf;
+ size_t buf_len = count * el_size;
trace_seq_putc(p, '{');
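
[Editorial sketch, not part of the patch.] The hunk above changes ftrace_print_array_seq() to take an element count and derive the byte length as count * el_size, fixing callers that were passing a count where a byte length was expected. A userspace stand-in with the same shape (printf in place of trace_seq, hypothetical print_array name):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Print 'count' elements of 'el_size' bytes each from 'buf', e.g. "{0x1,0x2}".
 * The total byte length walked is count * el_size.
 */
static void print_array(const void *buf, int count, size_t el_size)
{
	const uint8_t *ptr = buf;
	const uint8_t *end = ptr + count * el_size;
	const char *prefix = "";

	printf("{");
	for (; ptr < end; ptr += el_size) {
		uint64_t v = 0;

		switch (el_size) {
		case 1: v = *ptr;                         break;
		case 2: v = *(const uint16_t *)ptr;       break;
		case 4: v = *(const uint32_t *)ptr;       break;
		case 8: v = *(const uint64_t *)ptr;       break;
		default:
			printf("BAD SIZE");
			goto out;
		}
		printf("%s0x%llx", prefix, (unsigned long long)v);
		prefix = ",";
	}
out:
	printf("}\n");
}

int main(void)
{
	uint32_t vals[] = { 1, 2, 0xdeadbeef };

	print_array(vals, 3, sizeof(vals[0]));   /* {0x1,0x2,0xdeadbeef} */
	return 0;
}
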
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index b983b2fd2ca1..1769a81da8a7 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -356,17 +356,14 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
/* Recursive argument parser */
static int parse_probe_arg(char *arg, const struct fetch_type *t,
- struct fetch_param *f, bool is_return, bool is_kprobe)
+ struct fetch_param *f, bool is_return, bool is_kprobe,
+ const struct fetch_type *ftbl)
{
- const struct fetch_type *ftbl;
unsigned long param;
long offset;
char *tmp;
int ret = 0;
- ftbl = is_kprobe ? kprobes_fetch_type_table : uprobes_fetch_type_table;
- BUG_ON(ftbl == NULL);
-
switch (arg[0]) {
case '$':
ret = parse_probe_vars(arg + 1, t, f, is_return, is_kprobe);
@@ -447,7 +444,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
dprm->fetch_size = get_fetch_size_function(t,
dprm->fetch, ftbl);
ret = parse_probe_arg(arg, t2, &dprm->orig, is_return,
- is_kprobe);
+ is_kprobe, ftbl);
if (ret)
kfree(dprm);
else {
@@ -505,15 +502,12 @@ static int __parse_bitfield_probe_arg(const char *bf,
/* String length checking wrapper */
int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
- struct probe_arg *parg, bool is_return, bool is_kprobe)
+ struct probe_arg *parg, bool is_return, bool is_kprobe,
+ const struct fetch_type *ftbl)
{
- const struct fetch_type *ftbl;
const char *t;
int ret;
- ftbl = is_kprobe ? kprobes_fetch_type_table : uprobes_fetch_type_table;
- BUG_ON(ftbl == NULL);
-
if (strlen(arg) > MAX_ARGSTR_LEN) {
pr_info("Argument is too long.: %s\n", arg);
return -ENOSPC;
@@ -535,7 +529,8 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
}
parg->offset = *size;
*size += parg->type->size;
- ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return, is_kprobe);
+ ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return,
+ is_kprobe, ftbl);
if (ret >= 0 && t != NULL)
ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);
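
[Editorial sketch, not part of the patch.] The probe-parsing changes above drop the weak-symbol lookup of the fetch type tables and instead have kprobes and uprobes pass their own (now static) table down through traceprobe_parse_probe_arg(). The miniature below illustrates that style of explicit table passing; the struct layout, table contents, and find_fetch_type stand-in are all invented for the example.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Minimal stand-in for a fetch type table entry. */
struct fetch_type {
	const char *name;
	size_t size;
};

static const struct fetch_type kprobe_table[] = {
	{ "u64",    8 },
	{ "string", 4 },
	{ NULL,     0 },
};

static const struct fetch_type uprobe_table[] = {
	{ "u32",    4 },
	{ "string", 4 },
	{ NULL,     0 },
};

/* Look a type up in whichever table the caller handed us. */
static const struct fetch_type *find_fetch_type(const char *name,
						const struct fetch_type *ftbl)
{
	for (; ftbl->name; ftbl++)
		if (!strcmp(ftbl->name, name))
			return ftbl;
	return NULL;
}

int main(void)
{
	const struct fetch_type *t;

	t = find_fetch_type("u64", kprobe_table);
	printf("kprobe u64 -> %zu bytes\n", t ? t->size : 0);

	t = find_fetch_type("u64", uprobe_table);
	printf("uprobe u64 -> %s\n", t ? "found" : "not in table");
	return 0;
}
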
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 4f815fbce16d..ab283e146b70 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -25,7 +25,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/debugfs.h>
+#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
@@ -229,13 +229,6 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \
#define FETCH_TYPE_STRING 0
#define FETCH_TYPE_STRSIZE 1
-/*
- * Fetch type information table.
- * It's declared as a weak symbol due to conditional compilation.
- */
-extern __weak const struct fetch_type kprobes_fetch_type_table[];
-extern __weak const struct fetch_type uprobes_fetch_type_table[];
-
#ifdef CONFIG_KPROBE_EVENT
struct symbol_cache;
unsigned long update_symbol_cache(struct symbol_cache *sc);
@@ -333,7 +326,8 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
}
extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
- struct probe_arg *parg, bool is_return, bool is_kprobe);
+ struct probe_arg *parg, bool is_return, bool is_kprobe,
+ const struct fetch_type *ftbl);
extern int traceprobe_conflict_field_name(const char *name,
struct probe_arg *args, int narg);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index c3e4fcfddd45..3f34496244e9 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -327,11 +327,11 @@ static void t_stop(struct seq_file *m, void *p)
local_irq_enable();
}
-static int trace_lookup_stack(struct seq_file *m, long i)
+static void trace_lookup_stack(struct seq_file *m, long i)
{
unsigned long addr = stack_dump_trace[i];
- return seq_printf(m, "%pS\n", (void *)addr);
+ seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 75e19e86c954..6cf935316769 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -12,7 +12,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
-#include <linux/debugfs.h>
+#include <linux/tracefs.h>
#include "trace_stat.h"
#include "trace.h"
@@ -65,7 +65,7 @@ static void reset_stat_session(struct stat_session *session)
static void destroy_session(struct stat_session *session)
{
- debugfs_remove(session->file);
+ tracefs_remove(session->file);
__reset_stat_session(session);
mutex_destroy(&session->stat_mutex);
kfree(session);
@@ -279,9 +279,9 @@ static int tracing_stat_init(void)
if (IS_ERR(d_tracing))
return 0;
- stat_dir = debugfs_create_dir("trace_stat", d_tracing);
+ stat_dir = tracefs_create_dir("trace_stat", d_tracing);
if (!stat_dir)
- pr_warning("Could not create debugfs "
+ pr_warning("Could not create tracefs "
"'trace_stat' entry\n");
return 0;
}
@@ -291,7 +291,7 @@ static int init_stat_file(struct stat_session *session)
if (!stat_dir && tracing_stat_init())
return -ENODEV;
- session->file = debugfs_create_file(session->ts->name, 0644,
+ session->file = tracefs_create_file(session->ts->name, 0644,
stat_dir,
session, &tracing_stat_fops);
if (!session->file)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 7dc1c8abecd6..6dd022c7b5bc 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -196,7 +196,7 @@ DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
/* Fetch type information table */
-const struct fetch_type uprobes_fetch_type_table[] = {
+static const struct fetch_type uprobes_fetch_type_table[] = {
/* Special types */
[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
sizeof(u32), 1, "__data_loc char[]"),
@@ -443,7 +443,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (ret)
goto fail_address_parse;
- inode = igrab(path.dentry->d_inode);
+ inode = igrab(d_inode(path.dentry));
path_put(&path);
if (!inode || !S_ISREG(inode->i_mode)) {
@@ -535,7 +535,8 @@ static int create_trace_uprobe(int argc, char **argv)
/* Parse fetch argument */
ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
- is_return, false);
+ is_return, false,
+ uprobes_fetch_type_table);
if (ret) {
pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
@@ -1005,7 +1006,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
return true;
list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
- if (event->hw.tp_target->mm == mm)
+ if (event->hw.target->mm == mm)
return true;
}
@@ -1015,7 +1016,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
- return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+ return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
@@ -1023,10 +1024,10 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
bool done;
write_lock(&tu->filter.rwlock);
- if (event->hw.tp_target) {
+ if (event->hw.target) {
list_del(&event->hw.tp_list);
done = tu->filter.nr_systemwide ||
- (event->hw.tp_target->flags & PF_EXITING) ||
+ (event->hw.target->flags & PF_EXITING) ||
uprobe_filter_event(tu, event);
} else {
tu->filter.nr_systemwide--;
@@ -1046,7 +1047,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
int err;
write_lock(&tu->filter.rwlock);
- if (event->hw.tp_target) {
+ if (event->hw.target) {
/*
* event->parent != NULL means copy_process(), we can avoid
* uprobe_apply(). current->mm must be probed and we can rely
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 3174bf8e3538..581a68a04c64 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,8 +24,35 @@
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
-int watchdog_user_enabled = 1;
+/*
+ * The run state of the lockup detectors is controlled by the content of the
+ * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
+ * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
+ *
+ * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
+ * are variables that are only used as an 'interface' between the parameters
+ * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
+ * 'watchdog_thresh' variable is handled differently because its value is not
+ * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
+ * is equal to zero.
+ */
+#define NMI_WATCHDOG_ENABLED_BIT 0
+#define SOFT_WATCHDOG_ENABLED_BIT 1
+#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
+#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+
+static DEFINE_MUTEX(watchdog_proc_mutex);
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+#else
+static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+#endif
+int __read_mostly nmi_watchdog_enabled;
+int __read_mostly soft_watchdog_enabled;
+int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;
+
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
@@ -58,8 +85,6 @@ static unsigned long soft_lockup_nmi_warn;
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-
-static bool hardlockup_detector_enabled = true;
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
@@ -68,14 +93,9 @@ static bool hardlockup_detector_enabled = true;
* kernel command line parameters are parsed, because otherwise it is not
* possible to override this in hardlockup_panic_setup().
*/
-void watchdog_enable_hardlockup_detector(bool val)
-{
- hardlockup_detector_enabled = val;
-}
-
-bool watchdog_hardlockup_detector_is_enabled(void)
+void hardlockup_detector_disable(void)
{
- return hardlockup_detector_enabled;
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}
static int __init hardlockup_panic_setup(char *str)
@@ -85,15 +105,9 @@ static int __init hardlockup_panic_setup(char *str)
else if (!strncmp(str, "nopanic", 7))
hardlockup_panic = 0;
else if (!strncmp(str, "0", 1))
- watchdog_user_enabled = 0;
- else if (!strncmp(str, "1", 1) || !strncmp(str, "2", 1)) {
- /*
- * Setting 'nmi_watchdog=1' or 'nmi_watchdog=2' (legacy option)
- * has the same effect.
- */
- watchdog_user_enabled = 1;
- watchdog_enable_hardlockup_detector(true);
- }
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+ else if (!strncmp(str, "1", 1))
+ watchdog_enabled |= NMI_WATCHDOG_ENABLED;
return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -112,19 +126,18 @@ __setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
- watchdog_user_enabled = 0;
+ watchdog_enabled = 0;
return 1;
}
__setup("nowatchdog", nowatchdog_setup);
-/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
- watchdog_user_enabled = 0;
+ watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
-/* */
+
#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
@@ -239,10 +252,11 @@ static int is_softlockup(unsigned long touch_ts)
{
unsigned long now = get_timestamp();
- /* Warn about unreasonable delays: */
- if (time_after(now, touch_ts + get_softlockup_thresh()))
- return now - touch_ts;
-
+ if (watchdog_enabled & SOFT_WATCHDOG_ENABLED) {
+ /* Warn about unreasonable delays. */
+ if (time_after(now, touch_ts + get_softlockup_thresh()))
+ return now - touch_ts;
+ }
return 0;
}
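
[Editorial sketch, not part of the patch.] After this change the timestamp comparison in is_softlockup() only counts when the SOFT_WATCHDOG_ENABLED bit is set, so clearing the bit through /proc/sys/kernel/soft_watchdog silences warnings immediately even though the watchdog thread keeps running. A tiny userspace rendering of that gate (wraparound-safe time_after() replaced by a plain comparison, names hypothetical):

#include <stdio.h>

#define SOFT_WATCHDOG_ENABLED (1UL << 1)

/* Return how long the watchdog has been starved, or 0 if it is fine or the
 * soft lockup detector bit is clear. time_after() wraparound handling is
 * omitted for brevity.
 */
static unsigned long check_softlockup(unsigned long now, unsigned long touch_ts,
				      unsigned long thresh, unsigned long enabled)
{
	if (enabled & SOFT_WATCHDOG_ENABLED) {
		if (now > touch_ts + thresh)
			return now - touch_ts;
	}
	return 0;
}

int main(void)
{
	/* 35s since the watchdog thread last ran, threshold 20s */
	printf("%lu\n", check_softlockup(135, 100, 20, SOFT_WATCHDOG_ENABLED)); /* 35 */
	printf("%lu\n", check_softlockup(135, 100, 20, 0));                     /* 0  */
	return 0;
}
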
@@ -477,6 +491,21 @@ static void watchdog(unsigned int cpu)
__this_cpu_write(soft_lockup_hrtimer_cnt,
__this_cpu_read(hrtimer_interrupts));
__touch_watchdog();
+
+ /*
+ * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
+ * failure path. Check for failures that can occur asynchronously -
+ * for example, when CPUs are on-lined - and shut down the hardware
+ * perf event on each CPU accordingly.
+ *
+ * The only non-obvious place this bit can be cleared is through
+ * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
+ * pr_info here would be too noisy as it would result in a message
+ * every few seconds if the hardlockup was disabled but the softlockup
+ * enabled.
+ */
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ watchdog_nmi_disable(cpu);
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -492,14 +521,9 @@ static int watchdog_nmi_enable(unsigned int cpu)
struct perf_event_attr *wd_attr;
struct perf_event *event = per_cpu(watchdog_ev, cpu);
- /*
- * Some kernels need to default hard lockup detection to
- * 'disabled', for example a guest on a hypervisor.
- */
- if (!watchdog_hardlockup_detector_is_enabled()) {
- event = ERR_PTR(-ENOENT);
- goto handle_err;
- }
+ /* nothing to do if the hard lockup detector is disabled */
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ goto out;
/* is it already setup and enabled? */
if (event && event->state > PERF_EVENT_STATE_OFF)
@@ -515,7 +539,6 @@ static int watchdog_nmi_enable(unsigned int cpu)
/* Try to register using hardware perf events */
event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-handle_err:
/* save cpu0 error for future comparison */
if (cpu == 0 && IS_ERR(event))
cpu0_err = PTR_ERR(event);
@@ -527,6 +550,18 @@ handle_err:
goto out_save;
}
+ /*
+ * Disable the hard lockup detector if _any_ CPU fails to set up
+ * the hardware perf event. The watchdog() function checks
+ * the NMI_WATCHDOG_ENABLED bit periodically.
+ *
+ * The barriers are for syncing up watchdog_enabled across all the
+ * cpus, as clear_bit() does not use barriers.
+ */
+ smp_mb__before_atomic();
+ clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
+ smp_mb__after_atomic();
+
/* skip displaying the same error again */
if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
return PTR_ERR(event);
@@ -540,6 +575,9 @@ handle_err:
else
pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
cpu, PTR_ERR(event));
+
+ pr_info("Shutting down hard lockup detector on all cpus\n");
+
return PTR_ERR(event);
/* success path */
@@ -567,9 +605,47 @@ static void watchdog_nmi_disable(unsigned int cpu)
cpu0_err = 0;
}
}
+
+void watchdog_nmi_enable_all(void)
+{
+ int cpu;
+
+ mutex_lock(&watchdog_proc_mutex);
+
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ goto unlock;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ watchdog_nmi_enable(cpu);
+ put_online_cpus();
+
+unlock:
+ mutex_unlock(&watchdog_proc_mutex);
+}
+
+void watchdog_nmi_disable_all(void)
+{
+ int cpu;
+
+ mutex_lock(&watchdog_proc_mutex);
+
+ if (!watchdog_running)
+ goto unlock;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ watchdog_nmi_disable(cpu);
+ put_online_cpus();
+
+unlock:
+ mutex_unlock(&watchdog_proc_mutex);
+}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
+void watchdog_nmi_enable_all(void) {}
+void watchdog_nmi_disable_all(void) {}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
static struct smp_hotplug_thread watchdog_threads = {
@@ -600,7 +676,7 @@ static void restart_watchdog_hrtimer(void *info)
HRTIMER_MODE_REL_PINNED);
}
-static void update_timers(int cpu)
+static void update_watchdog(int cpu)
{
/*
* Make sure that perf event counter will adapt to a new
@@ -615,17 +691,17 @@ static void update_timers(int cpu)
watchdog_nmi_enable(cpu);
}
-static void update_timers_all_cpus(void)
+static void update_watchdog_all_cpus(void)
{
int cpu;
get_online_cpus();
for_each_online_cpu(cpu)
- update_timers(cpu);
+ update_watchdog(cpu);
put_online_cpus();
}
-static int watchdog_enable_all_cpus(bool sample_period_changed)
+static int watchdog_enable_all_cpus(void)
{
int err = 0;
@@ -635,8 +711,12 @@ static int watchdog_enable_all_cpus(bool sample_period_changed)
pr_err("Failed to create watchdog threads, disabled\n");
else
watchdog_running = 1;
- } else if (sample_period_changed) {
- update_timers_all_cpus();
+ } else {
+ /*
+ * Enable/disable the lockup detectors or
+ * change the sample period 'on the fly'.
+ */
+ update_watchdog_all_cpus();
}
return err;
@@ -654,58 +734,157 @@ static void watchdog_disable_all_cpus(void)
}
/*
- * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
+ * Update the run state of the lockup detectors.
*/
+static int proc_watchdog_update(void)
+{
+ int err = 0;
-int proc_dowatchdog(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ /*
+ * Watchdog threads won't be started if they are already active.
+ * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
+ * care of this. If those threads are already active, the sample
+ * period will be updated and the lockup detectors will be enabled
+ * or disabled 'on the fly'.
+ */
+ if (watchdog_enabled && watchdog_thresh)
+ err = watchdog_enable_all_cpus();
+ else
+ watchdog_disable_all_cpus();
+
+ return err;
+
+}
+
+/*
+ * common function for watchdog, nmi_watchdog and soft_watchdog parameter
+ *
+ * caller | table->data points to | 'which' contains the flag(s)
+ * -------------------|-----------------------|-----------------------------
+ * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
+ * | | with SOFT_WATCHDOG_ENABLED
+ * -------------------|-----------------------|-----------------------------
+ * proc_nmi_watchdog | nmi_watchdog_enabled | NMI_WATCHDOG_ENABLED
+ * -------------------|-----------------------|-----------------------------
+ * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
+ */
+static int proc_watchdog_common(int which, struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int err, old_thresh, old_enabled;
- bool old_hardlockup;
- static DEFINE_MUTEX(watchdog_proc_mutex);
+ int err, old, new;
+ int *watchdog_param = (int *)table->data;
mutex_lock(&watchdog_proc_mutex);
- old_thresh = ACCESS_ONCE(watchdog_thresh);
- old_enabled = ACCESS_ONCE(watchdog_user_enabled);
- old_hardlockup = watchdog_hardlockup_detector_is_enabled();
-
- err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (err || !write)
- goto out;
- set_sample_period();
/*
- * Watchdog threads shouldn't be enabled if they are
- * disabled. The 'watchdog_running' variable check in
- * watchdog_*_all_cpus() function takes care of this.
+ * If the parameter is being read return the state of the corresponding
+ * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
+ * run state of the lockup detectors.
*/
- if (watchdog_user_enabled && watchdog_thresh) {
+ if (!write) {
+ *watchdog_param = (watchdog_enabled & which) != 0;
+ err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ } else {
+ err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (err)
+ goto out;
+
/*
- * Prevent a change in watchdog_thresh accidentally overriding
- * the enablement of the hardlockup detector.
+ * There is a race window between fetching the current value
+ * from 'watchdog_enabled' and storing the new value. During
+ * this race window, watchdog_nmi_enable() can sneak in and
+ * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
+ * The 'cmpxchg' detects this race and the loop retries.
*/
- if (watchdog_user_enabled != old_enabled)
- watchdog_enable_hardlockup_detector(true);
- err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
- } else
- watchdog_disable_all_cpus();
+ do {
+ old = watchdog_enabled;
+ /*
+ * If the parameter value is not zero set the
+ * corresponding bit(s), else clear it(them).
+ */
+ if (*watchdog_param)
+ new = old | which;
+ else
+ new = old & ~which;
+ } while (cmpxchg(&watchdog_enabled, old, new) != old);
- /* Restore old values on failure */
- if (err) {
- watchdog_thresh = old_thresh;
- watchdog_user_enabled = old_enabled;
- watchdog_enable_hardlockup_detector(old_hardlockup);
+ /*
+ * Update the run state of the lockup detectors.
+ * Restore 'watchdog_enabled' on failure.
+ */
+ err = proc_watchdog_update();
+ if (err)
+ watchdog_enabled = old;
}
out:
mutex_unlock(&watchdog_proc_mutex);
return err;
}
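
[Editorial sketch, not part of the patch.] The cmpxchg() loop in proc_watchdog_common() exists because watchdog_nmi_enable() can clear the NMI bit asynchronously between the proc handler's read of watchdog_enabled and its store; on a collision the loop simply re-reads and retries. The same pattern in portable C11 atomics, with made-up names, looks like this (on failure, atomic_compare_exchange_weak() refreshes 'old' with the current value, which is exactly the retry the kernel loop performs by re-reading watchdog_enabled):

#include <stdatomic.h>
#include <stdio.h>

#define NMI_WATCHDOG_ENABLED  (1UL << 0)
#define SOFT_WATCHDOG_ENABLED (1UL << 1)

static _Atomic unsigned long watchdog_bits =
	NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED;

/* Set or clear 'which' without losing a concurrent update to other bits. */
static void update_bits(unsigned long which, int enable)
{
	unsigned long old, new;

	old = atomic_load(&watchdog_bits);
	do {
		new = enable ? (old | which) : (old & ~which);
	} while (!atomic_compare_exchange_weak(&watchdog_bits, &old, new));
}

int main(void)
{
	update_bits(NMI_WATCHDOG_ENABLED, 0);   /* like writing 0 to nmi_watchdog */
	printf("bits = %#lx\n", (unsigned long)atomic_load(&watchdog_bits)); /* 0x2 */

	update_bits(NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED, 1); /* watchdog=1 */
	printf("bits = %#lx\n", (unsigned long)atomic_load(&watchdog_bits)); /* 0x3 */
	return 0;
}
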
+
+/*
+ * /proc/sys/kernel/watchdog
+ */
+int proc_watchdog(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
+ table, write, buffer, lenp, ppos);
+}
+
+/*
+ * /proc/sys/kernel/nmi_watchdog
+ */
+int proc_nmi_watchdog(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
+ table, write, buffer, lenp, ppos);
+}
+
+/*
+ * /proc/sys/kernel/soft_watchdog
+ */
+int proc_soft_watchdog(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
+ table, write, buffer, lenp, ppos);
+}
+
+/*
+ * /proc/sys/kernel/watchdog_thresh
+ */
+int proc_watchdog_thresh(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int err, old;
+
+ mutex_lock(&watchdog_proc_mutex);
+
+ old = ACCESS_ONCE(watchdog_thresh);
+ err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (err || !write)
+ goto out;
+
+ /*
+ * Update the sample period.
+ * Restore 'watchdog_thresh' on failure.
+ */
+ set_sample_period();
+ err = proc_watchdog_update();
+ if (err)
+ watchdog_thresh = old;
+out:
+ mutex_unlock(&watchdog_proc_mutex);
+ return err;
+}
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
set_sample_period();
- if (watchdog_user_enabled)
- watchdog_enable_all_cpus(false);
+ if (watchdog_enabled)
+ watchdog_enable_all_cpus();
}
diff --git a/lib/Kconfig b/lib/Kconfig
index 87da53bb1fef..601965a948e8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -18,9 +18,8 @@ config HAVE_ARCH_BITREVERSE
default n
depends on BITREVERSE
help
- This option provides an config for the architecture which have instruction
- can do bitreverse operation, we use the hardware instruction if the architecture
- have this capability.
+ This option enables the use of hardware bit-reversal instructions on
+ architectures which support such operations.
config RATIONAL
bool
@@ -397,10 +396,6 @@ config CPUMASK_OFFSTACK
them on the stack. This is a bit more expensive, but avoids
stack overflow.
-config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
- bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
- depends on BROKEN
-
config CPU_RMAP
bool
depends on SMP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 36b6fa88ce5b..ba2b0c87e65b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1193,16 +1193,7 @@ config DEBUG_CREDENTIALS
menu "RCU Debugging"
config PROVE_RCU
- bool "RCU debugging: prove RCU correctness"
- depends on PROVE_LOCKING
- default n
- help
- This feature enables lockdep extensions that check for correct
- use of RCU APIs. This is currently under development. Say Y
- if you want to debug RCU usage or help work on the PROVE_RCU
- feature.
-
- Say N if you are unsure.
+ def_bool PROVE_LOCKING
config PROVE_RCU_REPEATEDLY
bool "RCU debugging: don't disable PROVE_RCU on first splat"
@@ -1270,6 +1261,31 @@ config RCU_TORTURE_TEST_RUNNABLE
Say N here if you want the RCU torture tests to start only
after being manually enabled via /proc.
+config RCU_TORTURE_TEST_SLOW_INIT
+ bool "Slow down RCU grace-period initialization to expose races"
+ depends on RCU_TORTURE_TEST
+ help
+ This option makes grace-period initialization block for a
+ few jiffies between initializing each pair of consecutive
+ rcu_node structures. This helps to expose races involving
+ grace-period initialization; in other words, it makes your
+ kernel less stable. It can also greatly increase grace-period
+ latency, especially on systems with large numbers of CPUs.
+ This is useful when torture-testing RCU, but in almost no
+ other circumstance.
+
+ Say Y here if you want your system to crash and hang more often.
+ Say N if you want a sane system.
+
+config RCU_TORTURE_TEST_SLOW_INIT_DELAY
+ int "How much to slow down RCU grace-period initialization"
+ range 0 5
+ default 3
+ depends on RCU_TORTURE_TEST_SLOW_INIT
+ help
+ This option specifies the number of jiffies to wait between
+ each rcu_node structure initialization.
+
config RCU_CPU_STALL_TIMEOUT
int "RCU CPU stall timeout in seconds"
depends on RCU_STALL_COMMON
@@ -1745,6 +1761,18 @@ config TEST_UDELAY
If unsure, say N.
+config MEMTEST
+ bool "Memtest"
+ depends on HAVE_MEMBLOCK
+ ---help---
+ This option adds the kernel boot parameter 'memtest', which selects
+ how many memory test patterns to run at boot.
+ memtest=0 means disabled (the default);
+ memtest=1 means run 1 test pattern;
+ ...
+ memtest=17 means run 17 test patterns.
+ If you are unsure how to answer this question, answer N.
+
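As a usage note, the parameter described above would be passed on the kernel command line; the exact boot entry below is only a hypothetical example.

	linux /boot/vmlinuz root=/dev/sda1 memtest=4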
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fecaedc80a2..777eda7d1ab4 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -10,8 +10,11 @@ config KASAN
help
Enables kernel address sanitizer - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
- This is strictly debugging feature. It consumes about 1/8
- of available memory and brings about ~x3 performance slowdown.
+ This is strictly a debugging feature and it requires a gcc version
+ of 4.9.2 or later. Detection of out-of-bounds accesses to stack or
+ global variables requires gcc 5.0 or later.
+ This feature consumes about 1/8 of available memory and causes
+ roughly a 3x performance slowdown.
For better error detection enable CONFIG_STACKTRACE,
and add slub_debug=U to boot cmdline.
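As a usage note, a debug build following this help text might carry a config fragment like the one below; the exact option set is an assumption rather than something the patch prescribes.

	# Hypothetical .config fragment for a KASAN debug build (gcc >= 4.9.2)
	CONFIG_KASAN=y
	CONFIG_KASAN_INLINE=y		# needs gcc >= 5.0; faster, but larger .text
	CONFIG_STACKTRACE=y
	# boot with: slub_debug=U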
@@ -40,6 +43,7 @@ config KASAN_INLINE
memory accesses. This is faster than outline (in some workloads
it gives about x2 boost over outline instrumentation), but
make kernel's .text size much bigger.
+ This requires a gcc version of 5.0 or later.
endchoice
diff --git a/lib/Makefile b/lib/Makefile
index 58f74d2dd396..6c37933336a0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,7 +25,7 @@ obj-y += lockref.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
- bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
+ bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -106,7 +106,7 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
-obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index d456f4c15a9f..64c0926f5dd8 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -42,36 +42,6 @@
* for the best explanations of this ordering.
*/
-int __bitmap_empty(const unsigned long *bitmap, unsigned int bits)
-{
- unsigned int k, lim = bits/BITS_PER_LONG;
- for (k = 0; k < lim; ++k)
- if (bitmap[k])
- return 0;
-
- if (bits % BITS_PER_LONG)
- if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
- return 0;
-
- return 1;
-}
-EXPORT_SYMBOL(__bitmap_empty);
-
-int __bitmap_full(const unsigned long *bitmap, unsigned int bits)
-{
- unsigned int k, lim = bits/BITS_PER_LONG;
- for (k = 0; k < lim; ++k)
- if (~bitmap[k])
- return 0;
-
- if (bits % BITS_PER_LONG)
- if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
- return 0;
-
- return 1;
-}
-EXPORT_SYMBOL(__bitmap_full);
-
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
diff --git a/lib/cpumask.c b/lib/cpumask.c
index b6513a9f2892..830dd5dec40f 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -5,27 +5,6 @@
#include <linux/export.h>
#include <linux/bootmem.h>
-int __first_cpu(const cpumask_t *srcp)
-{
- return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
-}
-EXPORT_SYMBOL(__first_cpu);
-
-int __next_cpu(int n, const cpumask_t *srcp)
-{
- return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
-}
-EXPORT_SYMBOL(__next_cpu);
-
-#if NR_CPUS > 64
-int __next_cpu_nr(int n, const cpumask_t *srcp)
-{
- return min_t(int, nr_cpu_ids,
- find_next_bit(srcp->bits, nr_cpu_ids, n+1));
-}
-EXPORT_SYMBOL(__next_cpu_nr);
-#endif
-
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
* @n: the cpu prior to the place to search (ie. return will be > @n)
@@ -37,10 +16,11 @@ EXPORT_SYMBOL(__next_cpu_nr);
int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
- while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
- if (cpumask_test_cpu(n, src2p))
- break;
- return n;
+ struct cpumask tmp;
+
+ if (cpumask_and(&tmp, src1p, src2p))
+ return cpumask_next(n, &tmp);
+ return nr_cpu_ids;
}
EXPORT_SYMBOL(cpumask_next_and);
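The rewritten helper computes the intersection first and then searches it. A minimal userspace sketch of the same idea on plain 64-bit words (not the kernel's cpumask API; names local to this sketch):

/* Userspace sketch: next set bit strictly after 'n' in (a & b). */
#include <stdio.h>

static int next_and(int n, unsigned long long a, unsigned long long b, int nbits)
{
	unsigned long long tmp = a & b;

	if (n + 1 >= nbits)
		return nbits;
	tmp &= ~0ULL << (n + 1);	/* drop bits <= n */
	return tmp ? __builtin_ffsll(tmp) - 1 : nbits;
}

int main(void)
{
	/* CPUs {0,2,5} intersected with {2,3,5}: after CPU 2 comes CPU 5. */
	printf("%d\n", next_and(2, 0x25ULL, 0x2cULL, 64));	/* prints 5 */
	return 0;
}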
@@ -89,13 +69,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
dump_stack();
}
#endif
- /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
- if (*mask) {
- unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
- unsigned int tail;
- tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
- memset(ptr + cpumask_size() - tail, 0, tail);
- }
return *mask != NULL;
}
diff --git a/lib/devres.c b/lib/devres.c
index 0f1dd2e9d2c1..fbe2aac522e6 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -72,6 +72,34 @@ void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap_nocache);
/**
+ * devm_ioremap_wc - Managed ioremap_wc()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap_wc(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_wc);
+
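A hedged sketch of how a driver might use the new helper in its probe path; the foo_probe() function, device and resource layout are hypothetical, and only devm_ioremap_wc() itself comes from the patch above.

/* Hypothetical probe path using devm_ioremap_wc() (sketch, not a real driver). */
#include <linux/platform_device.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *fb;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Write-combining mapping; unmapped automatically on driver detach. */
	fb = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res));
	if (!fb)
		return -ENOMEM;

	platform_set_drvdata(pdev, fb);
	return 0;
}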
+/**
* devm_iounmap - Managed iounmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
diff --git a/lib/div64.c b/lib/div64.c
index 4382ad77777e..19ea7ed4b948 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(div64_u64_rem);
* by the book 'Hacker's Delight'. The original source and full proof
* can be found here and is available for use without restriction.
*
- * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
+ * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
*/
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 9722bd2dbc9b..ae4b65e17e64 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -361,7 +361,7 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
unsigned int range = 0;
while (range <= max_range) {
- entry = __hash_bucket_find(*bucket, &index, containing_match);
+ entry = __hash_bucket_find(*bucket, ref, containing_match);
if (entry)
return entry;
diff --git a/lib/find_bit.c b/lib/find_bit.c
new file mode 100644
index 000000000000..18072ea9c20e
--- /dev/null
+++ b/lib/find_bit.c
@@ -0,0 +1,193 @@
+/* bit search implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Copyright (C) 2008 IBM Corporation
+ * 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
+ * (Inspired by David Howell's find_next_bit implementation)
+ *
+ * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
+ * size and improve performance, 2015.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#if !defined(find_next_bit) || !defined(find_next_zero_bit)
+
+/*
+ * This is a common helper function for find_next_bit and
+ * find_next_zero_bit. The difference is the "invert" argument, which
+ * is XORed with each fetched word before the word is searched for set bits.
+ */
+static unsigned long _find_next_bit(const unsigned long *addr,
+ unsigned long nbits, unsigned long start, unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_LONG] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= BITMAP_FIRST_WORD_MASK(start);
+ start = round_down(start, BITS_PER_LONG);
+
+ while (!tmp) {
+ start += BITS_PER_LONG;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_LONG] ^ invert;
+ }
+
+ return min(start + __ffs(tmp), nbits);
+}
+#endif
+
+#ifndef find_next_bit
+/*
+ * Find the next set bit in a memory region.
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ return _find_next_bit(addr, size, offset, 0UL);
+}
+EXPORT_SYMBOL(find_next_bit);
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ return _find_next_bit(addr, size, offset, ~0UL);
+}
+EXPORT_SYMBOL(find_next_zero_bit);
+#endif
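The invert trick lets one helper back both searches. A standalone userspace version of the same word-at-a-time algorithm, with the kernel macros replaced by local equivalents, can be compiled directly to check the behaviour:

/* Userspace re-implementation of the invert-based search (illustration only). */
#include <stdio.h>

#define LBITS (8 * sizeof(unsigned long))
#define FIRST_WORD_MASK(start) (~0UL << ((start) % LBITS))

static unsigned long next_bit(const unsigned long *addr, unsigned long nbits,
			      unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / LBITS] ^ invert;
	tmp &= FIRST_WORD_MASK(start);	/* mask off bits below 'start' */
	start -= start % LBITS;		/* round down to a word boundary */

	while (!tmp) {
		start += LBITS;
		if (start >= nbits)
			return nbits;
		tmp = addr[start / LBITS] ^ invert;
	}

	start += __builtin_ctzl(tmp);	/* lowest set bit in the word */
	return start < nbits ? start : nbits;
}

int main(void)
{
	unsigned long map[2] = { 0x10UL, 0x1UL };	/* bits 4 and 64 set (64-bit long assumed) */

	printf("%lu\n", next_bit(map, 128, 5, 0UL));	/* next set bit from offset 5: 64 */
	printf("%lu\n", next_bit(map, 128, 0, ~0UL));	/* first clear bit: 0 */
	return 0;
}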
+
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+ if (addr[idx])
+ return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
+ }
+
+ return size;
+}
+EXPORT_SYMBOL(find_first_bit);
+#endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+ if (addr[idx] != ~0UL)
+ return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
+ }
+
+ return size;
+}
+EXPORT_SYMBOL(find_first_zero_bit);
+#endif
+
+#ifndef find_last_bit
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (size) {
+ unsigned long val = BITMAP_LAST_WORD_MASK(size);
+ unsigned long idx = (size-1) / BITS_PER_LONG;
+
+ do {
+ val &= addr[idx];
+ if (val)
+ return idx * BITS_PER_LONG + __fls(val);
+
+ val = ~0ul;
+ } while (idx--);
+ }
+ return size;
+}
+EXPORT_SYMBOL(find_last_bit);
+#endif
+
+#ifdef __BIG_ENDIAN
+
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+ return (unsigned long) __swab64((u64) y);
+#elif BITS_PER_LONG == 32
+ return (unsigned long) __swab32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
+static unsigned long _find_next_bit_le(const unsigned long *addr,
+ unsigned long nbits, unsigned long start, unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_LONG] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
+ start = round_down(start, BITS_PER_LONG);
+
+ while (!tmp) {
+ start += BITS_PER_LONG;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_LONG] ^ invert;
+ }
+
+ return min(start + __ffs(ext2_swab(tmp)), nbits);
+}
+#endif
+
+#ifndef find_next_zero_bit_le
+unsigned long find_next_zero_bit_le(const void *addr, unsigned
+ long size, unsigned long offset)
+{
+ return _find_next_bit_le(addr, size, offset, ~0UL);
+}
+EXPORT_SYMBOL(find_next_zero_bit_le);
+#endif
+
+#ifndef find_next_bit_le
+unsigned long find_next_bit_le(const void *addr, unsigned
+ long size, unsigned long offset)
+{
+ return _find_next_bit_le(addr, size, offset, 0UL);
+}
+EXPORT_SYMBOL(find_next_bit_le);
+#endif
+
+#endif /* __BIG_ENDIAN */
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
deleted file mode 100644
index 91ca09fbf6f9..000000000000
--- a/lib/find_last_bit.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/* find_last_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2008 IBM Corporation
- * Written by Rusty Russell <rusty@rustcorp.com.au>
- * (Inspired by David Howell's find_next_bit implementation)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/export.h>
-#include <asm/types.h>
-#include <asm/byteorder.h>
-
-#ifndef find_last_bit
-
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
- unsigned long words;
- unsigned long tmp;
-
- /* Start at final word. */
- words = size / BITS_PER_LONG;
-
- /* Partial final word? */
- if (size & (BITS_PER_LONG-1)) {
- tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
- - (size & (BITS_PER_LONG-1)))));
- if (tmp)
- goto found;
- }
-
- while (words) {
- tmp = addr[--words];
- if (tmp) {
-found:
- return words * BITS_PER_LONG + __fls(tmp);
- }
- }
-
- /* Not found */
- return size;
-}
-EXPORT_SYMBOL(find_last_bit);
-
-#endif
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
deleted file mode 100644
index 0cbfc0b4398f..000000000000
--- a/lib/find_next_bit.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/* find_next_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/export.h>
-#include <asm/types.h>
-#include <asm/byteorder.h>
-
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-
-#ifndef find_next_bit
-/*
- * Find the next set bit in a memory region.
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-EXPORT_SYMBOL(find_next_bit);
-#endif
-
-#ifndef find_next_zero_bit
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (BITS_PER_LONG - offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
-#endif
-
-#ifndef find_first_bit
-/*
- * Find the first set bit in a memory region.
- */
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
- const unsigned long *p = addr;
- unsigned long result = 0;
- unsigned long tmp;
-
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
-
- tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found:
- return result + __ffs(tmp);
-}
-EXPORT_SYMBOL(find_first_bit);
-#endif
-
-#ifndef find_first_zero_bit
-/*
- * Find the first cleared bit in a memory region.
- */
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
-{
- const unsigned long *p = addr;
- unsigned long result = 0;
- unsigned long tmp;
-
- while (size & ~(BITS_PER_LONG-1)) {
- if (~(tmp = *(p++)))
- goto found;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
-
- tmp = (*p) | (~0UL << size);
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found:
- return result + ffz(tmp);
-}
-EXPORT_SYMBOL(find_first_zero_bit);
-#endif
-
-#ifdef __BIG_ENDIAN
-
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swabp(const unsigned long * x)
-{
-#if BITS_PER_LONG == 64
- return (unsigned long) __swab64p((u64 *) x);
-#elif BITS_PER_LONG == 32
- return (unsigned long) __swab32p((u32 *) x);
-#else
-#error BITS_PER_LONG not defined
-#endif
-}
-
-/* include/linux/byteorder doesn't support "unsigned long" type */
-static inline unsigned long ext2_swab(const unsigned long y)
-{
-#if BITS_PER_LONG == 64
- return (unsigned long) __swab64((u64) y);
-#elif BITS_PER_LONG == 32
- return (unsigned long) __swab32((u32) y);
-#else
-#error BITS_PER_LONG not defined
-#endif
-}
-
-#ifndef find_next_zero_bit_le
-unsigned long find_next_zero_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- p += BITOP_WORD(offset);
- size -= result;
- offset &= (BITS_PER_LONG - 1UL);
- if (offset) {
- tmp = ext2_swabp(p++);
- tmp |= (~0UL >> (BITS_PER_LONG - offset));
- if (size < BITS_PER_LONG)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
-
- while (size & ~(BITS_PER_LONG - 1)) {
- if (~(tmp = *(p++)))
- goto found_middle_swap;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = ext2_swabp(p);
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. Skip ffz */
-found_middle:
- return result + ffz(tmp);
-
-found_middle_swap:
- return result + ffz(ext2_swab(tmp));
-}
-EXPORT_SYMBOL(find_next_zero_bit_le);
-#endif
-
-#ifndef find_next_bit_le
-unsigned long find_next_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- p += BITOP_WORD(offset);
- size -= result;
- offset &= (BITS_PER_LONG - 1UL);
- if (offset) {
- tmp = ext2_swabp(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
-
- while (size & ~(BITS_PER_LONG - 1)) {
- tmp = *(p++);
- if (tmp)
- goto found_middle_swap;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = ext2_swabp(p);
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-
-found_middle_swap:
- return result + __ffs(ext2_swab(tmp));
-}
-EXPORT_SYMBOL(find_next_bit_le);
-#endif
-
-#endif /* __BIG_ENDIAN */
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
new file mode 100644
index 000000000000..df30632f0bef
--- /dev/null
+++ b/lib/iommu-common.c
@@ -0,0 +1,270 @@
+/*
+ * IOMMU mmap management and range allocation functions.
+ * Based almost entirely upon the powerpc iommu allocator.
+ */
+
+#include <linux/export.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/iommu-helper.h>
+#include <linux/iommu-common.h>
+#include <linux/dma-mapping.h>
+#include <linux/hash.h>
+
+#ifndef DMA_ERROR_CODE
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+#endif
+
+static unsigned long iommu_large_alloc = 15;
+
+static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
+
+static inline bool need_flush(struct iommu_map_table *iommu)
+{
+ return (iommu->lazy_flush != NULL &&
+ (iommu->flags & IOMMU_NEED_FLUSH) != 0);
+}
+
+static inline void set_flush(struct iommu_map_table *iommu)
+{
+ iommu->flags |= IOMMU_NEED_FLUSH;
+}
+
+static inline void clear_flush(struct iommu_map_table *iommu)
+{
+ iommu->flags &= ~IOMMU_NEED_FLUSH;
+}
+
+static void setup_iommu_pool_hash(void)
+{
+ unsigned int i;
+ static bool do_once;
+
+ if (do_once)
+ return;
+ do_once = true;
+ for_each_possible_cpu(i)
+ per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+}
+
+/*
+ * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
+ * is the number of table entries. If `large_pool' is set to true,
+ * the top 1/4 of the table will be set aside for pool allocations
+ * of more than iommu_large_alloc pages.
+ */
+void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+ unsigned long num_entries,
+ u32 table_shift,
+ void (*lazy_flush)(struct iommu_map_table *),
+ bool large_pool, u32 npools,
+ bool skip_span_boundary_check)
+{
+ unsigned int start, i;
+ struct iommu_pool *p = &(iommu->large_pool);
+
+ setup_iommu_pool_hash();
+ if (npools == 0)
+ iommu->nr_pools = IOMMU_NR_POOLS;
+ else
+ iommu->nr_pools = npools;
+ BUG_ON(npools > IOMMU_NR_POOLS);
+
+ iommu->table_shift = table_shift;
+ iommu->lazy_flush = lazy_flush;
+ start = 0;
+ if (skip_span_boundary_check)
+ iommu->flags |= IOMMU_NO_SPAN_BOUND;
+ if (large_pool)
+ iommu->flags |= IOMMU_HAS_LARGE_POOL;
+
+ if (!large_pool)
+ iommu->poolsize = num_entries/iommu->nr_pools;
+ else
+ iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
+ for (i = 0; i < iommu->nr_pools; i++) {
+ spin_lock_init(&(iommu->pools[i].lock));
+ iommu->pools[i].start = start;
+ iommu->pools[i].hint = start;
+ start += iommu->poolsize; /* start for next pool */
+ iommu->pools[i].end = start - 1;
+ }
+ if (!large_pool)
+ return;
+ /* initialize large_pool */
+ spin_lock_init(&(p->lock));
+ p->start = start;
+ p->hint = p->start;
+ p->end = num_entries;
+}
+EXPORT_SYMBOL(iommu_tbl_pool_init);
+
+unsigned long iommu_tbl_range_alloc(struct device *dev,
+ struct iommu_map_table *iommu,
+ unsigned long npages,
+ unsigned long *handle,
+ unsigned long mask,
+ unsigned int align_order)
+{
+ unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
+ unsigned long n, end, start, limit, boundary_size;
+ struct iommu_pool *pool;
+ int pass = 0;
+ unsigned int pool_nr;
+ unsigned int npools = iommu->nr_pools;
+ unsigned long flags;
+ bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
+ bool largealloc = (large_pool && npages > iommu_large_alloc);
+ unsigned long shift;
+ unsigned long align_mask = 0;
+
+ if (align_order > 0)
+ align_mask = 0xffffffffffffffffl >> (64 - align_order);
+
+ /* Sanity check */
+ if (unlikely(npages == 0)) {
+ WARN_ON_ONCE(1);
+ return DMA_ERROR_CODE;
+ }
+
+ if (largealloc) {
+ pool = &(iommu->large_pool);
+ pool_nr = 0; /* to keep compiler happy */
+ } else {
+ /* pick out pool_nr */
+ pool_nr = pool_hash & (npools - 1);
+ pool = &(iommu->pools[pool_nr]);
+ }
+ spin_lock_irqsave(&pool->lock, flags);
+
+ again:
+ if (pass == 0 && handle && *handle &&
+ (*handle >= pool->start) && (*handle < pool->end))
+ start = *handle;
+ else
+ start = pool->hint;
+
+ limit = pool->end;
+
+ /* The case below can happen if we have a small segment appended
+ * to a large one, or when the previous alloc was at the very end of
+ * the available space. If so, go back to the beginning. If a
+ * flush is needed, it will get done based on the return value
+ * from iommu_area_alloc() below.
+ */
+ if (start >= limit)
+ start = pool->start;
+ shift = iommu->table_map_base >> iommu->table_shift;
+ if (limit + shift > mask) {
+ limit = mask - shift + 1;
+ /* If we're constrained on address range, first try
+ * at the masked hint to avoid O(n) search complexity,
+ * but on second pass, start at 0 in pool 0.
+ */
+ if ((start & mask) >= limit || pass > 0) {
+ spin_unlock(&(pool->lock));
+ pool = &(iommu->pools[0]);
+ spin_lock(&(pool->lock));
+ start = pool->start;
+ } else {
+ start &= mask;
+ }
+ }
+
+ if (dev)
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ 1 << iommu->table_shift);
+ else
+ boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
+
+ boundary_size = boundary_size >> iommu->table_shift;
+ /*
+ * If skip_span_boundary_check was set during init, we set things
+ * up so that iommu_is_span_boundary() merely checks whether
+ * (index + npages) < num_tsb_entries.
+ */
+ if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
+ shift = 0;
+ boundary_size = iommu->poolsize * iommu->nr_pools;
+ }
+ n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
+ boundary_size, align_mask);
+ if (n == -1) {
+ if (likely(pass == 0)) {
+ /* First failure, rescan from the beginning. */
+ pool->hint = pool->start;
+ set_flush(iommu);
+ pass++;
+ goto again;
+ } else if (!largealloc && pass <= iommu->nr_pools) {
+ spin_unlock(&(pool->lock));
+ pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
+ pool = &(iommu->pools[pool_nr]);
+ spin_lock(&(pool->lock));
+ pool->hint = pool->start;
+ set_flush(iommu);
+ pass++;
+ goto again;
+ } else {
+ /* give up */
+ n = DMA_ERROR_CODE;
+ goto bail;
+ }
+ }
+ if (n < pool->hint || need_flush(iommu)) {
+ clear_flush(iommu);
+ iommu->lazy_flush(iommu);
+ }
+
+ end = n + npages;
+ pool->hint = end;
+
+ /* Update handle for SG allocations */
+ if (handle)
+ *handle = end;
+bail:
+ spin_unlock_irqrestore(&(pool->lock), flags);
+
+ return n;
+}
+EXPORT_SYMBOL(iommu_tbl_range_alloc);
+
+static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
+ unsigned long entry)
+{
+ struct iommu_pool *p;
+ unsigned long largepool_start = tbl->large_pool.start;
+ bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
+
+ /* The large pool is the last pool at the top of the table */
+ if (large_pool && entry >= largepool_start) {
+ p = &tbl->large_pool;
+ } else {
+ unsigned int pool_nr = entry / tbl->poolsize;
+
+ BUG_ON(pool_nr >= tbl->nr_pools);
+ p = &tbl->pools[pool_nr];
+ }
+ return p;
+}
+
+/* Caller supplies the index of the entry into the iommu map table
+ * itself when the mapping from dma_addr to the entry is not the
+ * default addr->entry mapping below.
+ */
+void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
+ unsigned long npages, unsigned long entry)
+{
+ struct iommu_pool *pool;
+ unsigned long flags;
+ unsigned long shift = iommu->table_shift;
+
+ if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
+ entry = (dma_addr - iommu->table_map_base) >> shift;
+ pool = get_pool(iommu, entry);
+
+ spin_lock_irqsave(&(pool->lock), flags);
+ bitmap_clear(iommu->map, entry, npages);
+ spin_unlock_irqrestore(&(pool->lock), flags);
+}
+EXPORT_SYMBOL(iommu_tbl_range_free);
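Based only on the three signatures above, a driver-side calling pattern might look like the sketch below; the table size, shift, flush callback and my_* names are made-up placeholders, and the driver is assumed to have set up the allocation bitmap and base address elsewhere.

/* Hypothetical driver-side usage of the new allocator (sketch only). */
#include <linux/iommu-common.h>

#define MY_TABLE_ENTRIES	(1UL << 16)	/* placeholder table size */
#define MY_TABLE_SHIFT		13		/* placeholder: 8K IOMMU pages */

static struct iommu_map_table my_tbl;

static void my_lazy_flush(struct iommu_map_table *tbl)
{
	/* hardware-specific IOTLB flush would go here */
}

static void my_iommu_setup(void)
{
	/* my_tbl.map (allocation bitmap) and my_tbl.table_map_base are
	 * assumed to be initialized elsewhere by the driver. */
	iommu_tbl_pool_init(&my_tbl, MY_TABLE_ENTRIES, MY_TABLE_SHIFT,
			    my_lazy_flush, false /* no large pool */,
			    0 /* default pool count */, false);
}

static u64 my_map(struct device *dev, unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &my_tbl, npages, NULL,
				      ~0UL /* no addressing limit */, 0);
	if (entry == DMA_ERROR_CODE)
		return DMA_ERROR_CODE;
	return my_tbl.table_map_base + (entry << MY_TABLE_SHIFT);
}

static void my_unmap(u64 dma_addr, unsigned long npages)
{
	/* DMA_ERROR_CODE as 'entry' selects the default addr->entry mapping. */
	iommu_tbl_range_free(&my_tbl, dma_addr, npages, DMA_ERROR_CODE);
}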
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 0c9216c48762..86c8911b0e3a 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -13,6 +13,43 @@
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int __read_mostly ioremap_pud_capable;
+static int __read_mostly ioremap_pmd_capable;
+static int __read_mostly ioremap_huge_disabled;
+
+static int __init set_nohugeiomap(char *str)
+{
+ ioremap_huge_disabled = 1;
+ return 0;
+}
+early_param("nohugeiomap", set_nohugeiomap);
+
+void __init ioremap_huge_init(void)
+{
+ if (!ioremap_huge_disabled) {
+ if (arch_ioremap_pud_supported())
+ ioremap_pud_capable = 1;
+ if (arch_ioremap_pmd_supported())
+ ioremap_pmd_capable = 1;
+ }
+}
+
+static inline int ioremap_pud_enabled(void)
+{
+ return ioremap_pud_capable;
+}
+
+static inline int ioremap_pmd_enabled(void)
+{
+ return ioremap_pmd_capable;
+}
+
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int ioremap_pud_enabled(void) { return 0; }
+static inline int ioremap_pmd_enabled(void) { return 0; }
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
@@ -43,6 +80,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
+
+ if (ioremap_pmd_enabled() &&
+ ((next - addr) == PMD_SIZE) &&
+ IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+ if (pmd_set_huge(pmd, phys_addr + addr, prot))
+ continue;
+ }
+
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
@@ -61,6 +106,14 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
+
+ if (ioremap_pud_enabled() &&
+ ((next - addr) == PUD_SIZE) &&
+ IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+ if (pud_set_huge(pud, phys_addr + addr, prot))
+ continue;
+ }
+
if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
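The huge-mapping fast path only fires when the virtual range covers a whole PMD/PUD entry and the physical address is aligned to it; a small helper expressing the PMD-level condition (the name is local to this sketch) would be:

/* Sketch of the PMD fast-path condition used above (illustration only). */
static bool can_map_as_pmd(unsigned long addr, unsigned long next,
			   phys_addr_t phys_addr)
{
	return ioremap_pmd_enabled() &&
	       (next - addr) == PMD_SIZE &&
	       IS_ALIGNED(phys_addr + addr, PMD_SIZE);
}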
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 9d96e283520c..75232ad0a5e7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -317,6 +317,32 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
+/*
+ * Fault in one or more iovecs of the given iov_iter, up to a maximum of
+ * 'bytes' bytes. For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Return 0 on success, or non-zero if the memory could not be accessed (e.g.
+ * because the address is invalid).
+ */
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+{
+ size_t skip = i->iov_offset;
+ const struct iovec *iov;
+ int err;
+ struct iovec v;
+
+ if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+ iterate_iovec(i, bytes, v, iov, skip, ({
+ err = fault_in_multipages_readable(v.iov_base,
+ v.iov_len);
+ if (unlikely(err))
+ return err;
+ 0;}))
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+
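A hedged sketch of the intended call site: pre-fault the user pages covered by an iov_iter before entering a section that cannot take page faults. The surrounding write path is hypothetical; only the new helper comes from the patch above.

/* Hypothetical write path using the new multi-page pre-fault (sketch only). */
static ssize_t foo_perform_write(struct iov_iter *from)
{
	size_t bytes = min(iov_iter_count(from), (size_t)PAGE_SIZE);

	/* Touch the user pages now, while it is still safe to sleep on a fault. */
	if (iov_iter_fault_in_multipages_readable(from, bytes))
		return -EFAULT;

	/* ... atomic section: map the destination page, copy_from_iter(), ... */
	return bytes;
}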
void iov_iter_init(struct iov_iter *i, int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
@@ -766,3 +792,60 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
flags);
}
EXPORT_SYMBOL(dup_iter);
+
+int import_iovec(int type, const struct iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i)
+{
+ ssize_t n;
+ struct iovec *p;
+ n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+ *iov, &p);
+ if (n < 0) {
+ if (p != *iov)
+ kfree(p);
+ *iov = NULL;
+ return n;
+ }
+ iov_iter_init(i, type, p, nr_segs, n);
+ *iov = p == *iov ? NULL : p;
+ return 0;
+}
+EXPORT_SYMBOL(import_iovec);
+
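Based only on the signature above, a hypothetical read-style handler might use it as follows; UIO_FASTIOV is the usual on-stack fast path and the rest of the handler is invented for illustration.

/* Hypothetical caller of import_iovec() (sketch, I/O details omitted). */
#include <linux/uio.h>
#include <linux/slab.h>

static ssize_t foo_readv(const struct iovec __user *uvec, unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... perform the I/O through 'iter' (e.g. copy_to_iter()) ... */
	ret = iov_iter_count(&iter);

	kfree(iov);	/* NULL when the on-stack array was used; kfree(NULL) is a no-op */
	return ret;
}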
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i)
+{
+ ssize_t n;
+ struct iovec *p;
+ n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+ *iov, &p);
+ if (n < 0) {
+ if (p != *iov)
+ kfree(p);
+ *iov = NULL;
+ return n;
+ }
+ iov_iter_init(i, type, p, nr_segs, n);
+ *iov = p == *iov ? NULL : p;
+ return 0;
+}
+#endif
+
+int import_single_range(int rw, void __user *buf, size_t len,
+ struct iovec *iov, struct iov_iter *i)
+{
+ if (len > MAX_RW_COUNT)
+ len = MAX_RW_COUNT;
+ if (unlikely(!access_ok(!rw, buf, len)))
+ return -EFAULT;
+
+ iov->iov_base = buf;
+ iov->iov_len = len;
+ iov_iter_init(i, rw, iov, 1, len);
+ return 0;
+}
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 852c81e3ba9a..028f5d996eef 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -247,10 +247,11 @@ size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
* progress) and "changed", when this in fact lead to an successful
* update of the cache.
*/
- return seq_printf(seq, "\t%s: used:%u/%u "
- "hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
- lc->name, lc->used, lc->nr_elements,
- lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
+ seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
+ lc->name, lc->used, lc->nr_elements,
+ lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
+
+ return 0;
}
static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index dbef2314901e..975c6e0434bd 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -131,11 +131,12 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
static inline const struct raid6_calls *raid6_choose_gen(
void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
{
- unsigned long perf, bestperf, j0, j1;
+ unsigned long perf, bestgenperf, bestxorperf, j0, j1;
+ int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
const struct raid6_calls *const *algo;
const struct raid6_calls *best;
- for (bestperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
+ for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
if (!best || (*algo)->prefer >= best->prefer) {
if ((*algo)->valid && !(*algo)->valid())
continue;
@@ -153,19 +154,45 @@ static inline const struct raid6_calls *raid6_choose_gen(
}
preempt_enable();
- if (perf > bestperf) {
- bestperf = perf;
+ if (perf > bestgenperf) {
+ bestgenperf = perf;
best = *algo;
}
- pr_info("raid6: %-8s %5ld MB/s\n", (*algo)->name,
+ pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+
+ if (!(*algo)->xor_syndrome)
+ continue;
+
+ perf = 0;
+
+ preempt_disable();
+ j0 = jiffies;
+ while ((j1 = jiffies) == j0)
+ cpu_relax();
+ while (time_before(jiffies,
+ j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
+ (*algo)->xor_syndrome(disks, start, stop,
+ PAGE_SIZE, *dptrs);
+ perf++;
+ }
+ preempt_enable();
+
+ if (best == *algo)
+ bestxorperf = perf;
+
+ pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
+ (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
}
}
if (best) {
- pr_info("raid6: using algorithm %s (%ld MB/s)\n",
+ pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
best->name,
- (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+ (bestgenperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+ if (best->xor_syndrome)
+ pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
+ (bestxorperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
raid6_call = *best;
} else
pr_err("raid6: Yikes! No algorithm found!\n");
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index 7cc12b532e95..bec27fce7501 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -119,6 +119,7 @@ int raid6_have_altivec(void)
const struct raid6_calls raid6_altivec$# = {
raid6_altivec$#_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_altivec,
"altivecx$#",
0
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index bc3b1dd436eb..76734004358d 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -89,6 +89,7 @@ static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_avx2x1 = {
raid6_avx21_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_avx2,
"avx2x1",
1 /* Has cache hints */
@@ -150,6 +151,7 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_avx2x2 = {
raid6_avx22_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_avx2,
"avx2x2",
1 /* Has cache hints */
@@ -242,6 +244,7 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_avx2x4 = {
raid6_avx24_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_avx2,
"avx2x4",
1 /* Has cache hints */
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc
index 5b50f8dfc5d2..558aeac9342a 100644
--- a/lib/raid6/int.uc
+++ b/lib/raid6/int.uc
@@ -107,9 +107,48 @@ static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
}
+static void raid6_int$#_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ /* P/Q data pages */
+ wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ wp$$ ^= wd$$;
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ &= NBYTES(0x1d);
+ w1$$ ^= w2$$;
+ wq$$ = w1$$ ^ wd$$;
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ &= NBYTES(0x1d);
+ wq$$ = w1$$ ^ w2$$;
+ }
+ *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ }
+
+}
+
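The per-iteration update above multiplies the running Q syndrome by 2 in GF(2^8) (the MASK/SHLBYTE pair with the 0x1d reduction) before folding in the next data block. A byte-wide userspace check of that step, with helper names local to this sketch:

/* Byte-wide illustration of the GF(2^8) multiply-by-2 used for Q (polynomial x^8+x^4+x^3+x^2+1). */
#include <stdio.h>
#include <stdint.h>

static uint8_t gf2_mul2(uint8_t v)
{
	uint8_t mask = (v & 0x80) ? 0xff : 0x00;	/* MASK(): all-ones if the top bit is set */

	return (uint8_t)(v << 1) ^ (mask & 0x1d);	/* SHLBYTE(), then conditional reduction */
}

int main(void)
{
	/* One Q update for a single data byte: q = 2*q ^ data, as in the xor_syndrome loops. */
	uint8_t q = 0x80, data = 0x37;

	q = gf2_mul2(q) ^ data;
	printf("0x%02x\n", q);	/* 2*0x80 = 0x1d in GF(2^8); 0x1d ^ 0x37 = 0x2a */
	return 0;
}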
const struct raid6_calls raid6_intx$# = {
raid6_int$#_gen_syndrome,
- NULL, /* always valid */
+ raid6_int$#_xor_syndrome,
+ NULL, /* always valid */
"int" NSTRING "x$#",
0
};
diff --git a/lib/raid6/mmx.c b/lib/raid6/mmx.c
index 590c71c9e200..b3b0e1fcd3af 100644
--- a/lib/raid6/mmx.c
+++ b/lib/raid6/mmx.c
@@ -76,6 +76,7 @@ static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_mmxx1 = {
raid6_mmx1_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_mmx,
"mmxx1",
0
@@ -134,6 +135,7 @@ static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_mmxx2 = {
raid6_mmx2_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_mmx,
"mmxx2",
0
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
index 36ad4705df1a..d9ad6ee284f4 100644
--- a/lib/raid6/neon.c
+++ b/lib/raid6/neon.c
@@ -42,6 +42,7 @@
} \
struct raid6_calls const raid6_neonx ## _n = { \
raid6_neon ## _n ## _gen_syndrome, \
+ NULL, /* XOR not yet implemented */ \
raid6_have_neon, \
"neonx" #_n, \
0 \
diff --git a/lib/raid6/sse1.c b/lib/raid6/sse1.c
index f76297139445..9025b8ca9aa3 100644
--- a/lib/raid6/sse1.c
+++ b/lib/raid6/sse1.c
@@ -92,6 +92,7 @@ static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_sse1x1 = {
raid6_sse11_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_sse1_or_mmxext,
"sse1x1",
1 /* Has cache hints */
@@ -154,6 +155,7 @@ static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_sse1x2 = {
raid6_sse12_gen_syndrome,
+ NULL, /* XOR not yet implemented */
raid6_have_sse1_or_mmxext,
"sse1x2",
1 /* Has cache hints */
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 85b82c85f28e..1d2276b007ee 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -88,8 +88,58 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
+
+static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+ {
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+
+ for ( d = 0 ; d < bytes ; d += 16 ) {
+ asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+ asm volatile("pxor %xmm4,%xmm2");
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm5,%xmm4");
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pxor %xmm5,%xmm4");
+ }
+ asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
const struct raid6_calls raid6_sse2x1 = {
raid6_sse21_gen_syndrome,
+ raid6_sse21_xor_syndrome,
raid6_have_sse2,
"sse2x1",
1 /* Has cache hints */
@@ -150,8 +200,76 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
+ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+ {
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+
+ for ( d = 0 ; d < bytes ; d += 32 ) {
+ asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+ asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+ asm volatile("pxor %xmm4,%xmm2");
+ asm volatile("pxor %xmm6,%xmm3");
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm7,%xmm3");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ }
+ asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+ asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
+ asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
+ asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+ }
+
const struct raid6_calls raid6_sse2x2 = {
raid6_sse22_gen_syndrome,
+ raid6_sse22_xor_syndrome,
raid6_have_sse2,
"sse2x2",
1 /* Has cache hints */
@@ -248,8 +366,117 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
+ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+ {
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
+
+ for ( d = 0 ; d < bytes ; d += 64 ) {
+ asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+ asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
+ asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+ asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+ asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
+ asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
+ asm volatile("pxor %xmm4,%xmm2");
+ asm volatile("pxor %xmm6,%xmm3");
+ asm volatile("pxor %xmm12,%xmm10");
+ asm volatile("pxor %xmm14,%xmm11");
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pxor %xmm13,%xmm13");
+ asm volatile("pxor %xmm15,%xmm15");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("pcmpgtb %xmm12,%xmm13");
+ asm volatile("pcmpgtb %xmm14,%xmm15");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("paddb %xmm12,%xmm12");
+ asm volatile("paddb %xmm14,%xmm14");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pand %xmm0,%xmm13");
+ asm volatile("pand %xmm0,%xmm15");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+ asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
+ asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm7,%xmm3");
+ asm volatile("pxor %xmm13,%xmm10");
+ asm volatile("pxor %xmm15,%xmm11");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ }
+ asm volatile("prefetchnta %0" :: "m" (q[d]));
+ asm volatile("prefetchnta %0" :: "m" (q[d+32]));
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pxor %xmm13,%xmm13");
+ asm volatile("pxor %xmm15,%xmm15");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("pcmpgtb %xmm12,%xmm13");
+ asm volatile("pcmpgtb %xmm14,%xmm15");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("paddb %xmm12,%xmm12");
+ asm volatile("paddb %xmm14,%xmm14");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pand %xmm0,%xmm13");
+ asm volatile("pand %xmm0,%xmm15");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ }
+ asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
+ asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
+ asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
+ asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+ asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+ asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+ asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
+ asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
+ asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
+ asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
+ asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
+ }
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+ }
+
+
const struct raid6_calls raid6_sse2x4 = {
raid6_sse24_gen_syndrome,
+ raid6_sse24_xor_syndrome,
raid6_have_sse2,
"sse2x4",
1 /* Has cache hints */
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
index 5a485b7a7d3c..3bebbabdb510 100644
--- a/lib/raid6/test/test.c
+++ b/lib/raid6/test/test.c
@@ -28,11 +28,11 @@ char *dataptrs[NDISKS];
char data[NDISKS][PAGE_SIZE];
char recovi[PAGE_SIZE], recovj[PAGE_SIZE];
-static void makedata(void)
+static void makedata(int start, int stop)
{
int i, j;
- for (i = 0; i < NDISKS; i++) {
+ for (i = start; i <= stop; i++) {
for (j = 0; j < PAGE_SIZE; j++)
data[i][j] = rand();
@@ -91,34 +91,55 @@ int main(int argc, char *argv[])
{
const struct raid6_calls *const *algo;
const struct raid6_recov_calls *const *ra;
- int i, j;
+ int i, j, p1, p2;
int err = 0;
- makedata();
+ makedata(0, NDISKS-1);
for (ra = raid6_recov_algos; *ra; ra++) {
if ((*ra)->valid && !(*ra)->valid())
continue;
+
raid6_2data_recov = (*ra)->data2;
raid6_datap_recov = (*ra)->datap;
printf("using recovery %s\n", (*ra)->name);
for (algo = raid6_algos; *algo; algo++) {
- if (!(*algo)->valid || (*algo)->valid()) {
- raid6_call = **algo;
+ if ((*algo)->valid && !(*algo)->valid())
+ continue;
+
+ raid6_call = **algo;
+
+ /* Nuke syndromes */
+ memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
+
+ /* Generate assumed good syndrome */
+ raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
+ (void **)&dataptrs);
+
+ for (i = 0; i < NDISKS-1; i++)
+ for (j = i+1; j < NDISKS; j++)
+ err += test_disks(i, j);
+
+ if (!raid6_call.xor_syndrome)
+ continue;
+
+ for (p1 = 0; p1 < NDISKS-2; p1++)
+ for (p2 = p1; p2 < NDISKS-2; p2++) {
- /* Nuke syndromes */
- memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
+ /* Simulate rmw run */
+ raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ (void **)&dataptrs);
+ makedata(p1, p2);
+ raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ (void **)&dataptrs);
- /* Generate assumed good syndrome */
- raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
- (void **)&dataptrs);
+ for (i = 0; i < NDISKS-1; i++)
+ for (j = i+1; j < NDISKS; j++)
+ err += test_disks(i, j);
+ }
- for (i = 0; i < NDISKS-1; i++)
- for (j = i+1; j < NDISKS; j++)
- err += test_disks(i, j);
- }
}
printf("\n");
}
diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc
index e7c29459cbcd..2dd291a11264 100644
--- a/lib/raid6/tilegx.uc
+++ b/lib/raid6/tilegx.uc
@@ -80,6 +80,7 @@ void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
const struct raid6_calls raid6_tilegx$# = {
raid6_tilegx$#_gen_syndrome,
+ NULL, /* XOR not yet implemented */
NULL,
"tilegx$#",
0
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b5344ef4c684..b28df4019ade 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1,13 +1,13 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
* Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,121 +27,18 @@
#include <linux/err.h>
#define HASH_DEFAULT_SIZE 64UL
-#define HASH_MIN_SIZE 4UL
+#define HASH_MIN_SIZE 4U
#define BUCKET_LOCKS_PER_CPU 128UL
-/* Base bits plus 1 bit for nulls marker */
-#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
-
-enum {
- RHT_LOCK_NORMAL,
- RHT_LOCK_NESTED,
-};
-
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
-{
- return &tbl->locks[hash & tbl->locks_mask];
-}
-
-static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
-{
- return (void *) he - ht->p.head_offset;
-}
-
-static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
-{
- return hash & (tbl->size - 1);
-}
-
-static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
-{
- u32 hash;
-
- if (unlikely(!ht->p.key_len))
- hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
- else
- hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
- ht->p.hash_rnd);
-
- return hash >> HASH_RESERVED_SPACE;
-}
-
-static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
-{
- return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
-}
-
-static u32 head_hashfn(const struct rhashtable *ht,
+static u32 head_hashfn(struct rhashtable *ht,
const struct bucket_table *tbl,
const struct rhash_head *he)
{
- return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
+ return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
-static void debug_dump_buckets(const struct rhashtable *ht,
- const struct bucket_table *tbl)
-{
- struct rhash_head *he;
- unsigned int i, hash;
-
- for (i = 0; i < tbl->size; i++) {
- pr_warn(" [Bucket %d] ", i);
- rht_for_each_rcu(he, tbl, i) {
- hash = head_hashfn(ht, tbl, he);
- pr_cont("[hash = %#x, lock = %p] ",
- hash, bucket_lock(tbl, hash));
- }
- pr_cont("\n");
- }
-
-}
-
-static void debug_dump_table(struct rhashtable *ht,
- const struct bucket_table *tbl,
- unsigned int hash)
-{
- struct bucket_table *old_tbl, *future_tbl;
-
- pr_emerg("BUG: lock for hash %#x in table %p not held\n",
- hash, tbl);
-
- rcu_read_lock();
- future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
- old_tbl = rht_dereference_rcu(ht->tbl, ht);
- if (future_tbl != old_tbl) {
- pr_warn("Future table %p (size: %zd)\n",
- future_tbl, future_tbl->size);
- debug_dump_buckets(ht, future_tbl);
- }
-
- pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
- debug_dump_buckets(ht, old_tbl);
-
- rcu_read_unlock();
-}
-
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
-#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) \
- do { \
- if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \
- debug_dump_table(HT, TBL, HASH); \
- BUG(); \
- } \
- } while (0)
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
@@ -151,30 +48,18 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
- spinlock_t *lock = bucket_lock(tbl, hash);
+ spinlock_t *lock = rht_bucket_lock(tbl, hash);
return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
-#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif
-static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
-{
- struct rhash_head __rcu **pprev;
-
- for (pprev = &tbl->buckets[n];
- !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
- pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
- ;
-
- return pprev;
-}
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
+ gfp_t gfp)
{
unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
@@ -191,12 +76,13 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
- if (size * sizeof(spinlock_t) > PAGE_SIZE)
+ if (size * sizeof(spinlock_t) > PAGE_SIZE &&
+ gfp == GFP_KERNEL)
tbl->locks = vmalloc(size * sizeof(spinlock_t));
else
#endif
tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
- GFP_KERNEL);
+ gfp);
if (!tbl->locks)
return -ENOMEM;
for (i = 0; i < size; i++)
@@ -215,153 +101,181 @@ static void bucket_table_free(const struct bucket_table *tbl)
kvfree(tbl);
}
+static void bucket_table_free_rcu(struct rcu_head *head)
+{
+ bucket_table_free(container_of(head, struct bucket_table, rcu));
+}
+
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
- size_t nbuckets)
+ size_t nbuckets,
+ gfp_t gfp)
{
struct bucket_table *tbl = NULL;
size_t size;
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
- if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
- tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (tbl == NULL)
+ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
+ gfp != GFP_KERNEL)
+ tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
+ if (tbl == NULL && gfp == GFP_KERNEL)
tbl = vzalloc(size);
if (tbl == NULL)
return NULL;
tbl->size = nbuckets;
- if (alloc_bucket_locks(ht, tbl) < 0) {
+ if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
bucket_table_free(tbl);
return NULL;
}
+ INIT_LIST_HEAD(&tbl->walkers);
+
+ get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+
for (i = 0; i < nbuckets; i++)
INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
return tbl;
}
-/**
- * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
- * @ht: hash table
- * @new_size: new table size
- */
-static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
+ struct bucket_table *tbl)
{
- /* Expand table when exceeding 75% load */
- return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
- (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
-}
+ struct bucket_table *new_tbl;
-/**
- * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
- * @ht: hash table
- * @new_size: new table size
- */
-static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
-{
- /* Shrink table beneath 30% load */
- return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
- (atomic_read(&ht->shift) > ht->p.min_shift);
-}
+ do {
+ new_tbl = tbl;
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ } while (tbl);
-static void lock_buckets(struct bucket_table *new_tbl,
- struct bucket_table *old_tbl, unsigned int hash)
- __acquires(old_bucket_lock)
-{
- spin_lock_bh(bucket_lock(old_tbl, hash));
- if (new_tbl != old_tbl)
- spin_lock_bh_nested(bucket_lock(new_tbl, hash),
- RHT_LOCK_NESTED);
+ return new_tbl;
}
-static void unlock_buckets(struct bucket_table *new_tbl,
- struct bucket_table *old_tbl, unsigned int hash)
- __releases(old_bucket_lock)
+static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
- if (new_tbl != old_tbl)
- spin_unlock_bh(bucket_lock(new_tbl, hash));
- spin_unlock_bh(bucket_lock(old_tbl, hash));
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *new_tbl = rhashtable_last_table(ht,
+ rht_dereference_rcu(old_tbl->future_tbl, ht));
+ struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
+ int err = -ENOENT;
+ struct rhash_head *head, *next, *entry;
+ spinlock_t *new_bucket_lock;
+ unsigned int new_hash;
+
+ rht_for_each(entry, old_tbl, old_hash) {
+ err = 0;
+ next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
+
+ if (rht_is_a_nulls(next))
+ break;
+
+ pprev = &entry->next;
+ }
+
+ if (err)
+ goto out;
+
+ new_hash = head_hashfn(ht, new_tbl, entry);
+
+ new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
+
+ spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
+ head = rht_dereference_bucket(new_tbl->buckets[new_hash],
+ new_tbl, new_hash);
+
+ if (rht_is_a_nulls(head))
+ INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
+ else
+ RCU_INIT_POINTER(entry->next, head);
+
+ rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
+ spin_unlock(new_bucket_lock);
+
+ rcu_assign_pointer(*pprev, next);
+
+out:
+ return err;
}
-/**
- * Unlink entries on bucket which hash to different bucket.
- *
- * Returns true if no more work needs to be performed on the bucket.
- */
-static bool hashtable_chain_unzip(struct rhashtable *ht,
- const struct bucket_table *new_tbl,
- struct bucket_table *old_tbl,
- size_t old_hash)
+static void rhashtable_rehash_chain(struct rhashtable *ht,
+ unsigned int old_hash)
{
- struct rhash_head *he, *p, *next;
- unsigned int new_hash, new_hash2;
-
- ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ spinlock_t *old_bucket_lock;
- /* Old bucket empty, no work needed. */
- p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
- old_hash);
- if (rht_is_a_nulls(p))
- return false;
+ old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
- new_hash = head_hashfn(ht, new_tbl, p);
- ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
+ spin_lock_bh(old_bucket_lock);
+ while (!rhashtable_rehash_one(ht, old_hash))
+ ;
+ old_tbl->rehash++;
+ spin_unlock_bh(old_bucket_lock);
+}
- /* Advance the old bucket pointer one or more times until it
- * reaches a node that doesn't hash to the same bucket as the
- * previous node p. Call the previous node p;
- */
- rht_for_each_continue(he, p->next, old_tbl, old_hash) {
- new_hash2 = head_hashfn(ht, new_tbl, he);
- ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);
+static int rhashtable_rehash_attach(struct rhashtable *ht,
+ struct bucket_table *old_tbl,
+ struct bucket_table *new_tbl)
+{
+ /* Protect future_tbl using the first bucket lock. */
+ spin_lock_bh(old_tbl->locks);
- if (new_hash != new_hash2)
- break;
- p = he;
+ /* Did somebody beat us to it? */
+ if (rcu_access_pointer(old_tbl->future_tbl)) {
+ spin_unlock_bh(old_tbl->locks);
+ return -EEXIST;
}
- rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
- /* Find the subsequent node which does hash to the same
- * bucket as node P, or NULL if no such node exists.
+ /* Make insertions go into the new, empty table right away. Deletions
+ * and lookups will be attempted in both tables until we synchronize.
*/
- INIT_RHT_NULLS_HEAD(next, ht, old_hash);
- if (!rht_is_a_nulls(he)) {
- rht_for_each_continue(he, he->next, old_tbl, old_hash) {
- if (head_hashfn(ht, new_tbl, he) == new_hash) {
- next = he;
- break;
- }
- }
- }
+ rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
- /* Set p's next pointer to that subsequent node pointer,
- * bypassing the nodes which do not hash to p's bucket
- */
- rcu_assign_pointer(p->next, next);
+ /* Ensure the new table is visible to readers. */
+ smp_wmb();
- p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
- old_hash);
+ spin_unlock_bh(old_tbl->locks);
- return !rht_is_a_nulls(p);
+ return 0;
}
-static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
- unsigned int new_hash, struct rhash_head *entry)
+static int rhashtable_rehash_table(struct rhashtable *ht)
{
- ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *new_tbl;
+ struct rhashtable_walker *walker;
+ unsigned int old_hash;
+
+ new_tbl = rht_dereference(old_tbl->future_tbl, ht);
+ if (!new_tbl)
+ return 0;
+
+ for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
+ rhashtable_rehash_chain(ht, old_hash);
+
+ /* Publish the new table pointer. */
+ rcu_assign_pointer(ht->tbl, new_tbl);
+
+ spin_lock(&ht->lock);
+ list_for_each_entry(walker, &old_tbl->walkers, list)
+ walker->tbl = NULL;
+ spin_unlock(&ht->lock);
+
+ /* Wait for readers. All new readers will see the new
+ * table, and thus no references to the old table will
+ * remain.
+ */
+ call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
- rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
+ return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
/**
* rhashtable_expand - Expand hash table while allowing concurrent lookups
* @ht: the hash table to expand
*
- * A secondary bucket array is allocated and the hash entries are migrated
- * while keeping them on both lists until the end of the RCU grace period.
+ * A secondary bucket array is allocated and the hash entries are migrated.
*
* This function may only be called in a context where it is safe to call
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -372,89 +286,32 @@ static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
* It is valid to have concurrent insertions and deletions protected by per
* bucket locks or concurrent RCU protected lookups and traversals.
*/
-int rhashtable_expand(struct rhashtable *ht)
+static int rhashtable_expand(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
- struct rhash_head *he;
- unsigned int new_hash, old_hash;
- bool complete = false;
+ int err;
ASSERT_RHT_MUTEX(ht);
- new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+ old_tbl = rhashtable_last_table(ht, old_tbl);
+
+ new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
if (new_tbl == NULL)
return -ENOMEM;
- atomic_inc(&ht->shift);
-
- /* Make insertions go into the new, empty table right away. Deletions
- * and lookups will be attempted in both tables until we synchronize.
- * The synchronize_rcu() guarantees for the new table to be picked up
- * so no new additions go into the old table while we relink.
- */
- rcu_assign_pointer(ht->future_tbl, new_tbl);
- synchronize_rcu();
-
- /* For each new bucket, search the corresponding old bucket for the
- * first entry that hashes to the new bucket, and link the end of
- * newly formed bucket chain (containing entries added to future
- * table) to that entry. Since all the entries which will end up in
- * the new bucket appear in the same old bucket, this constructs an
- * entirely valid new hash table, but with multiple buckets
- * "zipped" together into a single imprecise chain.
- */
- for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
- old_hash = rht_bucket_index(old_tbl, new_hash);
- lock_buckets(new_tbl, old_tbl, new_hash);
- rht_for_each(he, old_tbl, old_hash) {
- if (head_hashfn(ht, new_tbl, he) == new_hash) {
- link_old_to_new(ht, new_tbl, new_hash, he);
- break;
- }
- }
- unlock_buckets(new_tbl, old_tbl, new_hash);
- cond_resched();
- }
+ err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+ if (err)
+ bucket_table_free(new_tbl);
- /* Unzip interleaved hash chains */
- while (!complete && !ht->being_destroyed) {
- /* Wait for readers. All new readers will see the new
- * table, and thus no references to the old table will
- * remain.
- */
- synchronize_rcu();
-
- /* For each bucket in the old table (each of which
- * contains items from multiple buckets of the new
- * table): ...
- */
- complete = true;
- for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
- lock_buckets(new_tbl, old_tbl, old_hash);
-
- if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
- old_hash))
- complete = false;
-
- unlock_buckets(new_tbl, old_tbl, old_hash);
- cond_resched();
- }
- }
-
- rcu_assign_pointer(ht->tbl, new_tbl);
- synchronize_rcu();
-
- bucket_table_free(old_tbl);
- return 0;
+ return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_expand);
/**
* rhashtable_shrink - Shrink hash table while allowing concurrent lookups
* @ht: the hash table to shrink
*
- * This function may only be called in a context where it is safe to call
- * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ * This function shrinks the hash table to fit, i.e., to the smallest
+ * size that would not cause it to expand again right away.
*
* The caller must ensure that no concurrent resizing occurs by holding
* ht->mutex.
@@ -465,395 +322,151 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
* It is valid to have concurrent insertions and deletions protected by per
* bucket locks or concurrent RCU protected lookups and traversals.
*/
-int rhashtable_shrink(struct rhashtable *ht)
+static int rhashtable_shrink(struct rhashtable *ht)
{
- struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
- unsigned int new_hash;
+ struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+ unsigned int size;
+ int err;
ASSERT_RHT_MUTEX(ht);
- new_tbl = bucket_table_alloc(ht, tbl->size / 2);
- if (new_tbl == NULL)
- return -ENOMEM;
-
- rcu_assign_pointer(ht->future_tbl, new_tbl);
- synchronize_rcu();
+ size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+ if (size < ht->p.min_size)
+ size = ht->p.min_size;
- /* Link the first entry in the old bucket to the end of the
- * bucket in the new table. As entries are concurrently being
- * added to the new table, lock down the new bucket. As we
- * always divide the size in half when shrinking, each bucket
- * in the new table maps to exactly two buckets in the old
- * table.
- */
- for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
- lock_buckets(new_tbl, tbl, new_hash);
-
- rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
- tbl->buckets[new_hash]);
- ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
- rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
- tbl->buckets[new_hash + new_tbl->size]);
+ if (old_tbl->size <= size)
+ return 0;
- unlock_buckets(new_tbl, tbl, new_hash);
- cond_resched();
- }
-
- /* Publish the new, valid hash table */
- rcu_assign_pointer(ht->tbl, new_tbl);
- atomic_dec(&ht->shift);
+ if (rht_dereference(old_tbl->future_tbl, ht))
+ return -EEXIST;
- /* Wait for readers. No new readers will have references to the
- * old hash table.
- */
- synchronize_rcu();
+ new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+ if (new_tbl == NULL)
+ return -ENOMEM;
- bucket_table_free(tbl);
+ err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+ if (err)
+ bucket_table_free(new_tbl);
- return 0;
+ return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_shrink);
static void rht_deferred_worker(struct work_struct *work)
{
struct rhashtable *ht;
struct bucket_table *tbl;
- struct rhashtable_walker *walker;
+ int err = 0;
ht = container_of(work, struct rhashtable, run_work);
mutex_lock(&ht->mutex);
- if (ht->being_destroyed)
- goto unlock;
tbl = rht_dereference(ht->tbl, ht);
+ tbl = rhashtable_last_table(ht, tbl);
- list_for_each_entry(walker, &ht->walkers, list)
- walker->resize = true;
-
- if (rht_grow_above_75(ht, tbl->size))
+ if (rht_grow_above_75(ht, tbl))
rhashtable_expand(ht);
- else if (rht_shrink_below_30(ht, tbl->size))
+ else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
rhashtable_shrink(ht);
-unlock:
- mutex_unlock(&ht->mutex);
-}
-
-static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
- struct bucket_table *tbl,
- const struct bucket_table *old_tbl, u32 hash)
-{
- bool no_resize_running = tbl == old_tbl;
- struct rhash_head *head;
-
- hash = rht_bucket_index(tbl, hash);
- head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
- ASSERT_BUCKET_LOCK(ht, tbl, hash);
+ err = rhashtable_rehash_table(ht);
- if (rht_is_a_nulls(head))
- INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
- else
- RCU_INIT_POINTER(obj->next, head);
-
- rcu_assign_pointer(tbl->buckets[hash], obj);
+ mutex_unlock(&ht->mutex);
- atomic_inc(&ht->nelems);
- if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+ if (err)
schedule_work(&ht->run_work);
}
-/**
- * rhashtable_insert - insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Will take a per bucket spinlock to protect against mutual mutations
- * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
+static bool rhashtable_check_elasticity(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash)
{
- struct bucket_table *tbl, *old_tbl;
- unsigned hash;
-
- rcu_read_lock();
-
- tbl = rht_dereference_rcu(ht->future_tbl, ht);
- old_tbl = rht_dereference_rcu(ht->tbl, ht);
- hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+ unsigned int elasticity = ht->elasticity;
+ struct rhash_head *head;
- lock_buckets(tbl, old_tbl, hash);
- __rhashtable_insert(ht, obj, tbl, old_tbl, hash);
- unlock_buckets(tbl, old_tbl, hash);
+ rht_for_each(head, tbl, hash)
+ if (!--elasticity)
+ return true;
- rcu_read_unlock();
+ return false;
}
-EXPORT_SYMBOL_GPL(rhashtable_insert);
-/**
- * rhashtable_remove - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
+int rhashtable_insert_rehash(struct rhashtable *ht)
{
- struct bucket_table *tbl, *new_tbl, *old_tbl;
- struct rhash_head __rcu **pprev;
- struct rhash_head *he, *he2;
- unsigned int hash, new_hash;
- bool ret = false;
+ struct bucket_table *old_tbl;
+ struct bucket_table *new_tbl;
+ struct bucket_table *tbl;
+ unsigned int size;
+ int err;
- rcu_read_lock();
old_tbl = rht_dereference_rcu(ht->tbl, ht);
- tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
- new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
-
- lock_buckets(new_tbl, old_tbl, new_hash);
-restart:
- hash = rht_bucket_index(tbl, new_hash);
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
- if (he != obj) {
- pprev = &he->next;
- continue;
- }
-
- ASSERT_BUCKET_LOCK(ht, tbl, hash);
-
- if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
- !rht_is_a_nulls(obj->next) &&
- head_hashfn(ht, tbl, obj->next) != hash) {
- rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
- } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
- rht_for_each_continue(he2, obj->next, tbl, hash) {
- if (head_hashfn(ht, tbl, he2) == hash) {
- rcu_assign_pointer(*pprev, he2);
- goto found;
- }
- }
-
- rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
- } else {
- rcu_assign_pointer(*pprev, obj->next);
- }
+ tbl = rhashtable_last_table(ht, old_tbl);
-found:
- ret = true;
- break;
- }
-
- /* The entry may be linked in either 'tbl', 'future_tbl', or both.
- * 'future_tbl' only exists for a short period of time during
- * resizing. Thus traversing both is fine and the added cost is
- * very rare.
- */
- if (tbl != old_tbl) {
- tbl = old_tbl;
- goto restart;
- }
-
- unlock_buckets(new_tbl, old_tbl, new_hash);
+ size = tbl->size;
- if (ret) {
- bool no_resize_running = new_tbl == old_tbl;
+ if (rht_grow_above_75(ht, tbl))
+ size *= 2;
+ /* Do not schedule more than one rehash */
+ else if (old_tbl != tbl)
+ return -EBUSY;
- atomic_dec(&ht->nelems);
- if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
- schedule_work(&ht->run_work);
+ new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
+ if (new_tbl == NULL) {
+ /* Schedule async resize/rehash to try allocation
+ * in a non-atomic context.
+ */
+ schedule_work(&ht->run_work);
+ return -ENOMEM;
}
- rcu_read_unlock();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(rhashtable_remove);
-
-struct rhashtable_compare_arg {
- struct rhashtable *ht;
- const void *key;
-};
-
-static bool rhashtable_compare(void *ptr, void *arg)
-{
- struct rhashtable_compare_arg *x = arg;
- struct rhashtable *ht = x->ht;
+ err = rhashtable_rehash_attach(ht, tbl, new_tbl);
+ if (err) {
+ bucket_table_free(new_tbl);
+ if (err == -EEXIST)
+ err = 0;
+ } else
+ schedule_work(&ht->run_work);
- return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
+ return err;
}
+EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
-/**
- * rhashtable_lookup - lookup key in hash table
- * @ht: hash table
- * @key: pointer to key
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- */
-void *rhashtable_lookup(struct rhashtable *ht, const void *key)
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *tbl)
{
- struct rhashtable_compare_arg arg = {
- .ht = ht,
- .key = key,
- };
-
- BUG_ON(!ht->p.key_len);
-
- return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup);
-
-/**
- * rhashtable_lookup_compare - search hash table with compare function
- * @ht: hash table
- * @key: the pointer to the key
- * @compare: compare function, must return true on match
- * @arg: argument passed on to compare function
- *
- * Traverses the bucket chain behind the provided hash value and calls the
- * specified compare function for each entry.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Returns the first entry on which the compare function returned true.
- */
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
- bool (*compare)(void *, void *), void *arg)
-{
- const struct bucket_table *tbl, *old_tbl;
- struct rhash_head *he;
- u32 hash;
-
- rcu_read_lock();
-
- old_tbl = rht_dereference_rcu(ht->tbl, ht);
- tbl = rht_dereference_rcu(ht->future_tbl, ht);
- hash = key_hashfn(ht, key, ht->p.key_len);
-restart:
- rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
- if (!compare(rht_obj(ht, he), arg))
- continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
- }
-
- if (unlikely(tbl != old_tbl)) {
- tbl = old_tbl;
- goto restart;
- }
- rcu_read_unlock();
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
-
-/**
- * rhashtable_lookup_insert - lookup and insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
-{
- struct rhashtable_compare_arg arg = {
- .ht = ht,
- .key = rht_obj(ht, obj) + ht->p.key_offset,
- };
+ struct rhash_head *head;
+ unsigned int hash;
+ int err;
- BUG_ON(!ht->p.key_len);
+ tbl = rhashtable_last_table(ht, tbl);
+ hash = head_hashfn(ht, tbl, obj);
+ spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
- return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
- &arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+ err = -EEXIST;
+ if (key && rhashtable_lookup_fast(ht, key, ht->p))
+ goto exit;
-/**
- * rhashtable_lookup_compare_insert - search and insert object to hash table
- * with compare function
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @compare: compare function, must return true on match
- * @arg: argument passed on to compare function
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
- struct rhash_head *obj,
- bool (*compare)(void *, void *),
- void *arg)
-{
- struct bucket_table *new_tbl, *old_tbl;
- u32 new_hash;
- bool success = true;
+ err = -EAGAIN;
+ if (rhashtable_check_elasticity(ht, tbl, hash) ||
+ rht_grow_above_100(ht, tbl))
+ goto exit;
- BUG_ON(!ht->p.key_len);
+ err = 0;
- rcu_read_lock();
- old_tbl = rht_dereference_rcu(ht->tbl, ht);
- new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
- new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+ head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
- lock_buckets(new_tbl, old_tbl, new_hash);
+ RCU_INIT_POINTER(obj->next, head);
- if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
- compare, arg)) {
- success = false;
- goto exit;
- }
+ rcu_assign_pointer(tbl->buckets[hash], obj);
- __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
+ atomic_inc(&ht->nelems);
exit:
- unlock_buckets(new_tbl, old_tbl, new_hash);
- rcu_read_unlock();
+ spin_unlock(rht_bucket_lock(tbl, hash));
- return success;
+ return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
+EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
* rhashtable_walk_init - Initialise an iterator
@@ -887,11 +500,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
if (!iter->walker)
return -ENOMEM;
- INIT_LIST_HEAD(&iter->walker->list);
- iter->walker->resize = false;
-
mutex_lock(&ht->mutex);
- list_add(&iter->walker->list, &ht->walkers);
+ iter->walker->tbl = rht_dereference(ht->tbl, ht);
+ list_add(&iter->walker->list, &iter->walker->tbl->walkers);
mutex_unlock(&ht->mutex);
return 0;
@@ -907,7 +518,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
mutex_lock(&iter->ht->mutex);
- list_del(&iter->walker->list);
+ if (iter->walker->tbl)
+ list_del(&iter->walker->list);
mutex_unlock(&iter->ht->mutex);
kfree(iter->walker);
}
@@ -928,13 +540,21 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
* by calling rhashtable_walk_next.
*/
int rhashtable_walk_start(struct rhashtable_iter *iter)
+ __acquires(RCU)
{
+ struct rhashtable *ht = iter->ht;
+
+ mutex_lock(&ht->mutex);
+
+ if (iter->walker->tbl)
+ list_del(&iter->walker->list);
+
rcu_read_lock();
- if (iter->walker->resize) {
- iter->slot = 0;
- iter->skip = 0;
- iter->walker->resize = false;
+ mutex_unlock(&ht->mutex);
+
+ if (!iter->walker->tbl) {
+ iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
return -EAGAIN;
}
@@ -956,13 +576,11 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
*/
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
- const struct bucket_table *tbl;
+ struct bucket_table *tbl = iter->walker->tbl;
struct rhashtable *ht = iter->ht;
struct rhash_head *p = iter->p;
void *obj = NULL;
- tbl = rht_dereference_rcu(ht->tbl, ht);
-
if (p) {
p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
goto next;
@@ -988,17 +606,20 @@ next:
iter->skip = 0;
}
- iter->p = NULL;
+ /* Ensure we see any new tables. */
+ smp_rmb();
-out:
- if (iter->walker->resize) {
- iter->p = NULL;
+ iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (iter->walker->tbl) {
iter->slot = 0;
iter->skip = 0;
- iter->walker->resize = false;
return ERR_PTR(-EAGAIN);
}
+ iter->p = NULL;
+
+out:
+
return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
@@ -1010,16 +631,39 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_next);
* Finish a hash table walk.
*/
void rhashtable_walk_stop(struct rhashtable_iter *iter)
+ __releases(RCU)
{
- rcu_read_unlock();
+ struct rhashtable *ht;
+ struct bucket_table *tbl = iter->walker->tbl;
+
+ if (!tbl)
+ goto out;
+
+ ht = iter->ht;
+
+ spin_lock(&ht->lock);
+ if (tbl->rehash < tbl->size)
+ list_add(&iter->walker->list, &tbl->walkers);
+ else
+ iter->walker->tbl = NULL;
+ spin_unlock(&ht->lock);
+
iter->p = NULL;
+
+out:
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
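An illustrative aside (not part of the patch): the reworked walker parks itself on the per-table walkers list and signals a rehash either through -EAGAIN from rhashtable_walk_start() or through ERR_PTR(-EAGAIN) from rhashtable_walk_next(). A minimal caller sketch against the API shown above, with the function name and pr_info() consumer invented for illustration:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/rhashtable.h>

static void walk_all(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        void *obj;

        if (rhashtable_walk_init(ht, &iter))
                return;

        /* -EAGAIN here only means a resize completed before we started;
         * iteration is still fine, entries may merely be seen twice.
         */
        rhashtable_walk_start(&iter);

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* rehash: carry on in the new table */
                        break;
                }
                pr_info("entry %p\n", obj);
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}

Because a rehash can make the iterator revisit entries, callers that cannot tolerate duplicates still have to filter them out themselves, just as with the old resize flag.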
-static size_t rounded_hashtable_size(struct rhashtable_params *params)
+static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
- 1UL << params->min_shift);
+ (unsigned long)params->min_size);
+}
+
+static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
+{
+ return jhash2(key, length, seed);
}
/**
@@ -1052,7 +696,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* struct rhash_head node;
* };
*
- * u32 my_hash_fn(const void *data, u32 seed)
+ * u32 my_hash_fn(const void *data, u32 len, u32 seed)
* {
* struct test_obj *obj = data;
*
@@ -1065,47 +709,74 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* .obj_hashfn = my_hash_fn,
* };
*/
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params)
{
struct bucket_table *tbl;
size_t size;
size = HASH_DEFAULT_SIZE;
- if ((params->key_len && !params->hashfn) ||
- (!params->key_len && !params->obj_hashfn))
+ if ((!params->key_len && !params->obj_hashfn) ||
+ (params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
return -EINVAL;
- params->min_shift = max_t(size_t, params->min_shift,
- ilog2(HASH_MIN_SIZE));
-
if (params->nelem_hint)
size = rounded_hashtable_size(params);
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
+ spin_lock_init(&ht->lock);
memcpy(&ht->p, params, sizeof(*params));
- INIT_LIST_HEAD(&ht->walkers);
+
+ if (params->min_size)
+ ht->p.min_size = roundup_pow_of_two(params->min_size);
+
+ if (params->max_size)
+ ht->p.max_size = rounddown_pow_of_two(params->max_size);
+
+ ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+
+ /* The maximum (not average) chain length grows with the
+ * size of the hash table, at a rate of (log N)/(log log N).
+ * The value of 16 is selected so that even if the hash
+ * table grew to 2^32 you would not expect the maximum
+ * chain length to exceed it unless we are under attack
+ * (or extremely unlucky).
+ *
+ * As this limit is only to detect attacks, we don't need
+ * to set it to a lower value as you'd need the chain
+ * length to vastly exceed 16 to have any real effect
+ * on the system.
+ */
+ if (!params->insecure_elasticity)
+ ht->elasticity = 16;
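An illustrative aside (not part of the patch): as a rough check of the bound above, a table of N = 2^32 buckets holding about N randomly hashed elements has an expected maximum chain length on the order of ln(N) / ln(ln(N)) = 22.2 / 3.1, roughly 7, so an elasticity cut-off of 16 leaves ample headroom and should only trip under a hash-collision attack or truly exceptional bad luck.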
if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
else
ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
- tbl = bucket_table_alloc(ht, size);
+ ht->key_len = ht->p.key_len;
+ if (!params->hashfn) {
+ ht->p.hashfn = jhash;
+
+ if (!(ht->key_len & (sizeof(u32) - 1))) {
+ ht->key_len /= sizeof(u32);
+ ht->p.hashfn = rhashtable_jhash2;
+ }
+ }
+
+ tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
atomic_set(&ht->nelems, 0);
- atomic_set(&ht->shift, ilog2(tbl->size));
- RCU_INIT_POINTER(ht->tbl, tbl);
- RCU_INIT_POINTER(ht->future_tbl, tbl);
- if (!ht->p.hash_rnd)
- get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
+ RCU_INIT_POINTER(ht->tbl, tbl);
INIT_WORK(&ht->run_work, rht_deferred_worker);
@@ -1114,21 +785,53 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
- * rhashtable_destroy - destroy hash table
+ * rhashtable_free_and_destroy - free elements and destroy hash table
* @ht: the hash table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * Stops any pending async resize. If defined, invokes free_fn for each
+ * element to release resources. Please note that RCU protected

+ * readers may still be accessing the elements. Releasing of resources
+ * must occur in a compatible manner. Then frees the bucket array.
*
- * Frees the bucket array. This function is not rcu safe, therefore the caller
- * has to make sure that no resizing may happen by unpublishing the hashtable
- * and waiting for the quiescent cycle before releasing the bucket array.
+ * This function will eventually sleep to wait for an async resize
+ * to complete. The caller must ensure that no further write operations
+ * occur in parallel.
*/
-void rhashtable_destroy(struct rhashtable *ht)
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg)
{
- ht->being_destroyed = true;
+ const struct bucket_table *tbl;
+ unsigned int i;
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex);
- bucket_table_free(rht_dereference(ht->tbl, ht));
+ tbl = rht_dereference(ht->tbl, ht);
+ if (free_fn) {
+ for (i = 0; i < tbl->size; i++) {
+ struct rhash_head *pos, *next;
+
+ for (pos = rht_dereference(tbl->buckets[i], ht),
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL;
+ !rht_is_a_nulls(pos);
+ pos = next,
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL)
+ free_fn(rht_obj(ht, pos), arg);
+ }
+ }
+
+ bucket_table_free(tbl);
mutex_unlock(&ht->mutex);
}
+EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
+
+void rhashtable_destroy(struct rhashtable *ht)
+{
+ return rhashtable_free_and_destroy(ht, NULL, NULL);
+}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
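An illustrative aside (not part of the patch): with the conversion to compile-time struct rhashtable_params and the *_fast() inlines used by the updated test module further down, a typical user now looks roughly like the sketch below. The demo_obj type, demo_params and demo_free() names are invented; leaving .hashfn unset relies on the jhash/jhash2 default added in rhashtable_init() above.

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
        u32                     key;
        struct rhash_head       node;
};

static const struct rhashtable_params demo_params = {
        .head_offset            = offsetof(struct demo_obj, node),
        .key_offset             = offsetof(struct demo_obj, key),
        .key_len                = sizeof(u32),
        .automatic_shrinking    = true,
};

static void demo_free(void *ptr, void *arg)
{
        kfree(ptr);
}

static int demo(void)
{
        struct rhashtable ht;
        struct demo_obj *obj;
        u32 key = 42;
        int err;

        err = rhashtable_init(&ht, &demo_params);
        if (err)
                return err;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj) {
                err = -ENOMEM;
                goto out;
        }
        obj->key = key;

        err = rhashtable_insert_fast(&ht, &obj->node, demo_params);
        if (err) {
                kfree(obj);
                goto out;
        }

        rcu_read_lock();
        obj = rhashtable_lookup_fast(&ht, &key, demo_params);
        rcu_read_unlock();
out:
        /* Releases every element still in the table, then the buckets. */
        rhashtable_free_and_destroy(&ht, demo_free, NULL);
        return err;
}

Passing the params struct by value at every call site presumably lets the compiler constant-fold the offsets and hash-function choice, which is what the fast-path inlines are after.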
diff --git a/lib/sha1.c b/lib/sha1.c
index 1df191e04a24..5a56dfd7b99d 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -198,3 +198,4 @@ void sha_init(__u32 *buf)
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
+EXPORT_SYMBOL(sha_init);
diff --git a/lib/string.c b/lib/string.c
index ce81aaec3839..bb3d4b6993c4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
void memzero_explicit(void *s, size_t count)
{
memset(s, 0, count);
- OPTIMIZER_HIDE_VAR(s);
+ barrier_data(s);
}
EXPORT_SYMBOL(memzero_explicit);
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 8f8c4417f228..c98ae818eb4e 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -4,6 +4,7 @@
* Copyright 31 August 2008 James Bottomley
* Copyright (C) 2013, Intel Corporation
*/
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
@@ -14,7 +15,8 @@
/**
* string_get_size - get the size in the specified units
- * @size: The size to be converted
+ * @size: The size to be converted in blocks
+ * @blk_size: Size of the block (use 1 for size in bytes)
* @units: units to use (powers of 1000 or 1024)
* @buf: buffer to format to
* @len: length of buffer
@@ -24,14 +26,14 @@
* at least 9 bytes and will always be zero terminated.
*
*/
-void string_get_size(u64 size, const enum string_size_units units,
+void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len)
{
static const char *const units_10[] = {
- "B", "kB", "MB", "GB", "TB", "PB", "EB"
+ "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
};
static const char *const units_2[] = {
- "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"
+ "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
@@ -42,31 +44,57 @@ void string_get_size(u64 size, const enum string_size_units units,
[STRING_UNITS_2] = 1024,
};
int i, j;
- u32 remainder = 0, sf_cap;
+ u32 remainder = 0, sf_cap, exp;
char tmp[8];
+ const char *unit;
tmp[0] = '\0';
i = 0;
- if (size >= divisor[units]) {
- while (size >= divisor[units]) {
- remainder = do_div(size, divisor[units]);
- i++;
- }
+ if (!size)
+ goto out;
- sf_cap = size;
- for (j = 0; sf_cap*10 < 1000; j++)
- sf_cap *= 10;
+ while (blk_size >= divisor[units]) {
+ remainder = do_div(blk_size, divisor[units]);
+ i++;
+ }
- if (j) {
- remainder *= 1000;
- remainder /= divisor[units];
- snprintf(tmp, sizeof(tmp), ".%03u", remainder);
- tmp[j+1] = '\0';
- }
+ exp = divisor[units] / (u32)blk_size;
+ if (size >= exp) {
+ remainder = do_div(size, divisor[units]);
+ remainder *= blk_size;
+ i++;
+ } else {
+ remainder *= size;
+ }
+
+ size *= blk_size;
+ size += remainder / divisor[units];
+ remainder %= divisor[units];
+
+ while (size >= divisor[units]) {
+ remainder = do_div(size, divisor[units]);
+ i++;
}
+ sf_cap = size;
+ for (j = 0; sf_cap*10 < 1000; j++)
+ sf_cap *= 10;
+
+ if (j) {
+ remainder *= 1000;
+ remainder /= divisor[units];
+ snprintf(tmp, sizeof(tmp), ".%03u", remainder);
+ tmp[j+1] = '\0';
+ }
+
+ out:
+ if (i >= ARRAY_SIZE(units_2))
+ unit = "UNK";
+ else
+ unit = units_str[units][i];
+
snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, units_str[units][i]);
+ tmp, unit);
}
EXPORT_SYMBOL(string_get_size);
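An illustrative aside (not part of the patch): the new blk_size argument lets callers describe sizes in device blocks without pre-multiplying, and possibly overflowing, a u64. A hypothetical call for a 7,814,037,168-sector disk with 512-byte sectors:

#include <linux/string_helpers.h>

char buf[16];

string_get_size(7814037168ULL, 512, STRING_UNITS_10, buf, sizeof(buf));
/* buf should now read "4.00 TB" */
string_get_size(7814037168ULL, 512, STRING_UNITS_2, buf, sizeof(buf));
/* buf should now read "3.63 TiB" */

Callers that still have a plain byte count simply pass blk_size = 1, as the updated kernel-doc notes.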
@@ -239,29 +267,21 @@ int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
}
EXPORT_SYMBOL(string_unescape);
-static int escape_passthrough(unsigned char c, char **dst, size_t *osz)
+static bool escape_passthrough(unsigned char c, char **dst, char *end)
{
char *out = *dst;
- if (*osz < 1)
- return -ENOMEM;
-
- *out++ = c;
-
- *dst = out;
- *osz -= 1;
-
- return 1;
+ if (out < end)
+ *out = c;
+ *dst = out + 1;
+ return true;
}
-static int escape_space(unsigned char c, char **dst, size_t *osz)
+static bool escape_space(unsigned char c, char **dst, char *end)
{
char *out = *dst;
unsigned char to;
- if (*osz < 2)
- return -ENOMEM;
-
switch (c) {
case '\n':
to = 'n';
@@ -279,26 +299,25 @@ static int escape_space(unsigned char c, char **dst, size_t *osz)
to = 'f';
break;
default:
- return 0;
+ return false;
}
- *out++ = '\\';
- *out++ = to;
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = to;
+ ++out;
*dst = out;
- *osz -= 2;
-
- return 1;
+ return true;
}
-static int escape_special(unsigned char c, char **dst, size_t *osz)
+static bool escape_special(unsigned char c, char **dst, char *end)
{
char *out = *dst;
unsigned char to;
- if (*osz < 2)
- return -ENOMEM;
-
switch (c) {
case '\\':
to = '\\';
@@ -310,71 +329,78 @@ static int escape_special(unsigned char c, char **dst, size_t *osz)
to = 'e';
break;
default:
- return 0;
+ return false;
}
- *out++ = '\\';
- *out++ = to;
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = to;
+ ++out;
*dst = out;
- *osz -= 2;
-
- return 1;
+ return true;
}
-static int escape_null(unsigned char c, char **dst, size_t *osz)
+static bool escape_null(unsigned char c, char **dst, char *end)
{
char *out = *dst;
- if (*osz < 2)
- return -ENOMEM;
-
if (c)
- return 0;
+ return false;
- *out++ = '\\';
- *out++ = '0';
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = '0';
+ ++out;
*dst = out;
- *osz -= 2;
-
- return 1;
+ return true;
}
-static int escape_octal(unsigned char c, char **dst, size_t *osz)
+static bool escape_octal(unsigned char c, char **dst, char *end)
{
char *out = *dst;
- if (*osz < 4)
- return -ENOMEM;
-
- *out++ = '\\';
- *out++ = ((c >> 6) & 0x07) + '0';
- *out++ = ((c >> 3) & 0x07) + '0';
- *out++ = ((c >> 0) & 0x07) + '0';
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = ((c >> 6) & 0x07) + '0';
+ ++out;
+ if (out < end)
+ *out = ((c >> 3) & 0x07) + '0';
+ ++out;
+ if (out < end)
+ *out = ((c >> 0) & 0x07) + '0';
+ ++out;
*dst = out;
- *osz -= 4;
-
- return 1;
+ return true;
}
-static int escape_hex(unsigned char c, char **dst, size_t *osz)
+static bool escape_hex(unsigned char c, char **dst, char *end)
{
char *out = *dst;
- if (*osz < 4)
- return -ENOMEM;
-
- *out++ = '\\';
- *out++ = 'x';
- *out++ = hex_asc_hi(c);
- *out++ = hex_asc_lo(c);
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = 'x';
+ ++out;
+ if (out < end)
+ *out = hex_asc_hi(c);
+ ++out;
+ if (out < end)
+ *out = hex_asc_lo(c);
+ ++out;
*dst = out;
- *osz -= 4;
-
- return 1;
+ return true;
}
/**
@@ -426,19 +452,17 @@ static int escape_hex(unsigned char c, char **dst, size_t *osz)
* it if needs.
*
* Return:
- * The amount of the characters processed to the destination buffer, or
- * %-ENOMEM if the size of buffer is not enough to put an escaped character is
- * returned.
- *
- * Even in the case of error @dst pointer will be updated to point to the byte
- * after the last processed character.
+ * The total size of the escaped output that would be generated for
+ * the given input and flags. To check whether the output was
+ * truncated, compare the return value to osz. There is room left in
+ * dst for a '\0' terminator if and only if ret < osz.
*/
-int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
+int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *esc)
{
- char *out = *dst, *p = out;
+ char *p = dst;
+ char *end = p + osz;
bool is_dict = esc && *esc;
- int ret = 0;
while (isz--) {
unsigned char c = *src++;
@@ -458,55 +482,26 @@ int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
(is_dict && !strchr(esc, c))) {
/* do nothing */
} else {
- if (flags & ESCAPE_SPACE) {
- ret = escape_space(c, &p, &osz);
- if (ret < 0)
- break;
- if (ret > 0)
- continue;
- }
-
- if (flags & ESCAPE_SPECIAL) {
- ret = escape_special(c, &p, &osz);
- if (ret < 0)
- break;
- if (ret > 0)
- continue;
- }
-
- if (flags & ESCAPE_NULL) {
- ret = escape_null(c, &p, &osz);
- if (ret < 0)
- break;
- if (ret > 0)
- continue;
- }
+ if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_NULL && escape_null(c, &p, end))
+ continue;
/* ESCAPE_OCTAL and ESCAPE_HEX always go last */
- if (flags & ESCAPE_OCTAL) {
- ret = escape_octal(c, &p, &osz);
- if (ret < 0)
- break;
+ if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
continue;
- }
- if (flags & ESCAPE_HEX) {
- ret = escape_hex(c, &p, &osz);
- if (ret < 0)
- break;
+
+ if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
continue;
- }
}
- ret = escape_passthrough(c, &p, &osz);
- if (ret < 0)
- break;
+ escape_passthrough(c, &p, end);
}
- *dst = p;
-
- if (ret < 0)
- return ret;
-
- return p - out;
+ return p - dst;
}
EXPORT_SYMBOL(string_escape_mem);
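An illustrative aside (not part of the patch): string_escape_mem() now has snprintf-like semantics, writing at most osz bytes while returning the size the fully escaped output would need, so the old -ENOMEM dance disappears. A minimal two-pass sketch, with the function name invented for illustration:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>

static int escape_demo(void)
{
        const char *src = "a\tb\n";
        char *out;
        int want, got;

        /* First pass with a zero-sized buffer only measures the output. */
        want = string_escape_mem(src, strlen(src), NULL, 0, ESCAPE_SPACE, NULL);

        out = kmalloc(want + 1, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        got = string_escape_mem(src, strlen(src), out, want, ESCAPE_SPACE, NULL);
        out[got] = '\0';        /* got == want here, so nothing was truncated */
        pr_info("escaped: %s\n", out);  /* six characters: a \ t b \ n */
        kfree(out);
        return 0;
}

The new test_string_escape_overflow() further down exercises exactly this measuring call with a NULL, zero-length destination.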
diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c
index daf29a390a89..c227cc43ec0a 100644
--- a/lib/test-hexdump.c
+++ b/lib/test-hexdump.c
@@ -18,26 +18,26 @@ static const unsigned char data_b[] = {
static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
-static const char *test_data_1_le[] __initconst = {
+static const char * const test_data_1_le[] __initconst = {
"be", "32", "db", "7b", "0a", "18", "93", "b2",
"70", "ba", "c4", "24", "7d", "83", "34", "9b",
"a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
"4c", "d1", "19", "99", "43", "b1", "af", "0c",
};
-static const char *test_data_2_le[] __initconst = {
+static const char *test_data_2_le[] __initdata = {
"32be", "7bdb", "180a", "b293",
"ba70", "24c4", "837d", "9b34",
"9ca6", "ad31", "0f9c", "e9ac",
"d14c", "9919", "b143", "0caf",
};
-static const char *test_data_4_le[] __initconst = {
+static const char *test_data_4_le[] __initdata = {
"7bdb32be", "b293180a", "24c4ba70", "9b34837d",
"ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
};
-static const char *test_data_8_le[] __initconst = {
+static const char *test_data_8_le[] __initdata = {
"b293180a7bdb32be", "9b34837d24c4ba70",
"e9ac0f9cad319ca6", "0cafb1439919d14c",
};
@@ -48,7 +48,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
char test[32 * 3 + 2 + 32 + 1];
char real[32 * 3 + 2 + 32 + 1];
char *p;
- const char **result;
+ const char * const *result;
size_t l = len;
int gs = groupsize, rs = rowsize;
unsigned int i;
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index ab0d30e1e18f..8e376efd88a4 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -260,16 +260,28 @@ static __init const char *test_string_find_match(const struct test_string_2 *s2,
return NULL;
}
+static __init void
+test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc,
+ int q_test, const char *name)
+{
+ int q_real;
+
+ q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
+ if (q_real != q_test)
+ pr_warn("Test '%s' failed: flags = %u, osz = 0, expected %d, got %d\n",
+ name, flags, q_test, q_real);
+}
+
static __init void test_string_escape(const char *name,
const struct test_string_2 *s2,
unsigned int flags, const char *esc)
{
- int q_real = 512;
- char *out_test = kmalloc(q_real, GFP_KERNEL);
- char *out_real = kmalloc(q_real, GFP_KERNEL);
+ size_t out_size = 512;
+ char *out_test = kmalloc(out_size, GFP_KERNEL);
+ char *out_real = kmalloc(out_size, GFP_KERNEL);
char *in = kmalloc(256, GFP_KERNEL);
- char *buf = out_real;
int p = 0, q_test = 0;
+ int q_real;
if (!out_test || !out_real || !in)
goto out;
@@ -301,29 +313,19 @@ static __init void test_string_escape(const char *name,
q_test += len;
}
- q_real = string_escape_mem(in, p, &buf, q_real, flags, esc);
+ q_real = string_escape_mem(in, p, out_real, out_size, flags, esc);
test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
q_test);
+
+ test_string_escape_overflow(in, p, flags, esc, q_test, name);
+
out:
kfree(in);
kfree(out_real);
kfree(out_test);
}
-static __init void test_string_escape_nomem(void)
-{
- char *in = "\eb \\C\007\"\x90\r]";
- char out[64], *buf = out;
- int rc = -ENOMEM, ret;
-
- ret = string_escape_str_any_np(in, &buf, strlen(in), NULL);
- if (ret == rc)
- return;
-
- pr_err("Test 'escape nomem' failed: got %d instead of %d\n", ret, rc);
-}
-
static int __init test_string_helpers_init(void)
{
unsigned int i;
@@ -342,8 +344,6 @@ static int __init test_string_helpers_init(void)
for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
- test_string_escape_nomem();
-
return -EINVAL;
}
module_init(test_string_helpers_init);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 67c7593d1dd6..b2957540d3c7 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -38,6 +38,15 @@ struct test_obj {
struct rhash_head node;
};
+static const struct rhashtable_params test_rht_params = {
+ .nelem_hint = TEST_HT_SIZE,
+ .head_offset = offsetof(struct test_obj, node),
+ .key_offset = offsetof(struct test_obj, value),
+ .key_len = sizeof(int),
+ .hashfn = jhash,
+ .nulls_base = (3U << RHT_BASE_SHIFT),
+};
+
static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
@@ -47,7 +56,7 @@ static int __init test_rht_lookup(struct rhashtable *ht)
bool expected = !(i % 2);
u32 key = i;
- obj = rhashtable_lookup(ht, &key);
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
if (expected && !obj) {
pr_warn("Test failed: Could not find key %u\n", key);
@@ -80,7 +89,7 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
rcu_cnt = cnt = 0;
if (!quiet)
- pr_info(" [%#4x/%zu]", i, tbl->size);
+ pr_info(" [%#4x/%u]", i, tbl->size);
rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
cnt++;
@@ -133,7 +142,11 @@ static int __init test_rhashtable(struct rhashtable *ht)
obj->ptr = TEST_PTR;
obj->value = i * 2;
- rhashtable_insert(ht, &obj->node);
+ err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
+ if (err) {
+ kfree(obj);
+ goto error;
+ }
}
rcu_read_lock();
@@ -141,30 +154,6 @@ static int __init test_rhashtable(struct rhashtable *ht)
test_rht_lookup(ht);
rcu_read_unlock();
- for (i = 0; i < TEST_NEXPANDS; i++) {
- pr_info(" Table expansion iteration %u...\n", i);
- mutex_lock(&ht->mutex);
- rhashtable_expand(ht);
- mutex_unlock(&ht->mutex);
-
- rcu_read_lock();
- pr_info(" Verifying lookups...\n");
- test_rht_lookup(ht);
- rcu_read_unlock();
- }
-
- for (i = 0; i < TEST_NEXPANDS; i++) {
- pr_info(" Table shrinkage iteration %u...\n", i);
- mutex_lock(&ht->mutex);
- rhashtable_shrink(ht);
- mutex_unlock(&ht->mutex);
-
- rcu_read_lock();
- pr_info(" Verifying lookups...\n");
- test_rht_lookup(ht);
- rcu_read_unlock();
- }
-
rcu_read_lock();
test_bucket_stats(ht, true);
rcu_read_unlock();
@@ -173,10 +162,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
for (i = 0; i < TEST_ENTRIES; i++) {
u32 key = i * 2;
- obj = rhashtable_lookup(ht, &key);
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
BUG_ON(!obj);
- rhashtable_remove(ht, &obj->node);
+ rhashtable_remove_fast(ht, &obj->node, test_rht_params);
kfree(obj);
}
@@ -195,20 +184,11 @@ static struct rhashtable ht;
static int __init test_rht_init(void)
{
- struct rhashtable_params params = {
- .nelem_hint = TEST_HT_SIZE,
- .head_offset = offsetof(struct test_obj, node),
- .key_offset = offsetof(struct test_obj, value),
- .key_len = sizeof(int),
- .hashfn = jhash,
- .max_shift = 1, /* we expand/shrink manually here */
- .nulls_base = (3U << RHT_BASE_SHIFT),
- };
int err;
pr_info("Running resizable hashtable tests...\n");
- err = rhashtable_init(&ht, &params);
+ err = rhashtable_init(&ht, &test_rht_params);
if (err < 0) {
pr_warn("Test failed: Unable to initialize hashtable: %d\n",
err);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b235c96167d3..da39c608a28c 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
*/
#include <stdarg.h>
+#include <linux/clk-provider.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
#include <linux/string.h>
@@ -32,6 +33,7 @@
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/sections.h> /* for dereference_function_descriptor() */
+#include <asm/byteorder.h> /* cpu_to_le16 */
#include <linux/string_helpers.h>
#include "kstrtox.h"
@@ -121,142 +123,145 @@ int skip_atoi(const char **s)
return i;
}
-/* Decimal conversion is by far the most typical, and is used
- * for /proc and /sys data. This directly impacts e.g. top performance
- * with many processes running. We optimize it for speed
- * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
- * (with permission from the author, Douglas W. Jones).
+/*
+ * Decimal conversion is by far the most typical, and is used for
+ * /proc and /sys data. This directly impacts e.g. top performance
+ * with many processes running. We optimize it for speed by emitting
+ * two characters at a time, using a 200 byte lookup table. This
+ * roughly halves the number of multiplications compared to computing
+ * the digits one at a time. Implementation strongly inspired by the
+ * previous version, which in turn used ideas described at
+ * <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
+ * from the author, Douglas W. Jones).
+ *
+ * It turns out there is precisely one 26 bit fixed-point
+ * approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
+ * holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
+ * range happens to be somewhat larger (x <= 1073741898), but that's
+ * irrelevant for our purpose.
+ *
+ * For dividing a number in the range [10^4, 10^6-1] by 100, we still
+ * need a 32x32->64 bit multiply, so we simply use the same constant.
+ *
+ * For dividing a number in the range [100, 10^4-1] by 100, there are
+ * several options. The simplest is (x * 0x147b) >> 19, which is valid
+ * for all x <= 43698.
*/
-#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
-/* Formats correctly any integer in [0, 999999999] */
+static const u16 decpair[100] = {
+#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
+ _( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
+ _(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
+ _(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
+ _(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
+ _(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
+ _(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
+ _(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
+ _(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
+ _(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
+ _(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
+#undef _
+};
+
+/*
+ * This will print a single '0' even if r == 0, since we would
+ * immediately jump to out_r where two 0s would be written but only
+ * one of them accounted for in buf. This is needed by ip4_string
+ * below. All other callers pass a non-zero value of r.
+*/
static noinline_for_stack
-char *put_dec_full9(char *buf, unsigned q)
+char *put_dec_trunc8(char *buf, unsigned r)
{
- unsigned r;
+ unsigned q;
- /*
- * Possible ways to approx. divide by 10
- * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
- * (x * 0xcccd) >> 19 x < 81920 (x < 262149 when 64-bit mul)
- * (x * 0x6667) >> 18 x < 43699
- * (x * 0x3334) >> 17 x < 16389
- * (x * 0x199a) >> 16 x < 16389
- * (x * 0x0ccd) >> 15 x < 16389
- * (x * 0x0667) >> 14 x < 2739
- * (x * 0x0334) >> 13 x < 1029
- * (x * 0x019a) >> 12 x < 1029
- * (x * 0x00cd) >> 11 x < 1029 shorter code than * 0x67 (on i386)
- * (x * 0x0067) >> 10 x < 179
- * (x * 0x0034) >> 9 x < 69 same
- * (x * 0x001a) >> 8 x < 69 same
- * (x * 0x000d) >> 7 x < 69 same, shortest code (on i386)
- * (x * 0x0007) >> 6 x < 19
- * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
- */
- r = (q * (uint64_t)0x1999999a) >> 32;
- *buf++ = (q - 10 * r) + '0'; /* 1 */
- q = (r * (uint64_t)0x1999999a) >> 32;
- *buf++ = (r - 10 * q) + '0'; /* 2 */
- r = (q * (uint64_t)0x1999999a) >> 32;
- *buf++ = (q - 10 * r) + '0'; /* 3 */
- q = (r * (uint64_t)0x1999999a) >> 32;
- *buf++ = (r - 10 * q) + '0'; /* 4 */
- r = (q * (uint64_t)0x1999999a) >> 32;
- *buf++ = (q - 10 * r) + '0'; /* 5 */
- /* Now value is under 10000, can avoid 64-bit multiply */
- q = (r * 0x199a) >> 16;
- *buf++ = (r - 10 * q) + '0'; /* 6 */
- r = (q * 0xcd) >> 11;
- *buf++ = (q - 10 * r) + '0'; /* 7 */
- q = (r * 0xcd) >> 11;
- *buf++ = (r - 10 * q) + '0'; /* 8 */
- *buf++ = q + '0'; /* 9 */
+ /* 1 <= r < 10^8 */
+ if (r < 100)
+ goto out_r;
+
+ /* 100 <= r < 10^8 */
+ q = (r * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+
+ /* 1 <= q < 10^6 */
+ if (q < 100)
+ goto out_q;
+
+ /* 100 <= q < 10^6 */
+ r = (q * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[q - 100*r];
+ buf += 2;
+
+ /* 1 <= r < 10^4 */
+ if (r < 100)
+ goto out_r;
+
+ /* 100 <= r < 10^4 */
+ q = (r * 0x147b) >> 19;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+out_q:
+ /* 1 <= q < 100 */
+ r = q;
+out_r:
+ /* 1 <= r < 100 */
+ *((u16 *)buf) = decpair[r];
+ buf += r < 10 ? 1 : 2;
return buf;
}
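An illustrative aside (not part of the patch): the two magic constants can be checked exhaustively; the sketch below is a plain userspace C program, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t x;

        /* 0x28f5c29 ~= 2^32/100: x/100 == (x * a) >> 32 for all x < 10^8 */
        for (x = 0; x < 100000000; x++)
                assert(x / 100 == (uint32_t)(((uint64_t)x * 0x28f5c29) >> 32));

        /* 0x147b ~= 2^19/100: x/100 == (x * 0x147b) >> 19 for all x <= 43698 */
        for (x = 0; x <= 43698; x++)
                assert(x / 100 == ((x * 0x147b) >> 19));

        puts("both fixed-point /100 approximations hold");
        return 0;
}

Together with the decpair[] table, put_dec_trunc8() emits the two least-significant digits per step; the digits land in reverse order and num_to_str() (further down) reverses them into the final string, which is also why its temporary buffer now needs 2-byte alignment.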
-#endif
-/* Similar to above but do not pad with zeros.
- * Code can be easily arranged to print 9 digits too, but our callers
- * always call put_dec_full9() instead when the number has 9 decimal digits.
- */
+#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
static noinline_for_stack
-char *put_dec_trunc8(char *buf, unsigned r)
+char *put_dec_full8(char *buf, unsigned r)
{
unsigned q;
- /* Copy of previous function's body with added early returns */
- while (r >= 10000) {
- q = r + '0';
- r = (r * (uint64_t)0x1999999a) >> 32;
- *buf++ = q - 10*r;
- }
-
- q = (r * 0x199a) >> 16; /* r <= 9999 */
- *buf++ = (r - 10 * q) + '0';
- if (q == 0)
- return buf;
- r = (q * 0xcd) >> 11; /* q <= 999 */
- *buf++ = (q - 10 * r) + '0';
- if (r == 0)
- return buf;
- q = (r * 0xcd) >> 11; /* r <= 99 */
- *buf++ = (r - 10 * q) + '0';
- if (q == 0)
- return buf;
- *buf++ = q + '0'; /* q <= 9 */
- return buf;
-}
+ /* 0 <= r < 10^8 */
+ q = (r * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
-/* There are two algorithms to print larger numbers.
- * One is generic: divide by 1000000000 and repeatedly print
- * groups of (up to) 9 digits. It's conceptually simple,
- * but requires a (unsigned long long) / 1000000000 division.
- *
- * Second algorithm splits 64-bit unsigned long long into 16-bit chunks,
- * manipulates them cleverly and generates groups of 4 decimal digits.
- * It so happens that it does NOT require long long division.
- *
- * If long is > 32 bits, division of 64-bit values is relatively easy,
- * and we will use the first algorithm.
- * If long long is > 64 bits (strange architecture with VERY large long long),
- * second algorithm can't be used, and we again use the first one.
- *
- * Else (if long is 32 bits and long long is 64 bits) we use second one.
- */
+ /* 0 <= q < 10^6 */
+ r = (q * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[q - 100*r];
+ buf += 2;
-#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+ /* 0 <= r < 10^4 */
+ q = (r * 0x147b) >> 19;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
-/* First algorithm: generic */
+ /* 0 <= q < 100 */
+ *((u16 *)buf) = decpair[q];
+ buf += 2;
+ return buf;
+}
-static
+static noinline_for_stack
char *put_dec(char *buf, unsigned long long n)
{
- if (n >= 100*1000*1000) {
- while (n >= 1000*1000*1000)
- buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
- if (n >= 100*1000*1000)
- return put_dec_full9(buf, n);
- }
+ if (n >= 100*1000*1000)
+ buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
+ /* 1 <= n <= 1.6e11 */
+ if (n >= 100*1000*1000)
+ buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
+ /* 1 <= n < 1e8 */
return put_dec_trunc8(buf, n);
}
-#else
-
-/* Second algorithm: valid only for 64-bit long longs */
+#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64
-/* See comment in put_dec_full9 for choice of constants */
-static noinline_for_stack
-void put_dec_full4(char *buf, unsigned q)
+static void
+put_dec_full4(char *buf, unsigned r)
{
- unsigned r;
- r = (q * 0xccd) >> 15;
- buf[0] = (q - 10 * r) + '0';
- q = (r * 0xcd) >> 11;
- buf[1] = (r - 10 * q) + '0';
- r = (q * 0xcd) >> 11;
- buf[2] = (q - 10 * r) + '0';
- buf[3] = r + '0';
+ unsigned q;
+
+ /* 0 <= r < 10^4 */
+ q = (r * 0x147b) >> 19;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+ /* 0 <= q < 100 */
+ *((u16 *)buf) = decpair[q];
}
/*
@@ -264,9 +269,9 @@ void put_dec_full4(char *buf, unsigned q)
* The approximation x/10000 == (x * 0x346DC5D7) >> 43
* holds for all x < 1,128,869,999. The largest value this
* helper will ever be asked to convert is 1,125,520,955.
- * (d1 in the put_dec code, assuming n is all-ones).
+ * (second call in the put_dec code, assuming n is all-ones).
*/
-static
+static noinline_for_stack
unsigned put_dec_helper4(char *buf, unsigned x)
{
uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
@@ -293,6 +298,8 @@ char *put_dec(char *buf, unsigned long long n)
d2 = (h ) & 0xffff;
d3 = (h >> 16); /* implicit "& 0xffff" */
+ /* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
+ = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
q = put_dec_helper4(buf, q);
@@ -322,7 +329,8 @@ char *put_dec(char *buf, unsigned long long n)
*/
int num_to_str(char *buf, int size, unsigned long long num)
{
- char tmp[sizeof(num) * 3];
+ /* put_dec requires 2-byte alignment of the buffer. */
+ char tmp[sizeof(num) * 3] __aligned(2);
int idx, len;
/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
@@ -340,11 +348,11 @@ int num_to_str(char *buf, int size, unsigned long long num)
return len;
}
-#define ZEROPAD 1 /* pad with zero */
-#define SIGN 2 /* unsigned/signed long */
+#define SIGN 1 /* unsigned/signed, must be 1 */
+#define LEFT 2 /* left justified */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
-#define LEFT 16 /* left justified */
+#define ZEROPAD 16 /* pad with zero, must be 16 == '0' - ' ' */
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
@@ -383,10 +391,8 @@ static noinline_for_stack
char *number(char *buf, char *end, unsigned long long num,
struct printf_spec spec)
{
- /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
- static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
-
- char tmp[66];
+ /* put_dec requires 2-byte alignment of the buffer. */
+ char tmp[3 * sizeof(num)] __aligned(2);
char sign;
char locase;
int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
@@ -422,12 +428,7 @@ char *number(char *buf, char *end, unsigned long long num,
/* generate full string in tmp[], in reverse order */
i = 0;
if (num < spec.base)
- tmp[i++] = digits[num] | locase;
- /* Generic code, for any base:
- else do {
- tmp[i++] = (digits[do_div(num,base)] | locase);
- } while (num != 0);
- */
+ tmp[i++] = hex_asc_upper[num] | locase;
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
@@ -435,7 +436,7 @@ char *number(char *buf, char *end, unsigned long long num,
if (spec.base == 16)
shift = 4;
do {
- tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
+ tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase);
num >>= shift;
} while (num);
} else { /* base 10 */
@@ -447,7 +448,7 @@ char *number(char *buf, char *end, unsigned long long num,
spec.precision = i;
/* leading space padding */
spec.field_width -= spec.precision;
- if (!(spec.flags & (ZEROPAD+LEFT))) {
+ if (!(spec.flags & (ZEROPAD | LEFT))) {
while (--spec.field_width >= 0) {
if (buf < end)
*buf = ' ';
@@ -475,7 +476,8 @@ char *number(char *buf, char *end, unsigned long long num,
}
/* zero or space padding */
if (!(spec.flags & LEFT)) {
- char c = (spec.flags & ZEROPAD) ? '0' : ' ';
+ char c = ' ' + (spec.flags & ZEROPAD);
+ BUILD_BUG_ON(' ' + ZEROPAD != '0');
while (--spec.field_width >= 0) {
if (buf < end)
*buf = c;
@@ -783,11 +785,19 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
if (spec.field_width > 0)
len = min_t(int, spec.field_width, 64);
- for (i = 0; i < len && buf < end - 1; i++) {
- buf = hex_byte_pack(buf, addr[i]);
+ for (i = 0; i < len; ++i) {
+ if (buf < end)
+ *buf = hex_asc_hi(addr[i]);
+ ++buf;
+ if (buf < end)
+ *buf = hex_asc_lo(addr[i]);
+ ++buf;
- if (buf < end && separator && i != len - 1)
- *buf++ = separator;
+ if (separator && i != len - 1) {
+ if (buf < end)
+ *buf = separator;
+ ++buf;
+ }
}
return buf;
@@ -942,7 +952,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt)
break;
}
for (i = 0; i < 4; i++) {
- char temp[3]; /* hold each IP quad in reverse order */
+ char temp[4] __aligned(2); /* hold each IP quad in reverse order */
int digits = put_dec_trunc8(temp, addr[index]) - temp;
if (leading_zeros) {
if (digits < 3)
@@ -1233,8 +1243,12 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
len = spec.field_width < 0 ? 1 : spec.field_width;
- /* Ignore the error. We print as many characters as we can */
- string_escape_mem(addr, len, &buf, end - buf, flags, NULL);
+ /*
+ * string_escape_mem() writes as many characters as it can to
+ * the given buffer, and returns the total size of the output
+ * had the buffer been big enough.
+ */
+ buf += string_escape_mem(addr, len, buf, buf < end ? end - buf : 0, flags, NULL);
return buf;
}
@@ -1322,6 +1336,30 @@ char *address_val(char *buf, char *end, const void *addr,
return number(buf, end, num, spec);
}
+static noinline_for_stack
+char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
+ const char *fmt)
+{
+ if (!IS_ENABLED(CONFIG_HAVE_CLK) || !clk)
+ return string(buf, end, NULL, spec);
+
+ switch (fmt[1]) {
+ case 'r':
+ return number(buf, end, clk_get_rate(clk), spec);
+
+ case 'n':
+ default:
+#ifdef CONFIG_COMMON_CLK
+ return string(buf, end, __clk_get_name(clk), spec);
+#else
+ spec.base = 16;
+ spec.field_width = sizeof(unsigned long) * 2 + 2;
+ spec.flags |= SPECIAL | SMALL | ZEROPAD;
+ return number(buf, end, (unsigned long)clk, spec);
+#endif
+ }
+}
+
int kptr_restrict __read_mostly;
/*
@@ -1404,6 +1442,11 @@ int kptr_restrict __read_mostly;
* (default assumed to be phys_addr_t, passed by reference)
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
+ * - 'C' For a clock, it prints the name (Common Clock Framework) or address
+ * (legacy clock framework) of the clock
+ * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
+ * (legacy clock framework) of the clock
+ * - 'Cr' For a clock, it prints the current rate of the clock
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -1548,6 +1591,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return address_val(buf, end, ptr, spec, fmt);
case 'd':
return dentry_name(buf, end, ptr, spec, fmt);
+ case 'C':
+ return clock(buf, end, ptr, spec, fmt);
case 'D':
return dentry_name(buf, end,
((const struct file *)ptr)->f_path.dentry,
@@ -1738,29 +1783,21 @@ qualifier:
if (spec->qualifier == 'L')
spec->type = FORMAT_TYPE_LONG_LONG;
else if (spec->qualifier == 'l') {
- if (spec->flags & SIGN)
- spec->type = FORMAT_TYPE_LONG;
- else
- spec->type = FORMAT_TYPE_ULONG;
+ BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
+ spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
} else if (_tolower(spec->qualifier) == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (spec->qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
} else if (spec->qualifier == 'H') {
- if (spec->flags & SIGN)
- spec->type = FORMAT_TYPE_BYTE;
- else
- spec->type = FORMAT_TYPE_UBYTE;
+ BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE);
+ spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN);
} else if (spec->qualifier == 'h') {
- if (spec->flags & SIGN)
- spec->type = FORMAT_TYPE_SHORT;
- else
- spec->type = FORMAT_TYPE_USHORT;
+ BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT);
+ spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN);
} else {
- if (spec->flags & SIGN)
- spec->type = FORMAT_TYPE_INT;
- else
- spec->type = FORMAT_TYPE_UINT;
+ BUILD_BUG_ON(FORMAT_TYPE_UINT + SIGN != FORMAT_TYPE_INT);
+ spec->type = FORMAT_TYPE_UINT + (spec->flags & SIGN);
}
return ++fmt - start;
@@ -1800,6 +1837,11 @@ qualifier:
* %*pE[achnops] print an escaped buffer
* %*ph[CDN] a variable-length hex string with a separator (supports up to 64
* bytes of the input)
+ * %pC output the name (Common Clock Framework) or address (legacy clock
+ * framework) of a clock
+ * %pCn output the name (Common Clock Framework) or address (legacy clock
+ * framework) of a clock
+ * %pCr output the current rate of a clock
* %n is ignored
*
* ** Please update Documentation/printk-formats.txt when making changes **
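
The put_dec() rework at the top of this lib/vsprintf.c diff replaces the old one-digit-per-step helpers with helpers that emit two decimal digits per iteration: a reciprocal multiplication computes n/100 without a divide, and a 100-entry "decpair" lookup table (defined elsewhere in the patch, not shown in this excerpt) supplies both characters with a single u16 store. The standalone sketch below is illustrative only, not the kernel code: it uses a portable byte-pair table and invented names, and, like the kernel helpers, emits digits least-significant first so the caller reverses the buffer, as number() and num_to_str() do.

/* Illustrative sketch of the decpair technique; not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* decpair_sketch[i] holds the two ASCII digits of i, units digit first,
 * because the digits are produced least-significant digit first. */
static char decpair_sketch[100][2];

static void init_decpair(void)
{
	for (int i = 0; i < 100; i++) {
		decpair_sketch[i][0] = '0' + i % 10;	/* units */
		decpair_sketch[i][1] = '0' + i / 10;	/* tens */
	}
}

/* Emit r (0 <= r < 10^8) as exactly 8 digits, two per step. */
static char *put_dec_full8_sketch(char *buf, uint32_t r)
{
	uint32_t q;

	q = (uint32_t)(((uint64_t)r * 0x28f5c29) >> 32);	/* q = r / 100 */
	memcpy(buf, decpair_sketch[r - 100 * q], 2); buf += 2;

	r = (uint32_t)(((uint64_t)q * 0x28f5c29) >> 32);	/* r = q / 100 */
	memcpy(buf, decpair_sketch[q - 100 * r], 2); buf += 2;

	q = (r * 0x147b) >> 19;				/* q = r / 100, r < 10^4 */
	memcpy(buf, decpair_sketch[r - 100 * q], 2); buf += 2;

	memcpy(buf, decpair_sketch[q], 2); buf += 2;	/* q < 100 */
	return buf;
}

int main(void)
{
	char out[9] = { 0 };

	init_decpair();
	put_dec_full8_sketch(out, 12345678);
	/* digits were emitted in reverse order; flip them as number() would */
	for (int i = 0; i < 4; i++) {
		char t = out[i]; out[i] = out[7 - i]; out[7 - i] = t;
	}
	puts(out);	/* prints 12345678 */
	return 0;
}

Compiled with any C99 compiler this prints 12345678; the kernel version stores each pair with a single u16 write, which is why num_to_str() and number() now declare their buffers __aligned(2).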
diff --git a/mm/Kconfig b/mm/Kconfig
index a03131b6ba8e..390214da4546 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -517,6 +517,12 @@ config CMA_DEBUG
processing calls such as dma_alloc_from_contiguous().
This option does not affect warning and error messages.
+config CMA_DEBUGFS
+ bool "CMA debugfs interface"
+ depends on CMA && DEBUG_FS
+ help
+ Turns on the DebugFS interface for CMA.
+
config CMA_AREAS
int "Maximum count of the CMA areas"
depends on CMA
diff --git a/mm/Makefile b/mm/Makefile
index 15dbe9903c27..98c4eaeabdcb 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
obj-$(CONFIG_KASAN) += kasan/
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
+obj-$(CONFIG_MEMTEST) += memtest.o
obj-$(CONFIG_MIGRATION) += migrate.o
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
@@ -76,3 +77,4 @@ obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
obj-$(CONFIG_CMA) += cma.o
obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
+obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 053bcd8f12fb..8fc50811119b 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -19,7 +19,7 @@
#include <linux/cleancache.h>
/*
- * cleancache_ops is set by cleancache_ops_register to contain the pointers
+ * cleancache_ops is set by cleancache_register_ops to contain the pointers
* to the cleancache "backend" implementation functions.
*/
static struct cleancache_ops *cleancache_ops __read_mostly;
@@ -34,145 +34,107 @@ static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;
-/*
- * When no backend is registered all calls to init_fs and init_shared_fs
- * are registered and fake poolids (FAKE_FS_POOLID_OFFSET or
- * FAKE_SHARED_FS_POOLID_OFFSET, plus offset in the respective array
- * [shared_|]fs_poolid_map) are given to the respective super block
- * (sb->cleancache_poolid) and no tmem_pools are created. When a backend
- * registers with cleancache the previous calls to init_fs and init_shared_fs
- * are executed to create tmem_pools and set the respective poolids. While no
- * backend is registered all "puts", "gets" and "flushes" are ignored or failed.
- */
-#define MAX_INITIALIZABLE_FS 32
-#define FAKE_FS_POOLID_OFFSET 1000
-#define FAKE_SHARED_FS_POOLID_OFFSET 2000
-
-#define FS_NO_BACKEND (-1)
-#define FS_UNKNOWN (-2)
-static int fs_poolid_map[MAX_INITIALIZABLE_FS];
-static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
-static char *uuids[MAX_INITIALIZABLE_FS];
-/*
- * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
- * invoking umount (and ending in __cleancache_invalidate_fs) and also multiple
- * threads calling mount (and ending up in __cleancache_init_[shared|]fs).
- */
-static DEFINE_MUTEX(poolid_mutex);
-/*
- * When set to false (default) all calls to the cleancache functions, except
- * the __cleancache_invalidate_fs and __cleancache_init_[shared|]fs are guarded
- * by the if (!cleancache_ops) return. This means multiple threads (from
- * different filesystems) will be checking cleancache_ops. The usage of a
- * bool instead of a atomic_t or a bool guarded by a spinlock is OK - we are
- * OK if the time between the backend's have been initialized (and
- * cleancache_ops has been set to not NULL) and when the filesystems start
- * actually calling the backends. The inverse (when unloading) is obviously
- * not good - but this shim does not do that (yet).
- */
-
-/*
- * The backends and filesystems work all asynchronously. This is b/c the
- * backends can be built as modules.
- * The usual sequence of events is:
- * a) mount / -> __cleancache_init_fs is called. We set the
- * [shared_|]fs_poolid_map and uuids for.
- *
- * b). user does I/Os -> we call the rest of __cleancache_* functions
- * which return immediately as cleancache_ops is false.
- *
- * c). modprobe zcache -> cleancache_register_ops. We init the backend
- * and set cleancache_ops to true, and for any fs_poolid_map
- * (which is set by __cleancache_init_fs) we initialize the poolid.
- *
- * d). user does I/Os -> now that cleancache_ops is true all the
- * __cleancache_* functions can call the backend. They all check
- * that fs_poolid_map is valid and if so invoke the backend.
- *
- * e). umount / -> __cleancache_invalidate_fs, the fs_poolid_map is
- * reset (which is the second check in the __cleancache_* ops
- * to call the backend).
- *
- * The sequence of event could also be c), followed by a), and d). and e). The
- * c) would not happen anymore. There is also the chance of c), and one thread
- * doing a) + d), and another doing e). For that case we depend on the
- * filesystem calling __cleancache_invalidate_fs in the proper sequence (so
- * that it handles all I/Os before it invalidates the fs (which is last part
- * of unmounting process).
- *
- * Note: The acute reader will notice that there is no "rmmod zcache" case.
- * This is b/c the functionality for that is not yet implemented and when
- * done, will require some extra locking not yet devised.
- */
+static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
+{
+ switch (sb->cleancache_poolid) {
+ case CLEANCACHE_NO_BACKEND:
+ __cleancache_init_fs(sb);
+ break;
+ case CLEANCACHE_NO_BACKEND_SHARED:
+ __cleancache_init_shared_fs(sb);
+ break;
+ }
+}
/*
- * Register operations for cleancache, returning previous thus allowing
- * detection of multiple backends and possible nesting.
+ * Register operations for cleancache. Returns 0 on success.
*/
-struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
+int cleancache_register_ops(struct cleancache_ops *ops)
{
- struct cleancache_ops *old = cleancache_ops;
- int i;
+ if (cmpxchg(&cleancache_ops, NULL, ops))
+ return -EBUSY;
- mutex_lock(&poolid_mutex);
- for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
- if (fs_poolid_map[i] == FS_NO_BACKEND)
- fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
- if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
- shared_fs_poolid_map[i] = ops->init_shared_fs
- (uuids[i], PAGE_SIZE);
- }
/*
- * We MUST set cleancache_ops _after_ we have called the backends
- * init_fs or init_shared_fs functions. Otherwise the compiler might
- * re-order where cleancache_ops is set in this function.
+ * A cleancache backend can be built as a module and hence loaded after
+ * a cleancache enabled filesystem has called cleancache_init_fs. To
+ * handle such a scenario, here we call ->init_fs or ->init_shared_fs
+ * for each active super block. To differentiate between local and
+ * shared filesystems, we temporarily initialize sb->cleancache_poolid
+ * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
+ * respectively in case there is no backend registered at the time
+ * cleancache_init_fs or cleancache_init_shared_fs is called.
+ *
+ * Since filesystems can be mounted concurrently with cleancache
+ * backend registration, we have to be careful to guarantee that all
+ * cleancache enabled filesystems mounted by the time
+ * cleancache_register_ops is called, as well as all those mounted later,
+ * get a cleancache_poolid. This is assured by the following statements
+ * tied together:
+ *
+ * a) iterate_supers skips only those super blocks that have started
+ * ->kill_sb
+ *
+ * b) if iterate_supers encounters a super block that has not finished
+ * ->mount yet, it waits until it is finished
+ *
+ * c) cleancache_init_fs is called from ->mount and
+ * cleancache_invalidate_fs is called from ->kill_sb
+ *
+ * d) we call iterate_supers after cleancache_ops has been set
+ *
+ * From a) it follows that if iterate_supers skips a super block, then
+ * either the super block is already dead, in which case we do not need
+ * to bother initializing cleancache for it, or it was mounted after we
+ * initiated iterate_supers. In the latter case, it must have seen
+ * cleancache_ops set according to d) and initialized cleancache from
+ * ->mount by itself according to c). This proves that we call
+ * ->init_fs at least once for each active super block.
+ *
+ * From b) and c) it follows that if iterate_supers encounters a super
+ * block that has already started ->init_fs, it will wait until ->mount
+ * and hence ->init_fs has finished, then check cleancache_poolid, see
+ * that it has already been set and therefore do nothing. This proves
+ * that we call ->init_fs no more than once for each super block.
+ *
+ * Combined together, the last two paragraphs prove the function
+ * correctness.
+ *
+ * Note that various cleancache callbacks may proceed before this
+ * function is called or even concurrently with it, but since
+ * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
+ * until the corresponding ->init_fs has been actually called and
+ * cleancache_ops has been set.
*/
- barrier();
- cleancache_ops = ops;
- mutex_unlock(&poolid_mutex);
- return old;
+ iterate_supers(cleancache_register_ops_sb, NULL);
+ return 0;
}
EXPORT_SYMBOL(cleancache_register_ops);
/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
- int i;
+ int pool_id = CLEANCACHE_NO_BACKEND;
- mutex_lock(&poolid_mutex);
- for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
- if (fs_poolid_map[i] == FS_UNKNOWN) {
- sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
- if (cleancache_ops)
- fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
- else
- fs_poolid_map[i] = FS_NO_BACKEND;
- break;
- }
+ if (cleancache_ops) {
+ pool_id = cleancache_ops->init_fs(PAGE_SIZE);
+ if (pool_id < 0)
+ pool_id = CLEANCACHE_NO_POOL;
}
- mutex_unlock(&poolid_mutex);
+ sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_fs);
/* Called by a cleancache-enabled clustered filesystem at time of mount */
-void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
+void __cleancache_init_shared_fs(struct super_block *sb)
{
- int i;
+ int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
- mutex_lock(&poolid_mutex);
- for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
- if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
- sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
- uuids[i] = uuid;
- if (cleancache_ops)
- shared_fs_poolid_map[i] = cleancache_ops->init_shared_fs
- (uuid, PAGE_SIZE);
- else
- shared_fs_poolid_map[i] = FS_NO_BACKEND;
- break;
- }
+ if (cleancache_ops) {
+ pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE);
+ if (pool_id < 0)
+ pool_id = CLEANCACHE_NO_POOL;
}
- mutex_unlock(&poolid_mutex);
+ sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);
@@ -202,19 +164,6 @@ static int cleancache_get_key(struct inode *inode,
}
/*
- * Returns a pool_id that is associated with a given fake poolid.
- */
-static int get_poolid_from_fake(int fake_pool_id)
-{
- if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
- return shared_fs_poolid_map[fake_pool_id -
- FAKE_SHARED_FS_POOLID_OFFSET];
- else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
- return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
- return FS_NO_BACKEND;
-}
-
-/*
* "Get" data from cleancache associated with the poolid/inode/index
* that were specified when the data was put to cleancache and, if
* successful, use it to fill the specified page with data and return 0.
@@ -229,7 +178,6 @@ int __cleancache_get_page(struct page *page)
{
int ret = -1;
int pool_id;
- int fake_pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
if (!cleancache_ops) {
@@ -238,17 +186,14 @@ int __cleancache_get_page(struct page *page)
}
VM_BUG_ON_PAGE(!PageLocked(page), page);
- fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
- if (fake_pool_id < 0)
+ pool_id = page->mapping->host->i_sb->cleancache_poolid;
+ if (pool_id < 0)
goto out;
- pool_id = get_poolid_from_fake(fake_pool_id);
if (cleancache_get_key(page->mapping->host, &key) < 0)
goto out;
- if (pool_id >= 0)
- ret = cleancache_ops->get_page(pool_id,
- key, page->index, page);
+ ret = cleancache_ops->get_page(pool_id, key, page->index, page);
if (ret == 0)
cleancache_succ_gets++;
else
@@ -271,7 +216,6 @@ EXPORT_SYMBOL(__cleancache_get_page);
void __cleancache_put_page(struct page *page)
{
int pool_id;
- int fake_pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
if (!cleancache_ops) {
@@ -280,12 +224,7 @@ void __cleancache_put_page(struct page *page)
}
VM_BUG_ON_PAGE(!PageLocked(page), page);
- fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
- if (fake_pool_id < 0)
- return;
-
- pool_id = get_poolid_from_fake(fake_pool_id);
-
+ pool_id = page->mapping->host->i_sb->cleancache_poolid;
if (pool_id >= 0 &&
cleancache_get_key(page->mapping->host, &key) >= 0) {
cleancache_ops->put_page(pool_id, key, page->index, page);
@@ -306,18 +245,13 @@ void __cleancache_invalidate_page(struct address_space *mapping,
struct page *page)
{
/* careful... page->mapping is NULL sometimes when this is called */
- int pool_id;
- int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
+ int pool_id = mapping->host->i_sb->cleancache_poolid;
struct cleancache_filekey key = { .u.key = { 0 } };
if (!cleancache_ops)
return;
- if (fake_pool_id >= 0) {
- pool_id = get_poolid_from_fake(fake_pool_id);
- if (pool_id < 0)
- return;
-
+ if (pool_id >= 0) {
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (cleancache_get_key(mapping->host, &key) >= 0) {
cleancache_ops->invalidate_page(pool_id,
@@ -339,18 +273,12 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
*/
void __cleancache_invalidate_inode(struct address_space *mapping)
{
- int pool_id;
- int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
+ int pool_id = mapping->host->i_sb->cleancache_poolid;
struct cleancache_filekey key = { .u.key = { 0 } };
if (!cleancache_ops)
return;
- if (fake_pool_id < 0)
- return;
-
- pool_id = get_poolid_from_fake(fake_pool_id);
-
if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
cleancache_ops->invalidate_inode(pool_id, key);
}
@@ -363,32 +291,18 @@ EXPORT_SYMBOL(__cleancache_invalidate_inode);
*/
void __cleancache_invalidate_fs(struct super_block *sb)
{
- int index;
- int fake_pool_id = sb->cleancache_poolid;
- int old_poolid = fake_pool_id;
+ int pool_id;
- mutex_lock(&poolid_mutex);
- if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
- index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
- old_poolid = shared_fs_poolid_map[index];
- shared_fs_poolid_map[index] = FS_UNKNOWN;
- uuids[index] = NULL;
- } else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
- index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
- old_poolid = fs_poolid_map[index];
- fs_poolid_map[index] = FS_UNKNOWN;
- }
- sb->cleancache_poolid = -1;
- if (cleancache_ops)
- cleancache_ops->invalidate_fs(old_poolid);
- mutex_unlock(&poolid_mutex);
+ pool_id = sb->cleancache_poolid;
+ sb->cleancache_poolid = CLEANCACHE_NO_POOL;
+
+ if (cleancache_ops && pool_id >= 0)
+ cleancache_ops->invalidate_fs(pool_id);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);
static int __init init_cleancache(void)
{
- int i;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *root = debugfs_create_dir("cleancache", NULL);
if (root == NULL)
@@ -400,10 +314,6 @@ static int __init init_cleancache(void)
debugfs_create_u64("invalidates", S_IRUGO,
root, &cleancache_invalidates);
#endif
- for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
- fs_poolid_map[i] = FS_UNKNOWN;
- shared_fs_poolid_map[i] = FS_UNKNOWN;
- }
return 0;
}
module_init(init_cleancache)
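
The heart of the cleancache rewrite above is the new cleancache_register_ops(): the single backend slot is claimed with cmpxchg(), so a second registration fails with -EBUSY, and iterate_supers() then walks already-mounted filesystems so that any super block still carrying CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED gets a real pool id. The userspace sketch below models only that register-once-then-backfill pattern; struct backend, fs_pool and NO_BACKEND are invented stand-ins, and C11 atomics stand in for the kernel's cmpxchg().

/* Illustrative sketch of the register-once-then-backfill pattern. */
#include <stdatomic.h>
#include <stdio.h>

#define NO_BACKEND (-1)
#define NFS 4

struct backend {
	int (*init_fs)(void);
};

static _Atomic(struct backend *) backend_ops;
static int fs_pool[NFS] = { NO_BACKEND, NO_BACKEND, NO_BACKEND, NO_BACKEND };

static int demo_init_fs(void)
{
	static int next_pool;
	return next_pool++;
}

static struct backend demo = { .init_fs = demo_init_fs };

static int register_backend(struct backend *ops)
{
	struct backend *expected = NULL;

	/* only the first registration wins, like the cmpxchg() above */
	if (!atomic_compare_exchange_strong(&backend_ops, &expected, ops))
		return -1;	/* -EBUSY in the kernel */

	/* backfill filesystems "mounted" before the backend existed */
	for (int i = 0; i < NFS; i++)
		if (fs_pool[i] == NO_BACKEND)
			fs_pool[i] = ops->init_fs();
	return 0;
}

int main(void)
{
	printf("first register:  %d\n", register_backend(&demo));
	printf("second register: %d\n", register_backend(&demo));
	for (int i = 0; i < NFS; i++)
		printf("fs %d pool id %d\n", i, fs_pool[i]);
	return 0;
}

The sketch prints 0 for the first registration, -1 for the second, and a non-negative pool id for every filesystem; the long comment in cleancache_register_ops() above is what extends this reasoning to filesystems mounted concurrently with registration.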
diff --git a/mm/cma.c b/mm/cma.c
index 68ecb7a42983..3a7a67b93394 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -23,6 +23,7 @@
# define DEBUG
#endif
#endif
+#define CREATE_TRACE_POINTS
#include <linux/memblock.h>
#include <linux/err.h>
@@ -34,30 +35,26 @@
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <trace/events/cma.h>
-struct cma {
- unsigned long base_pfn;
- unsigned long count;
- unsigned long *bitmap;
- unsigned int order_per_bit; /* Order of pages represented by one bit */
- struct mutex lock;
-};
+#include "cma.h"
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
+struct cma cma_areas[MAX_CMA_AREAS];
+unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
-phys_addr_t cma_get_base(struct cma *cma)
+phys_addr_t cma_get_base(const struct cma *cma)
{
return PFN_PHYS(cma->base_pfn);
}
-unsigned long cma_get_size(struct cma *cma)
+unsigned long cma_get_size(const struct cma *cma)
{
return cma->count << PAGE_SHIFT;
}
-static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+ int align_order)
{
if (align_order <= cma->order_per_bit)
return 0;
@@ -68,7 +65,8 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
* Find a PFN aligned to the specified order and return an offset represented in
* order_per_bits.
*/
-static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
+ int align_order)
{
if (align_order <= cma->order_per_bit)
return 0;
@@ -77,18 +75,14 @@ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
- cma->base_pfn) >> cma->order_per_bit;
}
-static unsigned long cma_bitmap_maxno(struct cma *cma)
-{
- return cma->count >> cma->order_per_bit;
-}
-
-static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
- unsigned long pages)
+static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
+ unsigned long pages)
{
return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
-static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
+static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
+ unsigned int count)
{
unsigned long bitmap_no, bitmap_count;
@@ -134,6 +128,12 @@ static int __init cma_activate_area(struct cma *cma)
} while (--i);
mutex_init(&cma->lock);
+
+#ifdef CONFIG_CMA_DEBUGFS
+ INIT_HLIST_HEAD(&cma->mem_head);
+ spin_lock_init(&cma->mem_head_lock);
+#endif
+
return 0;
err:
@@ -167,7 +167,8 @@ core_initcall(cma_init_reserved_areas);
* This function creates custom contiguous area from already reserved memory.
*/
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
- int order_per_bit, struct cma **res_cma)
+ unsigned int order_per_bit,
+ struct cma **res_cma)
{
struct cma *cma;
phys_addr_t alignment;
@@ -358,7 +359,7 @@ err:
* This function allocates part of contiguous memory on specific
* contiguous memory area.
*/
-struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
{
unsigned long mask, offset, pfn, start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
@@ -415,6 +416,8 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
start = bitmap_no + mask + 1;
}
+ trace_cma_alloc(page ? pfn : -1UL, page, count, align);
+
pr_debug("%s(): returned %p\n", __func__, page);
return page;
}
@@ -429,7 +432,7 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
* It returns false when provided pages do not belong to contiguous area and
* true otherwise.
*/
-bool cma_release(struct cma *cma, struct page *pages, int count)
+bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
unsigned long pfn;
@@ -447,6 +450,7 @@ bool cma_release(struct cma *cma, struct page *pages, int count)
free_contig_range(pfn, count);
cma_clear_bitmap(cma, pfn, count);
+ trace_cma_release(pfn, pages, count);
return true;
}
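
For reference, the cma_bitmap_*() helpers that this diff const-qualifies all revolve around one scaling rule: a single bitmap bit covers 2^order_per_bit pages, so page counts and alignment requests must be converted before the bitmap is searched or cleared. The short standalone example below (invented names, not kernel code) shows the two conversions used by cma_alloc() and cma_release().

/* Illustrative sketch of the CMA bitmap arithmetic. */
#include <stdio.h>

static unsigned long pages_to_bits(unsigned int order_per_bit, unsigned long pages)
{
	unsigned long per_bit = 1UL << order_per_bit;

	/* round up to a whole bit, then scale down (ALIGN() + shift above) */
	return ((pages + per_bit - 1) & ~(per_bit - 1)) >> order_per_bit;
}

static unsigned long aligned_mask(unsigned int order_per_bit, int align_order)
{
	/* requests no coarser than one bit need no extra bitmap alignment */
	if (align_order <= (int)order_per_bit)
		return 0;
	return (1UL << (align_order - order_per_bit)) - 1;
}

int main(void)
{
	/* with order_per_bit = 2, each bit stands for 4 pages */
	printf("10 pages -> %lu bits\n", pages_to_bits(2, 10));		/* 3 */
	printf("order-9 alignment -> mask %lu\n", aligned_mask(2, 9));	/* 127 */
	return 0;
}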
diff --git a/mm/cma.h b/mm/cma.h
new file mode 100644
index 000000000000..1132d733556d
--- /dev/null
+++ b/mm/cma.h
@@ -0,0 +1,24 @@
+#ifndef __MM_CMA_H__
+#define __MM_CMA_H__
+
+struct cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+ unsigned int order_per_bit; /* Order of pages represented by one bit */
+ struct mutex lock;
+#ifdef CONFIG_CMA_DEBUGFS
+ struct hlist_head mem_head;
+ spinlock_t mem_head_lock;
+#endif
+};
+
+extern struct cma cma_areas[MAX_CMA_AREAS];
+extern unsigned cma_area_count;
+
+static unsigned long cma_bitmap_maxno(struct cma *cma)
+{
+ return cma->count >> cma->order_per_bit;
+}
+
+#endif
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
new file mode 100644
index 000000000000..7621ee34daa0
--- /dev/null
+++ b/mm/cma_debug.c
@@ -0,0 +1,205 @@
+/*
+ * CMA DebugFS Interface
+ *
+ * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/cma.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+
+#include "cma.h"
+
+struct cma_mem {
+ struct hlist_node node;
+ struct page *p;
+ unsigned long n;
+};
+
+static struct dentry *cma_debugfs_root;
+
+static int cma_debugfs_get(void *data, u64 *val)
+{
+ unsigned long *p = data;
+
+ *val = *p;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
+
+static int cma_used_get(void *data, u64 *val)
+{
+ struct cma *cma = data;
+ unsigned long used;
+
+ mutex_lock(&cma->lock);
+ /* cma->count fits in an int, so the cast below is safe */
+ used = bitmap_weight(cma->bitmap, (int)cma->count);
+ mutex_unlock(&cma->lock);
+ *val = (u64)used << cma->order_per_bit;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
+
+static int cma_maxchunk_get(void *data, u64 *val)
+{
+ struct cma *cma = data;
+ unsigned long maxchunk = 0;
+ unsigned long start, end = 0;
+
+ mutex_lock(&cma->lock);
+ for (;;) {
+ start = find_next_zero_bit(cma->bitmap, cma->count, end);
+ if (start >= cma->count)
+ break;
+ end = find_next_bit(cma->bitmap, cma->count, start);
+ maxchunk = max(end - start, maxchunk);
+ }
+ mutex_unlock(&cma->lock);
+ *val = (u64)maxchunk << cma->order_per_bit;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
+
+static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
+{
+ spin_lock(&cma->mem_head_lock);
+ hlist_add_head(&mem->node, &cma->mem_head);
+ spin_unlock(&cma->mem_head_lock);
+}
+
+static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
+{
+ struct cma_mem *mem = NULL;
+
+ spin_lock(&cma->mem_head_lock);
+ if (!hlist_empty(&cma->mem_head)) {
+ mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
+ hlist_del_init(&mem->node);
+ }
+ spin_unlock(&cma->mem_head_lock);
+
+ return mem;
+}
+
+static int cma_free_mem(struct cma *cma, int count)
+{
+ struct cma_mem *mem = NULL;
+
+ while (count) {
+ mem = cma_get_entry_from_list(cma);
+ if (mem == NULL)
+ return 0;
+
+ if (mem->n <= count) {
+ cma_release(cma, mem->p, mem->n);
+ count -= mem->n;
+ kfree(mem);
+ } else if (cma->order_per_bit == 0) {
+ cma_release(cma, mem->p, count);
+ mem->p += count;
+ mem->n -= count;
+ count = 0;
+ cma_add_to_cma_mem_list(cma, mem);
+ } else {
+ pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
+ cma_add_to_cma_mem_list(cma, mem);
+ break;
+ }
+ }
+
+ return 0;
+
+}
+
+static int cma_free_write(void *data, u64 val)
+{
+ int pages = val;
+ struct cma *cma = data;
+
+ return cma_free_mem(cma, pages);
+}
+DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
+
+static int cma_alloc_mem(struct cma *cma, int count)
+{
+ struct cma_mem *mem;
+ struct page *p;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ p = cma_alloc(cma, count, 0);
+ if (!p) {
+ kfree(mem);
+ return -ENOMEM;
+ }
+
+ mem->p = p;
+ mem->n = count;
+
+ cma_add_to_cma_mem_list(cma, mem);
+
+ return 0;
+}
+
+static int cma_alloc_write(void *data, u64 val)
+{
+ int pages = val;
+ struct cma *cma = data;
+
+ return cma_alloc_mem(cma, pages);
+}
+DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
+
+static void cma_debugfs_add_one(struct cma *cma, int idx)
+{
+ struct dentry *tmp;
+ char name[16];
+ int u32s;
+
+ sprintf(name, "cma-%d", idx);
+
+ tmp = debugfs_create_dir(name, cma_debugfs_root);
+
+ debugfs_create_file("alloc", S_IWUSR, cma_debugfs_root, cma,
+ &cma_alloc_fops);
+
+ debugfs_create_file("free", S_IWUSR, cma_debugfs_root, cma,
+ &cma_free_fops);
+
+ debugfs_create_file("base_pfn", S_IRUGO, tmp,
+ &cma->base_pfn, &cma_debugfs_fops);
+ debugfs_create_file("count", S_IRUGO, tmp,
+ &cma->count, &cma_debugfs_fops);
+ debugfs_create_file("order_per_bit", S_IRUGO, tmp,
+ &cma->order_per_bit, &cma_debugfs_fops);
+ debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
+ debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);
+
+ u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
+ debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32*)cma->bitmap, u32s);
+}
+
+static int __init cma_debugfs_init(void)
+{
+ int i;
+
+ cma_debugfs_root = debugfs_create_dir("cma", NULL);
+ if (!cma_debugfs_root)
+ return -ENOMEM;
+
+ for (i = 0; i < cma_area_count; i++)
+ cma_debugfs_add_one(&cma_areas[i], i);
+
+ return 0;
+}
+late_initcall(cma_debugfs_init);
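
Of the new debugfs files, "maxchunk" is the least obvious: cma_maxchunk_get() above scans the allocation bitmap for the longest run of clear bits and reports it scaled by order_per_bit, i.e. the largest request the area could still satisfy. The standalone sketch below reproduces just that scan, with a plain byte-array bitmap and a naive stand-in for find_next_zero_bit()/find_next_bit().

/* Illustrative sketch of the maxchunk bitmap scan. */
#include <stdio.h>

static unsigned long next_bit_with_value(const unsigned char *bm,
					 unsigned long size,
					 unsigned long start, int want)
{
	for (unsigned long i = start; i < size; i++)
		if (((bm[i / 8] >> (i % 8)) & 1) == want)
			return i;
	return size;
}

static unsigned long max_free_chunk(const unsigned char *bm, unsigned long bits)
{
	unsigned long maxchunk = 0, start, end = 0;

	for (;;) {
		start = next_bit_with_value(bm, bits, end, 0);	/* find_next_zero_bit */
		if (start >= bits)
			break;
		end = next_bit_with_value(bm, bits, start, 1);	/* find_next_bit */
		if (end - start > maxchunk)
			maxchunk = end - start;
	}
	return maxchunk;
}

int main(void)
{
	/* 16 bits, LSB first per byte: 1100 0011 1111 0000 -> free runs of 4 and 4 */
	unsigned char bitmap[2] = { 0xc3, 0x0f };

	printf("longest free run: %lu bits\n", max_free_chunk(bitmap, 16));
	return 0;
}

With the example bitmap the longest free run is 4 bits, which cma_maxchunk_get() would report as 4 << order_per_bit pages.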
diff --git a/mm/compaction.c b/mm/compaction.c
index 8c0d9459b54a..018f08da99a2 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -391,28 +391,6 @@ static inline bool compact_should_abort(struct compact_control *cc)
return false;
}
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
- /* If the page is a large free page, then disallow migration */
- if (PageBuddy(page)) {
- /*
- * We are checking page_order without zone->lock taken. But
- * the only small danger is that we skip a potentially suitable
- * pageblock, so it's not worth to check order for valid range.
- */
- if (page_order_unsafe(page) >= pageblock_order)
- return false;
- }
-
- /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
- if (migrate_async_suitable(get_pageblock_migratetype(page)))
- return true;
-
- /* Otherwise skip the block */
- return false;
-}
-
/*
* Isolate free pages onto a private freelist. If @strict is true, will abort
* returning 0 on any invalid PFNs or non-free pages inside of the pageblock
@@ -896,6 +874,29 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+ /* If the page is a large free page, then disallow migration */
+ if (PageBuddy(page)) {
+ /*
+ * We are checking page_order without zone->lock taken. But
+ * the only small danger is that we skip a potentially suitable
+ * pageblock, so it's not worth to check order for valid range.
+ */
+ if (page_order_unsafe(page) >= pageblock_order)
+ return false;
+ }
+
+ /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+ if (migrate_async_suitable(get_pageblock_migratetype(page)))
+ return true;
+
+ /* Otherwise skip the block */
+ return false;
+}
+
/*
* Based on information in the current compact_control, find blocks
* suitable for isolating free pages from and then isolate them.
@@ -1047,6 +1048,12 @@ typedef enum {
} isolate_migrate_t;
/*
+ * Allow userspace to control policy on scanning the unevictable LRU for
+ * compactable pages.
+ */
+int sysctl_compact_unevictable_allowed __read_mostly = 1;
+
+/*
* Isolate all pages that can be migrated from the first suitable block,
* starting at the block pointed to by the migrate scanner pfn within
* compact_control.
@@ -1057,6 +1064,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
unsigned long low_pfn, end_pfn;
struct page *page;
const isolate_mode_t isolate_mode =
+ (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
/*
@@ -1174,13 +1182,24 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
/* Direct compactor: Is a suitable page free? */
for (order = cc->order; order < MAX_ORDER; order++) {
struct free_area *area = &zone->free_area[order];
+ bool can_steal;
/* Job done if page is free of the right migratetype */
if (!list_empty(&area->free_list[migratetype]))
return COMPACT_PARTIAL;
- /* Job done if allocation would set block type */
- if (order >= pageblock_order && area->nr_free)
+#ifdef CONFIG_CMA
+ /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
+ if (migratetype == MIGRATE_MOVABLE &&
+ !list_empty(&area->free_list[MIGRATE_CMA]))
+ return COMPACT_PARTIAL;
+#endif
+ /*
+ * Job done if allocation would steal freepages from
+ * other migratetype buddy lists.
+ */
+ if (find_suitable_fallback(area, order, migratetype,
+ true, &can_steal) != -1)
return COMPACT_PARTIAL;
}
@@ -1587,6 +1606,14 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
INIT_LIST_HEAD(&cc->freepages);
INIT_LIST_HEAD(&cc->migratepages);
+ /*
+ * When called via /proc/sys/vm/compact_memory
+ * this makes sure we compact the whole zone regardless of
+ * cached scanner positions.
+ */
+ if (cc->order == -1)
+ __reset_isolation_suitable(zone);
+
if (cc->order == -1 || !compaction_deferred(zone, cc->order))
compact_zone(zone, cc);
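
The __compact_finished() hunk above changes when direct compaction may declare success for a given order: besides a free page of the wanted migratetype, a MOVABLE request can now finish on a free MIGRATE_CMA page, and otherwise only when find_suitable_fallback() confirms that a whole block could be stolen from another migratetype. The toy model below is not kernel code; free_count[][] and can_steal_fallback() are simplified stand-ins intended only to show the shape of that decision.

/* Toy model of the compaction termination test; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

enum { UNMOVABLE, MOVABLE, RECLAIMABLE, CMA, NR_TYPES };
#define MAX_ORDER 11

/* free_count[order][type]: free pages of that order and migratetype */
static int free_count[MAX_ORDER][NR_TYPES];

static bool can_steal_fallback(int order)
{
	/* simplified: stealing a whole block only pays off for large orders */
	return order >= MAX_ORDER / 2;
}

static bool compact_finished(int req_order, int migratetype)
{
	for (int order = req_order; order < MAX_ORDER; order++) {
		if (free_count[order][migratetype])
			return true;			/* direct hit */
		if (migratetype == MOVABLE && free_count[order][CMA])
			return true;			/* CMA fallback */
		for (int fb = 0; fb < NR_TYPES; fb++) {
			/* CMA blocks are never handed over to other types */
			if (fb == migratetype || fb == CMA)
				continue;
			if (free_count[order][fb] && can_steal_fallback(order))
				return true;	/* stealable fallback block */
		}
	}
	return false;
}

int main(void)
{
	free_count[9][CMA] = 1;		/* only one large free CMA block */
	printf("MOVABLE order-3 done?   %d\n", compact_finished(3, MOVABLE));
	printf("UNMOVABLE order-3 done? %d\n", compact_finished(3, UNMOVABLE));
	return 0;
}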
diff --git a/mm/filemap.c b/mm/filemap.c
index ad7242043bdb..6bf5e42d560a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -13,7 +13,6 @@
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
@@ -203,16 +202,15 @@ void __delete_from_page_cache(struct page *page, void *shadow)
BUG_ON(page_mapped(page));
/*
- * Some filesystems seem to re-dirty the page even after
- * the VM has canceled the dirty bit (eg ext3 journaling).
+ * At this point page must be either written or cleaned by truncate.
+ * Dirty page here signals a bug and loss of unwritten data.
*
- * Fix it up by doing a final dirty accounting check after
- * having removed the page entirely.
+ * This fixes dirty accounting after removing the page entirely but
+ * leaves PageDirty set: it has no effect for truncated page and
+ * anyway will be cleared before returning page into buddy allocator.
*/
- if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
- dec_zone_page_state(page, NR_FILE_DIRTY);
- dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
- }
+ if (WARN_ON_ONCE(PageDirty(page)))
+ account_page_cleaned(page, mapping);
}
/**
@@ -1695,7 +1693,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
loff_t *ppos = &iocb->ki_pos;
loff_t pos = *ppos;
- if (io_is_direct(file)) {
+ if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
@@ -1708,7 +1706,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
pos + count - 1);
if (!retval) {
struct iov_iter data = *iter;
- retval = mapping->a_ops->direct_IO(READ, iocb, &data, pos);
+ retval = mapping->a_ops->direct_IO(iocb, &data, pos);
}
if (retval > 0) {
@@ -2261,41 +2259,38 @@ EXPORT_SYMBOL(read_cache_page_gfp);
* Returns appropriate error code that caller should return or
* zero in case that write should be allowed.
*/
-inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
+inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
unsigned long limit = rlimit(RLIMIT_FSIZE);
+ loff_t pos;
- if (unlikely(*pos < 0))
- return -EINVAL;
+ if (!iov_iter_count(from))
+ return 0;
- if (!isblk) {
- /* FIXME: this is for backwards compatibility with 2.4 */
- if (file->f_flags & O_APPEND)
- *pos = i_size_read(inode);
+ /* FIXME: this is for backwards compatibility with 2.4 */
+ if (iocb->ki_flags & IOCB_APPEND)
+ iocb->ki_pos = i_size_read(inode);
- if (limit != RLIM_INFINITY) {
- if (*pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- if (*count > limit - (typeof(limit))*pos) {
- *count = limit - (typeof(limit))*pos;
- }
+ pos = iocb->ki_pos;
+
+ if (limit != RLIM_INFINITY) {
+ if (iocb->ki_pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
}
+ iov_iter_truncate(from, limit - (unsigned long)pos);
}
/*
* LFS rule
*/
- if (unlikely(*pos + *count > MAX_NON_LFS &&
+ if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
!(file->f_flags & O_LARGEFILE))) {
- if (*pos >= MAX_NON_LFS) {
+ if (pos >= MAX_NON_LFS)
return -EFBIG;
- }
- if (*count > MAX_NON_LFS - (unsigned long)*pos) {
- *count = MAX_NON_LFS - (unsigned long)*pos;
- }
+ iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
}
/*
@@ -2305,34 +2300,11 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
* exceeded without writing data we send a signal and return EFBIG.
* Linus frestrict idea will clean these up nicely..
*/
- if (likely(!isblk)) {
- if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
- if (*count || *pos > inode->i_sb->s_maxbytes) {
- return -EFBIG;
- }
- /* zero-length writes at ->s_maxbytes are OK */
- }
-
- if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
- *count = inode->i_sb->s_maxbytes - *pos;
- } else {
-#ifdef CONFIG_BLOCK
- loff_t isize;
- if (bdev_read_only(I_BDEV(inode)))
- return -EPERM;
- isize = i_size_read(inode);
- if (*pos >= isize) {
- if (*count || *pos > isize)
- return -ENOSPC;
- }
+ if (unlikely(pos >= inode->i_sb->s_maxbytes))
+ return -EFBIG;
- if (*pos + *count > isize)
- *count = isize - *pos;
-#else
- return -EPERM;
-#endif
- }
- return 0;
+ iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
+ return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);
@@ -2396,7 +2368,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
}
data = *from;
- written = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos);
+ written = mapping->a_ops->direct_IO(iocb, &data, pos);
/*
* Finally, try again to invalidate clean pages which might have been
@@ -2558,23 +2530,12 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct file *file = iocb->ki_filp;
struct address_space * mapping = file->f_mapping;
struct inode *inode = mapping->host;
- loff_t pos = iocb->ki_pos;
ssize_t written = 0;
ssize_t err;
ssize_t status;
- size_t count = iov_iter_count(from);
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
- err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
- if (err)
- goto out;
-
- if (count == 0)
- goto out;
-
- iov_iter_truncate(from, count);
-
err = file_remove_suid(file);
if (err)
goto out;
@@ -2583,10 +2544,10 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (err)
goto out;
- if (io_is_direct(file)) {
- loff_t endbyte;
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ loff_t pos, endbyte;
- written = generic_file_direct_write(iocb, from, pos);
+ written = generic_file_direct_write(iocb, from, iocb->ki_pos);
/*
* If the write stopped short of completing, fall back to
* buffered writes. Some filesystems do this for writes to
@@ -2594,13 +2555,10 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
* not succeed (even if it did, DAX does not handle dirty
* page-cache pages correctly).
*/
- if (written < 0 || written == count || IS_DAX(inode))
+ if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
goto out;
- pos += written;
- count -= written;
-
- status = generic_perform_write(file, from, pos);
+ status = generic_perform_write(file, from, pos = iocb->ki_pos);
/*
* If generic_perform_write() returned a synchronous error
* then we want to return the number of bytes which were
@@ -2612,15 +2570,15 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
err = status;
goto out;
}
- iocb->ki_pos = pos + status;
/*
* We need to ensure that the page cache pages are written to
* disk and invalidated to preserve the expected O_DIRECT
* semantics.
*/
endbyte = pos + status - 1;
- err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
+ err = filemap_write_and_wait_range(mapping, pos, endbyte);
if (err == 0) {
+ iocb->ki_pos = endbyte + 1;
written += status;
invalidate_mapping_pages(mapping,
pos >> PAGE_CACHE_SHIFT,
@@ -2632,9 +2590,9 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
*/
}
} else {
- written = generic_perform_write(file, from, pos);
- if (likely(written >= 0))
- iocb->ki_pos = pos + written;
+ written = generic_perform_write(file, from, iocb->ki_pos);
+ if (likely(written > 0))
+ iocb->ki_pos += written;
}
out:
current->backing_dev_info = NULL;
@@ -2658,7 +2616,9 @@ ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
mutex_lock(&inode->i_mutex);
- ret = __generic_file_write_iter(iocb, from);
+ ret = generic_write_checks(iocb, from);
+ if (ret > 0)
+ ret = __generic_file_write_iter(iocb, from);
mutex_unlock(&inode->i_mutex);
if (ret > 0) {
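
The mm/filemap.c hunks above convert generic_write_checks() from adjusting a (pos, count) pair through pointers into clamping the iov_iter in place and returning the number of bytes that may still be written: 0 for an empty write, negative on error, with callers such as generic_file_write_iter() only proceeding when the return value is positive. The sketch below models only that contract; struct fake_iter and the two limits are invented stand-ins, and the real function additionally handles RLIM_INFINITY, O_LARGEFILE/MAX_NON_LFS and the SIGXFSZ signal.

/* Illustrative sketch of the new generic_write_checks() return contract. */
#include <stdio.h>

#define EFBIG 27

struct fake_iter {
	long long pos;		/* stands in for iocb->ki_pos */
	unsigned long count;	/* stands in for iov_iter_count() */
};

static long long write_checks(struct fake_iter *it, long long fsize_limit,
			      long long s_maxbytes)
{
	if (!it->count)
		return 0;
	if (it->pos >= fsize_limit || it->pos >= s_maxbytes)
		return -EFBIG;

	/* like iov_iter_truncate(): shrink the request, never move the position */
	if (it->count > (unsigned long long)(fsize_limit - it->pos))
		it->count = fsize_limit - it->pos;
	if (it->count > (unsigned long long)(s_maxbytes - it->pos))
		it->count = s_maxbytes - it->pos;
	return it->count;
}

int main(void)
{
	struct fake_iter it = { .pos = 1000, .count = 4096 };

	/* caller pattern: ret = checks(); if (ret > 0) do the write */
	long long ret = write_checks(&it, 2048, 1LL << 20);

	printf("may write %lld of 4096 bytes at pos %lld\n", ret, it.pos);
	return 0;
}

With a 2048-byte file size limit the request is cut to 1048 bytes and the position itself is untouched, which is why __generic_file_write_iter() no longer needs to carry a separate count variable.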
diff --git a/mm/gup.c b/mm/gup.c
index a6e24e246f86..6297f6bccfb1 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -92,7 +92,7 @@ retry:
*/
mark_page_accessed(page);
}
- if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+ if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
@@ -265,8 +265,8 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
unsigned int fault_flags = 0;
int ret;
- /* For mlock, just skip the stack guard page. */
- if ((*flags & FOLL_MLOCK) &&
+ /* For mm_populate(), just skip the stack guard page. */
+ if ((*flags & FOLL_POPULATE) &&
(stack_guard_page_start(vma, address) ||
stack_guard_page_end(vma, address + PAGE_SIZE)))
return -ENOENT;
@@ -819,6 +819,124 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
EXPORT_SYMBOL(get_user_pages);
/**
+ * populate_vma_page_range() - populate a range of pages in the vma.
+ * @vma: target vma
+ * @start: start address
+ * @end: end address
+ * @nonblocking:
+ *
+ * This takes care of mlocking the pages too if VM_LOCKED is set.
+ *
+ * return 0 on success, negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held.
+ *
+ * If @nonblocking is NULL, it may be held for read or write and will
+ * be unperturbed.
+ *
+ * If @nonblocking is non-NULL, it must be held for read only and may be
+ * released. If it's released, *@nonblocking will be set to 0.
+ */
+long populate_vma_page_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, int *nonblocking)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long nr_pages = (end - start) / PAGE_SIZE;
+ int gup_flags;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(end & ~PAGE_MASK);
+ VM_BUG_ON_VMA(start < vma->vm_start, vma);
+ VM_BUG_ON_VMA(end > vma->vm_end, vma);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+
+ gup_flags = FOLL_TOUCH | FOLL_POPULATE;
+ /*
+ * We want to touch writable mappings with a write fault in order
+ * to break COW, except for shared mappings because these don't COW
+ * and we would not want to dirty them for nothing.
+ */
+ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+ gup_flags |= FOLL_WRITE;
+
+ /*
+ * We want mlock to succeed for regions that have any permissions
+ * other than PROT_NONE.
+ */
+ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+ gup_flags |= FOLL_FORCE;
+
+ /*
+ * We made sure addr is within a VMA, so the following will
+ * not result in a stack expansion that recurses back here.
+ */
+ return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+ NULL, NULL, nonblocking);
+}
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_sem must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long end, nstart, nend;
+ struct vm_area_struct *vma = NULL;
+ int locked = 0;
+ long ret = 0;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(len != PAGE_ALIGN(len));
+ end = start + len;
+
+ for (nstart = start; nstart < end; nstart = nend) {
+ /*
+ * We want to fault in pages for [nstart; end) address range.
+ * Find first corresponding VMA.
+ */
+ if (!locked) {
+ locked = 1;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, nstart);
+ } else if (nstart >= vma->vm_end)
+ vma = vma->vm_next;
+ if (!vma || vma->vm_start >= end)
+ break;
+ /*
+ * Set [nstart; nend) to intersection of desired address
+ * range with the first VMA. Also, skip undesirable VMA types.
+ */
+ nend = min(end, vma->vm_end);
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ continue;
+ if (nstart < vma->vm_start)
+ nstart = vma->vm_start;
+ /*
+ * Now fault in a range of pages. populate_vma_page_range()
+ * double checks the vma flags, so that it won't mlock pages
+ * if the vma was already munlocked.
+ */
+ ret = populate_vma_page_range(vma, nstart, nend, &locked);
+ if (ret < 0) {
+ if (ignore_errors) {
+ ret = 0;
+ continue; /* continue at next VMA */
+ }
+ break;
+ }
+ nend = nstart + ret * PAGE_SIZE;
+ ret = 0;
+ }
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret; /* 0 or negative error code */
+}
+
+/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
*
@@ -901,7 +1019,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
*
* for an example see gup_get_pte in arch/x86/mm/gup.c
*/
- pte_t pte = ACCESS_ONCE(*ptep);
+ pte_t pte = READ_ONCE(*ptep);
struct page *page;
/*
@@ -1191,7 +1309,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
- pgd_t pgd = ACCESS_ONCE(*pgdp);
+ pgd_t pgd = READ_ONCE(*pgdp);
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
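
populate_vma_page_range() and __mm_populate() above move the mlock()/MAP_POPULATE faulting loop into gup.c. The core of __mm_populate() is an interval walk: cover [start, end) with the VMAs that intersect it, clamp each step to the VMA boundaries, and skip VM_IO/VM_PFNMAP mappings. The userspace sketch below shows only that walk over an invented sorted region array; mmap_sem handling, gup flags and error handling are left out.

/* Illustrative sketch of the __mm_populate() interval walk. */
#include <stdio.h>

struct region {
	unsigned long start, end;
	int skip;		/* stands in for VM_IO | VM_PFNMAP */
};

static void populate_range(const struct region *r, int nr,
			   unsigned long start, unsigned long end)
{
	unsigned long nstart, nend;
	int i = 0;

	for (nstart = start; nstart < end; nstart = nend) {
		/* find the first region ending after nstart, like find_vma() */
		while (i < nr && r[i].end <= nstart)
			i++;
		if (i == nr || r[i].start >= end)
			break;

		nend = end < r[i].end ? end : r[i].end;
		if (r[i].skip)
			continue;	/* like VM_IO/VM_PFNMAP vmas */
		if (nstart < r[i].start)
			nstart = r[i].start;

		printf("fault in [%#lx, %#lx)\n", nstart, nend);
	}
}

int main(void)
{
	struct region map[] = {
		{ 0x1000, 0x3000, 0 },
		{ 0x3000, 0x4000, 1 },	/* skipped, like an IO mapping */
		{ 0x5000, 0x8000, 0 },	/* note the hole before 0x5000 */
	};

	populate_range(map, 3, 0x2000, 0x7000);
	return 0;
}

The walk prints [0x2000, 0x3000) and [0x5000, 0x7000): the skipped region and the hole are stepped over, matching the continue and the nstart clamp in __mm_populate().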
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6817b0350c71..078832cf3636 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -67,6 +67,7 @@ static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
static int khugepaged(void *none);
static int khugepaged_slab_init(void);
+static void khugepaged_slab_exit(void);
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
@@ -109,9 +110,6 @@ static int set_recommended_min_free_kbytes(void)
int nr_zones = 0;
unsigned long recommended_min;
- if (!khugepaged_enabled())
- return 0;
-
for_each_populated_zone(zone)
nr_zones++;
@@ -143,9 +141,8 @@ static int set_recommended_min_free_kbytes(void)
setup_per_zone_wmarks();
return 0;
}
-late_initcall(set_recommended_min_free_kbytes);
-static int start_khugepaged(void)
+static int start_stop_khugepaged(void)
{
int err = 0;
if (khugepaged_enabled()) {
@@ -156,6 +153,7 @@ static int start_khugepaged(void)
pr_err("khugepaged: kthread_run(khugepaged) failed\n");
err = PTR_ERR(khugepaged_thread);
khugepaged_thread = NULL;
+ goto fail;
}
if (!list_empty(&khugepaged_scan.mm_head))
@@ -166,7 +164,7 @@ static int start_khugepaged(void)
kthread_stop(khugepaged_thread);
khugepaged_thread = NULL;
}
-
+fail:
return err;
}
@@ -183,7 +181,7 @@ static struct page *get_huge_zero_page(void)
struct page *zero_page;
retry:
if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
- return ACCESS_ONCE(huge_zero_page);
+ return READ_ONCE(huge_zero_page);
zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
HPAGE_PMD_ORDER);
@@ -202,7 +200,7 @@ retry:
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
preempt_enable();
- return ACCESS_ONCE(huge_zero_page);
+ return READ_ONCE(huge_zero_page);
}
static void put_huge_zero_page(void)
@@ -300,7 +298,7 @@ static ssize_t enabled_store(struct kobject *kobj,
int err;
mutex_lock(&khugepaged_mutex);
- err = start_khugepaged();
+ err = start_stop_khugepaged();
mutex_unlock(&khugepaged_mutex);
if (err)
@@ -634,27 +632,38 @@ static int __init hugepage_init(void)
err = hugepage_init_sysfs(&hugepage_kobj);
if (err)
- return err;
+ goto err_sysfs;
err = khugepaged_slab_init();
if (err)
- goto out;
+ goto err_slab;
- register_shrinker(&huge_zero_page_shrinker);
+ err = register_shrinker(&huge_zero_page_shrinker);
+ if (err)
+ goto err_hzp_shrinker;
/*
* By default disable transparent hugepages on smaller systems,
* where the extra memory used could hurt more than TLB overhead
* is likely to save. The admin can still enable it through /sys.
*/
- if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
+ if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
transparent_hugepage_flags = 0;
+ return 0;
+ }
- start_khugepaged();
+ err = start_stop_khugepaged();
+ if (err)
+ goto err_khugepaged;
return 0;
-out:
+err_khugepaged:
+ unregister_shrinker(&huge_zero_page_shrinker);
+err_hzp_shrinker:
+ khugepaged_slab_exit();
+err_slab:
hugepage_exit_sysfs(hugepage_kobj);
+err_sysfs:
return err;
}
subsys_initcall(hugepage_init);
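
hugepage_init() above now checks the return values of register_shrinker() and start_stop_khugepaged() and unwinds the earlier steps through a chain of error labels, the usual kernel pattern for multi-step initialisation. A minimal standalone illustration of that ladder, with placeholder step/undo functions rather than the kernel ones:

/* Illustrative goto-unwind ladder; the steps are placeholders. */
#include <stdio.h>

static int step_a(void) { puts("A up"); return 0; }
static void undo_a(void) { puts("A down"); }
static int step_b(void) { puts("B up"); return 0; }
static void undo_b(void) { puts("B down"); }
static int step_c(void) { puts("C fails"); return -1; }

static int init_all(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;
	err = step_b();
	if (err)
		goto err_b;
	err = step_c();
	if (err)
		goto err_c;
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}

Each label undoes exactly the steps that succeeded before the failure, in reverse order, which is what the err_khugepaged/err_hzp_shrinker/err_slab/err_sysfs chain above does with unregister_shrinker(), khugepaged_slab_exit() and hugepage_exit_sysfs().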
@@ -708,7 +717,7 @@ static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long haddr, pmd_t *pmd,
- struct page *page)
+ struct page *page, gfp_t gfp)
{
struct mem_cgroup *memcg;
pgtable_t pgtable;
@@ -716,7 +725,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
VM_BUG_ON_PAGE(!PageCompound(page), page);
- if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
+ if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
return VM_FAULT_OOM;
pgtable = pte_alloc_one(mm, haddr);
@@ -822,7 +831,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
}
- if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
+ if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) {
put_page(page);
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
@@ -1080,6 +1089,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long haddr;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
+ gfp_t huge_gfp; /* for allocation and charge */
ptl = pmd_lockptr(mm, pmd);
VM_BUG_ON_VMA(!vma->anon_vma, vma);
@@ -1106,10 +1116,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
alloc:
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
- gfp_t gfp;
-
- gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
- new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+ huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+ new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
} else
new_page = NULL;
@@ -1130,8 +1138,7 @@ alloc:
goto out;
}
- if (unlikely(mem_cgroup_try_charge(new_page, mm,
- GFP_TRANSHUGE, &memcg))) {
+ if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
put_page(new_page);
if (page) {
split_huge_page(page);
@@ -1231,7 +1238,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
pmd, _pmd, 1))
update_mmu_cache_pmd(vma, addr, pmd);
}
- if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+ if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
if (page->mapping && trylock_page(page)) {
lru_add_drain();
if (page->mapping)
@@ -1976,6 +1983,11 @@ static int __init khugepaged_slab_init(void)
return 0;
}
+static void __init khugepaged_slab_exit(void)
+{
+ kmem_cache_destroy(mm_slot_cache);
+}
+
static inline struct mm_slot *alloc_mm_slot(void)
{
if (!mm_slot_cache) /* initialization failed */
@@ -2109,7 +2121,7 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
while (--_pte >= pte) {
pte_t pteval = *_pte;
- if (!pte_none(pteval))
+ if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
release_pte_page(pte_page(pteval));
}
}
@@ -2120,13 +2132,13 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
{
struct page *page;
pte_t *_pte;
- int none = 0;
+ int none_or_zero = 0;
bool referenced = false, writable = false;
for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, address += PAGE_SIZE) {
pte_t pteval = *_pte;
- if (pte_none(pteval)) {
- if (++none <= khugepaged_max_ptes_none)
+ if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+ if (++none_or_zero <= khugepaged_max_ptes_none)
continue;
else
goto out;
@@ -2207,9 +2219,21 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
pte_t pteval = *_pte;
struct page *src_page;
- if (pte_none(pteval)) {
+ if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
clear_user_highpage(page, address);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
+ if (is_zero_pfn(pte_pfn(pteval))) {
+ /*
+ * ptl mostly unnecessary.
+ */
+ spin_lock(ptl);
+ /*
+ * paravirt calls inside pte_clear here are
+ * superfluous.
+ */
+ pte_clear(vma->vm_mm, address, _pte);
+ spin_unlock(ptl);
+ }
} else {
src_page = pte_page(pteval);
copy_user_highpage(page, src_page, address, vma);
@@ -2311,8 +2335,8 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
return true;
}
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+static struct page *
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
int node)
{
@@ -2326,8 +2350,7 @@ static struct page
*/
up_read(&mm->mmap_sem);
- *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
- khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
+ *hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
*hpage = ERR_PTR(-ENOMEM);
@@ -2380,13 +2403,14 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
return true;
}
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+static struct page *
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
int node)
{
up_read(&mm->mmap_sem);
VM_BUG_ON(!*hpage);
+
return *hpage;
}
#endif
@@ -2421,16 +2445,21 @@ static void collapse_huge_page(struct mm_struct *mm,
struct mem_cgroup *memcg;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
+ gfp_t gfp;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ /* Only allocate from the target node */
+ gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+ __GFP_THISNODE;
+
/* release the mmap_sem read lock. */
- new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+ new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
if (!new_page)
return;
if (unlikely(mem_cgroup_try_charge(new_page, mm,
- GFP_TRANSHUGE, &memcg)))
+ gfp, &memcg)))
return;
/*
@@ -2543,7 +2572,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
{
pmd_t *pmd;
pte_t *pte, *_pte;
- int ret = 0, none = 0;
+ int ret = 0, none_or_zero = 0;
struct page *page;
unsigned long _address;
spinlock_t *ptl;
@@ -2561,8 +2590,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
pte_t pteval = *_pte;
- if (pte_none(pteval)) {
- if (++none <= khugepaged_max_ptes_none)
+ if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+ if (++none_or_zero <= khugepaged_max_ptes_none)
continue;
else
goto out_unmap;
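With the none_or_zero counting above, khugepaged treats PTEs that map the shared zero page the same as empty PTEs when deciding whether a PMD range is worth collapsing. A simplified sketch of that acceptance check (illustrative only; the function name and flat loop are not the kernel code):

/* Accept a range for collapse only while the number of empty or
 * zero-page PTEs stays within the khugepaged_max_ptes_none tunable. */
static bool range_worth_collapsing(pte_t *pte, int nr_ptes, int max_ptes_none)
{
	int none_or_zero = 0, i;

	for (i = 0; i < nr_ptes; i++) {
		pte_t pteval = pte[i];

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval)))
			if (++none_or_zero > max_ptes_none)
				return false;
	}
	return true;
}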
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c41b2a0ee273..271e4432734c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -61,6 +61,9 @@ DEFINE_SPINLOCK(hugetlb_lock);
static int num_fault_mutexes;
static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
+/* Forward declaration */
+static int hugetlb_acct_memory(struct hstate *h, long delta);
+
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
bool free = (spool->count == 0) && (spool->used_hpages == 0);
@@ -68,23 +71,36 @@ static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
spin_unlock(&spool->lock);
/* If no pages are used, and no other handles to the subpool
- * remain, free the subpool the subpool remain */
- if (free)
+ * remain, give up any reservations based on minimum size and
+ * free the subpool */
+ if (free) {
+ if (spool->min_hpages != -1)
+ hugetlb_acct_memory(spool->hstate,
+ -spool->min_hpages);
kfree(spool);
+ }
}
-struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
+struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
+ long min_hpages)
{
struct hugepage_subpool *spool;
- spool = kmalloc(sizeof(*spool), GFP_KERNEL);
+ spool = kzalloc(sizeof(*spool), GFP_KERNEL);
if (!spool)
return NULL;
spin_lock_init(&spool->lock);
spool->count = 1;
- spool->max_hpages = nr_blocks;
- spool->used_hpages = 0;
+ spool->max_hpages = max_hpages;
+ spool->hstate = h;
+ spool->min_hpages = min_hpages;
+
+ if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
+ kfree(spool);
+ return NULL;
+ }
+ spool->rsv_hpages = min_hpages;
return spool;
}
@@ -97,36 +113,89 @@ void hugepage_put_subpool(struct hugepage_subpool *spool)
unlock_or_release_subpool(spool);
}
-static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
+/*
+ * Subpool accounting for allocating and reserving pages.
+ * Return -ENOMEM if there are not enough resources to satisfy the
+ * request. Otherwise, return the number of pages by which the
+ * global pools must be adjusted (upward). The returned value may
+ * only be different than the passed value (delta) in the case where
+ * a subpool minimum size must be maintained.
+ */
+static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
long delta)
{
- int ret = 0;
+ long ret = delta;
if (!spool)
- return 0;
+ return ret;
spin_lock(&spool->lock);
- if ((spool->used_hpages + delta) <= spool->max_hpages) {
- spool->used_hpages += delta;
- } else {
- ret = -ENOMEM;
+
+ if (spool->max_hpages != -1) { /* maximum size accounting */
+ if ((spool->used_hpages + delta) <= spool->max_hpages)
+ spool->used_hpages += delta;
+ else {
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
}
- spin_unlock(&spool->lock);
+ if (spool->min_hpages != -1) { /* minimum size accounting */
+ if (delta > spool->rsv_hpages) {
+ /*
+ * Asking for more reserves than those already taken on
+ * behalf of the subpool. Return the difference.
+ */
+ ret = delta - spool->rsv_hpages;
+ spool->rsv_hpages = 0;
+ } else {
+ ret = 0; /* reserves already accounted for */
+ spool->rsv_hpages -= delta;
+ }
+ }
+
+unlock_ret:
+ spin_unlock(&spool->lock);
return ret;
}
-static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
+/*
+ * Subpool accounting for freeing and unreserving pages.
+ * Return the number of global page reservations that must be dropped.
+ * The return value may only be different than the passed value (delta)
+ * in the case where a subpool minimum size must be maintained.
+ */
+static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
long delta)
{
+ long ret = delta;
+
if (!spool)
- return;
+ return delta;
spin_lock(&spool->lock);
- spool->used_hpages -= delta;
- /* If hugetlbfs_put_super couldn't free spool due to
- * an outstanding quota reference, free it now. */
+
+ if (spool->max_hpages != -1) /* maximum size accounting */
+ spool->used_hpages -= delta;
+
+ if (spool->min_hpages != -1) { /* minimum size accounting */
+ if (spool->rsv_hpages + delta <= spool->min_hpages)
+ ret = 0;
+ else
+ ret = spool->rsv_hpages + delta - spool->min_hpages;
+
+ spool->rsv_hpages += delta;
+ if (spool->rsv_hpages > spool->min_hpages)
+ spool->rsv_hpages = spool->min_hpages;
+ }
+
+ /*
+ * If hugetlbfs_put_super couldn't free spool due to an outstanding
+ * quota reference, free it now.
+ */
unlock_or_release_subpool(spool);
+
+ return ret;
}
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
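The new return-value convention of the two subpool helpers is easiest to see with a small user-space model (simplified fields, no locking; purely illustrative, not the kernel structures). The returned value is the adjustment that still has to be applied to the global reservation counters:

#include <errno.h>

struct subpool { long max, used, min, rsv; };	/* -1 means "no limit" */

/* Mirrors hugepage_subpool_get_pages(): how many pages must the
 * global pool additionally provide, or -ENOMEM on overcommit. */
static long subpool_get(struct subpool *s, long delta)
{
	long ret = delta;

	if (s->max != -1) {			/* maximum size accounting */
		if (s->used + delta > s->max)
			return -ENOMEM;
		s->used += delta;
	}
	if (s->min != -1) {			/* minimum size accounting */
		if (delta > s->rsv) {
			ret = delta - s->rsv;	/* only the uncovered part */
			s->rsv = 0;
		} else {
			ret = 0;		/* fully covered by the reserve */
			s->rsv -= delta;
		}
	}
	return ret;
}

For example, a subpool created with min_hpages = 4 starts with rsv = 4: the first subpool_get(s, 3) returns 0 (covered by the pre-accounted minimum), and a second subpool_get(s, 3) returns 2.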
@@ -855,6 +924,31 @@ struct hstate *size_to_hstate(unsigned long size)
return NULL;
}
+/*
+ * Test to determine whether the hugepage is "active/in-use" (i.e. linked
+ * to hstate->hugepage_activelist).
+ *
+ * This function can be called for tail pages, but never returns true for them.
+ */
+bool page_huge_active(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHuge(page), page);
+ return PageHead(page) && PagePrivate(&page[1]);
+}
+
+/* never called for tail page */
+static void set_page_huge_active(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
+ SetPagePrivate(&page[1]);
+}
+
+static void clear_page_huge_active(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
+ ClearPagePrivate(&page[1]);
+}
+
void free_huge_page(struct page *page)
{
/*
@@ -874,7 +968,16 @@ void free_huge_page(struct page *page)
restore_reserve = PagePrivate(page);
ClearPagePrivate(page);
+ /*
+ * A return code of zero implies that the subpool will be under its
+ * minimum size if the reservation is not restored after the page is freed.
+ * Therefore, force the restore_reserve operation.
+ */
+ if (hugepage_subpool_put_pages(spool, 1) == 0)
+ restore_reserve = true;
+
spin_lock(&hugetlb_lock);
+ clear_page_huge_active(page);
hugetlb_cgroup_uncharge_page(hstate_index(h),
pages_per_huge_page(h), page);
if (restore_reserve)
@@ -891,7 +994,6 @@ void free_huge_page(struct page *page)
enqueue_huge_page(h, page);
}
spin_unlock(&hugetlb_lock);
- hugepage_subpool_put_pages(spool, 1);
}
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
@@ -1386,7 +1488,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
if (chg < 0)
return ERR_PTR(-ENOMEM);
if (chg || avoid_reserve)
- if (hugepage_subpool_get_pages(spool, 1))
+ if (hugepage_subpool_get_pages(spool, 1) < 0)
return ERR_PTR(-ENOSPC);
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
@@ -2454,6 +2556,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
struct resv_map *resv = vma_resv_map(vma);
struct hugepage_subpool *spool = subpool_vma(vma);
unsigned long reserve, start, end;
+ long gbl_reserve;
if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return;
@@ -2466,8 +2569,12 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
kref_put(&resv->refs, resv_map_release);
if (reserve) {
- hugetlb_acct_memory(h, -reserve);
- hugepage_subpool_put_pages(spool, reserve);
+ /*
+ * Decrement reserve counts. The global reserve count may be
+ * adjusted if the subpool has a minimum size.
+ */
+ gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
+ hugetlb_acct_memory(h, -gbl_reserve);
}
}
@@ -2891,6 +2998,7 @@ retry_avoidcopy:
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
+ set_page_huge_active(new_page);
mmun_start = address & huge_page_mask(h);
mmun_end = mmun_start + huge_page_size(h);
@@ -3003,6 +3111,7 @@ retry:
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
+ set_page_huge_active(page);
if (vma->vm_flags & VM_MAYSHARE) {
int err;
@@ -3278,6 +3387,15 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page;
/*
+ * If we have a pending SIGKILL, don't keep faulting pages and
+ * potentially allocating memory.
+ */
+ if (unlikely(fatal_signal_pending(current))) {
+ remainder = 0;
+ break;
+ }
+
+ /*
* Some archs (sparc64, sh*) have multiple pte_ts to
* each hugepage. We have to make sure we get the
* first, for the page indexing below to work.
@@ -3438,6 +3556,7 @@ int hugetlb_reserve_pages(struct inode *inode,
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
struct resv_map *resv_map;
+ long gbl_reserve;
/*
* Only apply hugepage reservation if asked. At fault time, an
@@ -3474,8 +3593,13 @@ int hugetlb_reserve_pages(struct inode *inode,
goto out_err;
}
- /* There must be enough pages in the subpool for the mapping */
- if (hugepage_subpool_get_pages(spool, chg)) {
+ /*
+ * There must be enough pages in the subpool for the mapping. If
+ * the subpool has a minimum size, there may be some global
+ * reservations already in place (gbl_reserve).
+ */
+ gbl_reserve = hugepage_subpool_get_pages(spool, chg);
+ if (gbl_reserve < 0) {
ret = -ENOSPC;
goto out_err;
}
@@ -3484,9 +3608,10 @@ int hugetlb_reserve_pages(struct inode *inode,
* Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
- ret = hugetlb_acct_memory(h, chg);
+ ret = hugetlb_acct_memory(h, gbl_reserve);
if (ret < 0) {
- hugepage_subpool_put_pages(spool, chg);
+ /* put back original number of pages, chg */
+ (void)hugepage_subpool_put_pages(spool, chg);
goto out_err;
}
@@ -3516,6 +3641,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
struct resv_map *resv_map = inode_resv_map(inode);
long chg = 0;
struct hugepage_subpool *spool = subpool_inode(inode);
+ long gbl_reserve;
if (resv_map)
chg = region_truncate(resv_map, offset);
@@ -3523,8 +3649,12 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
- hugepage_subpool_put_pages(spool, (chg - freed));
- hugetlb_acct_memory(h, -(chg - freed));
+ /*
+ * If the subpool has a minimum size, the number of global
+ * reservations to be released may be adjusted.
+ */
+ gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
+ hugetlb_acct_memory(h, -gbl_reserve);
}
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -3735,8 +3865,7 @@ retry:
if (!pmd_huge(*pmd))
goto out;
if (pmd_present(*pmd)) {
- page = pte_page(*(pte_t *)pmd) +
- ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
if (flags & FOLL_GET)
get_page(page);
} else {
@@ -3767,20 +3896,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
#ifdef CONFIG_MEMORY_FAILURE
-/* Should be called in hugetlb_lock */
-static int is_hugepage_on_freelist(struct page *hpage)
-{
- struct page *page;
- struct page *tmp;
- struct hstate *h = page_hstate(hpage);
- int nid = page_to_nid(hpage);
-
- list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
- if (page == hpage)
- return 1;
- return 0;
-}
-
/*
* This function is called from memory failure code.
* Assume the caller holds page lock of the head page.
@@ -3792,7 +3907,11 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
int ret = -EBUSY;
spin_lock(&hugetlb_lock);
- if (is_hugepage_on_freelist(hpage)) {
+ /*
+ * Just checking !page_huge_active is not enough, because that could be
+ * an isolated/hwpoisoned hugepage (which has a refcount > 0).
+ */
+ if (!page_huge_active(hpage) && !page_count(hpage)) {
/*
* Hwpoisoned hugepage isn't linked to activelist or freelist,
* but dangling hpage->lru can trigger list-debug warnings
@@ -3812,42 +3931,27 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
bool isolate_huge_page(struct page *page, struct list_head *list)
{
+ bool ret = true;
+
VM_BUG_ON_PAGE(!PageHead(page), page);
- if (!get_page_unless_zero(page))
- return false;
spin_lock(&hugetlb_lock);
+ if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+ ret = false;
+ goto unlock;
+ }
+ clear_page_huge_active(page);
list_move_tail(&page->lru, list);
+unlock:
spin_unlock(&hugetlb_lock);
- return true;
+ return ret;
}
void putback_active_hugepage(struct page *page)
{
VM_BUG_ON_PAGE(!PageHead(page), page);
spin_lock(&hugetlb_lock);
+ set_page_huge_active(page);
list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
spin_unlock(&hugetlb_lock);
put_page(page);
}
-
-bool is_hugepage_active(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- /*
- * This function can be called for a tail page because the caller,
- * scan_movable_pages, scans through a given pfn-range which typically
- * covers one memory block. In systems using gigantic hugepage (1GB
- * for x86_64,) a hugepage is larger than a memory block, and we don't
- * support migrating such large hugepages for now, so return false
- * when called for tail pages.
- */
- if (PageTail(page))
- return false;
- /*
- * Refcount of a hwpoisoned hugepages is 1, but they are not active,
- * so we should return false for them.
- */
- if (unlikely(PageHWPoison(page)))
- return false;
- return page_count(page) > 0;
-}
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 329caf56df22..4ca5fe0042e1 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -34,13 +34,13 @@ static int hwpoison_inject(void *data, u64 val)
if (!hwpoison_filter_enable)
goto inject;
- if (!PageLRU(p) && !PageHuge(p))
- shake_page(p, 0);
+ if (!PageLRU(hpage) && !PageHuge(p))
+ shake_page(hpage, 0);
/*
* This implies unable to support non-LRU pages.
*/
- if (!PageLRU(p) && !PageHuge(p))
- return 0;
+ if (!PageLRU(hpage) && !PageHuge(p))
+ goto put_out;
/*
* do a racy check with elevated page count, to make sure PG_hwpoison
@@ -52,11 +52,14 @@ static int hwpoison_inject(void *data, u64 val)
err = hwpoison_filter(hpage);
unlock_page(hpage);
if (err)
- return 0;
+ goto put_out;
inject:
pr_info("Injecting memory failure at pfn %#lx\n", pfn);
return memory_failure(pfn, 18, MF_COUNT_INCREASED);
+put_out:
+ put_page(hpage);
+ return 0;
}
static int hwpoison_unpoison(void *data, u64 val)
diff --git a/mm/internal.h b/mm/internal.h
index a96da5b0029d..a25e359a4039 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -200,6 +200,8 @@ isolate_freepages_range(struct compact_control *cc,
unsigned long
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+ int migratetype, bool only_stealable, bool *can_steal);
#endif
@@ -222,13 +224,13 @@ static inline unsigned long page_order(struct page *page)
* PageBuddy() should be checked first by the caller to minimize race window,
* and invalid values must be handled gracefully.
*
- * ACCESS_ONCE is used so that if the caller assigns the result into a local
+ * READ_ONCE is used so that if the caller assigns the result into a local
* variable and e.g. tests it for valid range before using, the compiler cannot
* decide to remove the variable and inline the page_private(page) multiple
* times, potentially observing different values in the tests and the actual
* use of the result.
*/
-#define page_order_unsafe(page) ACCESS_ONCE(page_private(page))
+#define page_order_unsafe(page) READ_ONCE(page_private(page))
static inline bool is_cow_mapping(vm_flags_t flags)
{
@@ -240,7 +242,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node *rb_parent);
#ifdef CONFIG_MMU
-extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
+extern long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
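The ACCESS_ONCE() → READ_ONCE() conversions in this series are mechanical, but the comment above page_order_unsafe() explains why the single-read guarantee matters. A small illustration of the intended use (the wrapper function is hypothetical):

/* Caller is expected to have checked PageBuddy() already. */
static bool order_in_valid_range(struct page *page)
{
	unsigned long order = page_order_unsafe(page);	/* exactly one read */

	/*
	 * Without READ_ONCE() inside page_order_unsafe(), the compiler
	 * could re-load page_private(page) for each use below and act
	 * on a value that was never range-checked.
	 */
	return order > 0 && order < MAX_ORDER;
}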
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 936d81661c47..6c513a63ea84 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -389,6 +389,19 @@ void kasan_krealloc(const void *object, size_t size)
kasan_kmalloc(page->slab_cache, object, size);
}
+void kasan_kfree(void *ptr)
+{
+ struct page *page;
+
+ page = virt_to_head_page(ptr);
+
+ if (unlikely(!PageSlab(page)))
+ kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+ KASAN_FREE_PAGE);
+ else
+ kasan_slab_free(page->slab_cache, ptr);
+}
+
void kasan_kfree_large(const void *ptr)
{
struct page *page = virt_to_page(ptr);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5405aff5a590..f0fe4f2c1fa7 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -115,7 +115,8 @@
#define BYTES_PER_POINTER sizeof(void *)
/* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
+ __GFP_NOACCOUNT)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
__GFP_NOWARN)
diff --git a/mm/ksm.c b/mm/ksm.c
index 4162dce2eb44..7ee101eaacdf 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -542,7 +542,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
expected_mapping = (void *)stable_node +
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
again:
- kpfn = ACCESS_ONCE(stable_node->kpfn);
+ kpfn = READ_ONCE(stable_node->kpfn);
page = pfn_to_page(kpfn);
/*
@@ -551,7 +551,7 @@ again:
* but on Alpha we need to be more careful.
*/
smp_read_barrier_depends();
- if (ACCESS_ONCE(page->mapping) != expected_mapping)
+ if (READ_ONCE(page->mapping) != expected_mapping)
goto stale;
/*
@@ -577,14 +577,14 @@ again:
cpu_relax();
}
- if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+ if (READ_ONCE(page->mapping) != expected_mapping) {
put_page(page);
goto stale;
}
if (lock_it) {
lock_page(page);
- if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+ if (READ_ONCE(page->mapping) != expected_mapping) {
unlock_page(page);
put_page(page);
goto stale;
@@ -600,7 +600,7 @@ stale:
* before checking whether node->kpfn has been changed.
*/
smp_rmb();
- if (ACCESS_ONCE(stable_node->kpfn) != kpfn)
+ if (READ_ONCE(stable_node->kpfn) != kpfn)
goto again;
remove_node_from_stable_tree(stable_node);
return NULL;
diff --git a/mm/memblock.c b/mm/memblock.c
index 252b77bdf65e..9318b567ed79 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -580,10 +580,24 @@ int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
return memblock_add_range(&memblock.memory, base, size, nid, 0);
}
+static int __init_memblock memblock_add_region(phys_addr_t base,
+ phys_addr_t size,
+ int nid,
+ unsigned long flags)
+{
+ struct memblock_type *_rgn = &memblock.memory;
+
+ memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
+ (unsigned long long)base,
+ (unsigned long long)base + size - 1,
+ flags, (void *)_RET_IP_);
+
+ return memblock_add_range(_rgn, base, size, nid, flags);
+}
+
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
- return memblock_add_range(&memblock.memory, base, size,
- MAX_NUMNODES, 0);
+ return memblock_add_region(base, size, MAX_NUMNODES, 0);
}
/**
@@ -699,14 +713,14 @@ static int __init_memblock memblock_reserve_region(phys_addr_t base,
int nid,
unsigned long flags)
{
- struct memblock_type *_rgn = &memblock.reserved;
+ struct memblock_type *type = &memblock.reserved;
memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
(unsigned long long)base,
(unsigned long long)base + size - 1,
flags, (void *)_RET_IP_);
- return memblock_add_range(_rgn, base, size, nid, flags);
+ return memblock_add_range(type, base, size, nid, flags);
}
int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b34ef4a32a3b..14c2f2017e37 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -14,6 +14,12 @@
* Copyright (C) 2012 Parallels Inc. and Google Inc.
* Authors: Glauber Costa and Suleiman Souhlal
*
+ * Native page reclaim
+ * Charge lifetime sanitation
+ * Lockless page tracking & accounting
+ * Unified hierarchy configuration model
+ * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -253,11 +259,6 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
* page cache and RSS per cgroup. We would eventually like to provide
* statistics based on the statistics developed by Rik Van Riel for clock-pro,
* to help the administrator determine what knobs to tune.
- *
- * TODO: Add a water mark for the memory controller. Reclaim will begin when
- * we hit the water mark. May be even add a low water mark, such that
- * no reclaim occurs from a cgroup at it's low water mark, this is
- * a feature that will be implemented much later in the future.
*/
struct mem_cgroup {
struct cgroup_subsys_state css;
@@ -454,6 +455,12 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
return memcg->css.id;
}
+/*
+ * A helper function to get mem_cgroup from ID. must be called under
+ * rcu_read_lock(). The caller is responsible for calling
+ * css_tryget_online() if the mem_cgroup is used for charging. (dropping
+ * refcnt from swap can be called against removed memcg.)
+ */
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
struct cgroup_subsys_state *css;
@@ -667,7 +674,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
unsigned long nr_pages = page_counter_read(&memcg->memory);
- unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
+ unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
unsigned long excess = 0;
if (nr_pages > soft_limit)
@@ -1035,7 +1042,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
goto out_unlock;
do {
- pos = ACCESS_ONCE(iter->position);
+ pos = READ_ONCE(iter->position);
/*
* A racing update may change the position and
* put the last reference, hence css_tryget(),
@@ -1352,13 +1359,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
unsigned long limit;
count = page_counter_read(&memcg->memory);
- limit = ACCESS_ONCE(memcg->memory.limit);
+ limit = READ_ONCE(memcg->memory.limit);
if (count < limit)
margin = limit - count;
if (do_swap_account) {
count = page_counter_read(&memcg->memsw);
- limit = ACCESS_ONCE(memcg->memsw.limit);
+ limit = READ_ONCE(memcg->memsw.limit);
if (count <= limit)
margin = min(margin, limit - count);
}
@@ -1436,15 +1443,17 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
struct mem_cgroup *iter;
unsigned int i;
- if (!p)
- return;
-
mutex_lock(&oom_info_lock);
rcu_read_lock();
- pr_info("Task in ");
- pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
- pr_cont(" killed as a result of limit of ");
+ if (p) {
+ pr_info("Task in ");
+ pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+ pr_cont(" killed as a result of limit of ");
+ } else {
+ pr_info("Memory limit reached of cgroup ");
+ }
+
pr_cont_cgroup_path(memcg->css.cgroup);
pr_cont("\n");
@@ -1531,7 +1540,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
return;
}
- check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
+ check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
totalpages = mem_cgroup_get_limit(memcg) ? : 1;
for_each_mem_cgroup_tree(iter, memcg) {
struct css_task_iter it;
@@ -2341,20 +2350,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
}
/*
- * A helper function to get mem_cgroup from ID. must be called under
- * rcu_read_lock(). The caller is responsible for calling
- * css_tryget_online() if the mem_cgroup is used for charging. (dropping
- * refcnt from swap can be called against removed memcg.)
- */
-static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
-{
- /* ID 0 is unused ID */
- if (!id)
- return NULL;
- return mem_cgroup_from_id(id);
-}
-
-/*
* try_get_mem_cgroup_from_page - look up page's memcg association
* @page: the page
*
@@ -2380,7 +2375,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
ent.val = page_private(page);
id = lookup_swap_cgroup_id(ent);
rcu_read_lock();
- memcg = mem_cgroup_lookup(id);
+ memcg = mem_cgroup_from_id(id);
if (memcg && !css_tryget_online(&memcg->css))
memcg = NULL;
rcu_read_unlock();
@@ -2642,7 +2637,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
return cachep;
memcg = get_mem_cgroup_from_mm(current->mm);
- kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+ kmemcg_id = READ_ONCE(memcg->kmemcg_id);
if (kmemcg_id < 0)
goto out;
@@ -2779,92 +2774,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-/**
- * mem_cgroup_move_account - move account of the page
- * @page: the page
- * @nr_pages: number of regular pages (>1 for huge pages)
- * @from: mem_cgroup which the page is moved from.
- * @to: mem_cgroup which the page is moved to. @from != @to.
- *
- * The caller must confirm following.
- * - page is not on LRU (isolate_page() is useful.)
- * - compound_lock is held when nr_pages > 1
- *
- * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
- * from old cgroup.
- */
-static int mem_cgroup_move_account(struct page *page,
- unsigned int nr_pages,
- struct mem_cgroup *from,
- struct mem_cgroup *to)
-{
- unsigned long flags;
- int ret;
-
- VM_BUG_ON(from == to);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- /*
- * The page is isolated from LRU. So, collapse function
- * will not handle this page. But page splitting can happen.
- * Do this check under compound_page_lock(). The caller should
- * hold it.
- */
- ret = -EBUSY;
- if (nr_pages > 1 && !PageTransHuge(page))
- goto out;
-
- /*
- * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
- * of its source page while we change it: page migration takes
- * both pages off the LRU, but page cache replacement doesn't.
- */
- if (!trylock_page(page))
- goto out;
-
- ret = -EINVAL;
- if (page->mem_cgroup != from)
- goto out_unlock;
-
- spin_lock_irqsave(&from->move_lock, flags);
-
- if (!PageAnon(page) && page_mapped(page)) {
- __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
- nr_pages);
- __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
- nr_pages);
- }
-
- if (PageWriteback(page)) {
- __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
- nr_pages);
- __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
- nr_pages);
- }
-
- /*
- * It is safe to change page->mem_cgroup here because the page
- * is referenced, charged, and isolated - we can't race with
- * uncharging, charging, migration, or LRU putback.
- */
-
- /* caller should have done css_get */
- page->mem_cgroup = to;
- spin_unlock_irqrestore(&from->move_lock, flags);
-
- ret = 0;
-
- local_irq_disable();
- mem_cgroup_charge_statistics(to, page, nr_pages);
- memcg_check_events(to, page);
- mem_cgroup_charge_statistics(from, page, -nr_pages);
- memcg_check_events(from, page);
- local_irq_enable();
-out_unlock:
- unlock_page(page);
-out:
- return ret;
-}
-
#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
bool charge)
@@ -4816,6 +4725,92 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
return page;
}
+/**
+ * mem_cgroup_move_account - move account of the page
+ * @page: the page
+ * @nr_pages: number of regular pages (>1 for huge pages)
+ * @from: mem_cgroup which the page is moved from.
+ * @to: mem_cgroup which the page is moved to. @from != @to.
+ *
+ * The caller must confirm following.
+ * - page is not on LRU (isolate_page() is useful.)
+ * - compound_lock is held when nr_pages > 1
+ *
+ * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
+ * from old cgroup.
+ */
+static int mem_cgroup_move_account(struct page *page,
+ unsigned int nr_pages,
+ struct mem_cgroup *from,
+ struct mem_cgroup *to)
+{
+ unsigned long flags;
+ int ret;
+
+ VM_BUG_ON(from == to);
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ /*
+ * The page is isolated from LRU. So, collapse function
+ * will not handle this page. But page splitting can happen.
+ * Do this check under compound_page_lock(). The caller should
+ * hold it.
+ */
+ ret = -EBUSY;
+ if (nr_pages > 1 && !PageTransHuge(page))
+ goto out;
+
+ /*
+ * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
+ * of its source page while we change it: page migration takes
+ * both pages off the LRU, but page cache replacement doesn't.
+ */
+ if (!trylock_page(page))
+ goto out;
+
+ ret = -EINVAL;
+ if (page->mem_cgroup != from)
+ goto out_unlock;
+
+ spin_lock_irqsave(&from->move_lock, flags);
+
+ if (!PageAnon(page) && page_mapped(page)) {
+ __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+ nr_pages);
+ __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+ nr_pages);
+ }
+
+ if (PageWriteback(page)) {
+ __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+ nr_pages);
+ __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
+ nr_pages);
+ }
+
+ /*
+ * It is safe to change page->mem_cgroup here because the page
+ * is referenced, charged, and isolated - we can't race with
+ * uncharging, charging, migration, or LRU putback.
+ */
+
+ /* caller should have done css_get */
+ page->mem_cgroup = to;
+ spin_unlock_irqrestore(&from->move_lock, flags);
+
+ ret = 0;
+
+ local_irq_disable();
+ mem_cgroup_charge_statistics(to, page, nr_pages);
+ memcg_check_events(to, page);
+ mem_cgroup_charge_statistics(from, page, -nr_pages);
+ memcg_check_events(from, page);
+ local_irq_enable();
+out_unlock:
+ unlock_page(page);
+out:
+ return ret;
+}
+
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, union mc_target *target)
{
@@ -5012,7 +5007,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
* tunable will only affect upcoming migrations, not the current one.
* So we need to save it, and keep it going.
*/
- move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+ move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
if (move_flags) {
struct mm_struct *mm;
struct mem_cgroup *from = mem_cgroup_from_task(p);
@@ -5246,7 +5241,7 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
static int memory_low_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long low = ACCESS_ONCE(memcg->low);
+ unsigned long low = READ_ONCE(memcg->low);
if (low == PAGE_COUNTER_MAX)
seq_puts(m, "max\n");
@@ -5276,7 +5271,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
static int memory_high_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long high = ACCESS_ONCE(memcg->high);
+ unsigned long high = READ_ONCE(memcg->high);
if (high == PAGE_COUNTER_MAX)
seq_puts(m, "max\n");
@@ -5306,7 +5301,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
static int memory_max_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long max = ACCESS_ONCE(memcg->memory.limit);
+ unsigned long max = READ_ONCE(memcg->memory.limit);
if (max == PAGE_COUNTER_MAX)
seq_puts(m, "max\n");
@@ -5861,7 +5856,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
id = swap_cgroup_record(entry, 0);
rcu_read_lock();
- memcg = mem_cgroup_lookup(id);
+ memcg = mem_cgroup_from_id(id);
if (memcg) {
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memsw, 1);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d487f8dc6d39..501820c815b3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -521,6 +521,52 @@ static const char *action_name[] = {
[RECOVERED] = "Recovered",
};
+enum action_page_type {
+ MSG_KERNEL,
+ MSG_KERNEL_HIGH_ORDER,
+ MSG_SLAB,
+ MSG_DIFFERENT_COMPOUND,
+ MSG_POISONED_HUGE,
+ MSG_HUGE,
+ MSG_FREE_HUGE,
+ MSG_UNMAP_FAILED,
+ MSG_DIRTY_SWAPCACHE,
+ MSG_CLEAN_SWAPCACHE,
+ MSG_DIRTY_MLOCKED_LRU,
+ MSG_CLEAN_MLOCKED_LRU,
+ MSG_DIRTY_UNEVICTABLE_LRU,
+ MSG_CLEAN_UNEVICTABLE_LRU,
+ MSG_DIRTY_LRU,
+ MSG_CLEAN_LRU,
+ MSG_TRUNCATED_LRU,
+ MSG_BUDDY,
+ MSG_BUDDY_2ND,
+ MSG_UNKNOWN,
+};
+
+static const char * const action_page_types[] = {
+ [MSG_KERNEL] = "reserved kernel page",
+ [MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
+ [MSG_SLAB] = "kernel slab page",
+ [MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
+ [MSG_POISONED_HUGE] = "huge page already hardware poisoned",
+ [MSG_HUGE] = "huge page",
+ [MSG_FREE_HUGE] = "free huge page",
+ [MSG_UNMAP_FAILED] = "unmapping failed page",
+ [MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
+ [MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
+ [MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
+ [MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
+ [MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
+ [MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
+ [MSG_DIRTY_LRU] = "dirty LRU page",
+ [MSG_CLEAN_LRU] = "clean LRU page",
+ [MSG_TRUNCATED_LRU] = "already truncated LRU page",
+ [MSG_BUDDY] = "free buddy page",
+ [MSG_BUDDY_2ND] = "free buddy page (2nd try)",
+ [MSG_UNKNOWN] = "unknown page",
+};
+
/*
* XXX: It is possible that a page is isolated from LRU cache,
* and then kept in swap cache or failed to remove from page cache.
@@ -777,10 +823,10 @@ static int me_huge_page(struct page *p, unsigned long pfn)
static struct page_state {
unsigned long mask;
unsigned long res;
- char *msg;
+ enum action_page_type type;
int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
- { reserved, reserved, "reserved kernel", me_kernel },
+ { reserved, reserved, MSG_KERNEL, me_kernel },
/*
* free pages are specially detected outside this table:
* PG_buddy pages only make a small fraction of all free pages.
@@ -791,31 +837,31 @@ static struct page_state {
* currently unused objects without touching them. But just
* treat it as standard kernel for now.
*/
- { slab, slab, "kernel slab", me_kernel },
+ { slab, slab, MSG_SLAB, me_kernel },
#ifdef CONFIG_PAGEFLAGS_EXTENDED
- { head, head, "huge", me_huge_page },
- { tail, tail, "huge", me_huge_page },
+ { head, head, MSG_HUGE, me_huge_page },
+ { tail, tail, MSG_HUGE, me_huge_page },
#else
- { compound, compound, "huge", me_huge_page },
+ { compound, compound, MSG_HUGE, me_huge_page },
#endif
- { sc|dirty, sc|dirty, "dirty swapcache", me_swapcache_dirty },
- { sc|dirty, sc, "clean swapcache", me_swapcache_clean },
+ { sc|dirty, sc|dirty, MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
+ { sc|dirty, sc, MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
- { mlock|dirty, mlock|dirty, "dirty mlocked LRU", me_pagecache_dirty },
- { mlock|dirty, mlock, "clean mlocked LRU", me_pagecache_clean },
+ { mlock|dirty, mlock|dirty, MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
+ { mlock|dirty, mlock, MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
- { unevict|dirty, unevict|dirty, "dirty unevictable LRU", me_pagecache_dirty },
- { unevict|dirty, unevict, "clean unevictable LRU", me_pagecache_clean },
+ { unevict|dirty, unevict|dirty, MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
+ { unevict|dirty, unevict, MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
- { lru|dirty, lru|dirty, "dirty LRU", me_pagecache_dirty },
- { lru|dirty, lru, "clean LRU", me_pagecache_clean },
+ { lru|dirty, lru|dirty, MSG_DIRTY_LRU, me_pagecache_dirty },
+ { lru|dirty, lru, MSG_CLEAN_LRU, me_pagecache_clean },
/*
* Catchall entry: must be at end.
*/
- { 0, 0, "unknown page state", me_unknown },
+ { 0, 0, MSG_UNKNOWN, me_unknown },
};
#undef dirty
@@ -835,10 +881,10 @@ static struct page_state {
* "Dirty/Clean" indication is not 100% accurate due to the possibility of
* setting PG_dirty outside page lock. See also comment above set_page_dirty().
*/
-static void action_result(unsigned long pfn, char *msg, int result)
+static void action_result(unsigned long pfn, enum action_page_type type, int result)
{
- pr_err("MCE %#lx: %s page recovery: %s\n",
- pfn, msg, action_name[result]);
+ pr_err("MCE %#lx: recovery action for %s: %s\n",
+ pfn, action_page_types[type], action_name[result]);
}
static int page_action(struct page_state *ps, struct page *p,
@@ -854,11 +900,11 @@ static int page_action(struct page_state *ps, struct page *p,
count--;
if (count != 0) {
printk(KERN_ERR
- "MCE %#lx: %s page still referenced by %d users\n",
- pfn, ps->msg, count);
+ "MCE %#lx: %s still referenced by %d users\n",
+ pfn, action_page_types[ps->type], count);
result = FAILED;
}
- action_result(pfn, ps->msg, result);
+ action_result(pfn, ps->type, result);
/* Could do more checks here if page looks ok */
/*
@@ -1106,7 +1152,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
if (!(flags & MF_COUNT_INCREASED) &&
!get_page_unless_zero(hpage)) {
if (is_free_buddy_page(p)) {
- action_result(pfn, "free buddy", DELAYED);
+ action_result(pfn, MSG_BUDDY, DELAYED);
return 0;
} else if (PageHuge(hpage)) {
/*
@@ -1123,12 +1169,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
- action_result(pfn, "free huge",
+ action_result(pfn, MSG_FREE_HUGE,
res ? IGNORED : DELAYED);
unlock_page(hpage);
return res;
} else {
- action_result(pfn, "high order kernel", IGNORED);
+ action_result(pfn, MSG_KERNEL_HIGH_ORDER, IGNORED);
return -EBUSY;
}
}
@@ -1141,18 +1187,19 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* The check (unnecessarily) ignores LRU pages being isolated and
* walked by the page reclaim code, however that's not a big loss.
*/
- if (!PageHuge(p) && !PageTransTail(p)) {
- if (!PageLRU(p))
- shake_page(p, 0);
- if (!PageLRU(p)) {
+ if (!PageHuge(p)) {
+ if (!PageLRU(hpage))
+ shake_page(hpage, 0);
+ if (!PageLRU(hpage)) {
/*
* shake_page could have turned it free.
*/
if (is_free_buddy_page(p)) {
if (flags & MF_COUNT_INCREASED)
- action_result(pfn, "free buddy", DELAYED);
+ action_result(pfn, MSG_BUDDY, DELAYED);
else
- action_result(pfn, "free buddy, 2nd try", DELAYED);
+ action_result(pfn, MSG_BUDDY_2ND,
+ DELAYED);
return 0;
}
}
@@ -1165,7 +1212,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* If this happens just bail out.
*/
if (compound_head(p) != hpage) {
- action_result(pfn, "different compound page after locking", IGNORED);
+ action_result(pfn, MSG_DIFFERENT_COMPOUND, IGNORED);
res = -EBUSY;
goto out;
}
@@ -1205,8 +1252,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* on the head page to show that the hugepage is hwpoisoned
*/
if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
- action_result(pfn, "hugepage already hardware poisoned",
- IGNORED);
+ action_result(pfn, MSG_POISONED_HUGE, IGNORED);
unlock_page(hpage);
put_page(hpage);
return 0;
@@ -1235,7 +1281,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
!= SWAP_SUCCESS) {
- action_result(pfn, "unmapping failed", IGNORED);
+ action_result(pfn, MSG_UNMAP_FAILED, IGNORED);
res = -EBUSY;
goto out;
}
@@ -1244,7 +1290,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* Torn down by someone else?
*/
if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
- action_result(pfn, "already truncated LRU", IGNORED);
+ action_result(pfn, MSG_TRUNCATED_LRU, IGNORED);
res = -EBUSY;
goto out;
}
@@ -1540,8 +1586,18 @@ static int soft_offline_huge_page(struct page *page, int flags)
}
unlock_page(hpage);
- /* Keep page count to indicate a given hugepage is isolated. */
- list_move(&hpage->lru, &pagelist);
+ ret = isolate_huge_page(hpage, &pagelist);
+ if (ret) {
+ /*
+ * get_any_page() and isolate_huge_page() each take a refcount,
+ * so we need to drop one here.
+ */
+ put_page(hpage);
+ } else {
+ pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
+ return -EBUSY;
+ }
+
ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
@@ -1721,12 +1777,12 @@ int soft_offline_page(struct page *page, int flags)
} else if (ret == 0) { /* for free pages */
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_order(hpage),
+ if (!dequeue_hwpoisoned_huge_page(hpage))
+ atomic_long_add(1 << compound_order(hpage),
&num_poisoned_pages);
} else {
- SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
+ if (!TestSetPageHWPoison(page))
+ atomic_long_inc(&num_poisoned_pages);
}
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
index 97839f5c8c30..22e037e3364e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -690,12 +690,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
/*
* Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
*/
- if (vma->vm_ops)
- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
- vma->vm_ops->fault);
- if (vma->vm_file)
- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
- vma->vm_file->f_op->mmap);
+ pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
+ vma->vm_file,
+ vma->vm_ops ? vma->vm_ops->fault : NULL,
+ vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
+ mapping ? mapping->a_ops->readpage : NULL);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
@@ -1983,167 +1982,91 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
}
/*
- * This routine handles present pages, when users try to write
- * to a shared page. It is done by copying the page to a new address
- * and decrementing the shared-page counter for the old page.
+ * Handle write page faults for pages that can be reused in the current vma
*
- * Note that this routine assumes that the protection checks have been
- * done by the caller (the low-level page fault routine in most cases).
- * Thus we can safely just mark it writable once we've done any necessary
- * COW.
- *
- * We also mark the page dirty at this point even though the page will
- * change only once the write actually happens. This avoids a few races,
- * and potentially makes it more efficient.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), with pte both mapped and locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
+ * This can happen either due to the mapping having the VM_SHARED flag,
+ * or due to us holding the last remaining reference to the page. In either
+ * case, all we need to do here is to mark the page as writable and update
+ * any related book-keeping.
*/
-static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *page_table, pmd_t *pmd,
- spinlock_t *ptl, pte_t orig_pte)
+static inline int wp_page_reuse(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
+ struct page *page, int page_mkwrite,
+ int dirty_shared)
__releases(ptl)
{
- struct page *old_page, *new_page = NULL;
pte_t entry;
- int ret = 0;
- int page_mkwrite = 0;
- bool dirty_shared = false;
- unsigned long mmun_start = 0; /* For mmu_notifiers */
- unsigned long mmun_end = 0; /* For mmu_notifiers */
- struct mem_cgroup *memcg;
-
- old_page = vm_normal_page(vma, address, orig_pte);
- if (!old_page) {
- /*
- * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
- * VM_PFNMAP VMA.
- *
- * We should not cow pages in a shared writeable mapping.
- * Just mark the pages writable as we can't do any dirty
- * accounting on raw pfn maps.
- */
- if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
- (VM_WRITE|VM_SHARED))
- goto reuse;
- goto gotten;
- }
-
/*
- * Take out anonymous pages first, anonymous shared vmas are
- * not dirty accountable.
+ * Clear the page's cpupid information as the existing
+ * information potentially belongs to a now completely
+ * unrelated process.
*/
- if (PageAnon(old_page) && !PageKsm(old_page)) {
- if (!trylock_page(old_page)) {
- page_cache_get(old_page);
- pte_unmap_unlock(page_table, ptl);
- lock_page(old_page);
- page_table = pte_offset_map_lock(mm, pmd, address,
- &ptl);
- if (!pte_same(*page_table, orig_pte)) {
- unlock_page(old_page);
- goto unlock;
- }
- page_cache_release(old_page);
- }
- if (reuse_swap_page(old_page)) {
- /*
- * The page is all ours. Move it to our anon_vma so
- * the rmap code will not search our parent or siblings.
- * Protected against the rmap code by the page lock.
- */
- page_move_anon_rmap(old_page, vma, address);
- unlock_page(old_page);
- goto reuse;
- }
- unlock_page(old_page);
- } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
- (VM_WRITE|VM_SHARED))) {
- page_cache_get(old_page);
- /*
- * Only catch write-faults on shared writable pages,
- * read-only shared pages can get COWed by
- * get_user_pages(.write=1, .force=1).
- */
- if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
- int tmp;
-
- pte_unmap_unlock(page_table, ptl);
- tmp = do_page_mkwrite(vma, old_page, address);
- if (unlikely(!tmp || (tmp &
- (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
- page_cache_release(old_page);
- return tmp;
- }
- /*
- * Since we dropped the lock we need to revalidate
- * the PTE as someone else may have changed it. If
- * they did, we just return, as we can count on the
- * MMU to tell us if they didn't also make it writable.
- */
- page_table = pte_offset_map_lock(mm, pmd, address,
- &ptl);
- if (!pte_same(*page_table, orig_pte)) {
- unlock_page(old_page);
- goto unlock;
- }
- page_mkwrite = 1;
- }
-
- dirty_shared = true;
-
-reuse:
- /*
- * Clear the pages cpupid information as the existing
- * information potentially belongs to a now completely
- * unrelated process.
- */
- if (old_page)
- page_cpupid_xchg_last(old_page, (1 << LAST_CPUPID_SHIFT) - 1);
+ if (page)
+ page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
- flush_cache_page(vma, address, pte_pfn(orig_pte));
- entry = pte_mkyoung(orig_pte);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (ptep_set_access_flags(vma, address, page_table, entry,1))
- update_mmu_cache(vma, address, page_table);
- pte_unmap_unlock(page_table, ptl);
- ret |= VM_FAULT_WRITE;
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
+ entry = pte_mkyoung(orig_pte);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (ptep_set_access_flags(vma, address, page_table, entry, 1))
+ update_mmu_cache(vma, address, page_table);
+ pte_unmap_unlock(page_table, ptl);
- if (dirty_shared) {
- struct address_space *mapping;
- int dirtied;
+ if (dirty_shared) {
+ struct address_space *mapping;
+ int dirtied;
- if (!page_mkwrite)
- lock_page(old_page);
+ if (!page_mkwrite)
+ lock_page(page);
- dirtied = set_page_dirty(old_page);
- VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
- mapping = old_page->mapping;
- unlock_page(old_page);
- page_cache_release(old_page);
+ dirtied = set_page_dirty(page);
+ VM_BUG_ON_PAGE(PageAnon(page), page);
+ mapping = page->mapping;
+ unlock_page(page);
+ page_cache_release(page);
- if ((dirtied || page_mkwrite) && mapping) {
- /*
- * Some device drivers do not set page.mapping
- * but still dirty their pages
- */
- balance_dirty_pages_ratelimited(mapping);
- }
-
- if (!page_mkwrite)
- file_update_time(vma->vm_file);
+ if ((dirtied || page_mkwrite) && mapping) {
+ /*
+ * Some device drivers do not set page.mapping
+ * but still dirty their pages
+ */
+ balance_dirty_pages_ratelimited(mapping);
}
- return ret;
+ if (!page_mkwrite)
+ file_update_time(vma->vm_file);
}
- /*
- * Ok, we need to copy. Oh, well..
- */
- page_cache_get(old_page);
-gotten:
- pte_unmap_unlock(page_table, ptl);
+ return VM_FAULT_WRITE;
+}
+
+/*
+ * Handle the case of a page which we actually need to copy to a new page.
+ *
+ * Called with mmap_sem locked and the old page referenced, but
+ * without the ptl held.
+ *
+ * High level logic flow:
+ *
+ * - Allocate a page, copy the content of the old page to the new one.
+ * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
+ * - Take the PTL. If the pte changed, bail out and release the allocated page
+ * - If the pte is still the way we remember it, update the page table and all
+ * relevant references. This includes dropping the reference the page-table
+ * held to the old page, as well as updating the rmap.
+ * - In any case, unlock the PTL and drop the reference we took to the old page.
+ */
+static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *page_table, pmd_t *pmd,
+ pte_t orig_pte, struct page *old_page)
+{
+ struct page *new_page = NULL;
+ spinlock_t *ptl = NULL;
+ pte_t entry;
+ int page_copied = 0;
+ const unsigned long mmun_start = address & PAGE_MASK; /* For mmu_notifiers */
+ const unsigned long mmun_end = mmun_start + PAGE_SIZE; /* For mmu_notifiers */
+ struct mem_cgroup *memcg;
if (unlikely(anon_vma_prepare(vma)))
goto oom;
@@ -2163,8 +2086,6 @@ gotten:
if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg))
goto oom_free_new;
- mmun_start = address & PAGE_MASK;
- mmun_end = mmun_start + PAGE_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
@@ -2177,8 +2098,9 @@ gotten:
dec_mm_counter_fast(mm, MM_FILEPAGES);
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
- } else
+ } else {
inc_mm_counter_fast(mm, MM_ANONPAGES);
+ }
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2227,29 +2149,29 @@ gotten:
/* Free the old page.. */
new_page = old_page;
- ret |= VM_FAULT_WRITE;
- } else
+ page_copied = 1;
+ } else {
mem_cgroup_cancel_charge(new_page, memcg);
+ }
if (new_page)
page_cache_release(new_page);
-unlock:
+
pte_unmap_unlock(page_table, ptl);
- if (mmun_end > mmun_start)
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (old_page) {
/*
* Don't let another task, with possibly unlocked vma,
* keep the mlocked page.
*/
- if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+ if (page_copied && (vma->vm_flags & VM_LOCKED)) {
lock_page(old_page); /* LRU manipulation */
munlock_vma_page(old_page);
unlock_page(old_page);
}
page_cache_release(old_page);
}
- return ret;
+ return page_copied ? VM_FAULT_WRITE : 0;
oom_free_new:
page_cache_release(new_page);
oom:
@@ -2258,6 +2180,179 @@ oom:
return VM_FAULT_OOM;
}
+/*
+ * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
+ * mapping
+ */
+static int wp_pfn_shared(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
+ pmd_t *pmd)
+{
+ if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
+ struct vm_fault vmf = {
+ .page = NULL,
+ .pgoff = linear_page_index(vma, address),
+ .virtual_address = (void __user *)(address & PAGE_MASK),
+ .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
+ };
+ int ret;
+
+ pte_unmap_unlock(page_table, ptl);
+ ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
+ if (ret & VM_FAULT_ERROR)
+ return ret;
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ /*
+ * We might have raced with another page fault while we
+ * released the pte_offset_map_lock.
+ */
+ if (!pte_same(*page_table, orig_pte)) {
+ pte_unmap_unlock(page_table, ptl);
+ return 0;
+ }
+ }
+ return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte,
+ NULL, 0, 0);
+}
+
+static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *page_table,
+ pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte,
+ struct page *old_page)
+ __releases(ptl)
+{
+ int page_mkwrite = 0;
+
+ page_cache_get(old_page);
+
+ /*
+ * Only catch write-faults on shared writable pages,
+ * read-only shared pages can get COWed by
+ * get_user_pages(.write=1, .force=1).
+ */
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+ int tmp;
+
+ pte_unmap_unlock(page_table, ptl);
+ tmp = do_page_mkwrite(vma, old_page, address);
+ if (unlikely(!tmp || (tmp &
+ (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+ page_cache_release(old_page);
+ return tmp;
+ }
+ /*
+ * Since we dropped the lock we need to revalidate
+ * the PTE as someone else may have changed it. If
+ * they did, we just return, as we can count on the
+ * MMU to tell us if they didn't also make it writable.
+ */
+ page_table = pte_offset_map_lock(mm, pmd, address,
+ &ptl);
+ if (!pte_same(*page_table, orig_pte)) {
+ unlock_page(old_page);
+ pte_unmap_unlock(page_table, ptl);
+ page_cache_release(old_page);
+ return 0;
+ }
+ page_mkwrite = 1;
+ }
+
+ return wp_page_reuse(mm, vma, address, page_table, ptl,
+ orig_pte, old_page, page_mkwrite, 1);
+}
+
+/*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+ * and decrementing the shared-page counter for the old page.
+ *
+ * Note that this routine assumes that the protection checks have been
+ * done by the caller (the low-level page fault routine in most cases).
+ * Thus we can safely just mark it writable once we've done any necessary
+ * COW.
+ *
+ * We also mark the page dirty at this point even though the page will
+ * change only once the write actually happens. This avoids a few races,
+ * and potentially makes it more efficient.
+ *
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), with pte both mapped and locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+ */
+static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *page_table, pmd_t *pmd,
+ spinlock_t *ptl, pte_t orig_pte)
+ __releases(ptl)
+{
+ struct page *old_page;
+
+ old_page = vm_normal_page(vma, address, orig_pte);
+ if (!old_page) {
+ /*
+ * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
+ * VM_PFNMAP VMA.
+ *
+ * We should not cow pages in a shared writeable mapping.
+ * Just mark the pages writable and/or call ops->pfn_mkwrite.
+ */
+ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+ (VM_WRITE|VM_SHARED))
+ return wp_pfn_shared(mm, vma, address, page_table, ptl,
+ orig_pte, pmd);
+
+ pte_unmap_unlock(page_table, ptl);
+ return wp_page_copy(mm, vma, address, page_table, pmd,
+ orig_pte, old_page);
+ }
+
+ /*
+ * Take out anonymous pages first, anonymous shared vmas are
+ * not dirty accountable.
+ */
+ if (PageAnon(old_page) && !PageKsm(old_page)) {
+ if (!trylock_page(old_page)) {
+ page_cache_get(old_page);
+ pte_unmap_unlock(page_table, ptl);
+ lock_page(old_page);
+ page_table = pte_offset_map_lock(mm, pmd, address,
+ &ptl);
+ if (!pte_same(*page_table, orig_pte)) {
+ unlock_page(old_page);
+ pte_unmap_unlock(page_table, ptl);
+ page_cache_release(old_page);
+ return 0;
+ }
+ page_cache_release(old_page);
+ }
+ if (reuse_swap_page(old_page)) {
+ /*
+ * The page is all ours. Move it to our anon_vma so
+ * the rmap code will not search our parent or siblings.
+ * Protected against the rmap code by the page lock.
+ */
+ page_move_anon_rmap(old_page, vma, address);
+ unlock_page(old_page);
+ return wp_page_reuse(mm, vma, address, page_table, ptl,
+ orig_pte, old_page, 0, 0);
+ }
+ unlock_page(old_page);
+ } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+ (VM_WRITE|VM_SHARED))) {
+ return wp_page_shared(mm, vma, address, page_table, pmd,
+ ptl, orig_pte, old_page);
+ }
+
+ /*
+ * Ok, we need to copy. Oh, well..
+ */
+ page_cache_get(old_page);
+
+ pte_unmap_unlock(page_table, ptl);
+ return wp_page_copy(mm, vma, address, page_table, pmd,
+ orig_pte, old_page);
+}
+
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
unsigned long start_addr, unsigned long end_addr,
struct zap_details *details)
@@ -2784,7 +2879,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
struct vm_fault vmf;
int off;
- nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+ nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
start_addr = max(address & mask, vma->vm_start);
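
The reworked do_wp_page() above now dispatches to one of four helpers: wp_pfn_shared() for shared VM_PFNMAP/VM_MIXEDMAP faults, wp_page_reuse() for exclusively-owned anonymous pages, wp_page_shared() for shared file-backed pages, and wp_page_copy() otherwise. A condensed userspace sketch of that decision order (illustration only, with simplified names; not code from the patch):

#include <stdio.h>
#include <stdbool.h>

enum wp_path { WP_PFN_SHARED, WP_COPY, WP_REUSE_ANON, WP_PAGE_SHARED };

/* Mirrors the dispatch order of the reworked do_wp_page(). */
static enum wp_path classify(bool normal_page, bool anon, bool ksm,
                             bool reusable, bool shared_writable)
{
        if (!normal_page)               /* !vm_normal_page(): pfn mapping */
                return shared_writable ? WP_PFN_SHARED : WP_COPY;
        if (anon && !ksm)               /* anonymous, non-KSM page */
                return reusable ? WP_REUSE_ANON : WP_COPY;
        if (shared_writable)            /* shared writable file mapping */
                return WP_PAGE_SHARED;
        return WP_COPY;                 /* everything else gets COWed */
}

int main(void)
{
        /* exclusively owned anonymous page -> reuse (prints 2) */
        printf("%d\n", classify(true, true, false, true, false));
        return 0;
}
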
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 65842d688b7c..457bde530cbe 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -104,7 +104,7 @@ void put_online_mems(void)
}
-static void mem_hotplug_begin(void)
+void mem_hotplug_begin(void)
{
mem_hotplug.active_writer = current;
@@ -119,7 +119,7 @@ static void mem_hotplug_begin(void)
}
}
-static void mem_hotplug_done(void)
+void mem_hotplug_done(void)
{
mem_hotplug.active_writer = NULL;
mutex_unlock(&mem_hotplug.lock);
@@ -502,7 +502,7 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
for (i = start_sec; i <= end_sec; i++) {
- err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
+ err = __add_section(nid, zone, section_nr_to_pfn(i));
/*
* EEXIST is finally dealt with by ioresource collision
@@ -959,6 +959,7 @@ static void node_states_set_node(int node, struct memory_notify *arg)
}
+/* Must be protected by mem_hotplug_begin() */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
unsigned long flags;
@@ -969,7 +970,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
int ret;
struct memory_notify arg;
- mem_hotplug_begin();
/*
* This doesn't need a lock to do pfn_to_page().
* The section can't be removed here because of the
@@ -977,21 +977,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
*/
zone = page_zone(pfn_to_page(pfn));
- ret = -EINVAL;
if ((zone_idx(zone) > ZONE_NORMAL ||
online_type == MMOP_ONLINE_MOVABLE) &&
!can_online_high_movable(zone))
- goto out;
+ return -EINVAL;
if (online_type == MMOP_ONLINE_KERNEL &&
zone_idx(zone) == ZONE_MOVABLE) {
if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))
- goto out;
+ return -EINVAL;
}
if (online_type == MMOP_ONLINE_MOVABLE &&
zone_idx(zone) == ZONE_MOVABLE - 1) {
if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))
- goto out;
+ return -EINVAL;
}
/* Previous code may changed the zone of the pfn range */
@@ -1007,7 +1006,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
ret = notifier_to_errno(ret);
if (ret) {
memory_notify(MEM_CANCEL_ONLINE, &arg);
- goto out;
+ return ret;
}
/*
* If this zone is not populated, then it is not in zonelist.
@@ -1031,7 +1030,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
(((unsigned long long) pfn + nr_pages)
<< PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
- goto out;
+ return ret;
}
zone->present_pages += onlined_pages;
@@ -1061,9 +1060,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
if (onlined_pages)
memory_notify(MEM_ONLINE, &arg);
-out:
- mem_hotplug_done();
- return ret;
+ return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
@@ -1376,7 +1373,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
if (PageLRU(page))
return pfn;
if (PageHuge(page)) {
- if (is_hugepage_active(page))
+ if (page_huge_active(page))
return pfn;
else
pfn = round_up(pfn + 1,
@@ -1688,21 +1685,18 @@ static int __ref __offline_pages(unsigned long start_pfn,
if (!test_pages_in_a_zone(start_pfn, end_pfn))
return -EINVAL;
- mem_hotplug_begin();
-
zone = page_zone(pfn_to_page(start_pfn));
node = zone_to_nid(zone);
nr_pages = end_pfn - start_pfn;
- ret = -EINVAL;
if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
- goto out;
+ return -EINVAL;
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE, true);
if (ret)
- goto out;
+ return ret;
arg.start_pfn = start_pfn;
arg.nr_pages = nr_pages;
@@ -1795,7 +1789,6 @@ repeat:
writeback_set_ratelimit();
memory_notify(MEM_OFFLINE, &arg);
- mem_hotplug_done();
return 0;
failed_removal:
@@ -1805,12 +1798,10 @@ failed_removal:
memory_notify(MEM_CANCEL_OFFLINE, &arg);
/* pushback to free area */
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
-
-out:
- mem_hotplug_done();
return ret;
}
+/* Must be protected by mem_hotplug_begin() */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
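
With mem_hotplug_begin()/mem_hotplug_done() no longer taken inside online_pages() and __offline_pages(), the locking moves to the callers, hence the now-exported definitions and the "Must be protected by mem_hotplug_begin()" comments. A minimal sketch of the resulting caller-side pattern (illustrative only, not a hunk from this patch; assumes the declarations from linux/memory_hotplug.h):

/* Illustrative caller: the hotplug lock now wraps the call itself. */
static int example_online_range(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mem_hotplug_begin();
        ret = online_pages(pfn, nr_pages, MMOP_ONLINE_KERNEL);
        mem_hotplug_done();

        return ret;
}
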
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4721046a134a..747743237d9f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -945,7 +945,8 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
return alloc_huge_page_node(page_hstate(compound_head(page)),
node);
else
- return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
+ return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE |
+ __GFP_THISNODE, 0);
}
/*
@@ -1985,7 +1986,8 @@ retry_cpuset:
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(node, *nmask)) {
mpol_cond_put(pol);
- page = alloc_pages_exact_node(node, gfp, order);
+ page = alloc_pages_exact_node(node,
+ gfp | __GFP_THISNODE, order);
goto out;
}
}
@@ -2516,7 +2518,7 @@ static void __init check_numabalancing_enable(void)
if (numabalancing_override)
set_numabalancing_state(numabalancing_override == 1);
- if (nr_node_ids > 1 && !numabalancing_override) {
+ if (num_online_nodes() > 1 && !numabalancing_override) {
pr_info("%s automatic NUMA balancing. "
"Configure with numa_balancing= or the "
"kernel.numa_balancing sysctl",
diff --git a/mm/mempool.c b/mm/mempool.c
index e209c98c7203..2cc08de8b1db 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -6,26 +6,138 @@
* extreme VM load.
*
* started by Ingo Molnar, Copyright (C) 2001
+ * debugging by David Rientjes, Copyright (C) 2015
*/
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
+#include "slab.h"
+
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+static void poison_error(mempool_t *pool, void *element, size_t size,
+ size_t byte)
+{
+ const int nr = pool->curr_nr;
+ const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
+ const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
+ int i;
+
+ pr_err("BUG: mempool element poison mismatch\n");
+ pr_err("Mempool %p size %zu\n", pool, size);
+ pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
+ for (i = start; i < end; i++)
+ pr_cont("%x ", *(u8 *)(element + i));
+ pr_cont("%s\n", end < size ? "..." : "");
+ dump_stack();
+}
+
+static void __check_element(mempool_t *pool, void *element, size_t size)
+{
+ u8 *obj = element;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
+
+ if (obj[i] != exp) {
+ poison_error(pool, element, size, i);
+ return;
+ }
+ }
+ memset(obj, POISON_INUSE, size);
+}
+
+static void check_element(mempool_t *pool, void *element)
+{
+ /* Mempools backed by slab allocator */
+ if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+ __check_element(pool, element, ksize(element));
+
+ /* Mempools backed by page allocator */
+ if (pool->free == mempool_free_pages) {
+ int order = (int)(long)pool->pool_data;
+ void *addr = kmap_atomic((struct page *)element);
+
+ __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
+ kunmap_atomic(addr);
+ }
+}
+
+static void __poison_element(void *element, size_t size)
+{
+ u8 *obj = element;
+
+ memset(obj, POISON_FREE, size - 1);
+ obj[size - 1] = POISON_END;
+}
+
+static void poison_element(mempool_t *pool, void *element)
+{
+ /* Mempools backed by slab allocator */
+ if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+ __poison_element(element, ksize(element));
+
+ /* Mempools backed by page allocator */
+ if (pool->alloc == mempool_alloc_pages) {
+ int order = (int)(long)pool->pool_data;
+ void *addr = kmap_atomic((struct page *)element);
+
+ __poison_element(addr, 1UL << (PAGE_SHIFT + order));
+ kunmap_atomic(addr);
+ }
+}
+#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+static inline void check_element(mempool_t *pool, void *element)
+{
+}
+static inline void poison_element(mempool_t *pool, void *element)
+{
+}
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+
+static void kasan_poison_element(mempool_t *pool, void *element)
+{
+ if (pool->alloc == mempool_alloc_slab)
+ kasan_slab_free(pool->pool_data, element);
+ if (pool->alloc == mempool_kmalloc)
+ kasan_kfree(element);
+ if (pool->alloc == mempool_alloc_pages)
+ kasan_free_pages(element, (unsigned long)pool->pool_data);
+}
+
+static void kasan_unpoison_element(mempool_t *pool, void *element)
+{
+ if (pool->alloc == mempool_alloc_slab)
+ kasan_slab_alloc(pool->pool_data, element);
+ if (pool->alloc == mempool_kmalloc)
+ kasan_krealloc(element, (size_t)pool->pool_data);
+ if (pool->alloc == mempool_alloc_pages)
+ kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+}
static void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
+ poison_element(pool, element);
+ kasan_poison_element(pool, element);
pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
- BUG_ON(pool->curr_nr <= 0);
- return pool->elements[--pool->curr_nr];
+ void *element = pool->elements[--pool->curr_nr];
+
+ BUG_ON(pool->curr_nr < 0);
+ check_element(pool, element);
+ kasan_unpoison_element(pool, element);
+ return element;
}
/**
@@ -113,23 +225,24 @@ EXPORT_SYMBOL(mempool_create_node);
* mempool_create().
* @new_min_nr: the new minimum number of elements guaranteed to be
* allocated for this pool.
- * @gfp_mask: the usual allocation bitmask.
*
* This function shrinks/grows the pool. In the case of growing,
* it cannot be guaranteed that the pool will be grown to the new
* size immediately, but new mempool_free() calls will refill it.
+ * This function may sleep.
*
* Note, the caller must guarantee that no mempool_destroy is called
* while this function is running. mempool_alloc() & mempool_free()
* might be called (eg. from IRQ contexts) while this function executes.
*/
-int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr)
{
void *element;
void **new_elements;
unsigned long flags;
BUG_ON(new_min_nr <= 0);
+ might_sleep();
spin_lock_irqsave(&pool->lock, flags);
if (new_min_nr <= pool->min_nr) {
@@ -145,7 +258,8 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
spin_unlock_irqrestore(&pool->lock, flags);
/* Grow the pool */
- new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
+ new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
+ GFP_KERNEL);
if (!new_elements)
return -ENOMEM;
@@ -164,7 +278,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
while (pool->curr_nr < pool->min_nr) {
spin_unlock_irqrestore(&pool->lock, flags);
- element = pool->alloc(gfp_mask, pool->pool_data);
+ element = pool->alloc(GFP_KERNEL, pool->pool_data);
if (!element)
goto out;
spin_lock_irqsave(&pool->lock, flags);
@@ -332,6 +446,7 @@ EXPORT_SYMBOL(mempool_free);
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
struct kmem_cache *mem = pool_data;
+ VM_BUG_ON(mem->ctor);
return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);
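
The new mempool debugging poisons idle elements with POISON_FREE bytes and a trailing POISON_END byte, then verifies the pattern when an element is handed back out. A minimal userspace sketch of that pattern (poison values mirror include/linux/poison.h; illustration only, not kernel code):

#include <stdio.h>
#include <string.h>

#define POISON_FREE 0x6b
#define POISON_END  0xa5

/* Fill an idle element: POISON_FREE everywhere, POISON_END as the last byte. */
static void poison(unsigned char *obj, size_t size)
{
        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}

/* Verify the pattern before reuse; any mismatch means the idle element was written to. */
static int check(const unsigned char *obj, size_t size)
{
        size_t i;

        for (i = 0; i < size; i++) {
                unsigned char exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp)
                        return -1;
        }
        return 0;
}

int main(void)
{
        unsigned char buf[32];

        poison(buf, sizeof(buf));
        buf[7] = 0;     /* simulate a stray write to an idle element */
        printf("poison intact: %s\n", check(buf, sizeof(buf)) ? "no" : "yes");
        return 0;
}
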
diff --git a/arch/x86/mm/memtest.c b/mm/memtest.c
index 1e9da795767a..1997d934b13b 100644
--- a/arch/x86/mm/memtest.c
+++ b/mm/memtest.c
@@ -29,7 +29,7 @@ static u64 patterns[] __initdata = {
0x7a6c7258554e494cULL, /* yeah ;-) */
};
-static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
+static void __init reserve_bad_mem(u64 pattern, phys_addr_t start_bad, phys_addr_t end_bad)
{
printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n",
(unsigned long long) pattern,
@@ -38,11 +38,11 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
memblock_reserve(start_bad, end_bad - start_bad);
}
-static void __init memtest(u64 pattern, u64 start_phys, u64 size)
+static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size)
{
u64 *p, *start, *end;
- u64 start_bad, last_bad;
- u64 start_phys_aligned;
+ phys_addr_t start_bad, last_bad;
+ phys_addr_t start_phys_aligned;
const size_t incr = sizeof(pattern);
start_phys_aligned = ALIGN(start_phys, incr);
@@ -69,14 +69,14 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size)
reserve_bad_mem(pattern, start_bad, last_bad + incr);
}
-static void __init do_one_pass(u64 pattern, u64 start, u64 end)
+static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end)
{
u64 i;
phys_addr_t this_start, this_end;
for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
- this_start = clamp_t(phys_addr_t, this_start, start, end);
- this_end = clamp_t(phys_addr_t, this_end, start, end);
+ this_start = clamp(this_start, start, end);
+ this_end = clamp(this_end, start, end);
if (this_start < this_end) {
printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
(unsigned long long)this_start,
@@ -102,7 +102,7 @@ static int __init parse_memtest(char *arg)
early_param("memtest", parse_memtest);
-void __init early_memtest(unsigned long start, unsigned long end)
+void __init early_memtest(phys_addr_t start, phys_addr_t end)
{
unsigned int i;
unsigned int idx = 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index 85e042686031..f53838fe3dfe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -537,7 +537,8 @@ void migrate_page_copy(struct page *newpage, struct page *page)
* Please do not reorder this without considering how mm/ksm.c's
* get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
*/
- ClearPageSwapCache(page);
+ if (PageSwapCache(page))
+ ClearPageSwapCache(page);
ClearPagePrivate(page);
set_page_private(page, 0);
@@ -901,12 +902,23 @@ out:
}
/*
+ * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move(). Work
+ * around it.
+ */
+#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
+#define ICE_noinline noinline
+#else
+#define ICE_noinline
+#endif
+
+/*
* Obtain the lock on page, remove all ptes and migrate the page
* to the newly allocated page in newpage.
*/
-static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
- unsigned long private, struct page *page, int force,
- enum migrate_mode mode)
+static ICE_noinline int unmap_and_move(new_page_t get_new_page,
+ free_page_t put_new_page,
+ unsigned long private, struct page *page,
+ int force, enum migrate_mode mode)
{
int rc = 0;
int *result = NULL;
@@ -1554,30 +1566,10 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
* page migration rate limiting control.
* Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
* window of time. Default here says do not migrate more than 1280M per second.
- * If a node is rate-limited then PTE NUMA updates are also rate-limited. However
- * as it is faults that reset the window, pte updates will happen unconditionally
- * if there has not been a fault since @pteupdate_interval_millisecs after the
- * throttle window closed.
*/
static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-/* Returns true if NUMA migration is currently rate limited */
-bool migrate_ratelimited(int node)
-{
- pg_data_t *pgdat = NODE_DATA(node);
-
- if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
- msecs_to_jiffies(pteupdate_interval_millisecs)))
- return false;
-
- if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
- return false;
-
- return true;
-}
-
/* Returns true if the node is migrate rate-limited after the update */
static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
unsigned long nr_pages)
diff --git a/mm/mlock.c b/mm/mlock.c
index 8a54cd214925..6fd2cf15e868 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -205,62 +205,6 @@ out:
return nr_pages - 1;
}
-/**
- * __mlock_vma_pages_range() - mlock a range of pages in the vma.
- * @vma: target vma
- * @start: start address
- * @end: end address
- * @nonblocking:
- *
- * This takes care of making the pages present too.
- *
- * return 0 on success, negative error code on error.
- *
- * vma->vm_mm->mmap_sem must be held.
- *
- * If @nonblocking is NULL, it may be held for read or write and will
- * be unperturbed.
- *
- * If @nonblocking is non-NULL, it must held for read only and may be
- * released. If it's released, *@nonblocking will be set to 0.
- */
-long __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end, int *nonblocking)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long nr_pages = (end - start) / PAGE_SIZE;
- int gup_flags;
-
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(end & ~PAGE_MASK);
- VM_BUG_ON_VMA(start < vma->vm_start, vma);
- VM_BUG_ON_VMA(end > vma->vm_end, vma);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
- gup_flags = FOLL_TOUCH | FOLL_MLOCK;
- /*
- * We want to touch writable mappings with a write fault in order
- * to break COW, except for shared mappings because these don't COW
- * and we would not want to dirty them for nothing.
- */
- if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
- gup_flags |= FOLL_WRITE;
-
- /*
- * We want mlock to succeed for regions that have any permissions
- * other than PROT_NONE.
- */
- if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
- gup_flags |= FOLL_FORCE;
-
- /*
- * We made sure addr is within a VMA, so the following will
- * not result in a stack expansion that recurses back here.
- */
- return __get_user_pages(current, mm, start, nr_pages, gup_flags,
- NULL, NULL, nonblocking);
-}
-
/*
* convert get_user_pages() return value to posix mlock() error
*/
@@ -596,7 +540,7 @@ success:
/*
* vm_flags is protected by the mmap_sem held in write mode.
* It's okay if try_to_unmap_one unmaps a page just after we
- * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
+ * set VM_LOCKED, populate_vma_page_range will bring it back.
*/
if (lock)
@@ -660,69 +604,6 @@ static int do_mlock(unsigned long start, size_t len, int on)
return error;
}
-/*
- * __mm_populate - populate and/or mlock pages within a range of address space.
- *
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_sem must not be held.
- */
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
-{
- struct mm_struct *mm = current->mm;
- unsigned long end, nstart, nend;
- struct vm_area_struct *vma = NULL;
- int locked = 0;
- long ret = 0;
-
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(len != PAGE_ALIGN(len));
- end = start + len;
-
- for (nstart = start; nstart < end; nstart = nend) {
- /*
- * We want to fault in pages for [nstart; end) address range.
- * Find first corresponding VMA.
- */
- if (!locked) {
- locked = 1;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, nstart);
- } else if (nstart >= vma->vm_end)
- vma = vma->vm_next;
- if (!vma || vma->vm_start >= end)
- break;
- /*
- * Set [nstart; nend) to intersection of desired address
- * range with the first VMA. Also, skip undesirable VMA types.
- */
- nend = min(end, vma->vm_end);
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- continue;
- if (nstart < vma->vm_start)
- nstart = vma->vm_start;
- /*
- * Now fault in a range of pages. __mlock_vma_pages_range()
- * double checks the vma flags, so that it won't mlock pages
- * if the vma was already munlocked.
- */
- ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
- if (ret < 0) {
- if (ignore_errors) {
- ret = 0;
- continue; /* continue at next VMA */
- }
- ret = __mlock_posix_error_return(ret);
- break;
- }
- nend = nstart + ret * PAGE_SIZE;
- ret = 0;
- }
- if (locked)
- up_read(&mm->mmap_sem);
- return ret; /* 0 or negative error code */
-}
-
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
unsigned long locked;
@@ -750,9 +631,13 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
error = do_mlock(start, len, 1);
up_write(&current->mm->mmap_sem);
- if (!error)
- error = __mm_populate(start, len, 0);
- return error;
+ if (error)
+ return error;
+
+ error = __mm_populate(start, len, 0);
+ if (error)
+ return __mlock_posix_error_return(error);
+ return 0;
}
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
diff --git a/mm/mmap.c b/mm/mmap.c
index 9ec50a368634..bb50cacc3ea5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1133,7 +1133,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
* by another page fault trying to merge _that_. But that's ok: if it
* is being set up, that automatically means that it will be a singleton
* acceptable for merging, so we can do all of this optimistically. But
- * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
+ * we do that READ_ONCE() to make sure that we never re-load the pointer.
*
* IOW: that the "list_is_singular()" test on the anon_vma_chain only
* matters for the 'stable anon_vma' case (ie the thing we want to avoid
@@ -1147,7 +1147,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
if (anon_vma_compatible(a, b)) {
- struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
+ struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
if (anon_vma && list_is_singular(&old->anon_vma_chain))
return anon_vma;
@@ -1551,11 +1551,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
error = -ENOMEM;
-munmap_back:
- if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
+ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
+ &rb_parent)) {
if (do_munmap(mm, addr, len))
return -ENOMEM;
- goto munmap_back;
}
/*
@@ -1571,7 +1570,8 @@ munmap_back:
/*
* Can we just expand an old mapping?
*/
- vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
+ NULL);
if (vma)
goto out;
@@ -2100,7 +2100,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
actual_size = size;
if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
actual_size -= PAGE_SIZE;
- if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
@@ -2108,7 +2108,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
unsigned long locked;
unsigned long limit;
locked = mm->locked_vm + grow;
- limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+ limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
@@ -2316,7 +2316,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
if (!prev || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED)
- __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
+ populate_vma_page_range(prev, addr, prev->vm_end, NULL);
return prev;
}
#else
@@ -2351,7 +2351,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
if (expand_stack(vma, addr))
return NULL;
if (vma->vm_flags & VM_LOCKED)
- __mlock_vma_pages_range(vma, addr, start, NULL);
+ populate_vma_page_range(vma, addr, start, NULL);
return vma;
}
#endif
@@ -2739,11 +2739,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
- munmap_back:
- if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
+ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
+ &rb_parent)) {
if (do_munmap(mm, addr, len))
return -ENOMEM;
- goto munmap_back;
}
/* Check against address space limits *after* clearing old maps... */
diff --git a/mm/mremap.c b/mm/mremap.c
index 2dc44b1cb1df..034e2d360652 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -345,25 +345,25 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
struct vm_area_struct *vma = find_vma(mm, addr);
if (!vma || vma->vm_start > addr)
- goto Efault;
+ return ERR_PTR(-EFAULT);
if (is_vm_hugetlb_page(vma))
- goto Einval;
+ return ERR_PTR(-EINVAL);
/* We can't remap across vm area boundaries */
if (old_len > vma->vm_end - addr)
- goto Efault;
+ return ERR_PTR(-EFAULT);
/* Need to be careful about a growing mapping */
if (new_len > old_len) {
unsigned long pgoff;
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
- goto Efault;
+ return ERR_PTR(-EFAULT);
pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
- goto Einval;
+ return ERR_PTR(-EINVAL);
}
if (vma->vm_flags & VM_LOCKED) {
@@ -372,29 +372,20 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
lock_limit = rlimit(RLIMIT_MEMLOCK);
locked += new_len - old_len;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- goto Eagain;
+ return ERR_PTR(-EAGAIN);
}
if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
- goto Enomem;
+ return ERR_PTR(-ENOMEM);
if (vma->vm_flags & VM_ACCOUNT) {
unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(mm, charged))
- goto Efault;
+ return ERR_PTR(-ENOMEM);
*p = charged;
}
return vma;
-
-Efault: /* very odd choice for most of the cases, but... */
- return ERR_PTR(-EFAULT);
-Einval:
- return ERR_PTR(-EINVAL);
-Enomem:
- return ERR_PTR(-ENOMEM);
-Eagain:
- return ERR_PTR(-EAGAIN);
}
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
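
vma_to_resize() above now returns ERR_PTR() values directly instead of jumping to per-error labels. A simplified userspace rendering of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom it relies on (adapted loosely from include/linux/err.h; illustration only):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Encode a negative errno value in a pointer, and decode/test it again. */
static inline void *ERR_PTR(long error)         { return (void *)error; }
static inline long PTR_ERR(const void *ptr)     { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int ok)
{
        static int object;

        return ok ? (void *)&object : ERR_PTR(-EFAULT);
}

int main(void)
{
        void *p = lookup(0);

        if (IS_ERR(p))
                printf("lookup failed: %ld\n", PTR_ERR(p));
        return 0;
}
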
diff --git a/mm/nommu.c b/mm/nommu.c
index 3fba2dc97c44..e544508e2a4b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1016,7 +1016,7 @@ static int validate_mmap_request(struct file *file,
* device */
if (!file->f_op->get_unmapped_area)
capabilities &= ~NOMMU_MAP_DIRECT;
- if (!file->f_op->read)
+ if (!(file->f_mode & FMODE_CAN_READ))
capabilities &= ~NOMMU_MAP_COPY;
/* The file shall have been opened with read permission. */
@@ -1240,7 +1240,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
+ ret = __vfs_read(vma->vm_file, base, len, &fpos);
set_fs(old_fs);
if (ret < 0)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 642f38cb175a..2b665da1b3c9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -408,7 +408,7 @@ bool oom_killer_disabled __read_mostly;
static DECLARE_RWSEM(oom_sem);
/**
- * mark_tsk_oom_victim - marks the given taks as OOM victim.
+ * mark_tsk_oom_victim - marks the given task as OOM victim.
* @tsk: task to mark
*
* Has to be called with oom_sem taken for read and never after
@@ -612,7 +612,8 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask)
+ int order, const nodemask_t *nodemask,
+ struct mem_cgroup *memcg)
{
if (likely(!sysctl_panic_on_oom))
return;
@@ -625,7 +626,7 @@ void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
if (constraint != CONSTRAINT_NONE)
return;
}
- dump_header(NULL, gfp_mask, order, NULL, nodemask);
+ dump_header(NULL, gfp_mask, order, memcg, nodemask);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
@@ -740,7 +741,7 @@ static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
&totalpages);
mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
- check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
+ check_panic_on_oom(constraint, gfp_mask, order, mpol_mask, NULL);
if (sysctl_oom_kill_allocating_task && current->mm &&
!oom_unkillable_task(current, NULL, nodemask) &&
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 644bcb665773..eb59f7eea508 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
long x;
x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
- limit - setpoint + 1);
+ (limit - setpoint) | 1);
pos_ratio = x;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
* scale global setpoint to bdi's:
* bdi_setpoint = setpoint * bdi_thresh / thresh
*/
- x = div_u64((u64)bdi_thresh << 16, thresh + 1);
+ x = div_u64((u64)bdi_thresh << 16, thresh | 1);
bdi_setpoint = setpoint * (u64)x >> 16;
/*
* Use span=(8*write_bw) in single bdi case as indicated by
@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
if (bdi_dirty < x_intercept - span / 4) {
pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
- x_intercept - bdi_setpoint + 1);
+ (x_intercept - bdi_setpoint) | 1);
} else
pos_ratio /= 4;
@@ -2111,6 +2111,25 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
EXPORT_SYMBOL(account_page_dirtied);
/*
+ * Helper function for de-accounting a dirty page without writeback.
+ *
+ * Doing this should *normally* only ever be done when a page
+ * is truncated, and is not actually mapped anywhere at all. However,
+ * fs/buffer.c does this when it notices that somebody has cleaned
+ * out all the buffers on a page without actually doing it through
+ * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
+ */
+void account_page_cleaned(struct page *page, struct address_space *mapping)
+{
+ if (mapping_cap_account_dirty(mapping)) {
+ dec_zone_page_state(page, NR_FILE_DIRTY);
+ dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
+ task_io_account_cancelled_write(PAGE_CACHE_SIZE);
+ }
+}
+EXPORT_SYMBOL(account_page_cleaned);
+
+/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
* its radix tree.
*
@@ -2209,7 +2228,8 @@ int set_page_dirty(struct page *page)
* it will confuse readahead and make it restart the size rampup
* process. But it's a trivial problem.
*/
- ClearPageReclaim(page);
+ if (PageReclaim(page))
+ ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
if (!spd)
spd = __set_page_dirty_buffers;
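
The pos_ratio divisors above switch from "x + 1" to "x | 1": OR-ing with 1 still guarantees a non-zero divisor, but it perturbs an even value by at most 1 and leaves odd values untouched. A tiny standalone demonstration (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
        unsigned long samples[] = { 0, 1, 2, 7, 1024 };
        unsigned int i;

        /* x + 1 shifts every value; x | 1 only bumps even ones and never yields 0. */
        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned long x = samples[i];

                printf("x=%lu  x+1=%lu  x|1=%lu\n", x, x + 1, x | 1);
        }
        return 0;
}
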
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40e29429e7b0..ebffa0e4a9c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1032,11 +1032,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
static int fallbacks[MIGRATE_TYPES][4] = {
[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
#ifdef CONFIG_CMA
- [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
[MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
-#else
- [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
#endif
[MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
@@ -1044,6 +1042,17 @@ static int fallbacks[MIGRATE_TYPES][4] = {
#endif
};
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma_fallback(struct zone *zone,
+ unsigned int order)
+{
+ return __rmqueue_smallest(zone, order, MIGRATE_CMA);
+}
+#else
+static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
+ unsigned int order) { return NULL; }
+#endif
+
/*
* Move the free pages in a range to the free lists of the requested type.
* Note that start_page and end_pages are not aligned on a pageblock
@@ -1136,14 +1145,40 @@ static void change_pageblock_range(struct page *pageblock_page,
* as fragmentation caused by those allocations polluting movable pageblocks
* is worse than movable allocations stealing from unmovable and reclaimable
* pageblocks.
- *
- * If we claim more than half of the pageblock, change pageblock's migratetype
- * as well.
*/
-static void try_to_steal_freepages(struct zone *zone, struct page *page,
- int start_type, int fallback_type)
+static bool can_steal_fallback(unsigned int order, int start_mt)
+{
+ /*
+	 * Leaving this order check here is intentional, even though the
+	 * next check relaxes it: if this condition is met we can actually
+	 * steal the whole pageblock, whereas the check below does not
+	 * guarantee that and is just a heuristic that may change anytime.
+ */
+ if (order >= pageblock_order)
+ return true;
+
+ if (order >= pageblock_order / 2 ||
+ start_mt == MIGRATE_RECLAIMABLE ||
+ start_mt == MIGRATE_UNMOVABLE ||
+ page_group_by_mobility_disabled)
+ return true;
+
+ return false;
+}
+
+/*
+ * This function implements the actual steal behaviour. If the order is large
+ * enough, we can steal the whole pageblock. If not, we first move the free
+ * pages in this pageblock and check whether at least half of the pages were
+ * moved; if so, we can change the pageblock's migratetype and permanently use
+ * its pages as the requested migratetype in the future.
+ */
+static void steal_suitable_fallback(struct zone *zone, struct page *page,
+ int start_type)
{
int current_order = page_order(page);
+ int pages;
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order) {
@@ -1151,19 +1186,49 @@ static void try_to_steal_freepages(struct zone *zone, struct page *page,
return;
}
- if (current_order >= pageblock_order / 2 ||
- start_type == MIGRATE_RECLAIMABLE ||
- start_type == MIGRATE_UNMOVABLE ||
- page_group_by_mobility_disabled) {
- int pages;
+ pages = move_freepages_block(zone, page, start_type);
+
+ /* Claim the whole block if over half of it is free */
+ if (pages >= (1 << (pageblock_order-1)) ||
+ page_group_by_mobility_disabled)
+ set_pageblock_migratetype(page, start_type);
+}
+
+/*
+ * Check whether there is a suitable fallback freepage with requested order.
+ * If only_stealable is true, this function returns fallback_mt only if
+ * we can steal all the other freepages together. This helps to reduce
+ * fragmentation due to mixed migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+ int migratetype, bool only_stealable, bool *can_steal)
+{
+ int i;
+ int fallback_mt;
+
+ if (area->nr_free == 0)
+ return -1;
+
+ *can_steal = false;
+ for (i = 0;; i++) {
+ fallback_mt = fallbacks[migratetype][i];
+ if (fallback_mt == MIGRATE_RESERVE)
+ break;
+
+ if (list_empty(&area->free_list[fallback_mt]))
+ continue;
- pages = move_freepages_block(zone, page, start_type);
+ if (can_steal_fallback(order, migratetype))
+ *can_steal = true;
- /* Claim the whole block if over half of it is free */
- if (pages >= (1 << (pageblock_order-1)) ||
- page_group_by_mobility_disabled)
- set_pageblock_migratetype(page, start_type);
+ if (!only_stealable)
+ return fallback_mt;
+
+ if (*can_steal)
+ return fallback_mt;
}
+
+ return -1;
}
/* Remove an element from the buddy allocator from the fallback list */
@@ -1173,64 +1238,45 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
struct free_area *area;
unsigned int current_order;
struct page *page;
+ int fallback_mt;
+ bool can_steal;
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1;
current_order >= order && current_order <= MAX_ORDER-1;
--current_order) {
- int i;
- for (i = 0;; i++) {
- int migratetype = fallbacks[start_migratetype][i];
- int buddy_type = start_migratetype;
-
- /* MIGRATE_RESERVE handled later if necessary */
- if (migratetype == MIGRATE_RESERVE)
- break;
-
- area = &(zone->free_area[current_order]);
- if (list_empty(&area->free_list[migratetype]))
- continue;
-
- page = list_entry(area->free_list[migratetype].next,
- struct page, lru);
- area->nr_free--;
-
- if (!is_migrate_cma(migratetype)) {
- try_to_steal_freepages(zone, page,
- start_migratetype,
- migratetype);
- } else {
- /*
- * When borrowing from MIGRATE_CMA, we need to
- * release the excess buddy pages to CMA
- * itself, and we do not try to steal extra
- * free pages.
- */
- buddy_type = migratetype;
- }
+ area = &(zone->free_area[current_order]);
+ fallback_mt = find_suitable_fallback(area, current_order,
+ start_migratetype, false, &can_steal);
+ if (fallback_mt == -1)
+ continue;
- /* Remove the page from the freelists */
- list_del(&page->lru);
- rmv_page_order(page);
+ page = list_entry(area->free_list[fallback_mt].next,
+ struct page, lru);
+ if (can_steal)
+ steal_suitable_fallback(zone, page, start_migratetype);
- expand(zone, page, order, current_order, area,
- buddy_type);
+ /* Remove the page from the freelists */
+ area->nr_free--;
+ list_del(&page->lru);
+ rmv_page_order(page);
- /*
- * The freepage_migratetype may differ from pageblock's
- * migratetype depending on the decisions in
- * try_to_steal_freepages(). This is OK as long as it
- * does not differ for MIGRATE_CMA pageblocks. For CMA
- * we need to make sure unallocated pages flushed from
- * pcp lists are returned to the correct freelist.
- */
- set_freepage_migratetype(page, buddy_type);
+ expand(zone, page, order, current_order, area,
+ start_migratetype);
+ /*
+ * The freepage_migratetype may differ from pageblock's
+ * migratetype depending on the decisions in
+	 * steal_suitable_fallback(). This is OK as long as it
+ * does not differ for MIGRATE_CMA pageblocks. For CMA
+ * we need to make sure unallocated pages flushed from
+ * pcp lists are returned to the correct freelist.
+ */
+ set_freepage_migratetype(page, start_migratetype);
- trace_mm_page_alloc_extfrag(page, order, current_order,
- start_migratetype, migratetype);
+ trace_mm_page_alloc_extfrag(page, order, current_order,
+ start_migratetype, fallback_mt);
- return page;
- }
+ return page;
}
return NULL;
@@ -1249,7 +1295,11 @@ retry_reserve:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
- page = __rmqueue_fallback(zone, order, migratetype);
+ if (migratetype == MIGRATE_MOVABLE)
+ page = __rmqueue_cma_fallback(zone, order);
+
+ if (!page)
+ page = __rmqueue_fallback(zone, order, migratetype);
/*
* Use MIGRATE_RESERVE rather than fail an allocation. goto
@@ -1321,7 +1371,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
int to_drain, batch;
local_irq_save(flags);
- batch = ACCESS_ONCE(pcp->batch);
+ batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0) {
free_pcppages_bulk(zone, to_drain, pcp);
@@ -1520,7 +1570,7 @@ void free_hot_cold_page(struct page *page, bool cold)
list_add_tail(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
- unsigned long batch = ACCESS_ONCE(pcp->batch);
+ unsigned long batch = READ_ONCE(pcp->batch);
free_pcppages_bulk(zone, batch, pcp);
pcp->count -= batch;
}
@@ -2362,13 +2412,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
*did_some_progress = 1;
goto out;
}
- /*
- * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
- * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
- * The caller should handle page allocation failure by itself if
- * it specifies __GFP_THISNODE.
- * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
- */
+ /* The OOM killer may not free memory on a specific node */
if (gfp_mask & __GFP_THISNODE)
goto out;
}
@@ -2623,15 +2667,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
}
/*
- * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
- * __GFP_NOWARN set) should not cause reclaim since the subsystem
- * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
- * using a larger set of nodes after it has established that the
- * allowed per node queues are empty and that nodes are
- * over allocated.
+ * If this allocation cannot block and it is for a specific node, then
+	 * fail early. There's no need to wake up kswapd or retry for a
+ * speculative node-specific allocation.
*/
- if (IS_ENABLED(CONFIG_NUMA) &&
- (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+ if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !wait)
goto nopage;
retry:
@@ -2824,7 +2864,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
/*
* Check the zones suitable for the gfp_mask contain at least one
* valid zone. It's possible to have an empty zonelist as a result
- * of GFP_THISNODE and a memoryless node
+ * of __GFP_THISNODE and a memoryless node
*/
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
@@ -3201,38 +3241,31 @@ static void show_migration_types(unsigned char type)
* Show free area list (used inside shift_scroll-lock stuff)
* We also calculate the percentage fragmentation. We do this by counting the
* memory on each free list with the exception of the first item on the list.
- * Suppresses nodes that are not allowed by current's cpuset if
- * SHOW_MEM_FILTER_NODES is passed.
+ *
+ * Bits in @filter:
+ * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
+ * cpuset.
*/
void show_free_areas(unsigned int filter)
{
+ unsigned long free_pcp = 0;
int cpu;
struct zone *zone;
for_each_populated_zone(zone) {
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
- show_node(zone);
- printk("%s per-cpu:\n", zone->name);
- for_each_online_cpu(cpu) {
- struct per_cpu_pageset *pageset;
-
- pageset = per_cpu_ptr(zone->pageset, cpu);
-
- printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
- cpu, pageset->pcp.high,
- pageset->pcp.batch, pageset->pcp.count);
- }
+ for_each_online_cpu(cpu)
+ free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
}
printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
- " unevictable:%lu"
- " dirty:%lu writeback:%lu unstable:%lu\n"
- " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+ " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+ " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
- " free_cma:%lu\n",
+ " free:%lu free_pcp:%lu free_cma:%lu\n",
global_page_state(NR_ACTIVE_ANON),
global_page_state(NR_INACTIVE_ANON),
global_page_state(NR_ISOLATED_ANON),
@@ -3243,13 +3276,14 @@ void show_free_areas(unsigned int filter)
global_page_state(NR_FILE_DIRTY),
global_page_state(NR_WRITEBACK),
global_page_state(NR_UNSTABLE_NFS),
- global_page_state(NR_FREE_PAGES),
global_page_state(NR_SLAB_RECLAIMABLE),
global_page_state(NR_SLAB_UNRECLAIMABLE),
global_page_state(NR_FILE_MAPPED),
global_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE),
+ global_page_state(NR_FREE_PAGES),
+ free_pcp,
global_page_state(NR_FREE_CMA_PAGES));
for_each_populated_zone(zone) {
@@ -3257,6 +3291,11 @@ void show_free_areas(unsigned int filter)
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
+
+ free_pcp = 0;
+ for_each_online_cpu(cpu)
+ free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
+
show_node(zone);
printk("%s"
" free:%lukB"
@@ -3283,6 +3322,8 @@ void show_free_areas(unsigned int filter)
" pagetables:%lukB"
" unstable:%lukB"
" bounce:%lukB"
+ " free_pcp:%lukB"
+ " local_pcp:%ukB"
" free_cma:%lukB"
" writeback_tmp:%lukB"
" pages_scanned:%lu"
@@ -3314,6 +3355,8 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_PAGETABLE)),
K(zone_page_state(zone, NR_UNSTABLE_NFS)),
K(zone_page_state(zone, NR_BOUNCE)),
+ K(free_pcp),
+ K(this_cpu_read(zone->pageset->pcp.count)),
K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
K(zone_page_state(zone, NR_PAGES_SCANNED)),
@@ -5717,7 +5760,7 @@ static void __setup_per_zone_wmarks(void)
* value here.
*
* The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
- * deltas controls asynch page reclaim, and so should
+ * deltas control asynch page reclaim, and so should
* not be capped for highmem.
*/
unsigned long min_pages;
@@ -6164,7 +6207,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
mask <<= (BITS_PER_LONG - bitidx - 1);
flags <<= (BITS_PER_LONG - bitidx - 1);
- word = ACCESS_ONCE(bitmap[word_bitidx]);
+ word = READ_ONCE(bitmap[word_bitidx]);
for (;;) {
old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
if (word == old_word)
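
The fallback rework above splits the scan for a usable migratetype into find_suitable_fallback(). A simplified userspace sketch of that scan (names and the table are shortened; the real code also reports whether the pageblock can be stolen via can_steal):

#include <stdio.h>

enum { UNMOVABLE, RECLAIMABLE, MOVABLE, RESERVE, NR_TYPES };

/* Per-migratetype fallback order; RESERVE terminates each row. */
static const int fallbacks[NR_TYPES][3] = {
        [UNMOVABLE]   = { RECLAIMABLE, MOVABLE, RESERVE },
        [RECLAIMABLE] = { UNMOVABLE, MOVABLE, RESERVE },
        [MOVABLE]     = { RECLAIMABLE, UNMOVABLE, RESERVE },
        [RESERVE]     = { RESERVE },
};

/* Return the first fallback type with free pages, or -1 if none. */
static int find_fallback(const int nr_free[NR_TYPES], int migratetype)
{
        int i;

        for (i = 0; ; i++) {
                int fallback_mt = fallbacks[migratetype][i];

                if (fallback_mt == RESERVE)     /* handled separately upstream */
                        return -1;
                if (nr_free[fallback_mt])
                        return fallback_mt;
        }
}

int main(void)
{
        int nr_free[NR_TYPES] = { [UNMOVABLE] = 3 };    /* only unmovable pages free */

        printf("fallback for MOVABLE: %d (UNMOVABLE=%d)\n",
               find_fallback(nr_free, MOVABLE), UNMOVABLE);
        return 0;
}
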
diff --git a/mm/page_io.c b/mm/page_io.c
index e6045804c8d8..6424869e275e 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -20,8 +20,8 @@
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
-#include <linux/aio.h>
#include <linux/blkdev.h>
+#include <linux/uio.h>
#include <asm/pgtable.h>
static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -274,13 +274,10 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
init_sync_kiocb(&kiocb, swap_file);
kiocb.ki_pos = page_file_offset(page);
- kiocb.ki_nbytes = PAGE_SIZE;
set_page_writeback(page);
unlock_page(page);
- ret = mapping->a_ops->direct_IO(ITER_BVEC | WRITE,
- &kiocb, &from,
- kiocb.ki_pos);
+ ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos);
if (ret == PAGE_SIZE) {
count_vm_event(PSWPOUT);
ret = 0;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 755a42c76eb4..303c908790ef 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -101,7 +101,8 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
buddy_idx = __find_buddy_index(page_idx, order);
buddy = page + (buddy_idx - page_idx);
- if (!is_migrate_isolate_page(buddy)) {
+ if (pfn_valid_within(page_to_pfn(buddy)) &&
+ !is_migrate_isolate_page(buddy)) {
__isolate_free_page(page, order);
kernel_map_pages(page, (1 << order), 1);
set_page_refcounted(page);
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index b1597690530c..e88d071648c2 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -257,22 +257,18 @@ static ssize_t process_vm_rw(pid_t pid,
struct iovec *iov_r = iovstack_r;
struct iov_iter iter;
ssize_t rc;
+ int dir = vm_write ? WRITE : READ;
if (flags != 0)
return -EINVAL;
/* Check iovecs */
- if (vm_write)
- rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
- iovstack_l, &iov_l);
- else
- rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
- iovstack_l, &iov_l);
- if (rc <= 0)
+ rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
+ if (rc < 0)
+ return rc;
+ if (!iov_iter_count(&iter))
goto free_iovecs;
- iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
-
rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
iovstack_r, &iov_r);
if (rc <= 0)
@@ -283,8 +279,7 @@ static ssize_t process_vm_rw(pid_t pid,
free_iovecs:
if (iov_r != iovstack_r)
kfree(iov_r);
- if (iov_l != iovstack_l)
- kfree(iov_l);
+ kfree(iov_l);
return rc;
}
@@ -320,21 +315,16 @@ compat_process_vm_rw(compat_pid_t pid,
struct iovec *iov_r = iovstack_r;
struct iov_iter iter;
ssize_t rc = -EFAULT;
+ int dir = vm_write ? WRITE : READ;
if (flags != 0)
return -EINVAL;
- if (vm_write)
- rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
- UIO_FASTIOV, iovstack_l,
- &iov_l);
- else
- rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
- UIO_FASTIOV, iovstack_l,
- &iov_l);
- if (rc <= 0)
+ rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
+ if (rc < 0)
+ return rc;
+ if (!iov_iter_count(&iter))
goto free_iovecs;
- iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
UIO_FASTIOV, iovstack_r,
&iov_r);
@@ -346,8 +336,7 @@ compat_process_vm_rw(compat_pid_t pid,
free_iovecs:
if (iov_r != iovstack_r)
kfree(iov_r);
- if (iov_l != iovstack_l)
- kfree(iov_l);
+ kfree(iov_l);
return rc;
}
diff --git a/mm/rmap.c b/mm/rmap.c
index c161a14b6a8f..24dd3f9fee27 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -456,7 +456,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
unsigned long anon_mapping;
rcu_read_lock();
- anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+ anon_mapping = (unsigned long)READ_ONCE(page->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
@@ -500,14 +500,14 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
unsigned long anon_mapping;
rcu_read_lock();
- anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+ anon_mapping = (unsigned long)READ_ONCE(page->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- root_anon_vma = ACCESS_ONCE(anon_vma->root);
+ root_anon_vma = READ_ONCE(anon_vma->root);
if (down_read_trylock(&root_anon_vma->rwsem)) {
/*
* If the page is still mapped, then this anon_vma is still
diff --git a/mm/shmem.c b/mm/shmem.c
index cf2d0ca010bc..de981370fbc5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -31,7 +31,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
static struct vfsmount *shm_mnt;
@@ -544,7 +544,7 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct shmem_inode_info *info = SHMEM_I(inode);
int error;
@@ -2274,7 +2274,7 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
*/
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int ret;
/*
@@ -2298,7 +2298,7 @@ out:
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
shmem_free_inode(inode->i_sb);
@@ -2315,7 +2315,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
if (!simple_empty(dentry))
return -ENOTEMPTY;
- drop_nlink(dentry->d_inode);
+ drop_nlink(d_inode(dentry));
drop_nlink(dir);
return shmem_unlink(dir, dentry);
}
@@ -2336,8 +2336,8 @@ static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, stru
}
old_dir->i_ctime = old_dir->i_mtime =
new_dir->i_ctime = new_dir->i_mtime =
- old_dentry->d_inode->i_ctime =
- new_dentry->d_inode->i_ctime = CURRENT_TIME;
+ d_inode(old_dentry)->i_ctime =
+ d_inode(new_dentry)->i_ctime = CURRENT_TIME;
return 0;
}
@@ -2376,7 +2376,7 @@ static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
*/
static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
{
- struct inode *inode = old_dentry->d_inode;
+ struct inode *inode = d_inode(old_dentry);
int they_are_dirs = S_ISDIR(inode->i_mode);
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
@@ -2396,10 +2396,10 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc
return error;
}
- if (new_dentry->d_inode) {
+ if (d_really_is_positive(new_dentry)) {
(void) shmem_unlink(new_dir, new_dentry);
if (they_are_dirs) {
- drop_nlink(new_dentry->d_inode);
+ drop_nlink(d_inode(new_dentry));
drop_nlink(old_dir);
}
} else if (they_are_dirs) {
@@ -2476,14 +2476,14 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
- nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
+ nd_set_link(nd, SHMEM_I(d_inode(dentry))->symlink);
return NULL;
}
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
- int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
+ int error = shmem_getpage(d_inode(dentry), 0, &page, SGP_READ, NULL);
nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
if (page)
unlock_page(page);
@@ -2574,7 +2574,7 @@ static int shmem_xattr_validate(const char *name)
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
- struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
+ struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
int err;
/*
@@ -2595,7 +2595,7 @@ static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
static int shmem_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
+ struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
int err;
/*
@@ -2615,7 +2615,7 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
static int shmem_removexattr(struct dentry *dentry, const char *name)
{
- struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
+ struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
int err;
/*
@@ -2635,7 +2635,7 @@ static int shmem_removexattr(struct dentry *dentry, const char *name)
static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
+ struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
return simple_xattr_list(&info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */
@@ -3118,8 +3118,6 @@ static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
.llseek = shmem_file_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = shmem_file_read_iter,
.write_iter = generic_file_write_iter,
.fsync = noop_fsync,
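
The shmem hunks above swap direct dentry->d_inode dereferences for the d_inode()/d_really_is_positive() accessors. A minimal userspace sketch of that accessor pattern follows; the struct layouts and the standalone helpers are simplified assumptions, not the kernel's dcache definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct inode {
        unsigned long i_ino;
};

struct dentry {
        struct inode *d_inode;          /* NULL for a negative dentry */
};

/* Accessor mirroring the kernel helper: callers stop touching the field directly. */
static inline struct inode *d_inode(const struct dentry *dentry)
{
        return dentry->d_inode;
}

/* True when the dentry refers to an existing inode. */
static inline bool d_really_is_positive(const struct dentry *dentry)
{
        return dentry->d_inode != NULL;
}

int main(void)
{
        struct inode ino = { .i_ino = 42 };
        struct dentry positive = { .d_inode = &ino };
        struct dentry negative = { .d_inode = NULL };

        printf("%d %d\n", d_really_is_positive(&positive),
               d_really_is_positive(&negative));
        return 0;
}
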
diff --git a/mm/slab.c b/mm/slab.c
index c4b89eaf4c96..7eb38dd1cefa 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -857,6 +857,11 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
return NULL;
}
+static inline gfp_t gfp_exact_node(gfp_t flags)
+{
+ return flags;
+}
+
#else /* CONFIG_NUMA */
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
@@ -1023,6 +1028,15 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
return __cache_free_alien(cachep, objp, node, page_node);
}
+
+/*
+ * Construct gfp mask to allocate from a specific node but do not invoke reclaim
+ * or warn about failures.
+ */
+static inline gfp_t gfp_exact_node(gfp_t flags)
+{
+ return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
+}
#endif
/*
@@ -2825,7 +2839,7 @@ alloc_done:
if (unlikely(!ac->avail)) {
int x;
force_grow:
- x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
+ x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
/* cache_grow can reenable interrupts, then ac could change. */
ac = cpu_cache_get(cachep);
@@ -3019,7 +3033,7 @@ retry:
get_node(cache, nid) &&
get_node(cache, nid)->free_objects) {
obj = ____cache_alloc_node(cache,
- flags | GFP_THISNODE, nid);
+ gfp_exact_node(flags), nid);
if (obj)
break;
}
@@ -3047,7 +3061,7 @@ retry:
nid = page_to_nid(page);
if (cache_grow(cache, flags, nid, page)) {
obj = ____cache_alloc_node(cache,
- flags | GFP_THISNODE, nid);
+ gfp_exact_node(flags), nid);
if (!obj)
/*
* Another processor may allocate the
@@ -3118,7 +3132,7 @@ retry:
must_grow:
spin_unlock(&n->list_lock);
- x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
+ x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
if (x)
goto retry;
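
gfp_exact_node() above pins the allocation to the requested node while stripping the bits that would let the allocator sleep or warn. The sketch below reproduces only the mask manipulation; the *_DEMO flag values are made-up stand-ins for the real __GFP_* constants in gfp.h.

typedef unsigned int gfp_t;

/* Illustrative stand-ins for the real __GFP_* bits. */
#define GFP_THISNODE_DEMO  0x1u         /* only allocate on the requested node */
#define GFP_NOWARN_DEMO    0x2u         /* suppress allocation-failure warnings */
#define GFP_WAIT_DEMO      0x4u         /* allow the allocator to sleep/reclaim */

/* Same shape as gfp_exact_node(): add node pinning, drop reclaim. */
static inline gfp_t gfp_exact_node_demo(gfp_t flags)
{
        return (flags | GFP_THISNODE_DEMO | GFP_NOWARN_DEMO) & ~GFP_WAIT_DEMO;
}

int main(void)
{
        gfp_t flags = GFP_WAIT_DEMO;    /* caller passed a sleeping mask */
        gfp_t exact = gfp_exact_node_demo(flags);

        return exact == (GFP_THISNODE_DEMO | GFP_NOWARN_DEMO) ? 0 : 1;
}
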
diff --git a/mm/slob.c b/mm/slob.c
index 94a7fede6d48..4765f65019c7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -532,7 +532,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
return 0;
}
-void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;
@@ -558,7 +558,6 @@ void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
return b;
}
-EXPORT_SYMBOL(slob_alloc_node);
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
diff --git a/mm/slub.c b/mm/slub.c
index 82c473780c91..54c0876b43d5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,7 +374,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
freelist_new, counters_new))
- return 1;
+ return true;
} else
#endif
{
@@ -384,7 +384,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
page->freelist = freelist_new;
set_page_slub_counters(page, counters_new);
slab_unlock(page);
- return 1;
+ return true;
}
slab_unlock(page);
}
@@ -396,7 +396,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif
- return 0;
+ return false;
}
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
@@ -410,7 +410,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
freelist_new, counters_new))
- return 1;
+ return true;
} else
#endif
{
@@ -424,7 +424,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
set_page_slub_counters(page, counters_new);
slab_unlock(page);
local_irq_restore(flags);
- return 1;
+ return true;
}
slab_unlock(page);
local_irq_restore(flags);
@@ -437,7 +437,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif
- return 0;
+ return false;
}
#ifdef CONFIG_SLUB_DEBUG
@@ -1137,15 +1137,6 @@ static int __init setup_slub_debug(char *str)
*/
goto check_slabs;
- if (tolower(*str) == 'o') {
- /*
- * Avoid enabling debugging on caches if its minimum order
- * would increase as a result.
- */
- disable_higher_order_debug = 1;
- goto out;
- }
-
slub_debug = 0;
if (*str == '-')
/*
@@ -1176,6 +1167,13 @@ static int __init setup_slub_debug(char *str)
case 'a':
slub_debug |= SLAB_FAILSLAB;
break;
+ case 'o':
+ /*
+ * Avoid enabling debugging on caches if its minimum
+ * order would increase as a result.
+ */
+ disable_higher_order_debug = 1;
+ break;
default:
pr_err("slub_debug option '%c' unknown. skipped\n",
*str);
@@ -4279,7 +4277,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
int node;
struct page *page;
- page = ACCESS_ONCE(c->page);
+ page = READ_ONCE(c->page);
if (!page)
continue;
@@ -4294,7 +4292,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
total += x;
nodes[node] += x;
- page = ACCESS_ONCE(c->partial);
+ page = READ_ONCE(c->partial);
if (page) {
node = page_to_nid(page);
if (flags & SO_TOTAL)
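
The slub hunks (and the swap hunks further down) replace ACCESS_ONCE() with READ_ONCE() for lockless reads of c->page and c->partial. Below is a rough userspace approximation of the single-access guarantee; it assumes GNU __typeof__ and is not the kernel's compiler.h implementation.

#include <stdio.h>

/* Sketch of the READ_ONCE() idea: force exactly one load through a
 * volatile-qualified pointer so the compiler can neither tear the access
 * nor re-read the variable later. */
#define READ_ONCE_DEMO(x) (*(const volatile __typeof__(x) *)&(x))

static int shared_counter = 42;

int main(void)
{
        int snapshot = READ_ONCE_DEMO(shared_counter);  /* single, untorn read */

        printf("%d\n", snapshot);
        return 0;
}
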
diff --git a/mm/swap.c b/mm/swap.c
index cd3a5e64cea9..a7251a8ed532 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
+#include <linux/hugetlb.h>
#include "internal.h"
@@ -42,7 +43,7 @@ int page_cluster;
static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
/*
* This path almost never happens for VM activity - pages are normally
@@ -75,7 +76,14 @@ static void __put_compound_page(struct page *page)
{
compound_page_dtor *dtor;
- __page_cache_release(page);
+ /*
+ * __page_cache_release() is supposed to be called for thp, not for
+ * hugetlb. This is because a hugetlb page never has PageLRU set
+ * (it is never put on any LRU list) and no memcg routines should
+ * be called for hugetlb (it has a separate hugetlb_cgroup).
+ */
+ if (!PageHuge(page))
+ __page_cache_release(page);
dtor = get_compound_page_dtor(page);
(*dtor)(page);
}
@@ -743,7 +751,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
* be write it out by flusher threads as this is much more effective
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
int lru, file;
@@ -811,36 +819,36 @@ void lru_add_drain_cpu(int cpu)
local_irq_restore(flags);
}
- pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+ pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
activate_page_drain(cpu);
}
/**
- * deactivate_page - forcefully deactivate a page
+ * deactivate_file_page - forcefully deactivate a file page
* @page: page to deactivate
*
* This function hints the VM that @page is a good reclaim candidate,
* for example if its invalidation fails due to the page being dirty
* or under writeback.
*/
-void deactivate_page(struct page *page)
+void deactivate_file_page(struct page *page)
{
/*
- * In a workload with many unevictable page such as mprotect, unevictable
- * page deactivation for accelerating reclaim is pointless.
+ * In a workload with many unevictable pages, such as mprotect,
+ * unevictable page deactivation for accelerating reclaim is pointless.
*/
if (PageUnevictable(page))
return;
if (likely(get_page_unless_zero(page))) {
- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+ struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
if (!pagevec_add(pvec, page))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
- put_cpu_var(lru_deactivate_pvecs);
+ pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+ put_cpu_var(lru_deactivate_file_pvecs);
}
}
@@ -872,7 +880,7 @@ void lru_add_drain_all(void)
if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
- pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
need_activate_page_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
schedule_work_on(cpu, work);
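
deactivate_file_page() above queues pages into a per-CPU pagevec and only walks the LRU once the vector fills. The single-threaded sketch below shows just that batch-then-drain shape; the buffer size and the printf flush are placeholders for the real pagevec and LRU handling.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PVEC_SIZE 14                    /* same order of magnitude as a pagevec */

struct pagevec_demo {
        size_t nr;
        int pages[PVEC_SIZE];           /* stand-in for struct page pointers */
};

/* Returns true while there is still room after adding the page. */
static bool pagevec_add_demo(struct pagevec_demo *pvec, int page)
{
        pvec->pages[pvec->nr++] = page;
        return pvec->nr < PVEC_SIZE;
}

/* Flush the whole batch in one pass (the kernel takes the LRU lock here). */
static void pagevec_drain_demo(struct pagevec_demo *pvec)
{
        for (size_t i = 0; i < pvec->nr; i++)
                printf("deactivating page %d\n", pvec->pages[i]);
        pvec->nr = 0;
}

int main(void)
{
        struct pagevec_demo pvec = { 0 };

        for (int page = 0; page < 40; page++)
                if (!pagevec_add_demo(&pvec, page))
                        pagevec_drain_demo(&pvec);      /* batch full: drain once */
        pagevec_drain_demo(&pvec);                      /* drain the tail */
        return 0;
}
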
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 405923f77334..8bc8e66138da 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -390,7 +390,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
unsigned int pages, max_pages, last_ra;
static atomic_t last_readahead_pages;
- max_pages = 1 << ACCESS_ONCE(page_cluster);
+ max_pages = 1 << READ_ONCE(page_cluster);
if (max_pages <= 1)
return 1;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 63f55ccb9b26..a7e72103f23b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1312,7 +1312,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
else
continue;
}
- count = ACCESS_ONCE(si->swap_map[i]);
+ count = READ_ONCE(si->swap_map[i]);
if (count && swap_count(count) != SWAP_MAP_BAD)
break;
}
diff --git a/mm/truncate.c b/mm/truncate.c
index ddec5a5966d7..66af9031fae8 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -93,35 +93,6 @@ void do_invalidatepage(struct page *page, unsigned int offset,
}
/*
- * This cancels just the dirty bit on the kernel page itself, it
- * does NOT actually remove dirty bits on any mmap's that may be
- * around. It also leaves the page tagged dirty, so any sync
- * activity will still find it on the dirty lists, and in particular,
- * clear_page_dirty_for_io() will still look at the dirty bits in
- * the VM.
- *
- * Doing this should *normally* only ever be done when a page
- * is truncated, and is not actually mapped anywhere at all. However,
- * fs/buffer.c does this when it notices that somebody has cleaned
- * out all the buffers on a page without actually doing it through
- * the VM. Can you say "ext3 is horribly ugly"? Tought you could.
- */
-void cancel_dirty_page(struct page *page, unsigned int account_size)
-{
- if (TestClearPageDirty(page)) {
- struct address_space *mapping = page->mapping;
- if (mapping && mapping_cap_account_dirty(mapping)) {
- dec_zone_page_state(page, NR_FILE_DIRTY);
- dec_bdi_stat(inode_to_bdi(mapping->host),
- BDI_RECLAIMABLE);
- if (account_size)
- task_io_account_cancelled_write(account_size);
- }
- }
-}
-EXPORT_SYMBOL(cancel_dirty_page);
-
-/*
* If truncate cannot remove the fs-private metadata from the page, the page
* becomes orphaned. It will be left on the LRU and may even be mapped into
* user pagetables if we're racing with filemap_fault().
@@ -140,7 +111,13 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
if (page_has_private(page))
do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
- cancel_dirty_page(page, PAGE_CACHE_SIZE);
+ /*
+ * Some filesystems seem to re-dirty the page even after
+ * the VM has canceled the dirty bit (e.g. ext3 journaling).
+ * Hence the dirty accounting check is placed after invalidation.
+ */
+ if (TestClearPageDirty(page))
+ account_page_cleaned(page, mapping);
ClearPageMappedToDisk(page);
delete_from_page_cache(page);
@@ -513,7 +490,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
* of interest and try to speed up its reclaim.
*/
if (!ret)
- deactivate_page(page);
+ deactivate_file_page(page);
count += ret;
}
pagevec_remove_exceptionals(&pvec);
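
truncate_complete_page() now open-codes the cancellation: clear the dirty flag and, only if it was actually set, roll back the dirty accounting. A toy sketch of that test-and-clear-then-account shape, with an invented flag bit and counter standing in for the page flags and NR_FILE_DIRTY:

#include <stdbool.h>
#include <stdio.h>

#define PG_DIRTY_DEMO (1u << 0)

static unsigned long nr_file_dirty;     /* stand-in for NR_FILE_DIRTY */

/* Clear the bit and report whether it had been set. */
static bool test_clear_dirty_demo(unsigned int *flags)
{
        bool was_dirty = *flags & PG_DIRTY_DEMO;

        *flags &= ~PG_DIRTY_DEMO;
        return was_dirty;
}

int main(void)
{
        unsigned int page_flags = PG_DIRTY_DEMO;

        nr_file_dirty = 1;
        if (test_clear_dirty_demo(&page_flags))
                nr_file_dirty--;        /* rough account_page_cleaned() equivalent */
        printf("dirty pages: %lu\n", nr_file_dirty);
        return 0;
}
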
diff --git a/mm/util.c b/mm/util.c
index 3981ae9d1b15..68ff8a5361e7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -325,9 +325,37 @@ void kvfree(const void *addr)
}
EXPORT_SYMBOL(kvfree);
+static inline void *__page_rmapping(struct page *page)
+{
+ unsigned long mapping;
+
+ mapping = (unsigned long)page->mapping;
+ mapping &= ~PAGE_MAPPING_FLAGS;
+
+ return (void *)mapping;
+}
+
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+void *page_rmapping(struct page *page)
+{
+ page = compound_head(page);
+ return __page_rmapping(page);
+}
+
+struct anon_vma *page_anon_vma(struct page *page)
+{
+ unsigned long mapping;
+
+ page = compound_head(page);
+ mapping = (unsigned long)page->mapping;
+ if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+ return NULL;
+ return __page_rmapping(page);
+}
+
struct address_space *page_mapping(struct page *page)
{
- struct address_space *mapping = page->mapping;
+ unsigned long mapping;
/* This happens if someone calls flush_dcache_page on slab page */
if (unlikely(PageSlab(page)))
@@ -337,10 +365,13 @@ struct address_space *page_mapping(struct page *page)
swp_entry_t entry;
entry.val = page_private(page);
- mapping = swap_address_space(entry);
- } else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
- mapping = NULL;
- return mapping;
+ return swap_address_space(entry);
+ }
+
+ mapping = (unsigned long)page->mapping;
+ if (mapping & PAGE_MAPPING_FLAGS)
+ return NULL;
+ return page->mapping;
}
int overcommit_ratio_handler(struct ctl_table *table, int write,
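
page_rmapping() and page_anon_vma() above treat page->mapping as a tagged pointer: the low PAGE_MAPPING_* bits must be masked off before the pointer is usable. The standalone sketch below shows the tagging trick with invented flag values and a dummy struct; it relies on the pointee being aligned so the low bits are free.

#include <stdio.h>

/* Low pointer bits used as tags; the real values live in include/linux/mm.h. */
#define MAPPING_ANON_DEMO   0x1ul
#define MAPPING_FLAGS_DEMO  0x3ul

struct address_space_demo { const char *name; };

/* Strip the tag bits to recover the raw pointer, whatever it points at. */
static void *page_rmapping_demo(unsigned long mapping)
{
        return (void *)(mapping & ~MAPPING_FLAGS_DEMO);
}

/* Only untag when the anon bit (and nothing else) is set. */
static void *page_anon_vma_demo(unsigned long mapping)
{
        if ((mapping & MAPPING_FLAGS_DEMO) != MAPPING_ANON_DEMO)
                return NULL;
        return page_rmapping_demo(mapping);
}

int main(void)
{
        static struct address_space_demo as = { "file mapping" };
        unsigned long file_mapping = (unsigned long)&as;        /* untagged, aligned */
        unsigned long anon_mapping = file_mapping | MAPPING_ANON_DEMO;

        printf("file: %p anon: %p\n",
               page_anon_vma_demo(file_mapping),        /* NULL: not anon */
               page_anon_vma_demo(anon_mapping));       /* untagged pointer */
        return 0;
}
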
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 49abccf29a29..2faaa2976447 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -29,6 +29,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
@@ -74,6 +75,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
+ if (pmd_clear_huge(pmd))
+ continue;
if (pmd_none_or_clear_bad(pmd))
continue;
vunmap_pte_range(pmd, addr, next);
@@ -88,6 +91,8 @@ static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
+ if (pud_clear_huge(pud))
+ continue;
if (pud_none_or_clear_bad(pud))
continue;
vunmap_pmd_range(pud, addr, next);
@@ -760,7 +765,7 @@ struct vmap_block {
spinlock_t lock;
struct vmap_area *va;
unsigned long free, dirty;
- DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
+ unsigned long dirty_min, dirty_max; /*< dirty range */
struct list_head free_list;
struct rcu_head rcu_head;
struct list_head purge;
@@ -791,13 +796,31 @@ static unsigned long addr_to_vb_idx(unsigned long addr)
return addr;
}
-static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
+{
+ unsigned long addr;
+
+ addr = va_start + (pages_off << PAGE_SHIFT);
+ BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
+ return (void *)addr;
+}
+
+/**
+ * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it;
+ * the number of pages cannot exceed VMAP_BBMAP_BITS
+ * @order: how many 2^order pages should be occupied in newly allocated block
+ * @gfp_mask: flags for the page level allocator
+ *
+ * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
+ */
+static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
struct vmap_block_queue *vbq;
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
int node, err;
+ void *vaddr;
node = numa_node_id();
@@ -821,11 +844,15 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
return ERR_PTR(err);
}
+ vaddr = vmap_block_vaddr(va->va_start, 0);
spin_lock_init(&vb->lock);
vb->va = va;
- vb->free = VMAP_BBMAP_BITS;
+ /* At least something should be left free */
+ BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
+ vb->free = VMAP_BBMAP_BITS - (1UL << order);
vb->dirty = 0;
- bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
+ vb->dirty_min = VMAP_BBMAP_BITS;
+ vb->dirty_max = 0;
INIT_LIST_HEAD(&vb->free_list);
vb_idx = addr_to_vb_idx(va->va_start);
@@ -837,11 +864,11 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
vbq = &get_cpu_var(vmap_block_queue);
spin_lock(&vbq->lock);
- list_add_rcu(&vb->free_list, &vbq->free);
+ list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
put_cpu_var(vmap_block_queue);
- return vb;
+ return vaddr;
}
static void free_vmap_block(struct vmap_block *vb)
@@ -876,7 +903,8 @@ static void purge_fragmented_blocks(int cpu)
if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
vb->free = 0; /* prevent further allocs after releasing lock */
vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
- bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
+ vb->dirty_min = 0;
+ vb->dirty_max = VMAP_BBMAP_BITS;
spin_lock(&vbq->lock);
list_del_rcu(&vb->free_list);
spin_unlock(&vbq->lock);
@@ -905,7 +933,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
struct vmap_block_queue *vbq;
struct vmap_block *vb;
- unsigned long addr = 0;
+ void *vaddr = NULL;
unsigned int order;
BUG_ON(size & ~PAGE_MASK);
@@ -920,43 +948,38 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
}
order = get_order(size);
-again:
rcu_read_lock();
vbq = &get_cpu_var(vmap_block_queue);
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- int i;
+ unsigned long pages_off;
spin_lock(&vb->lock);
- if (vb->free < 1UL << order)
- goto next;
+ if (vb->free < (1UL << order)) {
+ spin_unlock(&vb->lock);
+ continue;
+ }
- i = VMAP_BBMAP_BITS - vb->free;
- addr = vb->va->va_start + (i << PAGE_SHIFT);
- BUG_ON(addr_to_vb_idx(addr) !=
- addr_to_vb_idx(vb->va->va_start));
+ pages_off = VMAP_BBMAP_BITS - vb->free;
+ vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
vb->free -= 1UL << order;
if (vb->free == 0) {
spin_lock(&vbq->lock);
list_del_rcu(&vb->free_list);
spin_unlock(&vbq->lock);
}
+
spin_unlock(&vb->lock);
break;
-next:
- spin_unlock(&vb->lock);
}
put_cpu_var(vmap_block_queue);
rcu_read_unlock();
- if (!addr) {
- vb = new_vmap_block(gfp_mask);
- if (IS_ERR(vb))
- return vb;
- goto again;
- }
+ /* Allocate new block if nothing was found */
+ if (!vaddr)
+ vaddr = new_vmap_block(order, gfp_mask);
- return (void *)addr;
+ return vaddr;
}
static void vb_free(const void *addr, unsigned long size)
@@ -974,6 +997,7 @@ static void vb_free(const void *addr, unsigned long size)
order = get_order(size);
offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
+ offset >>= PAGE_SHIFT;
vb_idx = addr_to_vb_idx((unsigned long)addr);
rcu_read_lock();
@@ -984,7 +1008,10 @@ static void vb_free(const void *addr, unsigned long size)
vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
spin_lock(&vb->lock);
- BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
+
+ /* Expand dirty range */
+ vb->dirty_min = min(vb->dirty_min, offset);
+ vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
vb->dirty += 1UL << order;
if (vb->dirty == VMAP_BBMAP_BITS) {
@@ -1023,25 +1050,18 @@ void vm_unmap_aliases(void)
rcu_read_lock();
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- int i, j;
-
spin_lock(&vb->lock);
- i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
- if (i < VMAP_BBMAP_BITS) {
+ if (vb->dirty) {
+ unsigned long va_start = vb->va->va_start;
unsigned long s, e;
- j = find_last_bit(vb->dirty_map,
- VMAP_BBMAP_BITS);
- j = j + 1; /* need exclusive index */
+ s = va_start + (vb->dirty_min << PAGE_SHIFT);
+ e = va_start + (vb->dirty_max << PAGE_SHIFT);
- s = vb->va->va_start + (i << PAGE_SHIFT);
- e = vb->va->va_start + (j << PAGE_SHIFT);
- flush = 1;
+ start = min(s, start);
+ end = max(e, end);
- if (s < start)
- start = s;
- if (e > end)
- end = e;
+ flush = 1;
}
spin_unlock(&vb->lock);
}
@@ -1314,7 +1334,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
BUG_ON(in_interrupt());
if (flags & VM_IOREMAP)
- align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
+ align = 1ul << clamp_t(int, fls_long(size),
+ PAGE_SHIFT, IOREMAP_MAX_ORDER);
size = PAGE_ALIGN(size);
if (unlikely(!size))
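
The vmap_block rework above drops the per-block dirty bitmap in favour of a [dirty_min, dirty_max) range that only grows until the block is purged. A tiny sketch of that coarser bookkeeping, with NBITS_DEMO standing in for VMAP_BBMAP_BITS:

#include <stdio.h>

#define NBITS_DEMO 1024UL               /* stand-in for VMAP_BBMAP_BITS */

struct vmap_block_demo {
        unsigned long dirty_min;        /* starts past the end: nothing dirty */
        unsigned long dirty_max;
};

static void block_init(struct vmap_block_demo *vb)
{
        vb->dirty_min = NBITS_DEMO;
        vb->dirty_max = 0;
}

/* Mark [off, off + npages) dirty by widening the tracked range. */
static void mark_dirty(struct vmap_block_demo *vb,
                       unsigned long off, unsigned long npages)
{
        if (off < vb->dirty_min)
                vb->dirty_min = off;
        if (off + npages > vb->dirty_max)
                vb->dirty_max = off + npages;
}

int main(void)
{
        struct vmap_block_demo vb;

        block_init(&vb);
        mark_dirty(&vb, 100, 4);
        mark_dirty(&vb, 20, 2);
        /* One flush covers [20, 104): coarser than a bitmap but much cheaper. */
        printf("flush range: [%lu, %lu)\n", vb.dirty_min, vb.dirty_max);
        return 0;
}
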
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0dec1fa5f656..08bd7a3d464a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -12,35 +12,6 @@
*/
/*
- * This allocator is designed for use with zram. Thus, the allocator is
- * supposed to work well under low memory conditions. In particular, it
- * never attempts higher order page allocation which is very likely to
- * fail under memory pressure. On the other hand, if we just use single
- * (0-order) pages, it would suffer from very high fragmentation --
- * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
- * This was one of the major issues with its predecessor (xvmalloc).
- *
- * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
- * and links them together using various 'struct page' fields. These linked
- * pages act as a single higher-order page i.e. an object can span 0-order
- * page boundaries. The code refers to these linked pages as a single entity
- * called zspage.
- *
- * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
- * since this satisfies the requirements of all its current users (in the
- * worst case, page is incompressible and is thus stored "as-is" i.e. in
- * uncompressed form). For allocation requests larger than this size, failure
- * is returned (see zs_malloc).
- *
- * Additionally, zs_malloc() does not return a dereferenceable pointer.
- * Instead, it returns an opaque handle (unsigned long) which encodes actual
- * location of the allocated object. The reason for this indirection is that
- * zsmalloc does not keep zspages permanently mapped since that would cause
- * issues on 32-bit systems where the VA region for kernel space mappings
- * is very small. So, before using the allocating memory, the object has to
- * be mapped using zs_map_object() to get a usable pointer and subsequently
- * unmapped using zs_unmap_object().
- *
* Following is how we use various fields and flags of underlying
* struct page(s) to form a zspage.
*
@@ -57,6 +28,8 @@
*
* page->private (union with page->first_page): refers to the
* component page after the first page
+ * If the page is the first_page of a huge object, it stores the
+ * handle; see size_class->huge.
* page->freelist: points to the first free object in zspage.
* Free objects are linked together using in-place
* metadata.
@@ -78,6 +51,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
@@ -110,6 +84,8 @@
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+#define ZS_HANDLE_SIZE (sizeof(unsigned long))
+
/*
* Object location (<PFN>, <obj_idx>) is encoded as
* as single (unsigned long) handle value.
@@ -133,13 +109,33 @@
#endif
#endif
#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
-#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
+
+/*
+ * The memory allocated for a handle keeps the object position by
+ * encoding <page, obj_idx>, and the encoded value has room in its
+ * least significant bit (i.e., see obj_to_location).
+ * We use that bit to synchronize object access by the user with
+ * migration.
+ */
+#define HANDLE_PIN_BIT 0
+
+/*
+ * The head of an allocated object carries OBJ_ALLOCATED_TAG so we
+ * can tell whether the object has been allocated or not.
+ * It's okay to put the status bit in the least significant bit
+ * because the header keeps a handle, which is a 4-byte-aligned
+ * address, so we have room for at least two bits.
+ */
+#define OBJ_ALLOCATED_TAG 1
+#define OBJ_TAG_BITS 1
+#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
/*
@@ -172,6 +168,8 @@ enum fullness_group {
enum zs_stat_type {
OBJ_ALLOCATED,
OBJ_USED,
+ CLASS_ALMOST_FULL,
+ CLASS_ALMOST_EMPTY,
NR_ZS_STAT_TYPE,
};
@@ -216,6 +214,8 @@ struct size_class {
/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
int pages_per_zspage;
+ /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+ bool huge;
#ifdef CONFIG_ZSMALLOC_STAT
struct zs_size_stat stats;
@@ -233,14 +233,24 @@ struct size_class {
* This must be power of 2 and less than or equal to ZS_ALIGN
*/
struct link_free {
- /* Handle of next free chunk (encodes <PFN, obj_idx>) */
- void *next;
+ union {
+ /*
+ * Position of the next free chunk (encodes <PFN, obj_idx>).
+ * Only valid for a non-allocated object.
+ */
+ void *next;
+ /*
+ * Handle of allocated object.
+ */
+ unsigned long handle;
+ };
};
struct zs_pool {
char *name;
struct size_class **size_class;
+ struct kmem_cache *handle_cachep;
gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated;
@@ -267,8 +277,37 @@ struct mapping_area {
#endif
char *vm_addr; /* address of kmap_atomic()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
+ bool huge;
};
+static int create_handle_cache(struct zs_pool *pool)
+{
+ pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
+ 0, 0, NULL);
+ return pool->handle_cachep ? 0 : 1;
+}
+
+static void destroy_handle_cache(struct zs_pool *pool)
+{
+ kmem_cache_destroy(pool->handle_cachep);
+}
+
+static unsigned long alloc_handle(struct zs_pool *pool)
+{
+ return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+ pool->flags & ~__GFP_HIGHMEM);
+}
+
+static void free_handle(struct zs_pool *pool, unsigned long handle)
+{
+ kmem_cache_free(pool->handle_cachep, (void *)handle);
+}
+
+static void record_obj(unsigned long handle, unsigned long obj)
+{
+ *(unsigned long *)handle = obj;
+}
+
/* zpool driver */
#ifdef CONFIG_ZPOOL
@@ -346,6 +385,11 @@ static struct zpool_driver zs_zpool_driver = {
MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
+static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
+{
+ return pages_per_zspage * PAGE_SIZE / size;
+}
+
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@@ -396,9 +440,182 @@ static int get_size_class_index(int size)
idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
ZS_SIZE_CLASS_DELTA);
- return idx;
+ return min(zs_size_classes - 1, idx);
+}
+
+#ifdef CONFIG_ZSMALLOC_STAT
+
+static inline void zs_stat_inc(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+ class->stats.objs[type] += cnt;
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+ class->stats.objs[type] -= cnt;
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+ enum zs_stat_type type)
+{
+ return class->stats.objs[type];
+}
+
+static int __init zs_stat_init(void)
+{
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
+ if (!zs_stat_root)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+ debugfs_remove_recursive(zs_stat_root);
+}
+
+static int zs_stats_size_show(struct seq_file *s, void *v)
+{
+ int i;
+ struct zs_pool *pool = s->private;
+ struct size_class *class;
+ int objs_per_zspage;
+ unsigned long class_almost_full, class_almost_empty;
+ unsigned long obj_allocated, obj_used, pages_used;
+ unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
+ unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+
+ seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
+ "class", "size", "almost_full", "almost_empty",
+ "obj_allocated", "obj_used", "pages_used",
+ "pages_per_zspage");
+
+ for (i = 0; i < zs_size_classes; i++) {
+ class = pool->size_class[i];
+
+ if (class->index != i)
+ continue;
+
+ spin_lock(&class->lock);
+ class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
+ class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
+ obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+ obj_used = zs_stat_get(class, OBJ_USED);
+ spin_unlock(&class->lock);
+
+ objs_per_zspage = get_maxobj_per_zspage(class->size,
+ class->pages_per_zspage);
+ pages_used = obj_allocated / objs_per_zspage *
+ class->pages_per_zspage;
+
+ seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
+ i, class->size, class_almost_full, class_almost_empty,
+ obj_allocated, obj_used, pages_used,
+ class->pages_per_zspage);
+
+ total_class_almost_full += class_almost_full;
+ total_class_almost_empty += class_almost_empty;
+ total_objs += obj_allocated;
+ total_used_objs += obj_used;
+ total_pages += pages_used;
+ }
+
+ seq_puts(s, "\n");
+ seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
+ "Total", "", total_class_almost_full,
+ total_class_almost_empty, total_objs,
+ total_used_objs, total_pages);
+
+ return 0;
+}
+
+static int zs_stats_size_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zs_stats_size_show, inode->i_private);
+}
+
+static const struct file_operations zs_stat_size_ops = {
+ .open = zs_stats_size_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+ struct dentry *entry;
+
+ if (!zs_stat_root)
+ return -ENODEV;
+
+ entry = debugfs_create_dir(name, zs_stat_root);
+ if (!entry) {
+ pr_warn("debugfs dir <%s> creation failed\n", name);
+ return -ENOMEM;
+ }
+ pool->stat_dentry = entry;
+
+ entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
+ pool->stat_dentry, pool, &zs_stat_size_ops);
+ if (!entry) {
+ pr_warn("%s: debugfs file entry <%s> creation failed\n",
+ name, "classes");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void zs_pool_stat_destroy(struct zs_pool *pool)
+{
+ debugfs_remove_recursive(pool->stat_dentry);
+}
+
+#else /* CONFIG_ZSMALLOC_STAT */
+
+static inline void zs_stat_inc(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+}
+
+static inline void zs_stat_dec(struct size_class *class,
+ enum zs_stat_type type, unsigned long cnt)
+{
+}
+
+static inline unsigned long zs_stat_get(struct size_class *class,
+ enum zs_stat_type type)
+{
+ return 0;
+}
+
+static int __init zs_stat_init(void)
+{
+ return 0;
+}
+
+static void __exit zs_stat_exit(void)
+{
+}
+
+static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
+{
+ return 0;
+}
+
+static inline void zs_pool_stat_destroy(struct zs_pool *pool)
+{
}
+#endif
+
+
/*
* For each size class, zspages are divided into different groups
* depending on how "full" they are. This was done so that we could
@@ -419,7 +636,7 @@ static enum fullness_group get_fullness_group(struct page *page)
fg = ZS_EMPTY;
else if (inuse == max_objects)
fg = ZS_FULL;
- else if (inuse <= max_objects / fullness_threshold_frac)
+ else if (inuse <= 3 * max_objects / fullness_threshold_frac)
fg = ZS_ALMOST_EMPTY;
else
fg = ZS_ALMOST_FULL;
@@ -448,6 +665,8 @@ static void insert_zspage(struct page *page, struct size_class *class,
list_add_tail(&page->lru, &(*head)->lru);
*head = page;
+ zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
+ CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}
/*
@@ -473,6 +692,8 @@ static void remove_zspage(struct page *page, struct size_class *class,
struct page, lru);
list_del_init(&page->lru);
+ zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
+ CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}
/*
@@ -484,11 +705,10 @@ static void remove_zspage(struct page *page, struct size_class *class,
* page from the freelist of the old fullness group to that of the new
* fullness group.
*/
-static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+static enum fullness_group fix_fullness_group(struct size_class *class,
struct page *page)
{
int class_idx;
- struct size_class *class;
enum fullness_group currfg, newfg;
BUG_ON(!is_first_page(page));
@@ -498,7 +718,6 @@ static enum fullness_group fix_fullness_group(struct zs_pool *pool,
if (newfg == currfg)
goto out;
- class = pool->size_class[class_idx];
remove_zspage(page, class, currfg);
insert_zspage(page, class, newfg);
set_zspage_mapping(page, class_idx, newfg);
@@ -512,7 +731,8 @@ out:
* to form a zspage for each size class. This is important
* to reduce wastage due to unusable space left at end of
* each zspage which is given as:
- * wastage = Zp - Zp % size_class
+ * wastage = Zp % class_size
+ * usage = Zp - wastage
* where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
*
* For example, for size class of 3/8 * PAGE_SIZE, we should
@@ -571,35 +791,50 @@ static struct page *get_next_page(struct page *page)
/*
* Encode <page, obj_idx> as a single handle value.
- * On hardware platforms with physical memory starting at 0x0 the pfn
- * could be 0 so we ensure that the handle will never be 0 by adjusting the
- * encoded obj_idx value before encoding.
+ * We use the least bit of handle for tagging.
*/
-static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
+static void *location_to_obj(struct page *page, unsigned long obj_idx)
{
- unsigned long handle;
+ unsigned long obj;
if (!page) {
BUG_ON(obj_idx);
return NULL;
}
- handle = page_to_pfn(page) << OBJ_INDEX_BITS;
- handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
+ obj = page_to_pfn(page) << OBJ_INDEX_BITS;
+ obj |= ((obj_idx) & OBJ_INDEX_MASK);
+ obj <<= OBJ_TAG_BITS;
- return (void *)handle;
+ return (void *)obj;
}
/*
* Decode <page, obj_idx> pair from the given object handle. We adjust the
* decoded obj_idx back to its original value since it was adjusted in
- * obj_location_to_handle().
+ * location_to_obj().
*/
-static void obj_handle_to_location(unsigned long handle, struct page **page,
+static void obj_to_location(unsigned long obj, struct page **page,
unsigned long *obj_idx)
{
- *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
- *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
+ obj >>= OBJ_TAG_BITS;
+ *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *obj_idx = (obj & OBJ_INDEX_MASK);
+}
+
+static unsigned long handle_to_obj(unsigned long handle)
+{
+ return *(unsigned long *)handle;
+}
+
+static unsigned long obj_to_head(struct size_class *class, struct page *page,
+ void *obj)
+{
+ if (class->huge) {
+ VM_BUG_ON(!is_first_page(page));
+ return *(unsigned long *)page_private(page);
+ } else
+ return *(unsigned long *)obj;
}
static unsigned long obj_idx_to_offset(struct page *page,
@@ -613,6 +848,25 @@ static unsigned long obj_idx_to_offset(struct page *page,
return off + obj_idx * class_size;
}
+static inline int trypin_tag(unsigned long handle)
+{
+ unsigned long *ptr = (unsigned long *)handle;
+
+ return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
+}
+
+static void pin_tag(unsigned long handle)
+{
+ while (!trypin_tag(handle));
+}
+
+static void unpin_tag(unsigned long handle)
+{
+ unsigned long *ptr = (unsigned long *)handle;
+
+ clear_bit_unlock(HANDLE_PIN_BIT, ptr);
+}
+
static void reset_page(struct page *page)
{
clear_bit(PG_private, &page->flags);
@@ -674,7 +928,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
- link->next = obj_location_to_handle(page, i++);
+ link->next = location_to_obj(page, i++);
link += class->size / sizeof(*link);
}
@@ -684,7 +938,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
* page (if present)
*/
next_page = get_next_page(page);
- link->next = obj_location_to_handle(next_page, 0);
+ link->next = location_to_obj(next_page, 0);
kunmap_atomic(vaddr);
page = next_page;
off %= PAGE_SIZE;
@@ -738,7 +992,7 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
init_zspage(first_page, class);
- first_page->freelist = obj_location_to_handle(first_page, 0);
+ first_page->freelist = location_to_obj(first_page, 0);
/* Maximum number of objects we can store in this zspage */
first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
@@ -860,12 +1114,19 @@ static void __zs_unmap_object(struct mapping_area *area,
{
int sizes[2];
void *addr;
- char *buf = area->vm_buf;
+ char *buf;
/* no write fastpath */
if (area->vm_mm == ZS_MM_RO)
goto out;
+ buf = area->vm_buf;
+ if (!area->huge) {
+ buf = buf + ZS_HANDLE_SIZE;
+ size -= ZS_HANDLE_SIZE;
+ off += ZS_HANDLE_SIZE;
+ }
+
sizes[0] = PAGE_SIZE - off;
sizes[1] = size - sizes[0];
@@ -952,11 +1213,6 @@ static void init_zs_size_classes(void)
zs_size_classes = nr;
}
-static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
-{
- return pages_per_zspage * PAGE_SIZE / size;
-}
-
static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
if (prev->pages_per_zspage != pages_per_zspage)
@@ -969,166 +1225,13 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
-#ifdef CONFIG_ZSMALLOC_STAT
-
-static inline void zs_stat_inc(struct size_class *class,
- enum zs_stat_type type, unsigned long cnt)
-{
- class->stats.objs[type] += cnt;
-}
-
-static inline void zs_stat_dec(struct size_class *class,
- enum zs_stat_type type, unsigned long cnt)
-{
- class->stats.objs[type] -= cnt;
-}
-
-static inline unsigned long zs_stat_get(struct size_class *class,
- enum zs_stat_type type)
-{
- return class->stats.objs[type];
-}
-
-static int __init zs_stat_init(void)
-{
- if (!debugfs_initialized())
- return -ENODEV;
-
- zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
- if (!zs_stat_root)
- return -ENOMEM;
-
- return 0;
-}
-
-static void __exit zs_stat_exit(void)
-{
- debugfs_remove_recursive(zs_stat_root);
-}
-
-static int zs_stats_size_show(struct seq_file *s, void *v)
+static bool zspage_full(struct page *page)
{
- int i;
- struct zs_pool *pool = s->private;
- struct size_class *class;
- int objs_per_zspage;
- unsigned long obj_allocated, obj_used, pages_used;
- unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
-
- seq_printf(s, " %5s %5s %13s %10s %10s\n", "class", "size",
- "obj_allocated", "obj_used", "pages_used");
-
- for (i = 0; i < zs_size_classes; i++) {
- class = pool->size_class[i];
-
- if (class->index != i)
- continue;
-
- spin_lock(&class->lock);
- obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
- obj_used = zs_stat_get(class, OBJ_USED);
- spin_unlock(&class->lock);
-
- objs_per_zspage = get_maxobj_per_zspage(class->size,
- class->pages_per_zspage);
- pages_used = obj_allocated / objs_per_zspage *
- class->pages_per_zspage;
-
- seq_printf(s, " %5u %5u %10lu %10lu %10lu\n", i,
- class->size, obj_allocated, obj_used, pages_used);
-
- total_objs += obj_allocated;
- total_used_objs += obj_used;
- total_pages += pages_used;
- }
-
- seq_puts(s, "\n");
- seq_printf(s, " %5s %5s %10lu %10lu %10lu\n", "Total", "",
- total_objs, total_used_objs, total_pages);
-
- return 0;
-}
-
-static int zs_stats_size_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zs_stats_size_show, inode->i_private);
-}
-
-static const struct file_operations zs_stat_size_ops = {
- .open = zs_stats_size_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int zs_pool_stat_create(char *name, struct zs_pool *pool)
-{
- struct dentry *entry;
-
- if (!zs_stat_root)
- return -ENODEV;
-
- entry = debugfs_create_dir(name, zs_stat_root);
- if (!entry) {
- pr_warn("debugfs dir <%s> creation failed\n", name);
- return -ENOMEM;
- }
- pool->stat_dentry = entry;
-
- entry = debugfs_create_file("obj_in_classes", S_IFREG | S_IRUGO,
- pool->stat_dentry, pool, &zs_stat_size_ops);
- if (!entry) {
- pr_warn("%s: debugfs file entry <%s> creation failed\n",
- name, "obj_in_classes");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void zs_pool_stat_destroy(struct zs_pool *pool)
-{
- debugfs_remove_recursive(pool->stat_dentry);
-}
-
-#else /* CONFIG_ZSMALLOC_STAT */
-
-static inline void zs_stat_inc(struct size_class *class,
- enum zs_stat_type type, unsigned long cnt)
-{
-}
-
-static inline void zs_stat_dec(struct size_class *class,
- enum zs_stat_type type, unsigned long cnt)
-{
-}
-
-static inline unsigned long zs_stat_get(struct size_class *class,
- enum zs_stat_type type)
-{
- return 0;
-}
-
-static int __init zs_stat_init(void)
-{
- return 0;
-}
-
-static void __exit zs_stat_exit(void)
-{
-}
-
-static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
-{
- return 0;
-}
+ BUG_ON(!is_first_page(page));
-static inline void zs_pool_stat_destroy(struct zs_pool *pool)
-{
+ return page->inuse == page->objects;
}
-#endif
-
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
return atomic_long_read(&pool->pages_allocated);
@@ -1153,13 +1256,14 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm)
{
struct page *page;
- unsigned long obj_idx, off;
+ unsigned long obj, obj_idx, off;
unsigned int class_idx;
enum fullness_group fg;
struct size_class *class;
struct mapping_area *area;
struct page *pages[2];
+ void *ret;
BUG_ON(!handle);
@@ -1170,7 +1274,11 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
*/
BUG_ON(in_interrupt());
- obj_handle_to_location(handle, &page, &obj_idx);
+ /* From now on, migration cannot move the object */
+ pin_tag(handle);
+
+ obj = handle_to_obj(handle);
+ obj_to_location(obj, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
class = pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
@@ -1180,7 +1288,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
area->vm_addr = kmap_atomic(page);
- return area->vm_addr + off;
+ ret = area->vm_addr + off;
+ goto out;
}
/* this object spans two pages */
@@ -1188,14 +1297,19 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
pages[1] = get_next_page(page);
BUG_ON(!pages[1]);
- return __zs_map_object(area, pages, off, class->size);
+ ret = __zs_map_object(area, pages, off, class->size);
+out:
+ if (!class->huge)
+ ret += ZS_HANDLE_SIZE;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
struct page *page;
- unsigned long obj_idx, off;
+ unsigned long obj, obj_idx, off;
unsigned int class_idx;
enum fullness_group fg;
@@ -1204,7 +1318,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
BUG_ON(!handle);
- obj_handle_to_location(handle, &page, &obj_idx);
+ obj = handle_to_obj(handle);
+ obj_to_location(obj, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
class = pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
@@ -1222,9 +1337,42 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
__zs_unmap_object(area, pages, off, class->size);
}
put_cpu_var(zs_map_area);
+ unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
+static unsigned long obj_malloc(struct page *first_page,
+ struct size_class *class, unsigned long handle)
+{
+ unsigned long obj;
+ struct link_free *link;
+
+ struct page *m_page;
+ unsigned long m_objidx, m_offset;
+ void *vaddr;
+
+ handle |= OBJ_ALLOCATED_TAG;
+ obj = (unsigned long)first_page->freelist;
+ obj_to_location(obj, &m_page, &m_objidx);
+ m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+ vaddr = kmap_atomic(m_page);
+ link = (struct link_free *)vaddr + m_offset / sizeof(*link);
+ first_page->freelist = link->next;
+ if (!class->huge)
+ /* record handle in the header of allocated chunk */
+ link->handle = handle;
+ else
+ /* record handle in first_page->private */
+ set_page_private(first_page, handle);
+ kunmap_atomic(vaddr);
+ first_page->inuse++;
+ zs_stat_inc(class, OBJ_USED, 1);
+
+ return obj;
+}
+
+
/**
* zs_malloc - Allocate block of given size from pool.
* @pool: pool to allocate from
@@ -1236,17 +1384,19 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
*/
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
- unsigned long obj;
- struct link_free *link;
+ unsigned long handle, obj;
struct size_class *class;
- void *vaddr;
-
- struct page *first_page, *m_page;
- unsigned long m_objidx, m_offset;
+ struct page *first_page;
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
+ handle = alloc_handle(pool);
+ if (!handle)
+ return 0;
+
+ /* extra space in chunk to keep the handle */
+ size += ZS_HANDLE_SIZE;
class = pool->size_class[get_size_class_index(size)];
spin_lock(&class->lock);
@@ -1255,8 +1405,10 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (!first_page) {
spin_unlock(&class->lock);
first_page = alloc_zspage(class, pool->flags);
- if (unlikely(!first_page))
+ if (unlikely(!first_page)) {
+ free_handle(pool, handle);
return 0;
+ }
set_zspage_mapping(first_page, class->index, ZS_EMPTY);
atomic_long_add(class->pages_per_zspage,
@@ -1267,73 +1419,360 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
class->size, class->pages_per_zspage));
}
- obj = (unsigned long)first_page->freelist;
- obj_handle_to_location(obj, &m_page, &m_objidx);
- m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
-
- vaddr = kmap_atomic(m_page);
- link = (struct link_free *)vaddr + m_offset / sizeof(*link);
- first_page->freelist = link->next;
- memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(vaddr);
-
- first_page->inuse++;
- zs_stat_inc(class, OBJ_USED, 1);
+ obj = obj_malloc(first_page, class, handle);
/* Now move the zspage to another fullness group, if required */
- fix_fullness_group(pool, first_page);
+ fix_fullness_group(class, first_page);
+ record_obj(handle, obj);
spin_unlock(&class->lock);
- return obj;
+ return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
-void zs_free(struct zs_pool *pool, unsigned long obj)
+static void obj_free(struct zs_pool *pool, struct size_class *class,
+ unsigned long obj)
{
struct link_free *link;
struct page *first_page, *f_page;
unsigned long f_objidx, f_offset;
void *vaddr;
-
int class_idx;
- struct size_class *class;
enum fullness_group fullness;
- if (unlikely(!obj))
- return;
+ BUG_ON(!obj);
- obj_handle_to_location(obj, &f_page, &f_objidx);
+ obj &= ~OBJ_ALLOCATED_TAG;
+ obj_to_location(obj, &f_page, &f_objidx);
first_page = get_first_page(f_page);
get_zspage_mapping(first_page, &class_idx, &fullness);
- class = pool->size_class[class_idx];
f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
- spin_lock(&class->lock);
+ vaddr = kmap_atomic(f_page);
/* Insert this object in containing zspage's freelist */
- vaddr = kmap_atomic(f_page);
link = (struct link_free *)(vaddr + f_offset);
link->next = first_page->freelist;
+ if (class->huge)
+ set_page_private(first_page, 0);
kunmap_atomic(vaddr);
first_page->freelist = (void *)obj;
-
first_page->inuse--;
- fullness = fix_fullness_group(pool, first_page);
-
zs_stat_dec(class, OBJ_USED, 1);
- if (fullness == ZS_EMPTY)
+}
+
+void zs_free(struct zs_pool *pool, unsigned long handle)
+{
+ struct page *first_page, *f_page;
+ unsigned long obj, f_objidx;
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group fullness;
+
+ if (unlikely(!handle))
+ return;
+
+ pin_tag(handle);
+ obj = handle_to_obj(handle);
+ obj_to_location(obj, &f_page, &f_objidx);
+ first_page = get_first_page(f_page);
+
+ get_zspage_mapping(first_page, &class_idx, &fullness);
+ class = pool->size_class[class_idx];
+
+ spin_lock(&class->lock);
+ obj_free(pool, class, obj);
+ fullness = fix_fullness_group(class, first_page);
+ if (fullness == ZS_EMPTY) {
zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
class->size, class->pages_per_zspage));
-
+ atomic_long_sub(class->pages_per_zspage,
+ &pool->pages_allocated);
+ free_zspage(first_page);
+ }
spin_unlock(&class->lock);
+ unpin_tag(handle);
+
+ free_handle(pool, handle);
+}
+EXPORT_SYMBOL_GPL(zs_free);
+
+static void zs_object_copy(unsigned long src, unsigned long dst,
+ struct size_class *class)
+{
+ struct page *s_page, *d_page;
+ unsigned long s_objidx, d_objidx;
+ unsigned long s_off, d_off;
+ void *s_addr, *d_addr;
+ int s_size, d_size, size;
+ int written = 0;
+
+ s_size = d_size = class->size;
+
+ obj_to_location(src, &s_page, &s_objidx);
+ obj_to_location(dst, &d_page, &d_objidx);
+
+ s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
+ d_off = obj_idx_to_offset(d_page, d_objidx, class->size);
+
+ if (s_off + class->size > PAGE_SIZE)
+ s_size = PAGE_SIZE - s_off;
+
+ if (d_off + class->size > PAGE_SIZE)
+ d_size = PAGE_SIZE - d_off;
+
+ s_addr = kmap_atomic(s_page);
+ d_addr = kmap_atomic(d_page);
+
+ while (1) {
+ size = min(s_size, d_size);
+ memcpy(d_addr + d_off, s_addr + s_off, size);
+ written += size;
+
+ if (written == class->size)
+ break;
+
+ s_off += size;
+ s_size -= size;
+ d_off += size;
+ d_size -= size;
+
+ if (s_off >= PAGE_SIZE) {
+ kunmap_atomic(d_addr);
+ kunmap_atomic(s_addr);
+ s_page = get_next_page(s_page);
+ BUG_ON(!s_page);
+ s_addr = kmap_atomic(s_page);
+ d_addr = kmap_atomic(d_page);
+ s_size = class->size - written;
+ s_off = 0;
+ }
+
+ if (d_off >= PAGE_SIZE) {
+ kunmap_atomic(d_addr);
+ d_page = get_next_page(d_page);
+ BUG_ON(!d_page);
+ d_addr = kmap_atomic(d_page);
+ d_size = class->size - written;
+ d_off = 0;
+ }
+ }
+
+ kunmap_atomic(d_addr);
+ kunmap_atomic(s_addr);
+}
+
+/*
+ * Find an allocated object in the zspage, starting from the given
+ * index, and return its handle.
+ */
+static unsigned long find_alloced_obj(struct page *page, int index,
+ struct size_class *class)
+{
+ unsigned long head;
+ int offset = 0;
+ unsigned long handle = 0;
+ void *addr = kmap_atomic(page);
+
+ if (!is_first_page(page))
+ offset = page->index;
+ offset += class->size * index;
+
+ while (offset < PAGE_SIZE) {
+ head = obj_to_head(class, page, addr + offset);
+ if (head & OBJ_ALLOCATED_TAG) {
+ handle = head & ~OBJ_ALLOCATED_TAG;
+ if (trypin_tag(handle))
+ break;
+ handle = 0;
+ }
+
+ offset += class->size;
+ index++;
+ }
+
+ kunmap_atomic(addr);
+ return handle;
+}
+
+struct zs_compact_control {
+ /* Source page for migration, which could be a subpage of a zspage. */
+ struct page *s_page;
+ /* Destination page for migration, which should be the first page
+ * of a zspage. */
+ struct page *d_page;
+ /* Starting object index within @s_page, used to scan for live
+ * objects in the subpage. */
+ int index;
+ /* how many objects have been migrated */
+ int nr_migrated;
+};
+
+static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
+ struct zs_compact_control *cc)
+{
+ unsigned long used_obj, free_obj;
+ unsigned long handle;
+ struct page *s_page = cc->s_page;
+ struct page *d_page = cc->d_page;
+ unsigned long index = cc->index;
+ int nr_migrated = 0;
+ int ret = 0;
+
+ while (1) {
+ handle = find_alloced_obj(s_page, index, class);
+ if (!handle) {
+ s_page = get_next_page(s_page);
+ if (!s_page)
+ break;
+ index = 0;
+ continue;
+ }
+
+ /* Stop if there is no more space */
+ if (zspage_full(d_page)) {
+ unpin_tag(handle);
+ ret = -ENOMEM;
+ break;
+ }
+
+ used_obj = handle_to_obj(handle);
+ free_obj = obj_malloc(d_page, class, handle);
+ zs_object_copy(used_obj, free_obj, class);
+ index++;
+ record_obj(handle, free_obj);
+ unpin_tag(handle);
+ obj_free(pool, class, used_obj);
+ nr_migrated++;
+ }
+
+ /* Remember last position in this iteration */
+ cc->s_page = s_page;
+ cc->index = index;
+ cc->nr_migrated = nr_migrated;
+
+ return ret;
+}
+
+static struct page *alloc_target_page(struct size_class *class)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
+ page = class->fullness_list[i];
+ if (page) {
+ remove_zspage(page, class, i);
+ break;
+ }
+ }
+
+ return page;
+}
+
+static void putback_zspage(struct zs_pool *pool, struct size_class *class,
+ struct page *first_page)
+{
+ enum fullness_group fullness;
+
+ BUG_ON(!is_first_page(first_page));
+
+ fullness = get_fullness_group(first_page);
+ insert_zspage(first_page, class, fullness);
+ set_zspage_mapping(first_page, class->index, fullness);
if (fullness == ZS_EMPTY) {
+ zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
atomic_long_sub(class->pages_per_zspage,
&pool->pages_allocated);
+
free_zspage(first_page);
}
}
-EXPORT_SYMBOL_GPL(zs_free);
+
+static struct page *isolate_source_page(struct size_class *class)
+{
+ struct page *page;
+
+ page = class->fullness_list[ZS_ALMOST_EMPTY];
+ if (page)
+ remove_zspage(page, class, ZS_ALMOST_EMPTY);
+
+ return page;
+}
+
+static unsigned long __zs_compact(struct zs_pool *pool,
+ struct size_class *class)
+{
+ int nr_to_migrate;
+ struct zs_compact_control cc;
+ struct page *src_page;
+ struct page *dst_page = NULL;
+ unsigned long nr_total_migrated = 0;
+
+ spin_lock(&class->lock);
+ while ((src_page = isolate_source_page(class))) {
+
+ BUG_ON(!is_first_page(src_page));
+
+ /* The goal is to migrate all live objects in source page */
+ nr_to_migrate = src_page->inuse;
+ cc.index = 0;
+ cc.s_page = src_page;
+
+ while ((dst_page = alloc_target_page(class))) {
+ cc.d_page = dst_page;
+ /*
+ * If there is no more space in dst_page, try to
+ * allocate another zspage.
+ */
+ if (!migrate_zspage(pool, class, &cc))
+ break;
+
+ putback_zspage(pool, class, dst_page);
+ nr_total_migrated += cc.nr_migrated;
+ nr_to_migrate -= cc.nr_migrated;
+ }
+
+ /* Stop if we couldn't find slot */
+ if (dst_page == NULL)
+ break;
+
+ putback_zspage(pool, class, dst_page);
+ putback_zspage(pool, class, src_page);
+ spin_unlock(&class->lock);
+ nr_total_migrated += cc.nr_migrated;
+ cond_resched();
+ spin_lock(&class->lock);
+ }
+
+ if (src_page)
+ putback_zspage(pool, class, src_page);
+
+ spin_unlock(&class->lock);
+
+ return nr_total_migrated;
+}
+
+unsigned long zs_compact(struct zs_pool *pool)
+{
+ int i;
+ unsigned long nr_migrated = 0;
+ struct size_class *class;
+
+ for (i = zs_size_classes - 1; i >= 0; i--) {
+ class = pool->size_class[i];
+ if (!class)
+ continue;
+ if (class->index != i)
+ continue;
+ nr_migrated += __zs_compact(pool, class);
+ }
+
+ return nr_migrated;
+}
+EXPORT_SYMBOL_GPL(zs_compact);
/**
* zs_create_pool - Creates an allocation pool to work from.
@@ -1355,20 +1794,20 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
if (!pool)
return NULL;
- pool->name = kstrdup(name, GFP_KERNEL);
- if (!pool->name) {
- kfree(pool);
- return NULL;
- }
-
pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
GFP_KERNEL);
if (!pool->size_class) {
- kfree(pool->name);
kfree(pool);
return NULL;
}
+ pool->name = kstrdup(name, GFP_KERNEL);
+ if (!pool->name)
+ goto err;
+
+ if (create_handle_cache(pool))
+ goto err;
+
/*
* Iterate reversly, because, size of size_class that we want to use
* for merging should be larger or equal to current size.
@@ -1406,6 +1845,9 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
class->size = size;
class->index = i;
class->pages_per_zspage = pages_per_zspage;
+ if (pages_per_zspage == 1 &&
+ get_maxobj_per_zspage(size, pages_per_zspage) == 1)
+ class->huge = true;
spin_lock_init(&class->lock);
pool->size_class[i] = class;
@@ -1450,6 +1892,7 @@ void zs_destroy_pool(struct zs_pool *pool)
kfree(class);
}
+ destroy_handle_cache(pool);
kfree(pool->size_class);
kfree(pool->name);
kfree(pool);
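
Most of the zsmalloc churn above introduces a handle indirection: zs_malloc() now returns a pointer to a small slab object whose value encodes <PFN, obj_idx> shifted up by one tag bit, leaving the low bit free for OBJ_ALLOCATED_TAG / HANDLE_PIN_BIT. The userspace sketch below mirrors that packing with made-up bit widths in place of _PFN_BITS and OBJ_INDEX_BITS.

#include <stdio.h>

/* Illustrative widths; the kernel derives these from MAX_PHYSMEM_BITS. */
#define OBJ_TAG_BITS_DEMO   1UL
#define OBJ_INDEX_BITS_DEMO 10UL
#define OBJ_INDEX_MASK_DEMO ((1UL << OBJ_INDEX_BITS_DEMO) - 1)
#define OBJ_ALLOCATED_DEMO  1UL         /* lowest bit tags an allocated object */

/* Pack <pfn, obj_idx> and shift up to leave the low tag bit free. */
static unsigned long location_to_obj_demo(unsigned long pfn,
                                          unsigned long obj_idx)
{
        unsigned long obj = (pfn << OBJ_INDEX_BITS_DEMO) |
                            (obj_idx & OBJ_INDEX_MASK_DEMO);

        return obj << OBJ_TAG_BITS_DEMO;
}

/* Undo the packing; the right shift discards whatever tag bits are set. */
static void obj_to_location_demo(unsigned long obj,
                                 unsigned long *pfn, unsigned long *obj_idx)
{
        obj >>= OBJ_TAG_BITS_DEMO;
        *pfn = obj >> OBJ_INDEX_BITS_DEMO;
        *obj_idx = obj & OBJ_INDEX_MASK_DEMO;
}

int main(void)
{
        unsigned long pfn, idx;
        unsigned long obj = location_to_obj_demo(0x1234, 7) | OBJ_ALLOCATED_DEMO;

        obj_to_location_demo(obj, &pfn, &idx);  /* the shift drops the tag bit */
        printf("pfn=%#lx idx=%lu\n", pfn, idx);
        return 0;
}
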
diff --git a/net/6lowpan/Kconfig b/net/6lowpan/Kconfig
index e4a02ef55102..7fa0f382e7d1 100644
--- a/net/6lowpan/Kconfig
+++ b/net/6lowpan/Kconfig
@@ -1,6 +1,61 @@
-config 6LOWPAN
+menuconfig 6LOWPAN
tristate "6LoWPAN Support"
depends on IPV6
---help---
This enables IPv6 over Low power Wireless Personal Area Network -
"6LoWPAN" which is supported by IEEE 802.15.4 or Bluetooth stacks.
+
+menuconfig 6LOWPAN_NHC
+ tristate "Next Header Compression Support"
+ depends on 6LOWPAN
+ default y
+ ---help---
+ Support for next header compression.
+
+if 6LOWPAN_NHC
+
+config 6LOWPAN_NHC_DEST
+ tristate "Destination Options Header Support"
+ default y
+ ---help---
+ 6LoWPAN IPv6 Destination Options Header compression according to
+ RFC6282.
+
+config 6LOWPAN_NHC_FRAGMENT
+ tristate "Fragment Header Support"
+ default y
+ ---help---
+ 6LoWPAN IPv6 Fragment Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_HOP
+ tristate "Hop-by-Hop Options Header Support"
+ default y
+ ---help---
+ 6LoWPAN IPv6 Hop-by-Hop Options Header compression according to
+ RFC6282.
+
+config 6LOWPAN_NHC_IPV6
+ tristate "IPv6 Header Support"
+ default y
+ ---help---
+ 6LoWPAN IPv6 Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_MOBILITY
+ tristate "Mobility Header Support"
+ default y
+ ---help---
+ 6LoWPAN IPv6 Mobility Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_ROUTING
+ tristate "Routing Header Support"
+ default y
+ ---help---
+ 6LoWPAN IPv6 Routing Header compression according to RFC6282.
+
+config 6LOWPAN_NHC_UDP
+ tristate "UDP Header Support"
+ default y
+ ---help---
+ 6LoWPAN IPv6 UDP Header compression according to RFC6282.
+
+endif
diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile
index 415886bb456a..eb8baa72adc8 100644
--- a/net/6lowpan/Makefile
+++ b/net/6lowpan/Makefile
@@ -1,3 +1,12 @@
-obj-$(CONFIG_6LOWPAN) := 6lowpan.o
+obj-$(CONFIG_6LOWPAN) += 6lowpan.o
-6lowpan-y := iphc.o
+6lowpan-y := iphc.o nhc.o
+
+#rfc6282 nhcs
+obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
+obj-$(CONFIG_6LOWPAN_NHC_FRAGMENT) += nhc_fragment.o
+obj-$(CONFIG_6LOWPAN_NHC_HOP) += nhc_hop.o
+obj-$(CONFIG_6LOWPAN_NHC_IPV6) += nhc_ipv6.o
+obj-$(CONFIG_6LOWPAN_NHC_MOBILITY) += nhc_mobility.o
+obj-$(CONFIG_6LOWPAN_NHC_ROUTING) += nhc_routing.o
+obj-$(CONFIG_6LOWPAN_NHC_UDP) += nhc_udp.o
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 32ffec6ef164..94a375c04f21 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -54,6 +54,8 @@
#include <net/ipv6.h>
#include <net/af_ieee802154.h>
+#include "nhc.h"
+
/* Uncompress address function for source and
* destination address(non-multicast).
*
@@ -224,77 +226,6 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
return 0;
}
-static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
-{
- bool fail;
- u8 tmp = 0, val = 0;
-
- fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
-
- if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
- pr_debug("UDP header uncompression\n");
- switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
- case LOWPAN_NHC_UDP_CS_P_00:
- fail |= lowpan_fetch_skb(skb, &uh->source,
- sizeof(uh->source));
- fail |= lowpan_fetch_skb(skb, &uh->dest,
- sizeof(uh->dest));
- break;
- case LOWPAN_NHC_UDP_CS_P_01:
- fail |= lowpan_fetch_skb(skb, &uh->source,
- sizeof(uh->source));
- fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
- uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
- break;
- case LOWPAN_NHC_UDP_CS_P_10:
- fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
- uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
- fail |= lowpan_fetch_skb(skb, &uh->dest,
- sizeof(uh->dest));
- break;
- case LOWPAN_NHC_UDP_CS_P_11:
- fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
- uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
- (val >> 4));
- uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
- (val & 0x0f));
- break;
- default:
- pr_debug("ERROR: unknown UDP format\n");
- goto err;
- }
-
- pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
- ntohs(uh->source), ntohs(uh->dest));
-
- /* checksum */
- if (tmp & LOWPAN_NHC_UDP_CS_C) {
- pr_debug_ratelimited("checksum elided currently not supported\n");
- goto err;
- } else {
- fail |= lowpan_fetch_skb(skb, &uh->check,
- sizeof(uh->check));
- }
-
- /* UDP length needs to be infered from the lower layers
- * here, we obtain the hint from the remaining size of the
- * frame
- */
- uh->len = htons(skb->len + sizeof(struct udphdr));
- pr_debug("uncompressed UDP length: src = %d", ntohs(uh->len));
- } else {
- pr_debug("ERROR: unsupported NH format\n");
- goto err;
- }
-
- if (fail)
- goto err;
-
- return 0;
-err:
- return -EINVAL;
-}
-
/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
@@ -425,29 +356,11 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
return -EINVAL;
}
- /* UDP data uncompression */
+ /* Next header data uncompression */
if (iphc0 & LOWPAN_IPHC_NH_C) {
- struct udphdr uh;
- const int needed = sizeof(struct udphdr) + sizeof(hdr);
-
- if (uncompress_udp_header(skb, &uh))
- return -EINVAL;
-
- /* replace the compressed UDP head by the uncompressed UDP
- * header
- */
- err = skb_cow(skb, needed);
- if (unlikely(err))
+ err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
+ if (err < 0)
return err;
-
- skb_push(skb, sizeof(struct udphdr));
- skb_reset_transport_header(skb);
- skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
-
- raw_dump_table(__func__, "raw UDP header dump",
- (u8 *)&uh, sizeof(uh));
-
- hdr.nexthdr = UIP_PROTO_UDP;
} else {
err = skb_cow(skb, sizeof(hdr));
if (unlikely(err))
@@ -500,71 +413,6 @@ static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
return rol8(val, shift);
}
-static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb)
-{
- struct udphdr *uh;
- u8 tmp;
-
- /* In the case of RAW sockets the transport header is not set by
- * the ip6 stack so we must set it ourselves
- */
- if (skb->transport_header == skb->network_header)
- skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-
- uh = udp_hdr(skb);
-
- if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
- LOWPAN_NHC_UDP_4BIT_PORT) &&
- ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
- LOWPAN_NHC_UDP_4BIT_PORT)) {
- pr_debug("UDP header: both ports compression to 4 bits\n");
- /* compression value */
- tmp = LOWPAN_NHC_UDP_CS_P_11;
- lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
- /* source and destination port */
- tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
- ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
- lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
- } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
- LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("UDP header: remove 8 bits of dest\n");
- /* compression value */
- tmp = LOWPAN_NHC_UDP_CS_P_01;
- lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
- /* source port */
- lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
- /* destination port */
- tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
- lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
- } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
- LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("UDP header: remove 8 bits of source\n");
- /* compression value */
- tmp = LOWPAN_NHC_UDP_CS_P_10;
- lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
- /* source port */
- tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
- lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
- /* destination port */
- lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
- } else {
- pr_debug("UDP header: can't compress\n");
- /* compression value */
- tmp = LOWPAN_NHC_UDP_CS_P_00;
- lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
- /* source port */
- lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
- /* destination port */
- lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
- }
-
- /* checksum is always inline */
- lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check));
-
- /* skip the UDP header */
- skb_pull(skb, sizeof(struct udphdr));
-}
-
int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
@@ -572,7 +420,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
u8 tmp, iphc0, iphc1, *hc_ptr;
struct ipv6hdr *hdr;
u8 head[100] = {};
- int addr_type;
+ int ret, addr_type;
if (type != ETH_P_IPV6)
return -EINVAL;
@@ -649,13 +497,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
/* NOTE: payload length is always compressed */
- /* Next Header is compress if UDP */
- if (hdr->nexthdr == UIP_PROTO_UDP)
- iphc0 |= LOWPAN_IPHC_NH_C;
-
- if ((iphc0 & LOWPAN_IPHC_NH_C) == 0)
- lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
- sizeof(hdr->nexthdr));
+ /* Check if we provide the nhc format for nexthdr and compression
+ * functionality. If not nexthdr is handled inline and not compressed.
+ */
+ ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr, &iphc0);
+ if (ret < 0)
+ return ret;
/* Hop limit
* if 1: compress, encoding is 01
@@ -741,9 +588,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
}
}
- /* UDP header compression */
- if (hdr->nexthdr == UIP_PROTO_UDP)
- compress_udp_header(&hc_ptr, skb);
+ /* next header compression */
+ if (iphc0 & LOWPAN_IPHC_NH_C) {
+ ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr);
+ if (ret < 0)
+ return ret;
+ }
head[0] = iphc0;
head[1] = iphc1;
@@ -761,4 +611,18 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(lowpan_header_compress);
+static int __init lowpan_module_init(void)
+{
+ request_module_nowait("nhc_dest");
+ request_module_nowait("nhc_fragment");
+ request_module_nowait("nhc_hop");
+ request_module_nowait("nhc_ipv6");
+ request_module_nowait("nhc_mobility");
+ request_module_nowait("nhc_routing");
+ request_module_nowait("nhc_udp");
+
+ return 0;
+}
+module_init(lowpan_module_init);
+
MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
new file mode 100644
index 000000000000..fd20fc51a7c4
--- /dev/null
+++ b/net/6lowpan/nhc.c
@@ -0,0 +1,241 @@
+/*
+ * 6LoWPAN next header compression
+ *
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/netdevice.h>
+
+#include <net/ipv6.h>
+
+#include "nhc.h"
+
+static struct rb_root rb_root = RB_ROOT;
+static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX];
+static DEFINE_SPINLOCK(lowpan_nhc_lock);
+
+static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
+{
+ struct rb_node **new = &rb_root.rb_node, *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct lowpan_nhc *this = container_of(*new, struct lowpan_nhc,
+ node);
+ int result, len_dif, len;
+
+ len_dif = nhc->idlen - this->idlen;
+
+ if (nhc->idlen < this->idlen)
+ len = nhc->idlen;
+ else
+ len = this->idlen;
+
+ result = memcmp(nhc->id, this->id, len);
+ if (!result)
+ result = len_dif;
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&nhc->node, parent, new);
+ rb_insert_color(&nhc->node, &rb_root);
+
+ return 0;
+}
+
+static void lowpan_nhc_remove(struct lowpan_nhc *nhc)
+{
+ rb_erase(&nhc->node, &rb_root);
+}
+
+static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
+{
+ struct rb_node *node = rb_root.rb_node;
+ const u8 *nhcid_skb_ptr = skb->data;
+
+ while (node) {
+ struct lowpan_nhc *nhc = container_of(node, struct lowpan_nhc,
+ node);
+ u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN];
+ int result, i;
+
+ if (nhcid_skb_ptr + nhc->idlen > skb->data + skb->len)
+ return NULL;
+
+ /* copy the nhc id value from the skb and mask it afterwards */
+ memcpy(nhcid_skb_ptr_masked, nhcid_skb_ptr, nhc->idlen);
+ for (i = 0; i < nhc->idlen; i++)
+ nhcid_skb_ptr_masked[i] &= nhc->idmask[i];
+
+ result = memcmp(nhcid_skb_ptr_masked, nhc->id, nhc->idlen);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return nhc;
+ }
+
+ return NULL;
+}
+
+int lowpan_nhc_check_compression(struct sk_buff *skb,
+ const struct ipv6hdr *hdr, u8 **hc_ptr,
+ u8 *iphc0)
+{
+ struct lowpan_nhc *nhc;
+
+ spin_lock_bh(&lowpan_nhc_lock);
+
+ nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
+ if (nhc && nhc->compress)
+ *iphc0 |= LOWPAN_IPHC_NH_C;
+ else
+ lowpan_push_hc_data(hc_ptr, &hdr->nexthdr,
+ sizeof(hdr->nexthdr));
+
+ spin_unlock_bh(&lowpan_nhc_lock);
+
+ return 0;
+}
+
+int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ u8 **hc_ptr)
+{
+ int ret;
+ struct lowpan_nhc *nhc;
+
+ spin_lock_bh(&lowpan_nhc_lock);
+
+ nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
+ /* Check if the nhc module was removed while we ran unlocked.
+ * TODO: this is a workaround; we should prevent unloading of nhc
+ * modules during the unlocked section. Hitting this will always
+ * drop the lowpan packet, but it is very unlikely.
+ *
+ * A clean solution isn't easy, because we already have to decide
+ * in lowpan_nhc_check_compression whether we compress or not.
+ * Because of the inline data that is added to the skb there, we
+ * cannot move this handling.
+ */
+ if (unlikely(!nhc || !nhc->compress)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* In the case of RAW sockets the transport header is not set by
+ * the ip6 stack so we must set it ourselves
+ */
+ if (skb->transport_header == skb->network_header)
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
+ ret = nhc->compress(skb, hc_ptr);
+ if (ret < 0)
+ goto out;
+
+ /* skip the transport header */
+ skb_pull(skb, nhc->nexthdrlen);
+
+out:
+ spin_unlock_bh(&lowpan_nhc_lock);
+
+ return ret;
+}
+
+int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+ struct ipv6hdr *hdr)
+{
+ struct lowpan_nhc *nhc;
+ int ret;
+
+ spin_lock_bh(&lowpan_nhc_lock);
+
+ nhc = lowpan_nhc_by_nhcid(skb);
+ if (nhc) {
+ if (nhc->uncompress) {
+ ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) +
+ nhc->nexthdrlen);
+ if (ret < 0) {
+ spin_unlock_bh(&lowpan_nhc_lock);
+ return ret;
+ }
+ } else {
+ spin_unlock_bh(&lowpan_nhc_lock);
+ netdev_warn(dev, "received nhc id for %s which is not implemented.\n",
+ nhc->name);
+ return -ENOTSUPP;
+ }
+ } else {
+ spin_unlock_bh(&lowpan_nhc_lock);
+ netdev_warn(dev, "received unknown nhc id which was not found.\n");
+ return -ENOENT;
+ }
+
+ hdr->nexthdr = nhc->nexthdr;
+ skb_reset_transport_header(skb);
+ raw_dump_table(__func__, "raw transport header dump",
+ skb_transport_header(skb), nhc->nexthdrlen);
+
+ spin_unlock_bh(&lowpan_nhc_lock);
+
+ return 0;
+}
+
+int lowpan_nhc_add(struct lowpan_nhc *nhc)
+{
+ int ret;
+
+ if (!nhc->idlen || !nhc->idsetup)
+ return -EINVAL;
+
+ WARN_ONCE(nhc->idlen > LOWPAN_NHC_MAX_ID_LEN,
+ "LOWPAN_NHC_MAX_ID_LEN should be updated to %zd.\n",
+ nhc->idlen);
+
+ nhc->idsetup(nhc);
+
+ spin_lock_bh(&lowpan_nhc_lock);
+
+ if (lowpan_nexthdr_nhcs[nhc->nexthdr]) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ret = lowpan_nhc_insert(nhc);
+ if (ret < 0)
+ goto out;
+
+ lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
+out:
+ spin_unlock_bh(&lowpan_nhc_lock);
+ return ret;
+}
+EXPORT_SYMBOL(lowpan_nhc_add);
+
+void lowpan_nhc_del(struct lowpan_nhc *nhc)
+{
+ spin_lock_bh(&lowpan_nhc_lock);
+
+ lowpan_nhc_remove(nhc);
+ lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;
+
+ spin_unlock_bh(&lowpan_nhc_lock);
+
+ synchronize_net();
+}
+EXPORT_SYMBOL(lowpan_nhc_del);
diff --git a/net/6lowpan/nhc.h b/net/6lowpan/nhc.h
new file mode 100644
index 000000000000..ed44938eb5de
--- /dev/null
+++ b/net/6lowpan/nhc.h
@@ -0,0 +1,146 @@
+#ifndef __6LOWPAN_NHC_H
+#define __6LOWPAN_NHC_H
+
+#include <linux/skbuff.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+#include <net/ipv6.h>
+
+#define LOWPAN_NHC_MAX_ID_LEN 1
+
+/**
+ * LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct
+ *
+ * @__nhc: variable name of the lowpan_nhc struct.
+ * @_name: const char * of common header compression name.
+ * @_nexthdr: ipv6 nexthdr field for the header compression.
+ * @_hdrlen: ipv6 nexthdr len for the reserved space.
+ * @_idsetup: callback to set up the id and mask values.
+ * @_idlen: len for the next header id and mask; should always be the same.
+ * @_uncompress: callback for uncompression call.
+ * @_compress: callback for compression call.
+ */
+#define LOWPAN_NHC(__nhc, _name, _nexthdr, \
+ _hdrlen, _idsetup, _idlen, \
+ _uncompress, _compress) \
+static u8 __nhc##_val[_idlen]; \
+static u8 __nhc##_mask[_idlen]; \
+static struct lowpan_nhc __nhc = { \
+ .name = _name, \
+ .nexthdr = _nexthdr, \
+ .nexthdrlen = _hdrlen, \
+ .id = __nhc##_val, \
+ .idmask = __nhc##_mask, \
+ .idlen = _idlen, \
+ .idsetup = _idsetup, \
+ .uncompress = _uncompress, \
+ .compress = _compress, \
+}
+
+#define module_lowpan_nhc(__nhc) \
+static int __init __nhc##_init(void) \
+{ \
+ return lowpan_nhc_add(&(__nhc)); \
+} \
+module_init(__nhc##_init); \
+static void __exit __nhc##_exit(void) \
+{ \
+ lowpan_nhc_del(&(__nhc)); \
+} \
+module_exit(__nhc##_exit);
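For reference, module_lowpan_nhc(nhc_udp), which the per-header modules added later in this patch use, expands to roughly the following init/exit pair (a sketch of the macro expansion, not literal code from the patch):

	static int __init nhc_udp_init(void)
	{
		/* insert the nhc into the rbtree and the nexthdr lookup array */
		return lowpan_nhc_add(&nhc_udp);
	}
	module_init(nhc_udp_init);

	static void __exit nhc_udp_exit(void)
	{
		/* unregister; lowpan_nhc_del() ends with synchronize_net() */
		lowpan_nhc_del(&nhc_udp);
	}
	module_exit(nhc_udp_exit);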
+
+/**
+ * struct lowpan_nhc - hold 6lowpan next hdr compression information
+ *
+ * @node: holder for the rbtree.
+ * @name: name of the specific next header compression
+ * @nexthdr: next header value of the protocol which should be compressed.
+ * @nexthdrlen: ipv6 nexthdr len for the reserved space.
+ * @id: array for the nhc id. Note this needs to be in network byteorder.
+ * @idmask: array for the nhc id mask. Note this needs to be in network byteorder.
+ * @idlen: the length of the next header id and mask.
+ * @idsetup: callback to set up the next header id value and mask.
+ * @compress: callback to do the header compression.
+ * @uncompress: callback to do the header uncompression.
+ */
+struct lowpan_nhc {
+ struct rb_node node;
+ const char *name;
+ const u8 nexthdr;
+ const size_t nexthdrlen;
+ u8 *id;
+ u8 *idmask;
+ const size_t idlen;
+
+ void (*idsetup)(struct lowpan_nhc *nhc);
+ int (*uncompress)(struct sk_buff *skb, size_t needed);
+ int (*compress)(struct sk_buff *skb, u8 **hc_ptr);
+};
+
+/**
+ * lowpan_nhc_by_nexthdr - return the 6lowpan nhc by ipv6 nexthdr.
+ *
+ * @nexthdr: ipv6 nexthdr value.
+ */
+struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr);
+
+/**
+ * lowpan_nhc_check_compression - checks if we support the compression format.
+ * If we support the nhc for this nexthdr value, the 6LoWPAN iphc NHC bit
+ * will be set. If we don't, the nexthdr will be added as inline data to
+ * the 6LoWPAN header.
+ *
+ * @skb: skb of 6LoWPAN header to read nhc and replace header.
+ * @hdr: ipv6hdr to check the nexthdr value
+ * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
+ * replaced header.
+ * @iphc0: iphc0 pointer to set the 6LoWPAN NHC bit
+ */
+int lowpan_nhc_check_compression(struct sk_buff *skb,
+ const struct ipv6hdr *hdr, u8 **hc_ptr,
+ u8 *iphc0);
+
+/**
+ * lowpan_nhc_do_compression - call the compress callback for the nhc
+ *
+ * @skb: skb of 6LoWPAN header to read nhc and replace header.
+ * @hdr: ipv6hdr to set the nexthdr value
+ * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
+ * replaced header.
+ */
+int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ u8 **hc_ptr);
+
+/**
+ * lowpan_nhc_do_uncompression - call the uncompress callback for the nhc
+ *	that matches the nhc id at skb->data.
+ *
+ * @skb: skb of 6LoWPAN header; skb->data should point to the nhc id value.
+ * @dev: netdevice for print logging information.
+ * @hdr: ipv6hdr for setting nexthdr value.
+ */
+int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+ struct ipv6hdr *hdr);
+
+/**
+ * lowpan_nhc_add - register a next header compression to framework
+ *
+ * @nhc: nhc which should be added.
+ */
+int lowpan_nhc_add(struct lowpan_nhc *nhc);
+
+/**
+ * lowpan_nhc_del - delete a next header compression from framework
+ *
+ * @nhc: nhc which should be deleted.
+ */
+void lowpan_nhc_del(struct lowpan_nhc *nhc);
+
+/**
+ * lowpan_nhc_init - add all default nhcs
+ */
+void lowpan_nhc_init(void);
+
+#endif /* __6LOWPAN_NHC_H */
diff --git a/net/6lowpan/nhc_dest.c b/net/6lowpan/nhc_dest.c
new file mode 100644
index 000000000000..0b292c9646eb
--- /dev/null
+++ b/net/6lowpan/nhc_dest.c
@@ -0,0 +1,28 @@
+/*
+ * 6LoWPAN IPv6 Destination Options Header compression according to
+ * RFC6282
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_DEST_IDLEN 1
+#define LOWPAN_NHC_DEST_ID_0 0xe6
+#define LOWPAN_NHC_DEST_MASK_0 0xfe
+
+static void dest_nhid_setup(struct lowpan_nhc *nhc)
+{
+ nhc->id[0] = LOWPAN_NHC_DEST_ID_0;
+ nhc->idmask[0] = LOWPAN_NHC_DEST_MASK_0;
+}
+
+LOWPAN_NHC(nhc_dest, "RFC6282 Destination Options", NEXTHDR_DEST, 0,
+ dest_nhid_setup, LOWPAN_NHC_DEST_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_dest);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Destination Options compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_fragment.c b/net/6lowpan/nhc_fragment.c
new file mode 100644
index 000000000000..473dbc58ef84
--- /dev/null
+++ b/net/6lowpan/nhc_fragment.c
@@ -0,0 +1,27 @@
+/*
+ * 6LoWPAN IPv6 Fragment Header compression according to RFC6282
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_FRAGMENT_IDLEN 1
+#define LOWPAN_NHC_FRAGMENT_ID_0 0xe4
+#define LOWPAN_NHC_FRAGMENT_MASK_0 0xfe
+
+static void fragment_nhid_setup(struct lowpan_nhc *nhc)
+{
+ nhc->id[0] = LOWPAN_NHC_FRAGMENT_ID_0;
+ nhc->idmask[0] = LOWPAN_NHC_FRAGMENT_MASK_0;
+}
+
+LOWPAN_NHC(nhc_fragment, "RFC6282 Fragment", NEXTHDR_FRAGMENT, 0,
+ fragment_nhid_setup, LOWPAN_NHC_FRAGMENT_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_fragment);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Fragment compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_hop.c b/net/6lowpan/nhc_hop.c
new file mode 100644
index 000000000000..1eb66be16f19
--- /dev/null
+++ b/net/6lowpan/nhc_hop.c
@@ -0,0 +1,27 @@
+/*
+ * 6LoWPAN IPv6 Hop-by-Hop Options Header compression according to RFC6282
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_HOP_IDLEN 1
+#define LOWPAN_NHC_HOP_ID_0 0xe0
+#define LOWPAN_NHC_HOP_MASK_0 0xfe
+
+static void hop_nhid_setup(struct lowpan_nhc *nhc)
+{
+ nhc->id[0] = LOWPAN_NHC_HOP_ID_0;
+ nhc->idmask[0] = LOWPAN_NHC_HOP_MASK_0;
+}
+
+LOWPAN_NHC(nhc_hop, "RFC6282 Hop-by-Hop Options", NEXTHDR_HOP, 0,
+ hop_nhid_setup, LOWPAN_NHC_HOP_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_hop);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Hop-by-Hop Options compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_ipv6.c b/net/6lowpan/nhc_ipv6.c
new file mode 100644
index 000000000000..2313d1600af3
--- /dev/null
+++ b/net/6lowpan/nhc_ipv6.c
@@ -0,0 +1,27 @@
+/*
+ * 6LoWPAN IPv6 Header compression according to RFC6282
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_IPV6_IDLEN 1
+#define LOWPAN_NHC_IPV6_ID_0 0xee
+#define LOWPAN_NHC_IPV6_MASK_0 0xfe
+
+static void ipv6_nhid_setup(struct lowpan_nhc *nhc)
+{
+ nhc->id[0] = LOWPAN_NHC_IPV6_ID_0;
+ nhc->idmask[0] = LOWPAN_NHC_IPV6_MASK_0;
+}
+
+LOWPAN_NHC(nhc_ipv6, "RFC6282 IPv6", NEXTHDR_IPV6, 0, ipv6_nhid_setup,
+ LOWPAN_NHC_IPV6_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_ipv6);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 IPv6 compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_mobility.c b/net/6lowpan/nhc_mobility.c
new file mode 100644
index 000000000000..60d3f3886c98
--- /dev/null
+++ b/net/6lowpan/nhc_mobility.c
@@ -0,0 +1,27 @@
+/*
+ * 6LoWPAN IPv6 Mobility Header compression according to RFC6282
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_MOBILITY_IDLEN 1
+#define LOWPAN_NHC_MOBILITY_ID_0 0xe8
+#define LOWPAN_NHC_MOBILITY_MASK_0 0xfe
+
+static void mobility_nhid_setup(struct lowpan_nhc *nhc)
+{
+ nhc->id[0] = LOWPAN_NHC_MOBILITY_ID_0;
+ nhc->idmask[0] = LOWPAN_NHC_MOBILITY_MASK_0;
+}
+
+LOWPAN_NHC(nhc_mobility, "RFC6282 Mobility", NEXTHDR_MOBILITY, 0,
+ mobility_nhid_setup, LOWPAN_NHC_MOBILITY_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_mobility);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Mobility compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_routing.c b/net/6lowpan/nhc_routing.c
new file mode 100644
index 000000000000..c393280f11c4
--- /dev/null
+++ b/net/6lowpan/nhc_routing.c
@@ -0,0 +1,27 @@
+/*
+ * 6LoWPAN IPv6 Routing Header compression according to RFC6282
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_ROUTING_IDLEN 1
+#define LOWPAN_NHC_ROUTING_ID_0 0xe2
+#define LOWPAN_NHC_ROUTING_MASK_0 0xfe
+
+static void routing_nhid_setup(struct lowpan_nhc *nhc)
+{
+ nhc->id[0] = LOWPAN_NHC_ROUTING_ID_0;
+ nhc->idmask[0] = LOWPAN_NHC_ROUTING_MASK_0;
+}
+
+LOWPAN_NHC(nhc_routing, "RFC6282 Routing", NEXTHDR_ROUTING, 0,
+ routing_nhid_setup, LOWPAN_NHC_ROUTING_IDLEN, NULL, NULL);
+
+module_lowpan_nhc(nhc_routing);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 Routing compression");
+MODULE_LICENSE("GPL");
diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c
new file mode 100644
index 000000000000..c6bcaeb428ae
--- /dev/null
+++ b/net/6lowpan/nhc_udp.c
@@ -0,0 +1,157 @@
+/*
+ * 6LoWPAN IPv6 UDP compression according to RFC6282
+ *
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ *
+ * Originally written by:
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ * Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "nhc.h"
+
+#define LOWPAN_NHC_UDP_IDLEN 1
+
+static int udp_uncompress(struct sk_buff *skb, size_t needed)
+{
+ u8 tmp = 0, val = 0;
+ struct udphdr uh;
+ bool fail;
+ int err;
+
+ fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
+
+ pr_debug("UDP header uncompression\n");
+ switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+ case LOWPAN_NHC_UDP_CS_P_00:
+ fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
+ fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
+ break;
+ case LOWPAN_NHC_UDP_CS_P_01:
+ fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
+ fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
+ uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_10:
+ fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
+ uh.source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
+ fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
+ break;
+ case LOWPAN_NHC_UDP_CS_P_11:
+ fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
+ uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val >> 4));
+ uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));
+ break;
+ default:
+ BUG();
+ }
+
+ pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
+ ntohs(uh.source), ntohs(uh.dest));
+
+ /* checksum */
+ if (tmp & LOWPAN_NHC_UDP_CS_C) {
+ pr_debug_ratelimited("checksum elided currently not supported\n");
+ fail = true;
+ } else {
+ fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
+ }
+
+ if (fail)
+ return -EINVAL;
+
+ /* UDP length needs to be inferred from the lower layers
+ * here; we obtain the hint from the remaining size of the
+ * frame
+ */
+ uh.len = htons(skb->len + sizeof(struct udphdr));
+ pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));
+
+ /* replace the compressed UDP head by the uncompressed UDP
+ * header
+ */
+ err = skb_cow(skb, needed);
+ if (unlikely(err))
+ return err;
+
+ skb_push(skb, sizeof(struct udphdr));
+ skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
+
+ return 0;
+}
+
+static int udp_compress(struct sk_buff *skb, u8 **hc_ptr)
+{
+ const struct udphdr *uh = udp_hdr(skb);
+ u8 tmp;
+
+ if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT) &&
+ ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT)) {
+ pr_debug("UDP header: both ports compression to 4 bits\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_11;
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+ /* source and destination port */
+ tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
+ ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+ } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("UDP header: remove 8 bits of dest\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_01;
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+ /* source port */
+ lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
+ /* destination port */
+ tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+ } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("UDP header: remove 8 bits of source\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_10;
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+ /* source port */
+ tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+ /* destination port */
+ lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
+ } else {
+ pr_debug("UDP header: can't compress\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_00;
+ lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
+ /* source port */
+ lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
+ /* destination port */
+ lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
+ }
+
+ /* checksum is always inline */
+ lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check));
+
+ return 0;
+}
+
+static void udp_nhid_setup(struct lowpan_nhc *nhc)
+{
+ nhc->id[0] = LOWPAN_NHC_UDP_ID;
+ nhc->idmask[0] = LOWPAN_NHC_UDP_MASK;
+}
+
+LOWPAN_NHC(nhc_udp, "RFC6282 UDP", NEXTHDR_UDP, sizeof(struct udphdr),
+ udp_nhid_setup, LOWPAN_NHC_UDP_IDLEN, udp_uncompress, udp_compress);
+
+module_lowpan_nhc(nhc_udp);
+MODULE_DESCRIPTION("6LoWPAN next header RFC6282 UDP compression");
+MODULE_LICENSE("GPL");
diff --git a/net/802/fc.c b/net/802/fc.c
index 7c174b6750cd..7b9219022418 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -75,29 +75,8 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev,
return -hdr_len;
}
-/*
- * A neighbour discovery of some species (eg arp) has completed. We
- * can now send the packet.
- */
-
-static int fc_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
- struct fch_hdr *fch=(struct fch_hdr *)skb->data;
- struct fcllc *fcllc=(struct fcllc *)(skb->data+sizeof(struct fch_hdr));
- if(fcllc->ethertype != htons(ETH_P_IP)) {
- printk("fc_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(fcllc->ethertype));
- return 0;
- }
- return arp_find(fch->daddr, skb);
-#else
- return 0;
-#endif
-}
-
static const struct header_ops fc_header_ops = {
.create = fc_header,
- .rebuild = fc_rebuild_header,
};
static void fc_setup(struct net_device *dev)
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 59e7346f1193..7d3a0af954e8 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -87,31 +87,6 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
return -hl;
}
-
-/*
- * Rebuild the FDDI MAC header. This is called after an ARP
- * (or in future other address resolution) has completed on
- * this sk_buff. We now let ARP fill in the other fields.
- */
-
-static int fddi_rebuild_header(struct sk_buff *skb)
-{
- struct fddihdr *fddi = (struct fddihdr *)skb->data;
-
-#ifdef CONFIG_INET
- if (fddi->hdr.llc_snap.ethertype == htons(ETH_P_IP))
- /* Try to get ARP to resolve the header and fill destination address */
- return arp_find(fddi->daddr, skb);
- else
-#endif
- {
- printk("%s: Don't know how to resolve type %04X addresses.\n",
- skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
- return 0;
- }
-}
-
-
/*
* Determine the packet's protocol ID and fill in skb fields.
* This routine is called before an incoming packet is passed
@@ -177,7 +152,6 @@ EXPORT_SYMBOL(fddi_change_mtu);
static const struct header_ops fddi_header_ops = {
.create = fddi_header,
- .rebuild = fddi_rebuild_header,
};
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 2e03f8259dd5..ade1a52cdcff 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -91,33 +91,6 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev,
/*
- * Rebuild the HIPPI MAC header. This is called after an ARP has
- * completed on this sk_buff. We now let ARP fill in the other fields.
- */
-
-static int hippi_rebuild_header(struct sk_buff *skb)
-{
- struct hippi_hdr *hip = (struct hippi_hdr *)skb->data;
-
- /*
- * Only IP is currently supported
- */
-
- if(hip->snap.ethertype != htons(ETH_P_IP))
- {
- printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype));
- return 0;
- }
-
- /*
- * We don't support dynamic ARP on HIPPI, but we use the ARP
- * static ARP tables to hold the I-FIELDs.
- */
- return arp_find(hip->le.daddr, skb);
-}
-
-
-/*
* Determine the packet's protocol ID.
*/
@@ -186,7 +159,6 @@ EXPORT_SYMBOL(hippi_neigh_setup_dev);
static const struct header_ops hippi_header_ops = {
.create = hippi_header,
- .rebuild = hippi_rebuild_header,
};
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 64c6bed4a3d3..98a30a5b8664 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -413,7 +413,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
vlan_transfer_features(dev, vlandev);
break;
- case NETDEV_DOWN:
+ case NETDEV_DOWN: {
+ struct net_device *tmp;
+ LIST_HEAD(close_list);
+
if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
@@ -425,11 +428,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
- dev_change_flags(vlandev, flgs & ~IFF_UP);
+ list_add(&vlandev->close_list, &close_list);
+ }
+
+ dev_close_many(&close_list, false);
+
+ list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
netif_stacked_transfer_operstate(dev, vlandev);
+ list_del_init(&vlandev->close_list);
}
+ list_del(&close_list);
break;
-
+ }
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 118956448cf6..01d7ba840df8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -37,39 +37,6 @@
#include <linux/netpoll.h>
/*
- * Rebuild the Ethernet MAC header. This is called after an ARP
- * (or in future other address resolution) has completed on this
- * sk_buff. We now let ARP fill in the other fields.
- *
- * This routine CANNOT use cached dst->neigh!
- * Really, it is used only when dst->neigh is wrong.
- *
- * TODO: This needs a checkup, I'm ignorant here. --BLG
- */
-static int vlan_dev_rebuild_header(struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
-
- switch (veth->h_vlan_encapsulated_proto) {
-#ifdef CONFIG_INET
- case htons(ETH_P_IP):
-
- /* TODO: Confirm this will work with VLAN headers... */
- return arp_find(veth->h_dest, skb);
-#endif
- default:
- pr_debug("%s: unable to resolve type %X addresses\n",
- dev->name, ntohs(veth->h_vlan_encapsulated_proto));
-
- ether_addr_copy(veth->h_source, dev->dev_addr);
- break;
- }
-
- return 0;
-}
-
-/*
* Create the VLAN header for an arbitrary protocol layer
*
* saddr=NULL means use device source address
@@ -534,7 +501,6 @@ static int vlan_dev_get_lock_subclass(struct net_device *dev)
static const struct header_ops vlan_header_ops = {
.create = vlan_dev_hard_header,
- .rebuild = vlan_dev_rebuild_header,
.parse = eth_header_parse,
};
@@ -554,7 +520,6 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
static const struct header_ops vlan_passthru_header_ops = {
.create = vlan_passthru_hard_header,
- .rebuild = dev_rebuild_header,
.parse = eth_header_parse,
};
@@ -573,7 +538,6 @@ static int vlan_dev_init(struct net_device *dev)
/* IFF_BROADCAST|IFF_MULTICAST; ??? */
dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
IFF_MASTER | IFF_SLAVE);
- dev->iflink = real_dev->ifindex;
dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
(1<<__LINK_STATE_DORMANT))) |
(1<<__LINK_STATE_PRESENT);
@@ -589,6 +553,7 @@ static int vlan_dev_init(struct net_device *dev)
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
+ dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
@@ -767,6 +732,13 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
+static int vlan_dev_get_iflink(const struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+ return real_dev->ifindex;
+}
+
static const struct ethtool_ops vlan_ethtool_ops = {
.get_settings = vlan_ethtool_get_settings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
@@ -803,6 +775,7 @@ static const struct net_device_ops vlan_netdev_ops = {
#endif
.ndo_fix_features = vlan_dev_fix_features,
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
+ .ndo_get_iflink = vlan_dev_get_iflink,
};
static void vlan_dev_free(struct net_device *dev)
@@ -827,5 +800,5 @@ void vlan_setup(struct net_device *dev)
dev->destructor = vlan_dev_free;
dev->ethtool_ops = &vlan_ethtool_ops;
- memset(dev->broadcast, 0, ETH_ALEN);
+ eth_zero_addr(dev->broadcast);
}
diff --git a/net/9p/client.c b/net/9p/client.c
index e86a9bea1d16..6f4c4c88db84 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/client.h>
@@ -555,7 +556,7 @@ out_err:
*/
static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
- char *uidata, int in_hdrlen, int kern_buf)
+ struct iov_iter *uidata, int in_hdrlen)
{
int err;
int ecode;
@@ -591,16 +592,11 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
ename = &req->rc->sdata[req->rc->offset];
if (len > inline_len) {
/* We have error in external buffer */
- if (kern_buf) {
- memcpy(ename + inline_len, uidata,
- len - inline_len);
- } else {
- err = copy_from_user(ename + inline_len,
- uidata, len - inline_len);
- if (err) {
- err = -EFAULT;
- goto out_err;
- }
+ err = copy_from_iter(ename + inline_len,
+ len - inline_len, uidata);
+ if (err != len - inline_len) {
+ err = -EFAULT;
+ goto out_err;
}
}
ename = NULL;
@@ -806,8 +802,8 @@ reterr:
* p9_client_zc_rpc - issue a request and wait for a response
* @c: client session
* @type: type of request
- * @uidata: user bffer that should be ued for zero copy read
- * @uodata: user buffer that shoud be user for zero copy write
+ * @uidata: destination for zero copy read
+ * @uodata: source for zero copy write
* @inlen: read buffer size
* @olen: write buffer size
* @hdrlen: read header size; this is the size of the response protocol data
@@ -816,9 +812,10 @@ reterr:
* Returns request structure (which client must free using p9_free_req)
*/
static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
- char *uidata, char *uodata,
+ struct iov_iter *uidata,
+ struct iov_iter *uodata,
int inlen, int olen, int in_hdrlen,
- int kern_buf, const char *fmt, ...)
+ const char *fmt, ...)
{
va_list ap;
int sigpending, err;
@@ -841,12 +838,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
} else
sigpending = 0;
- /* If we are called with KERNEL_DS force kern_buf */
- if (segment_eq(get_fs(), KERNEL_DS))
- kern_buf = 1;
-
err = c->trans_mod->zc_request(c, req, uidata, uodata,
- inlen, olen, in_hdrlen, kern_buf);
+ inlen, olen, in_hdrlen);
if (err < 0) {
if (err == -EIO)
c->status = Disconnected;
@@ -876,7 +869,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
if (err < 0)
goto reterr;
- err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
+ err = p9_check_zc_errors(c, req, uidata, in_hdrlen);
trace_9p_client_res(c, type, req->rc->tag, err);
if (!err)
return req;
@@ -1123,6 +1116,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
fid = NULL;
goto error;
}
+ fid->uid = n_uname;
req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid,
afid ? afid->fid : P9_NOFID, uname, aname, n_uname);
@@ -1541,142 +1535,128 @@ error:
EXPORT_SYMBOL(p9_client_unlinkat);
int
-p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
- u32 count)
+p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
{
- char *dataptr;
- int kernel_buf = 0;
+ struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
- struct p9_client *clnt;
- int err, rsize, non_zc = 0;
-
+ int total = 0;
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
- fid->fid, (unsigned long long) offset, count);
- err = 0;
- clnt = fid->clnt;
-
- rsize = fid->iounit;
- if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
- rsize = clnt->msize - P9_IOHDRSZ;
-
- if (count < rsize)
- rsize = count;
-
- /* Don't bother zerocopy for small IO (< 1024) */
- if (clnt->trans_mod->zc_request && rsize > 1024) {
- char *indata;
- if (data) {
- kernel_buf = 1;
- indata = data;
- } else
- indata = (__force char *)udata;
- /*
- * response header len is 11
- * PDU Header(7) + IO Size (4)
- */
- req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
- 11, kernel_buf, "dqd", fid->fid,
- offset, rsize);
- } else {
- non_zc = 1;
- req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
- rsize);
- }
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto error;
- }
+ fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
+
+ while (iov_iter_count(to)) {
+ int count = iov_iter_count(to);
+ int rsize, non_zc = 0;
+ char *dataptr;
+
+ rsize = fid->iounit;
+ if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
+ rsize = clnt->msize - P9_IOHDRSZ;
+
+ if (count < rsize)
+ rsize = count;
+
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ /*
+ * response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
+ 0, 11, "dqd", fid->fid,
+ offset, rsize);
+ } else {
+ non_zc = 1;
+ req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
+ rsize);
+ }
+ if (IS_ERR(req)) {
+ *err = PTR_ERR(req);
+ break;
+ }
- err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
- if (err) {
- trace_9p_protocol_dump(clnt, req->rc);
- goto free_and_error;
- }
+ *err = p9pdu_readf(req->rc, clnt->proto_version,
+ "D", &count, &dataptr);
+ if (*err) {
+ trace_9p_protocol_dump(clnt, req->rc);
+ p9_free_req(clnt, req);
+ break;
+ }
- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+ if (!count) {
+ p9_free_req(clnt, req);
+ break;
+ }
- if (non_zc) {
- if (data) {
- memmove(data, dataptr, count);
- } else {
- err = copy_to_user(udata, dataptr, count);
- if (err) {
- err = -EFAULT;
- goto free_and_error;
+ if (non_zc) {
+ int n = copy_to_iter(dataptr, count, to);
+ total += n;
+ offset += n;
+ if (n != count) {
+ *err = -EFAULT;
+ p9_free_req(clnt, req);
+ break;
}
+ } else {
+ iov_iter_advance(to, count);
+ total += count;
+ offset += count;
}
+ p9_free_req(clnt, req);
}
- p9_free_req(clnt, req);
- return count;
-
-free_and_error:
- p9_free_req(clnt, req);
-error:
- return err;
+ return total;
}
EXPORT_SYMBOL(p9_client_read);
int
-p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
- u64 offset, u32 count)
+p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
{
- int err, rsize;
- int kernel_buf = 0;
- struct p9_client *clnt;
+ struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
+ int total = 0;
+
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
+ fid->fid, (unsigned long long) offset,
+ iov_iter_count(from));
+
+ while (iov_iter_count(from)) {
+ int count = iov_iter_count(from);
+ int rsize = fid->iounit;
+ if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
+ rsize = clnt->msize - P9_IOHDRSZ;
+
+ if (count < rsize)
+ rsize = count;
+
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
+ rsize, P9_ZC_HDR_SZ, "dqd",
+ fid->fid, offset, rsize);
+ } else {
+ req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
+ offset, rsize, from);
+ }
+ if (IS_ERR(req)) {
+ *err = PTR_ERR(req);
+ break;
+ }
- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
- fid->fid, (unsigned long long) offset, count);
- err = 0;
- clnt = fid->clnt;
-
- rsize = fid->iounit;
- if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
- rsize = clnt->msize - P9_IOHDRSZ;
+ *err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
+ if (*err) {
+ trace_9p_protocol_dump(clnt, req->rc);
+ p9_free_req(clnt, req);
+ break; /* avoid freeing req a second time below */
+ }
- if (count < rsize)
- rsize = count;
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
- /* Don't bother zerocopy for small IO (< 1024) */
- if (clnt->trans_mod->zc_request && rsize > 1024) {
- char *odata;
- if (data) {
- kernel_buf = 1;
- odata = data;
- } else
- odata = (char *)udata;
- req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
- P9_ZC_HDR_SZ, kernel_buf, "dqd",
- fid->fid, offset, rsize);
- } else {
- if (data)
- req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
- offset, rsize, data);
- else
- req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
- offset, rsize, udata);
- }
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto error;
- }
-
- err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
- if (err) {
- trace_9p_protocol_dump(clnt, req->rc);
- goto free_and_error;
+ p9_free_req(clnt, req);
+ iov_iter_advance(from, count);
+ total += count;
+ offset += count;
}
-
- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
-
- p9_free_req(clnt, req);
- return count;
-
-free_and_error:
- p9_free_req(clnt, req);
-error:
- return err;
+ return total;
}
EXPORT_SYMBOL(p9_client_write);
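With this conversion both helpers take a struct iov_iter instead of a kernel/user buffer pair, and the byte count comes from the iterator itself. A kernel caller now wraps its buffer in a kvec-backed iterator, as the p9_client_readdir() hunk below does; here is a minimal sketch of a read into a kernel buffer (the helper name, buf and len are illustrative, not part of this patch):

	/* Hypothetical caller: read up to 'len' bytes at 'offset' into a kernel buffer. */
	static int example_read_kernel_buf(struct p9_fid *fid, u64 offset,
					   char *buf, size_t len)
	{
		struct kvec kv = { .iov_base = buf, .iov_len = len };
		struct iov_iter to;
		int err = 0;
		int n;

		iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, len);
		n = p9_client_read(fid, offset, &to, &err);
		return err ? err : n;
	}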
@@ -2068,6 +2048,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
+ struct kvec kv = {.iov_base = data, .iov_len = count};
+ struct iov_iter to;
+
+ iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, count);
p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
fid->fid, (unsigned long long) offset, count);
@@ -2088,8 +2072,8 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
* response header len is 11
* PDU Header(7) + IO Size (4)
*/
- req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
- 11, 1, "dqd", fid->fid, offset, rsize);
+ req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0,
+ 11, "dqd", fid->fid, offset, rsize);
} else {
non_zc = 1;
req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index ab9127ec5b7a..16d287565987 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -33,6 +33,7 @@
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/types.h>
+#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "protocol.h"
@@ -69,10 +70,11 @@ static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
}
static size_t
-pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
+pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size)
{
size_t len = min(pdu->capacity - pdu->size, size);
- if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
+ struct iov_iter i = *from;
+ if (copy_from_iter(&pdu->sdata[pdu->size], len, &i) != len)
len = 0;
pdu->size += len;
@@ -273,7 +275,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'R':{
- int16_t *nwqid = va_arg(ap, int16_t *);
+ uint16_t *nwqid = va_arg(ap, uint16_t *);
struct p9_qid **wqids =
va_arg(ap, struct p9_qid **);
@@ -437,23 +439,13 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
stbuf->extension, stbuf->n_uid,
stbuf->n_gid, stbuf->n_muid);
} break;
- case 'D':{
+ case 'V':{
uint32_t count = va_arg(ap, uint32_t);
- const void *data = va_arg(ap, const void *);
-
- errcode = p9pdu_writef(pdu, proto_version, "d",
- count);
- if (!errcode && pdu_write(pdu, data, count))
- errcode = -EFAULT;
- }
- break;
- case 'U':{
- int32_t count = va_arg(ap, int32_t);
- const char __user *udata =
- va_arg(ap, const void __user *);
+ struct iov_iter *from =
+ va_arg(ap, struct iov_iter *);
errcode = p9pdu_writef(pdu, proto_version, "d",
count);
- if (!errcode && pdu_write_u(pdu, udata, count))
+ if (!errcode && pdu_write_u(pdu, from, count))
errcode = -EFAULT;
}
break;
@@ -479,7 +471,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'R':{
- int16_t nwqid = va_arg(ap, int);
+ uint16_t nwqid = va_arg(ap, int);
struct p9_qid *wqids =
va_arg(ap, struct p9_qid *);
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 2ee3879161b1..38aa6345bdfa 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -12,12 +12,8 @@
*
*/
-#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <net/9p/9p.h>
-#include <net/9p/client.h>
-#include <linux/scatterlist.h>
-#include "trans_common.h"
/**
* p9_release_req_pages - Release pages after the transaction.
@@ -31,39 +27,3 @@ void p9_release_pages(struct page **pages, int nr_pages)
put_page(pages[i]);
}
EXPORT_SYMBOL(p9_release_pages);
-
-/**
- * p9_nr_pages - Return number of pages needed to accommodate the payload.
- */
-int p9_nr_pages(char *data, int len)
-{
- unsigned long start_page, end_page;
- start_page = (unsigned long)data >> PAGE_SHIFT;
- end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- return end_page - start_page;
-}
-EXPORT_SYMBOL(p9_nr_pages);
-
-/**
- * payload_gup - Translates user buffer into kernel pages and
- * pins them either for read/write through get_user_pages_fast().
- * @req: Request to be sent to server.
- * @pdata_off: data offset into the first page after translation (gup).
- * @pdata_len: Total length of the IO. gup may not return requested # of pages.
- * @nr_pages: number of pages to accommodate the payload
- * @rw: Indicates if the pages are for read or write.
- */
-
-int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
-{
- int nr_mapped_pages;
-
- nr_mapped_pages = get_user_pages_fast((unsigned long)data,
- *nr_pages, write, pages);
- if (nr_mapped_pages <= 0)
- return nr_mapped_pages;
-
- *nr_pages = nr_mapped_pages;
- return 0;
-}
-EXPORT_SYMBOL(p9_payload_gup);
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 173bb550a9eb..c43babb3f635 100644
--- a/net/9p/trans_common.h
+++ b/net/9p/trans_common.h
@@ -13,5 +13,3 @@
*/
void p9_release_pages(struct page **, int);
-int p9_payload_gup(char *, int *, struct page **, int);
-int p9_nr_pages(char *, int);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 80d08f6664cb..bced8c074c12 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -734,6 +734,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
opts->port = P9_PORT;
opts->rfd = ~0;
opts->wfd = ~0;
+ opts->privport = 0;
if (!params)
return 0;
@@ -940,7 +941,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
sin_server.sin_family = AF_INET;
sin_server.sin_addr.s_addr = in_aton(addr);
sin_server.sin_port = htons(opts.port);
- err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
+ err = __sock_create(current->nsproxy->net_ns, PF_INET,
SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
if (err) {
pr_err("%s (%d): problem creating socket\n",
@@ -988,7 +989,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
sun_server.sun_family = PF_UNIX;
strcpy(sun_server.sun_path, addr);
- err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
+ err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
SOCK_STREAM, 0, &csocket, 1);
if (err < 0) {
pr_err("%s (%d): problem creating socket\n",
@@ -1013,7 +1014,6 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
int err;
struct p9_fd_opts opts;
- struct p9_trans_fd *p;
parse_opts(args, &opts);
@@ -1026,7 +1026,6 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
if (err < 0)
return err;
- p = (struct p9_trans_fd *) client->trans;
p9_conn_create(client);
return 0;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 14ad43b5cf89..3533d2a53ab6 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -139,6 +139,7 @@ struct p9_rdma_opts {
int sq_depth;
int rq_depth;
long timeout;
+ int privport;
};
/*
@@ -146,7 +147,10 @@ struct p9_rdma_opts {
*/
enum {
/* Options that take integer arguments */
- Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
+ Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
+ /* Options that take no argument */
+ Opt_privport,
+ Opt_err,
};
static match_table_t tokens = {
@@ -154,6 +158,7 @@ static match_table_t tokens = {
{Opt_sq_depth, "sq=%u"},
{Opt_rq_depth, "rq=%u"},
{Opt_timeout, "timeout=%u"},
+ {Opt_privport, "privport"},
{Opt_err, NULL},
};
@@ -175,6 +180,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
opts->sq_depth = P9_RDMA_SQ_DEPTH;
opts->rq_depth = P9_RDMA_RQ_DEPTH;
opts->timeout = P9_RDMA_TIMEOUT;
+ opts->privport = 0;
if (!params)
return 0;
@@ -193,13 +199,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
if (!*p)
continue;
token = match_token(p, tokens, args);
- if (token == Opt_err)
- continue;
- r = match_int(&args[0], &option);
- if (r < 0) {
- p9_debug(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- continue;
+ if ((token != Opt_err) && (token != Opt_privport)) {
+ r = match_int(&args[0], &option);
+ if (r < 0) {
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ continue;
+ }
}
switch (token) {
case Opt_port:
@@ -214,6 +220,9 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
case Opt_timeout:
opts->timeout = option;
break;
+ case Opt_privport:
+ opts->privport = 1;
+ break;
default:
continue;
}
@@ -607,6 +616,23 @@ static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
return 0;
}
+static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
+{
+ struct sockaddr_in cl = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+ int port, err = -EINVAL;
+
+ for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
+ cl.sin_port = htons((ushort)port);
+ err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
+ if (err != -EADDRINUSE)
+ break;
+ }
+ return err;
+}
+
/**
* trans_create_rdma - Transport method for creating a transport instance
* @client: client instance
@@ -642,6 +668,16 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
/* Associate the client with the transport */
client->trans = rdma;
+ /* Bind to a privileged port if we need to */
+ if (opts.privport) {
+ err = p9_rdma_bind_privport(rdma);
+ if (err < 0) {
+ pr_err("%s (%d): problem binding to privport: %d\n",
+ __func__, task_pid_nr(current), -err);
+ goto error;
+ }
+ }
+
/* Resolve the server's address */
rdma->addr.sin_family = AF_INET;
rdma->addr.sin_addr.s_addr = in_aton(addr);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 36a1a739ad68..9dd49ca67dbc 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -217,15 +217,15 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
* @start: which segment of the sg_list to start at
* @pdata: a list of pages to add into sg.
* @nr_pages: number of pages to pack into the scatter/gather list
- * @data: data to pack into scatter/gather list
+ * @offs: amount of data at the beginning of the first page _not_ to pack
* @count: amount of data to pack into the scatter/gather list
*/
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
- struct page **pdata, int nr_pages, char *data, int count)
+ struct page **pdata, int nr_pages, size_t offs, int count)
{
int i = 0, s;
- int data_off;
+ int data_off = offs;
int index = start;
BUG_ON(nr_pages > (limit - start));
@@ -233,16 +233,14 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
* if the first page doesn't start at
* page boundary find the offset
*/
- data_off = offset_in_page(data);
while (nr_pages) {
- s = rest_of_page(data);
+ s = PAGE_SIZE - data_off;
if (s > count)
s = count;
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_page(&sg[index++], pdata[i++], s, data_off);
data_off = 0;
- data += s;
count -= s;
nr_pages--;
}
@@ -314,11 +312,20 @@ req_retry:
}
static int p9_get_mapped_pages(struct virtio_chan *chan,
- struct page **pages, char *data,
- int nr_pages, int write, int kern_buf)
+ struct page ***pages,
+ struct iov_iter *data,
+ int count,
+ size_t *offs,
+ int *need_drop)
{
+ int nr_pages;
int err;
- if (!kern_buf) {
+
+ if (!iov_iter_count(data))
+ return 0;
+
+ if (!(data->type & ITER_KVEC)) {
+ int n;
/*
* We allow only p9_max_pages pinned. We wait for the
* Other zc request to finish here
@@ -329,26 +336,49 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
if (err == -ERESTARTSYS)
return err;
}
- err = p9_payload_gup(data, &nr_pages, pages, write);
- if (err < 0)
- return err;
+ n = iov_iter_get_pages_alloc(data, pages, count, offs);
+ if (n < 0)
+ return n;
+ *need_drop = 1;
+ nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
atomic_add(nr_pages, &vp_pinned);
+ return n;
} else {
/* kernel buffer, no need to pin pages */
- int s, index = 0;
- int count = nr_pages;
- while (nr_pages) {
- s = rest_of_page(data);
- if (is_vmalloc_addr(data))
- pages[index++] = vmalloc_to_page(data);
+ int index;
+ size_t len;
+ void *p;
+
+ /* we'd already checked that it's non-empty */
+ while (1) {
+ len = iov_iter_single_seg_count(data);
+ if (likely(len)) {
+ p = data->kvec->iov_base + data->iov_offset;
+ break;
+ }
+ iov_iter_advance(data, 0);
+ }
+ if (len > count)
+ len = count;
+
+ nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
+ (unsigned long)p / PAGE_SIZE;
+
+ *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+ if (!*pages)
+ return -ENOMEM;
+
+ *need_drop = 0;
+ p -= (*offs = (unsigned long)p % PAGE_SIZE);
+ for (index = 0; index < nr_pages; index++) {
+ if (is_vmalloc_addr(p))
+ (*pages)[index] = vmalloc_to_page(p);
else
- pages[index++] = kmap_to_page(data);
- data += s;
- nr_pages--;
+ (*pages)[index] = kmap_to_page(p);
+ p += PAGE_SIZE;
}
- nr_pages = count;
+ return len;
}
- return nr_pages;
}
/**
@@ -364,8 +394,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
*/
static int
p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
- char *uidata, char *uodata, int inlen,
- int outlen, int in_hdr_len, int kern_buf)
+ struct iov_iter *uidata, struct iov_iter *uodata,
+ int inlen, int outlen, int in_hdr_len)
{
int in, out, err, out_sgs, in_sgs;
unsigned long flags;
@@ -373,41 +403,32 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
struct page **in_pages = NULL, **out_pages = NULL;
struct virtio_chan *chan = client->trans;
struct scatterlist *sgs[4];
+ size_t offs;
+ int need_drop = 0;
p9_debug(P9_DEBUG_TRANS, "virtio request\n");
if (uodata) {
- out_nr_pages = p9_nr_pages(uodata, outlen);
- out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
- GFP_NOFS);
- if (!out_pages) {
- err = -ENOMEM;
- goto err_out;
+ int n = p9_get_mapped_pages(chan, &out_pages, uodata,
+ outlen, &offs, &need_drop);
+ if (n < 0)
+ return n;
+ out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+ if (n != outlen) {
+ __le32 v = cpu_to_le32(n);
+ memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+ outlen = n;
}
- out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
- out_nr_pages, 0, kern_buf);
- if (out_nr_pages < 0) {
- err = out_nr_pages;
- kfree(out_pages);
- out_pages = NULL;
- goto err_out;
- }
- }
- if (uidata) {
- in_nr_pages = p9_nr_pages(uidata, inlen);
- in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
- GFP_NOFS);
- if (!in_pages) {
- err = -ENOMEM;
- goto err_out;
- }
- in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
- in_nr_pages, 1, kern_buf);
- if (in_nr_pages < 0) {
- err = in_nr_pages;
- kfree(in_pages);
- in_pages = NULL;
- goto err_out;
+ } else if (uidata) {
+ int n = p9_get_mapped_pages(chan, &in_pages, uidata,
+ inlen, &offs, &need_drop);
+ if (n < 0)
+ return n;
+ in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+ if (n != inlen) {
+ __le32 v = cpu_to_le32(n);
+ memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+ inlen = n;
}
}
req->status = REQ_STATUS_SENT;
@@ -426,7 +447,7 @@ req_retry_pinned:
if (out_pages) {
sgs[out_sgs++] = chan->sg + out;
out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
- out_pages, out_nr_pages, uodata, outlen);
+ out_pages, out_nr_pages, offs, outlen);
}
/*
@@ -444,7 +465,7 @@ req_retry_pinned:
if (in_pages) {
sgs[out_sgs + in_sgs++] = chan->sg + out + in;
in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
- in_pages, in_nr_pages, uidata, inlen);
+ in_pages, in_nr_pages, offs, inlen);
}
BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
@@ -478,7 +499,7 @@ req_retry_pinned:
* Non kernel buffers are pinned, unpin them
*/
err_out:
- if (!kern_buf) {
+ if (need_drop) {
if (in_pages) {
p9_release_pages(in_pages, in_nr_pages);
atomic_sub(in_nr_pages, &vp_pinned);
@@ -504,7 +525,10 @@ static ssize_t p9_mount_tag_show(struct device *dev,
vdev = dev_to_virtio(dev);
chan = vdev->priv;
- return snprintf(buf, chan->tag_len + 1, "%s", chan->tag);
+ memcpy(buf, chan->tag, chan->tag_len);
+ buf[chan->tag_len] = 0;
+
+ return chan->tag_len + 1;
}
static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
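The trans_virtio.c hunks above switch the zero-copy path from raw char * buffers to struct iov_iter: p9_get_mapped_pages() now returns the number of bytes it mapped plus the offset into the first page, and the caller derives the page count with DIV_ROUND_UP(n + offs, PAGE_SIZE). A self-contained sketch of just that arithmetic follows, with PAGE_SIZE assumed to be 4096 and span_pages() a made-up name.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE       4096UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* Given a buffer that may start mid-page, report the offset into the
 * first page and how many pages the buffer spans. */
static unsigned long span_pages(uintptr_t addr, size_t len, size_t *offs)
{
        *offs = addr % PAGE_SIZE;
        return DIV_ROUND_UP(len + *offs, PAGE_SIZE);
}

int main(void)
{
        size_t offs;
        unsigned long n = span_pages(0x1000f00, 8192, &offs);

        /* 8192 bytes starting 0xf00 into a page touch 3 pages */
        printf("offs=%zu pages=%lu\n", offs, n);
        return 0;
}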
diff --git a/net/Makefile b/net/Makefile
index 38704bdf941a..3995613e5510 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -69,7 +69,7 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_OPENVSWITCH) += openvswitch/
obj-$(CONFIG_VSOCKETS) += vmw_vsock/
-obj-$(CONFIG_NET_MPLS_GSO) += mpls/
+obj-$(CONFIG_MPLS) += mpls/
obj-$(CONFIG_HSR) += hsr/
ifneq ($(CONFIG_NET_SWITCHDEV),)
obj-y += switchdev/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index d1c55d8dd0a2..8ad3ec2610b6 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -141,7 +141,7 @@ static void __aarp_send_query(struct aarp_entry *a)
eah->pa_src_net = sat->s_net;
eah->pa_src_node = sat->s_node;
- memset(eah->hw_dst, '\0', ETH_ALEN);
+ eth_zero_addr(eah->hw_dst);
eah->pa_dst_zero = 0;
eah->pa_dst_net = a->target_addr.s_net;
@@ -189,7 +189,7 @@ static void aarp_send_reply(struct net_device *dev, struct atalk_addr *us,
eah->pa_src_node = us->s_node;
if (!sha)
- memset(eah->hw_dst, '\0', ETH_ALEN);
+ eth_zero_addr(eah->hw_dst);
else
ether_addr_copy(eah->hw_dst, sha);
@@ -239,7 +239,7 @@ static void aarp_send_probe(struct net_device *dev, struct atalk_addr *us)
eah->pa_src_net = us->s_net;
eah->pa_src_node = us->s_node;
- memset(eah->hw_dst, '\0', ETH_ALEN);
+ eth_zero_addr(eah->hw_dst);
eah->pa_dst_zero = 0;
eah->pa_dst_net = us->s_net;
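The three aarp.c hunks above (like the later lec.c and bnep changes in this diff) replace open-coded memset() calls on hardware addresses with eth_zero_addr(), and bnep_net_setup() gets the matching eth_broadcast_addr(). Both helpers come from <linux/etherdevice.h> and boil down to a fixed-length memset; the re-statements below are for illustration only, not the kernel's definitions verbatim.

#include <string.h>

#define ETH_ALEN 6      /* length of an Ethernet/MAC address */

static inline void my_eth_zero_addr(unsigned char *addr)
{
        memset(addr, 0x00, ETH_ALEN);   /* 00:00:00:00:00:00 */
}

static inline void my_eth_broadcast_addr(unsigned char *addr)
{
        memset(addr, 0xff, ETH_ALEN);   /* ff:ff:ff:ff:ff:ff */
}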
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 0d0766ea5ab1..3b7ad43c7dad 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1559,8 +1559,7 @@ freeit:
return 0;
}
-static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t len)
+static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
@@ -1728,8 +1727,8 @@ out:
return err ? : len;
}
-static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int flags)
+static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct ddpehdr *ddp;
diff --git a/net/atm/common.c b/net/atm/common.c
index b84057e41bd6..ed0466637e13 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -523,8 +523,8 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
return 0;
}
-int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int flags)
+int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
@@ -569,8 +569,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
return copied;
}
-int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t size)
+int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
{
struct sock *sk = sock->sk;
DEFINE_WAIT(wait);
diff --git a/net/atm/common.h b/net/atm/common.h
index cc3c2dae4d79..4d6f5b2068ac 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -13,10 +13,9 @@
int vcc_create(struct net *net, struct socket *sock, int protocol, int family);
int vcc_release(struct socket *sock);
int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
-int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int flags);
-int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t total_len);
+int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
+int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
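The vcc_sendmsg()/vcc_recvmsg() hunks above, like the appletalk, ax25 and bluetooth ones elsewhere in this diff, drop the unused struct kiocb * argument from socket sendmsg/recvmsg handlers. A kernel-context sketch of the resulting proto_ops shape; my_sendmsg/my_recvmsg and the ops fragment are placeholders, and the stubs do no real I/O.

#include <linux/net.h>
#include <linux/socket.h>

static int my_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                      int flags)
{
        return 0;               /* a real handler copies data into msg */
}

static int my_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
        return (int)len;        /* a real handler queues len bytes */
}

static const struct proto_ops my_ops_fragment = {
        .sendmsg        = my_sendmsg,
        .recvmsg        = my_recvmsg,
        /* the remaining proto_ops members are omitted in this sketch */
};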
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 4b98f897044a..cd3b37989057 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -2001,7 +2001,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
if (entry == NULL)
goto out;
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
- memset(entry->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(entry->mac_addr);
entry->recv_vcc = vcc;
entry->old_recv_push = old_push;
entry->status = ESI_UNKNOWN;
@@ -2086,7 +2086,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
entry->vcc = vcc;
entry->old_push = old_push;
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
- memset(entry->mac_addr, 0, ETH_ALEN);
+ eth_zero_addr(entry->mac_addr);
entry->status = ESI_UNKNOWN;
hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
entry->timer.expires = jiffies + priv->vcc_timeout_period;
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 5bdd300db0f7..2df34eb5d65f 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -272,7 +272,7 @@ static int parse_qos(const char *buff)
qos.rxtp.max_pcr = rx_pcr;
qos.rxtp.max_sdu = rx_sdu;
qos.aal = ATM_AAL5;
- dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
+ dprintk("parse_qos(): setting qos parameters to tx=%d,%d rx=%d,%d\n",
qos.txtp.max_pcr, qos.txtp.max_sdu,
qos.rxtp.max_pcr, qos.rxtp.max_sdu);
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 523bce72f698..4fd6af47383a 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -19,36 +19,15 @@
#include "resources.h"
#include "signaling.h"
-#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets
- should block until the demon runs.
- Danger: may cause nasty hangs if the demon
- crashes. */
-
struct atm_vcc *sigd = NULL;
-#ifdef WAIT_FOR_DEMON
-static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
-#endif
static void sigd_put_skb(struct sk_buff *skb)
{
-#ifdef WAIT_FOR_DEMON
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&sigd_sleep, &wait);
- while (!sigd) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- pr_debug("atmsvc: waiting for signaling daemon...\n");
- schedule();
- }
- current->state = TASK_RUNNING;
- remove_wait_queue(&sigd_sleep, &wait);
-#else
if (!sigd) {
pr_debug("atmsvc: no signaling daemon\n");
kfree_skb(skb);
return;
}
-#endif
atm_force_charge(sigd, skb->truesize);
skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
@@ -261,8 +240,5 @@ int sigd_attach(struct atm_vcc *vcc)
vcc_insert_socket(sk_atm(vcc));
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
-#ifdef WAIT_FOR_DEMON
- wake_up(&sigd_sleep);
-#endif
return 0;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index ca049a7c9287..330c1f4a5a0b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1432,8 +1432,7 @@ out:
return err;
}
-static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name);
struct sock *sk = sock->sk;
@@ -1599,8 +1598,8 @@ out:
return err;
}
-static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 67de6b33f2c3..7c646bb2c6f7 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -46,9 +46,9 @@
#ifdef CONFIG_INET
-int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned int len)
+static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
{
unsigned char *buff;
@@ -100,7 +100,7 @@ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
return -AX25_HEADER_LEN; /* Unfinished header */
}
-int ax25_rebuild_header(struct sk_buff *skb)
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
{
struct sk_buff *ourskb;
unsigned char *bp = skb->data;
@@ -115,9 +115,6 @@ int ax25_rebuild_header(struct sk_buff *skb)
dst = (ax25_address *)(bp + 1);
src = (ax25_address *)(bp + 8);
- if (arp_find(bp + 1, skb))
- return 1;
-
route = ax25_get_route(dst, NULL);
if (route) {
digipeat = route->digipeat;
@@ -129,6 +126,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
dev = skb->dev;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
+ kfree_skb(skb);
goto put;
}
@@ -212,31 +210,29 @@ put:
if (route)
ax25_put_route(route);
- return 1;
+ return NETDEV_TX_OK;
}
#else /* INET */
-int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned int len)
+static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
{
return -AX25_HEADER_LEN;
}
-int ax25_rebuild_header(struct sk_buff *skb)
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
{
- return 1;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
-
#endif
const struct header_ops ax25_header_ops = {
.create = ax25_hard_header,
- .rebuild = ax25_rebuild_header,
};
-EXPORT_SYMBOL(ax25_hard_header);
-EXPORT_SYMBOL(ax25_rebuild_header);
EXPORT_SYMBOL(ax25_header_ops);
+EXPORT_SYMBOL(ax25_ip_xmit);
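The ax25_ip.c change above retires the rebuild_header path (and its arp_find() call) in favour of ax25_ip_xmit(), which returns netdev_tx_t and owns the skb on every path: error branches now kfree_skb() instead of returning 1. A kernel-context sketch of that contract, with hypothetical my_resolve()/my_queue() helpers standing in for the real routing and queueing code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool my_resolve(struct sk_buff *skb)
{
        return skb->len != 0;           /* placeholder routing decision */
}

static void my_queue(struct sk_buff *skb)
{
        consume_skb(skb);               /* placeholder for the real xmit */
}

static netdev_tx_t my_ip_xmit(struct sk_buff *skb)
{
        if (!my_resolve(skb)) {
                kfree_skb(skb);         /* drop, but report success */
                return NETDEV_TX_OK;
        }
        my_queue(skb);                  /* skb ownership passed on */
        return NETDEV_TX_OK;
}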
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 27649e85f3f6..090828cf1fa7 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -592,15 +592,16 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
- (curr_gw == gw_node ? "=>" : " "),
- gw_node->orig_node->orig,
- router_ifinfo->bat_iv.tq_avg, router->addr,
- router->if_incoming->net_dev->name,
- gw_node->bandwidth_down / 10,
- gw_node->bandwidth_down % 10,
- gw_node->bandwidth_up / 10,
- gw_node->bandwidth_up % 10);
+ seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
+ (curr_gw == gw_node ? "=>" : " "),
+ gw_node->orig_node->orig,
+ router_ifinfo->bat_iv.tq_avg, router->addr,
+ router->if_incoming->net_dev->name,
+ gw_node->bandwidth_down / 10,
+ gw_node->bandwidth_down % 10,
+ gw_node->bandwidth_up / 10,
+ gw_node->bandwidth_up % 10);
+ ret = seq_has_overflowed(seq) ? -1 : 0;
if (curr_gw)
batadv_gw_node_free_ref(curr_gw);
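The gateway_client.c hunk tracks seq_file's move away from a meaningful seq_printf() return value: print unconditionally, then ask seq_has_overflowed() whether the buffer filled. A kernel-context sketch of the same pattern in a generic show helper (my_show_one() is a placeholder name).

#include <linux/seq_file.h>

static int my_show_one(struct seq_file *seq, int value)
{
        seq_printf(seq, "value: %d\n", value);

        /* -1 tells the caller the entry did not fit and the read must be
         * retried with a bigger buffer, matching the batman-adv usage above. */
        return seq_has_overflowed(seq) ? -1 : 0;
}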
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index fbda6b54baff..baf1f9843f2c 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -83,11 +83,12 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
return true;
/* no more parents..stop recursion */
- if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
+ if (dev_get_iflink(net_dev) == 0 ||
+ dev_get_iflink(net_dev) == net_dev->ifindex)
return false;
/* recurse over the parent device */
- parent_dev = __dev_get_by_index(&init_net, net_dev->iflink);
+ parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev));
/* if we got a NULL parent_dev there is something broken.. */
if (WARN(!parent_dev, "Cannot find parent device"))
return false;
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 7de74635a110..b8c794b87523 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -91,4 +91,12 @@ config BT_SELFTEST_SMP
Run test cases for SMP cryptographic functionality, including both
legacy SMP as well as the Secure Connections features.
+config BT_DEBUGFS
+ bool "Export Bluetooth internals in debugfs"
+ depends on BT && DEBUG_FS
+ default y
+ help
+ Provide extensive information about internal Bluetooth states
+ in debugfs.
+
source "drivers/bluetooth/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 8e96e3072266..9a8ea232d28f 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,8 +13,9 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
- a2mp.o amp.o ecc.o hci_request.o hci_debugfs.o
+ a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
+bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index cedfbda15dad..5a04eb1a7e57 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -19,9 +19,11 @@
#include "a2mp.h"
#include "amp.h"
+#define A2MP_FEAT_EXT 0x8000
+
/* Global AMP Manager list */
-LIST_HEAD(amp_mgr_list);
-DEFINE_MUTEX(amp_mgr_list_lock);
+static LIST_HEAD(amp_mgr_list);
+static DEFINE_MUTEX(amp_mgr_list_lock);
/* A2MP build & send command helper functions */
static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
@@ -43,7 +45,7 @@ static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
return cmd;
}
-void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
+static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
{
struct l2cap_chan *chan = mgr->a2mp_chan;
struct a2mp_cmd *cmd;
@@ -67,7 +69,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
kfree(cmd);
}
-u8 __next_ident(struct amp_mgr *mgr)
+static u8 __next_ident(struct amp_mgr *mgr)
{
if (++mgr->ident == 0)
mgr->ident = 1;
@@ -75,6 +77,23 @@ u8 __next_ident(struct amp_mgr *mgr)
return mgr->ident;
}
+static struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
+{
+ struct amp_mgr *mgr;
+
+ mutex_lock(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if (test_and_clear_bit(state, &mgr->state)) {
+ amp_mgr_get(mgr);
+ mutex_unlock(&amp_mgr_list_lock);
+ return mgr;
+ }
+ }
+ mutex_unlock(&amp_mgr_list_lock);
+
+ return NULL;
+}
+
/* hci_dev_list shall be locked */
static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
{
@@ -860,23 +879,6 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
return mgr->a2mp_chan;
}
-struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
-{
- struct amp_mgr *mgr;
-
- mutex_lock(&amp_mgr_list_lock);
- list_for_each_entry(mgr, &amp_mgr_list, list) {
- if (test_and_clear_bit(state, &mgr->state)) {
- amp_mgr_get(mgr);
- mutex_unlock(&amp_mgr_list_lock);
- return mgr;
- }
- }
- mutex_unlock(&amp_mgr_list_lock);
-
- return NULL;
-}
-
void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
{
struct amp_mgr *mgr;
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
index 487b54c1308f..296f665adb09 100644
--- a/net/bluetooth/a2mp.h
+++ b/net/bluetooth/a2mp.h
@@ -17,8 +17,6 @@
#include <net/bluetooth/l2cap.h>
-#define A2MP_FEAT_EXT 0x8000
-
enum amp_mgr_state {
READ_LOC_AMP_INFO,
READ_LOC_AMP_ASSOC,
@@ -131,16 +129,10 @@ struct a2mp_physlink_rsp {
#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
#define A2MP_STATUS_SECURITY_VIOLATION 0x06
-extern struct list_head amp_mgr_list;
-extern struct mutex amp_mgr_list_lock;
-
struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
int amp_mgr_put(struct amp_mgr *mgr);
-u8 __next_ident(struct amp_mgr *mgr);
struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
struct sk_buff *skb);
-struct amp_mgr *amp_mgr_lookup_by_state(u8 state);
-void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data);
void a2mp_discover_amp(struct l2cap_chan *chan);
void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index ce22e0cfa923..70f9d945faf7 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,8 +210,8 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}
EXPORT_SYMBOL(bt_accept_dequeue);
-int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -283,8 +283,8 @@ static long bt_sock_data_wait(struct sock *sk, long timeo)
return timeo;
}
-int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct sock *sk = sock->sk;
int err = 0;
@@ -711,10 +711,9 @@ EXPORT_SYMBOL_GPL(bt_debugfs);
static int __init bt_init(void)
{
- struct sk_buff *skb;
int err;
- BUILD_BUG_ON(sizeof(struct bt_skb_cb) > sizeof(skb->cb));
+ sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
BT_INFO("Core ver %s", VERSION);
@@ -750,6 +749,13 @@ static int __init bt_init(void)
goto sock_err;
}
+ err = mgmt_init();
+ if (err < 0) {
+ sco_exit();
+ l2cap_exit();
+ goto sock_err;
+ }
+
return 0;
sock_err:
@@ -764,6 +770,8 @@ error:
static void __exit bt_exit(void)
{
+ mgmt_exit();
+
sco_exit();
l2cap_exit();
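The af_bluetooth.c hunks register a new mgmt_init() step after the SCO and L2CAP layers come up, unwinding them again if it fails, and bt_exit() tears everything down in reverse order starting with mgmt_exit(). Below is a self-contained sketch of that init/unwind/exit ordering with placeholder init_a/init_b/init_c stages.

static int init_a(void) { return 0; }   /* placeholder stages */
static int init_b(void) { return 0; }
static int init_c(void) { return 0; }
static void exit_a(void) { }
static void exit_b(void) { }
static void exit_c(void) { }

static int my_init(void)
{
        int err;

        err = init_a();
        if (err)
                return err;
        err = init_b();
        if (err)
                goto err_a;
        err = init_c();
        if (err)
                goto err_b;             /* unwind what already succeeded */
        return 0;

err_b:
        exit_b();
err_a:
        exit_a();
        return err;
}

static void my_exit(void)
{
        exit_c();                       /* reverse order of my_init() */
        exit_b();
        exit_a();
}

int main(void)
{
        if (my_init())
                return 1;
        my_exit();
        return 0;
}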
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 5a5b16f365e9..40854c99bc1e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -111,6 +111,10 @@ struct bnep_ext_hdr {
#define BNEPCONNDEL _IOW('B', 201, int)
#define BNEPGETCONNLIST _IOR('B', 210, int)
#define BNEPGETCONNINFO _IOR('B', 211, int)
+#define BNEPGETSUPPFEAT _IOR('B', 212, int)
+
+#define BNEP_SETUP_RESPONSE 0
+#define BNEP_SETUP_RSP_SENT 10
struct bnep_connadd_req {
int sock; /* Connected socket */
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 05f57e491ccb..1641367e54ca 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -231,7 +231,14 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
break;
case BNEP_SETUP_CONN_REQ:
- err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED);
+ /* Successful response should be sent only once */
+ if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) &&
+ !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags))
+ err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
+ BNEP_SUCCESS);
+ else
+ err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
+ BNEP_CONN_NOT_ALLOWED);
break;
default: {
@@ -239,7 +246,7 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
pkt[0] = BNEP_CONTROL;
pkt[1] = BNEP_CMD_NOT_UNDERSTOOD;
pkt[2] = cmd;
- bnep_send(s, pkt, sizeof(pkt));
+ err = bnep_send(s, pkt, sizeof(pkt));
}
break;
}
@@ -292,29 +299,55 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct net_device *dev = s->dev;
struct sk_buff *nskb;
- u8 type;
+ u8 type, ctrl_type;
dev->stats.rx_bytes += skb->len;
type = *(u8 *) skb->data;
skb_pull(skb, 1);
+ ctrl_type = *(u8 *)skb->data;
if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
goto badframe;
if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
- bnep_rx_control(s, skb->data, skb->len);
- kfree_skb(skb);
- return 0;
- }
+ if (bnep_rx_control(s, skb->data, skb->len) < 0) {
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return 0;
+ }
- skb_reset_mac_header(skb);
+ if (!(type & BNEP_EXT_HEADER)) {
+ kfree_skb(skb);
+ return 0;
+ }
- /* Verify and pull out header */
- if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
- goto badframe;
+ /* Verify and pull ctrl message since it's already processed */
+ switch (ctrl_type) {
+ case BNEP_SETUP_CONN_REQ:
+ /* Pull: ctrl type (1 b), len (1 b), data (len bytes) */
+ if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2))
+ goto badframe;
+ break;
+ case BNEP_FILTER_MULTI_ADDR_SET:
+ case BNEP_FILTER_NET_TYPE_SET:
+ /* Pull: ctrl type (1 b), len (2 b), data (len bytes) */
+ if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2))
+ goto badframe;
+ break;
+ default:
+ kfree_skb(skb);
+ return 0;
+ }
+ } else {
+ skb_reset_mac_header(skb);
- s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
+ /* Verify and pull out header */
+ if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
+ goto badframe;
+
+ s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
+ }
if (type & BNEP_EXT_HEADER) {
if (bnep_rx_extension(s, skb) < 0)
@@ -525,6 +558,7 @@ static struct device_type bnep_type = {
int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
{
+ u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
struct net_device *dev;
struct bnep_session *s, *ss;
u8 dst[ETH_ALEN], src[ETH_ALEN];
@@ -535,6 +569,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
if (!l2cap_is_socket(sock))
return -EBADFD;
+ if (req->flags & ~valid_flags)
+ return -EINVAL;
+
baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
@@ -566,6 +603,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
s->sock = sock;
s->role = req->role;
s->state = BT_CONNECTED;
+ s->flags = req->flags;
s->msg.msg_flags = MSG_NOSIGNAL;
@@ -611,11 +649,15 @@ failed:
int bnep_del_connection(struct bnep_conndel_req *req)
{
+ u32 valid_flags = 0;
struct bnep_session *s;
int err = 0;
BT_DBG("");
+ if (req->flags & ~valid_flags)
+ return -EINVAL;
+
down_read(&bnep_session_sem);
s = __bnep_get_session(req->dst);
@@ -631,10 +673,12 @@ int bnep_del_connection(struct bnep_conndel_req *req)
static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
{
+ u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
+
memset(ci, 0, sizeof(*ci));
memcpy(ci->dst, s->eh.h_source, ETH_ALEN);
strcpy(ci->device, s->dev->name);
- ci->flags = s->flags;
+ ci->flags = s->flags & valid_flags;
ci->state = s->state;
ci->role = s->role;
}
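The bnep core.c hunks above introduce a valid_flags mask so that userspace-supplied req->flags with unknown bits are rejected with -EINVAL, and only known bits are ever echoed back through conninfo. A self-contained sketch of that whitelist idiom; MY_FLAG_SETUP_RESPONSE is an illustrative name, not the kernel constant.

#include <errno.h>
#include <stdint.h>

#define BIT(n)                  (1UL << (n))
#define MY_FLAG_SETUP_RESPONSE  0

static int check_flags(uint32_t req_flags)
{
        const uint32_t valid_flags = BIT(MY_FLAG_SETUP_RESPONSE);

        if (req_flags & ~valid_flags)
                return -EINVAL; /* unknown bit set: future-proof rejection */
        return 0;
}

static uint32_t report_flags(uint32_t session_flags)
{
        const uint32_t valid_flags = BIT(MY_FLAG_SETUP_RESPONSE);

        return session_flags & valid_flags;     /* never leak internal bits */
}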
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 4b488ec26105..6ceb5d36a32b 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -218,7 +218,7 @@ static const struct net_device_ops bnep_netdev_ops = {
void bnep_net_setup(struct net_device *dev)
{
- memset(dev->broadcast, 0xff, ETH_ALEN);
+ eth_broadcast_addr(dev->broadcast);
dev->addr_len = ETH_ALEN;
ether_setup(dev);
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 5f051290daba..bde2bdd9e929 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -57,6 +57,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
struct bnep_conninfo ci;
struct socket *nsock;
void __user *argp = (void __user *)arg;
+ __u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
@@ -120,6 +121,12 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return err;
+ case BNEPGETSUPPFEAT:
+ if (copy_to_user(argp, &supp_feat, sizeof(supp_feat)))
+ return -EFAULT;
+
+ return 0;
+
default:
return -EINVAL;
}
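The bnep/sock.c hunk adds a BNEPGETSUPPFEAT ioctl that copies a supported-features bitmask back to userspace. A hedged userspace sketch of how a BNEP daemon might query it; the ioctl number is taken from the header hunk above, fd is assumed to be an already-open BNEP control socket, and bnep_query_features() is a made-up helper.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#ifndef BNEPGETSUPPFEAT
#define BNEPGETSUPPFEAT _IOR('B', 212, int)
#endif

static int bnep_query_features(int fd)
{
        uint32_t feat = 0;

        if (ioctl(fd, BNEPGETSUPPFEAT, &feat) < 0)
                return -1;      /* older kernel: ioctl not implemented */

        printf("bnep features: 0x%x\n", feat);
        return 0;
}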
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 75bd2c42e3e7..b0c6c6af76ef 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -333,7 +333,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
return;
}
- if (session->flags & (1 << CMTP_LOOPBACK)) {
+ if (session->flags & BIT(CMTP_LOOPBACK)) {
kfree_skb(skb);
return;
}
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 278a194e6af4..298ed37010e6 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -75,10 +75,11 @@ static void __cmtp_unlink_session(struct cmtp_session *session)
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
{
+ u32 valid_flags = BIT(CMTP_LOOPBACK);
memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);
- ci->flags = session->flags;
+ ci->flags = session->flags & valid_flags;
ci->state = session->state;
ci->num = session->num;
@@ -313,7 +314,7 @@ static int cmtp_session(void *arg)
down_write(&cmtp_session_sem);
- if (!(session->flags & (1 << CMTP_LOOPBACK)))
+ if (!(session->flags & BIT(CMTP_LOOPBACK)))
cmtp_detach_device(session);
fput(session->sock->file);
@@ -329,6 +330,7 @@ static int cmtp_session(void *arg)
int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
{
+ u32 valid_flags = BIT(CMTP_LOOPBACK);
struct cmtp_session *session, *s;
int i, err;
@@ -337,6 +339,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
if (!l2cap_is_socket(sock))
return -EBADFD;
+ if (req->flags & ~valid_flags)
+ return -EINVAL;
+
session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
@@ -385,7 +390,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
goto unlink;
}
- if (!(session->flags & (1 << CMTP_LOOPBACK))) {
+ if (!(session->flags & BIT(CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
if (err < 0) {
atomic_inc(&session->terminate);
@@ -409,11 +414,15 @@ failed:
int cmtp_del_connection(struct cmtp_conndel_req *req)
{
+ u32 valid_flags = 0;
struct cmtp_session *session;
int err = 0;
BT_DBG("");
+ if (req->flags & ~valid_flags)
+ return -EINVAL;
+
down_read(&cmtp_session_sem);
session = __cmtp_get_session(&req->bdaddr);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index c9b8fa544785..ee5e59839b02 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -309,7 +309,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
else
hci_add_sco(sco, conn->handle);
} else {
- hci_proto_connect_cfm(sco, status);
+ hci_connect_cfm(sco, status);
hci_conn_del(sco);
}
}
@@ -571,7 +571,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) ||
- test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
+ hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
d->dev_type != HCI_BREDR)
continue;
@@ -618,7 +618,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
status);
- hci_proto_connect_cfm(conn, status);
+ hci_connect_cfm(conn, status);
hci_conn_del(conn);
@@ -700,7 +700,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
- clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
@@ -733,6 +733,14 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_request req;
int err;
+ /* Let's make sure that le is enabled.*/
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+ if (lmp_le_capable(hdev))
+ return ERR_PTR(-ECONNREFUSED);
+
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
/* Some devices send ATT messages as soon as the physical link is
* established. To be able to handle these ATT messages, the user-
* space first establishes the connection and then starts the pairing
@@ -791,7 +799,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* anyway have to disable it in order to start directed
* advertising.
*/
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
u8 enable = 0x00;
hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
&enable);
@@ -802,7 +810,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
/* If we're active scanning most controllers are unable
* to initiate advertising. Simply reject the attempt.
*/
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->le_scan_type == LE_SCAN_ACTIVE) {
skb_queue_purge(&req.cmd_q);
hci_conn_del(conn);
@@ -832,9 +840,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* handler for scan disabling knows to set the correct discovery
* state.
*/
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(&req);
- set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
hci_req_add_le_create_conn(&req, conn);
@@ -856,8 +864,12 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
{
struct hci_conn *acl;
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ if (lmp_bredr_capable(hdev))
+ return ERR_PTR(-ECONNREFUSED);
+
return ERR_PTR(-EOPNOTSUPP);
+ }
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
@@ -930,7 +942,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
* Connections is used and the link is encrypted with AES-CCM
* using a P-256 authenticated combination key.
*/
- if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
+ if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
if (!hci_conn_sc_enabled(conn) ||
!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
@@ -1139,7 +1151,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
list_for_each_entry_safe(c, n, &h->list, list) {
c->state = BT_CLOSED;
- hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
+ hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
hci_conn_del(c);
}
}
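The hci_conn.c hunks above (and the hci_core.c ones that follow) replace open-coded test_bit()/set_bit()/clear_bit() on hdev->dev_flags with hci_dev_test_flag() and friends. The sketch below shows the spirit of such wrappers over a bitmap-style flags field; struct my_dev and the helper names are placeholders, not the net/bluetooth definitions.

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_NUM_FLAGS    64

struct my_dev {
        DECLARE_BITMAP(dev_flags, MY_NUM_FLAGS);
};

static inline bool my_dev_test_flag(struct my_dev *d, int nr)
{
        return test_bit(nr, d->dev_flags);
}

static inline void my_dev_set_flag(struct my_dev *d, int nr)
{
        set_bit(nr, d->dev_flags);
}

static inline void my_dev_clear_flag(struct my_dev *d, int nr)
{
        clear_bit(nr, d->dev_flags);
}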
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 3322d3f4c85a..4663c3dad3f5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -51,7 +51,7 @@ DEFINE_RWLOCK(hci_dev_list_lock);
/* HCI callback list */
LIST_HEAD(hci_cb_list);
-DEFINE_RWLOCK(hci_cb_list_lock);
+DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
@@ -80,7 +80,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
- buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
+ buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -106,7 +106,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
if (strtobool(buf, &enable))
return -EINVAL;
- if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
+ if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
return -EALREADY;
hci_req_lock(hdev);
@@ -127,7 +127,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
if (err < 0)
return err;
- change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
+ hci_dev_change_flag(hdev, HCI_DUT_MODE);
return count;
}
@@ -141,13 +141,16 @@ static const struct file_operations dut_mode_fops = {
/* ---- HCI requests ---- */
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb)
{
BT_DBG("%s result 0x%2.2x", hdev->name, result);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
+ if (skb)
+ hdev->req_skb = skb_get(skb);
wake_up_interruptible(&hdev->req_wait_q);
}
}
@@ -163,66 +166,12 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
}
-static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
- u8 event)
-{
- struct hci_ev_cmd_complete *ev;
- struct hci_event_hdr *hdr;
- struct sk_buff *skb;
-
- hci_dev_lock(hdev);
-
- skb = hdev->recv_evt;
- hdev->recv_evt = NULL;
-
- hci_dev_unlock(hdev);
-
- if (!skb)
- return ERR_PTR(-ENODATA);
-
- if (skb->len < sizeof(*hdr)) {
- BT_ERR("Too short HCI event");
- goto failed;
- }
-
- hdr = (void *) skb->data;
- skb_pull(skb, HCI_EVENT_HDR_SIZE);
-
- if (event) {
- if (hdr->evt != event)
- goto failed;
- return skb;
- }
-
- if (hdr->evt != HCI_EV_CMD_COMPLETE) {
- BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
- goto failed;
- }
-
- if (skb->len < sizeof(*ev)) {
- BT_ERR("Too short cmd_complete event");
- goto failed;
- }
-
- ev = (void *) skb->data;
- skb_pull(skb, sizeof(*ev));
-
- if (opcode == __le16_to_cpu(ev->opcode))
- return skb;
-
- BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
- __le16_to_cpu(ev->opcode));
-
-failed:
- kfree_skb(skb);
- return ERR_PTR(-ENODATA);
-}
-
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
struct hci_request req;
+ struct sk_buff *skb;
int err = 0;
BT_DBG("%s", hdev->name);
@@ -236,7 +185,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- err = hci_req_run(&req, hci_req_sync_complete);
+ err = hci_req_run_skb(&req, hci_req_sync_complete);
if (err < 0) {
remove_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_RUNNING);
@@ -265,13 +214,20 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
}
hdev->req_status = hdev->req_result = 0;
+ skb = hdev->req_skb;
+ hdev->req_skb = NULL;
BT_DBG("%s end: err %d", hdev->name, err);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
return ERR_PTR(err);
+ }
+
+ if (!skb)
+ return ERR_PTR(-ENODATA);
- return hci_get_cmd_complete(hdev, opcode, event);
+ return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
@@ -303,7 +259,7 @@ static int __hci_req_sync(struct hci_dev *hdev,
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- err = hci_req_run(&req, hci_req_sync_complete);
+ err = hci_req_run_skb(&req, hci_req_sync_complete);
if (err < 0) {
hdev->req_status = 0;
@@ -390,7 +346,7 @@ static void bredr_init(struct hci_request *req)
hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
-static void amp_init(struct hci_request *req)
+static void amp_init1(struct hci_request *req)
{
req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
@@ -400,9 +356,6 @@ static void amp_init(struct hci_request *req)
/* Read Local Supported Commands */
hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
- /* Read Local Supported Features */
- hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
/* Read Local AMP Info */
hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
@@ -416,6 +369,16 @@ static void amp_init(struct hci_request *req)
hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
+static void amp_init2(struct hci_request *req)
+{
+ /* Read Local Supported Features. Not all AMP controllers
+ * support this so it's placed conditionally in the second
+ * stage init.
+ */
+ if (req->hdev->commands[14] & 0x20)
+ hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+}
+
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
@@ -432,7 +395,7 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
break;
case HCI_AMP:
- amp_init(req);
+ amp_init1(req);
break;
default:
@@ -494,7 +457,7 @@ static void le_setup(struct hci_request *req)
/* LE-only controllers have LE implicitly enabled */
if (!lmp_bredr_capable(hdev))
- set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
@@ -578,10 +541,13 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
+ if (hdev->dev_type == HCI_AMP)
+ return amp_init2(req);
+
if (lmp_bredr_capable(hdev))
bredr_setup(req);
else
- clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
if (lmp_le_capable(hdev))
le_setup(req);
@@ -607,7 +573,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
*/
hdev->max_page = 0x01;
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
u8 mode = 0x01;
hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
@@ -646,7 +612,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
sizeof(cp), &cp);
}
- if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
u8 enable = 1;
hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
&enable);
@@ -683,7 +649,7 @@ static void hci_set_le_support(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
- if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
cp.le = 0x01;
cp.simul = 0x00;
}
@@ -871,7 +837,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Enable Secure Connections if supported and configured */
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
bredr_sc_enabled(hdev)) {
u8 support = 0x01;
@@ -891,22 +857,22 @@ static int __hci_init(struct hci_dev *hdev)
/* The Device Under Test (DUT) mode is special and available for
* all controller types. So just create it early on.
*/
- if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_SETUP)) {
debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
&dut_mode_fops);
}
+ err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
* BR/EDR/LE type controllers. AMP controllers only need the
- * first stage init.
+ * first two stages of init.
*/
if (hdev->dev_type != HCI_BREDR)
return 0;
- err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
- if (err < 0)
- return err;
-
err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
if (err < 0)
return err;
@@ -927,8 +893,8 @@ static int __hci_init(struct hci_dev *hdev)
* So only when in setup phase or config phase, create the debugfs
* entries and register the SMP channels.
*/
- if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
- !test_bit(HCI_CONFIG, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG))
return 0;
hci_debugfs_create_common(hdev);
@@ -1290,12 +1256,12 @@ int hci_inquiry(void __user *arg)
if (!hdev)
return -ENODEV;
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
err = -EOPNOTSUPP;
goto done;
}
@@ -1305,7 +1271,7 @@ int hci_inquiry(void __user *arg)
goto done;
}
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
}
@@ -1377,17 +1343,17 @@ static int hci_dev_do_open(struct hci_dev *hdev)
hci_req_lock(hdev);
- if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
ret = -ENODEV;
goto done;
}
- if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
- !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG)) {
/* Check for rfkill but allow the HCI setup stage to
* proceed (which in itself doesn't cause any RF activity).
*/
- if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
ret = -ERFKILL;
goto done;
}
@@ -1404,7 +1370,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
* This check is only valid for BR/EDR controllers
* since AMP controllers do not have an address.
*/
- if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hdev->dev_type == HCI_BREDR &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY)) {
@@ -1426,7 +1392,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
- if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_SETUP)) {
if (hdev->setup)
ret = hdev->setup(hdev);
@@ -1438,7 +1404,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
*/
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
- set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
/* For an unconfigured controller it is required to
* read at least the version information provided by
@@ -1448,11 +1414,11 @@ static int hci_dev_do_open(struct hci_dev *hdev)
* also the original Bluetooth public device address
* will be read using the Read BD Address command.
*/
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
ret = __hci_unconf_init(hdev);
}
- if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
/* If public address change is configured, ensure that
* the address gets programmed. If the driver does not
* support changing the public address, fail the power
@@ -1466,8 +1432,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
}
if (!ret) {
- if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
- !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
ret = __hci_init(hdev);
}
@@ -1475,13 +1441,13 @@ static int hci_dev_do_open(struct hci_dev *hdev)
if (!ret) {
hci_dev_hold(hdev);
- set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
- !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
- !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
- !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG) &&
+ !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hdev->dev_type == HCI_BREDR) {
hci_dev_lock(hdev);
mgmt_powered(hdev, 1);
@@ -1533,8 +1499,8 @@ int hci_dev_open(__u16 dev)
* HCI_USER_CHANNEL will be set first before attempting to
* open the device.
*/
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
- !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EOPNOTSUPP;
goto done;
}
@@ -1544,7 +1510,7 @@ int hci_dev_open(__u16 dev)
* particularly important if the setup procedure has not yet
* completed.
*/
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
cancel_delayed_work(&hdev->power_off);
/* After this call it is guaranteed that the setup procedure
@@ -1559,9 +1525,9 @@ int hci_dev_open(__u16 dev)
* is in use this bit will be cleared again and userspace has
* to explicitly enable it.
*/
- if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
- !test_bit(HCI_MGMT, &hdev->dev_flags))
- set_bit(HCI_BONDABLE, &hdev->dev_flags);
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ !hci_dev_test_flag(hdev, HCI_MGMT))
+ hci_dev_set_flag(hdev, HCI_BONDABLE);
err = hci_dev_do_open(hdev);
@@ -1591,6 +1557,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);
+ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ /* Execute vendor specific shutdown routine */
+ if (hdev->shutdown)
+ hdev->shutdown(hdev);
+ }
+
cancel_delayed_work(&hdev->power_off);
hci_req_cancel(hdev, ENODEV);
@@ -1609,17 +1582,17 @@ static int hci_dev_do_close(struct hci_dev *hdev)
if (hdev->discov_timeout > 0) {
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = 0;
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
- clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
}
- if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
cancel_delayed_work(&hdev->service_cache);
cancel_delayed_work_sync(&hdev->le_scan_disable);
cancel_delayed_work_sync(&hdev->le_scan_restart);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
cancel_delayed_work_sync(&hdev->rpa_expired);
/* Avoid potential lockdep warnings from the *_flush() calls by
@@ -1631,7 +1604,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
if (hdev->dev_type == HCI_BREDR)
mgmt_powered(hdev, 0);
}
@@ -1651,8 +1624,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
- if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
- !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
+ !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -1674,16 +1647,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
- kfree_skb(hdev->recv_evt);
- hdev->recv_evt = NULL;
-
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
/* Clear flags */
hdev->flags &= BIT(HCI_RAW);
- hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
+ hci_dev_clear_volatile_flags(hdev);
/* Controller radio is available but is currently powered down */
hdev->amp_status = AMP_STATUS_POWERED_DOWN;
@@ -1707,12 +1677,12 @@ int hci_dev_close(__u16 dev)
if (!hdev)
return -ENODEV;
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
cancel_delayed_work(&hdev->power_off);
err = hci_dev_do_close(hdev);
@@ -1770,12 +1740,12 @@ int hci_dev_reset(__u16 dev)
goto done;
}
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
err = -EOPNOTSUPP;
goto done;
}
@@ -1796,12 +1766,12 @@ int hci_dev_reset_stat(__u16 dev)
if (!hdev)
return -ENODEV;
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
ret = -EBUSY;
goto done;
}
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
ret = -EOPNOTSUPP;
goto done;
}
@@ -1820,29 +1790,29 @@ static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
BT_DBG("%s scan 0x%02x", hdev->name, scan);
if ((scan & SCAN_PAGE))
- conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
- &hdev->dev_flags);
+ conn_changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_CONNECTABLE);
else
- conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
- &hdev->dev_flags);
+ conn_changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_CONNECTABLE);
if ((scan & SCAN_INQUIRY)) {
- discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
- &hdev->dev_flags);
+ discov_changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_DISCOVERABLE);
} else {
- clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
- discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
- &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ discov_changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_DISCOVERABLE);
}
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
return;
if (conn_changed || discov_changed) {
/* In case this was disabled through mgmt */
- set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
- if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
mgmt_update_adv_data(hdev);
mgmt_new_settings(hdev);
@@ -1862,12 +1832,12 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!hdev)
return -ENODEV;
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
err = -EOPNOTSUPP;
goto done;
}
@@ -1877,7 +1847,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
goto done;
}
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
}
@@ -1981,7 +1951,7 @@ int hci_get_dev_list(void __user *arg)
* is running, but in that case still indicate that the
* device is actually down.
*/
- if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags &= ~BIT(HCI_UP);
(dr + n)->dev_id = hdev->id;
@@ -2019,7 +1989,7 @@ int hci_get_dev_info(void __user *arg)
* is running, but in that case still indicate that the
* device is actually down.
*/
- if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags = hdev->flags & ~BIT(HCI_UP);
else
flags = hdev->flags;
@@ -2062,16 +2032,16 @@ static int hci_rfkill_set_block(void *data, bool blocked)
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
if (blocked) {
- set_bit(HCI_RFKILLED, &hdev->dev_flags);
- if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
- !test_bit(HCI_CONFIG, &hdev->dev_flags))
+ hci_dev_set_flag(hdev, HCI_RFKILLED);
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG))
hci_dev_do_close(hdev);
} else {
- clear_bit(HCI_RFKILLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_RFKILLED);
}
return 0;
@@ -2100,23 +2070,23 @@ static void hci_power_on(struct work_struct *work)
* ignored and they need to be checked now. If they are still
* valid, it is important to turn the device back off.
*/
- if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
- test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+ hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
(hdev->dev_type == HCI_BREDR &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) {
- clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_do_close(hdev);
- } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
HCI_AUTO_OFF_TIMEOUT);
}
- if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
/* For unconfigured devices, set the HCI_RAW flag
* so that userspace can easily identify them.
*/
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
set_bit(HCI_RAW, &hdev->flags);
/* For fully configured devices, this will send
@@ -2127,11 +2097,11 @@ static void hci_power_on(struct work_struct *work)
* and no event will be send.
*/
mgmt_index_added(hdev);
- } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
+ } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
/* When the controller is now configured, then it
* is important to clear the HCI_RAW flag.
*/
- if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
clear_bit(HCI_RAW, &hdev->flags);
/* Powering on the controller with HCI_CONFIG set only
@@ -2500,6 +2470,42 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
}
}
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct smp_ltk *k;
+ struct smp_irk *irk;
+ u8 addr_type;
+
+ if (type == BDADDR_BREDR) {
+ if (hci_find_link_key(hdev, bdaddr))
+ return true;
+ return false;
+ }
+
+ /* Convert to HCI addr type which struct smp_ltk uses */
+ if (type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ addr_type = ADDR_LE_DEV_RANDOM;
+
+ irk = hci_get_irk(hdev, bdaddr, addr_type);
+ if (irk) {
+ bdaddr = &irk->bdaddr;
+ addr_type = irk->addr_type;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+ if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
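+/* Editor's sketch (not part of the patch): a caller might use the new
+ * hci_bdaddr_is_paired() helper to refuse a pairing request for an address
+ * that already has stored keys. The function name below is a hypothetical
+ * example, not something taken from this diff.
+ *
+ * static int example_reject_if_paired(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ *				       u8 bdaddr_type)
+ * {
+ *	int err = 0;
+ *
+ *	hci_dev_lock(hdev);
+ *
+ *	// BDADDR_BREDR is matched against stored link keys; LE address
+ *	// types are resolved through a matching IRK and then checked
+ *	// against the long term keys.
+ *	if (hci_bdaddr_is_paired(hdev, bdaddr, bdaddr_type))
+ *		err = -EALREADY;
+ *
+ *	hci_dev_unlock(hdev);
+ *
+ *	return err;
+ * }
+ */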
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
@@ -2822,7 +2828,6 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
{
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
- struct hci_request req;
struct hci_cp_inquiry cp;
int err;
@@ -2841,21 +2846,37 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
break;
case DISCOV_TYPE_INTERLEAVED:
- hci_req_init(&req, hdev);
+ hci_dev_lock(hdev);
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, sizeof(cp.lap));
- cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
- hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+ &hdev->quirks)) {
+ /* If we were running an LE-only scan, change the discovery
+ * state. If we were running both LE and BR/EDR inquiry
+ * simultaneously, and the BR/EDR inquiry has already
+ * finished, stop discovery; otherwise the BR/EDR inquiry
+ * will stop discovery when it finishes.
+ */
+ if (!test_bit(HCI_INQUIRY, &hdev->flags))
+ hci_discovery_set_state(hdev,
+ DISCOVERY_STOPPED);
+ } else {
+ struct hci_request req;
- hci_dev_lock(hdev);
+ hci_inquiry_cache_flush(hdev);
- hci_inquiry_cache_flush(hdev);
+ hci_req_init(&req, hdev);
- err = hci_req_run(&req, inquiry_complete);
- if (err) {
- BT_ERR("Inquiry request failed: err %d", err);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
+ hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+ err = hci_req_run(&req, inquiry_complete);
+ if (err) {
+ BT_ERR("Inquiry request failed: err %d", err);
+ hci_discovery_set_state(hdev,
+ DISCOVERY_STOPPED);
+ }
}
hci_dev_unlock(hdev);
@@ -2934,7 +2955,7 @@ static void le_scan_restart_work(struct work_struct *work)
BT_DBG("%s", hdev->name);
/* If controller is not scanning we are done. */
- if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
hci_req_init(&req, hdev);
@@ -2967,9 +2988,9 @@ static void le_scan_restart_work(struct work_struct *work)
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *bdaddr_type)
{
- if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
!bacmp(&hdev->bdaddr, BDADDR_ANY) ||
- (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
bacmp(&hdev->static_addr, BDADDR_ANY))) {
bacpy(bdaddr, &hdev->static_addr);
*bdaddr_type = ADDR_LE_DEV_RANDOM;
@@ -3059,6 +3080,7 @@ struct hci_dev *hci_alloc_dev(void)
hci_init_sysfs(hdev);
discovery_init(hdev);
+ adv_info_init(hdev);
return hdev;
}
@@ -3137,16 +3159,16 @@ int hci_register_dev(struct hci_dev *hdev)
}
if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
- set_bit(HCI_RFKILLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RFKILLED);
- set_bit(HCI_SETUP, &hdev->dev_flags);
- set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_SETUP);
+ hci_dev_set_flag(hdev, HCI_AUTO_OFF);
if (hdev->dev_type == HCI_BREDR) {
/* Assume BR/EDR support until proven otherwise (such as
* through reading supported features during init.
*/
- set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
}
write_lock(&hci_dev_list_lock);
@@ -3157,7 +3179,7 @@ int hci_register_dev(struct hci_dev *hdev)
* and should not be included in normal operation.
*/
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
- set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
@@ -3179,11 +3201,11 @@ EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
- int i, id;
+ int id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_UNREGISTER);
id = hdev->id;
@@ -3193,14 +3215,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_dev_do_close(hdev);
- for (i = 0; i < NUM_REASSEMBLY; i++)
- kfree_skb(hdev->reassembly[i]);
-
cancel_work_sync(&hdev->power_on);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->dev_flags) &&
- !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+ !hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG)) {
hci_dev_lock(hdev);
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
@@ -3299,158 +3318,15 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
}
EXPORT_SYMBOL(hci_recv_frame);
-static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
- int count, __u8 index)
-{
- int len = 0;
- int hlen = 0;
- int remain = count;
- struct sk_buff *skb;
- struct bt_skb_cb *scb;
-
- if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
- index >= NUM_REASSEMBLY)
- return -EILSEQ;
-
- skb = hdev->reassembly[index];
-
- if (!skb) {
- switch (type) {
- case HCI_ACLDATA_PKT:
- len = HCI_MAX_FRAME_SIZE;
- hlen = HCI_ACL_HDR_SIZE;
- break;
- case HCI_EVENT_PKT:
- len = HCI_MAX_EVENT_SIZE;
- hlen = HCI_EVENT_HDR_SIZE;
- break;
- case HCI_SCODATA_PKT:
- len = HCI_MAX_SCO_SIZE;
- hlen = HCI_SCO_HDR_SIZE;
- break;
- }
-
- skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- scb = (void *) skb->cb;
- scb->expect = hlen;
- scb->pkt_type = type;
-
- hdev->reassembly[index] = skb;
- }
-
- while (count) {
- scb = (void *) skb->cb;
- len = min_t(uint, scb->expect, count);
-
- memcpy(skb_put(skb, len), data, len);
-
- count -= len;
- data += len;
- scb->expect -= len;
- remain = count;
-
- switch (type) {
- case HCI_EVENT_PKT:
- if (skb->len == HCI_EVENT_HDR_SIZE) {
- struct hci_event_hdr *h = hci_event_hdr(skb);
- scb->expect = h->plen;
-
- if (skb_tailroom(skb) < scb->expect) {
- kfree_skb(skb);
- hdev->reassembly[index] = NULL;
- return -ENOMEM;
- }
- }
- break;
-
- case HCI_ACLDATA_PKT:
- if (skb->len == HCI_ACL_HDR_SIZE) {
- struct hci_acl_hdr *h = hci_acl_hdr(skb);
- scb->expect = __le16_to_cpu(h->dlen);
-
- if (skb_tailroom(skb) < scb->expect) {
- kfree_skb(skb);
- hdev->reassembly[index] = NULL;
- return -ENOMEM;
- }
- }
- break;
-
- case HCI_SCODATA_PKT:
- if (skb->len == HCI_SCO_HDR_SIZE) {
- struct hci_sco_hdr *h = hci_sco_hdr(skb);
- scb->expect = h->dlen;
-
- if (skb_tailroom(skb) < scb->expect) {
- kfree_skb(skb);
- hdev->reassembly[index] = NULL;
- return -ENOMEM;
- }
- }
- break;
- }
-
- if (scb->expect == 0) {
- /* Complete frame */
-
- bt_cb(skb)->pkt_type = type;
- hci_recv_frame(hdev, skb);
-
- hdev->reassembly[index] = NULL;
- return remain;
- }
- }
-
- return remain;
-}
-
-#define STREAM_REASSEMBLY 0
-
-int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
-{
- int type;
- int rem = 0;
-
- while (count) {
- struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
-
- if (!skb) {
- struct { char type; } *pkt;
-
- /* Start of the frame */
- pkt = data;
- type = pkt->type;
-
- data++;
- count--;
- } else
- type = bt_cb(skb)->pkt_type;
-
- rem = hci_reassembly(hdev, type, data, count,
- STREAM_REASSEMBLY);
- if (rem < 0)
- return rem;
-
- data += (count - rem);
- count = rem;
- }
-
- return rem;
-}
-EXPORT_SYMBOL(hci_recv_stream_fragment);
-
/* ---- Interface to upper protocols ---- */
int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock(&hci_cb_list_lock);
- list_add(&cb->list, &hci_cb_list);
- write_unlock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
+ list_add_tail(&cb->list, &hci_cb_list);
+ mutex_unlock(&hci_cb_list_lock);
return 0;
}
@@ -3460,9 +3336,9 @@ int hci_unregister_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_del(&cb->list);
- write_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
return 0;
}
@@ -3495,11 +3371,6 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-bool hci_req_pending(struct hci_dev *hdev)
-{
- return (hdev->req_status == HCI_REQ_PEND);
-}
-
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
const void *param)
@@ -3874,7 +3745,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
- if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!cnt && time_after(jiffies, hdev->acl_last_tx +
@@ -4057,7 +3928,7 @@ static void hci_sched_le(struct hci_dev *hdev)
if (!hci_conn_num(hdev, LE_LINK))
return;
- if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->le_cnt && hdev->le_pkts &&
@@ -4105,7 +3976,7 @@ static void hci_tx_work(struct work_struct *work)
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
- if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
/* Schedule queues and send stuff to HCI driver */
hci_sched_acl(hdev);
hci_sched_sco(hdev);
@@ -4220,9 +4091,10 @@ static void hci_resend_last(struct hci_dev *hdev)
queue_work(hdev->workqueue, &hdev->cmd_work);
}
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb)
{
- hci_req_complete_t req_complete = NULL;
struct sk_buff *skb;
unsigned long flags;
@@ -4254,18 +4126,14 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
* callback would be found in hdev->sent_cmd instead of the
* command queue (hdev->cmd_q).
*/
- if (hdev->sent_cmd) {
- req_complete = bt_cb(hdev->sent_cmd)->req.complete;
-
- if (req_complete) {
- /* We must set the complete callback to NULL to
- * avoid calling the callback more than once if
- * this function gets called again.
- */
- bt_cb(hdev->sent_cmd)->req.complete = NULL;
+ if (bt_cb(hdev->sent_cmd)->req.complete) {
+ *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
+ return;
+ }
- goto call_complete;
- }
+ if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
+ *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
+ return;
}
/* Remove all pending commands belonging to this request */
@@ -4276,14 +4144,11 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
break;
}
- req_complete = bt_cb(skb)->req.complete;
+ *req_complete = bt_cb(skb)->req.complete;
+ *req_complete_skb = bt_cb(skb)->req.complete_skb;
kfree_skb(skb);
}
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
-call_complete:
- if (req_complete)
- req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
}
static void hci_rx_work(struct work_struct *work)
@@ -4302,7 +4167,7 @@ static void hci_rx_work(struct work_struct *work)
hci_send_to_sock(hdev, skb);
}
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
kfree_skb(skb);
continue;
}
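/* Editor's sketch (not part of the patch): the hci_dev_*_flag() helpers used
 * throughout this conversion are presumably thin wrappers around the regular
 * bit operations on hdev->dev_flags, along the lines of the illustrative
 * definitions below; the real ones live in the Bluetooth core headers.
 * Centralizing the accessors lets dev_flags change its underlying storage
 * later without touching every call site.
 *
 * #define hci_dev_set_flag(hdev, nr)	set_bit((nr), &(hdev)->dev_flags)
 * #define hci_dev_clear_flag(hdev, nr)	clear_bit((nr), &(hdev)->dev_flags)
 * #define hci_dev_change_flag(hdev, nr)	change_bit((nr), &(hdev)->dev_flags)
 * #define hci_dev_test_flag(hdev, nr)	test_bit((nr), &(hdev)->dev_flags)
 * #define hci_dev_test_and_clear_flag(hdev, nr) \
 *	test_and_clear_bit((nr), &(hdev)->dev_flags)
 */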
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 65261e5d4b84..7db4220941cc 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -28,6 +28,54 @@
#include "hci_debugfs.h"
+#define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \
+static ssize_t __name ## _read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct hci_dev *hdev = file->private_data; \
+ char buf[3]; \
+ \
+ buf[0] = test_bit(__quirk, &hdev->quirks) ? 'Y' : 'N'; \
+ buf[1] = '\n'; \
+ buf[2] = '\0'; \
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2); \
+} \
+ \
+static ssize_t __name ## _write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct hci_dev *hdev = file->private_data; \
+ char buf[32]; \
+ size_t buf_size = min(count, (sizeof(buf) - 1)); \
+ bool enable; \
+ \
+ if (test_bit(HCI_UP, &hdev->flags)) \
+ return -EBUSY; \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ buf[buf_size] = '\0'; \
+ if (strtobool(buf, &enable)) \
+ return -EINVAL; \
+ \
+ if (enable == test_bit(__quirk, &hdev->quirks)) \
+ return -EALREADY; \
+ \
+ change_bit(__quirk, &hdev->quirks); \
+ \
+ return count; \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .open = simple_open, \
+ .read = __name ## _read, \
+ .write = __name ## _write, \
+ .llseek = default_llseek, \
+} \
+
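+/* Editor's note (not part of the patch): instantiating this macro for a
+ * hypothetical quirk ("quirk_example" / HCI_QUIRK_EXAMPLE are placeholders)
+ * produces quirk_example_read(), quirk_example_write() and
+ * quirk_example_fops, which can then be wired up with debugfs_create_file():
+ *
+ *	DEFINE_QUIRK_ATTRIBUTE(quirk_example, HCI_QUIRK_EXAMPLE);
+ *
+ *	debugfs_create_file("quirk_example", 0644, hdev->debugfs, hdev,
+ *			    &quirk_example_fops);
+ *
+ * Reading the file reports 'Y' or 'N' for the quirk bit; writing a boolean
+ * toggles it, and writes are rejected with -EBUSY while the controller is
+ * powered up (HCI_UP set).
+ */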
static int features_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
@@ -66,6 +114,30 @@ static const struct file_operations features_fops = {
.release = single_release,
};
+static int device_id_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source,
+ hdev->devid_vendor, hdev->devid_product, hdev->devid_version);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int device_id_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, device_id_show, inode->i_private);
+}
+
+static const struct file_operations device_id_fops = {
+ .open = device_id_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int device_list_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
@@ -166,7 +238,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr)
seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n",
&data->bdaddr, data->bdaddr_type, data->present,
16, data->hash192, 16, data->rand192,
- 16, data->hash256, 19, data->rand256);
+ 16, data->hash256, 16, data->rand256);
}
hci_dev_unlock(hdev);
@@ -247,7 +319,7 @@ static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
- buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+ buf[0] = hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -265,7 +337,7 @@ static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
- buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
+ buf[0] = hci_dev_test_flag(hdev, HCI_SC_ONLY) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -287,6 +359,8 @@ void hci_debugfs_create_common(struct hci_dev *hdev)
debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
&hdev->hw_error_code);
+ debugfs_create_file("device_id", 0444, hdev->debugfs, hdev,
+ &device_id_fops);
debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
&device_list_fops);
@@ -679,7 +753,7 @@ static ssize_t force_static_address_read(struct file *file,
struct hci_dev *hdev = file->private_data;
char buf[3];
- buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
+ buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -704,10 +778,10 @@ static ssize_t force_static_address_write(struct file *file,
if (strtobool(buf, &enable))
return -EINVAL;
- if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
+ if (enable == hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR))
return -EALREADY;
- change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
+ hci_dev_change_flag(hdev, HCI_FORCE_STATIC_ADDR);
return count;
}
@@ -997,6 +1071,11 @@ static int adv_max_interval_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
adv_max_interval_set, "%llu\n");
+DEFINE_QUIRK_ATTRIBUTE(quirk_strict_duplicate_filter,
+ HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+DEFINE_QUIRK_ATTRIBUTE(quirk_simultaneous_discovery,
+ HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+
void hci_debugfs_create_le(struct hci_dev *hdev)
{
debugfs_create_file("identity", 0400, hdev->debugfs, hdev,
@@ -1041,6 +1120,13 @@ void hci_debugfs_create_le(struct hci_dev *hdev)
&adv_max_interval_fops);
debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs,
&hdev->discov_interleaved_timeout);
+
+ debugfs_create_file("quirk_strict_duplicate_filter", 0644,
+ hdev->debugfs, hdev,
+ &quirk_strict_duplicate_filter_fops);
+ debugfs_create_file("quirk_simultaneous_discovery", 0644,
+ hdev->debugfs, hdev,
+ &quirk_simultaneous_discovery_fops);
}
void hci_debugfs_create_conn(struct hci_conn *conn)
diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h
index fb68efe083c5..4444dc8cedc2 100644
--- a/net/bluetooth/hci_debugfs.h
+++ b/net/bluetooth/hci_debugfs.h
@@ -20,7 +20,29 @@
SOFTWARE IS DISCLAIMED.
*/
+#if IS_ENABLED(CONFIG_BT_DEBUGFS)
+
void hci_debugfs_create_common(struct hci_dev *hdev);
void hci_debugfs_create_bredr(struct hci_dev *hdev);
void hci_debugfs_create_le(struct hci_dev *hdev);
void hci_debugfs_create_conn(struct hci_conn *conn);
+
+#else
+
+static inline void hci_debugfs_create_common(struct hci_dev *hdev)
+{
+}
+
+static inline void hci_debugfs_create_bredr(struct hci_dev *hdev)
+{
+}
+
+static inline void hci_debugfs_create_le(struct hci_dev *hdev)
+{
+}
+
+static inline void hci_debugfs_create_conn(struct hci_conn *conn)
+{
+}
+
+#endif
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a3fb094822b6..7b61be73650f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -70,7 +70,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
@@ -82,7 +82,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
hci_conn_check_pending(hdev);
}
@@ -198,7 +198,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
return;
/* Reset all non-persistent flags */
- hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
+ hci_dev_clear_volatile_flags(hdev);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
@@ -265,7 +265,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_set_local_name_complete(hdev, sent, status);
else if (!status)
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
@@ -282,8 +282,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
- test_bit(HCI_CONFIG, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG))
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
@@ -309,7 +309,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_AUTH, &hdev->flags);
}
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_auth_enable_complete(hdev, status);
hci_dev_unlock(hdev);
@@ -404,7 +404,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
if (status == 0)
memcpy(hdev->dev_class, sent, 3);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_set_class_of_dev_complete(hdev, sent, status);
hci_dev_unlock(hdev);
@@ -497,13 +497,13 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
hdev->features[1][0] &= ~LMP_HOST_SSP;
}
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_ssp_enable_complete(hdev, sent->mode, status);
else if (!status) {
if (sent->mode)
- set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
else
- clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
}
hci_dev_unlock(hdev);
@@ -529,11 +529,11 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
hdev->features[1][0] &= ~LMP_HOST_SC;
}
- if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
+ if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
if (sent->support)
- set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_SC_ENABLED);
else
- clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
}
hci_dev_unlock(hdev);
@@ -548,8 +548,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
- test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG)) {
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
hdev->lmp_ver = rp->lmp_ver;
@@ -568,8 +568,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
- test_bit(HCI_CONFIG, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG))
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
@@ -691,7 +691,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_INIT, &hdev->flags))
bacpy(&hdev->bdaddr, &rp->bdaddr);
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SETUP))
bacpy(&hdev->setup_addr, &rp->bdaddr);
}
@@ -900,7 +900,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
if (rp->status)
@@ -926,7 +926,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
@@ -985,7 +985,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
rp->status);
@@ -1001,7 +1001,7 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
ACL_LINK, 0, rp->status);
@@ -1016,7 +1016,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
0, rp->status);
@@ -1032,7 +1032,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
ACL_LINK, 0, rp->status);
@@ -1045,11 +1045,6 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- hci_dev_lock(hdev);
- mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
- rp->status);
- hci_dev_unlock(hdev);
}
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
@@ -1058,15 +1053,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- hci_dev_lock(hdev);
- mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
- rp->hash256, rp->rand256,
- rp->status);
- hci_dev_unlock(hdev);
}
-
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -1109,7 +1097,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (*sent) {
struct hci_conn *conn;
- set_bit(HCI_LE_ADV, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_LE_ADV);
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (conn)
@@ -1117,7 +1105,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
&conn->le_conn_timeout,
conn->conn_timeout);
} else {
- clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
}
hci_dev_unlock(hdev);
@@ -1192,7 +1180,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
switch (cp->enable) {
case LE_SCAN_ENABLE:
- set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_LE_SCAN);
if (hdev->le_scan_type == LE_SCAN_ACTIVE)
clear_pending_adv_report(hdev);
break;
@@ -1217,7 +1205,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
*/
cancel_delayed_work(&hdev->le_scan_disable);
- clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LE_SCAN);
/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
* interrupted scanning due to a connect request. Mark
@@ -1226,10 +1214,9 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
* been disabled because of active scanning, so
* re-enable it again if necessary.
*/
- if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
- &hdev->dev_flags))
+ if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
+ else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
hdev->discovery.state == DISCOVERY_FINDING)
mgmt_reenable_advertising(hdev);
@@ -1388,11 +1375,11 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
if (sent->le) {
hdev->features[1][0] |= LMP_HOST_LE;
- set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_LE_ENABLED);
} else {
hdev->features[1][0] &= ~LMP_HOST_LE;
- clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
- clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING);
}
if (sent->simul)
@@ -1537,7 +1524,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
if (conn && conn->state == BT_CONNECT) {
if (status != 0x0c || conn->attempt > 2) {
conn->state = BT_CLOSED;
- hci_proto_connect_cfm(conn, status);
+ hci_connect_cfm(conn, status);
hci_conn_del(conn);
} else
conn->state = BT_CONNECT2;
@@ -1581,7 +1568,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
if (sco) {
sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
+ hci_connect_cfm(sco, status);
hci_conn_del(sco);
}
}
@@ -1608,7 +1595,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
- hci_proto_connect_cfm(conn, status);
+ hci_connect_cfm(conn, status);
hci_conn_drop(conn);
}
}
@@ -1635,7 +1622,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
- hci_proto_connect_cfm(conn, status);
+ hci_connect_cfm(conn, status);
hci_conn_drop(conn);
}
}
@@ -1769,7 +1756,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
if (!conn)
@@ -1811,7 +1798,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
- hci_proto_connect_cfm(conn, status);
+ hci_connect_cfm(conn, status);
hci_conn_drop(conn);
}
}
@@ -1838,7 +1825,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
if (conn->state == BT_CONFIG) {
- hci_proto_connect_cfm(conn, status);
+ hci_connect_cfm(conn, status);
hci_conn_drop(conn);
}
}
@@ -1873,7 +1860,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
if (sco) {
sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
+ hci_connect_cfm(sco, status);
hci_conn_del(sco);
}
}
@@ -2049,6 +2036,33 @@ unlock:
hci_dev_unlock(hdev);
}
+static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_le_read_remote_features *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (!status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+ if (conn) {
+ if (conn->state == BT_CONFIG) {
+ hci_connect_cfm(conn, status);
+ hci_conn_drop(conn);
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_start_enc *cp;
@@ -2118,7 +2132,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
return;
hci_dev_lock(hdev);
@@ -2127,7 +2141,16 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
goto unlock;
if (list_empty(&discov->resolve)) {
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ /* When BR/EDR inquiry is active and no LE scanning is in
+ * progress, then change discovery state to indicate completion.
+ *
+ * When running LE scanning and BR/EDR inquiry simultaneously
+ * and the LE scan already finished, then change the discovery
+ * state to indicate completion.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+ !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
goto unlock;
}
@@ -2136,7 +2159,16 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
e->name_state = NAME_PENDING;
hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
} else {
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ /* When BR/EDR inquiry is active and no LE scanning is in
+ * progress, then change discovery state to indicate completion.
+ *
+ * When running LE scanning and BR/EDR inquiry simultaneously
+ * and the LE scan already finished, then change the discovery
+ * state to indicate completion.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+ !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
unlock:
@@ -2154,7 +2186,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (!num_rsp)
return;
- if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
return;
hci_dev_lock(hdev);
@@ -2255,10 +2287,10 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_sco_setup(conn, ev->status);
if (ev->status) {
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
hci_conn_del(conn);
} else if (ev->link_type != ACL_LINK)
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
unlock:
hci_dev_unlock(hdev);
@@ -2304,8 +2336,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
* connection. These features are only touched through mgmt so
* only do the checks if HCI_MGMT is set.
*/
- if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
- !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_MGMT) &&
+ !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
@@ -2366,7 +2398,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
&cp);
} else {
conn->state = BT_CONNECT2;
- hci_proto_connect_cfm(conn, 0);
+ hci_connect_cfm(conn, 0);
}
}
@@ -2444,7 +2476,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
type = conn->type;
- hci_proto_disconn_cfm(conn, ev->reason);
+ hci_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
/* Re-enable advertising if necessary, since it might
@@ -2501,7 +2533,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
&cp);
} else {
conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
hci_conn_drop(conn);
}
} else {
@@ -2542,7 +2574,7 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto check_auth;
if (ev->status == 0)
@@ -2608,7 +2640,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
* whenever the encryption procedure fails.
*/
if (ev->status && conn->type == LE_LINK)
- set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
@@ -2626,15 +2658,15 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
* connections that are not encrypted with AES-CCM
* using a P-256 authenticated combination key.
*/
- if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
(!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
- hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_drop(conn);
goto unlock;
}
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
hci_conn_drop(conn);
} else
hci_encrypt_cfm(conn, ev->status, ev->encrypt);
@@ -2707,7 +2739,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
hci_conn_drop(conn);
}
@@ -2715,17 +2747,19 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
+ u16 *opcode, u8 *status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb)
{
struct hci_ev_cmd_complete *ev = (void *) skb->data;
- u8 status = skb->data[sizeof(*ev)];
- __u16 opcode;
- skb_pull(skb, sizeof(*ev));
+ *opcode = __le16_to_cpu(ev->opcode);
+ *status = skb->data[sizeof(*ev)];
- opcode = __le16_to_cpu(ev->opcode);
+ skb_pull(skb, sizeof(*ev));
- switch (opcode) {
+ switch (*opcode) {
case HCI_OP_INQUIRY_CANCEL:
hci_cc_inquiry_cancel(hdev, skb);
break;
@@ -3003,32 +3037,36 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
}
- if (opcode != HCI_OP_NOP)
+ if (*opcode != HCI_OP_NOP)
cancel_delayed_work(&hdev->cmd_timer);
- hci_req_cmd_complete(hdev, opcode, status);
-
- if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
+ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
atomic_set(&hdev->cmd_cnt, 1);
- if (!skb_queue_empty(&hdev->cmd_q))
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
+
+ hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
+ req_complete_skb);
+
+ if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
-static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
+ u16 *opcode, u8 *status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb)
{
struct hci_ev_cmd_status *ev = (void *) skb->data;
- __u16 opcode;
skb_pull(skb, sizeof(*ev));
- opcode = __le16_to_cpu(ev->opcode);
+ *opcode = __le16_to_cpu(ev->opcode);
+ *status = ev->status;
- switch (opcode) {
+ switch (*opcode) {
case HCI_OP_INQUIRY:
hci_cs_inquiry(hdev, ev->status);
break;
@@ -3093,27 +3131,38 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cs_le_create_conn(hdev, ev->status);
break;
+ case HCI_OP_LE_READ_REMOTE_FEATURES:
+ hci_cs_le_read_remote_features(hdev, ev->status);
+ break;
+
case HCI_OP_LE_START_ENC:
hci_cs_le_start_enc(hdev, ev->status);
break;
default:
- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
}
- if (opcode != HCI_OP_NOP)
+ if (*opcode != HCI_OP_NOP)
cancel_delayed_work(&hdev->cmd_timer);
+ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ /* Indicate request completion if the command failed. Also, if
+ * we're not waiting for a special event and we get a success
+ * command status, we should try to flag the request as completed
+ * (since for this kind of command there will not be a command
+ * complete event).
+ */
if (ev->status ||
(hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
- hci_req_cmd_complete(hdev, opcode, ev->status);
+ hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
+ req_complete_skb);
- if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
- atomic_set(&hdev->cmd_cnt, 1);
- if (!skb_queue_empty(&hdev->cmd_q))
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
+ if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3331,11 +3380,11 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_drop(conn);
}
- if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
!test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
- } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
+ } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
u8 secure;
if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -3391,7 +3440,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s", hdev->name);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
return;
hci_dev_lock(hdev);
@@ -3465,7 +3514,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
conn_set_key(conn, ev->key_type, conn->pin_length);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
@@ -3487,7 +3536,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
* store_hint being 0).
*/
if (key->type == HCI_LK_DEBUG_COMBINATION &&
- !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
+ !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
list_del_rcu(&key->list);
kfree_rcu(key, rcu);
goto unlock;
@@ -3570,7 +3619,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
if (!num_rsp)
return;
- if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
return;
hci_dev_lock(hdev);
@@ -3679,7 +3728,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
hci_conn_drop(conn);
}
@@ -3738,7 +3787,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
break;
}
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
if (ev->status)
hci_conn_del(conn);
@@ -3776,7 +3825,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
if (!num_rsp)
return;
- if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
return;
hci_dev_lock(hdev);
@@ -3794,7 +3843,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
data.rssi = info->rssi;
data.ssp_mode = 0x01;
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
name_known = eir_has_data_type(info->data,
sizeof(info->data),
EIR_NAME_COMPLETE);
@@ -3849,7 +3898,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
if (!ev->status)
conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, ev->status);
hci_conn_drop(conn);
} else {
hci_auth_cfm(conn, ev->status);
@@ -3890,41 +3939,37 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
if (!data)
return 0x00;
- if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
- if (bredr_sc_enabled(hdev)) {
- /* When Secure Connections is enabled, then just
- * return the present value stored with the OOB
- * data. The stored value contains the right present
- * information. However it can only be trusted when
- * not in Secure Connection Only mode.
- */
- if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
- return data->present;
-
- /* When Secure Connections Only mode is enabled, then
- * the P-256 values are required. If they are not
- * available, then do not declare that OOB data is
- * present.
- */
- if (!memcmp(data->rand256, ZERO_KEY, 16) ||
- !memcmp(data->hash256, ZERO_KEY, 16))
- return 0x00;
-
- return 0x02;
- }
+ if (bredr_sc_enabled(hdev)) {
+ /* When Secure Connections is enabled, then just
+ * return the present value stored with the OOB
+ * data. The stored value contains the right present
+ * information. However, it can only be trusted when
+ * not in Secure Connections Only mode.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
+ return data->present;
- /* When Secure Connections is not enabled or actually
- * not supported by the hardware, then check that if
- * P-192 data values are present.
+ /* When Secure Connections Only mode is enabled, then
+ * the P-256 values are required. If they are not
+ * available, then do not declare that OOB data is
+ * present.
*/
- if (!memcmp(data->rand192, ZERO_KEY, 16) ||
- !memcmp(data->hash192, ZERO_KEY, 16))
+ if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+ !memcmp(data->hash256, ZERO_KEY, 16))
return 0x00;
- return 0x01;
+ return 0x02;
}
- return 0x00;
+ /* When Secure Connections is not enabled or actually
+ * not supported by the hardware, then check whether the
+ * P-192 data values are present.
+ */
+ if (!memcmp(data->rand192, ZERO_KEY, 16) ||
+ !memcmp(data->hash192, ZERO_KEY, 16))
+ return 0x00;
+
+ return 0x01;
}
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3942,13 +3987,13 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_hold(conn);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
/* Allow pairing if we're pairable, the initiators of the
* pairing or if the remote is not requesting bonding.
*/
- if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
+ if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
@@ -3974,7 +4019,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* If we're not bondable, force one of the non-bondable
* authentication requirement values.
*/
- if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
conn->auth_type &= HCI_AT_NO_BONDING_MITM;
cp.authentication = conn->auth_type;
@@ -4011,8 +4056,6 @@ static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->remote_cap = ev->capability;
conn->remote_auth = ev->authentication;
- if (ev->oob_data)
- set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
unlock:
hci_dev_unlock(hdev);
@@ -4029,7 +4072,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -4100,7 +4143,7 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}
@@ -4119,7 +4162,7 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
conn->passkey_notify = __le32_to_cpu(ev->passkey);
conn->passkey_entered = 0;
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
conn->dst_type, conn->passkey_notify,
conn->passkey_entered);
@@ -4157,7 +4200,7 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
conn->dst_type, conn->passkey_notify,
conn->passkey_entered);
@@ -4226,7 +4269,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
@@ -4243,7 +4286,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
struct hci_cp_remote_oob_ext_data_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
memset(cp.hash192, 0, sizeof(cp.hash192));
memset(cp.rand192, 0, sizeof(cp.rand192));
} else {
@@ -4409,7 +4452,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* All controllers implicitly stop advertising in the event of a
* connection, so ensure that the state bit is cleared.
*/
- clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (!conn) {
@@ -4432,7 +4475,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (conn->out) {
conn->resp_addr_type = ev->bdaddr_type;
bacpy(&conn->resp_addr, &ev->bdaddr);
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
conn->init_addr_type = ADDR_LE_DEV_RANDOM;
bacpy(&conn->init_addr, &hdev->rpa);
} else {
@@ -4503,7 +4546,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
- conn->state = BT_CONNECTED;
+ conn->state = BT_CONFIG;
conn->le_conn_interval = le16_to_cpu(ev->interval);
conn->le_conn_latency = le16_to_cpu(ev->latency);
@@ -4512,7 +4555,33 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
- hci_proto_connect_cfm(conn, ev->status);
+ if (!ev->status) {
+ /* The remote features procedure is defined for master
+ * role only. So only in case of an initiated connection
+ * request the remote features.
+ *
+ * If the local controller supports slave-initiated features
+ * exchange, then requesting the remote features in slave
+ * role is possible. Otherwise just transition into the
+ * connected state without requesting the remote features.
+ */
+ if (conn->out ||
+ (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
+ struct hci_cp_le_read_remote_features cp;
+
+ cp.handle = __cpu_to_le16(conn->handle);
+
+ hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
+ sizeof(cp), &cp);
+
+ hci_conn_hold(conn);
+ } else {
+ conn->state = BT_CONNECTED;
+ hci_connect_cfm(conn, ev->status);
+ }
+ } else {
+ hci_connect_cfm(conn, ev->status);
+ }
params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
conn->dst_type);
@@ -4658,7 +4727,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
/* If the controller is not using resolvable random
* addresses, then this report can be ignored.
*/
- if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
return;
/* If the local IRK of the controller does not match
@@ -4814,6 +4883,48 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
+static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+ if (conn) {
+ if (!ev->status)
+ memcpy(conn->features[0], ev->features, 8);
+
+ if (conn->state == BT_CONFIG) {
+ __u8 status;
+
+ /* If the local controller supports slave-initiated
+ * features exchange, but the remote controller does
+ * not, then it is possible that the error code 0x1a
+ * for unsupported remote feature gets returned.
+ *
+ * In this specific case, allow the connection to
+ * transition into connected state and mark it as
+ * successful.
+ */
+ if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
+ !conn->out && ev->status == 0x1a)
+ status = 0x00;
+ else
+ status = ev->status;
+
+ conn->state = BT_CONNECTED;
+ hci_connect_cfm(conn, status);
+ hci_conn_drop(conn);
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_ltk_req *ev = (void *) skb->data;
@@ -4987,6 +5098,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_le_adv_report_evt(hdev, skb);
break;
+ case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
+ hci_le_remote_feat_complete_evt(hdev, skb);
+ break;
+
case HCI_EV_LE_LTK_REQ:
hci_le_ltk_request_evt(hdev, skb);
break;
@@ -5020,32 +5135,79 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
amp_read_loc_assoc_final_data(hdev, hcon);
}
-void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
+ u8 event, struct sk_buff *skb)
{
- struct hci_event_hdr *hdr = (void *) skb->data;
- __u8 event = hdr->evt;
+ struct hci_ev_cmd_complete *ev;
+ struct hci_event_hdr *hdr;
- hci_dev_lock(hdev);
+ if (!skb)
+ return false;
- /* Received events are (currently) only needed when a request is
- * ongoing so avoid unnecessary memory allocation.
- */
- if (hci_req_pending(hdev)) {
- kfree_skb(hdev->recv_evt);
- hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
+ if (skb->len < sizeof(*hdr)) {
+ BT_ERR("Too short HCI event");
+ return false;
}
- hci_dev_unlock(hdev);
-
+ hdr = (void *) skb->data;
skb_pull(skb, HCI_EVENT_HDR_SIZE);
+ if (event) {
+ if (hdr->evt != event)
+ return false;
+ return true;
+ }
+
+ if (hdr->evt != HCI_EV_CMD_COMPLETE) {
+ BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
+ return false;
+ }
+
+ if (skb->len < sizeof(*ev)) {
+ BT_ERR("Too short cmd_complete event");
+ return false;
+ }
+
+ ev = (void *) skb->data;
+ skb_pull(skb, sizeof(*ev));
+
+ if (opcode != __le16_to_cpu(ev->opcode)) {
+ BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
+ __le16_to_cpu(ev->opcode));
+ return false;
+ }
+
+ return true;
+}
+
+void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *) skb->data;
+ hci_req_complete_t req_complete = NULL;
+ hci_req_complete_skb_t req_complete_skb = NULL;
+ struct sk_buff *orig_skb = NULL;
+ u8 status = 0, event = hdr->evt, req_evt = 0;
+ u16 opcode = HCI_OP_NOP;
+
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
- u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
-
- hci_req_cmd_complete(hdev, opcode, 0);
+ opcode = __le16_to_cpu(cmd_hdr->opcode);
+ hci_req_cmd_complete(hdev, opcode, status, &req_complete,
+ &req_complete_skb);
+ req_evt = event;
}
+ /* If it looks like we might end up having to call
+ * req_complete_skb, store a pristine copy of the skb since the
+ * various handlers may modify the original one through
+ * skb_pull() calls, etc.
+ */
+ if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
+ event == HCI_EV_CMD_COMPLETE)
+ orig_skb = skb_clone(skb, GFP_KERNEL);
+
+ skb_pull(skb, HCI_EVENT_HDR_SIZE);
+
switch (event) {
case HCI_EV_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
@@ -5088,11 +5250,13 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
break;
case HCI_EV_CMD_COMPLETE:
- hci_cmd_complete_evt(hdev, skb);
+ hci_cmd_complete_evt(hdev, skb, &opcode, &status,
+ &req_complete, &req_complete_skb);
break;
case HCI_EV_CMD_STATUS:
- hci_cmd_status_evt(hdev, skb);
+ hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
+ &req_complete_skb);
break;
case HCI_EV_HARDWARE_ERROR:
@@ -5224,6 +5388,17 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
+ if (req_complete) {
+ req_complete(hdev, status, opcode);
+ } else if (req_complete_skb) {
+ if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
+ kfree_skb(orig_skb);
+ orig_skb = NULL;
+ }
+ req_complete_skb(hdev, status, opcode, orig_skb);
+ }
+
+ kfree_skb(orig_skb);
kfree_skb(skb);
hdev->stat.evt_rx++;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index b59f92c6df0c..d6025d6e6d59 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -34,7 +34,8 @@ void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
req->err = 0;
}
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+static int req_run(struct hci_request *req, hci_req_complete_t complete,
+ hci_req_complete_skb_t complete_skb)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
@@ -56,6 +57,7 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
skb = skb_peek_tail(&req->cmd_q);
bt_cb(skb)->req.complete = complete;
+ bt_cb(skb)->req.complete_skb = complete_skb;
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -66,6 +68,16 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
return 0;
}
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+ return req_run(req, complete, NULL);
+}
+
+int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
+{
+ return req_run(req, NULL, complete);
+}
+
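+/* Editor's sketch (not part of the patch): with hci_req_run_skb() a request
+ * can hand the raw command-complete skb to its completion callback. The
+ * function names below are illustrative assumptions, not part of this diff.
+ *
+ * static void example_version_complete(struct hci_dev *hdev, u8 status,
+ *				        u16 opcode, struct sk_buff *skb)
+ * {
+ *	// When non-NULL, skb holds the command complete parameters for
+ *	// the opcode that finished the request.
+ *	BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name, opcode,
+ *	       status);
+ * }
+ *
+ * static int example_read_version(struct hci_dev *hdev)
+ * {
+ *	struct hci_request req;
+ *
+ *	hci_req_init(&req, hdev);
+ *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+ *
+ *	return hci_req_run_skb(&req, example_version_complete);
+ * }
+ */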
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param)
{
@@ -270,7 +282,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
* and 0x01 (whitelist enabled) use the new filter policies
* 0x02 (no whitelist) and 0x03 (whitelist enabled).
*/
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
filter_policy |= 0x02;
@@ -304,10 +316,10 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
* In this kind of scenario skip the update and let the random
* address be updated at the next cycle.
*/
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
BT_DBG("Deferring random address update");
- set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return;
}
@@ -324,12 +336,12 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
* current RPA has expired or there is something else than
* the current RPA in use, then generate a new one.
*/
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
int to;
*own_addr_type = ADDR_LE_DEV_RANDOM;
- if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
+ if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
!bacmp(&hdev->random_addr, &hdev->rpa))
return 0;
@@ -383,9 +395,9 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
* and a static address has been configured, then use that
* address instead of the public BR/EDR address.
*/
- if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
!bacmp(&hdev->bdaddr, BDADDR_ANY) ||
- (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
bacmp(&hdev->static_addr, BDADDR_ANY))) {
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (bacmp(&hdev->static_addr, &hdev->random_addr))
@@ -425,7 +437,7 @@ void __hci_update_page_scan(struct hci_request *req)
struct hci_dev *hdev = req->hdev;
u8 scan;
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return;
if (!hdev_is_powered(hdev))
@@ -434,7 +446,7 @@ void __hci_update_page_scan(struct hci_request *req)
if (mgmt_powering_down(hdev))
return;
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+ if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
disconnected_whitelist_entries(hdev))
scan = SCAN_PAGE;
else
@@ -443,7 +455,7 @@ void __hci_update_page_scan(struct hci_request *req)
if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
return;
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
scan |= SCAN_INQUIRY;
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
@@ -471,14 +483,14 @@ void __hci_update_background_scan(struct hci_request *req)
if (!test_bit(HCI_UP, &hdev->flags) ||
test_bit(HCI_INIT, &hdev->flags) ||
- test_bit(HCI_SETUP, &hdev->dev_flags) ||
- test_bit(HCI_CONFIG, &hdev->dev_flags) ||
- test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
- test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+ hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG) ||
+ hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
return;
/* No point in doing scanning if LE support hasn't been enabled */
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
/* If discovery is active don't interfere with it */
@@ -502,7 +514,7 @@ void __hci_update_background_scan(struct hci_request *req)
*/
/* If controller is not scanning we are done. */
- if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
hci_req_add_le_scan_disable(req);
@@ -524,7 +536,7 @@ void __hci_update_background_scan(struct hci_request *req)
/* If controller is currently scanning, we stop it to ensure we
* don't miss any advertising (due to duplicates filter).
*/
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req);
hci_req_add_le_passive_scan(req);
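
As a usage illustration of the hci_req_run_skb() entry point added in this file, a minimal hypothetical caller is sketched below; hci_req_init(), hci_req_add() and the callback signature come from this patch, while the function names, the chosen opcode (HCI_OP_READ_LOCAL_NAME) and the callback body are placeholders:

    /* Hypothetical caller of the skb-aware request API (sketch only). */
    static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode,
                                 struct sk_buff *skb)
    {
            /* When non-NULL, skb is the unmodified Command Complete event;
             * the HCI core frees it once this callback returns.
             */
            BT_DBG("opcode 0x%4.4x status 0x%2.2x", opcode, status);
    }

    static int example_send(struct hci_dev *hdev)
    {
            struct hci_request req;

            hci_req_init(&req, hdev);
            hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
            return hci_req_run_skb(&req, example_complete);
    }
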
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index adf074d33544..bf6df92f42db 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -32,11 +32,14 @@ struct hci_request {
void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
+int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb);
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1d65c5be7c82..56f9edbf3d05 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -30,6 +30,12 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "mgmt_util.h"
+
+static LIST_HEAD(mgmt_chan_list);
+static DEFINE_MUTEX(mgmt_chan_list_lock);
static atomic_t monitor_promisc = ATOMIC_INIT(0);
@@ -44,11 +50,32 @@ struct hci_pinfo {
struct hci_filter filter;
__u32 cmsg_mask;
unsigned short channel;
+ unsigned long flags;
};
-static inline int hci_test_bit(int nr, void *addr)
+void hci_sock_set_flag(struct sock *sk, int nr)
+{
+ set_bit(nr, &hci_pi(sk)->flags);
+}
+
+void hci_sock_clear_flag(struct sock *sk, int nr)
{
- return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
+ clear_bit(nr, &hci_pi(sk)->flags);
+}
+
+int hci_sock_test_flag(struct sock *sk, int nr)
+{
+ return test_bit(nr, &hci_pi(sk)->flags);
+}
+
+unsigned short hci_sock_get_channel(struct sock *sk)
+{
+ return hci_pi(sk)->channel;
+}
+
+static inline int hci_test_bit(int nr, const void *addr)
+{
+ return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter */
@@ -183,54 +210,31 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(skb_copy);
}
-/* Send frame to control socket */
-void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
+/* Send frame to sockets with specific channel */
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+ int flag, struct sock *skip_sk)
{
struct sock *sk;
- BT_DBG("len %d", skb->len);
+ BT_DBG("channel %u len %d", channel, skb->len);
read_lock(&hci_sk_list.lock);
sk_for_each(sk, &hci_sk_list.head) {
struct sk_buff *nskb;
- /* Skip the original socket */
- if (sk == skip_sk)
- continue;
-
- if (sk->sk_state != BT_BOUND)
- continue;
-
- if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+ /* Ignore socket without the flag set */
+ if (!hci_sock_test_flag(sk, flag))
continue;
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
+ /* Skip the original socket */
+ if (sk == skip_sk)
continue;
- if (sock_queue_rcv_skb(sk, nskb))
- kfree_skb(nskb);
- }
-
- read_unlock(&hci_sk_list.lock);
-}
-
-static void queue_monitor_skb(struct sk_buff *skb)
-{
- struct sock *sk;
-
- BT_DBG("len %d", skb->len);
-
- read_lock(&hci_sk_list.lock);
-
- sk_for_each(sk, &hci_sk_list.head) {
- struct sk_buff *nskb;
-
if (sk->sk_state != BT_BOUND)
continue;
- if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ if (hci_pi(sk)->channel != channel)
continue;
nskb = skb_clone(skb, GFP_ATOMIC);
@@ -290,7 +294,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
hdr->index = cpu_to_le16(hdev->id);
hdr->len = cpu_to_le16(skb->len);
- queue_monitor_skb(skb_copy);
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
+ HCI_SOCK_TRUSTED, NULL);
kfree_skb(skb_copy);
}
@@ -397,7 +402,8 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
skb = create_monitor_event(hdev, event);
if (skb) {
- queue_monitor_skb(skb);
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
kfree_skb(skb);
}
}
@@ -428,6 +434,56 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
}
}
+static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
+{
+ struct hci_mgmt_chan *c;
+
+ list_for_each_entry(c, &mgmt_chan_list, list) {
+ if (c->channel == channel)
+ return c;
+ }
+
+ return NULL;
+}
+
+static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
+{
+ struct hci_mgmt_chan *c;
+
+ mutex_lock(&mgmt_chan_list_lock);
+ c = __hci_mgmt_chan_find(channel);
+ mutex_unlock(&mgmt_chan_list_lock);
+
+ return c;
+}
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
+{
+ if (c->channel < HCI_CHANNEL_CONTROL)
+ return -EINVAL;
+
+ mutex_lock(&mgmt_chan_list_lock);
+ if (__hci_mgmt_chan_find(c->channel)) {
+ mutex_unlock(&mgmt_chan_list_lock);
+ return -EALREADY;
+ }
+
+ list_add_tail(&c->list, &mgmt_chan_list);
+
+ mutex_unlock(&mgmt_chan_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hci_mgmt_chan_register);
+
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
+{
+ mutex_lock(&mgmt_chan_list_lock);
+ list_del(&c->list);
+ mutex_unlock(&mgmt_chan_list_lock);
+}
+EXPORT_SYMBOL(hci_mgmt_chan_unregister);
+
static int hci_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -448,7 +504,7 @@ static int hci_sock_release(struct socket *sock)
if (hdev) {
if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
mgmt_index_added(hdev);
- clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
hci_dev_close(hdev->id);
}
@@ -508,10 +564,10 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
if (!hdev)
return -EBADFD;
- if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
return -EOPNOTSUPP;
if (hdev->dev_type != HCI_BREDR)
@@ -687,14 +743,14 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
if (test_bit(HCI_UP, &hdev->flags) ||
test_bit(HCI_INIT, &hdev->flags) ||
- test_bit(HCI_SETUP, &hdev->dev_flags) ||
- test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+ hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG)) {
err = -EBUSY;
hci_dev_put(hdev);
goto done;
}
- if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
err = -EUSERS;
hci_dev_put(hdev);
goto done;
@@ -704,7 +760,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
err = hci_dev_open(hdev->id);
if (err) {
- clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
mgmt_index_added(hdev);
hci_dev_put(hdev);
goto done;
@@ -715,38 +771,62 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
hci_pi(sk)->hdev = hdev;
break;
- case HCI_CHANNEL_CONTROL:
+ case HCI_CHANNEL_MONITOR:
if (haddr.hci_dev != HCI_DEV_NONE) {
err = -EINVAL;
goto done;
}
- if (!capable(CAP_NET_ADMIN)) {
+ if (!capable(CAP_NET_RAW)) {
err = -EPERM;
goto done;
}
+ /* The monitor interface is restricted to CAP_NET_RAW
+ * capabilities and with that implicitly trusted.
+ */
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
+ send_monitor_replay(sk);
+
+ atomic_inc(&monitor_promisc);
break;
- case HCI_CHANNEL_MONITOR:
- if (haddr.hci_dev != HCI_DEV_NONE) {
+ default:
+ if (!hci_mgmt_chan_find(haddr.hci_channel)) {
err = -EINVAL;
goto done;
}
- if (!capable(CAP_NET_RAW)) {
- err = -EPERM;
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
goto done;
}
- send_monitor_replay(sk);
-
- atomic_inc(&monitor_promisc);
+ /* Users with CAP_NET_ADMIN capabilities are allowed
+ * access to all management commands and events. For
+ * untrusted users the interface is restricted and
+ * also only untrusted events are sent.
+ */
+ if (capable(CAP_NET_ADMIN))
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
+ /* At the moment the index and unconfigured index events
+ * are enabled unconditionally. Setting them on each
+ * socket when binding keeps this functionality. They
+ * however might be cleared later and then sending of these
+ * events will be disabled, but that is then intentional.
+ *
+ * This also enables generic events that are safe to be
+ * received by untrusted users. Examples of such events
+ * are changes to settings, class of device, name etc.
+ */
+ if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+ hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
+ }
break;
-
- default:
- err = -EINVAL;
- goto done;
}
@@ -826,8 +906,8 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
}
}
-static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -860,10 +940,13 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
hci_sock_cmsg(sk, msg, skb);
break;
case HCI_CHANNEL_USER:
- case HCI_CHANNEL_CONTROL:
case HCI_CHANNEL_MONITOR:
sock_recv_timestamp(msg, sk, skb);
break;
+ default:
+ if (hci_mgmt_chan_find(hci_pi(sk)->channel))
+ sock_recv_timestamp(msg, sk, skb);
+ break;
}
skb_free_datagram(sk, skb);
@@ -871,10 +954,122 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err ? : copied;
}
-static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
+ struct msghdr *msg, size_t msglen)
+{
+ void *buf;
+ u8 *cp;
+ struct mgmt_hdr *hdr;
+ u16 opcode, index, len;
+ struct hci_dev *hdev = NULL;
+ const struct hci_mgmt_handler *handler;
+ bool var_len, no_hdev;
+ int err;
+
+ BT_DBG("got %zu bytes", msglen);
+
+ if (msglen < sizeof(*hdr))
+ return -EINVAL;
+
+ buf = kmalloc(msglen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (memcpy_from_msg(buf, msg, msglen)) {
+ err = -EFAULT;
+ goto done;
+ }
+
+ hdr = buf;
+ opcode = __le16_to_cpu(hdr->opcode);
+ index = __le16_to_cpu(hdr->index);
+ len = __le16_to_cpu(hdr->len);
+
+ if (len != msglen - sizeof(*hdr)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (opcode >= chan->handler_count ||
+ chan->handlers[opcode].func == NULL) {
+ BT_DBG("Unknown op %u", opcode);
+ err = mgmt_cmd_status(sk, index, opcode,
+ MGMT_STATUS_UNKNOWN_COMMAND);
+ goto done;
+ }
+
+ handler = &chan->handlers[opcode];
+
+ if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
+ !(handler->flags & HCI_MGMT_UNTRUSTED)) {
+ err = mgmt_cmd_status(sk, index, opcode,
+ MGMT_STATUS_PERMISSION_DENIED);
+ goto done;
+ }
+
+ if (index != MGMT_INDEX_NONE) {
+ hdev = hci_dev_get(index);
+ if (!hdev) {
+ err = mgmt_cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG) ||
+ hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ err = mgmt_cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
+ err = mgmt_cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+ }
+
+ no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
+ if (no_hdev != !hdev) {
+ err = mgmt_cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+
+ var_len = (handler->flags & HCI_MGMT_VAR_LEN);
+ if ((var_len && len < handler->data_len) ||
+ (!var_len && len != handler->data_len)) {
+ err = mgmt_cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto done;
+ }
+
+ if (hdev && chan->hdev_init)
+ chan->hdev_init(sk, hdev);
+
+ cp = buf + sizeof(*hdr);
+
+ err = handler->func(sk, hdev, cp, len);
+ if (err < 0)
+ goto done;
+
+ err = msglen;
+
+done:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ kfree(buf);
+ return err;
+}
+
+static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
+ struct hci_mgmt_chan *chan;
struct hci_dev *hdev;
struct sk_buff *skb;
int err;
@@ -896,14 +1091,18 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
case HCI_CHANNEL_RAW:
case HCI_CHANNEL_USER:
break;
- case HCI_CHANNEL_CONTROL:
- err = mgmt_control(sk, msg, len);
- goto done;
case HCI_CHANNEL_MONITOR:
err = -EOPNOTSUPP;
goto done;
default:
- err = -EINVAL;
+ mutex_lock(&mgmt_chan_list_lock);
+ chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
+ if (chan)
+ err = hci_mgmt_cmd(chan, sk, msg, len);
+ else
+ err = -EINVAL;
+
+ mutex_unlock(&mgmt_chan_list_lock);
goto done;
}
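
To show how the hci_mgmt_chan_register()/hci_mgmt_chan_unregister() pair and the handler-table dispatch introduced in this file are meant to be consumed, here is a hedged sketch of a hypothetical channel; the struct fields (.channel, .handlers, .handler_count), the handler prototype and the flag names match the code above, but the channel number, opcode table and handler body are invented for illustration:

    /* Hypothetical hci_mgmt_chan user (sketch only). */
    static int example_cmd(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 len)
    {
            /* Registered with HCI_MGMT_NO_HDEV, so hdev is NULL here */
            return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, 0x0001, 0, NULL, 0);
    }

    static struct hci_mgmt_handler example_handlers[] = {
            { },    /* opcode 0x0000 unused */
            { .func = example_cmd, .data_len = 0,
              .flags = HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
    };

    static struct hci_mgmt_chan example_chan = {
            .channel       = HCI_CHANNEL_CONTROL + 1, /* placeholder number */
            .handler_count = ARRAY_SIZE(example_handlers),
            .handlers      = example_handlers,
    };

    /* hci_mgmt_chan_register(&example_chan) at init time and
     * hci_mgmt_chan_unregister(&example_chan) at exit time.
     */
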
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 07348e142f16..9070dfd6b4ad 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -70,10 +70,11 @@ static void hidp_session_terminate(struct hidp_session *s);
static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
{
+ u32 valid_flags = 0;
memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);
- ci->flags = session->flags;
+ ci->flags = session->flags & valid_flags;
ci->state = BT_CONNECTED;
if (session->input) {
@@ -907,7 +908,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
kref_init(&session->ref);
atomic_set(&session->state, HIDP_SESSION_IDLING);
init_waitqueue_head(&session->state_queue);
- session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
+ session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID);
/* connection management */
bacpy(&session->bdaddr, bdaddr);
@@ -1312,6 +1313,8 @@ int hidp_connection_add(struct hidp_connadd_req *req,
struct socket *ctrl_sock,
struct socket *intr_sock)
{
+ u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG) |
+ BIT(HIDP_BOOT_PROTOCOL_MODE);
struct hidp_session *session;
struct l2cap_conn *conn;
struct l2cap_chan *chan;
@@ -1321,6 +1324,9 @@ int hidp_connection_add(struct hidp_connadd_req *req,
if (ret)
return ret;
+ if (req->flags & ~valid_flags)
+ return -EINVAL;
+
chan = l2cap_pi(ctrl_sock->sk)->chan;
conn = NULL;
l2cap_chan_lock(chan);
@@ -1351,13 +1357,17 @@ out_conn:
int hidp_connection_del(struct hidp_conndel_req *req)
{
+ u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG);
struct hidp_session *session;
+ if (req->flags & ~valid_flags)
+ return -EINVAL;
+
session = hidp_session_find(&req->bdaddr);
if (!session)
return -ENOENT;
- if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG))
+ if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG))
hidp_send_ctrl_message(session,
HIDP_TRANS_HID_CONTROL |
HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6ba33f9631e8..dad419782a12 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -292,7 +292,7 @@ static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
struct sk_buff *skb;
skb_queue_walk(head, skb) {
- if (bt_cb(skb)->control.txseq == seq)
+ if (bt_cb(skb)->l2cap.txseq == seq)
return skb;
}
@@ -954,11 +954,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
__unpack_extended_control(get_unaligned_le32(skb->data),
- &bt_cb(skb)->control);
+ &bt_cb(skb)->l2cap);
skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
} else {
__unpack_enhanced_control(get_unaligned_le16(skb->data),
- &bt_cb(skb)->control);
+ &bt_cb(skb)->l2cap);
skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
}
}
@@ -1200,8 +1200,8 @@ static void l2cap_move_setup(struct l2cap_chan *chan)
chan->retry_count = 0;
skb_queue_walk(&chan->tx_q, skb) {
- if (bt_cb(skb)->control.retries)
- bt_cb(skb)->control.retries = 1;
+ if (bt_cb(skb)->l2cap.retries)
+ bt_cb(skb)->l2cap.retries = 1;
else
break;
}
@@ -1244,6 +1244,13 @@ static void l2cap_move_done(struct l2cap_chan *chan)
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
+ /* The channel may have already been flagged as connected in
+ * case of receiving data before the L2CAP info req/rsp
+ * procedure is complete.
+ */
+ if (chan->state == BT_CONNECTED)
+ return;
+
/* This clears all conf flags, including CONF_NOT_COMPLETE */
chan->conf_state = 0;
__clear_chan_timer(chan);
@@ -1839,8 +1846,8 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
skb = skb_dequeue(&chan->tx_q);
- bt_cb(skb)->control.retries = 1;
- control = &bt_cb(skb)->control;
+ bt_cb(skb)->l2cap.retries = 1;
+ control = &bt_cb(skb)->l2cap;
control->reqseq = 0;
control->txseq = chan->next_tx_seq;
@@ -1884,8 +1891,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
skb = chan->tx_send_head;
- bt_cb(skb)->control.retries = 1;
- control = &bt_cb(skb)->control;
+ bt_cb(skb)->l2cap.retries = 1;
+ control = &bt_cb(skb)->l2cap;
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
control->final = 1;
@@ -1956,11 +1963,11 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
continue;
}
- bt_cb(skb)->control.retries++;
- control = bt_cb(skb)->control;
+ bt_cb(skb)->l2cap.retries++;
+ control = bt_cb(skb)->l2cap;
if (chan->max_tx != 0 &&
- bt_cb(skb)->control.retries > chan->max_tx) {
+ bt_cb(skb)->l2cap.retries > chan->max_tx) {
BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
l2cap_send_disconn_req(chan, ECONNRESET);
l2cap_seq_list_clear(&chan->retrans_list);
@@ -2038,7 +2045,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
if (chan->unacked_frames) {
skb_queue_walk(&chan->tx_q, skb) {
- if (bt_cb(skb)->control.txseq == control->reqseq ||
+ if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
skb == chan->tx_send_head)
break;
}
@@ -2048,7 +2055,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
break;
l2cap_seq_list_append(&chan->retrans_list,
- bt_cb(skb)->control.txseq);
+ bt_cb(skb)->l2cap.txseq);
}
l2cap_ertm_resend(chan);
@@ -2260,8 +2267,8 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
return ERR_PTR(err);
}
- bt_cb(skb)->control.fcs = chan->fcs;
- bt_cb(skb)->control.retries = 0;
+ bt_cb(skb)->l2cap.fcs = chan->fcs;
+ bt_cb(skb)->l2cap.retries = 0;
return skb;
}
@@ -2314,7 +2321,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
return PTR_ERR(skb);
}
- bt_cb(skb)->control.sar = sar;
+ bt_cb(skb)->l2cap.sar = sar;
__skb_queue_tail(seg_queue, skb);
len -= pdu_len;
@@ -2849,7 +2856,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
continue;
/* Don't send frame to the channel it came from */
- if (bt_cb(skb)->chan == chan)
+ if (bt_cb(skb)->l2cap.chan == chan)
continue;
nskb = skb_clone(skb, GFP_KERNEL);
@@ -3893,7 +3900,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
return -EPROTO;
hci_dev_lock(hdev);
- if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_MGMT) &&
!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
mgmt_device_connected(hdev, hcon, 0, NULL, 0);
hci_dev_unlock(hdev);
@@ -5911,7 +5918,7 @@ static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
skb_unlink(skb, &chan->srej_q);
chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
- err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
+ err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
if (err)
break;
}
@@ -5945,7 +5952,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
return;
}
- if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
+ if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
l2cap_send_disconn_req(chan, ECONNRESET);
return;
@@ -5998,7 +6005,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
if (chan->max_tx && skb &&
- bt_cb(skb)->control.retries >= chan->max_tx) {
+ bt_cb(skb)->l2cap.retries >= chan->max_tx) {
BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
l2cap_send_disconn_req(chan, ECONNRESET);
return;
@@ -6558,7 +6565,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
- struct l2cap_ctrl *control = &bt_cb(skb)->control;
+ struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
u16 len;
u8 event;
@@ -6785,6 +6792,13 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
BT_DBG("chan %p, len %d", chan, skb->len);
+ /* If we receive data on a fixed channel before the info req/rsp
+ * procedure is done, simply assume that the channel is supported
+ * and mark it as ready.
+ */
+ if (chan->chan_type == L2CAP_CHAN_FIXED)
+ l2cap_chan_ready(chan);
+
if (chan->state != BT_CONNECTED)
goto drop;
@@ -6850,8 +6864,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
goto drop;
/* Store remote BD_ADDR and PSM for msg_name */
- bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
- bt_cb(skb)->psm = psm;
+ bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
+ bt_cb(skb)->l2cap.psm = psm;
if (!chan->ops->recv(chan, skb)) {
l2cap_chan_put(chan);
@@ -6973,12 +6987,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
if (hcon->type == ACL_LINK &&
- test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
+ hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
conn->local_fixed_chan |= L2CAP_FC_A2MP;
- if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
+ if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
(bredr_sc_enabled(hcon->hdev) ||
- test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
+ hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
@@ -7098,7 +7112,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
else
dst_type = ADDR_LE_DEV_RANDOM;
- if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
role = HCI_ROLE_SLAVE;
else
role = HCI_ROLE_MASTER;
@@ -7238,13 +7252,16 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
return NULL;
}
-void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
struct hci_dev *hdev = hcon->hdev;
struct l2cap_conn *conn;
struct l2cap_chan *pchan;
u8 dst_type;
+ if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+ return;
+
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (status) {
@@ -7307,8 +7324,11 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
return conn->disc_reason;
}
-void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
+ if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+ return;
+
BT_DBG("hcon %p reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason));
@@ -7331,13 +7351,13 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
}
}
-int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan;
if (!conn)
- return 0;
+ return;
BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
@@ -7420,8 +7440,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
}
mutex_unlock(&conn->chan_lock);
-
- return 0;
}
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
@@ -7529,6 +7547,13 @@ drop:
return 0;
}
+static struct hci_cb l2cap_cb = {
+ .name = "L2CAP",
+ .connect_cfm = l2cap_connect_cfm,
+ .disconn_cfm = l2cap_disconn_cfm,
+ .security_cfm = l2cap_security_cfm,
+};
+
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
struct l2cap_chan *c;
@@ -7570,6 +7595,8 @@ int __init l2cap_init(void)
if (err < 0)
return err;
+ hci_register_cb(&l2cap_cb);
+
if (IS_ERR_OR_NULL(bt_debugfs))
return 0;
@@ -7587,6 +7614,7 @@ int __init l2cap_init(void)
void l2cap_exit(void)
{
debugfs_remove(l2cap_debugfs);
+ hci_unregister_cb(&l2cap_cb);
l2cap_cleanup_sockets();
}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 60694f0f4c73..a7278f05eafb 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -944,8 +944,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
return err;
}
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -976,8 +976,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
{
struct sock *sk = sock->sk;
struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1004,9 +1004,9 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
release_sock(sk);
if (sock->type == SOCK_STREAM)
- err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+ err = bt_sock_stream_recvmsg(sock, msg, len, flags);
else
- err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
+ err = bt_sock_recvmsg(sock, msg, len, flags);
if (pi->chan->mode != L2CAP_MODE_ERTM)
return err;
@@ -1330,7 +1330,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
skb->priority = sk->sk_priority;
- bt_cb(skb)->chan = chan;
+ bt_cb(skb)->l2cap.chan = chan;
return skb;
}
@@ -1444,8 +1444,8 @@ static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
memset(la, 0, sizeof(struct sockaddr_l2));
la->l2_family = AF_BLUETOOTH;
- la->l2_psm = bt_cb(skb)->psm;
- bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
+ la->l2_psm = bt_cb(skb)->l2cap.psm;
+ bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr);
*msg_namelen = sizeof(struct sockaddr_l2);
}
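
The l2cap_sock.c hunks above track the removal of the struct kiocb argument from the socket sendmsg/recvmsg operations; for reference, the resulting prototype looks like the hypothetical wrapper below (only the signature and the bt_sock_recvmsg() call are taken from this patch):

    /* Sketch of the kiocb-less recvmsg prototype used above. */
    static int example_recvmsg(struct socket *sock, struct msghdr *msg,
                               size_t len, int flags)
    {
            return bt_sock_recvmsg(sock, msg, len, flags);
    }
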
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 9ec5390c85eb..7fd87e7135b5 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -29,14 +29,16 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "smp.h"
+#include "mgmt_util.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 8
+#define MGMT_REVISION 9
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -95,6 +97,11 @@ static const u16 mgmt_commands[] = {
MGMT_OP_SET_EXTERNAL_CONFIG,
MGMT_OP_SET_PUBLIC_ADDRESS,
MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
+ MGMT_OP_READ_EXT_INDEX_LIST,
+ MGMT_OP_READ_ADV_FEATURES,
+ MGMT_OP_ADD_ADVERTISING,
+ MGMT_OP_REMOVE_ADVERTISING,
};
static const u16 mgmt_events[] = {
@@ -127,6 +134,32 @@ static const u16 mgmt_events[] = {
MGMT_EV_UNCONF_INDEX_ADDED,
MGMT_EV_UNCONF_INDEX_REMOVED,
MGMT_EV_NEW_CONFIG_OPTIONS,
+ MGMT_EV_EXT_INDEX_ADDED,
+ MGMT_EV_EXT_INDEX_REMOVED,
+ MGMT_EV_LOCAL_OOB_DATA_UPDATED,
+ MGMT_EV_ADVERTISING_ADDED,
+ MGMT_EV_ADVERTISING_REMOVED,
+};
+
+static const u16 mgmt_untrusted_commands[] = {
+ MGMT_OP_READ_INDEX_LIST,
+ MGMT_OP_READ_INFO,
+ MGMT_OP_READ_UNCONF_INDEX_LIST,
+ MGMT_OP_READ_CONFIG_INFO,
+ MGMT_OP_READ_EXT_INDEX_LIST,
+};
+
+static const u16 mgmt_untrusted_events[] = {
+ MGMT_EV_INDEX_ADDED,
+ MGMT_EV_INDEX_REMOVED,
+ MGMT_EV_NEW_SETTINGS,
+ MGMT_EV_CLASS_OF_DEV_CHANGED,
+ MGMT_EV_LOCAL_NAME_CHANGED,
+ MGMT_EV_UNCONF_INDEX_ADDED,
+ MGMT_EV_UNCONF_INDEX_REMOVED,
+ MGMT_EV_NEW_CONFIG_OPTIONS,
+ MGMT_EV_EXT_INDEX_ADDED,
+ MGMT_EV_EXT_INDEX_REMOVED,
};
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
@@ -134,17 +167,6 @@ static const u16 mgmt_events[] = {
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\x00\x00"
-struct pending_cmd {
- struct list_head list;
- u16 opcode;
- int index;
- void *param;
- size_t param_len;
- struct sock *sk;
- void *user_data;
- int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
-};
-
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
MGMT_STATUS_SUCCESS,
@@ -218,98 +240,32 @@ static u8 mgmt_status(u8 hci_status)
return MGMT_STATUS_FAILED;
}
-static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
- struct sock *skip_sk)
+static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
+ u16 len, int flag)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
-
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- if (hdev)
- hdr->index = cpu_to_le16(hdev->id);
- else
- hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
- hdr->len = cpu_to_le16(data_len);
-
- if (data)
- memcpy(skb_put(skb, data_len), data, data_len);
-
- /* Time stamp */
- __net_timestamp(skb);
-
- hci_send_to_control(skb, skip_sk);
- kfree_skb(skb);
-
- return 0;
+ return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+ flag, NULL);
}
-static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
+static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
+ u16 len, int flag, struct sock *skip_sk)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_status *ev;
- int err;
-
- BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
-
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- hdr = (void *) skb_put(skb, sizeof(*hdr));
-
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
- hdr->index = cpu_to_le16(index);
- hdr->len = cpu_to_le16(sizeof(*ev));
-
- ev = (void *) skb_put(skb, sizeof(*ev));
- ev->status = status;
- ev->opcode = cpu_to_le16(cmd);
-
- err = sock_queue_rcv_skb(sk, skb);
- if (err < 0)
- kfree_skb(skb);
-
- return err;
+ return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+ flag, skip_sk);
}
-static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
- void *rp, size_t rp_len)
+static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
+ u16 len, struct sock *skip_sk)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_complete *ev;
- int err;
-
- BT_DBG("sock %p", sk);
-
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- hdr = (void *) skb_put(skb, sizeof(*hdr));
-
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->index = cpu_to_le16(index);
- hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
-
- ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
- ev->opcode = cpu_to_le16(cmd);
- ev->status = status;
-
- if (rp)
- memcpy(ev->data, rp, rp_len);
-
- err = sock_queue_rcv_skb(sk, skb);
- if (err < 0)
- kfree_skb(skb);
+ return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+ HCI_MGMT_GENERIC_EVENTS, skip_sk);
+}
- return err;
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
+ struct sock *skip_sk)
+{
+ return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
+ HCI_SOCK_TRUSTED, skip_sk);
}
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -322,22 +278,28 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
rp.version = MGMT_VERSION;
rp.revision = cpu_to_le16(MGMT_REVISION);
- return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
- sizeof(rp));
+ return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
+ &rp, sizeof(rp));
}
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_rp_read_commands *rp;
- const u16 num_commands = ARRAY_SIZE(mgmt_commands);
- const u16 num_events = ARRAY_SIZE(mgmt_events);
- __le16 *opcode;
+ u16 num_commands, num_events;
size_t rp_size;
int i, err;
BT_DBG("sock %p", sk);
+ if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
+ num_commands = ARRAY_SIZE(mgmt_commands);
+ num_events = ARRAY_SIZE(mgmt_events);
+ } else {
+ num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
+ num_events = ARRAY_SIZE(mgmt_untrusted_events);
+ }
+
rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
rp = kmalloc(rp_size, GFP_KERNEL);
@@ -347,14 +309,26 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
rp->num_commands = cpu_to_le16(num_commands);
rp->num_events = cpu_to_le16(num_events);
- for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
- put_unaligned_le16(mgmt_commands[i], opcode);
+ if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
+ __le16 *opcode = rp->opcodes;
+
+ for (i = 0; i < num_commands; i++, opcode++)
+ put_unaligned_le16(mgmt_commands[i], opcode);
+
+ for (i = 0; i < num_events; i++, opcode++)
+ put_unaligned_le16(mgmt_events[i], opcode);
+ } else {
+ __le16 *opcode = rp->opcodes;
+
+ for (i = 0; i < num_commands; i++, opcode++)
+ put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
- for (i = 0; i < num_events; i++, opcode++)
- put_unaligned_le16(mgmt_events[i], opcode);
+ for (i = 0; i < num_events; i++, opcode++)
+ put_unaligned_le16(mgmt_untrusted_events[i], opcode);
+ }
- err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
- rp_size);
+ err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
+ rp, rp_size);
kfree(rp);
return err;
@@ -376,7 +350,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (d->dev_type == HCI_BREDR &&
- !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
+ !hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++;
}
@@ -389,9 +363,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (test_bit(HCI_SETUP, &d->dev_flags) ||
- test_bit(HCI_CONFIG, &d->dev_flags) ||
- test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+ if (hci_dev_test_flag(d, HCI_SETUP) ||
+ hci_dev_test_flag(d, HCI_CONFIG) ||
+ hci_dev_test_flag(d, HCI_USER_CHANNEL))
continue;
/* Devices marked as raw-only are neither configured
@@ -401,7 +375,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
if (d->dev_type == HCI_BREDR &&
- !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
+ !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
@@ -412,8 +386,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
read_unlock(&hci_dev_list_lock);
- err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
- rp_len);
+ err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
+ 0, rp, rp_len);
kfree(rp);
@@ -436,7 +410,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (d->dev_type == HCI_BREDR &&
- test_bit(HCI_UNCONFIGURED, &d->dev_flags))
+ hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++;
}
@@ -449,9 +423,9 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (test_bit(HCI_SETUP, &d->dev_flags) ||
- test_bit(HCI_CONFIG, &d->dev_flags) ||
- test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+ if (hci_dev_test_flag(d, HCI_SETUP) ||
+ hci_dev_test_flag(d, HCI_CONFIG) ||
+ hci_dev_test_flag(d, HCI_USER_CHANNEL))
continue;
/* Devices marked as raw-only are neither configured
@@ -461,7 +435,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
continue;
if (d->dev_type == HCI_BREDR &&
- test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
+ hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
@@ -472,8 +446,84 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
read_unlock(&hci_dev_list_lock);
- err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
- 0, rp, rp_len);
+ err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
+ MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
+
+ kfree(rp);
+
+ return err;
+}
+
+static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_rp_read_ext_index_list *rp;
+ struct hci_dev *d;
+ size_t rp_len;
+ u16 count;
+ int err;
+
+ BT_DBG("sock %p", sk);
+
+ read_lock(&hci_dev_list_lock);
+
+ count = 0;
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
+ count++;
+ }
+
+ rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp) {
+ read_unlock(&hci_dev_list_lock);
+ return -ENOMEM;
+ }
+
+ count = 0;
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (hci_dev_test_flag(d, HCI_SETUP) ||
+ hci_dev_test_flag(d, HCI_CONFIG) ||
+ hci_dev_test_flag(d, HCI_USER_CHANNEL))
+ continue;
+
+ /* Devices marked as raw-only are neither configured
+ * nor unconfigured controllers.
+ */
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ continue;
+
+ if (d->dev_type == HCI_BREDR) {
+ if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ rp->entry[count].type = 0x01;
+ else
+ rp->entry[count].type = 0x00;
+ } else if (d->dev_type == HCI_AMP) {
+ rp->entry[count].type = 0x02;
+ } else {
+ continue;
+ }
+
+ rp->entry[count].bus = d->bus;
+ rp->entry[count++].index = cpu_to_le16(d->id);
+ BT_DBG("Added hci%u", d->id);
+ }
+
+ rp->num_controllers = cpu_to_le16(count);
+ rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
+
+ read_unlock(&hci_dev_list_lock);
+
+ /* If this command is called at least once, then all the
+ * default index and unconfigured index events are disabled
+ * and from now on only extended index events are used.
+ */
+ hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
+ hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
+ hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
+
+ err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
+ MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
kfree(rp);
@@ -483,7 +533,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
static bool is_configured(struct hci_dev *hdev)
{
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
- !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+ !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
return false;
if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
@@ -498,7 +548,7 @@ static __le32 get_missing_options(struct hci_dev *hdev)
u32 options = 0;
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
- !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+ !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
options |= MGMT_OPTION_EXTERNAL_CONFIG;
if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
@@ -512,16 +562,16 @@ static int new_options(struct hci_dev *hdev, struct sock *skip)
{
__le32 options = get_missing_options(hdev);
- return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
- sizeof(options), skip);
+ return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
+ sizeof(options), skip);
}
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
__le32 options = get_missing_options(hdev);
- return cmd_complete(sk, hdev->id, opcode, 0, &options,
- sizeof(options));
+ return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
+ sizeof(options));
}
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
@@ -548,8 +598,8 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev,
hci_dev_unlock(hdev);
- return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
- sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
+ &rp, sizeof(rp));
}
static u32 get_supported_settings(struct hci_dev *hdev)
@@ -582,6 +632,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_ADVERTISING;
settings |= MGMT_SETTING_SECURE_CONN;
settings |= MGMT_SETTING_PRIVACY;
+ settings |= MGMT_SETTING_STATIC_ADDRESS;
}
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
@@ -598,45 +649,64 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hdev_is_powered(hdev))
settings |= MGMT_SETTING_POWERED;
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
settings |= MGMT_SETTING_CONNECTABLE;
- if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
settings |= MGMT_SETTING_FAST_CONNECTABLE;
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
settings |= MGMT_SETTING_DISCOVERABLE;
- if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_BONDABLE))
settings |= MGMT_SETTING_BONDABLE;
- if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
settings |= MGMT_SETTING_BREDR;
- if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
settings |= MGMT_SETTING_LE;
- if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
settings |= MGMT_SETTING_LINK_SECURITY;
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
settings |= MGMT_SETTING_SSP;
- if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
settings |= MGMT_SETTING_HS;
- if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
settings |= MGMT_SETTING_ADVERTISING;
- if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
settings |= MGMT_SETTING_SECURE_CONN;
- if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
settings |= MGMT_SETTING_DEBUG_KEYS;
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY))
settings |= MGMT_SETTING_PRIVACY;
+ /* The current setting for static address has two purposes. The
+ * first is to indicate if the static address will be used and
+ * the second is to indicate if it is actually set.
+ *
+ * This means if the static address is not configured, this flag
+ * will never be set. If the address is configured, then whether
+ * it is actually in use decides if the flag is set or not.
+ *
+ * For single mode LE only controllers and dual-mode controllers
+ * with BR/EDR disabled, the existence of the static address will
+ * be evaluated.
+ */
+ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+ !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+ if (bacmp(&hdev->static_addr, BDADDR_ANY))
+ settings |= MGMT_SETTING_STATIC_ADDRESS;
+ }
+
return settings;
}
@@ -750,35 +820,19 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
return ptr;
}
-static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
- struct pending_cmd *cmd;
-
- list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
- if (cmd->opcode == opcode)
- return cmd;
- }
-
- return NULL;
+ return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
-static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
+static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
struct hci_dev *hdev,
const void *data)
{
- struct pending_cmd *cmd;
-
- list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
- if (cmd->user_data != data)
- continue;
- if (cmd->opcode == opcode)
- return cmd;
- }
-
- return NULL;
+ return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
-static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
u8 ad_len = 0;
size_t name_len;
@@ -804,21 +858,36 @@ static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
return ad_len;
}
-static void update_scan_rsp_data(struct hci_request *req)
+static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+ /* TODO: Set the appropriate entries based on advertising instance flags
+ * here once flags other than 0 are supported.
+ */
+ memcpy(ptr, hdev->adv_instance.scan_rsp_data,
+ hdev->adv_instance.scan_rsp_len);
+
+ return hdev->adv_instance.scan_rsp_len;
+}
+
+static void update_scan_rsp_data_for_instance(struct hci_request *req,
+ u8 instance)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_scan_rsp_data cp;
u8 len;
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
memset(&cp, 0, sizeof(cp));
- len = create_scan_rsp_data(hdev, cp.data);
+ if (instance)
+ len = create_instance_scan_rsp_data(hdev, cp.data);
+ else
+ len = create_default_scan_rsp_data(hdev, cp.data);
if (hdev->scan_rsp_data_len == len &&
- memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
+ !memcmp(cp.data, hdev->scan_rsp_data, len))
return;
memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
@@ -829,14 +898,33 @@ static void update_scan_rsp_data(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
+static void update_scan_rsp_data(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 instance;
+
+ /* The "Set Advertising" setting supersedes the "Add Advertising"
+ * setting. Here we set the scan response data based on which
+ * setting was set. When neither apply, default to the global settings,
+ * represented by instance "0".
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
+ !hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ instance = 0x01;
+ else
+ instance = 0x00;
+
+ update_scan_rsp_data_for_instance(req, instance);
+}
+
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
/* If there's a pending mgmt command the flags will not yet have
* their final values, so check for this first.
*/
- cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+ cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
if (cmd) {
struct mgmt_mode *cp = cmd->param;
if (cp->val == 0x01)
@@ -844,39 +932,131 @@ static u8 get_adv_discov_flags(struct hci_dev *hdev)
else if (cp->val == 0x02)
return LE_AD_LIMITED;
} else {
- if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
return LE_AD_LIMITED;
- else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
return LE_AD_GENERAL;
}
return 0;
}
-static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
+static u8 get_current_adv_instance(struct hci_dev *hdev)
+{
+ /* The "Set Advertising" setting supersedes the "Add Advertising"
+ * setting. Here we set the advertising data based on which
+ * setting was set. When neither apply, default to the global settings,
+ * represented by instance "0".
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
+ !hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ return 0x01;
+
+ return 0x00;
+}
+
+static bool get_connectable(struct hci_dev *hdev)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ /* If there's a pending mgmt command the flag will not yet have
+ * its final value, so check for this first.
+ */
+ cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+ if (cmd) {
+ struct mgmt_mode *cp = cmd->param;
+
+ return cp->val;
+ }
+
+ return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
+}
+
+static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
+{
+ u32 flags;
+
+ if (instance > 0x01)
+ return 0;
+
+ if (instance == 0x01)
+ return hdev->adv_instance.flags;
+
+ /* Instance 0 always manages the "Tx Power" and "Flags" fields */
+ flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+ /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
+ * to the "connectable" instance flag.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
+ flags |= MGMT_ADV_FLAG_CONNECTABLE;
+
+ return flags;
+}
+
+static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
+{
+ /* Ignore instance 0 and other unsupported instances */
+ if (instance != 0x01)
+ return 0;
+
+ /* TODO: Take into account the "appearance" and "local-name" flags here.
+ * These are currently being ignored as they are not supported.
+ */
+ return hdev->adv_instance.scan_rsp_len;
+}
+
+static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
u8 ad_len = 0, flags = 0;
+ u32 instance_flags = get_adv_instance_flags(hdev, instance);
- flags |= get_adv_discov_flags(hdev);
+ /* The Add Advertising command allows userspace to set both the general
+ * and limited discoverable flags.
+ */
+ if (instance_flags & MGMT_ADV_FLAG_DISCOV)
+ flags |= LE_AD_GENERAL;
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
- flags |= LE_AD_NO_BREDR;
+ if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
+ flags |= LE_AD_LIMITED;
- if (flags) {
- BT_DBG("adv flags 0x%02x", flags);
+ if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
+ /* If a discovery flag wasn't provided, simply use the global
+ * settings.
+ */
+ if (!flags)
+ flags |= get_adv_discov_flags(hdev);
- ptr[0] = 2;
- ptr[1] = EIR_FLAGS;
- ptr[2] = flags;
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ flags |= LE_AD_NO_BREDR;
- ad_len += 3;
- ptr += 3;
+ /* If flags would still be empty, then there is no need to
+ * include the "Flags" AD field.
+ */
+ if (flags) {
+ ptr[0] = 0x02;
+ ptr[1] = EIR_FLAGS;
+ ptr[2] = flags;
+
+ ad_len += 3;
+ ptr += 3;
+ }
}
- if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
- ptr[0] = 2;
+ if (instance) {
+ memcpy(ptr, hdev->adv_instance.adv_data,
+ hdev->adv_instance.adv_data_len);
+
+ ad_len += hdev->adv_instance.adv_data_len;
+ ptr += hdev->adv_instance.adv_data_len;
+ }
+
+ /* Provide Tx Power only if we can provide a valid value for it */
+ if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
+ (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
+ ptr[0] = 0x02;
ptr[1] = EIR_TX_POWER;
- ptr[2] = (u8) hdev->adv_tx_power;
+ ptr[2] = (u8)hdev->adv_tx_power;
ad_len += 3;
ptr += 3;
@@ -885,19 +1065,20 @@ static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
return ad_len;
}
-static void update_adv_data(struct hci_request *req)
+static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_adv_data cp;
u8 len;
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
memset(&cp, 0, sizeof(cp));
- len = create_adv_data(hdev, cp.data);
+ len = create_instance_adv_data(hdev, instance, cp.data);
+ /* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
memcmp(cp.data, hdev->adv_data, len) == 0)
return;
@@ -910,6 +1091,14 @@ static void update_adv_data(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
+static void update_adv_data(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 instance = get_current_adv_instance(hdev);
+
+ update_adv_data_for_instance(req, instance);
+}
+
int mgmt_update_adv_data(struct hci_dev *hdev)
{
struct hci_request req;
@@ -979,10 +1168,10 @@ static void update_eir(struct hci_request *req)
if (!lmp_ext_inq_capable(hdev))
return;
- if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
return;
- if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
return;
memset(&cp, 0, sizeof(cp));
@@ -1018,17 +1207,17 @@ static void update_class(struct hci_request *req)
if (!hdev_is_powered(hdev))
return;
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return;
- if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
return;
cod[0] = hdev->minor_class;
cod[1] = hdev->major_class;
cod[2] = get_service_classes(hdev);
- if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
cod[1] |= 0x20;
if (memcmp(cod, hdev->dev_class, 3) == 0)
@@ -1037,22 +1226,6 @@ static void update_class(struct hci_request *req)
hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
-static bool get_connectable(struct hci_dev *hdev)
-{
- struct pending_cmd *cmd;
-
- /* If there's a pending mgmt command the flag will not yet have
- * it's final value, so check for this first.
- */
- cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
- if (cmd) {
- struct mgmt_mode *cp = cmd->param;
- return cp->val;
- }
-
- return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-}
-
static void disable_advertising(struct hci_request *req)
{
u8 enable = 0x00;
@@ -1066,11 +1239,13 @@ static void enable_advertising(struct hci_request *req)
struct hci_cp_le_set_adv_param cp;
u8 own_addr_type, enable = 0x01;
bool connectable;
+ u8 instance;
+ u32 flags;
if (hci_conn_num(hdev, LE_LINK) > 0)
return;
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
disable_advertising(req);
/* Clear the HCI_LE_ADV bit temporarily so that the
@@ -1078,9 +1253,16 @@ static void enable_advertising(struct hci_request *req)
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
- clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+ instance = get_current_adv_instance(hdev);
+ flags = get_adv_instance_flags(hdev, instance);
- connectable = get_connectable(hdev);
+ /* If the "connectable" instance flag was not set, then choose between
+ * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+ */
+ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+ get_connectable(hdev);
/* Set require_privacy to true only when non-connectable
* advertising is used. In that case it is fine to use a
@@ -1092,7 +1274,14 @@ static void enable_advertising(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
- cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+
+ if (connectable)
+ cp.type = LE_ADV_IND;
+ else if (get_adv_instance_scan_rsp_len(hdev, instance))
+ cp.type = LE_ADV_SCAN_IND;
+ else
+ cp.type = LE_ADV_NONCONN_IND;
+
cp.own_address_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
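/*
 * Editor's sketch, not part of the patch: the advertising-PDU choice made
 * above, as a stand-alone helper with stand-in names. The order matters:
 * connectable wins outright, configured scan-response data promotes the
 * advertisement to scannable, and otherwise it is non-connectable and
 * non-scannable.
 */
enum demo_adv_type { DEMO_ADV_IND, DEMO_ADV_SCAN_IND, DEMO_ADV_NONCONN_IND };

static enum demo_adv_type demo_pick_adv_type(int connectable,
                                             unsigned int scan_rsp_len)
{
        if (connectable)
                return DEMO_ADV_IND;       /* connectable and scannable  */
        if (scan_rsp_len)
                return DEMO_ADV_SCAN_IND;  /* scannable only             */
        return DEMO_ADV_NONCONN_IND;       /* neither                    */
}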
@@ -1107,7 +1296,7 @@ static void service_cache_off(struct work_struct *work)
service_cache.work);
struct hci_request req;
- if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
return;
hci_req_init(&req, hdev);
@@ -1130,9 +1319,9 @@ static void rpa_expired(struct work_struct *work)
BT_DBG("");
- set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
- if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
return;
/* The generation of a new RPA and programming it into the
@@ -1145,7 +1334,7 @@ static void rpa_expired(struct work_struct *work)
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
- if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
+ if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
return;
INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
@@ -1156,7 +1345,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
* for mgmt we require user-space to explicitly enable
* it
*/
- clear_bit(HCI_BONDABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
@@ -1185,73 +1374,16 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
hci_dev_unlock(hdev);
- return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
- sizeof(rp));
-}
-
-static void mgmt_pending_free(struct pending_cmd *cmd)
-{
- sock_put(cmd->sk);
- kfree(cmd->param);
- kfree(cmd);
-}
-
-static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
- struct hci_dev *hdev, void *data,
- u16 len)
-{
- struct pending_cmd *cmd;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return NULL;
-
- cmd->opcode = opcode;
- cmd->index = hdev->id;
-
- cmd->param = kmemdup(data, len, GFP_KERNEL);
- if (!cmd->param) {
- kfree(cmd);
- return NULL;
- }
-
- cmd->param_len = len;
-
- cmd->sk = sk;
- sock_hold(sk);
-
- list_add(&cmd->list, &hdev->mgmt_pending);
-
- return cmd;
-}
-
-static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
- void (*cb)(struct pending_cmd *cmd,
- void *data),
- void *data)
-{
- struct pending_cmd *cmd, *tmp;
-
- list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
- if (opcode > 0 && cmd->opcode != opcode)
- continue;
-
- cb(cmd, data);
- }
-}
-
-static void mgmt_pending_remove(struct pending_cmd *cmd)
-{
- list_del(&cmd->list);
- mgmt_pending_free(cmd);
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
+ sizeof(rp));
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
__le32 settings = cpu_to_le32(get_current_settings(hdev));
- return cmd_complete(sk, hdev->id, opcode, 0, &settings,
- sizeof(settings));
+ return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
+ sizeof(settings));
}
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
@@ -1272,9 +1404,10 @@ static bool hci_stop_discovery(struct hci_request *req)
switch (hdev->discovery.state) {
case DISCOVERY_FINDING:
- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
- } else {
+
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
cancel_delayed_work(&hdev->le_scan_disable);
hci_req_add_le_scan_disable(req);
}
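/*
 * Editor's sketch, not part of the patch: the shape of the change in the
 * DISCOVERY_FINDING branch above. Inquiry cancellation and LE scan
 * disabling used to be an if/else pair; they are now independent checks,
 * so interleaved BR/EDR + LE discovery gets both stop commands.
 */
static void demo_stop_finding(int inquiring, int le_scanning,
                              void (*cancel_inquiry)(void),
                              void (*disable_le_scan)(void))
{
        if (inquiring)
                cancel_inquiry();

        if (le_scanning)               /* independent check, not "else"  */
                disable_le_scan();
}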
@@ -1295,7 +1428,7 @@ static bool hci_stop_discovery(struct hci_request *req)
default:
/* Passive scanning */
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(req);
return true;
}
@@ -1306,6 +1439,49 @@ static bool hci_stop_discovery(struct hci_request *req)
return false;
}
+static void advertising_added(struct sock *sk, struct hci_dev *hdev,
+ u8 instance)
+{
+ struct mgmt_ev_advertising_added ev;
+
+ ev.instance = instance;
+
+ mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
+}
+
+static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
+ u8 instance)
+{
+ struct mgmt_ev_advertising_removed ev;
+
+ ev.instance = instance;
+
+ mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
+}
+
+static void clear_adv_instance(struct hci_dev *hdev)
+{
+ struct hci_request req;
+
+ if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
+ return;
+
+ if (hdev->adv_instance.timeout)
+ cancel_delayed_work(&hdev->adv_instance.timeout_exp);
+
+ memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
+ advertising_removed(NULL, hdev, 1);
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
+
+ if (!hdev_is_powered(hdev) ||
+ hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ return;
+
+ hci_req_init(&req, hdev);
+ disable_advertising(&req);
+ hci_req_run(&req, NULL);
+}
+
static int clean_up_hci_state(struct hci_dev *hdev)
{
struct hci_request req;
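/*
 * Editor's sketch, not part of the patch: the teardown order used by
 * clear_adv_instance() above, with hypothetical stand-ins. The sequencing
 * is the point: cancel any pending timeout first, wipe the instance and
 * tell userspace, and only touch the controller when the device is
 * powered and the plain HCI_ADVERTISING setting is not keeping
 * advertising alive on its own.
 */
struct demo_instance {
        int timeout;                   /* seconds, 0 = no timeout armed  */
        int configured;
};

static void demo_clear_instance(struct demo_instance *inst, int powered,
                                int sw_advertising,
                                void (*cancel_timer)(void),
                                void (*notify_removed)(void),
                                void (*disable_adv)(void))
{
        if (inst->timeout)
                cancel_timer();

        inst->timeout = 0;
        inst->configured = 0;
        notify_removed();

        if (!powered || sw_advertising)
                return;                /* nothing to turn off right now  */

        disable_adv();
}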
@@ -1321,7 +1497,10 @@ static int clean_up_hci_state(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+ if (hdev->adv_instance.timeout)
+ clear_adv_instance(hdev);
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
disable_advertising(&req);
discov_stopped = hci_stop_discovery(&req);
@@ -1369,24 +1548,24 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
int err;
BT_DBG("request for %s", hdev->name);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
goto failed;
}
- if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
cancel_delayed_work(&hdev->power_off);
if (cp->val) {
@@ -1433,11 +1612,10 @@ failed:
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
- __le32 ev;
-
- ev = cpu_to_le32(get_current_settings(hdev));
+ __le32 ev = cpu_to_le32(get_current_settings(hdev));
- return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
+ return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
+ sizeof(ev), skip);
}
int mgmt_new_settings(struct hci_dev *hdev)
@@ -1451,7 +1629,7 @@ struct cmd_lookup {
u8 mgmt_status;
};
-static void settings_rsp(struct pending_cmd *cmd, void *data)
+static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
struct cmd_lookup *match = data;
@@ -1467,15 +1645,15 @@ static void settings_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_free(cmd);
}
-static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
u8 *status = data;
- cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
mgmt_pending_remove(cmd);
}
-static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
+static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
if (cmd->cmd_complete) {
u8 *status = data;
@@ -1489,23 +1667,23 @@ static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
cmd_status_rsp(cmd, data);
}
-static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
- return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
- cmd->param, cmd->param_len);
+ return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+ cmd->param, cmd->param_len);
}
-static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
- return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
- sizeof(struct mgmt_addr_info));
+ return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+ cmd->param, sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
if (!lmp_bredr_capable(hdev))
return MGMT_STATUS_NOT_SUPPORTED;
- else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return MGMT_STATUS_REJECTED;
else
return MGMT_STATUS_SUCCESS;
@@ -1515,7 +1693,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
{
if (!lmp_le_capable(hdev))
return MGMT_STATUS_NOT_SUPPORTED;
- else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return MGMT_STATUS_REJECTED;
else
return MGMT_STATUS_SUCCESS;
@@ -1524,7 +1702,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
struct hci_request req;
bool changed;
@@ -1533,21 +1711,20 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+ cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
if (!cmd)
goto unlock;
if (status) {
u8 mgmt_err = mgmt_status(status);
- cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
- clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
goto remove_cmd;
}
cp = cmd->param;
if (cp->val) {
- changed = !test_and_set_bit(HCI_DISCOVERABLE,
- &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
if (hdev->discov_timeout > 0) {
int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
@@ -1555,8 +1732,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
to);
}
} else {
- changed = test_and_clear_bit(HCI_DISCOVERABLE,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
}
send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -1585,7 +1761,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_discoverable *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
u16 timeout;
u8 scan;
@@ -1593,14 +1769,14 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
- !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_REJECTED);
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
+ !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_REJECTED);
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
timeout = __le16_to_cpu(cp->timeout);
@@ -1609,27 +1785,27 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
*/
if ((cp->val == 0x00 && timeout > 0) ||
(cp->val == 0x02 && timeout == 0))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev) && timeout > 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_NOT_POWERED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
- if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
- MGMT_STATUS_REJECTED);
+ if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_REJECTED);
goto failed;
}
@@ -1640,8 +1816,8 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
* not a valid operation since it requires a timeout
* and so no need to check HCI_LIMITED_DISCOVERABLE.
*/
- if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
- change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
+ hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
changed = true;
}
@@ -1659,9 +1835,9 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
* value with the new value. And if only the timeout gets updated,
* then no need for any HCI transactions.
*/
- if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
- (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
- &hdev->dev_flags)) {
+ if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
+ (cp->val == 0x02) == hci_dev_test_flag(hdev,
+ HCI_LIMITED_DISCOVERABLE)) {
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = timeout;
@@ -1690,16 +1866,16 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
/* Limited discoverable mode */
if (cp->val == 0x02)
- set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
else
- clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
hci_req_init(&req, hdev);
/* The procedure for LE-only controllers is much simpler - just
* update the advertising data.
*/
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
goto update_ad;
scan = SCAN_PAGE;
@@ -1729,7 +1905,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
scan |= SCAN_INQUIRY;
} else {
- clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
}
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
@@ -1752,7 +1928,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
struct hci_cp_write_page_scan_activity acp;
u8 type;
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return;
if (hdev->hci_ver < BLUETOOTH_VER_1_2)
@@ -1784,7 +1960,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
bool conn_changed, discov_changed;
@@ -1792,26 +1968,26 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status,
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+ cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
if (!cmd)
goto unlock;
if (status) {
u8 mgmt_err = mgmt_status(status);
- cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
goto remove_cmd;
}
cp = cmd->param;
if (cp->val) {
- conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
- &hdev->dev_flags);
+ conn_changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_CONNECTABLE);
discov_changed = false;
} else {
- conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
- &hdev->dev_flags);
- discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
- &hdev->dev_flags);
+ conn_changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_CONNECTABLE);
+ discov_changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_DISCOVERABLE);
}
send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
@@ -1837,14 +2013,14 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
bool changed = false;
int err;
- if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
changed = true;
if (val) {
- set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_CONNECTABLE);
} else {
- clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
}
err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
@@ -1864,21 +2040,21 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
u8 scan;
int err;
BT_DBG("request for %s", hdev->name);
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
- !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_REJECTED);
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
+ !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_REJECTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
@@ -1887,10 +2063,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1906,10 +2082,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
* by-product of disabling connectable, we need to update the
* advertising flags.
*/
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
if (!cp->val) {
- clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
}
update_adv_data(&req);
} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
@@ -1938,17 +2114,9 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
}
no_scan_update:
- /* If we're going from non-connectable to connectable or
- * vice-versa when fast connectable is enabled ensure that fast
- * connectable gets disabled. write_fast_connectable won't do
- * anything if the page scan parameters are already what they
- * should be.
- */
- if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
- write_fast_connectable(&req, false);
-
/* Update the advertising parameters if necessary */
- if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
enable_advertising(&req);
err = hci_req_run(&req, set_connectable_complete);
@@ -1975,15 +2143,15 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
if (cp->val)
- changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
else
- changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
if (err < 0)
@@ -2001,7 +2169,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
u8 val, status;
int err;
@@ -2009,21 +2177,20 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
status = mgmt_bredr_support(hdev);
if (status)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
- status);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ status);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
bool changed = false;
- if (!!cp->val != test_bit(HCI_LINK_SECURITY,
- &hdev->dev_flags)) {
- change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
+ hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
changed = true;
}
@@ -2037,9 +2204,9 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -2070,7 +2237,7 @@ failed:
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
u8 status;
int err;
@@ -2078,15 +2245,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
status = mgmt_bredr_support(hdev);
if (status)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
if (!lmp_ssp_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
@@ -2094,16 +2261,16 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
bool changed;
if (cp->val) {
- changed = !test_and_set_bit(HCI_SSP_ENABLED,
- &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_SSP_ENABLED);
} else {
- changed = test_and_clear_bit(HCI_SSP_ENABLED,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_SSP_ENABLED);
if (!changed)
- changed = test_and_clear_bit(HCI_HS_ENABLED,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_HS_ENABLED);
else
- clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -2116,14 +2283,13 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_SSP, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
goto failed;
}
- if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
goto failed;
}
@@ -2134,7 +2300,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
+ if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
sizeof(cp->val), &cp->val);
@@ -2160,32 +2326,38 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
status = mgmt_bredr_support(hdev);
if (status)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
if (!lmp_ssp_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_NOT_SUPPORTED);
- if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_REJECTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
+ if (pending_find(MGMT_OP_SET_SSP, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
if (cp->val) {
- changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
} else {
if (hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_REJECTED);
goto unlock;
}
- changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
}
err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
@@ -2226,7 +2398,7 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
* has actually been enabled. During power on, the
* update in powered_update_hci will take care of it.
*/
- if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
struct hci_request req;
hci_req_init(&req, hdev);
@@ -2244,7 +2416,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
struct hci_cp_write_le_host_supported hci_cp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;
u8 val, enabled;
@@ -2252,17 +2424,29 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
BT_DBG("request for %s", hdev->name);
if (!lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* Bluetooth single-mode LE-only controllers, or dual-mode
+ * controllers configured as LE-only devices, do not allow
+ * switching LE off. These have either LE enabled explicitly
+ * or BR/EDR has been previously switched off.
+ *
+ * When trying to enable an already enabled LE, gracefully
+ * send a positive response. Trying to disable it, however,
+ * will result in rejection.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ if (cp->val == 0x01)
+ return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
- /* LE-only devices do not allow toggling LE on/off */
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
- MGMT_STATUS_REJECTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_REJECTED);
+ }
hci_dev_lock(hdev);
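/*
 * Editor's sketch, not part of the patch: the LE-only guard added to
 * set_le() above, as a stand-alone decision helper with stand-in codes.
 * On a controller without BR/EDR, asking to enable LE again is
 * acknowledged as a no-op success, while asking to disable it is
 * rejected.
 */
enum demo_le_verdict { DEMO_LE_PROCEED, DEMO_LE_ACK_NOOP, DEMO_LE_REJECT };

static enum demo_le_verdict demo_check_set_le(int bredr_enabled, int enable)
{
        if (bredr_enabled)
                return DEMO_LE_PROCEED;    /* dual-mode: handle normally  */

        if (enable)
                return DEMO_LE_ACK_NOOP;   /* LE already on: ack politely */

        return DEMO_LE_REJECT;             /* LE cannot be switched off   */
}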
@@ -2272,13 +2456,13 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
if (!hdev_is_powered(hdev) || val == enabled) {
bool changed = false;
- if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
- change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+ hci_dev_change_flag(hdev, HCI_LE_ENABLED);
changed = true;
}
- if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
- clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING);
changed = true;
}
@@ -2292,10 +2476,10 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_LE, hdev) ||
+ pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_BUSY);
goto unlock;
}
@@ -2313,7 +2497,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_cp.le = val;
hci_cp.simul = 0x00;
} else {
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
disable_advertising(&req);
}
@@ -2337,7 +2521,7 @@ unlock:
*/
static bool pending_eir_or_class(struct hci_dev *hdev)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
switch (cmd->opcode) {
@@ -2373,16 +2557,16 @@ static u8 get_uuid_size(const u8 *uuid)
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(mgmt_op, hdev);
+ cmd = pending_find(mgmt_op, hdev);
if (!cmd)
goto unlock;
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
- hdev->dev_class, 3);
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(status), hdev->dev_class, 3);
mgmt_pending_remove(cmd);
@@ -2400,7 +2584,7 @@ static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_cp_add_uuid *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
struct bt_uuid *uuid;
int err;
@@ -2410,8 +2594,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_lock(hdev);
if (pending_eir_or_class(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
- MGMT_STATUS_BUSY);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -2437,8 +2621,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
if (err != -ENODATA)
goto failed;
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
- hdev->dev_class, 3);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
+ hdev->dev_class, 3);
goto failed;
}
@@ -2460,7 +2644,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
if (!hdev_is_powered(hdev))
return false;
- if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+ if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
queue_delayed_work(hdev->workqueue, &hdev->service_cache,
CACHE_TIMEOUT);
return true;
@@ -2480,7 +2664,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_remove_uuid *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct bt_uuid *match, *tmp;
u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
struct hci_request req;
@@ -2491,8 +2675,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (pending_eir_or_class(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
- MGMT_STATUS_BUSY);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_BUSY);
goto unlock;
}
@@ -2500,8 +2684,9 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
hci_uuids_clear(hdev);
if (enable_service_cache(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
- 0, hdev->dev_class, 3);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_UUID,
+ 0, hdev->dev_class, 3);
goto unlock;
}
@@ -2520,8 +2705,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (found == 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
- MGMT_STATUS_INVALID_PARAMS);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
@@ -2536,8 +2721,8 @@ update_class:
if (err != -ENODATA)
goto unlock;
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
- hdev->dev_class, 3);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
+ hdev->dev_class, 3);
goto unlock;
}
@@ -2565,27 +2750,27 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_dev_class *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;
BT_DBG("request for %s", hdev->name);
if (!lmp_bredr_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_NOT_SUPPORTED);
hci_dev_lock(hdev);
if (pending_eir_or_class(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
- MGMT_STATUS_BUSY);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_BUSY);
goto unlock;
}
if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
- MGMT_STATUS_INVALID_PARAMS);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
@@ -2593,14 +2778,14 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->minor_class = cp->minor;
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
- hdev->dev_class, 3);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
goto unlock;
}
hci_req_init(&req, hdev);
- if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
hci_dev_unlock(hdev);
cancel_delayed_work_sync(&hdev->service_cache);
hci_dev_lock(hdev);
@@ -2614,8 +2799,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
if (err != -ENODATA)
goto unlock;
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
- hdev->dev_class, 3);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
goto unlock;
}
@@ -2645,15 +2830,15 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
if (!lmp_bredr_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_NOT_SUPPORTED);
key_count = __le16_to_cpu(cp->key_count);
if (key_count > max_key_count) {
BT_ERR("load_link_keys: too big key_count value %u",
key_count);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
expected_len = sizeof(*cp) + key_count *
@@ -2661,13 +2846,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
if (expected_len != len) {
BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
expected_len, len);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
key_count);
@@ -2676,8 +2861,9 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_link_key_info *key = &cp->keys[i];
if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
hci_dev_lock(hdev);
@@ -2685,11 +2871,10 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
hci_link_keys_clear(hdev);
if (cp->debug_keys)
- changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
- &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
else
- changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_KEEP_DEBUG_KEYS);
if (changed)
new_settings(hdev, NULL);
@@ -2707,7 +2892,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
key->type, key->pin_len, NULL);
}
- cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
+ mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
hci_dev_unlock(hdev);
@@ -2732,7 +2917,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_cp_unpair_device *cp = data;
struct mgmt_rp_unpair_device rp;
struct hci_cp_disconnect dc;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_conn *conn;
int err;
@@ -2741,20 +2926,21 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (!bdaddr_type_is_valid(cp->addr.type))
- return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &rp, sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
- return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &rp, sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
- MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED, &rp,
+ sizeof(rp));
goto unlock;
}
@@ -2804,8 +2990,9 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (err < 0) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
- MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_PAIRED, &rp,
+ sizeof(rp));
goto unlock;
}
@@ -2813,8 +3000,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
* link is requested.
*/
if (!conn) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
- &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
+ &rp, sizeof(rp));
device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
goto unlock;
}
@@ -2844,7 +3031,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_disconnect *cp = data;
struct mgmt_rp_disconnect rp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_conn *conn;
int err;
@@ -2855,21 +3042,22 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (!bdaddr_type_is_valid(cp->addr.type))
- return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_INVALID_PARAMS,
- &rp, sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_POWERED, &rp,
+ sizeof(rp));
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
+ if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto failed;
}
@@ -2880,8 +3068,9 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_CONNECTED, &rp,
+ sizeof(rp));
goto failed;
}
@@ -2935,8 +3124,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
- MGMT_STATUS_NOT_POWERED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
+ MGMT_STATUS_NOT_POWERED);
goto unlock;
}
@@ -2969,8 +3158,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
- err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
- rp_len);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
+ rp_len);
kfree(rp);
@@ -2982,7 +3171,7 @@ unlock:
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_pin_code_neg_reply *cp)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
int err;
cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
@@ -3004,7 +3193,7 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
struct hci_conn *conn;
struct mgmt_cp_pin_code_reply *cp = data;
struct hci_cp_pin_code_reply reply;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
int err;
BT_DBG("");
@@ -3012,15 +3201,15 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_NOT_POWERED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
if (!conn) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_NOT_CONNECTED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_CONNECTED);
goto failed;
}
@@ -3033,8 +3222,8 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
err = send_pin_code_neg_reply(sk, hdev, &ncp);
if (err >= 0)
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
@@ -3068,8 +3257,8 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("");
if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
- return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
- MGMT_STATUS_INVALID_PARAMS, NULL, 0);
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_STATUS_INVALID_PARAMS, NULL, 0);
hci_dev_lock(hdev);
@@ -3080,14 +3269,14 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_unlock(hdev);
- return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
- 0);
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
+ NULL, 0);
}
-static struct pending_cmd *find_pairing(struct hci_conn *conn)
+static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
@@ -3102,7 +3291,7 @@ static struct pending_cmd *find_pairing(struct hci_conn *conn)
return NULL;
}
-static int pairing_complete(struct pending_cmd *cmd, u8 status)
+static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
struct mgmt_rp_pair_device rp;
struct hci_conn *conn = cmd->user_data;
@@ -3111,8 +3300,8 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status)
bacpy(&rp.addr.bdaddr, &conn->dst);
rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
- err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
- &rp, sizeof(rp));
+ err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
+ status, &rp, sizeof(rp));
/* So we don't get further callbacks for this connection */
conn->connect_cfm_cb = NULL;
@@ -3134,7 +3323,7 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status)
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
cmd = find_pairing(conn);
if (cmd) {
@@ -3145,7 +3334,7 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete)
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status %u", status);
@@ -3161,7 +3350,7 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status %u", status);
@@ -3183,7 +3372,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_pair_device *cp = data;
struct mgmt_rp_pair_device rp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
u8 sec_level, auth_type;
struct hci_conn *conn;
int err;
@@ -3195,20 +3384,28 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (!bdaddr_type_is_valid(cp->addr.type))
- return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &rp, sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
- return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &rp, sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED, &rp,
+ sizeof(rp));
+ goto unlock;
+ }
+
+ if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_ALREADY_PAIRED, &rp,
+ sizeof(rp));
goto unlock;
}
@@ -3249,19 +3446,22 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (PTR_ERR(conn) == -EBUSY)
status = MGMT_STATUS_BUSY;
+ else if (PTR_ERR(conn) == -EOPNOTSUPP)
+ status = MGMT_STATUS_NOT_SUPPORTED;
+ else if (PTR_ERR(conn) == -ECONNREFUSED)
+ status = MGMT_STATUS_REJECTED;
else
status = MGMT_STATUS_CONNECT_FAILED;
- err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- status, &rp,
- sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ status, &rp, sizeof(rp));
goto unlock;
}
if (conn->connect_cfm_cb) {
hci_conn_drop(conn);
- err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto unlock;
}
@@ -3305,7 +3505,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_addr_info *addr = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_conn *conn;
int err;
@@ -3314,31 +3514,31 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
- MGMT_STATUS_NOT_POWERED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED);
goto unlock;
}
- cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
+ cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
if (!cmd) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
conn = cmd->user_data;
if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
- MGMT_STATUS_INVALID_PARAMS);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
mgmt_pending_remove(cmd);
- err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
- addr, sizeof(*addr));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
+ addr, sizeof(*addr));
unlock:
hci_dev_unlock(hdev);
return err;
@@ -3348,16 +3548,16 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
struct mgmt_addr_info *addr, u16 mgmt_op,
u16 hci_op, __le32 passkey)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_conn *conn;
int err;
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, mgmt_op,
- MGMT_STATUS_NOT_POWERED, addr,
- sizeof(*addr));
+ err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_POWERED, addr,
+ sizeof(*addr));
goto done;
}
@@ -3367,22 +3567,22 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
if (!conn) {
- err = cmd_complete(sk, hdev->id, mgmt_op,
- MGMT_STATUS_NOT_CONNECTED, addr,
- sizeof(*addr));
+ err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_CONNECTED, addr,
+ sizeof(*addr));
goto done;
}
if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
err = smp_user_confirm_reply(conn, mgmt_op, passkey);
if (!err)
- err = cmd_complete(sk, hdev->id, mgmt_op,
- MGMT_STATUS_SUCCESS, addr,
- sizeof(*addr));
+ err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_SUCCESS, addr,
+ sizeof(*addr));
else
- err = cmd_complete(sk, hdev->id, mgmt_op,
- MGMT_STATUS_FAILED, addr,
- sizeof(*addr));
+ err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_FAILED, addr,
+ sizeof(*addr));
goto done;
}
@@ -3434,8 +3634,8 @@ static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("");
if (len != sizeof(*cp))
- return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
return user_pairing_resp(sk, hdev, &cp->addr,
MGMT_OP_USER_CONFIRM_REPLY,
@@ -3491,24 +3691,24 @@ static void update_name(struct hci_request *req)
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct mgmt_cp_set_local_name *cp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status 0x%02x", status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+ cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
if (!cmd)
goto unlock;
cp = cmd->param;
if (status)
- cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
- mgmt_status(status));
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ mgmt_status(status));
else
- cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
- cp, sizeof(*cp));
+ mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ cp, sizeof(*cp));
mgmt_pending_remove(cmd);
@@ -3520,7 +3720,7 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_local_name *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;
@@ -3534,8 +3734,8 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
!memcmp(hdev->short_name, cp->short_name,
sizeof(hdev->short_name))) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
- data, len);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ data, len);
goto failed;
}
@@ -3544,13 +3744,13 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
if (!hdev_is_powered(hdev)) {
memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
- data, len);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ data, len);
if (err < 0)
goto failed;
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
- sk);
+ err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
+ data, len, sk);
goto failed;
}
@@ -3585,10 +3785,70 @@ failed:
return err;
}
+static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb)
+{
+ struct mgmt_rp_read_local_oob_data mgmt_rp;
+ size_t rp_size = sizeof(mgmt_rp);
+ struct mgmt_pending_cmd *cmd;
+
+ BT_DBG("%s status %u", hdev->name, status);
+
+ cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
+ if (!cmd)
+ return;
+
+ if (status || !skb) {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ status ? mgmt_status(status) : MGMT_STATUS_FAILED);
+ goto remove;
+ }
+
+ memset(&mgmt_rp, 0, sizeof(mgmt_rp));
+
+ if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+ struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
+
+ if (skb->len < sizeof(*rp)) {
+ mgmt_cmd_status(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_FAILED);
+ goto remove;
+ }
+
+ memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
+ memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
+
+ rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
+ } else {
+ struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
+
+ if (skb->len < sizeof(*rp)) {
+ mgmt_cmd_status(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_FAILED);
+ goto remove;
+ }
+
+ memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
+ memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
+
+ memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
+ memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
+ }
+
+ mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
+
+remove:
+ mgmt_pending_remove(cmd);
+}
+
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
+ struct hci_request req;
int err;
BT_DBG("%s", hdev->name);
@@ -3596,20 +3856,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_NOT_POWERED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_POWERED);
goto unlock;
}
if (!lmp_ssp_capable(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_NOT_SUPPORTED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_BUSY);
goto unlock;
}
@@ -3619,12 +3879,14 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
+ hci_req_init(&req, hdev);
+
if (bredr_sc_enabled(hdev))
- err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
- 0, NULL);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
else
- err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+ err = hci_req_run_skb(&req, read_local_oob_data_complete);
if (err < 0)
mgmt_pending_remove(cmd);
@@ -3642,9 +3904,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s ", hdev->name);
if (!bdaddr_type_is_valid(addr->type))
- return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS, addr,
- sizeof(*addr));
+ return mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ addr, sizeof(*addr));
hci_dev_lock(hdev);
@@ -3653,10 +3916,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
u8 status;
if (cp->addr.type != BDADDR_BREDR) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
@@ -3668,8 +3931,9 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
else
status = MGMT_STATUS_SUCCESS;
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
- status, &cp->addr, sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA, status,
+ &cp->addr, sizeof(cp->addr));
} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
struct mgmt_cp_add_remote_oob_ext_data *cp = data;
u8 *rand192, *hash192, *rand256, *hash256;
@@ -3681,10 +3945,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
*/
if (memcmp(cp->rand192, ZERO_KEY, 16) ||
memcmp(cp->hash192, ZERO_KEY, 16)) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS,
- addr, sizeof(*addr));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ addr, sizeof(*addr));
goto unlock;
}
@@ -3724,12 +3988,13 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
else
status = MGMT_STATUS_SUCCESS;
- err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
- status, &cp->addr, sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ status, &cp->addr, sizeof(cp->addr));
} else {
BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
- err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS);
}
unlock:
@@ -3747,9 +4012,10 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
if (cp->addr.type != BDADDR_BREDR)
- return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ return mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
hci_dev_lock(hdev);
@@ -3766,100 +4032,136 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
status = MGMT_STATUS_SUCCESS;
done:
- err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- status, &cp->addr, sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ status, &cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
return err;
}
-static bool trigger_discovery(struct hci_request *req, u8 *status)
+static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
{
struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
- struct hci_cp_inquiry inq_cp;
+ struct hci_cp_inquiry cp;
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
+
+ *status = mgmt_bredr_support(hdev);
+ if (*status)
+ return false;
+
+ if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
+ *status = MGMT_STATUS_BUSY;
+ return false;
+ }
+
+ hci_inquiry_cache_flush(hdev);
+
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = DISCOV_BREDR_INQUIRY_LEN;
+
+ hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+ return true;
+}
+
+static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_scan_param param_cp;
+ struct hci_cp_le_set_scan_enable enable_cp;
u8 own_addr_type;
int err;
- switch (hdev->discovery.type) {
- case DISCOV_TYPE_BREDR:
- *status = mgmt_bredr_support(hdev);
- if (*status)
- return false;
+ *status = mgmt_le_support(hdev);
+ if (*status)
+ return false;
- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
- *status = MGMT_STATUS_BUSY;
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
+ /* Don't let discovery abort an outgoing connection attempt
+ * that's using directed advertising.
+ */
+ if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+ *status = MGMT_STATUS_REJECTED;
return false;
}
- hci_inquiry_cache_flush(hdev);
+ disable_advertising(req);
+ }
- memset(&inq_cp, 0, sizeof(inq_cp));
- memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
- inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
- hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
- break;
+ /* If the controller is scanning, it means background scanning is
+ * running. Thus, we should temporarily stop it in order to set the
+ * discovery scanning parameters.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ hci_req_add_le_scan_disable(req);
- case DISCOV_TYPE_LE:
- case DISCOV_TYPE_INTERLEAVED:
- *status = mgmt_le_support(hdev);
- if (*status)
- return false;
+ /* All active scans will be done with either a resolvable private
+ * address (when the privacy feature has been enabled) or a
+ * non-resolvable private address.
+ */
+ err = hci_update_random_address(req, true, &own_addr_type);
+ if (err < 0) {
+ *status = MGMT_STATUS_FAILED;
+ return false;
+ }
- if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
- !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
- *status = MGMT_STATUS_NOT_SUPPORTED;
+ memset(&param_cp, 0, sizeof(param_cp));
+ param_cp.type = LE_SCAN_ACTIVE;
+ param_cp.interval = cpu_to_le16(interval);
+ param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+ param_cp.own_address_type = own_addr_type;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+ &param_cp);
+
+ memset(&enable_cp, 0, sizeof(enable_cp));
+ enable_cp.enable = LE_SCAN_ENABLE;
+ enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+ &enable_cp);
+
+ return true;
+}
+
+static bool trigger_discovery(struct hci_request *req, u8 *status)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ if (!trigger_bredr_inquiry(req, status))
return false;
- }
+ break;
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
- /* Don't let discovery abort an outgoing
- * connection attempt that's using directed
- * advertising.
+ case DISCOV_TYPE_INTERLEAVED:
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+ &hdev->quirks)) {
+ /* During simultaneous discovery, we double the LE scan
+ * interval. We must leave some time for the controller
+ * to do BR/EDR inquiry.
*/
- if (hci_conn_hash_lookup_state(hdev, LE_LINK,
- BT_CONNECT)) {
- *status = MGMT_STATUS_REJECTED;
+ if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
+ status))
return false;
- }
- disable_advertising(req);
- }
-
- /* If controller is scanning, it means the background scanning
- * is running. Thus, we should temporarily stop it in order to
- * set the discovery scanning parameters.
- */
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
- hci_req_add_le_scan_disable(req);
+ if (!trigger_bredr_inquiry(req, status))
+ return false;
- memset(&param_cp, 0, sizeof(param_cp));
+ return true;
+ }
- /* All active scans will be done with either a resolvable
- * private address (when privacy feature has been enabled)
- * or non-resolvable private address.
- */
- err = hci_update_random_address(req, true, &own_addr_type);
- if (err < 0) {
- *status = MGMT_STATUS_FAILED;
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ *status = MGMT_STATUS_NOT_SUPPORTED;
return false;
}
+ /* fall through */
- param_cp.type = LE_SCAN_ACTIVE;
- param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
- param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
- param_cp.own_address_type = own_addr_type;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
-
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
+ case DISCOV_TYPE_LE:
+ if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
+ return false;
break;
default:
@@ -3873,16 +4175,16 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
unsigned long timeout;
BT_DBG("status %d", status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
if (!cmd)
- cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
+ cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
if (cmd) {
cmd->cmd_complete(cmd, mgmt_status(status));
@@ -3904,7 +4206,18 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
break;
case DISCOV_TYPE_INTERLEAVED:
- timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
+ /* When running simultaneous discovery, the LE scanning time
+ * should occupy the whole discovery time since BR/EDR inquiry
+ * and LE scanning are scheduled by the controller.
+ *
+ * For interleaved discovery, in comparison, BR/EDR inquiry
+ * and LE scanning are done sequentially with separate
+ * timeouts.
+ */
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ else
+ timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
break;
case DISCOV_TYPE_BREDR:
timeout = 0;
@@ -3923,8 +4236,7 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
*/
if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
&hdev->quirks) &&
- (hdev->discovery.uuid_count > 0 ||
- hdev->discovery.rssi != HCI_RSSI_INVALID)) {
+ hdev->discovery.result_filtering) {
hdev->discovery.scan_start = jiffies;
hdev->discovery.scan_duration = timeout;
}
@@ -3941,7 +4253,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_start_discovery *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
u8 status;
int err;
@@ -3951,17 +4263,17 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_NOT_POWERED,
- &cp->type, sizeof(cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED,
+ &cp->type, sizeof(cp->type));
goto failed;
}
if (hdev->discovery.state != DISCOVERY_STOPPED ||
- test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_BUSY, &cp->type,
- sizeof(cp->type));
+ hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY, &cp->type,
+ sizeof(cp->type));
goto failed;
}
@@ -3984,8 +4296,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev);
if (!trigger_discovery(&req, &status)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- status, &cp->type, sizeof(cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ status, &cp->type, sizeof(cp->type));
mgmt_pending_remove(cmd);
goto failed;
}
@@ -4003,17 +4315,18 @@ failed:
return err;
}
-static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
+ u8 status)
{
- return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
- cmd->param, 1);
+ return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
+ cmd->param, 1);
}
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_start_service_discovery *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
u16 uuid_count, expected_len;
@@ -4025,19 +4338,19 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_START_SERVICE_DISCOVERY,
- MGMT_STATUS_NOT_POWERED,
- &cp->type, sizeof(cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED,
+ &cp->type, sizeof(cp->type));
goto failed;
}
if (hdev->discovery.state != DISCOVERY_STOPPED ||
- test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_START_SERVICE_DISCOVERY,
- MGMT_STATUS_BUSY, &cp->type,
- sizeof(cp->type));
+ hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_BUSY, &cp->type,
+ sizeof(cp->type));
goto failed;
}
@@ -4045,10 +4358,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
if (uuid_count > max_uuid_count) {
BT_ERR("service_discovery: too big uuid_count value %u",
uuid_count);
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_START_SERVICE_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS, &cp->type,
- sizeof(cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS, &cp->type,
+ sizeof(cp->type));
goto failed;
}
@@ -4056,10 +4369,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
if (expected_len != len) {
BT_ERR("service_discovery: expected %u bytes, got %u bytes",
expected_len, len);
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_START_SERVICE_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS, &cp->type,
- sizeof(cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS, &cp->type,
+ sizeof(cp->type));
goto failed;
}
@@ -4077,6 +4390,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
*/
hci_discovery_filter_clear(hdev);
+ hdev->discovery.result_filtering = true;
hdev->discovery.type = cp->type;
hdev->discovery.rssi = cp->rssi;
hdev->discovery.uuid_count = uuid_count;
@@ -4085,10 +4399,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
GFP_KERNEL);
if (!hdev->discovery.uuids) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_START_SERVICE_DISCOVERY,
- MGMT_STATUS_FAILED,
- &cp->type, sizeof(cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_FAILED,
+ &cp->type, sizeof(cp->type));
mgmt_pending_remove(cmd);
goto failed;
}
@@ -4097,9 +4411,9 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev);
if (!trigger_discovery(&req, &status)) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_START_SERVICE_DISCOVERY,
- status, &cp->type, sizeof(cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ status, &cp->type, sizeof(cp->type));
mgmt_pending_remove(cmd);
goto failed;
}
@@ -4119,13 +4433,13 @@ failed:
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status %d", status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+ cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
if (cmd) {
cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
@@ -4141,7 +4455,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_stop_discovery *mgmt_cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;
@@ -4150,16 +4464,16 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (!hci_discovery_active(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
- MGMT_STATUS_REJECTED, &mgmt_cp->type,
- sizeof(mgmt_cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_REJECTED, &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
goto unlock;
}
if (hdev->discovery.type != mgmt_cp->type) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
- sizeof(mgmt_cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS,
+ &mgmt_cp->type, sizeof(mgmt_cp->type));
goto unlock;
}
@@ -4185,8 +4499,8 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
/* If no HCI commands were sent we're done */
if (err == -ENODATA) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
- &mgmt_cp->type, sizeof(mgmt_cp->type));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type, sizeof(mgmt_cp->type));
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
@@ -4207,17 +4521,17 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (!hci_discovery_active(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
- MGMT_STATUS_FAILED, &cp->addr,
- sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_FAILED, &cp->addr,
+ sizeof(cp->addr));
goto failed;
}
e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
if (!e) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
- MGMT_STATUS_INVALID_PARAMS, &cp->addr,
- sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_INVALID_PARAMS, &cp->addr,
+ sizeof(cp->addr));
goto failed;
}
@@ -4229,8 +4543,8 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
hci_inquiry_cache_update_resolve(hdev, e);
}
- err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
- sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
+ &cp->addr, sizeof(cp->addr));
failed:
hci_dev_unlock(hdev);
@@ -4247,9 +4561,9 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("%s", hdev->name);
if (!bdaddr_type_is_valid(cp->addr.type))
- return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
hci_dev_lock(hdev);
@@ -4265,8 +4579,8 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
status = MGMT_STATUS_SUCCESS;
done:
- err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
- &cp->addr, sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
@@ -4283,9 +4597,9 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("%s", hdev->name);
if (!bdaddr_type_is_valid(cp->addr.type))
- return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
hci_dev_lock(hdev);
@@ -4301,8 +4615,8 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
status = MGMT_STATUS_SUCCESS;
done:
- err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
- &cp->addr, sizeof(cp->addr));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
@@ -4322,8 +4636,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
source = __le16_to_cpu(cp->source);
if (source > 0x0002)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
@@ -4332,7 +4646,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->devid_product = __le16_to_cpu(cp->product);
hdev->devid_version = __le16_to_cpu(cp->version);
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
+ NULL, 0);
hci_req_init(&req, hdev);
update_eir(&req);
@@ -4343,10 +4658,17 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ BT_DBG("status %d", status);
+}
+
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
struct cmd_lookup match = { NULL, hdev };
+ struct hci_request req;
hci_dev_lock(hdev);
@@ -4358,10 +4680,10 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
goto unlock;
}
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
- set_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ hci_dev_set_flag(hdev, HCI_ADVERTISING);
else
- clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING);
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
&match);
@@ -4371,6 +4693,21 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
if (match.sk)
sock_put(match.sk);
+ /* If "Set Advertising" was just disabled and instance advertising was
+ * set up earlier, then enable the advertising instance.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
+ goto unlock;
+
+ hci_req_init(&req, hdev);
+
+ update_adv_data(&req);
+ enable_advertising(&req);
+
+ if (hci_req_run(&req, enable_advertising_instance) < 0)
+ BT_ERR("Failed to re-configure advertising");
+
unlock:
hci_dev_unlock(hdev);
}
@@ -4379,41 +4716,48 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
- u8 val, enabled, status;
+ u8 val, status;
int err;
BT_DBG("request for %s", hdev->name);
status = mgmt_le_support(hdev);
if (status)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
- status);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ status);
- if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
- MGMT_STATUS_INVALID_PARAMS);
+ if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
val = !!cp->val;
- enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* The following conditions are ones which mean that we should
* not do any HCI communication but directly send a mgmt
* response to user space (after toggling the flag if
* necessary).
*/
- if (!hdev_is_powered(hdev) || val == enabled ||
+ if (!hdev_is_powered(hdev) ||
+ (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
hci_conn_num(hdev, LE_LINK) > 0 ||
- (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+ (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->le_scan_type == LE_SCAN_ACTIVE)) {
- bool changed = false;
+ bool changed;
- if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
- change_bit(HCI_ADVERTISING, &hdev->dev_flags);
- changed = true;
+ if (cp->val) {
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
+ if (cp->val == 0x02)
+ hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+ else
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+ } else {
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
}
err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
@@ -4426,10 +4770,10 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
+ pending_find(MGMT_OP_SET_LE, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ MGMT_STATUS_BUSY);
goto unlock;
}
@@ -4441,10 +4785,19 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
hci_req_init(&req, hdev);
- if (val)
- enable_advertising(&req);
+ if (cp->val == 0x02)
+ hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
else
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+
+ if (val) {
+ /* Switch to instance "0" for the Set Advertising setting. */
+ update_adv_data_for_instance(&req, 0);
+ update_scan_rsp_data_for_instance(&req, 0);
+ enable_advertising(&req);
+ } else {
disable_advertising(&req);
+ }
err = hci_req_run(&req, set_advertising_complete);
if (err < 0)
@@ -4464,34 +4817,38 @@ static int set_static_address(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
if (!lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_NOT_SUPPORTED);
if (hdev_is_powered(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
- MGMT_STATUS_REJECTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_REJECTED);
if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
if (!bacmp(&cp->bdaddr, BDADDR_NONE))
- return cmd_status(sk, hdev->id,
- MGMT_OP_SET_STATIC_ADDRESS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
/* Two most significant bits shall be set */
if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
- return cmd_status(sk, hdev->id,
- MGMT_OP_SET_STATIC_ADDRESS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
}
hci_dev_lock(hdev);
bacpy(&hdev->static_addr, &cp->bdaddr);
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
+ err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
+ if (err < 0)
+ goto unlock;
- hci_dev_unlock(hdev);
+ err = new_settings(hdev, sk);
+unlock:
+ hci_dev_unlock(hdev);
return err;
}
@@ -4505,36 +4862,37 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
if (!lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_NOT_SUPPORTED);
interval = __le16_to_cpu(cp->interval);
if (interval < 0x0004 || interval > 0x4000)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
window = __le16_to_cpu(cp->window);
if (window < 0x0004 || window > 0x4000)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
if (window > interval)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
hdev->le_scan_interval = interval;
hdev->le_scan_window = window;
- err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
+ NULL, 0);
/* If background scan is running, restart it so new parameters are
* loaded.
*/
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->discovery.state == DISCOVERY_STOPPED) {
struct hci_request req;
@@ -4554,26 +4912,26 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status 0x%02x", status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
+ cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
if (!cmd)
goto unlock;
if (status) {
- cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- mgmt_status(status));
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ mgmt_status(status));
} else {
struct mgmt_mode *cp = cmd->param;
if (cp->val)
- set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
else
- clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
new_settings(hdev, cmd->sk);
@@ -4589,40 +4947,40 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;
BT_DBG("%s", hdev->name);
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
hdev->hci_ver < BLUETOOTH_VER_1_2)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_INVALID_PARAMS);
-
- if (!hdev_is_powered(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_NOT_POWERED);
-
- if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_REJECTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
+ hdev);
goto unlock;
}
- if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
+ if (!hdev_is_powered(hdev)) {
+ hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
hdev);
+ new_settings(hdev, sk);
goto unlock;
}
@@ -4639,8 +4997,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
err = hci_req_run(&req, fast_connectable_complete);
if (err < 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_FAILED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_FAILED);
mgmt_pending_remove(cmd);
}
@@ -4652,13 +5010,13 @@ unlock:
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status 0x%02x", status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
+ cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
if (!cmd)
goto unlock;
@@ -4668,9 +5026,9 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
/* We need to restore the flag if related HCI commands
* failed.
*/
- clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
- cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
} else {
send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
new_settings(hdev, cmd->sk);
@@ -4685,41 +5043,41 @@ unlock:
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;
BT_DBG("request for %s", hdev->name);
if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_NOT_SUPPORTED);
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_REJECTED);
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
goto unlock;
}
if (!hdev_is_powered(hdev)) {
if (!cp->val) {
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
- clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
- clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
- clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
- clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
+ hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
+ hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
- change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
if (err < 0)
@@ -4731,8 +5089,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
/* Reject disabling when powered on */
if (!cp->val) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_REJECTED);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
goto unlock;
} else {
/* When configuring a dual-mode controller to operate
@@ -4749,18 +5107,18 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
* switching BR/EDR back on when secure connections has been
* enabled is not a supported transaction.
*/
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
(bacmp(&hdev->static_addr, BDADDR_ANY) ||
- test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_REJECTED);
+ hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
goto unlock;
}
}
- if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_BUSY);
goto unlock;
}
@@ -4773,7 +5131,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
/* We need to flip the bit already here so that update_adv_data
* generates the correct flags.
*/
- set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
hci_req_init(&req, hdev);
@@ -4796,20 +5154,20 @@ unlock:
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
BT_DBG("%s status %u", hdev->name, status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
+ cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
if (!cmd)
goto unlock;
if (status) {
- cmd_status(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status));
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(status));
goto remove;
}
@@ -4817,16 +5175,16 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
switch (cp->val) {
case 0x00:
- clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
- clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
+ hci_dev_clear_flag(hdev, HCI_SC_ONLY);
break;
case 0x01:
- set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
- clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+ hci_dev_clear_flag(hdev, HCI_SC_ONLY);
break;
case 0x02:
- set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
- set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+ hci_dev_set_flag(hdev, HCI_SC_ONLY);
break;
}
@@ -4843,7 +5201,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_mode *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
u8 val;
int err;
@@ -4851,37 +5209,37 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
BT_DBG("request for %s", hdev->name);
if (!lmp_sc_capable(hdev) &&
- !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- MGMT_STATUS_NOT_SUPPORTED);
+ !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_NOT_SUPPORTED);
- if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
lmp_sc_capable(hdev) &&
- !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- MGMT_STATUS_REJECTED);
+ !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_REJECTED);
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
- !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
bool changed;
if (cp->val) {
- changed = !test_and_set_bit(HCI_SC_ENABLED,
- &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_SC_ENABLED);
if (cp->val == 0x02)
- set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_SC_ONLY);
else
- clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_SC_ONLY);
} else {
- changed = test_and_clear_bit(HCI_SC_ENABLED,
- &hdev->dev_flags);
- clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_SC_ENABLED);
+ hci_dev_clear_flag(hdev, HCI_SC_ONLY);
}
err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
@@ -4894,16 +5252,16 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- MGMT_STATUS_BUSY);
+ if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_BUSY);
goto failed;
}
val = !!cp->val;
- if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
- (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+ if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
+ (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
goto failed;
}
@@ -4937,27 +5295,26 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
BT_DBG("request for %s", hdev->name);
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
if (cp->val)
- changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
- &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
else
- changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_KEEP_DEBUG_KEYS);
if (cp->val == 0x02)
- use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
- &hdev->dev_flags);
+ use_changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_USE_DEBUG_KEYS);
else
- use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
- &hdev->dev_flags);
+ use_changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_USE_DEBUG_KEYS);
if (hdev_is_powered(hdev) && use_changed &&
- test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
sizeof(mode), &mode);
@@ -4985,32 +5342,32 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
BT_DBG("request for %s", hdev->name);
if (!lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+ MGMT_STATUS_NOT_SUPPORTED);
if (cp->privacy != 0x00 && cp->privacy != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+ MGMT_STATUS_INVALID_PARAMS);
if (hdev_is_powered(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
- MGMT_STATUS_REJECTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+ MGMT_STATUS_REJECTED);
hci_dev_lock(hdev);
/* If user space supports this command it is also expected to
* handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
*/
- set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
if (cp->privacy) {
- changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
- set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
} else {
- changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
memset(hdev->irk, 0, sizeof(hdev->irk));
- clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
}
err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
@@ -5053,22 +5410,22 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
BT_DBG("request for %s", hdev->name);
if (!lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+ MGMT_STATUS_NOT_SUPPORTED);
irk_count = __le16_to_cpu(cp->irk_count);
if (irk_count > max_irk_count) {
BT_ERR("load_irks: too big irk_count value %u", irk_count);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+ MGMT_STATUS_INVALID_PARAMS);
}
expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
if (expected_len != len) {
BT_ERR("load_irks: expected %u bytes, got %u bytes",
expected_len, len);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+ MGMT_STATUS_INVALID_PARAMS);
}
BT_DBG("%s irk_count %u", hdev->name, irk_count);
@@ -5077,9 +5434,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
struct mgmt_irk_info *key = &cp->irks[i];
if (!irk_is_valid(key))
- return cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_IRKS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_LOAD_IRKS,
+ MGMT_STATUS_INVALID_PARAMS);
}
hci_dev_lock(hdev);
@@ -5099,9 +5456,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
BDADDR_ANY);
}
- set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
- err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
hci_dev_unlock(hdev);
@@ -5139,14 +5496,14 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
BT_DBG("request for %s", hdev->name);
if (!lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_NOT_SUPPORTED);
key_count = __le16_to_cpu(cp->key_count);
if (key_count > max_key_count) {
BT_ERR("load_ltks: too big key_count value %u", key_count);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
expected_len = sizeof(*cp) + key_count *
@@ -5154,8 +5511,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
if (expected_len != len) {
BT_ERR("load_keys: expected %u bytes, got %u bytes",
expected_len, len);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
BT_DBG("%s key_count %u", hdev->name, key_count);
@@ -5164,9 +5521,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
struct mgmt_ltk_info *key = &cp->keys[i];
if (!ltk_is_valid(key))
- return cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
hci_dev_lock(hdev);
@@ -5211,7 +5568,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
key->rand);
}
- err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
NULL, 0);
hci_dev_unlock(hdev);
@@ -5219,7 +5576,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
struct hci_conn *conn = cmd->user_data;
struct mgmt_rp_get_conn_info rp;
@@ -5237,8 +5594,8 @@ static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
rp.max_tx_power = HCI_TX_POWER_INVALID;
}
- err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
- &rp, sizeof(rp));
+ err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
+ status, &rp, sizeof(rp));
hci_conn_drop(conn);
hci_conn_put(conn);
@@ -5250,7 +5607,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
u16 opcode)
{
struct hci_cp_read_rssi *cp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_conn *conn;
u16 handle;
u8 status;
@@ -5288,7 +5645,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
goto unlock;
}
- cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
+ cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
if (!cmd)
goto unlock;
@@ -5315,15 +5672,16 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (!bdaddr_type_is_valid(cp->addr.type))
- return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
- MGMT_STATUS_INVALID_PARAMS,
- &rp, sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
- MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_NOT_POWERED, &rp,
+ sizeof(rp));
goto unlock;
}
@@ -5334,14 +5692,15 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
if (!conn || conn->state != BT_CONNECTED) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
- MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_NOT_CONNECTED, &rp,
+ sizeof(rp));
goto unlock;
}
- if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
+ if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto unlock;
}
@@ -5361,7 +5720,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
struct hci_request req;
struct hci_cp_read_tx_power req_txp_cp;
struct hci_cp_read_rssi req_rssi_cp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
hci_req_init(&req, hdev);
req_rssi_cp.handle = cpu_to_le16(conn->handle);
@@ -5409,8 +5768,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
rp.tx_power = conn->tx_power;
rp.max_tx_power = conn->max_tx_power;
- err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
- MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
unlock:
@@ -5418,7 +5777,7 @@ unlock:
return err;
}
-static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
+static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
struct hci_conn *conn = cmd->user_data;
struct mgmt_rp_get_clock_info rp;
@@ -5443,8 +5802,8 @@ static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
}
complete:
- err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
- sizeof(rp));
+ err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
+ sizeof(rp));
if (conn) {
hci_conn_drop(conn);
@@ -5457,7 +5816,7 @@ complete:
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct hci_cp_read_clock *hci_cp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_conn *conn;
BT_DBG("%s status %u", hdev->name, status);
@@ -5475,7 +5834,7 @@ static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
conn = NULL;
}
- cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
+ cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
if (!cmd)
goto unlock;
@@ -5492,7 +5851,7 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_cp_get_clock_info *cp = data;
struct mgmt_rp_get_clock_info rp;
struct hci_cp_read_clock hci_cp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
struct hci_conn *conn;
int err;
@@ -5504,15 +5863,16 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (cp->addr.type != BDADDR_BREDR)
- return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
- MGMT_STATUS_INVALID_PARAMS,
- &rp, sizeof(rp));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
- MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+ MGMT_STATUS_NOT_POWERED, &rp,
+ sizeof(rp));
goto unlock;
}
@@ -5520,10 +5880,10 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
if (!conn || conn->state != BT_CONNECTED) {
- err = cmd_complete(sk, hdev->id,
- MGMT_OP_GET_CLOCK_INFO,
- MGMT_STATUS_NOT_CONNECTED,
- &rp, sizeof(rp));
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_GET_CLOCK_INFO,
+ MGMT_STATUS_NOT_CONNECTED,
+ &rp, sizeof(rp));
goto unlock;
}
} else {
@@ -5634,13 +5994,13 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status 0x%02x", status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
+ cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
if (!cmd)
goto unlock;
@@ -5655,7 +6015,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_add_device *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
u8 auto_conn, addr_type;
int err;
@@ -5664,14 +6024,14 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
if (!bdaddr_type_is_valid(cp->addr.type) ||
!bacmp(&cp->addr.bdaddr, BDADDR_ANY))
- return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
- return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
- MGMT_STATUS_INVALID_PARAMS,
- &cp->addr, sizeof(cp->addr));
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
hci_req_init(&req, hdev);
@@ -5757,13 +6117,13 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
BT_DBG("status 0x%02x", status);
hci_dev_lock(hdev);
- cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
+ cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
if (!cmd)
goto unlock;
@@ -5778,7 +6138,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_remove_device *cp = data;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;
@@ -5911,15 +6271,15 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
int i;
if (!lmp_le_capable(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+ MGMT_STATUS_NOT_SUPPORTED);
param_count = __le16_to_cpu(cp->param_count);
if (param_count > max_param_count) {
BT_ERR("load_conn_param: too big param_count value %u",
param_count);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+ MGMT_STATUS_INVALID_PARAMS);
}
expected_len = sizeof(*cp) + param_count *
@@ -5927,8 +6287,8 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
if (expected_len != len) {
BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
expected_len, len);
- return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+ MGMT_STATUS_INVALID_PARAMS);
}
BT_DBG("%s param_count %u", hdev->name, param_count);
@@ -5983,7 +6343,8 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_unlock(hdev);
- return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
+ NULL, 0);
}
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
@@ -5996,25 +6357,23 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
if (hdev_is_powered(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
- MGMT_STATUS_REJECTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+ MGMT_STATUS_REJECTED);
if (cp->config != 0x00 && cp->config != 0x01)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+ MGMT_STATUS_INVALID_PARAMS);
if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+ MGMT_STATUS_NOT_SUPPORTED);
hci_dev_lock(hdev);
if (cp->config)
- changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
- &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
else
- changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
if (err < 0)
@@ -6025,12 +6384,12 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev,
err = new_options(hdev, sk);
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
mgmt_index_removed(hdev);
- if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
- set_bit(HCI_CONFIG, &hdev->dev_flags);
- set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
+ hci_dev_set_flag(hdev, HCI_CONFIG);
+ hci_dev_set_flag(hdev, HCI_AUTO_OFF);
queue_work(hdev->req_workqueue, &hdev->power_on);
} else {
@@ -6054,16 +6413,16 @@ static int set_public_address(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
if (hdev_is_powered(hdev))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
- MGMT_STATUS_REJECTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+ MGMT_STATUS_REJECTED);
if (!bacmp(&cp->bdaddr, BDADDR_ANY))
- return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
- MGMT_STATUS_INVALID_PARAMS);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
if (!hdev->set_bdaddr)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
- MGMT_STATUS_NOT_SUPPORTED);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+ MGMT_STATUS_NOT_SUPPORTED);
hci_dev_lock(hdev);
@@ -6077,16 +6436,16 @@ static int set_public_address(struct sock *sk, struct hci_dev *hdev,
if (!changed)
goto unlock;
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
err = new_options(hdev, sk);
if (is_configured(hdev)) {
mgmt_index_removed(hdev);
- clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
- set_bit(HCI_CONFIG, &hdev->dev_flags);
- set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ hci_dev_set_flag(hdev, HCI_CONFIG);
+ hci_dev_set_flag(hdev, HCI_AUTO_OFF);
queue_work(hdev->req_workqueue, &hdev->power_on);
}
@@ -6096,213 +6455,852 @@ unlock:
return err;
}
-static const struct mgmt_handler {
- int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
- u16 data_len);
- bool var_len;
- size_t data_len;
-} mgmt_handlers[] = {
- { NULL }, /* 0x0000 (no command) */
- { read_version, false, MGMT_READ_VERSION_SIZE },
- { read_commands, false, MGMT_READ_COMMANDS_SIZE },
- { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
- { read_controller_info, false, MGMT_READ_INFO_SIZE },
- { set_powered, false, MGMT_SETTING_SIZE },
- { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
- { set_connectable, false, MGMT_SETTING_SIZE },
- { set_fast_connectable, false, MGMT_SETTING_SIZE },
- { set_bondable, false, MGMT_SETTING_SIZE },
- { set_link_security, false, MGMT_SETTING_SIZE },
- { set_ssp, false, MGMT_SETTING_SIZE },
- { set_hs, false, MGMT_SETTING_SIZE },
- { set_le, false, MGMT_SETTING_SIZE },
- { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
- { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
- { add_uuid, false, MGMT_ADD_UUID_SIZE },
- { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
- { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
- { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
- { disconnect, false, MGMT_DISCONNECT_SIZE },
- { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
- { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
- { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
- { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
- { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
- { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
- { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
- { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
- { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
- { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
- { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
- { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
- { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
- { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
- { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
- { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
- { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
- { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
- { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
- { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
- { set_advertising, false, MGMT_SETTING_SIZE },
- { set_bredr, false, MGMT_SETTING_SIZE },
- { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
- { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
- { set_secure_conn, false, MGMT_SETTING_SIZE },
- { set_debug_keys, false, MGMT_SETTING_SIZE },
- { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
- { load_irks, true, MGMT_LOAD_IRKS_SIZE },
- { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
- { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
- { add_device, false, MGMT_ADD_DEVICE_SIZE },
- { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
- { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
- { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
- { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
- { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
- { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
- { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
-};
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+ u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
+ return eir_len;
+}
+
+static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb)
{
- void *buf;
- u8 *cp;
- struct mgmt_hdr *hdr;
- u16 opcode, index, len;
- struct hci_dev *hdev = NULL;
- const struct mgmt_handler *handler;
+ const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
+ struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
+ u8 *h192, *r192, *h256, *r256;
+ struct mgmt_pending_cmd *cmd;
+ u16 eir_len;
int err;
- BT_DBG("got %zu bytes", msglen);
+ BT_DBG("%s status %u", hdev->name, status);
- if (msglen < sizeof(*hdr))
- return -EINVAL;
+ cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
+ if (!cmd)
+ return;
- buf = kmalloc(msglen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ mgmt_cp = cmd->param;
+
+ if (status) {
+ status = mgmt_status(status);
+ eir_len = 0;
+
+ h192 = NULL;
+ r192 = NULL;
+ h256 = NULL;
+ r256 = NULL;
+ } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+ struct hci_rp_read_local_oob_data *rp;
+
+ if (skb->len != sizeof(*rp)) {
+ status = MGMT_STATUS_FAILED;
+ eir_len = 0;
+ } else {
+ status = MGMT_STATUS_SUCCESS;
+ rp = (void *)skb->data;
- if (memcpy_from_msg(buf, msg, msglen)) {
- err = -EFAULT;
+ eir_len = 5 + 18 + 18;
+ h192 = rp->hash;
+ r192 = rp->rand;
+ h256 = NULL;
+ r256 = NULL;
+ }
+ } else {
+ struct hci_rp_read_local_oob_ext_data *rp;
+
+ if (skb->len != sizeof(*rp)) {
+ status = MGMT_STATUS_FAILED;
+ eir_len = 0;
+ } else {
+ status = MGMT_STATUS_SUCCESS;
+ rp = (void *)skb->data;
+
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
+ eir_len = 5 + 18 + 18;
+ h192 = NULL;
+ r192 = NULL;
+ } else {
+ eir_len = 5 + 18 + 18 + 18 + 18;
+ h192 = rp->hash192;
+ r192 = rp->rand192;
+ }
+
+ h256 = rp->hash256;
+ r256 = rp->rand256;
+ }
+ }
+
+ mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
+ if (!mgmt_rp)
goto done;
+
+ if (status)
+ goto send_rsp;
+
+ eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
+ hdev->dev_class, 3);
+
+ if (h192 && r192) {
+ eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+ EIR_SSP_HASH_C192, h192, 16);
+ eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+ EIR_SSP_RAND_R192, r192, 16);
+ }
+
+ if (h256 && r256) {
+ eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+ EIR_SSP_HASH_C256, h256, 16);
+ eir_len = eir_append_data(mgmt_rp->eir, eir_len,
+ EIR_SSP_RAND_R256, r256, 16);
}
- hdr = buf;
- opcode = __le16_to_cpu(hdr->opcode);
- index = __le16_to_cpu(hdr->index);
- len = __le16_to_cpu(hdr->len);
+send_rsp:
+ mgmt_rp->type = mgmt_cp->type;
+ mgmt_rp->eir_len = cpu_to_le16(eir_len);
- if (len != msglen - sizeof(*hdr)) {
- err = -EINVAL;
+ err = mgmt_cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
+ mgmt_rp, sizeof(*mgmt_rp) + eir_len);
+ if (err < 0 || status)
goto done;
+
+ hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
+
+ err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
+ mgmt_rp, sizeof(*mgmt_rp) + eir_len,
+ HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
+done:
+ kfree(mgmt_rp);
+ mgmt_pending_remove(cmd);
+}
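The eir_len values reserved above come straight from the EIR/OOB TLV encoding that eir_append_data() produces: each field is one length byte (covering the type byte and payload), one type byte, then the payload, so Class of Device costs 1 + 1 + 3 = 5 bytes and every 16-byte hash or randomizer costs 1 + 1 + 16 = 18 bytes. A standalone sketch of the same append logic, purely for illustration (not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors eir_append_data(): length byte = 1 (type) + payload length. */
static uint16_t eir_append(uint8_t *eir, uint16_t eir_len, uint8_t type,
			   const uint8_t *data, uint8_t data_len)
{
	eir[eir_len++] = 1 + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	return eir_len + data_len;
}

int main(void)
{
	uint8_t eir[64], cod[3] = { 0 }, hash[16] = { 0 }, rand192[16] = { 0 };
	uint16_t len = 0;

	len = eir_append(eir, len, 0x0d, cod, 3);	/* Class of Device: 5 bytes  */
	len = eir_append(eir, len, 0x0e, hash, 16);	/* C-192 hash:     18 bytes  */
	len = eir_append(eir, len, 0x0f, rand192, 16);	/* R-192 rand:     18 bytes  */
	printf("%u\n", len);				/* prints 41 = 5 + 18 + 18   */
	return 0;
}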
+
+static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
+ struct mgmt_cp_read_local_oob_ext_data *cp)
+{
+ struct mgmt_pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
+ cp, sizeof(*cp));
+ if (!cmd)
+ return -ENOMEM;
+
+ hci_req_init(&req, hdev);
+
+ if (bredr_sc_enabled(hdev))
+ hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
+ else
+ hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+
+ err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ return err;
}
- if (index != MGMT_INDEX_NONE) {
- hdev = hci_dev_get(index);
- if (!hdev) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_INDEX);
- goto done;
+ return 0;
+}
+
+static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_cp_read_local_oob_ext_data *cp = data;
+ struct mgmt_rp_read_local_oob_ext_data *rp;
+ size_t rp_len;
+ u16 eir_len;
+ u8 status, flags, role, addr[7], hash[16], rand[16];
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (hdev_is_powered(hdev)) {
+ switch (cp->type) {
+ case BIT(BDADDR_BREDR):
+ status = mgmt_bredr_support(hdev);
+ if (status)
+ eir_len = 0;
+ else
+ eir_len = 5;
+ break;
+ case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
+ status = mgmt_le_support(hdev);
+ if (status)
+ eir_len = 0;
+ else
+ eir_len = 9 + 3 + 18 + 18 + 3;
+ break;
+ default:
+ status = MGMT_STATUS_INVALID_PARAMS;
+ eir_len = 0;
+ break;
}
+ } else {
+ status = MGMT_STATUS_NOT_POWERED;
+ eir_len = 0;
+ }
- if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
- test_bit(HCI_CONFIG, &hdev->dev_flags) ||
- test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_INDEX);
- goto done;
+ rp_len = sizeof(*rp) + eir_len;
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp)
+ return -ENOMEM;
+
+ if (status)
+ goto complete;
+
+ hci_dev_lock(hdev);
+
+ eir_len = 0;
+ switch (cp->type) {
+ case BIT(BDADDR_BREDR):
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
+ err = read_local_ssp_oob_req(hdev, sk, cp);
+ hci_dev_unlock(hdev);
+ if (!err)
+ goto done;
+
+ status = MGMT_STATUS_FAILED;
+ goto complete;
+ } else {
+ eir_len = eir_append_data(rp->eir, eir_len,
+ EIR_CLASS_OF_DEV,
+ hdev->dev_class, 3);
+ }
+ break;
+ case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
+ if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
+ smp_generate_oob(hdev, hash, rand) < 0) {
+ hci_dev_unlock(hdev);
+ status = MGMT_STATUS_FAILED;
+ goto complete;
+ }
+
+ /* This should return the active RPA, but since the RPA
+ * is only programmed on demand, it is really hard to fill
+ * this in at the moment. For now disallow retrieving
+ * local out-of-band data when privacy is in use.
+ *
+ * Returning the identity address will not help here since
+ * pairing happens before the identity resolving key is
+ * known and thus the connection establishment happens
+ * based on the RPA and not the identity address.
+ */
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
+ hci_dev_unlock(hdev);
+ status = MGMT_STATUS_REJECTED;
+ goto complete;
}
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
- opcode != MGMT_OP_READ_CONFIG_INFO &&
- opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
- opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_INDEX);
- goto done;
+ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ memcpy(addr, &hdev->static_addr, 6);
+ addr[6] = 0x01;
+ } else {
+ memcpy(addr, &hdev->bdaddr, 6);
+ addr[6] = 0x00;
+ }
+
+ eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
+ addr, sizeof(addr));
+
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ role = 0x02;
+ else
+ role = 0x01;
+
+ eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
+ &role, sizeof(role));
+
+ if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
+ eir_len = eir_append_data(rp->eir, eir_len,
+ EIR_LE_SC_CONFIRM,
+ hash, sizeof(hash));
+
+ eir_len = eir_append_data(rp->eir, eir_len,
+ EIR_LE_SC_RANDOM,
+ rand, sizeof(rand));
}
+
+ flags = get_adv_discov_flags(hdev);
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ flags |= LE_AD_NO_BREDR;
+
+ eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
+ &flags, sizeof(flags));
+ break;
}
- if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
- mgmt_handlers[opcode].func == NULL) {
- BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_UNKNOWN_COMMAND);
+ hci_dev_unlock(hdev);
+
+ hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
+
+ status = MGMT_STATUS_SUCCESS;
+
+complete:
+ rp->type = cp->type;
+ rp->eir_len = cpu_to_le16(eir_len);
+
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
+ status, rp, sizeof(*rp) + eir_len);
+ if (err < 0 || status)
goto done;
+
+ err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
+ rp, sizeof(*rp) + eir_len,
+ HCI_MGMT_OOB_DATA_EVENTS, sk);
+
+done:
+ kfree(rp);
+
+ return err;
+}
+
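The 9 + 3 + 18 + 18 + 3 bytes reserved for the LE case follow the same accounting: LE Bluetooth Device Address (1 + 1 + 7 = 9), LE Role (1 + 1 + 1 = 3), LE Secure Connections Confirmation and Random values (1 + 1 + 16 = 18 each), and Flags (1 + 1 + 1 = 3), for 51 bytes in total; the two SC fields are simply skipped when HCI_SC_ENABLED is not set.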
+static u32 get_supported_adv_flags(struct hci_dev *hdev)
+{
+ u32 flags = 0;
+
+ flags |= MGMT_ADV_FLAG_CONNECTABLE;
+ flags |= MGMT_ADV_FLAG_DISCOV;
+ flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
+ flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+ if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
+ flags |= MGMT_ADV_FLAG_TX_POWER;
+
+ return flags;
+}
+
+static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_rp_read_adv_features *rp;
+ size_t rp_len;
+ int err;
+ bool instance;
+ u32 supported_flags;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+
+ rp_len = sizeof(*rp);
+
+ /* Currently only one instance is supported, so just add 1 to the
+ * response length.
+ */
+ instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
+ if (instance)
+ rp_len++;
+
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp) {
+ hci_dev_unlock(hdev);
+ return -ENOMEM;
}
- if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
- opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_INDEX);
- goto done;
+ supported_flags = get_supported_adv_flags(hdev);
+
+ rp->supported_flags = cpu_to_le32(supported_flags);
+ rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
+ rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
+ rp->max_instances = 1;
+
+ /* Currently only one instance is supported, so simply return the
+ * current instance number.
+ */
+ if (instance) {
+ rp->num_instances = 1;
+ rp->instance[0] = 1;
+ } else {
+ rp->num_instances = 0;
}
- if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
- opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_INDEX);
- goto done;
+ hci_dev_unlock(hdev);
+
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
+ MGMT_STATUS_SUCCESS, rp, rp_len);
+
+ kfree(rp);
+
+ return err;
+}
+
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+ u8 len, bool is_adv_data)
+{
+ u8 max_len = HCI_MAX_AD_LENGTH;
+ int i, cur_len;
+ bool flags_managed = false;
+ bool tx_power_managed = false;
+ u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
+ MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+ if (is_adv_data && (adv_flags & flags_params)) {
+ flags_managed = true;
+ max_len -= 3;
}
- handler = &mgmt_handlers[opcode];
+ if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
+ tx_power_managed = true;
+ max_len -= 3;
+ }
- if ((handler->var_len && len < handler->data_len) ||
- (!handler->var_len && len != handler->data_len)) {
- err = cmd_status(sk, index, opcode,
- MGMT_STATUS_INVALID_PARAMS);
- goto done;
+ if (len > max_len)
+ return false;
+
+ /* Make sure that the data is correctly formatted. */
+ for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
+ cur_len = data[i];
+
+ if (flags_managed && data[i + 1] == EIR_FLAGS)
+ return false;
+
+ if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
+ return false;
+
+ /* If the current field length would exceed the total data
+ * length, then it's invalid.
+ */
+ if (i + cur_len >= len)
+ return false;
}
- if (hdev)
- mgmt_init_hdev(sk, hdev);
+ return true;
+}
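The validation loop above walks standard length-prefixed advertising fields: data[i] gives the length of the field (type byte plus payload), so the field occupies data[i] through data[i + data[i]]. A minimal standalone sketch of the same walk, assuming plain AD/EIR formatting (illustration only, not the patch code):

#include <stdbool.h>
#include <stdint.h>

static bool tlv_walk_ok(const uint8_t *data, uint8_t len)
{
	uint8_t i = 0;

	while (i < len) {
		uint8_t field_len = data[i];

		/* The field spans data[i] .. data[i + field_len]; reject it
		 * when that would run past the end of the buffer.
		 */
		if (i + field_len >= len)
			return false;

		i += field_len + 1;
	}

	return true;
}

/* { 0x02, 0x01, 0x06 } (Flags = 0x06) passes; { 0x05, 0x01, 0x06 } fails
 * because the declared field length overruns the 3-byte buffer.
 */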
+
+static void add_advertising_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct mgmt_pending_cmd *cmd;
+ struct mgmt_rp_add_advertising rp;
+
+ BT_DBG("status %d", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
+
+ if (status) {
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
+ memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
+ advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
+ }
+
+ if (!cmd)
+ goto unlock;
- cp = buf + sizeof(*hdr);
+ rp.instance = 0x01;
+
+ if (status)
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(status));
+ else
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(status), &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void adv_timeout_expired(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ adv_instance.timeout_exp.work);
+
+ hdev->adv_instance.timeout = 0;
+
+ hci_dev_lock(hdev);
+ clear_adv_instance(hdev);
+ hci_dev_unlock(hdev);
+}
+
+static int add_advertising(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_cp_add_advertising *cp = data;
+ struct mgmt_rp_add_advertising rp;
+ u32 flags;
+ u32 supported_flags;
+ u8 status;
+ u16 timeout;
+ int err;
+ struct mgmt_pending_cmd *cmd;
+ struct hci_request req;
+
+ BT_DBG("%s", hdev->name);
+
+ status = mgmt_le_support(hdev);
+ if (status)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ status);
- err = handler->func(sk, hdev, cp, len);
+ flags = __le32_to_cpu(cp->flags);
+ timeout = __le16_to_cpu(cp->timeout);
+
+ /* The current implementation only supports adding one instance and only
+ * a subset of the specified flags.
+ */
+ supported_flags = get_supported_adv_flags(hdev);
+ if (cp->instance != 0x01 || (flags & ~supported_flags))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (timeout && !hdev_is_powered(hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
+ pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
+ pending_find(MGMT_OP_SET_LE, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+ !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
+ cp->scan_rsp_len, false)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
+
+ hdev->adv_instance.flags = flags;
+ hdev->adv_instance.adv_data_len = cp->adv_data_len;
+ hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
+
+ if (cp->adv_data_len)
+ memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
+
+ if (cp->scan_rsp_len)
+ memcpy(hdev->adv_instance.scan_rsp_data,
+ cp->data + cp->adv_data_len, cp->scan_rsp_len);
+
+ if (hdev->adv_instance.timeout)
+ cancel_delayed_work(&hdev->adv_instance.timeout_exp);
+
+ hdev->adv_instance.timeout = timeout;
+
+ if (timeout)
+ queue_delayed_work(hdev->workqueue,
+ &hdev->adv_instance.timeout_exp,
+ msecs_to_jiffies(timeout * 1000));
+
+ if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
+ advertising_added(sk, hdev, 1);
+
+ /* If the HCI_ADVERTISING flag is set or the device isn't powered then
+ * we have no HCI communication to make. Simply return.
+ */
+ if (!hdev_is_powered(hdev) ||
+ hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+ rp.instance = 0x01;
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ /* We're good to go, update advertising data, parameters, and start
+ * advertising.
+ */
+ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
+ data_len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ update_adv_data(&req);
+ update_scan_rsp_data(&req);
+ enable_advertising(&req);
+
+ err = hci_req_run(&req, add_advertising_complete);
if (err < 0)
- goto done;
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
- err = msglen;
+ return err;
+}
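As the copies above show, the variable-length buffer at the end of the Add Advertising command carries the advertising data first and the scan response data immediately after it, which is why the scan response is read from cp->data + cp->adv_data_len. A hypothetical userspace packing sketch (buffer layout only, not a real mgmt API call):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t pack_adv_payload(uint8_t *buf, size_t buf_len,
			       uint8_t *adv_data_len, uint8_t *scan_rsp_len)
{
	const uint8_t adv[] = { 0x02, 0x01, 0x06 };                /* Flags = 0x06        */
	const uint8_t rsp[] = { 0x05, 0x09, 't', 'e', 's', 't' };  /* Complete Local Name */

	if (buf_len < sizeof(adv) + sizeof(rsp))
		return 0;

	memcpy(buf, adv, sizeof(adv));                 /* adv_data_len bytes come first */
	memcpy(buf + sizeof(adv), rsp, sizeof(rsp));   /* scan_rsp_len bytes follow     */

	*adv_data_len = sizeof(adv);
	*scan_rsp_len = sizeof(rsp);
	return sizeof(adv) + sizeof(rsp);
}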
-done:
- if (hdev)
- hci_dev_put(hdev);
+static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct mgmt_pending_cmd *cmd;
+ struct mgmt_rp_remove_advertising rp;
+
+ BT_DBG("status %d", status);
+
+ hci_dev_lock(hdev);
+
+ /* A failure status here only means that we failed to disable
+ * advertising. Otherwise, the advertising instance has been removed,
+ * so report success.
+ */
+ cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
+ if (!cmd)
+ goto unlock;
+
+ rp.instance = 1;
+
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
+ &rp, sizeof(rp));
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_cp_remove_advertising *cp = data;
+ struct mgmt_rp_remove_advertising rp;
+ int err;
+ struct mgmt_pending_cmd *cmd;
+ struct hci_request req;
+
+ BT_DBG("%s", hdev->name);
+
+ /* The current implementation only allows modifying instance no 1. A
+ * value of 0 indicates that all instances should be cleared.
+ */
+ if (cp->instance > 1)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
+ pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
+ pending_find(MGMT_OP_SET_LE, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ if (hdev->adv_instance.timeout)
+ cancel_delayed_work(&hdev->adv_instance.timeout_exp);
+
+ memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
+
+ advertising_removed(sk, hdev, 1);
+
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
+
+ /* If the HCI_ADVERTISING flag is set or the device isn't powered then
+ * we have no HCI communication to make. Simply return.
+ */
+ if (!hdev_is_powered(hdev) ||
+ hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+ rp.instance = 1;
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_ADVERTISING,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
+ data_len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+ disable_advertising(&req);
+
+ err = hci_req_run(&req, remove_advertising_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
- kfree(buf);
return err;
}
+static const struct hci_mgmt_handler mgmt_handlers[] = {
+ { NULL }, /* 0x0000 (no command) */
+ { read_version, MGMT_READ_VERSION_SIZE,
+ HCI_MGMT_NO_HDEV |
+ HCI_MGMT_UNTRUSTED },
+ { read_commands, MGMT_READ_COMMANDS_SIZE,
+ HCI_MGMT_NO_HDEV |
+ HCI_MGMT_UNTRUSTED },
+ { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
+ HCI_MGMT_NO_HDEV |
+ HCI_MGMT_UNTRUSTED },
+ { read_controller_info, MGMT_READ_INFO_SIZE,
+ HCI_MGMT_UNTRUSTED },
+ { set_powered, MGMT_SETTING_SIZE },
+ { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
+ { set_connectable, MGMT_SETTING_SIZE },
+ { set_fast_connectable, MGMT_SETTING_SIZE },
+ { set_bondable, MGMT_SETTING_SIZE },
+ { set_link_security, MGMT_SETTING_SIZE },
+ { set_ssp, MGMT_SETTING_SIZE },
+ { set_hs, MGMT_SETTING_SIZE },
+ { set_le, MGMT_SETTING_SIZE },
+ { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
+ { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
+ { add_uuid, MGMT_ADD_UUID_SIZE },
+ { remove_uuid, MGMT_REMOVE_UUID_SIZE },
+ { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { disconnect, MGMT_DISCONNECT_SIZE },
+ { get_connections, MGMT_GET_CONNECTIONS_SIZE },
+ { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
+ { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
+ { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
+ { pair_device, MGMT_PAIR_DEVICE_SIZE },
+ { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
+ { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
+ { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
+ { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
+ { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
+ { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
+ { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
+ { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
+ { start_discovery, MGMT_START_DISCOVERY_SIZE },
+ { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
+ { confirm_name, MGMT_CONFIRM_NAME_SIZE },
+ { block_device, MGMT_BLOCK_DEVICE_SIZE },
+ { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
+ { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
+ { set_advertising, MGMT_SETTING_SIZE },
+ { set_bredr, MGMT_SETTING_SIZE },
+ { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
+ { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
+ { set_secure_conn, MGMT_SETTING_SIZE },
+ { set_debug_keys, MGMT_SETTING_SIZE },
+ { set_privacy, MGMT_SET_PRIVACY_SIZE },
+ { load_irks, MGMT_LOAD_IRKS_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
+ { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
+ { add_device, MGMT_ADD_DEVICE_SIZE },
+ { remove_device, MGMT_REMOVE_DEVICE_SIZE },
+ { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
+ HCI_MGMT_NO_HDEV |
+ HCI_MGMT_UNTRUSTED },
+ { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
+ HCI_MGMT_UNCONFIGURED |
+ HCI_MGMT_UNTRUSTED },
+ { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
+ HCI_MGMT_UNCONFIGURED },
+ { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
+ HCI_MGMT_UNCONFIGURED },
+ { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
+ { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
+ HCI_MGMT_NO_HDEV |
+ HCI_MGMT_UNTRUSTED },
+ { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
+ { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
+};
+
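Compared with the old table, each entry now records only the minimum parameter size plus per-command flags (HCI_MGMT_VAR_LEN replacing the var_len bool, HCI_MGMT_NO_HDEV, HCI_MGMT_UNTRUSTED, HCI_MGMT_UNCONFIGURED), with the generic channel code expected to do the opcode lookup and size check before calling func. A simplified standalone sketch of that dispatch pattern (not the actual hci_sock code):

#include <stddef.h>
#include <stdint.h>

#define VAR_LEN 0x01	/* stands in for HCI_MGMT_VAR_LEN */

struct handler {
	int (*func)(void *data, uint16_t len);
	uint16_t min_len;
	unsigned long flags;
};

static int dispatch(const struct handler *tbl, size_t count,
		    uint16_t opcode, void *data, uint16_t len)
{
	const struct handler *h;

	if (opcode >= count || !tbl[opcode].func)
		return -1;	/* unknown command */

	h = &tbl[opcode];

	/* Variable-length commands need at least min_len bytes,
	 * fixed-length commands exactly min_len bytes.
	 */
	if ((h->flags & VAR_LEN) ? len < h->min_len : len != h->min_len)
		return -1;

	return h->func(data, len);
}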
void mgmt_index_added(struct hci_dev *hdev)
{
- if (hdev->dev_type != HCI_BREDR)
- return;
+ struct mgmt_ev_ext_index ev;
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return;
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
- mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
- else
- mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
+ NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+ ev.type = 0x01;
+ } else {
+ mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
+ HCI_MGMT_INDEX_EVENTS);
+ ev.type = 0x00;
+ }
+ break;
+ case HCI_AMP:
+ ev.type = 0x02;
+ break;
+ default:
+ return;
+ }
+
+ ev.bus = hdev->bus;
+
+ mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
+ HCI_MGMT_EXT_INDEX_EVENTS);
}
void mgmt_index_removed(struct hci_dev *hdev)
{
+ struct mgmt_ev_ext_index ev;
u8 status = MGMT_STATUS_INVALID_INDEX;
- if (hdev->dev_type != HCI_BREDR)
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return;
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
+ NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+ ev.type = 0x01;
+ } else {
+ mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
+ HCI_MGMT_INDEX_EVENTS);
+ ev.type = 0x00;
+ }
+ break;
+ case HCI_AMP:
+ ev.type = 0x02;
+ break;
+ default:
return;
+ }
- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+ ev.bus = hdev->bus;
- if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
- mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
- else
- mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
+ mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
+ HCI_MGMT_EXT_INDEX_EVENTS);
}
/* This function requires the caller holds hdev->lock */
@@ -6367,7 +7365,7 @@ static int powered_update_hci(struct hci_dev *hdev)
hci_req_init(&req, hdev);
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
!lmp_host_ssp_capable(hdev)) {
u8 mode = 0x01;
@@ -6381,7 +7379,7 @@ static int powered_update_hci(struct hci_dev *hdev)
}
}
- if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
@@ -6402,24 +7400,28 @@ static int powered_update_hci(struct hci_dev *hdev)
* advertising data. This also applies to the case
* where BR/EDR was toggled during the AUTO_OFF phase.
*/
- if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
update_adv_data(&req);
update_scan_rsp_data(&req);
}
- if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
enable_advertising(&req);
restart_le_actions(&req);
}
- link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
sizeof(link_sec), &link_sec);
if (lmp_bredr_capable(hdev)) {
- write_fast_connectable(&req, false);
+ if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
+ write_fast_connectable(&req, true);
+ else
+ write_fast_connectable(&req, false);
__hci_update_page_scan(&req);
update_class(&req);
update_name(&req);
@@ -6435,7 +7437,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
u8 status, zero_cod[] = { 0, 0, 0 };
int err;
- if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
return 0;
if (powered) {
@@ -6456,7 +7458,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
* been triggered, potentially causing misleading DISCONNECTED
* status responses.
*/
- if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
status = MGMT_STATUS_INVALID_INDEX;
else
status = MGMT_STATUS_NOT_POWERED;
@@ -6464,8 +7466,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
- mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
- zero_cod, sizeof(zero_cod), NULL);
+ mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+ zero_cod, sizeof(zero_cod), NULL);
new_settings:
err = new_settings(hdev, match.sk);
@@ -6478,10 +7480,10 @@ new_settings:
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
u8 status;
- cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+ cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
if (!cmd)
return;
@@ -6490,7 +7492,7 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
else
status = MGMT_STATUS_FAILED;
- cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
mgmt_pending_remove(cmd);
}
@@ -6506,17 +7508,23 @@ void mgmt_discoverable_timeout(struct hci_dev *hdev)
* of a timeout triggered from general discoverable, it is
* safe to unconditionally clear the flag.
*/
- clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
- clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hci_req_init(&req, hdev);
- if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
u8 scan = SCAN_PAGE;
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
sizeof(scan), &scan);
}
update_class(&req);
- update_adv_data(&req);
+
+ /* Advertising instances don't use the global discoverable setting, so
+ * only update AD if advertising was enabled using Set Advertising.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ update_adv_data(&req);
+
hci_req_run(&req, NULL);
hdev->discov_timeout = 0;
@@ -6654,7 +7662,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
- ev.key.master = csrk->master;
+ ev.key.type = csrk->type;
memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
@@ -6681,17 +7689,6 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
- u8 data_len)
-{
- eir[eir_len++] = sizeof(type) + data_len;
- eir[eir_len++] = type;
- memcpy(&eir[eir_len], data, data_len);
- eir_len += data_len;
-
- return eir_len;
-}
-
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
u32 flags, u8 *name, u8 name_len)
{
@@ -6729,7 +7726,7 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
sizeof(*ev) + eir_len, NULL);
}
-static void disconnect_rsp(struct pending_cmd *cmd, void *data)
+static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
struct sock **sk = data;
@@ -6741,7 +7738,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_remove(cmd);
}
-static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
+static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
struct hci_dev *hdev = data;
struct mgmt_cp_unpair_device *cp = cmd->param;
@@ -6754,10 +7751,10 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
bool mgmt_powering_down(struct hci_dev *hdev)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
- cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+ cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
if (!cmd)
return false;
@@ -6809,12 +7806,12 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
{
u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
struct mgmt_cp_disconnect *cp;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
- cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
+ cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
if (!cmd)
return;
@@ -6864,9 +7861,9 @@ void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
+ cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
if (!cmd)
return;
@@ -6877,9 +7874,9 @@ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
+ cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
if (!cmd)
return;
@@ -6922,9 +7919,9 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status,
u8 opcode)
{
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
- cmd = mgmt_pending_find(opcode, hdev);
+ cmd = pending_find(opcode, hdev);
if (!cmd)
return -ENOENT;
@@ -6983,7 +7980,7 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
struct mgmt_ev_auth_failed ev;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
u8 status = mgmt_status(hci_status);
bacpy(&ev.addr.bdaddr, &conn->dst);
@@ -7014,11 +8011,9 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
}
if (test_bit(HCI_AUTH, &hdev->flags))
- changed = !test_and_set_bit(HCI_LINK_SECURITY,
- &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
else
- changed = test_and_clear_bit(HCI_LINK_SECURITY,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
&match);
@@ -7054,9 +8049,9 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
if (status) {
u8 mgmt_err = mgmt_status(status);
- if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
- &hdev->dev_flags)) {
- clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ if (enable && hci_dev_test_and_clear_flag(hdev,
+ HCI_SSP_ENABLED)) {
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
new_settings(hdev, NULL);
}
@@ -7066,14 +8061,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
}
if (enable) {
- changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
} else {
- changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
if (!changed)
- changed = test_and_clear_bit(HCI_HS_ENABLED,
- &hdev->dev_flags);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_HS_ENABLED);
else
- clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
@@ -7086,8 +8081,8 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
hci_req_init(&req, hdev);
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
- if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
+ if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
sizeof(enable), &enable);
update_eir(&req);
@@ -7098,7 +8093,7 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
hci_req_run(&req, NULL);
}
-static void sk_lookup(struct pending_cmd *cmd, void *data)
+static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
struct cmd_lookup *match = data;
@@ -7118,8 +8113,8 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
if (!status)
- mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
- NULL);
+ mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+ dev_class, 3, NULL);
if (match.sk)
sock_put(match.sk);
@@ -7128,7 +8123,7 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
struct mgmt_cp_set_local_name ev;
- struct pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd;
if (status)
return;
@@ -7137,55 +8132,19 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
- cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+ cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
if (!cmd) {
memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
/* If this is a HCI command related to powering on the
* HCI dev don't send any mgmt signals.
*/
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+ if (pending_find(MGMT_OP_SET_POWERED, hdev))
return;
}
- mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
-}
-
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
- u8 *rand192, u8 *hash256, u8 *rand256,
- u8 status)
-{
- struct pending_cmd *cmd;
-
- BT_DBG("%s status %u", hdev->name, status);
-
- cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
- if (!cmd)
- return;
-
- if (status) {
- cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- mgmt_status(status));
- } else {
- struct mgmt_rp_read_local_oob_data rp;
- size_t rp_size = sizeof(rp);
-
- memcpy(rp.hash192, hash192, sizeof(rp.hash192));
- memcpy(rp.rand192, rand192, sizeof(rp.rand192));
-
- if (bredr_sc_enabled(hdev) && hash256 && rand256) {
- memcpy(rp.hash256, hash256, sizeof(rp.hash256));
- memcpy(rp.rand256, rand256, sizeof(rp.rand256));
- } else {
- rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
- }
-
- cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
- &rp, rp_size);
- }
-
- mgmt_pending_remove(cmd);
+ mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
@@ -7258,7 +8217,7 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
static void restart_le_scan(struct hci_dev *hdev)
{
/* If controller is not scanning we are done. */
- if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
@@ -7270,14 +8229,58 @@ static void restart_le_scan(struct hci_dev *hdev)
DISCOV_LE_RESTART_DELAY);
}
+static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
+ u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
+{
+ /* If a RSSI threshold has been specified, and
+ * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
+ * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
+ * is set, let it through for further processing, as we might need to
+ * restart the scan.
+ *
+ * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
+ * the results are also dropped.
+ */
+ if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+ (rssi == HCI_RSSI_INVALID ||
+ (rssi < hdev->discovery.rssi &&
+ !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
+ return false;
+
+ if (hdev->discovery.uuid_count != 0) {
+ /* If a list of UUIDs is provided in filter, results with no
+ * matching UUID should be dropped.
+ */
+ if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
+ hdev->discovery.uuids) &&
+ !eir_has_uuids(scan_rsp, scan_rsp_len,
+ hdev->discovery.uuid_count,
+ hdev->discovery.uuids))
+ return false;
+ }
+
+ /* If duplicate filtering does not report RSSI changes, then restart
+ * scanning to ensure updated result with updated RSSI values.
+ */
+ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
+ restart_le_scan(hdev);
+
+ /* Validate RSSI value against the RSSI threshold once more. */
+ if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+ rssi < hdev->discovery.rssi)
+ return false;
+ }
+
+ return true;
+}
+
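As a worked example of the consolidated filter: with discovery.rssi = -70 and one UUID in the filter list, a -80 dBm report carrying a matching UUID is dropped outright on controllers without HCI_QUIRK_STRICT_DUPLICATE_FILTER; on controllers with the quirk it survives the first check, triggers an LE scan restart, and is then rejected by the repeated threshold comparison; a -60 dBm report whose EIR and scan response both lack the UUID is dropped by the UUID check instead.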
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
char buf[512];
- struct mgmt_ev_device_found *ev = (void *) buf;
+ struct mgmt_ev_device_found *ev = (void *)buf;
size_t ev_size;
- bool match;
/* Don't send events for a non-kernel initiated discovery. With
* LE one exception is if we have pend_le_reports > 0 in which
@@ -7290,21 +8293,12 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
return;
}
- /* When using service discovery with a RSSI threshold, then check
- * if such a RSSI threshold is specified. If a RSSI threshold has
- * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
- * then all results with a RSSI smaller than the RSSI threshold will be
- * dropped. If the quirk is set, let it through for further processing,
- * as we might need to restart the scan.
- *
- * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
- * the results are also dropped.
- */
- if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
- (rssi == HCI_RSSI_INVALID ||
- (rssi < hdev->discovery.rssi &&
- !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
- return;
+ if (hdev->discovery.result_filtering) {
+ /* We are using service discovery */
+ if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
+ scan_rsp_len))
+ return;
+ }
/* Make sure that the buffer is big enough. The 5 extra bytes
* are for the potential CoD field.
@@ -7331,87 +8325,17 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev->rssi = rssi;
ev->flags = cpu_to_le32(flags);
- if (eir_len > 0) {
- /* When using service discovery and a list of UUID is
- * provided, results with no matching UUID should be
- * dropped. In case there is a match the result is
- * kept and checking possible scan response data
- * will be skipped.
- */
- if (hdev->discovery.uuid_count > 0) {
- match = eir_has_uuids(eir, eir_len,
- hdev->discovery.uuid_count,
- hdev->discovery.uuids);
- /* If duplicate filtering does not report RSSI changes,
- * then restart scanning to ensure updated result with
- * updated RSSI values.
- */
- if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
- &hdev->quirks))
- restart_le_scan(hdev);
- } else {
- match = true;
- }
-
- if (!match && !scan_rsp_len)
- return;
-
+ if (eir_len > 0)
/* Copy EIR or advertising data into event */
memcpy(ev->eir, eir, eir_len);
- } else {
- /* When using service discovery and a list of UUID is
- * provided, results with empty EIR or advertising data
- * should be dropped since they do not match any UUID.
- */
- if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
- return;
-
- match = false;
- }
if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
- if (scan_rsp_len > 0) {
- /* When using service discovery and a list of UUID is
- * provided, results with no matching UUID should be
- * dropped if there is no previous match from the
- * advertising data.
- */
- if (hdev->discovery.uuid_count > 0) {
- if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
- hdev->discovery.uuid_count,
- hdev->discovery.uuids))
- return;
-
- /* If duplicate filtering does not report RSSI changes,
- * then restart scanning to ensure updated result with
- * updated RSSI values.
- */
- if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
- &hdev->quirks))
- restart_le_scan(hdev);
- }
-
+ if (scan_rsp_len > 0)
/* Append scan response data to event */
memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
- } else {
- /* When using service discovery and a list of UUID is
- * provided, results with empty scan response and no
- * previous matched advertising data should be dropped.
- */
- if (hdev->discovery.uuid_count > 0 && !match)
- return;
- }
-
- /* Validate the reported RSSI value against the RSSI threshold once more
- * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
- * scanning.
- */
- if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
- rssi < hdev->discovery.rssi)
- return;
ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
@@ -7464,10 +8388,28 @@ void mgmt_reenable_advertising(struct hci_dev *hdev)
{
struct hci_request req;
- if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
return;
hci_req_init(&req, hdev);
enable_advertising(&req);
hci_req_run(&req, adv_enable_complete);
}
+
+static struct hci_mgmt_chan chan = {
+ .channel = HCI_CHANNEL_CONTROL,
+ .handler_count = ARRAY_SIZE(mgmt_handlers),
+ .handlers = mgmt_handlers,
+ .hdev_init = mgmt_init_hdev,
+};
+
+int mgmt_init(void)
+{
+ return hci_mgmt_chan_register(&chan);
+}
+
+void mgmt_exit(void)
+{
+ hci_mgmt_chan_unregister(&chan);
+}
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
new file mode 100644
index 000000000000..8c30c7eb8bef
--- /dev/null
+++ b/net/bluetooth/mgmt_util.c
@@ -0,0 +1,210 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2015 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "mgmt_util.h"
+
+int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
+ void *data, u16 data_len, int flag, struct sock *skip_sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(event);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+ hdr->len = cpu_to_le16(data_len);
+
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ /* Time stamp */
+ __net_timestamp(skb);
+
+ hci_send_to_channel(channel, skb, flag, skip_sk);
+ kfree_skb(skb);
+
+ return 0;
+}
+
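For reference, the header built here is the standard 6-byte management header followed by the payload, all little-endian (this matches struct mgmt_hdr as used above):

/*
 *  0       2       4       6
 *  +-------+-------+-------+------------------+
 *  | opcode| index |  len  | data (len bytes) |
 *  +-------+-------+-------+------------------+
 */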
+int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+ struct mgmt_ev_cmd_status *ev;
+ int err;
+
+ BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
+
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+
+ hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(sizeof(*ev));
+
+ ev = (void *) skb_put(skb, sizeof(*ev));
+ ev->status = status;
+ ev->opcode = cpu_to_le16(cmd);
+
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
+ kfree_skb(skb);
+
+ return err;
+}
+
+int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+ void *rp, size_t rp_len)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+ struct mgmt_ev_cmd_complete *ev;
+ int err;
+
+ BT_DBG("sock %p", sk);
+
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+
+ hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
+
+ ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+ ev->opcode = cpu_to_le16(cmd);
+ ev->status = status;
+
+ if (rp)
+ memcpy(ev->data, rp, rp_len);
+
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
+ kfree_skb(skb);
+
+ return err;
+}
+
+struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
+ struct hci_dev *hdev)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (hci_sock_get_channel(cmd->sk) != channel)
+ continue;
+ if (cmd->opcode == opcode)
+ return cmd;
+ }
+
+ return NULL;
+}
+
+struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel,
+ u16 opcode,
+ struct hci_dev *hdev,
+ const void *data)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->user_data != data)
+ continue;
+ if (cmd->opcode == opcode)
+ return cmd;
+ }
+
+ return NULL;
+}
+
+void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
+ void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
+ void *data)
+{
+ struct mgmt_pending_cmd *cmd, *tmp;
+
+ list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
+ if (opcode > 0 && cmd->opcode != opcode)
+ continue;
+
+ cb(cmd, data);
+ }
+}
+
+struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->opcode = opcode;
+ cmd->index = hdev->id;
+
+ cmd->param = kmemdup(data, len, GFP_KERNEL);
+ if (!cmd->param) {
+ kfree(cmd);
+ return NULL;
+ }
+
+ cmd->param_len = len;
+
+ cmd->sk = sk;
+ sock_hold(sk);
+
+ list_add(&cmd->list, &hdev->mgmt_pending);
+
+ return cmd;
+}
+
+void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
+{
+ sock_put(cmd->sk);
+ kfree(cmd->param);
+ kfree(cmd);
+}
+
+void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
+{
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+}
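Putting the helpers together, a typical caller in mgmt.c follows this lifecycle (sketch only; pending_find() appears to be a thin mgmt.c wrapper that supplies the control channel to mgmt_pending_find(), and MGMT_OP_FOO is a placeholder opcode):

/* In the command handler: */
cmd = mgmt_pending_add(sk, MGMT_OP_FOO, hdev, cp, sizeof(*cp));
if (!cmd)
	return -ENOMEM;
/* ...build and run an hci_request; on failure, mgmt_pending_remove(cmd). */

/* In the request completion callback: */
cmd = pending_find(MGMT_OP_FOO, hdev);
if (!cmd)
	return;
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, rp, rp_len);
mgmt_pending_remove(cmd);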
diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
new file mode 100644
index 000000000000..6559f189213c
--- /dev/null
+++ b/net/bluetooth/mgmt_util.h
@@ -0,0 +1,53 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2015 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+struct mgmt_pending_cmd {
+ struct list_head list;
+ u16 opcode;
+ int index;
+ void *param;
+ size_t param_len;
+ struct sock *sk;
+ void *user_data;
+ int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
+};
+
+int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
+ void *data, u16 data_len, int flag, struct sock *skip_sk);
+int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status);
+int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+ void *rp, size_t rp_len);
+
+struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
+ struct hci_dev *hdev);
+struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel,
+ u16 opcode,
+ struct hci_dev *hdev,
+ const void *data);
+void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
+ void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
+ void *data);
+struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev,
+ void *data, u16 len);
+void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
+void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
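
mgmt_pending_foreach() walks the pending list with list_for_each_entry_safe(), so the callback may remove the entry it is handed; an opcode of 0 matches every command. A hypothetical cleanup helper — cancel_cmd() and fail_all_pending() are made-up names, MGMT_STATUS_NOT_POWERED is assumed from the mgmt headers — might look like:

	static void cancel_cmd(struct mgmt_pending_cmd *cmd, void *data)
	{
		u8 status = *(u8 *)data;

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, status);
		mgmt_pending_remove(cmd);	/* safe: foreach uses the _safe walker */
	}

	static void fail_all_pending(struct hci_dev *hdev)
	{
		u8 status = MGMT_STATUS_NOT_POWERED;

		/* opcode 0 matches every pending command */
		mgmt_pending_foreach(0, hdev, cancel_cmd, &status);
	}
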
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 3c6d2c8ac1a4..825e8fb5114b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -549,8 +549,8 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
return 0;
}
-static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
@@ -615,8 +615,8 @@ done:
return sent;
}
-static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
@@ -627,7 +627,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return 0;
}
- len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
+ len = bt_sock_stream_recvmsg(sock, msg, size, flags);
lock_sock(sk);
if (!(flags & MSG_PEEK) && len > 0)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 76321b546e84..4322c833e748 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -688,8 +688,8 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
return 0;
}
-static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
int err;
@@ -758,8 +758,8 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
}
}
-static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sco_pinfo *pi = sco_pi(sk);
@@ -777,7 +777,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
release_sock(sk);
- return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+ return bt_sock_recvmsg(sock, msg, len, flags);
}
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
@@ -1083,9 +1083,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
return lm;
}
-void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
+ if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+ return;
+
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+
if (!status) {
struct sco_conn *conn;
@@ -1096,8 +1100,11 @@ void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
sco_conn_del(hcon, bt_to_errno(status));
}
-void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
+ if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+ return;
+
BT_DBG("hcon %p reason %d", hcon, reason);
sco_conn_del(hcon, bt_to_errno(reason));
@@ -1122,6 +1129,12 @@ drop:
return 0;
}
+static struct hci_cb sco_cb = {
+ .name = "SCO",
+ .connect_cfm = sco_connect_cfm,
+ .disconn_cfm = sco_disconn_cfm,
+};
+
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
@@ -1203,6 +1216,8 @@ int __init sco_init(void)
BT_INFO("SCO socket layer initialized");
+ hci_register_cb(&sco_cb);
+
if (IS_ERR_OR_NULL(bt_debugfs))
return 0;
@@ -1216,12 +1231,14 @@ error:
return err;
}
-void __exit sco_exit(void)
+void sco_exit(void)
{
bt_procfs_cleanup(&init_net, "sco");
debugfs_remove(sco_debugfs);
+ hci_unregister_cb(&sco_cb);
+
bt_sock_unregister(BTPROTO_SCO);
proto_unregister(&sco_proto);
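
With the change above, SCO no longer exports its connect/disconnect confirmation functions; it registers a struct hci_cb and filters on the link type inside its own callbacks. A hypothetical protocol module following the same pattern could look like the sketch below (foo_connect_cfm and foo_cb are made-up names; ACL_LINK is the standard link type constant):

	static void foo_connect_cfm(struct hci_conn *hcon, __u8 status)
	{
		/* Callbacks now see every connection, so filter on the link type */
		if (hcon->type != ACL_LINK)
			return;

		/* ...set up per-connection state on success... */
	}

	static struct hci_cb foo_cb = {
		.name		= "foo",
		.connect_cfm	= foo_connect_cfm,
	};

	/* module init: hci_register_cb(&foo_cb);
	 * module exit: hci_unregister_cb(&foo_cb);
	 */
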
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c
index 378f4064952c..dc688f13e496 100644
--- a/net/bluetooth/selftest.c
+++ b/net/bluetooth/selftest.c
@@ -21,6 +21,8 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/debugfs.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -154,6 +156,21 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
return 0;
}
+static char test_ecdh_buffer[32];
+
+static ssize_t test_ecdh_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer,
+ strlen(test_ecdh_buffer));
+}
+
+static const struct file_operations test_ecdh_fops = {
+ .open = simple_open,
+ .read = test_ecdh_read,
+ .llseek = default_llseek,
+};
+
static int __init test_ecdh(void)
{
ktime_t calltime, delta, rettime;
@@ -165,19 +182,19 @@ static int __init test_ecdh(void)
err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
if (err) {
BT_ERR("ECDH sample 1 failed");
- return err;
+ goto done;
}
err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
if (err) {
BT_ERR("ECDH sample 2 failed");
- return err;
+ goto done;
}
err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
if (err) {
BT_ERR("ECDH sample 3 failed");
- return err;
+ goto done;
}
rettime = ktime_get();
@@ -186,7 +203,17 @@ static int __init test_ecdh(void)
BT_INFO("ECDH test passed in %llu usecs", duration);
- return 0;
+done:
+ if (!err)
+ snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer),
+ "PASS (%llu usecs)\n", duration);
+ else
+ snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n");
+
+ debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL,
+ &test_ecdh_fops);
+
+ return err;
}
#else
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index c09a821f381d..1ab3dc9c8f99 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -52,7 +52,7 @@
#define SMP_TIMEOUT msecs_to_jiffies(30000)
-#define AUTH_REQ_MASK(dev) (test_bit(HCI_SC_ENABLED, &(dev)->dev_flags) ? \
+#define AUTH_REQ_MASK(dev) (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? \
0x1f : 0x07)
#define KEY_DIST_MASK 0x07
@@ -70,7 +70,19 @@ enum {
SMP_FLAG_DEBUG_KEY,
SMP_FLAG_WAIT_USER,
SMP_FLAG_DHKEY_PENDING,
- SMP_FLAG_OOB,
+ SMP_FLAG_REMOTE_OOB,
+ SMP_FLAG_LOCAL_OOB,
+};
+
+struct smp_dev {
+ /* Secure Connections OOB data */
+ u8 local_pk[64];
+ u8 local_sk[32];
+ u8 local_rand[16];
+ bool debug_key;
+
+ struct crypto_blkcipher *tfm_aes;
+ struct crypto_hash *tfm_cmac;
};
struct smp_chan {
@@ -84,7 +96,8 @@ struct smp_chan {
u8 rrnd[16]; /* SMP Pairing Random (remote) */
u8 pcnf[16]; /* SMP Pairing Confirm */
u8 tk[16]; /* SMP Temporary Key */
- u8 rr[16];
+ u8 rr[16]; /* Remote OOB ra/rb value */
+ u8 lr[16]; /* Local OOB ra/rb value */
u8 enc_key_size;
u8 remote_key_dist;
bdaddr_t id_addr;
@@ -478,18 +491,18 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
const bdaddr_t *bdaddr)
{
struct l2cap_chan *chan = hdev->smp_data;
- struct crypto_blkcipher *tfm;
+ struct smp_dev *smp;
u8 hash[3];
int err;
if (!chan || !chan->data)
return false;
- tfm = chan->data;
+ smp = chan->data;
BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
- err = smp_ah(tfm, irk, &bdaddr->b[3], hash);
+ err = smp_ah(smp->tfm_aes, irk, &bdaddr->b[3], hash);
if (err)
return false;
@@ -499,20 +512,20 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
{
struct l2cap_chan *chan = hdev->smp_data;
- struct crypto_blkcipher *tfm;
+ struct smp_dev *smp;
int err;
if (!chan || !chan->data)
return -EOPNOTSUPP;
- tfm = chan->data;
+ smp = chan->data;
get_random_bytes(&rpa->b[3], 3);
rpa->b[5] &= 0x3f; /* Clear two most significant bits */
rpa->b[5] |= 0x40; /* Set second most significant bit */
- err = smp_ah(tfm, irk, &rpa->b[3], rpa->b);
+ err = smp_ah(smp->tfm_aes, irk, &rpa->b[3], rpa->b);
if (err < 0)
return err;
@@ -521,6 +534,53 @@ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
return 0;
}
+int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
+{
+ struct l2cap_chan *chan = hdev->smp_data;
+ struct smp_dev *smp;
+ int err;
+
+ if (!chan || !chan->data)
+ return -EOPNOTSUPP;
+
+ smp = chan->data;
+
+ if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
+ BT_DBG("Using debug keys");
+ memcpy(smp->local_pk, debug_pk, 64);
+ memcpy(smp->local_sk, debug_sk, 32);
+ smp->debug_key = true;
+ } else {
+ while (true) {
+ /* Generate local key pair for Secure Connections */
+ if (!ecc_make_key(smp->local_pk, smp->local_sk))
+ return -EIO;
+
+ /* This is unlikely, but we need to check that
+ * we didn't accidentally generate a debug key.
+ */
+ if (memcmp(smp->local_sk, debug_sk, 32))
+ break;
+ }
+ smp->debug_key = false;
+ }
+
+ SMP_DBG("OOB Public Key X: %32phN", smp->local_pk);
+ SMP_DBG("OOB Public Key Y: %32phN", smp->local_pk + 32);
+ SMP_DBG("OOB Private Key: %32phN", smp->local_sk);
+
+ get_random_bytes(smp->local_rand, 16);
+
+ err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->local_pk,
+ smp->local_rand, 0, hash);
+ if (err < 0)
+ return err;
+
+ memcpy(rand, smp->local_rand, 16);
+
+ return 0;
+}
+
static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
{
struct l2cap_chan *chan = conn->smp;
@@ -589,7 +649,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
struct hci_dev *hdev = hcon->hdev;
u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT;
- if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
authreq |= SMP_AUTH_BONDING;
@@ -597,18 +657,18 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
authreq &= ~SMP_AUTH_BONDING;
}
- if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
remote_dist |= SMP_DIST_ID_KEY;
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY))
local_dist |= SMP_DIST_ID_KEY;
- if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
(authreq & SMP_AUTH_SC)) {
struct oob_data *oob_data;
u8 bdaddr_type;
- if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
local_dist |= SMP_DIST_LINK_KEY;
remote_dist |= SMP_DIST_LINK_KEY;
}
@@ -621,10 +681,12 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
oob_data = hci_find_remote_oob_data(hdev, &hcon->dst,
bdaddr_type);
if (oob_data && oob_data->present) {
- set_bit(SMP_FLAG_OOB, &smp->flags);
+ set_bit(SMP_FLAG_REMOTE_OOB, &smp->flags);
oob_flag = SMP_OOB_PRESENT;
memcpy(smp->rr, oob_data->rand256, 16);
memcpy(smp->pcnf, oob_data->hash256, 16);
+ SMP_DBG("OOB Remote Confirmation: %16phN", smp->pcnf);
+ SMP_DBG("OOB Remote Random: %16phN", smp->rr);
}
} else {
@@ -681,9 +743,9 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
mgmt_smp_complete(hcon, complete);
- kfree(smp->csrk);
- kfree(smp->slave_csrk);
- kfree(smp->link_key);
+ kzfree(smp->csrk);
+ kzfree(smp->slave_csrk);
+ kzfree(smp->link_key);
crypto_free_blkcipher(smp->tfm_aes);
crypto_free_hash(smp->tfm_cmac);
@@ -692,7 +754,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
* support hasn't been explicitly enabled.
*/
if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG &&
- !test_bit(HCI_KEEP_DEBUG_KEYS, &hcon->hdev->dev_flags)) {
+ !hci_dev_test_flag(hcon->hdev, HCI_KEEP_DEBUG_KEYS)) {
list_del_rcu(&smp->ltk->list);
kfree_rcu(smp->ltk, rcu);
smp->ltk = NULL;
@@ -717,7 +779,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
}
chan->data = NULL;
- kfree(smp);
+ kzfree(smp);
hci_conn_drop(hcon);
}
@@ -818,6 +880,12 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
return 0;
}
+ /* If this function is used for SC -> legacy fallback we
+ * can only recover the just-works case.
+ */
+ if (test_bit(SMP_FLAG_SC, &smp->flags))
+ return -EINVAL;
+
/* Not Just Works/Confirm results in MITM Authentication */
if (smp->method != JUST_CFM) {
set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
@@ -1052,7 +1120,7 @@ static void smp_notify_keys(struct l2cap_conn *conn)
/* Don't keep debug keys around if the relevant
* flag is not set.
*/
- if (!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS) &&
key->type == HCI_LK_DEBUG_COMBINATION) {
list_del_rcu(&key->list);
kfree_rcu(key, rcu);
@@ -1097,13 +1165,13 @@ static void sc_generate_link_key(struct smp_chan *smp)
return;
if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) {
- kfree(smp->link_key);
+ kzfree(smp->link_key);
smp->link_key = NULL;
return;
}
if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) {
- kfree(smp->link_key);
+ kzfree(smp->link_key);
smp->link_key = NULL;
return;
}
@@ -1252,7 +1320,10 @@ static void smp_distribute_keys(struct smp_chan *smp)
csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
if (csrk) {
- csrk->master = 0x00;
+ if (hcon->sec_level > BT_SECURITY_MEDIUM)
+ csrk->type = MGMT_CSRK_LOCAL_AUTHENTICATED;
+ else
+ csrk->type = MGMT_CSRK_LOCAL_UNAUTHENTICATED;
memcpy(csrk->val, sign.csrk, sizeof(csrk->val));
}
smp->slave_csrk = csrk;
@@ -1297,7 +1368,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(smp->tfm_aes)) {
BT_ERR("Unable to create ECB crypto context");
- kfree(smp);
+ kzfree(smp);
return NULL;
}
@@ -1305,7 +1376,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
if (IS_ERR(smp->tfm_cmac)) {
BT_ERR("Unable to create CMAC crypto context");
crypto_free_blkcipher(smp->tfm_aes);
- kfree(smp);
+ kzfree(smp);
return NULL;
}
@@ -1601,15 +1672,15 @@ static void build_bredr_pairing_cmd(struct smp_chan *smp,
struct hci_dev *hdev = conn->hcon->hdev;
u8 local_dist = 0, remote_dist = 0;
- if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) {
+ if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
}
- if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
remote_dist |= SMP_DIST_ID_KEY;
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY))
local_dist |= SMP_DIST_ID_KEY;
if (!rsp) {
@@ -1661,22 +1732,29 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
/* We didn't start the pairing, so match remote */
auth = req->auth_req & AUTH_REQ_MASK(hdev);
- if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
(auth & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
- if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
return SMP_AUTH_REQUIREMENTS;
smp->preq[0] = SMP_CMD_PAIRING_REQ;
memcpy(&smp->preq[1], req, sizeof(*req));
skb_pull(skb, sizeof(*req));
+ /* If the remote side's OOB flag is set it means it has
+ * successfully received our local OOB data - therefore set the
+ * flag to indicate that local OOB is in use.
+ */
+ if (req->oob_flag == SMP_OOB_PRESENT)
+ set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
+
/* SMP over BR/EDR requires special treatment */
if (conn->hcon->type == ACL_LINK) {
/* We must have a BR/EDR SC link */
if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
- !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+ !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return SMP_CROSS_TRANSP_NOT_ALLOWED;
set_bit(SMP_FLAG_SC, &smp->flags);
@@ -1734,14 +1812,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
+ /* Strictly speaking we shouldn't allow Pairing Confirm for the
+ * SC case, however some implementations incorrectly copy RFU auth
+ * req bits from our security request, which may create a false
+ * positive SC enablement.
+ */
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+
if (test_bit(SMP_FLAG_SC, &smp->flags)) {
SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY);
/* Clear bits which are generated but not distributed */
smp->remote_key_dist &= ~SMP_SC_NO_DIST;
/* Wait for Public Key from Initiating Device */
return 0;
- } else {
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
}
/* Request setup of TK */
@@ -1758,7 +1841,26 @@ static u8 sc_send_public_key(struct smp_chan *smp)
BT_DBG("");
- if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) {
+ if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) {
+ struct l2cap_chan *chan = hdev->smp_data;
+ struct smp_dev *smp_dev;
+
+ if (!chan || !chan->data)
+ return SMP_UNSPECIFIED;
+
+ smp_dev = chan->data;
+
+ memcpy(smp->local_pk, smp_dev->local_pk, 64);
+ memcpy(smp->local_sk, smp_dev->local_sk, 32);
+ memcpy(smp->lr, smp_dev->local_rand, 16);
+
+ if (smp_dev->debug_key)
+ set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
+
+ goto done;
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
BT_DBG("Using debug keys");
memcpy(smp->local_pk, debug_pk, 64);
memcpy(smp->local_sk, debug_sk, 32);
@@ -1777,8 +1879,9 @@ static u8 sc_send_public_key(struct smp_chan *smp)
}
}
+done:
SMP_DBG("Local Public Key X: %32phN", smp->local_pk);
- SMP_DBG("Local Public Key Y: %32phN", &smp->local_pk[32]);
+ SMP_DBG("Local Public Key Y: %32phN", smp->local_pk + 32);
SMP_DBG("Local Private Key: %32phN", smp->local_sk);
smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk);
@@ -1813,9 +1916,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
auth = rsp->auth_req & AUTH_REQ_MASK(hdev);
- if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
return SMP_AUTH_REQUIREMENTS;
+ /* If the remote side's OOB flag is set it means it has
+ * successfully received our local OOB data - therefore set the
+ * flag to indicate that local OOB is in use.
+ */
+ if (rsp->oob_flag == SMP_OOB_PRESENT)
+ set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
+
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
@@ -1882,10 +1992,6 @@ static u8 sc_check_confirm(struct smp_chan *smp)
BT_DBG("");
- /* Public Key exchange must happen before any other steps */
- if (!test_bit(SMP_FLAG_REMOTE_PK, &smp->flags))
- return SMP_UNSPECIFIED;
-
if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
@@ -1898,6 +2004,47 @@ static u8 sc_check_confirm(struct smp_chan *smp)
return 0;
}
+/* Work-around for some implementations that incorrectly copy RFU bits
+ * from our security request and thereby create the impression that
+ * we're doing SC when in fact the remote doesn't support it.
+ */
+static int fixup_sc_false_positive(struct smp_chan *smp)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ struct smp_cmd_pairing *req, *rsp;
+ u8 auth;
+
+ /* The issue is only observed when we're in slave role */
+ if (hcon->out)
+ return SMP_UNSPECIFIED;
+
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
+ BT_ERR("Refusing SMP SC -> legacy fallback in SC-only mode");
+ return SMP_UNSPECIFIED;
+ }
+
+ BT_ERR("Trying to fall back to legacy SMP");
+
+ req = (void *) &smp->preq[1];
+ rsp = (void *) &smp->prsp[1];
+
+ /* Rebuild key dist flags which may have been cleared for SC */
+ smp->remote_key_dist = (req->init_key_dist & rsp->resp_key_dist);
+
+ auth = req->auth_req & AUTH_REQ_MASK(hdev);
+
+ if (tk_request(conn, 0, auth, rsp->io_capability, req->io_capability)) {
+ BT_ERR("Failed to fall back to legacy SMP");
+ return SMP_UNSPECIFIED;
+ }
+
+ clear_bit(SMP_FLAG_SC, &smp->flags);
+
+ return 0;
+}
+
static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct l2cap_chan *chan = conn->smp;
@@ -1911,8 +2058,19 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
skb_pull(skb, sizeof(smp->pcnf));
- if (test_bit(SMP_FLAG_SC, &smp->flags))
- return sc_check_confirm(smp);
+ if (test_bit(SMP_FLAG_SC, &smp->flags)) {
+ int ret;
+
+ /* Public Key exchange must happen before any other steps */
+ if (test_bit(SMP_FLAG_REMOTE_PK, &smp->flags))
+ return sc_check_confirm(smp);
+
+ BT_ERR("Unexpected SMP Pairing Confirm");
+
+ ret = fixup_sc_false_positive(smp);
+ if (ret)
+ return ret;
+ }
if (conn->hcon->out) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -1923,8 +2081,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
return smp_confirm(smp);
- else
- set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
+
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
return 0;
}
@@ -2083,7 +2241,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
auth = rp->auth_req & AUTH_REQ_MASK(hdev);
- if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
return SMP_AUTH_REQUIREMENTS;
if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
@@ -2104,7 +2262,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (!smp)
return SMP_UNSPECIFIED;
- if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) &&
+ if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
(auth & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
@@ -2138,7 +2296,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
chan = conn->smp;
- if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
+ if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
return 1;
if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
@@ -2167,7 +2325,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
authreq = seclevel_to_authreq(sec_level);
- if (test_bit(HCI_SC_ENABLED, &hcon->hdev->dev_flags))
+ if (hci_dev_test_flag(hcon->hdev, HCI_SC_ENABLED))
authreq |= SMP_AUTH_SC;
/* Require MITM if IO Capability allows or the security level
@@ -2352,7 +2510,10 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
if (csrk) {
- csrk->master = 0x01;
+ if (conn->hcon->sec_level > BT_SECURITY_MEDIUM)
+ csrk->type = MGMT_CSRK_REMOTE_AUTHENTICATED;
+ else
+ csrk->type = MGMT_CSRK_REMOTE_UNAUTHENTICATED;
memcpy(csrk->val, rp->csrk, sizeof(csrk->val));
}
smp->csrk = csrk;
@@ -2368,7 +2529,8 @@ static u8 sc_select_method(struct smp_chan *smp)
struct smp_cmd_pairing *local, *remote;
u8 local_mitm, remote_mitm, local_io, remote_io, method;
- if (test_bit(SMP_FLAG_OOB, &smp->flags))
+ if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags) ||
+ test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags))
return REQ_OOB;
/* The preq/prsp contain the raw Pairing Request/Response PDUs
@@ -2422,6 +2584,16 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(smp->remote_pk, key, 64);
+ if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) {
+ err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk,
+ smp->rr, 0, cfm.confirm_val);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+ return SMP_CONFIRM_FAILED;
+ }
+
/* Non-initiating device sends its public key after receiving
* the key from the initiating device.
*/
@@ -2432,7 +2604,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
}
SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk);
- SMP_DBG("Remote Public Key Y: %32phN", &smp->remote_pk[32]);
+ SMP_DBG("Remote Public Key Y: %32phN", smp->remote_pk + 32);
if (!ecdh_shared_secret(smp->remote_pk, smp->local_sk, smp->dhkey))
return SMP_UNSPECIFIED;
@@ -2470,14 +2642,6 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
}
if (smp->method == REQ_OOB) {
- err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk,
- smp->rr, 0, cfm.confirm_val);
- if (err)
- return SMP_UNSPECIFIED;
-
- if (memcmp(cfm.confirm_val, smp->pcnf, 16))
- return SMP_CONFIRM_FAILED;
-
if (hcon->out)
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
sizeof(smp->prnd), smp->prnd);
@@ -2550,6 +2714,8 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
put_unaligned_le32(hcon->passkey_notify, r);
+ else if (smp->method == REQ_OOB)
+ memcpy(r, smp->lr, 16);
err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r,
io_cap, remote_addr, local_addr, e);
@@ -2600,7 +2766,7 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
if (skb->len < 1)
return -EILSEQ;
- if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
+ if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) {
reason = SMP_PAIRING_NOTSUPP;
goto done;
}
@@ -2738,16 +2904,16 @@ static void bredr_pairing(struct l2cap_chan *chan)
return;
/* Secure Connections support must be enabled */
- if (!test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_SC_ENABLED))
return;
/* BR/EDR must use Secure Connections for SMP */
if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
- !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+ !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return;
/* If our LE support is not enabled don't do anything */
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
/* Don't bother if remote LE support is not enabled */
@@ -2851,7 +3017,7 @@ static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan,
return ERR_PTR(-ENOMEM);
skb->priority = HCI_PRIO_MAX;
- bt_cb(skb)->chan = chan;
+ bt_cb(skb)->l2cap.chan = chan;
return skb;
}
@@ -2924,51 +3090,63 @@ static const struct l2cap_ops smp_root_chan_ops = {
static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
{
struct l2cap_chan *chan;
- struct crypto_blkcipher *tfm_aes;
+ struct smp_dev *smp;
+ struct crypto_blkcipher *tfm_aes;
+ struct crypto_hash *tfm_cmac;
if (cid == L2CAP_CID_SMP_BREDR) {
- tfm_aes = NULL;
+ smp = NULL;
goto create_chan;
}
- tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0);
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (!smp)
+ return ERR_PTR(-ENOMEM);
+
+ tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm_aes)) {
- BT_ERR("Unable to create crypto context");
+ BT_ERR("Unable to create ECB crypto context");
+ kzfree(smp);
return ERR_CAST(tfm_aes);
}
+ tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm_cmac)) {
+ BT_ERR("Unable to create CMAC crypto context");
+ crypto_free_blkcipher(tfm_aes);
+ kzfree(smp);
+ return ERR_CAST(tfm_cmac);
+ }
+
+ smp->tfm_aes = tfm_aes;
+ smp->tfm_cmac = tfm_cmac;
+
create_chan:
chan = l2cap_chan_create();
if (!chan) {
- crypto_free_blkcipher(tfm_aes);
+ if (smp) {
+ crypto_free_blkcipher(smp->tfm_aes);
+ crypto_free_hash(smp->tfm_cmac);
+ kzfree(smp);
+ }
return ERR_PTR(-ENOMEM);
}
- chan->data = tfm_aes;
+ chan->data = smp;
l2cap_add_scid(chan, cid);
l2cap_chan_set_defaults(chan);
if (cid == L2CAP_CID_SMP) {
- /* If usage of static address is forced or if the devices
- * does not have a public address, then listen on the static
- * address.
- *
- * In case BR/EDR has been disabled on a dual-mode controller
- * and a static address has been configued, then listen on
- * the static address instead.
- */
- if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
- !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
- (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
- bacmp(&hdev->static_addr, BDADDR_ANY))) {
- bacpy(&chan->src, &hdev->static_addr);
- chan->src_type = BDADDR_LE_RANDOM;
- } else {
- bacpy(&chan->src, &hdev->bdaddr);
+ u8 bdaddr_type;
+
+ hci_copy_identity_address(hdev, &chan->src, &bdaddr_type);
+
+ if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
chan->src_type = BDADDR_LE_PUBLIC;
- }
+ else
+ chan->src_type = BDADDR_LE_RANDOM;
} else {
bacpy(&chan->src, &hdev->bdaddr);
chan->src_type = BDADDR_BREDR;
@@ -2987,14 +3165,18 @@ create_chan:
static void smp_del_chan(struct l2cap_chan *chan)
{
- struct crypto_blkcipher *tfm_aes;
+ struct smp_dev *smp;
BT_DBG("chan %p", chan);
- tfm_aes = chan->data;
- if (tfm_aes) {
+ smp = chan->data;
+ if (smp) {
chan->data = NULL;
- crypto_free_blkcipher(tfm_aes);
+ if (smp->tfm_aes)
+ crypto_free_blkcipher(smp->tfm_aes);
+ if (smp->tfm_cmac)
+ crypto_free_hash(smp->tfm_cmac);
+ kzfree(smp);
}
l2cap_chan_put(chan);
@@ -3007,7 +3189,7 @@ static ssize_t force_bredr_smp_read(struct file *file,
struct hci_dev *hdev = file->private_data;
char buf[3];
- buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y': 'N';
+ buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -3029,7 +3211,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
if (strtobool(buf, &enable))
return -EINVAL;
- if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+ if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return -EALREADY;
if (enable) {
@@ -3048,7 +3230,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
smp_del_chan(chan);
}
- change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags);
+ hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP);
return count;
}
@@ -3367,6 +3549,21 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
return 0;
}
+static char test_smp_buffer[32];
+
+static ssize_t test_smp_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer,
+ strlen(test_smp_buffer));
+}
+
+static const struct file_operations test_smp_fops = {
+ .open = simple_open,
+ .read = test_smp_read,
+ .llseek = default_llseek,
+};
+
static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
struct crypto_hash *tfm_cmac)
{
@@ -3379,49 +3576,49 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
err = test_ah(tfm_aes);
if (err) {
BT_ERR("smp_ah test failed");
- return err;
+ goto done;
}
err = test_c1(tfm_aes);
if (err) {
BT_ERR("smp_c1 test failed");
- return err;
+ goto done;
}
err = test_s1(tfm_aes);
if (err) {
BT_ERR("smp_s1 test failed");
- return err;
+ goto done;
}
err = test_f4(tfm_cmac);
if (err) {
BT_ERR("smp_f4 test failed");
- return err;
+ goto done;
}
err = test_f5(tfm_cmac);
if (err) {
BT_ERR("smp_f5 test failed");
- return err;
+ goto done;
}
err = test_f6(tfm_cmac);
if (err) {
BT_ERR("smp_f6 test failed");
- return err;
+ goto done;
}
err = test_g2(tfm_cmac);
if (err) {
BT_ERR("smp_g2 test failed");
- return err;
+ goto done;
}
err = test_h6(tfm_cmac);
if (err) {
BT_ERR("smp_h6 test failed");
- return err;
+ goto done;
}
rettime = ktime_get();
@@ -3430,7 +3627,17 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
BT_INFO("SMP test passed in %llu usecs", duration);
- return 0;
+done:
+ if (!err)
+ snprintf(test_smp_buffer, sizeof(test_smp_buffer),
+ "PASS (%llu usecs)\n", duration);
+ else
+ snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n");
+
+ debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL,
+ &test_smp_fops);
+
+ return err;
}
int __init bt_selftest_smp(void)
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 60c5b73fcb4b..6cf872563ea7 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -188,6 +188,7 @@ int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
const bdaddr_t *bdaddr);
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
+int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]);
int smp_register(struct hci_dev *hdev);
void smp_unregister(struct hci_dev *hdev);
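
Much of the smp.c churn above is a mechanical conversion from open-coded test_bit()/change_bit() on hdev->dev_flags (and the old dbg_flags word) to the hci_dev_test_flag()/hci_dev_change_flag() helpers introduced by this series. Side by side, the two forms look like this (both taken from the hunks above; the helpers are assumed to wrap the same bit operations over the device flag bitmap):

	/* before */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		authreq |= SMP_AUTH_SC;
	change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags);

	/* after */
	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		authreq |= SMP_AUTH_SC;
	hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP);
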
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ffd379db5938..4ff77a16956c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -25,6 +25,9 @@
#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
+const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
+EXPORT_SYMBOL_GPL(nf_br_ops);
+
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -33,16 +36,15 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
+ const struct nf_br_ops *nf_ops;
u16 vid = 0;
rcu_read_lock();
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
- br_nf_pre_routing_finish_bridge_slow(skb);
+ nf_ops = rcu_dereference(nf_br_ops);
+ if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
rcu_read_unlock();
return NETDEV_TX_OK;
}
-#endif
u64_stats_update_begin(&brstats->syncp);
brstats->tx_packets++;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index f96933a823e3..e97572b5d2cc 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,11 +35,9 @@ static inline int should_deliver(const struct net_bridge_port *p,
p->state == BR_STATE_FORWARDING;
}
-int br_dev_queue_push_xmit(struct sk_buff *skb)
+int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
{
- /* ip_fragment doesn't copy the MAC header */
- if (nf_bridge_maybe_copy_header(skb) ||
- !is_skb_forwardable(skb->dev, skb)) {
+ if (!is_skb_forwardable(skb->dev, skb)) {
kfree_skb(skb);
} else {
skb_push(skb, ETH_HLEN);
@@ -51,9 +49,10 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
-int br_forward_finish(struct sk_buff *skb)
+int br_forward_finish(struct sock *sk, struct sk_buff *skb)
{
- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+ return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb,
+ NULL, skb->dev,
br_dev_queue_push_xmit);
}
@@ -77,7 +76,8 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
return;
}
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
+ NULL, skb->dev,
br_forward_finish);
}
@@ -98,7 +98,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
skb->dev = to->dev;
skb_forward_csum(skb);
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb,
+ indev, skb->dev,
br_forward_finish);
}
@@ -188,6 +189,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
/* Do not flood to ports that enable proxy ARP */
if (p->flags & BR_PROXYARP)
continue;
+ if ((p->flags & BR_PROXYARP_WIFI) &&
+ BR_INPUT_SKB_CB(skb)->proxyarp_replied)
+ continue;
prev = maybe_deliver(prev, p, skb, __packet_hook);
if (IS_ERR(prev))
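
The br_forward.c hunks above track the netfilter okfn signature change in this series: continuation functions now take the originating struct sock as their first argument, and NF_HOOK() passes it through explicitly. Schematically, old versus new form as seen in this file:

	/* old */
	int br_forward_finish(struct sk_buff *skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);

	/* new */
	int br_forward_finish(struct sock *sk, struct sk_buff *skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
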
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index e2aa7be3a847..f921a5dce22d 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -55,12 +55,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
if (!skb)
return NET_RX_DROP;
- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
- netif_receive_skb);
+ return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
+ indev, NULL,
+ netif_receive_skb_sk);
}
static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
- u16 vid)
+ u16 vid, struct net_bridge_port *p)
{
struct net_device *dev = br->dev;
struct neighbour *n;
@@ -68,6 +69,8 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
u8 *arpptr, *sha;
__be32 sip, tip;
+ BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
+
if (dev->flags & IFF_NOARP)
return;
@@ -105,16 +108,19 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
}
f = __br_fdb_get(br, n->ha, vid);
- if (f)
+ if (f && ((p->flags & BR_PROXYARP) ||
+ (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) {
arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
sha, n->ha, sha);
+ BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
+ }
neigh_release(n);
}
}
/* note: already called with rcu_read_lock */
-int br_handle_frame_finish(struct sk_buff *skb)
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
@@ -153,12 +159,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL;
- if (is_broadcast_ether_addr(dest)) {
- if (IS_ENABLED(CONFIG_INET) &&
- p->flags & BR_PROXYARP &&
- skb->protocol == htons(ETH_P_ARP))
- br_do_proxy_arp(skb, br, vid);
+ if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
+ br_do_proxy_arp(skb, br, vid, p);
+ if (is_broadcast_ether_addr(dest)) {
skb2 = skb;
unicast = false;
} else if (is_multicast_ether_addr(dest)) {
@@ -204,7 +208,7 @@ drop:
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct sk_buff *skb)
+static int br_handle_local_finish(struct sock *sk, struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
u16 vid = 0;
@@ -274,8 +278,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
}
/* Deliver packet to local host only */
- if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
- NULL, br_handle_local_finish)) {
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
+ skb->dev, NULL, br_handle_local_finish)) {
return RX_HANDLER_CONSUMED; /* consumed by filter */
} else {
*pskb = skb;
@@ -299,7 +303,8 @@ forward:
if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb,
+ skb->dev, NULL,
br_handle_frame_finish);
break;
default:
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 409608960899..e29ad70b3000 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
struct br_port_msg *bpm;
struct nlattr *nest, *nest2;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c465876c7861..4b6722f8f179 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -814,7 +814,8 @@ static void __br_multicast_send_query(struct net_bridge *br,
if (port) {
skb->dev = port->dev;
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
+ NULL, skb->dev,
br_dev_queue_push_xmit);
} else {
br_multicast_select_own_querier(br, ip, skb);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 0ee453fad3de..ab55e2472beb 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -37,17 +37,16 @@
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
-#define skb_origaddr(skb) (((struct bridge_skb_cb *) \
- (skb->nf_bridge->data))->daddr.ipv4)
-#define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr)
-#define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr)
-
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
@@ -112,6 +111,24 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
pppoe_proto(skb) == htons(PPP_IPV6) && \
brnf_filter_pppoe_tagged)
+/* largest possible L2 header, see br_nf_dev_queue_xmit() */
+#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+struct brnf_frag_data {
+ char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
+ u8 encap_size;
+ u8 size;
+};
+
+static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
+#endif
+
+static struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb)
+{
+ return skb->nf_bridge;
+}
+
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
struct net_bridge_port *port;
@@ -154,6 +171,18 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
return nf_bridge;
}
+static unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case __cpu_to_be16(ETH_P_8021Q):
+ return VLAN_HLEN;
+ case __cpu_to_be16(ETH_P_PPP_SES):
+ return PPPOE_SES_HLEN;
+ default:
+ return 0;
+ }
+}
+
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
@@ -178,14 +207,6 @@ static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
skb->network_header += len;
}
-static inline void nf_bridge_save_header(struct sk_buff *skb)
-{
- int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
-
- skb_copy_from_linear_data_offset(skb, -header_size,
- skb->nf_bridge->data, header_size);
-}
-
/* When handing a packet over to the IP layer
* check whether we have a skb that is in the
* expected format
@@ -239,17 +260,31 @@ drop:
return -1;
}
+static void nf_bridge_update_protocol(struct sk_buff *skb)
+{
+ switch (skb->nf_bridge->orig_proto) {
+ case BRNF_PROTO_8021Q:
+ skb->protocol = htons(ETH_P_8021Q);
+ break;
+ case BRNF_PROTO_PPPOE:
+ skb->protocol = htons(ETH_P_PPP_SES);
+ break;
+ case BRNF_PROTO_UNCHANGED:
+ break;
+ }
+}
+
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
* bridge PRE_ROUTING hook. */
-static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
+static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct rtable *rt;
- if (nf_bridge->mask & BRNF_PKT_TYPE) {
+ if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
- nf_bridge->mask ^= BRNF_PKT_TYPE;
+ nf_bridge->pkt_otherhost = false;
}
nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
@@ -263,7 +298,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
+ skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
@@ -274,9 +310,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
* don't, we use the neighbour framework to find out. In both cases, we make
* sure that br_handle_frame_finish() is called afterwards.
*/
-static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
+static int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct neighbour *neigh;
struct dst_entry *dst;
@@ -286,12 +321,13 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
dst = skb_dst(skb);
neigh = dst_neigh_lookup_skb(dst, skb);
if (neigh) {
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
int ret;
if (neigh->hh.hh_len) {
neigh_hh_bridge(&neigh->hh, skb);
skb->dev = nf_bridge->physindev;
- ret = br_handle_frame_finish(skb);
+ ret = br_handle_frame_finish(sk, skb);
} else {
/* the neighbour function below overwrites the complete
* MAC header, so we save the Ethernet source address and
@@ -299,7 +335,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
*/
skb_copy_from_linear_data_offset(skb,
-(ETH_HLEN-ETH_ALEN),
- skb->nf_bridge->data,
+ nf_bridge->neigh_header,
ETH_HLEN-ETH_ALEN);
/* tell br_dev_xmit to continue with forwarding */
nf_bridge->mask |= BRNF_BRIDGED_DNAT;
@@ -314,6 +350,22 @@ free_skb:
return 0;
}
+static bool dnat_took_place(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || nf_ct_is_untracked(ct))
+ return false;
+
+ return test_bit(IPS_DST_NAT_BIT, &ct->status);
+#else
+ return false;
+#endif
+}
+
/* This requires some explaining. If DNAT has taken place,
* we will need to fix up the destination Ethernet address.
*
@@ -352,11 +404,11 @@ free_skb:
* device, we proceed as if ip_route_input() succeeded. If it differs from the
* logical bridge port or if ip_route_output_key() fails we drop the packet.
*/
-static int br_nf_pre_routing_finish(struct sk_buff *skb)
+static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct rtable *rt;
int err;
int frag_max_size;
@@ -364,9 +416,9 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
frag_max_size = IPCB(skb)->frag_max_size;
BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
- if (nf_bridge->mask & BRNF_PKT_TYPE) {
+ if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
- nf_bridge->mask ^= BRNF_PKT_TYPE;
+ nf_bridge->pkt_otherhost = false;
}
nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
if (dnat_took_place(skb)) {
@@ -405,7 +457,7 @@ bridged_dnat:
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE,
NF_BR_PRE_ROUTING,
- skb, skb->dev, NULL,
+ sk, skb, skb->dev, NULL,
br_nf_pre_routing_finish_bridge,
1);
return 0;
@@ -425,7 +477,8 @@ bridged_dnat:
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
+ skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
@@ -448,20 +501,21 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
- nf_bridge->mask |= BRNF_PKT_TYPE;
+ nf_bridge->pkt_otherhost = true;
}
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
skb->dev = brnf_get_logical_dev(skb, skb->dev);
+
if (skb->protocol == htons(ETH_P_8021Q))
- nf_bridge->mask |= BRNF_8021Q;
+ nf_bridge->orig_proto = BRNF_PROTO_8021Q;
else if (skb->protocol == htons(ETH_P_PPP_SES))
- nf_bridge->mask |= BRNF_PPPoE;
+ nf_bridge->orig_proto = BRNF_PROTO_PPPOE;
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
@@ -527,9 +581,7 @@ bad:
* to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
const struct ipv6hdr *hdr;
u32 pkt_len;
@@ -563,7 +615,8 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
return NF_DROP;
skb->protocol = htons(ETH_P_IPV6);
- NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb,
+ skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
return NF_STOLEN;
@@ -577,9 +630,7 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
* address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct net_bridge_port *p;
struct net_bridge *br;
@@ -588,7 +639,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
if (unlikely(!pskb_may_pull(skb, len)))
return NF_DROP;
- p = br_port_get_rcu(in);
+ p = br_port_get_rcu(state->in);
if (p == NULL)
return NF_DROP;
br = p->br;
@@ -598,7 +649,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
- return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
+ return br_nf_pre_routing_ipv6(ops, skb, state);
}
if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -617,10 +668,11 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
return NF_DROP;
if (!setup_pre_routing(skb))
return NF_DROP;
- store_orig_dstaddr(skb);
+
skb->protocol = htons(ETH_P_IP);
- NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
+ skb->dev, NULL,
br_nf_pre_routing_finish);
return NF_STOLEN;
@@ -636,25 +688,30 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
* prevent this from happening. */
static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
br_drop_fake_rtable(skb);
return NF_ACCEPT;
}
/* PF_BRIDGE/FORWARD *************************************************/
-static int br_nf_forward_finish(struct sk_buff *skb)
+static int br_nf_forward_finish(struct sock *sk, struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct net_device *in;
if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
+ int frag_max_size;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ frag_max_size = IPCB(skb)->frag_max_size;
+ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
+ }
+
in = nf_bridge->physindev;
- if (nf_bridge->mask & BRNF_PKT_TYPE) {
+ if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
- nf_bridge->mask ^= BRNF_PKT_TYPE;
+ nf_bridge->pkt_otherhost = false;
}
nf_bridge_update_protocol(skb);
} else {
@@ -662,8 +719,8 @@ static int br_nf_forward_finish(struct sk_buff *skb)
}
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
- skb->dev, br_forward_finish, 1);
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, sk, skb,
+ in, skb->dev, br_forward_finish, 1);
return 0;
}
@@ -675,9 +732,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
* bridge ports. */
static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nf_bridge_info *nf_bridge;
struct net_device *parent;
@@ -691,7 +746,11 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
if (!nf_bridge_unshare(skb))
return NF_DROP;
- parent = bridge_parent(out);
+ nf_bridge = nf_bridge_info_get(skb);
+ if (!nf_bridge)
+ return NF_DROP;
+
+ parent = bridge_parent(state->out);
if (!parent)
return NF_DROP;
@@ -704,40 +763,42 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
nf_bridge_pull_encap_header(skb);
- nf_bridge = skb->nf_bridge;
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
- nf_bridge->mask |= BRNF_PKT_TYPE;
+ nf_bridge->pkt_otherhost = true;
}
- if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
- return NF_DROP;
+ if (pf == NFPROTO_IPV4) {
+ int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
+
+ if (br_parse_ip_options(skb))
+ return NF_DROP;
+
+ IPCB(skb)->frag_max_size = frag_max;
+ }
- /* The physdev module checks on this */
- nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
- NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
- br_nf_forward_finish);
+ NF_HOOK(pf, NF_INET_FORWARD, NULL, skb,
+ brnf_get_logical_dev(skb, state->in),
+ parent, br_nf_forward_finish);
return NF_STOLEN;
}
static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct net_bridge_port *p;
struct net_bridge *br;
struct net_device **d = (struct net_device **)(skb->cb);
- p = br_port_get_rcu(out);
+ p = br_port_get_rcu(state->out);
if (p == NULL)
return NF_ACCEPT;
br = p->br;
@@ -756,55 +817,93 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
nf_bridge_push_encap_header(skb);
return NF_ACCEPT;
}
- *d = (struct net_device *)in;
- NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
- (struct net_device *)out, br_nf_forward_finish);
+ *d = state->in;
+ NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->sk, skb,
+ state->in, state->out, br_nf_forward_finish);
return NF_STOLEN;
}
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
-static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
+{
+ struct brnf_frag_data *data;
+ int err;
+
+ data = this_cpu_ptr(&brnf_frag_data_storage);
+ err = skb_cow_head(skb, data->size);
+
+ if (err) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
+ __skb_push(skb, data->encap_size);
+
+ return br_dev_queue_push_xmit(sk, skb);
+}
+
+static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
{
int ret;
int frag_max_size;
+ unsigned int mtu_reserved;
+
+ if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
+ return br_dev_queue_push_xmit(sk, skb);
+ mtu_reserved = nf_bridge_mtu_reduction(skb);
/* This is wrong! We should preserve the original fragment
* boundaries by preserving frag_list rather than refragmenting.
*/
- if (skb->protocol == htons(ETH_P_IP) &&
- skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
- !skb_is_gso(skb)) {
+ if (skb->len + mtu_reserved > skb->dev->mtu) {
+ struct brnf_frag_data *data;
+
frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
if (br_parse_ip_options(skb))
/* Drop invalid packet */
return NF_DROP;
IPCB(skb)->frag_max_size = frag_max_size;
- ret = ip_fragment(skb, br_dev_queue_push_xmit);
- } else
- ret = br_dev_queue_push_xmit(skb);
+
+ nf_bridge_update_protocol(skb);
+
+ data = this_cpu_ptr(&brnf_frag_data_storage);
+ data->encap_size = nf_bridge_encap_header_len(skb);
+ data->size = ETH_HLEN + data->encap_size;
+
+ skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+ data->size);
+
+ ret = ip_fragment(sk, skb, br_nf_push_frag_xmit);
+ } else {
+ ret = br_dev_queue_push_xmit(sk, skb);
+ }
return ret;
}
#else
-static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
{
- return br_dev_queue_push_xmit(skb);
+ return br_dev_queue_push_xmit(sk, skb);
}
#endif
/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct net_device *realoutdev = bridge_parent(skb->dev);
u_int8_t pf;
- if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
+ /* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
+ * on a bridge, but was delivered locally and is now being routed:
+ *
+ * POST_ROUTING was already invoked from the ip stack.
+ */
+ if (!nf_bridge || !nf_bridge->physoutdev)
return NF_ACCEPT;
if (!realoutdev)
@@ -821,17 +920,17 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
* about the value of skb->pkt_type. */
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
- nf_bridge->mask |= BRNF_PKT_TYPE;
+ nf_bridge->pkt_otherhost = true;
}
nf_bridge_pull_encap_header(skb);
- nf_bridge_save_header(skb);
if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
- NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
+ NF_HOOK(pf, NF_INET_POST_ROUTING, state->sk, skb,
+ NULL, realoutdev,
br_nf_dev_queue_xmit);
return NF_STOLEN;
@@ -842,9 +941,7 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
* for the second time. */
static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
if (skb->nf_bridge &&
!(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
@@ -854,6 +951,44 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
return NF_ACCEPT;
}
+/* This is called when br_netfilter has called into iptables/netfilter,
+ * and DNAT has taken place on a bridge-forwarded packet.
+ *
+ * neigh->output has created a new MAC header, with local br0 MAC
+ * as saddr.
+ *
+ * This restores the original MAC saddr of the bridged packet
+ * before invoking bridge forward logic to transmit the packet.
+ */
+static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+{
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ skb_pull(skb, ETH_HLEN);
+ nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+
+ BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
+
+ skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
+ nf_bridge->neigh_header,
+ ETH_HLEN - ETH_ALEN);
+ skb->dev = nf_bridge->physindev;
+ br_handle_frame_finish(NULL, skb);
+}
+
+static int br_nf_dev_xmit(struct sk_buff *skb)
+{
+ if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+ br_nf_pre_routing_finish_bridge_slow(skb);
+ return 1;
+ }
+ return 0;
+}
+
+static const struct nf_br_ops br_ops = {
+ .br_dev_xmit_hook = br_nf_dev_xmit,
+};
+
void br_netfilter_enable(void)
{
}
@@ -991,12 +1126,14 @@ static int __init br_netfilter_init(void)
return -ENOMEM;
}
#endif
+ RCU_INIT_POINTER(nf_br_ops, &br_ops);
printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0;
}
static void __exit br_netfilter_fini(void)
{
+ RCU_INIT_POINTER(nf_br_ops, NULL);
nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(brnf_sysctl_header);
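
Editorial aside: the br_netfilter hunks above fold the old per-callback in/out/okfn arguments into struct nf_hook_state and pass the socket through NF_HOOK(). A minimal sketch of the resulting hook shape, using only the state fields visible in these hunks (state->in, state->out, state->sk); the function name is illustrative and not part of the patch.

    /* Hedged sketch, not from the patch: a bridge hook after the conversion,
     * reading its devices and socket from struct nf_hook_state instead of
     * separate in/out/okfn parameters.
     */
    static unsigned int example_br_hook(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
                                        const struct nf_hook_state *state)
    {
            struct net_bridge_port *p = br_port_get_rcu(state->out);

            if (!p)
                    return NF_ACCEPT;

            /* okfn now takes (struct sock *, struct sk_buff *), matching the
             * updated br_forward_finish() prototype later in this series.
             */
            NF_HOOK(NFPROTO_BRIDGE, ops->hooknum, state->sk, skb,
                    state->in, state->out, br_forward_finish);
            return NF_STOLEN;
    }
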
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4fbcea0e7ecb..4b5c236998ff 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -22,6 +22,85 @@
#include "br_private.h"
#include "br_private_stp.h"
+static int br_get_num_vlan_infos(const struct net_port_vlans *pv,
+ u32 filter_mask)
+{
+ u16 vid_range_start = 0, vid_range_end = 0;
+ u16 vid_range_flags = 0;
+ u16 pvid, vid, flags;
+ int num_vlans = 0;
+
+ if (filter_mask & RTEXT_FILTER_BRVLAN)
+ return pv->num_vlans;
+
+ if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
+ return 0;
+
+ /* Count number of vlan info's
+ */
+ pvid = br_get_pvid(pv);
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ flags = 0;
+ if (vid == pvid)
+ flags |= BRIDGE_VLAN_INFO_PVID;
+
+ if (test_bit(vid, pv->untagged_bitmap))
+ flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+ if (vid_range_start == 0) {
+ goto initvars;
+ } else if ((vid - vid_range_end) == 1 &&
+ flags == vid_range_flags) {
+ vid_range_end = vid;
+ continue;
+ } else {
+ if ((vid_range_end - vid_range_start) > 0)
+ num_vlans += 2;
+ else
+ num_vlans += 1;
+ }
+initvars:
+ vid_range_start = vid;
+ vid_range_end = vid;
+ vid_range_flags = flags;
+ }
+
+ if (vid_range_start != 0) {
+ if ((vid_range_end - vid_range_start) > 0)
+ num_vlans += 2;
+ else
+ num_vlans += 1;
+ }
+
+ return num_vlans;
+}
+
+static size_t br_get_link_af_size_filtered(const struct net_device *dev,
+ u32 filter_mask)
+{
+ struct net_port_vlans *pv;
+ int num_vlan_infos;
+
+ rcu_read_lock();
+ if (br_port_exists(dev))
+ pv = nbp_get_vlan_info(br_port_get_rcu(dev));
+ else if (dev->priv_flags & IFF_EBRIDGE)
+ pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
+ else
+ pv = NULL;
+ if (pv)
+ num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask);
+ else
+ num_vlan_infos = 0;
+ rcu_read_unlock();
+
+ if (!num_vlan_infos)
+ return 0;
+
+ /* Each VLAN is returned in bridge_vlan_info along with flags */
+ return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
+}
+
static inline size_t br_port_info_size(void)
{
return nla_total_size(1) /* IFLA_BRPORT_STATE */
@@ -36,7 +115,7 @@ static inline size_t br_port_info_size(void)
+ 0;
}
-static inline size_t br_nlmsg_size(void)
+static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -45,7 +124,9 @@ static inline size_t br_nlmsg_size(void)
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(1) /* IFLA_OPERSTATE */
- + nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */
+ + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
+ + nla_total_size(br_get_link_af_size_filtered(dev,
+ filter_mask)); /* IFLA_AF_SPEC */
}
static int br_port_fill_attrs(struct sk_buff *skb,
@@ -62,7 +143,9 @@ static int br_port_fill_attrs(struct sk_buff *skb,
nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
- nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)))
+ nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
+ nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
+ !!(p->flags & BR_PROXYARP_WIFI)))
return -EMSGSIZE;
return 0;
@@ -222,8 +305,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
- (dev->ifindex != dev->iflink &&
- nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+ (dev->ifindex != dev_get_iflink(dev) &&
+ nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
goto nla_put_failure;
if (event == RTM_NEWLINK && port) {
@@ -280,6 +363,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
struct net *net;
struct sk_buff *skb;
int err = -ENOBUFS;
+ u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
if (!port)
return;
@@ -288,11 +372,11 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
br_debug(port->br, "port %u(%s) event %d\n",
(unsigned int)port->port_no, port->dev->name, event);
- skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
+ skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
if (skb == NULL)
goto errout;
- err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev);
+ err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
if (err < 0) {
/* -EMSGSIZE implies BUG in br_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -310,7 +394,7 @@ errout:
* Dump information about all ports, in response to GETLINK
*/
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask)
+ struct net_device *dev, u32 filter_mask, int nlflags)
{
struct net_bridge_port *port = br_port_get_rtnl(dev);
@@ -318,7 +402,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
return 0;
- return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
+ return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
filter_mask, dev);
}
@@ -471,6 +555,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
+ br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
if (tb[IFLA_BRPORT_COST]) {
err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
@@ -648,6 +733,9 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
[IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
[IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
+ [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
+ [IFLA_BR_STP_STATE] = { .type = NLA_U32 },
+ [IFLA_BR_PRIORITY] = { .type = NLA_U16 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -677,6 +765,24 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BR_AGEING_TIME]) {
+ u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]);
+
+ br->ageing_time = clock_t_to_jiffies(ageing_time);
+ }
+
+ if (data[IFLA_BR_STP_STATE]) {
+ u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
+
+ br_stp_set_enabled(br, stp_enabled);
+ }
+
+ if (data[IFLA_BR_PRIORITY]) {
+ u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
+
+ br_stp_set_bridge_priority(br, priority);
+ }
+
return 0;
}
@@ -685,6 +791,9 @@ static size_t br_get_size(const struct net_device *brdev)
return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */
+ nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */
0;
}
@@ -694,10 +803,16 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
u32 hello_time = jiffies_to_clock_t(br->hello_time);
u32 age_time = jiffies_to_clock_t(br->max_age);
+ u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
+ u32 stp_enabled = br->stp_enabled;
+ u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
- nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time))
+ nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
+ nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
+ nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
+ nla_put_u16(skb, IFLA_BR_PRIORITY, priority))
return -EMSGSIZE;
return 0;
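
Editorial aside: the br_netlink changes above size compressed VLAN dumps by collapsing runs of consecutive VIDs with identical flags into a range (two bridge_vlan_info entries) and counting isolated VIDs as one. A hedged, self-contained illustration of that counting rule, deliberately ignoring the per-VID PVID/untagged flag handling that can split ranges in the kernel version.

    /* Standalone illustration (not kernel code) of the range-collapsing count
     * used by br_get_num_vlan_infos() above.  vids must be sorted ascending
     * and are assumed to share the same flags.
     */
    static int count_compressed_vlan_infos(const unsigned short *vids, int n)
    {
            int start = -1, end = -1, num = 0, i;

            for (i = 0; i < n; i++) {
                    if (start < 0) {
                            start = end = vids[i];          /* open a range   */
                    } else if (vids[i] == end + 1) {
                            end = vids[i];                  /* extend it      */
                    } else {
                            num += (end > start) ? 2 : 1;   /* close it       */
                            start = end = vids[i];
                    }
            }
            if (start >= 0)
                    num += (end > start) ? 2 : 1;
            return num;     /* e.g. {1,2,3,10} -> 3: range 1-3 plus a lone 10 */
    }
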
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 387cb3bd017c..20cbb727df4d 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -54,7 +54,6 @@ static unsigned int fake_mtu(const struct dst_entry *dst)
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
- .protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
.redirect = fake_redirect,
.cow_metrics = fake_cow_metrics,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index de0919975a25..3362c29400f1 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -305,6 +305,7 @@ struct br_input_skb_cb {
#endif
u16 frag_max_size;
+ bool proxyarp_replied;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
bool vlan_filtered;
@@ -409,10 +410,10 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
/* br_forward.c */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
-int br_dev_queue_push_xmit(struct sk_buff *skb);
+int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb);
void br_forward(const struct net_bridge_port *to,
struct sk_buff *skb, struct sk_buff *skb0);
-int br_forward_finish(struct sk_buff *skb);
+int br_forward_finish(struct sock *sk, struct sk_buff *skb);
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
struct sk_buff *skb2, bool unicast);
@@ -430,7 +431,7 @@ void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
void br_manage_promisc(struct net_bridge *br);
/* br_input.c */
-int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
@@ -762,6 +763,11 @@ static inline int br_vlan_enabled(struct net_bridge *br)
}
#endif
+struct nf_br_ops {
+ int (*br_dev_xmit_hook)(struct sk_buff *skb);
+};
+extern const struct nf_br_ops __rcu *nf_br_ops;
+
/* br_netfilter.c */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int br_nf_core_init(void);
@@ -822,7 +828,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port);
int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
- u32 filter_mask);
+ u32 filter_mask, int nlflags);
#ifdef CONFIG_SYSFS
/* br_sysfs_if.c */
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index bdb459d21ad8..534fc4cd263e 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -54,8 +54,9 @@ static void br_send_bpdu(struct net_bridge_port *p,
skb_reset_mac_header(skb);
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
- dev_queue_xmit);
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
+ NULL, skb->dev,
+ dev_queue_xmit_sk);
}
static inline void br_set_ticks(unsigned char *dest, int j)
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 2de5d91199e8..4905845a94e9 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -171,6 +171,7 @@ BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
BRPORT_ATTR_FLAG(learning, BR_LEARNING);
BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
+BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -215,6 +216,7 @@ static const struct brport_attribute *brport_attrs[] = {
&brport_attr_multicast_fast_leave,
#endif
&brport_attr_proxyarp,
+ &brport_attr_proxyarp_wifi,
NULL
};
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index ce205aabf9c5..8a3f63b2e807 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -58,20 +58,18 @@ static const struct ebt_table frame_filter = {
static unsigned int
ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return ebt_do_table(ops->hooknum, skb, in, out,
- dev_net(in)->xt.frame_filter);
+ return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+ dev_net(state->in)->xt.frame_filter);
}
static unsigned int
ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return ebt_do_table(ops->hooknum, skb, in, out,
- dev_net(out)->xt.frame_filter);
+ return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+ dev_net(state->out)->xt.frame_filter);
}
static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index a0ac2984fb6c..c5ef5b1ab678 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -58,20 +58,18 @@ static struct ebt_table frame_nat = {
static unsigned int
ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return ebt_do_table(ops->hooknum, skb, in, out,
- dev_net(in)->xt.frame_nat);
+ return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+ dev_net(state->in)->xt.frame_nat);
}
static unsigned int
ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return ebt_do_table(ops->hooknum, skb, in, out,
- dev_net(out)->xt.frame_nat);
+ return ebt_do_table(ops->hooknum, skb, state->in, state->out,
+ dev_net(state->out)->xt.frame_nat);
}
static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 19473a9371b8..a343e62442b1 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -67,47 +67,43 @@ EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
if (nft_bridge_iphdr_validate(skb))
- nft_set_pktinfo_ipv4(pkt, ops, skb, in, out);
+ nft_set_pktinfo_ipv4(pkt, ops, skb, state);
else
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
}
static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_IPV6)
if (nft_bridge_ip6hdr_validate(skb) &&
- nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0)
+ nft_set_pktinfo_ipv6(pkt, ops, skb, state) == 0)
return;
#endif
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
}
static unsigned int
nft_do_chain_bridge(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
- nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+ nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, state);
break;
case htons(ETH_P_IPV6):
- nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+ nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, state);
break;
default:
- nft_set_pktinfo(&pkt, ops, skb, in, out);
+ nft_set_pktinfo(&pkt, ops, skb, state);
break;
}
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 4f02109d708f..a21269b83f16 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -19,12 +19,12 @@
#include "../br_private.h"
static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct net_device *in = pkt->in, *out = pkt->out;
- struct nft_data *dest = &data[priv->dreg];
+ u32 *dest = &regs->data[priv->dreg];
const struct net_bridge_port *p;
switch (priv->key) {
@@ -40,12 +40,12 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
goto out;
}
- strncpy((char *)dest->data, p->br->dev->name, sizeof(dest->data));
+ strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
return;
out:
- return nft_meta_get_eval(expr, data, pkt);
+ return nft_meta_get_eval(expr, regs, pkt);
err:
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
@@ -53,27 +53,21 @@ static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
- int err;
+ unsigned int len;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
case NFT_META_BRI_OIFNAME:
+ len = IFNAMSIZ;
break;
default:
return nft_meta_get_init(ctx, expr, tb);
}
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- return err;
-
- err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
- if (err < 0)
- return err;
-
- return 0;
+ priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, len);
}
static struct nft_expr_type nft_meta_bridge_type;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 3244aead0926..858d848564ee 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -21,6 +21,7 @@
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv6.h>
#include "../br_private.h"
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
@@ -36,7 +37,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
skb_pull(nskb, ETH_HLEN);
}
-static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
+/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
+ * or the bridge port (NF_BRIDGE PREROUTING).
+ */
+static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
{
struct sk_buff *nskb;
struct iphdr *niph;
@@ -65,11 +71,12 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
nft_reject_br_push_etherhdr(oldskb, nskb);
- br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+ br_deliver(br_port_get_rcu(dev), nskb);
}
-static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
- u8 code)
+static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
{
struct sk_buff *nskb;
struct iphdr *niph;
@@ -77,8 +84,9 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
unsigned int len;
void *payload;
__wsum csum;
+ u8 proto;
- if (!nft_bridge_iphdr_validate(oldskb))
+ if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
@@ -91,7 +99,17 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
if (!pskb_may_pull(oldskb, len))
return;
- if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
+ if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
+ return;
+
+ if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
+ ip_hdr(oldskb)->protocol == IPPROTO_UDP)
+ proto = ip_hdr(oldskb)->protocol;
+ else
+ proto = 0;
+
+ if (!skb_csum_unnecessary(oldskb) &&
+ nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
@@ -120,11 +138,13 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
nft_reject_br_push_etherhdr(oldskb, nskb);
- br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+ br_deliver(br_port_get_rcu(dev), nskb);
}
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
- struct sk_buff *oldskb, int hook)
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
{
struct sk_buff *nskb;
const struct tcphdr *oth;
@@ -152,12 +172,37 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net,
nft_reject_br_push_etherhdr(oldskb, nskb);
- br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+ br_deliver(br_port_get_rcu(dev), nskb);
+}
+
+static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int thoff;
+ __be16 fo;
+ u8 proto = ip6h->nexthdr;
+
+ if (skb->csum_bad)
+ return false;
+
+ if (skb_csum_unnecessary(skb))
+ return true;
+
+ if (ip6h->payload_len &&
+ pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
+ return false;
+
+ thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+ if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+ return false;
+
+ return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
static void nft_reject_br_send_v6_unreach(struct net *net,
- struct sk_buff *oldskb, int hook,
- u8 code)
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
{
struct sk_buff *nskb;
struct ipv6hdr *nip6h;
@@ -176,6 +221,9 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
if (!pskb_may_pull(oldskb, len))
return;
+ if (!reject6_br_csum_ok(oldskb, hook))
+ return;
+
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
@@ -205,12 +253,12 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
nft_reject_br_push_etherhdr(oldskb, nskb);
- br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+ br_deliver(br_port_get_rcu(dev), nskb);
}
static void nft_reject_bridge_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
@@ -224,16 +272,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
- nft_reject_br_send_v4_unreach(pkt->skb,
+ nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
- nft_reject_br_send_v4_tcp_reset(pkt->skb,
+ nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
- nft_reject_br_send_v4_unreach(pkt->skb,
+ nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
pkt->ops->hooknum,
nft_reject_icmp_code(priv->icmp_code));
break;
@@ -242,16 +290,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
case htons(ETH_P_IPV6):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
- nft_reject_br_send_v6_unreach(net, pkt->skb,
+ nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
- nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
+ nft_reject_br_send_v6_tcp_reset(net, pkt->skb, pkt->in,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
- nft_reject_br_send_v6_unreach(net, pkt->skb,
+ nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
pkt->ops->hooknum,
nft_reject_icmpv6_code(priv->icmp_code));
break;
@@ -262,7 +310,7 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
break;
}
out:
- data[NFT_REG_VERDICT].verdict = NF_DROP;
+ regs->verdict.code = NF_DROP;
}
static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
@@ -323,6 +371,8 @@ static int nft_reject_bridge_dump(struct sk_buff *skb,
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
+ default:
+ break;
}
return 0;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a6e2da0bc718..4ec0c803aef1 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -271,8 +271,8 @@ static void caif_check_flow_release(struct sock *sk)
* Copied from unix_dgram_recvmsg, but removed credit checks,
* changed locking, address handling and added MSG_TRUNC.
*/
-static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t len, int flags)
+static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m,
+ size_t len, int flags)
{
struct sock *sk = sock->sk;
@@ -343,9 +343,8 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
* Copied from unix_stream_recvmsg, but removed credit checks,
* changed locking calls, changed address handling.
*/
-static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size,
- int flags)
+static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct sock *sk = sock->sk;
int copied = 0;
@@ -511,8 +510,8 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
}
/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
-static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -586,8 +585,8 @@ err:
* Changed removed permission handling and added waiting for flow on
* and other minor adaptations.
*/
-static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int caif_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index ee9ffd956552..b523453585be 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -328,7 +328,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* containing the interface index.
*/
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
@@ -1231,8 +1231,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
/*
* bcm_sendmsg - process BCM commands (opcodes) from the userspace
*/
-static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size)
+static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
@@ -1535,8 +1534,8 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
return 0;
}
-static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
diff --git a/net/can/raw.c b/net/can/raw.c
index 00c13ef23661..31b9748cbb4e 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -74,6 +74,12 @@ MODULE_ALIAS("can-proto-1");
* storing the single filter in dfilter, to avoid using dynamic memory.
*/
+struct uniqframe {
+ ktime_t tstamp;
+ const struct sk_buff *skb;
+ unsigned int join_rx_count;
+};
+
struct raw_sock {
struct sock sk;
int bound;
@@ -82,10 +88,12 @@ struct raw_sock {
int loopback;
int recv_own_msgs;
int fd_frames;
+ int join_filters;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
struct can_filter *filter; /* pointer to filter(s) */
can_err_mask_t err_mask;
+ struct uniqframe __percpu *uniq;
};
/*
@@ -95,8 +103,8 @@ struct raw_sock {
*/
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) <= (sizeof(struct sockaddr_can) +
- sizeof(unsigned int)));
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
+ sizeof(unsigned int));
/* return pointer after struct sockaddr_can */
return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
@@ -123,6 +131,26 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (!ro->fd_frames && oskb->len != CAN_MTU)
return;
+ /* eliminate multiple filter matches for the same skb */
+ if (this_cpu_ptr(ro->uniq)->skb == oskb &&
+ ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
+ if (ro->join_filters) {
+ this_cpu_inc(ro->uniq->join_rx_count);
+ /* drop frame until all enabled filters matched */
+ if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
+ return;
+ } else {
+ return;
+ }
+ } else {
+ this_cpu_ptr(ro->uniq)->skb = oskb;
+ this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
+ this_cpu_ptr(ro->uniq)->join_rx_count = 1;
+ /* drop first frame to check all enabled filters? */
+ if (ro->join_filters && ro->count > 1)
+ return;
+ }
+
/* clone the given skb to be able to enqueue it into the rcv queue */
skb = skb_clone(oskb, GFP_ATOMIC);
if (!skb)
@@ -135,7 +163,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
* containing the interface index.
*/
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
@@ -296,6 +324,12 @@ static int raw_init(struct sock *sk)
ro->loopback = 1;
ro->recv_own_msgs = 0;
ro->fd_frames = 0;
+ ro->join_filters = 0;
+
+ /* alloc_percpu provides zero'ed memory */
+ ro->uniq = alloc_percpu(struct uniqframe);
+ if (unlikely(!ro->uniq))
+ return -ENOMEM;
/* set notifier */
ro->notifier.notifier_call = raw_notifier;
@@ -339,6 +373,7 @@ static int raw_release(struct socket *sock)
ro->ifindex = 0;
ro->bound = 0;
ro->count = 0;
+ free_percpu(ro->uniq);
sock_orphan(sk);
sock->sk = NULL;
@@ -583,6 +618,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
break;
+ case CAN_RAW_JOIN_FILTERS:
+ if (optlen != sizeof(ro->join_filters))
+ return -EINVAL;
+
+ if (copy_from_user(&ro->join_filters, optval, optlen))
+ return -EFAULT;
+
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -647,6 +691,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
val = &ro->fd_frames;
break;
+ case CAN_RAW_JOIN_FILTERS:
+ if (len > sizeof(int))
+ len = sizeof(int);
+ val = &ro->join_filters;
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -658,8 +708,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size)
+static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
@@ -728,8 +777,8 @@ send_failed:
return err;
}
-static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
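
Editorial aside: the can/raw.c hunks above add per-CPU duplicate tracking and a CAN_RAW_JOIN_FILTERS socket option so a frame is queued only after all configured filters have matched it. A hedged userspace sketch of enabling the option; the fallback value 6 is an assumption for builds whose headers predate this series.

    #include <sys/socket.h>
    #include <linux/can.h>
    #include <linux/can/raw.h>

    #ifndef CAN_RAW_JOIN_FILTERS
    #define CAN_RAW_JOIN_FILTERS 6          /* assumption for older headers */
    #endif

    /* Ask the kernel to deliver a frame only once every filter matched it. */
    static int enable_join_filters(int s)
    {
            int on = 1;

            return setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS,
                              &on, sizeof(on));
    }
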
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index ec565508e904..79e8f71aef5b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -490,6 +490,43 @@ out:
}
EXPORT_SYMBOL(ceph_parse_options);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
+{
+ struct ceph_options *opt = client->options;
+ size_t pos = m->count;
+
+ if (opt->name)
+ seq_printf(m, "name=%s,", opt->name);
+ if (opt->key)
+ seq_puts(m, "secret=<hidden>,");
+
+ if (opt->flags & CEPH_OPT_FSID)
+ seq_printf(m, "fsid=%pU,", &opt->fsid);
+ if (opt->flags & CEPH_OPT_NOSHARE)
+ seq_puts(m, "noshare,");
+ if (opt->flags & CEPH_OPT_NOCRC)
+ seq_puts(m, "nocrc,");
+ if (opt->flags & CEPH_OPT_NOMSGAUTH)
+ seq_puts(m, "nocephx_require_signatures,");
+ if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
+ seq_puts(m, "notcp_nodelay,");
+
+ if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
+ seq_printf(m, "mount_timeout=%d,", opt->mount_timeout);
+ if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
+ seq_printf(m, "osd_idle_ttl=%d,", opt->osd_idle_ttl);
+ if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
+ seq_printf(m, "osdkeepalivetimeout=%d,",
+ opt->osd_keepalive_timeout);
+
+ /* drop redundant comma */
+ if (m->count != pos)
+ m->count--;
+
+ return 0;
+}
+EXPORT_SYMBOL(ceph_print_client_options);
+
u64 ceph_client_id(struct ceph_client *client)
{
return client->monc.auth->global_id;
diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c
index 16bc199d9a62..9d84ce4ea0df 100644
--- a/net/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -17,6 +17,7 @@ const char *crush_bucket_alg_name(int alg)
case CRUSH_BUCKET_LIST: return "list";
case CRUSH_BUCKET_TREE: return "tree";
case CRUSH_BUCKET_STRAW: return "straw";
+ case CRUSH_BUCKET_STRAW2: return "straw2";
default: return "unknown";
}
}
@@ -40,6 +41,8 @@ int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
case CRUSH_BUCKET_STRAW:
return ((struct crush_bucket_straw *)b)->item_weights[p];
+ case CRUSH_BUCKET_STRAW2:
+ return ((struct crush_bucket_straw2 *)b)->item_weights[p];
}
return 0;
}
@@ -77,6 +80,14 @@ void crush_destroy_bucket_straw(struct crush_bucket_straw *b)
kfree(b);
}
+void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b)
+{
+ kfree(b->item_weights);
+ kfree(b->h.perm);
+ kfree(b->h.items);
+ kfree(b);
+}
+
void crush_destroy_bucket(struct crush_bucket *b)
{
switch (b->alg) {
@@ -92,6 +103,9 @@ void crush_destroy_bucket(struct crush_bucket *b)
case CRUSH_BUCKET_STRAW:
crush_destroy_bucket_straw((struct crush_bucket_straw *)b);
break;
+ case CRUSH_BUCKET_STRAW2:
+ crush_destroy_bucket_straw2((struct crush_bucket_straw2 *)b);
+ break;
}
}
diff --git a/net/ceph/crush/crush_ln_table.h b/net/ceph/crush/crush_ln_table.h
new file mode 100644
index 000000000000..6192c7fc958c
--- /dev/null
+++ b/net/ceph/crush/crush_ln_table.h
@@ -0,0 +1,166 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 Intel Corporation All Rights Reserved
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#if defined(__linux__)
+#include <linux/types.h>
+#elif defined(__FreeBSD__)
+#include <sys/types.h>
+#endif
+
+#ifndef CEPH_CRUSH_LN_H
+#define CEPH_CRUSH_LN_H
+
+
+// RH_LH_tbl[2*k] = 2^48/(1.0+k/128.0)
+// RH_LH_tbl[2*k+1] = 2^48*log2(1.0+k/128.0)
+
+static int64_t __RH_LH_tbl[128*2+2] = {
+ 0x0001000000000000ll, 0x0000000000000000ll, 0x0000fe03f80fe040ll, 0x000002dfca16dde1ll,
+ 0x0000fc0fc0fc0fc1ll, 0x000005b9e5a170b4ll, 0x0000fa232cf25214ll, 0x0000088e68ea899all,
+ 0x0000f83e0f83e0f9ll, 0x00000b5d69bac77ell, 0x0000f6603d980f67ll, 0x00000e26fd5c8555ll,
+ 0x0000f4898d5f85bcll, 0x000010eb389fa29fll, 0x0000f2b9d6480f2cll, 0x000013aa2fdd27f1ll,
+ 0x0000f0f0f0f0f0f1ll, 0x00001663f6fac913ll, 0x0000ef2eb71fc435ll, 0x00001918a16e4633ll,
+ 0x0000ed7303b5cc0fll, 0x00001bc84240adabll, 0x0000ebbdb2a5c162ll, 0x00001e72ec117fa5ll,
+ 0x0000ea0ea0ea0ea1ll, 0x00002118b119b4f3ll, 0x0000e865ac7b7604ll, 0x000023b9a32eaa56ll,
+ 0x0000e6c2b4481cd9ll, 0x00002655d3c4f15cll, 0x0000e525982af70dll, 0x000028ed53f307eell,
+ 0x0000e38e38e38e39ll, 0x00002b803473f7adll, 0x0000e1fc780e1fc8ll, 0x00002e0e85a9de04ll,
+ 0x0000e070381c0e08ll, 0x0000309857a05e07ll, 0x0000dee95c4ca038ll, 0x0000331dba0efce1ll,
+ 0x0000dd67c8a60dd7ll, 0x0000359ebc5b69d9ll, 0x0000dbeb61eed19dll, 0x0000381b6d9bb29bll,
+ 0x0000da740da740dbll, 0x00003a93dc9864b2ll, 0x0000d901b2036407ll, 0x00003d0817ce9cd4ll,
+ 0x0000d79435e50d7all, 0x00003f782d7204d0ll, 0x0000d62b80d62b81ll, 0x000041e42b6ec0c0ll,
+ 0x0000d4c77b03531ell, 0x0000444c1f6b4c2dll, 0x0000d3680d3680d4ll, 0x000046b016ca47c1ll,
+ 0x0000d20d20d20d21ll, 0x000049101eac381cll, 0x0000d0b69fcbd259ll, 0x00004b6c43f1366all,
+ 0x0000cf6474a8819fll, 0x00004dc4933a9337ll, 0x0000ce168a772509ll, 0x0000501918ec6c11ll,
+ 0x0000cccccccccccdll, 0x00005269e12f346ell, 0x0000cb8727c065c4ll, 0x000054b6f7f1325all,
+ 0x0000ca4587e6b750ll, 0x0000570068e7ef5all, 0x0000c907da4e8712ll, 0x000059463f919deell,
+ 0x0000c7ce0c7ce0c8ll, 0x00005b8887367433ll, 0x0000c6980c6980c7ll, 0x00005dc74ae9fbecll,
+ 0x0000c565c87b5f9ell, 0x00006002958c5871ll, 0x0000c4372f855d83ll, 0x0000623a71cb82c8ll,
+ 0x0000c30c30c30c31ll, 0x0000646eea247c5cll, 0x0000c1e4bbd595f7ll, 0x000066a008e4788cll,
+ 0x0000c0c0c0c0c0c1ll, 0x000068cdd829fd81ll, 0x0000bfa02fe80bfbll, 0x00006af861e5fc7dll,
+ 0x0000be82fa0be830ll, 0x00006d1fafdce20all, 0x0000bd6910470767ll, 0x00006f43cba79e40ll,
+ 0x0000bc52640bc527ll, 0x00007164beb4a56dll, 0x0000bb3ee721a54ell, 0x000073829248e961ll,
+ 0x0000ba2e8ba2e8bbll, 0x0000759d4f80cba8ll, 0x0000b92143fa36f6ll, 0x000077b4ff5108d9ll,
+ 0x0000b81702e05c0cll, 0x000079c9aa879d53ll, 0x0000b70fbb5a19bfll, 0x00007bdb59cca388ll,
+ 0x0000b60b60b60b61ll, 0x00007dea15a32c1bll, 0x0000b509e68a9b95ll, 0x00007ff5e66a0ffell,
+ 0x0000b40b40b40b41ll, 0x000081fed45cbccbll, 0x0000b30f63528918ll, 0x00008404e793fb81ll,
+ 0x0000b21642c8590cll, 0x000086082806b1d5ll, 0x0000b11fd3b80b12ll, 0x000088089d8a9e47ll,
+ 0x0000b02c0b02c0b1ll, 0x00008a064fd50f2all, 0x0000af3addc680b0ll, 0x00008c01467b94bbll,
+ 0x0000ae4c415c9883ll, 0x00008df988f4ae80ll, 0x0000ad602b580ad7ll, 0x00008fef1e987409ll,
+ 0x0000ac7691840ac8ll, 0x000091e20ea1393ell, 0x0000ab8f69e2835all, 0x000093d2602c2e5fll,
+ 0x0000aaaaaaaaaaabll, 0x000095c01a39fbd6ll, 0x0000a9c84a47a080ll, 0x000097ab43af59f9ll,
+ 0x0000a8e83f5717c1ll, 0x00009993e355a4e5ll, 0x0000a80a80a80a81ll, 0x00009b79ffdb6c8bll,
+ 0x0000a72f0539782all, 0x00009d5d9fd5010bll, 0x0000a655c4392d7cll, 0x00009f3ec9bcfb80ll,
+ 0x0000a57eb50295fbll, 0x0000a11d83f4c355ll, 0x0000a4a9cf1d9684ll, 0x0000a2f9d4c51039ll,
+ 0x0000a3d70a3d70a4ll, 0x0000a4d3c25e68dcll, 0x0000a3065e3fae7dll, 0x0000a6ab52d99e76ll,
+ 0x0000a237c32b16d0ll, 0x0000a8808c384547ll, 0x0000a16b312ea8fdll, 0x0000aa5374652a1cll,
+ 0x0000a0a0a0a0a0a1ll, 0x0000ac241134c4e9ll, 0x00009fd809fd80a0ll, 0x0000adf26865a8a1ll,
+ 0x00009f1165e72549ll, 0x0000afbe7fa0f04dll, 0x00009e4cad23dd60ll, 0x0000b1885c7aa982ll,
+ 0x00009d89d89d89d9ll, 0x0000b35004723c46ll, 0x00009cc8e160c3fcll, 0x0000b5157cf2d078ll,
+ 0x00009c09c09c09c1ll, 0x0000b6d8cb53b0call, 0x00009b4c6f9ef03bll, 0x0000b899f4d8ab63ll,
+ 0x00009a90e7d95bc7ll, 0x0000ba58feb2703all, 0x000099d722dabde6ll, 0x0000bc15edfeed32ll,
+ 0x0000991f1a515886ll, 0x0000bdd0c7c9a817ll, 0x00009868c809868dll, 0x0000bf89910c1678ll,
+ 0x000097b425ed097cll, 0x0000c1404eadf383ll, 0x000097012e025c05ll, 0x0000c2f5058593d9ll,
+ 0x0000964fda6c0965ll, 0x0000c4a7ba58377cll, 0x000095a02568095bll, 0x0000c65871da59ddll,
+ 0x000094f2094f2095ll, 0x0000c80730b00016ll, 0x0000944580944581ll, 0x0000c9b3fb6d0559ll,
+ 0x0000939a85c4093all, 0x0000cb5ed69565afll, 0x000092f113840498ll, 0x0000cd07c69d8702ll,
+ 0x0000924924924925ll, 0x0000ceaecfea8085ll, 0x000091a2b3c4d5e7ll, 0x0000d053f6d26089ll,
+ 0x000090fdbc090fdcll, 0x0000d1f73f9c70c0ll, 0x0000905a38633e07ll, 0x0000d398ae817906ll,
+ 0x00008fb823ee08fcll, 0x0000d53847ac00a6ll, 0x00008f1779d9fdc4ll, 0x0000d6d60f388e41ll,
+ 0x00008e78356d1409ll, 0x0000d8720935e643ll, 0x00008dda5202376all, 0x0000da0c39a54804ll,
+ 0x00008d3dcb08d3ddll, 0x0000dba4a47aa996ll, 0x00008ca29c046515ll, 0x0000dd3b4d9cf24bll,
+ 0x00008c08c08c08c1ll, 0x0000ded038e633f3ll, 0x00008b70344a139cll, 0x0000e0636a23e2eell,
+ 0x00008ad8f2fba939ll, 0x0000e1f4e5170d02ll, 0x00008a42f870566all, 0x0000e384ad748f0ell,
+ 0x000089ae4089ae41ll, 0x0000e512c6e54998ll, 0x0000891ac73ae982ll, 0x0000e69f35065448ll,
+ 0x0000888888888889ll, 0x0000e829fb693044ll, 0x000087f78087f781ll, 0x0000e9b31d93f98ell,
+ 0x00008767ab5f34e5ll, 0x0000eb3a9f019750ll, 0x000086d905447a35ll, 0x0000ecc08321eb30ll,
+ 0x0000864b8a7de6d2ll, 0x0000ee44cd59ffabll, 0x000085bf37612cefll, 0x0000efc781043579ll,
+ 0x0000853408534086ll, 0x0000f148a170700all, 0x000084a9f9c8084bll, 0x0000f2c831e44116ll,
+ 0x0000842108421085ll, 0x0000f446359b1353ll, 0x0000839930523fbfll, 0x0000f5c2afc65447ll,
+ 0x000083126e978d50ll, 0x0000f73da38d9d4all, 0x0000828cbfbeb9a1ll, 0x0000f8b7140edbb1ll,
+ 0x0000820820820821ll, 0x0000fa2f045e7832ll, 0x000081848da8faf1ll, 0x0000fba577877d7dll,
+ 0x0000810204081021ll, 0x0000fd1a708bbe11ll, 0x0000808080808081ll, 0x0000fe8df263f957ll,
+ 0x0000800000000000ll, 0x0000ffff00000000ll,
+ };
+
+
+ // LL_tbl[k] = 2^48*log2(1.0+k/2^15);
+static int64_t __LL_tbl[256] = {
+ 0x0000000000000000ull, 0x00000002e2a60a00ull, 0x000000070cb64ec5ull, 0x00000009ef50ce67ull,
+ 0x0000000cd1e588fdull, 0x0000000fb4747e9cull, 0x0000001296fdaf5eull, 0x0000001579811b58ull,
+ 0x000000185bfec2a1ull, 0x0000001b3e76a552ull, 0x0000001e20e8c380ull, 0x0000002103551d43ull,
+ 0x00000023e5bbb2b2ull, 0x00000026c81c83e4ull, 0x00000029aa7790f0ull, 0x0000002c8cccd9edull,
+ 0x0000002f6f1c5ef2ull, 0x0000003251662017ull, 0x0000003533aa1d71ull, 0x0000003815e8571aull,
+ 0x0000003af820cd26ull, 0x0000003dda537faeull, 0x00000040bc806ec8ull, 0x000000439ea79a8cull,
+ 0x0000004680c90310ull, 0x0000004962e4a86cull, 0x0000004c44fa8ab6ull, 0x0000004f270aaa06ull,
+ 0x0000005209150672ull, 0x00000054eb19a013ull, 0x00000057cd1876fdull, 0x0000005aaf118b4aull,
+ 0x0000005d9104dd0full, 0x0000006072f26c64ull, 0x0000006354da3960ull, 0x0000006636bc441aull,
+ 0x0000006918988ca8ull, 0x0000006bfa6f1322ull, 0x0000006edc3fd79full, 0x00000071be0ada35ull,
+ 0x000000749fd01afdull, 0x00000077818f9a0cull, 0x0000007a6349577aull, 0x0000007d44fd535eull,
+ 0x0000008026ab8dceull, 0x00000083085406e3ull, 0x00000085e9f6beb2ull, 0x00000088cb93b552ull,
+ 0x0000008bad2aeadcull, 0x0000008e8ebc5f65ull, 0x0000009170481305ull, 0x0000009451ce05d3ull,
+ 0x00000097334e37e5ull, 0x0000009a14c8a953ull, 0x0000009cf63d5a33ull, 0x0000009fd7ac4a9dull,
+ 0x000000a2b07f3458ull, 0x000000a59a78ea6aull, 0x000000a87bd699fbull, 0x000000ab5d2e8970ull,
+ 0x000000ae3e80b8e3ull, 0x000000b11fcd2869ull, 0x000000b40113d818ull, 0x000000b6e254c80aull,
+ 0x000000b9c38ff853ull, 0x000000bca4c5690cull, 0x000000bf85f51a4aull, 0x000000c2671f0c26ull,
+ 0x000000c548433eb6ull, 0x000000c82961b211ull, 0x000000cb0a7a664dull, 0x000000cdeb8d5b82ull,
+ 0x000000d0cc9a91c8ull, 0x000000d3ada20933ull, 0x000000d68ea3c1ddull, 0x000000d96f9fbbdbull,
+ 0x000000dc5095f744ull, 0x000000df31867430ull, 0x000000e2127132b5ull, 0x000000e4f35632eaull,
+ 0x000000e7d43574e6ull, 0x000000eab50ef8c1ull, 0x000000ed95e2be90ull, 0x000000f076b0c66cull,
+ 0x000000f35779106aull, 0x000000f6383b9ca2ull, 0x000000f918f86b2aull, 0x000000fbf9af7c1aull,
+ 0x000000feda60cf88ull, 0x00000101bb0c658cull, 0x000001049bb23e3cull, 0x000001077c5259afull,
+ 0x0000010a5cecb7fcull, 0x0000010d3d81593aull, 0x000001101e103d7full, 0x00000112fe9964e4ull,
+ 0x00000115df1ccf7eull, 0x00000118bf9a7d64ull, 0x0000011ba0126eadull, 0x0000011e8084a371ull,
+ 0x0000012160f11bc6ull, 0x000001244157d7c3ull, 0x0000012721b8d77full, 0x0000012a02141b10ull,
+ 0x0000012ce269a28eull, 0x0000012fc2b96e0full, 0x00000132a3037daaull, 0x000001358347d177ull,
+ 0x000001386386698cull, 0x0000013b43bf45ffull, 0x0000013e23f266e9ull, 0x00000141041fcc5eull,
+ 0x00000143e4477678ull, 0x00000146c469654bull, 0x00000149a48598f0ull, 0x0000014c849c117cull,
+ 0x0000014f64accf08ull, 0x0000015244b7d1a9ull, 0x0000015524bd1976ull, 0x0000015804bca687ull,
+ 0x0000015ae4b678f2ull, 0x0000015dc4aa90ceull, 0x00000160a498ee31ull, 0x0000016384819134ull,
+ 0x00000166646479ecull, 0x000001694441a870ull, 0x0000016c24191cd7ull, 0x0000016df6ca19bdull,
+ 0x00000171e3b6d7aaull, 0x00000174c37d1e44ull, 0x00000177a33dab1cull, 0x0000017a82f87e49ull,
+ 0x0000017d62ad97e2ull, 0x00000180425cf7feull, 0x00000182b07f3458ull, 0x0000018601aa8c19ull,
+ 0x00000188e148c046ull, 0x0000018bc0e13b52ull, 0x0000018ea073fd52ull, 0x000001918001065dull,
+ 0x000001945f88568bull, 0x000001973f09edf2ull, 0x0000019a1e85ccaaull, 0x0000019cfdfbf2c8ull,
+ 0x0000019fdd6c6063ull, 0x000001a2bcd71593ull, 0x000001a59c3c126eull, 0x000001a87b9b570bull,
+ 0x000001ab5af4e380ull, 0x000001ae3a48b7e5ull, 0x000001b11996d450ull, 0x000001b3f8df38d9ull,
+ 0x000001b6d821e595ull, 0x000001b9b75eda9bull, 0x000001bc96961803ull, 0x000001bf75c79de3ull,
+ 0x000001c254f36c51ull, 0x000001c534198365ull, 0x000001c81339e336ull, 0x000001caf2548bd9ull,
+ 0x000001cdd1697d67ull, 0x000001d0b078b7f5ull, 0x000001d38f823b9aull, 0x000001d66e86086dull,
+ 0x000001d94d841e86ull, 0x000001dc2c7c7df9ull, 0x000001df0b6f26dfull, 0x000001e1ea5c194eull,
+ 0x000001e4c943555dull, 0x000001e7a824db23ull, 0x000001ea8700aab5ull, 0x000001ed65d6c42bull,
+ 0x000001f044a7279dull, 0x000001f32371d51full, 0x000001f60236cccaull, 0x000001f8e0f60eb3ull,
+ 0x000001fbbfaf9af3ull, 0x000001fe9e63719eull, 0x000002017d1192ccull, 0x000002045bb9fe94ull,
+ 0x000002073a5cb50dull, 0x00000209c06e6212ull, 0x0000020cf791026aull, 0x0000020fd622997cull,
+ 0x00000212b07f3458ull, 0x000002159334a8d8ull, 0x0000021871b52150ull, 0x0000021b502fe517ull,
+ 0x0000021d6a73a78full, 0x000002210d144eeeull, 0x00000223eb7df52cull, 0x00000226c9e1e713ull,
+ 0x00000229a84024bbull, 0x0000022c23679b4eull, 0x0000022f64eb83a8ull, 0x000002324338a51bull,
+ 0x00000235218012a9ull, 0x00000237ffc1cc69ull, 0x0000023a2c3b0ea4ull, 0x0000023d13ee805bull,
+ 0x0000024035e9221full, 0x00000243788faf25ull, 0x0000024656b4e735ull, 0x00000247ed646bfeull,
+ 0x0000024c12ee3d98ull, 0x0000024ef1025c1aull, 0x00000251cf10c799ull, 0x0000025492644d65ull,
+ 0x000002578b1c85eeull, 0x0000025a6919d8f0ull, 0x0000025d13ee805bull, 0x0000026025036716ull,
+ 0x0000026296453882ull, 0x00000265e0d62b53ull, 0x00000268beb701f3ull, 0x0000026b9c92265eull,
+ 0x0000026d32f798a9ull, 0x00000271583758ebull, 0x000002743601673bull, 0x0000027713c5c3b0ull,
+ 0x00000279f1846e5full, 0x0000027ccf3d6761ull, 0x0000027e6580aecbull, 0x000002828a9e44b3ull,
+ 0x0000028568462932ull, 0x00000287bdbf5255ull, 0x0000028b2384de4aull, 0x0000028d13ee805bull,
+ 0x0000029035e9221full, 0x0000029296453882ull, 0x0000029699bdfb61ull, 0x0000029902a37aabull,
+ 0x0000029c54b864c9ull, 0x0000029deabd1083ull, 0x000002a20f9c0bb5ull, 0x000002a4c7605d61ull,
+ 0x000002a7bdbf5255ull, 0x000002a96056dafcull, 0x000002ac3daf14efull, 0x000002af1b019ecaull,
+ 0x000002b296453882ull, 0x000002b5d022d80full, 0x000002b8fa471cb3ull, 0x000002ba9012e713ull,
+ 0x000002bd6d4901ccull, 0x000002c04a796cf6ull, 0x000002c327a428a6ull, 0x000002c61a5e8f4cull,
+ 0x000002c8e1e891f6ull, 0x000002cbbf023fc2ull, 0x000002ce9c163e6eull, 0x000002d179248e13ull,
+ 0x000002d4562d2ec6ull, 0x000002d73330209dull, 0x000002da102d63b0ull, 0x000002dced24f814ull,
+};
+
+
+
+
+#endif
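
Editorial aside: the comments above define the fixed-point table contents as __RH_LH_tbl[2k] ~ 2^48/(1 + k/128), __RH_LH_tbl[2k+1] ~ 2^48*log2(1 + k/128) and __LL_tbl[k] ~ 2^48*log2(1 + k/2^15). A hedged userspace check that reproduces the first entries from those formulas; illustrative only, not part of the patch (link with -lm).

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            int k;

            for (k = 0; k < 4; k++) {
                    double rh = ldexp(1.0, 48) / (1.0 + k / 128.0);
                    double lh = ldexp(log2(1.0 + k / 128.0), 48);
                    double ll = ldexp(log2(1.0 + k / 32768.0), 48);

                    /* k=1 should print RH close to the table's 0xfe03f80fe040 */
                    printf("k=%d RH~%#llx LH~%#llx LL~%#llx\n", k,
                           (unsigned long long)rh, (unsigned long long)lh,
                           (unsigned long long)ll);
            }
            return 0;
    }
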
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index a1ef53c04415..5b47736d27d9 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -20,7 +20,7 @@
#include <linux/crush/crush.h>
#include <linux/crush/hash.h>
-#include <linux/crush/mapper.h>
+#include "crush_ln_table.h"
/*
* Implement the core CRUSH mapping algorithm.
@@ -238,6 +238,102 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
return bucket->h.items[high];
}
+// compute 2^44*log2(input+1)
+uint64_t crush_ln(unsigned xin)
+{
+ unsigned x=xin, x1;
+ int iexpon, index1, index2;
+ uint64_t RH, LH, LL, xl64, result;
+
+ x++;
+
+ // normalize input
+ iexpon = 15;
+ while(!(x&0x18000)) { x<<=1; iexpon--; }
+
+ index1 = (x>>8)<<1;
+ // RH ~ 2^56/index1
+ RH = __RH_LH_tbl[index1 - 256];
+ // LH ~ 2^48 * log2(index1/256)
+ LH = __RH_LH_tbl[index1 + 1 - 256];
+
+ // RH*x ~ 2^48 * (2^15 + xf), xf<2^8
+ xl64 = (int64_t)x * RH;
+ xl64 >>= 48;
+ x1 = xl64;
+
+ result = iexpon;
+ result <<= (12 + 32);
+
+ index2 = x1 & 0xff;
+ // LL ~ 2^48*log2(1.0+index2/2^15)
+ LL = __LL_tbl[index2];
+
+ LH = LH + LL;
+
+ LH >>= (48-12 - 32);
+ result += LH;
+
+ return result;
+}
+
+
+/*
+ * straw2
+ *
+ * for reference, see:
+ *
+ * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
+ *
+ */
+
+static int bucket_straw2_choose(struct crush_bucket_straw2 *bucket,
+ int x, int r)
+{
+ unsigned i, high = 0;
+ unsigned u;
+ unsigned w;
+ __s64 ln, draw, high_draw = 0;
+
+ for (i = 0; i < bucket->h.size; i++) {
+ w = bucket->item_weights[i];
+ if (w) {
+ u = crush_hash32_3(bucket->h.hash, x,
+ bucket->h.items[i], r);
+ u &= 0xffff;
+
+ /*
+ * for some reason slightly less than 0x10000 produces
+ * a slightly more accurate distribution... probably a
+ * rounding effect.
+ *
+ * the natural log lookup table maps [0,0xffff]
+ * (corresponding to real numbers [1/0x10000, 1] to
+ * [0, 0xffffffffffff] (corresponding to real numbers
+ * [-11.090355,0]).
+ */
+ ln = crush_ln(u) - 0x1000000000000ll;
+
+ /*
+ * divide by 16.16 fixed-point weight. note
+ * that the ln value is negative, so a larger
+ * weight means a larger (less negative) value
+ * for draw.
+ */
+ draw = div64_s64(ln, w);
+ } else {
+ draw = S64_MIN;
+ }
+
+ if (i == 0 || draw > high_draw) {
+ high = i;
+ high_draw = draw;
+ }
+ }
+ return bucket->h.items[high];
+}
+
+
static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
{
dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
@@ -255,12 +351,16 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
case CRUSH_BUCKET_STRAW:
return bucket_straw_choose((struct crush_bucket_straw *)in,
x, r);
+ case CRUSH_BUCKET_STRAW2:
+ return bucket_straw2_choose((struct crush_bucket_straw2 *)in,
+ x, r);
default:
dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
return in->items[0];
}
}
+
/*
* true if device is marked "out" (failed, fully offloaded)
* of the cluster
@@ -290,6 +390,7 @@ static int is_out(const struct crush_map *map,
* @type: the type of item to choose
* @out: pointer to output vector
* @outpos: our position in that vector
+ * @out_size: size of the out vector
* @tries: number of attempts to make
* @recurse_tries: number of attempts to have recursive chooseleaf make
* @local_retries: localized retries
@@ -304,6 +405,7 @@ static int crush_choose_firstn(const struct crush_map *map,
const __u32 *weight, int weight_max,
int x, int numrep, int type,
int *out, int outpos,
+ int out_size,
unsigned int tries,
unsigned int recurse_tries,
unsigned int local_retries,
@@ -322,6 +424,7 @@ static int crush_choose_firstn(const struct crush_map *map,
int item = 0;
int itemtype;
int collide, reject;
+ int count = out_size;
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n",
recurse_to_leaf ? "_LEAF" : "",
@@ -329,7 +432,7 @@ static int crush_choose_firstn(const struct crush_map *map,
tries, recurse_tries, local_retries, local_fallback_retries,
parent_r);
- for (rep = outpos; rep < numrep; rep++) {
+ for (rep = outpos; rep < numrep && count > 0 ; rep++) {
/* keep trying until we get a non-out, non-colliding item */
ftotal = 0;
skip_rep = 0;
@@ -403,7 +506,7 @@ static int crush_choose_firstn(const struct crush_map *map,
map->buckets[-1-item],
weight, weight_max,
x, outpos+1, 0,
- out2, outpos,
+ out2, outpos, count,
recurse_tries, 0,
local_retries,
local_fallback_retries,
@@ -463,6 +566,7 @@ reject:
dprintk("CHOOSE got %d\n", item);
out[outpos] = item;
outpos++;
+ count--;
}
dprintk("CHOOSE returns %d\n", outpos);
@@ -654,6 +758,7 @@ int crush_do_rule(const struct crush_map *map,
__u32 step;
int i, j;
int numrep;
+ int out_size;
/*
* the original choose_total_tries value was off by one (it
* counted "retries" and not "tries"). add one.
@@ -761,6 +866,7 @@ int crush_do_rule(const struct crush_map *map,
x, numrep,
curstep->arg2,
o+osize, j,
+ result_max-osize,
choose_tries,
recurse_tries,
choose_local_retries,
@@ -770,11 +876,13 @@ int crush_do_rule(const struct crush_map *map,
c+osize,
0);
} else {
+ out_size = ((numrep < (result_max-osize)) ?
+ numrep : (result_max-osize));
crush_choose_indep(
map,
map->buckets[-1-w[i]],
weight, weight_max,
- x, numrep, numrep,
+ x, out_size, numrep,
curstep->arg2,
o+osize, j,
choose_tries,
@@ -783,7 +891,7 @@ int crush_do_rule(const struct crush_map *map,
recurse_to_leaf,
c+osize,
0);
- osize += numrep;
+ osize += out_size;
}
}
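
Editorial aside: the straw2 comment above only points at the exponential-distribution reference, so a brief sketch of the fairness argument it relies on may help. With an independent uniform u_i per item and draw_i = ln(u_i)/w_i (the code works in 2^44-scaled log2 units and 16.16 fixed-point weights, but those positive scale factors cancel under argmax), the standard derivation is:

\[
u_i \sim U(0,1] \;\Rightarrow\; -\ln u_i \sim \mathrm{Exp}(1)
\;\Rightarrow\; -\mathrm{draw}_i = \frac{-\ln u_i}{w_i} \sim \mathrm{Exp}(w_i),
\]
\[
P\Big(\arg\max_i \mathrm{draw}_i = k\Big)
 = P\Big(\arg\min_i \mathrm{Exp}(w_i) = k\Big)
 = \frac{w_k}{\sum_j w_j},
\]

so each item wins with probability proportional to its weight, which is the property bucket_straw2_choose() above depends on.
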
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 14d9995097cc..593dc2eabcc8 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -22,6 +22,7 @@
* .../monmap - current monmap
* .../osdc - active osd requests
* .../monc - mon client state
+ * .../client_options - libceph-only (i.e. not rbd or cephfs) options
* .../dentry_lru - dump contents of dentry lru
* .../caps - expose cap (reservation) stats
* .../bdi - symlink to ../../bdi/something
@@ -177,10 +178,24 @@ static int osdc_show(struct seq_file *s, void *pp)
return 0;
}
+static int client_options_show(struct seq_file *s, void *p)
+{
+ struct ceph_client *client = s->private;
+ int ret;
+
+ ret = ceph_print_client_options(s, client);
+ if (ret)
+ return ret;
+
+ seq_putc(s, '\n');
+ return 0;
+}
+
CEPH_DEFINE_SHOW_FUNC(monmap_show)
CEPH_DEFINE_SHOW_FUNC(osdmap_show)
CEPH_DEFINE_SHOW_FUNC(monc_show)
CEPH_DEFINE_SHOW_FUNC(osdc_show)
+CEPH_DEFINE_SHOW_FUNC(client_options_show)
int ceph_debugfs_init(void)
{
@@ -242,6 +257,14 @@ int ceph_debugfs_client_init(struct ceph_client *client)
if (!client->debugfs_osdmap)
goto out;
+ client->debugfs_options = debugfs_create_file("client_options",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &client_options_show_fops);
+ if (!client->debugfs_options)
+ goto out;
+
return 0;
out:
@@ -252,6 +275,7 @@ out:
void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
dout("ceph_debugfs_client_cleanup %p\n", client);
+ debugfs_remove(client->debugfs_options);
debugfs_remove(client->debugfs_osdmap);
debugfs_remove(client->debugfs_monmap);
debugfs_remove(client->osdc.debugfs_file);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index a9f4ae45b7fb..967080a9f043 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -505,8 +505,6 @@ static int ceph_tcp_connect(struct ceph_connection *con)
pr_err("connect %s error %d\n",
ceph_pr_addr(&con->peer_addr.in_addr), ret);
sock_release(sock);
- con->error_msg = "connect error";
-
return ret;
}
@@ -2145,12 +2143,10 @@ static int process_connect(struct ceph_connection *con)
* to WAIT. This shouldn't happen if we are the
* client.
*/
- pr_err("process_connect got WAIT as client\n");
con->error_msg = "protocol error, got WAIT as client";
return -1;
default:
- pr_err("connect protocol error, will retry\n");
con->error_msg = "protocol error, garbage tag during connect";
return -1;
}
@@ -2282,8 +2278,7 @@ static int read_partial_message(struct ceph_connection *con)
crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
if (cpu_to_le32(crc) != con->in_hdr.crc) {
- pr_err("read_partial_message bad hdr "
- " crc %u != expected %u\n",
+ pr_err("read_partial_message bad hdr crc %u != expected %u\n",
crc, con->in_hdr.crc);
return -EBADMSG;
}
@@ -2313,7 +2308,7 @@ static int read_partial_message(struct ceph_connection *con)
pr_err("read_partial_message bad seq %lld expected %lld\n",
seq, con->in_seq + 1);
con->error_msg = "bad message sequence # for incoming message";
- return -EBADMSG;
+ return -EBADE;
}
/* allocate message? */
@@ -2660,6 +2655,8 @@ more:
switch (ret) {
case -EBADMSG:
con->error_msg = "bad crc";
+ /* fall through */
+ case -EBADE:
ret = -EIO;
break;
case -EIO:
@@ -2838,7 +2835,8 @@ static void con_work(struct work_struct *work)
if (ret < 0) {
if (ret == -EAGAIN)
continue;
- con->error_msg = "socket error on read";
+ if (!con->error_msg)
+ con->error_msg = "socket error on read";
fault = true;
break;
}
@@ -2847,7 +2845,8 @@ static void con_work(struct work_struct *work)
if (ret < 0) {
if (ret == -EAGAIN)
continue;
- con->error_msg = "socket error on write";
+ if (!con->error_msg)
+ con->error_msg = "socket error on write";
fault = true;
}
@@ -2869,11 +2868,13 @@ static void con_work(struct work_struct *work)
*/
static void con_fault(struct ceph_connection *con)
{
- pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
- ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
dout("fault %p state %lu to peer %s\n",
con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
+ pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
+ ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
+ con->error_msg = NULL;
+
WARN_ON(con->state != CON_STATE_CONNECTING &&
con->state != CON_STATE_NEGOTIATING &&
con->state != CON_STATE_OPEN);
@@ -3295,8 +3296,8 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
*/
if (*skip)
return 0;
- con->error_msg = "error allocating memory for incoming message";
+ con->error_msg = "error allocating memory for incoming message";
return -ENOMEM;
}
memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index b8c3fde5b04f..15796696d64e 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -122,6 +122,22 @@ bad:
return -EINVAL;
}
+static int crush_decode_straw2_bucket(void **p, void *end,
+ struct crush_bucket_straw2 *b)
+{
+ int j;
+ dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
+ b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
+ if (b->item_weights == NULL)
+ return -ENOMEM;
+ ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
+ for (j = 0; j < b->h.size; j++)
+ b->item_weights[j] = ceph_decode_32(p);
+ return 0;
+bad:
+ return -EINVAL;
+}
+
static int skip_name_map(void **p, void *end)
{
int len;
@@ -204,6 +220,9 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
case CRUSH_BUCKET_STRAW:
size = sizeof(struct crush_bucket_straw);
break;
+ case CRUSH_BUCKET_STRAW2:
+ size = sizeof(struct crush_bucket_straw2);
+ break;
default:
err = -EINVAL;
goto bad;
@@ -261,6 +280,12 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
if (err < 0)
goto bad;
break;
+ case CRUSH_BUCKET_STRAW2:
+ err = crush_decode_straw2_bucket(p, end,
+ (struct crush_bucket_straw2 *)b);
+ if (err < 0)
+ goto bad;
+ break;
}
}
diff --git a/net/compat.c b/net/compat.c
index f7bd286a8280..5cfd26a0006f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -31,10 +31,10 @@
#include <asm/uaccess.h>
#include <net/compat.h>
-ssize_t get_compat_msghdr(struct msghdr *kmsg,
- struct compat_msghdr __user *umsg,
- struct sockaddr __user **save_addr,
- struct iovec **iov)
+int get_compat_msghdr(struct msghdr *kmsg,
+ struct compat_msghdr __user *umsg,
+ struct sockaddr __user **save_addr,
+ struct iovec **iov)
{
compat_uptr_t uaddr, uiov, tmp3;
compat_size_t nr_segs;
@@ -79,13 +79,11 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
if (nr_segs > UIO_MAXIOV)
return -EMSGSIZE;
- err = compat_rw_copy_check_uvector(save_addr ? READ : WRITE,
- compat_ptr(uiov), nr_segs,
- UIO_FASTIOV, *iov, iov);
- if (err >= 0)
- iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE,
- *iov, nr_segs, err);
- return err;
+ kmsg->msg_iocb = NULL;
+
+ return compat_import_iovec(save_addr ? READ : WRITE,
+ compat_ptr(uiov), nr_segs,
+ UIO_FASTIOV, iov, &kmsg->msg_iter);
}
/* Bleech... */
@@ -515,25 +513,25 @@ COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
struct compat_group_req {
__u32 gr_interface;
struct __kernel_sockaddr_storage gr_group
- __attribute__ ((aligned(4)));
+ __aligned(4);
} __packed;
struct compat_group_source_req {
__u32 gsr_interface;
struct __kernel_sockaddr_storage gsr_group
- __attribute__ ((aligned(4)));
+ __aligned(4);
struct __kernel_sockaddr_storage gsr_source
- __attribute__ ((aligned(4)));
+ __aligned(4);
} __packed;
struct compat_group_filter {
__u32 gf_interface;
struct __kernel_sockaddr_storage gf_group
- __attribute__ ((aligned(4)));
+ __aligned(4);
__u32 gf_fmode;
__u32 gf_numsrc;
struct __kernel_sockaddr_storage gf_slist[1]
- __attribute__ ((aligned(4)));
+ __aligned(4);
} __packed;
#define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \
diff --git a/net/core/datagram.c b/net/core/datagram.c
index df493d68330c..b80fb91bb3f7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -673,7 +673,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
if (!chunk)
return 0;
- if (iov_iter_count(&msg->msg_iter) < chunk) {
+ if (msg_data_left(msg) < chunk) {
if (__skb_checksum_complete(skb))
goto csum_error;
if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
diff --git a/net/core/dev.c b/net/core/dev.c
index 45109b70664e..2c1c67fad64d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -660,6 +660,27 @@ __setup("netdev=", netdev_boot_setup);
*******************************************************************************/
/**
* dev_get_iflink - get 'iflink' value of an interface
+ * @dev: targeted interface
+ *
+ * Indicates the ifindex the interface is linked to.
+ * Physical interfaces have the same 'ifindex' and 'iflink' values.
+ */
+
+int dev_get_iflink(const struct net_device *dev)
+{
+ if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
+ return dev->netdev_ops->ndo_get_iflink(dev);
+
+ /* If dev->rtnl_link_ops is set, it's a virtual interface. */
+ if (dev->rtnl_link_ops)
+ return 0;
+
+ return dev->ifindex;
+}
+EXPORT_SYMBOL(dev_get_iflink);
+
+/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
@@ -1385,7 +1406,7 @@ static int __dev_close(struct net_device *dev)
return retval;
}
-static int dev_close_many(struct list_head *head)
+int dev_close_many(struct list_head *head, bool unlink)
{
struct net_device *dev, *tmp;
@@ -1399,11 +1420,13 @@ static int dev_close_many(struct list_head *head)
list_for_each_entry_safe(dev, tmp, head, close_list) {
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
call_netdevice_notifiers(NETDEV_DOWN, dev);
- list_del_init(&dev->close_list);
+ if (unlink)
+ list_del_init(&dev->close_list);
}
return 0;
}
+EXPORT_SYMBOL(dev_close_many);
/**
* dev_close - shutdown an interface.
@@ -1420,7 +1443,7 @@ int dev_close(struct net_device *dev)
LIST_HEAD(single);
list_add(&dev->close_list, &single);
- dev_close_many(&single);
+ dev_close_many(&single, true);
list_del(&single);
}
return 0;
@@ -1607,6 +1630,22 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
+#ifdef CONFIG_NET_CLS_ACT
+static struct static_key ingress_needed __read_mostly;
+
+void net_inc_ingress_queue(void)
+{
+ static_key_slow_inc(&ingress_needed);
+}
+EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
+
+void net_dec_ingress_queue(void)
+{
+ static_key_slow_dec(&ingress_needed);
+}
+EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
+#endif
+
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
@@ -1694,6 +1733,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
}
skb_scrub_packet(skb, true);
+ skb->priority = 0;
skb->protocol = eth_type_trans(skb, dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
@@ -1737,7 +1777,8 @@ static inline int deliver_skb(struct sk_buff *skb,
static inline void deliver_ptype_list_skb(struct sk_buff *skb,
struct packet_type **pt,
- struct net_device *dev, __be16 type,
+ struct net_device *orig_dev,
+ __be16 type,
struct list_head *ptype_list)
{
struct packet_type *ptype, *pt_prev = *pt;
@@ -1746,7 +1787,7 @@ static inline void deliver_ptype_list_skb(struct sk_buff *skb,
if (ptype->type != type)
continue;
if (pt_prev)
- deliver_skb(skb, pt_prev, dev);
+ deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
*pt = pt_prev;
@@ -2559,12 +2600,26 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
return features;
}
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+EXPORT_SYMBOL(passthru_features_check);
+
+static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ return vlan_features_check(skb, features);
+}
+
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
netdev_features_t features = dev->features;
u16 gso_segs = skb_shinfo(skb)->gso_segs;
- __be16 protocol = skb->protocol;
if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
features &= ~NETIF_F_GSO_MASK;
@@ -2576,34 +2631,17 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
if (skb->encapsulation)
features &= dev->hw_enc_features;
- if (!skb_vlan_tag_present(skb)) {
- if (unlikely(protocol == htons(ETH_P_8021Q) ||
- protocol == htons(ETH_P_8021AD))) {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- protocol = veh->h_vlan_encapsulated_proto;
- } else {
- goto finalize;
- }
- }
-
- features = netdev_intersect_features(features,
- dev->vlan_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
-
- if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+ if (skb_vlan_tagged(skb))
features = netdev_intersect_features(features,
- NETIF_F_SG |
- NETIF_F_HIGHDMA |
- NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM |
+ dev->vlan_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
-finalize:
if (dev->netdev_ops->ndo_features_check)
features &= dev->netdev_ops->ndo_features_check(skb, dev,
features);
+ else
+ features &= dflt_features_check(skb, dev, features);
return harmonize_features(skb, features);
}
@@ -2675,7 +2713,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
if (unlikely(!skb))
goto out_null;
- if (netif_needs_gso(dev, skb, features)) {
+ if (netif_needs_gso(skb, features)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
@@ -2857,7 +2895,7 @@ EXPORT_SYMBOL(xmit_recursion);
* dev_loopback_xmit - loop back @skb
* @skb: buffer to transmit
*/
-int dev_loopback_xmit(struct sk_buff *skb)
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
{
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
@@ -2995,11 +3033,11 @@ out:
return rc;
}
-int dev_queue_xmit(struct sk_buff *skb)
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
{
return __dev_queue_xmit(skb, NULL);
}
-EXPORT_SYMBOL(dev_queue_xmit);
+EXPORT_SYMBOL(dev_queue_xmit_sk);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
@@ -3041,7 +3079,7 @@ static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
- if (next_cpu != RPS_NO_CPU) {
+ if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
@@ -3146,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
- * - Current CPU is unset (equal to RPS_NO_CPU).
+ * - Current CPU is unset (>= nr_cpu_ids).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
@@ -3154,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* have been dequeued, thus preserving in order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
- (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
}
- if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
@@ -3202,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *rflow;
bool expire = true;
- int cpu;
+ unsigned int cpu;
rcu_read_lock();
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
cpu = ACCESS_ONCE(rflow->cpu);
- if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
((int)(per_cpu(softnet_data, cpu).input_queue_head -
rflow->last_qtail) <
(int)(10 * flow_table->mask)))
@@ -3525,7 +3563,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
- goto out;
+ return skb;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
@@ -3539,8 +3577,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
return NULL;
}
-out:
- skb->tc_verd = 0;
return skb;
}
#endif
@@ -3676,12 +3712,15 @@ another_round:
skip_taps:
#ifdef CONFIG_NET_CLS_ACT
- skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
- if (!skb)
- goto unlock;
+ if (static_key_false(&ingress_needed)) {
+ skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+ if (!skb)
+ goto unlock;
+ }
+
+ skb->tc_verd = 0;
ncls:
#endif
-
if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
goto drop;
@@ -3831,13 +3870,13 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
* NET_RX_SUCCESS: no congestion
* NET_RX_DROP: packet was dropped
*/
-int netif_receive_skb(struct sk_buff *skb)
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
{
trace_netif_receive_skb_entry(skb);
return netif_receive_skb_internal(skb);
}
-EXPORT_SYMBOL(netif_receive_skb);
+EXPORT_SYMBOL(netif_receive_skb_sk);
/* Network device is going away, flush any packets still pending
* Called with irqs disabled.
@@ -5170,7 +5209,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
return -EBUSY;
- if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
+ if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
return -EEXIST;
if (master && netdev_master_upper_dev_get(dev))
@@ -5914,6 +5953,24 @@ int dev_get_phys_port_id(struct net_device *dev,
EXPORT_SYMBOL(dev_get_phys_port_id);
/**
+ * dev_get_phys_port_name - Get device physical port name
+ * @dev: device
+ * @name: port name
+ *
+ * Get device physical port name
+ */
+int dev_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_get_phys_port_name)
+ return -EOPNOTSUPP;
+ return ops->ndo_get_phys_port_name(dev, name, len);
+}
+EXPORT_SYMBOL(dev_get_phys_port_name);
+
+/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
@@ -5970,7 +6027,7 @@ static void rollback_registered_many(struct list_head *head)
/* If device is running, close it first. */
list_for_each_entry(dev, head, unreg_list)
list_add_tail(&dev->close_list, &close_head);
- dev_close_many(&close_head);
+ dev_close_many(&close_head, true);
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
@@ -6297,8 +6354,6 @@ int register_netdevice(struct net_device *dev)
spin_lock_init(&dev->addr_list_lock);
netdev_set_addr_lockdep_class(dev);
- dev->iflink = -1;
-
ret = dev_get_valid_name(net, dev, dev->name);
if (ret < 0)
goto out;
@@ -6328,9 +6383,6 @@ int register_netdevice(struct net_device *dev)
else if (__dev_get_by_index(net, dev->ifindex))
goto err_uninit;
- if (dev->iflink == -1)
- dev->iflink = dev->ifindex;
-
/* Transfer changeable features to wanted_features and enable
* software offloads (GSO and GRO).
*/
@@ -6843,8 +6895,6 @@ void free_netdev(struct net_device *dev)
{
struct napi_struct *p, *n;
- release_net(dev_net(dev));
-
netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
kvfree(dev->_rx);
@@ -7045,12 +7095,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
dev_net_set(dev, net);
/* If there is an ifindex conflict assign a new one */
- if (__dev_get_by_index(net, dev->ifindex)) {
- int iflink = (dev->iflink == dev->ifindex);
+ if (__dev_get_by_index(net, dev->ifindex))
dev->ifindex = dev_new_index(net);
- if (iflink)
- dev->iflink = dev->ifindex;
- }
/* Send a netdev-add uevent to the new namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
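To show where the new dev_get_iflink() helper gets its answer from, here is a minimal, hypothetical driver-side sketch of a stacked device filling in the ndo_get_iflink hook it consults; the demo_* names and the private struct are illustrative and not part of this patch.

#include <linux/netdevice.h>

struct demo_tunnel {			/* hypothetical private state */
	struct net_device *lower_dev;
};

static int demo_get_iflink(const struct net_device *dev)
{
	const struct demo_tunnel *priv = netdev_priv(dev);

	/* Report the ifindex of the device we are stacked on; devices
	 * without the hook fall back to dev->ifindex (or 0 for virtual
	 * devices) inside dev_get_iflink(). */
	return priv->lower_dev ? priv->lower_dev->ifindex : 0;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_get_iflink	= demo_get_iflink,
};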
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index aa378ecef186..1d00b8922902 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -790,7 +790,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (ops->get_rxfh_indir_size)
dev_indir_size = ops->get_rxfh_indir_size(dev);
if (ops->get_rxfh_key_size)
- dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
+ dev_key_size = ops->get_rxfh_key_size(dev);
if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
return -EFAULT;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index e4fdc9dfb2c7..9a12668f7d62 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -31,7 +31,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
r->pref = pref;
r->table = table;
r->flags = flags;
- r->fr_net = hold_net(ops->fro_net);
+ r->fr_net = ops->fro_net;
r->suppress_prefixlen = -1;
r->suppress_ifgroup = -1;
@@ -116,7 +116,6 @@ static int __fib_rules_register(struct fib_rules_ops *ops)
if (ops->family == o->family)
goto errout;
- hold_net(net);
list_add_tail_rcu(&ops->list, &net->rules_ops);
err = 0;
errout:
@@ -160,15 +159,6 @@ static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
}
}
-static void fib_rules_put_rcu(struct rcu_head *head)
-{
- struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
- struct net *net = ops->fro_net;
-
- release_net(net);
- kfree(ops);
-}
-
void fib_rules_unregister(struct fib_rules_ops *ops)
{
struct net *net = ops->fro_net;
@@ -178,7 +168,7 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
spin_unlock(&net->rules_mod_lock);
fib_rules_cleanup_ops(ops);
- call_rcu(&ops->rcu, fib_rules_put_rcu);
+ kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
@@ -303,7 +293,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
err = -ENOMEM;
goto errout;
}
- rule->fr_net = hold_net(net);
+ rule->fr_net = net;
if (tb[FRA_PRIORITY])
rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
@@ -423,7 +413,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
return 0;
errout_free:
- release_net(rule->fr_net);
kfree(rule);
errout:
rules_ops_put(ops);
@@ -492,6 +481,12 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
goto errout;
}
+ if (ops->delete) {
+ err = ops->delete(rule);
+ if (err)
+ goto errout;
+ }
+
list_del_rcu(&rule->list);
if (rule->action == FR_ACT_GOTO) {
@@ -517,8 +512,6 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
notify_rule_change(RTM_DELRULE, rule, ops, nlh,
NETLINK_CB(skb).portid);
- if (ops->delete)
- ops->delete(rule);
fib_rule_put(rule);
flush_route_cache(ops);
rules_ops_put(ops);
diff --git a/net/core/filter.c b/net/core/filter.c
index f6bdc2b1ba01..bf831a85c315 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -150,10 +150,62 @@ static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
return prandom_u32();
}
+static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
+ struct bpf_insn *insn_buf)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ switch (skb_field) {
+ case SKF_AD_MARK:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, mark));
+ break;
+
+ case SKF_AD_PKTTYPE:
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
+ *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
+#ifdef __BIG_ENDIAN_BITFIELD
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
+#endif
+ break;
+
+ case SKF_AD_QUEUE:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
+
+ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ offsetof(struct sk_buff, queue_mapping));
+ break;
+
+ case SKF_AD_VLAN_TAG:
+ case SKF_AD_VLAN_TAG_PRESENT:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
+ /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
+ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ offsetof(struct sk_buff, vlan_tci));
+ if (skb_field == SKF_AD_VLAN_TAG) {
+ *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
+ ~VLAN_TAG_PRESENT);
+ } else {
+ /* dst_reg >>= 12 */
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
+ /* dst_reg &= 1 */
+ *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
+ }
+ break;
+ }
+
+ return insn - insn_buf;
+}
+
static bool convert_bpf_extensions(struct sock_filter *fp,
struct bpf_insn **insnp)
{
struct bpf_insn *insn = *insnp;
+ u32 cnt;
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PROTOCOL:
@@ -167,13 +219,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
break;
case SKF_AD_OFF + SKF_AD_PKTTYPE:
- *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
- PKT_TYPE_OFFSET());
- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
-#ifdef __BIG_ENDIAN_BITFIELD
- insn++;
- *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
-#endif
+ cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
+ insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_IFINDEX:
@@ -197,10 +244,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
break;
case SKF_AD_OFF + SKF_AD_MARK:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
-
- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
- offsetof(struct sk_buff, mark));
+ cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
+ insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_RXHASH:
@@ -211,29 +256,30 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
break;
case SKF_AD_OFF + SKF_AD_QUEUE:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
-
- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
- offsetof(struct sk_buff, queue_mapping));
+ cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
+ insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_VLAN_TAG:
+ cnt = convert_skb_access(SKF_AD_VLAN_TAG,
+ BPF_REG_A, BPF_REG_CTX, insn);
+ insn += cnt - 1;
+ break;
+
case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+ cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
+ BPF_REG_A, BPF_REG_CTX, insn);
+ insn += cnt - 1;
+ break;
- /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
+ case SKF_AD_OFF + SKF_AD_VLAN_TPID:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
+
+ /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
- offsetof(struct sk_buff, vlan_tci));
- if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
- ~VLAN_TAG_PRESENT);
- } else {
- /* A >>= 12 */
- *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
- /* A &= 1 */
- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
- }
+ offsetof(struct sk_buff, vlan_proto));
+ /* A = ntohs(A) [emitting a nop or swap16] */
+ *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
break;
case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
@@ -814,7 +860,7 @@ static void bpf_release_orig_filter(struct bpf_prog *fp)
static void __bpf_prog_release(struct bpf_prog *prog)
{
- if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
+ if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
bpf_prog_put(prog);
} else {
bpf_release_orig_filter(prog);
@@ -1019,6 +1065,32 @@ void bpf_prog_destroy(struct bpf_prog *fp)
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
+{
+ struct sk_filter *fp, *old_fp;
+
+ fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ return -ENOMEM;
+
+ fp->prog = prog;
+ atomic_set(&fp->refcnt, 0);
+
+ if (!sk_filter_charge(sk, fp)) {
+ kfree(fp);
+ return -ENOMEM;
+ }
+
+ old_fp = rcu_dereference_protected(sk->sk_filter,
+ sock_owned_by_user(sk));
+ rcu_assign_pointer(sk->sk_filter, fp);
+
+ if (old_fp)
+ sk_filter_uncharge(sk, old_fp);
+
+ return 0;
+}
+
/**
* sk_attach_filter - attach a socket filter
* @fprog: the filter program
@@ -1031,7 +1103,6 @@ EXPORT_SYMBOL_GPL(bpf_prog_destroy);
*/
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
- struct sk_filter *fp, *old_fp;
unsigned int fsize = bpf_classic_proglen(fprog);
unsigned int bpf_fsize = bpf_prog_size(fprog->len);
struct bpf_prog *prog;
@@ -1068,36 +1139,20 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (IS_ERR(prog))
return PTR_ERR(prog);
- fp = kmalloc(sizeof(*fp), GFP_KERNEL);
- if (!fp) {
+ err = __sk_attach_prog(prog, sk);
+ if (err < 0) {
__bpf_prog_release(prog);
- return -ENOMEM;
- }
- fp->prog = prog;
-
- atomic_set(&fp->refcnt, 0);
-
- if (!sk_filter_charge(sk, fp)) {
- __sk_filter_release(fp);
- return -ENOMEM;
+ return err;
}
- old_fp = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
- rcu_assign_pointer(sk->sk_filter, fp);
-
- if (old_fp)
- sk_filter_uncharge(sk, old_fp);
-
return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
-#ifdef CONFIG_BPF_SYSCALL
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
- struct sk_filter *fp, *old_fp;
struct bpf_prog *prog;
+ int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return -EPERM;
@@ -1106,40 +1161,191 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
if (IS_ERR(prog))
return PTR_ERR(prog);
- if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
- /* valid fd, but invalid program type */
+ if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
bpf_prog_put(prog);
return -EINVAL;
}
- fp = kmalloc(sizeof(*fp), GFP_KERNEL);
- if (!fp) {
+ err = __sk_attach_prog(prog, sk);
+ if (err < 0) {
bpf_prog_put(prog);
- return -ENOMEM;
+ return err;
}
- fp->prog = prog;
- atomic_set(&fp->refcnt, 0);
+ return 0;
+}
- if (!sk_filter_charge(sk, fp)) {
- __sk_filter_release(fp);
- return -ENOMEM;
+/**
+ * bpf_skb_clone_unwritable - is the header of a clone not writable
+ * @skb: buffer to check
+ * @len: length up to which to write, can be negative
+ *
+ * Returns true if modifying the header part of the cloned buffer
+ * requires the data to be copied. That is, this version also works
+ * with the negative lengths needed in the eBPF case.
+ */
+static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len)
+{
+ return skb_header_cloned(skb) ||
+ (int) skb_headroom(skb) + len > skb->hdr_len;
+}
+
+#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
+
+static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ int offset = (int) r2;
+ void *from = (void *) (long) r3;
+ unsigned int len = (unsigned int) r4;
+ char buf[16];
+ void *ptr;
+
+ /* bpf verifier guarantees that:
+ * 'from' pointer points to bpf program stack
+ * 'len' bytes of it were initialized
+ * 'len' > 0
+ * 'skb' is a valid pointer to 'struct sk_buff'
+ *
+ * so check for invalid 'offset' and too large 'len'
+ */
+ if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
+ return -EFAULT;
+
+ offset -= skb->data - skb_mac_header(skb);
+ if (unlikely(skb_cloned(skb) &&
+ bpf_skb_clone_unwritable(skb, offset + len)))
+ return -EFAULT;
+
+ ptr = skb_header_pointer(skb, offset, len, buf);
+ if (unlikely(!ptr))
+ return -EFAULT;
+
+ if (BPF_RECOMPUTE_CSUM(flags))
+ skb_postpull_rcsum(skb, ptr, len);
+
+ memcpy(ptr, from, len);
+
+ if (ptr == buf)
+ /* skb_store_bits cannot return -EFAULT here */
+ skb_store_bits(skb, offset, ptr, len);
+
+ if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
+ return 0;
+}
+
+const struct bpf_func_proto bpf_skb_store_bytes_proto = {
+ .func = bpf_skb_store_bytes,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_STACK,
+ .arg4_type = ARG_CONST_STACK_SIZE,
+ .arg5_type = ARG_ANYTHING,
+};
+
+#define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f)
+#define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10)
+
+static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ int offset = (int) r2;
+ __sum16 sum, *ptr;
+
+ if (unlikely((u32) offset > 0xffff))
+ return -EFAULT;
+
+ offset -= skb->data - skb_mac_header(skb);
+ if (unlikely(skb_cloned(skb) &&
+ bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+ return -EFAULT;
+
+ ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+ if (unlikely(!ptr))
+ return -EFAULT;
+
+ switch (BPF_HEADER_FIELD_SIZE(flags)) {
+ case 2:
+ csum_replace2(ptr, from, to);
+ break;
+ case 4:
+ csum_replace4(ptr, from, to);
+ break;
+ default:
+ return -EINVAL;
}
- old_fp = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
- rcu_assign_pointer(sk->sk_filter, fp);
+ if (ptr == &sum)
+ /* skb_store_bits guaranteed to not return -EFAULT here */
+ skb_store_bits(skb, offset, ptr, sizeof(sum));
- if (old_fp)
- sk_filter_uncharge(sk, old_fp);
+ return 0;
+}
+
+const struct bpf_func_proto bpf_l3_csum_replace_proto = {
+ .func = bpf_l3_csum_replace,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+ int offset = (int) r2;
+ __sum16 sum, *ptr;
+
+ if (unlikely((u32) offset > 0xffff))
+ return -EFAULT;
+
+ offset -= skb->data - skb_mac_header(skb);
+ if (unlikely(skb_cloned(skb) &&
+ bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+ return -EFAULT;
+
+ ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+ if (unlikely(!ptr))
+ return -EFAULT;
+
+ switch (BPF_HEADER_FIELD_SIZE(flags)) {
+ case 2:
+ inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
+ break;
+ case 4:
+ inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ptr == &sum)
+ /* skb_store_bits guaranteed to not return -EFAULT here */
+ skb_store_bits(skb, offset, ptr, sizeof(sum));
return 0;
}
-/* allow socket filters to call
- * bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem()
- */
-static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func_id)
+const struct bpf_func_proto bpf_l4_csum_replace_proto = {
+ .func = bpf_l4_csum_replace,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *
+sk_filter_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -1148,39 +1354,144 @@ static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
+ case BPF_FUNC_get_prandom_u32:
+ return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_get_smp_processor_id:
+ return &bpf_get_smp_processor_id_proto;
default:
return NULL;
}
}
-static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type)
+static const struct bpf_func_proto *
+tc_cls_act_func_proto(enum bpf_func_id func_id)
{
- /* skb fields cannot be accessed yet */
- return false;
+ switch (func_id) {
+ case BPF_FUNC_skb_store_bytes:
+ return &bpf_skb_store_bytes_proto;
+ case BPF_FUNC_l3_csum_replace:
+ return &bpf_l3_csum_replace_proto;
+ case BPF_FUNC_l4_csum_replace:
+ return &bpf_l4_csum_replace_proto;
+ default:
+ return sk_filter_func_proto(func_id);
+ }
}
-static struct bpf_verifier_ops sock_filter_ops = {
- .get_func_proto = sock_filter_func_proto,
- .is_valid_access = sock_filter_is_valid_access,
+static bool sk_filter_is_valid_access(int off, int size,
+ enum bpf_access_type type)
+{
+ /* only read is allowed */
+ if (type != BPF_READ)
+ return false;
+
+ /* check bounds */
+ if (off < 0 || off >= sizeof(struct __sk_buff))
+ return false;
+
+ /* disallow misaligned access */
+ if (off % size != 0)
+ return false;
+
+ /* all __sk_buff fields are __u32 */
+ if (size != 4)
+ return false;
+
+ return true;
+}
+
+static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
+ struct bpf_insn *insn_buf)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ switch (ctx_off) {
+ case offsetof(struct __sk_buff, len):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, len));
+ break;
+
+ case offsetof(struct __sk_buff, protocol):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+
+ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ offsetof(struct sk_buff, protocol));
+ break;
+
+ case offsetof(struct __sk_buff, vlan_proto):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
+
+ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ offsetof(struct sk_buff, vlan_proto));
+ break;
+
+ case offsetof(struct __sk_buff, priority):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, priority));
+ break;
+
+ case offsetof(struct __sk_buff, mark):
+ return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
+
+ case offsetof(struct __sk_buff, pkt_type):
+ return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
+
+ case offsetof(struct __sk_buff, queue_mapping):
+ return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);
+
+ case offsetof(struct __sk_buff, vlan_present):
+ return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
+ dst_reg, src_reg, insn);
+
+ case offsetof(struct __sk_buff, vlan_tci):
+ return convert_skb_access(SKF_AD_VLAN_TAG,
+ dst_reg, src_reg, insn);
+ }
+
+ return insn - insn_buf;
+}
+
+static const struct bpf_verifier_ops sk_filter_ops = {
+ .get_func_proto = sk_filter_func_proto,
+ .is_valid_access = sk_filter_is_valid_access,
+ .convert_ctx_access = sk_filter_convert_ctx_access,
+};
+
+static const struct bpf_verifier_ops tc_cls_act_ops = {
+ .get_func_proto = tc_cls_act_func_proto,
+ .is_valid_access = sk_filter_is_valid_access,
+ .convert_ctx_access = sk_filter_convert_ctx_access,
};
-static struct bpf_prog_type_list tl = {
- .ops = &sock_filter_ops,
+static struct bpf_prog_type_list sk_filter_type __read_mostly = {
+ .ops = &sk_filter_ops,
.type = BPF_PROG_TYPE_SOCKET_FILTER,
};
-static int __init register_sock_filter_ops(void)
+static struct bpf_prog_type_list sched_cls_type __read_mostly = {
+ .ops = &tc_cls_act_ops,
+ .type = BPF_PROG_TYPE_SCHED_CLS,
+};
+
+static struct bpf_prog_type_list sched_act_type __read_mostly = {
+ .ops = &tc_cls_act_ops,
+ .type = BPF_PROG_TYPE_SCHED_ACT,
+};
+
+static int __init register_sk_filter_ops(void)
{
- bpf_register_prog_type(&tl);
+ bpf_register_prog_type(&sk_filter_type);
+ bpf_register_prog_type(&sched_cls_type);
+ bpf_register_prog_type(&sched_act_type);
+
return 0;
}
-late_initcall(register_sock_filter_ops);
-#else
-int sk_attach_bpf(u32 ufd, struct sock *sk)
-{
- return -EOPNOTSUPP;
-}
-#endif
+late_initcall(register_sk_filter_ops);
+
int sk_detach_filter(struct sock *sk)
{
int ret = -ENOENT;
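For context on the sk_attach_bpf() path reworked above, a minimal user-space sketch of attaching an already-loaded BPF_PROG_TYPE_SOCKET_FILTER program to a socket; obtaining prog_fd via bpf(BPF_PROG_LOAD, ...) and full error handling are assumed to happen elsewhere.

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_ATTACH_BPF
#define SO_ATTACH_BPF 50	/* asm-generic value; may differ on some architectures */
#endif

int attach_ebpf_filter(int sock, int prog_fd)
{
	/* The kernel resolves prog_fd to a bpf_prog and, as in
	 * sk_attach_bpf() above, rejects anything that is not a
	 * BPF_PROG_TYPE_SOCKET_FILTER program. */
	if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
		       &prog_fd, sizeof(prog_fd)) < 0) {
		perror("SO_ATTACH_BPF");
		return -1;
	}
	return 0;
}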
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 49a9e3e06c08..982861607f88 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(lweventlist_lock);
static unsigned char default_operstate(const struct net_device *dev)
{
if (!netif_carrier_ok(dev))
- return (dev->ifindex != dev->iflink ?
+ return (dev->ifindex != dev_get_iflink(dev) ?
IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);
if (netif_dormant(dev))
@@ -89,7 +89,7 @@ static bool linkwatch_urgent_event(struct net_device *dev)
if (!netif_running(dev))
return false;
- if (dev->ifindex != dev->iflink)
+ if (dev->ifindex != dev_get_iflink(dev))
return true;
if (dev->priv_flags & IFF_TEAM_PORT)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 70fe9e10ac86..3de654256028 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -397,25 +397,15 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
struct neighbour *n;
- int key_len = tbl->key_len;
- u32 hash_val;
- struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, lookups);
rcu_read_lock_bh();
- nht = rcu_dereference_bh(tbl->nht);
- hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
-
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
- if (!atomic_inc_not_zero(&n->refcnt))
- n = NULL;
- NEIGH_CACHE_STAT_INC(tbl, hits);
- break;
- }
+ n = __neigh_lookup_noref(tbl, pkey, dev);
+ if (n) {
+ if (!atomic_inc_not_zero(&n->refcnt))
+ n = NULL;
+ NEIGH_CACHE_STAT_INC(tbl, hits);
}
rcu_read_unlock_bh();
@@ -601,7 +591,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
if (!n)
goto out;
- write_pnet(&n->net, hold_net(net));
+ write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
if (dev)
@@ -610,7 +600,6 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
if (tbl->pconstructor && tbl->pconstructor(n)) {
if (dev)
dev_put(dev);
- release_net(net);
kfree(n);
n = NULL;
goto out;
@@ -644,7 +633,6 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
tbl->pdestructor(n);
if (n->dev)
dev_put(n->dev);
- release_net(pneigh_net(n));
kfree(n);
return 0;
}
@@ -667,7 +655,6 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
tbl->pdestructor(n);
if (n->dev)
dev_put(n->dev);
- release_net(pneigh_net(n));
kfree(n);
continue;
}
@@ -830,10 +817,9 @@ out:
static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
- int max_probes = NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES);
- if (!(n->nud_state & NUD_PROBE))
- max_probes += NEIGH_VAR(p, MCAST_PROBES);
- return max_probes;
+ return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
+ (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
+ NEIGH_VAR(p, MCAST_PROBES));
}
static void neigh_invalidate(struct neighbour *neigh)
@@ -1263,10 +1249,10 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
EXPORT_SYMBOL(neigh_event_ns);
/* called with read_lock_bh(&n->lock); */
-static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
+static void neigh_hh_init(struct neighbour *n)
{
- struct net_device *dev = dst->dev;
- __be16 prot = dst->ops->protocol;
+ struct net_device *dev = n->dev;
+ __be16 prot = n->tbl->protocol;
struct hh_cache *hh = &n->hh;
write_lock_bh(&n->lock);
@@ -1280,43 +1266,19 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
write_unlock_bh(&n->lock);
}
-/* This function can be used in contexts, where only old dev_queue_xmit
- * worked, f.e. if you want to override normal output path (eql, shaper),
- * but resolution is not made yet.
- */
-
-int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
-
- __skb_pull(skb, skb_network_offset(skb));
-
- if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
- skb->len) < 0 &&
- dev_rebuild_header(skb))
- return 0;
-
- return dev_queue_xmit(skb);
-}
-EXPORT_SYMBOL(neigh_compat_output);
-
/* Slow and careful. */
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
int rc = 0;
- if (!dst)
- goto discard;
-
if (!neigh_event_send(neigh, skb)) {
int err;
struct net_device *dev = neigh->dev;
unsigned int seq;
if (dev->header_ops->cache && !neigh->hh.hh_len)
- neigh_hh_init(neigh, dst);
+ neigh_hh_init(neigh);
do {
__skb_pull(skb, skb_network_offset(skb));
@@ -1332,8 +1294,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
}
out:
return rc;
-discard:
- neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh);
out_kfree_skb:
rc = -EINVAL;
kfree_skb(skb);
@@ -1464,11 +1424,10 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
dev_hold(dev);
p->dev = dev;
- write_pnet(&p->net, hold_net(net));
+ write_pnet(&p->net, net);
p->sysctl_table = NULL;
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
- release_net(net);
dev_put(dev);
kfree(p);
return NULL;
@@ -1508,7 +1467,6 @@ EXPORT_SYMBOL(neigh_parms_release);
static void neigh_parms_destroy(struct neigh_parms *parms)
{
- release_net(neigh_parms_net(parms));
kfree(parms);
}
@@ -1783,6 +1741,8 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
NEIGH_VAR(parms, UCAST_PROBES)) ||
nla_put_u32(skb, NDTPA_MCAST_PROBES,
NEIGH_VAR(parms, MCAST_PROBES)) ||
+ nla_put_u32(skb, NDTPA_MCAST_REPROBES,
+ NEIGH_VAR(parms, MCAST_REPROBES)) ||
nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
@@ -1942,6 +1902,7 @@ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
[NDTPA_APP_PROBES] = { .type = NLA_U32 },
[NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
[NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
+ [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
[NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
[NDTPA_GC_STALETIME] = { .type = NLA_U64 },
[NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
@@ -2042,6 +2003,10 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
NEIGH_VAR_SET(p, MCAST_PROBES,
nla_get_u32(tbp[i]));
break;
+ case NDTPA_MCAST_REPROBES:
+ NEIGH_VAR_SET(p, MCAST_REPROBES,
+ nla_get_u32(tbp[i]));
+ break;
case NDTPA_BASE_REACHABLE_TIME:
NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
nla_get_msecs(tbp[i]));
@@ -2427,6 +2392,40 @@ void __neigh_for_each_release(struct neigh_table *tbl,
}
EXPORT_SYMBOL(__neigh_for_each_release);
+int neigh_xmit(int index, struct net_device *dev,
+ const void *addr, struct sk_buff *skb)
+{
+ int err = -EAFNOSUPPORT;
+ if (likely(index < NEIGH_NR_TABLES)) {
+ struct neigh_table *tbl;
+ struct neighbour *neigh;
+
+ tbl = neigh_tables[index];
+ if (!tbl)
+ goto out;
+ neigh = __neigh_lookup_noref(tbl, addr, dev);
+ if (!neigh)
+ neigh = __neigh_create(tbl, addr, dev, false);
+ err = PTR_ERR(neigh);
+ if (IS_ERR(neigh))
+ goto out_kfree_skb;
+ err = neigh->output(neigh, skb);
+ } else if (index == NEIGH_LINK_TABLE) {
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ addr, NULL, skb->len);
+ if (err < 0)
+ goto out_kfree_skb;
+ err = dev_queue_xmit(skb);
+ }
+out:
+ return err;
+out_kfree_skb:
+ kfree_skb(skb);
+ goto out;
+}
+EXPORT_SYMBOL(neigh_xmit);
+
#ifdef CONFIG_PROC_FS
static struct neighbour *neigh_get_first(struct seq_file *seq)
@@ -2994,6 +2993,7 @@ static struct neigh_sysctl_table {
NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
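A short, hypothetical caller sketch for the new neigh_xmit() helper; the demo_forward() wrapper, 'nexthop' and 'dev' are illustrative and assumed to be set up by the caller, not part of this patch.

#include <linux/netdevice.h>
#include <net/neighbour.h>

/* Sketch: transmit an IPv4-addressed skb via the ARP neighbour table.
 * neigh_xmit() looks up (or creates) the neighbour for 'nexthop' in
 * neigh_tables[NEIGH_ARP_TABLE] and calls neigh->output(); for
 * NEIGH_LINK_TABLE it falls back to dev_hard_header() + dev_queue_xmit(). */
static int demo_forward(struct sk_buff *skb, struct net_device *dev,
			__be32 nexthop)
{
	return neigh_xmit(NEIGH_ARP_TABLE, dev, &nexthop, skb);
}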
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f2aa73bfb0e4..4238d6da5c60 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -23,6 +23,7 @@
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include "net-sysfs.h"
@@ -108,11 +109,19 @@ NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
-NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
+static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+
+ return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
+}
+static DEVICE_ATTR_RO(iflink);
+
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
return sprintf(buf, fmt_dec, dev->name_assign_type);
@@ -417,6 +426,28 @@ static ssize_t phys_port_id_show(struct device *dev,
}
static DEVICE_ATTR_RO(phys_port_id);
+static ssize_t phys_port_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (dev_isalive(netdev)) {
+ char name[IFNAMSIZ];
+
+ ret = dev_get_phys_port_name(netdev, name, sizeof(name));
+ if (!ret)
+ ret = sprintf(buf, "%s\n", name);
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+static DEVICE_ATTR_RO(phys_port_name);
+
static ssize_t phys_switch_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -464,6 +495,7 @@ static struct attribute *net_class_attrs[] = {
&dev_attr_tx_queue_len.attr,
&dev_attr_gro_flush_timeout.attr,
&dev_attr_phys_port_id.attr,
+ &dev_attr_phys_port_name.attr,
&dev_attr_phys_switch_id.attr,
NULL,
};
@@ -950,6 +982,60 @@ static ssize_t show_trans_timeout(struct netdev_queue *queue,
return sprintf(buf, "%lu", trans_timeout);
}
+#ifdef CONFIG_XPS
+static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ if (queue == &dev->_tx[i])
+ break;
+
+ BUG_ON(i >= dev->num_tx_queues);
+
+ return i;
+}
+
+static ssize_t show_tx_maxrate(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", queue->tx_maxrate);
+}
+
+static ssize_t set_tx_maxrate(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ const char *buf, size_t len)
+{
+ struct net_device *dev = queue->dev;
+ int err, index = get_netdev_queue_index(queue);
+ u32 rate = 0;
+
+ err = kstrtou32(buf, 10, &rate);
+ if (err < 0)
+ return err;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ err = -EOPNOTSUPP;
+ if (dev->netdev_ops->ndo_set_tx_maxrate)
+ err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);
+
+ rtnl_unlock();
+ if (!err) {
+ queue->tx_maxrate = rate;
+ return len;
+ }
+ return err;
+}
+
+static struct netdev_queue_attribute queue_tx_maxrate =
+ __ATTR(tx_maxrate, S_IRUGO | S_IWUSR,
+ show_tx_maxrate, set_tx_maxrate);
+#endif
+
static struct netdev_queue_attribute queue_trans_timeout =
__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
@@ -1064,18 +1150,6 @@ static struct attribute_group dql_group = {
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
-static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
-{
- struct net_device *dev = queue->dev;
- unsigned int i;
-
- i = queue - dev->_tx;
- BUG_ON(i >= dev->num_tx_queues);
-
- return i;
-}
-
-
static ssize_t show_xps_map(struct netdev_queue *queue,
struct netdev_queue_attribute *attribute, char *buf)
{
@@ -1152,6 +1226,7 @@ static struct attribute *netdev_queue_default_attrs[] = {
&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
&xps_cpus_attribute.attr,
+ &queue_tx_maxrate.attr,
#endif
NULL
};
@@ -1374,6 +1449,30 @@ static struct class net_class = {
.namespace = net_namespace,
};
+#ifdef CONFIG_OF_NET
+static int of_dev_node_match(struct device *dev, const void *data)
+{
+ int ret = 0;
+
+ if (dev->parent)
+ ret = dev->parent->of_node == data;
+
+ return ret == 0 ? dev->of_node == data : ret;
+}
+
+struct net_device *of_find_net_device_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
+ if (!dev)
+ return NULL;
+
+ return to_net_dev(dev);
+}
+EXPORT_SYMBOL(of_find_net_device_by_node);
+#endif
+
/* Delete sysfs entries but hold kobject reference until after all
* netdev references are gone.
*/
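A minimal user-space sketch of using the new per-queue tx_maxrate attribute registered above; "eth0" and queue "tx-0" are illustrative, and the write only succeeds if the driver implements ndo_set_tx_maxrate (otherwise set_tx_maxrate() returns -EOPNOTSUPP).

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/tx_maxrate", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1000\n");	/* handled by set_tx_maxrate() above */
	fclose(f);
	return 0;
}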
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 70d3450588b2..572af0011997 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -16,7 +16,6 @@
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
-#include <linux/rtnetlink.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
@@ -148,9 +147,11 @@ static void ops_free_list(const struct pernet_operations *ops,
}
}
+static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
+ int id);
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
- int min = 0, max = 0;
+ int min = 0, max = 0, id;
ASSERT_RTNL();
@@ -159,7 +160,11 @@ static int alloc_netid(struct net *net, struct net *peer, int reqid)
max = reqid + 1;
}
- return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
+ id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
+ if (id >= 0)
+ rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);
+
+ return id;
}
/* This function is used by idr_for_each(). If net is equal to peer, the
@@ -238,10 +243,6 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
net->user_ns = user_ns;
idr_init(&net->netns_ids);
-#ifdef NETNS_REFCNT_DEBUG
- atomic_set(&net->use_count, 0);
-#endif
-
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
if (error < 0)
@@ -296,13 +297,6 @@ out_free:
static void net_free(struct net *net)
{
-#ifdef NETNS_REFCNT_DEBUG
- if (unlikely(atomic_read(&net->use_count) != 0)) {
- pr_emerg("network namespace not free! Usage: %d\n",
- atomic_read(&net->use_count));
- return;
- }
-#endif
kfree(rcu_access_pointer(net->gen));
kmem_cache_free(net_cachep, net);
}
@@ -370,8 +364,10 @@ static void cleanup_net(struct work_struct *work)
for_each_net(tmp) {
int id = __peernet2id(tmp, net, false);
- if (id >= 0)
+ if (id >= 0) {
+ rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
idr_remove(&tmp->netns_ids, id);
+ }
}
idr_destroy(&net->netns_ids);
@@ -542,7 +538,8 @@ static int rtnl_net_get_size(void)
}
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
- int cmd, struct net *net, struct net *peer)
+ int cmd, struct net *net, struct net *peer,
+ int nsid)
{
struct nlmsghdr *nlh;
struct rtgenmsg *rth;
@@ -557,9 +554,13 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
rth = nlmsg_data(nlh);
rth->rtgen_family = AF_UNSPEC;
- id = __peernet2id(net, peer, false);
- if (id < 0)
- id = NETNSA_NSID_NOT_ASSIGNED;
+ if (nsid >= 0) {
+ id = nsid;
+ } else {
+ id = __peernet2id(net, peer, false);
+ if (id < 0)
+ id = NETNSA_NSID_NOT_ASSIGNED;
+ }
if (nla_put_s32(skb, NETNSA_NSID, id))
goto nla_put_failure;
@@ -576,8 +577,8 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
struct sk_buff *msg;
- int err = -ENOBUFS;
struct net *peer;
+ int err;
err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy);
@@ -600,7 +601,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
}
err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
- RTM_GETNSID, net, peer);
+ RTM_NEWNSID, net, peer, -1);
if (err < 0)
goto err_out;
@@ -614,6 +615,75 @@ out:
return err;
}
+struct rtnl_net_dump_cb {
+ struct net *net;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+ int s_idx;
+};
+
+static int rtnl_net_dumpid_one(int id, void *peer, void *data)
+{
+ struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
+ int ret;
+
+ if (net_cb->idx < net_cb->s_idx)
+ goto cont;
+
+ ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
+ net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWNSID, net_cb->net, peer, id);
+ if (ret < 0)
+ return ret;
+
+cont:
+ net_cb->idx++;
+ return 0;
+}
+
+static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct rtnl_net_dump_cb net_cb = {
+ .net = net,
+ .skb = skb,
+ .cb = cb,
+ .idx = 0,
+ .s_idx = cb->args[0],
+ };
+
+ ASSERT_RTNL();
+
+ idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
+
+ cb->args[0] = net_cb.idx;
+ return skb->len;
+}
+
+static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
+ int id)
+{
+ struct sk_buff *msg;
+ int err = -ENOMEM;
+
+ msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+ if (!msg)
+ goto out;
+
+ err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
+ if (err < 0)
+ goto err_out;
+
+ rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
+ return;
+
+err_out:
+ nlmsg_free(msg);
+out:
+ rtnl_set_sk_err(net, RTNLGRP_NSID, err);
+}
+
static int __init net_ns_init(void)
{
struct net_generic *ng;
@@ -648,7 +718,8 @@ static int __init net_ns_init(void)
register_pernet_subsys(&net_ns_ops);
rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
- rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, NULL, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
+ NULL);
return 0;
}
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 04db318e6218..87b22c0bc08c 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
return -ENOMEM;
get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
- rwlock_init(&queue->syn_wait_lock);
+ spin_lock_init(&queue->syn_wait_lock);
queue->rskq_accept_head = NULL;
lopt->nr_table_entries = nr_table_entries;
lopt->max_qlen_log = ilog2(nr_table_entries);
- write_lock_bh(&queue->syn_wait_lock);
+ spin_lock_bh(&queue->syn_wait_lock);
queue->listen_opt = lopt;
- write_unlock_bh(&queue->syn_wait_lock);
+ spin_unlock_bh(&queue->syn_wait_lock);
return 0;
}
@@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(
{
struct listen_sock *lopt;
- write_lock_bh(&queue->syn_wait_lock);
+ spin_lock_bh(&queue->syn_wait_lock);
lopt = queue->listen_opt;
queue->listen_opt = NULL;
- write_unlock_bh(&queue->syn_wait_lock);
+ spin_unlock_bh(&queue->syn_wait_lock);
return lopt;
}
@@ -94,21 +94,26 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
/* make all the listen_opt local to us */
struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
- if (lopt->qlen != 0) {
+ if (listen_sock_qlen(lopt) != 0) {
unsigned int i;
for (i = 0; i < lopt->nr_table_entries; i++) {
struct request_sock *req;
+ spin_lock_bh(&queue->syn_wait_lock);
while ((req = lopt->syn_table[i]) != NULL) {
lopt->syn_table[i] = req->dl_next;
- lopt->qlen--;
- reqsk_free(req);
+ atomic_inc(&lopt->qlen_dec);
+ if (del_timer(&req->rsk_timer))
+ reqsk_put(req);
+ reqsk_put(req);
}
+ spin_unlock_bh(&queue->syn_wait_lock);
}
}
- WARN_ON(lopt->qlen != 0);
+ if (WARN_ON(listen_sock_qlen(lopt) != 0))
+ pr_err("qlen %u\n", listen_sock_qlen(lopt));
kvfree(lopt);
}
@@ -153,24 +158,22 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
* case might also exist in tcp_v4_hnd_req() that will trigger this locking
* order.
*
- * When a TFO req is created, it needs to sock_hold its listener to prevent
- * the latter data structure from going away.
- *
- * This function also sets "treq->listener" to NULL and unreference listener
- * socket. treq->listener is used by the listener so it is protected by the
+ * This function also sets "treq->tfo_listener" to false.
+ * treq->tfo_listener is used by the listener so it is protected by the
* fastopenq->lock in this function.
*/
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
bool reset)
{
- struct sock *lsk = tcp_rsk(req)->listener;
- struct fastopen_queue *fastopenq =
- inet_csk(lsk)->icsk_accept_queue.fastopenq;
+ struct sock *lsk = req->rsk_listener;
+ struct fastopen_queue *fastopenq;
+
+ fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;
tcp_sk(sk)->fastopen_rsk = NULL;
spin_lock_bh(&fastopenq->lock);
fastopenq->qlen--;
- tcp_rsk(req)->listener = NULL;
+ tcp_rsk(req)->tfo_listener = false;
if (req->sk) /* the child socket hasn't been accepted yet */
goto out;
@@ -179,8 +182,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
* special RST handling below.
*/
spin_unlock_bh(&fastopenq->lock);
- sock_put(lsk);
- reqsk_free(req);
+ reqsk_put(req);
return;
}
/* Wait for 60secs before removing a req that has triggered RST.
@@ -190,7 +192,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
*
* For more details see CoNext'11 "TCP Fast Open" paper.
*/
- req->expires = jiffies + 60*HZ;
+ req->rsk_timer.expires = jiffies + 60*HZ;
if (fastopenq->rskq_rst_head == NULL)
fastopenq->rskq_rst_head = req;
else
@@ -201,5 +203,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
fastopenq->qlen++;
out:
spin_unlock_bh(&fastopenq->lock);
- sock_put(lsk);
}
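
Illustrative aside, not part of the patch: the reqsk_queue_destroy() hunk above stops decrementing a locked lopt->qlen and instead bumps an atomic qlen_dec, reading the length back through listen_sock_qlen() (defined elsewhere in this series). The sketch below, in standalone C11 with field names that are assumptions of mine, only shows the accounting idea: the queue length is derived from two monotonic counters, so readers never need the queue lock.

#include <stdatomic.h>

struct syn_queue_counts {
	atomic_int added;	/* bumped when a request is queued */
	atomic_int removed;	/* bumped when a request is unlinked */
};

static int syn_queue_len(struct syn_queue_counts *c)
{
	/* Effective SYN-queue length: additions minus removals. */
	return atomic_load(&c->added) - atomic_load(&c->removed);
}
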
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7ebed55b5f7d..666e0928ba40 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -818,7 +818,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
nla_total_size(sizeof(struct ifla_vf_vlan)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
nla_total_size(sizeof(struct ifla_vf_rate)) +
- nla_total_size(sizeof(struct ifla_vf_link_state)));
+ nla_total_size(sizeof(struct ifla_vf_link_state)) +
+ nla_total_size(sizeof(struct ifla_vf_rss_query_en)));
return size;
} else
return 0;
@@ -982,6 +983,24 @@ static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ char name[IFNAMSIZ];
+ int err;
+
+ err = dev_get_phys_port_name(dev, name, sizeof(name));
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
int err;
@@ -1037,8 +1056,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
#ifdef CONFIG_RPS
nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
- (dev->ifindex != dev->iflink &&
- nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+ (dev->ifindex != dev_get_iflink(dev) &&
+ nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
(upper_dev &&
nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
@@ -1072,6 +1091,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
if (rtnl_phys_port_id_fill(skb, dev))
goto nla_put_failure;
+ if (rtnl_phys_port_name_fill(skb, dev))
+ goto nla_put_failure;
+
if (rtnl_phys_switch_id_fill(skb, dev))
goto nla_put_failure;
@@ -1111,14 +1133,16 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct ifla_vf_tx_rate vf_tx_rate;
struct ifla_vf_spoofchk vf_spoofchk;
struct ifla_vf_link_state vf_linkstate;
+ struct ifla_vf_rss_query_en vf_rss_query_en;
/*
* Not all SR-IOV capable drivers support the
- * spoofcheck query. Preset to -1 so the user
- * space tool can detect that the driver didn't
- * report anything.
+ * spoofcheck and "RSS query enable" query. Preset to
+ * -1 so the user space tool can detect that the driver
+ * didn't report anything.
*/
ivi.spoofchk = -1;
+ ivi.rss_query_en = -1;
memset(ivi.mac, 0, sizeof(ivi.mac));
/* The default value for VF link state is "auto"
* IFLA_VF_LINK_STATE_AUTO which equals zero
@@ -1131,7 +1155,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
vf_rate.vf =
vf_tx_rate.vf =
vf_spoofchk.vf =
- vf_linkstate.vf = ivi.vf;
+ vf_linkstate.vf =
+ vf_rss_query_en.vf = ivi.vf;
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
@@ -1141,6 +1166,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
vf_rate.max_tx_rate = ivi.max_tx_rate;
vf_spoofchk.setting = ivi.spoofchk;
vf_linkstate.link_state = ivi.linkstate;
+ vf_rss_query_en.setting = ivi.rss_query_en;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
@@ -1155,7 +1181,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
&vf_spoofchk) ||
nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
- &vf_linkstate))
+ &vf_linkstate) ||
+ nla_put(skb, IFLA_VF_RSS_QUERY_EN,
+ sizeof(vf_rss_query_en),
+ &vf_rss_query_en))
goto nla_put_failure;
nla_nest_end(skb, vf);
}
@@ -1269,6 +1298,7 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
[IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
[IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
+ [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -1479,6 +1509,17 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
ivl->link_state);
break;
}
+ case IFLA_VF_RSS_QUERY_EN: {
+ struct ifla_vf_rss_query_en *ivrssq_en;
+
+ ivrssq_en = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_rss_query_en)
+ err = ops->ndo_set_vf_rss_query_en(dev,
+ ivrssq_en->vf,
+ ivrssq_en->setting);
+ break;
+ }
default:
err = -EINVAL;
break;
@@ -1815,6 +1856,42 @@ errout:
return err;
}
+static int rtnl_group_dellink(const struct net *net, int group)
+{
+ struct net_device *dev, *aux;
+ LIST_HEAD(list_kill);
+ bool found = false;
+
+ if (!group)
+ return -EPERM;
+
+ for_each_netdev(net, dev) {
+ if (dev->group == group) {
+ const struct rtnl_link_ops *ops;
+
+ found = true;
+ ops = dev->rtnl_link_ops;
+ if (!ops || !ops->dellink)
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ for_each_netdev_safe(net, dev, aux) {
+ if (dev->group == group) {
+ const struct rtnl_link_ops *ops;
+
+ ops = dev->rtnl_link_ops;
+ ops->dellink(dev, &list_kill);
+ }
+ }
+ unregister_netdevice_many(&list_kill);
+
+ return 0;
+}
+
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
@@ -1838,6 +1915,8 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
+ else if (tb[IFLA_GROUP])
+ return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
else
return -EINVAL;
@@ -1873,7 +1952,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
EXPORT_SYMBOL(rtnl_configure_link);
struct net_device *rtnl_create_link(struct net *net,
- char *ifname, unsigned char name_assign_type,
+ const char *ifname, unsigned char name_assign_type,
const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
int err;
@@ -2345,7 +2424,7 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
struct net_device *dev,
- u8 *addr, u32 pid, u32 seq,
+ u8 *addr, u16 vid, u32 pid, u32 seq,
int type, unsigned int flags,
int nlflags)
{
@@ -2367,6 +2446,9 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
+ if (vid)
+ if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
@@ -2381,7 +2463,7 @@ static inline size_t rtnl_fdb_nlmsg_size(void)
return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
}
-static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
+static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
@@ -2391,7 +2473,8 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
if (!skb)
goto errout;
- err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
+ err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
+ 0, 0, type, NTF_SELF, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -2526,7 +2609,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
nlh->nlmsg_flags);
if (!err) {
- rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
+ rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH);
ndm->ndm_flags &= ~NTF_SELF;
}
}
@@ -2627,7 +2710,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
if (!err) {
- rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
+ rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH);
ndm->ndm_flags &= ~NTF_SELF;
}
}
@@ -2652,7 +2735,7 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
if (*idx < cb->args[0])
goto skip;
- err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+ err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
portid, seq,
RTM_NEWNEIGH, NTF_SELF,
NLM_F_MULTI);
@@ -2695,7 +2778,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net_device *dev;
struct nlattr *tb[IFLA_MAX+1];
- struct net_device *bdev = NULL;
struct net_device *br_dev = NULL;
const struct net_device_ops *ops = NULL;
const struct net_device_ops *cops = NULL;
@@ -2719,7 +2801,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
return -ENODEV;
ops = br_dev->netdev_ops;
- bdev = br_dev;
}
for_each_netdev(net, dev) {
@@ -2732,7 +2813,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
cops = br_dev->netdev_ops;
}
- bdev = dev;
} else {
if (dev != br_dev &&
!(dev->priv_flags & IFF_BRIDGE_PORT))
@@ -2742,7 +2822,6 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
!(dev->priv_flags & IFF_EBRIDGE))
continue;
- bdev = br_dev;
cops = ops;
}
@@ -2775,7 +2854,7 @@ static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
- u32 flags, u32 mask)
+ u32 flags, u32 mask, int nlflags)
{
struct nlmsghdr *nlh;
struct ifinfomsg *ifm;
@@ -2784,7 +2863,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
- nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2804,8 +2883,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
- (dev->ifindex != dev->iflink &&
- nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+ (dev->ifindex != dev_get_iflink(dev) &&
+ nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
goto nla_put_failure;
br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -2890,7 +2969,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
if (idx >= cb->args[0] &&
br_dev->netdev_ops->ndo_bridge_getlink(
- skb, portid, seq, dev, filter_mask) < 0)
+ skb, portid, seq, dev, filter_mask,
+ NLM_F_MULTI) < 0)
break;
idx++;
}
@@ -2898,7 +2978,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
if (ops->ndo_bridge_getlink) {
if (idx >= cb->args[0] &&
ops->ndo_bridge_getlink(skb, portid, seq, dev,
- filter_mask) < 0)
+ filter_mask,
+ NLM_F_MULTI) < 0)
break;
idx++;
}
@@ -2939,7 +3020,7 @@ static int rtnl_bridge_notify(struct net_device *dev)
goto errout;
}
- err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
+ err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
if (err < 0)
goto errout;
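
Illustrative aside, not part of the patch: the new rtnl_group_dellink() path above lets a single RTM_DELLINK request that carries only an IFLA_GROUP attribute remove every device in a netdev group. A hedged userspace sketch of such a request; the function name and buffer sizing are mine.

/* Delete all devices in netdev group @group via one RTM_DELLINK. */
#include <linux/if_link.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int delete_group(unsigned int group)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
		char attrbuf[64];
	} req;
	struct rtattr *rta;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	int ret = -1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nlh.nlmsg_type = RTM_DELLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family = AF_UNSPEC;

	/* Append IFLA_GROUP (u32) after the fixed header. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = IFLA_GROUP;
	rta->rta_len = RTA_LENGTH(sizeof(group));
	memcpy(RTA_DATA(rta), &group, sizeof(group));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) >= 0)
		ret = 0;
	close(fd);
	return ret;
}
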
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e4ac97c8477..3cfff2a3d651 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -280,13 +280,14 @@ nodata:
EXPORT_SYMBOL(__alloc_skb);
/**
- * build_skb - build a network buffer
+ * __build_skb - build a network buffer
* @data: data buffer provided by caller
- * @frag_size: size of fragment, or 0 if head was kmalloced
+ * @frag_size: size of data, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated by kmalloc() only if
- * @frag_size is 0, otherwise data should come from the page allocator.
+ * @frag_size is 0, otherwise data should come from the page allocator
+ * or vmalloc()
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
* before giving packet to stack.
* RX rings only contains data buffers, not full skbs.
*/
-struct sk_buff *build_skb(void *data, unsigned int frag_size)
+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct skb_shared_info *shinfo;
struct sk_buff *skb;
@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
- skb->head_frag = frag_size != 0;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
return skb;
}
+
+/* build_skb() is wrapper over __build_skb(), that specifically
+ * takes care of skb->head and skb->pfmemalloc
+ * This means that if @frag_size is not zero, then @data must be backed
+ * by a page fragment, not kmalloc() or vmalloc()
+ */
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
+{
+ struct sk_buff *skb = __build_skb(data, frag_size);
+
+ if (skb && frag_size) {
+ skb->head_frag = 1;
+ if (virt_to_head_page(data)->pfmemalloc)
+ skb->pfmemalloc = 1;
+ }
+ return skb;
+}
EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache {
@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
gfp_t gfp = gfp_mask;
if (order) {
- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NOMEMALLOC;
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
nc->frag.size = PAGE_SIZE << (page ? order : 0);
}
@@ -2865,7 +2883,6 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
* @from: search offset
* @to: search limit
* @config: textsearch configuration
- * @state: uninitialized textsearch state variable
*
* Finds a pattern in the skb data according to the specified
* textsearch configuration. Use textsearch_next() to retrieve
@@ -2873,17 +2890,17 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
* to the first occurrence or UINT_MAX if no match was found.
*/
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- unsigned int to, struct ts_config *config,
- struct ts_state *state)
+ unsigned int to, struct ts_config *config)
{
+ struct ts_state state;
unsigned int ret;
config->get_next_block = skb_ts_get_next_block;
config->finish = skb_ts_finish;
- skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
+ skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
- ret = textsearch_find(config, state);
+ ret = textsearch_find(config, &state);
return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
@@ -3207,10 +3224,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
unsigned int offset = skb_gro_offset(skb);
unsigned int headlen = skb_headlen(skb);
- struct sk_buff *nskb, *lp, *p = *head;
unsigned int len = skb_gro_len(skb);
+ struct sk_buff *lp, *p = *head;
unsigned int delta_truesize;
- unsigned int headroom;
if (unlikely(p->len + len >= 65536))
return -E2BIG;
@@ -3277,48 +3293,6 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
goto done;
}
- /* switch back to head shinfo */
- pinfo = skb_shinfo(p);
-
- if (pinfo->frag_list)
- goto merge;
- if (skb_gro_len(p) != pinfo->gso_size)
- return -E2BIG;
-
- headroom = skb_headroom(p);
- nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
- if (unlikely(!nskb))
- return -ENOMEM;
-
- __copy_skb_header(nskb, p);
- nskb->mac_len = p->mac_len;
-
- skb_reserve(nskb, headroom);
- __skb_put(nskb, skb_gro_offset(p));
-
- skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
- skb_set_network_header(nskb, skb_network_offset(p));
- skb_set_transport_header(nskb, skb_transport_offset(p));
-
- __skb_pull(p, skb_gro_offset(p));
- memcpy(skb_mac_header(nskb), skb_mac_header(p),
- p->data - skb_mac_header(p));
-
- skb_shinfo(nskb)->frag_list = p;
- skb_shinfo(nskb)->gso_size = pinfo->gso_size;
- pinfo->gso_size = 0;
- __skb_header_release(p);
- NAPI_GRO_CB(nskb)->last = p;
-
- nskb->data_len += p->len;
- nskb->truesize += p->truesize;
- nskb->len += p->len;
-
- *head = nskb;
- nskb->next = p->next;
- p->next = NULL;
-
- p = nskb;
merge:
delta_truesize = skb->truesize;
@@ -3796,7 +3770,6 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
-
/**
* skb_partial_csum_set - set up and verify partial csum values for packet
* @skb: the skb to set
@@ -4169,19 +4142,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
*/
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
- if (xnet)
- skb_orphan(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->ignore_df = 0;
skb_dst_drop(skb);
- skb->mark = 0;
skb_sender_cpu_clear(skb);
- skb_init_secmark(skb);
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
+
+ if (!xnet)
+ return;
+
+ skb_orphan(skb);
+ skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
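
Illustrative aside, not part of the patch: after the build_skb()/__build_skb() split above, build_skb() keeps the page-fragment assumptions (setting head_frag and inheriting pfmemalloc from the page), while __build_skb() is the variant for kmalloc()/vmalloc() backed heads. A kernel-style sketch of how a caller might pick between the two; the function names below are mine, not from the tree.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

static struct sk_buff *example_frag_build(unsigned int len)
{
	unsigned int truesize = SKB_DATA_ALIGN(len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	void *data = netdev_alloc_frag(truesize);

	if (!data)
		return NULL;
	/* Page-fragment backed head: build_skb() sets head_frag and
	 * inherits pfmemalloc from the underlying page.  On NULL return
	 * the caller still owns @data and must release it.
	 */
	return build_skb(data, truesize);
}

static struct sk_buff *example_slab_build(unsigned int len)
{
	void *data = kmalloc(len, GFP_ATOMIC);

	if (!data)
		return NULL;
	/* kmalloc()ed head: pass frag_size == 0 and use __build_skb(),
	 * which skips the page-fragment assumptions.
	 */
	return __build_skb(data, 0);
}
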
diff --git a/net/core/sock.c b/net/core/sock.c
index 71e3e5f1eaa0..292f42228bfb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb_dst_force(skb);
spin_lock_irqsave(&list->lock, flags);
- skb->dropcount = atomic_read(&sk->sk_drops);
+ sock_skb_set_dropcount(sk, skb);
__skb_queue_tail(list, skb);
spin_unlock_irqrestore(&list->lock, flags);
@@ -947,8 +947,6 @@ set_rcvbuf:
sk->sk_mark = val;
break;
- /* We implement the SO_SNDLOWAT etc to
- not be settable (1003.1g 5.3) */
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
@@ -1253,6 +1251,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
default:
+ /* We implement the SO_SNDLOWAT etc to not be settable
+ * (1003.1g 7).
+ */
return -ENOPROTOOPT;
}
@@ -1474,7 +1475,6 @@ void sk_release_kernel(struct sock *sk)
sock_hold(sk);
sock_release(sk->sk_socket);
- release_net(sock_net(sk));
sock_net_set(sk, get_net(&init_net));
sock_put(sk);
}
@@ -1557,6 +1557,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_err = 0;
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
+ atomic64_set(&newsk->sk_cookie, 0);
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
@@ -1684,19 +1685,6 @@ void sock_efree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_efree);
-#ifdef CONFIG_INET
-void sock_edemux(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
-
- if (sk->sk_state == TCP_TIME_WAIT)
- inet_twsk_put(inet_twsk(sk));
- else
- sock_put(sk);
-}
-EXPORT_SYMBOL(sock_edemux);
-#endif
-
kuid_t sock_i_uid(struct sock *sk)
{
kuid_t uid;
@@ -2186,15 +2174,14 @@ int sock_no_getsockopt(struct socket *sock, int level, int optname,
}
EXPORT_SYMBOL(sock_no_getsockopt);
-int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t len)
+int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);
-int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t len, int flags)
+int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
+ int flags)
{
return -EOPNOTSUPP;
}
@@ -2566,14 +2553,14 @@ int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
int addr_len = 0;
int err;
- err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
flags & ~MSG_DONTWAIT, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
@@ -2750,6 +2737,42 @@ static inline void release_proto_idx(struct proto *prot)
}
#endif
+static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
+{
+ if (!rsk_prot)
+ return;
+ kfree(rsk_prot->slab_name);
+ rsk_prot->slab_name = NULL;
+ if (rsk_prot->slab) {
+ kmem_cache_destroy(rsk_prot->slab);
+ rsk_prot->slab = NULL;
+ }
+}
+
+static int req_prot_init(const struct proto *prot)
+{
+ struct request_sock_ops *rsk_prot = prot->rsk_prot;
+
+ if (!rsk_prot)
+ return 0;
+
+ rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
+ prot->name);
+ if (!rsk_prot->slab_name)
+ return -ENOMEM;
+
+ rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
+ rsk_prot->obj_size, 0,
+ 0, NULL);
+
+ if (!rsk_prot->slab) {
+ pr_crit("%s: Can't create request sock SLAB cache!\n",
+ prot->name);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
int proto_register(struct proto *prot, int alloc_slab)
{
if (alloc_slab) {
@@ -2763,21 +2786,8 @@ int proto_register(struct proto *prot, int alloc_slab)
goto out;
}
- if (prot->rsk_prot != NULL) {
- prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
- if (prot->rsk_prot->slab_name == NULL)
- goto out_free_sock_slab;
-
- prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
- prot->rsk_prot->obj_size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
-
- if (prot->rsk_prot->slab == NULL) {
- pr_crit("%s: Can't create request sock SLAB cache!\n",
- prot->name);
- goto out_free_request_sock_slab_name;
- }
- }
+ if (req_prot_init(prot))
+ goto out_free_request_sock_slab;
if (prot->twsk_prot != NULL) {
prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
@@ -2789,8 +2799,7 @@ int proto_register(struct proto *prot, int alloc_slab)
kmem_cache_create(prot->twsk_prot->twsk_slab_name,
prot->twsk_prot->twsk_obj_size,
0,
- SLAB_HWCACHE_ALIGN |
- prot->slab_flags,
+ prot->slab_flags,
NULL);
if (prot->twsk_prot->twsk_slab == NULL)
goto out_free_timewait_sock_slab_name;
@@ -2806,14 +2815,8 @@ int proto_register(struct proto *prot, int alloc_slab)
out_free_timewait_sock_slab_name:
kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
- if (prot->rsk_prot && prot->rsk_prot->slab) {
- kmem_cache_destroy(prot->rsk_prot->slab);
- prot->rsk_prot->slab = NULL;
- }
-out_free_request_sock_slab_name:
- if (prot->rsk_prot)
- kfree(prot->rsk_prot->slab_name);
-out_free_sock_slab:
+ req_prot_cleanup(prot->rsk_prot);
+
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
out:
@@ -2833,11 +2836,7 @@ void proto_unregister(struct proto *prot)
prot->slab = NULL;
}
- if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
- kmem_cache_destroy(prot->rsk_prot->slab);
- kfree(prot->rsk_prot->slab_name);
- prot->rsk_prot->slab = NULL;
- }
+ req_prot_cleanup(prot->rsk_prot);
if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
kmem_cache_destroy(prot->twsk_prot->twsk_slab);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index ad704c757bb4..74dddf84adcd 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -13,22 +13,39 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
-int sock_diag_check_cookie(void *sk, __u32 *cookie)
+static u64 sock_gen_cookie(struct sock *sk)
{
- if ((cookie[0] != INET_DIAG_NOCOOKIE ||
- cookie[1] != INET_DIAG_NOCOOKIE) &&
- ((u32)(unsigned long)sk != cookie[0] ||
- (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
- return -ESTALE;
- else
+ while (1) {
+ u64 res = atomic64_read(&sk->sk_cookie);
+
+ if (res)
+ return res;
+ res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
+ atomic64_cmpxchg(&sk->sk_cookie, 0, res);
+ }
+}
+
+int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
+{
+ u64 res;
+
+ if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
return 0;
+
+ res = sock_gen_cookie(sk);
+ if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
+ return -ESTALE;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
-void sock_diag_save_cookie(void *sk, __u32 *cookie)
+void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
{
- cookie[0] = (u32)(unsigned long)sk;
- cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+ u64 res = sock_gen_cookie(sk);
+
+ cookie[0] = (u32)res;
+ cookie[1] = (u32)(res >> 32);
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
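
Illustrative aside, not part of the patch: sock_gen_cookie() above assigns the 64-bit socket cookie lazily; racing callers resolve the assignment with a compare-and-swap and then re-read, so everyone observes the same published value. The same idiom in standalone C11 form, with names that are mine.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t cookie_gen;	/* per-"namespace" generator */

static uint64_t gen_cookie(_Atomic uint64_t *obj_cookie)
{
	for (;;) {
		uint64_t res = atomic_load(obj_cookie);
		uint64_t expected = 0;

		if (res)
			return res;
		/* Never hand out 0: fetch_add returns the old value. */
		res = atomic_fetch_add(&cookie_gen, 1) + 1;
		atomic_compare_exchange_strong(obj_cookie, &expected, res);
		/* On CAS failure another thread won; the loop re-reads
		 * and returns the value it published.
		 */
	}
}
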
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 8ce351ffceb1..95b6139d710c 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -24,7 +24,6 @@
static int zero = 0;
static int one = 1;
-static int ushort_max = USHRT_MAX;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
@@ -403,7 +402,6 @@ static struct ctl_table netns_core_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.extra1 = &zero,
- .extra2 = &ushort_max,
.proc_handler = proc_dointvec_minmax
},
{ }
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 93ea80196f0e..5b21f6f88e97 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -177,6 +177,8 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
+ [DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
+ [DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
};
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -1030,7 +1032,7 @@ nla_put_failure:
return err;
}
-/* Handle IEEE 802.1Qaz GET commands. */
+/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
struct nlattr *ieee, *app;
@@ -1067,6 +1069,32 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
}
}
+ if (ops->ieee_getqcn) {
+ struct ieee_qcn qcn;
+
+ memset(&qcn, 0, sizeof(qcn));
+ err = ops->ieee_getqcn(netdev, &qcn);
+ if (!err) {
+ err = nla_put(skb, DCB_ATTR_IEEE_QCN,
+ sizeof(qcn), &qcn);
+ if (err)
+ return -EMSGSIZE;
+ }
+ }
+
+ if (ops->ieee_getqcnstats) {
+ struct ieee_qcn_stats qcn_stats;
+
+ memset(&qcn_stats, 0, sizeof(qcn_stats));
+ err = ops->ieee_getqcnstats(netdev, &qcn_stats);
+ if (!err) {
+ err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
+ sizeof(qcn_stats), &qcn_stats);
+ if (err)
+ return -EMSGSIZE;
+ }
+ }
+
if (ops->ieee_getpfc) {
struct ieee_pfc pfc;
memset(&pfc, 0, sizeof(pfc));
@@ -1379,8 +1407,9 @@ int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
}
EXPORT_SYMBOL(dcbnl_cee_notify);
-/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
- * be completed the entire msg is aborted and error value is returned.
+/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
+ * If any requested operation can not be completed
+ * the entire msg is aborted and error value is returned.
* No attempt is made to reconcile the case where only part of the
* cmd can be completed.
*/
@@ -1417,6 +1446,15 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
goto err;
}
+ if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
+ struct ieee_qcn *qcn =
+ nla_data(ieee[DCB_ATTR_IEEE_QCN]);
+
+ err = ops->ieee_setqcn(netdev, qcn);
+ if (err)
+ goto err;
+ }
+
if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
err = ops->ieee_setpfc(netdev, pfc);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index e4c144fa706f..bebc735f5afc 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -280,8 +280,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct request_sock **prev);
+ struct request_sock *req);
int dccp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
@@ -310,16 +309,15 @@ int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
#endif
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size);
-int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int nonblock, int flags,
- int *addr_len);
+int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ int flags, int *addr_len);
void dccp_shutdown(struct sock *sk, int how);
int inet_dccp_listen(struct socket *sock, int backlog);
unsigned int dccp_poll(struct file *file, struct socket *sock,
poll_table *wait);
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void dccp_req_err(struct sock *sk, u64 seq);
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 028fc43aacbd..5a45f8de5d99 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -49,13 +49,14 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
}
-static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req)
+static int dccp_diag_dump_one(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req)
{
return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index e45b968613a4..ccf4c5629b3c 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -89,10 +89,9 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (inet->inet_saddr == 0)
inet->inet_saddr = fl4->saddr;
- inet->inet_rcv_saddr = inet->inet_saddr;
-
+ sk_rcv_saddr_set(sk, inet->inet_saddr);
inet->inet_dport = usin->sin_port;
- inet->inet_daddr = daddr;
+ sk_daddr_set(sk, daddr);
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet_opt)
@@ -196,6 +195,32 @@ static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
dst->ops->redirect(dst, sk, skb);
}
+void dccp_req_err(struct sock *sk, u64 seq)
+ {
+ struct request_sock *req = inet_reqsk(sk);
+ struct net *net = sock_net(sk);
+
+ /*
+ * ICMPs are not backlogged, hence we cannot get an established
+ * socket here.
+ */
+ WARN_ON(req->sk);
+
+ if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
+ NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+ reqsk_put(req);
+ } else {
+ /*
+ * Still in RESPOND, just remove it silently.
+ * There is no good way to pass the error to the newly
+ * created socket, and POSIX does not want network
+ * errors returned from accept().
+ */
+ inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+ }
+}
+EXPORT_SYMBOL(dccp_req_err);
+
/*
* This routine is called by the ICMP module when it gets some sort of error
* condition. If err < 0 then the socket should be closed and the error
@@ -228,10 +253,11 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
return;
}
- sk = inet_lookup(net, &dccp_hashinfo,
- iph->daddr, dh->dccph_dport,
- iph->saddr, dh->dccph_sport, inet_iif(skb));
- if (sk == NULL) {
+ sk = __inet_lookup_established(net, &dccp_hashinfo,
+ iph->daddr, dh->dccph_dport,
+ iph->saddr, ntohs(dh->dccph_sport),
+ inet_iif(skb));
+ if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
@@ -240,6 +266,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
inet_twsk_put(inet_twsk(sk));
return;
}
+ seq = dccp_hdr_seq(dh);
+ if (sk->sk_state == DCCP_NEW_SYN_RECV)
+ return dccp_req_err(sk, seq);
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
@@ -252,7 +281,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
goto out;
dp = dccp_sk(sk);
- seq = dccp_hdr_seq(dh);
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
@@ -289,35 +317,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
}
switch (sk->sk_state) {
- struct request_sock *req , **prev;
- case DCCP_LISTEN:
- if (sock_owned_by_user(sk))
- goto out;
- req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
- iph->daddr, iph->saddr);
- if (!req)
- goto out;
-
- /*
- * ICMPs are not backlogged, hence we cannot get an established
- * socket here.
- */
- WARN_ON(req->sk);
-
- if (!between48(seq, dccp_rsk(req)->dreq_iss,
- dccp_rsk(req)->dreq_gss)) {
- NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- goto out;
- }
- /*
- * Still in RESPOND, just remove it silently.
- * There is no good way to pass the error to the newly
- * created socket, and POSIX does not want network
- * errors returned from accept().
- */
- inet_csk_reqsk_queue_drop(sk, req, prev);
- goto out;
-
case DCCP_REQUESTING:
case DCCP_RESPOND:
if (!sock_owned_by_user(sk)) {
@@ -408,8 +407,8 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
- newinet->inet_daddr = ireq->ir_rmt_addr;
- newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+ sk_daddr_set(newsk, ireq->ir_rmt_addr);
+ sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
newinet->inet_saddr = ireq->ir_loc_addr;
newinet->inet_opt = ireq->opt;
ireq->opt = NULL;
@@ -449,14 +448,15 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
const struct dccp_hdr *dh = dccp_hdr(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *nsk;
- struct request_sock **prev;
/* Find possible connection requests. */
- struct request_sock *req = inet_csk_search_req(sk, &prev,
- dh->dccph_sport,
+ struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport,
iph->saddr, iph->daddr);
- if (req != NULL)
- return dccp_check_req(sk, skb, req, prev);
-
+ if (req) {
+ nsk = dccp_check_req(sk, skb, req);
+ if (!nsk)
+ reqsk_put(req);
+ return nsk;
+ }
nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
iph->saddr, dh->dccph_sport,
iph->daddr, dh->dccph_dport,
@@ -575,7 +575,7 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req)
kfree(inet_rsk(req)->opt);
}
-void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+void dccp_syn_ack_timeout(const struct request_sock *req)
{
}
EXPORT_SYMBOL(dccp_syn_ack_timeout);
@@ -624,7 +624,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
- req = inet_reqsk_alloc(&dccp_request_sock_ops);
+ req = inet_reqsk_alloc(&dccp_request_sock_ops, sk);
if (req == NULL)
goto drop;
@@ -639,8 +639,10 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
ireq = inet_rsk(req);
- ireq->ir_loc_addr = ip_hdr(skb)->daddr;
- ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
+ sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+ ireq->ireq_family = AF_INET;
+ ireq->ir_iif = sk->sk_bound_dev_if;
/*
* Step 3: Process LISTEN state
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6bcaa33cd804..5165571f397a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -40,19 +40,6 @@
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
-static void dccp_v6_hash(struct sock *sk)
-{
- if (sk->sk_state != DCCP_CLOSED) {
- if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
- inet_hash(sk);
- return;
- }
- local_bh_disable();
- __inet6_hash(sk, NULL);
- local_bh_enable();
- }
-}
-
/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
const struct in6_addr *saddr,
@@ -98,11 +85,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
}
- sk = inet6_lookup(net, &dccp_hashinfo,
- &hdr->daddr, dh->dccph_dport,
- &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
+ sk = __inet6_lookup_established(net, &dccp_hashinfo,
+ &hdr->daddr, dh->dccph_dport,
+ &hdr->saddr, ntohs(dh->dccph_sport),
+ inet6_iif(skb));
- if (sk == NULL) {
+ if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
@@ -112,6 +100,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
inet_twsk_put(inet_twsk(sk));
return;
}
+ seq = dccp_hdr_seq(dh);
+ if (sk->sk_state == DCCP_NEW_SYN_RECV)
+ return dccp_req_err(sk, seq);
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
@@ -121,7 +112,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
dp = dccp_sk(sk);
- seq = dccp_hdr_seq(dh);
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
@@ -162,32 +152,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
/* Might be for an request_sock */
switch (sk->sk_state) {
- struct request_sock *req, **prev;
- case DCCP_LISTEN:
- if (sock_owned_by_user(sk))
- goto out;
-
- req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
- &hdr->daddr, &hdr->saddr,
- inet6_iif(skb));
- if (req == NULL)
- goto out;
-
- /*
- * ICMPs are not backlogged, hence we cannot get an established
- * socket here.
- */
- WARN_ON(req->sk != NULL);
-
- if (!between48(seq, dccp_rsk(req)->dreq_iss,
- dccp_rsk(req)->dreq_gss)) {
- NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- goto out;
- }
-
- inet_csk_reqsk_queue_drop(sk, req, prev);
- goto out;
-
case DCCP_REQUESTING:
case DCCP_RESPOND: /* Cannot happen.
It can, it SYNs are crossed. --ANK */
@@ -330,17 +294,17 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct request_sock *req;
struct sock *nsk;
- struct request_sock **prev;
- /* Find possible connection requests. */
- struct request_sock *req = inet6_csk_search_req(sk, &prev,
- dh->dccph_sport,
- &iph->saddr,
- &iph->daddr,
- inet6_iif(skb));
- if (req != NULL)
- return dccp_check_req(sk, skb, req, prev);
+ req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
+ &iph->daddr, inet6_iif(skb));
+ if (req) {
+ nsk = dccp_check_req(sk, skb, req);
+ if (!nsk)
+ reqsk_put(req);
+ return nsk;
+ }
nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
&iph->saddr, dh->dccph_sport,
&iph->daddr, ntohs(dh->dccph_dport),
@@ -386,7 +350,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
- req = inet_reqsk_alloc(&dccp6_request_sock_ops);
+ req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk);
if (req == NULL)
goto drop;
@@ -403,6 +367,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ ireq->ireq_family = AF_INET6;
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -469,11 +434,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
-
- ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
-
- newsk->sk_v6_rcv_saddr = newnp->saddr;
+ newnp->saddr = newsk->sk_v6_rcv_saddr;
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -591,7 +552,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
dccp_done(newsk);
goto out;
}
- __inet6_hash(newsk, NULL);
+ __inet_hash(newsk, NULL);
return newsk;
@@ -916,9 +877,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sk->sk_backlog_rcv = dccp_v6_do_rcv;
goto failure;
}
- ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);
-
+ np->saddr = sk->sk_v6_rcv_saddr;
return err;
}
@@ -1061,7 +1020,7 @@ static struct proto dccp_v6_prot = {
.sendmsg = dccp_sendmsg,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v6_do_rcv,
- .hash = dccp_v6_hash,
+ .hash = inet_hash,
.unhash = inet_unhash,
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index b50dc436db1f..30addee2dd03 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -27,28 +27,16 @@
struct inet_timewait_death_row dccp_death_row = {
.sysctl_max_tw_buckets = NR_FILE * 2,
- .period = DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
- .death_lock = __SPIN_LOCK_UNLOCKED(dccp_death_row.death_lock),
.hashinfo = &dccp_hashinfo,
- .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
- (unsigned long)&dccp_death_row),
- .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
- inet_twdr_twkill_work),
-/* Short-time timewait calendar */
-
- .twcal_hand = -1,
- .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
- (unsigned long)&dccp_death_row),
};
EXPORT_SYMBOL_GPL(dccp_death_row);
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
- struct inet_timewait_sock *tw = NULL;
+ struct inet_timewait_sock *tw;
- if (dccp_death_row.tw_count < dccp_death_row.sysctl_max_tw_buckets)
- tw = inet_twsk_alloc(sk, state);
+ tw = inet_twsk_alloc(sk, &dccp_death_row, state);
if (tw != NULL) {
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -71,8 +59,7 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
if (state == DCCP_TIME_WAIT)
timeo = DCCP_TIMEWAIT_LEN;
- inet_twsk_schedule(tw, &dccp_death_row, timeo,
- DCCP_TIMEWAIT_LEN);
+ inet_twsk_schedule(tw, timeo);
inet_twsk_put(tw);
} else {
/* Sorry, if we're out of memory, just CLOSE this
@@ -152,8 +139,7 @@ EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
* as an request_sock.
*/
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct request_sock **prev)
+ struct request_sock *req)
{
struct sock *child = NULL;
struct dccp_request_sock *dreq = dccp_rsk(req);
@@ -200,8 +186,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
if (child == NULL)
goto listen_overflow;
- inet_csk_reqsk_queue_unlink(sk, req, prev);
- inet_csk_reqsk_queue_removed(sk, req);
+ inet_csk_reqsk_queue_drop(sk, req);
inet_csk_reqsk_queue_add(sk, req, child);
out:
return child;
@@ -212,7 +197,7 @@ drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
req->rsk_ops->send_reset(sk, skb);
- inet_csk_reqsk_queue_drop(sk, req, prev);
+ inet_csk_reqsk_queue_drop(sk, req);
goto out;
}
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 595ddf0459db..d8346d0eadeb 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -72,8 +72,7 @@ static void printl(const char *fmt, ...)
wake_up(&dccpw.wait);
}
-static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t size)
+static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
const struct inet_sock *inet = inet_sk(sk);
struct ccid3_hc_tx_sock *hc = NULL;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index e171b780b499..52a94016526d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -741,8 +741,7 @@ static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
return 0;
}
-int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len)
+int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
const struct dccp_sock *dp = dccp_sk(sk);
const int flags = msg->msg_flags;
@@ -806,8 +805,8 @@ out_discard:
EXPORT_SYMBOL_GPL(dccp_sendmsg);
-int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len)
+int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ int flags, int *addr_len)
{
const struct dccp_hdr *dh;
long timeo;
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1cd46a345cb0..3ef7acef3ce8 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -161,33 +161,11 @@ out:
sock_put(sk);
}
-/*
- * Timer for listening sockets
- */
-static void dccp_response_timer(struct sock *sk)
-{
- inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
- DCCP_RTO_MAX);
-}
-
static void dccp_keepalive_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
- /* Only process if socket is not in use. */
- bh_lock_sock(sk);
- if (sock_owned_by_user(sk)) {
- /* Try again later. */
- inet_csk_reset_keepalive_timer(sk, HZ / 20);
- goto out;
- }
-
- if (sk->sk_state == DCCP_LISTEN) {
- dccp_response_timer(sk);
- goto out;
- }
-out:
- bh_unlock_sock(sk);
+ pr_err("dccp should not use a keepalive timer !\n");
sock_put(sk);
}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 810228646de3..754484b3cd0e 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1669,8 +1669,8 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
}
-static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct dn_scp *scp = DN_SK(sk);
@@ -1905,8 +1905,7 @@ static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
return skb;
}
-static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size)
+static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct dn_scp *scp = DN_SK(sk);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 7ca7c3143da3..4507b188fc51 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -49,41 +49,17 @@
#include <net/dn_route.h>
static int dn_neigh_construct(struct neighbour *);
-static void dn_long_error_report(struct neighbour *, struct sk_buff *);
-static void dn_short_error_report(struct neighbour *, struct sk_buff *);
-static int dn_long_output(struct neighbour *, struct sk_buff *);
-static int dn_short_output(struct neighbour *, struct sk_buff *);
-static int dn_phase3_output(struct neighbour *, struct sk_buff *);
-
-
-/*
- * For talking to broadcast devices: Ethernet & PPP
- */
-static const struct neigh_ops dn_long_ops = {
- .family = AF_DECnet,
- .error_report = dn_long_error_report,
- .output = dn_long_output,
- .connected_output = dn_long_output,
-};
+static void dn_neigh_error_report(struct neighbour *, struct sk_buff *);
+static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb);
/*
- * For talking to pointopoint and multidrop devices: DDCMP and X.25
+ * Operations for adding the link layer header.
*/
-static const struct neigh_ops dn_short_ops = {
+static const struct neigh_ops dn_neigh_ops = {
.family = AF_DECnet,
- .error_report = dn_short_error_report,
- .output = dn_short_output,
- .connected_output = dn_short_output,
-};
-
-/*
- * For talking to DECnet phase III nodes
- */
-static const struct neigh_ops dn_phase3_ops = {
- .family = AF_DECnet,
- .error_report = dn_short_error_report, /* Can use short version here */
- .output = dn_phase3_output,
- .connected_output = dn_phase3_output,
+ .error_report = dn_neigh_error_report,
+ .output = dn_neigh_output,
+ .connected_output = dn_neigh_output,
};
static u32 dn_neigh_hash(const void *pkey,
@@ -93,11 +69,18 @@ static u32 dn_neigh_hash(const void *pkey,
return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]);
}
+static bool dn_key_eq(const struct neighbour *neigh, const void *pkey)
+{
+ return neigh_key_eq16(neigh, pkey);
+}
+
struct neigh_table dn_neigh_table = {
.family = PF_DECnet,
.entry_size = NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)),
.key_len = sizeof(__le16),
+ .protocol = cpu_to_be16(ETH_P_DNA_RT),
.hash = dn_neigh_hash,
+ .key_eq = dn_key_eq,
.constructor = dn_neigh_construct,
.id = "dn_neigh_cache",
.parms ={
@@ -146,16 +129,9 @@ static int dn_neigh_construct(struct neighbour *neigh)
__neigh_parms_put(neigh->parms);
neigh->parms = neigh_parms_clone(parms);
-
- if (dn_db->use_long)
- neigh->ops = &dn_long_ops;
- else
- neigh->ops = &dn_short_ops;
rcu_read_unlock();
- if (dn->flags & DN_NDFLAG_P3)
- neigh->ops = &dn_phase3_ops;
-
+ neigh->ops = &dn_neigh_ops;
neigh->nud_state = NUD_NOARP;
neigh->output = neigh->ops->connected_output;
@@ -187,24 +163,16 @@ static int dn_neigh_construct(struct neighbour *neigh)
return 0;
}
-static void dn_long_error_report(struct neighbour *neigh, struct sk_buff *skb)
-{
- printk(KERN_DEBUG "dn_long_error_report: called\n");
- kfree_skb(skb);
-}
-
-
-static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb)
+static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
- printk(KERN_DEBUG "dn_short_error_report: called\n");
+ printk(KERN_DEBUG "dn_neigh_error_report: called\n");
kfree_skb(skb);
}
-static int dn_neigh_output_packet(struct sk_buff *skb)
+static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct dn_route *rt = (struct dn_route *)dst;
- struct neighbour *neigh = rt->n;
struct net_device *dev = neigh->dev;
char mac_addr[ETH_ALEN];
unsigned int seq;
@@ -226,7 +194,20 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
return err;
}
-static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
+static int dn_neigh_output_packet(struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct dn_route *rt = (struct dn_route *)dst;
+ struct neighbour *neigh = rt->n;
+
+ return neigh->output(neigh, skb);
+}
+
+/*
+ * For talking to broadcast devices: Ethernet & PPP
+ */
+static int dn_long_output(struct neighbour *neigh, struct sock *sk,
+ struct sk_buff *skb)
{
struct net_device *dev = neigh->dev;
int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
@@ -265,11 +246,15 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
- neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
+ NULL, neigh->dev, dn_neigh_output_packet);
}
-static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
+/*
+ * For talking to pointopoint and multidrop devices: DDCMP and X.25
+ */
+static int dn_short_output(struct neighbour *neigh, struct sock *sk,
+ struct sk_buff *skb)
{
struct net_device *dev = neigh->dev;
int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
@@ -301,15 +286,17 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
- neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
+ NULL, neigh->dev, dn_neigh_output_packet);
}
/*
- * Phase 3 output is the same is short output, execpt that
+ * For talking to DECnet phase III nodes
+ * Phase 3 output is the same as short output, except that
* it clears the area bits before transmission.
*/
-static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
+static int dn_phase3_output(struct neighbour *neigh, struct sock *sk,
+ struct sk_buff *skb)
{
struct net_device *dev = neigh->dev;
int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
@@ -340,8 +327,34 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
- neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
+ NULL, neigh->dev, dn_neigh_output_packet);
+}
+
+int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct dn_route *rt = (struct dn_route *) dst;
+ struct neighbour *neigh = rt->n;
+ struct dn_neigh *dn = (struct dn_neigh *)neigh;
+ struct dn_dev *dn_db;
+ bool use_long;
+
+ rcu_read_lock();
+ dn_db = rcu_dereference(neigh->dev->dn_ptr);
+ if (dn_db == NULL) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ use_long = dn_db->use_long;
+ rcu_read_unlock();
+
+ if (dn->flags & DN_NDFLAG_P3)
+ return dn_phase3_output(neigh, sk, skb);
+ if (use_long)
+ return dn_long_output(neigh, sk, skb);
+ else
+ return dn_short_output(neigh, sk, skb);
}
/*
@@ -362,7 +375,7 @@ void dn_neigh_pointopoint_hello(struct sk_buff *skb)
/*
* Ethernet router hello message received
*/
-int dn_neigh_router_hello(struct sk_buff *skb)
+int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb)
{
struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data;
@@ -424,7 +437,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
/*
* Endnode hello message received
*/
-int dn_neigh_endnode_hello(struct sk_buff *skb)
+int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb)
{
struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data;
struct neighbour *neigh;
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index fe5f01485d33..a321eac9fd0c 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -714,7 +714,7 @@ out:
return ret;
}
-static int dn_nsp_rx_packet(struct sk_buff *skb)
+static int dn_nsp_rx_packet(struct sock *sk2, struct sk_buff *skb)
{
struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct sock *sk = NULL;
@@ -814,7 +814,8 @@ free_out:
int dn_nsp_rx(struct sk_buff *skb)
{
- return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, NULL, skb,
+ skb->dev, NULL,
dn_nsp_rx_packet);
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 3b81092771f8..03227ffd19ce 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -136,7 +136,6 @@ int decnet_dst_gc_interval = 2;
static struct dst_ops dn_dst_ops = {
.family = PF_DECnet,
- .protocol = cpu_to_be16(ETH_P_DNA_RT),
.gc_thresh = 128,
.gc = dn_dst_gc,
.check = dn_dst_check,
@@ -513,7 +512,7 @@ static int dn_return_long(struct sk_buff *skb)
*
* Returns: result of input function if route is found, error code otherwise
*/
-static int dn_route_rx_packet(struct sk_buff *skb)
+static int dn_route_rx_packet(struct sock *sk, struct sk_buff *skb)
{
struct dn_skb_cb *cb;
int err;
@@ -574,7 +573,8 @@ static int dn_route_rx_long(struct sk_buff *skb)
ptr++;
cb->hops = *ptr++; /* Visit Count */
- return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
+ skb->dev, NULL,
dn_route_rx_packet);
drop_it:
@@ -601,7 +601,8 @@ static int dn_route_rx_short(struct sk_buff *skb)
ptr += 2;
cb->hops = *ptr & 0x3f;
- return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
+ skb->dev, NULL,
dn_route_rx_packet);
drop_it:
@@ -609,7 +610,7 @@ drop_it:
return NET_RX_DROP;
}
-static int dn_route_discard(struct sk_buff *skb)
+static int dn_route_discard(struct sock *sk, struct sk_buff *skb)
{
/*
* I know we drop the packet here, but that's considered success in
@@ -619,7 +620,7 @@ static int dn_route_discard(struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-static int dn_route_ptp_hello(struct sk_buff *skb)
+static int dn_route_ptp_hello(struct sock *sk, struct sk_buff *skb)
{
dn_dev_hello(skb);
dn_neigh_pointopoint_hello(skb);
@@ -705,22 +706,22 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
switch (flags & DN_RT_CNTL_MSK) {
case DN_RT_PKT_HELO:
return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- skb, skb->dev, NULL,
+ NULL, skb, skb->dev, NULL,
dn_route_ptp_hello);
case DN_RT_PKT_L1RT:
case DN_RT_PKT_L2RT:
return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
- skb, skb->dev, NULL,
+ NULL, skb, skb->dev, NULL,
dn_route_discard);
case DN_RT_PKT_ERTH:
return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- skb, skb->dev, NULL,
+ NULL, skb, skb->dev, NULL,
dn_neigh_router_hello);
case DN_RT_PKT_EEDH:
return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- skb, skb->dev, NULL,
+ NULL, skb, skb->dev, NULL,
dn_neigh_endnode_hello);
}
} else {
@@ -743,15 +744,6 @@ out:
return NET_RX_DROP;
}
-static int dn_to_neigh_output(struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
- struct dn_route *rt = (struct dn_route *) dst;
- struct neighbour *n = rt->n;
-
- return n->output(n, skb);
-}
-
static int dn_output(struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -778,7 +770,8 @@ static int dn_output(struct sock *sk, struct sk_buff *skb)
cb->rt_flags |= DN_RT_F_IE;
cb->hops = 0;
- return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, sk, skb,
+ NULL, dev,
dn_to_neigh_output);
error:
@@ -826,7 +819,8 @@ static int dn_forward(struct sk_buff *skb)
if (rt->rt_flags & RTCF_DOREDIRECT)
cb->rt_flags |= DN_RT_F_IE;
- return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, NULL, skb,
+ dev, skb->dev,
dn_to_neigh_output);
drop:
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index e4d9560a910b..af34fc9bdf69 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -89,9 +89,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
dnrmg_send_peer(skb);
return NF_ACCEPT;
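dnrmg_hook() shows the matching change on the hook side: the separate in/out/okfn arguments collapse into a single const struct nf_hook_state *. A hedged sketch of a hook that still needs those values; the field names (state->in, state->out) are an assumption about the netfilter API of this kernel, not shown in the hunk itself:

	static unsigned int example_hook(const struct nf_hook_ops *ops,
					 struct sk_buff *skb,
					 const struct nf_hook_state *state)
	{
		/* the former parameters are now members of *state */
		if (state->in)
			pr_debug("packet arrived on %s\n", state->in->name);
		return NF_ACCEPT;
	}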
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 5f8ac404535b..ff7736f7ff42 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -5,9 +5,12 @@ config HAVE_NET_DSA
# Drivers must select NET_DSA and the appropriate tagging format
config NET_DSA
- tristate
- depends on HAVE_NET_DSA
+ tristate "Distributed Switch Architecture"
+ depends on HAVE_NET_DSA && NET_SWITCHDEV
select PHYLIB
+ ---help---
+ Say Y if you want to enable support for the hardware switches supported
+ by the Distributed Switch Architecture.
if NET_DSA
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 4dea2e0681d1..e6f6cc3a1bcf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_net.h>
#include <linux/sysfs.h>
#include "dsa_priv.h"
@@ -123,7 +124,7 @@ static ssize_t temp1_max_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store);
+static DEVICE_ATTR_RW(temp1_max);
static ssize_t temp1_max_alarm_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -158,8 +159,8 @@ static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
if (index == 1) {
if (!drv->get_temp_limit)
mode = 0;
- else if (drv->set_temp_limit)
- mode |= S_IWUSR;
+ else if (!drv->set_temp_limit)
+ mode &= ~S_IWUSR;
} else if (index == 2 && !drv->get_temp_alarm) {
mode = 0;
}
@@ -175,43 +176,14 @@ __ATTRIBUTE_GROUPS(dsa_hwmon);
#endif /* CONFIG_NET_DSA_HWMON */
/* basic switch operations **************************************************/
-static struct dsa_switch *
-dsa_switch_setup(struct dsa_switch_tree *dst, int index,
- struct device *parent, struct device *host_dev)
+static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
{
- struct dsa_chip_data *pd = dst->pd->chip + index;
- struct dsa_switch_driver *drv;
- struct dsa_switch *ds;
- int ret;
- char *name;
- int i;
+ struct dsa_switch_driver *drv = ds->drv;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_chip_data *pd = ds->pd;
bool valid_name_found = false;
-
- /*
- * Probe for switch model.
- */
- drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
- if (drv == NULL) {
- netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
- index);
- return ERR_PTR(-EINVAL);
- }
- netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
- index, name);
-
-
- /*
- * Allocate and initialise switch state.
- */
- ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
- if (ds == NULL)
- return ERR_PTR(-ENOMEM);
-
- ds->dst = dst;
- ds->index = index;
- ds->pd = dst->pd->chip + index;
- ds->drv = drv;
- ds->master_dev = host_dev;
+ int index = ds->index;
+ int i, ret;
/*
* Validate supplied switch configuration.
@@ -256,7 +228,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
* switch.
*/
if (dst->cpu_switch == index) {
- switch (drv->tag_protocol) {
+ switch (ds->tag_protocol) {
#ifdef CONFIG_NET_DSA_TAG_DSA
case DSA_TAG_PROTO_DSA:
dst->rcv = dsa_netdev_ops.rcv;
@@ -284,7 +256,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
goto out;
}
- dst->tag_protocol = drv->tag_protocol;
+ dst->tag_protocol = ds->tag_protocol;
}
/*
@@ -314,19 +286,15 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
* Create network devices for physical switch ports.
*/
for (i = 0; i < DSA_MAX_PORTS; i++) {
- struct net_device *slave_dev;
-
if (!(ds->phys_port_mask & (1 << i)))
continue;
- slave_dev = dsa_slave_create(ds, parent, i, pd->port_names[i]);
- if (slave_dev == NULL) {
+ ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
+ if (ret < 0) {
netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n",
index, i, pd->port_names[i]);
- continue;
+ ret = 0;
}
-
- ds->ports[i] = slave_dev;
}
#ifdef CONFIG_NET_DSA_HWMON
@@ -354,13 +322,57 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
}
#endif /* CONFIG_NET_DSA_HWMON */
- return ds;
+ return ret;
out_free:
mdiobus_free(ds->slave_mii_bus);
out:
kfree(ds);
- return ERR_PTR(ret);
+ return ret;
+}
+
+static struct dsa_switch *
+dsa_switch_setup(struct dsa_switch_tree *dst, int index,
+ struct device *parent, struct device *host_dev)
+{
+ struct dsa_chip_data *pd = dst->pd->chip + index;
+ struct dsa_switch_driver *drv;
+ struct dsa_switch *ds;
+ int ret;
+ char *name;
+
+ /*
+ * Probe for switch model.
+ */
+ drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
+ if (drv == NULL) {
+ netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
+ index);
+ return ERR_PTR(-EINVAL);
+ }
+ netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
+ index, name);
+
+
+ /*
+ * Allocate and initialise switch state.
+ */
+ ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+ if (ds == NULL)
+ return NULL;
+
+ ds->dst = dst;
+ ds->index = index;
+ ds->pd = pd;
+ ds->drv = drv;
+ ds->tag_protocol = drv->tag_protocol;
+ ds->master_dev = host_dev;
+
+ ret = dsa_switch_setup_one(ds, parent);
+ if (ret)
+ return NULL;
+
+ return ds;
}
static void dsa_switch_destroy(struct dsa_switch *ds)
@@ -378,7 +390,7 @@ static int dsa_switch_suspend(struct dsa_switch *ds)
/* Suspend slave network devices */
for (i = 0; i < DSA_MAX_PORTS; i++) {
- if (!(ds->phys_port_mask & (1 << i)))
+ if (!dsa_is_port_initialized(ds, i))
continue;
ret = dsa_slave_suspend(ds->ports[i]);
@@ -404,7 +416,7 @@ static int dsa_switch_resume(struct dsa_switch *ds)
/* Resume slave network devices */
for (i = 0; i < DSA_MAX_PORTS; i++) {
- if (!(ds->phys_port_mask & (1 << i)))
+ if (!dsa_is_port_initialized(ds, i))
continue;
ret = dsa_slave_resume(ds->ports[i]);
@@ -558,12 +570,12 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
kfree(pd->chip);
}
-static int dsa_of_probe(struct platform_device *pdev)
+static int dsa_of_probe(struct device *dev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct device_node *child, *mdio, *ethernet, *port, *link;
struct mii_bus *mdio_bus;
- struct platform_device *ethernet_dev;
+ struct net_device *ethernet_dev;
struct dsa_platform_data *pd;
struct dsa_chip_data *cd;
const char *port_name;
@@ -578,22 +590,22 @@ static int dsa_of_probe(struct platform_device *pdev)
mdio_bus = of_mdio_find_bus(mdio);
if (!mdio_bus)
- return -EINVAL;
+ return -EPROBE_DEFER;
ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
if (!ethernet)
return -EINVAL;
- ethernet_dev = of_find_device_by_node(ethernet);
+ ethernet_dev = of_find_net_device_by_node(ethernet);
if (!ethernet_dev)
- return -ENODEV;
+ return -EPROBE_DEFER;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
- pdev->dev.platform_data = pd;
- pd->netdev = &ethernet_dev->dev;
+ dev->platform_data = pd;
+ pd->of_netdev = ethernet_dev;
pd->nr_chips = of_get_available_child_count(np);
if (pd->nr_chips > DSA_MAX_SWITCHES)
pd->nr_chips = DSA_MAX_SWITCHES;
@@ -621,7 +633,7 @@ static int dsa_of_probe(struct platform_device *pdev)
if (cd->sw_addr > PHY_MAX_ADDR)
continue;
- if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
cd->eeprom_len = eeprom_len;
for_each_available_child_of_node(child, port) {
@@ -665,72 +677,35 @@ out_free_chip:
dsa_of_free_platform_data(pd);
out_free:
kfree(pd);
- pdev->dev.platform_data = NULL;
+ dev->platform_data = NULL;
return ret;
}
-static void dsa_of_remove(struct platform_device *pdev)
+static void dsa_of_remove(struct device *dev)
{
- struct dsa_platform_data *pd = pdev->dev.platform_data;
+ struct dsa_platform_data *pd = dev->platform_data;
- if (!pdev->dev.of_node)
+ if (!dev->of_node)
return;
dsa_of_free_platform_data(pd);
kfree(pd);
}
#else
-static inline int dsa_of_probe(struct platform_device *pdev)
+static inline int dsa_of_probe(struct device *dev)
{
return 0;
}
-static inline void dsa_of_remove(struct platform_device *pdev)
+static inline void dsa_of_remove(struct device *dev)
{
}
#endif
-static int dsa_probe(struct platform_device *pdev)
+static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+ struct device *parent, struct dsa_platform_data *pd)
{
- struct dsa_platform_data *pd = pdev->dev.platform_data;
- struct net_device *dev;
- struct dsa_switch_tree *dst;
- int i, ret;
-
- pr_notice_once("Distributed Switch Architecture driver version %s\n",
- dsa_driver_version);
-
- if (pdev->dev.of_node) {
- ret = dsa_of_probe(pdev);
- if (ret)
- return ret;
-
- pd = pdev->dev.platform_data;
- }
-
- if (pd == NULL || pd->netdev == NULL)
- return -EINVAL;
-
- dev = dev_to_net_device(pd->netdev);
- if (dev == NULL) {
- ret = -EINVAL;
- goto out;
- }
-
- if (dev->dsa_ptr != NULL) {
- dev_put(dev);
- ret = -EEXIST;
- goto out;
- }
-
- dst = kzalloc(sizeof(*dst), GFP_KERNEL);
- if (dst == NULL) {
- dev_put(dev);
- ret = -ENOMEM;
- goto out;
- }
-
- platform_set_drvdata(pdev, dst);
+ int i;
dst->pd = pd;
dst->master_netdev = dev;
@@ -740,7 +715,7 @@ static int dsa_probe(struct platform_device *pdev)
for (i = 0; i < pd->nr_chips; i++) {
struct dsa_switch *ds;
- ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev);
+ ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev);
if (IS_ERR(ds)) {
netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n",
i, PTR_ERR(ds));
@@ -768,18 +743,67 @@ static int dsa_probe(struct platform_device *pdev)
dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
add_timer(&dst->link_poll_timer);
}
+}
+
+static int dsa_probe(struct platform_device *pdev)
+{
+ struct dsa_platform_data *pd = pdev->dev.platform_data;
+ struct net_device *dev;
+ struct dsa_switch_tree *dst;
+ int ret;
+
+ pr_notice_once("Distributed Switch Architecture driver version %s\n",
+ dsa_driver_version);
+
+ if (pdev->dev.of_node) {
+ ret = dsa_of_probe(&pdev->dev);
+ if (ret)
+ return ret;
+
+ pd = pdev->dev.platform_data;
+ }
+
+ if (pd == NULL || (pd->netdev == NULL && pd->of_netdev == NULL))
+ return -EINVAL;
+
+ if (pd->of_netdev) {
+ dev = pd->of_netdev;
+ dev_hold(dev);
+ } else {
+ dev = dev_to_net_device(pd->netdev);
+ }
+ if (dev == NULL) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+
+ if (dev->dsa_ptr != NULL) {
+ dev_put(dev);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (dst == NULL) {
+ dev_put(dev);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, dst);
+
+ dsa_setup_dst(dst, dev, &pdev->dev, pd);
return 0;
out:
- dsa_of_remove(pdev);
+ dsa_of_remove(&pdev->dev);
return ret;
}
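dsa_probe() now defers instead of failing hard when its dependencies are not up yet: dsa_of_probe() returns -EPROBE_DEFER while the MDIO bus or master net_device is missing, the probe itself defers when the master cannot be resolved, and a reference is held on the master in both the DT and platform-data paths. A small sketch of the deferral pattern in isolation; example_find_master() is a hypothetical lookup, not a kernel API:

	static int example_probe(struct platform_device *pdev)
	{
		struct net_device *master;

		master = example_find_master(&pdev->dev);  /* hypothetical */
		if (!master)
			return -EPROBE_DEFER;  /* driver core retries later */

		dev_hold(master);              /* hold it while bound */
		platform_set_drvdata(pdev, master);
		return 0;
	}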
-static int dsa_remove(struct platform_device *pdev)
+static void dsa_remove_dst(struct dsa_switch_tree *dst)
{
- struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
int i;
if (dst->link_poll_needed)
@@ -793,8 +817,14 @@ static int dsa_remove(struct platform_device *pdev)
if (ds != NULL)
dsa_switch_destroy(ds);
}
+}
+
+static int dsa_remove(struct platform_device *pdev)
+{
+ struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
- dsa_of_remove(pdev);
+ dsa_remove_dst(dst);
+ dsa_of_remove(&pdev->dev);
return 0;
}
@@ -821,6 +851,10 @@ static struct packet_type dsa_pack_type __read_mostly = {
.func = dsa_switch_rcv,
};
+static struct notifier_block dsa_netdevice_nb __read_mostly = {
+ .notifier_call = dsa_slave_netdevice_event,
+};
+
#ifdef CONFIG_PM_SLEEP
static int dsa_suspend(struct device *d)
{
@@ -879,6 +913,8 @@ static int __init dsa_init_module(void)
{
int rc;
+ register_netdevice_notifier(&dsa_netdevice_nb);
+
rc = platform_driver_register(&dsa_driver);
if (rc)
return rc;
@@ -891,6 +927,7 @@ module_init(dsa_init_module);
static void __exit dsa_cleanup_module(void)
{
+ unregister_netdevice_notifier(&dsa_netdevice_nb);
dev_remove_pack(&dsa_pack_type);
platform_driver_unregister(&dsa_driver);
}
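The module init/exit hunks also register a netdevice notifier (dsa_netdevice_nb) so DSA can observe events such as NETDEV_CHANGEUPPER on its slave ports; the handler itself, dsa_slave_netdevice_event(), is added in slave.c below. The lifecycle follows the stock notifier pattern, roughly:

	static int example_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_CHANGEUPPER)
			netdev_dbg(dev, "upper device changed\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_event,
	};

	/* register_netdevice_notifier(&example_nb) in module init,
	 * unregister_netdevice_notifier(&example_nb) in module exit */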
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index dc9756d3154c..d5f1f9b862ea 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -45,6 +45,8 @@ struct dsa_slave_priv {
int old_link;
int old_pause;
int old_duplex;
+
+ struct net_device *bridge_dev;
};
/* dsa.c */
@@ -53,11 +55,12 @@ extern char dsa_driver_version[];
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
-struct net_device *dsa_slave_create(struct dsa_switch *ds,
- struct device *parent,
- int port, char *name);
+int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
+ int port, char *name);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
+int dsa_slave_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
/* tag_dsa.c */
extern const struct dsa_device_ops dsa_netdev_ops;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index f23deadf42a0..827cda560a55 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -10,10 +10,14 @@
#include <linux/list.h>
#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <net/rtnetlink.h>
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
#include "dsa_priv.h"
/* slave mii_bus handling ***************************************************/
@@ -51,13 +55,16 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
/* slave device handling ****************************************************/
-static int dsa_slave_init(struct net_device *dev)
+static int dsa_slave_get_iflink(const struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- dev->iflink = p->parent->dst->master_netdev->ifindex;
+ return p->parent->dst->master_netdev->ifindex;
+}
- return 0;
+static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
+{
+ return !!p->bridge_dev;
}
static int dsa_slave_open(struct net_device *dev)
@@ -65,6 +72,8 @@ static int dsa_slave_open(struct net_device *dev)
struct dsa_slave_priv *p = netdev_priv(dev);
struct net_device *master = p->parent->dst->master_netdev;
struct dsa_switch *ds = p->parent;
+ u8 stp_state = dsa_port_is_bridged(p) ?
+ BR_STATE_BLOCKING : BR_STATE_FORWARDING;
int err;
if (!(master->flags & IFF_UP))
@@ -93,6 +102,9 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_promisc;
}
+ if (ds->drv->port_stp_update)
+ ds->drv->port_stp_update(ds, p->port, stp_state);
+
if (p->phy)
phy_start(p->phy);
@@ -133,6 +145,9 @@ static int dsa_slave_close(struct net_device *dev)
if (ds->drv->port_disable)
ds->drv->port_disable(ds, p->port, p->phy);
+ if (ds->drv->port_stp_update)
+ ds->drv->port_stp_update(ds, p->port, BR_STATE_DISABLED);
+
return 0;
}
@@ -184,6 +199,105 @@ out:
return 0;
}
+static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid, u16 nlm_flags)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+ if (ds->drv->fdb_add)
+ ret = ds->drv->fdb_add(ds, p->port, addr, vid);
+
+ return ret;
+}
+
+static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+ if (ds->drv->fdb_del)
+ ret = ds->drv->fdb_del(ds, p->port, addr, vid);
+
+ return ret;
+}
+
+static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb,
+ const unsigned char *addr, u16 vid,
+ bool is_static,
+ u32 portid, u32 seq, int type,
+ unsigned int flags)
+{
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_EXT_LEARNED;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dev->ifindex;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+/* Dump information about entries, in response to GETNEIGH */
+static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int idx)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ unsigned char addr[ETH_ALEN] = { 0 };
+ int ret;
+
+ if (!ds->drv->fdb_getnext)
+ return -EOPNOTSUPP;
+
+ for (; ; idx++) {
+ bool is_static;
+
+ ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static);
+ if (ret < 0)
+ break;
+
+ if (idx < cb->args[0])
+ continue;
+
+ ret = dsa_slave_fill_info(dev, skb, addr, 0,
+ is_static,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI);
+ if (ret < 0)
+ break;
+ }
+
+ return idx;
+}
+
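dsa_slave_fdb_dump() iterates the hardware FDB via the driver's fdb_getnext hook and emits one RTM_NEWNEIGH record per entry, skipping entries below cb->args[0] so an interrupted netlink dump resumes correctly. A sketch of what a driver-side fdb_getnext might look like; the private structure and its fields are hypothetical, only the callback signature comes from the code above:

	static int example_fdb_getnext(struct dsa_switch *ds, int port,
				       unsigned char *addr, bool *is_static)
	{
		struct example_priv *priv = ds_to_priv(ds);  /* assumed accessor */

		if (priv->fdb_cursor >= priv->fdb_count)
			return -ENOENT;          /* walk exhausted */

		memcpy(addr, priv->fdb[priv->fdb_cursor].addr, ETH_ALEN);
		*is_static = priv->fdb[priv->fdb_cursor].is_static;
		priv->fdb_cursor++;
		return 0;
	}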
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -194,6 +308,92 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
+/* Return a bitmask of all ports being currently bridged within a given bridge
+ * device. Note that on leave, the mask will still return the bitmask of ports
+ * currently bridged, prior to port removal, and this is exactly what we want.
+ */
+static u32 dsa_slave_br_port_mask(struct dsa_switch *ds,
+ struct net_device *bridge)
+{
+ struct dsa_slave_priv *p;
+ unsigned int port;
+ u32 mask = 0;
+
+ for (port = 0; port < DSA_MAX_PORTS; port++) {
+ if (!dsa_is_port_initialized(ds, port))
+ continue;
+
+ p = netdev_priv(ds->ports[port]);
+
+ if (ds->ports[port]->priv_flags & IFF_BRIDGE_PORT &&
+ p->bridge_dev == bridge)
+ mask |= 1 << port;
+ }
+
+ return mask;
+}
+
+static int dsa_slave_stp_update(struct net_device *dev, u8 state)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+ if (ds->drv->port_stp_update)
+ ret = ds->drv->port_stp_update(ds, p->port, state);
+
+ return ret;
+}
+
+static int dsa_slave_bridge_port_join(struct net_device *dev,
+ struct net_device *br)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+ p->bridge_dev = br;
+
+ if (ds->drv->port_join_bridge)
+ ret = ds->drv->port_join_bridge(ds, p->port,
+ dsa_slave_br_port_mask(ds, br));
+
+ return ret;
+}
+
+static int dsa_slave_bridge_port_leave(struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+
+ if (ds->drv->port_leave_bridge)
+ ret = ds->drv->port_leave_bridge(ds, p->port,
+ dsa_slave_br_port_mask(ds, p->bridge_dev));
+
+ p->bridge_dev = NULL;
+
+ /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
+ * so allow it to be in BR_STATE_FORWARDING to be kept functional
+ */
+ dsa_slave_stp_update(dev, BR_STATE_FORWARDING);
+
+ return ret;
+}
+
+static int dsa_slave_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ psid->id_len = sizeof(ds->index);
+ memcpy(&psid->id, &ds->index, psid->id_len);
+
+ return 0;
+}
+
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -462,14 +662,22 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
};
static const struct net_device_ops dsa_slave_netdev_ops = {
- .ndo_init = dsa_slave_init,
.ndo_open = dsa_slave_open,
.ndo_stop = dsa_slave_close,
.ndo_start_xmit = dsa_slave_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_fdb_add = dsa_slave_fdb_add,
+ .ndo_fdb_del = dsa_slave_fdb_del,
+ .ndo_fdb_dump = dsa_slave_fdb_dump,
.ndo_do_ioctl = dsa_slave_ioctl,
+ .ndo_get_iflink = dsa_slave_get_iflink,
+};
+
+static const struct swdev_ops dsa_slave_swdev_ops = {
+ .swdev_parent_id_get = dsa_slave_parent_id_get,
+ .swdev_port_stp_update = dsa_slave_stp_update,
};
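dsa_slave_parent_id_get() fills the switchdev physical-item ID with the switch index, so every port of one chip reports the same parent ID and upper layers can tell which netdevs share a piece of hardware. An illustrative check built on that contract (not from this patch; it assumes both devices expose swdev_ops):

	static bool example_same_switch(struct net_device *a, struct net_device *b)
	{
		struct netdev_phys_item_id ida, idb;

		if (a->swdev_ops->swdev_parent_id_get(a, &ida) ||
		    b->swdev_ops->swdev_parent_id_get(b, &idb))
			return false;

		return ida.id_len == idb.id_len &&
		       !memcmp(ida.id, idb.id, ida.id_len);
	}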
static void dsa_slave_adjust_link(struct net_device *dev)
@@ -513,6 +721,24 @@ static int dsa_slave_fixed_link_update(struct net_device *dev,
}
/* slave device setup *******************************************************/
+static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
+ struct net_device *slave_dev,
+ int addr)
+{
+ struct dsa_switch *ds = p->parent;
+
+ p->phy = ds->slave_mii_bus->phy_map[addr];
+ if (!p->phy)
+ return -ENODEV;
+
+ /* Use already configured phy mode */
+ p->phy_interface = p->phy->interface;
+ phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+ p->phy_interface);
+
+ return 0;
+}
+
static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
struct net_device *slave_dev)
{
@@ -546,10 +772,25 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
if (ds->drv->get_phy_flags)
phy_flags = ds->drv->get_phy_flags(ds, p->port);
- if (phy_dn)
- p->phy = of_phy_connect(slave_dev, phy_dn,
- dsa_slave_adjust_link, phy_flags,
- p->phy_interface);
+ if (phy_dn) {
+ ret = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
+ /* If this PHY address is part of phys_mii_mask, which means
+ * that we need to divert reads and writes to/from it, then we
+ * want to bind this device using the slave MII bus created by
+ * DSA to make that happen.
+ */
+ if (!phy_is_fixed && ret >= 0 &&
+ (ds->phys_mii_mask & (1 << ret))) {
+ ret = dsa_slave_phy_connect(p, slave_dev, ret);
+ if (ret)
+ return ret;
+ } else {
+ p->phy = of_phy_connect(slave_dev, phy_dn,
+ dsa_slave_adjust_link,
+ phy_flags,
+ p->phy_interface);
+ }
+ }
if (p->phy && phy_is_fixed)
fixed_phy_set_link_update(p->phy, dsa_slave_fixed_link_update);
@@ -558,14 +799,9 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
* MDIO bus instead
*/
if (!p->phy) {
- p->phy = ds->slave_mii_bus->phy_map[p->port];
- if (!p->phy)
- return -ENODEV;
-
- /* Use already configured phy mode */
- p->phy_interface = p->phy->interface;
- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
- p->phy_interface);
+ ret = dsa_slave_phy_connect(p, slave_dev, p->port);
+ if (ret)
+ return ret;
} else {
netdev_info(slave_dev, "attached PHY at address %d [%s]\n",
p->phy->addr, p->phy->drv->name);
@@ -605,9 +841,8 @@ int dsa_slave_resume(struct net_device *slave_dev)
return 0;
}
-struct net_device *
-dsa_slave_create(struct dsa_switch *ds, struct device *parent,
- int port, char *name)
+int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
+ int port, char *name)
{
struct net_device *master = ds->dst->master_netdev;
struct net_device *slave_dev;
@@ -617,13 +852,14 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
NET_NAME_UNKNOWN, ether_setup);
if (slave_dev == NULL)
- return slave_dev;
+ return -ENOMEM;
slave_dev->features = master->vlan_features;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
slave_dev->tx_queue_len = 0;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
+ slave_dev->swdev_ops = &dsa_slave_swdev_ops;
SET_NETDEV_DEV(slave_dev, parent);
slave_dev->dev.of_node = ds->pd->port_dn[port];
@@ -667,19 +903,64 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
ret = dsa_slave_phy_setup(p, slave_dev);
if (ret) {
free_netdev(slave_dev);
- return NULL;
+ return ret;
}
+ ds->ports[port] = slave_dev;
ret = register_netdev(slave_dev);
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
ret, slave_dev->name);
phy_disconnect(p->phy);
+ ds->ports[port] = NULL;
free_netdev(slave_dev);
- return NULL;
+ return ret;
}
netif_carrier_off(slave_dev);
- return slave_dev;
+ return 0;
+}
+
+static bool dsa_slave_dev_check(struct net_device *dev)
+{
+ return dev->netdev_ops == &dsa_slave_netdev_ops;
+}
+
+static int dsa_slave_master_changed(struct net_device *dev)
+{
+ struct net_device *master = netdev_master_upper_dev_get(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ int err = 0;
+
+ if (master && master->rtnl_link_ops &&
+ !strcmp(master->rtnl_link_ops->kind, "bridge"))
+ err = dsa_slave_bridge_port_join(dev, master);
+ else if (dsa_port_is_bridged(p))
+ err = dsa_slave_bridge_port_leave(dev);
+
+ return err;
+}
+
+int dsa_slave_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ dev = netdev_notifier_info_to_dev(ptr);
+ if (!dsa_slave_dev_check(dev))
+ goto out;
+
+ err = dsa_slave_master_changed(dev);
+ if (err)
+ netdev_warn(dev, "failed to reflect master change\n");
+
+ break;
+ }
+
+out:
+ return NOTIFY_DONE;
}
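On NETDEV_CHANGEUPPER the notifier checks whether the new master is a bridge (via its rtnl_link_ops kind) and calls dsa_slave_bridge_port_join() or _leave(), which hand the driver a bitmask of sibling ports in that bridge. A hypothetical driver-side port_join_bridge matching the callback signature used above; example_write_pvlan_mask() is an assumed register-write helper:

	static int example_port_join_bridge(struct dsa_switch *ds, int port,
					    u32 br_port_mask)
	{
		/* let the port forward to every other member of the bridge */
		u32 allowed = br_port_mask & ~BIT(port);

		return example_write_pvlan_mask(ds, port, allowed);
	}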
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 238f38d21641..f3bad41d725f 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -104,7 +104,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
*/
if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
- memset(eth->h_dest, 0, ETH_ALEN);
+ eth_zero_addr(eth->h_dest);
return ETH_HLEN;
}
@@ -113,39 +113,6 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL(eth_header);
/**
- * eth_rebuild_header- rebuild the Ethernet MAC header.
- * @skb: socket buffer to update
- *
- * This is called after an ARP or IPV6 ndisc it's resolution on this
- * sk_buff. We now let protocol (ARP) fill in the other fields.
- *
- * This routine CANNOT use cached dst->neigh!
- * Really, it is used only when dst->neigh is wrong.
- */
-int eth_rebuild_header(struct sk_buff *skb)
-{
- struct ethhdr *eth = (struct ethhdr *)skb->data;
- struct net_device *dev = skb->dev;
-
- switch (eth->h_proto) {
-#ifdef CONFIG_INET
- case htons(ETH_P_IP):
- return arp_find(eth->h_dest, skb);
-#endif
- default:
- netdev_dbg(dev,
- "%s: unable to resolve type %X addresses.\n",
- dev->name, ntohs(eth->h_proto));
-
- memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(eth_rebuild_header);
-
-/**
* eth_get_headlen - determine the length of header for an ethernet frame
* @data: pointer to start of frame
* @len: total length of frame
@@ -369,7 +336,6 @@ EXPORT_SYMBOL(eth_validate_addr);
const struct header_ops eth_header_ops ____cacheline_aligned = {
.create = eth_header,
.parse = eth_header_parse,
- .rebuild = eth_rebuild_header,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
};
@@ -391,7 +357,7 @@ void ether_setup(struct net_device *dev)
dev->flags = IFF_BROADCAST|IFF_MULTICAST;
dev->priv_flags |= IFF_TX_SKB_SHARING;
- memset(dev->broadcast, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(dev->broadcast);
}
EXPORT_SYMBOL(ether_setup);
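The eth.c hunks swap open-coded memset() calls for the etherdevice.h helpers; assuming the usual definitions, eth_zero_addr() and eth_broadcast_addr() are thin ETH_ALEN-wide memset wrappers, so behaviour does not change:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	static void example_addr_helpers(struct net_device *dev, struct ethhdr *eth)
	{
		eth_zero_addr(eth->h_dest);          /* memset(eth->h_dest, 0, ETH_ALEN) */
		eth_broadcast_addr(dev->broadcast);  /* memset(dev->broadcast, 0xff, ETH_ALEN) */
	}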
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 055fbb71ba6f..0ae5822ef944 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -113,7 +113,7 @@ static void lowpan_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->type = ARPHRD_IEEE802154;
+ dev->type = ARPHRD_6LOWPAN;
/* Frame Control + Sequence Number + Address fields + Security Header */
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
@@ -126,6 +126,7 @@ static void lowpan_setup(struct net_device *dev)
dev->header_ops = &lowpan_header_ops;
dev->ml_priv = &lowpan_mlme;
dev->destructor = free_netdev;
+ dev->features |= NETIF_F_NETNS_LOCAL;
}
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -148,10 +149,11 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
pr_debug("adding new link\n");
- if (!tb[IFLA_LINK])
+ if (!tb[IFLA_LINK] ||
+ !net_eq(dev_net(dev), &init_net))
return -EINVAL;
/* find and hold real wpan device */
- real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
if (real_dev->type != ARPHRD_IEEE802154) {
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 05dab2957cd4..4adfd4d5471b 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -3,7 +3,9 @@ obj-$(CONFIG_IEEE802154_SOCKET) += ieee802154_socket.o
obj-y += 6lowpan/
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \
- header_ops.o sysfs.o nl802154.o
+ header_ops.o sysfs.o nl802154.o trace.o
ieee802154_socket-y := socket.o
+CFLAGS_trace.o := -I$(src)
+
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index 18bc7e738507..2ee00e8a0308 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -25,6 +25,9 @@
#include "sysfs.h"
#include "core.h"
+/* name for sysfs, %d is appended */
+#define PHY_NAME "phy"
+
/* RCU-protected (and RTNL for writers) */
LIST_HEAD(cfg802154_rdev_list);
int cfg802154_rdev_list_generation;
@@ -122,7 +125,7 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&rdev->wpan_dev_list);
device_initialize(&rdev->wpan_phy.dev);
- dev_set_name(&rdev->wpan_phy.dev, "wpan-phy%d", rdev->wpan_phy_idx);
+ dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
rdev->wpan_phy.dev.class = &wpan_phy_class;
rdev->wpan_phy.dev.platform_data = rdev;
@@ -225,6 +228,7 @@ static int cfg802154_netdev_notifier_call(struct notifier_block *nb,
switch (state) {
/* TODO NETDEV_DEVTYPE */
case NETDEV_REGISTER:
+ dev->features |= NETIF_F_NETNS_LOCAL;
wpan_dev->identifier = ++rdev->wpan_dev_id;
list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list);
rdev->devlist_generation++;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 9105265920fe..2b4955d7aae5 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -76,7 +76,6 @@ nla_put_failure:
nlmsg_free(msg);
return -ENOBUFS;
}
-EXPORT_SYMBOL(ieee802154_nl_start_confirm);
static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
u32 seq, int flags, struct net_device *dev)
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 1b9d25f6e898..346c6665d25e 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -175,6 +175,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
int rc = -ENOBUFS;
struct net_device *dev;
int type = __IEEE802154_DEV_INVALID;
+ unsigned char name_assign_type;
pr_debug("%s\n", __func__);
@@ -190,8 +191,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
!= '\0')
return -EINVAL; /* phy name should be null-terminated */
+ name_assign_type = NET_NAME_USER;
} else {
devname = "wpan%d";
+ name_assign_type = NET_NAME_ENUM;
}
if (strlen(devname) >= IFNAMSIZ)
@@ -221,7 +224,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
}
dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname,
- type);
+ name_assign_type, type);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
goto nla_put_failure;
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index a4daf91b8d0a..f3c12f6a4a39 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -589,7 +589,7 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
return rdev_add_virtual_intf(rdev,
nla_data(info->attrs[NL802154_ATTR_IFNAME]),
- type, extended_addr);
+ NET_NAME_USER, type, extended_addr);
}
static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 7c46732fad2b..7b5a9dd94fe5 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -4,13 +4,16 @@
#include <net/cfg802154.h>
#include "core.h"
+#include "trace.h"
static inline struct net_device *
rdev_add_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
- const char *name, int type)
+ const char *name,
+ unsigned char name_assign_type,
+ int type)
{
return rdev->ops->add_virtual_intf_deprecated(&rdev->wpan_phy, name,
- type);
+ name_assign_type, type);
}
static inline void
@@ -22,75 +25,131 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
static inline int
rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
+ unsigned char name_assign_type,
enum nl802154_iftype type, __le64 extended_addr)
{
- return rdev->ops->add_virtual_intf(&rdev->wpan_phy, name, type,
+ int ret;
+
+ trace_802154_rdev_add_virtual_intf(&rdev->wpan_phy, name, type,
extended_addr);
+ ret = rdev->ops->add_virtual_intf(&rdev->wpan_phy, name,
+ name_assign_type, type,
+ extended_addr);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_del_virtual_intf(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev)
{
- return rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev);
+ int ret;
+
+ trace_802154_rdev_del_virtual_intf(&rdev->wpan_phy, wpan_dev);
+ ret = rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel)
{
- return rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
+ int ret;
+
+ trace_802154_rdev_set_channel(&rdev->wpan_phy, page, channel);
+ ret = rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
const struct wpan_phy_cca *cca)
{
- return rdev->ops->set_cca_mode(&rdev->wpan_phy, cca);
+ int ret;
+
+ trace_802154_rdev_set_cca_mode(&rdev->wpan_phy, cca);
+ ret = rdev->ops->set_cca_mode(&rdev->wpan_phy, cca);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_pan_id(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le16 pan_id)
{
- return rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
+ int ret;
+
+ trace_802154_rdev_set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
+ ret = rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_short_addr(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le16 short_addr)
{
- return rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
+ int ret;
+
+ trace_802154_rdev_set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
+ ret = rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_backoff_exponent(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, u8 min_be, u8 max_be)
{
- return rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
+ int ret;
+
+ trace_802154_rdev_set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
min_be, max_be);
+ ret = rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
+ min_be, max_be);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_max_csma_backoffs(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, u8 max_csma_backoffs)
{
- return rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev,
- max_csma_backoffs);
+ int ret;
+
+ trace_802154_rdev_set_csma_backoffs(&rdev->wpan_phy, wpan_dev,
+ max_csma_backoffs);
+ ret = rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev,
+ max_csma_backoffs);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_max_frame_retries(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, s8 max_frame_retries)
{
- return rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
+ int ret;
+
+ trace_802154_rdev_set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
max_frame_retries);
+ ret = rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
+ max_frame_retries);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
static inline int
rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, bool mode)
{
- return rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
+ int ret;
+
+ trace_802154_rdev_set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
+ ret = rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
}
#endif /* __CFG802154_RDEV_OPS */
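Each rdev_*() wrapper now follows the same three-step shape: trace the call, invoke the driver op, trace the integer return. A later op would presumably be wired up the same way; in the sketch below both the trace event and the set_example op are hypothetical, only trace_802154_rdev_return_int() comes from this patch:

	static inline int
	rdev_set_example(struct cfg802154_registered_device *rdev,
			 struct wpan_dev *wpan_dev, u8 value)
	{
		int ret;

		trace_802154_rdev_set_example(&rdev->wpan_phy, wpan_dev, value);
		ret = rdev->ops->set_example(&rdev->wpan_phy, wpan_dev, value);
		trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
		return ret;
	}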
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 2878d8ca6d3b..b60c65f70346 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -98,12 +98,12 @@ static int ieee802154_sock_release(struct socket *sock)
return 0;
}
-static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int ieee802154_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
- return sk->sk_prot->sendmsg(iocb, sk, msg, len);
+ return sk->sk_prot->sendmsg(sk, msg, len);
}
static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
@@ -255,8 +255,7 @@ static int raw_disconnect(struct sock *sk, int flags)
return 0;
}
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t size)
+static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct net_device *dev;
unsigned int mtu;
@@ -327,8 +326,8 @@ out:
return err;
}
-static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len)
+static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
@@ -615,8 +614,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
return 0;
}
-static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t size)
+static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct net_device *dev;
unsigned int mtu;
@@ -715,9 +713,8 @@ out:
return err;
}
-static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
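These socket hunks drop the unused struct kiocb * argument from sendmsg/recvmsg, tracking a tree-wide change to struct proto; callers such as ieee802154_sock_sendmsg() simply stop passing it. The resulting protocol-level shape, with illustrative names:

	static int example_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
	{
		return -EOPNOTSUPP;   /* a real protocol builds and queues an skb here */
	}

	static int example_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
				   int noblock, int flags, int *addr_len)
	{
		return -EOPNOTSUPP;
	}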
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index dff55c2d87f3..133b4280660c 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -48,49 +48,6 @@ static ssize_t name_show(struct device *dev,
}
static DEVICE_ATTR_RO(name);
-#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
-static ssize_t name ## _show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
- int ret; \
- \
- mutex_lock(&phy->pib_lock); \
- ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
- mutex_unlock(&phy->pib_lock); \
- return ret; \
-} \
-static DEVICE_ATTR_RO(name)
-
-#define MASTER_SHOW(field, format_string) \
- MASTER_SHOW_COMPLEX(field, format_string, phy->field)
-
-MASTER_SHOW(current_channel, "%d");
-MASTER_SHOW(current_page, "%d");
-MASTER_SHOW(transmit_power, "%d +- 1 dB");
-MASTER_SHOW_COMPLEX(cca_mode, "%d", phy->cca.mode);
-
-static ssize_t channels_supported_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
- int ret;
- int i, len = 0;
-
- mutex_lock(&phy->pib_lock);
- for (i = 0; i < 32; i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len,
- "%#09x\n", phy->channels_supported[i]);
- if (ret < 0)
- break;
- len += ret;
- }
- mutex_unlock(&phy->pib_lock);
- return len;
-}
-static DEVICE_ATTR_RO(channels_supported);
-
static void wpan_phy_release(struct device *dev)
{
struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
@@ -101,12 +58,6 @@ static void wpan_phy_release(struct device *dev)
static struct attribute *pmib_attrs[] = {
&dev_attr_index.attr,
&dev_attr_name.attr,
- /* below will be removed soon */
- &dev_attr_current_channel.attr,
- &dev_attr_current_page.attr,
- &dev_attr_channels_supported.attr,
- &dev_attr_transmit_power.attr,
- &dev_attr_cca_mode.attr,
NULL,
};
ATTRIBUTE_GROUPS(pmib);
diff --git a/net/ieee802154/trace.c b/net/ieee802154/trace.c
new file mode 100644
index 000000000000..95f997fad755
--- /dev/null
+++ b/net/ieee802154/trace.c
@@ -0,0 +1,7 @@
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
new file mode 100644
index 000000000000..5ac25eb6ed17
--- /dev/null
+++ b/net/ieee802154/trace.h
@@ -0,0 +1,247 @@
+/* Based on net/wireless/tracing.h */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cfg802154
+
+#if !defined(__RDEV_CFG802154_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __RDEV_CFG802154_OPS_TRACE
+
+#include <linux/tracepoint.h>
+
+#include <net/cfg802154.h>
+
+#define MAXNAME 32
+#define WPAN_PHY_ENTRY __array(char, wpan_phy_name, MAXNAME)
+#define WPAN_PHY_ASSIGN strlcpy(__entry->wpan_phy_name, \
+ wpan_phy_name(wpan_phy), \
+ MAXNAME)
+#define WPAN_PHY_PR_FMT "%s"
+#define WPAN_PHY_PR_ARG __entry->wpan_phy_name
+
+#define WPAN_DEV_ENTRY __field(u32, identifier)
+#define WPAN_DEV_ASSIGN (__entry->identifier) = (!IS_ERR_OR_NULL(wpan_dev) \
+ ? wpan_dev->identifier : 0)
+#define WPAN_DEV_PR_FMT "wpan_dev(%u)"
+#define WPAN_DEV_PR_ARG (__entry->identifier)
+
+#define WPAN_CCA_ENTRY __field(enum nl802154_cca_modes, cca_mode) \
+ __field(enum nl802154_cca_opts, cca_opt)
+#define WPAN_CCA_ASSIGN \
+ do { \
+ (__entry->cca_mode) = cca->mode; \
+ (__entry->cca_opt) = cca->opt; \
+ } while (0)
+#define WPAN_CCA_PR_FMT "cca_mode: %d, cca_opt: %d"
+#define WPAN_CCA_PR_ARG __entry->cca_mode, __entry->cca_opt
+
+#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
+
+/*************************************************************
+ * rdev->ops traces *
+ *************************************************************/
+
+TRACE_EVENT(802154_rdev_add_virtual_intf,
+ TP_PROTO(struct wpan_phy *wpan_phy, char *name,
+ enum nl802154_iftype type, __le64 extended_addr),
+ TP_ARGS(wpan_phy, name, type, extended_addr),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __string(vir_intf_name, name ? name : "<noname>")
+ __field(enum nl802154_iftype, type)
+ __field(__le64, extended_addr)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __assign_str(vir_intf_name, name ? name : "<noname>");
+ __entry->type = type;
+ __entry->extended_addr = extended_addr;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, ea %llx",
+ WPAN_PHY_PR_ARG, __get_str(vir_intf_name), __entry->type,
+ __le64_to_cpu(__entry->extended_addr))
+);
+
+TRACE_EVENT(802154_rdev_del_virtual_intf,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev),
+ TP_ARGS(wpan_phy, wpan_dev),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT, WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG)
+);
+
+TRACE_EVENT(802154_rdev_set_channel,
+ TP_PROTO(struct wpan_phy *wpan_phy, u8 page, u8 channel),
+ TP_ARGS(wpan_phy, page, channel),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(u8, page)
+ __field(u8, channel)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->page = page;
+ __entry->channel = channel;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", page: %d, channel: %d", WPAN_PHY_PR_ARG,
+ __entry->page, __entry->channel)
+);
+
+TRACE_EVENT(802154_rdev_set_cca_mode,
+ TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
+ TP_ARGS(wpan_phy, cca),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_CCA_ENTRY
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_CCA_ASSIGN;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_CCA_PR_FMT, WPAN_PHY_PR_ARG,
+ WPAN_CCA_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(802154_le16_template,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ __le16 le16arg),
+ TP_ARGS(wpan_phy, wpan_dev, le16arg),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(__le16, le16arg)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->le16arg = le16arg;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", pan id: 0x%04x",
+ WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
+ __le16_to_cpu(__entry->le16arg))
+);
+
+DEFINE_EVENT(802154_le16_template, 802154_rdev_set_pan_id,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ __le16 le16arg),
+ TP_ARGS(wpan_phy, wpan_dev, le16arg)
+);
+
+DEFINE_EVENT_PRINT(802154_le16_template, 802154_rdev_set_short_addr,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ __le16 le16arg),
+ TP_ARGS(wpan_phy, wpan_dev, le16arg),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", sa: 0x%04x",
+ WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
+ __le16_to_cpu(__entry->le16arg))
+);
+
+TRACE_EVENT(802154_rdev_set_backoff_exponent,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ u8 min_be, u8 max_be),
+ TP_ARGS(wpan_phy, wpan_dev, min_be, max_be),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(u8, min_be)
+ __field(u8, max_be)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->min_be = min_be;
+ __entry->max_be = max_be;
+ ),
+
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+ ", min be: %d, max_be: %d", WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG, __entry->min_be, __entry->max_be)
+);
+
+TRACE_EVENT(802154_rdev_set_csma_backoffs,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ u8 max_csma_backoffs),
+ TP_ARGS(wpan_phy, wpan_dev, max_csma_backoffs),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(u8, max_csma_backoffs)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->max_csma_backoffs = max_csma_backoffs;
+ ),
+
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+ ", max csma backoffs: %d", WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG, __entry->max_csma_backoffs)
+);
+
+TRACE_EVENT(802154_rdev_set_max_frame_retries,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ s8 max_frame_retries),
+ TP_ARGS(wpan_phy, wpan_dev, max_frame_retries),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(s8, max_frame_retries)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->max_frame_retries = max_frame_retries;
+ ),
+
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+ ", max frame retries: %d", WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG, __entry->max_frame_retries)
+);
+
+TRACE_EVENT(802154_rdev_set_lbt_mode,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ bool mode),
+ TP_ARGS(wpan_phy, wpan_dev, mode),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(bool, mode)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->mode = mode;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+ ", lbt mode: %s", WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
+);
+
+TRACE_EVENT(802154_rdev_return_int,
+ TP_PROTO(struct wpan_phy *wpan_phy, int ret),
+ TP_ARGS(wpan_phy, ret),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->ret = ret;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", returned: %d", WPAN_PHY_PR_ARG,
+ __entry->ret)
+);
+
+#endif /* !__RDEV_CFG802154_OPS_TRACE || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d2e49baaff63..8b47a4d79d04 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
* shutdown() (rather than close()).
*/
if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
- inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+ !inet_csk(sk)->icsk_accept_queue.fastopenq) {
if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
err = fastopen_init_queue(sk, backlog);
else if ((sysctl_tcp_fastopen &
@@ -314,11 +314,11 @@ lookup_protocol:
answer_flags = answer->flags;
rcu_read_unlock();
- WARN_ON(answer_prot->slab == NULL);
+ WARN_ON(!answer_prot->slab);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
- if (sk == NULL)
+ if (!sk)
goto out;
err = 0;
@@ -716,8 +716,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
}
EXPORT_SYMBOL(inet_getname);
-int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size)
+int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
@@ -728,7 +727,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
inet_autobind(sk))
return -EAGAIN;
- return sk->sk_prot->sendmsg(iocb, sk, msg, size);
+ return sk->sk_prot->sendmsg(sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);
@@ -750,8 +749,8 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
}
EXPORT_SYMBOL(inet_sendpage);
-int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int flags)
+int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
int addr_len = 0;
@@ -759,7 +758,7 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
sock_rps_record_flow(sk);
- err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
flags & ~MSG_DONTWAIT, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
@@ -1270,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (udpfrag) {
iph->id = htons(id);
iph->frag_off = htons(offset >> 3);
- if (skb->next != NULL)
+ if (skb->next)
iph->frag_off |= htons(IP_MF);
offset += skb->len - nhoff - ihl;
} else {
@@ -1675,7 +1674,7 @@ static int __init inet_init(void)
struct list_head *r;
int rc = -EINVAL;
- BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
+ sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
rc = proto_register(&tcp_prot, 1);
if (rc)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 205e1472aa78..933a92820d26 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -122,6 +122,7 @@
* Interface to generic neighbour cache.
*/
static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd);
+static bool arp_key_eq(const struct neighbour *n, const void *pkey);
static int arp_constructor(struct neighbour *neigh);
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -149,18 +150,12 @@ static const struct neigh_ops arp_direct_ops = {
.connected_output = neigh_direct_output,
};
-static const struct neigh_ops arp_broken_ops = {
- .family = AF_INET,
- .solicit = arp_solicit,
- .error_report = arp_error_report,
- .output = neigh_compat_output,
- .connected_output = neigh_compat_output,
-};
-
struct neigh_table arp_tbl = {
.family = AF_INET,
.key_len = 4,
+ .protocol = cpu_to_be16(ETH_P_IP),
.hash = arp_hash,
+ .key_eq = arp_key_eq,
.constructor = arp_constructor,
.proxy_redo = parp_redo,
.id = "arp_cache",
@@ -216,7 +211,12 @@ static u32 arp_hash(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd)
{
- return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd);
+ return arp_hashfn(pkey, dev, hash_rnd);
+}
+
+static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
+{
+ return neigh_key_eq32(neigh, pkey);
}
static int arp_constructor(struct neighbour *neigh)
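arp_tbl now supplies a protocol value plus per-table hash and key_eq callbacks: arp_hash() forwards the key pointer and hash seed unmodified, and arp_key_eq() compares the 32-bit IPv4 key through neigh_key_eq32(), presumably so the generic neighbour code can compare keys inline instead of using a per-family memcmp. Roughly what that helper amounts to (an assumption about include/net/neighbour.h, not part of this hunk):

	static inline bool example_key_eq32(const struct neighbour *n, const void *pkey)
	{
		return *(const u32 *)n->primary_key == *(const u32 *)pkey;
	}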
@@ -228,7 +228,7 @@ static int arp_constructor(struct neighbour *neigh)
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
- if (in_dev == NULL) {
+ if (!in_dev) {
rcu_read_unlock();
return -EINVAL;
}
@@ -260,35 +260,6 @@ static int arp_constructor(struct neighbour *neigh)
in old paradigm.
*/
-#if 1
- /* So... these "amateur" devices are hopeless.
- The only thing, that I can say now:
- It is very sad that we need to keep ugly obsolete
- code to make them happy.
-
- They should be moved to more reasonable state, now
- they use rebuild_header INSTEAD OF hard_start_xmit!!!
- Besides that, they are sort of out of date
- (a lot of redundant clones/copies, useless in 2.1),
- I wonder why people believe that they work.
- */
- switch (dev->type) {
- default:
- break;
- case ARPHRD_ROSE:
-#if IS_ENABLED(CONFIG_AX25)
- case ARPHRD_AX25:
-#if IS_ENABLED(CONFIG_NETROM)
- case ARPHRD_NETROM:
-#endif
- neigh->ops = &arp_broken_ops;
- neigh->output = neigh->ops->output;
- return 0;
-#else
- break;
-#endif
- }
-#endif
if (neigh->type == RTN_MULTICAST) {
neigh->nud_state = NUD_NOARP;
arp_mc_map(addr, neigh->ha, dev, 1);
@@ -433,71 +404,6 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
return flag;
}
-/* OBSOLETE FUNCTIONS */
-
-/*
- * Find an arp mapping in the cache. If not found, post a request.
- *
- * It is very UGLY routine: it DOES NOT use skb->dst->neighbour,
- * even if it exists. It is supposed that skb->dev was mangled
- * by a virtual device (eql, shaper). Nobody but broken devices
- * is allowed to use this function, it is scheduled to be removed. --ANK
- */
-
-static int arp_set_predefined(int addr_hint, unsigned char *haddr,
- __be32 paddr, struct net_device *dev)
-{
- switch (addr_hint) {
- case RTN_LOCAL:
- pr_debug("arp called for own IP address\n");
- memcpy(haddr, dev->dev_addr, dev->addr_len);
- return 1;
- case RTN_MULTICAST:
- arp_mc_map(paddr, haddr, dev, 1);
- return 1;
- case RTN_BROADCAST:
- memcpy(haddr, dev->broadcast, dev->addr_len);
- return 1;
- }
- return 0;
-}
-
-
-int arp_find(unsigned char *haddr, struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- __be32 paddr;
- struct neighbour *n;
-
- if (!skb_dst(skb)) {
- pr_debug("arp_find is called with dst==NULL\n");
- kfree_skb(skb);
- return 1;
- }
-
- paddr = rt_nexthop(skb_rtable(skb), ip_hdr(skb)->daddr);
- if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
- paddr, dev))
- return 0;
-
- n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
-
- if (n) {
- n->used = jiffies;
- if (n->nud_state & NUD_VALID || neigh_event_send(n, skb) == 0) {
- neigh_ha_snapshot(haddr, n, dev);
- neigh_release(n);
- return 0;
- }
- neigh_release(n);
- } else
- kfree_skb(skb);
- return 1;
-}
-EXPORT_SYMBOL(arp_find);
-
-/* END OF OBSOLETE FUNCTIONS */
-
/*
* Check if we can use proxy ARP for this path
*/
@@ -569,7 +475,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
*/
/*
- * Create an arp packet. If (dest_hw == NULL), we create a broadcast
+ * Create an arp packet. If dest_hw is not set, we create a broadcast
* message.
*/
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
@@ -589,7 +495,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
*/
skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
return NULL;
skb_reserve(skb, hlen);
@@ -597,9 +503,9 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
skb->dev = dev;
skb->protocol = htons(ETH_P_ARP);
- if (src_hw == NULL)
+ if (!src_hw)
src_hw = dev->dev_addr;
- if (dest_hw == NULL)
+ if (!dest_hw)
dest_hw = dev->broadcast;
/*
@@ -663,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
break;
#endif
default:
- if (target_hw != NULL)
+ if (target_hw)
memcpy(arp_ptr, target_hw, dev->addr_len);
else
memset(arp_ptr, 0, dev->addr_len);
@@ -685,7 +591,8 @@ EXPORT_SYMBOL(arp_create);
void arp_xmit(struct sk_buff *skb)
{
/* Send it off, maybe filter it using firewalling first. */
- NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit);
+ NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, NULL, skb,
+ NULL, skb->dev, dev_queue_xmit_sk);
}
EXPORT_SYMBOL(arp_xmit);
@@ -708,7 +615,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
skb = arp_create(type, ptype, dest_ip, dev, src_ip,
dest_hw, src_hw, target_hw);
- if (skb == NULL)
+ if (!skb)
return;
arp_xmit(skb);
@@ -719,7 +626,7 @@ EXPORT_SYMBOL(arp_send);
* Process an arp request.
*/
-static int arp_process(struct sk_buff *skb)
+static int arp_process(struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -738,7 +645,7 @@ static int arp_process(struct sk_buff *skb)
* is ARP'able.
*/
- if (in_dev == NULL)
+ if (!in_dev)
goto out;
arp = arp_hdr(skb);
@@ -902,7 +809,7 @@ static int arp_process(struct sk_buff *skb)
is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
inet_addr_type(net, sip) == RTN_UNICAST;
- if (n == NULL &&
+ if (!n &&
((arp->ar_op == htons(ARPOP_REPLY) &&
inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
@@ -940,7 +847,7 @@ out:
static void parp_redo(struct sk_buff *skb)
{
- arp_process(skb);
+ arp_process(NULL, skb);
}
@@ -973,7 +880,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
- return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
+ return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, NULL, skb,
+ dev, NULL, arp_process);
consumeskb:
consume_skb(skb);
@@ -994,7 +902,7 @@ out_of_mem:
static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
{
- if (dev == NULL) {
+ if (!dev) {
IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
return 0;
}
@@ -1020,7 +928,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
return -ENODEV;
}
if (mask) {
- if (pneigh_lookup(&arp_tbl, net, &ip, dev, 1) == NULL)
+ if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
return -ENOBUFS;
return 0;
}
@@ -1041,7 +949,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
if (r->arp_flags & ATF_PERM)
r->arp_flags |= ATF_COM;
- if (dev == NULL) {
+ if (!dev) {
struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
if (IS_ERR(rt))
@@ -1161,7 +1069,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
return arp_req_delete_public(net, r, dev);
ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
- if (dev == NULL) {
+ if (!dev) {
struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -1210,7 +1118,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (r.arp_dev[0]) {
err = -ENODEV;
dev = __dev_get_by_name(net, r.arp_dev);
- if (dev == NULL)
+ if (!dev)
goto out;
/* Mmmm... It is wrong... ARPHRD_NETROM==0 */
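
The arp.c hunks above drop arp_broken_ops, give arp_tbl an explicit .key_eq callback, and change arp_hash() to take the key and hash seed by pointer, with arp_key_eq() delegating to neigh_key_eq32(). A minimal userspace sketch of that callback shape (hypothetical names, not the kernel neigh_table API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical table ops mirroring the ->hash/->key_eq pair: the table
 * core only sees opaque keys, so both callbacks take pointers. */
struct tbl_ops {
	uint32_t (*hash)(const void *pkey, const uint32_t *hash_rnd);
	bool (*key_eq)(const void *entry_key, const void *pkey);
};

/* IPv4 keys are 4 bytes, so compare them as one 32-bit value. */
static bool key_eq32(const void *entry_key, const void *pkey)
{
	uint32_t a, b;

	memcpy(&a, entry_key, sizeof(a));
	memcpy(&b, pkey, sizeof(b));
	return a == b;
}

/* Toy hash; the kernel uses arp_hashfn(), this only shows the shape. */
static uint32_t hash32(const void *pkey, const uint32_t *hash_rnd)
{
	uint32_t k;

	memcpy(&k, pkey, sizeof(k));
	return (k * 2654435761u) ^ *hash_rnd;
}

int main(void)
{
	static const struct tbl_ops ops = { .hash = hash32, .key_eq = key_eq32 };
	uint32_t seed = 0x12345678, ip = 0x0a000001;	/* 10.0.0.1 */

	printf("bucket=%u match=%d\n",
	       ops.hash(&ip, &seed) & 0xff, ops.key_eq(&ip, &ip));
	return 0;
}
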
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index e361ea6f3fc8..bdb2a07ec363 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -255,7 +255,7 @@ static int __init cipso_v4_cache_init(void)
cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
sizeof(struct cipso_v4_map_cache_bkt),
GFP_KERNEL);
- if (cipso_v4_cache == NULL)
+ if (!cipso_v4_cache)
return -ENOMEM;
for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
@@ -339,7 +339,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
secattr->cache = entry->lsm_data;
secattr->flags |= NETLBL_SECATTR_CACHE;
secattr->type = NETLBL_NLTYPE_CIPSOV4;
- if (prev_entry == NULL) {
+ if (!prev_entry) {
spin_unlock_bh(&cipso_v4_cache[bkt].lock);
return 0;
}
@@ -393,10 +393,10 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
cipso_ptr_len = cipso_ptr[1];
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (entry == NULL)
+ if (!entry)
return -ENOMEM;
entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
- if (entry->key == NULL) {
+ if (!entry->key) {
ret_val = -ENOMEM;
goto cache_add_failure;
}
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
atomic_set(&doi_def->refcount, 1);
spin_lock(&cipso_v4_doi_list_lock);
- if (cipso_v4_doi_search(doi_def->doi) != NULL) {
+ if (cipso_v4_doi_search(doi_def->doi)) {
spin_unlock(&cipso_v4_doi_list_lock);
ret_val = -EEXIST;
goto doi_add_return;
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
doi_add_return:
audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
- if (audit_buf != NULL) {
+ if (audit_buf) {
const char *type_str;
switch (doi_type) {
case CIPSO_V4_MAP_TRANS:
@@ -547,7 +547,7 @@ doi_add_return:
*/
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
- if (doi_def == NULL)
+ if (!doi_def)
return;
switch (doi_def->type) {
@@ -598,7 +598,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
spin_lock(&cipso_v4_doi_list_lock);
doi_def = cipso_v4_doi_search(doi);
- if (doi_def == NULL) {
+ if (!doi_def) {
spin_unlock(&cipso_v4_doi_list_lock);
ret_val = -ENOENT;
goto doi_remove_return;
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
doi_remove_return:
audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
- if (audit_buf != NULL) {
+ if (audit_buf) {
audit_log_format(audit_buf,
" cipso_doi=%u res=%u",
doi, ret_val == 0 ? 1 : 0);
@@ -644,7 +644,7 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
rcu_read_lock();
doi_def = cipso_v4_doi_search(doi);
- if (doi_def == NULL)
+ if (!doi_def)
goto doi_getdef_return;
if (!atomic_inc_not_zero(&doi_def->refcount))
doi_def = NULL;
@@ -664,7 +664,7 @@ doi_getdef_return:
*/
void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
{
- if (doi_def == NULL)
+ if (!doi_def)
return;
if (!atomic_dec_and_test(&doi_def->refcount))
@@ -1642,7 +1642,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
rcu_read_lock();
doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
- if (doi_def == NULL) {
+ if (!doi_def) {
err_offset = 2;
goto validate_return_locked;
}
@@ -1736,7 +1736,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
* not the loopback device drop the packet. Further,
* there is no legitimate reason for setting this from
* userspace so reject it if skb is NULL. */
- if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
+ if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
err_offset = opt_iter;
goto validate_return_locked;
}
@@ -1897,7 +1897,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
* defined yet but it is not a problem as the only users of these
* "lite" PF_INET sockets are functions which do an accept() call
* afterwards so we will label the socket as part of the accept(). */
- if (sk == NULL)
+ if (!sk)
return 0;
/* We allocate the maximum CIPSO option size here so we are probably
@@ -1905,7 +1905,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
* on and after all we are only talking about 40 bytes. */
buf_len = CIPSO_V4_OPT_LEN_MAX;
buf = kmalloc(buf_len, GFP_ATOMIC);
- if (buf == NULL) {
+ if (!buf) {
ret_val = -ENOMEM;
goto socket_setattr_failure;
}
@@ -1921,7 +1921,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
* set the IPOPT_CIPSO option. */
opt_len = (buf_len + 3) & ~3;
opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
- if (opt == NULL) {
+ if (!opt) {
ret_val = -ENOMEM;
goto socket_setattr_failure;
}
@@ -1981,7 +1981,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
* on and after all we are only talking about 40 bytes. */
buf_len = CIPSO_V4_OPT_LEN_MAX;
buf = kmalloc(buf_len, GFP_ATOMIC);
- if (buf == NULL) {
+ if (!buf) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
@@ -1997,7 +1997,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
* set the IPOPT_CIPSO option. */
opt_len = (buf_len + 3) & ~3;
opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
- if (opt == NULL) {
+ if (!opt) {
ret_val = -ENOMEM;
goto req_setattr_failure;
}
@@ -2102,7 +2102,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
sk_inet = inet_sk(sk);
opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
- if (opt == NULL || opt->opt.cipso == 0)
+ if (!opt || opt->opt.cipso == 0)
return;
hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
@@ -2128,7 +2128,7 @@ void cipso_v4_req_delattr(struct request_sock *req)
req_inet = inet_rsk(req);
opt = req_inet->opt;
- if (opt == NULL || opt->opt.cipso == 0)
+ if (!opt || opt->opt.cipso == 0)
return;
cipso_v4_delopt(&req_inet->opt);
@@ -2157,7 +2157,7 @@ int cipso_v4_getattr(const unsigned char *cipso,
doi = get_unaligned_be32(&cipso[2]);
rcu_read_lock();
doi_def = cipso_v4_doi_search(doi);
- if (doi_def == NULL)
+ if (!doi_def)
goto getattr_return;
/* XXX - This code assumes only one tag per CIPSO option which isn't
* really a good assumption to make but since we only support the MAC
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 3a8985c94581..419d23c53ec7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -107,7 +107,7 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
-static u32 inet_addr_hash(struct net *net, __be32 addr)
+static u32 inet_addr_hash(const struct net *net, __be32 addr)
{
u32 val = (__force u32) addr ^ net_hash_mix(net);
@@ -548,6 +548,26 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
return NULL;
}
+static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
+{
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ifa->ifa_address,
+ .imr_ifindex = ifa->ifa_dev->dev->ifindex,
+ };
+ int ret;
+
+ ASSERT_RTNL();
+
+ lock_sock(sk);
+ if (join)
+ ret = ip_mc_join_group(sk, &mreq);
+ else
+ ret = ip_mc_leave_group(sk, &mreq);
+ release_sock(sk);
+
+ return ret;
+}
+
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
@@ -565,7 +585,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
ifm = nlmsg_data(nlh);
in_dev = inetdev_by_index(net, ifm->ifa_index);
- if (in_dev == NULL) {
+ if (!in_dev) {
err = -ENODEV;
goto errout;
}
@@ -573,7 +593,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next) {
if (tb[IFA_LOCAL] &&
- ifa->ifa_local != nla_get_be32(tb[IFA_LOCAL]))
+ ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
continue;
if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
@@ -581,9 +601,11 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
if (tb[IFA_ADDRESS] &&
(ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
- !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
+ !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
continue;
+ if (ipv4_is_multicast(ifa->ifa_address))
+ ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
return 0;
}
@@ -733,21 +755,21 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ifm = nlmsg_data(nlh);
err = -EINVAL;
- if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
+ if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
goto errout;
dev = __dev_get_by_index(net, ifm->ifa_index);
err = -ENODEV;
- if (dev == NULL)
+ if (!dev)
goto errout;
in_dev = __in_dev_get_rtnl(dev);
err = -ENOBUFS;
- if (in_dev == NULL)
+ if (!in_dev)
goto errout;
ifa = inet_alloc_ifa();
- if (ifa == NULL)
+ if (!ifa)
/*
* A potential indev allocation can be left alive, it stays
* assigned to its device and is destroy with it.
@@ -758,7 +780,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
neigh_parms_data_state_setall(in_dev->arp_parms);
in_dev_hold(in_dev);
- if (tb[IFA_ADDRESS] == NULL)
+ if (!tb[IFA_ADDRESS])
tb[IFA_ADDRESS] = tb[IFA_LOCAL];
INIT_HLIST_NODE(&ifa->hash);
@@ -769,11 +791,11 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ifa->ifa_scope = ifm->ifa_scope;
ifa->ifa_dev = in_dev;
- ifa->ifa_local = nla_get_be32(tb[IFA_LOCAL]);
- ifa->ifa_address = nla_get_be32(tb[IFA_ADDRESS]);
+ ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
+ ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
if (tb[IFA_BROADCAST])
- ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);
+ ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
if (tb[IFA_LABEL])
nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
@@ -838,6 +860,15 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
* userspace already relies on not having to provide this.
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+ if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
+ int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
+ true, ifa);
+
+ if (ret < 0) {
+ inet_free_ifa(ifa);
+ return ret;
+ }
+ }
return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
} else {
inet_free_ifa(ifa);
@@ -1259,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
__be32 addr = 0;
struct net_device *dev;
- if (in_dev != NULL)
+ if (in_dev)
return confirm_addr_indev(in_dev, dst, local, scope);
rcu_read_lock();
@@ -1309,7 +1340,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
if (named++ == 0)
goto skip;
dot = strchr(old, ':');
- if (dot == NULL) {
+ if (!dot) {
sprintf(old, ":%d", named);
dot = old;
}
@@ -1478,7 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
u32 preferred, valid;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
ifm = nlmsg_data(nlh);
@@ -1510,11 +1541,11 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
valid = INFINITY_LIFE_TIME;
}
if ((ifa->ifa_address &&
- nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+ nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
(ifa->ifa_local &&
- nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
+ nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
(ifa->ifa_broadcast &&
- nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
+ nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
(ifa->ifa_label[0] &&
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
@@ -1597,7 +1628,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
net = dev_net(ifa->ifa_dev->dev);
skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
@@ -1634,7 +1665,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
return -ENODATA;
nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
- if (nla == NULL)
+ if (!nla)
return -EMSGSIZE;
for (i = 0; i < IPV4_DEVCONF_MAX; i++)
@@ -1723,7 +1754,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
ncm = nlmsg_data(nlh);
@@ -1765,7 +1796,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
int err = -ENOBUFS;
skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -1822,10 +1853,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
break;
default:
dev = __dev_get_by_index(net, ifindex);
- if (dev == NULL)
+ if (!dev)
goto errout;
in_dev = __in_dev_get_rtnl(dev);
- if (in_dev == NULL)
+ if (!in_dev)
goto errout;
devconf = &in_dev->cnf;
break;
@@ -1833,7 +1864,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
err = -ENOBUFS;
skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet_netconf_fill_devconf(skb, ifindex, devconf,
@@ -2184,7 +2215,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
{
struct devinet_sysctl_table *t = cnf->sysctl;
- if (t == NULL)
+ if (!t)
return;
cnf->sysctl = NULL;
@@ -2245,16 +2276,16 @@ static __net_init int devinet_init_net(struct net *net)
if (!net_eq(net, &init_net)) {
all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
- if (all == NULL)
+ if (!all)
goto err_alloc_all;
dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
- if (dflt == NULL)
+ if (!dflt)
goto err_alloc_dflt;
#ifdef CONFIG_SYSCTL
tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
- if (tbl == NULL)
+ if (!tbl)
goto err_alloc_ctl;
tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
@@ -2274,7 +2305,7 @@ static __net_init int devinet_init_net(struct net *net)
err = -ENOMEM;
forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
- if (forw_hdr == NULL)
+ if (!forw_hdr)
goto err_reg_ctl;
net->ipv4.forw_hdr = forw_hdr;
#endif
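
The new ip_mc_config() helper above joins or leaves the multicast group for an address carrying IFA_F_MCAUTOJOIN, using the per-namespace mc_autojoin_sk socket and an ifindex-based struct ip_mreqn. The closest userspace equivalent is an IP_ADD_MEMBERSHIP/IP_DROP_MEMBERSHIP setsockopt; a minimal sketch using the plain struct ip_mreq form (the group address is a placeholder):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ip_mreq mreq;	/* the kernel helper uses the ifindex-based ip_mreqn */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&mreq, 0, sizeof(mreq));
	inet_pton(AF_INET, "239.1.1.1", &mreq.imr_multiaddr);	/* placeholder group */
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);		/* let routing pick */

	/* join; IP_DROP_MEMBERSHIP with the same argument leaves again */
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
		perror("IP_ADD_MEMBERSHIP");

	close(fd);
	return 0;
}
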
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 60173d4d3a0e..421a80b09b62 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -553,7 +553,7 @@ static int esp_init_authenc(struct xfrm_state *x)
int err;
err = -EINVAL;
- if (x->ealg == NULL)
+ if (!x->ealg)
goto error;
err = -ENAMETOOLONG;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 23b9b3e86f4c..872494e6e6eb 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -52,12 +52,12 @@ static int __net_init fib4_rules_init(struct net *net)
{
struct fib_table *local_table, *main_table;
- local_table = fib_trie_table(RT_TABLE_LOCAL);
- if (local_table == NULL)
+ main_table = fib_trie_table(RT_TABLE_MAIN, NULL);
+ if (!main_table)
return -ENOMEM;
- main_table = fib_trie_table(RT_TABLE_MAIN);
- if (main_table == NULL)
+ local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
+ if (!local_table)
goto fail;
hlist_add_head_rcu(&local_table->tb_hlist,
@@ -67,14 +67,14 @@ static int __net_init fib4_rules_init(struct net *net)
return 0;
fail:
- fib_free_table(local_table);
+ fib_free_table(main_table);
return -ENOMEM;
}
#else
struct fib_table *fib_new_table(struct net *net, u32 id)
{
- struct fib_table *tb;
+ struct fib_table *tb, *alias = NULL;
unsigned int h;
if (id == 0)
@@ -83,23 +83,23 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
if (tb)
return tb;
- tb = fib_trie_table(id);
+ if (id == RT_TABLE_LOCAL)
+ alias = fib_new_table(net, RT_TABLE_MAIN);
+
+ tb = fib_trie_table(id, alias);
if (!tb)
return NULL;
switch (id) {
case RT_TABLE_LOCAL:
- net->ipv4.fib_local = tb;
+ rcu_assign_pointer(net->ipv4.fib_local, tb);
break;
-
case RT_TABLE_MAIN:
- net->ipv4.fib_main = tb;
+ rcu_assign_pointer(net->ipv4.fib_main, tb);
break;
-
case RT_TABLE_DEFAULT:
- net->ipv4.fib_default = tb;
+ rcu_assign_pointer(net->ipv4.fib_default, tb);
break;
-
default:
break;
}
@@ -129,16 +129,62 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
+static void fib_replace_table(struct net *net, struct fib_table *old,
+ struct fib_table *new)
+{
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ switch (new->tb_id) {
+ case RT_TABLE_LOCAL:
+ rcu_assign_pointer(net->ipv4.fib_local, new);
+ break;
+ case RT_TABLE_MAIN:
+ rcu_assign_pointer(net->ipv4.fib_main, new);
+ break;
+ case RT_TABLE_DEFAULT:
+ rcu_assign_pointer(net->ipv4.fib_default, new);
+ break;
+ default:
+ break;
+ }
+
+#endif
+ /* replace the old table in the hlist */
+ hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist);
+}
+
+int fib_unmerge(struct net *net)
+{
+ struct fib_table *old, *new;
+
+ /* attempt to fetch local table if it has been allocated */
+ old = fib_get_table(net, RT_TABLE_LOCAL);
+ if (!old)
+ return 0;
+
+ new = fib_trie_unmerge(old);
+ if (!new)
+ return -ENOMEM;
+
+ /* replace merged table with clean table */
+ if (new != old) {
+ fib_replace_table(net, old, new);
+ fib_free_table(old);
+ }
+
+ return 0;
+}
+
static void fib_flush(struct net *net)
{
int flushed = 0;
- struct fib_table *tb;
- struct hlist_head *head;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
- head = &net->ipv4.fib_table_hash[h];
- hlist_for_each_entry(tb, head, tb_hlist)
+ struct hlist_head *head = &net->ipv4.fib_table_hash[h];
+ struct hlist_node *tmp;
+ struct fib_table *tb;
+
+ hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
flushed += fib_table_flush(tb);
}
@@ -146,6 +192,19 @@ static void fib_flush(struct net *net)
rt_cache_flush(net);
}
+void fib_flush_external(struct net *net)
+{
+ struct fib_table *tb;
+ struct hlist_head *head;
+ unsigned int h;
+
+ for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
+ head = &net->ipv4.fib_table_hash[h];
+ hlist_for_each_entry(tb, head, tb_hlist)
+ fib_table_flush_external(tb);
+ }
+}
+
/*
* Find address type as if only "dev" was present in the system. If
* on_dev is NULL then all interfaces are taken into consideration.
@@ -427,7 +486,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
if (strcmp(ifa->ifa_label, devname) == 0)
break;
- if (ifa == NULL)
+ if (!ifa)
return -ENODEV;
cfg->fc_prefsrc = ifa->ifa_local;
}
@@ -455,7 +514,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
int len = 0;
mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
- if (mx == NULL)
+ if (!mx)
return -ENOMEM;
if (rt->rt_flags & RTF_MTU)
@@ -617,7 +676,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
goto errout;
tb = fib_get_table(net, cfg.fc_table);
- if (tb == NULL) {
+ if (!tb) {
err = -ESRCH;
goto errout;
}
@@ -639,7 +698,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
goto errout;
tb = fib_new_table(net, cfg.fc_table);
- if (tb == NULL) {
+ if (!tb) {
err = -ENOBUFS;
goto errout;
}
@@ -665,10 +724,12 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
s_h = cb->args[0];
s_e = cb->args[1];
+ rcu_read_lock();
+
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
- hlist_for_each_entry(tb, head, tb_hlist) {
+ hlist_for_each_entry_rcu(tb, head, tb_hlist) {
if (e < s_e)
goto next;
if (dumped)
@@ -682,6 +743,8 @@ next:
}
}
out:
+ rcu_read_unlock();
+
cb->args[1] = e;
cb->args[0] = h;
@@ -716,7 +779,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
else
tb = fib_new_table(net, RT_TABLE_LOCAL);
- if (tb == NULL)
+ if (!tb)
return;
cfg.fc_table = tb->tb_id;
@@ -743,7 +806,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, prefix, mask);
- if (prim == NULL) {
+ if (!prim) {
pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
@@ -797,7 +860,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
- if (prim == NULL) {
+ if (!prim) {
pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
@@ -967,7 +1030,7 @@ static void nl_fib_input(struct sk_buff *skb)
return;
skb = netlink_skb_clone(skb, GFP_KERNEL);
- if (skb == NULL)
+ if (!skb)
return;
nlh = nlmsg_hdr(skb);
@@ -988,7 +1051,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
};
sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
- if (sk == NULL)
+ if (!sk)
return -EAFNOSUPPORT;
net->ipv4.fibnl = sk;
return 0;
@@ -1026,7 +1089,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
case NETDEV_DOWN:
fib_del_ifaddr(ifa, NULL);
atomic_inc(&net->ipv4.dev_addr_genid);
- if (ifa->ifa_dev->ifa_list == NULL) {
+ if (!ifa->ifa_dev->ifa_list) {
/* Last address was deleted from this interface.
* Disable IP.
*/
@@ -1094,7 +1157,7 @@ static int __net_init ip_fib_net_init(struct net *net)
size = max_t(size_t, size, L1_CACHE_BYTES);
net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
- if (net->ipv4.fib_table_hash == NULL)
+ if (!net->ipv4.fib_table_hash)
return -ENOMEM;
err = fib4_rules_init(net);
@@ -1113,20 +1176,25 @@ static void ip_fib_net_exit(struct net *net)
rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
- fib4_rules_exit(net);
+ RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
+ RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
+ RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
- struct fib_table *tb;
- struct hlist_head *head;
+ struct hlist_head *head = &net->ipv4.fib_table_hash[i];
struct hlist_node *tmp;
+ struct fib_table *tb;
- head = &net->ipv4.fib_table_hash[i];
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
fib_table_flush(tb);
fib_free_table(tb);
}
}
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ fib4_rules_exit(net);
+#endif
rtnl_unlock();
kfree(net->ipv4.fib_table_hash);
}
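
fib_unmerge() and fib_replace_table() above swap a live table by publishing the new pointer with rcu_assign_pointer()/hlist_replace_rcu() and only then freeing the old table, so RCU readers observe either the old or the new table, never a torn state. As a rough userspace analogy for the publish side only (not RCU itself, and not the kernel API), rcu_assign_pointer() behaves like a C11 release store:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct table {
	int id;
};

/* The writer publishes a fully initialised object with a release store;
 * readers pick it up with an acquire load. The grace period / deferred
 * free is the part this sketch deliberately leaves out. */
static _Atomic(struct table *) current_tbl;

static void publish(struct table *new_tbl)
{
	atomic_store_explicit(&current_tbl, new_tbl, memory_order_release);
}

static struct table *lookup(void)
{
	return atomic_load_explicit(&current_tbl, memory_order_acquire);
}

int main(void)
{
	struct table *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	t->id = 254;		/* RT_TABLE_MAIN */
	publish(t);
	printf("table %d\n", lookup()->id);
	free(t);
	return 0;
}
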
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 825981b1049a..c6211ed60b03 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -6,11 +6,13 @@
#include <net/ip_fib.h>
struct fib_alias {
- struct list_head fa_list;
+ struct hlist_node fa_list;
struct fib_info *fa_info;
u8 fa_tos;
u8 fa_type;
u8 fa_state;
+ u8 fa_slen;
+ u32 tb_id;
struct rcu_head rcu;
};
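
struct fib_alias now hangs off an hlist_node instead of a list_head: an hlist head is a single pointer, which shrinks the per-leaf overhead, while the node keeps a pprev back-pointer so unlinking stays O(1) without walking the chain. A minimal userspace re-implementation of that shape (not the kernel's hlist macros):

#include <stddef.h>
#include <stdio.h>

struct hnode {
	struct hnode *next;
	struct hnode **pprev;	/* address of the pointer that points at us */
};

struct hhead {
	struct hnode *first;	/* one pointer per head, vs. two for list_head */
};

static void hnode_add_head(struct hnode *n, struct hhead *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hnode_del(struct hnode *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct hhead head = { NULL };
	struct hnode a, b;

	hnode_add_head(&a, &head);
	hnode_add_head(&b, &head);	/* list is now b -> a */
	hnode_del(&b);			/* back to just a */
	printf("first == &a: %d\n", head.first == &a);
	return 0;
}
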
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index d3db718be51d..56151982f74e 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -153,7 +153,7 @@ static struct fib_table *fib_empty_table(struct net *net)
u32 id;
for (id = 1; id <= RT_TABLE_MAX; id++)
- if (fib_get_table(net, id) == NULL)
+ if (!fib_get_table(net, id))
return fib_new_table(net, id);
return NULL;
}
@@ -174,12 +174,17 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (frh->tos & ~IPTOS_TOS_MASK)
goto errout;
+ /* split local/main if they are not already split */
+ err = fib_unmerge(net);
+ if (err)
+ goto errout;
+
if (rule->table == RT_TABLE_UNSPEC) {
if (rule->action == FR_ACT_TO_TBL) {
struct fib_table *table;
table = fib_empty_table(net);
- if (table == NULL) {
+ if (!table) {
err = -ENOBUFS;
goto errout;
}
@@ -189,10 +194,10 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
}
if (frh->src_len)
- rule4->src = nla_get_be32(tb[FRA_SRC]);
+ rule4->src = nla_get_in_addr(tb[FRA_SRC]);
if (frh->dst_len)
- rule4->dst = nla_get_be32(tb[FRA_DST]);
+ rule4->dst = nla_get_in_addr(tb[FRA_DST]);
#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW]) {
@@ -209,21 +214,31 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
rule4->tos = frh->tos;
net->ipv4.fib_has_custom_rules = true;
+ fib_flush_external(rule->fr_net);
+
err = 0;
errout:
return err;
}
-static void fib4_rule_delete(struct fib_rule *rule)
+static int fib4_rule_delete(struct fib_rule *rule)
{
struct net *net = rule->fr_net;
-#ifdef CONFIG_IP_ROUTE_CLASSID
- struct fib4_rule *rule4 = (struct fib4_rule *) rule;
+ int err;
- if (rule4->tclassid)
+ /* split local/main if they are not already split */
+ err = fib_unmerge(net);
+ if (err)
+ goto errout;
+
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (((struct fib4_rule *)rule)->tclassid)
net->ipv4.fib_num_tclassid_users--;
#endif
net->ipv4.fib_has_custom_rules = true;
+ fib_flush_external(rule->fr_net);
+errout:
+ return err;
}
static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
@@ -245,10 +260,10 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
return 0;
#endif
- if (frh->src_len && (rule4->src != nla_get_be32(tb[FRA_SRC])))
+ if (frh->src_len && (rule4->src != nla_get_in_addr(tb[FRA_SRC])))
return 0;
- if (frh->dst_len && (rule4->dst != nla_get_be32(tb[FRA_DST])))
+ if (frh->dst_len && (rule4->dst != nla_get_in_addr(tb[FRA_DST])))
return 0;
return 1;
@@ -264,9 +279,9 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->tos = rule4->tos;
if ((rule4->dst_len &&
- nla_put_be32(skb, FRA_DST, rule4->dst)) ||
+ nla_put_in_addr(skb, FRA_DST, rule4->dst)) ||
(rule4->src_len &&
- nla_put_be32(skb, FRA_SRC, rule4->src)))
+ nla_put_in_addr(skb, FRA_SRC, rule4->src)))
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (rule4->tclassid &&
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 1e2090ea663e..8d695b6659c7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -213,7 +213,6 @@ static void free_fib_info_rcu(struct rcu_head *head)
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
- release_net(fi->fib_net);
if (fi->fib_metrics != (u32 *) dst_default_metrics)
kfree(fi->fib_metrics);
kfree(fi);
@@ -391,7 +390,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
int err = -ENOBUFS;
skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = fib_dump_info(skb, info->portid, seq, event, tb_id,
@@ -469,7 +468,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
- nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
+ nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
@@ -504,7 +503,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (cfg->fc_mp == NULL)
+ if (!cfg->fc_mp)
return 0;
rtnh = cfg->fc_mp;
@@ -524,7 +523,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
- if (nla && nla_get_be32(nla) != nh->nh_gw)
+ if (nla && nla_get_in_addr(nla) != nh->nh_gw)
return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
@@ -647,7 +646,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
rcu_read_lock();
err = -ENODEV;
in_dev = inetdev_by_index(net, nh->nh_oif);
- if (in_dev == NULL)
+ if (!in_dev)
goto out;
err = -ENETDOWN;
if (!(in_dev->dev->flags & IFF_UP))
@@ -804,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
}
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
- if (fi == NULL)
+ if (!fi)
goto failure;
fib_info_cnt++;
if (cfg->fc_mx) {
@@ -814,7 +813,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
} else
fi->fib_metrics = (u32 *) dst_default_metrics;
- fi->fib_net = hold_net(net);
+ fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
fi->fib_scope = cfg->fc_scope;
fi->fib_flags = cfg->fc_flags;
@@ -922,7 +921,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
nh->nh_scope = RT_SCOPE_NOWHERE;
nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
err = -ENODEV;
- if (nh->nh_dev == NULL)
+ if (!nh->nh_dev)
goto failure;
} else {
change_nexthops(fi) {
@@ -996,7 +995,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
struct rtmsg *rtm;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
@@ -1016,7 +1015,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
rtm->rtm_protocol = fi->fib_protocol;
if (rtm->rtm_dst_len &&
- nla_put_be32(skb, RTA_DST, dst))
+ nla_put_in_addr(skb, RTA_DST, dst))
goto nla_put_failure;
if (fi->fib_priority &&
nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
@@ -1025,11 +1024,11 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
goto nla_put_failure;
if (fi->fib_prefsrc &&
- nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
+ nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
goto nla_put_failure;
if (fi->fib_nhs == 1) {
if (fi->fib_nh->nh_gw &&
- nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
+ nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
goto nla_put_failure;
if (fi->fib_nh->nh_oif &&
nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
@@ -1046,12 +1045,12 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
struct nlattr *mp;
mp = nla_nest_start(skb, RTA_MULTIPATH);
- if (mp == NULL)
+ if (!mp)
goto nla_put_failure;
for_nexthops(fi) {
rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
- if (rtnh == NULL)
+ if (!rtnh)
goto nla_put_failure;
rtnh->rtnh_flags = nh->nh_flags & 0xFF;
@@ -1059,7 +1058,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
rtnh->rtnh_ifindex = nh->nh_oif;
if (nh->nh_gw &&
- nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
+ nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (nh->nh_tclassid &&
@@ -1094,7 +1093,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
struct hlist_head *head = &fib_info_laddrhash[hash];
struct fib_info *fi;
- if (fib_info_laddrhash == NULL || local == 0)
+ if (!fib_info_laddrhash || local == 0)
return 0;
hlist_for_each_entry(fi, head, fib_lhash) {
@@ -1163,12 +1162,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
void fib_select_default(struct fib_result *res)
{
struct fib_info *fi = NULL, *last_resort = NULL;
- struct list_head *fa_head = res->fa_head;
+ struct hlist_head *fa_head = res->fa_head;
struct fib_table *tb = res->table;
int order = -1, last_idx = -1;
struct fib_alias *fa;
- list_for_each_entry_rcu(fa, fa_head, fa_list) {
+ hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
struct fib_info *next_fi = fa->fa_info;
if (next_fi->fib_scope != res->scope ||
@@ -1183,7 +1182,7 @@ void fib_select_default(struct fib_result *res)
fib_alias_accessed(fa);
- if (fi == NULL) {
+ if (!fi) {
if (next_fi != res->fi)
break;
} else if (!fib_detect_death(fi, order, &last_resort,
@@ -1196,7 +1195,7 @@ void fib_select_default(struct fib_result *res)
order++;
}
- if (order <= 0 || fi == NULL) {
+ if (order <= 0 || !fi) {
tb->tb_default = -1;
goto out;
}
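
The nla_get_be32()/nla_put_be32() calls on addresses become nla_get_in_addr()/nla_put_in_addr() in this file and in devinet.c and fib_rules.c above; for IPv4 the payload is the same __be32, the typed helpers only make the address intent explicit at the call site. A userspace illustration of the same idea (the wrapper name is hypothetical):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical typed accessor: same bytes as the raw be32 attribute,
 * but callers handling addresses say so in the code. */
static struct in_addr get_in_addr(const void *attr_payload)
{
	struct in_addr a;

	memcpy(&a, attr_payload, sizeof(a));	/* payload is already network order */
	return a;
}

int main(void)
{
	uint32_t payload = htonl(0xc0a80001);	/* 192.168.0.1 */
	struct in_addr addr = get_in_addr(&payload);
	char buf[INET_ADDRSTRLEN];

	printf("%s\n", inet_ntop(AF_INET, &addr, buf, sizeof(buf)));
	return 0;
}
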
@@ -1252,7 +1251,7 @@ int fib_sync_up(struct net_device *dev)
alive++;
continue;
}
- if (nexthop_nh->nh_dev == NULL ||
+ if (!nexthop_nh->nh_dev ||
!(nexthop_nh->nh_dev->flags & IFF_UP))
continue;
if (nexthop_nh->nh_dev != dev ||
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3daf0224ff2e..e13fcc602da2 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -79,6 +79,7 @@
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
+#include <net/switchdev.h>
#include "fib_lookup.h"
#define MAX_STAT_DEPTH 32
@@ -88,38 +89,35 @@
typedef unsigned int t_key;
-#define IS_TNODE(n) ((n)->bits)
-#define IS_LEAF(n) (!(n)->bits)
+#define IS_TRIE(n) ((n)->pos >= KEYLENGTH)
+#define IS_TNODE(n) ((n)->bits)
+#define IS_LEAF(n) (!(n)->bits)
-#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
-
-struct tnode {
+struct key_vector {
t_key key;
- unsigned char bits; /* 2log(KEYLENGTH) bits needed */
unsigned char pos; /* 2log(KEYLENGTH) bits needed */
+ unsigned char bits; /* 2log(KEYLENGTH) bits needed */
unsigned char slen;
- struct tnode __rcu *parent;
- struct rcu_head rcu;
union {
- /* The fields in this struct are valid if bits > 0 (TNODE) */
- struct {
- t_key empty_children; /* KEYLENGTH bits needed */
- t_key full_children; /* KEYLENGTH bits needed */
- struct tnode __rcu *child[0];
- };
- /* This list pointer if valid if bits == 0 (LEAF) */
- struct hlist_head list;
+ /* This list pointer is valid if (pos | bits) == 0 (LEAF) */
+ struct hlist_head leaf;
+ /* This array is valid if (pos | bits) > 0 (TNODE) */
+ struct key_vector __rcu *tnode[0];
};
};
-struct leaf_info {
- struct hlist_node hlist;
- int plen;
- u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
- struct list_head falh;
+struct tnode {
struct rcu_head rcu;
+ t_key empty_children; /* KEYLENGTH bits needed */
+ t_key full_children; /* KEYLENGTH bits needed */
+ struct key_vector __rcu *parent;
+ struct key_vector kv[1];
+#define tn_bits kv[0].bits
};
+#define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n])
+#define LEAF_SIZE TNODE_SIZE(1)
+
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
unsigned int gets;
@@ -142,13 +140,13 @@ struct trie_stat {
};
struct trie {
- struct tnode __rcu *trie;
+ struct key_vector kv[1];
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats;
#endif
};
-static void resize(struct trie *t, struct tnode *tn);
+static struct key_vector *resize(struct trie *t, struct key_vector *tn);
static size_t tnode_free_size;
/*
@@ -161,41 +159,46 @@ static const int sync_pages = 128;
static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;
+static inline struct tnode *tn_info(struct key_vector *kv)
+{
+ return container_of(kv, struct tnode, kv[0]);
+}
+
/* caller must hold RTNL */
-#define node_parent(n) rtnl_dereference((n)->parent)
+#define node_parent(tn) rtnl_dereference(tn_info(tn)->parent)
+#define get_child(tn, i) rtnl_dereference((tn)->tnode[i])
/* caller must hold RCU read lock or RTNL */
-#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
+#define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent)
+#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])
/* wrapper for rcu_assign_pointer */
-static inline void node_set_parent(struct tnode *n, struct tnode *tp)
+static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
{
if (n)
- rcu_assign_pointer(n->parent, tp);
+ rcu_assign_pointer(tn_info(n)->parent, tp);
}
-#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
+#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)
/* This provides us with the number of children in this node, in the case of a
* leaf this will return 0 meaning none of the children are accessible.
*/
-static inline unsigned long tnode_child_length(const struct tnode *tn)
+static inline unsigned long child_length(const struct key_vector *tn)
{
return (1ul << tn->bits) & ~(1ul);
}
-/* caller must hold RTNL */
-static inline struct tnode *tnode_get_child(const struct tnode *tn,
- unsigned long i)
-{
- return rtnl_dereference(tn->child[i]);
-}
+#define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)
-/* caller must hold RCU read lock or RTNL */
-static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
- unsigned long i)
+static inline unsigned long get_index(t_key key, struct key_vector *kv)
{
- return rcu_dereference_rtnl(tn->child[i]);
+ unsigned long index = key ^ kv->key;
+
+ if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
+ return 0;
+
+ return index >> kv->pos;
}
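
The reworked get_index() special-cases pos == KEYLENGTH: when unsigned long is only 32 bits wide (BITS_PER_LONG <= KEYLENGTH), shifting by the full key width would be undefined behaviour in C, and with the new IS_TRIE() definition the embedded root key vector is exactly that case, so it resolves to child index 0 without shifting. A small sketch of the same guard on a plain 32-bit value:

#include <stdint.h>
#include <stdio.h>

#define KEYLENGTH 32

/* Shifting a 32-bit value by 32 is undefined, so index 0 is returned
 * directly when pos covers the whole key (the trie-root case). */
static uint32_t index_for(uint32_t key, uint32_t node_key, unsigned int pos)
{
	uint32_t diff = key ^ node_key;

	if (pos >= KEYLENGTH)
		return 0;
	return diff >> pos;
}

int main(void)
{
	/* keys differ only in the low byte; with pos = 4 the child index is 0xf */
	printf("%u\n", index_for(0x0a0000ffu, 0x0a000000u, 4));
	/* root case: pos == KEYLENGTH, guarded instead of shifting by 32 */
	printf("%u\n", index_for(0x0a0000ffu, 0x0a000000u, KEYLENGTH));
	return 0;
}
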
/* To understand this stuff, an understanding of keys and all their bits is
@@ -274,106 +277,104 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa)
}
#define TNODE_KMALLOC_MAX \
- ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
+ ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *))
+#define TNODE_VMALLOC_MAX \
+ ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
static void __node_free_rcu(struct rcu_head *head)
{
struct tnode *n = container_of(head, struct tnode, rcu);
- if (IS_LEAF(n))
+ if (!n->tn_bits)
kmem_cache_free(trie_leaf_kmem, n);
- else if (n->bits <= TNODE_KMALLOC_MAX)
+ else if (n->tn_bits <= TNODE_KMALLOC_MAX)
kfree(n);
else
vfree(n);
}
-#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
+#define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)
-static inline void free_leaf_info(struct leaf_info *leaf)
+static struct tnode *tnode_alloc(int bits)
{
- kfree_rcu(leaf, rcu);
-}
+ size_t size;
+
+ /* verify bits is within bounds */
+ if (bits > TNODE_VMALLOC_MAX)
+ return NULL;
+
+ /* determine size and verify it is non-zero and didn't overflow */
+ size = TNODE_SIZE(1ul << bits);
-static struct tnode *tnode_alloc(size_t size)
-{
if (size <= PAGE_SIZE)
return kzalloc(size, GFP_KERNEL);
else
return vzalloc(size);
}
-static inline void empty_child_inc(struct tnode *n)
+static inline void empty_child_inc(struct key_vector *n)
{
- ++n->empty_children ? : ++n->full_children;
+ ++tn_info(n)->empty_children ? : ++tn_info(n)->full_children;
}
-static inline void empty_child_dec(struct tnode *n)
+static inline void empty_child_dec(struct key_vector *n)
{
- n->empty_children-- ? : n->full_children--;
+ tn_info(n)->empty_children-- ? : tn_info(n)->full_children--;
}
-static struct tnode *leaf_new(t_key key)
+static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
{
- struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
- if (l) {
- l->parent = NULL;
- /* set key and pos to reflect full key value
- * any trailing zeros in the key should be ignored
- * as the nodes are searched
- */
- l->key = key;
- l->slen = 0;
- l->pos = 0;
- /* set bits to 0 indicating we are not a tnode */
- l->bits = 0;
+ struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
+ struct key_vector *l = kv->kv;
- INIT_HLIST_HEAD(&l->list);
- }
- return l;
-}
+ if (!kv)
+ return NULL;
-static struct leaf_info *leaf_info_new(int plen)
-{
- struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
- if (li) {
- li->plen = plen;
- li->mask_plen = ntohl(inet_make_mask(plen));
- INIT_LIST_HEAD(&li->falh);
- }
- return li;
+ /* initialize key vector */
+ l->key = key;
+ l->pos = 0;
+ l->bits = 0;
+ l->slen = fa->fa_slen;
+
+ /* link leaf to fib alias */
+ INIT_HLIST_HEAD(&l->leaf);
+ hlist_add_head(&fa->fa_list, &l->leaf);
+
+ return l;
}
-static struct tnode *tnode_new(t_key key, int pos, int bits)
+static struct key_vector *tnode_new(t_key key, int pos, int bits)
{
- size_t sz = offsetof(struct tnode, child[1ul << bits]);
- struct tnode *tn = tnode_alloc(sz);
+ struct tnode *tnode = tnode_alloc(bits);
unsigned int shift = pos + bits;
+ struct key_vector *tn = tnode->kv;
/* verify bits and pos their msb bits clear and values are valid */
BUG_ON(!bits || (shift > KEYLENGTH));
- if (tn) {
- tn->parent = NULL;
- tn->slen = pos;
- tn->pos = pos;
- tn->bits = bits;
- tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
- if (bits == KEYLENGTH)
- tn->full_children = 1;
- else
- tn->empty_children = 1ul << bits;
- }
+ pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
+ sizeof(struct key_vector *) << bits);
+
+ if (!tnode)
+ return NULL;
+
+ if (bits == KEYLENGTH)
+ tnode->full_children = 1;
+ else
+ tnode->empty_children = 1ul << bits;
+
+ tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
+ tn->pos = pos;
+ tn->bits = bits;
+ tn->slen = pos;
- pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
- sizeof(struct tnode *) << bits);
return tn;
}
/* Check whether a tnode 'n' is "full", i.e. it is an internal node
* and no bits are skipped. See discussion in dyntree paper p. 6
*/
-static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
+static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
{
return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}
@@ -381,17 +382,18 @@ static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
/* Add a child at position i overwriting the old value.
* Update the value of full_children and empty_children.
*/
-static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
+static void put_child(struct key_vector *tn, unsigned long i,
+ struct key_vector *n)
{
- struct tnode *chi = tnode_get_child(tn, i);
+ struct key_vector *chi = get_child(tn, i);
int isfull, wasfull;
- BUG_ON(i >= tnode_child_length(tn));
+ BUG_ON(i >= child_length(tn));
/* update emptyChildren, overflow into fullChildren */
- if (n == NULL && chi != NULL)
+ if (!n && chi)
empty_child_inc(tn);
- if (n != NULL && chi == NULL)
+ if (n && !chi)
empty_child_dec(tn);
/* update fullChildren */
@@ -399,23 +401,23 @@ static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
isfull = tnode_full(tn, n);
if (wasfull && !isfull)
- tn->full_children--;
+ tn_info(tn)->full_children--;
else if (!wasfull && isfull)
- tn->full_children++;
+ tn_info(tn)->full_children++;
if (n && (tn->slen < n->slen))
tn->slen = n->slen;
- rcu_assign_pointer(tn->child[i], n);
+ rcu_assign_pointer(tn->tnode[i], n);
}
-static void update_children(struct tnode *tn)
+static void update_children(struct key_vector *tn)
{
unsigned long i;
/* update all of the child parent pointers */
- for (i = tnode_child_length(tn); i;) {
- struct tnode *inode = tnode_get_child(tn, --i);
+ for (i = child_length(tn); i;) {
+ struct key_vector *inode = get_child(tn, --i);
if (!inode)
continue;
@@ -431,36 +433,37 @@ static void update_children(struct tnode *tn)
}
}
-static inline void put_child_root(struct tnode *tp, struct trie *t,
- t_key key, struct tnode *n)
+static inline void put_child_root(struct key_vector *tp, t_key key,
+ struct key_vector *n)
{
- if (tp)
- put_child(tp, get_index(key, tp), n);
+ if (IS_TRIE(tp))
+ rcu_assign_pointer(tp->tnode[0], n);
else
- rcu_assign_pointer(t->trie, n);
+ put_child(tp, get_index(key, tp), n);
}
-static inline void tnode_free_init(struct tnode *tn)
+static inline void tnode_free_init(struct key_vector *tn)
{
- tn->rcu.next = NULL;
+ tn_info(tn)->rcu.next = NULL;
}
-static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
+static inline void tnode_free_append(struct key_vector *tn,
+ struct key_vector *n)
{
- n->rcu.next = tn->rcu.next;
- tn->rcu.next = &n->rcu;
+ tn_info(n)->rcu.next = tn_info(tn)->rcu.next;
+ tn_info(tn)->rcu.next = &tn_info(n)->rcu;
}
-static void tnode_free(struct tnode *tn)
+static void tnode_free(struct key_vector *tn)
{
- struct callback_head *head = &tn->rcu;
+ struct callback_head *head = &tn_info(tn)->rcu;
while (head) {
head = head->next;
- tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
+ tnode_free_size += TNODE_SIZE(1ul << tn->bits);
node_free(tn);
- tn = container_of(head, struct tnode, rcu);
+ tn = container_of(head, struct tnode, rcu)->kv;
}
if (tnode_free_size >= PAGE_SIZE * sync_pages) {
@@ -469,14 +472,16 @@ static void tnode_free(struct tnode *tn)
}
}
-static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
+static struct key_vector *replace(struct trie *t,
+ struct key_vector *oldtnode,
+ struct key_vector *tn)
{
- struct tnode *tp = node_parent(oldtnode);
+ struct key_vector *tp = node_parent(oldtnode);
unsigned long i;
/* setup the parent pointer out of and back into this node */
NODE_INIT_PARENT(tn, tp);
- put_child_root(tp, t, tn->key, tn);
+ put_child_root(tp, tn->key, tn);
/* update all of the child parent pointers */
update_children(tn);
@@ -485,18 +490,21 @@ static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
tnode_free(oldtnode);
/* resize children now that oldtnode is freed */
- for (i = tnode_child_length(tn); i;) {
- struct tnode *inode = tnode_get_child(tn, --i);
+ for (i = child_length(tn); i;) {
+ struct key_vector *inode = get_child(tn, --i);
/* resize child node */
if (tnode_full(tn, inode))
- resize(t, inode);
+ tn = resize(t, inode);
}
+
+ return tp;
}
-static int inflate(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *inflate(struct trie *t,
+ struct key_vector *oldtnode)
{
- struct tnode *tn;
+ struct key_vector *tn;
unsigned long i;
t_key m;
@@ -504,7 +512,7 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
if (!tn)
- return -ENOMEM;
+ goto notnode;
/* prepare oldtnode to be freed */
tnode_free_init(oldtnode);
@@ -514,13 +522,13 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
* point to existing tnodes and the links between our allocated
* nodes.
*/
- for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
- struct tnode *inode = tnode_get_child(oldtnode, --i);
- struct tnode *node0, *node1;
+ for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
+ struct key_vector *inode = get_child(oldtnode, --i);
+ struct key_vector *node0, *node1;
unsigned long j, k;
/* An empty child */
- if (inode == NULL)
+ if (!inode)
continue;
/* A leaf or an internal node with skipped bits */
@@ -534,8 +542,8 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
/* An internal node with two children */
if (inode->bits == 1) {
- put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
- put_child(tn, 2 * i, tnode_get_child(inode, 0));
+ put_child(tn, 2 * i + 1, get_child(inode, 1));
+ put_child(tn, 2 * i, get_child(inode, 0));
continue;
}
@@ -564,11 +572,11 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
tnode_free_append(tn, node0);
/* populate child pointers in new nodes */
- for (k = tnode_child_length(inode), j = k / 2; j;) {
- put_child(node1, --j, tnode_get_child(inode, --k));
- put_child(node0, j, tnode_get_child(inode, j));
- put_child(node1, --j, tnode_get_child(inode, --k));
- put_child(node0, j, tnode_get_child(inode, j));
+ for (k = child_length(inode), j = k / 2; j;) {
+ put_child(node1, --j, get_child(inode, --k));
+ put_child(node0, j, get_child(inode, j));
+ put_child(node1, --j, get_child(inode, --k));
+ put_child(node0, j, get_child(inode, j));
}
/* link new nodes to parent */
@@ -581,25 +589,25 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
}
/* setup the parent pointers into and out of this node */
- replace(t, oldtnode, tn);
-
- return 0;
+ return replace(t, oldtnode, tn);
nomem:
/* all pointers should be clean so we are done */
tnode_free(tn);
- return -ENOMEM;
+notnode:
+ return NULL;
}
-static int halve(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *halve(struct trie *t,
+ struct key_vector *oldtnode)
{
- struct tnode *tn;
+ struct key_vector *tn;
unsigned long i;
pr_debug("In halve\n");
tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
if (!tn)
- return -ENOMEM;
+ goto notnode;
/* prepare oldtnode to be freed */
tnode_free_init(oldtnode);
@@ -609,10 +617,10 @@ static int halve(struct trie *t, struct tnode *oldtnode)
* point to existing tnodes and the links between our allocated
* nodes.
*/
- for (i = tnode_child_length(oldtnode); i;) {
- struct tnode *node1 = tnode_get_child(oldtnode, --i);
- struct tnode *node0 = tnode_get_child(oldtnode, --i);
- struct tnode *inode;
+ for (i = child_length(oldtnode); i;) {
+ struct key_vector *node1 = get_child(oldtnode, --i);
+ struct key_vector *node0 = get_child(oldtnode, --i);
+ struct key_vector *inode;
/* At least one of the children is empty */
if (!node1 || !node0) {
@@ -622,10 +630,8 @@ static int halve(struct trie *t, struct tnode *oldtnode)
/* Two nonempty children */
inode = tnode_new(node0->key, oldtnode->pos, 1);
- if (!inode) {
- tnode_free(tn);
- return -ENOMEM;
- }
+ if (!inode)
+ goto nomem;
tnode_free_append(tn, inode);
/* initialize pointers out of node */
@@ -638,30 +644,36 @@ static int halve(struct trie *t, struct tnode *oldtnode)
}
/* setup the parent pointers into and out of this node */
- replace(t, oldtnode, tn);
-
- return 0;
+ return replace(t, oldtnode, tn);
+nomem:
+ /* all pointers should be clean so we are done */
+ tnode_free(tn);
+notnode:
+ return NULL;
}
-static void collapse(struct trie *t, struct tnode *oldtnode)
+static struct key_vector *collapse(struct trie *t,
+ struct key_vector *oldtnode)
{
- struct tnode *n, *tp;
+ struct key_vector *n, *tp;
unsigned long i;
/* scan the tnode looking for that one child that might still exist */
- for (n = NULL, i = tnode_child_length(oldtnode); !n && i;)
- n = tnode_get_child(oldtnode, --i);
+ for (n = NULL, i = child_length(oldtnode); !n && i;)
+ n = get_child(oldtnode, --i);
/* compress one level */
tp = node_parent(oldtnode);
- put_child_root(tp, t, oldtnode->key, n);
+ put_child_root(tp, oldtnode->key, n);
node_set_parent(n, tp);
/* drop dead node */
node_free(oldtnode);
+
+ return tp;
}
-static unsigned char update_suffix(struct tnode *tn)
+static unsigned char update_suffix(struct key_vector *tn)
{
unsigned char slen = tn->pos;
unsigned long stride, i;
@@ -671,8 +683,8 @@ static unsigned char update_suffix(struct tnode *tn)
* why we start with a stride of 2 since a stride of 1 would
* represent the nodes with suffix length equal to tn->pos
*/
- for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
- struct tnode *n = tnode_get_child(tn, i);
+ for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) {
+ struct key_vector *n = get_child(tn, i);
if (!n || (n->slen <= slen))
continue;
@@ -704,12 +716,12 @@ static unsigned char update_suffix(struct tnode *tn)
*
* 'high' in this instance is the variable 'inflate_threshold'. It
* is expressed as a percentage, so we multiply it with
- * tnode_child_length() and instead of multiplying by 2 (since the
+ * child_length() and instead of multiplying by 2 (since the
* child array will be doubled by inflate()) and multiplying
* the left-hand side by 100 (to handle the percentage thing) we
* multiply the left-hand side by 50.
*
- * The left-hand side may look a bit weird: tnode_child_length(tn)
+ * The left-hand side may look a bit weird: child_length(tn)
* - tn->empty_children is of course the number of non-null children
* in the current node. tn->full_children is the number of "full"
* children, that is non-null tnodes with a skip value of 0.
@@ -719,10 +731,10 @@ static unsigned char update_suffix(struct tnode *tn)
* A clearer way to write this would be:
*
* to_be_doubled = tn->full_children;
- * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
+ * not_to_be_doubled = child_length(tn) - tn->empty_children -
* tn->full_children;
*
- * new_child_length = tnode_child_length(tn) * 2;
+ * new_child_length = child_length(tn) * 2;
*
* new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
* new_child_length;
@@ -739,57 +751,57 @@ static unsigned char update_suffix(struct tnode *tn)
* inflate_threshold * new_child_length
*
* expand not_to_be_doubled and to_be_doubled, and shorten:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * 100 * (child_length(tn) - tn->empty_children +
* tn->full_children) >= inflate_threshold * new_child_length
*
* expand new_child_length:
- * 100 * (tnode_child_length(tn) - tn->empty_children +
+ * 100 * (child_length(tn) - tn->empty_children +
* tn->full_children) >=
- * inflate_threshold * tnode_child_length(tn) * 2
+ * inflate_threshold * child_length(tn) * 2
*
* shorten again:
- * 50 * (tn->full_children + tnode_child_length(tn) -
+ * 50 * (tn->full_children + child_length(tn) -
* tn->empty_children) >= inflate_threshold *
- * tnode_child_length(tn)
+ * child_length(tn)
*
*/
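
With the kernel's default inflate_threshold of 50 (the value lives in fib_trie.c, outside this hunk), the shortened test above reads 50 * (full_children + child_length - empty_children) >= 50 * child_length. A quick worked check: a 16-slot node with 4 empty and 3 full children gives 50 * 15 = 750 against 800, so it is left alone; with only 2 empty slots it reaches 850 and gets doubled. The same arithmetic as a sketch (the real should_inflate() additionally requires tn->pos != 0):

#include <stdbool.h>
#include <stdio.h>

/* Threshold value assumed from fib_trie.c; only the arithmetic matters here. */
static const unsigned long inflate_threshold = 50;

static bool would_inflate(unsigned long child_length,
			  unsigned long empty_children,
			  unsigned long full_children)
{
	unsigned long threshold = child_length * inflate_threshold;
	unsigned long used = child_length - empty_children + full_children;

	return (used > 1) && ((50 * used) >= threshold);
}

int main(void)
{
	/* 16 slots, 4 empty, 3 full: 50 * 15 = 750 < 800, left alone */
	printf("%d\n", would_inflate(16, 4, 3));
	/* 16 slots, 2 empty, 3 full: 50 * 17 = 850 >= 800, inflated */
	printf("%d\n", would_inflate(16, 2, 3));
	return 0;
}
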
-static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
+static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
{
- unsigned long used = tnode_child_length(tn);
+ unsigned long used = child_length(tn);
unsigned long threshold = used;
/* Keep root node larger */
- threshold *= tp ? inflate_threshold : inflate_threshold_root;
- used -= tn->empty_children;
- used += tn->full_children;
+ threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold;
+ used -= tn_info(tn)->empty_children;
+ used += tn_info(tn)->full_children;
/* if bits == KEYLENGTH then pos = 0, and will fail below */
return (used > 1) && tn->pos && ((50 * used) >= threshold);
}
-static bool should_halve(const struct tnode *tp, const struct tnode *tn)
+static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
{
- unsigned long used = tnode_child_length(tn);
+ unsigned long used = child_length(tn);
unsigned long threshold = used;
/* Keep root node larger */
- threshold *= tp ? halve_threshold : halve_threshold_root;
- used -= tn->empty_children;
+ threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold;
+ used -= tn_info(tn)->empty_children;
/* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
}
-static bool should_collapse(const struct tnode *tn)
+static inline bool should_collapse(struct key_vector *tn)
{
- unsigned long used = tnode_child_length(tn);
+ unsigned long used = child_length(tn);
- used -= tn->empty_children;
+ used -= tn_info(tn)->empty_children;
/* account for bits == KEYLENGTH case */
- if ((tn->bits == KEYLENGTH) && tn->full_children)
+ if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children)
used -= KEY_MAX;
/* One child or none, time to drop us from the trie */
@@ -797,10 +809,13 @@ static bool should_collapse(const struct tnode *tn)
}
#define MAX_WORK 10
-static void resize(struct trie *t, struct tnode *tn)
+static struct key_vector *resize(struct trie *t, struct key_vector *tn)
{
- struct tnode *tp = node_parent(tn);
- struct tnode __rcu **cptr;
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ struct trie_use_stats __percpu *stats = t->stats;
+#endif
+ struct key_vector *tp = node_parent(tn);
+ unsigned long cindex = get_index(tn->key, tp);
int max_work = MAX_WORK;
pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
@@ -810,183 +825,128 @@ static void resize(struct trie *t, struct tnode *tn)
* doing it ourselves. This way we can let RCU fully do its
* thing without us interfering
*/
- cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
- BUG_ON(tn != rtnl_dereference(*cptr));
+ BUG_ON(tn != get_child(tp, cindex));
/* Double as long as the resulting node has a number of
* nonempty nodes that are above the threshold.
*/
while (should_inflate(tp, tn) && max_work) {
- if (inflate(t, tn)) {
+ tp = inflate(t, tn);
+ if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
- this_cpu_inc(t->stats->resize_node_skipped);
+ this_cpu_inc(stats->resize_node_skipped);
#endif
break;
}
max_work--;
- tn = rtnl_dereference(*cptr);
+ tn = get_child(tp, cindex);
}
+ /* update parent in case inflate failed */
+ tp = node_parent(tn);
+
/* Return if at least one inflate is run */
if (max_work != MAX_WORK)
- return;
+ return tp;
/* Halve as long as the number of empty children in this
* node is above threshold.
*/
while (should_halve(tp, tn) && max_work) {
- if (halve(t, tn)) {
+ tp = halve(t, tn);
+ if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
- this_cpu_inc(t->stats->resize_node_skipped);
+ this_cpu_inc(stats->resize_node_skipped);
#endif
break;
}
max_work--;
- tn = rtnl_dereference(*cptr);
+ tn = get_child(tp, cindex);
}
/* Only one child remains */
- if (should_collapse(tn)) {
- collapse(t, tn);
- return;
- }
+ if (should_collapse(tn))
+ return collapse(t, tn);
+
+ /* update parent in case halve failed */
+ tp = node_parent(tn);
/* Return if at least one deflate was run */
if (max_work != MAX_WORK)
- return;
+ return tp;
/* push the suffix length to the parent node */
if (tn->slen > tn->pos) {
unsigned char slen = update_suffix(tn);
- if (tp && (slen > tp->slen))
+ if (slen > tp->slen)
tp->slen = slen;
}
-}
-
-/* readside must use rcu_read_lock currently dump routines
- via get_fa_head and dump */
-
-static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
-{
- struct hlist_head *head = &l->list;
- struct leaf_info *li;
-
- hlist_for_each_entry_rcu(li, head, hlist)
- if (li->plen == plen)
- return li;
-
- return NULL;
-}
-
-static inline struct list_head *get_fa_head(struct tnode *l, int plen)
-{
- struct leaf_info *li = find_leaf_info(l, plen);
-
- if (!li)
- return NULL;
- return &li->falh;
+ return tp;
}
-static void leaf_pull_suffix(struct tnode *l)
+static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l)
{
- struct tnode *tp = node_parent(l);
-
- while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
+ while ((tp->slen > tp->pos) && (tp->slen > l->slen)) {
if (update_suffix(tp) > l->slen)
break;
tp = node_parent(tp);
}
}
-static void leaf_push_suffix(struct tnode *l)
+static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l)
{
- struct tnode *tn = node_parent(l);
-
/* if this is a new leaf then tn will be NULL and we can sort
* out parent suffix lengths as a part of trie_rebalance
*/
- while (tn && (tn->slen < l->slen)) {
+ while (tn->slen < l->slen) {
tn->slen = l->slen;
tn = node_parent(tn);
}
}
-static void remove_leaf_info(struct tnode *l, struct leaf_info *old)
-{
- /* record the location of the previous list_info entry */
- struct hlist_node **pprev = old->hlist.pprev;
- struct leaf_info *li = hlist_entry(pprev, typeof(*li), hlist.next);
-
- /* remove the leaf info from the list */
- hlist_del_rcu(&old->hlist);
-
- /* only access li if it is pointing at the last valid hlist_node */
- if (hlist_empty(&l->list) || (*pprev))
- return;
-
- /* update the trie with the latest suffix length */
- l->slen = KEYLENGTH - li->plen;
- leaf_pull_suffix(l);
-}
-
-static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
+/* rcu_read_lock needs to be held by the caller from the read side */
+static struct key_vector *fib_find_node(struct trie *t,
+ struct key_vector **tp, u32 key)
{
- struct hlist_head *head = &l->list;
- struct leaf_info *li = NULL, *last = NULL;
+ struct key_vector *pn, *n = t->kv;
+ unsigned long index = 0;
- if (hlist_empty(head)) {
- hlist_add_head_rcu(&new->hlist, head);
- } else {
- hlist_for_each_entry(li, head, hlist) {
- if (new->plen > li->plen)
- break;
-
- last = li;
- }
- if (last)
- hlist_add_behind_rcu(&new->hlist, &last->hlist);
- else
- hlist_add_before_rcu(&new->hlist, &li->hlist);
- }
-
- /* if we added to the tail node then we need to update slen */
- if (l->slen < (KEYLENGTH - new->plen)) {
- l->slen = KEYLENGTH - new->plen;
- leaf_push_suffix(l);
- }
-}
+ do {
+ pn = n;
+ n = get_child_rcu(n, index);
-/* rcu_read_lock needs to be hold by caller from readside */
-static struct tnode *fib_find_node(struct trie *t, u32 key)
-{
- struct tnode *n = rcu_dereference_rtnl(t->trie);
+ if (!n)
+ break;
- while (n) {
- unsigned long index = get_index(key, n);
+ index = get_cindex(key, n);
/* This bit of code is a bit tricky but it combines multiple
* checks into a single check. The prefix consists of the
* prefix plus zeros for the bits in the cindex. The index
* is the difference between the key and this value. From
* this we can actually derive several pieces of data.
- * if (index & (~0ul << bits))
+ * if (index >= (1ul << bits))
* we have a mismatch in skip bits and failed
* else
* we know the value is cindex
+ *
+ * This check is safe even if bits == KEYLENGTH due to the
+ * fact that we can only allocate a node with 32 bits if a
+ * long is greater than 32 bits.
*/
- if (index & (~0ul << n->bits))
- return NULL;
-
- /* we have found a leaf. Prefixes have already been compared */
- if (IS_LEAF(n))
+ if (index >= (1ul << n->bits)) {
+ n = NULL;
break;
+ }
- n = tnode_get_child_rcu(n, index);
- }
+ /* keep searching until we find a perfect match leaf or NULL */
+ } while (IS_TNODE(n));
+
+ *tp = pn;
return n;
}
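/* A minimal sketch of the mismatch test used in fib_find_node() above,
 * assuming get_cindex(key, n) behaves like ((key ^ n->key) >> n->pos):
 * the child index plus any mismatching skip bits, shifted down to bit 0.
 * If any bit at or above n->bits survives, the node's prefix did not
 * match the key and the search stops.
 */
#include <stdbool.h>

static bool prefix_mismatched(unsigned long key, unsigned long node_key,
			      unsigned int pos, unsigned int bits)
{
	unsigned long index = (key ^ node_key) >> pos;

	return index >= (1ul << bits);	/* surviving skip bits => mismatch */
}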
@@ -994,14 +954,23 @@ static struct tnode *fib_find_node(struct trie *t, u32 key)
/* Return the first fib alias matching TOS with
* priority less than or equal to PRIO.
*/
-static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
+static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
+ u8 tos, u32 prio, u32 tb_id)
{
struct fib_alias *fa;
if (!fah)
return NULL;
- list_for_each_entry(fa, fah, fa_list) {
+ hlist_for_each_entry(fa, fah, fa_list) {
+ if (fa->fa_slen < slen)
+ continue;
+ if (fa->fa_slen != slen)
+ break;
+ if (fa->tb_id > tb_id)
+ continue;
+ if (fa->tb_id != tb_id)
+ break;
if (fa->fa_tos > tos)
continue;
if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
@@ -1011,77 +980,23 @@ static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
return NULL;
}
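/* Reading of the checks above, stated as a comparator: aliases hang off a
 * leaf ordered by ascending fa_slen (so the longest prefix comes first),
 * then by descending tb_id, then by descending tos.  The struct below is
 * an illustrative stand-in for the relevant struct fib_alias fields, not
 * kernel code.
 */
struct alias_sort_key {
	unsigned char	slen;	/* KEYLENGTH - prefix length */
	unsigned int	tb_id;
	unsigned char	tos;
};

static int alias_key_cmp(const struct alias_sort_key *a,
			 const struct alias_sort_key *b)
{
	if (a->slen != b->slen)
		return a->slen < b->slen ? -1 : 1;	/* ascending slen */
	if (a->tb_id != b->tb_id)
		return a->tb_id > b->tb_id ? -1 : 1;	/* descending tb_id */
	if (a->tos != b->tos)
		return a->tos > b->tos ? -1 : 1;	/* descending tos */
	return 0;
}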
-static void trie_rebalance(struct trie *t, struct tnode *tn)
+static void trie_rebalance(struct trie *t, struct key_vector *tn)
{
- struct tnode *tp;
-
- while ((tp = node_parent(tn)) != NULL) {
- resize(t, tn);
- tn = tp;
- }
-
- /* Handle last (top) tnode */
- if (IS_TNODE(tn))
- resize(t, tn);
+ while (!IS_TRIE(tn))
+ tn = resize(t, tn);
}
-/* only used from updater-side */
-
-static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
+static int fib_insert_node(struct trie *t, struct key_vector *tp,
+ struct fib_alias *new, t_key key)
{
- struct list_head *fa_head = NULL;
- struct tnode *l, *n, *tp = NULL;
- struct leaf_info *li;
-
- li = leaf_info_new(plen);
- if (!li)
- return NULL;
- fa_head = &li->falh;
+ struct key_vector *n, *l;
- n = rtnl_dereference(t->trie);
-
- /* If we point to NULL, stop. Either the tree is empty and we should
- * just put a new leaf in if, or we have reached an empty child slot,
- * and we should just put our new leaf in that.
- *
- * If we hit a node with a key that does't match then we should stop
- * and create a new tnode to replace that node and insert ourselves
- * and the other node into the new tnode.
- */
- while (n) {
- unsigned long index = get_index(key, n);
-
- /* This bit of code is a bit tricky but it combines multiple
- * checks into a single check. The prefix consists of the
- * prefix plus zeros for the "bits" in the prefix. The index
- * is the difference between the key and this value. From
- * this we can actually derive several pieces of data.
- * if !(index >> bits)
- * we know the value is child index
- * else
- * we have a mismatch in skip bits and failed
- */
- if (index >> n->bits)
- break;
-
- /* we have found a leaf. Prefixes have already been compared */
- if (IS_LEAF(n)) {
- /* Case 1: n is a leaf, and prefixes match*/
- insert_leaf_info(n, li);
- return fa_head;
- }
-
- tp = n;
- n = tnode_get_child_rcu(n, index);
- }
-
- l = leaf_new(key);
- if (!l) {
- free_leaf_info(li);
- return NULL;
- }
+ l = leaf_new(key, new);
+ if (!l)
+ goto noleaf;
- insert_leaf_info(l, li);
+ /* retrieve child from parent node */
+ n = get_child(tp, get_index(key, tp));
/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
*
@@ -1090,21 +1005,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
* leaves us in position for handling as case 3
*/
if (n) {
- struct tnode *tn;
+ struct key_vector *tn;
tn = tnode_new(key, __fls(key ^ n->key), 1);
- if (!tn) {
- free_leaf_info(li);
- node_free(l);
- return NULL;
- }
+ if (!tn)
+ goto notnode;
/* initialize routes out of node */
NODE_INIT_PARENT(tn, tp);
put_child(tn, get_index(key, tn) ^ 1, n);
/* start adding routes into the node */
- put_child_root(tp, t, key, tn);
+ put_child_root(tp, key, tn);
node_set_parent(n, tn);
/* parent now has a NULL spot where the leaf can go */
@@ -1112,69 +1024,93 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
}
/* Case 3: n is NULL, and will just insert a new leaf */
- if (tp) {
- NODE_INIT_PARENT(l, tp);
- put_child(tp, get_index(key, tp), l);
- trie_rebalance(t, tp);
+ NODE_INIT_PARENT(l, tp);
+ put_child_root(tp, key, l);
+ trie_rebalance(t, tp);
+
+ return 0;
+notnode:
+ node_free(l);
+noleaf:
+ return -ENOMEM;
+}
+
+static int fib_insert_alias(struct trie *t, struct key_vector *tp,
+ struct key_vector *l, struct fib_alias *new,
+ struct fib_alias *fa, t_key key)
+{
+ if (!l)
+ return fib_insert_node(t, tp, new, key);
+
+ if (fa) {
+ hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
} else {
- rcu_assign_pointer(t->trie, l);
+ struct fib_alias *last;
+
+ hlist_for_each_entry(last, &l->leaf, fa_list) {
+ if (new->fa_slen < last->fa_slen)
+ break;
+ if ((new->fa_slen == last->fa_slen) &&
+ (new->tb_id > last->tb_id))
+ break;
+ fa = last;
+ }
+
+ if (fa)
+ hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
+ else
+ hlist_add_head_rcu(&new->fa_list, &l->leaf);
}
- return fa_head;
+ /* if we added to the tail node then we need to update slen */
+ if (l->slen < new->fa_slen) {
+ l->slen = new->fa_slen;
+ leaf_push_suffix(tp, l);
+ }
+
+ return 0;
}
-/*
- * Caller must hold RTNL.
- */
+/* Caller must hold RTNL. */
int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
{
- struct trie *t = (struct trie *) tb->tb_data;
+ struct trie *t = (struct trie *)tb->tb_data;
struct fib_alias *fa, *new_fa;
- struct list_head *fa_head = NULL;
+ struct key_vector *l, *tp;
struct fib_info *fi;
- int plen = cfg->fc_dst_len;
+ u8 plen = cfg->fc_dst_len;
+ u8 slen = KEYLENGTH - plen;
u8 tos = cfg->fc_tos;
- u32 key, mask;
+ u32 key;
int err;
- struct tnode *l;
- if (plen > 32)
+ if (plen > KEYLENGTH)
return -EINVAL;
key = ntohl(cfg->fc_dst);
pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
- mask = ntohl(inet_make_mask(plen));
-
- if (key & ~mask)
+ if ((plen < KEYLENGTH) && (key << plen))
return -EINVAL;
- key = key & mask;
-
fi = fib_create_info(cfg);
if (IS_ERR(fi)) {
err = PTR_ERR(fi);
goto err;
}
- l = fib_find_node(t, key);
- fa = NULL;
-
- if (l) {
- fa_head = get_fa_head(l, plen);
- fa = fib_find_alias(fa_head, tos, fi->fib_priority);
- }
+ l = fib_find_node(t, &tp, key);
+ fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority,
+ tb->tb_id) : NULL;
/* Now fa, if non-NULL, points to the first fib alias
* with the same keys [prefix,tos,priority], if such key already
* exists or to the node before which we will insert new one.
*
* If fa is NULL, we will need to allocate a new one and
- * insert to the head of f.
- *
- * If f is NULL, no fib node matched the destination key
- * and we need to allocate a new one of those as well.
+ * insert to the tail of the section matching the suffix length
+ * of the new alias.
*/
if (fa && fa->fa_tos == tos &&
@@ -1192,9 +1128,10 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
*/
fa_match = NULL;
fa_first = fa;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, fa_head, fa_list) {
- if (fa->fa_tos != tos)
+ hlist_for_each_entry_from(fa, fa_list) {
+ if ((fa->fa_slen != slen) ||
+ (fa->tb_id != tb->tb_id) ||
+ (fa->fa_tos != tos))
break;
if (fa->fa_info->fib_priority != fi->fib_priority)
break;
@@ -1217,7 +1154,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
}
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
- if (new_fa == NULL)
+ if (!new_fa)
goto out;
fi_drop = fa->fa_info;
@@ -1226,8 +1163,21 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
new_fa->fa_type = cfg->fc_type;
state = fa->fa_state;
new_fa->fa_state = state & ~FA_S_ACCESSED;
+ new_fa->fa_slen = fa->fa_slen;
+
+ err = netdev_switch_fib_ipv4_add(key, plen, fi,
+ new_fa->fa_tos,
+ cfg->fc_type,
+ cfg->fc_nlflags,
+ tb->tb_id);
+ if (err) {
+ netdev_switch_fib_ipv4_abort(fi);
+ kmem_cache_free(fn_alias_kmem, new_fa);
+ goto out;
+ }
+
+ hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
- list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
alias_free_mem_rcu(fa);
fib_release_info(fi_drop);
@@ -1254,37 +1204,42 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
- if (new_fa == NULL)
+ if (!new_fa)
goto out;
new_fa->fa_info = fi;
new_fa->fa_tos = tos;
new_fa->fa_type = cfg->fc_type;
new_fa->fa_state = 0;
- /*
- * Insert new entry to the list.
- */
-
- if (!fa_head) {
- fa_head = fib_insert_node(t, key, plen);
- if (unlikely(!fa_head)) {
- err = -ENOMEM;
- goto out_free_new_fa;
- }
+ new_fa->fa_slen = slen;
+ new_fa->tb_id = tb->tb_id;
+
+ /* (Optionally) offload fib entry to switch hardware. */
+ err = netdev_switch_fib_ipv4_add(key, plen, fi, tos,
+ cfg->fc_type,
+ cfg->fc_nlflags,
+ tb->tb_id);
+ if (err) {
+ netdev_switch_fib_ipv4_abort(fi);
+ goto out_free_new_fa;
}
+ /* Insert new entry to the list. */
+ err = fib_insert_alias(t, tp, l, new_fa, fa, key);
+ if (err)
+ goto out_sw_fib_del;
+
if (!plen)
tb->tb_num_default++;
- list_add_tail_rcu(&new_fa->fa_list,
- (fa ? &fa->fa_list : fa_head));
-
rt_cache_flush(cfg->fc_nlinfo.nl_net);
- rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
+ rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, 0);
succeeded:
return 0;
+out_sw_fib_del:
+ netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
out_free_new_fa:
kmem_cache_free(fn_alias_kmem, new_fa);
out:
@@ -1293,7 +1248,7 @@ err:
return err;
}
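/* A standalone sketch of the prefix validity test used earlier in
 * fib_table_insert() (and again in fib_table_delete()):
 * "(plen < KEYLENGTH) && (key << plen)".  For a host-order IPv4 key,
 * shifting left by plen leaves exactly the host bits, so a non-zero
 * result is equivalent to the old "key & ~inet_make_mask(plen)" check
 * without building a mask.
 */
#include <stdbool.h>
#include <stdint.h>

static bool prefix_has_host_bits(uint32_t key, unsigned int plen)
{
	if (plen >= 32)		/* a full /32 can never have host bits set */
		return false;

	return (uint32_t)(key << plen) != 0;
}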
-static inline t_key prefix_mismatch(t_key key, struct tnode *n)
+static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
{
t_key prefix = n->key;
@@ -1304,16 +1259,20 @@ static inline t_key prefix_mismatch(t_key key, struct tnode *n)
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags)
{
- struct trie *t = (struct trie *)tb->tb_data;
+ struct trie *t = (struct trie *) tb->tb_data;
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats = t->stats;
#endif
const t_key key = ntohl(flp->daddr);
- struct tnode *n, *pn;
- struct leaf_info *li;
+ struct key_vector *n, *pn;
+ struct fib_alias *fa;
+ unsigned long index;
t_key cindex;
- n = rcu_dereference(t->trie);
+ pn = t->kv;
+ cindex = 0;
+
+ n = get_child_rcu(pn, cindex);
if (!n)
return -EAGAIN;
@@ -1321,24 +1280,25 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
this_cpu_inc(stats->gets);
#endif
- pn = n;
- cindex = 0;
-
/* Step 1: Travel to the longest prefix match in the trie */
for (;;) {
- unsigned long index = get_index(key, n);
+ index = get_cindex(key, n);
/* This bit of code is a bit tricky but it combines multiple
* checks into a single check. The prefix consists of the
* prefix plus zeros for the "bits" in the prefix. The index
* is the difference between the key and this value. From
* this we can actually derive several pieces of data.
- * if (index & (~0ul << bits))
+ * if (index >= (1ul << bits))
* we have a mismatch in skip bits and failed
* else
* we know the value is cindex
+ *
+ * This check is safe even if bits == KEYLENGTH due to the
+ * fact that we can only allocate a node with 32 bits if a
+ * long is greater than 32 bits.
*/
- if (index & (~0ul << n->bits))
+ if (index >= (1ul << n->bits))
break;
/* we have found a leaf. Prefixes have already been compared */
@@ -1353,7 +1313,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
cindex = index;
}
- n = tnode_get_child_rcu(n, index);
+ n = get_child_rcu(n, index);
if (unlikely(!n))
goto backtrace;
}
@@ -1361,7 +1321,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
/* Step 2: Sort out leaves and begin backtracing for longest prefix */
for (;;) {
/* record the pointer where our next node pointer is stored */
- struct tnode __rcu **cptr = n->child;
+ struct key_vector __rcu **cptr = n->tnode;
/* This test verifies that none of the bits that differ
* between the key and the prefix exist in the region of
@@ -1393,13 +1353,17 @@ backtrace:
while (!cindex) {
t_key pkey = pn->key;
- pn = node_parent_rcu(pn);
- if (unlikely(!pn))
+ /* If we don't have a parent then there is
+ * nothing for us to do as we do not have any
+ * further nodes to parse.
+ */
+ if (IS_TRIE(pn))
return -EAGAIN;
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->backtrack);
#endif
/* Get Child's index */
+ pn = node_parent_rcu(pn);
cindex = get_index(pkey, pn);
}
@@ -1407,138 +1371,134 @@ backtrace:
cindex &= cindex - 1;
/* grab pointer for next child node */
- cptr = &pn->child[cindex];
+ cptr = &pn->tnode[cindex];
}
}
found:
+ /* this line carries forward the xor from earlier in the function */
+ index = key ^ n->key;
+
/* Step 3: Process the leaf, if that fails fall back to backtracing */
- hlist_for_each_entry_rcu(li, &n->list, hlist) {
- struct fib_alias *fa;
+ hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
+ struct fib_info *fi = fa->fa_info;
+ int nhsel, err;
- if ((key ^ n->key) & li->mask_plen)
+ if ((index >= (1ul << fa->fa_slen)) &&
+ ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
continue;
-
- list_for_each_entry_rcu(fa, &li->falh, fa_list) {
- struct fib_info *fi = fa->fa_info;
- int nhsel, err;
-
- if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
- continue;
- if (fi->fib_dead)
- continue;
- if (fa->fa_info->fib_scope < flp->flowi4_scope)
- continue;
- fib_alias_accessed(fa);
- err = fib_props[fa->fa_type].error;
- if (unlikely(err < 0)) {
+ if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+ continue;
+ if (fi->fib_dead)
+ continue;
+ if (fa->fa_info->fib_scope < flp->flowi4_scope)
+ continue;
+ fib_alias_accessed(fa);
+ err = fib_props[fa->fa_type].error;
+ if (unlikely(err < 0)) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
- this_cpu_inc(stats->semantic_match_passed);
+ this_cpu_inc(stats->semantic_match_passed);
#endif
- return err;
- }
- if (fi->fib_flags & RTNH_F_DEAD)
+ return err;
+ }
+ if (fi->fib_flags & RTNH_F_DEAD)
+ continue;
+ for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+ const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+ if (nh->nh_flags & RTNH_F_DEAD)
continue;
- for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
- const struct fib_nh *nh = &fi->fib_nh[nhsel];
-
- if (nh->nh_flags & RTNH_F_DEAD)
- continue;
- if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
- continue;
-
- if (!(fib_flags & FIB_LOOKUP_NOREF))
- atomic_inc(&fi->fib_clntref);
-
- res->prefixlen = li->plen;
- res->nh_sel = nhsel;
- res->type = fa->fa_type;
- res->scope = fi->fib_scope;
- res->fi = fi;
- res->table = tb;
- res->fa_head = &li->falh;
+ if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
+ continue;
+
+ if (!(fib_flags & FIB_LOOKUP_NOREF))
+ atomic_inc(&fi->fib_clntref);
+
+ res->prefixlen = KEYLENGTH - fa->fa_slen;
+ res->nh_sel = nhsel;
+ res->type = fa->fa_type;
+ res->scope = fi->fib_scope;
+ res->fi = fi;
+ res->table = tb;
+ res->fa_head = &n->leaf;
#ifdef CONFIG_IP_FIB_TRIE_STATS
- this_cpu_inc(stats->semantic_match_passed);
+ this_cpu_inc(stats->semantic_match_passed);
#endif
- return err;
- }
+ return err;
}
-
+ }
#ifdef CONFIG_IP_FIB_TRIE_STATS
- this_cpu_inc(stats->semantic_match_miss);
+ this_cpu_inc(stats->semantic_match_miss);
#endif
- }
goto backtrace;
}
EXPORT_SYMBOL_GPL(fib_table_lookup);
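/* Illustration of the backtracking step "cindex &= cindex - 1" used in
 * fib_table_lookup() above: clearing the lowest set bit of the child
 * index moves the walk to the next candidate that can still cover the
 * key, i.e. the next-shorter prefix within the current tnode.  This is
 * only a numeric demo, not kernel code.
 */
#include <stdio.h>

int main(void)
{
	unsigned long cindex = 22;	/* binary 10110 */

	while (cindex) {
		printf("try child index %lu\n", cindex);
		cindex &= cindex - 1;	/* 22 -> 20 -> 16 -> 0 */
	}
	return 0;
}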
-/*
- * Remove the leaf and return parent.
- */
-static void trie_leaf_remove(struct trie *t, struct tnode *l)
+static void fib_remove_alias(struct trie *t, struct key_vector *tp,
+ struct key_vector *l, struct fib_alias *old)
{
- struct tnode *tp = node_parent(l);
+ /* record the location of the previous fib_alias entry */
+ struct hlist_node **pprev = old->fa_list.pprev;
+ struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);
- pr_debug("entering trie_leaf_remove(%p)\n", l);
+ /* remove the fib_alias from the list */
+ hlist_del_rcu(&old->fa_list);
- if (tp) {
- put_child(tp, get_index(l->key, tp), NULL);
+ /* if we emptied the list this leaf will be freed and we can sort
+ * out parent suffix lengths as a part of trie_rebalance
+ */
+ if (hlist_empty(&l->leaf)) {
+ put_child_root(tp, l->key, NULL);
+ node_free(l);
trie_rebalance(t, tp);
- } else {
- RCU_INIT_POINTER(t->trie, NULL);
+ return;
}
- node_free(l);
+ /* only access fa if it is pointing at the last valid hlist_node */
+ if (*pprev)
+ return;
+
+ /* update the trie with the latest suffix length */
+ l->slen = fa->fa_slen;
+ leaf_pull_suffix(tp, l);
}
-/*
- * Caller must hold RTNL.
- */
+/* Caller must hold RTNL. */
int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
{
struct trie *t = (struct trie *) tb->tb_data;
- u32 key, mask;
- int plen = cfg->fc_dst_len;
- u8 tos = cfg->fc_tos;
struct fib_alias *fa, *fa_to_delete;
- struct list_head *fa_head;
- struct tnode *l;
- struct leaf_info *li;
+ struct key_vector *l, *tp;
+ u8 plen = cfg->fc_dst_len;
+ u8 slen = KEYLENGTH - plen;
+ u8 tos = cfg->fc_tos;
+ u32 key;
- if (plen > 32)
+ if (plen > KEYLENGTH)
return -EINVAL;
key = ntohl(cfg->fc_dst);
- mask = ntohl(inet_make_mask(plen));
- if (key & ~mask)
+ if ((plen < KEYLENGTH) && (key << plen))
return -EINVAL;
- key = key & mask;
- l = fib_find_node(t, key);
-
+ l = fib_find_node(t, &tp, key);
if (!l)
return -ESRCH;
- li = find_leaf_info(l, plen);
-
- if (!li)
- return -ESRCH;
-
- fa_head = &li->falh;
- fa = fib_find_alias(fa_head, tos, 0);
-
+ fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id);
if (!fa)
return -ESRCH;
pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
fa_to_delete = NULL;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, fa_head, fa_list) {
+ hlist_for_each_entry_from(fa, fa_list) {
struct fib_info *fi = fa->fa_info;
- if (fa->fa_tos != tos)
+ if ((fa->fa_slen != slen) ||
+ (fa->tb_id != tb->tb_id) ||
+ (fa->fa_tos != tos))
break;
if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
@@ -1557,240 +1517,397 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
if (!fa_to_delete)
return -ESRCH;
- fa = fa_to_delete;
- rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
- &cfg->fc_nlinfo, 0);
+ netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
+ cfg->fc_type, tb->tb_id);
- list_del_rcu(&fa->fa_list);
+ rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
+ &cfg->fc_nlinfo, 0);
if (!plen)
tb->tb_num_default--;
- if (list_empty(fa_head)) {
- remove_leaf_info(l, li);
- free_leaf_info(li);
- }
+ fib_remove_alias(t, tp, l, fa_to_delete);
- if (hlist_empty(&l->list))
- trie_leaf_remove(t, l);
-
- if (fa->fa_state & FA_S_ACCESSED)
+ if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
- fib_release_info(fa->fa_info);
- alias_free_mem_rcu(fa);
+ fib_release_info(fa_to_delete->fa_info);
+ alias_free_mem_rcu(fa_to_delete);
return 0;
}
-static int trie_flush_list(struct list_head *head)
+/* Scan for the next leaf starting at the provided key value */
+static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
{
- struct fib_alias *fa, *fa_node;
- int found = 0;
+ struct key_vector *pn, *n = *tn;
+ unsigned long cindex;
- list_for_each_entry_safe(fa, fa_node, head, fa_list) {
- struct fib_info *fi = fa->fa_info;
+ /* this loop tries to find the key in the trie */
+ do {
+ /* record parent and next child index */
+ pn = n;
+ cindex = key ? get_index(key, pn) : 0;
- if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
- list_del_rcu(&fa->fa_list);
- fib_release_info(fa->fa_info);
- alias_free_mem_rcu(fa);
- found++;
+ if (cindex >> pn->bits)
+ break;
+
+ /* descend into the next child */
+ n = get_child_rcu(pn, cindex++);
+ if (!n)
+ break;
+
+ /* guarantee forward progress on the keys */
+ if (IS_LEAF(n) && (n->key >= key))
+ goto found;
+ } while (IS_TNODE(n));
+
+ /* this loop will search for the next leaf with a greater key */
+ while (!IS_TRIE(pn)) {
+ /* if we exhausted the parent node we will need to climb */
+ if (cindex >= (1ul << pn->bits)) {
+ t_key pkey = pn->key;
+
+ pn = node_parent_rcu(pn);
+ cindex = get_index(pkey, pn) + 1;
+ continue;
}
+
+ /* grab the next available node */
+ n = get_child_rcu(pn, cindex++);
+ if (!n)
+ continue;
+
+ /* no need to compare keys since we bumped the index */
+ if (IS_LEAF(n))
+ goto found;
+
+ /* Rescan: start scanning in the new node */
+ pn = n;
+ cindex = 0;
}
- return found;
+
+ *tn = pn;
+ return NULL; /* Root of trie */
+found:
+ /* if we are at the limit for keys just return NULL for the tnode */
+ *tn = pn;
+ return n;
}
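/* Condensed sketch of how the callers below (fib_trie_unmerge(),
 * fib_table_dump()) drive leaf_walk_rcu(): restart from "last key + 1"
 * and stop once the 32-bit key wraps back to 0, which means the highest
 * leaf has been visited.  The caller is assumed to hold rcu_read_lock().
 */
static void walk_all_leaves(struct trie *t)
{
	struct key_vector *l, *tp = t->kv;
	t_key key = 0;

	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
		/* ... visit leaf l ... */

		key = l->key + 1;
		if (key < l->key)	/* key wrapped: l was the last leaf */
			break;
	}
}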
-static int trie_flush_leaf(struct tnode *l)
+static void fib_trie_free(struct fib_table *tb)
{
- int found = 0;
- struct hlist_head *lih = &l->list;
+ struct trie *t = (struct trie *)tb->tb_data;
+ struct key_vector *pn = t->kv;
+ unsigned long cindex = 1;
struct hlist_node *tmp;
- struct leaf_info *li = NULL;
- unsigned char plen = KEYLENGTH;
+ struct fib_alias *fa;
+
+ /* walk trie in reverse order and free everything */
+ for (;;) {
+ struct key_vector *n;
+
+ if (!(cindex--)) {
+ t_key pkey = pn->key;
+
+ if (IS_TRIE(pn))
+ break;
+
+ n = pn;
+ pn = node_parent(pn);
- hlist_for_each_entry_safe(li, tmp, lih, hlist) {
- found += trie_flush_list(&li->falh);
+ /* drop emptied tnode */
+ put_child_root(pn, n->key, NULL);
+ node_free(n);
+
+ cindex = get_index(pkey, pn);
- if (list_empty(&li->falh)) {
- hlist_del_rcu(&li->hlist);
- free_leaf_info(li);
continue;
}
- plen = li->plen;
- }
+ /* grab the next available node */
+ n = get_child(pn, cindex);
+ if (!n)
+ continue;
- l->slen = KEYLENGTH - plen;
+ if (IS_TNODE(n)) {
+ /* record pn and cindex for leaf walking */
+ pn = n;
+ cindex = 1ul << n->bits;
- return found;
+ continue;
+ }
+
+ hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+ hlist_del_rcu(&fa->fa_list);
+ alias_free_mem_rcu(fa);
+ }
+
+ put_child_root(pn, n->key, NULL);
+ node_free(n);
+ }
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+ free_percpu(t->stats);
+#endif
+ kfree(tb);
}
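/* Skeleton of the reverse-order walk shared by fib_trie_free() above and
 * fib_table_flush_external()/fib_table_flush() below.  The cursor is the
 * pair (pn, cindex); children are visited from the highest index down to
 * 0, and when cindex underflows the walk climbs to the parent or stops
 * at the trie pseudo-root.  Leaf handling is left as a stub.
 */
static void walk_trie_reverse(struct trie *t)
{
	struct key_vector *pn = t->kv;
	unsigned long cindex = 1;

	for (;;) {
		struct key_vector *n;

		if (!(cindex--)) {
			t_key pkey = pn->key;

			if (IS_TRIE(pn))
				break;			/* whole trie visited */

			pn = node_parent(pn);
			cindex = get_index(pkey, pn);	/* resume in parent */
			continue;
		}

		n = get_child(pn, cindex);
		if (!n)
			continue;

		if (IS_TNODE(n)) {
			pn = n;				/* descend */
			cindex = 1ul << n->bits;
			continue;
		}

		/* ... handle leaf n ... */
	}
}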
-/*
- * Scan for the next right leaf starting at node p->child[idx]
- * Since we have back pointer, no recursion necessary.
- */
-static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
+struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
{
- do {
- unsigned long idx = c ? idx = get_index(c->key, p) + 1 : 0;
+ struct trie *ot = (struct trie *)oldtb->tb_data;
+ struct key_vector *l, *tp = ot->kv;
+ struct fib_table *local_tb;
+ struct fib_alias *fa;
+ struct trie *lt;
+ t_key key = 0;
- while (idx < tnode_child_length(p)) {
- c = tnode_get_child_rcu(p, idx++);
- if (!c)
+ if (oldtb->tb_data == oldtb->__data)
+ return oldtb;
+
+ local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL);
+ if (!local_tb)
+ return NULL;
+
+ lt = (struct trie *)local_tb->tb_data;
+
+ while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
+ struct key_vector *local_l = NULL, *local_tp;
+
+ hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+ struct fib_alias *new_fa;
+
+ if (local_tb->tb_id != fa->tb_id)
continue;
- if (IS_LEAF(c))
- return c;
+ /* clone fa for new local table */
+ new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
+ if (!new_fa)
+ goto out;
+
+ memcpy(new_fa, fa, sizeof(*fa));
- /* Rescan start scanning in new node */
- p = c;
- idx = 0;
+ /* insert clone into table */
+ if (!local_l)
+ local_l = fib_find_node(lt, &local_tp, l->key);
+
+ if (fib_insert_alias(lt, local_tp, local_l, new_fa,
+ NULL, l->key))
+ goto out;
}
- /* Node empty, walk back up to parent */
- c = p;
- } while ((p = node_parent_rcu(c)) != NULL);
+ /* stop loop if key wrapped back to 0 */
+ key = l->key + 1;
+ if (key < l->key)
+ break;
+ }
- return NULL; /* Root of trie */
+ return local_tb;
+out:
+ fib_trie_free(local_tb);
+
+ return NULL;
}
-static struct tnode *trie_firstleaf(struct trie *t)
+/* Caller must hold RTNL */
+void fib_table_flush_external(struct fib_table *tb)
{
- struct tnode *n = rcu_dereference_rtnl(t->trie);
+ struct trie *t = (struct trie *)tb->tb_data;
+ struct key_vector *pn = t->kv;
+ unsigned long cindex = 1;
+ struct hlist_node *tmp;
+ struct fib_alias *fa;
- if (!n)
- return NULL;
+ /* walk trie in reverse order */
+ for (;;) {
+ unsigned char slen = 0;
+ struct key_vector *n;
- if (IS_LEAF(n)) /* trie is just a leaf */
- return n;
+ if (!(cindex--)) {
+ t_key pkey = pn->key;
- return leaf_walk_rcu(n, NULL);
-}
+ /* cannot resize the trie vector */
+ if (IS_TRIE(pn))
+ break;
-static struct tnode *trie_nextleaf(struct tnode *l)
-{
- struct tnode *p = node_parent_rcu(l);
+ /* resize completed node */
+ pn = resize(t, pn);
+ cindex = get_index(pkey, pn);
- if (!p)
- return NULL; /* trie with just one leaf */
+ continue;
+ }
- return leaf_walk_rcu(p, l);
-}
+ /* grab the next available node */
+ n = get_child(pn, cindex);
+ if (!n)
+ continue;
-static struct tnode *trie_leafindex(struct trie *t, int index)
-{
- struct tnode *l = trie_firstleaf(t);
+ if (IS_TNODE(n)) {
+ /* record pn and cindex for leaf walking */
+ pn = n;
+ cindex = 1ul << n->bits;
- while (l && index-- > 0)
- l = trie_nextleaf(l);
+ continue;
+ }
- return l;
-}
+ hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+ struct fib_info *fi = fa->fa_info;
+
+ /* if alias was cloned to local then we just
+ * need to remove the local copy from main
+ */
+ if (tb->tb_id != fa->tb_id) {
+ hlist_del_rcu(&fa->fa_list);
+ alias_free_mem_rcu(fa);
+ continue;
+ }
+ /* record local slen */
+ slen = fa->fa_slen;
-/*
- * Caller must hold RTNL.
- */
+ if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+ continue;
+
+ netdev_switch_fib_ipv4_del(n->key,
+ KEYLENGTH - fa->fa_slen,
+ fi, fa->fa_tos,
+ fa->fa_type, tb->tb_id);
+ }
+
+ /* update leaf slen */
+ n->slen = slen;
+
+ if (hlist_empty(&n->leaf)) {
+ put_child_root(pn, n->key, NULL);
+ node_free(n);
+ } else {
+ leaf_pull_suffix(pn, n);
+ }
+ }
+}
+
+/* Caller must hold RTNL. */
int fib_table_flush(struct fib_table *tb)
{
- struct trie *t = (struct trie *) tb->tb_data;
- struct tnode *l, *ll = NULL;
+ struct trie *t = (struct trie *)tb->tb_data;
+ struct key_vector *pn = t->kv;
+ unsigned long cindex = 1;
+ struct hlist_node *tmp;
+ struct fib_alias *fa;
int found = 0;
- for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
- found += trie_flush_leaf(l);
+ /* walk trie in reverse order */
+ for (;;) {
+ unsigned char slen = 0;
+ struct key_vector *n;
+
+ if (!(cindex--)) {
+ t_key pkey = pn->key;
- if (ll) {
- if (hlist_empty(&ll->list))
- trie_leaf_remove(t, ll);
- else
- leaf_pull_suffix(ll);
+ /* cannot resize the trie vector */
+ if (IS_TRIE(pn))
+ break;
+
+ /* resize completed node */
+ pn = resize(t, pn);
+ cindex = get_index(pkey, pn);
+
+ continue;
}
- ll = l;
- }
+ /* grab the next available node */
+ n = get_child(pn, cindex);
+ if (!n)
+ continue;
- if (ll) {
- if (hlist_empty(&ll->list))
- trie_leaf_remove(t, ll);
- else
- leaf_pull_suffix(ll);
+ if (IS_TNODE(n)) {
+ /* record pn and cindex for leaf walking */
+ pn = n;
+ cindex = 1ul << n->bits;
+
+ continue;
+ }
+
+ hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+ struct fib_info *fi = fa->fa_info;
+
+ if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) {
+ slen = fa->fa_slen;
+ continue;
+ }
+
+ netdev_switch_fib_ipv4_del(n->key,
+ KEYLENGTH - fa->fa_slen,
+ fi, fa->fa_tos,
+ fa->fa_type, tb->tb_id);
+ hlist_del_rcu(&fa->fa_list);
+ fib_release_info(fa->fa_info);
+ alias_free_mem_rcu(fa);
+ found++;
+ }
+
+ /* update leaf slen */
+ n->slen = slen;
+
+ if (hlist_empty(&n->leaf)) {
+ put_child_root(pn, n->key, NULL);
+ node_free(n);
+ } else {
+ leaf_pull_suffix(pn, n);
+ }
}
pr_debug("trie_flush found=%d\n", found);
return found;
}
-void fib_free_table(struct fib_table *tb)
+static void __trie_free_rcu(struct rcu_head *head)
{
+ struct fib_table *tb = container_of(head, struct fib_table, rcu);
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie *t = (struct trie *)tb->tb_data;
- free_percpu(t->stats);
+ if (tb->tb_data == tb->__data)
+ free_percpu(t->stats);
#endif /* CONFIG_IP_FIB_TRIE_STATS */
kfree(tb);
}
-static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
- struct fib_table *tb,
- struct sk_buff *skb, struct netlink_callback *cb)
+void fib_free_table(struct fib_table *tb)
{
- int i, s_i;
+ call_rcu(&tb->rcu, __trie_free_rcu);
+}
+
+static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ __be32 xkey = htonl(l->key);
struct fib_alias *fa;
- __be32 xkey = htonl(key);
+ int i, s_i;
- s_i = cb->args[5];
+ s_i = cb->args[4];
i = 0;
/* rcu_read_lock is hold by caller */
-
- list_for_each_entry_rcu(fa, fah, fa_list) {
+ hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
if (i < s_i) {
i++;
continue;
}
+ if (tb->tb_id != fa->tb_id) {
+ i++;
+ continue;
+ }
+
if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWROUTE,
tb->tb_id,
fa->fa_type,
xkey,
- plen,
+ KEYLENGTH - fa->fa_slen,
fa->fa_tos,
fa->fa_info, NLM_F_MULTI) < 0) {
- cb->args[5] = i;
- return -1;
- }
- i++;
- }
- cb->args[5] = i;
- return skb->len;
-}
-
-static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
- struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct leaf_info *li;
- int i, s_i;
-
- s_i = cb->args[4];
- i = 0;
-
- /* rcu_read_lock is hold by caller */
- hlist_for_each_entry_rcu(li, &l->list, hlist) {
- if (i < s_i) {
- i++;
- continue;
- }
-
- if (i > s_i)
- cb->args[5] = 0;
-
- if (list_empty(&li->falh))
- continue;
-
- if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
cb->args[4] = i;
return -1;
}
@@ -1801,44 +1918,38 @@ static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
return skb->len;
}
+/* rcu_read_lock needs to be held by the caller from the read side */
int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct tnode *l;
- struct trie *t = (struct trie *) tb->tb_data;
- t_key key = cb->args[2];
- int count = cb->args[3];
-
- rcu_read_lock();
+ struct trie *t = (struct trie *)tb->tb_data;
+ struct key_vector *l, *tp = t->kv;
/* Dump starting at last key.
* Note: 0.0.0.0/0 (ie default) is first key.
*/
- if (count == 0)
- l = trie_firstleaf(t);
- else {
- /* Normally, continue from last key, but if that is missing
- * fallback to using slow rescan
- */
- l = fib_find_node(t, key);
- if (!l)
- l = trie_leafindex(t, count);
- }
+ int count = cb->args[2];
+ t_key key = cb->args[3];
- while (l) {
- cb->args[2] = l->key;
+ while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
- cb->args[3] = count;
- rcu_read_unlock();
+ cb->args[3] = key;
+ cb->args[2] = count;
return -1;
}
++count;
- l = trie_nextleaf(l);
+ key = l->key + 1;
+
memset(&cb->args[4], 0,
sizeof(cb->args) - 4*sizeof(cb->args[0]));
+
+ /* stop loop if key wrapped back to 0 */
+ if (key < l->key)
+ break;
}
- cb->args[3] = count;
- rcu_read_unlock();
+
+ cb->args[3] = key;
+ cb->args[2] = count;
return skb->len;
}
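/* The dump cursor kept in cb->args by the code above, shown as a struct
 * for clarity (illustrative only): args[2] counts leaves already dumped,
 * args[3] holds the key to resume from, and args[4] is the alias index
 * inside the current leaf, as used by fn_trie_dump_leaf().
 */
struct fib_dump_cursor {
	unsigned long leaves_dumped;	/* cb->args[2] */
	unsigned long resume_key;	/* cb->args[3] */
	unsigned long alias_index;	/* cb->args[4] */
};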
@@ -1850,28 +1961,34 @@ void __init fib_trie_init(void)
0, SLAB_PANIC, NULL);
trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
- max(sizeof(struct tnode),
- sizeof(struct leaf_info)),
+ LEAF_SIZE,
0, SLAB_PANIC, NULL);
}
-
-struct fib_table *fib_trie_table(u32 id)
+struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
struct trie *t;
+ size_t sz = sizeof(*tb);
+
+ if (!alias)
+ sz += sizeof(struct trie);
- tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
- GFP_KERNEL);
- if (tb == NULL)
+ tb = kzalloc(sz, GFP_KERNEL);
+ if (!tb)
return NULL;
tb->tb_id = id;
tb->tb_default = -1;
tb->tb_num_default = 0;
+ tb->tb_data = (alias ? alias->__data : tb->__data);
+
+ if (alias)
+ return tb;
t = (struct trie *) tb->tb_data;
- RCU_INIT_POINTER(t->trie, NULL);
+ t->kv[0].pos = KEYLENGTH;
+ t->kv[0].slen = KEYLENGTH;
#ifdef CONFIG_IP_FIB_TRIE_STATS
t->stats = alloc_percpu(struct trie_use_stats);
if (!t->stats) {
@@ -1888,65 +2005,63 @@ struct fib_table *fib_trie_table(u32 id)
struct fib_trie_iter {
struct seq_net_private p;
struct fib_table *tb;
- struct tnode *tnode;
+ struct key_vector *tnode;
unsigned int index;
unsigned int depth;
};
-static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
{
unsigned long cindex = iter->index;
- struct tnode *tn = iter->tnode;
- struct tnode *p;
-
- /* A single entry routing table */
- if (!tn)
- return NULL;
+ struct key_vector *pn = iter->tnode;
+ t_key pkey;
pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
iter->tnode, iter->index, iter->depth);
-rescan:
- while (cindex < tnode_child_length(tn)) {
- struct tnode *n = tnode_get_child_rcu(tn, cindex);
- if (n) {
+ while (!IS_TRIE(pn)) {
+ while (cindex < child_length(pn)) {
+ struct key_vector *n = get_child_rcu(pn, cindex++);
+
+ if (!n)
+ continue;
+
if (IS_LEAF(n)) {
- iter->tnode = tn;
- iter->index = cindex + 1;
+ iter->tnode = pn;
+ iter->index = cindex;
} else {
/* push down one level */
iter->tnode = n;
iter->index = 0;
++iter->depth;
}
+
return n;
}
- ++cindex;
- }
-
- /* Current node exhausted, pop back up */
- p = node_parent_rcu(tn);
- if (p) {
- cindex = get_index(tn->key, p) + 1;
- tn = p;
+ /* Current node exhausted, pop back up */
+ pkey = pn->key;
+ pn = node_parent_rcu(pn);
+ cindex = get_index(pkey, pn) + 1;
--iter->depth;
- goto rescan;
}
- /* got root? */
+ /* record root node so further searches know we are done */
+ iter->tnode = pn;
+ iter->index = 0;
+
return NULL;
}
-static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
- struct trie *t)
+static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
+ struct trie *t)
{
- struct tnode *n;
+ struct key_vector *n, *pn = t->kv;
if (!t)
return NULL;
- n = rcu_dereference(t->trie);
+ n = rcu_dereference(pn->tnode[0]);
if (!n)
return NULL;
@@ -1955,7 +2070,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
iter->index = 0;
iter->depth = 1;
} else {
- iter->tnode = NULL;
+ iter->tnode = pn;
iter->index = 0;
iter->depth = 0;
}
@@ -1965,7 +2080,7 @@ static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
- struct tnode *n;
+ struct key_vector *n;
struct fib_trie_iter iter;
memset(s, 0, sizeof(*s));
@@ -1973,20 +2088,20 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
rcu_read_lock();
for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
if (IS_LEAF(n)) {
- struct leaf_info *li;
+ struct fib_alias *fa;
s->leaves++;
s->totdepth += iter.depth;
if (iter.depth > s->maxdepth)
s->maxdepth = iter.depth;
- hlist_for_each_entry_rcu(li, &n->list, hlist)
+ hlist_for_each_entry_rcu(fa, &n->leaf, fa_list)
++s->prefixes;
} else {
s->tnodes++;
if (n->bits < MAX_STAT_DEPTH)
s->nodesizes[n->bits]++;
- s->nullpointers += n->empty_children;
+ s->nullpointers += tn_info(n)->empty_children;
}
}
rcu_read_unlock();
@@ -2009,13 +2124,13 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
- bytes = sizeof(struct tnode) * stat->leaves;
+ bytes = LEAF_SIZE * stat->leaves;
seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
- bytes += sizeof(struct leaf_info) * stat->prefixes;
+ bytes += sizeof(struct fib_alias) * stat->prefixes;
seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
- bytes += sizeof(struct tnode) * stat->tnodes;
+ bytes += TNODE_SIZE(0) * stat->tnodes;
max = MAX_STAT_DEPTH;
while (max > 0 && stat->nodesizes[max-1] == 0)
@@ -2030,7 +2145,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
seq_putc(seq, '\n');
seq_printf(seq, "\tPointers: %u\n", pointers);
- bytes += sizeof(struct tnode *) * pointers;
+ bytes += sizeof(struct key_vector *) * pointers;
seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
@@ -2084,7 +2199,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"Basic info: size of leaf:"
" %Zd bytes, size of tnode: %Zd bytes.\n",
- sizeof(struct tnode), sizeof(struct tnode));
+ LEAF_SIZE, TNODE_SIZE(0));
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
@@ -2123,7 +2238,7 @@ static const struct file_operations fib_triestat_fops = {
.release = single_release_net,
};
-static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
@@ -2135,7 +2250,7 @@ static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
- struct tnode *n;
+ struct key_vector *n;
for (n = fib_trie_get_first(iter,
(struct trie *) tb->tb_data);
@@ -2164,7 +2279,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct fib_table *tb = iter->tb;
struct hlist_node *tb_node;
unsigned int h;
- struct tnode *n;
+ struct key_vector *n;
++*pos;
/* next node in same table */
@@ -2250,9 +2365,9 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
- struct tnode *n = v;
+ struct key_vector *n = v;
- if (!node_parent_rcu(n))
+ if (IS_TRIE(node_parent_rcu(n)))
fib_table_print(seq, iter->tb);
if (IS_TNODE(n)) {
@@ -2261,30 +2376,28 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
seq_indent(seq, iter->depth-1);
seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
&prf, KEYLENGTH - n->pos - n->bits, n->bits,
- n->full_children, n->empty_children);
+ tn_info(n)->full_children,
+ tn_info(n)->empty_children);
} else {
- struct leaf_info *li;
__be32 val = htonl(n->key);
+ struct fib_alias *fa;
seq_indent(seq, iter->depth);
seq_printf(seq, " |-- %pI4\n", &val);
- hlist_for_each_entry_rcu(li, &n->list, hlist) {
- struct fib_alias *fa;
-
- list_for_each_entry_rcu(fa, &li->falh, fa_list) {
- char buf1[32], buf2[32];
-
- seq_indent(seq, iter->depth+1);
- seq_printf(seq, " /%d %s %s", li->plen,
- rtn_scope(buf1, sizeof(buf1),
- fa->fa_info->fib_scope),
- rtn_type(buf2, sizeof(buf2),
- fa->fa_type));
- if (fa->fa_tos)
- seq_printf(seq, " tos=%d", fa->fa_tos);
- seq_putc(seq, '\n');
- }
+ hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
+ char buf1[32], buf2[32];
+
+ seq_indent(seq, iter->depth + 1);
+ seq_printf(seq, " /%zu %s %s",
+ KEYLENGTH - fa->fa_slen,
+ rtn_scope(buf1, sizeof(buf1),
+ fa->fa_info->fib_scope),
+ rtn_type(buf2, sizeof(buf2),
+ fa->fa_type));
+ if (fa->fa_tos)
+ seq_printf(seq, " tos=%d", fa->fa_tos);
+ seq_putc(seq, '\n');
}
}
@@ -2314,31 +2427,47 @@ static const struct file_operations fib_trie_fops = {
struct fib_route_iter {
struct seq_net_private p;
- struct trie *main_trie;
+ struct fib_table *main_tb;
+ struct key_vector *tnode;
loff_t pos;
t_key key;
};
-static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
+static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ loff_t pos)
{
- struct tnode *l = NULL;
- struct trie *t = iter->main_trie;
+ struct fib_table *tb = iter->main_tb;
+ struct key_vector *l, **tp = &iter->tnode;
+ struct trie *t;
+ t_key key;
- /* use cache location of last found key */
- if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
+ /* use cache location of next-to-find key */
+ if (iter->pos > 0 && pos >= iter->pos) {
pos -= iter->pos;
- else {
+ key = iter->key;
+ } else {
+ t = (struct trie *)tb->tb_data;
+ iter->tnode = t->kv;
iter->pos = 0;
- l = trie_firstleaf(t);
+ key = 0;
}
- while (l && pos-- > 0) {
+ while ((l = leaf_walk_rcu(tp, key)) != NULL) {
+ key = l->key + 1;
iter->pos++;
- l = trie_nextleaf(l);
+
+ if (pos-- <= 0)
+ break;
+
+ l = NULL;
+
+ /* handle unlikely case of a key wrap */
+ if (!key)
+ break;
}
if (l)
- iter->key = pos; /* remember it */
+ iter->key = key; /* remember it */
else
iter->pos = 0; /* forget it */
@@ -2350,37 +2479,46 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
{
struct fib_route_iter *iter = seq->private;
struct fib_table *tb;
+ struct trie *t;
rcu_read_lock();
+
tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
if (!tb)
return NULL;
- iter->main_trie = (struct trie *) tb->tb_data;
- if (*pos == 0)
- return SEQ_START_TOKEN;
- else
- return fib_route_get_idx(iter, *pos - 1);
+ iter->main_tb = tb;
+
+ if (*pos != 0)
+ return fib_route_get_idx(iter, *pos);
+
+ t = (struct trie *)tb->tb_data;
+ iter->tnode = t->kv;
+ iter->pos = 0;
+ iter->key = 0;
+
+ return SEQ_START_TOKEN;
}
static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_route_iter *iter = seq->private;
- struct tnode *l = v;
+ struct key_vector *l = NULL;
+ t_key key = iter->key;
++*pos;
- if (v == SEQ_START_TOKEN) {
- iter->pos = 0;
- l = trie_firstleaf(iter->main_trie);
- } else {
+
+ /* only allow key of 0 for start of sequence */
+ if ((v == SEQ_START_TOKEN) || key)
+ l = leaf_walk_rcu(&iter->tnode, key);
+
+ if (l) {
+ iter->key = l->key + 1;
iter->pos++;
- l = trie_nextleaf(l);
+ } else {
+ iter->pos = 0;
}
- if (l)
- iter->key = l->key;
- else
- iter->pos = 0;
return l;
}
@@ -2412,8 +2550,11 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info
*/
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
- struct tnode *l = v;
- struct leaf_info *li;
+ struct fib_route_iter *iter = seq->private;
+ struct fib_table *tb = iter->main_tb;
+ struct fib_alias *fa;
+ struct key_vector *l = v;
+ __be32 prefix;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2422,45 +2563,43 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
return 0;
}
- hlist_for_each_entry_rcu(li, &l->list, hlist) {
- struct fib_alias *fa;
- __be32 mask, prefix;
+ prefix = htonl(l->key);
- mask = inet_make_mask(li->plen);
- prefix = htonl(l->key);
+ hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+ const struct fib_info *fi = fa->fa_info;
+ __be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen);
+ unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
- list_for_each_entry_rcu(fa, &li->falh, fa_list) {
- const struct fib_info *fi = fa->fa_info;
- unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
+ if ((fa->fa_type == RTN_BROADCAST) ||
+ (fa->fa_type == RTN_MULTICAST))
+ continue;
- if (fa->fa_type == RTN_BROADCAST
- || fa->fa_type == RTN_MULTICAST)
- continue;
+ if (fa->tb_id != tb->tb_id)
+ continue;
- seq_setwidth(seq, 127);
-
- if (fi)
- seq_printf(seq,
- "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
- "%d\t%08X\t%d\t%u\t%u",
- fi->fib_dev ? fi->fib_dev->name : "*",
- prefix,
- fi->fib_nh->nh_gw, flags, 0, 0,
- fi->fib_priority,
- mask,
- (fi->fib_advmss ?
- fi->fib_advmss + 40 : 0),
- fi->fib_window,
- fi->fib_rtt >> 3);
- else
- seq_printf(seq,
- "*\t%08X\t%08X\t%04X\t%d\t%u\t"
- "%d\t%08X\t%d\t%u\t%u",
- prefix, 0, flags, 0, 0, 0,
- mask, 0, 0, 0);
-
- seq_pad(seq, '\n');
- }
+ seq_setwidth(seq, 127);
+
+ if (fi)
+ seq_printf(seq,
+ "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
+ "%d\t%08X\t%d\t%u\t%u",
+ fi->fib_dev ? fi->fib_dev->name : "*",
+ prefix,
+ fi->fib_nh->nh_gw, flags, 0, 0,
+ fi->fib_priority,
+ mask,
+ (fi->fib_advmss ?
+ fi->fib_advmss + 40 : 0),
+ fi->fib_window,
+ fi->fib_rtt >> 3);
+ else
+ seq_printf(seq,
+ "*\t%08X\t%08X\t%04X\t%d\t%u\t"
+ "%d\t%08X\t%d\t%u\t%u",
+ prefix, 0, flags, 0, 0, 0,
+ mask, 0, 0, 0);
+
+ seq_pad(seq, '\n');
}
return 0;
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index ff069f6597ac..34968cd5c146 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -16,14 +16,12 @@
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
-static DEFINE_SPINLOCK(fou_lock);
-static LIST_HEAD(fou_list);
-
struct fou {
struct socket *sock;
u8 protocol;
u8 flags;
- u16 port;
+ __be16 port;
+ u16 type;
struct udp_offload udp_offloads;
struct list_head list;
};
@@ -37,6 +35,13 @@ struct fou_cfg {
struct udp_port_cfg udp_config;
};
+static unsigned int fou_net_id;
+
+struct fou_net {
+ struct list_head fou_list;
+ struct mutex fou_lock;
+};
+
static inline struct fou *fou_from_sock(struct sock *sk)
{
return sk->sk_user_data;
@@ -387,20 +392,21 @@ out_unlock:
return err;
}
-static int fou_add_to_port_list(struct fou *fou)
+static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
+ struct fou_net *fn = net_generic(net, fou_net_id);
struct fou *fout;
- spin_lock(&fou_lock);
- list_for_each_entry(fout, &fou_list, list) {
+ mutex_lock(&fn->fou_lock);
+ list_for_each_entry(fout, &fn->fou_list, list) {
if (fou->port == fout->port) {
- spin_unlock(&fou_lock);
+ mutex_unlock(&fn->fou_lock);
return -EALREADY;
}
}
- list_add(&fou->list, &fou_list);
- spin_unlock(&fou_lock);
+ list_add(&fou->list, &fn->fou_list);
+ mutex_unlock(&fn->fou_lock);
return 0;
}
@@ -410,14 +416,10 @@ static void fou_release(struct fou *fou)
struct socket *sock = fou->sock;
struct sock *sk = sock->sk;
- udp_del_offload(&fou->udp_offloads);
-
+ if (sk->sk_family == AF_INET)
+ udp_del_offload(&fou->udp_offloads);
list_del(&fou->list);
-
- /* Remove hooks into tunnel socket */
- sk->sk_user_data = NULL;
-
- sock_release(sock);
+ udp_tunnel_sock_release(sock);
kfree(fou);
}
@@ -447,10 +449,10 @@ static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
static int fou_create(struct net *net, struct fou_cfg *cfg,
struct socket **sockp)
{
- struct fou *fou = NULL;
- int err;
struct socket *sock = NULL;
+ struct fou *fou = NULL;
struct sock *sk;
+ int err;
/* Open UDP socket */
err = udp_sock_create(net, &cfg->udp_config, &sock);
@@ -486,6 +488,8 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
goto error;
}
+ fou->type = cfg->type;
+
udp_sk(sk)->encap_type = 1;
udp_encap_enable();
@@ -502,7 +506,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
goto error;
}
- err = fou_add_to_port_list(fou);
+ err = fou_add_to_port_list(net, fou);
if (err)
goto error;
@@ -514,27 +518,27 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
error:
kfree(fou);
if (sock)
- sock_release(sock);
+ udp_tunnel_sock_release(sock);
return err;
}
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
- struct fou *fou;
- u16 port = cfg->udp_config.local_udp_port;
+ struct fou_net *fn = net_generic(net, fou_net_id);
+ __be16 port = cfg->udp_config.local_udp_port;
int err = -EINVAL;
+ struct fou *fou;
- spin_lock(&fou_lock);
- list_for_each_entry(fou, &fou_list, list) {
+ mutex_lock(&fn->fou_lock);
+ list_for_each_entry(fou, &fn->fou_list, list) {
if (fou->port == port) {
- udp_del_offload(&fou->udp_offloads);
fou_release(fou);
err = 0;
break;
}
}
- spin_unlock(&fou_lock);
+ mutex_unlock(&fn->fou_lock);
return err;
}
@@ -573,7 +577,7 @@ static int parse_nl_config(struct genl_info *info,
}
if (info->attrs[FOU_ATTR_PORT]) {
- u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);
+ __be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);
cfg->udp_config.local_udp_port = port;
}
@@ -592,6 +596,7 @@ static int parse_nl_config(struct genl_info *info,
static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
+ struct net *net = genl_info_net(info);
struct fou_cfg cfg;
int err;
@@ -599,16 +604,119 @@ static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- return fou_create(&init_net, &cfg, NULL);
+ return fou_create(net, &cfg, NULL);
}
static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
+ struct net *net = genl_info_net(info);
struct fou_cfg cfg;
+ int err;
- parse_nl_config(info, &cfg);
+ err = parse_nl_config(info, &cfg);
+ if (err)
+ return err;
- return fou_destroy(&init_net, &cfg);
+ return fou_destroy(net, &cfg);
+}
+
+static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
+{
+ if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
+ nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
+ nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
+ nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
+ return -1;
+
+ if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
+ if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
+ return -1;
+ return 0;
+}
+
+static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
+ u32 flags, struct sk_buff *skb, u8 cmd)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
+ if (!hdr)
+ return -ENOMEM;
+
+ if (fou_fill_info(fou, skb) < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct fou_net *fn = net_generic(net, fou_net_id);
+ struct sk_buff *msg;
+ struct fou_cfg cfg;
+ struct fou *fout;
+ __be16 port;
+ int ret;
+
+ ret = parse_nl_config(info, &cfg);
+ if (ret)
+ return ret;
+ port = cfg.udp_config.local_udp_port;
+ if (port == 0)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = -ESRCH;
+ mutex_lock(&fn->fou_lock);
+ list_for_each_entry(fout, &fn->fou_list, list) {
+ if (port == fout->port) {
+ ret = fou_dump_info(fout, info->snd_portid,
+ info->snd_seq, 0, msg,
+ info->genlhdr->cmd);
+ break;
+ }
+ }
+ mutex_unlock(&fn->fou_lock);
+ if (ret < 0)
+ goto out_free;
+
+ return genlmsg_reply(msg, info);
+
+out_free:
+ nlmsg_free(msg);
+ return ret;
+}
+
+static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct fou_net *fn = net_generic(net, fou_net_id);
+ struct fou *fout;
+ int idx = 0, ret;
+
+ mutex_lock(&fn->fou_lock);
+ list_for_each_entry(fout, &fn->fou_list, list) {
+ if (idx++ < cb->args[0])
+ continue;
+ ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ skb, FOU_CMD_GET);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&fn->fou_lock);
+
+ cb->args[0] = idx;
+ return skb->len;
}
static const struct genl_ops fou_nl_ops[] = {
@@ -624,6 +732,12 @@ static const struct genl_ops fou_nl_ops[] = {
.policy = fou_nl_policy,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = FOU_CMD_GET,
+ .doit = fou_nl_cmd_get_port,
+ .dumpit = fou_nl_dump,
+ .policy = fou_nl_policy,
+ },
};
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
@@ -771,12 +885,12 @@ EXPORT_SYMBOL(gue_build_header);
#ifdef CONFIG_NET_FOU_IP_TUNNELS
-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
.encap_hlen = fou_encap_hlen,
.build_header = fou_build_header,
};
-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
.encap_hlen = gue_encap_hlen,
.build_header = gue_build_header,
};
@@ -820,38 +934,63 @@ static void ip_tunnel_encap_del_fou_ops(void)
#endif
+static __net_init int fou_init_net(struct net *net)
+{
+ struct fou_net *fn = net_generic(net, fou_net_id);
+
+ INIT_LIST_HEAD(&fn->fou_list);
+ mutex_init(&fn->fou_lock);
+ return 0;
+}
+
+static __net_exit void fou_exit_net(struct net *net)
+{
+ struct fou_net *fn = net_generic(net, fou_net_id);
+ struct fou *fou, *next;
+
+ /* Close all the FOU sockets */
+ mutex_lock(&fn->fou_lock);
+ list_for_each_entry_safe(fou, next, &fn->fou_list, list)
+ fou_release(fou);
+ mutex_unlock(&fn->fou_lock);
+}
+
+static struct pernet_operations fou_net_ops = {
+ .init = fou_init_net,
+ .exit = fou_exit_net,
+ .id = &fou_net_id,
+ .size = sizeof(struct fou_net),
+};
+
static int __init fou_init(void)
{
int ret;
+ ret = register_pernet_device(&fou_net_ops);
+ if (ret)
+ goto exit;
+
ret = genl_register_family_with_ops(&fou_nl_family,
fou_nl_ops);
-
if (ret < 0)
- goto exit;
+ goto unregister;
ret = ip_tunnel_encap_add_fou_ops();
- if (ret < 0)
- genl_unregister_family(&fou_nl_family);
+ if (ret == 0)
+ return 0;
+ genl_unregister_family(&fou_nl_family);
+unregister:
+ unregister_pernet_device(&fou_net_ops);
exit:
return ret;
}
static void __exit fou_fini(void)
{
- struct fou *fou, *next;
-
ip_tunnel_encap_del_fou_ops();
-
genl_unregister_family(&fou_nl_family);
-
- /* Close all the FOU sockets */
-
- spin_lock(&fou_lock);
- list_for_each_entry_safe(fou, next, &fou_list, list)
- fou_release(fou);
- spin_unlock(&fou_lock);
+ unregister_pernet_device(&fou_net_ops);
}
module_init(fou_init);
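
The fou.c hunks above convert the module from one global list guarded by fou_lock to per-network-namespace state: a struct fou_net is allocated for every namespace through pernet_operations (.id/.size), fetched with net_generic(), and all of its sockets are released in the .exit hook. What follows is only a minimal standalone sketch of that pernet pattern, not part of the patch; the demo_* names and the empty object list are illustrative placeholders.

	#include <linux/module.h>
	#include <linux/mutex.h>
	#include <linux/list.h>
	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	static int demo_net_id;			/* slot assigned by the pernet core */

	struct demo_net {
		struct list_head obj_list;	/* objects owned by this namespace */
		struct mutex	 lock;		/* protects obj_list */
	};

	static __net_init int demo_init_net(struct net *net)
	{
		struct demo_net *dn = net_generic(net, demo_net_id);

		INIT_LIST_HEAD(&dn->obj_list);
		mutex_init(&dn->lock);
		return 0;
	}

	static __net_exit void demo_exit_net(struct net *net)
	{
		struct demo_net *dn = net_generic(net, demo_net_id);

		/* a real user would release everything still queued here */
		mutex_lock(&dn->lock);
		WARN_ON(!list_empty(&dn->obj_list));
		mutex_unlock(&dn->lock);
	}

	static struct pernet_operations demo_net_ops = {
		.init = demo_init_net,
		.exit = demo_exit_net,
		.id   = &demo_net_id,
		.size = sizeof(struct demo_net),	/* allocated per namespace */
	};

	static int __init demo_init(void)
	{
		return register_pernet_device(&demo_net_ops);
	}

	static void __exit demo_exit(void)
	{
		unregister_pernet_device(&demo_net_ops);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Because .size is non-zero the pernet core allocates and zeroes the per-net area itself, so .init only has to initialize the members, mirroring fou_init_net()/fou_exit_net() above.
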
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 5a4828ba05ad..8986e63f3bda 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
int min_headroom;
int err;
- skb = udp_tunnel_handle_offloads(skb, csum);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -131,12 +127,16 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
if (unlikely(!skb))
return -ENOMEM;
+ skb = udp_tunnel_handle_offloads(skb, csum);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- return udp_tunnel_xmit_skb(rt, skb, src, dst,
+ return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
tos, ttl, df, src_port, dst_port, xnet,
!csum);
}
@@ -196,7 +196,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
rcu_read_lock();
ptype = gro_find_receive_by_type(type);
- if (ptype == NULL) {
+ if (!ptype) {
flush = 1;
goto out_unlock;
}
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
rcu_read_lock();
ptype = gro_find_complete_by_type(type);
- if (ptype != NULL)
+ if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 51973ddc05a6..5aa46d4b44ef 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -149,7 +149,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
rcu_read_lock();
ptype = gro_find_receive_by_type(type);
- if (ptype == NULL)
+ if (!ptype)
goto out_unlock;
grehlen = GRE_HEADER_SECTION;
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
rcu_read_lock();
ptype = gro_find_complete_by_type(type);
- if (ptype != NULL)
+ if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
rcu_read_unlock();
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5e564014a0b7..f5203fba6236 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -399,7 +399,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
return;
sk = icmp_xmit_lock(net);
- if (sk == NULL)
+ if (!sk)
return;
inet = inet_sk(sk);
@@ -609,7 +609,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
skb_in->data,
sizeof(_inner_type),
&_inner_type);
- if (itp == NULL)
+ if (!itp)
goto out;
/*
@@ -627,7 +627,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
return;
sk = icmp_xmit_lock(net);
- if (sk == NULL)
+ if (!sk)
goto out_free;
/*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 666cf364df86..a3a697f5ffba 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -97,6 +97,7 @@
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
+#include <net/inet_common.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
@@ -369,7 +370,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
pip->saddr = fl4.saddr;
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
- ip_select_ident(skb, NULL);
+ ip_select_ident(net, skb, NULL);
((u8 *)&pip[1])[0] = IPOPT_RA;
((u8 *)&pip[1])[1] = 4;
((u8 *)&pip[1])[2] = 0;
@@ -691,7 +692,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
- if (skb == NULL) {
+ if (!skb) {
ip_rt_put(rt);
return -1;
}
@@ -713,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->protocol = IPPROTO_IGMP;
- ip_select_ident(skb, NULL);
+ ip_select_ident(net, skb, NULL);
((u8 *)&iph[1])[0] = IPOPT_RA;
((u8 *)&iph[1])[1] = 4;
((u8 *)&iph[1])[2] = 0;
@@ -980,7 +981,7 @@ int igmp_rcv(struct sk_buff *skb)
int len = skb->len;
bool dropped = true;
- if (in_dev == NULL)
+ if (!in_dev)
goto drop;
if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
@@ -1849,30 +1850,28 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
pmc->sfcount[MCAST_EXCLUDE] = 1;
}
-
-/*
- * Join a multicast group
+/* Join a multicast group
*/
-int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
+
+int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
- int err;
__be32 addr = imr->imr_multiaddr.s_addr;
- struct ip_mc_socklist *iml = NULL, *i;
+ struct ip_mc_socklist *iml, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
int ifindex;
int count = 0;
+ int err;
+
+ ASSERT_RTNL();
if (!ipv4_is_multicast(addr))
return -EINVAL;
- rtnl_lock();
-
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
- iml = NULL;
err = -ENODEV;
goto done;
}
@@ -1889,7 +1888,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
if (count >= sysctl_igmp_max_memberships)
goto done;
iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
- if (iml == NULL)
+ if (!iml)
goto done;
memcpy(&iml->multi, imr, sizeof(*imr));
@@ -1900,7 +1899,6 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
- rtnl_unlock();
return err;
}
EXPORT_SYMBOL(ip_mc_join_group);
@@ -1911,7 +1909,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
int err;
- if (psf == NULL) {
+ if (!psf) {
/* any-source empty exclude case */
return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, 0, NULL, 0);
@@ -1925,10 +1923,6 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
return err;
}
-/*
- * Ask a socket to leave a group.
- */
-
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
struct inet_sock *inet = inet_sk(sk);
@@ -1940,7 +1934,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
u32 ifindex;
int ret = -EADDRNOTAVAIL;
- rtnl_lock();
+ ASSERT_RTNL();
+
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
ret = -ENODEV;
@@ -1964,14 +1959,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
*imlp = iml->next_rcu;
ip_mc_dec_group(in_dev, group);
- rtnl_unlock();
+
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
out:
- rtnl_unlock();
return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);
@@ -1993,7 +1987,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
if (!ipv4_is_multicast(addr))
return -EINVAL;
- rtnl_lock();
+ ASSERT_RTNL();
imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
imr.imr_address.s_addr = mreqs->imr_interface;
@@ -2107,9 +2101,8 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
&mreqs->imr_sourceaddr, 1);
done:
- rtnl_unlock();
if (leavegroup)
- return ip_mc_leave_group(sk, &imr);
+ err = ip_mc_leave_group(sk, &imr);
return err;
}
@@ -2131,7 +2124,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
msf->imsf_fmode != MCAST_EXCLUDE)
return -EINVAL;
- rtnl_lock();
+ ASSERT_RTNL();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
@@ -2193,7 +2186,6 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
- rtnl_unlock();
if (leavegroup)
err = ip_mc_leave_group(sk, &imr);
return err;
@@ -2368,7 +2360,7 @@ void ip_mc_drop_socket(struct sock *sk)
struct ip_mc_socklist *iml;
struct net *net = sock_net(sk);
- if (inet->mc_list == NULL)
+ if (!inet->mc_list)
return;
rtnl_lock();
@@ -2378,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk)
inet->mc_list = iml->next_rcu;
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
- if (in_dev != NULL)
+ if (in_dev)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
@@ -2595,13 +2587,13 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
for_each_netdev_rcu(net, state->dev) {
struct in_device *idev;
idev = __in_dev_get_rcu(state->dev);
- if (unlikely(idev == NULL))
+ if (unlikely(!idev))
continue;
im = rcu_dereference(idev->mc_list);
- if (likely(im != NULL)) {
+ if (likely(im)) {
spin_lock_bh(&im->lock);
psf = im->sources;
- if (likely(psf != NULL)) {
+ if (likely(psf)) {
state->im = im;
state->idev = idev;
break;
@@ -2671,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
- if (likely(state->im != NULL)) {
+ if (likely(state->im)) {
spin_unlock_bh(&state->im->lock);
state->im = NULL;
}
@@ -2724,6 +2716,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
static int __net_init igmp_net_init(struct net *net)
{
struct proc_dir_entry *pde;
+ int err;
pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
if (!pde)
@@ -2732,8 +2725,18 @@ static int __net_init igmp_net_init(struct net *net)
&igmp_mcf_seq_fops);
if (!pde)
goto out_mcfilter;
+ err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET,
+ SOCK_DGRAM, 0, net);
+ if (err < 0) {
+ pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n",
+ err);
+ goto out_sock;
+ }
+
return 0;
+out_sock:
+ remove_proc_entry("mcfilter", net->proc_net);
out_mcfilter:
remove_proc_entry("igmp", net->proc_net);
out_igmp:
@@ -2744,6 +2747,7 @@ static void __net_exit igmp_net_exit(struct net *net)
{
remove_proc_entry("mcfilter", net->proc_net);
remove_proc_entry("igmp", net->proc_net);
+ inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk);
}
static struct pernet_operations igmp_net_ops = {
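
The igmp.c changes push RTNL acquisition out to the callers: ip_mc_join_group(), ip_mc_leave_group(), ip_mc_source() and ip_mc_msfilter() now only assert that the lock is already held (ASSERT_RTNL()) instead of taking rtnl_lock()/rtnl_unlock() themselves, so the locking moves out to the callers. A hedged sketch of that calling convention, with hypothetical demo_* names:

	#include <linux/rtnetlink.h>
	#include <net/sock.h>

	/* callee: requires the caller to already hold RTNL */
	static int demo_mc_op(struct sock *sk)
	{
		ASSERT_RTNL();		/* complains if RTNL is not held */
		/* ... modify multicast state that RTNL protects ... */
		return 0;
	}

	/* caller: owns the lock and may batch several operations under it */
	static int demo_caller(struct sock *sk)
	{
		int err;

		rtnl_lock();
		err = demo_mc_op(sk);
		rtnl_unlock();
		return err;
	}
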
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 3e44b9b0b78e..8976ca423a07 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -23,6 +23,7 @@
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
+#include <net/tcp.h>
#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
@@ -294,8 +295,8 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
- struct sock *newsk;
struct request_sock *req;
+ struct sock *newsk;
int error;
lock_sock(sk);
@@ -324,9 +325,11 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
newsk = req->sk;
sk_acceptq_removed(sk);
- if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
+ if (sk->sk_protocol == IPPROTO_TCP &&
+ tcp_rsk(req)->tfo_listener &&
+ queue->fastopenq) {
spin_lock_bh(&queue->fastopenq->lock);
- if (tcp_rsk(req)->listener) {
+ if (tcp_rsk(req)->tfo_listener) {
/* We are still waiting for the final ACK from 3WHS
* so can't free req now. Instead, we set req->sk to
* NULL to signify that the child socket is taken
@@ -341,7 +344,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
out:
release_sock(sk);
if (req)
- __reqsk_free(req);
+ reqsk_put(req);
return newsk;
out_err:
newsk = NULL;
@@ -400,18 +403,17 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
struct flowi4 *fl4,
const struct request_sock *req)
{
- struct rtable *rt;
const struct inet_request_sock *ireq = inet_rsk(req);
- struct ip_options_rcu *opt = inet_rsk(req)->opt;
- struct net *net = sock_net(sk);
- int flags = inet_sk_flowi_flags(sk);
+ struct net *net = read_pnet(&ireq->ireq_net);
+ struct ip_options_rcu *opt = ireq->opt;
+ struct rtable *rt;
- flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
+ flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
- sk->sk_protocol,
- flags,
+ sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
- ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+ ireq->ir_loc_addr, ireq->ir_rmt_port,
+ htons(ireq->ir_num));
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -433,9 +435,9 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
const struct request_sock *req)
{
const struct inet_request_sock *ireq = inet_rsk(req);
+ struct net *net = read_pnet(&ireq->ireq_net);
struct inet_sock *newinet = inet_sk(newsk);
struct ip_options_rcu *opt;
- struct net *net = sock_net(sk);
struct flowi4 *fl4;
struct rtable *rt;
@@ -443,11 +445,12 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
rcu_read_lock();
opt = rcu_dereference(newinet->inet_opt);
- flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
+ flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
- ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+ ireq->ir_loc_addr, ireq->ir_rmt_port,
+ htons(ireq->ir_num));
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -475,33 +478,37 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
-#define AF_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) true
#endif
-struct request_sock *inet_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
- const __be16 rport, const __be32 raddr,
+/* Note: this is temporary :
+ * req sock will no longer be in listener hash table
+*/
+struct request_sock *inet_csk_search_req(struct sock *sk,
+ const __be16 rport,
+ const __be32 raddr,
const __be32 laddr)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
- struct request_sock *req, **prev;
+ struct request_sock *req;
+ u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
+ lopt->nr_table_entries);
- for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
- lopt->nr_table_entries)];
- (req = *prev) != NULL;
- prev = &req->dl_next) {
+ spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+ for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req);
if (ireq->ir_rmt_port == rport &&
ireq->ir_rmt_addr == raddr &&
ireq->ir_loc_addr == laddr &&
AF_INET_FAMILY(req->rsk_ops->family)) {
+ atomic_inc(&req->rsk_refcnt);
WARN_ON(req->sk);
- *prevp = prev;
break;
}
}
+ spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
return req;
}
@@ -557,23 +564,58 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
-void inet_csk_reqsk_queue_prune(struct sock *parent,
- const unsigned long interval,
- const unsigned long timeout,
- const unsigned long max_rto)
+/* return true if req was found in the syn_table[] */
+static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+ struct request_sock *req)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+ struct request_sock **prev;
+ bool found = false;
+
+ spin_lock(&queue->syn_wait_lock);
+
+ for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
+ prev = &(*prev)->dl_next) {
+ if (*prev == req) {
+ *prev = req->dl_next;
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock(&queue->syn_wait_lock);
+ if (del_timer(&req->rsk_timer))
+ reqsk_put(req);
+ return found;
+}
+
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
- struct inet_connection_sock *icsk = inet_csk(parent);
+ if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ reqsk_put(req);
+ }
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
+
+static void reqsk_timer_handler(unsigned long data)
+{
+ struct request_sock *req = (struct request_sock *)data;
+ struct sock *sk_listener = req->rsk_listener;
+ struct inet_connection_sock *icsk = inet_csk(sk_listener);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct listen_sock *lopt = queue->listen_opt;
- int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
- int thresh = max_retries;
- unsigned long now = jiffies;
- struct request_sock **reqp, *req;
- int i, budget;
+ int qlen, expire = 0, resend = 0;
+ int max_retries, thresh;
+ u8 defer_accept;
- if (lopt == NULL || lopt->qlen == 0)
+ if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
+ reqsk_put(req);
return;
+ }
+ max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+ thresh = max_retries;
/* Normally all the openreqs are young and become mature
* (i.e. converted to established socket) for first timeout.
* If synack was not acknowledged for 1 second, it means
@@ -591,67 +633,65 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
* embrions; and abort old ones without pity, if old
* ones are about to clog our table.
*/
- if (lopt->qlen>>(lopt->max_qlen_log-1)) {
- int young = (lopt->qlen_young<<1);
+ qlen = listen_sock_qlen(lopt);
+ if (qlen >> (lopt->max_qlen_log - 1)) {
+ int young = listen_sock_young(lopt) << 1;
while (thresh > 2) {
- if (lopt->qlen < young)
+ if (qlen < young)
break;
thresh--;
young <<= 1;
}
}
+ defer_accept = READ_ONCE(queue->rskq_defer_accept);
+ if (defer_accept)
+ max_retries = defer_accept;
+ syn_ack_recalc(req, thresh, max_retries, defer_accept,
+ &expire, &resend);
+ req->rsk_ops->syn_ack_timeout(req);
+ if (!expire &&
+ (!resend ||
+ !inet_rtx_syn_ack(sk_listener, req) ||
+ inet_rsk(req)->acked)) {
+ unsigned long timeo;
+
+ if (req->num_timeout++ == 0)
+ atomic_inc(&lopt->young_dec);
+ timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
+ mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
+ return;
+ }
+ inet_csk_reqsk_queue_drop(sk_listener, req);
+ reqsk_put(req);
+}
- if (queue->rskq_defer_accept)
- max_retries = queue->rskq_defer_accept;
-
- budget = 2 * (lopt->nr_table_entries / (timeout / interval));
- i = lopt->clock_hand;
-
- do {
- reqp=&lopt->syn_table[i];
- while ((req = *reqp) != NULL) {
- if (time_after_eq(now, req->expires)) {
- int expire = 0, resend = 0;
-
- syn_ack_recalc(req, thresh, max_retries,
- queue->rskq_defer_accept,
- &expire, &resend);
- req->rsk_ops->syn_ack_timeout(parent, req);
- if (!expire &&
- (!resend ||
- !inet_rtx_syn_ack(parent, req) ||
- inet_rsk(req)->acked)) {
- unsigned long timeo;
-
- if (req->num_timeout++ == 0)
- lopt->qlen_young--;
- timeo = min(timeout << req->num_timeout,
- max_rto);
- req->expires = now + timeo;
- reqp = &req->dl_next;
- continue;
- }
-
- /* Drop this request */
- inet_csk_reqsk_queue_unlink(parent, req, reqp);
- reqsk_queue_removed(queue, req);
- reqsk_free(req);
- continue;
- }
- reqp = &req->dl_next;
- }
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ u32 hash, struct request_sock *req,
+ unsigned long timeout)
+{
+ struct listen_sock *lopt = queue->listen_opt;
- i = (i + 1) & (lopt->nr_table_entries - 1);
+ req->num_retrans = 0;
+ req->num_timeout = 0;
+ req->sk = NULL;
- } while (--budget > 0);
+ /* before letting lookups find us, make sure all req fields
+ * are committed to memory and refcnt initialized.
+ */
+ smp_wmb();
+ atomic_set(&req->rsk_refcnt, 2);
+ setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+ req->rsk_hash = hash;
- lopt->clock_hand = i;
+ spin_lock(&queue->syn_wait_lock);
+ req->dl_next = lopt->syn_table[hash];
+ lopt->syn_table[hash] = req;
+ spin_unlock(&queue->syn_wait_lock);
- if (lopt->qlen)
- inet_csk_reset_keepalive_timer(parent, interval);
+ mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
}
-EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
+EXPORT_SYMBOL(reqsk_queue_hash_req);
/**
* inet_csk_clone_lock - clone an inet socket, and lock its clone
@@ -667,7 +707,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
{
struct sock *newsk = sk_clone_lock(sk, priority);
- if (newsk != NULL) {
+ if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
newsk->sk_state = TCP_SYN_RECV;
@@ -679,6 +719,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
newsk->sk_write_space = sk_stream_write_space;
newsk->sk_mark = inet_rsk(req)->ir_mark;
+ atomic64_set(&newsk->sk_cookie,
+ atomic64_read(&inet_rsk(req)->ir_cookie));
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
@@ -785,8 +827,6 @@ void inet_csk_listen_stop(struct sock *sk)
struct request_sock *acc_req;
struct request_sock *req;
- inet_csk_delete_keepalive_timer(sk);
-
/* make all the listen_opt local to us */
acc_req = reqsk_queue_yank_acceptq(queue);
@@ -816,9 +856,9 @@ void inet_csk_listen_stop(struct sock *sk)
percpu_counter_inc(sk->sk_prot->orphan_count);
- if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
+ if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
BUG_ON(tcp_sk(child)->fastopen_rsk != req);
- BUG_ON(sk != tcp_rsk(req)->listener);
+ BUG_ON(sk != req->rsk_listener);
/* Paranoid, to prevent race condition if
* an inbound pkt destined for child is
@@ -827,7 +867,6 @@ void inet_csk_listen_stop(struct sock *sk)
* tcp_v4_destroy_sock().
*/
tcp_sk(child)->fastopen_rsk = NULL;
- sock_put(sk);
}
inet_csk_destroy_sock(child);
@@ -836,9 +875,9 @@ void inet_csk_listen_stop(struct sock *sk)
sock_put(child);
sk_acceptq_removed(sk);
- __reqsk_free(req);
+ reqsk_put(req);
}
- if (queue->fastopenq != NULL) {
+ if (queue->fastopenq) {
/* Free all the reqs queued in rskq_rst_head. */
spin_lock_bh(&queue->fastopenq->lock);
acc_req = queue->fastopenq->rskq_rst_head;
@@ -846,7 +885,7 @@ void inet_csk_listen_stop(struct sock *sk)
spin_unlock_bh(&queue->fastopenq->lock);
while ((req = acc_req) != NULL) {
acc_req = req->dl_next;
- __reqsk_free(req);
+ reqsk_put(req);
}
}
WARN_ON(sk->sk_ack_backlog);
@@ -870,7 +909,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- if (icsk->icsk_af_ops->compat_getsockopt != NULL)
+ if (icsk->icsk_af_ops->compat_getsockopt)
return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
@@ -883,7 +922,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- if (icsk->icsk_af_ops->compat_setsockopt != NULL)
+ if (icsk->icsk_af_ops->compat_setsockopt)
return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 592aff37366b..4d32262c7502 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -38,16 +38,12 @@
static const struct inet_diag_handler **inet_diag_table;
struct inet_diag_entry {
- __be32 *saddr;
- __be32 *daddr;
+ const __be32 *saddr;
+ const __be32 *daddr;
u16 sport;
u16 dport;
u16 family;
u16 userlocks;
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */
- struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */
-#endif
};
static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -65,12 +61,35 @@ static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
return inet_diag_table[proto];
}
-static inline void inet_diag_unlock_handler(
- const struct inet_diag_handler *handler)
+static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
mutex_unlock(&inet_diag_table_mutex);
}
+static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
+{
+ r->idiag_family = sk->sk_family;
+
+ r->id.idiag_sport = htons(sk->sk_num);
+ r->id.idiag_dport = sk->sk_dport;
+ r->id.idiag_if = sk->sk_bound_dev_if;
+ sock_diag_save_cookie(sk, r->id.idiag_cookie);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
+ *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
+ r->id.idiag_src[0] = sk->sk_rcv_saddr;
+ r->id.idiag_dst[0] = sk->sk_daddr;
+ }
+}
+
static size_t inet_sk_attr_size(void)
{
return nla_total_size(sizeof(struct tcp_info))
@@ -86,21 +105,22 @@ static size_t inet_sk_attr_size(void)
}
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
+ u32 portid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh)
{
const struct inet_sock *inet = inet_sk(sk);
+ const struct tcp_congestion_ops *ca_ops;
+ const struct inet_diag_handler *handler;
+ int ext = req->idiag_ext;
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
struct nlattr *attr;
void *info = NULL;
- const struct inet_diag_handler *handler;
- int ext = req->idiag_ext;
handler = inet_diag_table[req->sdiag_protocol];
- BUG_ON(handler == NULL);
+ BUG_ON(!handler);
nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
nlmsg_flags);
@@ -108,25 +128,13 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
return -EMSGSIZE;
r = nlmsg_data(nlh);
- BUG_ON(sk->sk_state == TCP_TIME_WAIT);
+ BUG_ON(!sk_fullsock(sk));
- r->idiag_family = sk->sk_family;
+ inet_diag_msg_common_fill(r, sk);
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
r->idiag_retrans = 0;
- r->id.idiag_if = sk->sk_bound_dev_if;
- sock_diag_save_cookie(sk, r->id.idiag_cookie);
-
- r->id.idiag_sport = inet->inet_sport;
- r->id.idiag_dport = inet->inet_dport;
-
- memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
- memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-
- r->id.idiag_src[0] = inet->inet_rcv_saddr;
- r->id.idiag_dst[0] = inet->inet_daddr;
-
if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
goto errout;
@@ -139,10 +147,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
-
- *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
- *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
-
if (ext & (1 << (INET_DIAG_TCLASS - 1)))
if (nla_put_u8(skb, INET_DIAG_TCLASS,
inet6_sk(sk)->tclass) < 0)
@@ -169,7 +173,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
goto errout;
- if (icsk == NULL) {
+ if (!icsk) {
handler->idiag_get_info(sk, r, NULL);
goto out;
}
@@ -205,16 +209,33 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
info = nla_data(attr);
}
- if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
- if (nla_put_string(skb, INET_DIAG_CONG,
- icsk->icsk_ca_ops->name) < 0)
+ if (ext & (1 << (INET_DIAG_CONG - 1))) {
+ int err = 0;
+
+ rcu_read_lock();
+ ca_ops = READ_ONCE(icsk->icsk_ca_ops);
+ if (ca_ops)
+ err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
+ rcu_read_unlock();
+ if (err < 0)
goto errout;
+ }
handler->idiag_get_info(sk, r, info);
- if (sk->sk_state < TCP_TIME_WAIT &&
- icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
- icsk->icsk_ca_ops->get_info(sk, ext, skb);
+ if (sk->sk_state < TCP_TIME_WAIT) {
+ union tcp_cc_info info;
+ size_t sz = 0;
+ int attr;
+
+ rcu_read_lock();
+ ca_ops = READ_ONCE(icsk->icsk_ca_ops);
+ if (ca_ops && ca_ops->get_info)
+ sz = ca_ops->get_info(sk, ext, &attr, &info);
+ rcu_read_unlock();
+ if (sz && nla_put(skb, attr, sz, &info) < 0)
+ goto errout;
+ }
out:
nlmsg_end(skb, nlh);
@@ -227,23 +248,25 @@ errout:
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
static int inet_csk_diag_fill(struct sock *sk,
- struct sk_buff *skb, struct inet_diag_req_v2 *req,
+ struct sk_buff *skb,
+ const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
- return inet_sk_diag_fill(sk, inet_csk(sk),
- skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
+ return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
+ user_ns, portid, seq, nlmsg_flags, unlh);
}
-static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
- struct sk_buff *skb, struct inet_diag_req_v2 *req,
+static int inet_twsk_diag_fill(struct sock *sk,
+ struct sk_buff *skb,
u32 portid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
- s32 tmo;
+ struct inet_timewait_sock *tw = inet_twsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
+ long tmo;
nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
nlmsg_flags);
@@ -253,25 +276,13 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r = nlmsg_data(nlh);
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
- tmo = tw->tw_ttd - inet_tw_time_stamp();
+ tmo = tw->tw_timer.expires - jiffies;
if (tmo < 0)
tmo = 0;
- r->idiag_family = tw->tw_family;
+ inet_diag_msg_common_fill(r, sk);
r->idiag_retrans = 0;
- r->id.idiag_if = tw->tw_bound_dev_if;
- sock_diag_save_cookie(tw, r->id.idiag_cookie);
-
- r->id.idiag_sport = tw->tw_sport;
- r->id.idiag_dport = tw->tw_dport;
-
- memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
- memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-
- r->id.idiag_src[0] = tw->tw_rcv_saddr;
- r->id.idiag_dst[0] = tw->tw_daddr;
-
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = jiffies_to_msecs(tmo);
@@ -279,61 +290,91 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
-#if IS_ENABLED(CONFIG_IPV6)
- if (tw->tw_family == AF_INET6) {
- *(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
- *(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
- }
-#endif
+
+ nlmsg_end(skb, nlh);
+ return 0;
+}
+
+static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
+ u32 portid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh)
+{
+ struct inet_diag_msg *r;
+ struct nlmsghdr *nlh;
+ long tmo;
+
+ nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
+ nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ r = nlmsg_data(nlh);
+ inet_diag_msg_common_fill(r, sk);
+ r->idiag_state = TCP_SYN_RECV;
+ r->idiag_timer = 1;
+ r->idiag_retrans = inet_reqsk(sk)->num_retrans;
+
+ BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
+ offsetof(struct sock, sk_cookie));
+
+ tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
+ r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+ r->idiag_uid = 0;
+ r->idiag_inode = 0;
nlmsg_end(skb, nlh);
return 0;
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
- struct inet_diag_req_v2 *r,
+ const struct inet_diag_req_v2 *r,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
if (sk->sk_state == TCP_TIME_WAIT)
- return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
+ return inet_twsk_diag_fill(sk, skb, portid, seq,
nlmsg_flags, unlh);
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+ return inet_req_diag_fill(sk, skb, portid, seq,
+ nlmsg_flags, unlh);
+
return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
nlmsg_flags, unlh);
}
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
- const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+ struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req)
{
- int err;
- struct sock *sk;
- struct sk_buff *rep;
struct net *net = sock_net(in_skb->sk);
+ struct sk_buff *rep;
+ struct sock *sk;
+ int err;
err = -EINVAL;
- if (req->sdiag_family == AF_INET) {
+ if (req->sdiag_family == AF_INET)
sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
req->id.idiag_sport, req->id.idiag_if);
- }
#if IS_ENABLED(CONFIG_IPV6)
- else if (req->sdiag_family == AF_INET6) {
+ else if (req->sdiag_family == AF_INET6)
sk = inet6_lookup(net, hashinfo,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
(struct in6_addr *)req->id.idiag_src,
req->id.idiag_sport,
req->id.idiag_if);
- }
#endif
- else {
+ else
goto out_nosk;
- }
err = -ENOENT;
- if (sk == NULL)
+ if (!sk)
goto out_nosk;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
@@ -371,7 +412,7 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
static int inet_diag_get_exact(struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req)
+ const struct inet_diag_req_v2 *req)
{
const struct inet_diag_handler *handler;
int err;
@@ -412,9 +453,8 @@ static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
return 1;
}
-
static int inet_diag_bc_run(const struct nlattr *_bc,
- const struct inet_diag_entry *entry)
+ const struct inet_diag_entry *entry)
{
const void *bc = nla_data(_bc);
int len = nla_len(_bc);
@@ -446,10 +486,10 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
break;
case INET_DIAG_BC_S_COND:
case INET_DIAG_BC_D_COND: {
- struct inet_diag_hostcond *cond;
- __be32 *addr;
+ const struct inet_diag_hostcond *cond;
+ const __be32 *addr;
- cond = (struct inet_diag_hostcond *)(op + 1);
+ cond = (const struct inet_diag_hostcond *)(op + 1);
if (cond->port != -1 &&
cond->port != (op->code == INET_DIAG_BC_S_COND ?
entry->sport : entry->dport)) {
@@ -498,29 +538,36 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
return len == 0;
}
+/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
+ */
+static void entry_fill_addrs(struct inet_diag_entry *entry,
+ const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
+ entry->daddr = sk->sk_v6_daddr.s6_addr32;
+ } else
+#endif
+ {
+ entry->saddr = &sk->sk_rcv_saddr;
+ entry->daddr = &sk->sk_daddr;
+ }
+}
+
int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
- struct inet_diag_entry entry;
struct inet_sock *inet = inet_sk(sk);
+ struct inet_diag_entry entry;
- if (bc == NULL)
+ if (!bc)
return 1;
entry.family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
- if (entry.family == AF_INET6) {
-
- entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
- entry.daddr = sk->sk_v6_daddr.s6_addr32;
- } else
-#endif
- {
- entry.saddr = &inet->inet_rcv_saddr;
- entry.daddr = &inet->inet_daddr;
- }
+ entry_fill_addrs(&entry, sk);
entry.sport = inet->inet_num;
entry.dport = ntohs(inet->inet_dport);
- entry.userlocks = sk->sk_userlocks;
+ entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
return inet_diag_bc_run(bc, &entry);
}
@@ -547,8 +594,8 @@ static int valid_cc(const void *bc, int len, int cc)
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
int *min_len)
{
- int addr_len;
struct inet_diag_hostcond *cond;
+ int addr_len;
/* Check hostcond space. */
*min_len += sizeof(struct inet_diag_hostcond);
@@ -582,8 +629,8 @@ static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
}
/* Validate a port comparison operator. */
-static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
- int len, int *min_len)
+static bool valid_port_comparison(const struct inet_diag_bc_op *op,
+ int len, int *min_len)
{
/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
*min_len += sizeof(struct inet_diag_bc_op);
@@ -598,10 +645,9 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
int len = bytecode_len;
while (len > 0) {
- const struct inet_diag_bc_op *op = bc;
int min_len = sizeof(struct inet_diag_bc_op);
+ const struct inet_diag_bc_op *op = bc;
-//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
switch (op->code) {
case INET_DIAG_BC_S_COND:
case INET_DIAG_BC_D_COND:
@@ -642,7 +688,7 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
- struct inet_diag_req_v2 *r,
+ const struct inet_diag_req_v2 *r,
const struct nlattr *bc)
{
if (!inet_diag_bc_sk(bc, sk))
@@ -654,139 +700,42 @@ static int inet_csk_diag_dump(struct sock *sk,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
-static int inet_twsk_diag_dump(struct sock *sk,
- struct sk_buff *skb,
- struct netlink_callback *cb,
- struct inet_diag_req_v2 *r,
- const struct nlattr *bc)
+static void twsk_build_assert(void)
{
- struct inet_timewait_sock *tw = inet_twsk(sk);
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
+ offsetof(struct sock, sk_family));
- if (bc != NULL) {
- struct inet_diag_entry entry;
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
+ offsetof(struct inet_sock, inet_num));
- entry.family = tw->tw_family;
-#if IS_ENABLED(CONFIG_IPV6)
- if (tw->tw_family == AF_INET6) {
- entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
- entry.daddr = tw->tw_v6_daddr.s6_addr32;
- } else
-#endif
- {
- entry.saddr = &tw->tw_rcv_saddr;
- entry.daddr = &tw->tw_daddr;
- }
- entry.sport = tw->tw_num;
- entry.dport = ntohs(tw->tw_dport);
- entry.userlocks = 0;
-
- if (!inet_diag_bc_run(bc, &entry))
- return 0;
- }
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
+ offsetof(struct inet_sock, inet_dport));
- return inet_twsk_diag_fill(tw, skb, r,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
-}
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
+ offsetof(struct inet_sock, inet_rcv_saddr));
-/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
- * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
- */
-static inline void inet_diag_req_addrs(const struct sock *sk,
- const struct request_sock *req,
- struct inet_diag_entry *entry)
-{
- struct inet_request_sock *ireq = inet_rsk(req);
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
+ offsetof(struct inet_sock, inet_daddr));
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6) {
- if (req->rsk_ops->family == AF_INET6) {
- entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
- entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
- } else if (req->rsk_ops->family == AF_INET) {
- ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
- &entry->saddr_storage);
- ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
- &entry->daddr_storage);
- entry->saddr = entry->saddr_storage.s6_addr32;
- entry->daddr = entry->daddr_storage.s6_addr32;
- }
- } else
-#endif
- {
- entry->saddr = &ireq->ir_loc_addr;
- entry->daddr = &ireq->ir_rmt_addr;
- }
-}
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
+ offsetof(struct sock, sk_v6_rcv_saddr));
-static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
- struct request_sock *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq,
- const struct nlmsghdr *unlh)
-{
- const struct inet_request_sock *ireq = inet_rsk(req);
- struct inet_sock *inet = inet_sk(sk);
- struct inet_diag_msg *r;
- struct nlmsghdr *nlh;
- long tmo;
-
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
- NLM_F_MULTI);
- if (!nlh)
- return -EMSGSIZE;
-
- r = nlmsg_data(nlh);
- r->idiag_family = sk->sk_family;
- r->idiag_state = TCP_SYN_RECV;
- r->idiag_timer = 1;
- r->idiag_retrans = req->num_retrans;
-
- r->id.idiag_if = sk->sk_bound_dev_if;
- sock_diag_save_cookie(req, r->id.idiag_cookie);
-
- tmo = req->expires - jiffies;
- if (tmo < 0)
- tmo = 0;
-
- r->id.idiag_sport = inet->inet_sport;
- r->id.idiag_dport = ireq->ir_rmt_port;
-
- memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
- memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-
- r->id.idiag_src[0] = ireq->ir_loc_addr;
- r->id.idiag_dst[0] = ireq->ir_rmt_addr;
-
- r->idiag_expires = jiffies_to_msecs(tmo);
- r->idiag_rqueue = 0;
- r->idiag_wqueue = 0;
- r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
- r->idiag_inode = 0;
-#if IS_ENABLED(CONFIG_IPV6)
- if (r->idiag_family == AF_INET6) {
- struct inet_diag_entry entry;
- inet_diag_req_addrs(sk, req, &entry);
- memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
- memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
- }
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
+ offsetof(struct sock, sk_v6_daddr));
#endif
-
- nlmsg_end(skb, nlh);
- return 0;
}
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
struct netlink_callback *cb,
- struct inet_diag_req_v2 *r,
+ const struct inet_diag_req_v2 *r,
const struct nlattr *bc)
{
- struct inet_diag_entry entry;
struct inet_connection_sock *icsk = inet_csk(sk);
- struct listen_sock *lopt;
struct inet_sock *inet = inet_sk(sk);
- int j, s_j;
- int reqnum, s_reqnum;
+ struct inet_diag_entry entry;
+ int j, s_j, reqnum, s_reqnum;
+ struct listen_sock *lopt;
int err = 0;
s_j = cb->args[3];
@@ -797,13 +746,13 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
entry.family = sk->sk_family;
- read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
lopt = icsk->icsk_accept_queue.listen_opt;
- if (!lopt || !lopt->qlen)
+ if (!lopt || !listen_sock_qlen(lopt))
goto out;
- if (bc != NULL) {
+ if (bc) {
entry.sport = inet->inet_num;
entry.userlocks = sk->sk_userlocks;
}
@@ -822,17 +771,18 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
continue;
if (bc) {
- inet_diag_req_addrs(sk, req, &entry);
+ /* Note: entry.sport and entry.userlocks are already set */
+ entry_fill_addrs(&entry, req_to_sk(req));
entry.dport = ntohs(ireq->ir_rmt_port);
if (!inet_diag_bc_run(bc, &entry))
continue;
}
- err = inet_diag_fill_req(skb, sk, req,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, cb->nlh);
+ err = inet_req_diag_fill(req_to_sk(req), skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, cb->nlh);
if (err < 0) {
cb->args[3] = j + 1;
cb->args[4] = reqnum;
@@ -844,17 +794,17 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
}
out:
- read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
return err;
}
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
- struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
- int i, num;
- int s_i, s_num;
struct net *net = sock_net(skb->sk);
+ int i, num, s_i, s_num;
s_i = cb->args[1];
s_num = num = cb->args[2];
@@ -864,9 +814,9 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
goto skip_listen_ht;
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
- struct sock *sk;
- struct hlist_nulls_node *node;
struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
+ struct sock *sk;
num = 0;
ilb = &hashinfo->listening_hash[i];
@@ -883,7 +833,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
}
if (r->sdiag_family != AF_UNSPEC &&
- sk->sk_family != r->sdiag_family)
+ sk->sk_family != r->sdiag_family)
goto next_listen;
if (r->id.idiag_sport != inet->inet_sport &&
@@ -931,8 +881,8 @@ skip_listen_ht:
for (i = s_i; i <= hashinfo->ehash_mask; i++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
- struct sock *sk;
struct hlist_nulls_node *node;
+ struct sock *sk;
num = 0;
@@ -944,8 +894,7 @@ skip_listen_ht:
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &head->chain) {
- int res;
- int state;
+ int state, res;
if (!net_eq(sock_net(sk), net))
continue;
@@ -964,10 +913,16 @@ skip_listen_ht:
if (r->id.idiag_dport != sk->sk_dport &&
r->id.idiag_dport)
goto next_normal;
- if (sk->sk_state == TCP_TIME_WAIT)
- res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
- else
- res = inet_csk_diag_dump(sk, skb, cb, r, bc);
+ twsk_build_assert();
+
+ if (!inet_diag_bc_sk(bc, sk))
+ goto next_normal;
+
+ res = sk_diag_fill(sk, skb, r,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->nlh);
if (res < 0) {
spin_unlock_bh(lock);
goto done;
@@ -988,7 +943,8 @@ out:
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc)
{
const struct inet_diag_handler *handler;
int err = 0;
@@ -1005,8 +961,8 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct nlattr *bc = NULL;
int hdrlen = sizeof(struct inet_diag_req_v2);
+ struct nlattr *bc = NULL;
if (nlmsg_attrlen(cb->nlh, hdrlen))
bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
@@ -1014,7 +970,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}
-static inline int inet_diag_type2proto(int type)
+static int inet_diag_type2proto(int type)
{
switch (type) {
case TCPDIAG_GETSOCK:
@@ -1026,12 +982,13 @@ static inline int inet_diag_type2proto(int type)
}
}
-static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
+static int inet_diag_dump_compat(struct sk_buff *skb,
+ struct netlink_callback *cb)
{
struct inet_diag_req *rc = nlmsg_data(cb->nlh);
+ int hdrlen = sizeof(struct inet_diag_req);
struct inet_diag_req_v2 req;
struct nlattr *bc = NULL;
- int hdrlen = sizeof(struct inet_diag_req);
req.sdiag_family = AF_UNSPEC; /* compatibility */
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
@@ -1046,7 +1003,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
}
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh)
+ const struct nlmsghdr *nlh)
{
struct inet_diag_req *rc = nlmsg_data(nlh);
struct inet_diag_req_v2 req;
@@ -1075,7 +1032,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
attr = nlmsg_find_attr(nlh, hdrlen,
INET_DIAG_REQ_BYTECODE);
- if (attr == NULL ||
+ if (!attr ||
nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
return -EINVAL;
@@ -1102,9 +1059,10 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
if (h->nlmsg_flags & NLM_F_DUMP) {
if (nlmsg_attrlen(h, hdrlen)) {
struct nlattr *attr;
+
attr = nlmsg_find_attr(h, hdrlen,
INET_DIAG_REQ_BYTECODE);
- if (attr == NULL ||
+ if (!attr ||
nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
return -EINVAL;
@@ -1140,7 +1098,7 @@ int inet_diag_register(const struct inet_diag_handler *h)
mutex_lock(&inet_diag_table_mutex);
err = -EEXIST;
- if (inet_diag_table[type] == NULL) {
+ if (!inet_diag_table[type]) {
inet_diag_table[type] = h;
err = 0;
}
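
inet_diag_msg_common_fill() and entry_fill_addrs() above read ports and addresses straight from struct sock regardless of whether the object is a full socket, a timewait sock or a request sock; twsk_build_assert() makes that safe by checking at compile time that the relevant fields sit at identical offsets in the different structures. A small hedged sketch of the BUILD_BUG_ON()/offsetof() technique with hypothetical structures:

	#include <linux/bug.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	/* the "full" object */
	struct demo_full {
		u16 family;
		u16 port;
		u32 addr;
		/* ... lots of other state ... */
	};

	/* a slimmer variant that generic code also reads through a demo_full pointer */
	struct demo_mini {
		u16 family;
		u16 port;
		u32 addr;
	};

	/* called from the generic fill path; fails the build if the layouts drift */
	static inline void demo_build_assert(void)
	{
		BUILD_BUG_ON(offsetof(struct demo_mini, family) !=
			     offsetof(struct demo_full, family));
		BUILD_BUG_ON(offsetof(struct demo_mini, port) !=
			     offsetof(struct demo_full, port));
		BUILD_BUG_ON(offsetof(struct demo_mini, addr) !=
			     offsetof(struct demo_full, addr));
	}
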
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index e7920352646a..5e346a082e5f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -385,7 +385,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
}
q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
- if (q == NULL)
+ if (!q)
return NULL;
q->net = nf;
@@ -406,7 +406,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
struct inet_frag_queue *q;
q = inet_frag_alloc(nf, f, arg);
- if (q == NULL)
+ if (!q)
return NULL;
return inet_frag_intern(nf, q, f, arg);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 9111a4e22155..c6fb80bd5826 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -24,9 +24,9 @@
#include <net/secure_seq.h>
#include <net/ip.h>
-static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
- const __u16 lport, const __be32 faddr,
- const __be16 fport)
+static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
+ const __u16 lport, const __be32 faddr,
+ const __be16 fport)
{
static u32 inet_ehash_secret __read_mostly;
@@ -36,17 +36,21 @@ static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
inet_ehash_secret + net_hash_mix(net));
}
-
-static unsigned int inet_sk_ehashfn(const struct sock *sk)
+/* This function handles inet_sock, but also timewait and request sockets
+ * for IPv4/IPv6.
+ */
+u32 sk_ehashfn(const struct sock *sk)
{
- const struct inet_sock *inet = inet_sk(sk);
- const __be32 laddr = inet->inet_rcv_saddr;
- const __u16 lport = inet->inet_num;
- const __be32 faddr = inet->inet_daddr;
- const __be16 fport = inet->inet_dport;
- struct net *net = sock_net(sk);
-
- return inet_ehashfn(net, laddr, lport, faddr, fport);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6 &&
+ !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+ return inet6_ehashfn(sock_net(sk),
+ &sk->sk_v6_rcv_saddr, sk->sk_num,
+ &sk->sk_v6_daddr, sk->sk_dport);
+#endif
+ return inet_ehashfn(sock_net(sk),
+ sk->sk_rcv_saddr, sk->sk_num,
+ sk->sk_daddr, sk->sk_dport);
}
/*
@@ -60,8 +64,8 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
{
struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
- if (tb != NULL) {
- write_pnet(&tb->ib_net, hold_net(net));
+ if (tb) {
+ write_pnet(&tb->ib_net, net);
tb->port = snum;
tb->fastreuse = 0;
tb->fastreuseport = 0;
@@ -79,7 +83,6 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
{
if (hlist_empty(&tb->owners)) {
__hlist_del(&tb->node);
- release_net(ib_net(tb));
kmem_cache_free(cachep, tb);
}
}
@@ -263,11 +266,19 @@ void sock_gen_put(struct sock *sk)
if (sk->sk_state == TCP_TIME_WAIT)
inet_twsk_free(inet_twsk(sk));
+ else if (sk->sk_state == TCP_NEW_SYN_RECV)
+ reqsk_free(inet_reqsk(sk));
else
sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);
+void sock_edemux(struct sk_buff *skb)
+{
+ sock_gen_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_edemux);
+
struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
@@ -377,7 +388,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
*twp = tw;
} else if (tw) {
/* Silly. Should hash-dance instead... */
- inet_twsk_deschedule(tw, death_row);
+ inet_twsk_deschedule(tw);
inet_twsk_put(tw);
}
@@ -400,13 +411,13 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct hlist_nulls_head *list;
- spinlock_t *lock;
struct inet_ehash_bucket *head;
+ spinlock_t *lock;
int twrefcnt = 0;
WARN_ON(!sk_unhashed(sk));
- sk->sk_hash = inet_sk_ehashfn(sk);
+ sk->sk_hash = sk_ehashfn(sk);
head = inet_ehash_bucket(hashinfo, sk->sk_hash);
list = &head->chain;
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
@@ -423,15 +434,13 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
-static void __inet_hash(struct sock *sk)
+int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_listen_hashbucket *ilb;
- if (sk->sk_state != TCP_LISTEN) {
- __inet_hash_nolisten(sk, NULL);
- return;
- }
+ if (sk->sk_state != TCP_LISTEN)
+ return __inet_hash_nolisten(sk, tw);
WARN_ON(!sk_unhashed(sk));
ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -440,13 +449,15 @@ static void __inet_hash(struct sock *sk)
__sk_nulls_add_node_rcu(sk, &ilb->head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
spin_unlock(&ilb->lock);
+ return 0;
}
+EXPORT_SYMBOL(__inet_hash);
void inet_hash(struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- __inet_hash(sk);
+ __inet_hash(sk, NULL);
local_bh_enable();
}
}
@@ -477,8 +488,7 @@ EXPORT_SYMBOL_GPL(inet_unhash);
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
- struct sock *, __u16, struct inet_timewait_sock **),
- int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
+ struct sock *, __u16, struct inet_timewait_sock **))
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
const unsigned short snum = inet_sk(sk)->inet_num;
@@ -548,14 +558,14 @@ ok:
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
- twrefcnt += hash(sk, tw);
+ twrefcnt += __inet_hash_nolisten(sk, tw);
}
if (tw)
twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
spin_unlock(&head->lock);
if (tw) {
- inet_twsk_deschedule(tw, death_row);
+ inet_twsk_deschedule(tw);
while (twrefcnt) {
twrefcnt--;
inet_twsk_put(tw);
@@ -570,7 +580,7 @@ ok:
tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- hash(sk, NULL);
+ __inet_hash_nolisten(sk, NULL);
spin_unlock_bh(&head->lock);
return 0;
} else {
@@ -590,7 +600,7 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk)
{
return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
- __inet_check_established, __inet_hash_nolisten);
+ __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 6d592f8555fb..00ec8d5d7e7e 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -67,9 +67,9 @@ int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
}
/* Must be called with locally disabled BHs. */
-static void __inet_twsk_kill(struct inet_timewait_sock *tw,
- struct inet_hashinfo *hashinfo)
+static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
+ struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
struct inet_bind_hashbucket *bhead;
int refcnt;
/* Unlink from established hashes. */
@@ -89,6 +89,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
atomic_sub(refcnt, &tw->tw_refcnt);
+ atomic_dec(&tw->tw_dr->tw_count);
+ inet_twsk_put(tw);
}
void inet_twsk_free(struct inet_timewait_sock *tw)
@@ -98,7 +100,6 @@ void inet_twsk_free(struct inet_timewait_sock *tw)
#ifdef SOCK_REFCNT_DEBUG
pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
- release_net(twsk_net(tw));
kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
module_put(owner);
}
@@ -169,16 +170,34 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
-struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
+void tw_timer_handler(unsigned long data)
{
- struct inet_timewait_sock *tw =
- kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
- GFP_ATOMIC);
- if (tw != NULL) {
+ struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
+
+ if (tw->tw_kill)
+ NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+ else
+ NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+ inet_twsk_kill(tw);
+}
+
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ struct inet_timewait_death_row *dr,
+ const int state)
+{
+ struct inet_timewait_sock *tw;
+
+ if (atomic_read(&dr->tw_count) >= dr->sysctl_max_tw_buckets)
+ return NULL;
+
+ tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
+ GFP_ATOMIC);
+ if (tw) {
const struct inet_sock *inet = inet_sk(sk);
kmemcheck_annotate_bitfield(tw, flags);
+ tw->tw_dr = dr;
/* Give us an identity. */
tw->tw_daddr = inet->inet_daddr;
tw->tw_rcv_saddr = inet->inet_rcv_saddr;
@@ -195,14 +214,16 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
tw->tw_ipv6only = 0;
tw->tw_transparent = inet->transparent;
tw->tw_prot = sk->sk_prot_creator;
- twsk_net_set(tw, hold_net(sock_net(sk)));
+ atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
+ twsk_net_set(tw, sock_net(sk));
+ setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
/*
* Because we use RCU lookups, we should not set tw_refcnt
* to a non null value before everything is setup for this
* timewait socket.
*/
atomic_set(&tw->tw_refcnt, 0);
- inet_twsk_dead_node_init(tw);
+
__module_get(tw->tw_prot->owner);
}
@@ -210,139 +231,20 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
-/* Returns non-zero if quota exceeded. */
-static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
- const int slot)
-{
- struct inet_timewait_sock *tw;
- unsigned int killed;
- int ret;
-
- /* NOTE: compare this to previous version where lock
- * was released after detaching chain. It was racy,
- * because tw buckets are scheduled in not serialized context
- * in 2.3 (with netfilter), and with softnet it is common, because
- * soft irqs are not sequenced.
- */
- killed = 0;
- ret = 0;
-rescan:
- inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
- __inet_twsk_del_dead_node(tw);
- spin_unlock(&twdr->death_lock);
- __inet_twsk_kill(tw, twdr->hashinfo);
-#ifdef CONFIG_NET_NS
- NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
-#endif
- inet_twsk_put(tw);
- killed++;
- spin_lock(&twdr->death_lock);
- if (killed > INET_TWDR_TWKILL_QUOTA) {
- ret = 1;
- break;
- }
-
- /* While we dropped twdr->death_lock, another cpu may have
- * killed off the next TW bucket in the list, therefore
- * do a fresh re-read of the hlist head node with the
- * lock reacquired. We still use the hlist traversal
- * macro in order to get the prefetches.
- */
- goto rescan;
- }
-
- twdr->tw_count -= killed;
-#ifndef CONFIG_NET_NS
- NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
-#endif
- return ret;
-}
-
-void inet_twdr_hangman(unsigned long data)
-{
- struct inet_timewait_death_row *twdr;
- unsigned int need_timer;
-
- twdr = (struct inet_timewait_death_row *)data;
- spin_lock(&twdr->death_lock);
-
- if (twdr->tw_count == 0)
- goto out;
-
- need_timer = 0;
- if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
- twdr->thread_slots |= (1 << twdr->slot);
- schedule_work(&twdr->twkill_work);
- need_timer = 1;
- } else {
- /* We purged the entire slot, anything left? */
- if (twdr->tw_count)
- need_timer = 1;
- twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
- }
- if (need_timer)
- mod_timer(&twdr->tw_timer, jiffies + twdr->period);
-out:
- spin_unlock(&twdr->death_lock);
-}
-EXPORT_SYMBOL_GPL(inet_twdr_hangman);
-
-void inet_twdr_twkill_work(struct work_struct *work)
-{
- struct inet_timewait_death_row *twdr =
- container_of(work, struct inet_timewait_death_row, twkill_work);
- int i;
-
- BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
- (sizeof(twdr->thread_slots) * 8));
-
- while (twdr->thread_slots) {
- spin_lock_bh(&twdr->death_lock);
- for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
- if (!(twdr->thread_slots & (1 << i)))
- continue;
-
- while (inet_twdr_do_twkill_work(twdr, i) != 0) {
- if (need_resched()) {
- spin_unlock_bh(&twdr->death_lock);
- schedule();
- spin_lock_bh(&twdr->death_lock);
- }
- }
-
- twdr->thread_slots &= ~(1 << i);
- }
- spin_unlock_bh(&twdr->death_lock);
- }
-}
-EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
-
/* These are always called from BH context. See callers in
* tcp_input.c to verify this.
*/
/* This is for handling early-kills of TIME_WAIT sockets. */
-void inet_twsk_deschedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr)
+void inet_twsk_deschedule(struct inet_timewait_sock *tw)
{
- spin_lock(&twdr->death_lock);
- if (inet_twsk_del_dead_node(tw)) {
- inet_twsk_put(tw);
- if (--twdr->tw_count == 0)
- del_timer(&twdr->tw_timer);
- }
- spin_unlock(&twdr->death_lock);
- __inet_twsk_kill(tw, twdr->hashinfo);
+ if (del_timer_sync(&tw->tw_timer))
+ inet_twsk_kill(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule);
-void inet_twsk_schedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr,
- const int timeo, const int timewait_len)
+void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
{
- struct hlist_head *list;
- int slot;
-
/* timeout := RTO * 3.5
*
* 3.5 = 1+2+0.5 to wait for two retransmits.
@@ -367,115 +269,15 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
* is greater than TS tick!) and detect old duplicates with help
* of PAWS.
*/
- slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
- spin_lock(&twdr->death_lock);
-
- /* Unlink it, if it was scheduled */
- if (inet_twsk_del_dead_node(tw))
- twdr->tw_count--;
- else
+ tw->tw_kill = timeo <= 4*HZ;
+ if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
atomic_inc(&tw->tw_refcnt);
-
- if (slot >= INET_TWDR_RECYCLE_SLOTS) {
- /* Schedule to slow timer */
- if (timeo >= timewait_len) {
- slot = INET_TWDR_TWKILL_SLOTS - 1;
- } else {
- slot = DIV_ROUND_UP(timeo, twdr->period);
- if (slot >= INET_TWDR_TWKILL_SLOTS)
- slot = INET_TWDR_TWKILL_SLOTS - 1;
- }
- tw->tw_ttd = inet_tw_time_stamp() + timeo;
- slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
- list = &twdr->cells[slot];
- } else {
- tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);
-
- if (twdr->twcal_hand < 0) {
- twdr->twcal_hand = 0;
- twdr->twcal_jiffie = jiffies;
- twdr->twcal_timer.expires = twdr->twcal_jiffie +
- (slot << INET_TWDR_RECYCLE_TICK);
- add_timer(&twdr->twcal_timer);
- } else {
- if (time_after(twdr->twcal_timer.expires,
- jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
- mod_timer(&twdr->twcal_timer,
- jiffies + (slot << INET_TWDR_RECYCLE_TICK));
- slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
- }
- list = &twdr->twcal_row[slot];
+ atomic_inc(&tw->tw_dr->tw_count);
}
-
- hlist_add_head(&tw->tw_death_node, list);
-
- if (twdr->tw_count++ == 0)
- mod_timer(&twdr->tw_timer, jiffies + twdr->period);
- spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
-void inet_twdr_twcal_tick(unsigned long data)
-{
- struct inet_timewait_death_row *twdr;
- int n, slot;
- unsigned long j;
- unsigned long now = jiffies;
- int killed = 0;
- int adv = 0;
-
- twdr = (struct inet_timewait_death_row *)data;
-
- spin_lock(&twdr->death_lock);
- if (twdr->twcal_hand < 0)
- goto out;
-
- slot = twdr->twcal_hand;
- j = twdr->twcal_jiffie;
-
- for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
- if (time_before_eq(j, now)) {
- struct hlist_node *safe;
- struct inet_timewait_sock *tw;
-
- inet_twsk_for_each_inmate_safe(tw, safe,
- &twdr->twcal_row[slot]) {
- __inet_twsk_del_dead_node(tw);
- __inet_twsk_kill(tw, twdr->hashinfo);
-#ifdef CONFIG_NET_NS
- NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
-#endif
- inet_twsk_put(tw);
- killed++;
- }
- } else {
- if (!adv) {
- adv = 1;
- twdr->twcal_jiffie = j;
- twdr->twcal_hand = slot;
- }
-
- if (!hlist_empty(&twdr->twcal_row[slot])) {
- mod_timer(&twdr->twcal_timer, j);
- goto out;
- }
- }
- j += 1 << INET_TWDR_RECYCLE_TICK;
- slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
- }
- twdr->twcal_hand = -1;
-
-out:
- if ((twdr->tw_count -= killed) == 0)
- del_timer(&twdr->tw_timer);
-#ifndef CONFIG_NET_NS
- NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
-#endif
- spin_unlock(&twdr->death_lock);
-}
-EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
-
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family)
{
@@ -487,6 +289,7 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
+ cond_resched();
rcu_read_lock();
restart:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
@@ -508,7 +311,7 @@ restart:
rcu_read_unlock();
local_bh_disable();
- inet_twsk_deschedule(tw, twdr);
+ inet_twsk_deschedule(tw);
local_bh_enable();
inet_twsk_put(tw);
goto restart_rcu;
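[Note: the rewritten inet_timewait_sock.c replaces the shared death-row hangman/twcal timers with one timer per TIME_WAIT socket: inet_twsk_alloc() enforces the bucket limit and sets up tw->tw_timer, inet_twsk_schedule() arms it, and tw_timer_handler() or inet_twsk_deschedule() ends in inet_twsk_kill(). A condensed lifecycle sketch; example_enter_timewait is not a kernel function and timeo would come from the protocol's TIME_WAIT policy:]
static void example_enter_timewait(struct sock *sk,
				   struct inet_timewait_death_row *dr,
				   const int timeo)
{
	struct inet_timewait_sock *tw;

	/* Returns NULL once dr->tw_count reaches sysctl_max_tw_buckets. */
	tw = inet_twsk_alloc(sk, dr, TCP_TIME_WAIT);
	if (!tw)
		return;

	/* Link tw into the established hash in place of sk ... */
	__inet_twsk_hashdance(tw, sk, dr->hashinfo);

	/* ... then arm the per-socket timer; tw_timer_handler() accounts
	 * the expiry and calls inet_twsk_kill() when it fires.
	 */
	inet_twsk_schedule(tw, timeo);
}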
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index d9bc28ac5d1b..3674484946a5 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -57,7 +57,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
}
-static int ip_forward_finish(struct sk_buff *skb)
+static int ip_forward_finish(struct sock *sk, struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
@@ -68,7 +68,7 @@ static int ip_forward_finish(struct sk_buff *skb)
ip_forward_options(skb);
skb_sender_cpu_clear(skb);
- return dst_output(skb);
+ return dst_output_sk(sk, skb);
}
int ip_forward(struct sk_buff *skb)
@@ -82,6 +82,9 @@ int ip_forward(struct sk_buff *skb)
if (skb->pkt_type != PACKET_HOST)
goto drop;
+ if (unlikely(skb->sk))
+ goto drop;
+
if (skb_warn_if_lro(skb))
goto drop;
@@ -136,8 +139,8 @@ int ip_forward(struct sk_buff *skb)
skb->priority = rt_tos2priority(iph->tos);
- return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
- rt->dst.dev, ip_forward_finish);
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
+ skb->dev, rt->dst.dev, ip_forward_finish);
sr_failed:
/*
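[Note: ip_forward.c shows the new netfilter convention used throughout this series: NF_HOOK() takes the originating socket, and the continuation (okfn) now has the signature int (*)(struct sock *, struct sk_buff *). A minimal local-output sketch under that convention; the example_* names are invented, and callers with no socket pass NULL as ip_forward() does above:]
static int example_output_finish(struct sock *sk, struct sk_buff *skb)
{
	/* The socket is threaded through explicitly instead of being
	 * recovered from skb->sk by the continuation.
	 */
	return dst_output_sk(sk, skb);
}

static int example_local_out(struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
		       NULL, skb_dst(skb)->dev, example_output_finish);
}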
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 145a50c4d566..cc1da6d9cb35 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
goto err;
err = -ENOMEM;
- if (pskb_pull(skb, ihl) == NULL)
+ if (!pskb_pull(skb, ihl))
goto err;
err = pskb_trim_rcsum(skb, end - offset);
@@ -537,7 +537,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
qp->q.fragments = head;
}
- WARN_ON(head == NULL);
+ WARN_ON(!head);
WARN_ON(FRAG_CB(head)->offset != 0);
/* Allocate a new buffer for the datagram. */
@@ -559,7 +559,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
struct sk_buff *clone;
int i, plen = 0;
- if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
goto out_nomem;
clone->next = head->next;
head->next = clone;
@@ -638,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
/* Lookup (or create) queue header */
- if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
+ qp = ip_find(net, ip_hdr(skb), user);
+ if (qp) {
int ret;
spin_lock(&qp->q.lock);
@@ -754,7 +756,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
table = ip4_frags_ns_ctl_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
- if (table == NULL)
+ if (!table)
goto err_alloc;
table[0].data = &net->ipv4.frags.high_thresh;
@@ -770,7 +772,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
}
hdr = register_net_sysctl(net, "net/ipv4", table);
- if (hdr == NULL)
+ if (!hdr)
goto err_reg;
net->ipv4.frags_hdr = hdr;
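[Note: the ip_fragment.c hunks are style cleanups replacing embedded assignments and explicit NULL comparisons with the preferred kernel idiom; for reference, the before/after shape is, as a generic fragment not tied to any one function:]
	/* old style */
	if ((skb = skb_peek_tail(queue)) == NULL)
		return -ENOENT;

	/* preferred style, as used in the hunks above */
	skb = skb_peek_tail(queue);
	if (!skb)
		return -ENOENT;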
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6207275fc749..5fd706473c73 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -182,7 +182,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
iph->daddr, iph->saddr, tpi->key);
- if (t == NULL)
+ if (!t)
return PACKET_REJECT;
if (t->parms.iph.daddr == 0 ||
@@ -423,7 +423,7 @@ static int ipgre_open(struct net_device *dev)
return -EADDRNOTAVAIL;
dev = rt->dst.dev;
ip_rt_put(rt);
- if (__in_dev_get_rtnl(dev) == NULL)
+ if (!__in_dev_get_rtnl(dev))
return -EADDRNOTAVAIL;
t->mlink = dev->ifindex;
ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
@@ -456,6 +456,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
.ndo_do_ioctl = ipgre_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip_tunnel_get_iflink,
};
#define GRE_FEATURES (NETIF_F_SG | \
@@ -621,10 +622,10 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
if (data[IFLA_GRE_LOCAL])
- parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
+ parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
if (data[IFLA_GRE_REMOTE])
- parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
+ parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
if (data[IFLA_GRE_TTL])
parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
@@ -686,6 +687,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip_tunnel_get_iflink,
};
static void ipgre_tap_setup(struct net_device *dev)
@@ -776,8 +778,8 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
- nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
- nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
+ nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
+ nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
nla_put_u8(skb, IFLA_GRE_PMTUDISC,
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3d4da2c16b6a..2db4c8773c1b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -187,7 +187,7 @@ bool ip_call_ra_chain(struct sk_buff *skb)
return false;
}
-static int ip_local_deliver_finish(struct sk_buff *skb)
+static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
raw = raw_local_deliver(skb, protocol);
ipprot = rcu_dereference(inet_protos[protocol]);
- if (ipprot != NULL) {
+ if (ipprot) {
int ret;
if (!ipprot->no_policy) {
@@ -253,7 +253,8 @@ int ip_local_deliver(struct sk_buff *skb)
return 0;
}
- return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, NULL, skb,
+ skb->dev, NULL,
ip_local_deliver_finish);
}
@@ -309,12 +310,12 @@ drop:
int sysctl_ip_early_demux __read_mostly = 1;
EXPORT_SYMBOL(sysctl_ip_early_demux);
-static int ip_rcv_finish(struct sk_buff *skb)
+static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
- if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+ if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
const struct net_protocol *ipprot;
int protocol = iph->protocol;
@@ -387,7 +388,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto out;
}
@@ -450,7 +452,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
+ dev, NULL,
ip_rcv_finish);
csum_error:
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5b3d91be2db0..bd246792360b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net,
unsigned char *iph;
int optlen, l;
- if (skb != NULL) {
+ if (skb) {
rt = skb_rtable(skb);
optptr = (unsigned char *)&(ip_hdr(skb)[1]);
} else
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a7aea2048a0d..c65b93a7b711 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -91,14 +91,19 @@ void ip_send_check(struct iphdr *iph)
}
EXPORT_SYMBOL(ip_send_check);
-int __ip_local_out(struct sk_buff *skb)
+int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = htons(skb->len);
ip_send_check(iph);
- return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
- skb_dst(skb)->dev, dst_output);
+ return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL,
+ skb_dst(skb)->dev, dst_output_sk);
+}
+
+int __ip_local_out(struct sk_buff *skb)
+{
+ return __ip_local_out_sk(skb->sk, skb);
}
int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
@@ -148,7 +153,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
iph->protocol = sk->sk_protocol;
- ip_select_ident(skb, sk);
+ ip_select_ident(sock_net(sk), skb, sk);
if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2;
@@ -163,7 +168,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
-static inline int ip_finish_output2(struct sk_buff *skb)
+static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
@@ -182,7 +187,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
struct sk_buff *skb2;
skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
- if (skb2 == NULL) {
+ if (!skb2) {
kfree_skb(skb);
return -ENOMEM;
}
@@ -211,7 +216,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
return -EINVAL;
}
-static int ip_finish_output_gso(struct sk_buff *skb)
+static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
{
netdev_features_t features;
struct sk_buff *segs;
@@ -220,7 +225,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
/* common case: locally created skb or seglen is <= mtu */
if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
- return ip_finish_output2(skb);
+ return ip_finish_output2(sk, skb);
/* Slowpath - GSO segment length is exceeding the dst MTU.
*
@@ -243,7 +248,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
int err;
segs->next = NULL;
- err = ip_fragment(segs, ip_finish_output2);
+ err = ip_fragment(sk, segs, ip_finish_output2);
if (err && ret == 0)
ret = err;
@@ -253,22 +258,22 @@ static int ip_finish_output_gso(struct sk_buff *skb)
return ret;
}
-static int ip_finish_output(struct sk_buff *skb)
+static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
- if (skb_dst(skb)->xfrm != NULL) {
+ if (skb_dst(skb)->xfrm) {
IPCB(skb)->flags |= IPSKB_REROUTED;
- return dst_output(skb);
+ return dst_output_sk(sk, skb);
}
#endif
if (skb_is_gso(skb))
- return ip_finish_output_gso(skb);
+ return ip_finish_output_gso(sk, skb);
if (skb->len > ip_skb_dst_mtu(skb))
- return ip_fragment(skb, ip_finish_output2);
+ return ip_fragment(sk, skb, ip_finish_output2);
- return ip_finish_output2(skb);
+ return ip_finish_output2(sk, skb);
}
int ip_mc_output(struct sock *sk, struct sk_buff *skb)
@@ -307,7 +312,7 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- newskb, NULL, newskb->dev,
+ sk, newskb, NULL, newskb->dev,
dev_loopback_xmit);
}
@@ -322,11 +327,11 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
if (rt->rt_flags&RTCF_BROADCAST) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
- NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb,
NULL, newskb->dev, dev_loopback_xmit);
}
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL,
skb->dev, ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
@@ -340,7 +345,8 @@ int ip_output(struct sock *sk, struct sk_buff *skb)
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
+ NULL, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
@@ -376,12 +382,12 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
inet_opt = rcu_dereference(inet->inet_opt);
fl4 = &fl->u.ip4;
rt = skb_rtable(skb);
- if (rt != NULL)
+ if (rt)
goto packet_routed;
/* Make sure we can route this packet. */
rt = (struct rtable *)__sk_dst_check(sk, 0);
- if (rt == NULL) {
+ if (!rt) {
__be32 daddr;
/* Use correct destination address if we have options. */
@@ -430,7 +436,8 @@ packet_routed:
ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
}
- ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
+ ip_select_ident_segs(sock_net(sk), skb, sk,
+ skb_shinfo(skb)->gso_segs ?: 1);
/* TODO : should we use skb->sk here instead of sk ? */
skb->priority = sk->sk_priority;
@@ -448,7 +455,6 @@ no_route:
}
EXPORT_SYMBOL(ip_queue_xmit);
-
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
@@ -479,7 +485,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
* single device frame, and queue such a frame for sending.
*/
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+int ip_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *))
{
struct iphdr *iph;
int ptr;
@@ -586,13 +593,13 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
ip_options_fragment(frag);
offset += skb->len - hlen;
iph->frag_off = htons(offset>>3);
- if (frag->next != NULL)
+ if (frag->next)
iph->frag_off |= htons(IP_MF);
/* Ready, complete checksum */
ip_send_check(iph);
}
- err = output(skb);
+ err = output(sk, skb);
if (!err)
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
@@ -636,10 +643,7 @@ slow_path:
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
- /* for bridged IP traffic encapsulated inside f.e. a vlan header,
- * we need to make room for the encapsulating header
- */
- ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
+ ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
/*
* Fragment the datagram.
@@ -732,7 +736,7 @@ slow_path:
ip_send_check(iph);
- err = output(skb2);
+ err = output(sk, skb2);
if (err)
goto fail;
@@ -792,12 +796,13 @@ static inline int ip_ufo_append_data(struct sock *sk,
* device, so create one single skb packet containing complete
* udp datagram
*/
- if ((skb = skb_peek_tail(queue)) == NULL) {
+ skb = skb_peek_tail(queue);
+ if (!skb) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
- if (skb == NULL)
+ if (!skb)
return err;
/* reserve space for Hardware header */
@@ -814,7 +819,6 @@ static inline int ip_ufo_append_data(struct sock *sk,
skb->csum = 0;
-
__skb_queue_tail(queue, skb);
} else if (skb_is_gso(skb)) {
goto append;
@@ -963,10 +967,10 @@ alloc_new_skb:
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
sk->sk_allocation);
- if (unlikely(skb == NULL))
+ if (unlikely(!skb))
err = -ENOBUFS;
}
- if (skb == NULL)
+ if (!skb)
goto error;
/*
@@ -1090,10 +1094,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
*/
opt = ipc->opt;
if (opt) {
- if (cork->opt == NULL) {
+ if (!cork->opt) {
cork->opt = kmalloc(sizeof(struct ip_options) + 40,
sk->sk_allocation);
- if (unlikely(cork->opt == NULL))
+ if (unlikely(!cork->opt))
return -ENOBUFS;
}
memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
@@ -1200,7 +1204,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
return -EMSGSIZE;
}
- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+ skb = skb_peek_tail(&sk->sk_write_queue);
+ if (!skb)
return -EINVAL;
cork->length += size;
@@ -1211,7 +1216,6 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
-
while (size > 0) {
int i;
@@ -1331,7 +1335,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
__be16 df = 0;
__u8 ttl;
- if ((skb = __skb_dequeue(queue)) == NULL)
+ skb = __skb_dequeue(queue);
+ if (!skb)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
@@ -1382,7 +1387,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
- ip_select_ident(skb, sk);
+ ip_select_ident(net, skb, sk);
if (opt) {
iph->ihl += opt->optlen>>2;
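[Note: the ip_output.c hunks thread the socket through the whole output path; ip_fragment() takes a struct sock * and its output callback now has the same (sock, skb) shape as the other continuations. A fragment-and-send sketch under those assumptions; the example_* names are invented and a real caller would hand off to ip_finish_output2() or a tunnel transmit routine rather than dst_output_sk():]
static int example_frag_output(struct sock *sk, struct sk_buff *skb)
{
	return dst_output_sk(sk, skb);
}

static int example_send(struct sock *sk, struct sk_buff *skb)
{
	if (!skb_is_gso(skb) && skb->len > ip_skb_dst_mtu(skb))
		return ip_fragment(sk, skb, example_frag_output);

	return example_frag_output(sk, skb);
}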
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5cd99271d3a6..7cfb0893f263 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -351,7 +351,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
return 0;
}
}
- if (new_ra == NULL) {
+ if (!new_ra) {
spin_unlock_bh(&ip_ra_lock);
return -ENOBUFS;
}
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
skb_network_header(skb);
serr->port = port;
- if (skb_pull(skb, payload - skb->data) != NULL) {
+ if (skb_pull(skb, payload - skb->data)) {
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb) == 0)
return;
@@ -482,7 +482,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
err = -EAGAIN;
skb = sock_dequeue_err_skb(sk);
- if (skb == NULL)
+ if (!skb)
goto out;
copied = skb->len;
@@ -536,12 +536,34 @@ out:
* Socket option code for IP. This is the end of the line after any
* TCP,UDP etc options on an IP socket.
*/
+static bool setsockopt_needs_rtnl(int optname)
+{
+ switch (optname) {
+ case IP_ADD_MEMBERSHIP:
+ case IP_ADD_SOURCE_MEMBERSHIP:
+ case IP_BLOCK_SOURCE:
+ case IP_DROP_MEMBERSHIP:
+ case IP_DROP_SOURCE_MEMBERSHIP:
+ case IP_MSFILTER:
+ case IP_UNBLOCK_SOURCE:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_MSFILTER:
+ case MCAST_JOIN_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ case MCAST_UNBLOCK_SOURCE:
+ return true;
+ }
+ return false;
+}
static int do_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
struct inet_sock *inet = inet_sk(sk);
int val = 0, err;
+ bool needs_rtnl = setsockopt_needs_rtnl(optname);
switch (optname) {
case IP_PKTINFO:
@@ -584,6 +606,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
return ip_mroute_setsockopt(sk, optname, optval, optlen);
err = 0;
+ if (needs_rtnl)
+ rtnl_lock();
lock_sock(sk);
switch (optname) {
@@ -1118,10 +1142,14 @@ mc_msf_out:
break;
}
release_sock(sk);
+ if (needs_rtnl)
+ rtnl_unlock();
return err;
e_inval:
release_sock(sk);
+ if (needs_rtnl)
+ rtnl_unlock();
return -EINVAL;
}
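[Note: do_ip_setsockopt() above classifies multicast-membership options via setsockopt_needs_rtnl() and takes the RTNL before the socket lock, releasing in reverse order on both exit paths. The resulting locking pattern, condensed from the hunks into one fragment for reference:]
	bool needs_rtnl = setsockopt_needs_rtnl(optname);

	if (needs_rtnl)
		rtnl_lock();		/* RTNL first ... */
	lock_sock(sk);			/* ... then the socket lock */

	/* ... option handling ... */

	release_sock(sk);		/* unlock in the opposite order */
	if (needs_rtnl)
		rtnl_unlock();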
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 2cd08280c77b..4c2c3ba4ba65 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -389,7 +389,6 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
hlen = tdev->hard_header_len + tdev->needed_headroom;
mtu = tdev->mtu;
}
- dev->iflink = tunnel->parms.link;
dev->needed_headroom = t_hlen + hlen;
mtu -= (dev->hard_header_len + t_hlen);
@@ -655,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
if (dst == 0) {
/* NBMA tunnel */
- if (skb_dst(skb) == NULL) {
+ if (!skb_dst(skb)) {
dev->stats.tx_fifo_errors++;
goto tx_error;
}
@@ -673,7 +672,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
neigh = dst_neigh_lookup(skb_dst(skb),
&ipv6_hdr(skb)->daddr);
- if (neigh == NULL)
+ if (!neigh)
goto tx_error;
addr6 = (const struct in6_addr *)&neigh->primary_key;
@@ -783,7 +782,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
return;
}
- err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
+ err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol,
tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
@@ -844,7 +843,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
case SIOCGETTUNNEL:
if (dev == itn->fb_tunnel_dev) {
t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
- if (t == NULL)
+ if (!t)
t = netdev_priv(dev);
}
memcpy(p, &t->parms, sizeof(*p));
@@ -877,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
break;
}
if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
- if (t != NULL) {
+ if (t) {
if (t->dev != dev) {
err = -EEXIST;
break;
@@ -915,7 +914,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
if (dev == itn->fb_tunnel_dev) {
err = -ENOENT;
t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
- if (t == NULL)
+ if (!t)
goto done;
err = -EPERM;
if (t == netdev_priv(itn->fb_tunnel_dev))
@@ -980,6 +979,14 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev)
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);
+int ip_tunnel_get_iflink(const struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ return tunnel->parms.link;
+}
+EXPORT_SYMBOL(ip_tunnel_get_iflink);
+
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname)
{
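[Note: ip_tunnel.c stops writing dev->iflink directly and instead exports ip_tunnel_get_iflink() for the new ndo_get_iflink callback; readers go through dev_get_iflink(), as the ipmr hunk further down does for VIFF_TUNNEL/VIFF_REGISTER vifs. A minimal wiring sketch; example_netdev_ops and example_report_link are invented, ip_tunnel_get_iflink is the real helper added above:]
static const struct net_device_ops example_netdev_ops = {
	.ndo_get_iflink	= ip_tunnel_get_iflink,	/* replaces dev->iflink writes */
};

static int example_report_link(const struct net_device *dev)
{
	/* callers now ask the device instead of reading dev->iflink */
	return dev_get_iflink(dev);
}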
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 88c386cf7d85..ce63ab21b6cd 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -74,7 +74,8 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
iph->daddr = dst;
iph->saddr = src;
iph->ttl = ttl;
- __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
+ __ip_select_ident(dev_net(rt->dst.dev), iph,
+ skb_shinfo(skb)->gso_segs ?: 1);
err = ip_local_out_sk(sk, skb);
if (unlikely(net_xmit_eval(err)))
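[Note: iptunnel_xmit() above resolves the namespace from the route when selecting the IP ID, part of making ip_select_ident()/__ip_select_ident() namespace-aware throughout this series. Caller shapes, shown as a condensed fragment that assumes sk and rt are in scope:]
	/* with an originating socket */
	ip_select_ident(sock_net(sk), skb, sk);

	/* without one, e.g. on an encapsulation or forwarding path,
	 * the struct net comes from the route or device instead
	 */
	ip_select_ident(dev_net(rt->dst.dev), skb, NULL);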
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 94efe148181c..9f7269f3c54a 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
- if (tunnel != NULL) {
+ if (tunnel) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
@@ -341,6 +341,7 @@ static const struct net_device_ops vti_netdev_ops = {
.ndo_do_ioctl = vti_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip_tunnel_get_iflink,
};
static void vti_tunnel_setup(struct net_device *dev)
@@ -361,7 +362,6 @@ static int vti_tunnel_init(struct net_device *dev)
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
dev->mtu = ETH_DATA_LEN;
dev->flags = IFF_NOARP;
- dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
@@ -456,10 +456,10 @@ static void vti_netlink_parms(struct nlattr *data[],
parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
if (data[IFLA_VTI_LOCAL])
- parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);
+ parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);
if (data[IFLA_VTI_REMOTE])
- parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
+ parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);
}
@@ -505,8 +505,8 @@ static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_VTI_LINK, p->link);
nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
- nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
- nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);
+ nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr);
+ nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr);
return 0;
}
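[Note: ip_gre.c, ip_vti.c and ipip.c switch their netlink address attributes from nla_get_be32()/nla_put_be32() to the address-typed helpers; for IPv4 the wire format is the same __be32, the new names just make the intent explicit. A sketch of the pattern, reusing IFLA_VTI_LOCAL from the hunks above; the example_* functions are invented:]
static void example_parse_local(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);
}

static int example_fill_local(struct sk_buff *skb,
			      const struct ip_tunnel_parm *parms)
{
	return nla_put_in_addr(skb, IFLA_VTI_LOCAL, parms->iph.saddr);
}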
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index c0855d50a3fa..d97f4f2787f5 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -63,7 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
struct xfrm_state *t;
t = xfrm_state_alloc(net);
- if (t == NULL)
+ if (!t)
goto out;
t->id.proto = IPPROTO_IPIP;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b26376ef87f6..8e7328c6a390 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -504,7 +504,8 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (!net_eq(dev_net(dev), &init_net))
goto drop;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
return NET_RX_DROP;
if (!pskb_may_pull(skb, sizeof(struct arphdr)))
@@ -958,7 +959,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
return NET_RX_DROP;
if (!pskb_may_pull(skb,
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 915d215a7d14..ff96396ebec5 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -144,7 +144,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
err = -ENOENT;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->daddr, iph->saddr, 0);
- if (t == NULL)
+ if (!t)
goto out;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
@@ -272,6 +272,7 @@ static const struct net_device_ops ipip_netdev_ops = {
.ndo_do_ioctl = ipip_tunnel_ioctl,
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip_tunnel_get_iflink,
};
#define IPIP_FEATURES (NETIF_F_SG | \
@@ -286,7 +287,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->type = ARPHRD_TUNNEL;
dev->flags = IFF_NOARP;
- dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
@@ -325,10 +325,10 @@ static void ipip_netlink_parms(struct nlattr *data[],
parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
if (data[IFLA_IPTUN_LOCAL])
- parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
+ parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);
if (data[IFLA_IPTUN_REMOTE])
- parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
+ parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);
if (data[IFLA_IPTUN_TTL]) {
parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
@@ -450,8 +450,8 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_parm *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
- nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
- nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
+ nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
+ nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index fe54eba6d00d..3a2c0162c3ba 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -73,9 +73,7 @@
struct mr_table {
struct list_head list;
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
u32 id;
struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
@@ -191,7 +189,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
}
mrt = ipmr_get_table(rule->fr_net, rule->table);
- if (mrt == NULL)
+ if (!mrt)
return -EAGAIN;
res->mrt = mrt;
return 0;
@@ -255,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net)
INIT_LIST_HEAD(&net->ipv4.mr_tables);
mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
- if (mrt == NULL) {
+ if (!mrt) {
err = -ENOMEM;
goto err1;
}
@@ -323,11 +321,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
unsigned int i;
mrt = ipmr_get_table(net, id);
- if (mrt != NULL)
+ if (mrt)
return mrt;
mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
- if (mrt == NULL)
+ if (!mrt)
return NULL;
write_pnet(&mrt->net, net);
mrt->id = id;
@@ -429,7 +427,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
dev->flags |= IFF_MULTICAST;
in_dev = __in_dev_get_rtnl(dev);
- if (in_dev == NULL)
+ if (!in_dev)
goto failure;
ipv4_devconf_setall(in_dev);
@@ -480,8 +478,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int reg_vif_get_iflink(const struct net_device *dev)
+{
+ return 0;
+}
+
static const struct net_device_ops reg_vif_netdev_ops = {
.ndo_start_xmit = reg_vif_xmit,
+ .ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
@@ -507,7 +511,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
- if (dev == NULL)
+ if (!dev)
return NULL;
dev_net_set(dev, net);
@@ -516,7 +520,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
free_netdev(dev);
return NULL;
}
- dev->iflink = 0;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
@@ -764,7 +767,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
case 0:
if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
- if (dev && __in_dev_get_rtnl(dev) == NULL) {
+ if (dev && !__in_dev_get_rtnl(dev)) {
dev_put(dev);
return -EADDRNOTAVAIL;
}
@@ -808,7 +811,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
v->pkt_out = 0;
v->link = dev->ifindex;
if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
- v->link = dev->iflink;
+ v->link = dev_get_iflink(dev);
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
@@ -1010,7 +1013,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
rcu_read_lock();
mroute_sk = rcu_dereference(mrt->mroute_sk);
- if (mroute_sk == NULL) {
+ if (!mroute_sk) {
rcu_read_unlock();
kfree_skb(skb);
return -EINVAL;
@@ -1163,7 +1166,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
return -EINVAL;
c = ipmr_cache_alloc();
- if (c == NULL)
+ if (!c)
return -ENOMEM;
c->mfc_origin = mfc->mfcc_origin.s_addr;
@@ -1285,7 +1288,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -EOPNOTSUPP;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
if (optname != MRT_INIT) {
@@ -1448,7 +1451,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
return -EOPNOTSUPP;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
if (optname != MRT_VERSION &&
@@ -1494,7 +1497,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
struct mr_table *mrt;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
switch (cmd) {
@@ -1568,7 +1571,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
struct mr_table *mrt;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
switch (cmd) {
@@ -1649,7 +1652,8 @@ static struct notifier_block ip_mr_notifier = {
* important for multicast video.
*/
-static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+static void ip_encap(struct net *net, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr)
{
struct iphdr *iph;
const struct iphdr *old_iph = ip_hdr(skb);
@@ -1668,14 +1672,14 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
iph->tot_len = htons(skb->len);
- ip_select_ident(skb, NULL);
+ ip_select_ident(net, skb, NULL);
ip_send_check(iph);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
nf_reset(skb);
}
-static inline int ipmr_forward_finish(struct sk_buff *skb)
+static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
@@ -1685,7 +1689,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
if (unlikely(opt->optlen))
ip_forward_options(skb);
- return dst_output(skb);
+ return dst_output_sk(sk, skb);
}
/*
@@ -1702,7 +1706,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
struct flowi4 fl4;
int encap = 0;
- if (vif->dev == NULL)
+ if (!vif->dev)
goto out_free;
#ifdef CONFIG_IP_PIMSM
@@ -1765,7 +1769,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
* What do we do with netfilter? -- RR
*/
if (vif->flags & VIFF_TUNNEL) {
- ip_encap(skb, vif->local, vif->remote);
+ ip_encap(net, skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */
vif->dev->stats.tx_packets++;
vif->dev->stats.tx_bytes += skb->len;
@@ -1784,7 +1788,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
* not mrouter) cannot join to more than one interface - it will
* result in receiving multiple packets.
*/
- NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
+ skb->dev, dev,
ipmr_forward_finish);
return;
@@ -1993,7 +1998,7 @@ int ip_mr_input(struct sk_buff *skb)
/* already under rcu_read_lock() */
cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
- if (cache == NULL) {
+ if (!cache) {
int vif = ipmr_find_vif(mrt, skb->dev);
if (vif >= 0)
@@ -2004,13 +2009,13 @@ int ip_mr_input(struct sk_buff *skb)
/*
* No usable cache entry
*/
- if (cache == NULL) {
+ if (!cache) {
int vif;
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
ip_local_deliver(skb);
- if (skb2 == NULL)
+ if (!skb2)
return -ENOBUFS;
skb = skb2;
}
@@ -2069,7 +2074,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
read_unlock(&mrt_lock);
- if (reg_dev == NULL)
+ if (!reg_dev)
return 1;
skb->mac_header = skb->network_header;
@@ -2199,18 +2204,18 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
int err;
mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
rcu_read_lock();
cache = ipmr_cache_find(mrt, saddr, daddr);
- if (cache == NULL && skb->dev) {
+ if (!cache && skb->dev) {
int vif = ipmr_find_vif(mrt, skb->dev);
if (vif >= 0)
cache = ipmr_cache_find_any(mrt, daddr, vif);
}
- if (cache == NULL) {
+ if (!cache) {
struct sk_buff *skb2;
struct iphdr *iph;
struct net_device *dev;
@@ -2268,7 +2273,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
int err;
nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
@@ -2287,8 +2292,8 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
rtm->rtm_protocol = RTPROT_MROUTED;
rtm->rtm_flags = 0;
- if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
- nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
+ if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
+ nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
goto nla_put_failure;
err = __ipmr_fill_mroute(mrt, skb, c, rtm);
/* do not break the dump if cache is unresolved */
@@ -2333,7 +2338,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
@@ -2448,7 +2453,7 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
struct mr_table *mrt;
mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
- if (mrt == NULL)
+ if (!mrt)
return ERR_PTR(-ENOENT);
iter->mrt = mrt;
@@ -2567,7 +2572,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
struct mr_table *mrt;
mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
- if (mrt == NULL)
+ if (!mrt)
return ERR_PTR(-ENOENT);
it->mrt = mrt;
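[Note: the mr_table change above replaces the #ifdef CONFIG_NET_NS pointer with possible_net_t, which compiles away to an empty struct when namespaces are disabled; access always goes through write_pnet()/read_pnet(). A minimal sketch with an invented example_table type:]
struct example_table {
	possible_net_t	net;	/* zero-size without CONFIG_NET_NS */
	u32		id;
};

static void example_table_init(struct example_table *t, struct net *net)
{
	write_pnet(&t->net, net);
}

static struct net *example_table_net(const struct example_table *t)
{
	return read_pnet(&t->net);
}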
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 7ebd6e37875c..65de0684e22a 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -94,7 +94,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb,
{
struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
- if (entry->hook == NF_INET_LOCAL_OUT) {
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
const struct iphdr *iph = ip_hdr(skb);
rt_info->tos = iph->tos;
@@ -109,7 +109,7 @@ static int nf_ip_reroute(struct sk_buff *skb,
{
const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
- if (entry->hook == NF_INET_LOCAL_OUT) {
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
const struct iphdr *iph = ip_hdr(skb);
if (!(iph->tos == rt_info->tos &&
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 59f883d9cadf..fb20f363151f 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,24 +36,16 @@ config NF_CONNTRACK_PROC_COMPAT
If unsure, say Y.
-config NF_LOG_ARP
- tristate "ARP packet logging"
- default m if NETFILTER_ADVANCED=n
- select NF_LOG_COMMON
-
-config NF_LOG_IPV4
- tristate "IPv4 packet logging"
- default m if NETFILTER_ADVANCED=n
- select NF_LOG_COMMON
+if NF_TABLES
config NF_TABLES_IPV4
- depends on NF_TABLES
tristate "IPv4 nf_tables support"
help
This option enables the IPv4 support for nf_tables.
+if NF_TABLES_IPV4
+
config NFT_CHAIN_ROUTE_IPV4
- depends on NF_TABLES_IPV4
tristate "IPv4 nf_tables route chain support"
help
This option enables the "route" chain for IPv4 in nf_tables. This
@@ -61,22 +53,34 @@ config NFT_CHAIN_ROUTE_IPV4
fields such as the source, destination, type of service and
the packet mark.
-config NF_REJECT_IPV4
- tristate "IPv4 packet rejection"
- default m if NETFILTER_ADVANCED=n
-
config NFT_REJECT_IPV4
- depends on NF_TABLES_IPV4
select NF_REJECT_IPV4
default NFT_REJECT
tristate
+endif # NF_TABLES_IPV4
+
config NF_TABLES_ARP
- depends on NF_TABLES
tristate "ARP nf_tables support"
help
This option enables the ARP support for nf_tables.
+endif # NF_TABLES
+
+config NF_LOG_ARP
+ tristate "ARP packet logging"
+ default m if NETFILTER_ADVANCED=n
+ select NF_LOG_COMMON
+
+config NF_LOG_IPV4
+ tristate "IPv4 packet logging"
+ default m if NETFILTER_ADVANCED=n
+ select NF_LOG_COMMON
+
+config NF_REJECT_IPV4
+ tristate "IPv4 packet rejection"
+ default m if NETFILTER_ADVANCED=n
+
config NF_NAT_IPV4
tristate "IPv4 NAT"
depends on NF_CONNTRACK_IPV4
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f95b6f93814b..13bfe84bf3ca 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -248,8 +248,7 @@ struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table)
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -265,8 +264,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
return NF_DROP;
- indev = in ? in->name : nulldevname;
- outdev = out ? out->name : nulldevname;
+ indev = state->in ? state->in->name : nulldevname;
+ outdev = state->out ? state->out->name : nulldevname;
local_bh_disable();
addend = xt_write_recseq_begin();
@@ -281,8 +280,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
e = get_entry(table_base, private->hook_entry[hook]);
back = get_entry(table_base, private->underflow[hook]);
- acpar.in = in;
- acpar.out = out;
+ acpar.in = state->in;
+ acpar.out = state->out;
acpar.hooknum = hook;
acpar.family = NFPROTO_ARP;
acpar.hotdrop = false;
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 802ddecb30b8..93876d03120c 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -28,12 +28,11 @@ static const struct xt_table packet_filter = {
/* The work comes in here from netfilter.c */
static unsigned int
arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- const struct net *net = dev_net((in != NULL) ? in : out);
+ const struct net *net = dev_net(state->in ? state->in : state->out);
- return arpt_do_table(skb, ops->hooknum, in, out,
+ return arpt_do_table(skb, ops->hooknum, state,
net->ipv4.arptable_filter);
}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index cf5e82f39d3b..c69db7fa25ee 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -288,8 +288,7 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
unsigned int
ipt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table)
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -306,8 +305,8 @@ ipt_do_table(struct sk_buff *skb,
/* Initialization */
ip = ip_hdr(skb);
- indev = in ? in->name : nulldevname;
- outdev = out ? out->name : nulldevname;
+ indev = state->in ? state->in->name : nulldevname;
+ outdev = state->out ? state->out->name : nulldevname;
/* We handle fragments by dealing with the first fragment as
* if it was a normal packet. All other fragments are treated
* normally, except that they will NEVER match rules that ask
@@ -317,8 +316,8 @@ ipt_do_table(struct sk_buff *skb,
acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
acpar.thoff = ip_hdrlen(skb);
acpar.hotdrop = false;
- acpar.in = in;
- acpar.out = out;
+ acpar.in = state->in;
+ acpar.out = state->out;
acpar.family = NFPROTO_IPV4;
acpar.hooknum = hook;
@@ -370,7 +369,7 @@ ipt_do_table(struct sk_buff *skb,
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* The packet is traced: log it */
if (unlikely(skb->nf_trace))
- trace_packet(skb, hook, in, out,
+ trace_packet(skb, hook, state->in, state->out,
table->name, private, e);
#endif
/* Standard target? */
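[Note: arp_tables.c and ip_tables.c above collapse the separate in/out device arguments into the const struct nf_hook_state * that hook entry points now receive; xt_action_param is still filled from state->in/state->out. A minimal hook wrapper in the new style, mirroring iptable_filter_hook() further down; example_hook is an invented name:]
static unsigned int example_hook(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	struct net *net = dev_net(state->in ? state->in : state->out);

	return ipt_do_table(skb, ops->hooknum, state,
			    net->ipv4.iptable_filter);
}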
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index e90f83a3415b..771ab3d01ad3 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -418,6 +418,13 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
if (ret < 0)
pr_info("cannot load conntrack support for proto=%u\n",
par->family);
+
+ if (!par->net->xt.clusterip_deprecated_warning) {
+ pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
+ "use xt_cluster instead\n");
+ par->net->xt.clusterip_deprecated_warning = true;
+ }
+
return ret;
}
@@ -497,14 +504,12 @@ static void arp_print(struct arp_payload *payload)
static unsigned int
arp_mangle(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct arphdr *arp = arp_hdr(skb);
struct arp_payload *payload;
struct clusterip_config *c;
- struct net *net = dev_net(in ? in : out);
+ struct net *net = dev_net(state->in ? state->in : state->out);
/* we don't care about non-ethernet and non-ipv4 ARP */
if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
@@ -529,10 +534,10 @@ arp_mangle(const struct nf_hook_ops *ops,
* addresses on different interfaces. However, in the CLUSTERIP case
* this wouldn't work, since we didn't subscribe the mcast group on
* other interfaces */
- if (c->dev != out) {
+ if (c->dev != state->out) {
pr_debug("not mangling arp reply on different "
"interface: cip'%s'-skb'%s'\n",
- c->dev->name, out->name);
+ c->dev->name, state->out->name);
clusterip_config_put(c);
return NF_ACCEPT;
}
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 8f48f5517e33..87907d4bd259 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -34,31 +34,32 @@ static unsigned int
reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_reject_info *reject = par->targinfo;
+ int hook = par->hooknum;
switch (reject->with) {
case IPT_ICMP_NET_UNREACHABLE:
- nf_send_unreach(skb, ICMP_NET_UNREACH);
+ nf_send_unreach(skb, ICMP_NET_UNREACH, hook);
break;
case IPT_ICMP_HOST_UNREACHABLE:
- nf_send_unreach(skb, ICMP_HOST_UNREACH);
+ nf_send_unreach(skb, ICMP_HOST_UNREACH, hook);
break;
case IPT_ICMP_PROT_UNREACHABLE:
- nf_send_unreach(skb, ICMP_PROT_UNREACH);
+ nf_send_unreach(skb, ICMP_PROT_UNREACH, hook);
break;
case IPT_ICMP_PORT_UNREACHABLE:
- nf_send_unreach(skb, ICMP_PORT_UNREACH);
+ nf_send_unreach(skb, ICMP_PORT_UNREACH, hook);
break;
case IPT_ICMP_NET_PROHIBITED:
- nf_send_unreach(skb, ICMP_NET_ANO);
+ nf_send_unreach(skb, ICMP_NET_ANO, hook);
break;
case IPT_ICMP_HOST_PROHIBITED:
- nf_send_unreach(skb, ICMP_HOST_ANO);
+ nf_send_unreach(skb, ICMP_HOST_ANO, hook);
break;
case IPT_ICMP_ADMIN_PROHIBITED:
- nf_send_unreach(skb, ICMP_PKT_FILTERED);
+ nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
break;
case IPT_TCP_RESET:
- nf_send_reset(skb, par->hooknum);
+ nf_send_reset(skb, hook);
case IPT_ICMP_ECHOREPLY:
/* Doesn't happen. */
break;
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index a313c3fbeb46..e9e67793055f 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -300,11 +300,9 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *nhs)
{
- struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+ struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
struct nf_conn_synproxy *synproxy;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index e08a74a243a8..a0f3beca52d2 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -34,8 +34,7 @@ static const struct xt_table packet_filter = {
static unsigned int
iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
const struct net *net;
@@ -45,9 +44,8 @@ iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
/* root is playing with raw sockets. */
return NF_ACCEPT;
- net = dev_net((in != NULL) ? in : out);
- return ipt_do_table(skb, ops->hooknum, in, out,
- net->ipv4.iptable_filter);
+ net = dev_net(state->in ? state->in : state->out);
+ return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_filter);
}
static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 6a5079c34bb3..62cbb8c5f4a8 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -37,8 +37,9 @@ static const struct xt_table packet_mangler = {
};
static unsigned int
-ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
+ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
{
+ struct net_device *out = state->out;
unsigned int ret;
const struct iphdr *iph;
u_int8_t tos;
@@ -58,7 +59,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
daddr = iph->daddr;
tos = iph->tos;
- ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
+ ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, state,
dev_net(out)->ipv4.iptable_mangle);
/* Reroute for ANY change. */
if (ret != NF_DROP && ret != NF_STOLEN) {
@@ -81,18 +82,16 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
static unsigned int
iptable_mangle_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
if (ops->hooknum == NF_INET_LOCAL_OUT)
- return ipt_mangle_out(skb, out);
+ return ipt_mangle_out(skb, state);
if (ops->hooknum == NF_INET_POST_ROUTING)
- return ipt_do_table(skb, ops->hooknum, in, out,
- dev_net(out)->ipv4.iptable_mangle);
+ return ipt_do_table(skb, ops->hooknum, state,
+ dev_net(state->out)->ipv4.iptable_mangle);
/* PREROUTING/INPUT/FORWARD: */
- return ipt_do_table(skb, ops->hooknum, in, out,
- dev_net(in)->ipv4.iptable_mangle);
+ return ipt_do_table(skb, ops->hooknum, state,
+ dev_net(state->in)->ipv4.iptable_mangle);
}
static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 6b67d7e9a75d..0d4d9cdf98a4 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -30,49 +30,40 @@ static const struct xt_table nf_nat_ipv4_table = {
static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table);
+ return ipt_do_table(skb, ops->hooknum, state, net->ipv4.nat_table);
}
static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain);
+ return nf_nat_ipv4_fn(ops, skb, state, iptable_nat_do_chain);
}
static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain);
+ return nf_nat_ipv4_in(ops, skb, state, iptable_nat_do_chain);
}
static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain);
+ return nf_nat_ipv4_out(ops, skb, state, iptable_nat_do_chain);
}
static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain);
+ return nf_nat_ipv4_local_fn(ops, skb, state, iptable_nat_do_chain);
}
static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
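Editorial aside (not part of the patch): with the state threaded through, the iptable_nat wrappers shrink to thin forwarders, because the do_chain callback passed to nf_nat_ipv4_fn() and friends now also receives the hook state. A hedged sketch of that callback shape; the typedef name is illustrative only:

#include <linux/netfilter.h>

struct nf_conn;

/* Illustrative typedef: the do_chain callback signature shared by
 * nf_nat_ipv4_fn/_in/_out/_local_fn after this change -- the hook
 * state is handed straight through to the table/chain evaluation.
 */
typedef unsigned int (*example_nat_do_chain_t)(const struct nf_hook_ops *ops,
					       struct sk_buff *skb,
					       const struct nf_hook_state *state,
					       struct nf_conn *ct);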
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index b2f7e8f98316..0356e6da4bb7 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -21,8 +21,7 @@ static const struct xt_table packet_raw = {
/* The work comes in here from netfilter.c. */
static unsigned int
iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
const struct net *net;
@@ -32,8 +31,8 @@ iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
/* root is playing with raw sockets. */
return NF_ACCEPT;
- net = dev_net((in != NULL) ? in : out);
- return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw);
+ net = dev_net(state->in ? state->in : state->out);
+ return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_raw);
}
static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index c86647ed2078..4bce3980ccd9 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -38,9 +38,7 @@ static const struct xt_table security_table = {
static unsigned int
iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
const struct net *net;
@@ -50,8 +48,8 @@ iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
/* Somebody is playing with raw sockets. */
return NF_ACCEPT;
- net = dev_net((in != NULL) ? in : out);
- return ipt_do_table(skb, ops->hooknum, in, out,
+ net = dev_net(state->in ? state->in : state->out);
+ return ipt_do_table(skb, ops->hooknum, state,
net->ipv4.iptable_security);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 5c61328b7704..30ad9554b5e9 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -94,9 +94,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -123,9 +121,7 @@ static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -149,24 +145,20 @@ out:
static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
+ return nf_conntrack_in(dev_net(state->in), PF_INET, ops->hooknum, skb);
}
static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
+ return nf_conntrack_in(dev_net(state->out), PF_INET, ops->hooknum, skb);
}
/* Connection tracking may drop packets, but never alters them, so
@@ -322,8 +314,8 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
- if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
- nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
+ if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
+ nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
goto nla_put_failure;
return 0;
@@ -342,8 +334,8 @@ static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
return -EINVAL;
- t->src.u3.ip = nla_get_be32(tb[CTA_IP_V4_SRC]);
- t->dst.u3.ip = nla_get_be32(tb[CTA_IP_V4_DST]);
+ t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
+ t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
return 0;
}
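Editorial aside (not part of the patch): nla_put_in_addr()/nla_get_in_addr() used above are typed netlink helpers for IPv4 addresses; they behave like the be32 variants but make the payload type explicit. A hedged usage sketch, reusing the attribute numbers from this file:

#include <net/netlink.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

/* Sketch: emitting an IPv4 tuple with the typed helpers.  Semantically
 * equivalent to nla_put_be32(), but the name documents that the payload
 * is an IPv4 address.
 */
static int example_put_tuple(struct sk_buff *skb,
			     const struct nf_conntrack_tuple *t)
{
	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, t->src.u3.ip) ||
	    nla_put_in_addr(skb, CTA_IP_V4_DST, t->dst.u3.ip))
		return -EMSGSIZE;
	return 0;
}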
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index a460a87e14f8..f0dfe92a00d6 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -300,7 +300,9 @@ static int exp_seq_show(struct seq_file *s, void *v)
__nf_ct_l3proto_find(exp->tuple.src.l3num),
__nf_ct_l4proto_find(exp->tuple.src.l3num,
exp->tuple.dst.protonum));
- return seq_putc(s, '\n');
+ seq_putc(s, '\n');
+
+ return 0;
}
static const struct seq_operations exp_seq_ops = {
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 7e5ca6f2d0cd..c88b7d434718 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -63,9 +63,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(skb->sk);
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
index d059182c1466..e7ad950cf9ef 100644
--- a/net/ipv4/netfilter/nf_log_arp.c
+++ b/net/ipv4/netfilter/nf_log_arp.c
@@ -10,8 +10,10 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -27,7 +29,7 @@ static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 5,
+ .level = LOGLEVEL_NOTICE,
.logflags = NF_LOG_MASK,
},
},
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 75101980eeee..076aadda0473 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -5,8 +5,10 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -26,7 +28,7 @@ static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 5,
+ .level = LOGLEVEL_NOTICE,
.logflags = NF_LOG_MASK,
},
},
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index fc37711e11f3..e59cc05c09e9 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -256,11 +256,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
unsigned int
nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
struct nf_conn *ct;
@@ -309,7 +308,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
if (!nf_nat_initialized(ct, maniptype)) {
unsigned int ret;
- ret = do_chain(ops, skb, in, out, ct);
+ ret = do_chain(ops, skb, state, ct);
if (ret != NF_ACCEPT)
return ret;
@@ -323,7 +322,8 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
pr_debug("Already setup manip %s for ct %p\n",
maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
ct);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat,
+ state->out))
goto oif_changed;
}
break;
@@ -332,7 +332,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
ctinfo == IP_CT_ESTABLISHED_REPLY);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
goto oif_changed;
}
@@ -346,17 +346,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
unsigned int
nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
unsigned int ret;
__be32 daddr = ip_hdr(skb)->daddr;
- ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+ ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
if (ret != NF_DROP && ret != NF_STOLEN &&
daddr != ip_hdr(skb)->daddr)
skb_dst_drop(skb);
@@ -367,11 +366,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
unsigned int
nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
#ifdef CONFIG_XFRM
@@ -386,7 +384,7 @@ nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+ ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
#ifdef CONFIG_XFRM
if (ret != NF_DROP && ret != NF_STOLEN &&
!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -410,11 +408,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
unsigned int
nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
const struct nf_conn *ct;
@@ -427,7 +424,7 @@ nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+ ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 536da7bc598a..3262e41ff76f 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -13,6 +13,7 @@
#include <net/dst.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_bridge.h>
#include <net/netfilter/ipv4/nf_reject.h>
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
@@ -43,7 +44,7 @@ EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int ttl)
+ __u8 protocol, int ttl)
{
struct iphdr *niph, *oiph = ip_hdr(oldskb);
@@ -146,7 +147,8 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
*/
if (oldskb->nf_bridge) {
struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
+
+ nskb->dev = nf_bridge_get_physindev(oldskb);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
@@ -164,4 +166,27 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
}
EXPORT_SYMBOL_GPL(nf_send_reset);
+void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+{
+ struct iphdr *iph = ip_hdr(skb_in);
+ u8 proto;
+
+ if (skb_in->csum_bad || iph->frag_off & htons(IP_OFFSET))
+ return;
+
+ if (skb_csum_unnecessary(skb_in)) {
+ icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+ return;
+ }
+
+ if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)
+ proto = iph->protocol;
+ else
+ proto = 0;
+
+ if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
+ icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+}
+EXPORT_SYMBOL_GPL(nf_send_unreach);
+
MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 19412a4063fb..8412268bbad1 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -17,13 +17,11 @@
static unsigned int
nft_do_chain_arp(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
- nft_set_pktinfo(&pkt, ops, skb, in, out);
+ nft_set_pktinfo(&pkt, ops, skb, state);
return nft_do_chain(&pkt, ops);
}
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index 6820c8c40842..aa180d3a69a5 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -20,22 +20,18 @@
static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
return nft_do_chain(&pkt, ops);
}
static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
if (unlikely(skb->len < sizeof(struct iphdr) ||
ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
@@ -45,7 +41,7 @@ static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
return NF_ACCEPT;
}
- return nft_do_chain_ipv4(ops, skb, in, out, okfn);
+ return nft_do_chain_ipv4(ops, skb, state);
}
struct nft_af_info nft_af_ipv4 __read_mostly = {
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index df547bf50078..bf5c30ae14e4 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -28,51 +28,42 @@
static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct)
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
return nft_do_chain(&pkt, ops);
}
static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv4_fn(ops, skb, state, nft_nat_do_chain);
}
static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv4_in(ops, skb, state, nft_nat_do_chain);
}
static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv4_out(ops, skb, state, nft_nat_do_chain);
}
static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv4_local_fn(ops, skb, state, nft_nat_do_chain);
}
static const struct nf_chain_type nft_chain_nat_ipv4 = {
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 125b66766c0a..e335b0afdaf3 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -23,9 +23,7 @@
static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
unsigned int ret;
struct nft_pktinfo pkt;
@@ -39,7 +37,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
mark = skb->mark;
iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index 665de06561cd..40e414c4ca56 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -17,20 +17,17 @@
#include <net/netfilter/ipv4/nf_nat_masquerade.h>
static void nft_masq_ipv4_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_masq *priv = nft_expr_priv(expr);
struct nf_nat_range range;
- unsigned int verdict;
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
- verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum,
- &range, pkt->out);
-
- data[NFT_REG_VERDICT].verdict = verdict;
+ regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum,
+ &range, pkt->out);
}
static struct nft_expr_type nft_masq_ipv4_type;
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index 6ecfce63201a..d8d795df9c13 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -18,26 +18,25 @@
#include <net/netfilter/nft_redir.h>
static void nft_redir_ipv4_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_redir *priv = nft_expr_priv(expr);
struct nf_nat_ipv4_multi_range_compat mr;
- unsigned int verdict;
memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) {
mr.range[0].min.all =
- *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_min];
mr.range[0].max.all =
- *(__be16 *)&data[priv->sreg_proto_max].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_max];
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
mr.range[0].flags |= priv->flags;
- verdict = nf_nat_redirect_ipv4(pkt->skb, &mr, pkt->ops->hooknum);
- data[NFT_REG_VERDICT].verdict = verdict;
+ regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr,
+ pkt->ops->hooknum);
}
static struct nft_expr_type nft_redir_ipv4_type;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index d729542bd1b7..b07e58b51158 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -20,21 +20,24 @@
#include <net/netfilter/nft_reject.h>
static void nft_reject_ipv4_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
- nf_send_unreach(pkt->skb, priv->icmp_code);
+ nf_send_unreach(pkt->skb, priv->icmp_code,
+ pkt->ops->hooknum);
break;
case NFT_REJECT_TCP_RST:
nf_send_reset(pkt->skb, pkt->ops->hooknum);
break;
+ default:
+ break;
}
- data[NFT_REG_VERDICT].verdict = NF_DROP;
+ regs->verdict.code = NF_DROP;
}
static struct nft_expr_type nft_reject_ipv4_type;
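Editorial aside (not part of the patch): the nf_tables expression evals above switch from writing into an nft_data array indexed by NFT_REG_VERDICT to the new struct nft_regs, where the verdict lives in regs->verdict.code. A hedged sketch of the post-conversion eval shape, with the expression-private details omitted:

#include <linux/netfilter.h>
#include <net/netfilter/nf_tables.h>

/* Sketch: an eval hook under the nft_regs calling convention.  The
 * verdict is written to regs->verdict.code instead of
 * data[NFT_REG_VERDICT].verdict.
 */
static void example_expr_eval(const struct nft_expr *expr,
			      struct nft_regs *regs,
			      const struct nft_pktinfo *pkt)
{
	regs->verdict.code = NF_DROP;	/* or NF_ACCEPT, NFT_CONTINUE, ... */
}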
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 208d5439e59b..05ff44b758df 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -64,11 +64,11 @@ EXPORT_SYMBOL_GPL(pingv6_ops);
static u16 ping_port_rover;
-static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
+static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask)
{
- int res = (num + net_hash_mix(net)) & mask;
+ u32 res = (num + net_hash_mix(net)) & mask;
- pr_debug("hash(%d) = %d\n", num, res);
+ pr_debug("hash(%u) = %u\n", num, res);
return res;
}
EXPORT_SYMBOL_GPL(ping_hash);
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
if (sk_hashed(sk)) {
write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
+ sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
@@ -516,7 +517,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
ntohs(icmph->un.echo.sequence));
sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
- if (sk == NULL) {
+ if (!sk) {
pr_debug("no socket, dropping\n");
return; /* No socket for error */
}
@@ -692,8 +693,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
}
EXPORT_SYMBOL_GPL(ping_common_sendmsg);
-static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len)
+static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct net *net = sock_net(sk);
struct flowi4 fl4;
@@ -849,8 +849,8 @@ do_confirm:
goto out;
}
-int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len)
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len)
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
@@ -972,7 +972,7 @@ bool ping_rcv(struct sk_buff *skb)
skb_push(skb, skb->data - (u8 *)icmph);
sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
- if (sk != NULL) {
+ if (sk) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
pr_debug("rcv on socket %p\n", sk);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index d8953ef0770c..e1f3b911dd1e 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -63,7 +63,7 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
socket_seq_show(seq);
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
sock_prot_inuse_get(net, &tcp_prot), orphans,
- tcp_death_row.tw_count, sockets,
+ atomic_read(&tcp_death_row.tw_count), sockets,
proto_memory_allocated(&tcp_prot));
seq_printf(seq, "UDP: inuse %d mem %ld\n",
sock_prot_inuse_get(net, &udp_prot),
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index f027a708b7e0..561cd4b8fc6e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -46,7 +46,6 @@
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/aio.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -293,7 +292,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
read_lock(&raw_v4_hashinfo.lock);
raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
- if (raw_sk != NULL) {
+ if (raw_sk) {
iph = (const struct iphdr *)skb->data;
net = dev_net(skb->dev);
@@ -363,7 +362,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb = sock_alloc_send_skb(sk,
length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
- if (skb == NULL)
+ if (!skb)
goto error;
skb_reserve(skb, hlen);
@@ -404,7 +403,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
- ip_select_ident(skb, NULL);
+ ip_select_ident(net, skb, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
@@ -412,8 +411,8 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
- err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
- rt->dst.dev, dst_output);
+ err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
+ NULL, rt->dst.dev, dst_output_sk);
if (err > 0)
err = net_xmit_errno(err);
if (err)
@@ -481,8 +480,7 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len)
+static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipcm_cookie ipc;
@@ -709,8 +707,8 @@ out: return ret;
* we return it, otherwise we block.
*/
-static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len)
+static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
@@ -873,7 +871,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
- if (skb != NULL)
+ if (skb)
amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ad5064362c5c..bff62fc87b8e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -152,7 +152,6 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
- .protocol = cpu_to_be16(ETH_P_IP),
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.mtu = ipv4_mtu,
@@ -483,7 +482,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
}
EXPORT_SYMBOL(ip_idents_reserve);
-void __ip_select_ident(struct iphdr *iph, int segs)
+void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
static u32 ip_idents_hashrnd __read_mostly;
u32 hash, id;
@@ -492,7 +491,7 @@ void __ip_select_ident(struct iphdr *iph, int segs)
hash = jhash_3words((__force u32)iph->daddr,
(__force u32)iph->saddr,
- iph->protocol,
+ iph->protocol ^ net_hash_mix(net),
ip_idents_hashrnd);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
@@ -963,10 +962,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (dst_metric_locked(dst, RTAX_MTU))
return;
- if (dst->dev->mtu < mtu)
- return;
-
- if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+ if (ipv4_mtu(dst) < mtu)
return;
if (mtu < ip_rt_min_pmtu)
@@ -1057,7 +1053,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
rt = (struct rtable *)odst;
- if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
+ if (odst->obsolete && !odst->ops->check(odst, 0)) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
@@ -1451,7 +1447,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
/* Primary sanity checks. */
- if (in_dev == NULL)
+ if (!in_dev)
return -EINVAL;
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
@@ -1554,7 +1550,7 @@ static int __mkroute_input(struct sk_buff *skb,
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
- if (out_dev == NULL) {
+ if (!out_dev) {
net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
@@ -1592,7 +1588,7 @@ static int __mkroute_input(struct sk_buff *skb,
fnhe = find_exception(&FIB_RES_NH(*res), daddr);
if (do_cache) {
- if (fnhe != NULL)
+ if (fnhe)
rth = rcu_dereference(fnhe->fnhe_rth_input);
else
rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
@@ -2055,7 +2051,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
ipv4_is_lbcast(fl4->daddr))) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
dev_out = __ip_dev_find(net, fl4->saddr, false);
- if (dev_out == NULL)
+ if (!dev_out)
goto out;
/* Special hack: user can direct multicasts
@@ -2088,7 +2084,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
if (fl4->flowi4_oif) {
dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
rth = ERR_PTR(-ENODEV);
- if (dev_out == NULL)
+ if (!dev_out)
goto out;
/* RACE: Check return value of inet_select_addr instead. */
@@ -2225,7 +2221,6 @@ static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
- .protocol = cpu_to_be16(ETH_P_IP),
.check = ipv4_blackhole_dst_check,
.mtu = ipv4_blackhole_mtu,
.default_advmss = ipv4_default_advmss,
@@ -2301,7 +2296,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
u32 metrics[RTAX_MAX];
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
r = nlmsg_data(nlh);
@@ -2321,11 +2316,11 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
r->rtm_flags |= RTCF_DOREDIRECT;
- if (nla_put_be32(skb, RTA_DST, dst))
+ if (nla_put_in_addr(skb, RTA_DST, dst))
goto nla_put_failure;
if (src) {
r->rtm_src_len = 32;
- if (nla_put_be32(skb, RTA_SRC, src))
+ if (nla_put_in_addr(skb, RTA_SRC, src))
goto nla_put_failure;
}
if (rt->dst.dev &&
@@ -2338,11 +2333,11 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
#endif
if (!rt_is_input_route(rt) &&
fl4->saddr != src) {
- if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
+ if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
goto nla_put_failure;
}
if (rt->rt_uses_gateway &&
- nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
+ nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
goto nla_put_failure;
expires = rt->dst.expires;
@@ -2423,7 +2418,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
rtm = nlmsg_data(nlh);
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (skb == NULL) {
+ if (!skb) {
err = -ENOBUFS;
goto errout;
}
@@ -2438,8 +2433,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
ip_hdr(skb)->protocol = IPPROTO_ICMP;
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
- src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
- dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
+ src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
+ dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
@@ -2454,7 +2449,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
struct net_device *dev;
dev = __dev_get_by_index(net, iif);
- if (dev == NULL) {
+ if (!dev) {
err = -ENODEV;
goto errout_free;
}
@@ -2653,7 +2648,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
tbl = ipv4_route_flush_table;
if (!net_eq(net, &init_net)) {
tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
- if (tbl == NULL)
+ if (!tbl)
goto err_dup;
/* Don't export sysctls to unprivileged users */
@@ -2663,7 +2658,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
tbl[0].extra1 = net;
net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
- if (net->ipv4.route_hdr == NULL)
+ if (!net->ipv4.route_hdr)
goto err_reg;
return 0;
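Editorial aside (not part of the patch): __ip_select_ident() now receives the netns and folds net_hash_mix(net) into the jhash, so identical flows in different namespaces no longer land in the same IP-ID bucket. A hedged sketch of the hash step, with the per-boot seed passed in explicitly to keep the snippet self-contained:

#include <linux/ip.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

/* Sketch: per-netns perturbation of the IP-ID bucket selection, mirroring
 * the hunk above.  ip_idents_hashrnd stands in for the file-local seed.
 */
static u32 example_ipid_hash(struct net *net, const struct iphdr *iph,
			     u32 ip_idents_hashrnd)
{
	return jhash_3words((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol ^ net_hash_mix(net),
			    ip_idents_hashrnd);
}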
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 45fe60c5238e..df849e5a10f1 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -219,19 +219,20 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
}
EXPORT_SYMBOL_GPL(__cookie_v4_check);
-static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst)
+static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
- if (child)
+ if (child) {
+ atomic_set(&req->rsk_refcnt, 1);
inet_csk_reqsk_queue_add(sk, req, child);
- else
+ } else {
reqsk_free(req);
-
+ }
return child;
}
@@ -325,7 +326,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
goto out;
ret = NULL;
- req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
+ req = inet_reqsk_alloc(&tcp_request_sock_ops, sk); /* for safety */
if (!req)
goto out;
@@ -336,8 +337,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
req->mss = mss;
ireq->ir_num = ntohs(th->dest);
ireq->ir_rmt_port = th->source;
- ireq->ir_loc_addr = ip_hdr(skb)->daddr;
- ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
+ sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
ireq->ir_mark = inet_request_mark(sk, skb);
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
@@ -345,7 +346,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
- treq->listener = NULL;
+ treq->tfo_listener = false;
+
+ ireq->ir_iif = sk->sk_bound_dev_if;
/* We throwed the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -357,7 +360,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
goto out;
}
- req->expires = 0UL;
req->num_retrans = 0;
/*
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d151539da8e6..c3852a7ff3c7 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -883,6 +883,20 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "tcp_probe_threshold",
+ .data = &init_net.ipv4.sysctl_tcp_probe_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "tcp_probe_interval",
+ .data = &init_net.ipv4.sysctl_tcp_probe_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{ }
};
@@ -895,7 +909,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
int i;
table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
- if (table == NULL)
+ if (!table)
goto err_alloc;
/* Update the variables to point into the current struct net */
@@ -904,7 +918,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
}
net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
- if (net->ipv4.ipv4_hdr == NULL)
+ if (!net->ipv4.ipv4_hdr)
goto err_reg;
net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
@@ -942,7 +956,7 @@ static __init int sysctl_ipv4_init(void)
struct ctl_table_header *hdr;
hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
- if (hdr == NULL)
+ if (!hdr)
return -ENOMEM;
if (register_pernet_subsys(&ipv4_sysctl_ops)) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 995a2259bcfc..46efa03d2b11 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -252,6 +252,7 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
+#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
@@ -496,7 +497,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
/* Connected or passive Fast Open socket? */
if (sk->sk_state != TCP_SYN_SENT &&
- (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
+ (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
if (tp->urg_seq == tp->copied_seq &&
@@ -520,8 +521,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
/* Race breaker. If space is freed after
* wspace test but before the flags are set,
- * IO signal will be lost.
+ * IO signal will be lost. Memory barrier
+ * pairs with the input side.
*/
+ smp_mb__after_atomic();
if (sk_stream_is_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
@@ -1028,7 +1031,7 @@ static inline int select_size(const struct sock *sk, bool sg)
void tcp_free_fastopen_req(struct tcp_sock *tp)
{
- if (tp->fastopen_req != NULL) {
+ if (tp->fastopen_req) {
kfree(tp->fastopen_req);
tp->fastopen_req = NULL;
}
@@ -1042,12 +1045,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
return -EOPNOTSUPP;
- if (tp->fastopen_req != NULL)
+ if (tp->fastopen_req)
return -EALREADY; /* Another Fast Open is in progress */
tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
sk->sk_allocation);
- if (unlikely(tp->fastopen_req == NULL))
+ if (unlikely(!tp->fastopen_req))
return -ENOBUFS;
tp->fastopen_req->data = msg;
tp->fastopen_req->size = size;
@@ -1060,8 +1063,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
return err;
}
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size)
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -1120,7 +1122,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sg = !!(sk->sk_route_caps & NETIF_F_SG);
- while (iov_iter_count(&msg->msg_iter)) {
+ while (msg_data_left(msg)) {
int copy = 0;
int max = size_goal;
@@ -1164,8 +1166,8 @@ new_segment:
}
/* Try to append data to the end of skb. */
- if (copy > iov_iter_count(&msg->msg_iter))
- copy = iov_iter_count(&msg->msg_iter);
+ if (copy > msg_data_left(msg))
+ copy = msg_data_left(msg);
/* Where to copy to? */
if (skb_availroom(skb) > 0) {
@@ -1222,7 +1224,7 @@ new_segment:
tcp_skb_pcount_set(skb, 0);
copied += copy;
- if (!iov_iter_count(&msg->msg_iter)) {
+ if (!msg_data_left(msg)) {
tcp_tx_timestamp(sk, skb);
goto out;
}
@@ -1539,8 +1541,8 @@ EXPORT_SYMBOL(tcp_read_sock);
* Probably, code can be easily improved even more.
*/
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len)
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
int copied = 0;
@@ -1914,18 +1916,19 @@ EXPORT_SYMBOL_GPL(tcp_set_state);
static const unsigned char new_state[16] = {
/* current state: new state: action: */
- /* (Invalid) */ TCP_CLOSE,
- /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
- /* TCP_SYN_SENT */ TCP_CLOSE,
- /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
- /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
- /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
- /* TCP_TIME_WAIT */ TCP_CLOSE,
- /* TCP_CLOSE */ TCP_CLOSE,
- /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
- /* TCP_LAST_ACK */ TCP_LAST_ACK,
- /* TCP_LISTEN */ TCP_CLOSE,
- /* TCP_CLOSING */ TCP_CLOSING,
+ [0 /* (Invalid) */] = TCP_CLOSE,
+ [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+ [TCP_SYN_SENT] = TCP_CLOSE,
+ [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+ [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
+ [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
+ [TCP_TIME_WAIT] = TCP_CLOSE,
+ [TCP_CLOSE] = TCP_CLOSE,
+ [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
+ [TCP_LAST_ACK] = TCP_LAST_ACK,
+ [TCP_LISTEN] = TCP_CLOSE,
+ [TCP_CLOSING] = TCP_CLOSING,
+ [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
};
static int tcp_close_state(struct sock *sk)
@@ -2138,7 +2141,7 @@ adjudge_to_death:
* aborted (e.g., closed with unread data) before 3WHS
* finishes.
*/
- if (req != NULL)
+ if (req)
reqsk_fastopen_remove(sk, req, false);
inet_csk_destroy_sock(sk);
}
@@ -2590,11 +2593,12 @@ EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
/* Return information about state of tcp endpoint in API format. */
-void tcp_get_info(const struct sock *sk, struct tcp_info *info)
+void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 now = tcp_time_stamp;
+ u32 rate;
memset(info, 0, sizeof(*info));
@@ -2655,10 +2659,16 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
info->tcpi_total_retrans = tp->total_retrans;
- info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
- sk->sk_pacing_rate : ~0ULL;
- info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
- sk->sk_max_pacing_rate : ~0ULL;
+ rate = READ_ONCE(sk->sk_pacing_rate);
+ info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+
+ rate = READ_ONCE(sk->sk_max_pacing_rate);
+ info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+
+ spin_lock_bh(&sk->sk_lock.slock);
+ info->tcpi_bytes_acked = tp->bytes_acked;
+ info->tcpi_bytes_received = tp->bytes_received;
+ spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL_GPL(tcp_get_info);
@@ -2730,6 +2740,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
return -EFAULT;
return 0;
}
+ case TCP_CC_INFO: {
+ const struct tcp_congestion_ops *ca_ops;
+ union tcp_cc_info info;
+ size_t sz = 0;
+ int attr;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ ca_ops = icsk->icsk_ca_ops;
+ if (ca_ops && ca_ops->get_info)
+ sz = ca_ops->get_info(sk, ~0U, &attr, &info);
+
+ len = min_t(unsigned int, len, sz);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &info, len))
+ return -EFAULT;
+ return 0;
+ }
case TCP_QUICKACK:
val = !icsk->icsk_ack.pingpong;
break;
@@ -2776,7 +2806,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_FASTOPEN:
- if (icsk->icsk_accept_queue.fastopenq != NULL)
+ if (icsk->icsk_accept_queue.fastopenq)
val = icsk->icsk_accept_queue.fastopenq->max_qlen;
else
val = 0;
@@ -2960,7 +2990,7 @@ void tcp_done(struct sock *sk)
tcp_set_state(sk, TCP_CLOSE);
tcp_clear_xmit_timers(sk);
- if (req != NULL)
+ if (req)
reqsk_fastopen_remove(sk, req, false);
sk->sk_shutdown = SHUTDOWN_MASK;
@@ -3001,12 +3031,11 @@ static void __init tcp_init_mem(void)
void __init tcp_init(void)
{
- struct sk_buff *skb = NULL;
unsigned long limit;
int max_rshare, max_wshare, cnt;
unsigned int i;
- BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
+ sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
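Editorial aside (not part of the patch): the new TCP_CC_INFO getsockopt exposes the congestion-control module's union tcp_cc_info to userspace, data that was previously only reachable through inet_diag. A hedged userspace sketch; TCP_CC_INFO and union tcp_cc_info are assumed to come from the uapi headers added in this series, and the fallback option value is an assumption for older toolchain headers:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/inet_diag.h>

#ifndef TCP_CC_INFO
#define TCP_CC_INFO 26	/* assumed value from this series' uapi headers */
#endif

/* Sketch: reading per-CC stats from a connected TCP socket.  The
 * returned length depends on which congestion-control module is active.
 */
static void dump_cc_info(int fd)
{
	union tcp_cc_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &info, &len) == 0)
		printf("cc info: %u bytes\n", (unsigned int)len);
}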
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 62856e185a93..7a5ae50c80c8 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -83,7 +83,7 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
ret = -EEXIST;
} else {
list_add_tail_rcu(&ca->list, &tcp_cong_list);
- pr_info("%s registered\n", ca->name);
+ pr_debug("%s registered\n", ca->name);
}
spin_unlock(&tcp_cong_list_lock);
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index b504371af742..4c41c1287197 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -277,7 +277,8 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
}
}
-static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
{
const struct dctcp *ca = inet_csk_ca(sk);
@@ -286,19 +287,19 @@ static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
*/
if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) ||
ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
- struct tcp_dctcp_info info;
-
- memset(&info, 0, sizeof(info));
+ memset(info, 0, sizeof(struct tcp_dctcp_info));
if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
- info.dctcp_enabled = 1;
- info.dctcp_ce_state = (u16) ca->ce_state;
- info.dctcp_alpha = ca->dctcp_alpha;
- info.dctcp_ab_ecn = ca->acked_bytes_ecn;
- info.dctcp_ab_tot = ca->acked_bytes_total;
+ info->dctcp.dctcp_enabled = 1;
+ info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
+ info->dctcp.dctcp_alpha = ca->dctcp_alpha;
+ info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
+ info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
}
- nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);
+ *attr = INET_DIAG_DCTCPINFO;
+ return sizeof(*info);
}
+ return 0;
}
static struct tcp_congestion_ops dctcp __read_mostly = {
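Editorial aside (not part of the patch): the congestion-control get_info callback no longer builds a netlink attribute itself; it fills the caller-supplied union tcp_cc_info, reports the attribute type through *attr and the payload size through its return value, which lets both inet_diag and the new TCP_CC_INFO getsockopt reuse it. A hedged minimal sketch of the new convention (a Vegas-style report; the function name is illustrative):

#include <linux/string.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* Sketch: a get_info implementation under the new calling convention.
 * Returning 0 means "nothing to report"; otherwise *attr names the
 * inet_diag attribute and the return value is the payload size.
 */
static size_t example_get_info(struct sock *sk, u32 ext, int *attr,
			       union tcp_cc_info *info)
{
	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		memset(&info->vegas, 0, sizeof(info->vegas));
		info->vegas.tcpv_enabled = 1;
		*attr = INET_DIAG_VEGASINFO;
		return sizeof(info->vegas);
	}
	return 0;
}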
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 0d73f9ddb55b..79b34a0f4a4a 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -29,18 +29,18 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
r->idiag_wqueue = tp->write_seq - tp->snd_una;
}
- if (info != NULL)
+ if (info)
tcp_get_info(sk, info);
}
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
}
static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req)
+ const struct inet_diag_req_v2 *req)
{
return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
}
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ea82fd492c1b..3c673d5e6cff 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -141,7 +141,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
req->sk = NULL;
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
- if (child == NULL)
+ if (!child)
return false;
spin_lock(&queue->fastopenq->lock);
@@ -155,12 +155,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
tp = tcp_sk(child);
tp->fastopen_rsk = req;
- /* Do a hold on the listner sk so that if the listener is being
- * closed, the child that has been accepted can live on and still
- * access listen_lock.
- */
- sock_hold(sk);
- tcp_rsk(req)->listener = sk;
+ tcp_rsk(req)->tfo_listener = true;
/* RFC1323: The window in SYN & SYN/ACK segments is never
* scaled. So correct it appropriately.
@@ -174,6 +169,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+ atomic_set(&req->rsk_refcnt, 1);
/* Add the child socket directly into the accept queue */
inet_csk_reqsk_queue_add(sk, req, child);
@@ -210,6 +206,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
skb_set_owner_r(skb2, child);
__skb_queue_tail(&child->sk_receive_queue, skb2);
tp->syn_data_acked = 1;
+ tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
} else {
end_seq = TCP_SKB_CB(skb)->seq + 1;
}
@@ -218,10 +215,9 @@ static bool tcp_fastopen_create_child(struct sock *sk,
sk->sk_data_ready(sk);
bh_unlock_sock(child);
sock_put(child);
- WARN_ON(req->sk == NULL);
+ WARN_ON(!req->sk);
return true;
}
-EXPORT_SYMBOL(tcp_fastopen_create_child);
static bool tcp_fastopen_queue_check(struct sock *sk)
{
@@ -238,14 +234,14 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
* temporarily vs a server not supporting Fast Open at all.
*/
fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
- if (fastopenq == NULL || fastopenq->max_qlen == 0)
+ if (!fastopenq || fastopenq->max_qlen == 0)
return false;
if (fastopenq->qlen >= fastopenq->max_qlen) {
struct request_sock *req1;
spin_lock(&fastopenq->lock);
req1 = fastopenq->rskq_rst_head;
- if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+ if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
spin_unlock(&fastopenq->lock);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
@@ -254,7 +250,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
fastopenq->rskq_rst_head = req1->dl_next;
fastopenq->qlen--;
spin_unlock(&fastopenq->lock);
- reqsk_free(req1);
+ reqsk_put(req1);
}
return true;
}
@@ -308,6 +304,7 @@ fastopen:
} else if (foc->len > 0) /* Client presents an invalid cookie */
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+ valid_foc.exp = foc->exp;
*foc = valid_foc;
return false;
}
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 1d5a30a90adf..f71002e4db0b 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -300,26 +300,27 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
}
/* Extract info for Tcp socket info provided via netlink. */
-static void tcp_illinois_info(struct sock *sk, u32 ext,
- struct sk_buff *skb)
+static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
{
const struct illinois *ca = inet_csk_ca(sk);
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
- struct tcpvegas_info info = {
- .tcpv_enabled = 1,
- .tcpv_rttcnt = ca->cnt_rtt,
- .tcpv_minrtt = ca->base_rtt,
- };
+ info->vegas.tcpv_enabled = 1;
+ info->vegas.tcpv_rttcnt = ca->cnt_rtt;
+ info->vegas.tcpv_minrtt = ca->base_rtt;
+ info->vegas.tcpv_rtt = 0;
- if (info.tcpv_rttcnt > 0) {
+ if (info->vegas.tcpv_rttcnt > 0) {
u64 t = ca->sum_rtt;
- do_div(t, info.tcpv_rttcnt);
- info.tcpv_rtt = t;
+ do_div(t, info->vegas.tcpv_rttcnt);
+ info->vegas.tcpv_rtt = t;
}
- nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ *attr = INET_DIAG_VEGASINFO;
+ return sizeof(struct tcpvegas_info);
}
+ return 0;
}
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f501ac048366..bc790ea9960f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
- if ((tp->retransmit_skb_hint == NULL) ||
+ if (!tp->retransmit_skb_hint ||
before(TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
tp->retransmit_skb_hint = skb;
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
fack_count += pcount;
/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
- if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+ if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
tp->lost_cnt_hint += pcount;
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
if (!before(TCP_SKB_CB(skb)->seq, end_seq))
break;
- if ((next_dup != NULL) &&
+ if (next_dup &&
before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
in_sack = tcp_match_skb_to_sack(sk, skb,
next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
if (in_sack <= 0) {
tmp = tcp_shift_skb_data(sk, skb, state,
start_seq, end_seq, dup_sack);
- if (tmp != NULL) {
+ if (tmp) {
if (tmp != skb) {
skb = tmp;
continue;
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
struct tcp_sacktag_state *state,
u32 skip_to_seq)
{
- if (next_dup == NULL)
+ if (!next_dup)
return skb;
if (before(next_dup->start_seq, skip_to_seq)) {
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (tcp_highest_sack_seq(tp) == cache->end_seq) {
/* ...but better entrypoint exists! */
skb = tcp_highest_sack(sk);
- if (skb == NULL)
+ if (!skb)
break;
state.fack_count = tp->fackets_out;
cache++;
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (!before(start_seq, tcp_highest_sack_seq(tp))) {
skb = tcp_highest_sack(sk);
- if (skb == NULL)
+ if (!skb)
break;
state.fack_count = tp->fackets_out;
}
@@ -1820,14 +1820,12 @@ advance_sp:
for (j = 0; j < used_sacks; j++)
tp->recv_sack_cache[i++] = sp[j];
- tcp_mark_lost_retrans(sk);
-
- tcp_verify_left_out(tp);
-
if ((state.reord < tp->fackets_out) &&
((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
+ tcp_mark_lost_retrans(sk);
+ tcp_verify_left_out(tp);
out:
#if FASTRETRANS_DEBUG > 0
@@ -3099,17 +3097,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED;
- } else {
+ } else if (!(sacked & TCPCB_SACKED_ACKED)) {
last_ackt = skb->skb_mstamp;
WARN_ON_ONCE(last_ackt.v64 == 0);
if (!first_ackt.v64)
first_ackt = last_ackt;
- if (!(sacked & TCPCB_SACKED_ACKED)) {
- reord = min(pkts_acked, reord);
- if (!after(scb->end_seq, tp->high_seq))
- flag |= FLAG_ORIG_SACK_ACKED;
- }
+ reord = min(pkts_acked, reord);
+ if (!after(scb->end_seq, tp->high_seq))
+ flag |= FLAG_ORIG_SACK_ACKED;
}
if (sacked & TCPCB_SACKED_ACKED)
@@ -3282,6 +3278,24 @@ static inline bool tcp_may_update_window(const struct tcp_sock *tp,
(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}
+/* If we update tp->snd_una, also update tp->bytes_acked */
+static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
+{
+ u32 delta = ack - tp->snd_una;
+
+ tp->bytes_acked += delta;
+ tp->snd_una = ack;
+}
+
+/* If we update tp->rcv_nxt, also update tp->bytes_received */
+static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
+{
+ u32 delta = seq - tp->rcv_nxt;
+
+ tp->bytes_received += delta;
+ tp->rcv_nxt = seq;
+}
+
/* Update our send window.
*
* Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
@@ -3317,11 +3331,41 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
}
}
- tp->snd_una = ack;
+ tcp_snd_una_update(tp, ack);
return flag;
}
+/* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+ * attacks that send repeated SYNs or ACKs for the same connection. To
+ * do this, we do not send a duplicate SYNACK or ACK if the remote
+ * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
+ */
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ int mib_idx, u32 *last_oow_ack_time)
+{
+ /* Data packets without SYNs are not likely part of an ACK loop. */
+ if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+ !tcp_hdr(skb)->syn)
+ goto not_rate_limited;
+
+ if (*last_oow_ack_time) {
+ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+ NET_INC_STATS_BH(net, mib_idx);
+ return true; /* rate-limited: don't send yet! */
+ }
+ }
+
+ *last_oow_ack_time = tcp_time_stamp;
+
+not_rate_limited:
+ return false; /* not rate-limited: go ahead, send dupack now! */
+}
+
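
tcp_oow_rate_limited() above allows at most one out-of-window reply per sysctl_tcp_invalid_ratelimit interval, using wrap-safe time arithmetic. A stripped-down sketch of the core check (it omits the data-packet bypass; names and the ratelimit parameter are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Returns true when the reply should be suppressed. */
static bool oow_rate_limited(uint32_t now, uint32_t *last_reply,
			     uint32_t ratelimit)
{
	if (*last_reply) {
		int32_t elapsed = (int32_t)(now - *last_reply);

		if (elapsed >= 0 && (uint32_t)elapsed < ratelimit)
			return true;	/* still inside the window: drop */
	}
	*last_reply = now;
	return false;			/* ok to send the dupack/challenge */
}
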
/* RFC 5961 7 [ACK Throttling] */
static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
{
@@ -3469,7 +3513,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
* Note, we use the fact that SND.UNA>=SND.WL2.
*/
tcp_update_wl(tp, ack_seq);
- tp->snd_una = ack;
+ tcp_snd_una_update(tp, ack);
flag |= FLAG_WIN_UPDATE;
tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
@@ -3573,6 +3617,23 @@ old_ack:
return 0;
}
+static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
+ bool syn, struct tcp_fastopen_cookie *foc,
+ bool exp_opt)
+{
+ /* Valid only in SYN or SYN-ACK with an even length. */
+ if (!foc || !syn || len < 0 || (len & 1))
+ return;
+
+ if (len >= TCP_FASTOPEN_COOKIE_MIN &&
+ len <= TCP_FASTOPEN_COOKIE_MAX)
+ memcpy(foc->val, cookie, len);
+ else if (len != 0)
+ len = -1;
+ foc->len = len;
+ foc->exp = exp_opt;
+}
+
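
tcp_parse_fastopen_option() above accepts only even cookie lengths within [TCP_FASTOPEN_COOKIE_MIN, TCP_FASTOPEN_COOKIE_MAX], keeps len == 0 as "cookie requested", and marks anything else invalid with -1. A standalone sketch of that validation rule (the MIN/MAX values and the !foc/!syn guards of the real helper are assumed away here):

#include <string.h>

#define FO_COOKIE_MIN 4			/* stand-ins for the kernel's */
#define FO_COOKIE_MAX 16		/* TCP_FASTOPEN_COOKIE_{MIN,MAX} */

struct fo_cookie {
	int len;			/* -1 invalid, 0 requested, >0 cookie */
	unsigned char val[FO_COOKIE_MAX];
};

static void parse_fo_option(int len, const unsigned char *p,
			    struct fo_cookie *foc)
{
	if (len < 0 || (len & 1))
		return;			/* odd or negative length: ignore */
	if (len >= FO_COOKIE_MIN && len <= FO_COOKIE_MAX)
		memcpy(foc->val, p, len);
	else if (len != 0)
		len = -1;		/* present but malformed */
	foc->len = len;
}
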
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
* But, this can also be called on packets in the established flow when
* the fast version below fails.
@@ -3662,21 +3723,22 @@ void tcp_parse_options(const struct sk_buff *skb,
*/
break;
#endif
+ case TCPOPT_FASTOPEN:
+ tcp_parse_fastopen_option(
+ opsize - TCPOLEN_FASTOPEN_BASE,
+ ptr, th->syn, foc, false);
+ break;
+
case TCPOPT_EXP:
/* Fast Open option shares code 254 using a
- * 16 bits magic number. It's valid only in
- * SYN or SYN-ACK with an even size.
+ * 16 bits magic number.
*/
- if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
- get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
- foc == NULL || !th->syn || (opsize & 1))
- break;
- foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
- if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
- foc->len <= TCP_FASTOPEN_COOKIE_MAX)
- memcpy(foc->val, ptr + 2, foc->len);
- else if (foc->len != 0)
- foc->len = -1;
+ if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
+ get_unaligned_be16(ptr) ==
+ TCPOPT_FASTOPEN_MAGIC)
+ tcp_parse_fastopen_option(opsize -
+ TCPOLEN_EXP_FASTOPEN_BASE,
+ ptr + 2, th->syn, foc, true);
break;
}
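
For reference, the two encodings that this parser now accepts look as follows on the wire: the RFC 7413 option uses kind 34, while the older experimental form uses kind 254 followed by the 16-bit magic 0xF989. The 8-byte cookie value below is made up for illustration:

/* RFC 7413 Fast Open option: kind, length, cookie bytes */
static const unsigned char fo_rfc7413[] = {
	34, 2 + 8,
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
};

/* Experimental form: kind 254, length, 16-bit magic, cookie bytes */
static const unsigned char fo_exp[] = {
	254, 4 + 8, 0xf9, 0x89,
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
};
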
@@ -4190,7 +4252,7 @@ static void tcp_ofo_queue(struct sock *sk)
tail = skb_peek_tail(&sk->sk_receive_queue);
eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
- tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -4358,7 +4420,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
__skb_pull(skb, hdrlen);
eaten = (tail &&
tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0;
- tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
if (!eaten) {
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
@@ -4451,7 +4513,7 @@ queue_and_out:
eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
}
- tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
if (skb->len)
tcp_event_data_recv(sk, skb);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -4640,7 +4702,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
struct sk_buff *head;
u32 start, end;
- if (skb == NULL)
+ if (!skb)
return;
start = TCP_SKB_CB(skb)->seq;
@@ -4799,6 +4861,8 @@ static void tcp_check_space(struct sock *sk)
{
if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
+ /* pairs with tcp_poll() */
+ smp_mb__after_atomic();
if (sk->sk_socket &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
tcp_new_space(sk);
@@ -5095,7 +5159,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (unlikely(sk->sk_rx_dst == NULL))
+ if (unlikely(!sk->sk_rx_dst))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
* Header prediction.
@@ -5197,7 +5261,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_rcv_rtt_measure_ts(sk, skb);
__skb_pull(skb, tcp_header_len);
- tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
eaten = 1;
}
@@ -5292,7 +5356,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
tcp_set_state(sk, TCP_ESTABLISHED);
- if (skb != NULL) {
+ if (skb) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
security_inet_conn_established(sk, skb);
}
@@ -5330,8 +5394,8 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
- u16 mss = tp->rx_opt.mss_clamp;
- bool syn_drop;
+ u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
+ bool syn_drop = false;
if (mss == tp->rx_opt.user_mss) {
struct tcp_options_received opt;
@@ -5343,16 +5407,25 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
mss = opt.mss_clamp;
}
- if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */
+ if (!tp->syn_fastopen) {
+ /* Ignore an unsolicited cookie */
cookie->len = -1;
+ } else if (tp->total_retrans) {
+ /* SYN timed out and the SYN-ACK neither has a cookie nor
+ * acknowledges data. Presumably the remote received only
+ * the retransmitted (regular) SYNs: either the original
+ * SYN-data or the corresponding SYN-ACK was dropped.
+ */
+ syn_drop = (cookie->len < 0 && data);
+ } else if (cookie->len < 0 && !tp->syn_data) {
+ /* We requested a cookie but didn't get it. If we did not use
+ * the (old) experimental option format, try it next time
+ * (try_exp=1). Otherwise fall back to the RFC 7413 option
+ * (try_exp=2).
+ */
+ try_exp = tp->syn_fastopen_exp ? 2 : 1;
+ }
- /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably
- * the remote receives only the retransmitted (regular) SYNs: either
- * the original SYN-data or the corresponding SYN-ACK is lost.
- */
- syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
-
- tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
+ tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
if (data) { /* Retransmit unacked data in SYN */
tcp_for_write_queue_from(data, sk) {
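
The try_exp selection above boils down to: only consider switching formats when Fast Open was requested, the SYN was not retransmitted, no cookie came back, and no data rode on the SYN. A simplified sketch of that decision (parameter names are illustrative; the real code also records syn_drop separately):

/* 0: leave the cached preference alone, 1: try the experimental option
 * next time, 2: go back to the RFC 7413 option. */
static int pick_try_exp(int syn_fastopen, int total_retrans,
			int cookie_len, int sent_syn_data, int used_exp_opt)
{
	if (!syn_fastopen || total_retrans)
		return 0;
	if (cookie_len < 0 && !sent_syn_data)
		return used_exp_opt ? 2 : 1;
	return 0;
}
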
@@ -5661,11 +5734,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
req = tp->fastopen_rsk;
- if (req != NULL) {
+ if (req) {
WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
- if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
+ if (!tcp_check_req(sk, skb, req, true))
goto discard;
}
@@ -5751,7 +5824,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* ACK we have received, this would have acknowledged
* our SYNACK so stop the SYNACK timer.
*/
- if (req != NULL) {
+ if (req) {
/* Return RST if ack_seq is invalid.
* Note that RFC793 only says to generate a
* DUPACK for it but for TCP Fast Open it seems
@@ -5913,6 +5986,80 @@ static void tcp_ecn_create_request(struct request_sock *req,
inet_rsk(req)->ecn_ok = 1;
}
+static void tcp_openreq_init(struct request_sock *req,
+ const struct tcp_options_received *rx_opt,
+ struct sk_buff *skb, const struct sock *sk)
+{
+ struct inet_request_sock *ireq = inet_rsk(req);
+
+ req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
+ req->cookie_ts = 0;
+ tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+ tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tcp_rsk(req)->snt_synack = tcp_time_stamp;
+ tcp_rsk(req)->last_oow_ack_time = 0;
+ req->mss = rx_opt->mss_clamp;
+ req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+ ireq->tstamp_ok = rx_opt->tstamp_ok;
+ ireq->sack_ok = rx_opt->sack_ok;
+ ireq->snd_wscale = rx_opt->snd_wscale;
+ ireq->wscale_ok = rx_opt->wscale_ok;
+ ireq->acked = 0;
+ ireq->ecn_ok = 0;
+ ireq->ir_rmt_port = tcp_hdr(skb)->source;
+ ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
+ ireq->ir_mark = inet_request_mark(sk, skb);
+}
+
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener)
+{
+ struct request_sock *req = reqsk_alloc(ops, sk_listener);
+
+ if (req) {
+ struct inet_request_sock *ireq = inet_rsk(req);
+
+ kmemcheck_annotate_bitfield(ireq, flags);
+ ireq->opt = NULL;
+ atomic64_set(&ireq->ir_cookie, 0);
+ ireq->ireq_state = TCP_NEW_SYN_RECV;
+ write_pnet(&ireq->ireq_net, sock_net(sk_listener));
+ ireq->ireq_family = sk_listener->sk_family;
+ }
+
+ return req;
+}
+EXPORT_SYMBOL(inet_reqsk_alloc);
+
+/*
+ * Return true if a syncookie should be sent
+ */
+static bool tcp_syn_flood_action(struct sock *sk,
+ const struct sk_buff *skb,
+ const char *proto)
+{
+ const char *msg = "Dropping request";
+ bool want_cookie = false;
+ struct listen_sock *lopt;
+
+#ifdef CONFIG_SYN_COOKIES
+ if (sysctl_tcp_syncookies) {
+ msg = "Sending cookies";
+ want_cookie = true;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+ } else
+#endif
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+
+ lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+ if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
+ lopt->synflood_warned = 1;
+ pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
+ proto, ntohs(tcp_hdr(skb)->dest), msg);
+ }
+ return want_cookie;
+}
+
int tcp_conn_request(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb)
@@ -5950,7 +6097,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop;
}
- req = inet_reqsk_alloc(rsk_ops);
+ req = inet_reqsk_alloc(rsk_ops, sk);
if (!req)
goto drop;
@@ -5967,6 +6114,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb, sk);
+ /* Note: tcp_v6_init_req() might override ir_iif for link locals */
+ inet_rsk(req)->ir_iif = sk->sk_bound_dev_if;
+
af_ops->init_req(req, sk, skb);
if (security_inet_conn_request(sk, skb, req))
@@ -6039,7 +6189,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (err || want_cookie)
goto drop_and_free;
- tcp_rsk(req)->listener = NULL;
+ tcp_rsk(req)->tfo_listener = false;
af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f1756ee02207..fc1c658ec6c1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
and use initial timestamp retrieved from peer table.
*/
if (tcptw->tw_ts_recent_stamp &&
- (twp == NULL || (sysctl_tcp_tw_reuse &&
+ (!twp || (sysctl_tcp_tw_reuse &&
get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
if (tp->write_seq == 0)
@@ -189,7 +189,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (!inet->inet_saddr)
inet->inet_saddr = fl4->saddr;
- inet->inet_rcv_saddr = inet->inet_saddr;
+ sk_rcv_saddr_set(sk, inet->inet_saddr);
if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
/* Reset inherited state */
@@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
tcp_fetch_timewait_stamp(sk, &rt->dst);
inet->inet_dport = usin->sin_port;
- inet->inet_daddr = daddr;
+ sk_daddr_set(sk, daddr);
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet_opt)
@@ -310,6 +310,34 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
dst->ops->redirect(dst, sk, skb);
}
+
+/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
+void tcp_req_err(struct sock *sk, u32 seq)
+{
+ struct request_sock *req = inet_reqsk(sk);
+ struct net *net = sock_net(sk);
+
+ /* ICMPs are not backlogged, hence we cannot get
+ * an established socket here.
+ */
+ WARN_ON(req->sk);
+
+ if (seq != tcp_rsk(req)->snt_isn) {
+ NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+ reqsk_put(req);
+ } else {
+ /*
+ * Still in SYN_RECV, just remove it silently.
+ * There is no good way to pass the error to the newly
+ * created socket, and POSIX does not want network
+ * errors returned from accept().
+ */
+ NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
+ inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+ }
+}
+EXPORT_SYMBOL(tcp_req_err);
+
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
@@ -343,8 +371,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
int err;
struct net *net = dev_net(icmp_skb->dev);
- sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
- iph->saddr, th->source, inet_iif(icmp_skb));
+ sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
+ th->dest, iph->saddr, ntohs(th->source),
+ inet_iif(icmp_skb));
if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
@@ -353,6 +382,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
inet_twsk_put(inet_twsk(sk));
return;
}
+ seq = ntohl(th->seq);
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+ return tcp_req_err(sk, seq);
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
@@ -374,7 +406,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
icsk = inet_csk(sk);
tp = tcp_sk(sk);
- seq = ntohl(th->seq);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
fastopen = tp->fastopen_rsk;
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -458,42 +489,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
}
switch (sk->sk_state) {
- struct request_sock *req, **prev;
- case TCP_LISTEN:
- if (sock_owned_by_user(sk))
- goto out;
-
- req = inet_csk_search_req(sk, &prev, th->dest,
- iph->daddr, iph->saddr);
- if (!req)
- goto out;
-
- /* ICMPs are not backlogged, hence we cannot get
- an established socket here.
- */
- WARN_ON(req->sk);
-
- if (seq != tcp_rsk(req)->snt_isn) {
- NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- goto out;
- }
-
- /*
- * Still in SYN_RECV, just remove it silently.
- * There is no good way to pass the error to the newly
- * created socket, and POSIX does not want network
- * errors returned from accept().
- */
- inet_csk_reqsk_queue_drop(sk, req, prev);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
- goto out;
-
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
* already accepted it is treated as a connected one below.
*/
- if (fastopen && fastopen->sk == NULL)
+ if (fastopen && !fastopen->sk)
break;
if (!sock_owned_by_user(sk)) {
@@ -647,7 +648,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
if (!key)
goto release_sk1;
- genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
+ genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0)
goto release_sk1;
} else {
@@ -855,35 +856,6 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
kfree(inet_rsk(req)->opt);
}
-/*
- * Return true if a syncookie should be sent
- */
-bool tcp_syn_flood_action(struct sock *sk,
- const struct sk_buff *skb,
- const char *proto)
-{
- const char *msg = "Dropping request";
- bool want_cookie = false;
- struct listen_sock *lopt;
-
-#ifdef CONFIG_SYN_COOKIES
- if (sysctl_tcp_syncookies) {
- msg = "Sending cookies";
- want_cookie = true;
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
- } else
-#endif
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
-
- lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
- if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
- lopt->synflood_warned = 1;
- pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
- proto, ntohs(tcp_hdr(skb)->dest), msg);
- }
- return want_cookie;
-}
-EXPORT_SYMBOL(tcp_syn_flood_action);
#ifdef CONFIG_TCP_MD5SIG
/*
@@ -897,10 +869,10 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
const union tcp_md5_addr *addr,
int family)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
unsigned int size = sizeof(struct in_addr);
- struct tcp_md5sig_info *md5sig;
+ const struct tcp_md5sig_info *md5sig;
/* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info,
@@ -923,24 +895,15 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
- struct sock *addr_sk)
+ const struct sock *addr_sk)
{
- union tcp_md5_addr *addr;
+ const union tcp_md5_addr *addr;
- addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
+ addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
-static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
- struct request_sock *req)
-{
- union tcp_md5_addr *addr;
-
- addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
- return tcp_md5_do_lookup(sk, addr, AF_INET);
-}
-
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
@@ -1101,8 +1064,8 @@ clear_hash_noput:
return 1;
}
-int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- const struct sock *sk, const struct request_sock *req,
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+ const struct sock *sk,
const struct sk_buff *skb)
{
struct tcp_md5sig_pool *hp;
@@ -1110,12 +1073,9 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
const struct tcphdr *th = tcp_hdr(skb);
__be32 saddr, daddr;
- if (sk) {
- saddr = inet_sk(sk)->inet_saddr;
- daddr = inet_sk(sk)->inet_daddr;
- } else if (req) {
- saddr = inet_rsk(req)->ir_loc_addr;
- daddr = inet_rsk(req)->ir_rmt_addr;
+ if (sk) { /* valid for establish/request sockets */
+ saddr = sk->sk_rcv_saddr;
+ daddr = sk->sk_daddr;
} else {
const struct iphdr *iph = ip_hdr(skb);
saddr = iph->saddr;
@@ -1152,8 +1112,9 @@ clear_hash_noput:
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
-static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
- const struct sk_buff *skb)
+/* Called with rcu_read_lock() */
+static bool tcp_v4_inbound_md5_hash(struct sock *sk,
+ const struct sk_buff *skb)
{
/*
* This gets called for each TCP segment that arrives
@@ -1193,7 +1154,7 @@ static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
*/
genhash = tcp_v4_md5_hash_skb(newhash,
hash_expected,
- NULL, NULL, skb);
+ NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
@@ -1205,28 +1166,16 @@ static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
}
return false;
}
-
-static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
- bool ret;
-
- rcu_read_lock();
- ret = __tcp_v4_inbound_md5_hash(sk, skb);
- rcu_read_unlock();
-
- return ret;
-}
-
#endif
-static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
+static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
struct sk_buff *skb)
{
struct inet_request_sock *ireq = inet_rsk(req);
- ireq->ir_loc_addr = ip_hdr(skb)->daddr;
- ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
- ireq->no_srccheck = inet_sk(sk)->transparent;
+ sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+ ireq->no_srccheck = inet_sk(sk_listener)->transparent;
ireq->opt = tcp_v4_save_options(skb);
}
@@ -1259,7 +1208,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.mss_clamp = TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
- .md5_lookup = tcp_v4_reqsk_md5_lookup,
+ .req_md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
#endif
.init_req = tcp_v4_init_req,
@@ -1318,8 +1267,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
- newinet->inet_daddr = ireq->ir_rmt_addr;
- newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+ sk_daddr_set(newsk, ireq->ir_rmt_addr);
+ sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
newinet->inet_saddr = ireq->ir_loc_addr;
inet_opt = ireq->opt;
rcu_assign_pointer(newinet->inet_opt, inet_opt);
@@ -1356,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
/* Copy over the MD5 key from the original socket */
key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
AF_INET);
- if (key != NULL) {
+ if (key) {
/*
* We're using one, so create a matching key
* on the newsk structure. If we fail to get
@@ -1391,15 +1340,18 @@ EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
const struct iphdr *iph = ip_hdr(skb);
+ struct request_sock *req;
struct sock *nsk;
- struct request_sock **prev;
- /* Find possible connection requests. */
- struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
- iph->saddr, iph->daddr);
- if (req)
- return tcp_check_req(sk, skb, req, prev, false);
+
+ req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
+ if (req) {
+ nsk = tcp_check_req(sk, skb, req, false);
+ if (!nsk)
+ reqsk_put(req);
+ return nsk;
+ }
nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -1439,7 +1391,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
sk_mark_napi_id(sk, skb);
if (dst) {
if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
- dst->ops->check(dst, 0) == NULL) {
+ !dst->ops->check(dst, 0)) {
dst_release(dst);
sk->sk_rx_dst = NULL;
}
@@ -1517,7 +1469,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
if (sk) {
skb->sk = sk;
skb->destructor = sock_edemux;
- if (sk->sk_state != TCP_TIME_WAIT) {
+ if (sk_fullsock(sk)) {
struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
@@ -1734,7 +1686,7 @@ do_time_wait:
iph->daddr, th->dest,
inet_iif(skb));
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
+ inet_twsk_deschedule(inet_twsk(sk));
inet_twsk_put(inet_twsk(sk));
sk = sk2;
goto process;
@@ -1846,7 +1798,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(sk);
- BUG_ON(tp->fastopen_rsk != NULL);
+ BUG_ON(tp->fastopen_rsk);
/* If socket is aborted during connect operation */
tcp_free_fastopen_req(tp);
@@ -1904,13 +1856,13 @@ get_req:
}
sk = sk_nulls_next(st->syn_wait_sk);
st->state = TCP_SEQ_STATE_LISTENING;
- read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} else {
icsk = inet_csk(sk);
- read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
if (reqsk_queue_len(&icsk->icsk_accept_queue))
goto start_req;
- read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
sk = sk_nulls_next(sk);
}
get_sk:
@@ -1922,7 +1874,7 @@ get_sk:
goto out;
}
icsk = inet_csk(sk);
- read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
st->uid = sock_i_uid(sk);
@@ -1931,7 +1883,7 @@ start_req:
st->sbucket = 0;
goto get_req;
}
- read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
}
spin_unlock_bh(&ilb->lock);
st->offset = 0;
@@ -2150,7 +2102,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
case TCP_SEQ_STATE_OPENREQ:
if (v) {
struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
- read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+ spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
}
case TCP_SEQ_STATE_LISTENING:
if (v != SEQ_START_TOKEN)
@@ -2204,17 +2156,17 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
}
EXPORT_SYMBOL(tcp_proc_unregister);
-static void get_openreq4(const struct sock *sk, const struct request_sock *req,
+static void get_openreq4(const struct request_sock *req,
struct seq_file *f, int i, kuid_t uid)
{
const struct inet_request_sock *ireq = inet_rsk(req);
- long delta = req->expires - jiffies;
+ long delta = req->rsk_timer.expires - jiffies;
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
i,
ireq->ir_loc_addr,
- ntohs(inet_sk(sk)->inet_sport),
+ ireq->ir_num,
ireq->ir_rmt_addr,
ntohs(ireq->ir_rmt_port),
TCP_SYN_RECV,
@@ -2225,7 +2177,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
from_kuid_munged(seq_user_ns(f), uid),
0, /* non standard timer */
0, /* open_requests have no inode */
- atomic_read(&sk->sk_refcnt),
+ 0,
req);
}
@@ -2291,9 +2243,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
struct seq_file *f, int i)
{
+ long delta = tw->tw_timer.expires - jiffies;
__be32 dest, src;
__u16 destp, srcp;
- s32 delta = tw->tw_ttd - inet_tw_time_stamp();
dest = tw->tw_daddr;
src = tw->tw_rcv_saddr;
@@ -2332,7 +2284,7 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
get_tcp4_sock(v, seq, st->num);
break;
case TCP_SEQ_STATE_OPENREQ:
- get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
+ get_openreq4(v, seq, st->num, st->uid);
break;
}
out:
@@ -2460,6 +2412,8 @@ static int __net_init tcp_sk_init(struct net *net)
}
net->ipv4.sysctl_tcp_ecn = 2;
net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+ net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
+ net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
return 0;
fail:
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index e5f41bd5ec1b..a51d63a43e33 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -28,7 +28,8 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *s
struct tcp_fastopen_metrics {
u16 mss;
- u16 syn_loss:10; /* Recurring Fast Open SYN losses */
+ u16 syn_loss:10, /* Recurring Fast Open SYN losses */
+ try_exp:2; /* Request w/ exp. option (once) */
unsigned long last_syn_loss; /* Last Fast Open SYN loss */
struct tcp_fastopen_cookie cookie;
};
@@ -40,6 +41,7 @@ struct tcp_fastopen_metrics {
struct tcp_metrics_block {
struct tcp_metrics_block __rcu *tcpm_next;
+ possible_net_t tcpm_net;
struct inetpeer_addr tcpm_saddr;
struct inetpeer_addr tcpm_daddr;
unsigned long tcpm_stamp;
@@ -52,6 +54,11 @@ struct tcp_metrics_block {
struct rcu_head rcu_head;
};
+static inline struct net *tm_net(struct tcp_metrics_block *tm)
+{
+ return read_pnet(&tm->tcpm_net);
+}
+
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
enum tcp_metric_index idx)
{
@@ -74,23 +81,20 @@ static void tcp_metric_set(struct tcp_metrics_block *tm,
static bool addr_same(const struct inetpeer_addr *a,
const struct inetpeer_addr *b)
{
- const struct in6_addr *a6, *b6;
-
if (a->family != b->family)
return false;
if (a->family == AF_INET)
return a->addr.a4 == b->addr.a4;
-
- a6 = (const struct in6_addr *) &a->addr.a6[0];
- b6 = (const struct in6_addr *) &b->addr.a6[0];
-
- return ipv6_addr_equal(a6, b6);
+ return ipv6_addr_equal(&a->addr.in6, &b->addr.in6);
}
struct tcpm_hash_bucket {
struct tcp_metrics_block __rcu *chain;
};
+static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly;
+static unsigned int tcp_metrics_hash_log __read_mostly;
+
static DEFINE_SPINLOCK(tcp_metrics_lock);
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
@@ -128,6 +132,8 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
if (fastopen_clear) {
tm->tcpm_fastopen.mss = 0;
tm->tcpm_fastopen.syn_loss = 0;
+ tm->tcpm_fastopen.try_exp = 0;
+ tm->tcpm_fastopen.cookie.exp = false;
tm->tcpm_fastopen.cookie.len = 0;
}
}
@@ -143,6 +149,9 @@ static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst
#define TCP_METRICS_RECLAIM_DEPTH 5
#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
+#define deref_locked(p) \
+ rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
+
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
struct inetpeer_addr *saddr,
struct inetpeer_addr *daddr,
@@ -171,9 +180,9 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
if (unlikely(reclaim)) {
struct tcp_metrics_block *oldest;
- oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
- for (tm = rcu_dereference(oldest->tcpm_next); tm;
- tm = rcu_dereference(tm->tcpm_next)) {
+ oldest = deref_locked(tcp_metrics_hash[hash].chain);
+ for (tm = deref_locked(oldest->tcpm_next); tm;
+ tm = deref_locked(tm->tcpm_next)) {
if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
oldest = tm;
}
@@ -183,14 +192,15 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
if (!tm)
goto out_unlock;
}
+ write_pnet(&tm->tcpm_net, net);
tm->tcpm_saddr = *saddr;
tm->tcpm_daddr = *daddr;
tcpm_suck_dst(tm, dst, true);
if (likely(!reclaim)) {
- tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
- rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
+ tm->tcpm_next = tcp_metrics_hash[hash].chain;
+ rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
}
out_unlock:
@@ -214,10 +224,11 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *s
struct tcp_metrics_block *tm;
int depth = 0;
- for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
if (addr_same(&tm->tcpm_saddr, saddr) &&
- addr_same(&tm->tcpm_daddr, daddr))
+ addr_same(&tm->tcpm_daddr, daddr) &&
+ net_eq(tm_net(tm), net))
break;
depth++;
}
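
With tcp_metrics_hash now shared by all namespaces, every lookup folds the namespace into the hash and re-checks it per entry, as the hunk above shows. A compact sketch of that pattern (plain mask instead of hash_32(), pointer equality instead of net_eq(); all names illustrative):

#include <stdint.h>
#include <stddef.h>

struct tm_entry {
	struct tm_entry *next;
	const void *net;		/* owning namespace */
	uint32_t key;			/* stands in for (saddr, daddr) */
};

static struct tm_entry *tm_lookup(struct tm_entry **tbl, unsigned int log,
				  const void *net, uint32_t net_mix,
				  uint32_t key)
{
	uint32_t hash = (key ^ net_mix) & ((1u << log) - 1);
	struct tm_entry *e;

	for (e = tbl[hash]; e; e = e->next)
		if (e->key == key && e->net == net)
			return e;
	return NULL;
}
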
@@ -242,8 +253,8 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- *(struct in6_addr *)saddr.addr.a6 = inet_rsk(req)->ir_v6_loc_addr;
- *(struct in6_addr *)daddr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+ saddr.addr.in6 = inet_rsk(req)->ir_v6_loc_addr;
+ daddr.addr.in6 = inet_rsk(req)->ir_v6_rmt_addr;
hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
break;
#endif
@@ -252,12 +263,14 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
}
net = dev_net(dst->dev);
- hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+ hash ^= net_hash_mix(net);
+ hash = hash_32(hash, tcp_metrics_hash_log);
- for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
if (addr_same(&tm->tcpm_saddr, &saddr) &&
- addr_same(&tm->tcpm_daddr, &daddr))
+ addr_same(&tm->tcpm_daddr, &daddr) &&
+ net_eq(tm_net(tm), net))
break;
}
tcpm_check_stamp(tm, dst);
@@ -288,9 +301,9 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
hash = (__force unsigned int) daddr.addr.a4;
} else {
saddr.family = AF_INET6;
- *(struct in6_addr *)saddr.addr.a6 = tw->tw_v6_rcv_saddr;
+ saddr.addr.in6 = tw->tw_v6_rcv_saddr;
daddr.family = AF_INET6;
- *(struct in6_addr *)daddr.addr.a6 = tw->tw_v6_daddr;
+ daddr.addr.in6 = tw->tw_v6_daddr;
hash = ipv6_addr_hash(&tw->tw_v6_daddr);
}
}
@@ -299,12 +312,14 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
return NULL;
net = twsk_net(tw);
- hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+ hash ^= net_hash_mix(net);
+ hash = hash_32(hash, tcp_metrics_hash_log);
- for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
if (addr_same(&tm->tcpm_saddr, &saddr) &&
- addr_same(&tm->tcpm_daddr, &daddr))
+ addr_same(&tm->tcpm_daddr, &daddr) &&
+ net_eq(tm_net(tm), net))
break;
}
return tm;
@@ -336,9 +351,9 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
hash = (__force unsigned int) daddr.addr.a4;
} else {
saddr.family = AF_INET6;
- *(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
+ saddr.addr.in6 = sk->sk_v6_rcv_saddr;
daddr.family = AF_INET6;
- *(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
+ daddr.addr.in6 = sk->sk_v6_daddr;
hash = ipv6_addr_hash(&sk->sk_v6_daddr);
}
}
@@ -347,7 +362,8 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
return NULL;
net = dev_net(dst->dev);
- hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+ hash ^= net_hash_mix(net);
+ hash = hash_32(hash, tcp_metrics_hash_log);
tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
if (tm == TCP_METRICS_RECLAIM_PTR)
@@ -492,7 +508,7 @@ void tcp_init_metrics(struct sock *sk)
struct tcp_metrics_block *tm;
u32 val, crtt = 0; /* cached RTT scaled by 8 */
- if (dst == NULL)
+ if (!dst)
goto reset;
dst_confirm(dst);
@@ -700,6 +716,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
if (tfom->mss)
*mss = tfom->mss;
*cookie = tfom->cookie;
+ if (cookie->len <= 0 && tfom->try_exp == 1)
+ cookie->exp = true;
*syn_loss = tfom->syn_loss;
*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
} while (read_seqretry(&fastopen_seqlock, seq));
@@ -708,7 +726,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
}
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
- struct tcp_fastopen_cookie *cookie, bool syn_lost)
+ struct tcp_fastopen_cookie *cookie, bool syn_lost,
+ u16 try_exp)
{
struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_metrics_block *tm;
@@ -725,6 +744,9 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
tfom->mss = mss;
if (cookie && cookie->len > 0)
tfom->cookie = *cookie;
+ else if (try_exp > tfom->try_exp &&
+ tfom->cookie.len <= 0 && !tfom->cookie.exp)
+ tfom->try_exp = try_exp;
if (syn_lost) {
++tfom->syn_loss;
tfom->last_syn_loss = jiffies;
@@ -773,19 +795,19 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
switch (tm->tcpm_daddr.family) {
case AF_INET:
- if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
- tm->tcpm_daddr.addr.a4) < 0)
+ if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
+ tm->tcpm_daddr.addr.a4) < 0)
goto nla_put_failure;
- if (nla_put_be32(msg, TCP_METRICS_ATTR_SADDR_IPV4,
- tm->tcpm_saddr.addr.a4) < 0)
+ if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
+ tm->tcpm_saddr.addr.a4) < 0)
goto nla_put_failure;
break;
case AF_INET6:
- if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
- tm->tcpm_daddr.addr.a6) < 0)
+ if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
+ &tm->tcpm_daddr.addr.in6) < 0)
goto nla_put_failure;
- if (nla_put(msg, TCP_METRICS_ATTR_SADDR_IPV6, 16,
- tm->tcpm_saddr.addr.a6) < 0)
+ if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
+ &tm->tcpm_saddr.addr.in6) < 0)
goto nla_put_failure;
break;
default:
@@ -898,17 +920,19 @@ static int tcp_metrics_nl_dump(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
+ unsigned int max_rows = 1U << tcp_metrics_hash_log;
unsigned int row, s_row = cb->args[0];
int s_col = cb->args[1], col = s_col;
for (row = s_row; row < max_rows; row++, s_col = 0) {
struct tcp_metrics_block *tm;
- struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;
+ struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
rcu_read_lock();
for (col = 0, tm = rcu_dereference(hb->chain); tm;
tm = rcu_dereference(tm->tcpm_next), col++) {
+ if (!net_eq(tm_net(tm), net))
+ continue;
if (col < s_col)
continue;
if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
@@ -933,7 +957,7 @@ static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
a = info->attrs[v4];
if (a) {
addr->family = AF_INET;
- addr->addr.a4 = nla_get_be32(a);
+ addr->addr.a4 = nla_get_in_addr(a);
if (hash)
*hash = (__force unsigned int) addr->addr.a4;
return 0;
@@ -943,9 +967,9 @@ static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
if (nla_len(a) != sizeof(struct in6_addr))
return -EINVAL;
addr->family = AF_INET6;
- memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
+ addr->addr.in6 = nla_get_in6_addr(a);
if (hash)
- *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
+ *hash = ipv6_addr_hash(&addr->addr.in6);
return 0;
}
return optional ? 1 : -EAFNOSUPPORT;
@@ -994,13 +1018,15 @@ static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
if (!reply)
goto nla_put_failure;
- hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+ hash ^= net_hash_mix(net);
+ hash = hash_32(hash, tcp_metrics_hash_log);
ret = -ESRCH;
rcu_read_lock();
- for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
if (addr_same(&tm->tcpm_daddr, &daddr) &&
- (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
+ (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+ net_eq(tm_net(tm), net)) {
ret = tcp_metrics_fill_info(msg, tm);
break;
}
@@ -1020,34 +1046,27 @@ out_free:
return ret;
}
-#define deref_locked_genl(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held() && \
- lockdep_is_held(&tcp_metrics_lock))
-
-#define deref_genl(p) rcu_dereference_protected(p, lockdep_genl_is_held())
-
-static int tcp_metrics_flush_all(struct net *net)
+static void tcp_metrics_flush_all(struct net *net)
{
- unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
- struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
+ unsigned int max_rows = 1U << tcp_metrics_hash_log;
+ struct tcpm_hash_bucket *hb = tcp_metrics_hash;
struct tcp_metrics_block *tm;
unsigned int row;
for (row = 0; row < max_rows; row++, hb++) {
+ struct tcp_metrics_block __rcu **pp;
spin_lock_bh(&tcp_metrics_lock);
- tm = deref_locked_genl(hb->chain);
- if (tm)
- hb->chain = NULL;
- spin_unlock_bh(&tcp_metrics_lock);
- while (tm) {
- struct tcp_metrics_block *next;
-
- next = deref_genl(tm->tcpm_next);
- kfree_rcu(tm, rcu_head);
- tm = next;
+ pp = &hb->chain;
+ for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
+ if (net_eq(tm_net(tm), net)) {
+ *pp = tm->tcpm_next;
+ kfree_rcu(tm, rcu_head);
+ } else {
+ pp = &tm->tcpm_next;
+ }
}
+ spin_unlock_bh(&tcp_metrics_lock);
}
- return 0;
}
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
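
The rewritten flush walks each chain with a pointer-to-pointer so that entries belonging to the dying namespace can be unlinked in place, without a separate "prev" variable. The same idiom in plain C (free() standing in for kfree_rcu(), and no locking shown):

#include <stdlib.h>

struct node {
	struct node *next;
	int net_id;
};

static void flush_net(struct node **pp, int net_id)
{
	struct node *n;

	while ((n = *pp) != NULL) {
		if (n->net_id == net_id) {
			*pp = n->next;		/* unlink in place */
			free(n);
		} else {
			pp = &n->next;
		}
	}
}
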
@@ -1064,19 +1083,23 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
ret = parse_nl_addr(info, &daddr, &hash, 1);
if (ret < 0)
return ret;
- if (ret > 0)
- return tcp_metrics_flush_all(net);
+ if (ret > 0) {
+ tcp_metrics_flush_all(net);
+ return 0;
+ }
ret = parse_nl_saddr(info, &saddr);
if (ret < 0)
src = false;
- hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
- hb = net->ipv4.tcp_metrics_hash + hash;
+ hash ^= net_hash_mix(net);
+ hash = hash_32(hash, tcp_metrics_hash_log);
+ hb = tcp_metrics_hash + hash;
pp = &hb->chain;
spin_lock_bh(&tcp_metrics_lock);
- for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
+ for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
if (addr_same(&tm->tcpm_daddr, &daddr) &&
- (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
+ (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+ net_eq(tm_net(tm), net)) {
*pp = tm->tcpm_next;
kfree_rcu(tm, rcu_head);
found = true;
@@ -1126,6 +1149,9 @@ static int __net_init tcp_net_metrics_init(struct net *net)
size_t size;
unsigned int slots;
+ if (!net_eq(net, &init_net))
+ return 0;
+
slots = tcpmhash_entries;
if (!slots) {
if (totalram_pages >= 128 * 1024)
@@ -1134,14 +1160,14 @@ static int __net_init tcp_net_metrics_init(struct net *net)
slots = 8 * 1024;
}
- net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
- size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
+ tcp_metrics_hash_log = order_base_2(slots);
+ size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
- net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!net->ipv4.tcp_metrics_hash)
- net->ipv4.tcp_metrics_hash = vzalloc(size);
+ tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!tcp_metrics_hash)
+ tcp_metrics_hash = vzalloc(size);
- if (!net->ipv4.tcp_metrics_hash)
+ if (!tcp_metrics_hash)
return -ENOMEM;
return 0;
@@ -1149,19 +1175,7 @@ static int __net_init tcp_net_metrics_init(struct net *net)
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
- unsigned int i;
-
- for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
- struct tcp_metrics_block *tm, *next;
-
- tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
- while (tm) {
- next = rcu_dereference_protected(tm->tcpm_next, 1);
- kfree(tm);
- tm = next;
- }
- }
- kvfree(net->ipv4.tcp_metrics_hash);
+ tcp_metrics_flush_all(net);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
@@ -1175,16 +1189,10 @@ void __init tcp_metrics_init(void)
ret = register_pernet_subsys(&tcp_net_metrics_ops);
if (ret < 0)
- goto cleanup;
+ panic("Could not allocate the tcp_metrics hash table\n");
+
ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
tcp_metrics_nl_ops);
if (ret < 0)
- goto cleanup_subsys;
- return;
-
-cleanup_subsys:
- unregister_pernet_subsys(&tcp_net_metrics_ops);
-
-cleanup:
- return;
+ panic("Could not register tcp_metrics generic netlink\n");
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index dd11ac7798c6..e5d7649136fc 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -34,18 +34,7 @@ int sysctl_tcp_abort_on_overflow __read_mostly;
struct inet_timewait_death_row tcp_death_row = {
.sysctl_max_tw_buckets = NR_FILE * 2,
- .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
- .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
.hashinfo = &tcp_hashinfo,
- .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
- (unsigned long)&tcp_death_row),
- .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
- inet_twdr_twkill_work),
-/* Short-time timewait calendar */
-
- .twcal_hand = -1,
- .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
- (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);
@@ -158,7 +147,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
if (!th->fin ||
TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
- inet_twsk_deschedule(tw, &tcp_death_row);
+ inet_twsk_deschedule(tw);
inet_twsk_put(tw);
return TCP_TW_RST;
}
@@ -174,11 +163,9 @@ kill_with_rst:
if (tcp_death_row.sysctl_tw_recycle &&
tcptw->tw_ts_recent_stamp &&
tcp_tw_remember_stamp(tw))
- inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
- TCP_TIMEWAIT_LEN);
+ inet_twsk_schedule(tw, tw->tw_timeout);
else
- inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
- TCP_TIMEWAIT_LEN);
+ inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
return TCP_TW_ACK;
}
@@ -211,13 +198,12 @@ kill_with_rst:
*/
if (sysctl_tcp_rfc1337 == 0) {
kill:
- inet_twsk_deschedule(tw, &tcp_death_row);
+ inet_twsk_deschedule(tw);
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
}
- inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
- TCP_TIMEWAIT_LEN);
+ inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
@@ -267,8 +253,7 @@ kill:
* Do not reschedule in the last case.
*/
if (paws_reject || th->ack)
- inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
- TCP_TIMEWAIT_LEN);
+ inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
return tcp_timewait_check_oow_rate_limit(
tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -283,18 +268,17 @@ EXPORT_SYMBOL(tcp_timewait_state_process);
*/
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
- struct inet_timewait_sock *tw = NULL;
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_timewait_sock *tw;
bool recycle_ok = false;
if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
recycle_ok = tcp_remember_stamp(sk);
- if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
- tw = inet_twsk_alloc(sk, state);
+ tw = inet_twsk_alloc(sk, &tcp_death_row, state);
- if (tw != NULL) {
+ if (tw) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
struct inet_sock *inet = inet_sk(sk);
@@ -332,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
struct tcp_md5sig_key *key;
tcptw->tw_md5_key = NULL;
key = tp->af_specific->md5_lookup(sk, sk);
- if (key != NULL) {
+ if (key) {
tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
BUG();
@@ -355,8 +339,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
timeo = TCP_TIMEWAIT_LEN;
}
- inet_twsk_schedule(tw, &tcp_death_row, timeo,
- TCP_TIMEWAIT_LEN);
+ inet_twsk_schedule(tw, timeo);
inet_twsk_put(tw);
} else {
/* Sorry, if we're out of memory, just CLOSE this
@@ -454,7 +437,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
{
struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
- if (newsk != NULL) {
+ if (newsk) {
const struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -572,7 +555,6 @@ EXPORT_SYMBOL(tcp_create_openreq_child);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct request_sock **prev,
bool fastopen)
{
struct tcp_options_received tmp_opt;
@@ -629,9 +611,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
LINUX_MIB_TCPACKSKIPPEDSYNRECV,
&tcp_rsk(req)->last_oow_ack_time) &&
- !inet_rtx_syn_ack(sk, req))
- req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
- TCP_RTO_MAX) + jiffies;
+ !inet_rtx_syn_ack(sk, req)) {
+ unsigned long expires = jiffies;
+
+ expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
+ TCP_RTO_MAX);
+ if (!fastopen)
+ mod_timer_pending(&req->rsk_timer, expires);
+ else
+ req->rsk_timer.expires = expires;
+ }
return NULL;
}
@@ -763,13 +752,14 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* socket is created, wait for troubles.
*/
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
- if (child == NULL)
+ if (!child)
goto listen_overflow;
- inet_csk_reqsk_queue_unlink(sk, req, prev);
- inet_csk_reqsk_queue_removed(sk, req);
-
+ inet_csk_reqsk_queue_drop(sk, req);
inet_csk_reqsk_queue_add(sk, req, child);
+ /* Warning: caller must not call reqsk_put(req);
+ * child stole last reference on it.
+ */
return child;
listen_overflow:
@@ -791,7 +781,7 @@ embryonic_reset:
tcp_reset(sk);
}
if (!fastopen) {
- inet_csk_reqsk_queue_drop(sk, req, prev);
+ inet_csk_reqsk_queue_drop(sk, req);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
}
return NULL;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 9d7930ba8e0f..3f7c2fca5431 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -29,8 +29,8 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
}
}
-struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
- netdev_features_t features)
+static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1db253e36045..a369e8a70b2c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -518,17 +518,26 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
+ u8 *p = (u8 *)ptr;
+ u32 len; /* Fast Open option length */
+
+ if (foc->exp) {
+ len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+ *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
+ TCPOPT_FASTOPEN_MAGIC);
+ p += TCPOLEN_EXP_FASTOPEN_BASE;
+ } else {
+ len = TCPOLEN_FASTOPEN_BASE + foc->len;
+ *p++ = TCPOPT_FASTOPEN;
+ *p++ = len;
+ }
- *ptr++ = htonl((TCPOPT_EXP << 24) |
- ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
- TCPOPT_FASTOPEN_MAGIC);
-
- memcpy(ptr, foc->val, foc->len);
- if ((foc->len & 3) == 2) {
- u8 *align = ((u8 *)ptr) + foc->len;
- align[0] = align[1] = TCPOPT_NOP;
+ memcpy(p, foc->val, foc->len);
+ if ((len & 3) == 2) {
+ p[foc->len] = TCPOPT_NOP;
+ p[foc->len + 1] = TCPOPT_NOP;
}
- ptr += (foc->len + 3) >> 2;
+ ptr += (len + 3) >> 2;
}
}
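
The rewritten option writer emits either the 2-byte RFC 7413 header or the 4-byte experimental header before the cookie, then pads with two NOPs whenever the total option length leaves a 2-byte gap to the next 32-bit boundary. A byte-oriented sketch of the same layout rules (not the kernel code, which writes the experimental header as one htonl()):

#include <string.h>

static unsigned char *write_fo_opt(unsigned char *p,
				   const unsigned char *cookie,
				   int cookie_len, int use_exp)
{
	int len = (use_exp ? 4 : 2) + cookie_len;

	if (use_exp) {
		*p++ = 254; *p++ = len;		/* TCPOPT_EXP + magic */
		*p++ = 0xf9; *p++ = 0x89;
	} else {
		*p++ = 34; *p++ = len;		/* TCPOPT_FASTOPEN */
	}
	memcpy(p, cookie, cookie_len);
	p += cookie_len;
	if ((len & 3) == 2) {			/* align to 32 bits */
		*p++ = 1;			/* TCPOPT_NOP */
		*p++ = 1;
	}
	return p;
}
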
@@ -565,7 +574,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
opts->mss = tcp_advertise_mss(sk);
remaining -= TCPOLEN_MSS_ALIGNED;
- if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
+ if (likely(sysctl_tcp_timestamps && !*md5)) {
opts->options |= OPTION_TS;
opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
opts->tsecr = tp->rx_opt.ts_recent;
@@ -583,13 +592,17 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
}
if (fastopen && fastopen->cookie.len >= 0) {
- u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
+ u32 need = fastopen->cookie.len;
+
+ need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
+ TCPOLEN_FASTOPEN_BASE;
need = (need + 3) & ~3U; /* Align to 32 bits */
if (remaining >= need) {
opts->options |= OPTION_FAST_OPEN_COOKIE;
opts->fastopen_cookie = &fastopen->cookie;
remaining -= need;
tp->syn_fastopen = 1;
+ tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
}
}
@@ -601,15 +614,14 @@ static unsigned int tcp_synack_options(struct sock *sk,
struct request_sock *req,
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5,
+ const struct tcp_md5sig_key *md5,
struct tcp_fastopen_cookie *foc)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
#ifdef CONFIG_TCP_MD5SIG
- *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
- if (*md5) {
+ if (md5) {
opts->options |= OPTION_MD5;
remaining -= TCPOLEN_MD5SIG_ALIGNED;
@@ -620,8 +632,6 @@ static unsigned int tcp_synack_options(struct sock *sk,
*/
ireq->tstamp_ok &= !ireq->sack_ok;
}
-#else
- *md5 = NULL;
#endif
/* We always send an MSS option. */
@@ -645,7 +655,10 @@ static unsigned int tcp_synack_options(struct sock *sk,
remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
if (foc != NULL && foc->len >= 0) {
- u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+ u32 need = foc->len;
+
+ need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
+ TCPOLEN_FASTOPEN_BASE;
need = (need + 3) & ~3U; /* Align to 32 bits */
if (remaining >= need) {
opts->options |= OPTION_FAST_OPEN_COOKIE;
@@ -989,7 +1002,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (md5) {
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
tp->af_specific->calc_md5_hash(opts.hash_location,
- md5, sk, NULL, skb);
+ md5, sk, skb);
}
#endif
@@ -1151,7 +1164,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
/* Get a new skb... force flag on. */
buff = sk_stream_alloc_skb(sk, nsize, gfp);
- if (buff == NULL)
+ if (!buff)
return -ENOMEM; /* We'll just try again later. */
sk->sk_wmem_queued += buff->truesize;
@@ -1354,6 +1367,8 @@ void tcp_mtup_init(struct sock *sk)
icsk->icsk_af_ops->net_header_len;
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
icsk->icsk_mtup.probe_size = 0;
+ if (icsk->icsk_mtup.enabled)
+ icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
}
EXPORT_SYMBOL(tcp_mtup_init);
@@ -1708,7 +1723,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
return tcp_fragment(sk, skb, len, mss_now, gfp);
buff = sk_stream_alloc_skb(sk, 0, gfp);
- if (unlikely(buff == NULL))
+ if (unlikely(!buff))
return -ENOMEM;
sk->sk_wmem_queued += buff->truesize;
@@ -1752,20 +1767,23 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
bool *is_cwnd_limited, u32 max_segs)
{
- struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 send_win, cong_win, limit, in_flight;
+ u32 age, send_win, cong_win, limit, in_flight;
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct skb_mstamp now;
+ struct sk_buff *head;
int win_divisor;
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto send_now;
- if (icsk->icsk_ca_state != TCP_CA_Open)
+ if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
goto send_now;
- /* Defer for less than two clock ticks. */
- if (tp->tso_deferred &&
- (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
+ /* Avoid bursty behavior by allowing defer
+ * only if the last write was recent.
+ */
+ if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
goto send_now;
in_flight = tcp_packets_in_flight(tp);
@@ -1807,11 +1825,14 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
goto send_now;
}
- /* Ok, it looks like it is advisable to defer.
- * Do not rearm the timer if already set to not break TCP ACK clocking.
- */
- if (!tp->tso_deferred)
- tp->tso_deferred = 1 | (jiffies << 1);
+ head = tcp_write_queue_head(sk);
+ skb_mstamp_get(&now);
+ age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
+ /* If next ACK is likely to come too late (half srtt), do not defer */
+ if (age < (tp->srtt_us >> 4))
+ goto send_now;
+
+ /* Ok, it looks like it is advisable to defer. */
if (cong_win < send_win && cong_win < skb->len)
*is_cwnd_limited = true;
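
The new deferral bound compares the age of the skb at the head of the write queue with tp->srtt_us >> 4; since srtt_us holds eight times the smoothed RTT in microseconds, that shift is simply half the srtt. The check in isolation (illustrative names):

#include <stdbool.h>
#include <stdint.h>

/* true => do not defer: the ACK that could release more data is still
 * more than half an RTT away. */
static bool ack_too_late(uint32_t age_us, uint32_t srtt_us_times8)
{
	return age_us < (srtt_us_times8 >> 4);	/* age < srtt/2 */
}
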
@@ -1819,10 +1840,34 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
return true;
send_now:
- tp->tso_deferred = 0;
return false;
}
+static inline void tcp_mtu_check_reprobe(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct net *net = sock_net(sk);
+ u32 interval;
+ s32 delta;
+
+ interval = net->ipv4.sysctl_tcp_probe_interval;
+ delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
+ if (unlikely(delta >= interval * HZ)) {
+ int mss = tcp_current_mss(sk);
+
+ /* Update current search range */
+ icsk->icsk_mtup.probe_size = 0;
+ icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
+ sizeof(struct tcphdr) +
+ icsk->icsk_af_ops->net_header_len;
+ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+
+ /* Update probe time stamp */
+ icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+ }
+}
+
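
tcp_mtu_check_reprobe() above resets the probe search range only after sysctl_tcp_probe_interval seconds, while tcp_mtu_probe() now bisects between search_low and search_high and backs off once the interval shrinks below sysctl_tcp_probe_threshold. A rough sketch of the probe-size selection (the MTU-to-MSS conversion is reduced to a flat header overhead here):

/* Returns the next probe payload size, or -1 to back off from probing.
 * "threshold" plays the role of sysctl_tcp_probe_threshold. */
static int next_probe_size(int search_low, int search_high,
			   int threshold, int header_overhead)
{
	if (search_high - search_low < threshold)
		return -1;			/* range converged: stop */
	return (search_high + search_low) / 2 - header_overhead;
}
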
/* Create a new MTU probe if we are ready.
* MTU probe is regularly attempting to increase the path MTU by
* deliberately sending larger packets. This discovers routing
@@ -1837,11 +1882,13 @@ static int tcp_mtu_probe(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb, *nskb, *next;
+ struct net *net = sock_net(sk);
int len;
int probe_size;
int size_needed;
int copy;
int mss_now;
+ int interval;
/* Not currently probing/verifying,
* not in recovery,
@@ -1854,12 +1901,25 @@ static int tcp_mtu_probe(struct sock *sk)
tp->rx_opt.num_sacks || tp->rx_opt.dsack)
return -1;
- /* Very simple search strategy: just double the MSS. */
+ /* Use binary search for probe_size between tcp_mss_base
+ * and the current mss_clamp. If (search_high - search_low) is
+ * smaller than a threshold, back off from probing.
+ */
mss_now = tcp_current_mss(sk);
- probe_size = 2 * tp->mss_cache;
+ probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
+ icsk->icsk_mtup.search_low) >> 1);
size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
- if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
- /* TODO: set timer for probe_converge_event */
+ interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
+ /* If we are already probing actively and the reprobe timer has
+ * expired, stick with the current probing process instead of
+ * resetting the search range to its original bounds.
+ */
+ if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
+ interval < net->ipv4.sysctl_tcp_probe_threshold) {
+ /* Check whether enough time has elapsed for
+ * another round of probing.
+ */
+ tcp_mtu_check_reprobe(sk);
return -1;
}
@@ -1881,7 +1941,8 @@ static int tcp_mtu_probe(struct sock *sk)
}
/* We're allowed to probe. Build it now. */
- if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
+ nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
+ if (!nskb)
return -1;
sk->sk_wmem_queued += nskb->truesize;
sk_mem_charge(sk, nskb->truesize);
@@ -2179,7 +2240,7 @@ void tcp_send_loss_probe(struct sock *sk)
int mss = tcp_current_mss(sk);
int err = -1;
- if (tcp_send_head(sk) != NULL) {
+ if (tcp_send_head(sk)) {
err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
goto rearm_timer;
}
@@ -2689,7 +2750,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (skb == tcp_send_head(sk))
break;
/* we could do better than to assign each time */
- if (hole == NULL)
+ if (!hole)
tp->retransmit_skb_hint = skb;
/* Assume this retransmit will generate
@@ -2713,7 +2774,7 @@ begin_fwd:
if (!tcp_can_forward_retransmit(sk))
break;
/* Backtrack if necessary to non-L'ed skb */
- if (hole != NULL) {
+ if (hole) {
skb = hole;
hole = NULL;
}
@@ -2721,7 +2782,7 @@ begin_fwd:
goto begin_fwd;
} else if (!(sacked & TCPCB_LOST)) {
- if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
+ if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
hole = skb;
continue;
@@ -2751,39 +2812,65 @@ begin_fwd:
}
}
-/* Send a fin. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
+/* We allow FIN packets to exceed the memory limits to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay the FIN
+ * or even be forced to close the flow without any FIN.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+ int amt, status;
+
+ if (size <= sk->sk_forward_alloc)
+ return;
+ amt = sk_mem_pages(size);
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ sk_memory_allocated_add(sk, amt, &status);
+}
+
+/* Send a FIN. The caller locks the socket for us.
+ * We should try to send a FIN packet really hard, but eventually give up.
*/
void tcp_send_fin(struct sock *sk)
{
+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = tcp_write_queue_tail(sk);
- int mss_now;
- /* Optimization, tack on the FIN if we have a queue of
- * unsent frames. But be careful about outgoing SACKS
- * and IP options.
+ /* Optimization: tack the FIN onto the last skb in the write queue if
+ * that skb has not yet been sent, or if we are under memory pressure.
+ * Note: in the latter case, the FIN packet will be sent after a timeout,
+ * as the TCP stack thinks it has already been transmitted.
*/
- mss_now = tcp_current_mss(sk);
-
- if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
- TCP_SKB_CB(skb)->end_seq++;
+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+coalesce:
+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
+ TCP_SKB_CB(tskb)->end_seq++;
tp->write_seq++;
+ if (!tcp_send_head(sk)) {
+ /* This means tskb was already sent.
+ * Pretend we included the FIN on the previous transmit.
+ * We need to set tp->snd_nxt to the value it would have
+ * if the FIN had been sent, because the retransmit path
+ * does not change tp->snd_nxt.
+ */
+ tp->snd_nxt++;
+ return;
+ }
} else {
- /* Socket is locked, keep trying until memory is available. */
- for (;;) {
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
- if (skb)
- break;
- yield();
+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+ if (unlikely(!skb)) {
+ if (tskb)
+ goto coalesce;
+ return;
}
+ skb_reserve(skb, MAX_TCP_HEADER);
+ sk_forced_wmem_schedule(sk, skb->truesize);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
}
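The control flow of the reworked tcp_send_fin() boils down to a small
decision: coalesce the FIN onto the tail skb when possible, otherwise
allocate a dedicated skb and force-charge its memory, and fall back to
coalescing if even that allocation fails. A plain C sketch of that decision
(assumed inputs, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum fin_action { FIN_COALESCE, FIN_NEW_SKB, FIN_GIVE_UP };

static enum fin_action plan_fin(bool have_tail, bool tail_unsent,
				bool memory_pressure, bool alloc_ok)
{
	if (have_tail && (tail_unsent || memory_pressure))
		return FIN_COALESCE;
	if (alloc_ok)
		return FIN_NEW_SKB;
	/* Allocation failed: reuse the tail skb if there is one. */
	return have_tail ? FIN_COALESCE : FIN_GIVE_UP;
}

int main(void)
{
	/* Under memory pressure with an already-sent tail skb: coalesce (prints 0). */
	printf("%d\n", plan_fin(true, false, true, false));
	return 0;
}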
/* We get here when a process closes a file descriptor (either due to
@@ -2824,14 +2911,14 @@ int tcp_send_synack(struct sock *sk)
struct sk_buff *skb;
skb = tcp_write_queue_head(sk);
- if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+ if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
pr_debug("%s: wrong queue state\n", __func__);
return -EFAULT;
}
if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
- if (nskb == NULL)
+ if (!nskb)
return -ENOMEM;
tcp_unlink_write_queue(skb, sk);
__skb_header_release(nskb);
@@ -2866,7 +2953,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct tcp_sock *tp = tcp_sk(sk);
struct tcphdr *th;
struct sk_buff *skb;
- struct tcp_md5sig_key *md5;
+ struct tcp_md5sig_key *md5 = NULL;
int tcp_header_size;
int mss;
@@ -2879,7 +2966,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
skb_reserve(skb, MAX_TCP_HEADER);
skb_dst_set(skb, dst);
- security_skb_owned_by(skb, sk);
mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
@@ -2892,7 +2978,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
else
#endif
skb_mstamp_get(&skb->skb_mstamp);
- tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
+
+#ifdef CONFIG_TCP_MD5SIG
+ rcu_read_lock();
+ md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
+#endif
+ tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
foc) + sizeof(*th);
skb_push(skb, tcp_header_size);
@@ -2923,12 +3014,14 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
- if (md5) {
+ if (md5)
tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
- md5, NULL, req, skb);
- }
+ md5, req_to_sk(req), skb);
+ rcu_read_unlock();
#endif
+ /* Do not fool tcpdump (if any), clean our debris */
+ skb->tstamp.tv64 = 0;
return skb;
}
EXPORT_SYMBOL(tcp_make_synack);
@@ -2966,7 +3059,7 @@ static void tcp_connect_init(struct sock *sk)
(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
#ifdef CONFIG_TCP_MD5SIG
- if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+ if (tp->af_specific->md5_lookup(sk, sk))
tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
@@ -3252,7 +3345,7 @@ void tcp_send_ack(struct sock *sk)
* sock.
*/
buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
- if (buff == NULL) {
+ if (!buff) {
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -3296,7 +3389,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
/* We don't queue it, tcp_transmit_skb() sets ownership. */
skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
- if (skb == NULL)
+ if (!skb)
return -1;
/* Reserve space for headers and set control bits. */
@@ -3327,8 +3420,8 @@ int tcp_write_wakeup(struct sock *sk)
if (sk->sk_state == TCP_CLOSE)
return -1;
- if ((skb = tcp_send_head(sk)) != NULL &&
- before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+ skb = tcp_send_head(sk);
+ if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
int err;
unsigned int mss = tcp_current_mss(sk);
unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0732b787904e..8c65dc147d8b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -107,6 +107,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
if (net->ipv4.sysctl_tcp_mtu_probing) {
if (!icsk->icsk_mtup.enabled) {
icsk->icsk_mtup.enabled = 1;
+ icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
struct net *net = sock_net(sk);
@@ -166,7 +167,7 @@ static int tcp_write_timeout(struct sock *sk)
if (icsk->icsk_retransmits) {
dst_negative_advice(sk);
if (tp->syn_fastopen || tp->syn_data)
- tcp_fastopen_cache_set(sk, 0, NULL, true);
+ tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
if (tp->syn_data)
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
@@ -326,7 +327,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
struct request_sock *req;
req = tcp_sk(sk)->fastopen_rsk;
- req->rsk_ops->syn_ack_timeout(sk, req);
+ req->rsk_ops->syn_ack_timeout(req);
if (req->num_timeout >= max_retries) {
tcp_write_err(sk);
@@ -538,19 +539,11 @@ static void tcp_write_timer(unsigned long data)
sock_put(sk);
}
-/*
- * Timer for listening sockets
- */
-
-static void tcp_synack_timer(struct sock *sk)
+void tcp_syn_ack_timeout(const struct request_sock *req)
{
- inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
- TCP_TIMEOUT_INIT, TCP_RTO_MAX);
-}
+ struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
-{
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);
@@ -582,7 +575,7 @@ static void tcp_keepalive_timer (unsigned long data)
}
if (sk->sk_state == TCP_LISTEN) {
- tcp_synack_timer(sk);
+ pr_err("Hmm... keepalive on a LISTEN ???\n");
goto out;
}
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a6afde666ab1..a6cea1d5e20d 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -286,19 +286,21 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
}
/* Extract info for Tcp socket info provided via netlink. */
-void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
{
const struct vegas *ca = inet_csk_ca(sk);
+
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
- struct tcpvegas_info info = {
- .tcpv_enabled = ca->doing_vegas_now,
- .tcpv_rttcnt = ca->cntRTT,
- .tcpv_rtt = ca->baseRTT,
- .tcpv_minrtt = ca->minRTT,
- };
-
- nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ info->vegas.tcpv_enabled = ca->doing_vegas_now,
+ info->vegas.tcpv_rttcnt = ca->cntRTT,
+ info->vegas.tcpv_rtt = ca->baseRTT,
+ info->vegas.tcpv_minrtt = ca->minRTT,
+
+ *attr = INET_DIAG_VEGASINFO;
+ return sizeof(struct tcpvegas_info);
}
+ return 0;
}
EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 0531b99d8637..ef9da5306c68 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -19,6 +19,7 @@ void tcp_vegas_init(struct sock *sk);
void tcp_vegas_state(struct sock *sk, u8 ca_state);
void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info);
#endif /* __TCP_VEGAS_H */
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index bb63fba47d47..c10732e39837 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -256,20 +256,21 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
}
/* Extract info for Tcp socket info provided via netlink. */
-static void tcp_westwood_info(struct sock *sk, u32 ext,
- struct sk_buff *skb)
+static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
{
const struct westwood *ca = inet_csk_ca(sk);
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
- struct tcpvegas_info info = {
- .tcpv_enabled = 1,
- .tcpv_rtt = jiffies_to_usecs(ca->rtt),
- .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
- };
+ info->vegas.tcpv_enabled = 1;
+ info->vegas.tcpv_rttcnt = 0;
+ info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt),
+ info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
- nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ *attr = INET_DIAG_VEGASINFO;
+ return sizeof(struct tcpvegas_info);
}
+ return 0;
}
static struct tcp_congestion_ops tcp_westwood __read_mostly = {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 97ef1f8b7be8..d10b7e0112eb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -318,8 +318,8 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
}
-static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
- unsigned int port)
+static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
+ unsigned int port)
{
return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
@@ -421,9 +421,9 @@ static inline int compute_score2(struct sock *sk, struct net *net,
return score;
}
-static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
- const __u16 lport, const __be32 faddr,
- const __be16 fport)
+static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
+ const __u16 lport, const __be32 faddr,
+ const __be16 fport)
{
static u32 udp_ehash_secret __read_mostly;
@@ -433,7 +433,6 @@ static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
udp_ehash_secret + net_hash_mix(net));
}
-
/* called with read_rcu_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
@@ -633,7 +632,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
iph->saddr, uh->source, skb->dev->ifindex, udptable);
- if (sk == NULL) {
+ if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return; /* No socket for error */
}
@@ -873,8 +872,7 @@ out:
}
EXPORT_SYMBOL(udp_push_pending_frames);
-int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len)
+int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
@@ -1012,7 +1010,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
- if (rt == NULL) {
+ if (!rt) {
struct net *net = sock_net(sk);
fl4 = &fl4_stack;
@@ -1136,7 +1134,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
* sendpage interface can't pass.
* This will succeed only when the socket is connected.
*/
- ret = udp_sendmsg(NULL, sk, &msg, 0);
+ ret = udp_sendmsg(sk, &msg, 0);
if (ret < 0)
return ret;
}
@@ -1172,7 +1170,6 @@ out:
return ret;
}
-
/**
* first_packet_length - return length of first packet in receive queue
* @sk: socket
@@ -1254,8 +1251,8 @@ EXPORT_SYMBOL(udp_ioctl);
* return it, otherwise we block.
*/
-int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len)
+int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
@@ -1356,7 +1353,6 @@ csum_copy_err:
goto try_again;
}
-
int udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
@@ -1523,7 +1519,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
- if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+ if (skb->len > sizeof(struct udphdr) && encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
@@ -1580,7 +1576,6 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
udp_lib_checksum_complete(skb))
goto csum_error;
-
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
@@ -1610,7 +1605,6 @@ drop:
return -1;
}
-
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
@@ -1620,7 +1614,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
for (i = 0; i < count; i++) {
sk = stack[i];
- if (likely(skb1 == NULL))
+ if (likely(!skb1))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
@@ -1803,7 +1797,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
saddr, daddr, udptable, proto);
sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
- if (sk != NULL) {
+ if (sk) {
int ret;
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
@@ -2525,6 +2519,16 @@ void __init udp_table_init(struct udp_table *table, const char *name)
}
}
+u32 udp_flow_hashrnd(void)
+{
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ return hashrnd;
+}
+EXPORT_SYMBOL(udp_flow_hashrnd);
+
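udp_flow_hashrnd() exposes a lazily seeded, per-boot random value. A
userspace analogue of the net_get_random_once() pattern, assuming glibc's
getrandom() and POSIX threads (sketch only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/random.h>

static uint32_t hashrnd;
static pthread_once_t hashrnd_once = PTHREAD_ONCE_INIT;

static void hashrnd_init(void)
{
	/* Draw the seed exactly once, the first time it is needed. */
	if (getrandom(&hashrnd, sizeof(hashrnd), 0) != (ssize_t)sizeof(hashrnd))
		hashrnd = 1;	/* crude fallback, fine for an illustration */
}

static uint32_t flow_hashrnd(void)
{
	pthread_once(&hashrnd_once, hashrnd_init);
	return hashrnd;
}

int main(void)
{
	printf("%u\n", flow_hashrnd());
	return 0;
}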
void __init udp_init(void)
{
unsigned long limit;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 4a000f1dd757..b763c39ae1d7 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -18,8 +18,9 @@
#include <linux/sock_diag.h>
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
- struct netlink_callback *cb, struct inet_diag_req_v2 *req,
- struct nlattr *bc)
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ struct nlattr *bc)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
@@ -31,7 +32,8 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
}
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
- const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
+ const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req)
{
int err = -EINVAL;
struct sock *sk;
@@ -56,7 +58,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
goto out_nosk;
err = -ENOENT;
- if (sk == NULL)
+ if (!sk)
goto out_nosk;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
@@ -90,8 +92,9 @@ out_nosk:
return err;
}
-static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
- struct inet_diag_req_v2 *r, struct nlattr *bc)
+static void udp_dump(struct udp_table *table, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
int num, s_num, slot, s_slot;
struct net *net = sock_net(skb->sk);
@@ -144,13 +147,13 @@ done:
}
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
udp_dump(&udp_table, skb, cb, r, bc);
}
static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req)
+ const struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udp_table, in_skb, nlh, req);
}
@@ -170,13 +173,14 @@ static const struct inet_diag_handler udp_diag_handler = {
};
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc)
{
udp_dump(&udplite_table, skb, cb, r, bc);
}
static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req)
+ const struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udplite_table, in_skb, nlh, req);
}
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index f3c27899f62b..7e0fe4bdd967 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -21,8 +21,8 @@ int compat_udp_setsockopt(struct sock *sk, int level, int optname,
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
#endif
-int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len);
+int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len);
int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 4915d8284a86..f9386160cbee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo)
pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
spin_unlock(&udp_offload_lock);
- if (uo_priv != NULL)
+ if (uo_priv)
call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
@@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
break;
}
- if (uo_priv != NULL) {
+ if (uo_priv) {
NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
err = uo_priv->offload->callbacks.gro_complete(skb,
nhoff + sizeof(struct udphdr),
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index c83b35485056..6bb98cc193c9 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -75,7 +75,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
-int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
bool xnet, bool nocheck)
@@ -92,7 +92,7 @@ int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
udp_set_csum(nocheck, skb, src, dst, skb->len);
- return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP,
+ return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP,
tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index aac6197b7a71..60b032f58ccc 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -22,9 +22,9 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
return xfrm4_extract_header(skb);
}
-static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
+static inline int xfrm4_rcv_encap_finish(struct sock *sk, struct sk_buff *skb)
{
- if (skb_dst(skb) == NULL) {
+ if (!skb_dst(skb)) {
const struct iphdr *iph = ip_hdr(skb);
if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
@@ -52,7 +52,8 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
iph->tot_len = htons(skb->len);
ip_send_check(iph);
- NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
+ skb->dev, NULL,
xfrm4_rcv_encap_finish);
return 0;
}
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 91771a7c802f..35feda676464 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -63,7 +63,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
- ip_select_ident(skb, NULL);
+ ip_select_ident(dev_net(dst->dev), skb, NULL);
return 0;
}
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index dab73813cb92..2878dbfffeb7 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
}
EXPORT_SYMBOL(xfrm4_prepare_output);
-int xfrm4_output_finish(struct sk_buff *skb)
+int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
{
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
@@ -77,26 +77,26 @@ int xfrm4_output_finish(struct sk_buff *skb)
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
#endif
- return xfrm_output(skb);
+ return xfrm_output(sk, skb);
}
-static int __xfrm4_output(struct sk_buff *skb)
+static int __xfrm4_output(struct sock *sk, struct sk_buff *skb)
{
struct xfrm_state *x = skb_dst(skb)->xfrm;
#ifdef CONFIG_NETFILTER
if (!x) {
IPCB(skb)->flags |= IPSKB_REROUTED;
- return dst_output(skb);
+ return dst_output_sk(sk, skb);
}
#endif
- return x->outer_mode->afinfo->output_finish(skb);
+ return x->outer_mode->afinfo->output_finish(sk, skb);
}
int xfrm4_output(struct sock *sk, struct sk_buff *skb)
{
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
NULL, skb_dst(skb)->dev, __xfrm4_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 6156f68a1e90..bff69746e05f 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -232,7 +232,6 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static struct dst_ops xfrm4_dst_ops = {
.family = AF_INET,
- .protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
.redirect = xfrm4_redirect,
@@ -299,7 +298,7 @@ static void __net_exit xfrm4_net_exit(struct net *net)
{
struct ctl_table *table;
- if (net->ipv4.xfrm4_hdr == NULL)
+ if (!net->ipv4.xfrm4_hdr)
return;
table = net->ipv4.xfrm4_hdr->ctl_table_arg;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b6030025f411..37b70e82bff8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -46,6 +46,7 @@
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
+#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
@@ -102,6 +103,9 @@
#define INFINITY_LIFE_TIME 0xFFFFFFFF
+#define IPV6_MAX_STRLEN \
+ sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
+
static inline u32 cstamp_delta(unsigned long cstamp)
{
return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
@@ -127,6 +131,9 @@ static void ipv6_regen_rndid(unsigned long data);
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
static int ipv6_count_addresses(struct inet6_dev *idev);
+static int ipv6_generate_stable_address(struct in6_addr *addr,
+ u8 dad_count,
+ const struct inet6_dev *idev);
/*
* Configured unicast address hash table
@@ -202,6 +209,9 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_dad = 1,
.suppress_frag_ndisc = 1,
.accept_ra_mtu = 1,
+ .stable_secret = {
+ .initialized = false,
+ }
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -240,6 +250,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_dad = 1,
.suppress_frag_ndisc = 1,
.accept_ra_mtu = 1,
+ .stable_secret = {
+ .initialized = false,
+ },
};
/* Check if a valid qdisc is available */
@@ -321,7 +334,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
return ERR_PTR(-EINVAL);
ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
- if (ndev == NULL)
+ if (!ndev)
return ERR_PTR(err);
rwlock_init(&ndev->lock);
@@ -333,7 +346,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
ndev->cnf.mtu6 = dev->mtu;
ndev->cnf.sysctl = NULL;
ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
- if (ndev->nd_parms == NULL) {
+ if (!ndev->nd_parms) {
kfree(ndev);
return ERR_PTR(err);
}
@@ -468,7 +481,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
ncm = nlmsg_data(nlh);
@@ -506,7 +519,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
int err = -ENOBUFS;
skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -561,10 +574,10 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
break;
default:
dev = __dev_get_by_index(net, ifindex);
- if (dev == NULL)
+ if (!dev)
goto errout;
in6_dev = __in6_dev_get(dev);
- if (in6_dev == NULL)
+ if (!in6_dev)
goto errout;
devconf = &in6_dev->cnf;
break;
@@ -572,7 +585,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
err = -ENOBUFS;
skb = nlmsg_new(inet6_netconf_msgsize_devconf(-1), GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
@@ -841,7 +854,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
- if (ifa == NULL) {
+ if (!ifa) {
ADBG("ipv6_add_addr: malloc failed\n");
err = -ENOBUFS;
goto out;
@@ -860,7 +873,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
ifa->peer_addr = *peer_addr;
spin_lock_init(&ifa->lock);
- spin_lock_init(&ifa->state_lock);
INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
INIT_HLIST_NODE(&ifa->addr_lst);
ifa->scope = scope;
@@ -1003,10 +1015,10 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
ASSERT_RTNL();
- spin_lock_bh(&ifp->state_lock);
+ spin_lock_bh(&ifp->lock);
state = ifp->state;
ifp->state = INET6_IFADDR_STATE_DEAD;
- spin_unlock_bh(&ifp->state_lock);
+ spin_unlock_bh(&ifp->lock);
if (state == INET6_IFADDR_STATE_DEAD)
goto out;
@@ -1546,7 +1558,7 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
: ifp->flags;
if (ipv6_addr_equal(&ifp->addr, addr) &&
!(ifp_flags&banned_flags) &&
- (dev == NULL || ifp->idev->dev == dev ||
+ (!dev || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
rcu_read_unlock_bh();
return 1;
@@ -1568,7 +1580,7 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
- if (dev == NULL || ifp->idev->dev == dev)
+ if (!dev || ifp->idev->dev == dev)
return true;
}
}
@@ -1637,7 +1649,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
- if (dev == NULL || ifp->idev->dev == dev ||
+ if (!dev || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
result = ifp;
in6_ifa_hold(ifp);
@@ -1686,19 +1698,21 @@ static int addrconf_dad_end(struct inet6_ifaddr *ifp)
{
int err = -ENOENT;
- spin_lock_bh(&ifp->state_lock);
+ spin_lock_bh(&ifp->lock);
if (ifp->state == INET6_IFADDR_STATE_DAD) {
ifp->state = INET6_IFADDR_STATE_POSTDAD;
err = 0;
}
- spin_unlock_bh(&ifp->state_lock);
+ spin_unlock_bh(&ifp->lock);
return err;
}
void addrconf_dad_failure(struct inet6_ifaddr *ifp)
{
+ struct in6_addr addr;
struct inet6_dev *idev = ifp->idev;
+ struct net *net = dev_net(ifp->idev->dev);
if (addrconf_dad_end(ifp)) {
in6_ifa_put(ifp);
@@ -1708,9 +1722,57 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
ifp->idev->dev->name, &ifp->addr);
- if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
- struct in6_addr addr;
+ spin_lock_bh(&ifp->lock);
+
+ if (ifp->flags & IFA_F_STABLE_PRIVACY) {
+ int scope = ifp->scope;
+ u32 flags = ifp->flags;
+ struct in6_addr new_addr;
+ struct inet6_ifaddr *ifp2;
+ u32 valid_lft, preferred_lft;
+ int pfxlen = ifp->prefix_len;
+ int retries = ifp->stable_privacy_retry + 1;
+
+ if (retries > net->ipv6.sysctl.idgen_retries) {
+ net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
+ ifp->idev->dev->name);
+ goto errdad;
+ }
+
+ new_addr = ifp->addr;
+ if (ipv6_generate_stable_address(&new_addr, retries,
+ idev))
+ goto errdad;
+
+ valid_lft = ifp->valid_lft;
+ preferred_lft = ifp->prefered_lft;
+
+ spin_unlock_bh(&ifp->lock);
+
+ if (idev->cnf.max_addresses &&
+ ipv6_count_addresses(idev) >=
+ idev->cnf.max_addresses)
+ goto lock_errdad;
+
+ net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
+ ifp->idev->dev->name);
+
+ ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
+ scope, flags, valid_lft,
+ preferred_lft);
+ if (IS_ERR(ifp2))
+ goto lock_errdad;
+
+ spin_lock_bh(&ifp2->lock);
+ ifp2->stable_privacy_retry = retries;
+ ifp2->state = INET6_IFADDR_STATE_PREDAD;
+ spin_unlock_bh(&ifp2->lock);
+ addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
+ in6_ifa_put(ifp2);
+lock_errdad:
+ spin_lock_bh(&ifp->lock);
+ } else if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
addr.s6_addr32[0] = htonl(0xfe800000);
addr.s6_addr32[1] = 0;
@@ -1724,10 +1786,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
}
}
- spin_lock_bh(&ifp->state_lock);
+errdad:
/* transition from _POSTDAD to _ERRDAD */
ifp->state = INET6_IFADDR_STATE_ERRDAD;
- spin_unlock_bh(&ifp->state_lock);
+ spin_unlock_bh(&ifp->lock);
addrconf_mod_dad_work(ifp, 0);
}
@@ -2052,7 +2114,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
struct fib6_table *table;
table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
- if (table == NULL)
+ if (!table)
return NULL;
read_lock_bh(&table->tb6_lock);
@@ -2186,6 +2248,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
__u32 valid_lft;
__u32 prefered_lft;
int addr_type;
+ u32 addr_flags = 0;
struct inet6_dev *in6_dev;
struct net *net = dev_net(dev);
@@ -2215,7 +2278,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
in6_dev = in6_dev_get(dev);
- if (in6_dev == NULL) {
+ if (!in6_dev) {
net_dbg_ratelimited("addrconf: device %s not configured\n",
dev->name);
return;
@@ -2292,6 +2355,12 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
in6_dev->token.s6_addr + 8, 8);
read_unlock_bh(&in6_dev->lock);
tokenized = true;
+ } else if (in6_dev->addr_gen_mode ==
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+ !ipv6_generate_stable_address(&addr, 0,
+ in6_dev)) {
+ addr_flags |= IFA_F_STABLE_PRIVACY;
+ goto ok;
} else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
in6_dev_put(in6_dev);
@@ -2308,9 +2377,8 @@ ok:
ifp = ipv6_get_ifaddr(net, &addr, dev, 1);
- if (ifp == NULL && valid_lft) {
+ if (!ifp && valid_lft) {
int max_addresses = in6_dev->cnf.max_addresses;
- u32 addr_flags = 0;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (in6_dev->cnf.optimistic_dad &&
@@ -2350,7 +2418,7 @@ ok:
u32 stored_lft;
/* update lifetime (RFC2462 5.5.3 e) */
- spin_lock(&ifp->lock);
+ spin_lock_bh(&ifp->lock);
now = jiffies;
if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
@@ -2380,12 +2448,12 @@ ok:
ifp->tstamp = now;
flags = ifp->flags;
ifp->flags &= ~IFA_F_DEPRECATED;
- spin_unlock(&ifp->lock);
+ spin_unlock_bh(&ifp->lock);
if (!(flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ifp);
} else
- spin_unlock(&ifp->lock);
+ spin_unlock_bh(&ifp->lock);
manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
create, now);
@@ -2418,7 +2486,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
err = -ENODEV;
- if (dev == NULL)
+ if (!dev)
goto err_exit;
#if IS_ENABLED(CONFIG_IPV6_SIT)
@@ -2464,6 +2532,23 @@ err_exit:
return err;
}
+static int ipv6_mc_config(struct sock *sk, bool join,
+ const struct in6_addr *addr, int ifindex)
+{
+ int ret;
+
+ ASSERT_RTNL();
+
+ lock_sock(sk);
+ if (join)
+ ret = ipv6_sock_mc_join(sk, ifindex, addr);
+ else
+ ret = ipv6_sock_mc_drop(sk, ifindex, addr);
+ release_sock(sk);
+
+ return ret;
+}
+
/*
* Manual configuration of address on an interface
*/
@@ -2476,10 +2561,10 @@ static int inet6_addr_add(struct net *net, int ifindex,
struct inet6_ifaddr *ifp;
struct inet6_dev *idev;
struct net_device *dev;
+ unsigned long timeout;
+ clock_t expires;
int scope;
u32 flags;
- clock_t expires;
- unsigned long timeout;
ASSERT_RTNL();
@@ -2501,6 +2586,14 @@ static int inet6_addr_add(struct net *net, int ifindex,
if (IS_ERR(idev))
return PTR_ERR(idev);
+ if (ifa_flags & IFA_F_MCAUTOJOIN) {
+ int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
+ true, pfx, ifindex);
+
+ if (ret < 0)
+ return ret;
+ }
+
scope = ipv6_addr_scope(pfx);
timeout = addrconf_timeout_fixup(valid_lft, HZ);
@@ -2542,6 +2635,9 @@ static int inet6_addr_add(struct net *net, int ifindex,
in6_ifa_put(ifp);
addrconf_verify_rtnl();
return 0;
+ } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
+ ipv6_mc_config(net->ipv6.mc_autojoin_sk,
+ false, pfx, ifindex);
}
return PTR_ERR(ifp);
@@ -2562,7 +2658,7 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
return -ENODEV;
idev = __in6_dev_get(dev);
- if (idev == NULL)
+ if (!idev)
return -ENXIO;
read_lock_bh(&idev->lock);
@@ -2578,6 +2674,10 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
jiffies);
ipv6_del_addr(ifp);
addrconf_verify_rtnl();
+ if (ipv6_addr_is_multicast(pfx)) {
+ ipv6_mc_config(net->ipv6.mc_autojoin_sk,
+ false, pfx, dev->ifindex);
+ }
return 0;
}
}
@@ -2710,7 +2810,7 @@ static void init_loopback(struct net_device *dev)
ASSERT_RTNL();
idev = ipv6_find_idev(dev);
- if (idev == NULL) {
+ if (!idev) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2757,10 +2857,11 @@ static void init_loopback(struct net_device *dev)
}
}
-static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
+static void addrconf_add_linklocal(struct inet6_dev *idev,
+ const struct in6_addr *addr, u32 flags)
{
struct inet6_ifaddr *ifp;
- u32 addr_flags = IFA_F_PERMANENT;
+ u32 addr_flags = flags | IFA_F_PERMANENT;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (idev->cnf.optimistic_dad &&
@@ -2768,7 +2869,6 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
addr_flags |= IFA_F_OPTIMISTIC;
#endif
-
ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
if (!IS_ERR(ifp)) {
@@ -2778,18 +2878,103 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
}
}
+static bool ipv6_reserved_interfaceid(struct in6_addr address)
+{
+ if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
+ return true;
+
+ if (address.s6_addr32[2] == htonl(0x02005eff) &&
+ ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
+ return true;
+
+ if (address.s6_addr32[2] == htonl(0xfdffffff) &&
+ ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
+ return true;
+
+ return false;
+}
+
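ipv6_reserved_interfaceid() rejects interface identifiers that collide with
the subnet-router anycast ID and other reserved IID ranges before they can be
assigned. A standalone check of the same bit patterns, using made-up test
values (illustration, not kernel code):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* id_hi/id_lo are the last two 32-bit words of the address in network order,
 * i.e. s6_addr32[2] and s6_addr32[3] above.
 */
static bool reserved_interfaceid(uint32_t id_hi, uint32_t id_lo)
{
	if ((id_hi | id_lo) == 0)
		return true;	/* subnet-router anycast */
	if (id_hi == htonl(0x02005eff) &&
	    (id_lo & htonl(0xfe000000)) == htonl(0xfe000000))
		return true;	/* reserved IANA-derived IID block */
	if (id_hi == htonl(0xfdffffff) &&
	    (id_lo & htonl(0xffffff80)) == htonl(0xffffff80))
		return true;	/* reserved subnet anycast range */
	return false;
}

int main(void)
{
	printf("%d %d\n", reserved_interfaceid(0, 0),
	       reserved_interfaceid(htonl(0x12345678), htonl(0x9abcdef0)));
	return 0;
}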
+static int ipv6_generate_stable_address(struct in6_addr *address,
+ u8 dad_count,
+ const struct inet6_dev *idev)
+{
+ static DEFINE_SPINLOCK(lock);
+ static __u32 digest[SHA_DIGEST_WORDS];
+ static __u32 workspace[SHA_WORKSPACE_WORDS];
+
+ static union {
+ char __data[SHA_MESSAGE_BYTES];
+ struct {
+ struct in6_addr secret;
+ __be32 prefix[2];
+ unsigned char hwaddr[MAX_ADDR_LEN];
+ u8 dad_count;
+ } __packed;
+ } data;
+
+ struct in6_addr secret;
+ struct in6_addr temp;
+ struct net *net = dev_net(idev->dev);
+
+ BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
+
+ if (idev->cnf.stable_secret.initialized)
+ secret = idev->cnf.stable_secret.secret;
+ else if (net->ipv6.devconf_dflt->stable_secret.initialized)
+ secret = net->ipv6.devconf_dflt->stable_secret.secret;
+ else
+ return -1;
+
+retry:
+ spin_lock_bh(&lock);
+
+ sha_init(digest);
+ memset(&data, 0, sizeof(data));
+ memset(workspace, 0, sizeof(workspace));
+ memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
+ data.prefix[0] = address->s6_addr32[0];
+ data.prefix[1] = address->s6_addr32[1];
+ data.secret = secret;
+ data.dad_count = dad_count;
+
+ sha_transform(digest, data.__data, workspace);
+
+ temp = *address;
+ temp.s6_addr32[2] = (__force __be32)digest[0];
+ temp.s6_addr32[3] = (__force __be32)digest[1];
+
+ spin_unlock_bh(&lock);
+
+ if (ipv6_reserved_interfaceid(temp)) {
+ dad_count++;
+ if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
+ return -1;
+ goto retry;
+ }
+
+ *address = temp;
+ return 0;
+}
+
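Conceptually, the generator hashes the stable secret, the prefix, the
link-layer address and the DAD counter, and uses part of the digest as the
interface identifier, bumping the counter and retrying on conflicts. The
sketch below illustrates that idea with OpenSSL's SHA1() over a simple
concatenation; the kernel instead runs sha_transform() over one fixed 64-byte
block with the layout shown above, so the resulting bits differ. All buffer
contents are made-up example values (build with -lcrypto):

#include <openssl/sha.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char secret[16] = { 0x42 };			/* 128-bit stable secret */
	unsigned char prefix[8]  = { 0x20, 0x01, 0x0d, 0xb8 };	/* 2001:db8::/64 */
	unsigned char hwaddr[6]  = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	uint8_t dad_count = 0;					/* bumped on each DAD conflict */
	unsigned char buf[16 + 8 + 6 + 1];
	unsigned char digest[SHA_DIGEST_LENGTH];
	int i;

	memcpy(buf, secret, 16);
	memcpy(buf + 16, prefix, 8);
	memcpy(buf + 24, hwaddr, 6);
	buf[30] = dad_count;

	SHA1(buf, sizeof(buf), digest);

	printf("interface id: ");
	for (i = 0; i < 8; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}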
static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
{
- if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) {
- struct in6_addr addr;
+ struct in6_addr addr;
+
+ ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
- ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
+ if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY) {
+ if (!ipv6_generate_stable_address(&addr, 0, idev))
+ addrconf_add_linklocal(idev, &addr,
+ IFA_F_STABLE_PRIVACY);
+ else if (prefix_route)
+ addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
+ } else if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) {
/* addrconf_add_linklocal also adds a prefix_route and we
* only need to care about prefix routes if ipv6_generate_eui64
* couldn't generate one.
*/
if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
- addrconf_add_linklocal(idev, &addr);
+ addrconf_add_linklocal(idev, &addr, 0);
else if (prefix_route)
addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
}
@@ -2834,7 +3019,7 @@ static void addrconf_sit_config(struct net_device *dev)
*/
idev = ipv6_find_idev(dev);
- if (idev == NULL) {
+ if (!idev) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2859,7 +3044,7 @@ static void addrconf_gre_config(struct net_device *dev)
ASSERT_RTNL();
idev = ipv6_find_idev(dev);
- if (idev == NULL) {
+ if (!idev) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -3056,7 +3241,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
neigh_ifdown(&nd_tbl, dev);
idev = __in6_dev_get(dev);
- if (idev == NULL)
+ if (!idev)
return -ENODEV;
/*
@@ -3127,10 +3312,10 @@ restart:
write_unlock_bh(&idev->lock);
- spin_lock_bh(&ifa->state_lock);
+ spin_lock_bh(&ifa->lock);
state = ifa->state;
ifa->state = INET6_IFADDR_STATE_DEAD;
- spin_unlock_bh(&ifa->state_lock);
+ spin_unlock_bh(&ifa->lock);
if (state != INET6_IFADDR_STATE_DEAD) {
__ipv6_ifa_notify(RTM_DELADDR, ifa);
@@ -3288,12 +3473,12 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp)
{
bool begin_dad = false;
- spin_lock_bh(&ifp->state_lock);
+ spin_lock_bh(&ifp->lock);
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
ifp->state = INET6_IFADDR_STATE_PREDAD;
begin_dad = true;
}
- spin_unlock_bh(&ifp->state_lock);
+ spin_unlock_bh(&ifp->lock);
if (begin_dad)
addrconf_mod_dad_work(ifp, 0);
@@ -3315,7 +3500,7 @@ static void addrconf_dad_work(struct work_struct *w)
rtnl_lock();
- spin_lock_bh(&ifp->state_lock);
+ spin_lock_bh(&ifp->lock);
if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
action = DAD_BEGIN;
ifp->state = INET6_IFADDR_STATE_DAD;
@@ -3323,7 +3508,7 @@ static void addrconf_dad_work(struct work_struct *w)
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;
}
- spin_unlock_bh(&ifp->state_lock);
+ spin_unlock_bh(&ifp->lock);
if (action == DAD_BEGIN) {
addrconf_dad_begin(ifp);
@@ -3811,7 +3996,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
ifm = nlmsg_data(nlh);
pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
- if (pfx == NULL)
+ if (!pfx)
return -EINVAL;
ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
@@ -3923,7 +4108,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
ifm = nlmsg_data(nlh);
pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
- if (pfx == NULL)
+ if (!pfx)
return -EINVAL;
if (tb[IFA_CACHEINFO]) {
@@ -3938,17 +4123,17 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
}
dev = __dev_get_by_index(net, ifm->ifa_index);
- if (dev == NULL)
+ if (!dev)
return -ENODEV;
ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
/* We ignore other flags so far. */
ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
- IFA_F_NOPREFIXROUTE;
+ IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
- if (ifa == NULL) {
+ if (!ifa) {
/*
* It would be best to check for !NLM_F_CREATE here but
* userspace already relies on not having to provide this.
@@ -4023,7 +4208,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
u32 preferred, valid;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
@@ -4052,11 +4237,11 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
}
if (!ipv6_addr_any(&ifa->peer_addr)) {
- if (nla_put(skb, IFA_LOCAL, 16, &ifa->addr) < 0 ||
- nla_put(skb, IFA_ADDRESS, 16, &ifa->peer_addr) < 0)
+ if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
+ nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
goto error;
} else
- if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0)
+ if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
goto error;
if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
@@ -4084,11 +4269,11 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
scope = RT_SCOPE_SITE;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
- if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 ||
+ if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
@@ -4110,11 +4295,11 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
scope = RT_SCOPE_SITE;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
- if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 ||
+ if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
@@ -4283,7 +4468,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
goto errout;
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
- if (addr == NULL) {
+ if (!addr) {
err = -EINVAL;
goto errout;
}
@@ -4326,7 +4511,7 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
int err = -ENOBUFS;
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
@@ -4398,6 +4583,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
+ /* we omit DEVCONF_STABLE_SECRET for now */
}
static inline size_t inet6_ifla6_size(void)
@@ -4478,24 +4664,24 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
- if (nla == NULL)
+ if (!nla)
goto nla_put_failure;
ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
/* XXX - MC not implemented */
nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
- if (nla == NULL)
+ if (!nla)
goto nla_put_failure;
snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
- if (nla == NULL)
+ if (!nla)
goto nla_put_failure;
snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
- if (nla == NULL)
+ if (!nla)
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
@@ -4541,7 +4727,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
ASSERT_RTNL();
- if (token == NULL)
+ if (!token)
return -EINVAL;
if (ipv6_addr_any(token))
return -EINVAL;
@@ -4632,8 +4818,15 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
- mode != IN6_ADDR_GEN_MODE_NONE)
+ mode != IN6_ADDR_GEN_MODE_NONE &&
+ mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY)
return -EINVAL;
+
+ if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+ !idev->cnf.stable_secret.initialized &&
+ !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized)
+ return -EINVAL;
+
idev->addr_gen_mode = mode;
err = 0;
}
@@ -4650,7 +4843,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
void *protoinfo;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
hdr = nlmsg_data(nlh);
@@ -4665,11 +4858,11 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
- (dev->ifindex != dev->iflink &&
- nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+ (dev->ifindex != dev_get_iflink(dev) &&
+ nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
goto nla_put_failure;
protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
- if (protoinfo == NULL)
+ if (!protoinfo)
goto nla_put_failure;
if (inet6_fill_ifla6_attrs(skb, idev) < 0)
@@ -4730,7 +4923,7 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
int err = -ENOBUFS;
skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
@@ -4763,7 +4956,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
struct prefix_cacheinfo ci;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
pmsg = nlmsg_data(nlh);
@@ -4802,7 +4995,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
int err = -ENOBUFS;
skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
@@ -5042,6 +5235,74 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
return ret;
}
+static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int err;
+ struct in6_addr addr;
+ char str[IPV6_MAX_STRLEN];
+ struct ctl_table lctl = *ctl;
+ struct net *net = ctl->extra2;
+ struct ipv6_stable_secret *secret = ctl->data;
+
+ if (&net->ipv6.devconf_all->stable_secret == ctl->data)
+ return -EIO;
+
+ lctl.maxlen = IPV6_MAX_STRLEN;
+ lctl.data = str;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (!write && !secret->initialized) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (!write) {
+ err = snprintf(str, sizeof(str), "%pI6",
+ &secret->secret);
+ if (err >= sizeof(str)) {
+ err = -EIO;
+ goto out;
+ }
+ }
+
+ err = proc_dostring(&lctl, write, buffer, lenp, ppos);
+ if (err || !write)
+ goto out;
+
+ if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
+ err = -EIO;
+ goto out;
+ }
+
+ secret->initialized = true;
+ secret->secret = addr;
+
+ if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
+ struct net_device *dev;
+
+ for_each_netdev(net, dev) {
+ struct inet6_dev *idev = __in6_dev_get(dev);
+
+ if (idev) {
+ idev->addr_gen_mode =
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ }
+ }
+ } else {
+ struct inet6_dev *idev = ctl->extra1;
+
+ idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ }
+
+out:
+ rtnl_unlock();
+
+ return err;
+}
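For reference, writing the new sysctl from userspace is enough to both
install the secret and flip the device(s) into
IN6_ADDR_GEN_MODE_STABLE_PRIVACY, as the handler above shows. A minimal
sketch, assuming this patch is applied and sufficient privileges; the secret
below is a placeholder, not a recommended value:

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/ipv6/conf/default/stable_secret";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* The handler accepts any well-formed IPv6 address literal as the secret. */
	fputs("2001:db8:1:1::42\n", f);
	return fclose(f) ? 1 : 0;
}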
static struct addrconf_sysctl_table
{
@@ -5315,6 +5576,13 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec,
},
{
+ .procname = "stable_secret",
+ .data = &ipv6_devconf.stable_secret,
+ .maxlen = IPV6_MAX_STRLEN,
+ .mode = 0600,
+ .proc_handler = addrconf_sysctl_stable_secret,
+ },
+ {
/* sentinel */
}
},
@@ -5328,7 +5596,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
- if (t == NULL)
+ if (!t)
goto out;
for (i = 0; t->addrconf_vars[i].data; i++) {
@@ -5340,7 +5608,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
- if (t->sysctl_header == NULL)
+ if (!t->sysctl_header)
goto free;
p->sysctl = t;
@@ -5356,7 +5624,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
{
struct addrconf_sysctl_table *t;
- if (p->sysctl == NULL)
+ if (!p->sysctl)
return;
t = p->sysctl;
@@ -5399,17 +5667,20 @@ static int __net_init addrconf_init_net(struct net *net)
struct ipv6_devconf *all, *dflt;
all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
- if (all == NULL)
+ if (!all)
goto err_alloc_all;
dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
- if (dflt == NULL)
+ if (!dflt)
goto err_alloc_dflt;
/* these will be inherited by all namespaces */
dflt->autoconf = ipv6_defaults.autoconf;
dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+ dflt->stable_secret.initialized = false;
+ all->stable_secret.initialized = false;
+
net->ipv6.devconf_all = all;
net->ipv6.devconf_dflt = dflt;
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 98cc4cd570e2..d873ceea86e6 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -140,7 +140,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
struct net_device *dev = idev->dev;
WARN_ON(!list_empty(&idev->addr_list));
- WARN_ON(idev->mc_list != NULL);
+ WARN_ON(idev->mc_list);
WARN_ON(timer_pending(&idev->rs_timer));
#ifdef NET_REFCNT_DEBUG
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index e43e79d0a612..882124ebb438 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -29,9 +29,7 @@
* Policy Table
*/
struct ip6addrlbl_entry {
-#ifdef CONFIG_NET_NS
- struct net *lbl_net;
-#endif
+ possible_net_t lbl_net;
struct in6_addr prefix;
int prefixlen;
int ifindex;
@@ -129,9 +127,6 @@ static const __net_initconst struct ip6addrlbl_init_table
/* Object management */
static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p)
{
-#ifdef CONFIG_NET_NS
- release_net(p->lbl_net);
-#endif
kfree(p);
}
@@ -240,9 +235,7 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
newp->addrtype = addrtype;
newp->label = label;
INIT_HLIST_NODE(&newp->list);
-#ifdef CONFIG_NET_NS
- newp->lbl_net = hold_net(net);
-#endif
+ write_pnet(&newp->lbl_net, net);
atomic_set(&newp->refcnt, 1);
return newp;
}
@@ -484,7 +477,7 @@ static int ip6addrlbl_fill(struct sk_buff *skb,
ip6addrlbl_putmsg(nlh, p->prefixlen, p->ifindex, lseq);
- if (nla_put(skb, IFAL_ADDRESS, 16, &p->prefix) < 0 ||
+ if (nla_put_in6_addr(skb, IFAL_ADDRESS, &p->prefix) < 0 ||
nla_put_u32(skb, IFAL_LABEL, p->label) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e8c4400f23e9..eef63b394c5a 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -164,11 +164,11 @@ lookup_protocol:
answer_flags = answer->flags;
rcu_read_unlock();
- WARN_ON(answer_prot->slab == NULL);
+ WARN_ON(!answer_prot->slab);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
- if (sk == NULL)
+ if (!sk)
goto out;
sock_init_data(sock, sk);
@@ -391,7 +391,7 @@ int inet6_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- if (sk == NULL)
+ if (!sk)
return -EINVAL;
/* Free mc lists */
@@ -413,11 +413,11 @@ void inet6_destroy_sock(struct sock *sk)
/* Release rx options */
skb = xchg(&np->pktoptions, NULL);
- if (skb != NULL)
+ if (skb)
kfree_skb(skb);
skb = xchg(&np->rxpmtu, NULL);
- if (skb != NULL)
+ if (skb)
kfree_skb(skb);
/* Free flowlabels */
@@ -426,7 +426,7 @@ void inet6_destroy_sock(struct sock *sk)
/* Free tx options */
opt = xchg(&np->opt, NULL);
- if (opt != NULL)
+ if (opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
EXPORT_SYMBOL_GPL(inet6_destroy_sock);
@@ -640,7 +640,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
dst = __sk_dst_check(sk, np->dst_cookie);
- if (dst == NULL) {
+ if (!dst) {
struct inet_sock *inet = inet_sk(sk);
struct in6_addr *final_p, final;
struct flowi6 fl6;
@@ -766,6 +766,8 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.icmpv6_time = 1*HZ;
net->ipv6.sysctl.flowlabel_consistency = 1;
net->ipv6.sysctl.auto_flowlabels = 0;
+ net->ipv6.sysctl.idgen_retries = 3;
+ net->ipv6.sysctl.idgen_delay = 1 * HZ;
atomic_set(&net->ipv6.fib6_sernum, 1);
err = ipv6_init_mibs(net);
@@ -824,7 +826,7 @@ static int __init inet6_init(void)
struct list_head *r;
int err = 0;
- BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
+ sock_skb_cb_check_size(sizeof(struct inet6_skb_parm));
/* Register the socket-side information for inet6_create. */
for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index a6727add2624..ed7d4e3f9c10 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -681,7 +681,7 @@ static int ah6_init_state(struct xfrm_state *x)
goto error;
ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
- if (ahp == NULL)
+ if (!ahp)
return -ENOMEM;
ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index baf2742d1ec4..514ac259f543 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -60,6 +60,8 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
int ishost = !net->ipv6.devconf_all->forwarding;
int err = 0;
+ ASSERT_RTNL();
+
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (ipv6_addr_is_multicast(addr))
@@ -68,12 +70,11 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
return -EINVAL;
pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL);
- if (pac == NULL)
+ if (!pac)
return -ENOMEM;
pac->acl_next = NULL;
pac->acl_addr = *addr;
- rtnl_lock();
if (ifindex == 0) {
struct rt6_info *rt;
@@ -92,7 +93,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
} else
dev = __dev_get_by_index(net, ifindex);
- if (dev == NULL) {
+ if (!dev) {
err = -ENODEV;
goto error;
}
@@ -130,7 +131,6 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
}
error:
- rtnl_unlock();
if (pac)
sock_kfree_s(sk, pac, sizeof(*pac));
return err;
@@ -146,7 +146,8 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
struct ipv6_ac_socklist *pac, *prev_pac;
struct net *net = sock_net(sk);
- rtnl_lock();
+ ASSERT_RTNL();
+
prev_pac = NULL;
for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) {
if ((ifindex == 0 || pac->acl_ifindex == ifindex) &&
@@ -154,10 +155,8 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
break;
prev_pac = pac;
}
- if (!pac) {
- rtnl_unlock();
+ if (!pac)
return -ENOENT;
- }
if (prev_pac)
prev_pac->acl_next = pac->acl_next;
else
@@ -166,7 +165,6 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
dev = __dev_get_by_index(net, pac->acl_ifindex);
if (dev)
ipv6_dev_ac_dec(dev, &pac->acl_addr);
- rtnl_unlock();
sock_kfree_s(sk, pac, sizeof(*pac));
return 0;
@@ -224,7 +222,7 @@ static struct ifacaddr6 *aca_alloc(struct rt6_info *rt,
struct ifacaddr6 *aca;
aca = kzalloc(sizeof(*aca), GFP_ATOMIC);
- if (aca == NULL)
+ if (!aca)
return NULL;
aca->aca_addr = *addr;
@@ -270,7 +268,7 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
aca = aca_alloc(rt, addr);
- if (aca == NULL) {
+ if (!aca) {
ip6_rt_put(rt);
err = -ENOMEM;
goto out;
@@ -339,7 +337,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
{
struct inet6_dev *idev = __in6_dev_get(dev);
- if (idev == NULL)
+ if (!idev)
return -ENODEV;
return __ipv6_dev_ac_dec(idev, addr);
}
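ipv6_sock_ac_join() and ipv6_sock_ac_drop() no longer take the RTNL themselves; the rtnl_lock()/rtnl_unlock() pairs are replaced by ASSERT_RTNL(), so the lock is now owned by the caller. In this series that caller is do_ipv6_setsockopt(), which takes the RTNL for the anycast and multicast options (see the setsockopt_needs_rtnl() hunk in ipv6_sockglue.c below). A minimal caller sketch, assuming no additional locking requirements:

	rtnl_lock();
	err = ipv6_sock_ac_join(sk, ifindex, &addr);	/* or ipv6_sock_ac_drop() */
	rtnl_unlock();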
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index ace8daca5c83..762a58c772b8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (flowlabel == NULL)
+ if (!flowlabel)
return -EINVAL;
}
}
@@ -373,7 +373,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
err = -EAGAIN;
skb = sock_dequeue_err_skb(sk);
- if (skb == NULL)
+ if (!skb)
goto out;
copied = skb->len;
@@ -463,7 +463,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
err = -EAGAIN;
skb = xchg(&np->rxpmtu, NULL);
- if (skb == NULL)
+ if (!skb)
goto out;
copied = skb->len;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index e48f2c7c5c59..31f1b5d5e2ef 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -495,7 +495,7 @@ static int esp_init_authenc(struct xfrm_state *x)
int err;
err = -EINVAL;
- if (x->ealg == NULL)
+ if (!x->ealg)
goto error;
err = -ENAMETOOLONG;
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 8af3eb57f438..5c5d23e59da5 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -82,7 +82,7 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
if (nexthdr == NEXTHDR_NONE)
return -1;
hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
- if (hp == NULL)
+ if (!hp)
return -1;
if (nexthdr == NEXTHDR_FRAGMENT) {
__be16 _frag_off, *fp;
@@ -91,7 +91,7 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
frag_off),
sizeof(_frag_off),
&_frag_off);
- if (fp == NULL)
+ if (!fp)
return -1;
*frag_offp = *fp;
@@ -218,7 +218,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
}
hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
- if (hp == NULL)
+ if (!hp)
return -EBADMSG;
if (nexthdr == NEXTHDR_ROUTING) {
@@ -226,7 +226,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
rh = skb_header_pointer(skb, start, sizeof(_rh),
&_rh);
- if (rh == NULL)
+ if (!rh)
return -EBADMSG;
if (flags && (*flags & IP6_FH_F_SKIP_RH) &&
@@ -245,7 +245,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
frag_off),
sizeof(_frag_off),
&_frag_off);
- if (fp == NULL)
+ if (!fp)
return -EBADMSG;
_frag_off = ntohs(*fp) & ~0x7;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 70bc6abc0639..2367a16eae58 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -199,12 +199,10 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
}
if (frh->src_len)
- nla_memcpy(&rule6->src.addr, tb[FRA_SRC],
- sizeof(struct in6_addr));
+ rule6->src.addr = nla_get_in6_addr(tb[FRA_SRC]);
if (frh->dst_len)
- nla_memcpy(&rule6->dst.addr, tb[FRA_DST],
- sizeof(struct in6_addr));
+ rule6->dst.addr = nla_get_in6_addr(tb[FRA_DST]);
rule6->src.plen = frh->src_len;
rule6->dst.plen = frh->dst_len;
@@ -250,11 +248,9 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->tos = rule6->tclass;
if ((rule6->dst.plen &&
- nla_put(skb, FRA_DST, sizeof(struct in6_addr),
- &rule6->dst.addr)) ||
+ nla_put_in6_addr(skb, FRA_DST, &rule6->dst.addr)) ||
(rule6->src.plen &&
- nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
- &rule6->src.addr)))
+ nla_put_in6_addr(skb, FRA_SRC, &rule6->src.addr)))
goto nla_put_failure;
return 0;
@@ -299,19 +295,16 @@ static int __net_init fib6_rules_net_init(struct net *net)
ops = fib_rules_register(&fib6_rules_ops_template, net);
if (IS_ERR(ops))
return PTR_ERR(ops);
- net->ipv6.fib6_rules_ops = ops;
-
- err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0,
- RT6_TABLE_LOCAL, 0);
+ err = fib_default_rule_add(ops, 0, RT6_TABLE_LOCAL, 0);
if (err)
goto out_fib6_rules_ops;
- err = fib_default_rule_add(net->ipv6.fib6_rules_ops,
- 0x7FFE, RT6_TABLE_MAIN, 0);
+ err = fib_default_rule_add(ops, 0x7FFE, RT6_TABLE_MAIN, 0);
if (err)
goto out_fib6_rules_ops;
+ net->ipv6.fib6_rules_ops = ops;
out:
return err;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index a5e95199585e..2c2b5d51f15c 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -160,8 +160,7 @@ static bool is_ineligible(const struct sk_buff *skb)
tp = skb_header_pointer(skb,
ptr+offsetof(struct icmp6hdr, icmp6_type),
sizeof(_type), &_type);
- if (tp == NULL ||
- !(*tp & ICMPV6_INFOMSG_MASK))
+ if (!tp || !(*tp & ICMPV6_INFOMSG_MASK))
return true;
}
return false;
@@ -231,7 +230,7 @@ static bool opt_unrec(struct sk_buff *skb, __u32 offset)
offset += skb_network_offset(skb);
op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
- if (op == NULL)
+ if (!op)
return true;
return (*op & 0xC0) == 0x80;
}
@@ -244,7 +243,7 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
int err = 0;
skb = skb_peek(&sk->sk_write_queue);
- if (skb == NULL)
+ if (!skb)
goto out;
icmp6h = icmp6_hdr(skb);
@@ -479,7 +478,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
sk = icmpv6_xmit_lock(net);
- if (sk == NULL)
+ if (!sk)
return;
sk->sk_mark = mark;
np = inet6_sk(sk);
@@ -582,7 +581,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
sk = icmpv6_xmit_lock(net);
- if (sk == NULL)
+ if (!sk)
return;
sk->sk_mark = mark;
np = inet6_sk(sk);
@@ -839,7 +838,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
net->ipv6.icmp_sk =
kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
- if (net->ipv6.icmp_sk == NULL)
+ if (!net->ipv6.icmp_sk)
return -ENOMEM;
for_each_possible_cpu(i) {
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 29b32206e494..6927f3fb5597 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -112,22 +112,20 @@ static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
return c & (synq_hsize - 1);
}
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
const __be16 rport,
const struct in6_addr *raddr,
const struct in6_addr *laddr,
const int iif)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
- struct request_sock *req, **prev;
+ struct request_sock *req;
+ u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
+ lopt->nr_table_entries);
- for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
- lopt->hash_rnd,
- lopt->nr_table_entries)];
- (req = *prev) != NULL;
- prev = &req->dl_next) {
+ spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+ for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req);
if (ireq->ir_rmt_port == rport &&
@@ -135,13 +133,14 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
(!ireq->ir_iif || ireq->ir_iif == iif)) {
+ atomic_inc(&req->rsk_refcnt);
WARN_ON(req->sk != NULL);
- *prevp = prev;
- return req;
+ break;
}
}
+ spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
- return NULL;
+ return req;
}
EXPORT_SYMBOL_GPL(inet6_csk_search_req);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 051dffb49c90..871641bc1ed4 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -23,11 +23,9 @@
#include <net/secure_seq.h>
#include <net/ip.h>
-static unsigned int inet6_ehashfn(struct net *net,
- const struct in6_addr *laddr,
- const u16 lport,
- const struct in6_addr *faddr,
- const __be16 fport)
+u32 inet6_ehashfn(const struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport)
{
static u32 inet6_ehash_secret __read_mostly;
static u32 ipv6_hash_secret __read_mostly;
@@ -44,54 +42,6 @@ static unsigned int inet6_ehashfn(struct net *net,
inet6_ehash_secret + net_hash_mix(net));
}
-static int inet6_sk_ehashfn(const struct sock *sk)
-{
- const struct inet_sock *inet = inet_sk(sk);
- const struct in6_addr *laddr = &sk->sk_v6_rcv_saddr;
- const struct in6_addr *faddr = &sk->sk_v6_daddr;
- const __u16 lport = inet->inet_num;
- const __be16 fport = inet->inet_dport;
- struct net *net = sock_net(sk);
-
- return inet6_ehashfn(net, laddr, lport, faddr, fport);
-}
-
-int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
-{
- struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
- int twrefcnt = 0;
-
- WARN_ON(!sk_unhashed(sk));
-
- if (sk->sk_state == TCP_LISTEN) {
- struct inet_listen_hashbucket *ilb;
-
- ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
- spin_lock(&ilb->lock);
- __sk_nulls_add_node_rcu(sk, &ilb->head);
- spin_unlock(&ilb->lock);
- } else {
- unsigned int hash;
- struct hlist_nulls_head *list;
- spinlock_t *lock;
-
- sk->sk_hash = hash = inet6_sk_ehashfn(sk);
- list = &inet_ehash_bucket(hashinfo, hash)->chain;
- lock = inet_ehash_lockp(hashinfo, hash);
- spin_lock(lock);
- __sk_nulls_add_node_rcu(sk, list);
- if (tw) {
- WARN_ON(sk->sk_hash != tw->tw_hash);
- twrefcnt = inet_twsk_unhash(tw);
- }
- spin_unlock(lock);
- }
-
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- return twrefcnt;
-}
-EXPORT_SYMBOL(__inet6_hash);
-
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
* we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
@@ -296,7 +246,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
*twp = tw;
} else if (tw) {
/* Silly. Should hash-dance instead... */
- inet_twsk_deschedule(tw, death_row);
+ inet_twsk_deschedule(tw);
inet_twsk_put(tw);
}
@@ -320,6 +270,6 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk)
{
return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk),
- __inet6_check_established, __inet6_hash);
+ __inet6_check_established);
}
EXPORT_SYMBOL_GPL(inet6_hash_connect);
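inet6_csk_search_req() drops the struct request_sock ***prevp out-parameter used for later unlinking; it now takes icsk_accept_queue.syn_wait_lock around the walk and returns the matching request with its refcount raised via atomic_inc(&req->rsk_refcnt). A sketch of the resulting caller contract; releasing through reqsk_put() is an assumption about callers that are not part of this hunk:

	req = inet6_csk_search_req(sk, rport, raddr, laddr, iif);
	if (req) {
		/* ... inspect or answer the pending connection request ... */
		reqsk_put(req);		/* drop the reference taken by the lookup */
	}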
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 263ef4143bff..96dbffff5a24 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1206,7 +1206,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
WARN_ON(fn->fn_flags & RTN_RTINFO);
WARN_ON(fn->fn_flags & RTN_TL_ROOT);
- WARN_ON(fn->leaf != NULL);
+ WARN_ON(fn->leaf);
children = 0;
child = NULL;
@@ -1361,7 +1361,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
#if RT6_DEBUG >= 2
if (rt->dst.obsolete > 0) {
- WARN_ON(fn != NULL);
+ WARN_ON(fn);
return -ENOENT;
}
#endif
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index f45d6db50a45..d491125011c4 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -100,7 +100,6 @@ static void fl_free(struct ip6_flowlabel *fl)
if (fl) {
if (fl->share == IPV6_FL_S_PROCESS)
put_pid(fl->owner.pid);
- release_net(fl->fl_net);
kfree(fl->opt);
kfree_rcu(fl, rcu);
}
@@ -206,7 +205,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
if (fl->label) {
lfl = __fl_lookup(net, fl->label);
- if (lfl == NULL)
+ if (!lfl)
break;
}
}
@@ -220,7 +219,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
* with the same label can only appear on another sock
*/
lfl = __fl_lookup(net, fl->label);
- if (lfl != NULL) {
+ if (lfl) {
atomic_inc(&lfl->users);
spin_unlock_bh(&ip6_fl_lock);
return lfl;
@@ -298,10 +297,10 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
{
struct ipv6_txoptions *fl_opt = fl->opt;
- if (fopt == NULL || fopt->opt_flen == 0)
+ if (!fopt || fopt->opt_flen == 0)
return fl_opt;
- if (fl_opt != NULL) {
+ if (fl_opt) {
opt_space->hopopt = fl_opt->hopopt;
opt_space->dst0opt = fl_opt->dst0opt;
opt_space->srcrt = fl_opt->srcrt;
@@ -367,7 +366,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -ENOMEM;
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
- if (fl == NULL)
+ if (!fl)
goto done;
if (olen > 0) {
@@ -377,7 +376,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -ENOMEM;
fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
- if (fl->opt == NULL)
+ if (!fl->opt)
goto done;
memset(fl->opt, 0, sizeof(*fl->opt));
@@ -403,7 +402,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
}
}
- fl->fl_net = hold_net(net);
+ fl->fl_net = net;
fl->expires = jiffies;
err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
if (err)
@@ -597,7 +596,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
return -EINVAL;
fl = fl_create(net, sk, &freq, optval, optlen, &err);
- if (fl == NULL)
+ if (!fl)
return err;
sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
@@ -617,7 +616,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
}
rcu_read_unlock_bh();
- if (fl1 == NULL)
+ if (!fl1)
fl1 = fl_lookup(net, freq.flr_label);
if (fl1) {
recheck:
@@ -634,7 +633,7 @@ recheck:
goto release;
err = -ENOMEM;
- if (sfl1 == NULL)
+ if (!sfl1)
goto release;
if (fl->linger > fl1->linger)
fl1->linger = fl->linger;
@@ -654,7 +653,7 @@ release:
goto done;
err = -ENOMEM;
- if (sfl1 == NULL)
+ if (!sfl1)
goto done;
err = mem_check(sk);
@@ -662,7 +661,7 @@ release:
goto done;
fl1 = fl_intern(net, fl, freq.flr_label);
- if (fl1 != NULL)
+ if (fl1)
goto recheck;
if (!freq.flr_label) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index bc28b7d42a6d..a38d3ac0f18f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -223,7 +223,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
}
}
- if (cand != NULL)
+ if (cand)
return cand;
dev = ign->fb_tunnel_dev;
@@ -395,7 +395,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
flags & GRE_KEY ?
*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
p[1]);
- if (t == NULL)
+ if (!t)
return;
switch (type) {
@@ -760,7 +760,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
skb_set_inner_protocol(skb, protocol);
- ip6tunnel_xmit(skb, dev);
+ ip6tunnel_xmit(NULL, skb, dev);
if (ndst)
ip6_tnl_dst_store(tunnel, ndst);
return 0;
@@ -980,7 +980,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
&p->raddr, &p->laddr,
p->link, strict);
- if (rt == NULL)
+ if (!rt)
return;
if (rt->dst.dev) {
@@ -1073,7 +1073,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
}
ip6gre_tnl_parm_from_user(&p1, &p);
t = ip6gre_tunnel_locate(net, &p1, 0);
- if (t == NULL)
+ if (!t)
t = netdev_priv(dev);
}
memset(&p, 0, sizeof(p));
@@ -1105,7 +1105,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
- if (t != NULL) {
+ if (t) {
if (t->dev != dev) {
err = -EEXIST;
break;
@@ -1144,7 +1144,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
err = -ENOENT;
ip6gre_tnl_parm_from_user(&p1, &p);
t = ip6gre_tunnel_locate(net, &p1, 0);
- if (t == NULL)
+ if (!t)
goto done;
err = -EPERM;
if (t == netdev_priv(ign->fb_tunnel_dev))
@@ -1216,6 +1216,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
.ndo_do_ioctl = ip6gre_tunnel_ioctl,
.ndo_change_mtu = ip6gre_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip6_tnl_get_iflink,
};
static void ip6gre_dev_free(struct net_device *dev)
@@ -1238,7 +1239,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
dev->flags |= IFF_NOARP;
- dev->iflink = 0;
dev->addr_len = sizeof(struct in6_addr);
netif_keep_dst(dev);
}
@@ -1246,7 +1246,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
static int ip6gre_tunnel_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
- int i;
tunnel = netdev_priv(dev);
@@ -1260,18 +1259,10 @@ static int ip6gre_tunnel_init(struct net_device *dev)
if (ipv6_addr_any(&tunnel->parms.raddr))
dev->header_ops = &ip6gre_header_ops;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ip6gre_tunnel_stats;
- ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ip6gre_tunnel_stats->syncp);
- }
-
- dev->iflink = tunnel->parms.link;
-
return 0;
}
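The open-coded alloc_percpu() plus per-CPU u64_stats_init() loop removed above is what netdev_alloc_pcpu_stats() performs internally. Roughly (a sketch, not the verbatim macro from linux/netdevice.h):

	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (dev->tstats) {
		int cpu;

		for_each_possible_cpu(cpu)
			u64_stats_init(&per_cpu_ptr(dev->tstats, cpu)->syncp);
	}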
@@ -1313,7 +1304,7 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
t = rtnl_dereference(ign->tunnels[prio][h]);
- while (t != NULL) {
+ while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
@@ -1412,7 +1403,7 @@ static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
goto out;
if (data[IFLA_GRE_REMOTE]) {
- nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+ daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
if (ipv6_addr_any(&daddr))
return -EINVAL;
}
@@ -1446,10 +1437,10 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
if (data[IFLA_GRE_LOCAL])
- nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
+ parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);
if (data[IFLA_GRE_REMOTE])
- nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+ parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
if (data[IFLA_GRE_TTL])
parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
@@ -1480,8 +1471,6 @@ static int ip6gre_tap_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
- dev->iflink = tunnel->parms.link;
-
return 0;
}
@@ -1493,6 +1482,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ip6gre_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip6_tnl_get_iflink,
};
static void ip6gre_tap_setup(struct net_device *dev)
@@ -1503,7 +1493,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
dev->netdev_ops = &ip6gre_tap_netdev_ops;
dev->destructor = ip6gre_dev_free;
- dev->iflink = 0;
dev->features |= NETIF_F_NETNS_LOCAL;
}
@@ -1622,8 +1611,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
- nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
- nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
+ nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
+ nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
/*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index aacdcb4dc762..f2e464eba5ef 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -46,8 +46,7 @@
#include <net/xfrm.h>
#include <net/inet_ecn.h>
-
-int ip6_rcv_finish(struct sk_buff *skb)
+int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
{
if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
const struct inet6_protocol *ipprot;
@@ -183,7 +182,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
+ dev, NULL,
ip6_rcv_finish);
err:
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
@@ -198,7 +198,7 @@ drop:
*/
-static int ip6_input_finish(struct sk_buff *skb)
+static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
const struct inet6_protocol *ipprot;
@@ -221,7 +221,7 @@ resubmit:
raw = raw6_local_deliver(skb, nexthdr);
ipprot = rcu_dereference(inet6_protos[nexthdr]);
- if (ipprot != NULL) {
+ if (ipprot) {
int ret;
if (ipprot->flags & INET6_PROTO_FINAL) {
@@ -277,7 +277,8 @@ discard:
int ip6_input(struct sk_buff *skb)
{
- return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, NULL, skb,
+ skb->dev, NULL,
ip6_input_finish);
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 46d452a56d3e..e893cd18612f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -124,7 +124,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
fptr->frag_off = htons(offset);
- if (skb->next != NULL)
+ if (skb->next)
fptr->frag_off |= htons(IP6_MF);
offset += (ntohs(ipv6h->payload_len) -
sizeof(struct frag_hdr));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 36cf0ab685a0..c21777565c58 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,7 +56,7 @@
#include <net/checksum.h>
#include <linux/mroute6.h>
-static int ip6_finish_output2(struct sk_buff *skb)
+static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
@@ -70,7 +70,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
- if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
+ if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
((mroute6_socket(dev_net(dev), skb) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
@@ -82,7 +82,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
*/
if (newskb)
NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- newskb, NULL, newskb->dev,
+ sk, newskb, NULL, newskb->dev,
dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
@@ -122,14 +122,14 @@ static int ip6_finish_output2(struct sk_buff *skb)
return -EINVAL;
}
-static int ip6_finish_output(struct sk_buff *skb)
+static int ip6_finish_output(struct sock *sk, struct sk_buff *skb)
{
if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)) ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
- return ip6_fragment(skb, ip6_finish_output2);
+ return ip6_fragment(sk, skb, ip6_finish_output2);
else
- return ip6_finish_output2(skb);
+ return ip6_finish_output2(sk, skb);
}
int ip6_output(struct sock *sk, struct sk_buff *skb)
@@ -143,7 +143,8 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
return 0;
}
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
+ NULL, dev,
ip6_finish_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
@@ -177,7 +178,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
if (skb_headroom(skb) < head_room) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
- if (skb2 == NULL) {
+ if (!skb2) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
@@ -223,8 +224,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len);
- return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
- dst->dev, dst_output);
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+ NULL, dst->dev, dst_output_sk);
}
skb->dev = dst->dev;
@@ -316,10 +317,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
return 0;
}
-static inline int ip6_forward_finish(struct sk_buff *skb)
+static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb)
{
skb_sender_cpu_clear(skb);
- return dst_output(skb);
+ return dst_output_sk(sk, skb);
}
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@ -511,7 +512,8 @@ int ip6_forward(struct sk_buff *skb)
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
- return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
+ skb->dev, dst->dev,
ip6_forward_finish);
error:
@@ -538,7 +540,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
skb_copy_secmark(to, from);
}
-int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+int ip6_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *))
{
struct sk_buff *frag;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
@@ -629,7 +632,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
skb_reset_network_header(skb);
memcpy(skb_network_header(skb), tmp_hdr, hlen);
- ipv6_select_ident(fh, rt);
+ ipv6_select_ident(net, fh, rt);
fh->nexthdr = nexthdr;
fh->reserved = 0;
fh->frag_off = htons(IP6_MF);
@@ -658,7 +661,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
fh->nexthdr = nexthdr;
fh->reserved = 0;
fh->frag_off = htons(offset);
- if (frag->next != NULL)
+ if (frag->next)
fh->frag_off |= htons(IP6_MF);
fh->identification = frag_id;
ipv6_hdr(frag)->payload_len =
@@ -667,7 +670,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
ip6_copy_metadata(frag, skb);
}
- err = output(skb);
+ err = output(sk, skb);
if (!err)
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGCREATES);
@@ -776,7 +779,7 @@ slow_path:
fh->nexthdr = nexthdr;
fh->reserved = 0;
if (!frag_id) {
- ipv6_select_ident(fh, rt);
+ ipv6_select_ident(net, fh, rt);
frag_id = fh->identification;
} else
fh->identification = frag_id;
@@ -800,7 +803,7 @@ slow_path:
/*
* Put this fragment into the sending queue.
*/
- err = output(frag);
+ err = output(sk, frag);
if (err)
goto fail;
@@ -824,7 +827,7 @@ static inline int ip6_rt_check(const struct rt6key *rt_key,
const struct in6_addr *addr_cache)
{
return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
- (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
+ (!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
@@ -883,22 +886,45 @@ static int ip6_dst_lookup_tail(struct sock *sk,
#endif
int err;
- if (*dst == NULL)
- *dst = ip6_route_output(net, sk, fl6);
-
- err = (*dst)->error;
- if (err)
- goto out_err_release;
+ /* The correct way to handle this would be to do
+ * ip6_route_get_saddr, and then ip6_route_output; however,
+ * the route-specific preferred source forces the
+ * ip6_route_output call _before_ ip6_route_get_saddr.
+ *
+ * In source specific routing (no src=any default route),
+ * ip6_route_output will fail given src=any saddr, though, so
+ * that's why we try it again later.
+ */
+ if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
+ struct rt6_info *rt;
+ bool had_dst = *dst != NULL;
- if (ipv6_addr_any(&fl6->saddr)) {
- struct rt6_info *rt = (struct rt6_info *) *dst;
+ if (!had_dst)
+ *dst = ip6_route_output(net, sk, fl6);
+ rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
err = ip6_route_get_saddr(net, rt, &fl6->daddr,
sk ? inet6_sk(sk)->srcprefs : 0,
&fl6->saddr);
if (err)
goto out_err_release;
+
+ /* If we had an erroneous initial result, pretend it
+ * never existed and let the SA-enabled version take
+ * over.
+ */
+ if (!had_dst && (*dst)->error) {
+ dst_release(*dst);
+ *dst = NULL;
+ }
}
+ if (!*dst)
+ *dst = ip6_route_output(net, sk, fl6);
+
+ err = (*dst)->error;
+ if (err)
+ goto out_err_release;
+
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
/*
* Here if the dst entry we've looked up
@@ -1046,11 +1072,11 @@ static inline int ip6_ufo_append_data(struct sock *sk,
* udp datagram
*/
skb = skb_peek_tail(queue);
- if (skb == NULL) {
+ if (!skb) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
- if (skb == NULL)
+ if (!skb)
return err;
/* reserve space for Hardware header */
@@ -1080,7 +1106,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
sizeof(struct frag_hdr)) & ~7;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- ipv6_select_ident(&fhdr, rt);
+ ipv6_select_ident(sock_net(sk), &fhdr, rt);
skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
append:
@@ -1108,7 +1134,7 @@ static void ip6_append_data_mtu(unsigned int *mtu,
unsigned int orig_mtu)
{
if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
- if (skb == NULL) {
+ if (!skb) {
/* first fragment, reserve header_len */
*mtu = orig_mtu - rt->dst.header_len;
@@ -1140,7 +1166,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
return -EINVAL;
v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
- if (unlikely(v6_cork->opt == NULL))
+ if (unlikely(!v6_cork->opt))
return -ENOBUFS;
v6_cork->opt->tot_len = opt->tot_len;
@@ -1332,7 +1358,7 @@ alloc_new_skb:
else
fraggap = 0;
/* update mtu and maxfraglen if necessary */
- if (skb == NULL || skb_prev == NULL)
+ if (!skb || !skb_prev)
ip6_append_data_mtu(&mtu, &maxfraglen,
fragheaderlen, skb, rt,
orig_mtu);
@@ -1384,10 +1410,10 @@ alloc_new_skb:
skb = sock_wmalloc(sk,
alloclen + hh_len, 1,
sk->sk_allocation);
- if (unlikely(skb == NULL))
+ if (unlikely(!skb))
err = -ENOBUFS;
}
- if (skb == NULL)
+ if (!skb)
goto error;
/*
* Fill in the control structures
@@ -1579,7 +1605,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
unsigned char proto = fl6->flowi6_proto;
skb = __skb_dequeue(queue);
- if (skb == NULL)
+ if (!skb)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
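Across ip6_output.c the finish callbacks gain an explicit struct sock * instead of reaching for skb->sk, NF_HOOK()/NF_HOOK_COND() pass that socket through to the okfn, and dst_output() calls become dst_output_sk(). A minimal sketch of the new callback shape, with example_finish() being a hypothetical name used only for illustration:

	static int example_finish(struct sock *sk, struct sk_buff *skb)
	{
		return dst_output_sk(sk, skb);
	}

	/* ... */
	return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
		       NULL, skb_dst(skb)->dev, example_finish);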
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index ddd94eca19b3..5cafd92c2312 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -64,12 +64,6 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");
-#ifdef IP6_TNL_DEBUG
-#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
-#else
-#define IP6_TNL_TRACE(x...) do {;} while(0)
-#endif
-
#define HASH_SIZE_SHIFT 5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
@@ -137,7 +131,7 @@ struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
struct dst_entry *dst = t->dst_cache;
if (dst && dst->obsolete &&
- dst->ops->check(dst, t->dst_cookie) == NULL) {
+ !dst->ops->check(dst, t->dst_cookie)) {
t->dst_cache = NULL;
dst_release(dst);
return NULL;
@@ -331,7 +325,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6_tnl_dev_setup);
- if (dev == NULL)
+ if (!dev)
goto failed;
dev_net_set(dev, net);
@@ -502,7 +496,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
rcu_read_lock();
t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
- if (t == NULL)
+ if (!t)
goto out;
tproto = ACCESS_ONCE(t->parms.proto);
@@ -813,7 +807,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
rcu_read_lock();
t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
- if (t != NULL) {
+ if (t) {
struct pcpu_sw_netstats *tstats;
tproto = ACCESS_ONCE(t->parms.proto);
@@ -1106,7 +1100,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
ipv6h->daddr = fl6->daddr;
- ip6tunnel_xmit(skb, dev);
+ ip6tunnel_xmit(NULL, skb, dev);
if (ndst)
ip6_tnl_dst_store(t, ndst);
return 0;
@@ -1270,8 +1264,6 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
else
dev->flags &= ~IFF_POINTOPOINT;
- dev->iflink = p->link;
-
if (p->flags & IP6_TNL_F_CAP_XMIT) {
int strict = (ipv6_addr_type(&p->raddr) &
(IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
@@ -1280,7 +1272,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
&p->raddr, &p->laddr,
p->link, strict);
- if (rt == NULL)
+ if (!rt)
return;
if (rt->dst.dev) {
@@ -1523,6 +1515,13 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+int ip6_tnl_get_iflink(const struct net_device *dev)
+{
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ return t->parms.link;
+}
+EXPORT_SYMBOL(ip6_tnl_get_iflink);
static const struct net_device_ops ip6_tnl_netdev_ops = {
.ndo_init = ip6_tnl_dev_init,
@@ -1531,6 +1530,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
.ndo_do_ioctl = ip6_tnl_ioctl,
.ndo_change_mtu = ip6_tnl_change_mtu,
.ndo_get_stats = ip6_get_stats,
+ .ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -1646,12 +1646,10 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[],
parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
if (data[IFLA_IPTUN_LOCAL])
- nla_memcpy(&parms->laddr, data[IFLA_IPTUN_LOCAL],
- sizeof(struct in6_addr));
+ parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
if (data[IFLA_IPTUN_REMOTE])
- nla_memcpy(&parms->raddr, data[IFLA_IPTUN_REMOTE],
- sizeof(struct in6_addr));
+ parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
if (data[IFLA_IPTUN_TTL])
parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
@@ -1745,10 +1743,8 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct __ip6_tnl_parm *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
- nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
- &parm->laddr) ||
- nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
- &parm->raddr) ||
+ nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
+ nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
@@ -1821,7 +1817,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
for (h = 0; h < HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
- while (t != NULL) {
+ while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 32d9b268e7d8..bba8903e871f 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -62,7 +62,8 @@ error:
}
EXPORT_SYMBOL_GPL(udp_sock_create6);
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be16 src_port,
@@ -97,7 +98,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- ip6tunnel_xmit(skb, dev);
+ ip6tunnel_xmit(sk, skb, dev);
return 0;
}
EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5fb9e212eca8..ed9d681207fa 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -218,7 +218,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
sprintf(name, "ip6_vti%%d");
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
- if (dev == NULL)
+ if (!dev)
goto failed;
dev_net_set(dev, net);
@@ -288,8 +288,7 @@ static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
static void vti6_dev_uninit(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct net *net = dev_net(dev);
- struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ struct vti6_net *ip6n = net_generic(t->net, vti6_net_id);
if (dev == ip6n->fb_tnl_dev)
RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
@@ -305,7 +304,7 @@ static int vti6_rcv(struct sk_buff *skb)
rcu_read_lock();
t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
- if (t != NULL) {
+ if (t) {
if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
rcu_read_unlock();
goto discard;
@@ -601,8 +600,6 @@ static void vti6_link_config(struct ip6_tnl *t)
dev->flags |= IFF_POINTOPOINT;
else
dev->flags &= ~IFF_POINTOPOINT;
-
- dev->iflink = p->link;
}
/**
@@ -716,7 +713,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
} else {
memset(&p, 0, sizeof(p));
}
- if (t == NULL)
+ if (!t)
t = netdev_priv(dev);
vti6_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
@@ -736,7 +733,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
vti6_parm_from_user(&p1, &p);
t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
- if (t != NULL) {
+ if (t) {
if (t->dev != dev) {
err = -EEXIST;
break;
@@ -767,7 +764,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
err = -ENOENT;
vti6_parm_from_user(&p1, &p);
t = vti6_locate(net, &p1, 0);
- if (t == NULL)
+ if (!t)
break;
err = -EPERM;
if (t->dev == ip6n->fb_tnl_dev)
@@ -808,6 +805,7 @@ static const struct net_device_ops vti6_netdev_ops = {
.ndo_do_ioctl = vti6_ioctl,
.ndo_change_mtu = vti6_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip6_tnl_get_iflink,
};
/**
@@ -897,12 +895,10 @@ static void vti6_netlink_parms(struct nlattr *data[],
parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
if (data[IFLA_VTI_LOCAL])
- nla_memcpy(&parms->laddr, data[IFLA_VTI_LOCAL],
- sizeof(struct in6_addr));
+ parms->laddr = nla_get_in6_addr(data[IFLA_VTI_LOCAL]);
if (data[IFLA_VTI_REMOTE])
- nla_memcpy(&parms->raddr, data[IFLA_VTI_REMOTE],
- sizeof(struct in6_addr));
+ parms->raddr = nla_get_in6_addr(data[IFLA_VTI_REMOTE]);
if (data[IFLA_VTI_IKEY])
parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
@@ -983,10 +979,8 @@ static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct __ip6_tnl_parm *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
- nla_put(skb, IFLA_VTI_LOCAL, sizeof(struct in6_addr),
- &parm->laddr) ||
- nla_put(skb, IFLA_VTI_REMOTE, sizeof(struct in6_addr),
- &parm->raddr) ||
+ nla_put_in6_addr(skb, IFLA_VTI_LOCAL, &parm->laddr) ||
+ nla_put_in6_addr(skb, IFLA_VTI_REMOTE, &parm->raddr) ||
nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
goto nla_put_failure;
@@ -1027,7 +1021,7 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
for (h = 0; h < HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
- while (t != NULL) {
+ while (t) {
unregister_netdevice_queue(t->dev, &list);
t = rtnl_dereference(t->next);
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 312e0ff47339..74ceb73c1c9a 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -56,9 +56,7 @@
struct mr6_table {
struct list_head list;
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
u32 id;
struct sock *mroute6_sk;
struct timer_list ipmr_expire_timer;
@@ -175,7 +173,7 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
}
mrt = ip6mr_get_table(rule->fr_net, rule->table);
- if (mrt == NULL)
+ if (!mrt)
return -EAGAIN;
res->mrt = mrt;
return 0;
@@ -239,7 +237,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
INIT_LIST_HEAD(&net->ipv6.mr6_tables);
mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
- if (mrt == NULL) {
+ if (!mrt) {
err = -ENOMEM;
goto err1;
}
@@ -307,11 +305,11 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
unsigned int i;
mrt = ip6mr_get_table(net, id);
- if (mrt != NULL)
+ if (mrt)
return mrt;
mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
- if (mrt == NULL)
+ if (!mrt)
return NULL;
mrt->id = id;
write_pnet(&mrt->net, net);
@@ -410,7 +408,7 @@ static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
- if (mrt == NULL)
+ if (!mrt)
return ERR_PTR(-ENOENT);
iter->mrt = mrt;
@@ -494,7 +492,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
- if (mrt == NULL)
+ if (!mrt)
return ERR_PTR(-ENOENT);
it->mrt = mrt;
@@ -667,7 +665,7 @@ static int pim6_rcv(struct sk_buff *skb)
dev_hold(reg_dev);
read_unlock(&mrt_lock);
- if (reg_dev == NULL)
+ if (!reg_dev)
goto drop;
skb->mac_header = skb->network_header;
@@ -720,8 +718,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int reg_vif_get_iflink(const struct net_device *dev)
+{
+ return 0;
+}
+
static const struct net_device_ops reg_vif_netdev_ops = {
.ndo_start_xmit = reg_vif_xmit,
+ .ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
@@ -745,7 +749,7 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
sprintf(name, "pim6reg%u", mrt->id);
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
- if (dev == NULL)
+ if (!dev)
return NULL;
dev_net_set(dev, net);
@@ -754,7 +758,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
free_netdev(dev);
return NULL;
}
- dev->iflink = 0;
if (dev_open(dev))
goto failure;
@@ -994,7 +997,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
v->pkt_out = 0;
v->link = dev->ifindex;
if (v->flags & MIFF_REGISTER)
- v->link = dev->iflink;
+ v->link = dev_get_iflink(dev);
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
@@ -1074,7 +1077,7 @@ skip:
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c == NULL)
+ if (!c)
return NULL;
c->mfc_un.res.minvif = MAXMIFS;
return c;
@@ -1083,7 +1086,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
- if (c == NULL)
+ if (!c)
return NULL;
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10 * HZ;
@@ -1200,7 +1203,7 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- if (mrt->mroute6_sk == NULL) {
+ if (!mrt->mroute6_sk) {
kfree_skb(skb);
return -EINVAL;
}
@@ -1495,7 +1498,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
return -EINVAL;
c = ip6mr_cache_alloc();
- if (c == NULL)
+ if (!c)
return -ENOMEM;
c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
@@ -1665,7 +1668,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
if (optname != MRT6_INIT) {
@@ -1814,7 +1817,7 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
switch (optname) {
@@ -1861,7 +1864,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
switch (cmd) {
@@ -1935,7 +1938,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
struct mr6_table *mrt;
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
switch (cmd) {
@@ -1983,13 +1986,13 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
}
#endif
-static inline int ip6mr_forward2_finish(struct sk_buff *skb)
+static inline int ip6mr_forward2_finish(struct sock *sk, struct sk_buff *skb)
{
IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTFORWDATAGRAMS);
IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTOCTETS, skb->len);
- return dst_output(skb);
+ return dst_output_sk(sk, skb);
}
/*
@@ -2005,7 +2008,7 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
struct dst_entry *dst;
struct flowi6 fl6;
- if (vif->dev == NULL)
+ if (!vif->dev)
goto out_free;
#ifdef CONFIG_IPV6_PIMSM_V2
@@ -2061,7 +2064,8 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
- return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
+ skb->dev, dev,
ip6mr_forward2_finish);
out_free:
@@ -2194,7 +2198,7 @@ int ip6_mr_input(struct sk_buff *skb)
read_lock(&mrt_lock);
cache = ip6mr_cache_find(mrt,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
- if (cache == NULL) {
+ if (!cache) {
int vif = ip6mr_find_vif(mrt, skb->dev);
if (vif >= 0)
@@ -2206,7 +2210,7 @@ int ip6_mr_input(struct sk_buff *skb)
/*
* No usable cache entry
*/
- if (cache == NULL) {
+ if (!cache) {
int vif;
vif = ip6mr_find_vif(mrt, skb->dev);
@@ -2245,13 +2249,13 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
return -EMSGSIZE;
mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
- if (mp_attr == NULL)
+ if (!mp_attr)
return -EMSGSIZE;
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
- if (nhp == NULL) {
+ if (!nhp) {
nla_nest_cancel(skb, mp_attr);
return -EMSGSIZE;
}
@@ -2284,7 +2288,7 @@ int ip6mr_get_route(struct net *net,
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
- if (mrt == NULL)
+ if (!mrt)
return -ENOENT;
read_lock(&mrt_lock);
@@ -2309,7 +2313,7 @@ int ip6mr_get_route(struct net *net,
}
dev = skb->dev;
- if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
+ if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
read_unlock(&mrt_lock);
return -ENODEV;
}
@@ -2361,7 +2365,7 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
int err;
nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
@@ -2380,8 +2384,8 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
rtm->rtm_protocol = RTPROT_MROUTED;
rtm->rtm_flags = 0;
- if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
- nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
+ if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
+ nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
goto nla_put_failure;
err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
/* do not break the dump if cache is unresolved */
@@ -2426,7 +2430,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
GFP_ATOMIC);
- if (skb == NULL)
+ if (!skb)
goto errout;
err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
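struct mr6_table now stores its namespace as possible_net_t rather than an #ifdef CONFIG_NET_NS struct net * field; the type collapses to an empty struct when namespaces are disabled and is always accessed through write_pnet()/read_pnet(), as in ip6mr_new_table() above. Roughly (not the verbatim definition from net/net_namespace.h):

	typedef struct {
	#ifdef CONFIG_NET_NS
		struct net *net;
	#endif
	} possible_net_t;

	write_pnet(&mrt->net, net);
	net = read_pnet(&mrt->net);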
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 8d766d9100cb..63e6956917c9 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -85,7 +85,7 @@ int ip6_ra_control(struct sock *sk, int sel)
return 0;
}
}
- if (new_ra == NULL) {
+ if (!new_ra) {
write_unlock_bh(&ip6_ra_lock);
return -ENOBUFS;
}
@@ -117,6 +117,25 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
return opt;
}
+static bool setsockopt_needs_rtnl(int optname)
+{
+ switch (optname) {
+ case IPV6_ADD_MEMBERSHIP:
+ case IPV6_DROP_MEMBERSHIP:
+ case IPV6_JOIN_ANYCAST:
+ case IPV6_LEAVE_ANYCAST:
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ case MCAST_MSFILTER:
+ return true;
+ }
+ return false;
+}
+
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
@@ -124,8 +143,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
struct net *net = sock_net(sk);
int val, valbool;
int retv = -ENOPROTOOPT;
+ bool needs_rtnl = setsockopt_needs_rtnl(optname);
- if (optval == NULL)
+ if (!optval)
val = 0;
else {
if (optlen >= sizeof(int)) {
@@ -140,6 +160,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (ip6_mroute_opt(optname))
return ip6_mroute_setsockopt(sk, optname, optval, optlen);
+ if (needs_rtnl)
+ rtnl_lock();
lock_sock(sk);
switch (optname) {
@@ -370,7 +392,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
*/
if (optlen == 0)
optval = NULL;
- else if (optval == NULL)
+ else if (!optval)
goto e_inval;
else if (optlen < sizeof(struct ipv6_opt_hdr) ||
optlen & 0x7 || optlen > 8 * 255)
@@ -421,7 +443,7 @@ sticky_done:
if (optlen == 0)
goto e_inval;
- else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
+ else if (optlen < sizeof(struct in6_pktinfo) || !optval)
goto e_inval;
if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
@@ -460,7 +482,7 @@ sticky_done:
opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
retv = -ENOBUFS;
- if (opt == NULL)
+ if (!opt)
break;
memset(opt, 0, sizeof(*opt));
@@ -624,10 +646,10 @@ done:
psin6 = (struct sockaddr_in6 *)&greq.gr_group;
if (optname == MCAST_JOIN_GROUP)
retv = ipv6_sock_mc_join(sk, greq.gr_interface,
- &psin6->sin6_addr);
+ &psin6->sin6_addr);
else
retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
- &psin6->sin6_addr);
+ &psin6->sin6_addr);
break;
}
case MCAST_JOIN_SOURCE_GROUP:
@@ -660,7 +682,7 @@ done:
psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
- &psin6->sin6_addr);
+ &psin6->sin6_addr);
/* prior join w/ different source is ok */
if (retv && retv != -EADDRINUSE)
break;
@@ -837,11 +859,15 @@ pref_skip_coa:
}
release_sock(sk);
+ if (needs_rtnl)
+ rtnl_unlock();
return retv;
e_inval:
release_sock(sk);
+ if (needs_rtnl)
+ rtnl_unlock();
return -EINVAL;
}
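do_ipv6_setsockopt() now takes the RTNL, gated by setsockopt_needs_rtnl(), for the membership options whose helpers assert it (ipv6_sock_mc_join/drop and ipv6_sock_ac_join/drop), acquiring it before lock_sock() and releasing it after release_sock(), which keeps the usual rtnl-before-socket lock ordering. In outline:

	if (needs_rtnl)
		rtnl_lock();
	lock_sock(sk);
	/* ... option handling, may call ipv6_sock_mc_join() etc. ... */
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();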
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 5ce107c8aab3..083b2927fc67 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -140,6 +140,8 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
struct net *net = sock_net(sk);
int err;
+ ASSERT_RTNL();
+
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;
@@ -155,13 +157,12 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
- if (mc_lst == NULL)
+ if (!mc_lst)
return -ENOMEM;
mc_lst->next = NULL;
mc_lst->addr = *addr;
- rtnl_lock();
if (ifindex == 0) {
struct rt6_info *rt;
rt = rt6_lookup(net, addr, NULL, 0, 0);
@@ -172,8 +173,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
} else
dev = __dev_get_by_index(net, ifindex);
- if (dev == NULL) {
- rtnl_unlock();
+ if (!dev) {
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
}
@@ -190,7 +190,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
err = ipv6_dev_mc_inc(dev, addr);
if (err) {
- rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return err;
}
@@ -198,10 +197,9 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
mc_lst->next = np->ipv6_mc_list;
rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
- rtnl_unlock();
-
return 0;
}
+EXPORT_SYMBOL(ipv6_sock_mc_join);
/*
* socket leave on multicast group
@@ -213,10 +211,11 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
struct ipv6_mc_socklist __rcu **lnk;
struct net *net = sock_net(sk);
+ ASSERT_RTNL();
+
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;
- rtnl_lock();
for (lnk = &np->ipv6_mc_list;
(mc_lst = rtnl_dereference(*lnk)) != NULL;
lnk = &mc_lst->next) {
@@ -227,7 +226,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
*lnk = mc_lst->next;
dev = __dev_get_by_index(net, mc_lst->ifindex);
- if (dev != NULL) {
+ if (dev) {
struct inet6_dev *idev = __in6_dev_get(dev);
(void) ip6_mc_leave_src(sk, mc_lst, idev);
@@ -235,17 +234,16 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
- rtnl_unlock();
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
return 0;
}
}
- rtnl_unlock();
return -EADDRNOTAVAIL;
}
+EXPORT_SYMBOL(ipv6_sock_mc_drop);
/* called with rcu_read_lock() */
static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
@@ -438,7 +436,7 @@ done:
read_unlock_bh(&idev->lock);
rcu_read_unlock();
if (leavegroup)
- return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
+ err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
return err;
}
@@ -825,7 +823,7 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
struct ifmcaddr6 *mc;
mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
- if (mc == NULL)
+ if (!mc)
return NULL;
setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
@@ -862,7 +860,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
/* we need to take a reference on idev */
idev = in6_dev_get(dev);
- if (idev == NULL)
+ if (!idev)
return -EINVAL;
write_lock_bh(&idev->lock);
@@ -1330,7 +1328,7 @@ int igmp6_event_query(struct sk_buff *skb)
return -EINVAL;
idev = __in6_dev_get(skb->dev);
- if (idev == NULL)
+ if (!idev)
return 0;
mld = (struct mld_msg *)icmp6_hdr(skb);
@@ -1445,7 +1443,7 @@ int igmp6_event_report(struct sk_buff *skb)
return -EINVAL;
idev = __in6_dev_get(skb->dev);
- if (idev == NULL)
+ if (!idev)
return -ENODEV;
/*
@@ -1646,8 +1644,9 @@ static void mld_sendpack(struct sk_buff *skb)
payload_len = skb->len;
- err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
- dst_output);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+ net->ipv6.igmp_sk, skb, NULL, skb->dev,
+ dst_output_sk);
out:
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
@@ -1964,7 +1963,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
- if (skb == NULL) {
+ if (!skb) {
rcu_read_lock();
IP6_INC_STATS(net, __in6_dev_get(dev),
IPSTATS_MIB_OUTDISCARDS);
@@ -2009,8 +2008,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
}
skb_dst_set(skb, dst);
- err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
- dst_output);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+ NULL, skb->dev, dst_output_sk);
out:
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, type);
@@ -2613,7 +2612,7 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr
im = im->next;
while (!im) {
- if (likely(state->idev != NULL))
+ if (likely(state->idev))
read_unlock_bh(&state->idev->lock);
state->dev = next_net_device_rcu(state->dev);
@@ -2659,7 +2658,7 @@ static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
{
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
- if (likely(state->idev != NULL)) {
+ if (likely(state->idev)) {
read_unlock_bh(&state->idev->lock);
state->idev = NULL;
}
@@ -2728,10 +2727,10 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
continue;
read_lock_bh(&idev->lock);
im = idev->mc_list;
- if (likely(im != NULL)) {
+ if (likely(im)) {
spin_lock_bh(&im->mca_lock);
psf = im->mca_sources;
- if (likely(psf != NULL)) {
+ if (likely(psf)) {
state->im = im;
state->idev = idev;
break;
@@ -2752,7 +2751,7 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
spin_unlock_bh(&state->im->mca_lock);
state->im = state->im->next;
while (!state->im) {
- if (likely(state->idev != NULL))
+ if (likely(state->idev))
read_unlock_bh(&state->idev->lock);
state->dev = next_net_device_rcu(state->dev);
@@ -2806,11 +2805,11 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
- if (likely(state->im != NULL)) {
+ if (likely(state->im)) {
spin_unlock_bh(&state->im->mca_lock);
state->im = NULL;
}
- if (likely(state->idev != NULL)) {
+ if (likely(state->idev)) {
read_unlock_bh(&state->idev->lock);
state->idev = NULL;
}
@@ -2907,20 +2906,32 @@ static int __net_init igmp6_net_init(struct net *net)
inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
+ err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
+ SOCK_RAW, IPPROTO_ICMPV6, net);
+ if (err < 0) {
+ pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
+ err);
+ goto out_sock_create;
+ }
+
err = igmp6_proc_init(net);
if (err)
- goto out_sock_create;
-out:
- return err;
+ goto out_sock_create_autojoin;
+
+ return 0;
+out_sock_create_autojoin:
+ inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
inet_ctl_sock_destroy(net->ipv6.igmp_sk);
- goto out;
+out:
+ return err;
}
static void __net_exit igmp6_net_exit(struct net *net)
{
inet_ctl_sock_destroy(net->ipv6.igmp_sk);
+ inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
igmp6_proc_exit(net);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 14ecdaf06bf7..96f153c0846b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -84,6 +84,7 @@ do { \
static u32 ndisc_hash(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd);
+static bool ndisc_key_eq(const struct neighbour *neigh, const void *pkey);
static int ndisc_constructor(struct neighbour *neigh);
static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -117,7 +118,9 @@ static const struct neigh_ops ndisc_direct_ops = {
struct neigh_table nd_tbl = {
.family = AF_INET6,
.key_len = sizeof(struct in6_addr),
+ .protocol = cpu_to_be16(ETH_P_IPV6),
.hash = ndisc_hash,
+ .key_eq = ndisc_key_eq,
.constructor = ndisc_constructor,
.pconstructor = pndisc_constructor,
.pdestructor = pndisc_destructor,
@@ -294,6 +297,11 @@ static u32 ndisc_hash(const void *pkey,
return ndisc_hashfn(pkey, dev, hash_rnd);
}
+static bool ndisc_key_eq(const struct neighbour *n, const void *pkey)
+{
+ return neigh_key_eq128(n, pkey);
+}
+
static int ndisc_constructor(struct neighbour *neigh)
{
struct in6_addr *addr = (struct in6_addr *)&neigh->primary_key;
@@ -303,7 +311,7 @@ static int ndisc_constructor(struct neighbour *neigh)
bool is_multicast = ipv6_addr_is_multicast(addr);
in6_dev = in6_dev_get(dev);
- if (in6_dev == NULL) {
+ if (!in6_dev) {
return -EINVAL;
}
@@ -348,7 +356,7 @@ static int pndisc_constructor(struct pneigh_entry *n)
struct in6_addr maddr;
struct net_device *dev = n->dev;
- if (dev == NULL || __in6_dev_get(dev) == NULL)
+ if (!dev || !__in6_dev_get(dev))
return -EINVAL;
addrconf_addr_solict_mult(addr, &maddr);
ipv6_dev_mc_inc(dev, &maddr);
@@ -361,7 +369,7 @@ static void pndisc_destructor(struct pneigh_entry *n)
struct in6_addr maddr;
struct net_device *dev = n->dev;
- if (dev == NULL || __in6_dev_get(dev) == NULL)
+ if (!dev || !__in6_dev_get(dev))
return;
addrconf_addr_solict_mult(addr, &maddr);
ipv6_dev_mc_dec(dev, &maddr);
@@ -455,8 +463,9 @@ static void ndisc_send_skb(struct sk_buff *skb,
idev = __in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
- dst_output);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+ NULL, dst->dev,
+ dst_output_sk);
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, type);
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
@@ -552,7 +561,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
int optlen = 0;
struct nd_msg *msg;
- if (saddr == NULL) {
+ if (!saddr) {
if (ipv6_get_lladdr(dev, &addr_buf,
(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)))
return;
@@ -1022,13 +1031,13 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
size_t msg_size = base_size + nla_total_size(sizeof(struct in6_addr));
skb = nlmsg_new(msg_size, GFP_ATOMIC);
- if (skb == NULL) {
+ if (!skb) {
err = -ENOBUFS;
goto errout;
}
nlh = nlmsg_put(skb, 0, 0, RTM_NEWNDUSEROPT, base_size, 0);
- if (nlh == NULL) {
+ if (!nlh) {
goto nla_put_failure;
}
@@ -1041,8 +1050,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
- if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
- &ipv6_hdr(ra)->saddr))
+ if (nla_put_in6_addr(skb, NDUSEROPT_SRCADDR, &ipv6_hdr(ra)->saddr))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -1096,7 +1104,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
*/
in6_dev = __in6_dev_get(skb->dev);
- if (in6_dev == NULL) {
+ if (!in6_dev) {
ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
skb->dev->name);
return;
@@ -1191,11 +1199,11 @@ static void ndisc_router_discovery(struct sk_buff *skb)
ND_PRINTK(3, info, "RA: rt: %p lifetime: %d, for dev: %s\n",
rt, lifetime, skb->dev->name);
- if (rt == NULL && lifetime) {
+ if (!rt && lifetime) {
ND_PRINTK(3, info, "RA: adding default router\n");
rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
- if (rt == NULL) {
+ if (!rt) {
ND_PRINTK(0, err,
"RA: %s failed to add default route\n",
__func__);
@@ -1203,7 +1211,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
- if (neigh == NULL) {
+ if (!neigh) {
ND_PRINTK(0, err,
"RA: %s got default router without neighbour\n",
__func__);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 398377a9d018..d958718b5031 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -84,7 +84,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
{
struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
- if (entry->hook == NF_INET_LOCAL_OUT) {
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
rt_info->daddr = iph->daddr;
@@ -98,7 +98,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
{
struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
- if (entry->hook == NF_INET_LOCAL_OUT) {
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
!ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index a069822936e6..ca6998345b42 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -25,14 +25,16 @@ config NF_CONNTRACK_IPV6
To compile it as a module, choose M here. If unsure, say N.
+if NF_TABLES
+
config NF_TABLES_IPV6
- depends on NF_TABLES
tristate "IPv6 nf_tables support"
help
This option enables the IPv6 support for nf_tables.
+if NF_TABLES_IPV6
+
config NFT_CHAIN_ROUTE_IPV6
- depends on NF_TABLES_IPV6
tristate "IPv6 nf_tables route chain support"
help
This option enables the "route" chain for IPv6 in nf_tables. This
@@ -40,16 +42,18 @@ config NFT_CHAIN_ROUTE_IPV6
fields such as the source, destination, flowlabel, hop-limit and
the packet mark.
-config NF_REJECT_IPV6
- tristate "IPv6 packet rejection"
- default m if NETFILTER_ADVANCED=n
-
config NFT_REJECT_IPV6
- depends on NF_TABLES_IPV6
select NF_REJECT_IPV6
default NFT_REJECT
tristate
+endif # NF_TABLES_IPV6
+endif # NF_TABLES
+
+config NF_REJECT_IPV6
+ tristate "IPv6 packet rejection"
+ default m if NETFILTER_ADVANCED=n
+
config NF_LOG_IPV6
tristate "IPv6 packet logging"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index bb00c6f2a885..1a732a1d3c8e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -9,7 +9,10 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
@@ -234,7 +237,7 @@ static struct nf_loginfo trace_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 4,
+ .level = LOGLEVEL_WARNING,
.logflags = NF_LOG_MASK,
},
},
@@ -314,8 +317,7 @@ ip6t_next_entry(const struct ip6t_entry *entry)
unsigned int
ip6t_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table)
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -330,8 +332,8 @@ ip6t_do_table(struct sk_buff *skb,
unsigned int addend;
/* Initialization */
- indev = in ? in->name : nulldevname;
- outdev = out ? out->name : nulldevname;
+ indev = state->in ? state->in->name : nulldevname;
+ outdev = state->out ? state->out->name : nulldevname;
/* We handle fragments by dealing with the first fragment as
* if it was a normal packet. All other fragments are treated
* normally, except that they will NEVER match rules that ask
@@ -339,8 +341,8 @@ ip6t_do_table(struct sk_buff *skb,
* rule is also a fragment-specific rule, non-fragments won't
* match it. */
acpar.hotdrop = false;
- acpar.in = in;
- acpar.out = out;
+ acpar.in = state->in;
+ acpar.out = state->out;
acpar.family = NFPROTO_IPV6;
acpar.hooknum = hook;
@@ -390,7 +392,7 @@ ip6t_do_table(struct sk_buff *skb,
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* The packet is traced: log it */
if (unlikely(skb->nf_trace))
- trace_packet(skb, hook, in, out,
+ trace_packet(skb, hook, state->in, state->out,
table->name, private, e);
#endif
/* Standard target? */
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 544b0a9da1b5..12331efd49cf 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -83,7 +83,8 @@ static int reject_tg6_check(const struct xt_tgchk_param *par)
return -EINVAL;
} else if (rejinfo->with == IP6T_TCP_RESET) {
/* Must specify that it's a TCP packet */
- if (e->ipv6.proto != IPPROTO_TCP ||
+ if (!(e->ipv6.flags & IP6T_F_PROTO) ||
+ e->ipv6.proto != IPPROTO_TCP ||
(e->ipv6.invflags & XT_INV_PROTO)) {
pr_info("TCP_RESET illegal for non-tcp\n");
return -EINVAL;
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index a0d17270117c..6edb7b106de7 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -315,11 +315,9 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *nhs)
{
- struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+ struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
struct nf_conn_synproxy *synproxy;
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index ca7f6c128086..5c33d8abc077 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -33,13 +33,11 @@ static const struct xt_table packet_filter = {
/* The work comes in here from netfilter.c. */
static unsigned int
ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- const struct net *net = dev_net((in != NULL) ? in : out);
+ const struct net *net = dev_net(state->in ? state->in : state->out);
- return ip6t_do_table(skb, ops->hooknum, in, out,
- net->ipv6.ip6table_filter);
+ return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_filter);
}
static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 307bbb782d14..b551f5b79fe2 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -32,7 +32,7 @@ static const struct xt_table packet_mangler = {
};
static unsigned int
-ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
+ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
{
unsigned int ret;
struct in6_addr saddr, daddr;
@@ -57,8 +57,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
/* flowlabel and prio (includes version, which shouldn't change either */
flowlabel = *((u_int32_t *)ipv6_hdr(skb));
- ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
- dev_net(out)->ipv6.ip6table_mangle);
+ ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state,
+ dev_net(state->out)->ipv6.ip6table_mangle);
if (ret != NF_DROP && ret != NF_STOLEN &&
(!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
@@ -77,17 +77,16 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
/* The work comes in here from netfilter.c. */
static unsigned int
ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
if (ops->hooknum == NF_INET_LOCAL_OUT)
- return ip6t_mangle_out(skb, out);
+ return ip6t_mangle_out(skb, state);
if (ops->hooknum == NF_INET_POST_ROUTING)
- return ip6t_do_table(skb, ops->hooknum, in, out,
- dev_net(out)->ipv6.ip6table_mangle);
+ return ip6t_do_table(skb, ops->hooknum, state,
+ dev_net(state->out)->ipv6.ip6table_mangle);
/* INPUT/FORWARD */
- return ip6t_do_table(skb, ops->hooknum, in, out,
- dev_net(in)->ipv6.ip6table_mangle);
+ return ip6t_do_table(skb, ops->hooknum, state,
+ dev_net(state->in)->ipv6.ip6table_mangle);
}
static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index b0634ac996b7..c3a7f7af0ed4 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -32,49 +32,40 @@ static const struct xt_table nf_nat_ipv6_table = {
static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat);
+ return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_nat);
}
static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain);
+ return nf_nat_ipv6_fn(ops, skb, state, ip6table_nat_do_chain);
}
static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain);
+ return nf_nat_ipv6_in(ops, skb, state, ip6table_nat_do_chain);
}
static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain);
+ return nf_nat_ipv6_out(ops, skb, state, ip6table_nat_do_chain);
}
static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain);
+ return nf_nat_ipv6_local_fn(ops, skb, state, ip6table_nat_do_chain);
}
static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 5274740acecc..0b33caad2b69 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -20,13 +20,11 @@ static const struct xt_table packet_raw = {
/* The work comes in here from netfilter.c. */
static unsigned int
ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- const struct net *net = dev_net((in != NULL) ? in : out);
+ const struct net *net = dev_net(state->in ? state->in : state->out);
- return ip6t_do_table(skb, ops->hooknum, in, out,
- net->ipv6.ip6table_raw);
+ return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_raw);
}
static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index ab3b0219ecfa..fcef83c25f7b 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -37,13 +37,11 @@ static const struct xt_table security_table = {
static unsigned int
ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- const struct net *net = dev_net((in != NULL) ? in : out);
+ const struct net *net = dev_net(state->in ? state->in : state->out);
- return ip6t_do_table(skb, ops->hooknum, in, out,
+ return ip6t_do_table(skb, ops->hooknum, state,
net->ipv6.ip6table_security);
}
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index b68d0e59c1f8..4ba0c34c627b 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -97,9 +97,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nf_conn *ct;
const struct nf_conn_help *help;
@@ -135,9 +133,7 @@ static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -171,25 +167,21 @@ out:
static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_conntrack_in(dev_net(in), PF_INET6, ops->hooknum, skb);
+ return nf_conntrack_in(dev_net(state->in), PF_INET6, ops->hooknum, skb);
}
static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct ipv6hdr)) {
net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
return NF_ACCEPT;
}
- return nf_conntrack_in(dev_net(out), PF_INET6, ops->hooknum, skb);
+ return nf_conntrack_in(dev_net(state->out), PF_INET6, ops->hooknum, skb);
}
static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
@@ -290,10 +282,8 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
- if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
- &tuple->src.u3.ip6) ||
- nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
- &tuple->dst.u3.ip6))
+ if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
+ nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
goto nla_put_failure;
return 0;
@@ -312,10 +302,8 @@ static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
return -EINVAL;
- memcpy(&t->src.u3.ip6, nla_data(tb[CTA_IP_V6_SRC]),
- sizeof(u_int32_t) * 4);
- memcpy(&t->dst.u3.ip6, nla_data(tb[CTA_IP_V6_DST]),
- sizeof(u_int32_t) * 4);
+ t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
+ t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
return 0;
}
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index e70382e4dfb5..a45db0b4785c 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -54,9 +54,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct sk_buff *reasm;
@@ -77,9 +75,9 @@ static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
nf_ct_frag6_consume_orig(reasm);
- NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, reasm,
- (struct net_device *) in, (struct net_device *) out,
- okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+ NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, state->sk, reasm,
+ state->in, state->out,
+ state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
return NF_STOLEN;
}
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index ddf07e6f59d7..8dd869642f45 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -5,8 +5,10 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -27,7 +29,7 @@ static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 5,
+ .level = LOGLEVEL_NOTICE,
.logflags = NF_LOG_MASK,
},
},
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index c5812e1c1ffb..e76900e0aa92 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -263,11 +263,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
unsigned int
nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
struct nf_conn *ct;
@@ -318,7 +317,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
if (!nf_nat_initialized(ct, maniptype)) {
unsigned int ret;
- ret = do_chain(ops, skb, in, out, ct);
+ ret = do_chain(ops, skb, state, ct);
if (ret != NF_ACCEPT)
return ret;
@@ -332,7 +331,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
pr_debug("Already setup manip %s for ct %p\n",
maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
ct);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
goto oif_changed;
}
break;
@@ -341,7 +340,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
ctinfo == IP_CT_ESTABLISHED_REPLY);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
goto oif_changed;
}
@@ -355,17 +354,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn);
unsigned int
nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
unsigned int ret;
struct in6_addr daddr = ipv6_hdr(skb)->daddr;
- ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+ ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
if (ret != NF_DROP && ret != NF_STOLEN &&
ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
skb_dst_drop(skb);
@@ -376,11 +374,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_in);
unsigned int
nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
#ifdef CONFIG_XFRM
@@ -394,7 +391,7 @@ nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
if (skb->len < sizeof(struct ipv6hdr))
return NF_ACCEPT;
- ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+ ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
#ifdef CONFIG_XFRM
if (ret != NF_DROP && ret != NF_STOLEN &&
!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -418,11 +415,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_out);
unsigned int
nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct))
{
const struct nf_conn *ct;
@@ -434,7 +430,7 @@ nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
if (skb->len < sizeof(struct ipv6hdr))
return NF_ACCEPT;
- ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+ ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index d05b36440e8b..94b4c6dfb400 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -13,6 +13,7 @@
#include <net/ip6_checksum.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_bridge.h>
#include <net/netfilter/ipv6/nf_reject.h>
const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
@@ -65,7 +66,7 @@ EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int hoplimit)
+ __u8 protocol, int hoplimit)
{
struct ipv6hdr *ip6h;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
@@ -195,7 +196,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
*/
if (oldskb->nf_bridge) {
struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
+
+ nskb->dev = nf_bridge_get_physindev(oldskb);
nskb->protocol = htons(ETH_P_IPV6);
ip6h->payload_len = htons(sizeof(struct tcphdr));
if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
@@ -208,4 +210,39 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
}
EXPORT_SYMBOL_GPL(nf_send_reset6);
+static bool reject6_csum_ok(struct sk_buff *skb, int hook)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int thoff;
+ __be16 fo;
+ u8 proto;
+
+ if (skb->csum_bad)
+ return false;
+
+ if (skb_csum_unnecessary(skb))
+ return true;
+
+ proto = ip6h->nexthdr;
+ thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+
+ if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+ return false;
+
+ return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
+}
+
+void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ unsigned char code, unsigned int hooknum)
+{
+ if (!reject6_csum_ok(skb_in, hooknum))
+ return;
+
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+}
+EXPORT_SYMBOL_GPL(nf_send_unreach6);
+
MODULE_LICENSE("GPL");
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index 0d812b31277d..c8148ba76d1a 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -18,14 +18,12 @@
static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct nft_pktinfo pkt;
/* malformed packet, drop it */
- if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+ if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
return NF_DROP;
return nft_do_chain(&pkt, ops);
@@ -33,9 +31,7 @@ static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
if (net_ratelimit())
@@ -44,7 +40,7 @@ static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
return NF_ACCEPT;
}
- return nft_do_chain_ipv6(ops, skb, in, out, okfn);
+ return nft_do_chain_ipv6(ops, skb, state);
}
struct nft_af_info nft_af_ipv6 __read_mostly = {
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 1c4b75dd425b..951bb458b7bd 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -26,51 +26,42 @@
static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct)
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+ nft_set_pktinfo_ipv6(&pkt, ops, skb, state);
return nft_do_chain(&pkt, ops);
}
static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv6_fn(ops, skb, state, nft_nat_do_chain);
}
static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv6_in(ops, skb, state, nft_nat_do_chain);
}
static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv6_out(ops, skb, state, nft_nat_do_chain);
}
static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain);
+ return nf_nat_ipv6_local_fn(ops, skb, state, nft_nat_do_chain);
}
static const struct nf_chain_type nft_chain_nat_ipv6 = {
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 42031299585e..0dafdaac5e17 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -24,9 +24,7 @@
static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
unsigned int ret;
struct nft_pktinfo pkt;
@@ -35,7 +33,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
u32 mark, flowlabel;
/* malformed packet, drop it */
- if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+ if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
return NF_DROP;
/* save source/dest address, mark, hoplimit, flowlabel, priority */
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 529c119cbb14..cd1ac1637a05 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -18,19 +18,16 @@
#include <net/netfilter/ipv6/nf_nat_masquerade.h>
static void nft_masq_ipv6_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_masq *priv = nft_expr_priv(expr);
struct nf_nat_range range;
- unsigned int verdict;
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
- verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
-
- data[NFT_REG_VERDICT].verdict = verdict;
+ regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
}
static struct nft_expr_type nft_masq_ipv6_type;
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index 11820b6b3613..effd393bd517 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -18,26 +18,25 @@
#include <net/netfilter/nf_nat_redirect.h>
static void nft_redir_ipv6_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_redir *priv = nft_expr_priv(expr);
struct nf_nat_range range;
- unsigned int verdict;
memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) {
range.min_proto.all =
- *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_min],
range.max_proto.all =
- *(__be16 *)&data[priv->sreg_proto_max].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_max],
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
range.flags |= priv->flags;
- verdict = nf_nat_redirect_ipv6(pkt->skb, &range, pkt->ops->hooknum);
- data[NFT_REG_VERDICT].verdict = verdict;
+ regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range,
+ pkt->ops->hooknum);
}
static struct nft_expr_type nft_redir_ipv6_type;
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index f73285924144..d0d1540ecf87 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -20,7 +20,7 @@
#include <net/netfilter/ipv6/nf_reject.h>
static void nft_reject_ipv6_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
@@ -34,9 +34,11 @@ static void nft_reject_ipv6_eval(const struct nft_expr *expr,
case NFT_REJECT_TCP_RST:
nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
break;
+ default:
+ break;
}
- data[NFT_REG_VERDICT].verdict = NF_DROP;
+ regs->verdict.code = NF_DROP;
}
static struct nft_expr_type nft_reject_ipv6_type;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 74581f706c4d..85892af57364 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -9,13 +9,14 @@
#include <net/addrconf.h>
#include <net/secure_seq.h>
-static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
- struct in6_addr *src)
+static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
+ struct in6_addr *dst, struct in6_addr *src)
{
u32 hash, id;
hash = __ipv6_addr_jhash(dst, hashrnd);
hash = __ipv6_addr_jhash(src, hash);
+ hash ^= net_hash_mix(net);
/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
* set the hight order instead thus minimizing possible future
@@ -36,7 +37,7 @@ static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
*
* The network header must be set before calling this.
*/
-void ipv6_proxy_select_ident(struct sk_buff *skb)
+void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
static u32 ip6_proxy_idents_hashrnd __read_mostly;
struct in6_addr buf[2];
@@ -53,20 +54,21 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
net_get_random_once(&ip6_proxy_idents_hashrnd,
sizeof(ip6_proxy_idents_hashrnd));
- id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+ id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
&addrs[1], &addrs[0]);
skb_shinfo(skb)->ip6_frag_id = htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
+ struct rt6_info *rt)
{
static u32 ip6_idents_hashrnd __read_mostly;
u32 id;
net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
- id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+ id = __ipv6_select_ident(net, ip6_idents_hashrnd, &rt->rt6i_dst.addr,
&rt->rt6i_src.addr);
fhdr->identification = htonl(id);
}
@@ -134,7 +136,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
EXPORT_SYMBOL(ip6_dst_hoplimit);
#endif
-int __ip6_local_out(struct sk_buff *skb)
+static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
int len;
@@ -144,19 +146,30 @@ int __ip6_local_out(struct sk_buff *skb)
ipv6_hdr(skb)->payload_len = htons(len);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
- return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
- skb_dst(skb)->dev, dst_output);
+ return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+ NULL, skb_dst(skb)->dev, dst_output_sk);
+}
+
+int __ip6_local_out(struct sk_buff *skb)
+{
+ return __ip6_local_out_sk(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(__ip6_local_out);
-int ip6_local_out(struct sk_buff *skb)
+int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
int err;
- err = __ip6_local_out(skb);
+ err = __ip6_local_out_sk(sk, skb);
if (likely(err == 1))
- err = dst_output(skb);
+ err = dst_output_sk(sk, skb);
return err;
}
+EXPORT_SYMBOL_GPL(ip6_local_out_sk);
+
+int ip6_local_out(struct sk_buff *skb)
+{
+ return ip6_local_out_sk(skb->sk, skb);
+}
EXPORT_SYMBOL_GPL(ip6_local_out);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index a2dfff6ff227..263a5164a6f5 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -77,8 +77,7 @@ static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
return 0;
}
-int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len)
+int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index dae7f1a1e464..8072bd4139b7 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -32,7 +32,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/net_namespace.h>
@@ -172,7 +172,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
- if (sk == NULL)
+ if (!sk)
goto out;
net = dev_net(skb->dev);
@@ -367,7 +367,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
- if (sk != NULL) {
+ if (sk) {
/* Note: ipv6_hdr(skb) != skb->data */
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
saddr = &ip6h->saddr;
@@ -456,9 +456,8 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
* we return it, otherwise we block.
*/
-static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len,
- int noblock, int flags, int *addr_len)
+static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
@@ -631,7 +630,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb = sock_alloc_send_skb(sk,
length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
- if (skb == NULL)
+ if (!skb)
goto error;
skb_reserve(skb, hlen);
@@ -653,8 +652,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
goto error_fault;
IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
- rt->dst.dev, dst_output);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
+ NULL, rt->dst.dev, dst_output_sk);
if (err > 0)
err = net_xmit_errno(err);
if (err)
@@ -730,8 +729,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}
-static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len)
+static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
@@ -791,7 +789,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (flowlabel == NULL)
+ if (!flowlabel)
return -EINVAL;
}
}
@@ -833,13 +831,13 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (flowlabel == NULL)
+ if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
- if (opt == NULL)
+ if (!opt)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
@@ -1132,7 +1130,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
- if (skb != NULL)
+ if (skb)
amount = skb_tail_pointer(skb) -
skb_transport_header(skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index d7d70e69973b..8ffa2c8cce77 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -430,7 +430,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
int i, plen = 0;
clone = alloc_skb(0, GFP_ATOMIC);
- if (clone == NULL)
+ if (!clone)
goto out_oom;
clone->next = head->next;
head->next = clone;
@@ -552,7 +552,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
ip6_frag_ecn(hdr));
- if (fq != NULL) {
+ if (fq) {
int ret;
spin_lock(&fq->q.lock);
@@ -632,7 +632,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
table = ip6_frags_ns_ctl_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
- if (table == NULL)
+ if (!table)
goto err_alloc;
table[0].data = &net->ipv6.frags.high_thresh;
@@ -648,7 +648,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
}
hdr = register_net_sysctl(net, "net/ipv6", table);
- if (hdr == NULL)
+ if (!hdr)
goto err_reg;
net->ipv6.sysctl.frags_hdr = hdr;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4688bd4d7f59..d3588885f097 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -194,7 +194,6 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
static struct dst_ops ip6_dst_ops_template = {
.family = AF_INET6,
- .protocol = cpu_to_be16(ETH_P_IPV6),
.gc = ip6_dst_gc,
.gc_thresh = 1024,
.check = ip6_dst_check,
@@ -236,7 +235,6 @@ static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
static struct dst_ops ip6_dst_blackhole_ops = {
.family = AF_INET6,
- .protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
.mtu = ip6_blackhole_mtu,
@@ -1478,7 +1476,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
int remaining;
u32 *mp;
- if (cfg->fc_mx == NULL)
+ if (!cfg->fc_mx)
return 0;
mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
@@ -2247,9 +2245,10 @@ int ip6_route_get_saddr(struct net *net,
unsigned int prefs,
struct in6_addr *saddr)
{
- struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
+ struct inet6_dev *idev =
+ rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
int err = 0;
- if (rt->rt6i_prefsrc.plen)
+ if (rt && rt->rt6i_prefsrc.plen)
*saddr = rt->rt6i_prefsrc.addr;
else
err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
@@ -2400,6 +2399,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_PRIORITY] = { .type = NLA_U32 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
+ [RTA_PREF] = { .type = NLA_U8 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2407,6 +2407,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
+ unsigned int pref;
int err;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
@@ -2438,7 +2439,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
if (tb[RTA_GATEWAY]) {
- nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
+ cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
cfg->fc_flags |= RTF_GATEWAY;
}
@@ -2461,7 +2462,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (tb[RTA_PREFSRC])
- nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
+ cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
if (tb[RTA_OIF])
cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
@@ -2482,6 +2483,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
}
+ if (tb[RTA_PREF]) {
+ pref = nla_get_u8(tb[RTA_PREF]);
+ if (pref != ICMPV6_ROUTER_PREF_LOW &&
+ pref != ICMPV6_ROUTER_PREF_HIGH)
+ pref = ICMPV6_ROUTER_PREF_MEDIUM;
+ cfg->fc_flags |= RTF_PREF(pref);
+ }
+
err = 0;
errout:
return err;
@@ -2511,7 +2520,7 @@ beginning:
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
- nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+ r_cfg.fc_gateway = nla_get_in6_addr(nla);
r_cfg.fc_flags |= RTF_GATEWAY;
}
}
@@ -2585,7 +2594,8 @@ static inline size_t rt6_nlmsg_size(void)
+ nla_total_size(4) /* RTA_PRIORITY */
+ RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
+ nla_total_size(sizeof(struct rta_cacheinfo))
- + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
+ + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
+ + nla_total_size(1); /* RTA_PREF */
}
static int rt6_fill_node(struct net *net,
@@ -2660,19 +2670,19 @@ static int rt6_fill_node(struct net *net,
rtm->rtm_flags |= RTM_F_CLONED;
if (dst) {
- if (nla_put(skb, RTA_DST, 16, dst))
+ if (nla_put_in6_addr(skb, RTA_DST, dst))
goto nla_put_failure;
rtm->rtm_dst_len = 128;
} else if (rtm->rtm_dst_len)
- if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
+ if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
if (src) {
- if (nla_put(skb, RTA_SRC, 16, src))
+ if (nla_put_in6_addr(skb, RTA_SRC, src))
goto nla_put_failure;
rtm->rtm_src_len = 128;
} else if (rtm->rtm_src_len &&
- nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
+ nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
goto nla_put_failure;
#endif
if (iif) {
@@ -2696,14 +2706,14 @@ static int rt6_fill_node(struct net *net,
} else if (dst) {
struct in6_addr saddr_buf;
if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
- nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+ nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
goto nla_put_failure;
}
if (rt->rt6i_prefsrc.plen) {
struct in6_addr saddr_buf;
saddr_buf = rt->rt6i_prefsrc.addr;
- if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+ if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
goto nla_put_failure;
}
@@ -2711,7 +2721,7 @@ static int rt6_fill_node(struct net *net,
goto nla_put_failure;
if (rt->rt6i_flags & RTF_GATEWAY) {
- if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
+ if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
goto nla_put_failure;
}
@@ -2726,6 +2736,9 @@ static int rt6_fill_node(struct net *net,
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
goto nla_put_failure;
+ if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e4cbd5798eba..ac35a28599be 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -118,7 +118,7 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
return t;
}
t = rcu_dereference(sitn->tunnels_wc[0]);
- if ((t != NULL) && (t->dev->flags & IFF_UP))
+ if (t && (t->dev->flags & IFF_UP))
return t;
return NULL;
}
@@ -251,7 +251,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ipip6_tunnel_setup);
- if (dev == NULL)
+ if (!dev)
return NULL;
dev_net_set(dev, net);
@@ -555,7 +555,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
skb->dev,
iph->daddr,
iph->saddr);
- if (t == NULL)
+ if (!t)
goto out;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
@@ -671,7 +671,7 @@ static int ipip6_rcv(struct sk_buff *skb)
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
- if (tunnel != NULL) {
+ if (tunnel) {
struct pcpu_sw_netstats *tstats;
if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
@@ -733,7 +733,7 @@ static int ipip_rcv(struct sk_buff *skb)
iph = ip_hdr(skb);
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
- if (tunnel != NULL) {
+ if (tunnel) {
if (tunnel->parms.iph.protocol != IPPROTO_IPIP &&
tunnel->parms.iph.protocol != 0)
goto drop;
@@ -838,7 +838,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (skb_dst(skb))
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
- if (neigh == NULL) {
+ if (!neigh) {
net_dbg_ratelimited("nexthop == NULL\n");
goto tx_error;
}
@@ -867,7 +867,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (skb_dst(skb))
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
- if (neigh == NULL) {
+ if (!neigh) {
net_dbg_ratelimited("nexthop == NULL\n");
goto tx_error;
}
@@ -983,7 +983,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
- err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr,
+ err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr,
protocol, tos, ttl, df,
!net_eq(tunnel->net, dev_net(dev)));
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
@@ -1076,7 +1076,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
}
- dev->iflink = tunnel->parms.link;
}
static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
@@ -1158,7 +1157,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
t = ipip6_tunnel_locate(net, &p, 0);
- if (t == NULL)
+ if (!t)
t = netdev_priv(dev);
}
@@ -1206,7 +1205,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
- if (t != NULL) {
+ if (t) {
if (t->dev != dev) {
err = -EEXIST;
break;
@@ -1242,7 +1241,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
goto done;
err = -ENOENT;
t = ipip6_tunnel_locate(net, &p, 0);
- if (t == NULL)
+ if (!t)
goto done;
err = -EPERM;
if (t == netdev_priv(sitn->fb_tunnel_dev))
@@ -1336,6 +1335,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_do_ioctl = ipip6_tunnel_ioctl,
.ndo_change_mtu = ipip6_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip_tunnel_get_iflink,
};
static void ipip6_dev_free(struct net_device *dev)
@@ -1366,7 +1366,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->mtu = ETH_DATA_LEN - t_hlen;
dev->flags = IFF_NOARP;
netif_keep_dst(dev);
- dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
dev->features |= SIT_FEATURES;
@@ -1530,8 +1529,7 @@ static bool ipip6_netlink_6rd_parms(struct nlattr *data[],
if (data[IFLA_IPTUN_6RD_PREFIX]) {
ret = true;
- nla_memcpy(&ip6rd->prefix, data[IFLA_IPTUN_6RD_PREFIX],
- sizeof(struct in6_addr));
+ ip6rd->prefix = nla_get_in6_addr(data[IFLA_IPTUN_6RD_PREFIX]);
}
if (data[IFLA_IPTUN_6RD_RELAY_PREFIX]) {
@@ -1683,8 +1681,8 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_parm *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
- nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
- nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
+ nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
+ nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
@@ -1694,10 +1692,10 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
#ifdef CONFIG_IPV6_SIT_6RD
- if (nla_put(skb, IFLA_IPTUN_6RD_PREFIX, sizeof(struct in6_addr),
- &tunnel->ip6rd.prefix) ||
- nla_put_be32(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
- tunnel->ip6rd.relay_prefix) ||
+ if (nla_put_in6_addr(skb, IFLA_IPTUN_6RD_PREFIX,
+ &tunnel->ip6rd.prefix) ||
+ nla_put_in_addr(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
+ tunnel->ip6rd.relay_prefix) ||
nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN,
tunnel->ip6rd.prefixlen) ||
nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
@@ -1795,7 +1793,7 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
struct ip_tunnel *t;
t = rtnl_dereference(sitn->tunnels[prio][h]);
- while (t != NULL) {
+ while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7337fc7947e2..21bc2eb53c57 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -49,11 +49,12 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct sock *child;
child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
- if (child)
+ if (child) {
+ atomic_set(&req->rsk_refcnt, 1);
inet_csk_reqsk_queue_add(sk, req, child);
- else
+ } else {
reqsk_free(req);
-
+ }
return child;
}
@@ -189,13 +190,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out;
ret = NULL;
- req = inet_reqsk_alloc(&tcp6_request_sock_ops);
+ req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk);
if (!req)
goto out;
ireq = inet_rsk(req);
treq = tcp_rsk(req);
- treq->listener = NULL;
+ treq->tfo_listener = false;
if (security_inet_conn_request(sk, skb, req))
goto out_free;
@@ -220,7 +221,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_mark = inet_request_mark(sk, skb);
- req->expires = 0UL;
req->num_retrans = 0;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index c5c10fafcfe2..abcc79f649b3 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -54,6 +54,20 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "idgen_retries",
+ .data = &init_net.ipv6.sysctl.idgen_retries,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "idgen_delay",
+ .data = &init_net.ipv6.sysctl.idgen_delay,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{ }
};
@@ -93,6 +107,8 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
+ ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
+ ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
@@ -163,7 +179,7 @@ int ipv6_sysctl_register(void)
int err = -ENOMEM;
ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
- if (ip6_header == NULL)
+ if (!ip6_header)
goto out;
err = register_pernet_subsys(&ipv6_sysctl_net_ops);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1f5e62229aaa..b6575d665568 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -104,19 +104,6 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
}
}
-static void tcp_v6_hash(struct sock *sk)
-{
- if (sk->sk_state != TCP_CLOSE) {
- if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
- tcp_prot.hash(sk);
- return;
- }
- local_bh_disable();
- __inet6_hash(sk, NULL);
- local_bh_enable();
- }
-}
-
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
@@ -154,7 +141,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (flowlabel == NULL)
+ if (!flowlabel)
return -EINVAL;
fl6_sock_release(flowlabel);
}
@@ -233,11 +220,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tp->af_specific = &tcp_sock_ipv6_specific;
#endif
goto failure;
- } else {
- ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
- &sk->sk_v6_rcv_saddr);
}
+ np->saddr = sk->sk_v6_rcv_saddr;
return err;
}
@@ -263,7 +247,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
goto failure;
}
- if (saddr == NULL) {
+ if (!saddr) {
saddr = &fl6.saddr;
sk->sk_v6_rcv_saddr = *saddr;
}
@@ -340,18 +324,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+ struct net *net = dev_net(skb->dev);
+ struct request_sock *fastopen;
struct ipv6_pinfo *np;
- struct sock *sk;
- int err;
struct tcp_sock *tp;
- struct request_sock *fastopen;
__u32 seq, snd_una;
- struct net *net = dev_net(skb->dev);
+ struct sock *sk;
+ int err;
- sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
- th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+ sk = __inet6_lookup_established(net, &tcp_hashinfo,
+ &hdr->daddr, th->dest,
+ &hdr->saddr, ntohs(th->source),
+ skb->dev->ifindex);
- if (sk == NULL) {
+ if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
@@ -361,6 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
inet_twsk_put(inet_twsk(sk));
return;
}
+ seq = ntohl(th->seq);
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+ return tcp_req_err(sk, seq);
bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -375,7 +364,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
tp = tcp_sk(sk);
- seq = ntohl(th->seq);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
fastopen = tp->fastopen_rsk;
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -419,37 +407,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
/* Might be for a request_sock */
switch (sk->sk_state) {
- struct request_sock *req, **prev;
- case TCP_LISTEN:
- if (sock_owned_by_user(sk))
- goto out;
-
- /* Note : We use inet6_iif() here, not tcp_v6_iif() */
- req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
- &hdr->saddr, inet6_iif(skb));
- if (!req)
- goto out;
-
- /* ICMPs are not backlogged, hence we cannot get
- * an established socket here.
- */
- WARN_ON(req->sk != NULL);
-
- if (seq != tcp_rsk(req)->snt_isn) {
- NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- goto out;
- }
-
- inet_csk_reqsk_queue_drop(sk, req, prev);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
- goto out;
-
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
* is already accepted it is treated as a connected one below.
*/
- if (fastopen && fastopen->sk == NULL)
+ if (fastopen && !fastopen->sk)
break;
if (!sock_owned_by_user(sk)) {
@@ -497,7 +460,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
&ireq->ir_v6_rmt_addr);
fl6->daddr = ireq->ir_v6_rmt_addr;
- if (np->repflow && (ireq->pktopts != NULL))
+ if (np->repflow && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
skb_set_queue_mapping(skb, queue_mapping);
@@ -523,17 +486,11 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
- struct sock *addr_sk)
+ const struct sock *addr_sk)
{
return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
-static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
- struct request_sock *req)
-{
- return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
-}
-
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
int optlen)
{
@@ -619,9 +576,9 @@ clear_hash_noput:
return 1;
}
-static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+static int tcp_v6_md5_hash_skb(char *md5_hash,
+ const struct tcp_md5sig_key *key,
const struct sock *sk,
- const struct request_sock *req,
const struct sk_buff *skb)
{
const struct in6_addr *saddr, *daddr;
@@ -629,12 +586,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
struct hash_desc *desc;
const struct tcphdr *th = tcp_hdr(skb);
- if (sk) {
- saddr = &inet6_sk(sk)->saddr;
+ if (sk) { /* valid for establish/request sockets */
+ saddr = &sk->sk_v6_rcv_saddr;
daddr = &sk->sk_v6_daddr;
- } else if (req) {
- saddr = &inet_rsk(req)->ir_v6_loc_addr;
- daddr = &inet_rsk(req)->ir_v6_rmt_addr;
} else {
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
saddr = &ip6h->saddr;
@@ -670,8 +624,7 @@ clear_hash_noput:
return 1;
}
-static int __tcp_v6_inbound_md5_hash(struct sock *sk,
- const struct sk_buff *skb)
+static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
@@ -685,44 +638,32 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk,
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
- return 0;
+ return false;
if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- return 1;
+ return true;
}
if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
- return 1;
+ return true;
}
/* check the signature */
genhash = tcp_v6_md5_hash_skb(newhash,
hash_expected,
- NULL, NULL, skb);
+ NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
genhash ? "failed" : "mismatch",
&ip6h->saddr, ntohs(th->source),
&ip6h->daddr, ntohs(th->dest));
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
- int ret;
-
- rcu_read_lock();
- ret = __tcp_v6_inbound_md5_hash(sk, skb);
- rcu_read_unlock();
-
- return ret;
-}
-
#endif
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
@@ -734,8 +675,6 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- ireq->ir_iif = sk->sk_bound_dev_if;
-
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
@@ -774,7 +713,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
- .md5_lookup = tcp_v6_reqsk_md5_lookup,
+ .req_md5_lookup = tcp_v6_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
#endif
.init_req = tcp_v6_init_req,
@@ -811,7 +750,7 @@ static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
GFP_ATOMIC);
- if (buff == NULL)
+ if (!buff)
return;
skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
@@ -931,7 +870,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
if (!key)
goto release_sk1;
- genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
+ genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0)
goto release_sk1;
} else {
@@ -997,17 +936,20 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
- struct request_sock *req, **prev;
const struct tcphdr *th = tcp_hdr(skb);
+ struct request_sock *req;
struct sock *nsk;
/* Find possible connection requests. */
- req = inet6_csk_search_req(sk, &prev, th->source,
+ req = inet6_csk_search_req(sk, th->source,
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
- if (req)
- return tcp_check_req(sk, skb, req, prev, false);
-
+ if (req) {
+ nsk = tcp_check_req(sk, skb, req, false);
+ if (!nsk)
+ reqsk_put(req);
+ return nsk;
+ }
nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
&ipv6_hdr(skb)->saddr, th->source,
&ipv6_hdr(skb)->daddr, ntohs(th->dest),
@@ -1067,7 +1009,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
- if (newsk == NULL)
+ if (!newsk)
return NULL;
newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1079,11 +1021,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
-
- ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
-
- newsk->sk_v6_rcv_saddr = newnp->saddr;
+ newnp->saddr = newsk->sk_v6_rcv_saddr;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1128,7 +1066,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
newsk = tcp_create_openreq_child(sk, req, skb);
- if (newsk == NULL)
+ if (!newsk)
goto out_nonewsk;
/*
@@ -1170,7 +1108,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
- if (ireq->pktopts != NULL) {
+ if (ireq->pktopts) {
newnp->pktoptions = skb_clone(ireq->pktopts,
sk_gfp_atomic(sk, GFP_ATOMIC));
consume_skb(ireq->pktopts);
@@ -1215,7 +1153,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
- if (key != NULL) {
+ if (key) {
/* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
@@ -1232,7 +1170,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
tcp_done(newsk);
goto out;
}
- __inet6_hash(newsk, NULL);
+ __inet_hash(newsk, NULL);
return newsk;
@@ -1547,9 +1485,9 @@ do_time_wait:
&ipv6_hdr(skb)->saddr, th->source,
&ipv6_hdr(skb)->daddr,
ntohs(th->dest), tcp_v6_iif(skb));
- if (sk2 != NULL) {
+ if (sk2) {
struct inet_timewait_sock *tw = inet_twsk(sk);
- inet_twsk_deschedule(tw, &tcp_death_row);
+ inet_twsk_deschedule(tw);
inet_twsk_put(tw);
sk = sk2;
tcp_v6_restore_cb(skb);
@@ -1595,7 +1533,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
if (sk) {
skb->sk = sk;
skb->destructor = sock_edemux;
- if (sk->sk_state != TCP_TIME_WAIT) {
+ if (sk_fullsock(sk)) {
struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
@@ -1700,9 +1638,9 @@ static void tcp_v6_destroy_sock(struct sock *sk)
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
- const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
+ struct request_sock *req, int i, kuid_t uid)
{
- int ttd = req->expires - jiffies;
+ long ttd = req->rsk_timer.expires - jiffies;
const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
@@ -1791,9 +1729,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
static void get_timewait6_sock(struct seq_file *seq,
struct inet_timewait_sock *tw, int i)
{
+ long delta = tw->tw_timer.expires - jiffies;
const struct in6_addr *dest, *src;
__u16 destp, srcp;
- s32 delta = tw->tw_ttd - inet_tw_time_stamp();
dest = &tw->tw_v6_daddr;
src = &tw->tw_v6_rcv_saddr;
@@ -1838,7 +1776,7 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
get_tcp6_sock(seq, v, st->num);
break;
case TCP_SEQ_STATE_OPENREQ:
- get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
+ get_openreq6(seq, v, st->num, st->uid);
break;
}
out:
@@ -1902,7 +1840,7 @@ struct proto tcpv6_prot = {
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v6_do_rcv,
.release_cb = tcp_release_cb,
- .hash = tcp_v6_hash,
+ .hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
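Several tcp_ipv6.c hunks above turn tcp_v6_inbound_md5_hash() from an int returning 0/1 into a bool predicate. The expected-vs-found digest logic reduces to roughly the shape below; this is a plain-C sketch with invented names, not the kernel function itself.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* true means "drop": mirrors the expected/found MD5 option checks. */
static bool digest_check_failed(const unsigned char *expected,
				const unsigned char *found, size_t len)
{
	if (!expected && !found)
		return false;	/* neither side uses a digest */
	if (!expected || !found)
		return true;	/* digest present on only one side */
	return memcmp(expected, found, len) != 0;
}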
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c1ab77105b4c..d883c9204c01 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -41,8 +41,8 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
return tcp_gro_complete(skb);
}
-struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
- netdev_features_t features)
+static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct tcphdr *th;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d048d46779fc..3477c919fcc8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,11 +53,11 @@
#include <trace/events/skb.h>
#include "udp_impl.h"
-static unsigned int udp6_ehashfn(struct net *net,
- const struct in6_addr *laddr,
- const u16 lport,
- const struct in6_addr *faddr,
- const __be16 fport)
+static u32 udp6_ehashfn(const struct net *net,
+ const struct in6_addr *laddr,
+ const u16 lport,
+ const struct in6_addr *faddr,
+ const __be16 fport)
{
static u32 udp6_ehash_secret __read_mostly;
static u32 udp_ipv6_hash_secret __read_mostly;
@@ -104,9 +104,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
return 0;
}
-static unsigned int udp6_portaddr_hash(struct net *net,
- const struct in6_addr *addr6,
- unsigned int port)
+static u32 udp6_portaddr_hash(const struct net *net,
+ const struct in6_addr *addr6,
+ unsigned int port)
{
unsigned int hash, mix = net_hash_mix(net);
@@ -120,7 +120,6 @@ static unsigned int udp6_portaddr_hash(struct net *net,
return hash ^ port;
}
-
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
@@ -385,14 +384,12 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
-
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
*/
-int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len,
+int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -551,7 +548,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk = __udp6_lib_lookup(net, daddr, uh->dest,
saddr, uh->source, inet6_iif(skb), udptable);
- if (sk == NULL) {
+ if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
@@ -649,7 +646,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
- if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+ if (skb->len > sizeof(struct udphdr) && encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
@@ -750,7 +747,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
for (i = 0; i < count; i++) {
sk = stack[i];
- if (likely(skb1 == NULL))
+ if (likely(!skb1))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
@@ -900,7 +897,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
- if (sk != NULL) {
+ if (sk) {
int ret;
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1101,8 +1098,7 @@ out:
return err;
}
-int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len)
+int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct udp_sock *up = udp_sk(sk);
@@ -1164,12 +1160,12 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
do_udp_sendmsg:
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
- return udp_sendmsg(iocb, sk, msg, len);
+ return udp_sendmsg(sk, msg, len);
}
}
if (up->pending == AF_INET)
- return udp_sendmsg(iocb, sk, msg, len);
+ return udp_sendmsg(sk, msg, len);
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
@@ -1209,7 +1205,7 @@ do_udp_sendmsg:
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (flowlabel == NULL)
+ if (!flowlabel)
return -EINVAL;
}
}
@@ -1257,14 +1253,14 @@ do_udp_sendmsg:
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (flowlabel == NULL)
+ if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = 0;
}
- if (opt == NULL)
+ if (!opt)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
@@ -1557,7 +1553,6 @@ static struct inet_protosw udpv6_protosw = {
.flags = INET_PROTOSW_PERMANENT,
};
-
int __init udpv6_init(void)
{
int ret;
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index c779c3c90b9d..0682c031ccdc 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -23,10 +23,9 @@ int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
#endif
-int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
-int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len);
+int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len);
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udpv6_destroy_sock(struct sock *sk);
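The udp.c and udp_impl.h hunks above drop the struct kiocb * argument from the sendmsg/recvmsg paths, a change repeated for ipx, irda, iucv, pfkey, l2tp and llc further down. A compilable sketch of the resulting callback shape, using an invented ex_proto_ops container rather than the real struct proto:

#include <stddef.h>

struct sock;
struct msghdr;

/*
 * Old shape (now gone):
 *   int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
 *                  struct msghdr *msg, size_t len);
 */
struct ex_proto_ops {
	int (*sendmsg)(struct sock *sk, struct msghdr *msg, size_t len);
	int (*recvmsg)(struct sock *sk, struct msghdr *msg, size_t len,
		       int noblock, int flags, int *addr_len);
};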
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index be2c0ba82c85..7441e1e63893 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -54,7 +54,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
/* Set the IPv6 fragment id if not set yet */
if (!skb_shinfo(skb)->ip6_frag_id)
- ipv6_proxy_select_ident(skb);
+ ipv6_proxy_select_ident(dev_net(skb->dev), skb);
segs = NULL;
goto out;
@@ -113,7 +113,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
if (!skb_shinfo(skb)->ip6_frag_id)
- ipv6_proxy_select_ident(skb);
+ ipv6_proxy_select_ident(dev_net(skb->dev), skb);
fptr->identification = skb_shinfo(skb)->ip6_frag_id;
/* Fragment the skb. ipv6 header and the remaining fields of the
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index f48fbe4d16f5..74bd17882a2f 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -42,7 +42,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
ipv6_hdr(skb)->payload_len = htons(skb->len);
__skb_push(skb, skb->data - skb_network_header(skb));
- NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
+ skb->dev, NULL,
ip6_rcv_finish);
return -1;
}
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index 9949a356d62c..1e205c3253ac 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -95,8 +95,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
ip6h = ipv6_hdr(skb);
ip6h->payload_len = htons(skb->len - size);
- ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6;
- ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6;
+ ip6h->daddr = x->sel.daddr.in6;
+ ip6h->saddr = x->sel.saddr.in6;
err = 0;
out:
return err;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 010f8bd2d577..09c76a7b474d 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -120,7 +120,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
}
EXPORT_SYMBOL(xfrm6_prepare_output);
-int xfrm6_output_finish(struct sk_buff *skb)
+int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
{
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
@@ -128,10 +128,10 @@ int xfrm6_output_finish(struct sk_buff *skb)
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
#endif
- return xfrm_output(skb);
+ return xfrm_output(sk, skb);
}
-static int __xfrm6_output(struct sk_buff *skb)
+static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
@@ -140,7 +140,7 @@ static int __xfrm6_output(struct sk_buff *skb)
#ifdef CONFIG_NETFILTER
if (!x) {
IP6CB(skb)->flags |= IP6SKB_REROUTED;
- return dst_output(skb);
+ return dst_output_sk(sk, skb);
}
#endif
@@ -160,14 +160,15 @@ static int __xfrm6_output(struct sk_buff *skb)
if (x->props.mode == XFRM_MODE_TUNNEL &&
((skb->len > mtu && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)))) {
- return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
+ return ip6_fragment(sk, skb,
+ x->outer_mode->afinfo->output_finish);
}
- return x->outer_mode->afinfo->output_finish(skb);
+ return x->outer_mode->afinfo->output_finish(sk, skb);
}
int xfrm6_output(struct sock *sk, struct sk_buff *skb)
{
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
NULL, skb_dst(skb)->dev, __xfrm6_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8d2d01b4800a..f337a908a76a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -61,9 +61,7 @@ static int xfrm6_get_saddr(struct net *net,
return -EHOSTUNREACH;
dev = ip6_dst_idev(dst)->dev;
- ipv6_dev_get_saddr(dev_net(dev), dev,
- (struct in6_addr *)&daddr->a6, 0,
- (struct in6_addr *)&saddr->a6);
+ ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
dst_release(dst);
return 0;
}
@@ -293,7 +291,6 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static struct dst_ops xfrm6_dst_ops = {
.family = AF_INET6,
- .protocol = cpu_to_be16(ETH_P_IPV6),
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
.redirect = xfrm6_redirect,
@@ -371,7 +368,7 @@ static void __net_exit xfrm6_net_exit(struct net *net)
{
struct ctl_table *table;
- if (net->ipv6.sysctl.xfrm6_hdr == NULL)
+ if (!net->ipv6.sysctl.xfrm6_hdr)
return;
table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index f11ad1d95e0e..4ea5d7497b5f 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1688,8 +1688,7 @@ out:
return rc;
}
-static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct ipx_sock *ipxs = ipx_sk(sk);
@@ -1754,8 +1753,8 @@ out:
}
-static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct ipx_sock *ipxs = ipx_sk(sk);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 568edc72d737..ee0ea25c8e7a 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1256,14 +1256,13 @@ static int irda_release(struct socket *sock)
}
/*
- * Function irda_sendmsg (iocb, sock, msg, len)
+ * Function irda_sendmsg (sock, msg, len)
*
* Send message down to TinyTP. This function is used for both STREAM and
* SEQPACK services. This is possible since it forces the client to
* fragment the message if necessary
*/
-static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int irda_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct irda_sock *self;
@@ -1348,13 +1347,13 @@ out:
}
/*
- * Function irda_recvmsg_dgram (iocb, sock, msg, size, flags)
+ * Function irda_recvmsg_dgram (sock, msg, size, flags)
*
* Try to receive message and copy it to user. The frame is discarded
* after being read, regardless of how much the user actually read
*/
-static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
@@ -1398,10 +1397,10 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
}
/*
- * Function irda_recvmsg_stream (iocb, sock, msg, size, flags)
+ * Function irda_recvmsg_stream (sock, msg, size, flags)
*/
-static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
@@ -1515,14 +1514,14 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
}
/*
- * Function irda_sendmsg_dgram (iocb, sock, msg, len)
+ * Function irda_sendmsg_dgram (sock, msg, len)
*
* Send message down to TinyTP for the unreliable sequenced
* packet service...
*
*/
-static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int irda_sendmsg_dgram(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct irda_sock *self;
@@ -1594,14 +1593,14 @@ out:
}
/*
- * Function irda_sendmsg_ultra (iocb, sock, msg, len)
+ * Function irda_sendmsg_ultra (sock, msg, len)
*
* Send message down to IrLMP for the unreliable Ultra
* packet service...
*/
#ifdef CONFIG_IRDA_ULTRA
-static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int irda_sendmsg_ultra(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct irda_sock *self;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 53d931172088..6daa52a18d40 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1026,8 +1026,8 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
(void *) prmdata, 8);
}
-static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1315,8 +1315,8 @@ static void iucv_process_message_q(struct sock *sk)
}
}
-static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f8ac939d52b4..f0d52d721b3a 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -709,7 +709,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
sin6->sin6_family = AF_INET6;
sin6->sin6_port = port;
sin6->sin6_flowinfo = 0;
- sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
+ sin6->sin6_addr = xaddr->in6;
sin6->sin6_scope_id = 0;
return 128;
}
@@ -3588,8 +3588,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
}
#endif
-static int pfkey_sendmsg(struct kiocb *kiocb,
- struct socket *sock, struct msghdr *msg, size_t len)
+static int pfkey_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sk_buff *skb = NULL;
@@ -3630,8 +3629,7 @@ out:
return err ? : len;
}
-static int pfkey_recvmsg(struct kiocb *kiocb,
- struct socket *sock, struct msghdr *msg, size_t len,
+static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 781b3a226ba7..4b552873b556 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -74,7 +74,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
priv->dev = dev;
eth_hw_addr_random(dev);
- memset(&dev->broadcast[0], 0xff, 6);
+ eth_broadcast_addr(dev->broadcast);
dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
return 0;
}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 05dfc8aa36af..79649937ec71 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -385,7 +385,7 @@ drop:
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
* control frames.
*/
-static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
+static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
int rc;
@@ -506,7 +506,7 @@ no_route:
goto out;
}
-static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8611f1b63141..d1ded3777815 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -480,8 +480,7 @@ out:
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
* control frames.
*/
-static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len)
+static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
@@ -643,9 +642,8 @@ do_confirm:
goto done;
}
-static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index b4e923f77954..9e13c2ff8789 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -205,9 +205,9 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
#endif
if (info->attrs[L2TP_ATTR_IP_SADDR] &&
info->attrs[L2TP_ATTR_IP_DADDR]) {
- cfg.local_ip.s_addr = nla_get_be32(
+ cfg.local_ip.s_addr = nla_get_in_addr(
info->attrs[L2TP_ATTR_IP_SADDR]);
- cfg.peer_ip.s_addr = nla_get_be32(
+ cfg.peer_ip.s_addr = nla_get_in_addr(
info->attrs[L2TP_ATTR_IP_DADDR]);
} else {
ret = -EINVAL;
@@ -376,15 +376,17 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
if (np) {
- if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
- &np->saddr) ||
- nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(sk->sk_v6_daddr),
- &sk->sk_v6_daddr))
+ if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR,
+ &np->saddr) ||
+ nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR,
+ &sk->sk_v6_daddr))
goto nla_put_failure;
} else
#endif
- if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
- nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+ if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR,
+ inet->inet_saddr) ||
+ nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR,
+ inet->inet_daddr))
goto nla_put_failure;
break;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index cc7a828fc914..e9b0dec56b8e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -185,9 +185,8 @@ static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
/* Receive message. This is the recvmsg for the PPPoL2TP socket.
*/
-static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len,
- int flags)
+static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
{
int err;
struct sk_buff *skb;
@@ -295,7 +294,7 @@ static void pppol2tp_session_sock_put(struct l2tp_session *session)
* when a user application does a sendmsg() on the session socket. L2TP and
* PPP headers must be inserted into the user's data.
*/
-static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
size_t total_len)
{
static const unsigned char ppph[2] = { 0xff, 0x03 };
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2c0b83ce43bd..17a8dff06090 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -704,8 +704,8 @@ out:
* Copy received data to the socket user.
* Returns non-negative upon success, negative otherwise.
*/
-static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
DECLARE_SOCKADDR(struct sockaddr_llc *, uaddr, msg->msg_name);
const int nonblock = flags & MSG_DONTWAIT;
@@ -878,8 +878,7 @@ copy_uaddr:
* Transmit data provided by the socket user.
* Returns non-negative upon success, negative otherwise.
*/
-static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7869bb40acaa..208df7c0b6ea 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -85,11 +85,15 @@ struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
return tfm;
err = crypto_aead_setkey(tfm, key, key_len);
- if (!err)
- err = crypto_aead_setauthsize(tfm, mic_len);
- if (!err)
- return tfm;
+ if (err)
+ goto free_aead;
+ err = crypto_aead_setauthsize(tfm, mic_len);
+ if (err)
+ goto free_aead;
+
+ return tfm;
+free_aead:
crypto_free_aead(tfm);
return ERR_PTR(err);
}
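The aes_ccm.c rework above (and the aes_gcm.c one below) replaces chained if (!err) tests with a single free_aead unwind label. A self-contained, non-kernel sketch of that error-unwind shape, with invented names:

#include <stdlib.h>

struct ex_ctx { int cfg; };

static int ex_step_a(struct ex_ctx *c) { c->cfg = 1; return 0; } /* 0 = ok */
static int ex_step_b(struct ex_ctx *c) { (void)c; return 0; }

/* Every post-allocation failure funnels through one cleanup label. */
static struct ex_ctx *ex_setup(void)
{
	struct ex_ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	if (ex_step_a(c))
		goto free_ctx;
	if (ex_step_b(c))
		goto free_ctx;

	return c;

free_ctx:
	free(c);
	return NULL;
}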
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
index c2bf6698d738..fd278bbe1b0d 100644
--- a/net/mac80211/aes_gcm.c
+++ b/net/mac80211/aes_gcm.c
@@ -80,11 +80,15 @@ struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
return tfm;
err = crypto_aead_setkey(tfm, key, key_len);
- if (!err)
- err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
- if (!err)
- return tfm;
+ if (err)
+ goto free_aead;
+ err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
+ if (err)
+ goto free_aead;
+
+ return tfm;
+free_aead:
crypto_free_aead(tfm);
return ERR_PTR(err);
}
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 1c72edcb0083..f1321b7d6506 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -70,9 +70,9 @@ struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
err = crypto_aead_setkey(tfm, key, key_len);
if (!err)
- return tfm;
- if (!err)
err = crypto_aead_setauthsize(tfm, GMAC_MIC_LEN);
+ if (!err)
+ return tfm;
crypto_free_aead(tfm);
return ERR_PTR(err);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 7702978a4c99..5c564a68fb50 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -238,6 +238,14 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
int i, ret = -EOPNOTSUPP;
u16 status = WLAN_STATUS_REQUEST_DECLINED;
+ if (!sta->sta.ht_cap.ht_supported) {
+ ht_dbg(sta->sdata,
+ "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
+ sta->sta.addr, tid);
+ /* send a response anyway, it's an error case if we get here */
+ goto end_no_lock;
+ }
+
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
ht_dbg(sta->sdata,
"Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index a360c15cc978..cce9d425c718 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -188,6 +188,43 @@ ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
__release(agg_queue);
}
+static void
+ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
+{
+ struct ieee80211_txq *txq = sta->sta.txq[tid];
+ struct txq_info *txqi;
+
+ if (!txq)
+ return;
+
+ txqi = to_txq_info(txq);
+
+ /* Lock here to protect against further seqno updates on dequeue */
+ spin_lock_bh(&txqi->queue.lock);
+ set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
+ spin_unlock_bh(&txqi->queue.lock);
+}
+
+static void
+ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
+{
+ struct ieee80211_txq *txq = sta->sta.txq[tid];
+ struct txq_info *txqi;
+
+ if (!txq)
+ return;
+
+ txqi = to_txq_info(txq);
+
+ if (enable)
+ set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
+ else
+ clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
+
+ clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
+ drv_wake_tx_queue(sta->sdata->local, txqi);
+}
+
/*
* splice packets from the STA's pending to the local pending,
* requires a call to ieee80211_agg_splice_finish later
@@ -247,6 +284,7 @@ static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
ieee80211_assign_tid_tx(sta, tid, NULL);
ieee80211_agg_splice_finish(sta->sdata, tid);
+ ieee80211_agg_start_txq(sta, tid, false);
kfree_rcu(tid_tx, rcu_head);
}
@@ -418,6 +456,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
*/
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+ ieee80211_agg_stop_txq(sta, tid);
+
/*
* Make sure no packets are being processed. This ensures that
* we have a valid starting sequence number and that in-flight
@@ -440,6 +480,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
ieee80211_agg_splice_finish(sdata, tid);
spin_unlock_bh(&sta->lock);
+ ieee80211_agg_start_txq(sta, tid, false);
+
kfree_rcu(tid_tx, rcu_head);
return;
}
@@ -509,11 +551,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
struct tid_ampdu_tx *tid_tx;
int ret = 0;
+ trace_api_start_tx_ba_session(pubsta, tid);
+
if (WARN(sta->reserved_tid == tid,
"Requested to start BA session on reserved tid=%d", tid))
return -EINVAL;
- trace_api_start_tx_ba_session(pubsta, tid);
+ if (!pubsta->ht_cap.ht_supported)
+ return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
return -EINVAL;
@@ -666,6 +711,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
ieee80211_agg_splice_finish(sta->sdata, tid);
spin_unlock_bh(&sta->lock);
+
+ ieee80211_agg_start_txq(sta, tid, true);
}
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
@@ -793,6 +840,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
struct tid_ampdu_tx *tid_tx;
+ bool send_delba = false;
trace_api_stop_tx_ba_cb(sdata, ra, tid);
@@ -824,13 +872,17 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
}
if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
- ieee80211_send_delba(sta->sdata, ra, tid,
- WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
+ send_delba = true;
ieee80211_remove_tid_tx(sta, tid);
unlock_sta:
spin_unlock_bh(&sta->lock);
+
+ if (send_delba)
+ ieee80211_send_delba(sdata, ra, tid,
+ WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
+
mutex_unlock(&sta->ampdu_mlme.mtx);
unlock:
mutex_unlock(&local->sta_mtx);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index dd4ff36c557a..265e42721a66 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -24,6 +24,7 @@
static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params)
@@ -33,7 +34,7 @@ static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata;
int err;
- err = ieee80211_if_add(local, name, &wdev, type, params);
+ err = ieee80211_if_add(local, name, name_assign_type, &wdev, type, params);
if (err)
return ERR_PTR(err);
@@ -977,6 +978,14 @@ static int sta_apply_auth_flags(struct ieee80211_local *local,
if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) &&
set & BIT(NL80211_STA_FLAG_ASSOCIATED) &&
!test_sta_flag(sta, WLAN_STA_ASSOC)) {
+ /*
+ * When peer becomes associated, init rate control as
+ * well. Some drivers require rate control initialized
+ * before drv_sta_state() is called.
+ */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+ rate_control_rate_init(sta);
+
ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (ret)
return ret;
@@ -1050,6 +1059,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
}
}
+ if (mask & BIT(NL80211_STA_FLAG_WME) &&
+ local->hw.queues >= IEEE80211_NUM_ACS)
+ sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME);
+
/* auth flags will be set later for TDLS stations */
if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
ret = sta_apply_auth_flags(local, sta, mask, set);
@@ -1064,10 +1077,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
}
- if (mask & BIT(NL80211_STA_FLAG_WME))
- sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME);
-
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
+ sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
if (set & BIT(NL80211_STA_FLAG_MFP))
set_sta_flag(sta, WLAN_STA_MFP);
else
@@ -1377,11 +1388,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
if (err)
goto out_err;
- /* When peer becomes authorized, init rate control as well */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
- test_sta_flag(sta, WLAN_STA_AUTHORIZED))
- rate_control_rate_init(sta);
-
mutex_unlock(&local->sta_mtx);
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -1488,7 +1494,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
if (next_hop_sta)
memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN);
else
- memset(next_hop, 0, ETH_ALEN);
+ eth_zero_addr(next_hop);
memset(pinfo, 0, sizeof(*pinfo));
@@ -2273,7 +2279,6 @@ int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
{
struct sta_info *sta;
enum ieee80211_smps_mode old_req;
- int i;
if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP))
return -EINVAL;
@@ -2297,52 +2302,44 @@ int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
}
ht_dbg(sdata,
- "SMSP %d requested in AP mode, sending Action frame to %d stations\n",
+ "SMPS %d requested in AP mode, sending Action frame to %d stations\n",
smps_mode, atomic_read(&sdata->u.ap.num_mcast_sta));
mutex_lock(&sdata->local->sta_mtx);
- for (i = 0; i < STA_HASH_SIZE; i++) {
- for (sta = rcu_dereference_protected(sdata->local->sta_hash[i],
- lockdep_is_held(&sdata->local->sta_mtx));
- sta;
- sta = rcu_dereference_protected(sta->hnext,
- lockdep_is_held(&sdata->local->sta_mtx))) {
- /*
- * Only stations associated to our AP and
- * associated VLANs
- */
- if (sta->sdata->bss != &sdata->u.ap)
- continue;
+ list_for_each_entry(sta, &sdata->local->sta_list, list) {
+ /*
+ * Only stations associated to our AP and
+ * associated VLANs
+ */
+ if (sta->sdata->bss != &sdata->u.ap)
+ continue;
- /* This station doesn't support MIMO - skip it */
- if (sta_info_tx_streams(sta) == 1)
- continue;
+ /* This station doesn't support MIMO - skip it */
+ if (sta_info_tx_streams(sta) == 1)
+ continue;
- /*
- * Don't wake up a STA just to send the action frame
- * unless we are getting more restrictive.
- */
- if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
- !ieee80211_smps_is_restrictive(sta->known_smps_mode,
- smps_mode)) {
- ht_dbg(sdata,
- "Won't send SMPS to sleeping STA %pM\n",
- sta->sta.addr);
- continue;
- }
+ /*
+ * Don't wake up a STA just to send the action frame
+ * unless we are getting more restrictive.
+ */
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ !ieee80211_smps_is_restrictive(sta->known_smps_mode,
+ smps_mode)) {
+ ht_dbg(sdata, "Won't send SMPS to sleeping STA %pM\n",
+ sta->sta.addr);
+ continue;
+ }
- /*
- * If the STA is not authorized, wait until it gets
- * authorized and the action frame will be sent then.
- */
- if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
- continue;
+ /*
+ * If the STA is not authorized, wait until it gets
+ * authorized and the action frame will be sent then.
+ */
+ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ continue;
- ht_dbg(sdata, "Sending SMPS to %pM\n", sta->sta.addr);
- ieee80211_send_smps_action(sdata, smps_mode,
- sta->sta.addr,
- sdata->vif.bss_conf.bssid);
- }
+ ht_dbg(sdata, "Sending SMPS to %pM\n", sta->sta.addr);
+ ieee80211_send_smps_action(sdata, smps_mode, sta->sta.addr,
+ sdata->vif.bss_conf.bssid);
}
mutex_unlock(&sdata->local->sta_mtx);
@@ -3581,7 +3578,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
nullfunc->qos_ctrl = cpu_to_le16(7);
local_bh_disable();
- ieee80211_xmit(sdata, skb);
+ ieee80211_xmit(sdata, sta, skb);
local_bh_enable();
rcu_read_unlock();
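The __ieee80211_request_smps_ap() hunk above swaps the open-coded sta_hash bucket walk for a flat list_for_each_entry() over the local station list. A minimal kernel-style sketch of that iteration pattern; the ex_* structures and fields are hypothetical:

#include <linux/list.h>
#include <linux/types.h>

struct ex_sta {
	struct list_head list;	/* linked into ex_local.sta_list */
	bool authorized;
};

struct ex_local {
	struct list_head sta_list;
};

/* One flat walk; no per-bucket inner loop or hash-chain chasing. */
static int ex_count_authorized(struct ex_local *local)
{
	struct ex_sta *sta;
	int n = 0;

	list_for_each_entry(sta, &local->sta_list, list)
		if (sta->authorized)
			n++;

	return n;
}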
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index eeb0bbd69d98..23813ebb349c 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -18,172 +18,6 @@
#define DEBUGFS_FORMAT_BUFFER_SIZE 100
-#define TX_LATENCY_BIN_DELIMTER_C ','
-#define TX_LATENCY_BIN_DELIMTER_S ","
-#define TX_LATENCY_BINS_DISABLED "enable(bins disabled)\n"
-#define TX_LATENCY_DISABLED "disable\n"
-
-
-/*
- * Display if Tx latency statistics & bins are enabled/disabled
- */
-static ssize_t sta_tx_latency_stat_read(struct file *file,
- char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- struct ieee80211_tx_latency_bin_ranges *tx_latency;
- char *buf;
- int bufsz, i, ret;
- int pos = 0;
-
- rcu_read_lock();
-
- tx_latency = rcu_dereference(local->tx_latency);
-
- if (tx_latency && tx_latency->n_ranges) {
- bufsz = tx_latency->n_ranges * 15;
- buf = kzalloc(bufsz, GFP_ATOMIC);
- if (!buf)
- goto err;
-
- for (i = 0; i < tx_latency->n_ranges; i++)
- pos += scnprintf(buf + pos, bufsz - pos, "%d,",
- tx_latency->ranges[i]);
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- } else if (tx_latency) {
- bufsz = sizeof(TX_LATENCY_BINS_DISABLED) + 1;
- buf = kzalloc(bufsz, GFP_ATOMIC);
- if (!buf)
- goto err;
-
- pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
- TX_LATENCY_BINS_DISABLED);
- } else {
- bufsz = sizeof(TX_LATENCY_DISABLED) + 1;
- buf = kzalloc(bufsz, GFP_ATOMIC);
- if (!buf)
- goto err;
-
- pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
- TX_LATENCY_DISABLED);
- }
-
- rcu_read_unlock();
-
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
- kfree(buf);
-
- return ret;
-err:
- rcu_read_unlock();
- return -ENOMEM;
-}
-
-/*
- * Receive input from user regarding Tx latency statistics
- * The input should indicate if Tx latency statistics and bins are
- * enabled/disabled.
- * If bins are enabled input should indicate the amount of different bins and
- * their ranges. Each bin will count how many Tx frames transmitted within the
- * appropriate latency.
- * Legal input is:
- * a) "enable(bins disabled)" - to enable only general statistics
- * b) "a,b,c,d,...z" - to enable general statistics and bins, where all are
- * numbers and a < b < c < d.. < z
- * c) "disable" - disable all statistics
- * NOTE: must configure Tx latency statistics bins before stations connected.
- */
-
-static ssize_t sta_tx_latency_stat_write(struct file *file,
- const char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- char buf[128] = {};
- char *bins = buf;
- char *token;
- int buf_size, i, alloc_size;
- int prev_bin = 0;
- int n_ranges = 0;
- int ret = count;
- struct ieee80211_tx_latency_bin_ranges *tx_latency;
-
- if (sizeof(buf) <= count)
- return -EINVAL;
- buf_size = count;
- if (copy_from_user(buf, userbuf, buf_size))
- return -EFAULT;
-
- mutex_lock(&local->sta_mtx);
-
- /* cannot change config once we have stations */
- if (local->num_sta)
- goto unlock;
-
- tx_latency =
- rcu_dereference_protected(local->tx_latency,
- lockdep_is_held(&local->sta_mtx));
-
- /* disable Tx statistics */
- if (!strcmp(buf, TX_LATENCY_DISABLED)) {
- if (!tx_latency)
- goto unlock;
- RCU_INIT_POINTER(local->tx_latency, NULL);
- synchronize_rcu();
- kfree(tx_latency);
- goto unlock;
- }
-
- /* Tx latency already enabled */
- if (tx_latency)
- goto unlock;
-
- if (strcmp(TX_LATENCY_BINS_DISABLED, buf)) {
- /* check how many bins and between what ranges user requested */
- token = buf;
- while (*token != '\0') {
- if (*token == TX_LATENCY_BIN_DELIMTER_C)
- n_ranges++;
- token++;
- }
- n_ranges++;
- }
-
- alloc_size = sizeof(struct ieee80211_tx_latency_bin_ranges) +
- n_ranges * sizeof(u32);
- tx_latency = kzalloc(alloc_size, GFP_ATOMIC);
- if (!tx_latency) {
- ret = -ENOMEM;
- goto unlock;
- }
- tx_latency->n_ranges = n_ranges;
- for (i = 0; i < n_ranges; i++) { /* setting bin ranges */
- token = strsep(&bins, TX_LATENCY_BIN_DELIMTER_S);
- sscanf(token, "%d", &tx_latency->ranges[i]);
- /* bins values should be in ascending order */
- if (prev_bin >= tx_latency->ranges[i]) {
- ret = -EINVAL;
- kfree(tx_latency);
- goto unlock;
- }
- prev_bin = tx_latency->ranges[i];
- }
- rcu_assign_pointer(local->tx_latency, tx_latency);
-
-unlock:
- mutex_unlock(&local->sta_mtx);
-
- return ret;
-}
-
-static const struct file_operations stats_tx_latency_ops = {
- .write = sta_tx_latency_stat_write,
- .read = sta_tx_latency_stat_read,
- .open = simple_open,
- .llseek = generic_file_llseek,
-};
-
int mac80211_format_buffer(char __user *userbuf, size_t count,
loff_t *ppos, char *fmt, ...)
{
@@ -440,8 +274,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
- DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted,
- local->tx_handlers_drop_unencrypted);
DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
local->tx_handlers_drop_fragment);
DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
@@ -475,6 +307,4 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount);
DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount);
-
- DEBUGFS_DEVSTATS_ADD(tx_latency);
}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index c68896adfa96..29236e832e44 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -177,7 +177,6 @@ static ssize_t ieee80211_if_write_##name(struct file *file, \
IEEE80211_IF_FILE_R(name)
/* common attributes */
-IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
HEX);
IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
@@ -562,7 +561,6 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
static void add_common_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 94c70091bbd7..252859e90e8a 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -39,13 +39,6 @@ static const struct file_operations sta_ ##name## _ops = { \
.llseek = generic_file_llseek, \
}
-#define STA_OPS_W(name) \
-static const struct file_operations sta_ ##name## _ops = { \
- .write = sta_##name##_write, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-}
-
#define STA_OPS_RW(name) \
static const struct file_operations sta_ ##name## _ops = { \
.read = sta_##name##_read, \
@@ -398,131 +391,6 @@ static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf,
}
STA_OPS(last_rx_rate);
-static int
-sta_tx_latency_stat_header(struct ieee80211_tx_latency_bin_ranges *tx_latency,
- char *buf, int pos, int bufsz)
-{
- int i;
- int range_count = tx_latency->n_ranges;
- u32 *bin_ranges = tx_latency->ranges;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Station\t\t\tTID\tMax\tAvg");
- if (range_count) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t<=%d", bin_ranges[0]);
- for (i = 0; i < range_count - 1; i++)
- pos += scnprintf(buf + pos, bufsz - pos, "\t%d-%d",
- bin_ranges[i], bin_ranges[i+1]);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%d<", bin_ranges[range_count - 1]);
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
- return pos;
-}
-
-static int
-sta_tx_latency_stat_table(struct ieee80211_tx_latency_bin_ranges *tx_lat_range,
- struct ieee80211_tx_latency_stat *tx_lat,
- char *buf, int pos, int bufsz, int tid)
-{
- u32 avg = 0;
- int j;
- int bin_count = tx_lat->bin_count;
-
- pos += scnprintf(buf + pos, bufsz - pos, "\t\t\t%d", tid);
- /* make sure you don't divide in 0 */
- if (tx_lat->counter)
- avg = tx_lat->sum / tx_lat->counter;
-
- pos += scnprintf(buf + pos, bufsz - pos, "\t%d\t%d",
- tx_lat->max, avg);
-
- if (tx_lat_range->n_ranges && tx_lat->bins)
- for (j = 0; j < bin_count; j++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%d", tx_lat->bins[j]);
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
- return pos;
-}
-
-/*
- * Output Tx latency statistics station && restart all statistics information
- */
-static ssize_t sta_tx_latency_stat_read(struct file *file,
- char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct sta_info *sta = file->private_data;
- struct ieee80211_local *local = sta->local;
- struct ieee80211_tx_latency_bin_ranges *tx_latency;
- char *buf;
- int bufsz, ret, i;
- int pos = 0;
-
- bufsz = 20 * IEEE80211_NUM_TIDS *
- sizeof(struct ieee80211_tx_latency_stat);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rcu_read_lock();
-
- tx_latency = rcu_dereference(local->tx_latency);
-
- if (!sta->tx_lat) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "Tx latency statistics are not enabled\n");
- goto unlock;
- }
-
- pos = sta_tx_latency_stat_header(tx_latency, buf, pos, bufsz);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->sta.addr);
- for (i = 0; i < IEEE80211_NUM_TIDS; i++)
- pos = sta_tx_latency_stat_table(tx_latency, &sta->tx_lat[i],
- buf, pos, bufsz, i);
-unlock:
- rcu_read_unlock();
-
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
- kfree(buf);
-
- return ret;
-}
-STA_OPS(tx_latency_stat);
-
-static ssize_t sta_tx_latency_stat_reset_write(struct file *file,
- const char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- u32 *bins;
- int bin_count;
- struct sta_info *sta = file->private_data;
- int i;
-
- if (!sta->tx_lat)
- return -EINVAL;
-
- for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
- bins = sta->tx_lat[i].bins;
- bin_count = sta->tx_lat[i].bin_count;
-
- sta->tx_lat[i].max = 0;
- sta->tx_lat[i].sum = 0;
- sta->tx_lat[i].counter = 0;
-
- if (bin_count)
- memset(bins, 0, bin_count * sizeof(u32));
- }
-
- return count;
-}
-STA_OPS_W(tx_latency_stat_reset);
-
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
sta->debugfs.dir, sta, &sta_ ##name## _ops);
@@ -576,8 +444,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(last_ack_signal);
DEBUGFS_ADD(current_tx_rate);
DEBUGFS_ADD(last_rx_rate);
- DEBUGFS_ADD(tx_latency_stat);
- DEBUGFS_ADD(tx_latency_stat_reset);
DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index fdeda17b8dd2..26e1ca8a474a 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -941,13 +941,13 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
trace_drv_return_void(local);
}
-static inline void drv_rssi_callback(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- const enum ieee80211_rssi_event event)
+static inline void drv_event_callback(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_event *event)
{
- trace_drv_rssi_callback(local, sdata, event);
- if (local->ops->rssi_callback)
- local->ops->rssi_callback(&local->hw, &sdata->vif, event);
+ trace_drv_event_callback(local, sdata, event);
+ if (local->ops->event_callback)
+ local->ops->event_callback(&local->hw, &sdata->vif, event);
trace_drv_return_void(local);
}
@@ -1367,4 +1367,16 @@ drv_tdls_recv_channel_switch(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline void drv_wake_tx_queue(struct ieee80211_local *local,
+ struct txq_info *txq)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
+
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_wake_tx_queue(local, sdata, txq);
+ local->ops->wake_tx_queue(&local->hw, &txq->txq);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index ff630be2ca75..7a76ce639d58 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -252,8 +252,6 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
break;
}
- if (bw != sta->sta.bandwidth)
- changed = true;
sta->sta.bandwidth = bw;
sta->cur_max_bandwidth =
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b606b53a49a7..bfef1b215050 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -188,6 +188,16 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
*/
pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
chandef, 0);
+
+ /* add VHT capability and information IEs */
+ if (chandef->width != NL80211_CHAN_WIDTH_20 &&
+ chandef->width != NL80211_CHAN_WIDTH_40 &&
+ sband->vht_cap.vht_supported) {
+ pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
+ pos = ieee80211_ie_build_vht_oper(pos, &sband->vht_cap,
+ chandef);
+ }
}
if (local->hw.queues >= IEEE80211_NUM_ACS)
@@ -249,8 +259,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
if (presp)
kfree_rcu(presp, rcu_head);
- sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
-
/* make a copy of the chandef, it could be modified below. */
chandef = *req_chandef;
chan = chandef.chan;
@@ -417,6 +425,11 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
NL80211_CHAN_WIDTH_20_NOHT);
chandef.width = sdata->u.ibss.chandef.width;
break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_160:
+ chandef = sdata->u.ibss.chandef;
+ chandef.chan = cbss->channel;
+ break;
default:
/* fall back to 20 MHz for unsupported modes */
cfg80211_chandef_create(&chandef, cbss->channel,
@@ -470,22 +483,19 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct beacon_data *presp, *old_presp;
struct cfg80211_bss *cbss;
const struct cfg80211_bss_ies *ies;
- u16 capability;
+ u16 capability = 0;
u64 tsf;
int ret = 0;
sdata_assert_lock(sdata);
- capability = WLAN_CAPABILITY_IBSS;
-
if (ifibss->privacy)
- capability |= WLAN_CAPABILITY_PRIVACY;
+ capability = WLAN_CAPABILITY_PRIVACY;
cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
ifibss->bssid, ifibss->ssid,
- ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
- WLAN_CAPABILITY_PRIVACY,
- capability);
+ ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY(ifibss->privacy));
if (WARN_ON(!cbss)) {
ret = -EINVAL;
@@ -525,23 +535,17 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct cfg80211_bss *cbss;
int err, changed = 0;
- u16 capability;
sdata_assert_lock(sdata);
/* update cfg80211 bss information with the new channel */
if (!is_zero_ether_addr(ifibss->bssid)) {
- capability = WLAN_CAPABILITY_IBSS;
-
- if (ifibss->privacy)
- capability |= WLAN_CAPABILITY_PRIVACY;
-
cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
ifibss->chandef.chan,
ifibss->bssid, ifibss->ssid,
- ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
- WLAN_CAPABILITY_PRIVACY,
- capability);
+ ifibss->ssid_len,
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY(ifibss->privacy));
/* XXX: should not really modify cfg80211 data */
if (cbss) {
cbss->channel = sdata->csa_chandef.chan;
@@ -682,19 +686,13 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
struct cfg80211_bss *cbss;
struct beacon_data *presp;
struct sta_info *sta;
- u16 capability;
if (!is_zero_ether_addr(ifibss->bssid)) {
- capability = WLAN_CAPABILITY_IBSS;
-
- if (ifibss->privacy)
- capability |= WLAN_CAPABILITY_PRIVACY;
-
cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
ifibss->bssid, ifibss->ssid,
- ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
- WLAN_CAPABILITY_PRIVACY,
- capability);
+ ifibss->ssid_len,
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY(ifibss->privacy));
if (cbss) {
cfg80211_unlink_bss(local->hw.wiphy, cbss);
@@ -980,110 +978,140 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0);
}
-static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt, size_t len,
- struct ieee80211_rx_status *rx_status,
- struct ieee802_11_elems *elems)
+static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee802_11_elems *elems,
+ struct ieee80211_channel *channel)
{
- struct ieee80211_local *local = sdata->local;
- struct cfg80211_bss *cbss;
- struct ieee80211_bss *bss;
struct sta_info *sta;
- struct ieee80211_channel *channel;
- u64 beacon_timestamp, rx_timestamp;
- u32 supp_rates = 0;
enum ieee80211_band band = rx_status->band;
enum nl80211_bss_scan_width scan_width;
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
bool rates_updated = false;
+ u32 supp_rates = 0;
- channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
- if (!channel)
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
return;
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) {
+ if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid))
+ return;
- rcu_read_lock();
- sta = sta_info_get(sdata, mgmt->sa);
-
- if (elems->supp_rates) {
- supp_rates = ieee80211_sta_get_rates(sdata, elems,
- band, NULL);
- if (sta) {
- u32 prev_rates;
-
- prev_rates = sta->sta.supp_rates[band];
- /* make sure mandatory rates are always added */
- scan_width = NL80211_BSS_CHAN_WIDTH_20;
- if (rx_status->flag & RX_FLAG_5MHZ)
- scan_width = NL80211_BSS_CHAN_WIDTH_5;
- if (rx_status->flag & RX_FLAG_10MHZ)
- scan_width = NL80211_BSS_CHAN_WIDTH_10;
-
- sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband,
- scan_width);
- if (sta->sta.supp_rates[band] != prev_rates) {
- ibss_dbg(sdata,
- "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
- sta->sta.addr, prev_rates,
- sta->sta.supp_rates[band]);
- rates_updated = true;
- }
- } else {
- rcu_read_unlock();
- sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
- mgmt->sa, supp_rates);
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->sa);
+
+ if (elems->supp_rates) {
+ supp_rates = ieee80211_sta_get_rates(sdata, elems,
+ band, NULL);
+ if (sta) {
+ u32 prev_rates;
+
+ prev_rates = sta->sta.supp_rates[band];
+ /* make sure mandatory rates are always added */
+ scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->flag & RX_FLAG_5MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ if (rx_status->flag & RX_FLAG_10MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_10;
+
+ sta->sta.supp_rates[band] = supp_rates |
+ ieee80211_mandatory_rates(sband, scan_width);
+ if (sta->sta.supp_rates[band] != prev_rates) {
+ ibss_dbg(sdata,
+ "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
+ sta->sta.addr, prev_rates,
+ sta->sta.supp_rates[band]);
+ rates_updated = true;
}
+ } else {
+ rcu_read_unlock();
+ sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
+ mgmt->sa, supp_rates);
}
+ }
- if (sta && elems->wmm_info)
- sta->sta.wme = true;
-
- if (sta && elems->ht_operation && elems->ht_cap_elem &&
- sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
- sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 &&
- sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) {
- /* we both use HT */
- struct ieee80211_ht_cap htcap_ie;
- struct cfg80211_chan_def chandef;
-
- ieee80211_ht_oper_to_chandef(channel,
- elems->ht_operation,
- &chandef);
-
- memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
-
- /*
- * fall back to HT20 if we don't use or use
- * the other extension channel
- */
- if (chandef.center_freq1 !=
- sdata->u.ibss.chandef.center_freq1)
- htcap_ie.cap_info &=
- cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
- rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(
- sdata, sband, &htcap_ie, sta);
+ if (sta && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS)
+ sta->sta.wme = true;
+
+ if (sta && elems->ht_operation && elems->ht_cap_elem &&
+ sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
+ sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 &&
+ sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) {
+ /* we both use HT */
+ struct ieee80211_ht_cap htcap_ie;
+ struct cfg80211_chan_def chandef;
+ enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
+
+ ieee80211_ht_oper_to_chandef(channel,
+ elems->ht_operation,
+ &chandef);
+
+ memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
+ rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+ &htcap_ie,
+ sta);
+
+ if (elems->vht_operation && elems->vht_cap_elem &&
+ sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20 &&
+ sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_40) {
+ /* we both use VHT */
+ struct ieee80211_vht_cap cap_ie;
+ struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
+
+ ieee80211_vht_oper_to_chandef(channel,
+ elems->vht_operation,
+ &chandef);
+ memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
+ ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+ &cap_ie, sta);
+ if (memcmp(&cap, &sta->sta.vht_cap, sizeof(cap)))
+ rates_updated |= true;
}
- if (sta && rates_updated) {
- u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
- u8 rx_nss = sta->sta.rx_nss;
+ if (bw != sta->sta.bandwidth)
+ rates_updated |= true;
- /* Force rx_nss recalculation */
- sta->sta.rx_nss = 0;
- rate_control_rate_init(sta);
- if (sta->sta.rx_nss != rx_nss)
- changed |= IEEE80211_RC_NSS_CHANGED;
+ if (!cfg80211_chandef_compatible(&sdata->u.ibss.chandef,
+ &chandef))
+ WARN_ON_ONCE(1);
+ }
- drv_sta_rc_update(local, sdata, &sta->sta, changed);
- }
+ if (sta && rates_updated) {
+ u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+ u8 rx_nss = sta->sta.rx_nss;
- rcu_read_unlock();
+ /* Force rx_nss recalculation */
+ sta->sta.rx_nss = 0;
+ rate_control_rate_init(sta);
+ if (sta->sta.rx_nss != rx_nss)
+ changed |= IEEE80211_RC_NSS_CHANGED;
+
+ drv_sta_rc_update(local, sdata, &sta->sta, changed);
}
+ rcu_read_unlock();
+}
+
+static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee802_11_elems *elems)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct cfg80211_bss *cbss;
+ struct ieee80211_bss *bss;
+ struct ieee80211_channel *channel;
+ u64 beacon_timestamp, rx_timestamp;
+ u32 supp_rates = 0;
+ enum ieee80211_band band = rx_status->band;
+
+ channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
+ if (!channel)
+ return;
+
+ ieee80211_update_sta_info(sdata, mgmt, len, rx_status, elems, channel);
+
bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
channel);
if (!bss)
@@ -1273,7 +1301,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
- NULL, scan_width);
+ NULL, 0, scan_width);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -1304,14 +1332,82 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
if (ifibss->privacy)
capability |= WLAN_CAPABILITY_PRIVACY;
- else
- sdata->drop_unencrypted = 0;
__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
&ifibss->chandef, ifibss->basic_rates,
capability, 0, true);
}
+static unsigned ibss_setup_channels(struct wiphy *wiphy,
+ struct ieee80211_channel **channels,
+ unsigned int channels_max,
+ u32 center_freq, u32 width)
+{
+ struct ieee80211_channel *chan = NULL;
+ unsigned int n_chan = 0;
+ u32 start_freq, end_freq, freq;
+
+ if (width <= 20) {
+ start_freq = center_freq;
+ end_freq = center_freq;
+ } else {
+ start_freq = center_freq - width / 2 + 10;
+ end_freq = center_freq + width / 2 - 10;
+ }
+
+ for (freq = start_freq; freq <= end_freq; freq += 20) {
+ chan = ieee80211_get_channel(wiphy, freq);
+ if (!chan)
+ continue;
+ if (n_chan >= channels_max)
+ return n_chan;
+
+ channels[n_chan] = chan;
+ n_chan++;
+ }
+
+ return n_chan;
+}
+
+static unsigned int
+ieee80211_ibss_setup_scan_channels(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ struct ieee80211_channel **channels,
+ unsigned int channels_max)
+{
+ unsigned int n_chan = 0;
+ u32 width, cf1, cf2 = 0;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ width = 40;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ cf2 = chandef->center_freq2;
+ /* fall through */
+ case NL80211_CHAN_WIDTH_80:
+ width = 80;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ width = 160;
+ break;
+ default:
+ width = 20;
+ break;
+ }
+
+ cf1 = chandef->center_freq1;
+
+ n_chan = ibss_setup_channels(wiphy, channels, channels_max, cf1, width);
+
+ if (cf2)
+ n_chan += ibss_setup_channels(wiphy, &channels[n_chan],
+ channels_max - n_chan, cf2,
+ width);
+
+ return n_chan;
+}
+
/*
* This function is called with state == IEEE80211_IBSS_MLME_SEARCH
*/
@@ -1325,7 +1421,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
const u8 *bssid = NULL;
enum nl80211_bss_scan_width scan_width;
int active_ibss;
- u16 capability;
sdata_assert_lock(sdata);
@@ -1335,9 +1430,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
if (active_ibss)
return;
- capability = WLAN_CAPABILITY_IBSS;
- if (ifibss->privacy)
- capability |= WLAN_CAPABILITY_PRIVACY;
if (ifibss->fixed_bssid)
bssid = ifibss->bssid;
if (ifibss->fixed_channel)
@@ -1346,8 +1438,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
bssid = ifibss->bssid;
cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
ifibss->ssid, ifibss->ssid_len,
- WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
- capability);
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY(ifibss->privacy));
if (cbss) {
struct ieee80211_bss *bss;
@@ -1381,11 +1473,18 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
/* Selected IBSS not found in current scan results - try to scan */
if (time_after(jiffies, ifibss->last_scan_completed +
IEEE80211_SCAN_INTERVAL)) {
+ struct ieee80211_channel *channels[8];
+ unsigned int num;
+
sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+ num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+ &ifibss->chandef,
+ channels,
+ ARRAY_SIZE(channels));
scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid,
- ifibss->ssid_len, chan,
+ ifibss->ssid_len, channels, num,
scan_width);
} else {
int interval = IEEE80211_SCAN_INTERVAL;
@@ -1742,7 +1841,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
ieee80211_ibss_disconnect(sdata);
ifibss->ssid_len = 0;
- memset(ifibss->bssid, 0, ETH_ALEN);
+ eth_zero_addr(ifibss->bssid);
/* remove beacon */
kfree(sdata->u.ibss.ie);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8d53d65bd2ab..ab46ab4a7249 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -26,6 +26,7 @@
#include <linux/etherdevice.h>
#include <linux/leds.h>
#include <linux/idr.h>
+#include <linux/rhashtable.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
@@ -810,6 +811,19 @@ struct mac80211_qos_map {
struct rcu_head rcu_head;
};
+enum txq_info_flags {
+ IEEE80211_TXQ_STOP,
+ IEEE80211_TXQ_AMPDU,
+};
+
+struct txq_info {
+ struct sk_buff_head queue;
+ unsigned long flags;
+
+ /* keep last! */
+ struct ieee80211_txq txq;
+};
+
struct ieee80211_sub_if_data {
struct list_head list;
@@ -830,8 +844,6 @@ struct ieee80211_sub_if_data {
unsigned long state;
- int drop_unencrypted;
-
char name[IFNAMSIZ];
/* Fragment table for host-based reassembly */
@@ -854,6 +866,7 @@ struct ieee80211_sub_if_data {
bool control_port_no_encrypt;
int encrypt_headroom;
+ atomic_t txqs_len[IEEE80211_NUM_ACS];
struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
struct mac80211_qos_map __rcu *qos_map;
@@ -1042,24 +1055,6 @@ struct tpt_led_trigger {
};
#endif
-/*
- * struct ieee80211_tx_latency_bin_ranges - Tx latency statistics bins ranges
- *
- * Measuring Tx latency statistics. Counts how many Tx frames transmitted in a
- * certain latency range (in Milliseconds). Each station that uses these
- * ranges will have bins to count the amount of frames received in that range.
- * The user can configure the ranges via debugfs.
- * If ranges is NULL then Tx latency statistics bins are disabled for all
- * stations.
- *
- * @n_ranges: number of ranges that are taken in account
- * @ranges: the ranges that the user requested or NULL if disabled.
- */
-struct ieee80211_tx_latency_bin_ranges {
- int n_ranges;
- u32 ranges[];
-};
-
/**
* mac80211 scan flags - currently active scan mode
*
@@ -1207,16 +1202,10 @@ struct ieee80211_local {
spinlock_t tim_lock;
unsigned long num_sta;
struct list_head sta_list;
- struct sta_info __rcu *sta_hash[STA_HASH_SIZE];
+ struct rhashtable sta_hash;
struct timer_list sta_cleanup;
int sta_generation;
- /*
- * Tx latency statistics parameters for all stations.
- * Can enable via debugfs (NULL when disabled).
- */
- struct ieee80211_tx_latency_bin_ranges __rcu *tx_latency;
-
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet;
@@ -1298,7 +1287,6 @@ struct ieee80211_local {
/* TX/RX handler statistics */
unsigned int tx_handlers_drop;
unsigned int tx_handlers_queued;
- unsigned int tx_handlers_drop_unencrypted;
unsigned int tx_handlers_drop_fragment;
unsigned int tx_handlers_drop_wep;
unsigned int tx_handlers_drop_not_assoc;
@@ -1476,6 +1464,10 @@ static inline struct ieee80211_local *hw_to_local(
return container_of(hw, struct ieee80211_local, hw);
}
+static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq)
+{
+ return container_of(txq, struct txq_info, txq);
+}
static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
{
@@ -1568,7 +1560,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
void ieee80211_scan_work(struct work_struct *work);
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan,
+ struct ieee80211_channel **channels,
+ unsigned int n_channels,
enum nl80211_bss_scan_width scan_width);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
@@ -1617,6 +1610,7 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
int ieee80211_iface_init(void);
void ieee80211_iface_exit(void);
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ unsigned char name_assign_type,
struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params);
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
@@ -1784,7 +1778,8 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
bool bss_notify);
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, struct sk_buff *skb);
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
@@ -1929,6 +1924,9 @@ static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
return true;
}
+void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct txq_info *txq, int tid);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
const u8 *extra, size_t extra_len, const u8 *bssid,
@@ -1967,10 +1965,6 @@ int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata);
-size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids,
- const u8 *after_ric, int n_after_ric,
- size_t offset);
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap);
@@ -1979,6 +1973,8 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 prot_mode);
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
+u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ const struct cfg80211_chan_def *chandef);
int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
const struct ieee80211_supported_band *sband,
const u8 *srates, int srates_len, u32 *rates);
@@ -1994,6 +1990,9 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
const struct ieee80211_ht_operation *ht_oper,
struct cfg80211_chan_def *chandef);
+void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
+ const struct ieee80211_vht_operation *oper,
+ struct cfg80211_chan_def *chandef);
u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
int __must_check
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 81a27516813e..bab5c63c0bad 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -819,13 +819,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
* (because if we remove a STA after ops->remove_interface()
* the driver will have removed the vif info already!)
*
- * This is relevant only in WDS mode, in all other modes we've
- * already removed all stations when disconnecting or similar,
- * so warn otherwise.
+	 * In WDS mode a station must exist here and be flushed; for
+	 * AP_VLANs, stations may exist since there's nothing else that
+	 * would have removed them, but in other modes there shouldn't
+	 * be any stations.
*/
flushed = sta_info_flush(sdata);
- WARN_ON_ONCE((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
- (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1));
+ WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+ ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
+ (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
/* don't count this interface for promisc/allmulti while it is down */
if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
@@ -969,6 +971,13 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ if (sdata->vif.txq) {
+ struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+
+ ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+ atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
+ }
+
if (local->open_count == 0)
ieee80211_clear_tx_pending(local);
@@ -1508,7 +1517,6 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
}
/* reset some values that shouldn't be kept across type changes */
- sdata->drop_unencrypted = 0;
if (type == NL80211_IFTYPE_STATION)
sdata->u.mgd.use_4addr = false;
@@ -1649,11 +1657,13 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
}
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ unsigned char name_assign_type,
struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params)
{
struct net_device *ndev = NULL;
struct ieee80211_sub_if_data *sdata = NULL;
+ struct txq_info *txqi;
int ret, i;
int txqs = 1;
@@ -1673,11 +1683,19 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ieee80211_assign_perm_addr(local, wdev->address, type);
memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
} else {
+ int size = ALIGN(sizeof(*sdata) + local->hw.vif_data_size,
+ sizeof(void *));
+ int txq_size = 0;
+
+ if (local->ops->wake_tx_queue)
+ txq_size += sizeof(struct txq_info) +
+ local->hw.txq_data_size;
+
if (local->hw.queues >= IEEE80211_NUM_ACS)
txqs = IEEE80211_NUM_ACS;
- ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
- name, NET_NAME_UNKNOWN,
+ ndev = alloc_netdev_mqs(size + txq_size,
+ name, name_assign_type,
ieee80211_if_setup, txqs, 1);
if (!ndev)
return -ENOMEM;
@@ -1711,6 +1729,11 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
memcpy(sdata->name, ndev->name, IFNAMSIZ);
+ if (txq_size) {
+ txqi = netdev_priv(ndev) + size;
+ ieee80211_init_tx_queue(sdata, NULL, txqi, 0);
+ }
+
sdata->dev = ndev;
}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0825d76edcfc..2291cd730091 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -492,6 +492,7 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
for (j = 0; j < len; j++)
key->u.gen.rx_pn[i][j] =
seq[len - j - 1];
+ key->flags |= KEY_FLAG_CIPHER_SCHEME;
}
}
memcpy(key->conf.key, key_data, key_len);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index d57a9915494f..c5a31835be0e 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -30,10 +30,12 @@ struct sta_info;
* @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
* in the hardware for TX crypto hardware acceleration.
* @KEY_FLAG_TAINTED: Key is tainted and packets should be dropped.
+ * @KEY_FLAG_CIPHER_SCHEME: This key is for a hardware cipher scheme
*/
enum ieee80211_internal_key_flags {
KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
KEY_FLAG_TAINTED = BIT(1),
+ KEY_FLAG_CIPHER_SCHEME = BIT(2),
};
enum ieee80211_internal_tkip_state {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 5e09d354c5a5..df3051d96aff 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -557,6 +557,9 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local = wiphy_priv(wiphy);
+ if (sta_info_init(local))
+ goto err_free;
+
local->hw.wiphy = wiphy;
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
@@ -629,8 +632,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
spin_lock_init(&local->ack_status_lock);
idr_init(&local->ack_status_frames);
- sta_info_init(local);
-
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
skb_queue_head_init(&local->pending[i]);
atomic_set(&local->agg_queue_stop[i], 0);
@@ -650,6 +651,9 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_roc_setup(local);
return &local->hw;
+ err_free:
+ wiphy_free(wiphy);
+ return NULL;
}
EXPORT_SYMBOL(ieee80211_alloc_hw_nm);
@@ -1035,6 +1039,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->dynamic_ps_forced_timeout = -1;
+ if (!local->hw.txq_ac_max_pending)
+ local->hw.txq_ac_max_pending = 64;
+
result = ieee80211_wep_init(local);
if (result < 0)
wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
@@ -1057,7 +1064,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* add one default STA interface if supported */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
!(hw->flags & IEEE80211_HW_NO_AUTO_VIF)) {
- result = ieee80211_if_add(local, "wlan%d", NULL,
+ result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL,
NL80211_IFTYPE_STATION, NULL);
if (result)
wiphy_warn(local->hw.wiphy,
@@ -1173,7 +1180,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
- sta_info_stop(local);
ieee80211_wep_free(local);
ieee80211_led_exit(local);
kfree(local->int_scan_req);
@@ -1201,8 +1207,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
ieee80211_free_ack_frame, NULL);
idr_destroy(&local->ack_status_frames);
- kfree(rcu_access_pointer(local->tx_latency));
-
sta_info_stop(local);
wiphy_free(local->hw.wiphy);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0c8b2a77d312..d4684242e78b 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -520,7 +520,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
} else {
*fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
- memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
+ eth_zero_addr(hdr->addr1); /* RA is resolved later */
memcpy(hdr->addr2, meshsa, ETH_ALEN);
memcpy(hdr->addr3, meshda, ETH_ALEN);
memcpy(hdr->addr4, meshsa, ETH_ALEN);
@@ -574,7 +574,8 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 changed;
- ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ);
+ if (ifmsh->mshcfg.plink_timeout > 0)
+ ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ);
mesh_path_expire(sdata);
changed = mesh_accept_plinks_update(sdata);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index b488e1859b18..60d737f144e3 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -17,7 +17,7 @@
#define PLINK_GET_PLID(p) (p + 4)
#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
- jiffies + HZ * t / 1000))
+ jiffies + msecs_to_jiffies(t)))
enum plink_event {
PLINK_UNDEFINED,
@@ -382,6 +382,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
struct ieee80211_supported_band *sband;
u32 rates, basic_rates = 0, changed = 0;
+ enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
sband = local->hw.wiphy->bands[band];
rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
@@ -401,6 +402,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
elems->ht_cap_elem, sta))
changed |= IEEE80211_RC_BW_CHANGED;
+ if (bw != sta->sta.bandwidth)
+ changed |= IEEE80211_RC_BW_CHANGED;
+
/* HT peer is operating 20MHz-only */
if (elems->ht_operation &&
!(elems->ht_operation->ht_param &
@@ -621,9 +625,9 @@ static void mesh_plink_timer(unsigned long data)
sta->llid, sta->plid, reason);
}
-static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
+static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
{
- sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
+ sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
sta->plink_timer.data = (unsigned long) sta;
sta->plink_timer.function = mesh_plink_timer;
sta->plink_timeout = timeout;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 142f66aece18..26053bf2faa8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1168,11 +1168,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (!conf) {
sdata_info(sdata,
"no channel context assigned to vif?, disconnecting\n");
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- mutex_unlock(&local->chanctx_mtx);
- mutex_unlock(&local->mtx);
- return;
+ goto drop_connection;
}
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
@@ -1181,11 +1177,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
!(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
sdata_info(sdata,
"driver doesn't support chan-switch with channel contexts\n");
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- mutex_unlock(&local->chanctx_mtx);
- mutex_unlock(&local->mtx);
- return;
+ goto drop_connection;
}
ch_switch.timestamp = timestamp;
@@ -1197,11 +1189,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (drv_pre_channel_switch(sdata, &ch_switch)) {
sdata_info(sdata,
"preparing for channel switch failed, disconnecting\n");
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- mutex_unlock(&local->chanctx_mtx);
- mutex_unlock(&local->mtx);
- return;
+ goto drop_connection;
}
res = ieee80211_vif_reserve_chanctx(sdata, &csa_ie.chandef,
@@ -1210,11 +1198,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
res);
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- mutex_unlock(&local->chanctx_mtx);
- mutex_unlock(&local->mtx);
- return;
+ goto drop_connection;
}
mutex_unlock(&local->chanctx_mtx);
@@ -1244,6 +1228,11 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mod_timer(&ifmgd->chswitch_timer,
TU_TO_EXP_TIME((csa_ie.count - 1) *
cbss->beacon_interval));
+ return;
+ drop_connection:
+ ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
+ mutex_unlock(&local->chanctx_mtx);
+ mutex_unlock(&local->mtx);
}
static bool
@@ -1359,15 +1348,15 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
*/
if (has_80211h_pwr &&
(!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) {
- sdata_info(sdata,
- "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
- pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
- sdata->u.mgd.bssid);
+ sdata_dbg(sdata,
+ "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
+ pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
+ sdata->u.mgd.bssid);
new_ap_level = pwr_level_80211h;
} else { /* has_cisco_pwr is always true here. */
- sdata_info(sdata,
- "Limiting TX power to %d dBm as advertised by %pM\n",
- pwr_level_cisco, sdata->u.mgd.bssid);
+ sdata_dbg(sdata,
+ "Limiting TX power to %d dBm as advertised by %pM\n",
+ pwr_level_cisco, sdata->u.mgd.bssid);
new_ap_level = pwr_level_cisco;
}
@@ -1633,9 +1622,6 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
- if (local->quiescing || local->suspended)
- return;
-
ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
}
@@ -2045,7 +2031,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_flush_queues(local, sdata, false);
/* clear bssid only after building the needed mgmt frames */
- memset(ifmgd->bssid, 0, ETH_ALEN);
+ eth_zero_addr(ifmgd->bssid);
/* remove AP and TDLS peers */
sta_info_flush(sdata);
@@ -2260,7 +2246,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
else
ssid_len = ssid[1];
- ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
+ ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
ssid + 2, ssid_len, NULL,
0, (u32) -1, true, 0,
ifmgd->associated->channel, false);
@@ -2372,6 +2358,24 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_ap_probereq_get);
+static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata,
+ const u8 *buf, size_t len, bool tx,
+ u16 reason)
+{
+ struct ieee80211_event event = {
+ .type = MLME_EVENT,
+ .u.mlme.data = tx ? DEAUTH_TX_EVENT : DEAUTH_RX_EVENT,
+ .u.mlme.reason = reason,
+ };
+
+ if (tx)
+ cfg80211_tx_mlme_mgmt(sdata->dev, buf, len);
+ else
+ cfg80211_rx_mlme_mgmt(sdata->dev, buf, len);
+
+ drv_event_callback(sdata->local, sdata, &event);
+}
+
static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -2397,8 +2401,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
}
mutex_unlock(&local->mtx);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- IEEE80211_DEAUTH_FRAME_LEN);
+ ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
+
sdata_unlock(sdata);
}
@@ -2477,7 +2482,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, auth_data->bss->bssid);
- memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+ eth_zero_addr(sdata->u.mgd.bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
mutex_lock(&sdata->local->mtx);
@@ -2522,6 +2527,10 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
u8 bssid[ETH_ALEN];
u16 auth_alg, auth_transaction, status_code;
struct sta_info *sta;
+ struct ieee80211_event event = {
+ .type = MLME_EVENT,
+ .u.mlme.data = AUTH_EVENT,
+ };
sdata_assert_lock(sdata);
@@ -2554,6 +2563,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
mgmt->sa, status_code);
ieee80211_destroy_auth_data(sdata, false);
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+ event.u.mlme.status = MLME_DENIED;
+ event.u.mlme.reason = status_code;
+ drv_event_callback(sdata->local, sdata, &event);
return;
}
@@ -2576,6 +2588,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
return;
}
+ event.u.mlme.status = MLME_SUCCESS;
+ drv_event_callback(sdata->local, sdata, &event);
sdata_info(sdata, "authenticated\n");
ifmgd->auth_data->done = true;
ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
@@ -2694,7 +2708,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+ ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code);
}
@@ -2720,7 +2734,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+ ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code);
}
static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
@@ -2790,7 +2804,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
- memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+ eth_zero_addr(sdata->u.mgd.bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
mutex_lock(&sdata->local->mtx);
@@ -2982,10 +2996,14 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
rate_control_rate_init(sta);
- if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
+ if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
set_sta_flag(sta, WLAN_STA_MFP);
+ sta->sta.mfp = true;
+ } else {
+ sta->sta.mfp = false;
+ }
- sta->sta.wme = elems.wmm_param;
+ sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
@@ -3055,6 +3073,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
u8 *pos;
bool reassoc;
struct cfg80211_bss *bss;
+ struct ieee80211_event event = {
+ .type = MLME_EVENT,
+ .u.mlme.data = ASSOC_EVENT,
+ };
sdata_assert_lock(sdata);
@@ -3106,6 +3128,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata, "%pM denied association (code=%d)\n",
mgmt->sa, status_code);
ieee80211_destroy_assoc_data(sdata, false);
+ event.u.mlme.status = MLME_DENIED;
+ event.u.mlme.reason = status_code;
+ drv_event_callback(sdata->local, sdata, &event);
} else {
if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) {
/* oops -- internal error -- send timeout for now */
@@ -3113,6 +3138,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
cfg80211_assoc_timeout(sdata->dev, bss);
return;
}
+ event.u.mlme.status = MLME_SUCCESS;
+ drv_event_callback(sdata->local, sdata, &event);
sdata_info(sdata, "associated\n");
/*
@@ -3315,6 +3342,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
int sig = ifmgd->ave_beacon_signal;
int last_sig = ifmgd->last_ave_beacon_signal;
+ struct ieee80211_event event = {
+ .type = RSSI_EVENT,
+ };
/*
* if signal crosses either of the boundaries, invoke callback
@@ -3323,12 +3353,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (sig > ifmgd->rssi_max_thold &&
(last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
ifmgd->last_ave_beacon_signal = sig;
- drv_rssi_callback(local, sdata, RSSI_EVENT_HIGH);
+ event.u.rssi.data = RSSI_EVENT_HIGH;
+ drv_event_callback(local, sdata, &event);
} else if (sig < ifmgd->rssi_min_thold &&
(last_sig >= ifmgd->rssi_max_thold ||
last_sig == 0)) {
ifmgd->last_ave_beacon_signal = sig;
- drv_rssi_callback(local, sdata, RSSI_EVENT_LOW);
+ event.u.rssi.data = RSSI_EVENT_LOW;
+ drv_event_callback(local, sdata, &event);
}
}
@@ -3433,6 +3465,26 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (ifmgd->csa_waiting_bcn)
ieee80211_chswitch_post_beacon(sdata);
+	/*
+	 * Update beacon timing and dtim count on every beacon appearance. This
+	 * allows the driver to use the most up-to-date values. Do it before
+	 * comparing this beacon with the last one received.
+	 * IMPORTANT: these parameters may be out of sync by the time the
+	 * driver uses them; the synchronized view is currently guaranteed
+	 * only in certain callbacks.
+	 */
+ if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+ sdata->vif.bss_conf.sync_tsf =
+ le64_to_cpu(mgmt->u.beacon.timestamp);
+ sdata->vif.bss_conf.sync_device_ts =
+ rx_status->device_timestamp;
+ if (elems.tim)
+ sdata->vif.bss_conf.sync_dtim_count =
+ elems.tim->dtim_count;
+ else
+ sdata->vif.bss_conf.sync_dtim_count = 0;
+ }
+
if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
return;
ifmgd->beacon_crc = ncrc;
@@ -3460,18 +3512,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
else
bss_conf->dtim_period = 1;
- if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
- sdata->vif.bss_conf.sync_tsf =
- le64_to_cpu(mgmt->u.beacon.timestamp);
- sdata->vif.bss_conf.sync_device_ts =
- rx_status->device_timestamp;
- if (elems.tim)
- sdata->vif.bss_conf.sync_dtim_count =
- elems.tim->dtim_count;
- else
- sdata->vif.bss_conf.sync_dtim_count = 0;
- }
-
changed |= BSS_CHANGED_BEACON_INFO;
ifmgd->have_beacon = true;
@@ -3502,8 +3542,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
- cfg80211_tx_mlme_mgmt(sdata->dev, deauth_buf,
- sizeof(deauth_buf));
+ ieee80211_report_disconnect(sdata, deauth_buf,
+ sizeof(deauth_buf), true,
+ WLAN_REASON_DEAUTH_LEAVING);
return;
}
@@ -3621,8 +3662,8 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
tx, frame_buf);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- IEEE80211_DEAUTH_FRAME_LEN);
+ ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+ reason);
}
static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
@@ -3816,12 +3857,18 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
ieee80211_destroy_auth_data(sdata, false);
} else if (ieee80211_probe_auth(sdata)) {
u8 bssid[ETH_ALEN];
+ struct ieee80211_event event = {
+ .type = MLME_EVENT,
+ .u.mlme.data = AUTH_EVENT,
+ .u.mlme.status = MLME_TIMEOUT,
+ };
memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
ieee80211_destroy_auth_data(sdata, false);
cfg80211_auth_timeout(sdata->dev, bssid);
+ drv_event_callback(sdata->local, sdata, &event);
}
} else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started)
run_again(sdata, ifmgd->auth_data->timeout);
@@ -3831,9 +3878,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) ||
ieee80211_do_assoc(sdata)) {
struct cfg80211_bss *bss = ifmgd->assoc_data->bss;
+ struct ieee80211_event event = {
+ .type = MLME_EVENT,
+ .u.mlme.data = ASSOC_EVENT,
+ .u.mlme.status = MLME_TIMEOUT,
+ };
ieee80211_destroy_assoc_data(sdata, false);
cfg80211_assoc_timeout(sdata->dev, bss);
+ drv_event_callback(sdata->local, sdata, &event);
}
} else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
run_again(sdata, ifmgd->assoc_data->timeout);
@@ -3905,12 +3958,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
{
struct ieee80211_sub_if_data *sdata =
(struct ieee80211_sub_if_data *) data;
- struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- if (local->quiescing)
- return;
-
if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
return;
@@ -3926,9 +3975,6 @@ static void ieee80211_sta_conn_mon_timer(unsigned long data)
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- if (local->quiescing)
- return;
-
if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
return;
@@ -3991,6 +4037,34 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
IEEE80211_DEAUTH_FRAME_LEN);
}
+ /* This is a bit of a hack - we should find a better and more generic
+ * solution to this. Normally when suspending, cfg80211 will in fact
+ * deauthenticate. However, it doesn't (and cannot) stop an ongoing
+ * auth (not so important) or assoc (this is the problem) process.
+ *
+ * As a consequence, it can happen that we are in the process of both
+ * associating and suspending, and receive an association response
+ * after cfg80211 has checked if it needs to disconnect, but before
+ * we actually set the flag to drop incoming frames. This will then
+ * cause the workqueue flush to process the association response in
+ * the suspend, resulting in a successful association just before it
+ * tries to remove the interface from the driver, which now though
+ * has a channel context assigned ... this results in issues.
+ *
+ * To work around this (for now) simply deauth here again if we're
+ * now connected.
+ */
+ if (ifmgd->associated && !sdata->local->wowlan) {
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_deauth_request req = {
+ .reason_code = WLAN_REASON_DEAUTH_LEAVING,
+ .bssid = bssid,
+ };
+
+ memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+ ieee80211_mgd_deauth(sdata, &req);
+ }
+
sdata_unlock(sdata);
}
@@ -4379,6 +4453,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
} else
WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid));
+ /* Cancel scan to ensure that nothing interferes with connection */
+ if (local->scanning)
+ ieee80211_scan_cancel(local);
+
return 0;
}
@@ -4467,8 +4545,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- sizeof(frame_buf));
+ ieee80211_report_disconnect(sdata, frame_buf,
+ sizeof(frame_buf), true,
+ WLAN_REASON_UNSPECIFIED);
}
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
@@ -4488,7 +4567,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return 0;
err_clear:
- memset(ifmgd->bssid, 0, ETH_ALEN);
+ eth_zero_addr(ifmgd->bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
ifmgd->auth_data = NULL;
err_free:
@@ -4568,8 +4647,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- sizeof(frame_buf));
+ ieee80211_report_disconnect(sdata, frame_buf,
+ sizeof(frame_buf), true,
+ WLAN_REASON_UNSPECIFIED);
}
if (ifmgd->auth_data && !ifmgd->auth_data->done) {
@@ -4831,7 +4911,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
return 0;
err_clear:
- memset(ifmgd->bssid, 0, ETH_ALEN);
+ eth_zero_addr(ifmgd->bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
ifmgd->assoc_data = NULL;
err_free:
@@ -4859,8 +4939,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->reason_code, tx,
frame_buf);
ieee80211_destroy_auth_data(sdata, false);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- IEEE80211_DEAUTH_FRAME_LEN);
+ ieee80211_report_disconnect(sdata, frame_buf,
+ sizeof(frame_buf), true,
+ req->reason_code);
return 0;
}
@@ -4874,8 +4955,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, tx, frame_buf);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- IEEE80211_DEAUTH_FRAME_LEN);
+ ieee80211_report_disconnect(sdata, frame_buf,
+ sizeof(frame_buf), true,
+ req->reason_code);
return 0;
}
@@ -4907,8 +4989,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
req->reason_code, !req->local_state_change,
frame_buf);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
- IEEE80211_DEAUTH_FRAME_LEN);
+ ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+ req->reason_code);
return 0;
}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index ca405b6b686d..ac6ad6238e3a 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -59,9 +59,26 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
cancel_work_sync(&local->dynamic_ps_enable_work);
del_timer_sync(&local->dynamic_ps_timer);
- local->wowlan = wowlan && local->open_count;
+ local->wowlan = wowlan;
if (local->wowlan) {
- int err = drv_suspend(local, wowlan);
+ int err;
+
+	/* Drivers don't expect to suspend while operations such as
+	 * authenticating or associating are in progress. It wouldn't
+	 * make sense to accept that anyway, since the authentication
+	 * or association could never finish: the driver can't complete
+	 * it on its own.
+	 * Thus, clean up any in-progress auth/assoc first.
+	 */
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ continue;
+ ieee80211_mgd_quiesce(sdata);
+ }
+
+ err = drv_suspend(local, wowlan);
if (err < 0) {
local->quiescing = false;
local->wowlan = false;
@@ -80,6 +97,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return err;
} else if (err > 0) {
WARN_ON(err != 1);
+ /* cfg80211 will call back into mac80211 to disconnect
+ * all interfaces, allow that to proceed properly
+ */
+ ieee80211_wake_queues_by_reason(hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+ false);
return err;
} else {
goto suspend;
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index ef6e8a6c4253..247552a7f6c2 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -69,14 +69,39 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
return i;
}
+/* return current EWMA throughput */
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
+{
+ int usecs;
+
+ usecs = mr->perfect_tx_time;
+ if (!usecs)
+ usecs = 1000000;
+
+ /* reset thr. below 10% success */
+ if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100))
+ return 0;
+
+ if (prob_ewma > MINSTREL_FRAC(90, 100))
+ return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs));
+ else
+ return MINSTREL_TRUNC(100000 * (prob_ewma / usecs));
+}
+
/* find & sort topmost throughput rates */
static inline void
minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
int j = MAX_THR_RATES;
+ struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+ struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
- while (j > 0 && mi->r[i].stats.cur_tp > mi->r[tp_list[j - 1]].stats.cur_tp)
+ while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
j--;
+ tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+ }
+
if (j < MAX_THR_RATES - 1)
memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
if (j < MAX_THR_RATES)
@@ -127,13 +152,47 @@ minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
rate_control_set_rates(mp->hw, mi->sta, ratetbl);
}
+/*
+ * Recalculate statistics and counters of a given rate
+ */
+void
+minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
+{
+ if (unlikely(mrs->attempts > 0)) {
+ mrs->sample_skipped = 0;
+ mrs->cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
+ if (unlikely(!mrs->att_hist)) {
+ mrs->prob_ewma = mrs->cur_prob;
+ } else {
+ /* update exponential weighted moving variance */
+ mrs->prob_ewmsd = minstrel_ewmsd(mrs->prob_ewmsd,
+ mrs->cur_prob,
+ mrs->prob_ewma,
+ EWMA_LEVEL);
+
+			/* update exponential weighted moving average */
+ mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
+ mrs->cur_prob,
+ EWMA_LEVEL);
+ }
+ mrs->att_hist += mrs->attempts;
+ mrs->succ_hist += mrs->success;
+ } else {
+ mrs->sample_skipped++;
+ }
+
+ mrs->last_success = mrs->success;
+ mrs->last_attempts = mrs->attempts;
+ mrs->success = 0;
+ mrs->attempts = 0;
+}
+
static void
minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
{
u8 tmp_tp_rate[MAX_THR_RATES];
u8 tmp_prob_rate = 0;
- u32 usecs;
- int i;
+ int i, tmp_cur_tp, tmp_prob_tp;
for (i = 0; i < MAX_THR_RATES; i++)
tmp_tp_rate[i] = 0;
@@ -141,38 +200,15 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
+ struct minstrel_rate_stats *tmp_mrs = &mi->r[tmp_prob_rate].stats;
- usecs = mr->perfect_tx_time;
- if (!usecs)
- usecs = 1000000;
-
- if (unlikely(mrs->attempts > 0)) {
- mrs->sample_skipped = 0;
- mrs->cur_prob = MINSTREL_FRAC(mrs->success,
- mrs->attempts);
- mrs->succ_hist += mrs->success;
- mrs->att_hist += mrs->attempts;
- mrs->probability = minstrel_ewma(mrs->probability,
- mrs->cur_prob,
- EWMA_LEVEL);
- } else
- mrs->sample_skipped++;
-
- mrs->last_success = mrs->success;
- mrs->last_attempts = mrs->attempts;
- mrs->success = 0;
- mrs->attempts = 0;
-
- /* Update throughput per rate, reset thr. below 10% success */
- if (mrs->probability < MINSTREL_FRAC(10, 100))
- mrs->cur_tp = 0;
- else
- mrs->cur_tp = mrs->probability * (1000000 / usecs);
+ /* Update statistics of success probability per rate */
+ minstrel_calc_rate_stats(mrs);
/* Sample less often below the 10% chance of success.
* Sample less often above the 95% chance of success. */
- if (mrs->probability > MINSTREL_FRAC(95, 100) ||
- mrs->probability < MINSTREL_FRAC(10, 100)) {
+ if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
+ mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
mr->adjusted_retry_count = mrs->retry_count >> 1;
if (mr->adjusted_retry_count > 2)
mr->adjusted_retry_count = 2;
@@ -192,11 +228,14 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
* choose the maximum throughput rate as max_prob_rate
* (2) if all success probabilities < 95%, the rate with
* highest success probability is chosen as max_prob_rate */
- if (mrs->probability >= MINSTREL_FRAC(95, 100)) {
- if (mrs->cur_tp >= mi->r[tmp_prob_rate].stats.cur_tp)
+ if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) {
+ tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma);
+ tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate],
+ tmp_mrs->prob_ewma);
+ if (tmp_cur_tp >= tmp_prob_tp)
tmp_prob_rate = i;
} else {
- if (mrs->probability >= mi->r[tmp_prob_rate].stats.probability)
+ if (mrs->prob_ewma >= tmp_mrs->prob_ewma)
tmp_prob_rate = i;
}
}
@@ -215,7 +254,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
#endif
/* Reset update timer */
- mi->stats_update = jiffies;
+ mi->last_stats_update = jiffies;
minstrel_update_rates(mp, mi);
}
@@ -253,7 +292,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (mi->sample_deferred > 0)
mi->sample_deferred--;
- if (time_after(jiffies, mi->stats_update +
+ if (time_after(jiffies, mi->last_stats_update +
(mp->update_interval * HZ) / 1000))
minstrel_update_stats(mp, mi);
}
@@ -385,7 +424,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
* has a probability of >95%, we shouldn't be attempting
* to use it, as this only wastes precious airtime */
if (!mrr_capable &&
- (mi->r[ndx].stats.probability > MINSTREL_FRAC(95, 100)))
+ (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100)))
return;
mi->prev_sample = true;
@@ -519,7 +558,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
}
mi->n_rates = n;
- mi->stats_update = jiffies;
+ mi->last_stats_update = jiffies;
init_sample_table(mi);
minstrel_update_rates(mp, mi);
@@ -553,7 +592,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
if (!mi->sample_table)
goto error1;
- mi->stats_update = jiffies;
+ mi->last_stats_update = jiffies;
return mi;
error1:
@@ -663,12 +702,18 @@ minstrel_free(void *priv)
static u32 minstrel_get_expected_throughput(void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
+ struct minstrel_rate_stats *tmp_mrs;
int idx = mi->max_tp_rate[0];
+ int tmp_cur_tp;
/* convert pkt per sec in kbps (1200 is the average pkt size used for
* computing cur_tp
*/
- return MINSTREL_TRUNC(mi->r[idx].stats.cur_tp) * 1200 * 8 / 1024;
+ tmp_mrs = &mi->r[idx].stats;
+ tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
+ tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
+
+ return tmp_cur_tp;
}
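/*
 * Illustrative sketch (not part of the patch): the pkt/s -> kbps conversion
 * used in minstrel_get_expected_throughput() above, evaluated standalone.
 * The tp_avg_pps value is a made-up example; 1200 is the average packet
 * size the in-code comment refers to.
 */
#include <stdio.h>

#define AVG_PKT_SIZE 1200

int main(void)
{
	int tp_avg_pps = 2000;	/* assumed: ~2000 packets per second */
	int kbps = tp_avg_pps * AVG_PKT_SIZE * 8 / 1024;

	printf("%d pkt/s -> %d kbps\n", tp_avg_pps, kbps);
	return 0;
}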
const struct rate_control_ops mac80211_minstrel = {
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 410efe620c57..c230bbe93262 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -13,7 +13,6 @@
#define EWMA_DIV 128
#define SAMPLE_COLUMNS 10 /* number of columns in sample table */
-
/* scaled fraction values */
#define MINSTREL_SCALE 16
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
@@ -24,11 +23,34 @@
/*
* Perform EWMA (Exponentially Weighted Moving Average) calculation
- */
+ */
static inline int
minstrel_ewma(int old, int new, int weight)
{
- return (new * (EWMA_DIV - weight) + old * weight) / EWMA_DIV;
+ int diff, incr;
+
+ diff = new - old;
+ incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
+
+ return old + incr;
+}
+
+/*
+ * Perform EWMSD (Exponentially Weighted Moving Standard Deviation) calculation
+ */
+static inline int
+minstrel_ewmsd(int old_ewmsd, int cur_prob, int prob_ewma, int weight)
+{
+ int diff, incr, tmp_var;
+
+ /* calculate exponential weighted moving variance */
+ diff = MINSTREL_TRUNC((cur_prob - prob_ewma) * 1000000);
+ incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
+ tmp_var = old_ewmsd * old_ewmsd;
+ tmp_var = weight * (tmp_var + diff * incr / 1000000) / EWMA_DIV;
+
+ /* return standard deviation */
+ return (u16) int_sqrt(tmp_var);
}
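/*
 * Illustrative sketch (not part of the patch): how the incremental
 * fixed-point EWMA update above behaves on the MINSTREL_SCALE
 * representation.  EWMA_DIV and MINSTREL_SCALE mirror the header;
 * EWMA_LEVEL = 96 (i.e. 75% weight on the old value) is an assumption
 * for the example, and the loop values are made up.
 */
#include <stdio.h>

#define MINSTREL_SCALE            16
#define MINSTREL_FRAC(val, div)   (((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val)       ((val) >> MINSTREL_SCALE)
#define EWMA_DIV                  128
#define EWMA_LEVEL                96	/* assumed default weight */

static int ewma(int old, int new, int weight)
{
	/* same incremental form as minstrel_ewma() above */
	return old + (EWMA_DIV - weight) * (new - old) / EWMA_DIV;
}

int main(void)
{
	int prob = MINSTREL_FRAC(50, 100);	/* start at 50% */
	int sample = MINSTREL_FRAC(90, 100);	/* intervals with 90% success */
	int i;

	for (i = 0; i < 5; i++) {
		prob = ewma(prob, sample, EWMA_LEVEL);
		printf("interval %d: prob_ewma = %d.%d%%\n", i + 1,
		       MINSTREL_TRUNC(prob * 1000) / 10,
		       MINSTREL_TRUNC(prob * 1000) % 10);
	}
	return 0;
}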
struct minstrel_rate_stats {
@@ -39,11 +61,13 @@ struct minstrel_rate_stats {
/* total attempts/success counters */
u64 att_hist, succ_hist;
- /* current throughput */
- unsigned int cur_tp;
-
- /* packet delivery probabilities */
- unsigned int cur_prob, probability;
+ /* statistics of packet delivery probability
+ * cur_prob - current prob within last update interval
+ * prob_ewma - exponential weighted moving average of prob
+ * prob_ewmsd - exp. weighted moving standard deviation of prob */
+ unsigned int cur_prob;
+ unsigned int prob_ewma;
+ u16 prob_ewmsd;
/* maximum retry counts */
u8 retry_count;
@@ -71,7 +95,7 @@ struct minstrel_rate {
struct minstrel_sta_info {
struct ieee80211_sta *sta;
- unsigned long stats_update;
+ unsigned long last_stats_update;
unsigned int sp_ack_dur;
unsigned int rate_avg;
@@ -95,6 +119,7 @@ struct minstrel_sta_info {
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *dbg_stats;
+ struct dentry *dbg_stats_csv;
#endif
};
@@ -121,7 +146,6 @@ struct minstrel_priv {
u32 fixed_rate_idx;
struct dentry *dbg_fixed_rate;
#endif
-
};
struct minstrel_debugfs_info {
@@ -133,8 +157,13 @@ extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
+/* Recalculate success probabilities and counters for a given rate using EWMA */
+void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
+
/* debugfs */
int minstrel_stats_open(struct inode *inode, struct file *file);
+int minstrel_stats_csv_open(struct inode *inode, struct file *file);
ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
int minstrel_stats_release(struct inode *inode, struct file *file);
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 2acab1bcaa4b..1db5f7c3318a 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -54,12 +54,28 @@
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
+ssize_t
+minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
+{
+ struct minstrel_debugfs_info *ms;
+
+ ms = file->private_data;
+ return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
+}
+
+int
+minstrel_stats_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
int
minstrel_stats_open(struct inode *inode, struct file *file)
{
struct minstrel_sta_info *mi = inode->i_private;
struct minstrel_debugfs_info *ms;
- unsigned int i, tp, prob, eprob;
+ unsigned int i, tp_max, tp_avg, prob, eprob;
char *p;
ms = kmalloc(2048, GFP_KERNEL);
@@ -68,8 +84,14 @@ minstrel_stats_open(struct inode *inode, struct file *file)
file->private_data = ms;
p = ms->buf;
- p += sprintf(p, "rate tpt eprob *prob"
- " *ok(*cum) ok( cum)\n");
+ p += sprintf(p, "\n");
+ p += sprintf(p, "best __________rate_________ ______"
+ "statistics______ ________last_______ "
+ "______sum-of________\n");
+ p += sprintf(p, "rate [name idx airtime max_tp] [ ø(tp) ø(prob) "
+ "sd(prob)] [prob.|retry|suc|att] "
+ "[#success | #attempts]\n");
+
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
@@ -79,18 +101,26 @@ minstrel_stats_open(struct inode *inode, struct file *file)
*(p++) = (i == mi->max_tp_rate[2]) ? 'C' : ' ';
*(p++) = (i == mi->max_tp_rate[3]) ? 'D' : ' ';
*(p++) = (i == mi->max_prob_rate) ? 'P' : ' ';
- p += sprintf(p, "%3u%s", mr->bitrate / 2,
+
+ p += sprintf(p, " %3u%s ", mr->bitrate / 2,
(mr->bitrate & 1 ? ".5" : " "));
+ p += sprintf(p, "%3u ", i);
+ p += sprintf(p, "%6u ", mr->perfect_tx_time);
- tp = MINSTREL_TRUNC(mrs->cur_tp / 10);
+ tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100, 100));
+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
- eprob = MINSTREL_TRUNC(mrs->probability * 1000);
+ eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u"
- " %4u(%4u) %9llu(%9llu)\n",
- tp / 10, tp % 10,
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
+ " %3u.%1u %3u %3u %-3u "
+ "%9llu %-9llu\n",
+ tp_max / 10, tp_max % 10,
+ tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
+ mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
prob / 10, prob % 10,
+ mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
(unsigned long long)mrs->succ_hist,
@@ -107,25 +137,75 @@ minstrel_stats_open(struct inode *inode, struct file *file)
return 0;
}
-ssize_t
-minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
+static const struct file_operations minstrel_stat_fops = {
+ .owner = THIS_MODULE,
+ .open = minstrel_stats_open,
+ .read = minstrel_stats_read,
+ .release = minstrel_stats_release,
+ .llseek = default_llseek,
+};
+
+int
+minstrel_stats_csv_open(struct inode *inode, struct file *file)
{
+ struct minstrel_sta_info *mi = inode->i_private;
struct minstrel_debugfs_info *ms;
+ unsigned int i, tp_max, tp_avg, prob, eprob;
+ char *p;
- ms = file->private_data;
- return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
-}
+ ms = kmalloc(2048, GFP_KERNEL);
+ if (!ms)
+ return -ENOMEM;
+
+ file->private_data = ms;
+ p = ms->buf;
+
+ for (i = 0; i < mi->n_rates; i++) {
+ struct minstrel_rate *mr = &mi->r[i];
+ struct minstrel_rate_stats *mrs = &mi->r[i].stats;
+
+ p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : ""));
+ p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? "B" : ""));
+ p += sprintf(p, "%s" ,((i == mi->max_tp_rate[2]) ? "C" : ""));
+ p += sprintf(p, "%s" ,((i == mi->max_tp_rate[3]) ? "D" : ""));
+ p += sprintf(p, "%s" ,((i == mi->max_prob_rate) ? "P" : ""));
+
+ p += sprintf(p, ",%u%s", mr->bitrate / 2,
+ (mr->bitrate & 1 ? ".5," : ","));
+ p += sprintf(p, "%u,", i);
+ p += sprintf(p, "%u,",mr->perfect_tx_time);
+
+ tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100, 100));
+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
+ prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
+ eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
+ "%llu,%llu,%d,%d\n",
+ tp_max / 10, tp_max % 10,
+ tp_avg / 10, tp_avg % 10,
+ eprob / 10, eprob % 10,
+ mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
+ prob / 10, prob % 10,
+ mrs->retry_count,
+ mrs->last_success,
+ mrs->last_attempts,
+ (unsigned long long)mrs->succ_hist,
+ (unsigned long long)mrs->att_hist,
+ mi->total_packets - mi->sample_packets,
+ mi->sample_packets);
+
+ }
+ ms->len = p - ms->buf;
+
+ WARN_ON(ms->len + sizeof(*ms) > 2048);
-int
-minstrel_stats_release(struct inode *inode, struct file *file)
-{
- kfree(file->private_data);
return 0;
}
-static const struct file_operations minstrel_stat_fops = {
+static const struct file_operations minstrel_stat_csv_fops = {
.owner = THIS_MODULE,
- .open = minstrel_stats_open,
+ .open = minstrel_stats_csv_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
.llseek = default_llseek,
@@ -138,6 +218,9 @@ minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
mi->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, mi,
&minstrel_stat_fops);
+
+ mi->dbg_stats_csv = debugfs_create_file("rc_stats_csv", S_IRUGO, dir,
+ mi, &minstrel_stat_csv_fops);
}
void
@@ -146,4 +229,6 @@ minstrel_remove_sta_debugfs(void *priv, void *priv_sta)
struct minstrel_sta_info *mi = priv_sta;
debugfs_remove(mi->dbg_stats);
+
+ debugfs_remove(mi->dbg_stats_csv);
}
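/*
 * Illustrative sketch (not part of the patch): reading the new per-station
 * rc_stats_csv file from user space.  The path below is an example only --
 * the real location depends on the phy name, interface and station MAC
 * address under /sys/kernel/debug/ieee80211/.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
			   "stations/00:11:22:33:44:55/rc_stats_csv";
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one comma-separated record per rate */
	fclose(f);
	return 0;
}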
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 80452cfd2dc5..7430a1df2ab1 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -17,10 +17,11 @@
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
+#define AVG_AMPDU_SIZE 16
#define AVG_PKT_SIZE 1200
/* Number of bits for an average sized packet */
-#define MCS_NBITS (AVG_PKT_SIZE << 3)
+#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)
/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
@@ -33,7 +34,8 @@
)
/* Transmit duration for the raw data part of an average sized packet */
-#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
+#define MCS_DURATION(streams, sgi, bps) \
+ (MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE)
#define BW_20 0
#define BW_40 1
@@ -311,67 +313,35 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
}
-
/*
- * Recalculate success probabilities and counters for a rate using EWMA
+ * Return current throughput based on the average A-MPDU length, taking into
+ * account the expected number of retransmissions and their expected length
*/
-static void
-minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
+int
+minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
+ int prob_ewma)
{
- if (unlikely(mr->attempts > 0)) {
- mr->sample_skipped = 0;
- mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
- if (!mr->att_hist)
- mr->probability = mr->cur_prob;
- else
- mr->probability = minstrel_ewma(mr->probability,
- mr->cur_prob, EWMA_LEVEL);
- mr->att_hist += mr->attempts;
- mr->succ_hist += mr->success;
- } else {
- mr->sample_skipped++;
- }
- mr->last_success = mr->success;
- mr->last_attempts = mr->attempts;
- mr->success = 0;
- mr->attempts = 0;
-}
-
-/*
- * Calculate throughput based on the average A-MPDU length, taking into account
- * the expected number of retransmissions and their expected length
- */
-static void
-minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
-{
- struct minstrel_rate_stats *mr;
unsigned int nsecs = 0;
- unsigned int tp;
- unsigned int prob;
- mr = &mi->groups[group].rates[rate];
- prob = mr->probability;
-
- if (prob < MINSTREL_FRAC(1, 10)) {
- mr->cur_tp = 0;
- return;
- }
-
- /*
- * For the throughput calculation, limit the probability value to 90% to
- * account for collision related packet error rate fluctuation
- */
- if (prob > MINSTREL_FRAC(9, 10))
- prob = MINSTREL_FRAC(9, 10);
+ /* do not account for throughput if success prob is below 10% */
+ if (prob_ewma < MINSTREL_FRAC(10, 100))
+ return 0;
if (group != MINSTREL_CCK_GROUP)
nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
nsecs += minstrel_mcs_groups[group].duration[rate];
- /* prob is scaled - see MINSTREL_FRAC above */
- tp = 1000000 * ((prob * 1000) / nsecs);
- mr->cur_tp = MINSTREL_TRUNC(tp);
+ /*
+ * For the throughput calculation, limit the probability value to 90% to
+ * account for collision related packet error rate fluctuation
+ * (prob is scaled - see MINSTREL_FRAC above)
+ */
+ if (prob_ewma > MINSTREL_FRAC(90, 100))
+ return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
+ / nsecs));
+ else
+ return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
}
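/*
 * Illustrative sketch (not part of the patch): the capped-throughput formula
 * used by minstrel_ht_get_tp_avg() above, evaluated standalone.  The tp_avg()
 * helper and the 40 us per-frame airtime are assumptions for the example;
 * the 90% cap mirrors the comment on collision-related error fluctuation.
 */
#include <stdio.h>

#define MINSTREL_SCALE          16
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val)     ((val) >> MINSTREL_SCALE)

static int tp_avg(int prob_ewma, unsigned int nsecs)
{
	if (prob_ewma < MINSTREL_FRAC(10, 100))
		return 0;			/* below 10%: report zero */
	if (prob_ewma > MINSTREL_FRAC(90, 100))
		prob_ewma = MINSTREL_FRAC(90, 100);	/* cap at 90% */

	return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
}

int main(void)
{
	unsigned int nsecs = 40000;	/* assumed: 40 us per average frame */

	printf("prob 50%%: %d\n", tp_avg(MINSTREL_FRAC(50, 100), nsecs));
	printf("prob 99%%: %d\n", tp_avg(MINSTREL_FRAC(99, 100), nsecs));
	return 0;
}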
/*
@@ -385,22 +355,23 @@ static void
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
u16 *tp_list)
{
- int cur_group, cur_idx, cur_thr, cur_prob;
- int tmp_group, tmp_idx, tmp_thr, tmp_prob;
+ int cur_group, cur_idx, cur_tp_avg, cur_prob;
+ int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
int j = MAX_THR_RATES;
cur_group = index / MCS_GROUP_RATES;
cur_idx = index % MCS_GROUP_RATES;
- cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
- cur_prob = mi->groups[cur_group].rates[cur_idx].probability;
+ cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
+ cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
do {
tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
- tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
- if (cur_thr < tmp_thr ||
- (cur_thr == tmp_thr && cur_prob <= tmp_prob))
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
+ tmp_prob);
+ if (cur_tp_avg < tmp_tp_avg ||
+ (cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob))
break;
j--;
} while (j > 0);
@@ -420,16 +391,21 @@ static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
{
struct minstrel_mcs_group_data *mg;
- struct minstrel_rate_stats *mr;
- int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group;
+ struct minstrel_rate_stats *mrs;
+ int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
+ int max_tp_group, cur_tp_avg, cur_group, cur_idx;
+ int max_gpr_group, max_gpr_idx;
+ int max_gpr_tp_avg, max_gpr_prob;
+ cur_group = index / MCS_GROUP_RATES;
+ cur_idx = index % MCS_GROUP_RATES;
mg = &mi->groups[index / MCS_GROUP_RATES];
- mr = &mg->rates[index % MCS_GROUP_RATES];
+ mrs = &mg->rates[index % MCS_GROUP_RATES];
tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
- tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
/* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
* MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */
@@ -438,15 +414,24 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
(max_tp_group != MINSTREL_CCK_GROUP))
return;
- if (mr->probability > MINSTREL_FRAC(75, 100)) {
- if (mr->cur_tp > tmp_tp)
+ if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
+ cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
+ mrs->prob_ewma);
+ if (cur_tp_avg > tmp_tp_avg)
mi->max_prob_rate = index;
- if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp)
+
+ max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
+ max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+ max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+ max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
+ max_gpr_idx,
+ max_gpr_prob);
+ if (cur_tp_avg > max_gpr_tp_avg)
mg->max_group_prob_rate = index;
} else {
- if (mr->probability > tmp_prob)
+ if (mrs->prob_ewma > tmp_prob)
mi->max_prob_rate = index;
- if (mr->probability > mg->rates[mg->max_group_prob_rate].probability)
+ if (mrs->prob_ewma > mg->rates[mg->max_group_prob_rate].prob_ewma)
mg->max_group_prob_rate = index;
}
}
@@ -463,16 +448,18 @@ minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
u16 tmp_mcs_tp_rate[MAX_THR_RATES],
u16 tmp_cck_tp_rate[MAX_THR_RATES])
{
- unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp;
+ unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob;
int i;
tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
- tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
- tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
if (tmp_cck_tp > tmp_mcs_tp) {
for(i = 0; i < MAX_THR_RATES; i++) {
@@ -491,8 +478,7 @@ static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
- struct minstrel_rate_stats *mr;
- int tmp_max_streams, group;
+ int tmp_max_streams, group, tmp_idx, tmp_prob;
int tmp_tp = 0;
tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
@@ -501,11 +487,16 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
mg = &mi->groups[group];
if (!mg->supported || group == MINSTREL_CCK_GROUP)
continue;
- mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate);
- if (tmp_tp < mr->cur_tp &&
+
+ tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+ tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma;
+
+ if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
(minstrel_mcs_groups[group].streams < tmp_max_streams)) {
mi->max_prob_rate = mg->max_group_prob_rate;
- tmp_tp = mr->cur_tp;
+ tmp_tp = minstrel_ht_get_tp_avg(mi, group,
+ tmp_idx,
+ tmp_prob);
}
}
}
@@ -523,8 +514,8 @@ static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
- struct minstrel_rate_stats *mr;
- int group, i, j;
+ struct minstrel_rate_stats *mrs;
+ int group, i, j, cur_prob;
u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
u16 tmp_cck_tp_rate[MAX_THR_RATES], index;
@@ -563,12 +554,12 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
index = MCS_GROUP_RATES * group + i;
- mr = &mg->rates[i];
- mr->retry_updated = false;
- minstrel_calc_rate_ewma(mr);
- minstrel_ht_calc_tp(mi, group, i);
+ mrs = &mg->rates[i];
+ mrs->retry_updated = false;
+ minstrel_calc_rate_stats(mrs);
+ cur_prob = mrs->prob_ewma;
- if (!mr->cur_tp)
+ if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
continue;
/* Find max throughput rate set */
@@ -612,7 +603,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
#endif
/* Reset update timer */
- mi->stats_update = jiffies;
+ mi->last_stats_update = jiffies;
}
static bool
@@ -635,7 +626,7 @@ minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rat
}
static void
-minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
+minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
@@ -776,7 +767,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
update = true;
}
- if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
+ if (time_after(jiffies, mi->last_stats_update +
+ (mp->update_interval / 2 * HZ) / 1000)) {
update = true;
minstrel_ht_update_stats(mp, mi);
}
@@ -789,7 +781,7 @@ static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
int index)
{
- struct minstrel_rate_stats *mr;
+ struct minstrel_rate_stats *mrs;
const struct mcs_group *group;
unsigned int tx_time, tx_time_rtscts, tx_time_data;
unsigned int cw = mp->cw_min;
@@ -798,16 +790,16 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
unsigned int overhead = 0, overhead_rtscts = 0;
- mr = minstrel_get_ratestats(mi, index);
- if (mr->probability < MINSTREL_FRAC(1, 10)) {
- mr->retry_count = 1;
- mr->retry_count_rtscts = 1;
+ mrs = minstrel_get_ratestats(mi, index);
+ if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) {
+ mrs->retry_count = 1;
+ mrs->retry_count_rtscts = 1;
return;
}
- mr->retry_count = 2;
- mr->retry_count_rtscts = 2;
- mr->retry_updated = true;
+ mrs->retry_count = 2;
+ mrs->retry_count_rtscts = 2;
+ mrs->retry_updated = true;
group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;
@@ -838,9 +830,9 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;
if (tx_time_rtscts < mp->segment_size)
- mr->retry_count_rtscts++;
+ mrs->retry_count_rtscts++;
} while ((tx_time < mp->segment_size) &&
- (++mr->retry_count < mp->max_retry));
+ (++mrs->retry_count < mp->max_retry));
}
@@ -849,22 +841,22 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_sta_rates *ratetbl, int offset, int index)
{
const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
- struct minstrel_rate_stats *mr;
+ struct minstrel_rate_stats *mrs;
u8 idx;
u16 flags = group->flags;
- mr = minstrel_get_ratestats(mi, index);
- if (!mr->retry_updated)
+ mrs = minstrel_get_ratestats(mi, index);
+ if (!mrs->retry_updated)
minstrel_calc_retransmit(mp, mi, index);
- if (mr->probability < MINSTREL_FRAC(20, 100) || !mr->retry_count) {
+ if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
ratetbl->rate[offset].count = 2;
ratetbl->rate[offset].count_rts = 2;
ratetbl->rate[offset].count_cts = 2;
} else {
- ratetbl->rate[offset].count = mr->retry_count;
- ratetbl->rate[offset].count_cts = mr->retry_count;
- ratetbl->rate[offset].count_rts = mr->retry_count_rtscts;
+ ratetbl->rate[offset].count = mrs->retry_count;
+ ratetbl->rate[offset].count_cts = mrs->retry_count;
+ ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts;
}
if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP)
@@ -922,7 +914,7 @@ minstrel_get_duration(int index)
static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
- struct minstrel_rate_stats *mr;
+ struct minstrel_rate_stats *mrs;
struct minstrel_mcs_group_data *mg;
unsigned int sample_dur, sample_group, cur_max_tp_streams;
int sample_idx = 0;
@@ -938,12 +930,12 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
sample_group = mi->sample_group;
mg = &mi->groups[sample_group];
sample_idx = sample_table[mg->column][mg->index];
- minstrel_next_sample_idx(mi);
+ minstrel_set_next_sample_idx(mi);
if (!(mg->supported & BIT(sample_idx)))
return -1;
- mr = &mg->rates[sample_idx];
+ mrs = &mg->rates[sample_idx];
sample_idx += sample_group * MCS_GROUP_RATES;
/*
@@ -960,7 +952,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
* Do not sample if the probability is already higher than 95%
* to avoid wasting airtime.
*/
- if (mr->probability > MINSTREL_FRAC(95, 100))
+ if (mrs->prob_ewma > MINSTREL_FRAC(95, 100))
return -1;
/*
@@ -975,7 +967,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
(cur_max_tp_streams - 1 <
minstrel_mcs_groups[sample_group].streams ||
sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
- if (mr->sample_skipped < 20)
+ if (mrs->sample_skipped < 20)
return -1;
if (mi->sample_slow++ > 2)
@@ -1129,7 +1121,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
memset(mi, 0, sizeof(*mi));
mi->sta = sta;
- mi->stats_update = jiffies;
+ mi->last_stats_update = jiffies;
ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
@@ -1326,16 +1318,19 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
{
struct minstrel_ht_sta_priv *msp = priv_sta;
struct minstrel_ht_sta *mi = &msp->ht;
- int i, j;
+ int i, j, prob, tp_avg;
if (!msp->is_ht)
return mac80211_minstrel.get_expected_throughput(priv_sta);
i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
+ prob = mi->groups[i].rates[j].prob_ewma;
+
+ /* convert tp_avg from packets per second to kbps */
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
- /* convert cur_tp from pkt per second in kbps */
- return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
+ return tp_avg;
}
static const struct rate_control_ops mac80211_minstrel_ht = {
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index f2217d6aa0c2..e8b52a94d24b 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -78,7 +78,7 @@ struct minstrel_ht_sta {
u16 max_prob_rate;
/* time of last status update */
- unsigned long stats_update;
+ unsigned long last_stats_update;
/* overhead time in usec for each frame */
unsigned int overhead;
@@ -112,6 +112,7 @@ struct minstrel_ht_sta_priv {
};
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *dbg_stats;
+ struct dentry *dbg_stats_csv;
#endif
void *ratelist;
void *sample_table;
@@ -120,5 +121,7 @@ struct minstrel_ht_sta_priv {
void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta);
+int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
+ int prob_ewma);
#endif
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 20c676b8e5b6..6822ce0f95e5 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -19,7 +19,7 @@ static char *
minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
{
const struct mcs_group *mg;
- unsigned int j, tp, prob, eprob;
+ unsigned int j, tp_max, tp_avg, prob, eprob, tx_time;
char htmode = '2';
char gimode = 'L';
u32 gflags;
@@ -38,19 +38,26 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
gimode = 'S';
for (j = 0; j < MCS_GROUP_RATES; j++) {
- struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
+ struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
static const int bitrates[4] = { 10, 20, 55, 110 };
int idx = i * MCS_GROUP_RATES + j;
if (!(mi->groups[i].supported & BIT(j)))
continue;
- if (gflags & IEEE80211_TX_RC_MCS)
- p += sprintf(p, " HT%c0/%cGI ", htmode, gimode);
- else if (gflags & IEEE80211_TX_RC_VHT_MCS)
- p += sprintf(p, "VHT%c0/%cGI ", htmode, gimode);
- else
- p += sprintf(p, " CCK/%cP ", j < 4 ? 'L' : 'S');
+ if (gflags & IEEE80211_TX_RC_MCS) {
+ p += sprintf(p, "HT%c0 ", htmode);
+ p += sprintf(p, "%cGI ", gimode);
+ p += sprintf(p, "%d ", mg->streams);
+ } else if (gflags & IEEE80211_TX_RC_VHT_MCS) {
+ p += sprintf(p, "VHT%c0 ", htmode);
+ p += sprintf(p, "%cGI ", gimode);
+ p += sprintf(p, "%d ", mg->streams);
+ } else {
+ p += sprintf(p, "CCK ");
+ p += sprintf(p, "%cP ", j < 4 ? 'L' : 'S');
+ p += sprintf(p, "1 ");
+ }
*(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' ';
*(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -59,29 +66,39 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
if (gflags & IEEE80211_TX_RC_MCS) {
- p += sprintf(p, " MCS%-2u ", (mg->streams - 1) * 8 + j);
+ p += sprintf(p, " MCS%-2u", (mg->streams - 1) * 8 + j);
} else if (gflags & IEEE80211_TX_RC_VHT_MCS) {
- p += sprintf(p, " MCS%-1u/%1u", j, mg->streams);
+ p += sprintf(p, " MCS%-1u/%1u", j, mg->streams);
} else {
int r = bitrates[j % 4];
- p += sprintf(p, " %2u.%1uM ", r / 10, r % 10);
+ p += sprintf(p, " %2u.%1uM", r / 10, r % 10);
}
- tp = mr->cur_tp / 10;
- prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
- eprob = MINSTREL_TRUNC(mr->probability * 1000);
+ p += sprintf(p, " %3u ", idx);
- p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u "
- "%3u %4u(%4u) %9llu(%9llu)\n",
- tp / 10, tp % 10,
+ /* tx_time[rate(i)] in usec */
+ tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000);
+ p += sprintf(p, "%6u ", tx_time);
+
+ tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
+ prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
+ eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
+ " %3u.%1u %3u %3u %-3u "
+ "%9llu %-9llu\n",
+ tp_max / 10, tp_max % 10,
+ tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
+ mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
prob / 10, prob % 10,
- mr->retry_count,
- mr->last_success,
- mr->last_attempts,
- (unsigned long long)mr->succ_hist,
- (unsigned long long)mr->att_hist);
+ mrs->retry_count,
+ mrs->last_success,
+ mrs->last_attempts,
+ (unsigned long long)mrs->succ_hist,
+ (unsigned long long)mrs->att_hist);
}
return p;
@@ -94,8 +111,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
struct minstrel_ht_sta *mi = &msp->ht;
struct minstrel_debugfs_info *ms;
unsigned int i;
- char *p;
int ret;
+ char *p;
if (!msp->is_ht) {
inode->i_private = &msp->legacy;
@@ -110,8 +127,14 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
file->private_data = ms;
p = ms->buf;
- p += sprintf(p, " type rate tpt eprob *prob "
- "ret *ok(*cum) ok( cum)\n");
+
+ p += sprintf(p, "\n");
+ p += sprintf(p, " best ____________rate__________ "
+ "______statistics______ ________last_______ "
+ "______sum-of________\n");
+ p += sprintf(p, "mode guard # rate [name idx airtime max_tp] "
+ "[ ø(tp) ø(prob) sd(prob)] [prob.|retry|suc|att] [#success | "
+ "#attempts]\n");
p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
for (i = 0; i < MINSTREL_CCK_GROUP; i++)
@@ -123,11 +146,10 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
"lookaround %d\n",
max(0, (int) mi->total_packets - (int) mi->sample_packets),
mi->sample_packets);
- p += sprintf(p, "Average A-MPDU length: %d.%d\n",
+ p += sprintf(p, "Average # of aggregated frames per A-MPDU: %d.%d\n",
MINSTREL_TRUNC(mi->avg_ampdu_len),
MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
ms->len = p - ms->buf;
-
WARN_ON(ms->len + sizeof(*ms) > 32768);
return nonseekable_open(inode, file);
@@ -141,6 +163,143 @@ static const struct file_operations minstrel_ht_stat_fops = {
.llseek = no_llseek,
};
+static char *
+minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
+{
+ const struct mcs_group *mg;
+ unsigned int j, tp_max, tp_avg, prob, eprob, tx_time;
+ char htmode = '2';
+ char gimode = 'L';
+ u32 gflags;
+
+ if (!mi->groups[i].supported)
+ return p;
+
+ mg = &minstrel_mcs_groups[i];
+ gflags = mg->flags;
+
+ if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ htmode = '4';
+ else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ htmode = '8';
+ if (gflags & IEEE80211_TX_RC_SHORT_GI)
+ gimode = 'S';
+
+ for (j = 0; j < MCS_GROUP_RATES; j++) {
+ struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
+ static const int bitrates[4] = { 10, 20, 55, 110 };
+ int idx = i * MCS_GROUP_RATES + j;
+
+ if (!(mi->groups[i].supported & BIT(j)))
+ continue;
+
+ if (gflags & IEEE80211_TX_RC_MCS) {
+ p += sprintf(p, "HT%c0,", htmode);
+ p += sprintf(p, "%cGI,", gimode);
+ p += sprintf(p, "%d,", mg->streams);
+ } else if (gflags & IEEE80211_TX_RC_VHT_MCS) {
+ p += sprintf(p, "VHT%c0,", htmode);
+ p += sprintf(p, "%cGI,", gimode);
+ p += sprintf(p, "%d,", mg->streams);
+ } else {
+ p += sprintf(p, "CCK,");
+ p += sprintf(p, "%cP,", j < 4 ? 'L' : 'S');
+ p += sprintf(p, "1,");
+ }
+
+ p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[0]) ? "A" : ""));
+ p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[1]) ? "B" : ""));
+ p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[2]) ? "C" : ""));
+ p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[3]) ? "D" : ""));
+ p += sprintf(p, "%s" ,((idx == mi->max_prob_rate) ? "P" : ""));
+
+ if (gflags & IEEE80211_TX_RC_MCS) {
+ p += sprintf(p, ",MCS%-2u,", (mg->streams - 1) * 8 + j);
+ } else if (gflags & IEEE80211_TX_RC_VHT_MCS) {
+ p += sprintf(p, ",MCS%-1u/%1u,", j, mg->streams);
+ } else {
+ int r = bitrates[j % 4];
+ p += sprintf(p, ",%2u.%1uM,", r / 10, r % 10);
+ }
+
+ p += sprintf(p, "%u,", idx);
+ tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000);
+ p += sprintf(p, "%u,", tx_time);
+
+ tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
+ prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
+ eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"
+ "%u,%llu,%llu,",
+ tp_max / 10, tp_max % 10,
+ tp_avg / 10, tp_avg % 10,
+ eprob / 10, eprob % 10,
+ mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
+ prob / 10, prob % 10,
+ mrs->retry_count,
+ mrs->last_success,
+ mrs->last_attempts,
+ (unsigned long long)mrs->succ_hist,
+ (unsigned long long)mrs->att_hist);
+ p += sprintf(p, "%d,%d,%d.%d\n",
+ max(0, (int) mi->total_packets -
+ (int) mi->sample_packets),
+ mi->sample_packets,
+ MINSTREL_TRUNC(mi->avg_ampdu_len),
+ MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
+ }
+
+ return p;
+}
+
+static int
+minstrel_ht_stats_csv_open(struct inode *inode, struct file *file)
+{
+ struct minstrel_ht_sta_priv *msp = inode->i_private;
+ struct minstrel_ht_sta *mi = &msp->ht;
+ struct minstrel_debugfs_info *ms;
+ unsigned int i;
+ int ret;
+ char *p;
+
+ if (!msp->is_ht) {
+ inode->i_private = &msp->legacy;
+ ret = minstrel_stats_csv_open(inode, file);
+ inode->i_private = msp;
+ return ret;
+ }
+
+ ms = kmalloc(32768, GFP_KERNEL);
+
+ if (!ms)
+ return -ENOMEM;
+
+ file->private_data = ms;
+
+ p = ms->buf;
+
+ p = minstrel_ht_stats_csv_dump(mi, MINSTREL_CCK_GROUP, p);
+ for (i = 0; i < MINSTREL_CCK_GROUP; i++)
+ p = minstrel_ht_stats_csv_dump(mi, i, p);
+ for (i++; i < ARRAY_SIZE(mi->groups); i++)
+ p = minstrel_ht_stats_csv_dump(mi, i, p);
+
+ ms->len = p - ms->buf;
+ WARN_ON(ms->len + sizeof(*ms) > 32768);
+
+ return nonseekable_open(inode, file);
+}
+
+static const struct file_operations minstrel_ht_stat_csv_fops = {
+ .owner = THIS_MODULE,
+ .open = minstrel_ht_stats_csv_open,
+ .read = minstrel_stats_read,
+ .release = minstrel_stats_release,
+ .llseek = no_llseek,
+};
+
void
minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
@@ -148,6 +307,8 @@ minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp,
&minstrel_ht_stat_fops);
+ msp->dbg_stats_csv = debugfs_create_file("rc_stats_csv", S_IRUGO,
+ dir, msp, &minstrel_ht_stat_csv_fops);
}
void
@@ -156,4 +317,5 @@ minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta)
struct minstrel_ht_sta_priv *msp = priv_sta;
debugfs_remove(msp->dbg_stats);
+ debugfs_remove(msp->dbg_stats_csv);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1eb730bf8752..260eed45b6d2 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1185,6 +1185,7 @@ static void sta_ps_start(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct ps_data *ps;
+ int tid;
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1198,6 +1199,18 @@ static void sta_ps_start(struct sta_info *sta)
drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
sta->sta.addr, sta->sta.aid);
+
+ if (!sta->sta.txq[0])
+ return;
+
+ for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
+ struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
+
+ if (!skb_queue_len(&txqi->queue))
+ set_bit(tid, &sta->txq_buffered_tids);
+ else
+ clear_bit(tid, &sta->txq_buffered_tids);
+ }
}
static void sta_ps_end(struct sta_info *sta)
@@ -1913,8 +1926,7 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
/* Drop unencrypted frames if key is set. */
if (unlikely(!ieee80211_has_protected(fc) &&
!ieee80211_is_nullfunc(fc) &&
- ieee80211_is_data(fc) &&
- (rx->key || rx->sdata->drop_unencrypted)))
+ ieee80211_is_data(fc) && rx->key))
return -EACCES;
return 0;
@@ -2044,6 +2056,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
struct sta_info *dsta;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rx->skb->len;
+
skb = rx->skb;
xmit_skb = NULL;
@@ -2174,8 +2189,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
dev_kfree_skb(rx->skb);
continue;
}
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rx->skb->len;
ieee80211_deliver_skb(rx);
}
@@ -2401,9 +2414,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
rx->skb->dev = dev;
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rx->skb->len;
-
if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
!is_multicast_ether_addr(
((struct ethhdr *)rx->skb->data)->h_dest) &&
@@ -3129,6 +3139,12 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
goto rxh_next; \
} while (0);
+ /* Lock here to avoid hitting all of the data used in the RX
+ * path (e.g. key data, station data, ...) concurrently when
+ * a frame is released from the reorder buffer due to timeout
+ * from the timer, potentially concurrently with RX from the
+ * driver.
+ */
spin_lock_bh(&rx->local->rx_path_lock);
while ((skb = __skb_dequeue(frames))) {
@@ -3421,7 +3437,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
__le16 fc;
struct ieee80211_rx_data rx;
struct ieee80211_sub_if_data *prev;
- struct sta_info *sta, *tmp, *prev_sta;
+ struct sta_info *sta, *prev_sta;
+ struct rhash_head *tmp;
int err = 0;
fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
@@ -3456,9 +3473,13 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_scan_rx(local, skb);
if (ieee80211_is_data(fc)) {
+ const struct bucket_table *tbl;
+
prev_sta = NULL;
- for_each_sta_info(local, hdr->addr2, sta, tmp) {
+ tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
+
+ for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) {
if (!prev_sta) {
prev_sta = sta;
continue;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 05f0d711b6d8..7bb6a9383f58 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -928,11 +928,12 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan,
+ struct ieee80211_channel **channels,
+ unsigned int n_channels,
enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_local *local = sdata->local;
- int ret = -EBUSY;
+ int ret = -EBUSY, i, n_ch = 0;
enum ieee80211_band band;
mutex_lock(&local->mtx);
@@ -942,9 +943,8 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
goto unlock;
/* fill internal scan request */
- if (!chan) {
- int i, max_n;
- int n_ch = 0;
+ if (!channels) {
+ int max_n;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (!local->hw.wiphy->bands[band])
@@ -969,12 +969,19 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
local->int_scan_req->n_channels = n_ch;
} else {
- if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IR |
- IEEE80211_CHAN_DISABLED)))
+ for (i = 0; i < n_channels; i++) {
+ if (channels[i]->flags & (IEEE80211_CHAN_NO_IR |
+ IEEE80211_CHAN_DISABLED))
+ continue;
+
+ local->int_scan_req->channels[n_ch] = channels[i];
+ n_ch++;
+ }
+
+ if (WARN_ON_ONCE(n_ch == 0))
goto unlock;
- local->int_scan_req->channels[0] = chan;
- local->int_scan_req->n_channels = 1;
+ local->int_scan_req->n_channels = n_ch;
}
local->int_scan_req->ssids = &local->scan_ssid;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 00ca8dcc2bcf..2880f2ae99ab 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -64,32 +64,21 @@
* freed before they are done using it.
*/
+static const struct rhashtable_params sta_rht_params = {
+ .nelem_hint = 3, /* start small */
+ .automatic_shrinking = true,
+ .head_offset = offsetof(struct sta_info, hash_node),
+ .key_offset = offsetof(struct sta_info, sta.addr),
+ .key_len = ETH_ALEN,
+ .hashfn = sta_addr_hash,
+};
+
/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
struct sta_info *sta)
{
- struct sta_info *s;
-
- s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)],
- lockdep_is_held(&local->sta_mtx));
- if (!s)
- return -ENOENT;
- if (s == sta) {
- rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
- s->hnext);
- return 0;
- }
-
- while (rcu_access_pointer(s->hnext) &&
- rcu_access_pointer(s->hnext) != sta)
- s = rcu_dereference_protected(s->hnext,
- lockdep_is_held(&local->sta_mtx));
- if (rcu_access_pointer(s->hnext)) {
- rcu_assign_pointer(s->hnext, sta->hnext);
- return 0;
- }
-
- return -ENOENT;
+ return rhashtable_remove_fast(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
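/*
 * Illustrative sketch (not part of the patch): a minimal user-space analogue
 * of the lookup this conversion replaces -- stations found by MAC address
 * through a hashed bucket chain rather than the removed hand-rolled hnext
 * list.  The fixed-size table and trivial hash are assumptions for the
 * example; the kernel side uses the resizable rhashtable configured by
 * sta_rht_params (jhash over the 6-byte address).
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN   6
#define N_BUCKETS  16

struct sta {
	unsigned char addr[ETH_ALEN];
	struct sta *next;	/* per-bucket chain */
};

static struct sta *buckets[N_BUCKETS];

static unsigned int hash_addr(const unsigned char *addr)
{
	unsigned int h = 0, i;

	for (i = 0; i < ETH_ALEN; i++)
		h = h * 31 + addr[i];
	return h % N_BUCKETS;
}

static void sta_insert(struct sta *sta)
{
	unsigned int b = hash_addr(sta->addr);

	sta->next = buckets[b];
	buckets[b] = sta;
}

static struct sta *sta_lookup(const unsigned char *addr)
{
	struct sta *s;

	for (s = buckets[hash_addr(addr)]; s; s = s->next)
		if (!memcmp(s->addr, addr, ETH_ALEN))
			return s;
	return NULL;
}

int main(void)
{
	struct sta a = { .addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };

	sta_insert(&a);
	printf("lookup %s\n", sta_lookup(a.addr) ? "hit" : "miss");
	return 0;
}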
static void __cleanup_single_sta(struct sta_info *sta)
@@ -118,6 +107,16 @@ static void __cleanup_single_sta(struct sta_info *sta)
atomic_dec(&ps->num_sta_ps);
}
+ if (sta->sta.txq[0]) {
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
+ int n = skb_queue_len(&txqi->queue);
+
+ ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+ atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]);
+ }
+ }
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
@@ -160,17 +159,23 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
+ struct rhash_head *tmp;
+ const struct bucket_table *tbl;
- sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_mtx));
- while (sta) {
- if (sta->sdata == sdata &&
- ether_addr_equal(sta->sta.addr, addr))
- break;
- sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_mtx));
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
+
+ for_each_sta_info(local, tbl, addr, sta, tmp) {
+ if (sta->sdata == sdata) {
+ rcu_read_unlock();
+ /* this is safe as the caller must already hold
+ * another rcu read section or the mutex
+ */
+ return sta;
+ }
}
- return sta;
+ rcu_read_unlock();
+ return NULL;
}
/*
@@ -182,18 +187,24 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
+ struct rhash_head *tmp;
+ const struct bucket_table *tbl;
- sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_mtx));
- while (sta) {
- if ((sta->sdata == sdata ||
- (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
- ether_addr_equal(sta->sta.addr, addr))
- break;
- sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_mtx));
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
+
+ for_each_sta_info(local, tbl, addr, sta, tmp) {
+ if (sta->sdata == sdata ||
+ (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
+ rcu_read_unlock();
+ /* this is safe as the caller must already hold
+ * another rcu read section or the mutex
+ */
+ return sta;
+ }
}
- return sta;
+ rcu_read_unlock();
+ return NULL;
}
struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
@@ -229,19 +240,13 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
*/
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
- int i;
-
if (sta->rate_ctrl)
rate_control_free_sta(sta);
- if (sta->tx_lat) {
- for (i = 0; i < IEEE80211_NUM_TIDS; i++)
- kfree(sta->tx_lat[i].bins);
- kfree(sta->tx_lat);
- }
-
sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
+ if (sta->sta.txq[0])
+ kfree(to_txq_info(sta->sta.txq[0]));
kfree(rcu_dereference_raw(sta->sta.rates));
kfree(sta);
}
@@ -250,9 +255,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
static void sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
{
- lockdep_assert_held(&local->sta_mtx);
- sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
- rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
+ rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -293,44 +297,15 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
const u8 *addr, gfp_t gfp)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_hw *hw = &local->hw;
struct sta_info *sta;
struct timespec uptime;
- struct ieee80211_tx_latency_bin_ranges *tx_latency;
int i;
- sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
+ sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
if (!sta)
return NULL;
- rcu_read_lock();
- tx_latency = rcu_dereference(local->tx_latency);
- /* init stations Tx latency statistics && TID bins */
- if (tx_latency) {
- sta->tx_lat = kzalloc(IEEE80211_NUM_TIDS *
- sizeof(struct ieee80211_tx_latency_stat),
- GFP_ATOMIC);
- if (!sta->tx_lat) {
- rcu_read_unlock();
- goto free;
- }
-
- if (tx_latency->n_ranges) {
- for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
- /* size of bins is size of the ranges +1 */
- sta->tx_lat[i].bin_count =
- tx_latency->n_ranges + 1;
- sta->tx_lat[i].bins =
- kcalloc(sta->tx_lat[i].bin_count,
- sizeof(u32), GFP_ATOMIC);
- if (!sta->tx_lat[i].bins) {
- rcu_read_unlock();
- goto free;
- }
- }
- }
- }
- rcu_read_unlock();
-
spin_lock_init(&sta->lock);
spin_lock_init(&sta->ps_lock);
INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
@@ -359,8 +334,24 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
ewma_init(&sta->chain_signal_avg[i], 1024, 8);
+ if (local->ops->wake_tx_queue) {
+ void *txq_data;
+ int size = sizeof(struct txq_info) +
+ ALIGN(hw->txq_data_size, sizeof(void *));
+
+ txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
+ if (!txq_data)
+ goto free;
+
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ struct txq_info *txq = txq_data + i * size;
+
+ ieee80211_init_tx_queue(sdata, sta, txq, i);
+ }
+ }
+
if (sta_prepare_rate_control(local, sta, gfp))
- goto free;
+ goto free_txq;
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
/*
@@ -382,7 +373,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
+ hw->wiphy->bands[ieee80211_get_sdata_band(sdata)];
u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
IEEE80211_HT_CAP_SM_PS_SHIFT;
/*
@@ -405,14 +396,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
}
sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
+
return sta;
+free_txq:
+ if (sta->sta.txq[0])
+ kfree(to_txq_info(sta->sta.txq[0]));
free:
- if (sta->tx_lat) {
- for (i = 0; i < IEEE80211_NUM_TIDS; i++)
- kfree(sta->tx_lat[i].bins);
- kfree(sta->tx_lat);
- }
kfree(sta);
return NULL;
}
@@ -684,6 +674,8 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
indicate_tim |=
sta->driver_buffered_tids & tids;
+ indicate_tim |=
+ sta->txq_buffered_tids & tids;
}
done:
@@ -992,19 +984,32 @@ static void sta_info_cleanup(unsigned long data)
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
}
-void sta_info_init(struct ieee80211_local *local)
+u32 sta_addr_hash(const void *key, u32 length, u32 seed)
+{
+ return jhash(key, ETH_ALEN, seed);
+}
+
+int sta_info_init(struct ieee80211_local *local)
{
+ int err;
+
+ err = rhashtable_init(&local->sta_hash, &sta_rht_params);
+ if (err)
+ return err;
+
spin_lock_init(&local->tim_lock);
mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
setup_timer(&local->sta_cleanup, sta_info_cleanup,
(unsigned long)local);
+ return 0;
}
void sta_info_stop(struct ieee80211_local *local)
{
del_timer_sync(&local->sta_cleanup);
+ rhashtable_destroy(&local->sta_hash);
}
@@ -1068,16 +1073,21 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
}
struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
- const u8 *addr,
- const u8 *localaddr)
+ const u8 *addr,
+ const u8 *localaddr)
{
- struct sta_info *sta, *nxt;
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct sta_info *sta;
+ struct rhash_head *tmp;
+ const struct bucket_table *tbl;
+
+ tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
/*
* Just return a random station if localaddr is NULL
* ... first in list.
*/
- for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
+ for_each_sta_info(local, tbl, addr, sta, tmp) {
if (localaddr &&
!ether_addr_equal(sta->sdata->vif.addr, localaddr))
continue;
@@ -1115,7 +1125,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct sk_buff_head pending;
- int filtered = 0, buffered = 0, ac;
+ int filtered = 0, buffered = 0, ac, i;
unsigned long flags;
struct ps_data *ps;
@@ -1134,10 +1144,22 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
sta->driver_buffered_tids = 0;
+ sta->txq_buffered_tids = 0;
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
+ if (sta->sta.txq[0]) {
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
+
+ if (!skb_queue_len(&txqi->queue))
+ continue;
+
+ drv_wake_tx_queue(local, txqi);
+ }
+ }
+
skb_queue_head_init(&pending);
/* sync with ieee80211_tx_h_unicast_ps_buf */
@@ -1275,7 +1297,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
}
info->band = chanctx_conf->def.chan->band;
- ieee80211_xmit(sdata, skb);
+ ieee80211_xmit(sdata, sta, skb);
rcu_read_unlock();
}
@@ -1319,8 +1341,10 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
/* if we already have frames from software, then we can't also
* release from hardware queues
*/
- if (skb_queue_empty(&frames))
+ if (skb_queue_empty(&frames)) {
driver_release_tids |= sta->driver_buffered_tids & tids;
+ driver_release_tids |= sta->txq_buffered_tids & tids;
+ }
if (driver_release_tids) {
/* If the driver has data on more than one TID then
@@ -1491,6 +1515,9 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
sta_info_recalc_tim(sta);
} else {
+ unsigned long tids = sta->txq_buffered_tids & driver_release_tids;
+ int tid;
+
/*
* We need to release a frame that is buffered somewhere in the
* driver ... it'll have to handle that.
@@ -1510,8 +1537,22 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
* that the TID(s) became empty before returning here from the
* release function.
* Either way, however, when the driver tells us that the TID(s)
- * became empty we'll do the TIM recalculation.
+ * became empty or we find that a txq became empty, we'll do the
+ * TIM recalculation.
*/
+
+ if (!sta->sta.txq[0])
+ return;
+
+ for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
+ struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
+
+ if (!(tids & BIT(tid)) || skb_queue_len(&txqi->queue))
+ continue;
+
+ sta_info_recalc_tim(sta);
+ break;
+ }
}
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index fb0fc1302a58..5c164fb3f6c5 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -16,6 +16,7 @@
#include <linux/workqueue.h>
#include <linux/average.h>
#include <linux/etherdevice.h>
+#include <linux/rhashtable.h>
#include "key.h"
/**
@@ -236,25 +237,6 @@ struct sta_ampdu_mlme {
u8 dialog_token_allocator;
};
-/*
- * struct ieee80211_tx_latency_stat - Tx latency statistics
- *
- * Measures TX latency and jitter for a station per TID.
- *
- * @max: worst case latency
- * @sum: sum of all latencies
- * @counter: amount of Tx frames sent from interface
- * @bins: each bin counts how many frames transmitted within a certain
- * latency range. when disabled it is NULL.
- * @bin_count: amount of bins.
- */
-struct ieee80211_tx_latency_stat {
- u32 max;
- u32 sum;
- u32 counter;
- u32 *bins;
- u32 bin_count;
-};
/* Value to indicate no TID reservation */
#define IEEE80211_TID_UNRESERVED 0xff
@@ -267,7 +249,7 @@ struct ieee80211_tx_latency_stat {
*
* @list: global linked list entry
* @free_list: list entry for keeping track of stations to free
- * @hnext: hash table linked list pointer
+ * @hash_node: hash node for rhashtable
* @local: pointer to the global information
* @sdata: virtual interface this station belongs to
* @ptk: peer keys negotiated with this station, if any
@@ -295,6 +277,7 @@ struct ieee80211_tx_latency_stat {
* entered power saving state, these are also delivered to
* the station when it leaves powersave or polls for frames
* @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
+ * @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on
* @rx_packets: Number of MSDUs received from this STA
* @rx_bytes: Number of bytes received from this STA
* @last_rx: time (in jiffies) when last frame was received from this STA
@@ -316,7 +299,6 @@ struct ieee80211_tx_latency_stat {
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
- * @tx_lat: Tx latency statistics
* @llid: Local link ID
* @plid: Peer link ID
* @reason: Cancel reason on PLINK_HOLDING state
@@ -361,7 +343,7 @@ struct sta_info {
/* General information, mostly static */
struct list_head list, free_list;
struct rcu_head rcu_head;
- struct sta_info __rcu *hnext;
+ struct rhash_head hash_node;
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
@@ -390,6 +372,7 @@ struct sta_info {
struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
unsigned long driver_buffered_tids;
+ unsigned long txq_buffered_tids;
/* Updated from RX path only, no locking requirements */
unsigned long rx_packets;
@@ -437,8 +420,6 @@ struct sta_info {
struct sta_ampdu_mlme ampdu_mlme;
u8 timer_to_tid[IEEE80211_NUM_TIDS];
- struct ieee80211_tx_latency_stat *tx_lat;
-
#ifdef CONFIG_MAC80211_MESH
/*
* Mesh peer link attributes
@@ -559,10 +540,6 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
lockdep_is_held(&sta->ampdu_mlme.mtx));
}
-#define STA_HASH_SIZE 256
-#define STA_HASH(sta) (sta[5])
-
-
/* Maximum number of frames to buffer per power saving station per AC */
#define STA_MAX_TX_BUFFER 64
@@ -583,26 +560,15 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-static inline
-void for_each_sta_info_type_check(struct ieee80211_local *local,
- const u8 *addr,
- struct sta_info *sta,
- struct sta_info *nxt)
-{
-}
+u32 sta_addr_hash(const void *key, u32 length, u32 seed);
+
+#define _sta_bucket_idx(_tbl, _a) \
+ rht_bucket_index(_tbl, sta_addr_hash(_a, ETH_ALEN, (_tbl)->hash_rnd))
-#define for_each_sta_info(local, _addr, _sta, nxt) \
- for ( /* initialise loop */ \
- _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
- nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
- /* typecheck */ \
- for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
- /* continue condition */ \
- _sta; \
- /* advance loop */ \
- _sta = nxt, \
- nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
- ) \
+#define for_each_sta_info(local, tbl, _addr, _sta, _tmp) \
+ rht_for_each_entry_rcu(_sta, _tmp, tbl, \
+ _sta_bucket_idx(tbl, _addr), \
+ hash_node) \
/* compare address and run code only if it matches */ \
if (ether_addr_equal(_sta->sta.addr, (_addr)))
@@ -639,7 +605,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
void sta_info_recalc_tim(struct sta_info *sta);
-void sta_info_init(struct ieee80211_local *local);
+int sta_info_init(struct ieee80211_local *local);
void sta_info_stop(struct ieee80211_local *local);
/**
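For reference, a minimal sketch of how a lookup against the new rhashtable-backed station table looks with the for_each_sta_info() macro introduced above; it mirrors the pattern used in ieee80211_tx_status() below. The function name is illustrative only, and the caller is assumed to hold rcu_read_lock().

/* Sketch only: find a station entry by receiver address under RCU.
 * Assumes the declarations from sta_info.h as changed by this patch.
 */
static struct sta_info *sketch_find_sta(struct ieee80211_local *local,
					const u8 *addr)
{
	const struct bucket_table *tbl;
	struct rhash_head *tmp;
	struct sta_info *sta;

	tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);

	/* walks only the bucket selected by sta_addr_hash() and runs the
	 * body for entries whose MAC address matches
	 */
	for_each_sta_info(local, tbl, addr, sta, tmp)
		return sta;

	return NULL;
}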
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index e679b7c9b160..005fdbe39a8b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -12,7 +12,6 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
-#include <linux/time.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
@@ -515,73 +514,6 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
}
/*
- * Measure Tx frame completion and removal time for Tx latency statistics
- * calculation. A single Tx frame latency should be measured from when it
- * is entering the Kernel until we receive Tx complete confirmation indication
- * and remove the skb.
- */
-static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
- struct sk_buff *skb,
- struct sta_info *sta,
- struct ieee80211_hdr *hdr)
-{
- u32 msrmnt;
- u16 tid;
- u8 *qc;
- int i, bin_range_count;
- u32 *bin_ranges;
- __le16 fc;
- struct ieee80211_tx_latency_stat *tx_lat;
- struct ieee80211_tx_latency_bin_ranges *tx_latency;
- ktime_t skb_arv = skb->tstamp;
-
- tx_latency = rcu_dereference(local->tx_latency);
-
- /* assert Tx latency stats are enabled & frame arrived when enabled */
- if (!tx_latency || !ktime_to_ns(skb_arv))
- return;
-
- fc = hdr->frame_control;
-
- if (!ieee80211_is_data(fc)) /* make sure it is a data frame */
- return;
-
- /* get frame tid */
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- } else {
- tid = 0;
- }
-
- tx_lat = &sta->tx_lat[tid];
-
- /* Calculate the latency */
- msrmnt = ktime_to_ms(ktime_sub(ktime_get(), skb_arv));
-
- if (tx_lat->max < msrmnt) /* update stats */
- tx_lat->max = msrmnt;
- tx_lat->counter++;
- tx_lat->sum += msrmnt;
-
- if (!tx_lat->bins) /* bins not activated */
- return;
-
- /* count how many Tx frames transmitted with the appropriate latency */
- bin_range_count = tx_latency->n_ranges;
- bin_ranges = tx_latency->ranges;
-
- for (i = 0; i < bin_range_count; i++) {
- if (msrmnt <= bin_ranges[i]) {
- tx_lat->bins[i]++;
- break;
- }
- }
- if (i == bin_range_count) /* msrmnt is bigger than the biggest range */
- tx_lat->bins[i]++;
-}
-
-/*
* Use a static threshold for now, best value to be determined
* by testing ...
* Should it depend on:
@@ -722,7 +654,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata;
struct net_device *prev_dev = NULL;
- struct sta_info *sta, *tmp;
+ struct sta_info *sta;
+ struct rhash_head *tmp;
int retry_count;
int rates_idx;
bool send_to_cooked;
@@ -731,6 +664,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
int rtap_len;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
+ const struct bucket_table *tbl;
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
@@ -739,7 +673,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
sband = local->hw.wiphy->bands[info->band];
fc = hdr->frame_control;
- for_each_sta_info(local, hdr->addr1, sta, tmp) {
+ tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
+
+ for_each_sta_info(local, tbl, hdr->addr1, sta, tmp) {
/* skip wrong virtual interface */
if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
@@ -853,12 +789,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (acked)
sta->last_ack_signal = info->status.ack_signal;
-
- /*
- * Measure frame removal for tx latency
- * statistics calculation
- */
- ieee80211_tx_latency_end_msrmnt(local, skb, sta, hdr);
}
rcu_read_unlock();
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index c9f9752217ac..fff0d864adfa 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -136,6 +136,24 @@ ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata,
*pos = 2 * subband_cnt;
}
+static void ieee80211_tdls_add_oper_classes(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ u8 *pos;
+ u8 op_class;
+
+ if (!ieee80211_chandef_to_operating_class(&sdata->vif.bss_conf.chandef,
+ &op_class))
+ return;
+
+ pos = skb_put(skb, 4);
+ *pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES;
+ *pos++ = 2; /* len */
+
+ *pos++ = op_class;
+ *pos++ = op_class; /* give current operating class as alternate too */
+}
+
static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
{
u8 *pos = (void *)skb_put(skb, 3);
@@ -193,6 +211,17 @@ static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata,
memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN);
}
+static void
+ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ u8 *pos = (void *)skb_put(skb, 4);
+
+ *pos++ = WLAN_EID_AID;
+ *pos++ = 2; /* len */
+ put_unaligned_le16(ifmgd->aid, pos);
+}
+
/* translate numbering in the WMM parameter IE to the mac80211 notation */
static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
{
@@ -271,21 +300,11 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
struct sta_info *sta = NULL;
size_t offset = 0, noffset;
u8 *pos;
- rcu_read_lock();
-
- /* we should have the peer STA if we're already responding */
- if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
- sta = sta_info_get(sdata, peer);
- if (WARN_ON_ONCE(!sta)) {
- rcu_read_unlock();
- return;
- }
- }
-
ieee80211_add_srates_ie(sdata, skb, false, band);
ieee80211_add_ext_srates_ie(sdata, skb, false, band);
ieee80211_tdls_add_supp_channels(sdata, skb);
@@ -338,6 +357,19 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
offset = noffset;
}
+ rcu_read_lock();
+
+ /* we should have the peer STA if we're already responding */
+ if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
+ sta = sta_info_get(sdata, peer);
+ if (WARN_ON_ONCE(!sta)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+
+ ieee80211_tdls_add_oper_classes(sdata, skb);
+
/*
* with TDLS we can switch channels, and HT-caps are not necessarily
* the same on all bands. The specification limits the setup to a
@@ -346,7 +378,9 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
sband = local->hw.wiphy->bands[band];
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
- if (action_code == WLAN_TDLS_SETUP_REQUEST && ht_cap.ht_supported) {
+ if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) &&
+ ht_cap.ht_supported) {
ieee80211_apply_htcap_overrides(sdata, &ht_cap);
/* disable SMPS in TDLS initiator */
@@ -368,12 +402,63 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
}
- rcu_read_unlock();
-
if (ht_cap.ht_supported &&
(ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
ieee80211_tdls_add_bss_coex_ie(skb);
+ ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+
+ /* add any custom IEs that go before VHT capabilities */
+ if (extra_ies_len) {
+ static const u8 before_vht_cap[] = {
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_COUNTRY,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_SUPPORTED_CHANNELS,
+ WLAN_EID_RSN,
+ WLAN_EID_EXT_CAPABILITY,
+ WLAN_EID_QOS_CAPA,
+ WLAN_EID_FAST_BSS_TRANSITION,
+ WLAN_EID_TIMEOUT_INTERVAL,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ WLAN_EID_MULTI_BAND,
+ };
+ noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+ before_vht_cap,
+ ARRAY_SIZE(before_vht_cap),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, extra_ies + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ /* build the VHT-cap similarly to the HT-cap */
+ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+ if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) &&
+ vht_cap.vht_supported) {
+ ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
+
+ /* the AID is present only when VHT is implemented */
+ if (action_code == WLAN_TDLS_SETUP_REQUEST)
+ ieee80211_tdls_add_aid(sdata, skb);
+
+ pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+ ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+ } else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
+ vht_cap.vht_supported && sta->sta.vht_cap.vht_supported) {
+ /* the peer caps are already intersected with our own */
+ memcpy(&vht_cap, &sta->sta.vht_cap, sizeof(vht_cap));
+
+ /* the AID is present only when VHT is implemented */
+ ieee80211_tdls_add_aid(sdata, skb);
+
+ pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+ ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+ }
+
+ rcu_read_unlock();
+
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
@@ -381,7 +466,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
memcpy(pos, extra_ies + offset, noffset - offset);
}
- ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
}
static void
@@ -394,6 +478,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
size_t offset = 0, noffset;
struct sta_info *sta, *ap_sta;
+ enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
u8 *pos;
rcu_read_lock();
@@ -453,6 +538,21 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
}
}
+ ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+
+ /* only include VHT-operation if not on the 2.4GHz band */
+ if (band != IEEE80211_BAND_2GHZ && !ap_sta->sta.vht_cap.vht_supported &&
+ sta->sta.vht_cap.vht_supported) {
+ struct ieee80211_chanctx_conf *chanctx_conf =
+ rcu_dereference(sdata->vif.chanctx_conf);
+ if (!WARN_ON(!chanctx_conf)) {
+ pos = skb_put(skb, 2 +
+ sizeof(struct ieee80211_vht_operation));
+ ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
+ &chanctx_conf->def);
+ }
+ }
+
rcu_read_unlock();
/* add any remaining IEs */
@@ -461,8 +561,6 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, noffset - offset);
memcpy(pos, extra_ies + offset, noffset - offset);
}
-
- ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
}
static void
@@ -708,8 +806,12 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
26 + /* max(WMM-info, WMM-param) */
2 + max(sizeof(struct ieee80211_ht_cap),
sizeof(struct ieee80211_ht_operation)) +
+ 2 + max(sizeof(struct ieee80211_vht_cap),
+ sizeof(struct ieee80211_vht_operation)) +
50 + /* supported channels */
3 + /* 40/20 BSS coex */
+ 4 + /* AID */
+ 4 + /* oper classes */
extra_ies_len +
sizeof(struct ieee80211_tdls_lnkie));
if (!skb)
@@ -907,7 +1009,7 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) &&
!ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
ret = -EBUSY;
- goto exit;
+ goto out_unlock;
}
/*
@@ -922,27 +1024,34 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
if (!sta_info_get(sdata, peer)) {
rcu_read_unlock();
ret = -ENOLINK;
- goto exit;
+ goto out_unlock;
}
rcu_read_unlock();
}
ieee80211_flush_queues(local, sdata, false);
+ memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
+ mutex_unlock(&local->mtx);
+ /* we cannot take the mutex while preparing the setup packet */
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
dialog_token, status_code,
peer_capability, initiator,
extra_ies, extra_ies_len, 0,
NULL);
- if (ret < 0)
- goto exit;
+ if (ret < 0) {
+ mutex_lock(&local->mtx);
+ eth_zero_addr(sdata->u.mgd.tdls_peer);
+ mutex_unlock(&local->mtx);
+ return ret;
+ }
- memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
ieee80211_queue_delayed_work(&sdata->local->hw,
&sdata->u.mgd.tdls_peer_del_work,
TDLS_PEER_SETUP_TIMEOUT);
+ return 0;
-exit:
+out_unlock:
mutex_unlock(&local->mtx);
return ret;
}
diff --git a/net/mac80211/trace.c b/net/mac80211/trace.c
index 386e45d8a958..edfe0c170a1c 100644
--- a/net/mac80211/trace.c
+++ b/net/mac80211/trace.c
@@ -8,6 +8,7 @@
#include "debug.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
+#include "trace_msg.h"
#ifdef CONFIG_MAC80211_MESSAGE_TRACING
void __sdata_info(const char *fmt, ...)
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 263a9561eb26..4c2e7690226a 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1256,28 +1256,28 @@ TRACE_EVENT(drv_set_rekey_data,
LOCAL_PR_ARG, VIF_PR_ARG)
);
-TRACE_EVENT(drv_rssi_callback,
+TRACE_EVENT(drv_event_callback,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- enum ieee80211_rssi_event rssi_event),
+ const struct ieee80211_event *_event),
- TP_ARGS(local, sdata, rssi_event),
+ TP_ARGS(local, sdata, _event),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
- __field(u32, rssi_event)
+ __field(u32, type)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->rssi_event = rssi_event;
+ __entry->type = _event->type;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " rssi_event:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->rssi_event
+ LOCAL_PR_FMT VIF_PR_FMT " event:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->type
)
);
@@ -2312,43 +2312,36 @@ TRACE_EVENT(drv_tdls_recv_channel_switch,
)
);
-#ifdef CONFIG_MAC80211_MESSAGE_TRACING
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mac80211_msg
-
-#define MAX_MSG_LEN 100
-
-DECLARE_EVENT_CLASS(mac80211_msg_event,
- TP_PROTO(struct va_format *vaf),
+TRACE_EVENT(drv_wake_tx_queue,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct txq_info *txq),
- TP_ARGS(vaf),
+ TP_ARGS(local, sdata, txq),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u8, ac)
+ __field(u8, tid)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
- ),
+ struct ieee80211_sta *sta = txq->txq.sta;
- TP_printk("%s", __get_str(msg))
-);
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->ac = txq->txq.ac;
+ __entry->tid = txq->txq.tid;
+ ),
-DEFINE_EVENT(mac80211_msg_event, mac80211_info,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-DEFINE_EVENT(mac80211_msg_event, mac80211_dbg,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-DEFINE_EVENT(mac80211_msg_event, mac80211_err,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ac:%d tid:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ac, __entry->tid
+ )
);
-#endif
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
new file mode 100644
index 000000000000..768f7c22a190
--- /dev/null
+++ b/net/mac80211/trace_msg.h
@@ -0,0 +1,53 @@
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+
+#if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __MAC80211_MSG_DRIVER_TRACE
+
+#include <linux/tracepoint.h>
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mac80211_msg
+
+#define MAX_MSG_LEN 100
+
+DECLARE_EVENT_CLASS(mac80211_msg_event,
+ TP_PROTO(struct va_format *vaf),
+
+ TP_ARGS(vaf),
+
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(mac80211_msg_event, mac80211_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+DEFINE_EVENT(mac80211_msg_event, mac80211_dbg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+DEFINE_EVENT(mac80211_msg_event, mac80211_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+#endif /* !__MAC80211_MSG_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_msg
+#include <trace/define_trace.h>
+
+#endif
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 07bd8db00af8..667111ee6a20 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -20,7 +20,6 @@
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
-#include <linux/time.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
@@ -595,23 +594,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else if (!is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(tx->sdata->default_unicast_key)))
tx->key = key;
- else if (info->flags & IEEE80211_TX_CTL_INJECTED)
- tx->key = NULL;
- else if (!tx->sdata->drop_unencrypted)
- tx->key = NULL;
- else if (tx->skb->protocol == tx->sdata->control_port_protocol)
- tx->key = NULL;
- else if (ieee80211_is_robust_mgmt_frame(tx->skb) &&
- !(ieee80211_is_action(hdr->frame_control) &&
- tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
- tx->key = NULL;
- else if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_robust_mgmt_frame(tx->skb))
+ else
tx->key = NULL;
- else {
- I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
- return TX_DROP;
- }
if (tx->key) {
bool skip_hw = false;
@@ -783,12 +767,22 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
+static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
+{
+ u16 *seq = &sta->tid_seq[tid];
+ __le16 ret = cpu_to_le16(*seq);
+
+ /* Increase the sequence number. */
+ *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
+
+ return ret;
+}
+
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
- u16 *seq;
u8 *qc;
int tid;
@@ -839,13 +833,10 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- seq = &tx->sta->tid_seq[tid];
tx->sta->tx_msdu[tid]++;
- hdr->seq_ctrl = cpu_to_le16(*seq);
-
- /* Increase the sequence number. */
- *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
+ if (!tx->sta->sta.txq[0])
+ hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
return TX_CONTINUE;
}
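The helper factored out here is also called from the new ieee80211_tx_dequeue() path further down, so for stations with TX queues the QoS sequence number is assigned only once, when the frame actually leaves its txq. A quick worked example of the arithmetic, assuming IEEE80211_SCTL_SEQ == 0xfff0 as defined in linux/ieee80211.h:

/* The low four bits of seq_ctrl carry the fragment number, hence the
 * counter advances in steps of 0x10 and is masked with the seq field.
 */
u16 seq = 0xfff0;				/* highest sequence number */
seq = (seq + 0x10) & IEEE80211_SCTL_SEQ;	/* wraps around to 0x0000 */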
@@ -1086,7 +1077,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
* nothing -- this aggregation session is being started
* but that might still fail with the driver
*/
- } else {
+ } else if (!tx->sta->sta.txq[tid]) {
spin_lock(&tx->sta->lock);
/*
* Need to re-check now, because we may get here
@@ -1137,11 +1128,13 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
/*
* initialises @tx
+ * pass %NULL for the station if unknown, a valid pointer if known
+ * or an ERR_PTR() if the station is known not to exist
*/
static ieee80211_tx_result
ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_data *tx,
- struct sk_buff *skb)
+ struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
@@ -1164,17 +1157,22 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
hdr = (struct ieee80211_hdr *) skb->data;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
- tx->sta = rcu_dereference(sdata->u.vlan.sta);
- if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
- return TX_DROP;
- } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
- IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
- tx->sdata->control_port_protocol == tx->skb->protocol) {
- tx->sta = sta_info_get_bss(sdata, hdr->addr1);
+ if (likely(sta)) {
+ if (!IS_ERR(sta))
+ tx->sta = sta;
+ } else {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ tx->sta = rcu_dereference(sdata->u.vlan.sta);
+ if (!tx->sta && sdata->wdev.use_4addr)
+ return TX_DROP;
+ } else if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+ IEEE80211_TX_CTL_INJECTED) ||
+ tx->sdata->control_port_protocol == tx->skb->protocol) {
+ tx->sta = sta_info_get_bss(sdata, hdr->addr1);
+ }
+ if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
+ tx->sta = sta_info_get(sdata, hdr->addr1);
}
- if (!tx->sta)
- tx->sta = sta_info_get(sdata, hdr->addr1);
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
!ieee80211_is_qos_nullfunc(hdr->frame_control) &&
@@ -1220,13 +1218,102 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
return TX_CONTINUE;
}
+static void ieee80211_drv_tx(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *pubsta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_control control = {
+ .sta = pubsta,
+ };
+ struct ieee80211_txq *txq = NULL;
+ struct txq_info *txqi;
+ u8 ac;
+
+ if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)
+ goto tx_normal;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ goto tx_normal;
+
+ if (pubsta) {
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+ txq = pubsta->txq[tid];
+ } else if (vif) {
+ txq = vif->txq;
+ }
+
+ if (!txq)
+ goto tx_normal;
+
+ ac = txq->ac;
+ txqi = to_txq_info(txq);
+ atomic_inc(&sdata->txqs_len[ac]);
+ if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
+ netif_stop_subqueue(sdata->dev, ac);
+
+ skb_queue_tail(&txqi->queue, skb);
+ drv_wake_tx_queue(local, txqi);
+
+ return;
+
+tx_normal:
+ drv_tx(local, &control, skb);
+}
+
+struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
+ struct txq_info *txqi = container_of(txq, struct txq_info, txq);
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ u8 ac = txq->ac;
+
+ spin_lock_bh(&txqi->queue.lock);
+
+ if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
+ goto out;
+
+ skb = __skb_dequeue(&txqi->queue);
+ if (!skb)
+ goto out;
+
+ atomic_dec(&sdata->txqs_len[ac]);
+ if (__netif_subqueue_stopped(sdata->dev, ac))
+ ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
+ struct sta_info *sta = container_of(txq->sta, struct sta_info,
+ sta);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid);
+ if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ else
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ }
+
+out:
+ spin_unlock_bh(&txqi->queue.lock);
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_tx_dequeue);
+
static bool ieee80211_tx_frags(struct ieee80211_local *local,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct sk_buff_head *skbs,
bool txpending)
{
- struct ieee80211_tx_control control;
struct sk_buff *skb, *tmp;
unsigned long flags;
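A minimal sketch of how a driver might consume the new intermediate queues: ieee80211_tx_dequeue() is exported with the signature shown above, and mac80211 notifies the driver through the wake_tx_queue operation. The handler name and the hardware-specific my_hw_queue_frame() helper are hypothetical.

/* Sketch (not part of the patch): drain mac80211's per-txq queue from a
 * driver's wake_tx_queue handler.
 */
static void my_wake_tx_queue(struct ieee80211_hw *hw,
			     struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	/* pull frames until the txq is empty or the hardware is full */
	while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
		my_hw_queue_frame(hw, txq, skb);
}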
@@ -1284,10 +1371,9 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
info->control.vif = vif;
- control.sta = sta;
__skb_unlink(skb, skbs);
- drv_tx(local, &control, skb);
+ ieee80211_drv_tx(local, vif, sta, skb);
}
return true;
@@ -1422,8 +1508,9 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_data tx;
+ struct sk_buff *skb2;
- if (ieee80211_tx_prepare(sdata, &tx, skb) == TX_DROP)
+ if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
return false;
info->band = band;
@@ -1440,6 +1527,14 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
*sta = NULL;
}
+ /* this function isn't suitable for fragmented data frames */
+ skb2 = __skb_dequeue(&tx.skbs);
+ if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
+ ieee80211_free_txskb(hw, skb2);
+ ieee80211_purge_tx_queue(hw, &tx.skbs);
+ return false;
+ }
+
return true;
}
EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
@@ -1448,7 +1543,8 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
* Returns false if the frame couldn't be transmitted but was queued instead.
*/
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool txpending)
+ struct sta_info *sta, struct sk_buff *skb,
+ bool txpending)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_data tx;
@@ -1464,7 +1560,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
/* initialises tx */
led_len = skb->len;
- res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
+ res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
if (unlikely(res_prepare == TX_DROP)) {
ieee80211_free_txskb(&local->hw, skb);
@@ -1520,7 +1616,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1555,7 +1652,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
}
ieee80211_set_qos_hdr(sdata, skb);
- ieee80211_tx(sdata, skb, false);
+ ieee80211_tx(sdata, sta, skb, false);
}
static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
@@ -1776,7 +1873,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
goto fail_rcu;
info->band = chandef->chan->band;
- ieee80211_xmit(sdata, skb);
+ ieee80211_xmit(sdata, NULL, skb);
rcu_read_unlock();
return NETDEV_TX_OK;
@@ -1788,21 +1885,89 @@ fail:
return NETDEV_TX_OK; /* meaning, we dealt with the skb */
}
-/*
- * Measure Tx frame arrival time for Tx latency statistics calculation
- * A single Tx frame latency should be measured from when it is entering the
- * Kernel until we receive Tx complete confirmation indication and the skb is
- * freed.
- */
-static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
- struct sk_buff *skb)
+static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
{
- struct ieee80211_tx_latency_bin_ranges *tx_latency;
+ u16 ethertype = (skb->data[12] << 8) | skb->data[13];
- tx_latency = rcu_dereference(local->tx_latency);
- if (!tx_latency)
- return;
- skb->tstamp = ktime_get();
+ return ethertype == ETH_P_TDLS &&
+ skb->len > 14 &&
+ skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
+}
+
+static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb,
+ struct sta_info **sta_out)
+{
+ struct sta_info *sta;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ sta = rcu_dereference(sdata->u.vlan.sta);
+ if (sta) {
+ *sta_out = sta;
+ return 0;
+ } else if (sdata->wdev.use_4addr) {
+ return -ENOLINK;
+ }
+ /* fall through */
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_OCB:
+ case NL80211_IFTYPE_ADHOC:
+ if (is_multicast_ether_addr(skb->data)) {
+ *sta_out = ERR_PTR(-ENOENT);
+ return 0;
+ }
+ sta = sta_info_get_bss(sdata, skb->data);
+ break;
+ case NL80211_IFTYPE_WDS:
+ sta = sta_info_get(sdata, sdata->u.wds.remote_addr);
+ break;
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+ /* determined much later */
+ *sta_out = NULL;
+ return 0;
+#endif
+ case NL80211_IFTYPE_STATION:
+ if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+ sta = sta_info_get(sdata, skb->data);
+ if (sta) {
+ bool tdls_peer, tdls_auth;
+
+ tdls_peer = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER);
+ tdls_auth = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER_AUTH);
+
+ if (tdls_peer && tdls_auth) {
+ *sta_out = sta;
+ return 0;
+ }
+
+ /*
+ * TDLS link during setup - throw out frames to
+ * peer. Allow TDLS-setup frames to unauthorized
+ * peers for the special case of a link teardown
+ * after a TDLS sta is removed due to being
+ * unreachable.
+ */
+ if (tdls_peer && !tdls_auth &&
+ !ieee80211_is_tdls_setup(skb))
+ return -EINVAL;
+ }
+
+ }
+
+ sta = sta_info_get(sdata, sdata->u.mgd.bssid);
+ if (!sta)
+ return -ENOLINK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *sta_out = sta ?: ERR_PTR(-ENOENT);
+ return 0;
}
/**
@@ -1824,7 +1989,8 @@ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
* Returns: the (possibly reallocated) skb or an ERR_PTR() code
*/
static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, u32 info_flags)
+ struct sk_buff *skb, u32 info_flags,
+ struct sta_info *sta)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info;
@@ -1837,9 +2003,8 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
- struct sta_info *sta = NULL;
- bool wme_sta = false, authorized = false, tdls_auth = false;
- bool tdls_peer = false, tdls_setup_frame = false;
+ bool wme_sta = false, authorized = false;
+ bool tdls_peer;
bool multicast;
u16 info_id = 0;
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -1847,6 +2012,9 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band;
int ret;
+ if (IS_ERR(sta))
+ sta = NULL;
+
/* convert Ethernet header to proper 802.11 header (based on
* operation mode) */
ethertype = (skb->data[12] << 8) | skb->data[13];
@@ -1854,8 +2022,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
- sta = rcu_dereference(sdata->u.vlan.sta);
- if (sta) {
+ if (sdata->wdev.use_4addr) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
@@ -1874,7 +2041,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
goto free;
}
band = chanctx_conf->def.chan->band;
- if (sta)
+ if (sdata->wdev.use_4addr)
break;
/* fall through */
case NL80211_IFTYPE_AP:
@@ -1978,38 +2145,10 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
break;
#endif
case NL80211_IFTYPE_STATION:
- if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
- sta = sta_info_get(sdata, skb->data);
- if (sta) {
- authorized = test_sta_flag(sta,
- WLAN_STA_AUTHORIZED);
- wme_sta = sta->sta.wme;
- tdls_peer = test_sta_flag(sta,
- WLAN_STA_TDLS_PEER);
- tdls_auth = test_sta_flag(sta,
- WLAN_STA_TDLS_PEER_AUTH);
- }
-
- if (tdls_peer)
- tdls_setup_frame =
- ethertype == ETH_P_TDLS &&
- skb->len > 14 &&
- skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
- }
-
- /*
- * TDLS link during setup - throw out frames to peer. We allow
- * TDLS-setup frames to unauthorized peers for the special case
- * of a link teardown after a TDLS sta is removed due to being
- * unreachable.
- */
- if (tdls_peer && !tdls_auth && !tdls_setup_frame) {
- ret = -EINVAL;
- goto free;
- }
+ /* we already did checks when looking up the RA STA */
+ tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);
- /* send direct packets to authorized TDLS peers */
- if (tdls_peer && tdls_auth) {
+ if (tdls_peer) {
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
@@ -2071,26 +2210,19 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
goto free;
}
- /*
- * There's no need to try to look up the destination
- * if it is a multicast address (which can only happen
- * in AP mode)
- */
multicast = is_multicast_ether_addr(hdr.addr1);
- if (!multicast) {
- sta = sta_info_get(sdata, hdr.addr1);
- if (sta) {
- authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
- wme_sta = sta->sta.wme;
- }
- }
- /* For mesh, the use of the QoS header is mandatory */
- if (ieee80211_vif_is_mesh(&sdata->vif))
+ /* sta is always NULL for mesh */
+ if (sta) {
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ wme_sta = sta->sta.wme;
+ } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ /* For mesh, the use of the QoS header is mandatory */
wme_sta = true;
+ }
- /* receiver and we are QoS enabled, use a QoS type frame */
- if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) {
+ /* receiver does QoS (which also means we do), so use it */

+ if (wme_sta) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
@@ -2260,7 +2392,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
u32 info_flags)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
if (unlikely(skb->len < ETH_HLEN)) {
kfree_skb(skb);
@@ -2269,10 +2401,12 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
rcu_read_lock();
- /* Measure frame arrival for Tx latency statistics calculation */
- ieee80211_tx_latency_start_msrmnt(local, skb);
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
+ kfree_skb(skb);
+ goto out;
+ }
- skb = ieee80211_build_hdr(sdata, skb, info_flags);
+ skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
if (IS_ERR(skb))
goto out;
@@ -2280,7 +2414,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
dev->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
- ieee80211_xmit(sdata, skb);
+ ieee80211_xmit(sdata, sta, skb);
out:
rcu_read_unlock();
}
@@ -2308,10 +2442,17 @@ ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
.local = sdata->local,
.sdata = sdata,
};
+ struct sta_info *sta;
rcu_read_lock();
- skb = ieee80211_build_hdr(sdata, skb, info_flags);
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
+ kfree_skb(skb);
+ skb = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
if (IS_ERR(skb))
goto out;
@@ -2369,7 +2510,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
return true;
}
info->band = chanctx_conf->def.chan->band;
- result = ieee80211_tx(sdata, skb, true);
+ result = ieee80211_tx(sdata, NULL, skb, true);
} else {
struct sk_buff_head skbs;
@@ -3107,7 +3248,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
if (sdata->vif.type == NL80211_IFTYPE_AP)
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
- if (!ieee80211_tx_prepare(sdata, &tx, skb))
+ if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
break;
dev_kfree_skb_any(skb);
}
@@ -3239,6 +3380,6 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
*/
local_bh_disable();
IEEE80211_SKB_CB(skb)->band = band;
- ieee80211_xmit(sdata, skb);
+ ieee80211_xmit(sdata, NULL, skb);
local_bh_enable();
}
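The RA-station lookup added in this file has three outcomes that callers must distinguish: a negative return for hard errors, *sta_out set to ERR_PTR(-ENOENT) when the receiver is known not to exist (multicast), and NULL when it is simply not known yet (mesh resolves it later). A hypothetical caller, following what ieee80211_build_hdr() and __ieee80211_subif_start_xmit() do:

/* Sketch only; the caller is assumed to hold rcu_read_lock(). */
static void sketch_handle_ra_lookup(struct ieee80211_sub_if_data *sdata,
				    struct sk_buff *skb)
{
	struct sta_info *sta;

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
		kfree_skb(skb);		/* hard error: -EINVAL / -ENOLINK */
		return;
	}

	if (IS_ERR(sta))
		sta = NULL;		/* RA known not to exist (multicast) */

	/* sta may still be NULL here (mesh: determined much later) */
	ieee80211_xmit(sdata, sta, skb);
}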
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 747bdcf72e92..79412f16b61d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -308,6 +308,11 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
for (ac = 0; ac < n_acs; ac++) {
int ac_queue = sdata->vif.hw_queue[ac];
+ if (local->ops->wake_tx_queue &&
+ (atomic_read(&sdata->txqs_len[ac]) >
+ local->hw.txq_ac_max_pending))
+ continue;
+
if (ac_queue == queue ||
(sdata->vif.cab_queue == queue &&
local->queue_stop_reasons[ac_queue] == 0 &&
@@ -625,13 +630,14 @@ void ieee80211_wake_vif_queues(struct ieee80211_local *local,
reason, true);
}
-static void __iterate_active_interfaces(struct ieee80211_local *local,
- u32 iter_flags,
- void (*iterator)(void *data, u8 *mac,
- struct ieee80211_vif *vif),
- void *data)
+static void __iterate_interfaces(struct ieee80211_local *local,
+ u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
{
struct ieee80211_sub_if_data *sdata;
+ bool active_only = iter_flags & IEEE80211_IFACE_ITER_ACTIVE;
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
@@ -645,9 +651,9 @@ static void __iterate_active_interfaces(struct ieee80211_local *local,
break;
}
if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
- !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+ active_only && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
continue;
- if (ieee80211_sdata_running(sdata))
+ if (ieee80211_sdata_running(sdata) || !active_only)
iterator(data, sdata->vif.addr,
&sdata->vif);
}
@@ -656,12 +662,12 @@ static void __iterate_active_interfaces(struct ieee80211_local *local,
lockdep_is_held(&local->iflist_mtx) ||
lockdep_rtnl_is_held());
if (sdata &&
- (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
+ (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
sdata->flags & IEEE80211_SDATA_IN_DRIVER))
iterator(data, sdata->vif.addr, &sdata->vif);
}
-void ieee80211_iterate_active_interfaces(
+void ieee80211_iterate_interfaces(
struct ieee80211_hw *hw, u32 iter_flags,
void (*iterator)(void *data, u8 *mac,
struct ieee80211_vif *vif),
@@ -670,10 +676,10 @@ void ieee80211_iterate_active_interfaces(
struct ieee80211_local *local = hw_to_local(hw);
mutex_lock(&local->iflist_mtx);
- __iterate_active_interfaces(local, iter_flags, iterator, data);
+ __iterate_interfaces(local, iter_flags, iterator, data);
mutex_unlock(&local->iflist_mtx);
}
-EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
+EXPORT_SYMBOL_GPL(ieee80211_iterate_interfaces);
void ieee80211_iterate_active_interfaces_atomic(
struct ieee80211_hw *hw, u32 iter_flags,
@@ -684,7 +690,8 @@ void ieee80211_iterate_active_interfaces_atomic(
struct ieee80211_local *local = hw_to_local(hw);
rcu_read_lock();
- __iterate_active_interfaces(local, iter_flags, iterator, data);
+ __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+ iterator, data);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
@@ -699,7 +706,8 @@ void ieee80211_iterate_active_interfaces_rtnl(
ASSERT_RTNL();
- __iterate_active_interfaces(local, iter_flags, iterator, data);
+ __iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+ iterator, data);
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl);
@@ -742,6 +750,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev)
}
EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
+struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (!ieee80211_sdata_running(sdata) ||
+ !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+ return NULL;
+
+ return &sdata->wdev;
+}
+EXPORT_SYMBOL_GPL(ieee80211_vif_to_wdev);
+
/*
* Nothing should have been stuffed into the workqueue during
* the suspend->resume cycle. Since we can't check each caller
@@ -1811,8 +1831,25 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
- ieee80211_sdata_running(sdata))
+ ieee80211_sdata_running(sdata)) {
res = drv_add_interface(local, sdata);
+ if (WARN_ON(res))
+ break;
+ }
+ }
+
+ /* If adding any of the interfaces failed above, roll back and
+ * report failure.
+ */
+ if (res) {
+ list_for_each_entry_continue_reverse(sdata, &local->interfaces,
+ list)
+ if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+ sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+ ieee80211_sdata_running(sdata))
+ drv_remove_interface(local, sdata);
+ ieee80211_handle_reconfig_failure(local);
+ return res;
}
/* add channel contexts */
@@ -2157,46 +2194,6 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->chanctx_mtx);
}
-static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
-{
- int i;
-
- for (i = 0; i < n_ids; i++)
- if (ids[i] == id)
- return true;
- return false;
-}
-
-size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids,
- const u8 *after_ric, int n_after_ric,
- size_t offset)
-{
- size_t pos = offset;
-
- while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) {
- if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) {
- pos += 2 + ies[pos + 1];
-
- while (pos < ielen &&
- !ieee80211_id_in_list(after_ric, n_after_ric,
- ies[pos]))
- pos += 2 + ies[pos + 1];
- } else {
- pos += 2 + ies[pos + 1];
- }
- }
-
- return pos;
-}
-
-size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids, size_t offset)
-{
- return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset);
-}
-EXPORT_SYMBOL(ieee80211_ie_split);
-
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
{
size_t pos = offset;
@@ -2344,6 +2341,41 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
return pos + sizeof(struct ieee80211_ht_operation);
}
+u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ const struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_vht_operation *vht_oper;
+
+ *pos++ = WLAN_EID_VHT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_vht_operation);
+ vht_oper = (struct ieee80211_vht_operation *)pos;
+ vht_oper->center_freq_seg1_idx = ieee80211_frequency_to_channel(
+ chandef->center_freq1);
+ if (chandef->center_freq2)
+ vht_oper->center_freq_seg2_idx =
+ ieee80211_frequency_to_channel(chandef->center_freq2);
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_160:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ /* don't require special VHT peer rates */
+ vht_oper->basic_mcs_set = cpu_to_le16(0xffff);
+
+ return pos + sizeof(struct ieee80211_vht_operation);
+}
+
void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
const struct ieee80211_ht_operation *ht_oper,
struct cfg80211_chan_def *chandef)
@@ -2373,6 +2405,39 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
cfg80211_chandef_create(chandef, control_chan, channel_type);
}
+void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
+ const struct ieee80211_vht_operation *oper,
+ struct cfg80211_chan_def *chandef)
+{
+ if (!oper)
+ return;
+
+ chandef->chan = control_chan;
+
+ switch (oper->chan_width) {
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ break;
+ default:
+ break;
+ }
+
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
+ control_chan->band);
+ chandef->center_freq2 =
+ ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
+ control_chan->band);
+}
+
int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
const struct ieee80211_supported_band *sband,
const u8 *srates, int srates_len, u32 *rates)
@@ -3252,3 +3317,20 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo)
return buf;
}
+
+void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct txq_info *txqi, int tid)
+{
+ skb_queue_head_init(&txqi->queue);
+ txqi->txq.vif = &sdata->vif;
+
+ if (sta) {
+ txqi->txq.sta = &sta->sta;
+ sta->sta.txq[tid] = &txqi->txq;
+ txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
+ } else {
+ sdata->vif.txq = &txqi->txq;
+ txqi->txq.ac = IEEE80211_AC_BE;
+ }
+}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 85f9596da07b..80694d55db74 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -129,10 +129,6 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
return;
- /* don't support VHT for TDLS peers for now */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
- return;
-
/*
* A VHT STA must support 40 MHz, but if we verify that here
* then we break a few things - some APs (e.g. Netgear R6300v2
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 75de6fac40d1..9d63d93c836e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -780,9 +780,8 @@ ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key *key = tx->key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- const struct ieee80211_cipher_scheme *cs = key->sta->cipher_scheme;
int hdrlen;
- u8 *pos;
+ u8 *pos, iv_len = key->conf.iv_len;
if (info->control.hw_key &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
@@ -790,14 +789,14 @@ ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
return TX_CONTINUE;
}
- if (unlikely(skb_headroom(skb) < cs->hdr_len &&
- pskb_expand_head(skb, cs->hdr_len, 0, GFP_ATOMIC)))
+ if (unlikely(skb_headroom(skb) < iv_len &&
+ pskb_expand_head(skb, iv_len, 0, GFP_ATOMIC)))
return TX_DROP;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- pos = skb_push(skb, cs->hdr_len);
- memmove(pos, pos + cs->hdr_len, hdrlen);
+ pos = skb_push(skb, iv_len);
+ memmove(pos, pos + iv_len, hdrlen);
return TX_CONTINUE;
}
@@ -1217,7 +1216,7 @@ ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
if (!info->control.hw_key)
return TX_DROP;
- if (tx->key->sta->cipher_scheme) {
+ if (tx->key->flags & KEY_FLAG_CIPHER_SCHEME) {
res = ieee80211_crypto_cs_encrypt(tx, skb);
if (res != TX_CONTINUE)
return res;
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 5d9f68c75e5f..70be9c799f8a 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -22,13 +22,14 @@
static struct net_device *
ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy,
- const char *name, int type)
+ const char *name,
+ unsigned char name_assign_type, int type)
{
struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
struct net_device *dev;
rtnl_lock();
- dev = ieee802154_if_add(local, name, type,
+ dev = ieee802154_if_add(local, name, name_assign_type, type,
cpu_to_le64(0x0000000000000000ULL));
rtnl_unlock();
@@ -45,12 +46,14 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
static int
ieee802154_add_iface(struct wpan_phy *phy, const char *name,
+ unsigned char name_assign_type,
enum nl802154_iftype type, __le64 extended_addr)
{
struct ieee802154_local *local = wpan_phy_priv(phy);
struct net_device *err;
- err = ieee802154_if_add(local, name, type, extended_addr);
+ err = ieee802154_if_add(local, name, name_assign_type, type,
+ extended_addr);
return PTR_ERR_OR_ZERO(err);
}
diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h
index 98180a9fff4a..a0533357b9ea 100644
--- a/net/mac802154/driver-ops.h
+++ b/net/mac802154/driver-ops.h
@@ -1,4 +1,4 @@
-#ifndef __MAC802154_DRVIER_OPS
+#ifndef __MAC802154_DRIVER_OPS
#define __MAC802154_DRIVER_OPS
#include <linux/types.h>
@@ -220,4 +220,4 @@ drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
return local->ops->set_promiscuous_mode(&local->hw, on);
}
-#endif /* __MAC802154_DRVIER_OPS */
+#endif /* __MAC802154_DRIVER_OPS */
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index bebd70ffc7a3..127ba18386fc 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -182,7 +182,8 @@ void ieee802154_iface_exit(void);
void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata);
struct net_device *
ieee802154_if_add(struct ieee802154_local *local, const char *name,
- enum nl802154_iftype type, __le64 extended_addr);
+ unsigned char name_assign_type, enum nl802154_iftype type,
+ __le64 extended_addr);
void ieee802154_remove_interfaces(struct ieee802154_local *local);
#endif /* __IEEE802154_I_H */
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 6fb6bdf9868c..91b75abbd1a1 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -174,24 +174,16 @@ ieee802154_check_mac_settings(struct ieee802154_local *local,
}
if (local->hw.flags & IEEE802154_HW_AFILT) {
- if (wpan_dev->pan_id != nwpan_dev->pan_id)
- return -EBUSY;
-
- if (wpan_dev->short_addr != nwpan_dev->short_addr)
- return -EBUSY;
-
- if (wpan_dev->extended_addr != nwpan_dev->extended_addr)
+ if (wpan_dev->pan_id != nwpan_dev->pan_id ||
+ wpan_dev->short_addr != nwpan_dev->short_addr ||
+ wpan_dev->extended_addr != nwpan_dev->extended_addr)
return -EBUSY;
}
if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
- if (wpan_dev->min_be != nwpan_dev->min_be)
- return -EBUSY;
-
- if (wpan_dev->max_be != nwpan_dev->max_be)
- return -EBUSY;
-
- if (wpan_dev->csma_retries != nwpan_dev->csma_retries)
+ if (wpan_dev->min_be != nwpan_dev->min_be ||
+ wpan_dev->max_be != nwpan_dev->max_be ||
+ wpan_dev->csma_retries != nwpan_dev->csma_retries)
return -EBUSY;
}
@@ -530,7 +522,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
struct net_device *
ieee802154_if_add(struct ieee802154_local *local, const char *name,
- enum nl802154_iftype type, __le64 extended_addr)
+ unsigned char name_assign_type, enum nl802154_iftype type,
+ __le64 extended_addr)
{
struct net_device *ndev = NULL;
struct ieee802154_sub_if_data *sdata = NULL;
@@ -539,7 +532,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
ASSERT_RTNL();
ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name,
- NET_NAME_UNKNOWN, ieee802154_if_setup);
+ name_assign_type, ieee802154_if_setup);
if (!ndev)
return ERR_PTR(-ENOMEM);
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index dcf73958133a..5b2be12832e6 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -134,7 +134,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
CRYPTO_ALG_ASYNC);
- if (!key->tfm[i])
+ if (IS_ERR(key->tfm[i]))
goto err_tfm;
if (crypto_aead_setkey(key->tfm[i], template->key,
IEEE802154_LLSEC_KEY_SIZE))
@@ -144,7 +144,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
}
key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
- if (!key->tfm0)
+ if (IS_ERR(key->tfm0))
goto err_tfm;
if (crypto_blkcipher_setkey(key->tfm0, template->key,
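The two hunks above matter because the crypto_alloc_*() allocators report failure with an ERR_PTR()-encoded error, never with NULL, so the old checks could never fire. A minimal sketch of the correct idiom:

struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);

if (IS_ERR(tfm))
	return PTR_ERR(tfm);	/* a "!tfm" test would never trigger here */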
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 8500378c8318..08cb32dc8fd3 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -161,18 +161,21 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
rtnl_lock();
- dev = ieee802154_if_add(local, "wpan%d", NL802154_IFTYPE_NODE,
+ dev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM,
+ NL802154_IFTYPE_NODE,
cpu_to_le64(0x0000000000000000ULL));
if (IS_ERR(dev)) {
rtnl_unlock();
rc = PTR_ERR(dev);
- goto out_wq;
+ goto out_phy;
}
rtnl_unlock();
return 0;
+out_phy:
+ wpan_phy_unregister(local->phy);
out_wq:
destroy_workqueue(local->workqueue);
out:
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 5fc979027919..150bf807e572 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -65,8 +65,19 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
{
if (ifs_handling) {
struct ieee802154_local *local = hw_to_local(hw);
+ u8 max_sifs_size;
- if (skb->len > 18)
+ /* If the transceiver appends the CRC itself, use a LIFS
+ * threshold length of 16 instead of 18, because the FCS is
+ * then not counted in skb->len.
+ */
+ if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
+ max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
+ IEEE802154_FCS_LEN;
+ else
+ max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
+
+ if (skb->len > max_sifs_size)
hrtimer_start(&local->ifs_timer,
ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC),
HRTIMER_MODE_REL);
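For reference, the two thresholds work out as follows: aMaxSIFSFrameSize in 802.15.4 is 18 octets and the FCS is 2 octets, so when the transceiver appends the CRC itself (IEEE802154_HW_TX_OMIT_CKSUM) the comparison against skb->len must use 18 - 2 = 16; otherwise the FCS is already included and the threshold stays at 18. Frames above the threshold are spaced with LIFS rather than SIFS, which is why the long-frame case arms the lifs_period timer.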
diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig
index 37421db88965..17bde799c854 100644
--- a/net/mpls/Kconfig
+++ b/net/mpls/Kconfig
@@ -1,9 +1,30 @@
#
# MPLS configuration
#
+
+menuconfig MPLS
+ bool "MultiProtocol Label Switching"
+ default n
+ ---help---
+ MultiProtocol Label Switching routes packets through logical
+ circuits. Originally conceived as a way of routing packets at
+ hardware speeds (before hardware was capable of routing ipv4 packets),
+ MPLS remains a simple way of making tunnels.
+
+ If you have not heard of MPLS you probably want to say N here.
+
+if MPLS
+
config NET_MPLS_GSO
tristate "MPLS: GSO support"
help
This is helper module to allow segmentation of non-MPLS GSO packets
that have had MPLS stack entries pushed onto them and thus
become MPLS GSO packets.
+
+config MPLS_ROUTING
+ tristate "MPLS: routing support"
+ help
+ Add support for forwarding of mpls packets.
+
+endif # MPLS
diff --git a/net/mpls/Makefile b/net/mpls/Makefile
index 6dec088c2d0f..65bbe68c72e6 100644
--- a/net/mpls/Makefile
+++ b/net/mpls/Makefile
@@ -2,3 +2,6 @@
# Makefile for MPLS.
#
obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
+obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+
+mpls_router-y := af_mpls.o
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
new file mode 100644
index 000000000000..7b3f732269e4
--- /dev/null
+++ b/net/mpls/af_mpls.c
@@ -0,0 +1,1142 @@
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/sysctl.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/ipv6.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/ip_fib.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include "internal.h"
+
+#define LABEL_NOT_SPECIFIED (1<<20)
+#define MAX_NEW_LABELS 2
+
+/* This maximum hardware address (ha) length is copied from the definition of struct neighbour */
+#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
+
+struct mpls_route { /* next hop label forwarding entry */
+ struct net_device __rcu *rt_dev;
+ struct rcu_head rt_rcu;
+ u32 rt_label[MAX_NEW_LABELS];
+ u8 rt_protocol; /* routing protocol that set this entry */
+ u8 rt_labels;
+ u8 rt_via_alen;
+ u8 rt_via_table;
+ u8 rt_via[0];
+};
+
+static int zero = 0;
+static int label_limit = (1 << 20) - 1;
+
+static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
+ struct nlmsghdr *nlh, struct net *net, u32 portid,
+ unsigned int nlm_flags);
+
+static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
+{
+ struct mpls_route *rt = NULL;
+
+ if (index < net->mpls.platform_labels) {
+ struct mpls_route __rcu **platform_label =
+ rcu_dereference(net->mpls.platform_label);
+ rt = rcu_dereference(platform_label[index]);
+ }
+ return rt;
+}
+
+static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->mpls_ptr);
+}
+
+static bool mpls_output_possible(const struct net_device *dev)
+{
+ return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
+}
+
+static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
+{
+ /* The size of the layer 2.5 labels to be added for this route */
+ return rt->rt_labels * sizeof(struct mpls_shim_hdr);
+}
+
+static unsigned int mpls_dev_mtu(const struct net_device *dev)
+{
+ /* The amount of data the layer 2 frame can hold */
+ return dev->mtu;
+}
+
+static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+ return true;
+}
+
+static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
+ struct mpls_entry_decoded dec)
+{
+ /* RFC4385 and RFC5586 encode other packets in mpls such that
+ * they don't conflict with the ip version number, making
+ * decoding by examining the ip version correct in everything
+ * except for the strangest cases.
+ *
+ * The strange cases if we choose to support them will require
+ * manual configuration.
+ */
+ struct iphdr *hdr4;
+ bool success = true;
+
+ /* The IPv4 code below accesses through the IPv4 header
+ * checksum, which is 12 bytes into the packet.
+ * The IPv6 code below accesses through the IPv6 hop limit
+ * which is 8 bytes into the packet.
+ *
+ * For all supported cases there should always be at least 12
+ * bytes of packet data present. The IPv4 header is 20 bytes
+ * without options and the IPv6 header is always 40 bytes
+ * long.
+ */
+ if (!pskb_may_pull(skb, 12))
+ return false;
+
+ /* Use ip_hdr to find the ip protocol version */
+ hdr4 = ip_hdr(skb);
+ if (hdr4->version == 4) {
+ skb->protocol = htons(ETH_P_IP);
+ csum_replace2(&hdr4->check,
+ htons(hdr4->ttl << 8),
+ htons(dec.ttl << 8));
+ hdr4->ttl = dec.ttl;
+ }
+ else if (hdr4->version == 6) {
+ struct ipv6hdr *hdr6 = ipv6_hdr(skb);
+ skb->protocol = htons(ETH_P_IPV6);
+ hdr6->hop_limit = dec.ttl;
+ }
+ else
+ /* version 0 and version 1 are used by pseudo wires */
+ success = false;
+ return success;
+}
+
+static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct net *net = dev_net(dev);
+ struct mpls_shim_hdr *hdr;
+ struct mpls_route *rt;
+ struct mpls_entry_decoded dec;
+ struct net_device *out_dev;
+ struct mpls_dev *mdev;
+ unsigned int hh_len;
+ unsigned int new_header_size;
+ unsigned int mtu;
+ int err;
+
+ /* Careful: this entire function runs inside of an RCU critical section */
+
+ mdev = mpls_dev_get(dev);
+ if (!mdev || !mdev->input_enabled)
+ goto drop;
+
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ goto drop;
+
+ if (!pskb_may_pull(skb, sizeof(*hdr)))
+ goto drop;
+
+ /* Read and decode the label */
+ hdr = mpls_hdr(skb);
+ dec = mpls_entry_decode(hdr);
+
+ /* Pop the label */
+ skb_pull(skb, sizeof(*hdr));
+ skb_reset_network_header(skb);
+
+ skb_orphan(skb);
+
+ rt = mpls_route_input_rcu(net, dec.label);
+ if (!rt)
+ goto drop;
+
+ /* Find the output device */
+ out_dev = rcu_dereference(rt->rt_dev);
+ if (!mpls_output_possible(out_dev))
+ goto drop;
+
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+ skb_forward_csum(skb);
+
+ /* Verify ttl is valid */
+ if (dec.ttl <= 1)
+ goto drop;
+ dec.ttl -= 1;
+
+ /* Verify the destination can hold the packet */
+ new_header_size = mpls_rt_header_size(rt);
+ mtu = mpls_dev_mtu(out_dev);
+ if (mpls_pkt_too_big(skb, mtu - new_header_size))
+ goto drop;
+
+ hh_len = LL_RESERVED_SPACE(out_dev);
+ if (!out_dev->header_ops)
+ hh_len = 0;
+
+ /* Ensure there is enough space for the headers in the skb */
+ if (skb_cow(skb, hh_len + new_header_size))
+ goto drop;
+
+ skb->dev = out_dev;
+ skb->protocol = htons(ETH_P_MPLS_UC);
+
+ if (unlikely(!new_header_size && dec.bos)) {
+ /* Penultimate hop popping */
+ if (!mpls_egress(rt, skb, dec))
+ goto drop;
+ } else {
+ bool bos;
+ int i;
+ skb_push(skb, new_header_size);
+ skb_reset_network_header(skb);
+ /* Push the new labels */
+ hdr = mpls_hdr(skb);
+ bos = dec.bos;
+ for (i = rt->rt_labels - 1; i >= 0; i--) {
+ hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
+ bos = false;
+ }
+ }
+
+ err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
+ if (err)
+ net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+ __func__, err);
+ return 0;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static struct packet_type mpls_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_MPLS_UC),
+ .func = mpls_forward,
+};
+
+static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
+ [RTA_DST] = { .type = NLA_U32 },
+ [RTA_OIF] = { .type = NLA_U32 },
+};
+
+struct mpls_route_config {
+ u32 rc_protocol;
+ u32 rc_ifindex;
+ u16 rc_via_table;
+ u16 rc_via_alen;
+ u8 rc_via[MAX_VIA_ALEN];
+ u32 rc_label;
+ u32 rc_output_labels;
+ u32 rc_output_label[MAX_NEW_LABELS];
+ u32 rc_nlflags;
+ struct nl_info rc_nlinfo;
+};
+
+static struct mpls_route *mpls_rt_alloc(size_t alen)
+{
+ struct mpls_route *rt;
+
+ rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL);
+ if (rt)
+ rt->rt_via_alen = alen;
+ return rt;
+}
+
+static void mpls_rt_free(struct mpls_route *rt)
+{
+ if (rt)
+ kfree_rcu(rt, rt_rcu);
+}
+
+static void mpls_notify_route(struct net *net, unsigned index,
+ struct mpls_route *old, struct mpls_route *new,
+ const struct nl_info *info)
+{
+ struct nlmsghdr *nlh = info ? info->nlh : NULL;
+ unsigned portid = info ? info->portid : 0;
+ int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
+ struct mpls_route *rt = new ? new : old;
+ unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
+ /* Ignore reserved labels for now */
+ if (rt && (index >= 16))
+ rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
+}
+
+static void mpls_route_update(struct net *net, unsigned index,
+ struct net_device *dev, struct mpls_route *new,
+ const struct nl_info *info)
+{
+ struct mpls_route __rcu **platform_label;
+ struct mpls_route *rt, *old = NULL;
+
+ ASSERT_RTNL();
+
+ platform_label = rtnl_dereference(net->mpls.platform_label);
+ rt = rtnl_dereference(platform_label[index]);
+ if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) {
+ rcu_assign_pointer(platform_label[index], new);
+ old = rt;
+ }
+
+ mpls_notify_route(net, index, old, new, info);
+
+ /* If we removed a route free it now */
+ mpls_rt_free(old);
+}
+
+static unsigned find_free_label(struct net *net)
+{
+ struct mpls_route __rcu **platform_label;
+ size_t platform_labels;
+ unsigned index;
+
+ platform_label = rtnl_dereference(net->mpls.platform_label);
+ platform_labels = net->mpls.platform_labels;
+ for (index = 16; index < platform_labels; index++) {
+ if (!rtnl_dereference(platform_label[index]))
+ return index;
+ }
+ return LABEL_NOT_SPECIFIED;
+}
+
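+/* Install a route in the platform label table under RTNL. If no label
+ * was specified and NLM_F_CREATE is set, the first free label above
+ * the reserved range is allocated.
+ */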
+static int mpls_route_add(struct mpls_route_config *cfg)
+{
+ struct mpls_route __rcu **platform_label;
+ struct net *net = cfg->rc_nlinfo.nl_net;
+ struct net_device *dev = NULL;
+ struct mpls_route *rt, *old;
+ unsigned index;
+ int i;
+ int err = -EINVAL;
+
+ index = cfg->rc_label;
+
+ /* If a label was not specified during insert, pick one */
+ if ((index == LABEL_NOT_SPECIFIED) &&
+ (cfg->rc_nlflags & NLM_F_CREATE)) {
+ index = find_free_label(net);
+ }
+
+ /* The first 16 labels are reserved, and may not be set */
+ if (index < 16)
+ goto errout;
+
+ /* The full 20 bit range may not be supported. */
+ if (index >= net->mpls.platform_labels)
+ goto errout;
+
+ /* Ensure only a supported number of labels are present */
+ if (cfg->rc_output_labels > MAX_NEW_LABELS)
+ goto errout;
+
+ err = -ENODEV;
+ dev = dev_get_by_index(net, cfg->rc_ifindex);
+ if (!dev)
+ goto errout;
+
+ /* Ensure this is a supported device */
+ err = -EINVAL;
+ if (!mpls_dev_get(dev))
+ goto errout;
+
+ err = -EINVAL;
+ if ((cfg->rc_via_table == NEIGH_LINK_TABLE) &&
+ (dev->addr_len != cfg->rc_via_alen))
+ goto errout;
+
+ /* Append makes no sense with MPLS */
+ err = -EOPNOTSUPP;
+ if (cfg->rc_nlflags & NLM_F_APPEND)
+ goto errout;
+
+ err = -EEXIST;
+ platform_label = rtnl_dereference(net->mpls.platform_label);
+ old = rtnl_dereference(platform_label[index]);
+ if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
+ goto errout;
+
+ err = -EEXIST;
+ if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
+ goto errout;
+
+ err = -ENOENT;
+ if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
+ goto errout;
+
+ err = -ENOMEM;
+ rt = mpls_rt_alloc(cfg->rc_via_alen);
+ if (!rt)
+ goto errout;
+
+ rt->rt_labels = cfg->rc_output_labels;
+ for (i = 0; i < rt->rt_labels; i++)
+ rt->rt_label[i] = cfg->rc_output_label[i];
+ rt->rt_protocol = cfg->rc_protocol;
+ RCU_INIT_POINTER(rt->rt_dev, dev);
+ rt->rt_via_table = cfg->rc_via_table;
+ memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
+
+ mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo);
+
+ dev_put(dev);
+ return 0;
+
+errout:
+ if (dev)
+ dev_put(dev);
+ return err;
+}
+
+static int mpls_route_del(struct mpls_route_config *cfg)
+{
+ struct net *net = cfg->rc_nlinfo.nl_net;
+ unsigned index;
+ int err = -EINVAL;
+
+ index = cfg->rc_label;
+
+ /* The first 16 labels are reserved, and may not be removed */
+ if (index < 16)
+ goto errout;
+
+ /* The full 20 bit range may not be supported */
+ if (index >= net->mpls.platform_labels)
+ goto errout;
+
+ mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo);
+
+ err = 0;
+errout:
+ return err;
+}
+
+#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
+ (&((struct mpls_dev *)0)->field)
+
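+/* Per-device sysctls registered under net/mpls/conf/<ifname>/.
+ * "input" corresponds to mpls_dev.input_enabled and must be non-zero
+ * for mpls_forward() to accept packets from the device.
+ */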
+static const struct ctl_table mpls_dev_table[] = {
+ {
+ .procname = "input",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
+ },
+ { }
+};
+
+static int mpls_dev_sysctl_register(struct net_device *dev,
+ struct mpls_dev *mdev)
+{
+ char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
+ struct ctl_table *table;
+ int i;
+
+ table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
+ if (!table)
+ goto out;
+
+ /* Table data contains only offsets relative to the base of
+ * the mdev at this point, so make them absolute.
+ */
+ for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++)
+ table[i].data = (char *)mdev + (uintptr_t)table[i].data;
+
+ snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
+
+ mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
+ if (!mdev->sysctl)
+ goto free;
+
+ return 0;
+
+free:
+ kfree(table);
+out:
+ return -ENOBUFS;
+}
+
+static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
+{
+ struct ctl_table *table;
+
+ table = mdev->sysctl->ctl_table_arg;
+ unregister_net_sysctl_table(mdev->sysctl);
+ kfree(table);
+}
+
+static struct mpls_dev *mpls_add_dev(struct net_device *dev)
+{
+ struct mpls_dev *mdev;
+ int err = -ENOMEM;
+
+ ASSERT_RTNL();
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return ERR_PTR(err);
+
+ err = mpls_dev_sysctl_register(dev, mdev);
+ if (err)
+ goto free;
+
+ rcu_assign_pointer(dev->mpls_ptr, mdev);
+
+ return mdev;
+
+free:
+ kfree(mdev);
+ return ERR_PTR(err);
+}
+
+static void mpls_ifdown(struct net_device *dev)
+{
+ struct mpls_route __rcu **platform_label;
+ struct net *net = dev_net(dev);
+ struct mpls_dev *mdev;
+ unsigned index;
+
+ platform_label = rtnl_dereference(net->mpls.platform_label);
+ for (index = 0; index < net->mpls.platform_labels; index++) {
+ struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+ if (!rt)
+ continue;
+ if (rtnl_dereference(rt->rt_dev) != dev)
+ continue;
+ rt->rt_dev = NULL;
+ }
+
+ mdev = mpls_dev_get(dev);
+ if (!mdev)
+ return;
+
+ mpls_dev_sysctl_unregister(mdev);
+
+ RCU_INIT_POINTER(dev->mpls_ptr, NULL);
+
+ kfree(mdev);
+}
+
+static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mpls_dev *mdev;
+
+ switch(event) {
+ case NETDEV_REGISTER:
+ /* For now just support Ethernet and loopback devices */
+ if ((dev->type == ARPHRD_ETHER) ||
+ (dev->type == ARPHRD_LOOPBACK)) {
+ mdev = mpls_add_dev(dev);
+ if (IS_ERR(mdev))
+ return notifier_from_errno(PTR_ERR(mdev));
+ }
+ break;
+
+ case NETDEV_UNREGISTER:
+ mpls_ifdown(dev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mpls_dev_notifier = {
+ .notifier_call = mpls_dev_notify,
+};
+
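+/* Emit an RTA_VIA attribute: the address family derived from the
+ * neighbour table, followed by the raw next hop address.
+ */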
+static int nla_put_via(struct sk_buff *skb,
+ u8 table, const void *addr, int alen)
+{
+ static const int table_to_family[NEIGH_NR_TABLES + 1] = {
+ AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
+ };
+ struct nlattr *nla;
+ struct rtvia *via;
+ int family = AF_UNSPEC;
+
+ nla = nla_reserve(skb, RTA_VIA, alen + 2);
+ if (!nla)
+ return -EMSGSIZE;
+
+ if (table <= NEIGH_NR_TABLES)
+ family = table_to_family[table];
+
+ via = nla_data(nla);
+ via->rtvia_family = family;
+ memcpy(via->rtvia_addr, addr, alen);
+ return 0;
+}
+
+int nla_put_labels(struct sk_buff *skb, int attrtype,
+ u8 labels, const u32 label[])
+{
+ struct nlattr *nla;
+ struct mpls_shim_hdr *nla_label;
+ bool bos;
+ int i;
+ nla = nla_reserve(skb, attrtype, labels*4);
+ if (!nla)
+ return -EMSGSIZE;
+
+ nla_label = nla_data(nla);
+ bos = true;
+ for (i = labels - 1; i >= 0; i--) {
+ nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
+ bos = false;
+ }
+
+ return 0;
+}
+
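+/* Parse a label stack attribute: a sequence of 4 byte label stack
+ * entries with TTL and TC clear and the bottom of stack bit set only
+ * on the last entry. The implicit-null label is rejected as it never
+ * appears in the encapsulation.
+ */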
+int nla_get_labels(const struct nlattr *nla,
+ u32 max_labels, u32 *labels, u32 label[])
+{
+ unsigned len = nla_len(nla);
+ unsigned nla_labels;
+ struct mpls_shim_hdr *nla_label;
+ bool bos;
+ int i;
+
+ /* len needs to be a multiple of 4 (the label size) */
+ if (len & 3)
+ return -EINVAL;
+
+ /* Limit the number of new labels allowed */
+ nla_labels = len/4;
+ if (nla_labels > max_labels)
+ return -EINVAL;
+
+ nla_label = nla_data(nla);
+ bos = true;
+ for (i = nla_labels - 1; i >= 0; i--, bos = false) {
+ struct mpls_entry_decoded dec;
+ dec = mpls_entry_decode(nla_label + i);
+
+ /* Ensure the bottom of stack flag is properly set
+ * and ttl and tc are both clear.
+ */
+ if ((dec.bos != bos) || dec.ttl || dec.tc)
+ return -EINVAL;
+
+ switch (dec.label) {
+ case MPLS_LABEL_IMPLNULL:
+ /* RFC3032: This is a label that an LSR may
+ * assign and distribute, but which never
+ * actually appears in the encapsulation.
+ */
+ return -EINVAL;
+ }
+
+ label[i] = dec.label;
+ }
+ *labels = nla_labels;
+ return 0;
+}
+
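+/* Translate an RTM_NEWROUTE/RTM_DELROUTE request into a
+ * struct mpls_route_config, validating the rtmsg header and the
+ * RTA_DST, RTA_NEWDST, RTA_VIA and RTA_OIF attributes.
+ */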
+static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct mpls_route_config *cfg)
+{
+ struct rtmsg *rtm;
+ struct nlattr *tb[RTA_MAX+1];
+ int index;
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
+ if (err < 0)
+ goto errout;
+
+ err = -EINVAL;
+ rtm = nlmsg_data(nlh);
+ memset(cfg, 0, sizeof(*cfg));
+
+ if (rtm->rtm_family != AF_MPLS)
+ goto errout;
+ if (rtm->rtm_dst_len != 20)
+ goto errout;
+ if (rtm->rtm_src_len != 0)
+ goto errout;
+ if (rtm->rtm_tos != 0)
+ goto errout;
+ if (rtm->rtm_table != RT_TABLE_MAIN)
+ goto errout;
+ /* Any value is acceptable for rtm_protocol */
+
+ /* As MPLS uses destination-specific addresses
+ * (or source-specific addresses in the case of multicast),
+ * all addresses have universal scope.
+ */
+ if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
+ goto errout;
+ if (rtm->rtm_type != RTN_UNICAST)
+ goto errout;
+ if (rtm->rtm_flags != 0)
+ goto errout;
+
+ cfg->rc_label = LABEL_NOT_SPECIFIED;
+ cfg->rc_protocol = rtm->rtm_protocol;
+ cfg->rc_nlflags = nlh->nlmsg_flags;
+ cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
+ cfg->rc_nlinfo.nlh = nlh;
+ cfg->rc_nlinfo.nl_net = sock_net(skb->sk);
+
+ for (index = 0; index <= RTA_MAX; index++) {
+ struct nlattr *nla = tb[index];
+ if (!nla)
+ continue;
+
+ switch(index) {
+ case RTA_OIF:
+ cfg->rc_ifindex = nla_get_u32(nla);
+ break;
+ case RTA_NEWDST:
+ if (nla_get_labels(nla, MAX_NEW_LABELS,
+ &cfg->rc_output_labels,
+ cfg->rc_output_label))
+ goto errout;
+ break;
+ case RTA_DST:
+ {
+ u32 label_count;
+ if (nla_get_labels(nla, 1, &label_count,
+ &cfg->rc_label))
+ goto errout;
+
+ /* The first 16 labels are reserved, and may not be set */
+ if (cfg->rc_label < 16)
+ goto errout;
+
+ break;
+ }
+ case RTA_VIA:
+ {
+ struct rtvia *via = nla_data(nla);
+ if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+ goto errout;
+ cfg->rc_via_alen = nla_len(nla) -
+ offsetof(struct rtvia, rtvia_addr);
+ if (cfg->rc_via_alen > MAX_VIA_ALEN)
+ goto errout;
+
+ /* Validate the address family */
+ switch(via->rtvia_family) {
+ case AF_PACKET:
+ cfg->rc_via_table = NEIGH_LINK_TABLE;
+ break;
+ case AF_INET:
+ cfg->rc_via_table = NEIGH_ARP_TABLE;
+ if (cfg->rc_via_alen != 4)
+ goto errout;
+ break;
+ case AF_INET6:
+ cfg->rc_via_table = NEIGH_ND_TABLE;
+ if (cfg->rc_via_alen != 16)
+ goto errout;
+ break;
+ default:
+ /* Unsupported address family */
+ goto errout;
+ }
+
+ memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen);
+ break;
+ }
+ default:
+ /* Unsupported attribute */
+ goto errout;
+ }
+ }
+
+ err = 0;
+errout:
+ return err;
+}
+
+static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ struct mpls_route_config cfg;
+ int err;
+
+ err = rtm_to_route_config(skb, nlh, &cfg);
+ if (err < 0)
+ return err;
+
+ return mpls_route_del(&cfg);
+}
+
+
+static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ struct mpls_route_config cfg;
+ int err;
+
+ err = rtm_to_route_config(skb, nlh, &cfg);
+ if (err < 0)
+ return err;
+
+ return mpls_route_add(&cfg);
+}
+
+static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
+ u32 label, struct mpls_route *rt, int flags)
+{
+ struct net_device *dev;
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ rtm = nlmsg_data(nlh);
+ rtm->rtm_family = AF_MPLS;
+ rtm->rtm_dst_len = 20;
+ rtm->rtm_src_len = 0;
+ rtm->rtm_tos = 0;
+ rtm->rtm_table = RT_TABLE_MAIN;
+ rtm->rtm_protocol = rt->rt_protocol;
+ rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+ rtm->rtm_type = RTN_UNICAST;
+ rtm->rtm_flags = 0;
+
+ if (rt->rt_labels &&
+ nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label))
+ goto nla_put_failure;
+ if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen))
+ goto nla_put_failure;
+ dev = rtnl_dereference(rt->rt_dev);
+ if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+ goto nla_put_failure;
+ if (nla_put_labels(skb, RTA_DST, 1, &label))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct mpls_route __rcu **platform_label;
+ size_t platform_labels;
+ unsigned int index;
+
+ ASSERT_RTNL();
+
+ index = cb->args[0];
+ if (index < 16)
+ index = 16;
+
+ platform_label = rtnl_dereference(net->mpls.platform_label);
+ platform_labels = net->mpls.platform_labels;
+ for (; index < platform_labels; index++) {
+ struct mpls_route *rt;
+ rt = rtnl_dereference(platform_label[index]);
+ if (!rt)
+ continue;
+
+ if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ index, rt, NLM_F_MULTI) < 0)
+ break;
+ }
+ cb->args[0] = index;
+
+ return skb->len;
+}
+
+static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
+{
+ size_t payload =
+ NLMSG_ALIGN(sizeof(struct rtmsg))
+ + nla_total_size(2 + rt->rt_via_alen) /* RTA_VIA */
+ + nla_total_size(4); /* RTA_DST */
+ if (rt->rt_labels) /* RTA_NEWDST */
+ payload += nla_total_size(rt->rt_labels * 4);
+ if (rt->rt_dev) /* RTA_OIF */
+ payload += nla_total_size(4);
+ return payload;
+}
+
+static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
+ struct nlmsghdr *nlh, struct net *net, u32 portid,
+ unsigned int nlm_flags)
+{
+ struct sk_buff *skb;
+ u32 seq = nlh ? nlh->nlmsg_seq : 0;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+
+ err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in lfib_nlmsg_size */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
+
+ return;
+errout:
+ if (err < 0)
+ rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
+}
+
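+/* Resize the per-namespace platform label table. Routes beyond the new
+ * limit are freed, the IPv4 and IPv6 explicit-null entries are
+ * pre-populated when the new limit covers them, and the old table is
+ * released after an RCU grace period.
+ */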
+static int resize_platform_label_table(struct net *net, size_t limit)
+{
+ size_t size = sizeof(struct mpls_route *) * limit;
+ size_t old_limit;
+ size_t cp_size;
+ struct mpls_route __rcu **labels = NULL, **old;
+ struct mpls_route *rt0 = NULL, *rt2 = NULL;
+ unsigned index;
+
+ if (size) {
+ labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!labels)
+ labels = vzalloc(size);
+
+ if (!labels)
+ goto nolabels;
+ }
+
+ /* In case the predefined labels need to be populated */
+ if (limit > MPLS_LABEL_IPV4NULL) {
+ struct net_device *lo = net->loopback_dev;
+ rt0 = mpls_rt_alloc(lo->addr_len);
+ if (!rt0)
+ goto nort0;
+ RCU_INIT_POINTER(rt0->rt_dev, lo);
+ rt0->rt_protocol = RTPROT_KERNEL;
+ rt0->rt_via_table = NEIGH_LINK_TABLE;
+ memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
+ }
+ if (limit > MPLS_LABEL_IPV6NULL) {
+ struct net_device *lo = net->loopback_dev;
+ rt2 = mpls_rt_alloc(lo->addr_len);
+ if (!rt2)
+ goto nort2;
+ RCU_INIT_POINTER(rt2->rt_dev, lo);
+ rt2->rt_protocol = RTPROT_KERNEL;
+ rt2->rt_via_table = NEIGH_LINK_TABLE;
+ memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
+ }
+
+ rtnl_lock();
+ /* Remember the original table */
+ old = rtnl_dereference(net->mpls.platform_label);
+ old_limit = net->mpls.platform_labels;
+
+ /* Free any labels beyond the new table */
+ for (index = limit; index < old_limit; index++)
+ mpls_route_update(net, index, NULL, NULL, NULL);
+
+ /* Copy over the old labels */
+ cp_size = size;
+ if (old_limit < limit)
+ cp_size = old_limit * sizeof(struct mpls_route *);
+
+ memcpy(labels, old, cp_size);
+
+ /* If needed set the predefined labels */
+ if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
+ (limit > MPLS_LABEL_IPV6NULL)) {
+ RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
+ rt2 = NULL;
+ }
+
+ if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
+ (limit > MPLS_LABEL_IPV4NULL)) {
+ RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
+ rt0 = NULL;
+ }
+
+ /* Update the global pointers */
+ net->mpls.platform_labels = limit;
+ rcu_assign_pointer(net->mpls.platform_label, labels);
+
+ rtnl_unlock();
+
+ mpls_rt_free(rt2);
+ mpls_rt_free(rt0);
+
+ if (old) {
+ synchronize_rcu();
+ kvfree(old);
+ }
+ return 0;
+
+nort2:
+ mpls_rt_free(rt0);
+nort0:
+ kvfree(labels);
+nolabels:
+ return -ENOMEM;
+}
+
+static int mpls_platform_labels(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = table->data;
+ int platform_labels = net->mpls.platform_labels;
+ int ret;
+ struct ctl_table tmp = {
+ .procname = table->procname,
+ .data = &platform_labels,
+ .maxlen = sizeof(int),
+ .mode = table->mode,
+ .extra1 = &zero,
+ .extra2 = &label_limit,
+ };
+
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+ if (write && ret == 0)
+ ret = resize_platform_label_table(net, platform_labels);
+
+ return ret;
+}
+
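+/* net.mpls.platform_labels: number of entries in the platform label
+ * table, clamped to the 20 bit label range. Writes resize the table
+ * via resize_platform_label_table().
+ */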
+static const struct ctl_table mpls_table[] = {
+ {
+ .procname = "platform_labels",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = mpls_platform_labels,
+ },
+ { }
+};
+
+static int mpls_net_init(struct net *net)
+{
+ struct ctl_table *table;
+
+ net->mpls.platform_labels = 0;
+ net->mpls.platform_label = NULL;
+
+ table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
+ if (table == NULL)
+ return -ENOMEM;
+
+ table[0].data = net;
+ net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
+ if (net->mpls.ctl == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mpls_net_exit(struct net *net)
+{
+ struct mpls_route __rcu **platform_label;
+ size_t platform_labels;
+ struct ctl_table *table;
+ unsigned int index;
+
+ table = net->mpls.ctl->ctl_table_arg;
+ unregister_net_sysctl_table(net->mpls.ctl);
+ kfree(table);
+
+ /* An RCU grace period has passed since the last device in this
+ * network namespace (and thus the last in-flight packet) left
+ * the namespace, because unregister_netdevice_many and
+ * netdev_run_todo have completed for each network device that
+ * was in this network namespace.
+ *
+ * As such no additional rcu synchronization is necessary when
+ * freeing the platform_label table.
+ */
+ rtnl_lock();
+ platform_label = rtnl_dereference(net->mpls.platform_label);
+ platform_labels = net->mpls.platform_labels;
+ for (index = 0; index < platform_labels; index++) {
+ struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+ RCU_INIT_POINTER(platform_label[index], NULL);
+ mpls_rt_free(rt);
+ }
+ rtnl_unlock();
+
+ kvfree(platform_label);
+}
+
+static struct pernet_operations mpls_net_ops = {
+ .init = mpls_net_init,
+ .exit = mpls_net_exit,
+};
+
+static int __init mpls_init(void)
+{
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
+
+ err = register_pernet_subsys(&mpls_net_ops);
+ if (err)
+ goto out;
+
+ err = register_netdevice_notifier(&mpls_dev_notifier);
+ if (err)
+ goto out_unregister_pernet;
+
+ dev_add_pack(&mpls_packet_type);
+
+ rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
+ rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
+ rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
+ err = 0;
+out:
+ return err;
+
+out_unregister_pernet:
+ unregister_pernet_subsys(&mpls_net_ops);
+ goto out;
+}
+module_init(mpls_init);
+
+static void __exit mpls_exit(void)
+{
+ rtnl_unregister_all(PF_MPLS);
+ dev_remove_pack(&mpls_packet_type);
+ unregister_netdevice_notifier(&mpls_dev_notifier);
+ unregister_pernet_subsys(&mpls_net_ops);
+}
+module_exit(mpls_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_NETPROTO(PF_MPLS);
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
new file mode 100644
index 000000000000..b064c345042c
--- /dev/null
+++ b/net/mpls/internal.h
@@ -0,0 +1,55 @@
+#ifndef MPLS_INTERNAL_H
+#define MPLS_INTERNAL_H
+
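+/* A label stack entry is a single 32 bit word (RFC 3032):
+ * bits 31-12 label, bits 11-9 traffic class, bit 8 bottom of stack,
+ * bits 7-0 TTL.
+ */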
+struct mpls_shim_hdr {
+ __be32 label_stack_entry;
+};
+
+struct mpls_entry_decoded {
+ u32 label;
+ u8 ttl;
+ u8 tc;
+ u8 bos;
+};
+
+struct mpls_dev {
+ int input_enabled;
+
+ struct ctl_table_header *sysctl;
+};
+
+struct sk_buff;
+
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
+{
+ return (struct mpls_shim_hdr *)skb_network_header(skb);
+}
+
+static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
+{
+ struct mpls_shim_hdr result;
+ result.label_stack_entry =
+ cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
+ (tc << MPLS_LS_TC_SHIFT) |
+ (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
+ (ttl << MPLS_LS_TTL_SHIFT));
+ return result;
+}
+
+static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr)
+{
+ struct mpls_entry_decoded result;
+ unsigned entry = be32_to_cpu(hdr->label_stack_entry);
+
+ result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
+ result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
+ result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
+ result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
+
+ return result;
+}
+
+int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+
+#endif /* MPLS_INTERNAL_H */
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index b02660fa9eb0..f70e34a68f70 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -438,8 +438,10 @@ config NF_TABLES
To compile it as a module, choose M here.
+if NF_TABLES
+
config NF_TABLES_INET
- depends on NF_TABLES && IPV6
+ depends on IPV6
select NF_TABLES_IPV4
select NF_TABLES_IPV6
tristate "Netfilter nf_tables mixed IPv4/IPv6 tables support"
@@ -447,21 +449,18 @@ config NF_TABLES_INET
This option enables support for a mixed IPv4/IPv6 "inet" table.
config NFT_EXTHDR
- depends on NF_TABLES
tristate "Netfilter nf_tables IPv6 exthdr module"
help
This option adds the "exthdr" expression that you can use to match
IPv6 extension headers.
config NFT_META
- depends on NF_TABLES
tristate "Netfilter nf_tables meta module"
help
This option adds the "meta" expression that you can use to match and
to set packet metainformation such as the packet mark.
config NFT_CT
- depends on NF_TABLES
depends on NF_CONNTRACK
tristate "Netfilter nf_tables conntrack module"
help
@@ -469,42 +468,36 @@ config NFT_CT
connection tracking information such as the flow state.
config NFT_RBTREE
- depends on NF_TABLES
tristate "Netfilter nf_tables rbtree set module"
help
This option adds the "rbtree" set type (Red Black tree) that is used
to build interval-based sets.
config NFT_HASH
- depends on NF_TABLES
tristate "Netfilter nf_tables hash set module"
help
This option adds the "hash" set type that is used to build one-way
mappings between matchings and actions.
config NFT_COUNTER
- depends on NF_TABLES
tristate "Netfilter nf_tables counter module"
help
This option adds the "counter" expression that you can use to
include packet and byte counters in a rule.
config NFT_LOG
- depends on NF_TABLES
tristate "Netfilter nf_tables log module"
help
This option adds the "log" expression that you can use to log
packets matching some criteria.
config NFT_LIMIT
- depends on NF_TABLES
tristate "Netfilter nf_tables limit module"
help
This option adds the "limit" expression that you can use to
ratelimit rule matchings.
config NFT_MASQ
- depends on NF_TABLES
depends on NF_CONNTRACK
depends on NF_NAT
tristate "Netfilter nf_tables masquerade support"
@@ -513,7 +506,6 @@ config NFT_MASQ
to perform NAT in the masquerade flavour.
config NFT_REDIR
- depends on NF_TABLES
depends on NF_CONNTRACK
depends on NF_NAT
tristate "Netfilter nf_tables redirect support"
@@ -522,7 +514,6 @@ config NFT_REDIR
to perform NAT in the redirect flavour.
config NFT_NAT
- depends on NF_TABLES
depends on NF_CONNTRACK
select NF_NAT
tristate "Netfilter nf_tables nat module"
@@ -531,8 +522,6 @@ config NFT_NAT
typical Network Address Translation (NAT) packet transformations.
config NFT_QUEUE
- depends on NF_TABLES
- depends on NETFILTER_XTABLES
depends on NETFILTER_NETLINK_QUEUE
tristate "Netfilter nf_tables queue module"
help
@@ -540,7 +529,6 @@ config NFT_QUEUE
infrastructure (also known as NFQUEUE) from nftables.
config NFT_REJECT
- depends on NF_TABLES
default m if NETFILTER_ADVANCED=n
tristate "Netfilter nf_tables reject support"
help
@@ -554,7 +542,6 @@ config NFT_REJECT_INET
tristate
config NFT_COMPAT
- depends on NF_TABLES
depends on NETFILTER_XTABLES
tristate "Netfilter x_tables over nf_tables module"
help
@@ -562,6 +549,8 @@ config NFT_COMPAT
x_tables match/target extensions over the nf_tables
framework.
+endif # NF_TABLES
+
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
@@ -951,7 +940,7 @@ comment "Xtables matches"
config NETFILTER_XT_MATCH_ADDRTYPE
tristate '"addrtype" address type match support'
- depends on NETFILTER_ADVANCED
+ default m if NETFILTER_ADVANCED=n
---help---
This option allows you to match what routing thinks of an address,
eg. UNICAST, LOCAL, BROADCAST, ...
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 89f73a9e9874..a87d8b8ec730 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -70,7 +70,7 @@ obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
# nf_tables
nf_tables-objs += nf_tables_core.o nf_tables_api.o
-nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
+nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o nft_dynset.o
nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
obj-$(CONFIG_NF_TABLES) += nf_tables.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index fea9ef566427..e6163017c42d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -120,12 +120,8 @@ EXPORT_SYMBOL(nf_unregister_hooks);
unsigned int nf_iterate(struct list_head *head,
struct sk_buff *skb,
- unsigned int hook,
- const struct net_device *indev,
- const struct net_device *outdev,
- struct nf_hook_ops **elemp,
- int (*okfn)(struct sk_buff *),
- int hook_thresh)
+ struct nf_hook_state *state,
+ struct nf_hook_ops **elemp)
{
unsigned int verdict;
@@ -134,19 +130,19 @@ unsigned int nf_iterate(struct list_head *head,
* function because of risk of continuing from deleted element.
*/
list_for_each_entry_continue_rcu((*elemp), head, list) {
- if (hook_thresh > (*elemp)->priority)
+ if (state->thresh > (*elemp)->priority)
continue;
/* Optimization: we don't need to hold module
reference here, since function can't sleep. --RR */
repeat:
- verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
+ verdict = (*elemp)->hook(*elemp, skb, state);
if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
if (unlikely((verdict & NF_VERDICT_MASK)
> NF_MAX_VERDICT)) {
NFDEBUG("Evil return from %p(%u).\n",
- (*elemp)->hook, hook);
+ (*elemp)->hook, state->hook);
continue;
}
#endif
@@ -161,11 +157,7 @@ repeat:
/* Returns 1 if okfn() needs to be executed by the caller,
* -EPERM for NF_DROP, 0 otherwise. */
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct sk_buff *),
- int hook_thresh)
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
struct nf_hook_ops *elem;
unsigned int verdict;
@@ -174,10 +166,11 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
/* We may already have this, but read-locks nest anyway */
rcu_read_lock();
- elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list);
+ elem = list_entry_rcu(&nf_hooks[state->pf][state->hook],
+ struct nf_hook_ops, list);
next_hook:
- verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
- outdev, &elem, okfn, hook_thresh);
+ verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state,
+ &elem);
if (verdict == NF_ACCEPT || verdict == NF_STOP) {
ret = 1;
} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
@@ -186,8 +179,8 @@ next_hook:
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
- verdict >> NF_VERDICT_QBITS);
+ int err = nf_queue(skb, elem, state,
+ verdict >> NF_VERDICT_QBITS);
if (err < 0) {
if (err == -ECANCELED)
goto next_hook;
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 758b002130d9..380ef5148ea1 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -19,6 +19,7 @@
#include <net/netlink.h>
#include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
@@ -211,6 +212,22 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
#include "ip_set_hash_gen.h"
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+static const char *get_physindev_name(const struct sk_buff *skb)
+{
+ struct net_device *dev = nf_bridge_get_physindev(skb);
+
+ return dev ? dev->name : NULL;
+}
+
+static const char *get_phyoutdev_name(const struct sk_buff *skb)
+{
+ struct net_device *dev = nf_bridge_get_physoutdev(skb);
+
+ return dev ? dev->name : NULL;
+}
+#endif
+
static int
hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@@ -234,16 +251,15 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
e.ip &= ip_set_netmask(e.cidr);
#define IFACE(dir) (par->dir ? par->dir->name : NULL)
-#define PHYSDEV(dir) (nf_bridge->dir ? nf_bridge->dir->name : NULL)
#define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC)
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ e.iface = SRCDIR ? get_physindev_name(skb) :
+ get_phyoutdev_name(skb);
- if (!nf_bridge)
+ if (!e.iface)
return -EINVAL;
- e.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
e.physdev = 1;
#else
e.iface = NULL;
@@ -476,11 +492,11 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
-
- if (!nf_bridge)
+ e.iface = SRCDIR ? get_physindev_name(skb) :
+ get_phyoutdev_name(skb);
+ if (!e.iface)
return -EINVAL;
- e.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
+
e.physdev = 1;
#else
e.iface = NULL;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b87ca32efa0b..5d2b806a862e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -119,24 +119,24 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
struct ip_vs_service *svc;
s = this_cpu_ptr(dest->stats.cpustats);
- s->ustats.inpkts++;
u64_stats_update_begin(&s->syncp);
- s->ustats.inbytes += skb->len;
+ s->cnt.inpkts++;
+ s->cnt.inbytes += skb->len;
u64_stats_update_end(&s->syncp);
rcu_read_lock();
svc = rcu_dereference(dest->svc);
s = this_cpu_ptr(svc->stats.cpustats);
- s->ustats.inpkts++;
u64_stats_update_begin(&s->syncp);
- s->ustats.inbytes += skb->len;
+ s->cnt.inpkts++;
+ s->cnt.inbytes += skb->len;
u64_stats_update_end(&s->syncp);
rcu_read_unlock();
s = this_cpu_ptr(ipvs->tot_stats.cpustats);
- s->ustats.inpkts++;
u64_stats_update_begin(&s->syncp);
- s->ustats.inbytes += skb->len;
+ s->cnt.inpkts++;
+ s->cnt.inbytes += skb->len;
u64_stats_update_end(&s->syncp);
}
}
@@ -153,24 +153,24 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
struct ip_vs_service *svc;
s = this_cpu_ptr(dest->stats.cpustats);
- s->ustats.outpkts++;
u64_stats_update_begin(&s->syncp);
- s->ustats.outbytes += skb->len;
+ s->cnt.outpkts++;
+ s->cnt.outbytes += skb->len;
u64_stats_update_end(&s->syncp);
rcu_read_lock();
svc = rcu_dereference(dest->svc);
s = this_cpu_ptr(svc->stats.cpustats);
- s->ustats.outpkts++;
u64_stats_update_begin(&s->syncp);
- s->ustats.outbytes += skb->len;
+ s->cnt.outpkts++;
+ s->cnt.outbytes += skb->len;
u64_stats_update_end(&s->syncp);
rcu_read_unlock();
s = this_cpu_ptr(ipvs->tot_stats.cpustats);
- s->ustats.outpkts++;
u64_stats_update_begin(&s->syncp);
- s->ustats.outbytes += skb->len;
+ s->cnt.outpkts++;
+ s->cnt.outbytes += skb->len;
u64_stats_update_end(&s->syncp);
}
}
@@ -183,13 +183,19 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
struct ip_vs_cpu_stats *s;
s = this_cpu_ptr(cp->dest->stats.cpustats);
- s->ustats.conns++;
+ u64_stats_update_begin(&s->syncp);
+ s->cnt.conns++;
+ u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(svc->stats.cpustats);
- s->ustats.conns++;
+ u64_stats_update_begin(&s->syncp);
+ s->cnt.conns++;
+ u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(ipvs->tot_stats.cpustats);
- s->ustats.conns++;
+ u64_stats_update_begin(&s->syncp);
+ s->cnt.conns++;
+ u64_stats_update_end(&s->syncp);
}
@@ -1046,6 +1052,26 @@ static inline bool is_new_conn(const struct sk_buff *skb,
}
}
+static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
+ int conn_reuse_mode)
+{
+ /* Controlled (FTP DATA or persistence)? */
+ if (cp->control)
+ return false;
+
+ switch (cp->protocol) {
+ case IPPROTO_TCP:
+ return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
+ ((conn_reuse_mode & 2) &&
+ (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
+ (cp->flags & IP_VS_CONN_F_NOOUTPUT));
+ case IPPROTO_SCTP:
+ return cp->state == IP_VS_SCTP_S_CLOSED;
+ default:
+ return false;
+ }
+}
+
/* Handle response packets: rewrite addresses and send away...
*/
static unsigned int
@@ -1246,8 +1272,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
*/
static unsigned int
ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_out(ops->hooknum, skb, AF_INET);
}
@@ -1258,8 +1283,7 @@ ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
*/
static unsigned int
ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_out(ops->hooknum, skb, AF_INET);
}
@@ -1273,8 +1297,7 @@ ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
*/
static unsigned int
ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_out(ops->hooknum, skb, AF_INET6);
}
@@ -1285,8 +1308,7 @@ ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
*/
static unsigned int
ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_out(ops->hooknum, skb, AF_INET6);
}
@@ -1585,6 +1607,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
struct ip_vs_conn *cp;
int ret, pkts;
struct netns_ipvs *ipvs;
+ int conn_reuse_mode;
/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
@@ -1653,10 +1676,14 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
*/
cp = pp->conn_in_get(af, skb, &iph, 0);
- if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
- unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
- is_new_conn(skb, &iph)) {
- ip_vs_conn_expire_now(cp);
+ conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+ if (conn_reuse_mode && !iph.fragoffs &&
+ is_new_conn(skb, &iph) && cp &&
+ ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+ unlikely(!atomic_read(&cp->dest->weight))) ||
+ unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+ if (!atomic_read(&cp->n_control))
+ ip_vs_conn_expire_now(cp);
__ip_vs_conn_put(cp);
cp = NULL;
}
@@ -1738,9 +1765,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
*/
static unsigned int
ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_in(ops->hooknum, skb, AF_INET);
}
@@ -1751,8 +1776,7 @@ ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
*/
static unsigned int
ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_in(ops->hooknum, skb, AF_INET);
}
@@ -1765,9 +1789,7 @@ ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
*/
static unsigned int
ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_in(ops->hooknum, skb, AF_INET6);
}
@@ -1778,8 +1800,7 @@ ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
*/
static unsigned int
ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return ip_vs_in(ops->hooknum, skb, AF_INET6);
}
@@ -1798,8 +1819,7 @@ ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
*/
static unsigned int
ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
int r;
struct net *net;
@@ -1820,8 +1840,7 @@ ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
#ifdef CONFIG_IP_VS_IPV6
static unsigned int
ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
int r;
struct net *net;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ed99448671c3..49532672f66d 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -729,9 +729,9 @@ static void ip_vs_trash_cleanup(struct net *net)
}
static void
-ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
+ip_vs_copy_stats(struct ip_vs_kstats *dst, struct ip_vs_stats *src)
{
-#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c
+#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->kstats.c - src->kstats0.c
spin_lock_bh(&src->lock);
@@ -747,13 +747,28 @@ ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
}
static void
+ip_vs_export_stats_user(struct ip_vs_stats_user *dst, struct ip_vs_kstats *src)
+{
+ dst->conns = (u32)src->conns;
+ dst->inpkts = (u32)src->inpkts;
+ dst->outpkts = (u32)src->outpkts;
+ dst->inbytes = src->inbytes;
+ dst->outbytes = src->outbytes;
+ dst->cps = (u32)src->cps;
+ dst->inpps = (u32)src->inpps;
+ dst->outpps = (u32)src->outpps;
+ dst->inbps = (u32)src->inbps;
+ dst->outbps = (u32)src->outbps;
+}
+
+static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock_bh(&stats->lock);
/* get current counters as zero point, rates are zeroed */
-#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c
+#define IP_VS_ZERO_STATS_COUNTER(c) stats->kstats0.c = stats->kstats.c
IP_VS_ZERO_STATS_COUNTER(conns);
IP_VS_ZERO_STATS_COUNTER(inpkts);
@@ -1808,6 +1823,12 @@ static struct ctl_table vs_vars[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "conn_reuse_mode",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
@@ -2044,7 +2065,7 @@ static const struct file_operations ip_vs_info_fops = {
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
- struct ip_vs_stats_user show;
+ struct ip_vs_kstats show;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
@@ -2053,17 +2074,22 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
" Conns Packets Packets Bytes Bytes\n");
ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats);
- seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns,
- show.inpkts, show.outpkts,
- (unsigned long long) show.inbytes,
- (unsigned long long) show.outbytes);
-
-/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+ seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n\n",
+ (unsigned long long)show.conns,
+ (unsigned long long)show.inpkts,
+ (unsigned long long)show.outpkts,
+ (unsigned long long)show.inbytes,
+ (unsigned long long)show.outbytes);
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567*/
seq_puts(seq,
- " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
- seq_printf(seq, "%8X %8X %8X %16X %16X\n",
- show.cps, show.inpps, show.outpps,
- show.inbps, show.outbps);
+ " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
+ seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n",
+ (unsigned long long)show.cps,
+ (unsigned long long)show.inpps,
+ (unsigned long long)show.outpps,
+ (unsigned long long)show.inbps,
+ (unsigned long long)show.outbps);
return 0;
}
@@ -2086,7 +2112,7 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
struct net *net = seq_file_single_net(seq);
struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats;
struct ip_vs_cpu_stats __percpu *cpustats = tot_stats->cpustats;
- struct ip_vs_stats_user rates;
+ struct ip_vs_kstats kstats;
int i;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
@@ -2098,41 +2124,41 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
for_each_possible_cpu(i) {
struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
unsigned int start;
- __u64 inbytes, outbytes;
+ u64 conns, inpkts, outpkts, inbytes, outbytes;
do {
start = u64_stats_fetch_begin_irq(&u->syncp);
- inbytes = u->ustats.inbytes;
- outbytes = u->ustats.outbytes;
+ conns = u->cnt.conns;
+ inpkts = u->cnt.inpkts;
+ outpkts = u->cnt.outpkts;
+ inbytes = u->cnt.inbytes;
+ outbytes = u->cnt.outbytes;
} while (u64_stats_fetch_retry_irq(&u->syncp, start));
- seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
- i, u->ustats.conns, u->ustats.inpkts,
- u->ustats.outpkts, (__u64)inbytes,
- (__u64)outbytes);
+ seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
+ i, (u64)conns, (u64)inpkts,
+ (u64)outpkts, (u64)inbytes,
+ (u64)outbytes);
}
- spin_lock_bh(&tot_stats->lock);
-
- seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
- tot_stats->ustats.conns, tot_stats->ustats.inpkts,
- tot_stats->ustats.outpkts,
- (unsigned long long) tot_stats->ustats.inbytes,
- (unsigned long long) tot_stats->ustats.outbytes);
-
- ip_vs_read_estimator(&rates, tot_stats);
+ ip_vs_copy_stats(&kstats, tot_stats);
- spin_unlock_bh(&tot_stats->lock);
+ seq_printf(seq, " ~ %8LX %8LX %8LX %16LX %16LX\n\n",
+ (unsigned long long)kstats.conns,
+ (unsigned long long)kstats.inpkts,
+ (unsigned long long)kstats.outpkts,
+ (unsigned long long)kstats.inbytes,
+ (unsigned long long)kstats.outbytes);
-/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+/* ... 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
- " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
- seq_printf(seq, " %8X %8X %8X %16X %16X\n",
- rates.cps,
- rates.inpps,
- rates.outpps,
- rates.inbps,
- rates.outbps);
+ " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
+ seq_printf(seq, " %8LX %8LX %8LX %16LX %16LX\n",
+ kstats.cps,
+ kstats.inpps,
+ kstats.outpps,
+ kstats.inbps,
+ kstats.outbps);
return 0;
}
@@ -2400,6 +2426,7 @@ static void
ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
{
struct ip_vs_scheduler *sched;
+ struct ip_vs_kstats kstats;
sched = rcu_dereference_protected(src->scheduler, 1);
dst->protocol = src->protocol;
@@ -2411,7 +2438,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
dst->num_dests = src->num_dests;
- ip_vs_copy_stats(&dst->stats, &src->stats);
+ ip_vs_copy_stats(&kstats, &src->stats);
+ ip_vs_export_stats_user(&dst->stats, &kstats);
}
static inline int
@@ -2485,6 +2513,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
int count = 0;
struct ip_vs_dest *dest;
struct ip_vs_dest_entry entry;
+ struct ip_vs_kstats kstats;
memset(&entry, 0, sizeof(entry));
list_for_each_entry(dest, &svc->destinations, n_list) {
@@ -2506,7 +2535,8 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
entry.activeconns = atomic_read(&dest->activeconns);
entry.inactconns = atomic_read(&dest->inactconns);
entry.persistconns = atomic_read(&dest->persistconns);
- ip_vs_copy_stats(&entry.stats, &dest->stats);
+ ip_vs_copy_stats(&kstats, &dest->stats);
+ ip_vs_export_stats_user(&entry.stats, &kstats);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ret = -EFAULT;
@@ -2798,25 +2828,51 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
};
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
- struct ip_vs_stats *stats)
+ struct ip_vs_kstats *kstats)
{
- struct ip_vs_stats_user ustats;
struct nlattr *nl_stats = nla_nest_start(skb, container_type);
+
if (!nl_stats)
return -EMSGSIZE;
- ip_vs_copy_stats(&ustats, stats);
-
- if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
- nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
- nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
- nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
- nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
- nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
- nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
- nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
- nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
- nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
+ if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, (u32)kstats->inbps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, (u32)kstats->outbps))
+ goto nla_put_failure;
+ nla_nest_end(skb, nl_stats);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nl_stats);
+ return -EMSGSIZE;
+}
+
+static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
+ struct ip_vs_kstats *kstats)
+{
+ struct nlattr *nl_stats = nla_nest_start(skb, container_type);
+
+ if (!nl_stats)
+ return -EMSGSIZE;
+
+ if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps))
goto nla_put_failure;
nla_nest_end(skb, nl_stats);
@@ -2835,6 +2891,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
struct nlattr *nl_service;
struct ip_vs_flags flags = { .flags = svc->flags,
.mask = ~0 };
+ struct ip_vs_kstats kstats;
nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
if (!nl_service)
@@ -2860,7 +2917,10 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
nla_put_be32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
goto nla_put_failure;
- if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
+ ip_vs_copy_stats(&kstats, &svc->stats);
+ if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &kstats))
+ goto nla_put_failure;
+ if (ip_vs_genl_fill_stats64(skb, IPVS_SVC_ATTR_STATS64, &kstats))
goto nla_put_failure;
nla_nest_end(skb, nl_service);
@@ -3032,6 +3092,7 @@ static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
{
struct nlattr *nl_dest;
+ struct ip_vs_kstats kstats;
nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
if (!nl_dest)
@@ -3054,7 +3115,10 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
atomic_read(&dest->persistconns)) ||
nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af))
goto nla_put_failure;
- if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
+ ip_vs_copy_stats(&kstats, &dest->stats);
+ if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &kstats))
+ goto nla_put_failure;
+ if (ip_vs_genl_fill_stats64(skb, IPVS_DEST_ATTR_STATS64, &kstats))
goto nla_put_failure;
nla_nest_end(skb, nl_dest);
@@ -3732,6 +3796,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
ipvs->sysctl_pmtu_disc = 1;
tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
tbl[idx++].data = &ipvs->sysctl_backup_only;
+ ipvs->sysctl_conn_reuse_mode = 1;
+ tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 1425e9a924c4..ef0eb0a8d552 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -45,17 +45,19 @@
NOTES.
- * The stored value for average bps is scaled by 2^5, so that maximal
- rate is ~2.15Gbits/s, average pps and cps are scaled by 2^10.
+ * Average bps is scaled by 2^5, while average pps and cps are scaled by 2^10.
- * A lot code is taken from net/sched/estimator.c
+ * Netlink users can see 64-bit values but sockopt users are restricted
+ to 32-bit values for conns, packets, bps, cps and pps.
+
+ * A lot of code is taken from net/core/gen_estimator.c
*/
/*
* Make a summary from each cpu
*/
-static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+static void ip_vs_read_cpu_stats(struct ip_vs_kstats *sum,
struct ip_vs_cpu_stats __percpu *stats)
{
int i;
@@ -64,27 +66,31 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
for_each_possible_cpu(i) {
struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
unsigned int start;
- __u64 inbytes, outbytes;
+ u64 conns, inpkts, outpkts, inbytes, outbytes;
+
if (add) {
- sum->conns += s->ustats.conns;
- sum->inpkts += s->ustats.inpkts;
- sum->outpkts += s->ustats.outpkts;
do {
start = u64_stats_fetch_begin(&s->syncp);
- inbytes = s->ustats.inbytes;
- outbytes = s->ustats.outbytes;
+ conns = s->cnt.conns;
+ inpkts = s->cnt.inpkts;
+ outpkts = s->cnt.outpkts;
+ inbytes = s->cnt.inbytes;
+ outbytes = s->cnt.outbytes;
} while (u64_stats_fetch_retry(&s->syncp, start));
+ sum->conns += conns;
+ sum->inpkts += inpkts;
+ sum->outpkts += outpkts;
sum->inbytes += inbytes;
sum->outbytes += outbytes;
} else {
add = true;
- sum->conns = s->ustats.conns;
- sum->inpkts = s->ustats.inpkts;
- sum->outpkts = s->ustats.outpkts;
do {
start = u64_stats_fetch_begin(&s->syncp);
- sum->inbytes = s->ustats.inbytes;
- sum->outbytes = s->ustats.outbytes;
+ sum->conns = s->cnt.conns;
+ sum->inpkts = s->cnt.inpkts;
+ sum->outpkts = s->cnt.outpkts;
+ sum->inbytes = s->cnt.inbytes;
+ sum->outbytes = s->cnt.outbytes;
} while (u64_stats_fetch_retry(&s->syncp, start));
}
}
@@ -95,10 +101,7 @@ static void estimation_timer(unsigned long arg)
{
struct ip_vs_estimator *e;
struct ip_vs_stats *s;
- u32 n_conns;
- u32 n_inpkts, n_outpkts;
- u64 n_inbytes, n_outbytes;
- u32 rate;
+ u64 rate;
struct net *net = (struct net *)arg;
struct netns_ipvs *ipvs;
@@ -108,33 +111,29 @@ static void estimation_timer(unsigned long arg)
s = container_of(e, struct ip_vs_stats, est);
spin_lock(&s->lock);
- ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
- n_conns = s->ustats.conns;
- n_inpkts = s->ustats.inpkts;
- n_outpkts = s->ustats.outpkts;
- n_inbytes = s->ustats.inbytes;
- n_outbytes = s->ustats.outbytes;
+ ip_vs_read_cpu_stats(&s->kstats, s->cpustats);
/* scaled by 2^10, but divided 2 seconds */
- rate = (n_conns - e->last_conns) << 9;
- e->last_conns = n_conns;
- e->cps += ((long)rate - (long)e->cps) >> 2;
-
- rate = (n_inpkts - e->last_inpkts) << 9;
- e->last_inpkts = n_inpkts;
- e->inpps += ((long)rate - (long)e->inpps) >> 2;
-
- rate = (n_outpkts - e->last_outpkts) << 9;
- e->last_outpkts = n_outpkts;
- e->outpps += ((long)rate - (long)e->outpps) >> 2;
-
- rate = (n_inbytes - e->last_inbytes) << 4;
- e->last_inbytes = n_inbytes;
- e->inbps += ((long)rate - (long)e->inbps) >> 2;
-
- rate = (n_outbytes - e->last_outbytes) << 4;
- e->last_outbytes = n_outbytes;
- e->outbps += ((long)rate - (long)e->outbps) >> 2;
+ rate = (s->kstats.conns - e->last_conns) << 9;
+ e->last_conns = s->kstats.conns;
+ e->cps += ((s64)rate - (s64)e->cps) >> 2;
+
+ rate = (s->kstats.inpkts - e->last_inpkts) << 9;
+ e->last_inpkts = s->kstats.inpkts;
+ e->inpps += ((s64)rate - (s64)e->inpps) >> 2;
+
+ rate = (s->kstats.outpkts - e->last_outpkts) << 9;
+ e->last_outpkts = s->kstats.outpkts;
+ e->outpps += ((s64)rate - (s64)e->outpps) >> 2;
+
+	/* scaled by 2^5, but divided by 2 seconds */
+ rate = (s->kstats.inbytes - e->last_inbytes) << 4;
+ e->last_inbytes = s->kstats.inbytes;
+ e->inbps += ((s64)rate - (s64)e->inbps) >> 2;
+
+ rate = (s->kstats.outbytes - e->last_outbytes) << 4;
+ e->last_outbytes = s->kstats.outbytes;
+ e->outbps += ((s64)rate - (s64)e->outbps) >> 2;
spin_unlock(&s->lock);
}
spin_unlock(&ipvs->est_lock);
@@ -166,14 +165,14 @@ void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats)
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
- struct ip_vs_stats_user *u = &stats->ustats;
+ struct ip_vs_kstats *k = &stats->kstats;
/* reset counters, caller must hold the stats->lock lock */
- est->last_inbytes = u->inbytes;
- est->last_outbytes = u->outbytes;
- est->last_conns = u->conns;
- est->last_inpkts = u->inpkts;
- est->last_outpkts = u->outpkts;
+ est->last_inbytes = k->inbytes;
+ est->last_outbytes = k->outbytes;
+ est->last_conns = k->conns;
+ est->last_inpkts = k->inpkts;
+ est->last_outpkts = k->outpkts;
est->cps = 0;
est->inpps = 0;
est->outpps = 0;
@@ -182,8 +181,7 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
}
/* Get decoded rates */
-void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
- struct ip_vs_stats *stats)
+void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats)
{
struct ip_vs_estimator *e = &stats->est;
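
The estimator above keeps its averages as fixed-point values: cps and pps are scaled by 2^10, bps by 2^5, and the timer fires every 2 seconds, so shifting the per-interval delta by 9 (or 4 for bytes) folds the division by the interval into the scaling. A minimal sketch of that smoothing, with made-up names and plain C types (not the kernel code itself):

#include <stdint.h>

/* Illustration of the update done by estimation_timer() above: rate is
 * the per-interval delta, pre-scaled by 2^10 and halved (<< 9) because
 * the timer runs every 2 seconds; the running average then moves a
 * quarter of the way towards it (a simple EWMA with weight 1/4).
 */
static uint64_t ewma_cps_update(uint64_t conns, uint64_t *last_conns,
				uint64_t cps)
{
	uint64_t rate = (conns - *last_conns) << 9;

	*last_conns = conns;
	return cps + (uint64_t)(((int64_t)rate - (int64_t)cps) >> 2);
}
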
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index d93ceeb3ef04..19b9cce6c210 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -845,10 +845,27 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
struct ip_vs_conn *cp;
struct netns_ipvs *ipvs = net_ipvs(net);
- if (!(flags & IP_VS_CONN_F_TEMPLATE))
+ if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
cp = ip_vs_conn_in_get(param);
- else
+ if (cp && ((cp->dport != dport) ||
+ !ip_vs_addr_equal(cp->daf, &cp->daddr, daddr))) {
+ if (!(flags & IP_VS_CONN_F_INACTIVE)) {
+ ip_vs_conn_expire_now(cp);
+ __ip_vs_conn_put(cp);
+ cp = NULL;
+ } else {
+ /* This is the expiration message for the
+ * connection that was already replaced, so we
+ * just ignore it.
+ */
+ __ip_vs_conn_put(cp);
+ kfree(param->pe_data);
+ return;
+ }
+ }
+ } else {
cp = ip_vs_ct_in_get(param);
+ }
if (cp) {
/* Free pe_data */
@@ -1388,9 +1405,11 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
mreq.imr_ifindex = dev->ifindex;
+ rtnl_lock();
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
+ rtnl_unlock();
return ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 3aedbda7658a..19986ec5f21a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -209,7 +209,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
struct sock *sk = skb->sk;
struct rtable *ort = skb_rtable(skb);
- if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
+ if (!skb->dev && sk && sk_fullsock(sk))
ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
}
@@ -536,8 +536,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
ip_vs_update_conntrack(skb, cp, 1);
if (!local) {
skb_forward_csum(skb);
- NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
- dst_output);
+ NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
+ NULL, skb_dst(skb)->dev, dst_output_sk);
} else
ret = NF_ACCEPT;
return ret;
@@ -554,8 +554,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
ip_vs_notrack(skb);
if (!local) {
skb_forward_csum(skb);
- NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
- dst_output);
+ NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
+ NULL, skb_dst(skb)->dev, dst_output_sk);
} else
ret = NF_ACCEPT;
return ret;
@@ -924,7 +924,8 @@ int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
- struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+ struct net *net = skb_net(skb);
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct rtable *rt; /* Route to the other host */
__be32 saddr; /* Source for tunnel */
struct net_device *tdev; /* Device to other host */
@@ -991,7 +992,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
iph->daddr = cp->daddr.ip;
iph->saddr = saddr;
iph->ttl = ttl;
- ip_select_ident(skb, NULL);
+ ip_select_ident(net, skb, NULL);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
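
Two hunks in this file (and several later ones) replace the open-coded sk->sk_state != TCP_TIME_WAIT test with sk_fullsock(). Quoted from memory rather than from this series, the helper is roughly the snippet below; the point is that it also filters out NEW_SYN_RECV request minisocks, which, like time-wait sockets, lack the full struct sock state that update_pmtu() or the UID/GID dumpers need.

/* Approximate definition for reference (the real one lives in
 * include/net/sock.h); treat it as a sketch, not part of this patch.
 */
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}
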
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index a4b5e2a435ac..45da11afa785 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -47,9 +47,11 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
return 0;
counter = acct->counter;
- return seq_printf(s, "packets=%llu bytes=%llu ",
- (unsigned long long)atomic64_read(&counter[dir].packets),
- (unsigned long long)atomic64_read(&counter[dir].bytes));
+ seq_printf(s, "packets=%llu bytes=%llu ",
+ (unsigned long long)atomic64_read(&counter[dir].packets),
+ (unsigned long long)atomic64_read(&counter[dir].bytes));
+
+ return 0;
};
EXPORT_SYMBOL_GPL(seq_print_acct);
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index b8b95f4027ca..57a26cc90c9f 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -88,7 +88,6 @@ static int amanda_help(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
- struct ts_state ts;
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
unsigned int dataoff, start, stop, off, i;
@@ -113,23 +112,20 @@ static int amanda_help(struct sk_buff *skb,
return NF_ACCEPT;
}
- memset(&ts, 0, sizeof(ts));
start = skb_find_text(skb, dataoff, skb->len,
- search[SEARCH_CONNECT].ts, &ts);
+ search[SEARCH_CONNECT].ts);
if (start == UINT_MAX)
goto out;
start += dataoff + search[SEARCH_CONNECT].len;
- memset(&ts, 0, sizeof(ts));
stop = skb_find_text(skb, start, skb->len,
- search[SEARCH_NEWLINE].ts, &ts);
+ search[SEARCH_NEWLINE].ts);
if (stop == UINT_MAX)
goto out;
stop += start;
for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) {
- memset(&ts, 0, sizeof(ts));
- off = skb_find_text(skb, start, stop, search[i].ts, &ts);
+ off = skb_find_text(skb, start, stop, search[i].ts);
if (off == UINT_MAX)
continue;
off += start + search[i].len;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 91a1837acd0e..7a17070c5dab 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -561,7 +561,9 @@ static int exp_seq_show(struct seq_file *s, void *v)
helper->expect_policy[expect->class].name);
}
- return seq_putc(s, '\n');
+ seq_putc(s, '\n');
+
+ return 0;
}
static const struct seq_operations exp_seq_ops = {
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 61a3c927e63c..ea7f36784b3d 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -14,16 +14,11 @@
/* core.c */
unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
- unsigned int hook, const struct net_device *indev,
- const struct net_device *outdev,
- struct nf_hook_ops **elemp,
- int (*okfn)(struct sk_buff *), int hook_thresh);
+ struct nf_hook_state *state, struct nf_hook_ops **elemp);
/* nf_queue.c */
-int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
- unsigned int hook, struct net_device *indev,
- struct net_device *outdev, int (*okfn)(struct sk_buff *),
- unsigned int queuenum);
+int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
+ struct nf_hook_state *state, unsigned int queuenum);
int __init netfilter_queue_init(void);
/* nf_log.c */
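
The prototypes above (and the nf_queue.c hunks that follow) replace the long hook/pf/indev/outdev/okfn argument lists with a single struct nf_hook_state. Reconstructed from the fields this patch actually touches (state->hook, thresh, pf, in, out, sk and okfn), the structure looks roughly like this; exact field order and types are an assumption:

/* Sketch pieced together from the usages in this diff, not copied
 * from include/linux/netfilter.h.
 */
struct nf_hook_state {
	unsigned int hook;		/* entry->state.hook */
	int thresh;			/* set to INT_MIN in nf_reinject() */
	u_int8_t pf;			/* nf_get_afinfo(state->pf) */
	struct net_device *in;		/* held/put with the queue entry */
	struct net_device *out;
	struct sock *sk;		/* sock_hold()/sock_put() likewise */
	int (*okfn)(struct sock *, struct sk_buff *); /* entry->state.okfn(sk, skb) */
};
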
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
index a2233e77cf39..a5aa5967b8e1 100644
--- a/net/netfilter/nf_log_common.c
+++ b/net/netfilter/nf_log_common.c
@@ -17,6 +17,7 @@
#include <net/route.h>
#include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter/xt_LOG.h>
#include <net/netfilter/nf_log.h>
@@ -133,7 +134,7 @@ EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
{
- if (!sk || sk->sk_state == TCP_TIME_WAIT)
+ if (!sk || !sk_fullsock(sk))
return;
read_lock_bh(&sk->sk_callback_lock);
@@ -163,10 +164,10 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
const struct net_device *physindev;
const struct net_device *physoutdev;
- physindev = skb->nf_bridge->physindev;
+ physindev = nf_bridge_get_physindev(skb);
if (physindev && in != physindev)
nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
- physoutdev = skb->nf_bridge->physoutdev;
+ physoutdev = nf_bridge_get_physoutdev(skb);
if (physoutdev && out != physoutdev)
nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
}
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 4c8b68e5fa16..2e88032cd5ad 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -10,6 +10,7 @@
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
@@ -47,19 +48,25 @@ EXPORT_SYMBOL(nf_unregister_queue_handler);
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
+ struct nf_hook_state *state = &entry->state;
+
/* Release those devices we held, or Alexey will kill me. */
- if (entry->indev)
- dev_put(entry->indev);
- if (entry->outdev)
- dev_put(entry->outdev);
+ if (state->in)
+ dev_put(state->in);
+ if (state->out)
+ dev_put(state->out);
+ if (state->sk)
+ sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (entry->skb->nf_bridge) {
- struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
+ struct net_device *physdev;
- if (nf_bridge->physindev)
- dev_put(nf_bridge->physindev);
- if (nf_bridge->physoutdev)
- dev_put(nf_bridge->physoutdev);
+ physdev = nf_bridge_get_physindev(entry->skb);
+ if (physdev)
+ dev_put(physdev);
+ physdev = nf_bridge_get_physoutdev(entry->skb);
+ if (physdev)
+ dev_put(physdev);
}
#endif
/* Drop reference to owner of hook which queued us. */
@@ -70,22 +77,25 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
+ struct nf_hook_state *state = &entry->state;
+
if (!try_module_get(entry->elem->owner))
return false;
- if (entry->indev)
- dev_hold(entry->indev);
- if (entry->outdev)
- dev_hold(entry->outdev);
+ if (state->in)
+ dev_hold(state->in);
+ if (state->out)
+ dev_hold(state->out);
+ if (state->sk)
+ sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (entry->skb->nf_bridge) {
- struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
struct net_device *physdev;
- physdev = nf_bridge->physindev;
+ physdev = nf_bridge_get_physindev(entry->skb);
if (physdev)
dev_hold(physdev);
- physdev = nf_bridge->physoutdev;
+ physdev = nf_bridge_get_physoutdev(entry->skb);
if (physdev)
dev_hold(physdev);
}
@@ -100,12 +110,9 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
* through nf_reinject().
*/
int nf_queue(struct sk_buff *skb,
- struct nf_hook_ops *elem,
- u_int8_t pf, unsigned int hook,
- struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct sk_buff *),
- unsigned int queuenum)
+ struct nf_hook_ops *elem,
+ struct nf_hook_state *state,
+ unsigned int queuenum)
{
int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
@@ -121,7 +128,7 @@ int nf_queue(struct sk_buff *skb,
goto err_unlock;
}
- afinfo = nf_get_afinfo(pf);
+ afinfo = nf_get_afinfo(state->pf);
if (!afinfo)
goto err_unlock;
@@ -134,11 +141,7 @@ int nf_queue(struct sk_buff *skb,
*entry = (struct nf_queue_entry) {
.skb = skb,
.elem = elem,
- .pf = pf,
- .hook = hook,
- .indev = indev,
- .outdev = outdev,
- .okfn = okfn,
+ .state = *state,
.size = sizeof(*entry) + afinfo->route_key_size,
};
@@ -184,30 +187,29 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
}
if (verdict == NF_ACCEPT) {
- afinfo = nf_get_afinfo(entry->pf);
+ afinfo = nf_get_afinfo(entry->state.pf);
if (!afinfo || afinfo->reroute(skb, entry) < 0)
verdict = NF_DROP;
}
+ entry->state.thresh = INT_MIN;
+
if (verdict == NF_ACCEPT) {
next_hook:
- verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
- skb, entry->hook,
- entry->indev, entry->outdev, &elem,
- entry->okfn, INT_MIN);
+ verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
+ skb, &entry->state, &elem);
}
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_STOP:
local_bh_disable();
- entry->okfn(skb);
+ entry->state.okfn(entry->state.sk, skb);
local_bh_enable();
break;
case NF_QUEUE:
- err = nf_queue(skb, elem, entry->pf, entry->hook,
- entry->indev, entry->outdev, entry->okfn,
- verdict >> NF_VERDICT_QBITS);
+ err = nf_queue(skb, elem, &entry->state,
+ verdict >> NF_VERDICT_QBITS);
if (err < 0) {
if (err == -ECANCELED)
goto next_hook;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ac1a9528dbf2..ad9d11fb29fd 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -198,36 +198,31 @@ static int nft_delchain(struct nft_ctx *ctx)
static inline bool
nft_rule_is_active(struct net *net, const struct nft_rule *rule)
{
- return (rule->genmask & (1 << net->nft.gencursor)) == 0;
-}
-
-static inline int gencursor_next(struct net *net)
-{
- return net->nft.gencursor+1 == 1 ? 1 : 0;
+ return (rule->genmask & nft_genmask_cur(net)) == 0;
}
static inline int
nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
{
- return (rule->genmask & (1 << gencursor_next(net))) == 0;
+ return (rule->genmask & nft_genmask_next(net)) == 0;
}
static inline void
nft_rule_activate_next(struct net *net, struct nft_rule *rule)
{
/* Now inactive, will be active in the future */
- rule->genmask = (1 << net->nft.gencursor);
+ rule->genmask = nft_genmask_cur(net);
}
static inline void
nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
{
- rule->genmask = (1 << gencursor_next(net));
+ rule->genmask = nft_genmask_next(net);
}
static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
{
- rule->genmask &= ~(1 << gencursor_next(net));
+ rule->genmask &= ~nft_genmask_next(net);
}
static int
@@ -401,7 +396,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi,
}
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
- [NFTA_TABLE_NAME] = { .type = NLA_STRING },
+ [NFTA_TABLE_NAME] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
};
@@ -686,26 +682,28 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
if (!try_module_get(afi->owner))
return -EAFNOSUPPORT;
- table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
- if (table == NULL) {
- module_put(afi->owner);
- return -ENOMEM;
- }
+ err = -ENOMEM;
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (table == NULL)
+ goto err1;
- nla_strlcpy(table->name, name, nla_len(name));
+ nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
INIT_LIST_HEAD(&table->chains);
INIT_LIST_HEAD(&table->sets);
table->flags = flags;
nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
- if (err < 0) {
- kfree(table);
- module_put(afi->owner);
- return err;
- }
+ if (err < 0)
+ goto err2;
+
list_add_tail_rcu(&table->list, &afi->tables);
return 0;
+err2:
+ kfree(table);
+err1:
+ module_put(afi->owner);
+ return err;
}
static int nft_flush_table(struct nft_ctx *ctx)
@@ -1351,6 +1349,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
rcu_assign_pointer(basechain->stats, stats);
}
+ write_pnet(&basechain->pnet, net);
basechain->type = type;
chain = &basechain->chain;
@@ -1378,7 +1377,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
INIT_LIST_HEAD(&chain->rules);
chain->handle = nf_tables_alloc_handle(table);
- chain->net = net;
chain->table = table;
nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
@@ -1547,6 +1545,23 @@ nla_put_failure:
return -1;
};
+int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ const struct nft_expr *expr)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, attr);
+ if (!nest)
+ goto nla_put_failure;
+ if (nf_tables_fill_expr_info(skb, expr) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
struct nft_expr_info {
const struct nft_expr_ops *ops;
struct nlattr *tb[NFT_EXPR_MAXATTR + 1];
@@ -1624,6 +1639,39 @@ static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
module_put(expr->ops->type->owner);
}
+struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
+ const struct nlattr *nla)
+{
+ struct nft_expr_info info;
+ struct nft_expr *expr;
+ int err;
+
+ err = nf_tables_expr_parse(ctx, nla, &info);
+ if (err < 0)
+ goto err1;
+
+ err = -ENOMEM;
+ expr = kzalloc(info.ops->size, GFP_KERNEL);
+ if (expr == NULL)
+ goto err2;
+
+ err = nf_tables_newexpr(ctx, &info, expr);
+ if (err < 0)
+ goto err2;
+
+ return expr;
+err2:
+ module_put(info.ops->type->owner);
+err1:
+ return ERR_PTR(err);
+}
+
+void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
+{
+ nf_tables_expr_destroy(ctx, expr);
+ kfree(expr);
+}
+
/*
* Rules
*/
@@ -1705,12 +1753,8 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
if (list == NULL)
goto nla_put_failure;
nft_rule_for_each_expr(expr, next, rule) {
- struct nlattr *elem = nla_nest_start(skb, NFTA_LIST_ELEM);
- if (elem == NULL)
+ if (nft_expr_dump(skb, NFTA_LIST_ELEM, expr) < 0)
goto nla_put_failure;
- if (nf_tables_fill_expr_info(skb, expr) < 0)
- goto nla_put_failure;
- nla_nest_end(skb, elem);
}
nla_nest_end(skb, list);
@@ -2161,7 +2205,7 @@ nft_select_set_ops(const struct nlattr * const nla[],
features = 0;
if (nla[NFTA_SET_FLAGS] != NULL) {
features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
- features &= NFT_SET_INTERVAL | NFT_SET_MAP;
+ features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT;
}
bops = NULL;
@@ -2218,6 +2262,8 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
[NFTA_SET_POLICY] = { .type = NLA_U32 },
[NFTA_SET_DESC] = { .type = NLA_NESTED },
[NFTA_SET_ID] = { .type = NLA_U32 },
+ [NFTA_SET_TIMEOUT] = { .type = NLA_U64 },
+ [NFTA_SET_GC_INTERVAL] = { .type = NLA_U32 },
};
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
@@ -2368,6 +2414,13 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
}
+ if (set->timeout &&
+ nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout)))
+ goto nla_put_failure;
+ if (set->gc_int &&
+ nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
+ goto nla_put_failure;
+
if (set->policy != NFT_SET_POL_PERFORMANCE) {
if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy)))
goto nla_put_failure;
@@ -2580,7 +2633,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
char name[IFNAMSIZ];
unsigned int size;
bool create;
- u32 ktype, dtype, flags, policy;
+ u64 timeout;
+ u32 ktype, dtype, flags, policy, gc_int;
struct nft_set_desc desc;
int err;
@@ -2600,15 +2654,20 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
}
desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
- if (desc.klen == 0 || desc.klen > FIELD_SIZEOF(struct nft_data, data))
+ if (desc.klen == 0 || desc.klen > NFT_DATA_VALUE_MAXLEN)
return -EINVAL;
flags = 0;
if (nla[NFTA_SET_FLAGS] != NULL) {
flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
- NFT_SET_INTERVAL | NFT_SET_MAP))
+ NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
+ NFT_SET_MAP | NFT_SET_EVAL))
return -EINVAL;
+		/* Only one of these operations is supported */
+ if ((flags & (NFT_SET_MAP | NFT_SET_EVAL)) ==
+ (NFT_SET_MAP | NFT_SET_EVAL))
+ return -EOPNOTSUPP;
}
dtype = 0;
@@ -2625,14 +2684,26 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
if (nla[NFTA_SET_DATA_LEN] == NULL)
return -EINVAL;
desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
- if (desc.dlen == 0 ||
- desc.dlen > FIELD_SIZEOF(struct nft_data, data))
+ if (desc.dlen == 0 || desc.dlen > NFT_DATA_VALUE_MAXLEN)
return -EINVAL;
} else
- desc.dlen = sizeof(struct nft_data);
+ desc.dlen = sizeof(struct nft_verdict);
} else if (flags & NFT_SET_MAP)
return -EINVAL;
+ timeout = 0;
+ if (nla[NFTA_SET_TIMEOUT] != NULL) {
+ if (!(flags & NFT_SET_TIMEOUT))
+ return -EINVAL;
+ timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT]));
+ }
+ gc_int = 0;
+ if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
+ if (!(flags & NFT_SET_TIMEOUT))
+ return -EINVAL;
+ gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
+ }
+
policy = NFT_SET_POL_PERFORMANCE;
if (nla[NFTA_SET_POLICY] != NULL)
policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
@@ -2692,6 +2763,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
goto err2;
INIT_LIST_HEAD(&set->bindings);
+ write_pnet(&set->pnet, net);
set->ops = ops;
set->ktype = ktype;
set->klen = desc.klen;
@@ -2700,6 +2772,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
set->flags = flags;
set->size = desc.size;
set->policy = policy;
+ set->timeout = timeout;
+ set->gc_int = gc_int;
err = ops->init(set, &desc, nla);
if (err < 0)
@@ -2768,12 +2842,14 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
const struct nft_set_elem *elem)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
enum nft_registers dreg;
dreg = nft_type_to_reg(set->dtype);
- return nft_validate_data_load(ctx, dreg, &elem->data,
- set->dtype == NFT_DATA_VERDICT ?
- NFT_DATA_VERDICT : NFT_DATA_VALUE);
+ return nft_validate_register_store(ctx, dreg, nft_set_ext_data(ext),
+ set->dtype == NFT_DATA_VERDICT ?
+ NFT_DATA_VERDICT : NFT_DATA_VALUE,
+ set->dlen);
}
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -2785,12 +2861,13 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
return -EBUSY;
- if (set->flags & NFT_SET_MAP) {
+ if (binding->flags & NFT_SET_MAP) {
/* If the set is already bound to the same chain all
* jumps are already validated for that chain.
*/
list_for_each_entry(i, &set->bindings, list) {
- if (i->chain == binding->chain)
+ if (binding->flags & NFT_SET_MAP &&
+ i->chain == binding->chain)
goto bind;
}
@@ -2824,6 +2901,35 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
nf_tables_set_destroy(ctx, set);
}
+const struct nft_set_ext_type nft_set_ext_types[] = {
+ [NFT_SET_EXT_KEY] = {
+ .align = __alignof__(u32),
+ },
+ [NFT_SET_EXT_DATA] = {
+ .align = __alignof__(u32),
+ },
+ [NFT_SET_EXT_EXPR] = {
+ .align = __alignof__(struct nft_expr),
+ },
+ [NFT_SET_EXT_FLAGS] = {
+ .len = sizeof(u8),
+ .align = __alignof__(u8),
+ },
+ [NFT_SET_EXT_TIMEOUT] = {
+ .len = sizeof(u64),
+ .align = __alignof__(u64),
+ },
+ [NFT_SET_EXT_EXPIRATION] = {
+ .len = sizeof(unsigned long),
+ .align = __alignof__(unsigned long),
+ },
+ [NFT_SET_EXT_USERDATA] = {
+ .len = sizeof(struct nft_userdata),
+ .align = __alignof__(struct nft_userdata),
+ },
+};
+EXPORT_SYMBOL_GPL(nft_set_ext_types);
+
/*
* Set elements
*/
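
The nft_set_ext_types[] table above drives a small per-element extension area: nft_set_ext_prepare() and the nft_set_ext_add*() calls in nft_add_set_elem() further down accumulate aligned offsets into a template, and nft_set_elem_init() then allocates key, data, flags, timeout, expiration and userdata as one blob behind the element. A rough caller-side sketch of that bookkeeping, with the helper semantics assumed rather than quoted:

/* Illustration only: the order of add calls is simplified, but userdata
 * must come last because its length can push later offsets past U8_MAX
 * (see the comment in nft_add_set_elem() below).
 */
static void example_build_elem_tmpl(struct nft_set_ext_tmpl *tmpl,
				    unsigned int klen, unsigned int dlen,
				    unsigned int ulen)
{
	nft_set_ext_prepare(tmpl);				/* reset offsets/len */
	nft_set_ext_add_length(tmpl, NFT_SET_EXT_KEY, klen);	/* lookup key */
	nft_set_ext_add_length(tmpl, NFT_SET_EXT_DATA, dlen);	/* mapped data */
	nft_set_ext_add(tmpl, NFT_SET_EXT_EXPIRATION);		/* absolute jiffies */
	nft_set_ext_add(tmpl, NFT_SET_EXT_TIMEOUT);		/* per-element u64 */
	nft_set_ext_add_length(tmpl, NFT_SET_EXT_USERDATA, ulen);
	/* nft_set_elem_init() then does kzalloc(ops->elemsize + tmpl->len). */
}
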
@@ -2832,6 +2938,9 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
[NFTA_SET_ELEM_KEY] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_DATA] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_FLAGS] = { .type = NLA_U32 },
+ [NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 },
+ [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY,
+ .len = NFT_USERDATA_MAXLEN },
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -2870,6 +2979,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
@@ -2877,20 +2987,52 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
if (nest == NULL)
goto nla_put_failure;
- if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
- set->klen) < 0)
+ if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, nft_set_ext_key(ext),
+ NFT_DATA_VALUE, set->klen) < 0)
goto nla_put_failure;
- if (set->flags & NFT_SET_MAP &&
- !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
- nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+ nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
set->dlen) < 0)
goto nla_put_failure;
- if (elem->flags != 0)
- if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR) &&
+ nft_expr_dump(skb, NFTA_SET_ELEM_EXPR, nft_set_ext_expr(ext)) < 0)
+ goto nla_put_failure;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+ nla_put_be32(skb, NFTA_SET_ELEM_FLAGS,
+ htonl(*nft_set_ext_flags(ext))))
+ goto nla_put_failure;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
+ cpu_to_be64(*nft_set_ext_timeout(ext))))
+ goto nla_put_failure;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
+ unsigned long expires, now = jiffies;
+
+ expires = *nft_set_ext_expiration(ext);
+ if (time_before(now, expires))
+ expires -= now;
+ else
+ expires = 0;
+
+ if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
+ cpu_to_be64(jiffies_to_msecs(expires))))
goto nla_put_failure;
+ }
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
+ struct nft_userdata *udata;
+
+ udata = nft_set_ext_userdata(ext);
+ if (nla_put(skb, NFTA_SET_ELEM_USERDATA,
+ udata->len + 1, udata->data))
+ goto nla_put_failure;
+ }
nla_nest_end(skb, nest);
return 0;
@@ -3111,20 +3253,65 @@ static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
return trans;
}
+void *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const u32 *key, const u32 *data,
+ u64 timeout, gfp_t gfp)
+{
+ struct nft_set_ext *ext;
+ void *elem;
+
+ elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
+ if (elem == NULL)
+ return NULL;
+
+ ext = nft_set_elem_ext(set, elem);
+ nft_set_ext_init(ext, tmpl);
+
+ memcpy(nft_set_ext_key(ext), key, set->klen);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ memcpy(nft_set_ext_data(ext), data, set->dlen);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
+ *nft_set_ext_expiration(ext) =
+ jiffies + msecs_to_jiffies(timeout);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
+ *nft_set_ext_timeout(ext) = timeout;
+
+ return elem;
+}
+
+void nft_set_elem_destroy(const struct nft_set *set, void *elem)
+{
+ struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
+
+ nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ nft_data_uninit(nft_set_ext_data(ext), set->dtype);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+ nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
+
+ kfree(elem);
+}
+EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
+
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_data_desc d1, d2;
+ struct nft_set_ext_tmpl tmpl;
+ struct nft_set_ext *ext;
struct nft_set_elem elem;
struct nft_set_binding *binding;
+ struct nft_userdata *udata;
+ struct nft_data data;
enum nft_registers dreg;
struct nft_trans *trans;
+ u64 timeout;
+ u32 flags;
+ u8 ulen;
int err;
- if (set->size && set->nelems == set->size)
- return -ENFILE;
-
err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy);
if (err < 0)
@@ -3133,38 +3320,59 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_KEY] == NULL)
return -EINVAL;
- elem.flags = 0;
+ nft_set_ext_prepare(&tmpl);
+
+ flags = 0;
if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
- elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
- if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
+ flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
+ if (flags & ~NFT_SET_ELEM_INTERVAL_END)
+ return -EINVAL;
+ if (!(set->flags & NFT_SET_INTERVAL) &&
+ flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
+ if (flags != 0)
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
}
if (set->flags & NFT_SET_MAP) {
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
- !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
+ !(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
if (nla[NFTA_SET_ELEM_DATA] != NULL &&
- elem.flags & NFT_SET_ELEM_INTERVAL_END)
+ flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
}
- err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]);
+ timeout = 0;
+ if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
+ if (!(set->flags & NFT_SET_TIMEOUT))
+ return -EINVAL;
+ timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT]));
+ } else if (set->flags & NFT_SET_TIMEOUT) {
+ timeout = set->timeout;
+ }
+
+ err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &d1,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
goto err1;
err = -EINVAL;
if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
goto err2;
- err = -EEXIST;
- if (set->ops->get(set, &elem) == 0)
- goto err2;
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, d1.len);
+ if (timeout > 0) {
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
+ if (timeout != set->timeout)
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+ }
if (nla[NFTA_SET_ELEM_DATA] != NULL) {
- err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
+ err = nft_data_init(ctx, &data, sizeof(data), &d2,
+ nla[NFTA_SET_ELEM_DATA]);
if (err < 0)
goto err2;
@@ -3180,32 +3388,68 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
.chain = (struct nft_chain *)binding->chain,
};
- err = nft_validate_data_load(&bind_ctx, dreg,
- &elem.data, d2.type);
+ if (!(binding->flags & NFT_SET_MAP))
+ continue;
+
+ err = nft_validate_register_store(&bind_ctx, dreg,
+ &data,
+ d2.type, d2.len);
if (err < 0)
goto err3;
}
+
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, d2.len);
+ }
+
+ /* The full maximum length of userdata can exceed the maximum
+	 * offset value (U8_MAX) for following extensions, therefore it
+ * must be the last extension added.
+ */
+ ulen = 0;
+ if (nla[NFTA_SET_ELEM_USERDATA] != NULL) {
+ ulen = nla_len(nla[NFTA_SET_ELEM_USERDATA]);
+ if (ulen > 0)
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
+ ulen);
+ }
+
+ err = -ENOMEM;
+ elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, data.data,
+ timeout, GFP_KERNEL);
+ if (elem.priv == NULL)
+ goto err3;
+
+ ext = nft_set_elem_ext(set, elem.priv);
+ if (flags)
+ *nft_set_ext_flags(ext) = flags;
+ if (ulen > 0) {
+ udata = nft_set_ext_userdata(ext);
+ udata->len = ulen - 1;
+ nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
}
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL)
- goto err3;
+ goto err4;
+ ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
err = set->ops->insert(set, &elem);
if (err < 0)
- goto err4;
+ goto err5;
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
-err4:
+err5:
kfree(trans);
+err4:
+ kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
- nft_data_uninit(&elem.data, d2.type);
+ nft_data_uninit(&data, d2.type);
err2:
- nft_data_uninit(&elem.key, d1.type);
+ nft_data_uninit(&elem.key.val, d1.type);
err1:
return err;
}
@@ -3241,11 +3485,15 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
return -EBUSY;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+ if (set->size &&
+ !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
+ return -ENFILE;
+
err = nft_add_set_elem(&ctx, set, attr);
- if (err < 0)
+ if (err < 0) {
+ atomic_dec(&set->nelems);
break;
-
- set->nelems++;
+ }
}
return err;
}
@@ -3268,7 +3516,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_KEY] == NULL)
goto err1;
- err = nft_data_init(ctx, &elem.key, &desc, nla[NFTA_SET_ELEM_KEY]);
+ err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
goto err1;
@@ -3276,21 +3525,26 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
goto err2;
- err = set->ops->get(set, &elem);
- if (err < 0)
- goto err2;
-
trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
if (trans == NULL) {
err = -ENOMEM;
goto err2;
}
+ elem.priv = set->ops->deactivate(set, &elem);
+ if (elem.priv == NULL) {
+ err = -ENOENT;
+ goto err3;
+ }
+
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
+
+err3:
+ kfree(trans);
err2:
- nft_data_uninit(&elem.key, desc.type);
+ nft_data_uninit(&elem.key.val, desc.type);
err1:
return err;
}
@@ -3322,11 +3576,36 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
break;
- set->nelems--;
+ set->ndeact++;
}
return err;
}
+void nft_set_gc_batch_release(struct rcu_head *rcu)
+{
+ struct nft_set_gc_batch *gcb;
+ unsigned int i;
+
+ gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
+ for (i = 0; i < gcb->head.cnt; i++)
+ nft_set_elem_destroy(gcb->head.set, gcb->elems[i]);
+ kfree(gcb);
+}
+EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
+
+struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+ gfp_t gfp)
+{
+ struct nft_set_gc_batch *gcb;
+
+ gcb = kzalloc(sizeof(*gcb), gfp);
+ if (gcb == NULL)
+ return gcb;
+ gcb->head.set = set;
+ return gcb;
+}
+EXPORT_SYMBOL_GPL(nft_set_gc_batch_alloc);
+
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq)
{
@@ -3526,6 +3805,10 @@ static void nf_tables_commit_release(struct nft_trans *trans)
case NFT_MSG_DELSET:
nft_set_destroy(nft_trans_set(trans));
break;
+ case NFT_MSG_DELSETELEM:
+ nft_set_elem_destroy(nft_trans_elem_set(trans),
+ nft_trans_elem(trans).priv);
+ break;
}
kfree(trans);
}
@@ -3540,7 +3823,7 @@ static int nf_tables_commit(struct sk_buff *skb)
while (++net->nft.base_seq == 0);
/* A new generation has just started */
- net->nft.gencursor = gencursor_next(net);
+ net->nft.gencursor = nft_gencursor_next(net);
/* Make sure all packets have left the previous generation before
* purging old rules.
@@ -3611,24 +3894,23 @@ static int nf_tables_commit(struct sk_buff *skb)
NFT_MSG_DELSET, GFP_KERNEL);
break;
case NFT_MSG_NEWSETELEM:
- nf_tables_setelem_notify(&trans->ctx,
- nft_trans_elem_set(trans),
- &nft_trans_elem(trans),
+ te = (struct nft_trans_elem *)trans->data;
+
+ te->set->ops->activate(te->set, &te->elem);
+ nf_tables_setelem_notify(&trans->ctx, te->set,
+ &te->elem,
NFT_MSG_NEWSETELEM, 0);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSETELEM:
te = (struct nft_trans_elem *)trans->data;
+
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
NFT_MSG_DELSETELEM, 0);
- te->set->ops->get(te->set, &te->elem);
- nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
- if (te->set->flags & NFT_SET_MAP &&
- !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_uninit(&te->elem.data, te->set->dtype);
te->set->ops->remove(te->set, &te->elem);
- nft_trans_destroy(trans);
+ atomic_dec(&te->set->nelems);
+ te->set->ndeact--;
break;
}
}
@@ -3660,6 +3942,10 @@ static void nf_tables_abort_release(struct nft_trans *trans)
case NFT_MSG_NEWSET:
nft_set_destroy(nft_trans_set(trans));
break;
+ case NFT_MSG_NEWSETELEM:
+ nft_set_elem_destroy(nft_trans_elem_set(trans),
+ nft_trans_elem(trans).priv);
+ break;
}
kfree(trans);
}
@@ -3728,18 +4014,17 @@ static int nf_tables_abort(struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
- nft_trans_elem_set(trans)->nelems--;
te = (struct nft_trans_elem *)trans->data;
- te->set->ops->get(te->set, &te->elem);
- nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
- if (te->set->flags & NFT_SET_MAP &&
- !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_uninit(&te->elem.data, te->set->dtype);
+
te->set->ops->remove(te->set, &te->elem);
- nft_trans_destroy(trans);
+ atomic_dec(&te->set->nelems);
break;
case NFT_MSG_DELSETELEM:
- nft_trans_elem_set(trans)->nelems++;
+ te = (struct nft_trans_elem *)trans->data;
+
+ te->set->ops->activate(te->set, &te->elem);
+ te->set->ndeact--;
+
nft_trans_destroy(trans);
break;
}
@@ -3814,13 +4099,18 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
const struct nft_set_elem *elem)
{
- if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ const struct nft_data *data;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+ *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
return 0;
- switch (elem->data.verdict) {
+ data = nft_set_ext_data(ext);
+ switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
- return nf_tables_check_loops(ctx, elem->data.chain);
+ return nf_tables_check_loops(ctx, data->verdict.chain);
default:
return 0;
}
@@ -3853,10 +4143,11 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
if (data == NULL)
continue;
- switch (data->verdict) {
+ switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
- err = nf_tables_check_loops(ctx, data->chain);
+ err = nf_tables_check_loops(ctx,
+ data->verdict.chain);
if (err < 0)
return err;
default:
@@ -3871,7 +4162,8 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
continue;
list_for_each_entry(binding, &set->bindings, list) {
- if (binding->chain != chain)
+ if (!(binding->flags & NFT_SET_MAP) ||
+ binding->chain != chain)
continue;
iter.skip = 0;
@@ -3889,85 +4181,129 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
}
/**
- * nft_validate_input_register - validate an expressions' input register
+ * nft_parse_register - parse a register value from a netlink attribute
*
- * @reg: the register number
+ * @attr: netlink attribute
*
- * Validate that the input register is one of the general purpose
- * registers.
+ * Parse and translate a register value from a netlink attribute.
+ * Registers used to be 128 bit wide, these register numbers will be
+ * mapped to the corresponding 32 bit register numbers.
*/
-int nft_validate_input_register(enum nft_registers reg)
+unsigned int nft_parse_register(const struct nlattr *attr)
{
- if (reg <= NFT_REG_VERDICT)
- return -EINVAL;
- if (reg > NFT_REG_MAX)
- return -ERANGE;
- return 0;
+ unsigned int reg;
+
+ reg = ntohl(nla_get_be32(attr));
+ switch (reg) {
+ case NFT_REG_VERDICT...NFT_REG_4:
+ return reg * NFT_REG_SIZE / NFT_REG32_SIZE;
+ default:
+ return reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
+ }
}
-EXPORT_SYMBOL_GPL(nft_validate_input_register);
+EXPORT_SYMBOL_GPL(nft_parse_register);
/**
- * nft_validate_output_register - validate an expressions' output register
+ * nft_dump_register - dump a register value to a netlink attribute
+ *
+ * @skb: socket buffer
+ * @attr: attribute number
+ * @reg: register number
+ *
+ * Construct a netlink attribute containing the register number. For
+ * compatibility reasons, register numbers being a multiple of 4 are
+ * translated to the corresponding 128 bit register numbers.
+ */
+int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg)
+{
+ if (reg % (NFT_REG_SIZE / NFT_REG32_SIZE) == 0)
+ reg = reg / (NFT_REG_SIZE / NFT_REG32_SIZE);
+ else
+ reg = reg - NFT_REG_SIZE / NFT_REG32_SIZE + NFT_REG32_00;
+
+ return nla_put_be32(skb, attr, htonl(reg));
+}
+EXPORT_SYMBOL_GPL(nft_dump_register);
+
+/**
+ * nft_validate_register_load - validate a load from a register
*
* @reg: the register number
+ * @len: the length of the data
*
- * Validate that the output register is one of the general purpose
- * registers or the verdict register.
+ * Validate that the input register is one of the general purpose
+ * registers and that the length of the load is within the bounds.
*/
-int nft_validate_output_register(enum nft_registers reg)
+int nft_validate_register_load(enum nft_registers reg, unsigned int len)
{
- if (reg < NFT_REG_VERDICT)
+ if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
- if (reg > NFT_REG_MAX)
+ if (len == 0)
+ return -EINVAL;
+ if (reg * NFT_REG32_SIZE + len > FIELD_SIZEOF(struct nft_regs, data))
return -ERANGE;
+
return 0;
}
-EXPORT_SYMBOL_GPL(nft_validate_output_register);
+EXPORT_SYMBOL_GPL(nft_validate_register_load);
/**
- * nft_validate_data_load - validate an expressions' data load
+ *	nft_validate_register_store - validate an expression's register store
*
* @ctx: context of the expression performing the load
* @reg: the destination register number
* @data: the data to load
* @type: the data type
+ * @len: the length of the data
*
* Validate that a data load uses the appropriate data type for
- * the destination register. A value of NULL for the data means
- * that its runtime gathered data, which is always of type
- * NFT_DATA_VALUE.
+ * the destination register and the length is within the bounds.
+ *	A value of NULL for the data means that it is runtime-gathered
+ * data.
*/
-int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type)
+int nft_validate_register_store(const struct nft_ctx *ctx,
+ enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len)
{
int err;
switch (reg) {
case NFT_REG_VERDICT:
- if (data == NULL || type != NFT_DATA_VERDICT)
+ if (type != NFT_DATA_VERDICT)
return -EINVAL;
- if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
- err = nf_tables_check_loops(ctx, data->chain);
+ if (data != NULL &&
+ (data->verdict.code == NFT_GOTO ||
+ data->verdict.code == NFT_JUMP)) {
+ err = nf_tables_check_loops(ctx, data->verdict.chain);
if (err < 0)
return err;
- if (ctx->chain->level + 1 > data->chain->level) {
+ if (ctx->chain->level + 1 >
+ data->verdict.chain->level) {
if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
return -EMLINK;
- data->chain->level = ctx->chain->level + 1;
+ data->verdict.chain->level = ctx->chain->level + 1;
}
}
return 0;
default:
+ if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
+ return -EINVAL;
+ if (len == 0)
+ return -EINVAL;
+ if (reg * NFT_REG32_SIZE + len >
+ FIELD_SIZEOF(struct nft_regs, data))
+ return -ERANGE;
+
if (data != NULL && type != NFT_DATA_VALUE)
return -EINVAL;
return 0;
}
}
-EXPORT_SYMBOL_GPL(nft_validate_data_load);
+EXPORT_SYMBOL_GPL(nft_validate_register_store);
static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
[NFTA_VERDICT_CODE] = { .type = NLA_U32 },
@@ -3988,11 +4324,11 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
if (!tb[NFTA_VERDICT_CODE])
return -EINVAL;
- data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+ data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
- switch (data->verdict) {
+ switch (data->verdict.code) {
default:
- switch (data->verdict & NF_VERDICT_MASK) {
+ switch (data->verdict.code & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
@@ -4004,7 +4340,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
case NFT_CONTINUE:
case NFT_BREAK:
case NFT_RETURN:
- desc->len = sizeof(data->verdict);
break;
case NFT_JUMP:
case NFT_GOTO:
@@ -4018,21 +4353,21 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
return -EOPNOTSUPP;
chain->use++;
- data->chain = chain;
- desc->len = sizeof(data);
+ data->verdict.chain = chain;
break;
}
+ desc->len = sizeof(data->verdict);
desc->type = NFT_DATA_VERDICT;
return 0;
}
static void nft_verdict_uninit(const struct nft_data *data)
{
- switch (data->verdict) {
+ switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
- data->chain->use--;
+ data->verdict.chain->use--;
break;
}
}
@@ -4045,13 +4380,14 @@ static int nft_verdict_dump(struct sk_buff *skb, const struct nft_data *data)
if (!nest)
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict)))
+ if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict.code)))
goto nla_put_failure;
- switch (data->verdict) {
+ switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
- if (nla_put_string(skb, NFTA_VERDICT_CHAIN, data->chain->name))
+ if (nla_put_string(skb, NFTA_VERDICT_CHAIN,
+ data->verdict.chain->name))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
@@ -4061,7 +4397,8 @@ nla_put_failure:
return -1;
}
-static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
+static int nft_value_init(const struct nft_ctx *ctx,
+ struct nft_data *data, unsigned int size,
struct nft_data_desc *desc, const struct nlattr *nla)
{
unsigned int len;
@@ -4069,10 +4406,10 @@ static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
len = nla_len(nla);
if (len == 0)
return -EINVAL;
- if (len > sizeof(data->data))
+ if (len > size)
return -EOVERFLOW;
- nla_memcpy(data->data, nla, sizeof(data->data));
+ nla_memcpy(data->data, nla, len);
desc->type = NFT_DATA_VALUE;
desc->len = len;
return 0;
@@ -4085,8 +4422,7 @@ static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
}
static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
- [NFTA_DATA_VALUE] = { .type = NLA_BINARY,
- .len = FIELD_SIZEOF(struct nft_data, data) },
+ [NFTA_DATA_VALUE] = { .type = NLA_BINARY },
[NFTA_DATA_VERDICT] = { .type = NLA_NESTED },
};
@@ -4095,6 +4431,7 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
*
* @ctx: context of the expression using the data
* @data: destination struct nft_data
+ * @size: maximum data length
* @desc: data description
* @nla: netlink attribute containing data
*
@@ -4104,7 +4441,8 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
* The caller can indicate that it only wants to accept data of type
* NFT_DATA_VALUE by passing NULL for the ctx argument.
*/
-int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+int nft_data_init(const struct nft_ctx *ctx,
+ struct nft_data *data, unsigned int size,
struct nft_data_desc *desc, const struct nlattr *nla)
{
struct nlattr *tb[NFTA_DATA_MAX + 1];
@@ -4115,7 +4453,8 @@ int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
return err;
if (tb[NFTA_DATA_VALUE])
- return nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
+ return nft_value_init(ctx, data, size, desc,
+ tb[NFTA_DATA_VALUE]);
if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
return -EINVAL;
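
The nft_parse_register()/nft_dump_register() pair above converts between the old 128-bit register numbering (NFT_REG_VERDICT, NFT_REG_1..NFT_REG_4) and the new 32-bit slots. Plugging in the usual uapi constants (NFT_REG_SIZE = 16, NFT_REG32_SIZE = 4, NFT_REG_1 = 1, NFT_REG32_00 = 8; quoted from memory, so double-check against the header) gives this worked example:

/* parse:  NFT_REG_VERDICT (0) -> 0 * 16/4     = 0
 *         NFT_REG_1       (1) -> 1 * 16/4     = 4   first 32-bit slot
 *                                                   after the verdict
 *         NFT_REG32_00    (8) -> 8 + 16/4 - 8 = 4   same slot, new name
 *         NFT_REG32_01    (9) -> 9 + 4 - 8    = 5
 *
 * dump:   4 (multiple of 4)   -> 4 / 4        = 1   = NFT_REG_1
 *         5                   -> 5 - 4 + 8    = 9   = NFT_REG32_01
 *
 * So each legacy 128-bit register aliases four consecutive 32-bit
 * registers, and the verdict keeps occupying slots 0-3.
 */
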
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 2d298dccb6dd..f153b07073af 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -8,6 +8,7 @@
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -21,24 +22,66 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_log.h>
+enum nft_trace {
+ NFT_TRACE_RULE,
+ NFT_TRACE_RETURN,
+ NFT_TRACE_POLICY,
+};
+
+static const char *const comments[] = {
+ [NFT_TRACE_RULE] = "rule",
+ [NFT_TRACE_RETURN] = "return",
+ [NFT_TRACE_POLICY] = "policy",
+};
+
+static struct nf_loginfo trace_loginfo = {
+ .type = NF_LOG_TYPE_LOG,
+ .u = {
+ .log = {
+ .level = LOGLEVEL_WARNING,
+ .logflags = NF_LOG_MASK,
+ },
+ },
+};
+
+static void __nft_trace_packet(const struct nft_pktinfo *pkt,
+ const struct nft_chain *chain,
+ int rulenum, enum nft_trace type)
+{
+ struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+ nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
+ pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+ chain->table->name, chain->name, comments[type],
+ rulenum);
+}
+
+static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
+ const struct nft_chain *chain,
+ int rulenum, enum nft_trace type)
+{
+ if (unlikely(pkt->skb->nf_trace))
+ __nft_trace_packet(pkt, chain, rulenum, type);
+}
+
static void nft_cmp_fast_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1])
+ struct nft_regs *regs)
{
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
u32 mask = nft_cmp_fast_mask(priv->len);
- if ((data[priv->sreg].data[0] & mask) == priv->data)
+ if ((regs->data[priv->sreg] & mask) == priv->data)
return;
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
static bool nft_payload_fast_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
- struct nft_data *dest = &data[priv->dreg];
+ u32 *dest = &regs->data[priv->dreg];
unsigned char *ptr;
if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
@@ -51,12 +94,13 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr,
if (unlikely(ptr + priv->len >= skb_tail_pointer(skb)))
return false;
+ *dest = 0;
if (priv->len == 2)
- *(u16 *)dest->data = *(u16 *)ptr;
+ *(u16 *)dest = *(u16 *)ptr;
else if (priv->len == 4)
- *(u32 *)dest->data = *(u32 *)ptr;
+ *(u32 *)dest = *(u32 *)ptr;
else
- *(u8 *)dest->data = *(u8 *)ptr;
+ *(u8 *)dest = *(u8 *)ptr;
return true;
}
@@ -66,62 +110,25 @@ struct nft_jumpstack {
int rulenum;
};
-enum nft_trace {
- NFT_TRACE_RULE,
- NFT_TRACE_RETURN,
- NFT_TRACE_POLICY,
-};
-
-static const char *const comments[] = {
- [NFT_TRACE_RULE] = "rule",
- [NFT_TRACE_RETURN] = "return",
- [NFT_TRACE_POLICY] = "policy",
-};
-
-static struct nf_loginfo trace_loginfo = {
- .type = NF_LOG_TYPE_LOG,
- .u = {
- .log = {
- .level = 4,
- .logflags = NF_LOG_MASK,
- },
- },
-};
-
-static void nft_trace_packet(const struct nft_pktinfo *pkt,
- const struct nft_chain *chain,
- int rulenum, enum nft_trace type)
-{
- struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
-
- nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
- pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
- chain->table->name, chain->name, comments[type],
- rulenum);
-}
-
unsigned int
nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
{
const struct nft_chain *chain = ops->priv, *basechain = chain;
+ const struct net *net = read_pnet(&nft_base_chain(basechain)->pnet);
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
- struct nft_data data[NFT_REG_MAX + 1];
+ struct nft_regs regs;
unsigned int stackptr = 0;
struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
struct nft_stats *stats;
int rulenum;
- /*
- * Cache cursor to avoid problems in case that the cursor is updated
- * while traversing the ruleset.
- */
- unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
+ unsigned int gencursor = nft_genmask_cur(net);
do_chain:
rulenum = 0;
rule = list_entry(&chain->rules, struct nft_rule, list);
next_rule:
- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ regs.verdict.code = NFT_CONTINUE;
list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
/* This rule is not active, skip. */
@@ -132,62 +139,52 @@ next_rule:
nft_rule_for_each_expr(expr, last, rule) {
if (expr->ops == &nft_cmp_fast_ops)
- nft_cmp_fast_eval(expr, data);
+ nft_cmp_fast_eval(expr, &regs);
else if (expr->ops != &nft_payload_fast_ops ||
- !nft_payload_fast_eval(expr, data, pkt))
- expr->ops->eval(expr, data, pkt);
+ !nft_payload_fast_eval(expr, &regs, pkt))
+ expr->ops->eval(expr, &regs, pkt);
- if (data[NFT_REG_VERDICT].verdict != NFT_CONTINUE)
+ if (regs.verdict.code != NFT_CONTINUE)
break;
}
- switch (data[NFT_REG_VERDICT].verdict) {
+ switch (regs.verdict.code) {
case NFT_BREAK:
- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ regs.verdict.code = NFT_CONTINUE;
continue;
case NFT_CONTINUE:
- if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
continue;
}
break;
}
- switch (data[NFT_REG_VERDICT].verdict & NF_VERDICT_MASK) {
+ switch (regs.verdict.code & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
- if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
-
- return data[NFT_REG_VERDICT].verdict;
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+ return regs.verdict.code;
}
- switch (data[NFT_REG_VERDICT].verdict) {
+ switch (regs.verdict.code) {
case NFT_JUMP:
- if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
-
BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
jumpstack[stackptr].chain = chain;
jumpstack[stackptr].rule = rule;
jumpstack[stackptr].rulenum = rulenum;
stackptr++;
- chain = data[NFT_REG_VERDICT].chain;
- goto do_chain;
+ /* fall through */
case NFT_GOTO:
- if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
- chain = data[NFT_REG_VERDICT].chain;
+ chain = regs.verdict.chain;
goto do_chain;
- case NFT_RETURN:
- if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
- break;
case NFT_CONTINUE:
- if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
- nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
+ rulenum++;
+ /* fall through */
+ case NFT_RETURN:
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
break;
default:
WARN_ON(1);
@@ -201,8 +198,7 @@ next_rule:
goto next_rule;
}
- if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+ nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
rcu_read_lock_bh();
stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
@@ -244,8 +240,14 @@ int __init nf_tables_core_module_init(void)
if (err < 0)
goto err6;
+ err = nft_dynset_module_init();
+ if (err < 0)
+ goto err7;
+
return 0;
+err7:
+ nft_payload_module_exit();
err6:
nft_byteorder_module_exit();
err5:
@@ -262,6 +264,7 @@ err1:
void nf_tables_core_module_exit(void)
{
+ nft_dynset_module_exit();
nft_payload_module_exit();
nft_byteorder_module_exit();
nft_bitwise_module_exit();
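
nft_do_chain() above now evaluates expressions against struct nft_regs, addressing 32-bit slots directly (regs->data[reg]) with the verdict overlaid on the first slots. Consistent with the bounds checks in nf_tables_api.c (FIELD_SIZEOF(struct nft_regs, data)), the layout is roughly the following; the array size is an assumption, not quoted from the header:

/* Approximate layout implied by this series: 4 u32 slots are reserved
 * for the verdict, followed by 16 general-purpose 32-bit registers.
 */
struct nft_regs {
	union {
		u32			data[20];
		struct nft_verdict	verdict;	/* code + jump chain */
	};
};
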
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 11d85b3813f2..3ad91266c821 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -23,6 +23,7 @@
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
+#include <linux/netfilter_bridge.h>
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_log.h>
@@ -62,7 +63,7 @@ struct nfulnl_instance {
struct timer_list timer;
struct net *net;
struct user_namespace *peer_user_ns; /* User namespace of the peer process */
- int peer_portid; /* PORTID of the peer process */
+ u32 peer_portid; /* PORTID of the peer process */
/* configurable parameters */
unsigned int flushtimeout; /* timeout until queue flush */
@@ -151,7 +152,7 @@ static void nfulnl_timer(unsigned long data);
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
- int portid, struct user_namespace *user_ns)
+ u32 portid, struct user_namespace *user_ns)
{
struct nfulnl_instance *inst;
struct nfnl_log_net *log = nfnl_log_pernet(net);
@@ -448,14 +449,18 @@ __build_packet_message(struct nfnl_log_net *log,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
+ struct net_device *physindev;
+
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
- if (skb->nf_bridge && skb->nf_bridge->physindev &&
+
+ physindev = nf_bridge_get_physindev(skb);
+ if (physindev &&
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
- htonl(skb->nf_bridge->physindev->ifindex)))
+ htonl(physindev->ifindex)))
goto nla_put_failure;
}
#endif
@@ -479,14 +484,18 @@ __build_packet_message(struct nfnl_log_net *log,
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
+ struct net_device *physoutdev;
+
/* Case 2: outdev is a bridge group, we need to look
* for the physical output device (when called from ipv4) */
if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
- if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
+
+ physoutdev = nf_bridge_get_physoutdev(skb);
+ if (physoutdev &&
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
- htonl(skb->nf_bridge->physoutdev->ifindex)))
+ htonl(physoutdev->ifindex)))
goto nla_put_failure;
}
#endif
@@ -539,7 +548,7 @@ __build_packet_message(struct nfnl_log_net *log,
/* UID */
sk = skb->sk;
- if (sk && sk->sk_state != TCP_TIME_WAIT) {
+ if (sk && sk_fullsock(sk)) {
read_lock_bh(&sk->sk_callback_lock);
if (sk->sk_socket && sk->sk_socket->file) {
struct file *file = sk->sk_socket->file;
@@ -998,11 +1007,13 @@ static int seq_show(struct seq_file *s, void *v)
{
const struct nfulnl_instance *inst = v;
- return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
- inst->group_num,
- inst->peer_portid, inst->qlen,
- inst->copy_mode, inst->copy_range,
- inst->flushtimeout, atomic_read(&inst->use));
+ seq_printf(s, "%5u %6u %5u %1u %5u %6u %2u\n",
+ inst->group_num,
+ inst->peer_portid, inst->qlen,
+ inst->copy_mode, inst->copy_range,
+ inst->flushtimeout, atomic_read(&inst->use));
+
+ return 0;
}
static const struct seq_operations nful_seq_ops = {
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 0db8515e76da..0b98c7420239 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -25,6 +25,7 @@
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
@@ -54,7 +55,7 @@ struct nfqnl_instance {
struct hlist_node hlist; /* global list of queues */
struct rcu_head rcu;
- int peer_portid;
+ u32 peer_portid;
unsigned int queue_maxlen;
unsigned int copy_range;
unsigned int queue_dropped;
@@ -109,8 +110,7 @@ instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
}
static struct nfqnl_instance *
-instance_create(struct nfnl_queue_net *q, u_int16_t queue_num,
- int portid)
+instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
struct nfqnl_instance *inst;
unsigned int h;
@@ -257,7 +257,7 @@ static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
const struct cred *cred;
- if (sk->sk_state == TCP_TIME_WAIT)
+ if (!sk_fullsock(sk))
return 0;
read_lock_bh(&sk->sk_callback_lock);
@@ -314,13 +314,13 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (entskb->tstamp.tv64)
size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
- if (entry->hook <= NF_INET_FORWARD ||
- (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
+ if (entry->state.hook <= NF_INET_FORWARD ||
+ (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
csum_verify = !skb_csum_unnecessary(entskb);
else
csum_verify = false;
- outdev = entry->outdev;
+ outdev = entry->state.out;
switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
case NFQNL_COPY_META:
@@ -368,23 +368,23 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
return NULL;
}
nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = entry->pf;
+ nfmsg->nfgen_family = entry->state.pf;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(queue->queue_num);
nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
pmsg = nla_data(nla);
pmsg->hw_protocol = entskb->protocol;
- pmsg->hook = entry->hook;
+ pmsg->hook = entry->state.hook;
*packet_id_ptr = &pmsg->packet_id;
- indev = entry->indev;
+ indev = entry->state.in;
if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
goto nla_put_failure;
#else
- if (entry->pf == PF_BRIDGE) {
+ if (entry->state.pf == PF_BRIDGE) {
/* Case 1: indev is physical input device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
@@ -396,14 +396,18 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
+ int physinif;
+
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
- if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
+
+ physinif = nf_bridge_get_physinif(entskb);
+ if (physinif &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
- htonl(entskb->nf_bridge->physindev->ifindex)))
+ htonl(physinif)))
goto nla_put_failure;
}
#endif
@@ -414,7 +418,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
goto nla_put_failure;
#else
- if (entry->pf == PF_BRIDGE) {
+ if (entry->state.pf == PF_BRIDGE) {
/* Case 1: outdev is physical output device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
@@ -426,14 +430,18 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
+ int physoutif;
+
/* Case 2: outdev is bridge group, we need to look for
* physical output device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
- if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
+
+ physoutif = nf_bridge_get_physoutif(entskb);
+ if (physoutif &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
- htonl(entskb->nf_bridge->physoutdev->ifindex)))
+ htonl(physoutif)))
goto nla_put_failure;
}
#endif
@@ -633,8 +641,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
struct nfqnl_instance *queue;
struct sk_buff *skb, *segs;
int err = -ENOBUFS;
- struct net *net = dev_net(entry->indev ?
- entry->indev : entry->outdev);
+ struct net *net = dev_net(entry->state.in ?
+ entry->state.in : entry->state.out);
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
/* rcu_read_lock()ed by nf_hook_slow() */
@@ -647,7 +655,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
skb = entry->skb;
- switch (entry->pf) {
+ switch (entry->state.pf) {
case NFPROTO_IPV4:
skb->protocol = htons(ETH_P_IP);
break;
@@ -757,19 +765,20 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
- if (entry->indev)
- if (entry->indev->ifindex == ifindex)
+ if (entry->state.in)
+ if (entry->state.in->ifindex == ifindex)
return 1;
- if (entry->outdev)
- if (entry->outdev->ifindex == ifindex)
+ if (entry->state.out)
+ if (entry->state.out->ifindex == ifindex)
return 1;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (entry->skb->nf_bridge) {
- if (entry->skb->nf_bridge->physindev &&
- entry->skb->nf_bridge->physindev->ifindex == ifindex)
- return 1;
- if (entry->skb->nf_bridge->physoutdev &&
- entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
+ int physinif, physoutif;
+
+ physinif = nf_bridge_get_physinif(entry->skb);
+ physoutif = nf_bridge_get_physoutif(entry->skb);
+
+ if (physinif == ifindex || physoutif == ifindex)
return 1;
}
#endif
@@ -860,7 +869,7 @@ static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
};
static struct nfqnl_instance *
-verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, int nlportid)
+verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
struct nfqnl_instance *queue;
@@ -1242,7 +1251,7 @@ static int seq_show(struct seq_file *s, void *v)
{
const struct nfqnl_instance *inst = v;
- seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
+ seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
inst->queue_num,
inst->peer_portid, inst->queue_total,
inst->copy_mode, inst->copy_range,
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 4fb6ee2c1106..d71cc18fa35d 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -26,18 +26,16 @@ struct nft_bitwise {
};
static void nft_bitwise_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
- const struct nft_data *src = &data[priv->sreg];
- struct nft_data *dst = &data[priv->dreg];
+ const u32 *src = &regs->data[priv->sreg];
+ u32 *dst = &regs->data[priv->dreg];
unsigned int i;
- for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++) {
- dst->data[i] = (src->data[i] & priv->mask.data[i]) ^
- priv->xor.data[i];
- }
+ for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++)
+ dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i];
}
static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
@@ -63,28 +61,27 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
tb[NFTA_BITWISE_XOR] == NULL)
return -EINVAL;
- priv->sreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_SREG]));
- err = nft_validate_input_register(priv->sreg);
+ priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));
+ priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
+ err = nft_validate_register_load(priv->sreg, priv->len);
if (err < 0)
return err;
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_DREG]));
- err = nft_validate_output_register(priv->dreg);
+ priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
+ err = nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, priv->len);
if (err < 0)
return err;
- err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
- if (err < 0)
- return err;
-
- priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));
- err = nft_data_init(NULL, &priv->mask, &d1, tb[NFTA_BITWISE_MASK]);
+ err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &d1,
+ tb[NFTA_BITWISE_MASK]);
if (err < 0)
return err;
if (d1.len != priv->len)
return -EINVAL;
- err = nft_data_init(NULL, &priv->xor, &d2, tb[NFTA_BITWISE_XOR]);
+ err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2,
+ tb[NFTA_BITWISE_XOR]);
if (err < 0)
return err;
if (d2.len != priv->len)
@@ -97,9 +94,9 @@ static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_BITWISE_SREG, htonl(priv->sreg)))
+ if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_BITWISE_DREG, htonl(priv->dreg)))
+ if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
goto nla_put_failure;
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index c39ed8d29df1..fde5145f2e36 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -26,16 +26,17 @@ struct nft_byteorder {
};
static void nft_byteorder_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_byteorder *priv = nft_expr_priv(expr);
- struct nft_data *src = &data[priv->sreg], *dst = &data[priv->dreg];
+ u32 *src = &regs->data[priv->sreg];
+ u32 *dst = &regs->data[priv->dreg];
union { u32 u32; u16 u16; } *s, *d;
unsigned int i;
- s = (void *)src->data;
- d = (void *)dst->data;
+ s = (void *)src;
+ d = (void *)dst;
switch (priv->size) {
case 4:
@@ -87,19 +88,6 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
tb[NFTA_BYTEORDER_OP] == NULL)
return -EINVAL;
- priv->sreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SREG]));
- err = nft_validate_input_register(priv->sreg);
- if (err < 0)
- return err;
-
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- return err;
- err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
- if (err < 0)
- return err;
-
priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
@@ -109,10 +97,6 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
return -EINVAL;
}
- priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));
- if (priv->len == 0 || priv->len > FIELD_SIZEOF(struct nft_data, data))
- return -EINVAL;
-
priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE]));
switch (priv->size) {
case 2:
@@ -122,16 +106,24 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
return -EINVAL;
}
- return 0;
+ priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]);
+ priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));
+ err = nft_validate_register_load(priv->sreg, priv->len);
+ if (err < 0)
+ return err;
+
+ priv->dreg = nft_parse_register(tb[NFTA_BYTEORDER_DREG]);
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, priv->len);
}
static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_byteorder *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_BYTEORDER_SREG, htonl(priv->sreg)))
+ if (nft_dump_register(skb, NFTA_BYTEORDER_SREG, priv->sreg))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_BYTEORDER_DREG, htonl(priv->dreg)))
+ if (nft_dump_register(skb, NFTA_BYTEORDER_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
goto nla_put_failure;
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index e2b3f51c81f1..e25b35d70e4d 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -25,13 +25,13 @@ struct nft_cmp_expr {
};
static void nft_cmp_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
int d;
- d = nft_data_cmp(&data[priv->sreg], &priv->data, priv->len);
+ d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
switch (priv->op) {
case NFT_CMP_EQ:
if (d != 0)
@@ -59,7 +59,7 @@ static void nft_cmp_eval(const struct nft_expr *expr,
return;
mismatch:
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
@@ -75,12 +75,16 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct nft_data_desc desc;
int err;
- priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
- priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
-
- err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
+ err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
+ tb[NFTA_CMP_DATA]);
BUG_ON(err < 0);
+ priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
+ err = nft_validate_register_load(priv->sreg, desc.len);
+ if (err < 0)
+ return err;
+
+ priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
priv->len = desc.len;
return 0;
}
@@ -89,7 +93,7 @@ static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+ if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
goto nla_put_failure;
@@ -122,13 +126,18 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
u32 mask;
int err;
- priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
-
- err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+ err = nft_data_init(NULL, &data, sizeof(data), &desc,
+ tb[NFTA_CMP_DATA]);
BUG_ON(err < 0);
- desc.len *= BITS_PER_BYTE;
+ priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
+ err = nft_validate_register_load(priv->sreg, desc.len);
+ if (err < 0)
+ return err;
+
+ desc.len *= BITS_PER_BYTE;
mask = nft_cmp_fast_mask(desc.len);
+
priv->data = data.data[0] & mask;
priv->len = desc.len;
return 0;
@@ -139,7 +148,7 @@ static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
struct nft_data data;
- if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+ if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
goto nla_put_failure;
@@ -167,7 +176,6 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
struct nft_data_desc desc;
struct nft_data data;
- enum nft_registers sreg;
enum nft_cmp_ops op;
int err;
@@ -176,11 +184,6 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
tb[NFTA_CMP_DATA] == NULL)
return ERR_PTR(-EINVAL);
- sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
- err = nft_validate_input_register(sreg);
- if (err < 0)
- return ERR_PTR(err);
-
op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
switch (op) {
case NFT_CMP_EQ:
@@ -194,7 +197,8 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
return ERR_PTR(-EINVAL);
}
- err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+ err = nft_data_init(NULL, &data, sizeof(data), &desc,
+ tb[NFTA_CMP_DATA]);
if (err < 0)
return ERR_PTR(err);
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 65f3e2b6be44..7f29cfc76349 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -20,6 +20,7 @@
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_arp/arp_tables.h>
#include <net/netfilter/nf_tables.h>
static int nft_compat_chain_validate_dependency(const char *tablename,
@@ -42,6 +43,7 @@ union nft_entry {
struct ipt_entry e4;
struct ip6t_entry e6;
struct ebt_entry ebt;
+ struct arpt_entry arp;
};
static inline void
@@ -53,7 +55,7 @@ nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
}
static void nft_target_eval_xt(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
void *info = nft_expr_priv(expr);
@@ -70,16 +72,16 @@ static void nft_target_eval_xt(const struct nft_expr *expr,
switch (ret) {
case XT_CONTINUE:
- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ regs->verdict.code = NFT_CONTINUE;
break;
default:
- data[NFT_REG_VERDICT].verdict = ret;
+ regs->verdict.code = ret;
break;
}
}
static void nft_target_eval_bridge(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
void *info = nft_expr_priv(expr);
@@ -96,19 +98,19 @@ static void nft_target_eval_bridge(const struct nft_expr *expr,
switch (ret) {
case EBT_ACCEPT:
- data[NFT_REG_VERDICT].verdict = NF_ACCEPT;
+ regs->verdict.code = NF_ACCEPT;
break;
case EBT_DROP:
- data[NFT_REG_VERDICT].verdict = NF_DROP;
+ regs->verdict.code = NF_DROP;
break;
case EBT_CONTINUE:
- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ regs->verdict.code = NFT_CONTINUE;
break;
case EBT_RETURN:
- data[NFT_REG_VERDICT].verdict = NFT_RETURN;
+ regs->verdict.code = NFT_RETURN;
break;
default:
- data[NFT_REG_VERDICT].verdict = ret;
+ regs->verdict.code = ret;
break;
}
}
@@ -143,6 +145,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
entry->ebt.ethproto = (__force __be16)proto;
entry->ebt.invflags = inv ? EBT_IPROTO : 0;
break;
+ case NFPROTO_ARP:
+ break;
}
par->entryinfo = entry;
par->target = target;
@@ -300,7 +304,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
}
static void nft_match_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
void *info = nft_expr_priv(expr);
@@ -313,16 +317,16 @@ static void nft_match_eval(const struct nft_expr *expr,
ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
if (pkt->xt.hotdrop) {
- data[NFT_REG_VERDICT].verdict = NF_DROP;
+ regs->verdict.code = NF_DROP;
return;
}
- switch(ret) {
- case true:
- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ switch (ret ? 1 : 0) {
+ case 1:
+ regs->verdict.code = NFT_CONTINUE;
break;
- case false:
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ case 0:
+ regs->verdict.code = NFT_BREAK;
break;
}
}
@@ -357,6 +361,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
entry->ebt.ethproto = (__force __be16)proto;
entry->ebt.invflags = inv ? EBT_IPROTO : 0;
break;
+ case NFPROTO_ARP:
+ break;
}
par->entryinfo = entry;
par->match = match;
@@ -543,6 +549,9 @@ nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
case NFPROTO_BRIDGE:
fmt = "ebt_%s";
break;
+ case NFPROTO_ARP:
+ fmt = "arpt_%s";
+ break;
default:
pr_err("nft_compat: unsupported protocol %d\n",
nfmsg->nfgen_family);
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index c89ee486ce54..17591239229f 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -24,7 +24,7 @@ struct nft_counter {
};
static void nft_counter_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_counter *priv = nft_expr_priv(expr);
@@ -92,6 +92,7 @@ static struct nft_expr_type nft_counter_type __read_mostly = {
.ops = &nft_counter_ops,
.policy = nft_counter_policy,
.maxattr = NFTA_COUNTER_MAX,
+ .flags = NFT_EXPR_STATEFUL,
.owner = THIS_MODULE,
};
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index cc5603016242..8cbca3432f90 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -31,11 +31,11 @@ struct nft_ct {
};
static void nft_ct_get_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
- struct nft_data *dest = &data[priv->dreg];
+ u32 *dest = &regs->data[priv->dreg];
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
const struct nf_conn_help *help;
@@ -54,8 +54,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
state = NF_CT_STATE_UNTRACKED_BIT;
else
state = NF_CT_STATE_BIT(ctinfo);
- dest->data[0] = state;
+ *dest = state;
return;
+ default:
+ break;
}
if (ct == NULL)
@@ -63,26 +65,26 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
switch (priv->key) {
case NFT_CT_DIRECTION:
- dest->data[0] = CTINFO2DIR(ctinfo);
+ *dest = CTINFO2DIR(ctinfo);
return;
case NFT_CT_STATUS:
- dest->data[0] = ct->status;
+ *dest = ct->status;
return;
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
- dest->data[0] = ct->mark;
+ *dest = ct->mark;
return;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
- dest->data[0] = ct->secmark;
+ *dest = ct->secmark;
return;
#endif
case NFT_CT_EXPIRATION:
diff = (long)jiffies - (long)ct->timeout.expires;
if (diff < 0)
diff = 0;
- dest->data[0] = jiffies_to_msecs(diff);
+ *dest = jiffies_to_msecs(diff);
return;
case NFT_CT_HELPER:
if (ct->master == NULL)
@@ -93,9 +95,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
helper = rcu_dereference(help->helper);
if (helper == NULL)
goto err;
- if (strlen(helper->name) >= sizeof(dest->data))
- goto err;
- strncpy((char *)dest->data, helper->name, sizeof(dest->data));
+ strncpy((char *)dest, helper->name, NF_CT_HELPER_NAME_LEN);
return;
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS: {
@@ -103,58 +103,60 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
unsigned int size;
if (!labels) {
- memset(dest->data, 0, sizeof(dest->data));
+ memset(dest, 0, NF_CT_LABELS_MAX_SIZE);
return;
}
- BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE > sizeof(dest->data));
size = labels->words * sizeof(long);
-
- memcpy(dest->data, labels->bits, size);
- if (size < sizeof(dest->data))
- memset(((char *) dest->data) + size, 0,
- sizeof(dest->data) - size);
+ memcpy(dest, labels->bits, size);
+ if (size < NF_CT_LABELS_MAX_SIZE)
+ memset(((char *) dest) + size, 0,
+ NF_CT_LABELS_MAX_SIZE - size);
return;
}
#endif
+ default:
+ break;
}
tuple = &ct->tuplehash[priv->dir].tuple;
switch (priv->key) {
case NFT_CT_L3PROTOCOL:
- dest->data[0] = nf_ct_l3num(ct);
+ *dest = nf_ct_l3num(ct);
return;
case NFT_CT_SRC:
- memcpy(dest->data, tuple->src.u3.all,
+ memcpy(dest, tuple->src.u3.all,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return;
case NFT_CT_DST:
- memcpy(dest->data, tuple->dst.u3.all,
+ memcpy(dest, tuple->dst.u3.all,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return;
case NFT_CT_PROTOCOL:
- dest->data[0] = nf_ct_protonum(ct);
+ *dest = nf_ct_protonum(ct);
return;
case NFT_CT_PROTO_SRC:
- dest->data[0] = (__force __u16)tuple->src.u.all;
+ *dest = (__force __u16)tuple->src.u.all;
return;
case NFT_CT_PROTO_DST:
- dest->data[0] = (__force __u16)tuple->dst.u.all;
+ *dest = (__force __u16)tuple->dst.u.all;
return;
+ default:
+ break;
}
return;
err:
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
static void nft_ct_set_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
#ifdef CONFIG_NF_CONNTRACK_MARK
- u32 value = data[priv->sreg].data[0];
+ u32 value = regs->data[priv->sreg];
#endif
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
@@ -172,6 +174,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
}
break;
#endif
+ default:
+ break;
}
}
@@ -220,12 +224,17 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_ct *priv = nft_expr_priv(expr);
+ unsigned int len;
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
switch (priv->key) {
- case NFT_CT_STATE:
case NFT_CT_DIRECTION:
+ if (tb[NFTA_CT_DIRECTION] != NULL)
+ return -EINVAL;
+ len = sizeof(u8);
+ break;
+ case NFT_CT_STATE:
case NFT_CT_STATUS:
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
@@ -233,22 +242,54 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
#endif
+ case NFT_CT_EXPIRATION:
+ if (tb[NFTA_CT_DIRECTION] != NULL)
+ return -EINVAL;
+ len = sizeof(u32);
+ break;
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS:
+ if (tb[NFTA_CT_DIRECTION] != NULL)
+ return -EINVAL;
+ len = NF_CT_LABELS_MAX_SIZE;
+ break;
#endif
- case NFT_CT_EXPIRATION:
case NFT_CT_HELPER:
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
+ len = NF_CT_HELPER_NAME_LEN;
break;
+
case NFT_CT_L3PROTOCOL:
case NFT_CT_PROTOCOL:
+ if (tb[NFTA_CT_DIRECTION] == NULL)
+ return -EINVAL;
+ len = sizeof(u8);
+ break;
case NFT_CT_SRC:
case NFT_CT_DST:
+ if (tb[NFTA_CT_DIRECTION] == NULL)
+ return -EINVAL;
+
+ switch (ctx->afi->family) {
+ case NFPROTO_IPV4:
+ len = FIELD_SIZEOF(struct nf_conntrack_tuple,
+ src.u3.ip);
+ break;
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ len = FIELD_SIZEOF(struct nf_conntrack_tuple,
+ src.u3.ip6);
+ break;
+ default:
+ return -EAFNOSUPPORT;
+ }
+ break;
case NFT_CT_PROTO_SRC:
case NFT_CT_PROTO_DST:
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
+ len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u.all);
break;
default:
return -EOPNOTSUPP;
@@ -265,12 +306,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
}
}
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- return err;
-
- err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+ priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]);
+ err = nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, len);
if (err < 0)
return err;
@@ -286,20 +324,22 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_ct *priv = nft_expr_priv(expr);
+ unsigned int len;
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
switch (priv->key) {
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
+ len = FIELD_SIZEOF(struct nf_conn, mark);
break;
#endif
default:
return -EOPNOTSUPP;
}
- priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
- err = nft_validate_input_register(priv->sreg);
+ priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]);
+ err = nft_validate_register_load(priv->sreg, len);
if (err < 0)
return err;
@@ -320,7 +360,7 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ct *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_CT_DREG, htonl(priv->dreg)))
+ if (nft_dump_register(skb, NFTA_CT_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
goto nla_put_failure;
@@ -347,7 +387,7 @@ static int nft_ct_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ct *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_CT_SREG, htonl(priv->sreg)))
+ if (nft_dump_register(skb, NFTA_CT_SREG, priv->sreg))
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
goto nla_put_failure;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
new file mode 100644
index 000000000000..513a8ef60a59
--- /dev/null
+++ b/net/netfilter/nft_dynset.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2015 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+
+struct nft_dynset {
+ struct nft_set *set;
+ struct nft_set_ext_tmpl tmpl;
+ enum nft_dynset_ops op:8;
+ enum nft_registers sreg_key:8;
+ enum nft_registers sreg_data:8;
+ u64 timeout;
+ struct nft_expr *expr;
+ struct nft_set_binding binding;
+};
+
+static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
+ struct nft_regs *regs)
+{
+ const struct nft_dynset *priv = nft_expr_priv(expr);
+ struct nft_set_ext *ext;
+ u64 timeout;
+ void *elem;
+
+ if (set->size && !atomic_add_unless(&set->nelems, 1, set->size))
+ return NULL;
+
+ timeout = priv->timeout ? : set->timeout;
+ elem = nft_set_elem_init(set, &priv->tmpl,
+ &regs->data[priv->sreg_key],
+ &regs->data[priv->sreg_data],
+ timeout, GFP_ATOMIC);
+ if (elem == NULL) {
+ if (set->size)
+ atomic_dec(&set->nelems);
+ return NULL;
+ }
+
+ ext = nft_set_elem_ext(set, elem);
+ if (priv->expr != NULL)
+ nft_expr_clone(nft_set_ext_expr(ext), priv->expr);
+
+ return elem;
+}
+
+static void nft_dynset_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_dynset *priv = nft_expr_priv(expr);
+ struct nft_set *set = priv->set;
+ const struct nft_set_ext *ext;
+ const struct nft_expr *sexpr;
+ u64 timeout;
+
+ if (set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new,
+ expr, regs, &ext)) {
+ sexpr = NULL;
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+ sexpr = nft_set_ext_expr(ext);
+
+ if (priv->op == NFT_DYNSET_OP_UPDATE &&
+ nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
+ timeout = priv->timeout ? : set->timeout;
+ *nft_set_ext_expiration(ext) = jiffies + timeout;
+ } else if (sexpr == NULL)
+ goto out;
+
+ if (sexpr != NULL)
+ sexpr->ops->eval(sexpr, regs, pkt);
+ return;
+ }
+out:
+ regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
+ [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING },
+ [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 },
+ [NFTA_DYNSET_OP] = { .type = NLA_U32 },
+ [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
+ [NFTA_DYNSET_SREG_DATA] = { .type = NLA_U32 },
+ [NFTA_DYNSET_TIMEOUT] = { .type = NLA_U64 },
+ [NFTA_DYNSET_EXPR] = { .type = NLA_NESTED },
+};
+
+static int nft_dynset_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_dynset *priv = nft_expr_priv(expr);
+ struct nft_set *set;
+ u64 timeout;
+ int err;
+
+ if (tb[NFTA_DYNSET_SET_NAME] == NULL ||
+ tb[NFTA_DYNSET_OP] == NULL ||
+ tb[NFTA_DYNSET_SREG_KEY] == NULL)
+ return -EINVAL;
+
+ set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME]);
+ if (IS_ERR(set)) {
+ if (tb[NFTA_DYNSET_SET_ID])
+ set = nf_tables_set_lookup_byid(ctx->net,
+ tb[NFTA_DYNSET_SET_ID]);
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+ }
+
+ if (set->flags & NFT_SET_CONSTANT)
+ return -EBUSY;
+
+ priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP]));
+ switch (priv->op) {
+ case NFT_DYNSET_OP_ADD:
+ break;
+ case NFT_DYNSET_OP_UPDATE:
+ if (!(set->flags & NFT_SET_TIMEOUT))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ timeout = 0;
+ if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
+ if (!(set->flags & NFT_SET_TIMEOUT))
+ return -EINVAL;
+ timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+ }
+
+ priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
+ err = nft_validate_register_load(priv->sreg_key, set->klen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
+ if (!(set->flags & NFT_SET_MAP))
+ return -EINVAL;
+ if (set->dtype == NFT_DATA_VERDICT)
+ return -EOPNOTSUPP;
+
+ priv->sreg_data = nft_parse_register(tb[NFTA_DYNSET_SREG_DATA]);
+ err = nft_validate_register_load(priv->sreg_data, set->dlen);
+ if (err < 0)
+ return err;
+ } else if (set->flags & NFT_SET_MAP)
+ return -EINVAL;
+
+ if (tb[NFTA_DYNSET_EXPR] != NULL) {
+ if (!(set->flags & NFT_SET_EVAL))
+ return -EINVAL;
+ if (!(set->flags & NFT_SET_ANONYMOUS))
+ return -EOPNOTSUPP;
+
+ priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]);
+ if (IS_ERR(priv->expr))
+ return PTR_ERR(priv->expr);
+
+ err = -EOPNOTSUPP;
+ if (!(priv->expr->ops->type->flags & NFT_EXPR_STATEFUL))
+ goto err1;
+ } else if (set->flags & NFT_SET_EVAL)
+ return -EINVAL;
+
+ nft_set_ext_prepare(&priv->tmpl);
+ nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen);
+ if (set->flags & NFT_SET_MAP)
+ nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_DATA, set->dlen);
+ if (priv->expr != NULL)
+ nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR,
+ priv->expr->ops->size);
+ if (set->flags & NFT_SET_TIMEOUT) {
+ if (timeout || set->timeout)
+ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
+ }
+
+ priv->timeout = timeout;
+
+ err = nf_tables_bind_set(ctx, set, &priv->binding);
+ if (err < 0)
+ goto err1;
+
+ priv->set = set;
+ return 0;
+
+err1:
+ if (priv->expr != NULL)
+ nft_expr_destroy(ctx, priv->expr);
+ return err;
+}
+
+static void nft_dynset_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_dynset *priv = nft_expr_priv(expr);
+
+ nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+ if (priv->expr != NULL)
+ nft_expr_destroy(ctx, priv->expr);
+}
+
+static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_dynset *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_DYNSET_SREG_KEY, priv->sreg_key))
+ goto nla_put_failure;
+ if (priv->set->flags & NFT_SET_MAP &&
+ nft_dump_register(skb, NFTA_DYNSET_SREG_DATA, priv->sreg_data))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_DYNSET_OP, htonl(priv->op)))
+ goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
+ goto nla_put_failure;
+ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout)))
+ goto nla_put_failure;
+ if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_dynset_type;
+static const struct nft_expr_ops nft_dynset_ops = {
+ .type = &nft_dynset_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_dynset)),
+ .eval = nft_dynset_eval,
+ .init = nft_dynset_init,
+ .destroy = nft_dynset_destroy,
+ .dump = nft_dynset_dump,
+};
+
+static struct nft_expr_type nft_dynset_type __read_mostly = {
+ .name = "dynset",
+ .ops = &nft_dynset_ops,
+ .policy = nft_dynset_policy,
+ .maxattr = NFTA_DYNSET_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_dynset_module_init(void)
+{
+ return nft_register_expr(&nft_dynset_type);
+}
+
+void nft_dynset_module_exit(void)
+{
+ nft_unregister_expr(&nft_dynset_type);
+}
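A rough sketch (not part of the patch) of the ->update() contract that nft_dynset_eval() relies on: look the key up and, if it is missing, build a new element through the constructor passed in (nft_dynset_new() above). The example_backend_*() helpers are hypothetical placeholders; the real implementation is the nft_hash_update() hunk further down.

static bool example_set_update(struct nft_set *set, const u32 *key,
			       void *(*new)(struct nft_set *,
					    const struct nft_expr *,
					    struct nft_regs *regs),
			       const struct nft_expr *expr,
			       struct nft_regs *regs,
			       const struct nft_set_ext **ext)
{
	void *elem = example_backend_lookup(set, key);	/* hypothetical */

	if (elem == NULL) {
		elem = new(set, expr, regs);		/* e.g. nft_dynset_new() */
		if (elem == NULL)
			return false;
		example_backend_insert(set, elem);	/* hypothetical */
	}
	*ext = example_backend_ext(set, elem);		/* hypothetical */
	return true;
}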
diff --git a/net/netfilter/nft_expr_template.c b/net/netfilter/nft_expr_template.c
deleted file mode 100644
index b6eed4d5a096..000000000000
--- a/net/netfilter/nft_expr_template.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-
-struct nft_template {
-
-};
-
-static void nft_template_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
-{
- struct nft_template *priv = nft_expr_priv(expr);
-
-}
-
-static const struct nla_policy nft_template_policy[NFTA_TEMPLATE_MAX + 1] = {
- [NFTA_TEMPLATE_ATTR] = { .type = NLA_U32 },
-};
-
-static int nft_template_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_template *priv = nft_expr_priv(expr);
-
- return 0;
-}
-
-static void nft_template_destroy(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
-{
- struct nft_template *priv = nft_expr_priv(expr);
-
-}
-
-static int nft_template_dump(struct sk_buff *skb, const struct nft_expr *expr)
-{
- const struct nft_template *priv = nft_expr_priv(expr);
-
- NLA_PUT_BE32(skb, NFTA_TEMPLATE_ATTR, priv->field);
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
-static struct nft_expr_type nft_template_type;
-static const struct nft_expr_ops nft_template_ops = {
- .type = &nft_template_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_template)),
- .eval = nft_template_eval,
- .init = nft_template_init,
- .destroy = nft_template_destroy,
- .dump = nft_template_dump,
-};
-
-static struct nft_expr_type nft_template_type __read_mostly = {
- .name = "template",
- .ops = &nft_template_ops,
- .policy = nft_template_policy,
- .maxattr = NFTA_TEMPLATE_MAX,
- .owner = THIS_MODULE,
-};
-
-static int __init nft_template_module_init(void)
-{
- return nft_register_expr(&nft_template_type);
-}
-
-static void __exit nft_template_module_exit(void)
-{
- nft_unregister_expr(&nft_template_type);
-}
-
-module_init(nft_template_module_init);
-module_exit(nft_template_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_EXPR("template");
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 55c939f5371f..ba7aed13e174 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -26,11 +26,11 @@ struct nft_exthdr {
};
static void nft_exthdr_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- struct nft_data *dest = &data[priv->dreg];
+ u32 *dest = &regs->data[priv->dreg];
unsigned int offset = 0;
int err;
@@ -39,11 +39,12 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
goto err;
offset += priv->offset;
- if (skb_copy_bits(pkt->skb, offset, dest->data, priv->len) < 0)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
@@ -58,7 +59,6 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- int err;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -69,22 +69,17 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
- if (priv->len == 0 ||
- priv->len > FIELD_SIZEOF(struct nft_data, data))
- return -EINVAL;
+ priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_EXTHDR_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- return err;
- return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, priv->len);
}
static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_exthdr *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_EXTHDR_DREG, htonl(priv->dreg)))
+ if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
goto nla_put_failure;
if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
goto nla_put_failure;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 37c15e674884..3f9d45d3d9b7 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -15,6 +15,7 @@
#include <linux/log2.h>
#include <linux/jhash.h>
#include <linux/netlink.h>
+#include <linux/workqueue.h>
#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
@@ -23,119 +24,175 @@
/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_HASH_ELEMENT_HINT 3
+struct nft_hash {
+ struct rhashtable ht;
+ struct delayed_work gc_work;
+};
+
struct nft_hash_elem {
struct rhash_head node;
- struct nft_data key;
- struct nft_data data[];
+ struct nft_set_ext ext;
+};
+
+struct nft_hash_cmp_arg {
+ const struct nft_set *set;
+ const u32 *key;
+ u8 genmask;
};
-static bool nft_hash_lookup(const struct nft_set *set,
- const struct nft_data *key,
- struct nft_data *data)
+static const struct rhashtable_params nft_hash_params;
+
+static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
+{
+ const struct nft_hash_cmp_arg *arg = data;
+
+ return jhash(arg->key, len, seed);
+}
+
+static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
{
- struct rhashtable *priv = nft_set_priv(set);
+ const struct nft_hash_elem *he = data;
+
+ return jhash(nft_set_ext_key(&he->ext), len, seed);
+}
+
+static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct nft_hash_cmp_arg *x = arg->key;
+ const struct nft_hash_elem *he = ptr;
+
+ if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
+ return 1;
+ if (nft_set_elem_expired(&he->ext))
+ return 1;
+ if (!nft_set_elem_active(&he->ext, x->genmask))
+ return 1;
+ return 0;
+}
+
+static bool nft_hash_lookup(const struct nft_set *set, const u32 *key,
+ const struct nft_set_ext **ext)
+{
+ struct nft_hash *priv = nft_set_priv(set);
const struct nft_hash_elem *he;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = nft_genmask_cur(read_pnet(&set->pnet)),
+ .set = set,
+ .key = key,
+ };
- he = rhashtable_lookup(priv, key);
- if (he && set->flags & NFT_SET_MAP)
- nft_data_copy(data, he->data);
+ he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+ if (he != NULL)
+ *ext = &he->ext;
return !!he;
}
-static int nft_hash_insert(const struct nft_set *set,
- const struct nft_set_elem *elem)
+static bool nft_hash_update(struct nft_set *set, const u32 *key,
+ void *(*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *regs),
+ const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_set_ext **ext)
{
- struct rhashtable *priv = nft_set_priv(set);
+ struct nft_hash *priv = nft_set_priv(set);
struct nft_hash_elem *he;
- unsigned int size;
-
- if (elem->flags != 0)
- return -EINVAL;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = NFT_GENMASK_ANY,
+ .set = set,
+ .key = key,
+ };
- size = sizeof(*he);
- if (set->flags & NFT_SET_MAP)
- size += sizeof(he->data[0]);
+ he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+ if (he != NULL)
+ goto out;
- he = kzalloc(size, GFP_KERNEL);
+ he = new(set, expr, regs);
if (he == NULL)
- return -ENOMEM;
-
- nft_data_copy(&he->key, &elem->key);
- if (set->flags & NFT_SET_MAP)
- nft_data_copy(he->data, &elem->data);
-
- rhashtable_insert(priv, &he->node);
+ goto err1;
+ if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
+ nft_hash_params))
+ goto err2;
+out:
+ *ext = &he->ext;
+ return true;
- return 0;
+err2:
+ nft_set_elem_destroy(set, he);
+err1:
+ return false;
}
-static void nft_hash_elem_destroy(const struct nft_set *set,
- struct nft_hash_elem *he)
+static int nft_hash_insert(const struct nft_set *set,
+ const struct nft_set_elem *elem)
{
- nft_data_uninit(&he->key, NFT_DATA_VALUE);
- if (set->flags & NFT_SET_MAP)
- nft_data_uninit(he->data, set->dtype);
- kfree(he);
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he = elem->priv;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = nft_genmask_next(read_pnet(&set->pnet)),
+ .set = set,
+ .key = elem->key.val.data,
+ };
+
+ return rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
+ nft_hash_params);
}
-static void nft_hash_remove(const struct nft_set *set,
- const struct nft_set_elem *elem)
+static void nft_hash_activate(const struct nft_set *set,
+ const struct nft_set_elem *elem)
{
- struct rhashtable *priv = nft_set_priv(set);
+ struct nft_hash_elem *he = elem->priv;
- rhashtable_remove(priv, elem->cookie);
- synchronize_rcu();
- kfree(elem->cookie);
+ nft_set_elem_change_active(set, &he->ext);
+ nft_set_elem_clear_busy(&he->ext);
}
-struct nft_compare_arg {
- const struct nft_set *set;
- struct nft_set_elem *elem;
-};
-
-static bool nft_hash_compare(void *ptr, void *arg)
+static void *nft_hash_deactivate(const struct nft_set *set,
+ const struct nft_set_elem *elem)
{
- struct nft_hash_elem *he = ptr;
- struct nft_compare_arg *x = arg;
-
- if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) {
- x->elem->cookie = he;
- x->elem->flags = 0;
- if (x->set->flags & NFT_SET_MAP)
- nft_data_copy(&x->elem->data, he->data);
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = nft_genmask_next(read_pnet(&set->pnet)),
+ .set = set,
+ .key = elem->key.val.data,
+ };
- return true;
+ rcu_read_lock();
+ he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+ if (he != NULL) {
+ if (!nft_set_elem_mark_busy(&he->ext))
+ nft_set_elem_change_active(set, &he->ext);
+ else
+ he = NULL;
}
+ rcu_read_unlock();
- return false;
+ return he;
}
-static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
+static void nft_hash_remove(const struct nft_set *set,
+ const struct nft_set_elem *elem)
{
- struct rhashtable *priv = nft_set_priv(set);
- struct nft_compare_arg arg = {
- .set = set,
- .elem = elem,
- };
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he = elem->priv;
- if (rhashtable_lookup_compare(priv, &elem->key,
- &nft_hash_compare, &arg))
- return 0;
-
- return -ENOENT;
+ rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
}
static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
struct nft_set_iter *iter)
{
- struct rhashtable *priv = nft_set_priv(set);
- const struct nft_hash_elem *he;
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he;
struct rhashtable_iter hti;
struct nft_set_elem elem;
+ u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
int err;
- err = rhashtable_walk_init(priv, &hti);
+ err = rhashtable_walk_init(&priv->ht, &hti);
iter->err = err;
if (err)
return;
@@ -159,11 +216,12 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
if (iter->count < iter->skip)
goto cont;
+ if (nft_set_elem_expired(&he->ext))
+ goto cont;
+ if (!nft_set_elem_active(&he->ext, genmask))
+ goto cont;
- memcpy(&elem.key, &he->key, sizeof(elem.key));
- if (set->flags & NFT_SET_MAP)
- memcpy(&elem.data, he->data, sizeof(elem.data));
- elem.flags = 0;
+ elem.priv = he;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0)
@@ -178,47 +236,102 @@ out:
rhashtable_walk_exit(&hti);
}
+static void nft_hash_gc(struct work_struct *work)
+{
+ struct nft_set *set;
+ struct nft_hash_elem *he;
+ struct nft_hash *priv;
+ struct nft_set_gc_batch *gcb = NULL;
+ struct rhashtable_iter hti;
+ int err;
+
+ priv = container_of(work, struct nft_hash, gc_work.work);
+ set = nft_set_container_of(priv);
+
+ err = rhashtable_walk_init(&priv->ht, &hti);
+ if (err)
+ goto schedule;
+
+ err = rhashtable_walk_start(&hti);
+ if (err && err != -EAGAIN)
+ goto out;
+
+ while ((he = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(he)) {
+ if (PTR_ERR(he) != -EAGAIN)
+ goto out;
+ continue;
+ }
+
+ if (!nft_set_elem_expired(&he->ext))
+ continue;
+ if (nft_set_elem_mark_busy(&he->ext))
+ continue;
+
+ gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+ if (gcb == NULL)
+ goto out;
+ rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
+ atomic_dec(&set->nelems);
+ nft_set_gc_batch_add(gcb, he);
+ }
+out:
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ nft_set_gc_batch_complete(gcb);
+schedule:
+ queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ nft_set_gc_interval(set));
+}
+
static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
{
- return sizeof(struct rhashtable);
+ return sizeof(struct nft_hash);
}
+static const struct rhashtable_params nft_hash_params = {
+ .head_offset = offsetof(struct nft_hash_elem, node),
+ .hashfn = nft_hash_key,
+ .obj_hashfn = nft_hash_obj,
+ .obj_cmpfn = nft_hash_cmp,
+ .automatic_shrinking = true,
+};
+
static int nft_hash_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const tb[])
{
- struct rhashtable *priv = nft_set_priv(set);
- struct rhashtable_params params = {
- .nelem_hint = desc->size ? : NFT_HASH_ELEMENT_HINT,
- .head_offset = offsetof(struct nft_hash_elem, node),
- .key_offset = offsetof(struct nft_hash_elem, key),
- .key_len = set->klen,
- .hashfn = jhash,
- };
+ struct nft_hash *priv = nft_set_priv(set);
+ struct rhashtable_params params = nft_hash_params;
+ int err;
- return rhashtable_init(priv, &params);
+ params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
+ params.key_len = set->klen;
+
+ err = rhashtable_init(&priv->ht, &params);
+ if (err < 0)
+ return err;
+
+ INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
+ if (set->flags & NFT_SET_TIMEOUT)
+ queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ nft_set_gc_interval(set));
+ return 0;
}
-static void nft_hash_destroy(const struct nft_set *set)
+static void nft_hash_elem_destroy(void *ptr, void *arg)
{
- struct rhashtable *priv = nft_set_priv(set);
- const struct bucket_table *tbl;
- struct nft_hash_elem *he;
- struct rhash_head *pos, *next;
- unsigned int i;
-
- /* Stop an eventual async resizing */
- priv->being_destroyed = true;
- mutex_lock(&priv->mutex);
+ nft_set_elem_destroy((const struct nft_set *)arg, ptr);
+}
- tbl = rht_dereference(priv->tbl, priv);
- for (i = 0; i < tbl->size; i++) {
- rht_for_each_entry_safe(he, pos, next, tbl, i, node)
- nft_hash_elem_destroy(set, he);
- }
- mutex_unlock(&priv->mutex);
+static void nft_hash_destroy(const struct nft_set *set)
+{
+ struct nft_hash *priv = nft_set_priv(set);
- rhashtable_destroy(priv);
+ cancel_delayed_work_sync(&priv->gc_work);
+ rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
+ (void *)set);
}
static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
@@ -227,11 +340,8 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
unsigned int esize;
esize = sizeof(struct nft_hash_elem);
- if (features & NFT_SET_MAP)
- esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
-
if (desc->size) {
- est->size = sizeof(struct rhashtable) +
+ est->size = sizeof(struct nft_hash) +
roundup_pow_of_two(desc->size * 4 / 3) *
sizeof(struct nft_hash_elem *) +
desc->size * esize;
@@ -251,15 +361,18 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
static struct nft_set_ops nft_hash_ops __read_mostly = {
.privsize = nft_hash_privsize,
+ .elemsize = offsetof(struct nft_hash_elem, ext),
.estimate = nft_hash_estimate,
.init = nft_hash_init,
.destroy = nft_hash_destroy,
- .get = nft_hash_get,
.insert = nft_hash_insert,
+ .activate = nft_hash_activate,
+ .deactivate = nft_hash_deactivate,
.remove = nft_hash_remove,
.lookup = nft_hash_lookup,
+ .update = nft_hash_update,
.walk = nft_hash_walk,
- .features = NFT_SET_MAP,
+ .features = NFT_SET_MAP | NFT_SET_TIMEOUT,
.owner = THIS_MODULE,
};
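As a side note, a small sketch (not part of the patch) of how a set backend is now expected to filter elements, given that per-element state lives in the trailing struct nft_set_ext: expired or generation-inactive entries are skipped, mirroring the checks added in nft_hash_cmp() and nft_hash_walk() above.

static bool example_elem_visible(const struct nft_set_ext *ext, u8 genmask)
{
	/* Sketch only: reuses the helpers seen in the hunks above. */
	if (nft_set_elem_expired(ext))
		return false;
	if (!nft_set_elem_active(ext, genmask))
		return false;
	return true;
}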
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 810385eb7249..db3b746858e3 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -24,12 +24,12 @@ struct nft_immediate_expr {
};
static void nft_immediate_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
- nft_data_copy(&data[priv->dreg], &priv->data);
+ nft_data_copy(&regs->data[priv->dreg], &priv->data, priv->dlen);
}
static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
@@ -49,17 +49,15 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
tb[NFTA_IMMEDIATE_DATA] == NULL)
return -EINVAL;
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_IMMEDIATE_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- return err;
-
- err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
+ err = nft_data_init(ctx, &priv->data, sizeof(priv->data), &desc,
+ tb[NFTA_IMMEDIATE_DATA]);
if (err < 0)
return err;
priv->dlen = desc.len;
- err = nft_validate_data_load(ctx, priv->dreg, &priv->data, desc.type);
+ priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]);
+ err = nft_validate_register_store(ctx, priv->dreg, &priv->data,
+ desc.type, desc.len);
if (err < 0)
goto err1;
@@ -81,7 +79,7 @@ static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_IMMEDIATE_DREG, htonl(priv->dreg)))
+ if (nft_dump_register(skb, NFTA_IMMEDIATE_DREG, priv->dreg))
goto nla_put_failure;
return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 85da5bd02f64..435c1ccd6c0e 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -27,7 +27,7 @@ struct nft_limit {
};
static void nft_limit_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_limit *priv = nft_expr_priv(expr);
@@ -45,7 +45,7 @@ static void nft_limit_eval(const struct nft_expr *expr,
}
spin_unlock_bh(&limit_lock);
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
@@ -98,6 +98,7 @@ static struct nft_expr_type nft_limit_type __read_mostly = {
.ops = &nft_limit_ops,
.policy = nft_limit_policy,
.maxattr = NFTA_LIMIT_MAX,
+ .flags = NFT_EXPR_STATEFUL,
.owner = THIS_MODULE,
};
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index bde05f28cf14..a13d6a386d63 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -27,7 +27,7 @@ struct nft_log {
};
static void nft_log_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_log *priv = nft_expr_priv(expr);
@@ -78,7 +78,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
li->u.log.level =
ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
} else {
- li->u.log.level = 4;
+ li->u.log.level = LOGLEVEL_WARNING;
}
if (tb[NFTA_LOG_FLAGS] != NULL) {
li->u.log.logflags =
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 9615b8b9fb37..b3c31ef8015d 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -26,15 +26,20 @@ struct nft_lookup {
};
static void nft_lookup_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_lookup *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;
+ const struct nft_set_ext *ext;
- if (set->ops->lookup(set, &data[priv->sreg], &data[priv->dreg]))
+ if (set->ops->lookup(set, &regs->data[priv->sreg], &ext)) {
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(&regs->data[priv->dreg],
+ nft_set_ext_data(ext), set->dlen);
return;
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ }
+ regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
@@ -66,8 +71,11 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
return PTR_ERR(set);
}
- priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
- err = nft_validate_input_register(priv->sreg);
+ if (set->flags & NFT_SET_EVAL)
+ return -EOPNOTSUPP;
+
+ priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
+ err = nft_validate_register_load(priv->sreg, set->klen);
if (err < 0)
return err;
@@ -75,19 +83,16 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
if (!(set->flags & NFT_SET_MAP))
return -EINVAL;
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_DREG]));
- err = nft_validate_output_register(priv->dreg);
+ priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]);
+ err = nft_validate_register_store(ctx, priv->dreg, NULL,
+ set->dtype, set->dlen);
if (err < 0)
return err;
-
- if (priv->dreg == NFT_REG_VERDICT) {
- if (set->dtype != NFT_DATA_VERDICT)
- return -EINVAL;
- } else if (set->dtype == NFT_DATA_VERDICT)
- return -EINVAL;
} else if (set->flags & NFT_SET_MAP)
return -EINVAL;
+ priv->binding.flags = set->flags & NFT_SET_MAP;
+
err = nf_tables_bind_set(ctx, set, &priv->binding);
if (err < 0)
return err;
@@ -110,10 +115,10 @@ static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_LOOKUP_SREG, htonl(priv->sreg)))
+ if (nft_dump_register(skb, NFTA_LOOKUP_SREG, priv->sreg))
goto nla_put_failure;
if (priv->set->flags & NFT_SET_MAP)
- if (nla_put_be32(skb, NFTA_LOOKUP_DREG, htonl(priv->dreg)))
+ if (nft_dump_register(skb, NFTA_LOOKUP_DREG, priv->dreg))
goto nla_put_failure;
return 0;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e99911eda915..52561e1c31e2 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -25,65 +25,68 @@
#include <net/netfilter/nft_meta.h>
void nft_meta_get_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
const struct net_device *in = pkt->in, *out = pkt->out;
- struct nft_data *dest = &data[priv->dreg];
+ u32 *dest = &regs->data[priv->dreg];
switch (priv->key) {
case NFT_META_LEN:
- dest->data[0] = skb->len;
+ *dest = skb->len;
break;
case NFT_META_PROTOCOL:
- *(__be16 *)dest->data = skb->protocol;
+ *dest = 0;
+ *(__be16 *)dest = skb->protocol;
break;
case NFT_META_NFPROTO:
- dest->data[0] = pkt->ops->pf;
+ *dest = pkt->ops->pf;
break;
case NFT_META_L4PROTO:
- dest->data[0] = pkt->tprot;
+ *dest = pkt->tprot;
break;
case NFT_META_PRIORITY:
- dest->data[0] = skb->priority;
+ *dest = skb->priority;
break;
case NFT_META_MARK:
- dest->data[0] = skb->mark;
+ *dest = skb->mark;
break;
case NFT_META_IIF:
if (in == NULL)
goto err;
- dest->data[0] = in->ifindex;
+ *dest = in->ifindex;
break;
case NFT_META_OIF:
if (out == NULL)
goto err;
- dest->data[0] = out->ifindex;
+ *dest = out->ifindex;
break;
case NFT_META_IIFNAME:
if (in == NULL)
goto err;
- strncpy((char *)dest->data, in->name, sizeof(dest->data));
+ strncpy((char *)dest, in->name, IFNAMSIZ);
break;
case NFT_META_OIFNAME:
if (out == NULL)
goto err;
- strncpy((char *)dest->data, out->name, sizeof(dest->data));
+ strncpy((char *)dest, out->name, IFNAMSIZ);
break;
case NFT_META_IIFTYPE:
if (in == NULL)
goto err;
- *(u16 *)dest->data = in->type;
+ *dest = 0;
+ *(u16 *)dest = in->type;
break;
case NFT_META_OIFTYPE:
if (out == NULL)
goto err;
- *(u16 *)dest->data = out->type;
+ *dest = 0;
+ *(u16 *)dest = out->type;
break;
case NFT_META_SKUID:
- if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+ if (skb->sk == NULL || !sk_fullsock(skb->sk))
goto err;
read_lock_bh(&skb->sk->sk_callback_lock);
@@ -93,13 +96,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
goto err;
}
- dest->data[0] =
- from_kuid_munged(&init_user_ns,
+ *dest = from_kuid_munged(&init_user_ns,
skb->sk->sk_socket->file->f_cred->fsuid);
read_unlock_bh(&skb->sk->sk_callback_lock);
break;
case NFT_META_SKGID:
- if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+ if (skb->sk == NULL || !sk_fullsock(skb->sk))
goto err;
read_lock_bh(&skb->sk->sk_callback_lock);
@@ -108,8 +110,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
read_unlock_bh(&skb->sk->sk_callback_lock);
goto err;
}
- dest->data[0] =
- from_kgid_munged(&init_user_ns,
+ *dest = from_kgid_munged(&init_user_ns,
skb->sk->sk_socket->file->f_cred->fsgid);
read_unlock_bh(&skb->sk->sk_callback_lock);
break;
@@ -119,33 +120,33 @@ void nft_meta_get_eval(const struct nft_expr *expr,
if (dst == NULL)
goto err;
- dest->data[0] = dst->tclassid;
+ *dest = dst->tclassid;
break;
}
#endif
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
- dest->data[0] = skb->secmark;
+ *dest = skb->secmark;
break;
#endif
case NFT_META_PKTTYPE:
if (skb->pkt_type != PACKET_LOOPBACK) {
- dest->data[0] = skb->pkt_type;
+ *dest = skb->pkt_type;
break;
}
switch (pkt->ops->pf) {
case NFPROTO_IPV4:
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
- dest->data[0] = PACKET_MULTICAST;
+ *dest = PACKET_MULTICAST;
else
- dest->data[0] = PACKET_BROADCAST;
+ *dest = PACKET_BROADCAST;
break;
case NFPROTO_IPV6:
if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
- dest->data[0] = PACKET_MULTICAST;
+ *dest = PACKET_MULTICAST;
else
- dest->data[0] = PACKET_BROADCAST;
+ *dest = PACKET_BROADCAST;
break;
default:
WARN_ON(1);
@@ -153,23 +154,22 @@ void nft_meta_get_eval(const struct nft_expr *expr,
}
break;
case NFT_META_CPU:
- dest->data[0] = smp_processor_id();
+ *dest = raw_smp_processor_id();
break;
case NFT_META_IIFGROUP:
if (in == NULL)
goto err;
- dest->data[0] = in->group;
+ *dest = in->group;
break;
case NFT_META_OIFGROUP:
if (out == NULL)
goto err;
- dest->data[0] = out->group;
+ *dest = out->group;
break;
case NFT_META_CGROUP:
- if (skb->sk == NULL)
- break;
-
- dest->data[0] = skb->sk->sk_classid;
+ if (skb->sk == NULL || !sk_fullsock(skb->sk))
+ goto err;
+ *dest = skb->sk->sk_classid;
break;
default:
WARN_ON(1);
@@ -178,17 +178,17 @@ void nft_meta_get_eval(const struct nft_expr *expr,
return;
err:
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
EXPORT_SYMBOL_GPL(nft_meta_get_eval);
void nft_meta_set_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_meta *meta = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
- u32 value = data[meta->sreg].data[0];
+ u32 value = regs->data[meta->sreg];
switch (meta->key) {
case NFT_META_MARK:
@@ -218,22 +218,22 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
- int err;
+ unsigned int len;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
- case NFT_META_LEN:
case NFT_META_PROTOCOL:
+ case NFT_META_IIFTYPE:
+ case NFT_META_OIFTYPE:
+ len = sizeof(u16);
+ break;
case NFT_META_NFPROTO:
case NFT_META_L4PROTO:
+ case NFT_META_LEN:
case NFT_META_PRIORITY:
case NFT_META_MARK:
case NFT_META_IIF:
case NFT_META_OIF:
- case NFT_META_IIFNAME:
- case NFT_META_OIFNAME:
- case NFT_META_IIFTYPE:
- case NFT_META_OIFTYPE:
case NFT_META_SKUID:
case NFT_META_SKGID:
#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -247,21 +247,19 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
case NFT_META_IIFGROUP:
case NFT_META_OIFGROUP:
case NFT_META_CGROUP:
+ len = sizeof(u32);
+ break;
+ case NFT_META_IIFNAME:
+ case NFT_META_OIFNAME:
+ len = IFNAMSIZ;
break;
default:
return -EOPNOTSUPP;
}
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- return err;
-
- err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
- if (err < 0)
- return err;
-
- return 0;
+ priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, len);
}
EXPORT_SYMBOL_GPL(nft_meta_get_init);
@@ -270,20 +268,24 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
+ unsigned int len;
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_MARK:
case NFT_META_PRIORITY:
+ len = sizeof(u32);
+ break;
case NFT_META_NFTRACE:
+ len = sizeof(u8);
break;
default:
return -EOPNOTSUPP;
}
- priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG]));
- err = nft_validate_input_register(priv->sreg);
+ priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
+ err = nft_validate_register_load(priv->sreg, len);
if (err < 0)
return err;
@@ -298,7 +300,7 @@ int nft_meta_get_dump(struct sk_buff *skb,
if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_META_DREG, htonl(priv->dreg)))
+ if (nft_dump_register(skb, NFTA_META_DREG, priv->dreg))
goto nla_put_failure;
return 0;
@@ -314,7 +316,7 @@ int nft_meta_set_dump(struct sk_buff *skb,
if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_META_SREG, htonl(priv->sreg)))
+ if (nft_dump_register(skb, NFTA_META_SREG, priv->sreg))
goto nla_put_failure;
return 0;
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index a0837c6c9283..ee2d71753746 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -37,7 +37,7 @@ struct nft_nat {
};
static void nft_nat_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_nat *priv = nft_expr_priv(expr);
@@ -49,33 +49,32 @@ static void nft_nat_eval(const struct nft_expr *expr,
if (priv->sreg_addr_min) {
if (priv->family == AF_INET) {
range.min_addr.ip = (__force __be32)
- data[priv->sreg_addr_min].data[0];
+ regs->data[priv->sreg_addr_min];
range.max_addr.ip = (__force __be32)
- data[priv->sreg_addr_max].data[0];
+ regs->data[priv->sreg_addr_max];
} else {
memcpy(range.min_addr.ip6,
- data[priv->sreg_addr_min].data,
- sizeof(struct nft_data));
+ &regs->data[priv->sreg_addr_min],
+ sizeof(range.min_addr.ip6));
memcpy(range.max_addr.ip6,
- data[priv->sreg_addr_max].data,
- sizeof(struct nft_data));
+ &regs->data[priv->sreg_addr_max],
+ sizeof(range.max_addr.ip6));
}
range.flags |= NF_NAT_RANGE_MAP_IPS;
}
if (priv->sreg_proto_min) {
range.min_proto.all =
- *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_min];
range.max_proto.all =
- *(__be16 *)&data[priv->sreg_proto_max].data[0];
+ *(__be16 *)&regs->data[priv->sreg_proto_max];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
range.flags |= priv->flags;
- data[NFT_REG_VERDICT].verdict =
- nf_nat_setup_info(ct, &range, priv->type);
+ regs->verdict.code = nf_nat_setup_info(ct, &range, priv->type);
}
static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
@@ -119,6 +118,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_nat *priv = nft_expr_priv(expr);
+ unsigned int alen, plen;
u32 family;
int err;
@@ -146,25 +146,34 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return -EINVAL;
family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
- if (family != AF_INET && family != AF_INET6)
- return -EAFNOSUPPORT;
if (family != ctx->afi->family)
return -EOPNOTSUPP;
+
+ switch (family) {
+ case NFPROTO_IPV4:
+ alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip);
+ break;
+ case NFPROTO_IPV6:
+ alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6);
+ break;
+ default:
+ return -EAFNOSUPPORT;
+ }
priv->family = family;
if (tb[NFTA_NAT_REG_ADDR_MIN]) {
priv->sreg_addr_min =
- ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MIN]));
-
- err = nft_validate_input_register(priv->sreg_addr_min);
+ nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]);
+ err = nft_validate_register_load(priv->sreg_addr_min, alen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_ADDR_MAX]) {
priv->sreg_addr_max =
- ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MAX]));
+ nft_parse_register(tb[NFTA_NAT_REG_ADDR_MAX]);
- err = nft_validate_input_register(priv->sreg_addr_max);
+ err = nft_validate_register_load(priv->sreg_addr_max,
+ alen);
if (err < 0)
return err;
} else {
@@ -172,19 +181,21 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
}
}
+ plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
if (tb[NFTA_NAT_REG_PROTO_MIN]) {
priv->sreg_proto_min =
- ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MIN]));
+ nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
- err = nft_validate_input_register(priv->sreg_proto_min);
+ err = nft_validate_register_load(priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_PROTO_MAX]) {
priv->sreg_proto_max =
- ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MAX]));
+ nft_parse_register(tb[NFTA_NAT_REG_PROTO_MAX]);
- err = nft_validate_input_register(priv->sreg_proto_max);
+ err = nft_validate_register_load(priv->sreg_proto_max,
+ plen);
if (err < 0)
return err;
} else {
@@ -220,18 +231,18 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (priv->sreg_addr_min) {
- if (nla_put_be32(skb, NFTA_NAT_REG_ADDR_MIN,
- htonl(priv->sreg_addr_min)) ||
- nla_put_be32(skb, NFTA_NAT_REG_ADDR_MAX,
- htonl(priv->sreg_addr_max)))
+ if (nft_dump_register(skb, NFTA_NAT_REG_ADDR_MIN,
+ priv->sreg_addr_min) ||
+ nft_dump_register(skb, NFTA_NAT_REG_ADDR_MAX,
+ priv->sreg_addr_max))
goto nla_put_failure;
}
if (priv->sreg_proto_min) {
- if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
- htonl(priv->sreg_proto_min)) ||
- nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
- htonl(priv->sreg_proto_max)))
+ if (nft_dump_register(skb, NFTA_NAT_REG_PROTO_MIN,
+ priv->sreg_proto_min) ||
+ nft_dump_register(skb, NFTA_NAT_REG_PROTO_MAX,
+ priv->sreg_proto_max))
goto nla_put_failure;
}
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 85daa84bfdfe..94fb3b27a2c5 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -18,12 +18,12 @@
#include <net/netfilter/nf_tables.h>
static void nft_payload_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
- struct nft_data *dest = &data[priv->dreg];
+ u32 *dest = &regs->data[priv->dreg];
int offset;
switch (priv->base) {
@@ -43,11 +43,12 @@ static void nft_payload_eval(const struct nft_expr *expr,
}
offset += priv->offset;
- if (skb_copy_bits(skb, offset, dest->data, priv->len) < 0)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+ regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
@@ -62,24 +63,21 @@ static int nft_payload_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_payload *priv = nft_expr_priv(expr);
- int err;
priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+ priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- return err;
- return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, priv->len);
}
static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_payload *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_PAYLOAD_DREG, htonl(priv->dreg)) ||
+ if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
@@ -131,9 +129,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
}
offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
- if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
- return ERR_PTR(-EINVAL);
+ len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
base != NFT_PAYLOAD_LL_HEADER)
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index e8ae2f6bf232..96805d21d618 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -28,7 +28,7 @@ struct nft_queue {
};
static void nft_queue_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_queue *priv = nft_expr_priv(expr);
@@ -51,7 +51,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
if (priv->flags & NFT_QUEUE_FLAG_BYPASS)
ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
- data[NFT_REG_VERDICT].verdict = ret;
+ regs->verdict.code = ret;
}
static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index 46214f245665..1c30f41cff5b 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -26,25 +26,25 @@ struct nft_rbtree {
struct nft_rbtree_elem {
struct rb_node node;
- u16 flags;
- struct nft_data key;
- struct nft_data data[];
+ struct nft_set_ext ext;
};
-static bool nft_rbtree_lookup(const struct nft_set *set,
- const struct nft_data *key,
- struct nft_data *data)
+
+static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key,
+ const struct nft_set_ext **ext)
{
const struct nft_rbtree *priv = nft_set_priv(set);
const struct nft_rbtree_elem *rbe, *interval = NULL;
- const struct rb_node *parent = priv->root.rb_node;
+ const struct rb_node *parent;
+ u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
int d;
spin_lock_bh(&nft_rbtree_lock);
+ parent = priv->root.rb_node;
while (parent != NULL) {
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = nft_data_cmp(&rbe->key, key, set->klen);
+ d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
if (d < 0) {
parent = parent->rb_left;
interval = rbe;
@@ -52,12 +52,17 @@ static bool nft_rbtree_lookup(const struct nft_set *set,
parent = parent->rb_right;
else {
found:
- if (rbe->flags & NFT_SET_ELEM_INTERVAL_END)
+ if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ parent = parent->rb_left;
+ continue;
+ }
+ if (nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
+ *nft_set_ext_flags(&rbe->ext) &
+ NFT_SET_ELEM_INTERVAL_END)
goto out;
- if (set->flags & NFT_SET_MAP)
- nft_data_copy(data, rbe->data);
-
spin_unlock_bh(&nft_rbtree_lock);
+
+ *ext = &rbe->ext;
return true;
}
}
@@ -71,23 +76,13 @@ out:
return false;
}
-static void nft_rbtree_elem_destroy(const struct nft_set *set,
- struct nft_rbtree_elem *rbe)
-{
- nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
- if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_uninit(rbe->data, set->dtype);
-
- kfree(rbe);
-}
-
static int __nft_rbtree_insert(const struct nft_set *set,
struct nft_rbtree_elem *new)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe;
struct rb_node *parent, **p;
+ u8 genmask = nft_genmask_next(read_pnet(&set->pnet));
int d;
parent = NULL;
@@ -95,13 +90,18 @@ static int __nft_rbtree_insert(const struct nft_set *set,
while (*p != NULL) {
parent = *p;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = nft_data_cmp(&rbe->key, &new->key, set->klen);
+ d = memcmp(nft_set_ext_key(&rbe->ext),
+ nft_set_ext_key(&new->ext),
+ set->klen);
if (d < 0)
p = &parent->rb_left;
else if (d > 0)
p = &parent->rb_right;
- else
- return -EEXIST;
+ else {
+ if (nft_set_elem_active(&rbe->ext, genmask))
+ return -EEXIST;
+ p = &parent->rb_left;
+ }
}
rb_link_node(&new->node, parent, p);
rb_insert_color(&new->node, &priv->root);
@@ -111,31 +111,13 @@ static int __nft_rbtree_insert(const struct nft_set *set,
static int nft_rbtree_insert(const struct nft_set *set,
const struct nft_set_elem *elem)
{
- struct nft_rbtree_elem *rbe;
- unsigned int size;
+ struct nft_rbtree_elem *rbe = elem->priv;
int err;
- size = sizeof(*rbe);
- if (set->flags & NFT_SET_MAP &&
- !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
- size += sizeof(rbe->data[0]);
-
- rbe = kzalloc(size, GFP_KERNEL);
- if (rbe == NULL)
- return -ENOMEM;
-
- rbe->flags = elem->flags;
- nft_data_copy(&rbe->key, &elem->key);
- if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_copy(rbe->data, &elem->data);
-
spin_lock_bh(&nft_rbtree_lock);
err = __nft_rbtree_insert(set, rbe);
- if (err < 0)
- kfree(rbe);
-
spin_unlock_bh(&nft_rbtree_lock);
+
return err;
}
@@ -143,42 +125,49 @@ static void nft_rbtree_remove(const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rbtree *priv = nft_set_priv(set);
- struct nft_rbtree_elem *rbe = elem->cookie;
+ struct nft_rbtree_elem *rbe = elem->priv;
spin_lock_bh(&nft_rbtree_lock);
rb_erase(&rbe->node, &priv->root);
spin_unlock_bh(&nft_rbtree_lock);
- kfree(rbe);
}
-static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
+static void nft_rbtree_activate(const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_rbtree_elem *rbe = elem->priv;
+
+ nft_set_elem_change_active(set, &rbe->ext);
+}
+
+static void *nft_rbtree_deactivate(const struct nft_set *set,
+ const struct nft_set_elem *elem)
{
const struct nft_rbtree *priv = nft_set_priv(set);
const struct rb_node *parent = priv->root.rb_node;
struct nft_rbtree_elem *rbe;
+ u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
int d;
- spin_lock_bh(&nft_rbtree_lock);
while (parent != NULL) {
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = nft_data_cmp(&rbe->key, &elem->key, set->klen);
+ d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
+ set->klen);
if (d < 0)
parent = parent->rb_left;
else if (d > 0)
parent = parent->rb_right;
else {
- elem->cookie = rbe;
- if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_copy(&elem->data, rbe->data);
- elem->flags = rbe->flags;
- spin_unlock_bh(&nft_rbtree_lock);
- return 0;
+ if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ parent = parent->rb_left;
+ continue;
+ }
+ nft_set_elem_change_active(set, &rbe->ext);
+ return rbe;
}
}
- spin_unlock_bh(&nft_rbtree_lock);
- return -ENOENT;
+ return NULL;
}
static void nft_rbtree_walk(const struct nft_ctx *ctx,
@@ -186,21 +175,21 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
struct nft_set_iter *iter)
{
const struct nft_rbtree *priv = nft_set_priv(set);
- const struct nft_rbtree_elem *rbe;
+ struct nft_rbtree_elem *rbe;
struct nft_set_elem elem;
struct rb_node *node;
+ u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
spin_lock_bh(&nft_rbtree_lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
if (iter->count < iter->skip)
goto cont;
+ if (!nft_set_elem_active(&rbe->ext, genmask))
+ goto cont;
- rbe = rb_entry(node, struct nft_rbtree_elem, node);
- nft_data_copy(&elem.key, &rbe->key);
- if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_copy(&elem.data, rbe->data);
- elem.flags = rbe->flags;
+ elem.priv = rbe;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0) {
@@ -237,7 +226,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
while ((node = priv->root.rb_node) != NULL) {
rb_erase(node, &priv->root);
rbe = rb_entry(node, struct nft_rbtree_elem, node);
- nft_rbtree_elem_destroy(set, rbe);
+ nft_set_elem_destroy(set, rbe);
}
}
@@ -247,9 +236,6 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
unsigned int nsize;
nsize = sizeof(struct nft_rbtree_elem);
- if (features & NFT_SET_MAP)
- nsize += FIELD_SIZEOF(struct nft_rbtree_elem, data[0]);
-
if (desc->size)
est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
else
@@ -262,12 +248,14 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
static struct nft_set_ops nft_rbtree_ops __read_mostly = {
.privsize = nft_rbtree_privsize,
+ .elemsize = offsetof(struct nft_rbtree_elem, ext),
.estimate = nft_rbtree_estimate,
.init = nft_rbtree_init,
.destroy = nft_rbtree_destroy,
.insert = nft_rbtree_insert,
.remove = nft_rbtree_remove,
- .get = nft_rbtree_get,
+ .deactivate = nft_rbtree_deactivate,
+ .activate = nft_rbtree_activate,
.lookup = nft_rbtree_lookup,
.walk = nft_rbtree_walk,
.features = NFT_SET_INTERVAL | NFT_SET_MAP,
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index d7e9e93a4e90..03f7bf40ae75 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -44,25 +44,28 @@ int nft_redir_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_redir *priv = nft_expr_priv(expr);
+ unsigned int plen;
int err;
err = nft_redir_validate(ctx, expr, NULL);
if (err < 0)
return err;
+ plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
priv->sreg_proto_min =
- ntohl(nla_get_be32(tb[NFTA_REDIR_REG_PROTO_MIN]));
+ nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
- err = nft_validate_input_register(priv->sreg_proto_min);
+ err = nft_validate_register_load(priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
priv->sreg_proto_max =
- ntohl(nla_get_be32(tb[NFTA_REDIR_REG_PROTO_MAX]));
+ nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]);
- err = nft_validate_input_register(priv->sreg_proto_max);
+ err = nft_validate_register_load(priv->sreg_proto_max,
+ plen);
if (err < 0)
return err;
} else {
@@ -85,11 +88,11 @@ int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
const struct nft_redir *priv = nft_expr_priv(expr);
if (priv->sreg_proto_min) {
- if (nla_put_be32(skb, NFTA_REDIR_REG_PROTO_MIN,
- htonl(priv->sreg_proto_min)))
+ if (nft_dump_register(skb, NFTA_REDIR_REG_PROTO_MIN,
+ priv->sreg_proto_min))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_REDIR_REG_PROTO_MAX,
- htonl(priv->sreg_proto_max)))
+ if (nft_dump_register(skb, NFTA_REDIR_REG_PROTO_MAX,
+ priv->sreg_proto_max))
goto nla_put_failure;
}
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 57d3e1af5630..0522fc9bfb0a 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -63,6 +63,8 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
+ default:
+ break;
}
return 0;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 7b5f9d58680a..635dbba93d01 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -18,7 +18,7 @@
#include <net/netfilter/ipv6/nf_reject.h>
static void nft_reject_inet_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
@@ -28,14 +28,16 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
case NFPROTO_IPV4:
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
- nf_send_unreach(pkt->skb, priv->icmp_code);
+ nf_send_unreach(pkt->skb, priv->icmp_code,
+ pkt->ops->hooknum);
break;
case NFT_REJECT_TCP_RST:
nf_send_reset(pkt->skb, pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nf_send_unreach(pkt->skb,
- nft_reject_icmp_code(priv->icmp_code));
+ nft_reject_icmp_code(priv->icmp_code),
+ pkt->ops->hooknum);
break;
}
break;
@@ -56,7 +58,8 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
}
break;
}
- data[NFT_REG_VERDICT].verdict = NF_DROP;
+
+ regs->verdict.code = NF_DROP;
}
static int nft_reject_inet_init(const struct nft_ctx *ctx,
@@ -105,6 +108,8 @@ static int nft_reject_inet_dump(struct sk_buff *skb,
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
+ default:
+ break;
}
return 0;
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 50e1e5aaf4ce..cca96cec1b68 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -42,15 +42,21 @@ enum nf_tproxy_lookup_t {
static bool tproxy_sk_is_transparent(struct sock *sk)
{
- if (sk->sk_state != TCP_TIME_WAIT) {
- if (inet_sk(sk)->transparent)
- return true;
- sock_put(sk);
- } else {
+ switch (sk->sk_state) {
+ case TCP_TIME_WAIT:
if (inet_twsk(sk)->tw_transparent)
return true;
- inet_twsk_put(inet_twsk(sk));
+ break;
+ case TCP_NEW_SYN_RECV:
+ if (inet_rsk(inet_reqsk(sk))->no_srccheck)
+ return true;
+ break;
+ default:
+ if (inet_sk(sk)->transparent)
+ return true;
}
+
+ sock_gen_put(sk);
return false;
}
@@ -266,7 +272,7 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
hp->source, lport ? lport : hp->dest,
skb->dev, NFT_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
+ inet_twsk_deschedule(inet_twsk(sk));
inet_twsk_put(inet_twsk(sk));
sk = sk2;
}
@@ -431,7 +437,7 @@ tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
tgi->lport ? tgi->lport : hp->dest,
skb->dev, NFT_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
+ inet_twsk_deschedule(inet_twsk(sk));
inet_twsk_put(inet_twsk(sk));
sk = sk2;
}
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 7198d660b4de..a1d126f29463 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -39,7 +39,7 @@ cgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_cgroup_info *info = par->matchinfo;
- if (skb->sk == NULL)
+ if (skb->sk == NULL || !sk_fullsock(skb->sk))
return false;
return (info->id == skb->sk->sk_classid) ^ info->invert;
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index f440f57a452f..1caaccbc306c 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -25,16 +25,15 @@ MODULE_ALIAS("ip6t_physdev");
static bool
physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
- static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
const struct xt_physdev_info *info = par->matchinfo;
+ const struct net_device *physdev;
unsigned long ret;
const char *indev, *outdev;
- const struct nf_bridge_info *nf_bridge;
/* Not a bridged IP packet or no info available yet:
* LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
* the destination device will be a bridge. */
- if (!(nf_bridge = skb->nf_bridge)) {
+ if (!skb->nf_bridge) {
/* Return MATCH if the invert flags of the used options are on */
if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
!(info->invert & XT_PHYSDEV_OP_BRIDGED))
@@ -54,31 +53,41 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
return true;
}
+ physdev = nf_bridge_get_physoutdev(skb);
+ outdev = physdev ? physdev->name : NULL;
+
/* This only makes sense in the FORWARD and POSTROUTING chains */
if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
- (!!(nf_bridge->mask & BRNF_BRIDGED) ^
- !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
+ (!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
return false;
+ physdev = nf_bridge_get_physindev(skb);
+ indev = physdev ? physdev->name : NULL;
+
if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
- (!nf_bridge->physindev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
+ (!indev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
(info->bitmask & XT_PHYSDEV_OP_ISOUT &&
- (!nf_bridge->physoutdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
+ (!outdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
return false;
if (!(info->bitmask & XT_PHYSDEV_OP_IN))
goto match_outdev;
- indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
- ret = ifname_compare_aligned(indev, info->physindev, info->in_mask);
- if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
- return false;
+ if (indev) {
+ ret = ifname_compare_aligned(indev, info->physindev,
+ info->in_mask);
+
+ if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
+ return false;
+ }
match_outdev:
if (!(info->bitmask & XT_PHYSDEV_OP_OUT))
return true;
- outdev = nf_bridge->physoutdev ?
- nf_bridge->physoutdev->name : nulldevname;
+
+ if (!outdev)
+ return false;
+
ret = ifname_compare_aligned(outdev, info->physoutdev, info->out_mask);
return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 0d47afea9682..89045982ec94 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -193,7 +193,7 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
return ret;
if (!match_counter0(opt.ext.packets, &info->packets))
- return 0;
+ return false;
return match_counter0(opt.ext.bytes, &info->bytes);
}
@@ -239,7 +239,7 @@ set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
return ret;
if (!match_counter(opt.ext.packets, &info->packets))
- return 0;
+ return false;
return match_counter(opt.ext.bytes, &info->bytes);
}
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 13332dbf291d..e092cb046326 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -129,13 +129,24 @@ xt_socket_get_sock_v4(struct net *net, const u8 protocol,
return NULL;
}
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
- const struct xt_socket_mtinfo1 *info)
+static bool xt_socket_sk_is_transparent(struct sock *sk)
+{
+ switch (sk->sk_state) {
+ case TCP_TIME_WAIT:
+ return inet_twsk(sk)->tw_transparent;
+
+ case TCP_NEW_SYN_RECV:
+ return inet_rsk(inet_reqsk(sk))->no_srccheck;
+
+ default:
+ return inet_sk(sk)->transparent;
+ }
+}
+
+static struct sock *xt_socket_lookup_slow_v4(const struct sk_buff *skb,
+ const struct net_device *indev)
{
const struct iphdr *iph = ip_hdr(skb);
- struct udphdr _hdr, *hp = NULL;
- struct sock *sk = skb->sk;
__be32 uninitialized_var(daddr), uninitialized_var(saddr);
__be16 uninitialized_var(dport), uninitialized_var(sport);
u8 uninitialized_var(protocol);
@@ -145,10 +156,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
#endif
if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
+ struct udphdr _hdr, *hp;
+
hp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(_hdr), &_hdr);
if (hp == NULL)
- return false;
+ return NULL;
protocol = iph->protocol;
saddr = iph->saddr;
@@ -158,16 +171,17 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
} else if (iph->protocol == IPPROTO_ICMP) {
if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
- &sport, &dport))
- return false;
+ &sport, &dport))
+ return NULL;
} else {
- return false;
+ return NULL;
}
#ifdef XT_SOCKET_HAVE_CONNTRACK
- /* Do the lookup with the original socket address in case this is a
- * reply packet of an established SNAT-ted connection. */
-
+ /* Do the lookup with the original socket address in
+ * case this is a reply packet of an established
+ * SNAT-ted connection.
+ */
ct = nf_ct_get(skb, &ctinfo);
if (ct && !nf_ct_is_untracked(ct) &&
((iph->protocol != IPPROTO_ICMP &&
@@ -183,10 +197,18 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
}
#endif
+ return xt_socket_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr,
+ sport, dport, indev);
+}
+
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+ const struct xt_socket_mtinfo1 *info)
+{
+ struct sock *sk = skb->sk;
+
if (!sk)
- sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
- saddr, daddr, sport, dport,
- par->in);
+ sk = xt_socket_lookup_slow_v4(skb, par->in);
if (sk) {
bool wildcard;
bool transparent = true;
@@ -195,16 +217,14 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
* unless XT_SOCKET_NOWILDCARD is set
*/
wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
- sk->sk_state != TCP_TIME_WAIT &&
+ sk_fullsock(sk) &&
inet_sk(sk)->inet_rcv_saddr == 0);
/* Ignore non-transparent sockets,
- if XT_SOCKET_TRANSPARENT is used */
+ * if XT_SOCKET_TRANSPARENT is used
+ */
if (info->flags & XT_SOCKET_TRANSPARENT)
- transparent = ((sk->sk_state != TCP_TIME_WAIT &&
- inet_sk(sk)->transparent) ||
- (sk->sk_state == TCP_TIME_WAIT &&
- inet_twsk(sk)->tw_transparent));
+ transparent = xt_socket_sk_is_transparent(sk);
if (sk != skb->sk)
sock_gen_put(sk);
@@ -213,12 +233,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
sk = NULL;
}
- pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
- protocol, &saddr, ntohs(sport),
- &daddr, ntohs(dport),
- &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
- return (sk != NULL);
+ return sk != NULL;
}
static bool
@@ -315,28 +330,26 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
return NULL;
}
-static bool
-socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+static struct sock *xt_socket_lookup_slow_v6(const struct sk_buff *skb,
+ const struct net_device *indev)
{
- struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
- struct udphdr _hdr, *hp = NULL;
- struct sock *sk = skb->sk;
- const struct in6_addr *daddr = NULL, *saddr = NULL;
__be16 uninitialized_var(dport), uninitialized_var(sport);
- int thoff = 0, uninitialized_var(tproto);
- const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+ const struct in6_addr *daddr = NULL, *saddr = NULL;
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ int thoff = 0, tproto;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0) {
pr_debug("unable to find transport header in IPv6 packet, dropping\n");
- return NF_DROP;
+ return NULL;
}
if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) {
- hp = skb_header_pointer(skb, thoff,
- sizeof(_hdr), &_hdr);
+ struct udphdr _hdr, *hp;
+
+ hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
- return false;
+ return NULL;
saddr = &iph->saddr;
sport = hp->source;
@@ -344,17 +357,27 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
dport = hp->dest;
} else if (tproto == IPPROTO_ICMPV6) {
+ struct ipv6hdr ipv6_var;
+
if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
&sport, &dport, &ipv6_var))
- return false;
+ return NULL;
} else {
- return false;
+ return NULL;
}
+ return xt_socket_get_sock_v6(dev_net(skb->dev), tproto, saddr, daddr,
+ sport, dport, indev);
+}
+
+static bool
+socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+ struct sock *sk = skb->sk;
+
if (!sk)
- sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
- saddr, daddr, sport, dport,
- par->in);
+ sk = xt_socket_lookup_slow_v6(skb, par->in);
if (sk) {
bool wildcard;
bool transparent = true;
@@ -363,16 +386,14 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
* unless XT_SOCKET_NOWILDCARD is set
*/
wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
- sk->sk_state != TCP_TIME_WAIT &&
+ sk_fullsock(sk) &&
ipv6_addr_any(&sk->sk_v6_rcv_saddr));
/* Ignore non-transparent sockets,
- if XT_SOCKET_TRANSPARENT is used */
+ * if XT_SOCKET_TRANSPARENT is used
+ */
if (info->flags & XT_SOCKET_TRANSPARENT)
- transparent = ((sk->sk_state != TCP_TIME_WAIT &&
- inet_sk(sk)->transparent) ||
- (sk->sk_state == TCP_TIME_WAIT &&
- inet_twsk(sk)->tw_transparent));
+ transparent = xt_socket_sk_is_transparent(sk);
if (sk != skb->sk)
sock_gen_put(sk);
@@ -381,13 +402,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
sk = NULL;
}
- pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
- "(orig %pI6:%hu) sock %p\n",
- tproto, saddr, ntohs(sport),
- daddr, ntohs(dport),
- &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
- return (sk != NULL);
+ return sk != NULL;
}
#endif
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 5699adb97652..0bc3460319c8 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -26,13 +26,12 @@ static bool
string_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_string_info *conf = par->matchinfo;
- struct ts_state state;
bool invert;
invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT;
return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
- conf->to_offset, conf->config, &state)
+ conf->to_offset, conf->config)
!= UINT_MAX) ^ invert;
}
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 70440748fe5c..13f777f20995 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -293,15 +293,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return -ENOMEM;
addr_struct.s_addr = iter4->addr;
- ret_val = nla_put(skb, NLBL_MGMT_A_IPV4ADDR,
- sizeof(struct in_addr),
- &addr_struct);
+ ret_val = nla_put_in_addr(skb, NLBL_MGMT_A_IPV4ADDR,
+ addr_struct.s_addr);
if (ret_val != 0)
return ret_val;
addr_struct.s_addr = iter4->mask;
- ret_val = nla_put(skb, NLBL_MGMT_A_IPV4MASK,
- sizeof(struct in_addr),
- &addr_struct);
+ ret_val = nla_put_in_addr(skb, NLBL_MGMT_A_IPV4MASK,
+ addr_struct.s_addr);
if (ret_val != 0)
return ret_val;
map4 = netlbl_domhsh_addr4_entry(iter4);
@@ -328,14 +326,12 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
if (nla_b == NULL)
return -ENOMEM;
- ret_val = nla_put(skb, NLBL_MGMT_A_IPV6ADDR,
- sizeof(struct in6_addr),
- &iter6->addr);
+ ret_val = nla_put_in6_addr(skb, NLBL_MGMT_A_IPV6ADDR,
+ &iter6->addr);
if (ret_val != 0)
return ret_val;
- ret_val = nla_put(skb, NLBL_MGMT_A_IPV6MASK,
- sizeof(struct in6_addr),
- &iter6->mask);
+ ret_val = nla_put_in6_addr(skb, NLBL_MGMT_A_IPV6MASK,
+ &iter6->mask);
if (ret_val != 0)
return ret_val;
map6 = netlbl_domhsh_addr6_entry(iter6);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index aec7994f78cf..b0380927f05f 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1117,34 +1117,30 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
struct in_addr addr_struct;
addr_struct.s_addr = addr4->list.addr;
- ret_val = nla_put(cb_arg->skb,
- NLBL_UNLABEL_A_IPV4ADDR,
- sizeof(struct in_addr),
- &addr_struct);
+ ret_val = nla_put_in_addr(cb_arg->skb,
+ NLBL_UNLABEL_A_IPV4ADDR,
+ addr_struct.s_addr);
if (ret_val != 0)
goto list_cb_failure;
addr_struct.s_addr = addr4->list.mask;
- ret_val = nla_put(cb_arg->skb,
- NLBL_UNLABEL_A_IPV4MASK,
- sizeof(struct in_addr),
- &addr_struct);
+ ret_val = nla_put_in_addr(cb_arg->skb,
+ NLBL_UNLABEL_A_IPV4MASK,
+ addr_struct.s_addr);
if (ret_val != 0)
goto list_cb_failure;
secid = addr4->secid;
} else {
- ret_val = nla_put(cb_arg->skb,
- NLBL_UNLABEL_A_IPV6ADDR,
- sizeof(struct in6_addr),
- &addr6->list.addr);
+ ret_val = nla_put_in6_addr(cb_arg->skb,
+ NLBL_UNLABEL_A_IPV6ADDR,
+ &addr6->list.addr);
if (ret_val != 0)
goto list_cb_failure;
- ret_val = nla_put(cb_arg->skb,
- NLBL_UNLABEL_A_IPV6MASK,
- sizeof(struct in6_addr),
- &addr6->list.mask);
+ ret_val = nla_put_in6_addr(cb_arg->skb,
+ NLBL_UNLABEL_A_IPV6MASK,
+ &addr6->list.mask);
if (ret_val != 0)
goto list_cb_failure;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 05919bf3f670..daa0b818174b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -116,6 +116,8 @@ static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;
+static const struct rhashtable_params netlink_rhashtable_params;
+
static inline u32 netlink_group_mask(u32 group)
{
return group ? 1 << (group - 1) : 0;
@@ -970,41 +972,50 @@ netlink_unlock_table(void)
struct netlink_compare_arg
{
- struct net *net;
+ possible_net_t pnet;
u32 portid;
};
-static bool netlink_compare(void *ptr, void *arg)
+/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
+#define netlink_compare_arg_len \
+ (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
+
+static inline int netlink_compare(struct rhashtable_compare_arg *arg,
+ const void *ptr)
{
- struct netlink_compare_arg *x = arg;
- struct sock *sk = ptr;
+ const struct netlink_compare_arg *x = arg->key;
+ const struct netlink_sock *nlk = ptr;
+
+ return nlk->portid != x->portid ||
+ !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
+}
- return nlk_sk(sk)->portid == x->portid &&
- net_eq(sock_net(sk), x->net);
+static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
+ struct net *net, u32 portid)
+{
+ memset(arg, 0, sizeof(*arg));
+ write_pnet(&arg->pnet, net);
+ arg->portid = portid;
}
static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
struct net *net)
{
- struct netlink_compare_arg arg = {
- .net = net,
- .portid = portid,
- };
+ struct netlink_compare_arg arg;
- return rhashtable_lookup_compare(&table->hash, &portid,
- &netlink_compare, &arg);
+ netlink_compare_arg_init(&arg, net, portid);
+ return rhashtable_lookup_fast(&table->hash, &arg,
+ netlink_rhashtable_params);
}
-static bool __netlink_insert(struct netlink_table *table, struct sock *sk)
+static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
- struct netlink_compare_arg arg = {
- .net = sock_net(sk),
- .portid = nlk_sk(sk)->portid,
- };
+ struct netlink_compare_arg arg;
- return rhashtable_lookup_compare_insert(&table->hash,
- &nlk_sk(sk)->node,
- &netlink_compare, &arg);
+ netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
+ return rhashtable_lookup_insert_key(&table->hash, &arg,
+ &nlk_sk(sk)->node,
+ netlink_rhashtable_params);
}
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
@@ -1066,9 +1077,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
nlk_sk(sk)->portid = portid;
sock_hold(sk);
- err = 0;
- if (!__netlink_insert(table, sk)) {
- err = -EADDRINUSE;
+ err = __netlink_insert(table, sk);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
sock_put(sk);
}
@@ -1082,7 +1094,8 @@ static void netlink_remove(struct sock *sk)
struct netlink_table *table;
table = &nl_table[sk->sk_protocol];
- if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
+ if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
+ netlink_rhashtable_params)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
@@ -1616,13 +1629,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
if (data == NULL)
return NULL;
- skb = build_skb(data, size);
+ skb = __build_skb(data, size);
if (skb == NULL)
vfree(data);
- else {
- skb->head_frag = 0;
+ else
skb->destructor = netlink_skb_destructor;
- }
return skb;
}
@@ -2256,8 +2267,7 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
-static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
@@ -2346,8 +2356,7 @@ out:
return err;
}
-static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len,
+static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct scm_cookie scm;
@@ -3116,17 +3125,27 @@ static struct pernet_operations __net_initdata netlink_net_ops = {
.exit = netlink_net_exit,
};
+static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
+{
+ const struct netlink_sock *nlk = data;
+ struct netlink_compare_arg arg;
+
+ netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
+ return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
+}
+
+static const struct rhashtable_params netlink_rhashtable_params = {
+ .head_offset = offsetof(struct netlink_sock, node),
+ .key_len = netlink_compare_arg_len,
+ .obj_hashfn = netlink_hash,
+ .obj_cmpfn = netlink_compare,
+ .automatic_shrinking = true,
+};
+
static int __init netlink_proto_init(void)
{
int i;
int err = proto_register(&netlink_proto, 0);
- struct rhashtable_params ht_params = {
- .head_offset = offsetof(struct netlink_sock, node),
- .key_offset = offsetof(struct netlink_sock, portid),
- .key_len = sizeof(u32), /* portid */
- .hashfn = jhash,
- .max_shift = 16, /* 64K */
- };
if (err != 0)
goto out;
@@ -3138,7 +3157,8 @@ static int __init netlink_proto_init(void)
goto panic;
for (i = 0; i < MAX_LINKS; i++) {
- if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
+ if (rhashtable_init(&nl_table[i].hash,
+ &netlink_rhashtable_params) < 0) {
while (--i > 0)
rhashtable_destroy(&nl_table[i].hash);
kfree(nl_table);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 69f1d5e9959f..b987fd56c3c5 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1023,8 +1023,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
return 1;
}
-static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int nr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
@@ -1133,8 +1132,8 @@ out:
return err;
}
-static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name);
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 6ae063cebf7d..988f542481a8 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -65,36 +65,6 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
return 1;
}
-#ifdef CONFIG_INET
-
-static int nr_rebuild_header(struct sk_buff *skb)
-{
- unsigned char *bp = skb->data;
-
- if (arp_find(bp + 7, skb))
- return 1;
-
- bp[6] &= ~AX25_CBIT;
- bp[6] &= ~AX25_EBIT;
- bp[6] |= AX25_SSSID_SPARE;
- bp += AX25_ADDR_LEN;
-
- bp[6] &= ~AX25_CBIT;
- bp[6] |= AX25_EBIT;
- bp[6] |= AX25_SSSID_SPARE;
-
- return 0;
-}
-
-#else
-
-static int nr_rebuild_header(struct sk_buff *skb)
-{
- return 1;
-}
-
-#endif
-
static int nr_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
@@ -188,7 +158,6 @@ static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev)
static const struct header_ops nr_header_ops = {
.create = nr_header,
- .rebuild= nr_rebuild_header,
};
static const struct net_device_ops nr_netdev_ops = {
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index e181e290427c..9578bd6a4f3e 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -750,8 +750,8 @@ error:
return ret;
}
-static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -793,8 +793,8 @@ static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return nfc_llcp_send_i_frame(llcp_sock, msg, len);
}
-static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 9575a1892607..49ff32106080 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -907,6 +907,16 @@ static int nci_se_io(struct nfc_dev *nfc_dev, u32 se_idx,
return 0;
}
+static int nci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ if (!ndev->ops->fw_download)
+ return -ENOTSUPP;
+
+ return ndev->ops->fw_download(ndev, firmware_name);
+}
+
static struct nfc_ops nci_nfc_ops = {
.dev_up = nci_dev_up,
.dev_down = nci_dev_down,
@@ -922,6 +932,7 @@ static struct nfc_ops nci_nfc_ops = {
.disable_se = nci_disable_se,
.discover_se = nci_discover_se,
.se_io = nci_se_io,
+ .fw_download = nci_fw_download,
};
/* ---- Interface to NCI drivers ---- */
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 14a2d11581da..3763036710ae 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1584,7 +1584,7 @@ static const struct genl_ops nfc_genl_ops[] = {
struct urelease_work {
struct work_struct w;
- int portid;
+ u32 portid;
};
static void nfc_urelease_event_work(struct work_struct *work)
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 373e138c0ab6..82b4e8024778 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -211,8 +211,7 @@ static void rawsock_tx_work(struct work_struct *work)
}
}
-static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nfc_dev *dev = nfc_rawsock(sk)->dev;
@@ -248,8 +247,8 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
return len;
}
-static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index b7d818c59423..ed6b0f8dd1bb 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -6,6 +6,7 @@ config OPENVSWITCH
tristate "Open vSwitch"
depends on INET
select LIBCRC32C
+ select MPLS
select NET_MPLS_GSO
---help---
Open vSwitch is a multilayer Ethernet switch targeted at virtualized
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 5bae7243c577..096c6276e6b9 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -203,7 +203,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
ovs_flow_tbl_destroy(&dp->table);
free_percpu(dp->stats_percpu);
- release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
kfree(dp);
}
@@ -1501,7 +1500,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (dp == NULL)
goto err_free_reply;
- ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
+ ovs_dp_set_net(dp, sock_net(skb->sk));
/* Allocate table. */
err = ovs_flow_tbl_init(&dp->table);
@@ -1575,7 +1574,6 @@ err_destroy_percpu:
err_destroy_table:
ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
- release_net(ovs_dp_get_net(dp));
kfree(dp);
err_free_reply:
kfree_skb(reply);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 3ece94563079..4ec4a480b147 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -84,10 +84,8 @@ struct datapath {
/* Stats. */
struct dp_stats_percpu __percpu *stats_percpu;
-#ifdef CONFIG_NET_NS
/* Network namespace ref. */
- struct net *net;
-#endif
+ possible_net_t net;
u32 user_features;
};
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 50ec42f170a0..2dacc7b5af23 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -100,7 +100,9 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
new_stats =
kmem_cache_alloc_node(flow_stats_cache,
- GFP_THISNODE |
+ GFP_NOWAIT |
+ __GFP_THISNODE |
+ __GFP_NOWARN |
__GFP_NOMEMALLOC,
node);
if (likely(new_stats)) {
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 22b18c145c92..c691b1a1eee0 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -535,11 +535,11 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
- nla_get_be32(a), is_mask);
+ nla_get_in_addr(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
- nla_get_be32(a), is_mask);
+ nla_get_in_addr(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TOS:
SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
@@ -648,10 +648,12 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
return -EMSGSIZE;
if (output->ipv4_src &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
+ nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
+ output->ipv4_src))
return -EMSGSIZE;
if (output->ipv4_dst &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
+ nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
+ output->ipv4_dst))
return -EMSGSIZE;
if (output->ipv4_tos &&
nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 3277a7520e31..6d39766e7828 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -222,7 +222,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct net *net = ovs_dp_get_net(vport->dp);
struct vxlan_port *vxlan_port = vxlan_vport(vport);
- __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ struct sock *sk = vxlan_port->vs->sock->sk;
+ __be16 dst_port = inet_sk(sk)->inet_sport;
const struct ovs_key_ipv4_tunnel *tun_key;
struct vxlan_metadata md = {0};
struct rtable *rt;
@@ -255,7 +256,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
vxflags = vxlan_port->exts |
(tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
- err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst,
+ err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
src_port, dst_port,
&md, false, vxflags);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f8db7064d81c..b5989c6ee551 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -216,10 +216,16 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
static void packet_flush_mclist(struct sock *sk);
struct packet_skb_cb {
- unsigned int origlen;
union {
struct sockaddr_pkt pkt;
- struct sockaddr_ll ll;
+ union {
+ /* Trick: alias skb original length with
+ * ll.sll_family and ll.sll_protocol in order
+ * to save room.
+ */
+ unsigned int origlen;
+ struct sockaddr_ll ll;
+ };
} sa;
};
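
The aliasing in the hunk above leans on a layout fact: a 4-byte origlen fits entirely inside sll_family plus sll_protocol, the two fields packet_recvmsg() rewrites before copying the address out, so nothing else in sockaddr_ll is clobbered. A minimal userspace check of that assumption against the UAPI struct sockaddr_ll (a sanity-check sketch, not part of the patch):

/* Standalone sketch verifying the aliasing assumption: storing a 4-byte
 * length at the start of struct sockaddr_ll only overlaps sll_family and
 * sll_protocol, which the kernel rewrites before copying to userspace. */
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

_Static_assert(offsetof(struct sockaddr_ll, sll_ifindex) >= sizeof(unsigned int),
	       "origlen would spill past sll_family/sll_protocol");

int main(void)
{
	printf("sll_family at %zu, sll_protocol at %zu, sll_ifindex at %zu\n",
	       offsetof(struct sockaddr_ll, sll_family),
	       offsetof(struct sockaddr_ll, sll_protocol),
	       offsetof(struct sockaddr_ll, sll_ifindex));
	printf("sizeof(unsigned int) = %zu\n", sizeof(unsigned int));
	return 0;
}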
@@ -1608,8 +1614,8 @@ oom:
* protocol layers and you must therefore supply it with a complete frame
*/
-static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
@@ -1818,13 +1824,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
skb = nskb;
}
- BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
- sizeof(skb->cb));
+ sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
sll = &PACKET_SKB_CB(skb)->sa.ll;
- sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
- sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(po->origdev))
sll->sll_ifindex = orig_dev->ifindex;
@@ -1833,7 +1836,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
- PACKET_SKB_CB(skb)->origlen = skb->len;
+ /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
+ * Use their space for storing the original skb length.
+ */
+ PACKET_SKB_CB(skb)->sa.origlen = skb->len;
if (pskb_trim(skb, snaplen))
goto drop_n_acct;
@@ -1847,7 +1853,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
spin_lock(&sk->sk_receive_queue.lock);
po->stats.stats1.tp_packets++;
- skb->dropcount = atomic_read(&sk->sk_drops);
+ sock_skb_set_dropcount(sk, skb);
__skb_queue_tail(&sk->sk_receive_queue, skb);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk);
@@ -1910,14 +1916,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
}
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- status |= TP_STATUS_CSUMNOTREADY;
-
snaplen = skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)
goto drop_n_restore;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= TP_STATUS_CSUMNOTREADY;
+ else if (skb->pkt_type != PACKET_OUTGOING &&
+ (skb->ip_summed == CHECKSUM_COMPLETE ||
+ skb_csum_unnecessary(skb)))
+ status |= TP_STATUS_CSUM_VALID;
+
if (snaplen > res)
snaplen = res;
@@ -2300,11 +2311,14 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(&po->sk,
hlen + tlen + sizeof(struct sockaddr_ll),
- 0, &err);
+ !need_wait, &err);
- if (unlikely(skb == NULL))
+ if (unlikely(skb == NULL)) {
+ /* we assume the socket was initially writeable ... */
+ if (likely(len_sum > 0))
+ err = len_sum;
goto out_status;
-
+ }
tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
addr, hlen);
if (tp_len > dev->mtu + dev->hard_header_len) {
@@ -2603,8 +2617,7 @@ out:
return err;
}
-static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
@@ -2884,13 +2897,14 @@ out:
* If necessary we block.
*/
-static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
int vnet_hdr_len = 0;
+ unsigned int origlen = 0;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
@@ -2990,6 +3004,15 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
if (err)
goto out_free;
+ if (sock->type != SOCK_PACKET) {
+ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+
+ /* Original length was stored in sockaddr_ll fields */
+ origlen = PACKET_SKB_CB(skb)->sa.origlen;
+ sll->sll_family = AF_PACKET;
+ sll->sll_protocol = skb->protocol;
+ }
+
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
@@ -3001,6 +3024,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_namelen = sizeof(struct sockaddr_pkt);
} else {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+
msg->msg_namelen = sll->sll_halen +
offsetof(struct sockaddr_ll, sll_addr);
}
@@ -3014,7 +3038,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
aux.tp_status = TP_STATUS_USER;
if (skb->ip_summed == CHECKSUM_PARTIAL)
aux.tp_status |= TP_STATUS_CSUMNOTREADY;
- aux.tp_len = PACKET_SKB_CB(skb)->origlen;
+ else if (skb->pkt_type != PACKET_OUTGOING &&
+ (skb->ip_summed == CHECKSUM_COMPLETE ||
+ skb_csum_unnecessary(skb)))
+ aux.tp_status |= TP_STATUS_CSUM_VALID;
+
+ aux.tp_len = origlen;
aux.tp_snaplen = skb->len;
aux.tp_mac = 0;
aux.tp_net = skb_network_offset(skb);
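
From userspace, the new TP_STATUS_CSUM_VALID bit surfaces through the PACKET_AUXDATA control message filled in above. A rough consumer sketch; it assumes installed kernel headers new enough to define TP_STATUS_CSUM_VALID, needs CAP_NET_RAW to open the socket, and keeps error handling minimal:

/* Sketch: read one packet with PACKET_AUXDATA and report checksum status.
 * Illustrative only, not production code. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	char buf[2048], cbuf[256];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (fd < 0 || setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one)) < 0) {
		perror("socket/setsockopt");
		return 1;
	}
	if (recvmsg(fd, &msg, 0) < 0) {
		perror("recvmsg");
		return 1;
	}
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct tpacket_auxdata aux;

		if (cmsg->cmsg_level != SOL_PACKET || cmsg->cmsg_type != PACKET_AUXDATA)
			continue;
		memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
		if (aux.tp_status & TP_STATUS_CSUMNOTREADY)
			printf("checksum not yet computed (outgoing, CHECKSUM_PARTIAL)\n");
		else if (aux.tp_status & TP_STATUS_CSUM_VALID)
			printf("checksum already validated by the kernel/NIC\n");
		else
			printf("no checksum information\n");
		printf("original length %u, captured %u\n", aux.tp_len, aux.tp_snaplen);
	}
	close(fd);
	return 0;
}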
diff --git a/net/packet/internal.h b/net/packet/internal.h
index cdddf6a30399..fe6e20caea1d 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -74,9 +74,7 @@ extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX 256
struct packet_fanout {
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
unsigned int num_members;
u16 id;
u8 type;
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 26054b4b467c..5e710435ffa9 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -83,8 +83,7 @@ static int pn_init(struct sock *sk)
return 0;
}
-static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len)
+static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_pn *, target, msg->msg_name);
struct sk_buff *skb;
@@ -125,9 +124,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
return (err >= 0) ? len : err;
}
-static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
struct sk_buff *skb = NULL;
struct sockaddr_pn sa;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 5d3f2b7507d4..6de2aeb98a1f 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1118,8 +1118,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
}
-static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len)
+static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct pep_sock *pn = pep_sk(sk);
struct sk_buff *skb;
@@ -1246,9 +1245,8 @@ struct sk_buff *pep_read(struct sock *sk)
return skb;
}
-static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
struct sk_buff *skb;
int err;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 008214a3d5eb..d575ef4e9aa6 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -425,15 +425,15 @@ out:
return err;
}
-static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len)
{
struct sock *sk = sock->sk;
if (pn_socket_autobind(sock))
return -EAGAIN;
- return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
+ return sk->sk_prot->sendmsg(sk, m, total_len);
}
const struct proto_ops phonet_dgram_ops = {
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 378c3a6acf84..da6da57e5f36 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -126,11 +126,14 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
struct rds_transport *loop_trans;
unsigned long flags;
int ret;
+ struct rds_transport *otrans = trans;
+ if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
+ goto new_conn;
rcu_read_lock();
conn = rds_conn_lookup(head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
- !is_outgoing) {
+ laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
* called by the code handling the incoming connect.
* We need a second connection object into which we
@@ -142,6 +145,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
if (conn)
goto out;
+new_conn:
conn = kmem_cache_zalloc(rds_conn_slab, gfp);
if (!conn) {
conn = ERR_PTR(-ENOMEM);
@@ -193,6 +197,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
}
atomic_set(&conn->c_state, RDS_CONN_DOWN);
+ conn->c_send_gen = 0;
conn->c_reconnect_jiffies = 0;
INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
@@ -229,13 +234,22 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
/* Creating normal conn */
struct rds_connection *found;
- found = rds_conn_lookup(head, laddr, faddr, trans);
+ if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
+ found = NULL;
+ else
+ found = rds_conn_lookup(head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
- hlist_add_head_rcu(&conn->c_hash_node, head);
+ if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
+ (otrans->t_type != RDS_TRANS_TCP)) {
+ /* Only the active side should be added to
+ * reconnect list for TCP.
+ */
+ hlist_add_head_rcu(&conn->c_hash_node, head);
+ }
rds_cong_add_conn(conn);
rds_conn_count++;
}
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 31b74f5e61ad..8a09ee7db3c1 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -183,8 +183,17 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
- if (dp && dp->dp_ack_seq)
- rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
+ if (dp) {
+ /* The start of the dp structure is not guaranteed to be 8-byte aligned.
+ * Since dp_ack_seq is 64-bit, extended load operations can be
+ * used, so go through get_unaligned to avoid unaligned errors.
+ */
+ __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);
+
+ if (dp_ack_seq)
+ rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
+ NULL);
+ }
rds_connect_complete(conn);
}
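
The fix above goes through get_unaligned() because dp_ack_seq may sit at a non-8-byte-aligned offset. The portable userspace equivalent of that pattern uses memcpy() for the fetch and be64toh() for byte order; the buffer layout below is invented purely for the illustration:

/* Sketch: safely read a 64-bit big-endian sequence number that may sit at
 * an odd offset inside a received buffer (the kernel uses get_unaligned()
 * plus be64_to_cpu(); memcpy() is the portable userspace equivalent). */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <endian.h>

static uint64_t read_be64_unaligned(const unsigned char *p)
{
	uint64_t raw;

	memcpy(&raw, p, sizeof(raw));	/* no alignment requirement */
	return be64toh(raw);
}

int main(void)
{
	/* 3 bytes of padding, then 0x0102030405060708 in network byte order */
	unsigned char buf[16] = { 0xaa, 0xbb, 0xcc,
				  0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

	printf("ack_seq = 0x%016llx\n",
	       (unsigned long long)read_be64_unaligned(buf + 3));
	return 0;
}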
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c2a5eef41343..0d41155a2258 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -110,6 +110,7 @@ struct rds_connection {
void *c_transport_data;
atomic_t c_state;
+ unsigned long c_send_gen;
unsigned long c_flags;
unsigned long c_reconnect_jiffies;
struct delayed_work c_send_w;
@@ -702,8 +703,8 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
struct rds_incoming *inc, gfp_t gfp);
-int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int msg_flags);
+int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
@@ -711,8 +712,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
__be32 saddr, __be32 daddr, int flip);
/* send.c */
-int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t payload_len);
+int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_connection *conn);
struct sockaddr_in;
diff --git a/net/rds/recv.c b/net/rds/recv.c
index f9ec1acd801c..a00462b0d01d 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -395,8 +395,8 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
return 0;
}
-int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int msg_flags)
+int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int msg_flags)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
diff --git a/net/rds/send.c b/net/rds/send.c
index 42f65d4305c8..e9430f537f9c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -140,8 +140,11 @@ int rds_send_xmit(struct rds_connection *conn)
struct scatterlist *sg;
int ret = 0;
LIST_HEAD(to_be_dropped);
+ int batch_count;
+ unsigned long send_gen = 0;
restart:
+ batch_count = 0;
/*
* sendmsg calls here after having queued its message on the send
@@ -157,6 +160,17 @@ restart:
}
/*
+ * we record the send generation after doing the xmit acquire.
+ * if someone else manages to jump in and do some work, we'll use
+ * this to avoid a goto restart farther down.
+ *
+ * The acquire_in_xmit() check above ensures that only one
+ * caller can increment c_send_gen at any time.
+ */
+ conn->c_send_gen++;
+ send_gen = conn->c_send_gen;
+
+ /*
* rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
* we do the opposite to avoid races.
*/
@@ -202,6 +216,16 @@ restart:
if (!rm) {
unsigned int len;
+ batch_count++;
+
+ /* we want to process as big a batch as we can, but
+ * we also want to avoid softlockups. If we've been
+ * through a lot of messages, let's back off and see
+ * if anyone else jumps in
+ */
+ if (batch_count >= 1024)
+ goto over_batch;
+
spin_lock_irqsave(&conn->c_lock, flags);
if (!list_empty(&conn->c_send_queue)) {
@@ -357,9 +381,9 @@ restart:
}
}
+over_batch:
if (conn->c_trans->xmit_complete)
conn->c_trans->xmit_complete(conn);
-
release_in_xmit(conn);
/* Nuke any messages we decided not to retransmit. */
@@ -380,10 +404,15 @@ restart:
* If the transport cannot continue (i.e ret != 0), then it must
* call us when more room is available, such as from the tx
* completion handler.
+ *
+ * We have an extra generation check here so that if someone manages
+ * to jump in after our release_in_xmit, we'll see that they have done
+ * some work and we will skip our goto
*/
if (ret == 0) {
smp_mb();
- if (!list_empty(&conn->c_send_queue)) {
+ if (!list_empty(&conn->c_send_queue) &&
+ send_gen == conn->c_send_gen) {
rds_stats_inc(s_send_lock_queue_raced);
goto restart;
}
@@ -920,8 +949,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
return ret;
}
-int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t payload_len)
+int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
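
The c_send_gen logic added above is easier to follow in isolation: record the generation right after winning the transmit slot, cap how many messages one pass handles, and only restart if work remains and nobody else has bumped the generation in the meantime. A stripped-down, single-threaded model of that control flow (names are illustrative, not RDS code):

/* Minimal sketch of the send-generation idea: bump a generation counter
 * after acquiring the transmit slot, process a bounded batch, and only
 * restart when work is left and the generation is still ours. */
#include <stdbool.h>
#include <stdio.h>

struct conn_sketch {
	bool in_xmit;           /* RDS uses an atomic bit; plain bool is fine single-threaded */
	unsigned long send_gen;
	int queued;             /* stand-in for the send queue length */
};

static void xmit_worker(struct conn_sketch *c)
{
	enum { BATCH_LIMIT = 4 };

restart:
	if (c->in_xmit)
		return;          /* somebody else is transmitting; they will see our work */
	c->in_xmit = true;

	/* Record the generation after winning the xmit slot, as rds_send_xmit() does. */
	unsigned long my_gen = ++c->send_gen;

	int batch = 0;
	while (c->queued > 0 && batch < BATCH_LIMIT) {
		c->queued--;     /* "send" one message */
		batch++;
	}

	c->in_xmit = false;

	/* Only restart if work remains *and* nobody else ran in the meantime. */
	if (c->queued > 0 && my_gen == c->send_gen) {
		printf("batch of %d sent, %d left, restarting\n", batch, c->queued);
		goto restart;
	}
	printf("batch of %d sent, %d left, done\n", batch, c->queued);
}

int main(void)
{
	struct conn_sketch c = { .send_gen = 0, .queued = 10 };

	xmit_worker(&c);
	return 0;
}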
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index f9f564a6c960..973109c7b8e8 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -62,6 +62,7 @@ void rds_tcp_state_change(struct sock *sk)
case TCP_ESTABLISHED:
rds_connect_complete(conn);
break;
+ case TCP_CLOSE_WAIT:
case TCP_CLOSE:
rds_conn_drop(conn);
default:
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 23ab4dcd1d9f..0da49e34495f 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -45,12 +45,45 @@ static void rds_tcp_accept_worker(struct work_struct *work);
static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
static struct socket *rds_tcp_listen_sock;
+static int rds_tcp_keepalive(struct socket *sock)
+{
+ /* values below based on xs_udp_default_timeout */
+ int keepidle = 5; /* send a probe 'keepidle' secs after last data */
+ int keepcnt = 5; /* number of unack'ed probes before declaring dead */
+ int keepalive = 1;
+ int ret = 0;
+
+ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+ (char *)&keepalive, sizeof(keepalive));
+ if (ret < 0)
+ goto bail;
+
+ ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
+ (char *)&keepcnt, sizeof(keepcnt));
+ if (ret < 0)
+ goto bail;
+
+ ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
+ (char *)&keepidle, sizeof(keepidle));
+ if (ret < 0)
+ goto bail;
+
+ /* KEEPINTVL is the interval between successive probes. We follow
+ * the model in xs_tcp_finish_connecting() and re-use keepidle.
+ */
+ ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
+ (char *)&keepidle, sizeof(keepidle));
+bail:
+ return ret;
+}
+
static int rds_tcp_accept_one(struct socket *sock)
{
struct socket *new_sock = NULL;
struct rds_connection *conn;
int ret;
struct inet_sock *inet;
+ struct rds_tcp_connection *rs_tcp;
ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
sock->sk->sk_protocol, &new_sock);
@@ -63,6 +96,10 @@ static int rds_tcp_accept_one(struct socket *sock)
if (ret < 0)
goto out;
+ ret = rds_tcp_keepalive(new_sock);
+ if (ret < 0)
+ goto out;
+
rds_tcp_tune(new_sock);
inet = inet_sk(new_sock->sk);
@@ -77,6 +114,15 @@ static int rds_tcp_accept_one(struct socket *sock)
ret = PTR_ERR(conn);
goto out;
}
+ /* An incoming SYN request came in, and TCP just accepted it.
+ * We always create a new conn for listen side of TCP, and do not
+ * add it to the c_hash_list.
+ *
+ * If the client reboots, this conn will need to be cleaned up.
+ * rds_tcp_state_change() will do that cleanup
+ */
+ rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
+ WARN_ON(!rs_tcp || rs_tcp->t_sock);
/*
* see the comment above rds_queue_delayed_reconnect()
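
The keepalive tuning in rds_tcp_keepalive() maps directly onto the standard TCP socket options, so the same policy can be expressed for an ordinary userspace socket. A sketch using the same values (probe after 5 s idle, probe every 5 s, give up after 5 unanswered probes):

/* Sketch: apply the same keepalive policy as rds_tcp_keepalive() to a
 * userspace TCP socket.  setsockopt() replaces kernel_setsockopt(). */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

static int tune_keepalive(int fd)
{
	int keepalive = 1;
	int keepidle = 5;	/* first probe 5s after the last data */
	int keepcnt = 5;	/* give up after 5 unanswered probes */

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &keepalive, sizeof(keepalive)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &keepcnt, sizeof(keepcnt)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepidle, sizeof(keepidle)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &keepidle, sizeof(keepidle)))
		return -1;	/* errno describes which option failed */
	return 0;
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || tune_keepalive(fd) < 0) {
		perror("keepalive setup");
		return 1;
	}
	printf("keepalive configured\n");
	close(fd);
	return 0;
}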
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 43bac7c4dd9e..8ae603069a1a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1046,8 +1046,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
return 1;
}
-static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
@@ -1211,8 +1210,8 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
}
-static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 50005888be57..369ca81a8c5d 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -41,6 +41,9 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev,
{
unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
+ if (daddr)
+ memcpy(buff + 7, daddr, dev->addr_len);
+
*buff++ = ROSE_GFI | ROSE_Q_BIT;
*buff++ = 0x00;
*buff++ = ROSE_DATA;
@@ -53,43 +56,6 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev,
return -37;
}
-static int rose_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
- struct net_device *dev = skb->dev;
- struct net_device_stats *stats = &dev->stats;
- unsigned char *bp = (unsigned char *)skb->data;
- struct sk_buff *skbn;
- unsigned int len;
-
- if (arp_find(bp + 7, skb)) {
- return 1;
- }
-
- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- kfree_skb(skb);
- return 1;
- }
-
- if (skb->sk != NULL)
- skb_set_owner_w(skbn, skb->sk);
-
- kfree_skb(skb);
-
- len = skbn->len;
-
- if (!rose_route_frame(skbn, NULL)) {
- kfree_skb(skbn);
- stats->tx_errors++;
- return 1;
- }
-
- stats->tx_packets++;
- stats->tx_bytes += len;
-#endif
- return 1;
-}
-
static int rose_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
@@ -134,19 +100,26 @@ static int rose_close(struct net_device *dev)
static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
+ unsigned int len = skb->len;
if (!netif_running(dev)) {
printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n");
return NETDEV_TX_BUSY;
}
- dev_kfree_skb(skb);
- stats->tx_errors++;
+
+ if (!rose_route_frame(skb, NULL)) {
+ dev_kfree_skb(skb);
+ stats->tx_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += len;
return NETDEV_TX_OK;
}
static const struct header_ops rose_header_ops = {
.create = rose_header,
- .rebuild = rose_rebuild_header,
};
static const struct net_device_ops rose_netdev_ops = {
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 7b1670489638..0095b9a0b779 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -441,8 +441,7 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
* - sends a call data packet
* - may send an abort (abort code in control data)
*/
-static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t len)
+static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
struct rxrpc_transport *trans;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
@@ -482,7 +481,7 @@ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
switch (rx->sk.sk_state) {
case RXRPC_SERVER_LISTENING:
if (!m->msg_name) {
- ret = rxrpc_server_sendmsg(iocb, rx, m, len);
+ ret = rxrpc_server_sendmsg(rx, m, len);
break;
}
case RXRPC_SERVER_BOUND:
@@ -492,7 +491,7 @@ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
break;
}
case RXRPC_CLIENT_CONNECTED:
- ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
+ ret = rxrpc_client_sendmsg(rx, trans, m, len);
break;
default:
ret = -ENOTCONN;
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 481f89f93789..4505a691d88c 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -28,7 +28,7 @@
const char *rxrpc_pkts[] = {
"?00",
"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
- "?09", "?10", "?11", "?12", "?13", "?14", "?15"
+ "?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
};
/*
@@ -593,6 +593,20 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
rxrpc_queue_conn(conn);
}
+/*
+ * post endpoint-level events to the local endpoint
+ * - this includes debug and version messages
+ */
+static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
+ struct sk_buff *skb)
+{
+ _enter("%p,%p", local, skb);
+
+ atomic_inc(&local->usage);
+ skb_queue_tail(&local->event_queue, skb);
+ rxrpc_queue_work(&local->event_processor);
+}
+
static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
struct sk_buff *skb,
struct rxrpc_skb_priv *sp)
@@ -699,6 +713,11 @@ void rxrpc_data_ready(struct sock *sk)
goto bad_message;
}
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_VERSION) {
+ rxrpc_post_packet_to_local(local, skb);
+ goto out;
+ }
+
if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
(sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
goto bad_message;
@@ -731,6 +750,8 @@ void rxrpc_data_ready(struct sock *sk)
else
goto cant_route_call;
}
+
+out:
rxrpc_put_local(local);
return;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index ba9fd36d3f15..aef1bd294e17 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -152,11 +152,13 @@ struct rxrpc_local {
struct work_struct destroyer; /* endpoint destroyer */
struct work_struct acceptor; /* incoming call processor */
struct work_struct rejecter; /* packet reject writer */
+ struct work_struct event_processor; /* endpoint event processor */
struct list_head services; /* services listening on this endpoint */
struct list_head link; /* link in endpoint list */
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */
struct sk_buff_head reject_queue; /* packets awaiting rejection */
+ struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
spinlock_t lock; /* access lock */
rwlock_t services_lock; /* lock for services list */
atomic_t usage;
@@ -548,10 +550,9 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
extern unsigned rxrpc_resend_timeout;
int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
- struct rxrpc_transport *, struct msghdr *, size_t);
-int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *,
- size_t);
+int rxrpc_client_sendmsg(struct rxrpc_sock *, struct rxrpc_transport *,
+ struct msghdr *, size_t);
+int rxrpc_server_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
/*
* ar-peer.c
@@ -572,8 +573,7 @@ extern const struct file_operations rxrpc_connection_seq_fops;
* ar-recvmsg.c
*/
void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
-int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
- int);
+int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
/*
* ar-security.c
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index 87f7135d238b..ca904ed5400a 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -13,16 +13,22 @@
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
+#include <generated/utsrelease.h>
#include "ar-internal.h"
+static const char rxrpc_version_string[65] = "linux-" UTS_RELEASE " AF_RXRPC";
+
static LIST_HEAD(rxrpc_locals);
DEFINE_RWLOCK(rxrpc_local_lock);
static DECLARE_RWSEM(rxrpc_local_sem);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq);
static void rxrpc_destroy_local(struct work_struct *work);
+static void rxrpc_process_local_events(struct work_struct *work);
/*
* allocate a new local
@@ -37,11 +43,13 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
INIT_WORK(&local->destroyer, &rxrpc_destroy_local);
INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls);
INIT_WORK(&local->rejecter, &rxrpc_reject_packets);
+ INIT_WORK(&local->event_processor, &rxrpc_process_local_events);
INIT_LIST_HEAD(&local->services);
INIT_LIST_HEAD(&local->link);
init_rwsem(&local->defrag_sem);
skb_queue_head_init(&local->accept_queue);
skb_queue_head_init(&local->reject_queue);
+ skb_queue_head_init(&local->event_queue);
spin_lock_init(&local->lock);
rwlock_init(&local->services_lock);
atomic_set(&local->usage, 1);
@@ -264,10 +272,12 @@ static void rxrpc_destroy_local(struct work_struct *work)
ASSERT(list_empty(&local->services));
ASSERT(!work_pending(&local->acceptor));
ASSERT(!work_pending(&local->rejecter));
+ ASSERT(!work_pending(&local->event_processor));
/* finish cleaning up the local descriptor */
rxrpc_purge_queue(&local->accept_queue);
rxrpc_purge_queue(&local->reject_queue);
+ rxrpc_purge_queue(&local->event_queue);
kernel_sock_shutdown(local->socket, SHUT_RDWR);
sock_release(local->socket);
@@ -308,3 +318,91 @@ void __exit rxrpc_destroy_all_locals(void)
_leave("");
}
+
+/*
+ * Reply to a version request
+ */
+static void rxrpc_send_version_request(struct rxrpc_local *local,
+ struct rxrpc_header *hdr,
+ struct sk_buff *skb)
+{
+ struct sockaddr_in sin;
+ struct msghdr msg;
+ struct kvec iov[2];
+ size_t len;
+ int ret;
+
+ _enter("");
+
+ sin.sin_family = AF_INET;
+ sin.sin_port = udp_hdr(skb)->source;
+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+
+ msg.msg_name = &sin;
+ msg.msg_namelen = sizeof(sin);
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ hdr->seq = 0;
+ hdr->serial = 0;
+ hdr->type = RXRPC_PACKET_TYPE_VERSION;
+ hdr->flags = RXRPC_LAST_PACKET | (~hdr->flags & RXRPC_CLIENT_INITIATED);
+ hdr->userStatus = 0;
+ hdr->_rsvd = 0;
+
+ iov[0].iov_base = hdr;
+ iov[0].iov_len = sizeof(*hdr);
+ iov[1].iov_base = (char *)rxrpc_version_string;
+ iov[1].iov_len = sizeof(rxrpc_version_string);
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+ _proto("Tx VERSION (reply)");
+
+ ret = kernel_sendmsg(local->socket, &msg, iov, 2, len);
+ if (ret < 0)
+ _debug("sendmsg failed: %d", ret);
+
+ _leave("");
+}
+
+/*
+ * Process event packets targeted at a local endpoint.
+ */
+static void rxrpc_process_local_events(struct work_struct *work)
+{
+ struct rxrpc_local *local = container_of(work, struct rxrpc_local, event_processor);
+ struct sk_buff *skb;
+ char v;
+
+ _enter("");
+
+ atomic_inc(&local->usage);
+
+ while ((skb = skb_dequeue(&local->event_queue))) {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ kdebug("{%d},{%u}", local->debug_id, sp->hdr.type);
+
+ switch (sp->hdr.type) {
+ case RXRPC_PACKET_TYPE_VERSION:
+ if (skb_copy_bits(skb, 0, &v, 1) < 0)
+ return;
+ _proto("Rx VERSION { %02x }", v);
+ if (v == 0)
+ rxrpc_send_version_request(local, &sp->hdr, skb);
+ break;
+
+ default:
+ /* Just ignore anything we don't understand */
+ break;
+ }
+
+ rxrpc_put_local(local);
+ rxrpc_free_skb(skb);
+ }
+
+ rxrpc_put_local(local);
+ _leave("");
+}
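
rxrpc_send_version_request() assembles its reply from two iovecs, the rewritten header and the fixed version string, and hands both to kernel_sendmsg() as one datagram. The userspace analogue of that gather-write over a plain UDP socket, with an invented demo_hdr standing in for the rxrpc header:

/* Sketch: send a "header + version string" reply as one UDP datagram using
 * sendmsg() with two iovecs, mirroring the kernel_sendmsg() call above.
 * struct demo_hdr is made up for the example; it is not the rxrpc header. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

struct demo_hdr {
	uint32_t seq;
	uint32_t serial;
	uint8_t  type;
};

int main(void)
{
	static const char version_string[] = "demo-version-reply";
	struct demo_hdr hdr = { .seq = 0, .serial = 0, .type = 13 /* VERSION */ };
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9999),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	struct iovec iov[2] = {
		{ .iov_base = &hdr, .iov_len = sizeof(hdr) },
		{ .iov_base = (void *)version_string, .iov_len = sizeof(version_string) },
	};
	struct msghdr msg = {
		.msg_name = &dst, .msg_namelen = sizeof(dst),
		.msg_iov = iov, .msg_iovlen = 2,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || sendmsg(fd, &msg, 0) < 0) {
		perror("sendmsg");
		return 1;
	}
	printf("sent %zu byte reply\n", sizeof(hdr) + sizeof(version_string));
	close(fd);
	return 0;
}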
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 8331c95e1522..c0042807bfc6 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -23,8 +23,7 @@
*/
unsigned rxrpc_resend_timeout = 4 * HZ;
-static int rxrpc_send_data(struct kiocb *iocb,
- struct rxrpc_sock *rx,
+static int rxrpc_send_data(struct rxrpc_sock *rx,
struct rxrpc_call *call,
struct msghdr *msg, size_t len);
@@ -129,9 +128,8 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
* - caller holds the socket locked
* - the socket may be either a client socket or a server socket
*/
-int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
- struct rxrpc_transport *trans, struct msghdr *msg,
- size_t len)
+int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
+ struct msghdr *msg, size_t len)
{
struct rxrpc_conn_bundle *bundle;
enum rxrpc_command cmd;
@@ -191,7 +189,7 @@ int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
/* request phase complete for this client call */
ret = -EPROTO;
} else {
- ret = rxrpc_send_data(iocb, rx, call, msg, len);
+ ret = rxrpc_send_data(rx, call, msg, len);
}
rxrpc_put_call(call);
@@ -232,7 +230,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
ret = -EPROTO; /* request phase complete for this client call */
} else {
- ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
+ ret = rxrpc_send_data(call->socket, call, msg, len);
}
release_sock(&call->socket->sk);
@@ -271,8 +269,7 @@ EXPORT_SYMBOL(rxrpc_kernel_abort_call);
* send a message through a server socket
* - caller holds the socket locked
*/
-int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
- struct msghdr *msg, size_t len)
+int rxrpc_server_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
{
enum rxrpc_command cmd;
struct rxrpc_call *call;
@@ -313,7 +310,7 @@ int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
break;
}
- ret = rxrpc_send_data(iocb, rx, call, msg, len);
+ ret = rxrpc_send_data(rx, call, msg, len);
break;
case RXRPC_CMD_SEND_ABORT:
@@ -520,8 +517,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
* - must be called in process context
* - caller holds the socket locked
*/
-static int rxrpc_send_data(struct kiocb *iocb,
- struct rxrpc_sock *rx,
+static int rxrpc_send_data(struct rxrpc_sock *rx,
struct rxrpc_call *call,
struct msghdr *msg, size_t len)
{
@@ -546,11 +542,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
call->tx_pending = NULL;
copied = 0;
- if (len > iov_iter_count(&msg->msg_iter))
- len = iov_iter_count(&msg->msg_iter);
- while (len) {
- int copy;
-
+ do {
if (!skb) {
size_t size, chunk, max, space;
@@ -572,8 +564,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
max &= ~(call->conn->size_align - 1UL);
chunk = max;
- if (chunk > len && !more)
- chunk = len;
+ if (chunk > msg_data_left(msg) && !more)
+ chunk = msg_data_left(msg);
space = chunk + call->conn->size_align;
space &= ~(call->conn->size_align - 1UL);
@@ -616,23 +608,23 @@ static int rxrpc_send_data(struct kiocb *iocb,
sp = rxrpc_skb(skb);
/* append next segment of data to the current buffer */
- copy = skb_tailroom(skb);
- ASSERTCMP(copy, >, 0);
- if (copy > len)
- copy = len;
- if (copy > sp->remain)
- copy = sp->remain;
-
- _debug("add");
- ret = skb_add_data(skb, &msg->msg_iter, copy);
- _debug("added");
- if (ret < 0)
- goto efault;
- sp->remain -= copy;
- skb->mark += copy;
- copied += copy;
-
- len -= copy;
+ if (msg_data_left(msg) > 0) {
+ int copy = skb_tailroom(skb);
+ ASSERTCMP(copy, >, 0);
+ if (copy > msg_data_left(msg))
+ copy = msg_data_left(msg);
+ if (copy > sp->remain)
+ copy = sp->remain;
+
+ _debug("add");
+ ret = skb_add_data(skb, &msg->msg_iter, copy);
+ _debug("added");
+ if (ret < 0)
+ goto efault;
+ sp->remain -= copy;
+ skb->mark += copy;
+ copied += copy;
+ }
/* check for the far side aborting the call or a network error
* occurring */
@@ -640,7 +632,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
goto call_aborted;
/* add the packet to the send queue if it's now full */
- if (sp->remain <= 0 || (!len && !more)) {
+ if (sp->remain <= 0 ||
+ (msg_data_left(msg) == 0 && !more)) {
struct rxrpc_connection *conn = call->conn;
uint32_t seq;
size_t pad;
@@ -670,7 +663,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
sp->hdr.serviceId = conn->service_id;
sp->hdr.flags = conn->out_clientflag;
- if (len == 0 && !more)
+ if (msg_data_left(msg) == 0 && !more)
sp->hdr.flags |= RXRPC_LAST_PACKET;
else if (CIRC_SPACE(call->acks_head, call->acks_tail,
call->acks_winsz) > 1)
@@ -686,10 +679,10 @@ static int rxrpc_send_data(struct kiocb *iocb,
memcpy(skb->head, &sp->hdr,
sizeof(struct rxrpc_header));
- rxrpc_queue_packet(call, skb, !iov_iter_count(&msg->msg_iter) && !more);
+ rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
skb = NULL;
}
- }
+ } while (msg_data_left(msg) > 0);
success:
ret = copied;
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 19a560626dc4..b92beded7459 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -43,8 +43,8 @@ void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
* - we need to be careful about two or more threads calling recvmsg
* simultaneously
*/
-int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_call *call = NULL, *continue_call = NULL;
@@ -150,7 +150,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
&call->conn->trans->peer->srx, len);
msg->msg_namelen = len;
}
- sock_recv_ts_and_drops(msg, &rx->sk, skb);
+ sock_recv_timestamp(msg, &rx->sk, skb);
}
/* receive the message */
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 5f6288fa3f12..dc6a2d324bd8 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -13,26 +13,43 @@
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
+#include <linux/bpf.h>
+
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>
-#define BPF_TAB_MASK 15
+#define BPF_TAB_MASK 15
+#define ACT_BPF_NAME_LEN 256
+
+struct tcf_bpf_cfg {
+ struct bpf_prog *filter;
+ struct sock_filter *bpf_ops;
+ char *bpf_name;
+ u32 bpf_fd;
+ u16 bpf_num_ops;
+};
-static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
+static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
struct tcf_result *res)
{
- struct tcf_bpf *b = a->priv;
+ struct tcf_bpf *prog = act->priv;
int action, filter_res;
- spin_lock(&b->tcf_lock);
+ if (unlikely(!skb_mac_header_was_set(skb)))
+ return TC_ACT_UNSPEC;
+
+ spin_lock(&prog->tcf_lock);
- b->tcf_tm.lastuse = jiffies;
- bstats_update(&b->tcf_bstats, skb);
+ prog->tcf_tm.lastuse = jiffies;
+ bstats_update(&prog->tcf_bstats, skb);
- filter_res = BPF_PROG_RUN(b->filter, skb);
+ /* Needed here for accessing maps. */
+ rcu_read_lock();
+ filter_res = BPF_PROG_RUN(prog->filter, skb);
+ rcu_read_unlock();
/* A BPF program may overwrite the default action opcode.
* Similarly as in cls_bpf, if filter_res == -1 we use the
@@ -52,52 +69,87 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
break;
case TC_ACT_SHOT:
action = filter_res;
- b->tcf_qstats.drops++;
+ prog->tcf_qstats.drops++;
break;
case TC_ACT_UNSPEC:
- action = b->tcf_action;
+ action = prog->tcf_action;
break;
default:
action = TC_ACT_UNSPEC;
break;
}
- spin_unlock(&b->tcf_lock);
+ spin_unlock(&prog->tcf_lock);
return action;
}
-static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *a,
+static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
+{
+ return !prog->bpf_ops;
+}
+
+static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
+ struct sk_buff *skb)
+{
+ struct nlattr *nla;
+
+ if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
+ return -EMSGSIZE;
+
+ nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
+ sizeof(struct sock_filter));
+ if (nla == NULL)
+ return -EMSGSIZE;
+
+ memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
+
+ return 0;
+}
+
+static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
+ struct sk_buff *skb)
+{
+ if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd))
+ return -EMSGSIZE;
+
+ if (prog->bpf_name &&
+ nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
int bind, int ref)
{
unsigned char *tp = skb_tail_pointer(skb);
- struct tcf_bpf *b = a->priv;
+ struct tcf_bpf *prog = act->priv;
struct tc_act_bpf opt = {
- .index = b->tcf_index,
- .refcnt = b->tcf_refcnt - ref,
- .bindcnt = b->tcf_bindcnt - bind,
- .action = b->tcf_action,
+ .index = prog->tcf_index,
+ .refcnt = prog->tcf_refcnt - ref,
+ .bindcnt = prog->tcf_bindcnt - bind,
+ .action = prog->tcf_action,
};
- struct tcf_t t;
- struct nlattr *nla;
+ struct tcf_t tm;
+ int ret;
if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, b->bpf_num_ops))
- goto nla_put_failure;
-
- nla = nla_reserve(skb, TCA_ACT_BPF_OPS, b->bpf_num_ops *
- sizeof(struct sock_filter));
- if (!nla)
+ if (tcf_bpf_is_ebpf(prog))
+ ret = tcf_bpf_dump_ebpf_info(prog, skb);
+ else
+ ret = tcf_bpf_dump_bpf_info(prog, skb);
+ if (ret)
goto nla_put_failure;
- memcpy(nla_data(nla), b->bpf_ops, nla_len(nla));
+ tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install);
+ tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
+ tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);
- t.install = jiffies_to_clock_t(jiffies - b->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - b->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(b->tcf_tm.expires);
- if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(t), &t))
+ if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm))
goto nla_put_failure;
+
return skb->len;
nla_put_failure:
@@ -107,36 +159,21 @@ nla_put_failure:
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
[TCA_ACT_BPF_PARMS] = { .len = sizeof(struct tc_act_bpf) },
+ [TCA_ACT_BPF_FD] = { .type = NLA_U32 },
+ [TCA_ACT_BPF_NAME] = { .type = NLA_NUL_STRING, .len = ACT_BPF_NAME_LEN },
[TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 },
[TCA_ACT_BPF_OPS] = { .type = NLA_BINARY,
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
-static int tcf_bpf_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, struct tc_action *a,
- int ovr, int bind)
+static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
- struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
- struct tc_act_bpf *parm;
- struct tcf_bpf *b;
- u16 bpf_size, bpf_num_ops;
struct sock_filter *bpf_ops;
- struct sock_fprog_kern tmp;
+ struct sock_fprog_kern fprog_tmp;
struct bpf_prog *fp;
+ u16 bpf_size, bpf_num_ops;
int ret;
- if (!nla)
- return -EINVAL;
-
- ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
- if (ret < 0)
- return ret;
-
- if (!tb[TCA_ACT_BPF_PARMS] ||
- !tb[TCA_ACT_BPF_OPS_LEN] || !tb[TCA_ACT_BPF_OPS])
- return -EINVAL;
- parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
-
bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
return -EINVAL;
@@ -146,68 +183,165 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
return -EINVAL;
bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
- if (!bpf_ops)
+ if (bpf_ops == NULL)
return -ENOMEM;
memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);
- tmp.len = bpf_num_ops;
- tmp.filter = bpf_ops;
+ fprog_tmp.len = bpf_num_ops;
+ fprog_tmp.filter = bpf_ops;
- ret = bpf_prog_create(&fp, &tmp);
- if (ret)
- goto free_bpf_ops;
+ ret = bpf_prog_create(&fp, &fprog_tmp);
+ if (ret < 0) {
+ kfree(bpf_ops);
+ return ret;
+ }
- if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*b), bind);
- if (ret)
+ cfg->bpf_ops = bpf_ops;
+ cfg->bpf_num_ops = bpf_num_ops;
+ cfg->filter = fp;
+
+ return 0;
+}
+
+static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+{
+ struct bpf_prog *fp;
+ char *name = NULL;
+ u32 bpf_fd;
+
+ bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
+
+ fp = bpf_prog_get(bpf_fd);
+ if (IS_ERR(fp))
+ return PTR_ERR(fp);
+
+ if (fp->type != BPF_PROG_TYPE_SCHED_ACT) {
+ bpf_prog_put(fp);
+ return -EINVAL;
+ }
+
+ if (tb[TCA_ACT_BPF_NAME]) {
+ name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
+ nla_len(tb[TCA_ACT_BPF_NAME]),
+ GFP_KERNEL);
+ if (!name) {
+ bpf_prog_put(fp);
+ return -ENOMEM;
+ }
+ }
+
+ cfg->bpf_fd = bpf_fd;
+ cfg->bpf_name = name;
+ cfg->filter = fp;
+
+ return 0;
+}
+
+static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *act,
+ int replace, int bind)
+{
+ struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+ struct tc_act_bpf *parm;
+ struct tcf_bpf *prog;
+ struct tcf_bpf_cfg cfg;
+ bool is_bpf, is_ebpf;
+ int ret;
+
+ if (!nla)
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
+ if (ret < 0)
+ return ret;
+
+ is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+ is_ebpf = tb[TCA_ACT_BPF_FD];
+
+ if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
+ !tb[TCA_ACT_BPF_PARMS])
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
+ tcf_bpf_init_from_efd(tb, &cfg);
+ if (ret < 0)
+ return ret;
+
+ if (!tcf_hash_check(parm->index, act, bind)) {
+ ret = tcf_hash_create(parm->index, est, act,
+ sizeof(*prog), bind);
+ if (ret < 0)
goto destroy_fp;
ret = ACT_P_CREATED;
} else {
+ /* Don't override defaults. */
if (bind)
goto destroy_fp;
- tcf_hash_release(a, bind);
- if (!ovr) {
+
+ tcf_hash_release(act, bind);
+ if (!replace) {
ret = -EEXIST;
goto destroy_fp;
}
}
- b = to_bpf(a);
- spin_lock_bh(&b->tcf_lock);
- b->tcf_action = parm->action;
- b->bpf_num_ops = bpf_num_ops;
- b->bpf_ops = bpf_ops;
- b->filter = fp;
- spin_unlock_bh(&b->tcf_lock);
+ prog = to_bpf(act);
+ spin_lock_bh(&prog->tcf_lock);
+
+ prog->bpf_ops = cfg.bpf_ops;
+ prog->bpf_name = cfg.bpf_name;
+
+ if (cfg.bpf_num_ops)
+ prog->bpf_num_ops = cfg.bpf_num_ops;
+ if (cfg.bpf_fd)
+ prog->bpf_fd = cfg.bpf_fd;
+
+ prog->tcf_action = parm->action;
+ prog->filter = cfg.filter;
+
+ spin_unlock_bh(&prog->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(a);
+ tcf_hash_insert(act);
+
return ret;
destroy_fp:
- bpf_prog_destroy(fp);
-free_bpf_ops:
- kfree(bpf_ops);
+ if (is_ebpf)
+ bpf_prog_put(cfg.filter);
+ else
+ bpf_prog_destroy(cfg.filter);
+
+ kfree(cfg.bpf_ops);
+ kfree(cfg.bpf_name);
+
return ret;
}
-static void tcf_bpf_cleanup(struct tc_action *a, int bind)
+static void tcf_bpf_cleanup(struct tc_action *act, int bind)
{
- struct tcf_bpf *b = a->priv;
+ const struct tcf_bpf *prog = act->priv;
- bpf_prog_destroy(b->filter);
+ if (tcf_bpf_is_ebpf(prog))
+ bpf_prog_put(prog->filter);
+ else
+ bpf_prog_destroy(prog->filter);
}
-static struct tc_action_ops act_bpf_ops = {
- .kind = "bpf",
- .type = TCA_ACT_BPF,
- .owner = THIS_MODULE,
- .act = tcf_bpf,
- .dump = tcf_bpf_dump,
- .cleanup = tcf_bpf_cleanup,
- .init = tcf_bpf_init,
+static struct tc_action_ops act_bpf_ops __read_mostly = {
+ .kind = "bpf",
+ .type = TCA_ACT_BPF,
+ .owner = THIS_MODULE,
+ .act = tcf_bpf,
+ .dump = tcf_bpf_dump,
+ .cleanup = tcf_bpf_cleanup,
+ .init = tcf_bpf_init,
};
static int __init bpf_init_module(void)
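
act_bpf (and cls_bpf below) now accepts either a classic BPF opcode array or an eBPF program fd, never both, and later tells the two apart by whether bpf_ops is NULL. A schematic of that exactly-one-source validation and the matching teardown, with invented names rather than the kernel's:

/* Sketch: accept exactly one of two configuration sources and remember
 * which one was used so cleanup can pick the matching release path.
 * All names are invented; this is the shape of the logic, not kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct prog_cfg {
	unsigned short *classic_ops;	/* non-NULL only for classic BPF */
	int ebpf_fd;			/* valid only for eBPF */
};

static bool cfg_is_ebpf(const struct prog_cfg *cfg)
{
	return cfg->classic_ops == NULL;	/* same test as tcf_bpf_is_ebpf() */
}

static int cfg_init(struct prog_cfg *cfg, const unsigned short *ops, int nops, int fd)
{
	bool is_classic = ops && nops > 0;
	bool is_ebpf = fd >= 0;

	if (is_classic == is_ebpf)		/* both or neither: reject */
		return -1;

	*cfg = (struct prog_cfg){ .ebpf_fd = -1 };
	if (is_classic) {
		cfg->classic_ops = malloc(nops * sizeof(*ops));
		if (!cfg->classic_ops)
			return -1;
		for (int i = 0; i < nops; i++)
			cfg->classic_ops[i] = ops[i];
	} else {
		cfg->ebpf_fd = fd;
	}
	return 0;
}

static void cfg_release(struct prog_cfg *cfg)
{
	if (cfg_is_ebpf(cfg))
		printf("would drop eBPF prog reference for fd %d\n", cfg->ebpf_fd);
	else
		free(cfg->classic_ops);		/* classic path owns the opcode copy */
}

int main(void)
{
	const unsigned short ops[] = { 0x06, 0x00 };	/* placeholder opcodes */
	struct prog_cfg cfg;

	if (cfg_init(&cfg, ops, 2, -1) == 0) {
		printf("classic config accepted, is_ebpf=%d\n", cfg_is_ebpf(&cfg));
		cfg_release(&cfg);
	}
	if (cfg_init(&cfg, ops, 2, 3) != 0)
		printf("rejected: both sources given\n");
	return 0;
}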
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 8e472518f9f6..295d14bd6c67 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -63,7 +63,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
skb->mark = c->mark;
/* using overlimits stats to count how many packets marked */
ca->tcf_qstats.overlimits++;
- nf_ct_put(c);
goto out;
}
@@ -82,7 +81,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
nf_ct_put(c);
out:
- skb->nfct = NULL;
spin_unlock(&ca->tcf_lock);
return ca->tcf_action;
}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 5953517ec059..3f63ceac8e01 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -157,7 +157,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
if (!(at & AT_EGRESS)) {
if (m->tcfm_ok_push)
- skb_push(skb2, skb2->dev->hard_header_len);
+ skb_push(skb2, skb->mac_len);
}
/* mirror is always swallowed */
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index baef987fe2c0..b6ef9a04de06 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -286,7 +286,7 @@ replay:
RCU_INIT_POINTER(*back, next);
tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
- tcf_destroy(tp);
+ tcf_destroy(tp, true);
err = 0;
goto errout;
}
@@ -301,14 +301,19 @@ replay:
err = -EEXIST;
if (n->nlmsg_flags & NLM_F_EXCL) {
if (tp_created)
- tcf_destroy(tp);
+ tcf_destroy(tp, true);
goto errout;
}
break;
case RTM_DELTFILTER:
err = tp->ops->delete(tp, fh);
- if (err == 0)
+ if (err == 0) {
+ struct tcf_proto *next = rtnl_dereference(tp->next);
+
tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+ if (tcf_destroy(tp, false))
+ RCU_INIT_POINTER(*back, next);
+ }
goto errout;
case RTM_GETTFILTER:
err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
@@ -329,7 +334,7 @@ replay:
tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
} else {
if (tp_created)
- tcf_destroy(tp);
+ tcf_destroy(tp, true);
}
errout:
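
The classifier ->destroy() callbacks now take a force flag and report through their return value whether the object was actually torn down; tc_ctl_tfilter() above only unlinks the proto when a non-forced destroy succeeds. A toy model of that contract (illustrative names only):

/* Sketch: a destroy(force) that refuses to tear down a non-empty container
 * unless forced, and returns whether destruction actually happened.  This is
 * the contract the cls_* destroy callbacks follow after this change. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct filter_list {
	int nr_filters;			/* stand-in for the classifier's filter chain */
};

static bool filter_list_destroy(struct filter_list **headp, bool force)
{
	struct filter_list *head = *headp;

	if (!force && head->nr_filters > 0)
		return false;		/* still in use; caller keeps the link */

	free(head);
	*headp = NULL;			/* caller may now unlink it */
	return true;
}

int main(void)
{
	struct filter_list *head = calloc(1, sizeof(*head));

	if (!head)
		return 1;

	head->nr_filters = 1;
	printf("non-forced destroy: %s\n",
	       filter_list_destroy(&head, false) ? "destroyed" : "kept (non-empty)");

	head->nr_filters = 0;
	printf("non-forced destroy: %s\n",
	       filter_list_destroy(&head, false) ? "destroyed" : "kept (non-empty)");
	return 0;
}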
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index fc399db86f11..0b8c3ace671f 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -96,11 +96,14 @@ static void basic_delete_filter(struct rcu_head *head)
kfree(f);
}
-static void basic_destroy(struct tcf_proto *tp)
+static bool basic_destroy(struct tcf_proto *tp, bool force)
{
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f, *n;
+ if (!force && !list_empty(&head->flist))
+ return false;
+
list_for_each_entry_safe(f, n, &head->flist, link) {
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
@@ -108,6 +111,7 @@ static void basic_destroy(struct tcf_proto *tp)
}
RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
+ return true;
}
static int basic_delete(struct tcf_proto *tp, unsigned long arg)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 5f3ee9e4b5bf..91bd9c19471d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -16,6 +16,8 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
+#include <linux/bpf.h>
+
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
@@ -24,6 +26,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
+#define CLS_BPF_NAME_LEN 256
+
struct cls_bpf_head {
struct list_head plist;
u32 hgen;
@@ -32,18 +36,24 @@ struct cls_bpf_head {
struct cls_bpf_prog {
struct bpf_prog *filter;
- struct sock_filter *bpf_ops;
- struct tcf_exts exts;
- struct tcf_result res;
struct list_head link;
+ struct tcf_result res;
+ struct tcf_exts exts;
u32 handle;
- u16 bpf_num_ops;
+ union {
+ u32 bpf_fd;
+ u16 bpf_num_ops;
+ };
+ struct sock_filter *bpf_ops;
+ const char *bpf_name;
struct tcf_proto *tp;
struct rcu_head rcu;
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
[TCA_BPF_CLASSID] = { .type = NLA_U32 },
+ [TCA_BPF_FD] = { .type = NLA_U32 },
+ [TCA_BPF_NAME] = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
[TCA_BPF_OPS_LEN] = { .type = NLA_U16 },
[TCA_BPF_OPS] = { .type = NLA_BINARY,
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
@@ -54,8 +64,13 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
{
struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
struct cls_bpf_prog *prog;
- int ret;
+ int ret = -1;
+
+ if (unlikely(!skb_mac_header_was_set(skb)))
+ return -1;
+ /* Needed here for accessing maps. */
+ rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) {
int filter_res = BPF_PROG_RUN(prog->filter, skb);
@@ -70,10 +85,16 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (ret < 0)
continue;
- return ret;
+ break;
}
+ rcu_read_unlock();
- return -1;
+ return ret;
+}
+
+static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
+{
+ return !prog->bpf_ops;
}
static int cls_bpf_init(struct tcf_proto *tp)
@@ -94,8 +115,12 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
tcf_exts_destroy(&prog->exts);
- bpf_prog_destroy(prog->filter);
+ if (cls_bpf_is_ebpf(prog))
+ bpf_prog_put(prog->filter);
+ else
+ bpf_prog_destroy(prog->filter);
+ kfree(prog->bpf_name);
kfree(prog->bpf_ops);
kfree(prog);
}
@@ -114,14 +139,18 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
+
return 0;
}
-static void cls_bpf_destroy(struct tcf_proto *tp)
+static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog, *tmp;
+ if (!force && !list_empty(&head->plist))
+ return false;
+
list_for_each_entry_safe(prog, tmp, &head->plist, link) {
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
@@ -130,6 +159,7 @@ static void cls_bpf_destroy(struct tcf_proto *tp)
RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
+ return true;
}
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
@@ -151,69 +181,121 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
return ret;
}
-static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
- struct cls_bpf_prog *prog,
- unsigned long base, struct nlattr **tb,
- struct nlattr *est, bool ovr)
+static int cls_bpf_prog_from_ops(struct nlattr **tb,
+ struct cls_bpf_prog *prog, u32 classid)
{
struct sock_filter *bpf_ops;
- struct tcf_exts exts;
- struct sock_fprog_kern tmp;
+ struct sock_fprog_kern fprog_tmp;
struct bpf_prog *fp;
u16 bpf_size, bpf_num_ops;
- u32 classid;
int ret;
- if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
- return -EINVAL;
-
- tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
- ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
- if (ret < 0)
- return ret;
-
- classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
- if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) {
- ret = -EINVAL;
- goto errout;
- }
+ if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
+ return -EINVAL;
bpf_size = bpf_num_ops * sizeof(*bpf_ops);
- if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
- ret = -EINVAL;
- goto errout;
- }
+ if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
+ return -EINVAL;
bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
- if (bpf_ops == NULL) {
- ret = -ENOMEM;
- goto errout;
- }
+ if (bpf_ops == NULL)
+ return -ENOMEM;
memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
- tmp.len = bpf_num_ops;
- tmp.filter = bpf_ops;
+ fprog_tmp.len = bpf_num_ops;
+ fprog_tmp.filter = bpf_ops;
- ret = bpf_prog_create(&fp, &tmp);
- if (ret)
- goto errout_free;
+ ret = bpf_prog_create(&fp, &fprog_tmp);
+ if (ret < 0) {
+ kfree(bpf_ops);
+ return ret;
+ }
- prog->bpf_num_ops = bpf_num_ops;
prog->bpf_ops = bpf_ops;
+ prog->bpf_num_ops = bpf_num_ops;
+ prog->bpf_name = NULL;
+
prog->filter = fp;
prog->res.classid = classid;
+ return 0;
+}
+
+static int cls_bpf_prog_from_efd(struct nlattr **tb,
+ struct cls_bpf_prog *prog, u32 classid)
+{
+ struct bpf_prog *fp;
+ char *name = NULL;
+ u32 bpf_fd;
+
+ bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
+
+ fp = bpf_prog_get(bpf_fd);
+ if (IS_ERR(fp))
+ return PTR_ERR(fp);
+
+ if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
+ bpf_prog_put(fp);
+ return -EINVAL;
+ }
+
+ if (tb[TCA_BPF_NAME]) {
+ name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
+ nla_len(tb[TCA_BPF_NAME]),
+ GFP_KERNEL);
+ if (!name) {
+ bpf_prog_put(fp);
+ return -ENOMEM;
+ }
+ }
+
+ prog->bpf_ops = NULL;
+ prog->bpf_fd = bpf_fd;
+ prog->bpf_name = name;
+
+ prog->filter = fp;
+ prog->res.classid = classid;
+
+ return 0;
+}
+
+static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
+ struct cls_bpf_prog *prog,
+ unsigned long base, struct nlattr **tb,
+ struct nlattr *est, bool ovr)
+{
+ struct tcf_exts exts;
+ bool is_bpf, is_ebpf;
+ u32 classid;
+ int ret;
+
+ is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
+ is_ebpf = tb[TCA_BPF_FD];
+
+ if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
+ !tb[TCA_BPF_CLASSID])
+ return -EINVAL;
+
+ tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+ ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
+ if (ret < 0)
+ return ret;
+
+ classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+
+ ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
+ cls_bpf_prog_from_efd(tb, prog, classid);
+ if (ret < 0) {
+ tcf_exts_destroy(&exts);
+ return ret;
+ }
+
tcf_bind_filter(tp, &prog->res, base);
tcf_exts_change(tp, &prog->exts, &exts);
return 0;
-errout_free:
- kfree(bpf_ops);
-errout:
- tcf_exts_destroy(&exts);
- return ret;
}
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
@@ -297,11 +379,43 @@ errout:
return ret;
}
+static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ struct nlattr *nla;
+
+ if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
+ return -EMSGSIZE;
+
+ nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
+ sizeof(struct sock_filter));
+ if (nla == NULL)
+ return -EMSGSIZE;
+
+ memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
+
+ return 0;
+}
+
+static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
+ return -EMSGSIZE;
+
+ if (prog->bpf_name &&
+ nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *tm)
{
struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
- struct nlattr *nest, *nla;
+ struct nlattr *nest;
+ int ret;
if (prog == NULL)
return skb->len;
@@ -314,16 +428,14 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
goto nla_put_failure;
- if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
- goto nla_put_failure;
- nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
- sizeof(struct sock_filter));
- if (nla == NULL)
+ if (cls_bpf_is_ebpf(prog))
+ ret = cls_bpf_dump_ebpf_info(prog, skb);
+ else
+ ret = cls_bpf_dump_bpf_info(prog, skb);
+ if (ret)
goto nla_put_failure;
- memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
-
if (tcf_exts_dump(skb, &prog->exts) < 0)
goto nla_put_failure;
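
The hunks above split cls_bpf configuration into a classic-BPF path (TCA_BPF_OPS / TCA_BPF_OPS_LEN) and an eBPF path (TCA_BPF_FD / TCA_BPF_NAME), and cls_bpf_modify_existing() accepts exactly one of the two plus a classid. The standalone sketch below mirrors only that validation; struct bpf_cfg and validate() are invented for illustration and are not kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    struct bpf_cfg {
    	bool has_ops;      /* TCA_BPF_OPS + TCA_BPF_OPS_LEN present */
    	bool has_fd;       /* TCA_BPF_FD present */
    	bool has_classid;  /* TCA_BPF_CLASSID present */
    };

    static int validate(const struct bpf_cfg *c)
    {
    	bool is_bpf = c->has_ops;
    	bool is_ebpf = c->has_fd;

    	/* either classic ops or an eBPF fd, never both, never neither */
    	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) || !c->has_classid)
    		return -1;   /* the kernel returns -EINVAL here */
    	return 0;
    }

    int main(void)
    {
    	struct bpf_cfg classic = { .has_ops = true, .has_classid = true };
    	struct bpf_cfg both    = { .has_ops = true, .has_fd = true,
    				   .has_classid = true };

    	printf("classic: %d, both: %d\n", validate(&classic), validate(&both));
    	return 0;
    }
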
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 221697ab0247..ea611b216412 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -143,14 +143,18 @@ errout:
return err;
}
-static void cls_cgroup_destroy(struct tcf_proto *tp)
+static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
{
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
+ if (!force)
+ return false;
+
if (head) {
RCU_INIT_POINTER(tp->root, NULL);
call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
}
+ return true;
}
static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 461410394d08..a620c4e288a5 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -557,17 +557,21 @@ static int flow_init(struct tcf_proto *tp)
return 0;
}
-static void flow_destroy(struct tcf_proto *tp)
+static bool flow_destroy(struct tcf_proto *tp, bool force)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f, *next;
+ if (!force && !list_empty(&head->filters))
+ return false;
+
list_for_each_entry_safe(f, next, &head->filters, list) {
list_del_rcu(&f->list);
call_rcu(&f->rcu, flow_destroy_filter);
}
RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
+ return true;
}
static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index a5269f76004c..715e01e5910a 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -33,6 +33,7 @@
struct fw_head {
u32 mask;
+ bool mask_set;
struct fw_filter __rcu *ht[HTSIZE];
struct rcu_head rcu;
};
@@ -113,6 +114,14 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
static int fw_init(struct tcf_proto *tp)
{
+ struct fw_head *head;
+
+ head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
+ if (head == NULL)
+ return -ENOBUFS;
+
+ head->mask_set = false;
+ rcu_assign_pointer(tp->root, head);
return 0;
}
@@ -124,14 +133,20 @@ static void fw_delete_filter(struct rcu_head *head)
kfree(f);
}
-static void fw_destroy(struct tcf_proto *tp)
+static bool fw_destroy(struct tcf_proto *tp, bool force)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f;
int h;
if (head == NULL)
- return;
+ return true;
+
+ if (!force) {
+ for (h = 0; h < HTSIZE; h++)
+ if (rcu_access_pointer(head->ht[h]))
+ return false;
+ }
for (h = 0; h < HTSIZE; h++) {
while ((f = rtnl_dereference(head->ht[h])) != NULL) {
@@ -143,6 +158,7 @@ static void fw_destroy(struct tcf_proto *tp)
}
RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
+ return true;
}
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
@@ -286,17 +302,11 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
if (!handle)
return -EINVAL;
- if (head == NULL) {
- u32 mask = 0xFFFFFFFF;
+ if (!head->mask_set) {
+ head->mask = 0xFFFFFFFF;
if (tb[TCA_FW_MASK])
- mask = nla_get_u32(tb[TCA_FW_MASK]);
-
- head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
- if (head == NULL)
- return -ENOBUFS;
- head->mask = mask;
-
- rcu_assign_pointer(tp->root, head);
+ head->mask = nla_get_u32(tb[TCA_FW_MASK]);
+ head->mask_set = true;
}
f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
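
With fw_init() now allocating the head up front, the default mask is applied lazily: the first fw_change() fills it in (from TCA_FW_MASK if present, otherwise all-ones) and sets mask_set so later changes leave it alone. A minimal compilable sketch of that one-shot default, using invented names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fw_cfg {
    	uint32_t mask;
    	bool mask_set;
    };

    static void fw_apply_mask(struct fw_cfg *cfg, bool have_user_mask,
    			  uint32_t user_mask)
    {
    	if (!cfg->mask_set) {
    		cfg->mask = have_user_mask ? user_mask : 0xFFFFFFFFu;
    		cfg->mask_set = true;
    	}
    }

    int main(void)
    {
    	struct fw_cfg cfg = { 0 };

    	fw_apply_mask(&cfg, false, 0);    /* first change: default mask */
    	fw_apply_mask(&cfg, true, 0xff);  /* later masks are ignored */
    	printf("mask=0x%x\n", (unsigned int)cfg.mask); /* 0xffffffff */
    	return 0;
    }
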
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 2ecd24688554..08a3b0a6f5ab 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -258,6 +258,13 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
static int route4_init(struct tcf_proto *tp)
{
+ struct route4_head *head;
+
+ head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
+ if (head == NULL)
+ return -ENOBUFS;
+
+ rcu_assign_pointer(tp->root, head);
return 0;
}
@@ -270,13 +277,20 @@ route4_delete_filter(struct rcu_head *head)
kfree(f);
}
-static void route4_destroy(struct tcf_proto *tp)
+static bool route4_destroy(struct tcf_proto *tp, bool force)
{
struct route4_head *head = rtnl_dereference(tp->root);
int h1, h2;
if (head == NULL)
- return;
+ return true;
+
+ if (!force) {
+ for (h1 = 0; h1 <= 256; h1++) {
+ if (rcu_access_pointer(head->table[h1]))
+ return false;
+ }
+ }
for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
@@ -301,6 +315,7 @@ static void route4_destroy(struct tcf_proto *tp)
}
RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
+ return true;
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
@@ -484,13 +499,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
err = -ENOBUFS;
- if (head == NULL) {
- head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
- if (head == NULL)
- goto errout;
- rcu_assign_pointer(tp->root, head);
- }
-
f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
if (!f)
goto errout;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index edd8ade3fbc1..02fa82792dab 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -291,13 +291,20 @@ rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
kfree_rcu(f, rcu);
}
-static void rsvp_destroy(struct tcf_proto *tp)
+static bool rsvp_destroy(struct tcf_proto *tp, bool force)
{
struct rsvp_head *data = rtnl_dereference(tp->root);
int h1, h2;
if (data == NULL)
- return;
+ return true;
+
+ if (!force) {
+ for (h1 = 0; h1 < 256; h1++) {
+ if (rcu_access_pointer(data->ht[h1]))
+ return false;
+ }
+ }
RCU_INIT_POINTER(tp->root, NULL);
@@ -319,6 +326,7 @@ static void rsvp_destroy(struct tcf_proto *tp)
}
}
kfree_rcu(data, rcu);
+ return true;
}
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index bd49bf547a47..a557dbaf5afe 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -468,11 +468,14 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
}
}
-static void tcindex_destroy(struct tcf_proto *tp)
+static bool tcindex_destroy(struct tcf_proto *tp, bool force)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcf_walker walker;
+ if (!force)
+ return false;
+
pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
walker.count = 0;
walker.skip = 0;
@@ -481,6 +484,7 @@ static void tcindex_destroy(struct tcf_proto *tp)
RCU_INIT_POINTER(tp->root, NULL);
call_rcu(&p->rcu, __tcindex_destroy);
+ return true;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 95fdf4e40051..cab9e9b43967 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -463,13 +463,35 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
return -ENOENT;
}
-static void u32_destroy(struct tcf_proto *tp)
+static bool ht_empty(struct tc_u_hnode *ht)
+{
+ unsigned int h;
+
+ for (h = 0; h <= ht->divisor; h++)
+ if (rcu_access_pointer(ht->ht[h]))
+ return false;
+
+ return true;
+}
+
+static bool u32_destroy(struct tcf_proto *tp, bool force)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
WARN_ON(root_ht == NULL);
+ if (!force) {
+ if (root_ht) {
+ if (root_ht->refcnt > 1)
+ return false;
+ if (root_ht->refcnt == 1) {
+ if (!ht_empty(root_ht))
+ return false;
+ }
+ }
+ }
+
if (root_ht && --root_ht->refcnt == 0)
u32_destroy_hnode(tp, root_ht);
@@ -494,6 +516,7 @@ static void u32_destroy(struct tcf_proto *tp)
}
tp->data = NULL;
+ return true;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
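
ht_empty() above lets a non-forced u32_destroy() refuse teardown while any bucket of the root hash table is still populated. A rough non-RCU analogue of that emptiness scan (plain pointers instead of RCU-protected buckets):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool table_empty(void *const *slots, size_t nslots)
    {
    	for (size_t i = 0; i < nslots; i++)
    		if (slots[i])
    			return false;
    	return true;
    }

    int main(void)
    {
    	int x;
    	void *slots[4] = { NULL, &x, NULL, NULL };

    	printf("empty: %d\n", table_empty(slots, 4)); /* prints 0 */
    	return 0;
    }
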
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index f03c3de16c27..73e2ed576ceb 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -34,7 +34,6 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
{
struct text_match *tm = EM_TEXT_PRIV(m);
int from, to;
- struct ts_state state;
from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
from += tm->from_offset;
@@ -42,7 +41,7 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
to += tm->to_offset;
- return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX;
+ return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
}
static int em_text_change(struct net *net, void *data, int len,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 243b7d169d61..ad9eed70bc8f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1858,11 +1858,15 @@ reclassify:
}
EXPORT_SYMBOL(tc_classify);
-void tcf_destroy(struct tcf_proto *tp)
+bool tcf_destroy(struct tcf_proto *tp, bool force)
{
- tp->ops->destroy(tp);
- module_put(tp->ops->owner);
- kfree_rcu(tp, rcu);
+ if (tp->ops->destroy(tp, force)) {
+ module_put(tp->ops->owner);
+ kfree_rcu(tp, rcu);
+ return true;
+ }
+
+ return false;
}
void tcf_destroy_chain(struct tcf_proto __rcu **fl)
@@ -1871,7 +1875,7 @@ void tcf_destroy_chain(struct tcf_proto __rcu **fl)
while ((tp = rtnl_dereference(*fl)) != NULL) {
RCU_INIT_POINTER(*fl, tp->next);
- tcf_destroy(tp);
+ tcf_destroy(tp, true);
}
}
EXPORT_SYMBOL(tcf_destroy_chain);
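
The tcf_destroy() change gives every classifier's ->destroy() a force flag and a boolean return: the wrapper drops the module reference and frees the tcf_proto only when the classifier reports that it actually tore itself down, while tcf_destroy_chain() keeps passing force = true. A hedged, non-kernel sketch of that contract (all names invented):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct proto;

    struct proto_ops {
    	bool (*destroy)(struct proto *p, bool force);
    };

    struct proto {
    	const struct proto_ops *ops;
    	int nr_filters;
    };

    static bool demo_destroy(struct proto *p, bool force)
    {
    	if (!force && p->nr_filters > 0)
    		return false;            /* still in use, keep it */
    	p->nr_filters = 0;               /* tear down internal state */
    	return true;
    }

    static bool destroy_proto(struct proto *p, bool force)
    {
    	if (p->ops->destroy(p, force)) {
    		free(p);                 /* mirrors kfree_rcu(tp, rcu) */
    		return true;
    	}
    	return false;
    }

    int main(void)
    {
    	static const struct proto_ops ops = { .destroy = demo_destroy };
    	struct proto *p = malloc(sizeof(*p));

    	p->ops = &ops;
    	p->nr_filters = 1;
    	printf("non-forced: %d\n", destroy_proto(p, false)); /* 0: refused */
    	printf("forced:     %d\n", destroy_proto(p, true));  /* 1: freed */
    	return 0;
    }
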
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index de28f8e968e8..7a0bdb16ac92 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -164,7 +164,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
sch->limit = DEFAULT_CODEL_LIMIT;
- codel_params_init(&q->params);
+ codel_params_init(&q->params, sch);
codel_vars_init(&q->vars);
codel_stats_init(&q->stats);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index dfcea20e3171..f377702d4b91 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Meant to be mostly used for localy generated traffic :
+ * Meant to be mostly used for locally generated traffic :
* Fast classification depends on skb->sk being set before reaching us.
* If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
* All packets belonging to a socket are considered as a 'flow'.
@@ -63,7 +63,7 @@ struct fq_flow {
struct sk_buff *tail; /* last skb in the list */
unsigned long age; /* jiffies when flow was emptied, for gc */
};
- struct rb_node fq_node; /* anchor in fq_root[] trees */
+ struct rb_node fq_node; /* anchor in fq_root[] trees */
struct sock *sk;
int qlen; /* number of packets in flow queue */
int credit;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 1e52decb7b59..c244c45b78d7 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -391,7 +391,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
q->perturbation = prandom_u32();
INIT_LIST_HEAD(&q->new_flows);
INIT_LIST_HEAD(&q->old_flows);
- codel_params_init(&q->cparams);
+ codel_params_init(&q->cparams, sch);
codel_stats_init(&q->cstats);
q->cparams.ecn = true;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index a4ca4517cdc8..634529e0ce6b 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -229,7 +229,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break;
}
- if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
+ if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
q->backlog += qdisc_pkt_len(skb);
return qdisc_enqueue_tail(skb, sch);
}
@@ -553,7 +553,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.limit = q->limit;
opt.DP = q->DP;
- opt.backlog = q->backlog;
+ opt.backlog = gred_backlog(table, q, sch);
opt.prio = q->prio;
opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index eb5b8445fef9..4cdbfb85686a 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -88,11 +88,19 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* ------------------------------------------------------------- */
+static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ net_inc_ingress_queue();
+
+ return 0;
+}
+
static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
tcf_destroy_chain(&p->filter_list);
+ net_dec_ingress_queue();
}
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -124,6 +132,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.id = "ingress",
.priv_size = sizeof(struct ingress_qdisc_data),
.enqueue = ingress_enqueue,
+ .init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
.owner = THIS_MODULE,
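
The new ingress_init()/ingress_destroy() pair simply keeps a global count of live ingress qdiscs via net_inc_ingress_queue() and net_dec_ingress_queue(). A toy illustration of that init/destroy pairing (the counter here is invented, not the kernel symbol):

    #include <stdio.h>

    static int ingress_needed;

    static void demo_init(void)    { ingress_needed++; }
    static void demo_destroy(void) { ingress_needed--; }

    int main(void)
    {
    	demo_init();
    	demo_init();
    	demo_destroy();
    	printf("live ingress qdiscs: %d\n", ingress_needed); /* 1 */
    	return 0;
    }
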
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 179f1c8c0d8b..956ead2cab9a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -560,8 +560,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
tfifo_dequeue:
skb = __skb_dequeue(&sch->q);
if (skb) {
-deliver:
qdisc_qstats_backlog_dec(sch, skb);
+deliver:
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
return skb;
@@ -578,6 +578,7 @@ deliver:
rb_erase(p, &q->t_root);
sch->q.qlen--;
+ qdisc_qstats_backlog_dec(sch, skb);
skb->next = NULL;
skb->prev = NULL;
skb->tstamp = netem_skb_cb(skb)->tstamp_save;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 8f34b27d5775..53b7acde9aa3 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1322,8 +1322,7 @@ static __init int sctp_init(void)
int max_share;
int order;
- BUILD_BUG_ON(sizeof(struct sctp_ulpevent) >
- sizeof(((struct sk_buff *) 0)->cb));
+ sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
/* Allocate bind_bucket and chunk caches. */
status = -ENOBUFS;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index aafe94bf292e..f09de7fac2e6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -102,11 +102,6 @@ static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
-extern struct kmem_cache *sctp_bucket_cachep;
-extern long sysctl_sctp_mem[3];
-extern int sysctl_sctp_rmem[3];
-extern int sysctl_sctp_wmem[3];
-
static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;
@@ -1586,8 +1581,7 @@ static int sctp_error(struct sock *sk, int flags, int err)
static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
-static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t msg_len)
+static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
@@ -2066,9 +2060,8 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
* flags - flags sent or received with the user message, see Section
* 5 for complete description of the flags.
*/
-static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int noblock, int flags, int *addr_len)
{
struct sctp_ulpevent *event = NULL;
struct sctp_sock *sp = sctp_sk(sk);
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 2e9ada10fd84..26d50c565f54 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -58,10 +58,6 @@ static unsigned long max_autoclose_max =
(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
-extern long sysctl_sctp_mem[3];
-extern int sysctl_sctp_rmem[3];
-extern int sysctl_sctp_wmem[3];
-
static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
diff --git a/net/socket.c b/net/socket.c
index 245330ca0015..884e32997698 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -140,8 +140,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
static const struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .read = new_sync_read,
- .write = new_sync_write,
.read_iter = sock_read_iter,
.write_iter = sock_write_iter,
.poll = sock_poll,
@@ -314,7 +312,7 @@ static const struct super_operations sockfs_ops = {
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
- dentry->d_inode->i_ino);
+ d_inode(dentry)->i_ino);
}
static const struct dentry_operations sockfs_dentry_operations = {
@@ -377,7 +375,7 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
&socket_file_ops);
if (unlikely(IS_ERR(file))) {
/* drop dentry, keep inode */
- ihold(path.dentry->d_inode);
+ ihold(d_inode(path.dentry));
path_put(&path);
return file;
}
@@ -499,7 +497,7 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
ssize_t len;
ssize_t used = 0;
- len = security_inode_listsecurity(dentry->d_inode, buffer, size);
+ len = security_inode_listsecurity(d_inode(dentry), buffer, size);
if (len < 0)
return len;
used += len;
@@ -610,60 +608,27 @@ void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
}
EXPORT_SYMBOL(__sock_tx_timestamp);
-static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size)
+static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
- return sock->ops->sendmsg(iocb, sock, msg, size);
-}
-
-static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size)
-{
- int err = security_socket_sendmsg(sock, msg, size);
-
- return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
-}
-
-static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg,
- size_t size, bool nosec)
-{
- struct kiocb iocb;
- int ret;
-
- init_sync_kiocb(&iocb, NULL);
- ret = nosec ? __sock_sendmsg_nosec(&iocb, sock, msg, size) :
- __sock_sendmsg(&iocb, sock, msg, size);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&iocb);
+ int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
+ BUG_ON(ret == -EIOCBQUEUED);
return ret;
}
-int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
- return do_sock_sendmsg(sock, msg, size, false);
-}
-EXPORT_SYMBOL(sock_sendmsg);
+ int err = security_socket_sendmsg(sock, msg,
+ msg_data_left(msg));
-static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
-{
- return do_sock_sendmsg(sock, msg, size, true);
+ return err ?: sock_sendmsg_nosec(sock, msg);
}
+EXPORT_SYMBOL(sock_sendmsg);
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
- mm_segment_t oldfs = get_fs();
- int result;
-
- set_fs(KERNEL_DS);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- iov_iter_init(&msg->msg_iter, WRITE, (struct iovec *)vec, num, size);
- result = sock_sendmsg(sock, msg, size);
- set_fs(oldfs);
- return result;
+ iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
+ return sock_sendmsg(sock, msg);
}
EXPORT_SYMBOL(kernel_sendmsg);
@@ -731,9 +696,9 @@ EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
- if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount)
+ if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount)
put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
- sizeof(__u32), &skb->dropcount);
+ sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount);
}
void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
@@ -744,47 +709,21 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
}
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
-static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
- return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+ return sock->ops->recvmsg(sock, msg, size, flags);
}
-static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags)
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
{
int err = security_socket_recvmsg(sock, msg, size, flags);
- return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags);
-}
-
-int sock_recvmsg(struct socket *sock, struct msghdr *msg,
- size_t size, int flags)
-{
- struct kiocb iocb;
- int ret;
-
- init_sync_kiocb(&iocb, NULL);
- ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&iocb);
- return ret;
+ return err ?: sock_recvmsg_nosec(sock, msg, size, flags);
}
EXPORT_SYMBOL(sock_recvmsg);
-static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
- size_t size, int flags)
-{
- struct kiocb iocb;
- int ret;
-
- init_sync_kiocb(&iocb, NULL);
- ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&iocb);
- return ret;
-}
-
/**
* kernel_recvmsg - Receive a message from a socket (kernel space)
* @sock: The socket to receive the message from
@@ -806,12 +745,8 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
mm_segment_t oldfs = get_fs();
int result;
+ iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
set_fs(KERNEL_DS);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- iov_iter_init(&msg->msg_iter, READ, (struct iovec *)vec, num, size);
result = sock_recvmsg(sock, msg, size, flags);
set_fs(oldfs);
return result;
@@ -849,7 +784,8 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct socket *sock = file->private_data;
- struct msghdr msg = {.msg_iter = *to};
+ struct msghdr msg = {.msg_iter = *to,
+ .msg_iocb = iocb};
ssize_t res;
if (file->f_flags & O_NONBLOCK)
@@ -858,11 +794,10 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (iocb->ki_pos != 0)
return -ESPIPE;
- if (iocb->ki_nbytes == 0) /* Match SYS5 behaviour */
+ if (!iov_iter_count(to)) /* Match SYS5 behaviour */
return 0;
- res = __sock_recvmsg(iocb, sock, &msg,
- iocb->ki_nbytes, msg.msg_flags);
+ res = sock_recvmsg(sock, &msg, iov_iter_count(to), msg.msg_flags);
*to = msg.msg_iter;
return res;
}
@@ -871,7 +806,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct socket *sock = file->private_data;
- struct msghdr msg = {.msg_iter = *from};
+ struct msghdr msg = {.msg_iter = *from,
+ .msg_iocb = iocb};
ssize_t res;
if (iocb->ki_pos != 0)
@@ -883,7 +819,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (sock->type == SOCK_SEQPACKET)
msg.msg_flags |= MSG_EOR;
- res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes);
+ res = sock_sendmsg(sock, &msg);
*from = msg.msg_iter;
return res;
}
@@ -1700,18 +1636,14 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
struct iovec iov;
int fput_needed;
- if (len > INT_MAX)
- len = INT_MAX;
- if (unlikely(!access_ok(VERIFY_READ, buff, len)))
- return -EFAULT;
+ err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
+ if (unlikely(err))
+ return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- iov.iov_base = buff;
- iov.iov_len = len;
msg.msg_name = NULL;
- iov_iter_init(&msg.msg_iter, WRITE, &iov, 1, len);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
@@ -1725,7 +1657,7 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
- err = sock_sendmsg(sock, &msg, len);
+ err = sock_sendmsg(sock, &msg);
out_put:
fput_light(sock->file, fput_needed);
@@ -1760,26 +1692,22 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
int err, err2;
int fput_needed;
- if (size > INT_MAX)
- size = INT_MAX;
- if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
- return -EFAULT;
+ err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
+ if (unlikely(err))
+ return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
msg.msg_control = NULL;
msg.msg_controllen = 0;
- iov.iov_len = size;
- iov.iov_base = ubuf;
- iov_iter_init(&msg.msg_iter, READ, &iov, 1, size);
/* Save some cycles and don't copy the address if not needed */
msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
/* We assume all kernel code knows the size of sockaddr_storage */
msg.msg_namelen = 0;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
- err = sock_recvmsg(sock, &msg, size, flags);
+ err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
if (err >= 0 && addr != NULL) {
err2 = move_addr_to_user(&address,
@@ -1899,10 +1827,10 @@ struct used_address {
unsigned int name_len;
};
-static ssize_t copy_msghdr_from_user(struct msghdr *kmsg,
- struct user_msghdr __user *umsg,
- struct sockaddr __user **save_addr,
- struct iovec **iov)
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+ struct user_msghdr __user *umsg,
+ struct sockaddr __user **save_addr,
+ struct iovec **iov)
{
struct sockaddr __user *uaddr;
struct iovec __user *uiov;
@@ -1946,13 +1874,10 @@ static ssize_t copy_msghdr_from_user(struct msghdr *kmsg,
if (nr_segs > UIO_MAXIOV)
return -EMSGSIZE;
- err = rw_copy_check_uvector(save_addr ? READ : WRITE,
- uiov, nr_segs,
- UIO_FASTIOV, *iov, iov);
- if (err >= 0)
- iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE,
- *iov, nr_segs, err);
- return err;
+ kmsg->msg_iocb = NULL;
+
+ return import_iovec(save_addr ? READ : WRITE, uiov, nr_segs,
+ UIO_FASTIOV, iov, &kmsg->msg_iter);
}
static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
@@ -1967,7 +1892,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
__attribute__ ((aligned(sizeof(__kernel_size_t))));
/* 20 is size of ipv6_pktinfo */
unsigned char *ctl_buf = ctl;
- int ctl_len, total_len;
+ int ctl_len;
ssize_t err;
msg_sys->msg_name = &address;
@@ -1977,8 +1902,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
else
err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov);
if (err < 0)
- goto out_freeiov;
- total_len = err;
+ return err;
err = -ENOBUFS;
@@ -2025,10 +1949,10 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
used_address->name_len == msg_sys->msg_namelen &&
!memcmp(&used_address->name, msg_sys->msg_name,
used_address->name_len)) {
- err = sock_sendmsg_nosec(sock, msg_sys, total_len);
+ err = sock_sendmsg_nosec(sock, msg_sys);
goto out_freectl;
}
- err = sock_sendmsg(sock, msg_sys, total_len);
+ err = sock_sendmsg(sock, msg_sys);
/*
* If this is sendmmsg() and sending to current destination address was
* successful, remember it.
@@ -2044,8 +1968,7 @@ out_freectl:
if (ctl_buf != ctl)
sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
- if (iov != iovstack)
- kfree(iov);
+ kfree(iov);
return err;
}
@@ -2170,8 +2093,8 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
else
err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
if (err < 0)
- goto out_freeiov;
- total_len = err;
+ return err;
+ total_len = iov_iter_count(&msg_sys->msg_iter);
cmsg_ptr = (unsigned long)msg_sys->msg_control;
msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
@@ -2209,8 +2132,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
err = len;
out_freeiov:
- if (iov != iovstack)
- kfree(iov);
+ kfree(iov);
return err;
}
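
Throughout net/socket.c the explicit size argument disappears because the length now travels inside the msghdr's iov_iter (msg_data_left(), iov_iter_count()). The userspace program below is only an analogue of the same idea, not kernel code: sendmsg(2) also takes no separate length, the iovec array in the msghdr describes the whole payload.

    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
    	int sv[2];
    	char a[] = "hello ", b[] = "world\n", buf[32];
    	struct iovec iov[2] = {
    		{ .iov_base = a, .iov_len = strlen(a) },
    		{ .iov_base = b, .iov_len = strlen(b) },
    	};
    	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

    	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
    		return 1;
    	if (sendmsg(sv[0], &msg, 0) < 0)   /* length comes from the iovecs */
    		return 1;
    	ssize_t n = read(sv[1], buf, sizeof(buf));
    	if (n > 0)
    		(void)write(1, buf, (size_t)n);
    	close(sv[0]);
    	close(sv[1]);
    	return 0;
    }
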
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index fb78117b896c..9068e72aa73c 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -1,9 +1,11 @@
config SUNRPC
tristate
+ depends on MULTIUSER
config SUNRPC_GSS
tristate
select OID_REGISTRY
+ depends on MULTIUSER
config SUNRPC_BACKCHANNEL
bool
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 1ec19f6f0c2b..eeeba5adee6d 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
{
u32 value_follows;
int err;
+ struct page *scratch;
+
+ scratch = alloc_page(GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+ xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
/* res->status */
err = gssx_dec_status(xdr, &res->status);
if (err)
- return err;
+ goto out_free;
/* res->context_handle */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
- return err;
+ goto out_free;
if (value_follows) {
err = gssx_dec_ctx(xdr, res->context_handle);
if (err)
- return err;
+ goto out_free;
} else {
res->context_handle = NULL;
}
@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
/* res->output_token */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
- return err;
+ goto out_free;
if (value_follows) {
err = gssx_dec_buffer(xdr, res->output_token);
if (err)
- return err;
+ goto out_free;
} else {
res->output_token = NULL;
}
@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
/* res->delegated_cred_handle */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
- return err;
+ goto out_free;
if (value_follows) {
/* we do not support upcall servers sending this data. */
- return -EINVAL;
+ err = -EINVAL;
+ goto out_free;
}
/* res->options */
err = gssx_dec_option_array(xdr, &res->options);
+out_free:
+ __free_page(scratch);
return err;
}
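
gssx_dec_accept_sec_context() now allocates a scratch page up front, so every early return in the decode path becomes goto out_free to keep the page from leaking. A generic compilable sketch of that single-exit cleanup pattern (step() and the buffer size are made up):

    #include <stdlib.h>
    #include <errno.h>

    static int step(int fail) { return fail ? -EINVAL : 0; }

    static int decode(int fail_at)
    {
    	void *scratch;
    	int err;

    	scratch = malloc(4096);
    	if (!scratch)
    		return -ENOMEM;

    	err = step(fail_at == 1);
    	if (err)
    		goto out_free;
    	err = step(fail_at == 2);
    	if (err)
    		goto out_free;

    out_free:
    	free(scratch);       /* released on success and on every error */
    	return err;
    }

    int main(void)
    {
    	return decode(0) || decode(2) != -EINVAL;
    }
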
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 5199bb1a017e..2928afffbb81 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1072,10 +1072,12 @@ void qword_add(char **bpp, int *lp, char *str)
if (len < 0) return;
- ret = string_escape_str(str, &bp, len, ESCAPE_OCTAL, "\\ \n\t");
- if (ret < 0 || ret == len)
+ ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
+ if (ret >= len) {
+ bp += len;
len = -1;
- else {
+ } else {
+ bp += ret;
len -= ret;
*bp++ = ' ';
len--;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 2d12b76b5a64..d81186d34558 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -94,7 +94,7 @@ rpc_timeout_upcall_queue(struct work_struct *work)
}
dentry = dget(pipe->dentry);
spin_unlock(&pipe->lock);
- rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
+ rpc_purge_list(dentry ? &RPC_I(d_inode(dentry))->waitq : NULL,
&free_list, destroy_msg, -ETIMEDOUT);
dput(dentry);
}
@@ -152,7 +152,7 @@ rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg)
dentry = dget(pipe->dentry);
spin_unlock(&pipe->lock);
if (dentry) {
- wake_up(&RPC_I(dentry->d_inode)->waitq);
+ wake_up(&RPC_I(d_inode(dentry))->waitq);
dput(dentry);
}
return res;
@@ -591,7 +591,7 @@ static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry,
err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
if (err)
return err;
- rpci = RPC_I(dentry->d_inode);
+ rpci = RPC_I(d_inode(dentry));
rpci->private = private;
rpci->pipe = pipe;
fsnotify_create(dir, dentry);
@@ -616,7 +616,7 @@ int rpc_rmdir(struct dentry *dentry)
int error;
parent = dget_parent(dentry);
- dir = parent->d_inode;
+ dir = d_inode(parent);
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
error = __rpc_rmdir(dir, dentry);
mutex_unlock(&dir->i_mutex);
@@ -638,7 +638,7 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
rpc_close_pipes(inode);
return __rpc_unlink(dir, dentry);
@@ -654,7 +654,7 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
if (!dentry)
return ERR_PTR(-ENOMEM);
}
- if (dentry->d_inode == NULL)
+ if (d_really_is_negative(dentry))
return dentry;
dput(dentry);
return ERR_PTR(-EEXIST);
@@ -667,7 +667,7 @@ static void __rpc_depopulate(struct dentry *parent,
const struct rpc_filelist *files,
int start, int eof)
{
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct dentry *dentry;
struct qstr name;
int i;
@@ -679,9 +679,9 @@ static void __rpc_depopulate(struct dentry *parent,
if (dentry == NULL)
continue;
- if (dentry->d_inode == NULL)
+ if (d_really_is_negative(dentry))
goto next;
- switch (dentry->d_inode->i_mode & S_IFMT) {
+ switch (d_inode(dentry)->i_mode & S_IFMT) {
default:
BUG();
case S_IFREG:
@@ -699,7 +699,7 @@ static void rpc_depopulate(struct dentry *parent,
const struct rpc_filelist *files,
int start, int eof)
{
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
__rpc_depopulate(parent, files, start, eof);
@@ -711,7 +711,7 @@ static int rpc_populate(struct dentry *parent,
int start, int eof,
void *private)
{
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct dentry *dentry;
int i, err;
@@ -754,7 +754,7 @@ static struct dentry *rpc_mkdir_populate(struct dentry *parent,
int (*populate)(struct dentry *, void *), void *args_populate)
{
struct dentry *dentry;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
int error;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
@@ -787,7 +787,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
int error;
parent = dget_parent(dentry);
- dir = parent->d_inode;
+ dir = d_inode(parent);
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
if (depopulate != NULL)
depopulate(dentry);
@@ -819,7 +819,7 @@ struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
void *private, struct rpc_pipe *pipe)
{
struct dentry *dentry;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
int err;
@@ -864,7 +864,7 @@ rpc_unlink(struct dentry *dentry)
int error = 0;
parent = dget_parent(dentry);
- dir = parent->d_inode;
+ dir = d_inode(parent);
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
error = __rpc_rmpipe(dir, dentry);
mutex_unlock(&dir->i_mutex);
@@ -1375,7 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
struct dentry *clnt_dir = pipe_dentry->d_parent;
struct dentry *gssd_dir = clnt_dir->d_parent;
- __rpc_rmpipe(clnt_dir->d_inode, pipe_dentry);
+ __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
dput(pipe_dentry);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b91fd9c597b4..337ca851a350 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -89,8 +89,8 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
if (!task->tk_timeout)
return;
- dprintk("RPC: %5u setting alarm for %lu ms\n",
- task->tk_pid, task->tk_timeout * 1000 / HZ);
+ dprintk("RPC: %5u setting alarm for %u ms\n",
+ task->tk_pid, jiffies_to_msecs(task->tk_timeout));
task->u.tk_wait.expires = jiffies + task->tk_timeout;
if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cc331b6cf573..0c8120229a03 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -257,7 +257,7 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
svc_set_cmsg_data(rqstp, cmh);
- if (sock_sendmsg(sock, &msg, 0) < 0)
+ if (sock_sendmsg(sock, &msg) < 0)
goto out;
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 9949722d99ce..1d4fe24af06a 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -326,6 +326,15 @@ out_unlock:
xprt_clear_locked(xprt);
}
+static void xprt_task_clear_bytes_sent(struct rpc_task *task)
+{
+ if (task != NULL) {
+ struct rpc_rqst *req = task->tk_rqstp;
+ if (req != NULL)
+ req->rq_bytes_sent = 0;
+ }
+}
+
/**
* xprt_release_xprt - allow other requests to use a transport
* @xprt: transport with other tasks potentially waiting
@@ -336,11 +345,7 @@ out_unlock:
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
- if (task != NULL) {
- struct rpc_rqst *req = task->tk_rqstp;
- if (req != NULL)
- req->rq_bytes_sent = 0;
- }
+ xprt_task_clear_bytes_sent(task);
xprt_clear_locked(xprt);
__xprt_lock_write_next(xprt);
}
@@ -358,11 +363,7 @@ EXPORT_SYMBOL_GPL(xprt_release_xprt);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
- if (task != NULL) {
- struct rpc_rqst *req = task->tk_rqstp;
- if (req != NULL)
- req->rq_bytes_sent = 0;
- }
+ xprt_task_clear_bytes_sent(task);
xprt_clear_locked(xprt);
__xprt_lock_write_next_cong(xprt);
}
@@ -700,6 +701,7 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
goto out;
if (xprt->snd_task != task)
goto out;
+ xprt_task_clear_bytes_sent(task);
xprt->snd_task = cookie;
ret = true;
out:
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile
index da5136fd5694..579f72bbcf4b 100644
--- a/net/sunrpc/xprtrdma/Makefile
+++ b/net/sunrpc/xprtrdma/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_SUNRPC_XPRT_RDMA_CLIENT) += xprtrdma.o
-xprtrdma-y := transport.o rpc_rdma.o verbs.o
+xprtrdma-y := transport.o rpc_rdma.o verbs.o \
+ fmr_ops.o frwr_ops.o physical_ops.o
obj-$(CONFIG_SUNRPC_XPRT_RDMA_SERVER) += svcrdma.o
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
new file mode 100644
index 000000000000..302d4ebf6fbf
--- /dev/null
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
+ */
+
+/* Lightweight memory registration using Fast Memory Regions (FMR).
+ * Referred to sometimes as MTHCAFMR mode.
+ *
+ * FMR uses synchronous memory registration and deregistration.
+ * FMR registration is known to be fast, but FMR deregistration
+ * can take tens of usecs to complete.
+ */
+
+#include "xprt_rdma.h"
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+# define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+/* Maximum scatter/gather per FMR */
+#define RPCRDMA_MAX_FMR_SGES (64)
+
+static int
+fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+ struct rpcrdma_create_data_internal *cdata)
+{
+ return 0;
+}
+
+/* FMR mode conveys up to 64 pages of payload per chunk segment.
+ */
+static size_t
+fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+ return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+ rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
+}
+
+static int
+fmr_op_init(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
+ struct ib_fmr_attr fmr_attr = {
+ .max_pages = RPCRDMA_MAX_FMR_SGES,
+ .max_maps = 1,
+ .page_shift = PAGE_SHIFT
+ };
+ struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
+ struct rpcrdma_mw *r;
+ int i, rc;
+
+ INIT_LIST_HEAD(&buf->rb_mws);
+ INIT_LIST_HEAD(&buf->rb_all);
+
+ i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
+ dprintk("RPC: %s: initializing %d FMRs\n", __func__, i);
+
+ while (i--) {
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+
+ r->r.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
+ if (IS_ERR(r->r.fmr))
+ goto out_fmr_err;
+
+ list_add(&r->mw_list, &buf->rb_mws);
+ list_add(&r->mw_all, &buf->rb_all);
+ }
+ return 0;
+
+out_fmr_err:
+ rc = PTR_ERR(r->r.fmr);
+ dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc);
+ kfree(r);
+ return rc;
+}
+
+/* Use the ib_map_phys_fmr() verb to register a memory region
+ * for remote access via RDMA READ or RDMA WRITE.
+ */
+static int
+fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
+ int nsegs, bool writing)
+{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct ib_device *device = ia->ri_id->device;
+ enum dma_data_direction direction = rpcrdma_data_dir(writing);
+ struct rpcrdma_mr_seg *seg1 = seg;
+ struct rpcrdma_mw *mw = seg1->rl_mw;
+ u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
+ int len, pageoff, i, rc;
+
+ pageoff = offset_in_page(seg1->mr_offset);
+ seg1->mr_offset -= pageoff; /* start of page */
+ seg1->mr_len += pageoff;
+ len = -pageoff;
+ if (nsegs > RPCRDMA_MAX_FMR_SGES)
+ nsegs = RPCRDMA_MAX_FMR_SGES;
+ for (i = 0; i < nsegs;) {
+ rpcrdma_map_one(device, seg, direction);
+ physaddrs[i] = seg->mr_dma;
+ len += seg->mr_len;
+ ++seg;
+ ++i;
+ /* Check for holes */
+ if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
+ offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
+ break;
+ }
+
+ rc = ib_map_phys_fmr(mw->r.fmr, physaddrs, i, seg1->mr_dma);
+ if (rc)
+ goto out_maperr;
+
+ seg1->mr_rkey = mw->r.fmr->rkey;
+ seg1->mr_base = seg1->mr_dma + pageoff;
+ seg1->mr_nsegs = i;
+ seg1->mr_len = len;
+ return i;
+
+out_maperr:
+ dprintk("RPC: %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
+ __func__, len, (unsigned long long)seg1->mr_dma,
+ pageoff, i, rc);
+ while (i--)
+ rpcrdma_unmap_one(device, --seg);
+ return rc;
+}
+
+/* Use the ib_unmap_fmr() verb to prevent further remote
+ * access via RDMA READ or RDMA WRITE.
+ */
+static int
+fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
+{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_mr_seg *seg1 = seg;
+ struct ib_device *device;
+ int rc, nsegs = seg->mr_nsegs;
+ LIST_HEAD(l);
+
+ list_add(&seg1->rl_mw->r.fmr->list, &l);
+ rc = ib_unmap_fmr(&l);
+ read_lock(&ia->ri_qplock);
+ device = ia->ri_id->device;
+ while (seg1->mr_nsegs--)
+ rpcrdma_unmap_one(device, seg++);
+ read_unlock(&ia->ri_qplock);
+ if (rc)
+ goto out_err;
+ return nsegs;
+
+out_err:
+ dprintk("RPC: %s: ib_unmap_fmr status %i\n", __func__, rc);
+ return nsegs;
+}
+
+/* After a disconnect, unmap all FMRs.
+ *
+ * This is invoked only in the transport connect worker in order
+ * to serialize with rpcrdma_register_fmr_external().
+ */
+static void
+fmr_op_reset(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_mw *r;
+ LIST_HEAD(list);
+ int rc;
+
+ list_for_each_entry(r, &buf->rb_all, mw_all)
+ list_add(&r->r.fmr->list, &list);
+
+ rc = ib_unmap_fmr(&list);
+ if (rc)
+ dprintk("RPC: %s: ib_unmap_fmr failed %i\n",
+ __func__, rc);
+}
+
+static void
+fmr_op_destroy(struct rpcrdma_buffer *buf)
+{
+ struct rpcrdma_mw *r;
+ int rc;
+
+ while (!list_empty(&buf->rb_all)) {
+ r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
+ list_del(&r->mw_all);
+ rc = ib_dealloc_fmr(r->r.fmr);
+ if (rc)
+ dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
+ __func__, rc);
+ kfree(r);
+ }
+}
+
+const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
+ .ro_map = fmr_op_map,
+ .ro_unmap = fmr_op_unmap,
+ .ro_open = fmr_op_open,
+ .ro_maxpages = fmr_op_maxpages,
+ .ro_init = fmr_op_init,
+ .ro_reset = fmr_op_reset,
+ .ro_destroy = fmr_op_destroy,
+ .ro_displayname = "fmr",
+};
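
The mapping loop in fmr_op_map() above batches consecutive segments into one FMR only while they tile whole pages: a segment that does not end on a page boundary, or a following segment that does not start on one, ends the batch. A rough standalone sketch of that coalescing rule, with an invented segment type:

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define offset_in_page(x) ((unsigned int)((x) & (PAGE_SIZE - 1)))

    struct seg { unsigned long offset; unsigned long len; };

    /* how many consecutive segments can share one registration */
    static int coalesce(const struct seg *seg, int nsegs)
    {
    	int i;

    	for (i = 0; i < nsegs;) {
    		++i;
    		/* stop at a hole: next segment not page aligned, or this
    		 * segment does not end on a page boundary */
    		if ((i < nsegs && offset_in_page(seg[i].offset)) ||
    		    offset_in_page(seg[i - 1].offset + seg[i - 1].len))
    			break;
    	}
    	return i;
    }

    int main(void)
    {
    	struct seg segs[] = {
    		{ 0, PAGE_SIZE },      /* full page            */
    		{ 0, PAGE_SIZE / 2 },  /* ends mid-page: hole  */
    		{ 0, PAGE_SIZE },
    	};

    	printf("batched %d of 3 segments\n", coalesce(segs, 3)); /* 2 */
    	return 0;
    }
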
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
new file mode 100644
index 000000000000..dff0481dbcf8
--- /dev/null
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
+ */
+
+/* Lightweight memory registration using Fast Registration Work
+ * Requests (FRWR). Also referred to sometimes as FRMR mode.
+ *
+ * FRWR features ordered asynchronous registration and deregistration
+ * of arbitrarily sized memory regions. This is the fastest and safest
+ * but most complex memory registration mode.
+ */
+
+#include "xprt_rdma.h"
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+# define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+static int
+__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
+ unsigned int depth)
+{
+ struct rpcrdma_frmr *f = &r->r.frmr;
+ int rc;
+
+ f->fr_mr = ib_alloc_fast_reg_mr(pd, depth);
+ if (IS_ERR(f->fr_mr))
+ goto out_mr_err;
+ f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
+ if (IS_ERR(f->fr_pgl))
+ goto out_list_err;
+ return 0;
+
+out_mr_err:
+ rc = PTR_ERR(f->fr_mr);
+ dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n",
+ __func__, rc);
+ return rc;
+
+out_list_err:
+ rc = PTR_ERR(f->fr_pgl);
+ dprintk("RPC: %s: ib_alloc_fast_reg_page_list status %i\n",
+ __func__, rc);
+ ib_dereg_mr(f->fr_mr);
+ return rc;
+}
+
+static void
+__frwr_release(struct rpcrdma_mw *r)
+{
+ int rc;
+
+ rc = ib_dereg_mr(r->r.frmr.fr_mr);
+ if (rc)
+ dprintk("RPC: %s: ib_dereg_mr status %i\n",
+ __func__, rc);
+ ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
+}
+
+static int
+frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+ struct rpcrdma_create_data_internal *cdata)
+{
+ struct ib_device_attr *devattr = &ia->ri_devattr;
+ int depth, delta;
+
+ ia->ri_max_frmr_depth =
+ min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+ devattr->max_fast_reg_page_list_len);
+ dprintk("RPC: %s: device's max FR page list len = %u\n",
+ __func__, ia->ri_max_frmr_depth);
+
+ /* Add room for frmr register and invalidate WRs.
+ * 1. FRMR reg WR for head
+ * 2. FRMR invalidate WR for head
+ * 3. N FRMR reg WRs for pagelist
+ * 4. N FRMR invalidate WRs for pagelist
+ * 5. FRMR reg WR for tail
+ * 6. FRMR invalidate WR for tail
+ * 7. The RDMA_SEND WR
+ */
+ depth = 7;
+
+ /* Calculate N if the device max FRMR depth is smaller than
+ * RPCRDMA_MAX_DATA_SEGS.
+ */
+ if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
+ delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
+ do {
+ depth += 2; /* FRMR reg + invalidate */
+ delta -= ia->ri_max_frmr_depth;
+ } while (delta > 0);
+ }
+
+ ep->rep_attr.cap.max_send_wr *= depth;
+ if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
+ cdata->max_requests = devattr->max_qp_wr / depth;
+ if (!cdata->max_requests)
+ return -EINVAL;
+ ep->rep_attr.cap.max_send_wr = cdata->max_requests *
+ depth;
+ }
+
+ return 0;
+}
+
+/* FRWR mode conveys a list of pages per chunk segment. The
+ * maximum length of that list is the FRWR page list depth.
+ */
+static size_t
+frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+ return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+ rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
+}
+
+/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
+static void
+frwr_sendcompletion(struct ib_wc *wc)
+{
+ struct rpcrdma_mw *r;
+
+ if (likely(wc->status == IB_WC_SUCCESS))
+ return;
+
+ /* WARNING: Only wr_id and status are reliable at this point */
+ r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+ dprintk("RPC: %s: frmr %p (stale), status %d\n",
+ __func__, r, wc->status);
+ r->r.frmr.fr_state = FRMR_IS_STALE;
+}
+
+static int
+frwr_op_init(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct ib_device *device = r_xprt->rx_ia.ri_id->device;
+ unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
+ struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
+ int i;
+
+ INIT_LIST_HEAD(&buf->rb_mws);
+ INIT_LIST_HEAD(&buf->rb_all);
+
+ i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
+ dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);
+
+ while (i--) {
+ struct rpcrdma_mw *r;
+ int rc;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+
+ rc = __frwr_init(r, pd, device, depth);
+ if (rc) {
+ kfree(r);
+ return rc;
+ }
+
+ list_add(&r->mw_list, &buf->rb_mws);
+ list_add(&r->mw_all, &buf->rb_all);
+ r->mw_sendcompletion = frwr_sendcompletion;
+ }
+
+ return 0;
+}
+
+/* Post a FAST_REG Work Request to register a memory region
+ * for remote access via RDMA READ or RDMA WRITE.
+ */
+static int
+frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
+ int nsegs, bool writing)
+{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct ib_device *device = ia->ri_id->device;
+ enum dma_data_direction direction = rpcrdma_data_dir(writing);
+ struct rpcrdma_mr_seg *seg1 = seg;
+ struct rpcrdma_mw *mw = seg1->rl_mw;
+ struct rpcrdma_frmr *frmr = &mw->r.frmr;
+ struct ib_mr *mr = frmr->fr_mr;
+ struct ib_send_wr fastreg_wr, *bad_wr;
+ u8 key;
+ int len, pageoff;
+ int i, rc;
+ int seg_len;
+ u64 pa;
+ int page_no;
+
+ pageoff = offset_in_page(seg1->mr_offset);
+ seg1->mr_offset -= pageoff; /* start of page */
+ seg1->mr_len += pageoff;
+ len = -pageoff;
+ if (nsegs > ia->ri_max_frmr_depth)
+ nsegs = ia->ri_max_frmr_depth;
+ for (page_no = i = 0; i < nsegs;) {
+ rpcrdma_map_one(device, seg, direction);
+ pa = seg->mr_dma;
+ for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
+ frmr->fr_pgl->page_list[page_no++] = pa;
+ pa += PAGE_SIZE;
+ }
+ len += seg->mr_len;
+ ++seg;
+ ++i;
+ /* Check for holes */
+ if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
+ offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
+ break;
+ }
+ dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n",
+ __func__, mw, i, len);
+
+ frmr->fr_state = FRMR_IS_VALID;
+
+ memset(&fastreg_wr, 0, sizeof(fastreg_wr));
+ fastreg_wr.wr_id = (unsigned long)(void *)mw;
+ fastreg_wr.opcode = IB_WR_FAST_REG_MR;
+ fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff;
+ fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
+ fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+ fastreg_wr.wr.fast_reg.page_list_len = page_no;
+ fastreg_wr.wr.fast_reg.length = len;
+ fastreg_wr.wr.fast_reg.access_flags = writing ?
+ IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+ IB_ACCESS_REMOTE_READ;
+ key = (u8)(mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(mr, ++key);
+ fastreg_wr.wr.fast_reg.rkey = mr->rkey;
+
+ DECR_CQCOUNT(&r_xprt->rx_ep);
+ rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
+ if (rc)
+ goto out_senderr;
+
+ seg1->mr_rkey = mr->rkey;
+ seg1->mr_base = seg1->mr_dma + pageoff;
+ seg1->mr_nsegs = i;
+ seg1->mr_len = len;
+ return i;
+
+out_senderr:
+ dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
+ ib_update_fast_reg_key(mr, --key);
+ frmr->fr_state = FRMR_IS_INVALID;
+ while (i--)
+ rpcrdma_unmap_one(device, --seg);
+ return rc;
+}
+
+/* Post a LOCAL_INV Work Request to prevent further remote access
+ * via RDMA READ or RDMA WRITE.
+ */
+static int
+frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
+{
+ struct rpcrdma_mr_seg *seg1 = seg;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct ib_send_wr invalidate_wr, *bad_wr;
+ int rc, nsegs = seg->mr_nsegs;
+ struct ib_device *device;
+
+ seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
+
+ memset(&invalidate_wr, 0, sizeof(invalidate_wr));
+ invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
+ invalidate_wr.opcode = IB_WR_LOCAL_INV;
+ invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
+ DECR_CQCOUNT(&r_xprt->rx_ep);
+
+ read_lock(&ia->ri_qplock);
+ device = ia->ri_id->device;
+ while (seg1->mr_nsegs--)
+ rpcrdma_unmap_one(device, seg++);
+ rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
+ read_unlock(&ia->ri_qplock);
+ if (rc)
+ goto out_err;
+ return nsegs;
+
+out_err:
+ /* Force rpcrdma_buffer_get() to retry */
+ seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
+ dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
+ return nsegs;
+}
+
+/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
+ * an unusable state. Find FRMRs in this state and dereg / reg
+ * each. FRMRs that are VALID and attached to an rpcrdma_req are
+ * also torn down.
+ *
+ * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
+ *
+ * This is invoked only in the transport connect worker in order
+ * to serialize with rpcrdma_register_frmr_external().
+ */
+static void
+frwr_op_reset(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct ib_device *device = r_xprt->rx_ia.ri_id->device;
+ unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
+ struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
+ struct rpcrdma_mw *r;
+ int rc;
+
+ list_for_each_entry(r, &buf->rb_all, mw_all) {
+ if (r->r.frmr.fr_state == FRMR_IS_INVALID)
+ continue;
+
+ __frwr_release(r);
+ rc = __frwr_init(r, pd, device, depth);
+ if (rc) {
+ dprintk("RPC: %s: mw %p left %s\n",
+ __func__, r,
+ (r->r.frmr.fr_state == FRMR_IS_STALE ?
+ "stale" : "valid"));
+ continue;
+ }
+
+ r->r.frmr.fr_state = FRMR_IS_INVALID;
+ }
+}
+
+static void
+frwr_op_destroy(struct rpcrdma_buffer *buf)
+{
+ struct rpcrdma_mw *r;
+
+ while (!list_empty(&buf->rb_all)) {
+ r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
+ list_del(&r->mw_all);
+ __frwr_release(r);
+ kfree(r);
+ }
+}
+
+const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
+ .ro_map = frwr_op_map,
+ .ro_unmap = frwr_op_unmap,
+ .ro_open = frwr_op_open,
+ .ro_maxpages = frwr_op_maxpages,
+ .ro_init = frwr_op_init,
+ .ro_reset = frwr_op_reset,
+ .ro_destroy = frwr_op_destroy,
+ .ro_displayname = "frwr",
+};
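
frwr_op_open() budgets send-queue slots: seven work requests per RPC (register and invalidate for head, page list and tail, plus the send itself), two more for each additional FRMR when the device's fast-registration page-list depth cannot cover RPCRDMA_MAX_DATA_SEGS, and a final clamp of max_send_wr against the device's max_qp_wr. The standalone program below redoes that arithmetic with made-up device limits; only the formula is taken from the patch.

    #include <stdio.h>

    #define RPCRDMA_MAX_DATA_SEGS 64   /* illustrative value */

    int main(void)
    {
    	unsigned int max_frmr_depth = 16;  /* pretend device limit */
    	unsigned int max_requests = 32;    /* pretend cdata->max_requests */
    	unsigned int max_qp_wr = 1024;     /* pretend devattr->max_qp_wr */
    	unsigned int max_send_wr = max_requests;
    	int depth = 7, delta;

    	if (max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
    		delta = RPCRDMA_MAX_DATA_SEGS - max_frmr_depth;
    		do {
    			depth += 2;              /* FRMR reg + invalidate */
    			delta -= max_frmr_depth;
    		} while (delta > 0);
    	}

    	max_send_wr *= depth;
    	if (max_send_wr > max_qp_wr) {
    		max_requests = max_qp_wr / depth;
    		max_send_wr = max_requests * depth;
    	}

    	printf("depth=%d max_requests=%u max_send_wr=%u\n",
    	       depth, max_requests, max_send_wr);
    	return 0;
    }
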
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
new file mode 100644
index 000000000000..ba518af16787
--- /dev/null
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
+ */
+
+/* No-op chunk preparation. All client memory is pre-registered.
+ * Sometimes referred to as ALLPHYSICAL mode.
+ *
+ * Physical registration is simple because all client memory is
+ * pre-registered and never deregistered. This mode is good for
+ * adapter bring up, but is considered not safe: the server is
+ * trusted not to abuse its access to client memory not involved
+ * in RDMA I/O.
+ */
+
+#include "xprt_rdma.h"
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+# define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+static int
+physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+ struct rpcrdma_create_data_internal *cdata)
+{
+ return 0;
+}
+
+/* PHYSICAL memory registration conveys one page per chunk segment.
+ */
+static size_t
+physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+ return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+ rpcrdma_max_segments(r_xprt));
+}
+
+static int
+physical_op_init(struct rpcrdma_xprt *r_xprt)
+{
+ return 0;
+}
+
+/* The client's physical memory is already exposed for
+ * remote access via RDMA READ or RDMA WRITE.
+ */
+static int
+physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
+ int nsegs, bool writing)
+{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+ rpcrdma_map_one(ia->ri_id->device, seg,
+ rpcrdma_data_dir(writing));
+ seg->mr_rkey = ia->ri_bind_mem->rkey;
+ seg->mr_base = seg->mr_dma;
+ seg->mr_nsegs = 1;
+ return 1;
+}
+
+/* Unmap a memory region, but leave it registered.
+ */
+static int
+physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
+{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+ read_lock(&ia->ri_qplock);
+ rpcrdma_unmap_one(ia->ri_id->device, seg);
+ read_unlock(&ia->ri_qplock);
+
+ return 1;
+}
+
+static void
+physical_op_reset(struct rpcrdma_xprt *r_xprt)
+{
+}
+
+static void
+physical_op_destroy(struct rpcrdma_buffer *buf)
+{
+}
+
+const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
+ .ro_map = physical_op_map,
+ .ro_unmap = physical_op_unmap,
+ .ro_open = physical_op_open,
+ .ro_maxpages = physical_op_maxpages,
+ .ro_init = physical_op_init,
+ .ro_reset = physical_op_reset,
+ .ro_destroy = physical_op_destroy,
+ .ro_displayname = "physical",
+};
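
Taken together, fmr_ops.c, frwr_ops.c and physical_ops.c each export one rpcrdma_memreg_ops table, and the rest of the transport (see the ro_map call in rpc_rdma.c below) goes through whichever table was selected for the connection. A toy sketch of that ops-table dispatch, with invented types and values:

    #include <stdio.h>

    struct memreg_ops {
    	const char *displayname;
    	int (*map)(int nsegs);
    };

    static int fmr_map(int nsegs)  { return nsegs > 64 ? 64 : nsegs; }
    static int phys_map(int nsegs) { (void)nsegs; return 1; }

    static const struct memreg_ops fmr_ops  = { "fmr",      fmr_map  };
    static const struct memreg_ops phys_ops = { "physical", phys_map };

    int main(void)
    {
    	const struct memreg_ops *ops = &fmr_ops; /* chosen at setup time */

    	printf("%s mapped %d segments\n", ops->displayname, ops->map(70));
    	ops = &phys_ops;
    	printf("%s mapped %d segments\n", ops->displayname, ops->map(70));
    	return 0;
    }
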
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 91ffde82fa0c..2c53ea9e1b83 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -53,6 +53,14 @@
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
+enum rpcrdma_chunktype {
+ rpcrdma_noch = 0,
+ rpcrdma_readch,
+ rpcrdma_areadch,
+ rpcrdma_writech,
+ rpcrdma_replych
+};
+
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
"pure inline", /* no chunks */
@@ -179,6 +187,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
struct rpcrdma_write_array *warray = NULL;
struct rpcrdma_write_chunk *cur_wchunk = NULL;
__be32 *iptr = headerp->rm_body.rm_chunks;
+ int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);
if (type == rpcrdma_readch || type == rpcrdma_areadch) {
/* a read chunk - server will RDMA Read our memory */
@@ -201,9 +210,9 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
if (nsegs < 0)
return nsegs;
+ map = r_xprt->rx_ia.ri_ops->ro_map;
do {
- n = rpcrdma_register_external(seg, nsegs,
- cur_wchunk != NULL, r_xprt);
+ n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
if (n <= 0)
goto out;
if (cur_rchunk) { /* read */
@@ -275,34 +284,13 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
return (unsigned char *)iptr - (unsigned char *)headerp;
out:
- if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) {
- for (pos = 0; nchunks--;)
- pos += rpcrdma_deregister_external(
- &req->rl_segments[pos], r_xprt);
- }
- return n;
-}
+ if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
+ return n;
-/*
- * Marshal chunks. This routine returns the header length
- * consumed by marshaling.
- *
- * Returns positive RPC/RDMA header size, or negative errno.
- */
-
-ssize_t
-rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
-{
- struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);
-
- if (req->rl_rtype != rpcrdma_noch)
- result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
- headerp, req->rl_rtype);
- else if (req->rl_wtype != rpcrdma_noch)
- result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
- headerp, req->rl_wtype);
- return result;
+ for (pos = 0; nchunks--;)
+ pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
+ &req->rl_segments[pos]);
+ return n;
}
/*
@@ -397,6 +385,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
char *base;
size_t rpclen, padlen;
ssize_t hdrlen;
+ enum rpcrdma_chunktype rtype, wtype;
struct rpcrdma_msg *headerp;
/*
@@ -433,13 +422,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* into pages; otherwise use reply chunks.
*/
if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
- req->rl_wtype = rpcrdma_noch;
+ wtype = rpcrdma_noch;
else if (rqst->rq_rcv_buf.page_len == 0)
- req->rl_wtype = rpcrdma_replych;
+ wtype = rpcrdma_replych;
else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
- req->rl_wtype = rpcrdma_writech;
+ wtype = rpcrdma_writech;
else
- req->rl_wtype = rpcrdma_replych;
+ wtype = rpcrdma_replych;
/*
* Chunks needed for arguments?
@@ -456,16 +445,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* TBD check NFSv4 setacl
*/
if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
- req->rl_rtype = rpcrdma_noch;
+ rtype = rpcrdma_noch;
else if (rqst->rq_snd_buf.page_len == 0)
- req->rl_rtype = rpcrdma_areadch;
+ rtype = rpcrdma_areadch;
else
- req->rl_rtype = rpcrdma_readch;
+ rtype = rpcrdma_readch;
/* The following simplification is not true forever */
- if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
- req->rl_wtype = rpcrdma_noch;
- if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
+ if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
+ wtype = rpcrdma_noch;
+ if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
__func__);
return -EIO;
@@ -479,7 +468,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* When padding is in use and applies to the transfer, insert
* it and change the message type.
*/
- if (req->rl_rtype == rpcrdma_noch) {
+ if (rtype == rpcrdma_noch) {
padlen = rpcrdma_inline_pullup(rqst,
RPCRDMA_INLINE_PAD_VALUE(rqst));
@@ -494,7 +483,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
- if (req->rl_wtype != rpcrdma_noch) {
+ if (wtype != rpcrdma_noch) {
dprintk("RPC: %s: invalid chunk list\n",
__func__);
return -EIO;
@@ -515,18 +504,26 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* on receive. Therefore, we request a reply chunk
* for non-writes wherever feasible and efficient.
*/
- if (req->rl_wtype == rpcrdma_noch)
- req->rl_wtype = rpcrdma_replych;
+ if (wtype == rpcrdma_noch)
+ wtype = rpcrdma_replych;
}
}
- hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
+ if (rtype != rpcrdma_noch) {
+ hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
+ headerp, rtype);
+ wtype = rtype; /* simplify dprintk */
+
+ } else if (wtype != rpcrdma_noch) {
+ hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
+ headerp, wtype);
+ }
if (hdrlen < 0)
return hdrlen;
dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
" headerp 0x%p base 0x%p lkey 0x%x\n",
- __func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
+ __func__, transfertypes[wtype], hdrlen, rpclen, padlen,
headerp, base, rdmab_lkey(req->rl_rdmabuf));
/*
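As a standalone restatement of the reply-direction selection in the hunk above (illustration only; the example_ name is invented, the thresholds and fields are the ones used in the patch):

static enum rpcrdma_chunktype
example_reply_chunktype(struct rpc_rqst *rqst)
{
	/* Small replies are returned inline. */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		return rpcrdma_noch;
	/* No page data: request a whole reply chunk. */
	if (rqst->rq_rcv_buf.page_len == 0)
		return rpcrdma_replych;
	/* Bulk read payload lands in pages via a write chunk. */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		return rpcrdma_writech;
	return rpcrdma_replych;
}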
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 2e192baa59f3..54f23b1be986 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -157,12 +157,47 @@ static struct ctl_table sunrpc_table[] = {
static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */
static void
+xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+ char buf[20];
+
+ snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
+ xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
+
+ xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
+}
+
+static void
+xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
+{
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+ char buf[40];
+
+ snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
+ xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
+
+ xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
+}
+
+static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
struct sockaddr *sap = (struct sockaddr *)
&rpcx_to_rdmad(xprt).addr;
- struct sockaddr_in *sin = (struct sockaddr_in *)sap;
- char buf[64];
+ char buf[128];
+
+ switch (sap->sa_family) {
+ case AF_INET:
+ xprt_rdma_format_addresses4(xprt, sap);
+ break;
+ case AF_INET6:
+ xprt_rdma_format_addresses6(xprt, sap);
+ break;
+ default:
+ pr_err("rpcrdma: Unrecognized address family\n");
+ return;
+ }
(void)rpc_ntop(sap, buf, sizeof(buf));
xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
@@ -170,16 +205,10 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt)
snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
- xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
-
- snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
- xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
-
snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
- /* netid */
- xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
+ xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}
static void
@@ -377,7 +406,10 @@ xprt_setup_rdma(struct xprt_create *args)
xprt_rdma_connect_worker);
xprt_rdma_format_addresses(xprt);
- xprt->max_payload = rpcrdma_max_payload(new_xprt);
+ xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
+ if (xprt->max_payload == 0)
+ goto out4;
+ xprt->max_payload <<= PAGE_SHIFT;
dprintk("RPC: %s: transport data payload maximum: %zu bytes\n",
__func__, xprt->max_payload);
@@ -552,8 +584,8 @@ xprt_rdma_free(void *buffer)
for (i = 0; req->rl_nchunks;) {
--req->rl_nchunks;
- i += rpcrdma_deregister_external(
- &req->rl_segments[i], r_xprt);
+ i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
+ &req->rl_segments[i]);
}
rpcrdma_buffer_put(req);
@@ -579,10 +611,7 @@ xprt_rdma_send_request(struct rpc_task *task)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
int rc = 0;
- if (req->rl_niovs == 0)
- rc = rpcrdma_marshal_req(rqst);
- else if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_ALLPHYSICAL)
- rc = rpcrdma_marshal_chunks(rqst, 0);
+ rc = rpcrdma_marshal_req(rqst);
if (rc < 0)
goto failed_marshal;
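For scale, the new max_payload computation above is a page count shifted into bytes. A tiny illustration with assumed values (64 pages from ro_maxpages, 4 KiB pages); these numbers are assumptions for the example, not figures taken from the patch:

/* Illustration only; both constants are assumptions. */
static inline size_t example_max_payload(void)
{
	size_t maxpages = 64;		/* hypothetical ro_maxpages() result */

	return maxpages << 12;		/* 64 * 4096 = 262144 bytes (256 KiB) */
}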
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 124676c13780..4870d272e006 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -50,6 +50,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>
#include "xprt_rdma.h"
@@ -62,9 +63,6 @@
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
-static void rpcrdma_reset_frmrs(struct rpcrdma_ia *);
-static void rpcrdma_reset_fmrs(struct rpcrdma_ia *);
-
/*
* internal functions
*/
@@ -188,7 +186,7 @@ static const char * const wc_status[] = {
"remote access error",
"remote operation error",
"transport retry counter exceeded",
- "RNR retrycounter exceeded",
+ "RNR retry counter exceeded",
"local RDD violation error",
"remove invalid RD request",
"operation aborted",
@@ -206,21 +204,17 @@ static const char * const wc_status[] = {
static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
- if (likely(wc->status == IB_WC_SUCCESS))
- return;
-
/* WARNING: Only wr_id and status are reliable at this point */
- if (wc->wr_id == 0ULL) {
- if (wc->status != IB_WC_WR_FLUSH_ERR)
+ if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("RPC: %s: SEND: %s\n",
__func__, COMPLETION_MSG(wc->status));
} else {
struct rpcrdma_mw *r;
r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
- r->r.frmr.fr_state = FRMR_IS_STALE;
- pr_err("RPC: %s: frmr %p (stale): %s\n",
- __func__, r, COMPLETION_MSG(wc->status));
+ r->mw_sendcompletion(wc);
}
}
@@ -424,7 +418,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
struct rpcrdma_ia *ia = &xprt->rx_ia;
struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
+ struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
struct ib_qp_attr *attr = &ia->ri_qp_attr;
struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
@@ -480,9 +474,8 @@ connected:
wake_up_all(&ep->rep_connect_wait);
/*FALLTHROUGH*/
default:
- dprintk("RPC: %s: %pI4:%u (ep 0x%p): %s\n",
- __func__, &addr->sin_addr.s_addr,
- ntohs(addr->sin_port), ep,
+ dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
+ __func__, sap, rpc_get_port(sap), ep,
CONNECTION_MSG(event->event));
break;
}
@@ -491,19 +484,16 @@ connected:
if (connstate == 1) {
int ird = attr->max_dest_rd_atomic;
int tird = ep->rep_remote_cma.responder_resources;
- printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
- "on %s, memreg %d slots %d ird %d%s\n",
- &addr->sin_addr.s_addr,
- ntohs(addr->sin_port),
+
+ pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
+ sap, rpc_get_port(sap),
ia->ri_id->device->name,
- ia->ri_memreg_strategy,
+ ia->ri_ops->ro_displayname,
xprt->rx_buf.rb_max_requests,
ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
} else if (connstate < 0) {
- printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
- &addr->sin_addr.s_addr,
- ntohs(addr->sin_port),
- connstate);
+ pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
+ sap, rpc_get_port(sap), connstate);
}
#endif
@@ -621,17 +611,13 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
if (memreg == RPCRDMA_FRMR) {
/* Requires both frmr reg and local dma lkey */
- if ((devattr->device_cap_flags &
+ if (((devattr->device_cap_flags &
(IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
- (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
+ (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
+ (devattr->max_fast_reg_page_list_len == 0)) {
dprintk("RPC: %s: FRMR registration "
"not supported by HCA\n", __func__);
memreg = RPCRDMA_MTHCAFMR;
- } else {
- /* Mind the ia limit on FRMR page list depth */
- ia->ri_max_frmr_depth = min_t(unsigned int,
- RPCRDMA_MAX_DATA_SEGS,
- devattr->max_fast_reg_page_list_len);
}
}
if (memreg == RPCRDMA_MTHCAFMR) {
@@ -652,13 +638,16 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
*/
switch (memreg) {
case RPCRDMA_FRMR:
+ ia->ri_ops = &rpcrdma_frwr_memreg_ops;
break;
case RPCRDMA_ALLPHYSICAL:
+ ia->ri_ops = &rpcrdma_physical_memreg_ops;
mem_priv = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ;
goto register_setup;
case RPCRDMA_MTHCAFMR:
+ ia->ri_ops = &rpcrdma_fmr_memreg_ops;
if (ia->ri_have_dma_lkey)
break;
mem_priv = IB_ACCESS_LOCAL_WRITE;
@@ -678,8 +667,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
rc = -ENOMEM;
goto out3;
}
- dprintk("RPC: %s: memory registration strategy is %d\n",
- __func__, memreg);
+ dprintk("RPC: %s: memory registration strategy is '%s'\n",
+ __func__, ia->ri_ops->ro_displayname);
/* Else will do memory reg/dereg for each chunk */
ia->ri_memreg_strategy = memreg;
@@ -743,49 +732,11 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
ep->rep_attr.qp_context = ep;
- /* send_cq and recv_cq initialized below */
ep->rep_attr.srq = NULL;
ep->rep_attr.cap.max_send_wr = cdata->max_requests;
- switch (ia->ri_memreg_strategy) {
- case RPCRDMA_FRMR: {
- int depth = 7;
-
- /* Add room for frmr register and invalidate WRs.
- * 1. FRMR reg WR for head
- * 2. FRMR invalidate WR for head
- * 3. N FRMR reg WRs for pagelist
- * 4. N FRMR invalidate WRs for pagelist
- * 5. FRMR reg WR for tail
- * 6. FRMR invalidate WR for tail
- * 7. The RDMA_SEND WR
- */
-
- /* Calculate N if the device max FRMR depth is smaller than
- * RPCRDMA_MAX_DATA_SEGS.
- */
- if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
- int delta = RPCRDMA_MAX_DATA_SEGS -
- ia->ri_max_frmr_depth;
-
- do {
- depth += 2; /* FRMR reg + invalidate */
- delta -= ia->ri_max_frmr_depth;
- } while (delta > 0);
-
- }
- ep->rep_attr.cap.max_send_wr *= depth;
- if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
- cdata->max_requests = devattr->max_qp_wr / depth;
- if (!cdata->max_requests)
- return -EINVAL;
- ep->rep_attr.cap.max_send_wr = cdata->max_requests *
- depth;
- }
- break;
- }
- default:
- break;
- }
+ rc = ia->ri_ops->ro_open(ia, ep, cdata);
+ if (rc)
+ return rc;
ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
ep->rep_attr.cap.max_recv_sge = 1;
@@ -944,21 +895,9 @@ retry:
rpcrdma_ep_disconnect(ep, ia);
rpcrdma_flush_cqs(ep);
- switch (ia->ri_memreg_strategy) {
- case RPCRDMA_FRMR:
- rpcrdma_reset_frmrs(ia);
- break;
- case RPCRDMA_MTHCAFMR:
- rpcrdma_reset_fmrs(ia);
- break;
- case RPCRDMA_ALLPHYSICAL:
- break;
- default:
- rc = -EIO;
- goto out;
- }
-
xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
+ ia->ri_ops->ro_reset(xprt);
+
id = rpcrdma_create_id(xprt, ia,
(struct sockaddr *)&xprt->rx_data.addr);
if (IS_ERR(id)) {
@@ -1123,91 +1062,6 @@ out:
return ERR_PTR(rc);
}
-static int
-rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
-{
- int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
- struct ib_fmr_attr fmr_attr = {
- .max_pages = RPCRDMA_MAX_DATA_SEGS,
- .max_maps = 1,
- .page_shift = PAGE_SHIFT
- };
- struct rpcrdma_mw *r;
- int i, rc;
-
- i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
- dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i);
-
- while (i--) {
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (r == NULL)
- return -ENOMEM;
-
- r->r.fmr = ib_alloc_fmr(ia->ri_pd, mr_access_flags, &fmr_attr);
- if (IS_ERR(r->r.fmr)) {
- rc = PTR_ERR(r->r.fmr);
- dprintk("RPC: %s: ib_alloc_fmr failed %i\n",
- __func__, rc);
- goto out_free;
- }
-
- list_add(&r->mw_list, &buf->rb_mws);
- list_add(&r->mw_all, &buf->rb_all);
- }
- return 0;
-
-out_free:
- kfree(r);
- return rc;
-}
-
-static int
-rpcrdma_init_frmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
-{
- struct rpcrdma_frmr *f;
- struct rpcrdma_mw *r;
- int i, rc;
-
- i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
- dprintk("RPC: %s: initalizing %d FRMRs\n", __func__, i);
-
- while (i--) {
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (r == NULL)
- return -ENOMEM;
- f = &r->r.frmr;
-
- f->fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
- ia->ri_max_frmr_depth);
- if (IS_ERR(f->fr_mr)) {
- rc = PTR_ERR(f->fr_mr);
- dprintk("RPC: %s: ib_alloc_fast_reg_mr "
- "failed %i\n", __func__, rc);
- goto out_free;
- }
-
- f->fr_pgl = ib_alloc_fast_reg_page_list(ia->ri_id->device,
- ia->ri_max_frmr_depth);
- if (IS_ERR(f->fr_pgl)) {
- rc = PTR_ERR(f->fr_pgl);
- dprintk("RPC: %s: ib_alloc_fast_reg_page_list "
- "failed %i\n", __func__, rc);
-
- ib_dereg_mr(f->fr_mr);
- goto out_free;
- }
-
- list_add(&r->mw_list, &buf->rb_mws);
- list_add(&r->mw_all, &buf->rb_all);
- }
-
- return 0;
-
-out_free:
- kfree(r);
- return rc;
-}
-
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
@@ -1244,22 +1098,9 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
- INIT_LIST_HEAD(&buf->rb_mws);
- INIT_LIST_HEAD(&buf->rb_all);
- switch (ia->ri_memreg_strategy) {
- case RPCRDMA_FRMR:
- rc = rpcrdma_init_frmrs(ia, buf);
- if (rc)
- goto out;
- break;
- case RPCRDMA_MTHCAFMR:
- rc = rpcrdma_init_fmrs(ia, buf);
- if (rc)
- goto out;
- break;
- default:
- break;
- }
+ rc = ia->ri_ops->ro_init(r_xprt);
+ if (rc)
+ goto out;
for (i = 0; i < buf->rb_max_requests; i++) {
struct rpcrdma_req *req;
@@ -1311,47 +1152,6 @@ rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
kfree(req);
}
-static void
-rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf)
-{
- struct rpcrdma_mw *r;
- int rc;
-
- while (!list_empty(&buf->rb_all)) {
- r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
- list_del(&r->mw_all);
- list_del(&r->mw_list);
-
- rc = ib_dealloc_fmr(r->r.fmr);
- if (rc)
- dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
- __func__, rc);
-
- kfree(r);
- }
-}
-
-static void
-rpcrdma_destroy_frmrs(struct rpcrdma_buffer *buf)
-{
- struct rpcrdma_mw *r;
- int rc;
-
- while (!list_empty(&buf->rb_all)) {
- r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
- list_del(&r->mw_all);
- list_del(&r->mw_list);
-
- rc = ib_dereg_mr(r->r.frmr.fr_mr);
- if (rc)
- dprintk("RPC: %s: ib_dereg_mr failed %i\n",
- __func__, rc);
- ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
-
- kfree(r);
- }
-}
-
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
@@ -1372,104 +1172,11 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
}
- switch (ia->ri_memreg_strategy) {
- case RPCRDMA_FRMR:
- rpcrdma_destroy_frmrs(buf);
- break;
- case RPCRDMA_MTHCAFMR:
- rpcrdma_destroy_fmrs(buf);
- break;
- default:
- break;
- }
+ ia->ri_ops->ro_destroy(buf);
kfree(buf->rb_pool);
}
-/* After a disconnect, unmap all FMRs.
- *
- * This is invoked only in the transport connect worker in order
- * to serialize with rpcrdma_register_fmr_external().
- */
-static void
-rpcrdma_reset_fmrs(struct rpcrdma_ia *ia)
-{
- struct rpcrdma_xprt *r_xprt =
- container_of(ia, struct rpcrdma_xprt, rx_ia);
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct list_head *pos;
- struct rpcrdma_mw *r;
- LIST_HEAD(l);
- int rc;
-
- list_for_each(pos, &buf->rb_all) {
- r = list_entry(pos, struct rpcrdma_mw, mw_all);
-
- INIT_LIST_HEAD(&l);
- list_add(&r->r.fmr->list, &l);
- rc = ib_unmap_fmr(&l);
- if (rc)
- dprintk("RPC: %s: ib_unmap_fmr failed %i\n",
- __func__, rc);
- }
-}
-
-/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
- * an unusable state. Find FRMRs in this state and dereg / reg
- * each. FRMRs that are VALID and attached to an rpcrdma_req are
- * also torn down.
- *
- * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
- *
- * This is invoked only in the transport connect worker in order
- * to serialize with rpcrdma_register_frmr_external().
- */
-static void
-rpcrdma_reset_frmrs(struct rpcrdma_ia *ia)
-{
- struct rpcrdma_xprt *r_xprt =
- container_of(ia, struct rpcrdma_xprt, rx_ia);
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct list_head *pos;
- struct rpcrdma_mw *r;
- int rc;
-
- list_for_each(pos, &buf->rb_all) {
- r = list_entry(pos, struct rpcrdma_mw, mw_all);
-
- if (r->r.frmr.fr_state == FRMR_IS_INVALID)
- continue;
-
- rc = ib_dereg_mr(r->r.frmr.fr_mr);
- if (rc)
- dprintk("RPC: %s: ib_dereg_mr failed %i\n",
- __func__, rc);
- ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
-
- r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
- ia->ri_max_frmr_depth);
- if (IS_ERR(r->r.frmr.fr_mr)) {
- rc = PTR_ERR(r->r.frmr.fr_mr);
- dprintk("RPC: %s: ib_alloc_fast_reg_mr"
- " failed %i\n", __func__, rc);
- continue;
- }
- r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
- ia->ri_id->device,
- ia->ri_max_frmr_depth);
- if (IS_ERR(r->r.frmr.fr_pgl)) {
- rc = PTR_ERR(r->r.frmr.fr_pgl);
- dprintk("RPC: %s: "
- "ib_alloc_fast_reg_page_list "
- "failed %i\n", __func__, rc);
-
- ib_dereg_mr(r->r.frmr.fr_mr);
- continue;
- }
- r->r.frmr.fr_state = FRMR_IS_INVALID;
- }
-}
-
/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
* some req segments uninitialized.
*/
@@ -1509,7 +1216,7 @@ rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
}
}
-/* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external().
+/* rpcrdma_unmap_one() was already done during deregistration.
* Redo only the ib_post_send().
*/
static void
@@ -1729,6 +1436,14 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
* Wrappers for internal-use kmalloc memory registration, used by buffer code.
*/
+void
+rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
+{
+ dprintk("RPC: map_one: offset %p iova %llx len %zu\n",
+ seg->mr_offset,
+ (unsigned long long)seg->mr_dma, seg->mr_dmalen);
+}
+
static int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
struct ib_mr **mrp, struct ib_sge *iov)
@@ -1854,287 +1569,6 @@ rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
}
/*
- * Wrappers for chunk registration, shared by read/write chunk code.
- */
-
-static void
-rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
-{
- seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- seg->mr_dmalen = seg->mr_len;
- if (seg->mr_page)
- seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
- seg->mr_page, offset_in_page(seg->mr_offset),
- seg->mr_dmalen, seg->mr_dir);
- else
- seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
- seg->mr_offset,
- seg->mr_dmalen, seg->mr_dir);
- if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
- dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
- __func__,
- (unsigned long long)seg->mr_dma,
- seg->mr_offset, seg->mr_dmalen);
- }
-}
-
-static void
-rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
-{
- if (seg->mr_page)
- ib_dma_unmap_page(ia->ri_id->device,
- seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
- else
- ib_dma_unmap_single(ia->ri_id->device,
- seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
-}
-
-static int
-rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
- int *nsegs, int writing, struct rpcrdma_ia *ia,
- struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_mr_seg *seg1 = seg;
- struct rpcrdma_mw *mw = seg1->rl_mw;
- struct rpcrdma_frmr *frmr = &mw->r.frmr;
- struct ib_mr *mr = frmr->fr_mr;
- struct ib_send_wr fastreg_wr, *bad_wr;
- u8 key;
- int len, pageoff;
- int i, rc;
- int seg_len;
- u64 pa;
- int page_no;
-
- pageoff = offset_in_page(seg1->mr_offset);
- seg1->mr_offset -= pageoff; /* start of page */
- seg1->mr_len += pageoff;
- len = -pageoff;
- if (*nsegs > ia->ri_max_frmr_depth)
- *nsegs = ia->ri_max_frmr_depth;
- for (page_no = i = 0; i < *nsegs;) {
- rpcrdma_map_one(ia, seg, writing);
- pa = seg->mr_dma;
- for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
- frmr->fr_pgl->page_list[page_no++] = pa;
- pa += PAGE_SIZE;
- }
- len += seg->mr_len;
- ++seg;
- ++i;
- /* Check for holes */
- if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
- offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
- break;
- }
- dprintk("RPC: %s: Using frmr %p to map %d segments\n",
- __func__, mw, i);
-
- frmr->fr_state = FRMR_IS_VALID;
-
- memset(&fastreg_wr, 0, sizeof(fastreg_wr));
- fastreg_wr.wr_id = (unsigned long)(void *)mw;
- fastreg_wr.opcode = IB_WR_FAST_REG_MR;
- fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma;
- fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
- fastreg_wr.wr.fast_reg.page_list_len = page_no;
- fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
- fastreg_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
- if (fastreg_wr.wr.fast_reg.length < len) {
- rc = -EIO;
- goto out_err;
- }
-
- /* Bump the key */
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
-
- fastreg_wr.wr.fast_reg.access_flags = (writing ?
- IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
- IB_ACCESS_REMOTE_READ);
- fastreg_wr.wr.fast_reg.rkey = mr->rkey;
- DECR_CQCOUNT(&r_xprt->rx_ep);
-
- rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
- if (rc) {
- dprintk("RPC: %s: failed ib_post_send for register,"
- " status %i\n", __func__, rc);
- ib_update_fast_reg_key(mr, --key);
- goto out_err;
- } else {
- seg1->mr_rkey = mr->rkey;
- seg1->mr_base = seg1->mr_dma + pageoff;
- seg1->mr_nsegs = i;
- seg1->mr_len = len;
- }
- *nsegs = i;
- return 0;
-out_err:
- frmr->fr_state = FRMR_IS_INVALID;
- while (i--)
- rpcrdma_unmap_one(ia, --seg);
- return rc;
-}
-
-static int
-rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
- struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_mr_seg *seg1 = seg;
- struct ib_send_wr invalidate_wr, *bad_wr;
- int rc;
-
- seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
-
- memset(&invalidate_wr, 0, sizeof invalidate_wr);
- invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
- invalidate_wr.opcode = IB_WR_LOCAL_INV;
- invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
- DECR_CQCOUNT(&r_xprt->rx_ep);
-
- read_lock(&ia->ri_qplock);
- while (seg1->mr_nsegs--)
- rpcrdma_unmap_one(ia, seg++);
- rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
- read_unlock(&ia->ri_qplock);
- if (rc) {
- /* Force rpcrdma_buffer_get() to retry */
- seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
- dprintk("RPC: %s: failed ib_post_send for invalidate,"
- " status %i\n", __func__, rc);
- }
- return rc;
-}
-
-static int
-rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
- int *nsegs, int writing, struct rpcrdma_ia *ia)
-{
- struct rpcrdma_mr_seg *seg1 = seg;
- u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
- int len, pageoff, i, rc;
-
- pageoff = offset_in_page(seg1->mr_offset);
- seg1->mr_offset -= pageoff; /* start of page */
- seg1->mr_len += pageoff;
- len = -pageoff;
- if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
- *nsegs = RPCRDMA_MAX_DATA_SEGS;
- for (i = 0; i < *nsegs;) {
- rpcrdma_map_one(ia, seg, writing);
- physaddrs[i] = seg->mr_dma;
- len += seg->mr_len;
- ++seg;
- ++i;
- /* Check for holes */
- if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
- offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
- break;
- }
- rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma);
- if (rc) {
- dprintk("RPC: %s: failed ib_map_phys_fmr "
- "%u@0x%llx+%i (%d)... status %i\n", __func__,
- len, (unsigned long long)seg1->mr_dma,
- pageoff, i, rc);
- while (i--)
- rpcrdma_unmap_one(ia, --seg);
- } else {
- seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey;
- seg1->mr_base = seg1->mr_dma + pageoff;
- seg1->mr_nsegs = i;
- seg1->mr_len = len;
- }
- *nsegs = i;
- return rc;
-}
-
-static int
-rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
- struct rpcrdma_ia *ia)
-{
- struct rpcrdma_mr_seg *seg1 = seg;
- LIST_HEAD(l);
- int rc;
-
- list_add(&seg1->rl_mw->r.fmr->list, &l);
- rc = ib_unmap_fmr(&l);
- read_lock(&ia->ri_qplock);
- while (seg1->mr_nsegs--)
- rpcrdma_unmap_one(ia, seg++);
- read_unlock(&ia->ri_qplock);
- if (rc)
- dprintk("RPC: %s: failed ib_unmap_fmr,"
- " status %i\n", __func__, rc);
- return rc;
-}
-
-int
-rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
- int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- int rc = 0;
-
- switch (ia->ri_memreg_strategy) {
-
- case RPCRDMA_ALLPHYSICAL:
- rpcrdma_map_one(ia, seg, writing);
- seg->mr_rkey = ia->ri_bind_mem->rkey;
- seg->mr_base = seg->mr_dma;
- seg->mr_nsegs = 1;
- nsegs = 1;
- break;
-
- /* Registration using frmr registration */
- case RPCRDMA_FRMR:
- rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt);
- break;
-
- /* Registration using fmr memory registration */
- case RPCRDMA_MTHCAFMR:
- rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
- break;
-
- default:
- return -EIO;
- }
- if (rc)
- return rc;
-
- return nsegs;
-}
-
-int
-rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
- struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- int nsegs = seg->mr_nsegs, rc;
-
- switch (ia->ri_memreg_strategy) {
-
- case RPCRDMA_ALLPHYSICAL:
- read_lock(&ia->ri_qplock);
- rpcrdma_unmap_one(ia, seg);
- read_unlock(&ia->ri_qplock);
- break;
-
- case RPCRDMA_FRMR:
- rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
- break;
-
- case RPCRDMA_MTHCAFMR:
- rc = rpcrdma_deregister_fmr_external(seg, ia);
- break;
-
- default:
- break;
- }
- return nsegs;
-}
-
-/*
* Prepost any receive buffer, then post send.
*
* Receive buffer is donated to hardware, reclaimed upon recv completion.
@@ -2156,7 +1590,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
}
send_wr.next = NULL;
- send_wr.wr_id = 0ULL; /* no send cookie */
+ send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
send_wr.sg_list = req->rl_send_iov;
send_wr.num_sge = req->rl_niovs;
send_wr.opcode = IB_WR_SEND;
@@ -2215,43 +1649,24 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
return rc;
}
-/* Physical mapping means one Read/Write list entry per-page.
- * All list entries must fit within an inline buffer
- *
- * NB: The server must return a Write list for NFS READ,
- * which has the same constraint. Factor in the inline
- * rsize as well.
+/* How many chunk list items fit within our inline buffers?
*/
-static size_t
-rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
+unsigned int
+rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
- unsigned int inline_size, pages;
+ int bytes, segments;
- inline_size = min_t(unsigned int,
- cdata->inline_wsize, cdata->inline_rsize);
- inline_size -= RPCRDMA_HDRLEN_MIN;
- pages = inline_size / sizeof(struct rpcrdma_segment);
- return pages << PAGE_SHIFT;
-}
-
-static size_t
-rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
-{
- return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
-}
-
-size_t
-rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
-{
- size_t result;
-
- switch (r_xprt->rx_ia.ri_memreg_strategy) {
- case RPCRDMA_ALLPHYSICAL:
- result = rpcrdma_physical_max_payload(r_xprt);
- break;
- default:
- result = rpcrdma_mr_max_payload(r_xprt);
+ bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
+ bytes -= RPCRDMA_HDRLEN_MIN;
+ if (bytes < sizeof(struct rpcrdma_segment) * 2) {
+ pr_warn("RPC: %s: inline threshold too small\n",
+ __func__);
+ return 0;
}
- return result;
+
+ segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
+ dprintk("RPC: %s: max chunk list size = %d segments\n",
+ __func__, segments);
+ return segments;
}
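The rounding in rpcrdma_max_segments() relies on fls(): since fls(x) is the position of the highest set bit, 1 << (fls(x) - 1) rounds x down to a power of two. A self-contained userspace sketch with assumed sizes (1024-byte inline threshold, 28-byte header, 16-byte segment entries; all three are assumptions for the example):

#include <stdio.h>

/* Open-coded fls() so the example runs in userspace. */
static int fls_example(unsigned int x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	unsigned int bytes = 1024 - 28;		/* 996 bytes of chunk-list space */
	unsigned int slots = bytes / 16;	/* 62 segment entries fit */
	unsigned int segments = 1u << (fls_example(slots) - 1);

	printf("max chunk list size = %u segments\n", segments);	/* prints 32 */
	return 0;
}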
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0a16fb6f0885..78e0b8beaa36 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -60,6 +60,7 @@
* Interface Adapter -- one per transport instance
*/
struct rpcrdma_ia {
+ const struct rpcrdma_memreg_ops *ri_ops;
rwlock_t ri_qplock;
struct rdma_cm_id *ri_id;
struct ib_pd *ri_pd;
@@ -105,6 +106,10 @@ struct rpcrdma_ep {
#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
+/* Force completion handler to ignore the signal
+ */
+#define RPCRDMA_IGNORE_COMPLETION (0ULL)
+
/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
*
* The below structure appears at the front of a large region of kmalloc'd
@@ -143,14 +148,6 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
return (struct rpcrdma_msg *)rb->rg_base;
}
-enum rpcrdma_chunktype {
- rpcrdma_noch = 0,
- rpcrdma_readch,
- rpcrdma_areadch,
- rpcrdma_writech,
- rpcrdma_replych
-};
-
/*
* struct rpcrdma_rep -- this structure encapsulates state required to recv
* and complete a reply, asychronously. It needs several pieces of
@@ -213,6 +210,7 @@ struct rpcrdma_mw {
struct ib_fmr *fmr;
struct rpcrdma_frmr frmr;
} r;
+ void (*mw_sendcompletion)(struct ib_wc *);
struct list_head mw_list;
struct list_head mw_all;
};
@@ -258,7 +256,6 @@ struct rpcrdma_req {
unsigned int rl_niovs; /* 0, 2 or 4 */
unsigned int rl_nchunks; /* non-zero if chunks */
unsigned int rl_connect_cookie; /* retry detection */
- enum rpcrdma_chunktype rl_rtype, rl_wtype;
struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
struct rpcrdma_rep *rl_reply;/* holder for reply buffer */
struct ib_sge rl_send_iov[4]; /* for active requests */
@@ -340,6 +337,29 @@ struct rpcrdma_stats {
};
/*
+ * Per-registration mode operations
+ */
+struct rpcrdma_xprt;
+struct rpcrdma_memreg_ops {
+ int (*ro_map)(struct rpcrdma_xprt *,
+ struct rpcrdma_mr_seg *, int, bool);
+ int (*ro_unmap)(struct rpcrdma_xprt *,
+ struct rpcrdma_mr_seg *);
+ int (*ro_open)(struct rpcrdma_ia *,
+ struct rpcrdma_ep *,
+ struct rpcrdma_create_data_internal *);
+ size_t (*ro_maxpages)(struct rpcrdma_xprt *);
+ int (*ro_init)(struct rpcrdma_xprt *);
+ void (*ro_reset)(struct rpcrdma_xprt *);
+ void (*ro_destroy)(struct rpcrdma_buffer *);
+ const char *ro_displayname;
+};
+
+extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
+extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
+extern const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops;
+
+/*
* RPCRDMA transport -- encapsulates the structures above for
* integration with RPC.
*
@@ -398,16 +418,56 @@ void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
-int rpcrdma_register_external(struct rpcrdma_mr_seg *,
- int, int, struct rpcrdma_xprt *);
-int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
- struct rpcrdma_xprt *);
-
struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
size_t, gfp_t);
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
struct rpcrdma_regbuf *);
+unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
+
+/*
+ * Wrappers for chunk registration, shared by read/write chunk code.
+ */
+
+void rpcrdma_mapping_error(struct rpcrdma_mr_seg *);
+
+static inline enum dma_data_direction
+rpcrdma_data_dir(bool writing)
+{
+ return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+static inline void
+rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg,
+ enum dma_data_direction direction)
+{
+ seg->mr_dir = direction;
+ seg->mr_dmalen = seg->mr_len;
+
+ if (seg->mr_page)
+ seg->mr_dma = ib_dma_map_page(device,
+ seg->mr_page, offset_in_page(seg->mr_offset),
+ seg->mr_dmalen, seg->mr_dir);
+ else
+ seg->mr_dma = ib_dma_map_single(device,
+ seg->mr_offset,
+ seg->mr_dmalen, seg->mr_dir);
+
+ if (ib_dma_mapping_error(device, seg->mr_dma))
+ rpcrdma_mapping_error(seg);
+}
+
+static inline void
+rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg)
+{
+ if (seg->mr_page)
+ ib_dma_unmap_page(device,
+ seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
+ else
+ ib_dma_unmap_single(device,
+ seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
+}
+
/*
* RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
*/
@@ -418,9 +478,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
/*
* RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
*/
-ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t);
int rpcrdma_marshal_req(struct rpc_rqst *);
-size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 8c1e558db118..46568b85c333 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -1,6 +1,7 @@
/*
* net/switchdev/switchdev.c - Switch device API
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
+#include <net/ip_fib.h>
#include <net/switchdev.h>
/**
@@ -26,13 +28,13 @@
int netdev_switch_parent_id_get(struct net_device *dev,
struct netdev_phys_item_id *psid)
{
- const struct net_device_ops *ops = dev->netdev_ops;
+ const struct swdev_ops *ops = dev->swdev_ops;
- if (!ops->ndo_switch_parent_id_get)
+ if (!ops || !ops->swdev_parent_id_get)
return -EOPNOTSUPP;
- return ops->ndo_switch_parent_id_get(dev, psid);
+ return ops->swdev_parent_id_get(dev, psid);
}
-EXPORT_SYMBOL(netdev_switch_parent_id_get);
+EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get);
/**
* netdev_switch_port_stp_update - Notify switch device port of STP
@@ -44,20 +46,29 @@ EXPORT_SYMBOL(netdev_switch_parent_id_get);
*/
int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
{
- const struct net_device_ops *ops = dev->netdev_ops;
+ const struct swdev_ops *ops = dev->swdev_ops;
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
- if (!ops->ndo_switch_port_stp_update)
- return -EOPNOTSUPP;
- WARN_ON(!ops->ndo_switch_parent_id_get);
- return ops->ndo_switch_port_stp_update(dev, state);
+ if (ops && ops->swdev_port_stp_update)
+ return ops->swdev_port_stp_update(dev, state);
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = netdev_switch_port_stp_update(lower_dev, state);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return err;
}
-EXPORT_SYMBOL(netdev_switch_port_stp_update);
+EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
static DEFINE_MUTEX(netdev_switch_mutex);
static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
/**
- * register_netdev_switch_notifier - Register nofifier
+ * register_netdev_switch_notifier - Register notifier
* @nb: notifier_block
*
* Register switch device notifier. This should be used by code
@@ -73,10 +84,10 @@ int register_netdev_switch_notifier(struct notifier_block *nb)
mutex_unlock(&netdev_switch_mutex);
return err;
}
-EXPORT_SYMBOL(register_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
/**
- * unregister_netdev_switch_notifier - Unregister nofifier
+ * unregister_netdev_switch_notifier - Unregister notifier
* @nb: notifier_block
*
* Unregister switch device notifier.
@@ -91,10 +102,10 @@ int unregister_netdev_switch_notifier(struct notifier_block *nb)
mutex_unlock(&netdev_switch_mutex);
return err;
}
-EXPORT_SYMBOL(unregister_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
/**
- * call_netdev_switch_notifiers - Call nofifiers
+ * call_netdev_switch_notifiers - Call notifiers
* @val: value passed unmodified to notifier function
* @dev: port device
* @info: notifier information data
@@ -114,7 +125,7 @@ int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
mutex_unlock(&netdev_switch_mutex);
return err;
}
-EXPORT_SYMBOL(call_netdev_switch_notifiers);
+EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
/**
* netdev_switch_port_bridge_setlink - Notify switch device port of bridge
@@ -139,7 +150,7 @@ int netdev_switch_port_bridge_setlink(struct net_device *dev,
return ops->ndo_bridge_setlink(dev, nlh, flags);
}
-EXPORT_SYMBOL(netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_setlink);
/**
* netdev_switch_port_bridge_dellink - Notify switch device port of bridge
@@ -164,7 +175,7 @@ int netdev_switch_port_bridge_dellink(struct net_device *dev,
return ops->ndo_bridge_dellink(dev, nlh, flags);
}
-EXPORT_SYMBOL(netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_dellink);
/**
* ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
@@ -194,7 +205,7 @@ int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
return ret;
}
-EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_setlink);
/**
* ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
@@ -224,4 +235,170 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
return ret;
}
-EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink);
+
+static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
+{
+ const struct swdev_ops *ops = dev->swdev_ops;
+ struct net_device *lower_dev;
+ struct net_device *port_dev;
+ struct list_head *iter;
+
+ /* Recursively search down until we find a sw port dev.
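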
+ * (A sw port dev supports swdev_parent_id_get).
+ */
+
+ if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD &&
+ ops && ops->swdev_parent_id_get)
+ return dev;
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ port_dev = netdev_switch_get_lowest_dev(lower_dev);
+ if (port_dev)
+ return port_dev;
+ }
+
+ return NULL;
+}
+
+static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
+{
+ struct netdev_phys_item_id psid;
+ struct netdev_phys_item_id prev_psid;
+ struct net_device *dev = NULL;
+ int nhsel;
+
+ /* For this route, all nexthop devs must be on the same switch. */
+
+ for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+ const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+ if (!nh->nh_dev)
+ return NULL;
+
+ dev = netdev_switch_get_lowest_dev(nh->nh_dev);
+ if (!dev)
+ return NULL;
+
+ if (netdev_switch_parent_id_get(dev, &psid))
+ return NULL;
+
+ if (nhsel > 0) {
+ if (prev_psid.id_len != psid.id_len)
+ return NULL;
+ if (memcmp(prev_psid.id, psid.id, psid.id_len))
+ return NULL;
+ }
+
+ prev_psid = psid;
+ }
+
+ return dev;
+}
+
+/**
+ * netdev_switch_fib_ipv4_add - Add IPv4 route entry to switch
+ *
+ * @dst: route's IPv4 destination address
+ * @dst_len: destination address length (prefix length)
+ * @fi: route FIB info structure
+ * @tos: route TOS
+ * @type: route type
+ * @nlflags: netlink flags passed in (NLM_F_*)
+ * @tb_id: route table ID
+ *
+ * Add IPv4 route entry to switch device.
+ */
+int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 nlflags, u32 tb_id)
+{
+ struct net_device *dev;
+ const struct swdev_ops *ops;
+ int err = 0;
+
+ /* Don't offload route if using custom ip rules or if
+ * IPv4 FIB offloading has been disabled completely.
+ */
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ if (fi->fib_net->ipv4.fib_has_custom_rules)
+ return 0;
+#endif
+
+ if (fi->fib_net->ipv4.fib_offload_disabled)
+ return 0;
+
+ dev = netdev_switch_get_dev_by_nhs(fi);
+ if (!dev)
+ return 0;
+ ops = dev->swdev_ops;
+
+ if (ops->swdev_fib_ipv4_add) {
+ err = ops->swdev_fib_ipv4_add(dev, htonl(dst), dst_len,
+ fi, tos, type, nlflags,
+ tb_id);
+ if (!err)
+ fi->fib_flags |= RTNH_F_EXTERNAL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
+
+/**
+ * netdev_switch_fib_ipv4_del - Delete IPv4 route entry from switch
+ *
+ * @dst: route's IPv4 destination address
+ * @dst_len: destination address length (prefix length)
+ * @fi: route FIB info structure
+ * @tos: route TOS
+ * @type: route type
+ * @tb_id: route table ID
+ *
+ * Delete IPv4 route entry from switch device.
+ */
+int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id)
+{
+ struct net_device *dev;
+ const struct swdev_ops *ops;
+ int err = 0;
+
+ if (!(fi->fib_flags & RTNH_F_EXTERNAL))
+ return 0;
+
+ dev = netdev_switch_get_dev_by_nhs(fi);
+ if (!dev)
+ return 0;
+ ops = dev->swdev_ops;
+
+ if (ops->swdev_fib_ipv4_del) {
+ err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
+ fi, tos, type, tb_id);
+ if (!err)
+ fi->fib_flags &= ~RTNH_F_EXTERNAL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_del);
+
+/**
+ * netdev_switch_fib_ipv4_abort - Abort an IPv4 FIB operation
+ *
+ * @fi: route FIB info structure
+ */
+void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+{
+ /* There was a problem installing this route to the offload
+ * device. For now, until we come up with more refined
+ * policy handling, abruptly end IPv4 fib offloading for
+ * the entire net by flushing offload device(s) of all
+ * IPv4 routes, and mark IPv4 fib offloading broken from
+ * this point forward.
+ */
+
+ fib_flush_external(fi->fib_net);
+ fi->fib_net->ipv4.fib_offload_disabled = true;
+}
+EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_abort);
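A hypothetical driver-side sketch of the swdev_ops these helpers dispatch through; the exsw_* names are invented, while the member names and call signatures follow the callers in this file:

#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

static int exsw_parent_id_get(struct net_device *dev,
			      struct netdev_phys_item_id *psid)
{
	/* Every port of the same ASIC reports the same parent ID. */
	psid->id_len = 4;
	memcpy(psid->id, "exsw", 4);
	return 0;
}

static int exsw_fib_ipv4_add(struct net_device *dev, __be32 dst, int dst_len,
			     struct fib_info *fi, u8 tos, u8 type,
			     u32 nlflags, u32 tb_id)
{
	/* Program the prefix into switch hardware here. */
	return 0;
}

static int exsw_fib_ipv4_del(struct net_device *dev, __be32 dst, int dst_len,
			     struct fib_info *fi, u8 tos, u8 type, u32 tb_id)
{
	/* Remove the prefix from switch hardware here. */
	return 0;
}

static const struct swdev_ops exsw_swdev_ops = {
	.swdev_parent_id_get	= exsw_parent_id_get,
	.swdev_fib_ipv4_add	= exsw_fib_ipv4_add,
	.swdev_fib_ipv4_del	= exsw_fib_ipv4_del,
};

/* In the driver's netdev setup path:
 *	dev->swdev_ops = &exsw_swdev_ops;
 *	dev->features |= NETIF_F_HW_SWITCH_OFFLOAD;
 */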
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 91c8a8e031db..c25a3a149dc4 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -26,3 +26,11 @@ config TIPC_MEDIA_IB
help
Saying Y here will enable support for running TIPC on
IP-over-InfiniBand devices.
+config TIPC_MEDIA_UDP
+ bool "IP/UDP media type support"
+ depends on TIPC
+ select NET_UDP_TUNNEL
+ help
+ Saying Y here will enable support for running TIPC over IP/UDP
+ bool
+ default y
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 599b1a540d2b..57e460be4692 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -10,5 +10,6 @@ tipc-y += addr.o bcast.o bearer.o \
netlink.o netlink_compat.o node.o socket.o eth_media.o \
server.o socket.o
+tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 48fd3b5a73fb..ba7daa864d44 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -38,6 +38,13 @@
#include "addr.h"
#include "core.h"
+u32 tipc_own_addr(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return tn->own_addr;
+}
+
/**
* in_own_cluster - test for cluster inclusion; <0.0.0> always matches
*/
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index c700c2d28e09..7ba6d5c8ae40 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -55,6 +55,7 @@ static inline u32 tipc_cluster_mask(u32 addr)
return addr & TIPC_CLUSTER_MASK;
}
+u32 tipc_own_addr(struct net *net);
int in_own_cluster(struct net *net, u32 addr);
int in_own_cluster_exact(struct net *net, u32 addr);
int in_own_node(struct net *net, u32 addr);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 3e41704832de..c5cbdcb1f0b5 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -62,21 +62,8 @@ static void tipc_bclink_lock(struct net *net)
static void tipc_bclink_unlock(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_node *node = NULL;
- if (likely(!tn->bclink->flags)) {
- spin_unlock_bh(&tn->bclink->lock);
- return;
- }
-
- if (tn->bclink->flags & TIPC_BCLINK_RESET) {
- tn->bclink->flags &= ~TIPC_BCLINK_RESET;
- node = tipc_bclink_retransmit_to(net);
- }
spin_unlock_bh(&tn->bclink->lock);
-
- if (node)
- tipc_link_reset_all(node);
}
void tipc_bclink_input(struct net *net)
@@ -91,13 +78,6 @@ uint tipc_bclink_get_mtu(void)
return MAX_PKT_DEFAULT_MCAST;
}
-void tipc_bclink_set_flags(struct net *net, unsigned int flags)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- tn->bclink->flags |= flags;
-}
-
static u32 bcbuf_acks(struct sk_buff *buf)
{
return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
@@ -135,9 +115,10 @@ static void bclink_set_last_sent(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *bcl = tn->bcl;
+ struct sk_buff *skb = skb_peek(&bcl->backlogq);
- if (bcl->next_out)
- bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
+ if (skb)
+ bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
else
bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}
@@ -155,7 +136,6 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
seqno : node->bclink.last_sent;
}
-
/**
* tipc_bclink_retransmit_to - get most recent node to request retransmission
*
@@ -180,7 +160,7 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
struct sk_buff *skb;
struct tipc_link *bcl = tn->bcl;
- skb_queue_walk(&bcl->outqueue, skb) {
+ skb_queue_walk(&bcl->transmq, skb) {
if (more(buf_seqno(skb), after)) {
tipc_link_retransmit(bcl, skb, mod(to - after));
break;
@@ -210,14 +190,17 @@ void tipc_bclink_wakeup_users(struct net *net)
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
struct sk_buff *skb, *tmp;
- struct sk_buff *next;
unsigned int released = 0;
struct net *net = n_ptr->net;
struct tipc_net *tn = net_generic(net, tipc_net_id);
+ if (unlikely(!n_ptr->bclink.recv_permitted))
+ return;
+
tipc_bclink_lock(net);
+
/* Bail out if tx queue is empty (no clean up is required) */
- skb = skb_peek(&tn->bcl->outqueue);
+ skb = skb_peek(&tn->bcl->transmq);
if (!skb)
goto exit;
@@ -244,27 +227,19 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
}
/* Skip over packets that node has previously acknowledged */
- skb_queue_walk(&tn->bcl->outqueue, skb) {
+ skb_queue_walk(&tn->bcl->transmq, skb) {
if (more(buf_seqno(skb), n_ptr->bclink.acked))
break;
}
/* Update packets that node is now acknowledging */
- skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
+ skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
if (more(buf_seqno(skb), acked))
break;
-
- next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
- if (skb != tn->bcl->next_out) {
- bcbuf_decr_acks(skb);
- } else {
- bcbuf_set_acks(skb, 0);
- tn->bcl->next_out = next;
- bclink_set_last_sent(net);
- }
-
+ bcbuf_decr_acks(skb);
+ bclink_set_last_sent(net);
if (bcbuf_acks(skb) == 0) {
- __skb_unlink(skb, &tn->bcl->outqueue);
+ __skb_unlink(skb, &tn->bcl->transmq);
kfree_skb(skb);
released = 1;
}
@@ -272,7 +247,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
n_ptr->bclink.acked = acked;
/* Try resolving broadcast link congestion, if necessary */
- if (unlikely(tn->bcl->next_out)) {
+ if (unlikely(skb_peek(&tn->bcl->backlogq))) {
tipc_link_push_packets(tn->bcl);
bclink_set_last_sent(net);
}
@@ -319,7 +294,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
buf = tipc_buf_acquire(INT_H_SIZE);
if (buf) {
struct tipc_msg *msg = buf_msg(buf);
- struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
+ struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
@@ -354,13 +329,12 @@ static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
return;
tipc_node_lock(n_ptr);
-
if (n_ptr->bclink.recv_permitted &&
(n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
(n_ptr->bclink.last_in == msg_bcgap_after(msg)))
n_ptr->bclink.oos_state = 2;
-
tipc_node_unlock(n_ptr);
+ tipc_node_put(n_ptr);
}
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
@@ -387,14 +361,13 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
__skb_queue_purge(list);
return -EHOSTUNREACH;
}
-
/* Broadcast to all nodes */
if (likely(bclink)) {
tipc_bclink_lock(net);
if (likely(bclink->bcast_nodes.count)) {
rc = __tipc_link_xmit(net, bcl, list);
if (likely(!rc)) {
- u32 len = skb_queue_len(&bcl->outqueue);
+ u32 len = skb_queue_len(&bcl->transmq);
bclink_set_last_sent(net);
bcl->stats.queue_sz_counts++;
@@ -440,7 +413,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
*/
if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
tipc_link_proto_xmit(node->active_links[node->addr & 1],
- STATE_MSG, 0, 0, 0, 0, 0);
+ STATE_MSG, 0, 0, 0, 0);
tn->bcl->stats.sent_acks++;
}
}
@@ -481,17 +454,18 @@ void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
goto unlock;
if (msg_destnode(msg) == tn->own_addr) {
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
- tipc_node_unlock(node);
tipc_bclink_lock(net);
bcl->stats.recv_nacks++;
tn->bclink->retransmit_to = node;
bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
msg_bcgap_to(msg));
tipc_bclink_unlock(net);
+ tipc_node_unlock(node);
} else {
tipc_node_unlock(node);
bclink_peek_nack(net, msg);
}
+ tipc_node_put(node);
goto exit;
}
@@ -528,11 +502,13 @@ receive:
tipc_bclink_unlock(net);
tipc_node_unlock(node);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
- tipc_buf_append(&node->bclink.reasm_buf, &buf);
- if (unlikely(!buf && !node->bclink.reasm_buf))
- goto unlock;
tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
+ tipc_buf_append(&node->bclink.reasm_buf, &buf);
+ if (unlikely(!buf && !node->bclink.reasm_buf)) {
+ tipc_bclink_unlock(net);
+ goto unlock;
+ }
bcl->stats.recv_fragments++;
if (buf) {
bcl->stats.recv_fragmented++;
@@ -559,25 +535,25 @@ receive:
if (node->bclink.last_in == node->bclink.last_sent)
goto unlock;
- if (skb_queue_empty(&node->bclink.deferred_queue)) {
+ if (skb_queue_empty(&node->bclink.deferdq)) {
node->bclink.oos_state = 1;
goto unlock;
}
- msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
+ msg = buf_msg(skb_peek(&node->bclink.deferdq));
seqno = msg_seqno(msg);
next_in = mod(next_in + 1);
if (seqno != next_in)
goto unlock;
/* Take in-sequence message from deferred queue & deliver it */
- buf = __skb_dequeue(&node->bclink.deferred_queue);
+ buf = __skb_dequeue(&node->bclink.deferdq);
goto receive;
}
/* Handle out-of-sequence broadcast message */
if (less(next_in, seqno)) {
- deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
+ deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
buf);
bclink_update_last_sent(node, seqno);
buf = NULL;
@@ -594,6 +570,7 @@ receive:
unlock:
tipc_node_unlock(node);
+ tipc_node_put(node);
exit:
kfree_skb(buf);
}
@@ -634,7 +611,6 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tn->net_id);
tn->bcl->stats.sent_info++;
-
if (WARN_ON(!bclink->bcast_nodes.count)) {
dump_stack();
return 0;
@@ -835,7 +811,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
if (!prop)
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
goto prop_msg_full;
nla_nest_end(msg->skb, prop);
@@ -913,8 +889,9 @@ int tipc_bclink_init(struct net *net)
sprintf(bcbearer->media.name, "tipc-broadcast");
spin_lock_init(&bclink->lock);
- __skb_queue_head_init(&bcl->outqueue);
- __skb_queue_head_init(&bcl->deferred_queue);
+ __skb_queue_head_init(&bcl->transmq);
+ __skb_queue_head_init(&bcl->backlogq);
+ __skb_queue_head_init(&bcl->deferdq);
skb_queue_head_init(&bcl->wakeupq);
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
@@ -922,7 +899,7 @@ int tipc_bclink_init(struct net *net)
skb_queue_head_init(&bclink->inputq);
bcl->owner = &bclink->node;
bcl->owner->net = net;
- bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+ bcl->mtu = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
bcl->bearer_id = MAX_BEARERS;
rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 43f397fbac55..4bdc12277d33 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -55,7 +55,6 @@ struct tipc_bcbearer_pair {
struct tipc_bearer *secondary;
};
-#define TIPC_BCLINK_RESET 1
#define BCBEARER MAX_BEARERS
/**
@@ -86,7 +85,6 @@ struct tipc_bcbearer {
* @lock: spinlock governing access to structure
* @link: (non-standard) broadcast link structure
* @node: (non-standard) node structure representing b'cast link's peer node
- * @flags: represent bclink states
* @bcast_nodes: map of broadcast-capable nodes
* @retransmit_to: node that most recently requested a retransmit
*
@@ -96,7 +94,6 @@ struct tipc_bclink {
spinlock_t lock;
struct tipc_link link;
struct tipc_node node;
- unsigned int flags;
struct sk_buff_head arrvq;
struct sk_buff_head inputq;
struct tipc_node_map bcast_nodes;
@@ -117,7 +114,6 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
int tipc_bclink_init(struct net *net);
void tipc_bclink_stop(struct net *net);
-void tipc_bclink_set_flags(struct net *tn, unsigned int flags);
void tipc_bclink_add_node(struct net *net, u32 addr);
void tipc_bclink_remove_node(struct net *net, u32 addr);
struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 48852c2dcc03..70e3dacbf84a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -48,6 +48,9 @@ static struct tipc_media * const media_info_array[] = {
#ifdef CONFIG_TIPC_MEDIA_IB
&ib_media_info,
#endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ &udp_media_info,
+#endif
NULL
};
@@ -216,7 +219,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
* tipc_enable_bearer - enable bearer with the given name
*/
static int tipc_enable_bearer(struct net *net, const char *name,
- u32 disc_domain, u32 priority)
+ u32 disc_domain, u32 priority,
+ struct nlattr *attr[])
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b_ptr;
@@ -304,7 +308,7 @@ restart:
strcpy(b_ptr->name, name);
b_ptr->media = m_ptr;
- res = m_ptr->enable_media(net, b_ptr);
+ res = m_ptr->enable_media(net, b_ptr, attr);
if (res) {
pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
name, -res);
@@ -372,7 +376,8 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
kfree_rcu(b_ptr, rcu);
}
-int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+ struct nlattr *attr[])
{
struct net_device *dev;
char *driver_name = strchr((const char *)b->name, ':') + 1;
@@ -586,14 +591,14 @@ void tipc_bearer_stop(struct net *net)
/* Caller should hold rtnl_lock to protect the bearer */
static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
- struct tipc_bearer *bearer)
+ struct tipc_bearer *bearer, int nlflags)
{
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
- NLM_F_MULTI, TIPC_NL_BEARER_GET);
+ nlflags, TIPC_NL_BEARER_GET);
if (!hdr)
return -EMSGSIZE;
@@ -652,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!bearer)
continue;
- err = __tipc_nl_add_bearer(&msg, bearer);
+ err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI);
if (err)
break;
}
@@ -700,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- err = __tipc_nl_add_bearer(&msg, bearer);
+ err = __tipc_nl_add_bearer(&msg, bearer, 0);
if (err)
goto err_out;
rtnl_unlock();
@@ -791,7 +796,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
}
rtnl_lock();
- err = tipc_enable_bearer(net, bearer, domain, prio);
+ err = tipc_enable_bearer(net, bearer, domain, prio, attrs);
if (err) {
rtnl_unlock();
return err;
@@ -852,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
}
static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
- struct tipc_media *media)
+ struct tipc_media *media, int nlflags)
{
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
- NLM_F_MULTI, TIPC_NL_MEDIA_GET);
+ nlflags, TIPC_NL_MEDIA_GET);
if (!hdr)
return -EMSGSIZE;
@@ -911,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
for (; media_info_array[i] != NULL; i++) {
- err = __tipc_nl_add_media(&msg, media_info_array[i]);
+ err = __tipc_nl_add_media(&msg, media_info_array[i],
+ NLM_F_MULTI);
if (err)
break;
}
@@ -958,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- err = __tipc_nl_add_media(&msg, media);
+ err = __tipc_nl_add_media(&msg, media, 0);
if (err)
goto err_out;
rtnl_unlock();
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 6b17795ff8bc..5cad243ee8fc 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -41,7 +41,7 @@
#include <net/genetlink.h>
#define MAX_BEARERS 2
-#define MAX_MEDIA 2
+#define MAX_MEDIA 3
#define MAX_NODES 4096
#define WSIZE 32
@@ -50,14 +50,16 @@
* - the field's actual content and length is defined per media
* - remaining unused bytes in the field are set to zero
*/
-#define TIPC_MEDIA_ADDR_SIZE 32
+#define TIPC_MEDIA_INFO_SIZE 32
#define TIPC_MEDIA_TYPE_OFFSET 3
+#define TIPC_MEDIA_ADDR_OFFSET 4
/*
* Identifiers of supported TIPC media types
*/
#define TIPC_MEDIA_TYPE_ETH 1
#define TIPC_MEDIA_TYPE_IB 2
+#define TIPC_MEDIA_TYPE_UDP 3
/**
* struct tipc_node_map - set of node identifiers
@@ -76,7 +78,7 @@ struct tipc_node_map {
* @broadcast: non-zero if address is a broadcast address
*/
struct tipc_media_addr {
- u8 value[TIPC_MEDIA_ADDR_SIZE];
+ u8 value[TIPC_MEDIA_INFO_SIZE];
u8 media_id;
u8 broadcast;
};
@@ -103,7 +105,8 @@ struct tipc_media {
int (*send_msg)(struct net *net, struct sk_buff *buf,
struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest);
- int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr);
+ int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr,
+ struct nlattr *attr[]);
void (*disable_media)(struct tipc_bearer *b_ptr);
int (*addr2str)(struct tipc_media_addr *addr,
char *strbuf,
@@ -182,6 +185,9 @@ extern struct tipc_media eth_media_info;
#ifdef CONFIG_TIPC_MEDIA_IB
extern struct tipc_media ib_media_info;
#endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+extern struct tipc_media udp_media_info;
+#endif
int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
@@ -196,7 +202,8 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
int tipc_media_set_priority(const char *name, u32 new_value);
int tipc_media_set_window(const char *name, u32 new_value);
void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
-int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b);
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+ struct nlattr *attrs[]);
void tipc_disable_l2_media(struct tipc_bearer *b);
int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
struct tipc_bearer *b, struct tipc_media_addr *dest);
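As an aside, a minimal user-space sketch of the widened enable_media() callback introduced above: the parsed netlink attribute array is now passed all the way from tipc_enable_bearer() so that media such as UDP can read per-bearer options, while L2 media can simply ignore it. The struct names, the MTU value and demo_enable_l2() below are stand-ins for illustration, not the real TIPC definitions.

/* Illustrative sketch only -- plain C, models the new callback shape;
 * the struct names here are placeholders, not the real TIPC types. */
#include <stdio.h>

struct net;                        /* opaque in this sketch */
struct nlattr;                     /* opaque in this sketch */
struct bearer_model { int mtu; };

/* enable_media now also receives the netlink attribute array */
typedef int (*enable_media_fn)(struct net *net, struct bearer_model *b,
                               struct nlattr *attr[]);

/* An L2-style medium can ignore the attributes entirely */
static int demo_enable_l2(struct net *net, struct bearer_model *b,
                          struct nlattr *attr[])
{
        (void)net;
        (void)attr;
        b->mtu = 1500;             /* invented value for the demo */
        return 0;
}

int main(void)
{
        struct bearer_model b = { 0 };
        enable_media_fn enable = demo_enable_l2;

        if (!enable(NULL, &b, NULL))
                printf("bearer enabled, mtu %d\n", b.mtu);
        return 0;
}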
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index feef3753615d..967e292f53c8 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -86,9 +86,10 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
msg = buf_msg(buf);
tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
- INT_H_SIZE, dest_domain);
+ MAX_H_SIZE, dest_domain);
msg_set_non_seq(msg, 1);
msg_set_node_sig(msg, tn->random);
+ msg_set_node_capabilities(msg, 0);
msg_set_dest_domain(msg, dest_domain);
msg_set_bc_netid(msg, tn->net_id);
b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
@@ -133,6 +134,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
u32 net_id = msg_bc_netid(msg);
u32 mtyp = msg_type(msg);
u32 signature = msg_node_sig(msg);
+ u16 caps = msg_node_capabilities(msg);
bool addr_match = false;
bool sign_match = false;
bool link_up = false;
@@ -167,6 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
if (!node)
return;
tipc_node_lock(node);
+ node->capabilities = caps;
link = node->links[bearer->identity];
/* Prepare to validate requesting node's signature and media address */
@@ -249,7 +252,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
- rbuf = tipc_buf_acquire(INT_H_SIZE);
+ rbuf = tipc_buf_acquire(MAX_H_SIZE);
if (rbuf) {
tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
@@ -257,6 +260,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
}
}
tipc_node_unlock(node);
+ tipc_node_put(node);
}
/**
@@ -359,8 +363,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
-
- req->buf = tipc_buf_acquire(INT_H_SIZE);
+ req->buf = tipc_buf_acquire(MAX_H_SIZE);
if (!req->buf) {
kfree(req);
return -ENOMEM;
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 5e1426f1751f..f69a2fde9f4a 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -37,8 +37,6 @@
#include "core.h"
#include "bearer.h"
-#define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */
-
/* Convert Ethernet address (media address format) to string */
static int tipc_eth_addr2str(struct tipc_media_addr *addr,
char *strbuf, int bufsz)
@@ -53,9 +51,9 @@ static int tipc_eth_addr2str(struct tipc_media_addr *addr,
/* Convert from media address format to discovery message addr format */
static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
{
- memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+ memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
- memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
+ memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN);
return 0;
}
@@ -79,7 +77,7 @@ static int tipc_eth_msg2addr(struct tipc_bearer *b,
char *msg)
{
/* Skip past preamble: */
- msg += ETH_ADDR_OFFSET;
+ msg += TIPC_MEDIA_ADDR_OFFSET;
return tipc_eth_raw2addr(b, addr, msg);
}
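The renamed constants amount to a fixed 32-byte media info field: byte 3 carries the media type id and the media address starts at byte 4. Below is a standalone sketch of the packing done by tipc_eth_addr2msg(); the constant names are shortened and the MAC address is a dummy value.

/* Standalone model of the media-info packing; constants mirror the
 * renamed TIPC_MEDIA_* defines, the MAC address is made up. */
#include <stdio.h>
#include <string.h>

#define MEDIA_INFO_SIZE   32   /* TIPC_MEDIA_INFO_SIZE   */
#define MEDIA_TYPE_OFFSET  3   /* TIPC_MEDIA_TYPE_OFFSET */
#define MEDIA_ADDR_OFFSET  4   /* TIPC_MEDIA_ADDR_OFFSET */
#define MEDIA_TYPE_ETH     1
#define ETH_ALEN           6

static void pack_eth_addr(unsigned char *info, const unsigned char *mac)
{
        memset(info, 0, MEDIA_INFO_SIZE);
        info[MEDIA_TYPE_OFFSET] = MEDIA_TYPE_ETH;
        memcpy(info + MEDIA_ADDR_OFFSET, mac, ETH_ALEN);
}

int main(void)
{
        unsigned char info[MEDIA_INFO_SIZE];
        unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
        int i;

        pack_eth_addr(info, mac);
        for (i = 0; i < 12; i++)
                printf("%02x ", info[i]);
        printf("...\n");
        return 0;
}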
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 8522eef9c136..e8c16718e3fa 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -57,7 +57,7 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
/* Convert from media address format to discovery message addr format */
static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
{
- memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+ memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
memcpy(msg, addr->value, INFINIBAND_ALEN);
return 0;
}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 14f09b3cb87c..43a515dc97b0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,7 +1,7 @@
/*
* net/tipc/link.c: TIPC link code
*
- * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
* Copyright (c) 2004-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
@@ -35,6 +35,7 @@
*/
#include "core.h"
+#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
@@ -88,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
#define TIMEOUT_EVT 560817u /* link timer expired */
/*
- * The following two 'message types' is really just implementation
- * data conveniently stored in the message header.
- * They must not be considered part of the protocol
+ * State value stored in 'failover_pkts'
*/
-#define OPEN_MSG 0
-#define CLOSED_MSG 1
-
-/*
- * State value stored in 'exp_msg_count'
- */
-#define START_CHANGEOVER 100000u
+#define FIRST_FAILOVER 0xffffu
static void link_handle_out_of_seq_msg(struct tipc_link *link,
struct sk_buff *skb);
static void tipc_link_proto_rcv(struct tipc_link *link,
struct sk_buff *skb);
-static int tipc_link_tunnel_rcv(struct tipc_node *node,
- struct sk_buff **skb);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
@@ -114,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-
+static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
/*
* Simple link routines
*/
@@ -138,32 +129,11 @@ static void tipc_link_put(struct tipc_link *l_ptr)
kref_put(&l_ptr->ref, tipc_link_release);
}
-static void link_init_max_pkt(struct tipc_link *l_ptr)
+static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
- struct tipc_node *node = l_ptr->owner;
- struct tipc_net *tn = net_generic(node->net, tipc_net_id);
- struct tipc_bearer *b_ptr;
- u32 max_pkt;
-
- rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
- if (!b_ptr) {
- rcu_read_unlock();
- return;
- }
- max_pkt = (b_ptr->mtu & ~3);
- rcu_read_unlock();
-
- if (max_pkt > MAX_MSG_SIZE)
- max_pkt = MAX_MSG_SIZE;
-
- l_ptr->max_pkt_target = max_pkt;
- if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
- l_ptr->max_pkt = l_ptr->max_pkt_target;
- else
- l_ptr->max_pkt = MAX_PKT_DEFAULT;
-
- l_ptr->max_pkt_probes = 0;
+ if (l->owner->active_links[0] != l)
+ return l->owner->active_links[0];
+ return l->owner->active_links[1];
}
/*
@@ -194,10 +164,10 @@ static void link_timeout(unsigned long data)
tipc_node_lock(l_ptr->owner);
/* update counters used in statistical profiling of send traffic */
- l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
+ l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
l_ptr->stats.queue_sz_counts++;
- skb = skb_peek(&l_ptr->outqueue);
+ skb = skb_peek(&l_ptr->transmq);
if (skb) {
struct tipc_msg *msg = buf_msg(skb);
u32 length = msg_size(msg);
@@ -229,7 +199,7 @@ static void link_timeout(unsigned long data)
/* do all other link processing performed on a periodic basis */
link_state_event(l_ptr, TIMEOUT_EVT);
- if (l_ptr->next_out)
+ if (skb_queue_len(&l_ptr->backlogq))
tipc_link_push_packets(l_ptr);
tipc_node_unlock(l_ptr->owner);
@@ -305,16 +275,15 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
msg_set_session(msg, (tn->random & 0xffff));
msg_set_bearer_id(msg, b_ptr->identity);
strcpy((char *)msg_data(msg), if_name);
-
+ l_ptr->net_plane = b_ptr->net_plane;
+ l_ptr->advertised_mtu = b_ptr->mtu;
+ l_ptr->mtu = l_ptr->advertised_mtu;
l_ptr->priority = b_ptr->priority;
tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-
- l_ptr->net_plane = b_ptr->net_plane;
- link_init_max_pkt(l_ptr);
-
l_ptr->next_out_no = 1;
- __skb_queue_head_init(&l_ptr->outqueue);
- __skb_queue_head_init(&l_ptr->deferred_queue);
+ __skb_queue_head_init(&l_ptr->transmq);
+ __skb_queue_head_init(&l_ptr->backlogq);
+ __skb_queue_head_init(&l_ptr->deferdq);
skb_queue_head_init(&l_ptr->wakeupq);
skb_queue_head_init(&l_ptr->inputq);
skb_queue_head_init(&l_ptr->namedq);
@@ -327,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
}
/**
- * link_delete - Conditional deletion of link.
- * If timer still running, real delete is done when it expires
- * @link: link to be deleted
+ * tipc_link_delete - Delete a link
+ * @l: link to be deleted
*/
-void tipc_link_delete(struct tipc_link *link)
+void tipc_link_delete(struct tipc_link *l)
{
- tipc_link_reset_fragments(link);
- tipc_node_detach_link(link->owner, link);
- tipc_link_put(link);
+ tipc_link_reset(l);
+ if (del_timer(&l->timer))
+ tipc_link_put(l);
+ l->flags |= LINK_STOPPED;
+ /* Delete link now, or when timer is finished: */
+ tipc_link_reset_fragments(l);
+ tipc_node_detach_link(l->owner, l);
+ tipc_link_put(l);
}
void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
@@ -349,16 +322,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
list_for_each_entry_rcu(node, &tn->node_list, list) {
tipc_node_lock(node);
link = node->links[bearer_id];
- if (!link) {
- tipc_node_unlock(node);
- continue;
- }
- tipc_link_reset(link);
- if (del_timer(&link->timer))
- tipc_link_put(link);
- link->flags |= LINK_STOPPED;
- /* Delete link now, or when failover is finished: */
- if (shutting_down || !tipc_node_is_up(node))
+ if (link)
tipc_link_delete(link);
tipc_node_unlock(node);
}
@@ -366,28 +330,43 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
}
/**
- * link_schedule_user - schedule user for wakeup after congestion
+ * link_schedule_user - schedule a message sender for wakeup after congestion
* @link: congested link
- * @oport: sending port
- * @chain_sz: size of buffer chain that was attempted sent
- * @imp: importance of message attempted sent
+ * @list: message that was attempted sent
* Create pseudo msg to send back to user when congestion abates
+ * Only consumes message if there is an error
*/
-static bool link_schedule_user(struct tipc_link *link, u32 oport,
- uint chain_sz, uint imp)
+static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
- struct sk_buff *buf;
+ struct tipc_msg *msg = buf_msg(skb_peek(list));
+ int imp = msg_importance(msg);
+ u32 oport = msg_origport(msg);
+ u32 addr = link_own_addr(link);
+ struct sk_buff *skb;
- buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
- link_own_addr(link), link_own_addr(link),
- oport, 0, 0);
- if (!buf)
- return false;
- TIPC_SKB_CB(buf)->chain_sz = chain_sz;
- TIPC_SKB_CB(buf)->chain_imp = imp;
- skb_queue_tail(&link->wakeupq, buf);
+ /* This really cannot happen... */
+ if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
+ pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
+ tipc_link_reset(link);
+ goto err;
+ }
+ /* Non-blocking sender: */
+ if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
+ return -ELINKCONG;
+
+ /* Create and schedule wakeup pseudo message */
+ skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+ addr, addr, oport, 0, 0);
+ if (!skb)
+ goto err;
+ TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
+ TIPC_SKB_CB(skb)->chain_imp = imp;
+ skb_queue_tail(&link->wakeupq, skb);
link->stats.link_congs++;
- return true;
+ return -ELINKCONG;
+err:
+ __skb_queue_purge(list);
+ return -ENOBUFS;
}
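The outcome logic of the new link_schedule_user() can be modeled in a few lines of ordinary C: a sender above critical importance is dropped with -ENOBUFS (the kernel additionally resets the link), a sender that already has a wakeup pending just gets -ELINKCONG back, and everyone else is queued for wakeup and also told -ELINKCONG so the buffer chain is kept. The sketch below is standalone; the ELINKCONG value, the wakeup_entry record and the allocation-failure path it omits are placeholders, not kernel definitions.

/* Sketch of the link_schedule_user() outcome logic; not kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define ELINKCONG 200  /* arbitrary placeholder value for this sketch */

enum importance { LOW, MEDIUM, HIGH, CRITICAL, SYSTEM };

struct wakeup_entry { unsigned int oport; int chain_sz; int imp; };

static int schedule_user(int imp, bool wakeup_pending,
                         struct wakeup_entry *wq, int *wq_len,
                         unsigned int oport, int chain_sz)
{
        if (imp > CRITICAL)            /* over-limit sender: chain dropped */
                return -ENOBUFS;
        if (wakeup_pending)            /* already scheduled once           */
                return -ELINKCONG;
        /* record the pseudo wakeup message */
        wq[(*wq_len)++] = (struct wakeup_entry){ oport, chain_sz, imp };
        return -ELINKCONG;             /* caller keeps the buffer chain    */
}

int main(void)
{
        struct wakeup_entry wq[8];
        int len = 0;

        printf("rc=%d\n", schedule_user(LOW, false, wq, &len, 4711, 3));
        printf("queued wakeups: %d\n", len);
        return 0;
}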
/**
@@ -396,19 +375,22 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
* Move a number of waiting users, as permitted by available space in
* the send queue, from link wait queue to node wait queue for wakeup
*/
-void link_prepare_wakeup(struct tipc_link *link)
+void link_prepare_wakeup(struct tipc_link *l)
{
- uint pend_qsz = skb_queue_len(&link->outqueue);
+ int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+ int imp, lim;
struct sk_buff *skb, *tmp;
- skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
- if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
+ skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
+ imp = TIPC_SKB_CB(skb)->chain_imp;
+ lim = l->window + l->backlog[imp].limit;
+ pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+ if ((pnd[imp] + l->backlog[imp].len) >= lim)
break;
- pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
- skb_unlink(skb, &link->wakeupq);
- skb_queue_tail(&link->inputq, skb);
- link->owner->inputq = &link->inputq;
- link->owner->action_flags |= TIPC_MSG_EVT;
+ skb_unlink(skb, &l->wakeupq);
+ skb_queue_tail(&l->inputq, skb);
+ l->owner->inputq = &l->inputq;
+ l->owner->action_flags |= TIPC_MSG_EVT;
}
}
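The wakeup walk above can be pictured with plain arrays: for each pending sender it adds the chain size to a per-importance tally and stops as soon as tally plus current backlog would reach window + per-level limit. A standalone model with invented numbers:

/* Standalone model of the per-importance wakeup admission check.
 * Window, limits and pending chains are invented values. */
#include <stdio.h>

#define LEVELS 5

int main(void)
{
        int window = 50;
        int limit[LEVELS]   = { 25, 50, 75, 100, 40 }; /* backlog[imp].limit */
        int backlog[LEVELS] = { 10,  5,  0,   0,  0 }; /* backlog[imp].len   */
        int pnd[LEVELS]     = {  0,  0,  0,   0,  0 };

        /* pending wakeup entries: { importance, chain size } */
        int wakeupq[][2] = { {0, 8}, {0, 12}, {1, 4}, {0, 60} };
        int n = sizeof(wakeupq) / sizeof(wakeupq[0]);
        int i, imp, lim;

        for (i = 0; i < n; i++) {
                imp = wakeupq[i][0];
                lim = window + limit[imp];
                pnd[imp] += wakeupq[i][1];
                if (pnd[imp] + backlog[imp] >= lim) {
                        printf("stop at entry %d (level %d full)\n", i, imp);
                        break;
                }
                printf("wake entry %d (level %d, %d pending)\n",
                       i, imp, pnd[imp]);
        }
        return 0;
}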
@@ -422,31 +404,42 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
l_ptr->reasm_buf = NULL;
}
+static void tipc_link_purge_backlog(struct tipc_link *l)
+{
+ __skb_queue_purge(&l->backlogq);
+ l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+ l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+ l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+}
+
/**
* tipc_link_purge_queues - purge all pkt queues associated with link
* @l_ptr: pointer to link
*/
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
- __skb_queue_purge(&l_ptr->deferred_queue);
- __skb_queue_purge(&l_ptr->outqueue);
+ __skb_queue_purge(&l_ptr->deferdq);
+ __skb_queue_purge(&l_ptr->transmq);
+ tipc_link_purge_backlog(l_ptr);
tipc_link_reset_fragments(l_ptr);
}
void tipc_link_reset(struct tipc_link *l_ptr)
{
u32 prev_state = l_ptr->state;
- u32 checkpoint = l_ptr->next_in_no;
int was_active_link = tipc_link_is_active(l_ptr);
struct tipc_node *owner = l_ptr->owner;
+ struct tipc_link *pl = tipc_parallel_link(l_ptr);
msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
/* Link is down, accept any session */
l_ptr->peer_session = INVALID_SESSION;
- /* Prepare for max packet size negotiation */
- link_init_max_pkt(l_ptr);
+ /* Prepare for renewed mtu size negotiation */
+ l_ptr->mtu = l_ptr->advertised_mtu;
l_ptr->state = RESET_UNKNOWN;
@@ -456,21 +449,26 @@ void tipc_link_reset(struct tipc_link *l_ptr)
tipc_node_link_down(l_ptr->owner, l_ptr);
tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
- if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
- l_ptr->reset_checkpoint = checkpoint;
- l_ptr->exp_msg_count = START_CHANGEOVER;
+ if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
+ l_ptr->flags |= LINK_FAILINGOVER;
+ l_ptr->failover_checkpt = l_ptr->next_in_no;
+ pl->failover_pkts = FIRST_FAILOVER;
+ pl->failover_checkpt = l_ptr->next_in_no;
+ pl->failover_skb = l_ptr->reasm_buf;
+ } else {
+ kfree_skb(l_ptr->reasm_buf);
}
-
/* Clean up all queues, except inputq: */
- __skb_queue_purge(&l_ptr->outqueue);
- __skb_queue_purge(&l_ptr->deferred_queue);
+ __skb_queue_purge(&l_ptr->transmq);
+ __skb_queue_purge(&l_ptr->deferdq);
if (!owner->inputq)
owner->inputq = &l_ptr->inputq;
skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
if (!skb_queue_empty(owner->inputq))
owner->action_flags |= TIPC_MSG_EVT;
- l_ptr->next_out = NULL;
- l_ptr->unacked_window = 0;
+ tipc_link_purge_backlog(l_ptr);
+ l_ptr->reasm_buf = NULL;
+ l_ptr->rcv_unacked = 0;
l_ptr->checkpoint = 1;
l_ptr->next_out_no = 1;
l_ptr->fsm_msg_cnt = 0;
@@ -521,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
return; /* Not yet. */
- /* Check whether changeover is going on */
- if (l_ptr->exp_msg_count) {
+ if (l_ptr->flags & LINK_FAILINGOVER) {
if (event == TIMEOUT_EVT)
link_set_timer(l_ptr, cont_intv);
return;
@@ -539,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
- tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
@@ -551,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
}
l_ptr->state = WORKING_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
break;
@@ -562,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0, 0);
+ 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -585,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0, 0);
+ 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -596,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
+ 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ 1, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
@@ -612,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, RESET_MSG,
- 0, 0, 0, 0, 0);
+ 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
}
@@ -632,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
tipc_link_sync_xmit(l_ptr);
@@ -642,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 1, 0, 0, 0, 0);
+ 1, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -652,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
case TIMEOUT_EVT:
- tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -670,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
tipc_link_sync_xmit(l_ptr);
@@ -680,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
break;
case TIMEOUT_EVT:
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0, 0);
+ 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -693,101 +686,65 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
}
}
-/* tipc_link_cong: determine return value and how to treat the
- * sent buffer during link congestion.
- * - For plain, errorless user data messages we keep the buffer and
- * return -ELINKONG.
- * - For all other messages we discard the buffer and return -EHOSTUNREACH
- * - For TIPC internal messages we also reset the link
- */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
-{
- struct sk_buff *skb = skb_peek(list);
- struct tipc_msg *msg = buf_msg(skb);
- uint imp = tipc_msg_tot_importance(msg);
- u32 oport = msg_tot_origport(msg);
-
- if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
- pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
- tipc_link_reset(link);
- goto drop;
- }
- if (unlikely(msg_errcode(msg)))
- goto drop;
- if (unlikely(msg_reroute_cnt(msg)))
- goto drop;
- if (TIPC_SKB_CB(skb)->wakeup_pending)
- return -ELINKCONG;
- if (link_schedule_user(link, oport, skb_queue_len(list), imp))
- return -ELINKCONG;
-drop:
- __skb_queue_purge(list);
- return -EHOSTUNREACH;
-}
-
/**
* __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
* @link: link to use
* @list: chain of buffers containing message
*
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
- * user data messages) or -EHOSTUNREACH (all other messages/senders)
- * Only the socket functions tipc_send_stream() and tipc_send_packet() need
- * to act on the return value, since they may need to do more send attempts.
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
*/
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
struct sk_buff_head *list)
{
struct tipc_msg *msg = buf_msg(skb_peek(list));
- uint psz = msg_size(msg);
- uint sndlim = link->queue_limit[0];
- uint imp = tipc_msg_tot_importance(msg);
- uint mtu = link->max_pkt;
+ unsigned int maxwin = link->window;
+ unsigned int imp = msg_importance(msg);
+ uint mtu = link->mtu;
uint ack = mod(link->next_in_no - 1);
uint seqno = link->next_out_no;
uint bc_last_in = link->owner->bclink.last_in;
struct tipc_media_addr *addr = &link->media_addr;
- struct sk_buff_head *outqueue = &link->outqueue;
+ struct sk_buff_head *transmq = &link->transmq;
+ struct sk_buff_head *backlogq = &link->backlogq;
struct sk_buff *skb, *tmp;
- /* Match queue limits against msg importance: */
- if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
- return tipc_link_cong(link, list);
+ /* Match backlog limit against msg importance: */
+ if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
+ return link_schedule_user(link, list);
- /* Has valid packet limit been used ? */
- if (unlikely(psz > mtu)) {
+ if (unlikely(msg_size(msg) > mtu)) {
__skb_queue_purge(list);
return -EMSGSIZE;
}
-
- /* Prepare each packet for sending, and add to outqueue: */
+ /* Prepare each packet for sending, and add to relevant queue: */
skb_queue_walk_safe(list, skb, tmp) {
__skb_unlink(skb, list);
msg = buf_msg(skb);
- msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
+ msg_set_seqno(msg, seqno);
+ msg_set_ack(msg, ack);
msg_set_bcast_ack(msg, bc_last_in);
- if (skb_queue_len(outqueue) < sndlim) {
- __skb_queue_tail(outqueue, skb);
- tipc_bearer_send(net, link->bearer_id,
- skb, addr);
- link->next_out = NULL;
- link->unacked_window = 0;
- } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
+ if (likely(skb_queue_len(transmq) < maxwin)) {
+ __skb_queue_tail(transmq, skb);
+ tipc_bearer_send(net, link->bearer_id, skb, addr);
+ link->rcv_unacked = 0;
+ seqno++;
+ continue;
+ }
+ if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
link->stats.sent_bundled++;
continue;
- } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
- link->addr)) {
+ }
+ if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
link->stats.sent_bundled++;
link->stats.sent_bundles++;
- if (!link->next_out)
- link->next_out = skb_peek_tail(outqueue);
- } else {
- __skb_queue_tail(outqueue, skb);
- if (!link->next_out)
- link->next_out = skb;
+ imp = msg_importance(buf_msg(skb));
}
+ __skb_queue_tail(backlogq, skb);
+ link->backlog[imp].len++;
seqno++;
}
link->next_out_no = seqno;
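Condensed to a single decision, the reworked send path does: full per-importance backlog -> schedule the sender and return -ELINKCONG; room inside the link window -> transmit immediately via the bearer; otherwise -> append to the backlog queue (bundling where possible). A standalone sketch of just that classification, with invented queue lengths; it deliberately flattens the per-chain/per-packet split of the real function.

/* Standalone model of the transmq/backlogq admission decision in the
 * reworked __tipc_link_xmit(); all numbers are invented. */
#include <stdio.h>

enum verdict { SEND_NOW, BACKLOG, CONGESTED };

static enum verdict classify(int transmq_len, int window,
                             int backlog_len, int backlog_limit)
{
        if (backlog_len >= backlog_limit)
                return CONGESTED;          /* link_schedule_user() path   */
        if (transmq_len < window)
                return SEND_NOW;           /* goes straight to the bearer */
        return BACKLOG;                    /* queued per importance level */
}

int main(void)
{
        static const char *name[] = { "send now", "backlog", "congested" };

        printf("%s\n", name[classify(10, 50,  0, 25)]);  /* send now  */
        printf("%s\n", name[classify(50, 50,  3, 25)]);  /* backlog   */
        printf("%s\n", name[classify(50, 50, 25, 25)]);  /* congested */
        return 0;
}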
@@ -808,13 +765,25 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
return __tipc_link_xmit(link->owner->net, link, &head);
}
+/* tipc_link_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
u32 selector)
{
struct sk_buff_head head;
+ int rc;
skb2list(skb, &head);
- return tipc_link_xmit(net, &head, dnode, selector);
+ rc = tipc_link_xmit(net, &head, dnode, selector);
+ if (rc == -ELINKCONG)
+ kfree_skb(skb);
+ return 0;
}
/**
@@ -841,12 +810,15 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
if (link)
rc = __tipc_link_xmit(net, link, list);
tipc_node_unlock(node);
+ tipc_node_put(node);
}
if (link)
return rc;
- if (likely(in_own_node(net, dnode)))
- return tipc_sk_rcv(net, list);
+ if (likely(in_own_node(net, dnode))) {
+ tipc_sk_rcv(net, list);
+ return 0;
+ }
__skb_queue_purge(list);
return rc;
@@ -893,14 +865,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
kfree_skb(buf);
}
-struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
- const struct sk_buff *skb)
-{
- if (skb_queue_is_last(list, skb))
- return NULL;
- return skb->next;
-}
-
/*
* tipc_link_push_packets - push unsent packets to bearer
*
@@ -909,30 +873,24 @@ struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
*
* Called with node locked
*/
-void tipc_link_push_packets(struct tipc_link *l_ptr)
+void tipc_link_push_packets(struct tipc_link *link)
{
- struct sk_buff_head *outqueue = &l_ptr->outqueue;
- struct sk_buff *skb = l_ptr->next_out;
+ struct sk_buff *skb;
struct tipc_msg *msg;
- u32 next, first;
+ unsigned int ack = mod(link->next_in_no - 1);
- skb_queue_walk_from(outqueue, skb) {
- msg = buf_msg(skb);
- next = msg_seqno(msg);
- first = buf_seqno(skb_peek(outqueue));
-
- if (mod(next - first) < l_ptr->queue_limit[0]) {
- msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
- msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- if (msg_user(msg) == MSG_BUNDLER)
- TIPC_SKB_CB(skb)->bundling = false;
- tipc_bearer_send(l_ptr->owner->net,
- l_ptr->bearer_id, skb,
- &l_ptr->media_addr);
- l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
- } else {
+ while (skb_queue_len(&link->transmq) < link->window) {
+ skb = __skb_dequeue(&link->backlogq);
+ if (!skb)
break;
- }
+ msg = buf_msg(skb);
+ link->backlog[msg_importance(msg)].len--;
+ msg_set_ack(msg, ack);
+ msg_set_bcast_ack(msg, link->owner->bclink.last_in);
+ link->rcv_unacked = 0;
+ __skb_queue_tail(&link->transmq, skb);
+ tipc_bearer_send(link->owner->net, link->bearer_id,
+ skb, &link->media_addr);
}
}
@@ -979,7 +937,6 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
(unsigned long) TIPC_SKB_CB(buf)->handle);
n_ptr = tipc_bclink_retransmit_to(net);
- tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
pr_info("Broadcast link info for %s\n", addr_string);
@@ -991,9 +948,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
n_ptr->bclink.oos_state,
n_ptr->bclink.last_sent);
- tipc_node_unlock(n_ptr);
-
- tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
+ n_ptr->action_flags |= TIPC_BCAST_RESET;
l_ptr->stale_count = 0;
}
}
@@ -1019,8 +974,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
l_ptr->stale_count = 1;
}
- skb_queue_walk_from(&l_ptr->outqueue, skb) {
- if (!retransmits || skb == l_ptr->next_out)
+ skb_queue_walk_from(&l_ptr->transmq, skb) {
+ if (!retransmits)
break;
msg = buf_msg(skb);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
@@ -1032,72 +987,43 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
}
}
-static void link_retrieve_defq(struct tipc_link *link,
- struct sk_buff_head *list)
-{
- u32 seq_no;
-
- if (skb_queue_empty(&link->deferred_queue))
- return;
-
- seq_no = buf_seqno(skb_peek(&link->deferred_queue));
- if (seq_no == mod(link->next_in_no))
- skb_queue_splice_tail_init(&link->deferred_queue, list);
-}
-
-/**
- * link_recv_buf_validate - validate basic format of received message
- *
- * This routine ensures a TIPC message has an acceptable header, and at least
- * as much data as the header indicates it should. The routine also ensures
- * that the entire message header is stored in the main fragment of the message
- * buffer, to simplify future access to message header fields.
- *
- * Note: Having extra info present in the message header or data areas is OK.
- * TIPC will ignore the excess, under the assumption that it is optional info
- * introduced by a later release of the protocol.
+/* link_synch(): check if all packets that arrived before the synch
+ * point have been consumed
+ * Returns true if the parallel links are synched, otherwise false
*/
-static int link_recv_buf_validate(struct sk_buff *buf)
+static bool link_synch(struct tipc_link *l)
{
- static u32 min_data_hdr_size[8] = {
- SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
- MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
- };
-
- struct tipc_msg *msg;
- u32 tipc_hdr[2];
- u32 size;
- u32 hdr_size;
- u32 min_hdr_size;
-
- /* If this packet comes from the defer queue, the skb has already
- * been validated
- */
- if (unlikely(TIPC_SKB_CB(buf)->deferred))
- return 1;
+ unsigned int post_synch;
+ struct tipc_link *pl;
- if (unlikely(buf->len < MIN_H_SIZE))
- return 0;
+ pl = tipc_parallel_link(l);
+ if (pl == l)
+ goto synched;
- msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
- if (msg == NULL)
- return 0;
+ /* Was last pre-synch packet added to input queue ? */
+ if (less_eq(pl->next_in_no, l->synch_point))
+ return false;
- if (unlikely(msg_version(msg) != TIPC_VERSION))
- return 0;
+ /* Is it still in the input queue ? */
+ post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+ if (skb_queue_len(&pl->inputq) > post_synch)
+ return false;
+synched:
+ l->flags &= ~LINK_SYNCHING;
+ return true;
+}
- size = msg_size(msg);
- hdr_size = msg_hdr_sz(msg);
- min_hdr_size = msg_isdata(msg) ?
- min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
+static void link_retrieve_defq(struct tipc_link *link,
+ struct sk_buff_head *list)
+{
+ u32 seq_no;
- if (unlikely((hdr_size < min_hdr_size) ||
- (size < hdr_size) ||
- (buf->len < size) ||
- (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
- return 0;
+ if (skb_queue_empty(&link->deferdq))
+ return;
- return pskb_may_pull(buf, hdr_size);
+ seq_no = buf_seqno(skb_peek(&link->deferdq));
+ if (seq_no == mod(link->next_in_no))
+ skb_queue_splice_tail_init(&link->deferdq, list);
}
/**
@@ -1125,16 +1051,11 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
while ((skb = __skb_dequeue(&head))) {
/* Ensure message is well-formed */
- if (unlikely(!link_recv_buf_validate(skb)))
- goto discard;
-
- /* Ensure message data is a single contiguous unit */
- if (unlikely(skb_linearize(skb)))
+ if (unlikely(!tipc_msg_validate(skb)))
goto discard;
/* Handle arrival of a non-unicast link message */
msg = buf_msg(skb);
-
if (unlikely(msg_non_seq(msg))) {
if (msg_user(msg) == LINK_CONFIG)
tipc_disc_rcv(net, skb, b_ptr);
@@ -1152,8 +1073,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
n_ptr = tipc_node_find(net, msg_prevnode(msg));
if (unlikely(!n_ptr))
goto discard;
- tipc_node_lock(n_ptr);
+ tipc_node_lock(n_ptr);
/* Locate unicast link endpoint that should handle message */
l_ptr = n_ptr->links[b_ptr->identity];
if (unlikely(!l_ptr))
@@ -1175,21 +1096,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
ackd = msg_ack(msg);
/* Release acked messages */
- if (n_ptr->bclink.recv_permitted)
+ if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
released = 0;
- skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
- if (skb1 == l_ptr->next_out ||
- more(buf_seqno(skb1), ackd))
+ skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
+ if (more(buf_seqno(skb1), ackd))
break;
- __skb_unlink(skb1, &l_ptr->outqueue);
+ __skb_unlink(skb1, &l_ptr->transmq);
kfree_skb(skb1);
released = 1;
}
/* Try sending any messages link endpoint has pending */
- if (unlikely(l_ptr->next_out))
+ if (unlikely(skb_queue_len(&l_ptr->backlogq)))
tipc_link_push_packets(l_ptr);
if (released && !skb_queue_empty(&l_ptr->wakeupq))
@@ -1223,18 +1143,23 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
skb = NULL;
goto unlock;
}
+ /* Synchronize with parallel link if applicable */
+ if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
+ if (!link_synch(l_ptr))
+ goto unlock;
+ }
l_ptr->next_in_no++;
- if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
+ if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
link_retrieve_defq(l_ptr, &head);
-
- if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+ if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
l_ptr->stats.sent_acks++;
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
}
tipc_link_input(l_ptr, skb);
skb = NULL;
unlock:
tipc_node_unlock(n_ptr);
+ tipc_node_put(n_ptr);
discard:
if (unlikely(skb))
kfree_skb(skb);
@@ -1271,7 +1196,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
node->action_flags |= TIPC_NAMED_MSG_EVT;
return true;
case MSG_BUNDLER:
- case CHANGEOVER_PROTOCOL:
+ case TUNNEL_PROTOCOL:
case MSG_FRAGMENTER:
case BCAST_PROTOCOL:
return false;
@@ -1298,8 +1223,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
return;
switch (msg_user(msg)) {
- case CHANGEOVER_PROTOCOL:
- if (!tipc_link_tunnel_rcv(node, &skb))
+ case TUNNEL_PROTOCOL:
+ if (msg_dup(msg)) {
+ link->flags |= LINK_SYNCHING;
+ link->synch_point = msg_seqno(msg_get_wrapped(msg));
+ kfree_skb(skb);
+ break;
+ }
+ if (!tipc_link_failover_rcv(link, &skb))
break;
if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
tipc_data_input(link, skb);
@@ -1394,11 +1325,10 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
return;
}
- if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
+ if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
l_ptr->stats.deferred_recv++;
- TIPC_SKB_CB(buf)->deferred = true;
- if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
} else {
l_ptr->stats.duplicates++;
}
@@ -1408,15 +1338,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
* Send protocol message to the other endpoint.
*/
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
- u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+ u32 gap, u32 tolerance, u32 priority)
{
struct sk_buff *buf = NULL;
struct tipc_msg *msg = l_ptr->pmsg;
u32 msg_size = sizeof(l_ptr->proto_msg);
int r_flag;
- /* Don't send protocol message during link changeover */
- if (l_ptr->exp_msg_count)
+ /* Don't send protocol message during link failover */
+ if (l_ptr->flags & LINK_FAILINGOVER)
return;
/* Abort non-RESET send if communication with node is prohibited */
@@ -1434,11 +1364,11 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
if (!tipc_link_is_up(l_ptr))
return;
- if (l_ptr->next_out)
- next_sent = buf_seqno(l_ptr->next_out);
+ if (skb_queue_len(&l_ptr->backlogq))
+ next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
msg_set_next_sent(msg, next_sent);
- if (!skb_queue_empty(&l_ptr->deferred_queue)) {
- u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
+ if (!skb_queue_empty(&l_ptr->deferdq)) {
+ u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
gap = mod(rec - mod(l_ptr->next_in_no));
}
msg_set_seq_gap(msg, gap);
@@ -1446,35 +1376,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
l_ptr->stats.sent_nacks++;
msg_set_link_tolerance(msg, tolerance);
msg_set_linkprio(msg, priority);
- msg_set_max_pkt(msg, ack_mtu);
+ msg_set_max_pkt(msg, l_ptr->mtu);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_probe(msg, probe_msg != 0);
- if (probe_msg) {
- u32 mtu = l_ptr->max_pkt;
-
- if ((mtu < l_ptr->max_pkt_target) &&
- link_working_working(l_ptr) &&
- l_ptr->fsm_msg_cnt) {
- msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
- if (l_ptr->max_pkt_probes == 10) {
- l_ptr->max_pkt_target = (msg_size - 4);
- l_ptr->max_pkt_probes = 0;
- msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
- }
- l_ptr->max_pkt_probes++;
- }
-
+ if (probe_msg)
l_ptr->stats.sent_probes++;
- }
l_ptr->stats.sent_states++;
} else { /* RESET_MSG or ACTIVATE_MSG */
- msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
+ msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
msg_set_seq_gap(msg, 0);
msg_set_next_sent(msg, 1);
msg_set_probe(msg, 0);
msg_set_link_tolerance(msg, l_ptr->tolerance);
msg_set_linkprio(msg, l_ptr->priority);
- msg_set_max_pkt(msg, l_ptr->max_pkt_target);
+ msg_set_max_pkt(msg, l_ptr->advertised_mtu);
}
r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
@@ -1490,10 +1405,9 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
buf->priority = TC_PRIO_CONTROL;
-
tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
&l_ptr->media_addr);
- l_ptr->unacked_window = 0;
+ l_ptr->rcv_unacked = 0;
kfree_skb(buf);
}
@@ -1506,13 +1420,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
u32 rec_gap = 0;
- u32 max_pkt_info;
- u32 max_pkt_ack;
u32 msg_tol;
struct tipc_msg *msg = buf_msg(buf);
- /* Discard protocol message during link changeover */
- if (l_ptr->exp_msg_count)
+ if (l_ptr->flags & LINK_FAILINGOVER)
goto exit;
if (l_ptr->net_plane != msg_net_plane(msg))
@@ -1551,15 +1462,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
if (msg_linkprio(msg) > l_ptr->priority)
l_ptr->priority = msg_linkprio(msg);
- max_pkt_info = msg_max_pkt(msg);
- if (max_pkt_info) {
- if (max_pkt_info < l_ptr->max_pkt_target)
- l_ptr->max_pkt_target = max_pkt_info;
- if (l_ptr->max_pkt > l_ptr->max_pkt_target)
- l_ptr->max_pkt = l_ptr->max_pkt_target;
- } else {
- l_ptr->max_pkt = l_ptr->max_pkt_target;
- }
+ if (l_ptr->mtu > msg_max_pkt(msg))
+ l_ptr->mtu = msg_max_pkt(msg);
/* Synchronize broadcast link info, if not done previously */
if (!tipc_node_is_up(l_ptr->owner)) {
@@ -1604,18 +1508,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
mod(l_ptr->next_in_no));
}
- max_pkt_ack = msg_max_pkt(msg);
- if (max_pkt_ack > l_ptr->max_pkt) {
- l_ptr->max_pkt = max_pkt_ack;
- l_ptr->max_pkt_probes = 0;
- }
-
- max_pkt_ack = 0;
- if (msg_probe(msg)) {
+ if (msg_probe(msg))
l_ptr->stats.recv_probes++;
- if (msg_size(msg) > sizeof(l_ptr->proto_msg))
- max_pkt_ack = msg_size(msg);
- }
/* Protocol message before retransmits, reduce loss risk */
if (l_ptr->owner->bclink.recv_permitted)
@@ -1623,12 +1517,12 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
msg_last_bcast(msg));
if (rec_gap || (msg_probe(msg))) {
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
- 0, max_pkt_ack);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
+ rec_gap, 0, 0);
}
if (msg_seq_gap(msg)) {
l_ptr->stats.recv_nacks++;
- tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
+ tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
msg_seq_gap(msg));
}
break;
@@ -1675,7 +1569,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
*/
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
- u32 msgcount = skb_queue_len(&l_ptr->outqueue);
+ int msgcount;
struct tipc_link *tunnel = l_ptr->owner->active_links[0];
struct tipc_msg tunnel_hdr;
struct sk_buff *skb;
@@ -1684,12 +1578,15 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
if (!tunnel)
return;
- tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
- ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
+ tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
+ FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
+ skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
+ tipc_link_purge_backlog(l_ptr);
+ msgcount = skb_queue_len(&l_ptr->transmq);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
- if (skb_queue_empty(&l_ptr->outqueue)) {
+ if (skb_queue_empty(&l_ptr->transmq)) {
skb = tipc_buf_acquire(INT_H_SIZE);
if (skb) {
skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
@@ -1705,7 +1602,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
split_bundles = (l_ptr->owner->active_links[0] !=
l_ptr->owner->active_links[1]);
- skb_queue_walk(&l_ptr->outqueue, skb) {
+ skb_queue_walk(&l_ptr->transmq, skb) {
struct tipc_msg *msg = buf_msg(skb);
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
@@ -1736,157 +1633,105 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
* and sequence order is preserved per sender/receiver socket pair.
* Owner node is locked.
*/
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
- struct tipc_link *tunnel)
+void tipc_link_dup_queue_xmit(struct tipc_link *link,
+ struct tipc_link *tnl)
{
struct sk_buff *skb;
- struct tipc_msg tunnel_hdr;
-
- tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
- DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
- msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
- msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
- skb_queue_walk(&l_ptr->outqueue, skb) {
+ struct tipc_msg tnl_hdr;
+ struct sk_buff_head *queue = &link->transmq;
+ int mcnt;
+
+ tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
+ SYNCH_MSG, INT_H_SIZE, link->addr);
+ mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
+ msg_set_msgcnt(&tnl_hdr, mcnt);
+ msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
+
+tunnel_queue:
+ skb_queue_walk(queue, skb) {
struct sk_buff *outskb;
struct tipc_msg *msg = buf_msg(skb);
- u32 length = msg_size(msg);
+ u32 len = msg_size(msg);
- if (msg_user(msg) == MSG_BUNDLER)
- msg_set_type(msg, CLOSED_MSG);
- msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
- msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
- outskb = tipc_buf_acquire(length + INT_H_SIZE);
+ msg_set_ack(msg, mod(link->next_in_no - 1));
+ msg_set_bcast_ack(msg, link->owner->bclink.last_in);
+ msg_set_size(&tnl_hdr, len + INT_H_SIZE);
+ outskb = tipc_buf_acquire(len + INT_H_SIZE);
if (outskb == NULL) {
pr_warn("%sunable to send duplicate msg\n",
link_co_err);
return;
}
- skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
- skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
- length);
- __tipc_link_xmit_skb(tunnel, outskb);
- if (!tipc_link_is_up(l_ptr))
+ skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
+ skb->data, len);
+ __tipc_link_xmit_skb(tnl, outskb);
+ if (!tipc_link_is_up(link))
return;
}
-}
-
-/**
- * buf_extract - extracts embedded TIPC message from another message
- * @skb: encapsulating message buffer
- * @from_pos: offset to extract from
- *
- * Returns a new message buffer containing an embedded message. The
- * encapsulating buffer is left unchanged.
- */
-static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
-{
- struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
- u32 size = msg_size(msg);
- struct sk_buff *eb;
-
- eb = tipc_buf_acquire(size);
- if (eb)
- skb_copy_to_linear_data(eb, msg, size);
- return eb;
-}
-
-/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
- * Owner node is locked.
- */
-static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
- struct sk_buff *t_buf)
-{
- struct sk_buff *buf;
-
- if (!tipc_link_is_up(l_ptr))
+ if (queue == &link->backlogq)
return;
-
- buf = buf_extract(t_buf, INT_H_SIZE);
- if (buf == NULL) {
- pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
- return;
- }
-
- /* Add buffer to deferred queue, if applicable: */
- link_handle_out_of_seq_msg(l_ptr, buf);
+ queue = &link->backlogq;
+ goto tunnel_queue;
}
-/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
+/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
* Owner node is locked.
*/
-static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
- struct sk_buff *t_buf)
+static bool tipc_link_failover_rcv(struct tipc_link *link,
+ struct sk_buff **skb)
{
- struct tipc_msg *t_msg = buf_msg(t_buf);
- struct sk_buff *buf = NULL;
- struct tipc_msg *msg;
-
- if (tipc_link_is_up(l_ptr))
- tipc_link_reset(l_ptr);
-
- /* First failover packet? */
- if (l_ptr->exp_msg_count == START_CHANGEOVER)
- l_ptr->exp_msg_count = msg_msgcnt(t_msg);
-
- /* Should there be an inner packet? */
- if (l_ptr->exp_msg_count) {
- l_ptr->exp_msg_count--;
- buf = buf_extract(t_buf, INT_H_SIZE);
- if (buf == NULL) {
- pr_warn("%sno inner failover pkt\n", link_co_err);
- goto exit;
- }
- msg = buf_msg(buf);
+ struct tipc_msg *msg = buf_msg(*skb);
+ struct sk_buff *iskb = NULL;
+ struct tipc_link *pl = NULL;
+ int bearer_id = msg_bearer_id(msg);
+ int pos = 0;
- if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
- kfree_skb(buf);
- buf = NULL;
- goto exit;
- }
- if (msg_user(msg) == MSG_FRAGMENTER) {
- l_ptr->stats.recv_fragments++;
- tipc_buf_append(&l_ptr->reasm_buf, &buf);
- }
+ if (msg_type(msg) != FAILOVER_MSG) {
+ pr_warn("%sunknown tunnel pkt received\n", link_co_err);
+ goto exit;
}
-exit:
- if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
- tipc_link_delete(l_ptr);
- return buf;
-}
+ if (bearer_id >= MAX_BEARERS)
+ goto exit;
-/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
- * via other link as result of a failover (ORIGINAL_MSG) or
- * a new active link (DUPLICATE_MSG). Failover packets are
- * returned to the active link for delivery upwards.
- * Owner node is locked.
- */
-static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
- struct sk_buff **buf)
-{
- struct sk_buff *t_buf = *buf;
- struct tipc_link *l_ptr;
- struct tipc_msg *t_msg = buf_msg(t_buf);
- u32 bearer_id = msg_bearer_id(t_msg);
+ if (bearer_id == link->bearer_id)
+ goto exit;
- *buf = NULL;
+ pl = link->owner->links[bearer_id];
+ if (pl && tipc_link_is_up(pl))
+ tipc_link_reset(pl);
- if (bearer_id >= MAX_BEARERS)
+ if (link->failover_pkts == FIRST_FAILOVER)
+ link->failover_pkts = msg_msgcnt(msg);
+
+ /* Should we expect an inner packet? */
+ if (!link->failover_pkts)
goto exit;
- l_ptr = n_ptr->links[bearer_id];
- if (!l_ptr)
+ if (!tipc_msg_extract(*skb, &iskb, &pos)) {
+ pr_warn("%sno inner failover pkt\n", link_co_err);
+ *skb = NULL;
goto exit;
+ }
+ link->failover_pkts--;
+ *skb = NULL;
- if (msg_type(t_msg) == DUPLICATE_MSG)
- tipc_link_dup_rcv(l_ptr, t_buf);
- else if (msg_type(t_msg) == ORIGINAL_MSG)
- *buf = tipc_link_failover_rcv(l_ptr, t_buf);
- else
- pr_warn("%sunknown tunnel pkt received\n", link_co_err);
+ /* Was this packet already delivered? */
+ if (less(buf_seqno(iskb), link->failover_checkpt)) {
+ kfree_skb(iskb);
+ iskb = NULL;
+ goto exit;
+ }
+ if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
+ link->stats.recv_fragments++;
+ tipc_buf_append(&link->failover_skb, &iskb);
+ }
exit:
- kfree_skb(t_buf);
- return *buf != NULL;
+ if (!link->failover_pkts && pl)
+ pl->flags &= ~LINK_FAILINGOVER;
+ kfree_skb(*skb);
+ *skb = iskb;
+ return *skb;
}
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
@@ -1901,23 +1746,16 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}
-void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
- /* Data messages from this node, inclusive FIRST_FRAGM */
- l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
- l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
- l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
- l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
- /* Transiting data messages,inclusive FIRST_FRAGM */
- l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
- l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
- l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
- l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
- l_ptr->queue_limit[CONN_MANAGER] = 1200;
- l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
- l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
- /* FRAGMENT and LAST_FRAGMENT packets */
- l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
+ int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
+
+ l->window = win;
+ l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
+ l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
+ l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}
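All the new backlog limits are derived from the configured window, except the system level, which is derived from TIPC_MAX_PUBLICATIONS and the number of name-distributor items that fit in one packet at the current MTU. A standalone sketch follows; the TIPC_MAX_PUBLICATIONS and ITEM_SIZE values below are placeholders, not the kernel's.

/* Standalone sketch of the new backlog limit scheme; the two constants
 * below stand in for the kernel's real values. */
#include <stdio.h>

#define MAX_PUBLICATIONS 65535  /* placeholder for TIPC_MAX_PUBLICATIONS */
#define ITEM_SIZE        20     /* placeholder for ITEM_SIZE             */

int main(void)
{
        unsigned int win = 50;          /* link window                 */
        unsigned int mtu = 1500;        /* link mtu                    */
        unsigned int max_bulk = MAX_PUBLICATIONS / (mtu / ITEM_SIZE);

        printf("low      : %u\n", win / 2);
        printf("medium   : %u\n", win);
        printf("high     : %u\n", win / 2 * 3);
        printf("critical : %u\n", win * 2);
        printf("system   : %u\n", max_bulk);
        return 0;
}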
/* tipc_link_find_owner - locate owner node of link by link's name
@@ -2082,14 +1920,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
link_set_supervision_props(link, tol);
- tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
+ tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
}
if (props[TIPC_NLA_PROP_PRIO]) {
u32 prio;
prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
link->priority = prio;
- tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
+ tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
}
if (props[TIPC_NLA_PROP_WIN]) {
u32 win;
@@ -2172,7 +2010,7 @@ msg_full:
/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
- struct tipc_link *link)
+ struct tipc_link *link, int nlflags)
{
int err;
void *hdr;
@@ -2181,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
struct tipc_net *tn = net_generic(net, tipc_net_id);
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
- NLM_F_MULTI, TIPC_NL_LINK_GET);
+ nlflags, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
@@ -2194,7 +2032,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
tipc_cluster_mask(tn->own_addr)))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
goto attr_msg_full;
@@ -2216,7 +2054,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
- link->queue_limit[TIPC_LOW_IMPORTANCE]))
+ link->window))
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
goto prop_msg_full;
@@ -2254,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
if (!node->links[i])
continue;
- err = __tipc_nl_add_link(net, msg, node->links[i]);
+ err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
if (err)
return err;
}
@@ -2282,7 +2120,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
msg.seq = cb->nlh->nlmsg_seq;
rcu_read_lock();
-
if (prev_node) {
node = tipc_node_find(net, prev_node);
if (!node) {
@@ -2295,6 +2132,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->prev_seq = 1;
goto out;
}
+ tipc_node_put(node);
list_for_each_entry_continue_rcu(node, &tn->node_list,
list) {
@@ -2368,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- err = __tipc_nl_add_link(net, &msg, link);
+ err = __tipc_nl_add_link(net, &msg, link, 0);
if (err)
goto err_out;
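The queue-limit hunk above replaces the old fixed queue_limit[] thresholds with per-importance backlog limits derived from the configured send window: half a window for low importance, one window for medium, one and a half for high, two for critical, and a bulk value for system messages. A minimal standalone C sketch of that scaling; the struct, the importance enum and the sample values below are illustrative stand-ins, not the kernel's TIPC definitions:

#include <stdio.h>

enum { LOW, MEDIUM, HIGH, CRITICAL, SYSTEM, NUM_IMP };

struct link_sketch {
	unsigned int window;
	unsigned int backlog_limit[NUM_IMP];
};

/* Scale the backlog thresholds from the send window; max_bulk stands in
 * for the kernel's TIPC_MAX_PUBLICATIONS / (mtu / ITEM_SIZE) value.
 */
static void set_queue_limits(struct link_sketch *l, unsigned int win,
			     unsigned int max_bulk)
{
	l->window = win;
	l->backlog_limit[LOW]      = win / 2;      /* half a window      */
	l->backlog_limit[MEDIUM]   = win;          /* one window         */
	l->backlog_limit[HIGH]     = win / 2 * 3;  /* 1.5 windows        */
	l->backlog_limit[CRITICAL] = win * 2;      /* two windows        */
	l->backlog_limit[SYSTEM]   = max_bulk;     /* bulk distribution  */
}

int main(void)
{
	struct link_sketch l;
	int i;

	set_queue_limits(&l, 50, 1024);            /* sample values only */
	for (i = 0; i < NUM_IMP; i++)
		printf("importance %d: limit %u\n", i, l.backlog_limit[i]);
	return 0;
}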
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 7aeb52092bf3..b5b4e3554d4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -58,8 +58,10 @@
/* Link endpoint execution states
*/
-#define LINK_STARTED 0x0001
-#define LINK_STOPPED 0x0002
+#define LINK_STARTED 0x0001
+#define LINK_STOPPED 0x0002
+#define LINK_SYNCHING 0x0004
+#define LINK_FAILINGOVER 0x0008
/* Starting value for maximum packet size negotiation on unicast links
* (unless bearer MTU is less)
@@ -118,13 +120,13 @@ struct tipc_stats {
* @pmsg: convenience pointer to "proto_msg" field
* @priority: current link priority
* @net_plane: current link network plane ('A' through 'H')
- * @queue_limit: outbound message queue congestion thresholds (indexed by user)
+ * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
* @exp_msg_count: # of tunnelled messages expected during link changeover
* @reset_checkpoint: seq # of last acknowledged message at time of link reset
- * @max_pkt: current maximum packet size for this link
- * @max_pkt_target: desired maximum packet size for this link
- * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
- * @outqueue: outbound message queue
+ * @mtu: current maximum packet size for this link
+ * @advertised_mtu: advertised own mtu when link is being established
+ * @transmitq: queue for sent, non-acked messages
+ * @backlogq: queue for messages waiting to be sent
* @next_out_no: next sequence number to use for outbound messages
* @last_retransmitted: sequence number of most recently retransmitted message
* @stale_count: # of identical retransmit requests made by peer
@@ -165,36 +167,40 @@ struct tipc_link {
struct tipc_msg *pmsg;
u32 priority;
char net_plane;
- u32 queue_limit[15]; /* queue_limit[0]==window limit */
+ u16 synch_point;
- /* Changeover */
- u32 exp_msg_count;
- u32 reset_checkpoint;
+ /* Failover */
+ u16 failover_pkts;
+ u16 failover_checkpt;
+ struct sk_buff *failover_skb;
/* Max packet negotiation */
- u32 max_pkt;
- u32 max_pkt_target;
- u32 max_pkt_probes;
+ u16 mtu;
+ u16 advertised_mtu;
/* Sending */
- struct sk_buff_head outqueue;
+ struct sk_buff_head transmq;
+ struct sk_buff_head backlogq;
+ struct {
+ u16 len;
+ u16 limit;
+ } backlog[5];
u32 next_out_no;
+ u32 window;
u32 last_retransmitted;
u32 stale_count;
/* Reception */
u32 next_in_no;
- struct sk_buff_head deferred_queue;
- u32 unacked_window;
+ u32 rcv_unacked;
+ struct sk_buff_head deferdq;
struct sk_buff_head inputq;
struct sk_buff_head namedq;
/* Congestion handling */
- struct sk_buff *next_out;
struct sk_buff_head wakeupq;
/* Fragmentation/reassembly */
- u32 long_msg_seq_no;
struct sk_buff *reasm_buf;
/* Statistics */
@@ -225,7 +231,7 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
struct sk_buff_head *list);
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
- u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
+ u32 gap, u32 tolerance, u32 priority);
void tipc_link_push_packets(struct tipc_link *l_ptr);
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
@@ -302,9 +308,4 @@ static inline int link_reset_reset(struct tipc_link *l_ptr)
return l_ptr->state == RESET_RESET;
}
-static inline int link_congested(struct tipc_link *l_ptr)
-{
- return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0];
-}
-
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b6eb90cd3ef7..c3e96e815418 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -1,7 +1,7 @@
/*
* net/tipc/msg.c: TIPC message header routines
*
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -165,6 +165,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
}
if (fragid == LAST_FRAGMENT) {
+ TIPC_SKB_CB(head)->validated = false;
+ if (unlikely(!tipc_msg_validate(head)))
+ goto err;
*buf = head;
TIPC_SKB_CB(head)->tail = NULL;
*headbuf = NULL;
@@ -172,7 +175,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
}
*buf = NULL;
return 0;
-
err:
pr_warn_ratelimited("Unable to build fragment list\n");
kfree_skb(*buf);
@@ -181,6 +183,48 @@ err:
return 0;
}
+/* tipc_msg_validate - validate basic format of received message
+ *
+ * This routine ensures a TIPC message has an acceptable header, and at least
+ * as much data as the header indicates it should. The routine also ensures
+ * that the entire message header is stored in the main fragment of the message
+ * buffer, to simplify future access to message header fields.
+ *
+ * Note: Having extra info present in the message header or data areas is OK.
+ * TIPC will ignore the excess, under the assumption that it is optional info
+ * introduced by a later release of the protocol.
+ */
+bool tipc_msg_validate(struct sk_buff *skb)
+{
+ struct tipc_msg *msg;
+ int msz, hsz;
+
+ if (unlikely(TIPC_SKB_CB(skb)->validated))
+ return true;
+ if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
+ return false;
+
+ hsz = msg_hdr_sz(buf_msg(skb));
+ if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
+ return false;
+ if (unlikely(!pskb_may_pull(skb, hsz)))
+ return false;
+
+ msg = buf_msg(skb);
+ if (unlikely(msg_version(msg) != TIPC_VERSION))
+ return false;
+
+ msz = msg_size(msg);
+ if (unlikely(msz < hsz))
+ return false;
+ if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
+ return false;
+ if (unlikely(skb->len < msz))
+ return false;
+
+ TIPC_SKB_CB(skb)->validated = true;
+ return true;
+}
/**
* tipc_msg_build - create buffer chain containing specified header and data
@@ -228,6 +272,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
msg_set_size(&pkthdr, pktmax);
msg_set_fragm_no(&pkthdr, pktno);
+ msg_set_importance(&pkthdr, msg_importance(mhdr));
/* Prepare first fragment */
skb = tipc_buf_acquire(pktmax);
@@ -286,33 +331,36 @@ error:
/**
* tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @list: the buffer chain of the existing buffer ("bundle")
+ * @bskb: the buffer to append to ("bundle")
* @skb: buffer to be appended
* @mtu: max allowable size for the bundle buffer
* Consumes buffer if successful
* Returns true if bundling could be performed, otherwise false
*/
-bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
{
- struct sk_buff *bskb = skb_peek_tail(list);
- struct tipc_msg *bmsg = buf_msg(bskb);
+ struct tipc_msg *bmsg;
struct tipc_msg *msg = buf_msg(skb);
- unsigned int bsz = msg_size(bmsg);
+ unsigned int bsz;
unsigned int msz = msg_size(msg);
- u32 start = align(bsz);
+ u32 start, pad;
u32 max = mtu - INT_H_SIZE;
- u32 pad = start - bsz;
if (likely(msg_user(msg) == MSG_FRAGMENTER))
return false;
- if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
+ if (!bskb)
+ return false;
+ bmsg = buf_msg(bskb);
+ bsz = msg_size(bmsg);
+ start = align(bsz);
+ pad = start - bsz;
+
+ if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
return false;
if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
return false;
if (likely(msg_user(bmsg) != MSG_BUNDLER))
return false;
- if (likely(!TIPC_SKB_CB(bskb)->bundling))
- return false;
if (unlikely(skb_tailroom(bskb) < (pad + msz)))
return false;
if (unlikely(max < (start + msz)))
@@ -328,34 +376,40 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
/**
* tipc_msg_extract(): extract bundled inner packet from buffer
- * @skb: linear outer buffer, to be extracted from.
+ * @skb: buffer to be extracted from.
* @iskb: extracted inner buffer, to be returned
- * @pos: position of msg to be extracted. Returns with pointer of next msg
+ * @pos: position in outer message of msg to be extracted.
+ * Returns position of next msg
* Consumes outer buffer when last packet extracted
* Returns true when there is an extracted buffer, otherwise false
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
- struct tipc_msg *msg = buf_msg(skb);
- int imsz;
- struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos);
+ struct tipc_msg *msg;
+ int imsz, offset;
- /* Is there space left for shortest possible message? */
- if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE))
+ *iskb = NULL;
+ if (unlikely(skb_linearize(skb)))
+ goto none;
+
+ msg = buf_msg(skb);
+ offset = msg_hdr_sz(msg) + *pos;
+ if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
goto none;
- imsz = msg_size(imsg);
- /* Is there space left for current message ? */
- if ((*pos + imsz) > msg_data_sz(msg))
+ *iskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!*iskb))
goto none;
- *iskb = tipc_buf_acquire(imsz);
- if (!*iskb)
+ skb_pull(*iskb, offset);
+ imsz = msg_size(buf_msg(*iskb));
+ skb_trim(*iskb, imsz);
+ if (unlikely(!tipc_msg_validate(*iskb)))
goto none;
- skb_copy_to_linear_data(*iskb, imsg, imsz);
*pos += align(imsz);
return true;
none:
kfree_skb(skb);
+ kfree_skb(*iskb);
*iskb = NULL;
return false;
}
@@ -369,18 +423,17 @@ none:
* Replaces buffer if successful
* Returns true if success, otherwise false
*/
-bool tipc_msg_make_bundle(struct sk_buff_head *list,
- struct sk_buff *skb, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
{
struct sk_buff *bskb;
struct tipc_msg *bmsg;
- struct tipc_msg *msg = buf_msg(skb);
+ struct tipc_msg *msg = buf_msg(*skb);
u32 msz = msg_size(msg);
u32 max = mtu - INT_H_SIZE;
if (msg_user(msg) == MSG_FRAGMENTER)
return false;
- if (msg_user(msg) == CHANGEOVER_PROTOCOL)
+ if (msg_user(msg) == TUNNEL_PROTOCOL)
return false;
if (msg_user(msg) == BCAST_PROTOCOL)
return false;
@@ -398,9 +451,9 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list,
msg_set_seqno(bmsg, msg_seqno(msg));
msg_set_ack(bmsg, msg_ack(msg));
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
- TIPC_SKB_CB(bskb)->bundling = true;
- __skb_queue_tail(list, bskb);
- return tipc_msg_bundle(list, skb, mtu);
+ tipc_msg_bundle(bskb, *skb, mtu);
+ *skb = bskb;
+ return true;
}
/**
@@ -415,21 +468,17 @@ bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
int err)
{
struct tipc_msg *msg = buf_msg(buf);
- uint imp = msg_importance(msg);
struct tipc_msg ohdr;
uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
if (skb_linearize(buf))
goto exit;
+ msg = buf_msg(buf);
if (msg_dest_droppable(msg))
goto exit;
if (msg_errcode(msg))
goto exit;
-
memcpy(&ohdr, msg, msg_hdr_sz(msg));
- imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
- if (msg_isdata(msg))
- msg_set_importance(msg, imp);
msg_set_errcode(msg, err);
msg_set_origport(msg, msg_destport(&ohdr));
msg_set_destport(msg, msg_origport(&ohdr));
@@ -462,15 +511,18 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
{
struct tipc_msg *msg = buf_msg(skb);
u32 dport;
+ u32 own_addr = tipc_own_addr(net);
if (!msg_isdata(msg))
return false;
if (!msg_named(msg))
return false;
+ if (msg_errcode(msg))
+ return false;
*err = -TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
- if (msg_reroute_cnt(msg) > 0)
+ if (msg_reroute_cnt(msg))
return false;
*dnode = addr_domain(net, msg_lookup_scope(msg));
dport = tipc_nametbl_translate(net, msg_nametype(msg),
@@ -478,6 +530,8 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
if (!dport)
return false;
msg_incr_reroute_cnt(msg);
+ if (*dnode != own_addr)
+ msg_set_prevnode(msg, own_addr);
msg_set_destnode(msg, *dnode);
msg_set_destport(msg, dport);
*err = TIPC_OK;
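tipc_msg_validate() above accepts a buffer only when the claimed header size, total message size and protocol version are mutually consistent and the buffer actually holds that many bytes. A simplified restatement of those bounds checks; the MIN_HDR/MAX_HDR/MAX_USER_MSG constants and the msg_sketch struct are illustrative placeholders rather than the kernel's MIN_H_SIZE/MAX_H_SIZE macros and header accessors:

#include <stdbool.h>
#include <stddef.h>

#define MIN_HDR       24          /* shortest legal header     */
#define MAX_HDR       60          /* longest legal header      */
#define MAX_USER_MSG  66000       /* payload upper bound       */
#define PROTO_VERSION 2

struct msg_sketch {
	size_t buf_len;           /* bytes present in the buffer    */
	size_t hdr_sz;            /* header size claimed by header  */
	size_t msg_sz;            /* total size claimed by header   */
	int version;
};

static bool msg_is_valid(const struct msg_sketch *m)
{
	if (m->buf_len < MIN_HDR)                    /* room for a header?   */
		return false;
	if (m->hdr_sz < MIN_HDR || m->hdr_sz > MAX_HDR)
		return false;
	if (m->version != PROTO_VERSION)
		return false;
	if (m->msg_sz < m->hdr_sz)                   /* size covers header   */
		return false;
	if (m->msg_sz - m->hdr_sz > MAX_USER_MSG)    /* payload within bounds*/
		return false;
	return m->buf_len >= m->msg_sz;              /* buffer holds it all  */
}

int main(void)
{
	struct msg_sketch m = { 100, 40, 100, PROTO_VERSION };

	return msg_is_valid(&m) ? 0 : 1;
}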
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 9ace47f44a69..e1d3595e2ee9 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1,7 +1,7 @@
/*
* net/tipc/msg.h: Include file for TIPC message header routines
*
- * Copyright (c) 2000-2007, 2014, Ericsson AB
+ * Copyright (c) 2000-2007, 2014-2015 Ericsson AB
* Copyright (c) 2005-2008, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -54,6 +54,8 @@ struct plist;
* - TIPC_HIGH_IMPORTANCE
* - TIPC_CRITICAL_IMPORTANCE
*/
+#define TIPC_SYSTEM_IMPORTANCE 4
+
/*
* Payload message types
@@ -64,6 +66,19 @@ struct plist;
#define TIPC_DIRECT_MSG 3
/*
+ * Internal message users
+ */
+#define BCAST_PROTOCOL 5
+#define MSG_BUNDLER 6
+#define LINK_PROTOCOL 7
+#define CONN_MANAGER 8
+#define TUNNEL_PROTOCOL 10
+#define NAME_DISTRIBUTOR 11
+#define MSG_FRAGMENTER 12
+#define LINK_CONFIG 13
+#define SOCK_WAKEUP 14 /* pseudo user */
+
+/*
* Message header sizes
*/
#define SHORT_H_SIZE 24 /* In-cluster basic payload message */
@@ -76,7 +91,7 @@ struct plist;
#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
-#define TIPC_MEDIA_ADDR_OFFSET 5
+#define TIPC_MEDIA_INFO_OFFSET 5
/**
* TIPC message buffer code
@@ -87,12 +102,12 @@ struct plist;
* Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
* are word aligned for quicker access
*/
-#define BUF_HEADROOM LL_MAX_HEADER
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
struct tipc_skb_cb {
void *handle;
struct sk_buff *tail;
- bool deferred;
+ bool validated;
bool wakeup_pending;
bool bundling;
u16 chain_sz;
@@ -170,16 +185,6 @@ static inline void msg_set_user(struct tipc_msg *m, u32 n)
msg_set_bits(m, 0, 25, 0xf, n);
}
-static inline u32 msg_importance(struct tipc_msg *m)
-{
- return msg_bits(m, 0, 25, 0xf);
-}
-
-static inline void msg_set_importance(struct tipc_msg *m, u32 i)
-{
- msg_set_user(m, i);
-}
-
static inline u32 msg_hdr_sz(struct tipc_msg *m)
{
return msg_bits(m, 0, 21, 0xf) << 2;
@@ -235,6 +240,15 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
}
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+ return ((unchar *)m) + msg_hdr_sz(m);
+}
+
+static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
+{
+ return (struct tipc_msg *)msg_data(m);
+}
/*
* Word 1
@@ -336,6 +350,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
/*
* Words 3-10
*/
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+ if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+ return msg_bits(m, 5, 13, 0x7);
+ if (likely(msg_isdata(m) && !msg_errcode(m)))
+ return msg_user(m);
+ return TIPC_SYSTEM_IMPORTANCE;
+}
+
+static inline void msg_set_importance(struct tipc_msg *m, u32 i)
+{
+ if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+ msg_set_bits(m, 5, 13, 0x7, i);
+ else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+ msg_set_user(m, i);
+ else
+ pr_warn("Trying to set illegal importance in message\n");
+}
+
static inline u32 msg_prevnode(struct tipc_msg *m)
{
return msg_word(m, 3);
@@ -348,6 +381,8 @@ static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
static inline u32 msg_origport(struct tipc_msg *m)
{
+ if (msg_user(m) == MSG_FRAGMENTER)
+ m = msg_get_wrapped(m);
return msg_word(m, 4);
}
@@ -443,35 +478,11 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
msg_set_word(m, 10, n);
}
-static inline unchar *msg_data(struct tipc_msg *m)
-{
- return ((unchar *)m) + msg_hdr_sz(m);
-}
-
-static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
-{
- return (struct tipc_msg *)msg_data(m);
-}
-
/*
* Constants and routines used to read and write TIPC internal message headers
*/
/*
- * Internal message users
- */
-#define BCAST_PROTOCOL 5
-#define MSG_BUNDLER 6
-#define LINK_PROTOCOL 7
-#define CONN_MANAGER 8
-#define ROUTE_DISTRIBUTOR 9 /* obsoleted */
-#define CHANGEOVER_PROTOCOL 10
-#define NAME_DISTRIBUTOR 11
-#define MSG_FRAGMENTER 12
-#define LINK_CONFIG 13
-#define SOCK_WAKEUP 14 /* pseudo user */
-
-/*
* Connection management protocol message types
*/
#define CONN_PROBE 0
@@ -501,8 +512,8 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
/*
* Changeover tunnel message types
*/
-#define DUPLICATE_MSG 0
-#define ORIGINAL_MSG 1
+#define SYNCH_MSG 0
+#define FAILOVER_MSG 1
/*
* Config protocol message types
@@ -510,7 +521,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
#define DSC_REQ_MSG 0
#define DSC_RESP_MSG 1
-
/*
* Word 1
*/
@@ -534,6 +544,24 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
msg_set_bits(m, 1, 0, 0xffff, n);
}
+static inline u32 msg_node_capabilities(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 15, 0x1fff);
+}
+
+static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 15, 0x1fff, n);
+}
+
+static inline bool msg_dup(struct tipc_msg *m)
+{
+ if (likely(msg_user(m) != TUNNEL_PROTOCOL))
+ return false;
+ if (msg_type(m) != SYNCH_MSG)
+ return false;
+ return true;
+}
/*
* Word 2
@@ -688,7 +716,7 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
static inline char *msg_media_addr(struct tipc_msg *m)
{
- return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET];
+ return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
}
/*
@@ -734,21 +762,8 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 0, 0xffff, n);
}
-static inline u32 tipc_msg_tot_importance(struct tipc_msg *m)
-{
- if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
- return msg_importance(msg_get_wrapped(m));
- return msg_importance(m);
-}
-
-static inline u32 msg_tot_origport(struct tipc_msg *m)
-{
- if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
- return msg_origport(msg_get_wrapped(m));
- return msg_origport(m);
-}
-
struct sk_buff *tipc_buf_acquire(u32 size);
+bool tipc_msg_validate(struct sk_buff *skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
int err);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
@@ -757,9 +772,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
uint data_sz, u32 dnode, u32 onode,
u32 dport, u32 oport, int errcode);
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
-bool tipc_msg_make_bundle(struct sk_buff_head *list,
- struct sk_buff *skb, u32 mtu, u32 dnode);
+bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu);
+
+bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode);
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
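The relocated msg_importance()/msg_set_importance() helpers in the msg.h hunks above change how importance is encoded: fragments carry it in dedicated header bits, payload users 0-3 reuse the user field itself, and all other internal users are treated as TIPC_SYSTEM_IMPORTANCE. A small sketch of that mapping; the enum names and hdr_sketch fields are simplified stand-ins for the real msg_bits() accessors:

#include <stdio.h>

enum sketch_user { SK_LOW = 0, SK_MEDIUM, SK_HIGH, SK_CRITICAL,
		   SK_FRAGMENTER = 12 };
#define SK_SYSTEM_IMPORTANCE 4

struct hdr_sketch {
	unsigned int user;        /* message user field              */
	unsigned int frag_imp;    /* importance bits for fragments   */
	int is_data;
	int errcode;
};

static unsigned int importance(const struct hdr_sketch *h)
{
	if (h->user == SK_FRAGMENTER)
		return h->frag_imp;          /* carried in fragment header */
	if (h->is_data && !h->errcode)
		return h->user;              /* data users 0-3 map directly */
	return SK_SYSTEM_IMPORTANCE;         /* all other internal users   */
}

int main(void)
{
	struct hdr_sketch data = { SK_HIGH, 0, 1, 0 };
	struct hdr_sketch frag = { SK_FRAGMENTER, SK_CRITICAL, 0, 0 };

	printf("data: %u, fragment: %u\n",
	       importance(&data), importance(&frag));
	return 0;
}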
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index fcb07915aaac..41e7b7e4dda0 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -98,7 +98,7 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
continue;
if (!tipc_node_active_links(node))
continue;
- oskb = skb_copy(skb, GFP_ATOMIC);
+ oskb = pskb_copy(skb, GFP_ATOMIC);
if (!oskb)
break;
msg_set_destnode(buf_msg(oskb), dnode);
@@ -244,6 +244,7 @@ static void tipc_publ_subscribe(struct net *net, struct publication *publ,
tipc_node_lock(node);
list_add_tail(&publ->nodesub_list, &node->publ_list);
tipc_node_unlock(node);
+ tipc_node_put(node);
}
static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
@@ -258,6 +259,7 @@ static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
tipc_node_lock(node);
list_del_init(&publ->nodesub_list);
tipc_node_unlock(node);
+ tipc_node_put(node);
}
/**
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 105ba7adf06f..ab0ac62a1287 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -811,8 +811,8 @@ static void tipc_purge_publications(struct net *net, struct name_seq *seq)
sseq = seq->sseqs;
info = sseq->info;
list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
- tipc_nametbl_remove_publ(net, publ->type, publ->lower,
- publ->node, publ->ref, publ->key);
+ tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
+ publ->ref, publ->key);
kfree_rcu(publ, rcu);
}
hlist_del_init_rcu(&seq->ns_list);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 86152de8248d..22c059ad2999 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -42,6 +42,7 @@
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
+static void tipc_node_delete(struct tipc_node *node);
struct tipc_sock_conn {
u32 port;
@@ -67,6 +68,23 @@ static unsigned int tipc_hashfn(u32 addr)
return addr & (NODE_HTABLE_SIZE - 1);
}
+static void tipc_node_kref_release(struct kref *kref)
+{
+ struct tipc_node *node = container_of(kref, struct tipc_node, kref);
+
+ tipc_node_delete(node);
+}
+
+void tipc_node_put(struct tipc_node *node)
+{
+ kref_put(&node->kref, tipc_node_kref_release);
+}
+
+static void tipc_node_get(struct tipc_node *node)
+{
+ kref_get(&node->kref);
+}
+
/*
* tipc_node_find - locate specified node object, if it exists
*/
@@ -82,6 +100,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
hash) {
if (node->addr == addr) {
+ tipc_node_get(node);
rcu_read_unlock();
return node;
}
@@ -106,12 +125,13 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
}
n_ptr->addr = addr;
n_ptr->net = net;
+ kref_init(&n_ptr->kref);
spin_lock_init(&n_ptr->lock);
INIT_HLIST_NODE(&n_ptr->hash);
INIT_LIST_HEAD(&n_ptr->list);
INIT_LIST_HEAD(&n_ptr->publ_list);
INIT_LIST_HEAD(&n_ptr->conn_sks);
- __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
+ __skb_queue_head_init(&n_ptr->bclink.deferdq);
hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
if (n_ptr->addr < temp_node->addr)
@@ -120,16 +140,17 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
list_add_tail_rcu(&n_ptr->list, &temp_node->list);
n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
n_ptr->signature = INVALID_NODE_SIG;
+ tipc_node_get(n_ptr);
exit:
spin_unlock_bh(&tn->node_list_lock);
return n_ptr;
}
-static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *node)
{
- list_del_rcu(&n_ptr->list);
- hlist_del_rcu(&n_ptr->hash);
- kfree_rcu(n_ptr, rcu);
+ list_del_rcu(&node->list);
+ hlist_del_rcu(&node->hash);
+ kfree_rcu(node, rcu);
}
void tipc_node_stop(struct net *net)
@@ -139,7 +160,7 @@ void tipc_node_stop(struct net *net)
spin_lock_bh(&tn->node_list_lock);
list_for_each_entry_safe(node, t_node, &tn->node_list, list)
- tipc_node_delete(tn, node);
+ tipc_node_put(node);
spin_unlock_bh(&tn->node_list_lock);
}
@@ -147,6 +168,7 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
struct tipc_node *node;
struct tipc_sock_conn *conn;
+ int err = 0;
if (in_own_node(net, dnode))
return 0;
@@ -157,8 +179,10 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
return -EHOSTUNREACH;
}
conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
- if (!conn)
- return -EHOSTUNREACH;
+ if (!conn) {
+ err = -EHOSTUNREACH;
+ goto exit;
+ }
conn->peer_node = dnode;
conn->port = port;
conn->peer_port = peer_port;
@@ -166,7 +190,9 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
tipc_node_lock(node);
list_add_tail(&conn->list, &node->conn_sks);
tipc_node_unlock(node);
- return 0;
+exit:
+ tipc_node_put(node);
+ return err;
}
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
@@ -189,6 +215,7 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
kfree(conn);
}
tipc_node_unlock(node);
+ tipc_node_put(node);
}
/**
@@ -227,8 +254,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
active[0] = active[1] = l_ptr;
exit:
/* Leave room for changeover header when returning 'mtu' to users: */
- n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
- n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+ n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+ n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
}
/**
@@ -292,11 +319,10 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
/* Leave room for changeover header when returning 'mtu' to users: */
if (active[0]) {
- n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
- n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+ n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+ n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
return;
}
-
/* Loopback link went down? No fragmentation needed from now on. */
if (n_ptr->addr == tn->own_addr) {
n_ptr->act_mtus[0] = MAX_MSG_SIZE;
@@ -354,7 +380,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.recv_permitted) {
- __skb_queue_purge(&n_ptr->bclink.deferred_queue);
+ __skb_queue_purge(&n_ptr->bclink.deferdq);
if (n_ptr->bclink.reasm_buf) {
kfree_skb(n_ptr->bclink.reasm_buf);
@@ -367,18 +393,17 @@ static void node_lost_contact(struct tipc_node *n_ptr)
n_ptr->bclink.recv_permitted = false;
}
- /* Abort link changeover */
+ /* Abort any ongoing link failover */
for (i = 0; i < MAX_BEARERS; i++) {
struct tipc_link *l_ptr = n_ptr->links[i];
if (!l_ptr)
continue;
- l_ptr->reset_checkpoint = l_ptr->next_in_no;
- l_ptr->exp_msg_count = 0;
+ l_ptr->flags &= ~LINK_FAILINGOVER;
+ l_ptr->failover_checkpt = 0;
+ l_ptr->failover_pkts = 0;
+ kfree_skb(l_ptr->failover_skb);
+ l_ptr->failover_skb = NULL;
tipc_link_reset_fragments(l_ptr);
-
- /* Link marked for deletion after failover? => do it now */
- if (l_ptr->flags & LINK_STOPPED)
- tipc_link_delete(l_ptr);
}
n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
@@ -417,19 +442,25 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
char *linkname, size_t len)
{
struct tipc_link *link;
+ int err = -EINVAL;
struct tipc_node *node = tipc_node_find(net, addr);
- if ((bearer_id >= MAX_BEARERS) || !node)
- return -EINVAL;
+ if (!node)
+ return err;
+
+ if (bearer_id >= MAX_BEARERS)
+ goto exit;
+
tipc_node_lock(node);
link = node->links[bearer_id];
if (link) {
strncpy(linkname, link->name, len);
- tipc_node_unlock(node);
- return 0;
+ err = 0;
}
+exit:
tipc_node_unlock(node);
- return -EINVAL;
+ tipc_node_put(node);
+ return err;
}
void tipc_node_unlock(struct tipc_node *node)
@@ -459,7 +490,7 @@ void tipc_node_unlock(struct tipc_node *node)
TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
- TIPC_NAMED_MSG_EVT);
+ TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
spin_unlock_bh(&node->lock);
@@ -488,6 +519,9 @@ void tipc_node_unlock(struct tipc_node *node)
if (flags & TIPC_BCAST_MSG_EVT)
tipc_bclink_input(net);
+
+ if (flags & TIPC_BCAST_RESET)
+ tipc_link_reset_all(node);
}
/* Caller should hold node lock for the passed node */
@@ -542,17 +576,21 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
msg.seq = cb->nlh->nlmsg_seq;
rcu_read_lock();
-
- if (last_addr && !tipc_node_find(net, last_addr)) {
- rcu_read_unlock();
- /* We never set seq or call nl_dump_check_consistent() this
- * means that setting prev_seq here will cause the consistence
- * check to fail in the netlink callback handler. Resulting in
- * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
- * the node state changed while we released the lock.
- */
- cb->prev_seq = 1;
- return -EPIPE;
+ if (last_addr) {
+ node = tipc_node_find(net, last_addr);
+ if (!node) {
+ rcu_read_unlock();
+ /* We never set seq or call nl_dump_check_consistent()
+ * this means that setting prev_seq here will cause the
+			 * consistency check to fail in the netlink callback
+			 * handler, resulting in the NLMSG_DONE message having
+ * the NLM_F_DUMP_INTR flag set if the node state
+ * changed while we released the lock.
+ */
+ cb->prev_seq = 1;
+ return -EPIPE;
+ }
+ tipc_node_put(node);
}
list_for_each_entry_rcu(node, &tn->node_list, list) {
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 3d18c66b7f78..02d5c20dc551 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -64,7 +64,8 @@ enum {
TIPC_NOTIFY_LINK_UP = (1 << 6),
TIPC_NOTIFY_LINK_DOWN = (1 << 7),
TIPC_NAMED_MSG_EVT = (1 << 8),
- TIPC_BCAST_MSG_EVT = (1 << 9)
+ TIPC_BCAST_MSG_EVT = (1 << 9),
+ TIPC_BCAST_RESET = (1 << 10)
};
/**
@@ -84,7 +85,7 @@ struct tipc_node_bclink {
u32 last_sent;
u32 oos_state;
u32 deferred_size;
- struct sk_buff_head deferred_queue;
+ struct sk_buff_head deferdq;
struct sk_buff *reasm_buf;
int inputq_map;
bool recv_permitted;
@@ -93,6 +94,7 @@ struct tipc_node_bclink {
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
+ * @ref: reference counter to node object
* @lock: spinlock governing access to structure
* @net: the applicable net namespace
* @hash: links to adjacent nodes in unsorted hash chain
@@ -106,6 +108,7 @@ struct tipc_node_bclink {
* @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
* @link_cnt: number of links to node
+ * @capabilities: bitmap, indicating peer node's functional capabilities
* @signature: node instance identifier
* @link_id: local and remote bearer ids of changing link, if any
* @publ_list: list of publications
@@ -113,6 +116,7 @@ struct tipc_node_bclink {
*/
struct tipc_node {
u32 addr;
+ struct kref kref;
spinlock_t lock;
struct net *net;
struct hlist_node hash;
@@ -125,7 +129,8 @@ struct tipc_node {
struct tipc_node_bclink bclink;
struct list_head list;
int link_cnt;
- int working_links;
+ u16 working_links;
+ u16 capabilities;
u32 signature;
u32 link_id;
struct list_head publ_list;
@@ -134,6 +139,7 @@ struct tipc_node {
};
struct tipc_node *tipc_node_find(struct net *net, u32 addr);
+void tipc_node_put(struct tipc_node *node);
struct tipc_node *tipc_node_create(struct net *net, u32 addr);
void tipc_node_stop(struct net *net);
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
@@ -168,10 +174,12 @@ static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
node = tipc_node_find(net, addr);
- if (likely(node))
+ if (likely(node)) {
mtu = node->act_mtus[selector & 1];
- else
+ tipc_node_put(node);
+ } else {
mtu = MAX_MSG_SIZE;
+ }
return mtu;
}
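The node.c and node.h hunks above make node lifetime reference counted: tipc_node_find() now takes a reference through the new kref, and every caller is expected to release it with tipc_node_put(), the final put triggering deletion. A plain-C sketch of that find/put pairing; an integer counter plus malloc/free stand in for the kernel's kref and RCU-deferred freeing:

#include <stdio.h>
#include <stdlib.h>

struct node_sketch {
	int refcount;
	unsigned int addr;
};

/* Hand out a counted reference, as tipc_node_find() now does */
static struct node_sketch *node_find(struct node_sketch *n)
{
	if (!n)
		return NULL;
	n->refcount++;
	return n;
}

/* Every successful find must be paired with a put; the last put frees */
static void node_put(struct node_sketch *n)
{
	if (--n->refcount == 0) {
		printf("last reference dropped, freeing node 0x%x\n", n->addr);
		free(n);
	}
}

int main(void)
{
	struct node_sketch *n = malloc(sizeof(*n));
	struct node_sketch *found;

	n->refcount = 1;              /* list reference taken at create time */
	n->addr = 0x1001001;

	found = node_find(n);
	if (found) {
		/* ... use the node ... */
		node_put(found);      /* drop the lookup reference */
	}
	node_put(n);                  /* drop the list reference (stop path) */
	return 0;
}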
diff --git a/net/tipc/server.c b/net/tipc/server.c
index eadd4ed45905..77ff03ed1e18 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -37,11 +37,13 @@
#include "core.h"
#include "socket.h"
#include <net/sock.h>
+#include <linux/module.h>
/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25
#define MAX_RECV_MSG_COUNT 25
#define CF_CONNECTED 1
+#define CF_SERVER 2
#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
@@ -88,9 +90,19 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+ struct sockaddr_tipc *saddr = con->server->saddr;
+ struct socket *sock = con->sock;
+ struct sock *sk;
- if (con->sock) {
- tipc_sock_release_local(con->sock);
+ if (sock) {
+ sk = sock->sk;
+ if (test_bit(CF_SERVER, &con->flags)) {
+ __module_get(sock->ops->owner);
+ __module_get(sk->sk_prot_creator->owner);
+ }
+ saddr->scope = -TIPC_NODE_SCOPE;
+ kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
+ sock_release(sock);
con->sock = NULL;
}
@@ -281,7 +293,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
struct tipc_conn *newcon;
int ret;
- ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK);
+ ret = kernel_accept(sock, &newsock, O_NONBLOCK);
if (ret < 0)
return ret;
@@ -309,7 +321,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
struct socket *sock = NULL;
int ret;
- ret = tipc_sock_create_local(s->net, s->type, &sock);
+ ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
if (ret < 0)
return NULL;
ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
@@ -337,11 +349,31 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
pr_err("Unknown socket type %d\n", s->type);
goto create_err;
}
+
+	/* As the server's listening socket owner and creator are the same
+	 * module, we have to decrease the TIPC module reference count to
+	 * guarantee that it remains zero after the server socket is created;
+	 * otherwise, running "rmmod" cannot remove the TIPC module once it
+	 * has been inserted.
+ *
+	 * However, the reference count is always increased twice in
+	 * sock_create_kern(): once for the owner of the TIPC socket's
+	 * proto_ops struct, and once for the owner of the TIPC proto
+	 * struct. Therefore, we must decrement the module reference count
+	 * twice to ensure that it remains zero after the server's
+	 * listening socket is created. Of course, we
+ * must bump the module reference count twice as well before the socket
+ * is closed.
+ */
+ module_put(sock->ops->owner);
+ module_put(sock->sk->sk_prot_creator->owner);
+ set_bit(CF_SERVER, &con->flags);
+
return sock;
create_err:
+ kernel_sock_shutdown(sock, SHUT_RDWR);
sock_release(sock);
- con->sock = NULL;
return NULL;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b4d4467d0bb0..9074b5cede38 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -35,7 +35,6 @@
*/
#include <linux/rhashtable.h>
-#include <linux/jhash.h>
#include "core.h"
#include "name_table.h"
#include "node.h"
@@ -74,6 +73,7 @@
* @link_cong: non-zero if owner must sleep because of link congestion
* @sent_unacked: # messages sent by socket, and not yet acked by peer
* @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @remote: 'connected' peer for dgram/rdm
* @node: hash table node
* @rcu: rcu struct for tipc_sock
*/
@@ -96,6 +96,7 @@ struct tipc_sock {
bool link_cong;
uint sent_unacked;
uint rcv_unacked;
+ struct sockaddr_tipc remote;
struct rhash_head node;
struct rcu_head rcu;
};
@@ -114,13 +115,14 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
+static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
+ size_t dsz);
+static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
-
static struct proto tipc_proto;
-static struct proto tipc_proto_kern;
static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
[TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
@@ -130,6 +132,8 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
[TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
};
+static const struct rhashtable_params tsk_rht_params;
+
/*
* Revised TIPC socket locking policy:
*
@@ -338,11 +342,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
}
/* Allocate socket's protocol area */
- if (!kern)
- sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
- else
- sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
-
+ sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
if (sk == NULL)
return -ENOMEM;
@@ -380,75 +380,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
return 0;
}
-/**
- * tipc_sock_create_local - create TIPC socket from inside TIPC module
- * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
- *
- * We cannot use sock_creat_kern here because it bumps module user count.
- * Since socket owner and creator is the same module we must make sure
- * that module count remains zero for module local sockets, otherwise
- * we cannot do rmmod.
- *
- * Returns 0 on success, errno otherwise
- */
-int tipc_sock_create_local(struct net *net, int type, struct socket **res)
-{
- int rc;
-
- rc = sock_create_lite(AF_TIPC, type, 0, res);
- if (rc < 0) {
- pr_err("Failed to create kernel socket\n");
- return rc;
- }
- tipc_sk_create(net, *res, 0, 1);
-
- return 0;
-}
-
-/**
- * tipc_sock_release_local - release socket created by tipc_sock_create_local
- * @sock: the socket to be released.
- *
- * Module reference count is not incremented when such sockets are created,
- * so we must keep it from being decremented when they are released.
- */
-void tipc_sock_release_local(struct socket *sock)
-{
- tipc_release(sock);
- sock->ops = NULL;
- sock_release(sock);
-}
-
-/**
- * tipc_sock_accept_local - accept a connection on a socket created
- * with tipc_sock_create_local. Use this function to avoid that
- * module reference count is inadvertently incremented.
- *
- * @sock: the accepting socket
- * @newsock: reference to the new socket to be created
- * @flags: socket flags
- */
-
-int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
- int flags)
-{
- struct sock *sk = sock->sk;
- int ret;
-
- ret = sock_create_lite(sk->sk_family, sk->sk_type,
- sk->sk_protocol, newsock);
- if (ret < 0)
- return ret;
-
- ret = tipc_accept(sock, *newsock, flags);
- if (ret < 0) {
- sock_release(*newsock);
- return ret;
- }
- (*newsock)->ops = sock->ops;
- return ret;
-}
-
static void tipc_sk_callback(struct rcu_head *head)
{
struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
@@ -892,7 +823,6 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
/**
* tipc_sendmsg - send message in connectionless manner
- * @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
* @dsz: amount of user data to be sent
@@ -904,9 +834,21 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
+static int tipc_sendmsg(struct socket *sock,
struct msghdr *m, size_t dsz)
{
+ struct sock *sk = sock->sk;
+ int ret;
+
+ lock_sock(sk);
+ ret = __tipc_sendmsg(sock, m, dsz);
+ release_sock(sk);
+
+ return ret;
+}
+
+static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
+{
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
@@ -915,49 +857,40 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
u32 dnode, dport;
struct sk_buff_head *pktchain = &sk->sk_write_queue;
struct sk_buff *skb;
- struct tipc_name_seq *seq = &dest->addr.nameseq;
+ struct tipc_name_seq *seq;
struct iov_iter save;
u32 mtu;
long timeo;
int rc;
- if (unlikely(!dest))
- return -EDESTADDRREQ;
-
- if (unlikely((m->msg_namelen < sizeof(*dest)) ||
- (dest->family != AF_TIPC)))
- return -EINVAL;
-
if (dsz > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
-
- if (iocb)
- lock_sock(sk);
-
+ if (unlikely(!dest)) {
+ if (tsk->connected && sock->state == SS_READY)
+ dest = &tsk->remote;
+ else
+ return -EDESTADDRREQ;
+ } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
+ dest->family != AF_TIPC) {
+ return -EINVAL;
+ }
if (unlikely(sock->state != SS_READY)) {
- if (sock->state == SS_LISTENING) {
- rc = -EPIPE;
- goto exit;
- }
- if (sock->state != SS_UNCONNECTED) {
- rc = -EISCONN;
- goto exit;
- }
- if (tsk->published) {
- rc = -EOPNOTSUPP;
- goto exit;
- }
+ if (sock->state == SS_LISTENING)
+ return -EPIPE;
+ if (sock->state != SS_UNCONNECTED)
+ return -EISCONN;
+ if (tsk->published)
+ return -EOPNOTSUPP;
if (dest->addrtype == TIPC_ADDR_NAME) {
tsk->conn_type = dest->addr.name.name.type;
tsk->conn_instance = dest->addr.name.name.instance;
}
}
-
+ seq = &dest->addr.nameseq;
timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
if (dest->addrtype == TIPC_ADDR_MCAST) {
- rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
- goto exit;
+ return tipc_sendmcast(sock, seq, m, dsz, timeo);
} else if (dest->addrtype == TIPC_ADDR_NAME) {
u32 type = dest->addr.name.name.type;
u32 inst = dest->addr.name.name.instance;
@@ -972,10 +905,8 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
dport = tipc_nametbl_translate(net, type, inst, &dnode);
msg_set_destnode(mhdr, dnode);
msg_set_destport(mhdr, dport);
- if (unlikely(!dport && !dnode)) {
- rc = -EHOSTUNREACH;
- goto exit;
- }
+ if (unlikely(!dport && !dnode))
+ return -EHOSTUNREACH;
} else if (dest->addrtype == TIPC_ADDR_ID) {
dnode = dest->addr.id.node;
msg_set_type(mhdr, TIPC_DIRECT_MSG);
@@ -990,7 +921,7 @@ new_mtu:
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
if (rc < 0)
- goto exit;
+ return rc;
do {
skb = skb_peek(pktchain);
@@ -1013,9 +944,6 @@ new_mtu:
if (rc)
__skb_queue_purge(pktchain);
} while (!rc);
-exit:
- if (iocb)
- release_sock(sk);
return rc;
}
@@ -1052,7 +980,6 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
/**
* tipc_send_stream - send stream-oriented data
- * @iocb: (unused)
* @sock: socket structure
* @m: data to send
* @dsz: total length of data to be transmitted
@@ -1062,8 +989,19 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
* Returns the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
-static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t dsz)
+static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
+{
+ struct sock *sk = sock->sk;
+ int ret;
+
+ lock_sock(sk);
+ ret = __tipc_send_stream(sock, m, dsz);
+ release_sock(sk);
+
+ return ret;
+}
+
+static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
@@ -1080,7 +1018,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
/* Handle implied connection establishment */
if (unlikely(dest)) {
- rc = tipc_sendmsg(iocb, sock, m, dsz);
+ rc = __tipc_sendmsg(sock, m, dsz);
if (dsz && (dsz == rc))
tsk->sent_unacked = 1;
return rc;
@@ -1088,15 +1026,11 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
if (dsz > (uint)INT_MAX)
return -EMSGSIZE;
- if (iocb)
- lock_sock(sk);
-
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_DISCONNECTING)
- rc = -EPIPE;
+ return -EPIPE;
else
- rc = -ENOTCONN;
- goto exit;
+ return -ENOTCONN;
}
timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
@@ -1108,7 +1042,7 @@ next:
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
if (unlikely(rc < 0))
- goto exit;
+ return rc;
do {
if (likely(!tsk_conn_cong(tsk))) {
rc = tipc_link_xmit(net, pktchain, dnode, portid);
@@ -1133,15 +1067,12 @@ next:
if (rc)
__skb_queue_purge(pktchain);
} while (!rc);
-exit:
- if (iocb)
- release_sock(sk);
+
return sent ? sent : rc;
}
/**
* tipc_send_packet - send a connection-oriented message
- * @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
* @dsz: length of data to be transmitted
@@ -1150,13 +1081,12 @@ exit:
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t dsz)
+static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
if (dsz > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
- return tipc_send_stream(iocb, sock, m, dsz);
+ return tipc_send_stream(sock, m, dsz);
}
/* tipc_sk_finish_conn - complete the setup of a connection
@@ -1317,12 +1247,12 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
break;
- err = sock_intr_errno(timeo);
- if (signal_pending(current))
- break;
err = -EAGAIN;
if (!timeo)
break;
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ break;
}
finish_wait(sk_sleep(sk), &wait);
*timeop = timeo;
@@ -1331,7 +1261,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
/**
* tipc_recvmsg - receive packet-oriented message
- * @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
* @flags: receive flags
@@ -1341,8 +1270,8 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
*
* Returns size of returned message data, errno otherwise
*/
-static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
+ int flags)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
@@ -1426,7 +1355,6 @@ exit:
/**
* tipc_recv_stream - receive stream-oriented data
- * @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
* @flags: receive flags
@@ -1436,8 +1364,8 @@ exit:
*
* Returns size of returned message data, errno otherwise
*/
-static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
+ size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
@@ -1836,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
u32 dnode, dport = 0;
- int err = -TIPC_ERR_NO_PORT;
+ int err;
struct sk_buff *skb;
struct tipc_sock *tsk;
struct tipc_net *tn;
struct sock *sk;
while (skb_queue_len(inputq)) {
+ err = -TIPC_ERR_NO_PORT;
skb = NULL;
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
@@ -1909,17 +1838,26 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
int destlen, int flags)
{
struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
struct msghdr m = {NULL,};
- long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
+ long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
socket_state previous;
- int res;
+ int res = 0;
lock_sock(sk);
- /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
+ /* DGRAM/RDM connect(), just save the destaddr */
if (sock->state == SS_READY) {
- res = -EOPNOTSUPP;
+ if (dst->family == AF_UNSPEC) {
+ memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
+ tsk->connected = 0;
+ } else if (destlen != sizeof(struct sockaddr_tipc)) {
+ res = -EINVAL;
+ } else {
+ memcpy(&tsk->remote, dest, destlen);
+ tsk->connected = 1;
+ }
goto exit;
}
@@ -1947,7 +1885,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
if (!timeout)
m.msg_flags = MSG_DONTWAIT;
- res = tipc_sendmsg(NULL, sock, &m, 0);
+ res = __tipc_sendmsg(sock, &m, 0);
if ((res < 0) && (res != -EWOULDBLOCK))
goto exit;
@@ -2027,12 +1965,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
err = -EINVAL;
if (sock->state != SS_LISTENING)
break;
- err = sock_intr_errno(timeo);
- if (signal_pending(current))
- break;
err = -EAGAIN;
if (!timeo)
break;
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ break;
}
finish_wait(sk_sleep(sk), &wait);
return err;
@@ -2103,7 +2041,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
struct msghdr m = {NULL,};
tsk_advance_rx_queue(sk);
- tipc_send_packet(NULL, new_sock, &m, 0);
+ __tipc_send_stream(new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
@@ -2154,7 +2092,6 @@ restart:
TIPC_CONN_SHUTDOWN))
tipc_link_xmit_skb(net, skb, dnode,
tsk->portid);
- tipc_node_remove_conn(net, dnode, tsk->portid);
} else {
dnode = tsk_peer_node(tsk);
@@ -2312,7 +2249,7 @@ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
struct tipc_sock *tsk;
rcu_read_lock();
- tsk = rhashtable_lookup(&tn->sk_rht, &portid);
+ tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
if (tsk)
sock_hold(&tsk->sk);
rcu_read_unlock();
@@ -2334,7 +2271,8 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
portid = TIPC_MIN_PORT;
tsk->portid = portid;
sock_hold(&tsk->sk);
- if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
+ if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
+ tsk_rht_params))
return 0;
sock_put(&tsk->sk);
}
@@ -2347,26 +2285,27 @@ static void tipc_sk_remove(struct tipc_sock *tsk)
struct sock *sk = &tsk->sk;
struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
- if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
+ if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
}
+static const struct rhashtable_params tsk_rht_params = {
+ .nelem_hint = 192,
+ .head_offset = offsetof(struct tipc_sock, node),
+ .key_offset = offsetof(struct tipc_sock, portid),
+ .key_len = sizeof(u32), /* portid */
+ .max_size = 1048576,
+ .min_size = 256,
+ .automatic_shrinking = true,
+};
+
int tipc_sk_rht_init(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct rhashtable_params rht_params = {
- .nelem_hint = 192,
- .head_offset = offsetof(struct tipc_sock, node),
- .key_offset = offsetof(struct tipc_sock, portid),
- .key_len = sizeof(u32), /* portid */
- .hashfn = jhash,
- .max_shift = 20, /* 1M */
- .min_shift = 8, /* 256 */
- };
- return rhashtable_init(&tn->sk_rht, &rht_params);
+ return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}
void tipc_sk_rht_destroy(struct net *net)
@@ -2609,12 +2548,6 @@ static struct proto tipc_proto = {
.sysctl_rmem = sysctl_tipc_rmem
};
-static struct proto tipc_proto_kern = {
- .name = "TIPC",
- .obj_size = sizeof(struct tipc_sock),
- .sysctl_rmem = sysctl_tipc_rmem
-};
-
/**
* tipc_socket_init - initialize TIPC socket interface
*
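The tipc_connect() hunk earlier in this socket.c diff adds datagram semantics for SS_READY sockets: connect() merely records a default destination in tsk->remote, and connecting with AF_UNSPEC dissolves the association again. A condensed sketch of that logic; the address and socket structs here are reduced stand-ins for sockaddr_tipc and tipc_sock:

#include <stdio.h>
#include <string.h>

#define SKETCH_AF_UNSPEC 0
#define SKETCH_AF_TIPC   30

struct addr_sketch {
	int family;
	unsigned int port;        /* addressing details reduced to one field */
};

struct dgram_sock_sketch {
	struct addr_sketch remote;
	int connected;
};

static int dgram_connect(struct dgram_sock_sketch *s,
			 const struct addr_sketch *dst, size_t len)
{
	if (dst->family == SKETCH_AF_UNSPEC) {   /* dissolve the association */
		memset(&s->remote, 0, sizeof(s->remote));
		s->connected = 0;
		return 0;
	}
	if (len != sizeof(*dst))
		return -1;                       /* -EINVAL in the kernel    */
	s->remote = *dst;                        /* remember the default peer */
	s->connected = 1;
	return 0;
}

int main(void)
{
	struct dgram_sock_sketch s = { { 0, 0 }, 0 };
	struct addr_sketch peer = { SKETCH_AF_TIPC, 4711 };

	dgram_connect(&s, &peer, sizeof(peer));
	printf("connected=%d port=%u\n", s.connected, s.remote.port);
	return 0;
}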
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 238f1b7bd9bd..bf6551389522 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,10 +44,6 @@
SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
int tipc_socket_init(void);
void tipc_socket_stop(void);
-int tipc_sock_create_local(struct net *net, int type, struct socket **res);
-void tipc_sock_release_local(struct socket *sock);
-int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
- int flags);
int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct sk_buff_head *inputq);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 72c339e432aa..1c147c869c2e 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -162,19 +162,6 @@ static void subscr_del(struct tipc_subscription *sub)
atomic_dec(&tn->subscription_count);
}
-/**
- * subscr_terminate - terminate communication with a subscriber
- *
- * Note: Must call it in process context since it might sleep.
- */
-static void subscr_terminate(struct tipc_subscription *sub)
-{
- struct tipc_subscriber *subscriber = sub->subscriber;
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
-
- tipc_conn_terminate(tn->topsrv, subscriber->conid);
-}
-
static void subscr_release(struct tipc_subscriber *subscriber)
{
struct tipc_subscription *sub;
@@ -312,16 +299,14 @@ static void subscr_conn_msg_event(struct net *net, int conid,
{
struct tipc_subscriber *subscriber = usr_data;
struct tipc_subscription *sub = NULL;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
spin_lock_bh(&subscriber->lock);
- if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber,
- &sub) < 0) {
- spin_unlock_bh(&subscriber->lock);
- subscr_terminate(sub);
- return;
- }
+ subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub);
if (sub)
tipc_nametbl_subscribe(sub);
+ else
+ tipc_conn_terminate(tn->topsrv, subscriber->conid);
spin_unlock_bh(&subscriber->lock);
}
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
new file mode 100644
index 000000000000..66deebc66aa1
--- /dev/null
+++ b/net/tipc/udp_media.c
@@ -0,0 +1,448 @@
+/* net/tipc/udp_media.c: IP bearer support for TIPC
+ *
+ * Copyright (c) 2015, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/inet.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/udp_tunnel.h>
+#include <net/addrconf.h>
+#include <linux/tipc_netlink.h>
+#include "core.h"
+#include "bearer.h"
+
+/* IANA assigned UDP port */
+#define UDP_PORT_DEFAULT 6118
+
+static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
+ [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
+ [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
+ .len = sizeof(struct sockaddr_storage)},
+ [TIPC_NLA_UDP_REMOTE] = {.type = NLA_BINARY,
+ .len = sizeof(struct sockaddr_storage)},
+};
+
+/**
+ * struct udp_media_addr - IP/UDP addressing information
+ *
+ * This is the bearer level originating address used in neighbor discovery
+ * messages, and all fields should be in network byte order
+ */
+struct udp_media_addr {
+ __be16 proto;
+ __be16 udp_port;
+ union {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ };
+};
+
+/**
+ * struct udp_bearer - ip/udp bearer data structure
+ * @bearer: associated generic tipc bearer
+ * @ubsock: bearer associated socket
+ * @ifindex: local address scope
+ * @work: used to schedule deferred work on a bearer
+ */
+struct udp_bearer {
+ struct tipc_bearer __rcu *bearer;
+ struct socket *ubsock;
+ u32 ifindex;
+ struct work_struct work;
+};
+
+/* udp_media_addr_set - convert a ip/udp address to a TIPC media address */
+static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
+ struct udp_media_addr *ua)
+{
+ memset(addr, 0, sizeof(struct tipc_media_addr));
+ addr->media_id = TIPC_MEDIA_TYPE_UDP;
+ memcpy(addr->value, ua, sizeof(struct udp_media_addr));
+ if (ntohs(ua->proto) == ETH_P_IP) {
+ if (ipv4_is_multicast(ua->ipv4.s_addr))
+ addr->broadcast = 1;
+ } else if (ntohs(ua->proto) == ETH_P_IPV6) {
+ if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST)
+ addr->broadcast = 1;
+ } else {
+ pr_err("Invalid UDP media address\n");
+ }
+}
+
+/* tipc_udp_addr2str - convert ip/udp address to string */
+static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
+{
+ struct udp_media_addr *ua = (struct udp_media_addr *)&a->value;
+
+ if (ntohs(ua->proto) == ETH_P_IP)
+ snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port));
+ else if (ntohs(ua->proto) == ETH_P_IPV6)
+ snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port));
+ else
+ pr_err("Invalid UDP media address\n");
+ return 0;
+}
+
+/* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message */
+static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a,
+ char *msg)
+{
+ struct udp_media_addr *ua;
+
+ ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET);
+ if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP)
+ return -EINVAL;
+ tipc_udp_media_addr_set(a, ua);
+ return 0;
+}
+
+/* tipc_udp_addr2msg - write an ip/udp address to a TIPC ndisc message */
+static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a)
+{
+ memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
+ msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP;
+ memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value,
+ sizeof(struct udp_media_addr));
+ return 0;
+}
+
+/* tipc_udp_send_msg - enqueue a send request */
+static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dest)
+{
+ int ttl, err = 0;
+ struct udp_bearer *ub;
+ struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
+ struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
+ struct sk_buff *clone;
+ struct rtable *rt;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (!clone) {
+ err = -ENOMEM;
+ goto tx_error;
+ }
+ skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub) {
+ err = -ENODEV;
+ goto tx_error;
+ }
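+ /* Route the clone and transmit it through the UDP tunnel socket */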
+ if (dst->proto == htons(ETH_P_IP)) {
+ struct flowi4 fl = {
+ .daddr = dst->ipv4.s_addr,
+ .saddr = src->ipv4.s_addr,
+ .flowi4_mark = clone->mark,
+ .flowi4_proto = IPPROTO_UDP
+ };
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto tx_error;
+ }
+ ttl = ip4_dst_hoplimit(&rt->dst);
+ err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone,
+ src->ipv4.s_addr,
+ dst->ipv4.s_addr, 0, ttl, 0,
+ src->udp_port, dst->udp_port,
+ false, true);
+ if (err < 0) {
+ ip_rt_put(rt);
+ goto tx_error;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct dst_entry *ndst;
+ struct flowi6 fl6 = {
+ .flowi6_oif = ub->ifindex,
+ .daddr = dst->ipv6,
+ .saddr = src->ipv6,
+ .flowi6_proto = IPPROTO_UDP
+ };
+ err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+ if (err)
+ goto tx_error;
+ ttl = ip6_dst_hoplimit(ndst);
+ err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone,
+ ndst->dev, &src->ipv6,
+ &dst->ipv6, 0, ttl, src->udp_port,
+ dst->udp_port, false);
+#endif
+ }
+ return err;
+
+tx_error:
+ kfree_skb(clone);
+ return err;
+}
+
+/* tipc_udp_recv - read data from bearer socket */
+static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct udp_bearer *ub;
+ struct tipc_bearer *b;
+
+ ub = rcu_dereference_sk_user_data(sk);
+ if (!ub) {
+ pr_err_ratelimited("Failed to get UDP bearer reference\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
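+ /* Strip the UDP header before handing the packet to the TIPC receive path */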
+ skb_pull(skb, sizeof(struct udphdr));
+ rcu_read_lock();
+ b = rcu_dereference_rtnl(ub->bearer);
+
+ if (b) {
+ tipc_rcv(sock_net(sk), skb, b);
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return 0;
+}
+
+static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
+{
+ int err = 0;
+ struct ip_mreqn mreqn;
+ struct sock *sk = ub->ubsock->sk;
+
+ if (ntohs(remote->proto) == ETH_P_IP) {
+ if (!ipv4_is_multicast(remote->ipv4.s_addr))
+ return 0;
+ mreqn.imr_multiaddr = remote->ipv4;
+ mreqn.imr_ifindex = ub->ifindex;
+ err = ip_mc_join_group(sk, &mreqn);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (!ipv6_addr_is_multicast(&remote->ipv6))
+ return 0;
+ err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
+ &remote->ipv6);
+#endif
+ }
+ return err;
+}
+
+/**
+ * parse_options - build local/remote addresses from configuration
+ * @attrs: netlink config data
+ * @ub: UDP bearer instance
+ * @local: local bearer IP address/port
+ * @remote: peer or multicast IP/port
+ */
+static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub,
+ struct udp_media_addr *local,
+ struct udp_media_addr *remote)
+{
+ struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
+ struct sockaddr_storage *sa_local, *sa_remote;
+
+ if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
+ goto err;
+ if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX,
+ attrs[TIPC_NLA_BEARER_UDP_OPTS],
+ tipc_nl_udp_policy))
+ goto err;
+ if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) {
+ sa_local = nla_data(opts[TIPC_NLA_UDP_LOCAL]);
+ sa_remote = nla_data(opts[TIPC_NLA_UDP_REMOTE]);
+ } else {
+err:
+ pr_err("Invalid UDP bearer configuration");
+ return -EINVAL;
+ }
+ if (sa_local->ss_family != sa_remote->ss_family)
+ return -EINVAL;
+ if (sa_local->ss_family == AF_INET) {
+ struct sockaddr_in *ip4;
+
+ ip4 = (struct sockaddr_in *)sa_local;
+ local->proto = htons(ETH_P_IP);
+ local->udp_port = ip4->sin_port;
+ local->ipv4.s_addr = ip4->sin_addr.s_addr;
+
+ ip4 = (struct sockaddr_in *)sa_remote;
+ remote->proto = htons(ETH_P_IP);
+ remote->udp_port = ip4->sin_port;
+ remote->ipv4.s_addr = ip4->sin_addr.s_addr;
+ return 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (sa_local->ss_family == AF_INET6) {
+ struct sockaddr_in6 *ip6;
+
+ ip6 = (struct sockaddr_in6 *)sa_local;
+ local->proto = htons(ETH_P_IPV6);
+ local->udp_port = ip6->sin6_port;
+ local->ipv6 = ip6->sin6_addr;
+ ub->ifindex = ip6->sin6_scope_id;
+
+ ip6 = (struct sockaddr_in6 *)sa_remote;
+ remote->proto = htons(ETH_P_IPV6);
+ remote->udp_port = ip6->sin6_port;
+ remote->ipv6 = ip6->sin6_addr;
+ return 0;
+#endif
+ }
+ return -EADDRNOTAVAIL;
+}
+
+/**
+ * tipc_udp_enable - callback to create a new udp bearer instance
+ * @net: network namespace
+ * @b: pointer to generic tipc_bearer
+ * @attrs: netlink bearer configuration
+ *
+ * Validate the bearer parameters and initialize the UDP bearer.
+ * The rtnl_lock must be held by the caller.
+ */
+static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+ struct nlattr *attrs[])
+{
+ int err = -EINVAL;
+ struct udp_bearer *ub;
+ struct udp_media_addr *remote;
+ struct udp_media_addr local = {0};
+ struct udp_port_cfg udp_conf = {0};
+ struct udp_tunnel_sock_cfg tuncfg = {NULL};
+
+ ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
+ if (!ub)
+ return -ENOMEM;
+
+ remote = (struct udp_media_addr *)&b->bcast_addr.value;
+ memset(remote, 0, sizeof(struct udp_media_addr));
+ err = parse_options(attrs, ub, &local, remote);
+ if (err)
+ goto err;
+
+ b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
+ b->bcast_addr.broadcast = 1;
+ rcu_assign_pointer(b->media_ptr, ub);
+ rcu_assign_pointer(ub->bearer, b);
+ tipc_udp_media_addr_set(&b->addr, &local);
+ if (local.proto == htons(ETH_P_IP)) {
+ struct net_device *dev;
+
+ dev = __ip_dev_find(net, local.ipv4.s_addr, false);
+ if (!dev) {
+ err = -ENODEV;
+ goto err;
+ }
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.use_udp_checksums = false;
+ ub->ifindex = dev->ifindex;
+ b->mtu = dev->mtu - sizeof(struct iphdr)
+ - sizeof(struct udphdr);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (local.proto == htons(ETH_P_IPV6)) {
+ udp_conf.family = AF_INET6;
+ udp_conf.use_udp6_tx_checksums = true;
+ udp_conf.use_udp6_rx_checksums = true;
+ udp_conf.local_ip6 = in6addr_any;
+ b->mtu = 1280;
+#endif
+ } else {
+ err = -EAFNOSUPPORT;
+ goto err;
+ }
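+ /* Create the bearer socket and hook up the tunnel encap receive callback */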
+ udp_conf.local_udp_port = local.udp_port;
+ err = udp_sock_create(net, &udp_conf, &ub->ubsock);
+ if (err)
+ goto err;
+ tuncfg.sk_user_data = ub;
+ tuncfg.encap_type = 1;
+ tuncfg.encap_rcv = tipc_udp_recv;
+ tuncfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
+
+ err = enable_mcast(ub, remote);
+ if (err)
+ goto err;
+ return 0;
+err:
+ if (ub->ubsock)
+ udp_tunnel_sock_release(ub->ubsock);
+ kfree(ub);
+ return err;
+}
+
+/* cleanup_bearer - break the socket/bearer association */
+static void cleanup_bearer(struct work_struct *work)
+{
+ struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+
+ if (ub->ubsock)
+ udp_tunnel_sock_release(ub->ubsock);
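+ /* Wait for outstanding RCU readers of the bearer before freeing it */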
+ synchronize_net();
+ kfree(ub);
+}
+
+/* tipc_udp_disable - detach bearer from socket */
+static void tipc_udp_disable(struct tipc_bearer *b)
+{
+ struct udp_bearer *ub;
+
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub) {
+ pr_err("UDP bearer instance not found\n");
+ return;
+ }
+ if (ub->ubsock)
+ sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
+ RCU_INIT_POINTER(b->media_ptr, NULL);
+ RCU_INIT_POINTER(ub->bearer, NULL);
+
+ /* sock_release needs to be done outside of the rtnl lock */
+ INIT_WORK(&ub->work, cleanup_bearer);
+ schedule_work(&ub->work);
+}
+
+struct tipc_media udp_media_info = {
+ .send_msg = tipc_udp_send_msg,
+ .enable_media = tipc_udp_enable,
+ .disable_media = tipc_udp_disable,
+ .addr2str = tipc_udp_addr2str,
+ .addr2msg = tipc_udp_addr2msg,
+ .msg2addr = tipc_udp_msg2addr,
+ .priority = TIPC_DEF_LINK_PRI,
+ .tolerance = TIPC_DEF_LINK_TOL,
+ .window = TIPC_DEF_LINK_WIN,
+ .type_id = TIPC_MEDIA_TYPE_UDP,
+ .hwaddr_len = 0,
+ .name = "udp"
+};
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 526b6edab018..5266ea7b922b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -305,7 +305,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
- if (dentry && dentry->d_inode == i) {
+ if (dentry && d_backing_inode(dentry) == i) {
sock_hold(s);
goto found;
}
@@ -516,20 +516,15 @@ static unsigned int unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
-static int unix_stream_sendmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t);
-static int unix_stream_recvmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t, int);
-static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t);
-static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t, int);
+static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
+static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
+static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
+static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
int, int);
-static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t);
-static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
- struct msghdr *, size_t, int);
+static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
+static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
+ int);
static int unix_set_peek_off(struct sock *sk, int val)
{
@@ -783,7 +778,7 @@ static struct sock *unix_find_other(struct net *net,
err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
- inode = path.dentry->d_inode;
+ inode = d_backing_inode(path.dentry);
err = inode_permission(inode, MAY_WRITE);
if (err)
goto put_fail;
@@ -844,7 +839,7 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
*/
err = security_path_mknod(&path, dentry, mode, 0);
if (!err) {
- err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
+ err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
if (!err) {
res->mnt = mntget(path.mnt);
res->dentry = dget(dentry);
@@ -910,7 +905,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out_up;
}
addr->hash = UNIX_HASH_SIZE;
- hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
+ hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
spin_lock(&unix_table_lock);
u->path = path;
list = &unix_socket_table[hash];
@@ -1442,8 +1437,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
* Send AF_UNIX data.
*/
-static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
@@ -1622,8 +1617,8 @@ out:
*/
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
-static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk = sock->sk;
struct sock *other = NULL;
@@ -1725,8 +1720,8 @@ out_err:
return sent ? : err;
}
-static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
int err;
struct sock *sk = sock->sk;
@@ -1741,19 +1736,18 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (msg->msg_namelen)
msg->msg_namelen = 0;
- return unix_dgram_sendmsg(kiocb, sock, msg, len);
+ return unix_dgram_sendmsg(sock, msg, len);
}
-static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size,
- int flags)
+static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct sock *sk = sock->sk;
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
- return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
+ return unix_dgram_recvmsg(sock, msg, size, flags);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
@@ -1766,9 +1760,8 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
}
}
-static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size,
- int flags)
+static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct scm_cookie scm;
struct sock *sk = sock->sk;
@@ -1900,9 +1893,8 @@ static unsigned int unix_skb_len(const struct sk_buff *skb)
return skb->len - UNIXCB(skb).consumed;
}
-static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size,
- int flags)
+static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
{
struct scm_cookie scm;
struct sock *sk = sock->sk;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index ef542fbca9fe..c512f64d5287 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -25,7 +25,7 @@ static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
if (dentry) {
struct unix_diag_vfs uv = {
- .udiag_vfs_ino = dentry->d_inode->i_ino,
+ .udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
.udiag_vfs_dev = dentry->d_sb->s_dev,
};
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 99f7012b23b9..a73a226f2d33 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -95,39 +95,36 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
unsigned int unix_tot_inflight;
-
struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = file_inode(filp);
- /*
- * Socket ?
- */
+ /* Socket ? */
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
struct socket *sock = SOCKET_I(inode);
struct sock *s = sock->sk;
- /*
- * PF_UNIX ?
- */
+ /* PF_UNIX ? */
if (s && sock->ops && sock->ops->family == PF_UNIX)
u_sock = s;
}
return u_sock;
}
-/*
- * Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
+/* Keep track of the number of times an AF_UNIX socket's file
+ * descriptor is in flight.
*/
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
+
if (s) {
struct unix_sock *u = unix_sk(s);
+
spin_lock(&unix_gc_lock);
+
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
@@ -142,10 +139,13 @@ void unix_inflight(struct file *fp)
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
+
if (s) {
struct unix_sock *u = unix_sk(s);
+
spin_lock(&unix_gc_lock);
BUG_ON(list_empty(&u->link));
+
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
unix_tot_inflight--;
@@ -161,32 +161,27 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
- /*
- * Do we have file descriptors ?
- */
+ /* Do we have file descriptors ? */
if (UNIXCB(skb).fp) {
bool hit = false;
- /*
- * Process the descriptors of this socket
- */
+ /* Process the descriptors of this socket */
int nfd = UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
+
while (nfd--) {
- /*
- * Get the socket the fd matches
- * if it indeed does so
- */
+ /* Get the socket the fd matches, if it is indeed a socket */
struct sock *sk = unix_get_socket(*fp++);
+
if (sk) {
struct unix_sock *u = unix_sk(sk);
- /*
- * Ignore non-candidates, they could
+ /* Ignore non-candidates, they could
* have been added to the queues after
* starting the garbage collection
*/
if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
hit = true;
+
func(u);
}
}
@@ -203,24 +198,22 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
- if (x->sk_state != TCP_LISTEN)
+ if (x->sk_state != TCP_LISTEN) {
scan_inflight(x, func, hitlist);
- else {
+ } else {
struct sk_buff *skb;
struct sk_buff *next;
struct unix_sock *u;
LIST_HEAD(embryos);
- /*
- * For a listening socket collect the queued embryos
+ /* For a listening socket collect the queued embryos
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
- /*
- * An embryo cannot be in-flight, so it's safe
+ /* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
BUG_ON(!list_empty(&u->link));
@@ -249,8 +242,7 @@ static void inc_inflight(struct unix_sock *usk)
static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
- /*
- * If this still might be part of a cycle, move it to the end
+ /* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
*/
@@ -263,8 +255,7 @@ static bool gc_in_progress;
void wait_for_unix_gc(void)
{
- /*
- * If number of inflight sockets is insane,
+ /* If the number of inflight sockets is insane,
* force a garbage collect right now.
*/
if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
@@ -288,8 +279,7 @@ void unix_gc(void)
goto out;
gc_in_progress = true;
- /*
- * First, select candidates for garbage collection. Only
+ /* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
@@ -320,15 +310,13 @@ void unix_gc(void)
}
}
- /*
- * Now remove all internal in-flight reference to children of
+ /* Now remove all internal in-flight references to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
- /*
- * Restore the references for children of all candidates,
+ /* Restore the references for children of all candidates,
* which have remaining references. Do this recursively, so
* only those remain, which form cyclic references.
*
@@ -350,8 +338,7 @@ void unix_gc(void)
}
list_del(&cursor);
- /*
- * not_cycle_list contains those sockets which do not make up a
+ /* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
while (!list_empty(&not_cycle_list)) {
@@ -360,8 +347,7 @@ void unix_gc(void)
list_move_tail(&u->link, &gc_inflight_list);
}
- /*
- * Now gc_candidates contains only garbage. Restore original
+ /* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 1d0e39c9a3e2..2ec86e652a19 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -949,8 +949,8 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
return mask;
}
-static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
int err;
struct sock *sk;
@@ -1062,11 +1062,10 @@ out:
return err;
}
-static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
{
- return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
- flags);
+ return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
}
static const struct proto_ops vsock_dgram_ops = {
@@ -1505,8 +1504,8 @@ static int vsock_stream_getsockopt(struct socket *sock,
return 0;
}
-static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk;
struct vsock_sock *vsk;
@@ -1644,9 +1643,8 @@ out:
static int
-vsock_stream_recvmsg(struct kiocb *kiocb,
- struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
struct sock *sk;
struct vsock_sock *vsk;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 7f3255084a6c..c294da095461 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1730,8 +1730,7 @@ static int vmci_transport_dgram_enqueue(
return err - sizeof(*dg);
}
-static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
- struct vsock_sock *vsk,
+static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg, size_t len,
int flags)
{
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 29c8675f9a11..4f5543dd2524 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,13 +175,21 @@ config CFG80211_INTERNAL_REGDB
Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_WEXT
- bool "cfg80211 wireless extensions compatibility"
+ bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
depends on CFG80211
select WEXT_CORE
+ default y if CFG80211_WEXT_EXPORT
help
Enable this option if you need old userspace for wireless
extensions with cfg80211-based drivers.
+config CFG80211_WEXT_EXPORT
+ bool
+ depends on CFG80211
+ help
+ Drivers should select this option if they require cfg80211's
+ wext compatibility symbols to be exported.
+
config LIB80211
tristate
default n
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index e24fc585c883..4c55fab9b4e4 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -30,7 +30,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
return;
bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
- WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+ IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY);
if (WARN_ON(!bss))
return;
@@ -533,7 +533,7 @@ int cfg80211_ibss_wext_giwap(struct net_device *dev,
else if (wdev->wext.ibss.bssid)
memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
else
- memset(ap_addr->sa_data, 0, ETH_ALEN);
+ eth_zero_addr(ap_addr->sa_data);
wdev_unlock(wdev);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 2c52b59e43f3..7aae329e2b4e 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -229,7 +229,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
return -EALREADY;
req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
if (!req.bss)
return -ENOENT;
@@ -296,7 +297,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
rdev->wiphy.vht_capa_mod_mask);
req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
if (!req->bss)
return -ENOENT;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b6f84f6a2a09..dd78445c7d50 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -399,6 +399,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
[NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
[NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
+ [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -1098,8 +1099,6 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
return -ENOBUFS;
- /* TODO: send wowlan net detect */
-
nla_nest_end(msg, nl_wowlan);
return 0;
@@ -2668,7 +2667,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
wdev = rdev_add_virtual_intf(rdev,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
- type, err ? NULL : &flags, &params);
+ NET_NAME_USER, type, err ? NULL : &flags,
+ &params);
if (WARN_ON(!wdev)) {
nlmsg_free(msg);
return -EPROTO;
@@ -4968,7 +4968,10 @@ static int parse_reg_rule(struct nlattr *tb[],
static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
char *data = NULL;
+ bool is_indoor;
enum nl80211_user_reg_hint_type user_reg_hint_type;
+ u32 owner_nlportid;
+
/*
* You should only get this when cfg80211 hasn't yet initialized
@@ -4994,7 +4997,15 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
return regulatory_hint_user(data, user_reg_hint_type);
case NL80211_USER_REG_HINT_INDOOR:
- return regulatory_hint_indoor_user();
+ if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+ owner_nlportid = info->snd_portid;
+ is_indoor = !!info->attrs[NL80211_ATTR_REG_INDOOR];
+ } else {
+ owner_nlportid = 0;
+ is_indoor = true;
+ }
+
+ return regulatory_hint_indoor(is_indoor, owner_nlportid);
default:
return -EINVAL;
}
@@ -5275,7 +5286,7 @@ do { \
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
0, 65535, mask,
NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 1, 0xffffffff,
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff,
mask, NL80211_MESHCONF_PLINK_TIMEOUT,
nla_get_u32);
if (mask_out)
@@ -5653,7 +5664,7 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
}
}
- r = set_regdom(rd);
+ r = set_regdom(rd, REGD_SOURCE_CRDA);
/* set_regdom took ownership */
rd = NULL;
@@ -5693,8 +5704,8 @@ static int nl80211_parse_random_mac(struct nlattr **attrs,
int i;
if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) {
- memset(mac_addr, 0, ETH_ALEN);
- memset(mac_addr_mask, 0, ETH_ALEN);
+ eth_zero_addr(mac_addr);
+ eth_zero_addr(mac_addr_mask);
mac_addr[0] = 0x2;
mac_addr_mask[0] = 0x3;
@@ -7275,8 +7286,18 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
break;
case NL80211_CHAN_WIDTH_20:
case NL80211_CHAN_WIDTH_40:
- if (rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)
- break;
+ if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS))
+ return -EINVAL;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS))
+ return -EINVAL;
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_VHT_IBSS))
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -7389,8 +7410,8 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
static struct sk_buff *
__cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
- int approxlen, u32 portid, u32 seq,
- enum nl80211_commands cmd,
+ struct wireless_dev *wdev, int approxlen,
+ u32 portid, u32 seq, enum nl80211_commands cmd,
enum nl80211_attrs attr,
const struct nl80211_vendor_cmd_info *info,
gfp_t gfp)
@@ -7421,6 +7442,16 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
+ if (wdev) {
+ if (nla_put_u64(skb, NL80211_ATTR_WDEV,
+ wdev_id(wdev)))
+ goto nla_put_failure;
+ if (wdev->netdev &&
+ nla_put_u32(skb, NL80211_ATTR_IFINDEX,
+ wdev->netdev->ifindex))
+ goto nla_put_failure;
+ }
+
data = nla_nest_start(skb, attr);
((void **)skb->cb)[0] = rdev;
@@ -7435,6 +7466,7 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
}
struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
int vendor_event_idx,
@@ -7460,7 +7492,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
return NULL;
}
- return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0,
+ return __cfg80211_alloc_vendor_skb(rdev, wdev, approxlen, 0, 0,
cmd, attr, info, gfp);
}
EXPORT_SYMBOL(__cfg80211_alloc_event_skb);
@@ -8761,8 +8793,8 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
if (!nl_tcp)
return -ENOBUFS;
- if (nla_put_be32(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) ||
- nla_put_be32(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) ||
+ if (nla_put_in_addr(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) ||
+ nla_put_in_addr(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) ||
nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) ||
nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) ||
nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) ||
@@ -8808,6 +8840,9 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval))
return -ENOBUFS;
+ if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
+ return -ENOBUFS;
+
freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
if (!freqs)
return -ENOBUFS;
@@ -8993,8 +9028,8 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
cfg = kzalloc(size, GFP_KERNEL);
if (!cfg)
return -ENOMEM;
- cfg->src = nla_get_be32(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
- cfg->dst = nla_get_be32(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
+ cfg->src = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
+ cfg->dst = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]),
ETH_ALEN);
if (tb[NL80211_WOWLAN_TCP_SRC_PORT])
@@ -9094,6 +9129,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan;
int err, i;
bool prev_enabled = rdev->wiphy.wowlan_config;
+ bool regular = false;
if (!wowlan)
return -EOPNOTSUPP;
@@ -9121,12 +9157,14 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
if (!(wowlan->flags & WIPHY_WOWLAN_DISCONNECT))
return -EINVAL;
new_triggers.disconnect = true;
+ regular = true;
}
if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) {
if (!(wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT))
return -EINVAL;
new_triggers.magic_pkt = true;
+ regular = true;
}
if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED])
@@ -9136,24 +9174,28 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
if (!(wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE))
return -EINVAL;
new_triggers.gtk_rekey_failure = true;
+ regular = true;
}
if (tb[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST]) {
if (!(wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ))
return -EINVAL;
new_triggers.eap_identity_req = true;
+ regular = true;
}
if (tb[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE]) {
if (!(wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE))
return -EINVAL;
new_triggers.four_way_handshake = true;
+ regular = true;
}
if (tb[NL80211_WOWLAN_TRIG_RFKILL_RELEASE]) {
if (!(wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE))
return -EINVAL;
new_triggers.rfkill_release = true;
+ regular = true;
}
if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
@@ -9162,6 +9204,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
int rem, pat_len, mask_len, pkt_offset;
struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
+ regular = true;
+
nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
rem)
n_patterns++;
@@ -9223,6 +9267,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
}
if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) {
+ regular = true;
err = nl80211_parse_wowlan_tcp(
rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION],
&new_triggers);
@@ -9231,6 +9276,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
}
if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) {
+ regular = true;
err = nl80211_parse_wowlan_nd(
rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT],
&new_triggers);
@@ -9238,6 +9284,17 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
goto error;
}
+ /* The 'any' trigger means the device continues operating more or less
+ * as in its normal operation mode and wakes up the host on most of the
+ * normal interrupts (like packet RX, ...).
+ * It therefore makes little sense to combine it with the more constrained
+ * wakeup trigger modes.
+ */
+ if (new_triggers.any && regular) {
+ err = -EINVAL;
+ goto error;
+ }
+
ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
if (!ntrig) {
err = -ENOMEM;
@@ -9906,7 +9963,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
if (WARN_ON(!rdev->cur_cmd_info))
return NULL;
- return __cfg80211_alloc_vendor_skb(rdev, approxlen,
+ return __cfg80211_alloc_vendor_skb(rdev, NULL, approxlen,
rdev->cur_cmd_info->snd_portid,
rdev->cur_cmd_info->snd_seq,
cmd, attr, NULL, GFP_KERNEL);
@@ -12775,6 +12832,11 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
rcu_read_unlock();
+ /*
+ * It is possible that the user space process that is controlling the
+ * indoor setting disappeared, so notify the regulatory core.
+ */
+ regulatory_netlink_notify(notify->portid);
return NOTIFY_OK;
}
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 35cfb7134bdb..c6e83a7468c0 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -35,13 +35,14 @@ static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev,
static inline struct wireless_dev
*rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
struct wireless_dev *ret;
trace_rdev_add_virtual_intf(&rdev->wiphy, name, type);
- ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, type, flags,
- params);
+ ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, name_assign_type,
+ type, flags, params);
trace_rdev_return_wdev(&rdev->wiphy, ret);
return ret;
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 48dfc7b4e981..0e347f888fe9 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -82,17 +82,12 @@
* be intersected with the current one.
* @REG_REQ_ALREADY_SET: the regulatory request will not change the current
* regulatory settings, and no further processing is required.
- * @REG_REQ_USER_HINT_HANDLED: a non alpha2 user hint was handled and no
- * further processing is required, i.e., not need to update last_request
- * etc. This should be used for user hints that do not provide an alpha2
- * but some other type of regulatory hint, i.e., indoor operation.
*/
enum reg_request_treatment {
REG_REQ_OK,
REG_REQ_IGNORE,
REG_REQ_INTERSECT,
REG_REQ_ALREADY_SET,
- REG_REQ_USER_HINT_HANDLED,
};
static struct regulatory_request core_request_world = {
@@ -133,9 +128,17 @@ static int reg_num_devs_support_basehint;
* State variable indicating if the platform on which the devices
* are attached is operating in an indoor environment. The state variable
* is relevant for all registered devices.
- * (protected by RTNL)
*/
static bool reg_is_indoor;
+static spinlock_t reg_indoor_lock;
+
+/* Used to track the userspace process controlling the indoor setting */
+static u32 reg_is_indoor_portid;
+
+/* Max number of consecutive attempts to communicate with CRDA */
+#define REG_MAX_CRDA_TIMEOUTS 10
+
+static u32 reg_crda_timeouts;
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
@@ -487,7 +490,7 @@ static void reg_regdb_search(struct work_struct *work)
mutex_unlock(&reg_regdb_search_mutex);
if (!IS_ERR_OR_NULL(regdom))
- set_regdom(regdom);
+ set_regdom(regdom, REGD_SOURCE_INTERNAL_DB);
rtnl_unlock();
}
@@ -537,15 +540,20 @@ static int call_crda(const char *alpha2)
snprintf(country, sizeof(country), "COUNTRY=%c%c",
alpha2[0], alpha2[1]);
+ /* query internal regulatory database (if it exists) */
+ reg_regdb_query(alpha2);
+
+ if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
+ pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n");
+ return -EINVAL;
+ }
+
if (!is_world_regdom((char *) alpha2))
pr_info("Calling CRDA for country: %c%c\n",
alpha2[0], alpha2[1]);
else
pr_info("Calling CRDA to update world regulatory domain\n");
- /* query internal regulatory database (if it exists) */
- reg_regdb_query(alpha2);
-
return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
}
@@ -554,6 +562,9 @@ reg_call_crda(struct regulatory_request *request)
{
if (call_crda(request->alpha2))
return REG_REQ_IGNORE;
+
+ queue_delayed_work(system_power_efficient_wq,
+ &reg_timeout, msecs_to_jiffies(3142));
return REG_REQ_OK;
}
@@ -1248,13 +1259,6 @@ static bool reg_request_cell_base(struct regulatory_request *request)
return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
}
-static bool reg_request_indoor(struct regulatory_request *request)
-{
- if (request->initiator != NL80211_REGDOM_SET_BY_USER)
- return false;
- return request->user_reg_hint_type == NL80211_USER_REG_HINT_INDOOR;
-}
-
bool reg_last_request_cell_base(void)
{
return reg_request_cell_base(get_last_request());
@@ -1800,8 +1804,7 @@ static void reg_set_request_processed(void)
need_more_processing = true;
spin_unlock(&reg_requests_lock);
- if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
- cancel_delayed_work(&reg_timeout);
+ cancel_delayed_work(&reg_timeout);
if (need_more_processing)
schedule_work(&reg_work);
@@ -1833,11 +1836,6 @@ __reg_process_hint_user(struct regulatory_request *user_request)
{
struct regulatory_request *lr = get_last_request();
- if (reg_request_indoor(user_request)) {
- reg_is_indoor = true;
- return REG_REQ_USER_HINT_HANDLED;
- }
-
if (reg_request_cell_base(user_request))
return reg_ignore_cell_hint(user_request);
@@ -1885,8 +1883,7 @@ reg_process_hint_user(struct regulatory_request *user_request)
treatment = __reg_process_hint_user(user_request);
if (treatment == REG_REQ_IGNORE ||
- treatment == REG_REQ_ALREADY_SET ||
- treatment == REG_REQ_USER_HINT_HANDLED) {
+ treatment == REG_REQ_ALREADY_SET) {
reg_free_request(user_request);
return treatment;
}
@@ -1947,7 +1944,6 @@ reg_process_hint_driver(struct wiphy *wiphy,
case REG_REQ_OK:
break;
case REG_REQ_IGNORE:
- case REG_REQ_USER_HINT_HANDLED:
reg_free_request(driver_request);
return treatment;
case REG_REQ_INTERSECT:
@@ -2047,7 +2043,6 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
case REG_REQ_OK:
break;
case REG_REQ_IGNORE:
- case REG_REQ_USER_HINT_HANDLED:
/* fall through */
case REG_REQ_ALREADY_SET:
reg_free_request(country_ie_request);
@@ -2086,11 +2081,8 @@ static void reg_process_hint(struct regulatory_request *reg_request)
case NL80211_REGDOM_SET_BY_USER:
treatment = reg_process_hint_user(reg_request);
if (treatment == REG_REQ_IGNORE ||
- treatment == REG_REQ_ALREADY_SET ||
- treatment == REG_REQ_USER_HINT_HANDLED)
+ treatment == REG_REQ_ALREADY_SET)
return;
- queue_delayed_work(system_power_efficient_wq,
- &reg_timeout, msecs_to_jiffies(3142));
return;
case NL80211_REGDOM_SET_BY_DRIVER:
if (!wiphy)
@@ -2177,6 +2169,13 @@ static void reg_process_pending_hints(void)
}
reg_process_hint(reg_request);
+
+ lr = get_last_request();
+
+ spin_lock(&reg_requests_lock);
+ if (!list_empty(&reg_requests_list) && lr && lr->processed)
+ schedule_work(&reg_work);
+ spin_unlock(&reg_requests_lock);
}
/* Processes beacon hints -- this has nothing to do with country IEs */
@@ -2304,27 +2303,58 @@ int regulatory_hint_user(const char *alpha2,
request->initiator = NL80211_REGDOM_SET_BY_USER;
request->user_reg_hint_type = user_reg_hint_type;
+ /* Allow calling CRDA again */
+ reg_crda_timeouts = 0;
+
queue_regulatory_request(request);
return 0;
}
-int regulatory_hint_indoor_user(void)
+int regulatory_hint_indoor(bool is_indoor, u32 portid)
{
- struct regulatory_request *request;
+ spin_lock(&reg_indoor_lock);
- request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
- if (!request)
- return -ENOMEM;
+ /* It is possible that more than one user space process is trying to
+ * configure the indoor setting. To handle such cases, clear the indoor
+ * setting if some process does not think that the device
+ * is operating in an indoor environment. In addition, if a user space
+ * process indicates that it is controlling the indoor setting, save its
+ * portid, i.e., make it the owner.
+ */
+ reg_is_indoor = is_indoor;
+ if (reg_is_indoor) {
+ if (!reg_is_indoor_portid)
+ reg_is_indoor_portid = portid;
+ } else {
+ reg_is_indoor_portid = 0;
+ }
- request->wiphy_idx = WIPHY_IDX_INVALID;
- request->initiator = NL80211_REGDOM_SET_BY_USER;
- request->user_reg_hint_type = NL80211_USER_REG_HINT_INDOOR;
- queue_regulatory_request(request);
+ spin_unlock(&reg_indoor_lock);
+
+ if (!is_indoor)
+ reg_check_channels();
return 0;
}
+void regulatory_netlink_notify(u32 portid)
+{
+ spin_lock(&reg_indoor_lock);
+
+ if (reg_is_indoor_portid != portid) {
+ spin_unlock(&reg_indoor_lock);
+ return;
+ }
+
+ reg_is_indoor = false;
+ reg_is_indoor_portid = 0;
+
+ spin_unlock(&reg_indoor_lock);
+
+ reg_check_channels();
+}
+
/* Driver hints */
int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
{
@@ -2345,6 +2375,9 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
+ /* Allow calling CRDA again */
+ reg_crda_timeouts = 0;
+
queue_regulatory_request(request);
return 0;
@@ -2398,6 +2431,9 @@ void regulatory_hint_country_ie(struct wiphy *wiphy, enum ieee80211_band band,
request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
request->country_ie_env = env;
+ /* Allow calling CRDA again */
+ reg_crda_timeouts = 0;
+
queue_regulatory_request(request);
request = NULL;
out:
@@ -2486,13 +2522,22 @@ static void restore_regulatory_settings(bool reset_user)
char alpha2[2];
char world_alpha2[2];
struct reg_beacon *reg_beacon, *btmp;
- struct regulatory_request *reg_request, *tmp;
LIST_HEAD(tmp_reg_req_list);
struct cfg80211_registered_device *rdev;
ASSERT_RTNL();
- reg_is_indoor = false;
+ /*
+ * Clear the indoor setting if it is not controlled by user
+ * space, as otherwise there is no guarantee that the device is still
+ * operating in an indoor environment.
+ */
+ spin_lock(&reg_indoor_lock);
+ if (reg_is_indoor && !reg_is_indoor_portid) {
+ reg_is_indoor = false;
+ reg_check_channels();
+ }
+ spin_unlock(&reg_indoor_lock);
reset_regdomains(true, &world_regdom);
restore_alpha2(alpha2, reset_user);
@@ -2504,11 +2549,7 @@ static void restore_regulatory_settings(bool reset_user)
* settings.
*/
spin_lock(&reg_requests_lock);
- list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) {
- if (reg_request->initiator != NL80211_REGDOM_SET_BY_USER)
- continue;
- list_move_tail(&reg_request->list, &tmp_reg_req_list);
- }
+ list_splice_tail_init(&reg_requests_list, &tmp_reg_req_list);
spin_unlock(&reg_requests_lock);
/* Clear beacon hints */
@@ -2871,7 +2912,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
* multiple drivers can be ironed out later. Caller must've already
* kmalloc'd the rd structure.
*/
-int set_regdom(const struct ieee80211_regdomain *rd)
+int set_regdom(const struct ieee80211_regdomain *rd,
+ enum ieee80211_regd_source regd_src)
{
struct regulatory_request *lr;
bool user_reset = false;
@@ -2882,6 +2924,9 @@ int set_regdom(const struct ieee80211_regdomain *rd)
return -EINVAL;
}
+ if (regd_src == REGD_SOURCE_CRDA)
+ reg_crda_timeouts = 0;
+
lr = get_last_request();
/* Note that this doesn't update the wiphys, this is done below */
@@ -3041,6 +3086,7 @@ static void reg_timeout_work(struct work_struct *work)
{
REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
rtnl_lock();
+ reg_crda_timeouts++;
restore_regulatory_settings(true);
rtnl_unlock();
}
@@ -3089,6 +3135,7 @@ int __init regulatory_init(void)
spin_lock_init(&reg_requests_lock);
spin_lock_init(&reg_pending_beacons_lock);
+ spin_lock_init(&reg_indoor_lock);
reg_regdb_size_check();
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 4b45d6e61d24..9f495d76eca0 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -16,6 +16,11 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+enum ieee80211_regd_source {
+ REGD_SOURCE_INTERNAL_DB,
+ REGD_SOURCE_CRDA,
+};
+
extern const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
bool reg_is_valid_request(const char *alpha2);
@@ -25,7 +30,20 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
int regulatory_hint_user(const char *alpha2,
enum nl80211_user_reg_hint_type user_reg_hint_type);
-int regulatory_hint_indoor_user(void);
+
+/**
+ * regulatory_hint_indoor - hint whether operation is in an indoor environment
+ * @is_indoor: if true, indicates that user space thinks that the
+ * device is operating in an indoor environment.
+ * @portid: the netlink port ID on which the hint was given.
+ */
+int regulatory_hint_indoor(bool is_indoor, u32 portid);
+
+/**
+ * regulatory_netlink_notify - notify on released netlink socket
+ * @portid: the netlink socket port ID
+ */
+void regulatory_netlink_notify(u32 portid);
void wiphy_regulatory_register(struct wiphy *wiphy);
void wiphy_regulatory_deregister(struct wiphy *wiphy);
@@ -33,7 +51,9 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy);
int __init regulatory_init(void);
void regulatory_exit(void);
-int set_regdom(const struct ieee80211_regdomain *rd);
+int set_regdom(const struct ieee80211_regdomain *rd,
+ enum ieee80211_regd_source regd_src);
+
unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
const struct ieee80211_reg_rule *rule);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index c705c3e2b751..3a50aa2553bf 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -531,24 +531,78 @@ static int cmp_bss(struct cfg80211_bss *a,
}
}
+static bool cfg80211_bss_type_match(u16 capability,
+ enum ieee80211_band band,
+ enum ieee80211_bss_type bss_type)
+{
+ bool ret = true;
+ u16 mask, val;
+
+ if (bss_type == IEEE80211_BSS_TYPE_ANY)
+ return ret;
+
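+ /* 60 GHz (DMG) networks encode the BSS type in dedicated capability bits */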
+ if (band == IEEE80211_BAND_60GHZ) {
+ mask = WLAN_CAPABILITY_DMG_TYPE_MASK;
+ switch (bss_type) {
+ case IEEE80211_BSS_TYPE_ESS:
+ val = WLAN_CAPABILITY_DMG_TYPE_AP;
+ break;
+ case IEEE80211_BSS_TYPE_PBSS:
+ val = WLAN_CAPABILITY_DMG_TYPE_PBSS;
+ break;
+ case IEEE80211_BSS_TYPE_IBSS:
+ val = WLAN_CAPABILITY_DMG_TYPE_IBSS;
+ break;
+ default:
+ return false;
+ }
+ } else {
+ mask = WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS;
+ switch (bss_type) {
+ case IEEE80211_BSS_TYPE_ESS:
+ val = WLAN_CAPABILITY_ESS;
+ break;
+ case IEEE80211_BSS_TYPE_IBSS:
+ val = WLAN_CAPABILITY_IBSS;
+ break;
+ case IEEE80211_BSS_TYPE_MBSS:
+ val = 0;
+ break;
+ default:
+ return false;
+ }
+ }
+
+ ret = ((capability & mask) == val);
+ return ret;
+}
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
const u8 *ssid, size_t ssid_len,
- u16 capa_mask, u16 capa_val)
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct cfg80211_internal_bss *bss, *res = NULL;
unsigned long now = jiffies;
+ int bss_privacy;
- trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
- capa_val);
+ trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, bss_type,
+ privacy);
spin_lock_bh(&rdev->bss_lock);
list_for_each_entry(bss, &rdev->bss_list, list) {
- if ((bss->pub.capability & capa_mask) != capa_val)
+ if (!cfg80211_bss_type_match(bss->pub.capability,
+ bss->pub.channel->band, bss_type))
+ continue;
+
+ bss_privacy = (bss->pub.capability & WLAN_CAPABILITY_PRIVACY);
+ if ((privacy == IEEE80211_PRIVACY_ON && !bss_privacy) ||
+ (privacy == IEEE80211_PRIVACY_OFF && bss_privacy))
continue;
if (channel && bss->pub.channel != channel)
continue;
@@ -896,6 +950,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
struct cfg80211_bss_ies *ies;
struct ieee80211_channel *channel;
struct cfg80211_internal_bss tmp = {}, *res;
+ int bss_type;
bool signal_valid;
if (WARN_ON(!wiphy))
@@ -950,8 +1005,15 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
if (!res)
return NULL;
- if (res->pub.capability & WLAN_CAPABILITY_ESS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
+ if (channel->band == IEEE80211_BAND_60GHZ) {
+ bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
+ if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
+ bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
+ regulatory_hint_found_beacon(wiphy, channel, gfp);
+ } else {
+ if (res->pub.capability & WLAN_CAPABILITY_ESS)
+ regulatory_hint_found_beacon(wiphy, channel, gfp);
+ }
trace_cfg80211_return_bss(&res->pub);
/* cfg80211_bss_update gives us a referenced result */
@@ -973,6 +1035,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
bool signal_valid;
size_t ielen = len - offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
+ int bss_type;
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
offsetof(struct ieee80211_mgmt, u.beacon.variable));
@@ -1025,8 +1088,15 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
if (!res)
return NULL;
- if (res->pub.capability & WLAN_CAPABILITY_ESS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
+ if (channel->band == IEEE80211_BAND_60GHZ) {
+ bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
+ if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
+ bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
+ regulatory_hint_found_beacon(wiphy, channel, gfp);
+ } else {
+ if (res->pub.capability & WLAN_CAPABILITY_ESS)
+ regulatory_hint_found_beacon(wiphy, channel, gfp);
+ }
trace_cfg80211_return_bss(&res->pub);
/* cfg80211_bss_update gives us a referenced result */
@@ -1237,17 +1307,17 @@ int cfg80211_wext_siwscan(struct net_device *dev,
kfree(creq);
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwscan);
-static void ieee80211_scan_add_ies(struct iw_request_info *info,
- const struct cfg80211_bss_ies *ies,
- char **current_ev, char *end_buf)
+static char *ieee80211_scan_add_ies(struct iw_request_info *info,
+ const struct cfg80211_bss_ies *ies,
+ char *current_ev, char *end_buf)
{
const u8 *pos, *end, *next;
struct iw_event iwe;
if (!ies)
- return;
+ return current_ev;
/*
* If needed, fragment the IEs buffer (at IE boundaries) into short
@@ -1264,10 +1334,11 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVGENIE;
iwe.u.data.length = next - pos;
- *current_ev = iwe_stream_add_point(info, *current_ev,
- end_buf, &iwe,
- (void *)pos);
-
+ current_ev = iwe_stream_add_point_check(info, current_ev,
+ end_buf, &iwe,
+ (void *)pos);
+ if (IS_ERR(current_ev))
+ return current_ev;
pos = next;
}
@@ -1275,10 +1346,14 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVGENIE;
iwe.u.data.length = end - pos;
- *current_ev = iwe_stream_add_point(info, *current_ev,
- end_buf, &iwe,
- (void *)pos);
+ current_ev = iwe_stream_add_point_check(info, current_ev,
+ end_buf, &iwe,
+ (void *)pos);
+ if (IS_ERR(current_ev))
+ return current_ev;
}
+
+ return current_ev;
}
static char *
@@ -1289,7 +1364,8 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
const struct cfg80211_bss_ies *ies;
struct iw_event iwe;
const u8 *ie;
- u8 *buf, *cfg, *p;
+ u8 buf[50];
+ u8 *cfg, *p, *tmp;
int rem, i, sig;
bool ismesh = false;
@@ -1297,22 +1373,28 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, bss->pub.bssid, ETH_ALEN);
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_ADDR_LEN);
+ current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe,
+ IW_EV_ADDR_LEN);
+ if (IS_ERR(current_ev))
+ return current_ev;
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = ieee80211_frequency_to_channel(bss->pub.channel->center_freq);
iwe.u.freq.e = 0;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_FREQ_LEN);
+ current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe,
+ IW_EV_FREQ_LEN);
+ if (IS_ERR(current_ev))
+ return current_ev;
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = bss->pub.channel->center_freq;
iwe.u.freq.e = 6;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_FREQ_LEN);
+ current_ev = iwe_stream_add_event_check(info, current_ev, end_buf, &iwe,
+ IW_EV_FREQ_LEN);
+ if (IS_ERR(current_ev))
+ return current_ev;
if (wiphy->signal_type != CFG80211_SIGNAL_TYPE_NONE) {
memset(&iwe, 0, sizeof(iwe));
@@ -1341,8 +1423,11 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
/* not reached */
break;
}
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
+ current_ev = iwe_stream_add_event_check(info, current_ev,
+ end_buf, &iwe,
+ IW_EV_QUAL_LEN);
+ if (IS_ERR(current_ev))
+ return current_ev;
}
memset(&iwe, 0, sizeof(iwe));
@@ -1352,8 +1437,10 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, "");
+ current_ev = iwe_stream_add_point_check(info, current_ev, end_buf,
+ &iwe, "");
+ if (IS_ERR(current_ev))
+ return current_ev;
rcu_read_lock();
ies = rcu_dereference(bss->pub.ies);
@@ -1371,66 +1458,91 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
iwe.cmd = SIOCGIWESSID;
iwe.u.data.length = ie[1];
iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, (u8 *)ie + 2);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf, &iwe,
+ (u8 *)ie + 2);
+ if (IS_ERR(current_ev))
+ goto unlock;
break;
case WLAN_EID_MESH_ID:
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWESSID;
iwe.u.data.length = ie[1];
iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, (u8 *)ie + 2);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf, &iwe,
+ (u8 *)ie + 2);
+ if (IS_ERR(current_ev))
+ goto unlock;
break;
case WLAN_EID_MESH_CONFIG:
ismesh = true;
if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
break;
- buf = kmalloc(50, GFP_ATOMIC);
- if (!buf)
- break;
cfg = (u8 *)ie + 2;
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
sprintf(buf, "Mesh Network Path Selection Protocol ID: "
"0x%02X", cfg[0]);
iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf,
- &iwe, buf);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
sprintf(buf, "Path Selection Metric ID: 0x%02X",
cfg[1]);
iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf,
- &iwe, buf);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
sprintf(buf, "Congestion Control Mode ID: 0x%02X",
cfg[2]);
iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf,
- &iwe, buf);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
sprintf(buf, "Synchronization ID: 0x%02X", cfg[3]);
iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf,
- &iwe, buf);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
sprintf(buf, "Authentication ID: 0x%02X", cfg[4]);
iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf,
- &iwe, buf);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
sprintf(buf, "Formation Info: 0x%02X", cfg[5]);
iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf,
- &iwe, buf);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
sprintf(buf, "Capabilities: 0x%02X", cfg[6]);
iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf,
- &iwe, buf);
- kfree(buf);
+ current_ev = iwe_stream_add_point_check(info,
+ current_ev,
+ end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
break;
case WLAN_EID_SUPP_RATES:
case WLAN_EID_EXT_SUPP_RATES:
@@ -1445,8 +1557,14 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
for (i = 0; i < ie[1]; i++) {
iwe.u.bitrate.value =
((ie[i + 2] & 0x7f) * 500000);
+ tmp = p;
p = iwe_stream_add_value(info, current_ev, p,
- end_buf, &iwe, IW_EV_PARAM_LEN);
+ end_buf, &iwe,
+ IW_EV_PARAM_LEN);
+ if (p == tmp) {
+ current_ev = ERR_PTR(-E2BIG);
+ goto unlock;
+ }
}
current_ev = p;
break;
@@ -1465,31 +1583,35 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_UINT_LEN);
- }
-
- buf = kmalloc(31, GFP_ATOMIC);
- if (buf) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, buf);
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, " Last beacon: %ums ago",
- elapsed_jiffies_msecs(bss->ts));
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf, &iwe, buf);
- kfree(buf);
+ current_ev = iwe_stream_add_event_check(info, current_ev,
+ end_buf, &iwe,
+ IW_EV_UINT_LEN);
+ if (IS_ERR(current_ev))
+ goto unlock;
}
- ieee80211_scan_add_ies(info, ies, &current_ev, end_buf);
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
+ iwe.u.data.length = strlen(buf);
+ current_ev = iwe_stream_add_point_check(info, current_ev, end_buf,
+ &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ sprintf(buf, " Last beacon: %ums ago",
+ elapsed_jiffies_msecs(bss->ts));
+ iwe.u.data.length = strlen(buf);
+ current_ev = iwe_stream_add_point_check(info, current_ev,
+ end_buf, &iwe, buf);
+ if (IS_ERR(current_ev))
+ goto unlock;
+
+ current_ev = ieee80211_scan_add_ies(info, ies, current_ev, end_buf);
+
+ unlock:
rcu_read_unlock();
-
return current_ev;
}
@@ -1501,19 +1623,27 @@ static int ieee80211_scan_results(struct cfg80211_registered_device *rdev,
char *current_ev = buf;
char *end_buf = buf + len;
struct cfg80211_internal_bss *bss;
+ int err = 0;
spin_lock_bh(&rdev->bss_lock);
cfg80211_bss_expire(rdev);
list_for_each_entry(bss, &rdev->bss_list, list) {
if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
- spin_unlock_bh(&rdev->bss_lock);
- return -E2BIG;
+ err = -E2BIG;
+ break;
}
current_ev = ieee80211_bss(&rdev->wiphy, info, bss,
current_ev, end_buf);
+ if (IS_ERR(current_ev)) {
+ err = PTR_ERR(current_ev);
+ break;
+ }
}
spin_unlock_bh(&rdev->bss_lock);
+
+ if (err)
+ return err;
return current_ev - buf;
}
@@ -1545,5 +1675,5 @@ int cfg80211_wext_giwscan(struct net_device *dev,
return res;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwscan);
#endif
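
The hunks above switch net/wireless/scan.c from the bare iwe_stream_add_*() helpers to *_check() variants that report a full buffer through the returned pointer instead of silently truncating. For reference, a minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention the new error paths rely on (illustrative helper names, not code from this patch):

#include <linux/err.h>

/* illustrative only: append one entry or flag a full buffer */
static char *add_entry_check(char *cur, char *end, long need)
{
	if (end - cur < need)
		return ERR_PTR(-E2BIG);		/* encode the error in the pointer */
	/* ... write the entry at cur ... */
	return cur + need;
}

static int fill_events(char *buf, size_t len)
{
	char *cur = buf, *end = buf + len;

	cur = add_entry_check(cur, end, 16);
	if (IS_ERR(cur))
		return PTR_ERR(cur);		/* -E2BIG propagates as an int */
	return cur - buf;			/* bytes actually written */
}
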
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 0ab3711c79a0..d11454f87bac 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -42,7 +42,7 @@ struct cfg80211_conn {
CFG80211_CONN_CONNECTED,
} state;
u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
- u8 *ie;
+ const u8 *ie;
size_t ie_len;
bool auto_auth, prev_bssid_valid;
};
@@ -257,19 +257,15 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_bss *bss;
- u16 capa = WLAN_CAPABILITY_ESS;
ASSERT_WDEV_LOCK(wdev);
- if (wdev->conn->params.privacy)
- capa |= WLAN_CAPABILITY_PRIVACY;
-
bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
wdev->conn->params.bssid,
wdev->conn->params.ssid,
wdev->conn->params.ssid_len,
- WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
- capa);
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY(wdev->conn->params.privacy));
if (!bss)
return NULL;
@@ -427,6 +423,62 @@ void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
schedule_work(&rdev->conn_work);
}
+static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev,
+ const u8 *ies, size_t ies_len,
+ const u8 **out_ies, size_t *out_ies_len)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ u8 *buf;
+ size_t offs;
+
+ if (!rdev->wiphy.extended_capabilities_len ||
+ (ies && cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, ies, ies_len))) {
+ *out_ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!*out_ies)
+ return -ENOMEM;
+ *out_ies_len = ies_len;
+ return 0;
+ }
+
+ buf = kmalloc(ies_len + rdev->wiphy.extended_capabilities_len + 2,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (ies_len) {
+ static const u8 before_extcapa[] = {
+ /* not listing IEs expected to be created by driver */
+ WLAN_EID_RSN,
+ WLAN_EID_QOS_CAPA,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES,
+ WLAN_EID_MOBILITY_DOMAIN,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ WLAN_EID_BSS_COEX_2040,
+ };
+
+ offs = ieee80211_ie_split(ies, ies_len, before_extcapa,
+ ARRAY_SIZE(before_extcapa), 0);
+ memcpy(buf, ies, offs);
+ /* leave a hole for the extended capabilities IE */
+ memcpy(buf + offs + rdev->wiphy.extended_capabilities_len + 2,
+ ies + offs, ies_len - offs);
+ } else {
+ offs = 0;
+ }
+
+ /* place extended capabilities IE (with only driver capabilities) */
+ buf[offs] = WLAN_EID_EXT_CAPABILITY;
+ buf[offs + 1] = rdev->wiphy.extended_capabilities_len;
+ memcpy(buf + offs + 2,
+ rdev->wiphy.extended_capabilities,
+ rdev->wiphy.extended_capabilities_len);
+
+ *out_ies = buf;
+ *out_ies_len = ies_len + rdev->wiphy.extended_capabilities_len + 2;
+
+ return 0;
+}
+
static int cfg80211_sme_connect(struct wireless_dev *wdev,
struct cfg80211_connect_params *connect,
const u8 *prev_bssid)
@@ -457,16 +509,14 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN);
}
- if (connect->ie) {
- wdev->conn->ie = kmemdup(connect->ie, connect->ie_len,
- GFP_KERNEL);
- wdev->conn->params.ie = wdev->conn->ie;
- if (!wdev->conn->ie) {
- kfree(wdev->conn);
- wdev->conn = NULL;
- return -ENOMEM;
- }
+ if (cfg80211_sme_get_conn_ies(wdev, connect->ie, connect->ie_len,
+ &wdev->conn->ie,
+ &wdev->conn->params.ie_len)) {
+ kfree(wdev->conn);
+ wdev->conn = NULL;
+ return -ENOMEM;
}
+ wdev->conn->params.ie = wdev->conn->ie;
if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
wdev->conn->auto_auth = true;
@@ -637,8 +687,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
wdev->ssid, wdev->ssid_len,
- WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
if (bss)
cfg80211_hold_bss(bss_from_pub(bss));
}
@@ -795,8 +845,8 @@ void cfg80211_roamed(struct net_device *dev,
struct cfg80211_bss *bss;
bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
- wdev->ssid_len, WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
+ wdev->ssid_len,
+ IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
if (WARN_ON(!bss))
return;
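
cfg80211_sme_get_conn_ies() above splices a driver-owned extended-capabilities element into the caller's IE buffer at the offset returned by ieee80211_ie_split(). For orientation, information elements are plain TLVs (one octet ID, one octet length, payload); a hedged illustrative encoder, not a helper from this patch:

/*
 * Assumed resulting layout, per the code above:
 *
 *   | user IEs before ext-capa | EID | len | ext-capa payload | remaining user IEs |
 *   |<--------- offs --------->|<------ 2 + ext_capa_len ----->|
 */
static size_t put_ie(u8 *buf, u8 eid, const u8 *data, u8 len)
{
	buf[0] = eid;			/* element ID */
	buf[1] = len;			/* payload length */
	memcpy(buf + 2, data, len);	/* payload */
	return 2 + len;			/* bytes consumed */
}
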
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index b17b3692f8c2..af3617c9879e 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -7,6 +7,7 @@
#include <linux/tracepoint.h>
#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
#include <net/cfg80211.h>
#include "core.h"
@@ -15,7 +16,7 @@
if (given_mac) \
memcpy(__entry->entry_mac, given_mac, ETH_ALEN); \
else \
- memset(__entry->entry_mac, 0, ETH_ALEN); \
+ eth_zero_addr(__entry->entry_mac); \
} while (0)
#define MAC_PR_FMT "%pM"
#define MAC_PR_ARG(entry_mac) (__entry->entry_mac)
@@ -627,6 +628,7 @@ DECLARE_EVENT_CLASS(station_add_change,
__field(u8, plink_state)
__field(u8, uapsd_queues)
__array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap))
+ __array(char, vlan, IFNAMSIZ)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -644,16 +646,19 @@ DECLARE_EVENT_CLASS(station_add_change,
if (params->ht_capa)
memcpy(__entry->ht_capa, params->ht_capa,
sizeof(struct ieee80211_ht_cap));
+ memset(__entry->vlan, 0, sizeof(__entry->vlan));
+ if (params->vlan)
+ memcpy(__entry->vlan, params->vlan->name, IFNAMSIZ);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
", station flags mask: %u, station flags set: %u, "
"station modify mask: %u, listen interval: %d, aid: %u, "
- "plink action: %u, plink state: %u, uapsd queues: %u",
+ "plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s",
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
__entry->sta_flags_mask, __entry->sta_flags_set,
__entry->sta_modify_mask, __entry->listen_interval,
__entry->aid, __entry->plink_action, __entry->plink_state,
- __entry->uapsd_queues)
+ __entry->uapsd_queues, __entry->vlan)
);
DEFINE_EVENT(station_add_change, rdev_add_station,
@@ -1077,7 +1082,7 @@ TRACE_EVENT(rdev_auth,
if (req->bss)
MAC_ASSIGN(bssid, req->bss->bssid);
else
- memset(__entry->bssid, 0, ETH_ALEN);
+ eth_zero_addr(__entry->bssid);
__entry->auth_type = req->auth_type;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT,
@@ -1103,7 +1108,7 @@ TRACE_EVENT(rdev_assoc,
if (req->bss)
MAC_ASSIGN(bssid, req->bss->bssid);
else
- memset(__entry->bssid, 0, ETH_ALEN);
+ eth_zero_addr(__entry->bssid);
MAC_ASSIGN(prev_bssid, req->prev_bssid);
__entry->use_mfp = req->use_mfp;
__entry->flags = req->flags;
@@ -1153,7 +1158,7 @@ TRACE_EVENT(rdev_disassoc,
if (req->bss)
MAC_ASSIGN(bssid, req->bss->bssid);
else
- memset(__entry->bssid, 0, ETH_ALEN);
+ eth_zero_addr(__entry->bssid);
__entry->reason_code = req->reason_code;
__entry->local_state_change = req->local_state_change;
),
@@ -2636,28 +2641,30 @@ DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_stopped,
TRACE_EVENT(cfg80211_get_bss,
TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
const u8 *bssid, const u8 *ssid, size_t ssid_len,
- u16 capa_mask, u16 capa_val),
- TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, capa_mask, capa_val),
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy),
+ TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, bss_type, privacy),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_ENTRY
MAC_ENTRY(bssid)
__dynamic_array(u8, ssid, ssid_len)
- __field(u16, capa_mask)
- __field(u16, capa_val)
+ __field(enum ieee80211_bss_type, bss_type)
+ __field(enum ieee80211_privacy, privacy)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_ASSIGN(channel);
MAC_ASSIGN(bssid, bssid);
memcpy(__get_dynamic_array(ssid), ssid, ssid_len);
- __entry->capa_mask = capa_mask;
- __entry->capa_val = capa_val;
- ),
- TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT ", buf: %#.2x, "
- "capa_mask: %d, capa_val: %u", WIPHY_PR_ARG, CHAN_PR_ARG,
- MAC_PR_ARG(bssid), ((u8 *)__get_dynamic_array(ssid))[0],
- __entry->capa_mask, __entry->capa_val)
+ __entry->bss_type = bss_type;
+ __entry->privacy = privacy;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT
+ ", buf: %#.2x, bss_type: %d, privacy: %d",
+ WIPHY_PR_ARG, CHAN_PR_ARG, MAC_PR_ARG(bssid),
+ ((u8 *)__get_dynamic_array(ssid))[0], __entry->bss_type,
+ __entry->privacy)
);
TRACE_EVENT(cfg80211_inform_bss_width_frame,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 6903dbdcb8c1..70051ab52f4f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1290,12 +1290,54 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
}
EXPORT_SYMBOL(cfg80211_get_p2p_attr);
+static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
+{
+ int i;
+
+ for (i = 0; i < n_ids; i++)
+ if (ids[i] == id)
+ return true;
+ return false;
+}
+
+size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids,
+ const u8 *after_ric, int n_after_ric,
+ size_t offset)
+{
+ size_t pos = offset;
+
+ while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) {
+ if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) {
+ pos += 2 + ies[pos + 1];
+
+ while (pos < ielen &&
+ !ieee80211_id_in_list(after_ric, n_after_ric,
+ ies[pos]))
+ pos += 2 + ies[pos + 1];
+ } else {
+ pos += 2 + ies[pos + 1];
+ }
+ }
+
+ return pos;
+}
+EXPORT_SYMBOL(ieee80211_ie_split_ric);
+
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset)
+{
+ return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset);
+}
+EXPORT_SYMBOL(ieee80211_ie_split);
+
bool ieee80211_operating_class_to_band(u8 operating_class,
enum ieee80211_band *band)
{
switch (operating_class) {
case 112:
case 115 ... 127:
+ case 128 ... 130:
*band = IEEE80211_BAND_5GHZ;
return true;
case 81:
@@ -1313,6 +1355,135 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
}
EXPORT_SYMBOL(ieee80211_operating_class_to_band);
+bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
+ u8 *op_class)
+{
+ u8 vht_opclass;
+ u16 freq = chandef->center_freq1;
+
+ if (freq >= 2412 && freq <= 2472) {
+ if (chandef->width > NL80211_CHAN_WIDTH_40)
+ return false;
+
+ /* 2.407 GHz, channels 1..13 */
+ if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ if (freq > chandef->chan->center_freq)
+ *op_class = 83; /* HT40+ */
+ else
+ *op_class = 84; /* HT40- */
+ } else {
+ *op_class = 81;
+ }
+
+ return true;
+ }
+
+ if (freq == 2484) {
+ if (chandef->width > NL80211_CHAN_WIDTH_40)
+ return false;
+
+ *op_class = 82; /* channel 14 */
+ return true;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80:
+ vht_opclass = 128;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ vht_opclass = 129;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ vht_opclass = 130;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_5:
+ return false; /* unsupported for now */
+ default:
+ vht_opclass = 0;
+ break;
+ }
+
+ /* 5 GHz, channels 36..48 */
+ if (freq >= 5180 && freq <= 5240) {
+ if (vht_opclass) {
+ *op_class = vht_opclass;
+ } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ if (freq > chandef->chan->center_freq)
+ *op_class = 116;
+ else
+ *op_class = 117;
+ } else {
+ *op_class = 115;
+ }
+
+ return true;
+ }
+
+ /* 5 GHz, channels 52..64 */
+ if (freq >= 5260 && freq <= 5320) {
+ if (vht_opclass) {
+ *op_class = vht_opclass;
+ } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ if (freq > chandef->chan->center_freq)
+ *op_class = 119;
+ else
+ *op_class = 120;
+ } else {
+ *op_class = 118;
+ }
+
+ return true;
+ }
+
+ /* 5 GHz, channels 100..144 */
+ if (freq >= 5500 && freq <= 5720) {
+ if (vht_opclass) {
+ *op_class = vht_opclass;
+ } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ if (freq > chandef->chan->center_freq)
+ *op_class = 122;
+ else
+ *op_class = 123;
+ } else {
+ *op_class = 121;
+ }
+
+ return true;
+ }
+
+ /* 5 GHz, channels 149..169 */
+ if (freq >= 5745 && freq <= 5845) {
+ if (vht_opclass) {
+ *op_class = vht_opclass;
+ } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ if (freq > chandef->chan->center_freq)
+ *op_class = 126;
+ else
+ *op_class = 127;
+ } else if (freq <= 5805) {
+ *op_class = 124;
+ } else {
+ *op_class = 125;
+ }
+
+ return true;
+ }
+
+ /* 56.16 GHz, channel 1..4 */
+ if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 4) {
+ if (chandef->width >= NL80211_CHAN_WIDTH_40)
+ return false;
+
+ *op_class = 180;
+ return true;
+ }
+
+ /* not supported yet */
+ return false;
+}
+EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
+
int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
u32 beacon_int)
{
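
ieee80211_chandef_to_operating_class() above maps a channel definition to a global operating class. A hedged usage sketch: chan_5180 is a placeholder for an ieee80211_channel pointer the caller is assumed to hold for 5180 MHz (channel 36), and the print helper is illustrative:

/* channel 36, 80 MHz wide, centred on 5210 MHz */
struct cfg80211_chan_def def = {
	.chan		= chan_5180,		/* assumed struct ieee80211_channel * */
	.width		= NL80211_CHAN_WIDTH_80,
	.center_freq1	= 5210,
};
u8 op_class;

if (ieee80211_chandef_to_operating_class(&def, &op_class))
	pr_debug("operating class %u\n", op_class);	/* 128 for this definition */
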
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 5b24d39d7903..fff1bef6ed6d 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -63,7 +63,7 @@ int cfg80211_wext_giwname(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwname);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwname);
int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
u32 *mode, char *extra)
@@ -99,7 +99,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
return cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwmode);
int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
u32 *mode, char *extra)
@@ -134,7 +134,7 @@ int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
}
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwmode);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwmode);
int cfg80211_wext_giwrange(struct net_device *dev,
@@ -248,7 +248,7 @@ int cfg80211_wext_giwrange(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwrange);
/**
@@ -303,7 +303,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwrts);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwrts);
int cfg80211_wext_giwrts(struct net_device *dev,
struct iw_request_info *info,
@@ -317,7 +317,7 @@ int cfg80211_wext_giwrts(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwrts);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwrts);
int cfg80211_wext_siwfrag(struct net_device *dev,
struct iw_request_info *info,
@@ -343,7 +343,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwfrag);
+EXPORT_WEXT_HANDLER(cfg80211_wext_siwfrag);
int cfg80211_wext_giwfrag(struct net_device *dev,
struct iw_request_info *info,
@@ -357,7 +357,7 @@ int cfg80211_wext_giwfrag(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwfrag);
static int cfg80211_wext_siwretry(struct net_device *dev,
struct iw_request_info *info,
@@ -427,7 +427,7 @@ int cfg80211_wext_giwretry(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry);
+EXPORT_WEXT_HANDLER(cfg80211_wext_giwretry);
static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool pairwise,
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index ebcacca2f731..94c7405a5413 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -4,6 +4,12 @@
#include <net/iw_handler.h>
#include <linux/wireless.h>
+#ifdef CONFIG_CFG80211_WEXT_EXPORT
+#define EXPORT_WEXT_HANDLER(h) EXPORT_SYMBOL_GPL(h)
+#else
+#define EXPORT_WEXT_HANDLER(h)
+#endif /* CONFIG_CFG80211_WEXT_EXPORT */
+
int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *freq, char *extra);
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 368611c05739..a4e8af3321d2 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -322,7 +322,7 @@ int cfg80211_mgd_wext_giwap(struct net_device *dev,
if (wdev->current_bss)
memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
else
- memset(ap_addr->sa_data, 0, ETH_ALEN);
+ eth_zero_addr(ap_addr->sa_data);
wdev_unlock(wdev);
return 0;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d9149b68b9bc..c3ab230e4493 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1077,8 +1077,7 @@ out_clear_request:
goto out;
}
-static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
+static int x25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
@@ -1252,8 +1251,7 @@ out_kfree_skb:
}
-static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size,
+static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 85d1d4764612..526c4feb3b50 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -238,11 +238,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
skb->sp->xvec[skb->sp->len++] = x;
- if (xfrm_tunnel_check(skb, x, family)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
- goto drop;
- }
-
spin_lock(&x->lock);
if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
@@ -271,6 +266,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
spin_unlock(&x->lock);
+ if (xfrm_tunnel_check(skb, x, family)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
+ goto drop;
+ }
+
seq_hi = htonl(xfrm_replay_seqhi(x, seq));
XFRM_SKB_CB(skb)->seq.input.low = seq;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 7c532856b398..fbcedbe33190 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -19,7 +19,7 @@
#include <net/dst.h>
#include <net/xfrm.h>
-static int xfrm_output2(struct sk_buff *skb);
+static int xfrm_output2(struct sock *sk, struct sk_buff *skb);
static int xfrm_skb_check_space(struct sk_buff *skb)
{
@@ -130,7 +130,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
return dst_output(skb);
err = nf_hook(skb_dst(skb)->ops->family,
- NF_INET_POST_ROUTING, skb,
+ NF_INET_POST_ROUTING, skb->sk, skb,
NULL, skb_dst(skb)->dev, xfrm_output2);
if (unlikely(err != 1))
goto out;
@@ -144,12 +144,12 @@ out:
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);
-static int xfrm_output2(struct sk_buff *skb)
+static int xfrm_output2(struct sock *sk, struct sk_buff *skb)
{
return xfrm_output_resume(skb, 1);
}
-static int xfrm_output_gso(struct sk_buff *skb)
+static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
{
struct sk_buff *segs;
@@ -165,7 +165,7 @@ static int xfrm_output_gso(struct sk_buff *skb)
int err;
segs->next = NULL;
- err = xfrm_output2(segs);
+ err = xfrm_output2(sk, segs);
if (unlikely(err)) {
kfree_skb_list(nskb);
@@ -178,13 +178,13 @@ static int xfrm_output_gso(struct sk_buff *skb)
return 0;
}
-int xfrm_output(struct sk_buff *skb)
+int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
int err;
if (skb_is_gso(skb))
- return xfrm_output_gso(skb);
+ return xfrm_output_gso(sk, skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
@@ -195,7 +195,7 @@ int xfrm_output(struct sk_buff *skb)
}
}
- return xfrm_output2(skb);
+ return xfrm_output2(sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index de971b6d38c5..f5e39e35d73a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1043,12 +1043,12 @@ static struct xfrm_state *__find_acq_core(struct net *net,
break;
case AF_INET6:
- *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
- *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
+ x->sel.daddr.in6 = daddr->in6;
+ x->sel.saddr.in6 = saddr->in6;
x->sel.prefixlen_d = 128;
x->sel.prefixlen_s = 128;
- *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
- *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
+ x->props.saddr.in6 = saddr->in6;
+ x->id.daddr.in6 = daddr->in6;
break;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7de2ed9ec46d..2091664295ba 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2423,6 +2423,11 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
const struct xfrm_link *link;
int type, err;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ return -ENOTSUPP;
+#endif
+
type = nlh->nlmsg_type;
if (type > XFRM_MSG_MAX)
return -EINVAL;
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index b5b3600dcdf5..76e3458a5419 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -6,23 +6,40 @@ hostprogs-y := test_verifier test_maps
hostprogs-y += sock_example
hostprogs-y += sockex1
hostprogs-y += sockex2
+hostprogs-y += tracex1
+hostprogs-y += tracex2
+hostprogs-y += tracex3
+hostprogs-y += tracex4
test_verifier-objs := test_verifier.o libbpf.o
test_maps-objs := test_maps.o libbpf.o
sock_example-objs := sock_example.o libbpf.o
sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
+tracex1-objs := bpf_load.o libbpf.o tracex1_user.o
+tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
+tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
+tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
always += sockex1_kern.o
always += sockex2_kern.o
+always += tracex1_kern.o
+always += tracex2_kern.o
+always += tracex3_kern.o
+always += tracex4_kern.o
+always += tcbpf1_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
HOSTLOADLIBES_sockex1 += -lelf
HOSTLOADLIBES_sockex2 += -lelf
+HOSTLOADLIBES_tracex1 += -lelf
+HOSTLOADLIBES_tracex2 += -lelf
+HOSTLOADLIBES_tracex3 += -lelf
+HOSTLOADLIBES_tracex4 += -lelf -lrt
# point this to your LLVM backend with bpf support
LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index ca0333146006..f960b5fb3ed8 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -15,6 +15,12 @@ static int (*bpf_map_update_elem)(void *map, void *key, void *value,
(void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
(void *) BPF_FUNC_map_delete_elem;
+static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
+ (void *) BPF_FUNC_probe_read;
+static unsigned long long (*bpf_ktime_get_ns)(void) =
+ (void *) BPF_FUNC_ktime_get_ns;
+static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
+ (void *) BPF_FUNC_trace_printk;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -37,4 +43,11 @@ struct bpf_map_def {
unsigned int max_entries;
};
+static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
+ (void *) BPF_FUNC_skb_store_bytes;
+static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+ (void *) BPF_FUNC_l3_csum_replace;
+static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
+ (void *) BPF_FUNC_l4_csum_replace;
+
#endif
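
The new stubs in bpf_helpers.h are function pointers whose "address" is the helper's BPF_FUNC_* id; the clang BPF backend lowers a call through such a constant into a BPF_CALL instruction with that id. A minimal sketch of how a sample program might use them, assuming the usual sample includes (bpf_helpers.h plus the kernel headers the tracex programs pull in); the probe target and section name are illustrative:

SEC("kprobe/do_sys_open")			/* illustrative probe target */
int bpf_prog_open(struct pt_regs *ctx)
{
	char fmt[] = "open() hit at %llu ns\n";
	u64 now = bpf_ktime_get_ns();		/* lowered to a BPF_CALL insn */

	bpf_trace_printk(fmt, sizeof(fmt), now);
	return 0;
}
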
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 1831d236382b..38dac5a53b51 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -8,29 +8,70 @@
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
+#include <stdlib.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <poll.h>
#include "libbpf.h"
#include "bpf_helpers.h"
#include "bpf_load.h"
+#define DEBUGFS "/sys/kernel/debug/tracing/"
+
static char license[128];
+static int kern_version;
static bool processed_sec[128];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
+int event_fd[MAX_PROGS];
int prog_cnt;
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
- int fd;
bool is_socket = strncmp(event, "socket", 6) == 0;
-
- if (!is_socket)
- /* tracing events tbd */
+ bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
+ bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
+ enum bpf_prog_type prog_type;
+ char buf[256];
+ int fd, efd, err, id;
+ struct perf_event_attr attr = {};
+
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
+ if (is_socket) {
+ prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ } else if (is_kprobe || is_kretprobe) {
+ prog_type = BPF_PROG_TYPE_KPROBE;
+ } else {
+ printf("Unknown event '%s'\n", event);
return -1;
+ }
+
+ if (is_kprobe || is_kretprobe) {
+ if (is_kprobe)
+ event += 7;
+ else
+ event += 10;
+
+ snprintf(buf, sizeof(buf),
+ "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
+ is_kprobe ? 'p' : 'r', event, event);
+ err = system(buf);
+ if (err < 0) {
+ printf("failed to create kprobe '%s' error '%s'\n",
+ event, strerror(errno));
+ return -1;
+ }
+ }
- fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER,
- prog, size, license);
+ fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
if (fd < 0) {
printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
@@ -39,6 +80,41 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_fd[prog_cnt++] = fd;
+ if (is_socket)
+ return 0;
+
+ strcpy(buf, DEBUGFS);
+ strcat(buf, "events/kprobes/");
+ strcat(buf, event);
+ strcat(buf, "/id");
+
+ efd = open(buf, O_RDONLY, 0);
+ if (efd < 0) {
+ printf("failed to open event %s\n", event);
+ return -1;
+ }
+
+ err = read(efd, buf, sizeof(buf));
+ if (err < 0 || err >= sizeof(buf)) {
+ printf("read from '%s' failed '%s'\n", event, strerror(errno));
+ return -1;
+ }
+
+ close(efd);
+
+ buf[err] = 0;
+ id = atoi(buf);
+ attr.config = id;
+
+ efd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+ if (efd < 0) {
+ printf("event %d fd %d err %s\n", id, efd, strerror(errno));
+ return -1;
+ }
+ event_fd[prog_cnt - 1] = efd;
+ ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
+ ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
+
return 0;
}
@@ -135,6 +211,9 @@ int load_bpf_file(char *path)
if (gelf_getehdr(elf, &ehdr) != &ehdr)
return 1;
+ /* clear all kprobes */
+ i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
+
/* scan over all elf sections to get license and map info */
for (i = 1; i < ehdr.e_shnum; i++) {
@@ -149,6 +228,14 @@ int load_bpf_file(char *path)
if (strcmp(shname, "license") == 0) {
processed_sec[i] = true;
memcpy(license, data->d_buf, data->d_size);
+ } else if (strcmp(shname, "version") == 0) {
+ processed_sec[i] = true;
+ if (data->d_size != sizeof(int)) {
+ printf("invalid size of version section %zd\n",
+ data->d_size);
+ return 1;
+ }
+ memcpy(&kern_version, data->d_buf, sizeof(int));
} else if (strcmp(shname, "maps") == 0) {
processed_sec[i] = true;
if (load_maps(data->d_buf, data->d_size))
@@ -178,7 +265,8 @@ int load_bpf_file(char *path)
if (parse_relo_and_apply(data, symbols, &shdr, insns))
continue;
- if (memcmp(shname_prog, "events/", 7) == 0 ||
+ if (memcmp(shname_prog, "kprobe/", 7) == 0 ||
+ memcmp(shname_prog, "kretprobe/", 10) == 0 ||
memcmp(shname_prog, "socket", 6) == 0)
load_and_attach(shname_prog, insns, data_prog->d_size);
}
@@ -193,7 +281,8 @@ int load_bpf_file(char *path)
if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
continue;
- if (memcmp(shname, "events/", 7) == 0 ||
+ if (memcmp(shname, "kprobe/", 7) == 0 ||
+ memcmp(shname, "kretprobe/", 10) == 0 ||
memcmp(shname, "socket", 6) == 0)
load_and_attach(shname, data->d_buf, data->d_size);
}
@@ -201,3 +290,23 @@ int load_bpf_file(char *path)
close(fd);
return 0;
}
+
+void read_trace_pipe(void)
+{
+ int trace_fd;
+
+ trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
+ if (trace_fd < 0)
+ return;
+
+ while (1) {
+ static char buf[4096];
+ ssize_t sz;
+
+ sz = read(trace_fd, buf, sizeof(buf));
+ if (sz > 0) {
+ buf[sz] = 0;
+ puts(buf);
+ }
+ }
+}
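
For readability, the kprobe attach path that load_and_attach() now implements, condensed (error handling omitted; this mirrors the code above):

/*
 * For a "kprobe/foo" section:
 *
 *   1. echo 'p:foo foo' >> /sys/kernel/debug/tracing/kprobe_events
 *   2. id  = atoi(contents of .../events/kprobes/foo/id)
 *   3. efd = perf_event_open({ .type   = PERF_TYPE_TRACEPOINT,
 *                              .config = id }, -1, 0, -1, 0)
 *   4. ioctl(efd, PERF_EVENT_IOC_ENABLE, 0)
 *   5. ioctl(efd, PERF_EVENT_IOC_SET_BPF, prog_fd)
 */
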
diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h
index 27789a34f5e6..cbd7c2b532b9 100644
--- a/samples/bpf/bpf_load.h
+++ b/samples/bpf/bpf_load.h
@@ -6,6 +6,7 @@
extern int map_fd[MAX_MAPS];
extern int prog_fd[MAX_PROGS];
+extern int event_fd[MAX_PROGS];
/* parses elf file compiled by llvm .c->.o
* . parses 'maps' section and creates maps via BPF syscall
@@ -21,4 +22,6 @@ extern int prog_fd[MAX_PROGS];
*/
int load_bpf_file(char *path);
+void read_trace_pipe(void);
+
#endif
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index 46d50b7ddf79..7e1efa7e2ed7 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -81,7 +81,7 @@ char bpf_log_buf[LOG_BUF_SIZE];
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int prog_len,
- const char *license)
+ const char *license, int kern_version)
{
union bpf_attr attr = {
.prog_type = prog_type,
@@ -93,6 +93,11 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
.log_level = 1,
};
+ /* assign one field outside of struct init to make sure any
+ * padding is zero initialized
+ */
+ attr.kern_version = kern_version;
+
bpf_log_buf[0] = 0;
return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
@@ -121,3 +126,10 @@ int open_raw_sock(const char *name)
return sock;
}
+
+int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
+ int group_fd, unsigned long flags)
+{
+ return syscall(__NR_perf_event_open, attr, pid, cpu,
+ group_fd, flags);
+}
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index 58c5fe1bdba1..7235e292a03b 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -13,7 +13,7 @@ int bpf_get_next_key(int fd, void *key, void *next_key);
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int insn_len,
- const char *license);
+ const char *license, int kern_version);
#define LOG_BUF_SIZE 65536
extern char bpf_log_buf[LOG_BUF_SIZE];
@@ -92,7 +92,9 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
-#define BPF_PSEUDO_MAP_FD 1
+#ifndef BPF_PSEUDO_MAP_FD
+# define BPF_PSEUDO_MAP_FD 1
+#endif
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
@@ -182,4 +184,7 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
/* create RAW socket and bind to interface 'name' */
int open_raw_sock(const char *name);
+struct perf_event_attr;
+int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
+ int group_fd, unsigned long flags);
#endif
diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c
index c8ad0404416f..a0ce251c5390 100644
--- a/samples/bpf/sock_example.c
+++ b/samples/bpf/sock_example.c
@@ -56,7 +56,7 @@ static int test_sock(void)
};
prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
- "GPL");
+ "GPL", 0);
if (prog_fd < 0) {
printf("failed to load prog '%s'\n", strerror(errno));
goto cleanup;
diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c
index 066892662915..ed18e9a4909c 100644
--- a/samples/bpf/sockex1_kern.c
+++ b/samples/bpf/sockex1_kern.c
@@ -1,5 +1,6 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include "bpf_helpers.h"
@@ -11,14 +12,17 @@ struct bpf_map_def SEC("maps") my_map = {
};
SEC("socket1")
-int bpf_prog1(struct sk_buff *skb)
+int bpf_prog1(struct __sk_buff *skb)
{
int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
long *value;
+ if (skb->pkt_type != PACKET_OUTGOING)
+ return 0;
+
value = bpf_map_lookup_elem(&my_map, &index);
if (value)
- __sync_fetch_and_add(value, 1);
+ __sync_fetch_and_add(value, skb->len);
return 0;
}
diff --git a/samples/bpf/sockex1_user.c b/samples/bpf/sockex1_user.c
index 34a443ff3831..678ce4693551 100644
--- a/samples/bpf/sockex1_user.c
+++ b/samples/bpf/sockex1_user.c
@@ -40,7 +40,7 @@ int main(int ac, char **argv)
key = IPPROTO_ICMP;
assert(bpf_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
- printf("TCP %lld UDP %lld ICMP %lld packets\n",
+ printf("TCP %lld UDP %lld ICMP %lld bytes\n",
tcp_cnt, udp_cnt, icmp_cnt);
sleep(1);
}
diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
index 6f0135f0f217..ba0e177ff561 100644
--- a/samples/bpf/sockex2_kern.c
+++ b/samples/bpf/sockex2_kern.c
@@ -42,13 +42,13 @@ static inline int proto_ports_offset(__u64 proto)
}
}
-static inline int ip_is_fragment(struct sk_buff *ctx, __u64 nhoff)
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
{
return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
& (IP_MF | IP_OFFSET);
}
-static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off)
+static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
{
__u64 w0 = load_word(ctx, off);
__u64 w1 = load_word(ctx, off + 4);
@@ -58,7 +58,7 @@ static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off)
return (__u32)(w0 ^ w1 ^ w2 ^ w3);
}
-static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
struct flow_keys *flow)
{
__u64 verlen;
@@ -82,7 +82,7 @@ static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
return nhoff;
}
-static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
struct flow_keys *flow)
{
*ip_proto = load_byte(skb,
@@ -96,7 +96,7 @@ static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto
return nhoff;
}
-static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow)
+static inline bool flow_dissector(struct __sk_buff *skb, struct flow_keys *flow)
{
__u64 nhoff = ETH_HLEN;
__u64 ip_proto;
@@ -183,18 +183,23 @@ static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow)
return true;
}
+struct pair {
+ long packets;
+ long bytes;
+};
+
struct bpf_map_def SEC("maps") hash_map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__be32),
- .value_size = sizeof(long),
+ .value_size = sizeof(struct pair),
.max_entries = 1024,
};
SEC("socket2")
-int bpf_prog2(struct sk_buff *skb)
+int bpf_prog2(struct __sk_buff *skb)
{
struct flow_keys flow;
- long *value;
+ struct pair *value;
u32 key;
if (!flow_dissector(skb, &flow))
@@ -203,9 +208,10 @@ int bpf_prog2(struct sk_buff *skb)
key = flow.dst;
value = bpf_map_lookup_elem(&hash_map, &key);
if (value) {
- __sync_fetch_and_add(value, 1);
+ __sync_fetch_and_add(&value->packets, 1);
+ __sync_fetch_and_add(&value->bytes, skb->len);
} else {
- long val = 1;
+ struct pair val = {1, skb->len};
bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
}
diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c
index d2d5f5a790d3..29a276d766fc 100644
--- a/samples/bpf/sockex2_user.c
+++ b/samples/bpf/sockex2_user.c
@@ -6,6 +6,11 @@
#include <unistd.h>
#include <arpa/inet.h>
+struct pair {
+ __u64 packets;
+ __u64 bytes;
+};
+
int main(int ac, char **argv)
{
char filename[256];
@@ -29,13 +34,13 @@ int main(int ac, char **argv)
for (i = 0; i < 5; i++) {
int key = 0, next_key;
- long long value;
+ struct pair value;
while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_lookup_elem(map_fd[0], &next_key, &value);
- printf("ip %s count %lld\n",
+ printf("ip %s bytes %lld packets %lld\n",
inet_ntoa((struct in_addr){htonl(next_key)}),
- value);
+ value.bytes, value.packets);
key = next_key;
}
sleep(1);
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
new file mode 100644
index 000000000000..7c27710f8296
--- /dev/null
+++ b/samples/bpf/tcbpf1_kern.c
@@ -0,0 +1,67 @@
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+
+#include "bpf_helpers.h"
+
+/* compiler workaround */
+#define _htonl __builtin_bswap32
+
+static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
+{
+ bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
+}
+
+#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
+#define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos))
+
+static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
+{
+ __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF);
+
+ bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
+ bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
+}
+
+#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check))
+#define IP_SRC_OFF (ETH_HLEN + offsetof(struct iphdr, saddr))
+
+#define IS_PSEUDO 0x10
+
+static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
+{
+ __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF));
+
+ bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
+ bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
+ bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
+}
+
+#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
+static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
+{
+ __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF));
+
+ bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
+ bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
+}
+
+SEC("classifier")
+int bpf_prog1(struct __sk_buff *skb)
+{
+ __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol));
+ long *value;
+
+ if (proto == IPPROTO_TCP) {
+ set_ip_tos(skb, 8);
+ set_tcp_ip_src(skb, 0xA010101);
+ set_tcp_dest_port(skb, 5001);
+ }
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
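
The set_* helpers above lean on bpf_l3_csum_replace()/bpf_l4_csum_replace() to patch checksums incrementally; IS_PSEUDO (0x10) tells the L4 variant that the rewritten field is also part of the TCP pseudo-header. A conceptual sketch of the RFC 1624 update such helpers perform on a 16-bit field (simplified, not the in-kernel implementation; assumes linux/types.h):

static inline __u16 csum_update16(__u16 check, __u16 from, __u16 to)
{
	__u32 sum = (__u16)~check + (__u16)~from + to;	/* HC' = ~(~HC + ~m + m') */

	sum = (sum & 0xffff) + (sum >> 16);		/* fold the carries ... */
	sum = (sum & 0xffff) + (sum >> 16);		/* ... twice is enough here */
	return (__u16)~sum;
}
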
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index b96175e90363..12f3780af73f 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -14,6 +14,7 @@
#include <linux/unistd.h>
#include <string.h>
#include <linux/filter.h>
+#include <stddef.h>
#include "libbpf.h"
#define MAX_INSNS 512
@@ -288,7 +289,8 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
/* should be able to access R0 = *(R2 + 8) */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8),
+ /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
@@ -641,6 +643,106 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
},
+ {
+ "access skb fields ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "access skb fields bad1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "access skb fields bad2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {4},
+ .errstr = "different pointers",
+ .result = REJECT,
+ },
+ {
+ "access skb fields bad3",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -12),
+ },
+ .fixup = {6},
+ .errstr = "different pointers",
+ .result = REJECT,
+ },
+ {
+ "access skb fields bad4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -13),
+ },
+ .fixup = {7},
+ .errstr = "different pointers",
+ .result = REJECT,
+ },
};
static int probe_filter_length(struct bpf_insn *fp)
@@ -687,9 +789,9 @@ static int test(void)
}
printf("#%d %s ", i, tests[i].descr);
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
prog_len * sizeof(struct bpf_insn),
- "GPL");
+ "GPL", 0);
if (tests[i].result == ACCEPT) {
if (prog_fd < 0) {
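
The three rejected "different pointers" cases share one shape; roughly, as the new tests read:

/*
 *     r1 = ctx;                                  // __sk_buff pointer on entry
 *     if ((long)r1 >= 0)
 *             goto deref;                        // path A: r1 is still ctx
 *     r1 = bpf_map_lookup_elem(map, &key);       // path B: r1 is a map value
 * deref:
 *     r0 = ((struct __sk_buff *)r1)->pkt_type;   // ctx-style load through r1
 *
 * At the join the same register may hold two incompatible pointer types, so
 * the verifier refuses the ctx access with "different pointers".
 */
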
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
new file mode 100644
index 000000000000..31620463701a
--- /dev/null
+++ b/samples/bpf/tracex1_kern.c
@@ -0,0 +1,50 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+
+/* kprobe is NOT a stable ABI
+ * kernel functions can be removed, renamed or completely change semantics.
+ * Number of arguments and their positions can change, etc.
+ * In such case this bpf+kprobe example will no longer be meaningful
+ */
+SEC("kprobe/__netif_receive_skb_core")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ /* attaches to the kprobe on __netif_receive_skb_core,
+ * looks for packets on the loopback device and prints them
+ */
+ char devname[IFNAMSIZ] = {};
+ struct net_device *dev;
+ struct sk_buff *skb;
+ int len;
+
+ /* non-portable! works for the given kernel only */
+ skb = (struct sk_buff *) ctx->di;
+
+ dev = _(skb->dev);
+
+ len = _(skb->len);
+
+ bpf_probe_read(devname, sizeof(devname), dev->name);
+
+ if (devname[0] == 'l' && devname[1] == 'o') {
+ char fmt[] = "skb %p len %d\n";
+ /* using bpf_trace_printk() for DEBUG ONLY */
+ bpf_trace_printk(fmt, sizeof(fmt), skb, len);
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
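
The _(P) macro above copies a kernel value through bpf_probe_read() so the program never dereferences an unsafe pointer directly. For example, dev = _(skb->dev) expands (via the statement-expression extension) to:

	dev = ({
		typeof(skb->dev) val = 0;			/* local copy, NULL-initialised */
		bpf_probe_read(&val, sizeof(val), &skb->dev);	/* guarded read of kernel memory */
		val;						/* value of the statement expression */
	});
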
diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
new file mode 100644
index 000000000000..31a48183beea
--- /dev/null
+++ b/samples/bpf/tracex1_user.c
@@ -0,0 +1,25 @@
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+int main(int ac, char **argv)
+{
+ FILE *f;
+ char filename[256];
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ f = popen("taskset 1 ping -c5 localhost", "r");
+ (void) f;
+
+ read_trace_pipe();
+
+ return 0;
+}
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
new file mode 100644
index 000000000000..19ec1cfc45db
--- /dev/null
+++ b/samples/bpf/tracex2_kern.c
@@ -0,0 +1,86 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(long),
+ .value_size = sizeof(long),
+ .max_entries = 1024,
+};
+
+/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
+ * example will no longer be meaningful
+ */
+SEC("kprobe/kfree_skb")
+int bpf_prog2(struct pt_regs *ctx)
+{
+ long loc = 0;
+ long init_val = 1;
+ long *value;
+
+ /* x64 specific: read ip of kfree_skb caller.
+ * non-portable version of __builtin_return_address(0)
+ */
+ bpf_probe_read(&loc, sizeof(loc), (void *)ctx->sp);
+
+ value = bpf_map_lookup_elem(&my_map, &loc);
+ if (value)
+ *value += 1;
+ else
+ bpf_map_update_elem(&my_map, &loc, &init_val, BPF_ANY);
+ return 0;
+}
+
+static unsigned int log2(unsigned int v)
+{
+ unsigned int r;
+ unsigned int shift;
+
+ r = (v > 0xFFFF) << 4; v >>= r;
+ shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+ shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+ shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+ r |= (v >> 1);
+ return r;
+}
+
+static unsigned int log2l(unsigned long v)
+{
+ unsigned int hi = v >> 32;
+ if (hi)
+ return log2(hi) + 32;
+ else
+ return log2(v);
+}
+
+struct bpf_map_def SEC("maps") my_hist_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(long),
+ .max_entries = 64,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog3(struct pt_regs *ctx)
+{
+ long write_size = ctx->dx; /* arg3 */
+ long init_val = 1;
+ long *value;
+ u32 index = log2l(write_size);
+
+ value = bpf_map_lookup_elem(&my_hist_map, &index);
+ if (value)
+ __sync_fetch_and_add(value, 1);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
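
A worked trace of the branch-free log2() above, for a 4096-byte write:

/*
 *   v = 4096 (0x1000)
 *   v > 0xFFFF ? no  -> r  = 0
 *   v > 0xFF   ? yes -> v >>= 8 (now 16), r |= 8  -> 8
 *   v > 0xF    ? yes -> v >>= 4 (now 1),  r |= 4  -> 12
 *   v > 0x3    ? no
 *   r |= v >> 1 (0)                               -> 12
 *
 * so a 4096-byte write lands in histogram slot 12, which tracex2_user
 * prints as the "4096 -> 8191" row.
 */
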
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
new file mode 100644
index 000000000000..91b8d0896fbb
--- /dev/null
+++ b/samples/bpf/tracex2_user.c
@@ -0,0 +1,95 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define MAX_INDEX 64
+#define MAX_STARS 38
+
+static void stars(char *str, long val, long max, int width)
+{
+ int i;
+
+ for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
+ str[i] = '*';
+ if (val > max)
+ str[i - 1] = '+';
+ str[i] = '\0';
+}
+
+static void print_hist(int fd)
+{
+ int key;
+ long value;
+ long data[MAX_INDEX] = {};
+ char starstr[MAX_STARS];
+ int i;
+ int max_ind = -1;
+ long max_value = 0;
+
+ for (key = 0; key < MAX_INDEX; key++) {
+ bpf_lookup_elem(fd, &key, &value);
+ data[key] = value;
+ if (value && key > max_ind)
+ max_ind = key;
+ if (value > max_value)
+ max_value = value;
+ }
+
+ printf(" syscall write() stats\n");
+ printf(" byte_size : count distribution\n");
+ for (i = 1; i <= max_ind + 1; i++) {
+ stars(starstr, data[i - 1], max_value, MAX_STARS);
+ printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
+ (1l << i) >> 1, (1l << i) - 1, data[i - 1],
+ MAX_STARS, starstr);
+ }
+}
+static void int_exit(int sig)
+{
+ print_hist(map_fd[1]);
+ exit(0);
+}
+
+int main(int ac, char **argv)
+{
+ char filename[256];
+ long key, next_key, value;
+ FILE *f;
+ int i;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ signal(SIGINT, int_exit);
+
+ /* start 'ping' in the background to have some kfree_skb events */
+ f = popen("ping -c5 localhost", "r");
+ (void) f;
+
+ /* start 'dd' in the background to have plenty of 'write' syscalls */
+ f = popen("dd if=/dev/zero of=/dev/null count=5000000", "r");
+ (void) f;
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ for (i = 0; i < 5; i++) {
+ key = 0;
+ while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
+ bpf_lookup_elem(map_fd[0], &next_key, &value);
+ printf("location 0x%lx count %ld\n", next_key, value);
+ key = next_key;
+ }
+ if (key)
+ printf("\n");
+ sleep(1);
+ }
+ print_hist(map_fd[1]);
+
+ return 0;
+}
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
new file mode 100644
index 000000000000..255ff2792366
--- /dev/null
+++ b/samples/bpf/tracex3_kern.c
@@ -0,0 +1,89 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(long),
+ .value_size = sizeof(u64),
+ .max_entries = 4096,
+};
+
+/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
+ * example will no longer be meaningful
+ */
+SEC("kprobe/blk_mq_start_request")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ long rq = ctx->di;
+ u64 val = bpf_ktime_get_ns();
+
+ bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
+ return 0;
+}
+
+static unsigned int log2l(unsigned long long n)
+{
+#define S(k) if (n >= (1ull << k)) { i += k; n >>= k; }
+ int i = -(n == 0);
+ S(32); S(16); S(8); S(4); S(2); S(1);
+ return i;
+#undef S
+}
+
+#define SLOTS 100
+
+struct bpf_map_def SEC("maps") lat_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = SLOTS,
+};
+
+SEC("kprobe/blk_update_request")
+int bpf_prog2(struct pt_regs *ctx)
+{
+ long rq = ctx->di;
+ u64 *value, l, base;
+ u32 index;
+
+ value = bpf_map_lookup_elem(&my_map, &rq);
+ if (!value)
+ return 0;
+
+ u64 cur_time = bpf_ktime_get_ns();
+ u64 delta = cur_time - *value;
+
+ bpf_map_delete_elem(&my_map, &rq);
+
+ /* the lines below are computing index = log10(delta)*10
+ * using integer arithmetic
+ * index = 29 ~ 1 usec
+ * index = 59 ~ 1 msec
+ * index = 89 ~ 1 sec
+ * index = 99 ~ 10sec or more
+ * log10(x)*10 = log2(x)*10/log2(10) = log2(x)*3
+ */
+ l = log2l(delta);
+ base = 1ll << l;
+ index = (l * 64 + (delta - base) * 64 / base) * 3 / 64;
+
+ if (index >= SLOTS)
+ index = SLOTS - 1;
+
+ value = bpf_map_lookup_elem(&lat_map, &index);
+ if (value)
+ __sync_fetch_and_add((long *)value, 1);
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
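
A worked example of the slot computation above, matching the anchor points in the comment:

/*
 *   delta = 1000 ns (~1 usec):
 *     l     = log2l(1000) = 9            (2^9 = 512 <= 1000 < 2^10)
 *     base  = 1 << 9      = 512
 *     index = (9*64 + (1000-512)*64/512) * 3 / 64
 *           = (576 + 61) * 3 / 64 = 1911 / 64 = 29   -> "index = 29 ~ 1 usec"
 *
 *   delta = 1,000,000 ns (~1 msec):
 *     l = 19, base = 524288
 *     index = (1216 + 58) * 3 / 64 = 59              -> "index = 59 ~ 1 msec"
 */
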
diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c
new file mode 100644
index 000000000000..0aaa933ab938
--- /dev/null
+++ b/samples/bpf/tracex3_user.c
@@ -0,0 +1,150 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
+#define SLOTS 100
+
+static void clear_stats(int fd)
+{
+ __u32 key;
+ __u64 value = 0;
+
+ for (key = 0; key < SLOTS; key++)
+ bpf_update_elem(fd, &key, &value, BPF_ANY);
+}
+
+const char *color[] = {
+ "\033[48;5;255m",
+ "\033[48;5;252m",
+ "\033[48;5;250m",
+ "\033[48;5;248m",
+ "\033[48;5;246m",
+ "\033[48;5;244m",
+ "\033[48;5;242m",
+ "\033[48;5;240m",
+ "\033[48;5;238m",
+ "\033[48;5;236m",
+ "\033[48;5;234m",
+ "\033[48;5;232m",
+};
+const int num_colors = ARRAY_SIZE(color);
+
+const char nocolor[] = "\033[00m";
+
+const char *sym[] = {
+ " ",
+ " ",
+ ".",
+ ".",
+ "*",
+ "*",
+ "o",
+ "o",
+ "O",
+ "O",
+ "#",
+ "#",
+};
+
+bool full_range = false;
+bool text_only = false;
+
+static void print_banner(void)
+{
+ if (full_range)
+ printf("|1ns |10ns |100ns |1us |10us |100us"
+ " |1ms |10ms |100ms |1s |10s\n");
+ else
+ printf("|1us |10us |100us |1ms |10ms "
+ "|100ms |1s |10s\n");
+}
+
+static void print_hist(int fd)
+{
+ __u32 key;
+ __u64 value;
+ __u64 cnt[SLOTS];
+ __u64 max_cnt = 0;
+ __u64 total_events = 0;
+
+ for (key = 0; key < SLOTS; key++) {
+ value = 0;
+ bpf_lookup_elem(fd, &key, &value);
+ cnt[key] = value;
+ total_events += value;
+ if (value > max_cnt)
+ max_cnt = value;
+ }
+ clear_stats(fd);
+ for (key = full_range ? 0 : 29; key < SLOTS; key++) {
+ int c = num_colors * cnt[key] / (max_cnt + 1);
+
+ if (text_only)
+ printf("%s", sym[c]);
+ else
+ printf("%s %s", color[c], nocolor);
+ }
+ printf(" # %lld\n", total_events);
+}
+
+int main(int ac, char **argv)
+{
+ char filename[256];
+ int i;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ for (i = 1; i < ac; i++) {
+ if (strcmp(argv[i], "-a") == 0) {
+ full_range = true;
+ } else if (strcmp(argv[i], "-t") == 0) {
+ text_only = true;
+ } else if (strcmp(argv[i], "-h") == 0) {
+ printf("Usage:\n"
+ " -a display wider latency range\n"
+ " -t text only\n");
+ return 1;
+ }
+ }
+
+ printf(" heatmap of IO latency\n");
+ if (text_only)
+ printf(" %s", sym[num_colors - 1]);
+ else
+ printf(" %s %s", color[num_colors - 1], nocolor);
+ printf(" - many events with this latency\n");
+
+ if (text_only)
+ printf(" %s", sym[0]);
+ else
+ printf(" %s %s", color[0], nocolor);
+ printf(" - few events\n");
+
+ for (i = 0; ; i++) {
+ if (i % 20 == 0)
+ print_banner();
+ print_hist(map_fd[1]);
+ sleep(2);
+ }
+
+ return 0;
+}
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
new file mode 100644
index 000000000000..126b80512228
--- /dev/null
+++ b/samples/bpf/tracex4_kern.c
@@ -0,0 +1,54 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct pair {
+ u64 val;
+ u64 ip;
+};
+
+struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(long),
+ .value_size = sizeof(struct pair),
+ .max_entries = 1000000,
+};
+
+/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
+ * example will no longer be meaningful
+ */
+SEC("kprobe/kmem_cache_free")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ long ptr = ctx->si;
+
+ bpf_map_delete_elem(&my_map, &ptr);
+ return 0;
+}
+
+SEC("kretprobe/kmem_cache_alloc_node")
+int bpf_prog2(struct pt_regs *ctx)
+{
+ long ptr = ctx->ax;
+ long ip = 0;
+
+ /* get ip address of kmem_cache_alloc_node() caller */
+ bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip)));
+
+ struct pair v = {
+ .val = bpf_ktime_get_ns(),
+ .ip = ip,
+ };
+
+ bpf_map_update_elem(&my_map, &ptr, &v, BPF_ANY);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
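The bpf_probe_read() of ctx->bp + sizeof(ip) above leans on the usual x86-64 frame layout: when frame pointers are kept, the caller's return address sits one word above the saved rbp. A hedged user-space illustration of that assumption follows (not part of the patch, and only meaningful when built with frame pointers, e.g. -fno-omit-frame-pointer):

#include <stdio.h>

static __attribute__((noinline)) void show_caller(void)
{
	/* frame address == current rbp; [rbp] holds the saved rbp,
	 * [rbp + one word] holds the return address into the caller */
	void **bp = __builtin_frame_address(0);
	void *ret_from_frame = bp[1];
	void *ret_builtin = __builtin_return_address(0);

	/* both values should agree when frame pointers are kept */
	printf("%p %p\n", ret_from_frame, ret_builtin);
}

int main(void)
{
	show_caller();
	return 0;
}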
diff --git a/samples/bpf/tracex4_user.c b/samples/bpf/tracex4_user.c
new file mode 100644
index 000000000000..bc4a3bdea6ed
--- /dev/null
+++ b/samples/bpf/tracex4_user.c
@@ -0,0 +1,69 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+#include <time.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+struct pair {
+ long long val;
+ __u64 ip;
+};
+
+static __u64 time_get_ns(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static void print_old_objects(int fd)
+{
+ long long val = time_get_ns();
+ __u64 key, next_key;
+ struct pair v;
+
+ key = write(1, "\e[1;1H\e[2J", 10); /* clear screen */
+
+ key = -1;
+ while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_lookup_elem(fd, &next_key, &v);
+ key = next_key;
+ if (val - v.val < 1000000000ll)
+ /* skip objects allocated less than 1 sec ago */
+ continue;
+ printf("obj 0x%llx is %2lldsec old was allocated at ip %llx\n",
+ next_key, (val - v.val) / 1000000000ll, v.ip);
+ }
+}
+
+int main(int ac, char **argv)
+{
+ char filename[256];
+ int i;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ for (i = 0; ; i++) {
+ print_old_objects(map_fd[0]);
+ sleep(1);
+ }
+
+ return 0;
+}
diff --git a/samples/pktgen/pktgen.conf-1-1 b/samples/pktgen/pktgen.conf-1-1
new file mode 100755
index 000000000000..f91daad9e916
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-1-1
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-flows b/samples/pktgen/pktgen.conf-1-1-flows
new file mode 100755
index 000000000000..081749c9707d
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-1-1-flows
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0
+# We need to do alloc for every skb since we cannot clone here.
+
+CLONE_SKB="clone_skb 0"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ # Random address within the min-max range
+ pgset "flag IPDST_RND"
+ pgset "dst_min 10.0.0.0"
+ pgset "dst_max 10.255.255.255"
+
+ # 8k Concurrent flows at 4 pkts
+ pgset "flows 8192"
+ pgset "flowlen 4"
+
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-ip6 b/samples/pktgen/pktgen.conf-1-1-ip6
new file mode 100755
index 000000000000..0b9ffd47fd41
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-1-1-ip6
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+# IPv6. Note increase in minimal packet length
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 66"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst6 fec0::1"
+ pgset "src6 fec0::2"
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-ip6-rdos b/samples/pktgen/pktgen.conf-1-1-ip6-rdos
new file mode 100755
index 000000000000..ad98e5f40776
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-1-1-ip6-rdos
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+# IPv6. Note increase in minimal packet length
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0 means maximum speed.
+
+# We need to do alloc for every skb since we cannot clone here.
+CLONE_SKB="clone_skb 0"
+
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 66"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst6_min fec0::1"
+ pgset "dst6_max fec0::FFFF:FFFF"
+
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-1-rdos b/samples/pktgen/pktgen.conf-1-1-rdos
new file mode 100755
index 000000000000..c7553be49b80
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-1-1-rdos
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. One CPU example. We add eth1.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+
+
+# device config
+# delay 0
+
+# We need to do alloc for every skb since we cannot clone here.
+
+CLONE_SKB="clone_skb 0"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ # Random address within the min-max range
+ pgset "flag IPDST_RND"
+ pgset "dst_min 10.0.0.0"
+ pgset "dst_max 10.255.255.255"
+
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-1-2 b/samples/pktgen/pktgen.conf-1-2
new file mode 100755
index 000000000000..ba4eb26e168d
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-1-2
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# One CPU means one thread. One CPU example. We add eth1, eth2 respectively.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+ echo "Adding eth2"
+ pgset "add_device eth2"
+
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+PGDEV=/proc/net/pktgen/eth2
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 192.168.2.2"
+ pgset "dst_mac 00:04:23:08:91:de"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
diff --git a/samples/pktgen/pktgen.conf-2-1 b/samples/pktgen/pktgen.conf-2-1
new file mode 100755
index 000000000000..e108e97d6d89
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-2-1
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. Two CPU example. We add eth1 to the first
+# and leave the second idle.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+
+# We need to remove old config since we don't use this thread. We can only
+# run one NIC on one CPU due to affinity reasons.
+
+PGDEV=/proc/net/pktgen/kpktgend_1
+ echo "Removing all devices"
+ pgset "rem_device_all"
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-2 b/samples/pktgen/pktgen.conf-2-2
new file mode 100755
index 000000000000..acea15503e71
--- /dev/null
+++ b/samples/pktgen/pktgen.conf-2-2
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#modprobe pktgen
+
+
+function pgset() {
+ local result
+
+ echo $1 > $PGDEV
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [ "$result" = "" ]; then
+ cat $PGDEV | fgrep Result:
+ fi
+}
+
+# Config Start Here -----------------------------------------------------------
+
+
+# thread config
+# Each CPU has its own thread. Two CPU example. We add eth1, eth2 respectively.
+
+PGDEV=/proc/net/pktgen/kpktgend_0
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth1"
+ pgset "add_device eth1"
+
+PGDEV=/proc/net/pktgen/kpktgend_1
+ echo "Removing all devices"
+ pgset "rem_device_all"
+ echo "Adding eth2"
+ pgset "add_device eth2"
+
+
+# device config
+# delay 0 means maximum speed.
+
+CLONE_SKB="clone_skb 1000000"
+# NIC adds 4 bytes CRC
+PKT_SIZE="pkt_size 60"
+
+# COUNT 0 means forever
+#COUNT="count 0"
+COUNT="count 10000000"
+DELAY="delay 0"
+
+PGDEV=/proc/net/pktgen/eth1
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 10.10.11.2"
+ pgset "dst_mac 00:04:23:08:91:dc"
+
+PGDEV=/proc/net/pktgen/eth2
+ echo "Configuring $PGDEV"
+ pgset "$COUNT"
+ pgset "$CLONE_SKB"
+ pgset "$PKT_SIZE"
+ pgset "$DELAY"
+ pgset "dst 192.168.2.2"
+ pgset "dst_mac 00:04:23:08:91:de"
+
+# Time to run
+PGDEV=/proc/net/pktgen/pgctrl
+
+ echo "Running... ctrl^C to stop"
+ trap true INT
+ pgset "start"
+ echo "Done"
+ cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index a2c8b02b6359..8965d1bb8811 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -22,7 +22,25 @@
* protection, just like TRACE_INCLUDE_FILE.
*/
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM sample
+#define TRACE_SYSTEM sample-trace
+
+/*
+ * TRACE_SYSTEM is expected to be a valid C variable name (alpha-numeric
+ * and underscore), although it may start with numbers. If for some
+ * reason it is not, you need to add the following lines:
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR sample_trace
+/*
+ * But the above is only needed if TRACE_SYSTEM is not alpha-numeric
+ * and underscored. By default, TRACE_SYSTEM_VAR will be equal to
+ * TRACE_SYSTEM. As TRACE_SYSTEM_VAR must be alpha-numeric, if
+ * TRACE_SYSTEM is not, then TRACE_SYSTEM_VAR must be defined with
+ * only alpha-numeric and underscores.
+ *
+ * The TRACE_SYSTEM_VAR is only used internally and not visible to
+ * user space.
+ */
/*
* Notice that this file is not protected like a normal header.
@@ -180,8 +198,30 @@ static inline int __length_of(const int *list)
;
return i;
}
+
+enum {
+ TRACE_SAMPLE_FOO = 2,
+ TRACE_SAMPLE_BAR = 4,
+ TRACE_SAMPLE_ZOO = 8,
+};
#endif
+/*
+ * If enums are used in the TP_printk(), their names will be shown in
+ * format files and not their values. This can cause problems with user
+ * space programs that parse the format files to know how to translate
+ * the raw binary trace output into human readable text.
+ *
+ * To help out user space programs, any enum that is used in the TP_printk()
+ * should be declared with the TRACE_DEFINE_ENUM() macro. All that needs to
+ * be done is to add this macro, with the enum value as its argument, to
+ * the trace header file, and the value will be converted in the output.
+ */
+
+TRACE_DEFINE_ENUM(TRACE_SAMPLE_FOO);
+TRACE_DEFINE_ENUM(TRACE_SAMPLE_BAR);
+TRACE_DEFINE_ENUM(TRACE_SAMPLE_ZOO);
+
TRACE_EVENT(foo_bar,
TP_PROTO(const char *foo, int bar, const int *lst,
@@ -206,7 +246,47 @@ TRACE_EVENT(foo_bar,
__assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
),
- TP_printk("foo %s %d %s %s (%s)", __entry->foo, __entry->bar,
+ TP_printk("foo %s %d %s %s %s %s (%s)", __entry->foo, __entry->bar,
+
+/*
+ * Notice here the use of some helper functions. This includes:
+ *
+ * __print_symbolic( variable, { value, "string" }, ... ),
+ *
+ * The variable is tested against each value of the { } pair. If
+ * the variable matches one of the values, then it will print the
+ * string in that pair. If none are matched, it returns a string
+ * version of the number (if __entry->bar == 7 then "7" is returned).
+ */
+ __print_symbolic(__entry->bar,
+ { 0, "zero" },
+ { TRACE_SAMPLE_FOO, "TWO" },
+ { TRACE_SAMPLE_BAR, "FOUR" },
+ { TRACE_SAMPLE_ZOO, "EIGHT" },
+ { 10, "TEN" }
+ ),
+
+/*
+ * __print_flags( variable, "delim", { value, "flag" }, ... ),
+ *
+ * This is similar to __print_symbolic, except that it tests the bits
+ * of the value. If ((FLAG & variable) == FLAG) then the string is
+ * printed. If more than one flag matches, then each one that does is
+ * also printed with delim in between them.
+ * If not all bits are accounted for, then the not found bits will be
+ * added in hex format: 0x506 will show BIT2|BIT4|0x500
+ */
+ __print_flags(__entry->bar, "|",
+ { 1, "BIT1" },
+ { 2, "BIT2" },
+ { 4, "BIT3" },
+ { 8, "BIT4" }
+ ),
+/*
+ * __print_array( array, len, element_size )
+ *
+ * This prints out the array that is defined by __array in a nice format.
+ */
__print_array(__get_dynamic_array(list),
__get_dynamic_array_len(list),
sizeof(int)),
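For readers who want to see the __print_flags() rendering rule described above in isolation, here is a minimal user-space sketch. It is an illustration under stated assumptions, not the tracing macro itself: print_flags() and the BIT1..BIT4 table are local stand-ins that follow the same rule (matched bits joined by the delimiter, leftover bits appended in hex).

#include <stdio.h>

struct flag_name { unsigned long mask; const char *name; };

static void print_flags(unsigned long val, const char *delim,
			const struct flag_name *tbl, int n)
{
	int i, first = 1;

	for (i = 0; i < n; i++) {
		if ((val & tbl[i].mask) == tbl[i].mask) {
			printf("%s%s", first ? "" : delim, tbl[i].name);
			val &= ~tbl[i].mask;
			first = 0;
		}
	}
	if (val)	/* bits not covered by the table show up in hex */
		printf("%s0x%lx", first ? "" : delim, val);
	printf("\n");
}

int main(void)
{
	static const struct flag_name tbl[] = {
		{ 1, "BIT1" }, { 2, "BIT2" }, { 4, "BIT3" }, { 8, "BIT4" },
	};

	print_flags(0x10a, "|", tbl, 4);	/* prints BIT2|BIT4|0x100 */
	return 0;
}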
diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
index 909ed7a2ac61..1c15717e0d56 100644
--- a/scripts/Makefile.dtbinst
+++ b/scripts/Makefile.dtbinst
@@ -18,7 +18,7 @@ export dtbinst-root ?= $(obj)
include include/config/auto.conf
include scripts/Kbuild.include
-include $(srctree)/$(obj)/Makefile
+include $(src)/Makefile
PHONY += __dtbs_install_prep
__dtbs_install_prep:
diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
index 5b698add4f31..b27290035253 100644
--- a/scripts/Makefile.fwinst
+++ b/scripts/Makefile.fwinst
@@ -13,7 +13,7 @@ src := $(obj)
-include $(objtree)/.config
include scripts/Kbuild.include
-include $(srctree)/$(obj)/Makefile
+include $(src)/Makefile
include scripts/Makefile.host
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 631619b2b118..3f874d24234f 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -13,12 +13,16 @@ CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
--param asan-instrumentation-with-call-threshold=$(call_threshold))
ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)
+ ifneq ($(CONFIG_COMPILE_TEST),y)
$(warning Cannot use CONFIG_KASAN: \
-fsanitize=kernel-address is not supported by compiler)
+ endif
else
ifeq ($(CFLAGS_KASAN),)
- $(warning CONFIG_KASAN: compiler does not support all options.\
- Trying minimal configuration)
+ ifneq ($(CONFIG_COMPILE_TEST),y)
+ $(warning CONFIG_KASAN: compiler does not support all options.\
+ Trying minimal configuration)
+ endif
CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
endif
endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 044eb4f89a91..79e86613712f 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -282,7 +282,8 @@ $(obj)/%.dtb.S: $(obj)/%.dtb
$(call cmd,dt_S_dtb)
quiet_cmd_dtc = DTC $@
-cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
+cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
+ $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
$(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
-i $(dir $<) $(DTC_FLAGS) \
-d $(depfile).dtc.tmp $(dtc-tmp) ; \
diff --git a/scripts/check_extable.sh b/scripts/check_extable.sh
new file mode 100755
index 000000000000..0fb6b1c97c27
--- /dev/null
+++ b/scripts/check_extable.sh
@@ -0,0 +1,146 @@
+#! /bin/bash
+# (c) 2015, Quentin Casasnovas <quentin.casasnovas@oracle.com>
+
+obj=$1
+
+file ${obj} | grep -q ELF || (echo "${obj} is not an ELF file." 1>&2 ; exit 0)
+
+# Bail out early if there isn't an __ex_table section in this object file.
+objdump -hj __ex_table ${obj} 2> /dev/null > /dev/null
+[ $? -ne 0 ] && exit 0
+
+white_list=.text,.fixup
+
+suspicious_relocs=$(objdump -rj __ex_table ${obj} | tail -n +6 |
+ grep -v $(eval echo -e{${white_list}}) | awk '{print $3}')
+
+# No suspicious relocs in __ex_table, job's a good'un
+[ -z "${suspicious_relocs}" ] && exit 0
+
+
+# After this point, something is seriously wrong since we just found out we
+# have some relocations in __ex_table which point to sections which aren't
+# white listed. If you're adding a new section in the Linux kernel, and
+# you're expecting this section to contain code which can fault (i.e. the
+# __ex_table relocation to your new section is expected), simply add your
+# new section to the white_list variable above. If not, you're probably
+# doing something wrong and the rest of this code is just trying to print
+# you more information about it.
+
+function find_section_offset_from_symbol()
+{
+ eval $(objdump -t ${obj} | grep ${1} | sed 's/\([0-9a-f]\+\) .\{7\} \([^ \t]\+\).*/section="\2"; section_offset="0x\1" /')
+
+ # addr2line takes addresses in hexadecimal...
+ section_offset=$(printf "0x%016x" $(( ${section_offset} + $2 )) )
+}
+
+function find_symbol_and_offset_from_reloc()
+{
+ # Extract symbol and offset from the objdump output
+ eval $(echo $reloc | sed 's/\([^+]\+\)+\?\(0x[0-9a-f]\+\)\?/symbol="\1"; symbol_offset="\2"/')
+
+ # When the relocation points to the beginning of a symbol or section, it
+ # won't print the offset since it is zero.
+ if [ -z "${symbol_offset}" ]; then
+ symbol_offset=0x0
+ fi
+}
+
+function find_alt_replacement_target()
+{
+ # The target of the .altinstr_replacement is the relocation just before
+ # the .altinstr_replacement one.
+ eval $(objdump -rj .altinstructions ${obj} | grep -B1 "${section}+${section_offset}" | head -n1 | awk '{print $3}' |
+ sed 's/\([^+]\+\)+\(0x[0-9a-f]\+\)/alt_target_section="\1"; alt_target_offset="\2"/')
+}
+
+function handle_alt_replacement_reloc()
+{
+ # This will define alt_target_section and alt_target_section_offset
+ find_alt_replacement_target ${section} ${section_offset}
+
+ echo "Error: found a reference to .altinstr_replacement in __ex_table:"
+ addr2line -fip -j ${alt_target_section} -e ${obj} ${alt_target_offset} | awk '{print "\t" $0}'
+
+ error=true
+}
+
+function is_executable_section()
+{
+ objdump -hwj ${section} ${obj} | grep -q CODE
+ return $?
+}
+
+function handle_suspicious_generic_reloc()
+{
+ if is_executable_section ${section}; then
+ # We've got a relocation to a non white listed _executable_
+ # section, print a warning so the developer adds the section to
+ # the white list or fixes the code. We try to pretty-print the file
+ # and line number where that relocation was added.
+ echo "Warning: found a reference to section \"${section}\" in __ex_table:"
+ addr2line -fip -j ${section} -e ${obj} ${section_offset} | awk '{print "\t" $0}'
+ else
+ # Something is definitely wrong here since we've got a relocation
+ # to a non-executable section, there's no way this would ever be
+ # running in the kernel.
+ echo "Error: found a reference to non-executable section \"${section}\" in __ex_table at offset ${section_offset}"
+ error=true
+ fi
+}
+
+function handle_suspicious_reloc()
+{
+ case "${section}" in
+ ".altinstr_replacement")
+ handle_alt_replacement_reloc ${section} ${section_offset}
+ ;;
+ *)
+ handle_suspicious_generic_reloc ${section} ${section_offset}
+ ;;
+ esac
+}
+
+function diagnose()
+{
+
+ for reloc in ${suspicious_relocs}; do
+ # Let's find out where the target of the relocation in __ex_table
+ # is, this will define ${symbol} and ${symbol_offset}
+ find_symbol_and_offset_from_reloc ${reloc}
+
+ # When there's a global symbol at the place of the relocation,
+ # objdump will use it instead of giving us a section+offset, so
+ # let's find out which section is this symbol in and the total
+ # offset within that section.
+ find_section_offset_from_symbol ${symbol} ${symbol_offset}
+
+ # In this case objdump was presenting us with a reloc to a symbol
+ # rather than a section. Now that we've got the actual section,
+ # we can skip it if it's in the white_list.
+ if [ -z "$( echo $section | grep -v $(eval echo -e{${white_list}}))" ]; then
+ continue;
+ fi
+
+ # Will either print a warning if the relocation happens to be in a
+ # section we do not know but has executable bit set, or error out.
+ handle_suspicious_reloc
+ done
+}
+
+function check_debug_info() {
+ objdump -hj .debug_info ${obj} 2> /dev/null > /dev/null ||
+ echo -e "${obj} does not contain debug information, the addr2line output will be limited.\n" \
+ "Recompile ${obj} with CONFIG_DEBUG_INFO to get a more useful output."
+}
+
+check_debug_info
+
+diagnose
+
+if [ "${error}" ]; then
+ exit 1
+fi
+
+exit 0
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index e9cc689033fe..74086a583d8d 100644..100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python
-"""Find Kconfig identifiers that are referenced but not defined."""
+"""Find Kconfig symbols that are referenced but not defined."""
-# (c) 2014 Valentin Rothberg <valentinrothberg@gmail.com>
+# (c) 2014-2015 Valentin Rothberg <Valentin.Rothberg@lip6.fr>
# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
#
# Licensed under the terms of the GNU GPL License version 2
@@ -10,7 +10,9 @@
import os
import re
+import sys
from subprocess import Popen, PIPE, STDOUT
+from optparse import OptionParser
# regex expressions
@@ -32,22 +34,149 @@ REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
+def parse_options():
+ """The user interface of this module."""
+ usage = "%prog [options]\n\n" \
+ "Run this tool to detect Kconfig symbols that are referenced but " \
+ "not defined in\nKconfig. The output of this tool has the " \
+ "format \'Undefined symbol\\tFile list\'\n\n" \
+ "If no option is specified, %prog will default to check your\n" \
+ "current tree. Please note that specifying commits will " \
+ "\'git reset --hard\'\nyour current tree! You may save " \
+ "uncommitted changes to avoid losing data."
+
+ parser = OptionParser(usage=usage)
+
+ parser.add_option('-c', '--commit', dest='commit', action='store',
+ default="",
+ help="Check if the specified commit (hash) introduces "
+ "undefined Kconfig symbols.")
+
+ parser.add_option('-d', '--diff', dest='diff', action='store',
+ default="",
+ help="Diff undefined symbols between two commits. The "
+ "input format bases on Git log's "
+ "\'commmit1..commit2\'.")
+
+ parser.add_option('', '--force', dest='force', action='store_true',
+ default=False,
+ help="Reset current Git tree even when it's dirty.")
+
+ (opts, _) = parser.parse_args()
+
+ if opts.commit and opts.diff:
+ sys.exit("Please specify only one option at once.")
+
+ if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
+ sys.exit("Please specify valid input in the following format: "
+ "\'commmit1..commit2\'")
+
+ if opts.commit or opts.diff:
+ if not opts.force and tree_is_dirty():
+ sys.exit("The current Git tree is dirty (see 'git status'). "
+ "Running this script may\ndelete important data since it "
+ "calls 'git reset --hard' for some performance\nreasons. "
+ " Please run this script in a clean Git tree or pass "
+ "'--force' if you\nwant to ignore this warning and "
+ "continue.")
+
+ return opts
+
+
def main():
"""Main function of this module."""
+ opts = parse_options()
+
+ if opts.commit or opts.diff:
+ head = get_head()
+
+ # get commit range
+ commit_a = None
+ commit_b = None
+ if opts.commit:
+ commit_a = opts.commit + "~"
+ commit_b = opts.commit
+ elif opts.diff:
+ split = opts.diff.split("..")
+ commit_a = split[0]
+ commit_b = split[1]
+ undefined_a = {}
+ undefined_b = {}
+
+ # get undefined items before the commit
+ execute("git reset --hard %s" % commit_a)
+ undefined_a = check_symbols()
+
+ # get undefined items for the commit
+ execute("git reset --hard %s" % commit_b)
+ undefined_b = check_symbols()
+
+ # report cases that are present for the commit but not before
+ for feature in sorted(undefined_b):
+ # feature has not been undefined before
+ if not feature in undefined_a:
+ files = sorted(undefined_b.get(feature))
+ print "%s\t%s" % (feature, ", ".join(files))
+ # check if there are new files that reference the undefined feature
+ else:
+ files = sorted(undefined_b.get(feature) -
+ undefined_a.get(feature))
+ if files:
+ print "%s\t%s" % (feature, ", ".join(files))
+
+ # reset to head
+ execute("git reset --hard %s" % head)
+
+ # default to check the entire tree
+ else:
+ undefined = check_symbols()
+ for feature in sorted(undefined):
+ files = sorted(undefined.get(feature))
+ print "%s\t%s" % (feature, ", ".join(files))
+
+
+def execute(cmd):
+ """Execute %cmd and return stdout. Exit in case of error."""
+ pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
+ (stdout, _) = pop.communicate() # wait until finished
+ if pop.returncode != 0:
+ sys.exit(stdout)
+ return stdout
+
+
+def tree_is_dirty():
+ """Return true if the current working tree is dirty (i.e., if any file has
+ been added, deleted, modified, renamed or copied but not committed)."""
+ stdout = execute("git status --porcelain")
+ for line in stdout:
+ if re.findall(r"[URMADC]{1}", line[:2]):
+ return True
+ return False
+
+
+def get_head():
+ """Return commit hash of current HEAD."""
+ stdout = execute("git rev-parse HEAD")
+ return stdout.strip('\n')
+
+
+def check_symbols():
+ """Find undefined Kconfig symbols and return a dict with the symbol as key
+ and a list of referencing files as value."""
source_files = []
kconfig_files = []
defined_features = set()
referenced_features = dict() # {feature: [files]}
# use 'git ls-files' to get the worklist
- pop = Popen("git ls-files", stdout=PIPE, stderr=STDOUT, shell=True)
- (stdout, _) = pop.communicate() # wait until finished
+ stdout = execute("git ls-files")
if len(stdout) > 0 and stdout[-1] == "\n":
stdout = stdout[:-1]
for gitfile in stdout.rsplit("\n"):
- if ".git" in gitfile or "ChangeLog" in gitfile or \
- ".log" in gitfile or os.path.isdir(gitfile):
+ if ".git" in gitfile or "ChangeLog" in gitfile or \
+ ".log" in gitfile or os.path.isdir(gitfile) or \
+ gitfile.startswith("tools/"):
continue
if REGEX_FILE_KCONFIG.match(gitfile):
kconfig_files.append(gitfile)
@@ -61,7 +190,7 @@ def main():
for kfile in kconfig_files:
parse_kconfig_file(kfile, defined_features, referenced_features)
- print "Undefined symbol used\tFile list"
+ undefined = {} # {feature: [files]}
for feature in sorted(referenced_features):
# filter some false positives
if feature == "FOO" or feature == "BAR" or \
@@ -72,8 +201,8 @@ def main():
# avoid false positives for kernel modules
if feature[:-len("_MODULE")] in defined_features:
continue
- files = referenced_features.get(feature)
- print "%s\t%s" % (feature, ", ".join(files))
+ undefined[feature] = referenced_features.get(feature)
+ return undefined
def parse_source_file(sfile, referenced_features):
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d12435992dea..89b1df4e72ab 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -47,6 +47,8 @@ my $ignore_perl_version = 0;
my $minimum_perl_version = 5.10.0;
my $min_conf_desc_length = 4;
my $spelling_file = "$D/spelling.txt";
+my $codespell = 0;
+my $codespellfile = "/usr/local/share/codespell/dictionary.txt";
sub help {
my ($exitcode) = @_;
@@ -88,6 +90,9 @@ Options:
file. It's your fault if there's no backup or git
--ignore-perl-version override checking of perl version. expect
runtime errors.
+ --codespell Use the codespell dictionary for spelling/typos
+ (default:/usr/local/share/codespell/dictionary.txt)
+ --codespellfile Use this codespell dictionary
-h, --help, --version display this help and exit
When FILE is - read standard input.
@@ -146,6 +151,8 @@ GetOptions(
'ignore-perl-version!' => \$ignore_perl_version,
'debug=s' => \%debug,
'test-only=s' => \$tst_only,
+ 'codespell!' => \$codespell,
+ 'codespellfile=s' => \$codespellfile,
'h|help' => \$help,
'version' => \$help
) or help(1);
@@ -316,6 +323,7 @@ our $Operators = qr{
our $c90_Keywords = qr{do|for|while|if|else|return|goto|continue|switch|default|case|break}x;
+our $BasicType;
our $NonptrType;
our $NonptrTypeMisordered;
our $NonptrTypeWithAttr;
@@ -436,6 +444,14 @@ foreach my $entry (@mode_permission_funcs) {
$mode_perms_search .= $entry->[0];
}
+our $mode_perms_world_writable = qr{
+ S_IWUGO |
+ S_IWOTH |
+ S_IRWXUGO |
+ S_IALLUGO |
+ 0[0-7][0-7][2367]
+}x;
+
our $allowed_asm_includes = qr{(?x:
irq|
memory|
@@ -449,7 +465,6 @@ my $misspellings;
my %spelling_fix;
if (open(my $spelling, '<', $spelling_file)) {
- my @spelling_list;
while (<$spelling>) {
my $line = $_;
@@ -461,21 +476,50 @@ if (open(my $spelling, '<', $spelling_file)) {
my ($suspect, $fix) = split(/\|\|/, $line);
- push(@spelling_list, $suspect);
$spelling_fix{$suspect} = $fix;
}
close($spelling);
- $misspellings = join("|", @spelling_list);
} else {
warn "No typos will be found - file '$spelling_file': $!\n";
}
+if ($codespell) {
+ if (open(my $spelling, '<', $codespellfile)) {
+ while (<$spelling>) {
+ my $line = $_;
+
+ $line =~ s/\s*\n?$//g;
+ $line =~ s/^\s*//g;
+
+ next if ($line =~ m/^\s*#/);
+ next if ($line =~ m/^\s*$/);
+ next if ($line =~ m/, disabled/i);
+
+ $line =~ s/,.*$//;
+
+ my ($suspect, $fix) = split(/->/, $line);
+
+ $spelling_fix{$suspect} = $fix;
+ }
+ close($spelling);
+ } else {
+ warn "No codespell typos will be found - file '$codespellfile': $!\n";
+ }
+}
+
+$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
+
sub build_types {
my $mods = "(?x: \n" . join("|\n ", @modifierList) . "\n)";
my $all = "(?x: \n" . join("|\n ", @typeList) . "\n)";
my $Misordered = "(?x: \n" . join("|\n ", @typeListMisordered) . "\n)";
my $allWithAttr = "(?x: \n" . join("|\n ", @typeListWithAttr) . "\n)";
$Modifier = qr{(?:$Attribute|$Sparse|$mods)};
+ $BasicType = qr{
+ (?:$typeOtherOSTypedefs\b)|
+ (?:$typeTypedefs\b)|
+ (?:${all}\b)
+ }x;
$NonptrType = qr{
(?:$Modifier\s+|const\s+)*
(?:
@@ -1646,7 +1690,7 @@ sub fix_inserted_deleted_lines {
foreach my $old_line (@{$linesRef}) {
my $save_line = 1;
my $line = $old_line; #don't modify the array
- if ($line =~ /^(?:\+\+\+\|\-\-\-)\s+\S+/) { #new filename
+ if ($line =~ /^(?:\+\+\+|\-\-\-)\s+\S+/) { #new filename
$delta_offset = 0;
} elsif ($line =~ /^\@\@ -\d+,\d+ \+\d+,\d+ \@\@/) { #new hunk
$range_last_linenr = $new_linenr;
@@ -1854,6 +1898,7 @@ sub process {
my $in_header_lines = $file ? 0 : 1;
my $in_commit_log = 0; #Scanning lines before patch
+ my $commit_log_long_line = 0;
my $reported_maintainer_file = 0;
my $non_utf8_charset = 0;
@@ -2189,6 +2234,14 @@ sub process {
"Remove Gerrit Change-Id's before submitting upstream.\n" . $herecurr);
}
+# Check for line lengths > 75 in commit log, warn once
+ if ($in_commit_log && !$commit_log_long_line &&
+ length($line) > 75) {
+ WARN("COMMIT_LOG_LONG_LINE",
+ "Possible unwrapped commit description (prefer a maximum 75 chars per line)\n" . $herecurr);
+ $commit_log_long_line = 1;
+ }
+
# Check for git id commit length and improperly formed commit descriptions
if ($in_commit_log && $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i) {
my $init_char = $1;
@@ -2303,8 +2356,9 @@ sub process {
}
# Check for various typo / spelling mistakes
- if (defined($misspellings) && ($in_commit_log || $line =~ /^\+/)) {
- while ($rawline =~ /(?:^|[^a-z@])($misspellings)(?:$|[^a-z@])/gi) {
+ if (defined($misspellings) &&
+ ($in_commit_log || $line =~ /^(?:\+|Subject:)/i)) {
+ while ($rawline =~ /(?:^|[^a-z@])($misspellings)(?:\b|$|[^a-z@])/gi) {
my $typo = $1;
my $typo_fix = $spelling_fix{lc($typo)};
$typo_fix = ucfirst($typo_fix) if ($typo =~ /^[A-Z]/);
@@ -2459,8 +2513,9 @@ sub process {
#line length limit
if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
$rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
- !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:|,|\)\s*;)\s*$/ ||
- $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
+ !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?$String\s*(?:|,|\)\s*;)\s*$/ ||
+ $line =~ /^\+\s*$String\s*(?:\s*|,|\)\s*;)\s*$/ ||
+ $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) &&
$length > $max_line_length)
{
WARN("LONG_LINE",
@@ -2552,8 +2607,15 @@ sub process {
}
}
- if ($line =~ /^\+.*(\w+\s*)?\(\s*$Type\s*\)[ \t]+(?!$Assignment|$Arithmetic|[,;:\?\(\{\}\[\<\>]|&&|\|\||\\$)/ &&
- (!defined($1) || $1 !~ /sizeof\s*/)) {
+# check for space after cast like "(int) foo" or "(struct foo) bar"
+# avoid checking a few false positives:
+# "sizeof(<type>)" or "__alignof__(<type>)"
+# function pointer declarations like "(*foo)(int) = bar;"
+# structure definitions like "(struct foo) { 0 };"
+# multiline macros that define functions
+# known attributes or the __attribute__ keyword
+ if ($line =~ /^\+(.*)\(\s*$Type\s*\)([ \t]++)((?![={]|\\$|$Attribute|__attribute__))/ &&
+ (!defined($1) || $1 !~ /\b(?:sizeof|__alignof__)\s*$/)) {
if (CHK("SPACING",
"No space is necessary after a cast\n" . $herecurr) &&
$fix) {
@@ -3146,6 +3208,18 @@ sub process {
$herecurr);
}
+# check for const <foo> const where <foo> is not a pointer or array type
+ if ($sline =~ /\bconst\s+($BasicType)\s+const\b/) {
+ my $found = $1;
+ if ($sline =~ /\bconst\s+\Q$found\E\s+const\b\s*\*/) {
+ WARN("CONST_CONST",
+ "'const $found const *' should probably be 'const $found * const'\n" . $herecurr);
+ } elsif ($sline !~ /\bconst\s+\Q$found\E\s+const\s+\w+\s*\[/) {
+ WARN("CONST_CONST",
+ "'const $found const' should probably be 'const $found'\n" . $herecurr);
+ }
+ }
+
# check for non-global char *foo[] = {"bar", ...} declarations.
if ($line =~ /^.\s+(?:static\s+|const\s+)?char\s+\*\s*\w+\s*\[\s*\]\s*=\s*\{/) {
WARN("STATIC_CONST_CHAR_ARRAY",
@@ -3153,6 +3227,19 @@ sub process {
$herecurr);
}
+# check for sizeof(foo)/sizeof(foo[0]) that could be ARRAY_SIZE(foo)
+ if ($line =~ m@\bsizeof\s*\(\s*($Lval)\s*\)@) {
+ my $array = $1;
+ if ($line =~ m@\b(sizeof\s*\(\s*\Q$array\E\s*\)\s*/\s*sizeof\s*\(\s*\Q$array\E\s*\[\s*0\s*\]\s*\))@) {
+ my $array_div = $1;
+ if (WARN("ARRAY_SIZE",
+ "Prefer ARRAY_SIZE($array)\n" . $herecurr) &&
+ $fix) {
+ $fixed[$fixlinenr] =~ s/\Q$array_div\E/ARRAY_SIZE($array)/;
+ }
+ }
+ }
+
# check for function declarations without arguments like "int foo()"
if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
if (ERROR("FUNCTION_WITHOUT_ARGS",
@@ -3309,6 +3396,14 @@ sub process {
"Prefer dev_$level(... to dev_printk(KERN_$orig, ...\n" . $herecurr);
}
+# ENOSYS means "bad syscall nr" and nothing else. This will have a small
+# number of false positives, but assembly files are not checked, so at
+# least the arch entry code will not trigger this warning.
+ if ($line =~ /\bENOSYS\b/) {
+ WARN("ENOSYS",
+ "ENOSYS means 'invalid syscall nr' and nothing else\n" . $herecurr);
+ }
+
# function brace can't be on same line, except for #defines of do while,
# or if closed on same line
if (($line=~/$Type\s*$Ident\(.*\).*\s*{/) and
@@ -3565,7 +3660,7 @@ sub process {
# Ignore operators passed as parameters.
if ($op_type ne 'V' &&
- $ca =~ /\s$/ && $cc =~ /^\s*,/) {
+ $ca =~ /\s$/ && $cc =~ /^\s*[,\)]/) {
# # Ignore comments
# } elsif ($op =~ /^$;+$/) {
@@ -3750,6 +3845,14 @@ sub process {
$ok = 1;
}
+ # for asm volatile statements
+ # ignore a colon with another
+ # colon immediately before or after
+ if (($op eq ':') &&
+ ($ca =~ /:$/ || $cc =~ /^:/)) {
+ $ok = 1;
+ }
+
# messages are ERROR, but ?: are CHK
if ($ok == 0) {
my $msg_type = \&ERROR;
@@ -3963,12 +4066,12 @@ sub process {
}
}
-# Return of what appears to be an errno should normally be -'ve
- if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
+# Return of what appears to be an errno should normally be negative
+ if ($sline =~ /\breturn(?:\s*\(+\s*|\s+)(E[A-Z]+)(?:\s*\)+\s*|\s*)[;:,]/) {
my $name = $1;
if ($name ne 'EOF' && $name ne 'ERROR') {
WARN("USE_NEGATIVE_ERRNO",
- "return of an errno should typically be -ve (return -$1)\n" . $herecurr);
+ "return of an errno should typically be negative (ie: return -$1)\n" . $herecurr);
}
}
@@ -4178,7 +4281,8 @@ sub process {
}
}
-#warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
+# warn if <asm/foo.h> is #included and <linux/foo.h> is available and includes
+# itself <asm/foo.h> (uses RAW line)
if ($tree && $rawline =~ m{^.\s*\#\s*include\s*\<asm\/(.*)\.h\>}) {
my $file = "$1.h";
my $checkfile = "include/linux/$file";
@@ -4186,12 +4290,15 @@ sub process {
$realfile ne $checkfile &&
$1 !~ /$allowed_asm_includes/)
{
- if ($realfile =~ m{^arch/}) {
- CHK("ARCH_INCLUDE_LINUX",
- "Consider using #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
- } else {
- WARN("INCLUDE_LINUX",
- "Use #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
+ my $asminclude = `grep -Ec "#include\\s+<asm/$file>" $root/$checkfile`;
+ if ($asminclude > 0) {
+ if ($realfile =~ m{^arch/}) {
+ CHK("ARCH_INCLUDE_LINUX",
+ "Consider using #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
+ } else {
+ WARN("INCLUDE_LINUX",
+ "Use #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
+ }
}
}
}
@@ -4700,6 +4807,16 @@ sub process {
}
}
+# check for __read_mostly with const non-pointer (should just be const)
+ if ($line =~ /\b__read_mostly\b/ &&
+ $line =~ /($Type)\s*$Ident/ && $1 !~ /\*\s*$/ && $1 =~ /\bconst\b/) {
+ if (ERROR("CONST_READ_MOSTLY",
+ "Invalid use of __read_mostly with const type\n" . $herecurr) &&
+ $fix) {
+ $fixed[$fixlinenr] =~ s/\s+__read_mostly\b//;
+ }
+ }
+
# don't use __constant_<foo> functions outside of include/uapi/
if ($realfile !~ m@^include/uapi/@ &&
$line =~ /(__constant_(?:htons|ntohs|[bl]e(?:16|32|64)_to_cpu|cpu_to_[bl]e(?:16|32|64)))\s*\(/) {
@@ -5261,6 +5378,7 @@ sub process {
stacktrace_ops|
sysfs_ops|
tty_operations|
+ uart_ops|
usb_mon_operations|
wd_ops}x;
if ($line !~ /\bconst\b/ &&
@@ -5318,8 +5436,8 @@ sub process {
}
}
- if ($line =~ /debugfs_create_file.*S_IWUGO/ ||
- $line =~ /DEVICE_ATTR.*S_IWUGO/ ) {
+ if ($line =~ /debugfs_create_\w+.*\b$mode_perms_world_writable\b/ ||
+ $line =~ /DEVICE_ATTR.*\b$mode_perms_world_writable\b/) {
WARN("EXPORTED_WORLD_WRITABLE",
"Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
}
diff --git a/scripts/coccinelle/misc/bugon.cocci b/scripts/coccinelle/misc/bugon.cocci
index 3b7eec24fb5a..27c97f1f2767 100644
--- a/scripts/coccinelle/misc/bugon.cocci
+++ b/scripts/coccinelle/misc/bugon.cocci
@@ -57,6 +57,6 @@ coccilib.org.print_todo(p[0], "WARNING use BUG_ON")
p << r.p;
@@
-msg="WARNING: Use BUG_ON"
+msg="WARNING: Use BUG_ON instead of if condition followed by BUG.\nPlease make sure the condition has no side effects (see conditional BUG_ON definition in include/asm-generic/bug.h)"
coccilib.report.print_report(p[0], msg)
diff --git a/scripts/coccinelle/misc/irqf_oneshot.cocci b/scripts/coccinelle/misc/irqf_oneshot.cocci
index 6cfde94be0ef..a24a754ae1d7 100644
--- a/scripts/coccinelle/misc/irqf_oneshot.cocci
+++ b/scripts/coccinelle/misc/irqf_oneshot.cocci
@@ -12,11 +12,13 @@ virtual org
virtual report
@r1@
+expression dev;
expression irq;
expression thread_fn;
expression flags;
position p;
@@
+(
request_threaded_irq@p(irq, NULL, thread_fn,
(
flags | IRQF_ONESHOT
@@ -24,13 +26,24 @@ flags | IRQF_ONESHOT
IRQF_ONESHOT
)
, ...)
+|
+devm_request_threaded_irq@p(dev, irq, NULL, thread_fn,
+(
+flags | IRQF_ONESHOT
+|
+IRQF_ONESHOT
+)
+, ...)
+)
@depends on patch@
+expression dev;
expression irq;
expression thread_fn;
expression flags;
position p != r1.p;
@@
+(
request_threaded_irq@p(irq, NULL, thread_fn,
(
-0
@@ -40,6 +53,17 @@ request_threaded_irq@p(irq, NULL, thread_fn,
+flags | IRQF_ONESHOT
)
, ...)
+|
+devm_request_threaded_irq@p(dev, irq, NULL, thread_fn,
+(
+-0
++IRQF_ONESHOT
+|
+-flags
++flags | IRQF_ONESHOT
+)
+, ...)
+)
@depends on context@
position p != r1.p;
diff --git a/scripts/extract-ikconfig b/scripts/extract-ikconfig
index e1862429ccda..3b42f255e2ba 100755
--- a/scripts/extract-ikconfig
+++ b/scripts/extract-ikconfig
@@ -61,6 +61,7 @@ try_decompress '\3757zXZ\000' abcde unxz
try_decompress 'BZh' xy bunzip2
try_decompress '\135\0\0\0' xxx unlzma
try_decompress '\211\114\132' xy 'lzop -d'
+try_decompress '\002\041\114\030' xyy 'lz4 -d -l'
# Bail out:
echo "$me: Cannot find kernel config." >&2
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index c6d33bd15b04..8fa81e84e295 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -212,15 +212,22 @@ static int symbol_valid(struct sym_entry *s)
"_SDA_BASE_", /* ppc */
"_SDA2_BASE_", /* ppc */
NULL };
+
+ static char *special_suffixes[] = {
+ "_veneer", /* arm */
+ NULL };
+
int i;
- int offset = 1;
+ char *sym_name = (char *)s->sym + 1;
+
if (s->addr < kernel_start_addr)
return 0;
/* skip prefix char */
- if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
- offset++;
+ if (symbol_prefix_char && *sym_name == symbol_prefix_char)
+ sym_name++;
+
/* if --all-symbols is not specified, then symbols outside the text
* and inittext sections are discarded */
@@ -235,22 +242,26 @@ static int symbol_valid(struct sym_entry *s)
* rules.
*/
if ((s->addr == text_range_text->end &&
- strcmp((char *)s->sym + offset,
+ strcmp(sym_name,
text_range_text->end_sym)) ||
(s->addr == text_range_inittext->end &&
- strcmp((char *)s->sym + offset,
+ strcmp(sym_name,
text_range_inittext->end_sym)))
return 0;
}
/* Exclude symbols which vary between passes. */
- if (strstr((char *)s->sym + offset, "_compiled."))
- return 0;
-
for (i = 0; special_symbols[i]; i++)
- if( strcmp((char *)s->sym + offset, special_symbols[i]) == 0 )
+ if (strcmp(sym_name, special_symbols[i]) == 0)
return 0;
+ for (i = 0; special_suffixes[i]; i++) {
+ int l = strlen(sym_name) - strlen(special_suffixes[i]);
+
+ if (l >= 0 && strcmp(sym_name + l, special_suffixes[i]) == 0)
+ return 0;
+ }
+
return 1;
}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 9645c0739386..d9b1fef0c67e 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -2,7 +2,7 @@
# Kernel configuration targets
# These targets are used from top-level makefile
-PHONY += oldconfig xconfig gconfig menuconfig config silentoldconfig update-po-config \
+PHONY += xconfig gconfig menuconfig config silentoldconfig update-po-config \
localmodconfig localyesconfig
ifdef KBUILD_KCONFIG
@@ -11,30 +11,31 @@ else
Kconfig := Kconfig
endif
+ifeq ($(quiet),silent_)
+silent := -s
+endif
+
# We need this, in case the user has it in its environment
unexport CONFIG_
xconfig: $(obj)/qconf
- $< $(Kconfig)
+ $< $(silent) $(Kconfig)
gconfig: $(obj)/gconf
- $< $(Kconfig)
+ $< $(silent) $(Kconfig)
menuconfig: $(obj)/mconf
- $< $(Kconfig)
+ $< $(silent) $(Kconfig)
config: $(obj)/conf
- $< --oldaskconfig $(Kconfig)
+ $< $(silent) --oldaskconfig $(Kconfig)
nconfig: $(obj)/nconf
- $< $(Kconfig)
-
-oldconfig: $(obj)/conf
- $< --$@ $(Kconfig)
+ $< $(silent) $(Kconfig)
silentoldconfig: $(obj)/conf
$(Q)mkdir -p include/config include/generated
- $< --$@ $(Kconfig)
+ $< $(silent) --$@ $(Kconfig)
localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
$(Q)mkdir -p include/config include/generated
@@ -43,18 +44,18 @@ localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
cmp -s .tmp.config .config || \
(mv -f .config .config.old.1; \
mv -f .tmp.config .config; \
- $(obj)/conf --silentoldconfig $(Kconfig); \
+ $(obj)/conf $(silent) --silentoldconfig $(Kconfig); \
mv -f .config.old.1 .config.old) \
else \
mv -f .tmp.config .config; \
- $(obj)/conf --silentoldconfig $(Kconfig); \
+ $(obj)/conf $(silent) --silentoldconfig $(Kconfig); \
fi
$(Q)rm -f .tmp.config
# Create new linux.pot file
# Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
- $(Q)echo " GEN config.pot"
+ $(Q)$(kecho) " GEN config.pot"
$(Q)xgettext --default-domain=linux \
--add-comments --keyword=_ --keyword=N_ \
--from-code=UTF-8 \
@@ -65,61 +66,58 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
$(Q)(for i in `ls $(srctree)/arch/*/Kconfig \
$(srctree)/arch/*/um/Kconfig`; \
do \
- echo " GEN $$i"; \
+ $(kecho) " GEN $$i"; \
$(obj)/kxgettext $$i \
>> $(obj)/config.pot; \
done )
- $(Q)echo " GEN linux.pot"
+ $(Q)$(kecho) " GEN linux.pot"
$(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
--output $(obj)/linux.pot
$(Q)rm -f $(obj)/config.pot
-PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
-
-allnoconfig allyesconfig allmodconfig alldefconfig randconfig: $(obj)/conf
- $< --$@ $(Kconfig)
+# These targets map 1:1 to the commandline options of 'conf'
+simple-targets := oldconfig allnoconfig allyesconfig allmodconfig \
+ alldefconfig randconfig listnewconfig olddefconfig
+PHONY += $(simple-targets)
-PHONY += listnewconfig olddefconfig oldnoconfig savedefconfig defconfig
+$(simple-targets): $(obj)/conf
+ $< $(silent) --$@ $(Kconfig)
-listnewconfig olddefconfig: $(obj)/conf
- $< --$@ $(Kconfig)
+PHONY += oldnoconfig savedefconfig defconfig
# oldnoconfig is an alias of olddefconfig, because people already are dependent
# on its behavior(sets new symbols to their default value but not 'n') with the
# counter-intuitive name.
-oldnoconfig: $(obj)/conf
- $< --olddefconfig $(Kconfig)
+oldnoconfig: olddefconfig
savedefconfig: $(obj)/conf
- $< --$@=defconfig $(Kconfig)
+ $< $(silent) --$@=defconfig $(Kconfig)
defconfig: $(obj)/conf
ifeq ($(KBUILD_DEFCONFIG),)
- $< --defconfig $(Kconfig)
+ $< $(silent) --defconfig $(Kconfig)
else
- @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
- $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
+ @$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
+ $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
endif
%_defconfig: $(obj)/conf
- $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
+ $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
-configfiles=$(wildcard $(srctree)/kernel/configs/$(1).config $(srctree)/arch/$(SRCARCH)/configs/$(1).config)
+configfiles=$(wildcard $(srctree)/kernel/configs/$@ $(srctree)/arch/$(SRCARCH)/configs/$@)
-define mergeconfig
-$(if $(wildcard $(objtree)/.config),, $(error You need an existing .config for this target))
-$(if $(call configfiles,$(1)),, $(error No configuration exists for this target on this architecture))
-$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m -O $(objtree) $(objtree)/.config $(call configfiles,$(1))
-$(Q)yes "" | $(MAKE) -f $(srctree)/Makefile oldconfig
-endef
+%.config: $(obj)/conf
+ $(if $(call configfiles),, $(error No configuration exists for this target on this architecture))
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(configfiles)
+ +$(Q)yes "" | $(MAKE) -f $(srctree)/Makefile oldconfig
PHONY += kvmconfig
-kvmconfig:
- $(call mergeconfig,kvm_guest)
+kvmconfig: kvm_guest.config
+ @:
PHONY += tinyconfig
-tinyconfig: allnoconfig
- $(call mergeconfig,tiny)
+tinyconfig:
+ $(Q)$(MAKE) -f $(srctree)/Makefile allnoconfig tiny.config
# Help text used by make help
help:
@@ -221,7 +219,7 @@ $(obj)/.tmp_qtcheck: $(src)/Makefile
# QT needs some extra effort...
$(obj)/.tmp_qtcheck:
- @set -e; echo " CHECK qt"; dir=""; pkg=""; \
+ @set -e; $(kecho) " CHECK qt"; dir=""; pkg=""; \
if ! pkg-config --exists QtCore 2> /dev/null; then \
echo "* Unable to find the QT4 tool qmake. Trying to use QT3"; \
pkg-config --exists qt 2> /dev/null && pkg=qt; \
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index fef75fc756f4..6c204318bc94 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -471,7 +471,7 @@ static struct option long_opts[] = {
static void conf_usage(const char *progname)
{
- printf("Usage: %s [option] <kconfig-file>\n", progname);
+ printf("Usage: %s [-s] [option] <kconfig-file>\n", progname);
printf("[option] is _one_ of the following:\n");
printf(" --listnewconfig List new options\n");
printf(" --oldaskconfig Start a new configuration using a line-oriented program\n");
@@ -501,7 +501,11 @@ int main(int ac, char **av)
tty_stdio = isatty(0) && isatty(1) && isatty(2);
- while ((opt = getopt_long(ac, av, "", long_opts, NULL)) != -1) {
+ while ((opt = getopt_long(ac, av, "s", long_opts, NULL)) != -1) {
+ if (opt == 's') {
+ conf_set_message_callback(NULL);
+ continue;
+ }
input_mode = (enum input_mode)opt;
switch (opt) {
case silentoldconfig:
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 28df18dd1147..c814f57672fc 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -16,6 +16,11 @@
#include "lkc.h"
+struct conf_printer {
+ void (*print_symbol)(FILE *, struct symbol *, const char *, void *);
+ void (*print_comment)(FILE *, const char *, void *);
+};
+
static void conf_warning(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index d6626521f9b9..fb0a2a286dca 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -11,6 +11,12 @@
#define DEBUG_EXPR 0
+static int expr_eq(struct expr *e1, struct expr *e2);
+static struct expr *expr_eliminate_yn(struct expr *e);
+static struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2);
+static struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2);
+static void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2);
+
struct expr *expr_alloc_symbol(struct symbol *sym)
{
struct expr *e = xcalloc(1, sizeof(*e));
@@ -186,7 +192,7 @@ void expr_eliminate_eq(struct expr **ep1, struct expr **ep2)
#undef e1
#undef e2
-int expr_eq(struct expr *e1, struct expr *e2)
+static int expr_eq(struct expr *e1, struct expr *e2)
{
int res, old_count;
@@ -228,7 +234,7 @@ int expr_eq(struct expr *e1, struct expr *e2)
return 0;
}
-struct expr *expr_eliminate_yn(struct expr *e)
+static struct expr *expr_eliminate_yn(struct expr *e)
{
struct expr *tmp;
@@ -823,7 +829,7 @@ bool expr_depends_symbol(struct expr *dep, struct symbol *sym)
return false;
}
-struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2)
+static struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2)
{
struct expr *tmp = NULL;
expr_extract_eq(E_AND, &tmp, ep1, ep2);
@@ -834,7 +840,7 @@ struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2)
return tmp;
}
-struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2)
+static struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2)
{
struct expr *tmp = NULL;
expr_extract_eq(E_OR, &tmp, ep1, ep2);
@@ -845,7 +851,7 @@ struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2)
return tmp;
}
-void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2)
+static void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2)
{
#define e1 (*ep1)
#define e2 (*ep2)
@@ -976,11 +982,8 @@ tristate expr_calc_value(struct expr *e)
}
}
-int expr_compare_type(enum expr_type t1, enum expr_type t2)
+static int expr_compare_type(enum expr_type t1, enum expr_type t2)
{
-#if 0
- return 1;
-#else
if (t1 == t2)
return 0;
switch (t1) {
@@ -1005,7 +1008,6 @@ int expr_compare_type(enum expr_type t1, enum expr_type t2)
}
printf("[%dgt%d?]", t1, t2);
return 0;
-#endif
}
static inline struct expr *
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 412ea8a2abb8..a2fc96a2bd2c 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -205,18 +205,13 @@ struct expr *expr_alloc_and(struct expr *e1, struct expr *e2);
struct expr *expr_alloc_or(struct expr *e1, struct expr *e2);
struct expr *expr_copy(const struct expr *org);
void expr_free(struct expr *e);
-int expr_eq(struct expr *e1, struct expr *e2);
void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
tristate expr_calc_value(struct expr *e);
-struct expr *expr_eliminate_yn(struct expr *e);
struct expr *expr_trans_bool(struct expr *e);
struct expr *expr_eliminate_dups(struct expr *e);
struct expr *expr_transform(struct expr *e);
int expr_contains_symbol(struct expr *dep, struct symbol *sym);
bool expr_depends_symbol(struct expr *dep, struct symbol *sym);
-struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2);
-struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2);
-void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2);
struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym);
struct expr *expr_simplify_unmet_dep(struct expr *e1, struct expr *e2);
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index d0a35b21f308..26d208b435a0 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -169,14 +169,6 @@ void init_main_window(const gchar * glade_file)
style = gtk_widget_get_style(main_wnd);
widget = glade_xml_get_widget(xml, "toolbar1");
-#if 0 /* Use stock Gtk icons instead */
- replace_button_icon(xml, main_wnd->window, style,
- "button1", (gchar **) xpm_back);
- replace_button_icon(xml, main_wnd->window, style,
- "button2", (gchar **) xpm_load);
- replace_button_icon(xml, main_wnd->window, style,
- "button3", (gchar **) xpm_save);
-#endif
replace_button_icon(xml, main_wnd->window, style,
"button4", (gchar **) xpm_single_view);
replace_button_icon(xml, main_wnd->window, style,
@@ -184,22 +176,6 @@ void init_main_window(const gchar * glade_file)
replace_button_icon(xml, main_wnd->window, style,
"button6", (gchar **) xpm_tree_view);
-#if 0
- switch (view_mode) {
- case SINGLE_VIEW:
- widget = glade_xml_get_widget(xml, "button4");
- g_signal_emit_by_name(widget, "clicked");
- break;
- case SPLIT_VIEW:
- widget = glade_xml_get_widget(xml, "button5");
- g_signal_emit_by_name(widget, "clicked");
- break;
- case FULL_VIEW:
- widget = glade_xml_get_widget(xml, "button6");
- g_signal_emit_by_name(widget, "clicked");
- break;
- }
-#endif
txtbuf = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w));
tag1 = gtk_text_buffer_create_tag(txtbuf, "mytag1",
"foreground", "red",
@@ -1498,9 +1474,12 @@ int main(int ac, char *av[])
case 'a':
//showAll = 1;
break;
+ case 's':
+ conf_set_message_callback(NULL);
+ break;
case 'h':
case '?':
- printf("%s <config>\n", av[0]);
+ printf("%s [-s] <config>\n", av[0]);
exit(0);
}
name = av[2];
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index d5daa7af8b49..91ca126ea080 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -21,9 +21,7 @@ static inline char *bind_textdomain_codeset(const char *dn, char *c) { return c;
extern "C" {
#endif
-#define P(name,type,arg) extern type name arg
#include "lkc_proto.h"
-#undef P
#define SRCTREE "srctree"
@@ -70,9 +68,6 @@ struct kconf_id {
enum symbol_type stype;
};
-extern int zconfdebug;
-
-int zconfparse(void);
void zconfdump(FILE *out);
void zconf_starthelp(void);
FILE *zconf_fopen(const char *name);
@@ -90,11 +85,6 @@ void sym_add_change_count(int count);
bool conf_set_all_new_symbols(enum conf_def_mode mode);
void set_all_choice_values(struct symbol *csym);
-struct conf_printer {
- void (*print_symbol)(FILE *, struct symbol *, const char *, void *);
- void (*print_comment)(FILE *, const char *, void *);
-};
-
/* confdata.c and expr.c */
static inline void xfwrite(const void *str, size_t len, size_t count, FILE *out)
{
@@ -113,7 +103,6 @@ void menu_add_entry(struct symbol *sym);
void menu_end_entry(void);
void menu_add_dep(struct expr *dep);
void menu_add_visibility(struct expr *dep);
-struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep);
struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
@@ -137,7 +126,6 @@ struct gstr {
int max_width;
};
struct gstr str_new(void);
-struct gstr str_assign(const char *s);
void str_free(struct gstr *gs);
void str_append(struct gstr *gs, const char *s);
void str_printf(struct gstr *gs, const char *fmt, ...);
@@ -148,8 +136,6 @@ extern struct expr *sym_env_list;
void sym_init(void);
void sym_clear_all_valid(void);
-void sym_set_all_changed(void);
-void sym_set_changed(struct symbol *sym);
struct symbol *sym_choice_default(struct symbol *sym);
const char *sym_get_string_default(struct symbol *sym);
struct symbol *sym_check_deps(struct symbol *sym);
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index ecdb9659b67d..d5398718ec2a 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -1,57 +1,52 @@
#include <stdarg.h>
/* confdata.c */
-P(conf_parse,void,(const char *name));
-P(conf_read,int,(const char *name));
-P(conf_read_simple,int,(const char *name, int));
-P(conf_write_defconfig,int,(const char *name));
-P(conf_write,int,(const char *name));
-P(conf_write_autoconf,int,(void));
-P(conf_get_changed,bool,(void));
-P(conf_set_changed_callback, void,(void (*fn)(void)));
-P(conf_set_message_callback, void,(void (*fn)(const char *fmt, va_list ap)));
+void conf_parse(const char *name);
+int conf_read(const char *name);
+int conf_read_simple(const char *name, int);
+int conf_write_defconfig(const char *name);
+int conf_write(const char *name);
+int conf_write_autoconf(void);
+bool conf_get_changed(void);
+void conf_set_changed_callback(void (*fn)(void));
+void conf_set_message_callback(void (*fn)(const char *fmt, va_list ap));
/* menu.c */
-P(rootmenu,struct menu,);
+extern struct menu rootmenu;
-P(menu_is_empty, bool, (struct menu *menu));
-P(menu_is_visible, bool, (struct menu *menu));
-P(menu_has_prompt, bool, (struct menu *menu));
-P(menu_get_prompt,const char *,(struct menu *menu));
-P(menu_get_root_menu,struct menu *,(struct menu *menu));
-P(menu_get_parent_menu,struct menu *,(struct menu *menu));
-P(menu_has_help,bool,(struct menu *menu));
-P(menu_get_help,const char *,(struct menu *menu));
-P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct list_head
- *head));
-P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct list_head
- *head));
-P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help));
+bool menu_is_empty(struct menu *menu);
+bool menu_is_visible(struct menu *menu);
+bool menu_has_prompt(struct menu *menu);
+const char * menu_get_prompt(struct menu *menu);
+struct menu * menu_get_root_menu(struct menu *menu);
+struct menu * menu_get_parent_menu(struct menu *menu);
+bool menu_has_help(struct menu *menu);
+const char * menu_get_help(struct menu *menu);
+struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head);
+void menu_get_ext_help(struct menu *menu, struct gstr *help);
/* symbol.c */
-P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
+extern struct symbol * symbol_hash[SYMBOL_HASHSIZE];
-P(sym_lookup,struct symbol *,(const char *name, int flags));
-P(sym_find,struct symbol *,(const char *name));
-P(sym_expand_string_value,const char *,(const char *in));
-P(sym_escape_string_value, const char *,(const char *in));
-P(sym_re_search,struct symbol **,(const char *pattern));
-P(sym_type_name,const char *,(enum symbol_type type));
-P(sym_calc_value,void,(struct symbol *sym));
-P(sym_get_type,enum symbol_type,(struct symbol *sym));
-P(sym_tristate_within_range,bool,(struct symbol *sym,tristate tri));
-P(sym_set_tristate_value,bool,(struct symbol *sym,tristate tri));
-P(sym_toggle_tristate_value,tristate,(struct symbol *sym));
-P(sym_string_valid,bool,(struct symbol *sym, const char *newval));
-P(sym_string_within_range,bool,(struct symbol *sym, const char *str));
-P(sym_set_string_value,bool,(struct symbol *sym, const char *newval));
-P(sym_is_changable,bool,(struct symbol *sym));
-P(sym_get_choice_prop,struct property *,(struct symbol *sym));
-P(sym_get_default_prop,struct property *,(struct symbol *sym));
-P(sym_get_string_value,const char *,(struct symbol *sym));
+struct symbol * sym_lookup(const char *name, int flags);
+struct symbol * sym_find(const char *name);
+const char * sym_expand_string_value(const char *in);
+const char * sym_escape_string_value(const char *in);
+struct symbol ** sym_re_search(const char *pattern);
+const char * sym_type_name(enum symbol_type type);
+void sym_calc_value(struct symbol *sym);
+enum symbol_type sym_get_type(struct symbol *sym);
+bool sym_tristate_within_range(struct symbol *sym,tristate tri);
+bool sym_set_tristate_value(struct symbol *sym,tristate tri);
+tristate sym_toggle_tristate_value(struct symbol *sym);
+bool sym_string_valid(struct symbol *sym, const char *newval);
+bool sym_string_within_range(struct symbol *sym, const char *str);
+bool sym_set_string_value(struct symbol *sym, const char *newval);
+bool sym_is_changable(struct symbol *sym);
+struct property * sym_get_choice_prop(struct symbol *sym);
+const char * sym_get_string_value(struct symbol *sym);
-P(prop_get_type_name,const char *,(enum prop_type type));
+const char * prop_get_type_name(enum prop_type type);
/* expr.c */
-P(expr_compare_type,int,(enum expr_type t1, enum expr_type t2));
-P(expr_print,void,(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken));
+void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken);
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 4dd37552abc2..315ce2c7cb9d 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -279,6 +279,7 @@ static int child_count;
static int single_menu_mode;
static int show_all_options;
static int save_and_exit;
+static int silent;
static void conf(struct menu *menu, struct menu *active_menu);
static void conf_choice(struct menu *menu);
@@ -777,10 +778,12 @@ static void conf_message_callback(const char *fmt, va_list ap)
char buf[PATH_MAX+1];
vsnprintf(buf, sizeof(buf), fmt, ap);
- if (save_and_exit)
- printf("%s", buf);
- else
+ if (save_and_exit) {
+ if (!silent)
+ printf("%s", buf);
+ } else {
show_textbox(NULL, buf, 6, 60);
+ }
}
static void show_help(struct menu *menu)
@@ -977,16 +980,18 @@ static int handle_exit(void)
}
/* fall through */
case -1:
- printf(_("\n\n"
- "*** End of the configuration.\n"
- "*** Execute 'make' to start the build or try 'make help'."
- "\n\n"));
+ if (!silent)
+ printf(_("\n\n"
+ "*** End of the configuration.\n"
+ "*** Execute 'make' to start the build or try 'make help'."
+ "\n\n"));
res = 0;
break;
default:
- fprintf(stderr, _("\n\n"
- "Your configuration changes were NOT saved."
- "\n\n"));
+ if (!silent)
+ fprintf(stderr, _("\n\n"
+ "Your configuration changes were NOT saved."
+ "\n\n"));
if (res != KEY_ESC)
res = 0;
}
@@ -1010,6 +1015,12 @@ int main(int ac, char **av)
signal(SIGINT, sig_handler);
+ if (ac > 1 && strcmp(av[1], "-s") == 0) {
+ silent = 1;
+ /* Silence conf_read() until the real callback is set up */
+ conf_set_message_callback(NULL);
+ av++;
+ }
conf_parse(av[1]);
conf_read(NULL);
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 72c9dba84c5d..b05cc3d4a9be 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -125,7 +125,7 @@ void menu_set_type(int type)
sym_type_name(sym->type), sym_type_name(type));
}
-struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep)
+static struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep)
{
struct property *prop = prop_alloc(type, current_entry->sym);
@@ -615,7 +615,7 @@ static struct property *get_symbol_prop(struct symbol *sym)
/*
* head is optional and may be NULL
*/
-void get_symbol_str(struct gstr *r, struct symbol *sym,
+static void get_symbol_str(struct gstr *r, struct symbol *sym,
struct list_head *head)
{
bool hit;
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index 2ab91b9b100d..ec8e20350a64 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -35,7 +35,7 @@ usage() {
echo " -O dir to put generated output files"
}
-MAKE=true
+RUNMAKE=true
ALLTARGET=alldefconfig
WARNREDUN=false
OUTPUT=.
@@ -48,7 +48,7 @@ while true; do
continue
;;
"-m")
- MAKE=false
+ RUNMAKE=false
shift
continue
;;
@@ -85,6 +85,11 @@ fi
INITFILE=$1
shift;
+if [ ! -r "$INITFILE" ]; then
+ echo "The base file '$INITFILE' does not exist. Exit." >&2
+ exit 1
+fi
+
MERGE_LIST=$*
SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p"
TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
@@ -92,31 +97,29 @@ TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
echo "Using $INITFILE as base"
cat $INITFILE > $TMP_FILE
-# Merge files, printing warnings on overrided values
+# Merge files, printing warnings on overridden values
for MERGE_FILE in $MERGE_LIST ; do
echo "Merging $MERGE_FILE"
CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE)
for CFG in $CFG_LIST ; do
- grep -q -w $CFG $TMP_FILE
- if [ $? -eq 0 ] ; then
- PREV_VAL=$(grep -w $CFG $TMP_FILE)
- NEW_VAL=$(grep -w $CFG $MERGE_FILE)
- if [ "x$PREV_VAL" != "x$NEW_VAL" ] ; then
+ grep -q -w $CFG $TMP_FILE || continue
+ PREV_VAL=$(grep -w $CFG $TMP_FILE)
+ NEW_VAL=$(grep -w $CFG $MERGE_FILE)
+ if [ "x$PREV_VAL" != "x$NEW_VAL" ] ; then
echo Value of $CFG is redefined by fragment $MERGE_FILE:
echo Previous value: $PREV_VAL
echo New value: $NEW_VAL
echo
- elif [ "$WARNREDUN" = "true" ]; then
+ elif [ "$WARNREDUN" = "true" ]; then
echo Value of $CFG is redundant by fragment $MERGE_FILE:
- fi
- sed -i "/$CFG[ =]/d" $TMP_FILE
fi
+ sed -i "/$CFG[ =]/d" $TMP_FILE
done
cat $MERGE_FILE >> $TMP_FILE
done
-if [ "$MAKE" = "false" ]; then
+if [ "$RUNMAKE" = "false" ]; then
cp $TMP_FILE $OUTPUT/.config
echo "#"
echo "# merged configuration written to $OUTPUT/.config (needs make)"
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 984489ef2b46..d42d534a66cd 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1482,6 +1482,11 @@ int main(int ac, char **av)
bindtextdomain(PACKAGE, LOCALEDIR);
textdomain(PACKAGE);
+ if (ac > 1 && strcmp(av[1], "-s") == 0) {
+ /* Silence conf_read() until the real callback is set up */
+ conf_set_message_callback(NULL);
+ av++;
+ }
conf_parse(av[1]);
conf_read(NULL);
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 9d3b04b0769c..c3bb7fe8dfa6 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1746,7 +1746,7 @@ static const char *progname;
static void usage(void)
{
- printf(_("%s <config>\n"), progname);
+ printf(_("%s [-s] <config>\n"), progname);
exit(0);
}
@@ -1762,6 +1762,9 @@ int main(int ac, char** av)
configApp = new QApplication(ac, av);
if (ac > 1 && av[1][0] == '-') {
switch (av[1][1]) {
+ case 's':
+ conf_set_message_callback(NULL);
+ break;
case 'h':
case '?':
usage();
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 7caabdb51c64..6731377f9bb2 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -112,7 +112,7 @@ struct property *sym_get_env_prop(struct symbol *sym)
return NULL;
}
-struct property *sym_get_default_prop(struct symbol *sym)
+static struct property *sym_get_default_prop(struct symbol *sym)
{
struct property *prop;
@@ -186,6 +186,26 @@ static void sym_validate_range(struct symbol *sym)
sym->curr.val = strdup(str);
}
+static void sym_set_changed(struct symbol *sym)
+{
+ struct property *prop;
+
+ sym->flags |= SYMBOL_CHANGED;
+ for (prop = sym->prop; prop; prop = prop->next) {
+ if (prop->menu)
+ prop->menu->flags |= MENU_CHANGED;
+ }
+}
+
+static void sym_set_all_changed(void)
+{
+ struct symbol *sym;
+ int i;
+
+ for_all_symbols(i, sym)
+ sym_set_changed(sym);
+}
+
static void sym_calc_visibility(struct symbol *sym)
{
struct property *prop;
@@ -451,26 +471,6 @@ void sym_clear_all_valid(void)
sym_calc_value(modules_sym);
}
-void sym_set_changed(struct symbol *sym)
-{
- struct property *prop;
-
- sym->flags |= SYMBOL_CHANGED;
- for (prop = sym->prop; prop; prop = prop->next) {
- if (prop->menu)
- prop->menu->flags |= MENU_CHANGED;
- }
-}
-
-void sym_set_all_changed(void)
-{
- struct symbol *sym;
- int i;
-
- for_all_symbols(i, sym)
- sym_set_changed(sym);
-}
-
bool sym_tristate_within_range(struct symbol *sym, tristate val)
{
int type = sym_get_type(sym);
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index 94f9c83e324f..0e76042473cc 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -88,16 +88,6 @@ struct gstr str_new(void)
return gs;
}
-/* Allocate and assign growable string */
-struct gstr str_assign(const char *s)
-{
- struct gstr gs;
- gs.s = strdup(s);
- gs.len = strlen(s) + 1;
- gs.max_width = 0;
- return gs;
-}
-
/* Free storage for growable string */
void str_free(struct gstr *gs)
{
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index f282516acc7b..fce36d0f6898 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -168,6 +168,9 @@ int main(void)
DEVID_FIELD(amba_id, id);
DEVID_FIELD(amba_id, mask);
+ DEVID(mips_cdmm_device_id);
+ DEVID_FIELD(mips_cdmm_device_id, type);
+
DEVID(x86_cpu_id);
DEVID_FIELD(x86_cpu_id, feature);
DEVID_FIELD(x86_cpu_id, family);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index e614ef689eee..78691d51a479 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1109,6 +1109,22 @@ static int do_amba_entry(const char *filename,
}
ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
+/*
+ * looks like: "mipscdmm:tN"
+ *
+ * N is exactly 2 digits, where each is an upper-case hex digit, or
+ * a ? or [] pattern matching exactly one digit.
+ */
+static int do_mips_cdmm_entry(const char *filename,
+ void *symval, char *alias)
+{
+ DEF_FIELD(symval, mips_cdmm_device_id, type);
+
+ sprintf(alias, "mipscdmm:t%02X*", type);
+ return 1;
+}
+ADD_TO_DEVTABLE("mipscdmm", mips_cdmm_device_id, do_mips_cdmm_entry);
+
/* LOOKS like cpu:type:x86,venVVVVfamFFFFmodMMMM:feature:*,FEAT,*
* All fields are numbers. It would be nicer to use strings for vendor
* and feature, but getting those out of the build system here is too
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index d439856f8176..91ee1b2e0f9a 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -776,6 +776,7 @@ static const char *sech_name(struct elf_info *elf, Elf_Shdr *sechdr)
* "foo" will match an exact string equal to "foo"
* "*foo" will match a string that ends with "foo"
* "foo*" will match a string that begins with "foo"
+ * "*foo*" will match a string that contains "foo"
*/
static int match(const char *sym, const char * const pat[])
{
@@ -784,8 +785,17 @@ static int match(const char *sym, const char * const pat[])
p = *pat++;
const char *endp = p + strlen(p) - 1;
+ /* "*foo*" */
+ if (*p == '*' && *endp == '*') {
+ char *here, *bare = strndup(p + 1, strlen(p) - 2);
+
+ here = strstr(sym, bare);
+ free(bare);
+ if (here != NULL)
+ return 1;
+ }
/* "*foo" */
- if (*p == '*') {
+ else if (*p == '*') {
if (strrcmp(sym, p + 1) == 0)
return 1;
}
@@ -873,7 +883,10 @@ static void check_section(const char *modname, struct elf_info *elf,
#define ALL_EXIT_SECTIONS EXIT_SECTIONS, ALL_XXXEXIT_SECTIONS
#define DATA_SECTIONS ".data", ".data.rel"
-#define TEXT_SECTIONS ".text", ".text.unlikely"
+#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
+ ".kprobes.text"
+#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
+ ".fixup", ".entry.text", ".exception.text", ".text.*"
#define INIT_SECTIONS ".init.*"
#define MEM_INIT_SECTIONS ".meminit.*"
@@ -881,6 +894,9 @@ static void check_section(const char *modname, struct elf_info *elf,
#define EXIT_SECTIONS ".exit.*"
#define MEM_EXIT_SECTIONS ".memexit.*"
+#define ALL_TEXT_SECTIONS ALL_INIT_TEXT_SECTIONS, ALL_EXIT_TEXT_SECTIONS, \
+ TEXT_SECTIONS, OTHER_TEXT_SECTIONS
+
/* init data sections */
static const char *const init_data_sections[] =
{ ALL_INIT_DATA_SECTIONS, NULL };
@@ -892,6 +908,9 @@ static const char *const init_sections[] = { ALL_INIT_SECTIONS, NULL };
static const char *const init_exit_sections[] =
{ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL };
+/* all text sections */
+static const char *const text_sections[] = { ALL_TEXT_SECTIONS, NULL };
+
/* data section */
static const char *const data_sections[] = { DATA_SECTIONS, NULL };
@@ -910,6 +929,7 @@ static const char *const data_sections[] = { DATA_SECTIONS, NULL };
static const char *const head_sections[] = { ".head.text*", NULL };
static const char *const linker_symbols[] =
{ "__init_begin", "_sinittext", "_einittext", NULL };
+static const char *const optim_symbols[] = { "*.constprop.*", NULL };
enum mismatch {
TEXT_TO_ANY_INIT,
@@ -921,34 +941,65 @@ enum mismatch {
ANY_INIT_TO_ANY_EXIT,
ANY_EXIT_TO_ANY_INIT,
EXPORT_TO_INIT_EXIT,
+ EXTABLE_TO_NON_TEXT,
};
+/**
+ * Describe how to match sections on different criteria:
+ *
+ * @fromsec: Array of sections to be matched.
+ *
+ * @bad_tosec: Relocations from a section in @fromsec to a section in
+ * this array are forbidden (black-list). Can be empty.
+ *
+ * @good_tosec: Relocations applied to a section in @fromsec must be
+ * targeting sections in this array (white-list). Can be empty.
+ *
+ * @mismatch: Type of mismatch.
+ *
+ * @symbol_white_list: Do not match a relocation to a symbol in this list
+ * even if it is targeting a section in @bad_tosec.
+ *
+ * @handler: Specific handler to call when a match is found. If NULL,
+ * default_mismatch_handler() will be called.
+ *
+ */
struct sectioncheck {
const char *fromsec[20];
- const char *tosec[20];
+ const char *bad_tosec[20];
+ const char *good_tosec[20];
enum mismatch mismatch;
const char *symbol_white_list[20];
+ void (*handler)(const char *modname, struct elf_info *elf,
+ const struct sectioncheck* const mismatch,
+ Elf_Rela *r, Elf_Sym *sym, const char *fromsec);
+
};
+static void extable_mismatch_handler(const char *modname, struct elf_info *elf,
+ const struct sectioncheck* const mismatch,
+ Elf_Rela *r, Elf_Sym *sym,
+ const char *fromsec);
+
static const struct sectioncheck sectioncheck[] = {
/* Do not reference init/exit code/data from
* normal code and data
*/
{
.fromsec = { TEXT_SECTIONS, NULL },
- .tosec = { ALL_INIT_SECTIONS, NULL },
+ .bad_tosec = { ALL_INIT_SECTIONS, NULL },
.mismatch = TEXT_TO_ANY_INIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
- .tosec = { ALL_XXXINIT_SECTIONS, NULL },
+ .bad_tosec = { ALL_XXXINIT_SECTIONS, NULL },
.mismatch = DATA_TO_ANY_INIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
- .tosec = { INIT_SECTIONS, NULL },
+ .bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = DATA_TO_ANY_INIT,
.symbol_white_list = {
"*_template", "*_timer", "*_sht", "*_ops",
@@ -957,56 +1008,66 @@ static const struct sectioncheck sectioncheck[] = {
},
{
.fromsec = { TEXT_SECTIONS, NULL },
- .tosec = { ALL_EXIT_SECTIONS, NULL },
+ .bad_tosec = { ALL_EXIT_SECTIONS, NULL },
.mismatch = TEXT_TO_ANY_EXIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
- .tosec = { ALL_EXIT_SECTIONS, NULL },
+ .bad_tosec = { ALL_EXIT_SECTIONS, NULL },
.mismatch = DATA_TO_ANY_EXIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference init code/data from meminit code/data */
{
.fromsec = { ALL_XXXINIT_SECTIONS, NULL },
- .tosec = { INIT_SECTIONS, NULL },
+ .bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = XXXINIT_TO_SOME_INIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference exit code/data from memexit code/data */
{
.fromsec = { ALL_XXXEXIT_SECTIONS, NULL },
- .tosec = { EXIT_SECTIONS, NULL },
+ .bad_tosec = { EXIT_SECTIONS, NULL },
.mismatch = XXXEXIT_TO_SOME_EXIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not use exit code/data from init code */
{
.fromsec = { ALL_INIT_SECTIONS, NULL },
- .tosec = { ALL_EXIT_SECTIONS, NULL },
+ .bad_tosec = { ALL_EXIT_SECTIONS, NULL },
.mismatch = ANY_INIT_TO_ANY_EXIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not use init code/data from exit code */
{
.fromsec = { ALL_EXIT_SECTIONS, NULL },
- .tosec = { ALL_INIT_SECTIONS, NULL },
+ .bad_tosec = { ALL_INIT_SECTIONS, NULL },
.mismatch = ANY_EXIT_TO_ANY_INIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { ALL_PCI_INIT_SECTIONS, NULL },
- .tosec = { INIT_SECTIONS, NULL },
+ .bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = ANY_INIT_TO_ANY_EXIT,
.symbol_white_list = { NULL },
},
/* Do not export init/exit functions or data */
{
.fromsec = { "__ksymtab*", NULL },
- .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
+ .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
.mismatch = EXPORT_TO_INIT_EXIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
+},
+{
+ .fromsec = { "__ex_table", NULL },
+ /* If you're adding any new black-listed sections in here, consider
+ * adding a special 'printer' for them in scripts/check_extable.
+ */
+ .bad_tosec = { ".altinstr_replacement", NULL },
+ .good_tosec = {ALL_TEXT_SECTIONS , NULL},
+ .mismatch = EXTABLE_TO_NON_TEXT,
+ .handler = extable_mismatch_handler,
}
};
@@ -1017,10 +1078,22 @@ static const struct sectioncheck *section_mismatch(
int elems = sizeof(sectioncheck) / sizeof(struct sectioncheck);
const struct sectioncheck *check = &sectioncheck[0];
+ /*
+ * The target section could be the SHT_NULL section when we're
+ * handling relocations to unresolved symbols; trying to match it
+ * doesn't make much sense and causes build failures on parisc and
+ * mn10300 architectures.
+ */
+ if (*tosec == '\0')
+ return NULL;
+
for (i = 0; i < elems; i++) {
- if (match(fromsec, check->fromsec) &&
- match(tosec, check->tosec))
- return check;
+ if (match(fromsec, check->fromsec)) {
+ if (check->bad_tosec[0] && match(tosec, check->bad_tosec))
+ return check;
+ if (check->good_tosec[0] && !match(tosec, check->good_tosec))
+ return check;
+ }
check++;
}
return NULL;
@@ -1067,6 +1140,17 @@ static const struct sectioncheck *section_mismatch(
* This pattern is identified by
* refsymname = __init_begin, _sinittext, _einittext
*
+ * Pattern 5:
+ * GCC may optimize static inlines when fed constant arg(s) resulting
+ * in functions like cpumask_empty() -- generating an associated symbol
+ * cpumask_empty.constprop.3 that appears in the audit. If the const that
+ * is passed in comes from __init, like say nmi_ipi_mask, we get a
+ * meaningless section warning. May need to add isra symbols too...
+ * This pattern is identified by
+ * tosec = init section
+ * fromsec = text section
+ * refsymname = *.constprop.*
+ *
**/
static int secref_whitelist(const struct sectioncheck *mismatch,
const char *fromsec, const char *fromsym,
@@ -1099,6 +1183,12 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
if (match(tosym, linker_symbols))
return 0;
+ /* Check for pattern 5 */
+ if (match(fromsec, text_sections) &&
+ match(tosec, init_sections) &&
+ match(fromsym, optim_symbols))
+ return 0;
+
return 1;
}
@@ -1261,6 +1351,15 @@ static void print_section_list(const char * const list[20])
fprintf(stderr, "\n");
}
+static inline void get_pretty_name(int is_func, const char** name, const char** name_p)
+{
+ switch (is_func) {
+ case 0: *name = "variable"; *name_p = ""; break;
+ case 1: *name = "function"; *name_p = "()"; break;
+ default: *name = "(unknown reference)"; *name_p = ""; break;
+ }
+}
+
/*
* Print a warning about a section mismatch.
* Try to find symbols near it so user can find it.
@@ -1280,21 +1379,13 @@ static void report_sec_mismatch(const char *modname,
char *prl_from;
char *prl_to;
- switch (from_is_func) {
- case 0: from = "variable"; from_p = ""; break;
- case 1: from = "function"; from_p = "()"; break;
- default: from = "(unknown reference)"; from_p = ""; break;
- }
- switch (to_is_func) {
- case 0: to = "variable"; to_p = ""; break;
- case 1: to = "function"; to_p = "()"; break;
- default: to = "(unknown reference)"; to_p = ""; break;
- }
-
sec_mismatch_count++;
if (!sec_mismatch_verbose)
return;
+ get_pretty_name(from_is_func, &from, &from_p);
+ get_pretty_name(to_is_func, &to, &to_p);
+
warn("%s(%s+0x%llx): Section mismatch in reference from the %s %s%s "
"to the %s %s:%s%s\n",
modname, fromsec, fromaddr, from, fromsym, from_p, to, tosec,
@@ -1408,41 +1499,179 @@ static void report_sec_mismatch(const char *modname,
tosym, prl_to, prl_to, tosym);
free(prl_to);
break;
+ case EXTABLE_TO_NON_TEXT:
+ fatal("There's a special handler for this mismatch type, "
+ "we should never get here.");
+ break;
}
fprintf(stderr, "\n");
}
-static void check_section_mismatch(const char *modname, struct elf_info *elf,
- Elf_Rela *r, Elf_Sym *sym, const char *fromsec)
+static void default_mismatch_handler(const char *modname, struct elf_info *elf,
+ const struct sectioncheck* const mismatch,
+ Elf_Rela *r, Elf_Sym *sym, const char *fromsec)
{
const char *tosec;
- const struct sectioncheck *mismatch;
+ Elf_Sym *to;
+ Elf_Sym *from;
+ const char *tosym;
+ const char *fromsym;
+
+ from = find_elf_symbol2(elf, r->r_offset, fromsec);
+ fromsym = sym_name(elf, from);
+
+ if (!strncmp(fromsym, "reference___initcall",
+ sizeof("reference___initcall")-1))
+ return;
tosec = sec_name(elf, get_secindex(elf, sym));
- mismatch = section_mismatch(fromsec, tosec);
+ to = find_elf_symbol(elf, r->r_addend, sym);
+ tosym = sym_name(elf, to);
+
+ /* check whitelist - we may ignore it */
+ if (secref_whitelist(mismatch,
+ fromsec, fromsym, tosec, tosym)) {
+ report_sec_mismatch(modname, mismatch,
+ fromsec, r->r_offset, fromsym,
+ is_function(from), tosec, tosym,
+ is_function(to));
+ }
+}
+
+static int is_executable_section(struct elf_info* elf, unsigned int section_index)
+{
+ if (section_index > elf->num_sections)
+ fatal("section_index is outside elf->num_sections!\n");
+
+ return ((elf->sechdrs[section_index].sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR);
+}
+
+/*
+ * We rely on a gross hack in section_rel[a]() calling find_extable_entry_size()
+ * to know the sizeof(struct exception_table_entry) for the target architecture.
+ */
+static unsigned int extable_entry_size = 0;
+static void find_extable_entry_size(const char* const sec, const Elf_Rela* r)
+{
+ /*
+ * If we're currently checking the second relocation within __ex_table,
+ * that relocation offset tells us the offsetof(struct
+ * exception_table_entry, fixup) which is equal to sizeof(struct
+ * exception_table_entry) divided by two. We use that to our advantage
+ * since there's no portable way to get that size as every architecture
+ * seems to go with different sized types. Not pretty but better than
+ * hard-coding the size for every architecture..
+ */
+ if (!extable_entry_size)
+ extable_entry_size = r->r_offset * 2;
+}
+
+static inline bool is_extable_fault_address(Elf_Rela *r)
+{
+ /*
+ * extable_entry_size is only discovered after we've handled the
+ * _second_ relocation in __ex_table, so only abort when we're not
+ * handling the first reloc and extable_entry_size is zero.
+ */
+ if (r->r_offset && extable_entry_size == 0)
+ fatal("extable_entry size hasn't been discovered!\n");
+
+ return ((r->r_offset == 0) ||
+ (r->r_offset % extable_entry_size == 0));
+}
+
+#define is_second_extable_reloc(Start, Cur, Sec) \
+ (((Cur) == (Start) + 1) && (strcmp("__ex_table", (Sec)) == 0))
+
+static void report_extable_warnings(const char* modname, struct elf_info* elf,
+ const struct sectioncheck* const mismatch,
+ Elf_Rela* r, Elf_Sym* sym,
+ const char* fromsec, const char* tosec)
+{
+ Elf_Sym* fromsym = find_elf_symbol2(elf, r->r_offset, fromsec);
+ const char* fromsym_name = sym_name(elf, fromsym);
+ Elf_Sym* tosym = find_elf_symbol(elf, r->r_addend, sym);
+ const char* tosym_name = sym_name(elf, tosym);
+ const char* from_pretty_name;
+ const char* from_pretty_name_p;
+ const char* to_pretty_name;
+ const char* to_pretty_name_p;
+
+ get_pretty_name(is_function(fromsym),
+ &from_pretty_name, &from_pretty_name_p);
+ get_pretty_name(is_function(tosym),
+ &to_pretty_name, &to_pretty_name_p);
+
+ warn("%s(%s+0x%lx): Section mismatch in reference"
+ " from the %s %s%s to the %s %s:%s%s\n",
+ modname, fromsec, (long)r->r_offset, from_pretty_name,
+ fromsym_name, from_pretty_name_p,
+ to_pretty_name, tosec, tosym_name, to_pretty_name_p);
+
+ if (!match(tosec, mismatch->bad_tosec) &&
+ is_executable_section(elf, get_secindex(elf, sym)))
+ fprintf(stderr,
+ "The relocation at %s+0x%lx references\n"
+ "section \"%s\" which is not in the list of\n"
+ "authorized sections. If you're adding a new section\n"
+ "and/or if this reference is valid, add \"%s\" to the\n"
+ "list of authorized sections to jump to on fault.\n"
+ "This can be achieved by adding \"%s\" to \n"
+ "OTHER_TEXT_SECTIONS in scripts/mod/modpost.c.\n",
+ fromsec, (long)r->r_offset, tosec, tosec, tosec);
+}
+
+static void extable_mismatch_handler(const char* modname, struct elf_info *elf,
+ const struct sectioncheck* const mismatch,
+ Elf_Rela* r, Elf_Sym* sym,
+ const char *fromsec)
+{
+ const char* tosec = sec_name(elf, get_secindex(elf, sym));
+
+ sec_mismatch_count++;
+
+ if (sec_mismatch_verbose)
+ report_extable_warnings(modname, elf, mismatch, r, sym,
+ fromsec, tosec);
+
+ if (match(tosec, mismatch->bad_tosec))
+ fatal("The relocation at %s+0x%lx references\n"
+ "section \"%s\" which is black-listed.\n"
+ "Something is seriously wrong and should be fixed.\n"
+ "You might get more information about where this is\n"
+ "coming from by using scripts/check_extable.sh %s\n",
+ fromsec, (long)r->r_offset, tosec, modname);
+ else if (!is_executable_section(elf, get_secindex(elf, sym))) {
+ if (is_extable_fault_address(r))
+ fatal("The relocation at %s+0x%lx references\n"
+ "section \"%s\" which is not executable, IOW\n"
+ "it is not possible for the kernel to fault\n"
+ "at that address. Something is seriously wrong\n"
+ "and should be fixed.\n",
+ fromsec, (long)r->r_offset, tosec);
+ else
+ fatal("The relocation at %s+0x%lx references\n"
+ "section \"%s\" which is not executable, IOW\n"
+ "the kernel will fault if it ever tries to\n"
+ "jump to it. Something is seriously wrong\n"
+ "and should be fixed.\n",
+ fromsec, (long)r->r_offset, tosec);
+ }
+}
+
+static void check_section_mismatch(const char *modname, struct elf_info *elf,
+ Elf_Rela *r, Elf_Sym *sym, const char *fromsec)
+{
+ const char *tosec = sec_name(elf, get_secindex(elf, sym));
+ const struct sectioncheck *mismatch = section_mismatch(fromsec, tosec);
+
if (mismatch) {
- Elf_Sym *to;
- Elf_Sym *from;
- const char *tosym;
- const char *fromsym;
-
- from = find_elf_symbol2(elf, r->r_offset, fromsec);
- fromsym = sym_name(elf, from);
- to = find_elf_symbol(elf, r->r_addend, sym);
- tosym = sym_name(elf, to);
-
- if (!strncmp(fromsym, "reference___initcall",
- sizeof("reference___initcall")-1))
- return;
-
- /* check whitelist - we may ignore it */
- if (secref_whitelist(mismatch,
- fromsec, fromsym, tosec, tosym)) {
- report_sec_mismatch(modname, mismatch,
- fromsec, r->r_offset, fromsym,
- is_function(from), tosec, tosym,
- is_function(to));
- }
+ if (mismatch->handler)
+ mismatch->handler(modname, elf, mismatch,
+ r, sym, fromsec);
+ else
+ default_mismatch_handler(modname, elf, mismatch,
+ r, sym, fromsec);
}
}
@@ -1582,6 +1811,8 @@ static void section_rela(const char *modname, struct elf_info *elf,
/* Skip special sections */
if (is_shndx_special(sym->st_shndx))
continue;
+ if (is_second_extable_reloc(start, rela, fromsec))
+ find_extable_entry_size(fromsec, &r);
check_section_mismatch(modname, elf, &r, sym, fromsec);
}
}
@@ -1640,6 +1871,8 @@ static void section_rel(const char *modname, struct elf_info *elf,
/* Skip special sections */
if (is_shndx_special(sym->st_shndx))
continue;
+ if (is_second_extable_reloc(start, rel, fromsec))
+ find_extable_entry_size(fromsec, &r);
check_section_mismatch(modname, elf, &r, sym, fromsec);
}
}
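After this change, modpost's match() recognizes four pattern shapes, as its updated comment lists: exact ("foo"), suffix ("*foo"), prefix ("foo*") and the new substring form ("*foo*"). The following standalone sketch reproduces those semantics for a single pattern (pattern_matches() is a hypothetical helper, not the modpost function, which walks a NULL-terminated pattern array):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool pattern_matches(const char *sym, const char *pat)
{
	size_t plen = strlen(pat);
	size_t slen = strlen(sym);

	if (plen >= 2 && pat[0] == '*' && pat[plen - 1] == '*') {
		/* "*foo*": substring match */
		char bare[256];

		snprintf(bare, sizeof(bare), "%.*s", (int)(plen - 2), pat + 1);
		return strstr(sym, bare) != NULL;
	}
	if (pat[0] == '*')			/* "*foo": suffix match */
		return slen >= plen - 1 &&
		       strcmp(sym + slen - (plen - 1), pat + 1) == 0;
	if (plen && pat[plen - 1] == '*')	/* "foo*": prefix match */
		return strncmp(sym, pat, plen - 1) == 0;
	return strcmp(sym, pat) == 0;		/* exact match */
}

int main(void)
{
	/* All four calls print 1; the patterns are taken from the section
	 * and symbol lists in the patch above, the symbol names are made up. */
	printf("%d\n", pattern_matches(".text.unlikely", ".text.*"));			/* prefix */
	printf("%d\n", pattern_matches("cpumask_empty.constprop.3", "*.constprop.*"));	/* substring */
	printf("%d\n", pattern_matches("scsi_driver_template", "*_template"));		/* suffix */
	printf("%d\n", pattern_matches("__ex_table", "__ex_table"));			/* exact */
	return 0;
}

section_mismatch() then applies this matching to both arrays of a sectioncheck entry: a reference is flagged either when the target section is explicitly black-listed in bad_tosec, or when a good_tosec white-list exists and the target is not on it.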
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index fc7fd52b5e03..bb8e4d0a1911 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -825,6 +825,7 @@ retreived||retrieved
retreive||retrieve
retrive||retrieve
retuned||returned
+reudce||reduce
reuest||request
reuqest||request
reutnred||returned
diff --git a/scripts/xen-hypercalls.sh b/scripts/xen-hypercalls.sh
new file mode 100644
index 000000000000..676d9226814f
--- /dev/null
+++ b/scripts/xen-hypercalls.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+out="$1"
+shift
+in="$@"
+
+for i in $in; do
+ eval $CPP $LINUXINCLUDE -dD -imacros "$i" -x c /dev/null
+done | \
+awk '$1 == "#define" && $2 ~ /__HYPERVISOR_[a-z][a-z_0-9]*/ { v[$3] = $2 }
+ END { print "/* auto-generated by scripts/xen-hypercall.sh */"
+ for (i in v) if (!(v[i] in v))
+ print "HYPERCALL("substr(v[i], 14)")"}' | sort -u >$out
diff --git a/security/Kconfig b/security/Kconfig
index beb86b500adf..bf4ec46474b6 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -21,6 +21,7 @@ config SECURITY_DMESG_RESTRICT
config SECURITY
bool "Enable different security models"
depends on SYSFS
+ depends on MULTIUSER
help
This allows you to choose different security modules to be
configured into your kernel.
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 7db9954f1af2..ad4fa49ad1db 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -365,7 +365,7 @@ void __aa_fs_profile_rmdir(struct aa_profile *profile)
if (!profile->dents[i])
continue;
- r = profile->dents[i]->d_inode->i_private;
+ r = d_inode(profile->dents[i])->i_private;
securityfs_remove(profile->dents[i]);
aa_put_replacedby(r);
profile->dents[i] = NULL;
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index fdaa50cb1876..913f377a038a 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -259,7 +259,7 @@ unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
*/
static inline bool is_deleted(struct dentry *dentry)
{
- if (d_unlinked(dentry) && dentry->d_inode->i_nlink == 0)
+ if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
return 1;
return 0;
}
@@ -351,8 +351,8 @@ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
struct path link = { new_dir->mnt, new_dentry };
struct path target = { new_dir->mnt, old_dentry };
struct path_cond cond = {
- old_dentry->d_inode->i_uid,
- old_dentry->d_inode->i_mode
+ d_backing_inode(old_dentry)->i_uid,
+ d_backing_inode(old_dentry)->i_mode
};
char *buffer = NULL, *buffer2 = NULL;
const char *lname, *tname = NULL, *info = NULL;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 107db88b1d5f..e5f1561439db 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -204,8 +204,8 @@ static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
struct dentry *dentry, u32 mask)
{
struct path path = { mnt, dentry };
- struct path_cond cond = { dentry->d_inode->i_uid,
- dentry->d_inode->i_mode
+ struct path_cond cond = { d_backing_inode(dentry)->i_uid,
+ d_backing_inode(dentry)->i_mode
};
return common_perm(op, &path, mask, &cond);
@@ -223,7 +223,7 @@ static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
static int common_perm_rm(int op, struct path *dir,
struct dentry *dentry, u32 mask)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct path_cond cond = { };
if (!inode || !dir->mnt || !mediated_filesystem(dentry))
@@ -281,8 +281,8 @@ static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
static int apparmor_path_truncate(struct path *path)
{
- struct path_cond cond = { path->dentry->d_inode->i_uid,
- path->dentry->d_inode->i_mode
+ struct path_cond cond = { d_backing_inode(path->dentry)->i_uid,
+ d_backing_inode(path->dentry)->i_mode
};
if (!path->mnt || !mediated_filesystem(path->dentry))
@@ -327,8 +327,8 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
if (!unconfined(profile)) {
struct path old_path = { old_dir->mnt, old_dentry };
struct path new_path = { new_dir->mnt, new_dentry };
- struct path_cond cond = { old_dentry->d_inode->i_uid,
- old_dentry->d_inode->i_mode
+ struct path_cond cond = { d_backing_inode(old_dentry)->i_uid,
+ d_backing_inode(old_dentry)->i_mode
};
error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0,
@@ -354,8 +354,8 @@ static int apparmor_path_chmod(struct path *path, umode_t mode)
static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
- struct path_cond cond = { path->dentry->d_inode->i_uid,
- path->dentry->d_inode->i_mode
+ struct path_cond cond = { d_backing_inode(path->dentry)->i_uid,
+ d_backing_inode(path->dentry)->i_mode
};
if (!mediated_filesystem(path->dentry))
@@ -364,12 +364,12 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
}
-static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int apparmor_inode_getattr(const struct path *path)
{
- if (!mediated_filesystem(dentry))
+ if (!mediated_filesystem(path->dentry))
return 0;
- return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
+ return common_perm_mnt_dentry(OP_GETATTR, path->mnt, path->dentry,
AA_MAY_META_READ);
}
diff --git a/security/capability.c b/security/capability.c
index 070dd46f62f4..0d03fcc489a4 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -225,7 +225,7 @@ static int cap_inode_setattr(struct dentry *dentry, struct iattr *iattr)
return 0;
}
-static int cap_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int cap_inode_getattr(const struct path *path)
{
return 0;
}
@@ -776,11 +776,6 @@ static int cap_tun_dev_open(void *security)
{
return 0;
}
-
-static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1134,7 +1129,6 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, tun_dev_open);
set_to_cap_if_null(ops, tun_dev_attach_queue);
set_to_cap_if_null(ops, tun_dev_attach);
- set_to_cap_if_null(ops, skb_owned_by);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
set_to_cap_if_null(ops, xfrm_policy_alloc_security);
diff --git a/security/commoncap.c b/security/commoncap.c
index f66713bd7450..f2875cd9f677 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -297,7 +297,7 @@ static inline void bprm_clear_caps(struct linux_binprm *bprm)
*/
int cap_inode_need_killpriv(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
int error;
if (!inode->i_op->getxattr)
@@ -319,7 +319,7 @@ int cap_inode_need_killpriv(struct dentry *dentry)
*/
int cap_inode_killpriv(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
if (!inode->i_op->removexattr)
return 0;
@@ -375,7 +375,7 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
*/
int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
__u32 magic_etc;
unsigned tocopy, i;
int size;
diff --git a/security/inode.c b/security/inode.c
index 131a3c49f766..91503b79c5f8 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -27,7 +27,7 @@ static int mount_count;
static inline int positive(struct dentry *dentry)
{
- return dentry->d_inode && !d_unhashed(dentry);
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
static int fill_super(struct super_block *sb, void *data, int silent)
@@ -102,14 +102,14 @@ struct dentry *securityfs_create_file(const char *name, umode_t mode,
if (!parent)
parent = mount->mnt_root;
- dir = parent->d_inode;
+ dir = d_inode(parent);
mutex_lock(&dir->i_mutex);
dentry = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(dentry))
goto out;
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
error = -EEXIST;
goto out1;
}
@@ -197,20 +197,20 @@ void securityfs_remove(struct dentry *dentry)
return;
parent = dentry->d_parent;
- if (!parent || !parent->d_inode)
+ if (!parent || d_really_is_negative(parent))
return;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
if (positive(dentry)) {
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
if (d_is_dir(dentry))
- simple_rmdir(parent->d_inode, dentry);
+ simple_rmdir(d_inode(parent), dentry);
else
- simple_unlink(parent->d_inode, dentry);
+ simple_unlink(d_inode(parent), dentry);
dput(dentry);
}
}
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
simple_release_fs(&mount, &mount_count);
}
EXPORT_SYMBOL_GPL(securityfs_remove);
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 5e9687f02e1b..159ef3ea4130 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -131,7 +131,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
size_t req_xattr_value_len,
char type, char *digest)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct shash_desc *desc;
char **xattrname;
size_t xattr_size = 0;
@@ -199,7 +199,7 @@ int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
const char *xattr_value, size_t xattr_value_len)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct evm_ima_xattr_data xattr_data;
int rc = 0;
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index f589c9a05da2..10f994307a04 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -72,7 +72,7 @@ static void __init evm_init_config(void)
static int evm_find_protected_xattrs(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
char **xattr;
int error;
int count = 0;
@@ -165,8 +165,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
/* Replace RSA with HMAC if not mounted readonly and
* not immutable
*/
- if (!IS_RDONLY(dentry->d_inode) &&
- !IS_IMMUTABLE(dentry->d_inode))
+ if (!IS_RDONLY(d_backing_inode(dentry)) &&
+ !IS_IMMUTABLE(d_backing_inode(dentry)))
evm_update_evmxattr(dentry, xattr_name,
xattr_value,
xattr_value_len);
@@ -235,7 +235,7 @@ enum integrity_status evm_verifyxattr(struct dentry *dentry,
return INTEGRITY_UNKNOWN;
if (!iint) {
- iint = integrity_iint_find(dentry->d_inode);
+ iint = integrity_iint_find(d_backing_inode(dentry));
if (!iint)
return INTEGRITY_UNKNOWN;
}
@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(evm_verifyxattr);
*/
static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
return 0;
@@ -293,13 +293,13 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
if (evm_status == INTEGRITY_NOXATTRS) {
struct integrity_iint_cache *iint;
- iint = integrity_iint_find(dentry->d_inode);
+ iint = integrity_iint_find(d_backing_inode(dentry));
if (iint && (iint->flags & IMA_NEW_FILE))
return 0;
}
out:
if (evm_status != INTEGRITY_PASS)
- integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
dentry->d_name.name, "appraise_metadata",
integrity_status_msg[evm_status],
-EPERM, 0);
@@ -379,7 +379,7 @@ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
*/
void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
if (!evm_initialized || !evm_protected_xattr(xattr_name))
return;
@@ -404,7 +404,7 @@ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
if ((evm_status == INTEGRITY_PASS) ||
(evm_status == INTEGRITY_NOXATTRS))
return 0;
- integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
dentry->d_name.name, "appraise_metadata",
integrity_status_msg[evm_status], -EPERM, 0);
return -EPERM;
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index fffcdb0b31f0..4df493e4b3c9 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -165,7 +165,7 @@ void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
int ima_read_xattr(struct dentry *dentry,
struct evm_ima_xattr_data **xattr_value)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
if (!inode->i_op->getxattr)
return 0;
@@ -190,7 +190,7 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
static const char op[] = "appraise_data";
char *cause = "unknown";
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
enum integrity_status status = INTEGRITY_UNKNOWN;
int rc = xattr_len, hash_start = 0;
@@ -314,7 +314,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
*/
void ima_inode_post_setattr(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct integrity_iint_cache *iint;
int must_appraise, rc;
@@ -380,7 +380,7 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
if (result == 1) {
if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
return -EINVAL;
- ima_reset_appraise_flags(dentry->d_inode,
+ ima_reset_appraise_flags(d_backing_inode(dentry),
(xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
result = 0;
}
@@ -393,7 +393,7 @@ int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
if (result == 1) {
- ima_reset_appraise_flags(dentry->d_inode, 0);
+ ima_reset_appraise_flags(d_backing_inode(dentry), 0);
result = 0;
}
return result;
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 347896548ad3..25430a3aa7f7 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -31,30 +31,21 @@ static long compat_keyctl_instantiate_key_iov(
key_serial_t ringid)
{
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ struct iov_iter from;
long ret;
- if (!_payload_iov || !ioc)
- goto no_payload;
+ if (!_payload_iov)
+ ioc = 0;
- ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
- ARRAY_SIZE(iovstack),
- iovstack, &iov);
+ ret = compat_import_iovec(WRITE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack), &iov,
+ &from);
if (ret < 0)
- goto err;
- if (ret == 0)
- goto no_payload_free;
-
- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-err:
- if (iov != iovstack)
- kfree(iov);
- return ret;
+ return ret;
-no_payload_free:
- if (iov != iovstack)
- kfree(iov);
-no_payload:
- return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+ ret = keyctl_instantiate_key_common(id, &from, ringid);
+ kfree(iov);
+ return ret;
}
/*
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 200e37867336..5105c2c2da75 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -243,9 +243,10 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
unsigned, key_serial_t);
extern long keyctl_invalidate_key(key_serial_t);
+struct iov_iter;
extern long keyctl_instantiate_key_common(key_serial_t,
- const struct iovec *,
- unsigned, size_t, key_serial_t);
+ struct iov_iter *,
+ key_serial_t);
#ifdef CONFIG_PERSISTENT_KEYRINGS
extern long keyctl_get_persistent(uid_t, key_serial_t);
extern unsigned persistent_keyring_expiry;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 4743d71e4aa6..0b9ec78a7a7a 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -998,21 +998,6 @@ static int keyctl_change_reqkey_auth(struct key *key)
}
/*
- * Copy the iovec data from userspace
- */
-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
- unsigned ioc)
-{
- for (; ioc > 0; ioc--) {
- if (copy_from_user(buffer, iov->iov_base, iov->iov_len) != 0)
- return -EFAULT;
- buffer += iov->iov_len;
- iov++;
- }
- return 0;
-}
-
-/*
* Instantiate a key with the specified payload and link the key into the
* destination keyring if one is given.
*
@@ -1022,20 +1007,21 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
* If successful, 0 will be returned.
*/
long keyctl_instantiate_key_common(key_serial_t id,
- const struct iovec *payload_iov,
- unsigned ioc,
- size_t plen,
+ struct iov_iter *from,
key_serial_t ringid)
{
const struct cred *cred = current_cred();
struct request_key_auth *rka;
struct key *instkey, *dest_keyring;
+ size_t plen = from ? iov_iter_count(from) : 0;
void *payload;
long ret;
- bool vm = false;
kenter("%d,,%zu,%d", id, plen, ringid);
+ if (!plen)
+ from = NULL;
+
ret = -EINVAL;
if (plen > 1024 * 1024 - 1)
goto error;
@@ -1054,20 +1040,19 @@ long keyctl_instantiate_key_common(key_serial_t id,
/* pull the payload in if one was supplied */
payload = NULL;
- if (payload_iov) {
+ if (from) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL);
if (!payload) {
if (plen <= PAGE_SIZE)
goto error;
- vm = true;
payload = vmalloc(plen);
if (!payload)
goto error;
}
- ret = copy_from_user_iovec(payload, payload_iov, ioc);
- if (ret < 0)
+ ret = -EFAULT;
+ if (copy_from_iter(payload, plen, from) != plen)
goto error2;
}
@@ -1089,10 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
keyctl_change_reqkey_auth(NULL);
error2:
- if (!vm)
- kfree(payload);
- else
- vfree(payload);
+ kvfree(payload);
error:
return ret;
}
@@ -1112,15 +1094,19 @@ long keyctl_instantiate_key(key_serial_t id,
key_serial_t ringid)
{
if (_payload && plen) {
- struct iovec iov[1] = {
- [0].iov_base = (void __user *)_payload,
- [0].iov_len = plen
- };
+ struct iovec iov;
+ struct iov_iter from;
+ int ret;
- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
+ ret = import_single_range(WRITE, (void __user *)_payload, plen,
+ &iov, &from);
+ if (unlikely(ret))
+ return ret;
+
+ return keyctl_instantiate_key_common(id, &from, ringid);
}
- return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+ return keyctl_instantiate_key_common(id, NULL, ringid);
}
/*
@@ -1138,29 +1124,19 @@ long keyctl_instantiate_key_iov(key_serial_t id,
key_serial_t ringid)
{
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ struct iov_iter from;
long ret;
- if (!_payload_iov || !ioc)
- goto no_payload;
+ if (!_payload_iov)
+ ioc = 0;
- ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
- ARRAY_SIZE(iovstack), iovstack, &iov);
+ ret = import_iovec(WRITE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack), &iov, &from);
if (ret < 0)
- goto err;
- if (ret == 0)
- goto no_payload_free;
-
- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-err:
- if (iov != iovstack)
- kfree(iov);
+ return ret;
+ ret = keyctl_instantiate_key_common(id, &from, ringid);
+ kfree(iov);
return ret;
-
-no_payload_free:
- if (iov != iovstack)
- kfree(iov);
-no_payload:
- return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
}
/*
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 69fdf3bc765b..1d34277dc402 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -211,7 +211,7 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
static void dump_common_audit_data(struct audit_buffer *ab,
struct common_audit_data *a)
{
- struct task_struct *tsk = current;
+ char comm[sizeof(current->comm)];
/*
* To keep stack sizes in check force programers to notice if they
@@ -220,8 +220,8 @@ static void dump_common_audit_data(struct audit_buffer *ab,
*/
BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
- audit_log_format(ab, " pid=%d comm=", task_pid_nr(tsk));
- audit_log_untrustedstring(ab, tsk->comm);
+ audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+ audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
switch (a->type) {
case LSM_AUDIT_DATA_NONE:
@@ -237,7 +237,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
audit_log_d_path(ab, " path=", &a->u.path);
- inode = a->u.path.dentry->d_inode;
+ inode = d_backing_inode(a->u.path.dentry);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
@@ -251,7 +251,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
- inode = a->u.dentry->d_inode;
+ inode = d_backing_inode(a->u.dentry);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
@@ -276,16 +276,19 @@ static void dump_common_audit_data(struct audit_buffer *ab,
audit_log_format(ab, " ino=%lu", inode->i_ino);
break;
}
- case LSM_AUDIT_DATA_TASK:
- tsk = a->u.tsk;
+ case LSM_AUDIT_DATA_TASK: {
+ struct task_struct *tsk = a->u.tsk;
if (tsk) {
pid_t pid = task_pid_nr(tsk);
if (pid) {
+ char comm[sizeof(tsk->comm)];
audit_log_format(ab, " pid=%d comm=", pid);
- audit_log_untrustedstring(ab, tsk->comm);
+ audit_log_untrustedstring(ab,
+ memcpy(comm, tsk->comm, sizeof(comm)));
}
}
break;
+ }
case LSM_AUDIT_DATA_NET:
if (a->u.net->sk) {
struct sock *sk = a->u.net->sk;
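The comm[] copies above exist so the audit record is built from a private snapshot of current->comm rather than a buffer another thread may be rewriting via set_task_comm(). A small standalone userspace sketch of the same idea, not taken from the patch (the names and the 16-byte length mirror TASK_COMM_LEN but are assumptions here):

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16	/* matches the kernel's fixed comm size */

static void log_comm(const char *shared_comm)
{
	char comm[TASK_COMM_LEN];

	/* snapshot first: the source may be rewritten concurrently */
	memcpy(comm, shared_comm, sizeof(comm));
	comm[sizeof(comm) - 1] = '\0';
	printf("comm=%s\n", comm);
}

int main(void)
{
	char shared[TASK_COMM_LEN] = "bash";

	log_comm(shared);
	return 0;
}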
diff --git a/security/security.c b/security/security.c
index e81d5bbe7363..8e9b1f4b9b45 100644
--- a/security/security.c
+++ b/security/security.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(security_old_inode_init_security);
int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev)
{
- if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return security_ops->path_mknod(dir, dentry, mode, dev);
}
@@ -418,7 +418,7 @@ EXPORT_SYMBOL(security_path_mknod);
int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
{
- if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return security_ops->path_mkdir(dir, dentry, mode);
}
@@ -426,14 +426,14 @@ EXPORT_SYMBOL(security_path_mkdir);
int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return security_ops->path_rmdir(dir, dentry);
}
int security_path_unlink(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return security_ops->path_unlink(dir, dentry);
}
@@ -442,7 +442,7 @@ EXPORT_SYMBOL(security_path_unlink);
int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
- if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return security_ops->path_symlink(dir, dentry, old_name);
}
@@ -450,7 +450,7 @@ int security_path_symlink(struct path *dir, struct dentry *dentry,
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry)
{
- if (unlikely(IS_PRIVATE(old_dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
return 0;
return security_ops->path_link(old_dentry, new_dir, new_dentry);
}
@@ -459,8 +459,8 @@ int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
- if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
- (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
+ (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
return 0;
if (flags & RENAME_EXCHANGE) {
@@ -477,21 +477,21 @@ EXPORT_SYMBOL(security_path_rename);
int security_path_truncate(struct path *path)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return security_ops->path_truncate(path);
}
int security_path_chmod(struct path *path, umode_t mode)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return security_ops->path_chmod(path, mode);
}
int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return security_ops->path_chown(path, uid, gid);
}
@@ -513,14 +513,14 @@ EXPORT_SYMBOL_GPL(security_inode_create);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
- if (unlikely(IS_PRIVATE(old_dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
return 0;
return security_ops->inode_link(old_dentry, dir, new_dentry);
}
int security_inode_unlink(struct inode *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return security_ops->inode_unlink(dir, dentry);
}
@@ -543,7 +543,7 @@ EXPORT_SYMBOL_GPL(security_inode_mkdir);
int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return security_ops->inode_rmdir(dir, dentry);
}
@@ -559,8 +559,8 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
- if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
- (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
+ if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
+ (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
return 0;
if (flags & RENAME_EXCHANGE) {
@@ -576,14 +576,14 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
int security_inode_readlink(struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return security_ops->inode_readlink(dentry);
}
int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return security_ops->inode_follow_link(dentry, nd);
}
@@ -599,7 +599,7 @@ int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
int ret;
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
ret = security_ops->inode_setattr(dentry, attr);
if (ret)
@@ -608,11 +608,11 @@ int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+int security_inode_getattr(const struct path *path)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
- return security_ops->inode_getattr(mnt, dentry);
+ return security_ops->inode_getattr(path);
}
int security_inode_setxattr(struct dentry *dentry, const char *name,
@@ -620,7 +620,7 @@ int security_inode_setxattr(struct dentry *dentry, const char *name,
{
int ret;
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
ret = security_ops->inode_setxattr(dentry, name, value, size, flags);
if (ret)
@@ -634,7 +634,7 @@ int security_inode_setxattr(struct dentry *dentry, const char *name,
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
security_ops->inode_post_setxattr(dentry, name, value, size, flags);
evm_inode_post_setxattr(dentry, name, value, size);
@@ -642,14 +642,14 @@ void security_inode_post_setxattr(struct dentry *dentry, const char *name,
int security_inode_getxattr(struct dentry *dentry, const char *name)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return security_ops->inode_getxattr(dentry, name);
}
int security_inode_listxattr(struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return security_ops->inode_listxattr(dentry);
}
@@ -658,7 +658,7 @@ int security_inode_removexattr(struct dentry *dentry, const char *name)
{
int ret;
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
ret = security_ops->inode_removexattr(dentry, name);
if (ret)
@@ -1359,11 +1359,6 @@ int security_tun_dev_open(void *security)
}
EXPORT_SYMBOL(security_tun_dev_open);
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
- security_ops->skb_owned_by(skb, sk);
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index afcc0aed9393..3c17dda9571d 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -724,12 +724,10 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
rcu_read_lock();
node = avc_lookup(ssid, tsid, tclass);
- if (unlikely(!node)) {
+ if (unlikely(!node))
node = avc_compute_av(ssid, tsid, tclass, avd);
- } else {
+ else
memcpy(avd, &node->ae.avd, sizeof(*avd));
- avd = &node->ae.avd;
- }
denied = requested & ~(avd->allowed);
if (unlikely(denied))
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4d1a54190388..7dade28affba 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -51,7 +51,6 @@
#include <linux/tty.h>
#include <net/icmp.h>
#include <net/ip.h> /* for local_port_range[] */
-#include <net/sock.h>
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
@@ -415,7 +414,7 @@ static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
struct dentry *root = sb->s_root;
- struct inode *root_inode = root->d_inode;
+ struct inode *root_inode = d_backing_inode(root);
int rc = 0;
if (sbsec->behavior == SECURITY_FS_USE_XATTR) {
@@ -553,7 +552,7 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
opts->mnt_opts_flags[i++] = DEFCONTEXT_MNT;
}
if (sbsec->flags & ROOTCONTEXT_MNT) {
- struct inode *root = sbsec->sb->s_root->d_inode;
+ struct inode *root = d_backing_inode(sbsec->sb->s_root);
struct inode_security_struct *isec = root->i_security;
rc = security_sid_to_context(isec->sid, &context, &len);
@@ -609,7 +608,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
int rc = 0, i;
struct superblock_security_struct *sbsec = sb->s_security;
const char *name = sb->s_type->name;
- struct inode *inode = sbsec->sb->s_root->d_inode;
+ struct inode *inode = d_backing_inode(sbsec->sb->s_root);
struct inode_security_struct *root_isec = inode->i_security;
u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
u32 defcontext_sid = 0;
@@ -836,8 +835,8 @@ static int selinux_cmp_sb_context(const struct super_block *oldsb,
if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid)
goto mismatch;
if (oldflags & ROOTCONTEXT_MNT) {
- struct inode_security_struct *oldroot = oldsb->s_root->d_inode->i_security;
- struct inode_security_struct *newroot = newsb->s_root->d_inode->i_security;
+ struct inode_security_struct *oldroot = d_backing_inode(oldsb->s_root)->i_security;
+ struct inode_security_struct *newroot = d_backing_inode(newsb->s_root)->i_security;
if (oldroot->sid != newroot->sid)
goto mismatch;
}
@@ -887,16 +886,16 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
if (!set_fscontext)
newsbsec->sid = sid;
if (!set_rootcontext) {
- struct inode *newinode = newsb->s_root->d_inode;
+ struct inode *newinode = d_backing_inode(newsb->s_root);
struct inode_security_struct *newisec = newinode->i_security;
newisec->sid = sid;
}
newsbsec->mntpoint_sid = sid;
}
if (set_rootcontext) {
- const struct inode *oldinode = oldsb->s_root->d_inode;
+ const struct inode *oldinode = d_backing_inode(oldsb->s_root);
const struct inode_security_struct *oldisec = oldinode->i_security;
- struct inode *newinode = newsb->s_root->d_inode;
+ struct inode *newinode = d_backing_inode(newsb->s_root);
struct inode_security_struct *newisec = newinode->i_security;
newisec->sid = oldisec->sid;
@@ -1611,7 +1610,7 @@ static inline int dentry_has_perm(const struct cred *cred,
struct dentry *dentry,
u32 av)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct common_audit_data ad;
ad.type = LSM_AUDIT_DATA_DENTRY;
@@ -1623,10 +1622,10 @@ static inline int dentry_has_perm(const struct cred *cred,
the path to help the auditing code to more easily generate the
pathname if needed. */
static inline int path_has_perm(const struct cred *cred,
- struct path *path,
+ const struct path *path,
u32 av)
{
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = d_backing_inode(path->dentry);
struct common_audit_data ad;
ad.type = LSM_AUDIT_DATA_PATH;
@@ -1754,7 +1753,7 @@ static int may_link(struct inode *dir,
int rc;
dsec = dir->i_security;
- isec = dentry->d_inode->i_security;
+ isec = d_backing_inode(dentry)->i_security;
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
@@ -1798,7 +1797,7 @@ static inline int may_rename(struct inode *old_dir,
int rc;
old_dsec = old_dir->i_security;
- old_isec = old_dentry->d_inode->i_security;
+ old_isec = d_backing_inode(old_dentry)->i_security;
old_is_dir = d_is_dir(old_dentry);
new_dsec = new_dir->i_security;
@@ -1828,7 +1827,7 @@ static inline int may_rename(struct inode *old_dir,
if (rc)
return rc;
if (d_is_positive(new_dentry)) {
- new_isec = new_dentry->d_inode->i_security;
+ new_isec = d_backing_inode(new_dentry)->i_security;
new_is_dir = d_is_dir(new_dentry);
rc = avc_has_perm(sid, new_isec->sid,
new_isec->sclass,
@@ -1964,7 +1963,7 @@ static int selinux_binder_transfer_file(struct task_struct *from,
{
u32 sid = task_sid(to);
struct file_security_struct *fsec = file->f_security;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = d_backing_inode(file->f_path.dentry);
struct inode_security_struct *isec = inode->i_security;
struct common_audit_data ad;
int rc;
@@ -2628,7 +2627,7 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
break;
case ROOTCONTEXT_MNT: {
struct inode_security_struct *root_isec;
- root_isec = sb->s_root->d_inode->i_security;
+ root_isec = d_backing_inode(sb->s_root)->i_security;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
goto out_bad_option;
@@ -2728,7 +2727,7 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode,
struct task_security_struct *tsec;
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_backing_inode(dentry->d_parent);
u32 newsid;
int rc;
@@ -2954,15 +2953,9 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
return dentry_has_perm(cred, dentry, av);
}
-static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int selinux_inode_getattr(const struct path *path)
{
- const struct cred *cred = current_cred();
- struct path path;
-
- path.dentry = dentry;
- path.mnt = mnt;
-
- return path_has_perm(cred, &path, FILE__GETATTR);
+ return path_has_perm(current_cred(), path, FILE__GETATTR);
}
static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
@@ -2989,7 +2982,7 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct inode_security_struct *isec = inode->i_security;
struct superblock_security_struct *sbsec;
struct common_audit_data ad;
@@ -3066,7 +3059,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size,
int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
struct inode_security_struct *isec = inode->i_security;
u32 newsid;
int rc;
@@ -4652,11 +4645,6 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
-static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
- skb_set_owner_w(skb, sk);
-}
-
static int selinux_secmark_relabel_packet(u32 sid)
{
const struct task_security_struct *__tsec;
@@ -4858,21 +4846,17 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb,
static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return selinux_ip_forward(skb, in, PF_INET);
+ return selinux_ip_forward(skb, state->in, PF_INET);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return selinux_ip_forward(skb, in, PF_INET6);
+ return selinux_ip_forward(skb, state->in, PF_INET6);
}
#endif /* IPV6 */
@@ -4920,9 +4904,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
return selinux_ip_output(skb, PF_INET);
}
@@ -5097,21 +5079,17 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb,
static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return selinux_ip_postroute(skb, out, PF_INET);
+ return selinux_ip_postroute(skb, state->out, PF_INET);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
- return selinux_ip_postroute(skb, out, PF_INET6);
+ return selinux_ip_postroute(skb, state->out, PF_INET6);
}
#endif /* IPV6 */
@@ -6041,7 +6019,6 @@ static struct security_operations selinux_ops = {
.tun_dev_attach_queue = selinux_tun_dev_attach_queue,
.tun_dev_attach = selinux_tun_dev_attach,
.tun_dev_open = selinux_tun_dev_open,
- .skb_owned_by = selinux_skb_owned_by,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
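The netfilter hook changes above track a core API change: the in/out devices (and okfn) now travel in a struct nf_hook_state rather than as separate parameters. A hedged kernel-context sketch of the new hook shape; demo_hook is a made-up name and the body is illustrative only, not from the patch.

#include <linux/netfilter.h>
#include <linux/skbuff.h>

static unsigned int demo_hook(const struct nf_hook_ops *ops,
			      struct sk_buff *skb,
			      const struct nf_hook_state *state)
{
	/* state->in and state->out replace the old explicit net_device arguments */
	if (!state->out)
		return NF_ACCEPT;

	return NF_ACCEPT;
}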
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 2df7b900e259..2bbb41822d8e 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -73,6 +73,9 @@ static struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNSID, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -100,6 +103,13 @@ static struct nlmsg_perm nlmsg_xfrm_perms[] =
{ XFRM_MSG_FLUSHPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
{ XFRM_MSG_NEWAE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
{ XFRM_MSG_GETAE, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_REPORT, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_MIGRATE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_NEWSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_GETSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_NEWSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_MAPPING, NETLINK_XFRM_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_audit_perms[] =
@@ -143,6 +153,8 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
switch (sclass) {
case SECCLASS_NETLINK_ROUTE_SOCKET:
+ /* RTM_MAX always points to RTM_SETxxxx, i.e. RTM_NEWxxx + 3 */
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWNSID + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
@@ -153,6 +165,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
break;
case SECCLASS_NETLINK_XFRM_SOCKET:
+ BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
sizeof(nlmsg_xfrm_perms));
break;
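The BUILD_BUG_ON() lines added above are compile-time tripwires: if a new RTM_* or XFRM_MSG_* type is introduced without extending the permission tables, the build breaks. A standalone C11 equivalent of the idea, with the numeric values assumed purely for illustration rather than taken from the uapi headers:

#include <assert.h>

#define RTM_NEWNSID 88	/* illustrative values only */
#define RTM_MAX     91

static_assert(RTM_MAX == RTM_NEWNSID + 3,
	      "nlmsg_route_perms[] must be updated for new RTM_* message types");

int main(void)
{
	return 0;
}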
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 5fde34326dcf..d2787cca1fcb 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1737,7 +1737,7 @@ static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
inc_nlink(inode);
d_add(dentry, inode);
/* bump link count on parent directory, too */
- inc_nlink(dir->d_inode);
+ inc_nlink(d_inode(dir));
return dentry;
}
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index a3dd9faa19c0..b64f2772b030 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -25,10 +25,43 @@
static struct kmem_cache *avtab_node_cachep;
-static inline int avtab_hash(struct avtab_key *keyp, u16 mask)
+/* Based on MurmurHash3, written by Austin Appleby and placed in the
+ * public domain.
+ */
+static inline int avtab_hash(struct avtab_key *keyp, u32 mask)
{
- return ((keyp->target_class + (keyp->target_type << 2) +
- (keyp->source_type << 9)) & mask);
+ static const u32 c1 = 0xcc9e2d51;
+ static const u32 c2 = 0x1b873593;
+ static const u32 r1 = 15;
+ static const u32 r2 = 13;
+ static const u32 m = 5;
+ static const u32 n = 0xe6546b64;
+
+ u32 hash = 0;
+
+#define mix(input) { \
+ u32 v = input; \
+ v *= c1; \
+ v = (v << r1) | (v >> (32 - r1)); \
+ v *= c2; \
+ hash ^= v; \
+ hash = (hash << r2) | (hash >> (32 - r2)); \
+ hash = hash * m + n; \
+}
+
+ mix(keyp->target_class);
+ mix(keyp->target_type);
+ mix(keyp->source_type);
+
+#undef mix
+
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 13;
+ hash *= 0xc2b2ae35;
+ hash ^= hash >> 16;
+
+ return hash & mask;
}
static struct avtab_node*
@@ -46,8 +79,12 @@ avtab_insert_node(struct avtab *h, int hvalue,
newnode->next = prev->next;
prev->next = newnode;
} else {
- newnode->next = h->htable[hvalue];
- h->htable[hvalue] = newnode;
+ newnode->next = flex_array_get_ptr(h->htable, hvalue);
+ if (flex_array_put_ptr(h->htable, hvalue, newnode,
+ GFP_KERNEL|__GFP_ZERO)) {
+ kmem_cache_free(avtab_node_cachep, newnode);
+ return NULL;
+ }
}
h->nel++;
@@ -64,7 +101,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
return -EINVAL;
hvalue = avtab_hash(key, h->mask);
- for (prev = NULL, cur = h->htable[hvalue];
+ for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue);
cur;
prev = cur, cur = cur->next) {
if (key->source_type == cur->key.source_type &&
@@ -104,7 +141,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
if (!h || !h->htable)
return NULL;
hvalue = avtab_hash(key, h->mask);
- for (prev = NULL, cur = h->htable[hvalue];
+ for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue);
cur;
prev = cur, cur = cur->next) {
if (key->source_type == cur->key.source_type &&
@@ -135,7 +172,8 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
return NULL;
hvalue = avtab_hash(key, h->mask);
- for (cur = h->htable[hvalue]; cur; cur = cur->next) {
+ for (cur = flex_array_get_ptr(h->htable, hvalue); cur;
+ cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class == cur->key.target_class &&
@@ -170,7 +208,8 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
return NULL;
hvalue = avtab_hash(key, h->mask);
- for (cur = h->htable[hvalue]; cur; cur = cur->next) {
+ for (cur = flex_array_get_ptr(h->htable, hvalue); cur;
+ cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class == cur->key.target_class &&
@@ -228,15 +267,14 @@ void avtab_destroy(struct avtab *h)
return;
for (i = 0; i < h->nslot; i++) {
- cur = h->htable[i];
+ cur = flex_array_get_ptr(h->htable, i);
while (cur) {
temp = cur;
cur = cur->next;
kmem_cache_free(avtab_node_cachep, temp);
}
- h->htable[i] = NULL;
}
- kfree(h->htable);
+ flex_array_free(h->htable);
h->htable = NULL;
h->nslot = 0;
h->mask = 0;
@@ -251,7 +289,7 @@ int avtab_init(struct avtab *h)
int avtab_alloc(struct avtab *h, u32 nrules)
{
- u16 mask = 0;
+ u32 mask = 0;
u32 shift = 0;
u32 work = nrules;
u32 nslot = 0;
@@ -270,7 +308,8 @@ int avtab_alloc(struct avtab *h, u32 nrules)
nslot = MAX_AVTAB_HASH_BUCKETS;
mask = nslot - 1;
- h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
+ h->htable = flex_array_alloc(sizeof(struct avtab_node *), nslot,
+ GFP_KERNEL | __GFP_ZERO);
if (!h->htable)
return -ENOMEM;
@@ -293,7 +332,7 @@ void avtab_hash_eval(struct avtab *h, char *tag)
max_chain_len = 0;
chain2_len_sum = 0;
for (i = 0; i < h->nslot; i++) {
- cur = h->htable[i];
+ cur = flex_array_get_ptr(h->htable, i);
if (cur) {
slots_used++;
chain_len = 0;
@@ -534,7 +573,8 @@ int avtab_write(struct policydb *p, struct avtab *a, void *fp)
return rc;
for (i = 0; i < a->nslot; i++) {
- for (cur = a->htable[i]; cur; cur = cur->next) {
+ for (cur = flex_array_get_ptr(a->htable, i); cur;
+ cur = cur->next) {
rc = avtab_write_item(p, cur, fp);
if (rc)
return rc;
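The new avtab_hash() above mixes each key field with MurmurHash3-style constants and then applies the standard fmix32 finalizer, which spreads entries across the (now up to 2^16) buckets far better than the old shift-and-add. Below is a standalone userspace re-implementation of the same mixing, useful for experimenting with bucket distribution; the main() harness is not part of the patch.

#include <stdint.h>
#include <stdio.h>

static uint32_t avtab_hash_sketch(uint16_t source_type, uint16_t target_type,
				  uint16_t target_class, uint32_t mask)
{
	const uint32_t c1 = 0xcc9e2d51, c2 = 0x1b873593;
	const uint32_t r1 = 15, r2 = 13, m = 5, n = 0xe6546b64;
	uint32_t hash = 0;

#define MIX(input) do {					\
	uint32_t v = (input);				\
	v *= c1;					\
	v = (v << r1) | (v >> (32 - r1));		\
	v *= c2;					\
	hash ^= v;					\
	hash = (hash << r2) | (hash >> (32 - r2));	\
	hash = hash * m + n;				\
} while (0)

	/* same field order as the kernel version */
	MIX(target_class);
	MIX(target_type);
	MIX(source_type);
#undef MIX

	/* MurmurHash3 fmix32 finalizer */
	hash ^= hash >> 16;
	hash *= 0x85ebca6b;
	hash ^= hash >> 13;
	hash *= 0xc2b2ae35;
	hash ^= hash >> 16;

	return hash & mask;
}

int main(void)
{
	uint32_t mask = (1u << 16) - 1;	/* MAX_AVTAB_HASH_BITS is now 16 */

	printf("bucket=%u\n", avtab_hash_sketch(1, 2, 3, mask));
	return 0;
}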
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 63ce2f9e441d..adb451cd44f9 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -23,6 +23,8 @@
#ifndef _SS_AVTAB_H_
#define _SS_AVTAB_H_
+#include <linux/flex_array.h>
+
struct avtab_key {
u16 source_type; /* source type */
u16 target_type; /* target type */
@@ -51,10 +53,10 @@ struct avtab_node {
};
struct avtab {
- struct avtab_node **htable;
+ struct flex_array *htable;
u32 nel; /* number of elements */
u32 nslot; /* number of hash slots */
- u16 mask; /* mask to compute hash func */
+ u32 mask; /* mask to compute hash func */
};
@@ -84,7 +86,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified
void avtab_cache_init(void);
void avtab_cache_destroy(void);
-#define MAX_AVTAB_HASH_BITS 11
+#define MAX_AVTAB_HASH_BITS 16
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
#endif /* _SS_AVTAB_H_ */
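The table itself moves from a kcalloc()'d pointer array to a flex_array, so a 2^16-slot table no longer needs one large physically contiguous allocation. A hedged kernel-context sketch of the three flex_array calls the avtab now relies on; demo_flex_array is a made-up name and the snippet is not from the patch.

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int demo_flex_array(void *node)
{
	struct flex_array *fa;

	fa = flex_array_alloc(sizeof(void *), 1024, GFP_KERNEL | __GFP_ZERO);
	if (!fa)
		return -ENOMEM;

	/* store and read back one pointer-sized element */
	if (flex_array_put_ptr(fa, 0, node, GFP_KERNEL | __GFP_ZERO)) {
		flex_array_free(fa);
		return -ENOMEM;
	}
	node = flex_array_get_ptr(fa, 0);

	flex_array_free(fa);
	return node ? 0 : -EINVAL;
}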
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index d307b37ddc2b..e1088842232c 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -654,19 +654,15 @@ int mls_import_netlbl_cat(struct context *context,
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
secattr->attr.mls.cat);
- if (rc != 0)
- goto import_netlbl_cat_failure;
-
- rc = ebitmap_cpy(&context->range.level[1].cat,
- &context->range.level[0].cat);
- if (rc != 0)
+ if (rc)
goto import_netlbl_cat_failure;
+ memcpy(&context->range.level[1].cat, &context->range.level[0].cat,
+ sizeof(context->range.level[0].cat));
return 0;
import_netlbl_cat_failure:
ebitmap_destroy(&context->range.level[0].cat);
- ebitmap_destroy(&context->range.level[1].cat);
return rc;
}
#endif /* CONFIG_NETLABEL */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index a1d3944751b9..9e2d82070915 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -3179,13 +3179,9 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
ctx_new.type = ctx->type;
mls_import_netlbl_lvl(&ctx_new, secattr);
if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
- rc = ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
- secattr->attr.mls.cat);
+ rc = mls_import_netlbl_cat(&ctx_new, secattr);
if (rc)
goto out;
- memcpy(&ctx_new.range.level[1].cat,
- &ctx_new.range.level[0].cat,
- sizeof(ctx_new.range.level[0].cat));
}
rc = -EIDRM;
if (!mls_context_isvalid(&policydb, &ctx_new))
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 67ccb7b2b89b..49eada6266ec 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -105,6 +105,7 @@ struct task_smack {
#define SMK_INODE_INSTANT 0x01 /* inode is instantiated */
#define SMK_INODE_TRANSMUTE 0x02 /* directory is transmuting */
#define SMK_INODE_CHANGED 0x04 /* smack was transmuted */
+#define SMK_INODE_IMPURE 0x08 /* involved in an impure transaction */
/*
* A label access rule.
@@ -193,6 +194,10 @@ struct smk_port_label {
#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
#define MAY_BRINGUP 0x00004000 /* Report use of this rule */
+#define SMACK_BRINGUP_ALLOW 1 /* Allow bringup mode */
+#define SMACK_UNCONFINED_SUBJECT 2 /* Allow unconfined label */
+#define SMACK_UNCONFINED_OBJECT 3 /* Allow unconfined label */
+
/*
* Just to make the common cases easier to deal with
*/
@@ -254,6 +259,9 @@ extern int smack_cipso_mapped;
extern struct smack_known *smack_net_ambient;
extern struct smack_known *smack_onlycap;
extern struct smack_known *smack_syslog_label;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+extern struct smack_known *smack_unconfined;
+#endif
extern struct smack_known smack_cipso_option;
extern int smack_ptrace_rule;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 1158430f5bb9..0f410fc56e33 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -130,7 +130,8 @@ int smk_access(struct smack_known *subject, struct smack_known *object,
/*
* Hardcoded comparisons.
- *
+ */
+ /*
* A star subject can't access any object.
*/
if (subject == &smack_known_star) {
@@ -189,10 +190,20 @@ int smk_access(struct smack_known *subject, struct smack_known *object,
* succeed because of "b" rules.
*/
if (may & MAY_BRINGUP)
- rc = MAY_BRINGUP;
+ rc = SMACK_BRINGUP_ALLOW;
#endif
out_audit:
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ if (rc < 0) {
+ if (object == smack_unconfined)
+ rc = SMACK_UNCONFINED_OBJECT;
+ if (subject == smack_unconfined)
+ rc = SMACK_UNCONFINED_SUBJECT;
+ }
+#endif
+
#ifdef CONFIG_AUDIT
if (a)
smack_log(subject->smk_known, object->smk_known,
@@ -338,19 +349,16 @@ static void smack_log_callback(struct audit_buffer *ab, void *a)
void smack_log(char *subject_label, char *object_label, int request,
int result, struct smk_audit_info *ad)
{
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ char request_buffer[SMK_NUM_ACCESS_TYPE + 5];
+#else
char request_buffer[SMK_NUM_ACCESS_TYPE + 1];
+#endif
struct smack_audit_data *sad;
struct common_audit_data *a = &ad->a;
-#ifdef CONFIG_SECURITY_SMACK_BRINGUP
- /*
- * The result may be positive in bringup mode.
- */
- if (result > 0)
- result = 0;
-#endif
/* check if we have to log the current event */
- if (result != 0 && (log_policy & SMACK_AUDIT_DENIED) == 0)
+ if (result < 0 && (log_policy & SMACK_AUDIT_DENIED) == 0)
return;
if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
return;
@@ -364,6 +372,21 @@ void smack_log(char *subject_label, char *object_label, int request,
smack_str_from_perm(request_buffer, request);
sad->subject = subject_label;
sad->object = object_label;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ /*
+ * The result may be positive in bringup mode.
+ * A positive result is an allow, but not for normal reasons.
+ * Mark it as successful, but don't filter it out even if
+ * the logging policy says to do so.
+ */
+ if (result == SMACK_UNCONFINED_SUBJECT)
+ strcat(request_buffer, "(US)");
+ else if (result == SMACK_UNCONFINED_OBJECT)
+ strcat(request_buffer, "(UO)");
+
+ if (result > 0)
+ result = 0;
+#endif
sad->request = request_buffer;
sad->result = result;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index c934311812f1..b644757886bc 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -57,6 +57,13 @@ static struct kmem_cache *smack_inode_cache;
int smack_enabled;
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static char *smk_bu_mess[] = {
+ "Bringup Error", /* Unused */
+ "Bringup", /* SMACK_BRINGUP_ALLOW */
+ "Unconfined Subject", /* SMACK_UNCONFINED_SUBJECT */
+ "Unconfined Object", /* SMACK_UNCONFINED_OBJECT */
+};
+
static void smk_bu_mode(int mode, char *s)
{
int i = 0;
@@ -87,9 +94,11 @@ static int smk_bu_note(char *note, struct smack_known *sskp,
if (rc <= 0)
return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
smk_bu_mode(mode, acc);
- pr_info("Smack Bringup: (%s %s %s) %s\n",
+ pr_info("Smack %s: (%s %s %s) %s\n", smk_bu_mess[rc],
sskp->smk_known, oskp->smk_known, acc, note);
return 0;
}
@@ -106,9 +115,11 @@ static int smk_bu_current(char *note, struct smack_known *oskp,
if (rc <= 0)
return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
smk_bu_mode(mode, acc);
- pr_info("Smack Bringup: (%s %s %s) %s %s\n",
+ pr_info("Smack %s: (%s %s %s) %s %s\n", smk_bu_mess[rc],
tsp->smk_task->smk_known, oskp->smk_known,
acc, current->comm, note);
return 0;
@@ -126,9 +137,11 @@ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
if (rc <= 0)
return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
smk_bu_mode(mode, acc);
- pr_info("Smack Bringup: (%s %s %s) %s to %s\n",
+ pr_info("Smack %s: (%s %s %s) %s to %s\n", smk_bu_mess[rc],
tsp->smk_task->smk_known, smk_task->smk_known, acc,
current->comm, otp->comm);
return 0;
@@ -141,14 +154,25 @@ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
static int smk_bu_inode(struct inode *inode, int mode, int rc)
{
struct task_smack *tsp = current_security();
+ struct inode_smack *isp = inode->i_security;
char acc[SMK_NUM_ACCESS_TYPE + 1];
+ if (isp->smk_flags & SMK_INODE_IMPURE)
+ pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+
if (rc <= 0)
return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
+ if (rc == SMACK_UNCONFINED_SUBJECT &&
+ (mode & (MAY_WRITE | MAY_APPEND)))
+ isp->smk_flags |= SMK_INODE_IMPURE;
smk_bu_mode(mode, acc);
- pr_info("Smack Bringup: (%s %s %s) inode=(%s %ld) %s\n",
- tsp->smk_task->smk_known, smk_of_inode(inode)->smk_known, acc,
+
+ pr_info("Smack %s: (%s %s %s) inode=(%s %ld) %s\n", smk_bu_mess[rc],
+ tsp->smk_task->smk_known, isp->smk_inode->smk_known, acc,
inode->i_sb->s_id, inode->i_ino, current->comm);
return 0;
}
@@ -162,13 +186,20 @@ static int smk_bu_file(struct file *file, int mode, int rc)
struct task_smack *tsp = current_security();
struct smack_known *sskp = tsp->smk_task;
struct inode *inode = file_inode(file);
+ struct inode_smack *isp = inode->i_security;
char acc[SMK_NUM_ACCESS_TYPE + 1];
+ if (isp->smk_flags & SMK_INODE_IMPURE)
+ pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+
if (rc <= 0)
return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
smk_bu_mode(mode, acc);
- pr_info("Smack Bringup: (%s %s %s) file=(%s %ld %pD) %s\n",
+ pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc],
sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
inode->i_sb->s_id, inode->i_ino, file,
current->comm);
@@ -185,13 +216,20 @@ static int smk_bu_credfile(const struct cred *cred, struct file *file,
struct task_smack *tsp = cred->security;
struct smack_known *sskp = tsp->smk_task;
struct inode *inode = file->f_inode;
+ struct inode_smack *isp = inode->i_security;
char acc[SMK_NUM_ACCESS_TYPE + 1];
+ if (isp->smk_flags & SMK_INODE_IMPURE)
+ pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n",
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+
if (rc <= 0)
return rc;
+ if (rc > SMACK_UNCONFINED_OBJECT)
+ rc = 0;
smk_bu_mode(mode, acc);
- pr_info("Smack Bringup: (%s %s %s) file=(%s %ld %pD) %s\n",
+ pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc],
sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
inode->i_sb->s_id, inode->i_ino, file,
current->comm);
@@ -555,7 +593,7 @@ static int smack_sb_copy_data(char *orig, char *smackopts)
static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
struct dentry *root = sb->s_root;
- struct inode *inode = root->d_inode;
+ struct inode *inode = d_backing_inode(root);
struct superblock_smack *sp = sb->s_security;
struct inode_smack *isp;
struct smack_known *skp;
@@ -851,15 +889,15 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
- isp = smk_of_inode(old_dentry->d_inode);
+ isp = smk_of_inode(d_backing_inode(old_dentry));
rc = smk_curacc(isp, MAY_WRITE, &ad);
- rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc);
+ rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_WRITE, rc);
if (rc == 0 && d_is_positive(new_dentry)) {
- isp = smk_of_inode(new_dentry->d_inode);
+ isp = smk_of_inode(d_backing_inode(new_dentry));
smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
rc = smk_curacc(isp, MAY_WRITE, &ad);
- rc = smk_bu_inode(new_dentry->d_inode, MAY_WRITE, rc);
+ rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_WRITE, rc);
}
return rc;
@@ -875,7 +913,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
*/
static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
{
- struct inode *ip = dentry->d_inode;
+ struct inode *ip = d_backing_inode(dentry);
struct smk_audit_info ad;
int rc;
@@ -918,8 +956,8 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
/*
* You need write access to the thing you're removing
*/
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
- rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
if (rc == 0) {
/*
* You also need write access to the containing directory
@@ -957,15 +995,15 @@ static int smack_inode_rename(struct inode *old_inode,
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
- isp = smk_of_inode(old_dentry->d_inode);
+ isp = smk_of_inode(d_backing_inode(old_dentry));
rc = smk_curacc(isp, MAY_READWRITE, &ad);
- rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc);
+ rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_READWRITE, rc);
if (rc == 0 && d_is_positive(new_dentry)) {
- isp = smk_of_inode(new_dentry->d_inode);
+ isp = smk_of_inode(d_backing_inode(new_dentry));
smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
rc = smk_curacc(isp, MAY_READWRITE, &ad);
- rc = smk_bu_inode(new_dentry->d_inode, MAY_READWRITE, rc);
+ rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_READWRITE, rc);
}
return rc;
}
@@ -1022,8 +1060,8 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
- rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
return rc;
}
@@ -1034,19 +1072,16 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
*
* Returns 0 if access is permitted, an error code otherwise
*/
-static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int smack_inode_getattr(const struct path *path)
{
struct smk_audit_info ad;
- struct path path;
+ struct inode *inode = d_backing_inode(path->dentry);
int rc;
- path.dentry = dentry;
- path.mnt = mnt;
-
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
- smk_ad_setfield_u_fs_path(&ad, path);
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
- rc = smk_bu_inode(dentry->d_inode, MAY_READ, rc);
+ smk_ad_setfield_u_fs_path(&ad, *path);
+ rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_inode(inode, MAY_READ, rc);
return rc;
}
@@ -1107,8 +1142,8 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
if (rc == 0) {
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
- rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
}
return rc;
@@ -1129,7 +1164,7 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct smack_known *skp;
- struct inode_smack *isp = dentry->d_inode->i_security;
+ struct inode_smack *isp = d_backing_inode(dentry)->i_security;
if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
isp->smk_flags |= SMK_INODE_TRANSMUTE;
@@ -1174,8 +1209,8 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
- rc = smk_bu_inode(dentry->d_inode, MAY_READ, rc);
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_READ, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_READ, rc);
return rc;
}
@@ -1211,12 +1246,12 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
- rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
+ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad);
+ rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc);
if (rc != 0)
return rc;
- isp = dentry->d_inode->i_security;
+ isp = d_backing_inode(dentry)->i_security;
/*
* Don't do anything special for these.
* XATTR_NAME_SMACKIPIN
@@ -2452,7 +2487,21 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
static int smack_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
- if (family != PF_INET || sock->sk == NULL)
+ struct socket_smack *ssp;
+
+ if (sock->sk == NULL)
+ return 0;
+
+ /*
+ * Sockets created by kernel threads receive the web label.
+ */
+ if (unlikely(current->flags & PF_KTHREAD)) {
+ ssp = sock->sk->sk_security;
+ ssp->smk_in = &smack_known_web;
+ ssp->smk_out = &smack_known_web;
+ }
+
+ if (family != PF_INET)
return 0;
/*
* Set the outbound netlbl.
@@ -3986,6 +4035,36 @@ static int smack_key_permission(key_ref_t key_ref,
rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
return rc;
}
+
+/*
+ * smack_key_getsecurity - Smack label tagging the key
+ * @key points to the key to be queried
+ * @_buffer points to a pointer that should be set to point to the
+ * resulting string (if no label or an error occurs).
+ * Return the length of the string (including terminating NUL) or -ve if
+ * an error.
+ * May also return 0 (and a NULL buffer pointer) if there is no label.
+ */
+static int smack_key_getsecurity(struct key *key, char **_buffer)
+{
+ struct smack_known *skp = key->security;
+ size_t length;
+ char *copy;
+
+ if (key->security == NULL) {
+ *_buffer = NULL;
+ return 0;
+ }
+
+ copy = kstrdup(skp->smk_known, GFP_KERNEL);
+ if (copy == NULL)
+ return -ENOMEM;
+ length = strlen(copy) + 1;
+
+ *_buffer = copy;
+ return length;
+}
+
#endif /* CONFIG_KEYS */
/*
@@ -4310,6 +4389,7 @@ struct security_operations smack_ops = {
.key_alloc = smack_key_alloc,
.key_free = smack_key_free,
.key_permission = smack_key_permission,
+ .key_getsecurity = smack_key_getsecurity,
#endif /* CONFIG_KEYS */
/* Audit hooks */
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index c952632afb0d..a455cfc9ec1f 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -23,9 +23,7 @@
static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct socket_smack *ssp;
struct smack_known *skp;
@@ -42,9 +40,7 @@ static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
static unsigned int smack_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct nf_hook_state *state)
{
struct socket_smack *ssp;
struct smack_known *skp;
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index bce4e8f1b267..d9682985349e 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -54,6 +54,9 @@ enum smk_inos {
SMK_CHANGE_RULE = 19, /* change or add rules (long labels) */
SMK_SYSLOG = 20, /* change syslog label) */
SMK_PTRACE = 21, /* set ptrace rule */
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ SMK_UNCONFINED = 22, /* define an unconfined label */
+#endif
};
/*
@@ -61,7 +64,6 @@ enum smk_inos {
*/
static DEFINE_MUTEX(smack_cipso_lock);
static DEFINE_MUTEX(smack_ambient_lock);
-static DEFINE_MUTEX(smack_syslog_lock);
static DEFINE_MUTEX(smk_netlbladdr_lock);
/*
@@ -95,6 +97,16 @@ int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
*/
struct smack_known *smack_onlycap;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+/*
+ * Allow one label to be unconfined. This is for
+ * debugging and application bring-up purposes only.
+ * It is bad and wrong, but everyone seems to expect
+ * to have it.
+ */
+struct smack_known *smack_unconfined;
+#endif
+
/*
* If this value is set restrict syslog use to the label specified.
* It can be reset via smackfs/syslog
@@ -1717,6 +1729,85 @@ static const struct file_operations smk_onlycap_ops = {
.llseek = default_llseek,
};
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+/**
+ * smk_read_unconfined - read() for smackfs/unconfined
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_unconfined(struct file *filp, char __user *buf,
+ size_t cn, loff_t *ppos)
+{
+ char *smack = "";
+ ssize_t rc = -EINVAL;
+ int asize;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (smack_unconfined != NULL)
+ smack = smack_unconfined->smk_known;
+
+ asize = strlen(smack) + 1;
+
+ if (cn >= asize)
+ rc = simple_read_from_buffer(buf, cn, ppos, smack, asize);
+
+ return rc;
+}
+
+/**
+ * smk_write_unconfined - write() for smackfs/unconfined
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_unconfined(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ int rc = count;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ data = kzalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ /*
+ * Should the null string be passed in, unset the unconfined value.
+ * This seems like something to be careful with as usually
+ * smk_import only expects to return NULL for errors. It
+ * is usually the case that a nullstring or "\n" would be
+ * bad to pass to smk_import but in fact this is useful here.
+ *
+ * smk_import will also reject a label beginning with '-',
+ * so "-confine" will also work.
+ */
+ if (copy_from_user(data, buf, count) != 0)
+ rc = -EFAULT;
+ else
+ smack_unconfined = smk_import_entry(data, count);
+
+ kfree(data);
+ return rc;
+}
+
+static const struct file_operations smk_unconfined_ops = {
+ .read = smk_read_unconfined,
+ .write = smk_write_unconfined,
+ .llseek = default_llseek,
+};
+#endif /* CONFIG_SECURITY_SMACK_BRINGUP */
+
/**
* smk_read_logging - read() for /smack/logging
* @filp: file pointer, not actually used
@@ -2384,6 +2475,10 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
"syslog", &smk_syslog_ops, S_IRUGO|S_IWUSR},
[SMK_PTRACE] = {
"ptrace", &smk_ptrace_ops, S_IRUGO|S_IWUSR},
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ [SMK_UNCONFINED] = {
+ "unconfined", &smk_unconfined_ops, S_IRUGO|S_IWUSR},
+#endif
/* last one */
{""}
};
@@ -2395,7 +2490,7 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
return rc;
}
- root_inode = sb->s_root->d_inode;
+ root_inode = d_inode(sb->s_root);
return 0;
}
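With CONFIG_SECURITY_SMACK_BRINGUP enabled, the new smackfs "unconfined" entry lets a CAP_MAC_ADMIN process nominate one label that is exempted (and loudly logged) during bring-up. A hedged userspace sketch of writing it; the /sys/fs/smackfs mount point and the label value are assumptions for illustration, not taken from the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *label = "DebugLabel";	/* example label only */
	int fd = open("/sys/fs/smackfs/unconfined", O_WRONLY);

	if (fd < 0) {
		perror("open unconfined");
		return 1;
	}
	if (write(fd, label, strlen(label)) < 0)
		perror("write label");
	close(fd);
	return 0;
}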
diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore
index 5caf1a6f5907..dc0f220a210b 100644
--- a/security/tomoyo/.gitignore
+++ b/security/tomoyo/.gitignore
@@ -1,2 +1,2 @@
builtin-policy.h
-policy/
+policy/*.conf
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 604e718d68d3..404dce66952a 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -6,6 +6,7 @@ config SECURITY_TOMOYO
select SECURITY_PATH
select SECURITY_NETWORK
select SRCU
+ select BUILD_BIN2C
default n
help
This selects TOMOYO Linux, pathname-based access control.
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 56a0c7be409e..65dbcb2fd850 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1,48 +1,15 @@
obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
-$(obj)/policy/profile.conf:
- @mkdir -p $(obj)/policy/
- @echo Creating an empty policy/profile.conf
- @touch $@
-
-$(obj)/policy/exception_policy.conf:
- @mkdir -p $(obj)/policy/
- @echo Creating a default policy/exception_policy.conf
- @echo initialize_domain /sbin/modprobe from any >> $@
- @echo initialize_domain /sbin/hotplug from any >> $@
-
-$(obj)/policy/domain_policy.conf:
- @mkdir -p $(obj)/policy/
- @echo Creating an empty policy/domain_policy.conf
- @touch $@
-
-$(obj)/policy/manager.conf:
- @mkdir -p $(obj)/policy/
- @echo Creating an empty policy/manager.conf
- @touch $@
-
-$(obj)/policy/stat.conf:
- @mkdir -p $(obj)/policy/
- @echo Creating an empty policy/stat.conf
- @touch $@
-
-$(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf
- @echo Generating built-in policy for TOMOYO 2.5.x.
- @echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp
- @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp
- @echo "\"\";" >> $@.tmp
- @echo "static char tomoyo_builtin_exception_policy[] __initdata =" >> $@.tmp
- @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/exception_policy.conf >> $@.tmp
- @echo "\"\";" >> $@.tmp
- @echo "static char tomoyo_builtin_domain_policy[] __initdata =" >> $@.tmp
- @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/domain_policy.conf >> $@.tmp
- @echo "\"\";" >> $@.tmp
- @echo "static char tomoyo_builtin_manager[] __initdata =" >> $@.tmp
- @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/manager.conf >> $@.tmp
- @echo "\"\";" >> $@.tmp
- @echo "static char tomoyo_builtin_stat[] __initdata =" >> $@.tmp
- @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/stat.conf >> $@.tmp
- @echo "\"\";" >> $@.tmp
- @mv $@.tmp $@
+targets += builtin-policy.h
+define do_policy
+echo "static char tomoyo_builtin_$(1)[] __initdata ="; \
+$(objtree)/scripts/basic/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
+echo ";"
+endef
+quiet_cmd_policy = POLICY $@
+ cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
+
+$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
+ $(call if_changed,policy)
$(obj)/common.o: $(obj)/builtin-policy.h
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index b897d4862016..f9c9fb1d56b4 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -945,7 +945,7 @@ char *tomoyo_encode2(const char *str, int str_len);
char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
va_list args);
char *tomoyo_read_token(struct tomoyo_acl_param *param);
-char *tomoyo_realpath_from_path(struct path *path);
+char *tomoyo_realpath_from_path(const struct path *path);
char *tomoyo_realpath_nofollow(const char *pathname);
const char *tomoyo_get_exe(void);
const char *tomoyo_yesno(const unsigned int value);
@@ -978,7 +978,7 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
struct path *path2);
int tomoyo_path_number_perm(const u8 operation, struct path *path,
unsigned long number);
-int tomoyo_path_perm(const u8 operation, struct path *path,
+int tomoyo_path_perm(const u8 operation, const struct path *path,
const char *target);
unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
index 63681e8be628..6c4528d4b48f 100644
--- a/security/tomoyo/condition.c
+++ b/security/tomoyo/condition.c
@@ -714,7 +714,7 @@ void tomoyo_get_attributes(struct tomoyo_obj_info *obj)
dentry = dget_parent(dentry);
break;
}
- inode = dentry->d_inode;
+ inode = d_backing_inode(dentry);
if (inode) {
struct tomoyo_mini_stat *stat = &obj->stat[i];
stat->uid = inode->i_uid;
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index c151a1869597..2367b100cc62 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -145,7 +145,7 @@ static void tomoyo_add_slash(struct tomoyo_path_info *buf)
*
* Returns true on success, false otherwise.
*/
-static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path)
+static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, const struct path *path)
{
buf->name = tomoyo_realpath_from_path(path);
if (buf->name) {
@@ -782,7 +782,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_path_perm(const u8 operation, struct path *path, const char *target)
+int tomoyo_path_perm(const u8 operation, const struct path *path, const char *target)
{
struct tomoyo_request_info r;
struct tomoyo_obj_info obj = {
diff --git a/security/tomoyo/policy/exception_policy.conf.default b/security/tomoyo/policy/exception_policy.conf.default
new file mode 100644
index 000000000000..2678df4964ee
--- /dev/null
+++ b/security/tomoyo/policy/exception_policy.conf.default
@@ -0,0 +1,2 @@
+initialize_domain /sbin/modprobe from any
+initialize_domain /sbin/hotplug from any
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index bed745c8b1a3..5077f1968841 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -89,7 +89,7 @@ char *tomoyo_encode(const char *str)
*
* If dentry is a directory, trailing '/' is appended.
*/
-static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
+static char *tomoyo_get_absolute_path(const struct path *path, char * const buffer,
const int buflen)
{
char *pos = ERR_PTR(-ENOMEM);
@@ -97,7 +97,7 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
/* go to whatever namespace root we are under */
pos = d_absolute_path(path, buffer, buflen - 1);
if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = d_backing_inode(path->dentry);
if (inode && S_ISDIR(inode->i_mode)) {
buffer[buflen - 2] = '/';
buffer[buflen - 1] = '\0';
@@ -125,7 +125,7 @@ static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer,
if (buflen >= 256) {
pos = dentry_path_raw(dentry, buffer, buflen - 1);
if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_backing_inode(dentry);
if (inode && S_ISDIR(inode->i_mode)) {
buffer[buflen - 2] = '/';
buffer[buflen - 1] = '\0';
@@ -168,7 +168,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
if (!MAJOR(sb->s_dev))
goto prepend_filesystem_name;
{
- struct inode *inode = sb->s_root->d_inode;
+ struct inode *inode = d_backing_inode(sb->s_root);
/*
* Use filesystem name if filesystem does not support rename()
* operation.
@@ -216,10 +216,10 @@ out:
*
* Returns the buffer.
*/
-static char *tomoyo_get_socket_name(struct path *path, char * const buffer,
+static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
const int buflen)
{
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = d_backing_inode(path->dentry);
struct socket *sock = inode ? SOCKET_I(inode) : NULL;
struct sock *sk = sock ? sock->sk : NULL;
if (sk) {
@@ -247,7 +247,7 @@ static char *tomoyo_get_socket_name(struct path *path, char * const buffer,
* These functions use kzalloc(), so the caller must call kfree()
* if these functions didn't return NULL.
*/
-char *tomoyo_realpath_from_path(struct path *path)
+char *tomoyo_realpath_from_path(const struct path *path)
{
char *buf = NULL;
char *name = NULL;
@@ -277,7 +277,7 @@ char *tomoyo_realpath_from_path(struct path *path)
pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
goto encode;
}
- inode = sb->s_root->d_inode;
+ inode = d_backing_inode(sb->s_root);
/*
* Get local name for filesystems without rename() operation
* or dentry without vfsmount.
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index f0b756e27fed..57c88d52ffa5 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -144,10 +144,9 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int tomoyo_inode_getattr(const struct path *path)
{
- struct path path = { mnt, dentry };
- return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL);
+ return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL);
}
/**
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 2952ba576fb9..b974a6997d7f 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -948,15 +948,18 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
*/
const char *tomoyo_get_exe(void)
{
+ struct file *exe_file;
+ const char *cp;
struct mm_struct *mm = current->mm;
- const char *cp = NULL;
if (!mm)
return NULL;
- down_read(&mm->mmap_sem);
- if (mm->exe_file)
- cp = tomoyo_realpath_from_path(&mm->exe_file->f_path);
- up_read(&mm->mmap_sem);
+ exe_file = get_mm_exe_file(mm);
+ if (!exe_file)
+ return NULL;
+
+ cp = tomoyo_realpath_from_path(&exe_file->f_path);
+ fput(exe_file);
return cp;
}
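tomoyo_get_exe() no longer walks mm->exe_file under mmap_sem: get_mm_exe_file() takes its own reference on the exe file, and the caller drops it with fput() once the path has been resolved. A hedged sketch of that reference pattern (the helper name is invented):

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>

/* Returns a referenced struct file for the current task's exe, or NULL. */
static struct file *example_get_current_exe(void)
{
        struct mm_struct *mm = current->mm;

        if (!mm)
                return NULL;
        /* Takes a reference without holding mmap_sem; release with fput(). */
        return get_mm_exe_file(mm);
}

The caller is expected to fput() the returned file when finished, exactly as the new tomoyo_get_exe() does.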
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
index 20ef5143c0c0..3123e1da2fed 100644
--- a/security/yama/Kconfig
+++ b/security/yama/Kconfig
@@ -1,8 +1,6 @@
config SECURITY_YAMA
bool "Yama support"
depends on SECURITY
- select SECURITYFS
- select SECURITY_PATH
default n
help
This selects Yama, which extends DAC support with additional
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 13c88fbcf037..24aae2ae2b30 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -379,20 +379,17 @@ static struct security_operations yama_ops = {
static int yama_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int rc;
+ struct ctl_table table_copy;
if (write && !capable(CAP_SYS_PTRACE))
return -EPERM;
- rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (rc)
- return rc;
-
/* Lock the max value if it ever gets set. */
- if (write && *(int *)table->data == *(int *)table->extra2)
- table->extra1 = table->extra2;
+ table_copy = *table;
+ if (*(int *)table_copy.data == *(int *)table_copy.extra2)
+ table_copy.extra1 = table_copy.extra2;
- return rc;
+ return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
}
static int zero;
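The Yama handler above no longer writes back into the shared ctl_table; it clamps a stack copy and hands that copy to proc_dointvec_minmax(). A reduced sketch of the same sysctl idiom (handler name and policy are illustrative):

#include <linux/sysctl.h>

static int example_dointvec_locked_max(struct ctl_table *table, int write,
                                       void __user *buffer, size_t *lenp,
                                       loff_t *ppos)
{
        struct ctl_table copy = *table;   /* never modify the shared table */

        /* Once the value has reached the maximum, pin it there. */
        if (*(int *)copy.data == *(int *)copy.extra2)
                copy.extra1 = copy.extra2;

        return proc_dointvec_minmax(&copy, write, buffer, lenp, ppos);
}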
diff --git a/sound/Kconfig b/sound/Kconfig
index c710ce2c5c37..5a240e050ae6 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -76,6 +76,8 @@ source "sound/isa/Kconfig"
source "sound/pci/Kconfig"
+source "sound/hda/Kconfig"
+
source "sound/ppc/Kconfig"
source "sound/aoa/Kconfig"
diff --git a/sound/Makefile b/sound/Makefile
index ce9132b1c395..77320709fd26 100644
--- a/sound/Makefile
+++ b/sound/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_SOUND_PRIME) += sound_firmware.o
obj-$(CONFIG_SOUND_PRIME) += oss/
obj-$(CONFIG_DMASOUND) += oss/
obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ sh/ synth/ usb/ \
- firewire/ sparc/ spi/ parisc/ pcmcia/ mips/ soc/ atmel/
+ firewire/ sparc/ spi/ parisc/ pcmcia/ mips/ soc/ atmel/ hda/
obj-$(CONFIG_SND_AOA) += aoa/
# This one must be compilable even if sound is configured out
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index b9737fae656a..1cbf210080a1 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -31,7 +31,7 @@ module_param(force, int, 0444);
MODULE_PARM_DESC(force, "Force loading i2sbus even when"
" no layout-id property is present");
-static struct of_device_id i2sbus_match[] = {
+static const struct of_device_id i2sbus_match[] = {
{ .name = "i2s" },
{ }
};
diff --git a/sound/core/control.c b/sound/core/control.c
index eeb691d1911f..196a6fe100ca 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -192,36 +192,41 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
EXPORT_SYMBOL(snd_ctl_notify);
/**
- * snd_ctl_new - create a control instance from the template
- * @control: the control template
- * @access: the default control access
+ * snd_ctl_new - create a new control instance with some elements
+ * @kctl: the pointer to store new control instance
+ * @count: the number of elements in this control
+ * @access: the default access flags for elements in this control
+ * @file: given when locking these elements
*
- * Allocates a new struct snd_kcontrol instance and copies the given template
- * to the new instance. It does not copy volatile data (access).
+ * Allocates a memory object for a new control instance. The instance has
+ * as many elements as the given number (@count). Each element gets the
+ * given access permissions (@access) and is locked when @file is given.
*
- * Return: The pointer of the new instance, or %NULL on failure.
+ * Return: 0 on success, error code on failure
*/
-static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
- unsigned int access)
+static int snd_ctl_new(struct snd_kcontrol **kctl, unsigned int count,
+ unsigned int access, struct snd_ctl_file *file)
{
- struct snd_kcontrol *kctl;
+ unsigned int size;
unsigned int idx;
- if (snd_BUG_ON(!control || !control->count))
- return NULL;
+ if (count == 0 || count > MAX_CONTROL_COUNT)
+ return -EINVAL;
- if (control->count > MAX_CONTROL_COUNT)
- return NULL;
+ size = sizeof(struct snd_kcontrol);
+ size += sizeof(struct snd_kcontrol_volatile) * count;
- kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
- if (kctl == NULL) {
- pr_err("ALSA: Cannot allocate control instance\n");
- return NULL;
+ *kctl = kzalloc(size, GFP_KERNEL);
+ if (!*kctl)
+ return -ENOMEM;
+
+ for (idx = 0; idx < count; idx++) {
+ (*kctl)->vd[idx].access = access;
+ (*kctl)->vd[idx].owner = file;
}
- *kctl = *control;
- for (idx = 0; idx < kctl->count; idx++)
- kctl->vd[idx].access = access;
- return kctl;
+ (*kctl)->count = count;
+
+ return 0;
}
/**
@@ -238,37 +243,53 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol,
void *private_data)
{
- struct snd_kcontrol kctl;
+ struct snd_kcontrol *kctl;
+ unsigned int count;
unsigned int access;
+ int err;
if (snd_BUG_ON(!ncontrol || !ncontrol->info))
return NULL;
- memset(&kctl, 0, sizeof(kctl));
- kctl.id.iface = ncontrol->iface;
- kctl.id.device = ncontrol->device;
- kctl.id.subdevice = ncontrol->subdevice;
+
+ count = ncontrol->count;
+ if (count == 0)
+ count = 1;
+
+ access = ncontrol->access;
+ if (access == 0)
+ access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ access &= (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE |
+ SNDRV_CTL_ELEM_ACCESS_INACTIVE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND |
+ SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
+
+ err = snd_ctl_new(&kctl, count, access, NULL);
+ if (err < 0)
+ return NULL;
+
+ /* The 'numid' member is decided when calling snd_ctl_add(). */
+ kctl->id.iface = ncontrol->iface;
+ kctl->id.device = ncontrol->device;
+ kctl->id.subdevice = ncontrol->subdevice;
if (ncontrol->name) {
- strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name));
- if (strcmp(ncontrol->name, kctl.id.name) != 0)
+ strlcpy(kctl->id.name, ncontrol->name, sizeof(kctl->id.name));
+ if (strcmp(ncontrol->name, kctl->id.name) != 0)
pr_warn("ALSA: Control name '%s' truncated to '%s'\n",
- ncontrol->name, kctl.id.name);
+ ncontrol->name, kctl->id.name);
}
- kctl.id.index = ncontrol->index;
- kctl.count = ncontrol->count ? ncontrol->count : 1;
- access = ncontrol->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
- (ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
- SNDRV_CTL_ELEM_ACCESS_VOLATILE|
- SNDRV_CTL_ELEM_ACCESS_INACTIVE|
- SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE|
- SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND|
- SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK));
- kctl.info = ncontrol->info;
- kctl.get = ncontrol->get;
- kctl.put = ncontrol->put;
- kctl.tlv.p = ncontrol->tlv.p;
- kctl.private_value = ncontrol->private_value;
- kctl.private_data = private_data;
- return snd_ctl_new(&kctl, access);
+ kctl->id.index = ncontrol->index;
+
+ kctl->info = ncontrol->info;
+ kctl->get = ncontrol->get;
+ kctl->put = ncontrol->put;
+ kctl->tlv.p = ncontrol->tlv.p;
+
+ kctl->private_value = ncontrol->private_value;
+ kctl->private_data = private_data;
+
+ return kctl;
}
EXPORT_SYMBOL(snd_ctl_new1);
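The rework changes snd_ctl_new() internally, but the snd_ctl_new1() driver contract is unchanged: pass a template, get back an allocated kcontrol to hand to snd_ctl_add(). A hedged driver-side sketch; the control name, the callbacks, and the chip pointer are placeholders, not part of the patch:

#include <sound/core.h>
#include <sound/control.h>

static int example_sw_info(struct snd_kcontrol *kctl,
                           struct snd_ctl_elem_info *uinfo)
{
        uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
        uinfo->count = 1;
        uinfo->value.integer.min = 0;
        uinfo->value.integer.max = 1;
        return 0;
}

static int example_sw_get(struct snd_kcontrol *kctl,
                          struct snd_ctl_elem_value *uval)
{
        uval->value.integer.value[0] = 1;       /* report "on" */
        return 0;
}

static int example_sw_put(struct snd_kcontrol *kctl,
                          struct snd_ctl_elem_value *uval)
{
        return 0;                               /* nothing to change */
}

static const struct snd_kcontrol_new example_sw = {
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name  = "Example Playback Switch",
        .info  = example_sw_info,
        .get   = example_sw_get,
        .put   = example_sw_put,
};

static int example_add_switch(struct snd_card *card, void *chip)
{
        /* snd_ctl_add() frees the new kcontrol itself on error. */
        return snd_ctl_add(card, snd_ctl_new1(&example_sw, chip));
}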
@@ -557,6 +578,7 @@ error:
*
* Finds the control instance with the given id, and activate or
* inactivate the control together with notification, if changed.
+ * The given ID data is filled with full information.
*
* Return: 0 if unchanged, 1 if changed, or a negative error code on failure.
*/
@@ -586,6 +608,7 @@ int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
goto unlock;
vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
}
+ snd_ctl_build_ioff(id, kctl, index_offset);
ret = 1;
unlock:
up_write(&card->controls_rwsem);
@@ -1006,7 +1029,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
struct user_element {
struct snd_ctl_elem_info info;
struct snd_card *card;
- void *elem_data; /* element data */
+ char *elem_data; /* element data */
unsigned long elem_data_size; /* size of element data in bytes */
void *tlv_data; /* TLV data */
unsigned long tlv_data_size; /* TLV data size */
@@ -1017,8 +1040,12 @@ static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
+ unsigned int offset;
+ offset = snd_ctl_get_ioff(kcontrol, &uinfo->id);
*uinfo = ue->info;
+ snd_ctl_build_ioff(&uinfo->id, kcontrol, offset);
+
return 0;
}
@@ -1028,10 +1055,13 @@ static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol,
struct user_element *ue = kcontrol->private_data;
const char *names;
unsigned int item;
+ unsigned int offset;
item = uinfo->value.enumerated.item;
+ offset = snd_ctl_get_ioff(kcontrol, &uinfo->id);
*uinfo = ue->info;
+ snd_ctl_build_ioff(&uinfo->id, kcontrol, offset);
item = min(item, uinfo->value.enumerated.items - 1);
uinfo->value.enumerated.item = item;
@@ -1048,9 +1078,12 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct user_element *ue = kcontrol->private_data;
+ unsigned int size = ue->elem_data_size;
+ char *src = ue->elem_data +
+ snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size;
mutex_lock(&ue->card->user_ctl_lock);
- memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
+ memcpy(&ucontrol->value, src, size);
mutex_unlock(&ue->card->user_ctl_lock);
return 0;
}
@@ -1060,11 +1093,14 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
{
int change;
struct user_element *ue = kcontrol->private_data;
+ unsigned int size = ue->elem_data_size;
+ char *dst = ue->elem_data +
+ snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size;
mutex_lock(&ue->card->user_ctl_lock);
- change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
+ change = memcmp(&ucontrol->value, dst, size) != 0;
if (change)
- memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
+ memcpy(dst, &ucontrol->value, size);
mutex_unlock(&ue->card->user_ctl_lock);
return change;
}
@@ -1078,7 +1114,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int change = 0;
void *new_data;
- if (op_flag > 0) {
+ if (op_flag == SNDRV_CTL_TLV_OP_WRITE) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
@@ -1161,84 +1197,103 @@ static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
+ /* The capacity of struct snd_ctl_elem_value.value.*/
+ static const unsigned int value_sizes[] = {
+ [SNDRV_CTL_ELEM_TYPE_BOOLEAN] = sizeof(long),
+ [SNDRV_CTL_ELEM_TYPE_INTEGER] = sizeof(long),
+ [SNDRV_CTL_ELEM_TYPE_ENUMERATED] = sizeof(unsigned int),
+ [SNDRV_CTL_ELEM_TYPE_BYTES] = sizeof(unsigned char),
+ [SNDRV_CTL_ELEM_TYPE_IEC958] = sizeof(struct snd_aes_iec958),
+ [SNDRV_CTL_ELEM_TYPE_INTEGER64] = sizeof(long long),
+ };
+ static const unsigned int max_value_counts[] = {
+ [SNDRV_CTL_ELEM_TYPE_BOOLEAN] = 128,
+ [SNDRV_CTL_ELEM_TYPE_INTEGER] = 128,
+ [SNDRV_CTL_ELEM_TYPE_ENUMERATED] = 128,
+ [SNDRV_CTL_ELEM_TYPE_BYTES] = 512,
+ [SNDRV_CTL_ELEM_TYPE_IEC958] = 1,
+ [SNDRV_CTL_ELEM_TYPE_INTEGER64] = 64,
+ };
struct snd_card *card = file->card;
- struct snd_kcontrol kctl, *_kctl;
+ struct snd_kcontrol *kctl;
+ unsigned int count;
unsigned int access;
long private_size;
struct user_element *ue;
- int idx, err;
+ unsigned int offset;
+ int err;
- if (info->count < 1)
- return -EINVAL;
if (!*info->id.name)
return -EINVAL;
if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
return -EINVAL;
- access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
- (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
- SNDRV_CTL_ELEM_ACCESS_INACTIVE|
- SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
- info->id.numid = 0;
- memset(&kctl, 0, sizeof(kctl));
+ /* Delete the existing control that is to be replaced, if requested. */
if (replace) {
+ info->id.numid = 0;
err = snd_ctl_remove_user_ctl(file, &info->id);
if (err)
return err;
}
- if (card->user_ctl_count >= MAX_USER_CONTROLS)
+ /*
+ * The number of userspace controls is counted control by control,
+ * not element by element.
+ */
+ if (card->user_ctl_count + 1 > MAX_USER_CONTROLS)
return -ENOMEM;
- memcpy(&kctl.id, &info->id, sizeof(info->id));
- kctl.count = info->owner ? info->owner : 1;
- access |= SNDRV_CTL_ELEM_ACCESS_USER;
- if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
- kctl.info = snd_ctl_elem_user_enum_info;
- else
- kctl.info = snd_ctl_elem_user_info;
- if (access & SNDRV_CTL_ELEM_ACCESS_READ)
- kctl.get = snd_ctl_elem_user_get;
- if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
- kctl.put = snd_ctl_elem_user_put;
- if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
- kctl.tlv.c = snd_ctl_elem_user_tlv;
+ /* Check the number of elements for this userspace control. */
+ count = info->owner;
+ if (count == 0)
+ count = 1;
+
+ /* Arrange access permissions if needed. */
+ access = info->access;
+ if (access == 0)
+ access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ access &= (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_INACTIVE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE);
+ if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
- }
- switch (info->type) {
- case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
- case SNDRV_CTL_ELEM_TYPE_INTEGER:
- private_size = sizeof(long);
- if (info->count > 128)
- return -EINVAL;
- break;
- case SNDRV_CTL_ELEM_TYPE_INTEGER64:
- private_size = sizeof(long long);
- if (info->count > 64)
- return -EINVAL;
- break;
- case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
- private_size = sizeof(unsigned int);
- if (info->count > 128 || info->value.enumerated.items == 0)
- return -EINVAL;
- break;
- case SNDRV_CTL_ELEM_TYPE_BYTES:
- private_size = sizeof(unsigned char);
- if (info->count > 512)
- return -EINVAL;
- break;
- case SNDRV_CTL_ELEM_TYPE_IEC958:
- private_size = sizeof(struct snd_aes_iec958);
- if (info->count != 1)
- return -EINVAL;
- break;
- default:
+ access |= SNDRV_CTL_ELEM_ACCESS_USER;
+
+ /*
+ * Check information and calculate the size of data specific to
+ * this userspace control.
+ */
+ if (info->type < SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+ info->type > SNDRV_CTL_ELEM_TYPE_INTEGER64)
return -EINVAL;
- }
- private_size *= info->count;
- ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
- if (ue == NULL)
+ if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED &&
+ info->value.enumerated.items == 0)
+ return -EINVAL;
+ if (info->count < 1 ||
+ info->count > max_value_counts[info->type])
+ return -EINVAL;
+ private_size = value_sizes[info->type] * info->count;
+
+ /*
+ * Allocate a memory object for this userspace control. From this point
+ * on, the instance must be freed with snd_ctl_free_one().
+ *
+ * Note that the elements in this control are locked.
+ */
+ err = snd_ctl_new(&kctl, count, access, file);
+ if (err < 0)
+ return err;
+ memcpy(&kctl->id, &info->id, sizeof(kctl->id));
+ kctl->private_data = kzalloc(sizeof(struct user_element) + private_size * count,
+ GFP_KERNEL);
+ if (kctl->private_data == NULL) {
+ kfree(kctl);
return -ENOMEM;
+ }
+ kctl->private_free = snd_ctl_elem_user_free;
+
+ /* Set private data for this userspace control. */
+ ue = (struct user_element *)kctl->private_data;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
@@ -1247,23 +1302,36 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
- kfree(ue);
+ snd_ctl_free_one(kctl);
return err;
}
}
- kctl.private_free = snd_ctl_elem_user_free;
- _kctl = snd_ctl_new(&kctl, access);
- if (_kctl == NULL) {
- kfree(ue->priv_data);
- kfree(ue);
- return -ENOMEM;
- }
- _kctl->private_data = ue;
- for (idx = 0; idx < _kctl->count; idx++)
- _kctl->vd[idx].owner = file;
- err = snd_ctl_add(card, _kctl);
+
+ /* Set callback functions. */
+ if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
+ kctl->info = snd_ctl_elem_user_enum_info;
+ else
+ kctl->info = snd_ctl_elem_user_info;
+ if (access & SNDRV_CTL_ELEM_ACCESS_READ)
+ kctl->get = snd_ctl_elem_user_get;
+ if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
+ kctl->put = snd_ctl_elem_user_put;
+ if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)
+ kctl->tlv.c = snd_ctl_elem_user_tlv;
+
+ /* snd_ctl_add() frees the instance itself on failure. */
+ err = snd_ctl_add(card, kctl);
if (err < 0)
return err;
+ offset = snd_ctl_get_ioff(kctl, &info->id);
+ snd_ctl_build_ioff(&info->id, kctl, offset);
+ /*
+ * There is no field in which to report how many elements this
+ * operation added, so nothing can be filled in here. Reusing the
+ * 'owner' field for that purpose could break userspace applications,
+ * because that field originally means the PID of the process which
+ * locks the element.
+ */
down_write(&card->controls_rwsem);
card->user_ctl_count++;
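The long switch over element types in snd_ctl_elem_add() is replaced by two lookup tables indexed by type, value_sizes[] and max_value_counts[]. A stand-alone sketch of that table-driven validation (the types and limits are trimmed for illustration):

#include <stddef.h>

enum elem_type { T_BOOLEAN, T_INTEGER, T_BYTES, T_LAST = T_BYTES };

static const size_t value_size[] = {
        [T_BOOLEAN] = sizeof(long),
        [T_INTEGER] = sizeof(long),
        [T_BYTES]   = sizeof(unsigned char),
};

static const unsigned int max_count[] = {
        [T_BOOLEAN] = 128,
        [T_INTEGER] = 128,
        [T_BYTES]   = 512,
};

/* Returns the private payload size in bytes, or 0 if the request is invalid. */
static size_t payload_size(unsigned int type, unsigned int count)
{
        if (type > T_LAST || count < 1 || count > max_count[type])
                return 0;
        return value_size[type] * count;
}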
@@ -1276,9 +1344,19 @@ static int snd_ctl_elem_add_user(struct snd_ctl_file *file,
struct snd_ctl_elem_info __user *_info, int replace)
{
struct snd_ctl_elem_info info;
+ int err;
+
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
- return snd_ctl_elem_add(file, &info, replace);
+ err = snd_ctl_elem_add(file, &info, replace);
+ if (err < 0)
+ return err;
+ if (copy_to_user(_info, &info, sizeof(info))) {
+ snd_ctl_remove_user_ctl(file, &info.id);
+ return -EFAULT;
+ }
+
+ return 0;
}
static int snd_ctl_elem_remove(struct snd_ctl_file *file,
@@ -1338,9 +1416,12 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
goto __kctl_end;
}
vd = &kctl->vd[tlv.numid - kctl->id.numid];
- if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
- (op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
- (op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
+ if ((op_flag == SNDRV_CTL_TLV_OP_READ &&
+ (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
+ (op_flag == SNDRV_CTL_TLV_OP_WRITE &&
+ (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
+ (op_flag == SNDRV_CTL_TLV_OP_CMD &&
+ (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
err = -ENXIO;
goto __kctl_end;
}
@@ -1357,7 +1438,7 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
return 0;
}
} else {
- if (op_flag) {
+ if (op_flag != SNDRV_CTL_TLV_OP_READ) {
err = -ENXIO;
goto __kctl_end;
}
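The numeric op_flag comparisons in the TLV path give way to the symbolic SNDRV_CTL_TLV_OP_* values used above. A hedged sketch of a driver TLV callback written against those names; the handler body is illustrative only:

#include <linux/errno.h>
#include <sound/control.h>

static int example_tlv(struct snd_kcontrol *kctl, int op_flag,
                       unsigned int size, unsigned int __user *tlv)
{
        switch (op_flag) {
        case SNDRV_CTL_TLV_OP_READ:
                return 0;               /* copy TLV data to user space here */
        case SNDRV_CTL_TLV_OP_WRITE:
        case SNDRV_CTL_TLV_OP_CMD:
                return -ENXIO;          /* read-only in this sketch */
        default:
                return -EINVAL;
        }
}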
diff --git a/sound/core/device.c b/sound/core/device.c
index 41bec3075ae5..8918838b1999 100644
--- a/sound/core/device.c
+++ b/sound/core/device.c
@@ -50,10 +50,8 @@ int snd_device_new(struct snd_card *card, enum snd_device_type type,
if (snd_BUG_ON(!card || !device_data || !ops))
return -ENXIO;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(card->dev, "Cannot allocate device, type=%d\n", type);
+ if (!dev)
return -ENOMEM;
- }
INIT_LIST_HEAD(&dev->list);
dev->card = card;
dev->type = type;
@@ -73,7 +71,7 @@ int snd_device_new(struct snd_card *card, enum snd_device_type type,
}
EXPORT_SYMBOL(snd_device_new);
-static int __snd_device_disconnect(struct snd_device *dev)
+static void __snd_device_disconnect(struct snd_device *dev)
{
if (dev->state == SNDRV_DEV_REGISTERED) {
if (dev->ops->dev_disconnect &&
@@ -81,7 +79,6 @@ static int __snd_device_disconnect(struct snd_device *dev)
dev_err(dev->card->dev, "device disconnect failure\n");
dev->state = SNDRV_DEV_DISCONNECTED;
}
- return 0;
}
static void __snd_device_free(struct snd_device *dev)
@@ -109,6 +106,34 @@ static struct snd_device *look_for_dev(struct snd_card *card, void *device_data)
}
/**
+ * snd_device_disconnect - disconnect the device
+ * @card: the card instance
+ * @device_data: the data pointer to disconnect
+ *
+ * Puts the device into the disconnected state, invoking its
+ * dev_disconnect callback if the device was already registered.
+ *
+ * Usually called from snd_card_disconnect().
+ *
+ * This function returns no value; a device that is not found is only
+ * reported with a debug message.
+ */
+void snd_device_disconnect(struct snd_card *card, void *device_data)
+{
+ struct snd_device *dev;
+
+ if (snd_BUG_ON(!card || !device_data))
+ return;
+ dev = look_for_dev(card, device_data);
+ if (dev)
+ __snd_device_disconnect(dev);
+ else
+ dev_dbg(card->dev, "device disconnect %p (from %pF), not found\n",
+ device_data, __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(snd_device_disconnect);
+
+/**
* snd_device_free - release the device from the card
* @card: the card instance
* @device_data: the data pointer to release
@@ -195,18 +220,14 @@ int snd_device_register_all(struct snd_card *card)
* disconnect all the devices on the card.
* called from init.c
*/
-int snd_device_disconnect_all(struct snd_card *card)
+void snd_device_disconnect_all(struct snd_card *card)
{
struct snd_device *dev;
- int err = 0;
if (snd_BUG_ON(!card))
- return -ENXIO;
- list_for_each_entry_reverse(dev, &card->devices, list) {
- if (__snd_device_disconnect(dev) < 0)
- err = -ENXIO;
- }
- return err;
+ return;
+ list_for_each_entry_reverse(dev, &card->devices, list)
+ __snd_device_disconnect(dev);
}
/*
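device.c gains an exported snd_device_disconnect() for a single component, and snd_device_disconnect_all() becomes void since per-device failures are only logged. A hedged usage sketch of the new helper; the chip pointer is a placeholder:

#include <sound/core.h>

static void example_disconnect_component(struct snd_card *card, void *chip)
{
        /* Invokes the component's dev_disconnect callback, if registered,
         * without freeing the component itself. */
        snd_device_disconnect(card, chip);
}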
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 84244a5143cf..51692c8a39ea 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -378,10 +378,8 @@ int snd_hwdep_new(struct snd_card *card, char *id, int device,
if (rhwdep)
*rhwdep = NULL;
hwdep = kzalloc(sizeof(*hwdep), GFP_KERNEL);
- if (hwdep == NULL) {
- dev_err(card->dev, "hwdep: cannot allocate\n");
+ if (!hwdep)
return -ENOMEM;
- }
init_waitqueue_head(&hwdep->open_wait);
mutex_init(&hwdep->open_mutex);
diff --git a/sound/core/init.c b/sound/core/init.c
index 35419054821c..04734e047bfe 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -400,7 +400,6 @@ static const struct file_operations snd_shutdown_f_ops =
int snd_card_disconnect(struct snd_card *card)
{
struct snd_monitor_file *mfile;
- int err;
if (!card)
return -EINVAL;
@@ -445,9 +444,7 @@ int snd_card_disconnect(struct snd_card *card)
#endif
/* notify all devices that we are disconnected */
- err = snd_device_disconnect_all(card);
- if (err < 0)
- dev_err(card->dev, "not all devices for card %i can be disconnected\n", card->number);
+ snd_device_disconnect_all(card);
snd_info_card_disconnect(card);
if (card->registered) {
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 5e6349f00ecd..056f8e274851 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -1212,10 +1212,8 @@ static void snd_mixer_oss_proc_write(struct snd_info_entry *entry,
/* not changed */
goto __unlock;
tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
- if (! tbl) {
- pr_err("ALSA: mixer_oss: no memory\n");
+ if (!tbl)
goto __unlock;
- }
tbl->oss_id = ch;
tbl->name = kstrdup(str, GFP_KERNEL);
if (! tbl->name) {
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 80423a4ccab6..58550cc93f28 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -854,7 +854,6 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
params = kmalloc(sizeof(*params), GFP_KERNEL);
sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
if (!sw_params || !params || !sparams) {
- pcm_dbg(substream->pcm, "No memory\n");
err = -ENOMEM;
goto failure;
}
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 0345e53a340c..b25bcf5b8644 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -49,8 +49,6 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device)
struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) {
- if (pcm->internal)
- continue;
if (pcm->card == card && pcm->device == device)
return pcm;
}
@@ -62,8 +60,6 @@ static int snd_pcm_next(struct snd_card *card, int device)
struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) {
- if (pcm->internal)
- continue;
if (pcm->card == card && pcm->device > device)
return pcm->device;
else if (pcm->card->number > card->number)
@@ -76,6 +72,9 @@ static int snd_pcm_add(struct snd_pcm *newpcm)
{
struct snd_pcm *pcm;
+ if (newpcm->internal)
+ return 0;
+
list_for_each_entry(pcm, &snd_pcm_devices, list) {
if (pcm->card == newpcm->card && pcm->device == newpcm->device)
return -EBUSY;
@@ -344,11 +343,8 @@ static void snd_pcm_proc_info_read(struct snd_pcm_substream *substream,
return;
info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (! info) {
- pcm_dbg(substream->pcm,
- "snd_pcm_proc_info_read: cannot malloc\n");
+ if (!info)
return;
- }
err = snd_pcm_info(substream, info);
if (err < 0) {
@@ -718,10 +714,8 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
prev = NULL;
for (idx = 0, prev = NULL; idx < substream_count; idx++) {
substream = kzalloc(sizeof(*substream), GFP_KERNEL);
- if (substream == NULL) {
- pcm_err(pcm, "Cannot allocate PCM substream\n");
+ if (!substream)
return -ENOMEM;
- }
substream->pcm = pcm;
substream->pstr = pstr;
substream->number = idx;
@@ -775,13 +769,14 @@ static int _snd_pcm_new(struct snd_card *card, const char *id, int device,
if (rpcm)
*rpcm = NULL;
pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
- if (pcm == NULL) {
- dev_err(card->dev, "Cannot allocate PCM\n");
+ if (!pcm)
return -ENOMEM;
- }
pcm->card = card;
pcm->device = device;
pcm->internal = internal;
+ mutex_init(&pcm->open_mutex);
+ init_waitqueue_head(&pcm->open_wait);
+ INIT_LIST_HEAD(&pcm->list);
if (id)
strlcpy(pcm->id, id, sizeof(pcm->id));
if ((err = snd_pcm_new_stream(pcm, SNDRV_PCM_STREAM_PLAYBACK, playback_count)) < 0) {
@@ -792,8 +787,6 @@ static int _snd_pcm_new(struct snd_card *card, const char *id, int device,
snd_pcm_free(pcm);
return err;
}
- mutex_init(&pcm->open_mutex);
- init_waitqueue_head(&pcm->open_wait);
if ((err = snd_device_new(card, SNDRV_DEV_PCM, pcm, &ops)) < 0) {
snd_pcm_free(pcm);
return err;
@@ -888,8 +881,9 @@ static int snd_pcm_free(struct snd_pcm *pcm)
if (!pcm)
return 0;
- list_for_each_entry(notify, &snd_pcm_notify_list, list) {
- notify->n_unregister(pcm);
+ if (!pcm->internal) {
+ list_for_each_entry(notify, &snd_pcm_notify_list, list)
+ notify->n_unregister(pcm);
}
if (pcm->private_free)
pcm->private_free(pcm);
@@ -919,6 +913,9 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
if (snd_BUG_ON(!pcm || !rsubstream))
return -ENXIO;
+ if (snd_BUG_ON(stream != SNDRV_PCM_STREAM_PLAYBACK &&
+ stream != SNDRV_PCM_STREAM_CAPTURE))
+ return -EINVAL;
*rsubstream = NULL;
pstr = &pcm->streams[stream];
if (pstr->substream == NULL || pstr->substream_count == 0)
@@ -927,25 +924,14 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
card = pcm->card;
prefer_subdevice = snd_ctl_get_preferred_subdevice(card, SND_CTL_SUBDEV_PCM);
- switch (stream) {
- case SNDRV_PCM_STREAM_PLAYBACK:
- if (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX) {
- for (substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; substream; substream = substream->next) {
- if (SUBSTREAM_BUSY(substream))
- return -EAGAIN;
- }
- }
- break;
- case SNDRV_PCM_STREAM_CAPTURE:
- if (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX) {
- for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next) {
- if (SUBSTREAM_BUSY(substream))
- return -EAGAIN;
- }
+ if (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX) {
+ int opposite = !stream;
+
+ for (substream = pcm->streams[opposite].substream; substream;
+ substream = substream->next) {
+ if (SUBSTREAM_BUSY(substream))
+ return -EAGAIN;
}
- break;
- default:
- return -EINVAL;
}
if (file->f_flags & O_APPEND) {
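The half-duplex check in snd_pcm_attach_substream() now computes the opposite direction as !stream, which works because SNDRV_PCM_STREAM_PLAYBACK and SNDRV_PCM_STREAM_CAPTURE are defined as 0 and 1, and the new snd_BUG_ON() above rejects any other value. A trivial sketch of the trick:

#include <sound/pcm.h>

/* Pick the other direction of a PCM stream pair. */
static int example_opposite_stream(int stream)
{
        /* Valid only because SNDRV_PCM_STREAM_PLAYBACK/CAPTURE are 0 and 1. */
        return !stream;
}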
@@ -968,15 +954,12 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
return 0;
}
- if (prefer_subdevice >= 0) {
- for (substream = pstr->substream; substream; substream = substream->next)
- if (!SUBSTREAM_BUSY(substream) && substream->number == prefer_subdevice)
- goto __ok;
- }
- for (substream = pstr->substream; substream; substream = substream->next)
- if (!SUBSTREAM_BUSY(substream))
+ for (substream = pstr->substream; substream; substream = substream->next) {
+ if (!SUBSTREAM_BUSY(substream) &&
+ (prefer_subdevice == -1 ||
+ substream->number == prefer_subdevice))
break;
- __ok:
+ }
if (substream == NULL)
return -EAGAIN;
@@ -1086,15 +1069,16 @@ static int snd_pcm_dev_register(struct snd_device *device)
if (snd_BUG_ON(!device || !device->device_data))
return -ENXIO;
pcm = device->device_data;
+ if (pcm->internal)
+ return 0;
+
mutex_lock(&register_mutex);
err = snd_pcm_add(pcm);
- if (err) {
- mutex_unlock(&register_mutex);
- return err;
- }
+ if (err)
+ goto unlock;
for (cidx = 0; cidx < 2; cidx++) {
int devtype = -1;
- if (pcm->streams[cidx].substream == NULL || pcm->internal)
+ if (pcm->streams[cidx].substream == NULL)
continue;
switch (cidx) {
case SNDRV_PCM_STREAM_PLAYBACK:
@@ -1109,9 +1093,8 @@ static int snd_pcm_dev_register(struct snd_device *device)
&snd_pcm_f_ops[cidx], pcm,
&pcm->streams[cidx].dev);
if (err < 0) {
- list_del(&pcm->list);
- mutex_unlock(&register_mutex);
- return err;
+ list_del_init(&pcm->list);
+ goto unlock;
}
for (substream = pcm->streams[cidx].substream; substream; substream = substream->next)
@@ -1121,8 +1104,9 @@ static int snd_pcm_dev_register(struct snd_device *device)
list_for_each_entry(notify, &snd_pcm_notify_list, list)
notify->n_register(pcm);
+ unlock:
mutex_unlock(&register_mutex);
- return 0;
+ return err;
}
static int snd_pcm_dev_disconnect(struct snd_device *device)
@@ -1133,13 +1117,10 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
int cidx;
mutex_lock(&register_mutex);
- if (list_empty(&pcm->list))
- goto unlock;
-
mutex_lock(&pcm->open_mutex);
wake_up(&pcm->open_wait);
list_del_init(&pcm->list);
- for (cidx = 0; cidx < 2; cidx++)
+ for (cidx = 0; cidx < 2; cidx++) {
for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) {
snd_pcm_stream_lock_irq(substream);
if (substream->runtime) {
@@ -1149,18 +1130,20 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
}
snd_pcm_stream_unlock_irq(substream);
}
- list_for_each_entry(notify, &snd_pcm_notify_list, list) {
- notify->n_disconnect(pcm);
+ }
+ if (!pcm->internal) {
+ list_for_each_entry(notify, &snd_pcm_notify_list, list)
+ notify->n_disconnect(pcm);
}
for (cidx = 0; cidx < 2; cidx++) {
- snd_unregister_device(&pcm->streams[cidx].dev);
+ if (!pcm->internal)
+ snd_unregister_device(&pcm->streams[cidx].dev);
if (pcm->streams[cidx].chmap_kctl) {
snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
pcm->streams[cidx].chmap_kctl = NULL;
}
}
mutex_unlock(&pcm->open_mutex);
- unlock:
mutex_unlock(&register_mutex);
return 0;
}
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 2d957ba63557..b48b434444ed 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -194,18 +194,30 @@ struct snd_pcm_status32 {
u32 avail_max;
u32 overrange;
s32 suspended_state;
- u32 reserved_alignment;
+ u32 audio_tstamp_data;
struct compat_timespec audio_tstamp;
- unsigned char reserved[56-sizeof(struct compat_timespec)];
+ struct compat_timespec driver_tstamp;
+ u32 audio_tstamp_accuracy;
+ unsigned char reserved[52-2*sizeof(struct compat_timespec)];
} __attribute__((packed));
static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
- struct snd_pcm_status32 __user *src)
+ struct snd_pcm_status32 __user *src,
+ bool ext)
{
struct snd_pcm_status status;
int err;
+ memset(&status, 0, sizeof(status));
+ /*
+ * with extension, parameters are read/write,
+ * get audio_tstamp_data from user,
+ * ignore rest of status structure
+ */
+ if (ext && get_user(status.audio_tstamp_data,
+ (u32 __user *)(&src->audio_tstamp_data)))
+ return -EFAULT;
err = snd_pcm_status(substream, &status);
if (err < 0)
return err;
@@ -222,7 +234,10 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
put_user(status.avail_max, &src->avail_max) ||
put_user(status.overrange, &src->overrange) ||
put_user(status.suspended_state, &src->suspended_state) ||
- compat_put_timespec(&status.audio_tstamp, &src->audio_tstamp))
+ put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
+ compat_put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
+ compat_put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
+ put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
return -EFAULT;
return err;
@@ -457,6 +472,7 @@ enum {
SNDRV_PCM_IOCTL_HW_PARAMS32 = _IOWR('A', 0x11, struct snd_pcm_hw_params32),
SNDRV_PCM_IOCTL_SW_PARAMS32 = _IOWR('A', 0x13, struct snd_pcm_sw_params32),
SNDRV_PCM_IOCTL_STATUS32 = _IOR('A', 0x20, struct snd_pcm_status32),
+ SNDRV_PCM_IOCTL_STATUS_EXT32 = _IOWR('A', 0x24, struct snd_pcm_status32),
SNDRV_PCM_IOCTL_DELAY32 = _IOR('A', 0x21, s32),
SNDRV_PCM_IOCTL_CHANNEL_INFO32 = _IOR('A', 0x32, struct snd_pcm_channel_info32),
SNDRV_PCM_IOCTL_REWIND32 = _IOW('A', 0x46, u32),
@@ -517,7 +533,9 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
case SNDRV_PCM_IOCTL_SW_PARAMS32:
return snd_pcm_ioctl_sw_params_compat(substream, argp);
case SNDRV_PCM_IOCTL_STATUS32:
- return snd_pcm_status_user_compat(substream, argp);
+ return snd_pcm_status_user_compat(substream, argp, false);
+ case SNDRV_PCM_IOCTL_STATUS_EXT32:
+ return snd_pcm_status_user_compat(substream, argp, true);
case SNDRV_PCM_IOCTL_SYNC_PTR32:
return snd_pcm_ioctl_sync_ptr_compat(substream, argp);
case SNDRV_PCM_IOCTL_CHANNEL_INFO32:
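SNDRV_PCM_IOCTL_STATUS_EXT32 is declared _IOWR because, unlike the read-only STATUS call, the caller passes one input field (audio_tstamp_data) inside the same structure; the rest of the structure is output only. A generic sketch of that one-field-in, everything-out convention; the struct and names are invented for illustration:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct example_status {
        __u32 request;          /* input: what the caller asks for */
        __u32 result;           /* output: filled in by the kernel */
};

static int example_status_ioctl(struct example_status __user *ustat, bool ext)
{
        struct example_status st = { 0 };

        /* Only the EXT variant reads anything back from user space. */
        if (ext && get_user(st.request, &ustat->request))
                return -EFAULT;

        st.result = st.request;                 /* fill in outputs here */

        if (copy_to_user(ustat, &st, sizeof(st)))
                return -EFAULT;
        return 0;
}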
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 6542c4083594..fba365a78390 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
*
* The function should usually be called from the pcm open callback. Note that
* this function will use private_data field of the substream's runtime. So it
- * is not availabe to your pcm driver implementation.
+ * is not available to your pcm driver implementation.
*/
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
struct dma_chan *chan)
@@ -328,7 +328,7 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
* This function will request a DMA channel using the passed filter function and
* data. The function should usually be called from the pcm open callback. Note
* that this function will use private_data field of the substream's runtime. So
- * it is not availabe to your pcm driver implementation.
+ * it is not available to your pcm driver implementation.
*/
int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
dma_filter_fn filter_fn, void *filter_data)
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index ffd656012ab8..ac6b33f3779c 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -232,6 +232,49 @@ int snd_pcm_update_state(struct snd_pcm_substream *substream,
return 0;
}
+static void update_audio_tstamp(struct snd_pcm_substream *substream,
+ struct timespec *curr_tstamp,
+ struct timespec *audio_tstamp)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u64 audio_frames, audio_nsecs;
+ struct timespec driver_tstamp;
+
+ if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
+ return;
+
+ if (!(substream->ops->get_time_info) ||
+ (runtime->audio_tstamp_report.actual_type ==
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
+
+ /*
+ * provide audio timestamp derived from pointer position
+ * add delay only if requested
+ */
+
+ audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
+
+ if (runtime->audio_tstamp_config.report_delay) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ audio_frames -= runtime->delay;
+ else
+ audio_frames += runtime->delay;
+ }
+ audio_nsecs = div_u64(audio_frames * 1000000000LL,
+ runtime->rate);
+ *audio_tstamp = ns_to_timespec(audio_nsecs);
+ }
+ runtime->status->audio_tstamp = *audio_tstamp;
+ runtime->status->tstamp = *curr_tstamp;
+
+ /*
+ * re-take a driver timestamp to let apps detect if the reference tstamp
+ * read by low-level hardware was provided with a delay
+ */
+ snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
+ runtime->driver_tstamp = driver_tstamp;
+}
+
static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
unsigned int in_interrupt)
{
@@ -256,11 +299,18 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
pos = substream->ops->pointer(substream);
curr_jiffies = jiffies;
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
- snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
-
- if ((runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK) &&
- (substream->ops->wall_clock))
- substream->ops->wall_clock(substream, &audio_tstamp);
+ if ((substream->ops->get_time_info) &&
+ (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
+ substream->ops->get_time_info(substream, &curr_tstamp,
+ &audio_tstamp,
+ &runtime->audio_tstamp_config,
+ &runtime->audio_tstamp_report);
+
+ /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
+ if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
+ snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
+ } else
+ snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
}
if (pos == SNDRV_PCM_POS_XRUN) {
@@ -403,8 +453,10 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
}
no_delta_check:
- if (runtime->status->hw_ptr == new_hw_ptr)
+ if (runtime->status->hw_ptr == new_hw_ptr) {
+ update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
return 0;
+ }
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
runtime->silence_size > 0)
@@ -426,30 +478,8 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
snd_BUG_ON(crossed_boundary != 1);
runtime->hw_ptr_wrap += runtime->boundary;
}
- if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
- runtime->status->tstamp = curr_tstamp;
- if (!(runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)) {
- /*
- * no wall clock available, provide audio timestamp
- * derived from pointer position+delay
- */
- u64 audio_frames, audio_nsecs;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- audio_frames = runtime->hw_ptr_wrap
- + runtime->status->hw_ptr
- - runtime->delay;
- else
- audio_frames = runtime->hw_ptr_wrap
- + runtime->status->hw_ptr
- + runtime->delay;
- audio_nsecs = div_u64(audio_frames * 1000000000LL,
- runtime->rate);
- audio_tstamp = ns_to_timespec(audio_nsecs);
- }
- runtime->status->audio_tstamp = audio_tstamp;
- }
+ update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
return snd_pcm_update_state(substream, runtime);
}
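update_audio_tstamp() centralizes what snd_pcm_update_hw_ptr0() previously computed inline: when no hardware timestamp is reported, the audio timestamp is the frame counter converted to nanoseconds at the stream rate. A reduced sketch of that conversion; the helper name is made up:

#include <linux/math64.h>
#include <linux/time.h>

static struct timespec example_frames_to_ts(u64 frames, unsigned int rate)
{
        u64 nsecs = div_u64(frames * 1000000000ULL, rate);

        return ns_to_timespec(nsecs);
}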
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 279e24f61305..d126c03361ae 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/pm_qos.h>
-#include <linux/aio.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
@@ -35,6 +34,7 @@
#include <sound/pcm_params.h>
#include <sound/timer.h>
#include <sound/minors.h>
+#include <linux/uio.h>
/*
* Compatibility
@@ -707,6 +707,23 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_stream_lock_irq(substream);
+
+ snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
+ &runtime->audio_tstamp_config);
+
+ /* backwards compatible behavior */
+ if (runtime->audio_tstamp_config.type_requested ==
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
+ if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
+ runtime->audio_tstamp_config.type_requested =
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
+ else
+ runtime->audio_tstamp_config.type_requested =
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
+ runtime->audio_tstamp_report.valid = 0;
+ } else
+ runtime->audio_tstamp_report.valid = 1;
+
status->state = runtime->status->state;
status->suspended_state = runtime->status->suspended_state;
if (status->state == SNDRV_PCM_STATE_OPEN)
@@ -716,8 +733,15 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
snd_pcm_update_hw_ptr(substream);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
status->tstamp = runtime->status->tstamp;
+ status->driver_tstamp = runtime->driver_tstamp;
status->audio_tstamp =
runtime->status->audio_tstamp;
+ if (runtime->audio_tstamp_report.valid == 1)
+ /* backwards compatibility, no report provided in COMPAT mode */
+ snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
+ &status->audio_tstamp_accuracy,
+ &runtime->audio_tstamp_report);
+
goto _tstamp_end;
}
} else {
@@ -753,12 +777,21 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
}
static int snd_pcm_status_user(struct snd_pcm_substream *substream,
- struct snd_pcm_status __user * _status)
+ struct snd_pcm_status __user * _status,
+ bool ext)
{
struct snd_pcm_status status;
int res;
-
+
memset(&status, 0, sizeof(status));
+ /*
+ * with extension, parameters are read/write,
+ * get audio_tstamp_data from user,
+ * ignore rest of status structure
+ */
+ if (ext && get_user(status.audio_tstamp_data,
+ (u32 __user *)(&_status->audio_tstamp_data)))
+ return -EFAULT;
res = snd_pcm_status(substream, &status);
if (res < 0)
return res;
@@ -2725,7 +2758,9 @@ static int snd_pcm_common_ioctl1(struct file *file,
case SNDRV_PCM_IOCTL_SW_PARAMS:
return snd_pcm_sw_params_user(substream, arg);
case SNDRV_PCM_IOCTL_STATUS:
- return snd_pcm_status_user(substream, arg);
+ return snd_pcm_status_user(substream, arg, false);
+ case SNDRV_PCM_IOCTL_STATUS_EXT:
+ return snd_pcm_status_user(substream, arg, true);
case SNDRV_PCM_IOCTL_CHANNEL_INFO:
return snd_pcm_channel_info_user(substream, arg);
case SNDRV_PCM_IOCTL_PREPARE:
@@ -3033,9 +3068,7 @@ static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
return result;
}
-static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
-
+static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
@@ -3052,16 +3085,18 @@ static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
- if (nr_segs > 1024 || nr_segs != runtime->channels)
+ if (!iter_is_iovec(to))
+ return -EINVAL;
+ if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
return -EINVAL;
- if (!frame_aligned(runtime, iov->iov_len))
+ if (!frame_aligned(runtime, to->iov->iov_len))
return -EINVAL;
- frames = bytes_to_samples(runtime, iov->iov_len);
- bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
+ frames = bytes_to_samples(runtime, to->iov->iov_len);
+ bufs = kmalloc(sizeof(void *) * to->nr_segs, GFP_KERNEL);
if (bufs == NULL)
return -ENOMEM;
- for (i = 0; i < nr_segs; ++i)
- bufs[i] = iov[i].iov_base;
+ for (i = 0; i < to->nr_segs; ++i)
+ bufs[i] = to->iov[i].iov_base;
result = snd_pcm_lib_readv(substream, bufs, frames);
if (result > 0)
result = frames_to_bytes(runtime, result);
@@ -3069,8 +3104,7 @@ static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
return result;
}
-static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
@@ -3087,15 +3121,17 @@ static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov,
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
- if (nr_segs > 128 || nr_segs != runtime->channels ||
- !frame_aligned(runtime, iov->iov_len))
+ if (!iter_is_iovec(from))
+ return -EINVAL;
+ if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
+ !frame_aligned(runtime, from->iov->iov_len))
return -EINVAL;
- frames = bytes_to_samples(runtime, iov->iov_len);
- bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
+ frames = bytes_to_samples(runtime, from->iov->iov_len);
+ bufs = kmalloc(sizeof(void *) * from->nr_segs, GFP_KERNEL);
if (bufs == NULL)
return -ENOMEM;
- for (i = 0; i < nr_segs; ++i)
- bufs[i] = iov[i].iov_base;
+ for (i = 0; i < from->nr_segs; ++i)
+ bufs[i] = from->iov[i].iov_base;
result = snd_pcm_lib_writev(substream, bufs, frames);
if (result > 0)
result = frames_to_bytes(runtime, result);
@@ -3633,7 +3669,7 @@ const struct file_operations snd_pcm_f_ops[2] = {
{
.owner = THIS_MODULE,
.write = snd_pcm_write,
- .aio_write = snd_pcm_aio_write,
+ .write_iter = snd_pcm_writev,
.open = snd_pcm_playback_open,
.release = snd_pcm_release,
.llseek = no_llseek,
@@ -3647,7 +3683,7 @@ const struct file_operations snd_pcm_f_ops[2] = {
{
.owner = THIS_MODULE,
.read = snd_pcm_read,
- .aio_read = snd_pcm_aio_read,
+ .read_iter = snd_pcm_readv,
.open = snd_pcm_capture_open,
.release = snd_pcm_release,
.llseek = no_llseek,
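The switch from aio_read/aio_write to read_iter/write_iter replaces the raw iovec array with a struct iov_iter; the same per-channel validation is now expressed through the iterator. A reduced sketch of those checks, with illustrative limits rather than the exact ALSA bounds:

#include <linux/errno.h>
#include <linux/uio.h>

static int example_check_iter(struct iov_iter *to, unsigned int channels)
{
        if (!iter_is_iovec(to))
                return -EINVAL;         /* only plain iovecs are supported */
        if (to->nr_segs != channels)
                return -EINVAL;         /* one segment per channel */
        return 0;
}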
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index b5a748596fc4..a7759846fbaa 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1429,10 +1429,8 @@ static int snd_rawmidi_alloc_substreams(struct snd_rawmidi *rmidi,
for (idx = 0; idx < count; idx++) {
substream = kzalloc(sizeof(*substream), GFP_KERNEL);
- if (substream == NULL) {
- rmidi_err(rmidi, "rawmidi: cannot allocate substream\n");
+ if (!substream)
return -ENOMEM;
- }
substream->stream = direction;
substream->number = idx;
substream->rmidi = rmidi;
@@ -1479,10 +1477,8 @@ int snd_rawmidi_new(struct snd_card *card, char *id, int device,
if (rrawmidi)
*rrawmidi = NULL;
rmidi = kzalloc(sizeof(*rmidi), GFP_KERNEL);
- if (rmidi == NULL) {
- dev_err(card->dev, "rawmidi: cannot allocate\n");
+ if (!rmidi)
return -ENOMEM;
- }
rmidi->card = card;
rmidi->device = device;
mutex_init(&rmidi->open_mutex);
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 16d42679e43f..72873a46afeb 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -65,15 +65,20 @@ static unsigned int odev_poll(struct file *file, poll_table * wait);
* module interface
*/
+static struct snd_seq_driver seq_oss_synth_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .probe = snd_seq_oss_synth_probe,
+ .remove = snd_seq_oss_synth_remove,
+ },
+ .id = SNDRV_SEQ_DEV_ID_OSS,
+ .argsize = sizeof(struct snd_seq_oss_reg),
+};
+
static int __init alsa_seq_oss_init(void)
{
int rc;
- static struct snd_seq_dev_ops ops = {
- snd_seq_oss_synth_register,
- snd_seq_oss_synth_unregister,
- };
- snd_seq_autoload_lock();
if ((rc = register_device()) < 0)
goto error;
if ((rc = register_proc()) < 0) {
@@ -86,8 +91,8 @@ static int __init alsa_seq_oss_init(void)
goto error;
}
- if ((rc = snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OSS, &ops,
- sizeof(struct snd_seq_oss_reg))) < 0) {
+ rc = snd_seq_driver_register(&seq_oss_synth_driver);
+ if (rc < 0) {
snd_seq_oss_delete_client();
unregister_proc();
unregister_device();
@@ -98,13 +103,12 @@ static int __init alsa_seq_oss_init(void)
snd_seq_oss_synth_init();
error:
- snd_seq_autoload_unlock();
return rc;
}
static void __exit alsa_seq_oss_exit(void)
{
- snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_OSS);
+ snd_seq_driver_unregister(&seq_oss_synth_driver);
snd_seq_oss_delete_client();
unregister_proc();
unregister_device();
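seq_oss now plugs its synth hooks into the sequencer core as a regular driver on the new snd_seq bus, instead of filling in the old snd_seq_dev_ops table. A hedged sketch of a minimal sequencer driver registration under this scheme; the id string, argument struct, and callbacks are placeholders:

#include <linux/init.h>
#include <linux/module.h>
#include <sound/seq_device.h>

/* Hypothetical argument block carried by matching snd_seq devices. */
struct example_seq_reg {
        int dummy;
};

static int example_seq_probe(struct device *dev)
{
        struct snd_seq_device *sdev = to_seq_dev(dev);
        struct example_seq_reg *reg = SNDRV_SEQ_DEVICE_ARGPTR(sdev);

        (void)reg;              /* bind to the device here */
        return 0;
}

static int example_seq_remove(struct device *dev)
{
        return 0;               /* undo whatever probe set up */
}

static struct snd_seq_driver example_seq_driver = {
        .driver = {
                .name   = KBUILD_MODNAME,
                .probe  = example_seq_probe,
                .remove = example_seq_remove,
        },
        .id      = "example",   /* matched against snd_seq_device->id */
        .argsize = sizeof(struct example_seq_reg),
};

static int __init example_seq_init(void)
{
        return snd_seq_driver_register(&example_seq_driver);
}

static void __exit example_seq_exit(void)
{
        snd_seq_driver_unregister(&example_seq_driver);
}

module_init(example_seq_init);
module_exit(example_seq_exit);
MODULE_LICENSE("GPL");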
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index b0e32e161dd1..2de3feff70d0 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -188,10 +188,8 @@ snd_seq_oss_open(struct file *file, int level)
struct seq_oss_devinfo *dp;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
- if (!dp) {
- pr_err("ALSA: seq_oss: can't malloc device info\n");
+ if (!dp)
return -ENOMEM;
- }
dp->cseq = system_client;
dp->port = -1;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index e79cc44b1394..96e8395ae586 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -173,10 +173,9 @@ snd_seq_oss_midi_check_new_port(struct snd_seq_port_info *pinfo)
/*
* allocate midi info record
*/
- if ((mdev = kzalloc(sizeof(*mdev), GFP_KERNEL)) == NULL) {
- pr_err("ALSA: seq_oss: can't malloc midi info\n");
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
return -ENOMEM;
- }
/* copy the port information */
mdev->client = pinfo->addr.client;
diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
index 654d17a5023c..c080c73cea04 100644
--- a/sound/core/seq/oss/seq_oss_readq.c
+++ b/sound/core/seq/oss/seq_oss_readq.c
@@ -47,13 +47,12 @@ snd_seq_oss_readq_new(struct seq_oss_devinfo *dp, int maxlen)
{
struct seq_oss_readq *q;
- if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL) {
- pr_err("ALSA: seq_oss: can't malloc read queue\n");
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
return NULL;
- }
- if ((q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL)) == NULL) {
- pr_err("ALSA: seq_oss: can't malloc read queue buffer\n");
+ q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL);
+ if (!q->q) {
kfree(q);
return NULL;
}
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 701feb71b700..48e4fe1b68ab 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -98,17 +98,17 @@ snd_seq_oss_synth_init(void)
* registration of the synth device
*/
int
-snd_seq_oss_synth_register(struct snd_seq_device *dev)
+snd_seq_oss_synth_probe(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
int i;
struct seq_oss_synth *rec;
struct snd_seq_oss_reg *reg = SNDRV_SEQ_DEVICE_ARGPTR(dev);
unsigned long flags;
- if ((rec = kzalloc(sizeof(*rec), GFP_KERNEL)) == NULL) {
- pr_err("ALSA: seq_oss: can't malloc synth info\n");
+ rec = kzalloc(sizeof(*rec), GFP_KERNEL);
+ if (!rec)
return -ENOMEM;
- }
rec->seq_device = -1;
rec->synth_type = reg->type;
rec->synth_subtype = reg->subtype;
@@ -149,8 +149,9 @@ snd_seq_oss_synth_register(struct snd_seq_device *dev)
int
-snd_seq_oss_synth_unregister(struct snd_seq_device *dev)
+snd_seq_oss_synth_remove(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
int index;
struct seq_oss_synth *rec = dev->driver_data;
unsigned long flags;
@@ -247,7 +248,6 @@ snd_seq_oss_synth_setup(struct seq_oss_devinfo *dp)
if (info->nr_voices > 0) {
info->ch = kcalloc(info->nr_voices, sizeof(struct seq_oss_chinfo), GFP_KERNEL);
if (!info->ch) {
- pr_err("ALSA: seq_oss: Cannot malloc voices\n");
rec->oper.close(&info->arg);
module_put(rec->oper.owner);
snd_use_lock_free(&rec->use_lock);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index dbdfcbb80eaa..74ac55f166b6 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -28,8 +28,8 @@
#include <sound/seq_device.h>
void snd_seq_oss_synth_init(void);
-int snd_seq_oss_synth_register(struct snd_seq_device *dev);
-int snd_seq_oss_synth_unregister(struct snd_seq_device *dev);
+int snd_seq_oss_synth_probe(struct device *dev);
+int snd_seq_oss_synth_remove(struct device *dev);
void snd_seq_oss_synth_setup(struct seq_oss_devinfo *dp);
void snd_seq_oss_synth_setup_midi(struct seq_oss_devinfo *dp);
void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 48287651ac77..edbdab85fc02 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1879,6 +1879,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
if (cptr == NULL)
return -ENOENT;
memset(&info, 0, sizeof(info));
+ info.client = cptr->number;
info.output_pool = cptr->pool->size;
info.output_room = cptr->pool->room;
info.output_free = info.output_pool;
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
index 0631bdadd12b..d99f99d61983 100644
--- a/sound/core/seq/seq_device.c
+++ b/sound/core/seq/seq_device.c
@@ -36,6 +36,7 @@
*
*/
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <sound/core.h>
@@ -51,140 +52,78 @@ MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA sequencer device management");
MODULE_LICENSE("GPL");
-/* driver state */
-#define DRIVER_EMPTY 0
-#define DRIVER_LOADED (1<<0)
-#define DRIVER_REQUESTED (1<<1)
-#define DRIVER_LOCKED (1<<2)
-#define DRIVER_REQUESTING (1<<3)
-
-struct ops_list {
- char id[ID_LEN]; /* driver id */
- int driver; /* driver state */
- int used; /* reference counter */
- int argsize; /* argument size */
-
- /* operators */
- struct snd_seq_dev_ops ops;
-
- /* registered devices */
- struct list_head dev_list; /* list of devices */
- int num_devices; /* number of associated devices */
- int num_init_devices; /* number of initialized devices */
- struct mutex reg_mutex;
-
- struct list_head list; /* next driver */
-};
+/*
+ * bus definition
+ */
+static int snd_seq_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct snd_seq_device *sdev = to_seq_dev(dev);
+ struct snd_seq_driver *sdrv = to_seq_drv(drv);
+ return strcmp(sdrv->id, sdev->id) == 0 &&
+ sdrv->argsize == sdev->argsize;
+}
-static LIST_HEAD(opslist);
-static int num_ops;
-static DEFINE_MUTEX(ops_mutex);
-#ifdef CONFIG_PROC_FS
-static struct snd_info_entry *info_entry;
-#endif
+static struct bus_type snd_seq_bus_type = {
+ .name = "snd_seq",
+ .match = snd_seq_bus_match,
+};
/*
- * prototypes
+ * proc interface -- just for compatibility
*/
-static int snd_seq_device_free(struct snd_seq_device *dev);
-static int snd_seq_device_dev_free(struct snd_device *device);
-static int snd_seq_device_dev_register(struct snd_device *device);
-static int snd_seq_device_dev_disconnect(struct snd_device *device);
-
-static int init_device(struct snd_seq_device *dev, struct ops_list *ops);
-static int free_device(struct snd_seq_device *dev, struct ops_list *ops);
-static struct ops_list *find_driver(char *id, int create_if_empty);
-static struct ops_list *create_driver(char *id);
-static void unlock_driver(struct ops_list *ops);
-static void remove_drivers(void);
+#ifdef CONFIG_PROC_FS
+static struct snd_info_entry *info_entry;
-/*
- * show all drivers and their status
- */
+static int print_dev_info(struct device *dev, void *data)
+{
+ struct snd_seq_device *sdev = to_seq_dev(dev);
+ struct snd_info_buffer *buffer = data;
+
+ snd_iprintf(buffer, "snd-%s,%s,%d\n", sdev->id,
+ dev->driver ? "loaded" : "empty",
+ dev->driver ? 1 : 0);
+ return 0;
+}
-#ifdef CONFIG_PROC_FS
static void snd_seq_device_info(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
- struct ops_list *ops;
-
- mutex_lock(&ops_mutex);
- list_for_each_entry(ops, &opslist, list) {
- snd_iprintf(buffer, "snd-%s%s%s%s,%d\n",
- ops->id,
- ops->driver & DRIVER_LOADED ? ",loaded" : (ops->driver == DRIVER_EMPTY ? ",empty" : ""),
- ops->driver & DRIVER_REQUESTED ? ",requested" : "",
- ops->driver & DRIVER_LOCKED ? ",locked" : "",
- ops->num_devices);
- }
- mutex_unlock(&ops_mutex);
+ bus_for_each_dev(&snd_seq_bus_type, NULL, buffer, print_dev_info);
}
#endif
-
+
/*
* load all registered drivers (called from seq_clientmgr.c)
*/
#ifdef CONFIG_MODULES
-/* avoid auto-loading during module_init() */
+/* flag to block auto-loading */
static atomic_t snd_seq_in_init = ATOMIC_INIT(1); /* blocked as default */
-void snd_seq_autoload_lock(void)
-{
- atomic_inc(&snd_seq_in_init);
-}
-void snd_seq_autoload_unlock(void)
+static int request_seq_drv(struct device *dev, void *data)
{
- atomic_dec(&snd_seq_in_init);
+ struct snd_seq_device *sdev = to_seq_dev(dev);
+
+ if (!dev->driver)
+ request_module("snd-%s", sdev->id);
+ return 0;
}
-static void autoload_drivers(void)
+static void autoload_drivers(struct work_struct *work)
{
/* avoid reentrance */
- if (atomic_inc_return(&snd_seq_in_init) == 1) {
- struct ops_list *ops;
-
- mutex_lock(&ops_mutex);
- list_for_each_entry(ops, &opslist, list) {
- if ((ops->driver & DRIVER_REQUESTING) &&
- !(ops->driver & DRIVER_REQUESTED)) {
- ops->used++;
- mutex_unlock(&ops_mutex);
- ops->driver |= DRIVER_REQUESTED;
- request_module("snd-%s", ops->id);
- mutex_lock(&ops_mutex);
- ops->used--;
- }
- }
- mutex_unlock(&ops_mutex);
- }
+ if (atomic_inc_return(&snd_seq_in_init) == 1)
+ bus_for_each_dev(&snd_seq_bus_type, NULL, NULL,
+ request_seq_drv);
atomic_dec(&snd_seq_in_init);
}
-static void call_autoload(struct work_struct *work)
-{
- autoload_drivers();
-}
-
-static DECLARE_WORK(autoload_work, call_autoload);
-
-static void try_autoload(struct ops_list *ops)
-{
- if (!ops->driver) {
- ops->driver |= DRIVER_REQUESTING;
- schedule_work(&autoload_work);
- }
-}
+static DECLARE_WORK(autoload_work, autoload_drivers);
static void queue_autoload_drivers(void)
{
- struct ops_list *ops;
-
- mutex_lock(&ops_mutex);
- list_for_each_entry(ops, &opslist, list)
- try_autoload(ops);
- mutex_unlock(&ops_mutex);
+ schedule_work(&autoload_work);
}
void snd_seq_autoload_init(void)
@@ -195,384 +134,143 @@ void snd_seq_autoload_init(void)
queue_autoload_drivers();
#endif
}
-#else
-#define try_autoload(ops) /* NOP */
-#endif
+EXPORT_SYMBOL(snd_seq_autoload_init);
-void snd_seq_device_load_drivers(void)
+void snd_seq_autoload_exit(void)
{
-#ifdef CONFIG_MODULES
- queue_autoload_drivers();
- flush_work(&autoload_work);
-#endif
+ atomic_inc(&snd_seq_in_init);
}
+EXPORT_SYMBOL(snd_seq_autoload_exit);
-/*
- * register a sequencer device
- * card = card info
- * device = device number (if any)
- * id = id of driver
- * result = return pointer (NULL allowed if unnecessary)
- */
-int snd_seq_device_new(struct snd_card *card, int device, char *id, int argsize,
- struct snd_seq_device **result)
+void snd_seq_device_load_drivers(void)
{
- struct snd_seq_device *dev;
- struct ops_list *ops;
- int err;
- static struct snd_device_ops dops = {
- .dev_free = snd_seq_device_dev_free,
- .dev_register = snd_seq_device_dev_register,
- .dev_disconnect = snd_seq_device_dev_disconnect,
- };
-
- if (result)
- *result = NULL;
-
- if (snd_BUG_ON(!id))
- return -EINVAL;
-
- ops = find_driver(id, 1);
- if (ops == NULL)
- return -ENOMEM;
-
- dev = kzalloc(sizeof(*dev)*2 + argsize, GFP_KERNEL);
- if (dev == NULL) {
- unlock_driver(ops);
- return -ENOMEM;
- }
-
- /* set up device info */
- dev->card = card;
- dev->device = device;
- strlcpy(dev->id, id, sizeof(dev->id));
- dev->argsize = argsize;
- dev->status = SNDRV_SEQ_DEVICE_FREE;
-
- /* add this device to the list */
- mutex_lock(&ops->reg_mutex);
- list_add_tail(&dev->list, &ops->dev_list);
- ops->num_devices++;
- mutex_unlock(&ops->reg_mutex);
-
- if ((err = snd_device_new(card, SNDRV_DEV_SEQUENCER, dev, &dops)) < 0) {
- snd_seq_device_free(dev);
- return err;
- }
-
- try_autoload(ops);
- unlock_driver(ops);
-
- if (result)
- *result = dev;
-
- return 0;
+ queue_autoload_drivers();
+ flush_work(&autoload_work);
}
+EXPORT_SYMBOL(snd_seq_device_load_drivers);
+#else
+#define queue_autoload_drivers() /* NOP */
+#endif
/*
- * free the existing device
+ * device management
*/
-static int snd_seq_device_free(struct snd_seq_device *dev)
-{
- struct ops_list *ops;
-
- if (snd_BUG_ON(!dev))
- return -EINVAL;
-
- ops = find_driver(dev->id, 0);
- if (ops == NULL)
- return -ENXIO;
-
- /* remove the device from the list */
- mutex_lock(&ops->reg_mutex);
- list_del(&dev->list);
- ops->num_devices--;
- mutex_unlock(&ops->reg_mutex);
-
- free_device(dev, ops);
- if (dev->private_free)
- dev->private_free(dev);
- kfree(dev);
-
- unlock_driver(ops);
-
- return 0;
-}
-
static int snd_seq_device_dev_free(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
- return snd_seq_device_free(dev);
+
+ put_device(&dev->dev);
+ return 0;
}
-/*
- * register the device
- */
static int snd_seq_device_dev_register(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
- struct ops_list *ops;
-
- ops = find_driver(dev->id, 0);
- if (ops == NULL)
- return -ENOENT;
-
- /* initialize this device if the corresponding driver was
- * already loaded
- */
- if (ops->driver & DRIVER_LOADED)
- init_device(dev, ops);
+ int err;
- unlock_driver(ops);
+ err = device_add(&dev->dev);
+ if (err < 0)
+ return err;
+ if (!dev->dev.driver)
+ queue_autoload_drivers();
return 0;
}
-/*
- * disconnect the device
- */
static int snd_seq_device_dev_disconnect(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
- struct ops_list *ops;
-
- ops = find_driver(dev->id, 0);
- if (ops == NULL)
- return -ENOENT;
-
- free_device(dev, ops);
- unlock_driver(ops);
+ device_del(&dev->dev);
return 0;
}
-/*
- * register device driver
- * id = driver id
- * entry = driver operators - duplicated to each instance
- */
-int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
- int argsize)
+static void snd_seq_dev_release(struct device *dev)
{
- struct ops_list *ops;
- struct snd_seq_device *dev;
+ struct snd_seq_device *sdev = to_seq_dev(dev);
- if (id == NULL || entry == NULL ||
- entry->init_device == NULL || entry->free_device == NULL)
- return -EINVAL;
-
- ops = find_driver(id, 1);
- if (ops == NULL)
- return -ENOMEM;
- if (ops->driver & DRIVER_LOADED) {
- pr_warn("ALSA: seq: driver_register: driver '%s' already exists\n", id);
- unlock_driver(ops);
- return -EBUSY;
- }
-
- mutex_lock(&ops->reg_mutex);
- /* copy driver operators */
- ops->ops = *entry;
- ops->driver |= DRIVER_LOADED;
- ops->argsize = argsize;
-
- /* initialize existing devices if necessary */
- list_for_each_entry(dev, &ops->dev_list, list) {
- init_device(dev, ops);
- }
- mutex_unlock(&ops->reg_mutex);
-
- unlock_driver(ops);
-
- return 0;
+ if (sdev->private_free)
+ sdev->private_free(sdev);
+ kfree(sdev);
}
-
-/*
- * create driver record
- */
-static struct ops_list * create_driver(char *id)
-{
- struct ops_list *ops;
-
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
- if (ops == NULL)
- return ops;
-
- /* set up driver entry */
- strlcpy(ops->id, id, sizeof(ops->id));
- mutex_init(&ops->reg_mutex);
- /*
- * The ->reg_mutex locking rules are per-driver, so we create
- * separate per-driver lock classes:
- */
- lockdep_set_class(&ops->reg_mutex, (struct lock_class_key *)id);
-
- ops->driver = DRIVER_EMPTY;
- INIT_LIST_HEAD(&ops->dev_list);
- /* lock this instance */
- ops->used = 1;
-
- /* register driver entry */
- mutex_lock(&ops_mutex);
- list_add_tail(&ops->list, &opslist);
- num_ops++;
- mutex_unlock(&ops_mutex);
-
- return ops;
-}
-
-
/*
- * unregister the specified driver
+ * register a sequencer device
+ * card = card info
+ * device = device number (if any)
+ * id = id of driver
+ * result = return pointer (NULL allowed if unnecessary)
*/
-int snd_seq_device_unregister_driver(char *id)
+int snd_seq_device_new(struct snd_card *card, int device, const char *id,
+ int argsize, struct snd_seq_device **result)
{
- struct ops_list *ops;
struct snd_seq_device *dev;
+ int err;
+ static struct snd_device_ops dops = {
+ .dev_free = snd_seq_device_dev_free,
+ .dev_register = snd_seq_device_dev_register,
+ .dev_disconnect = snd_seq_device_dev_disconnect,
+ };
- ops = find_driver(id, 0);
- if (ops == NULL)
- return -ENXIO;
- if (! (ops->driver & DRIVER_LOADED) ||
- (ops->driver & DRIVER_LOCKED)) {
- pr_err("ALSA: seq: driver_unregister: cannot unload driver '%s': status=%x\n",
- id, ops->driver);
- unlock_driver(ops);
- return -EBUSY;
- }
-
- /* close and release all devices associated with this driver */
- mutex_lock(&ops->reg_mutex);
- ops->driver |= DRIVER_LOCKED; /* do not remove this driver recursively */
- list_for_each_entry(dev, &ops->dev_list, list) {
- free_device(dev, ops);
- }
-
- ops->driver = 0;
- if (ops->num_init_devices > 0)
- pr_err("ALSA: seq: free_driver: init_devices > 0!! (%d)\n",
- ops->num_init_devices);
- mutex_unlock(&ops->reg_mutex);
-
- unlock_driver(ops);
+ if (result)
+ *result = NULL;
- /* remove empty driver entries */
- remove_drivers();
+ if (snd_BUG_ON(!id))
+ return -EINVAL;
- return 0;
-}
+ dev = kzalloc(sizeof(*dev) + argsize, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ /* set up device info */
+ dev->card = card;
+ dev->device = device;
+ dev->id = id;
+ dev->argsize = argsize;
-/*
- * remove empty driver entries
- */
-static void remove_drivers(void)
-{
- struct list_head *head;
-
- mutex_lock(&ops_mutex);
- head = opslist.next;
- while (head != &opslist) {
- struct ops_list *ops = list_entry(head, struct ops_list, list);
- if (! (ops->driver & DRIVER_LOADED) &&
- ops->used == 0 && ops->num_devices == 0) {
- head = head->next;
- list_del(&ops->list);
- kfree(ops);
- num_ops--;
- } else
- head = head->next;
- }
- mutex_unlock(&ops_mutex);
-}
+ device_initialize(&dev->dev);
+ dev->dev.parent = &card->card_dev;
+ dev->dev.bus = &snd_seq_bus_type;
+ dev->dev.release = snd_seq_dev_release;
+ dev_set_name(&dev->dev, "%s-%d-%d", dev->id, card->number, device);
-/*
- * initialize the device - call init_device operator
- */
-static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
-{
- if (! (ops->driver & DRIVER_LOADED))
- return 0; /* driver is not loaded yet */
- if (dev->status != SNDRV_SEQ_DEVICE_FREE)
- return 0; /* already initialized */
- if (ops->argsize != dev->argsize) {
- pr_err("ALSA: seq: incompatible device '%s' for plug-in '%s' (%d %d)\n",
- dev->name, ops->id, ops->argsize, dev->argsize);
- return -EINVAL;
- }
- if (ops->ops.init_device(dev) >= 0) {
- dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
- ops->num_init_devices++;
- } else {
- pr_err("ALSA: seq: init_device failed: %s: %s\n",
- dev->name, dev->id);
+ /* add this device to the list */
+ err = snd_device_new(card, SNDRV_DEV_SEQUENCER, dev, &dops);
+ if (err < 0) {
+ put_device(&dev->dev);
+ return err;
}
+
+ if (result)
+ *result = dev;
return 0;
}
+EXPORT_SYMBOL(snd_seq_device_new);
/*
- * release the device - call free_device operator
+ * driver registration
*/
-static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
+int __snd_seq_driver_register(struct snd_seq_driver *drv, struct module *mod)
{
- int result;
-
- if (! (ops->driver & DRIVER_LOADED))
- return 0; /* driver is not loaded yet */
- if (dev->status != SNDRV_SEQ_DEVICE_REGISTERED)
- return 0; /* not registered */
- if (ops->argsize != dev->argsize) {
- pr_err("ALSA: seq: incompatible device '%s' for plug-in '%s' (%d %d)\n",
- dev->name, ops->id, ops->argsize, dev->argsize);
+ if (WARN_ON(!drv->driver.name || !drv->id))
return -EINVAL;
- }
- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
- dev->status = SNDRV_SEQ_DEVICE_FREE;
- dev->driver_data = NULL;
- ops->num_init_devices--;
- } else {
- pr_err("ALSA: seq: free_device failed: %s: %s\n",
- dev->name, dev->id);
- }
-
- return 0;
+ drv->driver.bus = &snd_seq_bus_type;
+ drv->driver.owner = mod;
+ return driver_register(&drv->driver);
}
+EXPORT_SYMBOL_GPL(__snd_seq_driver_register);
-/*
- * find the matching driver with given id
- */
-static struct ops_list * find_driver(char *id, int create_if_empty)
+void snd_seq_driver_unregister(struct snd_seq_driver *drv)
{
- struct ops_list *ops;
-
- mutex_lock(&ops_mutex);
- list_for_each_entry(ops, &opslist, list) {
- if (strcmp(ops->id, id) == 0) {
- ops->used++;
- mutex_unlock(&ops_mutex);
- return ops;
- }
- }
- mutex_unlock(&ops_mutex);
- if (create_if_empty)
- return create_driver(id);
- return NULL;
+ driver_unregister(&drv->driver);
}
-
-static void unlock_driver(struct ops_list *ops)
-{
- mutex_lock(&ops_mutex);
- ops->used--;
- mutex_unlock(&ops_mutex);
-}
-
+EXPORT_SYMBOL_GPL(snd_seq_driver_unregister);
/*
* module part
*/
-static int __init alsa_seq_device_init(void)
+static int __init seq_dev_proc_init(void)
{
#ifdef CONFIG_PROC_FS
info_entry = snd_info_create_module_entry(THIS_MODULE, "drivers",
@@ -589,28 +287,29 @@ static int __init alsa_seq_device_init(void)
return 0;
}
+static int __init alsa_seq_device_init(void)
+{
+ int err;
+
+ err = bus_register(&snd_seq_bus_type);
+ if (err < 0)
+ return err;
+ err = seq_dev_proc_init();
+ if (err < 0)
+ bus_unregister(&snd_seq_bus_type);
+ return err;
+}
+
static void __exit alsa_seq_device_exit(void)
{
#ifdef CONFIG_MODULES
cancel_work_sync(&autoload_work);
#endif
- remove_drivers();
#ifdef CONFIG_PROC_FS
snd_info_free_entry(info_entry);
#endif
- if (num_ops)
- pr_err("ALSA: seq: drivers not released (%d)\n", num_ops);
+ bus_unregister(&snd_seq_bus_type);
}
-module_init(alsa_seq_device_init)
+subsys_initcall(alsa_seq_device_init)
module_exit(alsa_seq_device_exit)
-
-EXPORT_SYMBOL(snd_seq_device_load_drivers);
-EXPORT_SYMBOL(snd_seq_device_new);
-EXPORT_SYMBOL(snd_seq_device_register_driver);
-EXPORT_SYMBOL(snd_seq_device_unregister_driver);
-#ifdef CONFIG_MODULES
-EXPORT_SYMBOL(snd_seq_autoload_init);
-EXPORT_SYMBOL(snd_seq_autoload_lock);
-EXPORT_SYMBOL(snd_seq_autoload_unlock);
-#endif
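
With the sequencer devices now sitting on a proper bus, a synth backend no longer calls snd_seq_device_register_driver()/unregister_driver() from its own module init/exit; it declares a struct snd_seq_driver and lets the driver core bind it to matching snd_seq_device instances, exactly as the seq_midi, opl3 and opl4 conversions further down do. A minimal sketch under those assumptions (the "foo" id and the callback bodies are hypothetical):

#include <linux/device.h>
#include <linux/module.h>
#include <sound/seq_device.h>

/* probe/remove now receive a struct device and convert it back */
static int foo_seq_probe(struct device *_dev)
{
	struct snd_seq_device *dev = to_seq_dev(_dev);

	dev_info(_dev, "binding to card %d\n", dev->card->number);
	/* create the sequencer client/ports for dev->card here */
	return 0;
}

static int foo_seq_remove(struct device *_dev)
{
	struct snd_seq_device *dev = to_seq_dev(_dev);

	/* delete the client created in foo_seq_probe() */
	dev_info(_dev, "unbinding from card %d\n", dev->card->number);
	return 0;
}

static struct snd_seq_driver foo_seq_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.probe = foo_seq_probe,
		.remove = foo_seq_remove,
	},
	.id = "foo",	/* must match the id passed to snd_seq_device_new() */
	.argsize = 0,	/* compared by snd_seq_bus_match() as well */
};

module_snd_seq_driver(foo_seq_driver);
MODULE_LICENSE("GPL");
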
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index 5d905d90d504..d3a2ec4f0561 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -214,11 +214,7 @@ delete_client(void)
static int __init alsa_seq_dummy_init(void)
{
- int err;
- snd_seq_autoload_lock();
- err = register_client();
- snd_seq_autoload_unlock();
- return err;
+ return register_client();
}
static void __exit alsa_seq_dummy_exit(void)
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 53a403e17c5b..1d5acbe0c08b 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -33,10 +33,8 @@ struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
struct snd_seq_fifo *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (f == NULL) {
- pr_debug("ALSA: seq: malloc failed for snd_seq_fifo_new() \n");
+ if (!f)
return NULL;
- }
f->pool = snd_seq_pool_new(poolsize);
if (f->pool == NULL) {
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index ba8e4a64e13e..801076687bb1 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -387,10 +387,8 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
return 0;
pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
- if (pool->ptr == NULL) {
- pr_debug("ALSA: seq: malloc for sequencer events failed\n");
+ if (!pool->ptr)
return -ENOMEM;
- }
/* add new cells to the free cell list */
spin_lock_irqsave(&pool->lock, flags);
@@ -463,10 +461,8 @@ struct snd_seq_pool *snd_seq_pool_new(int poolsize)
/* create pool block */
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (pool == NULL) {
- pr_debug("ALSA: seq: malloc failed for pool\n");
+ if (!pool)
return NULL;
- }
spin_lock_init(&pool->lock);
pool->ptr = NULL;
pool->free = NULL;
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index 68fec776da26..5dd0ee258359 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -273,8 +273,9 @@ static void snd_seq_midisynth_delete(struct seq_midisynth *msynth)
/* register new midi synth port */
static int
-snd_seq_midisynth_register_port(struct snd_seq_device *dev)
+snd_seq_midisynth_probe(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct seq_midisynth_client *client;
struct seq_midisynth *msynth, *ms;
struct snd_seq_port_info *port;
@@ -427,8 +428,9 @@ snd_seq_midisynth_register_port(struct snd_seq_device *dev)
/* release midi synth port */
static int
-snd_seq_midisynth_unregister_port(struct snd_seq_device *dev)
+snd_seq_midisynth_remove(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct seq_midisynth_client *client;
struct seq_midisynth *msynth;
struct snd_card *card = dev->card;
@@ -457,24 +459,14 @@ snd_seq_midisynth_unregister_port(struct snd_seq_device *dev)
return 0;
}
+static struct snd_seq_driver seq_midisynth_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .probe = snd_seq_midisynth_probe,
+ .remove = snd_seq_midisynth_remove,
+ },
+ .id = SNDRV_SEQ_DEV_ID_MIDISYNTH,
+ .argsize = 0,
+};
-static int __init alsa_seq_midi_init(void)
-{
- static struct snd_seq_dev_ops ops = {
- snd_seq_midisynth_register_port,
- snd_seq_midisynth_unregister_port,
- };
- memset(&synths, 0, sizeof(synths));
- snd_seq_autoload_lock();
- snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_MIDISYNTH, &ops, 0);
- snd_seq_autoload_unlock();
- return 0;
-}
-
-static void __exit alsa_seq_midi_exit(void)
-{
- snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_MIDISYNTH);
-}
-
-module_init(alsa_seq_midi_init)
-module_exit(alsa_seq_midi_exit)
+module_snd_seq_driver(seq_midisynth_driver);
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 46ff593f618d..55170a20ae72 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -141,10 +141,8 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
/* create a new port */
new_port = kzalloc(sizeof(*new_port), GFP_KERNEL);
- if (! new_port) {
- pr_debug("ALSA: seq: malloc failed for registering client port\n");
+ if (!new_port)
return NULL; /* failure, out of memory */
- }
/* init port data */
new_port->addr.client = client->number;
new_port->addr.port = -1;
diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
index 021b02bc9330..bc1c8488fc2a 100644
--- a/sound/core/seq/seq_prioq.c
+++ b/sound/core/seq/seq_prioq.c
@@ -59,10 +59,8 @@ struct snd_seq_prioq *snd_seq_prioq_new(void)
struct snd_seq_prioq *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (f == NULL) {
- pr_debug("ALSA: seq: malloc failed for snd_seq_prioq_new()\n");
+ if (!f)
return NULL;
- }
spin_lock_init(&f->lock);
f->head = NULL;
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index aad4878cee55..a0cda38205b9 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -111,10 +111,8 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
struct snd_seq_queue *q;
q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (q == NULL) {
- pr_debug("ALSA: seq: malloc failed for snd_seq_queue_new()\n");
+ if (!q)
return NULL;
- }
spin_lock_init(&q->owner_lock);
spin_lock_init(&q->check_lock);
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index e73605393eee..186f1611103c 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -56,10 +56,8 @@ struct snd_seq_timer *snd_seq_timer_new(void)
struct snd_seq_timer *tmr;
tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
- if (tmr == NULL) {
- pr_debug("ALSA: seq: malloc failed for snd_seq_timer_new() \n");
+ if (!tmr)
return NULL;
- }
spin_lock_init(&tmr->lock);
/* reset setup to defaults */
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 185cec01ee25..5fc93d00572a 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -186,7 +186,7 @@ static const struct file_operations snd_fops =
};
#ifdef CONFIG_SND_DYNAMIC_MINORS
-static int snd_find_free_minor(int type)
+static int snd_find_free_minor(int type, struct snd_card *card, int dev)
{
int minor;
@@ -209,7 +209,7 @@ static int snd_find_free_minor(int type)
return -EBUSY;
}
#else
-static int snd_kernel_minor(int type, struct snd_card *card, int dev)
+static int snd_find_free_minor(int type, struct snd_card *card, int dev)
{
int minor;
@@ -237,6 +237,8 @@ static int snd_kernel_minor(int type, struct snd_card *card, int dev)
}
if (snd_BUG_ON(minor < 0 || minor >= SNDRV_OS_MINORS))
return -EINVAL;
+ if (snd_minors[minor])
+ return -EBUSY;
return minor;
}
#endif
@@ -276,13 +278,7 @@ int snd_register_device(int type, struct snd_card *card, int dev,
preg->private_data = private_data;
preg->card_ptr = card;
mutex_lock(&sound_mutex);
-#ifdef CONFIG_SND_DYNAMIC_MINORS
- minor = snd_find_free_minor(type);
-#else
- minor = snd_kernel_minor(type, card, dev);
- if (minor >= 0 && snd_minors[minor])
- minor = -EBUSY;
-#endif
+ minor = snd_find_free_minor(type, card, dev);
if (minor < 0) {
err = minor;
goto error;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 490b489d713d..a9a1a047c521 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -774,10 +774,8 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
- if (timer == NULL) {
- pr_err("ALSA: timer: cannot allocate\n");
+ if (!timer)
return -ENOMEM;
- }
timer->tmr_class = tid->dev_class;
timer->card = card;
timer->tmr_device = tid->device;
diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c
index a9f618e06a22..fdae5d7f421f 100644
--- a/sound/drivers/opl3/opl3_seq.c
+++ b/sound/drivers/opl3/opl3_seq.c
@@ -216,8 +216,9 @@ static int snd_opl3_synth_create_port(struct snd_opl3 * opl3)
/* ------------------------------ */
-static int snd_opl3_seq_new_device(struct snd_seq_device *dev)
+static int snd_opl3_seq_probe(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_opl3 *opl3;
int client, err;
char name[32];
@@ -257,8 +258,9 @@ static int snd_opl3_seq_new_device(struct snd_seq_device *dev)
return 0;
}
-static int snd_opl3_seq_delete_device(struct snd_seq_device *dev)
+static int snd_opl3_seq_remove(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_opl3 *opl3;
opl3 = *(struct snd_opl3 **)SNDRV_SEQ_DEVICE_ARGPTR(dev);
@@ -275,22 +277,14 @@ static int snd_opl3_seq_delete_device(struct snd_seq_device *dev)
return 0;
}
-static int __init alsa_opl3_seq_init(void)
-{
- static struct snd_seq_dev_ops ops =
- {
- snd_opl3_seq_new_device,
- snd_opl3_seq_delete_device
- };
-
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops,
- sizeof(struct snd_opl3 *));
-}
-
-static void __exit alsa_opl3_seq_exit(void)
-{
- snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_OPL3);
-}
+static struct snd_seq_driver opl3_seq_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .probe = snd_opl3_seq_probe,
+ .remove = snd_opl3_seq_remove,
+ },
+ .id = SNDRV_SEQ_DEV_ID_OPL3,
+ .argsize = sizeof(struct snd_opl3 *),
+};
-module_init(alsa_opl3_seq_init)
-module_exit(alsa_opl3_seq_exit)
+module_snd_seq_driver(opl3_seq_driver);
diff --git a/sound/drivers/opl4/opl4_seq.c b/sound/drivers/opl4/opl4_seq.c
index 99197699c55a..03d6202f4829 100644
--- a/sound/drivers/opl4/opl4_seq.c
+++ b/sound/drivers/opl4/opl4_seq.c
@@ -124,8 +124,9 @@ static void snd_opl4_seq_free_port(void *private_data)
snd_midi_channel_free_set(opl4->chset);
}
-static int snd_opl4_seq_new_device(struct snd_seq_device *dev)
+static int snd_opl4_seq_probe(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_opl4 *opl4;
int client;
struct snd_seq_port_callback pcallbacks;
@@ -180,8 +181,9 @@ static int snd_opl4_seq_new_device(struct snd_seq_device *dev)
return 0;
}
-static int snd_opl4_seq_delete_device(struct snd_seq_device *dev)
+static int snd_opl4_seq_remove(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_opl4 *opl4;
opl4 = *(struct snd_opl4 **)SNDRV_SEQ_DEVICE_ARGPTR(dev);
@@ -195,21 +197,14 @@ static int snd_opl4_seq_delete_device(struct snd_seq_device *dev)
return 0;
}
-static int __init alsa_opl4_synth_init(void)
-{
- static struct snd_seq_dev_ops ops = {
- snd_opl4_seq_new_device,
- snd_opl4_seq_delete_device
- };
-
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL4, &ops,
- sizeof(struct snd_opl4 *));
-}
-
-static void __exit alsa_opl4_synth_exit(void)
-{
- snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_OPL4);
-}
+static struct snd_seq_driver opl4_seq_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .probe = snd_opl4_seq_probe,
+ .remove = snd_opl4_seq_remove,
+ },
+ .id = SNDRV_SEQ_DEV_ID_OPL4,
+ .argsize = sizeof(struct snd_opl4 *),
+};
-module_init(alsa_opl4_synth_init)
-module_exit(alsa_opl4_synth_exit)
+module_snd_seq_driver(opl4_seq_driver);
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 5cc356db5351..e061355f535f 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -166,10 +166,10 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
* One AMDTP packet can include some frames. In blocking mode, the
* number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
* depending on its sampling rate. For accurate period interrupt, it's
- * preferrable to aligh period/buffer sizes to current SYT_INTERVAL.
+ * preferable to align period/buffer sizes to current SYT_INTERVAL.
*
- * TODO: These constraints can be improved with propper rules.
- * Currently apply LCM of SYT_INTEVALs.
+ * TODO: These constraints can be improved with proper rules.
+ * Currently apply LCM of SYT_INTERVALs.
*/
err = snd_pcm_hw_constraint_step(runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
@@ -270,7 +270,7 @@ static void amdtp_read_s32(struct amdtp_stream *s,
* @s: the AMDTP stream to configure
* @format: the format of the ALSA PCM device
*
- * The sample format must be set after the other paramters (rate/PCM channels/
+ * The sample format must be set after the other parameters (rate/PCM channels/
* MIDI) and before the stream is started, and must not be changed while the
* stream is running.
*/
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index 2a85e4209f0b..f550808d1784 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -13,7 +13,7 @@
*
* Transaction substance:
* At first, 6 data exist. Following to the data, parameters for each command
- * exist. All of the parameters are 32 bit alighed to big endian.
+ * exist. All of the parameters are 32 bit aligned to big endian.
* data[0]: Length of transaction substance
* data[1]: Transaction version
* data[2]: Sequence number. This is incremented by the device
diff --git a/sound/hda/Kconfig b/sound/hda/Kconfig
new file mode 100644
index 000000000000..001c6588a5ff
--- /dev/null
+++ b/sound/hda/Kconfig
@@ -0,0 +1,3 @@
+config SND_HDA_CORE
+ tristate
+ select REGMAP
diff --git a/sound/hda/Makefile b/sound/hda/Makefile
new file mode 100644
index 000000000000..7a359f5b7e25
--- /dev/null
+++ b/sound/hda/Makefile
@@ -0,0 +1,7 @@
+snd-hda-core-objs := hda_bus_type.o hdac_bus.o hdac_device.o hdac_sysfs.o \
+ hdac_regmap.o array.o
+
+snd-hda-core-objs += trace.o
+CFLAGS_trace.o := -I$(src)
+
+obj-$(CONFIG_SND_HDA_CORE) += snd-hda-core.o
diff --git a/sound/hda/array.c b/sound/hda/array.c
new file mode 100644
index 000000000000..516795baa7db
--- /dev/null
+++ b/sound/hda/array.c
@@ -0,0 +1,49 @@
+/*
+ * generic arrays
+ */
+
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/hdaudio.h>
+
+/**
+ * snd_array_new - get a new element from the given array
+ * @array: the array object
+ *
+ * Get a new element from the given array. If it exceeds the
+ * pre-allocated array size, re-allocate the array.
+ *
+ * Returns NULL if allocation failed.
+ */
+void *snd_array_new(struct snd_array *array)
+{
+ if (snd_BUG_ON(!array->elem_size))
+ return NULL;
+ if (array->used >= array->alloced) {
+ int num = array->alloced + array->alloc_align;
+ int size = (num + 1) * array->elem_size;
+ void *nlist;
+ if (snd_BUG_ON(num >= 4096))
+ return NULL;
+ nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO);
+ if (!nlist)
+ return NULL;
+ array->list = nlist;
+ array->alloced = num;
+ }
+ return snd_array_elem(array, array->used++);
+}
+EXPORT_SYMBOL_GPL(snd_array_new);
+
+/**
+ * snd_array_free - free the given array elements
+ * @array: the array object
+ */
+void snd_array_free(struct snd_array *array)
+{
+ kfree(array->list);
+ array->used = 0;
+ array->alloced = 0;
+ array->list = NULL;
+}
+EXPORT_SYMBOL_GPL(snd_array_free);
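
The snd_array helpers give a small growable array of fixed-size elements: snd_array_init() records the element size and growth step, snd_array_new() hands back the next free slot (reallocating the backing list with krealloc() when needed), snd_array_elem() indexes it, and snd_array_free() releases the whole list. A rough usage sketch, following the same pattern as the vendor-verb list kept in hdac_regmap.c further down (the remember_verb() helper itself is hypothetical):

#include <linux/errno.h>
#include <sound/hdaudio.h>

static struct snd_array verbs;	/* element type: unsigned int */

static int remember_verb(unsigned int verb)
{
	unsigned int *p;

	if (!verbs.elem_size)	/* lazy init: 4-byte elements, grow by 8 */
		snd_array_init(&verbs, sizeof(unsigned int), 8);
	p = snd_array_new(&verbs);
	if (!p)
		return -ENOMEM;
	*p = verb;
	return 0;
}
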
diff --git a/sound/hda/hda_bus_type.c b/sound/hda/hda_bus_type.c
new file mode 100644
index 000000000000..519914a12e8a
--- /dev/null
+++ b/sound/hda/hda_bus_type.c
@@ -0,0 +1,42 @@
+/*
+ * HD-audio bus
+ */
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <sound/hdaudio.h>
+
+MODULE_DESCRIPTION("HD-audio bus");
+MODULE_LICENSE("GPL");
+
+static int hda_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct hdac_device *hdev = dev_to_hdac_dev(dev);
+ struct hdac_driver *hdrv = drv_to_hdac_driver(drv);
+
+ if (hdev->type != hdrv->type)
+ return 0;
+ if (hdrv->match)
+ return hdrv->match(hdev, hdrv);
+ return 1;
+}
+
+struct bus_type snd_hda_bus_type = {
+ .name = "hdaudio",
+ .match = hda_bus_match,
+};
+EXPORT_SYMBOL_GPL(snd_hda_bus_type);
+
+static int __init hda_bus_init(void)
+{
+ return bus_register(&snd_hda_bus_type);
+}
+
+static void __exit hda_bus_exit(void)
+{
+ bus_unregister(&snd_hda_bus_type);
+}
+
+subsys_initcall(hda_bus_init);
+module_exit(hda_bus_exit);
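
Matching on this bus is by device type plus an optional per-driver match callback, so a codec driver is essentially a struct hdac_driver with its embedded device_driver pointed at snd_hda_bus_type. A rough sketch, assuming the hdac_driver fields implied by hda_bus_match() and the unsol dispatch in hdac_bus.c (driver, type, match, unsol_event) and plain driver_register() on the embedded driver, since no registration helper appears in this patch; all my_* names are hypothetical:

#include <linux/device.h>
#include <linux/module.h>
#include <sound/hdaudio.h>

static int my_codec_match(struct hdac_device *hdev, struct hdac_driver *hdrv)
{
	/* e.g. bind only to a particular vendor (hypothetical check) */
	return (hdev->vendor_id >> 16) == 0x10ec;
}

static void my_codec_unsol(struct hdac_device *hdev, unsigned int res)
{
	dev_dbg(&hdev->dev, "unsol event 0x%08x\n", res);
}

static struct hdac_driver my_codec_driver = {
	.driver = {
		.name = "my-hda-codec",
		.bus = &snd_hda_bus_type,
	},
	.type = HDA_DEV_CORE,		/* must equal hdev->type to match */
	.match = my_codec_match,
	.unsol_event = my_codec_unsol,
};

static int __init my_codec_init(void)
{
	return driver_register(&my_codec_driver.driver);
}
module_init(my_codec_init);

static void __exit my_codec_exit(void)
{
	driver_unregister(&my_codec_driver.driver);
}
module_exit(my_codec_exit);

MODULE_LICENSE("GPL");
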
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
new file mode 100644
index 000000000000..8e262da74f6a
--- /dev/null
+++ b/sound/hda/hdac_bus.c
@@ -0,0 +1,186 @@
+/*
+ * HD-audio core bus driver
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <sound/hdaudio.h>
+#include "trace.h"
+
+static void process_unsol_events(struct work_struct *work);
+
+/**
+ * snd_hdac_bus_init - initialize a HD-audio base bus
+ * @bus: the pointer to bus object
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
+ const struct hdac_bus_ops *ops)
+{
+ memset(bus, 0, sizeof(*bus));
+ bus->dev = dev;
+ bus->ops = ops;
+ INIT_LIST_HEAD(&bus->codec_list);
+ INIT_WORK(&bus->unsol_work, process_unsol_events);
+ mutex_init(&bus->cmd_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_init);
+
+/**
+ * snd_hdac_bus_exit - clean up a HD-audio base bus
+ * @bus: the pointer to bus object
+ */
+void snd_hdac_bus_exit(struct hdac_bus *bus)
+{
+ WARN_ON(!list_empty(&bus->codec_list));
+ cancel_work_sync(&bus->unsol_work);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_exit);
+
+/**
+ * snd_hdac_bus_exec_verb - execute a HD-audio verb on the given bus
+ * @bus: bus object
+ * @cmd: HD-audio encoded verb
+ * @res: pointer to store the response, NULL if performing asynchronously
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res)
+{
+ int err;
+
+ mutex_lock(&bus->cmd_mutex);
+ err = snd_hdac_bus_exec_verb_unlocked(bus, addr, cmd, res);
+ mutex_unlock(&bus->cmd_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_exec_verb);
+
+/**
+ * snd_hdac_bus_exec_verb_unlocked - unlocked version
+ * @bus: bus object
+ * @cmd: HD-audio encoded verb
+ * @res: pointer to store the response, NULL if performing asynchronously
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res)
+{
+ unsigned int tmp;
+ int err;
+
+ if (cmd == ~0)
+ return -EINVAL;
+
+ if (res)
+ *res = -1;
+ else if (bus->sync_write)
+ res = &tmp;
+ for (;;) {
+ trace_hda_send_cmd(bus, cmd);
+ err = bus->ops->command(bus, cmd);
+ if (err != -EAGAIN)
+ break;
+ /* process pending verbs */
+ err = bus->ops->get_response(bus, addr, &tmp);
+ if (err)
+ break;
+ }
+ if (!err && res) {
+ err = bus->ops->get_response(bus, addr, res);
+ trace_hda_get_response(bus, addr, *res);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_exec_verb_unlocked);
+
+/**
+ * snd_hdac_bus_queue_event - add an unsolicited event to queue
+ * @bus: the BUS
+ * @res: unsolicited event (lower 32bit of RIRB entry)
+ * @res_ex: codec addr and flags (upper 32bit of RIRB entry)
+ *
+ * Adds the given event to the queue. The events are processed in
+ * the workqueue asynchronously. Call this function in the interrupt
+ * handler when RIRB receives an unsolicited event.
+ */
+void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex)
+{
+ unsigned int wp;
+
+ if (!bus)
+ return;
+
+ trace_hda_unsol_event(bus, res, res_ex);
+ wp = (bus->unsol_wp + 1) % HDA_UNSOL_QUEUE_SIZE;
+ bus->unsol_wp = wp;
+
+ wp <<= 1;
+ bus->unsol_queue[wp] = res;
+ bus->unsol_queue[wp + 1] = res_ex;
+
+ schedule_work(&bus->unsol_work);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_queue_event);
+
+/*
+ * process queued unsolicited events
+ */
+static void process_unsol_events(struct work_struct *work)
+{
+ struct hdac_bus *bus = container_of(work, struct hdac_bus, unsol_work);
+ struct hdac_device *codec;
+ struct hdac_driver *drv;
+ unsigned int rp, caddr, res;
+
+ while (bus->unsol_rp != bus->unsol_wp) {
+ rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
+ bus->unsol_rp = rp;
+ rp <<= 1;
+ res = bus->unsol_queue[rp];
+ caddr = bus->unsol_queue[rp + 1];
+ if (!(caddr & (1 << 4))) /* no unsolicited event? */
+ continue;
+ codec = bus->caddr_tbl[caddr & 0x0f];
+ if (!codec || !codec->dev.driver)
+ continue;
+ drv = drv_to_hdac_driver(codec->dev.driver);
+ if (drv->unsol_event)
+ drv->unsol_event(codec, res);
+ }
+}
+
+int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec)
+{
+ if (bus->caddr_tbl[codec->addr]) {
+ dev_err(bus->dev, "address 0x%x is already occupied\n",
+ codec->addr);
+ return -EBUSY;
+ }
+
+ list_add_tail(&codec->list, &bus->codec_list);
+ bus->caddr_tbl[codec->addr] = codec;
+ set_bit(codec->addr, &bus->codec_powered);
+ bus->num_codecs++;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_add_device);
+
+void snd_hdac_bus_remove_device(struct hdac_bus *bus,
+ struct hdac_device *codec)
+{
+ WARN_ON(bus != codec->bus);
+ if (list_empty(&codec->list))
+ return;
+ list_del_init(&codec->list);
+ bus->caddr_tbl[codec->addr] = NULL;
+ clear_bit(codec->addr, &bus->codec_powered);
+ bus->num_codecs--;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_remove_device);
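
A controller driver supplies the low-level verb transport through struct hdac_bus_ops (.command to push a verb, .get_response to fetch the reply) and then talks to codecs with snd_hdac_bus_exec_verb(). A minimal sketch using only what is visible above; the my_* helpers are hypothetical stand-ins for real CORB/RIRB access, and the verb encoding follows snd_hdac_make_cmd() in hdac_device.c (addr << 28 | nid << 20 | verb << 8 | parm):

#include <linux/device.h>
#include <sound/hdaudio.h>

static int my_send_cmd(struct hdac_bus *bus, unsigned int cmd)
{
	/* push the verb into the CORB; return -EAGAIN if the ring is full */
	return 0;
}

static int my_get_response(struct hdac_bus *bus, unsigned int addr,
			   unsigned int *res)
{
	/* wait for the matching RIRB entry and store it in *res */
	*res = 0;
	return 0;
}

static const struct hdac_bus_ops my_bus_ops = {
	.command	= my_send_cmd,
	.get_response	= my_get_response,
};

static int my_controller_setup(struct device *dev, struct hdac_bus *bus)
{
	unsigned int res;
	int err;

	err = snd_hdac_bus_init(bus, dev, &my_bus_ops);
	if (err < 0)
		return err;
	/* read the vendor id parameter of the root node of codec address 0 */
	err = snd_hdac_bus_exec_verb(bus, 0,
				     (AC_NODE_ROOT << 20) |
				     (AC_VERB_PARAMETERS << 8) |
				     AC_PAR_VENDOR_ID, &res);
	return err;
}
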
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
new file mode 100644
index 000000000000..f75bf5622687
--- /dev/null
+++ b/sound/hda/hdac_device.c
@@ -0,0 +1,599 @@
+/*
+ * HD-audio codec core device
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_regmap.h>
+#include "local.h"
+
+static void setup_fg_nodes(struct hdac_device *codec);
+static int get_codec_vendor_name(struct hdac_device *codec);
+
+static void default_release(struct device *dev)
+{
+ snd_hdac_device_exit(container_of(dev, struct hdac_device, dev));
+}
+
+/**
+ * snd_hdac_device_init - initialize the HD-audio codec base device
+ * @codec: device to initialize
+ * @bus: bus to attach
+ * @name: device name string
+ * @addr: codec address
+ *
+ * Returns zero for success or a negative error code.
+ *
+ * This function increments the runtime PM counter and marks it active.
+ * The caller needs to turn it off appropriately later.
+ *
+ * The caller needs to set the device's release op properly by itself.
+ */
+int snd_hdac_device_init(struct hdac_device *codec, struct hdac_bus *bus,
+ const char *name, unsigned int addr)
+{
+ struct device *dev;
+ hda_nid_t fg;
+ int err;
+
+ dev = &codec->dev;
+ device_initialize(dev);
+ dev->parent = bus->dev;
+ dev->bus = &snd_hda_bus_type;
+ dev->release = default_release;
+ dev->groups = hdac_dev_attr_groups;
+ dev_set_name(dev, "%s", name);
+ device_enable_async_suspend(dev);
+
+ codec->bus = bus;
+ codec->addr = addr;
+ codec->type = HDA_DEV_CORE;
+ pm_runtime_set_active(&codec->dev);
+ pm_runtime_get_noresume(&codec->dev);
+ atomic_set(&codec->in_pm, 0);
+
+ err = snd_hdac_bus_add_device(bus, codec);
+ if (err < 0)
+ goto error;
+
+ /* fill parameters */
+ codec->vendor_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
+ AC_PAR_VENDOR_ID);
+ if (codec->vendor_id == -1) {
+ /* read again, hopefully the access method was corrected
+ * in the last read...
+ */
+ codec->vendor_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
+ AC_PAR_VENDOR_ID);
+ }
+
+ codec->subsystem_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
+ AC_PAR_SUBSYSTEM_ID);
+ codec->revision_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
+ AC_PAR_REV_ID);
+
+ setup_fg_nodes(codec);
+ if (!codec->afg && !codec->mfg) {
+ dev_err(dev, "no AFG or MFG node found\n");
+ err = -ENODEV;
+ goto error;
+ }
+
+ fg = codec->afg ? codec->afg : codec->mfg;
+
+ err = snd_hdac_refresh_widgets(codec);
+ if (err < 0)
+ goto error;
+
+ codec->power_caps = snd_hdac_read_parm(codec, fg, AC_PAR_POWER_STATE);
+ /* reread ssid if not set by parameter */
+ if (codec->subsystem_id == -1 || codec->subsystem_id == 0)
+ snd_hdac_read(codec, fg, AC_VERB_GET_SUBSYSTEM_ID, 0,
+ &codec->subsystem_id);
+
+ err = get_codec_vendor_name(codec);
+ if (err < 0)
+ goto error;
+
+ codec->chip_name = kasprintf(GFP_KERNEL, "ID %x",
+ codec->vendor_id & 0xffff);
+ if (!codec->chip_name) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ return 0;
+
+ error:
+ put_device(&codec->dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_device_init);
+
+/**
+ * snd_hdac_device_exit - clean up the HD-audio codec base device
+ * @codec: device to clean up
+ */
+void snd_hdac_device_exit(struct hdac_device *codec)
+{
+ pm_runtime_put_noidle(&codec->dev);
+ snd_hdac_bus_remove_device(codec->bus, codec);
+ kfree(codec->vendor_name);
+ kfree(codec->chip_name);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_device_exit);
+
+/**
+ * snd_hdac_device_register - register the hd-audio codec base device
+ * @codec: the device to register
+ */
+int snd_hdac_device_register(struct hdac_device *codec)
+{
+ int err;
+
+ err = device_add(&codec->dev);
+ if (err < 0)
+ return err;
+ err = hda_widget_sysfs_init(codec);
+ if (err < 0) {
+ device_del(&codec->dev);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_device_register);
+
+/**
+ * snd_hdac_device_unregister - unregister the hd-audio codec base device
+ * @codec: the device to unregister
+ */
+void snd_hdac_device_unregister(struct hdac_device *codec)
+{
+ if (device_is_registered(&codec->dev)) {
+ hda_widget_sysfs_exit(codec);
+ device_del(&codec->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(snd_hdac_device_unregister);
+
+/**
+ * snd_hdac_make_cmd - compose a 32bit command word to be sent to the
+ * HD-audio controller
+ * @codec: the codec object
+ * @nid: NID to encode
+ * @verb: verb to encode
+ * @parm: parameter to encode
+ *
+ * Return an encoded command verb or -1 for error.
+ */
+unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm)
+{
+ u32 val, addr;
+
+ addr = codec->addr;
+ if ((addr & ~0xf) || (nid & ~0x7f) ||
+ (verb & ~0xfff) || (parm & ~0xffff)) {
+ dev_err(&codec->dev, "out of range cmd %x:%x:%x:%x\n",
+ addr, nid, verb, parm);
+ return -1;
+ }
+
+ val = addr << 28;
+ val |= (u32)nid << 20;
+ val |= verb << 8;
+ val |= parm;
+ return val;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_make_cmd);
+
+/**
+ * snd_hdac_exec_verb - execute an encoded verb
+ * @codec: the codec object
+ * @cmd: encoded verb to execute
+ * @flags: optional flags, pass zero for default
+ * @res: the pointer to store the result, NULL if running async
+ *
+ * Returns zero if successful, or a negative error code.
+ *
+ * This calls the exec_verb op when set in the hdac_device. If not, it
+ * calls the default snd_hdac_bus_exec_verb().
+ */
+int snd_hdac_exec_verb(struct hdac_device *codec, unsigned int cmd,
+ unsigned int flags, unsigned int *res)
+{
+ if (codec->exec_verb)
+ return codec->exec_verb(codec, cmd, flags, res);
+ return snd_hdac_bus_exec_verb(codec->bus, codec->addr, cmd, res);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_exec_verb);
+
+
+/**
+ * snd_hdac_read - execute a verb
+ * @codec: the codec object
+ * @nid: NID to execute a verb
+ * @verb: verb to execute
+ * @parm: parameter for a verb
+ * @res: the pointer to store the result, NULL if running async
+ *
+ * Returns zero if successful, or a negative error code.
+ */
+int snd_hdac_read(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm, unsigned int *res)
+{
+ unsigned int cmd = snd_hdac_make_cmd(codec, nid, verb, parm);
+
+ return snd_hdac_exec_verb(codec, cmd, 0, res);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_read);
+
+/**
+ * _snd_hdac_read_parm - read a parameter
+ *
+ * This function returns zero or an error unlike snd_hdac_read_parm().
+ */
+int _snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid, int parm,
+ unsigned int *res)
+{
+ unsigned int cmd;
+
+ cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
+ return snd_hdac_regmap_read_raw(codec, cmd, res);
+}
+EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
+
+/**
+ * snd_hdac_read_parm_uncached - read a codec parameter without caching
+ * @codec: the codec object
+ * @nid: NID to read a parameter
+ * @parm: parameter to read
+ *
+ * Returns -1 for error. If you need to distinguish the error more
+ * strictly, use snd_hdac_read() directly.
+ */
+int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
+ int parm)
+{
+ int val;
+
+ if (codec->regmap)
+ regcache_cache_bypass(codec->regmap, true);
+ val = snd_hdac_read_parm(codec, nid, parm);
+ if (codec->regmap)
+ regcache_cache_bypass(codec->regmap, false);
+ return val;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
+
+/**
+ * snd_hdac_override_parm - override read-only parameters
+ * @codec: the codec object
+ * @nid: NID for the parameter
+ * @parm: the parameter to change
+ * @val: the parameter value to overwrite
+ */
+int snd_hdac_override_parm(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int parm, unsigned int val)
+{
+ unsigned int verb = (AC_VERB_PARAMETERS << 8) | (nid << 20) | parm;
+ int err;
+
+ if (!codec->regmap)
+ return -EINVAL;
+
+ codec->caps_overwriting = true;
+ err = snd_hdac_regmap_write_raw(codec, verb, val);
+ codec->caps_overwriting = false;
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_override_parm);
+
+/**
+ * snd_hdac_get_sub_nodes - get start NID and number of subtree nodes
+ * @codec: the codec object
+ * @nid: NID to inspect
+ * @start_id: the pointer to store the starting NID
+ *
+ * Returns the number of subtree nodes or zero if not found.
+ * This function always reads the parameter without caching.
+ */
+int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *start_id)
+{
+ unsigned int parm;
+
+ parm = snd_hdac_read_parm_uncached(codec, nid, AC_PAR_NODE_COUNT);
+ if (parm == -1) {
+ *start_id = 0;
+ return 0;
+ }
+ *start_id = (parm >> 16) & 0x7fff;
+ return (int)(parm & 0x7fff);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_get_sub_nodes);
+
+/*
+ * look for the AFG and MFG nodes
+ */
+static void setup_fg_nodes(struct hdac_device *codec)
+{
+ int i, total_nodes, function_id;
+ hda_nid_t nid;
+
+ total_nodes = snd_hdac_get_sub_nodes(codec, AC_NODE_ROOT, &nid);
+ for (i = 0; i < total_nodes; i++, nid++) {
+ function_id = snd_hdac_read_parm(codec, nid,
+ AC_PAR_FUNCTION_TYPE);
+ switch (function_id & 0xff) {
+ case AC_GRP_AUDIO_FUNCTION:
+ codec->afg = nid;
+ codec->afg_function_id = function_id & 0xff;
+ codec->afg_unsol = (function_id >> 8) & 1;
+ break;
+ case AC_GRP_MODEM_FUNCTION:
+ codec->mfg = nid;
+ codec->mfg_function_id = function_id & 0xff;
+ codec->mfg_unsol = (function_id >> 8) & 1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * snd_hdac_refresh_widgets - Reset the widget start/end nodes
+ * @codec: the codec object
+ */
+int snd_hdac_refresh_widgets(struct hdac_device *codec)
+{
+ hda_nid_t start_nid;
+ int nums;
+
+ nums = snd_hdac_get_sub_nodes(codec, codec->afg, &start_nid);
+ if (!start_nid || nums <= 0 || nums >= 0xff) {
+ dev_err(&codec->dev, "cannot read sub nodes for FG 0x%02x\n",
+ codec->afg);
+ return -EINVAL;
+ }
+
+ codec->num_nodes = nums;
+ codec->start_nid = start_nid;
+ codec->end_nid = start_nid + nums;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets);
+
+/* return CONNLIST_LEN parameter of the given widget */
+static unsigned int get_num_conns(struct hdac_device *codec, hda_nid_t nid)
+{
+ unsigned int wcaps = get_wcaps(codec, nid);
+ unsigned int parm;
+
+ if (!(wcaps & AC_WCAP_CONN_LIST) &&
+ get_wcaps_type(wcaps) != AC_WID_VOL_KNB)
+ return 0;
+
+ parm = snd_hdac_read_parm(codec, nid, AC_PAR_CONNLIST_LEN);
+ if (parm == -1)
+ parm = 0;
+ return parm;
+}
+
+/**
+ * snd_hdac_get_connections - get a widget connection list
+ * @codec: the codec object
+ * @nid: NID
+ * @conn_list: the array to store the results, can be NULL
+ * @max_conns: the max size of the given array
+ *
+ * Returns the number of connected widgets, zero for no connection, or a
+ * negative error code. When the number of elements doesn't fit in the
+ * given array size, it returns -ENOSPC.
+ *
+ * When @conn_list is NULL, it just checks the number of connections.
+ */
+int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *conn_list, int max_conns)
+{
+ unsigned int parm;
+ int i, conn_len, conns, err;
+ unsigned int shift, num_elems, mask;
+ hda_nid_t prev_nid;
+ int null_count = 0;
+
+ parm = get_num_conns(codec, nid);
+ if (!parm)
+ return 0;
+
+ if (parm & AC_CLIST_LONG) {
+ /* long form */
+ shift = 16;
+ num_elems = 2;
+ } else {
+ /* short form */
+ shift = 8;
+ num_elems = 4;
+ }
+ conn_len = parm & AC_CLIST_LENGTH;
+ mask = (1 << (shift-1)) - 1;
+
+ if (!conn_len)
+ return 0; /* no connection */
+
+ if (conn_len == 1) {
+ /* single connection */
+ err = snd_hdac_read(codec, nid, AC_VERB_GET_CONNECT_LIST, 0,
+ &parm);
+ if (err < 0)
+ return err;
+ if (conn_list)
+ conn_list[0] = parm & mask;
+ return 1;
+ }
+
+ /* multi connection */
+ conns = 0;
+ prev_nid = 0;
+ for (i = 0; i < conn_len; i++) {
+ int range_val;
+ hda_nid_t val, n;
+
+ if (i % num_elems == 0) {
+ err = snd_hdac_read(codec, nid,
+ AC_VERB_GET_CONNECT_LIST, i,
+ &parm);
+ if (err < 0)
+ return -EIO;
+ }
+ range_val = !!(parm & (1 << (shift-1))); /* ranges */
+ val = parm & mask;
+ if (val == 0 && null_count++) { /* no second chance */
+ dev_dbg(&codec->dev,
+ "invalid CONNECT_LIST verb %x[%i]:%x\n",
+ nid, i, parm);
+ return 0;
+ }
+ parm >>= shift;
+ if (range_val) {
+ /* ranges between the previous and this one */
+ if (!prev_nid || prev_nid >= val) {
+ dev_warn(&codec->dev,
+ "invalid dep_range_val %x:%x\n",
+ prev_nid, val);
+ continue;
+ }
+ for (n = prev_nid + 1; n <= val; n++) {
+ if (conn_list) {
+ if (conns >= max_conns)
+ return -ENOSPC;
+ conn_list[conns] = n;
+ }
+ conns++;
+ }
+ } else {
+ if (conn_list) {
+ if (conns >= max_conns)
+ return -ENOSPC;
+ conn_list[conns] = val;
+ }
+ conns++;
+ }
+ prev_nid = val;
+ }
+ return conns;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_get_connections);
+
+#ifdef CONFIG_PM
+/**
+ * snd_hdac_power_up - power up the codec
+ * @codec: the codec object
+ *
+ * This function calls the runtime PM helper to power up the given codec.
+ * Unlike snd_hdac_power_up_pm(), call this only from code paths that are
+ * not part of the PM path. Otherwise it gets stuck.
+ */
+void snd_hdac_power_up(struct hdac_device *codec)
+{
+ pm_runtime_get_sync(&codec->dev);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_power_up);
+
+/**
+ * snd_hdac_power_down - power down the codec
+ * @codec: the codec object
+ */
+void snd_hdac_power_down(struct hdac_device *codec)
+{
+ struct device *dev = &codec->dev;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_power_down);
+
+/**
+ * snd_hdac_power_up_pm - power up the codec
+ * @codec: the codec object
+ *
+ * This function can be called in a recursive code path like init code
+ * which may be called by PM suspend/resume again. OTOH, if a power-up
+ * call must wake up the sleeper (e.g. in a kctl callback), use
+ * snd_hdac_power_up() instead.
+ */
+void snd_hdac_power_up_pm(struct hdac_device *codec)
+{
+ if (!atomic_inc_not_zero(&codec->in_pm))
+ snd_hdac_power_up(codec);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+
+/**
+ * snd_hdac_power_down_pm - power down the codec
+ * @codec: the codec object
+ *
+ * Like snd_hdac_power_up_pm(), this function is used in a recursive
+ * code path like init code which may be called by PM suspend/resume again.
+ */
+void snd_hdac_power_down_pm(struct hdac_device *codec)
+{
+ if (atomic_dec_if_positive(&codec->in_pm) < 0)
+ snd_hdac_power_down(codec);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_power_down_pm);
+#endif
+
+/* codec vendor labels */
+struct hda_vendor_id {
+ unsigned int id;
+ const char *name;
+};
+
+static struct hda_vendor_id hda_vendor_ids[] = {
+ { 0x1002, "ATI" },
+ { 0x1013, "Cirrus Logic" },
+ { 0x1057, "Motorola" },
+ { 0x1095, "Silicon Image" },
+ { 0x10de, "Nvidia" },
+ { 0x10ec, "Realtek" },
+ { 0x1102, "Creative" },
+ { 0x1106, "VIA" },
+ { 0x111d, "IDT" },
+ { 0x11c1, "LSI" },
+ { 0x11d4, "Analog Devices" },
+ { 0x13f6, "C-Media" },
+ { 0x14f1, "Conexant" },
+ { 0x17e8, "Chrontel" },
+ { 0x1854, "LG" },
+ { 0x1aec, "Wolfson Microelectronics" },
+ { 0x1af4, "QEMU" },
+ { 0x434d, "C-Media" },
+ { 0x8086, "Intel" },
+ { 0x8384, "SigmaTel" },
+ {} /* terminator */
+};
+
+/* store the codec vendor name */
+static int get_codec_vendor_name(struct hdac_device *codec)
+{
+ const struct hda_vendor_id *c;
+ u16 vendor_id = codec->vendor_id >> 16;
+
+ for (c = hda_vendor_ids; c->id; c++) {
+ if (c->id == vendor_id) {
+ codec->vendor_name = kstrdup(c->name, GFP_KERNEL);
+ return codec->vendor_name ? 0 : -ENOMEM;
+ }
+ }
+
+ codec->vendor_name = kasprintf(GFP_KERNEL, "Generic %04x", vendor_id);
+ return codec->vendor_name ? 0 : -ENOMEM;
+}
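
Once snd_hdac_device_init() and snd_hdac_refresh_widgets() have filled in the widget range (start_nid/num_nodes/end_nid), a caller can walk the codec and query each widget with the helpers above. A purely illustrative sketch (dump_connections() is hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>
#include <sound/hdaudio.h>

static void dump_connections(struct hdac_device *codec)
{
	hda_nid_t conns[32];
	hda_nid_t nid;
	int i, n;

	for (nid = codec->start_nid; nid < codec->end_nid; nid++) {
		/* returns the count, 0 for no list, or a negative error */
		n = snd_hdac_get_connections(codec, nid, conns,
					     ARRAY_SIZE(conns));
		if (n <= 0)
			continue;
		for (i = 0; i < n; i++)
			dev_dbg(&codec->dev, "NID 0x%02x -> 0x%02x\n",
				nid, conns[i]);
	}
}
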
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
new file mode 100644
index 000000000000..7371e0c3926f
--- /dev/null
+++ b/sound/hda/hdac_regmap.c
@@ -0,0 +1,472 @@
+/*
+ * Regmap support for HD-audio verbs
+ *
+ * A virtual register is translated to one or more hda verbs for write,
+ * vice versa for read.
+ *
+ * A few limitations:
+ * - Not all verbs are covered, only a subset of the standard non-volatile verbs.
+ * - For reading, only AC_VERB_GET_* variants can be used.
+ * - For writing, mapped to the *corresponding* AC_VERB_SET_* variants,
+ * so can't handle asymmetric verbs for read and write
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <sound/core.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_regmap.h>
+
+#ifdef CONFIG_PM
+#define codec_is_running(codec) \
+ (atomic_read(&(codec)->in_pm) || \
+ !pm_runtime_suspended(&(codec)->dev))
+#else
+#define codec_is_running(codec) true
+#endif
+
+#define get_verb(reg) (((reg) >> 8) & 0xfff)
+
+static bool hda_volatile_reg(struct device *dev, unsigned int reg)
+{
+ struct hdac_device *codec = dev_to_hdac_dev(dev);
+ unsigned int verb = get_verb(reg);
+
+ switch (verb) {
+ case AC_VERB_GET_PROC_COEF:
+ return !codec->cache_coef;
+ case AC_VERB_GET_COEF_INDEX:
+ case AC_VERB_GET_PROC_STATE:
+ case AC_VERB_GET_POWER_STATE:
+ case AC_VERB_GET_PIN_SENSE:
+ case AC_VERB_GET_HDMI_DIP_SIZE:
+ case AC_VERB_GET_HDMI_ELDD:
+ case AC_VERB_GET_HDMI_DIP_INDEX:
+ case AC_VERB_GET_HDMI_DIP_DATA:
+ case AC_VERB_GET_HDMI_DIP_XMIT:
+ case AC_VERB_GET_HDMI_CP_CTRL:
+ case AC_VERB_GET_HDMI_CHAN_SLOT:
+ case AC_VERB_GET_DEVICE_SEL:
+ case AC_VERB_GET_DEVICE_LIST: /* read-only volatile */
+ return true;
+ }
+
+ return false;
+}
+
+static bool hda_writeable_reg(struct device *dev, unsigned int reg)
+{
+ struct hdac_device *codec = dev_to_hdac_dev(dev);
+ unsigned int verb = get_verb(reg);
+ int i;
+
+ for (i = 0; i < codec->vendor_verbs.used; i++) {
+ unsigned int *v = snd_array_elem(&codec->vendor_verbs, i);
+ if (verb == *v)
+ return true;
+ }
+
+ if (codec->caps_overwriting)
+ return true;
+
+ switch (verb & 0xf00) {
+ case AC_VERB_GET_STREAM_FORMAT:
+ case AC_VERB_GET_AMP_GAIN_MUTE:
+ return true;
+ case AC_VERB_GET_PROC_COEF:
+ return codec->cache_coef;
+ case 0xf00:
+ break;
+ default:
+ return false;
+ }
+
+ switch (verb) {
+ case AC_VERB_GET_CONNECT_SEL:
+ case AC_VERB_GET_SDI_SELECT:
+ case AC_VERB_GET_PIN_WIDGET_CONTROL:
+ case AC_VERB_GET_UNSOLICITED_RESPONSE: /* only as SET_UNSOLICITED_ENABLE */
+ case AC_VERB_GET_BEEP_CONTROL:
+ case AC_VERB_GET_EAPD_BTLENABLE:
+ case AC_VERB_GET_DIGI_CONVERT_1:
+ case AC_VERB_GET_DIGI_CONVERT_2: /* only for beep control */
+ case AC_VERB_GET_VOLUME_KNOB_CONTROL:
+ case AC_VERB_GET_GPIO_MASK:
+ case AC_VERB_GET_GPIO_DIRECTION:
+ case AC_VERB_GET_GPIO_DATA: /* not for volatile read */
+ case AC_VERB_GET_GPIO_WAKE_MASK:
+ case AC_VERB_GET_GPIO_UNSOLICITED_RSP_MASK:
+ case AC_VERB_GET_GPIO_STICKY_MASK:
+ return true;
+ }
+
+ return false;
+}
+
+static bool hda_readable_reg(struct device *dev, unsigned int reg)
+{
+ struct hdac_device *codec = dev_to_hdac_dev(dev);
+ unsigned int verb = get_verb(reg);
+
+ if (codec->caps_overwriting)
+ return true;
+
+ switch (verb) {
+ case AC_VERB_PARAMETERS:
+ case AC_VERB_GET_CONNECT_LIST:
+ case AC_VERB_GET_SUBSYSTEM_ID:
+ return true;
+ /* below are basically writable, but disabled to reduce unnecessary
+ * writes at sync
+ */
+ case AC_VERB_GET_CONFIG_DEFAULT: /* usually just read */
+ case AC_VERB_GET_CONV: /* managed in PCM code */
+ case AC_VERB_GET_CVT_CHAN_COUNT: /* managed in HDMI CA code */
+ return true;
+ }
+
+ return hda_writeable_reg(dev, reg);
+}
+
+/*
+ * Stereo amp pseudo register:
+ * To make the stereo volume control easier to handle, we provide a
+ * fake register that covers both left and right channels with a single
+ * (pseudo) register access. A verb consisting of SET_AMP_GAIN with
+ * *both* SET_LEFT and SET_RIGHT bits takes a 16bit value, the lower 8bit
+ * for the left and the upper 8bit for the right channel.
+ */
+static bool is_stereo_amp_verb(unsigned int reg)
+{
+ if (((reg >> 8) & 0x700) != AC_VERB_SET_AMP_GAIN_MUTE)
+ return false;
+ return (reg & (AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT)) ==
+ (AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT);
+}
+
+/* read a pseudo stereo amp register (16bit left+right) */
+static int hda_reg_read_stereo_amp(struct hdac_device *codec,
+ unsigned int reg, unsigned int *val)
+{
+ unsigned int left, right;
+ int err;
+
+ reg &= ~(AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT);
+ err = snd_hdac_exec_verb(codec, reg | AC_AMP_GET_LEFT, 0, &left);
+ if (err < 0)
+ return err;
+ err = snd_hdac_exec_verb(codec, reg | AC_AMP_GET_RIGHT, 0, &right);
+ if (err < 0)
+ return err;
+ *val = left | (right << 8);
+ return 0;
+}
+
+/* write a pseudo stereo amp register (16bit left+right) */
+static int hda_reg_write_stereo_amp(struct hdac_device *codec,
+ unsigned int reg, unsigned int val)
+{
+ int err;
+ unsigned int verb, left, right;
+
+ verb = AC_VERB_SET_AMP_GAIN_MUTE << 8;
+ if (reg & AC_AMP_GET_OUTPUT)
+ verb |= AC_AMP_SET_OUTPUT;
+ else
+ verb |= AC_AMP_SET_INPUT | ((reg & 0xf) << 8);
+ reg = (reg & ~0xfffff) | verb;
+
+ left = val & 0xff;
+ right = (val >> 8) & 0xff;
+ if (left == right) {
+ reg |= AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT;
+ return snd_hdac_exec_verb(codec, reg | left, 0, NULL);
+ }
+
+ err = snd_hdac_exec_verb(codec, reg | AC_AMP_SET_LEFT | left, 0, NULL);
+ if (err < 0)
+ return err;
+ err = snd_hdac_exec_verb(codec, reg | AC_AMP_SET_RIGHT | right, 0, NULL);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+/* read a pseudo coef register (16bit) */
+static int hda_reg_read_coef(struct hdac_device *codec, unsigned int reg,
+ unsigned int *val)
+{
+ unsigned int verb;
+ int err;
+
+ if (!codec->cache_coef)
+ return -EINVAL;
+ /* LSB 8bit = coef index */
+ verb = (reg & ~0xfff00) | (AC_VERB_SET_COEF_INDEX << 8);
+ err = snd_hdac_exec_verb(codec, verb, 0, NULL);
+ if (err < 0)
+ return err;
+ verb = (reg & ~0xfffff) | (AC_VERB_GET_PROC_COEF << 8);
+ return snd_hdac_exec_verb(codec, verb, 0, val);
+}
+
+/* write a pseudo coef register (16bit) */
+static int hda_reg_write_coef(struct hdac_device *codec, unsigned int reg,
+ unsigned int val)
+{
+ unsigned int verb;
+ int err;
+
+ if (!codec->cache_coef)
+ return -EINVAL;
+ /* LSB 8bit = coef index */
+ verb = (reg & ~0xfff00) | (AC_VERB_SET_COEF_INDEX << 8);
+ err = snd_hdac_exec_verb(codec, verb, 0, NULL);
+ if (err < 0)
+ return err;
+	verb = (reg & ~0xfffff) | (AC_VERB_SET_PROC_COEF << 8) |
+		(val & 0xffff);
+ return snd_hdac_exec_verb(codec, verb, 0, NULL);
+}
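
For orientation, a hedged sketch of how a caller would address such a coef pseudo register through the regmap cache. The macro and the NID/index values are illustrative and assume the nid << 20 | verb << 8 | payload encoding used by this cache (the codec address is ORed in by hda_reg_read() itself).

	/* hypothetical helper: build a coef pseudo register for (nid, coef index) */
	#define EX_COEF_REG(nid, idx) \
		(((nid) << 20) | (AC_VERB_GET_PROC_COEF << 8) | ((idx) & 0xff))

	static int example_read_coef(struct hdac_device *codec)
	{
		unsigned int val;

		/* served from the cache; falls back to hda_reg_read_coef() on a miss */
		return regmap_read(codec->regmap, EX_COEF_REG(0x20, 0x07), &val);
	}
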
+
+static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct hdac_device *codec = context;
+ int verb = get_verb(reg);
+ int err;
+
+ if (!codec_is_running(codec) && verb != AC_VERB_GET_POWER_STATE)
+ return -EAGAIN;
+ reg |= (codec->addr << 28);
+ if (is_stereo_amp_verb(reg))
+ return hda_reg_read_stereo_amp(codec, reg, val);
+ if (verb == AC_VERB_GET_PROC_COEF)
+ return hda_reg_read_coef(codec, reg, val);
+ err = snd_hdac_exec_verb(codec, reg, 0, val);
+ if (err < 0)
+ return err;
+ /* special handling for asymmetric reads */
+ if (verb == AC_VERB_GET_POWER_STATE) {
+ if (*val & AC_PWRST_ERROR)
+ *val = -1;
+ else /* take only the actual state */
+ *val = (*val >> 4) & 0x0f;
+ }
+ return 0;
+}
+
+static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct hdac_device *codec = context;
+ unsigned int verb;
+ int i, bytes, err;
+
+ reg &= ~0x00080000U; /* drop GET bit */
+ reg |= (codec->addr << 28);
+ verb = get_verb(reg);
+
+ if (!codec_is_running(codec) && verb != AC_VERB_SET_POWER_STATE)
+ return codec->lazy_cache ? 0 : -EAGAIN;
+
+ if (is_stereo_amp_verb(reg))
+ return hda_reg_write_stereo_amp(codec, reg, val);
+
+ if (verb == AC_VERB_SET_PROC_COEF)
+ return hda_reg_write_coef(codec, reg, val);
+
+ switch (verb & 0xf00) {
+ case AC_VERB_SET_AMP_GAIN_MUTE:
+ verb = AC_VERB_SET_AMP_GAIN_MUTE;
+ if (reg & AC_AMP_GET_LEFT)
+ verb |= AC_AMP_SET_LEFT >> 8;
+ else
+ verb |= AC_AMP_SET_RIGHT >> 8;
+ if (reg & AC_AMP_GET_OUTPUT) {
+ verb |= AC_AMP_SET_OUTPUT >> 8;
+ } else {
+ verb |= AC_AMP_SET_INPUT >> 8;
+ verb |= reg & 0xf;
+ }
+ break;
+ }
+
+ switch (verb) {
+ case AC_VERB_SET_DIGI_CONVERT_1:
+ bytes = 2;
+ break;
+ case AC_VERB_SET_CONFIG_DEFAULT_BYTES_0:
+ bytes = 4;
+ break;
+ default:
+ bytes = 1;
+ break;
+ }
+
+ for (i = 0; i < bytes; i++) {
+ reg &= ~0xfffff;
+ reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff);
+ err = snd_hdac_exec_verb(codec, reg, 0, NULL);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config hda_regmap_cfg = {
+ .name = "hdaudio",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .max_register = 0xfffffff,
+ .writeable_reg = hda_writeable_reg,
+ .readable_reg = hda_readable_reg,
+ .volatile_reg = hda_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_read = hda_reg_read,
+ .reg_write = hda_reg_write,
+ .use_single_rw = true,
+};
+
+int snd_hdac_regmap_init(struct hdac_device *codec)
+{
+ struct regmap *regmap;
+
+ regmap = regmap_init(&codec->dev, NULL, codec, &hda_regmap_cfg);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ codec->regmap = regmap;
+ snd_array_init(&codec->vendor_verbs, sizeof(unsigned int), 8);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_regmap_init);
+
+void snd_hdac_regmap_exit(struct hdac_device *codec)
+{
+ if (codec->regmap) {
+ regmap_exit(codec->regmap);
+ codec->regmap = NULL;
+ snd_array_free(&codec->vendor_verbs);
+ }
+}
+EXPORT_SYMBOL_GPL(snd_hdac_regmap_exit);
+
+/**
+ * snd_hdac_regmap_add_vendor_verb - add a vendor-specific verb to regmap
+ * @codec: the codec object
+ * @verb: verb to allow accessing via regmap
+ *
+ * Returns zero for success or a negative error code.
+ */
+int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
+ unsigned int verb)
+{
+ unsigned int *p = snd_array_new(&codec->vendor_verbs);
+
+ if (!p)
+ return -ENOMEM;
+ *p = verb | 0x800; /* set GET bit */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_regmap_add_vendor_verb);
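
A hedged usage sketch (the verb number is made up): a codec driver registers its vendor-specific verb once so that later regmap accesses of it become cacheable; the GET bit is set by the helper itself.

	/* hypothetical: 0x7e0 stands in for a vendor-specific verb */
	static int example_enable_vendor_cache(struct hdac_device *codec)
	{
		return snd_hdac_regmap_add_vendor_verb(codec, 0x7e0);
	}
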
+
+/*
+ * helper functions
+ */
+
+/* write a pseudo-register value (w/o power sequence) */
+static int reg_raw_write(struct hdac_device *codec, unsigned int reg,
+ unsigned int val)
+{
+ if (!codec->regmap)
+ return hda_reg_write(codec, reg, val);
+ else
+ return regmap_write(codec->regmap, reg, val);
+}
+
+/**
+ * snd_hdac_regmap_write_raw - write a pseudo register with power mgmt
+ * @codec: the codec object
+ * @reg: pseudo register
+ * @val: value to write
+ *
+ * Returns zero if successful or a negative error code.
+ */
+int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int val)
+{
+ int err;
+
+ err = reg_raw_write(codec, reg, val);
+ if (err == -EAGAIN) {
+ snd_hdac_power_up_pm(codec);
+ err = reg_raw_write(codec, reg, val);
+ snd_hdac_power_down_pm(codec);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
+
+static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
+ unsigned int *val)
+{
+ if (!codec->regmap)
+ return hda_reg_read(codec, reg, val);
+ else
+ return regmap_read(codec->regmap, reg, val);
+}
+
+/**
+ * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
+ * @codec: the codec object
+ * @reg: pseudo register
+ * @val: pointer to store the read value
+ *
+ * Returns zero if successful or a negative error code.
+ */
+int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int *val)
+{
+ int err;
+
+ err = reg_raw_read(codec, reg, val);
+ if (err == -EAGAIN) {
+ snd_hdac_power_up_pm(codec);
+ err = reg_raw_read(codec, reg, val);
+ snd_hdac_power_down_pm(codec);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
+
+/**
+ * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
+ * @codec: the codec object
+ * @reg: pseudo register
+ * @mask: bit mask to update
+ * @val: value to update
+ *
+ * Returns zero if successful or a negative error code.
+ */
+int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ unsigned int orig;
+ int err;
+
+ val &= mask;
+ err = snd_hdac_regmap_read_raw(codec, reg, &orig);
+ if (err < 0)
+ return err;
+ val |= orig & ~mask;
+ if (val == orig)
+ return 0;
+ err = snd_hdac_regmap_write_raw(codec, reg, val);
+ if (err < 0)
+ return err;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw);
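
A hedged usage sketch of the read-modify-write helper above (register and mask values are illustrative): the return value distinguishes "no change" (0) from "value updated" (1).

	/* hypothetical: set bit 0 of a cached pseudo register */
	static int example_set_flag(struct hdac_device *codec, unsigned int reg)
	{
		int ret = snd_hdac_regmap_update_raw(codec, reg, 0x01, 0x01);

		if (ret < 0)
			return ret;
		if (ret > 0)
			dev_dbg(&codec->dev, "pseudo register %#x updated\n", reg);
		return 0;
	}
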
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
new file mode 100644
index 000000000000..0a6ce3b84cc4
--- /dev/null
+++ b/sound/hda/hdac_sysfs.c
@@ -0,0 +1,406 @@
+/*
+ * sysfs support for HD-audio core device
+ */
+
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/hdaudio.h>
+#include "local.h"
+
+struct hdac_widget_tree {
+ struct kobject *root;
+ struct kobject *afg;
+ struct kobject **nodes;
+};
+
+#define CODEC_ATTR(type) \
+static ssize_t type##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct hdac_device *codec = dev_to_hdac_dev(dev); \
+ return sprintf(buf, "0x%x\n", codec->type); \
+} \
+static DEVICE_ATTR_RO(type)
+
+#define CODEC_ATTR_STR(type) \
+static ssize_t type##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct hdac_device *codec = dev_to_hdac_dev(dev); \
+ return sprintf(buf, "%s\n", \
+ codec->type ? codec->type : ""); \
+} \
+static DEVICE_ATTR_RO(type)
+
+CODEC_ATTR(type);
+CODEC_ATTR(vendor_id);
+CODEC_ATTR(subsystem_id);
+CODEC_ATTR(revision_id);
+CODEC_ATTR(afg);
+CODEC_ATTR(mfg);
+CODEC_ATTR_STR(vendor_name);
+CODEC_ATTR_STR(chip_name);
+
+static struct attribute *hdac_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_vendor_id.attr,
+ &dev_attr_subsystem_id.attr,
+ &dev_attr_revision_id.attr,
+ &dev_attr_afg.attr,
+ &dev_attr_mfg.attr,
+ &dev_attr_vendor_name.attr,
+ &dev_attr_chip_name.attr,
+ NULL
+};
+
+static struct attribute_group hdac_dev_attr_group = {
+ .attrs = hdac_dev_attrs,
+};
+
+const struct attribute_group *hdac_dev_attr_groups[] = {
+ &hdac_dev_attr_group,
+ NULL
+};
+
+/*
+ * Widget tree sysfs
+ *
+ * This is a tree showing the attributes of each widget. It appears like
+ * /sys/bus/hdaudioC0D0/widgets/04/caps
+ */
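
For illustration, a user-space reader of one widget attribute might look like the sketch below; the sysfs path is an assumption based on the example above and depends on the actual card/codec numbering.

	/* hypothetical user-space sketch: read the caps of widget 0x04 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int caps;
		FILE *f = fopen("/sys/bus/hdaudio/devices/hdaudioC0D0/widgets/04/caps", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%x", &caps) == 1)
			printf("widget 0x04 caps: 0x%08x\n", caps);
		fclose(f);
		return 0;
	}
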
+
+struct widget_attribute;
+
+struct widget_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf);
+ ssize_t (*store)(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr,
+ const char *buf, size_t count);
+};
+
+static int get_codec_nid(struct kobject *kobj, struct hdac_device **codecp)
+{
+ struct device *dev = kobj_to_dev(kobj->parent->parent);
+ int nid;
+ ssize_t ret;
+
+ ret = kstrtoint(kobj->name, 16, &nid);
+ if (ret < 0)
+ return ret;
+ *codecp = dev_to_hdac_dev(dev);
+ return nid;
+}
+
+static ssize_t widget_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct widget_attribute *wid_attr =
+ container_of(attr, struct widget_attribute, attr);
+ struct hdac_device *codec;
+ int nid;
+
+ if (!wid_attr->show)
+ return -EIO;
+ nid = get_codec_nid(kobj, &codec);
+ if (nid < 0)
+ return nid;
+ return wid_attr->show(codec, nid, wid_attr, buf);
+}
+
+static ssize_t widget_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct widget_attribute *wid_attr =
+ container_of(attr, struct widget_attribute, attr);
+ struct hdac_device *codec;
+ int nid;
+
+ if (!wid_attr->store)
+ return -EIO;
+ nid = get_codec_nid(kobj, &codec);
+ if (nid < 0)
+ return nid;
+ return wid_attr->store(codec, nid, wid_attr, buf, count);
+}
+
+static const struct sysfs_ops widget_sysfs_ops = {
+ .show = widget_attr_show,
+ .store = widget_attr_store,
+};
+
+static void widget_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type widget_ktype = {
+ .release = widget_release,
+ .sysfs_ops = &widget_sysfs_ops,
+};
+
+#define WIDGET_ATTR_RO(_name) \
+ struct widget_attribute wid_attr_##_name = __ATTR_RO(_name)
+#define WIDGET_ATTR_RW(_name) \
+ struct widget_attribute wid_attr_##_name = __ATTR_RW(_name)
+
+static ssize_t caps_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%08x\n", get_wcaps(codec, nid));
+}
+
+static ssize_t pin_caps_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
+ return 0;
+ return sprintf(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_PIN_CAP));
+}
+
+static ssize_t pin_cfg_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ unsigned int val;
+
+ if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
+ return 0;
+ if (snd_hdac_read(codec, nid, AC_VERB_GET_CONFIG_DEFAULT, 0, &val))
+ return 0;
+ return sprintf(buf, "0x%08x\n", val);
+}
+
+static bool has_pcm_cap(struct hdac_device *codec, hda_nid_t nid)
+{
+ if (nid == codec->afg || nid == codec->mfg)
+ return true;
+ switch (get_wcaps_type(get_wcaps(codec, nid))) {
+ case AC_WID_AUD_OUT:
+ case AC_WID_AUD_IN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static ssize_t pcm_caps_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ if (!has_pcm_cap(codec, nid))
+ return 0;
+ return sprintf(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_PCM));
+}
+
+static ssize_t pcm_formats_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ if (!has_pcm_cap(codec, nid))
+ return 0;
+ return sprintf(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_STREAM));
+}
+
+static ssize_t amp_in_caps_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_IN_AMP))
+ return 0;
+ return sprintf(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_AMP_IN_CAP));
+}
+
+static ssize_t amp_out_caps_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
+ return 0;
+ return sprintf(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_AMP_OUT_CAP));
+}
+
+static ssize_t power_caps_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_POWER))
+ return 0;
+ return sprintf(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_POWER_STATE));
+}
+
+static ssize_t gpio_caps_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_GPIO_CAP));
+}
+
+static ssize_t connections_show(struct hdac_device *codec, hda_nid_t nid,
+ struct widget_attribute *attr, char *buf)
+{
+ hda_nid_t list[32];
+ int i, nconns;
+ ssize_t ret = 0;
+
+ nconns = snd_hdac_get_connections(codec, nid, list, ARRAY_SIZE(list));
+ if (nconns <= 0)
+ return nconns;
+ for (i = 0; i < nconns; i++)
+ ret += sprintf(buf + ret, "%s0x%02x", i ? " " : "", list[i]);
+ ret += sprintf(buf + ret, "\n");
+ return ret;
+}
+
+static WIDGET_ATTR_RO(caps);
+static WIDGET_ATTR_RO(pin_caps);
+static WIDGET_ATTR_RO(pin_cfg);
+static WIDGET_ATTR_RO(pcm_caps);
+static WIDGET_ATTR_RO(pcm_formats);
+static WIDGET_ATTR_RO(amp_in_caps);
+static WIDGET_ATTR_RO(amp_out_caps);
+static WIDGET_ATTR_RO(power_caps);
+static WIDGET_ATTR_RO(gpio_caps);
+static WIDGET_ATTR_RO(connections);
+
+static struct attribute *widget_node_attrs[] = {
+ &wid_attr_caps.attr,
+ &wid_attr_pin_caps.attr,
+ &wid_attr_pin_cfg.attr,
+ &wid_attr_pcm_caps.attr,
+ &wid_attr_pcm_formats.attr,
+ &wid_attr_amp_in_caps.attr,
+ &wid_attr_amp_out_caps.attr,
+ &wid_attr_power_caps.attr,
+ &wid_attr_connections.attr,
+ NULL,
+};
+
+static struct attribute *widget_afg_attrs[] = {
+ &wid_attr_pcm_caps.attr,
+ &wid_attr_pcm_formats.attr,
+ &wid_attr_amp_in_caps.attr,
+ &wid_attr_amp_out_caps.attr,
+ &wid_attr_power_caps.attr,
+ &wid_attr_gpio_caps.attr,
+ NULL,
+};
+
+static const struct attribute_group widget_node_group = {
+ .attrs = widget_node_attrs,
+};
+
+static const struct attribute_group widget_afg_group = {
+ .attrs = widget_afg_attrs,
+};
+
+static void free_widget_node(struct kobject *kobj,
+ const struct attribute_group *group)
+{
+ if (kobj) {
+ sysfs_remove_group(kobj, group);
+ kobject_put(kobj);
+ }
+}
+
+static void widget_tree_free(struct hdac_device *codec)
+{
+ struct hdac_widget_tree *tree = codec->widgets;
+ struct kobject **p;
+
+ if (!tree)
+ return;
+ free_widget_node(tree->afg, &widget_afg_group);
+ if (tree->nodes) {
+ for (p = tree->nodes; *p; p++)
+ free_widget_node(*p, &widget_node_group);
+ kfree(tree->nodes);
+ }
+ if (tree->root)
+ kobject_put(tree->root);
+ kfree(tree);
+ codec->widgets = NULL;
+}
+
+static int add_widget_node(struct kobject *parent, hda_nid_t nid,
+ const struct attribute_group *group,
+ struct kobject **res)
+{
+ struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ int err;
+
+ if (!kobj)
+ return -ENOMEM;
+ kobject_init(kobj, &widget_ktype);
+	err = kobject_add(kobj, parent, "%02x", nid);
+	if (err < 0) {
+		kobject_put(kobj);
+		return err;
+	}
+ err = sysfs_create_group(kobj, group);
+ if (err < 0) {
+ kobject_put(kobj);
+ return err;
+ }
+
+ *res = kobj;
+ return 0;
+}
+
+static int widget_tree_create(struct hdac_device *codec)
+{
+ struct hdac_widget_tree *tree;
+ int i, err;
+ hda_nid_t nid;
+
+ tree = codec->widgets = kzalloc(sizeof(*tree), GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
+ tree->root = kobject_create_and_add("widgets", &codec->dev.kobj);
+ if (!tree->root)
+ return -ENOMEM;
+
+ tree->nodes = kcalloc(codec->num_nodes + 1, sizeof(*tree->nodes),
+ GFP_KERNEL);
+ if (!tree->nodes)
+ return -ENOMEM;
+
+ for (i = 0, nid = codec->start_nid; i < codec->num_nodes; i++, nid++) {
+ err = add_widget_node(tree->root, nid, &widget_node_group,
+ &tree->nodes[i]);
+ if (err < 0)
+ return err;
+ }
+
+ if (codec->afg) {
+ err = add_widget_node(tree->root, codec->afg,
+ &widget_afg_group, &tree->afg);
+ if (err < 0)
+ return err;
+ }
+
+ kobject_uevent(tree->root, KOBJ_CHANGE);
+ return 0;
+}
+
+int hda_widget_sysfs_init(struct hdac_device *codec)
+{
+ int err;
+
+ err = widget_tree_create(codec);
+ if (err < 0) {
+ widget_tree_free(codec);
+ return err;
+ }
+
+ return 0;
+}
+
+void hda_widget_sysfs_exit(struct hdac_device *codec)
+{
+ widget_tree_free(codec);
+}
diff --git a/sound/hda/local.h b/sound/hda/local.h
new file mode 100644
index 000000000000..d692f417ddc0
--- /dev/null
+++ b/sound/hda/local.h
@@ -0,0 +1,23 @@
+/*
+ * Local helper macros and functions for HD-audio core drivers
+ */
+
+#ifndef __HDAC_LOCAL_H
+#define __HDAC_LOCAL_H
+
+#define get_wcaps(codec, nid) \
+ snd_hdac_read_parm(codec, nid, AC_PAR_AUDIO_WIDGET_CAP)
+
+/* get the widget type from widget capability bits */
+static inline int get_wcaps_type(unsigned int wcaps)
+{
+ if (!wcaps)
+ return -1; /* invalid type */
+ return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+}
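
A small hedged usage sketch of the two helpers above (the wrapper name is illustrative); this mirrors how the sysfs code gates pin-only attributes.

	/* hypothetical: true if the widget is a pin complex */
	static inline bool example_is_pin(struct hdac_device *codec, hda_nid_t nid)
	{
		return get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_PIN;
	}
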
+
+extern const struct attribute_group *hdac_dev_attr_groups[];
+int hda_widget_sysfs_init(struct hdac_device *codec);
+void hda_widget_sysfs_exit(struct hdac_device *codec);
+
+#endif /* __HDAC_LOCAL_H */
diff --git a/sound/hda/trace.c b/sound/hda/trace.c
new file mode 100644
index 000000000000..ca2d6bd94518
--- /dev/null
+++ b/sound/hda/trace.c
@@ -0,0 +1,6 @@
+/*
+ * tracepoint definitions for HD-audio core drivers
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/sound/hda/trace.h b/sound/hda/trace.h
new file mode 100644
index 000000000000..33a7eb5573d4
--- /dev/null
+++ b/sound/hda/trace.h
@@ -0,0 +1,62 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hda
+
+#if !defined(__HDAC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HDAC_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/device.h>
+#include <sound/hdaudio.h>
+
+#ifndef HDAC_MSG_MAX
+#define HDAC_MSG_MAX 500
+#endif
+
+struct hdac_bus;
+struct hdac_codec;
+
+TRACE_EVENT(hda_send_cmd,
+ TP_PROTO(struct hdac_bus *bus, unsigned int cmd),
+ TP_ARGS(bus, cmd),
+ TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
+ TP_fast_assign(
+ snprintf(__get_str(msg), HDAC_MSG_MAX,
+ "[%s:%d] val=0x%08x",
+ dev_name((bus)->dev), (cmd) >> 28, cmd);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(hda_get_response,
+ TP_PROTO(struct hdac_bus *bus, unsigned int addr, unsigned int res),
+ TP_ARGS(bus, addr, res),
+ TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
+ TP_fast_assign(
+ snprintf(__get_str(msg), HDAC_MSG_MAX,
+ "[%s:%d] val=0x%08x",
+ dev_name((bus)->dev), addr, res);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(hda_unsol_event,
+ TP_PROTO(struct hdac_bus *bus, u32 res, u32 res_ex),
+ TP_ARGS(bus, res, res_ex),
+ TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
+ TP_fast_assign(
+ snprintf(__get_str(msg), HDAC_MSG_MAX,
+ "[%s:%d] res=0x%08x, res_ex=0x%08x",
+ dev_name((bus)->dev), res_ex & 0x0f, res, res_ex);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+#endif /* __HDAC_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
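
As a hedged call-site sketch: each TRACE_EVENT() above generates a trace_<name>() helper, which a bus driver would call around its CORB/RIRB I/O (the wrapper function below is illustrative).

	/* hypothetical call sites for the tracepoints defined in trace.h */
	static void example_trace_io(struct hdac_bus *bus, unsigned int cmd,
				     unsigned int addr, unsigned int res)
	{
		trace_hda_send_cmd(bus, cmd);		/* before sending the verb */
		trace_hda_get_response(bus, addr, res);	/* after reading the response */
	}
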
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 88844881cbff..2183e9ebaa6d 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -73,7 +73,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
void *private_data, struct ak4113 **r_ak4113)
{
struct ak4113 *chip;
- int err = 0;
+ int err;
unsigned char reg;
static struct snd_device_ops ops = {
.dev_free = snd_ak4113_dev_free,
@@ -109,7 +109,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
__fail:
snd_ak4113_free(chip);
- return err < 0 ? err : -EIO;
+ return err;
}
EXPORT_SYMBOL_GPL(snd_ak4113_create);
diff --git a/sound/isa/sb/emu8000_synth.c b/sound/isa/sb/emu8000_synth.c
index 72332dfada9a..4aa719cad331 100644
--- a/sound/isa/sb/emu8000_synth.c
+++ b/sound/isa/sb/emu8000_synth.c
@@ -34,8 +34,9 @@ MODULE_LICENSE("GPL");
/*
* create a new hardware dependent device for Emu8000
*/
-static int snd_emu8000_new_device(struct snd_seq_device *dev)
+static int snd_emu8000_probe(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_emu8000 *hw;
struct snd_emux *emu;
@@ -93,8 +94,9 @@ static int snd_emu8000_new_device(struct snd_seq_device *dev)
/*
* free all resources
*/
-static int snd_emu8000_delete_device(struct snd_seq_device *dev)
+static int snd_emu8000_remove(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_emu8000 *hw;
if (dev->driver_data == NULL)
@@ -114,21 +116,14 @@ static int snd_emu8000_delete_device(struct snd_seq_device *dev)
* INIT part
*/
-static int __init alsa_emu8000_init(void)
-{
-
- static struct snd_seq_dev_ops ops = {
- snd_emu8000_new_device,
- snd_emu8000_delete_device,
- };
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU8000, &ops,
- sizeof(struct snd_emu8000*));
-}
-
-static void __exit alsa_emu8000_exit(void)
-{
- snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_EMU8000);
-}
-
-module_init(alsa_emu8000_init)
-module_exit(alsa_emu8000_exit)
+static struct snd_seq_driver emu8000_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .probe = snd_emu8000_probe,
+ .remove = snd_emu8000_remove,
+ },
+ .id = SNDRV_SEQ_DEV_ID_EMU8000,
+ .argsize = sizeof(struct snd_emu8000 *),
+};
+
+module_snd_seq_driver(emu8000_driver);
diff --git a/sound/isa/wavefront/wavefront_fx.c b/sound/isa/wavefront/wavefront_fx.c
index b5a19708473a..0608a5a4289d 100644
--- a/sound/isa/wavefront/wavefront_fx.c
+++ b/sound/isa/wavefront/wavefront_fx.c
@@ -79,13 +79,13 @@ wavefront_fx_memset (snd_wavefront_t *dev,
if (page < 0 || page > 7) {
snd_printk ("FX memset: "
"page must be >= 0 and <= 7\n");
- return -(EINVAL);
+ return -EINVAL;
}
if (addr < 0 || addr > 0x7f) {
snd_printk ("FX memset: "
"addr must be >= 0 and <= 7f\n");
- return -(EINVAL);
+ return -EINVAL;
}
if (cnt == 1) {
@@ -118,7 +118,7 @@ wavefront_fx_memset (snd_wavefront_t *dev,
snd_printk ("FX memset "
"(0x%x, 0x%x, 0x%lx, %d) incomplete\n",
page, addr, (unsigned long) data, cnt);
- return -(EIO);
+ return -EIO;
}
}
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 33f5ec14fcfa..69f76ff5693d 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -793,7 +793,7 @@ wavefront_send_patch (snd_wavefront_t *dev, wavefront_patch_info *header)
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, NULL, buf)) {
snd_printk ("download patch failed\n");
- return -(EIO);
+ return -EIO;
}
return (0);
@@ -831,7 +831,7 @@ wavefront_send_program (snd_wavefront_t *dev, wavefront_patch_info *header)
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
snd_printk ("download patch failed\n");
- return -(EIO);
+ return -EIO;
}
return (0);
@@ -952,7 +952,7 @@ wavefront_send_sample (snd_wavefront_t *dev,
if (skip > 0 && header->hdr.s.SampleResolution != LINEAR_16BIT) {
snd_printk ("channel selection only "
"possible on 16-bit samples");
- return -(EINVAL);
+ return -EINVAL;
}
switch (skip) {
@@ -1049,7 +1049,7 @@ wavefront_send_sample (snd_wavefront_t *dev,
NULL, sample_hdr)) {
snd_printk ("sample %sdownload refused.\n",
header->size ? "" : "header ");
- return -(EIO);
+ return -EIO;
}
if (header->size == 0) {
@@ -1075,7 +1075,7 @@ wavefront_send_sample (snd_wavefront_t *dev,
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
snd_printk ("download block "
"request refused.\n");
- return -(EIO);
+ return -EIO;
}
for (i = 0; i < blocksize; i++) {
@@ -1135,12 +1135,12 @@ wavefront_send_sample (snd_wavefront_t *dev,
if (dma_ack == -1) {
snd_printk ("upload sample "
"DMA ack timeout\n");
- return -(EIO);
+ return -EIO;
} else {
snd_printk ("upload sample "
"DMA ack error 0x%x\n",
dma_ack);
- return -(EIO);
+ return -EIO;
}
}
}
@@ -1181,7 +1181,7 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header)
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
snd_printk ("download alias failed.\n");
- return -(EIO);
+ return -EIO;
}
dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_ALIAS);
@@ -1232,7 +1232,7 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header)
msample_hdr)) {
snd_printk ("download of multisample failed.\n");
kfree(msample_hdr);
- return -(EIO);
+ return -EIO;
}
dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_MULTISAMPLE);
@@ -1254,7 +1254,7 @@ wavefront_fetch_multisample (snd_wavefront_t *dev,
if (snd_wavefront_cmd (dev, WFC_UPLOAD_MULTISAMPLE, log_ns, number)) {
snd_printk ("upload multisample failed.\n");
- return -(EIO);
+ return -EIO;
}
DPRINT (WF_DEBUG_DATA, "msample %d has %d samples\n",
@@ -1273,14 +1273,14 @@ wavefront_fetch_multisample (snd_wavefront_t *dev,
if ((val = wavefront_read (dev)) == -1) {
snd_printk ("upload multisample failed "
"during sample loop.\n");
- return -(EIO);
+ return -EIO;
}
d[0] = val;
if ((val = wavefront_read (dev)) == -1) {
snd_printk ("upload multisample failed "
"during sample loop.\n");
- return -(EIO);
+ return -EIO;
}
d[1] = val;
@@ -1315,7 +1315,7 @@ wavefront_send_drum (snd_wavefront_t *dev, wavefront_patch_info *header)
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
snd_printk ("download drum failed.\n");
- return -(EIO);
+ return -EIO;
}
return (0);
diff --git a/sound/mips/au1x00.c b/sound/mips/au1x00.c
index fbcaa5434fd8..1e30e8475431 100644
--- a/sound/mips/au1x00.c
+++ b/sound/mips/au1x00.c
@@ -633,19 +633,25 @@ static int au1000_ac97_probe(struct platform_device *pdev)
au1000->stream[PLAYBACK] = kmalloc(sizeof(struct audio_stream),
GFP_KERNEL);
- if (!au1000->stream[PLAYBACK])
+ if (!au1000->stream[PLAYBACK]) {
+ err = -ENOMEM;
goto out;
+ }
au1000->stream[PLAYBACK]->dma = -1;
au1000->stream[CAPTURE] = kmalloc(sizeof(struct audio_stream),
GFP_KERNEL);
- if (!au1000->stream[CAPTURE])
+ if (!au1000->stream[CAPTURE]) {
+ err = -ENOMEM;
goto out;
+ }
au1000->stream[CAPTURE]->dma = -1;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
+ if (!r) {
+ err = -ENODEV;
goto out;
+ }
err = -EBUSY;
au1000->ac97_res_port = request_mem_region(r->start, resource_size(r),
diff --git a/sound/oss/dev_table.c b/sound/oss/dev_table.c
index d8cf3e58dc76..6dad51596b70 100644
--- a/sound/oss/dev_table.c
+++ b/sound/oss/dev_table.c
@@ -58,13 +58,13 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
if (vers != AUDIO_DRIVER_VERSION || driver_size > sizeof(struct audio_driver)) {
printk(KERN_ERR "Sound: Incompatible audio driver for %s\n", name);
- return -(EINVAL);
+ return -EINVAL;
}
num = sound_alloc_audiodev();
if (num == -1) {
printk(KERN_ERR "sound: Too many audio drivers\n");
- return -(EBUSY);
+ return -EBUSY;
}
d = (struct audio_driver *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_driver)));
sound_nblocks++;
@@ -79,7 +79,7 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
if (d == NULL || op == NULL) {
printk(KERN_ERR "Sound: Can't allocate driver for (%s)\n", name);
sound_unload_audiodev(num);
- return -(ENOMEM);
+ return -ENOMEM;
}
init_waitqueue_head(&op->in_sleeper);
init_waitqueue_head(&op->out_sleeper);
diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c
index 607cee4d545e..b6d19adf8f41 100644
--- a/sound/oss/opl3.c
+++ b/sound/oss/opl3.c
@@ -666,7 +666,7 @@ static int opl3_start_note (int dev, int voice, int note, int volume)
opl3_command(map->ioaddr, FNUM_LOW + map->voice_num, data);
data = 0x20 | ((block & 0x7) << 2) | ((fnum >> 8) & 0x3);
- devc->voc[voice].keyon_byte = data;
+ devc->voc[voice].keyon_byte = data;
opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, data);
if (voice_mode == 4)
opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num + 3, data);
@@ -717,7 +717,7 @@ static void freq_to_fnum (int freq, int *block, int *fnum)
static void opl3_command (int io_addr, unsigned int addr, unsigned int val)
{
- int i;
+ int i;
/*
* The original 2-OP synth requires a quite long delay after writing to a
diff --git a/sound/oss/sb_ess.c b/sound/oss/sb_ess.c
index b47a69026f1b..57f7d25a2cd3 100644
--- a/sound/oss/sb_ess.c
+++ b/sound/oss/sb_ess.c
@@ -604,7 +604,7 @@ static void ess_audio_output_block_audio2
ess_chgmixer (devc, 0x78, 0x03, 0x03); /* Go */
devc->irq_mode_16 = IMODE_OUTPUT;
- devc->intr_active_16 = 1;
+ devc->intr_active_16 = 1;
}
static void ess_audio_output_block
@@ -1183,17 +1183,12 @@ FKS_test (devc);
chip = "ES1688";
}
- printk ( KERN_INFO "ESS chip %s %s%s\n"
- , chip
- , ( devc->sbmo.esstype == ESSTYPE_DETECT || devc->sbmo.esstype == ESSTYPE_LIKE20
- ? "detected"
- : "specified"
- )
- , ( devc->sbmo.esstype == ESSTYPE_LIKE20
- ? " (kernel 2.0 compatible)"
- : ""
- )
- );
+ printk(KERN_INFO "ESS chip %s %s%s\n", chip,
+ (devc->sbmo.esstype == ESSTYPE_DETECT ||
+ devc->sbmo.esstype == ESSTYPE_LIKE20) ?
+ "detected" : "specified",
+ devc->sbmo.esstype == ESSTYPE_LIKE20 ?
+ " (kernel 2.0 compatible)" : "");
sprintf(name,"ESS %s AudioDrive (rev %d)", chip, ess_minor & 0x0f);
} else {
diff --git a/sound/oss/sb_midi.c b/sound/oss/sb_midi.c
index f139028e85c0..551ee7557b4e 100644
--- a/sound/oss/sb_midi.c
+++ b/sound/oss/sb_midi.c
@@ -179,14 +179,14 @@ void sb_dsp_midi_init(sb_devc * devc, struct module *owner)
{
printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
sound_unload_mididev(dev);
- return;
+ return;
}
memcpy((char *) midi_devs[dev], (char *) &sb_midi_operations,
sizeof(struct midi_operations));
if (owner)
- midi_devs[dev]->owner = owner;
-
+ midi_devs[dev]->owner = owner;
+
midi_devs[dev]->devc = devc;
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
index c0eea1dfe90f..f19da4b47c1d 100644
--- a/sound/oss/sequencer.c
+++ b/sound/oss/sequencer.c
@@ -681,13 +681,8 @@ static int seq_timing_event(unsigned char *event_rec)
break;
case TMR_ECHO:
- if (seq_mode == SEQ_2)
- seq_copy_to_input(event_rec, 8);
- else
- {
- parm = (parm << 8 | SEQ_ECHO);
- seq_copy_to_input((unsigned char *) &parm, 4);
- }
+ parm = (parm << 8 | SEQ_ECHO);
+ seq_copy_to_input((unsigned char *) &parm, 4);
break;
default:;
@@ -1324,7 +1319,6 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
int mode = translate_mode(file);
struct synth_info inf;
struct seq_event_rec event_rec;
- unsigned long flags;
int __user *p = arg;
orig_dev = dev = dev >> 4;
@@ -1479,9 +1473,7 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
case SNDCTL_SEQ_OUTOFBAND:
if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
return -EFAULT;
- spin_lock_irqsave(&lock,flags);
play_event(event_rec.arr);
- spin_unlock_irqrestore(&lock,flags);
return 0;
case SNDCTL_MIDI_INFO:
diff --git a/sound/oss/sys_timer.c b/sound/oss/sys_timer.c
index 9f039831114c..2226dda0eff0 100644
--- a/sound/oss/sys_timer.c
+++ b/sound/oss/sys_timer.c
@@ -50,29 +50,24 @@ tmr2ticks(int tmr_value)
static void
poll_def_tmr(unsigned long dummy)
{
-	if (opened)
-	  {
-	    {
-	      def_tmr.expires = (1) + jiffies;
-	      add_timer(&def_tmr);
-	    }
-	  if (tmr_running)
-	    {
-	      spin_lock(&lock);
-	      tmr_ctr++;
-	      curr_ticks = ticks_offs + tmr2ticks(tmr_ctr);
-
-	      if (curr_ticks >= next_event_time)
-		{
-		  next_event_time = (unsigned long) -1;
-		  sequencer_timer(0);
-		}
-	      spin_unlock(&lock);
-	    }
-	  }
+	if (!opened)
+		return;
+	def_tmr.expires = (1) + jiffies;
+	add_timer(&def_tmr);
+	if (!tmr_running)
+		return;
+	spin_lock(&lock);
+	tmr_ctr++;
+	curr_ticks = ticks_offs + tmr2ticks(tmr_ctr);
+	if (curr_ticks >= next_event_time) {
+		next_event_time = (unsigned long) -1;
+		sequencer_timer(0);
+	}
+
+	spin_unlock(&lock);
}
static void
diff --git a/sound/oss/v_midi.c b/sound/oss/v_midi.c
index f0b4151d9b17..fc0ba276cc8f 100644
--- a/sound/oss/v_midi.c
+++ b/sound/oss/v_midi.c
@@ -49,13 +49,13 @@ static int v_midi_open (int dev, int mode,
unsigned long flags;
if (devc == NULL)
- return -(ENXIO);
+ return -ENXIO;
spin_lock_irqsave(&devc->lock,flags);
if (devc->opened)
{
spin_unlock_irqrestore(&devc->lock,flags);
- return -(EBUSY);
+ return -EBUSY;
}
devc->opened = 1;
spin_unlock_irqrestore(&devc->lock,flags);
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 5ee2f17c287c..82259ca61e64 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -177,6 +177,7 @@ static const struct ac97_codec_id snd_ac97_codec_ids[] = {
{ 0x54524123, 0xffffffff, "TR28602", NULL, NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)]
{ 0x54584e03, 0xffffffff, "TLV320AIC27", NULL, NULL },
{ 0x54584e20, 0xffffffff, "TLC320AD9xC", NULL, NULL },
+{ 0x56494120, 0xfffffff0, "VIA1613", patch_vt1613, NULL },
{ 0x56494161, 0xffffffff, "VIA1612A", NULL, NULL }, // modified ICE1232 with S/PDIF
{ 0x56494170, 0xffffffff, "VIA1617A", patch_vt1617a, NULL }, // modified VT1616 with S/PDIF
{ 0x56494182, 0xffffffff, "VIA1618", patch_vt1618, NULL },
@@ -2901,7 +2902,8 @@ static int apply_quirk_str(struct snd_ac97 *ac97, const char *typestr)
* Return: Zero if successful, or a negative error code on failure.
*/
-int snd_ac97_tune_hardware(struct snd_ac97 *ac97, struct ac97_quirk *quirk, const char *override)
+int snd_ac97_tune_hardware(struct snd_ac97 *ac97,
+ const struct ac97_quirk *quirk, const char *override)
{
int result;
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index ceaac1c41906..f4234edb878c 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -3352,6 +3352,33 @@ static int patch_cm9780(struct snd_ac97 *ac97)
}
/*
+ * VIA VT1613 codec
+ */
+static const struct snd_kcontrol_new snd_ac97_controls_vt1613[] = {
+AC97_SINGLE("DC Offset removal", 0x5a, 10, 1, 0),
+};
+
+static int patch_vt1613_specific(struct snd_ac97 *ac97)
+{
+ return patch_build_controls(ac97, &snd_ac97_controls_vt1613[0],
+ ARRAY_SIZE(snd_ac97_controls_vt1613));
+};
+
+static const struct snd_ac97_build_ops patch_vt1613_ops = {
+ .build_specific = patch_vt1613_specific
+};
+
+static int patch_vt1613(struct snd_ac97 *ac97)
+{
+ ac97->build_ops = &patch_vt1613_ops;
+
+ ac97->flags |= AC97_HAS_NO_VIDEO;
+ ac97->caps |= AC97_BC_HEADPHONE;
+
+ return 0;
+}
+
+/*
* VIA VT1616 codec
*/
static const struct snd_kcontrol_new snd_ac97_controls_vt1616[] = {
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 850a8c984c25..66ddd981d1d5 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -747,7 +747,7 @@ snd_ad1889_proc_init(struct snd_ad1889 *chip)
snd_info_set_text_ops(entry, chip, snd_ad1889_proc_read);
}
-static struct ac97_quirk ac97_quirks[] = {
+static const struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x11d4, /* AD */
.subdevice = 0x1889, /* AD1889 */
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index e5cd7be85355..1039eccbb895 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -2376,7 +2376,7 @@ static int snd_asihpi_cmode_add(struct snd_card_asihpi *asihpi,
/*------------------------------------------------------------
Sampleclock source controls
------------------------------------------------------------*/
-static const char const *sampleclock_sources[] = {
+static const char * const sampleclock_sources[] = {
"N/A", "Local PLL", "Digital Sync", "Word External", "Word Header",
"SMPTE", "Digital1", "Auto", "Network", "Invalid",
"Prev Module", "BLU-Link",
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index d5f15c9bbeda..42a20c806b39 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -1390,7 +1390,7 @@ static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id)
* ac97 mixer section
*/
-static struct ac97_quirk ac97_quirks[] = {
+static const struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x103c,
.subdevice = 0x006b,
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index a40a2b4c8fd7..33b2a0af1b59 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1385,8 +1385,8 @@ snd_azf3328_ctrl_codec_activity(struct snd_azf3328 *chip,
.running)
&& (!chip->codecs[peer_codecs[codec_type].other2]
.running));
- }
- if (call_function)
+ }
+ if (call_function)
snd_azf3328_ctrl_enable_codecs(chip, enable);
/* ...and adjust clock, too
@@ -2126,7 +2126,8 @@ static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
static int
snd_azf3328_pcm(struct snd_azf3328 *chip)
{
-enum { AZF_PCMDEV_STD, AZF_PCMDEV_I2S_OUT, NUM_AZF_PCMDEVS }; /* pcm devices */
+ /* pcm devices */
+ enum { AZF_PCMDEV_STD, AZF_PCMDEV_I2S_OUT, NUM_AZF_PCMDEVS };
struct snd_pcm *pcm;
int err;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 1d0f2cad2f5a..6cf464d9043d 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -2062,7 +2062,7 @@ static int snd_cmipci_get_volume(struct snd_kcontrol *kcontrol,
val = (snd_cmipci_mixer_read(cm, reg.right_reg) >> reg.right_shift) & reg.mask;
if (reg.invert)
val = reg.mask - val;
- ucontrol->value.integer.value[1] = val;
+ ucontrol->value.integer.value[1] = val;
}
spin_unlock_irq(&cm->reg_lock);
return 0;
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 802c33f1cc59..963b912550d4 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -43,7 +43,7 @@ static char *ac97_quirk;
module_param(ac97_quirk, charp, 0444);
MODULE_PARM_DESC(ac97_quirk, "AC'97 board specific workarounds.");
-static struct ac97_quirk ac97_quirks[] = {
+static const struct ac97_quirk ac97_quirks[] = {
#if 0 /* Not yet confirmed if all 5536 boards are HP only */
{
.subvendor = PCI_VENDOR_ID_AMD,
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index a962de03ebb6..862db9a0b041 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1283,12 +1283,14 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
static int snd_echo_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct echoaudio *chip;
+ struct echoaudio *chip = snd_kcontrol_chip(kcontrol);
+ unsigned int out = ucontrol->id.index / num_busses_in(chip);
+ unsigned int in = ucontrol->id.index % num_busses_in(chip);
- chip = snd_kcontrol_chip(kcontrol);
- ucontrol->value.integer.value[0] =
- chip->monitor_gain[ucontrol->id.index / num_busses_in(chip)]
- [ucontrol->id.index % num_busses_in(chip)];
+ if (out >= ECHO_MAXAUDIOOUTPUTS || in >= ECHO_MAXAUDIOINPUTS)
+ return -EINVAL;
+
+ ucontrol->value.integer.value[0] = chip->monitor_gain[out][in];
return 0;
}
@@ -1297,12 +1299,14 @@ static int snd_echo_mixer_put(struct snd_kcontrol *kcontrol,
{
struct echoaudio *chip;
int changed, gain;
- short out, in;
+ unsigned int out, in;
changed = 0;
chip = snd_kcontrol_chip(kcontrol);
out = ucontrol->id.index / num_busses_in(chip);
in = ucontrol->id.index % num_busses_in(chip);
+ if (out >= ECHO_MAXAUDIOOUTPUTS || in >= ECHO_MAXAUDIOINPUTS)
+ return -EINVAL;
gain = ucontrol->value.integer.value[0];
if (gain < ECHOGAIN_MINOUT || gain > ECHOGAIN_MAXOUT)
return -EINVAL;
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 37d0220a094c..db7a2e5e4a14 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
}
#endif
- strcpy(card->driver, emu->card_capabilities->driver);
- strcpy(card->shortname, emu->card_capabilities->name);
+ strlcpy(card->driver, emu->card_capabilities->driver,
+ sizeof(card->driver));
+ strlcpy(card->shortname, emu->card_capabilities->name,
+ sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
"%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
index 874cd76c7b7f..d2c7ea3a7610 100644
--- a/sound/pci/emu10k1/emu10k1_callback.c
+++ b/sound/pci/emu10k1/emu10k1_callback.c
@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
snd_emu10k1_ptr_write(hw, Z2, ch, 0);
/* invalidate maps */
- temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
+ temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
#if 0
@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
snd_emu10k1_ptr_write(hw, CDF, ch, sample);
/* invalidate maps */
- temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
+	temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index b4458a630a7c..a4548147c621 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
- silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
+ silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
for (ch = 0; ch < NUM_G; ch++) {
snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
}
+ if (emu->address_mode == 0) {
+ /* use 16M in 4G */
+ outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
+ }
+
return 0;
}
@@ -707,6 +712,7 @@ static int emu1010_firmware_thread(void *data)
{
struct snd_emu10k1 *emu = data;
u32 tmp, tmp2, reg;
+ u32 last_reg = 0;
int err;
for (;;) {
@@ -782,7 +788,15 @@ static int emu1010_firmware_thread(void *data)
msleep(10);
/* Unmute all. Default is muted after a firmware load */
snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+ } else if (!reg && last_reg) {
+ /* Audio Dock removed */
+ dev_info(emu->card->dev,
+ "emu1010: Audio Dock detached\n");
+ /* Unmute all */
+ snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
}
+
+ last_reg = reg;
}
dev_info(emu->card->dev, "emu1010: firmware thread stopping\n");
return 0;
@@ -1325,6 +1339,22 @@ static int snd_emu10k1_dev_free(struct snd_device *device)
}
static struct snd_emu_chip_details emu_chip_details[] = {
+ /* Audigy 5/Rx SB1550 */
+ /* Tested by michael@gernoth.net 28 Mar 2015 */
+ /* DSP: CA10300-IAT LF
+ * DAC: Cirrus Logic CS4382-KQZ
+ * ADC: Philips 1361T
+ * AC97: Sigmatel STAC9750
+ * CA0151: None
+ */
+ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x10241102,
+ .driver = "Audigy2", .name = "SB Audigy 5/Rx [SB1550]",
+ .id = "Audigy2",
+ .emu10k2_chip = 1,
+ .ca0108_chip = 1,
+ .spk71 = 1,
+ .adc_1361t = 1, /* 24 bit capture instead of 16bit */
+ .ac97_chip = 1},
/* Audigy4 (Not PRO) SB0610 */
/* Tested by James@superbug.co.uk 4th April 2006 */
/* A_IOCFG bits
@@ -1421,7 +1451,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
*
*/
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
- .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
+ .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0108_chip = 1,
@@ -1571,7 +1601,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
.adc_1361t = 1, /* 24 bit capture instead of 16bit */
.ac97_chip = 1} ,
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
- .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
+ .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
.id = "Audigy2",
.emu10k2_chip = 1,
.ca0102_chip = 1,
@@ -1877,8 +1907,10 @@ int snd_emu10k1_create(struct snd_card *card,
is_audigy = emu->audigy = c->emu10k2_chip;
+ /* set addressing mode */
+ emu->address_mode = is_audigy ? 0 : 1;
/* set the DMA transfer mask */
- emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
+ emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
dev_err(card->dev,
@@ -1903,7 +1935,7 @@ int snd_emu10k1_create(struct snd_card *card,
emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- 32 * 1024, &emu->ptb_pages) < 0) {
+ (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
err = -ENOMEM;
goto error;
}
@@ -2002,8 +2034,8 @@ int snd_emu10k1_create(struct snd_card *card,
/* Clear silent pages and set up pointers */
memset(emu->silent_page.area, 0, PAGE_SIZE);
- silent_page = emu->silent_page.addr << 1;
- for (idx = 0; idx < MAXPAGES; idx++)
+ silent_page = emu->silent_page.addr << emu->address_mode;
+ for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
/* set up voice indices */
diff --git a/sound/pci/emu10k1/emu10k1_synth.c b/sound/pci/emu10k1/emu10k1_synth.c
index 4c41c903a840..5457d5613f6b 100644
--- a/sound/pci/emu10k1/emu10k1_synth.c
+++ b/sound/pci/emu10k1/emu10k1_synth.c
@@ -29,8 +29,9 @@ MODULE_LICENSE("GPL");
/*
* create a new hardware dependent device for Emu10k1
*/
-static int snd_emu10k1_synth_new_device(struct snd_seq_device *dev)
+static int snd_emu10k1_synth_probe(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_emux *emux;
struct snd_emu10k1 *hw;
struct snd_emu10k1_synth_arg *arg;
@@ -79,8 +80,9 @@ static int snd_emu10k1_synth_new_device(struct snd_seq_device *dev)
return 0;
}
-static int snd_emu10k1_synth_delete_device(struct snd_seq_device *dev)
+static int snd_emu10k1_synth_remove(struct device *_dev)
{
+ struct snd_seq_device *dev = to_seq_dev(_dev);
struct snd_emux *emux;
struct snd_emu10k1 *hw;
unsigned long flags;
@@ -104,21 +106,14 @@ static int snd_emu10k1_synth_delete_device(struct snd_seq_device *dev)
* INIT part
*/
-static int __init alsa_emu10k1_synth_init(void)
-{
-
- static struct snd_seq_dev_ops ops = {
- snd_emu10k1_synth_new_device,
- snd_emu10k1_synth_delete_device,
- };
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH, &ops,
- sizeof(struct snd_emu10k1_synth_arg));
-}
-
-static void __exit alsa_emu10k1_synth_exit(void)
-{
- snd_seq_device_unregister_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH);
-}
-
-module_init(alsa_emu10k1_synth_init)
-module_exit(alsa_emu10k1_synth_exit)
+static struct snd_seq_driver emu10k1_synth_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .probe = snd_emu10k1_synth_probe,
+ .remove = snd_emu10k1_synth_remove,
+ },
+ .id = SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH,
+ .argsize = sizeof(struct snd_emu10k1_synth_arg),
+};
+
+module_snd_seq_driver(emu10k1_synth_driver);
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 1de33025669a..55e57166256e 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -806,6 +806,108 @@ static struct snd_kcontrol_new snd_emu1010_internal_clock =
.put = snd_emu1010_internal_clock_put
};
+static int snd_emu1010_optical_out_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char * const texts[2] = {
+ "SPDIF", "ADAT"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
+}
+
+static int snd_emu1010_optical_out_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = emu->emu1010.optical_out;
+ return 0;
+}
+
+static int snd_emu1010_optical_out_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
+ unsigned int val;
+ u32 tmp;
+ int change = 0;
+
+ val = ucontrol->value.enumerated.item[0];
+ /* Limit: uinfo->value.enumerated.items = 2; */
+ if (val >= 2)
+ return -EINVAL;
+ change = (emu->emu1010.optical_out != val);
+ if (change) {
+ emu->emu1010.optical_out = val;
+ tmp = (emu->emu1010.optical_in ? EMU_HANA_OPTICAL_IN_ADAT : 0) |
+ (emu->emu1010.optical_out ? EMU_HANA_OPTICAL_OUT_ADAT : 0);
+ snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, tmp);
+ }
+ return change;
+}
+
+static struct snd_kcontrol_new snd_emu1010_optical_out = {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Optical Output Mode",
+ .count = 1,
+ .info = snd_emu1010_optical_out_info,
+ .get = snd_emu1010_optical_out_get,
+ .put = snd_emu1010_optical_out_put
+};
+
+static int snd_emu1010_optical_in_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char * const texts[2] = {
+ "SPDIF", "ADAT"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
+}
+
+static int snd_emu1010_optical_in_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = emu->emu1010.optical_in;
+ return 0;
+}
+
+static int snd_emu1010_optical_in_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
+ unsigned int val;
+ u32 tmp;
+ int change = 0;
+
+ val = ucontrol->value.enumerated.item[0];
+ /* Limit: uinfo->value.enumerated.items = 2; */
+ if (val >= 2)
+ return -EINVAL;
+ change = (emu->emu1010.optical_in != val);
+ if (change) {
+ emu->emu1010.optical_in = val;
+ tmp = (emu->emu1010.optical_in ? EMU_HANA_OPTICAL_IN_ADAT : 0) |
+ (emu->emu1010.optical_out ? EMU_HANA_OPTICAL_OUT_ADAT : 0);
+ snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, tmp);
+ }
+ return change;
+}
+
+static struct snd_kcontrol_new snd_emu1010_optical_in = {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Optical Input Mode",
+ .count = 1,
+ .info = snd_emu1010_optical_in_info,
+ .get = snd_emu1010_optical_in_get,
+ .put = snd_emu1010_optical_in_put
+};
+
static int snd_audigy_i2c_capture_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -2051,6 +2153,14 @@ int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
snd_ctl_new1(&snd_emu1010_internal_clock, emu));
if (err < 0)
return err;
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&snd_emu1010_optical_out, emu));
+ if (err < 0)
+ return err;
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&snd_emu1010_optical_in, emu));
+ if (err < 0)
+ return err;
} else if (emu->card_capabilities->emu_model) {
/* all other e-mu cards for now */
@@ -2086,6 +2196,14 @@ int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
snd_ctl_new1(&snd_emu1010_internal_clock, emu));
if (err < 0)
return err;
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&snd_emu1010_optical_out, emu));
+ if (err < 0)
+ return err;
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&snd_emu1010_optical_in, emu));
+ if (err < 0)
+ return err;
}
if ( emu->card_capabilities->i2c_adc) {
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 0dc07385af0e..14a305bd8a98 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
snd_emu10k1_ptr_write(emu, Z1, voice, 0);
snd_emu10k1_ptr_write(emu, Z2, voice, 0);
/* invalidate maps */
- silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
+ silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
/* modulation envelope */
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index 2ca9f2e93139..53745f4c2bf5 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
struct snd_emu10k1 *emu = entry->private_data;
u32 value;
u32 value2;
- unsigned long flags;
u32 rate;
if (emu->card_capabilities->emu_model) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x38, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
if ((value & 0x1) == 0) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x2a, &value);
snd_emu1010_fpga_read(emu, 0x2b, &value2);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
rate = 0x1770000 / (((value << 5) | value2)+1);
snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
} else {
snd_iprintf(buffer, "ADAT Unlocked\n");
}
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x20, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
if ((value & 0x4) == 0) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x28, &value);
snd_emu1010_fpga_read(emu, 0x29, &value2);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
rate = 0x1770000 / (((value << 5) | value2)+1);
snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
} else {
@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
{
struct snd_emu10k1 *emu = entry->private_data;
u32 value;
- unsigned long flags;
int i;
snd_iprintf(buffer, "EMU1010 Registers:\n\n");
for(i = 0; i < 0x40; i+=1) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, i, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
}
}
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index c68e6dd2fa67..4f1f69be1865 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -34,10 +34,11 @@
* aligned pages in others
*/
#define __set_ptb_entry(emu,page,addr) \
- (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
+ (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
-#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES)
+#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
+#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
}
page = blk->mapped_page + blk->pages;
}
- size = MAX_ALIGN_PAGES - page;
+ size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
if (size >= max_size) {
*nextp = pos;
return page;
@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
q = get_emu10k1_memblk(p, mapped_link);
end_page = q->mapped_page;
} else
- end_page = MAX_ALIGN_PAGES;
+ end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
/* remove links */
list_del(&blk->mapped_link);
@@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
if (snd_BUG_ON(!emu))
return NULL;
if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
- runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
+ runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
return NULL;
hdr = emu->memhdr;
if (snd_BUG_ON(!hdr))
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 7f0f2c5a4e97..a5ed1c181784 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -5,6 +5,7 @@ config SND_HDA
select SND_PCM
select SND_VMASTER
select SND_KCTL_JACK
+ select SND_HDA_CORE
config SND_HDA_INTEL
tristate "HD Audio PCI"
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index 194f30935e77..af78fb33a4fd 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -4,13 +4,12 @@ snd-hda-tegra-objs := hda_tegra.o
# for haswell power well
snd-hda-intel-$(CONFIG_SND_HDA_I915) += hda_i915.o
-snd-hda-codec-y := hda_codec.o hda_jack.o hda_auto_parser.o hda_sysfs.o
+snd-hda-codec-y := hda_bind.o hda_codec.o hda_jack.o hda_auto_parser.o hda_sysfs.o
snd-hda-codec-$(CONFIG_PROC_FS) += hda_proc.o
snd-hda-codec-$(CONFIG_SND_HDA_HWDEP) += hda_hwdep.o
snd-hda-codec-$(CONFIG_SND_HDA_INPUT_BEEP) += hda_beep.o
# for trace-points
-CFLAGS_hda_codec.o := -I$(src)
CFLAGS_hda_controller.o := -I$(src)
snd-hda-codec-generic-objs := hda_generic.o
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 3f8706bb3d16..03b7399bb7f0 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -172,7 +172,7 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
const hda_nid_t *ignore_nids,
unsigned int cond_flags)
{
- hda_nid_t nid, end_nid;
+ hda_nid_t nid;
short seq, assoc_line_out;
struct auto_out_pin line_out[ARRAY_SIZE(cfg->line_out_pins)];
struct auto_out_pin speaker_out[ARRAY_SIZE(cfg->speaker_pins)];
@@ -189,8 +189,7 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
memset(hp_out, 0, sizeof(hp_out));
assoc_line_out = 0;
- end_nid = codec->start_nid + codec->num_nodes;
- for (nid = codec->start_nid; nid < end_nid; nid++) {
+ for_each_hda_codec_node(nid, codec) {
unsigned int wid_caps = get_wcaps(codec, nid);
unsigned int wid_type = get_wcaps_type(wid_caps);
unsigned int def_conf;
@@ -410,7 +409,7 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
* debug prints of the parsed results
*/
codec_info(codec, "autoconfig for %s: line_outs=%d (0x%x/0x%x/0x%x/0x%x/0x%x) type:%s\n",
- codec->chip_name, cfg->line_outs, cfg->line_out_pins[0],
+ codec->core.chip_name, cfg->line_outs, cfg->line_out_pins[0],
cfg->line_out_pins[1], cfg->line_out_pins[2],
cfg->line_out_pins[3], cfg->line_out_pins[4],
cfg->line_out_type == AUTO_PIN_HP_OUT ? "hp" :
@@ -836,33 +835,33 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
if (action != HDA_FIXUP_ACT_PRE_PROBE || !fix->v.pins)
break;
codec_dbg(codec, "%s: Apply pincfg for %s\n",
- codec->chip_name, modelname);
+ codec->core.chip_name, modelname);
snd_hda_apply_pincfgs(codec, fix->v.pins);
break;
case HDA_FIXUP_VERBS:
if (action != HDA_FIXUP_ACT_PROBE || !fix->v.verbs)
break;
codec_dbg(codec, "%s: Apply fix-verbs for %s\n",
- codec->chip_name, modelname);
+ codec->core.chip_name, modelname);
snd_hda_add_verbs(codec, fix->v.verbs);
break;
case HDA_FIXUP_FUNC:
if (!fix->v.func)
break;
codec_dbg(codec, "%s: Apply fix-func for %s\n",
- codec->chip_name, modelname);
+ codec->core.chip_name, modelname);
fix->v.func(codec, fix, action);
break;
case HDA_FIXUP_PINCTLS:
if (action != HDA_FIXUP_ACT_PROBE || !fix->v.pins)
break;
codec_dbg(codec, "%s: Apply pinctl for %s\n",
- codec->chip_name, modelname);
+ codec->core.chip_name, modelname);
set_pin_targets(codec, fix->v.pins);
break;
default:
codec_err(codec, "%s: Invalid fixup type %d\n",
- codec->chip_name, fix->type);
+ codec->core.chip_name, fix->type);
break;
}
if (!fix->chained || fix->chained_before)
@@ -912,16 +911,16 @@ void snd_hda_pick_pin_fixup(struct hda_codec *codec,
return;
for (pq = pin_quirk; pq->subvendor; pq++) {
- if ((codec->subsystem_id & 0xffff0000) != (pq->subvendor << 16))
+ if ((codec->core.subsystem_id & 0xffff0000) != (pq->subvendor << 16))
continue;
- if (codec->vendor_id != pq->codec)
+ if (codec->core.vendor_id != pq->codec)
continue;
if (pin_config_match(codec, pq->pins)) {
codec->fixup_id = pq->value;
#ifdef CONFIG_SND_DEBUG_VERBOSE
codec->fixup_name = pq->name;
codec_dbg(codec, "%s: picked fixup %s (pin match)\n",
- codec->chip_name, codec->fixup_name);
+ codec->core.chip_name, codec->fixup_name);
#endif
codec->fixup_list = fixlist;
return;
@@ -963,7 +962,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
codec->fixup_name = NULL;
codec->fixup_id = HDA_FIXUP_ID_NO_FIXUP;
codec_dbg(codec, "%s: picked no fixup (nofixup specified)\n",
- codec->chip_name);
+ codec->core.chip_name);
return;
}
@@ -974,7 +973,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
codec->fixup_name = models->name;
codec->fixup_list = fixlist;
codec_dbg(codec, "%s: picked fixup %s (model specified)\n",
- codec->chip_name, codec->fixup_name);
+ codec->core.chip_name, codec->fixup_name);
return;
}
models++;
@@ -987,7 +986,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
#ifdef CONFIG_SND_DEBUG_VERBOSE
name = q->name;
codec_dbg(codec, "%s: picked fixup %s (PCI SSID%s)\n",
- codec->chip_name, name, q->subdevice_mask ? "" : " - vendor generic");
+ codec->core.chip_name, name, q->subdevice_mask ? "" : " - vendor generic");
#endif
}
}
@@ -996,12 +995,12 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
unsigned int vendorid =
q->subdevice | (q->subvendor << 16);
unsigned int mask = 0xffff0000 | q->subdevice_mask;
- if ((codec->subsystem_id & mask) == (vendorid & mask)) {
+ if ((codec->core.subsystem_id & mask) == (vendorid & mask)) {
id = q->value;
#ifdef CONFIG_SND_DEBUG_VERBOSE
name = q->name;
codec_dbg(codec, "%s: picked fixup %s (codec SSID)\n",
- codec->chip_name, name);
+ codec->core.chip_name, name);
#endif
break;
}
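
Throughout the auto-parser hunks above, identity fields are now read through codec->core.* — the codec embeds a generic core object, and code that only holds the core pointer recovers the outer structure with container_of(), as the new hda_bind.c further below does. A standalone sketch of that embedding pattern, with made-up struct and field names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_device {
	unsigned int vendor_id;
	const char *chip_name;
};

struct codec {
	struct core_device core;   /* generic part, shared with the core layer */
	int fixup_id;              /* driver-specific state stays outside */
};

static void print_id(struct core_device *dev)
{
	struct codec *c = container_of(dev, struct codec, core);

	printf("%s: vendor %#x, fixup %d\n",
	       c->core.chip_name, c->core.vendor_id, c->fixup_id);
}

int main(void)
{
	struct codec c = {
		.core = { .vendor_id = 0x10ec0000, .chip_name = "example" },
		.fixup_id = -1,
	};

	print_id(&c.core);
	return 0;
}
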
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 1e7de08e77cb..3364dc0fdeab 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -33,30 +33,36 @@ enum {
DIGBEEP_HZ_MAX = 12000000, /* 12 KHz */
};
-static void snd_hda_generate_beep(struct work_struct *work)
+/* generate or stop tone */
+static void generate_tone(struct hda_beep *beep, int tone)
{
- struct hda_beep *beep =
- container_of(work, struct hda_beep, beep_work);
struct hda_codec *codec = beep->codec;
- int tone;
- if (!beep->enabled)
- return;
-
- tone = beep->tone;
if (tone && !beep->playing) {
snd_hda_power_up(codec);
+ if (beep->power_hook)
+ beep->power_hook(beep, true);
beep->playing = 1;
}
- /* generate tone */
snd_hda_codec_write(codec, beep->nid, 0,
AC_VERB_SET_BEEP_CONTROL, tone);
if (!tone && beep->playing) {
beep->playing = 0;
+ if (beep->power_hook)
+ beep->power_hook(beep, false);
snd_hda_power_down(codec);
}
}
+static void snd_hda_generate_beep(struct work_struct *work)
+{
+ struct hda_beep *beep =
+ container_of(work, struct hda_beep, beep_work);
+
+ if (beep->enabled)
+ generate_tone(beep, beep->tone);
+}
+
/* (non-standard) Linear beep tone calculation for IDT/STAC codecs
*
* The tone frequency of beep generator on IDT/STAC codecs is
@@ -130,10 +136,7 @@ static void turn_off_beep(struct hda_beep *beep)
cancel_work_sync(&beep->beep_work);
if (beep->playing) {
/* turn off beep */
- snd_hda_codec_write(beep->codec, beep->nid, 0,
- AC_VERB_SET_BEEP_CONTROL, 0);
- beep->playing = 0;
- snd_hda_power_down(beep->codec);
+ generate_tone(beep, 0);
}
}
@@ -160,15 +163,15 @@ static int snd_hda_do_attach(struct hda_beep *beep)
input_dev->name = "HDA Digital PCBeep";
input_dev->phys = beep->phys;
input_dev->id.bustype = BUS_PCI;
+ input_dev->dev.parent = &codec->card->card_dev;
- input_dev->id.vendor = codec->vendor_id >> 16;
- input_dev->id.product = codec->vendor_id & 0xffff;
+ input_dev->id.vendor = codec->core.vendor_id >> 16;
+ input_dev->id.product = codec->core.vendor_id & 0xffff;
input_dev->id.version = 0x01;
input_dev->evbit[0] = BIT_MASK(EV_SND);
input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
input_dev->event = snd_hda_beep_event;
- input_dev->dev.parent = &codec->dev;
input_set_drvdata(input_dev, beep);
beep->dev = input_dev;
@@ -224,7 +227,7 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
if (beep == NULL)
return -ENOMEM;
snprintf(beep->phys, sizeof(beep->phys),
- "card%d/codec#%d/beep0", codec->bus->card->number, codec->addr);
+ "card%d/codec#%d/beep0", codec->card->number, codec->addr);
/* enable linear scale */
snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_DIGI_CONVERT_2, 0x01);
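
The hda_beep.c change above folds tone start and stop into a single generate_tone() helper so that power-up/power-down and the new optional power_hook are handled in one place, and turn_off_beep() simply calls it with tone 0. A compilable userspace sketch of the same shape (the names and the printf standing in for the codec verb write are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct beep {
	bool playing;
	void (*power_hook)(struct beep *b, bool on);   /* may be NULL */
};

static void generate_tone(struct beep *b, int tone)
{
	if (tone && !b->playing) {
		if (b->power_hook)
			b->power_hook(b, true);
		b->playing = true;
	}
	printf("write beep control: %d\n", tone);   /* stands in for the verb */
	if (!tone && b->playing) {
		b->playing = false;
		if (b->power_hook)
			b->power_hook(b, false);
	}
}

static void hook(struct beep *b, bool on)
{
	printf("power %s\n", on ? "up" : "down");
}

int main(void)
{
	struct beep b = { .power_hook = hook };

	generate_tone(&b, 440);   /* start */
	generate_tone(&b, 0);     /* stop; what turn_off_beep() now reuses */
	return 0;
}
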
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index a63b5e077332..46524ff7e79e 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -40,6 +40,7 @@ struct hda_beep {
unsigned int playing:1;
struct work_struct beep_work; /* scheduled task for beep event */
struct mutex mutex;
+ void (*power_hook)(struct hda_beep *beep, bool on);
};
#ifdef CONFIG_SND_HDA_INPUT_BEEP
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
new file mode 100644
index 000000000000..00aa31c5f08e
--- /dev/null
+++ b/sound/pci/hda/hda_bind.c
@@ -0,0 +1,273 @@
+/*
+ * HD-audio codec driver binding
+ * Copyright (c) Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <sound/core.h>
+#include "hda_codec.h"
+#include "hda_local.h"
+
+/*
+ * find a matching codec preset
+ */
+static int hda_codec_match(struct hdac_device *dev, struct hdac_driver *drv)
+{
+ struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+ struct hda_codec_driver *driver =
+ container_of(drv, struct hda_codec_driver, core);
+ const struct hda_codec_preset *preset;
+ /* check probe_id instead of vendor_id if set */
+ u32 id = codec->probe_id ? codec->probe_id : codec->core.vendor_id;
+
+ for (preset = driver->preset; preset->id; preset++) {
+ u32 mask = preset->mask;
+
+ if (preset->afg && preset->afg != codec->core.afg)
+ continue;
+ if (preset->mfg && preset->mfg != codec->core.mfg)
+ continue;
+ if (!mask)
+ mask = ~0;
+ if (preset->id == (id & mask) &&
+ (!preset->rev || preset->rev == codec->core.revision_id)) {
+ codec->preset = preset;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* process an unsolicited event */
+static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
+{
+ struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+
+ if (codec->patch_ops.unsol_event)
+ codec->patch_ops.unsol_event(codec, ev);
+}
+
+/* reset the codec name from the preset */
+static int codec_refresh_name(struct hda_codec *codec, const char *name)
+{
+ if (name) {
+ kfree(codec->core.chip_name);
+ codec->core.chip_name = kstrdup(name, GFP_KERNEL);
+ }
+ return codec->core.chip_name ? 0 : -ENOMEM;
+}
+
+static int hda_codec_driver_probe(struct device *dev)
+{
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+ struct module *owner = dev->driver->owner;
+ int err;
+
+ if (WARN_ON(!codec->preset))
+ return -EINVAL;
+
+ err = codec_refresh_name(codec, codec->preset->name);
+ if (err < 0)
+ goto error;
+ err = snd_hdac_regmap_init(&codec->core);
+ if (err < 0)
+ goto error;
+
+ if (!try_module_get(owner)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ err = codec->preset->patch(codec);
+ if (err < 0)
+ goto error_module;
+
+ err = snd_hda_codec_build_pcms(codec);
+ if (err < 0)
+ goto error_module;
+ err = snd_hda_codec_build_controls(codec);
+ if (err < 0)
+ goto error_module;
+ if (codec->card->registered) {
+ err = snd_card_register(codec->card);
+ if (err < 0)
+ goto error_module;
+ snd_hda_codec_register(codec);
+ }
+
+ codec->core.lazy_cache = true;
+ return 0;
+
+ error_module:
+ module_put(owner);
+
+ error:
+ snd_hda_codec_cleanup_for_unbind(codec);
+ return err;
+}
+
+static int hda_codec_driver_remove(struct device *dev)
+{
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+
+ if (codec->patch_ops.free)
+ codec->patch_ops.free(codec);
+ snd_hda_codec_cleanup_for_unbind(codec);
+ module_put(dev->driver->owner);
+ return 0;
+}
+
+static void hda_codec_driver_shutdown(struct device *dev)
+{
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+
+ if (!pm_runtime_suspended(dev) && codec->patch_ops.reboot_notify)
+ codec->patch_ops.reboot_notify(codec);
+}
+
+int __hda_codec_driver_register(struct hda_codec_driver *drv, const char *name,
+ struct module *owner)
+{
+ drv->core.driver.name = name;
+ drv->core.driver.owner = owner;
+ drv->core.driver.bus = &snd_hda_bus_type;
+ drv->core.driver.probe = hda_codec_driver_probe;
+ drv->core.driver.remove = hda_codec_driver_remove;
+ drv->core.driver.shutdown = hda_codec_driver_shutdown;
+ drv->core.driver.pm = &hda_codec_driver_pm;
+ drv->core.type = HDA_DEV_LEGACY;
+ drv->core.match = hda_codec_match;
+ drv->core.unsol_event = hda_codec_unsol_event;
+ return driver_register(&drv->core.driver);
+}
+EXPORT_SYMBOL_GPL(__hda_codec_driver_register);
+
+void hda_codec_driver_unregister(struct hda_codec_driver *drv)
+{
+ driver_unregister(&drv->core.driver);
+}
+EXPORT_SYMBOL_GPL(hda_codec_driver_unregister);
+
+static inline bool codec_probed(struct hda_codec *codec)
+{
+ return device_attach(hda_codec_dev(codec)) > 0 && codec->preset;
+}
+
+/* try to auto-load and bind the codec module */
+static void codec_bind_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+ request_module("snd-hda-codec-id:%08x", codec->core.vendor_id);
+ if (codec_probed(codec))
+ return;
+ request_module("snd-hda-codec-id:%04x*",
+ (codec->core.vendor_id >> 16) & 0xffff);
+ if (codec_probed(codec))
+ return;
+#endif
+}
+
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
+/* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */
+static bool is_likely_hdmi_codec(struct hda_codec *codec)
+{
+ hda_nid_t nid;
+
+ for_each_hda_codec_node(nid, codec) {
+ unsigned int wcaps = get_wcaps(codec, nid);
+ switch (get_wcaps_type(wcaps)) {
+ case AC_WID_AUD_IN:
+ return false; /* HDMI parser supports only HDMI out */
+ case AC_WID_AUD_OUT:
+ if (!(wcaps & AC_WCAP_DIGITAL))
+ return false;
+ break;
+ }
+ }
+ return true;
+}
+#else
+/* no HDMI codec parser support */
+#define is_likely_hdmi_codec(codec) false
+#endif /* CONFIG_SND_HDA_CODEC_HDMI */
+
+static int codec_bind_generic(struct hda_codec *codec)
+{
+ if (codec->probe_id)
+ return -ENODEV;
+
+ if (is_likely_hdmi_codec(codec)) {
+ codec->probe_id = HDA_CODEC_ID_GENERIC_HDMI;
+#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
+ request_module("snd-hda-codec-hdmi");
+#endif
+ if (codec_probed(codec))
+ return 0;
+ }
+
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+ request_module("snd-hda-codec-generic");
+#endif
+ if (codec_probed(codec))
+ return 0;
+ return -ENODEV;
+}
+
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
+#define is_generic_config(codec) \
+ (codec->modelname && !strcmp(codec->modelname, "generic"))
+#else
+#define is_generic_config(codec) 0
+#endif
+
+/**
+ * snd_hda_codec_configure - (Re-)configure the HD-audio codec
+ * @codec: the HDA codec
+ *
+ * Start parsing of the given codec tree and (re-)initialize the whole
+ * patch instance.
+ *
+ * Returns 0 if successful or a negative error code.
+ */
+int snd_hda_codec_configure(struct hda_codec *codec)
+{
+ int err;
+
+ if (is_generic_config(codec))
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ else
+ codec->probe_id = 0;
+
+ err = snd_hdac_device_register(&codec->core);
+ if (err < 0)
+ return err;
+
+ if (!codec->preset)
+ codec_bind_module(codec);
+ if (!codec->preset) {
+ err = codec_bind_generic(codec);
+ if (err < 0) {
+ codec_err(codec, "Unable to bind the codec\n");
+ goto error;
+ }
+ }
+
+ /* audio codec should override the mixer name */
+ if (codec->core.afg || !*codec->card->mixername)
+ snprintf(codec->card->mixername,
+ sizeof(codec->card->mixername), "%s %s",
+ codec->core.vendor_name, codec->core.chip_name);
+ return 0;
+
+ error:
+ snd_hdac_device_unregister(&codec->core);
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
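
The matching loop in hda_codec_match() above walks a zero-terminated preset table, applies each entry's mask (treating 0 as "all bits") to the probed id, and stops at the first hit; if no driver binds, codec_bind_generic() falls back to the HDMI or generic ids. A userspace sketch of that table walk, with made-up table contents:

#include <stdint.h>
#include <stdio.h>

struct preset {
	uint32_t id;
	uint32_t mask;     /* 0 means "match all bits" */
	const char *name;
};

static const struct preset presets[] = {
	{ 0x10ec0262, 0,          "example exact match" },
	{ 0x11060000, 0xffff0000, "example vendor-wide match" },
	{ 0 }                      /* terminator */
};

static const struct preset *match(uint32_t vendor_id)
{
	const struct preset *p;

	for (p = presets; p->id; p++) {
		uint32_t mask = p->mask ? p->mask : ~0u;

		if (p->id == (vendor_id & mask))
			return p;
	}
	return NULL;   /* caller would then try a generic binding */
}

int main(void)
{
	const struct preset *p = match(0x11061234);

	printf("matched: %s\n", p ? p->name : "none");
	return 0;
}
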
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 2fe86d2e1b09..b49feff0a319 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -26,6 +26,8 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/async.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <sound/core.h>
#include "hda_codec.h"
#include <sound/asoundef.h>
@@ -37,97 +39,20 @@
#include "hda_jack.h"
#include <sound/hda_hwdep.h>
-#define CREATE_TRACE_POINTS
-#include "hda_trace.h"
-
-/*
- * vendor / preset table
- */
-
-struct hda_vendor_id {
- unsigned int id;
- const char *name;
-};
-
-/* codec vendor labels */
-static struct hda_vendor_id hda_vendor_ids[] = {
- { 0x1002, "ATI" },
- { 0x1013, "Cirrus Logic" },
- { 0x1057, "Motorola" },
- { 0x1095, "Silicon Image" },
- { 0x10de, "Nvidia" },
- { 0x10ec, "Realtek" },
- { 0x1102, "Creative" },
- { 0x1106, "VIA" },
- { 0x111d, "IDT" },
- { 0x11c1, "LSI" },
- { 0x11d4, "Analog Devices" },
- { 0x13f6, "C-Media" },
- { 0x14f1, "Conexant" },
- { 0x17e8, "Chrontel" },
- { 0x1854, "LG" },
- { 0x1aec, "Wolfson Microelectronics" },
- { 0x1af4, "QEMU" },
- { 0x434d, "C-Media" },
- { 0x8086, "Intel" },
- { 0x8384, "SigmaTel" },
- {} /* terminator */
-};
-
-static DEFINE_MUTEX(preset_mutex);
-static LIST_HEAD(hda_preset_tables);
-
-/**
- * snd_hda_add_codec_preset - Add a codec preset to the chain
- * @preset: codec preset table to add
- */
-int snd_hda_add_codec_preset(struct hda_codec_preset_list *preset)
-{
- mutex_lock(&preset_mutex);
- list_add_tail(&preset->list, &hda_preset_tables);
- mutex_unlock(&preset_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_add_codec_preset);
-
-/**
- * snd_hda_delete_codec_preset - Delete a codec preset from the chain
- * @preset: codec preset table to delete
- */
-int snd_hda_delete_codec_preset(struct hda_codec_preset_list *preset)
-{
- mutex_lock(&preset_mutex);
- list_del(&preset->list);
- mutex_unlock(&preset_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_delete_codec_preset);
-
#ifdef CONFIG_PM
-#define codec_in_pm(codec) ((codec)->in_pm)
-static void hda_power_work(struct work_struct *work);
-static void hda_keep_power_on(struct hda_codec *codec);
-#define hda_codec_is_power_on(codec) ((codec)->power_on)
-
-static void hda_call_pm_notify(struct hda_codec *codec, bool power_up)
-{
- struct hda_bus *bus = codec->bus;
-
- if ((power_up && codec->pm_up_notified) ||
- (!power_up && !codec->pm_up_notified))
- return;
- if (bus->ops.pm_notify)
- bus->ops.pm_notify(bus, power_up);
- codec->pm_up_notified = power_up;
-}
-
+#define codec_in_pm(codec) atomic_read(&(codec)->core.in_pm)
+#define hda_codec_is_power_on(codec) \
+ (!pm_runtime_suspended(hda_codec_dev(codec)))
#else
#define codec_in_pm(codec) 0
-static inline void hda_keep_power_on(struct hda_codec *codec) {}
#define hda_codec_is_power_on(codec) 1
-#define hda_call_pm_notify(codec, state) {}
#endif
+#define codec_has_epss(codec) \
+ ((codec)->core.power_caps & AC_PWRST_EPSS)
+#define codec_has_clkstop(codec) \
+ ((codec)->core.power_caps & AC_PWRST_CLKSTOP)
+
/**
* snd_hda_get_jack_location - Give a location string of the jack
* @cfg: pin default config value
@@ -199,67 +124,32 @@ const char *snd_hda_get_jack_type(u32 cfg)
EXPORT_SYMBOL_GPL(snd_hda_get_jack_type);
/*
- * Compose a 32bit command word to be sent to the HD-audio controller
- */
-static inline unsigned int
-make_codec_cmd(struct hda_codec *codec, hda_nid_t nid, int flags,
- unsigned int verb, unsigned int parm)
-{
- u32 val;
-
- if ((codec->addr & ~0xf) || (nid & ~0x7f) ||
- (verb & ~0xfff) || (parm & ~0xffff)) {
- codec_err(codec, "hda-codec: out of range cmd %x:%x:%x:%x\n",
- codec->addr, nid, verb, parm);
- return ~0;
- }
-
- val = (u32)codec->addr << 28;
- val |= (u32)nid << 20;
- val |= verb << 8;
- val |= parm;
- return val;
-}
-
-/*
- * Send and receive a verb
+ * Send and receive a verb - passed to exec_verb override for hdac_device
*/
-static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd,
- int flags, unsigned int *res)
+static int codec_exec_verb(struct hdac_device *dev, unsigned int cmd,
+ unsigned int flags, unsigned int *res)
{
+ struct hda_codec *codec = container_of(dev, struct hda_codec, core);
struct hda_bus *bus = codec->bus;
int err;
if (cmd == ~0)
return -1;
- if (res)
- *res = -1;
again:
- snd_hda_power_up(codec);
- mutex_lock(&bus->cmd_mutex);
+ snd_hda_power_up_pm(codec);
+ mutex_lock(&bus->core.cmd_mutex);
if (flags & HDA_RW_NO_RESPONSE_FALLBACK)
bus->no_response_fallback = 1;
- for (;;) {
- trace_hda_send_cmd(codec, cmd);
- err = bus->ops.command(bus, cmd);
- if (err != -EAGAIN)
- break;
- /* process pending verbs */
- bus->ops.get_response(bus, codec->addr);
- }
- if (!err && res) {
- *res = bus->ops.get_response(bus, codec->addr);
- trace_hda_get_response(codec, *res);
- }
+ err = snd_hdac_bus_exec_verb_unlocked(&bus->core, codec->core.addr,
+ cmd, res);
bus->no_response_fallback = 0;
- mutex_unlock(&bus->cmd_mutex);
- snd_hda_power_down(codec);
- if (!codec_in_pm(codec) && res && *res == -1 && bus->rirb_error) {
+ mutex_unlock(&bus->core.cmd_mutex);
+ snd_hda_power_down_pm(codec);
+ if (!codec_in_pm(codec) && res && err < 0 && bus->rirb_error) {
if (bus->response_reset) {
codec_dbg(codec,
"resetting BUS due to fatal communication error\n");
- trace_hda_bus_reset(bus);
bus->ops.bus_reset(bus);
}
goto again;
@@ -286,9 +176,9 @@ unsigned int snd_hda_codec_read(struct hda_codec *codec, hda_nid_t nid,
int flags,
unsigned int verb, unsigned int parm)
{
- unsigned cmd = make_codec_cmd(codec, nid, flags, verb, parm);
+ unsigned int cmd = snd_hdac_make_cmd(&codec->core, nid, verb, parm);
unsigned int res;
- if (codec_exec_verb(codec, cmd, flags, &res))
+ if (snd_hdac_exec_verb(&codec->core, cmd, flags, &res))
return -1;
return res;
}
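
The read path above now builds the verb with snd_hdac_make_cmd() and executes it through the core; the hand-rolled make_codec_cmd() it replaces (removed in the earlier hunk) packed the fields as addr<<28 | nid<<20 | verb<<8 | parm and returned ~0 for out-of-range values. A standalone sketch of that packing (the example verb and parameter values are arbitrary):

#include <stdint.h>
#include <stdio.h>

static uint32_t make_cmd(unsigned int addr, unsigned int nid,
			 unsigned int verb, unsigned int parm)
{
	if ((addr & ~0xfu) || (nid & ~0x7fu) ||
	    (verb & ~0xfffu) || (parm & ~0xffffu))
		return ~0u;                      /* out of range */

	return ((uint32_t)addr << 28) | ((uint32_t)nid << 20) |
	       ((uint32_t)verb << 8) | parm;
}

int main(void)
{
	/* example: a 12-bit verb with a small parameter on NID 0x01 */
	printf("cmd = %#010x\n", (unsigned int)make_cmd(0, 0x01, 0xf00, 0x09));
	return 0;
}
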
@@ -309,10 +199,8 @@ EXPORT_SYMBOL_GPL(snd_hda_codec_read);
int snd_hda_codec_write(struct hda_codec *codec, hda_nid_t nid, int flags,
unsigned int verb, unsigned int parm)
{
- unsigned int cmd = make_codec_cmd(codec, nid, flags, verb, parm);
- unsigned int res;
- return codec_exec_verb(codec, cmd, flags,
- codec->bus->sync_write ? &res : NULL);
+ unsigned int cmd = snd_hdac_make_cmd(&codec->core, nid, verb, parm);
+ return snd_hdac_exec_verb(&codec->core, cmd, flags, NULL);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_write);
@@ -331,30 +219,6 @@ void snd_hda_sequence_write(struct hda_codec *codec, const struct hda_verb *seq)
}
EXPORT_SYMBOL_GPL(snd_hda_sequence_write);
-/**
- * snd_hda_get_sub_nodes - get the range of sub nodes
- * @codec: the HDA codec
- * @nid: NID to parse
- * @start_id: the pointer to store the start NID
- *
- * Parse the NID and store the start NID of its sub-nodes.
- * Returns the number of sub-nodes.
- */
-int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
- hda_nid_t *start_id)
-{
- unsigned int parm;
-
- parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
- if (parm == -1) {
- *start_id = 0;
- return 0;
- }
- *start_id = (parm >> 16) & 0x7fff;
- return (int)(parm & 0x7fff);
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_sub_nodes);
-
/* connection list element */
struct hda_conn_list {
struct list_head list;
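
The removed snd_hda_get_sub_nodes() decoded AC_PAR_NODE_COUNT into a start NID (bits 16..30) and a sub-node count (bits 0..14); the same decoding now comes from the hdac core. A standalone sketch of that bit split (the example parameter value is arbitrary):

#include <stdint.h>
#include <stdio.h>

static int get_sub_nodes(uint32_t parm, uint16_t *start_id)
{
	if (parm == (uint32_t)-1) {      /* read failure */
		*start_id = 0;
		return 0;
	}
	*start_id = (parm >> 16) & 0x7fff;
	return (int)(parm & 0x7fff);
}

int main(void)
{
	uint16_t start;
	int count = get_sub_nodes(0x00020004, &start);

	printf("start nid %#x, %d sub-nodes\n", (unsigned int)start, count);
	return 0;
}
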
@@ -495,128 +359,6 @@ int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
}
EXPORT_SYMBOL_GPL(snd_hda_get_connections);
-/* return CONNLIST_LEN parameter of the given widget */
-static unsigned int get_num_conns(struct hda_codec *codec, hda_nid_t nid)
-{
- unsigned int wcaps = get_wcaps(codec, nid);
- unsigned int parm;
-
- if (!(wcaps & AC_WCAP_CONN_LIST) &&
- get_wcaps_type(wcaps) != AC_WID_VOL_KNB)
- return 0;
-
- parm = snd_hda_param_read(codec, nid, AC_PAR_CONNLIST_LEN);
- if (parm == -1)
- parm = 0;
- return parm;
-}
-
-int snd_hda_get_num_raw_conns(struct hda_codec *codec, hda_nid_t nid)
-{
- return snd_hda_get_raw_connections(codec, nid, NULL, 0);
-}
-
-/**
- * snd_hda_get_raw_connections - copy connection list without cache
- * @codec: the HDA codec
- * @nid: NID to parse
- * @conn_list: connection list array
- * @max_conns: max. number of connections to store
- *
- * Like snd_hda_get_connections(), copy the connection list but without
- * checking through the connection-list cache.
- * Currently called only from hda_proc.c, so not exported.
- */
-int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid,
- hda_nid_t *conn_list, int max_conns)
-{
- unsigned int parm;
- int i, conn_len, conns;
- unsigned int shift, num_elems, mask;
- hda_nid_t prev_nid;
- int null_count = 0;
-
- parm = get_num_conns(codec, nid);
- if (!parm)
- return 0;
-
- if (parm & AC_CLIST_LONG) {
- /* long form */
- shift = 16;
- num_elems = 2;
- } else {
- /* short form */
- shift = 8;
- num_elems = 4;
- }
- conn_len = parm & AC_CLIST_LENGTH;
- mask = (1 << (shift-1)) - 1;
-
- if (!conn_len)
- return 0; /* no connection */
-
- if (conn_len == 1) {
- /* single connection */
- parm = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_CONNECT_LIST, 0);
- if (parm == -1 && codec->bus->rirb_error)
- return -EIO;
- if (conn_list)
- conn_list[0] = parm & mask;
- return 1;
- }
-
- /* multi connection */
- conns = 0;
- prev_nid = 0;
- for (i = 0; i < conn_len; i++) {
- int range_val;
- hda_nid_t val, n;
-
- if (i % num_elems == 0) {
- parm = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_CONNECT_LIST, i);
- if (parm == -1 && codec->bus->rirb_error)
- return -EIO;
- }
- range_val = !!(parm & (1 << (shift-1))); /* ranges */
- val = parm & mask;
- if (val == 0 && null_count++) { /* no second chance */
- codec_dbg(codec,
- "invalid CONNECT_LIST verb %x[%i]:%x\n",
- nid, i, parm);
- return 0;
- }
- parm >>= shift;
- if (range_val) {
- /* ranges between the previous and this one */
- if (!prev_nid || prev_nid >= val) {
- codec_warn(codec,
- "invalid dep_range_val %x:%x\n",
- prev_nid, val);
- continue;
- }
- for (n = prev_nid + 1; n <= val; n++) {
- if (conn_list) {
- if (conns >= max_conns)
- return -ENOSPC;
- conn_list[conns] = n;
- }
- conns++;
- }
- } else {
- if (conn_list) {
- if (conns >= max_conns)
- return -ENOSPC;
- conn_list[conns] = val;
- }
- conns++;
- }
- prev_nid = val;
- }
- return conns;
-}
-
/**
* snd_hda_override_conn_list - add/modify the connection-list to cache
* @codec: the HDA codec
@@ -741,90 +483,6 @@ int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
return devices;
}
-/**
- * snd_hda_queue_unsol_event - add an unsolicited event to queue
- * @bus: the BUS
- * @res: unsolicited event (lower 32bit of RIRB entry)
- * @res_ex: codec addr and flags (upper 32bit or RIRB entry)
- *
- * Adds the given event to the queue. The events are processed in
- * the workqueue asynchronously. Call this function in the interrupt
- * hanlder when RIRB receives an unsolicited event.
- *
- * Returns 0 if successful, or a negative error code.
- */
-int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
-{
- struct hda_bus_unsolicited *unsol;
- unsigned int wp;
-
- if (!bus || !bus->workq)
- return 0;
-
- trace_hda_unsol_event(bus, res, res_ex);
- unsol = bus->unsol;
- if (!unsol)
- return 0;
-
- wp = (unsol->wp + 1) % HDA_UNSOL_QUEUE_SIZE;
- unsol->wp = wp;
-
- wp <<= 1;
- unsol->queue[wp] = res;
- unsol->queue[wp + 1] = res_ex;
-
- queue_work(bus->workq, &unsol->work);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_queue_unsol_event);
-
-/*
- * process queued unsolicited events
- */
-static void process_unsol_events(struct work_struct *work)
-{
- struct hda_bus_unsolicited *unsol =
- container_of(work, struct hda_bus_unsolicited, work);
- struct hda_bus *bus = unsol->bus;
- struct hda_codec *codec;
- unsigned int rp, caddr, res;
-
- while (unsol->rp != unsol->wp) {
- rp = (unsol->rp + 1) % HDA_UNSOL_QUEUE_SIZE;
- unsol->rp = rp;
- rp <<= 1;
- res = unsol->queue[rp];
- caddr = unsol->queue[rp + 1];
- if (!(caddr & (1 << 4))) /* no unsolicited event? */
- continue;
- codec = bus->caddr_tbl[caddr & 0x0f];
- if (codec && codec->patch_ops.unsol_event)
- codec->patch_ops.unsol_event(codec, res);
- }
-}
-
-/*
- * initialize unsolicited queue
- */
-static int init_unsol_queue(struct hda_bus *bus)
-{
- struct hda_bus_unsolicited *unsol;
-
- if (bus->unsol) /* already initialized */
- return 0;
-
- unsol = kzalloc(sizeof(*unsol), GFP_KERNEL);
- if (!unsol) {
- dev_err(bus->card->dev, "can't allocate unsolicited queue\n");
- return -ENOMEM;
- }
- INIT_WORK(&unsol->work, process_unsol_events);
- unsol->bus = bus;
- bus->unsol = unsol;
- return 0;
-}
-
/*
* destructor
*/
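
The removed unsolicited-event path above was a fixed-size ring buffer: the interrupt side advanced wp and stored two 32-bit words per event, and a workqueue drained entries until rp caught up with wp. That queueing now lives in the hdac core. A userspace sketch of the same ring-buffer logic (the queue size and the printf consumer are illustrative):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 64

struct unsol_queue {
	uint32_t queue[QUEUE_SIZE * 2];
	unsigned int wp, rp;
};

static void push_event(struct unsol_queue *q, uint32_t res, uint32_t res_ex)
{
	unsigned int wp = (q->wp + 1) % QUEUE_SIZE;

	q->wp = wp;
	q->queue[wp * 2] = res;
	q->queue[wp * 2 + 1] = res_ex;
}

static void drain(struct unsol_queue *q)
{
	while (q->rp != q->wp) {
		unsigned int rp = (q->rp + 1) % QUEUE_SIZE;

		q->rp = rp;
		printf("event %#x (caddr/flags %#x)\n",
		       (unsigned int)q->queue[rp * 2],
		       (unsigned int)q->queue[rp * 2 + 1]);
	}
}

int main(void)
{
	struct unsol_queue q = { .wp = 0, .rp = 0 };

	push_event(&q, 0x1234, 0x10);
	push_event(&q, 0x5678, 0x11);
	drain(&q);
	return 0;
}
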
@@ -832,16 +490,9 @@ static void snd_hda_bus_free(struct hda_bus *bus)
{
if (!bus)
return;
-
- WARN_ON(!list_empty(&bus->codec_list));
- if (bus->workq)
- flush_workqueue(bus->workq);
- kfree(bus->unsol);
if (bus->ops.private_free)
bus->ops.private_free(bus);
- if (bus->workq)
- destroy_workqueue(bus->workq);
-
+ snd_hdac_bus_exit(&bus->core);
kfree(bus);
}
@@ -858,17 +509,35 @@ static int snd_hda_bus_dev_disconnect(struct snd_device *device)
return 0;
}
+/* hdac_bus_ops translations */
+static int _hda_bus_command(struct hdac_bus *_bus, unsigned int cmd)
+{
+ struct hda_bus *bus = container_of(_bus, struct hda_bus, core);
+ return bus->ops.command(bus, cmd);
+}
+
+static int _hda_bus_get_response(struct hdac_bus *_bus, unsigned int addr,
+ unsigned int *res)
+{
+ struct hda_bus *bus = container_of(_bus, struct hda_bus, core);
+ *res = bus->ops.get_response(bus, addr);
+ return bus->rirb_error ? -EIO : 0;
+}
+
+static const struct hdac_bus_ops bus_ops = {
+ .command = _hda_bus_command,
+ .get_response = _hda_bus_get_response,
+};
+
/**
* snd_hda_bus_new - create a HDA bus
* @card: the card entry
- * @temp: the template for hda_bus information
* @busp: the pointer to store the created bus instance
*
* Returns 0 if successful, or a negative error code.
*/
int snd_hda_bus_new(struct snd_card *card,
- const struct hda_bus_template *temp,
- struct hda_bus **busp)
+ struct hda_bus **busp)
{
struct hda_bus *bus;
int err;
@@ -877,40 +546,21 @@ int snd_hda_bus_new(struct snd_card *card,
.dev_free = snd_hda_bus_dev_free,
};
- if (snd_BUG_ON(!temp))
- return -EINVAL;
- if (snd_BUG_ON(!temp->ops.command || !temp->ops.get_response))
- return -EINVAL;
-
if (busp)
*busp = NULL;
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
- if (bus == NULL) {
- dev_err(card->dev, "can't allocate struct hda_bus\n");
+ if (!bus)
return -ENOMEM;
+
+ err = snd_hdac_bus_init(&bus->core, card->dev, &bus_ops);
+ if (err < 0) {
+ kfree(bus);
+ return err;
}
bus->card = card;
- bus->private_data = temp->private_data;
- bus->pci = temp->pci;
- bus->modelname = temp->modelname;
- bus->power_save = temp->power_save;
- bus->ops = temp->ops;
-
- mutex_init(&bus->cmd_mutex);
mutex_init(&bus->prepare_mutex);
- INIT_LIST_HEAD(&bus->codec_list);
-
- snprintf(bus->workq_name, sizeof(bus->workq_name),
- "hd-audio%d", card->number);
- bus->workq = create_singlethread_workqueue(bus->workq_name);
- if (!bus->workq) {
- dev_err(card->dev, "cannot create workqueue %s\n",
- bus->workq_name);
- kfree(bus);
- return -ENOMEM;
- }
err = snd_device_new(card, SNDRV_DEV_BUS, bus, &dev_ops);
if (err < 0) {
@@ -923,140 +573,6 @@ int snd_hda_bus_new(struct snd_card *card,
}
EXPORT_SYMBOL_GPL(snd_hda_bus_new);
-#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
-#define is_generic_config(codec) \
- (codec->modelname && !strcmp(codec->modelname, "generic"))
-#else
-#define is_generic_config(codec) 0
-#endif
-
-#ifdef MODULE
-#define HDA_MODREQ_MAX_COUNT 2 /* two request_modules()'s */
-#else
-#define HDA_MODREQ_MAX_COUNT 0 /* all presets are statically linked */
-#endif
-
-/*
- * find a matching codec preset
- */
-static const struct hda_codec_preset *
-find_codec_preset(struct hda_codec *codec)
-{
- struct hda_codec_preset_list *tbl;
- const struct hda_codec_preset *preset;
- unsigned int mod_requested = 0;
-
- again:
- mutex_lock(&preset_mutex);
- list_for_each_entry(tbl, &hda_preset_tables, list) {
- if (!try_module_get(tbl->owner)) {
- codec_err(codec, "cannot module_get\n");
- continue;
- }
- for (preset = tbl->preset; preset->id; preset++) {
- u32 mask = preset->mask;
- if (preset->afg && preset->afg != codec->afg)
- continue;
- if (preset->mfg && preset->mfg != codec->mfg)
- continue;
- if (!mask)
- mask = ~0;
- if (preset->id == (codec->vendor_id & mask) &&
- (!preset->rev ||
- preset->rev == codec->revision_id)) {
- mutex_unlock(&preset_mutex);
- codec->owner = tbl->owner;
- return preset;
- }
- }
- module_put(tbl->owner);
- }
- mutex_unlock(&preset_mutex);
-
- if (mod_requested < HDA_MODREQ_MAX_COUNT) {
- if (!mod_requested)
- request_module("snd-hda-codec-id:%08x",
- codec->vendor_id);
- else
- request_module("snd-hda-codec-id:%04x*",
- (codec->vendor_id >> 16) & 0xffff);
- mod_requested++;
- goto again;
- }
- return NULL;
-}
-
-/*
- * get_codec_name - store the codec name
- */
-static int get_codec_name(struct hda_codec *codec)
-{
- const struct hda_vendor_id *c;
- const char *vendor = NULL;
- u16 vendor_id = codec->vendor_id >> 16;
- char tmp[16];
-
- if (codec->vendor_name)
- goto get_chip_name;
-
- for (c = hda_vendor_ids; c->id; c++) {
- if (c->id == vendor_id) {
- vendor = c->name;
- break;
- }
- }
- if (!vendor) {
- sprintf(tmp, "Generic %04x", vendor_id);
- vendor = tmp;
- }
- codec->vendor_name = kstrdup(vendor, GFP_KERNEL);
- if (!codec->vendor_name)
- return -ENOMEM;
-
- get_chip_name:
- if (codec->chip_name)
- return 0;
-
- if (codec->preset && codec->preset->name)
- codec->chip_name = kstrdup(codec->preset->name, GFP_KERNEL);
- else {
- sprintf(tmp, "ID %x", codec->vendor_id & 0xffff);
- codec->chip_name = kstrdup(tmp, GFP_KERNEL);
- }
- if (!codec->chip_name)
- return -ENOMEM;
- return 0;
-}
-
-/*
- * look for an AFG and MFG nodes
- */
-static void setup_fg_nodes(struct hda_codec *codec)
-{
- int i, total_nodes, function_id;
- hda_nid_t nid;
-
- total_nodes = snd_hda_get_sub_nodes(codec, AC_NODE_ROOT, &nid);
- for (i = 0; i < total_nodes; i++, nid++) {
- function_id = snd_hda_param_read(codec, nid,
- AC_PAR_FUNCTION_TYPE);
- switch (function_id & 0xff) {
- case AC_GRP_AUDIO_FUNCTION:
- codec->afg = nid;
- codec->afg_function_id = function_id & 0xff;
- codec->afg_unsol = (function_id >> 8) & 1;
- break;
- case AC_GRP_MODEM_FUNCTION:
- codec->mfg = nid;
- codec->mfg_function_id = function_id & 0xff;
- codec->mfg_unsol = (function_id >> 8) & 1;
- break;
- default:
- break;
- }
- }
-}
-
/*
* read widget caps for each widget and store in cache
*/
@@ -1065,25 +581,22 @@ static int read_widget_caps(struct hda_codec *codec, hda_nid_t fg_node)
int i;
hda_nid_t nid;
- codec->num_nodes = snd_hda_get_sub_nodes(codec, fg_node,
- &codec->start_nid);
- codec->wcaps = kmalloc(codec->num_nodes * 4, GFP_KERNEL);
+ codec->wcaps = kmalloc(codec->core.num_nodes * 4, GFP_KERNEL);
if (!codec->wcaps)
return -ENOMEM;
- nid = codec->start_nid;
- for (i = 0; i < codec->num_nodes; i++, nid++)
- codec->wcaps[i] = snd_hda_param_read(codec, nid,
- AC_PAR_AUDIO_WIDGET_CAP);
+ nid = codec->core.start_nid;
+ for (i = 0; i < codec->core.num_nodes; i++, nid++)
+ codec->wcaps[i] = snd_hdac_read_parm_uncached(&codec->core,
+ nid, AC_PAR_AUDIO_WIDGET_CAP);
return 0;
}
/* read all pin default configurations and save codec->init_pins */
static int read_pin_defaults(struct hda_codec *codec)
{
- int i;
- hda_nid_t nid = codec->start_nid;
+ hda_nid_t nid;
- for (i = 0; i < codec->num_nodes; i++, nid++) {
+ for_each_hda_codec_node(nid, codec) {
struct hda_pincfg *pin;
unsigned int wcaps = get_wcaps(codec, nid);
unsigned int wid_type = get_wcaps_type(wcaps);
@@ -1290,14 +803,10 @@ static void hda_jackpoll_work(struct work_struct *work)
if (!codec->jackpoll_interval)
return;
- queue_delayed_work(codec->bus->workq, &codec->jackpoll_work,
- codec->jackpoll_interval);
+ schedule_delayed_work(&codec->jackpoll_work,
+ codec->jackpoll_interval);
}
-static void init_hda_cache(struct hda_cache_rec *cache,
- unsigned int record_size);
-static void free_hda_cache(struct hda_cache_rec *cache);
-
/* release all pincfg lists */
static void free_init_pincfgs(struct hda_codec *codec)
{
@@ -1339,70 +848,118 @@ get_hda_cvt_setup(struct hda_codec *codec, hda_nid_t nid)
}
/*
- * Dynamic symbol binding for the codec parsers
+ * PCM device
*/
+static void release_pcm(struct kref *kref)
+{
+ struct hda_pcm *pcm = container_of(kref, struct hda_pcm, kref);
-#define load_parser(codec, sym) \
- ((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym))
+ if (pcm->pcm)
+ snd_device_free(pcm->codec->card, pcm->pcm);
+ clear_bit(pcm->device, pcm->codec->bus->pcm_dev_bits);
+ kfree(pcm->name);
+ kfree(pcm);
+}
+
+void snd_hda_codec_pcm_put(struct hda_pcm *pcm)
+{
+ kref_put(&pcm->kref, release_pcm);
+}
+EXPORT_SYMBOL_GPL(snd_hda_codec_pcm_put);
-static void unload_parser(struct hda_codec *codec)
+struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec,
+ const char *fmt, ...)
{
- if (codec->parser)
- symbol_put_addr(codec->parser);
- codec->parser = NULL;
+ struct hda_pcm *pcm;
+ va_list args;
+
+ pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return NULL;
+
+ pcm->codec = codec;
+ kref_init(&pcm->kref);
+ va_start(args, fmt);
+ pcm->name = kvasprintf(GFP_KERNEL, fmt, args);
+ va_end(args);
+ if (!pcm->name) {
+ kfree(pcm);
+ return NULL;
+ }
+
+ list_add_tail(&pcm->list, &codec->pcm_list_head);
+ return pcm;
}
+EXPORT_SYMBOL_GPL(snd_hda_codec_pcm_new);
/*
* codec destructor
*/
-static void snd_hda_codec_free(struct hda_codec *codec)
+static void codec_release_pcms(struct hda_codec *codec)
{
- if (!codec)
- return;
+ struct hda_pcm *pcm, *n;
+
+ list_for_each_entry_safe(pcm, n, &codec->pcm_list_head, list) {
+ list_del_init(&pcm->list);
+ if (pcm->pcm)
+ snd_device_disconnect(codec->card, pcm->pcm);
+ snd_hda_codec_pcm_put(pcm);
+ }
+}
+
+void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
+{
+ if (codec->registered) {
+ /* pm_runtime_put() is called in snd_hdac_device_exit() */
+ pm_runtime_get_noresume(hda_codec_dev(codec));
+ pm_runtime_disable(hda_codec_dev(codec));
+ codec->registered = 0;
+ }
+
cancel_delayed_work_sync(&codec->jackpoll_work);
+ if (!codec->in_freeing)
+ snd_hda_ctls_clear(codec);
+ codec_release_pcms(codec);
+ snd_hda_detach_beep_device(codec);
+ memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
snd_hda_jack_tbl_clear(codec);
- free_init_pincfgs(codec);
-#ifdef CONFIG_PM
- cancel_delayed_work(&codec->power_work);
- flush_workqueue(codec->bus->workq);
-#endif
- list_del(&codec->list);
- snd_array_free(&codec->mixers);
- snd_array_free(&codec->nids);
+ codec->proc_widget_hook = NULL;
+ codec->spec = NULL;
+
+ /* free only driver_pins so that init_pins + user_pins are restored */
+ snd_array_free(&codec->driver_pins);
snd_array_free(&codec->cvt_setups);
snd_array_free(&codec->spdif_out);
+ snd_array_free(&codec->verbs);
+ codec->preset = NULL;
+ codec->slave_dig_outs = NULL;
+ codec->spdif_status_reset = 0;
+ snd_array_free(&codec->mixers);
+ snd_array_free(&codec->nids);
remove_conn_list(codec);
- codec->bus->caddr_tbl[codec->addr] = NULL;
- if (codec->patch_ops.free)
- codec->patch_ops.free(codec);
- hda_call_pm_notify(codec, false); /* cancel leftover refcounts */
- snd_hda_sysfs_clear(codec);
- unload_parser(codec);
- module_put(codec->owner);
- free_hda_cache(&codec->amp_cache);
- free_hda_cache(&codec->cmd_cache);
- kfree(codec->vendor_name);
- kfree(codec->chip_name);
- kfree(codec->modelname);
- kfree(codec->wcaps);
- codec->bus->num_codecs--;
- put_device(&codec->dev);
+ snd_hdac_regmap_exit(&codec->core);
}
-static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec,
- hda_nid_t fg, unsigned int power_state);
-
static unsigned int hda_set_power_state(struct hda_codec *codec,
unsigned int power_state);
-static int snd_hda_codec_dev_register(struct snd_device *device)
+/* also called from hda_bind.c */
+void snd_hda_codec_register(struct hda_codec *codec)
{
- struct hda_codec *codec = device->device_data;
- int err = device_add(&codec->dev);
+ if (codec->registered)
+ return;
+ if (device_is_registered(hda_codec_dev(codec))) {
+ snd_hda_register_beep_device(codec);
+ pm_runtime_enable(hda_codec_dev(codec));
+ /* it was powered up in snd_hda_codec_new(), now all done */
+ snd_hda_power_down(codec);
+ codec->registered = 1;
+ }
+}
- if (err < 0)
- return err;
- snd_hda_register_beep_device(codec);
+static int snd_hda_codec_dev_register(struct snd_device *device)
+{
+ snd_hda_codec_register(device->device_data);
return 0;
}
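
The new hda_pcm handling above attaches a kref to each PCM so the codec and any open streams share ownership, and snd_hda_codec_pcm_put() runs the release path only when the last reference drops. A minimal userspace sketch of that idea using a plain counter instead of a real kref (the struct and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct pcm {
	int refcount;
	const char *name;
};

static struct pcm *pcm_new(const char *name)
{
	struct pcm *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->refcount = 1;            /* creator holds the first reference */
	p->name = name;
	return p;
}

static void pcm_get(struct pcm *p) { p->refcount++; }

static void pcm_put(struct pcm *p)
{
	if (--p->refcount == 0) {   /* last user: run the release path */
		printf("releasing %s\n", p->name);
		free(p);
	}
}

int main(void)
{
	struct pcm *p = pcm_new("example PCM");

	pcm_get(p);   /* an open stream takes a reference */
	pcm_put(p);   /* stream closed */
	pcm_put(p);   /* codec teardown drops the last one */
	return 0;
}
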
@@ -1411,20 +968,29 @@ static int snd_hda_codec_dev_disconnect(struct snd_device *device)
struct hda_codec *codec = device->device_data;
snd_hda_detach_beep_device(codec);
- device_del(&codec->dev);
return 0;
}
static int snd_hda_codec_dev_free(struct snd_device *device)
{
- snd_hda_codec_free(device->device_data);
+ struct hda_codec *codec = device->device_data;
+
+ codec->in_freeing = 1;
+ snd_hdac_device_unregister(&codec->core);
+ put_device(hda_codec_dev(codec));
return 0;
}
-/* just free the container */
static void snd_hda_codec_dev_release(struct device *dev)
{
- kfree(container_of(dev, struct hda_codec, dev));
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+
+ free_init_pincfgs(codec);
+ snd_hdac_device_exit(&codec->core);
+ snd_hda_sysfs_clear(codec);
+ kfree(codec->modelname);
+ kfree(codec->wcaps);
+ kfree(codec);
}
/**
@@ -1435,9 +1001,8 @@ static void snd_hda_codec_dev_release(struct device *dev)
*
* Returns 0 if successful, or a negative error code.
*/
-int snd_hda_codec_new(struct hda_bus *bus,
- unsigned int codec_addr,
- struct hda_codec **codecp)
+int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
+ unsigned int codec_addr, struct hda_codec **codecp)
{
struct hda_codec *codec;
char component[31];
@@ -1454,35 +1019,27 @@ int snd_hda_codec_new(struct hda_bus *bus,
if (snd_BUG_ON(codec_addr > HDA_MAX_CODEC_ADDRESS))
return -EINVAL;
- if (bus->caddr_tbl[codec_addr]) {
- dev_err(bus->card->dev,
- "address 0x%x is already occupied\n",
- codec_addr);
- return -EBUSY;
- }
-
codec = kzalloc(sizeof(*codec), GFP_KERNEL);
- if (codec == NULL) {
- dev_err(bus->card->dev, "can't allocate struct hda_codec\n");
+ if (!codec)
return -ENOMEM;
+
+ sprintf(component, "hdaudioC%dD%d", card->number, codec_addr);
+ err = snd_hdac_device_init(&codec->core, &bus->core, component,
+ codec_addr);
+ if (err < 0) {
+ kfree(codec);
+ return err;
}
- device_initialize(&codec->dev);
- codec->dev.parent = &bus->card->card_dev;
- codec->dev.class = sound_class;
- codec->dev.release = snd_hda_codec_dev_release;
- codec->dev.groups = snd_hda_dev_attr_groups;
- dev_set_name(&codec->dev, "hdaudioC%dD%d", bus->card->number,
- codec_addr);
- dev_set_drvdata(&codec->dev, codec); /* for sysfs */
+ codec->core.dev.release = snd_hda_codec_dev_release;
+ codec->core.type = HDA_DEV_LEGACY;
+ codec->core.exec_verb = codec_exec_verb;
codec->bus = bus;
+ codec->card = card;
codec->addr = codec_addr;
mutex_init(&codec->spdif_mutex);
mutex_init(&codec->control_mutex);
- mutex_init(&codec->hash_mutex);
- init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
- init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32);
snd_array_init(&codec->nids, sizeof(struct hda_nid_item), 32);
snd_array_init(&codec->init_pins, sizeof(struct hda_pincfg), 16);
@@ -1492,19 +1049,14 @@ int snd_hda_codec_new(struct hda_bus *bus,
snd_array_init(&codec->jacktbl, sizeof(struct hda_jack_tbl), 16);
snd_array_init(&codec->verbs, sizeof(struct hda_verb *), 8);
INIT_LIST_HEAD(&codec->conn_list);
+ INIT_LIST_HEAD(&codec->pcm_list_head);
INIT_DELAYED_WORK(&codec->jackpoll_work, hda_jackpoll_work);
codec->depop_delay = -1;
codec->fixup_id = HDA_FIXUP_ID_NOT_SET;
#ifdef CONFIG_PM
- spin_lock_init(&codec->power_lock);
- INIT_DELAYED_WORK(&codec->power_work, hda_power_work);
- /* snd_hda_codec_new() marks the codec as power-up, and leave it as is.
- * the caller has to power down appropriatley after initialization
- * phase.
- */
- hda_keep_power_on(codec);
+ codec->power_jiffies = jiffies;
#endif
snd_hda_sysfs_init(codec);
@@ -1517,59 +1069,14 @@ int snd_hda_codec_new(struct hda_bus *bus,
}
}
- list_add_tail(&codec->list, &bus->codec_list);
- bus->num_codecs++;
-
- bus->caddr_tbl[codec_addr] = codec;
-
- codec->vendor_id = snd_hda_param_read(codec, AC_NODE_ROOT,
- AC_PAR_VENDOR_ID);
- if (codec->vendor_id == -1)
- /* read again, hopefully the access method was corrected
- * in the last read...
- */
- codec->vendor_id = snd_hda_param_read(codec, AC_NODE_ROOT,
- AC_PAR_VENDOR_ID);
- codec->subsystem_id = snd_hda_param_read(codec, AC_NODE_ROOT,
- AC_PAR_SUBSYSTEM_ID);
- codec->revision_id = snd_hda_param_read(codec, AC_NODE_ROOT,
- AC_PAR_REV_ID);
-
- setup_fg_nodes(codec);
- if (!codec->afg && !codec->mfg) {
- dev_err(bus->card->dev, "no AFG or MFG node found\n");
- err = -ENODEV;
- goto error;
- }
-
- fg = codec->afg ? codec->afg : codec->mfg;
+ fg = codec->core.afg ? codec->core.afg : codec->core.mfg;
err = read_widget_caps(codec, fg);
- if (err < 0) {
- dev_err(bus->card->dev, "cannot malloc\n");
+ if (err < 0)
goto error;
- }
err = read_pin_defaults(codec);
if (err < 0)
goto error;
- if (!codec->subsystem_id) {
- codec->subsystem_id =
- snd_hda_codec_read(codec, fg, 0,
- AC_VERB_GET_SUBSYSTEM_ID, 0);
- }
-
-#ifdef CONFIG_PM
- codec->d3_stop_clk = snd_hda_codec_get_supported_ps(codec, fg,
- AC_PWRST_CLKSTOP);
-#endif
- codec->epss = snd_hda_codec_get_supported_ps(codec, fg,
- AC_PWRST_EPSS);
-#ifdef CONFIG_PM
- if (!codec->d3_stop_clk || !codec->epss)
- bus->power_keep_link_on = 1;
-#endif
-
-
/* power-up all before initialization */
hda_set_power_state(codec, AC_PWRST_D0);
@@ -1577,11 +1084,11 @@ int snd_hda_codec_new(struct hda_bus *bus,
snd_hda_create_hwdep(codec);
- sprintf(component, "HDA:%08x,%08x,%08x", codec->vendor_id,
- codec->subsystem_id, codec->revision_id);
- snd_component_add(codec->bus->card, component);
+ sprintf(component, "HDA:%08x,%08x,%08x", codec->core.vendor_id,
+ codec->core.subsystem_id, codec->core.revision_id);
+ snd_component_add(card, component);
- err = snd_device_new(bus->card, SNDRV_DEV_CODEC, codec, &dev_ops);
+ err = snd_device_new(card, SNDRV_DEV_CODEC, codec, &dev_ops);
if (err < 0)
goto error;
@@ -1590,7 +1097,7 @@ int snd_hda_codec_new(struct hda_bus *bus,
return 0;
error:
- snd_hda_codec_free(codec);
+ put_device(hda_codec_dev(codec));
return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_new);
@@ -1607,16 +1114,18 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec)
hda_nid_t fg;
int err;
+ err = snd_hdac_refresh_widgets(&codec->core);
+ if (err < 0)
+ return err;
+
/* Assume the function group node does not change,
* only the widget nodes may change.
*/
kfree(codec->wcaps);
- fg = codec->afg ? codec->afg : codec->mfg;
+ fg = codec->core.afg ? codec->core.afg : codec->core.mfg;
err = read_widget_caps(codec, fg);
- if (err < 0) {
- codec_err(codec, "cannot malloc\n");
+ if (err < 0)
return err;
- }
snd_array_free(&codec->init_pins);
err = read_pin_defaults(codec);
@@ -1625,98 +1134,6 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets);
-
-#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
-/* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */
-static bool is_likely_hdmi_codec(struct hda_codec *codec)
-{
- hda_nid_t nid = codec->start_nid;
- int i;
-
- for (i = 0; i < codec->num_nodes; i++, nid++) {
- unsigned int wcaps = get_wcaps(codec, nid);
- switch (get_wcaps_type(wcaps)) {
- case AC_WID_AUD_IN:
- return false; /* HDMI parser supports only HDMI out */
- case AC_WID_AUD_OUT:
- if (!(wcaps & AC_WCAP_DIGITAL))
- return false;
- break;
- }
- }
- return true;
-}
-#else
-/* no HDMI codec parser support */
-#define is_likely_hdmi_codec(codec) false
-#endif /* CONFIG_SND_HDA_CODEC_HDMI */
-
-/**
- * snd_hda_codec_configure - (Re-)configure the HD-audio codec
- * @codec: the HDA codec
- *
- * Start parsing of the given codec tree and (re-)initialize the whole
- * patch instance.
- *
- * Returns 0 if successful or a negative error code.
- */
-int snd_hda_codec_configure(struct hda_codec *codec)
-{
- int (*patch)(struct hda_codec *) = NULL;
- int err;
-
- codec->preset = find_codec_preset(codec);
- if (!codec->vendor_name || !codec->chip_name) {
- err = get_codec_name(codec);
- if (err < 0)
- return err;
- }
-
- if (!is_generic_config(codec) && codec->preset)
- patch = codec->preset->patch;
- if (!patch) {
- unload_parser(codec); /* to be sure */
- if (is_likely_hdmi_codec(codec)) {
-#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
- patch = load_parser(codec, snd_hda_parse_hdmi_codec);
-#elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI)
- patch = snd_hda_parse_hdmi_codec;
-#endif
- }
- if (!patch) {
-#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
- patch = load_parser(codec, snd_hda_parse_generic_codec);
-#elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC)
- patch = snd_hda_parse_generic_codec;
-#endif
- }
- if (!patch) {
- codec_err(codec, "No codec parser is available\n");
- return -ENODEV;
- }
- }
-
- err = patch(codec);
- if (err < 0) {
- unload_parser(codec);
- return err;
- }
-
- if (codec->patch_ops.unsol_event) {
- err = init_unsol_queue(codec->bus);
- if (err < 0)
- return err;
- }
-
- /* audio codec should override the mixer name */
- if (codec->afg || !*codec->bus->card->mixername)
- snprintf(codec->bus->card->mixername,
- sizeof(codec->bus->card->mixername),
- "%s %s", codec->vendor_name, codec->chip_name);
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
-
/* update the stream-id if changed */
static void update_pcm_stream_id(struct hda_codec *codec,
struct hda_cvt_setup *p, hda_nid_t nid,
@@ -1782,6 +1199,8 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
if (!p)
return;
+ if (codec->patch_ops.stream_pm)
+ codec->patch_ops.stream_pm(codec, nid, true);
if (codec->pcm_format_first)
update_pcm_format(codec, p, nid, format);
update_pcm_stream_id(codec, p, nid, stream_tag, channel_id);
@@ -1793,7 +1212,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
/* make other inactive cvts with the same stream-tag dirty */
type = get_wcaps_type(get_wcaps(codec, nid));
- list_for_each_entry(c, &codec->bus->codec_list, list) {
+ list_for_each_codec(c, codec->bus) {
for (i = 0; i < c->cvt_setups.used; i++) {
p = snd_array_elem(&c->cvt_setups, i);
if (!p->active && p->stream_tag == stream_tag &&
@@ -1850,6 +1269,8 @@ static void really_cleanup_stream(struct hda_codec *codec,
);
memset(q, 0, sizeof(*q));
q->nid = nid;
+ if (codec->patch_ops.stream_pm)
+ codec->patch_ops.stream_pm(codec, nid, false);
}
/* clean up the all conflicting obsolete streams */
@@ -1858,7 +1279,7 @@ static void purify_inactive_streams(struct hda_codec *codec)
struct hda_codec *c;
int i;
- list_for_each_entry(c, &codec->bus->codec_list, list) {
+ list_for_each_codec(c, codec->bus) {
for (i = 0; i < c->cvt_setups.used; i++) {
struct hda_cvt_setup *p;
p = snd_array_elem(&c->cvt_setups, i);
@@ -1886,127 +1307,6 @@ static void hda_cleanup_all_streams(struct hda_codec *codec)
* amp access functions
*/
-/* FIXME: more better hash key? */
-#define HDA_HASH_KEY(nid, dir, idx) (u32)((nid) + ((idx) << 16) + ((dir) << 24))
-#define HDA_HASH_PINCAP_KEY(nid) (u32)((nid) + (0x02 << 24))
-#define HDA_HASH_PARPCM_KEY(nid) (u32)((nid) + (0x03 << 24))
-#define HDA_HASH_PARSTR_KEY(nid) (u32)((nid) + (0x04 << 24))
-#define INFO_AMP_CAPS (1<<0)
-#define INFO_AMP_VOL(ch) (1 << (1 + (ch)))
-
-/* initialize the hash table */
-static void init_hda_cache(struct hda_cache_rec *cache,
- unsigned int record_size)
-{
- memset(cache, 0, sizeof(*cache));
- memset(cache->hash, 0xff, sizeof(cache->hash));
- snd_array_init(&cache->buf, record_size, 64);
-}
-
-static void free_hda_cache(struct hda_cache_rec *cache)
-{
- snd_array_free(&cache->buf);
-}
-
-/* query the hash. allocate an entry if not found. */
-static struct hda_cache_head *get_hash(struct hda_cache_rec *cache, u32 key)
-{
- u16 idx = key % (u16)ARRAY_SIZE(cache->hash);
- u16 cur = cache->hash[idx];
- struct hda_cache_head *info;
-
- while (cur != 0xffff) {
- info = snd_array_elem(&cache->buf, cur);
- if (info->key == key)
- return info;
- cur = info->next;
- }
- return NULL;
-}
-
-/* query the hash. allocate an entry if not found. */
-static struct hda_cache_head *get_alloc_hash(struct hda_cache_rec *cache,
- u32 key)
-{
- struct hda_cache_head *info = get_hash(cache, key);
- if (!info) {
- u16 idx, cur;
- /* add a new hash entry */
- info = snd_array_new(&cache->buf);
- if (!info)
- return NULL;
- cur = snd_array_index(&cache->buf, info);
- info->key = key;
- info->val = 0;
- info->dirty = 0;
- idx = key % (u16)ARRAY_SIZE(cache->hash);
- info->next = cache->hash[idx];
- cache->hash[idx] = cur;
- }
- return info;
-}
-
-/* query and allocate an amp hash entry */
-static inline struct hda_amp_info *
-get_alloc_amp_hash(struct hda_codec *codec, u32 key)
-{
- return (struct hda_amp_info *)get_alloc_hash(&codec->amp_cache, key);
-}
-
-/* overwrite the value with the key in the caps hash */
-static int write_caps_hash(struct hda_codec *codec, u32 key, unsigned int val)
-{
- struct hda_amp_info *info;
-
- mutex_lock(&codec->hash_mutex);
- info = get_alloc_amp_hash(codec, key);
- if (!info) {
- mutex_unlock(&codec->hash_mutex);
- return -EINVAL;
- }
- info->amp_caps = val;
- info->head.val |= INFO_AMP_CAPS;
- mutex_unlock(&codec->hash_mutex);
- return 0;
-}
-
-/* query the value from the caps hash; if not found, fetch the current
- * value from the given function and store in the hash
- */
-static unsigned int
-query_caps_hash(struct hda_codec *codec, hda_nid_t nid, int dir, u32 key,
- unsigned int (*func)(struct hda_codec *, hda_nid_t, int))
-{
- struct hda_amp_info *info;
- unsigned int val;
-
- mutex_lock(&codec->hash_mutex);
- info = get_alloc_amp_hash(codec, key);
- if (!info) {
- mutex_unlock(&codec->hash_mutex);
- return 0;
- }
- if (!(info->head.val & INFO_AMP_CAPS)) {
- mutex_unlock(&codec->hash_mutex); /* for reentrance */
- val = func(codec, nid, dir);
- write_caps_hash(codec, key, val);
- } else {
- val = info->amp_caps;
- mutex_unlock(&codec->hash_mutex);
- }
- return val;
-}
-
-static unsigned int read_amp_cap(struct hda_codec *codec, hda_nid_t nid,
- int direction)
-{
- if (!(get_wcaps(codec, nid) & AC_WCAP_AMP_OVRD))
- nid = codec->afg;
- return snd_hda_param_read(codec, nid,
- direction == HDA_OUTPUT ?
- AC_PAR_AMP_OUT_CAP : AC_PAR_AMP_IN_CAP);
-}
-
/**
* query_amp_caps - query AMP capabilities
* @codec: the HD-audio codec
@@ -2021,9 +1321,11 @@ static unsigned int read_amp_cap(struct hda_codec *codec, hda_nid_t nid,
*/
u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction)
{
- return query_caps_hash(codec, nid, direction,
- HDA_HASH_KEY(nid, direction, 0),
- read_amp_cap);
+ if (!(get_wcaps(codec, nid) & AC_WCAP_AMP_OVRD))
+ nid = codec->core.afg;
+ return snd_hda_param_read(codec, nid,
+ direction == HDA_OUTPUT ?
+ AC_PAR_AMP_OUT_CAP : AC_PAR_AMP_IN_CAP);
}
EXPORT_SYMBOL_GPL(query_amp_caps);
@@ -2064,183 +1366,14 @@ EXPORT_SYMBOL_GPL(snd_hda_check_amp_caps);
int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
unsigned int caps)
{
- return write_caps_hash(codec, HDA_HASH_KEY(nid, dir, 0), caps);
-}
-EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps);
-
-static unsigned int read_pin_cap(struct hda_codec *codec, hda_nid_t nid,
- int dir)
-{
- return snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
-}
-
-/**
- * snd_hda_query_pin_caps - Query PIN capabilities
- * @codec: the HD-auio codec
- * @nid: the NID to query
- *
- * Query PIN capabilities for the given widget.
- * Returns the obtained capability bits.
- *
- * When cap bits have been already read, this doesn't read again but
- * returns the cached value.
- */
-u32 snd_hda_query_pin_caps(struct hda_codec *codec, hda_nid_t nid)
-{
- return query_caps_hash(codec, nid, 0, HDA_HASH_PINCAP_KEY(nid),
- read_pin_cap);
-}
-EXPORT_SYMBOL_GPL(snd_hda_query_pin_caps);
-
-/**
- * snd_hda_override_pin_caps - Override the pin capabilities
- * @codec: the CODEC
- * @nid: the NID to override
- * @caps: the capability bits to set
- *
- * Override the cached PIN capabilitiy bits value by the given one.
- *
- * Returns zero if successful or a negative error code.
- */
-int snd_hda_override_pin_caps(struct hda_codec *codec, hda_nid_t nid,
- unsigned int caps)
-{
- return write_caps_hash(codec, HDA_HASH_PINCAP_KEY(nid), caps);
-}
-EXPORT_SYMBOL_GPL(snd_hda_override_pin_caps);
-
-/* read or sync the hash value with the current value;
- * call within hash_mutex
- */
-static struct hda_amp_info *
-update_amp_hash(struct hda_codec *codec, hda_nid_t nid, int ch,
- int direction, int index, bool init_only)
-{
- struct hda_amp_info *info;
- unsigned int parm, val = 0;
- bool val_read = false;
-
- retry:
- info = get_alloc_amp_hash(codec, HDA_HASH_KEY(nid, direction, index));
- if (!info)
- return NULL;
- if (!(info->head.val & INFO_AMP_VOL(ch))) {
- if (!val_read) {
- mutex_unlock(&codec->hash_mutex);
- parm = ch ? AC_AMP_GET_RIGHT : AC_AMP_GET_LEFT;
- parm |= direction == HDA_OUTPUT ?
- AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
- parm |= index;
- val = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_AMP_GAIN_MUTE, parm);
- val &= 0xff;
- val_read = true;
- mutex_lock(&codec->hash_mutex);
- goto retry;
- }
- info->vol[ch] = val;
- info->head.val |= INFO_AMP_VOL(ch);
- } else if (init_only)
- return NULL;
- return info;
-}
-
-/*
- * write the current volume in info to the h/w
- */
-static void put_vol_mute(struct hda_codec *codec, unsigned int amp_caps,
- hda_nid_t nid, int ch, int direction, int index,
- int val)
-{
- u32 parm;
-
- parm = ch ? AC_AMP_SET_RIGHT : AC_AMP_SET_LEFT;
- parm |= direction == HDA_OUTPUT ? AC_AMP_SET_OUTPUT : AC_AMP_SET_INPUT;
- parm |= index << AC_AMP_SET_INDEX_SHIFT;
- if ((val & HDA_AMP_MUTE) && !(amp_caps & AC_AMPCAP_MUTE) &&
- (amp_caps & AC_AMPCAP_MIN_MUTE))
- ; /* set the zero value as a fake mute */
- else
- parm |= val;
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, parm);
-}
-
-/**
- * snd_hda_codec_amp_read - Read AMP value
- * @codec: HD-audio codec
- * @nid: NID to read the AMP value
- * @ch: channel (left=0 or right=1)
- * @direction: #HDA_INPUT or #HDA_OUTPUT
- * @index: the index value (only for input direction)
- *
- * Read AMP value. The volume is between 0 to 0x7f, 0x80 = mute bit.
- */
-int snd_hda_codec_amp_read(struct hda_codec *codec, hda_nid_t nid, int ch,
- int direction, int index)
-{
- struct hda_amp_info *info;
- unsigned int val = 0;
-
- mutex_lock(&codec->hash_mutex);
- info = update_amp_hash(codec, nid, ch, direction, index, false);
- if (info)
- val = info->vol[ch];
- mutex_unlock(&codec->hash_mutex);
- return val;
-}
-EXPORT_SYMBOL_GPL(snd_hda_codec_amp_read);
-
-static int codec_amp_update(struct hda_codec *codec, hda_nid_t nid, int ch,
- int direction, int idx, int mask, int val,
- bool init_only)
-{
- struct hda_amp_info *info;
- unsigned int caps;
- unsigned int cache_only;
-
- if (snd_BUG_ON(mask & ~0xff))
- mask &= 0xff;
- val &= mask;
-
- mutex_lock(&codec->hash_mutex);
- info = update_amp_hash(codec, nid, ch, direction, idx, init_only);
- if (!info) {
- mutex_unlock(&codec->hash_mutex);
- return 0;
- }
- val |= info->vol[ch] & ~mask;
- if (info->vol[ch] == val) {
- mutex_unlock(&codec->hash_mutex);
- return 0;
- }
- info->vol[ch] = val;
- cache_only = info->head.dirty = codec->cached_write;
- caps = info->amp_caps;
- mutex_unlock(&codec->hash_mutex);
- if (!cache_only)
- put_vol_mute(codec, caps, nid, ch, direction, idx, val);
- return 1;
-}
+ unsigned int parm;
-/**
- * snd_hda_codec_amp_update - update the AMP value
- * @codec: HD-audio codec
- * @nid: NID to read the AMP value
- * @ch: channel (left=0 or right=1)
- * @direction: #HDA_INPUT or #HDA_OUTPUT
- * @idx: the index value (only for input direction)
- * @mask: bit mask to set
- * @val: the bits value to set
- *
- * Update the AMP value with a bit mask.
- * Returns 0 if the value is unchanged, 1 if changed.
- */
-int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, int ch,
- int direction, int idx, int mask, int val)
-{
- return codec_amp_update(codec, nid, ch, direction, idx, mask, val, false);
+ snd_hda_override_wcaps(codec, nid,
+ get_wcaps(codec, nid) | AC_WCAP_AMP_OVRD);
+ parm = dir == HDA_OUTPUT ? AC_PAR_AMP_OUT_CAP : AC_PAR_AMP_IN_CAP;
+ return snd_hdac_override_parm(&codec->core, nid, parm, caps);
}
-EXPORT_SYMBOL_GPL(snd_hda_codec_amp_update);
+EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps);
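/*
 * Illustrative sketch of the caller side: a fixup forcing sane output-amp
 * capabilities on a widget that reports bogus values.  The NID 0x14 and the
 * capability numbers below are assumptions made up for the example.
 */
static void example_override_amp_caps(struct hda_codec *codec)
{
	snd_hda_override_amp_caps(codec, 0x14, HDA_OUTPUT,
				  (0x40 << AC_AMPCAP_OFFSET_SHIFT) |
				  (0x40 << AC_AMPCAP_NUM_STEPS_SHIFT) |
				  (0x03 << AC_AMPCAP_STEP_SIZE_SHIFT) |
				  (1 << AC_AMPCAP_MUTE_SHIFT));
}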
/**
* snd_hda_codec_amp_stereo - update the AMP stereo values
@@ -2285,7 +1418,16 @@ EXPORT_SYMBOL_GPL(snd_hda_codec_amp_stereo);
int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch,
int dir, int idx, int mask, int val)
{
- return codec_amp_update(codec, nid, ch, dir, idx, mask, val, true);
+ int orig;
+
+ if (!codec->core.regmap)
+ return -EINVAL;
+ regcache_cache_only(codec->core.regmap, true);
+ orig = snd_hda_codec_amp_read(codec, nid, ch, dir, idx);
+ regcache_cache_only(codec->core.regmap, false);
+ if (orig >= 0)
+ return 0;
+ return snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_amp_init);
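/*
 * Minimal sketch of the cache-only probe used above: the register is read
 * through the regmap while hardware access is suppressed, so a negative
 * return means the value was never initialized.  The names are generic
 * placeholders, not part of the HD-audio API.
 */
static int example_peek_cached(struct regmap *map, unsigned int reg,
			       unsigned int *val)
{
	int err;

	regcache_cache_only(map, true);
	err = regmap_read(map, reg, val);	/* served from the cache only */
	regcache_cache_only(map, false);
	return err;
}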
@@ -2314,49 +1456,6 @@ int snd_hda_codec_amp_init_stereo(struct hda_codec *codec, hda_nid_t nid,
}
EXPORT_SYMBOL_GPL(snd_hda_codec_amp_init_stereo);
-/**
- * snd_hda_codec_resume_amp - Resume all AMP commands from the cache
- * @codec: HD-audio codec
- *
- * Resume all the amp commands from the cache.
- */
-void snd_hda_codec_resume_amp(struct hda_codec *codec)
-{
- int i;
-
- mutex_lock(&codec->hash_mutex);
- codec->cached_write = 0;
- for (i = 0; i < codec->amp_cache.buf.used; i++) {
- struct hda_amp_info *buffer;
- u32 key;
- hda_nid_t nid;
- unsigned int idx, dir, ch;
- struct hda_amp_info info;
-
- buffer = snd_array_elem(&codec->amp_cache.buf, i);
- if (!buffer->head.dirty)
- continue;
- buffer->head.dirty = 0;
- info = *buffer;
- key = info.head.key;
- if (!key)
- continue;
- nid = key & 0xff;
- idx = (key >> 16) & 0xff;
- dir = (key >> 24) & 0xff;
- for (ch = 0; ch < 2; ch++) {
- if (!(info.head.val & INFO_AMP_VOL(ch)))
- continue;
- mutex_unlock(&codec->hash_mutex);
- put_vol_mute(codec, info.amp_caps, nid, ch, dir, idx,
- info.vol[ch]);
- mutex_lock(&codec->hash_mutex);
- }
- }
- mutex_unlock(&codec->hash_mutex);
-}
-EXPORT_SYMBOL_GPL(snd_hda_codec_resume_amp);
-
static u32 get_amp_max_value(struct hda_codec *codec, hda_nid_t nid, int dir,
unsigned int ofs)
{
@@ -2478,14 +1577,12 @@ int snd_hda_mixer_amp_volume_put(struct snd_kcontrol *kcontrol,
long *valp = ucontrol->value.integer.value;
int change = 0;
- snd_hda_power_up(codec);
if (chs & 1) {
change = update_amp_value(codec, nid, 0, dir, idx, ofs, *valp);
valp++;
}
if (chs & 2)
change |= update_amp_value(codec, nid, 1, dir, idx, ofs, *valp);
- snd_hda_power_down(codec);
return change;
}
EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_volume_put);
@@ -2572,7 +1669,7 @@ find_mixer_ctl(struct hda_codec *codec, const char *name, int dev, int idx)
if (snd_BUG_ON(strlen(name) >= sizeof(id.name)))
return NULL;
strcpy(id.name, name);
- return snd_ctl_find_id(codec->bus->card, &id);
+ return snd_ctl_find_id(codec->card, &id);
}
/**
@@ -2636,7 +1733,7 @@ int snd_hda_ctl_add(struct hda_codec *codec, hda_nid_t nid,
nid = kctl->id.subdevice & 0xffff;
if (kctl->id.subdevice & (HDA_SUBDEV_NID_FLAG|HDA_SUBDEV_AMP_FLAG))
kctl->id.subdevice = 0;
- err = snd_ctl_add(codec->bus->card, kctl);
+ err = snd_ctl_add(codec->card, kctl);
if (err < 0)
return err;
item = snd_array_new(&codec->mixers);
@@ -2689,7 +1786,7 @@ void snd_hda_ctls_clear(struct hda_codec *codec)
int i;
struct hda_nid_item *items = codec->mixers.list;
for (i = 0; i < codec->mixers.used; i++)
- snd_ctl_remove(codec->bus->card, items[i].kctl);
+ snd_ctl_remove(codec->card, items[i].kctl);
snd_array_free(&codec->mixers);
snd_array_free(&codec->nids);
}
@@ -2712,10 +1809,9 @@ int snd_hda_lock_devices(struct hda_bus *bus)
if (!list_empty(&card->ctl_files))
goto err_clear;
- list_for_each_entry(codec, &bus->codec_list, list) {
- int pcm;
- for (pcm = 0; pcm < codec->num_pcms; pcm++) {
- struct hda_pcm *cpcm = &codec->pcm_info[pcm];
+ list_for_each_codec(codec, bus) {
+ struct hda_pcm *cpcm;
+ list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
if (!cpcm->pcm)
continue;
if (cpcm->pcm->streams[0].substream_opened ||
@@ -2742,7 +1838,6 @@ void snd_hda_unlock_devices(struct hda_bus *bus)
{
struct snd_card *card = bus->card;
- card = bus->card;
spin_lock(&card->files_lock);
card->shutdown = 0;
spin_unlock(&card->files_lock);
@@ -2762,51 +1857,12 @@ EXPORT_SYMBOL_GPL(snd_hda_unlock_devices);
int snd_hda_codec_reset(struct hda_codec *codec)
{
struct hda_bus *bus = codec->bus;
- struct snd_card *card = bus->card;
- int i;
if (snd_hda_lock_devices(bus) < 0)
return -EBUSY;
/* OK, let it free */
- cancel_delayed_work_sync(&codec->jackpoll_work);
-#ifdef CONFIG_PM
- cancel_delayed_work_sync(&codec->power_work);
- flush_workqueue(bus->workq);
-#endif
- snd_hda_ctls_clear(codec);
- /* release PCMs */
- for (i = 0; i < codec->num_pcms; i++) {
- if (codec->pcm_info[i].pcm) {
- snd_device_free(card, codec->pcm_info[i].pcm);
- clear_bit(codec->pcm_info[i].device,
- bus->pcm_dev_bits);
- }
- }
- snd_hda_detach_beep_device(codec);
- if (codec->patch_ops.free)
- codec->patch_ops.free(codec);
- memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
- snd_hda_jack_tbl_clear(codec);
- codec->proc_widget_hook = NULL;
- codec->spec = NULL;
- free_hda_cache(&codec->amp_cache);
- free_hda_cache(&codec->cmd_cache);
- init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
- init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
- /* free only driver_pins so that init_pins + user_pins are restored */
- snd_array_free(&codec->driver_pins);
- snd_array_free(&codec->cvt_setups);
- snd_array_free(&codec->spdif_out);
- snd_array_free(&codec->verbs);
- codec->num_pcms = 0;
- codec->pcm_info = NULL;
- codec->preset = NULL;
- codec->slave_dig_outs = NULL;
- codec->spdif_status_reset = 0;
- unload_parser(codec);
- module_put(codec->owner);
- codec->owner = NULL;
+ snd_hdac_device_unregister(&codec->core);
/* allow device access again */
snd_hda_unlock_devices(bus);
@@ -3027,6 +2083,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = {
.put = vmaster_mute_mode_put,
};
+/* meta hook to call each driver's vmaster hook */
+static void vmaster_hook(void *private_data, int enabled)
+{
+ struct hda_vmaster_mute_hook *hook = private_data;
+
+ if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
+ enabled = hook->mute_mode;
+ hook->hook(hook->codec, enabled);
+}
+
/**
* snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED
* @codec: the HDA codec
@@ -3045,9 +2111,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
if (!hook->hook || !hook->sw_kctl)
return 0;
- snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
hook->codec = codec;
hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
+ snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
if (!expose_enum_ctl)
return 0;
kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
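/*
 * Sketch of the driver side of this interface: a mute-LED hook attached to
 * the virtual master switch.  example_set_led() is a hypothetical helper;
 * the control name matches the usual vmaster switch created by the driver.
 */
static void example_mute_led_hook(void *private_data, int enabled)
{
	struct hda_codec *codec = private_data;

	/* enabled == 0 when the master switch is muted */
	example_set_led(codec, !enabled);
}

static int example_add_mute_led(struct hda_codec *codec,
				struct hda_vmaster_mute_hook *hook)
{
	hook->hook = example_mute_led_hook;
	hook->sw_kctl = snd_hda_find_mixer_ctl(codec, "Master Playback Switch");
	return snd_hda_add_vmaster_hook(codec, hook, true);
}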
@@ -3073,14 +2139,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
*/
if (hook->codec->bus->shutdown)
return;
- switch (hook->mute_mode) {
- case HDA_VMUTE_FOLLOW_MASTER:
- snd_ctl_sync_vmaster_hook(hook->sw_kctl);
- break;
- default:
- hook->hook(hook->codec, hook->mute_mode);
- break;
- }
+ snd_ctl_sync_vmaster_hook(hook->sw_kctl);
}
EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
@@ -3153,7 +2212,6 @@ int snd_hda_mixer_amp_switch_put(struct snd_kcontrol *kcontrol,
long *valp = ucontrol->value.integer.value;
int change = 0;
- snd_hda_power_up(codec);
if (chs & 1) {
change = snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
HDA_AMP_MUTE,
@@ -3165,7 +2223,6 @@ int snd_hda_mixer_amp_switch_put(struct snd_kcontrol *kcontrol,
HDA_AMP_MUTE,
*valp ? 0 : HDA_AMP_MUTE);
hda_call_check_power_status(codec, nid);
- snd_hda_power_down(codec);
return change;
}
EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_switch_put);
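/*
 * Minimal sketch of the mask/value amp interface used by the callback above:
 * muting both channels of an output amp.  NID 0x02 is an assumed DAC widget.
 */
static void example_mute_dac(struct hda_codec *codec)
{
	snd_hda_codec_amp_stereo(codec, 0x02, HDA_OUTPUT, 0,
				 HDA_AMP_MUTE, HDA_AMP_MUTE);
}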
@@ -3466,25 +2523,35 @@ static unsigned int convert_to_spdif_status(unsigned short val)
/* set digital convert verbs both for the given NID and its slaves */
static void set_dig_out(struct hda_codec *codec, hda_nid_t nid,
- int verb, int val)
+ int mask, int val)
{
const hda_nid_t *d;
- snd_hda_codec_write_cache(codec, nid, 0, verb, val);
+ snd_hdac_regmap_update(&codec->core, nid, AC_VERB_SET_DIGI_CONVERT_1,
+ mask, val);
d = codec->slave_dig_outs;
if (!d)
return;
for (; *d; d++)
- snd_hda_codec_write_cache(codec, *d, 0, verb, val);
+ snd_hdac_regmap_update(&codec->core, *d,
+ AC_VERB_SET_DIGI_CONVERT_1, mask, val);
}
static inline void set_dig_out_convert(struct hda_codec *codec, hda_nid_t nid,
int dig1, int dig2)
{
- if (dig1 != -1)
- set_dig_out(codec, nid, AC_VERB_SET_DIGI_CONVERT_1, dig1);
- if (dig2 != -1)
- set_dig_out(codec, nid, AC_VERB_SET_DIGI_CONVERT_2, dig2);
+ unsigned int mask = 0;
+ unsigned int val = 0;
+
+ if (dig1 != -1) {
+ mask |= 0xff;
+ val = dig1;
+ }
+ if (dig2 != -1) {
+ mask |= 0xff00;
+ val |= dig2 << 8;
+ }
+ set_dig_out(codec, nid, mask, val);
}
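/*
 * Worked example of the packing above (values are hypothetical):
 *   dig1 = 0x05, dig2 = 0x17  ->  mask = 0xffff, val = (0x17 << 8) | 0x05
 *   dig1 = 0x04, dig2 = -1    ->  mask = 0x00ff, val = 0x04 (upper byte kept)
 */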
static int snd_hda_spdif_default_put(struct snd_kcontrol *kcontrol,
@@ -3617,6 +2684,7 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
struct snd_kcontrol *kctl;
struct snd_kcontrol_new *dig_mix;
int idx = 0;
+ int val = 0;
const int spdif_index = 16;
struct hda_spdif_out *spdif;
struct hda_bus *bus = codec->bus;
@@ -3657,8 +2725,9 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
return err;
}
spdif->nid = cvt_nid;
- spdif->ctls = snd_hda_codec_read(codec, cvt_nid, 0,
- AC_VERB_GET_DIGI_CONVERT_1, 0);
+ snd_hdac_regmap_read(&codec->core, cvt_nid,
+ AC_VERB_GET_DIGI_CONVERT_1, &val);
+ spdif->ctls = val;
spdif->status = convert_to_spdif_status(spdif->ctls);
return 0;
}
@@ -3802,8 +2871,8 @@ static int snd_hda_spdif_in_switch_put(struct snd_kcontrol *kcontrol,
change = codec->spdif_in_enable != val;
if (change) {
codec->spdif_in_enable = val;
- snd_hda_codec_write_cache(codec, nid, 0,
- AC_VERB_SET_DIGI_CONVERT_1, val);
+ snd_hdac_regmap_write(&codec->core, nid,
+ AC_VERB_SET_DIGI_CONVERT_1, val);
}
mutex_unlock(&codec->spdif_mutex);
return change;
@@ -3814,10 +2883,11 @@ static int snd_hda_spdif_in_status_get(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
hda_nid_t nid = kcontrol->private_value;
- unsigned short val;
+ unsigned int val;
unsigned int sbits;
- val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_DIGI_CONVERT_1, 0);
+ snd_hdac_regmap_read(&codec->core, nid,
+ AC_VERB_GET_DIGI_CONVERT_1, &val);
sbits = convert_to_spdif_status(val);
ucontrol->value.iec958.status[0] = sbits;
ucontrol->value.iec958.status[1] = sbits >> 8;
@@ -3883,153 +2953,6 @@ int snd_hda_create_spdif_in_ctls(struct hda_codec *codec, hda_nid_t nid)
}
EXPORT_SYMBOL_GPL(snd_hda_create_spdif_in_ctls);
-/*
- * command cache
- */
-
-/* build a 31bit cache key with the widget id and the command parameter */
-#define build_cmd_cache_key(nid, verb) ((verb << 8) | nid)
-#define get_cmd_cache_nid(key) ((key) & 0xff)
-#define get_cmd_cache_cmd(key) (((key) >> 8) & 0xffff)
-
-/**
- * snd_hda_codec_write_cache - send a single command with caching
- * @codec: the HDA codec
- * @nid: NID to send the command
- * @flags: optional bit flags
- * @verb: the verb to send
- * @parm: the parameter for the verb
- *
- * Send a single command without waiting for response.
- *
- * Returns 0 if successful, or a negative error code.
- */
-int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
- int flags, unsigned int verb, unsigned int parm)
-{
- int err;
- struct hda_cache_head *c;
- u32 key;
- unsigned int cache_only;
-
- cache_only = codec->cached_write;
- if (!cache_only) {
- err = snd_hda_codec_write(codec, nid, flags, verb, parm);
- if (err < 0)
- return err;
- }
-
- /* parm may contain the verb stuff for get/set amp */
- verb = verb | (parm >> 8);
- parm &= 0xff;
- key = build_cmd_cache_key(nid, verb);
- mutex_lock(&codec->bus->cmd_mutex);
- c = get_alloc_hash(&codec->cmd_cache, key);
- if (c) {
- c->val = parm;
- c->dirty = cache_only;
- }
- mutex_unlock(&codec->bus->cmd_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_codec_write_cache);
-
-/**
- * snd_hda_codec_update_cache - check cache and write the cmd only when needed
- * @codec: the HDA codec
- * @nid: NID to send the command
- * @flags: optional bit flags
- * @verb: the verb to send
- * @parm: the parameter for the verb
- *
- * This function works like snd_hda_codec_write_cache(), but it doesn't send
- * command if the parameter is already identical with the cached value.
- * If not, it sends the command and refreshes the cache.
- *
- * Returns 0 if successful, or a negative error code.
- */
-int snd_hda_codec_update_cache(struct hda_codec *codec, hda_nid_t nid,
- int flags, unsigned int verb, unsigned int parm)
-{
- struct hda_cache_head *c;
- u32 key;
-
- /* parm may contain the verb stuff for get/set amp */
- verb = verb | (parm >> 8);
- parm &= 0xff;
- key = build_cmd_cache_key(nid, verb);
- mutex_lock(&codec->bus->cmd_mutex);
- c = get_hash(&codec->cmd_cache, key);
- if (c && c->val == parm) {
- mutex_unlock(&codec->bus->cmd_mutex);
- return 0;
- }
- mutex_unlock(&codec->bus->cmd_mutex);
- return snd_hda_codec_write_cache(codec, nid, flags, verb, parm);
-}
-EXPORT_SYMBOL_GPL(snd_hda_codec_update_cache);
-
-/**
- * snd_hda_codec_resume_cache - Resume all the commands from the cache
- * @codec: HD-audio codec
- *
- * Execute all verbs recorded in the command caches to resume.
- */
-void snd_hda_codec_resume_cache(struct hda_codec *codec)
-{
- int i;
-
- mutex_lock(&codec->hash_mutex);
- codec->cached_write = 0;
- for (i = 0; i < codec->cmd_cache.buf.used; i++) {
- struct hda_cache_head *buffer;
- u32 key;
-
- buffer = snd_array_elem(&codec->cmd_cache.buf, i);
- key = buffer->key;
- if (!key)
- continue;
- if (!buffer->dirty)
- continue;
- buffer->dirty = 0;
- mutex_unlock(&codec->hash_mutex);
- snd_hda_codec_write(codec, get_cmd_cache_nid(key), 0,
- get_cmd_cache_cmd(key), buffer->val);
- mutex_lock(&codec->hash_mutex);
- }
- mutex_unlock(&codec->hash_mutex);
-}
-EXPORT_SYMBOL_GPL(snd_hda_codec_resume_cache);
-
-/**
- * snd_hda_sequence_write_cache - sequence writes with caching
- * @codec: the HDA codec
- * @seq: VERB array to send
- *
- * Send the commands sequentially from the given array.
- * The commands are recorded in the cache for power-save and resume.
- * The array must be terminated with NID=0.
- */
-void snd_hda_sequence_write_cache(struct hda_codec *codec,
- const struct hda_verb *seq)
-{
- for (; seq->nid; seq++)
- snd_hda_codec_write_cache(codec, seq->nid, 0, seq->verb,
- seq->param);
-}
-EXPORT_SYMBOL_GPL(snd_hda_sequence_write_cache);
-
-/**
- * snd_hda_codec_flush_cache - Execute all pending (cached) amps / verbs
- * @codec: HD-audio codec
- */
-void snd_hda_codec_flush_cache(struct hda_codec *codec)
-{
- snd_hda_codec_resume_amp(codec);
- snd_hda_codec_resume_cache(codec);
-}
-EXPORT_SYMBOL_GPL(snd_hda_codec_flush_cache);
-
/**
* snd_hda_codec_set_power_to_all - Set the power state to all widgets
* @codec: the HDA codec
@@ -4043,10 +2966,9 @@ EXPORT_SYMBOL_GPL(snd_hda_codec_flush_cache);
void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state)
{
- hda_nid_t nid = codec->start_nid;
- int i;
+ hda_nid_t nid;
- for (i = 0; i < codec->num_nodes; i++, nid++) {
+ for_each_hda_codec_node(nid, codec) {
unsigned int wcaps = get_wcaps(codec, nid);
unsigned int state = power_state;
if (!(wcaps & AC_WCAP_POWER))
@@ -4063,22 +2985,6 @@ void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
EXPORT_SYMBOL_GPL(snd_hda_codec_set_power_to_all);
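/*
 * Sketch of the node-iteration idiom used above, which replaces the old
 * start_nid/num_nodes bookkeeping; the debug print is only for illustration.
 */
static void example_walk_widgets(struct hda_codec *codec)
{
	hda_nid_t nid;

	for_each_hda_codec_node(nid, codec)
		codec_dbg(codec, "widget 0x%02x, caps 0x%08x\n",
			  nid, get_wcaps(codec, nid));
}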
/*
- * supported power states check
- */
-static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, hda_nid_t fg,
- unsigned int power_state)
-{
- int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE);
-
- if (sup == -1)
- return false;
- if (sup & power_state)
- return true;
- else
- return false;
-}
-
-/*
* wait until the state is reached, returns the current state
*/
static unsigned int hda_sync_power_state(struct hda_codec *codec,
@@ -4117,7 +3023,7 @@ unsigned int snd_hda_codec_eapd_power_filter(struct hda_codec *codec,
hda_nid_t nid,
unsigned int power_state)
{
- if (nid == codec->afg || nid == codec->mfg)
+ if (nid == codec->core.afg || nid == codec->core.mfg)
return power_state;
if (power_state == AC_PWRST_D3 &&
get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_PIN &&
@@ -4137,7 +3043,7 @@ EXPORT_SYMBOL_GPL(snd_hda_codec_eapd_power_filter);
static unsigned int hda_set_power_state(struct hda_codec *codec,
unsigned int power_state)
{
- hda_nid_t fg = codec->afg ? codec->afg : codec->mfg;
+ hda_nid_t fg = codec->core.afg ? codec->core.afg : codec->core.mfg;
int count;
unsigned int state;
int flags = 0;
@@ -4145,7 +3051,7 @@ static unsigned int hda_set_power_state(struct hda_codec *codec,
/* this delay seems necessary to avoid click noise at power-down */
if (power_state == AC_PWRST_D3) {
if (codec->depop_delay < 0)
- msleep(codec->epss ? 10 : 100);
+ msleep(codec_has_epss(codec) ? 10 : 100);
else if (codec->depop_delay > 0)
msleep(codec->depop_delay);
flags = HDA_RW_NO_RESPONSE_FALLBACK;
@@ -4179,14 +3085,13 @@ static unsigned int hda_set_power_state(struct hda_codec *codec,
*/
static void sync_power_up_states(struct hda_codec *codec)
{
- hda_nid_t nid = codec->start_nid;
- int i;
+ hda_nid_t nid;
/* don't care if no filter is used */
if (!codec->power_filter)
return;
- for (i = 0; i < codec->num_nodes; i++, nid++) {
+ for_each_hda_codec_node(nid, codec) {
unsigned int wcaps = get_wcaps(codec, nid);
unsigned int target;
if (!(wcaps & AC_WCAP_POWER))
@@ -4212,63 +3117,54 @@ static inline void hda_exec_init_verbs(struct hda_codec *codec) {}
#endif
#ifdef CONFIG_PM
+/* update the power on/off account with the current jiffies */
+static void update_power_acct(struct hda_codec *codec, bool on)
+{
+ unsigned long delta = jiffies - codec->power_jiffies;
+
+ if (on)
+ codec->power_on_acct += delta;
+ else
+ codec->power_off_acct += delta;
+ codec->power_jiffies += delta;
+}
+
+void snd_hda_update_power_acct(struct hda_codec *codec)
+{
+ update_power_acct(codec, hda_codec_is_power_on(codec));
+}
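/*
 * Example of the accounting above (jiffies values are hypothetical):
 *   power_jiffies = 5000, jiffies = 7500, on = true
 *   -> delta = 2500, power_on_acct += 2500, power_jiffies becomes 7500
 */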
+
/*
* call suspend and power-down; used both from PM and power-save
* this function returns the power state in the end
*/
-static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq)
+static unsigned int hda_call_codec_suspend(struct hda_codec *codec)
{
unsigned int state;
- codec->in_pm = 1;
+ atomic_inc(&codec->core.in_pm);
if (codec->patch_ops.suspend)
codec->patch_ops.suspend(codec);
hda_cleanup_all_streams(codec);
state = hda_set_power_state(codec, AC_PWRST_D3);
- /* Cancel delayed work if we aren't currently running from it. */
- if (!in_wq)
- cancel_delayed_work_sync(&codec->power_work);
- spin_lock(&codec->power_lock);
- snd_hda_update_power_acct(codec);
- trace_hda_power_down(codec);
- codec->power_on = 0;
- codec->power_transition = 0;
- codec->power_jiffies = jiffies;
- spin_unlock(&codec->power_lock);
- codec->in_pm = 0;
+ update_power_acct(codec, true);
+ atomic_dec(&codec->core.in_pm);
return state;
}
-/* mark all entries of cmd and amp caches dirty */
-static void hda_mark_cmd_cache_dirty(struct hda_codec *codec)
-{
- int i;
- for (i = 0; i < codec->cmd_cache.buf.used; i++) {
- struct hda_cache_head *cmd;
- cmd = snd_array_elem(&codec->cmd_cache.buf, i);
- cmd->dirty = 1;
- }
- for (i = 0; i < codec->amp_cache.buf.used; i++) {
- struct hda_amp_info *amp;
- amp = snd_array_elem(&codec->amp_cache.buf, i);
- amp->head.dirty = 1;
- }
-}
-
/*
* kick up codec; used both from PM and power-save
*/
static void hda_call_codec_resume(struct hda_codec *codec)
{
- codec->in_pm = 1;
+ atomic_inc(&codec->core.in_pm);
- hda_mark_cmd_cache_dirty(codec);
+ if (codec->core.regmap)
+ regcache_mark_dirty(codec->core.regmap);
+
+ codec->power_jiffies = jiffies;
- /* set as if powered on for avoiding re-entering the resume
- * in the resume / power-save sequence
- */
- hda_keep_power_on(codec);
hda_set_power_state(codec, AC_PWRST_D0);
restore_shutup_pins(codec);
hda_exec_init_verbs(codec);
@@ -4278,72 +3174,71 @@ static void hda_call_codec_resume(struct hda_codec *codec)
else {
if (codec->patch_ops.init)
codec->patch_ops.init(codec);
- snd_hda_codec_resume_amp(codec);
- snd_hda_codec_resume_cache(codec);
+ if (codec->core.regmap)
+ regcache_sync(codec->core.regmap);
}
if (codec->jackpoll_interval)
hda_jackpoll_work(&codec->jackpoll_work.work);
else
snd_hda_jack_report_sync(codec);
-
- codec->in_pm = 0;
- snd_hda_power_down(codec); /* flag down before returning */
+ atomic_dec(&codec->core.in_pm);
}
-#endif /* CONFIG_PM */
+static int hda_codec_runtime_suspend(struct device *dev)
+{
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+ struct hda_pcm *pcm;
+ unsigned int state;
-/**
- * snd_hda_build_controls - build mixer controls
- * @bus: the BUS
- *
- * Creates mixer controls for each codec included in the bus.
- *
- * Returns 0 if successful, otherwise a negative error code.
- */
-int snd_hda_build_controls(struct hda_bus *bus)
+ cancel_delayed_work_sync(&codec->jackpoll_work);
+ list_for_each_entry(pcm, &codec->pcm_list_head, list)
+ snd_pcm_suspend_all(pcm->pcm);
+ state = hda_call_codec_suspend(codec);
+ if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+ (state & AC_PWRST_CLK_STOP_OK))
+ snd_hdac_codec_link_down(&codec->core);
+ return 0;
+}
+
+static int hda_codec_runtime_resume(struct device *dev)
{
- struct hda_codec *codec;
+ struct hda_codec *codec = dev_to_hda_codec(dev);
- list_for_each_entry(codec, &bus->codec_list, list) {
- int err = snd_hda_codec_build_controls(codec);
- if (err < 0) {
- codec_err(codec,
- "cannot build controls for #%d (error %d)\n",
- codec->addr, err);
- err = snd_hda_codec_reset(codec);
- if (err < 0) {
- codec_err(codec,
- "cannot revert codec\n");
- return err;
- }
- }
- }
+ snd_hdac_codec_link_up(&codec->core);
+ hda_call_codec_resume(codec);
+ pm_runtime_mark_last_busy(dev);
return 0;
}
-EXPORT_SYMBOL_GPL(snd_hda_build_controls);
+#endif /* CONFIG_PM */
+
+/* referred in hda_bind.c */
+const struct dev_pm_ops hda_codec_driver_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(hda_codec_runtime_suspend, hda_codec_runtime_resume,
+ NULL)
+};
/*
* add standard channel maps if not specified
*/
static int add_std_chmaps(struct hda_codec *codec)
{
- int i, str, err;
+ struct hda_pcm *pcm;
+ int str, err;
- for (i = 0; i < codec->num_pcms; i++) {
+ list_for_each_entry(pcm, &codec->pcm_list_head, list) {
for (str = 0; str < 2; str++) {
- struct snd_pcm *pcm = codec->pcm_info[i].pcm;
- struct hda_pcm_stream *hinfo =
- &codec->pcm_info[i].stream[str];
+ struct hda_pcm_stream *hinfo = &pcm->stream[str];
struct snd_pcm_chmap *chmap;
const struct snd_pcm_chmap_elem *elem;
- if (codec->pcm_info[i].own_chmap)
- continue;
- if (!pcm || !hinfo->substreams)
+ if (!pcm || pcm->own_chmap ||
+ !hinfo->substreams)
continue;
elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
- err = snd_pcm_add_chmap_ctls(pcm, str, elem,
+ err = snd_pcm_add_chmap_ctls(pcm->pcm, str, elem,
hinfo->channels_max,
0, &chmap);
if (err < 0)
@@ -4499,43 +3394,29 @@ unsigned int snd_hda_calc_stream_format(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_calc_stream_format);
-static unsigned int get_pcm_param(struct hda_codec *codec, hda_nid_t nid,
- int dir)
+static unsigned int query_pcm_param(struct hda_codec *codec, hda_nid_t nid)
{
unsigned int val = 0;
- if (nid != codec->afg &&
+ if (nid != codec->core.afg &&
(get_wcaps(codec, nid) & AC_WCAP_FORMAT_OVRD))
val = snd_hda_param_read(codec, nid, AC_PAR_PCM);
if (!val || val == -1)
- val = snd_hda_param_read(codec, codec->afg, AC_PAR_PCM);
+ val = snd_hda_param_read(codec, codec->core.afg, AC_PAR_PCM);
if (!val || val == -1)
return 0;
return val;
}
-static unsigned int query_pcm_param(struct hda_codec *codec, hda_nid_t nid)
-{
- return query_caps_hash(codec, nid, 0, HDA_HASH_PARPCM_KEY(nid),
- get_pcm_param);
-}
-
-static unsigned int get_stream_param(struct hda_codec *codec, hda_nid_t nid,
- int dir)
+static unsigned int query_stream_param(struct hda_codec *codec, hda_nid_t nid)
{
unsigned int streams = snd_hda_param_read(codec, nid, AC_PAR_STREAM);
if (!streams || streams == -1)
- streams = snd_hda_param_read(codec, codec->afg, AC_PAR_STREAM);
+ streams = snd_hda_param_read(codec, codec->core.afg, AC_PAR_STREAM);
if (!streams || streams == -1)
return 0;
return streams;
}
-static unsigned int query_stream_param(struct hda_codec *codec, hda_nid_t nid)
-{
- return query_caps_hash(codec, nid, 0, HDA_HASH_PARSTR_KEY(nid),
- get_stream_param);
-}
-
/**
* snd_hda_query_supported_pcm - query the supported PCM rates and formats
* @codec: the HDA codec
@@ -4792,7 +3673,11 @@ int snd_hda_codec_prepare(struct hda_codec *codec,
{
int ret;
mutex_lock(&codec->bus->prepare_mutex);
- ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream);
+ if (hinfo->ops.prepare)
+ ret = hinfo->ops.prepare(hinfo, codec, stream, format,
+ substream);
+ else
+ ret = -ENODEV;
if (ret >= 0)
purify_inactive_streams(codec);
mutex_unlock(&codec->bus->prepare_mutex);
@@ -4813,7 +3698,8 @@ void snd_hda_codec_cleanup(struct hda_codec *codec,
struct snd_pcm_substream *substream)
{
mutex_lock(&codec->bus->prepare_mutex);
- hinfo->ops.cleanup(hinfo, codec, substream);
+ if (hinfo->ops.cleanup)
+ hinfo->ops.cleanup(hinfo, codec, substream);
mutex_unlock(&codec->bus->prepare_mutex);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup);
@@ -4871,112 +3757,84 @@ static int get_empty_pcm_device(struct hda_bus *bus, unsigned int type)
return -EAGAIN;
}
-/*
- * attach a new PCM stream
- */
-static int snd_hda_attach_pcm(struct hda_codec *codec, struct hda_pcm *pcm)
+/* call build_pcms ops of the given codec and set up the default parameters */
+int snd_hda_codec_parse_pcms(struct hda_codec *codec)
{
- struct hda_bus *bus = codec->bus;
- struct hda_pcm_stream *info;
- int stream, err;
+ struct hda_pcm *cpcm;
+ int err;
- if (snd_BUG_ON(!pcm->name))
- return -EINVAL;
- for (stream = 0; stream < 2; stream++) {
- info = &pcm->stream[stream];
- if (info->substreams) {
+ if (!list_empty(&codec->pcm_list_head))
+ return 0; /* already parsed */
+
+ if (!codec->patch_ops.build_pcms)
+ return 0;
+
+ err = codec->patch_ops.build_pcms(codec);
+ if (err < 0) {
+ codec_err(codec, "cannot build PCMs for #%d (error %d)\n",
+ codec->core.addr, err);
+ return err;
+ }
+
+ list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
+ int stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ struct hda_pcm_stream *info = &cpcm->stream[stream];
+
+ if (!info->substreams)
+ continue;
err = set_pcm_default_values(codec, info);
- if (err < 0)
+ if (err < 0) {
+ codec_warn(codec,
+ "fail to setup default for PCM %s\n",
+ cpcm->name);
return err;
+ }
}
}
- return bus->ops.attach_pcm(bus, codec, pcm);
+
+ return 0;
}
/* assign all PCMs of the given codec */
int snd_hda_codec_build_pcms(struct hda_codec *codec)
{
- unsigned int pcm;
- int err;
+ struct hda_bus *bus = codec->bus;
+ struct hda_pcm *cpcm;
+ int dev, err;
- if (!codec->num_pcms) {
- if (!codec->patch_ops.build_pcms)
- return 0;
- err = codec->patch_ops.build_pcms(codec);
- if (err < 0) {
- codec_err(codec,
- "cannot build PCMs for #%d (error %d)\n",
- codec->addr, err);
- err = snd_hda_codec_reset(codec);
- if (err < 0) {
- codec_err(codec,
- "cannot revert codec\n");
- return err;
- }
- }
+ if (snd_BUG_ON(!bus->ops.attach_pcm))
+ return -EINVAL;
+
+ err = snd_hda_codec_parse_pcms(codec);
+ if (err < 0) {
+ snd_hda_codec_reset(codec);
+ return err;
}
- for (pcm = 0; pcm < codec->num_pcms; pcm++) {
- struct hda_pcm *cpcm = &codec->pcm_info[pcm];
- int dev;
+	/* attach new PCM streams */
+ list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
+ if (cpcm->pcm)
+ continue; /* already attached */
if (!cpcm->stream[0].substreams && !cpcm->stream[1].substreams)
continue; /* no substreams assigned */
- if (!cpcm->pcm) {
- dev = get_empty_pcm_device(codec->bus, cpcm->pcm_type);
- if (dev < 0)
- continue; /* no fatal error */
- cpcm->device = dev;
- err = snd_hda_attach_pcm(codec, cpcm);
- if (err < 0) {
- codec_err(codec,
- "cannot attach PCM stream %d for codec #%d\n",
- dev, codec->addr);
- continue; /* no fatal error */
- }
+ dev = get_empty_pcm_device(bus, cpcm->pcm_type);
+ if (dev < 0)
+ continue; /* no fatal error */
+ cpcm->device = dev;
+ err = bus->ops.attach_pcm(bus, codec, cpcm);
+ if (err < 0) {
+ codec_err(codec,
+ "cannot attach PCM stream %d for codec #%d\n",
+ dev, codec->core.addr);
+ continue; /* no fatal error */
}
}
- return 0;
-}
-
-/**
- * snd_hda_build_pcms - build PCM information
- * @bus: the BUS
- *
- * Create PCM information for each codec included in the bus.
- *
- * The build_pcms codec patch is requested to set up codec->num_pcms and
- * codec->pcm_info properly. The array is referred by the top-level driver
- * to create its PCM instances.
- * The allocated codec->pcm_info should be released in codec->patch_ops.free
- * callback.
- *
- * At least, substreams, channels_min and channels_max must be filled for
- * each stream. substreams = 0 indicates that the stream doesn't exist.
- * When rates and/or formats are zero, the supported values are queried
- * from the given nid. The nid is used also by the default ops.prepare
- * and ops.cleanup callbacks.
- *
- * The driver needs to call ops.open in its open callback. Similarly,
- * ops.close is supposed to be called in the close callback.
- * ops.prepare should be called in the prepare or hw_params callback
- * with the proper parameters for set up.
- * ops.cleanup should be called in hw_free for clean up of streams.
- *
- * This function returns 0 if successful, or a negative error code.
- */
-int snd_hda_build_pcms(struct hda_bus *bus)
-{
- struct hda_codec *codec;
- list_for_each_entry(codec, &bus->codec_list, list) {
- int err = snd_hda_codec_build_pcms(codec);
- if (err < 0)
- return err;
- }
return 0;
}
-EXPORT_SYMBOL_GPL(snd_hda_build_pcms);
/**
* snd_hda_add_new_ctls - create controls from the array
@@ -5013,8 +3871,8 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
* the codec addr; if it still fails (or it's the
* primary codec), then try another control index
*/
- if (!addr && codec->addr)
- addr = codec->addr;
+ if (!addr && codec->core.addr)
+ addr = codec->core.addr;
else if (!idx && !knew->index) {
idx = find_empty_mixer_ctl_idx(codec,
knew->name, 0);
@@ -5029,127 +3887,37 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
EXPORT_SYMBOL_GPL(snd_hda_add_new_ctls);
#ifdef CONFIG_PM
-static void hda_power_work(struct work_struct *work)
-{
- struct hda_codec *codec =
- container_of(work, struct hda_codec, power_work.work);
- struct hda_bus *bus = codec->bus;
- unsigned int state;
-
- spin_lock(&codec->power_lock);
- if (codec->power_transition > 0) { /* during power-up sequence? */
- spin_unlock(&codec->power_lock);
- return;
- }
- if (!codec->power_on || codec->power_count) {
- codec->power_transition = 0;
- spin_unlock(&codec->power_lock);
- return;
- }
- spin_unlock(&codec->power_lock);
-
- state = hda_call_codec_suspend(codec, true);
- if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK))
- hda_call_pm_notify(codec, false);
-}
-
-static void hda_keep_power_on(struct hda_codec *codec)
-{
- spin_lock(&codec->power_lock);
- codec->power_count++;
- codec->power_on = 1;
- codec->power_jiffies = jiffies;
- spin_unlock(&codec->power_lock);
- hda_call_pm_notify(codec, true);
-}
-
-/* update the power on/off account with the current jiffies */
-void snd_hda_update_power_acct(struct hda_codec *codec)
-{
- unsigned long delta = jiffies - codec->power_jiffies;
- if (codec->power_on)
- codec->power_on_acct += delta;
- else
- codec->power_off_acct += delta;
- codec->power_jiffies += delta;
-}
-
-/* Transition to powered up, if wait_power_down then wait for a pending
- * transition to D3 to complete. A pending D3 transition is indicated
- * with power_transition == -1. */
-/* call this with codec->power_lock held! */
-static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
-{
- /* Return if power_on or transitioning to power_on, unless currently
- * powering down. */
- if ((codec->power_on || codec->power_transition > 0) &&
- !(wait_power_down && codec->power_transition < 0))
- return;
- spin_unlock(&codec->power_lock);
-
- cancel_delayed_work_sync(&codec->power_work);
-
- spin_lock(&codec->power_lock);
- /* If the power down delayed work was cancelled above before starting,
- * then there is no need to go through power up here.
- */
- if (codec->power_on) {
- if (codec->power_transition < 0)
- codec->power_transition = 0;
- return;
- }
-
- trace_hda_power_up(codec);
- snd_hda_update_power_acct(codec);
- codec->power_on = 1;
- codec->power_jiffies = jiffies;
- codec->power_transition = 1; /* avoid reentrance */
- spin_unlock(&codec->power_lock);
-
- hda_call_codec_resume(codec);
-
- spin_lock(&codec->power_lock);
- codec->power_transition = 0;
-}
-
-#define power_save(codec) \
- ((codec)->bus->power_save ? *(codec)->bus->power_save : 0)
-
-/* Transition to powered down */
-static void __snd_hda_power_down(struct hda_codec *codec)
+static void codec_set_power_save(struct hda_codec *codec, int delay)
{
- if (!codec->power_on || codec->power_count || codec->power_transition)
- return;
+ struct device *dev = hda_codec_dev(codec);
- if (power_save(codec)) {
- codec->power_transition = -1; /* avoid reentrance */
- queue_delayed_work(codec->bus->workq, &codec->power_work,
- msecs_to_jiffies(power_save(codec) * 1000));
+ if (delay > 0) {
+ pm_runtime_set_autosuspend_delay(dev, delay);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+ if (!pm_runtime_suspended(dev))
+ pm_runtime_mark_last_busy(dev);
+ } else {
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_forbid(dev);
}
}
/**
- * snd_hda_power_save - Power-up/down/sync the codec
- * @codec: HD-audio codec
- * @delta: the counter delta to change
- * @d3wait: sync for D3 transition complete
+ * snd_hda_set_power_save - reprogram autosuspend for the given delay
+ * @bus: HD-audio bus
+ * @delay: autosuspend delay in msec, 0 = off
*
- * Change the power-up counter via @delta, and power up or down the hardware
- * appropriately. For the power-down, queue to the delayed action.
- * Passing zero to @delta means to synchronize the power state.
+ * Synchronize the runtime PM autosuspend state from the power_save option.
*/
-void snd_hda_power_save(struct hda_codec *codec, int delta, bool d3wait)
+void snd_hda_set_power_save(struct hda_bus *bus, int delay)
{
- spin_lock(&codec->power_lock);
- codec->power_count += delta;
- trace_hda_power_count(codec);
- if (delta > 0)
- __snd_hda_power_up(codec, d3wait);
- else
- __snd_hda_power_down(codec);
- spin_unlock(&codec->power_lock);
+ struct hda_codec *c;
+
+ list_for_each_codec(c, bus)
+ codec_set_power_save(c, delay);
}
-EXPORT_SYMBOL_GPL(snd_hda_power_save);
+EXPORT_SYMBOL_GPL(snd_hda_set_power_save);
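/*
 * Sketch of the expected caller: the controller driver converting its
 * power_save option (in seconds) into the autosuspend delay (in msec).
 * "power_save_secs" is an assumed variable name.
 */
static void example_apply_power_save(struct hda_bus *bus, int power_save_secs)
{
	snd_hda_set_power_save(bus, power_save_secs * 1000);
}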
/**
* snd_hda_check_amp_list_power - Check the amp list and update the power
@@ -5187,7 +3955,7 @@ int snd_hda_check_amp_list_power(struct hda_codec *codec,
if (!(v & HDA_AMP_MUTE) && v > 0) {
if (!check->power_on) {
check->power_on = 1;
- snd_hda_power_up(codec);
+ snd_hda_power_up_pm(codec);
}
return 1;
}
@@ -5195,7 +3963,7 @@ int snd_hda_check_amp_list_power(struct hda_codec *codec,
}
if (check->power_on) {
check->power_on = 0;
- snd_hda_power_down(codec);
+ snd_hda_power_down_pm(codec);
}
return 0;
}
@@ -5203,88 +3971,6 @@ EXPORT_SYMBOL_GPL(snd_hda_check_amp_list_power);
#endif
/*
- * Channel mode helper
- */
-
-/**
- * snd_hda_ch_mode_info - Info callback helper for the channel mode enum
- * @codec: the HDA codec
- * @uinfo: pointer to get/store the data
- * @chmode: channel mode array
- * @num_chmodes: channel mode array size
- */
-int snd_hda_ch_mode_info(struct hda_codec *codec,
- struct snd_ctl_elem_info *uinfo,
- const struct hda_channel_mode *chmode,
- int num_chmodes)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = num_chmodes;
- if (uinfo->value.enumerated.item >= num_chmodes)
- uinfo->value.enumerated.item = num_chmodes - 1;
- sprintf(uinfo->value.enumerated.name, "%dch",
- chmode[uinfo->value.enumerated.item].channels);
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_ch_mode_info);
-
-/**
- * snd_hda_ch_mode_get - Get callback helper for the channel mode enum
- * @codec: the HDA codec
- * @ucontrol: pointer to get/store the data
- * @chmode: channel mode array
- * @num_chmodes: channel mode array size
- * @max_channels: max number of channels
- */
-int snd_hda_ch_mode_get(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol,
- const struct hda_channel_mode *chmode,
- int num_chmodes,
- int max_channels)
-{
- int i;
-
- for (i = 0; i < num_chmodes; i++) {
- if (max_channels == chmode[i].channels) {
- ucontrol->value.enumerated.item[0] = i;
- break;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_ch_mode_get);
-
-/**
- * snd_hda_ch_mode_put - Put callback helper for the channel mode enum
- * @codec: the HDA codec
- * @ucontrol: pointer to get/store the data
- * @chmode: channel mode array
- * @num_chmodes: channel mode array size
- * @max_channelsp: pointer to store the max channels
- */
-int snd_hda_ch_mode_put(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol,
- const struct hda_channel_mode *chmode,
- int num_chmodes,
- int *max_channelsp)
-{
- unsigned int mode;
-
- mode = ucontrol->value.enumerated.item[0];
- if (mode >= num_chmodes)
- return -EINVAL;
- if (*max_channelsp == chmode[mode].channels)
- return 0;
- /* change the current channel setting */
- *max_channelsp = chmode[mode].channels;
- if (chmode[mode].sequence)
- snd_hda_sequence_write_cache(codec, chmode[mode].sequence);
- return 1;
-}
-EXPORT_SYMBOL_GPL(snd_hda_ch_mode_put);
-
-/*
* input MUX helper
*/
@@ -5418,24 +4104,6 @@ static void cleanup_dig_out_stream(struct hda_codec *codec, hda_nid_t nid)
}
/**
- * snd_hda_bus_reboot_notify - call the reboot notifier of each codec
- * @bus: HD-audio bus
- */
-void snd_hda_bus_reboot_notify(struct hda_bus *bus)
-{
- struct hda_codec *codec;
-
- if (!bus)
- return;
- list_for_each_entry(codec, &bus->codec_list, list) {
- if (hda_codec_is_power_on(codec) &&
- codec->patch_ops.reboot_notify)
- codec->patch_ops.reboot_notify(codec);
- }
-}
-EXPORT_SYMBOL_GPL(snd_hda_bus_reboot_notify);
-
-/**
* snd_hda_multi_out_dig_open - open the digital out in the exclusive mode
* @codec: the HDA codec
* @mout: hda_multi_out object
@@ -5825,123 +4493,26 @@ int snd_hda_add_imux_item(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_add_imux_item);
-
-#ifdef CONFIG_PM
-/*
- * power management
- */
-
-
-static void hda_async_suspend(void *data, async_cookie_t cookie)
-{
- hda_call_codec_suspend(data, false);
-}
-
-static void hda_async_resume(void *data, async_cookie_t cookie)
-{
- hda_call_codec_resume(data);
-}
-
/**
- * snd_hda_suspend - suspend the codecs
- * @bus: the HDA bus
- *
- * Returns 0 if successful.
+ * snd_hda_bus_reset - Reset the bus
+ * @bus: HD-audio bus
*/
-int snd_hda_suspend(struct hda_bus *bus)
+void snd_hda_bus_reset(struct hda_bus *bus)
{
struct hda_codec *codec;
- ASYNC_DOMAIN_EXCLUSIVE(domain);
- list_for_each_entry(codec, &bus->codec_list, list) {
+ list_for_each_codec(codec, bus) {
+ /* FIXME: maybe a better way needed for forced reset */
cancel_delayed_work_sync(&codec->jackpoll_work);
+#ifdef CONFIG_PM
if (hda_codec_is_power_on(codec)) {
- if (bus->num_codecs > 1)
- async_schedule_domain(hda_async_suspend, codec,
- &domain);
- else
- hda_call_codec_suspend(codec, false);
- }
- }
-
- if (bus->num_codecs > 1)
- async_synchronize_full_domain(&domain);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_suspend);
-
-/**
- * snd_hda_resume - resume the codecs
- * @bus: the HDA bus
- *
- * Returns 0 if successful.
- */
-int snd_hda_resume(struct hda_bus *bus)
-{
- struct hda_codec *codec;
- ASYNC_DOMAIN_EXCLUSIVE(domain);
-
- list_for_each_entry(codec, &bus->codec_list, list) {
- if (bus->num_codecs > 1)
- async_schedule_domain(hda_async_resume, codec, &domain);
- else
+ hda_call_codec_suspend(codec);
hda_call_codec_resume(codec);
+ }
+#endif
}
-
- if (bus->num_codecs > 1)
- async_synchronize_full_domain(&domain);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_resume);
-#endif /* CONFIG_PM */
-
-/*
- * generic arrays
- */
-
-/**
- * snd_array_new - get a new element from the given array
- * @array: the array object
- *
- * Get a new element from the given array. If it exceeds the
- * pre-allocated array size, re-allocate the array.
- *
- * Returns NULL if allocation failed.
- */
-void *snd_array_new(struct snd_array *array)
-{
- if (snd_BUG_ON(!array->elem_size))
- return NULL;
- if (array->used >= array->alloced) {
- int num = array->alloced + array->alloc_align;
- int size = (num + 1) * array->elem_size;
- void *nlist;
- if (snd_BUG_ON(num >= 4096))
- return NULL;
- nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO);
- if (!nlist)
- return NULL;
- array->list = nlist;
- array->alloced = num;
- }
- return snd_array_elem(array, array->used++);
-}
-EXPORT_SYMBOL_GPL(snd_array_new);
-
-/**
- * snd_array_free - free the given array elements
- * @array: the array object
- */
-void snd_array_free(struct snd_array *array)
-{
- kfree(array->list);
- array->used = 0;
- array->alloced = 0;
- array->list = NULL;
}
-EXPORT_SYMBOL_GPL(snd_array_free);
+EXPORT_SYMBOL_GPL(snd_hda_bus_reset);
/**
* snd_print_pcm_bits - Print the supported PCM fmt bits to the string buffer
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 9c8820f21f94..9075ac28dc4b 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -21,41 +21,14 @@
#ifndef __SOUND_HDA_CODEC_H
#define __SOUND_HDA_CODEC_H
+#include <linux/kref.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/hwdep.h>
+#include <sound/hdaudio.h>
#include <sound/hda_verbs.h>
-
-/*
- * generic arrays
- */
-struct snd_array {
- unsigned int used;
- unsigned int alloced;
- unsigned int elem_size;
- unsigned int alloc_align;
- void *list;
-};
-
-void *snd_array_new(struct snd_array *array);
-void snd_array_free(struct snd_array *array);
-static inline void snd_array_init(struct snd_array *array, unsigned int size,
- unsigned int align)
-{
- array->elem_size = size;
- array->alloc_align = align;
-}
-
-static inline void *snd_array_elem(struct snd_array *array, unsigned int idx)
-{
- return array->list + idx * array->elem_size;
-}
-
-static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
-{
- return (unsigned long)(ptr - array->list) / array->elem_size;
-}
+#include <sound/hda_regmap.h>
/*
* Structures
@@ -66,10 +39,6 @@ struct hda_beep;
struct hda_codec;
struct hda_pcm;
struct hda_pcm_stream;
-struct hda_bus_unsolicited;
-
-/* NID type */
-typedef u16 hda_nid_t;
/* bus operators */
struct hda_bus_ops {
@@ -84,10 +53,6 @@ struct hda_bus_ops {
struct hda_pcm *pcm);
/* reset bus for retry verb */
void (*bus_reset)(struct hda_bus *bus);
-#ifdef CONFIG_PM
- /* notify power-up/down from codec to controller */
- void (*pm_notify)(struct hda_bus *bus, bool power_up);
-#endif
#ifdef CONFIG_SND_HDA_DSP_LOADER
/* prepare DSP transfer */
int (*load_dsp_prepare)(struct hda_bus *bus, unsigned int format,
@@ -101,15 +66,6 @@ struct hda_bus_ops {
#endif
};
-/* template to pass to the bus constructor */
-struct hda_bus_template {
- void *private_data;
- struct pci_dev *pci;
- const char *modelname;
- int *power_save;
- struct hda_bus_ops ops;
-};
-
/*
* codec bus
*
@@ -117,42 +73,28 @@ struct hda_bus_template {
* A hda_bus contains several codecs in the list codec_list.
*/
struct hda_bus {
+ struct hdac_bus core;
+
struct snd_card *card;
- /* copied from template */
void *private_data;
struct pci_dev *pci;
const char *modelname;
- int *power_save;
struct hda_bus_ops ops;
- /* codec linked list */
- struct list_head codec_list;
- unsigned int num_codecs;
- /* link caddr -> codec */
- struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
-
- struct mutex cmd_mutex;
struct mutex prepare_mutex;
- /* unsolicited event queue */
- struct hda_bus_unsolicited *unsol;
- char workq_name[16];
- struct workqueue_struct *workq; /* common workqueue for codecs */
-
/* assigned PCMs */
DECLARE_BITMAP(pcm_dev_bits, SNDRV_PCM_DEVICES);
/* misc op flags */
unsigned int needs_damn_long_delay :1;
unsigned int allow_bus_reset:1; /* allow bus reset at fatal error */
- unsigned int sync_write:1; /* sync after verb write */
/* status for codec/controller */
unsigned int shutdown :1; /* being unloaded */
unsigned int rirb_error:1; /* error in codec communication */
unsigned int response_reset:1; /* controller was reset */
unsigned int in_reset:1; /* during reset operation */
- unsigned int power_keep_link_on:1; /* don't power off HDA link */
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
int primary_dig_out_type; /* primary digital out PCM type */
@@ -175,15 +117,22 @@ struct hda_codec_preset {
int (*patch)(struct hda_codec *codec);
};
-struct hda_codec_preset_list {
+#define HDA_CODEC_ID_GENERIC_HDMI 0x00000101
+#define HDA_CODEC_ID_GENERIC 0x00000201
+
+struct hda_codec_driver {
+ struct hdac_driver core;
const struct hda_codec_preset *preset;
- struct module *owner;
- struct list_head list;
};
-/* initial hook */
-int snd_hda_add_codec_preset(struct hda_codec_preset_list *preset);
-int snd_hda_delete_codec_preset(struct hda_codec_preset_list *preset);
+int __hda_codec_driver_register(struct hda_codec_driver *drv, const char *name,
+ struct module *owner);
+#define hda_codec_driver_register(drv) \
+ __hda_codec_driver_register(drv, KBUILD_MODNAME, THIS_MODULE)
+void hda_codec_driver_unregister(struct hda_codec_driver *drv);
+#define module_hda_codec_driver(drv) \
+ module_driver(drv, hda_codec_driver_register, \
+ hda_codec_driver_unregister)
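/*
 * Sketch of a codec driver built on these registration helpers; the vendor
 * ID, the name and patch_example() are hypothetical.
 */
static const struct hda_codec_preset example_presets[] = {
	{ .id = 0x10ec0000, .name = "Example", .patch = patch_example },
	{} /* terminator */
};

static struct hda_codec_driver example_codec_driver = {
	.preset = example_presets,
};

module_hda_codec_driver(example_codec_driver);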
/* ops set by the preset patch */
struct hda_codec_ops {
@@ -200,25 +149,7 @@ struct hda_codec_ops {
int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
#endif
void (*reboot_notify)(struct hda_codec *codec);
-};
-
-/* record for amp information cache */
-struct hda_cache_head {
- u32 key:31; /* hash key */
- u32 dirty:1;
- u16 val; /* assigned value */
- u16 next;
-};
-
-struct hda_amp_info {
- struct hda_cache_head head;
- u32 amp_caps; /* amp capabilities */
- u16 vol[2]; /* current volume & mute */
-};
-
-struct hda_cache_rec {
- u16 hash[64]; /* hash table for index */
- struct snd_array buf; /* record entries */
+ void (*stream_pm)(struct hda_codec *codec, hda_nid_t nid, bool on);
};
/* PCM callbacks */
@@ -267,41 +198,29 @@ struct hda_pcm {
int device; /* device number to assign */
struct snd_pcm *pcm; /* assigned PCM instance */
bool own_chmap; /* codec driver provides own channel maps */
+ /* private: */
+ struct hda_codec *codec;
+ struct kref kref;
+ struct list_head list;
};
/* codec information */
struct hda_codec {
- struct device dev;
+ struct hdac_device core;
struct hda_bus *bus;
+ struct snd_card *card;
unsigned int addr; /* codec addr*/
- struct list_head list; /* list point */
-
- hda_nid_t afg; /* AFG node id */
- hda_nid_t mfg; /* MFG node id */
-
- /* ids */
- u8 afg_function_id;
- u8 mfg_function_id;
- u8 afg_unsol;
- u8 mfg_unsol;
- u32 vendor_id;
- u32 subsystem_id;
- u32 revision_id;
+ u32 probe_id; /* overridden id for probing */
/* detected preset */
const struct hda_codec_preset *preset;
- struct module *owner;
- int (*parser)(struct hda_codec *codec);
- const char *vendor_name; /* codec vendor name */
- const char *chip_name; /* codec chip name */
const char *modelname; /* model name for preset */
/* set by patch */
struct hda_codec_ops patch_ops;
/* PCM to create, set by patch_ops.build_pcms callback */
- unsigned int num_pcms;
- struct hda_pcm *pcm_info;
+ struct list_head pcm_list_head;
/* codec specific info */
void *spec;
@@ -311,21 +230,15 @@ struct hda_codec {
unsigned int beep_mode;
/* widget capabilities cache */
- unsigned int num_nodes;
- hda_nid_t start_nid;
u32 *wcaps;
struct snd_array mixers; /* list of assigned mixer elements */
struct snd_array nids; /* list of mapped mixer elements */
- struct hda_cache_rec amp_cache; /* cache for amp access */
- struct hda_cache_rec cmd_cache; /* cache for other commands */
-
struct list_head conn_list; /* linked-list of connection-list */
struct mutex spdif_mutex;
struct mutex control_mutex;
- struct mutex hash_mutex;
struct snd_array spdif_out;
unsigned int spdif_in_enable; /* SPDIF input enable? */
const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */
@@ -345,6 +258,8 @@ struct hda_codec {
#endif
/* misc flags */
+ unsigned int in_freeing:1; /* being released */
+ unsigned int registered:1; /* codec was registered */
unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each
* status change
* (e.g. Realtek codecs)
@@ -362,22 +277,14 @@ struct hda_codec {
unsigned int inv_eapd:1; /* broken h/w: inverted EAPD control */
unsigned int inv_jack_detect:1; /* broken h/w: inverted detection bit */
unsigned int pcm_format_first:1; /* PCM format must be set first */
- unsigned int epss:1; /* supporting EPSS? */
unsigned int cached_write:1; /* write only to caches */
unsigned int dp_mst:1; /* support DP1.2 Multi-stream transport */
unsigned int dump_coef:1; /* dump processing coefs in codec proc file */
+ unsigned int power_save_node:1; /* advanced PM for each widget */
#ifdef CONFIG_PM
- unsigned int power_on :1; /* current (global) power-state */
- unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */
- unsigned int pm_up_notified:1; /* PM notified to controller */
- unsigned int in_pm:1; /* suspend/resume being performed */
- int power_transition; /* power-state in transition */
- int power_count; /* current (global) power refcount */
- struct delayed_work power_work; /* delayed task for powerdown */
unsigned long power_on_acct;
unsigned long power_off_acct;
unsigned long power_jiffies;
- spinlock_t power_lock;
#endif
/* filter the requested power state per nid */
@@ -409,10 +316,11 @@ struct hda_codec {
struct snd_array verbs;
};
-/* direction */
-enum {
- HDA_INPUT, HDA_OUTPUT
-};
+#define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev)
+#define hda_codec_dev(_dev) (&(_dev)->core.dev)
+
+#define list_for_each_codec(c, bus) \
+ list_for_each_entry(c, &(bus)->core.codec_list, core.list)
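/*
 * Sketch of the codec-iteration helper above; the body is only illustrative.
 */
static void example_for_each_codec(struct hda_bus *bus)
{
	struct hda_codec *codec;

	list_for_each_codec(codec, bus)
		codec_info(codec, "codec at address %d\n", codec->core.addr);
}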
/* snd_hda_codec_read/write optional flags */
#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
@@ -420,10 +328,9 @@ enum {
/*
* constructors
*/
-int snd_hda_bus_new(struct snd_card *card, const struct hda_bus_template *temp,
- struct hda_bus **busp);
-int snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
- struct hda_codec **codecp);
+int snd_hda_bus_new(struct snd_card *card, struct hda_bus **busp);
+int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
+ unsigned int codec_addr, struct hda_codec **codecp);
int snd_hda_codec_configure(struct hda_codec *codec);
int snd_hda_codec_update_widgets(struct hda_codec *codec);
@@ -436,9 +343,9 @@ unsigned int snd_hda_codec_read(struct hda_codec *codec, hda_nid_t nid,
int snd_hda_codec_write(struct hda_codec *codec, hda_nid_t nid, int flags,
unsigned int verb, unsigned int parm);
#define snd_hda_param_read(codec, nid, param) \
- snd_hda_codec_read(codec, nid, 0, AC_VERB_PARAMETERS, param)
-int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
- hda_nid_t *start_id);
+ snd_hdac_read_parm(&(codec)->core, nid, param)
+#define snd_hda_get_sub_nodes(codec, nid, start_nid) \
+ snd_hdac_get_sub_nodes(&(codec)->core, nid, start_nid)
int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
hda_nid_t *conn_list, int max_conns);
static inline int
@@ -446,9 +353,12 @@ snd_hda_get_num_conns(struct hda_codec *codec, hda_nid_t nid)
{
return snd_hda_get_connections(codec, nid, NULL, 0);
}
-int snd_hda_get_num_raw_conns(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid,
- hda_nid_t *conn_list, int max_conns);
+
+#define snd_hda_get_raw_connections(codec, nid, list, max_conns) \
+ snd_hdac_get_connections(&(codec)->core, nid, list, max_conns)
+#define snd_hda_get_num_raw_conns(codec, nid) \
+ snd_hdac_get_connections(&(codec)->core, nid, NULL, 0);
+
int snd_hda_get_conn_list(struct hda_codec *codec, hda_nid_t nid,
const hda_nid_t **listp);
int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums,
@@ -470,18 +380,22 @@ void snd_hda_sequence_write(struct hda_codec *codec,
const struct hda_verb *seq);
/* unsolicited event */
-int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex);
+static inline void
+snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
+{
+ snd_hdac_bus_queue_event(&bus->core, res, res_ex);
+}
/* cached write */
-int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
- int flags, unsigned int verb, unsigned int parm);
-void snd_hda_sequence_write_cache(struct hda_codec *codec,
- const struct hda_verb *seq);
-int snd_hda_codec_update_cache(struct hda_codec *codec, hda_nid_t nid,
- int flags, unsigned int verb, unsigned int parm);
-void snd_hda_codec_resume_cache(struct hda_codec *codec);
-/* both for cmd & amp caches */
-void snd_hda_codec_flush_cache(struct hda_codec *codec);
+static inline int
+snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
+ int flags, unsigned int verb, unsigned int parm)
+{
+ return snd_hdac_regmap_write(&codec->core, nid, verb, parm);
+}
+
+#define snd_hda_codec_update_cache(codec, nid, flags, verb, parm) \
+ snd_hda_codec_write_cache(codec, nid, flags, verb, parm)
/* the struct for codec->pin_configs */
struct hda_pincfg {
@@ -512,15 +426,24 @@ void snd_hda_spdif_ctls_assign(struct hda_codec *codec, int idx, hda_nid_t nid);
/*
* Mixer
*/
-int snd_hda_build_controls(struct hda_bus *bus);
int snd_hda_codec_build_controls(struct hda_codec *codec);
/*
* PCM
*/
-int snd_hda_build_pcms(struct hda_bus *bus);
+int snd_hda_codec_parse_pcms(struct hda_codec *codec);
int snd_hda_codec_build_pcms(struct hda_codec *codec);
+__printf(2, 3)
+struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec,
+ const char *fmt, ...);
+
+static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm)
+{
+ kref_get(&pcm->kref);
+}
+void snd_hda_codec_pcm_put(struct hda_pcm *pcm);
+
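/*
 * Sketch of the intended build_pcms usage with the refcounted PCM objects;
 * stream details are elided and NID 0x02 is an assumed DAC.
 */
static int example_build_pcms(struct hda_codec *codec)
{
	struct hda_pcm *info;

	info = snd_hda_codec_pcm_new(codec, "Example Analog");
	if (!info)
		return -ENOMEM;
	info->pcm_type = HDA_PCM_TYPE_AUDIO;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = 0x02;
	/* ... fill the stream templates here ... */
	return 0;
}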
int snd_hda_codec_prepare(struct hda_codec *codec,
struct hda_pcm_stream *hinfo,
unsigned int stream,
@@ -552,20 +475,17 @@ extern const struct snd_pcm_chmap_elem snd_pcm_2_1_chmaps[];
* Misc
*/
void snd_hda_get_codec_name(struct hda_codec *codec, char *name, int namelen);
-void snd_hda_bus_reboot_notify(struct hda_bus *bus);
void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state);
int snd_hda_lock_devices(struct hda_bus *bus);
void snd_hda_unlock_devices(struct hda_bus *bus);
+void snd_hda_bus_reset(struct hda_bus *bus);
/*
* power management
*/
-#ifdef CONFIG_PM
-int snd_hda_suspend(struct hda_bus *bus);
-int snd_hda_resume(struct hda_bus *bus);
-#endif
+extern const struct dev_pm_ops hda_codec_driver_pm;
static inline
int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid)
@@ -587,65 +507,17 @@ const char *snd_hda_get_jack_location(u32 cfg);
/*
* power saving
*/
+#define snd_hda_power_up(codec) snd_hdac_power_up(&(codec)->core)
+#define snd_hda_power_up_pm(codec) snd_hdac_power_up_pm(&(codec)->core)
+#define snd_hda_power_down(codec) snd_hdac_power_down(&(codec)->core)
+#define snd_hda_power_down_pm(codec) snd_hdac_power_down_pm(&(codec)->core)
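A minimal sketch of the intended pairing, assuming the reference-counted semantics of the underlying snd_hdac_power_*() helpers (the same bracketing is visible in azx_pcm_open()/azx_pcm_close() later in this patch):

	snd_hda_power_up(codec);	/* take a power reference before touching the codec */
	/* ... send verbs, program amps and streams ... */
	snd_hda_power_down(codec);	/* drop the reference; the core may then power down */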
#ifdef CONFIG_PM
-void snd_hda_power_save(struct hda_codec *codec, int delta, bool d3wait);
+void snd_hda_set_power_save(struct hda_bus *bus, int delay);
void snd_hda_update_power_acct(struct hda_codec *codec);
#else
-static inline void snd_hda_power_save(struct hda_codec *codec, int delta,
- bool d3wait) {}
+static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {}
#endif
-/**
- * snd_hda_power_up - Power-up the codec
- * @codec: HD-audio codec
- *
- * Increment the power-up counter and power up the hardware really when
- * not turned on yet.
- */
-static inline void snd_hda_power_up(struct hda_codec *codec)
-{
- snd_hda_power_save(codec, 1, false);
-}
-
-/**
- * snd_hda_power_up_d3wait - Power-up the codec after waiting for any pending
- * D3 transition to complete. This differs from snd_hda_power_up() when
- * power_transition == -1. snd_hda_power_up sees this case as a nop,
- * snd_hda_power_up_d3wait waits for the D3 transition to complete then powers
- * back up.
- * @codec: HD-audio codec
- *
- * Cancel any power down operation hapenning on the work queue, then power up.
- */
-static inline void snd_hda_power_up_d3wait(struct hda_codec *codec)
-{
- snd_hda_power_save(codec, 1, true);
-}
-
-/**
- * snd_hda_power_down - Power-down the codec
- * @codec: HD-audio codec
- *
- * Decrement the power-up counter and schedules the power-off work if
- * the counter rearches to zero.
- */
-static inline void snd_hda_power_down(struct hda_codec *codec)
-{
- snd_hda_power_save(codec, -1, false);
-}
-
-/**
- * snd_hda_power_sync - Synchronize the power-save status
- * @codec: HD-audio codec
- *
- * Synchronize the actual power state with the power account;
- * called when power_save parameter is changed
- */
-static inline void snd_hda_power_sync(struct hda_codec *codec)
-{
- snd_hda_power_save(codec, 0, false);
-}
-
#ifdef CONFIG_SND_HDA_PATCH_LOADER
/*
* patch firmware
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 17c2637d842c..26ce990592a0 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -27,10 +27,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/reboot.h>
#include <sound/core.h>
#include <sound/initval.h>
-#include "hda_priv.h"
#include "hda_controller.h"
#define CREATE_TRACE_POINTS
@@ -259,11 +257,18 @@ static void azx_timecounter_init(struct snd_pcm_substream *substream,
tc->cycle_last = last;
}
+static inline struct hda_pcm_stream *
+to_hda_pcm_stream(struct snd_pcm_substream *substream)
+{
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ return &apcm->info->stream[substream->stream];
+}
+
static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
u64 nsec)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
- struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
+ struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
u64 codec_frames, codec_nsecs;
if (!hinfo->ops.get_delay)
@@ -399,7 +404,7 @@ static int azx_setup_periods(struct azx *chip,
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
- struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
+ struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
unsigned long flags;
@@ -410,9 +415,11 @@ static int azx_pcm_close(struct snd_pcm_substream *substream)
azx_dev->running = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
azx_release_device(azx_dev);
- hinfo->ops.close(hinfo, apcm->codec, substream);
+ if (hinfo->ops.close)
+ hinfo->ops.close(hinfo, apcm->codec, substream);
snd_hda_power_down(apcm->codec);
mutex_unlock(&chip->open_mutex);
+ snd_hda_codec_pcm_put(apcm->info);
return 0;
}
@@ -441,7 +448,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx_dev *azx_dev = get_azx_dev(substream);
struct azx *chip = apcm->chip;
- struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
+ struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
int err;
/* reset BDL address */
@@ -468,7 +475,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
- struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
+ struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int bufsize, period_bytes, format_val, stream_tag;
int err;
@@ -708,7 +715,7 @@ unsigned int azx_get_position(struct azx *chip,
if (substream->runtime) {
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
- struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
+ struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
if (chip->get_delay[stream])
delay += chip->get_delay[stream](chip, azx_dev, pos);
@@ -732,17 +739,32 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
azx_get_position(chip, azx_dev));
}
-static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
- struct timespec *ts)
+static int azx_get_time_info(struct snd_pcm_substream *substream,
+ struct timespec *system_ts, struct timespec *audio_ts,
+ struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+ struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
struct azx_dev *azx_dev = get_azx_dev(substream);
u64 nsec;
- nsec = timecounter_read(&azx_dev->azx_tc);
- nsec = div_u64(nsec, 3); /* can be optimized */
- nsec = azx_adjust_codec_delay(substream, nsec);
+ if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
+ (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
+
+ snd_pcm_gettime(substream->runtime, system_ts);
- *ts = ns_to_timespec(nsec);
+ nsec = timecounter_read(&azx_dev->azx_tc);
+ nsec = div_u64(nsec, 3); /* can be optimized */
+ if (audio_tstamp_config->report_delay)
+ nsec = azx_adjust_codec_delay(substream, nsec);
+
+ *audio_ts = ns_to_timespec(nsec);
+
+ audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
+ audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
+ audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
+
+ } else
+ audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
return 0;
}
@@ -756,7 +778,8 @@ static struct snd_pcm_hardware azx_pcm_hw = {
/* SNDRV_PCM_INFO_RESUME |*/
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_SYNC_START |
- SNDRV_PCM_INFO_HAS_WALL_CLOCK |
+ SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
+ SNDRV_PCM_INFO_HAS_LINK_ATIME |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
@@ -775,7 +798,7 @@ static struct snd_pcm_hardware azx_pcm_hw = {
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
- struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
+ struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -783,11 +806,12 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
int err;
int buff_step;
+ snd_hda_codec_pcm_get(apcm->info);
mutex_lock(&chip->open_mutex);
azx_dev = azx_assign_device(chip, substream);
if (azx_dev == NULL) {
- mutex_unlock(&chip->open_mutex);
- return -EBUSY;
+ err = -EBUSY;
+ goto unlock;
}
runtime->hw = azx_pcm_hw;
runtime->hw.channels_min = hinfo->channels_min;
@@ -821,13 +845,14 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
buff_step);
snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
buff_step);
- snd_hda_power_up_d3wait(apcm->codec);
- err = hinfo->ops.open(hinfo, apcm->codec, substream);
+ snd_hda_power_up(apcm->codec);
+ if (hinfo->ops.open)
+ err = hinfo->ops.open(hinfo, apcm->codec, substream);
+ else
+ err = -ENODEV;
if (err < 0) {
azx_release_device(azx_dev);
- snd_hda_power_down(apcm->codec);
- mutex_unlock(&chip->open_mutex);
- return err;
+ goto powerdown;
}
snd_pcm_limit_hw_rates(runtime);
/* sanity check */
@@ -836,16 +861,18 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
snd_BUG_ON(!runtime->hw.formats) ||
snd_BUG_ON(!runtime->hw.rates)) {
azx_release_device(azx_dev);
- hinfo->ops.close(hinfo, apcm->codec, substream);
- snd_hda_power_down(apcm->codec);
- mutex_unlock(&chip->open_mutex);
- return -EINVAL;
+ if (hinfo->ops.close)
+ hinfo->ops.close(hinfo, apcm->codec, substream);
+ err = -EINVAL;
+ goto powerdown;
}
- /* disable WALLCLOCK timestamps for capture streams
+ /* disable LINK_ATIME timestamps for capture streams
until we figure out how to handle digital inputs */
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
+ runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
+ }
spin_lock_irqsave(&chip->reg_lock, flags);
azx_dev->substream = substream;
@@ -856,6 +883,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
snd_pcm_set_sync(substream);
mutex_unlock(&chip->open_mutex);
return 0;
+
+ powerdown:
+ snd_hda_power_down(apcm->codec);
+ unlock:
+ mutex_unlock(&chip->open_mutex);
+ snd_hda_codec_pcm_put(apcm->info);
+ return err;
}
static int azx_pcm_mmap(struct snd_pcm_substream *substream,
@@ -877,7 +911,7 @@ static struct snd_pcm_ops azx_pcm_ops = {
.prepare = azx_pcm_prepare,
.trigger = azx_pcm_trigger,
.pointer = azx_pcm_pointer,
- .wall_clock = azx_get_wallclock_tstamp,
+ .get_time_info = azx_get_time_info,
.mmap = azx_pcm_mmap,
.page = snd_pcm_sgbuf_ops_page,
};
@@ -887,6 +921,7 @@ static void azx_pcm_free(struct snd_pcm *pcm)
struct azx_pcm *apcm = pcm->private_data;
if (apcm) {
list_del(&apcm->list);
+ apcm->info->pcm = NULL;
kfree(apcm);
}
}
@@ -923,6 +958,7 @@ static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
apcm->chip = chip;
apcm->pcm = pcm;
apcm->codec = codec;
+ apcm->info = cpcm;
pcm->private_data = apcm;
pcm->private_free = azx_pcm_free;
if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
@@ -930,7 +966,6 @@ static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
list_add_tail(&apcm->list, &chip->pcm_list);
cpcm->pcm = pcm;
for (s = 0; s < 2; s++) {
- apcm->hinfo[s] = &cpcm->stream[s];
if (cpcm->stream[s].substreams)
snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
}
@@ -941,9 +976,6 @@ static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
chip->card->dev,
size, MAX_PREALLOC_SIZE);
- /* link to codec */
- for (s = 0; s < 2; s++)
- pcm->streams[s].dev.parent = &codec->dev;
return 0;
}
@@ -952,14 +984,9 @@ static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
*/
static int azx_alloc_cmd_io(struct azx *chip)
{
- int err;
-
/* single page (at least 4096 bytes) must suffice for both ring buffers */
- err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
- PAGE_SIZE, &chip->rb);
- if (err < 0)
- dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
- return err;
+ return chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
+ PAGE_SIZE, &chip->rb);
}
static void azx_init_cmd_io(struct azx *chip)
@@ -1445,7 +1472,6 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus,
int azx_alloc_stream_pages(struct azx *chip)
{
int i, err;
- struct snd_card *card = chip->card;
for (i = 0; i < chip->num_streams; i++) {
dsp_lock_init(&chip->azx_dev[i]);
@@ -1453,18 +1479,14 @@ int azx_alloc_stream_pages(struct azx *chip)
err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
BDL_SIZE,
&chip->azx_dev[i].bdl);
- if (err < 0) {
- dev_err(card->dev, "cannot allocate BDL\n");
+ if (err < 0)
return -ENOMEM;
- }
}
/* allocate memory for the position buffer */
err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
chip->num_streams * 8, &chip->posbuf);
- if (err < 0) {
- dev_err(card->dev, "cannot allocate posbuf\n");
+ if (err < 0)
return -ENOMEM;
- }
/* allocate CORB/RIRB */
err = azx_alloc_cmd_io(chip);
@@ -1676,7 +1698,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
int i;
#ifdef CONFIG_PM
- if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
+ if (azx_has_pm_runtime(chip))
if (!pm_runtime_active(chip->card->dev))
return IRQ_NONE;
#endif
@@ -1742,12 +1764,12 @@ static int probe_codec(struct azx *chip, int addr)
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res;
- mutex_lock(&chip->bus->cmd_mutex);
+ mutex_lock(&chip->bus->core.cmd_mutex);
chip->probing = 1;
azx_send_cmd(chip->bus, cmd);
res = azx_get_response(chip->bus, addr);
chip->probing = 0;
- mutex_unlock(&chip->bus->cmd_mutex);
+ mutex_unlock(&chip->bus->core.cmd_mutex);
if (res == -1)
return -EIO;
dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
@@ -1761,34 +1783,11 @@ static void azx_bus_reset(struct hda_bus *bus)
bus->in_reset = 1;
azx_stop_chip(chip);
azx_init_chip(chip, true);
-#ifdef CONFIG_PM
- if (chip->initialized) {
- struct azx_pcm *p;
- list_for_each_entry(p, &chip->pcm_list, list)
- snd_pcm_suspend_all(p->pcm);
- snd_hda_suspend(chip->bus);
- snd_hda_resume(chip->bus);
- }
-#endif
+ if (chip->initialized)
+ snd_hda_bus_reset(chip->bus);
bus->in_reset = 0;
}
-#ifdef CONFIG_PM
-/* power-up/down the controller */
-static void azx_power_notify(struct hda_bus *bus, bool power_up)
-{
- struct azx *chip = bus->private_data;
-
- if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
- return;
-
- if (power_up)
- pm_runtime_get_sync(chip->card->dev);
- else
- pm_runtime_put_sync(chip->card->dev);
-}
-#endif
-
static int get_jackpoll_interval(struct azx *chip)
{
int i;
@@ -1810,41 +1809,59 @@ static int get_jackpoll_interval(struct azx *chip)
return j;
}
-/* Codec initialization */
-int azx_codec_create(struct azx *chip, const char *model,
- unsigned int max_slots,
- int *power_save_to)
-{
- struct hda_bus_template bus_temp;
- int c, codecs, err;
-
- memset(&bus_temp, 0, sizeof(bus_temp));
- bus_temp.private_data = chip;
- bus_temp.modelname = model;
- bus_temp.pci = chip->pci;
- bus_temp.ops.command = azx_send_cmd;
- bus_temp.ops.get_response = azx_get_response;
- bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
- bus_temp.ops.bus_reset = azx_bus_reset;
-#ifdef CONFIG_PM
- bus_temp.power_save = power_save_to;
- bus_temp.ops.pm_notify = azx_power_notify;
-#endif
+static struct hda_bus_ops bus_ops = {
+ .command = azx_send_cmd,
+ .get_response = azx_get_response,
+ .attach_pcm = azx_attach_pcm_stream,
+ .bus_reset = azx_bus_reset,
#ifdef CONFIG_SND_HDA_DSP_LOADER
- bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
- bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
- bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
+ .load_dsp_prepare = azx_load_dsp_prepare,
+ .load_dsp_trigger = azx_load_dsp_trigger,
+ .load_dsp_cleanup = azx_load_dsp_cleanup,
#endif
+};
+
+/* HD-audio bus initialization */
+int azx_bus_create(struct azx *chip, const char *model)
+{
+ struct hda_bus *bus;
+ int err;
- err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
+ err = snd_hda_bus_new(chip->card, &bus);
if (err < 0)
return err;
+ chip->bus = bus;
+ bus->private_data = chip;
+ bus->pci = chip->pci;
+ bus->modelname = model;
+ bus->ops = bus_ops;
+
if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
- chip->bus->needs_damn_long_delay = 1;
+ bus->needs_damn_long_delay = 1;
+ }
+
+ /* AMD chipsets often cause communication stalls upon certain
+ * sequences like pin detection. It seems that forcing synced
+ * access works around the stall. Grrr...
+ */
+ if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
+ dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
+ bus->core.sync_write = 1;
+ bus->allow_bus_reset = 1;
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(azx_bus_create);
+
+/* Probe codecs */
+int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
+{
+ struct hda_bus *bus = chip->bus;
+ int c, codecs, err;
+
codecs = 0;
if (!max_slots)
max_slots = AZX_DEFAULT_CODECS;
@@ -1872,21 +1889,11 @@ int azx_codec_create(struct azx *chip, const char *model,
}
}
- /* AMD chipsets often cause the communication stalls upon certain
- * sequence like the pin-detection. It seems that forcing the synced
- * access works around the stall. Grrr...
- */
- if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
- dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
- chip->bus->sync_write = 1;
- chip->bus->allow_bus_reset = 1;
- }
-
/* Then create codec instances */
for (c = 0; c < max_slots; c++) {
if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
struct hda_codec *codec;
- err = snd_hda_codec_new(chip->bus, c, &codec);
+ err = snd_hda_codec_new(bus, bus->card, c, &codec);
if (err < 0)
continue;
codec->jackpoll_interval = get_jackpoll_interval(chip);
@@ -1900,26 +1907,19 @@ int azx_codec_create(struct azx *chip, const char *model,
}
return 0;
}
-EXPORT_SYMBOL_GPL(azx_codec_create);
+EXPORT_SYMBOL_GPL(azx_probe_codecs);
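A hedged sketch of how a controller driver chains the split-out helpers that replace the old single azx_codec_create() call; variable names are placeholders and error handling is abbreviated:

	err = azx_bus_create(chip, model);
	if (err < 0)
		return err;
	err = azx_probe_codecs(chip, max_slots);
	if (err < 0)
		return err;
	err = azx_codec_configure(chip);
	if (err < 0)
		return err;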
/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
struct hda_codec *codec;
- list_for_each_entry(codec, &chip->bus->codec_list, list) {
+ list_for_each_codec(codec, chip->bus) {
snd_hda_codec_configure(codec);
}
return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);
-/* mixer creation - all stuff is implemented in hda module */
-int azx_mixer_create(struct azx *chip)
-{
- return snd_hda_build_controls(chip->bus);
-}
-EXPORT_SYMBOL_GPL(azx_mixer_create);
-
static bool is_input_stream(struct azx *chip, unsigned char index)
{
@@ -1966,30 +1966,5 @@ int azx_init_stream(struct azx *chip)
}
EXPORT_SYMBOL_GPL(azx_init_stream);
-/*
- * reboot notifier for hang-up problem at power-down
- */
-static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
-{
- struct azx *chip = container_of(nb, struct azx, reboot_notifier);
- snd_hda_bus_reboot_notify(chip->bus);
- azx_stop_chip(chip);
- return NOTIFY_OK;
-}
-
-void azx_notifier_register(struct azx *chip)
-{
- chip->reboot_notifier.notifier_call = azx_halt;
- register_reboot_notifier(&chip->reboot_notifier);
-}
-EXPORT_SYMBOL_GPL(azx_notifier_register);
-
-void azx_notifier_unregister(struct azx *chip)
-{
- if (chip->reboot_notifier.notifier_call)
- unregister_reboot_notifier(&chip->reboot_notifier);
-}
-EXPORT_SYMBOL_GPL(azx_notifier_unregister);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common HDA driver functions");
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index c90d10fd4d8f..0efdb094d21c 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -15,10 +15,396 @@
#ifndef __SOUND_HDA_CONTROLLER_H
#define __SOUND_HDA_CONTROLLER_H
+#include <linux/timecounter.h>
+#include <linux/interrupt.h>
#include <sound/core.h>
+#include <sound/pcm.h>
#include <sound/initval.h>
#include "hda_codec.h"
-#include "hda_priv.h"
+
+/*
+ * registers
+ */
+#define AZX_REG_GCAP 0x00
+#define AZX_GCAP_64OK (1 << 0) /* 64bit address support */
+#define AZX_GCAP_NSDO (3 << 1) /* # of serial data out signals */
+#define AZX_GCAP_BSS (31 << 3) /* # of bidirectional streams */
+#define AZX_GCAP_ISS (15 << 8) /* # of input streams */
+#define AZX_GCAP_OSS (15 << 12) /* # of output streams */
+#define AZX_REG_VMIN 0x02
+#define AZX_REG_VMAJ 0x03
+#define AZX_REG_OUTPAY 0x04
+#define AZX_REG_INPAY 0x06
+#define AZX_REG_GCTL 0x08
+#define AZX_GCTL_RESET (1 << 0) /* controller reset */
+#define AZX_GCTL_FCNTRL (1 << 1) /* flush control */
+#define AZX_GCTL_UNSOL (1 << 8) /* accept unsol. response enable */
+#define AZX_REG_WAKEEN 0x0c
+#define AZX_REG_STATESTS 0x0e
+#define AZX_REG_GSTS 0x10
+#define AZX_GSTS_FSTS (1 << 1) /* flush status */
+#define AZX_REG_INTCTL 0x20
+#define AZX_REG_INTSTS 0x24
+#define AZX_REG_WALLCLK 0x30 /* 24 MHz source */
+#define AZX_REG_OLD_SSYNC 0x34 /* SSYNC for old ICH */
+#define AZX_REG_SSYNC 0x38
+#define AZX_REG_CORBLBASE 0x40
+#define AZX_REG_CORBUBASE 0x44
+#define AZX_REG_CORBWP 0x48
+#define AZX_REG_CORBRP 0x4a
+#define AZX_CORBRP_RST (1 << 15) /* read pointer reset */
+#define AZX_REG_CORBCTL 0x4c
+#define AZX_CORBCTL_RUN (1 << 1) /* enable DMA */
+#define AZX_CORBCTL_CMEIE (1 << 0) /* enable memory error irq */
+#define AZX_REG_CORBSTS 0x4d
+#define AZX_CORBSTS_CMEI (1 << 0) /* memory error indication */
+#define AZX_REG_CORBSIZE 0x4e
+
+#define AZX_REG_RIRBLBASE 0x50
+#define AZX_REG_RIRBUBASE 0x54
+#define AZX_REG_RIRBWP 0x58
+#define AZX_RIRBWP_RST (1 << 15) /* write pointer reset */
+#define AZX_REG_RINTCNT 0x5a
+#define AZX_REG_RIRBCTL 0x5c
+#define AZX_RBCTL_IRQ_EN (1 << 0) /* enable IRQ */
+#define AZX_RBCTL_DMA_EN (1 << 1) /* enable DMA */
+#define AZX_RBCTL_OVERRUN_EN (1 << 2) /* enable overrun irq */
+#define AZX_REG_RIRBSTS 0x5d
+#define AZX_RBSTS_IRQ (1 << 0) /* response irq */
+#define AZX_RBSTS_OVERRUN (1 << 2) /* overrun irq */
+#define AZX_REG_RIRBSIZE 0x5e
+
+#define AZX_REG_IC 0x60
+#define AZX_REG_IR 0x64
+#define AZX_REG_IRS 0x68
+#define AZX_IRS_VALID (1<<1)
+#define AZX_IRS_BUSY (1<<0)
+
+#define AZX_REG_DPLBASE 0x70
+#define AZX_REG_DPUBASE 0x74
+#define AZX_DPLBASE_ENABLE 0x1 /* Enable position buffer */
+
+/* SD offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
+enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
+
+/* stream register offsets from stream base */
+#define AZX_REG_SD_CTL 0x00
+#define AZX_REG_SD_STS 0x03
+#define AZX_REG_SD_LPIB 0x04
+#define AZX_REG_SD_CBL 0x08
+#define AZX_REG_SD_LVI 0x0c
+#define AZX_REG_SD_FIFOW 0x0e
+#define AZX_REG_SD_FIFOSIZE 0x10
+#define AZX_REG_SD_FORMAT 0x12
+#define AZX_REG_SD_BDLPL 0x18
+#define AZX_REG_SD_BDLPU 0x1c
+
+/* PCI space */
+#define AZX_PCIREG_TCSEL 0x44
+
+/*
+ * other constants
+ */
+
+/* max number of fragments - we may use more if allocating more pages for BDL */
+#define BDL_SIZE 4096
+#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16)
+#define AZX_MAX_FRAG 32
+/* max buffer size - no h/w limit, you can increase as you like */
+#define AZX_MAX_BUF_SIZE (1024*1024*1024)
+
+/* RIRB int mask: overrun[2], response[0] */
+#define RIRB_INT_RESPONSE 0x01
+#define RIRB_INT_OVERRUN 0x04
+#define RIRB_INT_MASK 0x05
+
+/* STATESTS int mask: S3,SD2,SD1,SD0 */
+#define AZX_MAX_CODECS 8
+#define AZX_DEFAULT_CODECS 4
+#define STATESTS_INT_MASK ((1 << AZX_MAX_CODECS) - 1)
+
+/* SD_CTL bits */
+#define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */
+#define SD_CTL_DMA_START 0x02 /* stream DMA start bit */
+#define SD_CTL_STRIPE (3 << 16) /* stripe control */
+#define SD_CTL_TRAFFIC_PRIO (1 << 18) /* traffic priority */
+#define SD_CTL_DIR (1 << 19) /* bi-directional stream */
+#define SD_CTL_STREAM_TAG_MASK (0xf << 20)
+#define SD_CTL_STREAM_TAG_SHIFT 20
+
+/* SD_CTL and SD_STS */
+#define SD_INT_DESC_ERR 0x10 /* descriptor error interrupt */
+#define SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
+#define SD_INT_COMPLETE 0x04 /* completion interrupt */
+#define SD_INT_MASK (SD_INT_DESC_ERR|SD_INT_FIFO_ERR|\
+ SD_INT_COMPLETE)
+
+/* SD_STS */
+#define SD_STS_FIFO_READY 0x20 /* FIFO ready */
+
+/* INTCTL and INTSTS */
+#define AZX_INT_ALL_STREAM 0xff /* all stream interrupts */
+#define AZX_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */
+#define AZX_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */
+
+/* below are so far hardcoded - should read registers in future */
+#define AZX_MAX_CORB_ENTRIES 256
+#define AZX_MAX_RIRB_ENTRIES 256
+
+/* driver quirks (capabilities) */
+/* bits 0-7 are used for indicating driver type */
+#define AZX_DCAPS_NO_TCSEL (1 << 8) /* No Intel TCSEL bit */
+#define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */
+#define AZX_DCAPS_SNOOP_MASK (3 << 10) /* snoop type mask */
+#define AZX_DCAPS_SNOOP_OFF (1 << 12) /* snoop default off */
+#define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
+#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
+#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
+#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
+#define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */
+#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
+#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
+#define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
+#define AZX_DCAPS_NO_ALIGN_BUFSIZE (1 << 21) /* no buffer size alignment */
+/* 22 unused */
+#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
+#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
+#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
+#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
+#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
+#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
+#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
+#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
+
+enum {
+ AZX_SNOOP_TYPE_NONE,
+ AZX_SNOOP_TYPE_SCH,
+ AZX_SNOOP_TYPE_ATI,
+ AZX_SNOOP_TYPE_NVIDIA,
+};
+
+/* HD Audio class code */
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
+
+struct azx_dev {
+ struct snd_dma_buffer bdl; /* BDL buffer */
+ u32 *posbuf; /* position buffer pointer */
+
+ unsigned int bufsize; /* size of the play buffer in bytes */
+ unsigned int period_bytes; /* size of the period in bytes */
+ unsigned int frags; /* number of periods in the play buffer */
+ unsigned int fifo_size; /* FIFO size */
+ unsigned long start_wallclk; /* start + minimum wallclk */
+ unsigned long period_wallclk; /* wallclk for period */
+
+ void __iomem *sd_addr; /* stream descriptor pointer */
+
+ u32 sd_int_sta_mask; /* stream int status mask */
+
+ /* pcm support */
+ struct snd_pcm_substream *substream; /* assigned substream,
+ * set in PCM open
+ */
+ unsigned int format_val; /* format value to be set in the
+ * controller and the codec
+ */
+ unsigned char stream_tag; /* assigned stream */
+ unsigned char index; /* stream index */
+ int assigned_key; /* last device# key assigned to */
+
+ unsigned int opened:1;
+ unsigned int running:1;
+ unsigned int irq_pending:1;
+ unsigned int prepared:1;
+ unsigned int locked:1;
+ /*
+ * For VIA:
+ * A flag to ensure DMA position is 0
+ * when link position is not greater than FIFO size
+ */
+ unsigned int insufficient:1;
+ unsigned int wc_marked:1;
+ unsigned int no_period_wakeup:1;
+
+ struct timecounter azx_tc;
+ struct cyclecounter azx_cc;
+
+ int delay_negative_threshold;
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+ /* Allows dsp load to have sole access to the playback stream. */
+ struct mutex dsp_mutex;
+#endif
+};
+
+/* CORB/RIRB */
+struct azx_rb {
+ u32 *buf; /* CORB/RIRB buffer
+ * Each CORB entry is 4 bytes, each RIRB entry is 8 bytes
+ */
+ dma_addr_t addr; /* physical address of CORB/RIRB buffer */
+ /* for RIRB */
+ unsigned short rp, wp; /* read/write pointers */
+ int cmds[AZX_MAX_CODECS]; /* number of pending requests */
+ u32 res[AZX_MAX_CODECS]; /* last read value */
+};
+
+struct azx;
+
+/* Functions to read/write to hda registers. */
+struct hda_controller_ops {
+ /* Register Access */
+ void (*reg_writel)(u32 value, u32 __iomem *addr);
+ u32 (*reg_readl)(u32 __iomem *addr);
+ void (*reg_writew)(u16 value, u16 __iomem *addr);
+ u16 (*reg_readw)(u16 __iomem *addr);
+ void (*reg_writeb)(u8 value, u8 __iomem *addr);
+ u8 (*reg_readb)(u8 __iomem *addr);
+ /* Disable msi if supported, PCI only */
+ int (*disable_msi_reset_irq)(struct azx *);
+ /* Allocation ops */
+ int (*dma_alloc_pages)(struct azx *chip,
+ int type,
+ size_t size,
+ struct snd_dma_buffer *buf);
+ void (*dma_free_pages)(struct azx *chip, struct snd_dma_buffer *buf);
+ int (*substream_alloc_pages)(struct azx *chip,
+ struct snd_pcm_substream *substream,
+ size_t size);
+ int (*substream_free_pages)(struct azx *chip,
+ struct snd_pcm_substream *substream);
+ void (*pcm_mmap_prepare)(struct snd_pcm_substream *substream,
+ struct vm_area_struct *area);
+ /* Check if current position is acceptable */
+ int (*position_check)(struct azx *chip, struct azx_dev *azx_dev);
+};
+
+struct azx_pcm {
+ struct azx *chip;
+ struct snd_pcm *pcm;
+ struct hda_codec *codec;
+ struct hda_pcm *info;
+ struct list_head list;
+};
+
+typedef unsigned int (*azx_get_pos_callback_t)(struct azx *, struct azx_dev *);
+typedef int (*azx_get_delay_callback_t)(struct azx *, struct azx_dev *, unsigned int pos);
+
+struct azx {
+ struct snd_card *card;
+ struct pci_dev *pci;
+ int dev_index;
+
+ /* chip type specific */
+ int driver_type;
+ unsigned int driver_caps;
+ int playback_streams;
+ int playback_index_offset;
+ int capture_streams;
+ int capture_index_offset;
+ int num_streams;
+ const int *jackpoll_ms; /* per-card jack poll interval */
+
+ /* Register interaction. */
+ const struct hda_controller_ops *ops;
+
+ /* position adjustment callbacks */
+ azx_get_pos_callback_t get_position[2];
+ azx_get_delay_callback_t get_delay[2];
+
+ /* pci resources */
+ unsigned long addr;
+ void __iomem *remap_addr;
+ int irq;
+
+ /* locks */
+ spinlock_t reg_lock;
+ struct mutex open_mutex; /* Prevents concurrent open/close operations */
+
+ /* streams (x num_streams) */
+ struct azx_dev *azx_dev;
+
+ /* PCM */
+ struct list_head pcm_list; /* azx_pcm list */
+
+ /* HD codec */
+ unsigned short codec_mask;
+ int codec_probe_mask; /* copied from probe_mask option */
+ struct hda_bus *bus;
+ unsigned int beep_mode;
+
+ /* CORB/RIRB */
+ struct azx_rb corb;
+ struct azx_rb rirb;
+
+ /* CORB/RIRB and position buffers */
+ struct snd_dma_buffer rb;
+ struct snd_dma_buffer posbuf;
+
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+ const struct firmware *fw;
+#endif
+
+ /* flags */
+ const int *bdl_pos_adj;
+ int poll_count;
+ unsigned int running:1;
+ unsigned int initialized:1;
+ unsigned int single_cmd:1;
+ unsigned int polling_mode:1;
+ unsigned int msi:1;
+ unsigned int probing:1; /* codec probing phase */
+ unsigned int snoop:1;
+ unsigned int align_buffer_size:1;
+ unsigned int region_requested:1;
+ unsigned int disabled:1; /* disabled by VGA-switcher */
+
+ /* for debugging */
+ unsigned int last_cmd[AZX_MAX_CODECS];
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+ struct azx_dev saved_azx_dev;
+#endif
+};
+
+#ifdef CONFIG_X86
+#define azx_snoop(chip) ((chip)->snoop)
+#else
+#define azx_snoop(chip) true
+#endif
+
+/*
+ * macros for easy use
+ */
+
+#define azx_writel(chip, reg, value) \
+ ((chip)->ops->reg_writel(value, (chip)->remap_addr + AZX_REG_##reg))
+#define azx_readl(chip, reg) \
+ ((chip)->ops->reg_readl((chip)->remap_addr + AZX_REG_##reg))
+#define azx_writew(chip, reg, value) \
+ ((chip)->ops->reg_writew(value, (chip)->remap_addr + AZX_REG_##reg))
+#define azx_readw(chip, reg) \
+ ((chip)->ops->reg_readw((chip)->remap_addr + AZX_REG_##reg))
+#define azx_writeb(chip, reg, value) \
+ ((chip)->ops->reg_writeb(value, (chip)->remap_addr + AZX_REG_##reg))
+#define azx_readb(chip, reg) \
+ ((chip)->ops->reg_readb((chip)->remap_addr + AZX_REG_##reg))
+
+#define azx_sd_writel(chip, dev, reg, value) \
+ ((chip)->ops->reg_writel(value, (dev)->sd_addr + AZX_REG_##reg))
+#define azx_sd_readl(chip, dev, reg) \
+ ((chip)->ops->reg_readl((dev)->sd_addr + AZX_REG_##reg))
+#define azx_sd_writew(chip, dev, reg, value) \
+ ((chip)->ops->reg_writew(value, (dev)->sd_addr + AZX_REG_##reg))
+#define azx_sd_readw(chip, dev, reg) \
+ ((chip)->ops->reg_readw((dev)->sd_addr + AZX_REG_##reg))
+#define azx_sd_writeb(chip, dev, reg, value) \
+ ((chip)->ops->reg_writeb(value, (dev)->sd_addr + AZX_REG_##reg))
+#define azx_sd_readb(chip, dev, reg) \
+ ((chip)->ops->reg_readb((dev)->sd_addr + AZX_REG_##reg))
+
+#define azx_has_pm_runtime(chip) \
+ ((chip)->driver_caps & AZX_DCAPS_PM_RUNTIME)
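For illustration only: the accessor macros paste the register name onto the AZX_REG_ prefix, so a read-modify-write of the global control register could look like this sketch:

	/* azx_readl(chip, GCTL) expands to
	 * ((chip)->ops->reg_readl((chip)->remap_addr + AZX_REG_GCTL))
	 */
	if (!(azx_readl(chip, GCTL) & AZX_GCTL_RESET))
		azx_writel(chip, GCTL,
			   azx_readl(chip, GCTL) | AZX_GCTL_RESET);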
/* PCM setup */
static inline struct azx_dev *get_azx_dev(struct snd_pcm_substream *substream)
@@ -43,14 +429,9 @@ void azx_enter_link_reset(struct azx *chip);
irqreturn_t azx_interrupt(int irq, void *dev_id);
/* Codec interface */
-int azx_codec_create(struct azx *chip, const char *model,
- unsigned int max_slots,
- int *power_save_to);
+int azx_bus_create(struct azx *chip, const char *model);
+int azx_probe_codecs(struct azx *chip, unsigned int max_slots);
int azx_codec_configure(struct azx *chip);
-int azx_mixer_create(struct azx *chip);
int azx_init_stream(struct azx *chip);
-void azx_notifier_register(struct azx *chip);
-void azx_notifier_unregister(struct azx *chip);
-
#endif /* __SOUND_HDA_CONTROLLER_H */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8ec5289f8e05..788f969b1a68 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -140,6 +140,9 @@ static void parse_user_hints(struct hda_codec *codec)
val = snd_hda_get_bool_hint(codec, "single_adc_amp");
if (val >= 0)
codec->single_adc_amp = !!val;
+ val = snd_hda_get_bool_hint(codec, "power_save_node");
+ if (val >= 0)
+ codec->power_save_node = !!val;
val = snd_hda_get_bool_hint(codec, "auto_mute");
if (val >= 0)
@@ -648,12 +651,24 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
unsigned int dir, unsigned int idx)
{
struct hda_gen_spec *spec = codec->spec;
+ int type = get_wcaps_type(get_wcaps(codec, nid));
int i, n;
+ if (nid == codec->core.afg)
+ return true;
+
for (n = 0; n < spec->paths.used; n++) {
struct nid_path *path = snd_array_elem(&spec->paths, n);
if (!path->active)
continue;
+ if (codec->power_save_node) {
+ if (!path->stream_enabled)
+ continue;
+ /* ignore unplugged paths except for DAC/ADC */
+ if (!(path->pin_enabled || path->pin_fixed) &&
+ type != AC_WID_AUD_OUT && type != AC_WID_AUD_IN)
+ continue;
+ }
for (i = 0; i < path->depth; i++) {
if (path->path[i] == nid) {
if (dir == HDA_OUTPUT || path->idx[i] == idx)
@@ -807,6 +822,44 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
}
}
+/* sync power of each widget in the given path */
+static hda_nid_t path_power_update(struct hda_codec *codec,
+ struct nid_path *path,
+ bool allow_powerdown)
+{
+ hda_nid_t nid, changed = 0;
+ int i, state;
+
+ for (i = 0; i < path->depth; i++) {
+ nid = path->path[i];
+ if (!(get_wcaps(codec, nid) & AC_WCAP_POWER))
+ continue;
+ if (nid == codec->core.afg)
+ continue;
+ if (!allow_powerdown || is_active_nid_for_any(codec, nid))
+ state = AC_PWRST_D0;
+ else
+ state = AC_PWRST_D3;
+ if (!snd_hda_check_power_state(codec, nid, state)) {
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_POWER_STATE, state);
+ changed = nid;
+ if (state == AC_PWRST_D0)
+ snd_hdac_regmap_sync_node(&codec->core, nid);
+ }
+ }
+ return changed;
+}
+
+/* do sync with the last power state change */
+static void sync_power_state_change(struct hda_codec *codec, hda_nid_t nid)
+{
+ if (nid) {
+ msleep(10);
+ snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_POWER_STATE, 0);
+ }
+}
+
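The two helpers above are meant to be composed; a minimal sketch of the pattern (the same call shape is used by path_power_down_sync() further down in this patch):

	sync_power_state_change(codec,
				path_power_update(codec, path, true));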
/**
* snd_hda_activate_path - activate or deactivate the given path
* @codec: the HDA codec
@@ -825,15 +878,13 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
if (!enable)
path->active = false;
+ /* make sure the widget is powered up */
+ if (enable && (spec->power_down_unused || codec->power_save_node))
+ path_power_update(codec, path, codec->power_save_node);
+
for (i = path->depth - 1; i >= 0; i--) {
hda_nid_t nid = path->path[i];
- if (enable && spec->power_down_unused) {
- /* make sure the widget is powered up */
- if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D0))
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_POWER_STATE,
- AC_PWRST_D0);
- }
+
if (enable && path->multi[i])
snd_hda_codec_update_cache(codec, nid, 0,
AC_VERB_SET_CONNECT_SEL,
@@ -853,28 +904,10 @@ EXPORT_SYMBOL_GPL(snd_hda_activate_path);
static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)
{
struct hda_gen_spec *spec = codec->spec;
- bool changed = false;
- int i;
- if (!spec->power_down_unused || path->active)
+ if (!(spec->power_down_unused || codec->power_save_node) || path->active)
return;
-
- for (i = 0; i < path->depth; i++) {
- hda_nid_t nid = path->path[i];
- if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) &&
- !is_active_nid_for_any(codec, nid)) {
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_POWER_STATE,
- AC_PWRST_D3);
- changed = true;
- }
- }
-
- if (changed) {
- msleep(10);
- snd_hda_codec_read(codec, path->path[0], 0,
- AC_VERB_GET_POWER_STATE, 0);
- }
+ sync_power_state_change(codec, path_power_update(codec, path, true));
}
/* turn on/off EAPD on the given pin */
@@ -1574,6 +1607,7 @@ static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
return 0;
/* print_nid_path(codec, "output-aamix", path); */
path->active = false; /* unused as default */
+ path->pin_fixed = true; /* static route */
return snd_hda_get_path_idx(codec, path);
}
@@ -1863,12 +1897,11 @@ static void debug_show_configs(struct hda_codec *codec,
static void fill_all_dac_nids(struct hda_codec *codec)
{
struct hda_gen_spec *spec = codec->spec;
- int i;
- hda_nid_t nid = codec->start_nid;
+ hda_nid_t nid;
spec->num_all_dacs = 0;
memset(spec->all_dacs, 0, sizeof(spec->all_dacs));
- for (i = 0; i < codec->num_nodes; i++, nid++) {
+ for_each_hda_codec_node(nid, codec) {
if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_AUD_OUT)
continue;
if (spec->num_all_dacs >= ARRAY_SIZE(spec->all_dacs)) {
@@ -2998,6 +3031,7 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
}
path->active = true;
+ path->stream_enabled = true; /* no DAC/ADC involved */
err = add_loopback_list(spec, mix_nid, idx);
if (err < 0)
return err;
@@ -3009,6 +3043,8 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
if (path) {
print_nid_path(codec, "loopback-merge", path);
path->active = true;
+ path->pin_fixed = true; /* static route */
+ path->stream_enabled = true; /* no DAC/ADC involved */
spec->loopback_merge_path =
snd_hda_get_path_idx(codec, path);
}
@@ -3030,10 +3066,9 @@ static int fill_adc_nids(struct hda_codec *codec)
hda_nid_t nid;
hda_nid_t *adc_nids = spec->adc_nids;
int max_nums = ARRAY_SIZE(spec->adc_nids);
- int i, nums = 0;
+ int nums = 0;
- nid = codec->start_nid;
- for (i = 0; i < codec->num_nodes; i++, nid++) {
+ for_each_hda_codec_node(nid, codec) {
unsigned int caps = get_wcaps(codec, nid);
int type = get_wcaps_type(caps);
@@ -3224,7 +3259,8 @@ static int create_input_ctls(struct hda_codec *codec)
val = PIN_IN;
if (cfg->inputs[i].type == AUTO_PIN_MIC)
val |= snd_hda_get_default_vref(codec, pin);
- if (pin != spec->hp_mic_pin)
+ if (pin != spec->hp_mic_pin &&
+ !snd_hda_codec_get_pin_target(codec, pin))
set_pin_target(codec, pin, val, false);
if (mixer) {
@@ -3346,11 +3382,6 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
imux = &spec->input_mux;
adc_idx = kcontrol->id.index;
mutex_lock(&codec->control_mutex);
- /* we use the cache-only update at first since multiple input paths
- * may shared the same amp; by updating only caches, the redundant
- * writes to hardware can be reduced.
- */
- codec->cached_write = 1;
for (i = 0; i < imux->num_items; i++) {
path = get_input_path(codec, adc_idx, i);
if (!path || !path->ctls[type])
@@ -3358,12 +3389,9 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
kcontrol->private_value = path->ctls[type];
err = func(kcontrol, ucontrol);
if (err < 0)
- goto error;
+ break;
}
- error:
- codec->cached_write = 0;
mutex_unlock(&codec->control_mutex);
- snd_hda_codec_flush_cache(codec); /* flush the updates */
if (err >= 0 && spec->cap_sync_hook)
spec->cap_sync_hook(codec, kcontrol, ucontrol);
return err;
@@ -3810,6 +3838,7 @@ static void parse_digital(struct hda_codec *codec)
continue;
print_nid_path(codec, "digout", path);
path->active = true;
+ path->pin_fixed = true; /* no jack detection */
spec->digout_paths[i] = snd_hda_get_path_idx(codec, path);
set_pin_target(codec, pin, PIN_OUT, false);
if (!nums) {
@@ -3826,8 +3855,7 @@ static void parse_digital(struct hda_codec *codec)
if (spec->autocfg.dig_in_pin) {
pin = spec->autocfg.dig_in_pin;
- dig_nid = codec->start_nid;
- for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
+ for_each_hda_codec_node(dig_nid, codec) {
unsigned int wcaps = get_wcaps(codec, dig_nid);
if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
continue;
@@ -3837,6 +3865,7 @@ static void parse_digital(struct hda_codec *codec)
if (path) {
print_nid_path(codec, "digin", path);
path->active = true;
+ path->pin_fixed = true; /* no jack */
spec->dig_in_nid = dig_nid;
spec->digin_path = snd_hda_get_path_idx(codec, path);
set_pin_target(codec, pin, PIN_IN, false);
@@ -3896,6 +3925,238 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
return 1;
}
+/* power up/down widgets in all the paths that match the given NID
+ * as terminals (either start- or endpoint)
+ *
+ * returns the last changed NID, or zero if unchanged.
+ */
+static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
+ int pin_state, int stream_state)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ hda_nid_t last, changed = 0;
+ struct nid_path *path;
+ int n;
+
+ for (n = 0; n < spec->paths.used; n++) {
+ path = snd_array_elem(&spec->paths, n);
+ if (path->path[0] == nid ||
+ path->path[path->depth - 1] == nid) {
+ bool pin_old = path->pin_enabled;
+ bool stream_old = path->stream_enabled;
+
+ if (pin_state >= 0)
+ path->pin_enabled = pin_state;
+ if (stream_state >= 0)
+ path->stream_enabled = stream_state;
+ if ((!path->pin_fixed && path->pin_enabled != pin_old)
+ || path->stream_enabled != stream_old) {
+ last = path_power_update(codec, path, true);
+ if (last)
+ changed = last;
+ }
+ }
+ }
+ return changed;
+}
+
+/* check the jack status for power control */
+static bool detect_pin_state(struct hda_codec *codec, hda_nid_t pin)
+{
+ if (!is_jack_detectable(codec, pin))
+ return true;
+ return snd_hda_jack_detect_state(codec, pin) != HDA_JACK_NOT_PRESENT;
+}
+
+/* power up/down the paths of the given pin according to the jack state;
+ * power = 0/1 : only power up/down if it matches the jack state,
+ * < 0 : force power up/down to follow the jack state
+ *
+ * returns the last changed NID, or zero if unchanged.
+ */
+static hda_nid_t set_pin_power_jack(struct hda_codec *codec, hda_nid_t pin,
+ int power)
+{
+ bool on;
+
+ if (!codec->power_save_node)
+ return 0;
+
+ on = detect_pin_state(codec, pin);
+
+ if (power >= 0 && on != power)
+ return 0;
+ return set_path_power(codec, pin, on, -1);
+}
+
+static void pin_power_callback(struct hda_codec *codec,
+ struct hda_jack_callback *jack,
+ bool on)
+{
+ if (jack && jack->tbl->nid)
+ sync_power_state_change(codec,
+ set_pin_power_jack(codec, jack->tbl->nid, on));
+}
+
+/* callback only doing power up -- called first */
+static void pin_power_up_callback(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ pin_power_callback(codec, jack, true);
+}
+
+/* callback only doing power down -- called last */
+static void pin_power_down_callback(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ pin_power_callback(codec, jack, false);
+}
+
+/* set up the power up/down callbacks */
+static void add_pin_power_ctls(struct hda_codec *codec, int num_pins,
+ const hda_nid_t *pins, bool on)
+{
+ int i;
+ hda_jack_callback_fn cb =
+ on ? pin_power_up_callback : pin_power_down_callback;
+
+ for (i = 0; i < num_pins && pins[i]; i++) {
+ if (is_jack_detectable(codec, pins[i]))
+ snd_hda_jack_detect_enable_callback(codec, pins[i], cb);
+ else
+ set_path_power(codec, pins[i], true, -1);
+ }
+}
+
+/* enable power callbacks for each available I/O pin with jack detection;
+ * the digital I/O pins are excluded because of unreliable detection
+ */
+static void add_all_pin_power_ctls(struct hda_codec *codec, bool on)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i;
+
+ if (!codec->power_save_node)
+ return;
+ add_pin_power_ctls(codec, cfg->line_outs, cfg->line_out_pins, on);
+ if (cfg->line_out_type != AUTO_PIN_HP_OUT)
+ add_pin_power_ctls(codec, cfg->hp_outs, cfg->hp_pins, on);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
+ add_pin_power_ctls(codec, cfg->speaker_outs, cfg->speaker_pins, on);
+ for (i = 0; i < cfg->num_inputs; i++)
+ add_pin_power_ctls(codec, 1, &cfg->inputs[i].pin, on);
+}
+
+/* sync path power up/down with the jack states of given pins */
+static void sync_pin_power_ctls(struct hda_codec *codec, int num_pins,
+ const hda_nid_t *pins)
+{
+ int i;
+
+ for (i = 0; i < num_pins && pins[i]; i++)
+ if (is_jack_detectable(codec, pins[i]))
+ set_pin_power_jack(codec, pins[i], -1);
+}
+
+/* sync path power up/down with pins; called at init and resume */
+static void sync_all_pin_power_ctls(struct hda_codec *codec)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i;
+
+ if (!codec->power_save_node)
+ return;
+ sync_pin_power_ctls(codec, cfg->line_outs, cfg->line_out_pins);
+ if (cfg->line_out_type != AUTO_PIN_HP_OUT)
+ sync_pin_power_ctls(codec, cfg->hp_outs, cfg->hp_pins);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
+ sync_pin_power_ctls(codec, cfg->speaker_outs, cfg->speaker_pins);
+ for (i = 0; i < cfg->num_inputs; i++)
+ sync_pin_power_ctls(codec, 1, &cfg->inputs[i].pin);
+}
+
+/* add fake paths if not present yet */
+static int add_fake_paths(struct hda_codec *codec, hda_nid_t nid,
+ int num_pins, const hda_nid_t *pins)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct nid_path *path;
+ int i;
+
+ for (i = 0; i < num_pins; i++) {
+ if (!pins[i])
+ break;
+ if (get_nid_path(codec, nid, pins[i], 0))
+ continue;
+ path = snd_array_new(&spec->paths);
+ if (!path)
+ return -ENOMEM;
+ memset(path, 0, sizeof(*path));
+ path->depth = 2;
+ path->path[0] = nid;
+ path->path[1] = pins[i];
+ path->active = true;
+ }
+ return 0;
+}
+
+/* create fake paths to all outputs from beep */
+static int add_fake_beep_paths(struct hda_codec *codec)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ hda_nid_t nid = spec->beep_nid;
+ int err;
+
+ if (!codec->power_save_node || !nid)
+ return 0;
+ err = add_fake_paths(codec, nid, cfg->line_outs, cfg->line_out_pins);
+ if (err < 0)
+ return err;
+ if (cfg->line_out_type != AUTO_PIN_HP_OUT) {
+ err = add_fake_paths(codec, nid, cfg->hp_outs, cfg->hp_pins);
+ if (err < 0)
+ return err;
+ }
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ err = add_fake_paths(codec, nid, cfg->speaker_outs,
+ cfg->speaker_pins);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+/* power up/down beep widget and its output paths */
+static void beep_power_hook(struct hda_beep *beep, bool on)
+{
+ set_path_power(beep->codec, beep->nid, -1, on);
+}
+
+/**
+ * snd_hda_gen_fix_pin_power - Fix the power of the given pin widget to D0
+ * @codec: the HDA codec
+ * @pin: NID of pin to fix
+ */
+int snd_hda_gen_fix_pin_power(struct hda_codec *codec, hda_nid_t pin)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct nid_path *path;
+
+ path = snd_array_new(&spec->paths);
+ if (!path)
+ return -ENOMEM;
+ memset(path, 0, sizeof(*path));
+ path->depth = 1;
+ path->path[0] = pin;
+ path->active = true;
+ path->pin_fixed = true;
+ path->stream_enabled = true;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_fix_pin_power);
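A hypothetical caller sketch, assuming the usual HDA fixup machinery (struct hda_fixup, HDA_FIXUP_ACT_PRE_PROBE); the function name and the pin NID 0x10 are made up for illustration:

static void fixup_keep_pin_powered(struct hda_codec *codec,
				   const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		snd_hda_gen_fix_pin_power(codec, 0x10);	/* hypothetical pin NID */
}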
/*
* Jack detections for HP auto-mute and mic-switch
@@ -3933,6 +4194,10 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
if (!nid)
break;
+ oldval = snd_hda_codec_get_pin_target(codec, nid);
+ if (oldval & PIN_IN)
+ continue; /* no mute for inputs */
+
if (spec->auto_mute_via_amp) {
struct nid_path *path;
hda_nid_t mute_nid;
@@ -3947,29 +4212,32 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
spec->mute_bits |= (1ULL << mute_nid);
else
spec->mute_bits &= ~(1ULL << mute_nid);
- set_pin_eapd(codec, nid, !mute);
continue;
+ } else {
+ /* don't reset VREF value in case it's controlling
+ * the amp (see alc861_fixup_asus_amp_vref_0f())
+ */
+ if (spec->keep_vref_in_automute)
+ val = oldval & ~PIN_HP;
+ else
+ val = 0;
+ if (!mute)
+ val |= oldval;
+ /* here we call update_pin_ctl() so that the pinctl is
+ * changed without changing the pinctl target value;
+ * the original target value will be still referred at
+ * the init / resume again
+ */
+ update_pin_ctl(codec, nid, val);
}
- oldval = snd_hda_codec_get_pin_target(codec, nid);
- if (oldval & PIN_IN)
- continue; /* no mute for inputs */
- /* don't reset VREF value in case it's controlling
- * the amp (see alc861_fixup_asus_amp_vref_0f())
- */
- if (spec->keep_vref_in_automute)
- val = oldval & ~PIN_HP;
- else
- val = 0;
- if (!mute)
- val |= oldval;
- /* here we call update_pin_ctl() so that the pinctl is changed
- * without changing the pinctl target value;
- * the original target value will be still referred at the
- * init / resume again
- */
- update_pin_ctl(codec, nid, val);
set_pin_eapd(codec, nid, !mute);
+ if (codec->power_save_node) {
+ bool on = !mute;
+ if (on)
+ on = detect_pin_state(codec, nid);
+ set_path_power(codec, nid, on, -1);
+ }
}
}
@@ -4436,7 +4704,11 @@ unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
hda_nid_t nid,
unsigned int power_state)
{
- if (power_state != AC_PWRST_D0 || nid == codec->afg)
+ struct hda_gen_spec *spec = codec->spec;
+
+ if (!spec->power_down_unused && !codec->power_save_node)
+ return power_state;
+ if (power_state != AC_PWRST_D0 || nid == codec->core.afg)
return power_state;
if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER)
return power_state;
@@ -4466,6 +4738,21 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
}
/**
+ * snd_hda_gen_stream_pm - Stream power management callback
+ * @codec: the HDA codec
+ * @nid: audio widget
+ * @on: power on/off flag
+ *
+ * Set this in patch_ops.stream_pm. Only valid with power_save_node flag.
+ */
+void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on)
+{
+ if (codec->power_save_node)
+ set_path_power(codec, nid, -1, on);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm);
+
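A hedged sketch of wiring the new hook into a codec driver's patch_ops, as the kerneldoc above suggests; the structure name is a placeholder and only the common generic callbacks are shown:

static const struct hda_codec_ops my_patch_ops = {
	.build_controls	= snd_hda_gen_build_controls,
	.build_pcms	= snd_hda_gen_build_pcms,
	.init		= snd_hda_gen_init,
	.free		= snd_hda_gen_free,
	.stream_pm	= snd_hda_gen_stream_pm,	/* effective only with power_save_node */
};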
+/**
* snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
* set up the hda_gen_spec
* @codec: the HDA codec
@@ -4549,6 +4836,9 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
if (err < 0)
return err;
+ /* add power-down pin callbacks at first */
+ add_all_pin_power_ctls(codec, false);
+
spec->const_channel_count = spec->ext_channel_count;
/* check the multiple speaker and headphone pins */
if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
@@ -4618,6 +4908,9 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
}
}
+ /* add power-up pin callbacks at last */
+ add_all_pin_power_ctls(codec, true);
+
/* mute all aamix input initially */
if (spec->mixer_nid)
mute_all_mixer_nid(codec, spec->mixer_nid);
@@ -4625,13 +4918,20 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
dig_only:
parse_digital(codec);
- if (spec->power_down_unused)
- codec->power_filter = snd_hda_gen_path_power_filter;
+ if (spec->power_down_unused || codec->power_save_node)
+ if (!codec->power_filter)
+ codec->power_filter = snd_hda_gen_path_power_filter;
if (!spec->no_analog && spec->beep_nid) {
err = snd_hda_attach_beep_device(codec, spec->beep_nid);
if (err < 0)
return err;
+ if (codec->beep && codec->power_save_node) {
+ err = add_fake_beep_paths(codec);
+ if (err < 0)
+ return err;
+ codec->beep->power_hook = beep_power_hook;
+ }
}
return 1;
@@ -4675,7 +4975,7 @@ int snd_hda_gen_build_controls(struct hda_codec *codec)
err = snd_hda_create_dig_out_ctls(codec,
spec->multiout.dig_out_nid,
spec->multiout.dig_out_nid,
- spec->pcm_rec[1].pcm_type);
+ spec->pcm_rec[1]->pcm_type);
if (err < 0)
return err;
if (!spec->no_analog) {
@@ -5137,6 +5437,33 @@ static void fill_pcm_stream_name(char *str, size_t len, const char *sfx,
strlcat(str, sfx, len);
}
+/* copy PCM stream info from @default_str, and override non-NULL entries
+ * from @spec_str and @nid
+ */
+static void setup_pcm_stream(struct hda_pcm_stream *str,
+ const struct hda_pcm_stream *default_str,
+ const struct hda_pcm_stream *spec_str,
+ hda_nid_t nid)
+{
+ *str = *default_str;
+ if (nid)
+ str->nid = nid;
+ if (spec_str) {
+ if (spec_str->substreams)
+ str->substreams = spec_str->substreams;
+ if (spec_str->channels_min)
+ str->channels_min = spec_str->channels_min;
+ if (spec_str->channels_max)
+ str->channels_max = spec_str->channels_max;
+ if (spec_str->rates)
+ str->rates = spec_str->rates;
+ if (spec_str->formats)
+ str->formats = spec_str->formats;
+ if (spec_str->maxbps)
+ str->maxbps = spec_str->maxbps;
+ }
+}
+
/**
* snd_hda_gen_build_pcms - build PCM streams based on the parsed results
* @codec: the HDA codec
@@ -5146,27 +5473,25 @@ static void fill_pcm_stream_name(char *str, size_t len, const char *sfx,
int snd_hda_gen_build_pcms(struct hda_codec *codec)
{
struct hda_gen_spec *spec = codec->spec;
- struct hda_pcm *info = spec->pcm_rec;
- const struct hda_pcm_stream *p;
+ struct hda_pcm *info;
bool have_multi_adcs;
- codec->num_pcms = 1;
- codec->pcm_info = info;
-
if (spec->no_analog)
goto skip_analog;
fill_pcm_stream_name(spec->stream_name_analog,
sizeof(spec->stream_name_analog),
- " Analog", codec->chip_name);
- info->name = spec->stream_name_analog;
+ " Analog", codec->core.chip_name);
+ info = snd_hda_codec_pcm_new(codec, "%s", spec->stream_name_analog);
+ if (!info)
+ return -ENOMEM;
+ spec->pcm_rec[0] = info;
if (spec->multiout.num_dacs > 0) {
- p = spec->stream_analog_playback;
- if (!p)
- p = &pcm_analog_playback;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK] = *p;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dac_nids[0];
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_PLAYBACK],
+ &pcm_analog_playback,
+ spec->stream_analog_playback,
+ spec->multiout.dac_nids[0]);
info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
spec->multiout.max_channels;
if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT &&
@@ -5175,15 +5500,11 @@ int snd_hda_gen_build_pcms(struct hda_codec *codec)
snd_pcm_2_1_chmaps;
}
if (spec->num_adc_nids) {
- p = spec->stream_analog_capture;
- if (!p) {
- if (spec->dyn_adc_switch)
- p = &dyn_adc_pcm_analog_capture;
- else
- p = &pcm_analog_capture;
- }
- info->stream[SNDRV_PCM_STREAM_CAPTURE] = *p;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_CAPTURE],
+ (spec->dyn_adc_switch ?
+ &dyn_adc_pcm_analog_capture : &pcm_analog_capture),
+ spec->stream_analog_capture,
+ spec->adc_nids[0]);
}
skip_analog:
@@ -5191,29 +5512,27 @@ int snd_hda_gen_build_pcms(struct hda_codec *codec)
if (spec->multiout.dig_out_nid || spec->dig_in_nid) {
fill_pcm_stream_name(spec->stream_name_digital,
sizeof(spec->stream_name_digital),
- " Digital", codec->chip_name);
- codec->num_pcms = 2;
+ " Digital", codec->core.chip_name);
+ info = snd_hda_codec_pcm_new(codec, "%s",
+ spec->stream_name_digital);
+ if (!info)
+ return -ENOMEM;
codec->slave_dig_outs = spec->multiout.slave_dig_outs;
- info = spec->pcm_rec + 1;
- info->name = spec->stream_name_digital;
+ spec->pcm_rec[1] = info;
if (spec->dig_out_type)
info->pcm_type = spec->dig_out_type;
else
info->pcm_type = HDA_PCM_TYPE_SPDIF;
- if (spec->multiout.dig_out_nid) {
- p = spec->stream_digital_playback;
- if (!p)
- p = &pcm_digital_playback;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK] = *p;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dig_out_nid;
- }
- if (spec->dig_in_nid) {
- p = spec->stream_digital_capture;
- if (!p)
- p = &pcm_digital_capture;
- info->stream[SNDRV_PCM_STREAM_CAPTURE] = *p;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in_nid;
- }
+ if (spec->multiout.dig_out_nid)
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_PLAYBACK],
+ &pcm_digital_playback,
+ spec->stream_digital_playback,
+ spec->multiout.dig_out_nid);
+ if (spec->dig_in_nid)
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_CAPTURE],
+ &pcm_digital_capture,
+ spec->stream_digital_capture,
+ spec->dig_in_nid);
}
if (spec->no_analog)
@@ -5228,35 +5547,30 @@ int snd_hda_gen_build_pcms(struct hda_codec *codec)
if (spec->alt_dac_nid || have_multi_adcs) {
fill_pcm_stream_name(spec->stream_name_alt_analog,
sizeof(spec->stream_name_alt_analog),
- " Alt Analog", codec->chip_name);
- codec->num_pcms = 3;
- info = spec->pcm_rec + 2;
- info->name = spec->stream_name_alt_analog;
- if (spec->alt_dac_nid) {
- p = spec->stream_analog_alt_playback;
- if (!p)
- p = &pcm_analog_alt_playback;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK] = *p;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
- spec->alt_dac_nid;
- } else {
- info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
- pcm_null_stream;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = 0;
- }
+ " Alt Analog", codec->core.chip_name);
+ info = snd_hda_codec_pcm_new(codec, "%s",
+ spec->stream_name_alt_analog);
+ if (!info)
+ return -ENOMEM;
+ spec->pcm_rec[2] = info;
+ if (spec->alt_dac_nid)
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_PLAYBACK],
+ &pcm_analog_alt_playback,
+ spec->stream_analog_alt_playback,
+ spec->alt_dac_nid);
+ else
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_PLAYBACK],
+ &pcm_null_stream, NULL, 0);
if (have_multi_adcs) {
- p = spec->stream_analog_alt_capture;
- if (!p)
- p = &pcm_analog_alt_capture;
- info->stream[SNDRV_PCM_STREAM_CAPTURE] = *p;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
- spec->adc_nids[1];
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_CAPTURE],
+ &pcm_analog_alt_capture,
+ spec->stream_analog_alt_capture,
+ spec->adc_nids[1]);
info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams =
spec->num_adc_nids - 1;
} else {
- info->stream[SNDRV_PCM_STREAM_CAPTURE] =
- pcm_null_stream;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = 0;
+ setup_pcm_stream(&info->stream[SNDRV_PCM_STREAM_CAPTURE],
+ &pcm_null_stream, NULL, 0);
}
}
@@ -5452,8 +5766,6 @@ int snd_hda_gen_init(struct hda_codec *codec)
snd_hda_apply_verbs(codec);
- codec->cached_write = 1;
-
init_multi_out(codec);
init_extra_out(codec);
init_multi_io(codec);
@@ -5464,10 +5776,12 @@ int snd_hda_gen_init(struct hda_codec *codec)
clear_unsol_on_unused_pins(codec);
+ sync_all_pin_power_ctls(codec);
+
/* call init functions of standard auto-mute helpers */
update_automute_all(codec);
- snd_hda_codec_flush_cache(codec);
+ regcache_sync(codec->core.regmap);
if (spec->vmaster_mute.sw_kctl && spec->vmaster_mute.hook)
snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
@@ -5524,13 +5838,11 @@ static const struct hda_codec_ops generic_patch_ops = {
#endif
};
-/**
+/*
* snd_hda_parse_generic_codec - Generic codec parser
* @codec: the HDA codec
- *
- * This should be called from the HDA codec core.
*/
-int snd_hda_parse_generic_codec(struct hda_codec *codec)
+static int snd_hda_parse_generic_codec(struct hda_codec *codec)
{
struct hda_gen_spec *spec;
int err;
@@ -5556,7 +5868,17 @@ error:
snd_hda_gen_free(codec);
return err;
}
-EXPORT_SYMBOL_GPL(snd_hda_parse_generic_codec);
+
+static const struct hda_codec_preset snd_hda_preset_generic[] = {
+ { .id = HDA_CODEC_ID_GENERIC, .patch = snd_hda_parse_generic_codec },
+ {} /* terminator */
+};
+
+static struct hda_codec_driver generic_driver = {
+ .preset = snd_hda_preset_generic,
+};
+
+module_hda_codec_driver(generic_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic HD-audio codec parser");
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 3d852660443a..56e4139b9032 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -46,7 +46,10 @@ struct nid_path {
unsigned char idx[MAX_NID_PATH_DEPTH];
unsigned char multi[MAX_NID_PATH_DEPTH];
unsigned int ctls[NID_PATH_NUM_CTLS]; /* NID_PATH_XXX_CTL */
- bool active;
+ bool active:1; /* activated by driver */
+ bool pin_enabled:1; /* pins are enabled */
+ bool pin_fixed:1; /* path with fixed pin */
+ bool stream_enabled:1; /* stream is active */
};
/* mic/line-in auto switching entry */
@@ -144,7 +147,7 @@ struct hda_gen_spec {
int const_channel_count; /* channel count for all */
/* PCM information */
- struct hda_pcm pcm_rec[3]; /* used in build_pcms() */
+ struct hda_pcm *pcm_rec[3]; /* used in build_pcms() */
/* dynamic controls, init_verbs and input_mux */
struct auto_pin_cfg autocfg;
@@ -340,5 +343,7 @@ int snd_hda_gen_check_power_status(struct hda_codec *codec, hda_nid_t nid);
unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
hda_nid_t nid,
unsigned int power_state);
+void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on);
+int snd_hda_gen_fix_pin_power(struct hda_codec *codec, hda_nid_t pin);
#endif /* __SOUND_HDA_GENERIC_H */
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 11b5a42b4ec8..57df06e76968 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -101,7 +101,7 @@ int snd_hda_create_hwdep(struct hda_codec *codec)
int err;
sprintf(hwname, "HDA Codec %d", codec->addr);
- err = snd_hwdep_new(codec->bus->card, hwname, codec->addr, &hwdep);
+ err = snd_hwdep_new(codec->card, hwname, codec->addr, &hwdep);
if (err < 0)
return err;
codec->hwdep = hwdep;
@@ -116,9 +116,6 @@ int snd_hda_create_hwdep(struct hda_codec *codec)
hwdep->ops.ioctl_compat = hda_hwdep_ioctl_compat;
#endif
- /* link to codec */
- hwdep->dev.parent = &codec->dev;
-
/* for sysfs */
hwdep->dev.groups = snd_hda_dev_attr_groups;
dev_set_drvdata(&hwdep->dev, codec);
diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c
index 714894527e06..3052a2b095f7 100644
--- a/sound/pci/hda/hda_i915.c
+++ b/sound/pci/hda/hda_i915.c
@@ -22,7 +22,7 @@
#include <linux/component.h>
#include <drm/i915_component.h>
#include <sound/core.h>
-#include "hda_priv.h"
+#include "hda_controller.h"
#include "hda_intel.h"
/* Intel HSW/BDW display HDA controller Extended Mode registers.
@@ -55,6 +55,12 @@ void haswell_set_bclk(struct hda_intel *hda)
int cdclk_freq;
unsigned int bclk_m, bclk_n;
struct i915_audio_component *acomp = &hda->audio_component;
+ struct pci_dev *pci = hda->chip.pci;
+
+ /* Only Haswell/Broadwell need to set BCLK */
+ if (pci->device != 0x0a0c && pci->device != 0x0c0c
+ && pci->device != 0x0d0c && pci->device != 0x160c)
+ return;
if (!acomp->ops)
return;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a8a1e14272a1..34040d26c94f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -62,7 +62,6 @@
#include <linux/firmware.h>
#include "hda_codec.h"
#include "hda_controller.h"
-#include "hda_priv.h"
#include "hda_intel.h"
/* position fix mode */
@@ -174,7 +173,6 @@ static struct kernel_param_ops param_ops_xint = {
#define param_check_xint param_check_int
static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
-static int *power_save_addr = &power_save;
module_param(power_save, xint, 0644);
MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
"(in second, 0 = disable).");
@@ -187,7 +185,7 @@ static bool power_save_controller = 1;
module_param(power_save_controller, bool, 0644);
MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode.");
#else
-static int *power_save_addr;
+#define power_save 0
#endif /* CONFIG_PM */
static int align_buffer_size = -1;
@@ -299,8 +297,15 @@ enum {
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
AZX_DCAPS_SNOOP_TYPE(SCH))
+#define AZX_DCAPS_INTEL_BAYTRAIL \
+ (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_I915_POWERWELL)
+
+#define AZX_DCAPS_INTEL_BRASWELL \
+ (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_I915_POWERWELL)
+
#define AZX_DCAPS_INTEL_SKYLAKE \
- (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG)
+ (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
+ AZX_DCAPS_I915_POWERWELL)
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
@@ -530,10 +535,10 @@ static int azx_position_check(struct azx *chip, struct azx_dev *azx_dev)
if (ok == 1) {
azx_dev->irq_pending = 0;
return ok;
- } else if (ok == 0 && chip->bus && chip->bus->workq) {
+ } else if (ok == 0) {
/* bogus IRQ, process it later */
azx_dev->irq_pending = 1;
- queue_work(chip->bus->workq, &hda->irq_pending_work);
+ schedule_work(&hda->irq_pending_work);
}
return 0;
}
@@ -741,7 +746,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
{
struct hda_intel *hda;
struct azx *chip;
- struct hda_codec *c;
int prev = power_save;
int ret = param_set_int(val, kp);
@@ -753,8 +757,7 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
chip = &hda->chip;
if (!chip->bus || chip->disabled)
continue;
- list_for_each_entry(c, &chip->bus->codec_list, list)
- snd_hda_power_sync(c);
+ snd_hda_set_power_save(chip->bus, power_save * 1000);
}
mutex_unlock(&card_list_lock);
return 0;
@@ -773,7 +776,6 @@ static int azx_suspend(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
struct hda_intel *hda;
- struct azx_pcm *p;
if (!card)
return 0;
@@ -785,10 +787,6 @@ static int azx_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
azx_clear_irq_pending(chip);
- list_for_each_entry(p, &chip->pcm_list, list)
- snd_pcm_suspend_all(p->pcm);
- if (chip->initialized)
- snd_hda_suspend(chip->bus);
azx_stop_chip(chip);
azx_enter_link_reset(chip);
if (chip->irq >= 0) {
@@ -831,7 +829,6 @@ static int azx_resume(struct device *dev)
azx_init_chip(chip, true);
- snd_hda_resume(chip->bus);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
@@ -852,7 +849,7 @@ static int azx_runtime_suspend(struct device *dev)
if (chip->disabled || hda->init_failed)
return 0;
- if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ if (!azx_has_pm_runtime(chip))
return 0;
/* enable controller wake up event */
@@ -885,7 +882,7 @@ static int azx_runtime_resume(struct device *dev)
if (chip->disabled || hda->init_failed)
return 0;
- if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ if (!azx_has_pm_runtime(chip))
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
@@ -901,10 +898,10 @@ static int azx_runtime_resume(struct device *dev)
bus = chip->bus;
if (status && bus) {
- list_for_each_entry(codec, &bus->codec_list, list)
+ list_for_each_codec(codec, bus)
if (status & (1 << codec->addr))
- queue_delayed_work(codec->bus->workq,
- &codec->jackpoll_work, codec->jackpoll_interval);
+ schedule_delayed_work(&codec->jackpoll_work,
+ codec->jackpoll_interval);
}
/* disable controller Wake Up event */
@@ -928,8 +925,8 @@ static int azx_runtime_idle(struct device *dev)
if (chip->disabled || hda->init_failed)
return 0;
- if (!power_save_controller ||
- !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ if (!power_save_controller || !azx_has_pm_runtime(chip) ||
+ chip->bus->core.codec_powered)
return -EBUSY;
return 0;
@@ -1071,14 +1068,11 @@ static int azx_free(struct azx *chip)
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
int i;
- if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
- && chip->running)
+ if (azx_has_pm_runtime(chip) && chip->running)
pm_runtime_get_noresume(&pci->dev);
azx_del_card_list(chip);
- azx_notifier_unregister(chip);
-
hda->init_failed = 1; /* to be sure */
complete_all(&hda->probe_wait);
@@ -1394,7 +1388,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
hda = kzalloc(sizeof(*hda), GFP_KERNEL);
if (!hda) {
- dev_err(card->dev, "Cannot allocate hda\n");
pci_disable_device(pci);
return -ENOMEM;
}
@@ -1575,10 +1568,8 @@ static int azx_first_init(struct azx *chip)
chip->num_streams = chip->playback_streams + chip->capture_streams;
chip->azx_dev = kcalloc(chip->num_streams, sizeof(*chip->azx_dev),
GFP_KERNEL);
- if (!chip->azx_dev) {
- dev_err(card->dev, "cannot malloc azx_dev\n");
+ if (!chip->azx_dev)
return -ENOMEM;
- }
err = azx_alloc_stream_pages(chip);
if (err < 0)
@@ -1615,19 +1606,6 @@ static int azx_first_init(struct azx *chip)
return 0;
}
-static void power_down_all_codecs(struct azx *chip)
-{
-#ifdef CONFIG_PM
- /* The codecs were powered up in snd_hda_codec_new().
- * Now all initialization done, so turn them down if possible
- */
- struct hda_codec *codec;
- list_for_each_entry(codec, &chip->bus->codec_list, list) {
- snd_hda_power_down(codec);
- }
-#endif
-}
-
#ifdef CONFIG_SND_HDA_PATCH_LOADER
/* callback from request_firmware_nowait() */
static void azx_firmware_cb(const struct firmware *fw, void *context)
@@ -1896,12 +1874,14 @@ static int azx_probe_continue(struct azx *chip)
#endif
/* create codec instances */
- err = azx_codec_create(chip, model[dev],
- azx_max_codecs[chip->driver_type],
- power_save_addr);
+ err = azx_bus_create(chip, model[dev]);
+ if (err < 0)
+ goto out_free;
+ err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
if (err < 0)
goto out_free;
+
#ifdef CONFIG_SND_HDA_PATCH_LOADER
if (chip->fw) {
err = snd_hda_load_patch(chip->bus, chip->fw->size,
@@ -1920,25 +1900,14 @@ static int azx_probe_continue(struct azx *chip)
goto out_free;
}
- /* create PCM streams */
- err = snd_hda_build_pcms(chip->bus);
- if (err < 0)
- goto out_free;
-
- /* create mixer controls */
- err = azx_mixer_create(chip);
- if (err < 0)
- goto out_free;
-
err = snd_card_register(chip->card);
if (err < 0)
goto out_free;
chip->running = 1;
- power_down_all_codecs(chip);
- azx_notifier_register(chip);
azx_add_card_list(chip);
- if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || hda->use_vga_switcheroo)
+ snd_hda_set_power_save(chip->bus, power_save * 1000);
+ if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
pm_runtime_put_noidle(&pci->dev);
out_free:
@@ -1956,6 +1925,18 @@ static void azx_remove(struct pci_dev *pci)
snd_card_free(card);
}
+static void azx_shutdown(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip;
+
+ if (!card)
+ return;
+ chip = card->private_data;
+ if (chip && chip->running)
+ azx_stop_chip(chip);
+}
+
/* PCI IDs */
static const struct pci_device_id azx_ids[] = {
/* CPT */
@@ -2014,10 +1995,10 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
/* BayTrail */
{ PCI_DEVICE(0x8086, 0x0f04),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BAYTRAIL },
/* Braswell */
{ PCI_DEVICE(0x8086, 0x2284),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BRASWELL },
/* ICH6 */
{ PCI_DEVICE(0x8086, 0x2668),
.driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
@@ -2178,6 +2159,7 @@ static struct pci_driver azx_driver = {
.id_table = azx_ids,
.probe = azx_probe,
.remove = azx_remove,
+ .shutdown = azx_shutdown,
.driver = {
.pm = AZX_PM_OPS,
},
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index 348611835476..d5231f7216a7 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -17,7 +17,7 @@
#define __SOUND_HDA_INTEL_H
#include <drm/i915_component.h>
-#include "hda_priv.h"
+#include "hda_controller.h"
struct hda_intel {
struct azx chip;
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index e664307617bd..d7cfe7b8c32b 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -135,7 +135,7 @@ void snd_hda_jack_tbl_clear(struct hda_codec *codec)
#ifdef CONFIG_SND_HDA_INPUT_JACK
/* free jack instances manually when clearing/reconfiguring */
if (!codec->bus->shutdown && jack->jack)
- snd_device_free(codec->bus->card, jack->jack);
+ snd_device_free(codec->card, jack->jack);
#endif
for (cb = jack->callback; cb; cb = next) {
next = cb->next;
@@ -340,7 +340,7 @@ void snd_hda_jack_report_sync(struct hda_codec *codec)
if (!jack->kctl || jack->block_report)
continue;
state = get_jack_plug_state(jack->pin_sense);
- snd_kctl_jack_report(codec->bus->card, jack->kctl, state);
+ snd_kctl_jack_report(codec->card, jack->kctl, state);
#ifdef CONFIG_SND_HDA_INPUT_JACK
if (jack->jack)
snd_jack_report(jack->jack,
@@ -412,11 +412,11 @@ static int __snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
jack->phantom_jack = !!phantom_jack;
state = snd_hda_jack_detect(codec, nid);
- snd_kctl_jack_report(codec->bus->card, kctl, state);
+ snd_kctl_jack_report(codec->card, kctl, state);
#ifdef CONFIG_SND_HDA_INPUT_JACK
if (!phantom_jack) {
jack->type = get_input_jack_type(codec, nid);
- err = snd_jack_new(codec->bus->card, name, jack->type,
+ err = snd_jack_new(codec->card, name, jack->type,
&jack->jack);
if (err < 0)
return err;
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 62658f2f8c9f..3b567f42296b 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -127,18 +127,16 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
#endif
/* lowlevel accessor with caching; use carefully */
-int snd_hda_codec_amp_read(struct hda_codec *codec, hda_nid_t nid, int ch,
- int direction, int index);
-int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, int ch,
- int direction, int idx, int mask, int val);
+#define snd_hda_codec_amp_read(codec, nid, ch, dir, idx) \
+ snd_hdac_regmap_get_amp(&(codec)->core, nid, ch, dir, idx)
+#define snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val) \
+ snd_hdac_regmap_update_amp(&(codec)->core, nid, ch, dir, idx, mask, val)
int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid,
int dir, int idx, int mask, int val);
int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch,
int direction, int idx, int mask, int val);
int snd_hda_codec_amp_init_stereo(struct hda_codec *codec, hda_nid_t nid,
int dir, int idx, int mask, int val);
-void snd_hda_codec_resume_amp(struct hda_codec *codec);
-
void snd_hda_set_vmaster_tlv(struct hda_codec *codec, hda_nid_t nid, int dir,
unsigned int *tlv);
struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
@@ -150,6 +148,8 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
#define snd_hda_add_vmaster(codec, name, tlv, slaves, suffix) \
__snd_hda_add_vmaster(codec, name, tlv, slaves, suffix, true, NULL)
int snd_hda_codec_reset(struct hda_codec *codec);
+void snd_hda_codec_register(struct hda_codec *codec);
+void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
enum {
HDA_VMUTE_OFF,
@@ -273,29 +273,6 @@ int snd_hda_add_imux_item(struct hda_codec *codec,
int index, int *type_index_ret);
/*
- * Channel mode helper
- */
-struct hda_channel_mode {
- int channels;
- const struct hda_verb *sequence;
-};
-
-int snd_hda_ch_mode_info(struct hda_codec *codec,
- struct snd_ctl_elem_info *uinfo,
- const struct hda_channel_mode *chmode,
- int num_chmodes);
-int snd_hda_ch_mode_get(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol,
- const struct hda_channel_mode *chmode,
- int num_chmodes,
- int max_channels);
-int snd_hda_ch_mode_put(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol,
- const struct hda_channel_mode *chmode,
- int num_chmodes,
- int *max_channelsp);
-
-/*
* Multi-channel / digital-out PCM helper
*/
@@ -351,12 +328,6 @@ int snd_hda_multi_out_analog_cleanup(struct hda_codec *codec,
struct hda_multi_out *mout);
/*
- * generic codec parser
- */
-int snd_hda_parse_generic_codec(struct hda_codec *codec);
-int snd_hda_parse_hdmi_codec(struct hda_codec *codec);
-
-/*
* generic proc interface
*/
#ifdef CONFIG_PROC_FS
@@ -466,23 +437,6 @@ void snd_hda_pick_pin_fixup(struct hda_codec *codec,
const struct snd_hda_pin_quirk *pin_quirk,
const struct hda_fixup *fixlist);
-
-/*
- * unsolicited event handler
- */
-
-#define HDA_UNSOL_QUEUE_SIZE 64
-
-struct hda_bus_unsolicited {
- /* ring buffer */
- u32 queue[HDA_UNSOL_QUEUE_SIZE * 2];
- unsigned int rp, wp;
-
- /* workqueue */
- struct work_struct work;
- struct hda_bus *bus;
-};
-
/* helper macros to retrieve pin default-config values */
#define get_defcfg_connect(cfg) \
((cfg & AC_DEFCFG_PORT_CONN) >> AC_DEFCFG_PORT_CONN_SHIFT)
@@ -560,15 +514,18 @@ int snd_hda_codec_get_pin_target(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
unsigned int val);
+#define for_each_hda_codec_node(nid, codec) \
+ for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++)
+
/*
* get widget capabilities
*/
static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
{
- if (nid < codec->start_nid ||
- nid >= codec->start_nid + codec->num_nodes)
+ if (nid < codec->core.start_nid ||
+ nid >= codec->core.start_nid + codec->core.num_nodes)
return 0;
- return codec->wcaps[nid - codec->start_nid];
+ return codec->wcaps[nid - codec->core.start_nid];
}
/* get the widget type from widget capability bits */
@@ -592,17 +549,49 @@ static inline unsigned int get_wcaps_channels(u32 wcaps)
static inline void snd_hda_override_wcaps(struct hda_codec *codec,
hda_nid_t nid, u32 val)
{
- if (nid >= codec->start_nid &&
- nid < codec->start_nid + codec->num_nodes)
- codec->wcaps[nid - codec->start_nid] = val;
+ if (nid >= codec->core.start_nid &&
+ nid < codec->core.start_nid + codec->core.num_nodes)
+ codec->wcaps[nid - codec->core.start_nid] = val;
}
u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction);
int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
unsigned int caps);
-u32 snd_hda_query_pin_caps(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_override_pin_caps(struct hda_codec *codec, hda_nid_t nid,
- unsigned int caps);
+/**
+ * snd_hda_query_pin_caps - Query PIN capabilities
+ * @codec: the HD-audio codec
+ * @nid: the NID to query
+ *
+ * Query PIN capabilities for the given widget.
+ * Returns the obtained capability bits.
+ *
+ * When the cap bits have already been read, this doesn't read them again but
+ * returns the cached value.
+ */
+static inline u32
+snd_hda_query_pin_caps(struct hda_codec *codec, hda_nid_t nid)
+{
+ return snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
+}
+
+/**
+ * snd_hda_override_pin_caps - Override the pin capabilities
+ * @codec: the CODEC
+ * @nid: the NID to override
+ * @caps: the capability bits to set
+ *
+ * Override the cached PIN capability bits with the given value.
+ *
+ * Returns zero if successful or a negative error code.
+ */
+static inline int
+snd_hda_override_pin_caps(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int caps)
+{
+ return snd_hdac_override_parm(&codec->core, nid, AC_PAR_PIN_CAP, caps);
+}
+
bool snd_hda_check_amp_caps(struct hda_codec *codec, hda_nid_t nid,
int dir, unsigned int bits);
@@ -800,9 +789,13 @@ void snd_print_channel_allocation(int spk_alloc, char *buf, int buflen);
/*
*/
-#define codec_err(codec, fmt, args...) dev_err(&(codec)->dev, fmt, ##args)
-#define codec_warn(codec, fmt, args...) dev_warn(&(codec)->dev, fmt, ##args)
-#define codec_info(codec, fmt, args...) dev_info(&(codec)->dev, fmt, ##args)
-#define codec_dbg(codec, fmt, args...) dev_dbg(&(codec)->dev, fmt, ##args)
+#define codec_err(codec, fmt, args...) \
+ dev_err(hda_codec_dev(codec), fmt, ##args)
+#define codec_warn(codec, fmt, args...) \
+ dev_warn(hda_codec_dev(codec), fmt, ##args)
+#define codec_info(codec, fmt, args...) \
+ dev_info(hda_codec_dev(codec), fmt, ##args)
+#define codec_dbg(codec, fmt, args...) \
+ dev_dbg(hda_codec_dev(codec), fmt, ##args)
#endif /* __SOUND_HDA_LOCAL_H */
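The kernel-doc blocks added above turn snd_hda_query_pin_caps() and snd_hda_override_pin_caps() into thin inline wrappers around the hdac core. A hedged usage sketch of the two helpers as declared in this hunk; the pin NID and the AC_PINCAP_IN check are illustrative only:

/* illustrative only: query a pin's capabilities and force the IN bit */
static void example_force_input_cap(struct hda_codec *codec, hda_nid_t pin)
{
	u32 caps = snd_hda_query_pin_caps(codec, pin);

	if (!(caps & AC_PINCAP_IN))
		snd_hda_override_pin_caps(codec, pin, caps | AC_PINCAP_IN);
}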
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
deleted file mode 100644
index daf458299753..000000000000
--- a/sound/pci/hda/hda_priv.h
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * Common defines for the alsa driver code base for HD Audio.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __SOUND_HDA_PRIV_H
-#define __SOUND_HDA_PRIV_H
-
-#include <linux/timecounter.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-
-/*
- * registers
- */
-#define AZX_REG_GCAP 0x00
-#define AZX_GCAP_64OK (1 << 0) /* 64bit address support */
-#define AZX_GCAP_NSDO (3 << 1) /* # of serial data out signals */
-#define AZX_GCAP_BSS (31 << 3) /* # of bidirectional streams */
-#define AZX_GCAP_ISS (15 << 8) /* # of input streams */
-#define AZX_GCAP_OSS (15 << 12) /* # of output streams */
-#define AZX_REG_VMIN 0x02
-#define AZX_REG_VMAJ 0x03
-#define AZX_REG_OUTPAY 0x04
-#define AZX_REG_INPAY 0x06
-#define AZX_REG_GCTL 0x08
-#define AZX_GCTL_RESET (1 << 0) /* controller reset */
-#define AZX_GCTL_FCNTRL (1 << 1) /* flush control */
-#define AZX_GCTL_UNSOL (1 << 8) /* accept unsol. response enable */
-#define AZX_REG_WAKEEN 0x0c
-#define AZX_REG_STATESTS 0x0e
-#define AZX_REG_GSTS 0x10
-#define AZX_GSTS_FSTS (1 << 1) /* flush status */
-#define AZX_REG_INTCTL 0x20
-#define AZX_REG_INTSTS 0x24
-#define AZX_REG_WALLCLK 0x30 /* 24Mhz source */
-#define AZX_REG_OLD_SSYNC 0x34 /* SSYNC for old ICH */
-#define AZX_REG_SSYNC 0x38
-#define AZX_REG_CORBLBASE 0x40
-#define AZX_REG_CORBUBASE 0x44
-#define AZX_REG_CORBWP 0x48
-#define AZX_REG_CORBRP 0x4a
-#define AZX_CORBRP_RST (1 << 15) /* read pointer reset */
-#define AZX_REG_CORBCTL 0x4c
-#define AZX_CORBCTL_RUN (1 << 1) /* enable DMA */
-#define AZX_CORBCTL_CMEIE (1 << 0) /* enable memory error irq */
-#define AZX_REG_CORBSTS 0x4d
-#define AZX_CORBSTS_CMEI (1 << 0) /* memory error indication */
-#define AZX_REG_CORBSIZE 0x4e
-
-#define AZX_REG_RIRBLBASE 0x50
-#define AZX_REG_RIRBUBASE 0x54
-#define AZX_REG_RIRBWP 0x58
-#define AZX_RIRBWP_RST (1 << 15) /* write pointer reset */
-#define AZX_REG_RINTCNT 0x5a
-#define AZX_REG_RIRBCTL 0x5c
-#define AZX_RBCTL_IRQ_EN (1 << 0) /* enable IRQ */
-#define AZX_RBCTL_DMA_EN (1 << 1) /* enable DMA */
-#define AZX_RBCTL_OVERRUN_EN (1 << 2) /* enable overrun irq */
-#define AZX_REG_RIRBSTS 0x5d
-#define AZX_RBSTS_IRQ (1 << 0) /* response irq */
-#define AZX_RBSTS_OVERRUN (1 << 2) /* overrun irq */
-#define AZX_REG_RIRBSIZE 0x5e
-
-#define AZX_REG_IC 0x60
-#define AZX_REG_IR 0x64
-#define AZX_REG_IRS 0x68
-#define AZX_IRS_VALID (1<<1)
-#define AZX_IRS_BUSY (1<<0)
-
-#define AZX_REG_DPLBASE 0x70
-#define AZX_REG_DPUBASE 0x74
-#define AZX_DPLBASE_ENABLE 0x1 /* Enable position buffer */
-
-/* SD offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
-enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
-
-/* stream register offsets from stream base */
-#define AZX_REG_SD_CTL 0x00
-#define AZX_REG_SD_STS 0x03
-#define AZX_REG_SD_LPIB 0x04
-#define AZX_REG_SD_CBL 0x08
-#define AZX_REG_SD_LVI 0x0c
-#define AZX_REG_SD_FIFOW 0x0e
-#define AZX_REG_SD_FIFOSIZE 0x10
-#define AZX_REG_SD_FORMAT 0x12
-#define AZX_REG_SD_BDLPL 0x18
-#define AZX_REG_SD_BDLPU 0x1c
-
-/* PCI space */
-#define AZX_PCIREG_TCSEL 0x44
-
-/*
- * other constants
- */
-
-/* max number of fragments - we may use more if allocating more pages for BDL */
-#define BDL_SIZE 4096
-#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16)
-#define AZX_MAX_FRAG 32
-/* max buffer size - no h/w limit, you can increase as you like */
-#define AZX_MAX_BUF_SIZE (1024*1024*1024)
-
-/* RIRB int mask: overrun[2], response[0] */
-#define RIRB_INT_RESPONSE 0x01
-#define RIRB_INT_OVERRUN 0x04
-#define RIRB_INT_MASK 0x05
-
-/* STATESTS int mask: S3,SD2,SD1,SD0 */
-#define AZX_MAX_CODECS 8
-#define AZX_DEFAULT_CODECS 4
-#define STATESTS_INT_MASK ((1 << AZX_MAX_CODECS) - 1)
-
-/* SD_CTL bits */
-#define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */
-#define SD_CTL_DMA_START 0x02 /* stream DMA start bit */
-#define SD_CTL_STRIPE (3 << 16) /* stripe control */
-#define SD_CTL_TRAFFIC_PRIO (1 << 18) /* traffic priority */
-#define SD_CTL_DIR (1 << 19) /* bi-directional stream */
-#define SD_CTL_STREAM_TAG_MASK (0xf << 20)
-#define SD_CTL_STREAM_TAG_SHIFT 20
-
-/* SD_CTL and SD_STS */
-#define SD_INT_DESC_ERR 0x10 /* descriptor error interrupt */
-#define SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
-#define SD_INT_COMPLETE 0x04 /* completion interrupt */
-#define SD_INT_MASK (SD_INT_DESC_ERR|SD_INT_FIFO_ERR|\
- SD_INT_COMPLETE)
-
-/* SD_STS */
-#define SD_STS_FIFO_READY 0x20 /* FIFO ready */
-
-/* INTCTL and INTSTS */
-#define AZX_INT_ALL_STREAM 0xff /* all stream interrupts */
-#define AZX_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */
-#define AZX_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */
-
-/* below are so far hardcoded - should read registers in future */
-#define AZX_MAX_CORB_ENTRIES 256
-#define AZX_MAX_RIRB_ENTRIES 256
-
-/* driver quirks (capabilities) */
-/* bits 0-7 are used for indicating driver type */
-#define AZX_DCAPS_NO_TCSEL (1 << 8) /* No Intel TCSEL bit */
-#define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */
-#define AZX_DCAPS_SNOOP_MASK (3 << 10) /* snoop type mask */
-#define AZX_DCAPS_SNOOP_OFF (1 << 12) /* snoop default off */
-#define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
-#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
-#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
-#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
-#define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */
-#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
-#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
-#define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
-#define AZX_DCAPS_NO_ALIGN_BUFSIZE (1 << 21) /* no buffer size alignment */
-/* 22 unused */
-#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
-#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
-#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
-#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
-#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
-#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
-#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
-#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
-
-enum {
- AZX_SNOOP_TYPE_NONE ,
- AZX_SNOOP_TYPE_SCH,
- AZX_SNOOP_TYPE_ATI,
- AZX_SNOOP_TYPE_NVIDIA,
-};
-
-/* HD Audio class code */
-#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
-
-struct azx_dev {
- struct snd_dma_buffer bdl; /* BDL buffer */
- u32 *posbuf; /* position buffer pointer */
-
- unsigned int bufsize; /* size of the play buffer in bytes */
- unsigned int period_bytes; /* size of the period in bytes */
- unsigned int frags; /* number for period in the play buffer */
- unsigned int fifo_size; /* FIFO size */
- unsigned long start_wallclk; /* start + minimum wallclk */
- unsigned long period_wallclk; /* wallclk for period */
-
- void __iomem *sd_addr; /* stream descriptor pointer */
-
- u32 sd_int_sta_mask; /* stream int status mask */
-
- /* pcm support */
- struct snd_pcm_substream *substream; /* assigned substream,
- * set in PCM open
- */
- unsigned int format_val; /* format value to be set in the
- * controller and the codec
- */
- unsigned char stream_tag; /* assigned stream */
- unsigned char index; /* stream index */
- int assigned_key; /* last device# key assigned to */
-
- unsigned int opened:1;
- unsigned int running:1;
- unsigned int irq_pending:1;
- unsigned int prepared:1;
- unsigned int locked:1;
- /*
- * For VIA:
- * A flag to ensure DMA position is 0
- * when link position is not greater than FIFO size
- */
- unsigned int insufficient:1;
- unsigned int wc_marked:1;
- unsigned int no_period_wakeup:1;
-
- struct timecounter azx_tc;
- struct cyclecounter azx_cc;
-
- int delay_negative_threshold;
-
-#ifdef CONFIG_SND_HDA_DSP_LOADER
- /* Allows dsp load to have sole access to the playback stream. */
- struct mutex dsp_mutex;
-#endif
-};
-
-/* CORB/RIRB */
-struct azx_rb {
- u32 *buf; /* CORB/RIRB buffer
- * Each CORB entry is 4byte, RIRB is 8byte
- */
- dma_addr_t addr; /* physical address of CORB/RIRB buffer */
- /* for RIRB */
- unsigned short rp, wp; /* read/write pointers */
- int cmds[AZX_MAX_CODECS]; /* number of pending requests */
- u32 res[AZX_MAX_CODECS]; /* last read value */
-};
-
-struct azx;
-
-/* Functions to read/write to hda registers. */
-struct hda_controller_ops {
- /* Register Access */
- void (*reg_writel)(u32 value, u32 __iomem *addr);
- u32 (*reg_readl)(u32 __iomem *addr);
- void (*reg_writew)(u16 value, u16 __iomem *addr);
- u16 (*reg_readw)(u16 __iomem *addr);
- void (*reg_writeb)(u8 value, u8 __iomem *addr);
- u8 (*reg_readb)(u8 __iomem *addr);
- /* Disable msi if supported, PCI only */
- int (*disable_msi_reset_irq)(struct azx *);
- /* Allocation ops */
- int (*dma_alloc_pages)(struct azx *chip,
- int type,
- size_t size,
- struct snd_dma_buffer *buf);
- void (*dma_free_pages)(struct azx *chip, struct snd_dma_buffer *buf);
- int (*substream_alloc_pages)(struct azx *chip,
- struct snd_pcm_substream *substream,
- size_t size);
- int (*substream_free_pages)(struct azx *chip,
- struct snd_pcm_substream *substream);
- void (*pcm_mmap_prepare)(struct snd_pcm_substream *substream,
- struct vm_area_struct *area);
- /* Check if current position is acceptable */
- int (*position_check)(struct azx *chip, struct azx_dev *azx_dev);
-};
-
-struct azx_pcm {
- struct azx *chip;
- struct snd_pcm *pcm;
- struct hda_codec *codec;
- struct hda_pcm_stream *hinfo[2];
- struct list_head list;
-};
-
-typedef unsigned int (*azx_get_pos_callback_t)(struct azx *, struct azx_dev *);
-typedef int (*azx_get_delay_callback_t)(struct azx *, struct azx_dev *, unsigned int pos);
-
-struct azx {
- struct snd_card *card;
- struct pci_dev *pci;
- int dev_index;
-
- /* chip type specific */
- int driver_type;
- unsigned int driver_caps;
- int playback_streams;
- int playback_index_offset;
- int capture_streams;
- int capture_index_offset;
- int num_streams;
- const int *jackpoll_ms; /* per-card jack poll interval */
-
- /* Register interaction. */
- const struct hda_controller_ops *ops;
-
- /* position adjustment callbacks */
- azx_get_pos_callback_t get_position[2];
- azx_get_delay_callback_t get_delay[2];
-
- /* pci resources */
- unsigned long addr;
- void __iomem *remap_addr;
- int irq;
-
- /* locks */
- spinlock_t reg_lock;
- struct mutex open_mutex; /* Prevents concurrent open/close operations */
-
- /* streams (x num_streams) */
- struct azx_dev *azx_dev;
-
- /* PCM */
- struct list_head pcm_list; /* azx_pcm list */
-
- /* HD codec */
- unsigned short codec_mask;
- int codec_probe_mask; /* copied from probe_mask option */
- struct hda_bus *bus;
- unsigned int beep_mode;
-
- /* CORB/RIRB */
- struct azx_rb corb;
- struct azx_rb rirb;
-
- /* CORB/RIRB and position buffers */
- struct snd_dma_buffer rb;
- struct snd_dma_buffer posbuf;
-
-#ifdef CONFIG_SND_HDA_PATCH_LOADER
- const struct firmware *fw;
-#endif
-
- /* flags */
- const int *bdl_pos_adj;
- int poll_count;
- unsigned int running:1;
- unsigned int initialized:1;
- unsigned int single_cmd:1;
- unsigned int polling_mode:1;
- unsigned int msi:1;
- unsigned int probing:1; /* codec probing phase */
- unsigned int snoop:1;
- unsigned int align_buffer_size:1;
- unsigned int region_requested:1;
- unsigned int disabled:1; /* disabled by VGA-switcher */
-
- /* for debugging */
- unsigned int last_cmd[AZX_MAX_CODECS];
-
- /* reboot notifier (for mysterious hangup problem at power-down) */
- struct notifier_block reboot_notifier;
-
-#ifdef CONFIG_SND_HDA_DSP_LOADER
- struct azx_dev saved_azx_dev;
-#endif
-};
-
-#ifdef CONFIG_X86
-#define azx_snoop(chip) ((chip)->snoop)
-#else
-#define azx_snoop(chip) true
-#endif
-
-/*
- * macros for easy use
- */
-
-#define azx_writel(chip, reg, value) \
- ((chip)->ops->reg_writel(value, (chip)->remap_addr + AZX_REG_##reg))
-#define azx_readl(chip, reg) \
- ((chip)->ops->reg_readl((chip)->remap_addr + AZX_REG_##reg))
-#define azx_writew(chip, reg, value) \
- ((chip)->ops->reg_writew(value, (chip)->remap_addr + AZX_REG_##reg))
-#define azx_readw(chip, reg) \
- ((chip)->ops->reg_readw((chip)->remap_addr + AZX_REG_##reg))
-#define azx_writeb(chip, reg, value) \
- ((chip)->ops->reg_writeb(value, (chip)->remap_addr + AZX_REG_##reg))
-#define azx_readb(chip, reg) \
- ((chip)->ops->reg_readb((chip)->remap_addr + AZX_REG_##reg))
-
-#define azx_sd_writel(chip, dev, reg, value) \
- ((chip)->ops->reg_writel(value, (dev)->sd_addr + AZX_REG_##reg))
-#define azx_sd_readl(chip, dev, reg) \
- ((chip)->ops->reg_readl((dev)->sd_addr + AZX_REG_##reg))
-#define azx_sd_writew(chip, dev, reg, value) \
- ((chip)->ops->reg_writew(value, (dev)->sd_addr + AZX_REG_##reg))
-#define azx_sd_readw(chip, dev, reg) \
- ((chip)->ops->reg_readw((dev)->sd_addr + AZX_REG_##reg))
-#define azx_sd_writeb(chip, dev, reg, value) \
- ((chip)->ops->reg_writeb(value, (dev)->sd_addr + AZX_REG_##reg))
-#define azx_sd_readb(chip, dev, reg) \
- ((chip)->ops->reg_readb((dev)->sd_addr + AZX_REG_##reg))
-
-#endif /* __SOUND_HDA_PRIV_H */
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 05e19f78b4cb..baaf7ed06875 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -32,6 +32,10 @@ static int dump_coef = -1;
module_param(dump_coef, int, 0644);
MODULE_PARM_DESC(dump_coef, "Dump processing coefficients in codec proc file (-1=auto, 0=disable, 1=enable)");
+/* always use noncached version */
+#define param_read(codec, nid, parm) \
+ snd_hdac_read_parm_uncached(&(codec)->core, nid, parm)
+
static char *bits_names(unsigned int bits, char *names[], int size)
{
int i, n;
@@ -99,10 +103,10 @@ static void print_nid_array(struct snd_info_buffer *buffer,
static void print_nid_pcms(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
- int pcm, type;
+ int type;
struct hda_pcm *cpcm;
- for (pcm = 0; pcm < codec->num_pcms; pcm++) {
- cpcm = &codec->pcm_info[pcm];
+
+ list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
for (type = 0; type < 2; type++) {
if (cpcm->stream[type].nid != nid || cpcm->pcm == NULL)
continue;
@@ -119,9 +123,8 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid, int dir)
{
unsigned int caps;
- caps = snd_hda_param_read(codec, nid,
- dir == HDA_OUTPUT ?
- AC_PAR_AMP_OUT_CAP : AC_PAR_AMP_IN_CAP);
+ caps = param_read(codec, nid, dir == HDA_OUTPUT ?
+ AC_PAR_AMP_OUT_CAP : AC_PAR_AMP_IN_CAP);
if (caps == -1 || caps == 0) {
snd_iprintf(buffer, "N/A\n");
return;
@@ -225,8 +228,8 @@ static void print_pcm_formats(struct snd_info_buffer *buffer,
static void print_pcm_caps(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
- unsigned int pcm = snd_hda_param_read(codec, nid, AC_PAR_PCM);
- unsigned int stream = snd_hda_param_read(codec, nid, AC_PAR_STREAM);
+ unsigned int pcm = param_read(codec, nid, AC_PAR_PCM);
+ unsigned int stream = param_read(codec, nid, AC_PAR_STREAM);
if (pcm == -1 || stream == -1) {
snd_iprintf(buffer, "N/A\n");
return;
@@ -273,7 +276,7 @@ static void print_pin_caps(struct snd_info_buffer *buffer,
static char *jack_conns[4] = { "Jack", "N/A", "Fixed", "Both" };
unsigned int caps, val;
- caps = snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
+ caps = param_read(codec, nid, AC_PAR_PIN_CAP);
snd_iprintf(buffer, " Pincap 0x%08x:", caps);
if (caps & AC_PINCAP_IN)
snd_iprintf(buffer, " IN");
@@ -289,7 +292,7 @@ static void print_pin_caps(struct snd_info_buffer *buffer,
snd_iprintf(buffer, " Balanced");
if (caps & AC_PINCAP_HDMI) {
/* Realtek uses this bit as a different meaning */
- if ((codec->vendor_id >> 16) == 0x10ec)
+ if ((codec->core.vendor_id >> 16) == 0x10ec)
snd_iprintf(buffer, " R/L");
else {
if (caps & AC_PINCAP_HBR)
@@ -401,8 +404,7 @@ static void print_pin_ctls(struct snd_info_buffer *buffer,
static void print_vol_knob(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
- unsigned int cap = snd_hda_param_read(codec, nid,
- AC_PAR_VOL_KNB_CAP);
+ unsigned int cap = param_read(codec, nid, AC_PAR_VOL_KNB_CAP);
snd_iprintf(buffer, " Volume-Knob: delta=%d, steps=%d, ",
(cap >> 7) & 1, cap & 0x7f);
cap = snd_hda_codec_read(codec, nid, 0,
@@ -487,7 +489,7 @@ static void print_power_state(struct snd_info_buffer *buffer,
[ilog2(AC_PWRST_EPSS)] = "EPSS",
};
- int sup = snd_hda_param_read(codec, nid, AC_PAR_POWER_STATE);
+ int sup = param_read(codec, nid, AC_PAR_POWER_STATE);
int pwr = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_POWER_STATE, 0);
if (sup != -1)
@@ -531,8 +533,7 @@ static void print_proc_caps(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
unsigned int i, ncoeff, oldindex;
- unsigned int proc_caps = snd_hda_param_read(codec, nid,
- AC_PAR_PROC_CAP);
+ unsigned int proc_caps = param_read(codec, nid, AC_PAR_PROC_CAP);
ncoeff = (proc_caps & AC_PCAP_NUM_COEF) >> AC_PCAP_NUM_COEF_SHIFT;
snd_iprintf(buffer, " Processing caps: benign=%d, ncoeff=%d\n",
proc_caps & AC_PCAP_BENIGN, ncoeff);
@@ -581,8 +582,8 @@ static void print_conn_list(struct snd_info_buffer *buffer,
/* Get Cache connections info */
cache_len = snd_hda_get_conn_list(codec, nid, &list);
- if (cache_len != conn_len
- || memcmp(list, conn, conn_len)) {
+ if (cache_len >= 0 && (cache_len != conn_len ||
+ memcmp(list, conn, conn_len) != 0)) {
snd_iprintf(buffer, " In-driver Connection: %d\n", cache_len);
if (cache_len > 0) {
snd_iprintf(buffer, " ");
@@ -597,7 +598,7 @@ static void print_gpio(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
unsigned int gpio =
- snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP);
+ param_read(codec, codec->core.afg, AC_PAR_GPIO_CAP);
unsigned int enable, direction, wake, unsol, sticky, data;
int i, max;
snd_iprintf(buffer, "GPIO: io=%d, o=%d, i=%d, "
@@ -667,13 +668,9 @@ static void print_device_list(struct snd_info_buffer *buffer,
}
}
-static void print_codec_info(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
+static void print_codec_core_info(struct hdac_device *codec,
+ struct snd_info_buffer *buffer)
{
- struct hda_codec *codec = entry->private_data;
- hda_nid_t nid;
- int i, nodes;
-
snd_iprintf(buffer, "Codec: ");
if (codec->vendor_name && codec->chip_name)
snd_iprintf(buffer, "%s %s\n",
@@ -695,34 +692,43 @@ static void print_codec_info(struct snd_info_entry *entry,
snd_iprintf(buffer, "Modem Function Group: 0x%x\n", codec->mfg);
else
snd_iprintf(buffer, "No Modem Function Group found\n");
+}
+
+static void print_codec_info(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct hda_codec *codec = entry->private_data;
+ hda_nid_t nid, fg;
+ int i, nodes;
- if (! codec->afg)
+ print_codec_core_info(&codec->core, buffer);
+ fg = codec->core.afg;
+ if (!fg)
return;
snd_hda_power_up(codec);
snd_iprintf(buffer, "Default PCM:\n");
- print_pcm_caps(buffer, codec, codec->afg);
+ print_pcm_caps(buffer, codec, fg);
snd_iprintf(buffer, "Default Amp-In caps: ");
- print_amp_caps(buffer, codec, codec->afg, HDA_INPUT);
+ print_amp_caps(buffer, codec, fg, HDA_INPUT);
snd_iprintf(buffer, "Default Amp-Out caps: ");
- print_amp_caps(buffer, codec, codec->afg, HDA_OUTPUT);
- snd_iprintf(buffer, "State of AFG node 0x%02x:\n", codec->afg);
- print_power_state(buffer, codec, codec->afg);
+ print_amp_caps(buffer, codec, fg, HDA_OUTPUT);
+ snd_iprintf(buffer, "State of AFG node 0x%02x:\n", fg);
+ print_power_state(buffer, codec, fg);
- nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid);
+ nodes = snd_hda_get_sub_nodes(codec, fg, &nid);
if (! nid || nodes < 0) {
snd_iprintf(buffer, "Invalid AFG subtree\n");
snd_hda_power_down(codec);
return;
}
- print_gpio(buffer, codec, codec->afg);
+ print_gpio(buffer, codec, fg);
if (codec->proc_widget_hook)
- codec->proc_widget_hook(buffer, codec, codec->afg);
+ codec->proc_widget_hook(buffer, codec, fg);
for (i = 0; i < nodes; i++, nid++) {
unsigned int wid_caps =
- snd_hda_param_read(codec, nid,
- AC_PAR_AUDIO_WIDGET_CAP);
+ param_read(codec, nid, AC_PAR_AUDIO_WIDGET_CAP);
unsigned int wid_type = get_wcaps_type(wid_caps);
hda_nid_t *conn = NULL;
int conn_len = 0;
@@ -860,8 +866,8 @@ int snd_hda_codec_proc_new(struct hda_codec *codec)
struct snd_info_entry *entry;
int err;
- snprintf(name, sizeof(name), "codec#%d", codec->addr);
- err = snd_card_proc_new(codec->bus->card, name, &entry);
+ snprintf(name, sizeof(name), "codec#%d", codec->core.addr);
+ err = snd_card_proc_new(codec->card, name, &entry);
if (err < 0)
return err;
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index ccc962a1699f..a6e3d9b511ab 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -48,33 +48,33 @@ static DEVICE_ATTR_RO(power_on_acct);
static DEVICE_ATTR_RO(power_off_acct);
#endif /* CONFIG_PM */
-#define CODEC_INFO_SHOW(type) \
+#define CODEC_INFO_SHOW(type, field) \
static ssize_t type##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct hda_codec *codec = dev_get_drvdata(dev); \
- return sprintf(buf, "0x%x\n", codec->type); \
+ return sprintf(buf, "0x%x\n", codec->field); \
}
-#define CODEC_INFO_STR_SHOW(type) \
+#define CODEC_INFO_STR_SHOW(type, field) \
static ssize_t type##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct hda_codec *codec = dev_get_drvdata(dev); \
return sprintf(buf, "%s\n", \
- codec->type ? codec->type : ""); \
+ codec->field ? codec->field : ""); \
}
-CODEC_INFO_SHOW(vendor_id);
-CODEC_INFO_SHOW(subsystem_id);
-CODEC_INFO_SHOW(revision_id);
-CODEC_INFO_SHOW(afg);
-CODEC_INFO_SHOW(mfg);
-CODEC_INFO_STR_SHOW(vendor_name);
-CODEC_INFO_STR_SHOW(chip_name);
-CODEC_INFO_STR_SHOW(modelname);
+CODEC_INFO_SHOW(vendor_id, core.vendor_id);
+CODEC_INFO_SHOW(subsystem_id, core.subsystem_id);
+CODEC_INFO_SHOW(revision_id, core.revision_id);
+CODEC_INFO_SHOW(afg, core.afg);
+CODEC_INFO_SHOW(mfg, core.mfg);
+CODEC_INFO_STR_SHOW(vendor_name, core.vendor_name);
+CODEC_INFO_STR_SHOW(chip_name, core.chip_name);
+CODEC_INFO_STR_SHOW(modelname, modelname);
static ssize_t pin_configs_show(struct hda_codec *codec,
struct snd_array *list,
@@ -149,7 +149,7 @@ static int reconfig_codec(struct hda_codec *codec)
err = snd_hda_codec_build_controls(codec);
if (err < 0)
goto error;
- err = snd_card_register(codec->bus->card);
+ err = snd_card_register(codec->card);
error:
snd_hda_power_down(codec);
return err;
@@ -170,7 +170,7 @@ static char *kstrndup_noeol(const char *src, size_t len)
return s;
}
-#define CODEC_INFO_STORE(type) \
+#define CODEC_INFO_STORE(type, field) \
static ssize_t type##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
@@ -180,11 +180,11 @@ static ssize_t type##_store(struct device *dev, \
int err = kstrtoul(buf, 0, &val); \
if (err < 0) \
return err; \
- codec->type = val; \
+ codec->field = val; \
return count; \
}
-#define CODEC_INFO_STR_STORE(type) \
+#define CODEC_INFO_STR_STORE(type, field) \
static ssize_t type##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
@@ -193,17 +193,17 @@ static ssize_t type##_store(struct device *dev, \
char *s = kstrndup_noeol(buf, 64); \
if (!s) \
return -ENOMEM; \
- kfree(codec->type); \
- codec->type = s; \
+ kfree(codec->field); \
+ codec->field = s; \
return count; \
}
-CODEC_INFO_STORE(vendor_id);
-CODEC_INFO_STORE(subsystem_id);
-CODEC_INFO_STORE(revision_id);
-CODEC_INFO_STR_STORE(vendor_name);
-CODEC_INFO_STR_STORE(chip_name);
-CODEC_INFO_STR_STORE(modelname);
+CODEC_INFO_STORE(vendor_id, core.vendor_id);
+CODEC_INFO_STORE(subsystem_id, core.subsystem_id);
+CODEC_INFO_STORE(revision_id, core.revision_id);
+CODEC_INFO_STR_STORE(vendor_name, core.vendor_name);
+CODEC_INFO_STR_STORE(chip_name, core.chip_name);
+CODEC_INFO_STR_STORE(modelname, modelname);
#define CODEC_ACTION_STORE(type) \
static ssize_t type##_store(struct device *dev, \
@@ -552,10 +552,10 @@ static void parse_codec_mode(char *buf, struct hda_bus *bus,
*codecp = NULL;
if (sscanf(buf, "%i %i %i", &vendorid, &subid, &caddr) == 3) {
- list_for_each_entry(codec, &bus->codec_list, list) {
- if ((vendorid <= 0 || codec->vendor_id == vendorid) &&
- (subid <= 0 || codec->subsystem_id == subid) &&
- codec->addr == caddr) {
+ list_for_each_codec(codec, bus) {
+ if ((vendorid <= 0 || codec->core.vendor_id == vendorid) &&
+ (subid <= 0 || codec->core.subsystem_id == subid) &&
+ codec->core.addr == caddr) {
*codecp = codec;
break;
}
@@ -595,8 +595,8 @@ static void parse_model_mode(char *buf, struct hda_bus *bus,
static void parse_chip_name_mode(char *buf, struct hda_bus *bus,
struct hda_codec **codecp)
{
- kfree((*codecp)->chip_name);
- (*codecp)->chip_name = kstrdup(buf, GFP_KERNEL);
+ kfree((*codecp)->core.chip_name);
+ (*codecp)->core.chip_name = kstrdup(buf, GFP_KERNEL);
}
#define DEFINE_PARSE_ID_MODE(name) \
@@ -605,7 +605,7 @@ static void parse_##name##_mode(char *buf, struct hda_bus *bus, \
{ \
unsigned long val; \
if (!kstrtoul(buf, 0, &val)) \
- (*codecp)->name = val; \
+ (*codecp)->core.name = val; \
}
DEFINE_PARSE_ID_MODE(vendor_id);
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 375e94f4cf52..2e4fd5c56d3b 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -37,7 +37,6 @@
#include "hda_codec.h"
#include "hda_controller.h"
-#include "hda_priv.h"
/* Defines for Nvidia Tegra HDA support */
#define HDA_BAR0 0x8000
@@ -82,7 +81,7 @@ module_param(power_save, bint, 0644);
MODULE_PARM_DESC(power_save,
"Automatic power-saving timeout (in seconds, 0 = disable).");
#else
-static int power_save = 0;
+#define power_save 0
#endif
/*
@@ -250,14 +249,9 @@ static int hda_tegra_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- struct azx_pcm *p;
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- list_for_each_entry(p, &chip->pcm_list, list)
- snd_pcm_suspend_all(p->pcm);
- if (chip->initialized)
- snd_hda_suspend(chip->bus);
azx_stop_chip(chip);
azx_enter_link_reset(chip);
@@ -278,7 +272,6 @@ static int hda_tegra_resume(struct device *dev)
azx_init_chip(chip, 1);
- snd_hda_resume(chip->bus);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
@@ -297,8 +290,6 @@ static int hda_tegra_dev_free(struct snd_device *device)
int i;
struct azx *chip = device->device_data;
- azx_notifier_unregister(chip);
-
if (chip->initialized) {
for (i = 0; i < chip->num_streams; i++)
azx_stream_stop(chip, &chip->azx_dev[i]);
@@ -344,17 +335,6 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
return 0;
}
-/*
- * The codecs were powered up in snd_hda_codec_new().
- * Now all initialization done, so turn them down if possible
- */
-static void power_down_all_codecs(struct azx *chip)
-{
- struct hda_codec *codec;
- list_for_each_entry(codec, &chip->bus->codec_list, list)
- snd_hda_power_down(codec);
-}
-
static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
{
struct snd_card *card = chip->card;
@@ -503,21 +483,15 @@ static int hda_tegra_probe(struct platform_device *pdev)
goto out_free;
/* create codec instances */
- err = azx_codec_create(chip, NULL, 0, &power_save);
+ err = azx_bus_create(chip, NULL);
if (err < 0)
goto out_free;
- err = azx_codec_configure(chip);
+ err = azx_probe_codecs(chip, 0);
if (err < 0)
goto out_free;
- /* create PCM streams */
- err = snd_hda_build_pcms(chip->bus);
- if (err < 0)
- goto out_free;
-
- /* create mixer controls */
- err = azx_mixer_create(chip);
+ err = azx_codec_configure(chip);
if (err < 0)
goto out_free;
@@ -526,8 +500,7 @@ static int hda_tegra_probe(struct platform_device *pdev)
goto out_free;
chip->running = 1;
- power_down_all_codecs(chip);
- azx_notifier_register(chip);
+ snd_hda_set_power_save(chip->bus, power_save * 1000);
return 0;
@@ -541,6 +514,18 @@ static int hda_tegra_remove(struct platform_device *pdev)
return snd_card_free(dev_get_drvdata(&pdev->dev));
}
+static void hda_tegra_shutdown(struct platform_device *pdev)
+{
+ struct snd_card *card = dev_get_drvdata(&pdev->dev);
+ struct azx *chip;
+
+ if (!card)
+ return;
+ chip = card->private_data;
+ if (chip && chip->running)
+ azx_stop_chip(chip);
+}
+
static struct platform_driver tegra_platform_hda = {
.driver = {
.name = "tegra-hda",
@@ -549,6 +534,7 @@ static struct platform_driver tegra_platform_hda = {
},
.probe = hda_tegra_probe,
.remove = hda_tegra_remove,
+ .shutdown = hda_tegra_shutdown,
};
module_platform_driver(tegra_platform_hda);
diff --git a/sound/pci/hda/hda_trace.h b/sound/pci/hda/hda_trace.h
deleted file mode 100644
index 3a1c63161eb1..000000000000
--- a/sound/pci/hda/hda_trace.h
+++ /dev/null
@@ -1,143 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hda
-#define TRACE_INCLUDE_FILE hda_trace
-
-#if !defined(_TRACE_HDA_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_HDA_H
-
-#include <linux/tracepoint.h>
-
-struct hda_bus;
-struct hda_codec;
-
-DECLARE_EVENT_CLASS(hda_cmd,
-
- TP_PROTO(struct hda_codec *codec, unsigned int val),
-
- TP_ARGS(codec, val),
-
- TP_STRUCT__entry(
- __field( unsigned int, card )
- __field( unsigned int, addr )
- __field( unsigned int, val )
- ),
-
- TP_fast_assign(
- __entry->card = (codec)->bus->card->number;
- __entry->addr = (codec)->addr;
- __entry->val = (val);
- ),
-
- TP_printk("[%d:%d] val=%x", __entry->card, __entry->addr, __entry->val)
-);
-
-DEFINE_EVENT(hda_cmd, hda_send_cmd,
- TP_PROTO(struct hda_codec *codec, unsigned int val),
- TP_ARGS(codec, val)
-);
-
-DEFINE_EVENT(hda_cmd, hda_get_response,
- TP_PROTO(struct hda_codec *codec, unsigned int val),
- TP_ARGS(codec, val)
-);
-
-TRACE_EVENT(hda_bus_reset,
-
- TP_PROTO(struct hda_bus *bus),
-
- TP_ARGS(bus),
-
- TP_STRUCT__entry(
- __field( unsigned int, card )
- ),
-
- TP_fast_assign(
- __entry->card = (bus)->card->number;
- ),
-
- TP_printk("[%d]", __entry->card)
-);
-
-#ifdef CONFIG_PM
-DECLARE_EVENT_CLASS(hda_power,
-
- TP_PROTO(struct hda_codec *codec),
-
- TP_ARGS(codec),
-
- TP_STRUCT__entry(
- __field( unsigned int, card )
- __field( unsigned int, addr )
- ),
-
- TP_fast_assign(
- __entry->card = (codec)->bus->card->number;
- __entry->addr = (codec)->addr;
- ),
-
- TP_printk("[%d:%d]", __entry->card, __entry->addr)
-);
-
-DEFINE_EVENT(hda_power, hda_power_down,
- TP_PROTO(struct hda_codec *codec),
- TP_ARGS(codec)
-);
-
-DEFINE_EVENT(hda_power, hda_power_up,
- TP_PROTO(struct hda_codec *codec),
- TP_ARGS(codec)
-);
-
-TRACE_EVENT(hda_power_count,
- TP_PROTO(struct hda_codec *codec),
- TP_ARGS(codec),
- TP_STRUCT__entry(
- __field( unsigned int, card )
- __field( unsigned int, addr )
- __field( int, power_count )
- __field( int, power_on )
- __field( int, power_transition )
- ),
-
- TP_fast_assign(
- __entry->card = (codec)->bus->card->number;
- __entry->addr = (codec)->addr;
- __entry->power_count = (codec)->power_count;
- __entry->power_on = (codec)->power_on;
- __entry->power_transition = (codec)->power_transition;
- ),
-
- TP_printk("[%d:%d] power_count=%d, power_on=%d, power_transition=%d",
- __entry->card, __entry->addr, __entry->power_count,
- __entry->power_on, __entry->power_transition)
-);
-#endif /* CONFIG_PM */
-
-TRACE_EVENT(hda_unsol_event,
-
- TP_PROTO(struct hda_bus *bus, u32 res, u32 res_ex),
-
- TP_ARGS(bus, res, res_ex),
-
- TP_STRUCT__entry(
- __field( unsigned int, card )
- __field( u32, res )
- __field( u32, res_ex )
- ),
-
- TP_fast_assign(
- __entry->card = (bus)->card->number;
- __entry->res = res;
- __entry->res_ex = res_ex;
- ),
-
- TP_printk("[%d] res=%x, res_ex=%x", __entry->card,
- __entry->res, __entry->res_ex)
-);
-
-#endif /* _TRACE_HDA_H */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#include <trace/define_trace.h>
diff --git a/sound/pci/hda/local.h b/sound/pci/hda/local.h
new file mode 100644
index 000000000000..28cb7f98982e
--- /dev/null
+++ b/sound/pci/hda/local.h
@@ -0,0 +1,39 @@
+/*
+ */
+
+#ifndef __HDAC_LOCAL_H
+#define __HDAC_LOCAL_H
+
+int hdac_read_parm(struct hdac_device *codec, hda_nid_t nid, int parm);
+
+#define get_wcaps(codec, nid) \
+ hdac_read_parm(codec, nid, AC_PAR_AUDIO_WIDGET_CAP)
+/* get the widget type from widget capability bits */
+static inline int get_wcaps_type(unsigned int wcaps)
+{
+ if (!wcaps)
+ return -1; /* invalid type */
+ return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+}
+
+#define get_pin_caps(codec, nid) \
+ hdac_read_parm(codec, nid, AC_PAR_PIN_CAP)
+
+static inline
+unsigned int get_pin_cfg(struct hdac_device *codec, hda_nid_t nid)
+{
+ unsigned int val;
+
+ if (snd_hdac_read(codec, nid, AC_VERB_GET_CONFIG_DEFAULT, 0, &val))
+ return -1;
+ return val;
+}
+
+#define get_amp_caps(codec, nid, dir) \
+ hdac_read_parm(codec, nid, (dir) == HDA_OUTPUT ? \
+ AC_PAR_AMP_OUT_CAP : AC_PAR_AMP_IN_CAP)
+
+#define get_power_caps(codec, nid) \
+ hdac_read_parm(codec, nid, AC_PAR_POWER_STATE)
+
+#endif /* __HDAC_LOCAL_H */
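The new local.h collects parameter-read shortcuts for the hdac core side. A small sketch of how these helpers compose, assuming a valid struct hdac_device and a widget NID (AC_WID_PIN is the standard pin-complex widget type):

/* illustrative only: check whether a widget is a pin complex */
static bool widget_is_pin(struct hdac_device *codec, hda_nid_t nid)
{
	return get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_PIN;
}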
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index d285904cdb64..231f89029779 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -99,7 +99,7 @@ static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
static void ad198x_power_eapd(struct hda_codec *codec)
{
/* We currently only handle front, HP */
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x11d41882:
case 0x11d4882a:
case 0x11d41884:
@@ -777,7 +777,6 @@ static int ad1988_auto_smux_enum_put(struct snd_kcontrol *kcontrol,
return 0;
mutex_lock(&codec->control_mutex);
- codec->cached_write = 1;
path = snd_hda_get_path_from_idx(codec,
spec->smux_paths[spec->cur_smux]);
if (path)
@@ -786,9 +785,7 @@ static int ad1988_auto_smux_enum_put(struct snd_kcontrol *kcontrol,
if (path)
snd_hda_activate_path(codec, path, true, true);
spec->cur_smux = val;
- codec->cached_write = 0;
mutex_unlock(&codec->control_mutex);
- snd_hda_codec_flush_cache(codec); /* flush the updates */
return 1;
}
@@ -1004,18 +1001,17 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct ad198x_spec *spec = codec->spec;
- static const struct hda_verb gpio_init_verbs[] = {
- {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x02},
- {},
- };
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook;
spec->gen.own_eapd_ctl = 1;
- snd_hda_sequence_write_cache(codec, gpio_init_verbs);
+ snd_hda_codec_write_cache(codec, 0x01, 0,
+ AC_VERB_SET_GPIO_MASK, 0x02);
+ snd_hda_codec_write_cache(codec, 0x01, 0,
+ AC_VERB_SET_GPIO_DIRECTION, 0x02);
+ snd_hda_codec_write_cache(codec, 0x01, 0,
+ AC_VERB_SET_GPIO_DATA, 0x02);
break;
case HDA_FIXUP_ACT_PROBE:
if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
@@ -1194,20 +1190,8 @@ MODULE_ALIAS("snd-hda-codec-id:11d4*");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Analog Devices HD-audio codec");
-static struct hda_codec_preset_list analog_list = {
+static struct hda_codec_driver analog_driver = {
.preset = snd_hda_preset_analog,
- .owner = THIS_MODULE,
};
-static int __init patch_analog_init(void)
-{
- return snd_hda_add_codec_preset(&analog_list);
-}
-
-static void __exit patch_analog_exit(void)
-{
- snd_hda_delete_codec_preset(&analog_list);
-}
-
-module_init(patch_analog_init)
-module_exit(patch_analog_exit)
+module_hda_codec_driver(analog_driver);
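Editor's sketch, not part of the patch: the hunk above is the first of many identical conversions in this series, where the hda_codec_preset_list plus hand-rolled module_init()/module_exit() boilerplate collapses into an hda_codec_driver registered through module_hda_codec_driver(). The skeleton below shows the resulting shape; all "foo" names and the codec ID are placeholders.

/* Sketch of the new registration skeleton (placeholder names/IDs). */
static int patch_foo(struct hda_codec *codec)
{
	return 0;	/* a real driver parses the codec and sets patch_ops here */
}

static const struct hda_codec_preset snd_hda_preset_foo[] = {
	{ .id = 0x10ec9999, .name = "Example codec", .patch = patch_foo },
	{} /* terminator */
};

static struct hda_codec_driver foo_driver = {
	.preset = snd_hda_preset_foo,
};

module_hda_codec_driver(foo_driver);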
diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
index 5e65999e0d8e..447302695195 100644
--- a/sound/pci/hda/patch_ca0110.c
+++ b/sound/pci/hda/patch_ca0110.c
@@ -98,20 +98,8 @@ MODULE_ALIAS("snd-hda-codec-id:1102000d");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Creative CA0110-IBG HD-audio codec");
-static struct hda_codec_preset_list ca0110_list = {
+static struct hda_codec_driver ca0110_driver = {
.preset = snd_hda_preset_ca0110,
- .owner = THIS_MODULE,
};
-static int __init patch_ca0110_init(void)
-{
- return snd_hda_add_codec_preset(&ca0110_list);
-}
-
-static void __exit patch_ca0110_exit(void)
-{
- snd_hda_delete_codec_preset(&ca0110_list);
-}
-
-module_init(patch_ca0110_init)
-module_exit(patch_ca0110_exit)
+module_hda_codec_driver(ca0110_driver);
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index e0383eea9880..4a4e7b282e4f 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -719,7 +719,6 @@ struct ca0132_spec {
unsigned int num_inputs;
hda_nid_t shared_mic_nid;
hda_nid_t shared_out_nid;
- struct hda_pcm pcm_rec[5]; /* PCM information */
/* chip access */
struct mutex chipio_mutex; /* chip access mutex */
@@ -3132,7 +3131,7 @@ static int ca0132_select_out(struct hda_codec *codec)
codec_dbg(codec, "ca0132_select_out\n");
- snd_hda_power_up(codec);
+ snd_hda_power_up_pm(codec);
auto_jack = spec->vnode_lswitch[VNID_HP_ASEL - VNODE_START_NID];
@@ -3216,7 +3215,7 @@ static int ca0132_select_out(struct hda_codec *codec)
}
exit:
- snd_hda_power_down(codec);
+ snd_hda_power_down_pm(codec);
return err < 0 ? err : 0;
}
@@ -3294,7 +3293,7 @@ static int ca0132_select_mic(struct hda_codec *codec)
codec_dbg(codec, "ca0132_select_mic\n");
- snd_hda_power_up(codec);
+ snd_hda_power_up_pm(codec);
auto_jack = spec->vnode_lswitch[VNID_AMIC1_ASEL - VNODE_START_NID];
@@ -3327,7 +3326,7 @@ static int ca0132_select_mic(struct hda_codec *codec)
ca0132_effects_set(codec, VOICE_FOCUS, 0);
}
- snd_hda_power_down(codec);
+ snd_hda_power_down_pm(codec);
return 0;
}
@@ -4036,12 +4035,11 @@ static struct hda_pcm_stream ca0132_pcm_digital_capture = {
static int ca0132_build_pcms(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- struct hda_pcm *info = spec->pcm_rec;
-
- codec->pcm_info = info;
- codec->num_pcms = 0;
+ struct hda_pcm *info;
- info->name = "CA0132 Analog";
+ info = snd_hda_codec_pcm_new(codec, "CA0132 Analog");
+ if (!info)
+ return -ENOMEM;
info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ca0132_pcm_analog_playback;
info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dacs[0];
info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
@@ -4049,27 +4047,27 @@ static int ca0132_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
- codec->num_pcms++;
- info++;
- info->name = "CA0132 Analog Mic-In2";
+ info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
+ if (!info)
+ return -ENOMEM;
info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[1];
- codec->num_pcms++;
- info++;
- info->name = "CA0132 What U Hear";
+ info = snd_hda_codec_pcm_new(codec, "CA0132 What U Hear");
+ if (!info)
+ return -ENOMEM;
info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[2];
- codec->num_pcms++;
if (!spec->dig_out && !spec->dig_in)
return 0;
- info++;
- info->name = "CA0132 Digital";
+ info = snd_hda_codec_pcm_new(codec, "CA0132 Digital");
+ if (!info)
+ return -ENOMEM;
info->pcm_type = HDA_PCM_TYPE_SPDIF;
if (spec->dig_out) {
info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
@@ -4081,7 +4079,6 @@ static int ca0132_build_pcms(struct hda_codec *codec)
ca0132_pcm_digital_capture;
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
}
- codec->num_pcms++;
return 0;
}
@@ -4246,13 +4243,9 @@ static void ca0132_refresh_widget_caps(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
int i;
- hda_nid_t nid;
codec_dbg(codec, "ca0132_refresh_widget_caps.\n");
- nid = codec->start_nid;
- for (i = 0; i < codec->num_nodes; i++, nid++)
- codec->wcaps[i] = snd_hda_param_read(codec, nid,
- AC_PAR_AUDIO_WIDGET_CAP);
+ snd_hda_codec_update_widgets(codec);
for (i = 0; i < spec->multiout.num_dacs; i++)
refresh_amp_caps(codec, spec->dacs[i], HDA_OUTPUT);
@@ -4352,7 +4345,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
const struct dsp_image_seg *dsp_os_image;
const struct firmware *fw_entry;
- if (request_firmware(&fw_entry, EFX_FILE, codec->bus->card->dev) != 0)
+ if (request_firmware(&fw_entry, EFX_FILE, codec->card->dev) != 0)
return false;
dsp_os_image = (struct dsp_image_seg *)(fw_entry->data);
@@ -4413,8 +4406,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
* state machine run.
*/
cancel_delayed_work_sync(&spec->unsol_hp_work);
- queue_delayed_work(codec->bus->workq, &spec->unsol_hp_work,
- msecs_to_jiffies(500));
+ schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
cb->tbl->block_report = 1;
}
@@ -4554,7 +4546,7 @@ static int ca0132_init(struct hda_codec *codec)
spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
- snd_hda_power_up(codec);
+ snd_hda_power_up_pm(codec);
ca0132_init_unsol(codec);
@@ -4585,7 +4577,7 @@ static int ca0132_init(struct hda_codec *codec)
snd_hda_jack_report_sync(codec);
- snd_hda_power_down(codec);
+ snd_hda_power_down_pm(codec);
return 0;
}
@@ -4702,20 +4694,8 @@ MODULE_ALIAS("snd-hda-codec-id:11020011");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Creative Sound Core3D codec");
-static struct hda_codec_preset_list ca0132_list = {
+static struct hda_codec_driver ca0132_driver = {
.preset = snd_hda_preset_ca0132,
- .owner = THIS_MODULE,
};
-static int __init patch_ca0132_init(void)
-{
- return snd_hda_add_codec_preset(&ca0132_list);
-}
-
-static void __exit patch_ca0132_exit(void)
-{
- snd_hda_delete_codec_preset(&ca0132_list);
-}
-
-module_init(patch_ca0132_init)
-module_exit(patch_ca0132_exit)
+module_hda_codec_driver(ca0132_driver);
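Editor's sketch, not part of the patch: the ca0132_build_pcms() rework above illustrates the new PCM allocation model, where each PCM comes from snd_hda_codec_pcm_new() and the driver no longer maintains a fixed pcm_rec[] array or codec->num_pcms by hand. A trimmed example of the pattern; the "foo" names and the NID are placeholders.

/* Sketch only: one analog PCM built with snd_hda_codec_pcm_new(). */
static const struct hda_pcm_stream foo_pcm_analog_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	/* .nid and .ops would be filled in as in the drivers above */
};

static int foo_build_pcms(struct hda_codec *codec)
{
	struct hda_pcm *info;

	info = snd_hda_codec_pcm_new(codec, "Foo Analog");
	if (!info)
		return -ENOMEM;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK] = foo_pcm_analog_playback;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = 0x02; /* example DAC */
	return 0;
}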
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index dd2b3d92071f..50e9dd675579 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1221,20 +1221,8 @@ MODULE_ALIAS("snd-hda-codec-id:10134213");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cirrus Logic HD-audio codec");
-static struct hda_codec_preset_list cirrus_list = {
+static struct hda_codec_driver cirrus_driver = {
.preset = snd_hda_preset_cirrus,
- .owner = THIS_MODULE,
};
-static int __init patch_cirrus_init(void)
-{
- return snd_hda_add_codec_preset(&cirrus_list);
-}
-
-static void __exit patch_cirrus_exit(void)
-{
- snd_hda_delete_codec_preset(&cirrus_list);
-}
-
-module_init(patch_cirrus_init)
-module_exit(patch_cirrus_exit)
+module_hda_codec_driver(cirrus_driver);
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index c895a8f21192..617d9012e78a 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -137,20 +137,8 @@ MODULE_ALIAS("snd-hda-codec-id:434d4980");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("C-Media HD-audio codec");
-static struct hda_codec_preset_list cmedia_list = {
+static struct hda_codec_driver cmedia_driver = {
.preset = snd_hda_preset_cmedia,
- .owner = THIS_MODULE,
};
-static int __init patch_cmedia_init(void)
-{
- return snd_hda_add_codec_preset(&cmedia_list);
-}
-
-static void __exit patch_cmedia_exit(void)
-{
- snd_hda_delete_codec_preset(&cmedia_list);
-}
-
-module_init(patch_cmedia_init)
-module_exit(patch_cmedia_exit)
+module_hda_codec_driver(cmedia_driver);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index da67ea8645a6..f8f0dfbef149 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -103,10 +103,9 @@ static int add_beep_ctls(struct hda_codec *codec)
static void cx_auto_parse_beep(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- hda_nid_t nid, end_nid;
+ hda_nid_t nid;
- end_nid = codec->start_nid + codec->num_nodes;
- for (nid = codec->start_nid; nid < end_nid; nid++)
+ for_each_hda_codec_node(nid, codec)
if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP) {
set_beep_amp(spec, nid, 0, HDA_OUTPUT);
break;
@@ -120,10 +119,9 @@ static void cx_auto_parse_beep(struct hda_codec *codec)
static void cx_auto_parse_eapd(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- hda_nid_t nid, end_nid;
+ hda_nid_t nid;
- end_nid = codec->start_nid + codec->num_nodes;
- for (nid = codec->start_nid; nid < end_nid; nid++) {
+ for_each_hda_codec_node(nid, codec) {
if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
continue;
if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_EAPD))
@@ -304,6 +302,7 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec,
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
spec->parse_flags |= HDA_PINCFG_HEADPHONE_MIC;
+ snd_hdac_regmap_add_vendor_verb(&codec->core, 0x410);
break;
case HDA_FIXUP_ACT_PROBE:
spec->gen.cap_sync_hook = cxt_update_headset_mode_hook;
@@ -411,15 +410,11 @@ static void olpc_xo_automic(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
struct conexant_spec *spec = codec->spec;
- int saved_cached_write = codec->cached_write;
- codec->cached_write = 1;
/* in DC mode, we don't handle automic */
if (!spec->dc_enable)
snd_hda_gen_mic_autoswitch(codec, jack);
olpc_xo_update_mic_pins(codec);
- snd_hda_codec_flush_cache(codec);
- codec->cached_write = saved_cached_write;
if (spec->dc_enable)
olpc_xo_update_mic_boost(codec);
}
@@ -848,7 +843,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
struct conexant_spec *spec;
int err;
- codec_info(codec, "%s: BIOS auto-probing.\n", codec->chip_name);
+ codec_info(codec, "%s: BIOS auto-probing.\n", codec->core.chip_name);
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -862,7 +857,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
if (spec->dynamic_eapd)
spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x14f15045:
codec->single_adc_amp = 1;
spec->gen.mixer_nid = 0x17;
@@ -896,7 +891,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
* others may use EAPD really as an amp switch, so it might be
* not good to expose it blindly.
*/
- switch (codec->subsystem_id >> 16) {
+ switch (codec->core.subsystem_id >> 16) {
case 0x103c:
spec->gen.vmaster_mute_enum = 1;
break;
@@ -919,10 +914,10 @@ static int patch_conexant_auto(struct hda_codec *codec)
* which falls into the single-cmd mode.
* Better to make reset, then.
*/
- if (!codec->bus->sync_write) {
+ if (!codec->bus->core.sync_write) {
codec_info(codec,
"Enable sync_write for stable communication\n");
- codec->bus->sync_write = 1;
+ codec->bus->core.sync_write = 1;
codec->bus->allow_bus_reset = 1;
}
@@ -1018,20 +1013,8 @@ MODULE_ALIAS("snd-hda-codec-id:14f151d7");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Conexant HD-audio codec");
-static struct hda_codec_preset_list conexant_list = {
+static struct hda_codec_driver conexant_driver = {
.preset = snd_hda_preset_conexant,
- .owner = THIS_MODULE,
};
-static int __init patch_conexant_init(void)
-{
- return snd_hda_add_codec_preset(&conexant_list);
-}
-
-static void __exit patch_conexant_exit(void)
-{
- snd_hda_delete_codec_preset(&conexant_list);
-}
-
-module_init(patch_conexant_init)
-module_exit(patch_conexant_exit)
+module_hda_codec_driver(conexant_driver);
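Editor's sketch, not part of the patch: the Conexant hunks above show the new widget-iteration helper, where the open-coded start_nid/num_nodes loop becomes a for_each_hda_codec_node() walk. A condensed example mirroring cx_auto_parse_beep(); has_beep_widget() is an illustrative name only.

/* Sketch only: walk all widgets and report whether a beep widget exists. */
static bool has_beep_widget(struct hda_codec *codec)
{
	hda_nid_t nid;

	for_each_hda_codec_node(nid, codec)
		if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP)
			return true;
	return false;
}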
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index b422e406a9cb..5f44f60a6389 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -45,14 +45,14 @@ static bool static_hdmi_pcm;
module_param(static_hdmi_pcm, bool, 0644);
MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
-#define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
-#define is_broadwell(codec) ((codec)->vendor_id == 0x80862808)
-#define is_skylake(codec) ((codec)->vendor_id == 0x80862809)
+#define is_haswell(codec) ((codec)->core.vendor_id == 0x80862807)
+#define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
+#define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
|| is_skylake(codec))
-#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
-#define is_cherryview(codec) ((codec)->vendor_id == 0x80862883)
+#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
+#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
struct hdmi_spec_per_cvt {
@@ -86,7 +86,6 @@ struct hdmi_spec_per_pin {
bool non_pcm;
bool chmap_set; /* channel-map override by ALSA API? */
unsigned char chmap[8]; /* ALSA API channel-map */
- char pcm_name[8]; /* filled in build_pcm callbacks */
#ifdef CONFIG_PROC_FS
struct snd_info_entry *proc_entry;
#endif
@@ -132,7 +131,7 @@ struct hdmi_spec {
int num_pins;
struct snd_array pins; /* struct hdmi_spec_per_pin */
- struct snd_array pcm_rec; /* struct hda_pcm */
+ struct hda_pcm *pcm_rec[16];
unsigned int channels_max; /* max over all cvts */
struct hdmi_eld temp_eld;
@@ -355,8 +354,7 @@ static struct cea_channel_speaker_allocation channel_allocations[] = {
((struct hdmi_spec_per_pin *)snd_array_elem(&spec->pins, idx))
#define get_cvt(spec, idx) \
((struct hdmi_spec_per_cvt *)snd_array_elem(&spec->cvts, idx))
-#define get_pcm_rec(spec, idx) \
- ((struct hda_pcm *)snd_array_elem(&spec->pcm_rec, idx))
+#define get_pcm_rec(spec, idx) ((spec)->pcm_rec[idx])
static int pin_nid_to_pin_index(struct hda_codec *codec, hda_nid_t pin_nid)
{
@@ -579,7 +577,7 @@ static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
int err;
snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
- err = snd_card_proc_new(codec->bus->card, name, &entry);
+ err = snd_card_proc_new(codec->card, name, &entry);
if (err < 0)
return err;
@@ -594,7 +592,7 @@ static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
{
if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
- snd_device_free(per_pin->codec->bus->card, per_pin->proc_entry);
+ snd_device_free(per_pin->codec->card, per_pin->proc_entry);
per_pin->proc_entry = NULL;
}
}
@@ -1393,13 +1391,12 @@ static void intel_not_share_assigned_cvt(struct hda_codec *codec,
hda_nid_t pin_nid, int mux_idx)
{
struct hdmi_spec *spec = codec->spec;
- hda_nid_t nid, end_nid;
+ hda_nid_t nid;
int cvt_idx, curr;
struct hdmi_spec_per_cvt *per_cvt;
/* configure all pins, including "no physical connection" ones */
- end_nid = codec->start_nid + codec->num_nodes;
- for (nid = codec->start_nid; nid < end_nid; nid++) {
+ for_each_hda_codec_node(nid, codec) {
unsigned int wid_caps = get_wcaps(codec, nid);
unsigned int wid_type = get_wcaps_type(wid_caps);
@@ -1548,7 +1545,7 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
bool eld_changed = false;
bool ret;
- snd_hda_power_up(codec);
+ snd_hda_power_up_pm(codec);
present = snd_hda_pin_sense(codec, pin_nid);
mutex_lock(&per_pin->lock);
@@ -1578,9 +1575,8 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
update_eld = true;
}
else if (repoll) {
- queue_delayed_work(codec->bus->workq,
- &per_pin->work,
- msecs_to_jiffies(300));
+ schedule_delayed_work(&per_pin->work,
+ msecs_to_jiffies(300));
goto unlock;
}
}
@@ -1624,7 +1620,7 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
}
if (eld_changed)
- snd_ctl_notify(codec->bus->card,
+ snd_ctl_notify(codec->card,
SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
&per_pin->eld_ctl->id);
unlock:
@@ -1635,7 +1631,7 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
jack->block_report = !ret;
mutex_unlock(&per_pin->lock);
- snd_hda_power_down(codec);
+ snd_hda_power_down_pm(codec);
return ret;
}
@@ -1731,7 +1727,7 @@ static int hdmi_parse_codec(struct hda_codec *codec)
hda_nid_t nid;
int i, nodes;
- nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid);
+ nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid);
if (!nid || nodes < 0) {
codec_warn(codec, "HDMI: failed to get afg sub nodes\n");
return -EINVAL;
@@ -2056,11 +2052,10 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
struct hdmi_spec_per_pin *per_pin;
per_pin = get_pin(spec, pin_idx);
- sprintf(per_pin->pcm_name, "HDMI %d", pin_idx);
- info = snd_array_new(&spec->pcm_rec);
+ info = snd_hda_codec_pcm_new(codec, "HDMI %d", pin_idx);
if (!info)
return -ENOMEM;
- info->name = per_pin->pcm_name;
+ spec->pcm_rec[pin_idx] = info;
info->pcm_type = HDA_PCM_TYPE_HDMI;
info->own_chmap = true;
@@ -2070,9 +2065,6 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
/* other pstr fields are set in open */
}
- codec->num_pcms = spec->num_pins;
- codec->pcm_info = spec->pcm_rec.list;
-
return 0;
}
@@ -2125,13 +2117,15 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
/* add channel maps */
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hda_pcm *pcm;
struct snd_pcm_chmap *chmap;
struct snd_kcontrol *kctl;
int i;
- if (!codec->pcm_info[pin_idx].pcm)
+ pcm = spec->pcm_rec[pin_idx];
+ if (!pcm || !pcm->pcm)
break;
- err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm,
+ err = snd_pcm_add_chmap_ctls(pcm->pcm,
SNDRV_PCM_STREAM_PLAYBACK,
NULL, 0, pin_idx, &chmap);
if (err < 0)
@@ -2186,14 +2180,12 @@ static void hdmi_array_init(struct hdmi_spec *spec, int nums)
{
snd_array_init(&spec->pins, sizeof(struct hdmi_spec_per_pin), nums);
snd_array_init(&spec->cvts, sizeof(struct hdmi_spec_per_cvt), nums);
- snd_array_init(&spec->pcm_rec, sizeof(struct hda_pcm), nums);
}
static void hdmi_array_free(struct hdmi_spec *spec)
{
snd_array_free(&spec->pins);
snd_array_free(&spec->cvts);
- snd_array_free(&spec->pcm_rec);
}
static void generic_hdmi_free(struct hda_codec *codec)
@@ -2204,11 +2196,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
- cancel_delayed_work(&per_pin->work);
+ cancel_delayed_work_sync(&per_pin->work);
eld_proc_free(per_pin);
}
- flush_workqueue(codec->bus->workq);
hdmi_array_free(spec);
kfree(spec);
}
@@ -2220,8 +2211,7 @@ static int generic_hdmi_resume(struct hda_codec *codec)
int pin_idx;
codec->patch_ops.init(codec);
- snd_hda_codec_resume_amp(codec);
- snd_hda_codec_resume_cache(codec);
+ regcache_sync(codec->core.regmap);
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2308,6 +2298,7 @@ static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
/* enable DP1.2 mode */
vendor_param |= INTEL_EN_DP12;
+ snd_hdac_regmap_add_vendor_verb(&codec->core, INTEL_SET_VENDOR_VERB);
snd_hda_codec_write_cache(codec, INTEL_VENDOR_NID, 0,
INTEL_SET_VENDOR_VERB, vendor_param);
}
@@ -2381,11 +2372,10 @@ static int simple_playback_build_pcms(struct hda_codec *codec)
chans = get_wcaps(codec, per_cvt->cvt_nid);
chans = get_wcaps_channels(chans);
- info = snd_array_new(&spec->pcm_rec);
+ info = snd_hda_codec_pcm_new(codec, "HDMI 0");
if (!info)
return -ENOMEM;
- info->name = get_pin(spec, 0)->pcm_name;
- sprintf(info->name, "HDMI 0");
+ spec->pcm_rec[0] = info;
info->pcm_type = HDA_PCM_TYPE_HDMI;
pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
*pstr = spec->pcm_playback;
@@ -2393,9 +2383,6 @@ static int simple_playback_build_pcms(struct hda_codec *codec)
if (pstr->channels_max <= 2 && chans && chans <= 16)
pstr->channels_max = chans;
- codec->num_pcms = 1;
- codec->pcm_info = info;
-
return 0;
}
@@ -2940,7 +2927,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
*/
#define is_amdhdmi_rev3_or_later(codec) \
- ((codec)->vendor_id == 0x1002aa01 && ((codec)->revision_id & 0xff00) >= 0x0300)
+ ((codec)->core.vendor_id == 0x1002aa01 && \
+ ((codec)->core.revision_id & 0xff00) >= 0x0300)
#define has_amd_full_remap_support(codec) is_amdhdmi_rev3_or_later(codec)
/* ATI/AMD specific HDA pin verbs, see the AMD HDA Verbs specification */
@@ -3301,15 +3289,6 @@ static int patch_via_hdmi(struct hda_codec *codec)
}
/*
- * called from hda_codec.c for generic HDMI support
- */
-int snd_hda_parse_hdmi_codec(struct hda_codec *codec)
-{
- return patch_generic_hdmi(codec);
-}
-EXPORT_SYMBOL_GPL(snd_hda_parse_hdmi_codec);
-
-/*
* patch entries
*/
static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
@@ -3373,6 +3352,8 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
{ .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
+/* special ID for generic HDMI */
+{ .id = HDA_CODEC_ID_GENERIC_HDMI, .patch = patch_generic_hdmi },
{} /* terminator */
};
@@ -3442,20 +3423,8 @@ MODULE_ALIAS("snd-hda-codec-intelhdmi");
MODULE_ALIAS("snd-hda-codec-nvhdmi");
MODULE_ALIAS("snd-hda-codec-atihdmi");
-static struct hda_codec_preset_list intel_list = {
+static struct hda_codec_driver hdmi_driver = {
.preset = snd_hda_preset_hdmi,
- .owner = THIS_MODULE,
};
-static int __init patch_hdmi_init(void)
-{
- return snd_hda_add_codec_preset(&intel_list);
-}
-
-static void __exit patch_hdmi_exit(void)
-{
- snd_hda_delete_codec_preset(&intel_list);
-}
-
-module_init(patch_hdmi_init)
-module_exit(patch_hdmi_exit)
+module_hda_codec_driver(hdmi_driver);
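Editor's sketch, not part of the patch: the resume-path changes in the HDMI (and, below, Realtek) hunks reflect the move of the verb/amp caches into a regmap; resume re-runs the init callback and then calls regcache_sync() on codec->core.regmap instead of the old snd_hda_codec_resume_amp()/snd_hda_codec_resume_cache() pair. A minimal example with foo_resume() as a placeholder callback name.

/* Sketch only: regmap-based resume, as used by the converted drivers. */
static int foo_resume(struct hda_codec *codec)
{
	codec->patch_ops.init(codec);
	regcache_sync(codec->core.regmap);
	return 0;
}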
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f9d12c0a7e5a..e2afd53cc14c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -299,7 +299,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
coef = alc_get_coef0(codec);
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0262:
alc_update_coef_idx(codec, 0x7, 0, 1<<5);
break;
@@ -432,7 +432,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
snd_hda_sequence_write(codec, alc_gpio3_init_verbs);
break;
case ALC_INIT_DEFAULT:
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0260:
alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x2010);
break;
@@ -498,18 +498,18 @@ static int alc_auto_parse_customize_define(struct hda_codec *codec)
if (!codec->bus->pci)
return -1;
- ass = codec->subsystem_id & 0xffff;
+ ass = codec->core.subsystem_id & 0xffff;
if (ass != codec->bus->pci->subsystem_device && (ass & 1))
goto do_sku;
nid = 0x1d;
- if (codec->vendor_id == 0x10ec0260)
+ if (codec->core.vendor_id == 0x10ec0260)
nid = 0x17;
ass = snd_hda_codec_get_pincfg(codec, nid);
if (!(ass & 1)) {
codec_info(codec, "%s: SKU not ready 0x%08x\n",
- codec->chip_name, ass);
+ codec->core.chip_name, ass);
return -1;
}
@@ -585,7 +585,7 @@ static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports)
goto do_sku;
}
- ass = codec->subsystem_id & 0xffff;
+ ass = codec->core.subsystem_id & 0xffff;
if (codec->bus->pci &&
ass != codec->bus->pci->subsystem_device && (ass & 1))
goto do_sku;
@@ -600,7 +600,7 @@ static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports)
* 0 : override
*/
nid = 0x1d;
- if (codec->vendor_id == 0x10ec0260)
+ if (codec->core.vendor_id == 0x10ec0260)
nid = 0x17;
ass = snd_hda_codec_get_pincfg(codec, nid);
codec_dbg(codec,
@@ -621,7 +621,7 @@ static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports)
return 0;
do_sku:
codec_dbg(codec, "realtek: Enabling init ASM_ID=0x%04x CODEC_ID=%08x\n",
- ass & 0xffff, codec->vendor_id);
+ ass & 0xffff, codec->core.vendor_id);
/*
* 0 : override
* 1 : Swap Jack
@@ -799,8 +799,7 @@ static int alc_resume(struct hda_codec *codec)
if (!spec->no_depop_delay)
msleep(150); /* to avoid pop noise */
codec->patch_ops.init(codec);
- snd_hda_codec_resume_amp(codec);
- snd_hda_codec_resume_cache(codec);
+ regcache_sync(codec->core.regmap);
hda_call_check_power_status(codec, 0x01);
return 0;
}
@@ -826,9 +825,9 @@ static const struct hda_codec_ops alc_patch_ops = {
/* replace the codec chip_name with the given string */
static int alc_codec_rename(struct hda_codec *codec, const char *name)
{
- kfree(codec->chip_name);
- codec->chip_name = kstrdup(name, GFP_KERNEL);
- if (!codec->chip_name) {
+ kfree(codec->core.chip_name);
+ codec->core.chip_name = kstrdup(name, GFP_KERNEL);
+ if (!codec->core.chip_name) {
alc_free(codec);
return -ENOMEM;
}
@@ -904,7 +903,7 @@ static int alc_codec_rename_from_preset(struct hda_codec *codec)
const struct alc_codec_rename_pci_table *q;
for (p = rename_tbl; p->vendor_id; p++) {
- if (p->vendor_id != codec->vendor_id)
+ if (p->vendor_id != codec->core.vendor_id)
continue;
if ((alc_get_coef0(codec) & p->coef_mask) == p->coef_bits)
return alc_codec_rename(codec, p->name);
@@ -913,7 +912,7 @@ static int alc_codec_rename_from_preset(struct hda_codec *codec)
if (!codec->bus->pci)
return 0;
for (q = rename_pci_tbl; q->codec_vendor_id; q++) {
- if (q->codec_vendor_id != codec->vendor_id)
+ if (q->codec_vendor_id != codec->core.vendor_id)
continue;
if (q->pci_subvendor != codec->bus->pci->subsystem_vendor)
continue;
@@ -1785,7 +1784,7 @@ static void alc882_gpio_mute(struct hda_codec *codec, int pin, int muted)
{
unsigned int gpiostate, gpiomask, gpiodir;
- gpiostate = snd_hda_codec_read(codec, codec->afg, 0,
+ gpiostate = snd_hda_codec_read(codec, codec->core.afg, 0,
AC_VERB_GET_GPIO_DATA, 0);
if (!muted)
@@ -1793,23 +1792,23 @@ static void alc882_gpio_mute(struct hda_codec *codec, int pin, int muted)
else
gpiostate &= ~(1 << pin);
- gpiomask = snd_hda_codec_read(codec, codec->afg, 0,
+ gpiomask = snd_hda_codec_read(codec, codec->core.afg, 0,
AC_VERB_GET_GPIO_MASK, 0);
gpiomask |= (1 << pin);
- gpiodir = snd_hda_codec_read(codec, codec->afg, 0,
+ gpiodir = snd_hda_codec_read(codec, codec->core.afg, 0,
AC_VERB_GET_GPIO_DIRECTION, 0);
gpiodir |= (1 << pin);
- snd_hda_codec_write(codec, codec->afg, 0,
+ snd_hda_codec_write(codec, codec->core.afg, 0,
AC_VERB_SET_GPIO_MASK, gpiomask);
- snd_hda_codec_write(codec, codec->afg, 0,
+ snd_hda_codec_write(codec, codec->core.afg, 0,
AC_VERB_SET_GPIO_DIRECTION, gpiodir);
msleep(1);
- snd_hda_codec_write(codec, codec->afg, 0,
+ snd_hda_codec_write(codec, codec->core.afg, 0,
AC_VERB_SET_GPIO_DATA, gpiostate);
}
@@ -2269,7 +2268,7 @@ static int patch_alc882(struct hda_codec *codec)
spec = codec->spec;
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0882:
case 0x10ec0885:
case 0x10ec0900:
@@ -2602,53 +2601,12 @@ static int patch_alc268(struct hda_codec *codec)
* ALC269
*/
-static int playback_pcm_open(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct hda_gen_spec *spec = codec->spec;
- return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
- hinfo);
-}
-
-static int playback_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
-{
- struct hda_gen_spec *spec = codec->spec;
- return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
- stream_tag, format, substream);
-}
-
-static int playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct hda_gen_spec *spec = codec->spec;
- return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
-}
-
static const struct hda_pcm_stream alc269_44k_pcm_analog_playback = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 8,
.rates = SNDRV_PCM_RATE_44100, /* fixed rate */
- /* NID is set in alc_build_pcms */
- .ops = {
- .open = playback_pcm_open,
- .prepare = playback_pcm_prepare,
- .cleanup = playback_pcm_cleanup
- },
};
static const struct hda_pcm_stream alc269_44k_pcm_analog_capture = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 2,
.rates = SNDRV_PCM_RATE_44100, /* fixed rate */
- /* NID is set in alc_build_pcms */
};
/* different alc269-variants */
@@ -3101,8 +3059,7 @@ static int alc269_resume(struct hda_codec *codec)
msleep(200);
}
- snd_hda_codec_resume_amp(codec);
- snd_hda_codec_resume_cache(codec);
+ regcache_sync(codec->core.regmap);
hda_call_check_power_status(codec, 0x01);
/* on some machine, the BIOS will clear the codec gpio data when enter
@@ -3110,7 +3067,7 @@ static int alc269_resume(struct hda_codec *codec)
* in the driver.
*/
if (spec->gpio_led)
- snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA,
+ snd_hda_codec_write(codec, codec->core.afg, 0, AC_VERB_SET_GPIO_DATA,
spec->gpio_led);
if (spec->has_alc5505_dsp)
@@ -3155,8 +3112,8 @@ static void alc271_fixup_dmic(struct hda_codec *codec,
};
unsigned int cfg;
- if (strcmp(codec->chip_name, "ALC271X") &&
- strcmp(codec->chip_name, "ALC269VB"))
+ if (strcmp(codec->core.chip_name, "ALC271X") &&
+ strcmp(codec->core.chip_name, "ALC269VB"))
return;
cfg = snd_hda_codec_get_pincfg(codec, 0x12);
if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED)
@@ -3266,7 +3223,7 @@ static unsigned int led_power_filter(struct hda_codec *codec,
snd_hda_set_pin_ctl(codec, nid,
snd_hda_codec_get_pin_target(codec, nid));
- return AC_PWRST_D0;
+ return snd_hda_gen_path_power_filter(codec, nid, power_state);
}
static void alc269_fixup_hp_mute_led(struct hda_codec *codec,
@@ -3522,9 +3479,9 @@ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
}
snd_hda_add_verbs(codec, gpio_init);
- snd_hda_codec_write_cache(codec, codec->afg, 0,
+ snd_hda_codec_write_cache(codec, codec->core.afg, 0,
AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x04);
- snd_hda_jack_detect_enable_callback(codec, codec->afg,
+ snd_hda_jack_detect_enable_callback(codec, codec->core.afg,
gpio2_mic_hotkey_event);
spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
@@ -3585,6 +3542,14 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
WRITE_COEF(0x32, 0x42a3),
{}
};
+ static struct coef_fw coef0288[] = {
+ UPDATE_COEF(0x4f, 0xfcc0, 0xc400),
+ UPDATE_COEF(0x50, 0x2000, 0x2000),
+ UPDATE_COEF(0x56, 0x0006, 0x0006),
+ UPDATE_COEF(0x66, 0x0008, 0),
+ UPDATE_COEF(0x67, 0x2000, 0),
+ {}
+ };
static struct coef_fw coef0292[] = {
WRITE_COEF(0x76, 0x000e),
WRITE_COEF(0x6c, 0x2400),
@@ -3607,7 +3572,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
{}
};
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3616,6 +3581,10 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
break;
+ case 0x10ec0286:
+ case 0x10ec0288:
+ alc_process_coef_fw(codec, coef0288);
+ break;
case 0x10ec0292:
alc_process_coef_fw(codec, coef0292);
break;
@@ -3645,6 +3614,14 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
WRITE_COEF(0x26, 0x008c),
{}
};
+ static struct coef_fw coef0288[] = {
+ UPDATE_COEF(0x50, 0x2000, 0),
+ UPDATE_COEF(0x56, 0x0006, 0),
+ UPDATE_COEF(0x4f, 0xfcc0, 0xc400),
+ UPDATE_COEF(0x66, 0x0008, 0x0008),
+ UPDATE_COEF(0x67, 0x2000, 0x2000),
+ {}
+ };
static struct coef_fw coef0292[] = {
WRITE_COEF(0x19, 0xa208),
WRITE_COEF(0x2e, 0xacf0),
@@ -3663,7 +3640,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
{}
};
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -3678,6 +3655,13 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
alc_process_coef_fw(codec, coef0233);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0286:
+ case 0x10ec0288:
+ alc_update_coef_idx(codec, 0x4f, 0x000c, 0);
+ snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+ alc_process_coef_fw(codec, coef0288);
+ snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ break;
case 0x10ec0292:
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
alc_process_coef_fw(codec, coef0292);
@@ -3713,6 +3697,14 @@ static void alc_headset_mode_default(struct hda_codec *codec)
WRITE_COEF(0x32, 0x4ea3),
{}
};
+ static struct coef_fw coef0288[] = {
+ UPDATE_COEF(0x4f, 0xfcc0, 0xc400), /* Set to TRS type */
+ UPDATE_COEF(0x50, 0x2000, 0x2000),
+ UPDATE_COEF(0x56, 0x0006, 0x0006),
+ UPDATE_COEF(0x66, 0x0008, 0),
+ UPDATE_COEF(0x67, 0x2000, 0),
+ {}
+ };
static struct coef_fw coef0292[] = {
WRITE_COEF(0x76, 0x000e),
WRITE_COEF(0x6c, 0x2400),
@@ -3733,7 +3725,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
{}
};
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3742,6 +3734,11 @@ static void alc_headset_mode_default(struct hda_codec *codec)
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
break;
+ case 0x10ec0286:
+ case 0x10ec0288:
+ alc_process_coef_fw(codec, coef0288);
+ break;
case 0x10ec0292:
alc_process_coef_fw(codec, coef0292);
break;
@@ -3770,6 +3767,13 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
WRITE_COEF(0x32, 0x4ea3),
{}
};
+ static struct coef_fw coef0288[] = {
+ UPDATE_COEF(0x50, 0x2000, 0x2000),
+ UPDATE_COEF(0x56, 0x0006, 0x0006),
+ UPDATE_COEF(0x66, 0x0008, 0),
+ UPDATE_COEF(0x67, 0x2000, 0),
+ {}
+ };
static struct coef_fw coef0292[] = {
WRITE_COEF(0x6b, 0xd429),
WRITE_COEF(0x76, 0x0008),
@@ -3788,7 +3792,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
{}
};
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3797,6 +3801,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
break;
+ case 0x10ec0286:
+ case 0x10ec0288:
+ alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400);
+ msleep(300);
+ alc_process_coef_fw(codec, coef0288);
+ break;
case 0x10ec0292:
alc_process_coef_fw(codec, coef0292);
break;
@@ -3825,6 +3835,13 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
WRITE_COEF(0x32, 0x4ea3),
{}
};
+ static struct coef_fw coef0288[] = {
+ UPDATE_COEF(0x50, 0x2000, 0x2000),
+ UPDATE_COEF(0x56, 0x0006, 0x0006),
+ UPDATE_COEF(0x66, 0x0008, 0),
+ UPDATE_COEF(0x67, 0x2000, 0),
+ {}
+ };
static struct coef_fw coef0292[] = {
WRITE_COEF(0x6b, 0xe429),
WRITE_COEF(0x76, 0x0008),
@@ -3843,7 +3860,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
{}
};
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3852,6 +3869,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
break;
+ case 0x10ec0286:
+ case 0x10ec0288:
+ alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xe400);
+ msleep(300);
+ alc_process_coef_fw(codec, coef0288);
+ break;
case 0x10ec0292:
alc_process_coef_fw(codec, coef0292);
break;
@@ -3876,6 +3899,10 @@ static void alc_determine_headset_type(struct hda_codec *codec)
conteol) */
{}
};
+ static struct coef_fw coef0288[] = {
+ UPDATE_COEF(0x4f, 0xfcc0, 0xd400), /* Check Type */
+ {}
+ };
static struct coef_fw coef0293[] = {
UPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */
WRITE_COEF(0x45, 0xD429), /* Set to ctia type */
@@ -3889,7 +3916,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
{}
};
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3904,6 +3931,13 @@ static void alc_determine_headset_type(struct hda_codec *codec)
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
break;
+ case 0x10ec0286:
+ case 0x10ec0288:
+ alc_process_coef_fw(codec, coef0288);
+ msleep(350);
+ val = alc_read_coef_idx(codec, 0x50);
+ is_ctia = (val & 0x0070) == 0x0070;
+ break;
case 0x10ec0292:
alc_write_coef_idx(codec, 0x6b, 0xd429);
msleep(300);
@@ -4087,6 +4121,29 @@ static void alc_fixup_headset_mode_alc255_no_hp_mic(struct hda_codec *codec,
alc_fixup_headset_mode(codec, fix, action);
}
+static void alc288_update_headset_jack_cb(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ struct alc_spec *spec = codec->spec;
+ int present;
+
+ alc_update_headset_jack_cb(codec, jack);
+ /* Headset Mic enable or disable, only for Dell Dino */
+ present = spec->gen.hp_jack_present ? 0x40 : 0;
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ present);
+}
+
+static void alc_fixup_headset_mode_dell_alc288(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_headset_mode(codec, fix, action);
+ if (action == HDA_FIXUP_ACT_PROBE) {
+ struct alc_spec *spec = codec->spec;
+ spec->gen.hp_automute_hook = alc288_update_headset_jack_cb;
+ }
+}
+
static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -4119,29 +4176,33 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec,
}
}
-static unsigned int alc_power_filter_xps13(struct hda_codec *codec,
- hda_nid_t nid,
- unsigned int power_state)
+static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
+ int hp_pin = spec->gen.autocfg.hp_pins[0];
- /* Avoid pop noises when headphones are plugged in */
- if (spec->gen.hp_jack_present)
- if (nid == codec->afg || nid == 0x02 || nid == 0x15)
- return AC_PWRST_D0;
- return power_state;
+ /* Prevent pop noises when headphones are plugged in */
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ msleep(20);
}
static void alc_fixup_dell_xps13(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- if (action == HDA_FIXUP_ACT_PROBE) {
- struct alc_spec *spec = codec->spec;
- struct hda_input_mux *imux = &spec->gen.input_mux;
- int i;
+ struct alc_spec *spec = codec->spec;
+ struct hda_input_mux *imux = &spec->gen.input_mux;
+ int i;
- spec->shutup = alc_no_shutup;
- codec->power_filter = alc_power_filter_xps13;
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ /* mic pin 0x19 must be initialized with Vref Hi-Z, otherwise
+ * it causes a click noise at start up
+ */
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+ break;
+ case HDA_FIXUP_ACT_PROBE:
+ spec->shutup = alc_shutup_dell_xps13;
/* Make the internal mic the default input source. */
for (i = 0; i < imux->num_items; i++) {
@@ -4150,6 +4211,7 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
break;
}
}
+ break;
}
}
@@ -4427,6 +4489,9 @@ enum {
ALC286_FIXUP_HP_GPIO_LED,
ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
ALC280_FIXUP_HP_DOCK_PINS,
+ ALC288_FIXUP_DELL_HEADSET_MODE,
+ ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC288_FIXUP_DELL_XPS_13_GPIO6,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -4922,6 +4987,33 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC280_FIXUP_HP_GPIO4
},
+ [ALC288_FIXUP_DELL_HEADSET_MODE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mode_dell_alc288,
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
+ },
+ [ALC288_FIXUP_DELL1_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC288_FIXUP_DELL_HEADSET_MODE
+ },
+ [ALC288_FIXUP_DELL_XPS_13_GPIO6] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ {0x01, AC_VERB_SET_GPIO_MASK, 0x40},
+ {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x40},
+ {0x01, AC_VERB_SET_GPIO_DATA, 0x00},
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5047,12 +5139,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5142,6 +5236,16 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x1b, 0x411111f0}, \
{0x1e, 0x411111f0}
+#define ALC256_STANDARD_PINS \
+ {0x12, 0x90a60140}, \
+ {0x14, 0x90170110}, \
+ {0x19, 0x411111f0}, \
+ {0x1a, 0x411111f0}, \
+ {0x1b, 0x411111f0}, \
+ {0x1d, 0x40700001}, \
+ {0x1e, 0x411111f0}, \
+ {0x21, 0x02211020}
+
#define ALC282_STANDARD_PINS \
{0x14, 0x90170110}, \
{0x18, 0x411111f0}, \
@@ -5149,6 +5253,13 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x1b, 0x411111f0}, \
{0x1e, 0x411111f0}
+#define ALC288_STANDARD_PINS \
+ {0x17, 0x411111f0}, \
+ {0x18, 0x411111f0}, \
+ {0x19, 0x411111f0}, \
+ {0x1a, 0x411111f0}, \
+ {0x1e, 0x411111f0}
+
#define ALC290_STANDARD_PINS \
{0x12, 0x99a30130}, \
{0x13, 0x40000000}, \
@@ -5235,15 +5346,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1d, 0x40700001},
{0x21, 0x02211050}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x13, 0x40000000},
- {0x14, 0x90170110},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
- {0x1d, 0x40700001},
- {0x1e, 0x411111f0},
- {0x21, 0x02211020}),
+ ALC256_STANDARD_PINS,
+ {0x13, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC256_STANDARD_PINS,
+ {0x13, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
{0x13, 0x40000000},
@@ -5344,6 +5451,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x19, 0x03a11020},
{0x1d, 0x40e00001},
{0x21, 0x0321101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL_XPS_13_GPIO6,
+ ALC288_STANDARD_PINS,
+ {0x12, 0x90a60120},
+ {0x13, 0x40000000},
+ {0x14, 0x90170110},
+ {0x1d, 0x4076832d},
+ {0x21, 0x0321101f}),
SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
ALC290_STANDARD_PINS,
{0x14, 0x411111f0},
@@ -5484,6 +5598,7 @@ static int patch_alc269(struct hda_codec *codec)
spec = codec->spec;
spec->gen.shared_mic_vref_pin = 0x18;
+ codec->power_save_node = 1;
snd_hda_pick_fixup(codec, alc269_fixup_models,
alc269_fixup_tbl, alc269_fixups);
@@ -5497,7 +5612,7 @@ static int patch_alc269(struct hda_codec *codec)
if (has_cdefine_beep(codec))
spec->gen.beep_nid = 0x01;
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0269:
spec->codec_variant = ALC269_TYPE_ALC269VA;
switch (alc_get_coef0(codec) & 0x00f0) {
@@ -5563,6 +5678,8 @@ static int patch_alc269(struct hda_codec *codec)
break;
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
+ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
+ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
break;
}
@@ -5576,10 +5693,11 @@ static int patch_alc269(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->gen.no_analog && spec->gen.beep_nid)
- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
+ set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
codec->patch_ops = alc_patch_ops;
+ codec->patch_ops.stream_pm = snd_hda_gen_stream_pm;
#ifdef CONFIG_PM
codec->patch_ops.suspend = alc269_suspend;
codec->patch_ops.resume = alc269_resume;
@@ -5841,9 +5959,9 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
static const hda_nid_t alc662_ssids[] = { 0x15, 0x1b, 0x14, 0 };
const hda_nid_t *ssids;
- if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 ||
- codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 ||
- codec->vendor_id == 0x10ec0671)
+ if (codec->core.vendor_id == 0x10ec0272 || codec->core.vendor_id == 0x10ec0663 ||
+ codec->core.vendor_id == 0x10ec0665 || codec->core.vendor_id == 0x10ec0670 ||
+ codec->core.vendor_id == 0x10ec0671)
ssids = alc663_ssids;
else
ssids = alc662_ssids;
@@ -5878,7 +5996,7 @@ static void alc_fixup_bass_chmap(struct hda_codec *codec,
{
if (action == HDA_FIXUP_ACT_BUILD) {
struct alc_spec *spec = codec->spec;
- spec->gen.pcm_rec[0].stream[0].chmap = asus_pcm_2_1_chmaps;
+ spec->gen.pcm_rec[0]->stream[0].chmap = asus_pcm_2_1_chmaps;
}
}
@@ -5888,7 +6006,7 @@ static unsigned int gpio_led_power_filter(struct hda_codec *codec,
unsigned int power_state)
{
struct alc_spec *spec = codec->spec;
- if (nid == codec->afg && power_state == AC_PWRST_D3 && spec->gpio_led)
+ if (nid == codec->core.afg && power_state == AC_PWRST_D3 && spec->gpio_led)
return AC_PWRST_D0;
return power_state;
}
@@ -6386,7 +6504,7 @@ static int patch_alc662(struct hda_codec *codec)
alc_fix_pll_init(codec, 0x20, 0x04, 15);
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0668:
spec->init_hook = alc668_restore_default_value;
break;
@@ -6416,7 +6534,7 @@ static int patch_alc662(struct hda_codec *codec)
goto error;
if (!spec->gen.no_analog && spec->gen.beep_nid) {
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x10ec0662:
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
break;
@@ -6549,20 +6667,8 @@ MODULE_ALIAS("snd-hda-codec-id:10ec*");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek HD-audio codec");
-static struct hda_codec_preset_list realtek_list = {
+static struct hda_codec_driver realtek_driver = {
.preset = snd_hda_preset_realtek,
- .owner = THIS_MODULE,
};
-static int __init patch_realtek_init(void)
-{
- return snd_hda_add_codec_preset(&realtek_list);
-}
-
-static void __exit patch_realtek_exit(void)
-{
- snd_hda_delete_codec_preset(&realtek_list);
-}
-
-module_init(patch_realtek_init)
-module_exit(patch_realtek_exit)
+module_hda_codec_driver(realtek_driver);
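Editor's sketch, not part of the patch: the Realtek hunks above also consolidate the pin-quirk tables, collecting the pin defaults shared by a codec family into a *_STANDARD_PINS macro so each SND_HDA_PIN_QUIRK() entry lists only the pins that differ per machine. A minimal example of that table shape; FOO_STANDARD_PINS, the pin values, and the table name are placeholders.

/* Sketch only: shared pin defaults factored into a macro; per-machine
 * entries add just the differing pins (values are placeholders). */
#define FOO_STANDARD_PINS \
	{0x14, 0x90170110}, \
	{0x1b, 0x411111f0}

static const struct snd_hda_pin_quirk foo_pin_fixup_tbl[] = {
	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
		FOO_STANDARD_PINS,
		{0x21, 0x02211020}),
	{}
};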
diff --git a/sound/pci/hda/patch_si3054.c b/sound/pci/hda/patch_si3054.c
index 3208ad69583e..5104bebb2286 100644
--- a/sound/pci/hda/patch_si3054.c
+++ b/sound/pci/hda/patch_si3054.c
@@ -83,7 +83,6 @@
struct si3054_spec {
unsigned international;
- struct hda_pcm pcm;
};
@@ -199,15 +198,15 @@ static const struct hda_pcm_stream si3054_pcm = {
static int si3054_build_pcms(struct hda_codec *codec)
{
- struct si3054_spec *spec = codec->spec;
- struct hda_pcm *info = &spec->pcm;
- codec->num_pcms = 1;
- codec->pcm_info = info;
- info->name = "Si3054 Modem";
+ struct hda_pcm *info;
+
+ info = snd_hda_codec_pcm_new(codec, "Si3054 Modem");
+ if (!info)
+ return -ENOMEM;
info->stream[SNDRV_PCM_STREAM_PLAYBACK] = si3054_pcm;
info->stream[SNDRV_PCM_STREAM_CAPTURE] = si3054_pcm;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = codec->mfg;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = codec->mfg;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = codec->core.mfg;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = codec->core.mfg;
info->pcm_type = HDA_PCM_TYPE_MODEM;
return 0;
}
@@ -223,8 +222,12 @@ static int si3054_init(struct hda_codec *codec)
unsigned wait_count;
u16 val;
+ if (snd_hdac_regmap_add_vendor_verb(&codec->core,
+ SI3054_VERB_WRITE_NODE))
+ return -ENOMEM;
+
snd_hda_codec_write(codec, AC_NODE_ROOT, 0, AC_VERB_SET_CODEC_RESET, 0);
- snd_hda_codec_write(codec, codec->mfg, 0, AC_VERB_SET_STREAM_FORMAT, 0);
+ snd_hda_codec_write(codec, codec->core.mfg, 0, AC_VERB_SET_STREAM_FORMAT, 0);
SET_REG(codec, SI3054_LINE_RATE, 9600);
SET_REG(codec, SI3054_LINE_LEVEL, SI3054_DTAG_MASK|SI3054_ATAG_MASK);
SET_REG(codec, SI3054_EXTENDED_MID, 0);
@@ -319,20 +322,8 @@ MODULE_ALIAS("snd-hda-codec-id:18540018");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Si3054 HD-audio modem codec");
-static struct hda_codec_preset_list si3054_list = {
+static struct hda_codec_driver si3054_driver = {
.preset = snd_hda_preset_si3054,
- .owner = THIS_MODULE,
};
-static int __init patch_si3054_init(void)
-{
- return snd_hda_add_codec_preset(&si3054_list);
-}
-
-static void __exit patch_si3054_exit(void)
-{
- snd_hda_delete_codec_preset(&si3054_list);
-}
-
-module_init(patch_si3054_init)
-module_exit(patch_si3054_exit)
+module_hda_codec_driver(si3054_driver);
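Editor's sketch, not part of the patch: another consequence of the regmap conversion, visible in the Si3054, Conexant, HDMI, and (below) IDT hunks, is that vendor-specific verbs must be registered with snd_hdac_regmap_add_vendor_verb() before cached reads/writes using them are accepted. A minimal example; FOO_VENDOR_VERB and the function name are placeholders.

/* Sketch only: register a vendor verb so cached access to it works. */
#define FOO_VENDOR_VERB		0x7e0	/* placeholder verb id */

static int foo_register_vendor_verb(struct hda_codec *codec)
{
	if (snd_hdac_regmap_add_vendor_verb(&codec->core, FOO_VENDOR_VERB))
		return -ENOMEM;
	return 0;
}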
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 87eff3173ce9..43c99ce4a520 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -299,32 +299,33 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
unsigned int dir_mask, unsigned int data)
{
unsigned int gpiostate, gpiomask, gpiodir;
+ hda_nid_t fg = codec->core.afg;
codec_dbg(codec, "%s msk %x dir %x gpio %x\n", __func__, mask, dir_mask, data);
- gpiostate = snd_hda_codec_read(codec, codec->afg, 0,
+ gpiostate = snd_hda_codec_read(codec, fg, 0,
AC_VERB_GET_GPIO_DATA, 0);
gpiostate = (gpiostate & ~dir_mask) | (data & dir_mask);
- gpiomask = snd_hda_codec_read(codec, codec->afg, 0,
+ gpiomask = snd_hda_codec_read(codec, fg, 0,
AC_VERB_GET_GPIO_MASK, 0);
gpiomask |= mask;
- gpiodir = snd_hda_codec_read(codec, codec->afg, 0,
+ gpiodir = snd_hda_codec_read(codec, fg, 0,
AC_VERB_GET_GPIO_DIRECTION, 0);
gpiodir |= dir_mask;
/* Configure GPIOx as CMOS */
- snd_hda_codec_write(codec, codec->afg, 0, 0x7e7, 0);
+ snd_hda_codec_write(codec, fg, 0, 0x7e7, 0);
- snd_hda_codec_write(codec, codec->afg, 0,
+ snd_hda_codec_write(codec, fg, 0,
AC_VERB_SET_GPIO_MASK, gpiomask);
- snd_hda_codec_read(codec, codec->afg, 0,
+ snd_hda_codec_read(codec, fg, 0,
AC_VERB_SET_GPIO_DIRECTION, gpiodir); /* sync */
msleep(1);
- snd_hda_codec_read(codec, codec->afg, 0,
+ snd_hda_codec_read(codec, fg, 0,
AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */
}
@@ -387,7 +388,7 @@ static unsigned int stac_vref_led_power_filter(struct hda_codec *codec,
hda_nid_t nid,
unsigned int power_state)
{
- if (nid == codec->afg && power_state == AC_PWRST_D3)
+ if (nid == codec->core.afg && power_state == AC_PWRST_D3)
return AC_PWRST_D1;
return snd_hda_gen_path_power_filter(codec, nid, power_state);
}
@@ -432,7 +433,7 @@ static void stac_update_outputs(struct hda_codec *codec)
if (spec->gpio_mute)
spec->gen.master_mute =
- !(snd_hda_codec_read(codec, codec->afg, 0,
+ !(snd_hda_codec_read(codec, codec->core.afg, 0,
AC_VERB_GET_GPIO_DATA, 0) & spec->gpio_mute);
snd_hda_gen_update_outputs(codec);
@@ -476,7 +477,7 @@ static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid,
if (val != spec->power_map_bits) {
spec->power_map_bits = val;
if (do_write)
- snd_hda_codec_write(codec, codec->afg, 0,
+ snd_hda_codec_write(codec, codec->core.afg, 0,
AC_VERB_IDT_SET_POWER_MAP, val);
}
}
@@ -508,7 +509,8 @@ static void jack_update_power(struct hda_codec *codec,
false);
}
- snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_IDT_SET_POWER_MAP,
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_IDT_SET_POWER_MAP,
spec->power_map_bits);
}
@@ -517,10 +519,10 @@ static void stac_vref_event(struct hda_codec *codec,
{
unsigned int data;
- data = snd_hda_codec_read(codec, codec->afg, 0,
+ data = snd_hda_codec_read(codec, codec->core.afg, 0,
AC_VERB_GET_GPIO_DATA, 0);
/* toggle VREF state based on GPIOx status */
- snd_hda_codec_write(codec, codec->afg, 0, 0x7e0,
+ snd_hda_codec_write(codec, codec->core.afg, 0, 0x7e0,
!!(data & (1 << event->private_data)));
}
@@ -622,7 +624,7 @@ static int stac_aloopback_put(struct snd_kcontrol *kcontrol,
/* Only return the bits defined by the shift value of the
* first two bytes of the mask
*/
- dac_mode = snd_hda_codec_read(codec, codec->afg, 0,
+ dac_mode = snd_hda_codec_read(codec, codec->core.afg, 0,
kcontrol->private_value & 0xFFFF, 0x0);
dac_mode >>= spec->aloopback_shift;
@@ -634,7 +636,7 @@ static int stac_aloopback_put(struct snd_kcontrol *kcontrol,
dac_mode &= ~idx_val;
}
- snd_hda_codec_write_cache(codec, codec->afg, 0,
+ snd_hda_codec_write_cache(codec, codec->core.afg, 0,
kcontrol->private_value >> 16, dac_mode);
return 1;
@@ -658,11 +660,11 @@ static int stac_aloopback_put(struct snd_kcontrol *kcontrol,
/* check whether it's a HP laptop with a docking port */
static bool hp_bnb2011_with_dock(struct hda_codec *codec)
{
- if (codec->vendor_id != 0x111d7605 &&
- codec->vendor_id != 0x111d76d1)
+ if (codec->core.vendor_id != 0x111d7605 &&
+ codec->core.vendor_id != 0x111d76d1)
return false;
- switch (codec->subsystem_id) {
+ switch (codec->core.subsystem_id) {
case 0x103c1618:
case 0x103c1619:
case 0x103c161a:
@@ -733,7 +735,7 @@ static void set_hp_led_gpio(struct hda_codec *codec)
if (spec->gpio_led)
return;
- gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP);
+ gpio = snd_hda_param_read(codec, codec->core.afg, AC_PAR_GPIO_CAP);
gpio &= AC_GPIO_IO_COUNT;
if (gpio > 3)
spec->gpio_led = 0x08; /* GPIO 3 */
@@ -777,7 +779,7 @@ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity)
&spec->gpio_led_polarity,
&spec->gpio_led) == 2) {
unsigned int max_gpio;
- max_gpio = snd_hda_param_read(codec, codec->afg,
+ max_gpio = snd_hda_param_read(codec, codec->core.afg,
AC_PAR_GPIO_CAP);
max_gpio &= AC_GPIO_IO_COUNT;
if (spec->gpio_led < max_gpio)
@@ -807,7 +809,7 @@ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity)
* we statically set the GPIO - if not a B-series system
* and default polarity is provided
*/
- if (!hp_blike_system(codec->subsystem_id) &&
+ if (!hp_blike_system(codec->core.subsystem_id) &&
(default_polarity == 0 || default_polarity == 1)) {
set_hp_led_gpio(codec);
spec->gpio_led_polarity = default_polarity;
@@ -1048,12 +1050,9 @@ static const struct hda_verb stac92hd71bxx_core_init[] = {
{}
};
-static const struct hda_verb stac92hd71bxx_unmute_core_init[] = {
+static const hda_nid_t stac92hd71bxx_unmute_nids[] = {
/* unmute right and left channels for nodes 0x0f, 0xa, 0x0d */
- { 0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- { 0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- { 0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {}
+ 0x0f, 0x0a, 0x0d, 0
};
static const struct hda_verb stac925x_core_init[] = {
@@ -2132,8 +2131,10 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
+#ifdef CONFIG_PM
/* resetting controller clears GPIO, so we need to keep on */
- codec->bus->power_keep_link_on = 1;
+ codec->core.power_caps &= ~AC_PWRST_CLKSTOP;
+#endif
}
}
@@ -3029,9 +3030,9 @@ static void stac92hd71bxx_fixup_hp_m4(struct hda_codec *codec,
return;
/* Enable VREF power saving on GPIO1 detect */
- snd_hda_codec_write_cache(codec, codec->afg, 0,
+ snd_hda_codec_write_cache(codec, codec->core.afg, 0,
AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x02);
- jack = snd_hda_jack_detect_enable_callback(codec, codec->afg,
+ jack = snd_hda_jack_detect_enable_callback(codec, codec->core.afg,
stac_vref_event);
if (!IS_ERR(jack))
jack->private_data = 0x02;
@@ -3091,7 +3092,7 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
if (action != HDA_FIXUP_ACT_PRE_PROBE)
return;
- if (hp_blike_system(codec->subsystem_id)) {
+ if (hp_blike_system(codec->core.subsystem_id)) {
unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER ||
@@ -3790,7 +3791,7 @@ static void stac927x_fixup_dell_dmic(struct hda_codec *codec,
if (action != HDA_FIXUP_ACT_PRE_PROBE)
return;
- if (codec->subsystem_id != 0x1028022f) {
+ if (codec->core.subsystem_id != 0x1028022f) {
/* GPIO2 High = Enable EAPD */
spec->eapd_mask = spec->gpio_mask = 0x04;
spec->gpio_dir = spec->gpio_data = 0x04;
@@ -4051,9 +4052,9 @@ static void stac9205_fixup_dell_m43(struct hda_codec *codec,
snd_hda_apply_pincfgs(codec, dell_9205_m43_pin_configs);
/* Enable unsol response for GPIO4/Dock HP connection */
- snd_hda_codec_write_cache(codec, codec->afg, 0,
+ snd_hda_codec_write_cache(codec, codec->core.afg, 0,
AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x10);
- jack = snd_hda_jack_detect_enable_callback(codec, codec->afg,
+ jack = snd_hda_jack_detect_enable_callback(codec, codec->core.afg,
stac_vref_event);
if (!IS_ERR(jack))
jack->private_data = 0x01;
@@ -4223,6 +4224,12 @@ static int stac_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
+ if (spec->vref_mute_led_nid) {
+ err = snd_hda_gen_fix_pin_power(codec, spec->vref_mute_led_nid);
+ if (err < 0)
+ return err;
+ }
+
/* setup analog beep controls */
if (spec->anabeep_nid > 0) {
err = stac_auto_create_beep_ctls(codec,
@@ -4259,6 +4266,10 @@ static int stac_parse_auto_config(struct hda_codec *codec)
if (spec->aloopback_ctl &&
snd_hda_get_bool_hint(codec, "loopback") == 1) {
+ unsigned int wr_verb =
+ spec->aloopback_ctl->private_value >> 16;
+ if (snd_hdac_regmap_add_vendor_verb(&codec->core, wr_verb))
+ return -ENOMEM;
if (!snd_hda_gen_add_kctl(&spec->gen, NULL, spec->aloopback_ctl))
return -ENOMEM;
}
@@ -4294,7 +4305,7 @@ static int stac_init(struct hda_codec *codec)
/* sync the power-map */
if (spec->num_pwrs)
- snd_hda_codec_write(codec, codec->afg, 0,
+ snd_hda_codec_write(codec, codec->core.afg, 0,
AC_VERB_IDT_SET_POWER_MAP,
spec->power_map_bits);
@@ -4330,7 +4341,7 @@ static void stac_shutup(struct hda_codec *codec)
static void stac92hd_proc_hook(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
- if (nid == codec->afg)
+ if (nid == codec->core.afg)
snd_iprintf(buffer, "Power-Map: 0x%02x\n",
snd_hda_codec_read(codec, nid, 0,
AC_VERB_IDT_GET_POWER_MAP, 0));
@@ -4341,7 +4352,7 @@ static void analog_loop_proc_hook(struct snd_info_buffer *buffer,
unsigned int verb)
{
snd_iprintf(buffer, "Analog Loopback: 0x%02x\n",
- snd_hda_codec_read(codec, codec->afg, 0, verb, 0));
+ snd_hda_codec_read(codec, codec->core.afg, 0, verb, 0));
}
/* stac92hd71bxx, stac92hd73xx */
@@ -4349,21 +4360,21 @@ static void stac92hd7x_proc_hook(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
stac92hd_proc_hook(buffer, codec, nid);
- if (nid == codec->afg)
+ if (nid == codec->core.afg)
analog_loop_proc_hook(buffer, codec, 0xfa0);
}
static void stac9205_proc_hook(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
- if (nid == codec->afg)
+ if (nid == codec->core.afg)
analog_loop_proc_hook(buffer, codec, 0xfe0);
}
static void stac927x_proc_hook(struct snd_info_buffer *buffer,
struct hda_codec *codec, hda_nid_t nid)
{
- if (nid == codec->afg)
+ if (nid == codec->core.afg)
analog_loop_proc_hook(buffer, codec, 0xfeb);
}
#else
@@ -4392,6 +4403,7 @@ static const struct hda_codec_ops stac_patch_ops = {
#ifdef CONFIG_PM
.suspend = stac_suspend,
#endif
+ .stream_pm = snd_hda_gen_stream_pm,
.reboot_notify = stac_shutup,
};
@@ -4485,6 +4497,7 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
return err;
spec = codec->spec;
+ codec->power_save_node = 1;
spec->linear_tone_beep = 0;
spec->gen.mixer_nid = 0x1d;
spec->have_spdif_mux = 1;
@@ -4587,9 +4600,11 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
if (err < 0)
return err;
- codec->epss = 0; /* longer delay needed for D3 */
+ /* longer delay needed for D3 */
+ codec->core.power_caps &= ~AC_PWRST_EPSS;
spec = codec->spec;
+ codec->power_save_node = 1;
spec->linear_tone_beep = 0;
spec->gen.own_eapd_ctl = 1;
spec->gen.power_down_unused = 1;
@@ -4636,9 +4651,11 @@ static int patch_stac92hd95(struct hda_codec *codec)
if (err < 0)
return err;
- codec->epss = 0; /* longer delay needed for D3 */
+ /* longer delay needed for D3 */
+ codec->core.power_caps &= ~AC_PWRST_EPSS;
spec = codec->spec;
+ codec->power_save_node = 1;
spec->linear_tone_beep = 0;
spec->gen.own_eapd_ctl = 1;
spec->gen.power_down_unused = 1;
@@ -4672,7 +4689,7 @@ static int patch_stac92hd95(struct hda_codec *codec)
static int patch_stac92hd71bxx(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
- const struct hda_verb *unmute_init = stac92hd71bxx_unmute_core_init;
+ const hda_nid_t *unmute_nids = stac92hd71bxx_unmute_nids;
int err;
err = alloc_stac_spec(codec);
@@ -4680,6 +4697,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
return err;
spec = codec->spec;
+ codec->power_save_node = 1;
spec->linear_tone_beep = 0;
spec->gen.own_eapd_ctl = 1;
spec->gen.power_down_unused = 1;
@@ -4693,23 +4711,23 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
spec->gpio_dir = 0x01;
spec->gpio_data = 0x01;
- switch (codec->vendor_id) {
+ switch (codec->core.vendor_id) {
case 0x111d76b6: /* 4 Port without Analog Mixer */
case 0x111d76b7:
- unmute_init++;
+ unmute_nids++;
break;
case 0x111d7608: /* 5 Port with Analog Mixer */
- if ((codec->revision_id & 0xf) == 0 ||
- (codec->revision_id & 0xf) == 1)
+ if ((codec->core.revision_id & 0xf) == 0 ||
+ (codec->core.revision_id & 0xf) == 1)
spec->stream_delay = 40; /* 40 milliseconds */
/* disable VSW */
- unmute_init++;
+ unmute_nids++;
snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0);
snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3);
break;
case 0x111d7603: /* 6 Port with Analog Mixer */
- if ((codec->revision_id & 0xf) == 1)
+ if ((codec->core.revision_id & 0xf) == 1)
spec->stream_delay = 40; /* 40 milliseconds */
break;
@@ -4718,8 +4736,12 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
if (get_wcaps_type(get_wcaps(codec, 0x28)) == AC_WID_VOL_KNB)
snd_hda_add_verbs(codec, stac92hd71bxx_core_init);
- if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)
- snd_hda_sequence_write_cache(codec, unmute_init);
+ if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) {
+ const hda_nid_t *p;
+ for (p = unmute_nids; *p; p++)
+ snd_hda_codec_amp_init_stereo(codec, *p, HDA_INPUT, 0,
+ 0xff, 0x00);
+ }
spec->aloopback_ctl = &stac92hd71bxx_loopback;
spec->aloopback_mask = 0x50;
@@ -5091,20 +5113,8 @@ MODULE_ALIAS("snd-hda-codec-id:111d*");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IDT/Sigmatel HD-audio codec");
-static struct hda_codec_preset_list sigmatel_list = {
+static struct hda_codec_driver sigmatel_driver = {
.preset = snd_hda_preset_sigmatel,
- .owner = THIS_MODULE,
};
-static int __init patch_sigmatel_init(void)
-{
- return snd_hda_add_codec_preset(&sigmatel_list);
-}
-
-static void __exit patch_sigmatel_exit(void)
-{
- snd_hda_delete_codec_preset(&sigmatel_list);
-}
-
-module_init(patch_sigmatel_init)
-module_exit(patch_sigmatel_exit)
+module_hda_codec_driver(sigmatel_driver);
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 3de6d3d779c9..31a95cca015d 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -99,7 +99,6 @@ struct via_spec {
/* HP mode source */
unsigned int dmic_enabled;
- unsigned int no_pin_power_ctl;
enum VIA_HDA_CODEC codec_type;
/* analog low-power control */
@@ -109,8 +108,7 @@ struct via_spec {
int hp_work_active;
int vt1708_jack_detect;
- void (*set_widgets_power_state)(struct hda_codec *codec);
- unsigned int dac_stream_tag[4];
+ unsigned int beep_amp;
};
static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec);
@@ -133,17 +131,18 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
/* VT1708BCE & VT1708S are almost same */
if (spec->codec_type == VT1708BCE)
spec->codec_type = VT1708S;
- spec->no_pin_power_ctl = 1;
spec->gen.indep_hp = 1;
spec->gen.keep_eapd_on = 1;
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
+ codec->power_save_node = 1;
+ spec->gen.power_down_unused = 1;
return spec;
}
static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec)
{
- u32 vendor_id = codec->vendor_id;
+ u32 vendor_id = codec->core.vendor_id;
u16 ven_id = vendor_id >> 16;
u16 dev_id = vendor_id & 0xffff;
enum VIA_HDA_CODEC codec_type;
@@ -222,98 +221,13 @@ static void vt1708_update_hp_work(struct hda_codec *codec)
if (!spec->hp_work_active) {
codec->jackpoll_interval = msecs_to_jiffies(100);
snd_hda_codec_write(codec, 0x1, 0, 0xf81, 0);
- queue_delayed_work(codec->bus->workq,
- &codec->jackpoll_work, 0);
+ schedule_delayed_work(&codec->jackpoll_work, 0);
spec->hp_work_active = true;
}
} else if (!hp_detect_with_aa(codec))
vt1708_stop_hp_work(codec);
}
-static void set_widgets_power_state(struct hda_codec *codec)
-{
-#if 0 /* FIXME: the assumed connections don't match always with the
- * actual routes by the generic parser, so better to disable
- * the control for safety.
- */
- struct via_spec *spec = codec->spec;
- if (spec->set_widgets_power_state)
- spec->set_widgets_power_state(codec);
-#endif
-}
-
-static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
- unsigned int parm)
-{
- if (snd_hda_check_power_state(codec, nid, parm))
- return;
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm);
-}
-
-static void update_conv_power_state(struct hda_codec *codec, hda_nid_t nid,
- unsigned int parm, unsigned int index)
-{
- struct via_spec *spec = codec->spec;
- unsigned int format;
-
- if (snd_hda_check_power_state(codec, nid, parm))
- return;
- format = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- if (format && (spec->dac_stream_tag[index] != format))
- spec->dac_stream_tag[index] = format;
-
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm);
- if (parm == AC_PWRST_D0) {
- format = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- if (!format && (spec->dac_stream_tag[index] != format))
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CHANNEL_STREAMID,
- spec->dac_stream_tag[index]);
- }
-}
-
-static bool smart51_enabled(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- return spec->gen.ext_channel_count > 2;
-}
-
-static bool is_smart51_pins(struct hda_codec *codec, hda_nid_t pin)
-{
- struct via_spec *spec = codec->spec;
- int i;
-
- for (i = 0; i < spec->gen.multi_ios; i++)
- if (spec->gen.multi_io[i].pin == pin)
- return true;
- return false;
-}
-
-static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid,
- unsigned int *affected_parm)
-{
- unsigned parm;
- unsigned def_conf = snd_hda_codec_get_pincfg(codec, nid);
- unsigned no_presence = (def_conf & AC_DEFCFG_MISC)
- >> AC_DEFCFG_MISC_SHIFT
- & AC_DEFCFG_MISC_NO_PRESENCE; /* do not support pin sense */
- struct via_spec *spec = codec->spec;
- unsigned present = 0;
-
- no_presence |= spec->no_pin_power_ctl;
- if (!no_presence)
- present = snd_hda_jack_detect(codec, nid);
- if ((smart51_enabled(codec) && is_smart51_pins(codec, nid))
- || ((no_presence || present)
- && get_defcfg_connect(def_conf) != AC_JACK_PORT_NONE)) {
- *affected_parm = AC_PWRST_D0; /* if it's connected */
- parm = AC_PWRST_D0;
- } else
- parm = AC_PWRST_D3;
-
- update_power_state(codec, nid, parm);
-}
-
static int via_pin_power_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -324,8 +238,7 @@ static int via_pin_power_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct via_spec *spec = codec->spec;
- ucontrol->value.enumerated.item[0] = !spec->no_pin_power_ctl;
+ ucontrol->value.enumerated.item[0] = codec->power_save_node;
return 0;
}
@@ -334,12 +247,12 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct via_spec *spec = codec->spec;
- unsigned int val = !ucontrol->value.enumerated.item[0];
+ bool val = !!ucontrol->value.enumerated.item[0];
- if (val == spec->no_pin_power_ctl)
+ if (val == codec->power_save_node)
return 0;
- spec->no_pin_power_ctl = val;
- set_widgets_power_state(codec);
+ codec->power_save_node = val;
+ spec->gen.power_down_unused = val;
analog_low_current_mode(codec);
return 1;
}
@@ -355,6 +268,59 @@ static const struct snd_kcontrol_new via_pin_power_ctl_enum[] = {
{} /* terminator */
};
+#ifdef CONFIG_SND_HDA_INPUT_BEEP
+static inline void set_beep_amp(struct via_spec *spec, hda_nid_t nid,
+ int idx, int dir)
+{
+ spec->gen.beep_nid = nid;
+ spec->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir);
+}
+
+/* additional beep mixers; the actual parameters are overwritten at build */
+static const struct snd_kcontrol_new cxt_beep_mixer[] = {
+ HDA_CODEC_VOLUME_MONO("Beep Playback Volume", 0, 1, 0, HDA_OUTPUT),
+ HDA_CODEC_MUTE_BEEP_MONO("Beep Playback Switch", 0, 1, 0, HDA_OUTPUT),
+ { } /* end */
+};
+
+/* create beep controls if needed */
+static int add_beep_ctls(struct hda_codec *codec)
+{
+ struct via_spec *spec = codec->spec;
+ int err;
+
+ if (spec->beep_amp) {
+ const struct snd_kcontrol_new *knew;
+ for (knew = cxt_beep_mixer; knew->name; knew++) {
+ struct snd_kcontrol *kctl;
+ kctl = snd_ctl_new1(knew, codec);
+ if (!kctl)
+ return -ENOMEM;
+ kctl->private_value = spec->beep_amp;
+ err = snd_hda_ctl_add(codec, 0, kctl);
+ if (err < 0)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void auto_parse_beep(struct hda_codec *codec)
+{
+ struct via_spec *spec = codec->spec;
+ hda_nid_t nid;
+
+ for_each_hda_codec_node(nid, codec)
+ if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP) {
+ set_beep_amp(spec, nid, 0, HDA_OUTPUT);
+ break;
+ }
+}
+#else
+#define set_beep_amp(spec, nid, idx, dir) /* NOP */
+#define add_beep_ctls(codec) 0
+#define auto_parse_beep(codec)
+#endif
/* check AA path's mute status */
static bool is_aa_path_mute(struct hda_codec *codec)
@@ -384,7 +350,7 @@ static void __analog_low_current_mode(struct hda_codec *codec, bool force)
bool enable;
unsigned int verb, parm;
- if (spec->no_pin_power_ctl)
+ if (!codec->power_save_node)
enable = false;
else
enable = is_aa_path_mute(codec) && !spec->gen.active_streams;
@@ -424,7 +390,7 @@ static void __analog_low_current_mode(struct hda_codec *codec, bool force)
return; /* other codecs are not supported */
}
/* send verb */
- snd_hda_codec_write(codec, codec->afg, 0, verb, parm);
+ snd_hda_codec_write(codec, codec->core.afg, 0, verb, parm);
}
static void analog_low_current_mode(struct hda_codec *codec)
@@ -441,8 +407,11 @@ static int via_build_controls(struct hda_codec *codec)
if (err < 0)
return err;
- if (spec->set_widgets_power_state)
- spec->mixers[spec->num_mixers++] = via_pin_power_ctl_enum;
+ err = add_beep_ctls(codec);
+ if (err < 0)
+ return err;
+
+ spec->mixers[spec->num_mixers++] = via_pin_power_ctl_enum;
for (i = 0; i < spec->num_mixers; i++) {
err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
@@ -486,7 +455,6 @@ static int via_suspend(struct hda_codec *codec)
static int via_check_power_status(struct hda_codec *codec, hda_nid_t nid)
{
struct via_spec *spec = codec->spec;
- set_widgets_power_state(codec);
analog_low_current_mode(codec);
vt1708_update_hp_work(codec);
return snd_hda_check_amp_list_power(codec, &spec->gen.loopback, nid);
@@ -504,6 +472,7 @@ static const struct hda_codec_ops via_patch_ops = {
.init = via_init,
.free = via_free,
.unsol_event = snd_hda_jack_unsol_event,
+ .stream_pm = snd_hda_gen_stream_pm,
#ifdef CONFIG_PM
.suspend = via_suspend,
.check_power_status = via_check_power_status,
@@ -574,34 +543,6 @@ static const struct snd_kcontrol_new vt1708_jack_detect_ctl[] = {
{} /* terminator */
};
-static void via_jack_powerstate_event(struct hda_codec *codec,
- struct hda_jack_callback *tbl)
-{
- set_widgets_power_state(codec);
-}
-
-static void via_set_jack_unsol_events(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- struct auto_pin_cfg *cfg = &spec->gen.autocfg;
- hda_nid_t pin;
- int i;
-
- for (i = 0; i < cfg->line_outs; i++) {
- pin = cfg->line_out_pins[i];
- if (pin && is_jack_detectable(codec, pin))
- snd_hda_jack_detect_enable_callback(codec, pin,
- via_jack_powerstate_event);
- }
-
- for (i = 0; i < cfg->num_inputs; i++) {
- pin = cfg->line_out_pins[i];
- if (pin && is_jack_detectable(codec, pin))
- snd_hda_jack_detect_enable_callback(codec, pin,
- via_jack_powerstate_event);
- }
-}
-
static const struct badness_table via_main_out_badness = {
.no_primary_dac = 0x10000,
.no_dac = 0x4000,
@@ -631,11 +572,15 @@ static int via_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
+ auto_parse_beep(codec);
+
err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
if (err < 0)
return err;
- via_set_jack_unsol_events(codec);
+ /* disable widget PM at start for compatibility */
+ codec->power_save_node = 0;
+ spec->gen.power_down_unused = 0;
return 0;
}
@@ -648,7 +593,6 @@ static int via_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->init_verbs[i]);
/* init power states */
- set_widgets_power_state(codec);
__analog_low_current_mode(codec, true);
snd_hda_gen_init(codec);
@@ -676,15 +620,17 @@ static int vt1708_build_pcms(struct hda_codec *codec)
int i, err;
err = snd_hda_gen_build_pcms(codec);
- if (err < 0 || codec->vendor_id != 0x11061708)
+ if (err < 0 || codec->core.vendor_id != 0x11061708)
return err;
/* We got noisy outputs on the right channel on VT1708 when
* 24bit samples are used. Until any workaround is found,
* disable the 24bit format, so far.
*/
- for (i = 0; i < codec->num_pcms; i++) {
- struct hda_pcm *info = &spec->gen.pcm_rec[i];
+ for (i = 0; i < ARRAY_SIZE(spec->gen.pcm_rec); i++) {
+ struct hda_pcm *info = spec->gen.pcm_rec[i];
+ if (!info)
+ continue;
if (!info->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams ||
info->pcm_type != HDA_PCM_TYPE_AUDIO)
continue;
@@ -766,78 +712,6 @@ static int patch_vt1709(struct hda_codec *codec)
return 0;
}
-static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- int imux_is_smixer;
- unsigned int parm;
- int is_8ch = 0;
- if ((spec->codec_type != VT1708B_4CH) &&
- (codec->vendor_id != 0x11064397))
- is_8ch = 1;
-
- /* SW0 (17h) = stereo mixer */
- imux_is_smixer =
- (snd_hda_codec_read(codec, 0x17, 0, AC_VERB_GET_CONNECT_SEL, 0x00)
- == ((spec->codec_type == VT1708S) ? 5 : 0));
- /* inputs */
- /* PW 1/2/5 (1ah/1bh/1eh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x1a, &parm);
- set_pin_power_state(codec, 0x1b, &parm);
- set_pin_power_state(codec, 0x1e, &parm);
- if (imux_is_smixer)
- parm = AC_PWRST_D0;
- /* SW0 (17h), AIW 0/1 (13h/14h) */
- update_power_state(codec, 0x17, parm);
- update_power_state(codec, 0x13, parm);
- update_power_state(codec, 0x14, parm);
-
- /* outputs */
- /* PW0 (19h), SW1 (18h), AOW1 (11h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x19, &parm);
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x1b, &parm);
- update_power_state(codec, 0x18, parm);
- update_power_state(codec, 0x11, parm);
-
- /* PW6 (22h), SW2 (26h), AOW2 (24h) */
- if (is_8ch) {
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x22, &parm);
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x1a, &parm);
- update_power_state(codec, 0x26, parm);
- update_power_state(codec, 0x24, parm);
- } else if (codec->vendor_id == 0x11064397) {
- /* PW7(23h), SW2(27h), AOW2(25h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x23, &parm);
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x1a, &parm);
- update_power_state(codec, 0x27, parm);
- update_power_state(codec, 0x25, parm);
- }
-
- /* PW 3/4/7 (1ch/1dh/23h) */
- parm = AC_PWRST_D3;
- /* force to D0 for internal Speaker */
- set_pin_power_state(codec, 0x1c, &parm);
- set_pin_power_state(codec, 0x1d, &parm);
- if (is_8ch)
- set_pin_power_state(codec, 0x23, &parm);
-
- /* MW0 (16h), Sw3 (27h), AOW 0/3 (10h/25h) */
- update_power_state(codec, 0x16, imux_is_smixer ? AC_PWRST_D0 : parm);
- update_power_state(codec, 0x10, parm);
- if (is_8ch) {
- update_power_state(codec, 0x25, parm);
- update_power_state(codec, 0x27, parm);
- } else if (codec->vendor_id == 0x11064397 && spec->gen.indep_hp_enabled)
- update_power_state(codec, 0x25, parm);
-}
-
static int patch_vt1708S(struct hda_codec *codec);
static int patch_vt1708B(struct hda_codec *codec)
{
@@ -862,9 +736,6 @@ static int patch_vt1708B(struct hda_codec *codec)
}
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt1708B;
-
return 0;
}
@@ -905,19 +776,19 @@ static int patch_vt1708S(struct hda_codec *codec)
/* correct names for VT1708BCE */
if (get_codec_type(codec) == VT1708BCE) {
- kfree(codec->chip_name);
- codec->chip_name = kstrdup("VT1708BCE", GFP_KERNEL);
- snprintf(codec->bus->card->mixername,
- sizeof(codec->bus->card->mixername),
- "%s %s", codec->vendor_name, codec->chip_name);
+ kfree(codec->core.chip_name);
+ codec->core.chip_name = kstrdup("VT1708BCE", GFP_KERNEL);
+ snprintf(codec->card->mixername,
+ sizeof(codec->card->mixername),
+ "%s %s", codec->core.vendor_name, codec->core.chip_name);
}
/* correct names for VT1705 */
- if (codec->vendor_id == 0x11064397) {
- kfree(codec->chip_name);
- codec->chip_name = kstrdup("VT1705", GFP_KERNEL);
- snprintf(codec->bus->card->mixername,
- sizeof(codec->bus->card->mixername),
- "%s %s", codec->vendor_name, codec->chip_name);
+ if (codec->core.vendor_id == 0x11064397) {
+ kfree(codec->core.chip_name);
+ codec->core.chip_name = kstrdup("VT1705", GFP_KERNEL);
+ snprintf(codec->card->mixername,
+ sizeof(codec->card->mixername),
+ "%s %s", codec->core.vendor_name, codec->core.chip_name);
}
/* automatic parse from the BIOS config */
@@ -930,8 +801,6 @@ static int patch_vt1708S(struct hda_codec *codec)
spec->init_verbs[spec->num_iverbs++] = vt1708S_init_verbs;
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt1708B;
return 0;
}
@@ -945,36 +814,6 @@ static const struct hda_verb vt1702_init_verbs[] = {
{ }
};
-static void set_widgets_power_state_vt1702(struct hda_codec *codec)
-{
- int imux_is_smixer =
- snd_hda_codec_read(codec, 0x13, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 3;
- unsigned int parm;
- /* inputs */
- /* PW 1/2/5 (14h/15h/18h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x14, &parm);
- set_pin_power_state(codec, 0x15, &parm);
- set_pin_power_state(codec, 0x18, &parm);
- if (imux_is_smixer)
- parm = AC_PWRST_D0; /* SW0 (13h) = stereo mixer (idx 3) */
- /* SW0 (13h), AIW 0/1/2 (12h/1fh/20h) */
- update_power_state(codec, 0x13, parm);
- update_power_state(codec, 0x12, parm);
- update_power_state(codec, 0x1f, parm);
- update_power_state(codec, 0x20, parm);
-
- /* outputs */
- /* PW 3/4 (16h/17h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x17, &parm);
- set_pin_power_state(codec, 0x16, &parm);
- /* MW0 (1ah), AOW 0/1 (10h/1dh) */
- update_power_state(codec, 0x1a, imux_is_smixer ? AC_PWRST_D0 : parm);
- update_power_state(codec, 0x10, parm);
- update_power_state(codec, 0x1d, parm);
-}
-
static int patch_vt1702(struct hda_codec *codec)
{
struct via_spec *spec;
@@ -1004,8 +843,6 @@ static int patch_vt1702(struct hda_codec *codec)
spec->init_verbs[spec->num_iverbs++] = vt1702_init_verbs;
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt1702;
return 0;
}
@@ -1020,71 +857,6 @@ static const struct hda_verb vt1718S_init_verbs[] = {
{ }
};
-static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- int imux_is_smixer;
- unsigned int parm, parm2;
- /* MUX6 (1eh) = stereo mixer */
- imux_is_smixer =
- snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
- /* inputs */
- /* PW 5/6/7 (29h/2ah/2bh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x29, &parm);
- set_pin_power_state(codec, 0x2a, &parm);
- set_pin_power_state(codec, 0x2b, &parm);
- if (imux_is_smixer)
- parm = AC_PWRST_D0;
- /* MUX6/7 (1eh/1fh), AIW 0/1 (10h/11h) */
- update_power_state(codec, 0x1e, parm);
- update_power_state(codec, 0x1f, parm);
- update_power_state(codec, 0x10, parm);
- update_power_state(codec, 0x11, parm);
-
- /* outputs */
- /* PW3 (27h), MW2 (1ah), AOW3 (bh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x27, &parm);
- update_power_state(codec, 0x1a, parm);
- parm2 = parm; /* for pin 0x0b */
-
- /* PW2 (26h), AOW2 (ah) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x26, &parm);
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x2b, &parm);
- update_power_state(codec, 0xa, parm);
-
- /* PW0 (24h), AOW0 (8h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x24, &parm);
- if (!spec->gen.indep_hp_enabled) /* check for redirected HP */
- set_pin_power_state(codec, 0x28, &parm);
- update_power_state(codec, 0x8, parm);
- if (!spec->gen.indep_hp_enabled && parm2 != AC_PWRST_D3)
- parm = parm2;
- update_power_state(codec, 0xb, parm);
- /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
- update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm);
-
- /* PW1 (25h), AOW1 (9h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x25, &parm);
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x2a, &parm);
- update_power_state(codec, 0x9, parm);
-
- if (spec->gen.indep_hp_enabled) {
- /* PW4 (28h), MW3 (1bh), MUX1(34h), AOW4 (ch) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x28, &parm);
- update_power_state(codec, 0x1b, parm);
- update_power_state(codec, 0x34, parm);
- update_power_state(codec, 0xc, parm);
- }
-}
-
/* Add a connection to the primary DAC from AA-mixer for some codecs
* This isn't listed from the raw info, but the chip has a secret connection.
*/
@@ -1105,8 +877,7 @@ static int add_secret_dac_path(struct hda_codec *codec)
}
/* find the primary DAC and add to the connection list */
- nid = codec->start_nid;
- for (i = 0; i < codec->num_nodes; i++, nid++) {
+ for_each_hda_codec_node(nid, codec) {
unsigned int caps = get_wcaps(codec, nid);
if (get_wcaps_type(caps) == AC_WID_AUD_OUT &&
!(caps & AC_WCAP_DIGITAL)) {
@@ -1145,9 +916,6 @@ static int patch_vt1718S(struct hda_codec *codec)
spec->init_verbs[spec->num_iverbs++] = vt1718S_init_verbs;
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt1718S;
-
return 0;
}
@@ -1187,7 +955,6 @@ static int vt1716s_dmic_put(struct snd_kcontrol *kcontrol,
snd_hda_codec_write(codec, 0x26, 0,
AC_VERB_SET_CONNECT_SEL, index);
spec->dmic_enabled = index;
- set_widgets_power_state(codec);
return 1;
}
@@ -1222,95 +989,6 @@ static const struct hda_verb vt1716S_init_verbs[] = {
{ }
};
-static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- int imux_is_smixer;
- unsigned int parm;
- unsigned int mono_out, present;
- /* SW0 (17h) = stereo mixer */
- imux_is_smixer =
- (snd_hda_codec_read(codec, 0x17, 0,
- AC_VERB_GET_CONNECT_SEL, 0x00) == 5);
- /* inputs */
- /* PW 1/2/5 (1ah/1bh/1eh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x1a, &parm);
- set_pin_power_state(codec, 0x1b, &parm);
- set_pin_power_state(codec, 0x1e, &parm);
- if (imux_is_smixer)
- parm = AC_PWRST_D0;
- /* SW0 (17h), AIW0(13h) */
- update_power_state(codec, 0x17, parm);
- update_power_state(codec, 0x13, parm);
-
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x1e, &parm);
- /* PW11 (22h) */
- if (spec->dmic_enabled)
- set_pin_power_state(codec, 0x22, &parm);
- else
- update_power_state(codec, 0x22, AC_PWRST_D3);
-
- /* SW2(26h), AIW1(14h) */
- update_power_state(codec, 0x26, parm);
- update_power_state(codec, 0x14, parm);
-
- /* outputs */
- /* PW0 (19h), SW1 (18h), AOW1 (11h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x19, &parm);
- /* Smart 5.1 PW2(1bh) */
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x1b, &parm);
- update_power_state(codec, 0x18, parm);
- update_power_state(codec, 0x11, parm);
-
- /* PW7 (23h), SW3 (27h), AOW3 (25h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x23, &parm);
- /* Smart 5.1 PW1(1ah) */
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x1a, &parm);
- update_power_state(codec, 0x27, parm);
-
- /* Smart 5.1 PW5(1eh) */
- if (smart51_enabled(codec))
- set_pin_power_state(codec, 0x1e, &parm);
- update_power_state(codec, 0x25, parm);
-
- /* Mono out */
- /* SW4(28h)->MW1(29h)-> PW12 (2ah)*/
- present = snd_hda_jack_detect(codec, 0x1c);
-
- if (present)
- mono_out = 0;
- else {
- present = snd_hda_jack_detect(codec, 0x1d);
- if (!spec->gen.indep_hp_enabled && present)
- mono_out = 0;
- else
- mono_out = 1;
- }
- parm = mono_out ? AC_PWRST_D0 : AC_PWRST_D3;
- update_power_state(codec, 0x28, parm);
- update_power_state(codec, 0x29, parm);
- update_power_state(codec, 0x2a, parm);
-
- /* PW 3/4 (1ch/1dh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x1c, &parm);
- set_pin_power_state(codec, 0x1d, &parm);
- /* HP Independent Mode, power on AOW3 */
- if (spec->gen.indep_hp_enabled)
- update_power_state(codec, 0x25, parm);
-
- /* force to D0 for internal Speaker */
- /* MW0 (16h), AOW0 (10h) */
- update_power_state(codec, 0x16, imux_is_smixer ? AC_PWRST_D0 : parm);
- update_power_state(codec, 0x10, mono_out ? AC_PWRST_D0 : parm);
-}
-
static int patch_vt1716S(struct hda_codec *codec)
{
struct via_spec *spec;
@@ -1338,8 +1016,6 @@ static int patch_vt1716S(struct hda_codec *codec)
spec->mixers[spec->num_mixers++] = vt1716S_mono_out_mixer;
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt1716S;
return 0;
}
@@ -1365,98 +1041,6 @@ static const struct hda_verb vt1802_init_verbs[] = {
{ }
};
-static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- int imux_is_smixer;
- unsigned int parm;
- unsigned int present;
- /* MUX9 (1eh) = stereo mixer */
- imux_is_smixer =
- snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 3;
- /* inputs */
- /* PW 5/6/7 (29h/2ah/2bh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x29, &parm);
- set_pin_power_state(codec, 0x2a, &parm);
- set_pin_power_state(codec, 0x2b, &parm);
- parm = AC_PWRST_D0;
- /* MUX9/10 (1eh/1fh), AIW 0/1 (10h/11h) */
- update_power_state(codec, 0x1e, parm);
- update_power_state(codec, 0x1f, parm);
- update_power_state(codec, 0x10, parm);
- update_power_state(codec, 0x11, parm);
-
- /* outputs */
- /* AOW0 (8h)*/
- update_power_state(codec, 0x8, parm);
-
- if (spec->codec_type == VT1802) {
- /* PW4 (28h), MW4 (18h), MUX4(38h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x28, &parm);
- update_power_state(codec, 0x18, parm);
- update_power_state(codec, 0x38, parm);
- } else {
- /* PW4 (26h), MW4 (1ch), MUX4(37h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x26, &parm);
- update_power_state(codec, 0x1c, parm);
- update_power_state(codec, 0x37, parm);
- }
-
- if (spec->codec_type == VT1802) {
- /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x25, &parm);
- update_power_state(codec, 0x15, parm);
- update_power_state(codec, 0x35, parm);
- } else {
- /* PW1 (25h), MW1 (19h), MUX1(35h), AOW1 (9h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x25, &parm);
- update_power_state(codec, 0x19, parm);
- update_power_state(codec, 0x35, parm);
- }
-
- if (spec->gen.indep_hp_enabled)
- update_power_state(codec, 0x9, AC_PWRST_D0);
-
- /* Class-D */
- /* PW0 (24h), MW0(18h/14h), MUX0(34h) */
- present = snd_hda_jack_detect(codec, 0x25);
-
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x24, &parm);
- parm = present ? AC_PWRST_D3 : AC_PWRST_D0;
- if (spec->codec_type == VT1802)
- update_power_state(codec, 0x14, parm);
- else
- update_power_state(codec, 0x18, parm);
- update_power_state(codec, 0x34, parm);
-
- /* Mono Out */
- present = snd_hda_jack_detect(codec, 0x26);
-
- parm = present ? AC_PWRST_D3 : AC_PWRST_D0;
- if (spec->codec_type == VT1802) {
- /* PW15 (33h), MW8(1ch), MUX8(3ch) */
- update_power_state(codec, 0x33, parm);
- update_power_state(codec, 0x1c, parm);
- update_power_state(codec, 0x3c, parm);
- } else {
- /* PW15 (31h), MW8(17h), MUX8(3bh) */
- update_power_state(codec, 0x31, parm);
- update_power_state(codec, 0x17, parm);
- update_power_state(codec, 0x3b, parm);
- }
- /* MW9 (21h) */
- if (imux_is_smixer || !is_aa_path_mute(codec))
- update_power_state(codec, 0x21, AC_PWRST_D0);
- else
- update_power_state(codec, 0x21, AC_PWRST_D3);
-}
-
/*
* pin fix-up
*/
@@ -1540,8 +1124,6 @@ static int patch_vt2002P(struct hda_codec *codec)
spec->init_verbs[spec->num_iverbs++] = vt2002P_init_verbs;
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt2002P;
return 0;
}
@@ -1555,81 +1137,6 @@ static const struct hda_verb vt1812_init_verbs[] = {
{ }
};
-static void set_widgets_power_state_vt1812(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- unsigned int parm;
- unsigned int present;
- /* inputs */
- /* PW 5/6/7 (29h/2ah/2bh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x29, &parm);
- set_pin_power_state(codec, 0x2a, &parm);
- set_pin_power_state(codec, 0x2b, &parm);
- parm = AC_PWRST_D0;
- /* MUX10/11 (1eh/1fh), AIW 0/1 (10h/11h) */
- update_power_state(codec, 0x1e, parm);
- update_power_state(codec, 0x1f, parm);
- update_power_state(codec, 0x10, parm);
- update_power_state(codec, 0x11, parm);
-
- /* outputs */
- /* AOW0 (8h)*/
- update_power_state(codec, 0x8, AC_PWRST_D0);
-
- /* PW4 (28h), MW4 (18h), MUX4(38h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x28, &parm);
- update_power_state(codec, 0x18, parm);
- update_power_state(codec, 0x38, parm);
-
- /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x25, &parm);
- update_power_state(codec, 0x15, parm);
- update_power_state(codec, 0x35, parm);
- if (spec->gen.indep_hp_enabled)
- update_power_state(codec, 0x9, AC_PWRST_D0);
-
- /* Internal Speaker */
- /* PW0 (24h), MW0(14h), MUX0(34h) */
- present = snd_hda_jack_detect(codec, 0x25);
-
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x24, &parm);
- if (present) {
- update_power_state(codec, 0x14, AC_PWRST_D3);
- update_power_state(codec, 0x34, AC_PWRST_D3);
- } else {
- update_power_state(codec, 0x14, AC_PWRST_D0);
- update_power_state(codec, 0x34, AC_PWRST_D0);
- }
-
-
- /* Mono Out */
- /* PW13 (31h), MW13(1ch), MUX13(3ch), MW14(3eh) */
- present = snd_hda_jack_detect(codec, 0x28);
-
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x31, &parm);
- if (present) {
- update_power_state(codec, 0x1c, AC_PWRST_D3);
- update_power_state(codec, 0x3c, AC_PWRST_D3);
- update_power_state(codec, 0x3e, AC_PWRST_D3);
- } else {
- update_power_state(codec, 0x1c, AC_PWRST_D0);
- update_power_state(codec, 0x3c, AC_PWRST_D0);
- update_power_state(codec, 0x3e, AC_PWRST_D0);
- }
-
- /* PW15 (33h), MW15 (1dh), MUX15(3dh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x33, &parm);
- update_power_state(codec, 0x1d, parm);
- update_power_state(codec, 0x3d, parm);
-
-}
-
/* patch for vt1812 */
static int patch_vt1812(struct hda_codec *codec)
{
@@ -1656,8 +1163,6 @@ static int patch_vt1812(struct hda_codec *codec)
spec->init_verbs[spec->num_iverbs++] = vt1812_init_verbs;
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt1812;
return 0;
}
@@ -1673,84 +1178,6 @@ static const struct hda_verb vt3476_init_verbs[] = {
{ }
};
-static void set_widgets_power_state_vt3476(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- int imux_is_smixer;
- unsigned int parm, parm2;
- /* MUX10 (1eh) = stereo mixer */
- imux_is_smixer =
- snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 4;
- /* inputs */
- /* PW 5/6/7 (29h/2ah/2bh) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x29, &parm);
- set_pin_power_state(codec, 0x2a, &parm);
- set_pin_power_state(codec, 0x2b, &parm);
- if (imux_is_smixer)
- parm = AC_PWRST_D0;
- /* MUX10/11 (1eh/1fh), AIW 0/1 (10h/11h) */
- update_power_state(codec, 0x1e, parm);
- update_power_state(codec, 0x1f, parm);
- update_power_state(codec, 0x10, parm);
- update_power_state(codec, 0x11, parm);
-
- /* outputs */
- /* PW3 (27h), MW3(37h), AOW3 (bh) */
- if (spec->codec_type == VT1705CF) {
- parm = AC_PWRST_D3;
- update_power_state(codec, 0x27, parm);
- update_power_state(codec, 0x37, parm);
- } else {
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x27, &parm);
- update_power_state(codec, 0x37, parm);
- }
-
- /* PW2 (26h), MW2(36h), AOW2 (ah) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x26, &parm);
- update_power_state(codec, 0x36, parm);
- if (smart51_enabled(codec)) {
- /* PW7(2bh), MW7(3bh), MUX7(1Bh) */
- set_pin_power_state(codec, 0x2b, &parm);
- update_power_state(codec, 0x3b, parm);
- update_power_state(codec, 0x1b, parm);
- }
- update_conv_power_state(codec, 0xa, parm, 2);
-
- /* PW1 (25h), MW1(35h), AOW1 (9h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x25, &parm);
- update_power_state(codec, 0x35, parm);
- if (smart51_enabled(codec)) {
- /* PW6(2ah), MW6(3ah), MUX6(1ah) */
- set_pin_power_state(codec, 0x2a, &parm);
- update_power_state(codec, 0x3a, parm);
- update_power_state(codec, 0x1a, parm);
- }
- update_conv_power_state(codec, 0x9, parm, 1);
-
- /* PW4 (28h), MW4 (38h), MUX4(18h), AOW3(bh)/AOW0(8h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x28, &parm);
- update_power_state(codec, 0x38, parm);
- update_power_state(codec, 0x18, parm);
- if (spec->gen.indep_hp_enabled)
- update_conv_power_state(codec, 0xb, parm, 3);
- parm2 = parm; /* for pin 0x0b */
-
- /* PW0 (24h), MW0(34h), MW9(3fh), AOW0 (8h) */
- parm = AC_PWRST_D3;
- set_pin_power_state(codec, 0x24, &parm);
- update_power_state(codec, 0x34, parm);
- if (!spec->gen.indep_hp_enabled && parm2 != AC_PWRST_D3)
- parm = parm2;
- update_conv_power_state(codec, 0x8, parm, 0);
- /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
- update_power_state(codec, 0x3f, imux_is_smixer ? AC_PWRST_D0 : parm);
-}
-
static int patch_vt3476(struct hda_codec *codec)
{
struct via_spec *spec;
@@ -1774,9 +1201,6 @@ static int patch_vt3476(struct hda_codec *codec)
spec->init_verbs[spec->num_iverbs++] = vt3476_init_verbs;
codec->patch_ops = via_patch_ops;
-
- spec->set_widgets_power_state = set_widgets_power_state_vt3476;
-
return 0;
}
@@ -1884,23 +1308,11 @@ static const struct hda_codec_preset snd_hda_preset_via[] = {
MODULE_ALIAS("snd-hda-codec-id:1106*");
-static struct hda_codec_preset_list via_list = {
+static struct hda_codec_driver via_driver = {
.preset = snd_hda_preset_via,
- .owner = THIS_MODULE,
};
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA HD-audio codec");
-static int __init patch_via_init(void)
-{
- return snd_hda_add_codec_preset(&via_list);
-}
-
-static void __exit patch_via_exit(void)
-{
- snd_hda_delete_codec_preset(&via_list);
-}
-
-module_init(patch_via_init)
-module_exit(patch_via_exit)
+module_hda_codec_driver(via_driver);
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 6ba0b5517c40..d51703e30523 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -21,7 +21,7 @@ static acpi_status acpi_check_cb(acpi_handle handle, u32 lvl, void *context,
static bool is_thinkpad(struct hda_codec *codec)
{
bool found = false;
- if (codec->subsystem_id >> 16 != 0x17aa)
+ if (codec->core.subsystem_id >> 16 != 0x17aa)
return false;
if (ACPI_SUCCESS(acpi_get_devices("LEN0068", acpi_check_cb, &found, NULL)) && found)
return true;
@@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
old_vmaster_hook = spec->vmaster_mute.hook;
spec->vmaster_mute.hook = update_tpacpi_mute_led;
+ spec->vmaster_mute_enum = 1;
removefunc = false;
}
if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
diff --git a/sound/pci/ice1712/wtm.c b/sound/pci/ice1712/wtm.c
index bcf30a387b87..9906119e0954 100644
--- a/sound/pci/ice1712/wtm.c
+++ b/sound/pci/ice1712/wtm.c
@@ -29,12 +29,19 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <sound/core.h>
+#include <sound/tlv.h>
+#include <linux/slab.h>
#include "ice1712.h"
#include "envy24ht.h"
#include "wtm.h"
#include "stac946x.h"
+struct wtm_spec {
+ /* rate change needs atomic mute/unmute of all DACs */
+ struct mutex mute_mutex;
+};
+
/*
* 2*ADC 6*DAC no1 ringbuffer r/w on i2c bus
@@ -68,15 +75,65 @@ static inline unsigned char stac9460_2_get(struct snd_ice1712 *ice, int reg)
/*
* DAC mute control
*/
+static void stac9460_dac_mute_all(struct snd_ice1712 *ice, unsigned char mute,
+ unsigned short int *change_mask)
+{
+ unsigned char new, old;
+ int id, idx, change;
+
+ /*stac9460 1*/
+ for (id = 0; id < 7; id++) {
+ if (*change_mask & (0x01 << id)) {
+ if (id == 0)
+ idx = STAC946X_MASTER_VOLUME;
+ else
+ idx = STAC946X_LF_VOLUME - 1 + id;
+ old = stac9460_get(ice, idx);
+ new = (~mute << 7 & 0x80) | (old & ~0x80);
+ change = (new != old);
+ if (change) {
+ stac9460_put(ice, idx, new);
+ *change_mask = *change_mask | (0x01 << id);
+ } else {
+ *change_mask = *change_mask & ~(0x01 << id);
+ }
+ }
+ }
+
+ /*stac9460 2*/
+ for (id = 0; id < 3; id++) {
+ if (*change_mask & (0x01 << (id + 7))) {
+ if (id == 0)
+ idx = STAC946X_MASTER_VOLUME;
+ else
+ idx = STAC946X_LF_VOLUME - 1 + id;
+ old = stac9460_2_get(ice, idx);
+ new = (~mute << 7 & 0x80) | (old & ~0x80);
+ change = (new != old);
+ if (change) {
+ stac9460_2_put(ice, idx, new);
+ *change_mask = *change_mask | (0x01 << (id + 7));
+ } else {
+ *change_mask = *change_mask & ~(0x01 << (id + 7));
+ }
+ }
+ }
+}
+
+
+
#define stac9460_dac_mute_info snd_ctl_boolean_mono_info
static int stac9460_dac_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
+ struct wtm_spec *spec = ice->spec;
unsigned char val;
int idx, id;
+ mutex_lock(&spec->mute_mutex);
+
if (kcontrol->private_value) {
idx = STAC946X_MASTER_VOLUME;
id = 0;
@@ -89,6 +146,8 @@ static int stac9460_dac_mute_get(struct snd_kcontrol *kcontrol,
else
val = stac9460_2_get(ice, idx - 6);
ucontrol->value.integer.value[0] = (~val >> 7) & 0x1;
+
+ mutex_unlock(&spec->mute_mutex);
return 0;
}
@@ -338,8 +397,14 @@ static int stac9460_adc_vol_put(struct snd_kcontrol *kcontrol,
/*
* MIC / LINE switch fonction
*/
+static int stac9460_mic_sw_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char * const texts[2] = { "Line In", "Mic" };
+
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
+}
-#define stac9460_mic_sw_info snd_ctl_boolean_mono_info
static int stac9460_mic_sw_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -353,7 +418,7 @@ static int stac9460_mic_sw_get(struct snd_kcontrol *kcontrol,
val = stac9460_get(ice, STAC946X_GENERAL_PURPOSE);
else
val = stac9460_2_get(ice, STAC946X_GENERAL_PURPOSE);
- ucontrol->value.integer.value[0] = ~val>>7 & 0x1;
+ ucontrol->value.enumerated.item[0] = (val >> 7) & 0x1;
return 0;
}
@@ -369,7 +434,7 @@ static int stac9460_mic_sw_put(struct snd_kcontrol *kcontrol,
old = stac9460_get(ice, STAC946X_GENERAL_PURPOSE);
else
old = stac9460_2_get(ice, STAC946X_GENERAL_PURPOSE);
- new = (~ucontrol->value.integer.value[0] << 7 & 0x80) | (old & ~0x80);
+ new = (ucontrol->value.enumerated.item[0] << 7 & 0x80) | (old & ~0x80);
change = (new != old);
if (change) {
if (id == 0)
@@ -380,17 +445,63 @@ static int stac9460_mic_sw_put(struct snd_kcontrol *kcontrol,
return change;
}
+
+/*
+ * Handler for setting correct codec rate - called when rate change is detected
+ */
+static void stac9460_set_rate_val(struct snd_ice1712 *ice, unsigned int rate)
+{
+ unsigned char old, new;
+ unsigned short int changed;
+ struct wtm_spec *spec = ice->spec;
+
+ if (rate == 0) /* no hint - S/PDIF input is master, simply return */
+ return;
+ else if (rate <= 48000)
+ new = 0x08; /* 256x, base rate mode */
+ else if (rate <= 96000)
+ new = 0x11; /* 256x, mid rate mode */
+ else
+ new = 0x12; /* 128x, high rate mode */
+
+ old = stac9460_get(ice, STAC946X_MASTER_CLOCKING);
+ if (old == new)
+ return;
+ /* change detected, setting master clock, muting first */
+ /* due to possible conflicts with mute controls - mutexing */
+ mutex_lock(&spec->mute_mutex);
+ /* we have to remember current mute status for each DAC */
+ changed = 0xFFFF;
+ stac9460_dac_mute_all(ice, 0, &changed);
+ /*printk(KERN_DEBUG "Rate change: %d, new MC: 0x%02x\n", rate, new);*/
+ stac9460_put(ice, STAC946X_MASTER_CLOCKING, new);
+ stac9460_2_put(ice, STAC946X_MASTER_CLOCKING, new);
+ udelay(10);
+ /* unmuting - only originally unmuted dacs -
+ * i.e. those changed when muting */
+ stac9460_dac_mute_all(ice, 1, &changed);
+ mutex_unlock(&spec->mute_mutex);
+}
+
+
+/*Limits value in dB for fader*/
+static const DECLARE_TLV_DB_SCALE(db_scale_dac, -19125, 75, 0);
+static const DECLARE_TLV_DB_SCALE(db_scale_adc, 0, 150, 0);
+
/*
* Control tabs
*/
static struct snd_kcontrol_new stac9640_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Master Playback Switch",
.info = stac9460_dac_mute_info,
.get = stac9460_dac_mute_get,
.put = stac9460_dac_mute_put,
- .private_value = 1
+ .private_value = 1,
+ .tlv = { .p = db_scale_dac }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -402,7 +513,7 @@ static struct snd_kcontrol_new stac9640_controls[] = {
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "MIC/Line switch",
+ .name = "MIC/Line Input Enum",
.count = 2,
.info = stac9460_mic_sw_info,
.get = stac9460_mic_sw_get,
@@ -419,11 +530,15 @@ static struct snd_kcontrol_new stac9640_controls[] = {
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ),
+
.name = "DAC Volume",
.count = 8,
.info = stac9460_dac_vol_info,
.get = stac9460_dac_vol_get,
.put = stac9460_dac_vol_put,
+ .tlv = { .p = db_scale_dac }
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -435,12 +550,15 @@ static struct snd_kcontrol_new stac9640_controls[] = {
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ),
+
.name = "ADC Volume",
.count = 2,
.info = stac9460_adc_vol_info,
.get = stac9460_adc_vol_get,
.put = stac9460_adc_vol_put,
-
+ .tlv = { .p = db_scale_adc }
}
};
@@ -463,41 +581,53 @@ static int wtm_add_controls(struct snd_ice1712 *ice)
static int wtm_init(struct snd_ice1712 *ice)
{
- static unsigned short stac_inits_prodigy[] = {
+ static unsigned short stac_inits_wtm[] = {
STAC946X_RESET, 0,
+ STAC946X_MASTER_CLOCKING, 0x11,
(unsigned short)-1
};
unsigned short *p;
+ struct wtm_spec *spec;
/*WTM 192M*/
ice->num_total_dacs = 8;
ice->num_total_adcs = 4;
ice->force_rdma1 = 1;
+ /*init mutex for dac mute conflict*/
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ ice->spec = spec;
+ mutex_init(&spec->mute_mutex);
+
+
/*initialize codec*/
- p = stac_inits_prodigy;
+ p = stac_inits_wtm;
for (; *p != (unsigned short)-1; p += 2) {
stac9460_put(ice, p[0], p[1]);
stac9460_2_put(ice, p[0], p[1]);
}
+ ice->gpio.set_pro_rate = stac9460_set_rate_val;
return 0;
}
static unsigned char wtm_eeprom[] = {
- 0x47, /*SYSCONF: clock 192KHz, 4ADC, 8DAC */
- 0x80, /* ACLINK : I2S */
- 0xf8, /* I2S: vol; 96k, 24bit, 192k */
- 0xc1 /*SPDIF: out-en, spidf ext out*/,
- 0x9f, /* GPIO_DIR */
- 0xff, /* GPIO_DIR1 */
- 0x7f, /* GPIO_DIR2 */
- 0x9f, /* GPIO_MASK */
- 0xff, /* GPIO_MASK1 */
- 0x7f, /* GPIO_MASK2 */
- 0x16, /* GPIO_STATE */
- 0x80, /* GPIO_STATE1 */
- 0x00, /* GPIO_STATE2 */
+ [ICE_EEP2_SYSCONF] = 0x67, /*SYSCONF: clock 192KHz, mpu401,
+ 4ADC, 8DAC */
+ [ICE_EEP2_ACLINK] = 0x80, /* ACLINK : I2S */
+ [ICE_EEP2_I2S] = 0xf8, /* I2S: vol; 96k, 24bit, 192k */
+ [ICE_EEP2_SPDIF] = 0xc1, /*SPDIF: out-en, spdif ext out*/
+ [ICE_EEP2_GPIO_DIR] = 0x9f,
+ [ICE_EEP2_GPIO_DIR1] = 0xff,
+ [ICE_EEP2_GPIO_DIR2] = 0x7f,
+ [ICE_EEP2_GPIO_MASK] = 0x9f,
+ [ICE_EEP2_GPIO_MASK1] = 0xff,
+ [ICE_EEP2_GPIO_MASK2] = 0x7f,
+ [ICE_EEP2_GPIO_STATE] = 0x16,
+ [ICE_EEP2_GPIO_STATE1] = 0x80,
+ [ICE_EEP2_GPIO_STATE2] = 0x00,
};
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 2c5484eeb963..b120925223ae 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1795,7 +1795,7 @@ static struct ac97_pcm ac97_pcm_defs[] = {
},
};
-static struct ac97_quirk ac97_quirks[] = {
+static const struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x0e11,
.subdevice = 0x000e,
@@ -3101,13 +3101,13 @@ static int snd_intel8x0_create(struct snd_card *card,
chip->bmaddr = pci_iomap(pci, 3, 0);
else
chip->bmaddr = pci_iomap(pci, 1, 0);
+
+ port_inited:
if (!chip->bmaddr) {
dev_err(card->dev, "Controller space ioremap problem\n");
snd_intel8x0_free(chip);
return -EIO;
}
-
- port_inited:
chip->bdbars_count = bdbars[device_type];
/* initialize offsets */
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index ca67f896d117..cb666c73712d 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6043,23 +6043,30 @@ hdspm_hw_constraints_aes32_sample_rates = {
.mask = 0
};
-static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
+static int snd_hdspm_open(struct snd_pcm_substream *substream)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
+ bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
spin_lock_irq(&hdspm->lock);
-
snd_pcm_set_sync(substream);
+ runtime->hw = (playback) ? snd_hdspm_playback_subinfo :
+ snd_hdspm_capture_subinfo;
+ if (playback) {
+ if (hdspm->capture_substream == NULL)
+ hdspm_stop_audio(hdspm);
- runtime->hw = snd_hdspm_playback_subinfo;
-
- if (hdspm->capture_substream == NULL)
- hdspm_stop_audio(hdspm);
+ hdspm->playback_pid = current->pid;
+ hdspm->playback_substream = substream;
+ } else {
+ if (hdspm->playback_substream == NULL)
+ hdspm_stop_audio(hdspm);
- hdspm->playback_pid = current->pid;
- hdspm->playback_substream = substream;
+ hdspm->capture_pid = current->pid;
+ hdspm->capture_substream = substream;
+ }
spin_unlock_irq(&hdspm->lock);
@@ -6094,108 +6101,42 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
&hdspm_hw_constraints_aes32_sample_rates);
} else {
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- snd_hdspm_hw_rule_rate_out_channels, hdspm,
+ (playback ?
+ snd_hdspm_hw_rule_rate_out_channels :
+ snd_hdspm_hw_rule_rate_in_channels), hdspm,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
}
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_out_channels, hdspm,
+ (playback ? snd_hdspm_hw_rule_out_channels :
+ snd_hdspm_hw_rule_in_channels), hdspm,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_out_channels_rate, hdspm,
+ (playback ? snd_hdspm_hw_rule_out_channels_rate :
+ snd_hdspm_hw_rule_in_channels_rate), hdspm,
SNDRV_PCM_HW_PARAM_RATE, -1);
return 0;
}
-static int snd_hdspm_playback_release(struct snd_pcm_substream *substream)
+static int snd_hdspm_release(struct snd_pcm_substream *substream)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
+ bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
spin_lock_irq(&hdspm->lock);
- hdspm->playback_pid = -1;
- hdspm->playback_substream = NULL;
-
- spin_unlock_irq(&hdspm->lock);
-
- return 0;
-}
-
-
-static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
-{
- struct hdspm *hdspm = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- spin_lock_irq(&hdspm->lock);
- snd_pcm_set_sync(substream);
- runtime->hw = snd_hdspm_capture_subinfo;
-
- if (hdspm->playback_substream == NULL)
- hdspm_stop_audio(hdspm);
-
- hdspm->capture_pid = current->pid;
- hdspm->capture_substream = substream;
-
- spin_unlock_irq(&hdspm->lock);
-
- snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
- snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
-
- switch (hdspm->io_type) {
- case AIO:
- case RayDAT:
- snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- 32, 4096);
- snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
- 16384, 16384);
- break;
-
- default:
- snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- 64, 8192);
- snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS,
- 2, 2);
- break;
- }
-
- if (AES32 == hdspm->io_type) {
- runtime->hw.rates |= SNDRV_PCM_RATE_KNOT;
- snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- &hdspm_hw_constraints_aes32_sample_rates);
+ if (playback) {
+ hdspm->playback_pid = -1;
+ hdspm->playback_substream = NULL;
} else {
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- snd_hdspm_hw_rule_rate_in_channels, hdspm,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ hdspm->capture_pid = -1;
+ hdspm->capture_substream = NULL;
}
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_in_channels, hdspm,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
-
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_hdspm_hw_rule_in_channels_rate, hdspm,
- SNDRV_PCM_HW_PARAM_RATE, -1);
-
- return 0;
-}
-
-static int snd_hdspm_capture_release(struct snd_pcm_substream *substream)
-{
- struct hdspm *hdspm = snd_pcm_substream_chip(substream);
-
- spin_lock_irq(&hdspm->lock);
-
- hdspm->capture_pid = -1;
- hdspm->capture_substream = NULL;
-
spin_unlock_irq(&hdspm->lock);
+
return 0;
}
@@ -6413,21 +6354,9 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
return 0;
}
-static struct snd_pcm_ops snd_hdspm_playback_ops = {
- .open = snd_hdspm_playback_open,
- .close = snd_hdspm_playback_release,
- .ioctl = snd_hdspm_ioctl,
- .hw_params = snd_hdspm_hw_params,
- .hw_free = snd_hdspm_hw_free,
- .prepare = snd_hdspm_prepare,
- .trigger = snd_hdspm_trigger,
- .pointer = snd_hdspm_hw_pointer,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
-static struct snd_pcm_ops snd_hdspm_capture_ops = {
- .open = snd_hdspm_capture_open,
- .close = snd_hdspm_capture_release,
+static struct snd_pcm_ops snd_hdspm_ops = {
+ .open = snd_hdspm_open,
+ .close = snd_hdspm_release,
.ioctl = snd_hdspm_ioctl,
.hw_params = snd_hdspm_hw_params,
.hw_free = snd_hdspm_hw_free,
@@ -6521,9 +6450,9 @@ static int snd_hdspm_create_pcm(struct snd_card *card,
strcpy(pcm->name, hdspm->card_name);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &snd_hdspm_playback_ops);
+ &snd_hdspm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
- &snd_hdspm_capture_ops);
+ &snd_hdspm_ops);
pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 8622283e89f3..3dd038bdb204 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -1812,7 +1812,7 @@ static void snd_via82xx_mixer_free_ac97(struct snd_ac97 *ac97)
chip->ac97 = NULL;
}
-static struct ac97_quirk ac97_quirks[] = {
+static const struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x1106,
.subdevice = 0x4161,
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 13146d701413..0095a80a997f 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -240,7 +240,7 @@ static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec,
*/
spin_lock_irq(&chip->reg_lock);
snd_pmac_dma_stop(rec);
- st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP);
+ chip->extra_dma.cmds->command = cpu_to_le16(DBDMA_STOP);
snd_pmac_dma_set_command(rec, &chip->extra_dma);
snd_pmac_dma_run(rec, RUN);
spin_unlock_irq(&chip->reg_lock);
@@ -251,15 +251,15 @@ static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec,
*/
offset = runtime->dma_addr;
for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) {
- st_le32(&cp->phy_addr, offset);
- st_le16(&cp->req_count, rec->period_size);
- /*st_le16(&cp->res_count, 0);*/
- st_le16(&cp->xfer_status, 0);
+ cp->phy_addr = cpu_to_le32(offset);
+ cp->req_count = cpu_to_le16(rec->period_size);
+ /*cp->res_count = cpu_to_le16(0);*/
+ cp->xfer_status = cpu_to_le16(0);
offset += rec->period_size;
}
/* make loop */
- st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
- st_le32(&cp->cmd_dep, rec->cmd.addr);
+ cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
+ cp->cmd_dep = cpu_to_le32(rec->cmd.addr);
snd_pmac_dma_stop(rec);
snd_pmac_dma_set_command(rec, &rec->cmd);
@@ -328,7 +328,7 @@ static snd_pcm_uframes_t snd_pmac_pcm_pointer(struct snd_pmac *chip,
#if 1 /* hmm.. how can we get the current dma pointer?? */
int stat;
volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period];
- stat = ld_le16(&cp->xfer_status);
+ stat = le16_to_cpu(cp->xfer_status);
if (stat & (ACTIVE|DEAD)) {
count = in_le16(&cp->res_count);
if (count)
@@ -427,26 +427,26 @@ static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec,
memcpy((void *)emergency_dbdma.cmds, (void *)cp,
sizeof(struct dbdma_cmd));
emergency_in_use = 1;
- st_le16(&cp->xfer_status, 0);
- st_le16(&cp->req_count, rec->period_size);
+ cp->xfer_status = cpu_to_le16(0);
+ cp->req_count = cpu_to_le16(rec->period_size);
cp = emergency_dbdma.cmds;
}
/* now bump the values to reflect the amount
we haven't yet shifted */
- req = ld_le16(&cp->req_count);
- res = ld_le16(&cp->res_count);
- phy = ld_le32(&cp->phy_addr);
+ req = le16_to_cpu(cp->req_count);
+ res = le16_to_cpu(cp->res_count);
+ phy = le32_to_cpu(cp->phy_addr);
phy += (req - res);
- st_le16(&cp->req_count, res);
- st_le16(&cp->res_count, 0);
- st_le16(&cp->xfer_status, 0);
- st_le32(&cp->phy_addr, phy);
+ cp->req_count = cpu_to_le16(res);
+ cp->res_count = cpu_to_le16(0);
+ cp->xfer_status = cpu_to_le16(0);
+ cp->phy_addr = cpu_to_le32(phy);
- st_le32(&cp->cmd_dep, rec->cmd.addr
+ cp->cmd_dep = cpu_to_le32(rec->cmd.addr
+ sizeof(struct dbdma_cmd)*((rec->cur_period+1)%rec->nperiods));
- st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS);
+ cp->command = cpu_to_le16(OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS);
/* point at our patched up command block */
out_le32(&rec->dma->cmdptr, emergency_dbdma.addr);
@@ -475,7 +475,7 @@ static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec)
else
cp = &rec->cmd.cmds[rec->cur_period];
- stat = ld_le16(&cp->xfer_status);
+ stat = le16_to_cpu(cp->xfer_status);
if (stat & DEAD) {
snd_pmac_pcm_dead_xfer(rec, cp);
@@ -489,9 +489,9 @@ static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec)
break;
/*printk(KERN_DEBUG "update frag %d\n", rec->cur_period);*/
- st_le16(&cp->xfer_status, 0);
- st_le16(&cp->req_count, rec->period_size);
- /*st_le16(&cp->res_count, 0);*/
+ cp->xfer_status = cpu_to_le16(0);
+ cp->req_count = cpu_to_le16(rec->period_size);
+ /*cp->res_count = cpu_to_le16(0);*/
rec->cur_period++;
if (rec->cur_period >= rec->nperiods) {
rec->cur_period = 0;
@@ -760,11 +760,11 @@ void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long add
struct pmac_stream *rec = &chip->playback;
snd_pmac_dma_stop(rec);
- st_le16(&chip->extra_dma.cmds->req_count, bytes);
- st_le16(&chip->extra_dma.cmds->xfer_status, 0);
- st_le32(&chip->extra_dma.cmds->cmd_dep, chip->extra_dma.addr);
- st_le32(&chip->extra_dma.cmds->phy_addr, addr);
- st_le16(&chip->extra_dma.cmds->command, OUTPUT_MORE + BR_ALWAYS);
+ chip->extra_dma.cmds->req_count = cpu_to_le16(bytes);
+ chip->extra_dma.cmds->xfer_status = cpu_to_le16(0);
+ chip->extra_dma.cmds->cmd_dep = cpu_to_le32(chip->extra_dma.addr);
+ chip->extra_dma.cmds->phy_addr = cpu_to_le32(addr);
+ chip->extra_dma.cmds->command = cpu_to_le16(OUTPUT_MORE + BR_ALWAYS);
out_le32(&chip->awacs->control,
(in_le32(&chip->awacs->control) & ~0x1f00)
| (speed << 8));
@@ -776,7 +776,7 @@ void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long add
void snd_pmac_beep_dma_stop(struct snd_pmac *chip)
{
snd_pmac_dma_stop(&chip->playback);
- st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP);
+ chip->extra_dma.cmds->command = cpu_to_le16(DBDMA_STOP);
snd_pmac_pcm_set_format(chip); /* reset format */
}
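
The pmac.c hunks replace the ppc-only st_le16()/st_le32()/ld_le16() accessors with plain assignments of cpu_to_le16()/cpu_to_le32() values and le16_to_cpu() reads, which work on any architecture. A minimal sketch of the two idioms, using a hypothetical descriptor whose field is little-endian on the bus, like the DBDMA command fields touched above:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical DMA descriptor with a little-endian field. */
struct my_desc {
	__le16 command;
};

static void set_command(struct my_desc *d, u16 cmd)
{
	/* old, ppc-specific:  st_le16(&d->command, cmd);  */
	d->command = cpu_to_le16(cmd);		/* generic replacement */
}

static u16 get_command(const struct my_desc *d)
{
	return le16_to_cpu(d->command);		/* read back in CPU order */
}

On little-endian CPUs these conversions compile to plain loads and stores; on big-endian PowerPC they byte-swap, matching what the removed helpers did.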
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index dcc79aa0236b..3ba52da18bc6 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -47,6 +47,7 @@ source "sound/soc/kirkwood/Kconfig"
source "sound/soc/intel/Kconfig"
source "sound/soc/mxs/Kconfig"
source "sound/soc/pxa/Kconfig"
+source "sound/soc/qcom/Kconfig"
source "sound/soc/rockchip/Kconfig"
source "sound/soc/samsung/Kconfig"
source "sound/soc/sh/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 5b3c8f67c8db..974ba708b482 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SND_SOC) += nuc900/
obj-$(CONFIG_SND_SOC) += omap/
obj-$(CONFIG_SND_SOC) += kirkwood/
obj-$(CONFIG_SND_SOC) += pxa/
+obj-$(CONFIG_SND_SOC) += qcom/
obj-$(CONFIG_SND_SOC) += rockchip/
obj-$(CONFIG_SND_SOC) += samsung/
obj-$(CONFIG_SND_SOC) += sh/
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 1579e994acf8..e7d08806f3e9 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -25,7 +25,8 @@ config SND_ATMEL_SOC_SSC
config SND_AT91_SOC_SAM9G20_WM8731
tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board"
- depends on ARCH_AT91 && ATMEL_SSC && SND_ATMEL_SOC
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on ATMEL_SSC && SND_ATMEL_SOC && SND_SOC_I2C_AND_SPI
select SND_ATMEL_SOC_PDC
select SND_ATMEL_SOC_SSC
select SND_SOC_WM8731
@@ -35,7 +36,8 @@ config SND_AT91_SOC_SAM9G20_WM8731
config SND_ATMEL_SOC_WM8904
tristate "Atmel ASoC driver for boards using WM8904 codec"
- depends on ARCH_AT91 && ATMEL_SSC && SND_ATMEL_SOC && I2C
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on ATMEL_SSC && SND_ATMEL_SOC && I2C
select SND_ATMEL_SOC_SSC
select SND_ATMEL_SOC_DMA
select SND_SOC_WM8904
@@ -45,7 +47,8 @@ config SND_ATMEL_SOC_WM8904
config SND_AT91_SOC_SAM9X5_WM8731
tristate "SoC Audio support for WM8731-based at91sam9x5 board"
- depends on ARCH_AT91 && ATMEL_SSC && SND_ATMEL_SOC
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on ATMEL_SSC && SND_ATMEL_SOC && SND_SOC_I2C_AND_SPI
select SND_ATMEL_SOC_SSC
select SND_ATMEL_SOC_DMA
select SND_SOC_WM8731
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index 466a821da98c..b327e5cc8de3 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -1,10 +1,8 @@
# AT91 Platform Support
-snd-soc-atmel-pcm-objs := atmel-pcm.o
snd-soc-atmel-pcm-pdc-objs := atmel-pcm-pdc.o
snd-soc-atmel-pcm-dma-objs := atmel-pcm-dma.o
snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
-obj-$(CONFIG_SND_ATMEL_SOC) += snd-soc-atmel-pcm.o
obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
index b8e7bad05eb1..b6625c8c411b 100644
--- a/sound/soc/atmel/atmel-pcm-dma.c
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -54,7 +54,7 @@ static const struct snd_pcm_hardware atmel_pcm_dma_hardware = {
.period_bytes_max = 2 * 0xffff, /* if 2 bytes format */
.periods_min = 8,
.periods_max = 1024, /* no limit */
- .buffer_bytes_max = ATMEL_SSC_DMABUF_SIZE,
+ .buffer_bytes_max = 512 * 1024,
};
/**
@@ -119,7 +119,7 @@ static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
static const struct snd_dmaengine_pcm_config atmel_dmaengine_pcm_config = {
.prepare_slave_config = atmel_pcm_configure_dma,
.pcm_hardware = &atmel_pcm_dma_hardware,
- .prealloc_buffer_size = ATMEL_SSC_DMABUF_SIZE,
+ .prealloc_buffer_size = 64 * 1024,
};
int atmel_pcm_dma_platform_register(struct device *dev)
diff --git a/sound/soc/atmel/atmel-pcm-pdc.c b/sound/soc/atmel/atmel-pcm-pdc.c
index a366b3503c28..da861b44413f 100644
--- a/sound/soc/atmel/atmel-pcm-pdc.c
+++ b/sound/soc/atmel/atmel-pcm-pdc.c
@@ -47,6 +47,85 @@
#include "atmel-pcm.h"
+static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
+ int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size = ATMEL_SSC_DMABUF_SIZE;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+ buf->area = dma_alloc_coherent(pcm->card->dev, size,
+ &buf->addr, GFP_KERNEL);
+ pr_debug("atmel-pcm: alloc dma buffer: area=%p, addr=%p, size=%zu\n",
+ (void *)buf->area, (void *)(long)buf->addr, size);
+
+ if (!buf->area)
+ return -ENOMEM;
+
+ buf->bytes = size;
+ return 0;
+}
+
+static int atmel_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ return remap_pfn_range(vma, vma->vm_start,
+ substream->dma_buffer.addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
+ ret = atmel_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret)
+ goto out;
+ }
+
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+ pr_debug("atmel-pcm: allocating PCM capture DMA buffer\n");
+ ret = atmel_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (ret)
+ goto out;
+ }
+ out:
+ return ret;
+}
+
+static void atmel_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *buf;
+ int stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ substream = pcm->streams[stream].substream;
+ if (!substream)
+ continue;
+
+ buf = &substream->dma_buffer;
+ if (!buf->area)
+ continue;
+ dma_free_coherent(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+}
+
/*--------------------------------------------------------------------------*\
* Hardware definition
\*--------------------------------------------------------------------------*/
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
deleted file mode 100644
index 8ae3fa5ac60a..000000000000
--- a/sound/soc/atmel/atmel-pcm.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * atmel-pcm.c -- ALSA PCM interface for the Atmel atmel SoC.
- *
- * Copyright (C) 2005 SAN People
- * Copyright (C) 2008 Atmel
- *
- * Authors: Sedji Gaouaou <sedji.gaouaou@atmel.com>
- *
- * Based on at91-pcm. by:
- * Frank Mandarino <fmandarino@endrelia.com>
- * Copyright 2006 Endrelia Technologies Inc.
- *
- * Based on pxa2xx-pcm.c by:
- *
- * Author: Nicolas Pitre
- * Created: Nov 30, 2004
- * Copyright: (C) 2004 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include "atmel-pcm.h"
-
-static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
- int stream)
-{
- struct snd_pcm_substream *substream = pcm->streams[stream].substream;
- struct snd_dma_buffer *buf = &substream->dma_buffer;
- size_t size = ATMEL_SSC_DMABUF_SIZE;
-
- buf->dev.type = SNDRV_DMA_TYPE_DEV;
- buf->dev.dev = pcm->card->dev;
- buf->private_data = NULL;
- buf->area = dma_alloc_coherent(pcm->card->dev, size,
- &buf->addr, GFP_KERNEL);
- pr_debug("atmel-pcm: alloc dma buffer: area=%p, addr=%p, size=%zu\n",
- (void *)buf->area, (void *)(long)buf->addr, size);
-
- if (!buf->area)
- return -ENOMEM;
-
- buf->bytes = size;
- return 0;
-}
-
-int atmel_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
-{
- return remap_pfn_range(vma, vma->vm_start,
- substream->dma_buffer.addr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
-EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
-
-int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_card *card = rtd->card->snd_card;
- struct snd_pcm *pcm = rtd->pcm;
- int ret;
-
- ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
- pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
- ret = atmel_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_PLAYBACK);
- if (ret)
- goto out;
- }
-
- if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- pr_debug("atmel-pcm: allocating PCM capture DMA buffer\n");
- ret = atmel_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_CAPTURE);
- if (ret)
- goto out;
- }
- out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(atmel_pcm_new);
-
-void atmel_pcm_free(struct snd_pcm *pcm)
-{
- struct snd_pcm_substream *substream;
- struct snd_dma_buffer *buf;
- int stream;
-
- for (stream = 0; stream < 2; stream++) {
- substream = pcm->streams[stream].substream;
- if (!substream)
- continue;
-
- buf = &substream->dma_buffer;
- if (!buf->area)
- continue;
- dma_free_coherent(pcm->card->dev, buf->bytes,
- buf->area, buf->addr);
- buf->area = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(atmel_pcm_free);
-
diff --git a/sound/soc/atmel/atmel-pcm.h b/sound/soc/atmel/atmel-pcm.h
index 12ae814eff21..6eaf081cad50 100644
--- a/sound/soc/atmel/atmel-pcm.h
+++ b/sound/soc/atmel/atmel-pcm.h
@@ -83,11 +83,6 @@ struct atmel_pcm_dma_params {
#define ssc_readx(base, reg) (__raw_readl((base) + (reg)))
#define ssc_writex(base, reg, value) __raw_writel((value), (base) + (reg))
-int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd);
-void atmel_pcm_free(struct snd_pcm *pcm);
-int atmel_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma);
-
#if defined(CONFIG_SND_ATMEL_SOC_PDC) || \
defined(CONFIG_SND_ATMEL_SOC_PDC_MODULE)
int atmel_pcm_pdc_platform_register(struct device *dev);
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index fb0b7e8b08ff..841d05946b88 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -187,6 +187,94 @@ static irqreturn_t atmel_ssc_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/*
+ * When the bit clock is input, limit the maximum rate according to the
+ * Serial Clock Ratio Considerations section from the SSC documentation:
+ *
+ * The Transmitter and the Receiver can be programmed to operate
+ * with the clock signals provided on either the TK or RK pins.
+ * This allows the SSC to support many slave-mode data transfers.
+ * In this case, the maximum clock speed allowed on the RK pin is:
+ * - Peripheral clock divided by 2 if Receiver Frame Synchro is input
+ * - Peripheral clock divided by 3 if Receiver Frame Synchro is output
+ * In addition, the maximum clock speed allowed on the TK pin is:
+ * - Peripheral clock divided by 6 if Transmit Frame Synchro is input
+ * - Peripheral clock divided by 2 if Transmit Frame Synchro is output
+ *
+ * When the bit clock is output, limit the rate according to the
+ * SSC divider restrictions.
+ */
+static int atmel_ssc_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct atmel_ssc_info *ssc_p = rule->private;
+ struct ssc_device *ssc = ssc_p->ssc;
+ struct snd_interval *i = hw_param_interval(params, rule->var);
+ struct snd_interval t;
+ struct snd_ratnum r = {
+ .den_min = 1,
+ .den_max = 4095,
+ .den_step = 1,
+ };
+ unsigned int num = 0, den = 0;
+ int frame_size;
+ int mck_div = 2;
+ int ret;
+
+ frame_size = snd_soc_params_to_frame_size(params);
+ if (frame_size < 0)
+ return frame_size;
+
+ switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFS:
+ if ((ssc_p->dir_mask & SSC_DIR_MASK_CAPTURE)
+ && ssc->clk_from_rk_pin)
+ /* Receiver Frame Synchro (i.e. capture)
+ * is output (format is _CFS) and the RK pin
+ * is used for input (format is _CBM_).
+ */
+ mck_div = 3;
+ break;
+
+ case SND_SOC_DAIFMT_CBM_CFM:
+ if ((ssc_p->dir_mask & SSC_DIR_MASK_PLAYBACK)
+ && !ssc->clk_from_rk_pin)
+ /* Transmit Frame Synchro (i.e. playback)
+ * is input (format is _CFM) and the TK pin
+ * is used for input (format _CBM_ but not
+ * using the RK pin).
+ */
+ mck_div = 6;
+ break;
+ }
+
+ switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ r.num = ssc_p->mck_rate / mck_div / frame_size;
+
+ ret = snd_interval_ratnum(i, 1, &r, &num, &den);
+ if (ret >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
+ params->rate_num = num;
+ params->rate_den = den;
+ }
+ break;
+
+ case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBM_CFM:
+ t.min = 8000;
+ t.max = ssc_p->mck_rate / mck_div / frame_size;
+ t.openmin = t.openmax = 0;
+ t.integer = 0;
+ ret = snd_interval_refine(i, &t);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
/*-------------------------------------------------------------------------*\
* DAI functions
@@ -200,6 +288,7 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
struct atmel_pcm_dma_params *dma_params;
int dir, dir_mask;
+ int ret;
pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
ssc_readl(ssc_p->ssc->regs, SR));
@@ -207,6 +296,7 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
/* Enable PMC peripheral clock for this SSC */
pr_debug("atmel_ssc_dai: Starting clock\n");
clk_enable(ssc_p->ssc->clk);
+ ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
/* Reset the SSC to keep it at a clean status */
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
@@ -219,6 +309,17 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
dir_mask = SSC_DIR_MASK_CAPTURE;
}
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ atmel_ssc_hw_rule_rate,
+ ssc_p,
+ SNDRV_PCM_HW_PARAM_FRAME_BITS,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to specify rate rule: %d\n", ret);
+ return ret;
+ }
+
dma_params = &ssc_dma_params[dai->id][dir];
dma_params->ssc = ssc_p->ssc;
dma_params->substream = substream;
@@ -783,8 +884,6 @@ static int atmel_ssc_resume(struct snd_soc_dai *cpu_dai)
# define atmel_ssc_resume NULL
#endif /* CONFIG_PM */
-#define ATMEL_SSC_RATES (SNDRV_PCM_RATE_8000_96000)
-
#define ATMEL_SSC_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
@@ -804,12 +903,16 @@ static struct snd_soc_dai_driver atmel_ssc_dai = {
.playback = {
.channels_min = 1,
.channels_max = 2,
- .rates = ATMEL_SSC_RATES,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 384000,
.formats = ATMEL_SSC_FORMATS,},
.capture = {
.channels_min = 1,
.channels_max = 2,
- .rates = ATMEL_SSC_RATES,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 384000,
.formats = ATMEL_SSC_FORMATS,},
.ops = &atmel_ssc_dai_ops,
};
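
The new atmel_ssc_hw_rule_rate() turns the quoted Serial Clock Ratio limits into a runtime constraint: in the codec-clock-master cases the ceiling is mck_rate / mck_div / frame_size, with mck_div = 2, 3 or 6 chosen from which pin supplies the clock and whether the frame sync is input or output. A short worked example with assumed numbers (the peripheral clock is board specific and read with clk_get_rate() in startup, as shown above):

/* Assumed: 66 MHz peripheral clock, 2 channels x 16 bits
 * (frame_size = 32), TK pin as bit-clock input with TFS input,
 * so mck_div = 6.
 */
static unsigned long ssc_slave_max_rate(unsigned long mck_rate,
					int mck_div, int frame_size)
{
	return mck_rate / mck_div / frame_size;
}

/* ssc_slave_max_rate(66000000, 6, 32) == 343750, so a 48 kHz stream
 * passes the refined interval while, say, 384 kHz would be rejected. */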
diff --git a/sound/soc/atmel/atmel_ssc_dai.h b/sound/soc/atmel/atmel_ssc_dai.h
index b1f08d511495..80b153857a88 100644
--- a/sound/soc/atmel/atmel_ssc_dai.h
+++ b/sound/soc/atmel/atmel_ssc_dai.h
@@ -115,6 +115,7 @@ struct atmel_ssc_info {
unsigned short rcmr_period;
struct atmel_pcm_dma_params *dma_params[2];
struct atmel_ssc_state ssc_state;
+ unsigned long mck_rate;
};
int atmel_ssc_set_audio(int ssc_id);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index ea9f0e31f9d4..061c46587628 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -70,6 +70,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98090 if I2C
select SND_SOC_MAX98095 if I2C
select SND_SOC_MAX98357A if GPIOLIB
+ select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX9850 if I2C
select SND_SOC_MAX9768 if I2C
select SND_SOC_MAX9877 if I2C
@@ -141,7 +142,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8770 if SPI_MASTER
select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8782
- select SND_SOC_WM8804 if SND_SOC_I2C_AND_SPI
+ select SND_SOC_WM8804_I2C if I2C
+ select SND_SOC_WM8804_SPI if SPI_MASTER
select SND_SOC_WM8900 if I2C
select SND_SOC_WM8903 if I2C
select SND_SOC_WM8904 if I2C
@@ -460,6 +462,9 @@ config SND_SOC_MAX98095
config SND_SOC_MAX98357A
tristate
+config SND_SOC_MAX98925
+ tristate
+
config SND_SOC_MAX9850
tristate
@@ -744,8 +749,19 @@ config SND_SOC_WM8782
tristate
config SND_SOC_WM8804
- tristate "Wolfson Microelectronics WM8804 S/PDIF transceiver"
- depends on SND_SOC_I2C_AND_SPI
+ tristate
+
+config SND_SOC_WM8804_I2C
+ tristate "Wolfson Microelectronics WM8804 S/PDIF transceiver I2C"
+ depends on I2C
+ select SND_SOC_WM8804
+ select REGMAP_I2C
+
+config SND_SOC_WM8804_SPI
+ tristate "Wolfson Microelectronics WM8804 S/PDIF transceiver SPI"
+ depends on SPI_MASTER
+ select SND_SOC_WM8804
+ select REGMAP_SPI
config SND_SOC_WM8900
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 69b8666d187a..abe2d7edf65c 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -65,6 +65,7 @@ snd-soc-max98088-objs := max98088.o
snd-soc-max98090-objs := max98090.o
snd-soc-max98095-objs := max98095.o
snd-soc-max98357a-objs := max98357a.o
+snd-soc-max98925-objs := max98925.o
snd-soc-max9850-objs := max9850.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
@@ -145,6 +146,8 @@ snd-soc-wm8770-objs := wm8770.o
snd-soc-wm8776-objs := wm8776.o
snd-soc-wm8782-objs := wm8782.o
snd-soc-wm8804-objs := wm8804.o
+snd-soc-wm8804-i2c-objs := wm8804-i2c.o
+snd-soc-wm8804-spi-objs := wm8804-spi.o
snd-soc-wm8900-objs := wm8900.o
snd-soc-wm8903-objs := wm8903.o
snd-soc-wm8904-objs := wm8904.o
@@ -247,6 +250,7 @@ obj-$(CONFIG_SND_SOC_MAX98088) += snd-soc-max98088.o
obj-$(CONFIG_SND_SOC_MAX98090) += snd-soc-max98090.o
obj-$(CONFIG_SND_SOC_MAX98095) += snd-soc-max98095.o
obj-$(CONFIG_SND_SOC_MAX98357A) += snd-soc-max98357a.o
+obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
@@ -323,6 +327,8 @@ obj-$(CONFIG_SND_SOC_WM8770) += snd-soc-wm8770.o
obj-$(CONFIG_SND_SOC_WM8776) += snd-soc-wm8776.o
obj-$(CONFIG_SND_SOC_WM8782) += snd-soc-wm8782.o
obj-$(CONFIG_SND_SOC_WM8804) += snd-soc-wm8804.o
+obj-$(CONFIG_SND_SOC_WM8804_I2C) += snd-soc-wm8804-i2c.o
+obj-$(CONFIG_SND_SOC_WM8804_SPI) += snd-soc-wm8804-spi.o
obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o
obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o
obj-$(CONFIG_SND_SOC_WM8904) += snd-soc-wm8904.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 7895689588da..88ca9cb0ce79 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2003,7 +2003,6 @@ static int ab8500_audio_setup_mics(struct snd_soc_codec *codec,
return 0;
}
-EXPORT_SYMBOL_GPL(ab8500_audio_setup_mics);
static int ab8500_audio_set_ear_cmv(struct snd_soc_codec *codec,
enum ear_cm_voltage ear_cmv)
@@ -2036,7 +2035,6 @@ static int ab8500_audio_set_ear_cmv(struct snd_soc_codec *codec,
return 0;
}
-EXPORT_SYMBOL_GPL(ab8500_audio_set_ear_cmv);
static int ab8500_audio_set_bit_delay(struct snd_soc_dai *dai,
unsigned int delay)
diff --git a/sound/soc/codecs/adau1977.c b/sound/soc/codecs/adau1977.c
index 70ab35744aba..7ad8e156e2df 100644
--- a/sound/soc/codecs/adau1977.c
+++ b/sound/soc/codecs/adau1977.c
@@ -938,22 +938,15 @@ int adau1977_probe(struct device *dev, struct regmap *regmap,
adau1977->dvdd_reg = NULL;
}
- adau1977->reset_gpio = devm_gpiod_get(dev, "reset");
- if (IS_ERR(adau1977->reset_gpio)) {
- ret = PTR_ERR(adau1977->reset_gpio);
- if (ret != -ENOENT && ret != -ENOSYS)
- return PTR_ERR(adau1977->reset_gpio);
- adau1977->reset_gpio = NULL;
- }
+ adau1977->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(adau1977->reset_gpio))
+ return PTR_ERR(adau1977->reset_gpio);
dev_set_drvdata(dev, adau1977);
- if (adau1977->reset_gpio) {
- ret = gpiod_direction_output(adau1977->reset_gpio, 0);
- if (ret)
- return ret;
+ if (adau1977->reset_gpio)
ndelay(100);
- }
ret = adau1977_power_enable(adau1977);
if (ret)
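
The adau1977 change above, and the cs35l32/cs4265 changes further down, swap the open-coded devm_gpiod_get() plus -ENOENT/-ENOSYS filtering for devm_gpiod_get_optional(), which returns NULL when no reset GPIO is described and lets the GPIOD_OUT_LOW flag set the initial direction and level in the same call. A minimal sketch of the idiom, with a hypothetical probe helper:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Hypothetical helper: request an optional reset line. */
static int request_optional_reset(struct device *dev,
				  struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	/* NULL if the firmware describes no "reset" GPIO, ERR_PTR() on
	 * real errors, otherwise an output already driven low. */
	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	if (gpiod)
		gpiod_set_value_cansleep(gpiod, 1);	/* deassert reset */

	*out = gpiod;
	return 0;
}

Note that the con_id also changes from "reset-gpios" to "reset": gpiolib appends the -gpios suffix itself when it looks up the firmware property.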
diff --git a/sound/soc/codecs/ak4554.c b/sound/soc/codecs/ak4554.c
index 16ce9f9fefa1..298dedc05140 100644
--- a/sound/soc/codecs/ak4554.c
+++ b/sound/soc/codecs/ak4554.c
@@ -84,7 +84,7 @@ static int ak4554_soc_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id ak4554_of_match[] = {
+static const struct of_device_id ak4554_of_match[] = {
{ .compatible = "asahi-kasei,ak4554" },
{},
};
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index dde8b49c19ad..13585e88f597 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -97,6 +97,9 @@
#define PMMP (1 << 2) /* MPWR pin Power Management */
#define MGAIN0 (1 << 0) /* MIC amp gain*/
+/* SG_SL2 */
+#define LOPS (1 << 6) /* Stereo Line-out Power Save Mode */
+
/* TIMER */
#define ZTM(param) ((param & 0x3) << 4) /* ALC Zero Crossing TimeOut */
#define WTM(param) (((param & 0x4) << 4) | ((param & 0x3) << 2))
@@ -168,6 +171,29 @@ static const struct snd_kcontrol_new ak4642_lout_mixer_controls[] = {
SOC_DAPM_SINGLE("DACL", SG_SL1, 4, 1, 0),
};
+/* event handlers */
+static int ak4642_lout_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMD:
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Power save mode ON */
+ snd_soc_update_bits(codec, SG_SL2, LOPS, LOPS);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ /* Power save mode OFF */
+ mdelay(300);
+ snd_soc_update_bits(codec, SG_SL2, LOPS, 0);
+ break;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget ak4642_dapm_widgets[] = {
/* Outputs */
@@ -182,12 +208,15 @@ static const struct snd_soc_dapm_widget ak4642_dapm_widgets[] = {
SND_SOC_DAPM_PGA("DACH", MD_CTL4, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("LINEOUT Mixer", PW_MGMT1, 3, 0,
+ SND_SOC_DAPM_MIXER_E("LINEOUT Mixer", PW_MGMT1, 3, 0,
&ak4642_lout_mixer_controls[0],
- ARRAY_SIZE(ak4642_lout_mixer_controls)),
+ ARRAY_SIZE(ak4642_lout_mixer_controls),
+ ak4642_lout_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
/* DAC */
- SND_SOC_DAPM_DAC("DAC", "HiFi Playback", PW_MGMT1, 2, 0),
+ SND_SOC_DAPM_DAC("DAC", NULL, PW_MGMT1, 2, 0),
};
static const struct snd_soc_dapm_route ak4642_intercon[] = {
@@ -205,6 +234,8 @@ static const struct snd_soc_dapm_route ak4642_intercon[] = {
{"DACH", NULL, "DAC"},
{"LINEOUT Mixer", "DACL", "DAC"},
+
+ { "DAC", NULL, "Playback" },
};
/*
@@ -468,13 +499,13 @@ static struct snd_soc_dai_driver ak4642_dai = {
.name = "ak4642-hifi",
.playback = {
.stream_name = "Playback",
- .channels_min = 1,
+ .channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE },
.capture = {
.stream_name = "Capture",
- .channels_min = 1,
+ .channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE },
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 29202610dd0d..eff4b4d512b7 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -19,7 +19,6 @@
#include <sound/tlv.h>
#include <linux/mfd/arizona/core.h>
-#include <linux/mfd/arizona/gpio.h>
#include <linux/mfd/arizona/registers.h>
#include "arizona.h"
@@ -281,6 +280,7 @@ int arizona_init_gpio(struct snd_soc_codec *codec)
switch (arizona->type) {
case WM5110:
+ case WM8280:
snd_soc_dapm_disable_pin(&codec->dapm, "DRC2 Signal Activity");
break;
default:
@@ -840,8 +840,8 @@ int arizona_hp_ev(struct snd_soc_dapm_widget *w,
priv->arizona->hp_ena &= ~mask;
priv->arizona->hp_ena |= val;
- /* Force off if HPDET magic is active */
- if (priv->arizona->hpdet_magic)
+ /* Force off if HPDET clamp is active */
+ if (priv->arizona->hpdet_clamp)
val = 0;
regmap_update_bits_async(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1,
@@ -1729,6 +1729,7 @@ static int arizona_calc_fratio(struct arizona_fll *fll,
switch (fll->arizona->type) {
case WM5110:
+ case WM8280:
if (fll->arizona->rev < 3 || sync)
return init_ratio;
break;
@@ -1901,7 +1902,7 @@ static int arizona_is_enabled_fll(struct arizona_fll *fll)
static int arizona_enable_fll(struct arizona_fll *fll)
{
struct arizona *arizona = fll->arizona;
- int ret;
+ unsigned long time_left;
bool use_sync = false;
int already_enabled = arizona_is_enabled_fll(fll);
struct arizona_fll_cfg cfg;
@@ -1977,9 +1978,9 @@ static int arizona_enable_fll(struct arizona_fll *fll)
regmap_update_bits_async(arizona->regmap, fll->base + 1,
ARIZONA_FLL1_FREERUN, 0);
- ret = wait_for_completion_timeout(&fll->ok,
+ time_left = wait_for_completion_timeout(&fll->ok,
msecs_to_jiffies(250));
- if (ret == 0)
+ if (time_left == 0)
arizona_fll_warn(fll, "Timed out waiting for lock\n");
return 0;
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index f2b8aad21274..60598b230341 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -437,20 +437,13 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
}
/* Reset the Device */
- cs35l32->reset_gpio = devm_gpiod_get(&i2c_client->dev,
- "reset-gpios");
- if (IS_ERR(cs35l32->reset_gpio)) {
- ret = PTR_ERR(cs35l32->reset_gpio);
- if (ret != -ENOENT && ret != -ENOSYS)
- return ret;
-
- cs35l32->reset_gpio = NULL;
- } else {
- ret = gpiod_direction_output(cs35l32->reset_gpio, 0);
- if (ret)
- return ret;
+ cs35l32->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cs35l32->reset_gpio))
+ return PTR_ERR(cs35l32->reset_gpio);
+
+ if (cs35l32->reset_gpio)
gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
- }
/* initialize codec */
ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg);
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index ce6086835ebd..cac48ddf3ba6 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -605,21 +605,14 @@ static int cs4265_i2c_probe(struct i2c_client *i2c_client,
return ret;
}
- cs4265->reset_gpio = devm_gpiod_get(&i2c_client->dev,
- "reset-gpios");
- if (IS_ERR(cs4265->reset_gpio)) {
- ret = PTR_ERR(cs4265->reset_gpio);
- if (ret != -ENOENT && ret != -ENOSYS)
- return ret;
-
- cs4265->reset_gpio = NULL;
- } else {
- ret = gpiod_direction_output(cs4265->reset_gpio, 0);
- if (ret)
- return ret;
+ cs4265->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cs4265->reset_gpio))
+ return PTR_ERR(cs4265->reset_gpio);
+
+ if (cs4265->reset_gpio) {
mdelay(1);
gpiod_set_value_cansleep(cs4265->reset_gpio, 1);
-
}
i2c_set_clientdata(i2c_client, cs4265);
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 7d3a6accaf9a..e770ee6f36da 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec)
if (gpio_is_valid(cs4271->gpio_nreset)) {
/* Reset codec */
gpio_direction_output(cs4271->gpio_nreset, 0);
- udelay(1);
+ mdelay(1);
gpio_set_value(cs4271->gpio_nreset, 1);
/* Give the codec time to wake up */
- udelay(1);
+ mdelay(1);
}
ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 0b10979513c4..0f334bc1b63c 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -420,7 +420,7 @@ static int cx20442_platform_probe(struct platform_device *pdev)
&cx20442_codec_dev, &cx20442_dai, 1);
}
-static int __exit cx20442_platform_remove(struct platform_device *pdev)
+static int cx20442_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -431,7 +431,7 @@ static struct platform_driver cx20442_platform_driver = {
.name = "cx20442-codec",
},
.probe = cx20442_platform_probe,
- .remove = __exit_p(cx20442_platform_remove),
+ .remove = cx20442_platform_remove,
};
module_platform_driver(cx20442_platform_driver);
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index b112b1c2c394..3e33ef2acf3c 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2605,8 +2605,24 @@ err_enable:
return ret;
}
+static void max98090_i2c_shutdown(struct i2c_client *i2c)
+{
+ struct max98090_priv *max98090 = dev_get_drvdata(&i2c->dev);
+
+ /*
+ * Enable volume smoothing, disable zero cross. This will cause
+ * a quick 40ms ramp to mute on shutdown.
+ */
+ regmap_write(max98090->regmap,
+ M98090_REG_LEVEL_CONTROL, M98090_VSENN_MASK);
+ regmap_write(max98090->regmap,
+ M98090_REG_DEVICE_SHUTDOWN, 0x00);
+ msleep(40);
+}
+
static int max98090_i2c_remove(struct i2c_client *client)
{
+ max98090_i2c_shutdown(client);
snd_soc_unregister_codec(&client->dev);
return 0;
}
@@ -2696,6 +2712,7 @@ static struct i2c_driver max98090_i2c_driver = {
.acpi_match_table = ACPI_PTR(max98090_acpi_match),
},
.probe = max98090_i2c_probe,
+ .shutdown = max98090_i2c_shutdown,
.remove = max98090_i2c_remove,
.id_table = max98090_i2c_id,
};
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index e9e6efbc21dd..bf3e933ee895 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -26,8 +26,6 @@
#include <sound/soc-dai.h>
#include <sound/soc-dapm.h>
-#define DRV_NAME "max98357a"
-
static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
@@ -87,9 +85,9 @@ static struct snd_soc_dai_ops max98357a_dai_ops = {
};
static struct snd_soc_dai_driver max98357a_dai_driver = {
- .name = DRV_NAME,
+ .name = "HiFi",
.playback = {
- .stream_name = DRV_NAME "-playback",
+ .stream_name = "HiFi Playback",
.formats = SNDRV_PCM_FMTBIT_S16 |
SNDRV_PCM_FMTBIT_S24 |
SNDRV_PCM_FMTBIT_S32,
@@ -127,7 +125,7 @@ static int max98357a_platform_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id max98357a_device_id[] = {
- { .compatible = "maxim," DRV_NAME, },
+ { .compatible = "maxim,max98357a" },
{}
};
MODULE_DEVICE_TABLE(of, max98357a_device_id);
@@ -135,7 +133,7 @@ MODULE_DEVICE_TABLE(of, max98357a_device_id);
static struct platform_driver max98357a_platform_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = "max98357a",
.of_match_table = of_match_ptr(max98357a_device_id),
},
.probe = max98357a_platform_probe,
@@ -145,4 +143,3 @@ module_platform_driver(max98357a_platform_driver);
MODULE_DESCRIPTION("Maxim MAX98357A Codec Driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
new file mode 100644
index 000000000000..9b5a17de4690
--- /dev/null
+++ b/sound/soc/codecs/max98925.c
@@ -0,0 +1,655 @@
+/*
+ * max98925.c -- ALSA SoC Stereo MAX98925 driver
+ * Copyright 2013-15 Maxim Integrated Products
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "max98925.h"
+
+static const char *const dai_text[] = {
+ "Left", "Right", "LeftRight", "LeftRightDiv2",
+};
+
+static const char * const max98925_boost_voltage_text[] = {
+ "8.5V", "8.25V", "8.0V", "7.75V", "7.5V", "7.25V", "7.0V", "6.75V",
+ "6.5V", "6.5V", "6.5V", "6.5V", "6.5V", "6.5V", "6.5V", "6.5V"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98925_boost_voltage,
+ MAX98925_CONFIGURATION, M98925_BST_VOUT_SHIFT,
+ max98925_boost_voltage_text);
+
+static const char *const hpf_text[] = {
+ "Disable", "DC Block", "100Hz", "200Hz", "400Hz", "800Hz",
+};
+
+static const struct reg_default max98925_reg[] = {
+ { 0x0B, 0x00 }, /* IRQ Enable0 */
+ { 0x0C, 0x00 }, /* IRQ Enable1 */
+ { 0x0D, 0x00 }, /* IRQ Enable2 */
+ { 0x0E, 0x00 }, /* IRQ Clear0 */
+ { 0x0F, 0x00 }, /* IRQ Clear1 */
+ { 0x10, 0x00 }, /* IRQ Clear2 */
+ { 0x11, 0xC0 }, /* Map0 */
+ { 0x12, 0x00 }, /* Map1 */
+ { 0x13, 0x00 }, /* Map2 */
+ { 0x14, 0xF0 }, /* Map3 */
+ { 0x15, 0x00 }, /* Map4 */
+ { 0x16, 0xAB }, /* Map5 */
+ { 0x17, 0x89 }, /* Map6 */
+ { 0x18, 0x00 }, /* Map7 */
+ { 0x19, 0x00 }, /* Map8 */
+ { 0x1A, 0x06 }, /* DAI Clock Mode 1 */
+ { 0x1B, 0xC0 }, /* DAI Clock Mode 2 */
+ { 0x1C, 0x00 }, /* DAI Clock Divider Denominator MSBs */
+ { 0x1D, 0x00 }, /* DAI Clock Divider Denominator LSBs */
+ { 0x1E, 0xF0 }, /* DAI Clock Divider Numerator MSBs */
+ { 0x1F, 0x00 }, /* DAI Clock Divider Numerator LSBs */
+ { 0x20, 0x50 }, /* Format */
+ { 0x21, 0x00 }, /* TDM Slot Select */
+ { 0x22, 0x00 }, /* DOUT Configuration VMON */
+ { 0x23, 0x00 }, /* DOUT Configuration IMON */
+ { 0x24, 0x00 }, /* DOUT Configuration VBAT */
+ { 0x25, 0x00 }, /* DOUT Configuration VBST */
+ { 0x26, 0x00 }, /* DOUT Configuration FLAG */
+ { 0x27, 0xFF }, /* DOUT HiZ Configuration 1 */
+ { 0x28, 0xFF }, /* DOUT HiZ Configuration 2 */
+ { 0x29, 0xFF }, /* DOUT HiZ Configuration 3 */
+ { 0x2A, 0xFF }, /* DOUT HiZ Configuration 4 */
+ { 0x2B, 0x02 }, /* DOUT Drive Strength */
+ { 0x2C, 0x90 }, /* Filters */
+ { 0x2D, 0x00 }, /* Gain */
+ { 0x2E, 0x02 }, /* Gain Ramping */
+ { 0x2F, 0x00 }, /* Speaker Amplifier */
+ { 0x30, 0x0A }, /* Threshold */
+ { 0x31, 0x00 }, /* ALC Attack */
+ { 0x32, 0x80 }, /* ALC Atten and Release */
+ { 0x33, 0x00 }, /* ALC Infinite Hold Release */
+ { 0x34, 0x92 }, /* ALC Configuration */
+ { 0x35, 0x01 }, /* Boost Converter */
+ { 0x36, 0x00 }, /* Block Enable */
+ { 0x37, 0x00 }, /* Configuration */
+ { 0x38, 0x00 }, /* Global Enable */
+ { 0x3A, 0x00 }, /* Boost Limiter */
+};
+
+static const struct soc_enum max98925_dai_enum =
+ SOC_ENUM_SINGLE(MAX98925_GAIN, 5, ARRAY_SIZE(dai_text), dai_text);
+
+static const struct soc_enum max98925_hpf_enum =
+ SOC_ENUM_SINGLE(MAX98925_FILTERS, 0, ARRAY_SIZE(hpf_text), hpf_text);
+
+static const struct snd_kcontrol_new max98925_hpf_sel_mux =
+ SOC_DAPM_ENUM("Rc Filter MUX Mux", max98925_hpf_enum);
+
+static const struct snd_kcontrol_new max98925_dai_sel_mux =
+ SOC_DAPM_ENUM("DAI IN MUX Mux", max98925_dai_enum);
+
+static int max98925_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_BLOCK_ENABLE,
+ M98925_BST_EN_MASK |
+ M98925_ADC_IMON_EN_MASK | M98925_ADC_VMON_EN_MASK,
+ M98925_BST_EN_MASK |
+ M98925_ADC_IMON_EN_MASK | M98925_ADC_VMON_EN_MASK);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_BLOCK_ENABLE, M98925_BST_EN_MASK |
+ M98925_ADC_IMON_EN_MASK | M98925_ADC_VMON_EN_MASK, 0);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget max98925_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("DAI_OUT", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("DAI IN MUX", SND_SOC_NOPM, 0, 0,
+ &max98925_dai_sel_mux),
+ SND_SOC_DAPM_MUX("Rc Filter MUX", SND_SOC_NOPM, 0, 0,
+ &max98925_hpf_sel_mux),
+ SND_SOC_DAPM_DAC_E("Amp Enable", NULL, MAX98925_BLOCK_ENABLE,
+ M98925_SPK_EN_SHIFT, 0, max98925_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("Global Enable", MAX98925_GLOBAL_ENABLE,
+ M98925_EN_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("BE_OUT"),
+};
+
+static const struct snd_soc_dapm_route max98925_audio_map[] = {
+ {"DAI IN MUX", "Left", "DAI_OUT"},
+ {"DAI IN MUX", "Right", "DAI_OUT"},
+ {"DAI IN MUX", "LeftRight", "DAI_OUT"},
+ {"DAI IN MUX", "LeftRightDiv2", "DAI_OUT"},
+ {"Rc Filter MUX", "Disable", "DAI IN MUX"},
+ {"Rc Filter MUX", "DC Block", "DAI IN MUX"},
+ {"Rc Filter MUX", "100Hz", "DAI IN MUX"},
+ {"Rc Filter MUX", "200Hz", "DAI IN MUX"},
+ {"Rc Filter MUX", "400Hz", "DAI IN MUX"},
+ {"Rc Filter MUX", "800Hz", "DAI IN MUX"},
+ {"Amp Enable", NULL, "Rc Filter MUX"},
+ {"BE_OUT", NULL, "Amp Enable"},
+ {"BE_OUT", NULL, "Global Enable"},
+};
+
+static bool max98925_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98925_VBAT_DATA:
+ case MAX98925_VBST_DATA:
+ case MAX98925_LIVE_STATUS0:
+ case MAX98925_LIVE_STATUS1:
+ case MAX98925_LIVE_STATUS2:
+ case MAX98925_STATE0:
+ case MAX98925_STATE1:
+ case MAX98925_STATE2:
+ case MAX98925_FLAG0:
+ case MAX98925_FLAG1:
+ case MAX98925_FLAG2:
+ case MAX98925_REV_VERSION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max98925_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98925_IRQ_CLEAR0:
+ case MAX98925_IRQ_CLEAR1:
+ case MAX98925_IRQ_CLEAR2:
+ case MAX98925_ALC_HOLD_RLS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static DECLARE_TLV_DB_SCALE(max98925_spk_tlv, -600, 100, 0);
+
+static const struct snd_kcontrol_new max98925_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Volume", MAX98925_GAIN,
+ M98925_SPK_GAIN_SHIFT, (1<<M98925_SPK_GAIN_WIDTH)-1, 0,
+ max98925_spk_tlv),
+ SOC_SINGLE("Ramp Switch", MAX98925_GAIN_RAMPING,
+ M98925_SPK_RMP_EN_SHIFT, 1, 0),
+ SOC_SINGLE("ZCD Switch", MAX98925_GAIN_RAMPING,
+ M98925_SPK_ZCD_EN_SHIFT, 1, 0),
+ SOC_SINGLE("ALC Switch", MAX98925_THRESHOLD,
+ M98925_ALC_EN_SHIFT, 1, 0),
+ SOC_SINGLE("ALC Threshold", MAX98925_THRESHOLD, M98925_ALC_TH_SHIFT,
+ (1<<M98925_ALC_TH_WIDTH)-1, 0),
+ SOC_ENUM("Boost Output Voltage", max98925_boost_voltage),
+};
+
+/* codec sample rate and n/m dividers parameter table */
+static const struct {
+ int rate;
+ int sr;
+ int divisors[3][2];
+} rate_table[] = {
+ {
+ .rate = 8000,
+ .sr = 0,
+ .divisors = { {1, 375}, {5, 1764}, {1, 384} }
+ },
+ {
+ .rate = 11025,
+ .sr = 1,
+ .divisors = { {147, 40000}, {1, 256}, {147, 40960} }
+ },
+ {
+ .rate = 12000,
+ .sr = 2,
+ .divisors = { {1, 250}, {5, 1176}, {1, 256} }
+ },
+ {
+ .rate = 16000,
+ .sr = 3,
+ .divisors = { {2, 375}, {5, 882}, {1, 192} }
+ },
+ {
+ .rate = 22050,
+ .sr = 4,
+ .divisors = { {147, 20000}, {1, 128}, {147, 20480} }
+ },
+ {
+ .rate = 24000,
+ .sr = 5,
+ .divisors = { {1, 125}, {5, 588}, {1, 128} }
+ },
+ {
+ .rate = 32000,
+ .sr = 6,
+ .divisors = { {4, 375}, {5, 441}, {1, 96} }
+ },
+ {
+ .rate = 44100,
+ .sr = 7,
+ .divisors = { {147, 10000}, {1, 64}, {147, 10240} }
+ },
+ {
+ .rate = 48000,
+ .sr = 8,
+ .divisors = { {2, 125}, {5, 294}, {1, 64} }
+ },
+};
+
+static inline int max98925_rate_value(struct snd_soc_codec *codec,
+ int rate, int clock, int *value, int *n, int *m)
+{
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rate_table); i++) {
+ if (rate_table[i].rate >= rate) {
+ *value = rate_table[i].sr;
+ *n = rate_table[i].divisors[clock][0];
+ *m = rate_table[i].divisors[clock][1];
+ ret = 0;
+ break;
+ }
+ }
+ dev_dbg(codec->dev, "%s: sample rate is %d, returning %d\n",
+ __func__, rate, *value);
+ return ret;
+}
+
+static void max98925_set_sense_data(struct max98925_priv *max98925)
+{
+ /* set VMON slots */
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DOUT_CFG_VMON,
+ M98925_DAI_VMON_EN_MASK, M98925_DAI_VMON_EN_MASK);
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DOUT_CFG_VMON,
+ M98925_DAI_VMON_SLOT_MASK,
+ max98925->v_slot << M98925_DAI_VMON_SLOT_SHIFT);
+ /* set IMON slots */
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DOUT_CFG_IMON,
+ M98925_DAI_IMON_EN_MASK, M98925_DAI_IMON_EN_MASK);
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DOUT_CFG_IMON,
+ M98925_DAI_IMON_SLOT_MASK,
+ max98925->i_slot << M98925_DAI_IMON_SLOT_SHIFT);
+}
+
+static int max98925_dai_set_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
+ unsigned int invert = 0;
+
+ dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ /* set DAI to slave mode */
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE2,
+ M98925_DAI_MAS_MASK, 0);
+ max98925_set_sense_data(max98925);
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /*
+ * set left channel DAI to master mode,
+ * right channel always slave
+ */
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE2,
+ M98925_DAI_MAS_MASK, M98925_DAI_MAS_MASK);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ default:
+ dev_err(codec->dev, "DAI clock mode unsupported");
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ invert = M98925_DAI_WCI_MASK;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert = M98925_DAI_BCI_MASK;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ invert = M98925_DAI_BCI_MASK | M98925_DAI_WCI_MASK;
+ break;
+ default:
+ dev_err(codec->dev, "DAI invert mode unsupported");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98925->regmap, MAX98925_FORMAT,
+ M98925_DAI_BCI_MASK, invert);
+ return 0;
+}
+
+static int max98925_set_clock(struct max98925_priv *max98925,
+ struct snd_pcm_hw_params *params)
+{
+ unsigned int dai_sr = 0, clock, mdll, n, m;
+ struct snd_soc_codec *codec = max98925->codec;
+ int rate = params_rate(params);
+ /* BCLK/LRCLK ratio calculation */
+ int blr_clk_ratio = params_channels(params) * max98925->ch_size;
+
+ switch (blr_clk_ratio) {
+ case 32:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE2,
+ M98925_DAI_BSEL_MASK, M98925_DAI_BSEL_32);
+ break;
+ case 48:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE2,
+ M98925_DAI_BSEL_MASK, M98925_DAI_BSEL_48);
+ break;
+ case 64:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE2,
+ M98925_DAI_BSEL_MASK, M98925_DAI_BSEL_64);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (max98925->sysclk) {
+ case 6000000:
+ clock = 0;
+ mdll = M98925_MDLL_MULT_MCLKx16;
+ break;
+ case 11289600:
+ clock = 1;
+ mdll = M98925_MDLL_MULT_MCLKx8;
+ break;
+ case 12000000:
+ clock = 0;
+ mdll = M98925_MDLL_MULT_MCLKx8;
+ break;
+ case 12288000:
+ clock = 2;
+ mdll = M98925_MDLL_MULT_MCLKx8;
+ break;
+ default:
+ dev_info(max98925->codec->dev, "unsupported sysclk %d\n",
+ max98925->sysclk);
+ return -EINVAL;
+ }
+
+ if (max98925_rate_value(codec, rate, clock, &dai_sr, &n, &m))
+ return -EINVAL;
+
+ /* set DAI_SR to correct LRCLK frequency */
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE2,
+ M98925_DAI_SR_MASK, dai_sr << M98925_DAI_SR_SHIFT);
+ /* set DAI m divider */
+ regmap_write(max98925->regmap,
+ MAX98925_DAI_CLK_DIV_M_MSBS, m >> 8);
+ regmap_write(max98925->regmap,
+ MAX98925_DAI_CLK_DIV_M_LSBS, m & 0xFF);
+ /* set DAI n divider */
+ regmap_write(max98925->regmap,
+ MAX98925_DAI_CLK_DIV_N_MSBS, n >> 8);
+ regmap_write(max98925->regmap,
+ MAX98925_DAI_CLK_DIV_N_LSBS, n & 0xFF);
+ /* set MDLL */
+ regmap_update_bits(max98925->regmap, MAX98925_DAI_CLK_MODE1,
+ M98925_MDLL_MULT_MASK, mdll << M98925_MDLL_MULT_SHIFT);
+ return 0;
+}
+
+static int max98925_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
+
+ switch (snd_pcm_format_width(params_format(params))) {
+ case 16:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_FORMAT,
+ M98925_DAI_CHANSZ_MASK, M98925_DAI_CHANSZ_16);
+ max98925->ch_size = 16;
+ break;
+ case 24:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_FORMAT,
+ M98925_DAI_CHANSZ_MASK, M98925_DAI_CHANSZ_24);
+ max98925->ch_size = 24;
+ break;
+ case 32:
+ regmap_update_bits(max98925->regmap,
+ MAX98925_FORMAT,
+ M98925_DAI_CHANSZ_MASK, M98925_DAI_CHANSZ_32);
+ max98925->ch_size = 32;
+ break;
+ default:
+ pr_err("%s: format unsupported %d",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+ dev_dbg(codec->dev, "%s: format supported %d",
+ __func__, params_format(params));
+ return max98925_set_clock(max98925, params);
+}
+
+static int max98925_dai_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
+
+ switch (clk_id) {
+ case 0:
+ /* use MCLK for Left channel, right channel always BCLK */
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE1,
+ M98925_DAI_CLK_SOURCE_MASK, 0);
+ break;
+ case 1:
+ /* configure dai clock source to BCLK instead of MCLK */
+ regmap_update_bits(max98925->regmap,
+ MAX98925_DAI_CLK_MODE1,
+ M98925_DAI_CLK_SOURCE_MASK,
+ M98925_DAI_CLK_SOURCE_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+ max98925->sysclk = freq;
+ return 0;
+}
+
+#define MAX98925_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98925_dai_ops = {
+ .set_sysclk = max98925_dai_set_sysclk,
+ .set_fmt = max98925_dai_set_fmt,
+ .hw_params = max98925_dai_hw_params,
+};
+
+static struct snd_soc_dai_driver max98925_dai[] = {
+ {
+ .name = "max98925-aif1",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = MAX98925_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = MAX98925_FORMATS,
+ },
+ .ops = &max98925_dai_ops,
+ }
+};
+
+static int max98925_probe(struct snd_soc_codec *codec)
+{
+ struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
+
+ max98925->codec = codec;
+ codec->control_data = max98925->regmap;
+ regmap_write(max98925->regmap, MAX98925_GLOBAL_ENABLE, 0x00);
+ /* It's not the default but we need to set DAI_DLY */
+ regmap_write(max98925->regmap,
+ MAX98925_FORMAT, M98925_DAI_DLY_MASK);
+ regmap_write(max98925->regmap, MAX98925_TDM_SLOT_SELECT, 0xC8);
+ regmap_write(max98925->regmap, MAX98925_DOUT_HIZ_CFG1, 0xFF);
+ regmap_write(max98925->regmap, MAX98925_DOUT_HIZ_CFG2, 0xFF);
+ regmap_write(max98925->regmap, MAX98925_DOUT_HIZ_CFG3, 0xFF);
+ regmap_write(max98925->regmap, MAX98925_DOUT_HIZ_CFG4, 0xF0);
+ regmap_write(max98925->regmap, MAX98925_FILTERS, 0xD8);
+ regmap_write(max98925->regmap, MAX98925_ALC_CONFIGURATION, 0xF8);
+ regmap_write(max98925->regmap, MAX98925_CONFIGURATION, 0xF0);
+ /* Disable ALC muting */
+ regmap_write(max98925->regmap, MAX98925_BOOST_LIMITER, 0xF8);
+ return 0;
+}
+
+static const struct snd_soc_codec_driver soc_codec_dev_max98925 = {
+ .probe = max98925_probe,
+ .controls = max98925_snd_controls,
+ .num_controls = ARRAY_SIZE(max98925_snd_controls),
+ .dapm_routes = max98925_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98925_audio_map),
+ .dapm_widgets = max98925_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98925_dapm_widgets),
+};
+
+static const struct regmap_config max98925_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX98925_REV_VERSION,
+ .reg_defaults = max98925_reg,
+ .num_reg_defaults = ARRAY_SIZE(max98925_reg),
+ .volatile_reg = max98925_volatile_register,
+ .readable_reg = max98925_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int max98925_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret, reg;
+ u32 value;
+ struct max98925_priv *max98925;
+
+ max98925 = devm_kzalloc(&i2c->dev,
+ sizeof(*max98925), GFP_KERNEL);
+ if (!max98925)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max98925);
+ max98925->regmap = devm_regmap_init_i2c(i2c, &max98925_regmap);
+ if (IS_ERR(max98925->regmap)) {
+ ret = PTR_ERR(max98925->regmap);
+ dev_err(&i2c->dev,
+ "Failed to allocate regmap: %d\n", ret);
+ goto err_out;
+ }
+
+ if (!of_property_read_u32(i2c->dev.of_node, "vmon-slot-no", &value)) {
+ if (value > M98925_DAI_VMON_SLOT_1E_1F) {
+ dev_err(&i2c->dev, "vmon slot number is wrong:\n");
+ return -EINVAL;
+ }
+ max98925->v_slot = value;
+ }
+ if (!of_property_read_u32(i2c->dev.of_node, "imon-slot-no", &value)) {
+ if (value > M98925_DAI_IMON_SLOT_1E_1F) {
+ dev_err(&i2c->dev, "imon slot number is wrong:\n");
+ return -EINVAL;
+ }
+ max98925->i_slot = value;
+ }
+ ret = regmap_read(max98925->regmap,
+ MAX98925_REV_VERSION, &reg);
+ if ((ret < 0) ||
+ ((reg != MAX98925_VERSION) &&
+ (reg != MAX98925_VERSION1))) {
+ dev_err(&i2c->dev,
+ "device initialization error (%d 0x%02X)\n",
+ ret, reg);
+ goto err_out;
+ }
+ dev_info(&i2c->dev, "device version 0x%02X\n", reg);
+
+ ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98925,
+ max98925_dai, ARRAY_SIZE(max98925_dai));
+ if (ret < 0)
+ dev_err(&i2c->dev,
+ "Failed to register codec: %d\n", ret);
+err_out:
+ return ret;
+}
+
+static int max98925_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id max98925_i2c_id[] = {
+ { "max98925", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max98925_i2c_id);
+
+static const struct of_device_id max98925_of_match[] = {
+ { .compatible = "maxim,max98925", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98925_of_match);
+
+static struct i2c_driver max98925_i2c_driver = {
+ .driver = {
+ .name = "max98925",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(max98925_of_match),
+ .pm = NULL,
+ },
+ .probe = max98925_i2c_probe,
+ .remove = max98925_i2c_remove,
+ .id_table = max98925_i2c_id,
+};
+
+module_i2c_driver(max98925_i2c_driver)
+
+MODULE_DESCRIPTION("ALSA SoC MAX98925 driver");
+MODULE_AUTHOR("Ralph Birt <rdbirt@gmail.com>, Anish kumar <anish.kumar@maximintegrated.com>");
+MODULE_LICENSE("GPL");
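
max98925_set_clock() above derives three settings from the stream and sysclk: the BCLK/LRCLK ratio (channels times channel size) that picks the BSEL field, the DAI_SR code from rate_table[], and the n/m dividers from the column selected by the supported sysclk. A worked example under assumed parameters, cross-checked against the table above:

/* Assumed: 48 kHz, 2 channels, 16-bit samples, sysclk = 12.288 MHz. */
static void max98925_clock_example(void)
{
	int blr_clk_ratio = 2 * 16;	/* 32 -> M98925_DAI_BSEL_32 */
	int clock = 2;			/* 12.288 MHz -> divisor column 2 */
	int dai_sr = 8;			/* rate_table[] entry for 48000 Hz */
	int n = 1, m = 64;		/* divisors[2] of the 48 kHz entry */

	(void)blr_clk_ratio; (void)clock; (void)dai_sr; (void)n; (void)m;
}

The driver then writes n and m into the DAI clock divider N/M register pairs and dai_sr into the DAI_SR field, as max98925_set_clock() shows.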
diff --git a/sound/soc/codecs/max98925.h b/sound/soc/codecs/max98925.h
new file mode 100644
index 000000000000..3783248f2780
--- /dev/null
+++ b/sound/soc/codecs/max98925.h
@@ -0,0 +1,832 @@
+/*
+ * max98925.h -- MAX98925 ALSA SoC Audio driver
+ *
+ * Copyright 2013-2015 Maxim Integrated Products
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MAX98925_H
+#define _MAX98925_H
+
+#define MAX98925_VERSION 0x51
+#define MAX98925_VERSION1 0x80
+#define MAX98925_VBAT_DATA 0x00
+#define MAX98925_VBST_DATA 0x01
+#define MAX98925_LIVE_STATUS0 0x02
+#define MAX98925_LIVE_STATUS1 0x03
+#define MAX98925_LIVE_STATUS2 0x04
+#define MAX98925_STATE0 0x05
+#define MAX98925_STATE1 0x06
+#define MAX98925_STATE2 0x07
+#define MAX98925_FLAG0 0x08
+#define MAX98925_FLAG1 0x09
+#define MAX98925_FLAG2 0x0A
+#define MAX98925_IRQ_ENABLE0 0x0B
+#define MAX98925_IRQ_ENABLE1 0x0C
+#define MAX98925_IRQ_ENABLE2 0x0D
+#define MAX98925_IRQ_CLEAR0 0x0E
+#define MAX98925_IRQ_CLEAR1 0x0F
+#define MAX98925_IRQ_CLEAR2 0x10
+#define MAX98925_MAP0 0x11
+#define MAX98925_MAP1 0x12
+#define MAX98925_MAP2 0x13
+#define MAX98925_MAP3 0x14
+#define MAX98925_MAP4 0x15
+#define MAX98925_MAP5 0x16
+#define MAX98925_MAP6 0x17
+#define MAX98925_MAP7 0x18
+#define MAX98925_MAP8 0x19
+#define MAX98925_DAI_CLK_MODE1 0x1A
+#define MAX98925_DAI_CLK_MODE2 0x1B
+#define MAX98925_DAI_CLK_DIV_M_MSBS 0x1C
+#define MAX98925_DAI_CLK_DIV_M_LSBS 0x1D
+#define MAX98925_DAI_CLK_DIV_N_MSBS 0x1E
+#define MAX98925_DAI_CLK_DIV_N_LSBS 0x1F
+#define MAX98925_FORMAT 0x20
+#define MAX98925_TDM_SLOT_SELECT 0x21
+#define MAX98925_DOUT_CFG_VMON 0x22
+#define MAX98925_DOUT_CFG_IMON 0x23
+#define MAX98925_DOUT_CFG_VBAT 0x24
+#define MAX98925_DOUT_CFG_VBST 0x25
+#define MAX98925_DOUT_CFG_FLAG 0x26
+#define MAX98925_DOUT_HIZ_CFG1 0x27
+#define MAX98925_DOUT_HIZ_CFG2 0x28
+#define MAX98925_DOUT_HIZ_CFG3 0x29
+#define MAX98925_DOUT_HIZ_CFG4 0x2A
+#define MAX98925_DOUT_DRV_STRENGTH 0x2B
+#define MAX98925_FILTERS 0x2C
+#define MAX98925_GAIN 0x2D
+#define MAX98925_GAIN_RAMPING 0x2E
+#define MAX98925_SPK_AMP 0x2F
+#define MAX98925_THRESHOLD 0x30
+#define MAX98925_ALC_ATTACK 0x31
+#define MAX98925_ALC_ATTEN_RLS 0x32
+#define MAX98925_ALC_HOLD_RLS 0x33
+#define MAX98925_ALC_CONFIGURATION 0x34
+#define MAX98925_BOOST_CONVERTER 0x35
+#define MAX98925_BLOCK_ENABLE 0x36
+#define MAX98925_CONFIGURATION 0x37
+#define MAX98925_GLOBAL_ENABLE 0x38
+#define MAX98925_BOOST_LIMITER 0x3A
+#define MAX98925_REV_VERSION 0xFF
+
+#define MAX98925_REG_CNT (MAX98925_BOOST_LIMITER+1)
+
+/* MAX98925 Register Bit Fields */
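+
+/*
+ * All the *_MASK/*_SHIFT/*_WIDTH triplets below follow the usual regmap
+ * convention: SHIFT positions a WIDTH-bit value inside the register and
+ * MASK selects it.  Minimal usage sketch (the real call sites live in
+ * max98925.c; "gain" is only a hypothetical caller-chosen value that must
+ * fit in M98925_SPK_GAIN_WIDTH bits):
+ *
+ *	regmap_update_bits(max98925->regmap, MAX98925_GAIN,
+ *			   M98925_SPK_GAIN_MASK,
+ *			   gain << M98925_SPK_GAIN_SHIFT);
+ */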
+
+/* MAX98925_R002_LIVE_STATUS0 */
+#define M98925_THERMWARN_STATUS_MASK (1<<3)
+#define M98925_THERMWARN_STATUS_SHIFT 3
+#define M98925_THERMWARN_STATUS_WIDTH 1
+#define M98925_THERMSHDN_STATUS_MASK (1<<1)
+#define M98925_THERMSHDN_STATUS_SHIFT 1
+#define M98925_THERMSHDN_STATUS_WIDTH 1
+
+/* MAX98925_R003_LIVE_STATUS1 */
+#define M98925_SPKCURNT_STATUS_MASK (1<<5)
+#define M98925_SPKCURNT_STATUS_SHIFT 5
+#define M98925_SPKCURNT_STATUS_WIDTH 1
+#define M98925_WATCHFAIL_STATUS_MASK (1<<4)
+#define M98925_WATCHFAIL_STATUS_SHIFT 4
+#define M98925_WATCHFAIL_STATUS_WIDTH 1
+#define M98925_ALCINFH_STATUS_MASK (1<<3)
+#define M98925_ALCINFH_STATUS_SHIFT 3
+#define M98925_ALCINFH_STATUS_WIDTH 1
+#define M98925_ALCACT_STATUS_MASK (1<<2)
+#define M98925_ALCACT_STATUS_SHIFT 2
+#define M98925_ALCACT_STATUS_WIDTH 1
+#define M98925_ALCMUT_STATUS_MASK (1<<1)
+#define M98925_ALCMUT_STATUS_SHIFT 1
+#define M98925_ALCMUT_STATUS_WIDTH 1
+#define M98925_ACLP_STATUS_MASK (1<<0)
+#define M98925_ACLP_STATUS_SHIFT 0
+#define M98925_ACLP_STATUS_WIDTH 1
+
+/* MAX98925_R004_LIVE_STATUS2 */
+#define M98925_SLOTOVRN_STATUS_MASK (1<<6)
+#define M98925_SLOTOVRN_STATUS_SHIFT 6
+#define M98925_SLOTOVRN_STATUS_WIDTH 1
+#define M98925_INVALSLOT_STATUS_MASK (1<<5)
+#define M98925_INVALSLOT_STATUS_SHIFT 5
+#define M98925_INVALSLOT_STATUS_WIDTH 1
+#define M98925_SLOTCNFLT_STATUS_MASK (1<<4)
+#define M98925_SLOTCNFLT_STATUS_SHIFT 4
+#define M98925_SLOTCNFLT_STATUS_WIDTH 1
+#define M98925_VBSTOVFL_STATUS_MASK (1<<3)
+#define M98925_VBSTOVFL_STATUS_SHIFT 3
+#define M98925_VBSTOVFL_STATUS_WIDTH 1
+#define M98925_VBATOVFL_STATUS_MASK (1<<2)
+#define M98925_VBATOVFL_STATUS_SHIFT 2
+#define M98925_VBATOVFL_STATUS_WIDTH 1
+#define M98925_IMONOVFL_STATUS_MASK (1<<1)
+#define M98925_IMONOVFL_STATUS_SHIFT 1
+#define M98925_IMONOVFL_STATUS_WIDTH 1
+#define M98925_VMONOVFL_STATUS_MASK (1<<0)
+#define M98925_VMONOVFL_STATUS_SHIFT 0
+#define M98925_VMONOVFL_STATUS_WIDTH 1
+
+/* MAX98925_R005_STATE0 */
+#define M98925_THERMWARN_END_STATE_MASK (1<<3)
+#define M98925_THERMWARN_END_STATE_SHIFT 3
+#define M98925_THERMWARN_END_STATE_WIDTH 1
+#define M98925_THERMWARN_BGN_STATE_MASK (1<<2)
+#define M98925_THERMWARN_BGN_STATE_SHIFT 2
+#define M98925_THERMWARN_BGN_STATE_WIDTH 1
+#define M98925_THERMSHDN_END_STATE_MASK (1<<1)
+#define M98925_THERMSHDN_END_STATE_SHIFT 1
+#define M98925_THERMSHDN_END_STATE_WIDTH 1
+#define M98925_THERMSHDN_BGN_STATE_MASK (1<<0)
+#define M98925_THERMSHDN_BGN_STATE_SHIFT 0
+#define M98925_THERMSHDN_BGN_STATE_WIDTH 1
+
+/* MAX98925_R006_STATE1 */
+#define M98925_SPRCURNT_STATE_MASK (1<<5)
+#define M98925_SPRCURNT_STATE_SHIFT 5
+#define M98925_SPRCURNT_STATE_WIDTH 1
+#define M98925_WATCHFAIL_STATE_MASK (1<<4)
+#define M98925_WATCHFAIL_STATE_SHIFT 4
+#define M98925_WATCHFAIL_STATE_WIDTH 1
+#define M98925_ALCINFH_STATE_MASK (1<<3)
+#define M98925_ALCINFH_STATE_SHIFT 3
+#define M98925_ALCINFH_STATE_WIDTH 1
+#define M98925_ALCACT_STATE_MASK (1<<2)
+#define M98925_ALCACT_STATE_SHIFT 2
+#define M98925_ALCACT_STATE_WIDTH 1
+#define M98925_ALCMUT_STATE_MASK (1<<1)
+#define M98925_ALCMUT_STATE_SHIFT 1
+#define M98925_ALCMUT_STATE_WIDTH 1
+#define M98925_ALCP_STATE_MASK (1<<0)
+#define M98925_ALCP_STATE_SHIFT 0
+#define M98925_ALCP_STATE_WIDTH 1
+
+/* MAX98925_R007_STATE2 */
+#define M98925_SLOTOVRN_STATE_MASK (1<<6)
+#define M98925_SLOTOVRN_STATE_SHIFT 6
+#define M98925_SLOTOVRN_STATE_WIDTH 1
+#define M98925_INVALSLOT_STATE_MASK (1<<5)
+#define M98925_INVALSLOT_STATE_SHIFT 5
+#define M98925_INVALSLOT_STATE_WIDTH 1
+#define M98925_SLOTCNFLT_STATE_MASK (1<<4)
+#define M98925_SLOTCNFLT_STATE_SHIFT 4
+#define M98925_SLOTCNFLT_STATE_WIDTH 1
+#define M98925_VBSTOVFL_STATE_MASK (1<<3)
+#define M98925_VBSTOVFL_STATE_SHIFT 3
+#define M98925_VBSTOVFL_STATE_WIDTH 1
+#define M98925_VBATOVFL_STATE_MASK (1<<2)
+#define M98925_VBATOVFL_STATE_SHIFT 2
+#define M98925_VBATOVFL_STATE_WIDTH 1
+#define M98925_IMONOVFL_STATE_MASK (1<<1)
+#define M98925_IMONOVFL_STATE_SHIFT 1
+#define M98925_IMONOVFL_STATE_WIDTH 1
+#define M98925_VMONOVFL_STATE_MASK (1<<0)
+#define M98925_VMONOVFL_STATE_SHIFT 0
+#define M98925_VMONOVFL_STATE_WIDTH 1
+
+/* MAX98925_R008_FLAG0 */
+#define M98925_THERMWARN_END_FLAG_MASK (1<<3)
+#define M98925_THERMWARN_END_FLAG_SHIFT 3
+#define M98925_THERMWARN_END_FLAG_WIDTH 1
+#define M98925_THERMWARN_BGN_FLAG_MASK (1<<2)
+#define M98925_THERMWARN_BGN_FLAG_SHIFT 2
+#define M98925_THERMWARN_BGN_FLAG_WIDTH 1
+#define M98925_THERMSHDN_END_FLAG_MASK (1<<1)
+#define M98925_THERMSHDN_END_FLAG_SHIFT 1
+#define M98925_THERMSHDN_END_FLAG_WIDTH 1
+#define M98925_THERMSHDN_BGN_FLAG_MASK (1<<0)
+#define M98925_THERMSHDN_BGN_FLAG_SHIFT 0
+#define M98925_THERMSHDN_BGN_FLAG_WIDTH 1
+
+/* MAX98925_R009_FLAG1 */
+#define M98925_SPKCURNT_FLAG_MASK (1<<5)
+#define M98925_SPKCURNT_FLAG_SHIFT 5
+#define M98925_SPKCURNT_FLAG_WIDTH 1
+#define M98925_WATCHFAIL_FLAG_MASK (1<<4)
+#define M98925_WATCHFAIL_FLAG_SHIFT 4
+#define M98925_WATCHFAIL_FLAG_WIDTH 1
+#define M98925_ALCINFH_FLAG_MASK (1<<3)
+#define M98925_ALCINFH_FLAG_SHIFT 3
+#define M98925_ALCINFH_FLAG_WIDTH 1
+#define M98925_ALCACT_FLAG_MASK (1<<2)
+#define M98925_ALCACT_FLAG_SHIFT 2
+#define M98925_ALCACT_FLAG_WIDTH 1
+#define M98925_ALCMUT_FLAG_MASK (1<<1)
+#define M98925_ALCMUT_FLAG_SHIFT 1
+#define M98925_ALCMUT_FLAG_WIDTH 1
+#define M98925_ALCP_FLAG_MASK (1<<0)
+#define M98925_ALCP_FLAG_SHIFT 0
+#define M98925_ALCP_FLAG_WIDTH 1
+
+/* MAX98925_R00A_FLAG2 */
+#define M98925_SLOTOVRN_FLAG_MASK (1<<6)
+#define M98925_SLOTOVRN_FLAG_SHIFT 6
+#define M98925_SLOTOVRN_FLAG_WIDTH 1
+#define M98925_INVALSLOT_FLAG_MASK (1<<5)
+#define M98925_INVALSLOT_FLAG_SHIFT 5
+#define M98925_INVALSLOT_FLAG_WIDTH 1
+#define M98925_SLOTCNFLT_FLAG_MASK (1<<4)
+#define M98925_SLOTCNFLT_FLAG_SHIFT 4
+#define M98925_SLOTCNFLT_FLAG_WIDTH 1
+#define M98925_VBSTOVFL_FLAG_MASK (1<<3)
+#define M98925_VBSTOVFL_FLAG_SHIFT 3
+#define M98925_VBSTOVFL_FLAG_WIDTH 1
+#define M98925_VBATOVFL_FLAG_MASK (1<<2)
+#define M98925_VBATOVFL_FLAG_SHIFT 2
+#define M98925_VBATOVFL_FLAG_WIDTH 1
+#define M98925_IMONOVFL_FLAG_MASK (1<<1)
+#define M98925_IMONOVFL_FLAG_SHIFT 1
+#define M98925_IMONOVFL_FLAG_WIDTH 1
+#define M98925_VMONOVFL_FLAG_MASK (1<<0)
+#define M98925_VMONOVFL_FLAG_SHIFT 0
+#define M98925_VMONOVFL_FLAG_WIDTH 1
+
+/* MAX98925_R00B_IRQ_ENABLE0 */
+#define M98925_THERMWARN_END_EN_MASK (1<<3)
+#define M98925_THERMWARN_END_EN_SHIFT 3
+#define M98925_THERMWARN_END_EN_WIDTH 1
+#define M98925_THERMWARN_BGN_EN_MASK (1<<2)
+#define M98925_THERMWARN_BGN_EN_SHIFT 2
+#define M98925_THERMWARN_BGN_EN_WIDTH 1
+#define M98925_THERMSHDN_END_EN_MASK (1<<1)
+#define M98925_THERMSHDN_END_EN_SHIFT 1
+#define M98925_THERMSHDN_END_EN_WIDTH 1
+#define M98925_THERMSHDN_BGN_EN_MASK (1<<0)
+#define M98925_THERMSHDN_BGN_EN_SHIFT 0
+#define M98925_THERMSHDN_BGN_EN_WIDTH 1
+
+/* MAX98925_R00C_IRQ_ENABLE1 */
+#define M98925_SPKCURNT_EN_MASK (1<<5)
+#define M98925_SPKCURNT_EN_SHIFT 5
+#define M98925_SPKCURNT_EN_WIDTH 1
+#define M98925_WATCHFAIL_EN_MASK (1<<4)
+#define M98925_WATCHFAIL_EN_SHIFT 4
+#define M98925_WATCHFAIL_EN_WIDTH 1
+#define M98925_ALCINFH_EN_MASK (1<<3)
+#define M98925_ALCINFH_EN_SHIFT 3
+#define M98925_ALCINFH_EN_WIDTH 1
+#define M98925_ALCACT_EN_MASK (1<<2)
+#define M98925_ALCACT_EN_SHIFT 2
+#define M98925_ALCACT_EN_WIDTH 1
+#define M98925_ALCMUT_EN_MASK (1<<1)
+#define M98925_ALCMUT_EN_SHIFT 1
+#define M98925_ALCMUT_EN_WIDTH 1
+#define M98925_ALCP_EN_MASK (1<<0)
+#define M98925_ALCP_EN_SHIFT 0
+#define M98925_ALCP_EN_WIDTH 1
+
+/* MAX98925_R00D_IRQ_ENABLE2 */
+#define M98925_SLOTOVRN_EN_MASK (1<<6)
+#define M98925_SLOTOVRN_EN_SHIFT 6
+#define M98925_SLOTOVRN_EN_WIDTH 1
+#define M98925_INVALSLOT_EN_MASK (1<<5)
+#define M98925_INVALSLOT_EN_SHIFT 5
+#define M98925_INVALSLOT_EN_WIDTH 1
+#define M98925_SLOTCNFLT_EN_MASK (1<<4)
+#define M98925_SLOTCNFLT_EN_SHIFT 4
+#define M98925_SLOTCNFLT_EN_WIDTH 1
+#define M98925_VBSTOVFL_EN_MASK (1<<3)
+#define M98925_VBSTOVFL_EN_SHIFT 3
+#define M98925_VBSTOVFL_EN_WIDTH 1
+#define M98925_VBATOVFL_EN_MASK (1<<2)
+#define M98925_VBATOVFL_EN_SHIFT 2
+#define M98925_VBATOVFL_EN_WIDTH 1
+#define M98925_IMONOVFL_EN_MASK (1<<1)
+#define M98925_IMONOVFL_EN_SHIFT 1
+#define M98925_IMONOVFL_EN_WIDTH 1
+#define M98925_VMONOVFL_EN_MASK (1<<0)
+#define M98925_VMONOVFL_EN_SHIFT 0
+#define M98925_VMONOVFL_EN_WIDTH 1
+
+/* MAX98925_R00E_IRQ_CLEAR0 */
+#define M98925_THERMWARN_END_CLR_MASK (1<<3)
+#define M98925_THERMWARN_END_CLR_SHIFT 3
+#define M98925_THERMWARN_END_CLR_WIDTH 1
+#define M98925_THERMWARN_BGN_CLR_MASK (1<<2)
+#define M98925_THERMWARN_BGN_CLR_SHIFT 2
+#define M98925_THERMWARN_BGN_CLR_WIDTH 1
+#define M98925_THERMSHDN_END_CLR_MASK (1<<1)
+#define M98925_THERMSHDN_END_CLR_SHIFT 1
+#define M98925_THERMSHDN_END_CLR_WIDTH 1
+#define M98925_THERMSHDN_BGN_CLR_MASK (1<<0)
+#define M98925_THERMSHDN_BGN_CLR_SHIFT 0
+#define M98925_THERMSHDN_BGN_CLR_WIDTH 1
+
+/* MAX98925_R00F_IRQ_CLEAR1 */
+#define M98925_SPKCURNT_CLR_MASK (1<<5)
+#define M98925_SPKCURNT_CLR_SHIFT 5
+#define M98925_SPKCURNT_CLR_WIDTH 1
+#define M98925_WATCHFAIL_CLR_MASK (1<<4)
+#define M98925_WATCHFAIL_CLR_SHIFT 4
+#define M98925_WATCHFAIL_CLR_WIDTH 1
+#define M98925_ALCINFH_CLR_MASK (1<<3)
+#define M98925_ALCINFH_CLR_SHIFT 3
+#define M98925_ALCINFH_CLR_WIDTH 1
+#define M98925_ALCACT_CLR_MASK (1<<2)
+#define M98925_ALCACT_CLR_SHIFT 2
+#define M98925_ALCACT_CLR_WIDTH 1
+#define M98925_ALCMUT_CLR_MASK (1<<1)
+#define M98925_ALCMUT_CLR_SHIFT 1
+#define M98925_ALCMUT_CLR_WIDTH 1
+#define M98925_ALCP_CLR_MASK (1<<0)
+#define M98925_ALCP_CLR_SHIFT 0
+#define M98925_ALCP_CLR_WIDTH 1
+
+/* MAX98925_R010_IRQ_CLEAR2 */
+#define M98925_SLOTOVRN_CLR_MASK (1<<6)
+#define M98925_SLOTOVRN_CLR_SHIFT 6
+#define M98925_SLOTOVRN_CLR_WIDTH 1
+#define M98925_INVALSLOT_CLR_MASK (1<<5)
+#define M98925_INVALSLOT_CLR_SHIFT 5
+#define M98925_INVALSLOT_CLR_WIDTH 1
+#define M98925_SLOTCNFLT_CLR_MASK (1<<4)
+#define M98925_SLOTCNFLT_CLR_SHIFT 4
+#define M98925_SLOTCNFLT_CLR_WIDTH 1
+#define M98925_VBSTOVFL_CLR_MASK (1<<3)
+#define M98925_VBSTOVFL_CLR_SHIFT 3
+#define M98925_VBSTOVFL_CLR_WIDTH 1
+#define M98925_VBATOVFL_CLR_MASK (1<<2)
+#define M98925_VBATOVFL_CLR_SHIFT 2
+#define M98925_VBATOVFL_CLR_WIDTH 1
+#define M98925_IMONOVFL_CLR_MASK (1<<1)
+#define M98925_IMONOVFL_CLR_SHIFT 1
+#define M98925_IMONOVFL_CLR_WIDTH 1
+#define M98925_VMONOVFL_CLR_MASK (1<<0)
+#define M98925_VMONOVFL_CLR_SHIFT 0
+#define M98925_VMONOVFL_CLR_WIDTH 1
+
+/* MAX98925_R011_MAP0 */
+#define M98925_ER_THERMWARN_EN_MASK (1<<7)
+#define M98925_ER_THERMWARN_EN_SHIFT 7
+#define M98925_ER_THERMWARN_EN_WIDTH 1
+#define M98925_ER_THERMWARN_MAP_MASK (0x07<<4)
+#define M98925_ER_THERMWARN_MAP_SHIFT 4
+#define M98925_ER_THERMWARN_MAP_WIDTH 3
+
+/* MAX98925_R012_MAP1 */
+#define M98925_ER_ALCMUT_EN_MASK (1<<7)
+#define M98925_ER_ALCMUT_EN_SHIFT 7
+#define M98925_ER_ALCMUT_EN_WIDTH 1
+#define M98925_ER_ALCMUT_MAP_MASK (0x07<<4)
+#define M98925_ER_ALCMUT_MAP_SHIFT 4
+#define M98925_ER_ALCMUT_MAP_WIDTH 3
+#define M98925_ER_ALCP_EN_MASK (1<<3)
+#define M98925_ER_ALCP_EN_SHIFT 3
+#define M98925_ER_ALCP_EN_WIDTH 1
+#define M98925_ER_ALCP_MAP_MASK (0x07<<0)
+#define M98925_ER_ALCP_MAP_SHIFT 0
+#define M98925_ER_ALCP_MAP_WIDTH 3
+
+/* MAX98925_R013_MAP2 */
+#define M98925_ER_ALCINFH_EN_MASK (1<<7)
+#define M98925_ER_ALCINFH_EN_SHIFT 7
+#define M98925_ER_ALCINFH_EN_WIDTH 1
+#define M98925_ER_ALCINFH_MAP_MASK (0x07<<4)
+#define M98925_ER_ALCINFH_MAP_SHIFT 4
+#define M98925_ER_ALCINFH_MAP_WIDTH 3
+#define M98925_ER_ALCACT_EN_MASK (1<<3)
+#define M98925_ER_ALCACT_EN_SHIFT 3
+#define M98925_ER_ALCACT_EN_WIDTH 1
+#define M98925_ER_ALCACT_MAP_MASK (0x07<<0)
+#define M98925_ER_ALCACT_MAP_SHIFT 0
+#define M98925_ER_ALCACT_MAP_WIDTH 3
+
+/* MAX98925_R014_MAP3 */
+#define M98925_ER_SPKCURNT_EN_MASK (1<<7)
+#define M98925_ER_SPKCURNT_EN_SHIFT 7
+#define M98925_ER_SPKCURNT_EN_WIDTH 1
+#define M98925_ER_SPKCURNT_MAP_MASK (0x07<<4)
+#define M98925_ER_SPKCURNT_MAP_SHIFT 4
+#define M98925_ER_SPKCURNT_MAP_WIDTH 3
+
+/* MAX98925_R015_MAP4 */
+/* RESERVED */
+
+/* MAX98925_R016_MAP5 */
+#define M98925_ER_IMONOVFL_EN_MASK (1<<7)
+#define M98925_ER_IMONOVFL_EN_SHIFT 7
+#define M98925_ER_IMONOVFL_EN_WIDTH 1
+#define M98925_ER_IMONOVFL_MAP_MASK (0x07<<4)
+#define M98925_ER_IMONOVFL_MAP_SHIFT 4
+#define M98925_ER_IMONOVFL_MAP_WIDTH 3
+#define M98925_ER_VMONOVFL_EN_MASK (1<<3)
+#define M98925_ER_VMONOVFL_EN_SHIFT 3
+#define M98925_ER_VMONOVFL_EN_WIDTH 1
+#define M98925_ER_VMONOVFL_MAP_MASK (0x07<<0)
+#define M98925_ER_VMONOVFL_MAP_SHIFT 0
+#define M98925_ER_VMONOVFL_MAP_WIDTH 3
+
+/* MAX98925_R017_MAP6 */
+#define M98925_ER_VBSTOVFL_EN_MASK (1<<7)
+#define M98925_ER_VBSTOVFL_EN_SHIFT 7
+#define M98925_ER_VBSTOVFL_EN_WIDTH 1
+#define M98925_ER_VBSTOVFL_MAP_MASK (0x07<<4)
+#define M98925_ER_VBSTOVFL_MAP_SHIFT 4
+#define M98925_ER_VBSTOVFL_MAP_WIDTH 3
+#define M98925_ER_VBATOVFL_EN_MASK (1<<3)
+#define M98925_ER_VBATOVFL_EN_SHIFT 3
+#define M98925_ER_VBATOVFL_EN_WIDTH 1
+#define M98925_ER_VBATOVFL_MAP_MASK (0x07<<0)
+#define M98925_ER_VBATOVFL_MAP_SHIFT 0
+#define M98925_ER_VBATOVFL_MAP_WIDTH 3
+
+/* MAX98925_R018_MAP7 */
+#define M98925_ER_INVALSLOT_EN_MASK (1<<7)
+#define M98925_ER_INVALSLOT_EN_SHIFT 7
+#define M98925_ER_INVALSLOT_EN_WIDTH 1
+#define M98925_ER_INVALSLOT_MAP_MASK (0x07<<4)
+#define M98925_ER_INVALSLOT_MAP_SHIFT 4
+#define M98925_ER_INVALSLOT_MAP_WIDTH 3
+#define M98925_ER_SLOTCNFLT_EN_MASK (1<<3)
+#define M98925_ER_SLOTCNFLT_EN_SHIFT 3
+#define M98925_ER_SLOTCNFLT_EN_WIDTH 1
+#define M98925_ER_SLOTCNFLT_MAP_MASK (0x07<<0)
+#define M98925_ER_SLOTCNFLT_MAP_SHIFT 0
+#define M98925_ER_SLOTCNFLT_MAP_WIDTH 3
+
+/* MAX98925_R019_MAP8 */
+#define M98925_ER_SLOTOVRN_EN_MASK (1<<3)
+#define M98925_ER_SLOTOVRN_EN_SHIFT 3
+#define M98925_ER_SLOTOVRN_EN_WIDTH 1
+#define M98925_ER_SLOTOVRN_MAP_MASK (0x07<<0)
+#define M98925_ER_SLOTOVRN_MAP_SHIFT 0
+#define M98925_ER_SLOTOVRN_MAP_WIDTH 3
+
+/* MAX98925_R01A_DAI_CLK_MODE1 */
+#define M98925_DAI_CLK_SOURCE_MASK (1<<6)
+#define M98925_DAI_CLK_SOURCE_SHIFT 6
+#define M98925_DAI_CLK_SOURCE_WIDTH 1
+#define M98925_MDLL_MULT_MASK (0x0F<<0)
+#define M98925_MDLL_MULT_SHIFT 0
+#define M98925_MDLL_MULT_WIDTH 4
+
+#define M98925_MDLL_MULT_MCLKx8 6
+#define M98925_MDLL_MULT_MCLKx16 8
+
+/* MAX98925_R01B_DAI_CLK_MODE2 */
+#define M98925_DAI_SR_MASK (0x0F<<4)
+#define M98925_DAI_SR_SHIFT 4
+#define M98925_DAI_SR_WIDTH 4
+#define M98925_DAI_MAS_MASK (1<<3)
+#define M98925_DAI_MAS_SHIFT 3
+#define M98925_DAI_MAS_WIDTH 1
+#define M98925_DAI_BSEL_MASK (0x07<<0)
+#define M98925_DAI_BSEL_SHIFT 0
+#define M98925_DAI_BSEL_WIDTH 3
+
+#define M98925_DAI_BSEL_32 (0 << M98925_DAI_BSEL_SHIFT)
+#define M98925_DAI_BSEL_48 (1 << M98925_DAI_BSEL_SHIFT)
+#define M98925_DAI_BSEL_64 (2 << M98925_DAI_BSEL_SHIFT)
+#define M98925_DAI_BSEL_256 (6 << M98925_DAI_BSEL_SHIFT)
+
+/* MAX98925_R01C_DAI_CLK_DIV_M_MSBS */
+#define M98925_DAI_M_MSBS_MASK (0xFF<<0)
+#define M98925_DAI_M_MSBS_SHIFT 0
+#define M98925_DAI_M_MSBS_WIDTH 8
+
+/* MAX98925_R01D_DAI_CLK_DIV_M_LSBS */
+#define M98925_DAI_M_LSBS_MASK (0xFF<<0)
+#define M98925_DAI_M_LSBS_SHIFT 0
+#define M98925_DAI_M_LSBS_WIDTH 8
+
+/* MAX98925_R01E_DAI_CLK_DIV_N_MSBS */
+#define M98925_DAI_N_MSBS_MASK (0x7F<<0)
+#define M98925_DAI_N_MSBS_SHIFT 0
+#define M98925_DAI_N_MSBS_WIDTH 7
+
+/* MAX98925_R01F_DAI_CLK_DIV_N_LSBS */
+#define M98925_DAI_N_LSBS_MASK (0xFF<<0)
+#define M98925_DAI_N_LSBS_SHIFT 0
+#define M98925_DAI_N_LSBS_WIDTH 8
+
+/* MAX98925_R020_FORMAT */
+#define M98925_DAI_CHANSZ_MASK (0x03<<6)
+#define M98925_DAI_CHANSZ_SHIFT 6
+#define M98925_DAI_CHANSZ_WIDTH 2
+#define M98925_DAI_EXTBCLK_HIZ_MASK (1<<4)
+#define M98925_DAI_EXTBCLK_HIZ_SHIFT 4
+#define M98925_DAI_EXTBCLK_HIZ_WIDTH 1
+#define M98925_DAI_WCI_MASK (1<<3)
+#define M98925_DAI_WCI_SHIFT 3
+#define M98925_DAI_WCI_WIDTH 1
+#define M98925_DAI_BCI_MASK (1<<2)
+#define M98925_DAI_BCI_SHIFT 2
+#define M98925_DAI_BCI_WIDTH 1
+#define M98925_DAI_DLY_MASK (1<<1)
+#define M98925_DAI_DLY_SHIFT 1
+#define M98925_DAI_DLY_WIDTH 1
+#define M98925_DAI_TDM_MASK (1<<0)
+#define M98925_DAI_TDM_SHIFT 0
+#define M98925_DAI_TDM_WIDTH 1
+
+#define M98925_DAI_CHANSZ_16 (1 << M98925_DAI_CHANSZ_SHIFT)
+#define M98925_DAI_CHANSZ_24 (2 << M98925_DAI_CHANSZ_SHIFT)
+#define M98925_DAI_CHANSZ_32 (3 << M98925_DAI_CHANSZ_SHIFT)
+
+/* MAX98925_R021_TDM_SLOT_SELECT */
+#define M98925_DAI_DO_EN_MASK (1<<7)
+#define M98925_DAI_DO_EN_SHIFT 7
+#define M98925_DAI_DO_EN_WIDTH 1
+#define M98925_DAI_DIN_EN_MASK (1<<6)
+#define M98925_DAI_DIN_EN_SHIFT 6
+#define M98925_DAI_DIN_EN_WIDTH 1
+#define M98925_DAI_INR_SOURCE_MASK (0x07<<3)
+#define M98925_DAI_INR_SOURCE_SHIFT 3
+#define M98925_DAI_INR_SOURCE_WIDTH 3
+#define M98925_DAI_INL_SOURCE_MASK (0x07<<0)
+#define M98925_DAI_INL_SOURCE_SHIFT 0
+#define M98925_DAI_INL_SOURCE_WIDTH 3
+
+/* MAX98925_R022_DOUT_CFG_VMON */
+#define M98925_DAI_VMON_EN_MASK (1<<5)
+#define M98925_DAI_VMON_EN_SHIFT 5
+#define M98925_DAI_VMON_EN_WIDTH 1
+#define M98925_DAI_VMON_SLOT_MASK (0x1F<<0)
+#define M98925_DAI_VMON_SLOT_SHIFT 0
+#define M98925_DAI_VMON_SLOT_WIDTH 5
+
+#define M98925_DAI_VMON_SLOT_00_01 (0 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_01_02 (1 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_02_03 (2 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_03_04 (3 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_04_05 (4 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_05_06 (5 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_06_07 (6 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_07_08 (7 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_08_09 (8 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_09_0A (9 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_0A_0B (10 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_0B_0C (11 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_0C_0D (12 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_0D_0E (13 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_0E_0F (14 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_0F_10 (15 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_10_11 (16 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_11_12 (17 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_12_13 (18 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_13_14 (19 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_14_15 (20 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_15_16 (21 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_16_17 (22 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_17_18 (23 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_18_19 (24 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_19_1A (25 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_1A_1B (26 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_1B_1C (27 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_1C_1D (28 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_1D_1E (29 << M98925_DAI_VMON_SLOT_SHIFT)
+#define M98925_DAI_VMON_SLOT_1E_1F (30 << M98925_DAI_VMON_SLOT_SHIFT)
+
+/* MAX98925_R023_DOUT_CFG_IMON */
+#define M98925_DAI_IMON_EN_MASK (1<<5)
+#define M98925_DAI_IMON_EN_SHIFT 5
+#define M98925_DAI_IMON_EN_WIDTH 1
+#define M98925_DAI_IMON_SLOT_MASK (0x1F<<0)
+#define M98925_DAI_IMON_SLOT_SHIFT 0
+#define M98925_DAI_IMON_SLOT_WIDTH 5
+
+#define M98925_DAI_IMON_SLOT_00_01 (0 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_01_02 (1 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_02_03 (2 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_03_04 (3 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_04_05 (4 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_05_06 (5 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_06_07 (6 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_07_08 (7 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_08_09 (8 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_09_0A (9 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_0A_0B (10 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_0B_0C (11 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_0C_0D (12 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_0D_0E (13 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_0E_0F (14 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_0F_10 (15 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_10_11 (16 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_11_12 (17 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_12_13 (18 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_13_14 (19 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_14_15 (20 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_15_16 (21 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_16_17 (22 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_17_18 (23 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_18_19 (24 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_19_1A (25 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_1A_1B (26 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_1B_1C (27 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_1C_1D (28 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_1D_1E (29 << M98925_DAI_IMON_SLOT_SHIFT)
+#define M98925_DAI_IMON_SLOT_1E_1F (30 << M98925_DAI_IMON_SLOT_SHIFT)
+
+/* MAX98925_R024_DOUT_CFG_VBAT */
+#define M98925_DAI_VBAT_EN_MASK (1<<5)
+#define M98925_DAI_VBAT_EN_SHIFT 5
+#define M98925_DAI_VBAT_EN_WIDTH 1
+#define M98925_DAI_VBAT_SLOT_MASK (0x1F<<0)
+#define M98925_DAI_VBAT_SLOT_SHIFT 0
+#define M98925_DAI_VBAT_SLOT_WIDTH 5
+
+/* MAX98925_R025_DOUT_CFG_VBST */
+#define M98925_DAI_VBST_EN_MASK (1<<5)
+#define M98925_DAI_VBST_EN_SHIFT 5
+#define M98925_DAI_VBST_EN_WIDTH 1
+#define M98925_DAI_VBST_SLOT_MASK (0x1F<<0)
+#define M98925_DAI_VBST_SLOT_SHIFT 0
+#define M98925_DAI_VBST_SLOT_WIDTH 5
+
+/* MAX98925_R026_DOUT_CFG_FLAG */
+#define M98925_DAI_FLAG_EN_MASK (1<<5)
+#define M98925_DAI_FLAG_EN_SHIFT 5
+#define M98925_DAI_FLAG_EN_WIDTH 1
+#define M98925_DAI_FLAG_SLOT_MASK (0x1F<<0)
+#define M98925_DAI_FLAG_SLOT_SHIFT 0
+#define M98925_DAI_FLAG_SLOT_WIDTH 5
+
+/* MAX98925_R027_DOUT_HIZ_CFG1 */
+#define M98925_DAI_SLOT_HIZ_CFG1_MASK (0xFF<<0)
+#define M98925_DAI_SLOT_HIZ_CFG1_SHIFT 0
+#define M98925_DAI_SLOT_HIZ_CFG1_WIDTH 8
+
+/* MAX98925_R028_DOUT_HIZ_CFG2 */
+#define M98925_DAI_SLOT_HIZ_CFG2_MASK (0xFF<<0)
+#define M98925_DAI_SLOT_HIZ_CFG2_SHIFT 0
+#define M98925_DAI_SLOT_HIZ_CFG2_WIDTH 8
+
+/* MAX98925_R029_DOUT_HIZ_CFG3 */
+#define M98925_DAI_SLOT_HIZ_CFG3_MASK (0xFF<<0)
+#define M98925_DAI_SLOT_HIZ_CFG3_SHIFT 0
+#define M98925_DAI_SLOT_HIZ_CFG3_WIDTH 8
+
+/* MAX98925_R02A_DOUT_HIZ_CFG4 */
+#define M98925_DAI_SLOT_HIZ_CFG4_MASK (0xFF<<0)
+#define M98925_DAI_SLOT_HIZ_CFG4_SHIFT 0
+#define M98925_DAI_SLOT_HIZ_CFG4_WIDTH 8
+
+/* MAX98925_R02B_DOUT_DRV_STRENGTH */
+#define M98925_DAI_OUT_DRIVE_MASK (0x03<<0)
+#define M98925_DAI_OUT_DRIVE_SHIFT 0
+#define M98925_DAI_OUT_DRIVE_WIDTH 2
+
+/* MAX98925_R02C_FILTERS */
+#define M98925_ADC_DITHER_EN_MASK (1<<7)
+#define M98925_ADC_DITHER_EN_SHIFT 7
+#define M98925_ADC_DITHER_EN_WIDTH 1
+#define M98925_IV_DCB_EN_MASK (1<<6)
+#define M98925_IV_DCB_EN_SHIFT 6
+#define M98925_IV_DCB_EN_WIDTH 1
+#define M98925_DAC_DITHER_EN_MASK (1<<4)
+#define M98925_DAC_DITHER_EN_SHIFT 4
+#define M98925_DAC_DITHER_EN_WIDTH 1
+#define M98925_DAC_FILTER_MODE_MASK (1<<3)
+#define M98925_DAC_FILTER_MODE_SHIFT 3
+#define M98925_DAC_FILTER_MODE_WIDTH 1
+#define M98925_DAC_HPF_MASK (0x07<<0)
+#define M98925_DAC_HPF_SHIFT 0
+#define M98925_DAC_HPF_WIDTH 3
+#define M98925_DAC_HPF_DISABLE (0 << M98925_DAC_HPF_SHIFT)
+#define M98925_DAC_HPF_DC_BLOCK (1 << M98925_DAC_HPF_SHIFT)
+#define M98925_DAC_HPF_EN_100 (2 << M98925_DAC_HPF_SHIFT)
+#define M98925_DAC_HPF_EN_200 (3 << M98925_DAC_HPF_SHIFT)
+#define M98925_DAC_HPF_EN_400 (4 << M98925_DAC_HPF_SHIFT)
+#define M98925_DAC_HPF_EN_800 (5 << M98925_DAC_HPF_SHIFT)
+
+/* MAX98925_R02D_GAIN */
+#define M98925_DAC_IN_SEL_MASK (0x03<<5)
+#define M98925_DAC_IN_SEL_SHIFT 5
+#define M98925_DAC_IN_SEL_WIDTH 2
+#define M98925_SPK_GAIN_MASK (0x1F<<0)
+#define M98925_SPK_GAIN_SHIFT 0
+#define M98925_SPK_GAIN_WIDTH 5
+
+#define M98925_DAC_IN_SEL_LEFT_DAI (0 << M98925_DAC_IN_SEL_SHIFT)
+#define M98925_DAC_IN_SEL_RIGHT_DAI (1 << M98925_DAC_IN_SEL_SHIFT)
+#define M98925_DAC_IN_SEL_SUMMED_DAI (2 << M98925_DAC_IN_SEL_SHIFT)
+#define M98925_DAC_IN_SEL_DIV2_SUMMED_DAI (3 << M98925_DAC_IN_SEL_SHIFT)
+
+/* MAX98925_R02E_GAIN_RAMPING */
+#define M98925_SPK_RMP_EN_MASK (1<<1)
+#define M98925_SPK_RMP_EN_SHIFT 1
+#define M98925_SPK_RMP_EN_WIDTH 1
+#define M98925_SPK_ZCD_EN_MASK (1<<0)
+#define M98925_SPK_ZCD_EN_SHIFT 0
+#define M98925_SPK_ZCD_EN_WIDTH 1
+
+/* MAX98925_R02F_SPK_AMP */
+#define M98925_SPK_MODE_MASK (1<<0)
+#define M98925_SPK_MODE_SHIFT 0
+#define M98925_SPK_MODE_WIDTH 1
+
+/* MAX98925_R030_THRESHOLD */
+#define M98925_ALC_EN_MASK (1<<5)
+#define M98925_ALC_EN_SHIFT 5
+#define M98925_ALC_EN_WIDTH 1
+#define M98925_ALC_TH_MASK (0x1F<<0)
+#define M98925_ALC_TH_SHIFT 0
+#define M98925_ALC_TH_WIDTH 5
+
+/* MAX98925_R031_ALC_ATTACK */
+#define M98925_ALC_ATK_STEP_MASK (0x0F<<4)
+#define M98925_ALC_ATK_STEP_SHIFT 4
+#define M98925_ALC_ATK_STEP_WIDTH 4
+#define M98925_ALC_ATK_RATE_MASK (0x7<<0)
+#define M98925_ALC_ATK_RATE_SHIFT 0
+#define M98925_ALC_ATK_RATE_WIDTH 3
+
+/* MAX98925_R032_ALC_ATTEN_RLS */
+#define M98925_ALC_MAX_ATTEN_MASK (0x0F<<4)
+#define M98925_ALC_MAX_ATTEN_SHIFT 4
+#define M98925_ALC_MAX_ATTEN_WIDTH 4
+#define M98925_ALC_RLS_RATE_MASK (0x7<<0)
+#define M98925_ALC_RLS_RATE_SHIFT 0
+#define M98925_ALC_RLS_RATE_WIDTH 3
+
+/* MAX98925_R033_ALC_HOLD_RLS */
+#define M98925_ALC_RLS_TGR_MASK (1<<0)
+#define M98925_ALC_RLS_TGR_SHIFT 0
+#define M98925_ALC_RLS_TGR_WIDTH 1
+
+/* MAX98925_R034_ALC_CONFIGURATION */
+#define M98925_ALC_MUTE_EN_MASK (1<<7)
+#define M98925_ALC_MUTE_EN_SHIFT 7
+#define M98925_ALC_MUTE_EN_WIDTH 1
+#define M98925_ALC_MUTE_DLY_MASK (0x07<<4)
+#define M98925_ALC_MUTE_DLY_SHIFT 4
+#define M98925_ALC_MUTE_DLY_WIDTH 3
+#define M98925_ALC_RLS_DBT_MASK (0x07<<0)
+#define M98925_ALC_RLS_DBT_SHIFT 0
+#define M98925_ALC_RLS_DBT_WIDTH 3
+
+/* MAX98925_R035_BOOST_CONVERTER */
+#define M98925_BST_SYNC_MASK (1<<7)
+#define M98925_BST_SYNC_SHIFT 7
+#define M98925_BST_SYNC_WIDTH 1
+#define M98925_BST_PHASE_MASK (0x03<<4)
+#define M98925_BST_PHASE_SHIFT 4
+#define M98925_BST_PHASE_WIDTH 2
+#define M98925_BST_SKIP_MODE_MASK (0x03<<0)
+#define M98925_BST_SKIP_MODE_SHIFT 0
+#define M98925_BST_SKIP_MODE_WIDTH 2
+
+/* MAX98925_R036_BLOCK_ENABLE */
+#define M98925_BST_EN_MASK (1<<7)
+#define M98925_BST_EN_SHIFT 7
+#define M98925_BST_EN_WIDTH 1
+#define M98925_WATCH_EN_MASK (1<<6)
+#define M98925_WATCH_EN_SHIFT 6
+#define M98925_WATCH_EN_WIDTH 1
+#define M98925_CLKMON_EN_MASK (1<<5)
+#define M98925_CLKMON_EN_SHIFT 5
+#define M98925_CLKMON_EN_WIDTH 1
+#define M98925_SPK_EN_MASK (1<<4)
+#define M98925_SPK_EN_SHIFT 4
+#define M98925_SPK_EN_WIDTH 1
+#define M98925_ADC_VBST_EN_MASK (1<<3)
+#define M98925_ADC_VBST_EN_SHIFT 3
+#define M98925_ADC_VBST_EN_WIDTH 1
+#define M98925_ADC_VBAT_EN_MASK (1<<2)
+#define M98925_ADC_VBAT_EN_SHIFT 2
+#define M98925_ADC_VBAT_EN_WIDTH 1
+#define M98925_ADC_IMON_EN_MASK (1<<1)
+#define M98925_ADC_IMON_EN_SHIFT 1
+#define M98925_ADC_IMON_EN_WIDTH 1
+#define M98925_ADC_VMON_EN_MASK (1<<0)
+#define M98925_ADC_VMON_EN_SHIFT 0
+#define M98925_ADC_VMON_EN_WIDTH 1
+
+/* MAX98925_R037_CONFIGURATION */
+#define M98925_BST_VOUT_MASK (0x0F<<4)
+#define M98925_BST_VOUT_SHIFT 4
+#define M98925_BST_VOUT_WIDTH 4
+#define M98925_THERMWARN_LEVEL_MASK (0x03<<2)
+#define M98925_THERMWARN_LEVEL_SHIFT 2
+#define M98925_THERMWARN_LEVEL_WIDTH 2
+#define M98925_WATCH_TIME_MASK (0x03<<0)
+#define M98925_WATCH_TIME_SHIFT 0
+#define M98925_WATCH_TIME_WIDTH 2
+
+/* MAX98925_R038_GLOBAL_ENABLE */
+#define M98925_EN_MASK (1<<7)
+#define M98925_EN_SHIFT 7
+#define M98925_EN_WIDTH 1
+
+/* MAX98925_R03A_BOOST_LIMITER */
+#define M98925_BST_ILIM_MASK (0x1F<<3)
+#define M98925_BST_ILIM_SHIFT 3
+#define M98925_BST_ILIM_WIDTH 5
+
+/* MAX98925_R0FF_VERSION */
+#define M98925_REV_ID_MASK (0xFF<<0)
+#define M98925_REV_ID_SHIFT 0
+#define M98925_REV_ID_WIDTH 8
+
+struct max98925_priv {
+ struct regmap *regmap;
+ struct snd_soc_codec *codec;
+ struct max98925_pdata *pdata;
+ unsigned int sysclk;
+ unsigned int v_slot;
+ unsigned int i_slot;
+ unsigned int spk_gain;
+ unsigned int ch_size;
+};
+#endif
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 474cae82a874..e12764d15431 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
+#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -31,8 +32,6 @@
#define DIV_ROUND_DOWN_ULL(ll, d) \
({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
-#define DIV_ROUND_CLOSEST_ULL(ll, d) \
- ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
#define PCM512x_NUM_SUPPLIES 3
static const char * const pcm512x_supply_names[PCM512x_NUM_SUPPLIES] = {
@@ -54,6 +53,9 @@ struct pcm512x_priv {
int pll_d;
int pll_p;
unsigned long real_pll;
+ unsigned long overclock_pll;
+ unsigned long overclock_dac;
+ unsigned long overclock_dsp;
};
/*
@@ -224,6 +226,90 @@ static bool pcm512x_volatile(struct device *dev, unsigned int reg)
}
}
+static int pcm512x_overclock_pll_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = pcm512x->overclock_pll;
+ return 0;
+}
+
+static int pcm512x_overclock_pll_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ switch (codec->dapm.bias_level) {
+ case SND_SOC_BIAS_OFF:
+ case SND_SOC_BIAS_STANDBY:
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ pcm512x->overclock_pll = ucontrol->value.integer.value[0];
+ return 0;
+}
+
+static int pcm512x_overclock_dsp_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = pcm512x->overclock_dsp;
+ return 0;
+}
+
+static int pcm512x_overclock_dsp_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ switch (codec->dapm.bias_level) {
+ case SND_SOC_BIAS_OFF:
+ case SND_SOC_BIAS_STANDBY:
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ pcm512x->overclock_dsp = ucontrol->value.integer.value[0];
+ return 0;
+}
+
+static int pcm512x_overclock_dac_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = pcm512x->overclock_dac;
+ return 0;
+}
+
+static int pcm512x_overclock_dac_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
+
+ switch (codec->dapm.bias_level) {
+ case SND_SOC_BIAS_OFF:
+ case SND_SOC_BIAS_STANDBY:
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ pcm512x->overclock_dac = ucontrol->value.integer.value[0];
+ return 0;
+}
+
static const DECLARE_TLV_DB_SCALE(digital_tlv, -10350, 50, 1);
static const DECLARE_TLV_DB_SCALE(analog_tlv, -600, 600, 0);
static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
@@ -304,9 +390,9 @@ static const struct soc_enum pcm512x_veds =
static const struct snd_kcontrol_new pcm512x_controls[] = {
SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
-SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
+SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
-SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
+SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
PCM512x_RQMR_SHIFT, 1, 1),
@@ -328,6 +414,13 @@ SOC_ENUM("Volume Ramp Up Rate", pcm512x_vnuf),
SOC_ENUM("Volume Ramp Up Step", pcm512x_vnus),
SOC_ENUM("Volume Ramp Down Emergency Rate", pcm512x_vedf),
SOC_ENUM("Volume Ramp Down Emergency Step", pcm512x_veds),
+
+SOC_SINGLE_EXT("Max Overclock PLL", SND_SOC_NOPM, 0, 20, 0,
+ pcm512x_overclock_pll_get, pcm512x_overclock_pll_put),
+SOC_SINGLE_EXT("Max Overclock DSP", SND_SOC_NOPM, 0, 40, 0,
+ pcm512x_overclock_dsp_get, pcm512x_overclock_dsp_put),
+SOC_SINGLE_EXT("Max Overclock DAC", SND_SOC_NOPM, 0, 40, 0,
+ pcm512x_overclock_dac_get, pcm512x_overclock_dac_put),
};
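+
+/*
+ * The three "Max Overclock" controls above are ordinary mixer controls, so
+ * (usage sketch, assuming card index 0) they can be set from user space
+ * while the codec is idle, e.g.:
+ *
+ *	amixer -c 0 cset name='Max Overclock DAC' 25
+ *
+ * The value is a percentage; the _put handlers reject changes unless the
+ * codec is in SND_SOC_BIAS_OFF or SND_SOC_BIAS_STANDBY.
+ */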
static const struct snd_soc_dapm_widget pcm512x_dapm_widgets[] = {
@@ -346,6 +439,45 @@ static const struct snd_soc_dapm_route pcm512x_dapm_routes[] = {
{ "OUTR", NULL, "DACR" },
};
+static unsigned long pcm512x_pll_max(struct pcm512x_priv *pcm512x)
+{
+ return 25000000 + 25000000 * pcm512x->overclock_pll / 100;
+}
+
+static unsigned long pcm512x_dsp_max(struct pcm512x_priv *pcm512x)
+{
+ return 50000000 + 50000000 * pcm512x->overclock_dsp / 100;
+}
+
+static unsigned long pcm512x_dac_max(struct pcm512x_priv *pcm512x,
+ unsigned long rate)
+{
+ return rate + rate * pcm512x->overclock_dac / 100;
+}
+
+static unsigned long pcm512x_sck_max(struct pcm512x_priv *pcm512x)
+{
+ if (!pcm512x->pll_out)
+ return 25000000;
+ return pcm512x_pll_max(pcm512x);
+}
+
+static unsigned long pcm512x_ncp_target(struct pcm512x_priv *pcm512x,
+ unsigned long dac_rate)
+{
+ /*
+ * If the DAC is not actually overclocked, use the good old
+ * NCP target rate...
+ */
+ if (dac_rate <= 6144000)
+ return 1536000;
+ /*
+ * ...but if the DAC is in fact overclocked, bump the NCP target
+ * rate to get the recommended dividers even when overclocking.
+ */
+ return pcm512x_dac_max(pcm512x, 1536000);
+}
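+
+/*
+ * Worked example (illustrative only): with the "Max Overclock DAC" control
+ * set to 25, pcm512x_dac_max(pcm512x, 6144000) evaluates to
+ * 6144000 + 6144000 * 25 / 100 = 7680000 Hz and the NCP target above
+ * becomes 1536000 + 1536000 * 25 / 100 = 1920000 Hz, keeping the
+ * DAC-to-NCP-target ratio at its nominal value of 4 so that the
+ * recommended dividers mentioned above are still selected.
+ */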
+
static const u32 pcm512x_dai_rates[] = {
8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
88200, 96000, 176400, 192000, 384000,
@@ -359,6 +491,7 @@ static const struct snd_pcm_hw_constraint_list constraints_slave = {
static int pcm512x_hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
+ struct pcm512x_priv *pcm512x = rule->private;
struct snd_interval ranges[2];
int frame_size;
@@ -377,7 +510,7 @@ static int pcm512x_hw_rule_rate(struct snd_pcm_hw_params *params,
*/
memset(ranges, 0, sizeof(ranges));
ranges[0].min = 8000;
- ranges[0].max = 25000000 / frame_size / 2;
+ ranges[0].max = pcm512x_sck_max(pcm512x) / frame_size / 2;
ranges[1].min = DIV_ROUND_UP(16000000, frame_size);
ranges[1].max = 384000;
break;
@@ -408,7 +541,7 @@ static int pcm512x_dai_startup_master(struct snd_pcm_substream *substream,
return snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
pcm512x_hw_rule_rate,
- NULL,
+ pcm512x,
SNDRV_PCM_HW_PARAM_FRAME_BITS,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
@@ -517,6 +650,8 @@ static unsigned long pcm512x_find_sck(struct snd_soc_dai *dai,
unsigned long bclk_rate)
{
struct device *dev = dai->dev;
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm512x_priv *pcm512x = snd_soc_codec_get_drvdata(codec);
unsigned long sck_rate;
int pow2;
@@ -527,9 +662,10 @@ static unsigned long pcm512x_find_sck(struct snd_soc_dai *dai,
* as many factors of 2 as possible, as that makes it easier
* to find a fast DAC rate
*/
- pow2 = 1 << fls((25000000 - 16000000) / bclk_rate);
+ pow2 = 1 << fls((pcm512x_pll_max(pcm512x) - 16000000) / bclk_rate);
for (; pow2; pow2 >>= 1) {
- sck_rate = rounddown(25000000, bclk_rate * pow2);
+ sck_rate = rounddown(pcm512x_pll_max(pcm512x),
+ bclk_rate * pow2);
if (sck_rate >= 16000000)
break;
}
@@ -576,8 +712,8 @@ static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,
/* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
if (pllin_rate / den > 20000000 && num < 8) {
- num *= 20000000 / (pllin_rate / den);
- den *= 20000000 / (pllin_rate / den);
+ num *= DIV_ROUND_UP(pllin_rate / den, 20000000);
+ den *= DIV_ROUND_UP(pllin_rate / den, 20000000);
}
dev_dbg(dev, "num / den = %lu / %lu\n", num, den);
@@ -678,7 +814,7 @@ static unsigned long pcm512x_pllin_dac_rate(struct snd_soc_dai *dai,
return 0; /* futile, quit early */
/* run DAC no faster than 6144000 Hz */
- for (dac_rate = rounddown(6144000, osr_rate);
+ for (dac_rate = rounddown(pcm512x_dac_max(pcm512x, 6144000), osr_rate);
dac_rate;
dac_rate -= osr_rate) {
@@ -805,7 +941,7 @@ static int pcm512x_set_dividers(struct snd_soc_dai *dai,
osr_rate = 16 * sample_rate;
/* run DSP no faster than 50 MHz */
- dsp_div = mck_rate > 50000000 ? 2 : 1;
+ dsp_div = mck_rate > pcm512x_dsp_max(pcm512x) ? 2 : 1;
dac_rate = pcm512x_pllin_dac_rate(dai, osr_rate, pllin_rate);
if (dac_rate) {
@@ -836,7 +972,8 @@ static int pcm512x_set_dividers(struct snd_soc_dai *dai,
dacsrc_rate = pllin_rate;
} else {
/* run DAC no faster than 6144000 Hz */
- unsigned long dac_mul = 6144000 / osr_rate;
+ unsigned long dac_mul = pcm512x_dac_max(pcm512x, 6144000)
+ / osr_rate;
unsigned long sck_mul = sck_rate / osr_rate;
for (; dac_mul; dac_mul--) {
@@ -863,28 +1000,30 @@ static int pcm512x_set_dividers(struct snd_soc_dai *dai,
dacsrc_rate = sck_rate;
}
+ osr_div = DIV_ROUND_CLOSEST(dac_rate, osr_rate);
+ if (osr_div > 128) {
+ dev_err(dev, "Failed to find OSR divider\n");
+ return -EINVAL;
+ }
+
dac_div = DIV_ROUND_CLOSEST(dacsrc_rate, dac_rate);
if (dac_div > 128) {
dev_err(dev, "Failed to find DAC divider\n");
return -EINVAL;
}
+ dac_rate = dacsrc_rate / dac_div;
- ncp_div = DIV_ROUND_CLOSEST(dacsrc_rate / dac_div, 1536000);
- if (ncp_div > 128 || dacsrc_rate / dac_div / ncp_div > 2048000) {
+ ncp_div = DIV_ROUND_CLOSEST(dac_rate,
+ pcm512x_ncp_target(pcm512x, dac_rate));
+ if (ncp_div > 128 || dac_rate / ncp_div > 2048000) {
/* run NCP no faster than 2048000 Hz, but why? */
- ncp_div = DIV_ROUND_UP(dacsrc_rate / dac_div, 2048000);
+ ncp_div = DIV_ROUND_UP(dac_rate, 2048000);
if (ncp_div > 128) {
dev_err(dev, "Failed to find NCP divider\n");
return -EINVAL;
}
}
- osr_div = DIV_ROUND_CLOSEST(dac_rate, osr_rate);
- if (osr_div > 128) {
- dev_err(dev, "Failed to find OSR divider\n");
- return -EINVAL;
- }
-
idac = mck_rate / (dsp_div * sample_rate);
ret = regmap_write(pcm512x->regmap, PCM512x_DSP_CLKDIV, dsp_div - 1);
@@ -937,11 +1076,11 @@ static int pcm512x_set_dividers(struct snd_soc_dai *dai,
return ret;
}
- if (sample_rate <= 48000)
+ if (sample_rate <= pcm512x_dac_max(pcm512x, 48000))
fssp = PCM512x_FSSP_48KHZ;
- else if (sample_rate <= 96000)
+ else if (sample_rate <= pcm512x_dac_max(pcm512x, 96000))
fssp = PCM512x_FSSP_96KHZ;
- else if (sample_rate <= 192000)
+ else if (sample_rate <= pcm512x_dac_max(pcm512x, 192000))
fssp = PCM512x_FSSP_192KHZ;
else
fssp = PCM512x_FSSP_384KHZ;
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9b541e52da8c..0fcda35a3a93 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -395,9 +395,20 @@ int rt286_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
rt286->jack = jack;
- /* Send an initial empty report */
- snd_soc_jack_report(rt286->jack, 0,
- SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+ if (jack) {
+ /* enable IRQ */
+ if (rt286->jack->status & SND_JACK_HEADPHONE)
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO1");
+ regmap_update_bits(rt286->regmap, RT286_IRQ_CTRL, 0x2, 0x2);
+ /* Send an initial empty report */
+ snd_soc_jack_report(rt286->jack, rt286->jack->status,
+ SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+ } else {
+ /* disable IRQ */
+ regmap_update_bits(rt286->regmap, RT286_IRQ_CTRL, 0x2, 0x0);
+ snd_soc_dapm_disable_pin(&codec->dapm, "LDO1");
+ }
+ snd_soc_dapm_sync(&codec->dapm);
return 0;
}
@@ -1037,7 +1048,6 @@ static int rt286_probe(struct snd_soc_codec *codec)
struct rt286_priv *rt286 = snd_soc_codec_get_drvdata(codec);
rt286->codec = codec;
- codec->dapm.bias_level = SND_SOC_BIAS_OFF;
if (rt286->i2c->irq) {
regmap_update_bits(rt286->regmap,
@@ -1209,7 +1219,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
{
struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct rt286_priv *rt286;
- int i, ret;
+ int i, ret, val;
rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286),
GFP_KERNEL);
@@ -1224,11 +1234,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
return ret;
}
- regmap_read(rt286->regmap,
- RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &ret);
- if (ret != RT286_VENDOR_ID && ret != RT288_VENDOR_ID) {
+ ret = regmap_read(rt286->regmap,
+ RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "I2C error %d\n", ret);
+ return ret;
+ }
+ if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
dev_err(&i2c->dev,
- "Device with ID register %x is not rt286\n", ret);
+ "Device with ID register %x is not rt286\n", val);
return -ENODEV;
}
@@ -1236,6 +1250,14 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
rt286->i2c = i2c;
i2c_set_clientdata(i2c, rt286);
+ /* restore codec default */
+ for (i = 0; i < INDEX_CACHE_SIZE; i++)
+ regmap_write(rt286->regmap, rt286->index_cache[i].reg,
+ rt286->index_cache[i].def);
+ for (i = 0; i < ARRAY_SIZE(rt286_reg); i++)
+ regmap_write(rt286->regmap, rt286_reg[i].reg,
+ rt286_reg[i].def);
+
if (pdata)
rt286->pdata = *pdata;
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index c61852742ee3..2c10d77727af 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -1675,7 +1675,7 @@ static const struct i2c_device_id rt5631_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, rt5631_i2c_id);
#ifdef CONFIG_OF
-static struct of_device_id rt5631_i2c_dt_ids[] = {
+static const struct of_device_id rt5631_i2c_dt_ids[] = {
{ .compatible = "realtek,rt5631"},
{ .compatible = "realtek,alc5631"},
{ }
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index c9a4c5be083b..be4d741c45ba 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
+#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -1270,6 +1271,8 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
RT5645_PWR_HP_L | RT5645_PWR_HP_R |
RT5645_PWR_HA, 0);
+ snd_soc_update_bits(codec, RT5645_DEPOP_M2,
+ RT5645_DEPOP_MASK, 0);
}
}
}
@@ -1538,8 +1541,6 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY_S("adc stereo1 filter", 1, RT5645_PWR_DIG2,
RT5645_PWR_ADC_S1F_BIT, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY_S("adc stereo2 filter", 1, RT5645_PWR_DIG2,
- RT5645_PWR_ADC_S2F_BIT, 0, NULL, 0),
SND_SOC_DAPM_MIXER_E("Sto1 ADC MIXL", SND_SOC_NOPM, 0, 0,
rt5645_sto1_adc_l_mix, ARRAY_SIZE(rt5645_sto1_adc_l_mix),
NULL, 0),
@@ -1729,7 +1730,6 @@ static const struct snd_soc_dapm_widget rt5650_specific_dapm_widgets[] = {
static const struct snd_soc_dapm_route rt5645_dapm_routes[] = {
{ "adc stereo1 filter", NULL, "ADC STO1 ASRC", is_using_asrc },
- { "adc stereo2 filter", NULL, "ADC STO2 ASRC", is_using_asrc },
{ "adc mono left filter", NULL, "ADC MONO L ASRC", is_using_asrc },
{ "adc mono right filter", NULL, "ADC MONO R ASRC", is_using_asrc },
{ "dac mono left filter", NULL, "DAC MONO L ASRC", is_using_asrc },
@@ -2052,7 +2052,7 @@ static int rt5645_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_codec *codec = dai->codec;
struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
- unsigned int val_len = 0, val_clk, mask_clk;
+ unsigned int val_len = 0, val_clk, mask_clk, dl_sft;
int pre_div, bclk_ms, frame_size;
rt5645->lrck[dai->id] = params_rate(params);
@@ -2066,6 +2066,16 @@ static int rt5645_hw_params(struct snd_pcm_substream *substream,
dev_err(codec->dev, "Unsupported frame size: %d\n", frame_size);
return -EINVAL;
}
+
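+ /*
+ * dl_sft selects where the 2-bit I2S data-length field sits in the SDP
+ * register: a sketch inferred from the masks used below is bits [5:4]
+ * on the RT5650 (dl_sft == 4) and bits [3:2] on the RT5645 (dl_sft == 2).
+ */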
+ switch (rt5645->codec_type) {
+ case CODEC_TYPE_RT5650:
+ dl_sft = 4;
+ break;
+ default:
+ dl_sft = 2;
+ break;
+ }
+
bclk_ms = frame_size > 32;
rt5645->bclk[dai->id] = rt5645->lrck[dai->id] * (32 << bclk_ms);
@@ -2078,13 +2088,13 @@ static int rt5645_hw_params(struct snd_pcm_substream *substream,
case 16:
break;
case 20:
- val_len |= RT5645_I2S_DL_20;
+ val_len = 0x1;
break;
case 24:
- val_len |= RT5645_I2S_DL_24;
+ val_len = 0x2;
break;
case 8:
- val_len |= RT5645_I2S_DL_8;
+ val_len = 0x3;
break;
default:
return -EINVAL;
@@ -2096,7 +2106,7 @@ static int rt5645_hw_params(struct snd_pcm_substream *substream,
val_clk = bclk_ms << RT5645_I2S_BCLK_MS1_SFT |
pre_div << RT5645_I2S_PD1_SFT;
snd_soc_update_bits(codec, RT5645_I2S1_SDP,
- RT5645_I2S_DL_MASK, val_len);
+ (0x3 << dl_sft), (val_len << dl_sft));
snd_soc_update_bits(codec, RT5645_ADDA_CLK1, mask_clk, val_clk);
break;
case RT5645_AIF2:
@@ -2104,7 +2114,7 @@ static int rt5645_hw_params(struct snd_pcm_substream *substream,
val_clk = bclk_ms << RT5645_I2S_BCLK_MS2_SFT |
pre_div << RT5645_I2S_PD2_SFT;
snd_soc_update_bits(codec, RT5645_I2S2_SDP,
- RT5645_I2S_DL_MASK, val_len);
+ (0x3 << dl_sft), (val_len << dl_sft));
snd_soc_update_bits(codec, RT5645_ADDA_CLK1, mask_clk, val_clk);
break;
default:
@@ -2119,7 +2129,16 @@ static int rt5645_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
- unsigned int reg_val = 0;
+ unsigned int reg_val = 0, pol_sft;
+
+ switch (rt5645->codec_type) {
+ case CODEC_TYPE_RT5650:
+ pol_sft = 8;
+ break;
+ default:
+ pol_sft = 7;
+ break;
+ }
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
@@ -2137,7 +2156,7 @@ static int rt5645_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_NF:
- reg_val |= RT5645_I2S_BP_INV;
+ reg_val |= (1 << pol_sft);
break;
default:
return -EINVAL;
@@ -2161,12 +2180,12 @@ static int rt5645_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (dai->id) {
case RT5645_AIF1:
snd_soc_update_bits(codec, RT5645_I2S1_SDP,
- RT5645_I2S_MS_MASK | RT5645_I2S_BP_MASK |
+ RT5645_I2S_MS_MASK | (1 << pol_sft) |
RT5645_I2S_DF_MASK, reg_val);
break;
case RT5645_AIF2:
snd_soc_update_bits(codec, RT5645_I2S2_SDP,
- RT5645_I2S_MS_MASK | RT5645_I2S_BP_MASK |
+ RT5645_I2S_MS_MASK | (1 << pol_sft) |
RT5645_I2S_DF_MASK, reg_val);
break;
default:
@@ -2285,23 +2304,42 @@ static int rt5645_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots, int slot_width)
{
struct snd_soc_codec *codec = dai->codec;
- unsigned int val = 0;
+ struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+ unsigned int i_slot_sft, o_slot_sft, i_width_sht, o_width_sht, en_sft;
+ unsigned int mask, val = 0;
+ switch (rt5645->codec_type) {
+ case CODEC_TYPE_RT5650:
+ en_sft = 15;
+ i_slot_sft = 10;
+ o_slot_sft = 8;
+ i_width_sht = 6;
+ o_width_sht = 4;
+ mask = 0x8ff0;
+ break;
+ default:
+ en_sft = 14;
+ i_slot_sft = o_slot_sft = 12;
+ i_width_sht = o_width_sht = 10;
+ mask = 0x7c00;
+ break;
+ }
if (rx_mask || tx_mask) {
- val |= (1 << 14);
- snd_soc_update_bits(codec, RT5645_BASS_BACK,
- RT5645_G_BB_BST_MASK, RT5645_G_BB_BST_25DB);
+ val |= (1 << en_sft);
+ if (rt5645->codec_type == CODEC_TYPE_RT5645)
+ snd_soc_update_bits(codec, RT5645_BASS_BACK,
+ RT5645_G_BB_BST_MASK, RT5645_G_BB_BST_25DB);
}
switch (slots) {
case 4:
- val |= (1 << 12);
+ val |= (1 << i_slot_sft) | (1 << o_slot_sft);
break;
case 6:
- val |= (2 << 12);
+ val |= (2 << i_slot_sft) | (2 << o_slot_sft);
break;
case 8:
- val |= (3 << 12);
+ val |= (3 << i_slot_sft) | (3 << o_slot_sft);
break;
case 2:
default:
@@ -2310,20 +2348,20 @@ static int rt5645_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
switch (slot_width) {
case 20:
- val |= (1 << 10);
+ val |= (1 << i_width_sht) | (1 << o_width_sht);
break;
case 24:
- val |= (2 << 10);
+ val |= (2 << i_width_sht) | (2 << o_width_sht);
break;
case 32:
- val |= (3 << 10);
+ val |= (3 << i_width_sht) | (3 << o_width_sht);
break;
case 16:
default:
break;
}
- snd_soc_update_bits(codec, RT5645_TDM_CTRL_1, 0x7c00, val);
+ snd_soc_update_bits(codec, RT5645_TDM_CTRL_1, mask, val);
return 0;
}
@@ -2361,7 +2399,8 @@ static int rt5645_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_OFF:
snd_soc_write(codec, RT5645_DEPOP_M2, 0x1100);
- snd_soc_write(codec, RT5645_GEN_CTRL1, 0x0128);
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL1,
+ RT5645_DIG_GATE_CTRL, 0);
snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
RT5645_PWR_VREF1 | RT5645_PWR_MB |
RT5645_PWR_BG | RT5645_PWR_VREF2 |
@@ -2598,7 +2637,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5645 = {
static const struct regmap_config rt5645_regmap = {
.reg_bits = 8,
.val_bits = 16,
-
+ .use_single_rw = true,
.max_register = RT5645_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5645_ranges) *
RT5645_PR_SPACING),
.volatile_reg = rt5645_volatile_register,
@@ -2618,6 +2657,15 @@ static const struct i2c_device_id rt5645_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id);
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5645_acpi_match[] = {
+ { "10EC5645", 0 },
+ { "10EC5650", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
+#endif
+
static int rt5645_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -2732,7 +2780,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
case RT5645_DMIC_DATA_GPIO12:
regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
- RT5645_DMIC_1_DP_MASK, RT5645_DMIC_2_DP_GPIO12);
+ RT5645_DMIC_2_DP_MASK, RT5645_DMIC_2_DP_GPIO12);
regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
RT5645_GP12_PIN_MASK,
RT5645_GP12_PIN_DMIC2_SDA);
@@ -2834,6 +2882,7 @@ static struct i2c_driver rt5645_i2c_driver = {
.driver = {
.name = "rt5645",
.owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(rt5645_acpi_match),
},
.probe = rt5645_i2c_probe,
.remove = rt5645_i2c_remove,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index dbfd98c22f4d..db78e9462876 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -804,8 +804,6 @@
#define RT5645_PWR_DAC_MF_L_BIT 10
#define RT5645_PWR_DAC_MF_R (0x1 << 9)
#define RT5645_PWR_DAC_MF_R_BIT 9
-#define RT5645_PWR_ADC_S2F (0x1 << 8)
-#define RT5645_PWR_ADC_S2F_BIT 8
#define RT5645_PWR_PDM1 (0x1 << 7)
#define RT5645_PWR_PDM1_BIT 7
#define RT5645_PWR_PDM2 (0x1 << 6)
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index fd102613d20d..cc7f84a150a7 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -403,6 +403,189 @@ static bool rt5670_readable_register(struct device *dev, unsigned int reg)
}
}
+/**
+ * rt5670_headset_detect - Detect headset.
+ * @codec: SoC audio codec device.
+ * @jack_insert: non-zero when the jack has just been inserted, zero on removal.
+ *
+ * Determine whether the inserted plug is a headset (with microphone) or a
+ * plain headphone.
+ *
+ * Returns the detected jack type: SND_JACK_HEADSET, SND_JACK_HEADPHONE or 0.
+ */
+
+static int rt5670_headset_detect(struct snd_soc_codec *codec, int jack_insert)
+{
+ int val;
+ struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
+
+ if (jack_insert) {
+ snd_soc_dapm_force_enable_pin(&codec->dapm,
+ "Mic Det Power");
+ snd_soc_dapm_sync(&codec->dapm);
+ snd_soc_update_bits(codec, RT5670_GEN_CTRL3, 0x4, 0x0);
+ snd_soc_update_bits(codec, RT5670_CJ_CTRL2,
+ RT5670_CBJ_DET_MODE | RT5670_CBJ_MN_JD,
+ RT5670_CBJ_MN_JD);
+ snd_soc_write(codec, RT5670_GPIO_CTRL2, 0x0004);
+ snd_soc_update_bits(codec, RT5670_GPIO_CTRL1,
+ RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_IRQ);
+ snd_soc_update_bits(codec, RT5670_CJ_CTRL1,
+ RT5670_CBJ_BST1_EN, RT5670_CBJ_BST1_EN);
+ snd_soc_write(codec, RT5670_JD_CTRL3, 0x00f0);
+ snd_soc_update_bits(codec, RT5670_CJ_CTRL2,
+ RT5670_CBJ_MN_JD, RT5670_CBJ_MN_JD);
+ snd_soc_update_bits(codec, RT5670_CJ_CTRL2,
+ RT5670_CBJ_MN_JD, 0);
+ msleep(300);
+ val = snd_soc_read(codec, RT5670_CJ_CTRL3) & 0x7;
+ if (val == 0x1 || val == 0x2) {
+ rt5670->jack_type = SND_JACK_HEADSET;
+ /* for push button */
+ snd_soc_update_bits(codec, RT5670_INT_IRQ_ST, 0x8, 0x8);
+ snd_soc_update_bits(codec, RT5670_IL_CMD, 0x40, 0x40);
+ snd_soc_read(codec, RT5670_IL_CMD);
+ } else {
+ snd_soc_update_bits(codec, RT5670_GEN_CTRL3, 0x4, 0x4);
+ rt5670->jack_type = SND_JACK_HEADPHONE;
+ snd_soc_dapm_disable_pin(&codec->dapm, "Mic Det Power");
+ snd_soc_dapm_sync(&codec->dapm);
+ }
+ } else {
+ snd_soc_update_bits(codec, RT5670_INT_IRQ_ST, 0x8, 0x0);
+ snd_soc_update_bits(codec, RT5670_GEN_CTRL3, 0x4, 0x4);
+ rt5670->jack_type = 0;
+ snd_soc_dapm_disable_pin(&codec->dapm, "Mic Det Power");
+ snd_soc_dapm_sync(&codec->dapm);
+ }
+
+ return rt5670->jack_type;
+}
+
+void rt5670_jack_suspend(struct snd_soc_codec *codec)
+{
+ struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
+
+ rt5670->jack_type_saved = rt5670->jack_type;
+ rt5670_headset_detect(codec, 0);
+}
+EXPORT_SYMBOL_GPL(rt5670_jack_suspend);
+
+void rt5670_jack_resume(struct snd_soc_codec *codec)
+{
+ struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
+
+ if (rt5670->jack_type_saved)
+ rt5670_headset_detect(codec, 1);
+}
+EXPORT_SYMBOL_GPL(rt5670_jack_resume);
+
+static int rt5670_button_detect(struct snd_soc_codec *codec)
+{
+ int btn_type, val;
+
+ val = snd_soc_read(codec, RT5670_IL_CMD);
+ btn_type = val & 0xff80;
+ snd_soc_write(codec, RT5670_IL_CMD, val);
+ if (btn_type != 0) {
+ msleep(20);
+ val = snd_soc_read(codec, RT5670_IL_CMD);
+ snd_soc_write(codec, RT5670_IL_CMD, val);
+ }
+
+ return btn_type;
+}
+
+static int rt5670_irq_detection(void *data)
+{
+ struct rt5670_priv *rt5670 = (struct rt5670_priv *)data;
+ struct snd_soc_jack_gpio *gpio = &rt5670->hp_gpio;
+ struct snd_soc_jack *jack = rt5670->jack;
+ int val, btn_type, report = jack->status;
+
+ if (rt5670->pdata.jd_mode == 1) /* 2 port */
+ val = snd_soc_read(rt5670->codec, RT5670_A_JD_CTRL1) & 0x0070;
+ else
+ val = snd_soc_read(rt5670->codec, RT5670_A_JD_CTRL1) & 0x0020;
+
+ switch (val) {
+ /* jack in */
+ case 0x30: /* 2 port */
+ case 0x0: /* 1 port or 2 port */
+ if (rt5670->jack_type == 0) {
+ report = rt5670_headset_detect(rt5670->codec, 1);
+ /* for push button and jack out */
+ gpio->debounce_time = 25;
+ break;
+ }
+ btn_type = 0;
+ if (snd_soc_read(rt5670->codec, RT5670_INT_IRQ_ST) & 0x4) {
+ /* button pressed */
+ report = SND_JACK_HEADSET;
+ btn_type = rt5670_button_detect(rt5670->codec);
+ switch (btn_type) {
+ case 0x2000: /* up */
+ report |= SND_JACK_BTN_1;
+ break;
+ case 0x0400: /* center */
+ report |= SND_JACK_BTN_0;
+ break;
+ case 0x0080: /* down */
+ report |= SND_JACK_BTN_2;
+ break;
+ default:
+ dev_err(rt5670->codec->dev,
+ "Unexpected button code 0x%04x\n",
+ btn_type);
+ break;
+ }
+ }
+ if (btn_type == 0)/* button release */
+ report = rt5670->jack_type;
+
+ break;
+ /* jack out */
+ case 0x70: /* 2 port */
+ case 0x10: /* 2 port */
+ case 0x20: /* 1 port */
+ report = 0;
+ snd_soc_update_bits(rt5670->codec, RT5670_INT_IRQ_ST, 0x1, 0x0);
+ rt5670_headset_detect(rt5670->codec, 0);
+ gpio->debounce_time = 150; /* for jack in */
+ break;
+ default:
+ break;
+ }
+
+ return report;
+}
+
+int rt5670_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack)
+{
+ struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ rt5670->jack = jack;
+ rt5670->hp_gpio.gpiod_dev = codec->dev;
+ rt5670->hp_gpio.name = "headphone detect";
+ rt5670->hp_gpio.report = SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2;
+ rt5670->hp_gpio.debounce_time = 150;
+ rt5670->hp_gpio.wake = true;
+ rt5670->hp_gpio.data = (struct rt5670_priv *)rt5670;
+ rt5670->hp_gpio.jack_status_check = rt5670_irq_detection;
+
+ ret = snd_soc_jack_add_gpios(rt5670->jack, 1,
+ &rt5670->hp_gpio);
+ if (ret) {
+ dev_err(codec->dev, "Adding jack GPIO failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5670_set_jack_detect);
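/*
 * A minimal usage sketch (assumed machine-driver code, not part of this
 * patch): a board driver that has already allocated and registered a
 * struct snd_soc_jack (for example via snd_soc_jack_new()) could hook up
 * the codec's jack and button detection with:
 *
 *	rt5670_set_jack_detect(codec, &headset_jack);
 *
 * and then call rt5670_jack_suspend(codec) / rt5670_jack_resume(codec)
 * from its suspend/resume paths. "headset_jack" and the surrounding
 * machine driver are illustrative names only.
 */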
+
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
@@ -517,11 +700,9 @@ static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
- unsigned int val;
+ struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
- val = snd_soc_read(codec, RT5670_GLB_CLK);
- val &= RT5670_SCLK_SRC_MASK;
- if (val == RT5670_SCLK_SRC_PLL1)
+ if (rt5670->sysclk_src == RT5670_SCLK_S_PLL1)
return 1;
else
return 0;
@@ -2271,16 +2452,6 @@ static int rt5670_set_dai_sysclk(struct snd_soc_dai *dai,
struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
unsigned int reg_val = 0;
- if (freq == rt5670->sysclk && clk_id == rt5670->sysclk_src)
- return 0;
-
- if (rt5670->pdata.jd_mode) {
- if (clk_id == RT5670_SCLK_S_PLL1)
- snd_soc_dapm_force_enable_pin(&codec->dapm, "PLL1");
- else
- snd_soc_dapm_disable_pin(&codec->dapm, "PLL1");
- snd_soc_dapm_sync(&codec->dapm);
- }
switch (clk_id) {
case RT5670_SCLK_S_MCLK:
reg_val |= RT5670_SCLK_SRC_MCLK;
@@ -2298,7 +2469,8 @@ static int rt5670_set_dai_sysclk(struct snd_soc_dai *dai,
snd_soc_update_bits(codec, RT5670_GLB_CLK,
RT5670_SCLK_SRC_MASK, reg_val);
rt5670->sysclk = freq;
- rt5670->sysclk_src = clk_id;
+ if (clk_id != RT5670_SCLK_S_RCCLK)
+ rt5670->sysclk_src = clk_id;
dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
@@ -2517,6 +2689,7 @@ static int rt5670_remove(struct snd_soc_codec *codec)
struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
regmap_write(rt5670->regmap, RT5670_RESET, 0);
+ snd_soc_jack_free_gpios(rt5670->jack, 1, &rt5670->hp_gpio);
return 0;
}
@@ -2676,6 +2849,7 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
if (dmi_check_system(dmi_platform_intel_braswell)) {
rt5670->pdata.dmic_en = true;
rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P;
+ rt5670->pdata.dev_gpio = true;
rt5670->pdata.jd_mode = 1;
}
@@ -2717,12 +2891,17 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5670->regmap, RT5670_IN2,
RT5670_IN_DF2, RT5670_IN_DF2);
- if (i2c->irq) {
+ if (rt5670->pdata.dev_gpio) {
+ /* for push button */
+ regmap_write(rt5670->regmap, RT5670_IL_CMD, 0x0000);
+ regmap_write(rt5670->regmap, RT5670_IL_CMD2, 0x0010);
+ regmap_write(rt5670->regmap, RT5670_IL_CMD3, 0x0014);
+ /* for irq */
regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1,
RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_IRQ);
regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT);
-
+ regmap_update_bits(rt5670->regmap, RT5670_DIG_MISC, 0x8, 0x8);
}
if (rt5670->pdata.jd_mode) {
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index 21f8e18c13c4..dc2b46236c5c 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -1950,17 +1950,20 @@ enum {
};
enum {
+ RT5670_DMIC1_DISABLED,
RT5670_DMIC_DATA_GPIO6,
RT5670_DMIC_DATA_IN2P,
RT5670_DMIC_DATA_GPIO7,
};
enum {
+ RT5670_DMIC2_DISABLED,
RT5670_DMIC_DATA_GPIO8,
RT5670_DMIC_DATA_IN3N,
};
enum {
+ RT5670_DMIC3_DISABLED,
RT5670_DMIC_DATA_GPIO9,
RT5670_DMIC_DATA_GPIO10,
RT5670_DMIC_DATA_GPIO5,
@@ -1985,6 +1988,8 @@ struct rt5670_priv {
struct snd_soc_codec *codec;
struct rt5670_platform_data pdata;
struct regmap *regmap;
+ struct snd_soc_jack *jack;
+ struct snd_soc_jack_gpio hp_gpio;
int sysclk;
int sysclk_src;
@@ -1999,6 +2004,11 @@ struct rt5670_priv {
int dsp_sw; /* expected parameter setting */
int dsp_rate;
int jack_type;
+ int jack_type_saved;
};
+void rt5670_jack_suspend(struct snd_soc_codec *codec);
+void rt5670_jack_resume(struct snd_soc_codec *codec);
+int rt5670_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack);
#endif /* __RT5670_H__ */
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index fb9c20eace3f..169aa471ffbd 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -62,6 +62,9 @@ static const struct reg_default init_list[] = {
{RT5677_PR_BASE + 0x1e, 0x0000},
{RT5677_PR_BASE + 0x12, 0x0eaa},
{RT5677_PR_BASE + 0x14, 0x018a},
+ {RT5677_PR_BASE + 0x15, 0x0490},
+ {RT5677_PR_BASE + 0x38, 0x0f71},
+ {RT5677_PR_BASE + 0x39, 0x0f71},
};
#define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list)
@@ -718,11 +721,24 @@ static int rt5677_set_dsp_vad(struct snd_soc_codec *codec, bool on)
RT5677_LDO1_SEL_MASK, 0x0);
regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG2,
RT5677_PWR_LDO1, RT5677_PWR_LDO1);
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK1,
- RT5677_MCLK_SRC_MASK, RT5677_MCLK2_SRC);
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
- RT5677_PLL2_PR_SRC_MASK | RT5677_DSP_CLK_SRC_MASK,
- RT5677_PLL2_PR_SRC_MCLK2 | RT5677_DSP_CLK_SRC_BYPASS);
+ switch (rt5677->type) {
+ case RT5677:
+ regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK1,
+ RT5677_MCLK_SRC_MASK, RT5677_MCLK2_SRC);
+ regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
+ RT5677_PLL2_PR_SRC_MASK |
+ RT5677_DSP_CLK_SRC_MASK,
+ RT5677_PLL2_PR_SRC_MCLK2 |
+ RT5677_DSP_CLK_SRC_BYPASS);
+ break;
+ case RT5676:
+ regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
+ RT5677_DSP_CLK_SRC_MASK,
+ RT5677_DSP_CLK_SRC_BYPASS);
+ break;
+ default:
+ break;
+ }
regmap_write(rt5677->regmap, RT5677_PWR_DSP2, 0x07ff);
regmap_write(rt5677->regmap, RT5677_PWR_DSP1, 0x07fd);
rt5677_set_dsp_mode(codec, true);
@@ -901,7 +917,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
- int idx = rl6231_calc_dmic_clk(rt5677->sysclk);
+ int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
if (idx < 0)
dev_err(codec->dev, "Failed to set DMIC clock\n");
@@ -1021,6 +1037,169 @@ static int can_use_asrc(struct snd_soc_dapm_widget *source,
return 0;
}
+/**
+ * rt5677_sel_asrc_clk_src - select ASRC clock source for a set of filters
+ * @codec: SoC audio codec device.
+ * @filter_mask: mask of filters.
+ * @clk_src: clock source
+ *
+ * The ASRC function is for asynchronous MCLK and LRCK. Also, since the RT5677
+ * only supports the standard 32fs or 64fs I2S formats, ASRC should be enabled
+ * to support special I2S clock formats such as Intel's 100fs (100 * sampling
+ * rate). The ASRC function tracks the I2S clock and generates a corresponding
+ * system clock for the codec. This function provides an API to select the
+ * clock source for the set of filters specified by the mask; the codec driver
+ * will turn on ASRC for those filters if ASRC is selected as their clock source.
+ */
+int rt5677_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src)
+{
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ unsigned int asrc3_mask = 0, asrc3_value = 0;
+ unsigned int asrc4_mask = 0, asrc4_value = 0;
+ unsigned int asrc5_mask = 0, asrc5_value = 0;
+ unsigned int asrc6_mask = 0, asrc6_value = 0;
+ unsigned int asrc7_mask = 0, asrc7_value = 0;
+
+ switch (clk_src) {
+ case RT5677_CLK_SEL_SYS:
+ case RT5677_CLK_SEL_I2S1_ASRC:
+ case RT5677_CLK_SEL_I2S2_ASRC:
+ case RT5677_CLK_SEL_I2S3_ASRC:
+ case RT5677_CLK_SEL_I2S4_ASRC:
+ case RT5677_CLK_SEL_I2S5_ASRC:
+ case RT5677_CLK_SEL_I2S6_ASRC:
+ case RT5677_CLK_SEL_SYS2:
+ case RT5677_CLK_SEL_SYS3:
+ case RT5677_CLK_SEL_SYS4:
+ case RT5677_CLK_SEL_SYS5:
+ case RT5677_CLK_SEL_SYS6:
+ case RT5677_CLK_SEL_SYS7:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* ASRC 3 */
+ if (filter_mask & RT5677_DA_STEREO_FILTER) {
+ asrc3_mask |= RT5677_DA_STO_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5677_DA_STO_CLK_SEL_MASK)
+ | (clk_src << RT5677_DA_STO_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_DA_MONO2_L_FILTER) {
+ asrc3_mask |= RT5677_DA_MONO2L_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5677_DA_MONO2L_CLK_SEL_MASK)
+ | (clk_src << RT5677_DA_MONO2L_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_DA_MONO2_R_FILTER) {
+ asrc3_mask |= RT5677_DA_MONO2R_CLK_SEL_MASK;
+ asrc3_value = (asrc3_value & ~RT5677_DA_MONO2R_CLK_SEL_MASK)
+ | (clk_src << RT5677_DA_MONO2R_CLK_SEL_SFT);
+ }
+
+ if (asrc3_mask)
+ regmap_update_bits(rt5677->regmap, RT5677_ASRC_3, asrc3_mask,
+ asrc3_value);
+
+ /* ASRC 4 */
+ if (filter_mask & RT5677_DA_MONO3_L_FILTER) {
+ asrc4_mask |= RT5677_DA_MONO3L_CLK_SEL_MASK;
+ asrc4_value = (asrc4_value & ~RT5677_DA_MONO3L_CLK_SEL_MASK)
+ | (clk_src << RT5677_DA_MONO3L_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_DA_MONO3_R_FILTER) {
+ asrc4_mask |= RT5677_DA_MONO3R_CLK_SEL_MASK;
+ asrc4_value = (asrc4_value & ~RT5677_DA_MONO3R_CLK_SEL_MASK)
+ | (clk_src << RT5677_DA_MONO3R_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_DA_MONO4_L_FILTER) {
+ asrc4_mask |= RT5677_DA_MONO4L_CLK_SEL_MASK;
+ asrc4_value = (asrc4_value & ~RT5677_DA_MONO4L_CLK_SEL_MASK)
+ | (clk_src << RT5677_DA_MONO4L_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_DA_MONO4_R_FILTER) {
+ asrc4_mask |= RT5677_DA_MONO4R_CLK_SEL_MASK;
+ asrc4_value = (asrc4_value & ~RT5677_DA_MONO4R_CLK_SEL_MASK)
+ | (clk_src << RT5677_DA_MONO4R_CLK_SEL_SFT);
+ }
+
+ if (asrc4_mask)
+ regmap_update_bits(rt5677->regmap, RT5677_ASRC_4, asrc4_mask,
+ asrc4_value);
+
+ /* ASRC 5 */
+ if (filter_mask & RT5677_AD_STEREO1_FILTER) {
+ asrc5_mask |= RT5677_AD_STO1_CLK_SEL_MASK;
+ asrc5_value = (asrc5_value & ~RT5677_AD_STO1_CLK_SEL_MASK)
+ | (clk_src << RT5677_AD_STO1_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_AD_STEREO2_FILTER) {
+ asrc5_mask |= RT5677_AD_STO2_CLK_SEL_MASK;
+ asrc5_value = (asrc5_value & ~RT5677_AD_STO2_CLK_SEL_MASK)
+ | (clk_src << RT5677_AD_STO2_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_AD_STEREO3_FILTER) {
+ asrc5_mask |= RT5677_AD_STO3_CLK_SEL_MASK;
+ asrc5_value = (asrc5_value & ~RT5677_AD_STO3_CLK_SEL_MASK)
+ | (clk_src << RT5677_AD_STO3_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_AD_STEREO4_FILTER) {
+ asrc5_mask |= RT5677_AD_STO4_CLK_SEL_MASK;
+ asrc5_value = (asrc5_value & ~RT5677_AD_STO4_CLK_SEL_MASK)
+ | (clk_src << RT5677_AD_STO4_CLK_SEL_SFT);
+ }
+
+ if (asrc5_mask)
+ regmap_update_bits(rt5677->regmap, RT5677_ASRC_5, asrc5_mask,
+ asrc5_value);
+
+ /* ASRC 6 */
+ if (filter_mask & RT5677_AD_MONO_L_FILTER) {
+ asrc6_mask |= RT5677_AD_MONOL_CLK_SEL_MASK;
+ asrc6_value = (asrc6_value & ~RT5677_AD_MONOL_CLK_SEL_MASK)
+ | (clk_src << RT5677_AD_MONOL_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_AD_MONO_R_FILTER) {
+ asrc6_mask |= RT5677_AD_MONOR_CLK_SEL_MASK;
+ asrc6_value = (asrc6_value & ~RT5677_AD_MONOR_CLK_SEL_MASK)
+ | (clk_src << RT5677_AD_MONOR_CLK_SEL_SFT);
+ }
+
+ if (asrc6_mask)
+ regmap_update_bits(rt5677->regmap, RT5677_ASRC_6, asrc6_mask,
+ asrc6_value);
+
+ /* ASRC 7 */
+ if (filter_mask & RT5677_DSP_OB_0_3_FILTER) {
+ asrc7_mask |= RT5677_DSP_OB_0_3_CLK_SEL_MASK;
+ asrc7_value = (asrc7_value & ~RT5677_DSP_OB_0_3_CLK_SEL_MASK)
+ | (clk_src << RT5677_DSP_OB_0_3_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5677_DSP_OB_4_7_FILTER) {
+ asrc7_mask |= RT5677_DSP_OB_4_7_CLK_SEL_MASK;
+ asrc7_value = (asrc7_value & ~RT5677_DSP_OB_4_7_CLK_SEL_MASK)
+ | (clk_src << RT5677_DSP_OB_4_7_CLK_SEL_SFT);
+ }
+
+ if (asrc7_mask)
+ regmap_update_bits(rt5677->regmap, RT5677_ASRC_7, asrc7_mask,
+ asrc7_value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5677_sel_asrc_clk_src);
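/*
 * A minimal usage sketch (assumed machine-driver code, not part of this
 * patch): on a platform that clocks the codec with a non-standard I2S
 * ratio, a board driver could route the DAC stereo and ADC stereo 1
 * filters to the I2S1 ASRC from its hw_params callback:
 *
 *	rt5677_sel_asrc_clk_src(codec,
 *				RT5677_DA_STEREO_FILTER |
 *				RT5677_AD_STEREO1_FILTER,
 *				RT5677_CLK_SEL_I2S1_ASRC);
 *
 * Passing RT5677_CLK_SEL_SYS instead switches those filters back to the
 * system clock; "codec" here is the RT5677's struct snd_soc_codec as seen
 * by the machine driver.
 */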
+
/* Digital Mixer */
static const struct snd_kcontrol_new rt5677_sto1_adc_l_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5677_STO1_ADC_MIXER,
@@ -4500,10 +4679,10 @@ static int rt5677_suspend(struct snd_soc_codec *codec)
if (!rt5677->dsp_vad_en) {
regcache_cache_only(rt5677->regmap, true);
regcache_mark_dirty(rt5677->regmap);
- }
- if (gpio_is_valid(rt5677->pow_ldo2))
- gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
+ if (gpio_is_valid(rt5677->pow_ldo2))
+ gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
+ }
return 0;
}
@@ -4512,12 +4691,12 @@ static int rt5677_resume(struct snd_soc_codec *codec)
{
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
- if (gpio_is_valid(rt5677->pow_ldo2)) {
- gpio_set_value_cansleep(rt5677->pow_ldo2, 1);
- msleep(10);
- }
-
if (!rt5677->dsp_vad_en) {
+ if (gpio_is_valid(rt5677->pow_ldo2)) {
+ gpio_set_value_cansleep(rt5677->pow_ldo2, 1);
+ msleep(10);
+ }
+
regcache_cache_only(rt5677->regmap, false);
regcache_sync(rt5677->regmap);
}
@@ -4733,7 +4912,8 @@ static const struct regmap_config rt5677_regmap = {
};
static const struct i2c_device_id rt5677_i2c_id[] = {
- { "rt5677", 0 },
+ { "rt5677", RT5677 },
+ { "rt5676", RT5676 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
@@ -4850,6 +5030,8 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, rt5677);
+ rt5677->type = id->driver_data;
+
if (pdata)
rt5677->pdata = *pdata;
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index c0a625f290cc..9dceb41d18ea 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -1406,6 +1406,46 @@
#define RT5677_DSP_CLK_SRC_PLL2 (0x0 << 7)
#define RT5677_DSP_CLK_SRC_BYPASS (0x1 << 7)
+/* ASRC Control 3 (0x85) */
+#define RT5677_DA_STO_CLK_SEL_MASK (0xf << 12)
+#define RT5677_DA_STO_CLK_SEL_SFT 12
+#define RT5677_DA_MONO2L_CLK_SEL_MASK (0xf << 4)
+#define RT5677_DA_MONO2L_CLK_SEL_SFT 4
+#define RT5677_DA_MONO2R_CLK_SEL_MASK (0xf << 0)
+#define RT5677_DA_MONO2R_CLK_SEL_SFT 0
+
+/* ASRC Control 4 (0x86) */
+#define RT5677_DA_MONO3L_CLK_SEL_MASK (0xf << 12)
+#define RT5677_DA_MONO3L_CLK_SEL_SFT 12
+#define RT5677_DA_MONO3R_CLK_SEL_MASK (0xf << 8)
+#define RT5677_DA_MONO3R_CLK_SEL_SFT 8
+#define RT5677_DA_MONO4L_CLK_SEL_MASK (0xf << 4)
+#define RT5677_DA_MONO4L_CLK_SEL_SFT 4
+#define RT5677_DA_MONO4R_CLK_SEL_MASK (0xf << 0)
+#define RT5677_DA_MONO4R_CLK_SEL_SFT 0
+
+/* ASRC Control 5 (0x87) */
+#define RT5677_AD_STO1_CLK_SEL_MASK (0xf << 12)
+#define RT5677_AD_STO1_CLK_SEL_SFT 12
+#define RT5677_AD_STO2_CLK_SEL_MASK (0xf << 8)
+#define RT5677_AD_STO2_CLK_SEL_SFT 8
+#define RT5677_AD_STO3_CLK_SEL_MASK (0xf << 4)
+#define RT5677_AD_STO3_CLK_SEL_SFT 4
+#define RT5677_AD_STO4_CLK_SEL_MASK (0xf << 0)
+#define RT5677_AD_STO4_CLK_SEL_SFT 0
+
+/* ASRC Control 6 (0x88) */
+#define RT5677_AD_MONOL_CLK_SEL_MASK (0xf << 12)
+#define RT5677_AD_MONOL_CLK_SEL_SFT 12
+#define RT5677_AD_MONOR_CLK_SEL_MASK (0xf << 8)
+#define RT5677_AD_MONOR_CLK_SEL_SFT 8
+
+/* ASRC Control 7 (0x89) */
+#define RT5677_DSP_OB_0_3_CLK_SEL_MASK (0xf << 12)
+#define RT5677_DSP_OB_0_3_CLK_SEL_SFT 12
+#define RT5677_DSP_OB_4_7_CLK_SEL_MASK (0xf << 8)
+#define RT5677_DSP_OB_4_7_CLK_SEL_SFT 8
+
/* VAD Function Control 4 (0x9f) */
#define RT5677_VAD_SRC_MASK (0x7 << 8)
#define RT5677_VAD_SRC_SFT 8
@@ -1665,6 +1705,47 @@ enum {
RT5677_IRQ_JD3,
};
+enum rt5677_type {
+ RT5677,
+ RT5676,
+};
+
+/* ASRC clock source selection */
+enum {
+ RT5677_CLK_SEL_SYS,
+ RT5677_CLK_SEL_I2S1_ASRC,
+ RT5677_CLK_SEL_I2S2_ASRC,
+ RT5677_CLK_SEL_I2S3_ASRC,
+ RT5677_CLK_SEL_I2S4_ASRC,
+ RT5677_CLK_SEL_I2S5_ASRC,
+ RT5677_CLK_SEL_I2S6_ASRC,
+ RT5677_CLK_SEL_SYS2,
+ RT5677_CLK_SEL_SYS3,
+ RT5677_CLK_SEL_SYS4,
+ RT5677_CLK_SEL_SYS5,
+ RT5677_CLK_SEL_SYS6,
+ RT5677_CLK_SEL_SYS7,
+};
+
+/* filter mask */
+enum {
+ RT5677_DA_STEREO_FILTER = 0x1,
+ RT5677_DA_MONO2_L_FILTER = (0x1 << 1),
+ RT5677_DA_MONO2_R_FILTER = (0x1 << 2),
+ RT5677_DA_MONO3_L_FILTER = (0x1 << 3),
+ RT5677_DA_MONO3_R_FILTER = (0x1 << 4),
+ RT5677_DA_MONO4_L_FILTER = (0x1 << 5),
+ RT5677_DA_MONO4_R_FILTER = (0x1 << 6),
+ RT5677_AD_STEREO1_FILTER = (0x1 << 7),
+ RT5677_AD_STEREO2_FILTER = (0x1 << 8),
+ RT5677_AD_STEREO3_FILTER = (0x1 << 9),
+ RT5677_AD_STEREO4_FILTER = (0x1 << 10),
+ RT5677_AD_MONO_L_FILTER = (0x1 << 11),
+ RT5677_AD_MONO_R_FILTER = (0x1 << 12),
+ RT5677_DSP_OB_0_3_FILTER = (0x1 << 13),
+ RT5677_DSP_OB_4_7_FILTER = (0x1 << 14),
+};
+
struct rt5677_priv {
struct snd_soc_codec *codec;
struct rt5677_platform_data pdata;
@@ -1681,6 +1762,7 @@ struct rt5677_priv {
int pll_in;
int pll_out;
int pow_ldo2; /* POW_LDO2 pin */
+ enum rt5677_type type;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio_chip;
#endif
@@ -1690,4 +1772,7 @@ struct rt5677_priv {
bool is_vref_slow;
};
+int rt5677_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src);
+
#endif /* __RT5677_H__ */
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index 82095d6cd070..7947c0ebb1ed 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -783,19 +783,21 @@ static inline void sn95031_enable_jack_btn(struct snd_soc_codec *codec)
snd_soc_write(codec, SN95031_BTNCTRL2, 0x01);
}
-static int sn95031_get_headset_state(struct snd_soc_jack *mfld_jack)
+static int sn95031_get_headset_state(struct snd_soc_codec *codec,
+ struct snd_soc_jack *mfld_jack)
{
- int micbias = sn95031_get_mic_bias(mfld_jack->codec);
+ int micbias = sn95031_get_mic_bias(codec);
int jack_type = snd_soc_jack_get_type(mfld_jack, micbias);
pr_debug("jack type detected = %d\n", jack_type);
if (jack_type == SND_JACK_HEADSET)
- sn95031_enable_jack_btn(mfld_jack->codec);
+ sn95031_enable_jack_btn(codec);
return jack_type;
}
-void sn95031_jack_detection(struct mfld_jack_data *jack_data)
+void sn95031_jack_detection(struct snd_soc_codec *codec,
+ struct mfld_jack_data *jack_data)
{
unsigned int status;
unsigned int mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_HEADSET;
@@ -809,11 +811,11 @@ void sn95031_jack_detection(struct mfld_jack_data *jack_data)
status = SND_JACK_HEADSET | SND_JACK_BTN_1;
} else if (jack_data->intr_id & 0x4) {
pr_debug("headset or headphones inserted\n");
- status = sn95031_get_headset_state(jack_data->mfld_jack);
+ status = sn95031_get_headset_state(codec, jack_data->mfld_jack);
} else if (jack_data->intr_id & 0x8) {
pr_debug("headset or headphones removed\n");
status = 0;
- sn95031_disable_jack_btn(jack_data->mfld_jack->codec);
+ sn95031_disable_jack_btn(codec);
} else {
pr_err("unidentified interrupt\n");
return;
diff --git a/sound/soc/codecs/sn95031.h b/sound/soc/codecs/sn95031.h
index 20376d234fb8..7651fe4e6a45 100644
--- a/sound/soc/codecs/sn95031.h
+++ b/sound/soc/codecs/sn95031.h
@@ -127,6 +127,7 @@ struct mfld_jack_data {
struct snd_soc_jack *mfld_jack;
};
-extern void sn95031_jack_detection(struct mfld_jack_data *jack_data);
+extern void sn95031_jack_detection(struct snd_soc_codec *codec,
+ struct mfld_jack_data *jack_data);
#endif
diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
index bda2ee18769e..669e3228241e 100644
--- a/sound/soc/codecs/sta350.c
+++ b/sound/soc/codecs/sta350.c
@@ -1213,27 +1213,15 @@ static int sta350_i2c_probe(struct i2c_client *i2c,
#endif
/* GPIOs */
- sta350->gpiod_nreset = devm_gpiod_get(dev, "reset");
- if (IS_ERR(sta350->gpiod_nreset)) {
- ret = PTR_ERR(sta350->gpiod_nreset);
- if (ret != -ENOENT && ret != -ENOSYS)
- return ret;
-
- sta350->gpiod_nreset = NULL;
- } else {
- gpiod_direction_output(sta350->gpiod_nreset, 0);
- }
-
- sta350->gpiod_power_down = devm_gpiod_get(dev, "power-down");
- if (IS_ERR(sta350->gpiod_power_down)) {
- ret = PTR_ERR(sta350->gpiod_power_down);
- if (ret != -ENOENT && ret != -ENOSYS)
- return ret;
-
- sta350->gpiod_power_down = NULL;
- } else {
- gpiod_direction_output(sta350->gpiod_power_down, 0);
- }
+ sta350->gpiod_nreset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sta350->gpiod_nreset))
+ return PTR_ERR(sta350->gpiod_nreset);
+
+ sta350->gpiod_power_down = devm_gpiod_get(dev, "power-down",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sta350->gpiod_power_down))
+ return PTR_ERR(sta350->gpiod_power_down);
/* regulators */
for (i = 0; i < ARRAY_SIZE(sta350->supplies); i++)
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index ae23acdd2708..dfb4ff5cc9ea 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -485,16 +485,9 @@ static int tas2552_probe(struct i2c_client *client,
if (data == NULL)
return -ENOMEM;
- data->enable_gpio = devm_gpiod_get(dev, "enable");
- if (IS_ERR(data->enable_gpio)) {
- ret = PTR_ERR(data->enable_gpio);
- if (ret != -ENOENT && ret != -ENOSYS)
- return ret;
-
- data->enable_gpio = NULL;
- } else {
- gpiod_direction_output(data->enable_gpio, 0);
- }
+ data->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(data->enable_gpio))
+ return PTR_ERR(data->enable_gpio);
data->tas2552_client = client;
data->regmap = devm_regmap_init_i2c(client, &tas2552_regmap_config);
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index 16f1b71edb55..aab0af681e8c 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c,
int i;
tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL);
- if (IS_ERR(tfa9879))
- return PTR_ERR(tfa9879);
+ if (!tfa9879)
+ return -ENOMEM;
i2c_set_clientdata(i2c, tfa9879);
diff --git a/sound/soc/codecs/tlv320aic23-i2c.c b/sound/soc/codecs/tlv320aic23-i2c.c
index f13701995482..78a94af65518 100644
--- a/sound/soc/codecs/tlv320aic23-i2c.c
+++ b/sound/soc/codecs/tlv320aic23-i2c.c
@@ -31,7 +31,7 @@ static int tlv320aic23_i2c_probe(struct i2c_client *i2c,
return tlv320aic23_probe(&i2c->dev, regmap);
}
-static int __exit tlv320aic23_i2c_remove(struct i2c_client *i2c)
+static int tlv320aic23_i2c_remove(struct i2c_client *i2c)
{
snd_soc_unregister_codec(&i2c->dev);
return 0;
@@ -56,7 +56,7 @@ static struct i2c_driver tlv320aic23_i2c_driver = {
.of_match_table = of_match_ptr(tlv320aic23_of_match),
},
.probe = tlv320aic23_i2c_probe,
- .remove = __exit_p(tlv320aic23_i2c_remove),
+ .remove = tlv320aic23_i2c_remove,
.id_table = tlv320aic23_id,
};
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index 15599845a660..5a9da28f4f33 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -1554,7 +1554,6 @@ static int wm2200_probe(struct snd_soc_codec *codec)
int ret;
wm2200->codec = codec;
- codec->dapm.bias_level = SND_SOC_BIAS_OFF;
ret = snd_soc_add_codec_controls(codec, wm_adsp1_fw_controls, 2);
if (ret != 0)
@@ -1942,6 +1941,7 @@ static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
struct _fll_div factors;
int ret, i, timeout;
+ unsigned long time_left;
if (!Fout) {
dev_dbg(codec->dev, "FLL disabled");
@@ -2021,9 +2021,10 @@ static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
/* Poll for the lock; will use the interrupt to exit quickly */
for (i = 0; i < timeout; i++) {
if (i2c->irq) {
- ret = wait_for_completion_timeout(&wm2200->fll_lock,
- msecs_to_jiffies(25));
- if (ret > 0)
+ time_left = wait_for_completion_timeout(
+ &wm2200->fll_lock,
+ msecs_to_jiffies(25));
+ if (time_left > 0)
break;
} else {
msleep(1);
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index ea09db585aa1..96740379b711 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1762,6 +1762,7 @@ static int wm5100_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
struct _fll_div factors;
struct wm5100_fll *fll;
int ret, base, lock, i, timeout;
+ unsigned long time_left;
switch (fll_id) {
case WM5100_FLL1:
@@ -1842,9 +1843,9 @@ static int wm5100_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
/* Poll for the lock; will use interrupt when we can test */
for (i = 0; i < timeout; i++) {
if (i2c->irq) {
- ret = wait_for_completion_timeout(&fll->lock,
- msecs_to_jiffies(25));
- if (ret > 0)
+ time_left = wait_for_completion_timeout(&fll->lock,
+ msecs_to_jiffies(25));
+ if (time_left > 0)
break;
} else {
msleep(1);
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 6d0fe0ac95a3..0c6d1bc0526e 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1861,7 +1861,6 @@ static unsigned int wm5102_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_2L,
ARIZONA_DAC_DIGITAL_VOLUME_2R,
ARIZONA_DAC_DIGITAL_VOLUME_3L,
- ARIZONA_DAC_DIGITAL_VOLUME_3R,
ARIZONA_DAC_DIGITAL_VOLUME_4L,
ARIZONA_DAC_DIGITAL_VOLUME_4R,
ARIZONA_DAC_DIGITAL_VOLUME_5L,
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index c81a9eab3e3e..c65e5a75fc1a 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -69,14 +69,14 @@ struct wm8350_data {
struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
int fll_freq_out;
int fll_freq_in;
+ struct delayed_work pga_work;
};
/*
* Ramp OUT1 PGA volume to minimise pops at stream startup and shutdown.
*/
-static inline int wm8350_out1_ramp_step(struct snd_soc_codec *codec)
+static inline int wm8350_out1_ramp_step(struct wm8350_data *wm8350_data)
{
- struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out1 = &wm8350_data->out1;
struct wm8350 *wm8350 = wm8350_data->wm8350;
int left_complete = 0, right_complete = 0;
@@ -140,9 +140,8 @@ static inline int wm8350_out1_ramp_step(struct snd_soc_codec *codec)
/*
* Ramp OUT2 PGA volume to minimise pops at stream startup and shutdown.
*/
-static inline int wm8350_out2_ramp_step(struct snd_soc_codec *codec)
+static inline int wm8350_out2_ramp_step(struct wm8350_data *wm8350_data)
{
- struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out2 = &wm8350_data->out2;
struct wm8350 *wm8350 = wm8350_data->wm8350;
int left_complete = 0, right_complete = 0;
@@ -210,10 +209,8 @@ static inline int wm8350_out2_ramp_step(struct snd_soc_codec *codec)
*/
static void wm8350_pga_work(struct work_struct *work)
{
- struct snd_soc_dapm_context *dapm =
- container_of(work, struct snd_soc_dapm_context, delayed_work.work);
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
- struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
+ struct wm8350_data *wm8350_data =
+ container_of(work, struct wm8350_data, pga_work.work);
struct wm8350_output *out1 = &wm8350_data->out1,
*out2 = &wm8350_data->out2;
int i, out1_complete, out2_complete;
@@ -226,9 +223,9 @@ static void wm8350_pga_work(struct work_struct *work)
for (i = 0; i <= 63; i++) {
out1_complete = 1, out2_complete = 1;
if (out1->ramp != WM8350_RAMP_NONE)
- out1_complete = wm8350_out1_ramp_step(codec);
+ out1_complete = wm8350_out1_ramp_step(wm8350_data);
if (out2->ramp != WM8350_RAMP_NONE)
- out2_complete = wm8350_out2_ramp_step(codec);
+ out2_complete = wm8350_out2_ramp_step(wm8350_data);
/* ramp finished ? */
if (out1_complete && out2_complete)
@@ -283,7 +280,7 @@ static int pga_event(struct snd_soc_dapm_widget *w,
out->ramp = WM8350_RAMP_UP;
out->active = 1;
- schedule_delayed_work(&codec->dapm.delayed_work,
+ schedule_delayed_work(&wm8350_data->pga_work,
msecs_to_jiffies(1));
break;
@@ -291,7 +288,7 @@ static int pga_event(struct snd_soc_dapm_widget *w,
out->ramp = WM8350_RAMP_DOWN;
out->active = 0;
- schedule_delayed_work(&codec->dapm.delayed_work,
+ schedule_delayed_work(&wm8350_data->pga_work,
msecs_to_jiffies(1));
break;
}
@@ -1492,7 +1489,7 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
/* Put the codec into reset if it wasn't already */
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
- INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8350_pga_work);
+ INIT_DELAYED_WORK(&priv->pga_work, wm8350_pga_work);
INIT_DELAYED_WORK(&priv->hpl.work, wm8350_hpl_work);
INIT_DELAYED_WORK(&priv->hpr.work, wm8350_hpr_work);
@@ -1578,7 +1575,7 @@ static int wm8350_codec_remove(struct snd_soc_codec *codec)
/* if there was any work waiting then we run it now and
* wait for its completion */
- flush_delayed_work(&codec->dapm.delayed_work);
+ flush_delayed_work(&priv->pga_work);
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index c6d10533e2bd..2245b6a32f3d 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -25,6 +25,7 @@
#include <linux/spi/spi.h>
#include <linux/of_device.h>
#include <linux/mutex.h>
+#include <linux/clk.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -45,6 +46,7 @@ static const char *wm8731_supply_names[WM8731_NUM_SUPPLIES] = {
/* codec private data */
struct wm8731_priv {
struct regmap *regmap;
+ struct clk *mclk;
struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES];
const struct snd_pcm_hw_constraint_list *constraints;
unsigned int sysclk;
@@ -390,6 +392,8 @@ static int wm8731_set_dai_sysclk(struct snd_soc_dai *codec_dai,
switch (clk_id) {
case WM8731_SYSCLK_XTAL:
case WM8731_SYSCLK_MCLK:
+ if (wm8731->mclk && clk_set_rate(wm8731->mclk, freq))
+ return -EINVAL;
wm8731->sysclk_type = clk_id;
break;
default:
@@ -491,6 +495,8 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
+ if (wm8731->mclk)
+ clk_prepare_enable(wm8731->mclk);
break;
case SND_SOC_BIAS_PREPARE:
break;
@@ -509,6 +515,8 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8731_PWR, reg | 0x0040);
break;
case SND_SOC_BIAS_OFF:
+ if (wm8731->mclk)
+ clk_disable_unprepare(wm8731->mclk);
snd_soc_write(codec, WM8731_PWR, 0xffff);
regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
wm8731->supplies);
@@ -667,6 +675,19 @@ static int wm8731_spi_probe(struct spi_device *spi)
if (wm8731 == NULL)
return -ENOMEM;
+ wm8731->mclk = devm_clk_get(&spi->dev, "mclk");
+ if (IS_ERR(wm8731->mclk)) {
+ ret = PTR_ERR(wm8731->mclk);
+ if (ret == -ENOENT) {
+ wm8731->mclk = NULL;
+ dev_warn(&spi->dev, "Assuming static MCLK\n");
+ } else {
+ dev_err(&spi->dev, "Failed to get MCLK: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
mutex_init(&wm8731->lock);
wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap);
@@ -718,6 +739,19 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
if (wm8731 == NULL)
return -ENOMEM;
+ wm8731->mclk = devm_clk_get(&i2c->dev, "mclk");
+ if (IS_ERR(wm8731->mclk)) {
+ ret = PTR_ERR(wm8731->mclk);
+ if (ret == -ENOENT) {
+ wm8731->mclk = NULL;
+ dev_warn(&i2c->dev, "Assuming static MCLK\n");
+ } else {
+ dev_err(&i2c->dev, "Failed to get MCLK: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
mutex_init(&wm8731->lock);
wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 31bb4801a005..9e71c768966f 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -123,7 +123,7 @@ static struct {
};
static const unsigned int rates_11289[] = {
- 44100, 88235,
+ 44100, 88200,
};
static const struct snd_pcm_hw_constraint_list constraints_11289 = {
@@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
};
static const unsigned int rates_16934[] = {
- 44100, 88235,
+ 44100, 88200,
};
static const struct snd_pcm_hw_constraint_list constraints_16934 = {
@@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
};
static const unsigned int rates_22579[] = {
- 44100, 88235, 1764000
+ 44100, 88200, 176400
};
static const struct snd_pcm_hw_constraint_list constraints_22579 = {
@@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
};
static const unsigned int rates_36864[] = {
- 48000, 96000, 19200
+ 48000, 96000, 192000
};
static const struct snd_pcm_hw_constraint_list constraints_36864 = {
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 21ca3a94fc96..c50a5959345f 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -153,6 +153,7 @@ struct wm8753_priv {
unsigned int hifi_fmt;
int dai_func;
+ struct delayed_work charge_work;
};
#define wm8753_reset(c) snd_soc_write(c, WM8753_RESET, 0)
@@ -1326,9 +1327,19 @@ static int wm8753_mute(struct snd_soc_dai *dai, int mute)
return 0;
}
+static void wm8753_charge_work(struct work_struct *work)
+{
+ struct wm8753_priv *wm8753 =
+ container_of(work, struct wm8753_priv, charge_work.work);
+
+ /* Set to 500k */
+ regmap_update_bits(wm8753->regmap, WM8753_PWR1, 0x0180, 0x0100);
+}
+
static int wm8753_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
u16 pwr_reg = snd_soc_read(codec, WM8753_PWR1) & 0xfe3e;
switch (level) {
@@ -1337,14 +1348,22 @@ static int wm8753_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x00c0);
break;
case SND_SOC_BIAS_PREPARE:
- /* set vmid to 5k for quick power up */
- snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x01c1);
+ /* Wait until fully charged */
+ flush_delayed_work(&wm8753->charge_work);
break;
case SND_SOC_BIAS_STANDBY:
- /* mute dac and set vmid to 500k, enable VREF */
- snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x0141);
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ /* set vmid to 5k for quick power up */
+ snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x01c1);
+ schedule_delayed_work(&wm8753->charge_work,
+ msecs_to_jiffies(caps_charge));
+ } else {
+ /* mute dac and set vmid to 500k, enable VREF */
+ snd_soc_write(codec, WM8753_PWR1, pwr_reg | 0x0141);
+ }
break;
case SND_SOC_BIAS_OFF:
+ cancel_delayed_work_sync(&wm8753->charge_work);
snd_soc_write(codec, WM8753_PWR1, 0x0001);
break;
}
@@ -1428,38 +1447,12 @@ static struct snd_soc_dai_driver wm8753_dai[] = {
},
};
-static void wm8753_work(struct work_struct *work)
-{
- struct snd_soc_dapm_context *dapm =
- container_of(work, struct snd_soc_dapm_context,
- delayed_work.work);
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
- wm8753_set_bias_level(codec, dapm->bias_level);
-}
-
-static int wm8753_suspend(struct snd_soc_codec *codec)
-{
- wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int wm8753_resume(struct snd_soc_codec *codec)
{
struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
regcache_sync(wm8753->regmap);
- wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- /* charge wm8753 caps */
- if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
- wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
- codec->dapm.bias_level = SND_SOC_BIAS_ON;
- queue_delayed_work(system_power_efficient_wq,
- &codec->dapm.delayed_work,
- msecs_to_jiffies(caps_charge));
- }
-
return 0;
}
@@ -1468,7 +1461,7 @@ static int wm8753_probe(struct snd_soc_codec *codec)
struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
int ret;
- INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8753_work);
+ INIT_DELAYED_WORK(&wm8753->charge_work, wm8753_charge_work);
ret = wm8753_reset(codec);
if (ret < 0) {
@@ -1476,14 +1469,8 @@ static int wm8753_probe(struct snd_soc_codec *codec)
return ret;
}
- wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
wm8753->dai_func = 0;
- /* charge output caps */
- wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
- schedule_delayed_work(&codec->dapm.delayed_work,
- msecs_to_jiffies(caps_charge));
-
/* set the update bits */
snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
@@ -1499,21 +1486,11 @@ static int wm8753_probe(struct snd_soc_codec *codec)
return 0;
}
-/* power down chip */
-static int wm8753_remove(struct snd_soc_codec *codec)
-{
- flush_delayed_work(&codec->dapm.delayed_work);
- wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8753 = {
.probe = wm8753_probe,
- .remove = wm8753_remove,
- .suspend = wm8753_suspend,
.resume = wm8753_resume,
.set_bias_level = wm8753_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8753_snd_controls,
.num_controls = ARRAY_SIZE(wm8753_snd_controls),
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
new file mode 100644
index 000000000000..6596f5f3a0c3
--- /dev/null
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -0,0 +1,65 @@
+/*
+ * wm8804-i2c.c -- WM8804 S/PDIF transceiver driver - I2C
+ *
+ * Copyright 2015 Cirrus Logic Inc
+ *
+ * Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "wm8804.h"
+
+static int wm8804_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &wm8804_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return wm8804_probe(&i2c->dev, regmap);
+}
+
+static int wm8804_i2c_remove(struct i2c_client *i2c)
+{
+ wm8804_remove(&i2c->dev);
+ return 0;
+}
+
+static const struct i2c_device_id wm8804_i2c_id[] = {
+ { "wm8804", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+
+static const struct of_device_id wm8804_of_match[] = {
+ { .compatible = "wlf,wm8804", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wm8804_of_match);
+
+static struct i2c_driver wm8804_i2c_driver = {
+ .driver = {
+ .name = "wm8804",
+ .owner = THIS_MODULE,
+ .pm = &wm8804_pm,
+ .of_match_table = wm8804_of_match,
+ },
+ .probe = wm8804_i2c_probe,
+ .remove = wm8804_i2c_remove,
+ .id_table = wm8804_i2c_id
+};
+
+module_i2c_driver(wm8804_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC WM8804 driver - I2C");
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8804-spi.c b/sound/soc/codecs/wm8804-spi.c
new file mode 100644
index 000000000000..407a3cf391e5
--- /dev/null
+++ b/sound/soc/codecs/wm8804-spi.c
@@ -0,0 +1,57 @@
+/*
+ * wm8804-spi.c -- WM8804 S/PDIF transceiver driver - SPI
+ *
+ * Copyright 2015 Cirrus Logic Inc
+ *
+ * Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "wm8804.h"
+
+static int wm8804_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &wm8804_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return wm8804_probe(&spi->dev, regmap);
+}
+
+static int wm8804_spi_remove(struct spi_device *spi)
+{
+ wm8804_remove(&spi->dev);
+ return 0;
+}
+
+static const struct of_device_id wm8804_of_match[] = {
+ { .compatible = "wlf,wm8804", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wm8804_of_match);
+
+static struct spi_driver wm8804_spi_driver = {
+ .driver = {
+ .name = "wm8804",
+ .owner = THIS_MODULE,
+ .pm = &wm8804_pm,
+ .of_match_table = wm8804_of_match,
+ },
+ .probe = wm8804_spi_probe,
+ .remove = wm8804_spi_remove
+};
+
+module_spi_driver(wm8804_spi_driver);
+
+MODULE_DESCRIPTION("ASoC WM8804 driver - SPI");
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index b2b0e68f707e..1e403f67cf16 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -13,12 +13,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/pm.h>
-#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
#include <linux/of_device.h>
-#include <linux/spi/spi.h>
-#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -27,6 +26,7 @@
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
+#include <sound/soc-dapm.h>
#include "wm8804.h"
@@ -60,18 +60,23 @@ static const struct reg_default wm8804_reg_defaults[] = {
};
struct wm8804_priv {
+ struct device *dev;
struct regmap *regmap;
struct regulator_bulk_data supplies[WM8804_NUM_SUPPLIES];
struct notifier_block disable_nb[WM8804_NUM_SUPPLIES];
int mclk_div;
-};
-static int txsrc_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
+ struct gpio_desc *reset;
+
+ int aif_pwr;
+};
static int txsrc_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+static int wm8804_aif_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
+
/*
* We can't use the same notifier block for more than one supply and
* there's no way I can see to get from a callback to the caller
@@ -93,26 +98,62 @@ WM8804_REGULATOR_EVENT(0)
WM8804_REGULATOR_EVENT(1)
static const char *txsrc_text[] = { "S/PDIF RX", "AIF" };
-static SOC_ENUM_SINGLE_EXT_DECL(txsrc, txsrc_text);
+static const SOC_ENUM_SINGLE_DECL(txsrc, WM8804_SPDTX4, 6, txsrc_text);
-static const struct snd_kcontrol_new wm8804_snd_controls[] = {
- SOC_ENUM_EXT("Input Source", txsrc, txsrc_get, txsrc_put),
- SOC_SINGLE("TX Playback Switch", WM8804_PWRDN, 2, 1, 1),
- SOC_SINGLE("AIF Playback Switch", WM8804_PWRDN, 4, 1, 1)
+static const struct snd_kcontrol_new wm8804_tx_source_mux[] = {
+ SOC_DAPM_ENUM_EXT("Input Source", txsrc,
+ snd_soc_dapm_get_enum_double, txsrc_put),
};
-static int txsrc_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec;
- unsigned int src;
+static const struct snd_soc_dapm_widget wm8804_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("SPDIF Out"),
+SND_SOC_DAPM_INPUT("SPDIF In"),
+
+SND_SOC_DAPM_PGA("SPDIFTX", WM8804_PWRDN, 2, 1, NULL, 0),
+SND_SOC_DAPM_PGA("SPDIFRX", WM8804_PWRDN, 1, 1, NULL, 0),
+
+SND_SOC_DAPM_MUX("Tx Source", SND_SOC_NOPM, 6, 0, wm8804_tx_source_mux),
+
+SND_SOC_DAPM_AIF_OUT_E("AIFTX", NULL, 0, SND_SOC_NOPM, 0, 0, wm8804_aif_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIFRX", NULL, 0, SND_SOC_NOPM, 0, 0, wm8804_aif_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route wm8804_dapm_routes[] = {
+ { "AIFRX", NULL, "Playback" },
+ { "Tx Source", "AIF", "AIFRX" },
- codec = snd_soc_kcontrol_codec(kcontrol);
- src = snd_soc_read(codec, WM8804_SPDTX4);
- if (src & 0x40)
- ucontrol->value.integer.value[0] = 1;
- else
- ucontrol->value.integer.value[0] = 0;
+ { "SPDIFRX", NULL, "SPDIF In" },
+ { "Tx Source", "S/PDIF RX", "SPDIFRX" },
+
+ { "SPDIFTX", NULL, "Tx Source" },
+ { "SPDIF Out", NULL, "SPDIFTX" },
+
+ { "AIFTX", NULL, "SPDIFRX" },
+ { "Capture", NULL, "AIFTX" },
+};
+
+static int wm8804_aif_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wm8804_priv *wm8804 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* power up the aif */
+ if (!wm8804->aif_pwr)
+ snd_soc_update_bits(codec, WM8804_PWRDN, 0x10, 0x0);
+ wm8804->aif_pwr++;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* power down only if both paths are disabled */
+ wm8804->aif_pwr--;
+ if (!wm8804->aif_pwr)
+ snd_soc_update_bits(codec, WM8804_PWRDN, 0x10, 0x10);
+ break;
+ }
return 0;
}
@@ -120,48 +161,33 @@ static int txsrc_get(struct snd_kcontrol *kcontrol,
static int txsrc_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec;
- unsigned int src, txpwr;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int val = ucontrol->value.enumerated.item[0] << e->shift_l;
+ unsigned int mask = 1 << e->shift_l;
+ unsigned int txpwr;
+
+ if (val != 0 && val != mask)
+ return -EINVAL;
- codec = snd_soc_kcontrol_codec(kcontrol);
+ snd_soc_dapm_mutex_lock(dapm);
- if (ucontrol->value.integer.value[0] != 0
- && ucontrol->value.integer.value[0] != 1)
- return -EINVAL;
+ if (snd_soc_test_bits(codec, e->reg, mask, val)) {
+ /* save the current power state of the transmitter */
+ txpwr = snd_soc_read(codec, WM8804_PWRDN) & 0x4;
- src = snd_soc_read(codec, WM8804_SPDTX4);
- switch ((src & 0x40) >> 6) {
- case 0:
- if (!ucontrol->value.integer.value[0])
- return 0;
- break;
- case 1:
- if (ucontrol->value.integer.value[1])
- return 0;
- break;
- }
+ /* power down the transmitter */
+ snd_soc_update_bits(codec, WM8804_PWRDN, 0x4, 0x4);
- /* save the current power state of the transmitter */
- txpwr = snd_soc_read(codec, WM8804_PWRDN) & 0x4;
- /* power down the transmitter */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x4, 0x4);
- /* set the tx source */
- snd_soc_update_bits(codec, WM8804_SPDTX4, 0x40,
- ucontrol->value.integer.value[0] << 6);
-
- if (ucontrol->value.integer.value[0]) {
- /* power down the receiver */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x2, 0x2);
- /* power up the AIF */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x10, 0);
- } else {
- /* don't power down the AIF -- may be used as an output */
- /* power up the receiver */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x2, 0);
+ /* set the tx source */
+ snd_soc_update_bits(codec, e->reg, mask, val);
+
+ /* restore the transmitter's configuration */
+ snd_soc_update_bits(codec, WM8804_PWRDN, 0x4, txpwr);
}
- /* restore the transmitter's configuration */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x4, txpwr);
+ snd_soc_dapm_mutex_unlock(dapm);
return 0;
}
@@ -185,9 +211,9 @@ static bool wm8804_volatile(struct device *dev, unsigned int reg)
}
}
-static int wm8804_reset(struct snd_soc_codec *codec)
+static int wm8804_soft_reset(struct wm8804_priv *wm8804)
{
- return snd_soc_write(codec, WM8804_RST_DEVID1, 0x0);
+ return regmap_write(wm8804->regmap, WM8804_RST_DEVID1, 0x0);
}
static int wm8804_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
@@ -379,19 +405,19 @@ static int wm8804_set_pll(struct snd_soc_dai *dai, int pll_id,
int source, unsigned int freq_in,
unsigned int freq_out)
{
- struct snd_soc_codec *codec;
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8804_priv *wm8804 = snd_soc_codec_get_drvdata(codec);
+ bool change;
- codec = dai->codec;
if (!freq_in || !freq_out) {
/* disable the PLL */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x1, 0x1);
- return 0;
+ regmap_update_bits_check(wm8804->regmap, WM8804_PWRDN,
+ 0x1, 0x1, &change);
+ if (change)
+ pm_runtime_put(wm8804->dev);
} else {
int ret;
struct pll_div pll_div;
- struct wm8804_priv *wm8804;
-
- wm8804 = snd_soc_codec_get_drvdata(codec);
ret = pll_factors(&pll_div, freq_out, freq_in,
wm8804->mclk_div);
@@ -399,7 +425,10 @@ static int wm8804_set_pll(struct snd_soc_dai *dai, int pll_id,
return ret;
/* power down the PLL before reprogramming it */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x1, 0x1);
+ regmap_update_bits_check(wm8804->regmap, WM8804_PWRDN,
+ 0x1, 0x1, &change);
+ if (!change)
+ pm_runtime_get_sync(wm8804->dev);
/* set PLLN and PRESCALE */
snd_soc_update_bits(codec, WM8804_PLL4, 0xf | 0x10,
@@ -477,141 +506,6 @@ static int wm8804_set_clkdiv(struct snd_soc_dai *dai,
return 0;
}
-static int wm8804_set_bias_level(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- int ret;
- struct wm8804_priv *wm8804;
-
- wm8804 = snd_soc_codec_get_drvdata(codec);
- switch (level) {
- case SND_SOC_BIAS_ON:
- break;
- case SND_SOC_BIAS_PREPARE:
- /* power up the OSC and the PLL */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x9, 0);
- break;
- case SND_SOC_BIAS_STANDBY:
- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
- ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
- wm8804->supplies);
- if (ret) {
- dev_err(codec->dev,
- "Failed to enable supplies: %d\n",
- ret);
- return ret;
- }
- regcache_sync(wm8804->regmap);
- }
- /* power down the OSC and the PLL */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x9, 0x9);
- break;
- case SND_SOC_BIAS_OFF:
- /* power down the OSC and the PLL */
- snd_soc_update_bits(codec, WM8804_PWRDN, 0x9, 0x9);
- regulator_bulk_disable(ARRAY_SIZE(wm8804->supplies),
- wm8804->supplies);
- break;
- }
-
- codec->dapm.bias_level = level;
- return 0;
-}
-
-static int wm8804_remove(struct snd_soc_codec *codec)
-{
- struct wm8804_priv *wm8804;
- int i;
-
- wm8804 = snd_soc_codec_get_drvdata(codec);
-
- for (i = 0; i < ARRAY_SIZE(wm8804->supplies); ++i)
- regulator_unregister_notifier(wm8804->supplies[i].consumer,
- &wm8804->disable_nb[i]);
- return 0;
-}
-
-static int wm8804_probe(struct snd_soc_codec *codec)
-{
- struct wm8804_priv *wm8804;
- int i, id1, id2, ret;
-
- wm8804 = snd_soc_codec_get_drvdata(codec);
-
- for (i = 0; i < ARRAY_SIZE(wm8804->supplies); i++)
- wm8804->supplies[i].supply = wm8804_supply_names[i];
-
- ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8804->supplies),
- wm8804->supplies);
- if (ret) {
- dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
- return ret;
- }
-
- wm8804->disable_nb[0].notifier_call = wm8804_regulator_event_0;
- wm8804->disable_nb[1].notifier_call = wm8804_regulator_event_1;
-
- /* This should really be moved into the regulator core */
- for (i = 0; i < ARRAY_SIZE(wm8804->supplies); i++) {
- ret = regulator_register_notifier(wm8804->supplies[i].consumer,
- &wm8804->disable_nb[i]);
- if (ret != 0) {
- dev_err(codec->dev,
- "Failed to register regulator notifier: %d\n",
- ret);
- }
- }
-
- ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
- wm8804->supplies);
- if (ret) {
- dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
- return ret;
- }
-
- id1 = snd_soc_read(codec, WM8804_RST_DEVID1);
- if (id1 < 0) {
- dev_err(codec->dev, "Failed to read device ID: %d\n", id1);
- ret = id1;
- goto err_reg_enable;
- }
-
- id2 = snd_soc_read(codec, WM8804_DEVID2);
- if (id2 < 0) {
- dev_err(codec->dev, "Failed to read device ID: %d\n", id2);
- ret = id2;
- goto err_reg_enable;
- }
-
- id2 = (id2 << 8) | id1;
-
- if (id2 != 0x8805) {
- dev_err(codec->dev, "Invalid device ID: %#x\n", id2);
- ret = -EINVAL;
- goto err_reg_enable;
- }
-
- ret = snd_soc_read(codec, WM8804_DEVREV);
- if (ret < 0) {
- dev_err(codec->dev, "Failed to read device revision: %d\n",
- ret);
- goto err_reg_enable;
- }
- dev_info(codec->dev, "revision %c\n", ret + 'A');
-
- ret = wm8804_reset(codec);
- if (ret < 0) {
- dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
- goto err_reg_enable;
- }
-
- return 0;
-
-err_reg_enable:
- regulator_bulk_disable(ARRAY_SIZE(wm8804->supplies), wm8804->supplies);
- return ret;
-}
-
static const struct snd_soc_dai_ops wm8804_dai_ops = {
.hw_params = wm8804_hw_params,
.set_fmt = wm8804_set_fmt,
@@ -649,22 +543,15 @@ static struct snd_soc_dai_driver wm8804_dai = {
};
static const struct snd_soc_codec_driver soc_codec_dev_wm8804 = {
- .probe = wm8804_probe,
- .remove = wm8804_remove,
- .set_bias_level = wm8804_set_bias_level,
.idle_bias_off = true,
- .controls = wm8804_snd_controls,
- .num_controls = ARRAY_SIZE(wm8804_snd_controls),
-};
-
-static const struct of_device_id wm8804_of_match[] = {
- { .compatible = "wlf,wm8804", },
- { }
+ .dapm_widgets = wm8804_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8804_dapm_widgets),
+ .dapm_routes = wm8804_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8804_dapm_routes),
};
-MODULE_DEVICE_TABLE(of, wm8804_of_match);
-static const struct regmap_config wm8804_regmap_config = {
+const struct regmap_config wm8804_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -675,128 +562,169 @@ static const struct regmap_config wm8804_regmap_config = {
.reg_defaults = wm8804_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8804_reg_defaults),
};
+EXPORT_SYMBOL_GPL(wm8804_regmap_config);
-#if defined(CONFIG_SPI_MASTER)
-static int wm8804_spi_probe(struct spi_device *spi)
+int wm8804_probe(struct device *dev, struct regmap *regmap)
{
struct wm8804_priv *wm8804;
- int ret;
+ unsigned int id1, id2;
+ int i, ret;
- wm8804 = devm_kzalloc(&spi->dev, sizeof *wm8804, GFP_KERNEL);
+ wm8804 = devm_kzalloc(dev, sizeof(*wm8804), GFP_KERNEL);
if (!wm8804)
return -ENOMEM;
- wm8804->regmap = devm_regmap_init_spi(spi, &wm8804_regmap_config);
- if (IS_ERR(wm8804->regmap)) {
- ret = PTR_ERR(wm8804->regmap);
+ dev_set_drvdata(dev, wm8804);
+
+ wm8804->dev = dev;
+ wm8804->regmap = regmap;
+
+ wm8804->reset = devm_gpiod_get_optional(dev, "wlf,reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(wm8804->reset)) {
+ ret = PTR_ERR(wm8804->reset);
+ dev_err(dev, "Failed to get reset line: %d\n", ret);
return ret;
}
- spi_set_drvdata(spi, wm8804);
+ for (i = 0; i < ARRAY_SIZE(wm8804->supplies); i++)
+ wm8804->supplies[i].supply = wm8804_supply_names[i];
- ret = snd_soc_register_codec(&spi->dev,
- &soc_codec_dev_wm8804, &wm8804_dai, 1);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(wm8804->supplies),
+ wm8804->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
- return ret;
-}
+ wm8804->disable_nb[0].notifier_call = wm8804_regulator_event_0;
+ wm8804->disable_nb[1].notifier_call = wm8804_regulator_event_1;
-static int wm8804_spi_remove(struct spi_device *spi)
-{
- snd_soc_unregister_codec(&spi->dev);
- return 0;
-}
+ /* This should really be moved into the regulator core */
+ for (i = 0; i < ARRAY_SIZE(wm8804->supplies); i++) {
+ struct regulator *regulator = wm8804->supplies[i].consumer;
-static struct spi_driver wm8804_spi_driver = {
- .driver = {
- .name = "wm8804",
- .owner = THIS_MODULE,
- .of_match_table = wm8804_of_match,
- },
- .probe = wm8804_spi_probe,
- .remove = wm8804_spi_remove
-};
-#endif
+ ret = devm_regulator_register_notifier(regulator,
+ &wm8804->disable_nb[i]);
+ if (ret != 0) {
+ dev_err(dev,
+ "Failed to register regulator notifier: %d\n",
+ ret);
+ return ret;
+ }
+ }
-#if IS_ENABLED(CONFIG_I2C)
-static int wm8804_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
-{
- struct wm8804_priv *wm8804;
- int ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
+ wm8804->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
- wm8804 = devm_kzalloc(&i2c->dev, sizeof *wm8804, GFP_KERNEL);
- if (!wm8804)
- return -ENOMEM;
+ if (wm8804->reset)
+ gpiod_set_value_cansleep(wm8804->reset, 1);
- wm8804->regmap = devm_regmap_init_i2c(i2c, &wm8804_regmap_config);
- if (IS_ERR(wm8804->regmap)) {
- ret = PTR_ERR(wm8804->regmap);
- return ret;
+ ret = regmap_read(regmap, WM8804_RST_DEVID1, &id1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read device ID: %d\n", ret);
+ goto err_reg_enable;
+ }
+
+ ret = regmap_read(regmap, WM8804_DEVID2, &id2);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read device ID: %d\n", ret);
+ goto err_reg_enable;
+ }
+
+ id2 = (id2 << 8) | id1;
+
+ if (id2 != 0x8805) {
+ dev_err(dev, "Invalid device ID: %#x\n", id2);
+ ret = -EINVAL;
+ goto err_reg_enable;
+ }
+
+ ret = regmap_read(regmap, WM8804_DEVREV, &id1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read device revision: %d\n",
+ ret);
+ goto err_reg_enable;
}
+ dev_info(dev, "revision %c\n", id1 + 'A');
- i2c_set_clientdata(i2c, wm8804);
+ if (!wm8804->reset) {
+ ret = wm8804_soft_reset(wm8804);
+ if (ret < 0) {
+ dev_err(dev, "Failed to issue reset: %d\n", ret);
+ goto err_reg_enable;
+ }
+ }
+
+ ret = snd_soc_register_codec(dev, &soc_codec_dev_wm8804,
+ &wm8804_dai, 1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register CODEC: %d\n", ret);
+ goto err_reg_enable;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
- ret = snd_soc_register_codec(&i2c->dev,
- &soc_codec_dev_wm8804, &wm8804_dai, 1);
+ return 0;
+
+err_reg_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8804->supplies), wm8804->supplies);
return ret;
}
+EXPORT_SYMBOL_GPL(wm8804_probe);
-static int wm8804_i2c_remove(struct i2c_client *i2c)
+void wm8804_remove(struct device *dev)
{
- snd_soc_unregister_codec(&i2c->dev);
- return 0;
+ pm_runtime_disable(dev);
+ snd_soc_unregister_codec(dev);
}
+EXPORT_SYMBOL_GPL(wm8804_remove);
-static const struct i2c_device_id wm8804_i2c_id[] = {
- { "wm8804", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
-
-static struct i2c_driver wm8804_i2c_driver = {
- .driver = {
- .name = "wm8804",
- .owner = THIS_MODULE,
- .of_match_table = wm8804_of_match,
- },
- .probe = wm8804_i2c_probe,
- .remove = wm8804_i2c_remove,
- .id_table = wm8804_i2c_id
-};
-#endif
-
-static int __init wm8804_modinit(void)
+#if IS_ENABLED(CONFIG_PM)
+static int wm8804_runtime_resume(struct device *dev)
{
- int ret = 0;
+ struct wm8804_priv *wm8804 = dev_get_drvdata(dev);
+ int ret;
-#if IS_ENABLED(CONFIG_I2C)
- ret = i2c_add_driver(&wm8804_i2c_driver);
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
+ wm8804->supplies);
if (ret) {
- printk(KERN_ERR "Failed to register wm8804 I2C driver: %d\n",
- ret);
- }
-#endif
-#if defined(CONFIG_SPI_MASTER)
- ret = spi_register_driver(&wm8804_spi_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register wm8804 SPI driver: %d\n",
- ret);
+ dev_err(wm8804->dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
}
-#endif
- return ret;
+
+ regcache_sync(wm8804->regmap);
+
+ /* Power up OSCCLK */
+ regmap_update_bits(wm8804->regmap, WM8804_PWRDN, 0x8, 0x0);
+
+ return 0;
}
-module_init(wm8804_modinit);
-static void __exit wm8804_exit(void)
+static int wm8804_runtime_suspend(struct device *dev)
{
-#if IS_ENABLED(CONFIG_I2C)
- i2c_del_driver(&wm8804_i2c_driver);
-#endif
-#if defined(CONFIG_SPI_MASTER)
- spi_unregister_driver(&wm8804_spi_driver);
-#endif
+ struct wm8804_priv *wm8804 = dev_get_drvdata(dev);
+
+ /* Power down OSCCLK */
+ regmap_update_bits(wm8804->regmap, WM8804_PWRDN, 0x8, 0x8);
+
+ regulator_bulk_disable(ARRAY_SIZE(wm8804->supplies),
+ wm8804->supplies);
+
+ return 0;
}
-module_exit(wm8804_exit);
+#endif
+
+const struct dev_pm_ops wm8804_pm = {
+ SET_RUNTIME_PM_OPS(wm8804_runtime_suspend, wm8804_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_GPL(wm8804_pm);
MODULE_DESCRIPTION("ASoC WM8804 driver");
MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
diff --git a/sound/soc/codecs/wm8804.h b/sound/soc/codecs/wm8804.h
index e72d4f4ba6b1..aa72fa66c932 100644
--- a/sound/soc/codecs/wm8804.h
+++ b/sound/soc/codecs/wm8804.h
@@ -13,6 +13,8 @@
#ifndef _WM8804_H
#define _WM8804_H
+#include <linux/regmap.h>
+
/*
* Register values.
*/
@@ -62,4 +64,10 @@
#define WM8804_MCLKDIV_256FS 0
#define WM8804_MCLKDIV_128FS 1
+extern const struct regmap_config wm8804_regmap_config;
+extern const struct dev_pm_ops wm8804_pm;
+
+int wm8804_probe(struct device *dev, struct regmap *regmap);
+void wm8804_remove(struct device *dev);
+
#endif /* _WM8804_H */
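
The wm8804.c/wm8804.h changes above turn the driver into a bus-agnostic core that exports wm8804_probe(), wm8804_remove() and wm8804_pm; the actual I2C/SPI front-ends are not part of this hunk. A minimal sketch of how an I2C front-end is expected to sit on top of that API (wm8804_i2c_probe() and friends here are illustrative, not the real wm8804-i2c.c):

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/err.h>

#include "wm8804.h"

static int wm8804_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct regmap *regmap;

	/* Bus-specific part: build the regmap for this transport... */
	regmap = devm_regmap_init_i2c(i2c, &wm8804_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* ...then hand the rest over to the shared core. */
	return wm8804_probe(&i2c->dev, regmap);
}

static int wm8804_i2c_remove(struct i2c_client *i2c)
{
	wm8804_remove(&i2c->dev);
	return 0;
}

static const struct i2c_device_id wm8804_i2c_id[] = {
	{ "wm8804", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);

static struct i2c_driver wm8804_i2c_driver = {
	.driver = {
		.name = "wm8804",
		.pm = &wm8804_pm,	/* runtime PM ops exported by the core */
	},
	.probe = wm8804_i2c_probe,
	.remove = wm8804_i2c_remove,
	.id_table = wm8804_i2c_id,
};
module_i2c_driver(wm8804_i2c_driver);

MODULE_DESCRIPTION("ASoC WM8804 I2C front-end (illustrative sketch)");
MODULE_LICENSE("GPL");
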
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 39ddb9b8834c..f9cbabdc6238 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -31,11 +31,11 @@
#define WM8971_REG_COUNT 43
-static struct workqueue_struct *wm8971_workq = NULL;
-
/* codec private data */
struct wm8971_priv {
unsigned int sysclk;
+ struct delayed_work charge_work;
+ struct regmap *regmap;
};
/*
@@ -552,9 +552,19 @@ static int wm8971_mute(struct snd_soc_dai *dai, int mute)
return 0;
}
+static void wm8971_charge_work(struct work_struct *work)
+{
+ struct wm8971_priv *wm8971 =
+ container_of(work, struct wm8971_priv, charge_work.work);
+
+ /* Set to 500k */
+ regmap_update_bits(wm8971->regmap, WM8971_PWR1, 0x0180, 0x0100);
+}
+
static int wm8971_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec);
u16 pwr_reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
switch (level) {
@@ -563,15 +573,24 @@ static int wm8971_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x00c1);
break;
case SND_SOC_BIAS_PREPARE:
+ /* Wait until fully charged */
+ flush_delayed_work(&wm8971->charge_work);
break;
case SND_SOC_BIAS_STANDBY:
- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
snd_soc_cache_sync(codec);
+ /* charge output caps - set vmid to 5k for quick power up */
+ snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x01c0);
+ queue_delayed_work(system_power_efficient_wq,
+ &wm8971->charge_work, msecs_to_jiffies(1000));
+ } else {
+ /* mute dac and set vmid to 500k, enable VREF */
+ snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x0140);
+ }
- /* mute dac and set vmid to 500k, enable VREF */
- snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x0140);
break;
case SND_SOC_BIAS_OFF:
+ cancel_delayed_work_sync(&wm8971->charge_work);
snd_soc_write(codec, WM8971_PWR1, 0x0001);
break;
}
@@ -610,58 +629,14 @@ static struct snd_soc_dai_driver wm8971_dai = {
.ops = &wm8971_dai_ops,
};
-static void wm8971_work(struct work_struct *work)
-{
- struct snd_soc_dapm_context *dapm =
- container_of(work, struct snd_soc_dapm_context,
- delayed_work.work);
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
- wm8971_set_bias_level(codec, codec->dapm.bias_level);
-}
-
-static int wm8971_suspend(struct snd_soc_codec *codec)
-{
- wm8971_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8971_resume(struct snd_soc_codec *codec)
-{
- u16 reg;
-
- wm8971_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- /* charge wm8971 caps */
- if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
- reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
- snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
- codec->dapm.bias_level = SND_SOC_BIAS_ON;
- queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
- msecs_to_jiffies(1000));
- }
-
- return 0;
-}
-
static int wm8971_probe(struct snd_soc_codec *codec)
{
- int ret = 0;
- u16 reg;
+ struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec);
- INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8971_work);
- wm8971_workq = create_workqueue("wm8971");
- if (wm8971_workq == NULL)
- return -ENOMEM;
+ INIT_DELAYED_WORK(&wm8971->charge_work, wm8971_charge_work);
wm8971_reset(codec);
- /* charge output caps - set vmid to 5k for quick power up */
- reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
- snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
- codec->dapm.bias_level = SND_SOC_BIAS_STANDBY;
- queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
- msecs_to_jiffies(1000));
-
/* set the update bits */
snd_soc_update_bits(codec, WM8971_LDAC, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8971_RDAC, 0x0100, 0x0100);
@@ -672,26 +647,13 @@ static int wm8971_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM8971_LINVOL, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8971_RINVOL, 0x0100, 0x0100);
- return ret;
-}
-
-
-/* power down chip */
-static int wm8971_remove(struct snd_soc_codec *codec)
-{
- wm8971_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- if (wm8971_workq)
- destroy_workqueue(wm8971_workq);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8971 = {
.probe = wm8971_probe,
- .remove = wm8971_remove,
- .suspend = wm8971_suspend,
- .resume = wm8971_resume,
.set_bias_level = wm8971_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8971_snd_controls,
.num_controls = ARRAY_SIZE(wm8971_snd_controls),
@@ -715,7 +677,6 @@ static int wm8971_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8971_priv *wm8971;
- struct regmap *regmap;
int ret;
wm8971 = devm_kzalloc(&i2c->dev, sizeof(struct wm8971_priv),
@@ -723,9 +684,9 @@ static int wm8971_i2c_probe(struct i2c_client *i2c,
if (wm8971 == NULL)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(i2c, &wm8971_regmap);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ wm8971->regmap = devm_regmap_init_i2c(i2c, &wm8971_regmap);
+ if (IS_ERR(wm8971->regmap))
+ return PTR_ERR(wm8971->regmap);
i2c_set_clientdata(i2c, wm8971);
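
The wm8971 change above replaces the driver-private workqueue with a per-codec delayed_work that ramps VMID on system_power_efficient_wq, is flushed before entering PREPARE and cancelled on power-off. A stripped-down sketch of that pattern, with hypothetical names (vmid_ctx, vmid_*):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct vmid_ctx {				/* hypothetical driver context */
	struct delayed_work charge_work;
};

static void vmid_charge_done(struct work_struct *work)
{
	struct vmid_ctx *ctx =
		container_of(work, struct vmid_ctx, charge_work.work);

	/* The real driver drops VMID from the fast 5k back to 500k here. */
	pr_debug("VMID charge finished for %p\n", ctx);
}

static void vmid_init(struct vmid_ctx *ctx)		/* codec probe */
{
	INIT_DELAYED_WORK(&ctx->charge_work, vmid_charge_done);
}

static void vmid_start_charge(struct vmid_ctx *ctx)	/* OFF -> STANDBY */
{
	/* Fast-charge now, drop to the slow string after the ramp time. */
	queue_delayed_work(system_power_efficient_wq,
			   &ctx->charge_work, msecs_to_jiffies(1000));
}

static void vmid_wait_charged(struct vmid_ctx *ctx)	/* -> PREPARE */
{
	flush_delayed_work(&ctx->charge_work);
}

static void vmid_power_off(struct vmid_ctx *ctx)	/* -> OFF */
{
	/* A pending ramp must not fire after the supplies are gone. */
	cancel_delayed_work_sync(&ctx->charge_work);
}
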
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index dc92d5e4e942..308748a022c5 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -2009,7 +2009,7 @@ static int wm8996_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = to_i2c_client(codec->dev);
struct _fll_div fll_div;
- unsigned long timeout;
+ unsigned long timeout, time_left;
int ret, reg, retry;
/* Any change? */
@@ -2110,13 +2110,15 @@ static int wm8996_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
if (i2c->irq)
timeout *= 10;
else
- timeout /= 2;
+		/* ensure a timeout of at least 1 jiffy */
+ timeout = timeout/2 ? : 1;
for (retry = 0; retry < 10; retry++) {
- ret = wait_for_completion_timeout(&wm8996->fll_lock,
- timeout);
- if (ret != 0) {
+ time_left = wait_for_completion_timeout(&wm8996->fll_lock,
+ timeout);
+ if (time_left != 0) {
WARN_ON(!i2c->irq);
+ ret = 1;
break;
}
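
The wm8996 hunk above does two things: it keeps the polling timeout from truncating to zero jiffies (which would make wait_for_completion_timeout() return immediately) and it tests the remaining time rather than reusing ret. A small illustrative helper, not driver code, showing the same idiom:

#include <linux/completion.h>
#include <linux/errno.h>

static int wait_for_lock(struct completion *done, unsigned long timeout)
{
	unsigned long time_left;

	/* Halving a one-jiffy timeout would give 0: don't wait "not at all". */
	timeout = timeout / 2 ?: 1;

	time_left = wait_for_completion_timeout(done, timeout);
	if (!time_left)
		return -ETIMEDOUT;	/* expired without completing */

	return 0;			/* completed with time to spare */
}
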
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index ff67b334065b..d01c2095452f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -420,10 +420,9 @@ static int wm_coeff_put(struct snd_kcontrol *kcontrol,
memcpy(ctl->cache, p, ctl->len);
- if (!ctl->enabled) {
- ctl->set = 1;
+ ctl->set = 1;
+ if (!ctl->enabled)
return 0;
- }
return wm_coeff_write_control(kcontrol, p, ctl->len);
}
@@ -1185,7 +1184,6 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
int ret, pos, blocks, type, offset, reg;
char *file;
struct wm_adsp_buf *buf;
- int tmp;
file = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (file == NULL)
@@ -1335,12 +1333,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
}
}
- tmp = le32_to_cpu(blk->len) % 4;
- if (tmp)
- pos += le32_to_cpu(blk->len) + (4 - tmp) + sizeof(*blk);
- else
- pos += le32_to_cpu(blk->len) + sizeof(*blk);
-
+ pos += (le32_to_cpu(blk->len) + sizeof(*blk) + 3) & ~0x03;
blocks++;
}
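
The wm_adsp hunk above folds the modulo-based padding into one mask expression. A tiny userspace demonstration of the (x + 3) & ~0x03 idiom it relies on, which rounds a length up to the next multiple of four:

#include <stdio.h>

static unsigned int align4(unsigned int x)
{
	return (x + 3) & ~0x03u;	/* next multiple of 4, no-op if aligned */
}

int main(void)
{
	unsigned int len;

	for (len = 5; len <= 8; len++)
		printf("len=%u -> padded=%u\n", len, align4(len));
	/* prints 8 for 5, 6, 7 and 8 */
	return 0;
}
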
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index 2b81ca418d2a..3736d9aabc56 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -1,14 +1,16 @@
config SND_DAVINCI_SOC
- tristate "SoC Audio for TI DAVINCI"
+ tristate
depends on ARCH_DAVINCI
+ select SND_EDMA_SOC
config SND_EDMA_SOC
- tristate "SoC Audio for Texas Instruments chips using eDMA (AM33XX/43XX)"
- depends on SOC_AM33XX || SOC_AM43XX
+ tristate "SoC Audio for Texas Instruments chips using eDMA"
+ depends on SOC_AM33XX || SOC_AM43XX || ARCH_DAVINCI
select SND_SOC_GENERIC_DMAENGINE_PCM
help
 	  Say Y or M here if you want audio support for TI SoCs which use eDMA.
 	  The following lines of SoCs are supported by this platform driver:
+ - daVinci devices
- AM335x
- AM437x/AM438x
@@ -17,7 +19,7 @@ config SND_DAVINCI_SOC_I2S
config SND_DAVINCI_SOC_MCASP
tristate "Multichannel Audio Serial Port (McASP) support"
- depends on SND_DAVINCI_SOC || SND_OMAP_SOC || SND_EDMA_SOC
+ depends on SND_OMAP_SOC || SND_EDMA_SOC
help
Say Y or M here if you want to have support for McASP IP found in
various Texas Instruments SoCs like:
@@ -45,7 +47,7 @@ config SND_AM33XX_SOC_EVM
config SND_DAVINCI_SOC_EVM
tristate "SoC Audio support for DaVinci DM6446, DM355 or DM365 EVM"
- depends on SND_DAVINCI_SOC && I2C
+ depends on SND_EDMA_SOC && I2C
depends on MACH_DAVINCI_EVM || MACH_DAVINCI_DM355_EVM || MACH_DAVINCI_DM365_EVM
select SND_DAVINCI_SOC_GENERIC_EVM
help
@@ -73,7 +75,7 @@ endchoice
config SND_DM6467_SOC_EVM
tristate "SoC Audio support for DaVinci DM6467 EVM"
- depends on SND_DAVINCI_SOC && MACH_DAVINCI_DM6467_EVM && I2C
+ depends on SND_EDMA_SOC && MACH_DAVINCI_DM6467_EVM && I2C
select SND_DAVINCI_SOC_GENERIC_EVM
select SND_SOC_SPDIF
@@ -82,7 +84,7 @@ config SND_DM6467_SOC_EVM
config SND_DA830_SOC_EVM
tristate "SoC Audio support for DA830/OMAP-L137 EVM"
- depends on SND_DAVINCI_SOC && MACH_DAVINCI_DA830_EVM && I2C
+ depends on SND_EDMA_SOC && MACH_DAVINCI_DA830_EVM && I2C
select SND_DAVINCI_SOC_GENERIC_EVM
help
@@ -91,7 +93,7 @@ config SND_DA830_SOC_EVM
config SND_DA850_SOC_EVM
tristate "SoC Audio support for DA850/OMAP-L138 EVM"
- depends on SND_DAVINCI_SOC && MACH_DAVINCI_DA850_EVM && I2C
+ depends on SND_EDMA_SOC && MACH_DAVINCI_DA850_EVM && I2C
select SND_DAVINCI_SOC_GENERIC_EVM
help
Say Y if you want to add support for SoC audio on TI
diff --git a/sound/soc/davinci/Makefile b/sound/soc/davinci/Makefile
index 09bf2ba92d38..f883933c1a19 100644
--- a/sound/soc/davinci/Makefile
+++ b/sound/soc/davinci/Makefile
@@ -1,11 +1,9 @@
# DAVINCI Platform Support
-snd-soc-davinci-objs := davinci-pcm.o
snd-soc-edma-objs := edma-pcm.o
snd-soc-davinci-i2s-objs := davinci-i2s.o
snd-soc-davinci-mcasp-objs:= davinci-mcasp.o
snd-soc-davinci-vcif-objs:= davinci-vcif.o
-obj-$(CONFIG_SND_DAVINCI_SOC) += snd-soc-davinci.o
obj-$(CONFIG_SND_EDMA_SOC) += snd-soc-edma.o
obj-$(CONFIG_SND_DAVINCI_SOC_I2S) += snd-soc-davinci-i2s.o
obj-$(CONFIG_SND_DAVINCI_SOC_MCASP) += snd-soc-davinci-mcasp.o
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index b6bb5947a8a8..731fb0d86c6a 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -117,7 +117,6 @@ static const struct snd_soc_dapm_route audio_map[] = {
static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
- struct snd_soc_codec *codec = rtd->codec;
struct device_node *np = card->dev->of_node;
int ret;
@@ -136,9 +135,9 @@ static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
}
/* not connected */
- snd_soc_dapm_nc_pin(&codec->dapm, "MONO_LOUT");
- snd_soc_dapm_nc_pin(&codec->dapm, "HPLCOM");
- snd_soc_dapm_nc_pin(&codec->dapm, "HPRCOM");
+ snd_soc_dapm_nc_pin(&card->dapm, "MONO_LOUT");
+ snd_soc_dapm_nc_pin(&card->dapm, "HPLCOM");
+ snd_soc_dapm_nc_pin(&card->dapm, "HPRCOM");
return 0;
}
@@ -425,18 +424,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
return ret;
}
-static int davinci_evm_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- return 0;
-}
-
static struct platform_driver davinci_evm_driver = {
.probe = davinci_evm_probe,
- .remove = davinci_evm_remove,
.driver = {
.name = "davinci_evm",
.pm = &snd_soc_pm_ops,
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 15fb28fc8e1b..56cb4d95637d 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -23,8 +23,9 @@
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
-#include "davinci-pcm.h"
+#include "edma-pcm.h"
#include "davinci-i2s.h"
@@ -122,7 +123,8 @@ static const unsigned char double_fmt[SNDRV_PCM_FORMAT_S32_LE + 1] = {
struct davinci_mcbsp_dev {
struct device *dev;
- struct davinci_pcm_dma_params dma_params[2];
+ struct snd_dmaengine_dai_dma_data dma_data[2];
+ int dma_request[2];
void __iomem *base;
#define MOD_DSP_A 0
#define MOD_DSP_B 1
@@ -419,8 +421,6 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
- struct davinci_pcm_dma_params *dma_params =
- &dev->dma_params[substream->stream];
struct snd_interval *i = NULL;
int mcbsp_word_length, master;
unsigned int rcr, xcr, srgr, clk_div, freq, framesize;
@@ -532,8 +532,6 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
}
- dma_params->acnt = dma_params->data_type = data_type[fmt];
- dma_params->fifo_level = 0;
mcbsp_word_length = asp_word_length[fmt];
switch (master) {
@@ -600,15 +598,6 @@ static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
return ret;
}
-static int davinci_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
-
- snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
- return 0;
-}
-
static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -620,7 +609,6 @@ static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000
static const struct snd_soc_dai_ops davinci_i2s_dai_ops = {
- .startup = davinci_i2s_startup,
.shutdown = davinci_i2s_shutdown,
.prepare = davinci_i2s_prepare,
.trigger = davinci_i2s_trigger,
@@ -630,7 +618,18 @@ static const struct snd_soc_dai_ops davinci_i2s_dai_ops = {
};
+static int davinci_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+ struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ dai->playback_dma_data = &dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
+ dai->capture_dma_data = &dev->dma_data[SNDRV_PCM_STREAM_CAPTURE];
+
+ return 0;
+}
+
static struct snd_soc_dai_driver davinci_i2s_dai = {
+ .probe = davinci_i2s_dai_probe,
.playback = {
.channels_min = 2,
.channels_max = 2,
@@ -651,11 +650,9 @@ static const struct snd_soc_component_driver davinci_i2s_component = {
static int davinci_i2s_probe(struct platform_device *pdev)
{
- struct snd_platform_data *pdata = pdev->dev.platform_data;
struct davinci_mcbsp_dev *dev;
struct resource *mem, *ioarea, *res;
- enum dma_event_q asp_chan_q = EVENTQ_0;
- enum dma_event_q ram_chan_q = EVENTQ_1;
+ int *dma;
int ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -676,22 +673,6 @@ static int davinci_i2s_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!dev)
return -ENOMEM;
- if (pdata) {
- dev->enable_channel_combine = pdata->enable_channel_combine;
- dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].sram_size =
- pdata->sram_size_playback;
- dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].sram_size =
- pdata->sram_size_capture;
- dev->clk_input_pin = pdata->clk_input_pin;
- dev->i2s_accurate_sck = pdata->i2s_accurate_sck;
- asp_chan_q = pdata->asp_chan_q;
- ram_chan_q = pdata->ram_chan_q;
- }
-
- dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].asp_chan_q = asp_chan_q;
- dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].ram_chan_q = ram_chan_q;
- dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].asp_chan_q = asp_chan_q;
- dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].ram_chan_q = ram_chan_q;
dev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
@@ -705,10 +686,10 @@ static int davinci_i2s_probe(struct platform_device *pdev)
goto err_release_clk;
}
- dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
+ dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
(dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
- dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
+ dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].addr =
(dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG);
/* first TX, then RX */
@@ -718,7 +699,9 @@ static int davinci_i2s_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_release_clk;
}
- dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel = res->start;
+ dma = &dev->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
+ *dma = res->start;
+ dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].filter_data = dma;
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!res) {
@@ -726,9 +709,11 @@ static int davinci_i2s_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_release_clk;
}
- dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start;
- dev->dev = &pdev->dev;
+ dma = &dev->dma_request[SNDRV_PCM_STREAM_CAPTURE];
+ *dma = res->start;
+ dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].filter_data = dma;
+ dev->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, dev);
ret = snd_soc_register_component(&pdev->dev, &davinci_i2s_component,
@@ -736,7 +721,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
if (ret != 0)
goto err_release_clk;
- ret = davinci_soc_platform_register(&pdev->dev);
+ ret = edma_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
goto err_unregister_component;
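
The davinci-i2s conversion above drops the driver-private davinci_pcm_dma_params in favour of the generic snd_dmaengine_dai_dma_data consumed by the common dmaengine PCM layer. A condensed sketch of that handoff with hypothetical names (my_dai_priv, my_dai_*), assuming the FIFO addresses and request numbers come from platform resources as in the patch:

#include <linux/platform_device.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>

struct my_dai_priv {				/* hypothetical CPU-DAI context */
	struct snd_dmaengine_dai_dma_data dma_data[2];
	int dma_request[2];
};

/* Device probe: describe each direction once. */
static void my_dai_setup_dma(struct my_dai_priv *priv,
			     dma_addr_t tx_fifo, dma_addr_t rx_fifo,
			     int tx_req, int rx_req)
{
	priv->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr = tx_fifo;
	priv->dma_data[SNDRV_PCM_STREAM_CAPTURE].addr = rx_fifo;

	/* Non-DT boot: the filter data is a pointer to the request number. */
	priv->dma_request[SNDRV_PCM_STREAM_PLAYBACK] = tx_req;
	priv->dma_request[SNDRV_PCM_STREAM_CAPTURE] = rx_req;
	priv->dma_data[SNDRV_PCM_STREAM_PLAYBACK].filter_data =
		&priv->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
	priv->dma_data[SNDRV_PCM_STREAM_CAPTURE].filter_data =
		&priv->dma_request[SNDRV_PCM_STREAM_CAPTURE];
}

/* DAI probe: publish the descriptors so the PCM layer can find them. */
static int my_dai_probe(struct snd_soc_dai *dai)
{
	struct my_dai_priv *priv = snd_soc_dai_get_drvdata(dai);

	dai->playback_dma_data = &priv->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
	dai->capture_dma_data = &priv->dma_data[SNDRV_PCM_STREAM_CAPTURE];
	return 0;
}
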
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index de3b155a5011..bb4b78eada58 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -26,6 +26,8 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/platform_data/davinci_asp.h>
+#include <linux/math64.h>
#include <sound/asoundef.h>
#include <sound/core.h>
@@ -36,7 +38,6 @@
#include <sound/dmaengine_pcm.h>
#include <sound/omap-pcm.h>
-#include "davinci-pcm.h"
#include "edma-pcm.h"
#include "davinci-mcasp.h"
@@ -62,10 +63,15 @@ struct davinci_mcasp_context {
u32 config_regs[ARRAY_SIZE(context_regs)];
u32 afifo_regs[2]; /* for read/write fifo control registers */
u32 *xrsr_regs; /* for serializer configuration */
+ bool pm_state;
+};
+
+struct davinci_mcasp_ruledata {
+ struct davinci_mcasp *mcasp;
+ int serializers;
};
struct davinci_mcasp {
- struct davinci_pcm_dma_params dma_params[2];
struct snd_dmaengine_dai_dma_data dma_data[2];
void __iomem *base;
u32 fifo_base;
@@ -82,6 +88,7 @@ struct davinci_mcasp {
u16 bclk_lrclk_ratio;
int streams;
u32 irq_request[2];
+ int dma_request[2];
int sysclk_freq;
bool bclk_master;
@@ -98,6 +105,8 @@ struct davinci_mcasp {
#ifdef CONFIG_PM_SLEEP
struct davinci_mcasp_context context;
#endif
+
+ struct davinci_mcasp_ruledata ruledata[2];
};
static inline void mcasp_set_bits(struct davinci_mcasp *mcasp, u32 offset,
@@ -441,6 +450,18 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AFSX | AFSR);
mcasp->bclk_master = 1;
break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ /* codec is clock slave and frame master */
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
+
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
+
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, ACLKX | ACLKR);
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AFSX | AFSR);
+ mcasp->bclk_master = 1;
+ break;
case SND_SOC_DAIFMT_CBM_CFS:
/* codec is clock master and frame slave */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
@@ -507,7 +528,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
}
out:
- pm_runtime_put_sync(mcasp->dev);
+ pm_runtime_put(mcasp->dev);
return ret;
}
@@ -516,6 +537,7 @@ static int __davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
+ pm_runtime_get_sync(mcasp->dev);
switch (div_id) {
case 0: /* MCLK divider */
mcasp_mod_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG,
@@ -541,6 +563,7 @@ static int __davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
return -EINVAL;
}
+ pm_runtime_put(mcasp->dev);
return 0;
}
@@ -555,6 +578,7 @@ static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
+ pm_runtime_get_sync(mcasp->dev);
if (dir == SND_SOC_CLOCK_OUT) {
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE);
@@ -567,6 +591,7 @@ static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
mcasp->sysclk_freq = freq;
+ pm_runtime_put(mcasp->dev);
return 0;
}
@@ -631,7 +656,6 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
int period_words, int channels)
{
- struct davinci_pcm_dma_params *dma_params = &mcasp->dma_params[stream];
struct snd_dmaengine_dai_dma_data *dma_data = &mcasp->dma_data[stream];
int i;
u8 tx_ser = 0;
@@ -699,10 +723,8 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
* For example if three serializers are enabled the DMA
* need to transfer three words per DMA request.
*/
- dma_params->fifo_level = active_serializers;
dma_data->maxburst = active_serializers;
} else {
- dma_params->fifo_level = 0;
dma_data->maxburst = 0;
}
return 0;
@@ -734,7 +756,6 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
/* Configure the burst size for platform drivers */
if (numevt == 1)
numevt = 0;
- dma_params->fifo_level = numevt;
dma_data->maxburst = numevt;
return 0;
@@ -855,13 +876,35 @@ static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
return 0;
}
+static int davinci_mcasp_calc_clk_div(struct davinci_mcasp *mcasp,
+ unsigned int bclk_freq,
+ int *error_ppm)
+{
+ int div = mcasp->sysclk_freq / bclk_freq;
+ int rem = mcasp->sysclk_freq % bclk_freq;
+
+ if (rem != 0) {
+ if (div == 0 ||
+ ((mcasp->sysclk_freq / div) - bclk_freq) >
+ (bclk_freq - (mcasp->sysclk_freq / (div+1)))) {
+ div++;
+ rem = rem - bclk_freq;
+ }
+ }
+ if (error_ppm)
+ *error_ppm =
+ (div*1000000 + (int)div64_long(1000000LL*rem,
+ (int)bclk_freq))
+ /div - 1000000;
+
+ return div;
+}
+
static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
- struct davinci_pcm_dma_params *dma_params =
- &mcasp->dma_params[substream->stream];
int word_length;
int channels = params_channels(params);
int period_size = params_period_size(params);
@@ -872,16 +915,20 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
* the machine driver, we need to calculate the ratio.
*/
if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
- unsigned int bclk_freq = snd_soc_params_to_bclk(params);
- unsigned int div = mcasp->sysclk_freq / bclk_freq;
- if (mcasp->sysclk_freq % bclk_freq != 0) {
- if (((mcasp->sysclk_freq / div) - bclk_freq) >
- (bclk_freq - (mcasp->sysclk_freq / (div+1))))
- div++;
- dev_warn(mcasp->dev,
- "Inaccurate BCLK: %u Hz / %u != %u Hz\n",
- mcasp->sysclk_freq, div, bclk_freq);
- }
+ int channels = params_channels(params);
+ int rate = params_rate(params);
+ int sbits = params_width(params);
+ int ppm, div;
+
+ if (channels > mcasp->tdm_slots)
+ channels = mcasp->tdm_slots;
+
+ div = davinci_mcasp_calc_clk_div(mcasp, rate*sbits*channels,
+ &ppm);
+ if (ppm)
+ dev_info(mcasp->dev, "Sample-rate is off by %d PPM\n",
+ ppm);
+
__davinci_mcasp_set_clkdiv(cpu_dai, 1, div, 0);
}
@@ -902,31 +949,26 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U8:
case SNDRV_PCM_FORMAT_S8:
- dma_params->data_type = 1;
word_length = 8;
break;
case SNDRV_PCM_FORMAT_U16_LE:
case SNDRV_PCM_FORMAT_S16_LE:
- dma_params->data_type = 2;
word_length = 16;
break;
case SNDRV_PCM_FORMAT_U24_3LE:
case SNDRV_PCM_FORMAT_S24_3LE:
- dma_params->data_type = 3;
word_length = 24;
break;
case SNDRV_PCM_FORMAT_U24_LE:
case SNDRV_PCM_FORMAT_S24_LE:
- dma_params->data_type = 4;
word_length = 24;
break;
case SNDRV_PCM_FORMAT_U32_LE:
case SNDRV_PCM_FORMAT_S32_LE:
- dma_params->data_type = 4;
word_length = 32;
break;
@@ -935,11 +977,6 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- if (mcasp->version == MCASP_VERSION_2 && !dma_params->fifo_level)
- dma_params->acnt = 4;
- else
- dma_params->acnt = dma_params->data_type;
-
davinci_config_channel_size(mcasp, word_length);
if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
@@ -973,10 +1010,126 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
return ret;
}
+static const unsigned int davinci_mcasp_dai_rates[] = {
+ 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
+ 88200, 96000, 176400, 192000,
+};
+
+#define DAVINCI_MAX_RATE_ERROR_PPM 1000
+
+static int davinci_mcasp_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct davinci_mcasp_ruledata *rd = rule->private;
+ struct snd_interval *ri =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ int sbits = params_width(params);
+ int channels = params_channels(params);
+ unsigned int list[ARRAY_SIZE(davinci_mcasp_dai_rates)];
+ int i, count = 0;
+
+ if (channels > rd->mcasp->tdm_slots)
+ channels = rd->mcasp->tdm_slots;
+
+ for (i = 0; i < ARRAY_SIZE(davinci_mcasp_dai_rates); i++) {
+ if (ri->min <= davinci_mcasp_dai_rates[i] &&
+ ri->max >= davinci_mcasp_dai_rates[i]) {
+ uint bclk_freq = sbits*channels*
+ davinci_mcasp_dai_rates[i];
+ int ppm;
+
+ davinci_mcasp_calc_clk_div(rd->mcasp, bclk_freq, &ppm);
+ if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM)
+ list[count++] = davinci_mcasp_dai_rates[i];
+ }
+ }
+ dev_dbg(rd->mcasp->dev,
+ "%d frequencies (%d-%d) for %d sbits and %d channels\n",
+ count, ri->min, ri->max, sbits, channels);
+
+ return snd_interval_list(hw_param_interval(params, rule->var),
+ count, list, 0);
+}
+
+static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct davinci_mcasp_ruledata *rd = rule->private;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_mask nfmt;
+ int rate = params_rate(params);
+ int channels = params_channels(params);
+ int i, count = 0;
+
+ snd_mask_none(&nfmt);
+
+ if (channels > rd->mcasp->tdm_slots)
+ channels = rd->mcasp->tdm_slots;
+
+ for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+ if (snd_mask_test(fmt, i)) {
+ uint bclk_freq = snd_pcm_format_width(i)*channels*rate;
+ int ppm;
+
+ davinci_mcasp_calc_clk_div(rd->mcasp, bclk_freq, &ppm);
+ if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) {
+ snd_mask_set(&nfmt, i);
+ count++;
+ }
+ }
+ }
+ dev_dbg(rd->mcasp->dev,
+		"%d possible sample formats for %d Hz and %d channels\n",
+ count, rate, channels);
+
+ return snd_mask_refine(fmt, &nfmt);
+}
+
+static int davinci_mcasp_hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct davinci_mcasp_ruledata *rd = rule->private;
+ struct snd_interval *ci =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ int sbits = params_width(params);
+ int rate = params_rate(params);
+ int max_chan_per_wire = rd->mcasp->tdm_slots < ci->max ?
+ rd->mcasp->tdm_slots : ci->max;
+ unsigned int list[ci->max - ci->min + 1];
+ int c1, c, count = 0;
+
+ for (c1 = ci->min; c1 <= max_chan_per_wire; c1++) {
+ uint bclk_freq = c1*sbits*rate;
+ int ppm;
+
+ davinci_mcasp_calc_clk_div(rd->mcasp, bclk_freq, &ppm);
+ if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) {
+			/* If we can use all tdm_slots, we can put any
+			   number of channels on the remaining wires as
+			   long as they fit. */
+ if (c1 == rd->mcasp->tdm_slots) {
+ for (c = c1; c <= rd->serializers*c1 &&
+ c <= ci->max; c++)
+ list[count++] = c;
+ } else {
+ list[count++] = c1;
+ }
+ }
+ }
+ dev_dbg(rd->mcasp->dev,
+ "%d possible channel counts (%d-%d) for %d Hz and %d sbits\n",
+ count, ci->min, ci->max, rate, sbits);
+
+ return snd_interval_list(hw_param_interval(params, rule->var),
+ count, list, 0);
+}
+
static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+ struct davinci_mcasp_ruledata *ruledata =
+ &mcasp->ruledata[substream->stream];
u32 max_channels = 0;
int i, dir;
@@ -998,6 +1151,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
if (mcasp->serial_dir[i] == dir)
max_channels++;
}
+ ruledata->serializers = max_channels;
max_channels *= mcasp->tdm_slots;
/*
* If the already active stream has less channels than the calculated
@@ -1012,6 +1166,42 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
snd_pcm_hw_constraint_minmax(substream->runtime,
SNDRV_PCM_HW_PARAM_CHANNELS,
2, max_channels);
+
+ /*
+ * If we rely on implicit BCLK divider setting we should
+ * set constraints based on what we can provide.
+ */
+ if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
+ int ret;
+
+ ruledata->mcasp = mcasp;
+
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ davinci_mcasp_hw_rule_rate,
+ ruledata,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (ret)
+ return ret;
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ davinci_mcasp_hw_rule_format,
+ ruledata,
+ SNDRV_PCM_HW_PARAM_RATE,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (ret)
+ return ret;
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ davinci_mcasp_hw_rule_channels,
+ ruledata,
+ SNDRV_PCM_HW_PARAM_RATE,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1043,17 +1233,8 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
- if (mcasp->version >= MCASP_VERSION_3) {
- /* Using dmaengine PCM */
- dai->playback_dma_data =
- &mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
- dai->capture_dma_data =
- &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
- } else {
- /* Using davinci-pcm */
- dai->playback_dma_data = mcasp->dma_params;
- dai->capture_dma_data = mcasp->dma_params;
- }
+ dai->playback_dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
+ dai->capture_dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
return 0;
}
@@ -1066,6 +1247,10 @@ static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
u32 reg;
int i;
+ context->pm_state = pm_runtime_enabled(mcasp->dev);
+ if (!context->pm_state)
+ pm_runtime_get_sync(mcasp->dev);
+
for (i = 0; i < ARRAY_SIZE(context_regs); i++)
context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
@@ -1082,6 +1267,8 @@ static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
context->xrsr_regs[i] = mcasp_get_reg(mcasp,
DAVINCI_MCASP_XRSRCTL_REG(i));
+ pm_runtime_put_sync(mcasp->dev);
+
return 0;
}
@@ -1092,6 +1279,8 @@ static int davinci_mcasp_resume(struct snd_soc_dai *dai)
u32 reg;
int i;
+ pm_runtime_get_sync(mcasp->dev);
+
for (i = 0; i < ARRAY_SIZE(context_regs); i++)
mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
@@ -1108,6 +1297,9 @@ static int davinci_mcasp_resume(struct snd_soc_dai *dai)
mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
context->xrsr_regs[i]);
+ if (!context->pm_state)
+ pm_runtime_put_sync(mcasp->dev);
+
return 0;
}
#else
@@ -1172,28 +1364,24 @@ static const struct snd_soc_component_driver davinci_mcasp_component = {
static struct davinci_mcasp_pdata dm646x_mcasp_pdata = {
.tx_dma_offset = 0x400,
.rx_dma_offset = 0x400,
- .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_1,
};
static struct davinci_mcasp_pdata da830_mcasp_pdata = {
.tx_dma_offset = 0x2000,
.rx_dma_offset = 0x2000,
- .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_2,
};
static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
.tx_dma_offset = 0,
.rx_dma_offset = 0,
- .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_3,
};
static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
.tx_dma_offset = 0x200,
.rx_dma_offset = 0x284,
- .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_4,
};
@@ -1370,12 +1558,12 @@ nodata:
static int davinci_mcasp_probe(struct platform_device *pdev)
{
- struct davinci_pcm_dma_params *dma_params;
struct snd_dmaengine_dai_dma_data *dma_data;
struct resource *mem, *ioarea, *res, *dat;
struct davinci_mcasp_pdata *pdata;
struct davinci_mcasp *mcasp;
char *irq_name;
+ int *dma;
int irq;
int ret;
@@ -1415,13 +1603,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (IS_ERR_VALUE(ret)) {
- dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
- pm_runtime_disable(&pdev->dev);
- return ret;
- }
-
mcasp->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
if (!mcasp->base) {
dev_err(&pdev->dev, "ioremap failed\n");
@@ -1509,59 +1690,45 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
if (dat)
mcasp->dat_port = true;
- dma_params = &mcasp->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
- dma_params->asp_chan_q = pdata->asp_chan_q;
- dma_params->ram_chan_q = pdata->ram_chan_q;
- dma_params->sram_pool = pdata->sram_pool;
- dma_params->sram_size = pdata->sram_size_playback;
if (dat)
- dma_params->dma_addr = dat->start;
+ dma_data->addr = dat->start;
else
- dma_params->dma_addr = mem->start + pdata->tx_dma_offset;
-
- /* Unconditional dmaengine stuff */
- dma_data->addr = dma_params->dma_addr;
+ dma_data->addr = mem->start + pdata->tx_dma_offset;
+ dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res)
- dma_params->channel = res->start;
+ *dma = res->start;
else
- dma_params->channel = pdata->tx_dma_channel;
+ *dma = pdata->tx_dma_channel;
/* dmaengine filter data for DT and non-DT boot */
if (pdev->dev.of_node)
dma_data->filter_data = "tx";
else
- dma_data->filter_data = &dma_params->channel;
+ dma_data->filter_data = dma;
/* RX is not valid in DIT mode */
if (mcasp->op_mode != DAVINCI_MCASP_DIT_MODE) {
- dma_params = &mcasp->dma_params[SNDRV_PCM_STREAM_CAPTURE];
dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
- dma_params->asp_chan_q = pdata->asp_chan_q;
- dma_params->ram_chan_q = pdata->ram_chan_q;
- dma_params->sram_pool = pdata->sram_pool;
- dma_params->sram_size = pdata->sram_size_capture;
if (dat)
- dma_params->dma_addr = dat->start;
+ dma_data->addr = dat->start;
else
- dma_params->dma_addr = mem->start + pdata->rx_dma_offset;
-
- /* Unconditional dmaengine stuff */
- dma_data->addr = dma_params->dma_addr;
+ dma_data->addr = mem->start + pdata->rx_dma_offset;
+ dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (res)
- dma_params->channel = res->start;
+ *dma = res->start;
else
- dma_params->channel = pdata->rx_dma_channel;
+ *dma = pdata->rx_dma_channel;
/* dmaengine filter data for DT and non-DT boot */
if (pdev->dev.of_node)
dma_data->filter_data = "rx";
else
- dma_data->filter_data = &dma_params->channel;
+ dma_data->filter_data = dma;
}
if (mcasp->version < MCASP_VERSION_3) {
@@ -1584,17 +1751,11 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
goto err;
switch (mcasp->version) {
-#if IS_BUILTIN(CONFIG_SND_DAVINCI_SOC) || \
- (IS_MODULE(CONFIG_SND_DAVINCI_SOC_MCASP) && \
- IS_MODULE(CONFIG_SND_DAVINCI_SOC))
- case MCASP_VERSION_1:
- case MCASP_VERSION_2:
- ret = davinci_soc_platform_register(&pdev->dev);
- break;
-#endif
#if IS_BUILTIN(CONFIG_SND_EDMA_SOC) || \
(IS_MODULE(CONFIG_SND_DAVINCI_SOC_MCASP) && \
IS_MODULE(CONFIG_SND_EDMA_SOC))
+ case MCASP_VERSION_1:
+ case MCASP_VERSION_2:
case MCASP_VERSION_3:
ret = edma_pcm_platform_register(&pdev->dev);
break;
@@ -1621,14 +1782,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
return 0;
err:
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
static int davinci_mcasp_remove(struct platform_device *pdev)
{
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
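
davinci_mcasp_calc_clk_div() above picks the integer divider closest to the requested bit clock and reports the residual error in PPM, which the new hw_rule_* callbacks compare against DAVINCI_MAX_RATE_ERROR_PPM. A standalone userspace mirror of that arithmetic (illustrative only), worked through for a 24.576 MHz sysclk driving 44.1 kHz, 16-bit stereo:

#include <stdio.h>

static int calc_clk_div(long long sysclk, long long bclk, int *error_ppm)
{
	int div = (int)(sysclk / bclk);
	long long rem = sysclk % bclk;

	if (rem != 0) {
		/* Pick whichever of div and div+1 lands closer to bclk. */
		if (div == 0 ||
		    (sysclk / div - bclk) > (bclk - sysclk / (div + 1))) {
			div++;
			rem -= bclk;	/* now negative: the divider overshoots */
		}
	}
	if (error_ppm)
		*error_ppm = (int)((div * 1000000LL + 1000000LL * rem / bclk)
				   / div - 1000000LL);
	return div;
}

int main(void)
{
	int ppm;
	/* 44100 Hz * 16 bit * 2 ch = 1.4112 MHz BCLK from a 24.576 MHz AHCLK */
	int div = calc_clk_div(24576000, 44100 * 16 * 2, &ppm);

	printf("div=%d, error=%d ppm\n", div, ppm);	/* prints: div=17, error=24409 ppm */
	return 0;
}
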
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
deleted file mode 100644
index 7809e9d935fc..000000000000
--- a/sound/soc/davinci/davinci-pcm.c
+++ /dev/null
@@ -1,861 +0,0 @@
-/*
- * ALSA PCM interface for the TI DAVINCI processor
- *
- * Author: Vladimir Barinov, <vbarinov@embeddedalley.com>
- * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com>
- * added SRAM ping/pong (C) 2008 Troy Kisky <troy.kisky@boundarydevices.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/genalloc.h>
-#include <linux/platform_data/edma.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <asm/dma.h>
-
-#include "davinci-pcm.h"
-
-#ifdef DEBUG
-static void print_buf_info(int slot, char *name)
-{
- struct edmacc_param p;
- if (slot < 0)
- return;
- edma_read_slot(slot, &p);
- printk(KERN_DEBUG "%s: 0x%x, opt=%x, src=%x, a_b_cnt=%x dst=%x\n",
- name, slot, p.opt, p.src, p.a_b_cnt, p.dst);
- printk(KERN_DEBUG " src_dst_bidx=%x link_bcntrld=%x src_dst_cidx=%x ccnt=%x\n",
- p.src_dst_bidx, p.link_bcntrld, p.src_dst_cidx, p.ccnt);
-}
-#else
-static void print_buf_info(int slot, char *name)
-{
-}
-#endif
-
-static struct snd_pcm_hardware pcm_hardware_playback = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME|
- SNDRV_PCM_INFO_BATCH),
- .buffer_bytes_max = 128 * 1024,
- .period_bytes_min = 32,
- .period_bytes_max = 8 * 1024,
- .periods_min = 16,
- .periods_max = 255,
- .fifo_size = 0,
-};
-
-static struct snd_pcm_hardware pcm_hardware_capture = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_BATCH),
- .buffer_bytes_max = 128 * 1024,
- .period_bytes_min = 32,
- .period_bytes_max = 8 * 1024,
- .periods_min = 16,
- .periods_max = 255,
- .fifo_size = 0,
-};
-
-/*
- * How ping/pong works....
- *
- * Playback:
- * ram_params - copys 2*ping_size from start of SDRAM to iram,
- * links to ram_link2
- * ram_link2 - copys rest of SDRAM to iram in ping_size units,
- * links to ram_link
- * ram_link - copys entire SDRAM to iram in ping_size uints,
- * links to self
- *
- * asp_params - same as asp_link[0]
- * asp_link[0] - copys from lower half of iram to asp port
- * links to asp_link[1], triggers iram copy event on completion
- * asp_link[1] - copys from upper half of iram to asp port
- * links to asp_link[0], triggers iram copy event on completion
- * triggers interrupt only needed to let upper SOC levels update position
- * in stream on completion
- *
- * When playback is started:
- * ram_params started
- * asp_params started
- *
- * Capture:
- * ram_params - same as ram_link,
- * links to ram_link
- * ram_link - same as playback
- * links to self
- *
- * asp_params - same as playback
- * asp_link[0] - same as playback
- * asp_link[1] - same as playback
- *
- * When capture is started:
- * asp_params started
- */
-struct davinci_runtime_data {
- spinlock_t lock;
- int period; /* current DMA period */
- int asp_channel; /* Master DMA channel */
- int asp_link[2]; /* asp parameter link channel, ping/pong */
- struct davinci_pcm_dma_params *params; /* DMA params */
- int ram_channel;
- int ram_link;
- int ram_link2;
- struct edmacc_param asp_params;
- struct edmacc_param ram_params;
-};
-
-static void davinci_pcm_period_elapsed(struct snd_pcm_substream *substream)
-{
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- prtd->period++;
- if (unlikely(prtd->period >= runtime->periods))
- prtd->period = 0;
-}
-
-static void davinci_pcm_period_reset(struct snd_pcm_substream *substream)
-{
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
-
- prtd->period = 0;
-}
-/*
- * Not used with ping/pong
- */
-static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
-{
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned int period_size;
- unsigned int dma_offset;
- dma_addr_t dma_pos;
- dma_addr_t src, dst;
- unsigned short src_bidx, dst_bidx;
- unsigned short src_cidx, dst_cidx;
- unsigned int data_type;
- unsigned short acnt;
- unsigned int count;
- unsigned int fifo_level;
-
- period_size = snd_pcm_lib_period_bytes(substream);
- dma_offset = prtd->period * period_size;
- dma_pos = runtime->dma_addr + dma_offset;
- fifo_level = prtd->params->fifo_level;
-
- pr_debug("davinci_pcm: audio_set_dma_params_play channel = %d "
- "dma_ptr = %x period_size=%x\n", prtd->asp_link[0], dma_pos,
- period_size);
-
- data_type = prtd->params->data_type;
- count = period_size / data_type;
- if (fifo_level)
- count /= fifo_level;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- src = dma_pos;
- dst = prtd->params->dma_addr;
- src_bidx = data_type;
- dst_bidx = 4;
- src_cidx = data_type * fifo_level;
- dst_cidx = 0;
- } else {
- src = prtd->params->dma_addr;
- dst = dma_pos;
- src_bidx = 0;
- dst_bidx = data_type;
- src_cidx = 0;
- dst_cidx = data_type * fifo_level;
- }
-
- acnt = prtd->params->acnt;
- edma_set_src(prtd->asp_link[0], src, INCR, W8BIT);
- edma_set_dest(prtd->asp_link[0], dst, INCR, W8BIT);
-
- edma_set_src_index(prtd->asp_link[0], src_bidx, src_cidx);
- edma_set_dest_index(prtd->asp_link[0], dst_bidx, dst_cidx);
-
- if (!fifo_level)
- edma_set_transfer_params(prtd->asp_link[0], acnt, count, 1, 0,
- ASYNC);
- else
- edma_set_transfer_params(prtd->asp_link[0], acnt,
- fifo_level,
- count, fifo_level,
- ABSYNC);
-}
-
-static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
-{
- struct snd_pcm_substream *substream = data;
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
-
- print_buf_info(prtd->ram_channel, "i ram_channel");
- pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status);
-
- if (unlikely(ch_status != EDMA_DMA_COMPLETE))
- return;
-
- if (snd_pcm_running(substream)) {
- spin_lock(&prtd->lock);
- if (prtd->ram_channel < 0) {
- /* No ping/pong must fix up link dma data*/
- davinci_pcm_enqueue_dma(substream);
- }
- davinci_pcm_period_elapsed(substream);
- spin_unlock(&prtd->lock);
- snd_pcm_period_elapsed(substream);
- }
-}
-
-#ifdef CONFIG_GENERIC_ALLOCATOR
-static int allocate_sram(struct snd_pcm_substream *substream,
- struct gen_pool *sram_pool, unsigned size,
- struct snd_pcm_hardware *ppcm)
-{
- struct snd_dma_buffer *buf = &substream->dma_buffer;
- struct snd_dma_buffer *iram_dma = NULL;
- dma_addr_t iram_phys = 0;
- void *iram_virt = NULL;
-
- if (buf->private_data || !size)
- return 0;
-
- ppcm->period_bytes_max = size;
- iram_virt = gen_pool_dma_alloc(sram_pool, size, &iram_phys);
- if (!iram_virt)
- goto exit1;
- iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
- if (!iram_dma)
- goto exit2;
- iram_dma->area = iram_virt;
- iram_dma->addr = iram_phys;
- memset(iram_dma->area, 0, size);
- iram_dma->bytes = size;
- buf->private_data = iram_dma;
- return 0;
-exit2:
- if (iram_virt)
- gen_pool_free(sram_pool, (unsigned)iram_virt, size);
-exit1:
- return -ENOMEM;
-}
-
-static void davinci_free_sram(struct snd_pcm_substream *substream,
- struct snd_dma_buffer *iram_dma)
-{
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
- struct gen_pool *sram_pool = prtd->params->sram_pool;
-
- gen_pool_free(sram_pool, (unsigned) iram_dma->area, iram_dma->bytes);
-}
-#else
-static int allocate_sram(struct snd_pcm_substream *substream,
- struct gen_pool *sram_pool, unsigned size,
- struct snd_pcm_hardware *ppcm)
-{
- return 0;
-}
-
-static void davinci_free_sram(struct snd_pcm_substream *substream,
- struct snd_dma_buffer *iram_dma)
-{
-}
-#endif
-
-/*
- * Only used with ping/pong.
- * This is called after runtime->dma_addr, period_bytes and data_type are valid
- */
-static int ping_pong_dma_setup(struct snd_pcm_substream *substream)
-{
- unsigned short ram_src_cidx, ram_dst_cidx;
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct davinci_runtime_data *prtd = runtime->private_data;
- struct snd_dma_buffer *iram_dma =
- (struct snd_dma_buffer *)substream->dma_buffer.private_data;
- struct davinci_pcm_dma_params *params = prtd->params;
- unsigned int data_type = params->data_type;
- unsigned int acnt = params->acnt;
- /* divide by 2 for ping/pong */
- unsigned int ping_size = snd_pcm_lib_period_bytes(substream) >> 1;
- unsigned int fifo_level = prtd->params->fifo_level;
- unsigned int count;
- if ((data_type == 0) || (data_type > 4)) {
- printk(KERN_ERR "%s: data_type=%i\n", __func__, data_type);
- return -EINVAL;
- }
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- dma_addr_t asp_src_pong = iram_dma->addr + ping_size;
- ram_src_cidx = ping_size;
- ram_dst_cidx = -ping_size;
- edma_set_src(prtd->asp_link[1], asp_src_pong, INCR, W8BIT);
-
- edma_set_src_index(prtd->asp_link[0], data_type,
- data_type * fifo_level);
- edma_set_src_index(prtd->asp_link[1], data_type,
- data_type * fifo_level);
-
- edma_set_src(prtd->ram_link, runtime->dma_addr, INCR, W32BIT);
- } else {
- dma_addr_t asp_dst_pong = iram_dma->addr + ping_size;
- ram_src_cidx = -ping_size;
- ram_dst_cidx = ping_size;
- edma_set_dest(prtd->asp_link[1], asp_dst_pong, INCR, W8BIT);
-
- edma_set_dest_index(prtd->asp_link[0], data_type,
- data_type * fifo_level);
- edma_set_dest_index(prtd->asp_link[1], data_type,
- data_type * fifo_level);
-
- edma_set_dest(prtd->ram_link, runtime->dma_addr, INCR, W32BIT);
- }
-
- if (!fifo_level) {
- count = ping_size / data_type;
- edma_set_transfer_params(prtd->asp_link[0], acnt, count,
- 1, 0, ASYNC);
- edma_set_transfer_params(prtd->asp_link[1], acnt, count,
- 1, 0, ASYNC);
- } else {
- count = ping_size / (data_type * fifo_level);
- edma_set_transfer_params(prtd->asp_link[0], acnt, fifo_level,
- count, fifo_level, ABSYNC);
- edma_set_transfer_params(prtd->asp_link[1], acnt, fifo_level,
- count, fifo_level, ABSYNC);
- }
-
- edma_set_src_index(prtd->ram_link, ping_size, ram_src_cidx);
- edma_set_dest_index(prtd->ram_link, ping_size, ram_dst_cidx);
- edma_set_transfer_params(prtd->ram_link, ping_size, 2,
- runtime->periods, 2, ASYNC);
-
- /* init master params */
- edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
- edma_read_slot(prtd->ram_link, &prtd->ram_params);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- struct edmacc_param p_ram;
- /* Copy entire iram buffer before playback started */
- prtd->ram_params.a_b_cnt = (1 << 16) | (ping_size << 1);
- /* 0 dst_bidx */
- prtd->ram_params.src_dst_bidx = (ping_size << 1);
- /* 0 dst_cidx */
- prtd->ram_params.src_dst_cidx = (ping_size << 1);
- prtd->ram_params.ccnt = 1;
-
- /* Skip 1st period */
- edma_read_slot(prtd->ram_link, &p_ram);
- p_ram.src += (ping_size << 1);
- p_ram.ccnt -= 1;
- edma_write_slot(prtd->ram_link2, &p_ram);
- /*
- * When 1st started, ram -> iram dma channel will fill the
- * entire iram. Then, whenever a ping/pong asp buffer finishes,
- * 1/2 iram will be filled.
- */
- prtd->ram_params.link_bcntrld =
- EDMA_CHAN_SLOT(prtd->ram_link2) << 5;
- }
- return 0;
-}
-
-/* 1 asp tx or rx channel using 2 parameter channels
- * 1 ram to/from iram channel using 1 parameter channel
- *
- * Playback
- * ram copy channel kicks off first,
- * 1st ram copy of entire iram buffer completion kicks off asp channel
- * asp tcc always kicks off ram copy of 1/2 iram buffer
- *
- * Record
- * asp channel starts, tcc kicks off ram copy
- */
-static int request_ping_pong(struct snd_pcm_substream *substream,
- struct davinci_runtime_data *prtd,
- struct snd_dma_buffer *iram_dma)
-{
- dma_addr_t asp_src_ping;
- dma_addr_t asp_dst_ping;
- int ret;
- struct davinci_pcm_dma_params *params = prtd->params;
-
- /* Request ram master channel */
- ret = prtd->ram_channel = edma_alloc_channel(EDMA_CHANNEL_ANY,
- davinci_pcm_dma_irq, substream,
- prtd->params->ram_chan_q);
- if (ret < 0)
- goto exit1;
-
- /* Request ram link channel */
- ret = prtd->ram_link = edma_alloc_slot(
- EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
- if (ret < 0)
- goto exit2;
-
- ret = prtd->asp_link[1] = edma_alloc_slot(
- EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
- if (ret < 0)
- goto exit3;
-
- prtd->ram_link2 = -1;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- ret = prtd->ram_link2 = edma_alloc_slot(
- EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
- if (ret < 0)
- goto exit4;
- }
- /* circle ping-pong buffers */
- edma_link(prtd->asp_link[0], prtd->asp_link[1]);
- edma_link(prtd->asp_link[1], prtd->asp_link[0]);
- /* circle ram buffers */
- edma_link(prtd->ram_link, prtd->ram_link);
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- asp_src_ping = iram_dma->addr;
- asp_dst_ping = params->dma_addr; /* fifo */
- } else {
- asp_src_ping = params->dma_addr; /* fifo */
- asp_dst_ping = iram_dma->addr;
- }
- /* ping */
- edma_set_src(prtd->asp_link[0], asp_src_ping, INCR, W16BIT);
- edma_set_dest(prtd->asp_link[0], asp_dst_ping, INCR, W16BIT);
- edma_set_src_index(prtd->asp_link[0], 0, 0);
- edma_set_dest_index(prtd->asp_link[0], 0, 0);
-
- edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
- prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f) | TCINTEN);
- prtd->asp_params.opt |= TCCHEN |
- EDMA_TCC(prtd->ram_channel & 0x3f);
- edma_write_slot(prtd->asp_link[0], &prtd->asp_params);
-
- /* pong */
- edma_set_src(prtd->asp_link[1], asp_src_ping, INCR, W16BIT);
- edma_set_dest(prtd->asp_link[1], asp_dst_ping, INCR, W16BIT);
- edma_set_src_index(prtd->asp_link[1], 0, 0);
- edma_set_dest_index(prtd->asp_link[1], 0, 0);
-
- edma_read_slot(prtd->asp_link[1], &prtd->asp_params);
- prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f));
- /* interrupt after every pong completion */
- prtd->asp_params.opt |= TCINTEN | TCCHEN |
- EDMA_TCC(prtd->ram_channel & 0x3f);
- edma_write_slot(prtd->asp_link[1], &prtd->asp_params);
-
- /* ram */
- edma_set_src(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
- edma_set_dest(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
- pr_debug("%s: audio dma channels/slots in use for ram:%u %u %u,"
- "for asp:%u %u %u\n", __func__,
- prtd->ram_channel, prtd->ram_link, prtd->ram_link2,
- prtd->asp_channel, prtd->asp_link[0],
- prtd->asp_link[1]);
- return 0;
-exit4:
- edma_free_channel(prtd->asp_link[1]);
- prtd->asp_link[1] = -1;
-exit3:
- edma_free_channel(prtd->ram_link);
- prtd->ram_link = -1;
-exit2:
- edma_free_channel(prtd->ram_channel);
- prtd->ram_channel = -1;
-exit1:
- return ret;
-}
-
-static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
-{
- struct snd_dma_buffer *iram_dma;
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
- struct davinci_pcm_dma_params *params = prtd->params;
- int ret;
-
- if (!params)
- return -ENODEV;
-
- /* Request asp master DMA channel */
- ret = prtd->asp_channel = edma_alloc_channel(params->channel,
- davinci_pcm_dma_irq, substream,
- prtd->params->asp_chan_q);
- if (ret < 0)
- goto exit1;
-
- /* Request asp link channels */
- ret = prtd->asp_link[0] = edma_alloc_slot(
- EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
- if (ret < 0)
- goto exit2;
-
- iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data;
- if (iram_dma) {
- if (request_ping_pong(substream, prtd, iram_dma) == 0)
- return 0;
- printk(KERN_WARNING "%s: dma channel allocation failed,"
- "not using sram\n", __func__);
- }
-
- /* Issue transfer completion IRQ when the channel completes a
- * transfer, then always reload from the same slot (by a kind
- * of loopback link). The completion IRQ handler will update
- * the reload slot with a new buffer.
- *
- * REVISIT save p_ram here after setting up everything except
- * the buffer and its length (ccnt) ... use it as a template
- * so davinci_pcm_enqueue_dma() takes less time in IRQ.
- */
- edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
- prtd->asp_params.opt |= TCINTEN |
- EDMA_TCC(EDMA_CHAN_SLOT(prtd->asp_channel));
- prtd->asp_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->asp_link[0]) << 5;
- edma_write_slot(prtd->asp_link[0], &prtd->asp_params);
- return 0;
-exit2:
- edma_free_channel(prtd->asp_channel);
- prtd->asp_channel = -1;
-exit1:
- return ret;
-}
-
-static int davinci_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
- int ret = 0;
-
- spin_lock(&prtd->lock);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- edma_start(prtd->asp_channel);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
- prtd->ram_channel >= 0) {
- /* copy 1st iram buffer */
- edma_start(prtd->ram_channel);
- }
- break;
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- edma_resume(prtd->asp_channel);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- edma_pause(prtd->asp_channel);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- spin_unlock(&prtd->lock);
-
- return ret;
-}
-
-static int davinci_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct davinci_runtime_data *prtd = substream->runtime->private_data;
-
- davinci_pcm_period_reset(substream);
- if (prtd->ram_channel >= 0) {
- int ret = ping_pong_dma_setup(substream);
- if (ret < 0)
- return ret;
-
- edma_write_slot(prtd->ram_channel, &prtd->ram_params);
- edma_write_slot(prtd->asp_channel, &prtd->asp_params);
-
- print_buf_info(prtd->ram_channel, "ram_channel");
- print_buf_info(prtd->ram_link, "ram_link");
- print_buf_info(prtd->ram_link2, "ram_link2");
- print_buf_info(prtd->asp_channel, "asp_channel");
- print_buf_info(prtd->asp_link[0], "asp_link[0]");
- print_buf_info(prtd->asp_link[1], "asp_link[1]");
-
- /*
- * There is a phase offset of 2 periods between the position
- * used by dma setup and the position reported in the pointer
- * function.
- *
- * The phase offset, when not using ping-pong buffers, is due to
- * the two consecutive calls to davinci_pcm_enqueue_dma() below.
- *
- * Whereas here, with ping-pong buffers, the phase is due to
- * there being an entire buffer transfer complete before the
- * first dma completion event triggers davinci_pcm_dma_irq().
- */
- davinci_pcm_period_elapsed(substream);
- davinci_pcm_period_elapsed(substream);
-
- return 0;
- }
- davinci_pcm_enqueue_dma(substream);
- davinci_pcm_period_elapsed(substream);
-
- /* Copy self-linked parameter RAM entry into master channel */
- edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
- edma_write_slot(prtd->asp_channel, &prtd->asp_params);
- davinci_pcm_enqueue_dma(substream);
- davinci_pcm_period_elapsed(substream);
-
- return 0;
-}
-
-static snd_pcm_uframes_t
-davinci_pcm_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct davinci_runtime_data *prtd = runtime->private_data;
- unsigned int offset;
- int asp_count;
- unsigned int period_size = snd_pcm_lib_period_bytes(substream);
-
- /*
- * There is a phase offset of 2 periods between the position used by dma
- * setup and the position reported in the pointer function. Either +2 in
- * the dma setup or -2 here in the pointer function (with wrapping,
- * both) accounts for this offset -- choose the latter since it makes
- * the first-time setup clearer.
- */
- spin_lock(&prtd->lock);
- asp_count = prtd->period - 2;
- spin_unlock(&prtd->lock);
-
- if (asp_count < 0)
- asp_count += runtime->periods;
- asp_count *= period_size;
-
- offset = bytes_to_frames(runtime, asp_count);
- if (offset >= runtime->buffer_size)
- offset = 0;
-
- return offset;
-}
-
-static int davinci_pcm_open(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct davinci_runtime_data *prtd;
- struct snd_pcm_hardware *ppcm;
- int ret = 0;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct davinci_pcm_dma_params *pa;
- struct davinci_pcm_dma_params *params;
-
- pa = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- if (!pa)
- return -ENODEV;
- params = &pa[substream->stream];
-
- ppcm = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- &pcm_hardware_playback : &pcm_hardware_capture;
- allocate_sram(substream, params->sram_pool, params->sram_size, ppcm);
- snd_soc_set_runtime_hwparams(substream, ppcm);
- /* ensure that buffer size is a multiple of period size */
- ret = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
- if (ret < 0)
- return ret;
-
- prtd = kzalloc(sizeof(struct davinci_runtime_data), GFP_KERNEL);
- if (prtd == NULL)
- return -ENOMEM;
-
- spin_lock_init(&prtd->lock);
- prtd->params = params;
- prtd->asp_channel = -1;
- prtd->asp_link[0] = prtd->asp_link[1] = -1;
- prtd->ram_channel = -1;
- prtd->ram_link = -1;
- prtd->ram_link2 = -1;
-
- runtime->private_data = prtd;
-
- ret = davinci_pcm_dma_request(substream);
- if (ret) {
- printk(KERN_ERR "davinci_pcm: Failed to get dma channels\n");
- kfree(prtd);
- }
-
- return ret;
-}
-
-static int davinci_pcm_close(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct davinci_runtime_data *prtd = runtime->private_data;
-
- if (prtd->ram_channel >= 0)
- edma_stop(prtd->ram_channel);
- if (prtd->asp_channel >= 0)
- edma_stop(prtd->asp_channel);
- if (prtd->asp_link[0] >= 0)
- edma_unlink(prtd->asp_link[0]);
- if (prtd->asp_link[1] >= 0)
- edma_unlink(prtd->asp_link[1]);
- if (prtd->ram_link >= 0)
- edma_unlink(prtd->ram_link);
-
- if (prtd->asp_link[0] >= 0)
- edma_free_slot(prtd->asp_link[0]);
- if (prtd->asp_link[1] >= 0)
- edma_free_slot(prtd->asp_link[1]);
- if (prtd->asp_channel >= 0)
- edma_free_channel(prtd->asp_channel);
- if (prtd->ram_link >= 0)
- edma_free_slot(prtd->ram_link);
- if (prtd->ram_link2 >= 0)
- edma_free_slot(prtd->ram_link2);
- if (prtd->ram_channel >= 0)
- edma_free_channel(prtd->ram_channel);
-
- kfree(prtd);
-
- return 0;
-}
-
-static int davinci_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
-}
-
-static int davinci_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int davinci_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- return dma_mmap_writecombine(substream->pcm->card->dev, vma,
- runtime->dma_area,
- runtime->dma_addr,
- runtime->dma_bytes);
-}
-
-static struct snd_pcm_ops davinci_pcm_ops = {
- .open = davinci_pcm_open,
- .close = davinci_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = davinci_pcm_hw_params,
- .hw_free = davinci_pcm_hw_free,
- .prepare = davinci_pcm_prepare,
- .trigger = davinci_pcm_trigger,
- .pointer = davinci_pcm_pointer,
- .mmap = davinci_pcm_mmap,
-};
-
-static int davinci_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
- size_t size)
-{
- struct snd_pcm_substream *substream = pcm->streams[stream].substream;
- struct snd_dma_buffer *buf = &substream->dma_buffer;
-
- buf->dev.type = SNDRV_DMA_TYPE_DEV;
- buf->dev.dev = pcm->card->dev;
- buf->private_data = NULL;
- buf->area = dma_alloc_writecombine(pcm->card->dev, size,
- &buf->addr, GFP_KERNEL);
-
- pr_debug("davinci_pcm: preallocate_dma_buffer: area=%p, addr=%p, "
- "size=%d\n", (void *) buf->area, (void *) buf->addr, size);
-
- if (!buf->area)
- return -ENOMEM;
-
- buf->bytes = size;
- return 0;
-}
-
-static void davinci_pcm_free(struct snd_pcm *pcm)
-{
- struct snd_pcm_substream *substream;
- struct snd_dma_buffer *buf;
- int stream;
-
- for (stream = 0; stream < 2; stream++) {
- struct snd_dma_buffer *iram_dma;
- substream = pcm->streams[stream].substream;
- if (!substream)
- continue;
-
- buf = &substream->dma_buffer;
- if (!buf->area)
- continue;
-
- dma_free_writecombine(pcm->card->dev, buf->bytes,
- buf->area, buf->addr);
- buf->area = NULL;
- iram_dma = buf->private_data;
- if (iram_dma) {
- davinci_free_sram(substream, iram_dma);
- kfree(iram_dma);
- }
- }
-}
-
-static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_card *card = rtd->card->snd_card;
- struct snd_pcm *pcm = rtd->pcm;
- int ret;
-
- ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
- ret = davinci_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_PLAYBACK,
- pcm_hardware_playback.buffer_bytes_max);
- if (ret)
- return ret;
- }
-
- if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- ret = davinci_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_CAPTURE,
- pcm_hardware_capture.buffer_bytes_max);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct snd_soc_platform_driver davinci_soc_platform = {
- .ops = &davinci_pcm_ops,
- .pcm_new = davinci_pcm_new,
- .pcm_free = davinci_pcm_free,
-};
-
-int davinci_soc_platform_register(struct device *dev)
-{
- return devm_snd_soc_register_platform(dev, &davinci_soc_platform);
-}
-EXPORT_SYMBOL_GPL(davinci_soc_platform_register);
-
-MODULE_AUTHOR("Vladimir Barinov");
-MODULE_DESCRIPTION("TI DAVINCI PCM DMA module");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/davinci/davinci-pcm.h b/sound/soc/davinci/davinci-pcm.h
deleted file mode 100644
index 0fe2346a9aa2..000000000000
--- a/sound/soc/davinci/davinci-pcm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * ALSA PCM interface for the TI DAVINCI processor
- *
- * Author: Vladimir Barinov, <vbarinov@embeddedalley.com>
- * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _DAVINCI_PCM_H
-#define _DAVINCI_PCM_H
-
-#include <linux/genalloc.h>
-#include <linux/platform_data/davinci_asp.h>
-#include <linux/platform_data/edma.h>
-
-struct davinci_pcm_dma_params {
- int channel; /* sync dma channel ID */
- unsigned short acnt;
- dma_addr_t dma_addr; /* device physical address for DMA */
- unsigned sram_size;
- struct gen_pool *sram_pool; /* SRAM gen_pool for ping pong */
- enum dma_event_q asp_chan_q; /* event queue number for ASP channel */
- enum dma_event_q ram_chan_q; /* event queue number for RAM channel */
- unsigned char data_type; /* xfer data type */
- unsigned char convert_mono_stereo;
- unsigned int fifo_level;
-};
-
-#if IS_ENABLED(CONFIG_SND_DAVINCI_SOC)
-int davinci_soc_platform_register(struct device *dev);
-#else
-static inline int davinci_soc_platform_register(struct device *dev)
-{
- return 0;
-}
-#endif /* CONFIG_SND_DAVINCI_SOC */
-
-#endif
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index 5bee04279ebe..fabd05f24aeb 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -33,8 +33,9 @@
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
-#include "davinci-pcm.h"
+#include "edma-pcm.h"
#include "davinci-i2s.h"
#define MOD_REG_BIT(val, mask, set) do { \
@@ -47,7 +48,8 @@
struct davinci_vcif_dev {
struct davinci_vc *davinci_vc;
- struct davinci_pcm_dma_params dma_params[2];
+ struct snd_dmaengine_dai_dma_data dma_data[2];
+ int dma_request[2];
};
static void davinci_vcif_start(struct snd_pcm_substream *substream)
@@ -93,8 +95,6 @@ static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
{
struct davinci_vcif_dev *davinci_vcif_dev = snd_soc_dai_get_drvdata(dai);
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
- struct davinci_pcm_dma_params *dma_params =
- &davinci_vcif_dev->dma_params[substream->stream];
u32 w;
/* Restart the codec before setup */
@@ -113,16 +113,12 @@ static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
/* Determine xfer data type */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U8:
- dma_params->data_type = 0;
-
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_BITS_8 |
DAVINCI_VC_CTRL_WD_UNSIGNED, 1);
break;
case SNDRV_PCM_FORMAT_S8:
- dma_params->data_type = 1;
-
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_WD_BITS_8, 1);
@@ -130,8 +126,6 @@ static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
DAVINCI_VC_CTRL_WD_UNSIGNED, 0);
break;
case SNDRV_PCM_FORMAT_S16_LE:
- dma_params->data_type = 2;
-
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_BITS_8 |
@@ -142,8 +136,6 @@ static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- dma_params->acnt = dma_params->data_type;
-
writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
return 0;
@@ -172,24 +164,25 @@ static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd,
return ret;
}
-static int davinci_vcif_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct davinci_vcif_dev *dev = snd_soc_dai_get_drvdata(dai);
-
- snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
- return 0;
-}
-
#define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000
static const struct snd_soc_dai_ops davinci_vcif_dai_ops = {
- .startup = davinci_vcif_startup,
.trigger = davinci_vcif_trigger,
.hw_params = davinci_vcif_hw_params,
};
+static int davinci_vcif_dai_probe(struct snd_soc_dai *dai)
+{
+ struct davinci_vcif_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ dai->playback_dma_data = &dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
+ dai->capture_dma_data = &dev->dma_data[SNDRV_PCM_STREAM_CAPTURE];
+
+ return 0;
+}
+
static struct snd_soc_dai_driver davinci_vcif_dai = {
+ .probe = davinci_vcif_dai_probe,
.playback = {
.channels_min = 1,
.channels_max = 2,
@@ -225,16 +218,16 @@ static int davinci_vcif_probe(struct platform_device *pdev)
/* DMA tx params */
davinci_vcif_dev->davinci_vc = davinci_vc;
- davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel =
- davinci_vc->davinci_vcif.dma_tx_channel;
- davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
- davinci_vc->davinci_vcif.dma_tx_addr;
+ davinci_vcif_dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].filter_data =
+ &davinci_vc->davinci_vcif.dma_tx_channel;
+ davinci_vcif_dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
+ davinci_vc->davinci_vcif.dma_tx_addr;
/* DMA rx params */
- davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel =
- davinci_vc->davinci_vcif.dma_rx_channel;
- davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
- davinci_vc->davinci_vcif.dma_rx_addr;
+ davinci_vcif_dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].filter_data =
+ &davinci_vc->davinci_vcif.dma_rx_channel;
+ davinci_vcif_dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].addr =
+ davinci_vc->davinci_vcif.dma_rx_addr;
dev_set_drvdata(&pdev->dev, davinci_vcif_dev);
@@ -245,7 +238,7 @@ static int davinci_vcif_probe(struct platform_device *pdev)
return ret;
}
- ret = davinci_soc_platform_register(&pdev->dev);
+ ret = edma_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
snd_soc_unregister_component(&pdev->dev);
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 081e406b3713..19c302b0d763 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -24,7 +24,7 @@ config SND_SOC_FSL_SAI
in-tree drivers select it automatically.
config SND_SOC_FSL_SSI
- tristate "Synchronous Serial Interface module support"
+ tristate "Synchronous Serial Interface module (SSI) support"
select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
select SND_SOC_IMX_PCM_FIQ if SND_IMX_SOC != n && (MXC_TZIC || MXC_AVIC)
select REGMAP_MMIO
@@ -35,7 +35,7 @@ config SND_SOC_FSL_SSI
in-tree drivers select it automatically.
config SND_SOC_FSL_SPDIF
- tristate "Sony/Philips Digital Interface module support"
+ tristate "Sony/Philips Digital Interface (S/PDIF) module support"
select REGMAP_MMIO
select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
select SND_SOC_IMX_PCM_FIQ if SND_IMX_SOC != n && (MXC_TZIC || MXC_AVIC)
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 3f6959c8e2f7..de438871040b 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -512,6 +512,12 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
memcpy(priv->dai_link, fsl_asoc_card_dai,
sizeof(struct snd_soc_dai_link) * ARRAY_SIZE(priv->dai_link));
+ ret = snd_soc_of_parse_audio_routing(&priv->card, "audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
+ goto asrc_fail;
+ }
+
/* Normal DAI Link */
priv->dai_link[0].cpu_of_node = cpu_np;
priv->dai_link[0].codec_of_node = codec_np;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 6b0c8f717ec2..0d48804218b1 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1288,7 +1288,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
const char *p, *sprop;
const uint32_t *iprop;
- struct resource res;
+ struct resource *res;
void __iomem *iomem;
char name[64];
@@ -1335,19 +1335,11 @@ static int fsl_ssi_probe(struct platform_device *pdev)
}
ssi_private->cpu_dai_drv.name = dev_name(&pdev->dev);
- /* Get the addresses and IRQ */
- ret = of_address_to_resource(np, 0, &res);
- if (ret) {
- dev_err(&pdev->dev, "could not determine device resources\n");
- return ret;
- }
- ssi_private->ssi_phys = res.start;
-
- iomem = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
- if (!iomem) {
- dev_err(&pdev->dev, "could not map device resources\n");
- return -ENOMEM;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iomem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+ ssi_private->ssi_phys = res->start;
ret = of_property_match_string(np, "clock-names", "ipg");
if (ret < 0) {
@@ -1365,7 +1357,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
}
ssi_private->irq = platform_get_irq(pdev, 0);
- if (!ssi_private->irq) {
+ if (ssi_private->irq < 0) {
dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
return ssi_private->irq;
}
@@ -1393,8 +1385,8 @@ static int fsl_ssi_probe(struct platform_device *pdev)
return ret;
}
- ret = snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
- &ssi_private->cpu_dai_drv, 1);
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
+ &ssi_private->cpu_dai_drv, 1);
if (ret) {
dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
goto error_asoc_register;
@@ -1407,13 +1399,13 @@ static int fsl_ssi_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "could not claim irq %u\n",
ssi_private->irq);
- goto error_irq;
+ goto error_asoc_register;
}
}
ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
if (ret)
- goto error_irq;
+ goto error_asoc_register;
/*
* If codec-handle property is missing from SSI node, we assume
@@ -1454,9 +1446,6 @@ done:
error_sound_card:
fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
-error_irq:
- snd_soc_unregister_component(&pdev->dev);
-
error_asoc_register:
if (ssi_private->soc->imx)
fsl_ssi_imx_clean(pdev, ssi_private);
@@ -1472,7 +1461,6 @@ static int fsl_ssi_remove(struct platform_device *pdev)
if (ssi_private->pdev)
platform_device_unregister(ssi_private->pdev);
- snd_soc_unregister_component(&pdev->dev);
if (ssi_private->soc->imx)
fsl_ssi_imx_clean(pdev, ssi_private);
diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
index f8cf10e16ce9..20e7400e2611 100644
--- a/sound/soc/fsl/imx-es8328.c
+++ b/sound/soc/fsl/imx-es8328.c
@@ -53,9 +53,9 @@ static int imx_es8328_dai_init(struct snd_soc_pcm_runtime *rtd)
/* Headphone jack detection */
if (gpio_is_valid(data->jack_gpio)) {
- ret = snd_soc_jack_new(rtd->codec, "Headphone",
- SND_JACK_HEADPHONE | SND_JACK_BTN_0,
- &headset_jack);
+ ret = snd_soc_card_jack_new(rtd->card, "Headphone",
+ SND_JACK_HEADPHONE | SND_JACK_BTN_0,
+ &headset_jack, NULL, 0);
if (ret)
return ret;
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index 08d2a8069b0a..0bab76051fd8 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -326,7 +326,7 @@ static int psc_ac97_of_remove(struct platform_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id psc_ac97_match[] = {
+static const struct of_device_id psc_ac97_match[] = {
{ .compatible = "fsl,mpc5200-psc-ac97", },
{ .compatible = "fsl,mpc5200b-psc-ac97", },
{}
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index 51fb0c00fe73..d8232943ccb6 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -217,7 +217,7 @@ static int psc_i2s_of_remove(struct platform_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id psc_i2s_match[] = {
+static const struct of_device_id psc_i2s_match[] = {
{ .compatible = "fsl,mpc5200-psc-i2s", },
{ .compatible = "fsl,mpc5200b-psc-i2s", },
{}
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index c44459d24c50..ec731223cab3 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -113,7 +113,7 @@ static int pcm030_fabric_remove(struct platform_device *op)
return ret;
}
-static struct of_device_id pcm030_audio_match[] = {
+static const struct of_device_id pcm030_audio_match[] = {
{ .compatible = "phytec,pcm030-audio-fabric", },
{}
};
diff --git a/sound/soc/fsl/wm1133-ev1.c b/sound/soc/fsl/wm1133-ev1.c
index a958937ab405..b454972dce35 100644
--- a/sound/soc/fsl/wm1133-ev1.c
+++ b/sound/soc/fsl/wm1133-ev1.c
@@ -202,23 +202,20 @@ static struct snd_soc_jack_pin mic_jack_pins[] = {
static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
/* Headphone jack detection */
- snd_soc_jack_new(codec, "Headphone", SND_JACK_HEADPHONE, &hp_jack);
- snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
- hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphone", SND_JACK_HEADPHONE,
+ &hp_jack, hp_jack_pins, ARRAY_SIZE(hp_jack_pins));
wm8350_hp_jack_detect(codec, WM8350_JDR, &hp_jack, SND_JACK_HEADPHONE);
/* Microphone jack detection */
- snd_soc_jack_new(codec, "Microphone",
- SND_JACK_MICROPHONE | SND_JACK_BTN_0, &mic_jack);
- snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
- mic_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Microphone",
+ SND_JACK_MICROPHONE | SND_JACK_BTN_0, &mic_jack,
+ mic_jack_pins, ARRAY_SIZE(mic_jack_pins));
wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE,
SND_JACK_BTN_0);
- snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
+ snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "Mic Bias");
return 0;
}
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index fb550b5869d2..33feee9ca8c3 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -125,14 +125,6 @@ static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai,
{
int ret;
- if (set->fmt) {
- ret = snd_soc_dai_set_fmt(dai, set->fmt);
- if (ret && ret != -ENOTSUPP) {
- dev_err(dai->dev, "simple-card: set_fmt error\n");
- goto err;
- }
- }
-
if (set->sysclk) {
ret = snd_soc_dai_set_sysclk(dai, 0, set->sysclk, 0);
if (ret && ret != -ENOTSUPP) {
@@ -176,11 +168,11 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
return ret;
if (gpio_is_valid(priv->gpio_hp_det)) {
- snd_soc_jack_new(codec->codec, "Headphones", SND_JACK_HEADPHONE,
- &simple_card_hp_jack);
- snd_soc_jack_add_pins(&simple_card_hp_jack,
- ARRAY_SIZE(simple_card_hp_jack_pins),
- simple_card_hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphones",
+ SND_JACK_HEADPHONE,
+ &simple_card_hp_jack,
+ simple_card_hp_jack_pins,
+ ARRAY_SIZE(simple_card_hp_jack_pins));
simple_card_hp_jack_gpio.gpio = priv->gpio_hp_det;
simple_card_hp_jack_gpio.invert = priv->gpio_hp_det_invert;
@@ -189,11 +181,11 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
}
if (gpio_is_valid(priv->gpio_mic_det)) {
- snd_soc_jack_new(codec->codec, "Mic Jack", SND_JACK_MICROPHONE,
- &simple_card_mic_jack);
- snd_soc_jack_add_pins(&simple_card_mic_jack,
- ARRAY_SIZE(simple_card_mic_jack_pins),
- simple_card_mic_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Mic Jack",
+ SND_JACK_MICROPHONE,
+ &simple_card_mic_jack,
+ simple_card_mic_jack_pins,
+ ARRAY_SIZE(simple_card_mic_jack_pins));
simple_card_mic_jack_gpio.gpio = priv->gpio_mic_det;
simple_card_mic_jack_gpio.invert = priv->gpio_mic_det_invert;
snd_soc_jack_add_gpios(&simple_card_mic_jack, 1,
@@ -269,12 +261,10 @@ static int asoc_simple_card_parse_daifmt(struct device_node *node,
struct device_node *codec,
char *prefix, int idx)
{
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
struct device *dev = simple_priv_to_dev(priv);
struct device_node *bitclkmaster = NULL;
struct device_node *framemaster = NULL;
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
- struct asoc_simple_dai *cpu_dai = &dai_props->cpu_dai;
- struct asoc_simple_dai *codec_dai = &dai_props->codec_dai;
unsigned int daifmt;
daifmt = snd_soc_of_parse_daifmt(node, prefix,
@@ -289,8 +279,7 @@ static int asoc_simple_card_parse_daifmt(struct device_node *node,
*/
dev_dbg(dev, "Revert to legacy daifmt parsing\n");
- cpu_dai->fmt = codec_dai->fmt =
- snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
+ daifmt = snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
(daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
} else {
if (codec == bitclkmaster)
@@ -299,11 +288,10 @@ static int asoc_simple_card_parse_daifmt(struct device_node *node,
else
daifmt |= (codec == framemaster) ?
SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
-
- cpu_dai->fmt = daifmt;
- codec_dai->fmt = daifmt;
}
+ dai_link->dai_fmt = daifmt;
+
of_node_put(bitclkmaster);
of_node_put(framemaster);
@@ -384,13 +372,12 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
dai_link->init = asoc_simple_card_dai_init;
dev_dbg(dev, "\tname : %s\n", dai_link->stream_name);
- dev_dbg(dev, "\tcpu : %s / %04x / %d\n",
+ dev_dbg(dev, "\tformat : %04x\n", dai_link->dai_fmt);
+ dev_dbg(dev, "\tcpu : %s / %d\n",
dai_link->cpu_dai_name,
- dai_props->cpu_dai.fmt,
dai_props->cpu_dai.sysclk);
- dev_dbg(dev, "\tcodec : %s / %04x / %d\n",
+ dev_dbg(dev, "\tcodec : %s / %d\n",
dai_link->codec_dai_name,
- dai_props->codec_dai.fmt,
dai_props->codec_dai.sysclk);
/*
@@ -577,14 +564,13 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
dai_link->codec_name = cinfo->codec;
dai_link->cpu_dai_name = cinfo->cpu_dai.name;
dai_link->codec_dai_name = cinfo->codec_dai.name;
+ dai_link->dai_fmt = cinfo->daifmt;
dai_link->init = asoc_simple_card_dai_init;
memcpy(&priv->dai_props->cpu_dai, &cinfo->cpu_dai,
sizeof(priv->dai_props->cpu_dai));
memcpy(&priv->dai_props->codec_dai, &cinfo->codec_dai,
sizeof(priv->dai_props->codec_dai));
- priv->dai_props->cpu_dai.fmt |= cinfo->daifmt;
- priv->dai_props->codec_dai.fmt |= cinfo->daifmt;
}
snd_soc_card_set_drvdata(&priv->snd_card, priv);
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index a8e53c45c6b6..3853ec2ddbc7 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -1,42 +1,10 @@
# Core support
-snd-soc-sst-dsp-objs := sst-dsp.o sst-firmware.o
-snd-soc-sst-acpi-objs := sst-acpi.o
-
-snd-soc-sst-mfld-platform-objs := sst-mfld-platform-pcm.o \
- sst-mfld-platform-compress.o sst-atom-controls.o
-snd-soc-mfld-machine-objs := mfld_machine.o
-
-obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += snd-soc-sst-mfld-platform.o
-obj-$(CONFIG_SND_MFLD_MACHINE) += snd-soc-mfld-machine.o
-
-obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o
-obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
+obj-$(CONFIG_SND_SOC_INTEL_SST) += common/
# Platform Support
-snd-soc-sst-haswell-pcm-objs := \
- sst-haswell-ipc.o sst-haswell-pcm.o sst-haswell-dsp.o
-snd-soc-sst-baytrail-pcm-objs := \
- sst-baytrail-ipc.o sst-baytrail-pcm.o sst-baytrail-dsp.o
-
-obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += snd-soc-sst-haswell-pcm.o
-obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += snd-soc-sst-baytrail-pcm.o
+obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
+obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
+obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
# Machine support
-snd-soc-sst-haswell-objs := haswell.o
-snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
-snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
-snd-soc-sst-broadwell-objs := broadwell.o
-snd-soc-sst-bytcr-dpcm-rt5640-objs := bytcr_dpcm_rt5640.o
-snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
-snd-soc-sst-cht-bsw-rt5645-objs := cht_bsw_rt5645.o
-
-obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
-obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
-obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
-obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
-obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-dpcm-rt5640.o
-obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
-obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH) += snd-soc-sst-cht-bsw-rt5645.o
-
-# DSP driver
-obj-$(CONFIG_SND_SST_IPC) += sst/
+obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/
diff --git a/sound/soc/intel/atom/Makefile b/sound/soc/intel/atom/Makefile
new file mode 100644
index 000000000000..ce8074fa6d66
--- /dev/null
+++ b/sound/soc/intel/atom/Makefile
@@ -0,0 +1,7 @@
+snd-soc-sst-mfld-platform-objs := sst-mfld-platform-pcm.o \
+ sst-mfld-platform-compress.o sst-atom-controls.o
+
+obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += snd-soc-sst-mfld-platform.o
+
+# DSP driver
+obj-$(CONFIG_SND_SST_IPC) += sst/
diff --git a/sound/soc/intel/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 90aa5c0476f3..90aa5c0476f3 100644
--- a/sound/soc/intel/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
diff --git a/sound/soc/intel/sst-atom-controls.h b/sound/soc/intel/atom/sst-atom-controls.h
index daecc58f28af..daecc58f28af 100644
--- a/sound/soc/intel/sst-atom-controls.h
+++ b/sound/soc/intel/atom/sst-atom-controls.h
diff --git a/sound/soc/intel/sst-mfld-dsp.h b/sound/soc/intel/atom/sst-mfld-dsp.h
index 4257263157cd..4257263157cd 100644
--- a/sound/soc/intel/sst-mfld-dsp.h
+++ b/sound/soc/intel/atom/sst-mfld-dsp.h
diff --git a/sound/soc/intel/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c
index 395168986462..395168986462 100644
--- a/sound/soc/intel/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c
diff --git a/sound/soc/intel/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 7523cbef8780..2fbaf2c75d17 100644
--- a/sound/soc/intel/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -594,11 +594,13 @@ static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
ret_val = stream->ops->stream_drop(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
dev_dbg(rtd->dev, "sst: in pause\n");
status = SST_PLATFORM_PAUSED;
ret_val = stream->ops->stream_pause(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
dev_dbg(rtd->dev, "sst: in pause release\n");
status = SST_PLATFORM_RUNNING;
ret_val = stream->ops->stream_pause_release(sst->dev, str_id);
@@ -665,6 +667,9 @@ static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
static int sst_soc_probe(struct snd_soc_platform *platform)
{
+ struct sst_data *drv = dev_get_drvdata(platform->dev);
+
+ drv->soc_card = platform->component.card;
return sst_dsp_init_v2_dpcm(platform);
}
@@ -727,9 +732,64 @@ static int sst_platform_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+
+static int sst_soc_prepare(struct device *dev)
+{
+ struct sst_data *drv = dev_get_drvdata(dev);
+ int i;
+
+ /* suspend all pcms first */
+ snd_soc_suspend(drv->soc_card->dev);
+ snd_soc_poweroff(drv->soc_card->dev);
+
+ /* set the SSPs to idle */
+ for (i = 0; i < drv->soc_card->num_rtd; i++) {
+ struct snd_soc_dai *dai = drv->soc_card->rtd[i].cpu_dai;
+
+ if (dai->active) {
+ send_ssp_cmd(dai, dai->name, 0);
+ sst_handle_vb_timer(dai, false);
+ }
+ }
+
+ return 0;
+}
+
+static void sst_soc_complete(struct device *dev)
+{
+ struct sst_data *drv = dev_get_drvdata(dev);
+ int i;
+
+ /* restart SSPs */
+ for (i = 0; i < drv->soc_card->num_rtd; i++) {
+ struct snd_soc_dai *dai = drv->soc_card->rtd[i].cpu_dai;
+
+ if (dai->active) {
+ sst_handle_vb_timer(dai, true);
+ send_ssp_cmd(dai, dai->name, 1);
+ }
+ }
+ snd_soc_resume(drv->soc_card->dev);
+}
+
+#else
+
+#define sst_soc_prepare NULL
+#define sst_soc_complete NULL
+
+#endif
+
+
+static const struct dev_pm_ops sst_platform_pm = {
+ .prepare = sst_soc_prepare,
+ .complete = sst_soc_complete,
+};
+
static struct platform_driver sst_platform_driver = {
.driver = {
.name = "sst-mfld-platform",
+ .pm = &sst_platform_pm,
},
.probe = sst_platform_probe,
.remove = sst_platform_remove,
diff --git a/sound/soc/intel/sst-mfld-platform.h b/sound/soc/intel/atom/sst-mfld-platform.h
index 79c8d1246a8f..9094314be2b0 100644
--- a/sound/soc/intel/sst-mfld-platform.h
+++ b/sound/soc/intel/atom/sst-mfld-platform.h
@@ -174,6 +174,7 @@ struct sst_data {
struct sst_platform_data *pdata;
struct snd_sst_bytes_v2 *byte_stream;
struct mutex lock;
+ struct snd_soc_card *soc_card;
};
int sst_register_dsp(struct sst_device *sst);
int sst_unregister_dsp(struct sst_device *sst);
diff --git a/sound/soc/intel/sst/Makefile b/sound/soc/intel/atom/sst/Makefile
index fd21726361b5..fd21726361b5 100644
--- a/sound/soc/intel/sst/Makefile
+++ b/sound/soc/intel/atom/sst/Makefile
diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
index 11c578651c1c..96c2e420cce6 100644
--- a/sound/soc/intel/sst/sst.c
+++ b/sound/soc/intel/atom/sst/sst.c
@@ -32,7 +32,7 @@
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
-#include "../sst-dsp.h"
+#include "../../common/sst-dsp.h"
MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
@@ -423,23 +423,135 @@ static int intel_sst_runtime_suspend(struct device *dev)
return ret;
}
-static int intel_sst_runtime_resume(struct device *dev)
+static int intel_sst_suspend(struct device *dev)
{
- int ret = 0;
struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+ struct sst_fw_save *fw_save;
+ int i, ret = 0;
- if (ctx->sst_state == SST_RESET) {
- ret = sst_load_fw(ctx);
- if (ret) {
- dev_err(dev, "FW download fail %d\n", ret);
- sst_set_fw_state_locked(ctx, SST_RESET);
+ /* check first if we are already in SW reset */
+ if (ctx->sst_state == SST_RESET)
+ return 0;
+
+ /*
+ * check if any stream is active and running
+ * they should already be suspended by soc_suspend
+ */
+ for (i = 1; i <= ctx->info.max_streams; i++) {
+ struct stream_info *stream = &ctx->streams[i];
+
+ if (stream->status == STREAM_RUNNING) {
+ dev_err(dev, "stream %d is running, cant susupend, abort\n", i);
+ return -EBUSY;
}
}
+ synchronize_irq(ctx->irq_num);
+ flush_workqueue(ctx->post_msg_wq);
+
+ /* Move the SST state to Reset */
+ sst_set_fw_state_locked(ctx, SST_RESET);
+
+ /* tell DSP we are suspending */
+ if (ctx->ops->save_dsp_context(ctx))
+ return -EBUSY;
+
+ /* save the DSP memory regions */
+ fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL);
+ if (!fw_save)
+ return -ENOMEM;
+ fw_save->iram = kzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
+ if (!fw_save->iram) {
+ ret = -ENOMEM;
+ goto iram;
+ }
+ fw_save->dram = kzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
+ if (!fw_save->dram) {
+ ret = -ENOMEM;
+ goto dram;
+ }
+ fw_save->sram = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
+ if (!fw_save->sram) {
+ ret = -ENOMEM;
+ goto sram;
+ }
+
+ fw_save->ddr = kzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
+ if (!fw_save->ddr) {
+ ret = -ENOMEM;
+ goto ddr;
+ }
+
+ memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base);
+ memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base);
+ memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE);
+ memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base);
+
+ ctx->fw_save = fw_save;
+ ctx->ops->reset(ctx);
+ return 0;
+ddr:
+ kfree(fw_save->sram);
+sram:
+ kfree(fw_save->dram);
+dram:
+ kfree(fw_save->iram);
+iram:
+ kfree(fw_save);
+ return ret;
+}
+
+static int intel_sst_resume(struct device *dev)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+ struct sst_fw_save *fw_save = ctx->fw_save;
+ int ret = 0;
+ struct sst_block *block;
+
+ if (!fw_save)
+ return 0;
+
+ sst_set_fw_state_locked(ctx, SST_FW_LOADING);
+
+ /* we have to restore the saved memory */
+ ctx->ops->reset(ctx);
+
+ ctx->fw_save = NULL;
+
+ memcpy32_toio(ctx->iram, fw_save->iram, ctx->iram_end - ctx->iram_base);
+ memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base);
+ memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE);
+ memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base);
+
+ kfree(fw_save->sram);
+ kfree(fw_save->dram);
+ kfree(fw_save->iram);
+ kfree(fw_save->ddr);
+ kfree(fw_save);
+
+ block = sst_create_block(ctx, 0, FW_DWNL_ID);
+ if (block == NULL)
+ return -ENOMEM;
+
+
+ /* start and wait for ack */
+ ctx->ops->start(ctx);
+ ret = sst_wait_timeout(ctx, block);
+ if (ret) {
+ dev_err(ctx->dev, "fw download failed %d\n", ret);
+ /* FW download failed due to timeout */
+ ret = -EBUSY;
+
+ } else {
+ sst_set_fw_state_locked(ctx, SST_FW_RUNNING);
+ }
+
+ sst_free_block(ctx, block);
return ret;
}
const struct dev_pm_ops intel_sst_pm = {
+ .suspend = intel_sst_suspend,
+ .resume = intel_sst_resume,
.runtime_suspend = intel_sst_runtime_suspend,
- .runtime_resume = intel_sst_runtime_resume,
};
EXPORT_SYMBOL_GPL(intel_sst_pm);
diff --git a/sound/soc/intel/sst/sst.h b/sound/soc/intel/atom/sst/sst.h
index 562bc483d6b7..3f493862e98d 100644
--- a/sound/soc/intel/sst/sst.h
+++ b/sound/soc/intel/atom/sst/sst.h
@@ -337,6 +337,13 @@ struct sst_shim_regs64 {
u64 csr2;
};
+struct sst_fw_save {
+ void *iram;
+ void *dram;
+ void *sram;
+ void *ddr;
+};
+
/**
* struct intel_sst_drv - driver ops
*
@@ -428,6 +435,8 @@ struct intel_sst_drv {
* persistent till worker thread gets called
*/
char firmware_name[FW_NAME_SIZE];
+
+ struct sst_fw_save *fw_save;
};
/* misc definitions */
@@ -544,4 +553,7 @@ int sst_alloc_drv_context(struct intel_sst_drv **ctx,
int sst_context_init(struct intel_sst_drv *ctx);
void sst_context_cleanup(struct intel_sst_drv *ctx);
void sst_configure_runtime_pm(struct intel_sst_drv *ctx);
+void memcpy32_toio(void __iomem *dst, const void *src, int count);
+void memcpy32_fromio(void *dst, const void __iomem *src, int count);
+
#endif
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index b782dfdcdbba..05f693083911 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -39,7 +39,7 @@
#include <acpi/actypes.h>
#include <acpi/acpi_bus.h>
#include "../sst-mfld-platform.h"
-#include "../sst-dsp.h"
+#include "../../common/sst-dsp.h"
#include "sst.h"
struct sst_machines {
@@ -309,7 +309,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
ctx->shim_regs64 = devm_kzalloc(ctx->dev, sizeof(*ctx->shim_regs64),
GFP_KERNEL);
if (!ctx->shim_regs64) {
- return -ENOMEM;
+ ret = -ENOMEM;
goto do_sst_cleanup;
}
diff --git a/sound/soc/intel/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 5f75ef3cdd22..7b50a9d17ec1 100644
--- a/sound/soc/intel/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -32,7 +32,7 @@
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
-#include "../sst-dsp.h"
+#include "../../common/sst-dsp.h"
@@ -138,12 +138,36 @@ int sst_get_stream(struct intel_sst_drv *ctx,
static int sst_power_control(struct device *dev, bool state)
{
struct intel_sst_drv *ctx = dev_get_drvdata(dev);
-
- dev_dbg(ctx->dev, "state:%d", state);
- if (state == true)
- return pm_runtime_get_sync(dev);
- else
+ int ret = 0;
+ int usage_count = 0;
+
+#ifdef CONFIG_PM
+ usage_count = atomic_read(&dev->power.usage_count);
+#else
+ usage_count = 1;
+#endif
+
+ if (state == true) {
+ ret = pm_runtime_get_sync(dev);
+
+ dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
+ return ret;
+ }
+ if ((ctx->sst_state == SST_RESET) && (usage_count == 1)) {
+ ret = sst_load_fw(ctx);
+ if (ret) {
+ dev_err(dev, "FW download fail %d\n", ret);
+ sst_set_fw_state_locked(ctx, SST_RESET);
+ ret = sst_pm_runtime_put(ctx);
+ }
+ }
+ } else {
+ dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
return sst_pm_runtime_put(ctx);
+ }
+ return ret;
}
/*
@@ -357,7 +381,7 @@ static int sst_cdev_tstamp(struct device *dev, unsigned int str_id,
tstamp->copied_total = fw_tstamp.ring_buffer_counter;
tstamp->pcm_frames = fw_tstamp.frames_decoded;
tstamp->pcm_io_frames = div_u64(fw_tstamp.hardware_counter,
- (u64)((stream->num_ch) * SST_GET_BYTES_PER_SAMPLE(24)));
+ (u64)stream->num_ch * SST_GET_BYTES_PER_SAMPLE(24));
tstamp->sampling_rate = fw_tstamp.sampling_frequency;
dev_dbg(dev, "PCM = %u\n", tstamp->pcm_io_frames);
@@ -572,6 +596,35 @@ static int sst_stream_drop(struct device *dev, int str_id)
return sst_drop_stream(ctx, str_id);
}
+static int sst_stream_pause(struct device *dev, int str_id)
+{
+ struct stream_info *str_info;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (ctx->sst_state != SST_FW_RUNNING)
+ return 0;
+
+ str_info = get_stream_info(ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+
+ return sst_pause_stream(ctx, str_id);
+}
+
+static int sst_stream_resume(struct device *dev, int str_id)
+{
+ struct stream_info *str_info;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (ctx->sst_state != SST_FW_RUNNING)
+ return 0;
+
+ str_info = get_stream_info(ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ return sst_resume_stream(ctx, str_id);
+}
+
static int sst_stream_init(struct device *dev, struct pcm_stream_info *str_info)
{
int str_id = 0;
@@ -633,6 +686,8 @@ static struct sst_ops pcm_ops = {
.stream_init = sst_stream_init,
.stream_start = sst_stream_start,
.stream_drop = sst_stream_drop,
+ .stream_pause = sst_stream_pause,
+ .stream_pause_release = sst_stream_resume,
.stream_read_tstamp = sst_read_timestamp,
.send_byte_stream = sst_send_byte_stream,
.close = sst_close_pcm_stream,
diff --git a/sound/soc/intel/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 484e60978477..5a278618466c 100644
--- a/sound/soc/intel/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -32,7 +32,7 @@
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
-#include "../sst-dsp.h"
+#include "../../common/sst-dsp.h"
struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
u32 msg_id, u32 drv_id)
diff --git a/sound/soc/intel/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index 7888cd707853..33917146d9c4 100644
--- a/sound/soc/intel/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -37,9 +37,17 @@
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
-#include "../sst-dsp.h"
+#include "../../common/sst-dsp.h"
-static inline void memcpy32_toio(void __iomem *dst, const void *src, int count)
+void memcpy32_toio(void __iomem *dst, const void *src, int count)
+{
+ /* __iowrite32_copy uses 32-bit count values so divide by 4 for
+ * right count in words
+ */
+ __iowrite32_copy(dst, src, count/4);
+}
+
+void memcpy32_fromio(void *dst, const void __iomem *src, int count)
{
/* __iowrite32_copy uses 32-bit count values so divide by 4 for
* right count in words
diff --git a/sound/soc/intel/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c
index 3a0b3bf0af97..3a0b3bf0af97 100644
--- a/sound/soc/intel/sst/sst_pci.c
+++ b/sound/soc/intel/atom/sst/sst_pci.c
diff --git a/sound/soc/intel/sst/sst_pvt.c b/sound/soc/intel/atom/sst/sst_pvt.c
index 4b7720864492..adb32fefd693 100644
--- a/sound/soc/intel/sst/sst_pvt.c
+++ b/sound/soc/intel/atom/sst/sst_pvt.c
@@ -34,7 +34,7 @@
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
-#include "../sst-dsp.h"
+#include "../../common/sst-dsp.h"
int sst_shim_write(void __iomem *addr, int offset, int value)
{
@@ -111,30 +111,6 @@ int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
}
-unsigned long long read_shim_data(struct intel_sst_drv *sst, int addr)
-{
- unsigned long long val = 0;
-
- switch (sst->dev_id) {
- case SST_MRFLD_PCI_ID:
- case SST_BYT_ACPI_ID:
- val = sst_shim_read64(sst->shim, addr);
- break;
- }
- return val;
-}
-
-void write_shim_data(struct intel_sst_drv *sst, int addr,
- unsigned long long data)
-{
- switch (sst->dev_id) {
- case SST_MRFLD_PCI_ID:
- case SST_BYT_ACPI_ID:
- sst_shim_write64(sst->shim, addr, (u64) data);
- break;
- }
-}
-
/*
* sst_wait_timeout - wait on event for timeout
*
diff --git a/sound/soc/intel/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index dae2a41997aa..a74c64c7053c 100644
--- a/sound/soc/intel/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -31,7 +31,7 @@
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
-#include "../sst-dsp.h"
+#include "../../common/sst-dsp.h"
int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params)
{
diff --git a/sound/soc/intel/baytrail/Makefile b/sound/soc/intel/baytrail/Makefile
new file mode 100644
index 000000000000..488408cadf6d
--- /dev/null
+++ b/sound/soc/intel/baytrail/Makefile
@@ -0,0 +1,4 @@
+snd-soc-sst-baytrail-pcm-objs := \
+ sst-baytrail-ipc.o sst-baytrail-pcm.o sst-baytrail-dsp.o
+
+obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += snd-soc-sst-baytrail-pcm.o
diff --git a/sound/soc/intel/sst-baytrail-dsp.c b/sound/soc/intel/baytrail/sst-baytrail-dsp.c
index 5a9e56700f31..01d023cc05dd 100644
--- a/sound/soc/intel/sst-baytrail-dsp.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-dsp.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/firmware.h>
-#include "sst-dsp.h"
-#include "sst-dsp-priv.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
#include "sst-baytrail-ipc.h"
#define SST_BYT_FW_SIGNATURE_SIZE 4
diff --git a/sound/soc/intel/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index b4ad98c43e5c..a839dbfa5218 100644
--- a/sound/soc/intel/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -29,8 +29,9 @@
#include <asm/div64.h>
#include "sst-baytrail-ipc.h"
-#include "sst-dsp.h"
-#include "sst-dsp-priv.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "../common/sst-ipc.h"
/* IPC message timeout */
#define IPC_TIMEOUT_MSECS 300
@@ -142,23 +143,6 @@ struct sst_byt_fw_init {
u8 debug_info;
} __packed;
-/* driver internal IPC message structure */
-struct ipc_message {
- struct list_head list;
- u64 header;
-
- /* direction wrt host CPU */
- char tx_data[SST_BYT_IPC_MAX_PAYLOAD_SIZE];
- size_t tx_size;
- char rx_data[SST_BYT_IPC_MAX_PAYLOAD_SIZE];
- size_t rx_size;
-
- wait_queue_head_t waitq;
- bool complete;
- bool wait;
- int errno;
-};
-
struct sst_byt_stream;
struct sst_byt;
@@ -195,14 +179,7 @@ struct sst_byt {
struct sst_fw *fw;
/* IPC messaging */
- struct list_head tx_list;
- struct list_head rx_list;
- struct list_head empty_list;
- wait_queue_head_t wait_txq;
- struct task_struct *tx_thread;
- struct kthread_worker kworker;
- struct kthread_work kwork;
- struct ipc_message *msg;
+ struct sst_generic_ipc ipc;
};
static inline u64 sst_byt_header(int msg_id, int data, bool large, int str_id)
@@ -246,209 +223,6 @@ static struct sst_byt_stream *sst_byt_get_stream(struct sst_byt *byt,
return NULL;
}
-static void sst_byt_ipc_shim_dbg(struct sst_byt *byt, const char *text)
-{
- struct sst_dsp *sst = byt->dsp;
- u64 isr, ipcd, imrx, ipcx;
-
- ipcx = sst_dsp_shim_read64_unlocked(sst, SST_IPCX);
- isr = sst_dsp_shim_read64_unlocked(sst, SST_ISRX);
- ipcd = sst_dsp_shim_read64_unlocked(sst, SST_IPCD);
- imrx = sst_dsp_shim_read64_unlocked(sst, SST_IMRX);
-
- dev_err(byt->dev,
- "ipc: --%s-- ipcx 0x%llx isr 0x%llx ipcd 0x%llx imrx 0x%llx\n",
- text, ipcx, isr, ipcd, imrx);
-}
-
-/* locks held by caller */
-static struct ipc_message *sst_byt_msg_get_empty(struct sst_byt *byt)
-{
- struct ipc_message *msg = NULL;
-
- if (!list_empty(&byt->empty_list)) {
- msg = list_first_entry(&byt->empty_list,
- struct ipc_message, list);
- list_del(&msg->list);
- }
-
- return msg;
-}
-
-static void sst_byt_ipc_tx_msgs(struct kthread_work *work)
-{
- struct sst_byt *byt =
- container_of(work, struct sst_byt, kwork);
- struct ipc_message *msg;
- u64 ipcx;
- unsigned long flags;
-
- spin_lock_irqsave(&byt->dsp->spinlock, flags);
- if (list_empty(&byt->tx_list)) {
- spin_unlock_irqrestore(&byt->dsp->spinlock, flags);
- return;
- }
-
- /* if the DSP is busy we will TX messages after IRQ */
- ipcx = sst_dsp_shim_read64_unlocked(byt->dsp, SST_IPCX);
- if (ipcx & SST_BYT_IPCX_BUSY) {
- spin_unlock_irqrestore(&byt->dsp->spinlock, flags);
- return;
- }
-
- msg = list_first_entry(&byt->tx_list, struct ipc_message, list);
-
- list_move(&msg->list, &byt->rx_list);
-
- /* send the message */
- if (msg->header & IPC_HEADER_LARGE(true))
- sst_dsp_outbox_write(byt->dsp, msg->tx_data, msg->tx_size);
- sst_dsp_shim_write64_unlocked(byt->dsp, SST_IPCX, msg->header);
-
- spin_unlock_irqrestore(&byt->dsp->spinlock, flags);
-}
-
-static inline void sst_byt_tx_msg_reply_complete(struct sst_byt *byt,
- struct ipc_message *msg)
-{
- msg->complete = true;
-
- if (!msg->wait)
- list_add_tail(&msg->list, &byt->empty_list);
- else
- wake_up(&msg->waitq);
-}
-
-static void sst_byt_drop_all(struct sst_byt *byt)
-{
- struct ipc_message *msg, *tmp;
- unsigned long flags;
-
- /* drop all TX and Rx messages before we stall + reset DSP */
- spin_lock_irqsave(&byt->dsp->spinlock, flags);
- list_for_each_entry_safe(msg, tmp, &byt->tx_list, list) {
- list_move(&msg->list, &byt->empty_list);
- }
-
- list_for_each_entry_safe(msg, tmp, &byt->rx_list, list) {
- list_move(&msg->list, &byt->empty_list);
- }
-
- spin_unlock_irqrestore(&byt->dsp->spinlock, flags);
-}
-
-static int sst_byt_tx_wait_done(struct sst_byt *byt, struct ipc_message *msg,
- void *rx_data)
-{
- unsigned long flags;
- int ret;
-
- /* wait for DSP completion */
- ret = wait_event_timeout(msg->waitq, msg->complete,
- msecs_to_jiffies(IPC_TIMEOUT_MSECS));
-
- spin_lock_irqsave(&byt->dsp->spinlock, flags);
- if (ret == 0) {
- list_del(&msg->list);
- sst_byt_ipc_shim_dbg(byt, "message timeout");
-
- ret = -ETIMEDOUT;
- } else {
-
- /* copy the data returned from DSP */
- if (msg->rx_size)
- memcpy(rx_data, msg->rx_data, msg->rx_size);
- ret = msg->errno;
- }
-
- list_add_tail(&msg->list, &byt->empty_list);
- spin_unlock_irqrestore(&byt->dsp->spinlock, flags);
- return ret;
-}
-
-static int sst_byt_ipc_tx_message(struct sst_byt *byt, u64 header,
- void *tx_data, size_t tx_bytes,
- void *rx_data, size_t rx_bytes, int wait)
-{
- unsigned long flags;
- struct ipc_message *msg;
-
- spin_lock_irqsave(&byt->dsp->spinlock, flags);
-
- msg = sst_byt_msg_get_empty(byt);
- if (msg == NULL) {
- spin_unlock_irqrestore(&byt->dsp->spinlock, flags);
- return -EBUSY;
- }
-
- msg->header = header;
- msg->tx_size = tx_bytes;
- msg->rx_size = rx_bytes;
- msg->wait = wait;
- msg->errno = 0;
- msg->complete = false;
-
- if (tx_bytes) {
- /* msg content = lower 32-bit of the header + data */
- *(u32 *)msg->tx_data = (u32)(header & (u32)-1);
- memcpy(msg->tx_data + sizeof(u32), tx_data, tx_bytes);
- msg->tx_size += sizeof(u32);
- }
-
- list_add_tail(&msg->list, &byt->tx_list);
- spin_unlock_irqrestore(&byt->dsp->spinlock, flags);
-
- queue_kthread_work(&byt->kworker, &byt->kwork);
-
- if (wait)
- return sst_byt_tx_wait_done(byt, msg, rx_data);
- else
- return 0;
-}
-
-static inline int sst_byt_ipc_tx_msg_wait(struct sst_byt *byt, u64 header,
- void *tx_data, size_t tx_bytes,
- void *rx_data, size_t rx_bytes)
-{
- return sst_byt_ipc_tx_message(byt, header, tx_data, tx_bytes,
- rx_data, rx_bytes, 1);
-}
-
-static inline int sst_byt_ipc_tx_msg_nowait(struct sst_byt *byt, u64 header,
- void *tx_data, size_t tx_bytes)
-{
- return sst_byt_ipc_tx_message(byt, header, tx_data, tx_bytes,
- NULL, 0, 0);
-}
-
-static struct ipc_message *sst_byt_reply_find_msg(struct sst_byt *byt,
- u64 header)
-{
- struct ipc_message *msg = NULL, *_msg;
- u64 mask;
-
- /* match reply to message sent based on msg and stream IDs */
- mask = IPC_HEADER_MSG_ID_MASK |
- IPC_HEADER_STR_ID_MASK << IPC_HEADER_STR_ID_SHIFT;
- header &= mask;
-
- if (list_empty(&byt->rx_list)) {
- dev_err(byt->dev,
- "ipc: rx list is empty but received 0x%llx\n", header);
- goto out;
- }
-
- list_for_each_entry(_msg, &byt->rx_list, list) {
- if ((_msg->header & mask) == header) {
- msg = _msg;
- break;
- }
- }
-
-out:
- return msg;
-}
-
static void sst_byt_stream_update(struct sst_byt *byt, struct ipc_message *msg)
{
struct sst_byt_stream *stream;
@@ -477,7 +251,7 @@ static int sst_byt_process_reply(struct sst_byt *byt, u64 header)
{
struct ipc_message *msg;
- msg = sst_byt_reply_find_msg(byt, header);
+ msg = sst_ipc_reply_find_msg(&byt->ipc, header);
if (msg == NULL)
return 1;
@@ -491,7 +265,7 @@ static int sst_byt_process_reply(struct sst_byt *byt, u64 header)
list_del(&msg->list);
/* wake up */
- sst_byt_tx_msg_reply_complete(byt, msg);
+ sst_ipc_tx_msg_reply_complete(&byt->ipc, msg);
return 1;
}
@@ -538,6 +312,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
{
struct sst_dsp *sst = (struct sst_dsp *) context;
struct sst_byt *byt = sst_dsp_get_thread_context(sst);
+ struct sst_generic_ipc *ipc = &byt->ipc;
u64 header;
unsigned long flags;
@@ -569,7 +344,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- queue_kthread_work(&byt->kworker, &byt->kwork);
+ queue_kthread_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
@@ -656,7 +431,8 @@ int sst_byt_stream_commit(struct sst_byt *byt, struct sst_byt_stream *stream)
header = sst_byt_header(IPC_IA_ALLOC_STREAM,
sizeof(*str_req) + sizeof(u32),
true, stream->str_id);
- ret = sst_byt_ipc_tx_msg_wait(byt, header, str_req, sizeof(*str_req),
+ ret = sst_ipc_tx_message_wait(&byt->ipc, header, str_req,
+ sizeof(*str_req),
reply, sizeof(*reply));
if (ret < 0) {
dev_err(byt->dev, "ipc: error stream commit failed\n");
@@ -679,7 +455,7 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
goto out;
header = sst_byt_header(IPC_IA_FREE_STREAM, 0, false, stream->str_id);
- ret = sst_byt_ipc_tx_msg_wait(byt, header, NULL, 0, NULL, 0);
+ ret = sst_ipc_tx_message_wait(&byt->ipc, header, NULL, 0, NULL, 0);
if (ret < 0) {
dev_err(byt->dev, "ipc: free stream %d failed\n",
stream->str_id);
@@ -703,9 +479,11 @@ static int sst_byt_stream_operations(struct sst_byt *byt, int type,
header = sst_byt_header(type, 0, false, stream_id);
if (wait)
- return sst_byt_ipc_tx_msg_wait(byt, header, NULL, 0, NULL, 0);
+ return sst_ipc_tx_message_wait(&byt->ipc, header, NULL,
+ 0, NULL, 0);
else
- return sst_byt_ipc_tx_msg_nowait(byt, header, NULL, 0);
+ return sst_ipc_tx_message_nowait(&byt->ipc, header,
+ NULL, 0);
}
/* stream ALSA trigger operations */
@@ -725,7 +503,7 @@ int sst_byt_stream_start(struct sst_byt *byt, struct sst_byt_stream *stream,
tx_msg = &start_stream;
size = sizeof(start_stream);
- ret = sst_byt_ipc_tx_msg_nowait(byt, header, tx_msg, size);
+ ret = sst_ipc_tx_message_nowait(&byt->ipc, header, tx_msg, size);
if (ret < 0)
dev_err(byt->dev, "ipc: error failed to start stream %d\n",
stream->str_id);
@@ -790,23 +568,6 @@ int sst_byt_get_dsp_position(struct sst_byt *byt,
return do_div(fw_tstamp.ring_buffer_counter, buffer_size);
}
-static int msg_empty_list_init(struct sst_byt *byt)
-{
- struct ipc_message *msg;
- int i;
-
- byt->msg = kzalloc(sizeof(*msg) * IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
- if (byt->msg == NULL)
- return -ENOMEM;
-
- for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
- init_waitqueue_head(&byt->msg[i].waitq);
- list_add(&byt->msg[i].list, &byt->empty_list);
- }
-
- return 0;
-}
-
struct sst_dsp *sst_byt_get_dsp(struct sst_byt *byt)
{
return byt->dsp;
@@ -823,7 +584,7 @@ int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata)
dev_dbg(byt->dev, "dsp reset\n");
sst_dsp_reset(byt->dsp);
- sst_byt_drop_all(byt);
+ sst_ipc_drop_all(&byt->ipc);
dev_dbg(byt->dev, "dsp in reset\n");
dev_dbg(byt->dev, "free all blocks and unload fw\n");
@@ -876,9 +637,52 @@ int sst_byt_dsp_wait_for_ready(struct device *dev, struct sst_pdata *pdata)
}
EXPORT_SYMBOL_GPL(sst_byt_dsp_wait_for_ready);
+static void byt_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
+{
+ if (msg->header & IPC_HEADER_LARGE(true))
+ sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
+
+ sst_dsp_shim_write64_unlocked(ipc->dsp, SST_IPCX, msg->header);
+}
+
+static void byt_shim_dbg(struct sst_generic_ipc *ipc, const char *text)
+{
+ struct sst_dsp *sst = ipc->dsp;
+ u64 isr, ipcd, imrx, ipcx;
+
+ ipcx = sst_dsp_shim_read64_unlocked(sst, SST_IPCX);
+ isr = sst_dsp_shim_read64_unlocked(sst, SST_ISRX);
+ ipcd = sst_dsp_shim_read64_unlocked(sst, SST_IPCD);
+ imrx = sst_dsp_shim_read64_unlocked(sst, SST_IMRX);
+
+ dev_err(ipc->dev,
+ "ipc: --%s-- ipcx 0x%llx isr 0x%llx ipcd 0x%llx imrx 0x%llx\n",
+ text, ipcx, isr, ipcd, imrx);
+}
+
+static void byt_tx_data_copy(struct ipc_message *msg, char *tx_data,
+ size_t tx_size)
+{
+ /* msg content = lower 32-bit of the header + data */
+ *(u32 *)msg->tx_data = (u32)(msg->header & (u32)-1);
+ memcpy(msg->tx_data + sizeof(u32), tx_data, tx_size);
+ msg->tx_size += sizeof(u32);
+}
+
+static u64 byt_reply_msg_match(u64 header, u64 *mask)
+{
+ /* match reply to message sent based on msg and stream IDs */
+ *mask = IPC_HEADER_MSG_ID_MASK |
+ IPC_HEADER_STR_ID_MASK << IPC_HEADER_STR_ID_SHIFT;
+ header &= *mask;
+
+ return header;
+}
+
int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
{
struct sst_byt *byt;
+ struct sst_generic_ipc *ipc;
struct sst_fw *byt_sst_fw;
struct sst_byt_fw_init init;
int err;
@@ -889,39 +693,30 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
if (byt == NULL)
return -ENOMEM;
- byt->dev = dev;
- INIT_LIST_HEAD(&byt->stream_list);
- INIT_LIST_HEAD(&byt->tx_list);
- INIT_LIST_HEAD(&byt->rx_list);
- INIT_LIST_HEAD(&byt->empty_list);
- init_waitqueue_head(&byt->boot_wait);
- init_waitqueue_head(&byt->wait_txq);
+ ipc = &byt->ipc;
+ ipc->dev = dev;
+ ipc->ops.tx_msg = byt_tx_msg;
+ ipc->ops.shim_dbg = byt_shim_dbg;
+ ipc->ops.tx_data_copy = byt_tx_data_copy;
+ ipc->ops.reply_msg_match = byt_reply_msg_match;
- err = msg_empty_list_init(byt);
- if (err < 0)
- return -ENOMEM;
-
- /* start the IPC message thread */
- init_kthread_worker(&byt->kworker);
- byt->tx_thread = kthread_run(kthread_worker_fn,
- &byt->kworker, "%s",
- dev_name(byt->dev));
- if (IS_ERR(byt->tx_thread)) {
- err = PTR_ERR(byt->tx_thread);
- dev_err(byt->dev, "error failed to create message TX task\n");
- goto err_free_msg;
- }
- init_kthread_work(&byt->kwork, sst_byt_ipc_tx_msgs);
+ err = sst_ipc_init(ipc);
+ if (err != 0)
+ goto ipc_init_err;
+ INIT_LIST_HEAD(&byt->stream_list);
+ init_waitqueue_head(&byt->boot_wait);
byt_dev.thread_context = byt;
/* init SST shim */
byt->dsp = sst_dsp_new(dev, &byt_dev, pdata);
if (byt->dsp == NULL) {
err = -ENODEV;
- goto dsp_err;
+ goto dsp_new_err;
}
+ ipc->dsp = byt->dsp;
+
/* keep the DSP in reset state for base FW loading */
sst_dsp_reset(byt->dsp);
@@ -961,10 +756,9 @@ boot_err:
sst_fw_free(byt_sst_fw);
fw_err:
sst_dsp_free(byt->dsp);
-dsp_err:
- kthread_stop(byt->tx_thread);
-err_free_msg:
- kfree(byt->msg);
+dsp_new_err:
+ sst_ipc_fini(ipc);
+ipc_init_err:
return err;
}
@@ -977,7 +771,6 @@ void sst_byt_dsp_free(struct device *dev, struct sst_pdata *pdata)
sst_dsp_reset(byt->dsp);
sst_fw_free_all(byt->dsp);
sst_dsp_free(byt->dsp);
- kthread_stop(byt->tx_thread);
- kfree(byt->msg);
+ sst_ipc_fini(&byt->ipc);
}
EXPORT_SYMBOL_GPL(sst_byt_dsp_free);
diff --git a/sound/soc/intel/sst-baytrail-ipc.h b/sound/soc/intel/baytrail/sst-baytrail-ipc.h
index 8faff6dcf25d..8faff6dcf25d 100644
--- a/sound/soc/intel/sst-baytrail-ipc.h
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.h
diff --git a/sound/soc/intel/sst-baytrail-pcm.c b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
index 224c49c9f135..79547bec558b 100644
--- a/sound/soc/intel/sst-baytrail-pcm.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
@@ -20,8 +20,8 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "sst-baytrail-ipc.h"
-#include "sst-dsp-priv.h"
-#include "sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "../common/sst-dsp.h"
#define BYT_PCM_COUNT 2
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
new file mode 100644
index 000000000000..f8237f0044eb
--- /dev/null
+++ b/sound/soc/intel/boards/Makefile
@@ -0,0 +1,15 @@
+snd-soc-sst-haswell-objs := haswell.o
+snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
+snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
+snd-soc-sst-broadwell-objs := broadwell.o
+snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
+snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
+snd-soc-sst-cht-bsw-rt5645-objs := cht_bsw_rt5645.o
+
+obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
+obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
+obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
+obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
+obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-rt5640.o
+obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
+obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH) += snd-soc-sst-cht-bsw-rt5645.o
diff --git a/sound/soc/intel/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 9cf7d01479ad..8bafaf6ceab1 100644
--- a/sound/soc/intel/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -22,10 +22,10 @@
#include <sound/jack.h>
#include <sound/pcm_params.h>
-#include "sst-dsp.h"
-#include "sst-haswell-ipc.h"
+#include "../common/sst-dsp.h"
+#include "../haswell/sst-haswell-ipc.h"
-#include "../codecs/rt286.h"
+#include "../../codecs/rt286.h"
static struct snd_soc_jack broadwell_headset;
/* Headset jack detection DAPM pins */
@@ -80,15 +80,9 @@ static int broadwell_rt286_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
int ret = 0;
- ret = snd_soc_jack_new(codec, "Headset",
- SND_JACK_HEADSET | SND_JACK_BTN_0, &broadwell_headset);
-
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_pins(&broadwell_headset,
- ARRAY_SIZE(broadwell_headset_pins),
- broadwell_headset_pins);
+ ret = snd_soc_card_jack_new(rtd->card, "Headset",
+ SND_JACK_HEADSET | SND_JACK_BTN_0, &broadwell_headset,
+ broadwell_headset_pins, ARRAY_SIZE(broadwell_headset_pins));
if (ret)
return ret;
@@ -110,9 +104,7 @@ static int broadwell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = 2;
/* set SSP0 to 16 bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S16_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
@@ -227,6 +219,32 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
},
};
+static int broadwell_suspend(struct snd_soc_card *card)
+{
+ struct snd_soc_codec *codec;
+
+ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ if (!strcmp(codec->component.name, "i2c-INT343A:00")) {
+ dev_dbg(codec->dev, "disabling jack detect before going to suspend.\n");
+ rt286_mic_detect(codec, NULL);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int broadwell_resume(struct snd_soc_card *card)
+{
+ struct snd_soc_codec *codec;
+
+ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ if (!strcmp(codec->component.name, "i2c-INT343A:00")) {
+ dev_dbg(codec->dev, "enabling jack detect for resume.\n");
+ rt286_mic_detect(codec, &broadwell_headset);
+ break;
+ }
+ }
+ return 0;
+}
+
/* broadwell audio machine driver for WPT + RT286S */
static struct snd_soc_card broadwell_rt286 = {
.name = "broadwell-rt286",
@@ -240,6 +258,8 @@ static struct snd_soc_card broadwell_rt286 = {
.dapm_routes = broadwell_rt286_map,
.num_dapm_routes = ARRAY_SIZE(broadwell_rt286_map),
.fully_routed = true,
+ .suspend_pre = broadwell_suspend,
+ .resume_post = broadwell_resume,
};
static int broadwell_audio_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/byt-max98090.c b/sound/soc/intel/boards/byt-max98090.c
index 9832afe7d22c..7ab8cc9fbfd5 100644
--- a/sound/soc/intel/byt-max98090.c
+++ b/sound/soc/intel/boards/byt-max98090.c
@@ -24,7 +24,7 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "../codecs/max98090.h"
+#include "../../codecs/max98090.h"
struct byt_max98090_private {
struct snd_soc_jack jack;
@@ -84,7 +84,6 @@ static struct snd_soc_jack_gpio hs_jack_gpios[] = {
static int byt_max98090_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
- struct snd_soc_codec *codec = runtime->codec;
struct snd_soc_card *card = runtime->card;
struct byt_max98090_private *drv = snd_soc_card_get_drvdata(card);
struct snd_soc_jack *jack = &drv->jack;
@@ -100,13 +99,9 @@ static int byt_max98090_init(struct snd_soc_pcm_runtime *runtime)
}
/* Enable jack detection */
- ret = snd_soc_jack_new(codec, "Headset",
- SND_JACK_LINEOUT | SND_JACK_HEADSET, jack);
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_pins(jack, ARRAY_SIZE(hs_jack_pins),
- hs_jack_pins);
+ ret = snd_soc_card_jack_new(runtime->card, "Headset",
+ SND_JACK_LINEOUT | SND_JACK_HEADSET, jack,
+ hs_jack_pins, ARRAY_SIZE(hs_jack_pins));
if (ret)
return ret;
diff --git a/sound/soc/intel/byt-rt5640.c b/sound/soc/intel/boards/byt-rt5640.c
index 354eaad886e1..ae89b9b966d9 100644
--- a/sound/soc/intel/byt-rt5640.c
+++ b/sound/soc/intel/boards/byt-rt5640.c
@@ -23,9 +23,9 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "../codecs/rt5640.h"
+#include "../../codecs/rt5640.h"
-#include "sst-dsp.h"
+#include "../common/sst-dsp.h"
static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 59308629043e..7f55d59024a8 100644
--- a/sound/soc/intel/bytcr_dpcm_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -26,8 +26,8 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
-#include "../codecs/rt5640.h"
-#include "sst-atom-controls.h"
+#include "../../codecs/rt5640.h"
+#include "../atom/sst-atom-controls.h"
static const struct snd_soc_dapm_widget byt_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
@@ -113,9 +113,7 @@ static int byt_codec_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = 2;
/* set SSP2 to 24-bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S24_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
diff --git a/sound/soc/intel/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index bd29617a9ab9..20a28b22e30f 100644
--- a/sound/soc/intel/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -27,8 +27,8 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "../codecs/rt5645.h"
-#include "sst-atom-controls.h"
+#include "../../codecs/rt5645.h"
+#include "../atom/sst-atom-controls.h"
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "rt5645-aif1"
@@ -169,17 +169,17 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
return ret;
}
- ret = snd_soc_jack_new(codec, "Headphone Jack",
- SND_JACK_HEADPHONE,
- &ctx->hp_jack);
+ ret = snd_soc_card_jack_new(runtime->card, "Headphone Jack",
+ SND_JACK_HEADPHONE, &ctx->hp_jack,
+ NULL, 0);
if (ret) {
dev_err(runtime->dev, "HP jack creation failed %d\n", ret);
return ret;
}
- ret = snd_soc_jack_new(codec, "Mic Jack",
- SND_JACK_MICROPHONE,
- &ctx->mic_jack);
+ ret = snd_soc_card_jack_new(runtime->card, "Mic Jack",
+ SND_JACK_MICROPHONE, &ctx->mic_jack,
+ NULL, 0);
if (ret) {
dev_err(runtime->dev, "Mic jack creation failed %d\n", ret);
return ret;
@@ -203,9 +203,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = 2;
/* set SSP2 to 24-bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S24_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
diff --git a/sound/soc/intel/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index ff016621583a..2c9cc5be439e 100644
--- a/sound/soc/intel/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -22,13 +22,28 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
-#include "../codecs/rt5670.h"
-#include "sst-atom-controls.h"
+#include <sound/jack.h>
+#include "../../codecs/rt5670.h"
+#include "../atom/sst-atom-controls.h"
/* The platform clock #3 outputs 19.2Mhz clock to codec as I2S MCLK */
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "rt5670-aif1"
+static struct snd_soc_jack cht_bsw_headset;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin cht_bsw_headset_pins[] = {
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+
static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
{
int i;
@@ -50,6 +65,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_soc_card *card = dapm->card;
struct snd_soc_dai *codec_dai;
+ int ret;
codec_dai = cht_get_codec_dai(card);
if (!codec_dai) {
@@ -57,17 +73,31 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
return -EIO;
}
- if (!SND_SOC_DAPM_EVENT_OFF(event))
- return 0;
-
- /* Set codec sysclk source to its internal clock because codec PLL will
- * be off when idle and MCLK will also be off by ACPI when codec is
- * runtime suspended. Codec needs clock for jack detection and button
- * press.
- */
- snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_RCCLK,
- 0, SND_SOC_CLOCK_IN);
-
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ /* set codec PLL source to the 19.2MHz platform clock (MCLK) */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5670_PLL1_S_MCLK,
+ CHT_PLAT_CLK_3_HZ, 48000 * 512);
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec pll: %d\n", ret);
+ return ret;
+ }
+
+ /* set codec sysclk source to PLL */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_PLL1,
+ 48000 * 512, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+ } else {
+ /* Set codec sysclk source to its internal clock because codec
+ * PLL will be off when idle and MCLK will also be off by ACPI
+ * when codec is runtime suspended. Codec needs clock for jack
+ * detection and button press.
+ */
+ snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_RCCLK,
+ 48000 * 512, SND_SOC_CLOCK_IN);
+ }
return 0;
}
@@ -77,7 +107,8 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
- platform_clock_control, SND_SOC_DAPM_POST_PMD),
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route cht_audio_map[] = {
@@ -162,6 +193,15 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
| RT5670_AD_MONO_L_FILTER
| RT5670_AD_MONO_R_FILTER,
RT5670_CLK_SEL_I2S1_ASRC);
+
+ ret = snd_soc_card_jack_new(runtime->card, "Headset",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2, &cht_bsw_headset,
+ cht_bsw_headset_pins, ARRAY_SIZE(cht_bsw_headset_pins));
+ if (ret)
+ return ret;
+
+ rt5670_set_jack_detect(codec, &cht_bsw_headset);
return 0;
}
@@ -178,9 +218,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = 2;
/* set SSP2 to 24-bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S24_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
@@ -217,7 +255,7 @@ static struct snd_soc_dai_link cht_dailink[] = {
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.platform_name = "sst-mfld-platform",
- .ignore_suspend = 1,
+ .nonatomic = true,
.dynamic = 1,
.dpcm_playback = 1,
.dpcm_capture = 1,
@@ -240,19 +278,48 @@ static struct snd_soc_dai_link cht_dailink[] = {
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
+ .nonatomic = true,
.codec_dai_name = "rt5670-aif1",
.codec_name = "i2c-10EC5670:00",
.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.init = cht_codec_init,
.be_hw_params_fixup = cht_codec_fixup,
- .ignore_suspend = 1,
.dpcm_playback = 1,
.dpcm_capture = 1,
.ops = &cht_be_ssp2_ops,
},
};
+static int cht_suspend_pre(struct snd_soc_card *card)
+{
+ struct snd_soc_codec *codec;
+
+ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ if (!strcmp(codec->component.name, "i2c-10EC5670:00")) {
+ dev_dbg(codec->dev, "disabling jack detect before going to suspend.\n");
+ rt5670_jack_suspend(codec);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int cht_resume_post(struct snd_soc_card *card)
+{
+ struct snd_soc_codec *codec;
+
+ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ if (!strcmp(codec->component.name, "i2c-10EC5670:00")) {
+ dev_dbg(codec->dev, "enabling jack detect for resume.\n");
+ rt5670_jack_resume(codec);
+ break;
+ }
+ }
+
+ return 0;
+}
+
/* SoC card */
static struct snd_soc_card snd_soc_card_cht = {
.name = "cherrytrailcraudio",
@@ -264,6 +331,8 @@ static struct snd_soc_card snd_soc_card_cht = {
.num_dapm_routes = ARRAY_SIZE(cht_audio_map),
.controls = cht_mc_controls,
.num_controls = ARRAY_SIZE(cht_mc_controls),
+ .suspend_pre = cht_suspend_pre,
+ .resume_post = cht_resume_post,
};
static int snd_cht_mc_probe(struct platform_device *pdev)
@@ -285,7 +354,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_cht_mc_driver = {
.driver = {
.name = "cht-bsw-rt5672",
- .pm = &snd_soc_pm_ops,
},
.probe = snd_cht_mc_probe,
};
diff --git a/sound/soc/intel/haswell.c b/sound/soc/intel/boards/haswell.c
index 35edf51a52aa..22558572cb9c 100644
--- a/sound/soc/intel/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -21,10 +21,10 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
-#include "sst-dsp.h"
-#include "sst-haswell-ipc.h"
+#include "../common/sst-dsp.h"
+#include "../haswell/sst-haswell-ipc.h"
-#include "../codecs/rt5640.h"
+#include "../../codecs/rt5640.h"
/* Haswell ULT platforms have a Headphone and Mic jack */
static const struct snd_soc_dapm_widget haswell_widgets[] = {
@@ -56,9 +56,7 @@ static int haswell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = 2;
/* set SSP0 to 16 bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S16_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
diff --git a/sound/soc/intel/mfld_machine.c b/sound/soc/intel/boards/mfld_machine.c
index 90b7a57713a0..49c09a0add79 100644
--- a/sound/soc/intel/mfld_machine.c
+++ b/sound/soc/intel/boards/mfld_machine.c
@@ -228,10 +228,13 @@ static void mfld_jack_check(unsigned int intr_status)
{
struct mfld_jack_data jack_data;
+ if (!mfld_codec)
+ return;
+
jack_data.mfld_jack = &mfld_jack;
jack_data.intr_id = intr_status;
- sn95031_jack_detection(&jack_data);
+ sn95031_jack_detection(mfld_codec, &jack_data);
/* TODO: add american headset detection post gpiolib support */
}
@@ -240,8 +243,6 @@ static int mfld_init(struct snd_soc_pcm_runtime *runtime)
struct snd_soc_dapm_context *dapm = &runtime->card->dapm;
int ret_val;
- mfld_codec = runtime->codec;
-
/* default is earpiece pin, userspace sets it explicitly */
snd_soc_dapm_disable_pin(dapm, "Headphones");
/* default is lineout NC, userspace sets it explicitly */
@@ -254,20 +255,15 @@ static int mfld_init(struct snd_soc_pcm_runtime *runtime)
snd_soc_dapm_disable_pin(dapm, "LINEINR");
/* Headset and button jack detection */
- ret_val = snd_soc_jack_new(mfld_codec, "Intel(R) MID Audio Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1, &mfld_jack);
+ ret_val = snd_soc_card_jack_new(runtime->card,
+ "Intel(R) MID Audio Jack", SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1, &mfld_jack,
+ mfld_jack_pins, ARRAY_SIZE(mfld_jack_pins));
if (ret_val) {
pr_err("jack creation failed\n");
return ret_val;
}
- ret_val = snd_soc_jack_add_pins(&mfld_jack,
- ARRAY_SIZE(mfld_jack_pins), mfld_jack_pins);
- if (ret_val) {
- pr_err("adding jack pins failed\n");
- return ret_val;
- }
ret_val = snd_soc_jack_add_zones(&mfld_jack,
ARRAY_SIZE(mfld_zones), mfld_zones);
if (ret_val) {
@@ -275,6 +271,8 @@ static int mfld_init(struct snd_soc_pcm_runtime *runtime)
return ret_val;
}
+ mfld_codec = runtime->codec;
+
/* we want to check if anything is inserted at boot,
* so send a fake event to codec and it will read adc
* to find if anything is there or not */
@@ -359,8 +357,6 @@ static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
{
struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
- if (mfld_jack.codec == NULL)
- return IRQ_HANDLED;
mfld_jack_check(mc_drv_ctx->interrupt_status);
return IRQ_HANDLED;
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
new file mode 100644
index 000000000000..f24154ca4e98
--- /dev/null
+++ b/sound/soc/intel/common/Makefile
@@ -0,0 +1,7 @@
+snd-soc-sst-dsp-objs := sst-dsp.o sst-firmware.o
+snd-soc-sst-acpi-objs := sst-acpi.o
+snd-soc-sst-ipc-objs := sst-ipc.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
+obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
+
diff --git a/sound/soc/intel/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
index b3d84560fbb5..42f293f9c6e2 100644
--- a/sound/soc/intel/sst-acpi.c
+++ b/sound/soc/intel/common/sst-acpi.c
@@ -142,6 +142,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
sst_acpi->desc = desc;
sst_acpi->mach = mach;
+ sst_pdata->resindex_dma_base = desc->resindex_dma_base;
if (desc->resindex_dma_base >= 0) {
sst_pdata->dma_engine = desc->dma_engine;
sst_pdata->dma_base = desc->resindex_dma_base;
diff --git a/sound/soc/intel/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h
index b9da030e312d..396d54510350 100644
--- a/sound/soc/intel/sst-dsp-priv.h
+++ b/sound/soc/intel/common/sst-dsp-priv.h
@@ -173,6 +173,16 @@ struct sst_module_runtime_context {
};
/*
+ * Audio DSP Module State
+ */
+enum sst_module_state {
+ SST_MODULE_STATE_UNLOADED = 0, /* default state */
+ SST_MODULE_STATE_LOADED,
+ SST_MODULE_STATE_INITIALIZED, /* and inactive */
+ SST_MODULE_STATE_ACTIVE,
+};
+
+/*
* Audio DSP Generic Module.
*
* Each Firmware file can consist of 1..N modules. A module can span multiple
@@ -203,6 +213,9 @@ struct sst_module {
struct list_head list; /* DSP list of modules */
struct list_head list_fw; /* FW list of modules */
struct list_head runtime_list; /* list of runtime module objects*/
+
+ /* state */
+ enum sst_module_state state;
};
/*
diff --git a/sound/soc/intel/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 64e94212d2d2..64e94212d2d2 100644
--- a/sound/soc/intel/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
diff --git a/sound/soc/intel/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h
index f291e32f0077..96aeb2556ad4 100644
--- a/sound/soc/intel/sst-dsp.h
+++ b/sound/soc/intel/common/sst-dsp.h
@@ -28,7 +28,6 @@
/* Supported SST DMA Devices */
#define SST_DMA_TYPE_DW 1
-#define SST_DMA_TYPE_MID 2
/* autosuspend delay 5s*/
#define SST_RUNTIME_SUSPEND_DELAY (5 * 1000)
@@ -206,6 +205,7 @@ struct sst_pdata {
const struct firmware *fw;
/* DMA */
+ int resindex_dma_base; /* other DMA fields are invalid if this equals -1 */
u32 dma_base;
u32 dma_size;
int dma_engine;
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 5f71ef607a57..ebcca6dc48d1 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -221,8 +221,6 @@ int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
dma_cap_mask_t mask;
int ret;
- /* The Intel MID DMA engine driver needs the slave config set but
- * Synopsis DMA engine driver safely ignores the slave config */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -271,15 +269,16 @@ int sst_dma_new(struct sst_dsp *sst)
const char *dma_dev_name;
int ret = 0;
+ if (sst->pdata->resindex_dma_base == -1)
+ /* DMA is not used, return and squelch error messages */
+ return 0;
+
/* configure the correct platform data for whatever DMA engine
* is attached to the ADSP IP. */
switch (sst->pdata->dma_engine) {
case SST_DMA_TYPE_DW:
dma_dev_name = "dw_dmac";
break;
- case SST_DMA_TYPE_MID:
- dma_dev_name = "Intel MID DMA";
- break;
default:
dev_err(sst->dev, "error: invalid DMA engine %d\n",
sst->pdata->dma_engine);
@@ -498,6 +497,7 @@ struct sst_module *sst_module_new(struct sst_fw *sst_fw,
sst_module->scratch_size = template->scratch_size;
sst_module->persistent_size = template->persistent_size;
sst_module->entry = template->entry;
+ sst_module->state = SST_MODULE_STATE_UNLOADED;
INIT_LIST_HEAD(&sst_module->block_list);
INIT_LIST_HEAD(&sst_module->runtime_list);
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
new file mode 100644
index 000000000000..4b62a553823c
--- /dev/null
+++ b/sound/soc/intel/common/sst-ipc.c
@@ -0,0 +1,294 @@
+/*
+ * Intel SST generic IPC Support
+ *
+ * Copyright (C) 2015, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/kthread.h>
+#include <sound/asound.h>
+
+#include "sst-dsp.h"
+#include "sst-dsp-priv.h"
+#include "sst-ipc.h"
+
+/* IPC message timeout (msecs) */
+#define IPC_TIMEOUT_MSECS 300
+
+#define IPC_EMPTY_LIST_SIZE 8
+
+/* locks held by caller */
+static struct ipc_message *msg_get_empty(struct sst_generic_ipc *ipc)
+{
+ struct ipc_message *msg = NULL;
+
+ if (!list_empty(&ipc->empty_list)) {
+ msg = list_first_entry(&ipc->empty_list, struct ipc_message,
+ list);
+ list_del(&msg->list);
+ }
+
+ return msg;
+}
+
+static int tx_wait_done(struct sst_generic_ipc *ipc,
+ struct ipc_message *msg, void *rx_data)
+{
+ unsigned long flags;
+ int ret;
+
+ /* wait for DSP completion (in all cases at the moment, including pending) */
+ ret = wait_event_timeout(msg->waitq, msg->complete,
+ msecs_to_jiffies(IPC_TIMEOUT_MSECS));
+
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+ if (ret == 0) {
+ if (ipc->ops.shim_dbg != NULL)
+ ipc->ops.shim_dbg(ipc, "message timeout");
+
+ list_del(&msg->list);
+ ret = -ETIMEDOUT;
+ } else {
+
+ /* copy the data returned from DSP */
+ if (msg->rx_size)
+ memcpy(rx_data, msg->rx_data, msg->rx_size);
+ ret = msg->errno;
+ }
+
+ list_add_tail(&msg->list, &ipc->empty_list);
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return ret;
+}
+
+static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data,
+ size_t rx_bytes, int wait)
+{
+ struct ipc_message *msg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+
+ msg = msg_get_empty(ipc);
+ if (msg == NULL) {
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return -EBUSY;
+ }
+
+ msg->header = header;
+ msg->tx_size = tx_bytes;
+ msg->rx_size = rx_bytes;
+ msg->wait = wait;
+ msg->errno = 0;
+ msg->pending = false;
+ msg->complete = false;
+
+ if ((tx_bytes) && (ipc->ops.tx_data_copy != NULL))
+ ipc->ops.tx_data_copy(msg, tx_data, tx_bytes);
+
+ list_add_tail(&msg->list, &ipc->tx_list);
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+
+ queue_kthread_work(&ipc->kworker, &ipc->kwork);
+
+ if (wait)
+ return tx_wait_done(ipc, msg, rx_data);
+ else
+ return 0;
+}
+
+static int msg_empty_list_init(struct sst_generic_ipc *ipc)
+{
+ int i;
+
+ ipc->msg = kzalloc(sizeof(struct ipc_message) *
+ IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
+ if (ipc->msg == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
+ init_waitqueue_head(&ipc->msg[i].waitq);
+ list_add(&ipc->msg[i].list, &ipc->empty_list);
+ }
+
+ return 0;
+}
+
+static void ipc_tx_msgs(struct kthread_work *work)
+{
+ struct sst_generic_ipc *ipc =
+ container_of(work, struct sst_generic_ipc, kwork);
+ struct ipc_message *msg;
+ unsigned long flags;
+ u64 ipcx;
+
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+
+ if (list_empty(&ipc->tx_list) || ipc->pending) {
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return;
+ }
+
+ /* if the DSP is busy, we will TX messages after IRQ.
+ * also postpone if we are in the middle of processing completion irq */
+ ipcx = sst_dsp_shim_read_unlocked(ipc->dsp, SST_IPCX);
+ if (ipcx & (SST_IPCX_BUSY | SST_IPCX_DONE)) {
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return;
+ }
+
+ msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
+ list_move(&msg->list, &ipc->rx_list);
+
+ if (ipc->ops.tx_msg != NULL)
+ ipc->ops.tx_msg(ipc, msg);
+
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+}
+
+int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
+{
+ return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+ rx_data, rx_bytes, 1);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);
+
+int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes)
+{
+ return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+ NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);
+
+struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
+ u64 header)
+{
+ struct ipc_message *msg;
+ u64 mask;
+
+ if (ipc->ops.reply_msg_match != NULL)
+ header = ipc->ops.reply_msg_match(header, &mask);
+
+ if (list_empty(&ipc->rx_list)) {
+ dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
+ header);
+ return NULL;
+ }
+
+ list_for_each_entry(msg, &ipc->rx_list, list) {
+ if ((msg->header & mask) == header)
+ return msg;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_ipc_reply_find_msg);
+
+/* locks held by caller */
+void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
+ struct ipc_message *msg)
+{
+ msg->complete = true;
+
+ if (!msg->wait)
+ list_add_tail(&msg->list, &ipc->empty_list);
+ else
+ wake_up(&msg->waitq);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_msg_reply_complete);
+
+void sst_ipc_drop_all(struct sst_generic_ipc *ipc)
+{
+ struct ipc_message *msg, *tmp;
+ unsigned long flags;
+ int tx_drop_cnt = 0, rx_drop_cnt = 0;
+
+ /* drop all TX and Rx messages before we stall + reset DSP */
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+
+ list_for_each_entry_safe(msg, tmp, &ipc->tx_list, list) {
+ list_move(&msg->list, &ipc->empty_list);
+ tx_drop_cnt++;
+ }
+
+ list_for_each_entry_safe(msg, tmp, &ipc->rx_list, list) {
+ list_move(&msg->list, &ipc->empty_list);
+ rx_drop_cnt++;
+ }
+
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+
+ if (tx_drop_cnt || rx_drop_cnt)
+ dev_err(ipc->dev, "dropped IPC msg RX=%d, TX=%d\n",
+ tx_drop_cnt, rx_drop_cnt);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_drop_all);
+
+int sst_ipc_init(struct sst_generic_ipc *ipc)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&ipc->tx_list);
+ INIT_LIST_HEAD(&ipc->rx_list);
+ INIT_LIST_HEAD(&ipc->empty_list);
+ init_waitqueue_head(&ipc->wait_txq);
+
+ ret = msg_empty_list_init(ipc);
+ if (ret < 0)
+ return -ENOMEM;
+
+ /* start the IPC message thread */
+ init_kthread_worker(&ipc->kworker);
+ ipc->tx_thread = kthread_run(kthread_worker_fn,
+ &ipc->kworker, "%s",
+ dev_name(ipc->dev));
+ if (IS_ERR(ipc->tx_thread)) {
+ dev_err(ipc->dev, "error: failed to create message TX task\n");
+ ret = PTR_ERR(ipc->tx_thread);
+ kfree(ipc->msg);
+ return ret;
+ }
+
+ init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_ipc_init);
+
+void sst_ipc_fini(struct sst_generic_ipc *ipc)
+{
+ if (ipc->tx_thread)
+ kthread_stop(ipc->tx_thread);
+
+ if (ipc->msg)
+ kfree(ipc->msg);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_fini);
+
+/* Module information */
+MODULE_AUTHOR("Jin Yao");
+MODULE_DESCRIPTION("Intel SST IPC generic");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/intel/common/sst-ipc.h b/sound/soc/intel/common/sst-ipc.h
new file mode 100644
index 000000000000..125ea451a373
--- /dev/null
+++ b/sound/soc/intel/common/sst-ipc.h
@@ -0,0 +1,91 @@
+/*
+ * Intel SST generic IPC Support
+ *
+ * Copyright (C) 2015, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SST_GENERIC_IPC_H
+#define __SST_GENERIC_IPC_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#define IPC_MAX_MAILBOX_BYTES 256
+
+struct ipc_message {
+ struct list_head list;
+ u64 header;
+
+ /* direction wrt host CPU */
+ char tx_data[IPC_MAX_MAILBOX_BYTES];
+ size_t tx_size;
+ char rx_data[IPC_MAX_MAILBOX_BYTES];
+ size_t rx_size;
+
+ wait_queue_head_t waitq;
+ bool pending;
+ bool complete;
+ bool wait;
+ int errno;
+};
+
+struct sst_generic_ipc;
+
+struct sst_plat_ipc_ops {
+ void (*tx_msg)(struct sst_generic_ipc *, struct ipc_message *);
+ void (*shim_dbg)(struct sst_generic_ipc *, const char *);
+ void (*tx_data_copy)(struct ipc_message *, char *, size_t);
+ u64 (*reply_msg_match)(u64 header, u64 *mask);
+};
+
+/* SST generic IPC data */
+struct sst_generic_ipc {
+ struct device *dev;
+ struct sst_dsp *dsp;
+
+ /* IPC messaging */
+ struct list_head tx_list;
+ struct list_head rx_list;
+ struct list_head empty_list;
+ wait_queue_head_t wait_txq;
+ struct task_struct *tx_thread;
+ struct kthread_worker kworker;
+ struct kthread_work kwork;
+ bool pending;
+ struct ipc_message *msg;
+
+ struct sst_plat_ipc_ops ops;
+};
+
+int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes);
+
+int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes);
+
+struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
+ u64 header);
+
+void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
+ struct ipc_message *msg);
+
+void sst_ipc_drop_all(struct sst_generic_ipc *ipc);
+int sst_ipc_init(struct sst_generic_ipc *ipc);
+void sst_ipc_fini(struct sst_generic_ipc *ipc);
+
+#endif
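
For orientation, here is a minimal sketch of how a platform driver is expected to consume the new generic IPC layer declared above, modelled on the Baytrail and Haswell conversions in this series. All foo_* identifiers are hypothetical placeholders and the unconditional outbox write is an illustrative assumption; only the sst_ipc_*/sst_dsp_* calls and struct fields shown are taken from this patch.

/* Hypothetical usage sketch of sound/soc/intel/common/sst-ipc.h.
 * foo_* names are placeholders, not part of this patch.
 */
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"

struct foo_sst {
	struct device *dev;
	struct sst_dsp *dsp;
	struct sst_generic_ipc ipc;	/* embedded generic IPC context */
};

/* platform op: push one queued message into the DSP mailbox + doorbell */
static void foo_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
{
	if (msg->tx_size)
		sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
	sst_dsp_shim_write64_unlocked(ipc->dsp, SST_IPCX, msg->header);
}

static int foo_ipc_probe(struct foo_sst *foo)
{
	struct sst_generic_ipc *ipc = &foo->ipc;
	int ret;

	ipc->dev = foo->dev;
	ipc->ops.tx_msg = foo_tx_msg;	/* shim_dbg/tx_data_copy/reply_msg_match are optional */

	ret = sst_ipc_init(ipc);	/* allocates the message pool, starts the TX kthread */
	if (ret < 0)
		return ret;

	ipc->dsp = foo->dsp;	/* set before the first sst_ipc_tx_message_*() call */
	return 0;
}

The teardown path mirrors the Baytrail change above: sst_ipc_fini() stops the TX kthread and frees the preallocated message pool.
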
diff --git a/sound/soc/intel/haswell/Makefile b/sound/soc/intel/haswell/Makefile
new file mode 100644
index 000000000000..9c1723112d22
--- /dev/null
+++ b/sound/soc/intel/haswell/Makefile
@@ -0,0 +1,4 @@
+snd-soc-sst-haswell-pcm-objs := \
+ sst-haswell-ipc.o sst-haswell-pcm.o sst-haswell-dsp.o
+
+obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += snd-soc-sst-haswell-pcm.o
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/haswell/sst-haswell-dsp.c
index 402b728c0a06..7f94920c8a4d 100644
--- a/sound/soc/intel/sst-haswell-dsp.c
+++ b/sound/soc/intel/haswell/sst-haswell-dsp.c
@@ -28,9 +28,9 @@
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
-#include "sst-dsp.h"
-#include "sst-dsp-priv.h"
-#include "sst-haswell-ipc.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "../haswell/sst-haswell-ipc.h"
#include <trace/events/hswadsp.h>
@@ -100,6 +100,7 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
&& module->type != SST_HSW_MODULE_PCM
&& module->type != SST_HSW_MODULE_PCM_REFERENCE
&& module->type != SST_HSW_MODULE_PCM_CAPTURE
+ && module->type != SST_HSW_MODULE_WAVES
&& module->type != SST_HSW_MODULE_LPAL)
return 0;
@@ -139,6 +140,7 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
mod->type = SST_MEM_IRAM;
break;
case SST_HSW_DRAM:
+ case SST_HSW_REGS:
ram = dsp->addr.lpe;
mod->offset = block->ram_offset;
mod->type = SST_MEM_DRAM;
@@ -169,6 +171,7 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
block = (void *)block + sizeof(*block) + block->size;
}
+ mod->state = SST_MODULE_STATE_LOADED;
return 0;
}
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index 863a9ca34b8e..324eceb07b25 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -34,8 +34,9 @@
#include <sound/asound.h>
#include "sst-haswell-ipc.h"
-#include "sst-dsp.h"
-#include "sst-dsp-priv.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "../common/sst-ipc.h"
/* Global Message - Generic */
#define IPC_GLB_TYPE_SHIFT 24
@@ -79,6 +80,15 @@
#define IPC_LOG_ID_MASK (0xf << IPC_LOG_ID_SHIFT)
#define IPC_LOG_ID(x) (x << IPC_LOG_ID_SHIFT)
+/* Module Message */
+#define IPC_MODULE_OPERATION_SHIFT 20
+#define IPC_MODULE_OPERATION_MASK (0xf << IPC_MODULE_OPERATION_SHIFT)
+#define IPC_MODULE_OPERATION(x) (x << IPC_MODULE_OPERATION_SHIFT)
+
+#define IPC_MODULE_ID_SHIFT 16
+#define IPC_MODULE_ID_MASK (0xf << IPC_MODULE_ID_SHIFT)
+#define IPC_MODULE_ID(x) (x << IPC_MODULE_ID_SHIFT)
+
/* IPC message timeout (msecs) */
#define IPC_TIMEOUT_MSECS 300
#define IPC_BOOT_MSECS 200
@@ -115,6 +125,7 @@ enum ipc_glb_type {
IPC_GLB_ENTER_DX_STATE = 12,
IPC_GLB_GET_MIXER_STREAM_INFO = 13, /* Request mixer stream params */
IPC_GLB_DEBUG_LOG_MESSAGE = 14, /* Message to or from the debug logger. */
+ IPC_GLB_MODULE_OPERATION = 15, /* Message to loadable fw module */
IPC_GLB_REQUEST_TRANSFER = 16, /* < Request Transfer for host */
IPC_GLB_MAX_IPC_MESSAGE_TYPE = 17, /* Maximum message number */
};
@@ -133,6 +144,16 @@ enum ipc_glb_reply {
IPC_GLB_REPLY_SOURCE_NOT_STARTED = 10, /* Source was not started. */
};
+enum ipc_module_operation {
+ IPC_MODULE_NOTIFICATION = 0,
+ IPC_MODULE_ENABLE = 1,
+ IPC_MODULE_DISABLE = 2,
+ IPC_MODULE_GET_PARAMETER = 3,
+ IPC_MODULE_SET_PARAMETER = 4,
+ IPC_MODULE_GET_INFO = 5,
+ IPC_MODULE_MAX_MESSAGE
+};
+
/* Stream Message - Types */
enum ipc_str_operation {
IPC_STR_RESET = 0,
@@ -190,23 +211,6 @@ struct sst_hsw_ipc_fw_ready {
u8 fw_info[IPC_MAX_MAILBOX_BYTES - 5 * sizeof(u32)];
} __attribute__((packed));
-struct ipc_message {
- struct list_head list;
- u32 header;
-
- /* direction wrt host CPU */
- char tx_data[IPC_MAX_MAILBOX_BYTES];
- size_t tx_size;
- char rx_data[IPC_MAX_MAILBOX_BYTES];
- size_t rx_size;
-
- wait_queue_head_t waitq;
- bool pending;
- bool complete;
- bool wait;
- int errno;
-};
-
struct sst_hsw_stream;
struct sst_hsw;
@@ -305,18 +309,19 @@ struct sst_hsw {
bool shutdown;
/* IPC messaging */
- struct list_head tx_list;
- struct list_head rx_list;
- struct list_head empty_list;
- wait_queue_head_t wait_txq;
- struct task_struct *tx_thread;
- struct kthread_worker kworker;
- struct kthread_work kwork;
- bool pending;
- struct ipc_message *msg;
+ struct sst_generic_ipc ipc;
/* FW log stream */
struct sst_hsw_log_stream log_stream;
+
+ /* flags bit field to track module state when resuming from RTD3,
+ * each bit represents the state (enabled/disabled) of a single module */
+ u32 enabled_modules_rtd3;
+
+ /* buffer to store parameter lines */
+ u32 param_idx_w; /* write index */
+ u32 param_idx_r; /* read index */
+ u8 param_buf[WAVES_PARAM_LINES][WAVES_PARAM_COUNT];
};
#define CREATE_TRACE_POINTS
@@ -352,6 +357,16 @@ static inline u32 msg_get_notify_reason(u32 msg)
return (msg & IPC_STG_TYPE_MASK) >> IPC_STG_TYPE_SHIFT;
}
+static inline u32 msg_get_module_operation(u32 msg)
+{
+ return (msg & IPC_MODULE_OPERATION_MASK) >> IPC_MODULE_OPERATION_SHIFT;
+}
+
+static inline u32 msg_get_module_id(u32 msg)
+{
+ return (msg & IPC_MODULE_ID_MASK) >> IPC_MODULE_ID_SHIFT;
+}
+
u32 create_channel_map(enum sst_hsw_channel_config config)
{
switch (config) {
@@ -417,159 +432,6 @@ static struct sst_hsw_stream *get_stream_by_id(struct sst_hsw *hsw,
return NULL;
}
-static void ipc_shim_dbg(struct sst_hsw *hsw, const char *text)
-{
- struct sst_dsp *sst = hsw->dsp;
- u32 isr, ipcd, imrx, ipcx;
-
- ipcx = sst_dsp_shim_read_unlocked(sst, SST_IPCX);
- isr = sst_dsp_shim_read_unlocked(sst, SST_ISRX);
- ipcd = sst_dsp_shim_read_unlocked(sst, SST_IPCD);
- imrx = sst_dsp_shim_read_unlocked(sst, SST_IMRX);
-
- dev_err(hsw->dev, "ipc: --%s-- ipcx 0x%8.8x isr 0x%8.8x ipcd 0x%8.8x imrx 0x%8.8x\n",
- text, ipcx, isr, ipcd, imrx);
-}
-
-/* locks held by caller */
-static struct ipc_message *msg_get_empty(struct sst_hsw *hsw)
-{
- struct ipc_message *msg = NULL;
-
- if (!list_empty(&hsw->empty_list)) {
- msg = list_first_entry(&hsw->empty_list, struct ipc_message,
- list);
- list_del(&msg->list);
- }
-
- return msg;
-}
-
-static void ipc_tx_msgs(struct kthread_work *work)
-{
- struct sst_hsw *hsw =
- container_of(work, struct sst_hsw, kwork);
- struct ipc_message *msg;
- unsigned long flags;
- u32 ipcx;
-
- spin_lock_irqsave(&hsw->dsp->spinlock, flags);
-
- if (list_empty(&hsw->tx_list) || hsw->pending) {
- spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
- return;
- }
-
- /* if the DSP is busy, we will TX messages after IRQ.
- * also postpone if we are in the middle of procesing completion irq*/
- ipcx = sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCX);
- if (ipcx & (SST_IPCX_BUSY | SST_IPCX_DONE)) {
- spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
- return;
- }
-
- msg = list_first_entry(&hsw->tx_list, struct ipc_message, list);
-
- list_move(&msg->list, &hsw->rx_list);
-
- /* send the message */
- sst_dsp_outbox_write(hsw->dsp, msg->tx_data, msg->tx_size);
- sst_dsp_ipc_msg_tx(hsw->dsp, msg->header | SST_IPCX_BUSY);
-
- spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
-}
-
-/* locks held by caller */
-static void tx_msg_reply_complete(struct sst_hsw *hsw, struct ipc_message *msg)
-{
- msg->complete = true;
- trace_ipc_reply("completed", msg->header);
-
- if (!msg->wait)
- list_add_tail(&msg->list, &hsw->empty_list);
- else
- wake_up(&msg->waitq);
-}
-
-static int tx_wait_done(struct sst_hsw *hsw, struct ipc_message *msg,
- void *rx_data)
-{
- unsigned long flags;
- int ret;
-
- /* wait for DSP completion (in all cases atm inc pending) */
- ret = wait_event_timeout(msg->waitq, msg->complete,
- msecs_to_jiffies(IPC_TIMEOUT_MSECS));
-
- spin_lock_irqsave(&hsw->dsp->spinlock, flags);
- if (ret == 0) {
- ipc_shim_dbg(hsw, "message timeout");
-
- trace_ipc_error("error message timeout for", msg->header);
- list_del(&msg->list);
- ret = -ETIMEDOUT;
- } else {
-
- /* copy the data returned from DSP */
- if (msg->rx_size)
- memcpy(rx_data, msg->rx_data, msg->rx_size);
- ret = msg->errno;
- }
-
- list_add_tail(&msg->list, &hsw->empty_list);
- spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
- return ret;
-}
-
-static int ipc_tx_message(struct sst_hsw *hsw, u32 header, void *tx_data,
- size_t tx_bytes, void *rx_data, size_t rx_bytes, int wait)
-{
- struct ipc_message *msg;
- unsigned long flags;
-
- spin_lock_irqsave(&hsw->dsp->spinlock, flags);
-
- msg = msg_get_empty(hsw);
- if (msg == NULL) {
- spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
- return -EBUSY;
- }
-
- if (tx_bytes)
- memcpy(msg->tx_data, tx_data, tx_bytes);
-
- msg->header = header;
- msg->tx_size = tx_bytes;
- msg->rx_size = rx_bytes;
- msg->wait = wait;
- msg->errno = 0;
- msg->pending = false;
- msg->complete = false;
-
- list_add_tail(&msg->list, &hsw->tx_list);
- spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
-
- queue_kthread_work(&hsw->kworker, &hsw->kwork);
-
- if (wait)
- return tx_wait_done(hsw, msg, rx_data);
- else
- return 0;
-}
-
-static inline int ipc_tx_message_wait(struct sst_hsw *hsw, u32 header,
- void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
-{
- return ipc_tx_message(hsw, header, tx_data, tx_bytes, rx_data,
- rx_bytes, 1);
-}
-
-static inline int ipc_tx_message_nowait(struct sst_hsw *hsw, u32 header,
- void *tx_data, size_t tx_bytes)
-{
- return ipc_tx_message(hsw, header, tx_data, tx_bytes, NULL, 0, 0);
-}
-
static void hsw_fw_ready(struct sst_hsw *hsw, u32 header)
{
struct sst_hsw_ipc_fw_ready fw_ready;
@@ -604,7 +466,7 @@ static void hsw_fw_ready(struct sst_hsw *hsw, u32 header)
/* log the FW version info got from the mailbox here. */
memcpy(fw_info, fw_ready.fw_info, fw_ready.fw_info_size);
pinfo = &fw_info[0];
- for (i = 0; i < sizeof(tmp) / sizeof(char *); i++)
+ for (i = 0; i < ARRAY_SIZE(tmp); i++)
tmp[i] = strsep(&pinfo, " ");
dev_info(hsw->dev, "FW loaded, mailbox readback FW info: type %s, - "
"version: %s.%s, build %s, source commit id: %s\n",
@@ -657,27 +519,6 @@ static void hsw_notification_work(struct work_struct *work)
sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
}
-static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
-{
- struct ipc_message *msg;
-
- /* clear reply bits & status bits */
- header &= ~(IPC_STATUS_MASK | IPC_GLB_REPLY_MASK);
-
- if (list_empty(&hsw->rx_list)) {
- dev_err(hsw->dev, "error: rx list empty but received 0x%x\n",
- header);
- return NULL;
- }
-
- list_for_each_entry(msg, &hsw->rx_list, list) {
- if (msg->header == header)
- return msg;
- }
-
- return NULL;
-}
-
static void hsw_stream_update(struct sst_hsw *hsw, struct ipc_message *msg)
{
struct sst_hsw_stream *stream;
@@ -716,7 +557,7 @@ static int hsw_process_reply(struct sst_hsw *hsw, u32 header)
trace_ipc_reply("processing -->", header);
- msg = reply_find_msg(hsw, header);
+ msg = sst_ipc_reply_find_msg(&hsw->ipc, header);
if (msg == NULL) {
trace_ipc_error("error: can't find message header", header);
return -EIO;
@@ -727,14 +568,14 @@ static int hsw_process_reply(struct sst_hsw *hsw, u32 header)
case IPC_GLB_REPLY_PENDING:
trace_ipc_pending_reply("received", header);
msg->pending = true;
- hsw->pending = true;
+ hsw->ipc.pending = true;
return 1;
case IPC_GLB_REPLY_SUCCESS:
if (msg->pending) {
trace_ipc_pending_reply("completed", header);
sst_dsp_inbox_read(hsw->dsp, msg->rx_data,
msg->rx_size);
- hsw->pending = false;
+ hsw->ipc.pending = false;
} else {
/* copy data from the DSP */
sst_dsp_outbox_read(hsw->dsp, msg->rx_data,
@@ -790,11 +631,36 @@ static int hsw_process_reply(struct sst_hsw *hsw, u32 header)
/* wake up and return the error if we have waiters on this message ? */
list_del(&msg->list);
- tx_msg_reply_complete(hsw, msg);
+ sst_ipc_tx_msg_reply_complete(&hsw->ipc, msg);
return 1;
}
+static int hsw_module_message(struct sst_hsw *hsw, u32 header)
+{
+ u32 operation, module_id;
+ int handled = 0;
+
+ operation = msg_get_module_operation(header);
+ module_id = msg_get_module_id(header);
+ dev_dbg(hsw->dev, "received module message header: 0x%8.8x\n",
+ header);
+ dev_dbg(hsw->dev, "operation: 0x%8.8x module_id: 0x%8.8x\n",
+ operation, module_id);
+
+ switch (operation) {
+ case IPC_MODULE_NOTIFICATION:
+ dev_dbg(hsw->dev, "module notification received");
+ handled = 1;
+ break;
+ default:
+ handled = hsw_process_reply(hsw, header);
+ break;
+ }
+
+ return handled;
+}
+
static int hsw_stream_message(struct sst_hsw *hsw, u32 header)
{
u32 stream_msg, stream_id, stage_type;
@@ -890,6 +756,9 @@ static int hsw_process_notification(struct sst_hsw *hsw)
case IPC_GLB_DEBUG_LOG_MESSAGE:
handled = hsw_log_message(hsw, header);
break;
+ case IPC_GLB_MODULE_OPERATION:
+ handled = hsw_module_message(hsw, header);
+ break;
default:
dev_err(hsw->dev, "error: unexpected type %d hdr 0x%8.8x\n",
type, header);
@@ -903,6 +772,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
{
struct sst_dsp *sst = (struct sst_dsp *) context;
struct sst_hsw *hsw = sst_dsp_get_thread_context(sst);
+ struct sst_generic_ipc *ipc = &hsw->ipc;
u32 ipcx, ipcd;
int handled;
unsigned long flags;
@@ -949,7 +819,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- queue_kthread_work(&hsw->kworker, &hsw->kwork);
+ queue_kthread_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
@@ -959,7 +829,8 @@ int sst_hsw_fw_get_version(struct sst_hsw *hsw,
{
int ret;
- ret = ipc_tx_message_wait(hsw, IPC_GLB_TYPE(IPC_GLB_GET_FW_VERSION),
+ ret = sst_ipc_tx_message_wait(&hsw->ipc,
+ IPC_GLB_TYPE(IPC_GLB_GET_FW_VERSION),
NULL, 0, version, sizeof(*version));
if (ret < 0)
dev_err(hsw->dev, "error: get version failed\n");
@@ -1023,7 +894,8 @@ int sst_hsw_stream_set_volume(struct sst_hsw *hsw,
req->channel = channel;
}
- ret = ipc_tx_message_wait(hsw, header, req, sizeof(*req), NULL, 0);
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, req,
+ sizeof(*req), NULL, 0);
if (ret < 0) {
dev_err(hsw->dev, "error: set stream volume failed\n");
return ret;
@@ -1088,7 +960,8 @@ int sst_hsw_mixer_set_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
req.curve_type = hsw->curve_type;
req.target_volume = volume;
- ret = ipc_tx_message_wait(hsw, header, &req, sizeof(req), NULL, 0);
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, &req,
+ sizeof(req), NULL, 0);
if (ret < 0) {
dev_err(hsw->dev, "error: set mixer volume failed\n");
return ret;
@@ -1146,7 +1019,7 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
stream->free_req.stream_id = stream->reply.stream_hw_id;
header = IPC_GLB_TYPE(IPC_GLB_FREE_STREAM);
- ret = ipc_tx_message_wait(hsw, header, &stream->free_req,
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, &stream->free_req,
sizeof(stream->free_req), NULL, 0);
if (ret < 0) {
dev_err(hsw->dev, "error: free stream %d failed\n",
@@ -1338,8 +1211,8 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
- ret = ipc_tx_message_wait(hsw, header, str_req, sizeof(*str_req),
- reply, sizeof(*reply));
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, str_req,
+ sizeof(*str_req), reply, sizeof(*reply));
if (ret < 0) {
dev_err(hsw->dev, "error: stream commit failed\n");
return ret;
@@ -1388,7 +1261,8 @@ int sst_hsw_mixer_get_info(struct sst_hsw *hsw)
trace_ipc_request("get global mixer info", 0);
- ret = ipc_tx_message_wait(hsw, header, NULL, 0, reply, sizeof(*reply));
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, NULL, 0,
+ reply, sizeof(*reply));
if (ret < 0) {
dev_err(hsw->dev, "error: get stream info failed\n");
return ret;
@@ -1409,9 +1283,10 @@ static int sst_hsw_stream_operations(struct sst_hsw *hsw, int type,
header |= (stream_id << IPC_STR_ID_SHIFT);
if (wait)
- return ipc_tx_message_wait(hsw, header, NULL, 0, NULL, 0);
+ return sst_ipc_tx_message_wait(&hsw->ipc, header,
+ NULL, 0, NULL, 0);
else
- return ipc_tx_message_nowait(hsw, header, NULL, 0);
+ return sst_ipc_tx_message_nowait(&hsw->ipc, header, NULL, 0);
}
/* Stream ALSA trigger operations */
@@ -1538,8 +1413,8 @@ int sst_hsw_device_set_config(struct sst_hsw *hsw,
header = IPC_GLB_TYPE(IPC_GLB_SET_DEVICE_FORMATS);
- ret = ipc_tx_message_wait(hsw, header, &config, sizeof(config),
- NULL, 0);
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, &config,
+ sizeof(config), NULL, 0);
if (ret < 0)
dev_err(hsw->dev, "error: set device formats failed\n");
@@ -1559,8 +1434,8 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw,
trace_ipc_request("PM enter Dx state", state);
- ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_),
- dx, sizeof(*dx));
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, &state_,
+ sizeof(state_), dx, sizeof(*dx));
if (ret < 0) {
dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state);
return ret;
@@ -1703,32 +1578,6 @@ static int sst_hsw_dx_state_restore(struct sst_hsw *hsw)
return 0;
}
-static void sst_hsw_drop_all(struct sst_hsw *hsw)
-{
- struct ipc_message *msg, *tmp;
- unsigned long flags;
- int tx_drop_cnt = 0, rx_drop_cnt = 0;
-
- /* drop all TX and Rx messages before we stall + reset DSP */
- spin_lock_irqsave(&hsw->dsp->spinlock, flags);
-
- list_for_each_entry_safe(msg, tmp, &hsw->tx_list, list) {
- list_move(&msg->list, &hsw->empty_list);
- tx_drop_cnt++;
- }
-
- list_for_each_entry_safe(msg, tmp, &hsw->rx_list, list) {
- list_move(&msg->list, &hsw->empty_list);
- rx_drop_cnt++;
- }
-
- spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
-
- if (tx_drop_cnt || rx_drop_cnt)
- dev_err(hsw->dev, "dropped IPC msg RX=%d, TX=%d\n",
- tx_drop_cnt, rx_drop_cnt);
-}
-
int sst_hsw_dsp_load(struct sst_hsw *hsw)
{
struct sst_dsp *dsp = hsw->dsp;
@@ -1808,7 +1657,7 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw)
if (ret < 0)
return ret;
- sst_hsw_drop_all(hsw);
+ sst_ipc_drop_all(&hsw->ipc);
return 0;
}
@@ -1844,6 +1693,8 @@ int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw)
if (ret < 0)
dev_err(dev, "error: audio DSP boot failure\n");
+ sst_hsw_init_module_state(hsw);
+
ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
msecs_to_jiffies(IPC_BOOT_MSECS));
if (ret == 0) {
@@ -1864,26 +1715,345 @@ int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw)
}
#endif
-static int msg_empty_list_init(struct sst_hsw *hsw)
+struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw)
{
- int i;
+ return hsw->dsp;
+}
- hsw->msg = kzalloc(sizeof(struct ipc_message) *
- IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
- if (hsw->msg == NULL)
- return -ENOMEM;
+void sst_hsw_init_module_state(struct sst_hsw *hsw)
+{
+ struct sst_module *module;
+ enum sst_hsw_module_id id;
+
+ /* the base fw contains several modules */
+ for (id = SST_HSW_MODULE_BASE_FW; id < SST_HSW_MAX_MODULE_ID; id++) {
+ module = sst_module_get_from_id(hsw->dsp, id);
+ if (module) {
+ /* module waves is active only after being enabled */
+ if (id == SST_HSW_MODULE_WAVES)
+ module->state = SST_MODULE_STATE_INITIALIZED;
+ else
+ module->state = SST_MODULE_STATE_ACTIVE;
+ }
+ }
+}
+
+bool sst_hsw_is_module_loaded(struct sst_hsw *hsw, u32 module_id)
+{
+ struct sst_module *module;
+
+ module = sst_module_get_from_id(hsw->dsp, module_id);
+ if (module == NULL || module->state == SST_MODULE_STATE_UNLOADED)
+ return false;
+ else
+ return true;
+}
+
+bool sst_hsw_is_module_active(struct sst_hsw *hsw, u32 module_id)
+{
+ struct sst_module *module;
+
+ module = sst_module_get_from_id(hsw->dsp, module_id);
+ if (module != NULL && module->state == SST_MODULE_STATE_ACTIVE)
+ return true;
+ else
+ return false;
+}
- for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
- init_waitqueue_head(&hsw->msg[i].waitq);
- list_add(&hsw->msg[i].list, &hsw->empty_list);
+void sst_hsw_set_module_enabled_rtd3(struct sst_hsw *hsw, u32 module_id)
+{
+ hsw->enabled_modules_rtd3 |= (1 << module_id);
+}
+
+void sst_hsw_set_module_disabled_rtd3(struct sst_hsw *hsw, u32 module_id)
+{
+ hsw->enabled_modules_rtd3 &= ~(1 << module_id);
+}
+
+bool sst_hsw_is_module_enabled_rtd3(struct sst_hsw *hsw, u32 module_id)
+{
+ return hsw->enabled_modules_rtd3 & (1 << module_id);
+}
+
+void sst_hsw_reset_param_buf(struct sst_hsw *hsw)
+{
+ hsw->param_idx_w = 0;
+ hsw->param_idx_r = 0;
+ memset((void *)hsw->param_buf, 0, sizeof(hsw->param_buf));
+}
+
+int sst_hsw_store_param_line(struct sst_hsw *hsw, u8 *buf)
+{
+ /* save line to the first available position of param buffer */
+ if (hsw->param_idx_w > WAVES_PARAM_LINES - 1) {
+ dev_warn(hsw->dev, "warning: param buffer overflow!\n");
+ return -EPERM;
+ }
+ memcpy(hsw->param_buf[hsw->param_idx_w], buf, WAVES_PARAM_COUNT);
+ hsw->param_idx_w++;
+ return 0;
+}
+
+int sst_hsw_load_param_line(struct sst_hsw *hsw, u8 *buf)
+{
+ u8 id = 0;
+
+ /* read the first matching line from param buffer */
+ while (hsw->param_idx_r < WAVES_PARAM_LINES) {
+ id = hsw->param_buf[hsw->param_idx_r][0];
+ hsw->param_idx_r++;
+ if (buf[0] == id) {
+ memcpy(buf, hsw->param_buf[hsw->param_idx_r - 1],
+ WAVES_PARAM_COUNT);
+ break;
+ }
}
+ if (hsw->param_idx_r > WAVES_PARAM_LINES - 1) {
+ dev_dbg(hsw->dev, "end of buffer, roll to the beginning\n");
+ hsw->param_idx_r = 0;
+ return 0;
+ }
+ return 0;
+}
+
+int sst_hsw_launch_param_buf(struct sst_hsw *hsw)
+{
+ int ret, idx;
+ if (!sst_hsw_is_module_active(hsw, SST_HSW_MODULE_WAVES)) {
+ dev_dbg(hsw->dev, "module waves is not active\n");
+ return 0;
+ }
+
+ /* put all param lines to DSP through ipc */
+ for (idx = 0; idx < hsw->param_idx_w; idx++) {
+ ret = sst_hsw_module_set_param(hsw,
+ SST_HSW_MODULE_WAVES, 0, hsw->param_buf[idx][0],
+ WAVES_PARAM_COUNT, hsw->param_buf[idx]);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
-struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw)
+int sst_hsw_module_load(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id, char *name)
{
- return hsw->dsp;
+ int ret = 0;
+ const struct firmware *fw = NULL;
+ struct sst_fw *hsw_sst_fw;
+ struct sst_module *module;
+ struct device *dev = hsw->dev;
+ struct sst_dsp *dsp = hsw->dsp;
+
+ dev_dbg(dev, "sst_hsw_module_load id=%d, name='%s'", module_id, name);
+
+ module = sst_module_get_from_id(dsp, module_id);
+ if (module == NULL) {
+ /* loading for the first time */
+ if (module_id == SST_HSW_MODULE_BASE_FW) {
+ /* for base module: use fw requested in acpi probe */
+ fw = dsp->pdata->fw;
+ if (!fw) {
+ dev_err(dev, "request Base fw failed\n");
+ return -ENODEV;
+ }
+ } else {
+ /* try and load any other optional modules if they are
+ * available. Use dev_info instead of dev_err in case
+ * request firmware failed */
+ ret = request_firmware(&fw, name, dev);
+ if (ret) {
+ dev_info(dev, "fw image %s not available(%d)\n",
+ name, ret);
+ return ret;
+ }
+ }
+ hsw_sst_fw = sst_fw_new(dsp, fw, hsw);
+ if (hsw_sst_fw == NULL) {
+ dev_err(dev, "error: failed to load firmware\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ module = sst_module_get_from_id(dsp, module_id);
+ if (module == NULL) {
+ dev_err(dev, "error: no module %d in firmware %s\n",
+ module_id, name);
+ }
+ } else
+ dev_info(dev, "module %d (%s) already loaded\n",
+ module_id, name);
+out:
+ /* release fw, but base fw should be released by acpi driver */
+ if (fw && module_id != SST_HSW_MODULE_BASE_FW)
+ release_firmware(fw);
+
+ return ret;
+}
+
+int sst_hsw_module_enable(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id)
+{
+ int ret;
+ u32 header = 0;
+ struct sst_hsw_ipc_module_config config;
+ struct sst_module *module;
+ struct sst_module_runtime *runtime;
+ struct device *dev = hsw->dev;
+ struct sst_dsp *dsp = hsw->dsp;
+
+ if (!sst_hsw_is_module_loaded(hsw, module_id)) {
+ dev_dbg(dev, "module %d not loaded\n", module_id);
+ return 0;
+ }
+
+ if (sst_hsw_is_module_active(hsw, module_id)) {
+ dev_info(dev, "module %d already enabled\n", module_id);
+ return 0;
+ }
+
+ module = sst_module_get_from_id(dsp, module_id);
+ if (module == NULL) {
+ dev_err(dev, "module %d not valid\n", module_id);
+ return -ENXIO;
+ }
+
+ runtime = sst_module_runtime_get_from_id(module, module_id);
+ if (runtime == NULL) {
+ dev_err(dev, "runtime %d not valid", module_id);
+ return -ENXIO;
+ }
+
+ header = IPC_GLB_TYPE(IPC_GLB_MODULE_OPERATION) |
+ IPC_MODULE_OPERATION(IPC_MODULE_ENABLE) |
+ IPC_MODULE_ID(module_id);
+ dev_dbg(dev, "module enable header: %x\n", header);
+
+ config.map.module_entries_count = 1;
+ config.map.module_entries[0].module_id = module->id;
+ config.map.module_entries[0].entry_point = module->entry;
+
+ config.persistent_mem.offset =
+ sst_dsp_get_offset(dsp,
+ runtime->persistent_offset, SST_MEM_DRAM);
+ config.persistent_mem.size = module->persistent_size;
+
+ config.scratch_mem.offset =
+ sst_dsp_get_offset(dsp,
+ dsp->scratch_offset, SST_MEM_DRAM);
+ config.scratch_mem.size = module->scratch_size;
+ dev_dbg(dev, "mod %d enable p:%d @ %x, s:%d @ %x, ep: %x",
+ config.map.module_entries[0].module_id,
+ config.persistent_mem.size,
+ config.persistent_mem.offset,
+ config.scratch_mem.size, config.scratch_mem.offset,
+ config.map.module_entries[0].entry_point);
+
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header,
+ &config, sizeof(config), NULL, 0);
+ if (ret < 0)
+ dev_err(dev, "ipc: module enable failed - %d\n", ret);
+ else
+ module->state = SST_MODULE_STATE_ACTIVE;
+
+ return ret;
+}
+
+int sst_hsw_module_disable(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id)
+{
+ int ret;
+ u32 header;
+ struct sst_module *module;
+ struct device *dev = hsw->dev;
+ struct sst_dsp *dsp = hsw->dsp;
+
+ if (!sst_hsw_is_module_loaded(hsw, module_id)) {
+ dev_dbg(dev, "module %d not loaded\n", module_id);
+ return 0;
+ }
+
+ if (!sst_hsw_is_module_active(hsw, module_id)) {
+ dev_info(dev, "module %d already disabled\n", module_id);
+ return 0;
+ }
+
+ module = sst_module_get_from_id(dsp, module_id);
+ if (module == NULL) {
+ dev_err(dev, "module %d not valid\n", module_id);
+ return -ENXIO;
+ }
+
+ header = IPC_GLB_TYPE(IPC_GLB_MODULE_OPERATION) |
+ IPC_MODULE_OPERATION(IPC_MODULE_DISABLE) |
+ IPC_MODULE_ID(module_id);
+
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header, NULL, 0, NULL, 0);
+ if (ret < 0)
+ dev_err(dev, "module disable failed - %d\n", ret);
+ else
+ module->state = SST_MODULE_STATE_INITIALIZED;
+
+ return ret;
+}
+
+int sst_hsw_module_set_param(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id, u32 parameter_id,
+ u32 param_size, char *param)
+{
+ int ret;
+ unsigned char *data = NULL;
+ u32 header = 0;
+ u32 payload_size = 0, transfer_parameter_size = 0;
+ dma_addr_t dma_addr = 0;
+ struct sst_hsw_transfer_parameter *parameter;
+ struct device *dev = hsw->dev;
+
+ header = IPC_GLB_TYPE(IPC_GLB_MODULE_OPERATION) |
+ IPC_MODULE_OPERATION(IPC_MODULE_SET_PARAMETER) |
+ IPC_MODULE_ID(module_id);
+ dev_dbg(dev, "sst_hsw_module_set_param header=%x\n", header);
+
+ payload_size = param_size +
+ sizeof(struct sst_hsw_transfer_parameter) -
+ sizeof(struct sst_hsw_transfer_list);
+ dev_dbg(dev, "parameter size : %d\n", param_size);
+ dev_dbg(dev, "payload size : %d\n", payload_size);
+
+ if (payload_size <= SST_HSW_IPC_MAX_SHORT_PARAMETER_SIZE) {
+ /* short parameter, mailbox can contain data */
+ transfer_parameter_size = ALIGN(payload_size, 4);
+ dev_dbg(dev, "transfer parameter aligned size : %d\n",
+ transfer_parameter_size);
+
+ parameter = kzalloc(transfer_parameter_size, GFP_KERNEL);
+ if (parameter == NULL)
+ return -ENOMEM;
+
+ memcpy(parameter->data, param, param_size);
+ } else {
+ dev_warn(dev, "transfer parameter size too large!\n");
+ return 0;
+ }
+
+ parameter->parameter_id = parameter_id;
+ parameter->data_size = param_size;
+
+ ret = sst_ipc_tx_message_wait(&hsw->ipc, header,
+ parameter, transfer_parameter_size , NULL, 0);
+ if (ret < 0)
+ dev_err(dev, "ipc: module set parameter failed - %d\n", ret);
+
+ kfree(parameter);
+
+ if (data)
+ dma_free_coherent(hsw->dsp->dma_dev,
+ param_size, (void *)data, dma_addr);
+
+ return ret;
}
static struct sst_dsp_device hsw_dev = {
@@ -1891,10 +2061,48 @@ static struct sst_dsp_device hsw_dev = {
.ops = &haswell_ops,
};
+static void hsw_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
+{
+ /* send the message */
+ sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
+ sst_dsp_ipc_msg_tx(ipc->dsp, msg->header);
+}
+
+static void hsw_shim_dbg(struct sst_generic_ipc *ipc, const char *text)
+{
+ struct sst_dsp *sst = ipc->dsp;
+ u32 isr, ipcd, imrx, ipcx;
+
+ ipcx = sst_dsp_shim_read_unlocked(sst, SST_IPCX);
+ isr = sst_dsp_shim_read_unlocked(sst, SST_ISRX);
+ ipcd = sst_dsp_shim_read_unlocked(sst, SST_IPCD);
+ imrx = sst_dsp_shim_read_unlocked(sst, SST_IMRX);
+
+ dev_err(ipc->dev,
+ "ipc: --%s-- ipcx 0x%8.8x isr 0x%8.8x ipcd 0x%8.8x imrx 0x%8.8x\n",
+ text, ipcx, isr, ipcd, imrx);
+}
+
+static void hsw_tx_data_copy(struct ipc_message *msg, char *tx_data,
+ size_t tx_size)
+{
+ memcpy(msg->tx_data, tx_data, tx_size);
+}
+
+static u64 hsw_reply_msg_match(u64 header, u64 *mask)
+{
+ /* clear reply bits & status bits */
+ header &= ~(IPC_STATUS_MASK | IPC_GLB_REPLY_MASK);
+ *mask = (u64)-1;
+
+ return header;
+}
+
int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
{
struct sst_hsw_ipc_fw_version version;
struct sst_hsw *hsw;
+ struct sst_generic_ipc *ipc;
int ret;
dev_dbg(dev, "initialising Audio DSP IPC\n");
@@ -1903,39 +2111,30 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
if (hsw == NULL)
return -ENOMEM;
- hsw->dev = dev;
- INIT_LIST_HEAD(&hsw->stream_list);
- INIT_LIST_HEAD(&hsw->tx_list);
- INIT_LIST_HEAD(&hsw->rx_list);
- INIT_LIST_HEAD(&hsw->empty_list);
- init_waitqueue_head(&hsw->boot_wait);
- init_waitqueue_head(&hsw->wait_txq);
-
- ret = msg_empty_list_init(hsw);
- if (ret < 0)
- return -ENOMEM;
+ ipc = &hsw->ipc;
+ ipc->dev = dev;
+ ipc->ops.tx_msg = hsw_tx_msg;
+ ipc->ops.shim_dbg = hsw_shim_dbg;
+ ipc->ops.tx_data_copy = hsw_tx_data_copy;
+ ipc->ops.reply_msg_match = hsw_reply_msg_match;
- /* start the IPC message thread */
- init_kthread_worker(&hsw->kworker);
- hsw->tx_thread = kthread_run(kthread_worker_fn,
- &hsw->kworker, "%s",
- dev_name(hsw->dev));
- if (IS_ERR(hsw->tx_thread)) {
- ret = PTR_ERR(hsw->tx_thread);
- dev_err(hsw->dev, "error: failed to create message TX task\n");
- goto err_free_msg;
- }
- init_kthread_work(&hsw->kwork, ipc_tx_msgs);
+ ret = sst_ipc_init(ipc);
+ if (ret != 0)
+ goto ipc_init_err;
+ INIT_LIST_HEAD(&hsw->stream_list);
+ init_waitqueue_head(&hsw->boot_wait);
hsw_dev.thread_context = hsw;
/* init SST shim */
hsw->dsp = sst_dsp_new(dev, &hsw_dev, pdata);
if (hsw->dsp == NULL) {
ret = -ENODEV;
- goto dsp_err;
+ goto dsp_new_err;
}
+ ipc->dsp = hsw->dsp;
+
/* allocate DMA buffer for context storage */
hsw->dx_context = dma_alloc_coherent(hsw->dsp->dma_dev,
SST_HSW_DX_CONTEXT_SIZE, &hsw->dx_context_paddr, GFP_KERNEL);
@@ -1947,18 +2146,22 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
/* keep the DSP in reset state for base FW loading */
sst_dsp_reset(hsw->dsp);
- hsw->sst_fw = sst_fw_new(hsw->dsp, pdata->fw, hsw);
- if (hsw->sst_fw == NULL) {
- ret = -ENODEV;
- dev_err(dev, "error: failed to load firmware\n");
+ /* load base module and other modules in base firmware image */
+ ret = sst_hsw_module_load(hsw, SST_HSW_MODULE_BASE_FW, 0, "Base");
+ if (ret < 0)
goto fw_err;
- }
+
+ /* try to load module waves */
+ sst_hsw_module_load(hsw, SST_HSW_MODULE_WAVES, 0, "intel/IntcPP01.bin");
/* allocate scratch mem regions */
ret = sst_block_alloc_scratch(hsw->dsp);
if (ret < 0)
goto boot_err;
+ /* init param buffer */
+ sst_hsw_reset_param_buf(hsw);
+
/* wait for DSP boot completion */
sst_dsp_boot(hsw->dsp);
ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
@@ -1971,6 +2174,9 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
goto boot_err;
}
+ /* init module state after boot */
+ sst_hsw_init_module_state(hsw);
+
/* get the FW version */
sst_hsw_fw_get_version(hsw, &version);
@@ -1986,17 +2192,15 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
boot_err:
sst_dsp_reset(hsw->dsp);
- sst_fw_free(hsw->sst_fw);
+ sst_fw_free_all(hsw->dsp);
fw_err:
dma_free_coherent(hsw->dsp->dma_dev, SST_HSW_DX_CONTEXT_SIZE,
hsw->dx_context, hsw->dx_context_paddr);
dma_err:
sst_dsp_free(hsw->dsp);
-dsp_err:
- kthread_stop(hsw->tx_thread);
-err_free_msg:
- kfree(hsw->msg);
-
+dsp_new_err:
+ sst_ipc_fini(ipc);
+ipc_init_err:
return ret;
}
EXPORT_SYMBOL_GPL(sst_hsw_dsp_init);
@@ -2010,7 +2214,6 @@ void sst_hsw_dsp_free(struct device *dev, struct sst_pdata *pdata)
dma_free_coherent(hsw->dsp->dma_dev, SST_HSW_DX_CONTEXT_SIZE,
hsw->dx_context, hsw->dx_context_paddr);
sst_dsp_free(hsw->dsp);
- kthread_stop(hsw->tx_thread);
- kfree(hsw->msg);
+ sst_ipc_fini(&hsw->ipc);
}
EXPORT_SYMBOL_GPL(sst_hsw_dsp_free);
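
A quick aside on the Waves parameter plumbing added above: the driver keeps a flat buffer of WAVES_PARAM_LINES lines of WAVES_PARAM_COUNT bytes each, advances a write index as lines are stored, wraps a read index when lines are read back, and sst_hsw_launch_param_buf() replays every stored line over IPC (used on runtime resume in the PCM patch further down). The fragment below is a self-contained user-space model of that bookkeeping, not the driver code itself; the store helper in particular is an assumption, since sst_hsw_store_param_line() is not part of the hunk shown here.

#include <stdint.h>
#include <string.h>

#define WAVES_PARAM_COUNT 128	/* bytes per parameter line */
#define WAVES_PARAM_LINES 160	/* lines the buffer can hold */

struct param_buf {
	uint8_t line[WAVES_PARAM_LINES][WAVES_PARAM_COUNT];
	int idx_w;	/* next line to store */
	int idx_r;	/* next line to read back */
};

static void param_buf_reset(struct param_buf *b)
{
	memset(b, 0, sizeof(*b));
}

/* assumed store behaviour: refuse to overflow the line array */
static int param_buf_store(struct param_buf *b, const uint8_t *src)
{
	if (b->idx_w > WAVES_PARAM_LINES - 1)
		return -1;
	memcpy(b->line[b->idx_w++], src, WAVES_PARAM_COUNT);
	return 0;
}

/* read back one line; wrap to the beginning at the end of the buffer,
 * mirroring the check in sst_hsw_load_param_line() above */
static int param_buf_load(struct param_buf *b, uint8_t *dst)
{
	memcpy(dst, b->line[b->idx_r++], WAVES_PARAM_COUNT);
	if (b->idx_r > WAVES_PARAM_LINES - 1)
		b->idx_r = 0;
	return 0;
}

/* replay lines 0..idx_w-1 in order, as the driver does through
 * sst_hsw_module_set_param() in sst_hsw_launch_param_buf() */
static void param_buf_replay(struct param_buf *b,
			     void (*push)(const uint8_t *line))
{
	int i;

	for (i = 0; i < b->idx_w; i++)
		push(b->line[i]);
}
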
diff --git a/sound/soc/intel/sst-haswell-ipc.h b/sound/soc/intel/haswell/sst-haswell-ipc.h
index 858096041cb1..06d71aefa1fe 100644
--- a/sound/soc/intel/sst-haswell-ipc.h
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.h
@@ -37,6 +37,9 @@
#define SST_HSW_IPC_MAX_PAYLOAD_SIZE 400
#define SST_HSW_MAX_INFO_SIZE 64
#define SST_HSW_BUILD_HASH_LENGTH 40
+#define SST_HSW_IPC_MAX_SHORT_PARAMETER_SIZE 500
+#define WAVES_PARAM_COUNT 128
+#define WAVES_PARAM_LINES 160
struct sst_hsw;
struct sst_hsw_stream;
@@ -187,6 +190,28 @@ enum sst_hsw_performance_action {
SST_HSW_PERF_STOP = 1,
};
+struct sst_hsw_transfer_info {
+ uint32_t destination; /* destination address */
+ uint32_t reverse:1; /* if 1 data flows from destination */
+ uint32_t size:31; /* transfer size in bytes.*/
+ uint16_t first_page_offset; /* offset to data in the first page. */
+ uint8_t packed_pages; /* page addresses. Each occupies 20 bits */
+} __attribute__((packed));
+
+struct sst_hsw_transfer_list {
+ uint32_t transfers_count;
+ struct sst_hsw_transfer_info transfers;
+} __attribute__((packed));
+
+struct sst_hsw_transfer_parameter {
+ uint32_t parameter_id;
+ uint32_t data_size;
+ union {
+ uint8_t data[1];
+ struct sst_hsw_transfer_list transfer_list;
+ };
+} __attribute__((packed));
+
/* SST firmware module info */
struct sst_hsw_module_info {
u8 name[SST_HSW_MAX_INFO_SIZE];
@@ -215,6 +240,12 @@ struct sst_hsw_fx_enable {
struct sst_hsw_memory_info persistent_mem;
} __attribute__((packed));
+struct sst_hsw_ipc_module_config {
+ struct sst_hsw_module_map map;
+ struct sst_hsw_memory_info persistent_mem;
+ struct sst_hsw_memory_info scratch_mem;
+} __attribute__((packed));
+
struct sst_hsw_get_fx_param {
u32 parameter_id;
u32 param_size;
@@ -467,6 +498,28 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata);
void sst_hsw_dsp_free(struct device *dev, struct sst_pdata *pdata);
struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw);
+/* fw module function */
+void sst_hsw_init_module_state(struct sst_hsw *hsw);
+bool sst_hsw_is_module_loaded(struct sst_hsw *hsw, u32 module_id);
+bool sst_hsw_is_module_active(struct sst_hsw *hsw, u32 module_id);
+void sst_hsw_set_module_enabled_rtd3(struct sst_hsw *hsw, u32 module_id);
+void sst_hsw_set_module_disabled_rtd3(struct sst_hsw *hsw, u32 module_id);
+bool sst_hsw_is_module_enabled_rtd3(struct sst_hsw *hsw, u32 module_id);
+void sst_hsw_reset_param_buf(struct sst_hsw *hsw);
+int sst_hsw_store_param_line(struct sst_hsw *hsw, u8 *buf);
+int sst_hsw_load_param_line(struct sst_hsw *hsw, u8 *buf);
+int sst_hsw_launch_param_buf(struct sst_hsw *hsw);
+
+int sst_hsw_module_load(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id, char *name);
+int sst_hsw_module_enable(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id);
+int sst_hsw_module_disable(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id);
+int sst_hsw_module_set_param(struct sst_hsw *hsw,
+ u32 module_id, u32 instance_id, u32 parameter_id,
+ u32 param_size, char *param);
+
/* runtime module management */
struct sst_module_runtime *sst_hsw_runtime_module_create(struct sst_hsw *hsw,
int mod_id, int offset);
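
One detail worth spelling out from the header above: sst_hsw_module_set_param() computes its mailbox payload as param_size + sizeof(struct sst_hsw_transfer_parameter) - sizeof(struct sst_hsw_transfer_list), i.e. the raw parameter bytes plus the parameter_id/data_size header in front of the union. The standalone snippet below copies the packed layouts so the arithmetic can be checked in user space; on common GCC/clang ABIs it comes out to param_size + 8.

#include <stdint.h>
#include <stdio.h>

struct transfer_info {
	uint32_t destination;
	uint32_t reverse:1;
	uint32_t size:31;
	uint16_t first_page_offset;
	uint8_t packed_pages;
} __attribute__((packed));

struct transfer_list {
	uint32_t transfers_count;
	struct transfer_info transfers;
} __attribute__((packed));

struct transfer_parameter {
	uint32_t parameter_id;
	uint32_t data_size;
	union {
		uint8_t data[1];
		struct transfer_list transfer_list;
	};
} __attribute__((packed));

int main(void)
{
	size_t param_size = 128;	/* WAVES_PARAM_COUNT */
	size_t payload = param_size + sizeof(struct transfer_parameter)
			 - sizeof(struct transfer_list);

	/* expected on common ABIs: param_size + 8, i.e. the
	 * parameter_id/data_size header in front of the raw bytes */
	printf("payload_size = %zu\n", payload);
	return 0;
}
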
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index 7e21e8f85885..23ae0400d6db 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -29,9 +29,9 @@
#include <sound/tlv.h>
#include <sound/compress_driver.h>
-#include "sst-haswell-ipc.h"
-#include "sst-dsp-priv.h"
-#include "sst-dsp.h"
+#include "../haswell/sst-haswell-ipc.h"
+#include "../common/sst-dsp-priv.h"
+#include "../common/sst-dsp.h"
#define HSW_PCM_COUNT 6
#define HSW_VOLUME_MAX 0x7FFFFFFF /* 0dB */
@@ -137,6 +137,7 @@ struct hsw_priv_data {
struct device *dev;
enum hsw_pm_state pm_state;
struct snd_soc_card *soc_card;
+ struct sst_module_runtime *runtime_waves; /* sound effect module */
/* page tables */
struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];
@@ -318,6 +319,93 @@ static int hsw_volume_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int hsw_waves_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
+ struct sst_hsw *hsw = pdata->hsw;
+ enum sst_hsw_module_id id = SST_HSW_MODULE_WAVES;
+
+ ucontrol->value.integer.value[0] =
+ (sst_hsw_is_module_active(hsw, id) ||
+ sst_hsw_is_module_enabled_rtd3(hsw, id));
+ return 0;
+}
+
+static int hsw_waves_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
+ struct sst_hsw *hsw = pdata->hsw;
+ int ret = 0;
+ enum sst_hsw_module_id id = SST_HSW_MODULE_WAVES;
+ bool switch_on = (bool)ucontrol->value.integer.value[0];
+
+ /* if the module is loaded on the DSP, apply the user setting to it
+ * through IPC; if not, store the setting so it can be applied when
+ * the module is enabled again on resume */
+ if (sst_hsw_is_module_loaded(hsw, id)) {
+ if (switch_on == sst_hsw_is_module_active(hsw, id))
+ return 0;
+
+ if (switch_on)
+ ret = sst_hsw_module_enable(hsw, id, 0);
+ else
+ ret = sst_hsw_module_disable(hsw, id, 0);
+ } else {
+ if (switch_on == sst_hsw_is_module_enabled_rtd3(hsw, id))
+ return 0;
+
+ if (switch_on)
+ sst_hsw_set_module_enabled_rtd3(hsw, id);
+ else
+ sst_hsw_set_module_disabled_rtd3(hsw, id);
+ }
+
+ return ret;
+}
+
+static int hsw_waves_param_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
+ struct sst_hsw *hsw = pdata->hsw;
+
+ /* return a matching line from param buffer */
+ return sst_hsw_load_param_line(hsw, ucontrol->value.bytes.data);
+}
+
+static int hsw_waves_param_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
+ struct sst_hsw *hsw = pdata->hsw;
+ int ret;
+ enum sst_hsw_module_id id = SST_HSW_MODULE_WAVES;
+ int param_id = ucontrol->value.bytes.data[0];
+ int param_size = WAVES_PARAM_COUNT;
+
+ /* clear param buffer and reset buffer index */
+ if (param_id == 0xFF) {
+ sst_hsw_reset_param_buf(hsw);
+ return 0;
+ }
+
+ /* store params into buffer */
+ ret = sst_hsw_store_param_line(hsw, ucontrol->value.bytes.data);
+ if (ret < 0)
+ return ret;
+
+ if (sst_hsw_is_module_active(hsw, id))
+ ret = sst_hsw_module_set_param(hsw, id, 0, param_id,
+ param_size, ucontrol->value.bytes.data);
+ return ret;
+}
+
/* TLV used by both global and stream volumes */
static const DECLARE_TLV_DB_SCALE(hsw_vol_tlv, -9000, 300, 1);
@@ -339,6 +427,12 @@ static const struct snd_kcontrol_new hsw_volume_controls[] = {
SOC_DOUBLE_EXT_TLV("Mic Capture Volume", 4, 0, 8,
ARRAY_SIZE(volume_map) - 1, 0,
hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
+ /* enable/disable module waves */
+ SOC_SINGLE_BOOL_EXT("Waves Switch", 0,
+ hsw_waves_switch_get, hsw_waves_switch_put),
+ /* set parameters to module waves */
+ SND_SOC_BYTES_EXT("Waves Set Param", WAVES_PARAM_COUNT,
+ hsw_waves_param_get, hsw_waves_param_put),
};
/* Create DMA buffer page table for DSP */
@@ -807,6 +901,14 @@ static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
pcm_data->runtime->persistent_offset;
}
+ /* create runtime blocks for module waves */
+ if (sst_hsw_is_module_loaded(hsw, SST_HSW_MODULE_WAVES)) {
+ pdata->runtime_waves = sst_hsw_runtime_module_create(hsw,
+ SST_HSW_MODULE_WAVES, 0);
+ if (pdata->runtime_waves == NULL)
+ goto err;
+ }
+
return 0;
err:
@@ -820,14 +922,17 @@ err:
static void hsw_pcm_free_modules(struct hsw_priv_data *pdata)
{
+ struct sst_hsw *hsw = pdata->hsw;
struct hsw_pcm_data *pcm_data;
int i;
for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
-
sst_hsw_runtime_module_free(pcm_data->runtime);
}
+ if (sst_hsw_is_module_loaded(hsw, SST_HSW_MODULE_WAVES)) {
+ sst_hsw_runtime_module_free(pdata->runtime_waves);
+ }
}
static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
@@ -984,7 +1089,9 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
}
/* allocate runtime modules */
- hsw_pcm_create_modules(priv_data);
+ ret = hsw_pcm_create_modules(priv_data);
+ if (ret < 0)
+ goto err;
/* enable runtime PM with auto suspend */
pm_runtime_set_autosuspend_delay(platform->dev,
@@ -996,7 +1103,7 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
return 0;
err:
- for (;i >= 0; i--) {
+ for (--i; i >= 0; i--) {
if (hsw_dais[i].playback.channels_min)
snd_dma_free_pages(&priv_data->dmab[i][0]);
if (hsw_dais[i].capture.channels_min)
@@ -1101,10 +1208,18 @@ static int hsw_pcm_runtime_suspend(struct device *dev)
{
struct hsw_priv_data *pdata = dev_get_drvdata(dev);
struct sst_hsw *hsw = pdata->hsw;
+ int ret;
if (pdata->pm_state >= HSW_PM_STATE_RTD3)
return 0;
+ /* fw modules will be unloaded on RTD3, set a flag so the
+ * module can be re-enabled on resume */
+ if (sst_hsw_is_module_active(hsw, SST_HSW_MODULE_WAVES)) {
+ ret = sst_hsw_module_disable(hsw, SST_HSW_MODULE_WAVES, 0);
+ if (ret < 0)
+ return ret;
+ sst_hsw_set_module_enabled_rtd3(hsw, SST_HSW_MODULE_WAVES);
+ }
sst_hsw_dsp_runtime_suspend(hsw);
sst_hsw_dsp_runtime_sleep(hsw);
pdata->pm_state = HSW_PM_STATE_RTD3;
@@ -1139,6 +1254,19 @@ static int hsw_pcm_runtime_resume(struct device *dev)
else if (ret == 1) /* no action required */
return 0;
+ /* check the flag on resume and restore module waves if needed */
+ if (sst_hsw_is_module_enabled_rtd3(hsw, SST_HSW_MODULE_WAVES)) {
+ ret = sst_hsw_module_enable(hsw, SST_HSW_MODULE_WAVES, 0);
+ if (ret < 0)
+ return ret;
+ /* put parameters from buffer to dsp */
+ ret = sst_hsw_launch_param_buf(hsw);
+ if (ret < 0)
+ return ret;
+ /* unset flag */
+ sst_hsw_set_module_disabled_rtd3(hsw, SST_HSW_MODULE_WAVES);
+ }
+
pdata->pm_state = HSW_PM_STATE_D0;
return ret;
}
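
For reference, the byte-blob convention behind the new "Waves Set Param" control (see hsw_waves_param_put() above): the control is always WAVES_PARAM_COUNT bytes, byte 0 carries the parameter id, id 0xFF clears the stored parameter buffer, and otherwise the whole blob is stored and, if the module is live, forwarded as-is over IPC. The helper below only formats such a blob as a sketch; how the payload is laid out after the id byte is firmware-defined, and actually writing the blob to the control (alsa-lib, tinyalsa, ...) is deliberately left out.

#include <stdint.h>
#include <string.h>

#define WAVES_PARAM_COUNT 128

/* blob that asks the driver to reset its stored parameter lines */
static void waves_blob_reset(uint8_t blob[WAVES_PARAM_COUNT])
{
	memset(blob, 0, WAVES_PARAM_COUNT);
	blob[0] = 0xFF;
}

/* blob carrying one parameter; payload placement after the id byte is
 * an assumption for illustration, the driver itself forwards all
 * WAVES_PARAM_COUNT bytes untouched */
static int waves_blob_param(uint8_t blob[WAVES_PARAM_COUNT], uint8_t id,
			    const uint8_t *payload, size_t len)
{
	if (id == 0xFF || len > WAVES_PARAM_COUNT - 1)
		return -1;
	memset(blob, 0, WAVES_PARAM_COUNT);
	blob[0] = id;
	memcpy(&blob[1], payload, len);
	return 0;
}
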
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 07f77815a586..b05fb1c1a848 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -58,6 +58,12 @@
#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8
+#define JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 24
+#define JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 16
+#define JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_MASK \
+ (0xf << JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET)
+#define JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_MASK \
+ (0x1f << JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET)
#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19)
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16)
@@ -79,6 +85,7 @@
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET 16
#define JZ_AIC_I2S_FMT_DISABLE_BIT_CLK BIT(12)
+#define JZ_AIC_I2S_FMT_DISABLE_BIT_ICLK BIT(13)
#define JZ_AIC_I2S_FMT_ENABLE_SYS_CLK BIT(4)
#define JZ_AIC_I2S_FMT_MSB BIT(0)
@@ -87,6 +94,13 @@
#define JZ_AIC_CLK_DIV_MASK 0xf
#define I2SDIV_DV_SHIFT 8
#define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
+#define I2SDIV_IDV_SHIFT 8
+#define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
+
+enum jz47xx_i2s_version {
+ JZ_I2S_JZ4740,
+ JZ_I2S_JZ4780,
+};
struct jz4740_i2s {
struct resource *mem;
@@ -98,6 +112,8 @@ struct jz4740_i2s {
struct snd_dmaengine_dai_dma_data playback_dma_data;
struct snd_dmaengine_dai_dma_data capture_dma_data;
+
+ enum jz47xx_i2s_version version;
};
static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s,
@@ -267,13 +283,22 @@ static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
ctrl |= JZ_AIC_CTRL_MONO_TO_STEREO;
else
ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO;
+
+ div_reg &= ~I2SDIV_DV_MASK;
+ div_reg |= (div - 1) << I2SDIV_DV_SHIFT;
} else {
ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK;
ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET;
+
+ if (i2s->version >= JZ_I2S_JZ4780) {
+ div_reg &= ~I2SDIV_IDV_MASK;
+ div_reg |= (div - 1) << I2SDIV_IDV_SHIFT;
+ } else {
+ div_reg &= ~I2SDIV_DV_MASK;
+ div_reg |= (div - 1) << I2SDIV_DV_SHIFT;
+ }
}
- div_reg &= ~I2SDIV_DV_MASK;
- div_reg |= (div - 1) << I2SDIV_DV_SHIFT;
jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
jz4740_i2s_write(i2s, JZ_REG_AIC_CLK_DIV, div_reg);
@@ -369,11 +394,19 @@ static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data,
&i2s->capture_dma_data);
- conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
- (8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
- JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
- JZ_AIC_CONF_I2S |
- JZ_AIC_CONF_INTERNAL_CODEC;
+ if (i2s->version >= JZ_I2S_JZ4780) {
+ conf = (7 << JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
+ (8 << JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
+ JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
+ JZ_AIC_CONF_I2S |
+ JZ_AIC_CONF_INTERNAL_CODEC;
+ } else {
+ conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
+ (8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
+ JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
+ JZ_AIC_CONF_I2S |
+ JZ_AIC_CONF_INTERNAL_CODEC;
+ }
jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, JZ_AIC_CONF_RESET);
jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
@@ -422,13 +455,34 @@ static struct snd_soc_dai_driver jz4740_i2s_dai = {
.resume = jz4740_i2s_resume,
};
+static struct snd_soc_dai_driver jz4780_i2s_dai = {
+ .probe = jz4740_i2s_dai_probe,
+ .remove = jz4740_i2s_dai_remove,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = JZ4740_I2S_FMTS,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = JZ4740_I2S_FMTS,
+ },
+ .ops = &jz4740_i2s_dai_ops,
+ .suspend = jz4740_i2s_suspend,
+ .resume = jz4740_i2s_resume,
+};
+
static const struct snd_soc_component_driver jz4740_i2s_component = {
.name = "jz4740-i2s",
};
#ifdef CONFIG_OF
static const struct of_device_id jz4740_of_matches[] = {
- { .compatible = "ingenic,jz4740-i2s" },
+ { .compatible = "ingenic,jz4740-i2s", .data = (void *)JZ_I2S_JZ4740 },
+ { .compatible = "ingenic,jz4780-i2s", .data = (void *)JZ_I2S_JZ4780 },
{ /* sentinel */ }
};
#endif
@@ -438,11 +492,16 @@ static int jz4740_i2s_dev_probe(struct platform_device *pdev)
struct jz4740_i2s *i2s;
struct resource *mem;
int ret;
+ const struct of_device_id *match;
i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
if (!i2s)
return -ENOMEM;
+ match = of_match_device(jz4740_of_matches, &pdev->dev);
+ if (match)
+ i2s->version = (enum jz47xx_i2s_version)match->data;
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2s->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(i2s->base))
@@ -460,8 +519,13 @@ static int jz4740_i2s_dev_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2s);
- ret = devm_snd_soc_register_component(&pdev->dev,
- &jz4740_i2s_component, &jz4740_i2s_dai, 1);
+ if (i2s->version == JZ_I2S_JZ4780)
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &jz4740_i2s_component, &jz4780_i2s_dai, 1);
+ else
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &jz4740_i2s_component, &jz4740_i2s_dai, 1);
+
if (ret)
return ret;
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index d19483081f9b..3a36d60e1785 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -643,7 +643,7 @@ static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id mvebu_audio_of_match[] = {
+static const struct of_device_id mvebu_audio_of_match[] = {
{ .compatible = "marvell,kirkwood-audio" },
{ .compatible = "marvell,dove-audio" },
{ .compatible = "marvell,armada370-audio" },
diff --git a/sound/soc/nuc900/nuc900-audio.h b/sound/soc/nuc900/nuc900-audio.h
index 59f7e8ed1a68..d0b725705914 100644
--- a/sound/soc/nuc900/nuc900-audio.h
+++ b/sound/soc/nuc900/nuc900-audio.h
@@ -100,10 +100,7 @@
struct nuc900_audio {
void __iomem *mmio;
spinlock_t lock;
- dma_addr_t dma_addr[2];
- unsigned long buffersize[2];
unsigned long irq_num;
- struct snd_pcm_substream *substream;
struct resource *res;
struct clk *clk;
struct device *dev;
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index b809fa909e4d..5ae5ca15b6d6 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -42,29 +42,10 @@ static const struct snd_pcm_hardware nuc900_pcm_hardware = {
static int nuc900_dma_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct nuc900_audio *nuc900_audio = runtime->private_data;
- unsigned long flags;
- int ret = 0;
-
- ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
- if (ret < 0)
- return ret;
-
- spin_lock_irqsave(&nuc900_audio->lock, flags);
-
- nuc900_audio->substream = substream;
- nuc900_audio->dma_addr[substream->stream] = runtime->dma_addr;
- nuc900_audio->buffersize[substream->stream] =
- params_buffer_bytes(params);
-
- spin_unlock_irqrestore(&nuc900_audio->lock, flags);
-
- return ret;
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
}
-static void nuc900_update_dma_register(struct snd_pcm_substream *substream,
- dma_addr_t dma_addr, size_t count)
+static void nuc900_update_dma_register(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct nuc900_audio *nuc900_audio = runtime->private_data;
@@ -78,8 +59,8 @@ static void nuc900_update_dma_register(struct snd_pcm_substream *substream,
mmio_len = nuc900_audio->mmio + ACTL_RDST_LENGTH;
}
- AUDIO_WRITE(mmio_addr, dma_addr);
- AUDIO_WRITE(mmio_len, count);
+ AUDIO_WRITE(mmio_addr, runtime->dma_addr);
+ AUDIO_WRITE(mmio_len, runtime->dma_bytes);
}
static void nuc900_dma_start(struct snd_pcm_substream *substream)
@@ -170,9 +151,7 @@ static int nuc900_dma_prepare(struct snd_pcm_substream *substream)
spin_lock_irqsave(&nuc900_audio->lock, flags);
- nuc900_update_dma_register(substream,
- nuc900_audio->dma_addr[substream->stream],
- nuc900_audio->buffersize[substream->stream]);
+ nuc900_update_dma_register(substream);
val = AUDIO_READ(nuc900_audio->mmio + ACTL_RESET);
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index a2cd3486ac55..6768e4f7d7d0 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -100,17 +100,19 @@ config SND_OMAP_SOC_OMAP_TWL4030
config SND_OMAP_SOC_OMAP_ABE_TWL6040
tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
- depends on TWL6040_CORE && SND_OMAP_SOC && (ARCH_OMAP4 || COMPILE_TEST)
+ depends on TWL6040_CORE && SND_OMAP_SOC && (ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST)
select SND_OMAP_SOC_DMIC
select SND_OMAP_SOC_MCPDM
select SND_SOC_TWL6040
select SND_SOC_DMIC
+ select COMMON_CLK_PALMAS if MFD_PALMAS
help
Say Y if you want to add support for SoC audio on OMAP boards using
ABE and twl6040 codec. This driver currently supports:
- SDP4430/Blaze boards
- PandaBoard (4430)
- PandaBoardES (4460)
+ - omap5-uevm (5432)
config SND_OMAP_SOC_OMAP3_PANDORA
tristate "SoC Audio support for OMAP3 Pandora"
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 706613077c15..16cc95fa4573 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -479,8 +479,8 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
/* Add hook switch - can be used to control the codec from userspace
* even if line discipline fails */
- ret = snd_soc_jack_new(rtd->codec, "hook_switch",
- SND_JACK_HEADSET, &ams_delta_hook_switch);
+ ret = snd_soc_card_jack_new(card, "hook_switch", SND_JACK_HEADSET,
+ &ams_delta_hook_switch, NULL, 0);
if (ret)
dev_warn(card->dev,
"Failed to allocate resources for hook switch, "
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 5d7f9cebe041..dcb5336b5698 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -98,12 +98,11 @@ static int n810_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_codec *codec = rtd->codec;
snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2);
- n810_ext_control(&codec->dapm);
+ n810_ext_control(&rtd->card->dapm);
return clk_prepare_enable(sys_clkout2);
}
@@ -255,24 +254,6 @@ static const struct snd_kcontrol_new aic33_n810_controls[] = {
n810_get_input, n810_set_input),
};
-static int n810_aic33_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- /* Not connected */
- snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
- snd_soc_dapm_nc_pin(dapm, "HPLCOM");
- snd_soc_dapm_nc_pin(dapm, "HPRCOM");
- snd_soc_dapm_nc_pin(dapm, "MIC3L");
- snd_soc_dapm_nc_pin(dapm, "MIC3R");
- snd_soc_dapm_nc_pin(dapm, "LINE1R");
- snd_soc_dapm_nc_pin(dapm, "LINE2L");
- snd_soc_dapm_nc_pin(dapm, "LINE2R");
-
- return 0;
-}
-
/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link n810_dai = {
.name = "TLV320AIC33",
@@ -283,7 +264,6 @@ static struct snd_soc_dai_link n810_dai = {
.codec_dai_name = "tlv320aic3x-hifi",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
- .init = n810_aic33_init,
.ops = &n810_ops,
};
@@ -300,6 +280,7 @@ static struct snd_soc_card snd_soc_n810 = {
.num_dapm_widgets = ARRAY_SIZE(aic33_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
+ .fully_routed = true,
};
static struct platform_device *n810_snd_device;
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index b9c65f1ad5a8..0843a68f277c 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -182,17 +182,17 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
/* Headset jack detection only if it is supported */
if (priv->jack_detection) {
- ret = snd_soc_jack_new(codec, "Headset Jack",
- SND_JACK_HEADSET, &hs_jack);
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET, &hs_jack,
+ hs_jack_pins,
+ ARRAY_SIZE(hs_jack_pins));
if (ret)
return ret;
- ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
- hs_jack_pins);
twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET);
}
- return ret;
+ return 0;
}
static const struct snd_soc_dapm_route dmic_audio_map[] = {
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index f7eb42aa3f38..4775da4c4db5 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -142,8 +142,6 @@ static int hdmi_dai_hw_params(struct snd_pcm_substream *substream,
iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
- iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
-
iec->status[1] = IEC958_AES1_CON_GENERAL;
iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 1343ecbf0bd5..6bb623a2a4df 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -39,7 +39,7 @@
#define pcm_omap1510() 0
#endif
-static const struct snd_pcm_hardware omap_pcm_hardware = {
+static struct snd_pcm_hardware omap_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
@@ -53,6 +53,24 @@ static const struct snd_pcm_hardware omap_pcm_hardware = {
.buffer_bytes_max = 128 * 1024,
};
+/* sDMA supports only 1, 2, and 4 byte transfer elements. */
+static void omap_pcm_limit_supported_formats(void)
+{
+ int i;
+
+ for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+ switch (snd_pcm_format_physical_width(i)) {
+ case 8:
+ case 16:
+ case 32:
+ omap_pcm_hardware.formats |= (1LL << i);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
/* this may get called several times by oss emulation */
static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
@@ -235,6 +253,7 @@ static struct snd_soc_platform_driver omap_soc_platform = {
int omap_pcm_platform_register(struct device *dev)
{
+ omap_pcm_limit_supported_formats();
return devm_snd_soc_register_platform(dev, &omap_soc_platform);
}
EXPORT_SYMBOL_GPL(omap_pcm_platform_register);
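
The new omap_pcm_limit_supported_formats() above builds the formats mask at registration time instead of hard-coding it: every PCM format whose physical sample width is 8, 16 or 32 bits is allowed, and everything else (e.g. 3-byte packed 24-bit formats) is filtered out because sDMA cannot move it. A user-space analogue with alsa-lib, just to list which formats pass the same filter (build with -lasound):

#include <alsa/asoundlib.h>
#include <stdio.h>

int main(void)
{
	snd_pcm_format_t f;

	for (f = 0; f <= SND_PCM_FORMAT_LAST; f++) {
		int width = snd_pcm_format_physical_width(f);

		if (width == 8 || width == 16 || width == 32)
			printf("ok: %s (%d bits)\n",
			       snd_pcm_format_name(f), width);
	}
	return 0;
}
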
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c
index fb1f6bb87cd4..3673ada43bfb 100644
--- a/sound/soc/omap/omap-twl4030.c
+++ b/sound/soc/omap/omap-twl4030.c
@@ -170,14 +170,10 @@ static int omap_twl4030_init(struct snd_soc_pcm_runtime *rtd)
if (priv->jack_detect > 0) {
hs_jack_gpios[0].gpio = priv->jack_detect;
- ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
- &priv->hs_jack);
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_pins(&priv->hs_jack,
- ARRAY_SIZE(hs_jack_pins),
- hs_jack_pins);
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET, &priv->hs_jack,
+ hs_jack_pins,
+ ARRAY_SIZE(hs_jack_pins));
if (ret)
return ret;
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 7f299357c2d2..c2ddf0fbfa28 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -311,9 +311,9 @@ static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
}
/* AV jack detection */
- err = snd_soc_jack_new(codec, "AV Jack",
- SND_JACK_HEADSET | SND_JACK_VIDEOOUT,
- &rx51_av_jack);
+ err = snd_soc_card_jack_new(rtd->card, "AV Jack",
+ SND_JACK_HEADSET | SND_JACK_VIDEOOUT,
+ &rx51_av_jack, NULL, 0);
if (err) {
dev_err(card->dev, "Failed to add AV Jack\n");
return err;
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 73eb5ddf9753..9f8be7cd567e 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -126,17 +126,12 @@ static const struct snd_soc_dapm_route hx4700_audio_map[] = {
*/
static int hx4700_ak4641_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
int err;
/* Jack detection API stuff */
- err = snd_soc_jack_new(codec, "Headphone Jack",
- SND_JACK_HEADPHONE, &hs_jack);
- if (err)
- return err;
-
- err = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pin),
- hs_jack_pin);
+ err = snd_soc_card_jack_new(rtd->card, "Headphone Jack",
+ SND_JACK_HEADPHONE, &hs_jack, hs_jack_pin,
+ ARRAY_SIZE(hs_jack_pin));
if (err)
return err;
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 910336c5ebeb..c20bbc042425 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -75,17 +75,12 @@ static struct snd_soc_card palm27x_asoc;
static int palm27x_ac97_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
int err;
/* Jack detection API stuff */
- err = snd_soc_jack_new(codec, "Headphone Jack",
- SND_JACK_HEADPHONE, &hs_jack);
- if (err)
- return err;
-
- err = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
- hs_jack_pins);
+ err = snd_soc_card_jack_new(rtd->card, "Headphone Jack",
+ SND_JACK_HEADPHONE, &hs_jack, hs_jack_pins,
+ ARRAY_SIZE(hs_jack_pins));
if (err)
return err;
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
index 5001dbb9b257..1753c7d9e760 100644
--- a/sound/soc/pxa/ttc-dkb.c
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -78,15 +78,12 @@ static int ttc_pm860x_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_codec *codec = rtd->codec;
/* Headset jack detection */
- snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE
- | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2,
- &hs_jack);
- snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
- hs_jack_pins);
- snd_soc_jack_new(codec, "Microphone Jack", SND_JACK_MICROPHONE,
- &mic_jack);
- snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
- mic_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphone Jack", SND_JACK_HEADPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2,
+ &hs_jack, hs_jack_pins, ARRAY_SIZE(hs_jack_pins));
+ snd_soc_card_jack_new(rtd->card, "Microphone Jack", SND_JACK_MICROPHONE,
+ &mic_jack, mic_jack_pins,
+ ARRAY_SIZE(mic_jack_pins));
/* headphone, microphone detection & headset short detection */
pm860x_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADPHONE,
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index 76ccb172d0a7..bcbfbe8303f7 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -143,13 +143,9 @@ static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_disable_pin(dapm, "MONO1");
/* Jack detection API stuff */
- ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
- &hs_jack);
- if (ret)
- goto err;
-
- ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
- hs_jack_pins);
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack", SND_JACK_HEADSET,
+ &hs_jack, hs_jack_pins,
+ ARRAY_SIZE(hs_jack_pins));
if (ret)
goto err;
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
new file mode 100644
index 000000000000..5f58e4f1bca9
--- /dev/null
+++ b/sound/soc/qcom/Kconfig
@@ -0,0 +1,25 @@
+config SND_SOC_QCOM
+ tristate "ASoC support for QCOM platforms"
+ help
+ Say Y or M if you want to add support for audio devices
+ on Qualcomm Technologies SoC-based platforms.
+
+config SND_SOC_LPASS_CPU
+ tristate
+ depends on SND_SOC_QCOM
+ select REGMAP_MMIO
+
+config SND_SOC_LPASS_PLATFORM
+ tristate
+ depends on SND_SOC_QCOM
+ select REGMAP_MMIO
+
+config SND_SOC_STORM
+ tristate "ASoC I2S support for Storm boards"
+ depends on (ARCH_QCOM && SND_SOC_QCOM) || COMPILE_TEST
+ select SND_SOC_LPASS_CPU
+ select SND_SOC_LPASS_PLATFORM
+ select SND_SOC_MAX98357A
+ help
+ Say Y or M if you want to add support for SoC audio on the
+ Qualcomm Technologies IPQ806X-based Storm board.
diff --git a/sound/soc/qcom/Makefile b/sound/soc/qcom/Makefile
new file mode 100644
index 000000000000..c5ce96c761c4
--- /dev/null
+++ b/sound/soc/qcom/Makefile
@@ -0,0 +1,11 @@
+# Platform
+snd-soc-lpass-cpu-objs := lpass-cpu.o
+snd-soc-lpass-platform-objs := lpass-platform.o
+
+obj-$(CONFIG_SND_SOC_LPASS_CPU) += snd-soc-lpass-cpu.o
+obj-$(CONFIG_SND_SOC_LPASS_PLATFORM) += snd-soc-lpass-platform.o
+
+# Machine
+snd-soc-storm-objs := storm.o
+
+obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
new file mode 100644
index 000000000000..dc790abaa331
--- /dev/null
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * lpass-cpu.c -- ALSA SoC CPU DAI driver for QTi LPASS
+ */
+
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include "lpass-lpaif-ipq806x.h"
+#include "lpass.h"
+
+static int lpass_cpu_daiops_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_set_rate(drvdata->mi2s_osr_clk, freq);
+ if (ret)
+ dev_err(dai->dev, "%s() error setting mi2s osrclk to %u: %d\n",
+ __func__, freq, ret);
+
+ return ret;
+}
+
+static int lpass_cpu_daiops_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->mi2s_osr_clk);
+ if (ret) {
+ dev_err(dai->dev, "%s() error in enabling mi2s osr clk: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(drvdata->mi2s_bit_clk);
+ if (ret) {
+ dev_err(dai->dev, "%s() error in enabling mi2s bit clk: %d\n",
+ __func__, ret);
+ clk_disable_unprepare(drvdata->mi2s_osr_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(drvdata->mi2s_bit_clk);
+ clk_disable_unprepare(drvdata->mi2s_osr_clk);
+}
+
+static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ snd_pcm_format_t format = params_format(params);
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+ unsigned int regval;
+ int bitwidth, ret;
+
+ bitwidth = snd_pcm_format_width(format);
+ if (bitwidth < 0) {
+ dev_err(dai->dev, "%s() invalid bit width given: %d\n",
+ __func__, bitwidth);
+ return bitwidth;
+ }
+
+ regval = LPAIF_I2SCTL_LOOPBACK_DISABLE |
+ LPAIF_I2SCTL_WSSRC_INTERNAL;
+
+ switch (bitwidth) {
+ case 16:
+ regval |= LPAIF_I2SCTL_BITWIDTH_16;
+ break;
+ case 24:
+ regval |= LPAIF_I2SCTL_BITWIDTH_24;
+ break;
+ case 32:
+ regval |= LPAIF_I2SCTL_BITWIDTH_32;
+ break;
+ default:
+ dev_err(dai->dev, "%s() invalid bitwidth given: %d\n",
+ __func__, bitwidth);
+ return -EINVAL;
+ }
+
+ switch (channels) {
+ case 1:
+ regval |= LPAIF_I2SCTL_SPKMODE_SD0;
+ regval |= LPAIF_I2SCTL_SPKMONO_MONO;
+ break;
+ case 2:
+ regval |= LPAIF_I2SCTL_SPKMODE_SD0;
+ regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
+ break;
+ case 4:
+ regval |= LPAIF_I2SCTL_SPKMODE_QUAD01;
+ regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
+ break;
+ case 6:
+ regval |= LPAIF_I2SCTL_SPKMODE_6CH;
+ regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
+ break;
+ case 8:
+ regval |= LPAIF_I2SCTL_SPKMODE_8CH;
+ regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
+ break;
+ default:
+ dev_err(dai->dev, "%s() invalid channels given: %u\n",
+ __func__, channels);
+ return -EINVAL;
+ }
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_I2SCTL_REG(LPAIF_I2S_PORT_MI2S), regval);
+ if (ret) {
+ dev_err(dai->dev, "%s() error writing to i2sctl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(drvdata->mi2s_bit_clk, rate * bitwidth * 2);
+ if (ret) {
+ dev_err(dai->dev, "%s() error setting mi2s bitclk to %u: %d\n",
+ __func__, rate * bitwidth * 2, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_I2SCTL_REG(LPAIF_I2S_PORT_MI2S), 0);
+ if (ret)
+ dev_err(dai->dev, "%s() error writing to i2sctl reg: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_I2SCTL_REG(LPAIF_I2S_PORT_MI2S),
+ LPAIF_I2SCTL_SPKEN_MASK, LPAIF_I2SCTL_SPKEN_ENABLE);
+ if (ret)
+ dev_err(dai->dev, "%s() error writing to i2sctl reg: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ int ret = -EINVAL;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_I2SCTL_REG(LPAIF_I2S_PORT_MI2S),
+ LPAIF_I2SCTL_SPKEN_MASK,
+ LPAIF_I2SCTL_SPKEN_ENABLE);
+ if (ret)
+ dev_err(dai->dev, "%s() error writing to i2sctl reg: %d\n",
+ __func__, ret);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_I2SCTL_REG(LPAIF_I2S_PORT_MI2S),
+ LPAIF_I2SCTL_SPKEN_MASK,
+ LPAIF_I2SCTL_SPKEN_DISABLE);
+ if (ret)
+ dev_err(dai->dev, "%s() error writing to i2sctl reg: %d\n",
+ __func__, ret);
+ break;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_dai_ops lpass_cpu_dai_ops = {
+ .set_sysclk = lpass_cpu_daiops_set_sysclk,
+ .startup = lpass_cpu_daiops_startup,
+ .shutdown = lpass_cpu_daiops_shutdown,
+ .hw_params = lpass_cpu_daiops_hw_params,
+ .hw_free = lpass_cpu_daiops_hw_free,
+ .prepare = lpass_cpu_daiops_prepare,
+ .trigger = lpass_cpu_daiops_trigger,
+};
+
+static int lpass_cpu_dai_probe(struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ /* ensure audio hardware is disabled */
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_I2SCTL_REG(LPAIF_I2S_PORT_MI2S), 0);
+ if (ret)
+ dev_err(dai->dev, "%s() error writing to i2sctl reg: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static struct snd_soc_dai_driver lpass_cpu_dai_driver = {
+ .playback = {
+ .stream_name = "lpass-cpu-playback",
+ .formats = SNDRV_PCM_FMTBIT_S16 |
+ SNDRV_PCM_FMTBIT_S24 |
+ SNDRV_PCM_FMTBIT_S32,
+ .rates = SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .probe = &lpass_cpu_dai_probe,
+ .ops = &lpass_cpu_dai_ops,
+};
+
+static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
+ .name = "lpass-cpu",
+};
+
+static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ int i;
+
+ for (i = 0; i < LPAIF_I2S_PORT_NUM; ++i)
+ if (reg == LPAIF_I2SCTL_REG(i))
+ return true;
+
+ for (i = 0; i < LPAIF_IRQ_PORT_NUM; ++i) {
+ if (reg == LPAIF_IRQEN_REG(i))
+ return true;
+ if (reg == LPAIF_IRQCLEAR_REG(i))
+ return true;
+ }
+
+ for (i = 0; i < LPAIF_RDMA_CHAN_NUM; ++i) {
+ if (reg == LPAIF_RDMACTL_REG(i))
+ return true;
+ if (reg == LPAIF_RDMABASE_REG(i))
+ return true;
+ if (reg == LPAIF_RDMABUFF_REG(i))
+ return true;
+ if (reg == LPAIF_RDMAPER_REG(i))
+ return true;
+ }
+
+ return false;
+}
+
+static bool lpass_cpu_regmap_readable(struct device *dev, unsigned int reg)
+{
+ int i;
+
+ for (i = 0; i < LPAIF_I2S_PORT_NUM; ++i)
+ if (reg == LPAIF_I2SCTL_REG(i))
+ return true;
+
+ for (i = 0; i < LPAIF_IRQ_PORT_NUM; ++i) {
+ if (reg == LPAIF_IRQEN_REG(i))
+ return true;
+ if (reg == LPAIF_IRQSTAT_REG(i))
+ return true;
+ }
+
+ for (i = 0; i < LPAIF_RDMA_CHAN_NUM; ++i) {
+ if (reg == LPAIF_RDMACTL_REG(i))
+ return true;
+ if (reg == LPAIF_RDMABASE_REG(i))
+ return true;
+ if (reg == LPAIF_RDMABUFF_REG(i))
+ return true;
+ if (reg == LPAIF_RDMACURR_REG(i))
+ return true;
+ if (reg == LPAIF_RDMAPER_REG(i))
+ return true;
+ }
+
+ return false;
+}
+
+static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ int i;
+
+ for (i = 0; i < LPAIF_IRQ_PORT_NUM; ++i)
+ if (reg == LPAIF_IRQSTAT_REG(i))
+ return true;
+
+ for (i = 0; i < LPAIF_RDMA_CHAN_NUM; ++i)
+ if (reg == LPAIF_RDMACURR_REG(i))
+ return true;
+
+ return false;
+}
+
+static const struct regmap_config lpass_cpu_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = LPAIF_RDMAPER_REG(LPAIF_RDMA_CHAN_MAX),
+ .writeable_reg = lpass_cpu_regmap_writeable,
+ .readable_reg = lpass_cpu_regmap_readable,
+ .volatile_reg = lpass_cpu_regmap_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int lpass_cpu_platform_probe(struct platform_device *pdev)
+{
+ struct lpass_data *drvdata;
+ struct device_node *dsp_of_node;
+ struct resource *res;
+ int ret;
+
+ dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
+ if (dsp_of_node) {
+ dev_err(&pdev->dev, "%s() DSP exists and holds audio resources\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct lpass_data),
+ GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, drvdata);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-lpaif");
+ if (!res) {
+ dev_err(&pdev->dev, "%s() error getting resource\n", __func__);
+ return -ENODEV;
+ }
+
+ drvdata->lpaif = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((void const __force *)drvdata->lpaif)) {
+ dev_err(&pdev->dev, "%s() error mapping reg resource: %ld\n",
+ __func__,
+ PTR_ERR((void const __force *)drvdata->lpaif));
+ return PTR_ERR((void const __force *)drvdata->lpaif);
+ }
+
+ drvdata->lpaif_map = devm_regmap_init_mmio(&pdev->dev, drvdata->lpaif,
+ &lpass_cpu_regmap_config);
+ if (IS_ERR(drvdata->lpaif_map)) {
+ dev_err(&pdev->dev, "%s() error initializing regmap: %ld\n",
+ __func__, PTR_ERR(drvdata->lpaif_map));
+ return PTR_ERR(drvdata->lpaif_map);
+ }
+
+ drvdata->mi2s_osr_clk = devm_clk_get(&pdev->dev, "mi2s-osr-clk");
+ if (IS_ERR(drvdata->mi2s_osr_clk)) {
+ dev_err(&pdev->dev, "%s() error getting mi2s-osr-clk: %ld\n",
+ __func__, PTR_ERR(drvdata->mi2s_osr_clk));
+ return PTR_ERR(drvdata->mi2s_osr_clk);
+ }
+
+ drvdata->mi2s_bit_clk = devm_clk_get(&pdev->dev, "mi2s-bit-clk");
+ if (IS_ERR(drvdata->mi2s_bit_clk)) {
+ dev_err(&pdev->dev, "%s() error getting mi2s-bit-clk: %ld\n",
+ __func__, PTR_ERR(drvdata->mi2s_bit_clk));
+ return PTR_ERR(drvdata->mi2s_bit_clk);
+ }
+
+ drvdata->ahbix_clk = devm_clk_get(&pdev->dev, "ahbix-clk");
+ if (IS_ERR(drvdata->ahbix_clk)) {
+ dev_err(&pdev->dev, "%s() error getting ahbix-clk: %ld\n",
+ __func__, PTR_ERR(drvdata->ahbix_clk));
+ return PTR_ERR(drvdata->ahbix_clk);
+ }
+
+ ret = clk_set_rate(drvdata->ahbix_clk, LPASS_AHBIX_CLOCK_FREQUENCY);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() error setting rate on ahbix_clk: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ dev_dbg(&pdev->dev, "%s() set ahbix_clk rate to %lu\n", __func__,
+ clk_get_rate(drvdata->ahbix_clk));
+
+ ret = clk_prepare_enable(drvdata->ahbix_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() error enabling ahbix_clk: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &lpass_cpu_comp_driver, &lpass_cpu_dai_driver, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() error registering cpu driver: %d\n",
+ __func__, ret);
+ goto err_clk;
+ }
+
+ ret = asoc_qcom_lpass_platform_register(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() error registering platform driver: %d\n",
+ __func__, ret);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(drvdata->ahbix_clk);
+ return ret;
+}
+
+static int lpass_cpu_platform_remove(struct platform_device *pdev)
+{
+ struct lpass_data *drvdata = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(drvdata->ahbix_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id lpass_cpu_device_id[] = {
+ { .compatible = "qcom,lpass-cpu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lpass_cpu_device_id);
+#endif
+
+static struct platform_driver lpass_cpu_platform_driver = {
+ .driver = {
+ .name = "lpass-cpu",
+ .of_match_table = of_match_ptr(lpass_cpu_device_id),
+ },
+ .probe = lpass_cpu_platform_probe,
+ .remove = lpass_cpu_platform_remove,
+};
+module_platform_driver(lpass_cpu_platform_driver);
+
+MODULE_DESCRIPTION("QTi LPASS CPU Driver");
+MODULE_LICENSE("GPL v2");
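
A small arithmetic note on the hw_params handler above: the MI2S bit clock is requested at rate * bitwidth * 2, independent of the channel count, so a 48 kHz / 16-bit stream asks for 1.536 MHz, while the OSR clock is whatever the machine driver passes through set_sysclk(). Sketch:

#include <stdio.h>

/* same computation lpass_cpu_daiops_hw_params() hands to clk_set_rate() */
static unsigned long mi2s_bitclk_hz(unsigned int rate, unsigned int bitwidth)
{
	return (unsigned long)rate * bitwidth * 2;
}

int main(void)
{
	/* 48 kHz, 16-bit -> 1536000 Hz */
	printf("%lu\n", mi2s_bitclk_hz(48000, 16));
	return 0;
}
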
diff --git a/sound/soc/qcom/lpass-lpaif-ipq806x.h b/sound/soc/qcom/lpass-lpaif-ipq806x.h
new file mode 100644
index 000000000000..dc423b888842
--- /dev/null
+++ b/sound/soc/qcom/lpass-lpaif-ipq806x.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * lpass-lpaif-ipq806x.h -- Definitions for the QTi LPAIF in the ipq806x LPASS
+ */
+
+#ifndef __LPASS_LPAIF_H__
+#define __LPASS_LPAIF_H__
+
+#define LPAIF_BANK_OFFSET 0x1000
+
+/* LPAIF I2S */
+
+#define LPAIF_I2SCTL_REG_BASE 0x0010
+#define LPAIF_I2SCTL_REG_STRIDE 0x4
+#define LPAIF_I2SCTL_REG_ADDR(addr, port) \
+ (LPAIF_I2SCTL_REG_BASE + (addr) + (LPAIF_I2SCTL_REG_STRIDE * (port)))
+
+enum lpaif_i2s_ports {
+ LPAIF_I2S_PORT_MIN = 0,
+
+ LPAIF_I2S_PORT_CODEC_SPK = 0,
+ LPAIF_I2S_PORT_CODEC_MIC = 1,
+ LPAIF_I2S_PORT_SEC_SPK = 2,
+ LPAIF_I2S_PORT_SEC_MIC = 3,
+ LPAIF_I2S_PORT_MI2S = 4,
+
+ LPAIF_I2S_PORT_MAX = 4,
+ LPAIF_I2S_PORT_NUM = 5,
+};
+
+#define LPAIF_I2SCTL_REG(port) LPAIF_I2SCTL_REG_ADDR(0x0, (port))
+
+#define LPAIF_I2SCTL_LOOPBACK_MASK 0x8000
+#define LPAIF_I2SCTL_LOOPBACK_SHIFT 15
+#define LPAIF_I2SCTL_LOOPBACK_DISABLE (0 << LPAIF_I2SCTL_LOOPBACK_SHIFT)
+#define LPAIF_I2SCTL_LOOPBACK_ENABLE (1 << LPAIF_I2SCTL_LOOPBACK_SHIFT)
+
+#define LPAIF_I2SCTL_SPKEN_MASK 0x4000
+#define LPAIF_I2SCTL_SPKEN_SHIFT 14
+#define LPAIF_I2SCTL_SPKEN_DISABLE (0 << LPAIF_I2SCTL_SPKEN_SHIFT)
+#define LPAIF_I2SCTL_SPKEN_ENABLE (1 << LPAIF_I2SCTL_SPKEN_SHIFT)
+
+#define LPAIF_I2SCTL_SPKMODE_MASK 0x3C00
+#define LPAIF_I2SCTL_SPKMODE_SHIFT 10
+#define LPAIF_I2SCTL_SPKMODE_NONE (0 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_SD0 (1 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_SD1 (2 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_SD2 (3 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_SD3 (4 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_QUAD01 (5 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_QUAD23 (6 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_6CH (7 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE_8CH (8 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+
+#define LPAIF_I2SCTL_SPKMONO_MASK 0x0200
+#define LPAIF_I2SCTL_SPKMONO_SHIFT 9
+#define LPAIF_I2SCTL_SPKMONO_STEREO (0 << LPAIF_I2SCTL_SPKMONO_SHIFT)
+#define LPAIF_I2SCTL_SPKMONO_MONO (1 << LPAIF_I2SCTL_SPKMONO_SHIFT)
+
+#define LPAIF_I2SCTL_WSSRC_MASK 0x0004
+#define LPAIF_I2SCTL_WSSRC_SHIFT 2
+#define LPAIF_I2SCTL_WSSRC_INTERNAL (0 << LPAIF_I2SCTL_WSSRC_SHIFT)
+#define LPAIF_I2SCTL_WSSRC_EXTERNAL (1 << LPAIF_I2SCTL_WSSRC_SHIFT)
+
+#define LPAIF_I2SCTL_BITWIDTH_MASK 0x0003
+#define LPAIF_I2SCTL_BITWIDTH_SHIFT 0
+#define LPAIF_I2SCTL_BITWIDTH_16 (0 << LPAIF_I2SCTL_BITWIDTH_SHIFT)
+#define LPAIF_I2SCTL_BITWIDTH_24 (1 << LPAIF_I2SCTL_BITWIDTH_SHIFT)
+#define LPAIF_I2SCTL_BITWIDTH_32 (2 << LPAIF_I2SCTL_BITWIDTH_SHIFT)
+
+/* LPAIF IRQ */
+
+#define LPAIF_IRQ_REG_BASE 0x3000
+#define LPAIF_IRQ_REG_STRIDE 0x1000
+#define LPAIF_IRQ_REG_ADDR(addr, port) \
+ (LPAIF_IRQ_REG_BASE + (addr) + (LPAIF_IRQ_REG_STRIDE * (port)))
+
+enum lpaif_irq_ports {
+ LPAIF_IRQ_PORT_MIN = 0,
+
+ LPAIF_IRQ_PORT_HOST = 0,
+ LPAIF_IRQ_PORT_ADSP = 1,
+
+ LPAIF_IRQ_PORT_MAX = 2,
+ LPAIF_IRQ_PORT_NUM = 3,
+};
+
+#define LPAIF_IRQEN_REG(port) LPAIF_IRQ_REG_ADDR(0x0, (port))
+#define LPAIF_IRQSTAT_REG(port) LPAIF_IRQ_REG_ADDR(0x4, (port))
+#define LPAIF_IRQCLEAR_REG(port) LPAIF_IRQ_REG_ADDR(0xC, (port))
+
+#define LPAIF_IRQ_BITSTRIDE 3
+#define LPAIF_IRQ_PER(chan) (1 << (LPAIF_IRQ_BITSTRIDE * (chan)))
+#define LPAIF_IRQ_XRUN(chan) (2 << (LPAIF_IRQ_BITSTRIDE * (chan)))
+#define LPAIF_IRQ_ERR(chan) (4 << (LPAIF_IRQ_BITSTRIDE * (chan)))
+#define LPAIF_IRQ_ALL(chan) (7 << (LPAIF_IRQ_BITSTRIDE * (chan)))
+
+/* LPAIF DMA */
+
+#define LPAIF_RDMA_REG_BASE 0x6000
+#define LPAIF_RDMA_REG_STRIDE 0x1000
+#define LPAIF_RDMA_REG_ADDR(addr, chan) \
+ (LPAIF_RDMA_REG_BASE + (addr) + (LPAIF_RDMA_REG_STRIDE * (chan)))
+
+enum lpaif_dma_channels {
+ LPAIF_RDMA_CHAN_MIN = 0,
+
+ LPAIF_RDMA_CHAN_MI2S = 0,
+ LPAIF_RDMA_CHAN_PCM0 = 1,
+ LPAIF_RDMA_CHAN_PCM1 = 2,
+
+ LPAIF_RDMA_CHAN_MAX = 4,
+ LPAIF_RDMA_CHAN_NUM = 5,
+};
+
+#define LPAIF_RDMACTL_REG(chan) LPAIF_RDMA_REG_ADDR(0x00, (chan))
+#define LPAIF_RDMABASE_REG(chan) LPAIF_RDMA_REG_ADDR(0x04, (chan))
+#define LPAIF_RDMABUFF_REG(chan) LPAIF_RDMA_REG_ADDR(0x08, (chan))
+#define LPAIF_RDMACURR_REG(chan) LPAIF_RDMA_REG_ADDR(0x0C, (chan))
+#define LPAIF_RDMAPER_REG(chan) LPAIF_RDMA_REG_ADDR(0x10, (chan))
+
+#define LPAIF_RDMACTL_BURSTEN_MASK 0x800
+#define LPAIF_RDMACTL_BURSTEN_SHIFT 11
+#define LPAIF_RDMACTL_BURSTEN_SINGLE (0 << LPAIF_RDMACTL_BURSTEN_SHIFT)
+#define LPAIF_RDMACTL_BURSTEN_INCR4 (1 << LPAIF_RDMACTL_BURSTEN_SHIFT)
+
+#define LPAIF_RDMACTL_WPSCNT_MASK 0x700
+#define LPAIF_RDMACTL_WPSCNT_SHIFT 8
+#define LPAIF_RDMACTL_WPSCNT_ONE (0 << LPAIF_RDMACTL_WPSCNT_SHIFT)
+#define LPAIF_RDMACTL_WPSCNT_TWO (1 << LPAIF_RDMACTL_WPSCNT_SHIFT)
+#define LPAIF_RDMACTL_WPSCNT_THREE (2 << LPAIF_RDMACTL_WPSCNT_SHIFT)
+#define LPAIF_RDMACTL_WPSCNT_FOUR (3 << LPAIF_RDMACTL_WPSCNT_SHIFT)
+#define LPAIF_RDMACTL_WPSCNT_SIX (5 << LPAIF_RDMACTL_WPSCNT_SHIFT)
+#define LPAIF_RDMACTL_WPSCNT_EIGHT (7 << LPAIF_RDMACTL_WPSCNT_SHIFT)
+
+#define LPAIF_RDMACTL_AUDINTF_MASK 0x0F0
+#define LPAIF_RDMACTL_AUDINTF_SHIFT 4
+#define LPAIF_RDMACTL_AUDINTF_NONE (0 << LPAIF_RDMACTL_AUDINTF_SHIFT)
+#define LPAIF_RDMACTL_AUDINTF_CODEC (1 << LPAIF_RDMACTL_AUDINTF_SHIFT)
+#define LPAIF_RDMACTL_AUDINTF_PCM (2 << LPAIF_RDMACTL_AUDINTF_SHIFT)
+#define LPAIF_RDMACTL_AUDINTF_SEC_I2S (3 << LPAIF_RDMACTL_AUDINTF_SHIFT)
+#define LPAIF_RDMACTL_AUDINTF_MI2S (4 << LPAIF_RDMACTL_AUDINTF_SHIFT)
+#define LPAIF_RDMACTL_AUDINTF_HDMI (5 << LPAIF_RDMACTL_AUDINTF_SHIFT)
+#define LPAIF_RDMACTL_AUDINTF_SEC_PCM (7 << LPAIF_RDMACTL_AUDINTF_SHIFT)
+
+#define LPAIF_RDMACTL_FIFOWM_MASK 0x00E
+#define LPAIF_RDMACTL_FIFOWM_SHIFT 1
+#define LPAIF_RDMACTL_FIFOWM_1 (0 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+#define LPAIF_RDMACTL_FIFOWM_2 (1 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+#define LPAIF_RDMACTL_FIFOWM_3 (2 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+#define LPAIF_RDMACTL_FIFOWM_4 (3 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+#define LPAIF_RDMACTL_FIFOWM_5 (4 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+#define LPAIF_RDMACTL_FIFOWM_6 (5 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+#define LPAIF_RDMACTL_FIFOWM_7 (6 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+#define LPAIF_RDMACTL_FIFOWM_8 (7 << LPAIF_RDMACTL_FIFOWM_SHIFT)
+
+#define LPAIF_RDMACTL_ENABLE_MASK 0x1
+#define LPAIF_RDMACTL_ENABLE_SHIFT 0
+#define LPAIF_RDMACTL_ENABLE_OFF (0 << LPAIF_RDMACTL_ENABLE_SHIFT)
+#define LPAIF_RDMACTL_ENABLE_ON (1 << LPAIF_RDMACTL_ENABLE_SHIFT)
+
+#endif /* __LPASS_LPAIF_H__ */
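As a standalone illustration (not part of the patch), the per-channel register macros above are plain base + stride arithmetic; this small C sketch, with the values copied verbatim from lpass-lpaif-ipq806x.h, prints the addresses they resolve to for the MI2S read-DMA channel (channel 0):

#include <stdio.h>

/* Copied from lpass-lpaif-ipq806x.h above */
#define LPAIF_RDMA_REG_BASE    0x6000
#define LPAIF_RDMA_REG_STRIDE  0x1000
#define LPAIF_RDMA_REG_ADDR(addr, chan) \
        (LPAIF_RDMA_REG_BASE + (addr) + (LPAIF_RDMA_REG_STRIDE * (chan)))
#define LPAIF_RDMACTL_REG(chan)   LPAIF_RDMA_REG_ADDR(0x00, (chan))
#define LPAIF_RDMACURR_REG(chan)  LPAIF_RDMA_REG_ADDR(0x0C, (chan))

int main(void)
{
        /* channel 0 is LPAIF_RDMA_CHAN_MI2S in the header above */
        printf("RDMACTL(MI2S)  = 0x%x\n", LPAIF_RDMACTL_REG(0));   /* 0x6000 */
        printf("RDMACURR(MI2S) = 0x%x\n", LPAIF_RDMACURR_REG(0));  /* 0x600c */
        return 0;
}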
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
new file mode 100644
index 000000000000..2fa6280dfb23
--- /dev/null
+++ b/sound/soc/qcom/lpass-platform.c
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * lpass-platform.c -- ALSA SoC platform driver for QTi LPASS
+ */
+
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <sound/memalloc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "lpass-lpaif-ipq806x.h"
+#include "lpass.h"
+
+#define LPASS_PLATFORM_BUFFER_SIZE (16 * 1024)
+#define LPASS_PLATFORM_PERIODS 2
+
+static struct snd_pcm_hardware lpass_platform_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16 |
+ SNDRV_PCM_FMTBIT_S24 |
+ SNDRV_PCM_FMTBIT_S32,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 8,
+ .buffer_bytes_max = LPASS_PLATFORM_BUFFER_SIZE,
+ .period_bytes_max = LPASS_PLATFORM_BUFFER_SIZE /
+ LPASS_PLATFORM_PERIODS,
+ .period_bytes_min = LPASS_PLATFORM_BUFFER_SIZE /
+ LPASS_PLATFORM_PERIODS,
+ .periods_min = LPASS_PLATFORM_PERIODS,
+ .periods_max = LPASS_PLATFORM_PERIODS,
+ .fifo_size = 0,
+};
+
+static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ int ret;
+
+ snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
+
+ runtime->dma_bytes = lpass_platform_pcm_hardware.buffer_bytes_max;
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(soc_runtime->dev, "%s() setting constraints failed: %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ snd_pcm_format_t format = params_format(params);
+ unsigned int channels = params_channels(params);
+ unsigned int regval;
+ int bitwidth;
+ int ret;
+
+ bitwidth = snd_pcm_format_width(format);
+ if (bitwidth < 0) {
+ dev_err(soc_runtime->dev, "%s() invalid bit width given: %d\n",
+ __func__, bitwidth);
+ return bitwidth;
+ }
+
+ regval = LPAIF_RDMACTL_BURSTEN_INCR4 |
+ LPAIF_RDMACTL_AUDINTF_MI2S |
+ LPAIF_RDMACTL_FIFOWM_8;
+
+ switch (bitwidth) {
+ case 16:
+ switch (channels) {
+ case 1:
+ case 2:
+ regval |= LPAIF_RDMACTL_WPSCNT_ONE;
+ break;
+ case 4:
+ regval |= LPAIF_RDMACTL_WPSCNT_TWO;
+ break;
+ case 6:
+ regval |= LPAIF_RDMACTL_WPSCNT_THREE;
+ break;
+ case 8:
+ regval |= LPAIF_RDMACTL_WPSCNT_FOUR;
+ break;
+ default:
+ dev_err(soc_runtime->dev, "%s() invalid PCM config given: bw=%d, ch=%u\n",
+ __func__, bitwidth, channels);
+ return -EINVAL;
+ }
+ break;
+ case 24:
+ case 32:
+ switch (channels) {
+ case 1:
+ regval |= LPAIF_RDMACTL_WPSCNT_ONE;
+ break;
+ case 2:
+ regval |= LPAIF_RDMACTL_WPSCNT_TWO;
+ break;
+ case 4:
+ regval |= LPAIF_RDMACTL_WPSCNT_FOUR;
+ break;
+ case 6:
+ regval |= LPAIF_RDMACTL_WPSCNT_SIX;
+ break;
+ case 8:
+ regval |= LPAIF_RDMACTL_WPSCNT_EIGHT;
+ break;
+ default:
+ dev_err(soc_runtime->dev, "%s() invalid PCM config given: bw=%d, ch=%u\n",
+ __func__, bitwidth, channels);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(soc_runtime->dev, "%s() invalid PCM config given: bw=%d, ch=%u\n",
+ __func__, bitwidth, channels);
+ return -EINVAL;
+ }
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_RDMACTL_REG(LPAIF_RDMA_CHAN_MI2S), regval);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ int ret;
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_RDMACTL_REG(LPAIF_RDMA_CHAN_MI2S), 0);
+ if (ret)
+ dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ int ret;
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_RDMABASE_REG(LPAIF_RDMA_CHAN_MI2S),
+ runtime->dma_addr);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmabase reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_RDMABUFF_REG(LPAIF_RDMA_CHAN_MI2S),
+ (snd_pcm_lib_buffer_bytes(substream) >> 2) - 1);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmabuff reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_RDMAPER_REG(LPAIF_RDMA_CHAN_MI2S),
+ (snd_pcm_lib_period_bytes(substream) >> 2) - 1);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmaper reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_RDMACTL_REG(LPAIF_RDMA_CHAN_MI2S),
+ LPAIF_RDMACTL_ENABLE_MASK, LPAIF_RDMACTL_ENABLE_ON);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /* clear status before enabling interrupts */
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_IRQCLEAR_REG(LPAIF_IRQ_PORT_HOST),
+ LPAIF_IRQ_ALL(LPAIF_RDMA_CHAN_MI2S));
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to irqclear reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_IRQEN_REG(LPAIF_IRQ_PORT_HOST),
+ LPAIF_IRQ_ALL(LPAIF_RDMA_CHAN_MI2S),
+ LPAIF_IRQ_ALL(LPAIF_RDMA_CHAN_MI2S));
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to irqen reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_RDMACTL_REG(LPAIF_RDMA_CHAN_MI2S),
+ LPAIF_RDMACTL_ENABLE_MASK,
+ LPAIF_RDMACTL_ENABLE_ON);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_RDMACTL_REG(LPAIF_RDMA_CHAN_MI2S),
+ LPAIF_RDMACTL_ENABLE_MASK,
+ LPAIF_RDMACTL_ENABLE_OFF);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(drvdata->lpaif_map,
+ LPAIF_IRQEN_REG(LPAIF_IRQ_PORT_HOST),
+ LPAIF_IRQ_ALL(LPAIF_RDMA_CHAN_MI2S), 0);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to irqen reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ unsigned int base_addr, curr_addr;
+ int ret;
+
+ ret = regmap_read(drvdata->lpaif_map,
+ LPAIF_RDMABASE_REG(LPAIF_RDMA_CHAN_MI2S), &base_addr);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error reading from rdmabase reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_read(drvdata->lpaif_map,
+ LPAIF_RDMACURR_REG(LPAIF_RDMA_CHAN_MI2S), &curr_addr);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error reading from rdmacurr reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return bytes_to_frames(substream->runtime, curr_addr - base_addr);
+}
+
+static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ return dma_mmap_coherent(substream->pcm->card->dev, vma,
+ runtime->dma_area, runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops lpass_platform_pcm_ops = {
+ .open = lpass_platform_pcmops_open,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = lpass_platform_pcmops_hw_params,
+ .hw_free = lpass_platform_pcmops_hw_free,
+ .prepare = lpass_platform_pcmops_prepare,
+ .trigger = lpass_platform_pcmops_trigger,
+ .pointer = lpass_platform_pcmops_pointer,
+ .mmap = lpass_platform_pcmops_mmap,
+};
+
+static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
+{
+ struct snd_pcm_substream *substream = data;
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ unsigned int interrupts;
+ irqreturn_t ret = IRQ_NONE;
+ int rv;
+
+ rv = regmap_read(drvdata->lpaif_map,
+ LPAIF_IRQSTAT_REG(LPAIF_IRQ_PORT_HOST), &interrupts);
+ if (rv) {
+ dev_err(soc_runtime->dev, "%s() error reading from irqstat reg: %d\n",
+ __func__, rv);
+ return IRQ_NONE;
+ }
+ interrupts &= LPAIF_IRQ_ALL(LPAIF_RDMA_CHAN_MI2S);
+
+ if (interrupts & LPAIF_IRQ_PER(LPAIF_RDMA_CHAN_MI2S)) {
+ rv = regmap_write(drvdata->lpaif_map,
+ LPAIF_IRQCLEAR_REG(LPAIF_IRQ_PORT_HOST),
+ LPAIF_IRQ_PER(LPAIF_RDMA_CHAN_MI2S));
+ if (rv) {
+ dev_err(soc_runtime->dev, "%s() error writing to irqclear reg: %d\n",
+ __func__, rv);
+ return IRQ_NONE;
+ }
+ snd_pcm_period_elapsed(substream);
+ ret = IRQ_HANDLED;
+ }
+
+ if (interrupts & LPAIF_IRQ_XRUN(LPAIF_RDMA_CHAN_MI2S)) {
+ rv = regmap_write(drvdata->lpaif_map,
+ LPAIF_IRQCLEAR_REG(LPAIF_IRQ_PORT_HOST),
+ LPAIF_IRQ_XRUN(LPAIF_RDMA_CHAN_MI2S));
+ if (rv) {
+ dev_err(soc_runtime->dev, "%s() error writing to irqclear reg: %d\n",
+ __func__, rv);
+ return IRQ_NONE;
+ }
+ dev_warn(soc_runtime->dev, "%s() xrun warning\n", __func__);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ ret = IRQ_HANDLED;
+ }
+
+ if (interrupts & LPAIF_IRQ_ERR(LPAIF_RDMA_CHAN_MI2S)) {
+ rv = regmap_write(drvdata->lpaif_map,
+ LPAIF_IRQCLEAR_REG(LPAIF_IRQ_PORT_HOST),
+ LPAIF_IRQ_ERR(LPAIF_RDMA_CHAN_MI2S));
+ if (rv) {
+ dev_err(soc_runtime->dev, "%s() error writing to irqclear reg: %d\n",
+ __func__, rv);
+ return IRQ_NONE;
+ }
+ dev_err(soc_runtime->dev, "%s() bus access error\n", __func__);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream,
+ struct snd_soc_pcm_runtime *soc_runtime)
+{
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = soc_runtime->dev;
+ buf->private_data = NULL;
+ buf->area = dma_alloc_coherent(soc_runtime->dev, size, &buf->addr,
+ GFP_KERNEL);
+ if (!buf->area) {
+ dev_err(soc_runtime->dev, "%s: Could not allocate DMA buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+ buf->bytes = size;
+
+ return 0;
+}
+
+static void lpass_platform_free_buffer(struct snd_pcm_substream *substream,
+ struct snd_soc_pcm_runtime *soc_runtime)
+{
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+
+ if (buf->area) {
+ dma_free_coherent(soc_runtime->dev, buf->bytes, buf->area,
+ buf->addr);
+ }
+ buf->area = NULL;
+}
+
+static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
+{
+ struct snd_pcm *pcm = soc_runtime->pcm;
+ struct snd_pcm_substream *substream =
+ pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ struct lpass_data *drvdata =
+ snd_soc_platform_get_drvdata(soc_runtime->platform);
+ int ret;
+
+ soc_runtime->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ soc_runtime->dev->dma_mask = &soc_runtime->dev->coherent_dma_mask;
+
+ ret = lpass_platform_alloc_buffer(substream, soc_runtime);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(soc_runtime->dev, drvdata->lpaif_irq,
+ lpass_platform_lpaif_irq, IRQF_TRIGGER_RISING,
+ "lpass-irq-lpaif", substream);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() irq request failed: %d\n",
+ __func__, ret);
+ goto err_buf;
+ }
+
+ /* ensure audio hardware is disabled */
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_IRQEN_REG(LPAIF_IRQ_PORT_HOST), 0);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to irqen reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = regmap_write(drvdata->lpaif_map,
+ LPAIF_RDMACTL_REG(LPAIF_RDMA_CHAN_MI2S), 0);
+ if (ret) {
+ dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+
+err_buf:
+ lpass_platform_free_buffer(substream, soc_runtime);
+ return ret;
+}
+
+static void lpass_platform_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream =
+ pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+
+ lpass_platform_free_buffer(substream, soc_runtime);
+}
+
+static struct snd_soc_platform_driver lpass_platform_driver = {
+ .pcm_new = lpass_platform_pcm_new,
+ .pcm_free = lpass_platform_pcm_free,
+ .ops = &lpass_platform_pcm_ops,
+};
+
+int asoc_qcom_lpass_platform_register(struct platform_device *pdev)
+{
+ struct lpass_data *drvdata = platform_get_drvdata(pdev);
+
+ drvdata->lpaif_irq = platform_get_irq_byname(pdev, "lpass-irq-lpaif");
+ if (drvdata->lpaif_irq < 0) {
+ dev_err(&pdev->dev, "%s() error getting irq handle: %d\n",
+ __func__, drvdata->lpaif_irq);
+ return -ENODEV;
+ }
+
+ return devm_snd_soc_register_platform(&pdev->dev,
+ &lpass_platform_driver);
+}
+EXPORT_SYMBOL_GPL(asoc_qcom_lpass_platform_register);
+
+MODULE_DESCRIPTION("QTi LPASS Platform Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
new file mode 100644
index 000000000000..5c99b3dace86
--- /dev/null
+++ b/sound/soc/qcom/lpass.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * lpass.h - Definitions for the QTi LPASS
+ */
+
+#ifndef __LPASS_H__
+#define __LPASS_H__
+
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define LPASS_AHBIX_CLOCK_FREQUENCY 131072000
+
+/* Both the CPU DAI and platform drivers will access this data */
+struct lpass_data {
+
+ /* AHB-I/X bus clocks inside the low-power audio subsystem (LPASS) */
+ struct clk *ahbix_clk;
+
+ /* MI2S system clock */
+ struct clk *mi2s_osr_clk;
+
+ /* MI2S bit clock (derived from system clock by a divider) */
+ struct clk *mi2s_bit_clk;
+
+ /* low-power audio interface (LPAIF) registers */
+ void __iomem *lpaif;
+
+ /* regmap backed by the low-power audio interface (LPAIF) registers */
+ struct regmap *lpaif_map;
+
+ /* interrupts from the low-power audio interface (LPAIF) */
+ int lpaif_irq;
+};
+
+/* register the platform driver from the CPU DAI driver */
+int asoc_qcom_lpass_platform_register(struct platform_device *);
+
+#endif /* __LPASS_H__ */
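For context, a hypothetical caller sketch (the real lpass-cpu.c is not shown in this hunk, so every name below other than the ones from lpass.h is an assumption): the CPU DAI driver is expected to allocate struct lpass_data, set it as drvdata on the platform device, and then hand the same device to asoc_qcom_lpass_platform_register(), which reads back drvdata->lpaif_map and drvdata->lpaif_irq.

/* Hypothetical CPU DAI probe -- illustrative only */
#include <linux/module.h>
#include <linux/platform_device.h>
#include "lpass.h"

static int example_cpu_probe(struct platform_device *pdev)
{
        struct lpass_data *drvdata;

        drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        platform_set_drvdata(pdev, drvdata);

        /* ... map LPAIF registers, build drvdata->lpaif_map, enable clocks ... */

        /* hand the same pdev to the platform (PCM/DMA) half of the driver */
        return asoc_qcom_lpass_platform_register(pdev);
}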
diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
new file mode 100644
index 000000000000..b8bd296190ad
--- /dev/null
+++ b/sound/soc/qcom/storm.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * storm.c -- ALSA SoC machine driver for QTi ipq806x-based Storm board
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define STORM_SYSCLK_MULT 4
+
+static int storm_ops_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+ struct snd_soc_card *card = soc_runtime->card;
+ snd_pcm_format_t format = params_format(params);
+ unsigned int rate = params_rate(params);
+ unsigned int sysclk_freq;
+ int bitwidth, ret;
+
+ bitwidth = snd_pcm_format_width(format);
+ if (bitwidth < 0) {
+ dev_err(card->dev, "%s() invalid bit width given: %d\n",
+ __func__, bitwidth);
+ return bitwidth;
+ }
+
+ /*
+ * as the CPU DAI is the I2S bus master and no system clock is needed by
+ * the MAX98357a DAC, simply set the system clock to be a constant
+ * multiple of the bit clock for the clock divider
+ */
+ sysclk_freq = rate * bitwidth * 2 * STORM_SYSCLK_MULT;
+
+ ret = snd_soc_dai_set_sysclk(soc_runtime->cpu_dai, 0, sysclk_freq, 0);
+ if (ret) {
+ dev_err(card->dev, "%s() error setting sysclk to %u: %d\n",
+ __func__, sysclk_freq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops storm_soc_ops = {
+ .hw_params = storm_ops_hw_params,
+};
+
+static struct snd_soc_dai_link storm_dai_link = {
+ .name = "Primary",
+ .stream_name = "Primary",
+ .codec_dai_name = "HiFi",
+ .ops = &storm_soc_ops,
+};
+
+static struct snd_soc_card storm_soc_card = {
+ .name = "ipq806x-storm",
+ .dev = NULL,
+};
+
+static int storm_parse_of(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *dai_link = card->dai_link;
+ struct device_node *np = card->dev->of_node;
+
+ dai_link->cpu_of_node = of_parse_phandle(np, "cpu", 0);
+ if (!dai_link->cpu_of_node) {
+ dev_err(card->dev, "%s() error getting cpu phandle\n",
+ __func__);
+ return -EINVAL;
+ }
+ dai_link->platform_of_node = dai_link->cpu_of_node;
+
+ dai_link->codec_of_node = of_parse_phandle(np, "codec", 0);
+ if (!dai_link->codec_of_node) {
+ dev_err(card->dev, "%s() error getting codec phandle\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int storm_platform_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &storm_soc_card;
+ int ret;
+
+ if (card->dev) {
+ dev_err(&pdev->dev, "%s() error, existing soundcard\n",
+ __func__);
+ return -ENODEV;
+ }
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+
+ ret = snd_soc_of_parse_card_name(card, "qcom,model");
+ if (ret) {
+ dev_err(&pdev->dev, "%s() error parsing card name: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ card->dai_link = &storm_dai_link;
+ card->num_links = 1;
+
+ ret = storm_parse_of(card);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() error resolving dai links: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret == -EPROBE_DEFER) {
+ card->dev = NULL;
+ return ret;
+ } else if (ret) {
+ dev_err(&pdev->dev, "%s() error registering soundcard: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id storm_device_id[] = {
+ { .compatible = "google,storm-audio" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, storm_device_id);
+#endif
+
+static struct platform_driver storm_platform_driver = {
+ .driver = {
+ .name = "storm-audio",
+ .of_match_table =
+ of_match_ptr(storm_device_id),
+ },
+ .probe = storm_platform_probe,
+};
+module_platform_driver(storm_platform_driver);
+
+MODULE_DESCRIPTION("QTi IPQ806x-based Storm Machine Driver");
+MODULE_LICENSE("GPL v2");
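A worked example of the sysclk arithmetic above (illustrative, not part of the patch): for 48 kHz, 16-bit stereo I2S the bit clock is rate * bitwidth * 2, and the system clock handed to snd_soc_dai_set_sysclk() is the fixed STORM_SYSCLK_MULT multiple of that.

#include <stdio.h>

#define STORM_SYSCLK_MULT 4

int main(void)
{
        unsigned int rate = 48000, bitwidth = 16;
        /* bit clock for two-channel I2S, then the fixed multiple used above */
        unsigned int bclk = rate * bitwidth * 2;          /* 1,536,000 Hz */
        unsigned int sysclk = bclk * STORM_SYSCLK_MULT;   /* 6,144,000 Hz */

        printf("bclk=%u sysclk=%u\n", bclk, sysclk);
        return 0;
}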
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index 59b044255b78..c72e9fb26658 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -162,13 +162,8 @@ static struct platform_device *s3c24xx_snd_device;
static int h1940_uda1380_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
-
- snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
- &hp_jack);
-
- snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
- hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphone Jack", SND_JACK_HEADPHONE,
+ &hp_jack, hp_jack_pins, ARRAY_SIZE(hp_jack_pins));
snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
hp_jack_gpios);
diff --git a/sound/soc/samsung/littlemill.c b/sound/soc/samsung/littlemill.c
index 141519c21e21..31a820eb0ac3 100644
--- a/sound/soc/samsung/littlemill.c
+++ b/sound/soc/samsung/littlemill.c
@@ -260,12 +260,12 @@ static int littlemill_late_probe(struct snd_soc_card *card)
if (ret < 0)
return ret;
- ret = snd_soc_jack_new(codec, "Headset",
- SND_JACK_HEADSET | SND_JACK_MECHANICAL |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3 |
- SND_JACK_BTN_4 | SND_JACK_BTN_5,
- &littlemill_headset);
+ ret = snd_soc_card_jack_new(card, "Headset",
+ SND_JACK_HEADSET | SND_JACK_MECHANICAL |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 |
+ SND_JACK_BTN_4 | SND_JACK_BTN_5,
+ &littlemill_headset, NULL, 0);
if (ret)
return ret;
diff --git a/sound/soc/samsung/lowland.c b/sound/soc/samsung/lowland.c
index 243dea7ba38f..5f156093101e 100644
--- a/sound/soc/samsung/lowland.c
+++ b/sound/soc/samsung/lowland.c
@@ -56,16 +56,10 @@ static int lowland_wm5100_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- ret = snd_soc_jack_new(codec, "Headset",
- SND_JACK_LINEOUT | SND_JACK_HEADSET |
- SND_JACK_BTN_0,
- &lowland_headset);
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_pins(&lowland_headset,
- ARRAY_SIZE(lowland_headset_pins),
- lowland_headset_pins);
+ ret = snd_soc_card_jack_new(rtd->card, "Headset", SND_JACK_LINEOUT |
+ SND_JACK_HEADSET | SND_JACK_BTN_0,
+ &lowland_headset, lowland_headset_pins,
+ ARRAY_SIZE(lowland_headset_pins));
if (ret)
return ret;
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index 873f2cb4bebe..35e37c457f1f 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -211,13 +211,8 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream,
static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
-
- snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
- &hp_jack);
-
- snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
- hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphone Jack", SND_JACK_HEADPHONE,
+ &hp_jack, hp_jack_pins, ARRAY_SIZE(hp_jack_pins));
snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
hp_jack_gpios);
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 326d3c3804e3..5bf723689692 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
return -ENOENT;
}
s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
- if (s3c24xx_i2s.regs == NULL)
- return -ENXIO;
+ if (IS_ERR(s3c24xx_i2s.regs))
+ return PTR_ERR(s3c24xx_i2s.regs);
s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
diff --git a/sound/soc/samsung/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
index 8291d2a5f152..dfbe2db1c407 100644
--- a/sound/soc/samsung/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -151,13 +151,10 @@ static int smartq_wm8987_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
/* Headphone jack detection */
- err = snd_soc_jack_new(codec, "Headphone Jack",
- SND_JACK_HEADPHONE, &smartq_jack);
- if (err)
- return err;
-
- err = snd_soc_jack_add_pins(&smartq_jack, ARRAY_SIZE(smartq_jack_pins),
- smartq_jack_pins);
+ err = snd_soc_card_jack_new(rtd->card, "Headphone Jack",
+ SND_JACK_HEADPHONE, &smartq_jack,
+ smartq_jack_pins,
+ ARRAY_SIZE(smartq_jack_pins));
if (err)
return err;
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index 5ec7c52282f2..2dcb988bdff2 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -153,16 +153,10 @@ static int speyside_wm8996_init(struct snd_soc_pcm_runtime *rtd)
pr_err("Failed to request HP_SEL GPIO: %d\n", ret);
gpio_direction_output(WM8996_HPSEL_GPIO, speyside_jack_polarity);
- ret = snd_soc_jack_new(codec, "Headset",
- SND_JACK_LINEOUT | SND_JACK_HEADSET |
- SND_JACK_BTN_0,
- &speyside_headset);
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_pins(&speyside_headset,
- ARRAY_SIZE(speyside_headset_pins),
- speyside_headset_pins);
+ ret = snd_soc_card_jack_new(rtd->card, "Headset", SND_JACK_LINEOUT |
+ SND_JACK_HEADSET | SND_JACK_BTN_0,
+ &speyside_headset, speyside_headset_pins,
+ ARRAY_SIZE(speyside_headset_pins));
if (ret)
return ret;
diff --git a/sound/soc/samsung/tobermory.c b/sound/soc/samsung/tobermory.c
index 9c80506527c4..85ccfb7188cb 100644
--- a/sound/soc/samsung/tobermory.c
+++ b/sound/soc/samsung/tobermory.c
@@ -179,15 +179,10 @@ static int tobermory_late_probe(struct snd_soc_card *card)
if (ret < 0)
return ret;
- ret = snd_soc_jack_new(codec, "Headset",
- SND_JACK_HEADSET | SND_JACK_BTN_0,
- &tobermory_headset);
- if (ret)
- return ret;
-
- ret = snd_soc_jack_add_pins(&tobermory_headset,
- ARRAY_SIZE(tobermory_headset_pins),
- tobermory_headset_pins);
+ ret = snd_soc_card_jack_new(card, "Headset", SND_JACK_HEADSET |
+ SND_JACK_BTN_0, &tobermory_headset,
+ tobermory_headset_pins,
+ ARRAY_SIZE(tobermory_headset_pins));
if (ret)
return ret;
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 80245b6eebd6..07114b0b0dc1 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -36,11 +36,17 @@ config SND_SOC_SH4_SIU
config SND_SOC_RCAR
tristate "R-Car series SRU/SCU/SSIU/SSI support"
+ depends on DMA_OF
select SND_SIMPLE_CARD
select REGMAP_MMIO
help
 This option enables R-Car SRU/SCU/SSIU/SSI sound support

+config SND_SOC_RSRC_CARD
+ tristate "Renesas Sampling Rate Convert Sound Card"
+ help
+ This option enables a simple sound card for use when sampling rate conversion is needed
+
##
## Boards
##
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index b87b22e88e43..142c066eaee2 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -250,6 +250,7 @@ struct fsi_clk {
struct fsi_priv {
void __iomem *base;
+ phys_addr_t phys;
struct fsi_master *master;
struct fsi_stream playback;
@@ -1371,13 +1372,18 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io, struct dev
shdma_chan_filter, (void *)io->dma_id,
dev, is_play ? "tx" : "rx");
if (io->chan) {
- struct dma_slave_config cfg;
+ struct dma_slave_config cfg = {};
int ret;
- cfg.slave_id = io->dma_id;
- cfg.dst_addr = 0; /* use default addr */
- cfg.src_addr = 0; /* use default addr */
- cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ if (is_play) {
+ cfg.dst_addr = fsi->phys + REG_DODT;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.direction = DMA_MEM_TO_DEV;
+ } else {
+ cfg.src_addr = fsi->phys + REG_DIDT;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.direction = DMA_DEV_TO_MEM;
+ }
ret = dmaengine_slave_config(io->chan, &cfg);
if (ret < 0) {
@@ -1876,7 +1882,40 @@ static void fsi_handler_init(struct fsi_priv *fsi,
}
}
-static struct of_device_id fsi_of_match[];
+static const struct fsi_core fsi1_core = {
+ .ver = 1,
+
+ /* Interrupt */
+ .int_st = INT_ST,
+ .iemsk = IEMSK,
+ .imsk = IMSK,
+};
+
+static const struct fsi_core fsi2_core = {
+ .ver = 2,
+
+ /* Interrupt */
+ .int_st = CPU_INT_ST,
+ .iemsk = CPU_IEMSK,
+ .imsk = CPU_IMSK,
+ .a_mclk = A_MST_CTLR,
+ .b_mclk = B_MST_CTLR,
+};
+
+static const struct of_device_id fsi_of_match[] = {
+ { .compatible = "renesas,sh_fsi", .data = &fsi1_core},
+ { .compatible = "renesas,sh_fsi2", .data = &fsi2_core},
+ {},
+};
+MODULE_DEVICE_TABLE(of, fsi_of_match);
+
+static const struct platform_device_id fsi_id_table[] = {
+ { "sh_fsi", (kernel_ulong_t)&fsi1_core },
+ { "sh_fsi2", (kernel_ulong_t)&fsi2_core },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fsi_id_table);
+
static int fsi_probe(struct platform_device *pdev)
{
struct fsi_master *master;
@@ -1941,6 +1980,7 @@ static int fsi_probe(struct platform_device *pdev)
/* FSI A setting */
fsi = &master->fsia;
fsi->base = master->base;
+ fsi->phys = res->start;
fsi->master = master;
fsi_port_info_init(fsi, &info.port_a);
fsi_handler_init(fsi, &info.port_a);
@@ -1953,6 +1993,7 @@ static int fsi_probe(struct platform_device *pdev)
/* FSI B setting */
fsi = &master->fsib;
fsi->base = master->base + 0x40;
+ fsi->phys = res->start + 0x40;
fsi->master = master;
fsi_port_info_init(fsi, &info.port_b);
fsi_handler_init(fsi, &info.port_b);
@@ -2072,40 +2113,6 @@ static struct dev_pm_ops fsi_pm_ops = {
.resume = fsi_resume,
};
-static struct fsi_core fsi1_core = {
- .ver = 1,
-
- /* Interrupt */
- .int_st = INT_ST,
- .iemsk = IEMSK,
- .imsk = IMSK,
-};
-
-static struct fsi_core fsi2_core = {
- .ver = 2,
-
- /* Interrupt */
- .int_st = CPU_INT_ST,
- .iemsk = CPU_IEMSK,
- .imsk = CPU_IMSK,
- .a_mclk = A_MST_CTLR,
- .b_mclk = B_MST_CTLR,
-};
-
-static struct of_device_id fsi_of_match[] = {
- { .compatible = "renesas,sh_fsi", .data = &fsi1_core},
- { .compatible = "renesas,sh_fsi2", .data = &fsi2_core},
- {},
-};
-MODULE_DEVICE_TABLE(of, fsi_of_match);
-
-static struct platform_device_id fsi_id_table[] = {
- { "sh_fsi", (kernel_ulong_t)&fsi1_core },
- { "sh_fsi2", (kernel_ulong_t)&fsi2_core },
- {},
-};
-MODULE_DEVICE_TABLE(platform, fsi_id_table);
-
static struct platform_driver fsi_driver = {
.driver = {
.name = "fsi-pcm-audio",
@@ -2119,7 +2126,7 @@ static struct platform_driver fsi_driver = {
module_platform_driver(fsi_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SuperH onchip FSI audio driver");
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_ALIAS("platform:fsi-pcm-audio");
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index 9ac536429800..f1b445173fba 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,2 +1,5 @@
-snd-soc-rcar-objs := core.o gen.o src.o adg.o ssi.o dvc.o
-obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o
\ No newline at end of file
+snd-soc-rcar-objs := core.o gen.o dma.o src.o adg.o ssi.o dvc.o
+obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o
+
+snd-soc-rsrc-card-objs := rsrc-card.o
+obj-$(CONFIG_SND_SOC_RSRC_CARD) += snd-soc-rsrc-card.o
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 7ac35c9d1cb8..fefc881dbac2 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -183,6 +183,8 @@ int rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod,
rsnd_mod_bset(mod, DIV_EN, en, en);
+ dev_dbg(dev, "convert rate %d <-> %d\n", src_rate, dst_rate);
+
return 0;
}
@@ -432,7 +434,5 @@ int rsnd_adg_probe(struct platform_device *pdev,
priv->adg = adg;
- dev_dbg(dev, "adg probed\n");
-
return 0;
}
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 110577c52317..9f48d75fa992 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -94,21 +94,20 @@
*
*/
#include <linux/pm_runtime.h>
-#include <linux/shdma-base.h>
#include "rsnd.h"
#define RSND_RATES SNDRV_PCM_RATE_8000_96000
#define RSND_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
-static struct rsnd_of_data rsnd_of_data_gen1 = {
+static const struct rsnd_of_data rsnd_of_data_gen1 = {
.flags = RSND_GEN1,
};
-static struct rsnd_of_data rsnd_of_data_gen2 = {
+static const struct rsnd_of_data rsnd_of_data_gen2 = {
.flags = RSND_GEN2,
};
-static struct of_device_id rsnd_of_match[] = {
+static const struct of_device_id rsnd_of_match[] = {
{ .compatible = "renesas,rcar_sound-gen1", .data = &rsnd_of_data_gen1 },
{ .compatible = "renesas,rcar_sound-gen2", .data = &rsnd_of_data_gen2 },
{},
@@ -138,249 +137,37 @@ char *rsnd_mod_name(struct rsnd_mod *mod)
return mod->ops->name;
}
-char *rsnd_mod_dma_name(struct rsnd_mod *mod)
+struct dma_chan *rsnd_mod_dma_req(struct rsnd_mod *mod)
{
- if (!mod || !mod->ops)
- return "unknown";
-
- if (!mod->ops->dma_name)
- return mod->ops->name;
+ if (!mod || !mod->ops || !mod->ops->dma_req)
+ return NULL;
- return mod->ops->dma_name(mod);
+ return mod->ops->dma_req(mod);
}
-void rsnd_mod_init(struct rsnd_mod *mod,
+int rsnd_mod_init(struct rsnd_mod *mod,
struct rsnd_mod_ops *ops,
struct clk *clk,
enum rsnd_mod_type type,
int id)
{
+ int ret = clk_prepare(clk);
+
+ if (ret)
+ return ret;
+
mod->id = id;
mod->ops = ops;
mod->type = type;
mod->clk = clk;
-}
-
-/*
- * rsnd_dma functions
- */
-void rsnd_dma_stop(struct rsnd_dma *dma)
-{
- dmaengine_terminate_all(dma->chan);
-}
-
-static void rsnd_dma_complete(void *data)
-{
- struct rsnd_dma *dma = (struct rsnd_dma *)data;
- struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
- struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
-
- /*
- * Renesas sound Gen1 needs 1 DMAC,
- * Gen2 needs 2 DMAC.
- * In Gen2 case, it are Audio-DMAC, and Audio-DMAC-peri-peri.
- * But, Audio-DMAC-peri-peri doesn't have interrupt,
- * and this driver is assuming that here.
- *
- * If Audio-DMAC-peri-peri has interrpt,
- * rsnd_dai_pointer_update() will be called twice,
- * ant it will breaks io->byte_pos
- */
-
- rsnd_dai_pointer_update(io, io->byte_per_period);
-}
-
-void rsnd_dma_start(struct rsnd_dma *dma)
-{
- struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
- struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
- struct snd_pcm_substream *substream = io->substream;
- struct device *dev = rsnd_priv_to_dev(priv);
- struct dma_async_tx_descriptor *desc;
-
- desc = dmaengine_prep_dma_cyclic(dma->chan,
- (dma->addr) ? dma->addr :
- substream->runtime->dma_addr,
- snd_pcm_lib_buffer_bytes(substream),
- snd_pcm_lib_period_bytes(substream),
- dma->dir,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (!desc) {
- dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
- return;
- }
-
- desc->callback = rsnd_dma_complete;
- desc->callback_param = dma;
-
- if (dmaengine_submit(desc) < 0) {
- dev_err(dev, "dmaengine_submit() fail\n");
- return;
- }
-
- dma_async_issue_pending(dma->chan);
-}
-
-int rsnd_dma_available(struct rsnd_dma *dma)
-{
- return !!dma->chan;
-}
-
-#define DMA_NAME_SIZE 16
-#define MOD_MAX 4 /* MEM/SSI/SRC/DVC */
-static int _rsnd_dma_of_name(char *dma_name, struct rsnd_mod *mod)
-{
- if (mod)
- return snprintf(dma_name, DMA_NAME_SIZE / 2, "%s%d",
- rsnd_mod_dma_name(mod), rsnd_mod_id(mod));
- else
- return snprintf(dma_name, DMA_NAME_SIZE / 2, "mem");
-
-}
-
-static void rsnd_dma_of_name(struct rsnd_mod *mod_from,
- struct rsnd_mod *mod_to,
- char *dma_name)
-{
- int index = 0;
-
- index = _rsnd_dma_of_name(dma_name + index, mod_from);
- *(dma_name + index++) = '_';
- index = _rsnd_dma_of_name(dma_name + index, mod_to);
-}
-
-static void rsnd_dma_of_path(struct rsnd_dma *dma,
- int is_play,
- struct rsnd_mod **mod_from,
- struct rsnd_mod **mod_to)
-{
- struct rsnd_mod *this = rsnd_dma_to_mod(dma);
- struct rsnd_dai_stream *io = rsnd_mod_to_io(this);
- struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
- struct rsnd_mod *src = rsnd_io_to_mod_src(io);
- struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
- struct rsnd_mod *mod[MOD_MAX];
- int i, index;
-
- for (i = 0; i < MOD_MAX; i++)
- mod[i] = NULL;
-
- /*
- * in play case...
- *
- * src -> dst
- *
- * mem -> SSI
- * mem -> SRC -> SSI
- * mem -> SRC -> DVC -> SSI
- */
- mod[0] = NULL; /* for "mem" */
- index = 1;
- for (i = 1; i < MOD_MAX; i++) {
- if (!src) {
- mod[i] = ssi;
- } else if (!dvc) {
- mod[i] = src;
- src = NULL;
- } else {
- if ((!is_play) && (this == src))
- this = dvc;
-
- mod[i] = (is_play) ? src : dvc;
- i++;
- mod[i] = (is_play) ? dvc : src;
- src = NULL;
- dvc = NULL;
- }
-
- if (mod[i] == this)
- index = i;
-
- if (mod[i] == ssi)
- break;
- }
-
- if (is_play) {
- *mod_from = mod[index - 1];
- *mod_to = mod[index];
- } else {
- *mod_from = mod[index];
- *mod_to = mod[index - 1];
- }
-}
-
-int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
- int is_play, int id)
-{
- struct device *dev = rsnd_priv_to_dev(priv);
- struct dma_slave_config cfg;
- struct rsnd_mod *mod_from;
- struct rsnd_mod *mod_to;
- char dma_name[DMA_NAME_SIZE];
- dma_cap_mask_t mask;
- int ret;
-
- if (dma->chan) {
- dev_err(dev, "it already has dma channel\n");
- return -EIO;
- }
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- rsnd_dma_of_path(dma, is_play, &mod_from, &mod_to);
- rsnd_dma_of_name(mod_from, mod_to, dma_name);
-
- cfg.slave_id = id;
- cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
- cfg.src_addr = rsnd_gen_dma_addr(priv, mod_from, is_play, 1);
- cfg.dst_addr = rsnd_gen_dma_addr(priv, mod_to, is_play, 0);
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-
- dev_dbg(dev, "dma : %s %pad -> %pad\n",
- dma_name, &cfg.src_addr, &cfg.dst_addr);
-
- dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- (void *)id, dev,
- dma_name);
- if (!dma->chan) {
- dev_err(dev, "can't get dma channel\n");
- goto rsnd_dma_channel_err;
- }
-
- ret = dmaengine_slave_config(dma->chan, &cfg);
- if (ret < 0)
- goto rsnd_dma_init_err;
-
- dma->addr = is_play ? cfg.src_addr : cfg.dst_addr;
- dma->dir = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
-
- return 0;
-
-rsnd_dma_init_err:
- rsnd_dma_quit(priv, dma);
-rsnd_dma_channel_err:
-
- /*
- * DMA failed. try to PIO mode
- * see
- * rsnd_ssi_fallback()
- * rsnd_rdai_continuance_probe()
- */
- return -EAGAIN;
+ return ret;
}
-void rsnd_dma_quit(struct rsnd_priv *priv,
- struct rsnd_dma *dma)
+void rsnd_mod_quit(struct rsnd_mod *mod)
{
- if (dma->chan)
- dma_release_channel(dma->chan);
-
- dma->chan = NULL;
+ if (mod->clk)
+ clk_unprepare(mod->clk);
}
/*
@@ -416,7 +203,7 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod)
({ \
struct rsnd_priv *priv = rsnd_mod_to_priv(mod); \
struct device *dev = rsnd_priv_to_dev(priv); \
- u32 mask = 1 << __rsnd_mod_shift_##func; \
+ u32 mask = (1 << __rsnd_mod_shift_##func) & ~(1 << 31); \
u32 call = __rsnd_mod_call_##func << __rsnd_mod_shift_##func; \
int ret = 0; \
if ((mod->status & mask) == call) { \
@@ -458,7 +245,7 @@ static int rsnd_dai_connect(struct rsnd_mod *mod,
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
- dev_err(dev, "%s%d is not empty\n",
+ dev_err(dev, "%s[%d] is not empty\n",
rsnd_mod_name(mod),
rsnd_mod_id(mod));
return -EIO;
@@ -874,20 +661,28 @@ static int rsnd_dai_probe(struct platform_device *pdev,
drv[i].name = rdai[i].name;
drv[i].ops = &rsnd_soc_dai_ops;
if (pmod) {
+ snprintf(rdai[i].playback.name, RSND_DAI_NAME_SIZE,
+ "DAI%d Playback", i);
+
drv[i].playback.rates = RSND_RATES;
drv[i].playback.formats = RSND_FMTS;
drv[i].playback.channels_min = 2;
drv[i].playback.channels_max = 2;
+ drv[i].playback.stream_name = rdai[i].playback.name;
rdai[i].playback.info = &info->dai_info[i].playback;
rdai[i].playback.rdai = rdai + i;
rsnd_path_init(priv, &rdai[i], &rdai[i].playback);
}
if (cmod) {
+ snprintf(rdai[i].capture.name, RSND_DAI_NAME_SIZE,
+ "DAI%d Capture", i);
+
drv[i].capture.rates = RSND_RATES;
drv[i].capture.formats = RSND_FMTS;
drv[i].capture.channels_min = 2;
drv[i].capture.channels_max = 2;
+ drv[i].capture.stream_name = rdai[i].capture.name;
rdai[i].capture.info = &info->dai_info[i].capture;
rdai[i].capture.rdai = rdai + i;
@@ -933,6 +728,15 @@ static int rsnd_pcm_open(struct snd_pcm_substream *substream)
static int rsnd_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
+ struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+ int ret;
+
+ ret = rsnd_dai_call(hw_params, io, substream, hw_params);
+ if (ret)
+ return ret;
+
return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
}
@@ -1197,6 +1001,7 @@ static int rsnd_probe(struct platform_device *pdev)
const struct rsnd_of_data *of_data,
struct rsnd_priv *priv) = {
rsnd_gen_probe,
+ rsnd_dma_probe,
rsnd_ssi_probe,
rsnd_src_probe,
rsnd_dvc_probe,
@@ -1290,6 +1095,12 @@ static int rsnd_remove(struct platform_device *pdev)
{
struct rsnd_priv *priv = dev_get_drvdata(&pdev->dev);
struct rsnd_dai *rdai;
+ void (*remove_func[])(struct platform_device *pdev,
+ struct rsnd_priv *priv) = {
+ rsnd_ssi_remove,
+ rsnd_src_remove,
+ rsnd_dvc_remove,
+ };
int ret = 0, i;
pm_runtime_disable(&pdev->dev);
@@ -1299,6 +1110,9 @@ static int rsnd_remove(struct platform_device *pdev)
ret |= rsnd_dai_call(remove, &rdai->capture, priv);
}
+ for (i = 0; i < ARRAY_SIZE(remove_func); i++)
+ remove_func[i](pdev, priv);
+
snd_soc_unregister_component(&pdev->dev);
snd_soc_unregister_platform(&pdev->dev);
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
new file mode 100644
index 000000000000..144308f15fb3
--- /dev/null
+++ b/sound/soc/sh/rcar/dma.c
@@ -0,0 +1,617 @@
+/*
+ * Renesas R-Car Audio DMAC support
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/of_dma.h>
+#include "rsnd.h"
+
+/*
+ * Audio DMAC peri peri register
+ */
+#define PDMASAR 0x00
+#define PDMADAR 0x04
+#define PDMACHCR 0x0c
+
+/* PDMACHCR */
+#define PDMACHCR_DE (1 << 0)
+
+struct rsnd_dma_ctrl {
+ void __iomem *base;
+ int dmapp_num;
+};
+
+#define rsnd_priv_to_dmac(p) ((struct rsnd_dma_ctrl *)(p)->dma)
+
+/*
+ * Audio DMAC
+ */
+static void rsnd_dmaen_complete(void *data)
+{
+ struct rsnd_dma *dma = (struct rsnd_dma *)data;
+ struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+
+ /*
+ * Renesas sound Gen1 needs 1 DMAC,
+ * Gen2 needs 2 DMACs.
+ * In the Gen2 case, these are the Audio-DMAC and the Audio-DMAC-peri-peri.
+ * But the Audio-DMAC-peri-peri doesn't have an interrupt,
+ * and this driver assumes that here.
+ *
+ * If the Audio-DMAC-peri-peri had an interrupt,
+ * rsnd_dai_pointer_update() would be called twice,
+ * and it would break io->byte_pos
+ */
+
+ rsnd_dai_pointer_update(io, io->byte_per_period);
+}
+
+static void rsnd_dmaen_stop(struct rsnd_dma *dma)
+{
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+
+ dmaengine_terminate_all(dmaen->chan);
+}
+
+static void rsnd_dmaen_start(struct rsnd_dma *dma)
+{
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+ struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct snd_pcm_substream *substream = io->substream;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct dma_async_tx_descriptor *desc;
+ int is_play = rsnd_io_is_play(io);
+
+ desc = dmaengine_prep_dma_cyclic(dmaen->chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream),
+ is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!desc) {
+ dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
+ return;
+ }
+
+ desc->callback = rsnd_dmaen_complete;
+ desc->callback_param = dma;
+
+ if (dmaengine_submit(desc) < 0) {
+ dev_err(dev, "dmaengine_submit() fail\n");
+ return;
+ }
+
+ dma_async_issue_pending(dmaen->chan);
+}
+
+struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
+ struct rsnd_mod *mod, char *name)
+{
+ struct dma_chan *chan;
+ struct device_node *np;
+ int i = 0;
+
+ for_each_child_of_node(of_node, np) {
+ if (i == rsnd_mod_id(mod))
+ break;
+ i++;
+ }
+
+ chan = of_dma_request_slave_channel(np, name);
+
+ of_node_put(np);
+ of_node_put(of_node);
+
+ return chan;
+}
+
+static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_mod *mod_from,
+ struct rsnd_mod *mod_to)
+{
+ if ((!mod_from && !mod_to) ||
+ (mod_from && mod_to))
+ return NULL;
+
+ if (mod_from)
+ return rsnd_mod_dma_req(mod_from);
+ else
+ return rsnd_mod_dma_req(mod_to);
+}
+
+static int rsnd_dmaen_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id,
+ struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
+{
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct dma_slave_config cfg = {};
+ struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ int is_play = rsnd_io_is_play(io);
+ int ret;
+
+ if (dmaen->chan) {
+ dev_err(dev, "it already has dma channel\n");
+ return -EIO;
+ }
+
+ if (dev->of_node) {
+ dmaen->chan = rsnd_dmaen_request_channel(mod_from, mod_to);
+ } else {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
+ (void *)id);
+ }
+ if (IS_ERR_OR_NULL(dmaen->chan)) {
+ dmaen->chan = NULL;
+ dev_err(dev, "can't get dma channel\n");
+ goto rsnd_dma_channel_err;
+ }
+
+ cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ cfg.src_addr = dma->src_addr;
+ cfg.dst_addr = dma->dst_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ dev_dbg(dev, "dma : %pad -> %pad\n",
+ &cfg.src_addr, &cfg.dst_addr);
+
+ ret = dmaengine_slave_config(dmaen->chan, &cfg);
+ if (ret < 0)
+ goto rsnd_dma_init_err;
+
+ return 0;
+
+rsnd_dma_init_err:
+ rsnd_dma_quit(dma);
+rsnd_dma_channel_err:
+
+ /*
+ * DMA failed. try to PIO mode
+ * see
+ * rsnd_ssi_fallback()
+ * rsnd_rdai_continuance_probe()
+ */
+ return -EAGAIN;
+}
+
+static void rsnd_dmaen_quit(struct rsnd_dma *dma)
+{
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+
+ if (dmaen->chan)
+ dma_release_channel(dmaen->chan);
+
+ dmaen->chan = NULL;
+}
+
+static struct rsnd_dma_ops rsnd_dmaen_ops = {
+ .start = rsnd_dmaen_start,
+ .stop = rsnd_dmaen_stop,
+ .init = rsnd_dmaen_init,
+ .quit = rsnd_dmaen_quit,
+};
+
+/*
+ * Audio DMAC peri peri
+ */
+static const u8 gen2_id_table_ssiu[] = {
+ 0x00, /* SSI00 */
+ 0x04, /* SSI10 */
+ 0x08, /* SSI20 */
+ 0x0c, /* SSI3 */
+ 0x0d, /* SSI4 */
+ 0x0e, /* SSI5 */
+ 0x0f, /* SSI6 */
+ 0x10, /* SSI7 */
+ 0x11, /* SSI8 */
+ 0x12, /* SSI90 */
+};
+static const u8 gen2_id_table_scu[] = {
+ 0x2d, /* SCU_SRCI0 */
+ 0x2e, /* SCU_SRCI1 */
+ 0x2f, /* SCU_SRCI2 */
+ 0x30, /* SCU_SRCI3 */
+ 0x31, /* SCU_SRCI4 */
+ 0x32, /* SCU_SRCI5 */
+ 0x33, /* SCU_SRCI6 */
+ 0x34, /* SCU_SRCI7 */
+ 0x35, /* SCU_SRCI8 */
+ 0x36, /* SCU_SRCI9 */
+};
+static const u8 gen2_id_table_cmd[] = {
+ 0x37, /* SCU_CMD0 */
+ 0x38, /* SCU_CMD1 */
+};
+
+static u32 rsnd_dmapp_get_id(struct rsnd_mod *mod)
+{
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+ struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
+ const u8 *entry = NULL;
+ int id = rsnd_mod_id(mod);
+ int size = 0;
+
+ if (mod == ssi) {
+ entry = gen2_id_table_ssiu;
+ size = ARRAY_SIZE(gen2_id_table_ssiu);
+ } else if (mod == src) {
+ entry = gen2_id_table_scu;
+ size = ARRAY_SIZE(gen2_id_table_scu);
+ } else if (mod == dvc) {
+ entry = gen2_id_table_cmd;
+ size = ARRAY_SIZE(gen2_id_table_cmd);
+ }
+
+ if (!entry)
+ return 0xFF;
+
+ if (size <= id)
+ return 0xFF;
+
+ return entry[id];
+}
+
+static u32 rsnd_dmapp_get_chcr(struct rsnd_mod *mod_from,
+ struct rsnd_mod *mod_to)
+{
+ return (rsnd_dmapp_get_id(mod_from) << 24) +
+ (rsnd_dmapp_get_id(mod_to) << 16);
+}
+
+#define rsnd_dmapp_addr(dmac, dma, reg) \
+ (dmac->base + 0x20 + reg + \
+ (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
+static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
+{
+ struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
+
+ iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
+}
+
+static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
+{
+ struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+
+ return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
+}
+
+static void rsnd_dmapp_stop(struct rsnd_dma *dma)
+{
+ int i;
+
+ rsnd_dmapp_write(dma, 0, PDMACHCR);
+
+ for (i = 0; i < 1024; i++) {
+ if (0 == rsnd_dmapp_read(dma, PDMACHCR))
+ return;
+ udelay(1);
+ }
+}
+
+static void rsnd_dmapp_start(struct rsnd_dma *dma)
+{
+ struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
+
+ rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
+ rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
+ rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);
+}
+
+static int rsnd_dmapp_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id,
+ struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
+{
+ struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
+ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ dmapp->dmapp_id = dmac->dmapp_num;
+ dmapp->chcr = rsnd_dmapp_get_chcr(mod_from, mod_to) | PDMACHCR_DE;
+
+ dmac->dmapp_num++;
+
+ rsnd_dmapp_stop(dma);
+
+ dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
+ dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);
+
+ return 0;
+}
+
+static struct rsnd_dma_ops rsnd_dmapp_ops = {
+ .start = rsnd_dmapp_start,
+ .stop = rsnd_dmapp_stop,
+ .init = rsnd_dmapp_init,
+ .quit = rsnd_dmapp_stop,
+};
+
+/*
+ * Common DMAC Interface
+ */
+
+/*
+ * DMA read/write register offset
+ *
+ * RSND_xxx_I_N for Audio DMAC input
+ * RSND_xxx_O_N for Audio DMAC output
+ * RSND_xxx_I_P for Audio DMAC peri peri input
+ * RSND_xxx_O_P for Audio DMAC peri peri output
+ *
+ * ex) R-Car H2 case
+ * mod / DMAC in / DMAC out / DMAC PP in / DMAC pp out
+ * SSI : 0xec541000 / 0xec241008 / 0xec24100c
+ * SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
+ * SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
+ * CMD : 0xec500000 / / 0xec008000 0xec308000
+ */
+#define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
+#define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
+
+#define RDMA_SSIU_I_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
+#define RDMA_SSIU_O_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
+
+#define RDMA_SSIU_I_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
+#define RDMA_SSIU_O_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
+
+#define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i))
+#define RDMA_SRC_O_N(addr, i) (addr ##_reg - 0x004fc000 + (0x400 * i))
+
+#define RDMA_SRC_I_P(addr, i) (addr ##_reg - 0x00200000 + (0x400 * i))
+#define RDMA_SRC_O_P(addr, i) (addr ##_reg - 0x001fc000 + (0x400 * i))
+
+#define RDMA_CMD_O_N(addr, i) (addr ##_reg - 0x004f8000 + (0x400 * i))
+#define RDMA_CMD_O_P(addr, i) (addr ##_reg - 0x001f8000 + (0x400 * i))
+
+static dma_addr_t
+rsnd_gen2_dma_addr(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ int is_play, int is_from)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
+ phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
+ int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
+ int use_src = !!rsnd_io_to_mod_src(io);
+ int use_dvc = !!rsnd_io_to_mod_dvc(io);
+ int id = rsnd_mod_id(mod);
+ struct dma_addr {
+ dma_addr_t out_addr;
+ dma_addr_t in_addr;
+ } dma_addrs[3][2][3] = {
+ /* SRC */
+ {{{ 0, 0 },
+ /* Capture */
+ { RDMA_SRC_O_N(src, id), RDMA_SRC_I_P(src, id) },
+ { RDMA_CMD_O_N(src, id), RDMA_SRC_I_P(src, id) } },
+ /* Playback */
+ {{ 0, 0, },
+ { RDMA_SRC_O_P(src, id), RDMA_SRC_I_N(src, id) },
+ { RDMA_CMD_O_P(src, id), RDMA_SRC_I_N(src, id) } }
+ },
+ /* SSI */
+ /* Capture */
+ {{{ RDMA_SSI_O_N(ssi, id), 0 },
+ { RDMA_SSIU_O_P(ssi, id), 0 },
+ { RDMA_SSIU_O_P(ssi, id), 0 } },
+ /* Playback */
+ {{ 0, RDMA_SSI_I_N(ssi, id) },
+ { 0, RDMA_SSIU_I_P(ssi, id) },
+ { 0, RDMA_SSIU_I_P(ssi, id) } }
+ },
+ /* SSIU */
+ /* Capture */
+ {{{ RDMA_SSIU_O_N(ssi, id), 0 },
+ { RDMA_SSIU_O_P(ssi, id), 0 },
+ { RDMA_SSIU_O_P(ssi, id), 0 } },
+ /* Playback */
+ {{ 0, RDMA_SSIU_I_N(ssi, id) },
+ { 0, RDMA_SSIU_I_P(ssi, id) },
+ { 0, RDMA_SSIU_I_P(ssi, id) } } },
+ };
+
+ /* it shouldn't happen */
+ if (use_dvc && !use_src)
+ dev_err(dev, "DVC is selected without SRC\n");
+
+ /* use SSIU or SSI ? */
+ if (is_ssi && rsnd_ssi_use_busif(mod))
+ is_ssi++;
+
+ return (is_from) ?
+ dma_addrs[is_ssi][is_play][use_src + use_dvc].out_addr :
+ dma_addrs[is_ssi][is_play][use_src + use_dvc].in_addr;
+}
+
+static dma_addr_t rsnd_dma_addr(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ int is_play, int is_from)
+{
+ /*
+ * gen1 uses default DMA addr
+ */
+ if (rsnd_is_gen1(priv))
+ return 0;
+
+ if (!mod)
+ return 0;
+
+ return rsnd_gen2_dma_addr(priv, mod, is_play, is_from);
+}
+
+#define MOD_MAX 4 /* MEM/SSI/SRC/DVC */
+static void rsnd_dma_of_path(struct rsnd_dma *dma,
+ int is_play,
+ struct rsnd_mod **mod_from,
+ struct rsnd_mod **mod_to)
+{
+ struct rsnd_mod *this = rsnd_dma_to_mod(dma);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(this);
+ struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+ struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
+ struct rsnd_mod *mod[MOD_MAX];
+ int i, index;
+
+
+ for (i = 0; i < MOD_MAX; i++)
+ mod[i] = NULL;
+
+ /*
+ * in play case...
+ *
+ * src -> dst
+ *
+ * mem -> SSI
+ * mem -> SRC -> SSI
+ * mem -> SRC -> DVC -> SSI
+ */
+ mod[0] = NULL; /* for "mem" */
+ index = 1;
+ for (i = 1; i < MOD_MAX; i++) {
+ if (!src) {
+ mod[i] = ssi;
+ } else if (!dvc) {
+ mod[i] = src;
+ src = NULL;
+ } else {
+ if ((!is_play) && (this == src))
+ this = dvc;
+
+ mod[i] = (is_play) ? src : dvc;
+ i++;
+ mod[i] = (is_play) ? dvc : src;
+ src = NULL;
+ dvc = NULL;
+ }
+
+ if (mod[i] == this)
+ index = i;
+
+ if (mod[i] == ssi)
+ break;
+ }
+
+ if (is_play) {
+ *mod_from = mod[index - 1];
+ *mod_to = mod[index];
+ } else {
+ *mod_from = mod[index];
+ *mod_to = mod[index - 1];
+ }
+}
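
A minimal sketch of the path selection above, assuming a playback stream that uses both SRC and DVC: the DMA attached to a module links that module to its upstream neighbour, and the from/to pair is swapped for capture. This is a standalone illustration, not driver code.

#include <stdio.h>

int main(void)
{
    /* hypothetical playback path: mem -> SRC -> DVC -> SSI */
    const char *mod[] = { "mem", "SRC", "DVC", "SSI" };
    int index = 3;  /* suppose the DMA being set up belongs to the SSI */

    /* playback: data flows from the previous node into "this" one */
    printf("mod_from = %s, mod_to = %s\n", mod[index - 1], mod[index]);
    return 0;
}
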
+
+void rsnd_dma_stop(struct rsnd_dma *dma)
+{
+ dma->ops->stop(dma);
+}
+
+void rsnd_dma_start(struct rsnd_dma *dma)
+{
+ dma->ops->start(dma);
+}
+
+void rsnd_dma_quit(struct rsnd_dma *dma)
+{
+ struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+
+ if (!dmac)
+ return;
+
+ dma->ops->quit(dma);
+}
+
+int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id)
+{
+ struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
+ struct rsnd_mod *mod_from;
+ struct rsnd_mod *mod_to;
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+ int is_play = rsnd_io_is_play(io);
+
+ /*
+ * DMA is not available; fall back to PIO mode
+ * see
+ * rsnd_ssi_fallback()
+ * rsnd_rdai_continuance_probe()
+ */
+ if (!dmac)
+ return -EAGAIN;
+
+ rsnd_dma_of_path(dma, is_play, &mod_from, &mod_to);
+
+ dma->src_addr = rsnd_dma_addr(priv, mod_from, is_play, 1);
+ dma->dst_addr = rsnd_dma_addr(priv, mod_to, is_play, 0);
+
+ /* for Gen2 */
+ if (mod_from && mod_to)
+ dma->ops = &rsnd_dmapp_ops;
+ else
+ dma->ops = &rsnd_dmaen_ops;
+
+ /* for Gen1, overwrite */
+ if (rsnd_is_gen1(priv))
+ dma->ops = &rsnd_dmaen_ops;
+
+ return dma->ops->init(priv, dma, id, mod_from, mod_to);
+}
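
Not from the patch: a condensed restatement of the backend choice made just above in rsnd_dma_init(), with a made-up helper that returns a label instead of the real ops structs. Peri-peri DMA is only usable when both endpoints are on-chip modules; any transfer touching memory, and anything on Gen1, uses the generic DMA engine.

#include <stdio.h>

static const char *pick_dma_backend(int is_gen1, int has_from, int has_to)
{
    if (is_gen1 || !(has_from && has_to))
        return "dmaen (generic DMA engine)";
    return "dmapp (Audio DMAC peri peri)";
}

int main(void)
{
    printf("Gen2 SRC->SSI: %s\n", pick_dma_backend(0, 1, 1));
    printf("Gen2 mem->SSI: %s\n", pick_dma_backend(0, 0, 1));
    printf("Gen1 any path: %s\n", pick_dma_backend(1, 1, 1));
    return 0;
}
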
+
+int rsnd_dma_probe(struct platform_device *pdev,
+ const struct rsnd_of_data *of_data,
+ struct rsnd_priv *priv)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_dma_ctrl *dmac;
+ struct resource *res;
+
+ /*
+ * for Gen1
+ */
+ if (rsnd_is_gen1(priv))
+ return 0;
+
+ /*
+ * for Gen2
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
+ dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac || !res) {
+ dev_err(dev, "dma allocate failed\n");
+ return 0; /* it will be PIO mode */
+ }
+
+ dmac->dmapp_num = 0;
+ dmac->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dmac->base))
+ return PTR_ERR(dmac->base);
+
+ priv->dma = dmac;
+
+ return 0;
+}
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index d7f9ed959c4e..e5fcb062ad77 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -24,6 +24,9 @@ struct rsnd_dvc {
struct rsnd_kctrl_cfg_s rdown; /* Ramp Rate Down */
};
+#define rsnd_dvc_of_node(priv) \
+ of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
+
#define rsnd_mod_to_dvc(_mod) \
container_of((_mod), struct rsnd_dvc, mod)
@@ -33,7 +36,7 @@ struct rsnd_dvc {
((pos) = (struct rsnd_dvc *)(priv)->dvc + i); \
i++)
-static const char const *dvc_ramp_rate[] = {
+static const char * const dvc_ramp_rate[] = {
"128 dB/1 step", /* 00000 */
"64 dB/1 step", /* 00001 */
"32 dB/1 step", /* 00010 */
@@ -116,17 +119,6 @@ static void rsnd_dvc_volume_update(struct rsnd_mod *mod)
rsnd_mod_write(mod, DVC_DVUER, 1);
}
-static int rsnd_dvc_probe_gen2(struct rsnd_mod *mod,
- struct rsnd_priv *priv)
-{
- struct device *dev = rsnd_priv_to_dev(priv);
-
- dev_dbg(dev, "%s[%d] (Gen2) is probed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
-
- return 0;
-}
-
static int rsnd_dvc_remove_gen2(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
@@ -269,9 +261,17 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
return 0;
}
+static struct dma_chan *rsnd_dvc_dma_req(struct rsnd_mod *mod)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+
+ return rsnd_dma_request_channel(rsnd_dvc_of_node(priv),
+ mod, "tx");
+}
+
static struct rsnd_mod_ops rsnd_dvc_ops = {
.name = DVC_NAME,
- .probe = rsnd_dvc_probe_gen2,
+ .dma_req = rsnd_dvc_dma_req,
.remove = rsnd_dvc_remove_gen2,
.init = rsnd_dvc_init,
.quit = rsnd_dvc_quit,
@@ -333,7 +333,7 @@ int rsnd_dvc_probe(struct platform_device *pdev,
struct rsnd_dvc *dvc;
struct clk *clk;
char name[RSND_DVC_NAME_SIZE];
- int i, nr;
+ int i, nr, ret;
rsnd_of_parse_dvc(pdev, of_data, priv);
@@ -366,11 +366,22 @@ int rsnd_dvc_probe(struct platform_device *pdev,
dvc->info = &info->dvc_info[i];
- rsnd_mod_init(&dvc->mod, &rsnd_dvc_ops,
+ ret = rsnd_mod_init(&dvc->mod, &rsnd_dvc_ops,
clk, RSND_MOD_DVC, i);
-
- dev_dbg(dev, "CMD%d probed\n", i);
+ if (ret)
+ return ret;
}
return 0;
}
+
+void rsnd_dvc_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_dvc *dvc;
+ int i;
+
+ for_each_rsnd_dvc(dvc, priv, i) {
+ rsnd_mod_quit(&dvc->mod);
+ }
+}
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index de0685f2abae..8c7dc51b1c4f 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -28,6 +28,7 @@ struct rsnd_gen {
struct regmap *regmap[RSND_BASE_MAX];
struct regmap_field *regs[RSND_REG_MAX];
+ phys_addr_t res[RSND_REG_MAX];
};
#define rsnd_priv_to_gen(p) ((struct rsnd_gen *)(p)->gen)
@@ -118,11 +119,19 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
mask, data);
}
-#define rsnd_gen_regmap_init(priv, id_size, reg_id, conf) \
- _rsnd_gen_regmap_init(priv, id_size, reg_id, conf, ARRAY_SIZE(conf))
+phys_addr_t rsnd_gen_get_phy_addr(struct rsnd_priv *priv, int reg_id)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+ return gen->res[reg_id];
+}
+
+#define rsnd_gen_regmap_init(priv, id_size, reg_id, name, conf) \
+ _rsnd_gen_regmap_init(priv, id_size, reg_id, name, conf, ARRAY_SIZE(conf))
static int _rsnd_gen_regmap_init(struct rsnd_priv *priv,
int id_size,
int reg_id,
+ const char *name,
struct rsnd_regmap_field_conf *conf,
int conf_size)
{
@@ -141,8 +150,11 @@ static int _rsnd_gen_regmap_init(struct rsnd_priv *priv,
regc.reg_bits = 32;
regc.val_bits = 32;
regc.reg_stride = 4;
+ regc.name = name;
- res = platform_get_resource(pdev, IORESOURCE_MEM, reg_id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, reg_id);
if (!res)
return -ENODEV;
@@ -156,6 +168,7 @@ static int _rsnd_gen_regmap_init(struct rsnd_priv *priv,
gen->base[reg_id] = base;
gen->regmap[reg_id] = regmap;
+ gen->res[reg_id] = res->start;
for (i = 0; i < conf_size; i++) {
@@ -176,125 +189,11 @@ static int _rsnd_gen_regmap_init(struct rsnd_priv *priv,
}
/*
- * DMA read/write register offset
- *
- * RSND_xxx_I_N for Audio DMAC input
- * RSND_xxx_O_N for Audio DMAC output
- * RSND_xxx_I_P for Audio DMAC peri peri input
- * RSND_xxx_O_P for Audio DMAC peri peri output
- *
- * ex) R-Car H2 case
- * mod / DMAC in / DMAC out / DMAC PP in / DMAC pp out
- * SSI : 0xec541000 / 0xec241008 / 0xec24100c
- * SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
- * SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
- * CMD : 0xec500000 / / 0xec008000 0xec308000
- */
-#define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
-#define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
-
-#define RDMA_SSIU_I_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
-#define RDMA_SSIU_O_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
-
-#define RDMA_SSIU_I_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
-#define RDMA_SSIU_O_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
-
-#define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i))
-#define RDMA_SRC_O_N(addr, i) (addr ##_reg - 0x004fc000 + (0x400 * i))
-
-#define RDMA_SRC_I_P(addr, i) (addr ##_reg - 0x00200000 + (0x400 * i))
-#define RDMA_SRC_O_P(addr, i) (addr ##_reg - 0x001fc000 + (0x400 * i))
-
-#define RDMA_CMD_O_N(addr, i) (addr ##_reg - 0x004f8000 + (0x400 * i))
-#define RDMA_CMD_O_P(addr, i) (addr ##_reg - 0x001f8000 + (0x400 * i))
-
-static dma_addr_t
-rsnd_gen2_dma_addr(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
- int is_play, int is_from)
-{
- struct platform_device *pdev = rsnd_priv_to_pdev(priv);
- struct device *dev = rsnd_priv_to_dev(priv);
- struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
- dma_addr_t ssi_reg = platform_get_resource(pdev,
- IORESOURCE_MEM, RSND_GEN2_SSI)->start;
- dma_addr_t src_reg = platform_get_resource(pdev,
- IORESOURCE_MEM, RSND_GEN2_SCU)->start;
- int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
- int use_src = !!rsnd_io_to_mod_src(io);
- int use_dvc = !!rsnd_io_to_mod_dvc(io);
- int id = rsnd_mod_id(mod);
- struct dma_addr {
- dma_addr_t out_addr;
- dma_addr_t in_addr;
- } dma_addrs[3][2][3] = {
- /* SRC */
- {{{ 0, 0 },
- /* Capture */
- { RDMA_SRC_O_N(src, id), RDMA_SRC_I_P(src, id) },
- { RDMA_CMD_O_N(src, id), RDMA_SRC_I_P(src, id) } },
- /* Playback */
- {{ 0, 0, },
- { RDMA_SRC_O_P(src, id), RDMA_SRC_I_N(src, id) },
- { RDMA_CMD_O_P(src, id), RDMA_SRC_I_N(src, id) } }
- },
- /* SSI */
- /* Capture */
- {{{ RDMA_SSI_O_N(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 } },
- /* Playback */
- {{ 0, RDMA_SSI_I_N(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) } }
- },
- /* SSIU */
- /* Capture */
- {{{ RDMA_SSIU_O_N(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 } },
- /* Playback */
- {{ 0, RDMA_SSIU_I_N(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) } } },
- };
-
- /* it shouldn't happen */
- if (use_dvc && !use_src)
- dev_err(dev, "DVC is selected without SRC\n");
-
- /* use SSIU or SSI ? */
- if (is_ssi && (0 == strcmp(rsnd_mod_dma_name(mod), "ssiu")))
- is_ssi++;
-
- return (is_from) ?
- dma_addrs[is_ssi][is_play][use_src + use_dvc].out_addr :
- dma_addrs[is_ssi][is_play][use_src + use_dvc].in_addr;
-}
-
-dma_addr_t rsnd_gen_dma_addr(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
- int is_play, int is_from)
-{
- /*
- * gen1 uses default DMA addr
- */
- if (rsnd_is_gen1(priv))
- return 0;
-
- if (!mod)
- return 0;
-
- return rsnd_gen2_dma_addr(priv, mod, is_play, is_from);
-}
-
-/*
* Gen2
*/
static int rsnd_gen2_probe(struct platform_device *pdev,
struct rsnd_priv *priv)
{
- struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_regmap_field_conf conf_ssiu[] = {
RSND_GEN_S_REG(SSI_MODE0, 0x800),
RSND_GEN_S_REG(SSI_MODE1, 0x804),
@@ -368,18 +267,16 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
int ret_adg;
int ret_ssi;
- ret_ssiu = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_SSIU, conf_ssiu);
- ret_scu = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_SCU, conf_scu);
- ret_adg = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_ADG, conf_adg);
- ret_ssi = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_SSI, conf_ssi);
+ ret_ssiu = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_SSIU, "ssiu", conf_ssiu);
+ ret_scu = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_SCU, "scu", conf_scu);
+ ret_adg = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_ADG, "adg", conf_adg);
+ ret_ssi = rsnd_gen_regmap_init(priv, 10, RSND_GEN2_SSI, "ssi", conf_ssi);
if (ret_ssiu < 0 ||
ret_scu < 0 ||
ret_adg < 0 ||
ret_ssi < 0)
return ret_ssiu | ret_scu | ret_adg | ret_ssi;
- dev_dbg(dev, "Gen2 is probed\n");
-
return 0;
}
@@ -390,7 +287,6 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
static int rsnd_gen1_probe(struct platform_device *pdev,
struct rsnd_priv *priv)
{
- struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_regmap_field_conf conf_sru[] = {
RSND_GEN_S_REG(SRC_ROUTE_SEL, 0x00),
RSND_GEN_S_REG(SRC_TMG_SEL0, 0x08),
@@ -440,16 +336,14 @@ static int rsnd_gen1_probe(struct platform_device *pdev,
int ret_adg;
int ret_ssi;
- ret_sru = rsnd_gen_regmap_init(priv, 9, RSND_GEN1_SRU, conf_sru);
- ret_adg = rsnd_gen_regmap_init(priv, 9, RSND_GEN1_ADG, conf_adg);
- ret_ssi = rsnd_gen_regmap_init(priv, 9, RSND_GEN1_SSI, conf_ssi);
+ ret_sru = rsnd_gen_regmap_init(priv, 9, RSND_GEN1_SRU, "sru", conf_sru);
+ ret_adg = rsnd_gen_regmap_init(priv, 9, RSND_GEN1_ADG, "adg", conf_adg);
+ ret_ssi = rsnd_gen_regmap_init(priv, 9, RSND_GEN1_SSI, "ssi", conf_ssi);
if (ret_sru < 0 ||
ret_adg < 0 ||
ret_ssi < 0)
return ret_sru | ret_adg | ret_ssi;
- dev_dbg(dev, "Gen1 is probed\n");
-
return 0;
}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index e7914bd610e2..4e6de6804cfb 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -170,21 +170,47 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod);
/*
* R-Car DMA
*/
-struct rsnd_dma {
- struct sh_dmae_slave slave;
+struct rsnd_dma;
+struct rsnd_dma_ops {
+ void (*start)(struct rsnd_dma *dma);
+ void (*stop)(struct rsnd_dma *dma);
+ int (*init)(struct rsnd_priv *priv, struct rsnd_dma *dma, int id,
+ struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
+ void (*quit)(struct rsnd_dma *dma);
+};
+
+struct rsnd_dmaen {
struct dma_chan *chan;
- enum dma_transfer_direction dir;
- dma_addr_t addr;
};
+struct rsnd_dmapp {
+ int dmapp_id;
+ u32 chcr;
+};
+
+struct rsnd_dma {
+ struct rsnd_dma_ops *ops;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ union {
+ struct rsnd_dmaen en;
+ struct rsnd_dmapp pp;
+ } dma;
+};
+#define rsnd_dma_to_dmaen(dma) (&(dma)->dma.en)
+#define rsnd_dma_to_dmapp(dma) (&(dma)->dma.pp)
+
void rsnd_dma_start(struct rsnd_dma *dma);
void rsnd_dma_stop(struct rsnd_dma *dma);
-int rsnd_dma_available(struct rsnd_dma *dma);
-int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
- int is_play, int id);
-void rsnd_dma_quit(struct rsnd_priv *priv,
- struct rsnd_dma *dma);
+int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id);
+void rsnd_dma_quit(struct rsnd_dma *dma);
+int rsnd_dma_probe(struct platform_device *pdev,
+ const struct rsnd_of_data *of_data,
+ struct rsnd_priv *priv);
+struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
+ struct rsnd_mod *mod, char *name);
+#define rsnd_dma_to_mod(_dma) container_of((_dma), struct rsnd_mod, dma)
/*
* R-Car sound mod
@@ -198,7 +224,7 @@ enum rsnd_mod_type {
struct rsnd_mod_ops {
char *name;
- char* (*dma_name)(struct rsnd_mod *mod);
+ struct dma_chan* (*dma_req)(struct rsnd_mod *mod);
int (*probe)(struct rsnd_mod *mod,
struct rsnd_priv *priv);
int (*remove)(struct rsnd_mod *mod,
@@ -213,6 +239,9 @@ struct rsnd_mod_ops {
struct rsnd_priv *priv);
int (*pcm_new)(struct rsnd_mod *mod,
struct snd_soc_pcm_runtime *rtd);
+ int (*hw_params)(struct rsnd_mod *mod,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params);
int (*fallback)(struct rsnd_mod *mod,
struct rsnd_priv *priv);
};
@@ -236,6 +265,9 @@ struct rsnd_mod {
* 2 0: start 1: stop
* 3 0: pcm_new
* 4 0: fallback
+ *
+ * bit 31 is always called (see __rsnd_mod_call)
+ * 31 0: hw_params
*/
#define __rsnd_mod_shift_probe 0
#define __rsnd_mod_shift_remove 0
@@ -245,6 +277,7 @@ struct rsnd_mod {
#define __rsnd_mod_shift_stop 2
#define __rsnd_mod_shift_pcm_new 3
#define __rsnd_mod_shift_fallback 4
+#define __rsnd_mod_shift_hw_params 31 /* always called */
#define __rsnd_mod_call_probe 0
#define __rsnd_mod_call_remove 1
@@ -254,28 +287,30 @@ struct rsnd_mod {
#define __rsnd_mod_call_stop 1
#define __rsnd_mod_call_pcm_new 0
#define __rsnd_mod_call_fallback 0
+#define __rsnd_mod_call_hw_params 0
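
The shift/call pairs above suggest a balance check: a callback only runs when its status bit matches the expected "call" value, and the bit is flipped afterwards, so probe/remove and start/stop stay paired, while bit 31 (hw_params) is treated as always callable. The standalone sketch below is only an assumption about that bookkeeping, not code from the patch.

#include <stdio.h>

/* assumed scheme: one status bit per callback pair, toggled on each call */
#define SHIFT_PROBE   0
#define CALL_PROBE    0   /* bit must be 0 before probe */
#define CALL_REMOVE   1   /* bit must be 1 before remove */

static int mod_call(unsigned int *status, int shift, unsigned int expect)
{
    if (((*status >> shift) & 1) != expect)
        return -1;              /* unbalanced: refuse the call */
    *status ^= 1u << shift;     /* flip so the paired callback may run */
    return 0;
}

int main(void)
{
    unsigned int status = 0;

    printf("probe : %d\n", mod_call(&status, SHIFT_PROBE, CALL_PROBE));
    printf("probe : %d\n", mod_call(&status, SHIFT_PROBE, CALL_PROBE)); /* -1 */
    printf("remove: %d\n", mod_call(&status, SHIFT_PROBE, CALL_REMOVE));
    return 0;
}
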
#define rsnd_mod_to_priv(mod) (rsnd_io_to_priv(rsnd_mod_to_io(mod)))
#define rsnd_mod_to_dma(mod) (&(mod)->dma)
-#define rsnd_dma_to_mod(_dma) container_of((_dma), struct rsnd_mod, dma)
#define rsnd_mod_to_io(mod) ((mod)->io)
#define rsnd_mod_id(mod) ((mod)->id)
-#define rsnd_mod_hw_start(mod) clk_prepare_enable((mod)->clk)
-#define rsnd_mod_hw_stop(mod) clk_disable_unprepare((mod)->clk)
+#define rsnd_mod_hw_start(mod) clk_enable((mod)->clk)
+#define rsnd_mod_hw_stop(mod) clk_disable((mod)->clk)
-void rsnd_mod_init(struct rsnd_mod *mod,
+int rsnd_mod_init(struct rsnd_mod *mod,
struct rsnd_mod_ops *ops,
struct clk *clk,
enum rsnd_mod_type type,
int id);
+void rsnd_mod_quit(struct rsnd_mod *mod);
char *rsnd_mod_name(struct rsnd_mod *mod);
-char *rsnd_mod_dma_name(struct rsnd_mod *mod);
+struct dma_chan *rsnd_mod_dma_req(struct rsnd_mod *mod);
/*
* R-Car sound DAI
*/
#define RSND_DAI_NAME_SIZE 16
struct rsnd_dai_stream {
+ char name[RSND_DAI_NAME_SIZE];
struct snd_pcm_substream *substream;
struct rsnd_mod *mod[RSND_MOD_MAX];
struct rsnd_dai_path_info *info; /* rcar_snd.h */
@@ -331,9 +366,7 @@ int rsnd_gen_probe(struct platform_device *pdev,
void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
struct rsnd_mod *mod,
enum rsnd_reg reg);
-dma_addr_t rsnd_gen_dma_addr(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
- int is_play, int is_from);
+phys_addr_t rsnd_gen_get_phy_addr(struct rsnd_priv *priv, int reg_id);
#define rsnd_is_gen1(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN1)
#define rsnd_is_gen2(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN2)
@@ -389,6 +422,11 @@ struct rsnd_priv {
void *adg;
/*
+ * below value will be filled on rsnd_dma_probe()
+ */
+ void *dma;
+
+ /*
* below value will be filled on rsnd_ssi_probe()
*/
void *ssi;
@@ -414,19 +452,6 @@ struct rsnd_priv {
#define rsnd_lock(priv, flags) spin_lock_irqsave(&priv->lock, flags)
#define rsnd_unlock(priv, flags) spin_unlock_irqrestore(&priv->lock, flags)
-#define rsnd_info_is_playback(priv, type) \
-({ \
- struct rcar_snd_info *info = rsnd_priv_to_info(priv); \
- int i, is_play = 0; \
- for (i = 0; i < info->dai_info_nr; i++) { \
- if (info->dai_info[i].playback.type == (type)->info) { \
- is_play = 1; \
- break; \
- } \
- } \
- is_play; \
-})
-
/*
* rsnd_kctrl
*/
@@ -480,6 +505,8 @@ int rsnd_kctrl_new_e(struct rsnd_mod *mod,
int rsnd_src_probe(struct platform_device *pdev,
const struct rsnd_of_data *of_data,
struct rsnd_priv *priv);
+void rsnd_src_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv);
struct rsnd_mod *rsnd_src_mod_get(struct rsnd_priv *priv, int id);
unsigned int rsnd_src_get_ssi_rate(struct rsnd_priv *priv,
struct rsnd_dai_stream *io,
@@ -498,9 +525,12 @@ int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod);
int rsnd_ssi_probe(struct platform_device *pdev,
const struct rsnd_of_data *of_data,
struct rsnd_priv *priv);
+void rsnd_ssi_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv);
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
+int rsnd_ssi_use_busif(struct rsnd_mod *mod);
/*
* R-Car DVC
diff --git a/sound/soc/sh/rcar/rsrc-card.c b/sound/soc/sh/rcar/rsrc-card.c
new file mode 100644
index 000000000000..a68517afe615
--- /dev/null
+++ b/sound/soc/sh/rcar/rsrc-card.c
@@ -0,0 +1,512 @@
+/*
+ * Renesas Sampling Rate Convert Sound Card for DPCM
+ *
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on ${LINUX}/sound/soc/generic/simple-card.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+struct rsrc_card_of_data {
+ const char *prefix;
+ const struct snd_soc_dapm_route *routes;
+ int num_routes;
+};
+
+static const struct snd_soc_dapm_route routes_ssi0_ak4642[] = {
+ {"ak4642 Playback", NULL, "DAI0 Playback"},
+ {"DAI0 Capture", NULL, "ak4642 Capture"},
+};
+
+static const struct rsrc_card_of_data routes_of_ssi0_ak4642 = {
+ .prefix = "ak4642",
+ .routes = routes_ssi0_ak4642,
+ .num_routes = ARRAY_SIZE(routes_ssi0_ak4642),
+};
+
+static const struct of_device_id rsrc_card_of_match[] = {
+ { .compatible = "renesas,rsrc-card,lager", .data = &routes_of_ssi0_ak4642 },
+ { .compatible = "renesas,rsrc-card,koelsch", .data = &routes_of_ssi0_ak4642 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
+
+struct rsrc_card_dai {
+ const char *name;
+ unsigned int fmt;
+ unsigned int sysclk;
+ struct clk *clk;
+};
+
+#define RSRC_FB_NUM 2 /* FE/BE */
+#define IDX_CPU 0
+#define IDX_CODEC 1
+struct rsrc_card_priv {
+ struct snd_soc_card snd_card;
+ struct rsrc_card_dai_props {
+ struct rsrc_card_dai cpu_dai;
+ struct rsrc_card_dai codec_dai;
+ } dai_props[RSRC_FB_NUM];
+ struct snd_soc_codec_conf codec_conf;
+ struct snd_soc_dai_link dai_link[RSRC_FB_NUM];
+ u32 convert_rate;
+};
+
+#define rsrc_priv_to_dev(priv) ((priv)->snd_card.dev)
+#define rsrc_priv_to_link(priv, i) ((priv)->snd_card.dai_link + i)
+#define rsrc_priv_to_props(priv, i) ((priv)->dai_props + i)
+#define rsrc_dev_to_of_data(dev) (of_match_device(rsrc_card_of_match, (dev))->data)
+
+static int rsrc_card_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct rsrc_card_dai_props *dai_props =
+ &priv->dai_props[rtd - rtd->card->rtd];
+ int ret;
+
+ ret = clk_prepare_enable(dai_props->cpu_dai.clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dai_props->codec_dai.clk);
+ if (ret)
+ clk_disable_unprepare(dai_props->cpu_dai.clk);
+
+ return ret;
+}
+
+static void rsrc_card_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct rsrc_card_dai_props *dai_props =
+ &priv->dai_props[rtd - rtd->card->rtd];
+
+ clk_disable_unprepare(dai_props->cpu_dai.clk);
+
+ clk_disable_unprepare(dai_props->codec_dai.clk);
+}
+
+static struct snd_soc_ops rsrc_card_ops = {
+ .startup = rsrc_card_startup,
+ .shutdown = rsrc_card_shutdown,
+};
+
+static int __rsrc_card_dai_init(struct snd_soc_dai *dai,
+ struct rsrc_card_dai *set)
+{
+ int ret;
+
+ if (set->fmt) {
+ ret = snd_soc_dai_set_fmt(dai, set->fmt);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dai->dev, "set_fmt error\n");
+ goto err;
+ }
+ }
+
+ if (set->sysclk) {
+ ret = snd_soc_dai_set_sysclk(dai, 0, set->sysclk, 0);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dai->dev, "set_sysclk error\n");
+ goto err;
+ }
+ }
+
+ ret = 0;
+
+err:
+ return ret;
+}
+
+static int rsrc_card_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *codec = rtd->codec_dai;
+ struct snd_soc_dai *cpu = rtd->cpu_dai;
+ struct rsrc_card_dai_props *dai_props;
+ int num, ret;
+
+ num = rtd - rtd->card->rtd;
+ dai_props = &priv->dai_props[num];
+ ret = __rsrc_card_dai_init(codec, &dai_props->codec_dai);
+ if (ret < 0)
+ return ret;
+
+ ret = __rsrc_card_dai_init(cpu, &dai_props->cpu_dai);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rsrc_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ if (!priv->convert_rate)
+ return 0;
+
+ rate->min = rate->max = priv->convert_rate;
+
+ return 0;
+}
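
Standalone illustration (struct and helper invented for the example) of what the BE fixup above does: when the card has a "convert-rate" property, the back-end's rate interval is pinned to that single value so DPCM negotiates the conversion in the SRC.

#include <stdio.h>

struct interval { unsigned int min, max; };

static void fixup_rate(struct interval *rate, unsigned int convert_rate)
{
    if (!convert_rate)
        return;                   /* no conversion requested */
    rate->min = rate->max = convert_rate;
}

int main(void)
{
    struct interval rate = { 8000, 192000 };

    fixup_rate(&rate, 48000);     /* e.g. convert-rate = <48000> in DT */
    printf("BE rate fixed to %u..%u\n", rate.min, rate.max);
    return 0;
}
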
+
+static int
+rsrc_card_sub_parse_of(struct rsrc_card_priv *priv,
+ struct device_node *np,
+ struct rsrc_card_dai *dai,
+ struct snd_soc_dai_link *dai_link,
+ int *args_count)
+{
+ struct device *dev = rsrc_priv_to_dev(priv);
+ const struct rsrc_card_of_data *of_data = rsrc_dev_to_of_data(dev);
+ struct of_phandle_args args;
+ struct device_node **p_node;
+ struct clk *clk;
+ const char **dai_name;
+ const char **name;
+ u32 val;
+ int ret;
+
+ if (args_count) {
+ p_node = &dai_link->cpu_of_node;
+ dai_name = &dai_link->cpu_dai_name;
+ name = &dai_link->cpu_name;
+ } else {
+ p_node = &dai_link->codec_of_node;
+ dai_name = &dai_link->codec_dai_name;
+ name = &dai_link->codec_name;
+ }
+
+ if (!np) {
+ /* use snd-soc-dummy */
+ *p_node = NULL;
+ *dai_name = "snd-soc-dummy-dai";
+ *name = "snd-soc-dummy";
+ return 0;
+ }
+
+ /*
+ * Get node via "sound-dai = <&phandle port>"
+ * it will be used as xxx_of_node on soc_bind_dai_link()
+ */
+ ret = of_parse_phandle_with_args(np, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ *p_node = args.np;
+
+ /* Get dai->name */
+ ret = snd_soc_of_get_dai_name(np, dai_name);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * FIXME
+ *
+ * rsrc assumes DPCM playback/capture
+ */
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
+
+ if (args_count) {
+ *args_count = args.args_count;
+ dai_link->dynamic = 1;
+ } else {
+ dai_link->no_pcm = 1;
+ priv->codec_conf.of_node = (*p_node);
+ priv->codec_conf.name_prefix = of_data->prefix;
+ }
+
+ /*
+ * Parse dai->sysclk, which comes from "clocks = <&xxx>"
+ * (if the system has a common clock),
+ * "system-clock-frequency = <xxx>",
+ * or the device's module clock.
+ */
+ if (of_property_read_bool(np, "clocks")) {
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+
+ dai->sysclk = clk_get_rate(clk);
+ dai->clk = clk;
+ } else if (!of_property_read_u32(np, "system-clock-frequency", &val)) {
+ dai->sysclk = val;
+ } else {
+ clk = of_clk_get(args.np, 0);
+ if (!IS_ERR(clk))
+ dai->sysclk = clk_get_rate(clk);
+ }
+
+ return 0;
+}
+
+static int rsrc_card_parse_daifmt(struct device_node *node,
+ struct rsrc_card_priv *priv,
+ struct device_node *codec,
+ int idx)
+{
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ struct rsrc_card_dai_props *dai_props = rsrc_priv_to_props(priv, idx);
+ struct rsrc_card_dai *cpu_dai = &dai_props->cpu_dai;
+ struct rsrc_card_dai *codec_dai = &dai_props->codec_dai;
+ unsigned int daifmt;
+
+ daifmt = snd_soc_of_parse_daifmt(node, NULL,
+ &bitclkmaster, &framemaster);
+ daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+
+ if (!bitclkmaster && !framemaster)
+ return -EINVAL;
+
+ if (codec == bitclkmaster)
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
+ else
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
+
+ cpu_dai->fmt = daifmt;
+ codec_dai->fmt = daifmt;
+
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+
+ return 0;
+}
+
+static int rsrc_card_dai_link_of(struct device_node *node,
+ struct rsrc_card_priv *priv,
+ int idx)
+{
+ struct device *dev = rsrc_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
+ struct rsrc_card_dai_props *dai_props = rsrc_priv_to_props(priv, idx);
+ struct device_node *cpu = NULL;
+ struct device_node *codec = NULL;
+ char *name;
+ int ret, cpu_args;
+
+ cpu = of_get_child_by_name(node, "cpu");
+ codec = of_get_child_by_name(node, "codec");
+
+ if (!cpu || !codec) {
+ ret = -EINVAL;
+ dev_err(dev, "%s: Can't find cpu/codec DT node\n", __func__);
+ goto dai_link_of_err;
+ }
+
+ ret = rsrc_card_parse_daifmt(node, priv, codec, idx);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = rsrc_card_sub_parse_of(priv, (idx == IDX_CPU) ? cpu : NULL,
+ &dai_props->cpu_dai,
+ dai_link,
+ &cpu_args);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = rsrc_card_sub_parse_of(priv, (idx == IDX_CODEC) ? codec : NULL,
+ &dai_props->codec_dai,
+ dai_link,
+ NULL);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name) {
+ ret = -EINVAL;
+ goto dai_link_of_err;
+ }
+
+ /* Simple Card assumes platform == cpu */
+ dai_link->platform_of_node = dai_link->cpu_of_node;
+
+ /* DAI link name is created from CPU/CODEC dai name */
+ name = devm_kzalloc(dev,
+ strlen(dai_link->cpu_dai_name) +
+ strlen(dai_link->codec_dai_name) + 2,
+ GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto dai_link_of_err;
+ }
+
+ sprintf(name, "%s-%s", dai_link->cpu_dai_name,
+ dai_link->codec_dai_name);
+ dai_link->name = dai_link->stream_name = name;
+ dai_link->ops = &rsrc_card_ops;
+ dai_link->init = rsrc_card_dai_init;
+
+ if (idx == IDX_CODEC)
+ dai_link->be_hw_params_fixup = rsrc_card_be_hw_params_fixup;
+
+ dev_dbg(dev, "\tname : %s\n", dai_link->stream_name);
+ dev_dbg(dev, "\tcpu : %s / %04x / %d\n",
+ dai_link->cpu_dai_name,
+ dai_props->cpu_dai.fmt,
+ dai_props->cpu_dai.sysclk);
+ dev_dbg(dev, "\tcodec : %s / %04x / %d\n",
+ dai_link->codec_dai_name,
+ dai_props->codec_dai.fmt,
+ dai_props->codec_dai.sysclk);
+
+ /*
+ * soc_bind_dai_link() will check the cpu name after
+ * of_node matching if dai_link has a cpu_dai_name,
+ * but it will never match if the name was created by
+ * fmt_single_name(). So remove cpu_dai_name if cpu_args
+ * was 0. See:
+ * fmt_single_name()
+ * fmt_multiple_name()
+ */
+ if (!cpu_args)
+ dai_link->cpu_dai_name = NULL;
+
+dai_link_of_err:
+ of_node_put(cpu);
+ of_node_put(codec);
+
+ return ret;
+}
+
+static int rsrc_card_parse_of(struct device_node *node,
+ struct rsrc_card_priv *priv)
+{
+ struct device *dev = rsrc_priv_to_dev(priv);
+ const struct rsrc_card_of_data *of_data = rsrc_dev_to_of_data(dev);
+ int ret;
+ int i;
+
+ if (!node)
+ return -EINVAL;
+
+ /* Parse the card name from DT */
+ snd_soc_of_parse_card_name(&priv->snd_card, "card-name");
+
+ /* DAPM routes */
+ priv->snd_card.of_dapm_routes = of_data->routes;
+ priv->snd_card.num_of_dapm_routes = of_data->num_routes;
+
+ /* sampling rate convert */
+ of_property_read_u32(node, "convert-rate", &priv->convert_rate);
+
+ dev_dbg(dev, "New rsrc-audio-card: %s (%d)\n",
+ priv->snd_card.name ? priv->snd_card.name : "",
+ priv->convert_rate);
+
+ /* FE/BE */
+ for (i = 0; i < RSRC_FB_NUM; i++) {
+ ret = rsrc_card_dai_link_of(node, priv, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!priv->snd_card.name)
+ priv->snd_card.name = priv->snd_card.dai_link->name;
+
+ return 0;
+}
+
+/* Decrease the reference count of the device nodes */
+static int rsrc_card_unref(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *dai_link;
+ int num_links;
+
+ for (num_links = 0, dai_link = card->dai_link;
+ num_links < card->num_links;
+ num_links++, dai_link++) {
+ of_node_put(dai_link->cpu_of_node);
+ of_node_put(dai_link->codec_of_node);
+ }
+ return 0;
+}
+
+static int rsrc_card_probe(struct platform_device *pdev)
+{
+ struct rsrc_card_priv *priv;
+ struct snd_soc_dai_link *dai_link;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* Allocate the private data */
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Init snd_soc_card */
+ priv->snd_card.owner = THIS_MODULE;
+ priv->snd_card.dev = dev;
+ dai_link = priv->dai_link;
+ priv->snd_card.dai_link = dai_link;
+ priv->snd_card.num_links = RSRC_FB_NUM;
+ priv->snd_card.codec_conf = &priv->codec_conf;
+ priv->snd_card.num_configs = 1;
+
+ ret = rsrc_card_parse_of(np, priv);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "parse error %d\n", ret);
+ goto err;
+ }
+
+ snd_soc_card_set_drvdata(&priv->snd_card, priv);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card);
+ if (ret >= 0)
+ return ret;
+err:
+ rsrc_card_unref(&priv->snd_card);
+
+ return ret;
+}
+
+static int rsrc_card_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ return rsrc_card_unref(card);
+}
+
+static struct platform_driver rsrc_card = {
+ .driver = {
+ .name = "renesas-src-audio-card",
+ .of_match_table = rsrc_card_of_match,
+ },
+ .probe = rsrc_card_probe,
+ .remove = rsrc_card_remove,
+};
+
+module_platform_driver(rsrc_card);
+
+MODULE_ALIAS("platform:renesas-src-audio-card");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas Sampling Rate Convert Sound Card");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 81c182b4bad5..3beb32eb412a 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -22,16 +22,20 @@
struct rsnd_src {
struct rsnd_src_platform_info *info; /* rcar_snd.h */
struct rsnd_mod mod;
+ struct rsnd_kctrl_cfg_s sen; /* sync convert enable */
+ struct rsnd_kctrl_cfg_s sync; /* sync convert */
+ u32 convert_rate; /* sampling rate convert */
int err;
};
#define RSND_SRC_NAME_SIZE 16
-#define rsnd_src_convert_rate(p) ((p)->info->convert_rate)
+#define rsnd_enable_sync_convert(src) ((src)->sen.val)
+#define rsnd_src_of_node(priv) \
+ of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src")
+
#define rsnd_mod_to_src(_mod) \
container_of((_mod), struct rsnd_src, mod)
-#define rsnd_src_dma_available(src) \
- rsnd_dma_available(rsnd_mod_to_dma(&(src)->mod))
#define for_each_rsnd_src(pos, priv, i) \
for ((i) = 0; \
@@ -113,6 +117,17 @@ struct rsnd_src {
/*
* Gen1/Gen2 common functions
*/
+static struct dma_chan *rsnd_src_dma_req(struct rsnd_mod *mod)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ int is_play = rsnd_io_is_play(io);
+
+ return rsnd_dma_request_channel(rsnd_src_of_node(priv),
+ mod,
+ is_play ? "rx" : "tx");
+}
+
int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
int use_busif)
{
@@ -220,6 +235,30 @@ int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod)
return 0;
}
+static u32 rsnd_src_convert_rate(struct rsnd_src *src)
+{
+ struct rsnd_mod *mod = &src->mod;
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ u32 convert_rate;
+
+ if (!runtime)
+ return 0;
+
+ if (!rsnd_enable_sync_convert(src))
+ return src->convert_rate;
+
+ convert_rate = src->sync.val;
+
+ if (!convert_rate)
+ convert_rate = src->convert_rate;
+
+ if (!convert_rate)
+ convert_rate = runtime->rate;
+
+ return convert_rate;
+}
+
unsigned int rsnd_src_get_ssi_rate(struct rsnd_priv *priv,
struct rsnd_dai_stream *io,
struct snd_pcm_runtime *runtime)
@@ -276,7 +315,43 @@ static int rsnd_src_set_convert_rate(struct rsnd_mod *mod)
return 0;
}
-static int rsnd_src_init(struct rsnd_mod *mod)
+static int rsnd_src_hw_params(struct rsnd_mod *mod,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *fe_params)
+{
+ struct rsnd_src *src = rsnd_mod_to_src(mod);
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+
+ /* default value (mainly for non-DT) */
+ src->convert_rate = src->info->convert_rate;
+
+ /*
+ * SRC assumes that it is used under DPCM if the user wants
+ * sampling rate conversion. In that case SRC should be the FE,
+ * and this function will be called *after* the BE settings,
+ * which means each BE already has fixed-up hw_params.
+ * see
+ * dpcm_fe_dai_hw_params()
+ * dpcm_be_dai_hw_params()
+ */
+ if (fe->dai_link->dynamic) {
+ int stream = substream->stream;
+ struct snd_soc_dpcm *dpcm;
+ struct snd_pcm_hw_params *be_params;
+
+ list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ be_params = &dpcm->hw_params;
+
+ if (params_rate(fe_params) != params_rate(be_params))
+ src->convert_rate = params_rate(be_params);
+ }
+ }
+
+ return 0;
+}
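
A standalone sketch (helper name is made up) of the per-back-end comparison rsnd_src_hw_params() performs: when the front-end rate differs from a fixed back-end rate, the SRC convert rate becomes the back-end rate, otherwise the platform-data default is kept.

#include <stdio.h>

static unsigned int pick_convert_rate(unsigned int fe_rate,
                                      unsigned int be_rate,
                                      unsigned int dt_default)
{
    unsigned int convert_rate = dt_default;   /* non-DT / platform fallback */

    if (fe_rate != be_rate)
        convert_rate = be_rate;
    return convert_rate;
}

int main(void)
{
    /* FE at 44.1 kHz, codec BE fixed to 48 kHz -> convert to 48 kHz */
    printf("convert_rate = %u\n", pick_convert_rate(44100, 48000, 0));
    return 0;
}
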
+
+static int rsnd_src_init(struct rsnd_mod *mod,
+ struct rsnd_priv *priv)
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
@@ -284,6 +359,9 @@ static int rsnd_src_init(struct rsnd_mod *mod)
src->err = 0;
+ /* reset sync convert_rate */
+ src->sync.val = 0;
+
/*
* Initialize the operation of the SRC internal circuits
* see rsnd_src_start()
@@ -305,6 +383,11 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
dev_warn(dev, "%s[%d] under/over flow err = %d\n",
rsnd_mod_name(mod), rsnd_mod_id(mod), src->err);
+ src->convert_rate = 0;
+
+ /* reset sync convert_rate */
+ src->sync.val = 0;
+
return 0;
}
@@ -448,23 +531,12 @@ static int rsnd_src_set_convert_rate_gen1(struct rsnd_mod *mod)
return 0;
}
-static int rsnd_src_probe_gen1(struct rsnd_mod *mod,
- struct rsnd_priv *priv)
-{
- struct device *dev = rsnd_priv_to_dev(priv);
-
- dev_dbg(dev, "%s[%d] (Gen1) is probed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
-
- return 0;
-}
-
static int rsnd_src_init_gen1(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
int ret;
- ret = rsnd_src_init(mod);
+ ret = rsnd_src_init(mod, priv);
if (ret < 0)
return ret;
@@ -505,11 +577,12 @@ static int rsnd_src_stop_gen1(struct rsnd_mod *mod,
static struct rsnd_mod_ops rsnd_src_gen1_ops = {
.name = SRC_NAME,
- .probe = rsnd_src_probe_gen1,
+ .dma_req = rsnd_src_dma_req,
.init = rsnd_src_init_gen1,
.quit = rsnd_src_quit,
.start = rsnd_src_start_gen1,
.stop = rsnd_src_stop_gen1,
+ .hw_params = rsnd_src_hw_params,
};
/*
@@ -607,13 +680,17 @@ static irqreturn_t rsnd_src_interrupt_gen2(int irq, void *data)
if (rsnd_src_error_record_gen2(mod)) {
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_src *src = rsnd_mod_to_src(mod);
struct device *dev = rsnd_priv_to_dev(priv);
- _rsnd_src_stop_gen2(mod);
- _rsnd_src_start_gen2(mod);
-
dev_dbg(dev, "%s[%d] restart\n",
rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ _rsnd_src_stop_gen2(mod);
+ if (src->err < 1024)
+ _rsnd_src_start_gen2(mod);
+ else
+ dev_warn(dev, "no more SRC restart\n");
}
return IRQ_HANDLED;
@@ -627,6 +704,7 @@ static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod)
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
struct rsnd_src *src = rsnd_mod_to_src(mod);
u32 convert_rate = rsnd_src_convert_rate(src);
+ u32 cr, route;
uint ratio;
int ret;
@@ -647,13 +725,21 @@ static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod)
if (ret < 0)
return ret;
- rsnd_mod_write(mod, SRC_SRCCR, 0x00011110);
-
+ cr = 0x00011110;
+ route = 0x0;
if (convert_rate) {
- /* Gen1/Gen2 are not compatible */
- rsnd_mod_write(mod, SRC_ROUTE_MODE0, 1);
+ route = 0x1;
+
+ if (rsnd_enable_sync_convert(src)) {
+ cr |= 0x1;
+ route |= rsnd_io_is_play(io) ?
+ (0x1 << 24) : (0x1 << 25);
+ }
}
+ rsnd_mod_write(mod, SRC_SRCCR, cr);
+ rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
+
switch (rsnd_mod_id(mod)) {
case 5:
case 6:
@@ -708,24 +794,12 @@ static int rsnd_src_probe_gen2(struct rsnd_mod *mod,
IRQF_SHARED,
dev_name(dev), mod);
if (ret)
- goto rsnd_src_probe_gen2_fail;
+ return ret;
}
ret = rsnd_dma_init(priv,
rsnd_mod_to_dma(mod),
- rsnd_info_is_playback(priv, src),
src->info->dma_id);
- if (ret)
- goto rsnd_src_probe_gen2_fail;
-
- dev_dbg(dev, "%s[%d] (Gen2) is probed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
-
- return ret;
-
-rsnd_src_probe_gen2_fail:
- dev_err(dev, "%s[%d] (Gen2) failed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
return ret;
}
@@ -733,7 +807,7 @@ rsnd_src_probe_gen2_fail:
static int rsnd_src_remove_gen2(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
- rsnd_dma_quit(priv, rsnd_mod_to_dma(mod));
+ rsnd_dma_quit(rsnd_mod_to_dma(mod));
return 0;
}
@@ -743,7 +817,7 @@ static int rsnd_src_init_gen2(struct rsnd_mod *mod,
{
int ret;
- ret = rsnd_src_init(mod);
+ ret = rsnd_src_init(mod, priv);
if (ret < 0)
return ret;
@@ -778,14 +852,91 @@ static int rsnd_src_stop_gen2(struct rsnd_mod *mod,
return ret;
}
+static void rsnd_src_reconvert_update(struct rsnd_mod *mod)
+{
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_src *src = rsnd_mod_to_src(mod);
+ u32 convert_rate = rsnd_src_convert_rate(src);
+ u32 fsrate;
+
+ if (!runtime)
+ return;
+
+ if (!convert_rate)
+ convert_rate = runtime->rate;
+
+ fsrate = 0x0400000 / convert_rate * runtime->rate;
+
+ /* update IFS */
+ rsnd_mod_write(mod, SRC_IFSVR, fsrate);
+}
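
Worked arithmetic for the IFS update above, as a standalone program; the two rates are example values and the integer division is kept exactly as in rsnd_src_reconvert_update().

#include <stdio.h>

int main(void)
{
    unsigned int runtime_rate = 48000;   /* stream rate */
    unsigned int convert_rate = 44100;   /* target rate from the kcontrol */
    unsigned int fsrate;

    fsrate = 0x0400000 / convert_rate * runtime_rate;   /* 95 * 48000 */
    printf("SRC_IFSVR = %u (0x%x)\n", fsrate, fsrate);
    return 0;
}
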
+
+static int rsnd_src_pcm_new(struct rsnd_mod *mod,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+ struct rsnd_src *src = rsnd_mod_to_src(mod);
+ int ret;
+
+ /*
+ * enable SRC sync convert if possible
+ */
+
+ /*
+ * Gen1 is not supported
+ */
+ if (rsnd_is_gen1(priv))
+ return 0;
+
+ /*
+ * SRC sync convert needs clock master
+ */
+ if (!rsnd_rdai_is_clk_master(rdai))
+ return 0;
+
+ /*
+ * We can't use SRC sync convert
+ * if it has DVC
+ */
+ if (rsnd_io_to_mod_dvc(io))
+ return 0;
+
+ /*
+ * enable sync convert
+ */
+ ret = rsnd_kctrl_new_s(mod, rtd,
+ rsnd_io_is_play(io) ?
+ "SRC Out Rate Switch" :
+ "SRC In Rate Switch",
+ rsnd_src_reconvert_update,
+ &src->sen, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_kctrl_new_s(mod, rtd,
+ rsnd_io_is_play(io) ?
+ "SRC Out Rate" :
+ "SRC In Rate",
+ rsnd_src_reconvert_update,
+ &src->sync, 192000);
+
+ return ret;
+}
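
Condensed restatement of the guards in rsnd_src_pcm_new() above, with an invented helper: the sync-convert kcontrols are only created on Gen2, when the DAI is clock master, and when the path has no DVC.

#include <stdio.h>

static int can_sync_convert(int is_gen1, int clk_master, int has_dvc)
{
    return !is_gen1 && clk_master && !has_dvc;
}

int main(void)
{
    printf("%d\n", can_sync_convert(0, 1, 0));   /* 1: controls created */
    printf("%d\n", can_sync_convert(0, 1, 1));   /* 0: DVC in the path */
    return 0;
}
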
+
static struct rsnd_mod_ops rsnd_src_gen2_ops = {
.name = SRC_NAME,
+ .dma_req = rsnd_src_dma_req,
.probe = rsnd_src_probe_gen2,
.remove = rsnd_src_remove_gen2,
.init = rsnd_src_init_gen2,
.quit = rsnd_src_quit,
.start = rsnd_src_start_gen2,
.stop = rsnd_src_stop_gen2,
+ .hw_params = rsnd_src_hw_params,
+ .pcm_new = rsnd_src_pcm_new,
};
struct rsnd_mod *rsnd_src_mod_get(struct rsnd_priv *priv, int id)
@@ -810,7 +961,7 @@ static void rsnd_of_parse_src(struct platform_device *pdev,
if (!of_data)
return;
- src_node = of_get_child_by_name(dev->of_node, "rcar_sound,src");
+ src_node = rsnd_src_of_node(priv);
if (!src_node)
return;
@@ -850,7 +1001,7 @@ int rsnd_src_probe(struct platform_device *pdev,
struct rsnd_mod_ops *ops;
struct clk *clk;
char name[RSND_SRC_NAME_SIZE];
- int i, nr;
+ int i, nr, ret;
ops = NULL;
if (rsnd_is_gen1(priv))
@@ -890,10 +1041,21 @@ int rsnd_src_probe(struct platform_device *pdev,
src->info = &info->src_info[i];
- rsnd_mod_init(&src->mod, ops, clk, RSND_MOD_SRC, i);
-
- dev_dbg(dev, "SRC%d probed\n", i);
+ ret = rsnd_mod_init(&src->mod, ops, clk, RSND_MOD_SRC, i);
+ if (ret)
+ return ret;
}
return 0;
}
+
+void rsnd_src_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_src *src;
+ int i;
+
+ for_each_rsnd_src(src, priv, i) {
+ rsnd_mod_quit(&src->mod);
+ }
+}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 9e7b627c08e2..7bb9c087f3dc 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -80,13 +80,13 @@ struct rsnd_ssi {
#define rsnd_mod_to_ssi(_mod) container_of((_mod), struct rsnd_ssi, mod)
#define rsnd_dma_to_ssi(dma) rsnd_mod_to_ssi(rsnd_dma_to_mod(dma))
#define rsnd_ssi_pio_available(ssi) ((ssi)->info->irq > 0)
-#define rsnd_ssi_dma_available(ssi) \
- rsnd_dma_available(rsnd_mod_to_dma(&(ssi)->mod))
#define rsnd_ssi_clk_from_parent(ssi) ((ssi)->parent)
#define rsnd_ssi_mode_flags(p) ((p)->info->flags)
#define rsnd_ssi_dai_id(ssi) ((ssi)->info->dai_id)
+#define rsnd_ssi_of_node(priv) \
+ of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,ssi")
-static int rsnd_ssi_use_busif(struct rsnd_mod *mod)
+int rsnd_ssi_use_busif(struct rsnd_mod *mod)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
@@ -416,11 +416,14 @@ static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
/*
* restart SSI
*/
- rsnd_ssi_stop(mod, priv);
- rsnd_ssi_start(mod, priv);
-
dev_dbg(dev, "%s[%d] restart\n",
rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ rsnd_ssi_stop(mod, priv);
+ if (ssi->err < 1024)
+ rsnd_ssi_start(mod, priv);
+ else
+ dev_warn(dev, "no more SSI restart\n");
}
rsnd_ssi_record_error(ssi, status);
@@ -442,12 +445,6 @@ static int rsnd_ssi_pio_probe(struct rsnd_mod *mod,
rsnd_ssi_interrupt,
IRQF_SHARED,
dev_name(dev), ssi);
- if (ret)
- dev_err(dev, "%s[%d] (PIO) request interrupt failed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
- else
- dev_dbg(dev, "%s[%d] (PIO) is probed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
return ret;
}
@@ -474,23 +471,11 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
IRQF_SHARED,
dev_name(dev), ssi);
if (ret)
- goto rsnd_ssi_dma_probe_fail;
+ return ret;
ret = rsnd_dma_init(
priv, rsnd_mod_to_dma(mod),
- rsnd_info_is_playback(priv, ssi),
dma_id);
- if (ret)
- goto rsnd_ssi_dma_probe_fail;
-
- dev_dbg(dev, "%s[%d] (DMA) is probed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
-
- return ret;
-
-rsnd_ssi_dma_probe_fail:
- dev_err(dev, "%s[%d] (DMA) is failed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
return ret;
}
@@ -502,7 +487,7 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
struct device *dev = rsnd_priv_to_dev(priv);
int irq = ssi->info->irq;
- rsnd_dma_quit(priv, rsnd_mod_to_dma(mod));
+ rsnd_dma_quit(rsnd_mod_to_dma(mod));
/* PIO will request IRQ again */
devm_free_irq(dev, irq, ssi);
@@ -554,14 +539,25 @@ static int rsnd_ssi_dma_stop(struct rsnd_mod *mod,
return 0;
}
-static char *rsnd_ssi_dma_name(struct rsnd_mod *mod)
+static struct dma_chan *rsnd_ssi_dma_req(struct rsnd_mod *mod)
{
- return rsnd_ssi_use_busif(mod) ? "ssiu" : SSI_NAME;
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ int is_play = rsnd_io_is_play(io);
+ char *name;
+
+ if (rsnd_ssi_use_busif(mod))
+ name = is_play ? "rxu" : "txu";
+ else
+ name = is_play ? "rx" : "tx";
+
+ return rsnd_dma_request_channel(rsnd_ssi_of_node(priv),
+ mod, name);
}
static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
.name = SSI_NAME,
- .dma_name = rsnd_ssi_dma_name,
+ .dma_req = rsnd_ssi_dma_req,
.probe = rsnd_ssi_dma_probe,
.remove = rsnd_ssi_dma_remove,
.init = rsnd_ssi_init,
@@ -636,7 +632,7 @@ static void rsnd_of_parse_ssi(struct platform_device *pdev,
if (!of_data)
return;
- node = of_get_child_by_name(dev->of_node, "rcar_sound,ssi");
+ node = rsnd_ssi_of_node(priv);
if (!node)
return;
@@ -697,7 +693,7 @@ int rsnd_ssi_probe(struct platform_device *pdev,
struct clk *clk;
struct rsnd_ssi *ssi;
char name[RSND_SSI_NAME_SIZE];
- int i, nr;
+ int i, nr, ret;
rsnd_of_parse_ssi(pdev, of_data, priv);
@@ -732,10 +728,23 @@ int rsnd_ssi_probe(struct platform_device *pdev,
else if (rsnd_ssi_pio_available(ssi))
ops = &rsnd_ssi_pio_ops;
- rsnd_mod_init(&ssi->mod, ops, clk, RSND_MOD_SSI, i);
+ ret = rsnd_mod_init(&ssi->mod, ops, clk, RSND_MOD_SSI, i);
+ if (ret)
+ return ret;
rsnd_ssi_parent_clk_setup(priv, ssi);
}
return 0;
}
+
+void rsnd_ssi_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_ssi *ssi;
+ int i;
+
+ for_each_rsnd_ssi(ssi, priv, i) {
+ rsnd_mod_quit(&ssi->mod);
+ }
+}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e5c990889dcc..23732523f87c 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -292,6 +292,9 @@ static const struct file_operations codec_reg_fops = {
static void soc_init_component_debugfs(struct snd_soc_component *component)
{
+ if (!component->card->debugfs_card_root)
+ return;
+
if (component->debugfs_prefix) {
char *name;
@@ -455,6 +458,9 @@ static const struct file_operations platform_list_fops = {
static void soc_init_card_debugfs(struct snd_soc_card *card)
{
+ if (!snd_soc_debugfs_root)
+ return;
+
card->debugfs_card_root = debugfs_create_dir(card->name,
snd_soc_debugfs_root);
if (!card->debugfs_card_root) {
@@ -476,6 +482,34 @@ static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
debugfs_remove_recursive(card->debugfs_card_root);
}
+
+static void snd_soc_debugfs_init(void)
+{
+ snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
+ if (IS_ERR(snd_soc_debugfs_root) || !snd_soc_debugfs_root) {
+ pr_warn("ASoC: Failed to create debugfs directory\n");
+ snd_soc_debugfs_root = NULL;
+ return;
+ }
+
+ if (!debugfs_create_file("codecs", 0444, snd_soc_debugfs_root, NULL,
+ &codec_list_fops))
+ pr_warn("ASoC: Failed to create CODEC list debugfs file\n");
+
+ if (!debugfs_create_file("dais", 0444, snd_soc_debugfs_root, NULL,
+ &dai_list_fops))
+ pr_warn("ASoC: Failed to create DAI list debugfs file\n");
+
+ if (!debugfs_create_file("platforms", 0444, snd_soc_debugfs_root, NULL,
+ &platform_list_fops))
+ pr_warn("ASoC: Failed to create platform list debugfs file\n");
+}
+
+static void snd_soc_debugfs_exit(void)
+{
+ debugfs_remove_recursive(snd_soc_debugfs_root);
+}
+
#else
#define soc_init_codec_debugfs NULL
@@ -497,6 +531,15 @@ static inline void soc_init_card_debugfs(struct snd_soc_card *card)
static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
}
+
+static inline void snd_soc_debugfs_init(void)
+{
+}
+
+static inline void snd_soc_debugfs_exit(void)
+{
+}
+
#endif
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
@@ -595,15 +638,9 @@ int snd_soc_suspend(struct device *dev)
cpu_dai->driver->suspend(cpu_dai);
}
- /* close any waiting streams and save state */
- for (i = 0; i < card->num_rtd; i++) {
- struct snd_soc_dai **codec_dais = card->rtd[i].codec_dais;
+ /* close any waiting streams */
+ for (i = 0; i < card->num_rtd; i++)
flush_delayed_work(&card->rtd[i].delayed_work);
- for (j = 0; j < card->rtd[i].num_codecs; j++) {
- codec_dais[j]->codec->dapm.suspend_bias_level =
- codec_dais[j]->codec->dapm.bias_level;
- }
- }
for (i = 0; i < card->num_rtd; i++) {
@@ -1261,7 +1298,8 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
capture_w = cpu_dai->capture_widget;
if (play_w && capture_w) {
ret = snd_soc_dapm_new_pcm(card, dai_link->params,
- capture_w, play_w);
+ dai_link->num_params, capture_w,
+ play_w);
if (ret != 0) {
dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
play_w->name, capture_w->name, ret);
@@ -1273,7 +1311,8 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
capture_w = codec_dai->capture_widget;
if (play_w && capture_w) {
ret = snd_soc_dapm_new_pcm(card, dai_link->params,
- capture_w, play_w);
+ dai_link->num_params, capture_w,
+ play_w);
if (ret != 0) {
dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
play_w->name, capture_w->name, ret);
@@ -1322,21 +1361,17 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
}
}
+ if (dai_link->dai_fmt)
+ snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+
ret = soc_post_component_init(rtd, dai_link->name);
if (ret)
return ret;
#ifdef CONFIG_DEBUG_FS
/* add DPCM sysfs entries */
- if (dai_link->dynamic) {
- ret = soc_dpcm_debugfs_add(rtd);
- if (ret < 0) {
- dev_err(rtd->dev,
- "ASoC: failed to add dpcm sysfs entries: %d\n",
- ret);
- return ret;
- }
- }
+ if (dai_link->dynamic)
+ soc_dpcm_debugfs_add(rtd);
#endif
if (cpu_dai->driver->compress_dai) {
@@ -1426,7 +1461,6 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
/* unregister the rtd device */
if (rtd->dev_registered) {
- device_remove_file(rtd->dev, &dev_attr_codec_reg);
device_unregister(rtd->dev);
rtd->dev_registered = 0;
}
@@ -1560,6 +1594,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
goto base_error;
}
+ soc_init_card_debugfs(card);
+
card->dapm.bias_level = SND_SOC_BIAS_OFF;
card->dapm.dev = card->dev;
card->dapm.card = card;
@@ -1578,6 +1614,10 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
snd_soc_dapm_new_controls(&card->dapm, card->dapm_widgets,
card->num_dapm_widgets);
+ if (card->of_dapm_widgets)
+ snd_soc_dapm_new_controls(&card->dapm, card->of_dapm_widgets,
+ card->num_of_dapm_widgets);
+
/* initialise the sound card only once */
if (card->probe) {
ret = card->probe(card);
@@ -1633,11 +1673,9 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
- for (i = 0; i < card->num_links; i++) {
- if (card->dai_link[i].dai_fmt)
- snd_soc_runtime_set_dai_fmt(&card->rtd[i],
- card->dai_link[i].dai_fmt);
- }
+ if (card->of_dapm_routes)
+ snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
+ card->num_of_dapm_routes);
snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
"%s", card->name);
@@ -1694,6 +1732,7 @@ card_probe_error:
if (card->remove)
card->remove(card);
+ soc_cleanup_card_debugfs(card);
snd_card_free(card->snd_card);
base_error:
@@ -2372,8 +2411,6 @@ int snd_soc_register_card(struct snd_soc_card *card)
snd_soc_initialize_card_lists(card);
- soc_init_card_debugfs(card);
-
card->rtd = devm_kzalloc(card->dev,
sizeof(struct snd_soc_pcm_runtime) *
(card->num_links + card->num_aux_devs),
@@ -2404,7 +2441,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
ret = snd_soc_instantiate_card(card);
if (ret != 0)
- soc_cleanup_card_debugfs(card);
+ return ret;
/* deactivate pins to sleep state */
for (i = 0; i < card->num_rtd; i++) {
@@ -2755,7 +2792,7 @@ int snd_soc_register_component(struct device *dev,
ret = snd_soc_register_dais(cmpnt, dai_drv, num_dai, true);
if (ret < 0) {
- dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret);
+ dev_err(dev, "ASoC: Failed to register DAIs: %d\n", ret);
goto err_cleanup;
}
@@ -3076,7 +3113,7 @@ int snd_soc_register_codec(struct device *dev,
ret = snd_soc_register_dais(&codec->component, dai_drv, num_dai, false);
if (ret < 0) {
- dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret);
+ dev_err(dev, "ASoC: Failed to register DAIs: %d\n", ret);
goto err_cleanup;
}
@@ -3242,8 +3279,8 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
widgets[i].name = wname;
}
- card->dapm_widgets = widgets;
- card->num_dapm_widgets = num_widgets;
+ card->of_dapm_widgets = widgets;
+ card->num_of_dapm_widgets = num_widgets;
return 0;
}
@@ -3327,8 +3364,8 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
}
}
- card->num_dapm_routes = num_routes;
- card->dapm_routes = routes;
+ card->num_of_dapm_routes = num_routes;
+ card->of_dapm_routes = routes;
return 0;
}
@@ -3587,26 +3624,7 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs);
static int __init snd_soc_init(void)
{
-#ifdef CONFIG_DEBUG_FS
- snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
- if (IS_ERR(snd_soc_debugfs_root) || !snd_soc_debugfs_root) {
- pr_warn("ASoC: Failed to create debugfs directory\n");
- snd_soc_debugfs_root = NULL;
- }
-
- if (!debugfs_create_file("codecs", 0444, snd_soc_debugfs_root, NULL,
- &codec_list_fops))
- pr_warn("ASoC: Failed to create CODEC list debugfs file\n");
-
- if (!debugfs_create_file("dais", 0444, snd_soc_debugfs_root, NULL,
- &dai_list_fops))
- pr_warn("ASoC: Failed to create DAI list debugfs file\n");
-
- if (!debugfs_create_file("platforms", 0444, snd_soc_debugfs_root, NULL,
- &platform_list_fops))
- pr_warn("ASoC: Failed to create platform list debugfs file\n");
-#endif
-
+ snd_soc_debugfs_init();
snd_soc_util_init();
return platform_driver_register(&soc_driver);
@@ -3616,9 +3634,9 @@ module_init(snd_soc_init);
static void __exit snd_soc_exit(void)
{
snd_soc_util_exit();
+ snd_soc_debugfs_exit();
#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(snd_soc_debugfs_root);
#endif
platform_driver_unregister(&soc_driver);
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b6f88202b8c9..defe0f0082b5 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -473,16 +473,6 @@ struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_dapm);
-/**
- * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
- * @kcontrol: The kcontrol
- */
-struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol)
-{
- return snd_soc_dapm_to_codec(snd_soc_dapm_kcontrol_dapm(kcontrol));
-}
-EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_codec);
-
static void dapm_reset(struct snd_soc_card *card)
{
struct snd_soc_dapm_widget *w;
@@ -853,6 +843,36 @@ static int dapm_new_pga(struct snd_soc_dapm_widget *w)
return 0;
}
+/* create new dapm dai link control */
+static int dapm_new_dai_link(struct snd_soc_dapm_widget *w)
+{
+ int i, ret;
+ struct snd_kcontrol *kcontrol;
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_card *card = dapm->card->snd_card;
+
+ /* create control for links with > 1 config */
+ if (w->num_params <= 1)
+ return 0;
+
+ /* add kcontrol */
+ for (i = 0; i < w->num_kcontrols; i++) {
+ kcontrol = snd_soc_cnew(&w->kcontrol_news[i], w,
+ w->name, NULL);
+ ret = snd_ctl_add(card, kcontrol);
+ if (ret < 0) {
+ dev_err(dapm->dev,
+ "ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
+ w->name, w->kcontrol_news[i].name, ret);
+ return ret;
+ }
+ kcontrol->private_data = w;
+ w->kcontrols[i] = kcontrol;
+ }
+
+ return 0;
+}
+
/* We implement power down on suspend by checking the power state of
* the ALSA card - when we are suspending the ALSA state for the card
* is set to D3.
@@ -1898,6 +1918,9 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
{
struct dentry *d;
+ if (!parent)
+ return;
+
dapm->debugfs_dapm = debugfs_create_dir("dapm", parent);
if (!dapm->debugfs_dapm) {
@@ -2719,6 +2742,9 @@ int snd_soc_dapm_new_widgets(struct snd_soc_card *card)
case snd_soc_dapm_out_drv:
dapm_new_pga(w);
break;
+ case snd_soc_dapm_dai_link:
+ dapm_new_dai_link(w);
+ break;
default:
break;
}
@@ -3193,7 +3219,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_dapm_path *source_p, *sink_p;
struct snd_soc_dai *source, *sink;
- const struct snd_soc_pcm_stream *config = w->params;
+ const struct snd_soc_pcm_stream *config = w->params + w->params_select;
struct snd_pcm_substream substream;
struct snd_pcm_hw_params *params = NULL;
u64 fmt;
@@ -3285,22 +3311,97 @@ out:
return ret;
}
+static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = w->params_select;
+
+ return 0;
+}
+
+static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+
+ /* Can't change the config when widget is already powered */
+ if (w->power)
+ return -EBUSY;
+
+ if (ucontrol->value.integer.value[0] == w->params_select)
+ return 0;
+
+ if (ucontrol->value.integer.value[0] >= w->num_params)
+ return -EINVAL;
+
+ w->params_select = ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
const struct snd_soc_pcm_stream *params,
+ unsigned int num_params,
struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
struct snd_soc_dapm_widget template;
struct snd_soc_dapm_widget *w;
- size_t len;
char *link_name;
- int ret;
-
- len = strlen(source->name) + strlen(sink->name) + 2;
- link_name = devm_kzalloc(card->dev, len, GFP_KERNEL);
- if (!link_name)
+ int ret, count;
+ unsigned long private_value;
+ const char **w_param_text;
+ struct soc_enum w_param_enum[] = {
+ SOC_ENUM_SINGLE(0, 0, 0, NULL),
+ };
+ struct snd_kcontrol_new kcontrol_dai_link[] = {
+ SOC_ENUM_EXT(NULL, w_param_enum[0],
+ snd_soc_dapm_dai_link_get,
+ snd_soc_dapm_dai_link_put),
+ };
+ const struct snd_soc_pcm_stream *config = params;
+
+ w_param_text = devm_kcalloc(card->dev, num_params,
+ sizeof(char *), GFP_KERNEL);
+ if (!w_param_text)
return -ENOMEM;
- snprintf(link_name, len, "%s-%s", source->name, sink->name);
+
+ link_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-%s",
+ source->name, sink->name);
+ if (!link_name) {
+ ret = -ENOMEM;
+ goto outfree_w_param;
+ }
+
+ for (count = 0 ; count < num_params; count++) {
+ if (!config->stream_name) {
+ dev_warn(card->dapm.dev,
+ "ASoC: anonymous config %d for dai link %s\n",
+ count, link_name);
+ w_param_text[count] =
+ devm_kasprintf(card->dev, GFP_KERNEL,
+ "Anonymous Configuration %d",
+ count);
+ if (!w_param_text[count]) {
+ ret = -ENOMEM;
+ goto outfree_link_name;
+ }
+ } else {
+ w_param_text[count] = devm_kmemdup(card->dev,
+ config->stream_name,
+ strlen(config->stream_name) + 1,
+ GFP_KERNEL);
+ if (!w_param_text[count]) {
+ ret = -ENOMEM;
+ goto outfree_link_name;
+ }
+ }
+ config++;
+ }
+ w_param_enum[0].items = num_params;
+ w_param_enum[0].texts = w_param_text;
memset(&template, 0, sizeof(template));
template.reg = SND_SOC_NOPM;
@@ -3309,6 +3410,30 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
template.event = snd_soc_dai_link_event;
template.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD;
+ template.num_kcontrols = 1;
+ /* duplicate w_param_enum on heap so that memory persists */
+ private_value =
+ (unsigned long) devm_kmemdup(card->dev,
+ (void *)(kcontrol_dai_link[0].private_value),
+ sizeof(struct soc_enum), GFP_KERNEL);
+ if (!private_value) {
+ dev_err(card->dev, "ASoC: Failed to create control for %s widget\n",
+ link_name);
+ ret = -ENOMEM;
+ goto outfree_link_name;
+ }
+ kcontrol_dai_link[0].private_value = private_value;
+ /* duplicate kcontrol_dai_link on heap so that memory persists */
+ template.kcontrol_news =
+ devm_kmemdup(card->dev, &kcontrol_dai_link[0],
+ sizeof(struct snd_kcontrol_new),
+ GFP_KERNEL);
+ if (!template.kcontrol_news) {
+ dev_err(card->dev, "ASoC: Failed to create control for %s widget\n",
+ link_name);
+ ret = -ENOMEM;
+ goto outfree_private_value;
+ }
dev_dbg(card->dev, "ASoC: adding %s widget\n", link_name);
@@ -3316,15 +3441,32 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
if (!w) {
dev_err(card->dev, "ASoC: Failed to create %s widget\n",
link_name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto outfree_kcontrol_news;
}
w->params = params;
+ w->num_params = num_params;
ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
if (ret)
- return ret;
+ goto outfree_w;
return snd_soc_dapm_add_path(&card->dapm, w, sink, NULL, NULL);
+
+outfree_w:
+ devm_kfree(card->dev, w);
+outfree_kcontrol_news:
+ devm_kfree(card->dev, (void *)template.kcontrol_news);
+outfree_private_value:
+ devm_kfree(card->dev, (void *)private_value);
+outfree_link_name:
+ devm_kfree(card->dev, link_name);
+outfree_w_param:
+ for (count = 0 ; count < num_params; count++)
+ devm_kfree(card->dev, (void *)w_param_text[count]);
+ devm_kfree(card->dev, w_param_text);
+
+ return ret;
}
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 4380dcc064a5..9f60c25c4568 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -22,30 +22,42 @@
#include <trace/events/asoc.h>
/**
- * snd_soc_jack_new - Create a new jack
- * @codec: ASoC codec
+ * snd_soc_card_jack_new - Create a new jack
+ * @card: ASoC card
* @id: an identifying string for this jack
* @type: a bitmask of enum snd_jack_type values that can be detected by
* this jack
* @jack: structure to use for the jack
+ * @pins: Array of jack pins to be added to the jack or NULL
+ * @num_pins: Number of elements in the @pins array
*
* Creates a new jack object.
*
* Returns zero if successful, or a negative error code on failure.
* On success jack will be initialised.
*/
-int snd_soc_jack_new(struct snd_soc_codec *codec, const char *id, int type,
- struct snd_soc_jack *jack)
+int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins,
+ unsigned int num_pins)
{
+ int ret;
+
mutex_init(&jack->mutex);
- jack->codec = codec;
+ jack->card = card;
INIT_LIST_HEAD(&jack->pins);
INIT_LIST_HEAD(&jack->jack_zones);
BLOCKING_INIT_NOTIFIER_HEAD(&jack->notifier);
- return snd_jack_new(codec->component.card->snd_card, id, type, &jack->jack);
+ ret = snd_jack_new(card->snd_card, id, type, &jack->jack);
+ if (ret)
+ return ret;
+
+ if (num_pins)
+ return snd_soc_jack_add_pins(jack, num_pins, pins);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(snd_soc_jack_new);
+EXPORT_SYMBOL_GPL(snd_soc_card_jack_new);
/**
* snd_soc_jack_report - Report the current status for a jack
@@ -63,7 +75,6 @@ EXPORT_SYMBOL_GPL(snd_soc_jack_new);
*/
void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
{
- struct snd_soc_codec *codec;
struct snd_soc_dapm_context *dapm;
struct snd_soc_jack_pin *pin;
unsigned int sync = 0;
@@ -74,8 +85,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
if (!jack)
return;
- codec = jack->codec;
- dapm = &codec->dapm;
+ dapm = &jack->card->dapm;
mutex_lock(&jack->mutex);
@@ -175,12 +185,12 @@ int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
for (i = 0; i < count; i++) {
if (!pins[i].pin) {
- dev_err(jack->codec->dev, "ASoC: No name for pin %d\n",
+ dev_err(jack->card->dev, "ASoC: No name for pin %d\n",
i);
return -EINVAL;
}
if (!pins[i].mask) {
- dev_err(jack->codec->dev, "ASoC: No mask for pin %d"
+ dev_err(jack->card->dev, "ASoC: No mask for pin %d"
" (%s)\n", i, pins[i].pin);
return -EINVAL;
}
@@ -260,7 +270,7 @@ static void snd_soc_jack_gpio_detect(struct snd_soc_jack_gpio *gpio)
static irqreturn_t gpio_handler(int irq, void *data)
{
struct snd_soc_jack_gpio *gpio = data;
- struct device *dev = gpio->jack->codec->component.card->dev;
+ struct device *dev = gpio->jack->card->dev;
trace_snd_soc_jack_irq(gpio->name);
@@ -299,7 +309,7 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
for (i = 0; i < count; i++) {
if (!gpios[i].name) {
- dev_err(jack->codec->dev,
+ dev_err(jack->card->dev,
"ASoC: No name for gpio at index %d\n", i);
ret = -EINVAL;
goto undo;
@@ -320,7 +330,7 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
} else {
/* legacy GPIO number */
if (!gpio_is_valid(gpios[i].gpio)) {
- dev_err(jack->codec->dev,
+ dev_err(jack->card->dev,
"ASoC: Invalid gpio %d\n",
gpios[i].gpio);
ret = -EINVAL;
@@ -350,7 +360,7 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
if (gpios[i].wake) {
ret = irq_set_irq_wake(gpiod_to_irq(gpios[i].desc), 1);
if (ret != 0)
- dev_err(jack->codec->dev,
+ dev_err(jack->card->dev,
"ASoC: Failed to mark GPIO at index %d as wake source: %d\n",
i, ret);
}
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 6b0136e7cb88..35fe58f4fa86 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1097,8 +1097,9 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
stream ? "<-" : "->", be->dai_link->name);
#ifdef CONFIG_DEBUG_FS
- dpcm->debugfs_state = debugfs_create_u32(be->dai_link->name, 0644,
- fe->debugfs_dpcm_root, &dpcm->state);
+ if (fe->debugfs_dpcm_root)
+ dpcm->debugfs_state = debugfs_create_u32(be->dai_link->name, 0644,
+ fe->debugfs_dpcm_root, &dpcm->state);
#endif
return 1;
}
@@ -2511,6 +2512,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
/* DAPM dai link stream work */
INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
+ pcm->nonatomic = rtd->dai_link->nonatomic;
rtd->pcm = pcm;
pcm->private_data = rtd;
@@ -2802,10 +2804,13 @@ static const struct file_operations dpcm_state_fops = {
.llseek = default_llseek,
};
-int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
+void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
{
if (!rtd->dai_link)
- return 0;
+ return;
+
+ if (!rtd->card->debugfs_card_root)
+ return;
rtd->debugfs_dpcm_root = debugfs_create_dir(rtd->dai_link->name,
rtd->card->debugfs_card_root);
@@ -2813,13 +2818,11 @@ int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
dev_dbg(rtd->dev,
"ASoC: Failed to create dpcm debugfs directory %s\n",
rtd->dai_link->name);
- return -EINVAL;
+ return;
}
rtd->debugfs_dpcm_state = debugfs_create_file("state", 0444,
rtd->debugfs_dpcm_root,
rtd, &dpcm_state_fops);
-
- return 0;
}
#endif
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index 769aca2fc5f5..ba272e21a6fa 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -101,16 +101,12 @@ static const struct snd_kcontrol_new tegra_alc5632_controls[] = {
static int tegra_alc5632_asoc_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = codec_dai->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(rtd->card);
- snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
- &tegra_alc5632_hs_jack);
- snd_soc_jack_add_pins(&tegra_alc5632_hs_jack,
- ARRAY_SIZE(tegra_alc5632_hs_jack_pins),
- tegra_alc5632_hs_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headset Jack", SND_JACK_HEADSET,
+ &tegra_alc5632_hs_jack,
+ tegra_alc5632_hs_jack_pins,
+ ARRAY_SIZE(tegra_alc5632_hs_jack_pins));
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_alc5632_hp_jack_gpio.gpio = machine->gpio_hp_det;
@@ -119,7 +115,7 @@ static int tegra_alc5632_asoc_init(struct snd_soc_pcm_runtime *rtd)
&tegra_alc5632_hp_jack_gpio);
}
- snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "MICBIAS1");
return 0;
}
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index af3fb997b752..902da36581d1 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -133,24 +133,26 @@ static const struct snd_soc_dapm_widget tegra_max98090_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphones", NULL),
SND_SOC_DAPM_SPK("Speakers", NULL),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
};
static const struct snd_kcontrol_new tegra_max98090_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphones"),
SOC_DAPM_PIN_SWITCH("Speakers"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
};
static int tegra_max98090_asoc_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = codec_dai->codec;
struct tegra_max98090 *machine = snd_soc_card_get_drvdata(rtd->card);
if (gpio_is_valid(machine->gpio_hp_det)) {
- snd_soc_jack_new(codec, "Headphones", SND_JACK_HEADPHONE,
- &tegra_max98090_hp_jack);
- snd_soc_jack_add_pins(&tegra_max98090_hp_jack,
- ARRAY_SIZE(tegra_max98090_hp_jack_pins),
- tegra_max98090_hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphones",
+ SND_JACK_HEADPHONE,
+ &tegra_max98090_hp_jack,
+ tegra_max98090_hp_jack_pins,
+ ARRAY_SIZE(tegra_max98090_hp_jack_pins));
tegra_max98090_hp_jack_gpio.gpio = machine->gpio_hp_det;
snd_soc_jack_add_gpios(&tegra_max98090_hp_jack,
@@ -159,11 +161,11 @@ static int tegra_max98090_asoc_init(struct snd_soc_pcm_runtime *rtd)
}
if (gpio_is_valid(machine->gpio_mic_det)) {
- snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
- &tegra_max98090_mic_jack);
- snd_soc_jack_add_pins(&tegra_max98090_mic_jack,
- ARRAY_SIZE(tegra_max98090_mic_jack_pins),
- tegra_max98090_mic_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Mic Jack",
+ SND_JACK_MICROPHONE,
+ &tegra_max98090_mic_jack,
+ tegra_max98090_mic_jack_pins,
+ ARRAY_SIZE(tegra_max98090_mic_jack_pins));
tegra_max98090_mic_jack_gpio.gpio = machine->gpio_mic_det;
snd_soc_jack_add_gpios(&tegra_max98090_mic_jack,
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index ed759a3076b8..773daecaa5e8 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -108,15 +108,11 @@ static const struct snd_kcontrol_new tegra_rt5640_controls[] = {
static int tegra_rt5640_asoc_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = codec_dai->codec;
struct tegra_rt5640 *machine = snd_soc_card_get_drvdata(rtd->card);
- snd_soc_jack_new(codec, "Headphones", SND_JACK_HEADPHONE,
- &tegra_rt5640_hp_jack);
- snd_soc_jack_add_pins(&tegra_rt5640_hp_jack,
- ARRAY_SIZE(tegra_rt5640_hp_jack_pins),
- tegra_rt5640_hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphones", SND_JACK_HEADPHONE,
+ &tegra_rt5640_hp_jack, tegra_rt5640_hp_jack_pins,
+ ARRAY_SIZE(tegra_rt5640_hp_jack_pins));
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_rt5640_hp_jack_gpio.gpio = machine->gpio_hp_det;
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
index e4cf978a6e3a..1470873ecde6 100644
--- a/sound/soc/tegra/tegra_rt5677.c
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -141,15 +141,11 @@ static const struct snd_kcontrol_new tegra_rt5677_controls[] = {
static int tegra_rt5677_asoc_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = codec_dai->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(rtd->card);
- snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
- &tegra_rt5677_hp_jack);
- snd_soc_jack_add_pins(&tegra_rt5677_hp_jack, 1,
- &tegra_rt5677_hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphone Jack", SND_JACK_HEADPHONE,
+ &tegra_rt5677_hp_jack,
+ &tegra_rt5677_hp_jack_pins, 1);
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_rt5677_hp_jack_gpio.gpio = machine->gpio_hp_det;
@@ -158,10 +154,9 @@ static int tegra_rt5677_asoc_init(struct snd_soc_pcm_runtime *rtd)
}
- snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
- &tegra_rt5677_mic_jack);
- snd_soc_jack_add_pins(&tegra_rt5677_mic_jack, 1,
- &tegra_rt5677_mic_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE,
+ &tegra_rt5677_mic_jack,
+ &tegra_rt5677_mic_jack_pins, 1);
if (gpio_is_valid(machine->gpio_mic_present)) {
tegra_rt5677_mic_jack_gpio.gpio = machine->gpio_mic_present;
@@ -169,7 +164,7 @@ static int tegra_rt5677_asoc_init(struct snd_soc_pcm_runtime *rtd)
&tegra_rt5677_mic_jack_gpio);
}
- snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "MICBIAS1");
return 0;
}
@@ -331,7 +326,6 @@ static const struct of_device_id tegra_rt5677_of_match[] = {
static struct platform_driver tegra_rt5677_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = tegra_rt5677_of_match,
},
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index e52420dae2b4..21604009bc1a 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -171,31 +171,28 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_codec *codec = codec_dai->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_card *card = rtd->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_wm8903_hp_jack_gpio.gpio = machine->gpio_hp_det;
- snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
- &tegra_wm8903_hp_jack);
- snd_soc_jack_add_pins(&tegra_wm8903_hp_jack,
- ARRAY_SIZE(tegra_wm8903_hp_jack_pins),
- tegra_wm8903_hp_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Headphone Jack",
+ SND_JACK_HEADPHONE, &tegra_wm8903_hp_jack,
+ tegra_wm8903_hp_jack_pins,
+ ARRAY_SIZE(tegra_wm8903_hp_jack_pins));
snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack,
1,
&tegra_wm8903_hp_jack_gpio);
}
- snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
- &tegra_wm8903_mic_jack);
- snd_soc_jack_add_pins(&tegra_wm8903_mic_jack,
- ARRAY_SIZE(tegra_wm8903_mic_jack_pins),
- tegra_wm8903_mic_jack_pins);
+ snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE,
+ &tegra_wm8903_mic_jack,
+ tegra_wm8903_mic_jack_pins,
+ ARRAY_SIZE(tegra_wm8903_mic_jack_pins));
wm8903_mic_detect(codec, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE,
0);
- snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
+ snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS");
return 0;
}
diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c
index 2868b4839bc0..6492f8143ff1 100644
--- a/sound/soc/tegra/tegra_wm9712.c
+++ b/sound/soc/tegra/tegra_wm9712.c
@@ -46,11 +46,7 @@ static const struct snd_soc_dapm_widget tegra_wm9712_dapm_widgets[] = {
static int tegra_wm9712_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = codec_dai->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- return snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
+ return snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "Mic Bias");
}
static struct snd_soc_dai_link tegra_wm9712_dai = {
diff --git a/sound/soc/ux500/mop500_ab8500.c b/sound/soc/ux500/mop500_ab8500.c
index aa65370db82a..b81a7a4c938b 100644
--- a/sound/soc/ux500/mop500_ab8500.c
+++ b/sound/soc/ux500/mop500_ab8500.c
@@ -362,7 +362,7 @@ struct snd_soc_ops mop500_ab8500_ops[] = {
int mop500_ab8500_machine_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
struct device *dev = rtd->card->dev;
struct mop500_ab8500_drvdata *drvdata;
int ret;
@@ -407,23 +407,23 @@ int mop500_ab8500_machine_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- ret = snd_soc_dapm_disable_pin(&codec->dapm, "Earpiece");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Speaker Left");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Speaker Right");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineOut Left");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineOut Right");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Vibra 1");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Vibra 2");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Mic 1");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Mic 2");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineIn Left");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineIn Right");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 1");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 2");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 3");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 4");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 5");
- ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 6");
+ ret = snd_soc_dapm_disable_pin(dapm, "Earpiece");
+ ret |= snd_soc_dapm_disable_pin(dapm, "Speaker Left");
+ ret |= snd_soc_dapm_disable_pin(dapm, "Speaker Right");
+ ret |= snd_soc_dapm_disable_pin(dapm, "LineOut Left");
+ ret |= snd_soc_dapm_disable_pin(dapm, "LineOut Right");
+ ret |= snd_soc_dapm_disable_pin(dapm, "Vibra 1");
+ ret |= snd_soc_dapm_disable_pin(dapm, "Vibra 2");
+ ret |= snd_soc_dapm_disable_pin(dapm, "Mic 1");
+ ret |= snd_soc_dapm_disable_pin(dapm, "Mic 2");
+ ret |= snd_soc_dapm_disable_pin(dapm, "LineIn Left");
+ ret |= snd_soc_dapm_disable_pin(dapm, "LineIn Right");
+ ret |= snd_soc_dapm_disable_pin(dapm, "DMic 1");
+ ret |= snd_soc_dapm_disable_pin(dapm, "DMic 2");
+ ret |= snd_soc_dapm_disable_pin(dapm, "DMic 3");
+ ret |= snd_soc_dapm_disable_pin(dapm, "DMic 4");
+ ret |= snd_soc_dapm_disable_pin(dapm, "DMic 5");
+ ret |= snd_soc_dapm_disable_pin(dapm, "DMic 6");
return ret;
}
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index ab37add269ae..82e350e9501c 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
if (snd_BUG_ON(!arg || !emu))
return -ENXIO;
- mutex_lock(&emu->register_mutex);
-
- if (!snd_emux_inc_count(emu)) {
- mutex_unlock(&emu->register_mutex);
+ if (!snd_emux_inc_count(emu))
return -EFAULT;
- }
memset(&callback, 0, sizeof(callback));
callback.owner = THIS_MODULE;
@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
if (p == NULL) {
snd_printk(KERN_ERR "can't create port\n");
snd_emux_dec_count(emu);
- mutex_unlock(&emu->register_mutex);
return -ENOMEM;
}
@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
reset_port_mode(p, arg->seq_mode);
snd_emux_reset_port(p);
-
- mutex_unlock(&emu->register_mutex);
return 0;
}
@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
if (snd_BUG_ON(!emu))
return -ENXIO;
- mutex_lock(&emu->register_mutex);
snd_emux_sounds_off_all(p);
snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
snd_seq_event_port_detach(p->chset.client, p->chset.port);
snd_emux_dec_count(emu);
- mutex_unlock(&emu->register_mutex);
return 0;
}
diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
index 7778b8e19782..a0209204ae48 100644
--- a/sound/synth/emux/emux_seq.c
+++ b/sound/synth/emux/emux_seq.c
@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
if (emu->voices)
snd_emux_terminate_all(emu);
- mutex_lock(&emu->register_mutex);
if (emu->client >= 0) {
snd_seq_delete_kernel_client(emu->client);
emu->client = -1;
}
- mutex_unlock(&emu->register_mutex);
}
@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
/*
* increment usage count
*/
-int
-snd_emux_inc_count(struct snd_emux *emu)
+static int
+__snd_emux_inc_count(struct snd_emux *emu)
{
emu->used++;
if (!try_module_get(emu->ops.owner))
@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
return 1;
}
+int snd_emux_inc_count(struct snd_emux *emu)
+{
+ int ret;
+
+ mutex_lock(&emu->register_mutex);
+ ret = __snd_emux_inc_count(emu);
+ mutex_unlock(&emu->register_mutex);
+ return ret;
+}
/*
* decrease usage count
*/
-void
-snd_emux_dec_count(struct snd_emux *emu)
+static void
+__snd_emux_dec_count(struct snd_emux *emu)
{
module_put(emu->card->module);
emu->used--;
@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
module_put(emu->ops.owner);
}
+void snd_emux_dec_count(struct snd_emux *emu)
+{
+ mutex_lock(&emu->register_mutex);
+ __snd_emux_dec_count(emu);
+ mutex_unlock(&emu->register_mutex);
+}
/*
* Routine that is called upon a first use of a particular port
@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
mutex_lock(&emu->register_mutex);
snd_emux_init_port(p);
- snd_emux_inc_count(emu);
+ __snd_emux_inc_count(emu);
mutex_unlock(&emu->register_mutex);
return 0;
}
@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
mutex_lock(&emu->register_mutex);
snd_emux_sounds_off_all(p);
- snd_emux_dec_count(emu);
+ __snd_emux_dec_count(emu);
mutex_unlock(&emu->register_mutex);
return 0;
}
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 8bcc87cf5667..789d19ec035d 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -79,7 +79,10 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
format = 1 << UAC_FORMAT_TYPE_I_PCM;
}
if (format & (1 << UAC_FORMAT_TYPE_I_PCM)) {
- if (chip->usb_id == USB_ID(0x0582, 0x0016) /* Edirol SD-90 */ &&
+ if (((chip->usb_id == USB_ID(0x0582, 0x0016)) ||
+ /* Edirol SD-90 */
+ (chip->usb_id == USB_ID(0x0582, 0x000c))) &&
+ /* Roland SC-D70 */
sample_width == 24 && sample_bytes == 2)
sample_bytes = 3;
else if (sample_width > sample_bytes * 8) {
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 07f984d5f516..2f6d3e9a1bcd 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -816,37 +816,11 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.data = (const struct snd_usb_audio_quirk[]) {
{
.ifnum = 0,
- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
- .data = & (const struct audioformat) {
- .formats = SNDRV_PCM_FMTBIT_S24_3LE,
- .channels = 2,
- .iface = 0,
- .altsetting = 1,
- .altset_idx = 1,
- .attributes = 0,
- .endpoint = 0x01,
- .ep_attr = 0x01,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- .rate_min = 44100,
- .rate_max = 44100,
- }
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
},
{
.ifnum = 1,
- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
- .data = & (const struct audioformat) {
- .formats = SNDRV_PCM_FMTBIT_S24_3LE,
- .channels = 2,
- .iface = 1,
- .altsetting = 1,
- .altset_idx = 1,
- .attributes = 0,
- .endpoint = 0x81,
- .ep_attr = 0x01,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- .rate_min = 44100,
- .rate_max = 44100,
- }
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
},
{
.ifnum = 2,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 9a28365126f9..7c5a70139278 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1115,6 +1115,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
{
/* devices which do not support reading the sample rate. */
switch (chip->usb_id) {
+ case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
return true;
@@ -1125,17 +1126,24 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
/* Marantz/Denon USB DACs need a vendor cmd to switch
* between PCM and native DSD mode
*/
+static bool is_marantz_denon_dac(unsigned int id)
+{
+ switch (id) {
+ case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
+ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
+ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+ return true;
+ }
+ return false;
+}
+
int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
struct audioformat *fmt)
{
struct usb_device *dev = subs->dev;
int err;
- switch (subs->stream->chip->usb_id) {
- case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
- case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
- case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
-
+ if (is_marantz_denon_dac(subs->stream->chip->usb_id)) {
/* First switch to alt set 0, otherwise the mode switch cmd
* will not be accepted by the DAC
*/
@@ -1208,17 +1216,10 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
/* Marantz/Denon devices with USB DAC functionality need a delay
* after each class compliant request
*/
- if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) &&
- (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
-
- switch (le16_to_cpu(dev->descriptor.idProduct)) {
- case 0x1003: /* Denon DA300-USB */
- case 0x3005: /* Marantz HD-DAC1 */
- case 0x3006: /* Marantz SA-14S1 */
- mdelay(20);
- break;
- }
- }
+ if (is_marantz_denon_dac(USB_ID(le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct)))
+ && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
/* Zoom R16/24 needs a tiny delay here, otherwise requests like
* get/set frequency return as failed despite actually succeeding.
@@ -1273,15 +1274,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
}
/* Denon/Marantz devices with USB DAC functionality */
- switch (chip->usb_id) {
- case USB_ID(0x154e, 0x1003): /* Denon DA300-USB */
- case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
- case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+ if (is_marantz_denon_dac(chip->usb_id)) {
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
- break;
- default:
- break;
}
return 0;
diff --git a/tools/build/Build.include b/tools/build/Build.include
new file mode 100644
index 000000000000..4c8daaccb82a
--- /dev/null
+++ b/tools/build/Build.include
@@ -0,0 +1,81 @@
+###
+# build: Generic definitions
+#
+# Much of this code has been borrowed from, or was heavily inspired by, parts
+# of the kbuild code, which is not credited but was mostly developed by:
+#
+# Copyright (C) Sam Ravnborg <sam@mars.ravnborg.org>, 2015
+# Copyright (C) Linus Torvalds <torvalds@linux-foundation.org>, 2015
+#
+
+###
+# Convenient variables
+comma := ,
+squote := '
+
+###
+# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
+dot-target = $(dir $@).$(notdir $@)
+
+###
+# filename of target with directory and extension stripped
+basetarget = $(basename $(notdir $@))
+
+###
+# The temporary file to save gcc -MD generated dependencies must not
+# contain a comma
+depfile = $(subst $(comma),_,$(dot-target).d)
+
+###
+# Check whether the two command lines are the same. The result is an empty
+# string if they are equal.
+arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
+ $(filter-out $(cmd_$@), $(cmd_$(1))) )
+
+###
+# Escape single quote for use in echo statements
+escsq = $(subst $(squote),'\$(squote)',$1)
+
+# Echo command
+# The short version is used if $(quiet) equals `quiet_', otherwise the full one.
+echo-cmd = $(if $($(quiet)cmd_$(1)),\
+ echo ' $(call escsq,$($(quiet)cmd_$(1)))';)
+
+###
+# Replace >$< with >$$< to preserve $ when reloading the .cmd file
+# (needed for make)
+# Replace >#< with >\#< to avoid starting a comment in the .cmd file
+# (needed for make)
+# Replace >'< with >'\''< to be able to enclose the whole string in '...'
+# (needed for the shell)
+make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
+
+###
+# Find any prerequisites that are newer than the target or that do not exist.
+# PHONY targets are skipped in both cases.
+any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
+
+###
+# if_changed_dep - execute the command if any prerequisite is newer than
+# the target or the command line has changed, and update the
+# dependencies in the .cmd file
+if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
+ @set -e; \
+ $(echo-cmd) $(cmd_$(1)); \
+ cat $(depfile) > $(dot-target).cmd; \
+ printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
+
+# if_changed - execute the command if any prerequisite is newer than the
+# target or the command line has changed
+if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
+ @set -e; \
+ $(echo-cmd) $(cmd_$(1)); \
+ printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd)
+
+###
+# C flags to be used in rule definitions, includes:
+# - depfile generation
+# - global $(CFLAGS)
+# - per target C flags
+# - per object C flags
+# - BUILD_STR macro to allow '-D"$(variable)"' constructs
+c_flags = -Wp,-MD,$(depfile),-MT,$@ $(CFLAGS) -D"BUILD_STR(s)=\#s" $(CFLAGS_$(basetarget).o) $(CFLAGS_$(obj))
diff --git a/tools/build/Documentation/Build.txt b/tools/build/Documentation/Build.txt
new file mode 100644
index 000000000000..00ad2d608727
--- /dev/null
+++ b/tools/build/Documentation/Build.txt
@@ -0,0 +1,139 @@
+Build Framework
+===============
+
+The perf build framework was adopted from the kernel build system, so the
+idea and the way objects are built are the same.
+
+Basically, the user provides a set of 'Build' files that list the objects and
+directories to nest for a specific target to be built.
+
+Unlike the kernel, we don't have a single 'obj-y' build object list in which
+we set up source objects; we support more than one. This allows a single
+'Build' file to carry source lists for multiple build objects.
+
+a) Build framework makefiles
+----------------------------
+
+The build framework consists of 2 Makefiles:
+
+ Build.include
+ Makefile.build
+
+While the 'Build.include' file contains just some generic definitions, the
+'Makefile.build' file is the makefile used from the outside. Its
+interface/usage is as follows:
+
+ $ make -f tools/build/Makefile.build srctree=$(KSRC) dir=$(DIR) obj=$(OBJECT)
+
+where:
+
+ KSRC - is the path to kernel sources
+ DIR - is the path to the project to be built
+ OBJECT - is the name of the build object
+
+When it finishes successfully, the $(DIR) directory contains the final object file
+called $(OBJECT)-in.o:
+
+ $ ls $(DIR)/$(OBJECT)-in.o
+
+which includes all the compiled sources described in the 'Build' makefiles.
+
+b) Build makefiles
+------------------
+
+The user supplies 'Build' makefiles that contain object lists and connect
+the build to nested directories.
+
+Assume we have the following project structure:
+
+ ex/a.c
+ /b.c
+ /c.c
+ /d.c
+ /arch/e.c
+ /arch/f.c
+
+Out of these you build the 'ex' binary and the 'libex.a' library:
+
+ 'ex' - consists of 'a.o', 'b.o' and libex.a
+ 'libex.a' - consists of 'c.o', 'd.o', 'e.o' and 'f.o'
+
+The build framework does not create the 'ex' and 'libex.a' binaries for you;
+it only prepares the proper objects to be compiled and grouped together.
+
+To follow the above example, the user provides the following 'Build' files:
+
+ ex/Build:
+ ex-y += a.o
+ ex-y += b.o
+
+ libex-y += c.o
+ libex-y += d.o
+ libex-y += arch/
+
+ ex/arch/Build:
+ libex-y += e.o
+ libex-y += f.o
+
+and runs:
+
+ $ make -f tools/build/Makefile.build dir=. obj=ex
+ $ make -f tools/build/Makefile.build dir=. obj=libex
+
+which creates the following objects:
+
+ ex/ex-in.o
+ ex/libex-in.o
+
+that contain the objects listed in the 'Build' files.
+
+Creating the final binaries is then only a matter of two commands:
+
+ $ ar rcs libex.a libex-in.o
+ $ gcc -o ex ex-in.o libex.a
+
+You can check the 'ex' example in 'tools/build/tests/ex' for more details.
+
+c) Rules
+--------
+
+The build framework provides standard compilation rules to handle .S and .c
+compilation.
+
+It's possible to include a special rule if needed (as is done for flex or
+bison code generation).
+
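+For instance, a minimal sketch of such a rule (not part of the framework; the
+'parser' file names are hypothetical and an in-tree build without $(OUTPUT) is
+assumed) could sit in a 'Build' file next to its object list:
+
+  ex/Build:
+    ex-y += parser.o
+
+    # hypothetical extra rule: generate parser.c from parser.l with flex
+    parser.c: parser.l
+        $(Q)flex -o $@ $<
+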
+d) CFLAGS
+---------
+
+It's possible to alter the standard object C flags in the following way:
+
+ CFLAGS_perf.o += '...' - alters CFLAGS for perf.o object
+ CFLAGS_gtk += '...' - alters CFLAGS for gtk build object
+
+These CFLAGS changes are scoped to the 'Build' makefile they are defined in.
+
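+As an illustrative sketch, reusing the 'ex' example above (the flags shown are
+chosen only for illustration), a 'Build' file could contain:
+
+  ex/Build:
+    # extra flags for the a.o object only
+    CFLAGS_a.o   += -DDEBUG
+    # extra flags for every object built as part of the 'libex' build object
+    CFLAGS_libex += -Wextra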
+
+e) Dependencies
+---------------
+
+For each built object file 'a.o', a '.a.o.cmd' file is created which holds:
+
+ - the command line used to build that object
+ (for each object)
+
+ - the dependency rules generated by 'gcc -Wp,-MD,...'
+ (for each compiled object)
+
+All existing '.cmd' files are included in the build process so that the
+dependencies are followed properly and a rebuild is triggered when necessary.
+
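+For the 'a.o' object of the 'ex' example, '.a.o.cmd' might then contain roughly
+the following (the exact paths and flags are illustrative and depend on the
+configuration):
+
+  a.o: a.c
+  cmd_a.o := gcc -Wp,-MD,./.a.o.d,-MT,a.o -c -o a.o a.c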
+
+f) Single rules
+---------------
+
+It's possible to build a single object file on demand, for example:
+
+ $ make util/map.o # objects
+ $ make util/map.i # preprocessor
+ $ make util/map.s # assembly
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
new file mode 100644
index 000000000000..10df57237a66
--- /dev/null
+++ b/tools/build/Makefile.build
@@ -0,0 +1,130 @@
+###
+# Main build makefile.
+#
+# Much of this code has been borrowed from, or was heavily inspired by, parts
+# of the kbuild code, which is not credited but was mostly developed by:
+#
+# Copyright (C) Sam Ravnborg <sam@mars.ravnborg.org>, 2015
+# Copyright (C) Linus Torvalds <torvalds@linux-foundation.org>, 2015
+#
+
+PHONY := __build
+__build:
+
+ifeq ($(V),1)
+ quiet =
+ Q =
+else
+ quiet=quiet_
+ Q=@
+endif
+
+build-dir := $(srctree)/tools/build
+
+# Generic definitions
+include $(build-dir)/Build.include
+
+# do not force detected configuration
+-include .config-detected
+
+# Init all relevant variables used in build files so
+# 1) they have correct type
+# 2) they do not inherit any value from the environment
+subdir-y :=
+obj-y :=
+subdir-y :=
+subdir-obj-y :=
+
+# Build definitions
+build-file := $(dir)/Build
+include $(build-file)
+
+quiet_cmd_flex = FLEX $@
+quiet_cmd_bison = BISON $@
+
+# Create directory unless it exists
+quiet_cmd_mkdir = MKDIR $(dir $@)
+ cmd_mkdir = mkdir -p $(dir $@)
+ rule_mkdir = $(if $(wildcard $(dir $@)),,@$(call echo-cmd,mkdir) $(cmd_mkdir))
+
+# Compile command
+quiet_cmd_cc_o_c = CC $@
+ cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
+
+quiet_cmd_cc_i_c = CPP $@
+ cmd_cc_i_c = $(CC) $(c_flags) -E -o $@ $<
+
+quiet_cmd_cc_s_c = AS $@
+ cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
+
+# Link aggregate command
+# If there's nothing to link, create empty $@ object.
+quiet_cmd_ld_multi = LD $@
+ cmd_ld_multi = $(if $(strip $(obj-y)),\
+ $(LD) -r -o $@ $(obj-y),rm -f $@; $(AR) rcs $@)
+
+# Build rules
+$(OUTPUT)%.o: %.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)%.o: %.S FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)%.i: %.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_i_c)
+
+$(OUTPUT)%.i: %.S FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_i_c)
+
+$(OUTPUT)%.s: %.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_s_c)
+
+# Gather build data:
+# obj-y - list of build objects
+# subdir-y - list of directories to nest
+# subdir-obj-y - list of directories objects 'dir/$(obj)-in.o'
+obj-y := $($(obj)-y)
+subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y)))
+obj-y := $(patsubst %/, %/$(obj)-in.o, $(obj-y))
+subdir-obj-y := $(filter %/$(obj)-in.o, $(obj-y))
+
+# '$(OUTPUT)/dir' prefix to all objects
+prefix := $(subst ./,,$(OUTPUT)$(dir)/)
+obj-y := $(addprefix $(prefix),$(obj-y))
+subdir-obj-y := $(addprefix $(prefix),$(subdir-obj-y))
+
+# Final '$(obj)-in.o' object
+in-target := $(prefix)$(obj)-in.o
+
+PHONY += $(subdir-y)
+
+$(subdir-y):
+ $(Q)$(MAKE) -f $(build-dir)/Makefile.build dir=$(dir)/$@ obj=$(obj)
+
+$(sort $(subdir-obj-y)): $(subdir-y) ;
+
+$(in-target): $(obj-y) FORCE
+ $(call rule_mkdir)
+ $(call if_changed,ld_multi)
+
+__build: $(in-target)
+ @:
+
+PHONY += FORCE
+FORCE:
+
+# Include all cmd files to get all the dependency rules
+# for all objects included
+targets := $(wildcard $(sort $(obj-y) $(in-target) $(MAKECMDGOALS)))
+cmd_files := $(wildcard $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+
+ifneq ($(cmd_files),)
+ include $(cmd_files)
+endif
+
+.PHONY: $(PHONY)
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
new file mode 100644
index 000000000000..3a0b0ca2a28c
--- /dev/null
+++ b/tools/build/Makefile.feature
@@ -0,0 +1,171 @@
+feature_dir := $(srctree)/tools/build/feature
+
+ifneq ($(OUTPUT),)
+ OUTPUT_FEATURES = $(OUTPUT)feature/
+ $(shell mkdir -p $(OUTPUT_FEATURES))
+endif
+
+feature_check = $(eval $(feature_check_code))
+define feature_check_code
+ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
+endef
+
+feature_set = $(eval $(feature_set_code))
+define feature_set_code
+ feature-$(1) := 1
+endef
+
+#
+# Build the feature check binaries in parallel, ignore errors, ignore return value and suppress output:
+#
+
+#
+# Note that this is not a complete list of all feature tests, just
+# those that are typically built on a fully configured system.
+#
+# [ Feature tests not mentioned here have to be built explicitly in
+# the rule that uses them - an example for that is the 'bionic'
+# feature check. ]
+#
+FEATURE_TESTS = \
+ backtrace \
+ dwarf \
+ fortify-source \
+ sync-compare-and-swap \
+ glibc \
+ gtk2 \
+ gtk2-infobar \
+ libaudit \
+ libbfd \
+ libelf \
+ libelf-getphdrnum \
+ libelf-mmap \
+ libnuma \
+ libperl \
+ libpython \
+ libpython-version \
+ libslang \
+ libunwind \
+ pthread-attr-setaffinity-np \
+ stackprotector-all \
+ timerfd \
+ libdw-dwarf-unwind \
+ zlib \
+ lzma
+
+FEATURE_DISPLAY = \
+ dwarf \
+ glibc \
+ gtk2 \
+ libaudit \
+ libbfd \
+ libelf \
+ libnuma \
+ libperl \
+ libpython \
+ libslang \
+ libunwind \
+ libdw-dwarf-unwind \
+ zlib \
+ lzma
+
+# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
+# If in the future we need per-feature checks/flags for features not
+# mentioned in this list we need to refactor this ;-).
+set_test_all_flags = $(eval $(set_test_all_flags_code))
+define set_test_all_flags_code
+ FEATURE_CHECK_CFLAGS-all += $(FEATURE_CHECK_CFLAGS-$(1))
+ FEATURE_CHECK_LDFLAGS-all += $(FEATURE_CHECK_LDFLAGS-$(1))
+endef
+
+$(foreach feat,$(FEATURE_TESTS),$(call set_test_all_flags,$(feat)))
+
+#
+# Special fast-path for the 'all features are available' case:
+#
+$(call feature_check,all,$(MSG))
+
+#
+# Just in case the build freshly failed, make sure we print the
+# feature matrix:
+#
+ifeq ($(feature-all), 1)
+ #
+ # test-all.c passed - just set all the core feature flags to 1:
+ #
+ $(foreach feat,$(FEATURE_TESTS),$(call feature_set,$(feat)))
+else
+ $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS=$(LDFLAGS) -i -j -C $(feature_dir) $(addsuffix .bin,$(FEATURE_TESTS)) >/dev/null 2>&1)
+ $(foreach feat,$(FEATURE_TESTS),$(call feature_check,$(feat)))
+endif
+
+#
+# Print the result of the feature test:
+#
+feature_print_status = $(eval $(feature_print_status_code)) $(info $(MSG))
+
+define feature_print_status_code
+ ifeq ($(feature-$(1)), 1)
+ MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1))
+ else
+ MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
+ endif
+endef
+
+feature_print_text = $(eval $(feature_print_text_code)) $(info $(MSG))
+define feature_print_text_code
+ MSG = $(shell printf '...%30s: %s' $(1) $(2))
+endef
+
+FEATURE_DUMP := $(foreach feat,$(FEATURE_DISPLAY),feature-$(feat)($(feature-$(feat))))
+FEATURE_DUMP_FILE := $(shell touch $(OUTPUT)FEATURE-DUMP; cat $(OUTPUT)FEATURE-DUMP)
+
+ifeq ($(dwarf-post-unwind),1)
+ FEATURE_DUMP += dwarf-post-unwind($(dwarf-post-unwind-text))
+endif
+
+# The $(feature_display) variable controls the default detection message
+# output. It's set if:
+# - the detected features differ from the stored features from the
+# last build (in the FEATURE-DUMP file)
+# - one of the $(FEATURE_DISPLAY) features is not detected
+# - VF is enabled
+
+ifneq ("$(FEATURE_DUMP)","$(FEATURE_DUMP_FILE)")
+ $(shell echo "$(FEATURE_DUMP)" > $(OUTPUT)FEATURE-DUMP)
+ feature_display := 1
+endif
+
+feature_display_check = $(eval $(feature_display_check_code))
+define feature_display_check_code
+ ifneq ($(feature-$(1)), 1)
+ feature_display := 1
+ endif
+endef
+
+$(foreach feat,$(FEATURE_DISPLAY),$(call feature_display_check,$(feat)))
+
+ifeq ($(VF),1)
+ feature_display := 1
+ feature_verbose := 1
+endif
+
+ifeq ($(feature_display),1)
+ $(info )
+ $(info Auto-detecting system features:)
+ $(foreach feat,$(FEATURE_DISPLAY),$(call feature_print_status,$(feat),))
+
+ ifeq ($(dwarf-post-unwind),1)
+ $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
+ endif
+
+ ifneq ($(feature_verbose),1)
+ $(info )
+ endif
+endif
+
+ifeq ($(feature_verbose),1)
+ TMP := $(filter-out $(FEATURE_DISPLAY),$(FEATURE_TESTS))
+ $(foreach feat,$(TMP),$(call feature_print_status,$(feat),))
+ $(info )
+endif
diff --git a/tools/perf/config/feature-checks/.gitignore b/tools/build/feature/.gitignore
index 80f3da0c3515..09b335b98842 100644
--- a/tools/perf/config/feature-checks/.gitignore
+++ b/tools/build/feature/.gitignore
@@ -1,2 +1,3 @@
*.d
*.bin
+*.output
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/build/feature/Makefile
index b32ff3372514..463ed8f2a267 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/build/feature/Makefile
@@ -29,33 +29,36 @@ FILES= \
test-stackprotector-all.bin \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
+ test-libbabeltrace.bin \
test-compile-32.bin \
test-compile-x32.bin \
- test-zlib.bin
+ test-zlib.bin \
+ test-lzma.bin
CC := $(CROSS_COMPILE)gcc -MD
PKG_CONFIG := $(CROSS_COMPILE)pkg-config
all: $(FILES)
-BUILD = $(CC) $(CFLAGS) -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
+__BUILD = $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
+ BUILD = $(__BUILD) > $(OUTPUT)$(@:.bin=.make.output) 2>&1
###############################
test-all.bin:
- $(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz
+ $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
test-hello.bin:
$(BUILD)
test-pthread-attr-setaffinity-np.bin:
- $(BUILD) -D_GNU_SOURCE -Werror -lpthread
+ $(BUILD) -D_GNU_SOURCE -lpthread
test-stackprotector-all.bin:
- $(BUILD) -Werror -fstack-protector-all
+ $(BUILD) -fstack-protector-all
test-fortify-source.bin:
- $(BUILD) -O2 -Werror -D_FORTIFY_SOURCE=2
+ $(BUILD) -O2 -D_FORTIFY_SOURCE=2
test-bionic.bin:
$(BUILD)
@@ -118,10 +121,10 @@ test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
test-liberty.bin:
- $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
+ $(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
test-liberty-z.bin:
- $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
+ $(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
test-cplus-demangle.bin:
$(BUILD) -liberty
@@ -133,10 +136,13 @@ test-timerfd.bin:
$(BUILD)
test-libdw-dwarf-unwind.bin:
- $(BUILD)
+ $(BUILD) # -ldw provided by $(FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind)
+
+test-libbabeltrace.bin:
+ $(BUILD) # -lbabeltrace provided by $(FEATURE_CHECK_LDFLAGS-libbabeltrace)
test-sync-compare-and-swap.bin:
- $(BUILD) -Werror
+ $(BUILD)
test-compile-32.bin:
$(CC) -m32 -o $(OUTPUT)$@ test-compile.c
@@ -147,9 +153,12 @@ test-compile-x32.bin:
test-zlib.bin:
$(BUILD) -lz
+test-lzma.bin:
+ $(BUILD) -llzma
+
-include *.d
###############################
clean:
- rm -f $(FILES) *.d
+ rm -f $(FILES) *.d $(FILES:.bin=.make.output)
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/build/feature/test-all.c
index 6d4d09323922..84689a67814a 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -98,7 +98,23 @@
#undef main
#define main main_test_pthread_attr_setaffinity_np
-# include "test-pthread_attr_setaffinity_np.c"
+# include "test-pthread-attr-setaffinity-np.c"
+#undef main
+
+# if 0
+/*
+ * Disable libbabeltrace check for test-all, because the requested
+ * library version is not released yet in most distributions. Will
+ * reenable later.
+ */
+
+#define main main_test_libbabeltrace
+# include "test-libbabeltrace.c"
+#undef main
+#endif
+
+#define main main_test_lzma
+# include "test-lzma.c"
#undef main
int main(int argc, char *argv[])
@@ -126,6 +142,7 @@ int main(int argc, char *argv[])
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
+ main_test_lzma();
return 0;
}
diff --git a/tools/perf/config/feature-checks/test-backtrace.c b/tools/build/feature/test-backtrace.c
index 7124aa1dc8fb..7124aa1dc8fb 100644
--- a/tools/perf/config/feature-checks/test-backtrace.c
+++ b/tools/build/feature/test-backtrace.c
diff --git a/tools/perf/config/feature-checks/test-bionic.c b/tools/build/feature/test-bionic.c
index eac24e9513eb..eac24e9513eb 100644
--- a/tools/perf/config/feature-checks/test-bionic.c
+++ b/tools/build/feature/test-bionic.c
diff --git a/tools/perf/config/feature-checks/test-compile.c b/tools/build/feature/test-compile.c
index 31dbf45bf99c..31dbf45bf99c 100644
--- a/tools/perf/config/feature-checks/test-compile.c
+++ b/tools/build/feature/test-compile.c
diff --git a/tools/perf/config/feature-checks/test-cplus-demangle.c b/tools/build/feature/test-cplus-demangle.c
index 610c686e0009..610c686e0009 100644
--- a/tools/perf/config/feature-checks/test-cplus-demangle.c
+++ b/tools/build/feature/test-cplus-demangle.c
diff --git a/tools/perf/config/feature-checks/test-dwarf.c b/tools/build/feature/test-dwarf.c
index 3fc1801ce4a9..3fc1801ce4a9 100644
--- a/tools/perf/config/feature-checks/test-dwarf.c
+++ b/tools/build/feature/test-dwarf.c
diff --git a/tools/perf/config/feature-checks/test-fortify-source.c b/tools/build/feature/test-fortify-source.c
index c9f398d87868..c9f398d87868 100644
--- a/tools/perf/config/feature-checks/test-fortify-source.c
+++ b/tools/build/feature/test-fortify-source.c
diff --git a/tools/perf/config/feature-checks/test-glibc.c b/tools/build/feature/test-glibc.c
index b0820345cd98..b0820345cd98 100644
--- a/tools/perf/config/feature-checks/test-glibc.c
+++ b/tools/build/feature/test-glibc.c
diff --git a/tools/perf/config/feature-checks/test-gtk2-infobar.c b/tools/build/feature/test-gtk2-infobar.c
index 397b4646d066..397b4646d066 100644
--- a/tools/perf/config/feature-checks/test-gtk2-infobar.c
+++ b/tools/build/feature/test-gtk2-infobar.c
diff --git a/tools/perf/config/feature-checks/test-gtk2.c b/tools/build/feature/test-gtk2.c
index 6bd80e509439..6bd80e509439 100644
--- a/tools/perf/config/feature-checks/test-gtk2.c
+++ b/tools/build/feature/test-gtk2.c
diff --git a/tools/perf/config/feature-checks/test-hello.c b/tools/build/feature/test-hello.c
index c9f398d87868..c9f398d87868 100644
--- a/tools/perf/config/feature-checks/test-hello.c
+++ b/tools/build/feature/test-hello.c
diff --git a/tools/perf/config/feature-checks/test-libaudit.c b/tools/build/feature/test-libaudit.c
index afc019f08641..afc019f08641 100644
--- a/tools/perf/config/feature-checks/test-libaudit.c
+++ b/tools/build/feature/test-libaudit.c
diff --git a/tools/build/feature/test-libbabeltrace.c b/tools/build/feature/test-libbabeltrace.c
new file mode 100644
index 000000000000..9cf802a04885
--- /dev/null
+++ b/tools/build/feature/test-libbabeltrace.c
@@ -0,0 +1,9 @@
+
+#include <babeltrace/ctf-writer/writer.h>
+#include <babeltrace/ctf-ir/stream-class.h>
+
+int main(void)
+{
+ bt_ctf_stream_class_get_packet_context_type((void *) 0);
+ return 0;
+}
diff --git a/tools/perf/config/feature-checks/test-libbfd.c b/tools/build/feature/test-libbfd.c
index 24059907e990..24059907e990 100644
--- a/tools/perf/config/feature-checks/test-libbfd.c
+++ b/tools/build/feature/test-libbfd.c
diff --git a/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c b/tools/build/feature/test-libdw-dwarf-unwind.c
index f676a3ff442a..f676a3ff442a 100644
--- a/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c
+++ b/tools/build/feature/test-libdw-dwarf-unwind.c
diff --git a/tools/perf/config/feature-checks/test-libelf-getphdrnum.c b/tools/build/feature/test-libelf-getphdrnum.c
index d710459306c3..d710459306c3 100644
--- a/tools/perf/config/feature-checks/test-libelf-getphdrnum.c
+++ b/tools/build/feature/test-libelf-getphdrnum.c
diff --git a/tools/perf/config/feature-checks/test-libelf-mmap.c b/tools/build/feature/test-libelf-mmap.c
index 564427d7ef18..564427d7ef18 100644
--- a/tools/perf/config/feature-checks/test-libelf-mmap.c
+++ b/tools/build/feature/test-libelf-mmap.c
diff --git a/tools/perf/config/feature-checks/test-libelf.c b/tools/build/feature/test-libelf.c
index 08db322d8957..08db322d8957 100644
--- a/tools/perf/config/feature-checks/test-libelf.c
+++ b/tools/build/feature/test-libelf.c
diff --git a/tools/perf/config/feature-checks/test-libnuma.c b/tools/build/feature/test-libnuma.c
index 4763d9cd587d..4763d9cd587d 100644
--- a/tools/perf/config/feature-checks/test-libnuma.c
+++ b/tools/build/feature/test-libnuma.c
diff --git a/tools/perf/config/feature-checks/test-libperl.c b/tools/build/feature/test-libperl.c
index 8871f6a0fdb4..8871f6a0fdb4 100644
--- a/tools/perf/config/feature-checks/test-libperl.c
+++ b/tools/build/feature/test-libperl.c
diff --git a/tools/perf/config/feature-checks/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
index facea122d812..facea122d812 100644
--- a/tools/perf/config/feature-checks/test-libpython-version.c
+++ b/tools/build/feature/test-libpython-version.c
diff --git a/tools/perf/config/feature-checks/test-libpython.c b/tools/build/feature/test-libpython.c
index b24b28ad6324..b24b28ad6324 100644
--- a/tools/perf/config/feature-checks/test-libpython.c
+++ b/tools/build/feature/test-libpython.c
diff --git a/tools/perf/config/feature-checks/test-libslang.c b/tools/build/feature/test-libslang.c
index 22ff22ed94d1..22ff22ed94d1 100644
--- a/tools/perf/config/feature-checks/test-libslang.c
+++ b/tools/build/feature/test-libslang.c
diff --git a/tools/perf/config/feature-checks/test-libunwind-debug-frame.c b/tools/build/feature/test-libunwind-debug-frame.c
index 0ef8087a104a..0ef8087a104a 100644
--- a/tools/perf/config/feature-checks/test-libunwind-debug-frame.c
+++ b/tools/build/feature/test-libunwind-debug-frame.c
diff --git a/tools/perf/config/feature-checks/test-libunwind.c b/tools/build/feature/test-libunwind.c
index 43b9369bcab7..43b9369bcab7 100644
--- a/tools/perf/config/feature-checks/test-libunwind.c
+++ b/tools/build/feature/test-libunwind.c
diff --git a/tools/build/feature/test-lzma.c b/tools/build/feature/test-lzma.c
new file mode 100644
index 000000000000..95adc8ced3dd
--- /dev/null
+++ b/tools/build/feature/test-lzma.c
@@ -0,0 +1,10 @@
+#include <lzma.h>
+
+int main(void)
+{
+ lzma_stream strm = LZMA_STREAM_INIT;
+ int ret;
+
+ ret = lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED);
+ return ret ? -1 : 0;
+}
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/build/feature/test-pthread-attr-setaffinity-np.c
index 2b81b72eca23..fdada5e8d454 100644
--- a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
+++ b/tools/build/feature/test-pthread-attr-setaffinity-np.c
@@ -1,5 +1,6 @@
#include <stdint.h>
#include <pthread.h>
+#include <sched.h>
int main(void)
{
@@ -8,7 +9,8 @@ int main(void)
cpu_set_t cs;
pthread_attr_init(&thread_attr);
- /* don't care abt exact args, just the API itself in libpthread */
+ CPU_ZERO(&cs);
+
ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
return ret;
diff --git a/tools/perf/config/feature-checks/test-stackprotector-all.c b/tools/build/feature/test-stackprotector-all.c
index c9f398d87868..c9f398d87868 100644
--- a/tools/perf/config/feature-checks/test-stackprotector-all.c
+++ b/tools/build/feature/test-stackprotector-all.c
diff --git a/tools/perf/config/feature-checks/test-sync-compare-and-swap.c b/tools/build/feature/test-sync-compare-and-swap.c
index c34d4ca4af56..c34d4ca4af56 100644
--- a/tools/perf/config/feature-checks/test-sync-compare-and-swap.c
+++ b/tools/build/feature/test-sync-compare-and-swap.c
diff --git a/tools/perf/config/feature-checks/test-timerfd.c b/tools/build/feature/test-timerfd.c
index 8c5c083b4d3c..8c5c083b4d3c 100644
--- a/tools/perf/config/feature-checks/test-timerfd.c
+++ b/tools/build/feature/test-timerfd.c
diff --git a/tools/perf/config/feature-checks/test-zlib.c b/tools/build/feature/test-zlib.c
index e111fff6240e..e111fff6240e 100644
--- a/tools/perf/config/feature-checks/test-zlib.c
+++ b/tools/build/feature/test-zlib.c
diff --git a/tools/build/tests/ex/Build b/tools/build/tests/ex/Build
new file mode 100644
index 000000000000..0e6c3e6767e6
--- /dev/null
+++ b/tools/build/tests/ex/Build
@@ -0,0 +1,8 @@
+ex-y += ex.o
+ex-y += a.o
+ex-y += b.o
+ex-y += empty/
+
+libex-y += c.o
+libex-y += d.o
+libex-y += arch/
diff --git a/tools/build/tests/ex/Makefile b/tools/build/tests/ex/Makefile
new file mode 100644
index 000000000000..52d2476073a3
--- /dev/null
+++ b/tools/build/tests/ex/Makefile
@@ -0,0 +1,23 @@
+export srctree := ../../../..
+export CC := gcc
+export LD := ld
+export AR := ar
+
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+ex: ex-in.o libex-in.o
+ gcc -o $@ $^
+
+ex.%: FORCE
+ make -f $(srctree)/tools/build/Makefile.build dir=. $@
+
+ex-in.o: FORCE
+ make $(build)=ex
+
+libex-in.o: FORCE
+ make $(build)=libex
+
+clean:
+ find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+ rm -f ex ex.i ex.s
+
+.PHONY: FORCE
diff --git a/tools/build/tests/ex/a.c b/tools/build/tests/ex/a.c
new file mode 100644
index 000000000000..851762798c83
--- /dev/null
+++ b/tools/build/tests/ex/a.c
@@ -0,0 +1,5 @@
+
+int a(void)
+{
+ return 0;
+}
diff --git a/tools/build/tests/ex/arch/Build b/tools/build/tests/ex/arch/Build
new file mode 100644
index 000000000000..55506189efae
--- /dev/null
+++ b/tools/build/tests/ex/arch/Build
@@ -0,0 +1,2 @@
+libex-y += e.o
+libex-y += f.o
diff --git a/tools/build/tests/ex/arch/e.c b/tools/build/tests/ex/arch/e.c
new file mode 100644
index 000000000000..beaa4a1d7ba8
--- /dev/null
+++ b/tools/build/tests/ex/arch/e.c
@@ -0,0 +1,5 @@
+
+int e(void)
+{
+ return 0;
+}
diff --git a/tools/build/tests/ex/arch/f.c b/tools/build/tests/ex/arch/f.c
new file mode 100644
index 000000000000..7c3e9e9da5b7
--- /dev/null
+++ b/tools/build/tests/ex/arch/f.c
@@ -0,0 +1,5 @@
+
+int f(void)
+{
+ return 0;
+}
diff --git a/tools/build/tests/ex/b.c b/tools/build/tests/ex/b.c
new file mode 100644
index 000000000000..c24ff9ca9a97
--- /dev/null
+++ b/tools/build/tests/ex/b.c
@@ -0,0 +1,5 @@
+
+int b(void)
+{
+ return 0;
+}
diff --git a/tools/build/tests/ex/c.c b/tools/build/tests/ex/c.c
new file mode 100644
index 000000000000..e216d0217499
--- /dev/null
+++ b/tools/build/tests/ex/c.c
@@ -0,0 +1,5 @@
+
+int c(void)
+{
+ return 0;
+}
diff --git a/tools/build/tests/ex/d.c b/tools/build/tests/ex/d.c
new file mode 100644
index 000000000000..80dc0f06151b
--- /dev/null
+++ b/tools/build/tests/ex/d.c
@@ -0,0 +1,5 @@
+
+int d(void)
+{
+ return 0;
+}
diff --git a/tools/build/tests/ex/empty/Build b/tools/build/tests/ex/empty/Build
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/build/tests/ex/empty/Build
diff --git a/tools/build/tests/ex/ex.c b/tools/build/tests/ex/ex.c
new file mode 100644
index 000000000000..dc42eb2e1a67
--- /dev/null
+++ b/tools/build/tests/ex/ex.c
@@ -0,0 +1,19 @@
+
+int a(void);
+int b(void);
+int c(void);
+int d(void);
+int e(void);
+int f(void);
+
+int main(void)
+{
+ a();
+ b();
+ c();
+ d();
+ e();
+ f();
+
+ return 0;
+}
diff --git a/tools/build/tests/run.sh b/tools/build/tests/run.sh
new file mode 100755
index 000000000000..5494f8ea7567
--- /dev/null
+++ b/tools/build/tests/run.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+function test_ex {
+ make -C ex V=1 clean > ex.out 2>&1
+ make -C ex V=1 >> ex.out 2>&1
+
+ if [ ! -x ./ex/ex ]; then
+ echo FAILED
+ exit -1
+ fi
+
+ make -C ex V=1 clean > /dev/null 2>&1
+ rm -f ex.out
+}
+
+function test_ex_suffix {
+ make -C ex V=1 clean > ex.out 2>&1
+
+ # use -rR to disable make's builtin rules
+ make -rR -C ex V=1 ex.o >> ex.out 2>&1
+ make -rR -C ex V=1 ex.i >> ex.out 2>&1
+ make -rR -C ex V=1 ex.s >> ex.out 2>&1
+
+ if [ -x ./ex/ex ]; then
+ echo FAILED
+ exit -1
+ fi
+
+ if [ ! -f ./ex/ex.o -o ! -f ./ex/ex.i -o ! -f ./ex/ex.s ]; then
+ echo FAILED
+ exit -1
+ fi
+
+ make -C ex V=1 clean > /dev/null 2>&1
+ rm -f ex.out
+}
+echo -n Testing..
+
+test_ex
+test_ex_suffix
+
+echo OK
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 99ffe61051a7..a8ab79556926 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -3,7 +3,7 @@
CC = $(CROSS_COMPILE)gcc
PTHREAD_LIBS = -lpthread
WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS)
+CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
%: %.c
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 5e63f70bd956..506dd0148828 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -81,6 +81,7 @@ static int vss_operate(int operation)
char match[] = "/dev/";
FILE *mounts;
struct mntent *ent;
+ char errdir[1024] = {0};
unsigned int cmd;
int error = 0, root_seen = 0, save_errno = 0;
@@ -115,6 +116,8 @@ static int vss_operate(int operation)
goto err;
}
+ endmntent(mounts);
+
if (root_seen) {
error |= vss_do_freeze("/", cmd);
if (error && operation == VSS_OP_FREEZE)
@@ -124,16 +127,19 @@ static int vss_operate(int operation)
goto out;
err:
save_errno = errno;
+ if (ent) {
+ strncpy(errdir, ent->mnt_dir, sizeof(errdir)-1);
+ endmntent(mounts);
+ }
vss_operate(VSS_OP_THAW);
/* Call syslog after we thaw all filesystems */
if (ent)
syslog(LOG_ERR, "FREEZE of %s failed; error:%d %s",
- ent->mnt_dir, save_errno, strerror(save_errno));
+ errdir, save_errno, strerror(save_errno));
else
syslog(LOG_ERR, "FREEZE of / failed; error:%d %s", save_errno,
strerror(save_errno));
out:
- endmntent(mounts);
return error;
}
diff --git a/tools/lib/api/Build b/tools/lib/api/Build
new file mode 100644
index 000000000000..3653965cf481
--- /dev/null
+++ b/tools/lib/api/Build
@@ -0,0 +1,2 @@
+libapi-y += fd/
+libapi-y += fs/
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 36c08b1f4afb..8bd960658463 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -1,49 +1,43 @@
include ../../scripts/Makefile.include
include ../../perf/config/utilities.mak # QUIET_CLEAN
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
-# guard against environment variables
-LIB_H=
-LIB_OBJS=
-
-LIB_H += fs/debugfs.h
-LIB_H += fs/fs.h
-# See comment below about piggybacking...
-LIB_H += fd/array.h
-
-LIB_OBJS += $(OUTPUT)fs/debugfs.o
-LIB_OBJS += $(OUTPUT)fs/fs.o
-# XXX piggybacking here, need to introduce libapikfd, or rename this
-# to plain libapik.a and make it have it all api goodies
-LIB_OBJS += $(OUTPUT)fd/array.o
+MAKEFLAGS += --no-print-directory
-LIBFILE = libapikfs.a
+LIBFILE = $(OUTPUT)libapi.a
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
-EXTLIBS = -lelf -lpthread -lrt -lm
-ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
-ALL_LDFLAGS = $(LDFLAGS)
+CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
RM = rm -f
-$(LIBFILE): $(LIB_OBJS)
- $(QUIET_AR)$(RM) $@ && $(AR) rcs $(OUTPUT)$@ $(LIB_OBJS)
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+API_IN := $(OUTPUT)libapi-in.o
-$(LIB_OBJS): $(LIB_H)
+export srctree OUTPUT CC LD CFLAGS V
-libapi_dirs:
- $(QUIET_MKDIR)mkdir -p $(OUTPUT)fd $(OUTPUT)fs
+all: $(LIBFILE)
-$(OUTPUT)%.o: %.c libapi_dirs
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
-$(OUTPUT)%.s: %.c libapi_dirs
- $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
-$(OUTPUT)%.o: %.S libapi_dirs
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(API_IN): FORCE
+ @$(MAKE) $(build)=libapi
+
+$(LIBFILE): $(API_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(API_IN)
clean:
- $(call QUIET_CLEAN, libapi) $(RM) $(LIB_OBJS) $(LIBFILE)
+ $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
+ find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o | xargs $(RM)
+
+FORCE:
-.PHONY: clean
+.PHONY: clean FORCE
diff --git a/tools/lib/api/fd/Build b/tools/lib/api/fd/Build
new file mode 100644
index 000000000000..605d99f6d71a
--- /dev/null
+++ b/tools/lib/api/fd/Build
@@ -0,0 +1 @@
+libapi-y += array.o
diff --git a/tools/lib/api/fs/Build b/tools/lib/api/fs/Build
new file mode 100644
index 000000000000..6de5a4f0b501
--- /dev/null
+++ b/tools/lib/api/fs/Build
@@ -0,0 +1,4 @@
+libapi-y += fs.o
+libapi-y += debugfs.o
+libapi-y += findfs.o
+libapi-y += tracefs.o
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index d2b18e887071..8305b3e9d48e 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -3,75 +3,50 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h>
#include <stdbool.h>
#include <sys/vfs.h>
+#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/mount.h>
#include <linux/kernel.h>
#include "debugfs.h"
-char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
+#ifndef DEBUGFS_DEFAULT_PATH
+#define DEBUGFS_DEFAULT_PATH "/sys/kernel/debug"
+#endif
+
+char debugfs_mountpoint[PATH_MAX + 1] = DEBUGFS_DEFAULT_PATH;
static const char * const debugfs_known_mountpoints[] = {
- "/sys/kernel/debug",
+ DEBUGFS_DEFAULT_PATH,
"/debug",
0,
};
static bool debugfs_found;
+bool debugfs_configured(void)
+{
+ return debugfs_find_mountpoint() != NULL;
+}
+
/* find the path to the mounted debugfs */
const char *debugfs_find_mountpoint(void)
{
- const char * const *ptr;
- char type[100];
- FILE *fp;
+ const char *ret;
if (debugfs_found)
return (const char *)debugfs_mountpoint;
- ptr = debugfs_known_mountpoints;
- while (*ptr) {
- if (debugfs_valid_mountpoint(*ptr) == 0) {
- debugfs_found = true;
- strcpy(debugfs_mountpoint, *ptr);
- return debugfs_mountpoint;
- }
- ptr++;
- }
-
- /* give up and parse /proc/mounts */
- fp = fopen("/proc/mounts", "r");
- if (fp == NULL)
- return NULL;
-
- while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
- debugfs_mountpoint, type) == 2) {
- if (strcmp(type, "debugfs") == 0)
- break;
- }
- fclose(fp);
+ ret = find_mountpoint("debugfs", (long) DEBUGFS_MAGIC,
+ debugfs_mountpoint, PATH_MAX + 1,
+ debugfs_known_mountpoints);
+ if (ret)
+ debugfs_found = true;
- if (strcmp(type, "debugfs") != 0)
- return NULL;
-
- debugfs_found = true;
-
- return debugfs_mountpoint;
-}
-
-/* verify that a mountpoint is actually a debugfs instance */
-
-int debugfs_valid_mountpoint(const char *debugfs)
-{
- struct statfs st_fs;
-
- if (statfs(debugfs, &st_fs) < 0)
- return -ENOENT;
- else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC)
- return -ENOENT;
-
- return 0;
+ return ret;
}
/* mount the debugfs somewhere if it's not mounted */
@@ -87,7 +62,7 @@ char *debugfs_mount(const char *mountpoint)
mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
/* if no environment variable, use default */
if (mountpoint == NULL)
- mountpoint = "/sys/kernel/debug";
+ mountpoint = DEBUGFS_DEFAULT_PATH;
}
if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h
index 0739881a9897..455023698d2b 100644
--- a/tools/lib/api/fs/debugfs.h
+++ b/tools/lib/api/fs/debugfs.h
@@ -1,16 +1,7 @@
#ifndef __API_DEBUGFS_H__
#define __API_DEBUGFS_H__
-#define _STR(x) #x
-#define STR(x) _STR(x)
-
-/*
- * On most systems <limits.h> would have given us this, but not on some systems
- * (e.g. GNU/Hurd).
- */
-#ifndef PATH_MAX
-#define PATH_MAX 4096
-#endif
+#include "findfs.h"
#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720
@@ -20,8 +11,8 @@
#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
#endif
+bool debugfs_configured(void);
const char *debugfs_find_mountpoint(void);
-int debugfs_valid_mountpoint(const char *debugfs);
char *debugfs_mount(const char *mountpoint);
extern char debugfs_mountpoint[];
diff --git a/tools/lib/api/fs/findfs.c b/tools/lib/api/fs/findfs.c
new file mode 100644
index 000000000000..49946cb6d7af
--- /dev/null
+++ b/tools/lib/api/fs/findfs.c
@@ -0,0 +1,63 @@
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/vfs.h>
+
+#include "findfs.h"
+
+/* verify that a mountpoint is actually the type we want */
+
+int valid_mountpoint(const char *mount, long magic)
+{
+ struct statfs st_fs;
+
+ if (statfs(mount, &st_fs) < 0)
+ return -ENOENT;
+ else if ((long)st_fs.f_type != magic)
+ return -ENOENT;
+
+ return 0;
+}
+
+/* find the path to a mounted file system */
+const char *find_mountpoint(const char *fstype, long magic,
+ char *mountpoint, int len,
+ const char * const *known_mountpoints)
+{
+ const char * const *ptr;
+ char format[128];
+ char type[100];
+ FILE *fp;
+
+ if (known_mountpoints) {
+ ptr = known_mountpoints;
+ while (*ptr) {
+ if (valid_mountpoint(*ptr, magic) == 0) {
+ strncpy(mountpoint, *ptr, len - 1);
+ mountpoint[len-1] = 0;
+ return mountpoint;
+ }
+ ptr++;
+ }
+ }
+
+ /* give up and parse /proc/mounts */
+ fp = fopen("/proc/mounts", "r");
+ if (fp == NULL)
+ return NULL;
+
+ snprintf(format, 128, "%%*s %%%ds %%99s %%*s %%*d %%*d\n", len);
+
+ while (fscanf(fp, format, mountpoint, type) == 2) {
+ if (strcmp(type, fstype) == 0)
+ break;
+ }
+ fclose(fp);
+
+ if (strcmp(type, fstype) != 0)
+ return NULL;
+
+ return mountpoint;
+}
diff --git a/tools/lib/api/fs/findfs.h b/tools/lib/api/fs/findfs.h
new file mode 100644
index 000000000000..b6f5d05acc42
--- /dev/null
+++ b/tools/lib/api/fs/findfs.h
@@ -0,0 +1,23 @@
+#ifndef __API_FINDFS_H__
+#define __API_FINDFS_H__
+
+#include <stdbool.h>
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+/*
+ * On most systems <limits.h> would have given us this, but not on some systems
+ * (e.g. GNU/Hurd).
+ */
+#ifndef PATH_MAX
+#define PATH_MAX 4096
+#endif
+
+const char *find_mountpoint(const char *fstype, long magic,
+ char *mountpoint, int len,
+ const char * const *known_mountpoints);
+
+int valid_mountpoint(const char *mount, long magic);
+
+#endif /* __API_FINDFS_H__ */
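For illustration, a minimal sketch of how a caller might use the new find_mountpoint()
helper introduced above; the standalone program, its DEBUGFS_MAGIC fallback and its
known-mountpoint list are assumptions for the example, not code from this patch:

	/* hypothetical user of tools/lib/api's findfs.h */
	#include <stdio.h>
	#include "fs/findfs.h"

	#ifndef DEBUGFS_MAGIC
	#define DEBUGFS_MAGIC 0x64626720
	#endif

	int main(void)
	{
		static char path[PATH_MAX + 1];
		static const char * const known[] = { "/sys/kernel/debug", "/debug", 0 };
		const char *mnt;

		/* try the known mountpoints first, then fall back to /proc/mounts */
		mnt = find_mountpoint("debugfs", (long)DEBUGFS_MAGIC,
				      path, PATH_MAX + 1, known);
		if (!mnt) {
			fprintf(stderr, "debugfs not mounted\n");
			return 1;
		}
		printf("debugfs found at %s\n", mnt);
		return 0;
	}
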
diff --git a/tools/lib/api/fs/tracefs.c b/tools/lib/api/fs/tracefs.c
new file mode 100644
index 000000000000..e4aa9688b71e
--- /dev/null
+++ b/tools/lib/api/fs/tracefs.c
@@ -0,0 +1,78 @@
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <sys/vfs.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <linux/kernel.h>
+
+#include "tracefs.h"
+
+#ifndef TRACEFS_DEFAULT_PATH
+#define TRACEFS_DEFAULT_PATH "/sys/kernel/tracing"
+#endif
+
+char tracefs_mountpoint[PATH_MAX + 1] = TRACEFS_DEFAULT_PATH;
+
+static const char * const tracefs_known_mountpoints[] = {
+ TRACEFS_DEFAULT_PATH,
+ "/sys/kernel/debug/tracing",
+ "/tracing",
+ "/trace",
+ 0,
+};
+
+static bool tracefs_found;
+
+bool tracefs_configured(void)
+{
+ return tracefs_find_mountpoint() != NULL;
+}
+
+/* find the path to the mounted tracefs */
+const char *tracefs_find_mountpoint(void)
+{
+ const char *ret;
+
+ if (tracefs_found)
+ return (const char *)tracefs_mountpoint;
+
+ ret = find_mountpoint("tracefs", (long) TRACEFS_MAGIC,
+ tracefs_mountpoint, PATH_MAX + 1,
+ tracefs_known_mountpoints);
+
+ if (ret)
+ tracefs_found = true;
+
+ return ret;
+}
+
+/* mount the tracefs somewhere if it's not mounted */
+char *tracefs_mount(const char *mountpoint)
+{
+ /* see if it's already mounted */
+ if (tracefs_find_mountpoint())
+ goto out;
+
+ /* if not mounted and no argument */
+ if (mountpoint == NULL) {
+ /* see if environment variable set */
+ mountpoint = getenv(PERF_TRACEFS_ENVIRONMENT);
+ /* if no environment variable, use default */
+ if (mountpoint == NULL)
+ mountpoint = TRACEFS_DEFAULT_PATH;
+ }
+
+ if (mount(NULL, mountpoint, "tracefs", 0, NULL) < 0)
+ return NULL;
+
+ /* save the mountpoint */
+ tracefs_found = true;
+ strncpy(tracefs_mountpoint, mountpoint, sizeof(tracefs_mountpoint));
+out:
+ return tracefs_mountpoint;
+}
diff --git a/tools/lib/api/fs/tracefs.h b/tools/lib/api/fs/tracefs.h
new file mode 100644
index 000000000000..da780ac49acb
--- /dev/null
+++ b/tools/lib/api/fs/tracefs.h
@@ -0,0 +1,21 @@
+#ifndef __API_TRACEFS_H__
+#define __API_TRACEFS_H__
+
+#include "findfs.h"
+
+#ifndef TRACEFS_MAGIC
+#define TRACEFS_MAGIC 0x74726163
+#endif
+
+#ifndef PERF_TRACEFS_ENVIRONMENT
+#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
+#endif
+
+bool tracefs_configured(void);
+const char *tracefs_find_mountpoint(void);
+int tracefs_valid_mountpoint(const char *debugfs);
+char *tracefs_mount(const char *mountpoint);
+
+extern char tracefs_mountpoint[];
+
+#endif /* __API_TRACEFS_H__ */
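A brief usage sketch (not taken from the patch): how a tool might obtain a tracing
directory with the new tracefs API, relying on tracefs_mount() falling back to
PERF_TRACEFS_DIR or the default path when tracefs is not yet mounted; the helper
name is hypothetical:

	#include "fs/tracefs.h"

	/* hypothetical caller: find tracefs, mounting it at the default path if needed */
	static const char *get_tracing_dir(void)
	{
		if (tracefs_configured())
			return tracefs_find_mountpoint();

		/* NULL lets tracefs_mount() consult PERF_TRACEFS_DIR, then the default */
		return tracefs_mount(NULL);
	}
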
diff --git a/tools/lib/lockdep/Build b/tools/lib/lockdep/Build
new file mode 100644
index 000000000000..6f667355b068
--- /dev/null
+++ b/tools/lib/lockdep/Build
@@ -0,0 +1 @@
+liblockdep-y += common.o lockdep.o preload.o rbtree.o
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 4b866c54f624..18ffccf00426 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -14,9 +14,10 @@ define allow-override
$(eval $(1) = $(2)))
endef
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
+# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,LD,$(CROSS_COMPILE)ld)
INSTALL = install
@@ -35,6 +36,10 @@ bindir = $(prefix)/$(bindir_relative)
export DESTDIR DESTDIR_SQ INSTALL
+MAKEFLAGS += --no-print-directory
+
+include ../../scripts/Makefile.include
+
# copy a bit from Linux kbuild
ifeq ("$(origin V)", "command line")
@@ -44,56 +49,21 @@ ifndef VERBOSE
VERBOSE = 0
endif
-ifeq ("$(origin O)", "command line")
- BUILD_OUTPUT := $(O)
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
endif
-ifeq ($(BUILD_SRC),)
-ifneq ($(BUILD_OUTPUT),)
-
-define build_output
- $(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \
- BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
-endef
-
-saved-output := $(BUILD_OUTPUT)
-BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd)
-$(if $(BUILD_OUTPUT),, \
- $(error output directory "$(saved-output)" does not exist))
-
-all: sub-make
-
-gui: force
- $(call build_output, all_cmd)
-
-$(filter-out gui,$(MAKECMDGOALS)): sub-make
-
-sub-make: force
- $(call build_output, $(MAKECMDGOALS))
-
-
-# Leave processing to above invocation of make
-skip-makefile := 1
-
-endif # BUILD_OUTPUT
-endif # BUILD_SRC
-
-# We process the rest of the Makefile if this is the final invocation of make
-ifeq ($(skip-makefile),)
-
-srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)))
-objtree := $(realpath $(CURDIR))
-src := $(srctree)
-obj := $(objtree)
-
-export prefix libdir bindir src obj
-
# Shell quotes
libdir_SQ = $(subst ','\'',$(libdir))
bindir_SQ = $(subst ','\'',$(bindir))
-LIB_FILE = liblockdep.a liblockdep.so.$(LIBLOCKDEP_VERSION)
+LIB_IN := $(OUTPUT)liblockdep-in.o
+
BIN_FILE = lockdep
+LIB_FILE = $(OUTPUT)liblockdep.a $(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION)
CONFIG_INCLUDES =
CONFIG_LIBS =
@@ -108,33 +78,23 @@ INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
+CFLAGS += -fPIC
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
ifeq ($(VERBOSE),1)
Q =
- print_compile =
- print_app_build =
- print_fpic_compile =
print_shared_lib_compile =
print_install =
else
Q = @
- print_compile = echo ' CC '$(OBJ);
- print_app_build = echo ' BUILD '$(OBJ);
- print_fpic_compile = echo ' CC FPIC '$(OBJ);
- print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
- print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
- print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
+ print_shared_lib_compile = echo ' LD '$(OBJ);
+ print_static_lib_build = echo ' LD '$(OBJ);
+ print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
endif
-do_fpic_compile = \
- ($(print_fpic_compile) \
- $(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@)
-
-do_app_build = \
- ($(print_app_build) \
- $(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS))
+export srctree OUTPUT CC LD CFLAGS V
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
do_compile_shared_library = \
($(print_shared_lib_compile) \
@@ -144,22 +104,6 @@ do_build_static_lib = \
($(print_static_lib_build) \
$(RM) $@; $(AR) rcs $@ $^)
-
-define do_compile
- $(print_compile) \
- $(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
-endef
-
-$(obj)/%.o: $(src)/%.c
- $(Q)$(call do_compile)
-
-%.o: $(src)/%.c
- $(Q)$(call do_compile)
-
-PEVENT_LIB_OBJS = common.o lockdep.o preload.o rbtree.o
-
-ALL_OBJS = $(PEVENT_LIB_OBJS)
-
CMD_TARGETS = $(LIB_FILE)
TARGETS = $(CMD_TARGETS)
@@ -169,42 +113,15 @@ all: all_cmd
all_cmd: $(CMD_TARGETS)
-liblockdep.so.$(LIBLOCKDEP_VERSION): $(PEVENT_LIB_OBJS)
+$(LIB_IN): force
+ $(Q)$(MAKE) $(build)=liblockdep
+
+liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
$(Q)$(do_compile_shared_library)
-liblockdep.a: $(PEVENT_LIB_OBJS)
+liblockdep.a: $(LIB_IN)
$(Q)$(do_build_static_lib)
-$(PEVENT_LIB_OBJS): %.o: $(src)/%.c
- $(Q)$(do_fpic_compile)
-
-## make deps
-
-all_objs := $(sort $(ALL_OBJS))
-all_deps := $(all_objs:%.o=.%.d)
-
-# let .d file also depends on the source and header files
-define check_deps
- @set -e; $(RM) $@; \
- $(CC) -MM $(CFLAGS) $< > $@.$$$$; \
- sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
- $(RM) $@.$$$$
-endef
-
-$(all_deps): .%.d: $(src)/%.c
- $(Q)$(call check_deps)
-
-$(all_objs) : %.o : .%.d
-
-dep_includes := $(wildcard $(all_deps))
-
-ifneq ($(dep_includes),)
- include $(dep_includes)
-endif
-
-### Detect environment changes
-TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
-
tags: force
$(RM) tags
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
@@ -233,8 +150,6 @@ clean:
$(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
$(RM) tags TAGS
-endif # skip-makefile
-
PHONY += force
force:
diff --git a/tools/lib/lockdep/uinclude/linux/kernel.h b/tools/lib/lockdep/uinclude/linux/kernel.h
index a11e3c357be7..cd2cc59a5da7 100644
--- a/tools/lib/lockdep/uinclude/linux/kernel.h
+++ b/tools/lib/lockdep/uinclude/linux/kernel.h
@@ -28,6 +28,9 @@
#define __init
#define noinline
#define list_add_tail_rcu list_add_tail
+#define list_for_each_entry_rcu list_for_each_entry
+#define barrier()
+#define synchronize_sched()
#ifndef CALLER_ADDR0
#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
diff --git a/tools/lib/traceevent/Build b/tools/lib/traceevent/Build
new file mode 100644
index 000000000000..c681d0575d16
--- /dev/null
+++ b/tools/lib/traceevent/Build
@@ -0,0 +1,17 @@
+libtraceevent-y += event-parse.o
+libtraceevent-y += event-plugin.o
+libtraceevent-y += trace-seq.o
+libtraceevent-y += parse-filter.o
+libtraceevent-y += parse-utils.o
+libtraceevent-y += kbuffer-parse.o
+
+plugin_jbd2-y += plugin_jbd2.o
+plugin_hrtimer-y += plugin_hrtimer.o
+plugin_kmem-y += plugin_kmem.o
+plugin_kvm-y += plugin_kvm.o
+plugin_mac80211-y += plugin_mac80211.o
+plugin_sched_switch-y += plugin_sched_switch.o
+plugin_function-y += plugin_function.o
+plugin_xen-y += plugin_xen.o
+plugin_scsi-y += plugin_scsi.o
+plugin_cfg80211-y += plugin_cfg80211.o
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 005c9cc06935..d410da335e3d 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -67,7 +67,7 @@ PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)"
PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
endif
-include $(if $(BUILD_SRC),$(BUILD_SRC)/)../../scripts/Makefile.include
+include ../../scripts/Makefile.include
# copy a bit from Linux kbuild
@@ -78,40 +78,13 @@ ifndef VERBOSE
VERBOSE = 0
endif
-ifeq ("$(origin O)", "command line")
- BUILD_OUTPUT := $(O)
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
endif
-ifeq ($(BUILD_SRC),)
-ifneq ($(OUTPUT),)
-
-define build_output
- $(if $(VERBOSE:1=),@)+$(MAKE) -C $(OUTPUT) \
- BUILD_SRC=$(CURDIR)/ -f $(CURDIR)/Makefile $1
-endef
-
-all: sub-make
-
-$(MAKECMDGOALS): sub-make
-
-sub-make: force
- $(call build_output, $(MAKECMDGOALS))
-
-
-# Leave processing to above invocation of make
-skip-makefile := 1
-
-endif # OUTPUT
-endif # BUILD_SRC
-
-# We process the rest of the Makefile if this is the final invocation of make
-ifeq ($(skip-makefile),)
-
-srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
-objtree := $(CURDIR)
-src := $(srctree)
-obj := $(objtree)
-
export prefix bindir src obj
# Shell quotes
@@ -132,16 +105,19 @@ EXTRAVERSION = $(EP_EXTRAVERSION)
OBJ = $@
N =
-export Q VERBOSE
-
EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
-INCLUDES = -I. -I $(srctree)/../../include $(CONFIG_INCLUDES)
+INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
-# Set compile option CFLAGS if not set elsewhere
-CFLAGS ?= -g -Wall
+# Set compile option CFLAGS
+ifdef EXTRA_CFLAGS
+ CFLAGS := $(EXTRA_CFLAGS)
+else
+ CFLAGS := -g -Wall
+endif
# Append required CFLAGS
+override CFLAGS += -fPIC
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
@@ -151,74 +127,58 @@ else
Q = @
endif
-do_compile_shared_library = \
- ($(print_shared_lib_compile) \
- $(CC) --shared $^ -o $@)
-
-do_plugin_build = \
- ($(print_plugin_build) \
- $(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<)
-
-do_build_static_lib = \
- ($(print_static_lib_build) \
- $(RM) $@; $(AR) rcs $@ $^)
-
-
-do_compile = $(QUIET_CC)$(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
+# Disable command line variables (CFLAGS) overide from top
+# level Makefile (perf), otherwise build Makefile will get
+# the same command line setup.
+MAKEOVERRIDES=
-$(obj)/%.o: $(src)/%.c
- $(call do_compile)
+export srctree OUTPUT CC LD CFLAGS V
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
-%.o: $(src)/%.c
- $(call do_compile)
+PLUGINS = plugin_jbd2.so
+PLUGINS += plugin_hrtimer.so
+PLUGINS += plugin_kmem.so
+PLUGINS += plugin_kvm.so
+PLUGINS += plugin_mac80211.so
+PLUGINS += plugin_sched_switch.so
+PLUGINS += plugin_function.so
+PLUGINS += plugin_xen.so
+PLUGINS += plugin_scsi.so
+PLUGINS += plugin_cfg80211.so
-PEVENT_LIB_OBJS = event-parse.o
-PEVENT_LIB_OBJS += event-plugin.o
-PEVENT_LIB_OBJS += trace-seq.o
-PEVENT_LIB_OBJS += parse-filter.o
-PEVENT_LIB_OBJS += parse-utils.o
-PEVENT_LIB_OBJS += kbuffer-parse.o
+PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
+PLUGINS_IN := $(PLUGINS:.so=-in.o)
-PLUGIN_OBJS = plugin_jbd2.o
-PLUGIN_OBJS += plugin_hrtimer.o
-PLUGIN_OBJS += plugin_kmem.o
-PLUGIN_OBJS += plugin_kvm.o
-PLUGIN_OBJS += plugin_mac80211.o
-PLUGIN_OBJS += plugin_sched_switch.o
-PLUGIN_OBJS += plugin_function.o
-PLUGIN_OBJS += plugin_xen.o
-PLUGIN_OBJS += plugin_scsi.o
-PLUGIN_OBJS += plugin_cfg80211.o
-
-PLUGINS := $(PLUGIN_OBJS:.o=.so)
-
-ALL_OBJS = $(PEVENT_LIB_OBJS) $(PLUGIN_OBJS)
+TE_IN := $(OUTPUT)libtraceevent-in.o
+LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
CMD_TARGETS = $(LIB_FILE) $(PLUGINS)
TARGETS = $(CMD_TARGETS)
-
all: all_cmd
all_cmd: $(CMD_TARGETS)
-libtraceevent.so: $(PEVENT_LIB_OBJS)
+$(TE_IN): force
+ $(Q)$(MAKE) $(build)=libtraceevent
+
+$(OUTPUT)libtraceevent.so: $(TE_IN)
$(QUIET_LINK)$(CC) --shared $^ -o $@
-libtraceevent.a: $(PEVENT_LIB_OBJS)
+$(OUTPUT)libtraceevent.a: $(TE_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
plugins: $(PLUGINS)
-$(PEVENT_LIB_OBJS): %.o: $(src)/%.c TRACEEVENT-CFLAGS
- $(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@
+__plugin_obj = $(notdir $@)
+ plugin_obj = $(__plugin_obj:-in.o=)
-$(PLUGIN_OBJS): %.o : $(src)/%.c
- $(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) -fPIC -o $@ $<
+$(PLUGINS_IN): force
+ $(Q)$(MAKE) $(build)=$(plugin_obj)
-$(PLUGINS): %.so: %.o
- $(QUIET_LINK)$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<
+$(OUTPUT)%.so: $(OUTPUT)%-in.o
+ $(QUIET_LINK)$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $^
define make_version.h
(echo '/* This file is automatically generated. Do not modify. */'; \
@@ -255,40 +215,6 @@ define update_dir
fi);
endef
-## make deps
-
-all_objs := $(sort $(ALL_OBJS))
-all_deps := $(all_objs:%.o=.%.d)
-
-# let .d file also depends on the source and header files
-define check_deps
- @set -e; $(RM) $@; \
- $(CC) -MM $(CFLAGS) $< > $@.$$$$; \
- sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
- $(RM) $@.$$$$
-endef
-
-$(all_deps): .%.d: $(src)/%.c
- $(Q)$(call check_deps)
-
-$(all_objs) : %.o : .%.d
-
-dep_includes := $(wildcard $(all_deps))
-
-ifneq ($(dep_includes),)
- include $(dep_includes)
-endif
-
-### Detect environment changes
-TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
-
-TRACEEVENT-CFLAGS: force
- @FLAGS='$(TRACK_CFLAGS)'; \
- if test x"$$FLAGS" != x"`cat TRACEEVENT-CFLAGS 2>/dev/null`" ; then \
- echo 1>&2 " FLAGS: * new build flags or cross compiler"; \
- echo "$$FLAGS" >TRACEEVENT-CFLAGS; \
- fi
-
tags: force
$(RM) tags
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
@@ -327,14 +253,9 @@ clean:
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
$(RM) TRACEEVENT-CFLAGS tags TAGS
-endif # skip-makefile
-
PHONY += force plugins
force:
-plugins:
- @echo > /dev/null
-
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index afe20ed9fac8..29f94f6f0d9e 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -304,7 +304,10 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
if (!item)
return -1;
- item->comm = strdup(comm);
+ if (comm)
+ item->comm = strdup(comm);
+ else
+ item->comm = strdup("<...>");
if (!item->comm) {
free(item);
return -1;
@@ -318,9 +321,14 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
return 0;
}
-void pevent_register_trace_clock(struct pevent *pevent, char *trace_clock)
+int pevent_register_trace_clock(struct pevent *pevent, const char *trace_clock)
{
- pevent->trace_clock = trace_clock;
+ pevent->trace_clock = strdup(trace_clock);
+ if (!pevent->trace_clock) {
+ errno = ENOMEM;
+ return -1;
+ }
+ return 0;
}
struct func_map {
@@ -758,6 +766,11 @@ static void free_arg(struct print_arg *arg)
free_arg(arg->hex.field);
free_arg(arg->hex.size);
break;
+ case PRINT_INT_ARRAY:
+ free_arg(arg->int_array.field);
+ free_arg(arg->int_array.count);
+ free_arg(arg->int_array.el_size);
+ break;
case PRINT_TYPE:
free(arg->typecast.type);
free_arg(arg->typecast.item);
@@ -1926,7 +1939,22 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
goto out_warn_free;
type = process_arg_token(event, right, tok, type);
- arg->op.right = right;
+
+ if (right->type == PRINT_OP &&
+ get_op_prio(arg->op.op) < get_op_prio(right->op.op)) {
+ struct print_arg tmp;
+
+ /* rotate ops according to the priority */
+ arg->op.right = right->op.left;
+
+ tmp = *arg;
+ *arg = *right;
+ *right = tmp;
+
+ arg->op.left = right;
+ } else {
+ arg->op.right = right;
+ }
} else if (strcmp(token, "[") == 0) {
@@ -2014,6 +2042,38 @@ process_entry(struct event_format *event __maybe_unused, struct print_arg *arg,
return EVENT_ERROR;
}
+static int alloc_and_process_delim(struct event_format *event, char *next_token,
+ struct print_arg **print_arg)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+ int ret = 0;
+
+ field = alloc_arg();
+ if (!field) {
+ do_warning_event(event, "%s: not enough memory!", __func__);
+ errno = ENOMEM;
+ return -1;
+ }
+
+ type = process_arg(event, field, &token);
+
+ if (test_type_token(type, token, EVENT_DELIM, next_token)) {
+ errno = EINVAL;
+ ret = -1;
+ free_arg(field);
+ goto out_free_token;
+ }
+
+ *print_arg = field;
+
+out_free_token:
+ free_token(token);
+
+ return ret;
+}
+
static char *arg_eval (struct print_arg *arg);
static unsigned long long
@@ -2486,49 +2546,46 @@ out_free:
static enum event_type
process_hex(struct event_format *event, struct print_arg *arg, char **tok)
{
- struct print_arg *field;
- enum event_type type;
- char *token = NULL;
-
memset(arg, 0, sizeof(*arg));
arg->type = PRINT_HEX;
- field = alloc_arg();
- if (!field) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- goto out_free;
- }
+ if (alloc_and_process_delim(event, ",", &arg->hex.field))
+ goto out;
- type = process_arg(event, field, &token);
+ if (alloc_and_process_delim(event, ")", &arg->hex.size))
+ goto free_field;
- if (test_type_token(type, token, EVENT_DELIM, ","))
- goto out_free;
-
- arg->hex.field = field;
+ return read_token_item(tok);
- free_token(token);
+free_field:
+ free_arg(arg->hex.field);
+out:
+ *tok = NULL;
+ return EVENT_ERROR;
+}
- field = alloc_arg();
- if (!field) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- *tok = NULL;
- return EVENT_ERROR;
- }
+static enum event_type
+process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
+{
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_INT_ARRAY;
- type = process_arg(event, field, &token);
+ if (alloc_and_process_delim(event, ",", &arg->int_array.field))
+ goto out;
- if (test_type_token(type, token, EVENT_DELIM, ")"))
- goto out_free;
+ if (alloc_and_process_delim(event, ",", &arg->int_array.count))
+ goto free_field;
- arg->hex.size = field;
+ if (alloc_and_process_delim(event, ")", &arg->int_array.el_size))
+ goto free_size;
- free_token(token);
- type = read_token_item(tok);
- return type;
+ return read_token_item(tok);
- out_free:
- free_arg(field);
- free_token(token);
+free_size:
+ free_arg(arg->int_array.count);
+free_field:
+ free_arg(arg->int_array.field);
+out:
*tok = NULL;
return EVENT_ERROR;
}
@@ -2828,6 +2885,10 @@ process_function(struct event_format *event, struct print_arg *arg,
free_token(token);
return process_hex(event, arg, tok);
}
+ if (strcmp(token, "__print_array") == 0) {
+ free_token(token);
+ return process_int_array(event, arg, tok);
+ }
if (strcmp(token, "__get_str") == 0) {
free_token(token);
return process_str(event, arg, tok);
@@ -3356,6 +3417,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
break;
case PRINT_FLAGS:
case PRINT_SYMBOL:
+ case PRINT_INT_ARRAY:
case PRINT_HEX:
break;
case PRINT_TYPE:
@@ -3568,7 +3630,7 @@ static const struct flag flags[] = {
{ "HRTIMER_RESTART", 1 },
};
-static unsigned long long eval_flag(const char *flag)
+static long long eval_flag(const char *flag)
{
int i;
@@ -3584,7 +3646,7 @@ static unsigned long long eval_flag(const char *flag)
if (strcmp(flags[i].name, flag) == 0)
return flags[i].value;
- return 0;
+ return -1LL;
}
static void print_str_to_seq(struct trace_seq *s, const char *format,
@@ -3658,7 +3720,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
struct print_flag_sym *flag;
struct format_field *field;
struct printk_map *printk;
- unsigned long long val, fval;
+ long long val, fval;
unsigned long addr;
char *str;
unsigned char *hex;
@@ -3717,11 +3779,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
print = 0;
for (flag = arg->flags.flags; flag; flag = flag->next) {
fval = eval_flag(flag->value);
- if (!val && !fval) {
+ if (!val && fval < 0) {
print_str_to_seq(s, format, len_arg, flag->str);
break;
}
- if (fval && (val & fval) == fval) {
+ if (fval > 0 && (val & fval) == fval) {
if (print && arg->flags.delim)
trace_seq_puts(s, arg->flags.delim);
print_str_to_seq(s, format, len_arg, flag->str);
@@ -3766,6 +3828,54 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
break;
+ case PRINT_INT_ARRAY: {
+ void *num;
+ int el_size;
+
+ if (arg->int_array.field->type == PRINT_DYNAMIC_ARRAY) {
+ unsigned long offset;
+ struct format_field *field =
+ arg->int_array.field->dynarray.field;
+ offset = pevent_read_number(pevent,
+ data + field->offset,
+ field->size);
+ num = data + (offset & 0xffff);
+ } else {
+ field = arg->int_array.field->field.field;
+ if (!field) {
+ str = arg->int_array.field->field.name;
+ field = pevent_find_any_field(event, str);
+ if (!field)
+ goto out_warning_field;
+ arg->int_array.field->field.field = field;
+ }
+ num = data + field->offset;
+ }
+ len = eval_num_arg(data, size, event, arg->int_array.count);
+ el_size = eval_num_arg(data, size, event,
+ arg->int_array.el_size);
+ for (i = 0; i < len; i++) {
+ if (i)
+ trace_seq_putc(s, ' ');
+
+ if (el_size == 1) {
+ trace_seq_printf(s, "%u", *(uint8_t *)num);
+ } else if (el_size == 2) {
+ trace_seq_printf(s, "%u", *(uint16_t *)num);
+ } else if (el_size == 4) {
+ trace_seq_printf(s, "%u", *(uint32_t *)num);
+ } else if (el_size == 8) {
+ trace_seq_printf(s, "%"PRIu64, *(uint64_t *)num);
+ } else {
+ trace_seq_printf(s, "BAD SIZE:%d 0x%x",
+ el_size, *(uint8_t *)num);
+ el_size = 1;
+ }
+
+ num += el_size;
+ }
+ break;
+ }
case PRINT_TYPE:
break;
case PRINT_STRING: {
@@ -3976,7 +4086,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
if (asprintf(&arg->atom.atom, "%lld", ip) < 0)
goto out_free;
- /* skip the first "%pf: " */
+ /* skip the first "%ps: " */
for (ptr = fmt + 5, bptr = data + field->offset;
bptr < data + size && *ptr; ptr++) {
int ls = 0;
@@ -3997,6 +4107,10 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto process_again;
case '.':
goto process_again;
+ case 'z':
+ case 'Z':
+ ls = 1;
+ goto process_again;
case 'p':
ls = 1;
/* fall through */
@@ -4939,6 +5053,96 @@ const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid)
return comm;
}
+static struct cmdline *
+pid_from_cmdlist(struct pevent *pevent, const char *comm, struct cmdline *next)
+{
+ struct cmdline_list *cmdlist = (struct cmdline_list *)next;
+
+ if (cmdlist)
+ cmdlist = cmdlist->next;
+ else
+ cmdlist = pevent->cmdlist;
+
+ while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
+ cmdlist = cmdlist->next;
+
+ return (struct cmdline *)cmdlist;
+}
+
+/**
+ * pevent_data_pid_from_comm - return the pid from a given comm
+ * @pevent: a handle to the pevent
+ * @comm: the cmdline to find the pid from
+ * @next: the cmdline structure to find the next comm
+ *
+ * This returns the cmdline structure that holds a pid for a given
+ * comm, or NULL if none found. As there may be more than one pid for
+ * a given comm, the result of this call can be passed back into
+ * a recurring call in the @next parameter, and then it will find the
+ * next pid.
+ * Also, it does a linear search, so it may be slow.
+ */
+struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *comm,
+ struct cmdline *next)
+{
+ struct cmdline *cmdline;
+
+ /*
+ * If the cmdlines have not been converted yet, then use
+ * the list.
+ */
+ if (!pevent->cmdlines)
+ return pid_from_cmdlist(pevent, comm, next);
+
+ if (next) {
+ /*
+ * The next pointer could have been still from
+ * a previous call before cmdlines were created
+ */
+ if (next < pevent->cmdlines ||
+ next >= pevent->cmdlines + pevent->cmdline_count)
+ next = NULL;
+ else
+ cmdline = next++;
+ }
+
+ if (!next)
+ cmdline = pevent->cmdlines;
+
+ while (cmdline < pevent->cmdlines + pevent->cmdline_count) {
+ if (strcmp(cmdline->comm, comm) == 0)
+ return cmdline;
+ cmdline++;
+ }
+ return NULL;
+}
+
+/**
+ * pevent_cmdline_pid - return the pid associated to a given cmdline
+ * @cmdline: The cmdline structure to get the pid from
+ *
+ * Returns the pid for a given cmdline. If @cmdline is NULL, then
+ * -1 is returned.
+ */
+int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline)
+{
+ struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
+
+ if (!cmdline)
+ return -1;
+
+ /*
+ * If cmdlines have not been created yet, or cmdline is
+ * not part of the array, then treat it as a cmdlist instead.
+ */
+ if (!pevent->cmdlines ||
+ cmdline < pevent->cmdlines ||
+ cmdline >= pevent->cmdlines + pevent->cmdline_count)
+ return cmdlist->pid;
+
+ return cmdline->pid;
+}
+
/**
* pevent_data_comm_from_pid - parse the data into the print format
* @s: the trace_seq to write to
@@ -5256,6 +5460,15 @@ static void print_args(struct print_arg *args)
print_args(args->hex.size);
printf(")");
break;
+ case PRINT_INT_ARRAY:
+ printf("__print_array(");
+ print_args(args->int_array.field);
+ printf(", ");
+ print_args(args->int_array.count);
+ printf(", ");
+ print_args(args->int_array.el_size);
+ printf(")");
+ break;
case PRINT_STRING:
case PRINT_BSTRING:
printf("__get_str(%s)", args->string.string);
@@ -6228,15 +6441,20 @@ void pevent_ref(struct pevent *pevent)
pevent->ref_count++;
}
+void pevent_free_format_field(struct format_field *field)
+{
+ free(field->type);
+ free(field->name);
+ free(field);
+}
+
static void free_format_fields(struct format_field *field)
{
struct format_field *next;
while (field) {
next = field->next;
- free(field->type);
- free(field->name);
- free(field);
+ pevent_free_format_field(field);
field = next;
}
}
@@ -6341,6 +6559,7 @@ void pevent_free(struct pevent *pevent)
free_handler(handle);
}
+ free(pevent->trace_clock);
free(pevent->events);
free(pevent->sort_events);
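To make the comm/pid lookups added in this file concrete, a hedged example of how a
caller might resolve a pid for a command name (the wrapper function is hypothetical;
per the comment above, the returned struct cmdline can also be fed back via @next to
look for further matches):

	#include <stdio.h>
	#include "event-parse.h"

	/* hypothetical helper: print one pid recorded for the given comm, if any */
	static void show_pid_of(struct pevent *pevent, const char *comm)
	{
		struct cmdline *cmdline;

		cmdline = pevent_data_pid_from_comm(pevent, comm, NULL);
		if (cmdline)
			printf("%s: pid %d\n", comm, pevent_cmdline_pid(pevent, cmdline));
		else
			printf("%s: no pid recorded\n", comm);
	}
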
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 7a3873ff9a4f..86a5839fb048 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -22,6 +22,7 @@
#include <stdbool.h>
#include <stdarg.h>
+#include <stdio.h>
#include <regex.h>
#include <string.h>
@@ -91,6 +92,7 @@ extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
extern void trace_seq_terminate(struct trace_seq *s);
+extern int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp);
extern int trace_seq_do_printf(struct trace_seq *s);
@@ -114,7 +116,7 @@ struct pevent_plugin_option {
char *name;
char *plugin_alias;
char *description;
- char *value;
+ const char *value;
void *priv;
int set;
};
@@ -152,6 +154,10 @@ struct pevent_plugin_option {
* .plugin_alias is used to give a shorter name to access
 * the variable. Useful if a plugin handles more than one event.
*
+ * If .value is not set, then it is considered a boolean and only
+ * .set will be processed. If .value is defined, then it is considered
+ * a string option and .set will be ignored.
+ *
* PEVENT_PLUGIN_ALIAS: (optional)
* The name to use for finding options (uses filename if not defined)
*/
@@ -245,6 +251,12 @@ struct print_arg_hex {
struct print_arg *size;
};
+struct print_arg_int_array {
+ struct print_arg *field;
+ struct print_arg *count;
+ struct print_arg *el_size;
+};
+
struct print_arg_dynarray {
struct format_field *field;
struct print_arg *index;
@@ -273,6 +285,7 @@ enum print_arg_type {
PRINT_FLAGS,
PRINT_SYMBOL,
PRINT_HEX,
+ PRINT_INT_ARRAY,
PRINT_TYPE,
PRINT_STRING,
PRINT_BSTRING,
@@ -292,6 +305,7 @@ struct print_arg {
struct print_arg_flags flags;
struct print_arg_symbol symbol;
struct print_arg_hex hex;
+ struct print_arg_int_array int_array;
struct print_arg_func func;
struct print_arg_string string;
struct print_arg_bitmask bitmask;
@@ -597,7 +611,7 @@ enum trace_flag_type {
};
int pevent_register_comm(struct pevent *pevent, const char *comm, int pid);
-void pevent_register_trace_clock(struct pevent *pevent, char *trace_clock);
+int pevent_register_trace_clock(struct pevent *pevent, const char *trace_clock);
int pevent_register_function(struct pevent *pevent, char *name,
unsigned long long addr, char *mod);
int pevent_register_print_string(struct pevent *pevent, const char *fmt,
@@ -617,6 +631,7 @@ enum pevent_errno pevent_parse_format(struct pevent *pevent,
const char *buf,
unsigned long size, const char *sys);
void pevent_free_format(struct event_format *event);
+void pevent_free_format_field(struct format_field *field);
void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
const char *name, struct pevent_record *record,
@@ -675,6 +690,11 @@ int pevent_data_type(struct pevent *pevent, struct pevent_record *rec);
struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type);
int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec);
const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid);
+struct cmdline;
+struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *comm,
+ struct cmdline *next);
+int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline);
+
void pevent_event_info(struct trace_seq *s, struct event_format *event,
struct pevent_record *record);
int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
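As a reading aid for the plugin-option semantics documented in this header (boolean
options leave .value unset and are toggled through .set, string options carry their
current string in .value), a hedged sketch of an option table; the option names and
descriptions are made up for the example:

	#include "event-parse.h"

	static struct pevent_plugin_option plugin_options[] = {
		{
			/* boolean: no .value, only .set is processed */
			.name = "verbose-dump",
			.description = "dump extra fields (example option)",
		},
		{
			/* string: .value holds the setting, .set is ignored */
			.name = "clock-name",
			.description = "clock to report timestamps in (example option)",
			.value = "local",
		},
		{
			.name = NULL,	/* terminator */
		},
	};
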
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index 136162c03af1..a16756ae3526 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -18,6 +18,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <dlfcn.h>
@@ -49,6 +50,52 @@ struct plugin_list {
void *handle;
};
+static void lower_case(char *str)
+{
+ if (!str)
+ return;
+ for (; *str; str++)
+ *str = tolower(*str);
+}
+
+static int update_option_value(struct pevent_plugin_option *op, const char *val)
+{
+ char *op_val;
+
+ if (!val) {
+ /* toggle, only if option is boolean */
+ if (op->value)
+ /* Warn? */
+ return 0;
+ op->set ^= 1;
+ return 0;
+ }
+
+ /*
+ * If the option has a value then it takes a string
+ * otherwise the option is a boolean.
+ */
+ if (op->value) {
+ op->value = val;
+ return 0;
+ }
+
+ /* Option is boolean, must be either "1", "0", "true" or "false" */
+
+ op_val = strdup(val);
+ if (!op_val)
+ return -1;
+ lower_case(op_val);
+
+ if (strcmp(val, "1") == 0 || strcmp(val, "true") == 0)
+ op->set = 1;
+ else if (strcmp(val, "0") == 0 || strcmp(val, "false") == 0)
+ op->set = 0;
+ free(op_val);
+
+ return 0;
+}
+
/**
* traceevent_plugin_list_options - get list of plugin options
*
@@ -120,6 +167,7 @@ update_option(const char *file, struct pevent_plugin_option *option)
{
struct trace_plugin_options *op;
char *plugin;
+ int ret = 0;
if (option->plugin_alias) {
plugin = strdup(option->plugin_alias);
@@ -144,9 +192,10 @@ update_option(const char *file, struct pevent_plugin_option *option)
if (strcmp(op->option, option->name) != 0)
continue;
- option->value = op->value;
- option->set ^= 1;
- goto out;
+ ret = update_option_value(option, op->value);
+ if (ret)
+ goto out;
+ break;
}
/* first look for unnamed options */
@@ -156,14 +205,13 @@ update_option(const char *file, struct pevent_plugin_option *option)
if (strcmp(op->option, option->name) != 0)
continue;
- option->value = op->value;
- option->set ^= 1;
+ ret = update_option_value(option, op->value);
break;
}
out:
free(plugin);
- return 0;
+ return ret;
}
/**
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index dcc665228c71..3bcada3ae05a 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
switch (type_len) {
case KBUFFER_TYPE_PADDING:
*length = read_4(kbuf, data);
- data += *length;
break;
case KBUFFER_TYPE_TIME_EXTEND:
@@ -730,3 +729,14 @@ void kbuffer_set_old_format(struct kbuffer *kbuf)
kbuf->next_event = __old_next_event;
}
+
+/**
+ * kbuffer_start_of_data - return offset of where data starts on subbuffer
+ * @kbuf: The kbuffer
+ *
+ * Returns the location on the subbuffer where the data starts.
+ */
+int kbuffer_start_of_data(struct kbuffer *kbuf)
+{
+ return kbuf->start;
+}
diff --git a/tools/lib/traceevent/kbuffer.h b/tools/lib/traceevent/kbuffer.h
index c831f64b17a0..03dce757553f 100644
--- a/tools/lib/traceevent/kbuffer.h
+++ b/tools/lib/traceevent/kbuffer.h
@@ -63,5 +63,6 @@ int kbuffer_missed_events(struct kbuffer *kbuf);
int kbuffer_subbuffer_size(struct kbuffer *kbuf);
void kbuffer_set_old_format(struct kbuffer *kbuf);
+int kbuffer_start_of_data(struct kbuffer *kbuf);
#endif /* _K_BUFFER_H */
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index b50234402fc2..0144b3d1bb77 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1058,6 +1058,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
*parg = current_op;
else
*parg = current_exp;
+ free(token);
return PEVENT_ERRNO__UNBALANCED_PAREN;
}
break;
@@ -1168,6 +1169,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
*parg = current_op;
+ free(token);
return 0;
fail_alloc:
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index ec3bd16a5488..292dc9f1d233 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -231,19 +231,24 @@ void trace_seq_terminate(struct trace_seq *s)
s->buffer[s->len] = 0;
}
-int trace_seq_do_printf(struct trace_seq *s)
+int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp)
{
TRACE_SEQ_CHECK(s);
switch (s->state) {
case TRACE_SEQ__GOOD:
- return printf("%.*s", s->len, s->buffer);
+ return fprintf(fp, "%.*s", s->len, s->buffer);
case TRACE_SEQ__BUFFER_POISONED:
- puts("Usage of trace_seq after it was destroyed");
+ fprintf(fp, "%s\n", "Usage of trace_seq after it was destroyed");
break;
case TRACE_SEQ__MEM_ALLOC_FAILED:
- puts("Can't allocate trace_seq buffer memory");
+ fprintf(fp, "%s\n", "Can't allocate trace_seq buffer memory");
break;
}
return -1;
}
+
+int trace_seq_do_printf(struct trace_seq *s)
+{
+ return trace_seq_do_fprintf(s, stdout);
+}
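A short hedged example of the new trace_seq_do_fprintf() entry point, which lets a
trace_seq be flushed to any stdio stream rather than only stdout; the reporting
helper below is hypothetical:

	#include <stdio.h>
	#include "event-parse.h"

	/* hypothetical: build a line in a trace_seq and emit it on stderr */
	static void report(const char *name, int count)
	{
		struct trace_seq s;

		trace_seq_init(&s);
		trace_seq_printf(&s, "%s: %d events\n", name, count);
		trace_seq_do_fprintf(&s, stderr);
		trace_seq_destroy(&s);
	}
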
diff --git a/tools/net/bpf_exp.l b/tools/net/bpf_exp.l
index 833a96611da6..7cc72a336645 100644
--- a/tools/net/bpf_exp.l
+++ b/tools/net/bpf_exp.l
@@ -90,8 +90,10 @@ extern void yyerror(const char *str);
"#"?("hatype") { return K_HATYPE; }
"#"?("rxhash") { return K_RXHASH; }
"#"?("cpu") { return K_CPU; }
-"#"?("vlan_tci") { return K_VLANT; }
-"#"?("vlan_pr") { return K_VLANP; }
+"#"?("vlan_tci") { return K_VLAN_TCI; }
+"#"?("vlan_pr") { return K_VLAN_AVAIL; }
+"#"?("vlan_avail") { return K_VLAN_AVAIL; }
+"#"?("vlan_tpid") { return K_VLAN_TPID; }
"#"?("rand") { return K_RAND; }
":" { return ':'; }
diff --git a/tools/net/bpf_exp.y b/tools/net/bpf_exp.y
index e6306c51c26f..e24eea1b0db5 100644
--- a/tools/net/bpf_exp.y
+++ b/tools/net/bpf_exp.y
@@ -56,7 +56,7 @@ static void bpf_set_jmp_label(char *label, enum jmp_type type);
%token OP_LDXI
%token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE K_HATYPE
-%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF K_RAND
+%token K_RXHASH K_CPU K_IFIDX K_VLAN_TCI K_VLAN_AVAIL K_VLAN_TPID K_POFF K_RAND
%token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%'
@@ -155,10 +155,10 @@ ldb
| OP_LDB K_CPU {
bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_CPU); }
- | OP_LDB K_VLANT {
+ | OP_LDB K_VLAN_TCI {
bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_VLAN_TAG); }
- | OP_LDB K_VLANP {
+ | OP_LDB K_VLAN_AVAIL {
bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
| OP_LDB K_POFF {
@@ -167,6 +167,9 @@ ldb
| OP_LDB K_RAND {
bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_RANDOM); }
+ | OP_LDB K_VLAN_TPID {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TPID); }
;
ldh
@@ -206,10 +209,10 @@ ldh
| OP_LDH K_CPU {
bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_CPU); }
- | OP_LDH K_VLANT {
+ | OP_LDH K_VLAN_TCI {
bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_VLAN_TAG); }
- | OP_LDH K_VLANP {
+ | OP_LDH K_VLAN_AVAIL {
bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
| OP_LDH K_POFF {
@@ -218,6 +221,9 @@ ldh
| OP_LDH K_RAND {
bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_RANDOM); }
+ | OP_LDH K_VLAN_TPID {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TPID); }
;
ldi
@@ -262,10 +268,10 @@ ld
| OP_LD K_CPU {
bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_CPU); }
- | OP_LD K_VLANT {
+ | OP_LD K_VLAN_TCI {
bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_VLAN_TAG); }
- | OP_LD K_VLANP {
+ | OP_LD K_VLAN_AVAIL {
bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
| OP_LD K_POFF {
@@ -274,6 +280,9 @@ ld
| OP_LD K_RAND {
bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
SKF_AD_OFF + SKF_AD_RANDOM); }
+ | OP_LD K_VLAN_TPID {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TPID); }
| OP_LD 'M' '[' number ']' {
bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); }
| OP_LD '[' 'x' '+' number ']' {
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 40399c3d97d6..812f904193e8 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -1,6 +1,7 @@
PERF-CFLAGS
PERF-GUI-VARS
PERF-VERSION-FILE
+FEATURE-DUMP
perf
perf-read-vdso32
perf-read-vdsox32
diff --git a/tools/perf/Build b/tools/perf/Build
new file mode 100644
index 000000000000..b77370ef7005
--- /dev/null
+++ b/tools/perf/Build
@@ -0,0 +1,44 @@
+perf-y += builtin-bench.o
+perf-y += builtin-annotate.o
+perf-y += builtin-diff.o
+perf-y += builtin-evlist.o
+perf-y += builtin-help.o
+perf-y += builtin-sched.o
+perf-y += builtin-buildid-list.o
+perf-y += builtin-buildid-cache.o
+perf-y += builtin-list.o
+perf-y += builtin-record.o
+perf-y += builtin-report.o
+perf-y += builtin-stat.o
+perf-y += builtin-timechart.o
+perf-y += builtin-top.o
+perf-y += builtin-script.o
+perf-y += builtin-kmem.o
+perf-y += builtin-lock.o
+perf-y += builtin-kvm.o
+perf-y += builtin-inject.o
+perf-y += builtin-mem.o
+perf-y += builtin-data.o
+
+perf-$(CONFIG_AUDIT) += builtin-trace.o
+perf-$(CONFIG_LIBELF) += builtin-probe.o
+
+perf-y += bench/
+perf-y += tests/
+
+perf-y += perf.o
+
+paths += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))"
+paths += -DPERF_INFO_PATH="BUILD_STR($(infodir_SQ))"
+paths += -DPERF_MAN_PATH="BUILD_STR($(mandir_SQ))"
+
+CFLAGS_builtin-help.o += $(paths)
+CFLAGS_builtin-timechart.o += $(paths)
+CFLAGS_perf.o += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))" -include $(OUTPUT)PERF-VERSION-FILE
+
+libperf-y += util/
+libperf-y += arch/
+libperf-y += ui/
+libperf-y += scripts/
+
+gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/Build.txt b/tools/perf/Documentation/Build.txt
new file mode 100644
index 000000000000..f6fc6507ba55
--- /dev/null
+++ b/tools/perf/Documentation/Build.txt
@@ -0,0 +1,49 @@
+
+1) perf build
+=============
+The perf build process consists of several separate building blocks,
+which are linked together to form the perf binary:
+ - libperf library (static)
+ - perf builtin commands
+ - traceevent library (static)
+ - GTK ui library
+
+Several makefiles govern the perf build:
+
+ - Makefile
+ top level Makefile working as a wrapper that calls the main
+ Makefile.perf with a -j option to do parallel builds.
+
+ - Makefile.perf
+ main makefile that triggers build of all perf objects including
+ installation and documentation processing.
+
+ - tools/build/Makefile.build
+ main makefile of the build framework
+
+ - tools/build/Build.include
+ build framework generic definitions
+
+ - Build makefiles
+ makefiles that define build objects
+
+Please refer to tools/build/Documentation/Build.txt for more
+information about the build framework.
+
+
+2) perf build
+=============
+The Makefile.perf triggers the build framework for build objects:
+ perf, libperf, gtk
+
+resulting in the following objects:
+ $ ls *-in.o
+ gtk-in.o libperf-in.o perf-in.o
+
+Those objects are then used in final linking:
+ libperf-gtk.so <- gtk-in.o libperf-in.o
+ perf <- perf-in.o libperf-in.o
+
+
+NOTE this description omits other libraries involved, focusing
+ only on the build framework outcomes
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index 0294c57b1f5e..dd07b55f58d8 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -12,9 +12,9 @@ SYNOPSIS
DESCRIPTION
-----------
-This command manages the build-id cache. It can add and remove files to/from
-the cache. In the future it should as well purge older entries, set upper
-limits for the space used by the cache, etc.
+This command manages the build-id cache. It can add, remove, update and purge
+files to/from the cache. In the future it should also set upper limits for
+the space used by the cache, etc.
OPTIONS
-------
@@ -36,14 +36,24 @@ OPTIONS
actually made.
-r::
--remove=::
- Remove specified file from the cache.
+ Remove a cached binary which has the same build-id as the specified
+ file from the cache.
+-p::
+--purge=::
+ Purge all cached binaries including older caches which have the
+ specified path from the cache.
-M::
--missing=::
List missing build ids in the cache for the specified file.
-u::
---update::
- Update specified file of the cache. It can be used to update kallsyms
- kernel dso to vmlinux in order to support annotation.
+--update=::
+ Update the specified file of the cache. Note that this doesn't remove
+ older entries since those may still be needed for annotating old
+ (or remote) perf.data. Only if there is already a cache which has
+ exactly the same build-id is it replaced by the new one. It can be used
+ to update kallsyms and the kernel dso to vmlinux in order to support
+ annotation.
+
-v::
--verbose::
Be more verbose.
diff --git a/tools/perf/Documentation/perf-data.txt b/tools/perf/Documentation/perf-data.txt
new file mode 100644
index 000000000000..be8fa1a0a97e
--- /dev/null
+++ b/tools/perf/Documentation/perf-data.txt
@@ -0,0 +1,40 @@
+perf-data(1)
+==============
+
+NAME
+----
+perf-data - Data file related processing
+
+SYNOPSIS
+--------
+[verse]
+'perf data' [<common options>] <command> [<options>]
+
+DESCRIPTION
+-----------
+Data file related processing.
+
+COMMANDS
+--------
+convert::
+	Converts a perf data file into another format (only the CTF [1] format is supported for now).
+	It's possible to set the data-convert debug variable to get debug messages from the conversion,
+ like:
+ perf --debug data-convert data convert ...
+
+OPTIONS for 'convert'
+---------------------
+--to-ctf::
+	Trigger the CTF conversion; the argument is the path of the CTF data directory.
+
+-i::
+ Specify input perf data file path.
+
+-v::
+--verbose::
+ Be more verbose (show counter open errors, etc).
+
+SEE ALSO
+--------
+linkperf:perf[1]
+[1] Common Trace Format - http://www.efficios.com/ctf
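For example, converting a recorded perf.data file into a CTF trace might look
like this (the ./ctf-data output directory name is arbitrary):

  $ perf data convert --to-ctf=./ctf-data -i perf.data
  $ perf --debug data-convert data convert --to-ctf=./ctf-data -i perf.data   # with conversion debug output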
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index e463caa3eb49..d1deb573877f 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -20,12 +20,20 @@ If no parameters are passed it will assume perf.data.old and perf.data.
The differential profile is displayed only for events matching both
specified perf.data files.
+If no parameters are passed the samples will be sorted by dso and symbol.
+As the perf.data files could come from different binaries, the symbol
+addresses could vary. So perf diff bases its comparison on dso and
+symbol names.
+
OPTIONS
-------
-D::
--dump-raw-trace::
Dump raw trace in ASCII.
+--kallsyms=<file>::
+ kallsyms pathname
+
-m::
--modules::
Load module symbols. WARNING: use only with -k and LIVE kernel
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index 7c8fbbf3f61c..23219c65c16f 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -3,7 +3,7 @@ perf-kmem(1)
NAME
----
-perf-kmem - Tool to trace/measure kernel memory(slab) properties
+perf-kmem - Tool to trace/measure kernel memory properties
SYNOPSIS
--------
@@ -25,6 +25,10 @@ OPTIONS
--input=<file>::
Select the input file (default: perf.data unless stdin is a fifo)
+-v::
+--verbose::
+	Be more verbose (show symbol address, etc).
+
--caller::
Show per-callsite statistics
@@ -42,6 +46,12 @@ OPTIONS
--raw-ip::
Print raw ip instead of symbol
+--slab::
+ Analyze SLAB allocator events.
+
+--page::
+	Analyze page allocator events.
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 3e2aec94f806..bada8933fdd4 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -26,6 +26,7 @@ counted. The following modifiers exist:
u - user-space counting
k - kernel counting
h - hypervisor counting
+ I - non-idle counting
G - guest counting (in KVM guests)
H - host counting (not in KVM guests)
p - precise level
@@ -127,6 +128,12 @@ To limit the list use:
One or more types can be used at the same time, listing the events for the
types specified.
+The raw format is also supported:
+
+. '--raw-dump', shows the raw dump of all the events.
+. '--raw-dump [hw|sw|cache|tracepoint|pmu|event_glob]', shows the raw dump of
+  events of a certain kind only.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-top[1],
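Typical raw-dump invocations, matching the options documented above:

  $ perf list --raw-dump               # dump all known event names
  $ perf list --raw-dump tracepoint    # dump only tracepoint events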
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index aaa869be3dc1..239609c09f83 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -47,6 +47,12 @@ OPTIONS
-v::
--verbose::
Be more verbose (show parsed arguments, etc).
+	Cannot be used with -q.
+
+-q::
+--quiet::
+	Be quiet (do not show any messages, including errors).
+	Cannot be used with -v.
-a::
--add=::
@@ -96,7 +102,7 @@ OPTIONS
Dry run. With this option, --add and --del doesn't execute actual
adding and removal operations.
---max-probes::
+--max-probes=NUM::
Set the maximum number of probe points for an event. Default is 128.
-x::
@@ -104,8 +110,13 @@ OPTIONS
Specify path to the executable or shared library file for user
space tracing. Can also be used with --funcs option.
+--demangle::
+ Demangle application symbols. --no-demangle is also available
+ for disabling demangling.
+
--demangle-kernel::
- Demangle kernel symbols.
+ Demangle kernel symbols. --no-demangle-kernel is also available
+ for disabling kernel demangling.
In absence of -m/-x options, perf probe checks if the first argument after
the options is an absolute path name. If its an absolute path, perf probe
@@ -137,6 +148,7 @@ Each probe argument follows below syntax.
[NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
+The special argument '$vars' is also available for NAME; it is expanded to the local variables which are accessible at the given probe point.
'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
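As an illustration of the '$vars' argument (vfs_read is just an example
function; kernel debuginfo is required):

  $ perf probe --add 'vfs_read $vars'          # capture every local variable visible at the probe point
  $ perf record -e probe:vfs_read -aR sleep 1  # then record hits of the new event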
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 31e977459c51..4847a793de65 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -55,6 +55,11 @@ OPTIONS
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
+	- a group of events surrounded by a pair of braces ("{event1,event2,...}").
+	  Events in the group are separated by commas and the group should be
+	  quoted to prevent shell interpretation.  You also need to use --group on
+ "perf report" to view group events together.
+
--filter=<filter>::
Event filter.
@@ -62,9 +67,6 @@ OPTIONS
--all-cpus::
System-wide collection from all CPUs.
--l::
- Scale counter values.
-
-p::
--pid=::
Record events on existing process ID (comma separated list).
@@ -107,6 +109,10 @@ OPTIONS
specification with appended unit character - B/K/M/G. The
size is rounded up to have nearest pages power of two value.
+--group::
+ Put all events in a single event group. This precedes the --event
+ option and remains only for backward compatibility. See --event.
+
-g::
Enables call-graph (stack chain/backtrace) recording.
@@ -115,13 +121,19 @@ OPTIONS
implies -g.
Allows specifying "fp" (frame pointer) or "dwarf"
- (DWARF's CFI - Call Frame Information) as the method to collect
+ (DWARF's CFI - Call Frame Information) or "lbr"
+ (Hardware Last Branch Record facility) as the method to collect
the information used to show the call graphs.
In some systems, where binaries are build with gcc
--fomit-frame-pointer, using the "fp" method will produce bogus
call graphs, using "dwarf", if available (perf tools linked to
the libunwind library) should be used instead.
+ Using the "lbr" method doesn't require any compiler options. It
+ will produce call graphs from the hardware LBR registers. The
+	main limitation is that it is only available on recent Intel
+	platforms, such as Haswell. It can only capture the user call
+	chain and cannot be used together with branch stack sampling.
-q::
--quiet::
@@ -235,6 +247,16 @@ Capture machine state (registers) at interrupt, i.e., on counter overflows for
each sample. List of captured registers depends on the architecture. This option
is off by default.
+--running-time::
+Record running and enabled time for read events (:S)
+
+-k::
+--clockid::
+Sets the clock id to use for the various time fields in the perf_event_type
+records. See clock_gettime(). In particular CLOCK_MONOTONIC and
+CLOCK_MONOTONIC_RAW are supported; some events might also allow
+CLOCK_BOOTTIME, CLOCK_REALTIME and CLOCK_TAI.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
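Two sketches of the options added above (./workload stands in for any command
to be profiled; LBR call graphs need a recent Intel CPU):

  $ perf record -e '{cycles,instructions}' -- ./workload   # record the two events as one group
  $ perf report --group                                    # view the grouped events together
  $ perf record --call-graph lbr -- ./workload             # call graphs from the hardware LBR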
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index dd7cccdde498..4879cf638824 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -40,6 +40,11 @@ OPTIONS
Only consider symbols in these comms. CSV that understands
file://filename entries. This option will affect the percentage of
the overhead column. See --percentage for more info.
+--pid=::
+	Only show events for the given process ID (comma separated list).
+
+--tid=::
+	Only show events for the given thread ID (comma separated list).
-d::
--dsos=::
Only consider symbols in these dsos. CSV that understands
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index a21eec05bc42..79445750fcb3 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -193,6 +193,12 @@ OPTIONS
Only display events for these comms. CSV that understands
file://filename entries.
+--pid=::
+	Only show events for the given process ID (comma separated list).
+
+--tid=::
+	Only show events for the given thread ID (comma separated list).
+
-I::
--show-info::
Display extended information about the perf.data file. This adds
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 7e1b1f2bb83c..ba03fd5d1a54 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -55,6 +55,9 @@ OPTIONS
--uid=::
Record events in threads owned by uid. Name or number.
+--filter-pids=::
+ Filter out events for these pids and for 'trace' itself (comma separated list).
+
-v::
--verbose=::
Verbosity level.
@@ -115,6 +118,9 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
--syscalls::
Trace system calls. This options is enabled by default.
+--event::
+ Trace other events, see 'perf list' for a complete list.
+
PAGEFAULTS
----------
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 1e8e400b4493..2b131776363e 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -13,11 +13,16 @@ SYNOPSIS
OPTIONS
-------
--debug::
- Setup debug variable (just verbose for now) in value
+ Setup debug variable (see list below) in value
range (0, 10). Use like:
--debug verbose # sets verbose = 1
--debug verbose=2 # sets verbose = 2
+	List of debug variables that can be set:
+ verbose - general debug messages
+ ordered-events - ordered events object debug messages
+ data-convert - data convert command debug messages
+
--buildid-dir::
Setup buildid cache directory. It has higher priority than
buildid.dir config file option.
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index fbbfdc39271d..11ccbb22ea2b 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,5 +1,6 @@
tools/perf
tools/scripts
+tools/build
tools/lib/traceevent
tools/lib/api
tools/lib/symbol/kallsyms.c
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index cb2e5868c8e8..d31a7bbd7cee 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -24,8 +24,8 @@ unexport MAKEFLAGS
# (To override it, run 'make JOBS=1' and similar.)
#
ifeq ($(JOBS),)
- JOBS := $(shell grep -c ^processor /proc/cpuinfo 2>/dev/null)
- ifeq ($(JOBS),)
+ JOBS := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ ifeq ($(JOBS),0)
JOBS := 1
endif
endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index aa6a50447c32..c43a20517591 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -68,7 +68,11 @@ include config/utilities.mak
# for reading the x32 mode 32-bit compatibility VDSO in 64-bit mode
#
# Define NO_ZLIB if you do not want to support compressed kernel modules
-
+#
+# Define LIBBABELTRACE if you DO want libbabeltrace support
+# for CTF data format.
+#
+# Define NO_LZMA if you do not want to support compressed (xz) kernel modules
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))
@@ -82,13 +86,29 @@ endif
ifneq ($(OUTPUT),)
#$(info Determined 'OUTPUT' to be $(OUTPUT))
+# Add $(OUTPUT) as a directory to look for source files in,
+# because we use generated output files as source dependencies
+# for the flex/bison parsers.
+VPATH += $(OUTPUT)
+export VPATH
endif
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour);
+MAKEFLAGS += -r
+
$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
- @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
- @touch $(OUTPUT)PERF-VERSION-FILE
+ $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
+ $(Q)touch $(OUTPUT)PERF-VERSION-FILE
CC = $(CROSS_COMPILE)gcc
+LD = $(CROSS_COMPILE)ld
AR = $(CROSS_COMPILE)ar
PKG_CONFIG = $(CROSS_COMPILE)pkg-config
@@ -127,10 +147,6 @@ export prefix bindir sharedir sysconfdir DESTDIR
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
# Guard against environment variables
-BUILTIN_OBJS =
-LIB_H =
-LIB_OBJS =
-GTK_OBJS =
PYRF_OBJS =
SCRIPT_SH =
@@ -155,8 +171,8 @@ endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
export LIBTRACEEVENT
-LIBAPIKFS = $(LIB_PATH)libapikfs.a
-export LIBAPIKFS
+LIBAPI = $(LIB_PATH)libapi.a
+export LIBAPI
# python extension build directories
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
@@ -167,7 +183,7 @@ export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
-PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPIKFS)
+PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPI)
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
$(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \
@@ -206,297 +222,9 @@ endif
export PERL_PATH
-$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
- $(QUIET_FLEX)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l
-
-$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
- $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
-
-$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
- $(QUIET_FLEX)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l
-
-$(OUTPUT)util/pmu-bison.c: util/pmu.y
- $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
-
-$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
-$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
-
LIB_FILE=$(OUTPUT)libperf.a
-LIB_H += ../lib/symbol/kallsyms.h
-LIB_H += ../../include/uapi/linux/perf_event.h
-LIB_H += ../../include/linux/rbtree.h
-LIB_H += ../../include/linux/list.h
-LIB_H += ../../include/uapi/linux/const.h
-LIB_H += ../include/linux/hash.h
-LIB_H += ../../include/linux/stringify.h
-LIB_H += util/include/linux/bitmap.h
-LIB_H += ../include/linux/bitops.h
-LIB_H += ../include/asm-generic/bitops/arch_hweight.h
-LIB_H += ../include/asm-generic/bitops/atomic.h
-LIB_H += ../include/asm-generic/bitops/const_hweight.h
-LIB_H += ../include/asm-generic/bitops/find.h
-LIB_H += ../include/asm-generic/bitops/fls64.h
-LIB_H += ../include/asm-generic/bitops/fls.h
-LIB_H += ../include/asm-generic/bitops/__ffs.h
-LIB_H += ../include/asm-generic/bitops/__fls.h
-LIB_H += ../include/asm-generic/bitops/hweight.h
-LIB_H += ../include/asm-generic/bitops.h
-LIB_H += ../include/linux/compiler.h
-LIB_H += ../include/linux/log2.h
-LIB_H += util/include/linux/const.h
-LIB_H += util/include/linux/ctype.h
-LIB_H += util/include/linux/kernel.h
-LIB_H += util/include/linux/list.h
-LIB_H += ../include/linux/export.h
-LIB_H += util/include/linux/poison.h
-LIB_H += util/include/linux/rbtree.h
-LIB_H += util/include/linux/rbtree_augmented.h
-LIB_H += util/include/linux/string.h
-LIB_H += ../include/linux/types.h
-LIB_H += util/include/linux/linkage.h
-LIB_H += util/include/asm/asm-offsets.h
-LIB_H += ../include/asm/bug.h
-LIB_H += util/include/asm/byteorder.h
-LIB_H += util/include/asm/swab.h
-LIB_H += util/include/asm/system.h
-LIB_H += util/include/asm/uaccess.h
-LIB_H += util/include/dwarf-regs.h
-LIB_H += util/include/asm/dwarf2.h
-LIB_H += util/include/asm/cpufeature.h
-LIB_H += util/include/asm/unistd_32.h
-LIB_H += util/include/asm/unistd_64.h
-LIB_H += perf.h
-LIB_H += util/annotate.h
-LIB_H += util/cache.h
-LIB_H += util/callchain.h
-LIB_H += util/build-id.h
-LIB_H += util/db-export.h
-LIB_H += util/debug.h
-LIB_H += util/pmu.h
-LIB_H += util/event.h
-LIB_H += util/evsel.h
-LIB_H += util/evlist.h
-LIB_H += util/exec_cmd.h
-LIB_H += util/find-vdso-map.c
-LIB_H += util/levenshtein.h
-LIB_H += util/machine.h
-LIB_H += util/map.h
-LIB_H += util/parse-options.h
-LIB_H += util/parse-events.h
-LIB_H += util/quote.h
-LIB_H += util/util.h
-LIB_H += util/xyarray.h
-LIB_H += util/header.h
-LIB_H += util/help.h
-LIB_H += util/session.h
-LIB_H += util/ordered-events.h
-LIB_H += util/strbuf.h
-LIB_H += util/strlist.h
-LIB_H += util/strfilter.h
-LIB_H += util/svghelper.h
-LIB_H += util/tool.h
-LIB_H += util/run-command.h
-LIB_H += util/sigchain.h
-LIB_H += util/dso.h
-LIB_H += util/symbol.h
-LIB_H += util/color.h
-LIB_H += util/values.h
-LIB_H += util/sort.h
-LIB_H += util/hist.h
-LIB_H += util/comm.h
-LIB_H += util/thread.h
-LIB_H += util/thread_map.h
-LIB_H += util/trace-event.h
-LIB_H += util/probe-finder.h
-LIB_H += util/dwarf-aux.h
-LIB_H += util/probe-event.h
-LIB_H += util/pstack.h
-LIB_H += util/cpumap.h
-LIB_H += util/top.h
-LIB_H += $(ARCH_INCLUDE)
-LIB_H += util/cgroup.h
-LIB_H += $(LIB_INCLUDE)traceevent/event-parse.h
-LIB_H += util/target.h
-LIB_H += util/rblist.h
-LIB_H += util/intlist.h
-LIB_H += util/perf_regs.h
-LIB_H += util/unwind.h
-LIB_H += util/vdso.h
-LIB_H += util/tsc.h
-LIB_H += ui/helpline.h
-LIB_H += ui/progress.h
-LIB_H += ui/util.h
-LIB_H += ui/ui.h
-LIB_H += util/data.h
-LIB_H += util/kvm-stat.h
-LIB_H += util/thread-stack.h
-
-LIB_OBJS += $(OUTPUT)util/abspath.o
-LIB_OBJS += $(OUTPUT)util/alias.o
-LIB_OBJS += $(OUTPUT)util/annotate.o
-LIB_OBJS += $(OUTPUT)util/build-id.o
-LIB_OBJS += $(OUTPUT)util/config.o
-LIB_OBJS += $(OUTPUT)util/ctype.o
-LIB_OBJS += $(OUTPUT)util/db-export.o
-LIB_OBJS += $(OUTPUT)util/pmu.o
-LIB_OBJS += $(OUTPUT)util/environment.o
-LIB_OBJS += $(OUTPUT)util/event.o
-LIB_OBJS += $(OUTPUT)util/evlist.o
-LIB_OBJS += $(OUTPUT)util/evsel.o
-LIB_OBJS += $(OUTPUT)util/exec_cmd.o
-LIB_OBJS += $(OUTPUT)util/find_next_bit.o
-LIB_OBJS += $(OUTPUT)util/help.o
-LIB_OBJS += $(OUTPUT)util/kallsyms.o
-LIB_OBJS += $(OUTPUT)util/levenshtein.o
-LIB_OBJS += $(OUTPUT)util/parse-options.o
-LIB_OBJS += $(OUTPUT)util/parse-events.o
-LIB_OBJS += $(OUTPUT)util/path.o
-LIB_OBJS += $(OUTPUT)util/rbtree.o
-LIB_OBJS += $(OUTPUT)util/bitmap.o
-LIB_OBJS += $(OUTPUT)util/hweight.o
-LIB_OBJS += $(OUTPUT)util/run-command.o
-LIB_OBJS += $(OUTPUT)util/quote.o
-LIB_OBJS += $(OUTPUT)util/strbuf.o
-LIB_OBJS += $(OUTPUT)util/string.o
-LIB_OBJS += $(OUTPUT)util/strlist.o
-LIB_OBJS += $(OUTPUT)util/strfilter.o
-LIB_OBJS += $(OUTPUT)util/top.o
-LIB_OBJS += $(OUTPUT)util/usage.o
-LIB_OBJS += $(OUTPUT)util/wrapper.o
-LIB_OBJS += $(OUTPUT)util/sigchain.o
-LIB_OBJS += $(OUTPUT)util/dso.o
-LIB_OBJS += $(OUTPUT)util/symbol.o
-LIB_OBJS += $(OUTPUT)util/symbol-elf.o
-LIB_OBJS += $(OUTPUT)util/color.o
-LIB_OBJS += $(OUTPUT)util/pager.o
-LIB_OBJS += $(OUTPUT)util/header.o
-LIB_OBJS += $(OUTPUT)util/callchain.o
-LIB_OBJS += $(OUTPUT)util/values.o
-LIB_OBJS += $(OUTPUT)util/debug.o
-LIB_OBJS += $(OUTPUT)util/machine.o
-LIB_OBJS += $(OUTPUT)util/map.o
-LIB_OBJS += $(OUTPUT)util/pstack.o
-LIB_OBJS += $(OUTPUT)util/session.o
-LIB_OBJS += $(OUTPUT)util/ordered-events.o
-LIB_OBJS += $(OUTPUT)util/comm.o
-LIB_OBJS += $(OUTPUT)util/thread.o
-LIB_OBJS += $(OUTPUT)util/thread_map.o
-LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
-LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
-LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
-LIB_OBJS += $(OUTPUT)util/pmu-flex.o
-LIB_OBJS += $(OUTPUT)util/pmu-bison.o
-LIB_OBJS += $(OUTPUT)util/trace-event-read.o
-LIB_OBJS += $(OUTPUT)util/trace-event-info.o
-LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
-LIB_OBJS += $(OUTPUT)util/trace-event.o
-LIB_OBJS += $(OUTPUT)util/svghelper.o
-LIB_OBJS += $(OUTPUT)util/sort.o
-LIB_OBJS += $(OUTPUT)util/hist.o
-LIB_OBJS += $(OUTPUT)util/probe-event.o
-LIB_OBJS += $(OUTPUT)util/util.o
-LIB_OBJS += $(OUTPUT)util/xyarray.o
-LIB_OBJS += $(OUTPUT)util/cpumap.o
-LIB_OBJS += $(OUTPUT)util/cgroup.o
-LIB_OBJS += $(OUTPUT)util/target.o
-LIB_OBJS += $(OUTPUT)util/rblist.o
-LIB_OBJS += $(OUTPUT)util/intlist.o
-LIB_OBJS += $(OUTPUT)util/vdso.o
-LIB_OBJS += $(OUTPUT)util/stat.o
-LIB_OBJS += $(OUTPUT)util/record.o
-LIB_OBJS += $(OUTPUT)util/srcline.o
-LIB_OBJS += $(OUTPUT)util/data.o
-LIB_OBJS += $(OUTPUT)util/tsc.o
-LIB_OBJS += $(OUTPUT)util/cloexec.o
-LIB_OBJS += $(OUTPUT)util/thread-stack.o
-
-LIB_OBJS += $(OUTPUT)ui/setup.o
-LIB_OBJS += $(OUTPUT)ui/helpline.o
-LIB_OBJS += $(OUTPUT)ui/progress.o
-LIB_OBJS += $(OUTPUT)ui/util.o
-LIB_OBJS += $(OUTPUT)ui/hist.o
-LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
-
-LIB_OBJS += $(OUTPUT)arch/common.o
-
-LIB_OBJS += $(OUTPUT)tests/parse-events.o
-LIB_OBJS += $(OUTPUT)tests/dso-data.o
-LIB_OBJS += $(OUTPUT)tests/attr.o
-LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o
-LIB_OBJS += $(OUTPUT)tests/open-syscall.o
-LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o
-LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o
-LIB_OBJS += $(OUTPUT)tests/mmap-basic.o
-LIB_OBJS += $(OUTPUT)tests/perf-record.o
-LIB_OBJS += $(OUTPUT)tests/rdpmc.o
-LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
-LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
-LIB_OBJS += $(OUTPUT)tests/fdarray.o
-LIB_OBJS += $(OUTPUT)tests/pmu.o
-LIB_OBJS += $(OUTPUT)tests/hists_common.o
-LIB_OBJS += $(OUTPUT)tests/hists_link.o
-LIB_OBJS += $(OUTPUT)tests/hists_filter.o
-LIB_OBJS += $(OUTPUT)tests/hists_output.o
-LIB_OBJS += $(OUTPUT)tests/hists_cumulate.o
-LIB_OBJS += $(OUTPUT)tests/python-use.o
-LIB_OBJS += $(OUTPUT)tests/bp_signal.o
-LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o
-LIB_OBJS += $(OUTPUT)tests/task-exit.o
-LIB_OBJS += $(OUTPUT)tests/sw-clock.o
-ifeq ($(ARCH),x86)
-LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o
-endif
-LIB_OBJS += $(OUTPUT)tests/code-reading.o
-LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
-LIB_OBJS += $(OUTPUT)tests/parse-no-sample-id-all.o
-ifndef NO_DWARF_UNWIND
-ifeq ($(ARCH),$(filter $(ARCH),x86 arm))
-LIB_OBJS += $(OUTPUT)tests/dwarf-unwind.o
-endif
-endif
-LIB_OBJS += $(OUTPUT)tests/mmap-thread-lookup.o
-LIB_OBJS += $(OUTPUT)tests/thread-mg-share.o
-LIB_OBJS += $(OUTPUT)tests/switch-tracking.o
-
-BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
-BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
-# Benchmark modules
-BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
-BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
-ifeq ($(ARCH), x86)
-ifeq ($(IS_64_BIT), 1)
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
-endif
-endif
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
-BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
-BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
-BUILTIN_OBJS += $(OUTPUT)bench/futex-requeue.o
-
-BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
-BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o
-BUILTIN_OBJS += $(OUTPUT)builtin-help.o
-BUILTIN_OBJS += $(OUTPUT)builtin-sched.o
-BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o
-BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o
-BUILTIN_OBJS += $(OUTPUT)builtin-list.o
-BUILTIN_OBJS += $(OUTPUT)builtin-record.o
-BUILTIN_OBJS += $(OUTPUT)builtin-report.o
-BUILTIN_OBJS += $(OUTPUT)builtin-stat.o
-BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o
-BUILTIN_OBJS += $(OUTPUT)builtin-top.o
-BUILTIN_OBJS += $(OUTPUT)builtin-script.o
-BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
-BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
-BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
-BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
-BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
-BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o
-BUILTIN_OBJS += $(OUTPUT)builtin-mem.o
-
-PERFLIBS = $(LIB_FILE) $(LIBAPIKFS) $(LIBTRACEEVENT)
+PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT)
# We choose to avoid "if .. else if .. else .. endif endif"
# because maintaining the nesting to match is a pain. If
@@ -508,67 +236,9 @@ ifneq ($(OUTPUT),)
CFLAGS += -I$(OUTPUT)
endif
-ifdef NO_LIBELF
-# Remove ELF/DWARF dependent codes
-LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS))
-LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS))
-LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS))
-LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS))
-
-BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS))
-
-# Use minimal symbol handling
-LIB_OBJS += $(OUTPUT)util/symbol-minimal.o
-
-else # NO_LIBELF
-ifndef NO_DWARF
- LIB_OBJS += $(OUTPUT)util/probe-finder.o
- LIB_OBJS += $(OUTPUT)util/dwarf-aux.o
-endif # NO_DWARF
-endif # NO_LIBELF
-
-ifndef NO_LIBDW_DWARF_UNWIND
- LIB_OBJS += $(OUTPUT)util/unwind-libdw.o
- LIB_H += util/unwind-libdw.h
-endif
-
-ifndef NO_LIBUNWIND
- LIB_OBJS += $(OUTPUT)util/unwind-libunwind.o
-endif
-LIB_OBJS += $(OUTPUT)tests/keep-tracking.o
-
-ifndef NO_LIBAUDIT
- BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
-endif
-
-ifndef NO_SLANG
- LIB_OBJS += $(OUTPUT)ui/browser.o
- LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
- LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
- LIB_OBJS += $(OUTPUT)ui/browsers/map.o
- LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
- LIB_OBJS += $(OUTPUT)ui/browsers/header.o
- LIB_OBJS += $(OUTPUT)ui/tui/setup.o
- LIB_OBJS += $(OUTPUT)ui/tui/util.o
- LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
- LIB_OBJS += $(OUTPUT)ui/tui/progress.o
- LIB_H += ui/tui/tui.h
- LIB_H += ui/browser.h
- LIB_H += ui/browsers/map.h
- LIB_H += ui/keysyms.h
- LIB_H += ui/libslang.h
-endif
-
ifndef NO_GTK2
ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
-
- GTK_OBJS += $(OUTPUT)ui/gtk/browser.o
- GTK_OBJS += $(OUTPUT)ui/gtk/hists.o
- GTK_OBJS += $(OUTPUT)ui/gtk/setup.o
- GTK_OBJS += $(OUTPUT)ui/gtk/util.o
- GTK_OBJS += $(OUTPUT)ui/gtk/helpline.o
- GTK_OBJS += $(OUTPUT)ui/gtk/progress.o
- GTK_OBJS += $(OUTPUT)ui/gtk/annotate.o
+ GTK_IN := $(OUTPUT)gtk-in.o
install-gtk: $(OUTPUT)libperf-gtk.so
$(call QUIET_INSTALL, 'GTK UI') \
@@ -576,31 +246,6 @@ install-gtk: $(OUTPUT)libperf-gtk.so
$(INSTALL) $(OUTPUT)libperf-gtk.so '$(DESTDIR_SQ)$(libdir_SQ)'
endif
-ifndef NO_LIBPERL
- LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
- LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
-endif
-
-ifndef NO_LIBPYTHON
- LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
- LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
-endif
-
-ifeq ($(NO_PERF_REGS),0)
- ifeq ($(ARCH),x86)
- LIB_H += arch/x86/include/perf_regs.h
- endif
- LIB_OBJS += $(OUTPUT)util/perf_regs.o
-endif
-
-ifndef NO_LIBNUMA
- BUILTIN_OBJS += $(OUTPUT)bench/numa.o
-endif
-
-ifndef NO_ZLIB
- LIB_OBJS += $(OUTPUT)util/zlib.o
-endif
-
ifdef ASCIIDOC8
export ASCIIDOC8
endif
@@ -616,39 +261,29 @@ SHELL = $(SHELL_PATH)
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
please_set_SHELL_PATH_to_a_more_modern_shell:
- @$$(:)
+ $(Q)$$(:)
shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
strip: $(PROGRAMS) $(OUTPUT)perf
$(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
-$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \
- '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
- $(CFLAGS) -c $(filter %.c,$^) -o $@
+PERF_IN := $(OUTPUT)perf-in.o
-$(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OUTPUT)perf.o \
- $(BUILTIN_OBJS) $(LIBS) -o $@
+export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
-$(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H)
- $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $<
+$(PERF_IN): $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h FORCE
+ $(Q)$(MAKE) $(build)=perf
-$(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS)
- $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(PERF_IN) $(LIBS) -o $@
-$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
- '-DPERF_MAN_PATH="$(mandir_SQ)"' \
- '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+$(GTK_IN): FORCE
+ $(Q)$(MAKE) $(build)=gtk
-$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
- '-DPERF_MAN_PATH="$(mandir_SQ)"' \
- '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+$(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
+ $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
@@ -659,8 +294,7 @@ $(SCRIPTS) : % : %.sh
$(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
# These can record PERF_VERSION
-$(OUTPUT)perf.o perf.spec \
- $(SCRIPTS) \
+perf.spec $(SCRIPTS) \
: $(OUTPUT)PERF-VERSION-FILE
.SUFFIXES:
@@ -683,90 +317,33 @@ endif
# These two need to be here so that when O= is not used they take precedence
# over the general rule for .o
-$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -w $<
-
-$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $<
-
-$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
-$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
-$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -S $(CFLAGS) $<
-$(OUTPUT)%.o: %.S
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
-$(OUTPUT)%.s: %.S
- $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
-
-$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
- '-DPREFIX="$(prefix_SQ)"' \
- $<
-
-$(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \
- $<
-
-$(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
- -DPYTHONPATH='"$(OUTPUT)python"' \
- -DPYTHON='"$(PYTHON_WORD)"' \
- $<
-
-$(OUTPUT)tests/dwarf-unwind.o: tests/dwarf-unwind.c
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -fno-optimize-sibling-calls $<
+# get the build directory relative to $(OUTPUT),
+# or '.' if it is $(OUTPUT) itself
+__build-dir = $(subst $(OUTPUT),,$(dir $@))
+build-dir = $(if $(__build-dir),$(__build-dir),.)
-$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+single_dep: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(OUTPUT)ui/setup.o: ui/setup.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DLIBDIR='"$(libdir_SQ)"' $<
+$(OUTPUT)%.o: %.c single_dep FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
-$(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+$(OUTPUT)%.i: %.c single_dep FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
-$(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+$(OUTPUT)%.s: %.c single_dep FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
-$(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+$(OUTPUT)%-bison.o: %.c single_dep FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
-$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+$(OUTPUT)%-flex.o: %.c single_dep FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
-$(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
+$(OUTPUT)%.o: %.S single_dep FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
-$(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
-
-$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-
-$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-
-$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-
-$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $<
-
-$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $<
-
-$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $<
-
-$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
-
-$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+$(OUTPUT)%.i: %.S single_dep FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
$(OUTPUT)perf-%: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
@@ -781,58 +358,34 @@ $(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c
$(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
endif
-$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
-$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
+$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
-# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
-# we depend the various files onto their directories.
-DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(GTK_OBJS)
-DIRECTORY_DEPS += $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-# no need to add flex objects, because they depend on bison ones
-DIRECTORY_DEPS += $(OUTPUT)util/parse-events-bison.c
-DIRECTORY_DEPS += $(OUTPUT)util/pmu-bison.c
+LIBPERF_IN := $(OUTPUT)libperf-in.o
-OUTPUT_DIRECTORIES := $(sort $(dir $(DIRECTORY_DEPS)))
+$(LIBPERF_IN): FORCE
+ $(Q)$(MAKE) $(build)=libperf
-$(DIRECTORY_DEPS): | $(OUTPUT_DIRECTORIES)
-# In the second step, we make a rule to actually create these directories
-$(OUTPUT_DIRECTORIES):
- $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
+$(LIB_FILE): $(LIBPERF_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
-$(LIB_FILE): $(LIB_OBJS)
- $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
-
-# libtraceevent.a
-TE_SOURCES = $(wildcard $(TRACE_EVENT_DIR)*.[ch])
-
-LIBTRACEEVENT_FLAGS = $(QUIET_SUBDIR1) O=$(OUTPUT)
-LIBTRACEEVENT_FLAGS += CFLAGS="-g -Wall $(EXTRA_CFLAGS)"
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
-$(LIBTRACEEVENT): $(TE_SOURCES) $(OUTPUT)PERF-CFLAGS
- $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) libtraceevent.a plugins
+$(LIBTRACEEVENT): FORCE
+ $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a plugins
$(LIBTRACEEVENT)-clean:
$(call QUIET_CLEAN, libtraceevent)
- @$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
+ $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
install-traceevent-plugins: $(LIBTRACEEVENT)
- $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) install_plugins
+ $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
-LIBAPIKFS_SOURCES = $(wildcard $(LIB_PATH)fs/*.[ch] $(LIB_PATH)fd/*.[ch])
-
-# if subdir is set, we've been called from above so target has been built
-# already
-$(LIBAPIKFS): $(LIBAPIKFS_SOURCES)
-ifeq ($(subdir),)
- $(QUIET_SUBDIR0)$(LIB_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libapikfs.a
-endif
+$(LIBAPI): FORCE
+ $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
-$(LIBAPIKFS)-clean:
-ifeq ($(subdir),)
- $(call QUIET_CLEAN, libapikfs)
- @$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
-endif
+$(LIBAPI)-clean:
+ $(call QUIET_CLEAN, libapi)
+ $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
help:
@echo 'Perf make targets:'
@@ -888,17 +441,6 @@ cscope:
$(QUIET_GEN)$(RM) cscope*; \
$(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs cscope -b $(TAG_FILES)
-### Detect prefix changes
-TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\
- $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):$(plugindir_SQ)
-
-$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
- @FLAGS='$(TRACK_CFLAGS)'; \
- if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \
- echo 1>&2 " FLAGS: * new build flags or prefix"; \
- echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
- fi
-
### Testing rules
# GNU make supports exporting all variables by "export" without parameters.
@@ -981,12 +523,14 @@ $(INSTALL_DOC_TARGETS):
#
config-clean:
$(call QUIET_CLEAN, config)
- @$(MAKE) -C config/feature-checks clean >/dev/null
+ $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
-clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
+clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean config-clean
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
+ $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+ $(Q)$(RM) .config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
- $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-FEATURES $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
+ $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(python-clean)
@@ -1000,7 +544,9 @@ else
GIT-HEAD-PHONY =
endif
+FORCE:
+
.PHONY: all install clean config-clean strip install-gtk
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope .FORCE-PERF-CFLAGS
+.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE single_dep
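With the generic %.o rules above, a single object can be requested directly
and Makefile.perf hands the request to the build framework (a sketch assuming
an in-tree build with OUTPUT unset):

  $ cd tools/perf
  $ make util/map.o        # delegated to tools/build/Makefile.build for dir=util
  $ make V=1 util/map.o    # same, showing the full delegated command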
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
new file mode 100644
index 000000000000..109eb75cf7de
--- /dev/null
+++ b/tools/perf/arch/Build
@@ -0,0 +1,2 @@
+libperf-y += common.o
+libperf-y += $(ARCH)/
diff --git a/tools/perf/arch/arm/Build b/tools/perf/arch/arm/Build
new file mode 100644
index 000000000000..41bf61da476a
--- /dev/null
+++ b/tools/perf/arch/arm/Build
@@ -0,0 +1,2 @@
+libperf-y += util/
+libperf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile
index 09d62153d384..7fbca175099e 100644
--- a/tools/perf/arch/arm/Makefile
+++ b/tools/perf/arch/arm/Makefile
@@ -1,14 +1,3 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
-endif
-ifndef NO_LIBUNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o
-endif
-ifndef NO_LIBDW_DWARF_UNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libdw.o
-endif
-ifndef NO_DWARF_UNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/regs_load.o
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/dwarf-unwind.o
endif
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
new file mode 100644
index 000000000000..b30eff9bcc83
--- /dev/null
+++ b/tools/perf/arch/arm/tests/Build
@@ -0,0 +1,2 @@
+libperf-y += regs_load.o
+libperf-y += dwarf-unwind.o
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
new file mode 100644
index 000000000000..d22e3d07de3d
--- /dev/null
+++ b/tools/perf/arch/arm/util/Build
@@ -0,0 +1,4 @@
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
+
+libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/arm64/Build b/tools/perf/arch/arm64/Build
new file mode 100644
index 000000000000..54afe4a467e7
--- /dev/null
+++ b/tools/perf/arch/arm64/Build
@@ -0,0 +1 @@
+libperf-y += util/
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index 67e9b3d38e89..7fbca175099e 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -1,7 +1,3 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
-endif
-ifndef NO_LIBUNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o
endif
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
new file mode 100644
index 000000000000..e58123a8912b
--- /dev/null
+++ b/tools/perf/arch/arm64/util/Build
@@ -0,0 +1,2 @@
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
+libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/powerpc/Build b/tools/perf/arch/powerpc/Build
new file mode 100644
index 000000000000..54afe4a467e7
--- /dev/null
+++ b/tools/perf/arch/powerpc/Build
@@ -0,0 +1 @@
+libperf-y += util/
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index 6f7782bea5dd..7fbca175099e 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -1,6 +1,3 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/skip-callchain-idx.o
endif
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
new file mode 100644
index 000000000000..0af6e9b3f728
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/Build
@@ -0,0 +1,4 @@
+libperf-y += header.o
+
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
+libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/s390/Build b/tools/perf/arch/s390/Build
new file mode 100644
index 000000000000..54afe4a467e7
--- /dev/null
+++ b/tools/perf/arch/s390/Build
@@ -0,0 +1 @@
+libperf-y += util/
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 798ac7379c5f..21322e0385b8 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -1,7 +1,4 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
HAVE_KVM_STAT_SUPPORT := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/kvm-stat.o
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
new file mode 100644
index 000000000000..8a61372bb47a
--- /dev/null
+++ b/tools/perf/arch/s390/util/Build
@@ -0,0 +1,4 @@
+libperf-y += header.o
+libperf-y += kvm-stat.o
+
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sh/Build b/tools/perf/arch/sh/Build
new file mode 100644
index 000000000000..54afe4a467e7
--- /dev/null
+++ b/tools/perf/arch/sh/Build
@@ -0,0 +1 @@
+libperf-y += util/
diff --git a/tools/perf/arch/sh/Makefile b/tools/perf/arch/sh/Makefile
index 15130b50dfe3..7fbca175099e 100644
--- a/tools/perf/arch/sh/Makefile
+++ b/tools/perf/arch/sh/Makefile
@@ -1,4 +1,3 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
diff --git a/tools/perf/arch/sh/util/Build b/tools/perf/arch/sh/util/Build
new file mode 100644
index 000000000000..954e287bbb89
--- /dev/null
+++ b/tools/perf/arch/sh/util/Build
@@ -0,0 +1 @@
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sparc/Build b/tools/perf/arch/sparc/Build
new file mode 100644
index 000000000000..54afe4a467e7
--- /dev/null
+++ b/tools/perf/arch/sparc/Build
@@ -0,0 +1 @@
+libperf-y += util/
diff --git a/tools/perf/arch/sparc/Makefile b/tools/perf/arch/sparc/Makefile
index 15130b50dfe3..7fbca175099e 100644
--- a/tools/perf/arch/sparc/Makefile
+++ b/tools/perf/arch/sparc/Makefile
@@ -1,4 +1,3 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
diff --git a/tools/perf/arch/sparc/util/Build b/tools/perf/arch/sparc/util/Build
new file mode 100644
index 000000000000..954e287bbb89
--- /dev/null
+++ b/tools/perf/arch/sparc/util/Build
@@ -0,0 +1 @@
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
new file mode 100644
index 000000000000..41bf61da476a
--- /dev/null
+++ b/tools/perf/arch/x86/Build
@@ -0,0 +1,2 @@
+libperf-y += util/
+libperf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 9b21881db52f..21322e0385b8 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -1,19 +1,4 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
-ifndef NO_LIBUNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o
-endif
-ifndef NO_LIBDW_DWARF_UNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libdw.o
-endif
-ifndef NO_DWARF_UNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/regs_load.o
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/dwarf-unwind.o
-endif
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/tsc.o
-LIB_H += arch/$(ARCH)/util/tsc.h
HAVE_KVM_STAT_SUPPORT := 1
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/kvm-stat.o
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
new file mode 100644
index 000000000000..b30eff9bcc83
--- /dev/null
+++ b/tools/perf/arch/x86/tests/Build
@@ -0,0 +1,2 @@
+libperf-y += regs_load.o
+libperf-y += dwarf-unwind.o
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
new file mode 100644
index 000000000000..cfbccc4e3187
--- /dev/null
+++ b/tools/perf/arch/x86/util/Build
@@ -0,0 +1,8 @@
+libperf-y += header.o
+libperf-y += tsc.o
+libperf-y += kvm-stat.o
+
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
+
+libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
new file mode 100644
index 000000000000..5ce98023d518
--- /dev/null
+++ b/tools/perf/bench/Build
@@ -0,0 +1,11 @@
+perf-y += sched-messaging.o
+perf-y += sched-pipe.o
+perf-y += mem-memcpy.o
+perf-y += futex-hash.o
+perf-y += futex-wake.o
+perf-y += futex-requeue.o
+
+perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
+perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
+
+perf-$(CONFIG_NUMA) += numa.o
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index bedff6b5b3cf..ad0d9b5342fb 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -132,6 +132,9 @@ int bench_futex_requeue(int argc, const char **argv,
if (!fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
+ if (nrequeue > nthreads)
+ nrequeue = nthreads;
+
printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
"%d at a time.\n\n", getpid(), nthreads,
fshared ? "shared":"private", &futex1, &futex2, nrequeue);
@@ -161,20 +164,18 @@ int bench_futex_requeue(int argc, const char **argv,
/* Ok, all threads are patiently blocked, start requeueing */
gettimeofday(&start, NULL);
- for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue) {
+ while (nrequeued < nthreads) {
/*
* Do not wakeup any tasks blocked on futex1, allowing
* us to really measure futex_wait functionality.
*/
- futex_cmp_requeue(&futex1, 0, &futex2, 0,
- nrequeue, futex_flag);
+ nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
+ nrequeue, futex_flag);
}
+
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
- if (nrequeued > nthreads)
- nrequeued = nthreads;
-
update_stats(&requeued_stats, nrequeued);
update_stats(&requeuetime_stats, runtime.tv_usec);
@@ -184,7 +185,7 @@ int bench_futex_requeue(int argc, const char **argv,
}
/* everybody should be blocked on futex2, wake'em up */
- nrequeued = futex_wake(&futex2, nthreads, futex_flag);
+ nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
if (nthreads != nrequeued)
warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index ebfa163b80b5..ba5efa4710b5 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -180,7 +180,7 @@ static const struct option options[] = {
OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"),
OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
- OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "bzero the initial allocations"),
+ OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"),
OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
/* Special option string parsing callbacks: */
@@ -828,6 +828,9 @@ static int count_process_nodes(int process_nr)
td = g->threads + task_nr;
node = numa_node_of_cpu(td->curr_cpu);
+ if (node < 0) /* curr_cpu was likely still -1 */
+ return 0;
+
node_present[node] = 1;
}
@@ -882,6 +885,11 @@ static void calc_convergence_compression(int *strong)
for (p = 0; p < g->p.nr_proc; p++) {
unsigned int nodes = count_process_nodes(p);
+ if (!nodes) {
+ *strong = 0;
+ return;
+ }
+
nodes_min = min(nodes, nodes_min);
nodes_max = max(nodes, nodes_max);
}
@@ -1395,7 +1403,7 @@ static void print_res(const char *name, double val,
if (!name)
name = "main,";
- if (g->p.show_quiet)
+ if (!g->p.show_quiet)
printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
else
printf(" %14.3f %s\n", val, txt_long);
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 747f86103599..71bf7451c0ca 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -208,7 +208,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out;
}
- ret = perf_session__process_events(session, &ann->tool);
+ ret = perf_session__process_events(session);
if (ret)
goto out;
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 50e6b66aea1f..d47a0cdc71c9 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -125,8 +125,7 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
return ret;
}
-static int build_id_cache__add_kcore(const char *filename, const char *debugdir,
- bool force)
+static int build_id_cache__add_kcore(const char *filename, bool force)
{
char dir[32], sbuildid[BUILD_ID_SIZE * 2 + 1];
char from_dir[PATH_MAX], to_dir[PATH_MAX];
@@ -143,7 +142,7 @@ static int build_id_cache__add_kcore(const char *filename, const char *debugdir,
return -1;
scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
- debugdir, sbuildid);
+ buildid_dir, sbuildid);
if (!force &&
!build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
@@ -155,7 +154,7 @@ static int build_id_cache__add_kcore(const char *filename, const char *debugdir,
return -1;
scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s",
- debugdir, sbuildid, dir);
+ buildid_dir, sbuildid, dir);
if (mkdir_p(to_dir, 0755))
return -1;
@@ -183,7 +182,7 @@ static int build_id_cache__add_kcore(const char *filename, const char *debugdir,
return 0;
}
-static int build_id_cache__add_file(const char *filename, const char *debugdir)
+static int build_id_cache__add_file(const char *filename)
{
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
u8 build_id[BUILD_ID_SIZE];
@@ -195,16 +194,14 @@ static int build_id_cache__add_file(const char *filename, const char *debugdir)
}
build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
- err = build_id_cache__add_s(sbuild_id, debugdir, filename,
+ err = build_id_cache__add_s(sbuild_id, filename,
false, false);
- if (verbose)
- pr_info("Adding %s %s: %s\n", sbuild_id, filename,
- err ? "FAIL" : "Ok");
+ pr_debug("Adding %s %s: %s\n", sbuild_id, filename,
+ err ? "FAIL" : "Ok");
return err;
}
-static int build_id_cache__remove_file(const char *filename,
- const char *debugdir)
+static int build_id_cache__remove_file(const char *filename)
{
u8 build_id[BUILD_ID_SIZE];
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
@@ -217,10 +214,34 @@ static int build_id_cache__remove_file(const char *filename,
}
build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
- err = build_id_cache__remove_s(sbuild_id, debugdir);
- if (verbose)
- pr_info("Removing %s %s: %s\n", sbuild_id, filename,
- err ? "FAIL" : "Ok");
+ err = build_id_cache__remove_s(sbuild_id);
+ pr_debug("Removing %s %s: %s\n", sbuild_id, filename,
+ err ? "FAIL" : "Ok");
+
+ return err;
+}
+
+static int build_id_cache__purge_path(const char *pathname)
+{
+ struct strlist *list;
+ struct str_node *pos;
+ int err;
+
+ err = build_id_cache__list_build_ids(pathname, &list);
+ if (err)
+ goto out;
+
+ strlist__for_each(pos, list) {
+ err = build_id_cache__remove_s(pos->s);
+ pr_debug("Removing %s %s: %s\n", pos->s, pathname,
+ err ? "FAIL" : "Ok");
+ if (err)
+ break;
+ }
+ strlist__delete(list);
+
+out:
+ pr_debug("Purging %s: %s\n", pathname, err ? "FAIL" : "Ok");
return err;
}
@@ -252,13 +273,12 @@ static int build_id_cache__fprintf_missing(struct perf_session *session, FILE *f
return 0;
}
-static int build_id_cache__update_file(const char *filename,
- const char *debugdir)
+static int build_id_cache__update_file(const char *filename)
{
u8 build_id[BUILD_ID_SIZE];
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
- int err;
+ int err = 0;
if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
@@ -266,14 +286,14 @@ static int build_id_cache__update_file(const char *filename,
}
build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
- err = build_id_cache__remove_s(sbuild_id, debugdir);
- if (!err) {
- err = build_id_cache__add_s(sbuild_id, debugdir, filename,
- false, false);
- }
- if (verbose)
- pr_info("Updating %s %s: %s\n", sbuild_id, filename,
- err ? "FAIL" : "Ok");
+ if (build_id_cache__cached(sbuild_id))
+ err = build_id_cache__remove_s(sbuild_id);
+
+ if (!err)
+ err = build_id_cache__add_s(sbuild_id, filename, false, false);
+
+ pr_debug("Updating %s %s: %s\n", sbuild_id, filename,
+ err ? "FAIL" : "Ok");
return err;
}
@@ -287,6 +307,7 @@ int cmd_buildid_cache(int argc, const char **argv,
bool force = false;
char const *add_name_list_str = NULL,
*remove_name_list_str = NULL,
+ *purge_name_list_str = NULL,
*missing_filename = NULL,
*update_name_list_str = NULL,
*kcore_filename = NULL;
@@ -304,6 +325,8 @@ int cmd_buildid_cache(int argc, const char **argv,
"file", "kcore file to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
+ OPT_STRING('p', "purge", &purge_name_list_str, "path list",
+ "path(s) to remove (remove old caches too)"),
OPT_STRING('M', "missing", &missing_filename, "file",
"to find missing build ids in the cache"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -320,6 +343,11 @@ int cmd_buildid_cache(int argc, const char **argv,
argc = parse_options(argc, argv, buildid_cache_options,
buildid_cache_usage, 0);
+ if (argc || (!add_name_list_str && !kcore_filename &&
+ !remove_name_list_str && !purge_name_list_str &&
+ !missing_filename && !update_name_list_str))
+ usage_with_options(buildid_cache_usage, buildid_cache_options);
+
if (missing_filename) {
file.path = missing_filename;
file.force = force;
@@ -338,7 +366,7 @@ int cmd_buildid_cache(int argc, const char **argv,
list = strlist__new(true, add_name_list_str);
if (list) {
strlist__for_each(pos, list)
- if (build_id_cache__add_file(pos->s, buildid_dir)) {
+ if (build_id_cache__add_file(pos->s)) {
if (errno == EEXIST) {
pr_debug("%s already in the cache\n",
pos->s);
@@ -356,7 +384,25 @@ int cmd_buildid_cache(int argc, const char **argv,
list = strlist__new(true, remove_name_list_str);
if (list) {
strlist__for_each(pos, list)
- if (build_id_cache__remove_file(pos->s, buildid_dir)) {
+ if (build_id_cache__remove_file(pos->s)) {
+ if (errno == ENOENT) {
+ pr_debug("%s wasn't in the cache\n",
+ pos->s);
+ continue;
+ }
+ pr_warning("Couldn't remove %s: %s\n",
+ pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
+ }
+
+ strlist__delete(list);
+ }
+ }
+
+ if (purge_name_list_str) {
+ list = strlist__new(true, purge_name_list_str);
+ if (list) {
+ strlist__for_each(pos, list)
+ if (build_id_cache__purge_path(pos->s)) {
if (errno == ENOENT) {
pr_debug("%s wasn't in the cache\n",
pos->s);
@@ -377,7 +423,7 @@ int cmd_buildid_cache(int argc, const char **argv,
list = strlist__new(true, update_name_list_str);
if (list) {
strlist__for_each(pos, list)
- if (build_id_cache__update_file(pos->s, buildid_dir)) {
+ if (build_id_cache__update_file(pos->s)) {
if (errno == ENOENT) {
pr_debug("%s wasn't in the cache\n",
pos->s);
@@ -391,8 +437,7 @@ int cmd_buildid_cache(int argc, const char **argv,
}
}
- if (kcore_filename &&
- build_id_cache__add_kcore(kcore_filename, buildid_dir, force))
+ if (kcore_filename && build_id_cache__add_kcore(kcore_filename, force))
pr_warning("Couldn't add %s\n", kcore_filename);
out:
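
The -r/-p handling above treats a missing cache entry (ENOENT) as harmless noise and only warns on other failures. A minimal standalone sketch of that error policy, with unlink() standing in for build_id_cache__remove_s() and every name below purely illustrative:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* stand-in for build_id_cache__remove_s(); real cache handling omitted */
static int remove_one(const char *path)
{
	return unlink(path);
}

int main(int argc, char **argv)
{
	int i, ret = 0;

	for (i = 1; i < argc; i++) {
		if (remove_one(argv[i]) == 0)
			continue;
		if (errno == ENOENT) {
			/* not being cached is not an error, keep going */
			fprintf(stderr, "%s wasn't in the cache\n", argv[i]);
			continue;
		}
		fprintf(stderr, "Couldn't remove %s: %s\n",
			argv[i], strerror(errno));
		ret = 1;
	}
	return ret;
}
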
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index ed3873b3e238..feb420f74c2d 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -74,7 +74,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
*/
if (with_hits || perf_data_file__is_pipe(&file))
- perf_session__process_events(session, &build_id__mark_dso_hit_ops);
+ perf_session__process_events(session);
perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
perf_session__delete(session);
diff --git a/tools/perf/builtin-data.c b/tools/perf/builtin-data.c
new file mode 100644
index 000000000000..d6525bc54d13
--- /dev/null
+++ b/tools/perf/builtin-data.c
@@ -0,0 +1,123 @@
+#include <linux/compiler.h>
+#include "builtin.h"
+#include "perf.h"
+#include "debug.h"
+#include "parse-options.h"
+#include "data-convert-bt.h"
+
+typedef int (*data_cmd_fn_t)(int argc, const char **argv, const char *prefix);
+
+struct data_cmd {
+ const char *name;
+ const char *summary;
+ data_cmd_fn_t fn;
+};
+
+static struct data_cmd data_cmds[];
+
+#define for_each_cmd(cmd) \
+ for (cmd = data_cmds; cmd && cmd->name; cmd++)
+
+static const struct option data_options[] = {
+ OPT_END()
+};
+
+static const char * const data_subcommands[] = { "convert", NULL };
+
+static const char *data_usage[] = {
+ "perf data [<common options>] <command> [<options>]",
+ NULL
+};
+
+static void print_usage(void)
+{
+ struct data_cmd *cmd;
+
+ printf("Usage:\n");
+ printf("\t%s\n\n", data_usage[0]);
+ printf("\tAvailable commands:\n");
+
+ for_each_cmd(cmd) {
+ printf("\t %s\t- %s\n", cmd->name, cmd->summary);
+ }
+
+ printf("\n");
+}
+
+static const char * const data_convert_usage[] = {
+ "perf data convert [<options>]",
+ NULL
+};
+
+static int cmd_data_convert(int argc, const char **argv,
+ const char *prefix __maybe_unused)
+{
+ const char *to_ctf = NULL;
+ bool force = false;
+ const struct option options[] = {
+ OPT_INCR('v', "verbose", &verbose, "be more verbose"),
+ OPT_STRING('i', "input", &input_name, "file", "input file name"),
+#ifdef HAVE_LIBBABELTRACE_SUPPORT
+ OPT_STRING(0, "to-ctf", &to_ctf, NULL, "Convert to CTF format"),
+#endif
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_END()
+ };
+
+#ifndef HAVE_LIBBABELTRACE_SUPPORT
+ pr_err("No conversion support compiled in.\n");
+ return -1;
+#endif
+
+ argc = parse_options(argc, argv, options,
+ data_convert_usage, 0);
+ if (argc) {
+ usage_with_options(data_convert_usage, options);
+ return -1;
+ }
+
+ if (to_ctf) {
+#ifdef HAVE_LIBBABELTRACE_SUPPORT
+ return bt_convert__perf2ctf(input_name, to_ctf, force);
+#else
+ pr_err("The libbabeltrace support is not compiled in.\n");
+ return -1;
+#endif
+ }
+
+ return 0;
+}
+
+static struct data_cmd data_cmds[] = {
+ { "convert", "converts data file between formats", cmd_data_convert },
+ { .name = NULL, },
+};
+
+int cmd_data(int argc, const char **argv, const char *prefix)
+{
+ struct data_cmd *cmd;
+ const char *cmdstr;
+
+ /* No command specified. */
+ if (argc < 2)
+ goto usage;
+
+ argc = parse_options_subcommand(argc, argv, data_options, data_subcommands, data_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (argc < 1)
+ goto usage;
+
+ cmdstr = argv[0];
+
+ for_each_cmd(cmd) {
+ if (strcmp(cmd->name, cmdstr))
+ continue;
+
+ return cmd->fn(argc, argv, prefix);
+ }
+
+ pr_err("Unknown command: %s\n", cmdstr);
+usage:
+ print_usage();
+ return -1;
+}
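
The new builtin-data.c dispatches on a small name/handler table via for_each_cmd(). A self-contained sketch of the same table-dispatch pattern, outside the perf option-parsing machinery; the command name and handler here are made up:

#include <stdio.h>
#include <string.h>

typedef int (*cmd_fn_t)(int argc, char **argv);

struct cmd {
	const char *name;
	const char *summary;
	cmd_fn_t fn;
};

/* stand-in for cmd_data_convert(); the real conversion is omitted */
static int cmd_convert(int argc, char **argv)
{
	(void)argc; (void)argv;
	printf("convert would run here\n");
	return 0;
}

static const struct cmd cmds[] = {
	{ "convert", "converts data file between formats", cmd_convert },
	{ NULL, NULL, NULL },
};

int main(int argc, char **argv)
{
	const struct cmd *c;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <command>\n\tavailable commands:\n", argv[0]);
		for (c = cmds; c->name; c++)
			fprintf(stderr, "\t  %s\t- %s\n", c->name, c->summary);
		return 1;
	}

	/* match argv[1] against the table and run the handler */
	for (c = cmds; c->name; c++)
		if (!strcmp(c->name, argv[1]))
			return c->fn(argc - 1, argv + 1);

	fprintf(stderr, "Unknown command: %s\n", argv[1]);
	return 1;
}
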
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 74aada554b12..df6307b4050a 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -747,7 +747,7 @@ static int __cmd_diff(void)
goto out_delete;
}
- ret = perf_session__process_events(d->session, &tool);
+ ret = perf_session__process_events(d->session);
if (ret) {
pr_err("Failed to process %s\n", d->file.path);
goto out_delete;
@@ -791,6 +791,8 @@ static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
@@ -802,7 +804,7 @@ static const struct option options[] = {
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
" Please refer the man page for the complete list."),
- OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
+ OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 0f93f859b782..695ec5a50cf2 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -24,6 +24,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
struct perf_data_file file = {
.path = file_name,
.mode = PERF_DATA_MODE_READ,
+ .force = details->force,
};
session = perf_session__new(&file, 0, NULL);
@@ -47,6 +48,7 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
"Show all event attr details"),
OPT_BOOLEAN('g', "group", &details.event_group,
"Show event group information"),
+ OPT_BOOLEAN('f', "force", &details.force, "don't complain, do it"),
OPT_END()
};
const char * const evlist_usage[] = {
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 25d20628212e..36486eade1ef 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -437,7 +437,18 @@ int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
HELP_FORMAT_INFO),
OPT_END(),
};
- const char * const builtin_help_usage[] = {
+ const char * const builtin_help_subcommands[] = {
+ "buildid-cache", "buildid-list", "diff", "evlist", "help", "list",
+ "record", "report", "bench", "stat", "timechart", "top", "annotate",
+ "script", "sched", "kmem", "lock", "kvm", "test", "inject", "mem", "data",
+#ifdef HAVE_LIBELF_SUPPORT
+ "probe",
+#endif
+#ifdef HAVE_LIBAUDIT_SUPPORT
+ "trace",
+#endif
+ NULL };
+ const char *builtin_help_usage[] = {
"perf help [--all] [--man|--web|--info] [command]",
NULL
};
@@ -448,8 +459,8 @@ int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
perf_config(perf_help_config, &help_format);
- argc = parse_options(argc, argv, builtin_help_options,
- builtin_help_usage, 0);
+ argc = parse_options_subcommand(argc, argv, builtin_help_options,
+ builtin_help_subcommands, builtin_help_usage, 0);
if (show_all) {
printf("\n usage: %s\n\n", perf_usage_string);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index a13641e066f5..40a33d7334cc 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -53,6 +53,13 @@ static int perf_event__repipe_synth(struct perf_tool *tool,
return 0;
}
+static int perf_event__repipe_oe_synth(struct perf_tool *tool,
+ union perf_event *event,
+ struct ordered_events *oe __maybe_unused)
+{
+ return perf_event__repipe_synth(tool, event);
+}
+
static int perf_event__repipe_op2_synth(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session
@@ -359,8 +366,6 @@ static int __cmd_inject(struct perf_inject *inject)
} else if (inject->sched_stat) {
struct perf_evsel *evsel;
- inject->tool.ordered_events = true;
-
evlist__for_each(session->evlist, evsel) {
const char *name = perf_evsel__name(evsel);
@@ -379,7 +384,7 @@ static int __cmd_inject(struct perf_inject *inject)
if (!file_out->is_pipe)
lseek(fd, session->header.data_offset, SEEK_SET);
- ret = perf_session__process_events(session, &inject->tool);
+ ret = perf_session__process_events(session);
if (!file_out->is_pipe) {
if (inject->build_ids)
@@ -408,7 +413,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
.unthrottle = perf_event__repipe,
.attr = perf_event__repipe_attr,
.tracing_data = perf_event__repipe_op2_synth,
- .finished_round = perf_event__repipe_op2_synth,
+ .finished_round = perf_event__repipe_oe_synth,
.build_id = perf_event__repipe_op2_synth,
.id_index = perf_event__repipe_op2_synth,
},
@@ -438,6 +443,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
"be more verbose (show build ids, etc)"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
"kallsyms pathname"),
+ OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
OPT_END()
};
const char * const inject_usage[] = {
@@ -458,6 +464,8 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
}
+ inject.tool.ordered_events = inject.sched_stat;
+
file.path = inject.input_name;
inject.session = perf_session__new(&file, true, &inject.tool);
if (inject.session == NULL)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index f295141025bc..1634186d537c 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -20,6 +20,12 @@
#include <linux/rbtree.h>
#include <linux/string.h>
+#include <locale.h>
+
+static int kmem_slab;
+static int kmem_page;
+
+static long kmem_page_size;
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
@@ -225,6 +231,244 @@ static int perf_evsel__process_free_event(struct perf_evsel *evsel,
return 0;
}
+static u64 total_page_alloc_bytes;
+static u64 total_page_free_bytes;
+static u64 total_page_nomatch_bytes;
+static u64 total_page_fail_bytes;
+static unsigned long nr_page_allocs;
+static unsigned long nr_page_frees;
+static unsigned long nr_page_fails;
+static unsigned long nr_page_nomatch;
+
+static bool use_pfn;
+
+#define MAX_MIGRATE_TYPES 6
+#define MAX_PAGE_ORDER 11
+
+static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
+
+struct page_stat {
+ struct rb_node node;
+ u64 page;
+ int order;
+ unsigned gfp_flags;
+ unsigned migrate_type;
+ u64 alloc_bytes;
+ u64 free_bytes;
+ int nr_alloc;
+ int nr_free;
+};
+
+static struct rb_root page_tree;
+static struct rb_root page_alloc_tree;
+static struct rb_root page_alloc_sorted;
+
+static struct page_stat *search_page(unsigned long page, bool create)
+{
+ struct rb_node **node = &page_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct page_stat *data;
+
+ while (*node) {
+ s64 cmp;
+
+ parent = *node;
+ data = rb_entry(*node, struct page_stat, node);
+
+ cmp = data->page - page;
+ if (cmp < 0)
+ node = &parent->rb_left;
+ else if (cmp > 0)
+ node = &parent->rb_right;
+ else
+ return data;
+ }
+
+ if (!create)
+ return NULL;
+
+ data = zalloc(sizeof(*data));
+ if (data != NULL) {
+ data->page = page;
+
+ rb_link_node(&data->node, parent, node);
+ rb_insert_color(&data->node, &page_tree);
+ }
+
+ return data;
+}
+
+static int page_stat_cmp(struct page_stat *a, struct page_stat *b)
+{
+ if (a->page > b->page)
+ return -1;
+ if (a->page < b->page)
+ return 1;
+ if (a->order > b->order)
+ return -1;
+ if (a->order < b->order)
+ return 1;
+ if (a->migrate_type > b->migrate_type)
+ return -1;
+ if (a->migrate_type < b->migrate_type)
+ return 1;
+ if (a->gfp_flags > b->gfp_flags)
+ return -1;
+ if (a->gfp_flags < b->gfp_flags)
+ return 1;
+ return 0;
+}
+
+static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create)
+{
+ struct rb_node **node = &page_alloc_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct page_stat *data;
+
+ while (*node) {
+ s64 cmp;
+
+ parent = *node;
+ data = rb_entry(*node, struct page_stat, node);
+
+ cmp = page_stat_cmp(data, pstat);
+ if (cmp < 0)
+ node = &parent->rb_left;
+ else if (cmp > 0)
+ node = &parent->rb_right;
+ else
+ return data;
+ }
+
+ if (!create)
+ return NULL;
+
+ data = zalloc(sizeof(*data));
+ if (data != NULL) {
+ data->page = pstat->page;
+ data->order = pstat->order;
+ data->gfp_flags = pstat->gfp_flags;
+ data->migrate_type = pstat->migrate_type;
+
+ rb_link_node(&data->node, parent, node);
+ rb_insert_color(&data->node, &page_alloc_tree);
+ }
+
+ return data;
+}
+
+static bool valid_page(u64 pfn_or_page)
+{
+ if (use_pfn && pfn_or_page == -1UL)
+ return false;
+ if (!use_pfn && pfn_or_page == 0)
+ return false;
+ return true;
+}
+
+static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ u64 page;
+ unsigned int order = perf_evsel__intval(evsel, sample, "order");
+ unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
+ unsigned int migrate_type = perf_evsel__intval(evsel, sample,
+ "migratetype");
+ u64 bytes = kmem_page_size << order;
+ struct page_stat *pstat;
+ struct page_stat this = {
+ .order = order,
+ .gfp_flags = gfp_flags,
+ .migrate_type = migrate_type,
+ };
+
+ if (use_pfn)
+ page = perf_evsel__intval(evsel, sample, "pfn");
+ else
+ page = perf_evsel__intval(evsel, sample, "page");
+
+ nr_page_allocs++;
+ total_page_alloc_bytes += bytes;
+
+ if (!valid_page(page)) {
+ nr_page_fails++;
+ total_page_fail_bytes += bytes;
+
+ return 0;
+ }
+
+ /*
+ * This is to find the current page (with correct gfp flags and
+ * migrate type) at free event.
+ */
+ pstat = search_page(page, true);
+ if (pstat == NULL)
+ return -ENOMEM;
+
+ pstat->order = order;
+ pstat->gfp_flags = gfp_flags;
+ pstat->migrate_type = migrate_type;
+
+ this.page = page;
+ pstat = search_page_alloc_stat(&this, true);
+ if (pstat == NULL)
+ return -ENOMEM;
+
+ pstat->nr_alloc++;
+ pstat->alloc_bytes += bytes;
+
+ order_stats[order][migrate_type]++;
+
+ return 0;
+}
+
+static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ u64 page;
+ unsigned int order = perf_evsel__intval(evsel, sample, "order");
+ u64 bytes = kmem_page_size << order;
+ struct page_stat *pstat;
+ struct page_stat this = {
+ .order = order,
+ };
+
+ if (use_pfn)
+ page = perf_evsel__intval(evsel, sample, "pfn");
+ else
+ page = perf_evsel__intval(evsel, sample, "page");
+
+ nr_page_frees++;
+ total_page_free_bytes += bytes;
+
+ pstat = search_page(page, false);
+ if (pstat == NULL) {
+ pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
+ page, order);
+
+ nr_page_nomatch++;
+ total_page_nomatch_bytes += bytes;
+
+ return 0;
+ }
+
+ this.page = page;
+ this.gfp_flags = pstat->gfp_flags;
+ this.migrate_type = pstat->migrate_type;
+
+ rb_erase(&pstat->node, &page_tree);
+ free(pstat);
+
+ pstat = search_page_alloc_stat(&this, false);
+ if (pstat == NULL)
+ return -ENOENT;
+
+ pstat->nr_free++;
+ pstat->free_bytes += bytes;
+
+ return 0;
+}
+
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
struct perf_sample *sample);
@@ -269,16 +513,17 @@ static double fragmentation(unsigned long n_req, unsigned long n_alloc)
return 100.0 - (100.0 * n_req / n_alloc);
}
-static void __print_result(struct rb_root *root, struct perf_session *session,
- int n_lines, int is_caller)
+static void __print_slab_result(struct rb_root *root,
+ struct perf_session *session,
+ int n_lines, int is_caller)
{
struct rb_node *next;
struct machine *machine = &session->machines.host;
- printf("%.102s\n", graph_dotted_line);
+ printf("%.105s\n", graph_dotted_line);
printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
- printf("%.102s\n", graph_dotted_line);
+ printf("%.105s\n", graph_dotted_line);
next = rb_first(root);
@@ -304,7 +549,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
printf(" %-34s |", buf);
- printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
+ printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
(unsigned long long)data->bytes_alloc,
(unsigned long)data->bytes_alloc / data->hit,
(unsigned long long)data->bytes_req,
@@ -317,30 +562,137 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
}
if (n_lines == -1)
- printf(" ... | ... | ... | ... | ... | ... \n");
+ printf(" ... | ... | ... | ... | ... | ... \n");
- printf("%.102s\n", graph_dotted_line);
+ printf("%.105s\n", graph_dotted_line);
}
-static void print_summary(void)
+static const char * const migrate_type_str[] = {
+ "UNMOVABL",
+ "RECLAIM",
+ "MOVABLE",
+ "RESERVED",
+ "CMA/ISLT",
+ "UNKNOWN",
+};
+
+static void __print_page_result(struct rb_root *root,
+ struct perf_session *session __maybe_unused,
+ int n_lines)
{
- printf("\nSUMMARY\n=======\n");
- printf("Total bytes requested: %lu\n", total_requested);
- printf("Total bytes allocated: %lu\n", total_allocated);
- printf("Total bytes wasted on internal fragmentation: %lu\n",
+ struct rb_node *next = rb_first(root);
+ const char *format;
+
+ printf("\n%.80s\n", graph_dotted_line);
+ printf(" %-16s | Total alloc (KB) | Hits | Order | Mig.type | GFP flags\n",
+ use_pfn ? "PFN" : "Page");
+ printf("%.80s\n", graph_dotted_line);
+
+ if (use_pfn)
+ format = " %16llu | %'16llu | %'9d | %5d | %8s | %08lx\n";
+ else
+ format = " %016llx | %'16llu | %'9d | %5d | %8s | %08lx\n";
+
+ while (next && n_lines--) {
+ struct page_stat *data;
+
+ data = rb_entry(next, struct page_stat, node);
+
+ printf(format, (unsigned long long)data->page,
+ (unsigned long long)data->alloc_bytes / 1024,
+ data->nr_alloc, data->order,
+ migrate_type_str[data->migrate_type],
+ (unsigned long)data->gfp_flags);
+
+ next = rb_next(next);
+ }
+
+ if (n_lines == -1)
+ printf(" ... | ... | ... | ... | ... | ... \n");
+
+ printf("%.80s\n", graph_dotted_line);
+}
+
+static void print_slab_summary(void)
+{
+ printf("\nSUMMARY (SLAB allocator)");
+ printf("\n========================\n");
+ printf("Total bytes requested: %'lu\n", total_requested);
+ printf("Total bytes allocated: %'lu\n", total_allocated);
+ printf("Total bytes wasted on internal fragmentation: %'lu\n",
total_allocated - total_requested);
printf("Internal fragmentation: %f%%\n",
fragmentation(total_requested, total_allocated));
- printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
+ printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}
-static void print_result(struct perf_session *session)
+static void print_page_summary(void)
+{
+ int o, m;
+ u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
+ u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
+
+ printf("\nSUMMARY (page allocator)");
+ printf("\n========================\n");
+ printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
+ nr_page_allocs, total_page_alloc_bytes / 1024);
+ printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
+ nr_page_frees, total_page_free_bytes / 1024);
+ printf("\n");
+
+ printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
+ nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
+ printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
+ nr_page_allocs - nr_alloc_freed,
+ (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
+ printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
+ nr_page_nomatch, total_page_nomatch_bytes / 1024);
+ printf("\n");
+
+ printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
+ nr_page_fails, total_page_fail_bytes / 1024);
+ printf("\n");
+
+ printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
+ "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
+ printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
+ graph_dotted_line, graph_dotted_line, graph_dotted_line,
+ graph_dotted_line, graph_dotted_line);
+
+ for (o = 0; o < MAX_PAGE_ORDER; o++) {
+ printf("%5d", o);
+ for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
+ if (order_stats[o][m])
+ printf(" %'12d", order_stats[o][m]);
+ else
+ printf(" %12c", '.');
+ }
+ printf("\n");
+ }
+}
+
+static void print_slab_result(struct perf_session *session)
{
if (caller_flag)
- __print_result(&root_caller_sorted, session, caller_lines, 1);
+ __print_slab_result(&root_caller_sorted, session, caller_lines, 1);
if (alloc_flag)
- __print_result(&root_alloc_sorted, session, alloc_lines, 0);
- print_summary();
+ __print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
+ print_slab_summary();
+}
+
+static void print_page_result(struct perf_session *session)
+{
+ if (alloc_flag)
+ __print_page_result(&page_alloc_sorted, session, alloc_lines);
+ print_page_summary();
+}
+
+static void print_result(struct perf_session *session)
+{
+ if (kmem_slab)
+ print_slab_result(session);
+ if (kmem_page)
+ print_page_result(session);
}
struct sort_dimension {
@@ -352,8 +704,8 @@ struct sort_dimension {
static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
-static void sort_insert(struct rb_root *root, struct alloc_stat *data,
- struct list_head *sort_list)
+static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
+ struct list_head *sort_list)
{
struct rb_node **new = &(root->rb_node);
struct rb_node *parent = NULL;
@@ -382,8 +734,8 @@ static void sort_insert(struct rb_root *root, struct alloc_stat *data,
rb_insert_color(&data->node, root);
}
-static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
- struct list_head *sort_list)
+static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
+ struct list_head *sort_list)
{
struct rb_node *node;
struct alloc_stat *data;
@@ -395,26 +747,79 @@ static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
rb_erase(node, root);
data = rb_entry(node, struct alloc_stat, node);
- sort_insert(root_sorted, data, sort_list);
+ sort_slab_insert(root_sorted, data, sort_list);
+ }
+}
+
+static void sort_page_insert(struct rb_root *root, struct page_stat *data)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*new) {
+ struct page_stat *this;
+ int cmp = 0;
+
+ this = rb_entry(*new, struct page_stat, node);
+ parent = *new;
+
+ /* TODO: support more sort key */
+ cmp = data->alloc_bytes - this->alloc_bytes;
+
+ if (cmp > 0)
+ new = &parent->rb_left;
+ else
+ new = &parent->rb_right;
+ }
+
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted)
+{
+ struct rb_node *node;
+ struct page_stat *data;
+
+ for (;;) {
+ node = rb_first(root);
+ if (!node)
+ break;
+
+ rb_erase(node, root);
+ data = rb_entry(node, struct page_stat, node);
+ sort_page_insert(root_sorted, data);
}
}
static void sort_result(void)
{
- __sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
- __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
+ if (kmem_slab) {
+ __sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
+ &alloc_sort);
+ __sort_slab_result(&root_caller_stat, &root_caller_sorted,
+ &caller_sort);
+ }
+ if (kmem_page) {
+ __sort_page_result(&page_alloc_tree, &page_alloc_sorted);
+ }
}
static int __cmd_kmem(struct perf_session *session)
{
int err = -EINVAL;
+ struct perf_evsel *evsel;
const struct perf_evsel_str_handler kmem_tracepoints[] = {
+ /* slab allocator */
{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
{ "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
{ "kmem:kfree", perf_evsel__process_free_event, },
{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
+ /* page allocator */
+ { "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
+ { "kmem:mm_page_free", perf_evsel__process_page_free_event, },
};
if (!perf_session__has_traces(session, "kmem record"))
@@ -425,10 +830,20 @@ static int __cmd_kmem(struct perf_session *session)
goto out;
}
+ evlist__for_each(session->evlist, evsel) {
+ if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
+ perf_evsel__field(evsel, "pfn")) {
+ use_pfn = true;
+ break;
+ }
+ }
+
setup_pager();
- err = perf_session__process_events(session, &perf_kmem);
- if (err != 0)
+ err = perf_session__process_events(session);
+ if (err != 0) {
+ pr_err("error during process events: %d\n", err);
goto out;
+ }
sort_result();
print_result(session);
out:
@@ -559,6 +974,7 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
{
char *tok;
char *str = strdup(arg);
+ char *pos = str;
if (!str) {
pr_err("%s: strdup failed\n", __func__);
@@ -566,7 +982,7 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
}
while (true) {
- tok = strsep(&str, ",");
+ tok = strsep(&pos, ",");
if (!tok)
break;
if (sort_dimension__add(tok, sort_list) < 0) {
@@ -610,6 +1026,22 @@ static int parse_alloc_opt(const struct option *opt __maybe_unused,
return 0;
}
+static int parse_slab_opt(const struct option *opt __maybe_unused,
+ const char *arg __maybe_unused,
+ int unset __maybe_unused)
+{
+ kmem_slab = (kmem_page + 1);
+ return 0;
+}
+
+static int parse_page_opt(const struct option *opt __maybe_unused,
+ const char *arg __maybe_unused,
+ int unset __maybe_unused)
+{
+ kmem_page = (kmem_slab + 1);
+ return 0;
+}
+
static int parse_line_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
@@ -632,6 +1064,8 @@ static int __cmd_record(int argc, const char **argv)
{
const char * const record_args[] = {
"record", "-a", "-R", "-c", "1",
+ };
+ const char * const slab_events[] = {
"-e", "kmem:kmalloc",
"-e", "kmem:kmalloc_node",
"-e", "kmem:kfree",
@@ -639,10 +1073,19 @@ static int __cmd_record(int argc, const char **argv)
"-e", "kmem:kmem_cache_alloc_node",
"-e", "kmem:kmem_cache_free",
};
+ const char * const page_events[] = {
+ "-e", "kmem:mm_page_alloc",
+ "-e", "kmem:mm_page_free",
+ };
unsigned int rec_argc, i, j;
const char **rec_argv;
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ if (kmem_slab)
+ rec_argc += ARRAY_SIZE(slab_events);
+ if (kmem_page)
+ rec_argc += ARRAY_SIZE(page_events);
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
@@ -651,6 +1094,15 @@ static int __cmd_record(int argc, const char **argv)
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
+ if (kmem_slab) {
+ for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
+ rec_argv[i] = strdup(slab_events[j]);
+ }
+ if (kmem_page) {
+ for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
+ rec_argv[i] = strdup(page_events[j]);
+ }
+
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
@@ -660,8 +1112,13 @@ static int __cmd_record(int argc, const char **argv)
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const default_sort_order = "frag,hit,bytes";
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_READ,
+ };
const struct option kmem_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
"show per-callsite statistics", parse_caller_opt),
OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
@@ -671,6 +1128,11 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
parse_sort_opt),
OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
+ OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+ OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
+ parse_slab_opt),
+ OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
+ parse_page_opt),
OPT_END()
};
const char *const kmem_subcommands[] = { "record", "stat", NULL };
@@ -679,10 +1141,6 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
struct perf_session *session;
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- };
int ret = -1;
argc = parse_options_subcommand(argc, argv, kmem_options,
@@ -691,18 +1149,36 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
if (!argc)
usage_with_options(kmem_usage, kmem_options);
+ if (kmem_slab == 0 && kmem_page == 0)
+ kmem_slab = 1; /* for backward compatibility */
+
if (!strncmp(argv[0], "rec", 3)) {
symbol__init(NULL);
return __cmd_record(argc, argv);
}
+ file.path = input_name;
+
session = perf_session__new(&file, false, &perf_kmem);
if (session == NULL)
return -1;
+ if (kmem_page) {
+ struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+
+ if (evsel == NULL || evsel->tp_format == NULL) {
+ pr_err("invalid event found.. aborting\n");
+ return -1;
+ }
+
+ kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
+ }
+
symbol__init(&session->header.env);
if (!strcmp(argv[0], "stat")) {
+ setlocale(LC_ALL, "");
+
if (cpu__setup_cpunode_map())
goto out_delete;
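
page_stat_cmp() above orders page statistics by a compound key (page, order, migrate type, GFP flags), most significant field first. The sketch below shows the same field-by-field comparison applied with plain qsort() instead of an rbtree; struct pstat and the sample values are invented for illustration only:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct pstat {
	uint64_t page;
	int order;
	unsigned int gfp_flags;
	unsigned int migrate_type;
	uint64_t alloc_bytes;
};

/* compare field by field, most significant key first */
static int pstat_cmp(const void *pa, const void *pb)
{
	const struct pstat *a = pa, *b = pb;

	if (a->page != b->page)
		return a->page < b->page ? -1 : 1;
	if (a->order != b->order)
		return a->order < b->order ? -1 : 1;
	if (a->migrate_type != b->migrate_type)
		return a->migrate_type < b->migrate_type ? -1 : 1;
	if (a->gfp_flags != b->gfp_flags)
		return a->gfp_flags < b->gfp_flags ? -1 : 1;
	return 0;
}

int main(void)
{
	struct pstat stats[] = {
		{ 0x1000, 0, 0x20, 2, 4096 },
		{ 0x1000, 0, 0x20, 0, 4096 },
		{ 0x0800, 3, 0x10, 1, 32768 },
	};
	unsigned int i;

	qsort(stats, 3, sizeof(stats[0]), pstat_cmp);

	for (i = 0; i < 3; i++)
		printf("page %#llx order %d: %llu bytes\n",
		       (unsigned long long)stats[i].page, stats[i].order,
		       (unsigned long long)stats[i].alloc_bytes);
	return 0;
}
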
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 0894a817f67e..1f9338f6109c 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -18,6 +18,7 @@
#include "util/stat.h"
#include "util/top.h"
#include "util/data.h"
+#include "util/ordered-events.h"
#include <sys/prctl.h>
#ifdef HAVE_TIMERFD_SUPPORT
@@ -730,9 +731,9 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
return -1;
}
- err = perf_session_queue_event(kvm->session, event, &kvm->tool, &sample, 0);
+ err = perf_session__queue_event(kvm->session, event, &sample, 0);
/*
- * FIXME: Here we can't consume the event, as perf_session_queue_event will
+ * FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
*/
perf_evlist__mmap_consume(kvm->evlist, idx);
@@ -783,8 +784,10 @@ static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
/* flush queue after each round in which we processed events */
if (ntotal) {
- kvm->session->ordered_events.next_flush = flush_time;
- err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session);
+ struct ordered_events *oe = &kvm->session->ordered_events;
+
+ oe->next_flush = flush_time;
+ err = ordered_events__flush(oe, OE_FLUSH__ROUND);
if (err) {
if (kvm->lost_events)
pr_info("\nLost events: %" PRIu64 "\n\n",
@@ -1044,6 +1047,7 @@ static int read_events(struct perf_kvm_stat *kvm)
struct perf_data_file file = {
.path = kvm->file_name,
.mode = PERF_DATA_MODE_READ,
+ .force = kvm->force,
};
kvm->tool = eops;
@@ -1066,7 +1070,7 @@ static int read_events(struct perf_kvm_stat *kvm)
if (ret < 0)
return ret;
- return perf_session__process_events(kvm->session, &kvm->tool);
+ return perf_session__process_events(kvm->session);
}
static int parse_target_str(struct perf_kvm_stat *kvm)
@@ -1201,6 +1205,7 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
" time (sort by avg time)"),
OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
"analyze events only for given process id(s)"),
+ OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
OPT_END()
};
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 198f3c3aff95..af5bd0514108 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -36,38 +36,36 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
setup_pager();
- if (raw_dump) {
- print_events(NULL, true);
- return 0;
- }
+ if (!raw_dump)
+ printf("\nList of pre-defined events (to be used in -e):\n\n");
if (argc == 0) {
- print_events(NULL, false);
+ print_events(NULL, raw_dump);
return 0;
}
for (i = 0; i < argc; ++i) {
- if (i)
- putchar('\n');
- if (strncmp(argv[i], "tracepoint", 10) == 0)
- print_tracepoint_events(NULL, NULL, false);
+ if (strcmp(argv[i], "tracepoint") == 0)
+ print_tracepoint_events(NULL, NULL, raw_dump);
else if (strcmp(argv[i], "hw") == 0 ||
strcmp(argv[i], "hardware") == 0)
- print_events_type(PERF_TYPE_HARDWARE);
+ print_symbol_events(NULL, PERF_TYPE_HARDWARE,
+ event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
else if (strcmp(argv[i], "sw") == 0 ||
strcmp(argv[i], "software") == 0)
- print_events_type(PERF_TYPE_SOFTWARE);
+ print_symbol_events(NULL, PERF_TYPE_SOFTWARE,
+ event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
- print_hwcache_events(NULL, false);
+ print_hwcache_events(NULL, raw_dump);
else if (strcmp(argv[i], "pmu") == 0)
- print_pmu_events(NULL, false);
+ print_pmu_events(NULL, raw_dump);
else {
char *sep = strchr(argv[i], ':'), *s;
int sep_idx;
if (sep == NULL) {
- print_events(argv[i], false);
+ print_events(argv[i], raw_dump);
continue;
}
sep_idx = sep - argv[i];
@@ -76,7 +74,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
s[sep_idx] = '\0';
- print_tracepoint_events(s, s + sep_idx + 1, false);
+ print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index e7ec71589da6..d49c2ab85fc2 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -846,6 +846,8 @@ static const struct perf_evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
};
+static bool force;
+
static int __cmd_report(bool display_info)
{
int err = -EINVAL;
@@ -857,6 +859,7 @@ static int __cmd_report(bool display_info)
struct perf_data_file file = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
+ .force = force,
};
session = perf_session__new(&file, false, &eops);
@@ -878,7 +881,7 @@ static int __cmd_report(bool display_info)
if (select_key())
goto out_delete;
- err = perf_session__process_events(session, &eops);
+ err = perf_session__process_events(session);
if (err)
goto out_delete;
@@ -945,6 +948,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
"dump thread list in perf.data"),
OPT_BOOLEAN('m', "map", &info_map,
"map of lock instances (address:name table)"),
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_END()
};
const struct option lock_options[] = {
@@ -956,6 +960,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
"key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
/* TODO: type */
OPT_END()
};
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 9b5663950a4d..675216e08bfc 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -15,6 +15,7 @@ struct perf_mem {
char const *input_name;
bool hide_unresolved;
bool dump_raw;
+ bool force;
int operation;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -120,6 +121,7 @@ static int report_raw_events(struct perf_mem *mem)
struct perf_data_file file = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
+ .force = mem->force,
};
int err = -EINVAL;
int ret;
@@ -141,7 +143,7 @@ static int report_raw_events(struct perf_mem *mem)
printf("# PID, TID, IP, ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n");
- err = perf_session__process_events(session, &mem->tool);
+ err = perf_session__process_events(session);
if (err)
return err;
@@ -286,10 +288,11 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
"input file name"),
OPT_STRING('C', "cpu", &mem.cpu_list, "cpu",
"list of cpus to profile"),
- OPT_STRING('x', "field-separator", &symbol_conf.field_sep,
+ OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep,
"separator",
"separator for columns, no spaces will be added"
" between columns '.' is reserved."),
+ OPT_BOOLEAN('f', "force", &mem.force, "don't complain, do it"),
OPT_END()
};
const char *const mem_subcommands[] = { "record", "report", NULL };
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 921bb6942503..f7b1af67e9f6 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -56,6 +56,7 @@ static struct {
bool mod_events;
bool uprobes;
bool quiet;
+ bool target_used;
int nevents;
struct perf_probe_event events[MAX_PROBES];
struct strlist *dellist;
@@ -78,6 +79,12 @@ static int parse_probe_event(const char *str)
}
pev->uprobes = params.uprobes;
+ if (params.target) {
+ pev->target = strdup(params.target);
+ if (!pev->target)
+ return -ENOMEM;
+ params.target_used = true;
+ }
/* Parse a perf-probe command into event */
ret = parse_perf_probe_command(str, pev);
@@ -102,6 +109,7 @@ static int set_target(const char *ptr)
params.target = strdup(ptr);
if (!params.target)
return -ENOMEM;
+ params.target_used = false;
found = 1;
buf = ptr + (strlen(ptr) - 3);
@@ -178,7 +186,7 @@ static int opt_set_target(const struct option *opt, const char *str,
int ret = -ENOENT;
char *tmp;
- if (str && !params.target) {
+ if (str) {
if (!strcmp(opt->long_name, "exec"))
params.uprobes = true;
#ifdef HAVE_DWARF_SUPPORT
@@ -200,7 +208,9 @@ static int opt_set_target(const struct option *opt, const char *str,
if (!tmp)
return -ENOMEM;
}
+ free(params.target);
params.target = tmp;
+ params.target_used = false;
ret = 0;
}
@@ -485,9 +495,14 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
}
if (params.nevents) {
+ /* Ensure the last given target is used */
+ if (params.target && !params.target_used) {
+ pr_warning(" Error: -x/-m must follow the probe definitions.\n");
+ usage_with_options(probe_usage, options);
+ }
+
ret = add_perf_probe_events(params.events, params.nevents,
params.max_probe_points,
- params.target,
params.force_add);
if (ret < 0) {
pr_err_with_code(" Error: Failed to add events.", ret);
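
opt_set_target() now frees the previously strdup()ed target before installing the new one, so a later -x/-m wins without leaking. A minimal standalone version of that replace-an-owned-string pattern; set_target() and the paths below are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *target;

/* stand-in for opt_set_target(); only the ownership handling is shown */
static int set_target(const char *str)
{
	char *tmp = strdup(str);

	if (!tmp)
		return -ENOMEM;
	free(target);		/* harmless while target is still NULL */
	target = tmp;
	return 0;
}

int main(void)
{
	set_target("/usr/bin/true");
	set_target("/usr/bin/false");	/* the first value is freed here */
	printf("target: %s\n", target);
	free(target);
	return 0;
}
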
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 404ab3434052..c3efdfb630b5 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -70,8 +70,8 @@ static int process_synthesized_event(struct perf_tool *tool,
static int record__mmap_read(struct record *rec, int idx)
{
struct perf_mmap *md = &rec->evlist->mmap[idx];
- unsigned int head = perf_mmap__read_head(md);
- unsigned int old = md->prev;
+ u64 head = perf_mmap__read_head(md);
+ u64 old = md->prev;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
@@ -161,8 +161,9 @@ try_again:
}
}
- if (perf_evlist__apply_filters(evlist)) {
- error("failed to set filter with %d (%s)\n", errno,
+ if (perf_evlist__apply_filters(evlist, &pos)) {
+ error("failed to set filter \"%s\" on event %s with %d (%s)\n",
+ pos->filter, perf_evsel__name(pos), errno,
strerror_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
@@ -225,7 +226,7 @@ static int process_buildids(struct record *rec)
*/
symbol_conf.ignore_vmlinux_buildid = true;
- return perf_session__process_events(session, &rec->tool);
+ return perf_session__process_events(session);
}
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
@@ -343,7 +344,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
- session = perf_session__new(file, false, NULL);
+ session = perf_session__new(file, false, tool);
if (session == NULL) {
pr_err("Perf session creation failed.\n");
return -1;
@@ -658,7 +659,7 @@ error:
static void callchain_debug(void)
{
- static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };
+ static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
@@ -710,6 +711,90 @@ static int perf_record_config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb);
}
+struct clockid_map {
+ const char *name;
+ int clockid;
+};
+
+#define CLOCKID_MAP(n, c) \
+ { .name = n, .clockid = (c), }
+
+#define CLOCKID_END { .name = NULL, }
+
+
+/*
+ * Add the missing ones, we need to build on many distros...
+ */
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW 4
+#endif
+#ifndef CLOCK_BOOTTIME
+#define CLOCK_BOOTTIME 7
+#endif
+#ifndef CLOCK_TAI
+#define CLOCK_TAI 11
+#endif
+
+static const struct clockid_map clockids[] = {
+ /* available for all events, NMI safe */
+ CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
+ CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
+
+ /* available for some events */
+ CLOCKID_MAP("realtime", CLOCK_REALTIME),
+ CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
+ CLOCKID_MAP("tai", CLOCK_TAI),
+
+ /* available for the lazy */
+ CLOCKID_MAP("mono", CLOCK_MONOTONIC),
+ CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
+ CLOCKID_MAP("real", CLOCK_REALTIME),
+ CLOCKID_MAP("boot", CLOCK_BOOTTIME),
+
+ CLOCKID_END,
+};
+
+static int parse_clockid(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+ const struct clockid_map *cm;
+ const char *ostr = str;
+
+ if (unset) {
+ opts->use_clockid = 0;
+ return 0;
+ }
+
+ /* no arg passed */
+ if (!str)
+ return 0;
+
+ /* no setting it twice */
+ if (opts->use_clockid)
+ return -1;
+
+ opts->use_clockid = true;
+
+ /* if its a number, we're done */
+ if (sscanf(str, "%d", &opts->clockid) == 1)
+ return 0;
+
+ /* allow a "CLOCK_" prefix to the name */
+ if (!strncasecmp(str, "CLOCK_", 6))
+ str += 6;
+
+ for (cm = clockids; cm->name; cm++) {
+ if (!strcasecmp(str, cm->name)) {
+ opts->clockid = cm->clockid;
+ return 0;
+ }
+ }
+
+ opts->use_clockid = false;
+ ui__warning("unknown clockid %s, check man page\n", ostr);
+ return -1;
+}
+
static const char * const __record_usage[] = {
"perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]",
@@ -751,9 +836,9 @@ static struct record record = {
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
#ifdef HAVE_DWARF_UNWIND_SUPPORT
-const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf lbr";
#else
-const char record_callchain_help[] = CALLCHAIN_HELP "fp";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp lbr";
#endif
/*
@@ -839,6 +924,11 @@ struct option __record_options[] = {
"use per-thread mmaps"),
OPT_BOOLEAN('I', "intr-regs", &record.opts.sample_intr_regs,
"Sample machine registers on interrupt"),
+ OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
+ "Record running/enabled time of read (:S) events"),
+ OPT_CALLBACK('k', "clockid", &record.opts,
+ "clockid", "clockid to use for events, see clock_gettime()",
+ parse_clockid),
OPT_END()
};
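
parse_clockid() above maps clock names, optionally prefixed with "CLOCK_", onto clockid values, with fallback defines for ids missing from older headers. A standalone sketch of the same lookup, assuming only libc and clock_gettime(); the table is trimmed and the test call in main() is illustrative:

#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <time.h>

/* fallback values mirroring the ones in the patch */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

struct clockid_map {
	const char *name;
	int clockid;
};

static const struct clockid_map clockids[] = {
	{ "monotonic",     CLOCK_MONOTONIC },
	{ "monotonic_raw", CLOCK_MONOTONIC_RAW },
	{ "realtime",      CLOCK_REALTIME },
	{ "boottime",      CLOCK_BOOTTIME },
	{ "tai",           CLOCK_TAI },
	{ NULL, 0 },
};

static int lookup_clockid(const char *str)
{
	const struct clockid_map *cm;

	/* accept an optional "CLOCK_" prefix, as the patch does */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++)
		if (!strcasecmp(str, cm->name))
			return cm->clockid;
	return -1;
}

int main(void)
{
	struct timespec ts;
	int id = lookup_clockid("CLOCK_monotonic_raw");

	if (id >= 0 && clock_gettime(id, &ts) == 0)
		printf("clockid %d: %ld.%09ld\n",
		       id, (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
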
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2f91094e228b..b63aeda719be 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -249,6 +249,8 @@ static int report__setup_sample_type(struct report *rep)
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
(sample_type & PERF_SAMPLE_STACK_USER))
callchain_param.record_mode = CALLCHAIN_DWARF;
+ else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
}
@@ -302,7 +304,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
if (rep->mem_mode) {
ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
- ret += fprintf(fp, "\n# Sort order : %s", sort_order);
+ ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
} else
ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
return ret + fprintf(fp, "\n#\n");
@@ -327,7 +329,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
fprintf(stdout, "\n\n");
}
- if (sort_order == default_sort_order &&
+ if (sort_order == NULL &&
parent_pattern == default_parent_pattern) {
fprintf(stdout, "#\n# (%s)\n#\n", help);
@@ -345,7 +347,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
static void report__warn_kptr_restrict(const struct report *rep)
{
struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION];
- struct kmap *kernel_kmap = map__kmap(kernel_map);
+ struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
if (kernel_map == NULL ||
(kernel_map->dso->hit &&
@@ -480,7 +482,7 @@ static int __cmd_report(struct report *rep)
if (ret)
return ret;
- ret = perf_session__process_events(session, &rep->tool);
+ ret = perf_session__process_events(session);
if (ret)
return ret;
@@ -667,6 +669,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"only consider symbols in these dsos"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
+ OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
+ "only consider symbols in these pids"),
+ OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
+ "only consider symbols in these tids"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
@@ -674,7 +680,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
"width[,width...]",
"don't try to adjust column width, use these fixed values"),
- OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
+ OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved,
@@ -766,7 +772,7 @@ repeat:
* 0/1 means the user chose a mode.
*/
if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
- branch_call_mode == -1) {
+ !branch_call_mode) {
sort__mode = SORT_MODE__BRANCH;
symbol_conf.cumulate_callchain = false;
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 891c3930080e..5275bab70313 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -23,12 +23,13 @@
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
+#include <api/fs/fs.h>
#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
#define COMM_LEN 20
#define SYM_LEN 129
-#define MAX_PID 65536
+#define MAX_PID 1024000
struct sched_atom;
@@ -124,7 +125,7 @@ struct perf_sched {
struct perf_tool tool;
const char *sort_order;
unsigned long nr_tasks;
- struct task_desc *pid_to_task[MAX_PID];
+ struct task_desc **pid_to_task;
struct task_desc **tasks;
const struct trace_sched_handler *tp_handler;
pthread_mutex_t start_work_mutex;
@@ -169,6 +170,7 @@ struct perf_sched {
u64 cpu_last_switched[MAX_CPUS];
struct rb_root atom_root, sorted_atom_root;
struct list_head sort_list, cmp_pid;
+ bool force;
};
static u64 get_nsecs(void)
@@ -326,8 +328,19 @@ static struct task_desc *register_pid(struct perf_sched *sched,
unsigned long pid, const char *comm)
{
struct task_desc *task;
+ static int pid_max;
- BUG_ON(pid >= MAX_PID);
+ if (sched->pid_to_task == NULL) {
+ if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
+ pid_max = MAX_PID;
+ BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
+ }
+ if (pid >= (unsigned long)pid_max) {
+ BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
+ sizeof(struct task_desc *))) == NULL);
+ while (pid >= (unsigned long)pid_max)
+ sched->pid_to_task[pid_max++] = NULL;
+ }
task = sched->pid_to_task[pid];
@@ -346,7 +359,7 @@ static struct task_desc *register_pid(struct perf_sched *sched,
sched->pid_to_task[pid] = task;
sched->nr_tasks++;
- sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_task *));
+ sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
BUG_ON(!sched->tasks);
sched->tasks[task->nr] = task;
@@ -425,24 +438,45 @@ static u64 get_cpu_usage_nsec_parent(void)
return sum;
}
-static int self_open_counters(void)
+static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
struct perf_event_attr attr;
- char sbuf[STRERR_BUFSIZE];
+ char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
int fd;
+ struct rlimit limit;
+ bool need_privilege = false;
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_TASK_CLOCK;
+force_again:
fd = sys_perf_event_open(&attr, 0, -1, -1,
perf_event_open_cloexec_flag());
- if (fd < 0)
+ if (fd < 0) {
+ if (errno == EMFILE) {
+ if (sched->force) {
+ BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
+ limit.rlim_cur += sched->nr_tasks - cur_task;
+ if (limit.rlim_cur > limit.rlim_max) {
+ limit.rlim_max = limit.rlim_cur;
+ need_privilege = true;
+ }
+ if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
+ if (need_privilege && errno == EPERM)
+ strcpy(info, "Need privilege\n");
+ } else
+ goto force_again;
+ } else
+ strcpy(info, "Have a try with -f option\n");
+ }
pr_err("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n", fd,
- strerror_r(errno, sbuf, sizeof(sbuf)));
+ "with %d (%s)\n%s", fd,
+ strerror_r(errno, sbuf, sizeof(sbuf)), info);
+ exit(EXIT_FAILURE);
+ }
return fd;
}
@@ -460,6 +494,7 @@ static u64 get_cpu_usage_nsec_self(int fd)
struct sched_thread_parms {
struct task_desc *task;
struct perf_sched *sched;
+ int fd;
};
static void *thread_func(void *ctx)
@@ -470,13 +505,12 @@ static void *thread_func(void *ctx)
u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret;
char comm2[22];
- int fd;
+ int fd = parms->fd;
zfree(&parms);
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
- fd = self_open_counters();
if (fd < 0)
return NULL;
again:
@@ -528,6 +562,7 @@ static void create_tasks(struct perf_sched *sched)
BUG_ON(parms == NULL);
parms->task = task = sched->tasks[i];
parms->sched = sched;
+ parms->fd = self_open_counters(sched, i);
sem_init(&task->sleep_sem, 0, 0);
sem_init(&task->ready_for_work, 0, 0);
sem_init(&task->work_done_sem, 0, 0);
@@ -572,13 +607,13 @@ static void wait_for_tasks(struct perf_sched *sched)
cpu_usage_1 = get_cpu_usage_nsec_parent();
if (!sched->runavg_cpu_usage)
sched->runavg_cpu_usage = sched->cpu_usage;
- sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;
+ sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
if (!sched->runavg_parent_cpu_usage)
sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
- sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
- sched->parent_cpu_usage)/10;
+ sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
+ sched->parent_cpu_usage)/sched->replay_repeat;
ret = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(ret);
@@ -610,7 +645,7 @@ static void run_one_test(struct perf_sched *sched)
sched->sum_fluct += fluct;
if (!sched->run_avg)
sched->run_avg = delta;
- sched->run_avg = (sched->run_avg * 9 + delta) / 10;
+ sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);
@@ -831,7 +866,7 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
return -1;
}
- atoms->thread = thread;
+ atoms->thread = thread__get(thread);
INIT_LIST_HEAD(&atoms->work_list);
__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
return 0;
@@ -1439,8 +1474,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
return err;
}
-static int perf_sched__read_events(struct perf_sched *sched,
- struct perf_session **psession)
+static int perf_sched__read_events(struct perf_sched *sched)
{
const struct perf_evsel_str_handler handlers[] = {
{ "sched:sched_switch", process_sched_switch_event, },
@@ -1453,7 +1487,9 @@ static int perf_sched__read_events(struct perf_sched *sched,
struct perf_data_file file = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
+ int rc = -1;
session = perf_session__new(&file, false, &sched->tool);
if (session == NULL) {
@@ -1467,27 +1503,21 @@ static int perf_sched__read_events(struct perf_sched *sched,
goto out_delete;
if (perf_session__has_traces(session, "record -R")) {
- int err = perf_session__process_events(session, &sched->tool);
+ int err = perf_session__process_events(session);
if (err) {
pr_err("Failed to process events, error %d", err);
goto out_delete;
}
- sched->nr_events = session->stats.nr_events[0];
- sched->nr_lost_events = session->stats.total_lost;
- sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
+ sched->nr_events = session->evlist->stats.nr_events[0];
+ sched->nr_lost_events = session->evlist->stats.total_lost;
+ sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
}
- if (psession)
- *psession = session;
- else
- perf_session__delete(session);
-
- return 0;
-
+ rc = 0;
out_delete:
perf_session__delete(session);
- return -1;
+ return rc;
}
static void print_bad_events(struct perf_sched *sched)
@@ -1515,12 +1545,10 @@ static void print_bad_events(struct perf_sched *sched)
static int perf_sched__lat(struct perf_sched *sched)
{
struct rb_node *next;
- struct perf_session *session;
setup_pager();
- /* save session -- references to threads are held in work_list */
- if (perf_sched__read_events(sched, &session))
+ if (perf_sched__read_events(sched))
return -1;
perf_sched__sort_lat(sched);
@@ -1537,6 +1565,7 @@ static int perf_sched__lat(struct perf_sched *sched)
work_list = rb_entry(next, struct work_atoms, node);
output_lat_thread(sched, work_list);
next = rb_next(next);
+ thread__zput(work_list->thread);
}
printf(" -----------------------------------------------------------------------------------------------------------------\n");
@@ -1548,7 +1577,6 @@ static int perf_sched__lat(struct perf_sched *sched)
print_bad_events(sched);
printf("\n");
- perf_session__delete(session);
return 0;
}
@@ -1557,7 +1585,7 @@ static int perf_sched__map(struct perf_sched *sched)
sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
setup_pager();
- if (perf_sched__read_events(sched, NULL))
+ if (perf_sched__read_events(sched))
return -1;
print_bad_events(sched);
return 0;
@@ -1572,7 +1600,7 @@ static int perf_sched__replay(struct perf_sched *sched)
test_calibrations(sched);
- if (perf_sched__read_events(sched, NULL))
+ if (perf_sched__read_events(sched))
return -1;
printf("nr_run_events: %ld\n", sched->nr_run_events);
@@ -1693,6 +1721,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+ OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
OPT_END()
};
const struct option sched_options[] = {
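The builtin-sched.c hunks above switch the latency code to reference-counted threads: thread_atoms_insert() now pins the thread with thread__get(), and perf_sched__lat() drops that reference with thread__zput() after printing each work list entry, which is why the session no longer has to be kept alive just to keep the thread pointers valid. A minimal, self-contained illustration of that get/put discipline follows (plain C with hypothetical obj_* names, a sketch and not the perf API):

/*
 * Illustrative only: whoever stores a pointer takes a reference and
 * releases it, clearing the pointer (like thread__zput() does), when
 * the container is torn down.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcnt;
};

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

/* "zput": drop the reference and clear the caller's pointer in one step */
#define obj_zput(p) do { obj_put(p); (p) = NULL; } while (0)

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *stored;

	o->refcnt = 1;       /* creator's reference */
	stored = obj_get(o); /* the container takes its own reference */
	obj_put(o);          /* creator is done with it */
	obj_zput(stored);    /* container teardown frees it and NULLs the slot */
	printf("stored is now %p\n", (void *)stored);
	return 0;
}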
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index ce304dfd962a..58f10b8e6ff2 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -446,9 +446,9 @@ static void print_sample_bts(union perf_event *event,
}
static void process_event(union perf_event *event, struct perf_sample *sample,
- struct perf_evsel *evsel, struct thread *thread,
- struct addr_location *al)
+ struct perf_evsel *evsel, struct addr_location *al)
{
+ struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->attr;
if (output[attr->type].fields == 0)
@@ -549,14 +549,6 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct machine *machine)
{
struct addr_location al;
- struct thread *thread = machine__findnew_thread(machine, sample->pid,
- sample->tid);
-
- if (thread == NULL) {
- pr_debug("problem processing %d event, skipping it.\n",
- event->header.type);
- return -1;
- }
if (debug_mode) {
if (sample->time < last_timestamp) {
@@ -581,7 +573,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, sample, evsel, thread, &al);
+ scripting_ops->process_event(event, sample, evsel, &al);
return 0;
}
@@ -800,7 +792,7 @@ static int __cmd_script(struct perf_script *script)
script->tool.mmap2 = process_mmap2_event;
}
- ret = perf_session__process_events(script->session, &script->tool);
+ ret = perf_session__process_events(script->session);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -1523,6 +1515,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
.ordering_requires_timestamps = true,
},
};
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_READ,
+ };
const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
@@ -1550,7 +1545,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"When printing symbols do not display call chain"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
- OPT_CALLBACK('f', "fields", NULL, "str",
+ OPT_CALLBACK('F', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. "
"Valid types: hw,sw,trace,raw. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
@@ -1562,6 +1557,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only display events for these comms"),
+ OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
+ "only consider symbols in these pids"),
+ OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
+ "only consider symbols in these tids"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -1570,9 +1569,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"Show the fork/comm/exit events"),
OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
"Show the mmap events"),
+ OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
OPT_END()
};
- const char * const script_usage[] = {
+ const char * const script_subcommands[] = { "record", "report", NULL };
+ const char *script_usage[] = {
"perf script [<options>]",
"perf script [<options>] record <script> [<record-options>] <command>",
"perf script [<options>] report <script> [script-args]",
@@ -1580,13 +1581,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"perf script [<options>] <top-script> [script-args]",
NULL
};
- struct perf_data_file file = {
- .mode = PERF_DATA_MODE_READ,
- };
setup_scripting();
- argc = parse_options(argc, argv, options, script_usage,
+ argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
file.path = input_name;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e598e4e98170..f7b8218785f6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -353,39 +353,40 @@ static struct perf_evsel *nth_evsel(int n)
* more semantic information such as miss/hit ratios,
* instruction rates, etc:
*/
-static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
+static void update_shadow_stats(struct perf_evsel *counter, u64 *count,
+ int cpu)
{
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
- update_stats(&runtime_nsecs_stats[0], count[0]);
+ update_stats(&runtime_nsecs_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[0], count[0]);
+ update_stats(&runtime_cycles_stats[cpu], count[0]);
else if (transaction_run &&
perf_evsel__cmp(counter, nth_evsel(T_CYCLES_IN_TX)))
- update_stats(&runtime_cycles_in_tx_stats[0], count[0]);
+ update_stats(&runtime_cycles_in_tx_stats[cpu], count[0]);
else if (transaction_run &&
perf_evsel__cmp(counter, nth_evsel(T_TRANSACTION_START)))
- update_stats(&runtime_transaction_stats[0], count[0]);
+ update_stats(&runtime_transaction_stats[cpu], count[0]);
else if (transaction_run &&
perf_evsel__cmp(counter, nth_evsel(T_ELISION_START)))
- update_stats(&runtime_elision_stats[0], count[0]);
+ update_stats(&runtime_elision_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
- update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
+ update_stats(&runtime_stalled_cycles_front_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
- update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
+ update_stats(&runtime_stalled_cycles_back_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[0], count[0]);
+ update_stats(&runtime_branches_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_stats(&runtime_cacherefs_stats[0], count[0]);
+ update_stats(&runtime_cacherefs_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
- update_stats(&runtime_l1_dcache_stats[0], count[0]);
+ update_stats(&runtime_l1_dcache_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
- update_stats(&runtime_l1_icache_stats[0], count[0]);
+ update_stats(&runtime_l1_icache_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
- update_stats(&runtime_ll_cache_stats[0], count[0]);
+ update_stats(&runtime_ll_cache_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
- update_stats(&runtime_dtlb_cache_stats[0], count[0]);
+ update_stats(&runtime_dtlb_cache_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
- update_stats(&runtime_itlb_cache_stats[0], count[0]);
+ update_stats(&runtime_itlb_cache_stats[cpu], count[0]);
}
static void zero_per_pkg(struct perf_evsel *counter)
@@ -447,7 +448,8 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused,
perf_evsel__compute_deltas(evsel, cpu, count);
perf_counts_values__scale(count, scale, NULL);
evsel->counts->cpu[cpu] = *count;
- update_shadow_stats(evsel, count->values);
+ if (aggr_mode == AGGR_NONE)
+ update_shadow_stats(evsel, count->values, cpu);
break;
case AGGR_GLOBAL:
aggr->val += count->val;
@@ -495,7 +497,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
/*
* Save the full runtime - to allow normalization during printout:
*/
- update_shadow_stats(counter, count);
+ update_shadow_stats(counter, count, 0);
return 0;
}
@@ -510,6 +512,9 @@ static int read_counter(struct perf_evsel *counter)
int ncpus = perf_evsel__nr_cpus(counter);
int cpu, thread;
+ if (!counter->supported)
+ return -ENOENT;
+
if (counter->system_wide)
nthreads = 1;
@@ -679,8 +684,9 @@ static int __run_perf_stat(int argc, const char **argv)
unit_width = l;
}
- if (perf_evlist__apply_filters(evsel_list)) {
- error("failed to set filter with %d (%s)\n", errno,
+ if (perf_evlist__apply_filters(evsel_list, &counter)) {
+ error("failed to set filter \"%s\" on event %s with %d (%s)\n",
+ counter->filter, perf_evsel__name(counter), errno,
strerror_r(errno, msg, sizeof(msg)));
return -1;
}
@@ -766,6 +772,19 @@ static int run_perf_stat(int argc, const char **argv)
return ret;
}
+static void print_running(u64 run, u64 ena)
+{
+ if (csv_output) {
+ fprintf(output, "%s%" PRIu64 "%s%.2f",
+ csv_sep,
+ run,
+ csv_sep,
+ ena ? 100.0 * run / ena : 100.0);
+ } else if (run != ena) {
+ fprintf(output, " (%.2f%%)", 100.0 * run / ena);
+ }
+}
+
static void print_noise_pct(double total, double avg)
{
double pct = rel_stddev_stats(total, avg);
@@ -1076,6 +1095,8 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
if (total) {
ratio = avg / total;
fprintf(output, " # %5.2f insns per cycle ", ratio);
+ } else {
+ fprintf(output, " ");
}
total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
@@ -1145,6 +1166,8 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
if (total) {
ratio = avg / total;
fprintf(output, " # %8.3f GHz ", ratio);
+ } else {
+ fprintf(output, " ");
}
} else if (transaction_run &&
perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX))) {
@@ -1249,6 +1272,7 @@ static void print_aggr(char *prefix)
fprintf(output, "%s%s",
csv_sep, counter->cgrp->name);
+ print_running(run, ena);
fputc('\n', output);
continue;
}
@@ -1259,13 +1283,10 @@ static void print_aggr(char *prefix)
else
abs_printout(id, nr, counter, uval);
- if (!csv_output) {
+ if (!csv_output)
print_noise(counter, 1.0);
- if (run != ena)
- fprintf(output, " (%.2f%%)",
- 100.0 * run / ena);
- }
+ print_running(run, ena);
fputc('\n', output);
}
}
@@ -1281,11 +1302,15 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
double avg = avg_stats(&ps->res_stats[0]);
int scaled = counter->counts->scaled;
double uval;
+ double avg_enabled, avg_running;
+
+ avg_enabled = avg_stats(&ps->res_stats[1]);
+ avg_running = avg_stats(&ps->res_stats[2]);
if (prefix)
fprintf(output, "%s", prefix);
- if (scaled == -1) {
+ if (scaled == -1 || !counter->supported) {
fprintf(output, "%*s%s",
csv_output ? 0 : 18,
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
@@ -1300,6 +1325,7 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
if (counter->cgrp)
fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
+ print_running(avg_running, avg_enabled);
fputc('\n', output);
return;
}
@@ -1313,19 +1339,7 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
print_noise(counter, avg);
- if (csv_output) {
- fputc('\n', output);
- return;
- }
-
- if (scaled) {
- double avg_enabled, avg_running;
-
- avg_enabled = avg_stats(&ps->res_stats[1]);
- avg_running = avg_stats(&ps->res_stats[2]);
-
- fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
- }
+ print_running(avg_running, avg_enabled);
fprintf(output, "\n");
}
@@ -1367,6 +1381,7 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s%s",
csv_sep, counter->cgrp->name);
+ print_running(run, ena);
fputc('\n', output);
continue;
}
@@ -1378,13 +1393,10 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
else
abs_printout(cpu, 0, counter, uval);
- if (!csv_output) {
+ if (!csv_output)
print_noise(counter, 1.0);
+ print_running(run, ena);
- if (run != ena)
- fprintf(output, " (%.2f%%)",
- 100.0 * run / ena);
- }
fputc('\n', output);
}
}
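The stat hunks above consolidate the multiplexing annotation into a new print_running() helper and, for AGGR_NONE, start indexing the shadow stats by CPU instead of always using slot 0. The annotation itself is only the ratio of the time the counter was actually running to the time it was enabled; a standalone sketch of that arithmetic (plain C, not the perf output code) looks like:

/*
 * Sketch of the run/enabled percentage computed by print_running().
 * The real helper writes to the stat output stream and uses csv_sep;
 * this version just prints to stdout for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static void show_running(uint64_t run, uint64_t ena, int csv)
{
	double pct = ena ? 100.0 * (double)run / (double)ena : 100.0;

	if (csv)
		printf(",%llu,%.2f", (unsigned long long)run, pct);
	else if (run != ena)          /* only annotate multiplexed counters */
		printf(" (%.2f%%)", pct);
}

int main(void)
{
	show_running(750000, 1000000, 0);   /* -> " (75.00%)" */
	putchar('\n');
	show_running(750000, 1000000, 1);   /* -> ",750000,75.00" */
	putchar('\n');
	return 0;
}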
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index f3bb1a4bf060..e50fe1187b0b 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -67,6 +67,7 @@ struct timechart {
skip_eagain;
u64 min_time,
merge_dist;
+ bool force;
};
struct per_pidcomm;
@@ -1598,6 +1599,7 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
struct perf_data_file file = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
+ .force = tchart->force,
};
struct perf_session *session = perf_session__new(&file, false,
@@ -1623,7 +1625,7 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
goto out_delete;
}
- ret = perf_session__process_events(session, &tchart->tool);
+ ret = perf_session__process_events(session);
if (ret)
goto out_delete;
@@ -1956,9 +1958,11 @@ int cmd_timechart(int argc, const char **argv,
OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
"merge events that are merge-dist us apart",
parse_time),
+ OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
OPT_END()
};
- const char * const timechart_usage[] = {
+ const char * const timechart_subcommands[] = { "record", NULL };
+ const char *timechart_usage[] = {
"perf timechart [<options>] {record}",
NULL
};
@@ -1976,8 +1980,8 @@ int cmd_timechart(int argc, const char **argv,
"perf timechart record [<options>]",
NULL
};
- argc = parse_options(argc, argv, timechart_options, timechart_usage,
- PARSE_OPT_STOP_AT_NON_OPTION);
+ argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
+ timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (tchart.power_only && tchart.tasks_only) {
pr_err("-P and -T options cannot be used at the same time.\n");
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c4c7eac69de4..6a4d5d41c671 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -716,7 +716,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (!machine) {
pr_err("%u unprocessable samples recorded.\r",
- top->session->stats.nr_unprocessable_samples++);
+ top->session->evlist->stats.nr_unprocessable_samples++);
return;
}
@@ -733,7 +733,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
- !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
+ al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
" modules" : "");
if (use_browser <= 0)
sleep(5);
@@ -757,8 +757,10 @@ static void perf_event__process_sample(struct perf_tool *tool,
al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
if (symbol_conf.vmlinux_name) {
- ui__warning("The %s file can't be used.\n%s",
- symbol_conf.vmlinux_name, msg);
+ char serr[256];
+ dso__strerror_load(al.map->dso, serr, sizeof(serr));
+ ui__warning("The %s file can't be used: %s\n%s",
+ symbol_conf.vmlinux_name, serr, msg);
} else {
ui__warning("A vmlinux file was not found.\n%s",
msg);
@@ -856,7 +858,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
hists__inc_nr_events(evsel__hists(evsel), event->header.type);
machine__process_event(machine, event, &sample);
} else
- ++session->stats.nr_unknown_events;
+ ++session->evlist->stats.nr_unknown_events;
next_event:
perf_evlist__mmap_consume(top->evlist, idx);
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 7e935f1083ec..e122970361f2 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -52,7 +52,9 @@ struct tp_field {
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
- return *(u##bits *)(sample->raw_data + field->offset); \
+ u##bits value; \
+ memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
+ return value; \
}
TP_UINT_FIELD(8);
@@ -63,7 +65,8 @@ TP_UINT_FIELD(64);
#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
- u##bits value = *(u##bits *)(sample->raw_data + field->offset); \
+ u##bits value; \
+ memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
return bswap_##bits(value);\
}
@@ -1132,6 +1135,8 @@ static struct syscall_fmt *syscall_fmt__find(const char *name)
struct syscall {
struct event_format *tp_format;
+ int nr_args;
+ struct format_field *args;
const char *name;
bool filtered;
bool is_exit;
@@ -1219,7 +1224,9 @@ struct trace {
struct syscall *table;
} syscalls;
struct record_opts opts;
+ struct perf_evlist *evlist;
struct machine *host;
+ struct thread *current;
u64 base_time;
FILE *output;
unsigned long nr_events;
@@ -1227,6 +1234,10 @@ struct trace {
const char *last_vfs_getname;
struct intlist *tid_list;
struct intlist *pid_list;
+ struct {
+ size_t nr;
+ pid_t *entries;
+ } filter_pids;
double duration_filter;
double runtime_ms;
struct {
@@ -1243,6 +1254,7 @@ struct trace {
bool show_comm;
bool show_tool_stats;
bool trace_syscalls;
+ bool force;
int trace_pgfaults;
};
@@ -1433,14 +1445,14 @@ static int syscall__set_arg_fmts(struct syscall *sc)
struct format_field *field;
int idx = 0;
- sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
+ sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
if (sc->arg_scnprintf == NULL)
return -1;
if (sc->fmt)
sc->arg_parm = sc->fmt->arg_parm;
- for (field = sc->tp_format->format.fields->next; field; field = field->next) {
+ for (field = sc->args; field; field = field->next) {
if (sc->fmt && sc->fmt->arg_scnprintf[idx])
sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
else if (field->flags & FIELD_IS_POINTER)
@@ -1506,18 +1518,37 @@ static int trace__read_syscall_info(struct trace *trace, int id)
if (sc->tp_format == NULL)
return -1;
+ sc->args = sc->tp_format->format.fields;
+ sc->nr_args = sc->tp_format->format.nr_fields;
+ /* drop nr field - not relevant here; does not exist on older kernels */
+ if (sc->args && strcmp(sc->args->name, "nr") == 0) {
+ sc->args = sc->args->next;
+ --sc->nr_args;
+ }
+
sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
return syscall__set_arg_fmts(sc);
}
+/*
+ * args is to be interpreted as a series of longs but we need to handle
+ * 8-byte unaligned accesses. args points to raw_data within the event
+ * and raw_data is guaranteed to be 8-byte unaligned because it is
+ * preceded by raw_size which is a u32. So we need to copy args to a temp
+ * variable to read it. Most notably this avoids extended load instructions
+ * on unaligned addresses
+ */
+
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
- unsigned long *args, struct trace *trace,
+ unsigned char *args, struct trace *trace,
struct thread *thread)
{
size_t printed = 0;
+ unsigned char *p;
+ unsigned long val;
- if (sc->tp_format != NULL) {
+ if (sc->args != NULL) {
struct format_field *field;
u8 bit = 1;
struct syscall_arg arg = {
@@ -1527,16 +1558,21 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
.thread = thread,
};
- for (field = sc->tp_format->format.fields->next; field;
+ for (field = sc->args; field;
field = field->next, ++arg.idx, bit <<= 1) {
if (arg.mask & bit)
continue;
+
+ /* special care for unaligned accesses */
+ p = args + sizeof(unsigned long) * arg.idx;
+ memcpy(&val, p, sizeof(val));
+
/*
* Suppress this argument if its value is zero and
* we don't have a string associated in an
* strarray for it.
*/
- if (args[arg.idx] == 0 &&
+ if (val == 0 &&
!(sc->arg_scnprintf &&
sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
sc->arg_parm[arg.idx]))
@@ -1545,23 +1581,26 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
printed += scnprintf(bf + printed, size - printed,
"%s%s: ", printed ? ", " : "", field->name);
if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
- arg.val = args[arg.idx];
+ arg.val = val;
if (sc->arg_parm)
arg.parm = sc->arg_parm[arg.idx];
printed += sc->arg_scnprintf[arg.idx](bf + printed,
size - printed, &arg);
} else {
printed += scnprintf(bf + printed, size - printed,
- "%ld", args[arg.idx]);
+ "%ld", val);
}
}
} else {
int i = 0;
while (i < 6) {
+ /* special care for unaligned accesses */
+ p = args + sizeof(unsigned long) * i;
+ memcpy(&val, p, sizeof(val));
printed += scnprintf(bf + printed, size - printed,
"%sarg%d: %ld",
- printed ? ", " : "", i, args[i]);
+ printed ? ", " : "", i, val);
++i;
}
}
@@ -1642,6 +1681,29 @@ static void thread__update_stats(struct thread_trace *ttrace,
update_stats(stats, duration);
}
+static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
+{
+ struct thread_trace *ttrace;
+ u64 duration;
+ size_t printed;
+
+ if (trace->current == NULL)
+ return 0;
+
+ ttrace = thread__priv(trace->current);
+
+ if (!ttrace->entry_pending)
+ return 0;
+
+ duration = sample->time - ttrace->entry_time;
+
+ printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
+ printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
+ ttrace->entry_pending = false;
+
+ return printed;
+}
+
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -1673,6 +1735,9 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
return -1;
}
+ if (!trace->summary_only)
+ trace__printf_interrupted_entry(trace, sample);
+
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
@@ -1688,6 +1753,11 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
} else
ttrace->entry_pending = true;
+ if (trace->current != thread) {
+ thread__put(trace->current);
+ trace->current = thread__get(thread);
+ }
+
return 0;
}
@@ -1805,6 +1875,28 @@ out_dump:
return 0;
}
+static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample)
+{
+ trace__printf_interrupted_entry(trace, sample);
+ trace__fprintf_tstamp(trace, sample->time, trace->output);
+
+ if (trace->trace_syscalls)
+ fprintf(trace->output, "( ): ");
+
+ fprintf(trace->output, "%s:", evsel->name);
+
+ if (evsel->tp_format) {
+ event_format__fprintf(evsel->tp_format, sample->cpu,
+ sample->raw_data, sample->raw_size,
+ trace->output);
+ }
+
+ fprintf(trace->output, ")\n");
+ return 0;
+}
+
static void print_location(FILE *f, struct perf_sample *sample,
struct addr_location *al,
bool print_dso, bool print_sym)
@@ -2037,10 +2129,39 @@ static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
return 0;
}
-static int trace__run(struct trace *trace, int argc, const char **argv)
+static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
- struct perf_evlist *evlist = perf_evlist__new();
+ const u32 type = event->header.type;
struct perf_evsel *evsel;
+
+ if (!trace->full_time && trace->base_time == 0)
+ trace->base_time = sample->time;
+
+ if (type != PERF_RECORD_SAMPLE) {
+ trace__process_event(trace, trace->host, event, sample);
+ return;
+ }
+
+ evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
+ if (evsel == NULL) {
+ fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
+ return;
+ }
+
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+ sample->raw_data == NULL) {
+ fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
+ perf_evsel__name(evsel), sample->tid,
+ sample->cpu, sample->raw_size);
+ } else {
+ tracepoint_handler handler = evsel->handler;
+ handler(trace, evsel, event, sample);
+ }
+}
+
+static int trace__run(struct trace *trace, int argc, const char **argv)
+{
+ struct perf_evlist *evlist = trace->evlist;
int err = -1, i;
unsigned long before;
const bool forks = argc > 0;
@@ -2048,11 +2169,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
trace->live = true;
- if (evlist == NULL) {
- fprintf(trace->output, "Not enough memory to run!\n");
- goto out;
- }
-
if (trace->trace_syscalls &&
perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
trace__sys_exit))
@@ -2105,16 +2221,35 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_open;
+ /*
+ * Better not use !target__has_task() here because we need to cover the
+ * case where no threads were specified in the command line, but a
+ * workload was, and in that case we will fill in the thread_map when
+ * we fork the workload in perf_evlist__prepare_workload.
+ */
+ if (trace->filter_pids.nr > 0)
+ err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
+ else if (evlist->threads->map[0] == -1)
+ err = perf_evlist__set_filter_pid(evlist, getpid());
+
+ if (err < 0) {
+ printf("err=%d,%s\n", -err, strerror(-err));
+ exit(1);
+ }
+
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
if (err < 0)
goto out_error_mmap;
- perf_evlist__enable(evlist);
+ if (!target__none(&trace->opts.target))
+ perf_evlist__enable(evlist);
if (forks)
perf_evlist__start_workload(evlist);
- trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
+ trace->multiple_threads = evlist->threads->map[0] == -1 ||
+ evlist->threads->nr > 1 ||
+ perf_evlist__first(evlist)->attr.inherit;
again:
before = trace->nr_events;
@@ -2122,8 +2257,6 @@ again:
union perf_event *event;
while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
- const u32 type = event->header.type;
- tracepoint_handler handler;
struct perf_sample sample;
++trace->nr_events;
@@ -2134,35 +2267,17 @@ again:
goto next_event;
}
- if (!trace->full_time && trace->base_time == 0)
- trace->base_time = sample.time;
-
- if (type != PERF_RECORD_SAMPLE) {
- trace__process_event(trace, trace->host, event, &sample);
- continue;
- }
-
- evsel = perf_evlist__id2evsel(evlist, sample.id);
- if (evsel == NULL) {
- fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
- goto next_event;
- }
-
- if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
- sample.raw_data == NULL) {
- fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
- perf_evsel__name(evsel), sample.tid,
- sample.cpu, sample.raw_size);
- goto next_event;
- }
-
- handler = evsel->handler;
- handler(trace, evsel, event, &sample);
+ trace__handle_event(trace, event, &sample);
next_event:
perf_evlist__mmap_consume(evlist, i);
if (interrupted)
goto out_disable;
+
+ if (done && !draining) {
+ perf_evlist__disable(evlist);
+ draining = true;
+ }
}
}
@@ -2180,6 +2295,8 @@ next_event:
}
out_disable:
+ thread__zput(trace->current);
+
perf_evlist__disable(evlist);
if (!err) {
@@ -2197,7 +2314,7 @@ out_disable:
out_delete_evlist:
perf_evlist__delete(evlist);
-out:
+ trace->evlist = NULL;
trace->live = false;
return err;
}
@@ -2235,6 +2352,7 @@ static int trace__replay(struct trace *trace)
struct perf_data_file file = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
+ .force = trace->force,
};
struct perf_session *session;
struct perf_evsel *evsel;
@@ -2309,7 +2427,7 @@ static int trace__replay(struct trace *trace)
setup_pager();
- err = perf_session__process_events(session, &trace->tool);
+ err = perf_session__process_events(session);
if (err)
pr_err("Failed to process events, error %d", err);
@@ -2434,6 +2552,38 @@ static int trace__set_duration(const struct option *opt, const char *str,
return 0;
}
+static int trace__set_filter_pids(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ int ret = -1;
+ size_t i;
+ struct trace *trace = opt->value;
+ /*
+ * FIXME: introduce an intarray class, parse the csv directly and create a
+ * { int nr, int entries[] } struct...
+ */
+ struct intlist *list = intlist__new(str);
+
+ if (list == NULL)
+ return -1;
+
+ i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
+ trace->filter_pids.entries = calloc(i, sizeof(pid_t));
+
+ if (trace->filter_pids.entries == NULL)
+ goto out;
+
+ trace->filter_pids.entries[0] = getpid();
+
+ for (i = 1; i < trace->filter_pids.nr; ++i)
+ trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
+
+ intlist__delete(list);
+ ret = 0;
+out:
+ return ret;
+}
+
static int trace__open_output(struct trace *trace, const char *filename)
{
struct stat st;
@@ -2468,9 +2618,17 @@ static int parse_pagefaults(const struct option *opt, const char *str,
return 0;
}
+static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel)
+ evsel->handler = handler;
+}
+
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
- const char * const trace_usage[] = {
+ const char *trace_usage[] = {
"perf trace [<options>] [<command>]",
"perf trace [<options>] -- <command> [<options>]",
"perf trace record [<options>] [<command>]",
@@ -2502,6 +2660,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
const char *output_name = NULL;
const char *ev_qualifier_str = NULL;
const struct option trace_options[] = {
+ OPT_CALLBACK(0, "event", &trace.evlist, "event",
+ "event selector. use 'perf list' to list available events",
+ parse_events_option),
OPT_BOOLEAN(0, "comm", &trace.show_comm,
"show the thread COMM next to its id"),
OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
@@ -2513,6 +2674,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
"trace events on existing process id"),
OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
"trace events on existing thread id"),
+ OPT_CALLBACK(0, "filter-pids", &trace, "float",
+ "show only events with duration > N.M ms", trace__set_filter_pids),
OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
@@ -2538,19 +2701,36 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
"Trace pagefaults", parse_pagefaults, "maj"),
OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
+ OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
OPT_END()
};
+ const char * const trace_subcommands[] = { "record", NULL };
int err;
char bf[BUFSIZ];
- argc = parse_options(argc, argv, trace_options, trace_usage,
- PARSE_OPT_STOP_AT_NON_OPTION);
+ signal(SIGSEGV, sighandler_dump_stack);
+ signal(SIGFPE, sighandler_dump_stack);
+
+ trace.evlist = perf_evlist__new();
+ if (trace.evlist == NULL) {
+ pr_err("Not enough memory to run!\n");
+ return -ENOMEM;
+ }
+
+ argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
+ trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
}
+ if (trace.evlist->nr_entries > 0)
+ evlist__set_evsel_handler(trace.evlist, trace__event_handler);
+
if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
return trace__record(&trace, argc-1, &argv[1]);
@@ -2558,7 +2738,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
if (trace.summary_only)
trace.summary = trace.summary_only;
- if (!trace.trace_syscalls && !trace.trace_pgfaults) {
+ if (!trace.trace_syscalls && !trace.trace_pgfaults &&
+ trace.evlist->nr_entries == 0 /* Was --events used? */) {
pr_err("Please specify something to trace.\n");
return -1;
}
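The builtin-trace.c hunks above replace direct casts into sample->raw_data with memcpy() into an aligned local, both in the TP_UINT_FIELD macros and in syscall__scnprintf_args(). The reason is spelled out in the added comment: the payload follows a u32 size field, so it is only 4-byte aligned, and an 8-byte load straight from it can fault on strict-alignment architectures. A minimal, self-contained demonstration of the same technique (a sketch, not the perf code):

/*
 * Read a u64 from a possibly misaligned buffer position. memcpy() lets
 * the compiler emit whatever byte/word accesses are safe on the target,
 * instead of a single aligned 64-bit load through a cast pointer.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t read_u64_unaligned(const unsigned char *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));   /* safe regardless of p's alignment */
	return v;
}

int main(void)
{
	unsigned char buf[4 + 8];   /* u32 "size" field followed by payload */
	uint64_t val = 0x1122334455667788ULL;

	memcpy(buf + 4, &val, sizeof(val));   /* payload starts at offset 4 */
	printf("0x%llx\n", (unsigned long long)read_u64_unaligned(buf + 4));
	return 0;
}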
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index b210d62907e4..3688ad29085f 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -37,6 +37,7 @@ extern int cmd_test(int argc, const char **argv, const char *prefix);
extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_inject(int argc, const char **argv, const char *prefix);
extern int cmd_mem(int argc, const char **argv, const char *prefix);
+extern int cmd_data(int argc, const char **argv, const char *prefix);
extern int find_scripts(char **scripts_array, char **scripts_path_array);
#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 0906fc401c52..00fcaf8a5b8d 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -7,6 +7,7 @@ perf-archive mainporcelain common
perf-bench mainporcelain common
perf-buildid-cache mainporcelain common
perf-buildid-list mainporcelain common
+perf-data mainporcelain common
perf-diff mainporcelain common
perf-evlist mainporcelain common
perf-inject mainporcelain common
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index cc224080b525..59a98c643240 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -11,19 +11,26 @@ ifneq ($(obj-perf),)
obj-perf := $(abspath $(obj-perf))/
endif
-LIB_INCLUDE := $(srctree)/tools/lib/
+$(shell echo -n > .config-detected)
+detected = $(shell echo "$(1)=y" >> .config-detected)
+detected_var = $(shell echo "$(1)=$($(1))" >> .config-detected)
+
CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
include $(src-perf)/config/Makefile.arch
+$(call detected_var,ARCH)
+
NO_PERF_REGS := 1
# Additional ARCH settings for x86
ifeq ($(ARCH),x86)
+ $(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
+ $(call detected,CONFIG_X86_64)
else
LIBUNWIND_LIBS = -lunwind -lunwind-x86
endif
@@ -40,6 +47,10 @@ ifeq ($(ARCH),arm64)
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
+ifeq ($(NO_PERF_REGS),0)
+ $(call detected,CONFIG_PERF_REGS)
+endif
+
# So far there's only x86 and arm libdw unwind support merged in perf.
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
@@ -84,6 +95,17 @@ ifndef NO_LIBELF
FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw
endif
+ifdef LIBBABELTRACE
+ # for linking with debug library, run like:
+ # make DEBUG=1 LIBBABELTRACE_DIR=/opt/libbabeltrace/
+ ifdef LIBBABELTRACE_DIR
+ LIBBABELTRACE_CFLAGS := -I$(LIBBABELTRACE_DIR)/include
+ LIBBABELTRACE_LDFLAGS := -L$(LIBBABELTRACE_DIR)/lib
+ endif
+ FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
+endif
+
# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
@@ -114,6 +136,8 @@ ifdef PARSER_DEBUG
PARSER_DEBUG_BISON := -t
PARSER_DEBUG_FLEX := -d
CFLAGS += -DPARSER_DEBUG
+ $(call detected_var,PARSER_DEBUG_BISON)
+ $(call detected_var,PARSER_DEBUG_FLEX)
endif
ifndef NO_LIBPYTHON
@@ -152,121 +176,7 @@ LDFLAGS += -Wl,-z,noexecstack
EXTLIBS = -lpthread -lrt -lm -ldl
-ifneq ($(OUTPUT),)
- OUTPUT_FEATURES = $(OUTPUT)config/feature-checks/
- $(shell mkdir -p $(OUTPUT_FEATURES))
-endif
-
-feature_check = $(eval $(feature_check_code))
-define feature_check_code
- feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C config/feature-checks test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
-endef
-
-feature_set = $(eval $(feature_set_code))
-define feature_set_code
- feature-$(1) := 1
-endef
-
-#
-# Build the feature check binaries in parallel, ignore errors, ignore return value and suppress output:
-#
-
-#
-# Note that this is not a complete list of all feature tests, just
-# those that are typically built on a fully configured system.
-#
-# [ Feature tests not mentioned here have to be built explicitly in
-# the rule that uses them - an example for that is the 'bionic'
-# feature check. ]
-#
-CORE_FEATURE_TESTS = \
- backtrace \
- dwarf \
- fortify-source \
- sync-compare-and-swap \
- glibc \
- gtk2 \
- gtk2-infobar \
- libaudit \
- libbfd \
- libelf \
- libelf-getphdrnum \
- libelf-mmap \
- libnuma \
- libperl \
- libpython \
- libpython-version \
- libslang \
- libunwind \
- pthread-attr-setaffinity-np \
- stackprotector-all \
- timerfd \
- libdw-dwarf-unwind \
- zlib
-
-LIB_FEATURE_TESTS = \
- dwarf \
- glibc \
- gtk2 \
- libaudit \
- libbfd \
- libelf \
- libnuma \
- libperl \
- libpython \
- libslang \
- libunwind \
- libdw-dwarf-unwind \
- zlib
-
-VF_FEATURE_TESTS = \
- backtrace \
- fortify-source \
- sync-compare-and-swap \
- gtk2-infobar \
- libelf-getphdrnum \
- libelf-mmap \
- libpython-version \
- pthread-attr-setaffinity-np \
- stackprotector-all \
- timerfd \
- libunwind-debug-frame \
- bionic \
- liberty \
- liberty-z \
- cplus-demangle \
- compile-32 \
- compile-x32
-
-# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
-# If in the future we need per-feature checks/flags for features not
-# mentioned in this list we need to refactor this ;-).
-set_test_all_flags = $(eval $(set_test_all_flags_code))
-define set_test_all_flags_code
- FEATURE_CHECK_CFLAGS-all += $(FEATURE_CHECK_CFLAGS-$(1))
- FEATURE_CHECK_LDFLAGS-all += $(FEATURE_CHECK_LDFLAGS-$(1))
-endef
-
-$(foreach feat,$(CORE_FEATURE_TESTS),$(call set_test_all_flags,$(feat)))
-
-#
-# Special fast-path for the 'all features are available' case:
-#
-$(call feature_check,all,$(MSG))
-
-#
-# Just in case the build freshly failed, make sure we print the
-# feature matrix:
-#
-ifeq ($(feature-all), 1)
- #
- # test-all.c passed - just set all the core feature flags to 1:
- #
- $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_set,$(feat)))
-else
- $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS=$(LDFLAGS) -i -j -C config/feature-checks $(addsuffix .bin,$(CORE_FEATURE_TESTS)) >/dev/null 2>&1)
- $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_check,$(feat)))
-endif
+include $(srctree)/tools/build/Makefile.feature
ifeq ($(feature-stackprotector-all), 1)
CFLAGS += -fstack-protector-all
@@ -295,7 +205,7 @@ endif
CFLAGS += -I$(src-perf)/util
CFLAGS += -I$(src-perf)
-CFLAGS += -I$(LIB_INCLUDE)
+CFLAGS += -I$(srctree)/tools/lib/
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
@@ -361,6 +271,7 @@ endif # NO_LIBELF
ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBELF_SUPPORT
EXTLIBS += -lelf
+ $(call detected,CONFIG_LIBELF)
ifeq ($(feature-libelf-mmap), 1)
CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
@@ -381,6 +292,7 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
LDFLAGS += $(LIBDW_LDFLAGS)
EXTLIBS += -ldw
+ $(call detected,CONFIG_DWARF)
endif # PERF_HAVE_DWARF_REGS
endif # NO_DWARF
endif # NO_LIBELF
@@ -408,9 +320,11 @@ ifdef NO_LIBUNWIND
dwarf-post-unwind := 0
else
dwarf-post-unwind-text := libdw
+ $(call detected,CONFIG_LIBDW_DWARF_UNWIND)
endif
else
dwarf-post-unwind-text := libunwind
+ $(call detected,CONFIG_LIBUNWIND)
# Enable libunwind support by default.
ifndef NO_LIBDW_DWARF_UNWIND
NO_LIBDW_DWARF_UNWIND := 1
@@ -419,6 +333,7 @@ endif
ifeq ($(dwarf-post-unwind),1)
CFLAGS += -DHAVE_DWARF_UNWIND_SUPPORT
+ $(call detected,CONFIG_DWARF_UNWIND)
else
NO_DWARF_UNWIND := 1
endif
@@ -447,6 +362,7 @@ ifndef NO_LIBAUDIT
else
CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
EXTLIBS += -laudit
+ $(call detected,CONFIG_AUDIT)
endif
endif
@@ -463,6 +379,7 @@ ifndef NO_SLANG
CFLAGS += -I/usr/include/slang
CFLAGS += -DHAVE_SLANG_SUPPORT
EXTLIBS += -lslang
+ $(call detected,CONFIG_SLANG)
endif
endif
@@ -497,10 +414,11 @@ else
ifneq ($(feature-libperl), 1)
CFLAGS += -DNO_LIBPERL
NO_LIBPERL := 1
- msg := $(warning Missing perl devel files. Disabling perl scripting support, consider installing perl-ExtUtils-Embed);
+ msg := $(warning Missing perl devel files. Disabling perl scripting support, please install perl-ExtUtils-Embed/libperl-dev);
else
LDFLAGS += $(PERL_EMBED_LDFLAGS)
EXTLIBS += $(PERL_EMBED_LIBADD)
+ $(call detected,CONFIG_LIBPERL)
endif
endif
@@ -513,22 +431,21 @@ endif
disable-python = $(eval $(disable-python_code))
define disable-python_code
CFLAGS += -DNO_LIBPYTHON
- $(if $(1),$(warning No $(1) was found))
- $(warning Python support will not be built)
+ $(warning $1)
NO_LIBPYTHON := 1
endef
ifdef NO_LIBPYTHON
- $(call disable-python)
+ $(call disable-python,Python support disabled by user)
else
ifndef PYTHON
- $(call disable-python,python interpreter)
+ $(call disable-python,No python interpreter was found: disables Python support - please install python-devel/python-dev)
else
PYTHON_WORD := $(call shell-wordify,$(PYTHON))
ifndef PYTHON_CONFIG
- $(call disable-python,python-config tool)
+ $(call disable-python,No 'python-config' tool was found: disables Python support - please install python-devel/python-dev)
else
PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
@@ -540,7 +457,7 @@ else
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
ifneq ($(feature-libpython), 1)
- $(call disable-python,Python.h (for Python 2.x))
+ $(call disable-python,No 'Python.h' (for Python 2.x support) was found: disables Python support - please install python-devel/python-dev)
else
ifneq ($(feature-libpython-version), 1)
@@ -560,6 +477,7 @@ else
LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
EXTLIBS += $(PYTHON_EMBED_LIBADD)
LANG_BINDINGS += $(obj-perf)python/perf.so
+ $(call detected,CONFIG_LIBPYTHON)
endif
endif
endif
@@ -600,7 +518,7 @@ else
EXTLIBS += -liberty
CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
else
- msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
+ msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
CFLAGS += -DNO_DEMANGLE
endif
endif
@@ -617,11 +535,23 @@ ifndef NO_ZLIB
ifeq ($(feature-zlib), 1)
CFLAGS += -DHAVE_ZLIB_SUPPORT
EXTLIBS += -lz
+ $(call detected,CONFIG_ZLIB)
else
NO_ZLIB := 1
endif
endif
+ifndef NO_LZMA
+ ifeq ($(feature-lzma), 1)
+ CFLAGS += -DHAVE_LZMA_SUPPORT
+ EXTLIBS += -llzma
+ $(call detected,CONFIG_LZMA)
+ else
+ msg := $(warning No liblzma found, disables xz kernel module decompression, please install xz-devel/liblzma-dev);
+ NO_LZMA := 1
+ endif
+endif
+
ifndef NO_BACKTRACE
ifeq ($(feature-backtrace), 1)
CFLAGS += -DHAVE_BACKTRACE_SUPPORT
@@ -635,6 +565,7 @@ ifndef NO_LIBNUMA
else
CFLAGS += -DHAVE_LIBNUMA_SUPPORT
EXTLIBS += -lnuma
+ $(call detected,CONFIG_NUMA)
endif
endif
@@ -651,7 +582,7 @@ ifeq (${IS_64_BIT}, 1)
NO_PERF_READ_VDSO32 := 1
endif
endif
- ifneq (${IS_X86_64}, 1)
+ ifneq ($(ARCH), x86)
NO_PERF_READ_VDSOX32 := 1
endif
ifndef NO_PERF_READ_VDSOX32
@@ -667,6 +598,18 @@ else
NO_PERF_READ_VDSOX32 := 1
endif
+ifdef LIBBABELTRACE
+ $(call feature_check,libbabeltrace)
+ ifeq ($(feature-libbabeltrace), 1)
+ CFLAGS += -DHAVE_LIBBABELTRACE_SUPPORT $(LIBBABELTRACE_CFLAGS)
+ LDFLAGS += $(LIBBABELTRACE_LDFLAGS)
+ EXTLIBS += -lbabeltrace-ctf
+ $(call detected,CONFIG_LIBBABELTRACE)
+ else
+ msg := $(warning No libbabeltrace found, disables 'perf data' CTF format support, please install libbabeltrace-dev[el]/libbabeltrace-ctf-dev);
+ endif
+endif
+
# Among the variables below, these:
# perfexecdir
# template_dir
@@ -699,7 +642,7 @@ sysconfdir = $(prefix)/etc
ETC_PERFCONFIG = etc/perfconfig
endif
ifndef lib
-ifeq ($(IS_X86_64),1)
+ifeq ($(ARCH)$(IS_64_BIT), x861)
lib = lib64
else
lib = lib
@@ -735,83 +678,33 @@ plugindir=$(libdir)/traceevent/plugins
plugindir_SQ= $(subst ','\'',$(plugindir))
endif
-#
-# Print the result of the feature test:
-#
-feature_print_status = $(eval $(feature_print_status_code)) $(info $(MSG))
-
-define feature_print_status_code
- ifeq ($(feature-$(1)), 1)
- MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1))
- else
- MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
- endif
-endef
-
-feature_print_var = $(eval $(feature_print_var_code)) $(info $(MSG))
-define feature_print_var_code
+print_var = $(eval $(print_var_code)) $(info $(MSG))
+define print_var_code
MSG = $(shell printf '...%30s: %s' $(1) $($(1)))
endef
-feature_print_text = $(eval $(feature_print_text_code)) $(info $(MSG))
-define feature_print_text_code
- MSG = $(shell printf '...%30s: %s' $(1) $(2))
-endef
-
-PERF_FEATURES := $(foreach feat,$(LIB_FEATURE_TESTS),feature-$(feat)($(feature-$(feat))))
-PERF_FEATURES_FILE := $(shell touch $(OUTPUT)PERF-FEATURES; cat $(OUTPUT)PERF-FEATURES)
-
-ifeq ($(dwarf-post-unwind),1)
- PERF_FEATURES += dwarf-post-unwind($(dwarf-post-unwind-text))
-endif
-
-# The $(display_lib) controls the default detection message
-# output. It's set if:
-# - detected features differes from stored features from
-# last build (in PERF-FEATURES file)
-# - one of the $(LIB_FEATURE_TESTS) is not detected
-# - VF is enabled
-
-ifneq ("$(PERF_FEATURES)","$(PERF_FEATURES_FILE)")
- $(shell echo "$(PERF_FEATURES)" > $(OUTPUT)PERF-FEATURES)
- display_lib := 1
-endif
-
-feature_check = $(eval $(feature_check_code))
-define feature_check_code
- ifneq ($(feature-$(1)), 1)
- display_lib := 1
- endif
-endef
-
-$(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_check,$(feat)))
-
ifeq ($(VF),1)
- display_lib := 1
- display_vf := 1
-endif
-
-ifeq ($(display_lib),1)
+ $(call print_var,prefix)
+ $(call print_var,bindir)
+ $(call print_var,libdir)
+ $(call print_var,sysconfdir)
+ $(call print_var,LIBUNWIND_DIR)
+ $(call print_var,LIBDW_DIR)
$(info )
- $(info Auto-detecting system features:)
- $(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_print_status,$(feat),))
-
- ifeq ($(dwarf-post-unwind),1)
- $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
- endif
-endif
-
-ifeq ($(display_vf),1)
- $(foreach feat,$(VF_FEATURE_TESTS),$(call feature_print_status,$(feat),))
- $(info )
- $(call feature_print_var,prefix)
- $(call feature_print_var,bindir)
- $(call feature_print_var,libdir)
- $(call feature_print_var,sysconfdir)
- $(call feature_print_var,LIBUNWIND_DIR)
- $(call feature_print_var,LIBDW_DIR)
endif
-ifeq ($(display_lib),1)
- $(info )
-endif
+$(call detected_var,bindir_SQ)
+$(call detected_var,PYTHON_WORD)
+ifneq ($(OUTPUT),)
+$(call detected_var,OUTPUT)
+endif
+$(call detected_var,htmldir_SQ)
+$(call detected_var,infodir_SQ)
+$(call detected_var,mandir_SQ)
+$(call detected_var,ETC_PERFCONFIG_SQ)
+$(call detected_var,prefix_SQ)
+$(call detected_var,perfexecdir_SQ)
+$(call detected_var,LIBDIR)
+$(call detected_var,GTK_CFLAGS)
+$(call detected_var,PERL_EMBED_CCOPTS)
+$(call detected_var,PYTHON_EMBED_CCOPTS)
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
index ac8721ffa6c8..e11fbd6fae78 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/perf/config/Makefile.arch
@@ -1,32 +1,15 @@
+ifndef ARCH
+ARCH := $(shell uname -m 2>/dev/null || echo not)
+endif
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-
-RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
- -e s/arm.*/arm/ -e s/sa110/arm/ \
+ARCH := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
+ -e s/sun4u/sparc/ -e s/sparc64/sparc/ \
+ -e /arm64/!s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
-e s/tile.*/tile/ )
-# Additional ARCH settings for x86
-ifeq ($(RAW_ARCH),i386)
- ARCH ?= x86
-endif
-
-ifeq ($(RAW_ARCH),x86_64)
- ARCH ?= x86
-
- ifneq (, $(findstring m32,$(CFLAGS)))
- RAW_ARCH := x86_32
- endif
-endif
-
-ifeq ($(RAW_ARCH),sparc64)
- ARCH ?= sparc
-endif
-
-ARCH ?= $(RAW_ARCH)
-
LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
ifeq ($(LP64), 1)
IS_64_BIT := 1
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index 7076a62d0ff7..c16ce833079c 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -175,6 +175,5 @@ _ge-abspath = $(if $(is-executable),$(1))
define get-executable-or-default
$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
endef
-_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
-_gea_warn = $(warning The path '$(1)' is not executable.)
+_ge_attempt = $(if $(get-executable),$(get-executable),$(call _gea_err,$(2)))
_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index 33569847fdcc..3ba80b2359cc 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -47,8 +47,16 @@ __my_reassemble_comp_words_by_ref()
done
}
-type _get_comp_words_by_ref &>/dev/null ||
-_get_comp_words_by_ref()
+# Define preload_get_comp_words_by_ref="false", if the function
+# __perf_get_comp_words_by_ref() is required instead.
+preload_get_comp_words_by_ref="true"
+
+if [ $preload_get_comp_words_by_ref = "true" ]; then
+ type _get_comp_words_by_ref &>/dev/null ||
+ preload_get_comp_words_by_ref="false"
+fi
+[ $preload_get_comp_words_by_ref = "true" ] ||
+__perf_get_comp_words_by_ref()
{
local exclude cur_ words_ cword_
if [ "$1" = "-n" ]; then
@@ -76,8 +84,16 @@ _get_comp_words_by_ref()
done
}
-type __ltrim_colon_completions &>/dev/null ||
-__ltrim_colon_completions()
+# Define preload__ltrim_colon_completions="false", if the function
+# __perf__ltrim_colon_completions() is required instead.
+preload__ltrim_colon_completions="true"
+
+if [ $preload__ltrim_colon_completions = "true" ]; then
+ type __ltrim_colon_completions &>/dev/null ||
+ preload__ltrim_colon_completions="false"
+fi
+[ $preload__ltrim_colon_completions = "true" ] ||
+__perf__ltrim_colon_completions()
{
if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then
# Remove colon-word prefix from COMPREPLY items
@@ -97,7 +113,32 @@ __perfcomp ()
__perfcomp_colon ()
{
__perfcomp "$1" "$2"
- __ltrim_colon_completions $cur
+ if [ $preload__ltrim_colon_completions = "true" ]; then
+ __ltrim_colon_completions $cur
+ else
+ __perf__ltrim_colon_completions $cur
+ fi
+}
+
+__perf_prev_skip_opts ()
+{
+ local i cmd_ cmds_
+
+ let i=cword-1
+ cmds_=$($cmd $1 --list-cmds)
+ prev_skip_opts=()
+ while [ $i -ge 0 ]; do
+ if [[ ${words[i]} == $1 ]]; then
+ return
+ fi
+ for cmd_ in $cmds_; do
+ if [[ ${words[i]} == $cmd_ ]]; then
+ prev_skip_opts=${words[i]}
+ return
+ fi
+ done
+ ((i--))
+ done
}
__perf_main ()
@@ -107,29 +148,36 @@ __perf_main ()
cmd=${words[0]}
COMPREPLY=()
+ # Skip options backward and find the last perf command
+ __perf_prev_skip_opts
# List perf subcommands or long options
- if [ $cword -eq 1 ]; then
+ if [ -z $prev_skip_opts ]; then
if [[ $cur == --* ]]; then
- __perfcomp '--help --version \
- --exec-path --html-path --paginate --no-pager \
- --perf-dir --work-tree --debugfs-dir' -- "$cur"
+ cmds=$($cmd --list-opts)
else
cmds=$($cmd --list-cmds)
- __perfcomp "$cmds" "$cur"
fi
+ __perfcomp "$cmds" "$cur"
# List possible events for -e option
- elif [[ $prev == "-e" && "${words[1]}" == @(record|stat|top) ]]; then
+ elif [[ $prev == @("-e"|"--event") &&
+ $prev_skip_opts == @(record|stat|top) ]]; then
evts=$($cmd list --raw-dump)
__perfcomp_colon "$evts" "$cur"
- # List subcommands for perf commands
- elif [[ $prev == @(kvm|kmem|mem|lock|sched) ]]; then
- subcmds=$($cmd $prev --list-cmds)
- __perfcomp_colon "$subcmds" "$cur"
- # List long option names
- elif [[ $cur == --* ]]; then
- subcmd=${words[1]}
- opts=$($cmd $subcmd --list-opts)
- __perfcomp "$opts" "$cur"
+ else
+ # List subcommands for perf commands
+ if [[ $prev_skip_opts == @(kvm|kmem|mem|lock|sched|
+ |data|help|script|test|timechart|trace) ]]; then
+ subcmds=$($cmd $prev_skip_opts --list-cmds)
+ __perfcomp_colon "$subcmds" "$cur"
+ fi
+ # List long option names
+ if [[ $cur == --* ]]; then
+ subcmd=$prev_skip_opts
+ __perf_prev_skip_opts $subcmd
+ subcmd=$subcmd" "$prev_skip_opts
+ opts=$($cmd $subcmd --list-opts)
+ __perfcomp "$opts" "$cur"
+ fi
fi
}
@@ -198,7 +246,11 @@ type perf &>/dev/null &&
_perf()
{
local cur words cword prev
- _get_comp_words_by_ref -n =: cur words cword prev
+ if [ $preload_get_comp_words_by_ref = "true" ]; then
+ _get_comp_words_by_ref -n =: cur words cword prev
+ else
+ __perf_get_comp_words_by_ref -n =: cur words cword prev
+ fi
__perf_main
} &&
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 3700a7faca6c..b857fcbd00cf 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -13,6 +13,7 @@
#include "util/quote.h"
#include "util/run-command.h"
#include "util/parse-events.h"
+#include "util/parse-options.h"
#include "util/debug.h"
#include <api/fs/debugfs.h>
#include <pthread.h>
@@ -62,6 +63,7 @@ static struct cmd_struct commands[] = {
#endif
{ "inject", cmd_inject, 0 },
{ "mem", cmd_mem, 0 },
+ { "data", cmd_data, 0 },
};
struct pager_config {
@@ -124,6 +126,23 @@ static void commit_pager_choice(void)
}
}
+struct option options[] = {
+ OPT_ARGUMENT("help", "help"),
+ OPT_ARGUMENT("version", "version"),
+ OPT_ARGUMENT("exec-path", "exec-path"),
+ OPT_ARGUMENT("html-path", "html-path"),
+ OPT_ARGUMENT("paginate", "paginate"),
+ OPT_ARGUMENT("no-pager", "no-pager"),
+ OPT_ARGUMENT("perf-dir", "perf-dir"),
+ OPT_ARGUMENT("work-tree", "work-tree"),
+ OPT_ARGUMENT("debugfs-dir", "debugfs-dir"),
+ OPT_ARGUMENT("buildid-dir", "buildid-dir"),
+ OPT_ARGUMENT("list-cmds", "list-cmds"),
+ OPT_ARGUMENT("list-opts", "list-opts"),
+ OPT_ARGUMENT("debug", "debug"),
+ OPT_END()
+};
+
static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;
@@ -222,6 +241,16 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
struct cmd_struct *p = commands+i;
printf("%s ", p->cmd);
}
+ putchar('\n');
+ exit(0);
+ } else if (!strcmp(cmd, "--list-opts")) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(options)-1; i++) {
+ struct option *p = options+i;
+ printf("--%s ", p->long_name);
+ }
+ putchar('\n');
exit(0);
} else if (!strcmp(cmd, "--debug")) {
if (*argc < 2) {
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 1dabb8553499..e14bb637255c 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -29,7 +29,7 @@ static inline unsigned long long rdclock(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
-#define MAX_NR_CPUS 256
+#define MAX_NR_CPUS 1024
extern const char *input_name;
extern bool perf_host, perf_guest;
@@ -53,6 +53,7 @@ struct record_opts {
bool sample_time;
bool period;
bool sample_intr_regs;
+ bool running_time;
unsigned int freq;
unsigned int mmap_pages;
unsigned int user_freq;
@@ -61,6 +62,8 @@ struct record_opts {
u64 user_interval;
bool sample_transaction;
unsigned initial_delay;
+ bool use_clockid;
+ clockid_t clockid;
};
struct option;
diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
new file mode 100644
index 000000000000..41efd7e368b3
--- /dev/null
+++ b/tools/perf/scripts/Build
@@ -0,0 +1,2 @@
+libperf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
+libperf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
new file mode 100644
index 000000000000..928e110179cb
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -0,0 +1,3 @@
+libperf-y += Context.o
+
+CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
new file mode 100644
index 000000000000..aefc15c9444a
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -0,0 +1,3 @@
+libperf-y += Context.o
+
+CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
new file mode 100644
index 000000000000..6a8801b32017
--- /dev/null
+++ b/tools/perf/tests/Build
@@ -0,0 +1,43 @@
+perf-y += builtin-test.o
+perf-y += parse-events.o
+perf-y += dso-data.o
+perf-y += attr.o
+perf-y += vmlinux-kallsyms.o
+perf-y += open-syscall.o
+perf-y += open-syscall-all-cpus.o
+perf-y += open-syscall-tp-fields.o
+perf-y += mmap-basic.o
+perf-y += perf-record.o
+perf-y += rdpmc.o
+perf-y += evsel-roundtrip-name.o
+perf-y += evsel-tp-sched.o
+perf-y += fdarray.o
+perf-y += pmu.o
+perf-y += hists_common.o
+perf-y += hists_link.o
+perf-y += hists_filter.o
+perf-y += hists_output.o
+perf-y += hists_cumulate.o
+perf-y += python-use.o
+perf-y += bp_signal.o
+perf-y += bp_signal_overflow.o
+perf-y += task-exit.o
+perf-y += sw-clock.o
+perf-y += mmap-thread-lookup.o
+perf-y += thread-mg-share.o
+perf-y += switch-tracking.o
+perf-y += keep-tracking.o
+perf-y += code-reading.o
+perf-y += sample-parsing.o
+perf-y += parse-no-sample-id-all.o
+perf-y += kmod-path.o
+
+perf-$(CONFIG_X86) += perf-time-to-tsc.o
+
+ifeq ($(ARCH),$(filter $(ARCH),x86 arm))
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+endif
+
+CFLAGS_attr.o += -DBINDIR="BUILD_STR($(bindir_SQ))" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
+CFLAGS_python-use.o += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
+CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index d3095dafed36..7e6d74946e04 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8
cpu=*
type=0|1
-size=104
+size=112
config=0
sample_period=4000
sample_type=263
diff --git a/tools/perf/tests/attr/base-stat b/tools/perf/tests/attr/base-stat
index 872ed7e24c7c..f4cf148f14cb 100644
--- a/tools/perf/tests/attr/base-stat
+++ b/tools/perf/tests/attr/base-stat
@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8
cpu=*
type=0
-size=104
+size=112
config=0
sample_period=0
sample_type=0
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 4b7d9ab0f049..4f4098167112 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -167,6 +167,10 @@ static struct test {
.func = test__fdarray__add,
},
{
+ .desc = "Test kmod_path__parse function",
+ .func = test__kmod_path__parse,
+ },
+ {
.func = NULL,
},
};
@@ -291,7 +295,7 @@ static int perf_test__list(int argc, const char **argv)
int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
- const char * const test_usage[] = {
+ const char *test_usage[] = {
"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
NULL,
};
@@ -302,13 +306,14 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
"be more verbose (show symbol address, etc)"),
OPT_END()
};
+ const char * const test_subcommands[] = { "list", NULL };
struct intlist *skiplist = NULL;
int ret = hists__init();
if (ret < 0)
return ret;
- argc = parse_options(argc, argv, test_options, test_usage, 0);
+ argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
if (argc >= 1 && !strcmp(argv[0], "list"))
return perf_test__list(argc, argv);
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index caaf37f079b1..513e5febbe5a 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -112,6 +112,9 @@ int test__dso_data(void)
dso = dso__new((const char *)file);
+ TEST_ASSERT_VAL("Failed to access to dso",
+ dso__data_fd(dso, &machine) >= 0);
+
/* Basic 10 bytes tests. */
for (i = 0; i < ARRAY_SIZE(offsets); i++) {
struct test_data_offset *data = &offsets[i];
@@ -243,8 +246,8 @@ int test__dso_data_cache(void)
limit = nr * 4;
TEST_ASSERT_VAL("failed to set file limit", !set_fd_limit(limit));
- /* and this is now our dso open FDs limit + 1 extra */
- dso_cnt = limit / 2 + 1;
+ /* and this is now our dso open FDs limit */
+ dso_cnt = limit / 2;
TEST_ASSERT_VAL("failed to create dsos\n",
!dsos__create(dso_cnt, TEST_FILE_SIZE));
@@ -252,13 +255,13 @@ int test__dso_data_cache(void)
struct dso *dso = dsos[i];
/*
- * Open dsos via dso__data_fd or dso__data_read_offset.
- * Both opens the data file and keep it open.
+ * Open dsos via dso__data_fd(), which opens the data
+ * file and keeps it open (unless the open file limit is hit).
*/
+ fd = dso__data_fd(dso, &machine);
+ TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
if (i % 2) {
- fd = dso__data_fd(dso, &machine);
- TEST_ASSERT_VAL("failed to get fd", fd > 0);
- } else {
#define BUFSIZE 10
u8 buf[BUFSIZE];
ssize_t n;
@@ -268,7 +271,10 @@ int test__dso_data_cache(void)
}
}
- /* open +1 dso over the allowed limit */
+ /* verify the first one is already open */
+ TEST_ASSERT_VAL("dsos[0] is not open", dsos[0]->data.fd != -1);
+
+ /* open +1 dso to reach the allowed limit */
fd = dso__data_fd(dsos[i], &machine);
TEST_ASSERT_VAL("failed to get fd", fd > 0);
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
new file mode 100644
index 000000000000..e8d7cbb9320c
--- /dev/null
+++ b/tools/perf/tests/kmod-path.c
@@ -0,0 +1,73 @@
+#include <stdbool.h>
+#include "tests.h"
+#include "dso.h"
+#include "debug.h"
+
+static int test(const char *path, bool alloc_name, bool alloc_ext,
+ bool kmod, bool comp, const char *name, const char *ext)
+{
+ struct kmod_path m;
+
+ memset(&m, 0x0, sizeof(m));
+
+ TEST_ASSERT_VAL("kmod_path__parse",
+ !__kmod_path__parse(&m, path, alloc_name, alloc_ext));
+
+ pr_debug("%s - alloc name %d, alloc ext %d, kmod %d, comp %d, name '%s', ext '%s'\n",
+ path, alloc_name, alloc_ext, m.kmod, m.comp, m.name, m.ext);
+
+ TEST_ASSERT_VAL("wrong kmod", m.kmod == kmod);
+ TEST_ASSERT_VAL("wrong comp", m.comp == comp);
+
+ if (ext)
+ TEST_ASSERT_VAL("wrong ext", m.ext && !strcmp(ext, m.ext));
+ else
+ TEST_ASSERT_VAL("wrong ext", !m.ext);
+
+ if (name)
+ TEST_ASSERT_VAL("wrong name", m.name && !strcmp(name, m.name));
+ else
+ TEST_ASSERT_VAL("wrong name", !m.name);
+
+ free(m.name);
+ free(m.ext);
+ return 0;
+}
+
+#define T(path, an, ae, k, c, n, e) \
+ TEST_ASSERT_VAL("failed", !test(path, an, ae, k, c, n, e))
+
+int test__kmod_path__parse(void)
+{
+ /* path alloc_name alloc_ext kmod comp name ext */
+ T("/xxxx/xxxx/x-x.ko", true , true , true, false, "[x_x]", NULL);
+ T("/xxxx/xxxx/x-x.ko", false , true , true, false, NULL , NULL);
+ T("/xxxx/xxxx/x-x.ko", true , false , true, false, "[x_x]", NULL);
+ T("/xxxx/xxxx/x-x.ko", false , false , true, false, NULL , NULL);
+
+ /* path alloc_name alloc_ext kmod comp name ext */
+ T("/xxxx/xxxx/x.ko.gz", true , true , true, true, "[x]", "gz");
+ T("/xxxx/xxxx/x.ko.gz", false , true , true, true, NULL , "gz");
+ T("/xxxx/xxxx/x.ko.gz", true , false , true, true, "[x]", NULL);
+ T("/xxxx/xxxx/x.ko.gz", false , false , true, true, NULL , NULL);
+
+ /* path alloc_name alloc_ext kmod comp name ext */
+ T("/xxxx/xxxx/x.gz", true , true , false, true, "x.gz" ,"gz");
+ T("/xxxx/xxxx/x.gz", false , true , false, true, NULL ,"gz");
+ T("/xxxx/xxxx/x.gz", true , false , false, true, "x.gz" , NULL);
+ T("/xxxx/xxxx/x.gz", false , false , false, true, NULL , NULL);
+
+ /* path alloc_name alloc_ext kmod comp name ext */
+ T("x.gz", true , true , false, true, "x.gz", "gz");
+ T("x.gz", false , true , false, true, NULL , "gz");
+ T("x.gz", true , false , false, true, "x.gz", NULL);
+ T("x.gz", false , false , false, true, NULL , NULL);
+
+ /* path alloc_name alloc_ext kmod comp name ext */
+ T("x.ko.gz", true , true , true, true, "[x]", "gz");
+ T("x.ko.gz", false , true , true, true, NULL , "gz");
+ T("x.ko.gz", true , false , true, true, "[x]", NULL);
+ T("x.ko.gz", false , false , true, true, NULL , NULL);
+
+ return 0;
+}
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 75709d2b17b4..bff85324f799 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -5,7 +5,7 @@ include config/Makefile.arch
# FIXME looks like x86 is the only arch running tests ;-)
# we need some IS_(32/64) flag to make this generic
-ifeq ($(IS_X86_64),1)
+ifeq ($(ARCH)$(IS_64_BIT), x861)
lib = lib64
else
lib = lib
diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c
index 8fa82d1700c7..3ec885c48f8f 100644
--- a/tools/perf/tests/open-syscall-all-cpus.c
+++ b/tools/perf/tests/open-syscall-all-cpus.c
@@ -29,7 +29,12 @@ int test__open_syscall_event_on_all_cpus(void)
evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
if (evsel == NULL) {
- pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+ if (tracefs_configured())
+ pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
+ else if (debugfs_configured())
+ pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+ else
+ pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
goto out_thread_map_delete;
}
diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c
index a33b2daae40f..07aa319bf334 100644
--- a/tools/perf/tests/open-syscall.c
+++ b/tools/perf/tests/open-syscall.c
@@ -18,7 +18,12 @@ int test__open_syscall_event(void)
evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
if (evsel == NULL) {
- pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+ if (tracefs_configured())
+ pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
+ else if (debugfs_configured())
+ pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+ else
+ pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
goto out_thread_map_delete;
}
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 1cdab0ce00e2..3de744961739 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -3,6 +3,7 @@
#include "evsel.h"
#include "evlist.h"
#include <api/fs/fs.h>
+#include <api/fs/tracefs.h>
#include <api/fs/debugfs.h>
#include "tests.h"
#include "debug.h"
@@ -294,6 +295,36 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
return test__checkevent_genhw(evlist);
}
+static int test__checkevent_exclude_idle_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude idle", evsel->attr.exclude_idle);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_exclude_idle_modifier_1(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude idle", evsel->attr.exclude_idle);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1192,11 +1223,19 @@ static int count_tracepoints(void)
{
char events_path[PATH_MAX];
struct dirent *events_ent;
+ const char *mountpoint;
DIR *events_dir;
int cnt = 0;
- scnprintf(events_path, PATH_MAX, "%s/tracing/events",
- debugfs_find_mountpoint());
+ mountpoint = tracefs_find_mountpoint();
+ if (mountpoint) {
+ scnprintf(events_path, PATH_MAX, "%s/events",
+ mountpoint);
+ } else {
+ mountpoint = debugfs_find_mountpoint();
+ scnprintf(events_path, PATH_MAX, "%s/tracing/events",
+ mountpoint);
+ }
events_dir = opendir(events_path);
@@ -1485,6 +1524,16 @@ static struct evlist_test test__events[] = {
.id = 100,
},
#endif
+ {
+ .name = "instructions:I",
+ .check = test__checkevent_exclude_idle_modifier,
+ .id = 45,
+ },
+ {
+ .name = "instructions:kIG",
+ .check = test__checkevent_exclude_idle_modifier_1,
+ .id = 46,
+ },
};
static struct evlist_test test__events_pmu[] = {
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 00e776a87a9c..52758a33f64c 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -51,6 +51,7 @@ int test__hists_cumulate(void);
int test__switch_tracking(void);
int test__fdarray__filter(void);
int test__fdarray__add(void);
+int test__kmod_path__parse(void);
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
new file mode 100644
index 000000000000..0a73538c0441
--- /dev/null
+++ b/tools/perf/ui/Build
@@ -0,0 +1,14 @@
+libperf-y += setup.o
+libperf-y += helpline.o
+libperf-y += progress.o
+libperf-y += util.o
+libperf-y += hist.o
+libperf-y += stdio/hist.o
+
+CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
+
+libperf-$(CONFIG_SLANG) += browser.o
+libperf-$(CONFIG_SLANG) += browsers/
+libperf-$(CONFIG_SLANG) += tui/
+
+CFLAGS_browser.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
new file mode 100644
index 000000000000..de223f5bed58
--- /dev/null
+++ b/tools/perf/ui/browsers/Build
@@ -0,0 +1,10 @@
+libperf-y += annotate.o
+libperf-y += hists.o
+libperf-y += map.o
+libperf-y += scripts.o
+libperf-y += header.o
+
+CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
+CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
+CFLAGS_map.o += -DENABLE_SLFUTURE_CONST
+CFLAGS_scripts.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 9d32e3c0cfee..e5250eb2dd57 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -829,10 +829,16 @@ out:
return key;
}
+int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt)
+{
+ return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt);
+}
+
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
- return symbol__tui_annotate(he->ms.sym, he->ms.map, evsel, hbt);
+ return map_symbol__tui_annotate(&he->ms, evsel, hbt);
}
static void annotate_browser__mark_jump_targets(struct annotate_browser *browser,
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 788506eef567..995b7a8596b1 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -48,6 +48,24 @@ static bool hist_browser__has_filter(struct hist_browser *hb)
return hists__has_filter(hb->hists) || hb->min_pcnt;
}
+static int hist_browser__get_folding(struct hist_browser *browser)
+{
+ struct rb_node *nd;
+ struct hists *hists = browser->hists;
+ int unfolded_rows = 0;
+
+ for (nd = rb_first(&hists->entries);
+ (nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
+ nd = rb_next(nd)) {
+ struct hist_entry *he =
+ rb_entry(nd, struct hist_entry, rb_node);
+
+ if (he->ms.unfolded)
+ unfolded_rows += he->nr_rows;
+ }
+ return unfolded_rows;
+}
+
static u32 hist_browser__nr_entries(struct hist_browser *hb)
{
u32 nr_entries;
@@ -57,6 +75,7 @@ static u32 hist_browser__nr_entries(struct hist_browser *hb)
else
nr_entries = hb->hists->nr_entries;
+ hb->nr_callchain_rows = hist_browser__get_folding(hb);
return nr_entries + hb->nr_callchain_rows;
}
@@ -492,6 +511,7 @@ static void hist_browser__show_callchain_entry(struct hist_browser *browser,
{
int color, width;
char folded_sign = callchain_list__folded(chain);
+ bool show_annotated = browser->show_dso && chain->ms.sym && symbol__annotation(chain->ms.sym)->src;
color = HE_COLORSET_NORMAL;
width = browser->b.width - (offset + 2);
@@ -504,7 +524,8 @@ static void hist_browser__show_callchain_entry(struct hist_browser *browser,
ui_browser__set_color(&browser->b, color);
hist_browser__gotorc(browser, row, 0);
slsmg_write_nstring(" ", offset);
- slsmg_printf("%c ", folded_sign);
+ slsmg_printf("%c", folded_sign);
+ ui_browser__write_graph(&browser->b, show_annotated ? SLSMG_RARROW_CHAR : ' ');
slsmg_write_nstring(str, width);
}
@@ -1467,7 +1488,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
perf_hpp__set_user_width(symbol_conf.col_width_list_str);
while (1) {
- const struct thread *thread = NULL;
+ struct thread *thread = NULL;
const struct dso *dso = NULL;
int choice = 0,
annotate = -2, zoom_dso = -2, zoom_thread = -2,
@@ -1593,28 +1614,30 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
if (!sort__has_sym)
goto add_exit_option;
+ if (browser->selection == NULL)
+ goto skip_annotation;
+
if (sort__mode == SORT_MODE__BRANCH) {
bi = browser->he_selection->branch_info;
- if (browser->selection != NULL &&
- bi &&
- bi->from.sym != NULL &&
+
+ if (bi == NULL)
+ goto skip_annotation;
+
+ if (bi->from.sym != NULL &&
!bi->from.map->dso->annotate_warned &&
- asprintf(&options[nr_options], "Annotate %s",
- bi->from.sym->name) > 0)
+ asprintf(&options[nr_options], "Annotate %s", bi->from.sym->name) > 0) {
annotate_f = nr_options++;
+ }
- if (browser->selection != NULL &&
- bi &&
- bi->to.sym != NULL &&
+ if (bi->to.sym != NULL &&
!bi->to.map->dso->annotate_warned &&
(bi->to.sym != bi->from.sym ||
bi->to.map->dso != bi->from.map->dso) &&
- asprintf(&options[nr_options], "Annotate %s",
- bi->to.sym->name) > 0)
+ asprintf(&options[nr_options], "Annotate %s", bi->to.sym->name) > 0) {
annotate_t = nr_options++;
+ }
} else {
- if (browser->selection != NULL &&
- browser->selection->sym != NULL &&
+ if (browser->selection->sym != NULL &&
!browser->selection->map->dso->annotate_warned) {
struct annotation *notes;
@@ -1622,11 +1645,12 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
if (notes->src &&
asprintf(&options[nr_options], "Annotate %s",
- browser->selection->sym->name) > 0)
+ browser->selection->sym->name) > 0) {
annotate = nr_options++;
+ }
}
}
-
+skip_annotation:
if (thread != NULL &&
asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
(browser->hists->thread_filter ? "out of" : "into"),
@@ -1682,6 +1706,7 @@ retry_popup_menu:
if (choice == annotate || choice == annotate_t || choice == annotate_f) {
struct hist_entry *he;
struct annotation *notes;
+ struct map_symbol ms;
int err;
do_annotate:
if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1691,30 +1716,21 @@ do_annotate:
if (he == NULL)
continue;
- /*
- * we stash the branch_info symbol + map into the
- * the ms so we don't have to rewrite all the annotation
- * code to use branch_info.
- * in branch mode, the ms struct is not used
- */
if (choice == annotate_f) {
- he->ms.sym = he->branch_info->from.sym;
- he->ms.map = he->branch_info->from.map;
- } else if (choice == annotate_t) {
- he->ms.sym = he->branch_info->to.sym;
- he->ms.map = he->branch_info->to.map;
+ ms.map = he->branch_info->from.map;
+ ms.sym = he->branch_info->from.sym;
+ } else if (choice == annotate_t) {
+ ms.map = he->branch_info->to.map;
+ ms.sym = he->branch_info->to.sym;
+ } else {
+ ms = *browser->selection;
}
- notes = symbol__annotation(he->ms.sym);
+ notes = symbol__annotation(ms.sym);
if (!notes->src)
continue;
- /*
- * Don't let this be freed, say, by hists__decay_entry.
- */
- he->used = true;
- err = hist_entry__tui_annotate(he, evsel, hbt);
- he->used = false;
+ err = map_symbol__tui_annotate(&ms, evsel, hbt);
/*
* offer option to annotate the other branch source or target
* (if they exists) when returning from annotate
@@ -1754,13 +1770,13 @@ zoom_thread:
pstack__remove(fstack, &browser->hists->thread_filter);
zoom_out_thread:
ui_helpline__pop();
- browser->hists->thread_filter = NULL;
+ thread__zput(browser->hists->thread_filter);
perf_hpp__set_elide(HISTC_THREAD, false);
} else {
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
thread->comm_set ? thread__comm_str(thread) : "",
thread->tid);
- browser->hists->thread_filter = thread;
+ browser->hists->thread_filter = thread__get(thread);
perf_hpp__set_elide(HISTC_THREAD, false);
pstack__push(fstack, &browser->hists->thread_filter);
}
diff --git a/tools/perf/ui/gtk/Build b/tools/perf/ui/gtk/Build
new file mode 100644
index 000000000000..ec22e899a224
--- /dev/null
+++ b/tools/perf/ui/gtk/Build
@@ -0,0 +1,9 @@
+CFLAGS_gtk += -fPIC $(GTK_CFLAGS)
+
+gtk-y += browser.o
+gtk-y += hists.o
+gtk-y += setup.o
+gtk-y += util.o
+gtk-y += helpline.o
+gtk-y += progress.o
+gtk-y += annotate.o
diff --git a/tools/perf/ui/tui/Build b/tools/perf/ui/tui/Build
new file mode 100644
index 000000000000..9e4c6ca41a9f
--- /dev/null
+++ b/tools/perf/ui/tui/Build
@@ -0,0 +1,4 @@
+libperf-y += setup.o
+libperf-y += util.o
+libperf-y += helpline.o
+libperf-y += progress.o
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
new file mode 100644
index 000000000000..797490a40075
--- /dev/null
+++ b/tools/perf/util/Build
@@ -0,0 +1,145 @@
+libperf-y += abspath.o
+libperf-y += alias.o
+libperf-y += annotate.o
+libperf-y += build-id.o
+libperf-y += config.o
+libperf-y += ctype.o
+libperf-y += db-export.o
+libperf-y += environment.o
+libperf-y += event.o
+libperf-y += evlist.o
+libperf-y += evsel.o
+libperf-y += exec_cmd.o
+libperf-y += find_next_bit.o
+libperf-y += help.o
+libperf-y += kallsyms.o
+libperf-y += levenshtein.o
+libperf-y += parse-options.o
+libperf-y += parse-events.o
+libperf-y += path.o
+libperf-y += rbtree.o
+libperf-y += bitmap.o
+libperf-y += hweight.o
+libperf-y += run-command.o
+libperf-y += quote.o
+libperf-y += strbuf.o
+libperf-y += string.o
+libperf-y += strlist.o
+libperf-y += strfilter.o
+libperf-y += top.o
+libperf-y += usage.o
+libperf-y += wrapper.o
+libperf-y += sigchain.o
+libperf-y += dso.o
+libperf-y += symbol.o
+libperf-y += color.o
+libperf-y += pager.o
+libperf-y += header.o
+libperf-y += callchain.o
+libperf-y += values.o
+libperf-y += debug.o
+libperf-y += machine.o
+libperf-y += map.o
+libperf-y += pstack.o
+libperf-y += session.o
+libperf-y += ordered-events.o
+libperf-y += comm.o
+libperf-y += thread.o
+libperf-y += thread_map.o
+libperf-y += trace-event-parse.o
+libperf-y += parse-events-flex.o
+libperf-y += parse-events-bison.o
+libperf-y += pmu.o
+libperf-y += pmu-flex.o
+libperf-y += pmu-bison.o
+libperf-y += trace-event-read.o
+libperf-y += trace-event-info.o
+libperf-y += trace-event-scripting.o
+libperf-y += trace-event.o
+libperf-y += svghelper.o
+libperf-y += sort.o
+libperf-y += hist.o
+libperf-y += util.o
+libperf-y += xyarray.o
+libperf-y += cpumap.o
+libperf-y += cgroup.o
+libperf-y += target.o
+libperf-y += rblist.o
+libperf-y += intlist.o
+libperf-y += vdso.o
+libperf-y += stat.o
+libperf-y += record.o
+libperf-y += srcline.o
+libperf-y += data.o
+libperf-$(CONFIG_X86) += tsc.o
+libperf-y += cloexec.o
+libperf-y += thread-stack.o
+
+libperf-$(CONFIG_LIBELF) += symbol-elf.o
+libperf-$(CONFIG_LIBELF) += probe-event.o
+
+ifndef CONFIG_LIBELF
+libperf-y += symbol-minimal.o
+endif
+
+libperf-$(CONFIG_DWARF) += probe-finder.o
+libperf-$(CONFIG_DWARF) += dwarf-aux.o
+
+libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+
+libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+
+libperf-y += scripting-engines/
+
+libperf-$(CONFIG_PERF_REGS) += perf_regs.o
+libperf-$(CONFIG_ZLIB) += zlib.o
+libperf-$(CONFIG_LZMA) += lzma.o
+
+CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_exec_cmd.o += -DPERF_EXEC_PATH="BUILD_STR($(perfexecdir_SQ))" -DPREFIX="BUILD_STR($(prefix_SQ))"
+
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
+ $(call rule_mkdir)
+ @$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l
+
+$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
+ $(call rule_mkdir)
+ @$(call echo-cmd,bison)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $@ -p parse_events_
+
+$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
+ $(call rule_mkdir)
+ @$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l
+
+$(OUTPUT)util/pmu-bison.c: util/pmu.y
+ $(call rule_mkdir)
+ @$(call echo-cmd,bison)$(BISON) -v util/pmu.y -d -o $@ -p perf_pmu_
+
+CFLAGS_parse-events-flex.o += -w
+CFLAGS_pmu-flex.o += -w
+CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
+CFLAGS_pmu-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
+
+$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
+$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
+
+CFLAGS_find_next_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_parse-events.o += -Wno-redundant-decls
+
+$(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 9d9db3b296dd..7f5bdfc9bc87 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1010,6 +1010,32 @@ fallback:
}
filename = symfs_filename;
}
+ } else if (dso__needs_decompress(dso)) {
+ char tmp[PATH_MAX];
+ struct kmod_path m;
+ int fd;
+ bool ret;
+
+ if (kmod_path__parse_ext(&m, symfs_filename))
+ goto out_free_filename;
+
+ snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
+
+ fd = mkstemp(tmp);
+ if (fd < 0) {
+ free(m.ext);
+ goto out_free_filename;
+ }
+
+ ret = decompress_to_file(m.ext, symfs_filename, fd);
+
+ free(m.ext);
+ close(fd);
+
+ if (!ret)
+ goto out_free_filename;
+
+ strcpy(symfs_filename, tmp);
}
snprintf(command, sizeof(command),
@@ -1029,7 +1055,7 @@ fallback:
file = popen(command, "r");
if (!file)
- goto out_free_filename;
+ goto out_remove_tmp;
while (!feof(file))
if (symbol__parse_objdump_line(sym, map, file, privsize,
@@ -1044,6 +1070,10 @@ fallback:
delete_last_nop(sym);
pclose(file);
+
+out_remove_tmp:
+ if (dso__needs_decompress(dso))
+ unlink(symfs_filename);
out_free_filename:
if (delete_extract)
kcore_extract__delete(&kce);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 0c72680a977f..61867dff5d5a 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -59,11 +59,8 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
- if (thread) {
- rb_erase(&thread->rb_node, &machine->threads);
- machine->last_match = NULL;
- thread__delete(thread);
- }
+ if (thread)
+ machine__remove_thread(machine, thread);
return 0;
}
@@ -93,6 +90,35 @@ int build_id__sprintf(const u8 *build_id, int len, char *bf)
return raw - build_id;
}
+/* asnprintf consolidates asprintf and snprintf */
+static int asnprintf(char **strp, size_t size, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ if (!strp)
+ return -EINVAL;
+
+ va_start(ap, fmt);
+ if (*strp)
+ ret = vsnprintf(*strp, size, fmt, ap);
+ else
+ ret = vasprintf(strp, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+static char *build_id__filename(const char *sbuild_id, char *bf, size_t size)
+{
+ char *tmp = bf;
+ int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
+ sbuild_id, sbuild_id + 2);
+ if (ret < 0 || (tmp && size < (unsigned int)ret))
+ return NULL;
+ return bf;
+}
+
char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
{
char build_id_hex[BUILD_ID_SIZE * 2 + 1];
@@ -101,14 +127,7 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
return NULL;
build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex);
- if (bf == NULL) {
- if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir,
- build_id_hex, build_id_hex + 2) < 0)
- return NULL;
- } else
- snprintf(bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
- build_id_hex, build_id_hex + 2);
- return bf;
+ return build_id__filename(build_id_hex, bf, size);
}
#define dsos__for_each_with_build_id(pos, head) \
@@ -259,52 +278,113 @@ void disable_buildid_cache(void)
no_buildid_cache = true;
}
-int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
- const char *name, bool is_kallsyms, bool is_vdso)
+static char *build_id_cache__dirname_from_path(const char *name,
+ bool is_kallsyms, bool is_vdso)
{
- const size_t size = PATH_MAX;
- char *realname, *filename = zalloc(size),
- *linkname = zalloc(size), *targetname;
- int len, err = -1;
+ char *realname = (char *)name, *filename;
bool slash = is_kallsyms || is_vdso;
- if (is_kallsyms) {
- if (symbol_conf.kptr_restrict) {
- pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
- err = 0;
- goto out_free;
- }
- realname = (char *) name;
- } else
+ if (!slash) {
realname = realpath(name, NULL);
+ if (!realname)
+ return NULL;
+ }
- if (realname == NULL || filename == NULL || linkname == NULL)
+ if (asprintf(&filename, "%s%s%s", buildid_dir, slash ? "/" : "",
+ is_vdso ? DSO__NAME_VDSO : realname) < 0)
+ filename = NULL;
+
+ if (!slash)
+ free(realname);
+
+ return filename;
+}
+
+int build_id_cache__list_build_ids(const char *pathname,
+ struct strlist **result)
+{
+ struct strlist *list;
+ char *dir_name;
+ DIR *dir;
+ struct dirent *d;
+ int ret = 0;
+
+ list = strlist__new(true, NULL);
+ dir_name = build_id_cache__dirname_from_path(pathname, false, false);
+ if (!list || !dir_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* List all dirents */
+ dir = opendir(dir_name);
+ if (!dir) {
+ ret = -errno;
+ goto out;
+ }
+
+ while ((d = readdir(dir)) != NULL) {
+ if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
+ continue;
+ strlist__add(list, d->d_name);
+ }
+ closedir(dir);
+
+out:
+ free(dir_name);
+ if (ret)
+ strlist__delete(list);
+ else
+ *result = list;
+
+ return ret;
+}
+
+int build_id_cache__add_s(const char *sbuild_id, const char *name,
+ bool is_kallsyms, bool is_vdso)
+{
+ const size_t size = PATH_MAX;
+ char *realname = NULL, *filename = NULL, *dir_name = NULL,
+ *linkname = zalloc(size), *targetname, *tmp;
+ int err = -1;
+
+ if (!is_kallsyms) {
+ realname = realpath(name, NULL);
+ if (!realname)
+ goto out_free;
+ }
+
+ dir_name = build_id_cache__dirname_from_path(name, is_kallsyms, is_vdso);
+ if (!dir_name)
goto out_free;
- len = scnprintf(filename, size, "%s%s%s",
- debugdir, slash ? "/" : "",
- is_vdso ? DSO__NAME_VDSO : realname);
- if (mkdir_p(filename, 0755))
+ if (mkdir_p(dir_name, 0755))
goto out_free;
- snprintf(filename + len, size - len, "/%s", sbuild_id);
+ if (asprintf(&filename, "%s/%s", dir_name, sbuild_id) < 0) {
+ filename = NULL;
+ goto out_free;
+ }
if (access(filename, F_OK)) {
if (is_kallsyms) {
if (copyfile("/proc/kallsyms", filename))
goto out_free;
- } else if (link(realname, filename) && copyfile(name, filename))
+ } else if (link(realname, filename) && errno != EEXIST &&
+ copyfile(name, filename))
goto out_free;
}
- len = scnprintf(linkname, size, "%s/.build-id/%.2s",
- debugdir, sbuild_id);
+ if (!build_id__filename(sbuild_id, linkname, size))
+ goto out_free;
+ tmp = strrchr(linkname, '/');
+ *tmp = '\0';
if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
goto out_free;
- snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
- targetname = filename + strlen(debugdir) - 5;
+ *tmp = '/';
+ targetname = filename + strlen(buildid_dir) - 5;
memcpy(targetname, "../..", 5);
if (symlink(targetname, linkname) == 0)
@@ -313,34 +393,46 @@ out_free:
if (!is_kallsyms)
free(realname);
free(filename);
+ free(dir_name);
free(linkname);
return err;
}
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
- const char *name, const char *debugdir,
- bool is_kallsyms, bool is_vdso)
+ const char *name, bool is_kallsyms,
+ bool is_vdso)
{
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
build_id__sprintf(build_id, build_id_size, sbuild_id);
- return build_id_cache__add_s(sbuild_id, debugdir, name,
- is_kallsyms, is_vdso);
+ return build_id_cache__add_s(sbuild_id, name, is_kallsyms, is_vdso);
+}
+
+bool build_id_cache__cached(const char *sbuild_id)
+{
+ bool ret = false;
+ char *filename = build_id__filename(sbuild_id, NULL, 0);
+
+ if (filename && !access(filename, F_OK))
+ ret = true;
+ free(filename);
+
+ return ret;
}
-int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+int build_id_cache__remove_s(const char *sbuild_id)
{
const size_t size = PATH_MAX;
char *filename = zalloc(size),
- *linkname = zalloc(size);
+ *linkname = zalloc(size), *tmp;
int err = -1;
if (filename == NULL || linkname == NULL)
goto out_free;
- snprintf(linkname, size, "%s/.build-id/%.2s/%s",
- debugdir, sbuild_id, sbuild_id + 2);
+ if (!build_id__filename(sbuild_id, linkname, size))
+ goto out_free;
if (access(linkname, F_OK))
goto out_free;
@@ -354,8 +446,8 @@ int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
/*
* Since the link is relative, we must make it absolute:
*/
- snprintf(linkname, size, "%s/.build-id/%.2s/%s",
- debugdir, sbuild_id, filename);
+ tmp = strrchr(linkname, '/') + 1;
+ snprintf(tmp, size - (tmp - linkname), "%s", filename);
if (unlink(linkname))
goto out_free;
@@ -367,8 +459,7 @@ out_free:
return err;
}
-static int dso__cache_build_id(struct dso *dso, struct machine *machine,
- const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine)
{
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
bool is_vdso = dso__is_vdso(dso);
@@ -381,28 +472,26 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
name = nm;
}
return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
- debugdir, is_kallsyms, is_vdso);
+ is_kallsyms, is_vdso);
}
static int __dsos__cache_build_ids(struct list_head *head,
- struct machine *machine, const char *debugdir)
+ struct machine *machine)
{
struct dso *pos;
int err = 0;
dsos__for_each_with_build_id(pos, head)
- if (dso__cache_build_id(pos, machine, debugdir))
+ if (dso__cache_build_id(pos, machine))
err = -1;
return err;
}
-static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
+static int machine__cache_build_ids(struct machine *machine)
{
- int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine,
- debugdir);
- ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine,
- debugdir);
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine);
return ret;
}
@@ -417,11 +506,11 @@ int perf_session__cache_build_ids(struct perf_session *session)
if (mkdir(buildid_dir, 0755) != 0 && errno != EEXIST)
return -1;
- ret = machine__cache_build_ids(&session->machines.host, buildid_dir);
+ ret = machine__cache_build_ids(&session->machines.host);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret |= machine__cache_build_ids(pos, buildid_dir);
+ ret |= machine__cache_build_ids(pos);
}
return ret ? -1 : 0;
}
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 8236319514d5..85011222cc14 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -4,6 +4,7 @@
#define BUILD_ID_SIZE 20
#include "tool.h"
+#include "strlist.h"
#include <linux/types.h>
extern struct perf_tool build_id__mark_dso_hit_ops;
@@ -22,9 +23,12 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits);
int perf_session__write_buildid_table(struct perf_session *session, int fd);
int perf_session__cache_build_ids(struct perf_session *session);
-int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+int build_id_cache__list_build_ids(const char *pathname,
+ struct strlist **result);
+bool build_id_cache__cached(const char *sbuild_id);
+int build_id_cache__add_s(const char *sbuild_id,
const char *name, bool is_kallsyms, bool is_vdso);
-int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+int build_id_cache__remove_s(const char *sbuild_id);
void disable_buildid_cache(void);
#endif
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index d04d770d90f6..fbcca21d66ab 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -17,6 +17,7 @@
#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH"
#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
+#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
typedef int (*config_fn_t)(const char *, const char *, void *);
extern int perf_default_config(const char *, const char *, void *);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 14e7a123d43b..9f643ee77001 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -97,6 +97,14 @@ int parse_callchain_record_opt(const char *arg)
callchain_param.dump_size = size;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
+ } else if (!strncmp(name, "lbr", sizeof("lbr"))) {
+ if (!strtok_r(NULL, ",", &saveptr)) {
+ callchain_param.record_mode = CALLCHAIN_LBR;
+ ret = 0;
+ } else
+ pr_err("callchain: No more arguments "
+ "needed for --call-graph lbr\n");
+ break;
} else {
pr_err("callchain: Unknown --call-graph option "
"value: %s\n", arg);
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index c0ec1acc38e4..6033a0a212ca 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -11,6 +11,7 @@ enum perf_call_graph_mode {
CALLCHAIN_NONE,
CALLCHAIN_FP,
CALLCHAIN_DWARF,
+ CALLCHAIN_LBR,
CALLCHAIN_MAX
};
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index 6da965bdbc2c..85b523885f9d 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -7,6 +7,12 @@
static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
+int __weak sched_getcpu(void)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
static int perf_flag_probe(void)
{
/* use 'safest' configuration as used in perf_evsel__fallback() */
diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
index 94a5a7d829d5..68888c29b04a 100644
--- a/tools/perf/util/cloexec.h
+++ b/tools/perf/util/cloexec.h
@@ -3,4 +3,10 @@
unsigned long perf_event_open_cloexec_flag(void);
+#ifdef __GLIBC_PREREQ
+#if !__GLIBC_PREREQ(2, 6)
+extern int sched_getcpu(void) __THROW;
+#endif
+#endif
+
#endif /* __PERF_CLOEXEC_H */
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
new file mode 100644
index 000000000000..dd17c9a32fbc
--- /dev/null
+++ b/tools/perf/util/data-convert-bt.c
@@ -0,0 +1,857 @@
+/*
+ * CTF writing support via babeltrace.
+ *
+ * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
+ * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include <linux/compiler.h>
+#include <babeltrace/ctf-writer/writer.h>
+#include <babeltrace/ctf-writer/clock.h>
+#include <babeltrace/ctf-writer/stream.h>
+#include <babeltrace/ctf-writer/event.h>
+#include <babeltrace/ctf-writer/event-types.h>
+#include <babeltrace/ctf-writer/event-fields.h>
+#include <babeltrace/ctf/events.h>
+#include <traceevent/event-parse.h>
+#include "asm/bug.h"
+#include "data-convert-bt.h"
+#include "session.h"
+#include "util.h"
+#include "debug.h"
+#include "tool.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "machine.h"
+
+#define pr_N(n, fmt, ...) \
+ eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
+
+#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
+
+#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
+
+struct evsel_priv {
+ struct bt_ctf_event_class *event_class;
+};
+
+struct ctf_writer {
+ /* writer primitives */
+ struct bt_ctf_writer *writer;
+ struct bt_ctf_stream *stream;
+ struct bt_ctf_stream_class *stream_class;
+ struct bt_ctf_clock *clock;
+
+ /* data types */
+ union {
+ struct {
+ struct bt_ctf_field_type *s64;
+ struct bt_ctf_field_type *u64;
+ struct bt_ctf_field_type *s32;
+ struct bt_ctf_field_type *u32;
+ struct bt_ctf_field_type *string;
+ struct bt_ctf_field_type *u64_hex;
+ };
+ struct bt_ctf_field_type *array[6];
+ } data;
+};
+
+struct convert {
+ struct perf_tool tool;
+ struct ctf_writer writer;
+
+ u64 events_size;
+ u64 events_count;
+};
+
+static int value_set(struct bt_ctf_field_type *type,
+ struct bt_ctf_event *event,
+ const char *name, u64 val)
+{
+ struct bt_ctf_field *field;
+ bool sign = bt_ctf_field_type_integer_get_signed(type);
+ int ret;
+
+ field = bt_ctf_field_create(type);
+ if (!field) {
+ pr_err("failed to create a field %s\n", name);
+ return -1;
+ }
+
+ if (sign) {
+ ret = bt_ctf_field_signed_integer_set_value(field, val);
+ if (ret) {
+ pr_err("failed to set field value %s\n", name);
+ goto err;
+ }
+ } else {
+ ret = bt_ctf_field_unsigned_integer_set_value(field, val);
+ if (ret) {
+ pr_err("failed to set field value %s\n", name);
+ goto err;
+ }
+ }
+
+ ret = bt_ctf_event_set_payload(event, name, field);
+ if (ret) {
+ pr_err("failed to set payload %s\n", name);
+ goto err;
+ }
+
+ pr2(" SET [%s = %" PRIu64 "]\n", name, val);
+
+err:
+ bt_ctf_field_put(field);
+ return ret;
+}
+
+#define __FUNC_VALUE_SET(_name, _val_type) \
+static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
+ struct bt_ctf_event *event, \
+ const char *name, \
+ _val_type val) \
+{ \
+ struct bt_ctf_field_type *type = cw->data._name; \
+ return value_set(type, event, name, (u64) val); \
+}
+
+#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
+
+FUNC_VALUE_SET(s32)
+FUNC_VALUE_SET(u32)
+FUNC_VALUE_SET(s64)
+FUNC_VALUE_SET(u64)
+__FUNC_VALUE_SET(u64_hex, u64)
+
+static struct bt_ctf_field_type*
+get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
+{
+ unsigned long flags = field->flags;
+
+ if (flags & FIELD_IS_STRING)
+ return cw->data.string;
+
+ if (!(flags & FIELD_IS_SIGNED)) {
+ /* unsigned longs are mostly pointers */
+ if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
+ return cw->data.u64_hex;
+ }
+
+ if (flags & FIELD_IS_SIGNED) {
+ if (field->size == 8)
+ return cw->data.s64;
+ else
+ return cw->data.s32;
+ }
+
+ if (field->size == 8)
+ return cw->data.u64;
+ else
+ return cw->data.u32;
+}
+
+static int add_tracepoint_field_value(struct ctf_writer *cw,
+ struct bt_ctf_event_class *event_class,
+ struct bt_ctf_event *event,
+ struct perf_sample *sample,
+ struct format_field *fmtf)
+{
+ struct bt_ctf_field_type *type;
+ struct bt_ctf_field *array_field;
+ struct bt_ctf_field *field;
+ const char *name = fmtf->name;
+ void *data = sample->raw_data;
+ unsigned long long value_int;
+ unsigned long flags = fmtf->flags;
+ unsigned int n_items;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int len;
+ int ret;
+
+ offset = fmtf->offset;
+ len = fmtf->size;
+ if (flags & FIELD_IS_STRING)
+ flags &= ~FIELD_IS_ARRAY;
+
+ if (flags & FIELD_IS_DYNAMIC) {
+ unsigned long long tmp_val;
+
+ tmp_val = pevent_read_number(fmtf->event->pevent,
+ data + offset, len);
+ offset = tmp_val;
+ len = offset >> 16;
+ offset &= 0xffff;
+ }
+
+ if (flags & FIELD_IS_ARRAY) {
+
+ type = bt_ctf_event_class_get_field_by_name(
+ event_class, name);
+ array_field = bt_ctf_field_create(type);
+ bt_ctf_field_type_put(type);
+ if (!array_field) {
+ pr_err("Failed to create array type %s\n", name);
+ return -1;
+ }
+
+ len = fmtf->size / fmtf->arraylen;
+ n_items = fmtf->arraylen;
+ } else {
+ n_items = 1;
+ array_field = NULL;
+ }
+
+ type = get_tracepoint_field_type(cw, fmtf);
+
+ for (i = 0; i < n_items; i++) {
+ if (!(flags & FIELD_IS_STRING))
+ value_int = pevent_read_number(
+ fmtf->event->pevent,
+ data + offset + i * len, len);
+
+ if (flags & FIELD_IS_ARRAY)
+ field = bt_ctf_field_array_get_field(array_field, i);
+ else
+ field = bt_ctf_field_create(type);
+
+ if (!field) {
+ pr_err("failed to create a field %s\n", name);
+ return -1;
+ }
+
+ if (flags & FIELD_IS_STRING)
+ ret = bt_ctf_field_string_set_value(field,
+ data + offset + i * len);
+ else if (!(flags & FIELD_IS_SIGNED))
+ ret = bt_ctf_field_unsigned_integer_set_value(
+ field, value_int);
+ else
+ ret = bt_ctf_field_signed_integer_set_value(
+ field, value_int);
+ if (ret) {
+ pr_err("failed to set file value %s\n", name);
+ goto err_put_field;
+ }
+ if (!(flags & FIELD_IS_ARRAY)) {
+ ret = bt_ctf_event_set_payload(event, name, field);
+ if (ret) {
+ pr_err("failed to set payload %s\n", name);
+ goto err_put_field;
+ }
+ }
+ bt_ctf_field_put(field);
+ }
+ if (flags & FIELD_IS_ARRAY) {
+ ret = bt_ctf_event_set_payload(event, name, array_field);
+ if (ret) {
+ pr_err("Failed add payload array %s\n", name);
+ return -1;
+ }
+ bt_ctf_field_put(array_field);
+ }
+ return 0;
+
+err_put_field:
+ bt_ctf_field_put(field);
+ return -1;
+}
+
+static int add_tracepoint_fields_values(struct ctf_writer *cw,
+ struct bt_ctf_event_class *event_class,
+ struct bt_ctf_event *event,
+ struct format_field *fields,
+ struct perf_sample *sample)
+{
+ struct format_field *field;
+ int ret;
+
+ for (field = fields; field; field = field->next) {
+ ret = add_tracepoint_field_value(cw, event_class, event, sample,
+ field);
+ if (ret)
+ return -1;
+ }
+ return 0;
+}
+
+static int add_tracepoint_values(struct ctf_writer *cw,
+ struct bt_ctf_event_class *event_class,
+ struct bt_ctf_event *event,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ struct format_field *common_fields = evsel->tp_format->format.common_fields;
+ struct format_field *fields = evsel->tp_format->format.fields;
+ int ret;
+
+ ret = add_tracepoint_fields_values(cw, event_class, event,
+ common_fields, sample);
+ if (!ret)
+ ret = add_tracepoint_fields_values(cw, event_class, event,
+ fields, sample);
+
+ return ret;
+}
+
+static int add_generic_values(struct ctf_writer *cw,
+ struct bt_ctf_event *event,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ u64 type = evsel->attr.sample_type;
+ int ret;
+
+ /*
+ * missing:
+ * PERF_SAMPLE_TIME - not needed as we have it in
+ * ctf event header
+ * PERF_SAMPLE_READ - TODO
+ * PERF_SAMPLE_CALLCHAIN - TODO
+ * PERF_SAMPLE_RAW - tracepoint fields are handled separately
+ * PERF_SAMPLE_BRANCH_STACK - TODO
+ * PERF_SAMPLE_REGS_USER - TODO
+ * PERF_SAMPLE_STACK_USER - TODO
+ */
+
+ if (type & PERF_SAMPLE_IP) {
+ ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
+ if (ret)
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ ret = value_set_s32(cw, event, "perf_tid", sample->tid);
+ if (ret)
+ return -1;
+
+ ret = value_set_s32(cw, event, "perf_pid", sample->pid);
+ if (ret)
+ return -1;
+ }
+
+ if ((type & PERF_SAMPLE_ID) ||
+ (type & PERF_SAMPLE_IDENTIFIER)) {
+ ret = value_set_u64(cw, event, "perf_id", sample->id);
+ if (ret)
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
+ if (ret)
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ ret = value_set_u32(cw, event, "perf_cpu", sample->cpu);
+ if (ret)
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ ret = value_set_u64(cw, event, "perf_period", sample->period);
+ if (ret)
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT) {
+ ret = value_set_u64(cw, event, "perf_weight", sample->weight);
+ if (ret)
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ ret = value_set_u64(cw, event, "perf_data_src",
+ sample->data_src);
+ if (ret)
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ ret = value_set_u64(cw, event, "perf_transaction",
+ sample->transaction);
+ if (ret)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *_event __maybe_unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine __maybe_unused)
+{
+ struct convert *c = container_of(tool, struct convert, tool);
+ struct evsel_priv *priv = evsel->priv;
+ struct ctf_writer *cw = &c->writer;
+ struct bt_ctf_event_class *event_class;
+ struct bt_ctf_event *event;
+ int ret;
+
+ if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
+ return 0;
+
+ event_class = priv->event_class;
+
+ /* update stats */
+ c->events_count++;
+ c->events_size += _event->header.size;
+
+ pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
+
+ event = bt_ctf_event_create(event_class);
+ if (!event) {
+ pr_err("Failed to create an CTF event\n");
+ return -1;
+ }
+
+ bt_ctf_clock_set_time(cw->clock, sample->time);
+
+ ret = add_generic_values(cw, event, evsel, sample);
+ if (ret)
+ return -1;
+
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
+ ret = add_tracepoint_values(cw, event_class, event,
+ evsel, sample);
+ if (ret)
+ return -1;
+ }
+
+ bt_ctf_stream_append_event(cw->stream, event);
+ bt_ctf_event_put(event);
+ return 0;
+}
+
+static int add_tracepoint_fields_types(struct ctf_writer *cw,
+ struct format_field *fields,
+ struct bt_ctf_event_class *event_class)
+{
+ struct format_field *field;
+ int ret;
+
+ for (field = fields; field; field = field->next) {
+ struct bt_ctf_field_type *type;
+ unsigned long flags = field->flags;
+
+ pr2(" field '%s'\n", field->name);
+
+ type = get_tracepoint_field_type(cw, field);
+ if (!type)
+ return -1;
+
+ /*
+ * A string is an array of chars. For this we use the string
+ * type and don't care that it is an array. What we don't
+ * support is an array of strings.
+ */
+ if (flags & FIELD_IS_STRING)
+ flags &= ~FIELD_IS_ARRAY;
+
+ if (flags & FIELD_IS_ARRAY)
+ type = bt_ctf_field_type_array_create(type, field->arraylen);
+
+ ret = bt_ctf_event_class_add_field(event_class, type,
+ field->name);
+
+ if (flags & FIELD_IS_ARRAY)
+ bt_ctf_field_type_put(type);
+
+ if (ret) {
+ pr_err("Failed to add field '%s\n", field->name);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int add_tracepoint_types(struct ctf_writer *cw,
+ struct perf_evsel *evsel,
+ struct bt_ctf_event_class *class)
+{
+ struct format_field *common_fields = evsel->tp_format->format.common_fields;
+ struct format_field *fields = evsel->tp_format->format.fields;
+ int ret;
+
+ ret = add_tracepoint_fields_types(cw, common_fields, class);
+ if (!ret)
+ ret = add_tracepoint_fields_types(cw, fields, class);
+
+ return ret;
+}
+
+static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
+ struct bt_ctf_event_class *event_class)
+{
+ u64 type = evsel->attr.sample_type;
+
+ /*
+ * missing:
+ * PERF_SAMPLE_TIME - not needed as we have it in
+ * ctf event header
+ * PERF_SAMPLE_READ - TODO
+ * PERF_SAMPLE_CALLCHAIN - TODO
+ * PERF_SAMPLE_RAW - tracepoint fields are handled separately
+ * PERF_SAMPLE_BRANCH_STACK - TODO
+ * PERF_SAMPLE_REGS_USER - TODO
+ * PERF_SAMPLE_STACK_USER - TODO
+ */
+
+#define ADD_FIELD(cl, t, n) \
+ do { \
+ pr2(" field '%s'\n", n); \
+ if (bt_ctf_event_class_add_field(cl, t, n)) { \
+ pr_err("Failed to add field '%s;\n", n); \
+ return -1; \
+ } \
+ } while (0)
+
+ if (type & PERF_SAMPLE_IP)
+ ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
+
+ if (type & PERF_SAMPLE_TID) {
+ ADD_FIELD(event_class, cw->data.s32, "perf_tid");
+ ADD_FIELD(event_class, cw->data.s32, "perf_pid");
+ }
+
+ if ((type & PERF_SAMPLE_ID) ||
+ (type & PERF_SAMPLE_IDENTIFIER))
+ ADD_FIELD(event_class, cw->data.u64, "perf_id");
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
+
+ if (type & PERF_SAMPLE_CPU)
+ ADD_FIELD(event_class, cw->data.u32, "perf_cpu");
+
+ if (type & PERF_SAMPLE_PERIOD)
+ ADD_FIELD(event_class, cw->data.u64, "perf_period");
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ ADD_FIELD(event_class, cw->data.u64, "perf_weight");
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
+
+ if (type & PERF_SAMPLE_TRANSACTION)
+ ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
+
+#undef ADD_FIELD
+ return 0;
+}
+
+static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
+{
+ struct bt_ctf_event_class *event_class;
+ struct evsel_priv *priv;
+ const char *name = perf_evsel__name(evsel);
+ int ret;
+
+ pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);
+
+ event_class = bt_ctf_event_class_create(name);
+ if (!event_class)
+ return -1;
+
+ ret = add_generic_types(cw, evsel, event_class);
+ if (ret)
+ goto err;
+
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
+ ret = add_tracepoint_types(cw, evsel, event_class);
+ if (ret)
+ goto err;
+ }
+
+ ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
+ if (ret) {
+ pr("Failed to add event class into stream.\n");
+ goto err;
+ }
+
+ priv = malloc(sizeof(*priv));
+ if (!priv)
+ goto err;
+
+ priv->event_class = event_class;
+ evsel->priv = priv;
+ return 0;
+
+err:
+ bt_ctf_event_class_put(event_class);
+ pr_err("Failed to add event '%s'.\n", name);
+ return -1;
+}
+
+static int setup_events(struct ctf_writer *cw, struct perf_session *session)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_evsel *evsel;
+ int ret;
+
+ evlist__for_each(evlist, evsel) {
+ ret = add_event(cw, evsel);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int ctf_writer__setup_env(struct ctf_writer *cw,
+ struct perf_session *session)
+{
+ struct perf_header *header = &session->header;
+ struct bt_ctf_writer *writer = cw->writer;
+
+#define ADD(__n, __v) \
+do { \
+ if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
+ return -1; \
+} while (0)
+
+ ADD("host", header->env.hostname);
+ ADD("sysname", "Linux");
+ ADD("release", header->env.os_release);
+ ADD("version", header->env.version);
+ ADD("machine", header->env.arch);
+ ADD("domain", "kernel");
+ ADD("tracer_name", "perf");
+
+#undef ADD
+ return 0;
+}
+
+static int ctf_writer__setup_clock(struct ctf_writer *cw)
+{
+ struct bt_ctf_clock *clock = cw->clock;
+
+ bt_ctf_clock_set_description(clock, "perf clock");
+
+#define SET(__n, __v) \
+do { \
+ if (bt_ctf_clock_set_##__n(clock, __v)) \
+ return -1; \
+} while (0)
+
+ SET(frequency, 1000000000);
+ SET(offset_s, 0);
+ SET(offset, 0);
+ SET(precision, 10);
+ SET(is_absolute, 0);
+
+#undef SET
+ return 0;
+}
+
+static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
+{
+ struct bt_ctf_field_type *type;
+
+ type = bt_ctf_field_type_integer_create(size);
+ if (!type)
+ return NULL;
+
+ if (sign &&
+ bt_ctf_field_type_integer_set_signed(type, 1))
+ goto err;
+
+ if (hex &&
+ bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
+ goto err;
+
+ pr2("Created type: INTEGER %d-bit %ssigned %s\n",
+ size, sign ? "un" : "", hex ? "hex" : "");
+ return type;
+
+err:
+ bt_ctf_field_type_put(type);
+ return NULL;
+}
+
+static void ctf_writer__cleanup_data(struct ctf_writer *cw)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
+ bt_ctf_field_type_put(cw->data.array[i]);
+}
+
+static int ctf_writer__init_data(struct ctf_writer *cw)
+{
+#define CREATE_INT_TYPE(type, size, sign, hex) \
+do { \
+ (type) = create_int_type(size, sign, hex); \
+ if (!(type)) \
+ goto err; \
+} while (0)
+
+ CREATE_INT_TYPE(cw->data.s64, 64, true, false);
+ CREATE_INT_TYPE(cw->data.u64, 64, false, false);
+ CREATE_INT_TYPE(cw->data.s32, 32, true, false);
+ CREATE_INT_TYPE(cw->data.u32, 32, false, false);
+ CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
+
+ cw->data.string = bt_ctf_field_type_string_create();
+ if (cw->data.string)
+ return 0;
+
+err:
+ ctf_writer__cleanup_data(cw);
+ pr_err("Failed to create data types.\n");
+ return -1;
+}
+
+static void ctf_writer__cleanup(struct ctf_writer *cw)
+{
+ ctf_writer__cleanup_data(cw);
+
+ bt_ctf_clock_put(cw->clock);
+ bt_ctf_stream_put(cw->stream);
+ bt_ctf_stream_class_put(cw->stream_class);
+ bt_ctf_writer_put(cw->writer);
+
+ /* and NULL all the pointers */
+ memset(cw, 0, sizeof(*cw));
+}
+
+static int ctf_writer__init(struct ctf_writer *cw, const char *path)
+{
+ struct bt_ctf_writer *writer;
+ struct bt_ctf_stream_class *stream_class;
+ struct bt_ctf_stream *stream;
+ struct bt_ctf_clock *clock;
+
+ /* CTF writer */
+ writer = bt_ctf_writer_create(path);
+ if (!writer)
+ goto err;
+
+ cw->writer = writer;
+
+ /* CTF clock */
+ clock = bt_ctf_clock_create("perf_clock");
+ if (!clock) {
+ pr("Failed to create CTF clock.\n");
+ goto err_cleanup;
+ }
+
+ cw->clock = clock;
+
+ if (ctf_writer__setup_clock(cw)) {
+ pr("Failed to setup CTF clock.\n");
+ goto err_cleanup;
+ }
+
+ /* CTF stream class */
+ stream_class = bt_ctf_stream_class_create("perf_stream");
+ if (!stream_class) {
+ pr("Failed to create CTF stream class.\n");
+ goto err_cleanup;
+ }
+
+ cw->stream_class = stream_class;
+
+ /* CTF clock stream setup */
+ if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
+ pr("Failed to assign CTF clock to stream class.\n");
+ goto err_cleanup;
+ }
+
+ if (ctf_writer__init_data(cw))
+ goto err_cleanup;
+
+ /* CTF stream instance */
+ stream = bt_ctf_writer_create_stream(writer, stream_class);
+ if (!stream) {
+ pr("Failed to create CTF stream.\n");
+ goto err_cleanup;
+ }
+
+ cw->stream = stream;
+
+ /* CTF clock writer setup */
+ if (bt_ctf_writer_add_clock(writer, clock)) {
+ pr("Failed to assign CTF clock to writer.\n");
+ goto err_cleanup;
+ }
+
+ return 0;
+
+err_cleanup:
+ ctf_writer__cleanup(cw);
+err:
+ pr_err("Failed to setup CTF writer.\n");
+ return -1;
+}
+
+int bt_convert__perf2ctf(const char *input, const char *path, bool force)
+{
+ struct perf_session *session;
+ struct perf_data_file file = {
+ .path = input,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
+ };
+ struct convert c = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
+ };
+ struct ctf_writer *cw = &c.writer;
+ int err = -1;
+
+ /* CTF writer */
+ if (ctf_writer__init(cw, path))
+ return -1;
+
+ /* perf.data session */
+ session = perf_session__new(&file, 0, &c.tool);
+ if (!session)
+ goto free_writer;
+
+ /* CTF writer env/clock setup */
+ if (ctf_writer__setup_env(cw, session))
+ goto free_session;
+
+ /* CTF events setup */
+ if (setup_events(cw, session))
+ goto free_session;
+
+ err = perf_session__process_events(session);
+ if (!err)
+ err = bt_ctf_stream_flush(cw->stream);
+
+ fprintf(stderr,
+ "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
+ file.path, path);
+
+ fprintf(stderr,
+ "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
+ (double) c.events_size / 1024.0 / 1024.0,
+ c.events_count);
+
+ /* it's all good */
+free_session:
+ perf_session__delete(session);
+
+free_writer:
+ ctf_writer__cleanup(cw);
+ return err;
+}
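The ctf_writer__init()/ctf_writer__setup_clock() code above follows the usual libbabeltrace 1.x ctf-writer bring-up: create a writer for an output directory, create and configure a clock, attach the clock to the stream class and to the writer, then create a stream and flush it once events have been appended. Below is a minimal standalone sketch of that sequence, using only calls that already appear in the patch; the header paths and the -lbabeltrace-ctf link flag are assumptions about a typical babeltrace 1.x installation.

/*
 * Minimal standalone sketch of the libbabeltrace ctf-writer sequence;
 * header paths are an assumption about a babeltrace 1.x installation.
 */
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/stream-class.h>

int main(void)
{
	struct bt_ctf_writer *writer = bt_ctf_writer_create("./ctf-out");
	struct bt_ctf_clock *clock = bt_ctf_clock_create("perf_clock");
	struct bt_ctf_stream_class *sc = bt_ctf_stream_class_create("perf_stream");
	struct bt_ctf_stream *stream;

	if (!writer || !clock || !sc)
		return 1;

	if (bt_ctf_clock_set_frequency(clock, 1000000000) ||
	    bt_ctf_writer_add_clock(writer, clock) ||
	    bt_ctf_stream_class_set_clock(sc, clock))
		return 1;

	stream = bt_ctf_writer_create_stream(writer, sc);
	if (!stream)
		return 1;

	/* events would be created and appended to the stream here */
	bt_ctf_stream_flush(stream);

	bt_ctf_stream_put(stream);
	bt_ctf_stream_class_put(sc);
	bt_ctf_clock_put(clock);
	bt_ctf_writer_put(writer);
	return 0;
}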
diff --git a/tools/perf/util/data-convert-bt.h b/tools/perf/util/data-convert-bt.h
new file mode 100644
index 000000000000..4c204342a9d8
--- /dev/null
+++ b/tools/perf/util/data-convert-bt.h
@@ -0,0 +1,8 @@
+#ifndef __DATA_CONVERT_BT_H
+#define __DATA_CONVERT_BT_H
+#ifdef HAVE_LIBBABELTRACE_SUPPORT
+
+int bt_convert__perf2ctf(const char *input_name, const char *to_ctf, bool force);
+
+#endif /* HAVE_LIBBABELTRACE_SUPPORT */
+#endif /* __DATA_CONVERT_BT_H */
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index c81dae399763..bb39a3ffc70b 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -282,13 +282,13 @@ int db_export__branch_type(struct db_export *dbe, u32 branch_type,
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
- struct thread *thread, struct addr_location *al)
+ struct addr_location *al)
{
+ struct thread* thread = al->thread;
struct export_sample es = {
.event = event,
.sample = sample,
.evsel = evsel,
- .thread = thread,
.al = al,
};
struct thread *main_thread;
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index adbd22d66798..25e22fd76aca 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -34,7 +34,6 @@ struct export_sample {
union perf_event *event;
struct perf_sample *sample;
struct perf_evsel *evsel;
- struct thread *thread;
struct addr_location *al;
u64 db_id;
u64 comm_db_id;
@@ -97,7 +96,7 @@ int db_export__branch_type(struct db_export *dbe, u32 branch_type,
const char *name);
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
- struct thread *thread, struct addr_location *al);
+ struct addr_location *al);
int db_export__branch_types(struct db_export *dbe);
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index ad60b2f20258..2da5581ec74d 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -20,6 +20,7 @@ int verbose;
bool dump_trace = false, quiet = false;
int debug_ordered_events;
static int redirect_to_stderr;
+int debug_data_convert;
static int _eprintf(int level, int var, const char *fmt, va_list args)
{
@@ -147,6 +148,7 @@ static struct debug_variable {
{ .name = "verbose", .ptr = &verbose },
{ .name = "ordered-events", .ptr = &debug_ordered_events},
{ .name = "stderr", .ptr = &redirect_to_stderr},
+ { .name = "data-convert", .ptr = &debug_data_convert },
{ .name = NULL, }
};
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index be264d6f3b30..caac2fdc6105 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -12,6 +12,7 @@
extern int verbose;
extern bool quiet, dump_trace;
extern int debug_ordered_events;
+extern int debug_data_convert;
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index c2f7d3b90966..fc0ddd5792a9 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -45,13 +45,13 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__DEBUGLINK: {
char *debuglink;
- strncpy(filename, dso->long_name, size);
- debuglink = filename + dso->long_name_len;
+ len = __symbol__join_symfs(filename, size, dso->long_name);
+ debuglink = filename + len;
while (debuglink != filename && *debuglink != '/')
debuglink--;
if (*debuglink == '/')
debuglink++;
- ret = filename__read_debuglink(dso->long_name, debuglink,
+ ret = filename__read_debuglink(filename, debuglink,
size - (debuglink - filename));
}
break;
@@ -148,6 +148,9 @@ static const struct {
#ifdef HAVE_ZLIB_SUPPORT
{ "gz", gzip_decompress_to_file },
#endif
+#ifdef HAVE_LZMA_SUPPORT
+ { "xz", lzma_decompress_to_file },
+#endif
{ NULL, NULL },
};
@@ -162,32 +165,14 @@ bool is_supported_compression(const char *ext)
return false;
}
-bool is_kmodule_extension(const char *ext)
-{
- if (strncmp(ext, "ko", 2))
- return false;
-
- if (ext[2] == '\0' || (ext[2] == '.' && is_supported_compression(ext+3)))
- return true;
-
- return false;
-}
-
-bool is_kernel_module(const char *pathname, bool *compressed)
+bool is_kernel_module(const char *pathname)
{
- const char *ext = strrchr(pathname, '.');
+ struct kmod_path m;
- if (ext == NULL)
- return false;
-
- if (is_supported_compression(ext + 1)) {
- if (compressed)
- *compressed = true;
- ext -= 3;
- } else if (compressed)
- *compressed = false;
+ if (kmod_path__parse(&m, pathname))
+ return false;
- return is_kmodule_extension(ext + 1);
+ return m.kmod;
}
bool decompress_to_file(const char *ext, const char *filename, int output_fd)
@@ -209,6 +194,72 @@ bool dso__needs_decompress(struct dso *dso)
}
/*
+ * Parses the kernel module path given in @path and updates
+ * the @m argument as follows:
+ *
+ * @comp - true if @path has a supported compression suffix,
+ * false otherwise
+ * @kmod - true if @path has a '.ko' suffix in the right position,
+ * false otherwise
+ * @name - if (@alloc_name && @kmod) is true, it holds the strdup-ed base name
+ * of the kernel module without suffixes, otherwise the strdup-ed
+ * base name of @path
+ * @ext - if (@alloc_ext && @comp) is true, it holds the strdup-ed
+ * compression suffix
+ *
+ * Returns 0 if there is no strdup error, -ENOMEM otherwise.
+ */
+int __kmod_path__parse(struct kmod_path *m, const char *path,
+ bool alloc_name, bool alloc_ext)
+{
+ const char *name = strrchr(path, '/');
+ const char *ext = strrchr(path, '.');
+
+ memset(m, 0x0, sizeof(*m));
+ name = name ? name + 1 : path;
+
+ /* No extension, just return name. */
+ if (ext == NULL) {
+ if (alloc_name) {
+ m->name = strdup(name);
+ return m->name ? 0 : -ENOMEM;
+ }
+ return 0;
+ }
+
+ if (is_supported_compression(ext + 1)) {
+ m->comp = true;
+ ext -= 3;
+ }
+
+ /* Check .ko extension only if there's enough name left. */
+ if (ext > name)
+ m->kmod = !strncmp(ext, ".ko", 3);
+
+ if (alloc_name) {
+ if (m->kmod) {
+ if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
+ return -ENOMEM;
+ } else {
+ if (asprintf(&m->name, "%s", name) == -1)
+ return -ENOMEM;
+ }
+
+ strxfrchar(m->name, '-', '_');
+ }
+
+ if (alloc_ext && m->comp) {
+ m->ext = strdup(ext + 4);
+ if (!m->ext) {
+ free((void *) m->name);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/*
* Global list of open DSOs and the counter.
*/
static LIST_HEAD(dso__data_open);
@@ -240,7 +291,7 @@ static int do_open(char *name)
if (fd >= 0)
return fd;
- pr_debug("dso open failed, mmap: %s\n",
+ pr_debug("dso open failed: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
if (!dso__data_open_cnt || errno != EMFILE)
break;
@@ -1002,21 +1053,24 @@ struct dso *dsos__find(const struct dsos *dsos, const char *name,
return dso__find_by_longname(&dsos->root, name);
}
-struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
+struct dso *dsos__addnew(struct dsos *dsos, const char *name)
{
- struct dso *dso = dsos__find(dsos, name, false);
+ struct dso *dso = dso__new(name);
- if (!dso) {
- dso = dso__new(name);
- if (dso != NULL) {
- dsos__add(dsos, dso);
- dso__set_basename(dso);
- }
+ if (dso != NULL) {
+ dsos__add(dsos, dso);
+ dso__set_basename(dso);
}
-
return dso;
}
+struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
+{
+ struct dso *dso = dsos__find(dsos, name, false);
+
+ return dso ? dso : dsos__addnew(dsos, name);
+}
+
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
@@ -1083,3 +1137,36 @@ enum dso_type dso__type(struct dso *dso, struct machine *machine)
return dso__type_fd(fd);
}
+
+int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
+{
+ int idx, errnum = dso->load_errno;
+ /*
+ * This must have the same ordering as enum dso_load_errno.
+ */
+ static const char *dso_load__error_str[] = {
+ "Internal tools/perf/ library error",
+ "Invalid ELF file",
+ "Can not read build id",
+ "Mismatching build id",
+ "Decompression failure",
+ };
+
+ BUG_ON(buflen == 0);
+
+ if (errnum >= 0) {
+ const char *err = strerror_r(errnum, buf, buflen);
+
+ if (err != buf)
+ scnprintf(buf, buflen, "%s", err);
+
+ return 0;
+ }
+
+ if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
+ return -1;
+
+ idx = errnum - __DSO_LOAD_ERRNO__START;
+ scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
+ return 0;
+}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index ced92841ff97..e0901b4ed8de 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -60,6 +60,31 @@ enum dso_type {
DSO__TYPE_X32BIT,
};
+enum dso_load_errno {
+ DSO_LOAD_ERRNO__SUCCESS = 0,
+
+ /*
+ * Choose an arbitrary negative big number not to clash with standard
+ * errno since SUS requires the errno has distinct positive values.
+ * See 'Issue 6' in the link below.
+ *
+ * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
+ */
+ __DSO_LOAD_ERRNO__START = -10000,
+
+ DSO_LOAD_ERRNO__INTERNAL_ERROR = __DSO_LOAD_ERRNO__START,
+
+ /* for symsrc__init() */
+ DSO_LOAD_ERRNO__INVALID_ELF,
+ DSO_LOAD_ERRNO__CANNOT_READ_BUILDID,
+ DSO_LOAD_ERRNO__MISMATCHING_BUILDID,
+
+ /* for decompress_kmodule */
+ DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE,
+
+ __DSO_LOAD_ERRNO__END,
+};
+
#define DSO__SWAP(dso, type, val) \
({ \
type ____r = val; \
@@ -113,6 +138,7 @@ struct dso {
enum dso_swap_type needs_swap;
enum dso_binary_type symtab_type;
enum dso_binary_type binary_type;
+ enum dso_load_errno load_errno;
u8 adjust_symbols:1;
u8 has_build_id:1;
u8 has_srcline:1;
@@ -139,7 +165,8 @@ struct dso {
u32 status_seen;
size_t file_size;
struct list_head open_entry;
- u64 frame_offset;
+ u64 debug_frame_offset;
+ u64 eh_frame_hdr_offset;
} data;
union { /* Tool specific area */
@@ -189,11 +216,24 @@ char dso__symtab_origin(const struct dso *dso);
int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
char *root_dir, char *filename, size_t size);
bool is_supported_compression(const char *ext);
-bool is_kmodule_extension(const char *ext);
-bool is_kernel_module(const char *pathname, bool *compressed);
+bool is_kernel_module(const char *pathname);
bool decompress_to_file(const char *ext, const char *filename, int output_fd);
bool dso__needs_decompress(struct dso *dso);
+struct kmod_path {
+ char *name;
+ char *ext;
+ bool comp;
+ bool kmod;
+};
+
+int __kmod_path__parse(struct kmod_path *m, const char *path,
+ bool alloc_name, bool alloc_ext);
+
+#define kmod_path__parse(__m, __p) __kmod_path__parse(__m, __p, false, false)
+#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
+#define kmod_path__parse_ext(__m, __p) __kmod_path__parse(__m, __p, false, true)
+
/*
* The dso__data_* external interface provides following functions:
* dso__data_fd
@@ -249,6 +289,7 @@ struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
const char *short_name, int dso_type);
void dsos__add(struct dsos *dsos, struct dso *dso);
+struct dso *dsos__addnew(struct dsos *dsos, const char *name);
struct dso *dsos__find(const struct dsos *dsos, const char *name,
bool cmp_short);
struct dso *__dsos__findnew(struct dsos *dsos, const char *name);
@@ -279,4 +320,6 @@ void dso__free_a2l(struct dso *dso);
enum dso_type dso__type(struct dso *dso, struct machine *machine);
+int dso__strerror_load(struct dso *dso, char *buf, size_t buflen);
+
#endif /* __PERF_DSO */
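The kmod_path helpers above replace the old extension checks with a single parser. A hypothetical usage sketch, assuming it is compiled inside tools/perf so that util/dso.h is available; the module path in the comment is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include "util/dso.h"	/* struct kmod_path, kmod_path__parse_name() */

static void show(const char *path)
{
	struct kmod_path m;

	if (kmod_path__parse_name(&m, path))	/* alloc_name = true, alloc_ext = false */
		return;

	/* e.g. "nf_nat.ko.gz" -> name "[nf_nat]", kmod = true, comp = true */
	printf("%s: name=%s kmod=%d comp=%d\n", path, m.name, m.kmod, m.comp);
	free(m.name);
}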
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index cc66c4049e09..c34e024020c7 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -278,6 +278,21 @@ bool die_is_func_def(Dwarf_Die *dw_die)
}
/**
+ * die_is_func_instance - Ensure that this DIE is an instance of a subprogram
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is an instance (which has an entry address).
+ * This returns true if @dw_die is a function instance. If not, you need to
+ * call die_walk_instances() to find actual instances.
+ **/
+bool die_is_func_instance(Dwarf_Die *dw_die)
+{
+ Dwarf_Addr tmp;
+
+ /* Actually gcc optimizes non-inlined functions as if they were inlined */
+ return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0;
+}
+/**
* die_get_data_member_location - Get the data-member offset
* @mb_die: a DIE of a member of a data structure
* @offs: The offset of the member in the data structure
@@ -786,10 +801,16 @@ static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
{
const char *name = data;
- if ((dwarf_tag(die_mem) == DW_TAG_member) &&
- die_compare_name(die_mem, name))
- return DIE_FIND_CB_END;
-
+ if (dwarf_tag(die_mem) == DW_TAG_member) {
+ if (die_compare_name(die_mem, name))
+ return DIE_FIND_CB_END;
+ else if (!dwarf_diename(die_mem)) { /* Unnamed structure */
+ Dwarf_Die type_die, tmp_die;
+ if (die_get_type(die_mem, &type_die) &&
+ die_find_member(&type_die, name, &tmp_die))
+ return DIE_FIND_CB_END;
+ }
+ }
return DIE_FIND_CB_SIBLING;
}
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index b4fe90c6cb2d..af7dbcd5f929 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -41,6 +41,9 @@ extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
/* Ensure that this DIE is a subprogram and definition (not declaration) */
extern bool die_is_func_def(Dwarf_Die *dw_die);
+/* Ensure that this DIE is an instance of a subprogram */
+extern bool die_is_func_instance(Dwarf_Die *dw_die);
+
/* Compare diename and tname */
extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
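die_is_func_instance() distinguishes concrete subprogram instances (DIEs that carry an entry address) from abstract or inlined-only DIEs, which need die_walk_instances() instead. Below is a standalone sketch of the same check against elfutils libdw, the library dwarf-aux is built on; the iteration scaffolding and the default file name are illustrative, and it would be linked with -ldw.

#include <elfutils/libdw.h>
#include <fcntl.h>
#include <stdio.h>

static int func_cb(Dwarf_Die *die, void *arg)
{
	Dwarf_Addr entry;
	const char *name = dwarf_diename(die);

	(void)arg;
	/* same test as die_is_func_instance(): not inlined-only and has an entry PC */
	if (!dwarf_func_inline(die) && dwarf_entrypc(die, &entry) == 0)
		printf("instance: %s @ %#lx\n", name ? name : "?", (unsigned long)entry);

	return DWARF_CB_OK;
}

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "./vmlinux", O_RDONLY);
	Dwarf *dbg = dwarf_begin(fd, DWARF_C_READ);
	Dwarf_Off off = 0, next;
	size_t hsize;

	if (!dbg)
		return 1;

	while (dwarf_nextcu(dbg, off, &next, &hsize, NULL, NULL, NULL) == 0) {
		Dwarf_Die cu;

		if (dwarf_offdie(dbg, off + hsize, &cu))
			dwarf_getfuncs(&cu, func_cb, NULL, 0);
		off = next;
	}

	dwarf_end(dbg);
	return 0;
}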
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 6c6d044e959a..ff866c4d2e2f 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -49,72 +49,103 @@ static struct perf_sample synth_sample = {
.period = 1,
};
-static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
+/*
+ * Assumes that the first 4095 bytes of /proc/pid/stat contains
+ * the comm, tgid and ppid.
+ */
+static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
+ pid_t *tgid, pid_t *ppid)
{
char filename[PATH_MAX];
- char bf[BUFSIZ];
- FILE *fp;
- size_t size = 0;
- pid_t tgid = -1;
+ char bf[4096];
+ int fd;
+ size_t size = 0, n;
+ char *nl, *name, *tgids, *ppids;
+
+ *tgid = -1;
+ *ppid = -1;
snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
- fp = fopen(filename, "r");
- if (fp == NULL) {
+ fd = open(filename, O_RDONLY);
+ if (fd < 0) {
pr_debug("couldn't open %s\n", filename);
- return 0;
+ return -1;
}
- while (!comm[0] || (tgid < 0)) {
- if (fgets(bf, sizeof(bf), fp) == NULL) {
- pr_warning("couldn't get COMM and pgid, malformed %s\n",
- filename);
- break;
- }
+ n = read(fd, bf, sizeof(bf) - 1);
+ close(fd);
+ if (n <= 0) {
+ pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
+ pid);
+ return -1;
+ }
+ bf[n] = '\0';
- if (memcmp(bf, "Name:", 5) == 0) {
- char *name = bf + 5;
- while (*name && isspace(*name))
- ++name;
- size = strlen(name) - 1;
- if (size >= len)
- size = len - 1;
- memcpy(comm, name, size);
- comm[size] = '\0';
-
- } else if (memcmp(bf, "Tgid:", 5) == 0) {
- char *tgids = bf + 5;
- while (*tgids && isspace(*tgids))
- ++tgids;
- tgid = atoi(tgids);
- }
+ name = strstr(bf, "Name:");
+ tgids = strstr(bf, "Tgid:");
+ ppids = strstr(bf, "PPid:");
+
+ if (name) {
+ name += 5; /* strlen("Name:") */
+
+ while (*name && isspace(*name))
+ ++name;
+
+ nl = strchr(name, '\n');
+ if (nl)
+ *nl = '\0';
+
+ size = strlen(name);
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+ comm[size] = '\0';
+ } else {
+ pr_debug("Name: string not found for pid %d\n", pid);
}
- fclose(fp);
+ if (tgids) {
+ tgids += 5; /* strlen("Tgid:") */
+ *tgid = atoi(tgids);
+ } else {
+ pr_debug("Tgid: string not found for pid %d\n", pid);
+ }
- return tgid;
+ if (ppids) {
+ ppids += 5; /* strlen("PPid:") */
+ *ppid = atoi(ppids);
+ } else {
+ pr_debug("PPid: string not found for pid %d\n", pid);
+ }
+
+ return 0;
}
-static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
- union perf_event *event, pid_t pid,
- perf_event__handler_t process,
- struct machine *machine)
+static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
+ struct machine *machine,
+ pid_t *tgid, pid_t *ppid)
{
size_t size;
- pid_t tgid;
+
+ *ppid = -1;
memset(&event->comm, 0, sizeof(event->comm));
- if (machine__is_host(machine))
- tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
- sizeof(event->comm.comm));
- else
- tgid = machine->pid;
+ if (machine__is_host(machine)) {
+ if (perf_event__get_comm_ids(pid, event->comm.comm,
+ sizeof(event->comm.comm),
+ tgid, ppid) != 0) {
+ return -1;
+ }
+ } else {
+ *tgid = machine->pid;
+ }
- if (tgid < 0)
- goto out;
+ if (*tgid < 0)
+ return -1;
- event->comm.pid = tgid;
+ event->comm.pid = *tgid;
event->comm.header.type = PERF_RECORD_COMM;
size = strlen(event->comm.comm) + 1;
@@ -125,23 +156,45 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
machine->id_hdr_size);
event->comm.tid = pid;
+ return 0;
+}
+
+static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ pid_t tgid, ppid;
+
+ if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
+ return -1;
+
if (process(tool, event, &synth_sample, machine) != 0)
return -1;
-out:
return tgid;
}
static int perf_event__synthesize_fork(struct perf_tool *tool,
- union perf_event *event, pid_t pid,
- pid_t tgid, perf_event__handler_t process,
+ union perf_event *event,
+ pid_t pid, pid_t tgid, pid_t ppid,
+ perf_event__handler_t process,
struct machine *machine)
{
memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
- /* this is really a clone event but we use fork to synthesize it */
- event->fork.ppid = tgid;
- event->fork.ptid = tgid;
+ /*
+ * For the main thread, set the parent to the ppid from the status
+ * file. For other threads, set the parent pid to the main thread,
+ * i.e. assume the main thread spawns all threads in a process.
+ */
+ if (tgid == pid) {
+ event->fork.ppid = ppid;
+ event->fork.ptid = ppid;
+ } else {
+ event->fork.ppid = tgid;
+ event->fork.ptid = tgid;
+ }
event->fork.pid = tgid;
event->fork.tid = pid;
event->fork.header.type = PERF_RECORD_FORK;
@@ -333,7 +386,8 @@ static int __event__synthesize_thread(union perf_event *comm_event,
char filename[PATH_MAX];
DIR *tasks;
struct dirent dirent, *next;
- pid_t tgid;
+ pid_t tgid, ppid;
+ int rc = 0;
/* special case: only send one comm event using passed in pid */
if (!full) {
@@ -361,34 +415,38 @@ static int __event__synthesize_thread(union perf_event *comm_event,
while (!readdir_r(tasks, &dirent, &next) && next) {
char *end;
- int rc = 0;
pid_t _pid;
_pid = strtol(dirent.d_name, &end, 10);
if (*end)
continue;
- tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
- process, machine);
- if (tgid == -1)
- return -1;
+ rc = -1;
+ if (perf_event__prepare_comm(comm_event, _pid, machine,
+ &tgid, &ppid) != 0)
+ break;
+ if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
+ ppid, process, machine) < 0)
+ break;
+ /*
+ * Send the prepared comm event
+ */
+ if (process(tool, comm_event, &synth_sample, machine) != 0)
+ break;
+
+ rc = 0;
if (_pid == pid) {
/* process the parent's maps too */
rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data);
- } else {
- /* only fork the tid's map, to save time */
- rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
- process, machine);
+ if (rc)
+ break;
}
-
- if (rc)
- return rc;
}
closedir(tasks);
- return 0;
+ return rc;
}
int perf_event__synthesize_thread_map(struct perf_tool *tool,
@@ -615,7 +673,7 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
else
s = "";
- return fprintf(fp, "%s: %s:%d\n", s, event->comm.comm, event->comm.tid);
+ return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
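The rewritten perf_event__get_comm_ids() above replaces line-by-line fgets() parsing with a single read() of /proc/<pid>/status followed by strstr() lookups of the "Name:", "Tgid:" and "PPid:" fields. A self-contained sketch of that parsing approach in plain C (not the perf helper itself):

#include <ctype.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int read_status(pid_t pid, char *comm, size_t len, pid_t *tgid, pid_t *ppid)
{
	char path[64], bf[4096];
	char *name, *tgids, *ppids, *nl;
	ssize_t n;
	int fd;

	*tgid = *ppid = -1;
	comm[0] = '\0';
	snprintf(path, sizeof(path), "/proc/%d/status", pid);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	bf[n] = '\0';

	name  = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		for (name += 5; *name && isspace((unsigned char)*name); name++)
			;
		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';
		snprintf(comm, len, "%s", name);
	}
	if (tgids)
		*tgid = atoi(tgids + 5);	/* atoi() skips the leading whitespace */
	if (ppids)
		*ppid = atoi(ppids + 5);
	return 0;
}

int main(void)
{
	char comm[64];
	pid_t tgid, ppid;

	if (!read_status(getpid(), comm, sizeof(comm), &tgid, &ppid))
		printf("comm=%s tgid=%d ppid=%d\n", comm, (int)tgid, (int)ppid);
	return 0;
}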
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index c4ffe2bd0738..09b9e8d3fcf7 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -242,7 +242,6 @@ struct events_stats {
u32 nr_invalid_chains;
u32 nr_unknown_id;
u32 nr_unprocessable_samples;
- u32 nr_unordered_events;
};
struct attr_event {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 28b8ce86bf12..080be93eea96 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -7,7 +7,6 @@
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "util.h"
-#include <api/fs/debugfs.h>
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
@@ -635,8 +634,8 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
struct perf_mmap *md = &evlist->mmap[idx];
- unsigned int head = perf_mmap__read_head(md);
- unsigned int old = md->prev;
+ u64 head = perf_mmap__read_head(md);
+ u64 old = md->prev;
unsigned char *data = md->base + page_size;
union perf_event *event = NULL;
@@ -696,7 +695,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
static bool perf_mmap__empty(struct perf_mmap *md)
{
- return perf_mmap__read_head(md) != md->prev;
+ return perf_mmap__read_head(md) == md->prev;
}
static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
@@ -717,7 +716,7 @@ void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
struct perf_mmap *md = &evlist->mmap[idx];
if (!evlist->overwrite) {
- unsigned int old = md->prev;
+ u64 old = md->prev;
perf_mmap__write_tail(md, old);
}
@@ -1051,7 +1050,7 @@ out_delete_threads:
return -1;
}
-int perf_evlist__apply_filters(struct perf_evlist *evlist)
+int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
struct perf_evsel *evsel;
int err = 0;
@@ -1063,8 +1062,10 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist)
continue;
err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
- if (err)
+ if (err) {
+ *err_evsel = evsel;
break;
+ }
}
return err;
@@ -1086,6 +1087,38 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
return err;
}
+int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
+{
+ char *filter = NULL;
+ int ret = -1;
+ size_t i;
+
+ for (i = 0; i < npids; ++i) {
+ if (i == 0) {
+ if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
+ return -1;
+ } else {
+ char *tmp;
+
+ if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
+ goto out_free;
+
+ free(filter);
+ filter = tmp;
+ }
+ }
+
+ ret = perf_evlist__set_filter(evlist, filter);
+out_free:
+ free(filter);
+ return ret;
+}
+
+int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
+{
+ return perf_evlist__set_filter_pids(evlist, 1, &pid);
+}
+
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
struct perf_evsel *pos;
@@ -1329,7 +1362,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
* writing exactly one byte, in workload.cork_fd, usually via
* perf_evlist__start_workload().
*
- * For cancelling the workload without actuallin running it,
+ * For cancelling the workload without actually running it,
* the parent will just close workload.cork_fd, without writing
* anything, i.e. read will return zero and we just exit()
* here.
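perf_evlist__set_filter_pids() above builds one tracepoint filter string of the form "common_pid != A && common_pid != B && ..." by repeatedly asprintf()-ing onto the previous string. A standalone sketch of just that string-building loop, with the evlist plumbing omitted:

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static char *build_pid_filter(size_t npids, const pid_t *pids)
{
	char *filter = NULL, *tmp;
	size_t i;

	for (i = 0; i < npids; i++) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) {
				free(filter);
				return NULL;
			}
			free(filter);
			filter = tmp;
		}
	}
	return filter;
}

int main(void)
{
	pid_t pids[] = { 1, 42, 4711 };
	char *filter = build_pid_filter(3, pids);

	if (filter) {
		puts(filter);	/* common_pid != 1 && common_pid != 42 && common_pid != 4711 */
		free(filter);
	}
	return 0;
}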
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index e99a67632831..b5cce95d644e 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -27,7 +27,7 @@ struct perf_mmap {
void *base;
int mask;
int refcnt;
- unsigned int prev;
+ u64 prev;
char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
};
@@ -51,6 +51,7 @@ struct perf_evlist {
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evsel *selected;
+ struct events_stats stats;
};
struct perf_evsel_str_handler {
@@ -77,6 +78,8 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
+int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
+int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
@@ -149,7 +152,7 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
-int perf_evlist__apply_filters(struct perf_evlist *evlist);
+int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);
@@ -186,16 +189,15 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
-static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
+static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->base;
- int head = ACCESS_ONCE(pc->data_head);
+ u64 head = ACCESS_ONCE(pc->data_head);
rmb();
return head;
}
-static inline void perf_mmap__write_tail(struct perf_mmap *md,
- unsigned long tail)
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
struct perf_event_mmap_page *pc = md->base;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ea51a90e20a0..33e3fd8c2e68 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -32,8 +32,12 @@ static struct {
bool exclude_guest;
bool mmap2;
bool cloexec;
+ bool clockid;
+ bool clockid_wrong;
} perf_missing_features;
+static clockid_t clockid;
+
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
return 0;
@@ -537,13 +541,30 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
}
static void
-perf_evsel__config_callgraph(struct perf_evsel *evsel)
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+ struct record_opts *opts)
{
bool function = perf_evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->attr;
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+ if (callchain_param.record_mode == CALLCHAIN_LBR) {
+ if (!opts->branch_stack) {
+ if (attr->exclude_user) {
+ pr_warning("LBR callstack option is only available "
+ "to get user callchain information. "
+ "Falling back to framepointers.\n");
+ } else {
+ perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
+ attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
+ PERF_SAMPLE_BRANCH_CALL_STACK;
+ }
+ } else
+ pr_warning("Cannot use LBR callstack with branch stack. "
+ "Falling back to framepointers.\n");
+ }
+
if (callchain_param.record_mode == CALLCHAIN_DWARF) {
if (!function) {
perf_evsel__set_sample_bit(evsel, REGS_USER);
@@ -667,7 +688,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
evsel->attr.exclude_callchain_user = 1;
if (callchain_param.enabled && !evsel->no_aux_samples)
- perf_evsel__config_callgraph(evsel);
+ perf_evsel__config_callgraph(evsel, opts);
if (opts->sample_intr_regs) {
attr->sample_regs_intr = PERF_REGS_MASK;
@@ -717,6 +738,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
if (opts->sample_transaction)
perf_evsel__set_sample_bit(evsel, TRANSACTION);
+ if (opts->running_time) {
+ evsel->attr.read_format |=
+ PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
+ }
+
/*
* XXX see the function comment above
*
@@ -738,6 +765,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
attr->disabled = 0;
attr->enable_on_exec = 0;
}
+
+ clockid = opts->clockid;
+ if (opts->use_clockid) {
+ attr->use_clockid = 1;
+ attr->clockid = opts->clockid;
+ }
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -978,67 +1011,126 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
return fd;
}
-#define __PRINT_ATTR(fmt, cast, field) \
- fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field)
-
-#define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field)
-#define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field)
-#define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field)
-#define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field)
-
-#define PRINT_ATTR2N(name1, field1, name2, field2) \
- fprintf(fp, " %-19s %u %-19s %u\n", \
- name1, attr->field1, name2, attr->field2)
-
-#define PRINT_ATTR2(field1, field2) \
- PRINT_ATTR2N(#field1, field1, #field2, field2)
-
-static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
-{
- size_t ret = 0;
-
- ret += fprintf(fp, "%.60s\n", graph_dotted_line);
- ret += fprintf(fp, "perf_event_attr:\n");
-
- ret += PRINT_ATTR_U32(type);
- ret += PRINT_ATTR_U32(size);
- ret += PRINT_ATTR_X64(config);
- ret += PRINT_ATTR_U64(sample_period);
- ret += PRINT_ATTR_U64(sample_freq);
- ret += PRINT_ATTR_X64(sample_type);
- ret += PRINT_ATTR_X64(read_format);
-
- ret += PRINT_ATTR2(disabled, inherit);
- ret += PRINT_ATTR2(pinned, exclusive);
- ret += PRINT_ATTR2(exclude_user, exclude_kernel);
- ret += PRINT_ATTR2(exclude_hv, exclude_idle);
- ret += PRINT_ATTR2(mmap, comm);
- ret += PRINT_ATTR2(mmap2, comm_exec);
- ret += PRINT_ATTR2(freq, inherit_stat);
- ret += PRINT_ATTR2(enable_on_exec, task);
- ret += PRINT_ATTR2(watermark, precise_ip);
- ret += PRINT_ATTR2(mmap_data, sample_id_all);
- ret += PRINT_ATTR2(exclude_host, exclude_guest);
- ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
- "excl.callchain_user", exclude_callchain_user);
-
- ret += PRINT_ATTR_U32(wakeup_events);
- ret += PRINT_ATTR_U32(wakeup_watermark);
- ret += PRINT_ATTR_X32(bp_type);
- ret += PRINT_ATTR_X64(bp_addr);
- ret += PRINT_ATTR_X64(config1);
- ret += PRINT_ATTR_U64(bp_len);
- ret += PRINT_ATTR_X64(config2);
- ret += PRINT_ATTR_X64(branch_sample_type);
- ret += PRINT_ATTR_X64(sample_regs_user);
- ret += PRINT_ATTR_U32(sample_stack_user);
- ret += PRINT_ATTR_X64(sample_regs_intr);
-
- ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+struct bit_names {
+ int bit;
+ const char *name;
+};
+
+static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
+{
+ bool first_bit = true;
+ int i = 0;
+
+ do {
+ if (value & bits[i].bit) {
+ buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
+ first_bit = false;
+ }
+ } while (bits[++i].name != NULL);
+}
+
+static void __p_sample_type(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
+ bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
+ bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
+ bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+ bit_name(IDENTIFIER), bit_name(REGS_INTR),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+static void __p_read_format(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_FORMAT_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
+ bit_name(ID), bit_name(GROUP),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+#define BUF_SIZE 1024
+
+#define p_hex(val) snprintf(buf, BUF_SIZE, "%"PRIx64, (uint64_t)(val))
+#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
+#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
+#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
+#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
+
+#define PRINT_ATTRn(_n, _f, _p) \
+do { \
+ if (attr->_f) { \
+ _p(attr->_f); \
+ ret += attr__fprintf(fp, _n, buf, priv);\
+ } \
+} while (0)
+
+#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
+
+int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ attr__fprintf_f attr__fprintf, void *priv)
+{
+ char buf[BUF_SIZE];
+ int ret = 0;
+
+ PRINT_ATTRf(type, p_unsigned);
+ PRINT_ATTRf(size, p_unsigned);
+ PRINT_ATTRf(config, p_hex);
+ PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
+ PRINT_ATTRf(sample_type, p_sample_type);
+ PRINT_ATTRf(read_format, p_read_format);
+
+ PRINT_ATTRf(disabled, p_unsigned);
+ PRINT_ATTRf(inherit, p_unsigned);
+ PRINT_ATTRf(pinned, p_unsigned);
+ PRINT_ATTRf(exclusive, p_unsigned);
+ PRINT_ATTRf(exclude_user, p_unsigned);
+ PRINT_ATTRf(exclude_kernel, p_unsigned);
+ PRINT_ATTRf(exclude_hv, p_unsigned);
+ PRINT_ATTRf(exclude_idle, p_unsigned);
+ PRINT_ATTRf(mmap, p_unsigned);
+ PRINT_ATTRf(comm, p_unsigned);
+ PRINT_ATTRf(freq, p_unsigned);
+ PRINT_ATTRf(inherit_stat, p_unsigned);
+ PRINT_ATTRf(enable_on_exec, p_unsigned);
+ PRINT_ATTRf(task, p_unsigned);
+ PRINT_ATTRf(watermark, p_unsigned);
+ PRINT_ATTRf(precise_ip, p_unsigned);
+ PRINT_ATTRf(mmap_data, p_unsigned);
+ PRINT_ATTRf(sample_id_all, p_unsigned);
+ PRINT_ATTRf(exclude_host, p_unsigned);
+ PRINT_ATTRf(exclude_guest, p_unsigned);
+ PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
+ PRINT_ATTRf(exclude_callchain_user, p_unsigned);
+ PRINT_ATTRf(mmap2, p_unsigned);
+ PRINT_ATTRf(comm_exec, p_unsigned);
+ PRINT_ATTRf(use_clockid, p_unsigned);
+
+ PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
+ PRINT_ATTRf(bp_type, p_unsigned);
+ PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
+ PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
+ PRINT_ATTRf(sample_regs_user, p_hex);
+ PRINT_ATTRf(sample_stack_user, p_unsigned);
+ PRINT_ATTRf(clockid, p_signed);
+ PRINT_ATTRf(sample_regs_intr, p_hex);
return ret;
}
+static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
+ void *priv __attribute__((unused)))
+{
+ return fprintf(fp, " %-32s %s\n", name, val);
+}
+
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads)
{
@@ -1062,6 +1154,12 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
}
fallback_missing_features:
+ if (perf_missing_features.clockid_wrong)
+ evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
+ if (perf_missing_features.clockid) {
+ evsel->attr.use_clockid = 0;
+ evsel->attr.clockid = 0;
+ }
if (perf_missing_features.cloexec)
flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
if (perf_missing_features.mmap2)
@@ -1072,8 +1170,12 @@ retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
- if (verbose >= 2)
- perf_event_attr__fprintf(&evsel->attr, stderr);
+ if (verbose >= 2) {
+ fprintf(stderr, "%.60s\n", graph_dotted_line);
+ fprintf(stderr, "perf_event_attr:\n");
+ perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
+ fprintf(stderr, "%.60s\n", graph_dotted_line);
+ }
for (cpu = 0; cpu < cpus->nr; cpu++) {
@@ -1099,6 +1201,17 @@ retry_open:
goto try_fallback;
}
set_rlimit = NO_CHANGE;
+
+ /*
+ * If we succeeded but had to kill clockid, fail and
+ * have perf_evsel__open_strerror() print us a nice
+ * error.
+ */
+ if (perf_missing_features.clockid ||
+ perf_missing_features.clockid_wrong) {
+ err = -EINVAL;
+ goto out_close;
+ }
}
}
@@ -1132,7 +1245,17 @@ try_fallback:
if (err != -EINVAL || cpu > 0 || thread > 0)
goto out_close;
- if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
+ /*
+ * Must probe features in the order they were added to the
+ * perf_event_attr interface.
+ */
+ if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
+ perf_missing_features.clockid_wrong = true;
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
+ perf_missing_features.clockid = true;
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
perf_missing_features.cloexec = true;
goto fallback_missing_features;
} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
@@ -1892,7 +2015,7 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
value = *(u32 *)ptr;
break;
case 8:
- value = *(u64 *)ptr;
+ memcpy(&value, ptr, sizeof(u64));
break;
default:
return 0;
@@ -1933,62 +2056,9 @@ static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
return ret;
}
-static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
-{
- if (value == 0)
- return 0;
-
- return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
-}
-
-#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
-
-struct bit_names {
- int bit;
- const char *name;
-};
-
-static int bits__fprintf(FILE *fp, const char *field, u64 value,
- struct bit_names *bits, bool *first)
-{
- int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
- bool first_bit = true;
-
- do {
- if (value & bits[i].bit) {
- printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
- first_bit = false;
- }
- } while (bits[++i].name != NULL);
-
- return printed;
-}
-
-static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
-{
-#define bit_name(n) { PERF_SAMPLE_##n, #n }
- struct bit_names bits[] = {
- bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
- bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
- bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
- bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
- bit_name(IDENTIFIER), bit_name(REGS_INTR),
- { .name = NULL, }
- };
-#undef bit_name
- return bits__fprintf(fp, "sample_type", value, bits, first);
-}
-
-static int read_format__fprintf(FILE *fp, bool *first, u64 value)
+static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
-#define bit_name(n) { PERF_FORMAT_##n, #n }
- struct bit_names bits[] = {
- bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
- bit_name(ID), bit_name(GROUP),
- { .name = NULL, }
- };
-#undef bit_name
- return bits__fprintf(fp, "read_format", value, bits, first);
+ return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}
int perf_evsel__fprintf(struct perf_evsel *evsel,
@@ -2017,47 +2087,13 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
printed += fprintf(fp, "%s", perf_evsel__name(evsel));
- if (details->verbose || details->freq) {
+ if (details->verbose) {
+ printed += perf_event_attr__fprintf(fp, &evsel->attr,
+ __print_attr__fprintf, &first);
+ } else if (details->freq) {
printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
(u64)evsel->attr.sample_freq);
}
-
- if (details->verbose) {
- if_print(type);
- if_print(config);
- if_print(config1);
- if_print(config2);
- if_print(size);
- printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
- if (evsel->attr.read_format)
- printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
- if_print(disabled);
- if_print(inherit);
- if_print(pinned);
- if_print(exclusive);
- if_print(exclude_user);
- if_print(exclude_kernel);
- if_print(exclude_hv);
- if_print(exclude_idle);
- if_print(mmap);
- if_print(mmap2);
- if_print(comm);
- if_print(comm_exec);
- if_print(freq);
- if_print(inherit_stat);
- if_print(enable_on_exec);
- if_print(task);
- if_print(watermark);
- if_print(precise_ip);
- if_print(mmap_data);
- if_print(sample_id_all);
- if_print(exclude_host);
- if_print(exclude_guest);
- if_print(__reserved_1);
- if_print(wakeup_events);
- if_print(bp_type);
- if_print(branch_sample_type);
- }
out:
fputc('\n', fp);
return ++printed;
@@ -2135,6 +2171,12 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"The PMU counters are busy/taken by another profiler.\n"
"We found oprofile daemon running, please stop it and try again.");
break;
+ case EINVAL:
+ if (perf_missing_features.clockid)
+ return scnprintf(msg, size, "clockid feature not supported.");
+ if (perf_missing_features.clockid_wrong)
+ return scnprintf(msg, size, "wrong clockid (%d).", clockid);
+ break;
default:
break;
}
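The clockid handling above follows perf's usual probe-and-fall-back pattern: try perf_event_open() with the new attr bits set and, if the kernel rejects them with EINVAL, clear them and retry (reporting the missing feature via perf_evsel__open_strerror()). A self-contained sketch of that probe against the raw syscall, assuming UAPI headers new enough to declare the use_clockid/clockid fields (Linux 4.1+):

#include <errno.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static int open_counter(int use_clockid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.disabled = 1;
	if (use_clockid) {
		attr.use_clockid = 1;
		attr.clockid = CLOCK_MONOTONIC_RAW;
	}
	/* pid = 0 (self), cpu = -1, no group fd, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	int fd = open_counter(1);

	if (fd < 0 && errno == EINVAL) {
		fprintf(stderr, "use_clockid not supported, falling back\n");
		fd = open_counter(0);
	}
	if (fd >= 0)
		close(fd);
	return fd >= 0 ? 0 : 1;
}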
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 38622747d130..e486151b0308 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -335,6 +335,7 @@ struct perf_attr_details {
bool freq;
bool verbose;
bool event_group;
+ bool force;
};
int perf_evsel__fprintf(struct perf_evsel *evsel,
@@ -355,4 +356,14 @@ for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
(_evsel) && (_evsel)->leader == (_leader); \
(_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
+static inline bool has_branch_callstack(struct perf_evsel *evsel)
+{
+ return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+}
+
+typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
+
+int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ attr__fprintf_f attr__fprintf, void *priv);
+
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 1f407f7352a7..918fd8ae2d80 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1055,6 +1055,12 @@ error:
goto out;
}
+static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
+ void *priv __attribute__((unused)))
+{
+ return fprintf(fp, ", %s = %s", name, val);
+}
+
static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
@@ -1069,26 +1075,6 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
for (evsel = events; evsel->attr.size; evsel++) {
fprintf(fp, "# event : name = %s, ", evsel->name);
- fprintf(fp, "type = %d, config = 0x%"PRIx64
- ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
- evsel->attr.type,
- (u64)evsel->attr.config,
- (u64)evsel->attr.config1,
- (u64)evsel->attr.config2);
-
- fprintf(fp, ", excl_usr = %d, excl_kern = %d",
- evsel->attr.exclude_user,
- evsel->attr.exclude_kernel);
-
- fprintf(fp, ", excl_host = %d, excl_guest = %d",
- evsel->attr.exclude_host,
- evsel->attr.exclude_guest);
-
- fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);
-
- fprintf(fp, ", attr_mmap2 = %d", evsel->attr.mmap2);
- fprintf(fp, ", attr_mmap = %d", evsel->attr.mmap);
- fprintf(fp, ", attr_mmap_data = %d", evsel->attr.mmap_data);
if (evsel->ids) {
fprintf(fp, ", id = {");
for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
@@ -1099,6 +1085,8 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
fprintf(fp, " }");
}
+ perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
+
fputc('\n', fp);
}
@@ -1266,7 +1254,7 @@ static int __event_process_build_id(struct build_id_event *bev,
dso__set_build_id(dso, &bev->build_id);
- if (!is_kernel_module(filename, NULL))
+ if (!is_kernel_module(filename))
dso->kernel = dso_type;
build_id__sprintf(dso->build_id, sizeof(dso->build_id),
@@ -2516,8 +2504,11 @@ int perf_session__read_header(struct perf_session *session)
if (read_attr(fd, header, &f_attr) < 0)
goto out_errno;
- if (header->needs_swap)
+ if (header->needs_swap) {
+ f_attr.ids.size = bswap_64(f_attr.ids.size);
+ f_attr.ids.offset = bswap_64(f_attr.ids.offset);
perf_event__attr_swap(&f_attr.attr);
+ }
tmp = lseek(fd, 0, SEEK_CUR);
evsel = perf_evsel__new(&f_attr.attr);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 70b48a65064c..cc22b9158b93 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -263,15 +263,9 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
- /*
- * We may be annotating this, for instance, so keep it here in
- * case some it gets new samples, we'll eventually free it when
- * the user stops browsing and it agains gets fully decayed.
- */
if (((zap_user && n->level == '.') ||
(zap_kernel && n->level != '.') ||
- hists__decay_entry(hists, n)) &&
- !n->used) {
+ hists__decay_entry(hists, n))) {
hists__delete_entry(hists, n);
}
}
@@ -355,6 +349,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
callchain_init(he->callchain);
INIT_LIST_HEAD(&he->pairs.node);
+ thread__get(he->thread);
}
return he;
@@ -941,6 +936,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
void hist_entry__delete(struct hist_entry *he)
{
+ thread__zput(he->thread);
zfree(&he->branch_info);
zfree(&he->mem_info);
zfree(&he->stat_acc);
@@ -1169,6 +1165,7 @@ static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h
/* force fold unfiltered entry for simplicity */
h->ms.unfolded = false;
h->row_offset = 0;
+ h->nr_rows = 0;
hists->stats.nr_non_filtered_samples += h->stat.nr_events;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 2b690d028907..9f31b89a527a 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -60,7 +60,7 @@ struct hists {
struct rb_root entries_collapsed;
u64 nr_entries;
u64 nr_non_filtered_entries;
- const struct thread *thread_filter;
+ struct thread *thread_filter;
const struct dso *dso_filter;
const char *uid_filter_str;
const char *symbol_filter_str;
@@ -303,6 +303,9 @@ struct hist_browser_timer {
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
+int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt);
+
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
struct hist_browser_timer *hbt);
@@ -321,6 +324,12 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
{
return 0;
}
+static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused)
+{
+ return 0;
+}
static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index cf1d7913783b..ae825d4ec110 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -99,6 +99,7 @@ struct perf_kvm_stat {
int timerfd;
unsigned int display_time;
bool live;
+ bool force;
};
struct kvm_reg_events_ops {
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
new file mode 100644
index 000000000000..95a1acb61245
--- /dev/null
+++ b/tools/perf/util/lzma.c
@@ -0,0 +1,95 @@
+#include <lzma.h>
+#include <stdio.h>
+#include <linux/compiler.h>
+#include "util.h"
+#include "debug.h"
+
+#define BUFSIZE 8192
+
+static const char *lzma_strerror(lzma_ret ret)
+{
+ switch ((int) ret) {
+ case LZMA_MEM_ERROR:
+ return "Memory allocation failed";
+ case LZMA_OPTIONS_ERROR:
+ return "Unsupported decompressor flags";
+ case LZMA_FORMAT_ERROR:
+ return "The input is not in the .xz format";
+ case LZMA_DATA_ERROR:
+ return "Compressed file is corrupt";
+ case LZMA_BUF_ERROR:
+ return "Compressed file is truncated or otherwise corrupt";
+ default:
+ return "Unknown error, possibly a bug";
+ }
+}
+
+int lzma_decompress_to_file(const char *input, int output_fd)
+{
+ lzma_action action = LZMA_RUN;
+ lzma_stream strm = LZMA_STREAM_INIT;
+ lzma_ret ret;
+
+ u8 buf_in[BUFSIZE];
+ u8 buf_out[BUFSIZE];
+ FILE *infile;
+
+ infile = fopen(input, "rb");
+ if (!infile) {
+ pr_err("lzma: fopen failed on %s: '%s'\n",
+ input, strerror(errno));
+ return -1;
+ }
+
+ ret = lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED);
+ if (ret != LZMA_OK) {
+ pr_err("lzma: lzma_stream_decoder failed %s (%d)\n",
+ lzma_strerror(ret), ret);
+ return -1;
+ }
+
+ strm.next_in = NULL;
+ strm.avail_in = 0;
+ strm.next_out = buf_out;
+ strm.avail_out = sizeof(buf_out);
+
+ while (1) {
+ if (strm.avail_in == 0 && !feof(infile)) {
+ strm.next_in = buf_in;
+ strm.avail_in = fread(buf_in, 1, sizeof(buf_in), infile);
+
+ if (ferror(infile)) {
+ pr_err("lzma: read error: %s\n", strerror(errno));
+ return -1;
+ }
+
+ if (feof(infile))
+ action = LZMA_FINISH;
+ }
+
+ ret = lzma_code(&strm, action);
+
+ if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
+ ssize_t write_size = sizeof(buf_out) - strm.avail_out;
+
+ if (writen(output_fd, buf_out, write_size) != write_size) {
+ pr_err("lzma: write error: %s\n", strerror(errno));
+ return -1;
+ }
+
+ strm.next_out = buf_out;
+ strm.avail_out = sizeof(buf_out);
+ }
+
+ if (ret != LZMA_OK) {
+ if (ret == LZMA_STREAM_END)
+ return 0;
+
+ pr_err("lzma: failed %s\n", lzma_strerror(ret));
+ return -1;
+ }
+ }
+
+ fclose(infile);
+ return 0;
+}
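A short hypothetical caller of the new helper, assuming it is built inside tools/perf (dso.c dispatches to it for ".xz" files via the decompression table above); the output path and the header carrying the prototype are illustrative assumptions.

#include <fcntl.h>
#include <unistd.h>
#include "util/util.h"	/* assumed location of the lzma_decompress_to_file() prototype */

static int decompress_module(const char *xz_path)
{
	int out = open("/tmp/module.ko", O_WRONLY | O_CREAT | O_TRUNC, 0600);
	int ret;

	if (out < 0)
		return -1;

	ret = lzma_decompress_to_file(xz_path, out);
	close(out);
	return ret;
}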
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 1bca3a9f2b16..527e032e24f6 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -89,16 +89,6 @@ static void dsos__delete(struct dsos *dsos)
}
}
-void machine__delete_dead_threads(struct machine *machine)
-{
- struct thread *n, *t;
-
- list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
- list_del(&t->node);
- thread__delete(t);
- }
-}
-
void machine__delete_threads(struct machine *machine)
{
struct rb_node *nd = rb_first(&machine->threads);
@@ -106,9 +96,8 @@ void machine__delete_threads(struct machine *machine)
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
- rb_erase(&t->rb_node, &machine->threads);
nd = rb_next(nd);
- thread__delete(t);
+ machine__remove_thread(machine, t);
}
}
@@ -361,9 +350,13 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
* the full rbtree:
*/
th = machine->last_match;
- if (th && th->tid == tid) {
- machine__update_thread_pid(machine, th, pid);
- return th;
+ if (th != NULL) {
+ if (th->tid == tid) {
+ machine__update_thread_pid(machine, th, pid);
+ return th;
+ }
+
+ thread__zput(machine->last_match);
}
while (*p != NULL) {
@@ -371,7 +364,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
th = rb_entry(parent, struct thread, rb_node);
if (th->tid == tid) {
- machine->last_match = th;
+ machine->last_match = thread__get(th);
machine__update_thread_pid(machine, th, pid);
return th;
}
@@ -403,8 +396,11 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
thread__delete(th);
return NULL;
}
-
- machine->last_match = th;
+ /*
+ * It is now in the rbtree, get a ref
+ */
+ thread__get(th);
+ machine->last_match = thread__get(th);
}
return th;
@@ -462,30 +458,61 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
return 0;
}
+static struct dso*
+machine__module_dso(struct machine *machine, struct kmod_path *m,
+ const char *filename)
+{
+ struct dso *dso;
+
+ dso = dsos__find(&machine->kernel_dsos, m->name, true);
+ if (!dso) {
+ dso = dsos__addnew(&machine->kernel_dsos, m->name);
+ if (dso == NULL)
+ return NULL;
+
+ if (machine__is_host(machine))
+ dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+ else
+ dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+
+ /* _KMODULE_COMP should be next to _KMODULE */
+ if (m->kmod && m->comp)
+ dso->symtab_type++;
+
+ dso__set_short_name(dso, strdup(m->name), true);
+ dso__set_long_name(dso, strdup(filename), true);
+ }
+
+ return dso;
+}
+
struct map *machine__new_module(struct machine *machine, u64 start,
const char *filename)
{
- struct map *map;
- struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
- bool compressed;
+ struct map *map = NULL;
+ struct dso *dso;
+ struct kmod_path m;
- if (dso == NULL)
+ if (kmod_path__parse_name(&m, filename))
return NULL;
- map = map__new2(start, dso, MAP__FUNCTION);
- if (map == NULL)
- return NULL;
+ map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
+ m.name);
+ if (map)
+ goto out;
- if (machine__is_host(machine))
- dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
- else
- dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+ dso = machine__module_dso(machine, &m, filename);
+ if (dso == NULL)
+ goto out;
- /* _KMODULE_COMP should be next to _KMODULE */
- if (is_kernel_module(filename, &compressed) && compressed)
- dso->symtab_type++;
+ map = map__new2(start, dso, MAP__FUNCTION);
+ if (map == NULL)
+ goto out;
map_groups__insert(&machine->kmaps, map);
+
+out:
+ free(m.name);
return map;
}
@@ -650,6 +677,9 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
machine->vmlinux_maps[type]->unmap_ip =
identity__map_ip;
kmap = map__kmap(machine->vmlinux_maps[type]);
+ if (!kmap)
+ return -1;
+
kmap->kmaps = &machine->kmaps;
map_groups__insert(&machine->kmaps,
machine->vmlinux_maps[type]);
@@ -671,7 +701,7 @@ void machine__destroy_kernel_maps(struct machine *machine)
kmap = map__kmap(machine->vmlinux_maps[type]);
map_groups__remove(&machine->kmaps,
machine->vmlinux_maps[type]);
- if (kmap->ref_reloc_sym) {
+ if (kmap && kmap->ref_reloc_sym) {
/*
* ref_reloc_sym is shared among all maps, so free just
* on one of them.
@@ -827,6 +857,39 @@ static char *get_kernel_version(const char *root_dir)
return strdup(name);
}
+static bool is_kmod_dso(struct dso *dso)
+{
+ return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+ dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+}
+
+static int map_groups__set_module_path(struct map_groups *mg, const char *path,
+ struct kmod_path *m)
+{
+ struct map *map;
+ char *long_name;
+
+ map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
+ if (map == NULL)
+ return 0;
+
+ long_name = strdup(path);
+ if (long_name == NULL)
+ return -ENOMEM;
+
+ dso__set_long_name(map->dso, long_name, true);
+ dso__kernel_module_get_build_id(map->dso, "");
+
+ /*
+ * The full name could reveal that the kmod is compressed, so
+ * update the symtab_type if needed.
+ */
+ if (m->comp && is_kmod_dso(map->dso))
+ map->dso->symtab_type++;
+
+ return 0;
+}
+
static int map_groups__set_modules_path_dir(struct map_groups *mg,
const char *dir_name, int depth)
{
@@ -865,35 +928,19 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
if (ret < 0)
goto out;
} else {
- char *dot = strrchr(dent->d_name, '.'),
- dso_name[PATH_MAX];
- struct map *map;
- char *long_name;
+ struct kmod_path m;
- if (dot == NULL)
- continue;
-
- /* On some system, modules are compressed like .ko.gz */
- if (is_supported_compression(dot + 1) &&
- is_kmodule_extension(dot - 2))
- dot -= 3;
+ ret = kmod_path__parse_name(&m, dent->d_name);
+ if (ret)
+ goto out;
- snprintf(dso_name, sizeof(dso_name), "[%.*s]",
- (int)(dot - dent->d_name), dent->d_name);
+ if (m.kmod)
+ ret = map_groups__set_module_path(mg, path, &m);
- strxfrchar(dso_name, '-', '_');
- map = map_groups__find_by_name(mg, MAP__FUNCTION,
- dso_name);
- if (map == NULL)
- continue;
+ free(m.name);
- long_name = strdup(path);
- if (long_name == NULL) {
- ret = -1;
+ if (ret)
goto out;
- }
- dso__set_long_name(map->dso, long_name, true);
- dso__kernel_module_get_build_id(map->dso, "");
}
}
@@ -1046,40 +1093,11 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
strlen(kmmap_prefix) - 1) == 0;
if (event->mmap.filename[0] == '/' ||
(!is_kernel_mmap && event->mmap.filename[0] == '[')) {
-
- char short_module_name[1024];
- char *name, *dot;
-
- if (event->mmap.filename[0] == '/') {
- name = strrchr(event->mmap.filename, '/');
- if (name == NULL)
- goto out_problem;
-
- ++name; /* skip / */
- dot = strrchr(name, '.');
- if (dot == NULL)
- goto out_problem;
- /* On some system, modules are compressed like .ko.gz */
- if (is_supported_compression(dot + 1))
- dot -= 3;
- if (!is_kmodule_extension(dot + 1))
- goto out_problem;
- snprintf(short_module_name, sizeof(short_module_name),
- "[%.*s]", (int)(dot - name), name);
- strxfrchar(short_module_name, '-', '_');
- } else
- strcpy(short_module_name, event->mmap.filename);
-
map = machine__new_module(machine, event->mmap.start,
event->mmap.filename);
if (map == NULL)
goto out_problem;
- name = strdup(short_module_name);
- if (name == NULL)
- goto out_problem;
-
- dso__set_short_name(map->dso, name, true);
map->end = map->start + event->mmap.len;
} else if (is_kernel_mmap) {
const char *symbol_name = (event->mmap.filename +
@@ -1092,7 +1110,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
struct dso *dso;
list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
- if (is_kernel_module(dso->long_name, NULL))
+ if (is_kernel_module(dso->long_name))
continue;
kernel = dso;
@@ -1236,15 +1254,19 @@ out_problem:
return 0;
}
-static void machine__remove_thread(struct machine *machine, struct thread *th)
+void machine__remove_thread(struct machine *machine, struct thread *th)
{
- machine->last_match = NULL;
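+ /* Drop and clear the cached last_match reference if it points at this thread */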
+ if (machine->last_match == th)
+ thread__zput(machine->last_match);
+
rb_erase(&th->rb_node, &machine->threads);
/*
- * We may have references to this thread, for instance in some hist_entry
- * instances, so just move them to a separate list.
+ * Move it first to the dead_threads list, then drop the reference:
+ * if this is the last reference, the thread__delete destructor
+ * will be called and it will be removed from the dead_threads list.
*/
list_add_tail(&th->node, &machine->dead_threads);
+ thread__put(th);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
@@ -1387,29 +1409,27 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
static int add_callchain_ip(struct thread *thread,
struct symbol **parent,
struct addr_location *root_al,
- bool branch_history,
+ u8 *cpumode,
u64 ip)
{
struct addr_location al;
al.filtered = 0;
al.sym = NULL;
- if (branch_history)
+ if (!cpumode) {
thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
ip, &al);
- else {
- u8 cpumode = PERF_RECORD_MISC_USER;
-
+ } else {
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
- cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ *cpumode = PERF_RECORD_MISC_HYPERVISOR;
break;
case PERF_CONTEXT_KERNEL:
- cpumode = PERF_RECORD_MISC_KERNEL;
+ *cpumode = PERF_RECORD_MISC_KERNEL;
break;
case PERF_CONTEXT_USER:
- cpumode = PERF_RECORD_MISC_USER;
+ *cpumode = PERF_RECORD_MISC_USER;
break;
default:
pr_debug("invalid callchain context: "
@@ -1423,8 +1443,8 @@ static int add_callchain_ip(struct thread *thread,
}
return 0;
}
- thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
- ip, &al);
+ thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
+ ip, &al);
}
if (al.sym != NULL) {
@@ -1502,18 +1522,102 @@ static int remove_loops(struct branch_entry *l, int nr)
return nr;
}
-static int thread__resolve_callchain_sample(struct thread *thread,
- struct ip_callchain *chain,
- struct branch_stack *branch,
- struct symbol **parent,
- struct addr_location *root_al,
- int max_stack)
+/*
+ * Resolve LBR callstack chain sample
+ * Return:
+ * 1 on success, LBR callchain information is resolved
+ * 0 if no LBR callchain information is available, fall back to fp
+ * negative error code on other errors.
+ */
+static int resolve_lbr_callchain_sample(struct thread *thread,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack)
{
+ struct ip_callchain *chain = sample->callchain;
int chain_nr = min(max_stack, (int)chain->nr);
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ int i, j, err;
+ u64 ip;
+
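+ /* Find the PERF_CONTEXT_USER marker, i.e. where the user chain starts */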
+ for (i = 0; i < chain_nr; i++) {
+ if (chain->ips[i] == PERF_CONTEXT_USER)
+ break;
+ }
+
+ /* LBR only affects the user callchain */
+ if (i != chain_nr) {
+ struct branch_stack *lbr_stack = sample->branch_stack;
+ int lbr_nr = lbr_stack->nr;
+ /*
+ * The LBR callstack only covers the user call chain.
+ * mix_chain_nr is the kernel call chain length plus
+ * the LBR user call chain length:
+ * i is the kernel call chain length,
+ * 1 is for PERF_CONTEXT_USER,
+ * lbr_nr + 1 is the user call chain length.
+ * For details, please refer to the comments
+ * in callchain__printf.
+ */
+ int mix_chain_nr = i + 1 + lbr_nr + 1;
+
+ if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
+ pr_warning("corrupted callchain. skipping...\n");
+ return 0;
+ }
+
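+ /*
+ * Callee order walks chain->ips[0..i] first (kernel entries
+ * plus the PERF_CONTEXT_USER marker), then entries[0].to as
+ * the current user IP, then the LBR 'from' addresses as the
+ * user callers. Caller order walks the same entries in
+ * reverse.
+ */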
+ for (j = 0; j < mix_chain_nr; j++) {
+ if (callchain_param.order == ORDER_CALLEE) {
+ if (j < i + 1)
+ ip = chain->ips[j];
+ else if (j > i + 1)
+ ip = lbr_stack->entries[j - i - 2].from;
+ else
+ ip = lbr_stack->entries[0].to;
+ } else {
+ if (j < lbr_nr)
+ ip = lbr_stack->entries[lbr_nr - j - 1].from;
+ else if (j > lbr_nr)
+ ip = chain->ips[i + 1 - (j - lbr_nr)];
+ else
+ ip = lbr_stack->entries[0].to;
+ }
+
+ err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
+ if (err)
+ return (err < 0) ? err : 0;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+static int thread__resolve_callchain_sample(struct thread *thread,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack)
+{
+ struct branch_stack *branch = sample->branch_stack;
+ struct ip_callchain *chain = sample->callchain;
+ int chain_nr = min(max_stack, (int)chain->nr);
+ u8 cpumode = PERF_RECORD_MISC_USER;
int i, j, err;
int skip_idx = -1;
int first_call = 0;
+ callchain_cursor_reset(&callchain_cursor);
+
+ if (has_branch_callstack(evsel)) {
+ err = resolve_lbr_callchain_sample(thread, sample, parent,
+ root_al, max_stack);
+ if (err)
+ return (err < 0) ? err : 0;
+ }
+
/*
* Based on DWARF debug information, some architectures skip
* a callchain entry saved by the kernel.
@@ -1521,8 +1625,6 @@ static int thread__resolve_callchain_sample(struct thread *thread,
if (chain->nr < PERF_MAX_STACK_DEPTH)
skip_idx = arch_skip_callchain_idx(thread, chain);
- callchain_cursor_reset(&callchain_cursor);
-
/*
* Add branches to call stack for easier browsing. This gives
* more context for a sample than just the callers.
@@ -1568,10 +1670,10 @@ static int thread__resolve_callchain_sample(struct thread *thread,
for (i = 0; i < nr; i++) {
err = add_callchain_ip(thread, parent, root_al,
- true, be[i].to);
+ NULL, be[i].to);
if (!err)
err = add_callchain_ip(thread, parent, root_al,
- true, be[i].from);
+ NULL, be[i].from);
if (err == -EINVAL)
break;
if (err)
@@ -1600,7 +1702,7 @@ check_calls:
#endif
ip = chain->ips[j];
- err = add_callchain_ip(thread, parent, root_al, false, ip);
+ err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
if (err)
return (err < 0) ? err : 0;
@@ -1623,9 +1725,9 @@ int thread__resolve_callchain(struct thread *thread,
struct addr_location *root_al,
int max_stack)
{
- int ret = thread__resolve_callchain_sample(thread, sample->callchain,
- sample->branch_stack,
- parent, root_al, max_stack);
+ int ret = thread__resolve_callchain_sample(thread, evsel,
+ sample, parent,
+ root_al, max_stack);
if (ret)
return ret;
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index e8b7779a0a3f..6d64cedb9d1e 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -118,9 +118,9 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec);
struct machine *machine__new_host(void);
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
void machine__exit(struct machine *machine);
-void machine__delete_dead_threads(struct machine *machine);
void machine__delete_threads(struct machine *machine);
void machine__delete(struct machine *machine);
+void machine__remove_thread(struct machine *machine, struct thread *th);
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
struct addr_location *al);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 62ca9f2607d5..a14f08f41686 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -778,3 +778,23 @@ struct map *maps__next(struct map *map)
return rb_entry(next, struct map, rb_node);
return NULL;
}
+
+struct kmap *map__kmap(struct map *map)
+{
+ if (!map->dso || !map->dso->kernel) {
+ pr_err("Internal error: map__kmap with a non-kernel map\n");
+ return NULL;
+ }
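+ /* struct kmap is allocated right after struct map for kernel maps */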
+ return (struct kmap *)(map + 1);
+}
+
+struct map_groups *map__kmaps(struct map *map)
+{
+ struct kmap *kmap = map__kmap(map);
+
+ if (!kmap || !kmap->kmaps) {
+ pr_err("Internal error: map__kmaps with a non-kernel map\n");
+ return NULL;
+ }
+ return kmap->kmaps;
+}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 0e42438b1e59..ec19c59ca38e 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -76,10 +76,8 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg)
void map_groups__put(struct map_groups *mg);
-static inline struct kmap *map__kmap(struct map *map)
-{
- return (struct kmap *)(map + 1);
-}
+struct kmap *map__kmap(struct map *map);
+struct map_groups *map__kmaps(struct map *map);
static inline u64 map__map_ip(struct map *map, u64 ip)
{
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index fd4be94125fb..52be201b9b25 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -2,7 +2,6 @@
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
-#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
@@ -131,8 +130,8 @@ static struct ordered_event *alloc_event(struct ordered_events *oe,
return new;
}
-struct ordered_event *
-ordered_events__new(struct ordered_events *oe, u64 timestamp,
+static struct ordered_event *
+ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
union perf_event *event)
{
struct ordered_event *new;
@@ -153,20 +152,47 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
free_dup_event(oe, event->event);
}
-static int __ordered_events__flush(struct perf_session *s,
- struct perf_tool *tool)
+int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset)
+{
+ u64 timestamp = sample->time;
+ struct ordered_event *oevent;
+
+ if (!timestamp || timestamp == ~0ULL)
+ return -ETIME;
+
+ if (timestamp < oe->last_flush) {
+ pr_oe_time(timestamp, "out of order event\n");
+ pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
+ oe->last_flush_type);
+
+ oe->nr_unordered_events++;
+ }
+
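+ /* On allocation failure, flush half of the queued events to free space and retry once */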
+ oevent = ordered_events__new_event(oe, timestamp, event);
+ if (!oevent) {
+ ordered_events__flush(oe, OE_FLUSH__HALF);
+ oevent = ordered_events__new_event(oe, timestamp, event);
+ }
+
+ if (!oevent)
+ return -ENOMEM;
+
+ oevent->file_offset = file_offset;
+ return 0;
+}
+
+static int __ordered_events__flush(struct ordered_events *oe)
{
- struct ordered_events *oe = &s->ordered_events;
struct list_head *head = &oe->events;
struct ordered_event *tmp, *iter;
- struct perf_sample sample;
u64 limit = oe->next_flush;
u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
bool show_progress = limit == ULLONG_MAX;
struct ui_progress prog;
int ret;
- if (!tool->ordered_events || !limit)
+ if (!limit)
return 0;
if (show_progress)
@@ -178,16 +204,9 @@ static int __ordered_events__flush(struct perf_session *s,
if (iter->timestamp > limit)
break;
-
- ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
+ ret = oe->deliver(oe, iter);
if (ret)
- pr_err("Can't parse sample, err = %d\n", ret);
- else {
- ret = perf_session__deliver_event(s, iter->event, &sample, tool,
- iter->file_offset);
- if (ret)
- return ret;
- }
+ return ret;
ordered_events__delete(oe, iter);
oe->last_flush = iter->timestamp;
@@ -204,10 +223,8 @@ static int __ordered_events__flush(struct perf_session *s,
return 0;
}
-int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
- enum oe_flush how)
+int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
- struct ordered_events *oe = &s->ordered_events;
static const char * const str[] = {
"NONE",
"FINAL",
@@ -216,6 +233,9 @@ int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
};
int err;
+ if (oe->nr_events == 0)
+ return 0;
+
switch (how) {
case OE_FLUSH__FINAL:
oe->next_flush = ULLONG_MAX;
@@ -248,7 +268,7 @@ int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
str[how], oe->nr_events);
pr_oe_time(oe->max_timestamp, "max_timestamp\n");
- err = __ordered_events__flush(s, tool);
+ err = __ordered_events__flush(oe);
if (!err) {
if (how == OE_FLUSH__ROUND)
@@ -264,13 +284,14 @@ int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
return err;
}
-void ordered_events__init(struct ordered_events *oe)
+void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
INIT_LIST_HEAD(&oe->events);
INIT_LIST_HEAD(&oe->cache);
INIT_LIST_HEAD(&oe->to_free);
oe->max_alloc_size = (u64) -1;
oe->cur_alloc_size = 0;
+ oe->deliver = deliver;
}
void ordered_events__free(struct ordered_events *oe)
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 7b8f9b011f38..f403991e3bfd 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -2,9 +2,8 @@
#define __ORDERED_EVENTS_H
#include <linux/types.h>
-#include "tool.h"
-struct perf_session;
+struct perf_sample;
struct ordered_event {
u64 timestamp;
@@ -20,6 +19,11 @@ enum oe_flush {
OE_FLUSH__HALF,
};
+struct ordered_events;
+
+typedef int (*ordered_events__deliver_t)(struct ordered_events *oe,
+ struct ordered_event *event);
+
struct ordered_events {
u64 last_flush;
u64 next_flush;
@@ -31,18 +35,19 @@ struct ordered_events {
struct list_head to_free;
struct ordered_event *buffer;
struct ordered_event *last;
+ ordered_events__deliver_t deliver;
int buffer_idx;
unsigned int nr_events;
enum oe_flush last_flush_type;
+ u32 nr_unordered_events;
bool copy_on_queue;
};
-struct ordered_event *ordered_events__new(struct ordered_events *oe, u64 timestamp,
- union perf_event *event);
+int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset);
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
-int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
- enum oe_flush how);
-void ordered_events__init(struct ordered_events *oe);
+int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
+void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
void ordered_events__free(struct ordered_events *oe);
static inline
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 7f8ec6ce2823..be0655388b38 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -20,11 +20,6 @@
#define MAX_NAME_LEN 100
-struct event_symbol {
- const char *symbol;
- const char *alias;
-};
-
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
@@ -39,7 +34,7 @@ static struct perf_pmu_event_symbol *perf_pmu_events_list;
*/
static int perf_pmu_events_list_num;
-static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
+struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = {
.symbol = "cpu-cycles",
.alias = "cycles",
@@ -82,7 +77,7 @@ static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
},
};
-static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
+struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
[PERF_COUNT_SW_CPU_CLOCK] = {
.symbol = "cpu-clock",
.alias = "",
@@ -175,9 +170,6 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(tracing_events_path))
- return NULL;
-
sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return NULL;
@@ -473,12 +465,6 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event)
{
- int ret;
-
- ret = debugfs_valid_mountpoint(tracing_events_path);
- if (ret)
- return ret;
-
if (strpbrk(sys, "*?"))
return add_tracepoint_multi_sys(list, idx, sys, event);
else
@@ -723,6 +709,7 @@ struct event_modifier {
int eh;
int eH;
int eG;
+ int eI;
int precise;
int exclude_GH;
int sample_read;
@@ -737,6 +724,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int eh = evsel ? evsel->attr.exclude_hv : 0;
int eH = evsel ? evsel->attr.exclude_host : 0;
int eG = evsel ? evsel->attr.exclude_guest : 0;
+ int eI = evsel ? evsel->attr.exclude_idle : 0;
int precise = evsel ? evsel->attr.precise_ip : 0;
int sample_read = 0;
int pinned = evsel ? evsel->attr.pinned : 0;
@@ -767,6 +755,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
if (!exclude_GH)
exclude_GH = eG = eH = 1;
eH = 0;
+ } else if (*str == 'I') {
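+ /* 'I' maps to perf_event_attr.exclude_idle */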
+ eI = 1;
} else if (*str == 'p') {
precise++;
/* use of precise requires exclude_guest */
@@ -800,6 +790,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->eh = eh;
mod->eH = eH;
mod->eG = eG;
+ mod->eI = eI;
mod->precise = precise;
mod->exclude_GH = exclude_GH;
mod->sample_read = sample_read;
@@ -817,7 +808,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
- if (strlen(str) > (sizeof("ukhGHpppSD") - 1))
+ if (strlen(str) > (sizeof("ukhGHpppSDI") - 1))
return -1;
while (*p) {
@@ -853,6 +844,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->attr.precise_ip = mod.precise;
evsel->attr.exclude_host = mod.eH;
evsel->attr.exclude_guest = mod.eG;
+ evsel->attr.exclude_idle = mod.eI;
evsel->exclude_GH = mod.exclude_GH;
evsel->sample_read = mod.sample_read;
@@ -1098,6 +1090,14 @@ static const char * const event_type_descriptors[] = {
"Hardware breakpoint",
};
+static int cmp_string(const void *a, const void *b)
+{
+ const char * const *as = a;
+ const char * const *bs = b;
+
+ return strcmp(*as, *bs);
+}
+
/*
* Print the events from <debugfs_mount_point>/tracing/events
*/
@@ -1109,18 +1109,21 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- char sbuf[STRERR_BUFSIZE];
-
- if (debugfs_valid_mountpoint(tracing_events_path)) {
- printf(" [ Tracepoints not available: %s ]\n",
- strerror_r(errno, sbuf, sizeof(sbuf)));
- return;
- }
+ char **evt_list = NULL;
+ unsigned int evt_i = 0, evt_num = 0;
+ bool evt_num_known = false;
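+ /*
+ * Two passes over the event directories: the first only
+ * counts matching events, the second (after restart) collects
+ * their names into evt_list so they can be sorted before
+ * printing.
+ */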
+restart:
sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return;
+ if (evt_num_known) {
+ evt_list = zalloc(sizeof(char *) * evt_num);
+ if (!evt_list)
+ goto out_close_sys_dir;
+ }
+
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
if (subsys_glob != NULL &&
!strglobmatch(sys_dirent.d_name, subsys_glob))
@@ -1137,19 +1140,56 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
!strglobmatch(evt_dirent.d_name, event_glob))
continue;
- if (name_only) {
- printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name);
+ if (!evt_num_known) {
+ evt_num++;
continue;
}
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent.d_name, evt_dirent.d_name);
- printf(" %-50s [%s]\n", evt_path,
- event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+
+ evt_list[evt_i] = strdup(evt_path);
+ if (evt_list[evt_i] == NULL)
+ goto out_close_evt_dir;
+ evt_i++;
}
closedir(evt_dir);
}
closedir(sys_dir);
+
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++],
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ }
+ if (evt_num)
+ printf("\n");
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+ return;
+
+out_close_evt_dir:
+ closedir(evt_dir);
+out_close_sys_dir:
+ closedir(sys_dir);
+
+ printf("FATAL: not enough memory to print %s\n",
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ if (evt_list)
+ goto out_free;
}
/*
@@ -1163,9 +1203,6 @@ int is_valid_tracepoint(const char *event_string)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(tracing_events_path))
- return 0;
-
sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return 0;
@@ -1233,38 +1270,19 @@ static bool is_event_supported(u8 type, unsigned config)
return ret;
}
-static void __print_events_type(u8 type, struct event_symbol *syms,
- unsigned max)
-{
- char name[64];
- unsigned i;
-
- for (i = 0; i < max ; i++, syms++) {
- if (!is_event_supported(type, i))
- continue;
-
- if (strlen(syms->alias))
- snprintf(name, sizeof(name), "%s OR %s",
- syms->symbol, syms->alias);
- else
- snprintf(name, sizeof(name), "%s", syms->symbol);
-
- printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
- }
-}
-
-void print_events_type(u8 type)
-{
- if (type == PERF_TYPE_SOFTWARE)
- __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX);
- else
- __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
-}
-
int print_hwcache_events(const char *event_glob, bool name_only)
{
- unsigned int type, op, i, printed = 0;
+ unsigned int type, op, i, evt_i = 0, evt_num = 0;
char name[64];
+ char **evt_list = NULL;
+ bool evt_num_known = false;
+
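+ /* Same count-then-collect-then-sort scheme as print_tracepoint_events() */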
+restart:
+ if (evt_num_known) {
+ evt_list = zalloc(sizeof(char *) * evt_num);
+ if (!evt_list)
+ goto out_enomem;
+ }
for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
@@ -1282,27 +1300,66 @@ int print_hwcache_events(const char *event_glob, bool name_only)
type | (op << 8) | (i << 16)))
continue;
- if (name_only)
- printf("%s ", name);
- else
- printf(" %-50s [%s]\n", name,
- event_type_descriptors[PERF_TYPE_HW_CACHE]);
- ++printed;
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ evt_list[evt_i] = strdup(name);
+ if (evt_list[evt_i] == NULL)
+ goto out_enomem;
+ evt_i++;
}
}
}
- if (printed)
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++],
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ }
+ if (evt_num)
printf("\n");
- return printed;
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+ return evt_num;
+
+out_enomem:
+ printf("FATAL: not enough memory to print %s\n", event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ if (evt_list)
+ goto out_free;
+ return evt_num;
}
-static void print_symbol_events(const char *event_glob, unsigned type,
+void print_symbol_events(const char *event_glob, unsigned type,
struct event_symbol *syms, unsigned max,
bool name_only)
{
- unsigned i, printed = 0;
+ unsigned int i, evt_i = 0, evt_num = 0;
char name[MAX_NAME_LEN];
+ char **evt_list = NULL;
+ bool evt_num_known = false;
+
+restart:
+ if (evt_num_known) {
+ evt_list = zalloc(sizeof(char *) * evt_num);
+ if (!evt_list)
+ goto out_enomem;
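+ /* The counting pass advanced syms by max entries, rewind it */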
+ syms -= max;
+ }
for (i = 0; i < max; i++, syms++) {
@@ -1314,23 +1371,49 @@ static void print_symbol_events(const char *event_glob, unsigned type,
if (!is_event_supported(type, i))
continue;
- if (name_only) {
- printf("%s ", syms->symbol);
+ if (!evt_num_known) {
+ evt_num++;
continue;
}
- if (strlen(syms->alias))
+ if (!name_only && strlen(syms->alias))
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
else
strncpy(name, syms->symbol, MAX_NAME_LEN);
- printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
-
- printed++;
+ evt_list[evt_i] = strdup(name);
+ if (evt_list[evt_i] == NULL)
+ goto out_enomem;
+ evt_i++;
}
- if (printed)
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
+ }
+ if (evt_num)
printf("\n");
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+ return;
+
+out_enomem:
+ printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
+ if (evt_list)
+ goto out_free;
}
/*
@@ -1338,11 +1421,6 @@ static void print_symbol_events(const char *event_glob, unsigned type,
*/
void print_events(const char *event_glob, bool name_only)
{
- if (!name_only) {
- printf("\n");
- printf("List of pre-defined events (to be used in -e):\n");
- }
-
print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index ff6e1fa4111e..52a2dda4f954 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -116,12 +116,21 @@ void parse_events_update_lists(struct list_head *list_event,
void parse_events_error(void *data, void *scanner, char const *msg);
void print_events(const char *event_glob, bool name_only);
-void print_events_type(u8 type);
+
+struct event_symbol {
+ const char *symbol;
+ const char *alias;
+};
+extern struct event_symbol event_symbols_hw[];
+extern struct event_symbol event_symbols_sw[];
+void print_symbol_events(const char *event_glob, unsigned type,
+ struct event_symbol *syms, unsigned max,
+ bool name_only);
void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
bool name_only);
int print_hwcache_events(const char *event_glob, bool name_only);
extern int is_valid_tracepoint(const char *event_string);
-extern int valid_debugfs_mount(const char *debugfs);
+int valid_event_mount(const char *eventfs);
#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 94eacb6c1ef7..8895cf3132ab 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -101,7 +101,7 @@ num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
/* If you add a modifier you need to update check_modifier() */
-modifier_event [ukhpGHSD]+
+modifier_event [ukhpGHSDI]+
modifier_bp [rwx]{1,3}
%%
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 4a015f77e2b5..01626be2a8eb 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -37,6 +37,7 @@ static int get_value(struct parse_opt_ctx_t *p,
{
const char *s, *arg = NULL;
const int unset = flags & OPT_UNSET;
+ int err;
if (unset && p->opt)
return opterror(opt, "takes no value", flags);
@@ -114,13 +115,29 @@ static int get_value(struct parse_opt_ctx_t *p,
return 0;
case OPTION_STRING:
+ err = 0;
if (unset)
*(const char **)opt->value = NULL;
else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
*(const char **)opt->value = (const char *)opt->defval;
else
- return get_arg(p, opt, flags, (const char **)opt->value);
- return 0;
+ err = get_arg(p, opt, flags, (const char **)opt->value);
+
+ /* PARSE_OPT_NOEMPTY: Allow NULL but disallow empty string. */
+ if (opt->flags & PARSE_OPT_NOEMPTY) {
+ const char *val = *(const char **)opt->value;
+
+ if (!val)
+ return err;
+
+ /* Similar to unset if we are given an empty string. */
+ if (val[0] == '\0') {
+ *(const char **)opt->value = NULL;
+ return 0;
+ }
+ }
+
+ return err;
case OPTION_CALLBACK:
if (unset)
@@ -505,13 +522,18 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
break;
case PARSE_OPT_LIST_OPTS:
while (options->type != OPTION_END) {
- printf("--%s ", options->long_name);
+ if (options->long_name)
+ printf("--%s ", options->long_name);
options++;
}
+ putchar('\n');
exit(130);
case PARSE_OPT_LIST_SUBCMDS:
- for (int i = 0; subcommands[i]; i++)
- printf("%s ", subcommands[i]);
+ if (subcommands) {
+ for (int i = 0; subcommands[i]; i++)
+ printf("%s ", subcommands[i]);
+ }
+ putchar('\n');
exit(130);
default: /* PARSE_OPT_UNKNOWN */
if (ctx.argv[0][1] == '-') {
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 97b153fb4999..59561fd86278 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -40,6 +40,7 @@ enum parse_opt_option_flags {
PARSE_OPT_LASTARG_DEFAULT = 16,
PARSE_OPT_DISABLED = 32,
PARSE_OPT_EXCLUSIVE = 64,
+ PARSE_OPT_NOEMPTY = 128,
};
struct option;
@@ -122,6 +123,7 @@ struct option {
#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
#define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) }
+#define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
#define OPT_DATE(s, l, v, h) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
#define OPT_CALLBACK(s, l, v, a, h, f) \
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 919937eb0be2..d05b77cf35f7 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -41,6 +41,7 @@
#include "symbol.h"
#include "thread.h"
#include <api/fs/debugfs.h>
+#include <api/fs/tracefs.h>
#include "trace-event.h" /* For __maybe_unused */
#include "probe-event.h"
#include "probe-finder.h"
@@ -79,6 +80,7 @@ static int init_symbol_maps(bool user_only)
int ret;
symbol_conf.sort_by_name = true;
+ symbol_conf.allow_aliases = true;
ret = symbol__init(NULL);
if (ret < 0) {
pr_debug("Failed to init symbol map.\n");
@@ -133,6 +135,8 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
return NULL;
kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
+ if (!kmap)
+ return NULL;
return kmap->ref_reloc_sym;
}
@@ -150,7 +154,7 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
sym = __find_kernel_function_by_name(name, &map);
if (sym)
return map->unmap_ip(map, sym->start) -
- (reloc) ? 0 : map->reloc;
+ ((reloc) ? 0 : map->reloc);
}
return 0;
}
@@ -177,6 +181,25 @@ static struct map *kernel_get_module_map(const char *module)
return NULL;
}
+static struct map *get_target_map(const char *target, bool user)
+{
+ /* Init maps of given executable or kernel */
+ if (user)
+ return dso__new_map(target);
+ else
+ return kernel_get_module_map(target);
+}
+
+static void put_target_map(struct map *map, bool user)
+{
+ if (map && user) {
+ /* Only the user map needs to be released */
+ dso__delete(map->dso);
+ map__delete(map);
+ }
+}
+
+
static struct dso *kernel_get_module_dso(const char *module)
{
struct dso *dso;
@@ -248,6 +271,13 @@ out:
return ret;
}
+static void clear_perf_probe_point(struct perf_probe_point *pp)
+{
+ free(pp->file);
+ free(pp->function);
+ free(pp->lazy_line);
+}
+
static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
{
int i;
@@ -257,6 +287,104 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
}
#ifdef HAVE_DWARF_SUPPORT
+/*
+ * Some binaries like glibc have special symbols which are in the symbol
+ * table, but not in the debuginfo. If we can find the address of the
+ * symbol from the map, we can translate the address back to the probe point.
+ */
+static int find_alternative_probe_point(struct debuginfo *dinfo,
+ struct perf_probe_point *pp,
+ struct perf_probe_point *result,
+ const char *target, bool uprobes)
+{
+ struct map *map = NULL;
+ struct symbol *sym;
+ u64 address = 0;
+ int ret = -ENOENT;
+
+ /* This only works for function-name based probe points */
+ if (!pp->function || pp->file)
+ return -ENOTSUP;
+
+ map = get_target_map(target, uprobes);
+ if (!map)
+ return -EINVAL;
+
+ /* Find the address of given function */
+ map__for_each_symbol_by_name(map, pp->function, sym) {
+ if (uprobes)
+ address = sym->start;
+ else
+ address = map->unmap_ip(map, sym->start);
+ break;
+ }
+ if (!address) {
+ ret = -ENOENT;
+ goto out;
+ }
+ pr_debug("Symbol %s address found : %" PRIx64 "\n",
+ pp->function, address);
+
+ ret = debuginfo__find_probe_point(dinfo, (unsigned long)address,
+ result);
+ if (ret <= 0)
+ ret = (!ret) ? -ENOENT : ret;
+ else {
+ result->offset += pp->offset;
+ result->line += pp->line;
+ result->retprobe = pp->retprobe;
+ ret = 0;
+ }
+
+out:
+ put_target_map(map, uprobes);
+ return ret;
+
+}
+
+static int get_alternative_probe_event(struct debuginfo *dinfo,
+ struct perf_probe_event *pev,
+ struct perf_probe_point *tmp,
+ const char *target)
+{
+ int ret;
+
+ memcpy(tmp, &pev->point, sizeof(*tmp));
+ memset(&pev->point, 0, sizeof(pev->point));
+ ret = find_alternative_probe_point(dinfo, tmp, &pev->point,
+ target, pev->uprobes);
+ if (ret < 0)
+ memcpy(&pev->point, tmp, sizeof(*tmp));
+
+ return ret;
+}
+
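+/*
+ * Rewrite a line range using the alternative probe point found in the
+ * symbol table, preserving the length of the requested range.
+ */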
+static int get_alternative_line_range(struct debuginfo *dinfo,
+ struct line_range *lr,
+ const char *target, bool user)
+{
+ struct perf_probe_point pp = { .function = lr->function,
+ .file = lr->file,
+ .line = lr->start };
+ struct perf_probe_point result;
+ int ret, len = 0;
+
+ memset(&result, 0, sizeof(result));
+
+ if (lr->end != INT_MAX)
+ len = lr->end - lr->start;
+ ret = find_alternative_probe_point(dinfo, &pp, &result,
+ target, user);
+ if (!ret) {
+ lr->function = result.function;
+ lr->file = result.file;
+ lr->start = result.line;
+ if (lr->end != INT_MAX)
+ lr->end = lr->start + len;
+ clear_perf_probe_point(&pp);
+ }
+ return ret;
+}
/* Open new debuginfo of given module */
static struct debuginfo *open_debuginfo(const char *module, bool silent)
@@ -465,6 +593,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
int max_tevs, const char *target)
{
bool need_dwarf = perf_probe_event_need_dwarf(pev);
+ struct perf_probe_point tmp;
struct debuginfo *dinfo;
int ntevs, ret = 0;
@@ -481,6 +610,20 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
/* Searching trace events corresponding to a probe event */
ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs);
+ if (ntevs == 0) { /* Not found, retry with an alternative */
+ ret = get_alternative_probe_event(dinfo, pev, &tmp, target);
+ if (!ret) {
+ ntevs = debuginfo__find_trace_events(dinfo, pev,
+ tevs, max_tevs);
+ /*
+ * Write back to the original probe_event so that
+ * the appropriate (user-given) event name is set.
+ */
+ clear_perf_probe_point(&pev->point);
+ memcpy(&pev->point, &tmp, sizeof(tmp));
+ }
+ }
+
debuginfo__delete(dinfo);
if (ntevs > 0) { /* Succeeded to find trace events */
@@ -495,11 +638,9 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
}
if (ntevs == 0) { /* No error but failed to find probe point. */
- pr_warning("Probe point '%s' not found in debuginfo.\n",
+ pr_warning("Probe point '%s' not found.\n",
synthesize_perf_probe_point(&pev->point));
- if (need_dwarf)
- return -ENOENT;
- return 0;
+ return -ENOENT;
}
/* Error path : ntevs < 0 */
pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
@@ -514,63 +655,6 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
return ntevs;
}
-/*
- * Find a src file from a DWARF tag path. Prepend optional source path prefix
- * and chop off leading directories that do not exist. Result is passed back as
- * a newly allocated path on success.
- * Return 0 if file was found and readable, -errno otherwise.
- */
-static int get_real_path(const char *raw_path, const char *comp_dir,
- char **new_path)
-{
- const char *prefix = symbol_conf.source_prefix;
-
- if (!prefix) {
- if (raw_path[0] != '/' && comp_dir)
- /* If not an absolute path, try to use comp_dir */
- prefix = comp_dir;
- else {
- if (access(raw_path, R_OK) == 0) {
- *new_path = strdup(raw_path);
- return 0;
- } else
- return -errno;
- }
- }
-
- *new_path = malloc((strlen(prefix) + strlen(raw_path) + 2));
- if (!*new_path)
- return -ENOMEM;
-
- for (;;) {
- sprintf(*new_path, "%s/%s", prefix, raw_path);
-
- if (access(*new_path, R_OK) == 0)
- return 0;
-
- if (!symbol_conf.source_prefix)
- /* In case of searching comp_dir, don't retry */
- return -errno;
-
- switch (errno) {
- case ENAMETOOLONG:
- case ENOENT:
- case EROFS:
- case EFAULT:
- raw_path = strchr(++raw_path, '/');
- if (!raw_path) {
- zfree(new_path);
- return -ENOENT;
- }
- continue;
-
- default:
- zfree(new_path);
- return -errno;
- }
- }
-}
-
#define LINEBUF_SIZE 256
#define NR_ADDITIONAL_LINES 2
@@ -622,7 +706,8 @@ static int _show_one_line(FILE *fp, int l, bool skip, bool show_num)
* Show line-range always requires debuginfo to find source file and
* line number.
*/
-static int __show_line_range(struct line_range *lr, const char *module)
+static int __show_line_range(struct line_range *lr, const char *module,
+ bool user)
{
int l = 1;
struct int_node *ln;
@@ -638,6 +723,11 @@ static int __show_line_range(struct line_range *lr, const char *module)
return -ENOENT;
ret = debuginfo__find_line_range(dinfo, lr);
+ if (!ret) { /* Not found, retry with an alternative */
+ ret = get_alternative_line_range(dinfo, lr, module, user);
+ if (!ret)
+ ret = debuginfo__find_line_range(dinfo, lr);
+ }
debuginfo__delete(dinfo);
if (ret == 0 || ret == -ENOENT) {
pr_warning("Specified source line is not found.\n");
@@ -650,7 +740,11 @@ static int __show_line_range(struct line_range *lr, const char *module)
/* Convert source file path */
tmp = lr->path;
ret = get_real_path(tmp, lr->comp_dir, &lr->path);
- free(tmp); /* Free old path */
+
+ /* Free the old path only when a new path was assigned */
+ if (tmp != lr->path)
+ free(tmp);
+
if (ret < 0) {
pr_warning("Failed to find source file path.\n");
return ret;
@@ -707,7 +801,7 @@ int show_line_range(struct line_range *lr, const char *module, bool user)
ret = init_symbol_maps(user);
if (ret < 0)
return ret;
- ret = __show_line_range(lr, module);
+ ret = __show_line_range(lr, module, user);
exit_symbol_maps();
return ret;
@@ -716,12 +810,13 @@ int show_line_range(struct line_range *lr, const char *module, bool user)
static int show_available_vars_at(struct debuginfo *dinfo,
struct perf_probe_event *pev,
int max_vls, struct strfilter *_filter,
- bool externs)
+ bool externs, const char *target)
{
char *buf;
int ret, i, nvars;
struct str_node *node;
struct variable_list *vls = NULL, *vl;
+ struct perf_probe_point tmp;
const char *var;
buf = synthesize_perf_probe_point(&pev->point);
@@ -731,6 +826,15 @@ static int show_available_vars_at(struct debuginfo *dinfo,
ret = debuginfo__find_available_vars_at(dinfo, pev, &vls,
max_vls, externs);
+ if (!ret) { /* Not found, retry with an alternative */
+ ret = get_alternative_probe_event(dinfo, pev, &tmp, target);
+ if (!ret) {
+ ret = debuginfo__find_available_vars_at(dinfo, pev,
+ &vls, max_vls, externs);
+ /* Release the old probe_point */
+ clear_perf_probe_point(&tmp);
+ }
+ }
if (ret <= 0) {
if (ret == 0 || ret == -ENOENT) {
pr_err("Failed to find the address of %s\n", buf);
@@ -793,7 +897,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
for (i = 0; i < npevs && ret >= 0; i++)
ret = show_available_vars_at(dinfo, &pevs[i], max_vls, _filter,
- externs);
+ externs, module);
debuginfo__delete(dinfo);
out:
@@ -980,6 +1084,8 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
*
* TODO:Group name support
*/
+ if (!arg)
+ return -EINVAL;
ptr = strpbrk(arg, ";=@+%");
if (ptr && *ptr == '=') { /* Event name */
@@ -1739,15 +1845,13 @@ static int convert_to_perf_probe_event(struct probe_trace_event *tev,
void clear_perf_probe_event(struct perf_probe_event *pev)
{
- struct perf_probe_point *pp = &pev->point;
struct perf_probe_arg_field *field, *next;
int i;
free(pev->event);
free(pev->group);
- free(pp->file);
- free(pp->function);
- free(pp->lazy_line);
+ free(pev->target);
+ clear_perf_probe_point(&pev->point);
for (i = 0; i < pev->nargs; i++) {
free(pev->args[i].name);
@@ -1805,7 +1909,7 @@ static void print_open_warning(int err, bool is_kprobe)
" - please rebuild kernel with %s.\n",
is_kprobe ? 'k' : 'u', config);
} else if (err == -ENOTSUP)
- pr_warning("Debugfs is not mounted.\n");
+ pr_warning("Tracefs or debugfs is not mounted.\n");
else
pr_warning("Failed to open %cprobe_events: %s\n",
is_kprobe ? 'k' : 'u',
@@ -1816,7 +1920,7 @@ static void print_both_open_warning(int kerr, int uerr)
{
/* Both kprobes and uprobes are disabled, warn it. */
if (kerr == -ENOTSUP && uerr == -ENOTSUP)
- pr_warning("Debugfs is not mounted.\n");
+ pr_warning("Tracefs or debugfs is not mounted.\n");
else if (kerr == -ENOENT && uerr == -ENOENT)
pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
"or/and CONFIG_UPROBE_EVENTS.\n");
@@ -1833,13 +1937,20 @@ static int open_probe_events(const char *trace_file, bool readwrite)
{
char buf[PATH_MAX];
const char *__debugfs;
+ const char *tracing_dir = "";
int ret;
- __debugfs = debugfs_find_mountpoint();
- if (__debugfs == NULL)
- return -ENOTSUP;
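+ /* Prefer tracefs; when it is not mounted, fall back to debugfs and prepend "tracing/" */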
+ __debugfs = tracefs_find_mountpoint();
+ if (__debugfs == NULL) {
+ tracing_dir = "tracing/";
+
+ __debugfs = debugfs_find_mountpoint();
+ if (__debugfs == NULL)
+ return -ENOTSUP;
+ }
- ret = e_snprintf(buf, PATH_MAX, "%s/%s", __debugfs, trace_file);
+ ret = e_snprintf(buf, PATH_MAX, "%s/%s%s",
+ __debugfs, tracing_dir, trace_file);
if (ret >= 0) {
pr_debug("Opening %s write=%d\n", buf, readwrite);
if (readwrite && !probe_event_dry_run)
@@ -1855,12 +1966,12 @@ static int open_probe_events(const char *trace_file, bool readwrite)
static int open_kprobe_events(bool readwrite)
{
- return open_probe_events("tracing/kprobe_events", readwrite);
+ return open_probe_events("kprobe_events", readwrite);
}
static int open_uprobe_events(bool readwrite)
{
- return open_probe_events("tracing/uprobe_events", readwrite);
+ return open_probe_events("uprobe_events", readwrite);
}
/* Get raw string list of current kprobe_events or uprobe_events */
@@ -1895,6 +2006,95 @@ static struct strlist *get_probe_trace_command_rawlist(int fd)
return sl;
}
+struct kprobe_blacklist_node {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+ char *symbol;
+};
+
+static void kprobe_blacklist__delete(struct list_head *blacklist)
+{
+ struct kprobe_blacklist_node *node;
+
+ while (!list_empty(blacklist)) {
+ node = list_first_entry(blacklist,
+ struct kprobe_blacklist_node, list);
+ list_del(&node->list);
+ free(node->symbol);
+ free(node);
+ }
+}
+
+static int kprobe_blacklist__load(struct list_head *blacklist)
+{
+ struct kprobe_blacklist_node *node;
+ const char *__debugfs = debugfs_find_mountpoint();
+ char buf[PATH_MAX], *p;
+ FILE *fp;
+ int ret;
+
+ if (__debugfs == NULL)
+ return -ENOTSUP;
+
+ ret = e_snprintf(buf, PATH_MAX, "%s/kprobes/blacklist", __debugfs);
+ if (ret < 0)
+ return ret;
+
+ fp = fopen(buf, "r");
+ if (!fp)
+ return -errno;
+
+ ret = 0;
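+ /* Each blacklist line looks like "0x<start>-0x<end>\t<symbol>" */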
+ while (fgets(buf, PATH_MAX, fp)) {
+ node = zalloc(sizeof(*node));
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&node->list);
+ list_add_tail(&node->list, blacklist);
+ if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) {
+ ret = -EINVAL;
+ break;
+ }
+ p = strchr(buf, '\t');
+ if (p) {
+ p++;
+ if (p[strlen(p) - 1] == '\n')
+ p[strlen(p) - 1] = '\0';
+ } else
+ p = (char *)"unknown";
+ node->symbol = strdup(p);
+ if (!node->symbol) {
+ ret = -ENOMEM;
+ break;
+ }
+ pr_debug2("Blacklist: 0x%lx-0x%lx, %s\n",
+ node->start, node->end, node->symbol);
+ ret++;
+ }
+ if (ret < 0)
+ kprobe_blacklist__delete(blacklist);
+ fclose(fp);
+
+ return ret;
+}
+
+static struct kprobe_blacklist_node *
+kprobe_blacklist__find_by_address(struct list_head *blacklist,
+ unsigned long address)
+{
+ struct kprobe_blacklist_node *node;
+
+ list_for_each_entry(node, blacklist, list) {
+ if (node->start <= address && address <= node->end)
+ return node;
+ }
+
+ return NULL;
+}
+
/* Show an event */
static int show_perf_probe_event(struct perf_probe_event *pev,
const char *module)
@@ -2100,6 +2300,27 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
return ret;
}
+/* Warn if the current kernel's uprobe implementation is old */
+static void warn_uprobe_event_compat(struct probe_trace_event *tev)
+{
+ int i;
+ char *buf = synthesize_probe_trace_command(tev);
+
+ /* Old uprobe event doesn't support memory dereference */
+ if (!tev->uprobes || tev->nargs == 0 || !buf)
+ goto out;
+
+ for (i = 0; i < tev->nargs; i++)
+ if (strglobmatch(tev->args[i].value, "[$@+-]*")) {
+ pr_warning("Please upgrade your kernel to at least "
+ "3.14 to have access to feature %s\n",
+ tev->args[i].value);
+ break;
+ }
+out:
+ free(buf);
+}
+
static int __add_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event *tevs,
int ntevs, bool allow_suffix)
@@ -2109,6 +2330,8 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
char buf[64];
const char *event, *group;
struct strlist *namelist;
+ LIST_HEAD(blacklist);
+ struct kprobe_blacklist_node *node;
if (pev->uprobes)
fd = open_uprobe_events(true);
@@ -2126,11 +2349,25 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
pr_debug("Failed to get current event list.\n");
return -EIO;
}
+ /* Get kprobe blacklist if exists */
+ if (!pev->uprobes) {
+ ret = kprobe_blacklist__load(&blacklist);
+ if (ret < 0)
+ pr_debug("No kprobe blacklist support, ignored\n");
+ }
ret = 0;
pr_info("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
for (i = 0; i < ntevs; i++) {
tev = &tevs[i];
+ /* Ensure that the address is NOT blacklisted */
+ node = kprobe_blacklist__find_by_address(&blacklist,
+ tev->point.address);
+ if (node) {
+ pr_warning("Warning: Skipped probing on blacklisted function: %s\n", node->symbol);
+ continue;
+ }
+
if (pev->event)
event = pev->event;
else
@@ -2180,14 +2417,18 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
*/
allow_suffix = true;
}
+ if (ret == -EINVAL && pev->uprobes)
+ warn_uprobe_event_compat(tev);
- if (ret >= 0) {
+ /* Note that it is possible to skip all events because of blacklist */
+ if (ret >= 0 && tev->event) {
/* Show how to use the event. */
pr_info("\nYou can now use it in all perf tools, such as:\n\n");
pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
tev->event);
}
+ kprobe_blacklist__delete(&blacklist);
strlist__delete(namelist);
close(fd);
return ret;
@@ -2199,8 +2440,7 @@ static int find_probe_functions(struct map *map, char *name)
struct symbol *sym;
map__for_each_symbol_by_name(map, name, sym) {
- if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
- found++;
+ found++;
}
return found;
@@ -2218,7 +2458,6 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
int max_tevs, const char *target)
{
struct map *map = NULL;
- struct kmap *kmap = NULL;
struct ref_reloc_sym *reloc_sym = NULL;
struct symbol *sym;
struct probe_trace_event *tev;
@@ -2227,11 +2466,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
int num_matched_functions;
int ret, i;
- /* Init maps of given executable or kernel */
- if (pev->uprobes)
- map = dso__new_map(target);
- else
- map = kernel_get_module_map(target);
+ map = get_target_map(target, pev->uprobes);
if (!map) {
ret = -EINVAL;
goto out;
@@ -2255,8 +2490,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
}
if (!pev->uprobes && !pp->retprobe) {
- kmap = map__kmap(map);
- reloc_sym = kmap->ref_reloc_sym;
+ reloc_sym = kernel_get_ref_reloc_sym();
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found!\n");
ret = -EINVAL;
@@ -2324,11 +2558,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
}
out:
- if (map && pev->uprobes) {
- /* Only when using uprobe(exec) map needs to be released */
- dso__delete(map->dso);
- map__delete(map);
- }
+ put_target_map(map, pev->uprobes);
return ret;
nomem_out:
@@ -2369,7 +2599,7 @@ struct __event_package {
};
int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
- int max_tevs, const char *target, bool force_add)
+ int max_tevs, bool force_add)
{
int i, j, ret;
struct __event_package *pkgs;
@@ -2393,7 +2623,7 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
ret = convert_to_probe_trace_events(pkgs[i].pev,
&pkgs[i].tevs,
max_tevs,
- target);
+ pkgs[i].pev->target);
if (ret < 0)
goto end;
pkgs[i].ntevs = ret;
@@ -2568,8 +2798,7 @@ static struct strfilter *available_func_filter;
static int filter_available_functions(struct map *map __maybe_unused,
struct symbol *sym)
{
- if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
- strfilter__compare(available_func_filter, sym->name))
+ if (strfilter__compare(available_func_filter, sym->name))
return 0;
return 1;
}
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index e01e9943139f..d6b783447be9 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -73,7 +73,8 @@ struct perf_probe_event {
char *group; /* Group name */
struct perf_probe_point point; /* Probe point */
int nargs; /* Number of arguments */
- bool uprobes;
+ bool uprobes; /* Uprobe event flag */
+ char *target; /* Target binary */
struct perf_probe_arg *args; /* Arguments */
};
@@ -124,8 +125,7 @@ extern int line_range__init(struct line_range *lr);
extern const char *kernel_get_module_path(const char *module);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
- int max_probe_points, const char *module,
- bool force_add);
+ int max_probe_points, bool force_add);
extern int del_perf_probe_events(struct strlist *dellist);
extern int show_perf_probe_events(void);
extern int show_line_range(struct line_range *lr, const char *module,
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index b5247d777f0e..2a76e14db732 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -456,11 +456,12 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
return -EINVAL;
}
if (field->name[0] == '[') {
- pr_err("Semantic error: %s is not a pointor"
+ pr_err("Semantic error: %s is not a pointer"
" nor array.\n", varname);
return -EINVAL;
}
- if (field->ref) {
+ /* While processing an unnamed field, we don't care about this */
+ if (field->ref && dwarf_diename(vr_die)) {
pr_err("Semantic error: %s must be referred by '.'\n",
field->name);
return -EINVAL;
@@ -491,6 +492,11 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
}
ref->offset += (long)offs;
+ /* If this member is unnamed, we need to reuse this field */
+ if (!dwarf_diename(die_mem))
+ return convert_variable_fields(die_mem, varname, field,
+ &ref, die_mem);
+
next:
/* Converting next field */
if (field->next)
@@ -572,10 +578,12 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Search child die for local variables and parameters. */
if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
/* Search again in global variables */
- if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
+ if (!die_find_variable_at(&pf->cu_die, pf->pvar->var,
+ 0, &vr_die)) {
pr_warning("Failed to find '%s' in this function.\n",
pf->pvar->var);
ret = -ENOENT;
+ }
}
if (ret >= 0)
ret = convert_variable(&vr_die, pf);
@@ -849,11 +857,22 @@ static int probe_point_lazy_walker(const char *fname, int lineno,
static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
{
int ret = 0;
+ char *fpath;
if (intlist__empty(pf->lcache)) {
+ const char *comp_dir;
+
+ comp_dir = cu_get_comp_dir(&pf->cu_die);
+ ret = get_real_path(pf->fname, comp_dir, &fpath);
+ if (ret < 0) {
+ pr_warning("Failed to find source file path.\n");
+ return ret;
+ }
+
/* Matching lazy line pattern */
- ret = find_lazy_match_lines(pf->lcache, pf->fname,
+ ret = find_lazy_match_lines(pf->lcache, fpath,
pf->pev->point.lazy_line);
+ free(fpath);
if (ret <= 0)
return ret;
}
@@ -915,17 +934,13 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
dwarf_decl_line(sp_die, &pf->lno);
pf->lno += pp->line;
param->retval = find_probe_point_by_line(pf);
- } else if (!dwarf_func_inline(sp_die)) {
+ } else if (die_is_func_instance(sp_die)) {
+ /* Instances always have the entry address */
+ dwarf_entrypc(sp_die, &pf->addr);
/* Real function */
if (pp->lazy_line)
param->retval = find_probe_point_lazy(sp_die, pf);
else {
- if (dwarf_entrypc(sp_die, &pf->addr) != 0) {
- pr_warning("Failed to get entry address of "
- "%s.\n", dwarf_diename(sp_die));
- param->retval = -ENOENT;
- return DWARF_CB_ABORT;
- }
pf->addr += pp->offset;
/* TODO: Check the address in this function */
param->retval = call_probe_finder(sp_die, pf);
@@ -1053,7 +1068,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
if (pp->function)
ret = find_probe_point_by_func(pf);
else if (pp->lazy_line)
- ret = find_probe_point_lazy(NULL, pf);
+ ret = find_probe_point_lazy(&pf->cu_die, pf);
else {
pf->lno = pp->line;
ret = find_probe_point_by_line(pf);
@@ -1349,11 +1364,8 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
- /* Adjust address with bias */
- addr += dbg->bias;
-
/* Find cu die */
- if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr - dbg->bias, &cudie)) {
+ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
pr_warning("Failed to find debug information for address %lx\n",
addr);
ret = -EINVAL;
@@ -1536,7 +1548,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
lr->start = lf->lno_s;
lr->end = lf->lno_e;
- if (dwarf_func_inline(sp_die))
+ if (!die_is_func_instance(sp_die))
param->retval = die_walk_instances(sp_die,
line_range_inline_cb, lf);
else
@@ -1623,3 +1635,61 @@ found:
return (ret < 0) ? ret : lf.found;
}
+/*
+ * Find a src file from a DWARF tag path. Prepend optional source path prefix
+ * and chop off leading directories that do not exist. Result is passed back as
+ * a newly allocated path on success.
+ * Return 0 if file was found and readable, -errno otherwise.
+ */
+int get_real_path(const char *raw_path, const char *comp_dir,
+ char **new_path)
+{
+ const char *prefix = symbol_conf.source_prefix;
+
+ if (!prefix) {
+ if (raw_path[0] != '/' && comp_dir)
+ /* If not an absolute path, try to use comp_dir */
+ prefix = comp_dir;
+ else {
+ if (access(raw_path, R_OK) == 0) {
+ *new_path = strdup(raw_path);
+ return *new_path ? 0 : -ENOMEM;
+ } else
+ return -errno;
+ }
+ }
+
+ *new_path = malloc((strlen(prefix) + strlen(raw_path) + 2));
+ if (!*new_path)
+ return -ENOMEM;
+
+ for (;;) {
+ sprintf(*new_path, "%s/%s", prefix, raw_path);
+
+ if (access(*new_path, R_OK) == 0)
+ return 0;
+
+ if (!symbol_conf.source_prefix) {
+ /* In case of searching comp_dir, don't retry */
+ zfree(new_path);
+ return -errno;
+ }
+
+ switch (errno) {
+ case ENAMETOOLONG:
+ case ENOENT:
+ case EROFS:
+ case EFAULT:
+ raw_path = strchr(++raw_path, '/');
+ if (!raw_path) {
+ zfree(new_path);
+ return -ENOENT;
+ }
+ continue;
+
+ default:
+ zfree(new_path);
+ return -errno;
+ }
+ }
+}
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 92590b2c7e1c..ebf8c8c81453 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -55,6 +55,10 @@ extern int debuginfo__find_available_vars_at(struct debuginfo *dbg,
struct variable_list **vls,
int max_points, bool externs);
+/* Find a src file from a DWARF tag path */
+int get_real_path(const char *raw_path, const char *comp_dir,
+ char **new_path);
+
struct probe_finder {
struct perf_probe_event *pev; /* Target probe event */
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 6c6a6953fa93..4d28624a1eca 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -17,6 +17,5 @@ util/xyarray.c
util/cgroup.c
util/rblist.c
util/strlist.c
-../lib/api/fs/fs.c
util/trace-event.c
../../lib/rbtree.c
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
new file mode 100644
index 000000000000..6516e220c247
--- /dev/null
+++ b/tools/perf/util/scripting-engines/Build
@@ -0,0 +1,6 @@
+libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
+libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
+
+CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 22ebc46226e7..430b5d27828e 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -214,6 +214,11 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
+ case PRINT_INT_ARRAY:
+ define_event_symbols(event, ev_name, args->int_array.field);
+ define_event_symbols(event, ev_name, args->int_array.count);
+ define_event_symbols(event, ev_name, args->int_array.el_size);
+ break;
case PRINT_BSTRING:
case PRINT_DYNAMIC_ARRAY:
case PRINT_STRING:
@@ -355,10 +360,9 @@ static void perl_process_event_generic(union perf_event *event,
static void perl_process_event(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct thread *thread,
- struct addr_location *al __maybe_unused)
+ struct addr_location *al)
{
- perl_process_tracepoint(sample, evsel, thread);
+ perl_process_tracepoint(sample, evsel, al->thread);
perl_process_event_generic(event, sample, evsel);
}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 0c815a40a6e8..5544b8cdd1ee 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -231,6 +231,11 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
+ case PRINT_INT_ARRAY:
+ define_event_symbols(event, ev_name, args->int_array.field);
+ define_event_symbols(event, ev_name, args->int_array.count);
+ define_event_symbols(event, ev_name, args->int_array.el_size);
+ break;
case PRINT_STRING:
break;
case PRINT_TYPE:
@@ -376,7 +381,6 @@ exit:
static void python_process_tracepoint(struct perf_sample *sample,
struct perf_evsel *evsel,
- struct thread *thread,
struct addr_location *al)
{
struct event_format *event = evsel->tp_format;
@@ -390,7 +394,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
- const char *comm = thread__comm_str(thread);
+ const char *comm = thread__comm_str(al->thread);
t = PyTuple_New(MAX_FIELDS);
if (!t)
@@ -675,7 +679,7 @@ static int python_export_sample(struct db_export *dbe,
tuple_set_u64(t, 0, es->db_id);
tuple_set_u64(t, 1, es->evsel->db_id);
tuple_set_u64(t, 2, es->al->machine->db_id);
- tuple_set_u64(t, 3, es->thread->db_id);
+ tuple_set_u64(t, 3, es->al->thread->db_id);
tuple_set_u64(t, 4, es->comm_db_id);
tuple_set_u64(t, 5, es->dso_db_id);
tuple_set_u64(t, 6, es->sym_db_id);
@@ -761,7 +765,6 @@ static int python_process_call_return(struct call_return *cr, void *data)
static void python_process_general_event(struct perf_sample *sample,
struct perf_evsel *evsel,
- struct thread *thread,
struct addr_location *al)
{
PyObject *handler, *t, *dict, *callchain, *dict_sample;
@@ -811,7 +814,7 @@ static void python_process_general_event(struct perf_sample *sample,
pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
(const char *)sample->raw_data, sample->raw_size));
pydict_set_item_string_decref(dict, "comm",
- PyString_FromString(thread__comm_str(thread)));
+ PyString_FromString(thread__comm_str(al->thread)));
if (al->map) {
pydict_set_item_string_decref(dict, "dso",
PyString_FromString(al->map->dso->name));
@@ -838,22 +841,20 @@ exit:
static void python_process_event(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct thread *thread,
struct addr_location *al)
{
struct tables *tables = &tables_global;
switch (evsel->attr.type) {
case PERF_TYPE_TRACEPOINT:
- python_process_tracepoint(sample, evsel, thread, al);
+ python_process_tracepoint(sample, evsel, al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
if (tables->db_export_mode)
- db_export__sample(&tables->dbe, event, sample, evsel,
- thread, al);
+ db_export__sample(&tables->dbe, event, sample, evsel, al);
else
- python_process_general_event(sample, evsel, thread, al);
+ python_process_general_event(sample, evsel, al);
}
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0baf75f12b7c..0c74012575ac 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -16,6 +16,12 @@
#include "perf_regs.h"
#include "asm/bug.h"
+static int machines__deliver_event(struct machines *machines,
+ struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool, u64 file_offset);
+
static int perf_session__open(struct perf_session *session)
{
struct perf_data_file *file = session->file;
@@ -86,6 +92,23 @@ static void perf_session__set_comm_exec(struct perf_session *session)
machines__set_comm_exec(&session->machines, comm_exec);
}
+static int ordered_events__deliver_event(struct ordered_events *oe,
+ struct ordered_event *event)
+{
+ struct perf_sample sample;
+ struct perf_session *session = container_of(oe, struct perf_session,
+ ordered_events);
+ int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
+
+ if (ret) {
+ pr_err("Can't parse sample, err = %d\n", ret);
+ return ret;
+ }
+
+ return machines__deliver_event(&session->machines, session->evlist, event->event,
+ &sample, session->tool, event->file_offset);
+}
+
struct perf_session *perf_session__new(struct perf_data_file *file,
bool repipe, struct perf_tool *tool)
{
@@ -95,8 +118,9 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
goto out;
session->repipe = repipe;
- ordered_events__init(&session->ordered_events);
+ session->tool = tool;
machines__init(&session->machines);
+ ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
if (file) {
if (perf_data_file__open(file))
@@ -138,11 +162,6 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
return NULL;
}
-static void perf_session__delete_dead_threads(struct perf_session *session)
-{
- machine__delete_dead_threads(&session->machines.host);
-}
-
static void perf_session__delete_threads(struct perf_session *session)
{
machine__delete_threads(&session->machines.host);
@@ -167,7 +186,6 @@ static void perf_session_env__delete(struct perf_session_env *env)
void perf_session__delete(struct perf_session *session)
{
perf_session__destroy_kernel_maps(session);
- perf_session__delete_dead_threads(session);
perf_session__delete_threads(session);
perf_session_env__delete(&session->header.env);
machines__exit(&session->machines);
@@ -215,10 +233,17 @@ static int process_event_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
+static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
- struct perf_session *perf_session
- __maybe_unused)
+ struct ordered_events *oe __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -226,7 +251,7 @@ static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
- struct perf_session *session);
+ struct ordered_events *oe);
static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
@@ -264,7 +289,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
- tool->build_id = process_finished_round_stub;
+ tool->build_id = process_build_id_stub;
if (tool->finished_round == NULL) {
if (tool->ordered_events)
tool->finished_round = process_finished_round;
@@ -514,54 +539,80 @@ static perf_event__swap_op perf_event__swap_ops[] = {
* Flush every event below timestamp 7
* etc...
*/
-static int process_finished_round(struct perf_tool *tool,
+static int process_finished_round(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
- struct perf_session *session)
+ struct ordered_events *oe)
{
- return ordered_events__flush(session, tool, OE_FLUSH__ROUND);
+ return ordered_events__flush(oe, OE_FLUSH__ROUND);
}
-int perf_session_queue_event(struct perf_session *s, union perf_event *event,
- struct perf_tool *tool, struct perf_sample *sample,
- u64 file_offset)
+int perf_session__queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset)
{
- struct ordered_events *oe = &s->ordered_events;
- u64 timestamp = sample->time;
- struct ordered_event *new;
-
- if (!timestamp || timestamp == ~0ULL)
- return -ETIME;
+ return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
+}
- if (timestamp < oe->last_flush) {
- pr_oe_time(timestamp, "out of order event\n");
- pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
- oe->last_flush_type);
+static void callchain__lbr_callstack_printf(struct perf_sample *sample)
+{
+ struct ip_callchain *callchain = sample->callchain;
+ struct branch_stack *lbr_stack = sample->branch_stack;
+ u64 kernel_callchain_nr = callchain->nr;
+ unsigned int i;
- s->stats.nr_unordered_events++;
+ for (i = 0; i < kernel_callchain_nr; i++) {
+ if (callchain->ips[i] == PERF_CONTEXT_USER)
+ break;
}
- new = ordered_events__new(oe, timestamp, event);
- if (!new) {
- ordered_events__flush(s, tool, OE_FLUSH__HALF);
- new = ordered_events__new(oe, timestamp, event);
- }
+ if ((i != kernel_callchain_nr) && lbr_stack->nr) {
+ u64 total_nr;
+ /*
+ * The LBR callstack only captures the user call chain;
+ * i is the number of kernel call chain entries and
+ * 1 accounts for the PERF_CONTEXT_USER marker.
+ *
+ * The user call chain is stored in LBR registers.
+ * LBR are pair registers. The caller is stored
+ * in "from" register, while the callee is stored
+ * in "to" register.
+ * For example, there is a call stack
+ * "A"->"B"->"C"->"D".
+ * The LBR registers will record it like
+ * "C"->"D", "B"->"C", "A"->"B".
+ * So only the first "to" register and all "from"
+ * registers are needed to construct the whole stack.
+ */
+ total_nr = i + 1 + lbr_stack->nr + 1;
+ kernel_callchain_nr = i + 1;
- if (!new)
- return -ENOMEM;
+ printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
- new->file_offset = file_offset;
- return 0;
+ for (i = 0; i < kernel_callchain_nr; i++)
+ printf("..... %2d: %016" PRIx64 "\n",
+ i, callchain->ips[i]);
+
+ printf("..... %2d: %016" PRIx64 "\n",
+ (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
+ for (i = 0; i < lbr_stack->nr; i++)
+ printf("..... %2d: %016" PRIx64 "\n",
+ (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
+ }
}
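
[Editor's note] The comment above describes how the user-space part of a call stack is rebuilt from LBR pairs: the innermost callee is taken once from the first entry's "to" address, and each caller comes from an entry's "from" address. A hedged, self-contained sketch of that reconstruction (the struct and names are illustrative, not perf's real types):

#include <stdio.h>

/* Illustrative only: rebuild "A"->"B"->"C"->"D" from LBR-style pairs
 * recorded as C->D, B->C, A->B (most recent branch first). */
struct branch { unsigned long long from, to; };

static void print_user_stack(const struct branch *lbr, unsigned int nr)
{
	unsigned int i;

	if (!nr)
		return;

	/* innermost frame: the "to" of the most recent branch (D) */
	printf("%#llx\n", lbr[0].to);

	/* then every caller, taken from each "from" address (C, B, A) */
	for (i = 0; i < nr; i++)
		printf("%#llx\n", lbr[i].from);
}
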
-static void callchain__printf(struct perf_sample *sample)
+static void callchain__printf(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
unsigned int i;
+ struct ip_callchain *callchain = sample->callchain;
+
+ if (has_branch_callstack(evsel))
+ callchain__lbr_callstack_printf(sample);
- printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
+ printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
- for (i = 0; i < sample->callchain->nr; i++)
+ for (i = 0; i < callchain->nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
- i, sample->callchain->ips[i]);
+ i, callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample)
@@ -636,14 +687,14 @@ static void stack_user__printf(struct stack_dump *dump)
dump->size, dump->offset);
}
-static void perf_session__print_tstamp(struct perf_session *session,
+static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
union perf_event *event,
struct perf_sample *sample)
{
- u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);
+ u64 sample_type = __perf_evlist__combined_sample_type(evlist);
if (event->header.type != PERF_RECORD_SAMPLE &&
- !perf_evlist__sample_id_all(session->evlist)) {
+ !perf_evlist__sample_id_all(evlist)) {
fputs("-1 -1 ", stdout);
return;
}
@@ -685,7 +736,7 @@ static void sample_read__printf(struct perf_sample *sample, u64 read_format)
sample->read.one.id, sample->read.one.value);
}
-static void dump_event(struct perf_session *session, union perf_event *event,
+static void dump_event(struct perf_evlist *evlist, union perf_event *event,
u64 file_offset, struct perf_sample *sample)
{
if (!dump_trace)
@@ -697,7 +748,7 @@ static void dump_event(struct perf_session *session, union perf_event *event,
trace_event(event);
if (sample)
- perf_session__print_tstamp(session, event, sample);
+ perf_evlist__print_tstamp(evlist, event, sample);
printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
event->header.size, perf_event__name(event->header.type));
@@ -718,9 +769,9 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
sample_type = evsel->attr.sample_type;
if (sample_type & PERF_SAMPLE_CALLCHAIN)
- callchain__printf(sample);
+ callchain__printf(evsel, sample);
- if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
branch_stack__printf(sample);
if (sample_type & PERF_SAMPLE_REGS_USER)
@@ -745,8 +796,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
sample_read__printf(sample, evsel->attr.read_format);
}
-static struct machine *
- perf_session__find_machine_for_cpumode(struct perf_session *session,
+static struct machine *machines__find_for_cpumode(struct machines *machines,
union perf_event *event,
struct perf_sample *sample)
{
@@ -764,26 +814,24 @@ static struct machine *
else
pid = sample->pid;
- machine = perf_session__find_machine(session, pid);
+ machine = machines__find(machines, pid);
if (!machine)
- machine = perf_session__findnew_machine(session,
- DEFAULT_GUEST_KERNEL_ID);
+ machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
return machine;
}
- return &session->machines.host;
+ return &machines->host;
}
-static int deliver_sample_value(struct perf_session *session,
+static int deliver_sample_value(struct perf_evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct sample_read_value *v,
struct machine *machine)
{
- struct perf_sample_id *sid;
+ struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
- sid = perf_evlist__id2sid(session->evlist, v->id);
if (sid) {
sample->id = v->id;
sample->period = v->value - sid->period;
@@ -791,14 +839,14 @@ static int deliver_sample_value(struct perf_session *session,
}
if (!sid || sid->evsel == NULL) {
- ++session->stats.nr_unknown_id;
+ ++evlist->stats.nr_unknown_id;
return 0;
}
return tool->sample(tool, event, sample, sid->evsel, machine);
}
-static int deliver_sample_group(struct perf_session *session,
+static int deliver_sample_group(struct perf_evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -808,7 +856,7 @@ static int deliver_sample_group(struct perf_session *session,
u64 i;
for (i = 0; i < sample->read.group.nr; i++) {
- ret = deliver_sample_value(session, tool, event, sample,
+ ret = deliver_sample_value(evlist, tool, event, sample,
&sample->read.group.values[i],
machine);
if (ret)
@@ -819,7 +867,7 @@ static int deliver_sample_group(struct perf_session *session,
}
static int
-perf_session__deliver_sample(struct perf_session *session,
+ perf_evlist__deliver_sample(struct perf_evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -836,41 +884,40 @@ perf_session__deliver_sample(struct perf_session *session,
/* For PERF_SAMPLE_READ we have either single or group mode. */
if (read_format & PERF_FORMAT_GROUP)
- return deliver_sample_group(session, tool, event, sample,
+ return deliver_sample_group(evlist, tool, event, sample,
machine);
else
- return deliver_sample_value(session, tool, event, sample,
+ return deliver_sample_value(evlist, tool, event, sample,
&sample->read.one, machine);
}
-int perf_session__deliver_event(struct perf_session *session,
- union perf_event *event,
- struct perf_sample *sample,
- struct perf_tool *tool, u64 file_offset)
+static int machines__deliver_event(struct machines *machines,
+ struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool, u64 file_offset)
{
struct perf_evsel *evsel;
struct machine *machine;
- dump_event(session, event, file_offset, sample);
+ dump_event(evlist, event, file_offset, sample);
- evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+ evsel = perf_evlist__id2evsel(evlist, sample->id);
- machine = perf_session__find_machine_for_cpumode(session, event,
- sample);
+ machine = machines__find_for_cpumode(machines, event, sample);
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
dump_sample(evsel, event, sample);
if (evsel == NULL) {
- ++session->stats.nr_unknown_id;
+ ++evlist->stats.nr_unknown_id;
return 0;
}
if (machine == NULL) {
- ++session->stats.nr_unprocessable_samples;
+ ++evlist->stats.nr_unprocessable_samples;
return 0;
}
- return perf_session__deliver_sample(session, tool, event,
- sample, evsel, machine);
+ return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_MMAP2:
@@ -883,7 +930,7 @@ int perf_session__deliver_event(struct perf_session *session,
return tool->exit(tool, event, sample, machine);
case PERF_RECORD_LOST:
if (tool->lost == perf_event__process_lost)
- session->stats.total_lost += event->lost.lost;
+ evlist->stats.total_lost += event->lost.lost;
return tool->lost(tool, event, sample, machine);
case PERF_RECORD_READ:
return tool->read(tool, event, sample, evsel, machine);
@@ -892,20 +939,21 @@ int perf_session__deliver_event(struct perf_session *session,
case PERF_RECORD_UNTHROTTLE:
return tool->unthrottle(tool, event, sample, machine);
default:
- ++session->stats.nr_unknown_events;
+ ++evlist->stats.nr_unknown_events;
return -1;
}
}
static s64 perf_session__process_user_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool,
u64 file_offset)
{
+ struct ordered_events *oe = &session->ordered_events;
+ struct perf_tool *tool = session->tool;
int fd = perf_data_file__fd(session->file);
int err;
- dump_event(session, event, file_offset, NULL);
+ dump_event(session->evlist, event, file_offset, NULL);
/* These events are processed right away */
switch (event->header.type) {
@@ -929,7 +977,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
- return tool->finished_round(tool, event, session);
+ return tool->finished_round(tool, event, oe);
case PERF_RECORD_ID_INDEX:
return tool->id_index(tool, event, session);
default:
@@ -939,15 +987,17 @@ static s64 perf_session__process_user_event(struct perf_session *session,
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
- struct perf_tool *tool)
+ struct perf_sample *sample)
{
- events_stats__inc(&session->stats, event->header.type);
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_tool *tool = session->tool;
+
+ events_stats__inc(&evlist->stats, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
- return perf_session__process_user_event(session, event, tool, 0);
+ return perf_session__process_user_event(session, event, 0);
- return perf_session__deliver_event(session, event, sample, tool, 0);
+ return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}
static void event_swap(union perf_event *event, bool sample_id_all)
@@ -1015,40 +1065,39 @@ out_parse_sample:
}
static s64 perf_session__process_event(struct perf_session *session,
- union perf_event *event,
- struct perf_tool *tool,
- u64 file_offset)
+ union perf_event *event, u64 file_offset)
{
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_tool *tool = session->tool;
struct perf_sample sample;
int ret;
if (session->header.needs_swap)
- event_swap(event, perf_evlist__sample_id_all(session->evlist));
+ event_swap(event, perf_evlist__sample_id_all(evlist));
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
- events_stats__inc(&session->stats, event->header.type);
+ events_stats__inc(&evlist->stats, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
- return perf_session__process_user_event(session, event, tool, file_offset);
+ return perf_session__process_user_event(session, event, file_offset);
/*
* For all kernel events we get the sample data
*/
- ret = perf_evlist__parse_sample(session->evlist, event, &sample);
+ ret = perf_evlist__parse_sample(evlist, event, &sample);
if (ret)
return ret;
if (tool->ordered_events) {
- ret = perf_session_queue_event(session, event, tool, &sample,
- file_offset);
+ ret = perf_session__queue_event(session, event, &sample, file_offset);
if (ret != -ETIME)
return ret;
}
- return perf_session__deliver_event(session, event, &sample, tool,
- file_offset);
+ return machines__deliver_event(&session->machines, evlist, event,
+ &sample, tool, file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
@@ -1076,54 +1125,57 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
return thread;
}
-static void perf_session__warn_about_errors(const struct perf_session *session,
- const struct perf_tool *tool)
+static void perf_session__warn_about_errors(const struct perf_session *session)
{
- if (tool->lost == perf_event__process_lost &&
- session->stats.nr_events[PERF_RECORD_LOST] != 0) {
+ const struct events_stats *stats = &session->evlist->stats;
+ const struct ordered_events *oe = &session->ordered_events;
+
+ if (session->tool->lost == perf_event__process_lost &&
+ stats->nr_events[PERF_RECORD_LOST] != 0) {
ui__warning("Processed %d events and lost %d chunks!\n\n"
"Check IO/CPU overload!\n\n",
- session->stats.nr_events[0],
- session->stats.nr_events[PERF_RECORD_LOST]);
+ stats->nr_events[0],
+ stats->nr_events[PERF_RECORD_LOST]);
}
- if (session->stats.nr_unknown_events != 0) {
+ if (stats->nr_unknown_events != 0) {
ui__warning("Found %u unknown events!\n\n"
"Is this an older tool processing a perf.data "
"file generated by a more recent tool?\n\n"
"If that is not the case, consider "
"reporting to linux-kernel@vger.kernel.org.\n\n",
- session->stats.nr_unknown_events);
+ stats->nr_unknown_events);
}
- if (session->stats.nr_unknown_id != 0) {
+ if (stats->nr_unknown_id != 0) {
ui__warning("%u samples with id not present in the header\n",
- session->stats.nr_unknown_id);
+ stats->nr_unknown_id);
}
- if (session->stats.nr_invalid_chains != 0) {
- ui__warning("Found invalid callchains!\n\n"
- "%u out of %u events were discarded for this reason.\n\n"
- "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
- session->stats.nr_invalid_chains,
- session->stats.nr_events[PERF_RECORD_SAMPLE]);
- }
+ if (stats->nr_invalid_chains != 0) {
+ ui__warning("Found invalid callchains!\n\n"
+ "%u out of %u events were discarded for this reason.\n\n"
+ "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
+ stats->nr_invalid_chains,
+ stats->nr_events[PERF_RECORD_SAMPLE]);
+ }
- if (session->stats.nr_unprocessable_samples != 0) {
+ if (stats->nr_unprocessable_samples != 0) {
ui__warning("%u unprocessable samples recorded.\n"
"Do you have a KVM guest running and not using 'perf kvm'?\n",
- session->stats.nr_unprocessable_samples);
+ stats->nr_unprocessable_samples);
}
- if (session->stats.nr_unordered_events != 0)
- ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
+ if (oe->nr_unordered_events != 0)
+ ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}
volatile int session_done;
-static int __perf_session__process_pipe_events(struct perf_session *session,
- struct perf_tool *tool)
+static int __perf_session__process_pipe_events(struct perf_session *session)
{
+ struct ordered_events *oe = &session->ordered_events;
+ struct perf_tool *tool = session->tool;
int fd = perf_data_file__fd(session->file);
union perf_event *event;
uint32_t size, cur_size = 0;
@@ -1187,7 +1239,7 @@ more:
}
}
- if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
+ if ((skip = perf_session__process_event(session, event, head)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
head, event->header.size, event->header.type);
err = -EINVAL;
@@ -1203,10 +1255,10 @@ more:
goto more;
done:
/* do the final flush for ordered samples */
- err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
+ err = ordered_events__flush(oe, OE_FLUSH__FINAL);
out_err:
free(buf);
- perf_session__warn_about_errors(session, tool);
+ perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
return err;
}
@@ -1253,8 +1305,10 @@ fetch_mmaped_event(struct perf_session *session,
static int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
- u64 file_size, struct perf_tool *tool)
+ u64 file_size)
{
+ struct ordered_events *oe = &session->ordered_events;
+ struct perf_tool *tool = session->tool;
int fd = perf_data_file__fd(session->file);
u64 head, page_offset, file_offset, file_pos, size;
int err, mmap_prot, mmap_flags, map_idx = 0;
@@ -1323,8 +1377,7 @@ more:
size = event->header.size;
if (size < sizeof(struct perf_event_header) ||
- (skip = perf_session__process_event(session, event, tool, file_pos))
- < 0) {
+ (skip = perf_session__process_event(session, event, file_pos)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
file_offset + head, event->header.size,
event->header.type);
@@ -1348,17 +1401,16 @@ more:
out:
/* do the final flush for ordered samples */
- err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
+ err = ordered_events__flush(oe, OE_FLUSH__FINAL);
out_err:
ui_progress__finish();
- perf_session__warn_about_errors(session, tool);
+ perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
session->one_mmap = false;
return err;
}
-int perf_session__process_events(struct perf_session *session,
- struct perf_tool *tool)
+int perf_session__process_events(struct perf_session *session)
{
u64 size = perf_data_file__size(session->file);
int err;
@@ -1369,10 +1421,9 @@ int perf_session__process_events(struct perf_session *session,
if (!perf_data_file__is_pipe(session->file))
err = __perf_session__process_events(session,
session->header.data_offset,
- session->header.data_size,
- size, tool);
+ session->header.data_size, size);
else
- err = __perf_session__process_pipe_events(session, tool);
+ err = __perf_session__process_pipe_events(session);
return err;
}
@@ -1415,6 +1466,9 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
for (i = 0; i < MAP__NR_TYPES; ++i) {
struct kmap *kmap = map__kmap(maps[i]);
+
+ if (!kmap)
+ continue;
kmap->ref_reloc_sym = ref;
}
@@ -1436,7 +1490,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
size_t ret = fprintf(fp, "Aggregated stats:\n");
- ret += events_stats__fprintf(&session->stats, fp);
+ ret += events_stats__fprintf(&session->evlist->stats, fp);
return ret;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 6d663dc76404..d5fa7b7916ef 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -20,13 +20,13 @@ struct perf_session {
struct machines machines;
struct perf_evlist *evlist;
struct trace_event tevent;
- struct events_stats stats;
bool repipe;
bool one_mmap;
void *one_mmap_addr;
u64 one_mmap_offset;
struct ordered_events ordered_events;
struct perf_data_file *file;
+ struct perf_tool *tool;
};
#define PRINT_IP_OPT_IP (1<<0)
@@ -49,20 +49,13 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
union perf_event **event_ptr,
struct perf_sample *sample);
-int perf_session__process_events(struct perf_session *session,
- struct perf_tool *tool);
+int perf_session__process_events(struct perf_session *session);
-int perf_session_queue_event(struct perf_session *s, union perf_event *event,
- struct perf_tool *tool, struct perf_sample *sample,
- u64 file_offset);
+int perf_session__queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset);
void perf_tool__fill_defaults(struct perf_tool *tool);
-int perf_session__deliver_event(struct perf_session *session,
- union perf_event *event,
- struct perf_sample *sample,
- struct perf_tool *tool, u64 file_offset);
-
int perf_session__resolve_callchain(struct perf_session *session,
struct perf_evsel *evsel,
struct thread *thread,
@@ -126,8 +119,7 @@ extern volatile int session_done;
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
- struct perf_tool *tool);
+ struct perf_sample *sample);
int perf_event__process_id_index(struct perf_tool *tool,
union perf_event *event,
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index d0aee4b9dfd4..1833103768cb 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -25,7 +25,7 @@ cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
-libapikfs = getenv('LIBAPIKFS')
+libapikfs = getenv('LIBAPI')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 7a39c1ed8d37..4593f36ecc4c 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1463,6 +1463,15 @@ int sort_dimension__add(const char *tok)
sort__has_parent = 1;
} else if (sd->entry == &sort_sym) {
sort__has_sym = 1;
+ /*
+ * perf diff displays the performance difference amongst
+ * two or more perf.data files. Those files could come
+ * from different binaries. So we should not compare
+ * their ips, but the name of symbol.
+ */
+ if (sort__mode == SORT_MODE__DIFF)
+ sd->entry->se_collapse = sort__sym_sort;
+
} else if (sd->entry == &sort_dso) {
sort__has_dso = 1;
}
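
[Editor's note] The comment in the hunk above explains why perf diff collapses symbol entries by name rather than by address: the same function can live at different addresses in different binaries. A minimal sketch of a name-based collapse comparator, using hypothetical stand-in structs rather than perf's real hist_entry/symbol types:

#include <string.h>

/* Hypothetical stand-ins; sketch only. */
struct sym { const char *name; unsigned long long addr; };
struct entry { struct sym *sym; };

/* Collapse key for 'perf diff': compare by symbol name, not by ip,
 * so the same function from two different binaries matches. */
static int sym_collapse_cmp(const struct entry *l, const struct entry *r)
{
	if (!l->sym || !r->sym)
		return (l->sym ? 1 : 0) - (r->sym ? 1 : 0);
	return strcmp(l->sym->name, r->sym->name);
}
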
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index c03e4ff8beff..846036a921dc 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -44,6 +44,7 @@ extern struct sort_entry sort_dso_to;
extern struct sort_entry sort_sym_from;
extern struct sort_entry sort_sym_to;
extern enum sort_type sort__first_dimension;
+extern const char default_mem_sort_order[];
struct he_stat {
u64 period;
@@ -102,7 +103,6 @@ struct hist_entry {
bool init_have_children;
char level;
- bool used;
u8 filtered;
char *srcline;
struct symbol *parent;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 33b7a2aef713..a7ab6063e038 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -74,6 +74,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
return GELF_ST_TYPE(sym->st_info);
}
+#ifndef STT_GNU_IFUNC
+#define STT_GNU_IFUNC 10
+#endif
+
static inline int elf_sym__is_function(const GElf_Sym *sym)
{
return (elf_sym__type(sym) == STT_FUNC ||
@@ -575,32 +579,37 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
static int decompress_kmodule(struct dso *dso, const char *name,
enum dso_binary_type type)
{
- int fd;
- const char *ext = strrchr(name, '.');
+ int fd = -1;
char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
+ struct kmod_path m;
if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
return -1;
- if (!ext || !is_supported_compression(ext + 1)) {
- ext = strrchr(dso->name, '.');
- if (!ext || !is_supported_compression(ext + 1))
- return -1;
- }
+ if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
+ name = dso->long_name;
- fd = mkstemp(tmpbuf);
- if (fd < 0)
+ if (kmod_path__parse_ext(&m, name) || !m.comp)
return -1;
- if (!decompress_to_file(ext + 1, name, fd)) {
+ fd = mkstemp(tmpbuf);
+ if (fd < 0) {
+ dso->load_errno = errno;
+ goto out;
+ }
+
+ if (!decompress_to_file(m.ext, name, fd)) {
+ dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
close(fd);
fd = -1;
}
unlink(tmpbuf);
+out:
+ free(m.ext);
return fd;
}
@@ -629,37 +638,49 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
Elf *elf;
int fd;
- if (dso__needs_decompress(dso))
+ if (dso__needs_decompress(dso)) {
fd = decompress_kmodule(dso, name, type);
- else
+ if (fd < 0)
+ return -1;
+ } else {
fd = open(name, O_RDONLY);
-
- if (fd < 0)
- return -1;
+ if (fd < 0) {
+ dso->load_errno = errno;
+ return -1;
+ }
+ }
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
+ dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL) {
+ dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
pr_debug("%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
- if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
+ if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
+ dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
goto out_elf_end;
+ }
/* Always reject images with a mismatched build-id: */
if (dso->has_build_id) {
u8 build_id[BUILD_ID_SIZE];
- if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
+ if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
+ dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
goto out_elf_end;
+ }
- if (!dso__build_id_equal(dso, build_id))
+ if (!dso__build_id_equal(dso, build_id)) {
+ dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
goto out_elf_end;
+ }
}
ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
@@ -695,8 +716,10 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
}
ss->name = strdup(name);
- if (!ss->name)
+ if (!ss->name) {
+ dso->load_errno = errno;
goto out_elf_end;
+ }
ss->elf = elf;
ss->fd = fd;
@@ -753,6 +776,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
symbol_filter_t filter, int kmodule)
{
struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
+ struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
struct map *curr_map = map;
struct dso *curr_dso = dso;
Elf_Data *symstrs, *secstrs;
@@ -768,6 +792,9 @@ int dso__load_sym(struct dso *dso, struct map *map,
int nr = 0;
bool remap_kernel = false, adjust_kernel_syms = false;
+ if (kmap && !kmaps)
+ return -1;
+
dso->symtab_type = syms_ss->type;
dso->is_64_bit = syms_ss->is_64_bit;
dso->rel = syms_ss->ehdr.e_type == ET_REL;
@@ -864,10 +891,9 @@ int dso__load_sym(struct dso *dso, struct map *map,
/* Reject ARM ELF "mapping symbols": these aren't unique and
* don't identify functions, so will confuse the profile
* output: */
- if (ehdr.e_machine == EM_ARM) {
- if (!strcmp(elf_name, "$a") ||
- !strcmp(elf_name, "$d") ||
- !strcmp(elf_name, "$t"))
+ if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
+ if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
+ && (elf_name[2] == '\0' || elf_name[2] == '.'))
continue;
}
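
[Editor's note] The change above widens the mapping-symbol filter from ARM to ARM and AArch64, and from the exact names "$a"/"$d"/"$t" to any of "$a"/"$d"/"$t"/"$x" with an optional "." suffix (e.g. "$x.foo"). A small sketch of the equivalent predicate, assuming elf_name is a NUL-terminated symbol name:

#include <stdbool.h>
#include <string.h>

/* Sketch: true for ARM/AArch64 ELF "mapping symbols" such as
 * "$a", "$d", "$t", "$x" and their "$x.suffix" forms. */
static bool is_mapping_symbol(const char *elf_name)
{
	return elf_name[0] == '$' &&
	       elf_name[1] != '\0' && strchr("adtx", elf_name[1]) &&
	       (elf_name[2] == '\0' || elf_name[2] == '.');
}
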
@@ -936,8 +962,10 @@ int dso__load_sym(struct dso *dso, struct map *map,
map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip;
/* Ensure maps are correctly ordered */
- map_groups__remove(kmap->kmaps, map);
- map_groups__insert(kmap->kmaps, map);
+ if (kmaps) {
+ map_groups__remove(kmaps, map);
+ map_groups__insert(kmaps, map);
+ }
}
/*
@@ -961,7 +989,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
snprintf(dso_name, sizeof(dso_name),
"%s%s", dso->short_name, section_name);
- curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
+ curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
if (curr_map == NULL) {
u64 start = sym.st_value;
@@ -991,7 +1019,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
curr_map->unmap_ip = identity__map_ip;
}
curr_dso->symtab_type = dso->symtab_type;
- map_groups__insert(kmap->kmaps, curr_map);
+ map_groups__insert(kmaps, curr_map);
/*
* The new DSO should go to the kernel DSOS
*/
@@ -1045,14 +1073,15 @@ new_symbol:
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0) {
- symbols__fixup_duplicate(&dso->symbols[map->type]);
+ if (!symbol_conf.allow_aliases)
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
symbols__fixup_end(&dso->symbols[map->type]);
if (kmap) {
/*
* We need to fixup this here too because we create new
* maps here, for things like vsyscall sections.
*/
- __map_groups__fixup_end(kmap->kmaps, map->type);
+ __map_groups__fixup_end(kmaps, map->type);
}
}
err = nr;
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index d7efb03b3f9a..fd8477cacf88 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -246,13 +246,12 @@ out:
return ret;
}
-int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused,
- const char *name,
+int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
enum dso_binary_type type)
{
int fd = open(name, O_RDONLY);
if (fd < 0)
- return -1;
+ goto out_errno;
ss->name = strdup(name);
if (!ss->name)
@@ -264,6 +263,8 @@ int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused,
return 0;
out_close:
close(fd);
+out_errno:
+ dso->load_errno = errno;
return -1;
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a69066865a55..201f6c4ca738 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -15,6 +15,7 @@
#include "machine.h"
#include "symbol.h"
#include "strlist.h"
+#include "intlist.h"
#include "header.h"
#include <elf.h>
@@ -629,13 +630,16 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
symbol_filter_t filter)
{
- struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct map_groups *kmaps = map__kmaps(map);
struct map *curr_map;
struct symbol *pos;
int count = 0, moved = 0;
struct rb_root *root = &dso->symbols[map->type];
struct rb_node *next = rb_first(root);
+ if (!kmaps)
+ return -1;
+
while (next) {
char *module;
@@ -681,8 +685,8 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
symbol_filter_t filter)
{
- struct map_groups *kmaps = map__kmap(map)->kmaps;
- struct machine *machine = kmaps->machine;
+ struct map_groups *kmaps = map__kmaps(map);
+ struct machine *machine;
struct map *curr_map = map;
struct symbol *pos;
int count = 0, moved = 0;
@@ -690,6 +694,11 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
struct rb_node *next = rb_first(root);
int kernel_range = 0;
+ if (!kmaps)
+ return -1;
+
+ machine = kmaps->machine;
+
while (next) {
char *module;
@@ -1024,9 +1033,12 @@ static bool filename_from_kallsyms_filename(char *filename,
static int validate_kcore_modules(const char *kallsyms_filename,
struct map *map)
{
- struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct map_groups *kmaps = map__kmaps(map);
char modules_filename[PATH_MAX];
+ if (!kmaps)
+ return -EINVAL;
+
if (!filename_from_kallsyms_filename(modules_filename, "modules",
kallsyms_filename))
return -EINVAL;
@@ -1042,6 +1054,9 @@ static int validate_kcore_addresses(const char *kallsyms_filename,
{
struct kmap *kmap = map__kmap(map);
+ if (!kmap)
+ return -EINVAL;
+
if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
u64 start;
@@ -1080,8 +1095,8 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
static int dso__load_kcore(struct dso *dso, struct map *map,
const char *kallsyms_filename)
{
- struct map_groups *kmaps = map__kmap(map)->kmaps;
- struct machine *machine = kmaps->machine;
+ struct map_groups *kmaps = map__kmaps(map);
+ struct machine *machine;
struct kcore_mapfn_data md;
struct map *old_map, *new_map, *replacement_map = NULL;
bool is_64_bit;
@@ -1089,6 +1104,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
char kcore_filename[PATH_MAX];
struct symbol *sym;
+ if (!kmaps)
+ return -EINVAL;
+
+ machine = kmaps->machine;
+
/* This function requires that the map is the kernel map */
if (map != machine->vmlinux_maps[map->type])
return -EINVAL;
@@ -1201,6 +1221,9 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
struct kmap *kmap = map__kmap(map);
u64 addr;
+ if (!kmap)
+ return -1;
+
if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
return 0;
@@ -1859,6 +1882,20 @@ int setup_list(struct strlist **list, const char *list_str,
return 0;
}
+int setup_intlist(struct intlist **list, const char *list_str,
+ const char *list_name)
+{
+ if (list_str == NULL)
+ return 0;
+
+ *list = intlist__new(list_str);
+ if (!*list) {
+ pr_err("problems parsing %s list\n", list_name);
+ return -1;
+ }
+ return 0;
+}
+
static bool symbol__read_kptr_restrict(void)
{
bool value = false;
@@ -1909,9 +1946,17 @@ int symbol__init(struct perf_session_env *env)
symbol_conf.comm_list_str, "comm") < 0)
goto out_free_dso_list;
+ if (setup_intlist(&symbol_conf.pid_list,
+ symbol_conf.pid_list_str, "pid") < 0)
+ goto out_free_comm_list;
+
+ if (setup_intlist(&symbol_conf.tid_list,
+ symbol_conf.tid_list_str, "tid") < 0)
+ goto out_free_pid_list;
+
if (setup_list(&symbol_conf.sym_list,
symbol_conf.sym_list_str, "symbol") < 0)
- goto out_free_comm_list;
+ goto out_free_tid_list;
/*
* A path to symbols of "/" is identical to ""
@@ -1930,6 +1975,10 @@ int symbol__init(struct perf_session_env *env)
symbol_conf.initialized = true;
return 0;
+out_free_tid_list:
+ intlist__delete(symbol_conf.tid_list);
+out_free_pid_list:
+ intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
@@ -1944,6 +1993,8 @@ void symbol__exit(void)
strlist__delete(symbol_conf.sym_list);
strlist__delete(symbol_conf.dso_list);
strlist__delete(symbol_conf.comm_list);
+ intlist__delete(symbol_conf.tid_list);
+ intlist__delete(symbol_conf.pid_list);
vmlinux_path__exit();
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
symbol_conf.initialized = false;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 1650dcb3a67b..09561500164a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -78,6 +78,7 @@ static inline size_t symbol__size(const struct symbol *sym)
}
struct strlist;
+struct intlist;
struct symbol_conf {
unsigned short priv_size;
@@ -87,6 +88,7 @@ struct symbol_conf {
ignore_vmlinux_buildid,
show_kernel_path,
use_modules,
+ allow_aliases,
sort_by_name,
show_nr_samples,
show_total_period,
@@ -114,6 +116,8 @@ struct symbol_conf {
const char *guestmount;
const char *dso_list_str,
*comm_list_str,
+ *pid_list_str,
+ *tid_list_str,
*sym_list_str,
*col_width_list_str;
struct strlist *dso_list,
@@ -123,6 +127,8 @@ struct symbol_conf {
*dso_to_list,
*sym_from_list,
*sym_to_list;
+ struct intlist *pid_list,
+ *tid_list;
const char *symfs;
};
@@ -294,5 +300,7 @@ int compare_proc_modules(const char *from, const char *to);
int setup_list(struct strlist **list, const char *list_str,
const char *list_name);
+int setup_intlist(struct intlist **list, const char *list_str,
+ const char *list_name);
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index e74c5963dc7a..a53603b27e52 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -123,11 +123,8 @@ int target__strerror(struct target *target, int errnum,
if (errnum >= 0) {
const char *err = strerror_r(errnum, buf, buflen);
- if (err != buf) {
- size_t len = strlen(err);
- memcpy(buf, err, min(buflen - 1, len));
- *(buf + min(buflen - 1, len)) = '\0';
- }
+ if (err != buf)
+ scnprintf(buf, buflen, "%s", err);
return 0;
}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 9ebc8b1f9be5..1c8fbc9588c5 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -82,6 +82,20 @@ void thread__delete(struct thread *thread)
free(thread);
}
+struct thread *thread__get(struct thread *thread)
+{
+ ++thread->refcnt;
+ return thread;
+}
+
+void thread__put(struct thread *thread)
+{
+ if (thread && --thread->refcnt == 0) {
+ list_del_init(&thread->node);
+ thread__delete(thread);
+ }
+}
+
struct comm *thread__comm(const struct thread *thread)
{
if (list_empty(&thread->comm_list))
@@ -192,7 +206,6 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
err = thread__set_comm(thread, comm, timestamp);
if (err)
return err;
- thread->comm_set = true;
}
thread->ppid = parent->tid;
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 160fd066a7d1..9b8a54dc34a8 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -7,6 +7,7 @@
#include <sys/types.h>
#include "symbol.h"
#include <strlist.h>
+#include <intlist.h>
struct thread_stack;
@@ -20,6 +21,7 @@ struct thread {
pid_t tid;
pid_t ppid;
int cpu;
+ int refcnt;
char shortname[3];
bool comm_set;
bool dead; /* if set thread has exited */
@@ -37,6 +39,18 @@ struct comm;
struct thread *thread__new(pid_t pid, pid_t tid);
int thread__init_map_groups(struct thread *thread, struct machine *machine);
void thread__delete(struct thread *thread);
+
+struct thread *thread__get(struct thread *thread);
+void thread__put(struct thread *thread);
+
+static inline void __thread__zput(struct thread **thread)
+{
+ thread__put(*thread);
+ *thread = NULL;
+}
+
+#define thread__zput(thread) __thread__zput(&thread)
+
static inline void thread__exited(struct thread *thread)
{
thread->dead = true;
@@ -87,6 +101,16 @@ static inline bool thread__is_filtered(struct thread *thread)
return true;
}
+ if (symbol_conf.pid_list &&
+ !intlist__has_entry(symbol_conf.pid_list, thread->pid_)) {
+ return true;
+ }
+
+ if (symbol_conf.tid_list &&
+ !intlist__has_entry(symbol_conf.tid_list, thread->tid)) {
+ return true;
+ }
+
return false;
}
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index bb2708bbfaca..51d9e56c0f84 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -10,6 +10,7 @@ struct perf_evsel;
struct perf_sample;
struct perf_tool;
struct machine;
+struct ordered_events;
typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample,
@@ -25,6 +26,9 @@ typedef int (*event_attr_op)(struct perf_tool *tool,
typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
struct perf_session *session);
+typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
+ struct ordered_events *oe);
+
struct perf_tool {
event_sample sample,
read;
@@ -38,8 +42,8 @@ struct perf_tool {
unthrottle;
event_attr_op attr;
event_op2 tracing_data;
- event_op2 finished_round,
- build_id,
+ event_oe finished_round;
+ event_op2 build_id,
id_index;
bool ordered_events;
bool ordering_requires_timestamps;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index c36636fd825b..25d6c737be3e 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -112,8 +112,8 @@ unsigned long long read_size(struct event_format *event, void *ptr, int size)
return pevent_read_number(event->pevent, ptr, size);
}
-void event_format__print(struct event_format *event,
- int cpu, void *data, int size)
+void event_format__fprintf(struct event_format *event,
+ int cpu, void *data, int size, FILE *fp)
{
struct pevent_record record;
struct trace_seq s;
@@ -125,10 +125,16 @@ void event_format__print(struct event_format *event,
trace_seq_init(&s);
pevent_event_info(&s, event, &record);
- trace_seq_do_printf(&s);
+ trace_seq_do_fprintf(&s, fp);
trace_seq_destroy(&s);
}
+void event_format__print(struct event_format *event,
+ int cpu, void *data, int size)
+{
+ return event_format__fprintf(event, cpu, data, size, stdout);
+}
+
void parse_proc_kallsyms(struct pevent *pevent,
char *file, unsigned int size __maybe_unused)
{
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 5c9bdd1591a9..9df61059a85d 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -43,7 +43,6 @@ static int stop_script_unsupported(void)
static void process_event_unsupported(union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
- struct thread *thread __maybe_unused,
struct addr_location *al __maybe_unused)
{
}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 52aaa19e1eb1..d5168f0be4ec 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -23,6 +23,9 @@ trace_event__tp_format(const char *sys, const char *name);
int bigendian(void);
+void event_format__fprintf(struct event_format *event,
+ int cpu, void *data, int size, FILE *fp);
+
void event_format__print(struct event_format *event,
int cpu, void *data, int size);
@@ -69,8 +72,7 @@ struct scripting_ops {
void (*process_event) (union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct thread *thread,
- struct addr_location *al);
+ struct addr_location *al);
int (*generate_script) (struct pevent *pevent, const char *outfile);
};
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index e3c40a520a25..7b09a443a280 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -266,7 +266,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
u64 *fde_count)
{
int ret = -EINVAL, fd;
- u64 offset = dso->data.frame_offset;
+ u64 offset = dso->data.eh_frame_hdr_offset;
if (offset == 0) {
fd = dso__data_fd(dso, machine);
@@ -275,7 +275,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
/* Check the .eh_frame section for unwinding info */
offset = elf_section_offset(fd, ".eh_frame_hdr");
- dso->data.frame_offset = offset;
+ dso->data.eh_frame_hdr_offset = offset;
}
if (offset)
@@ -291,7 +291,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
struct machine *machine, u64 *offset)
{
int fd;
- u64 ofs = dso->data.frame_offset;
+ u64 ofs = dso->data.debug_frame_offset;
if (ofs == 0) {
fd = dso__data_fd(dso, machine);
@@ -300,7 +300,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
/* Check the .debug_frame section for unwinding info */
ofs = elf_section_offset(fd, ".debug_frame");
- dso->data.frame_offset = ofs;
+ dso->data.debug_frame_offset = ofs;
}
*offset = ofs;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index b86744f29eef..4ee6d0d4c993 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -269,6 +269,13 @@ void dump_stack(void)
void dump_stack(void) {}
#endif
+void sighandler_dump_stack(int sig)
+{
+ psignal(sig, "perf");
+ dump_stack();
+ exit(sig);
+}
+
void get_term_dimensions(struct winsize *ws)
{
char *s = getenv("LINES");
@@ -303,13 +310,26 @@ void set_term_quiet_input(struct termios *old)
tcsetattr(0, TCSANOW, &tc);
}
-static void set_tracing_events_path(const char *mountpoint)
+static void set_tracing_events_path(const char *tracing, const char *mountpoint)
{
- snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
- mountpoint, "tracing/events");
+ snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s",
+ mountpoint, tracing, "events");
}
-const char *perf_debugfs_mount(const char *mountpoint)
+static const char *__perf_tracefs_mount(const char *mountpoint)
+{
+ const char *mnt;
+
+ mnt = tracefs_mount(mountpoint);
+ if (!mnt)
+ return NULL;
+
+ set_tracing_events_path("", mnt);
+
+ return mnt;
+}
+
+static const char *__perf_debugfs_mount(const char *mountpoint)
{
const char *mnt;
@@ -317,7 +337,20 @@ const char *perf_debugfs_mount(const char *mountpoint)
if (!mnt)
return NULL;
- set_tracing_events_path(mnt);
+ set_tracing_events_path("tracing/", mnt);
+
+ return mnt;
+}
+
+const char *perf_debugfs_mount(const char *mountpoint)
+{
+ const char *mnt;
+
+ mnt = __perf_tracefs_mount(mountpoint);
+ if (mnt)
+ return mnt;
+
+ mnt = __perf_debugfs_mount(mountpoint);
return mnt;
}
@@ -325,12 +358,19 @@ const char *perf_debugfs_mount(const char *mountpoint)
void perf_debugfs_set_path(const char *mntpt)
{
snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt);
- set_tracing_events_path(mntpt);
+ set_tracing_events_path("tracing/", mntpt);
+}
+
+static const char *find_tracefs(void)
+{
+ const char *path = __perf_tracefs_mount(NULL);
+
+ return path;
}
static const char *find_debugfs(void)
{
- const char *path = perf_debugfs_mount(NULL);
+ const char *path = __perf_debugfs_mount(NULL);
if (!path)
fprintf(stderr, "Your kernel does not support the debugfs filesystem");
@@ -344,6 +384,7 @@ static const char *find_debugfs(void)
*/
const char *find_tracing_dir(void)
{
+ const char *tracing_dir = "";
static char *tracing;
static int tracing_found;
const char *debugfs;
@@ -351,11 +392,15 @@ const char *find_tracing_dir(void)
if (tracing_found)
return tracing;
- debugfs = find_debugfs();
- if (!debugfs)
- return NULL;
+ debugfs = find_tracefs();
+ if (!debugfs) {
+ tracing_dir = "/tracing";
+ debugfs = find_debugfs();
+ if (!debugfs)
+ return NULL;
+ }
- if (asprintf(&tracing, "%s/tracing", debugfs) < 0)
+ if (asprintf(&tracing, "%s%s", debugfs, tracing_dir) < 0)
return NULL;
tracing_found = 1;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 027a5153495c..1ff23e04ad27 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -75,6 +75,7 @@
#include <linux/types.h>
#include <sys/ttydefaults.h>
#include <api/fs/debugfs.h>
+#include <api/fs/tracefs.h>
#include <termios.h>
#include <linux/bitops.h>
#include <termios.h>
@@ -276,6 +277,7 @@ char *ltrim(char *s);
char *rtrim(char *s);
void dump_stack(void);
+void sighandler_dump_stack(int sig);
extern unsigned int page_size;
extern int cacheline_size;
@@ -327,4 +329,8 @@ bool find_process(const char *name);
int gzip_decompress_to_file(const char *input, int output_fd);
#endif
+#ifdef HAVE_LZMA_SUPPORT
+int lzma_decompress_to_file(const char *input, int output_fd);
+#endif
+
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 92f1fd700344..db15c9d2049e 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -1156,7 +1156,7 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
/* Extract instance number */
if (isdigit((int)filename[ACPI_NAME_SIZE])) {
- sscanf(&filename[ACPI_NAME_SIZE], "%d", instance);
+ sscanf(&filename[ACPI_NAME_SIZE], "%u", instance);
} else if (strlen(filename) != ACPI_NAME_SIZE) {
return (AE_BAD_SIGNATURE);
} else {
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 3853a7350440..0b1fa290245a 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -146,6 +146,6 @@ void acpi_os_unmap_memory(void *where, acpi_size length)
acpi_size page_size;
page_size = acpi_os_get_page_size();
- offset = (acpi_physical_address) where % page_size;
+ offset = ACPI_TO_INTEGER(where) % page_size;
munmap((u8 *)where - offset, (length + offset));
}
diff --git a/tools/power/cpupower/utils/helpers/pci.c b/tools/power/cpupower/utils/helpers/pci.c
index 9690798e6446..8b278983cfc5 100644
--- a/tools/power/cpupower/utils/helpers/pci.c
+++ b/tools/power/cpupower/utils/helpers/pci.c
@@ -25,14 +25,21 @@
struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain, int bus,
int slot, int func, int vendor, int dev)
{
- struct pci_filter filter_nb_link = { domain, bus, slot, func,
- vendor, dev };
+ struct pci_filter filter_nb_link;
struct pci_dev *device;
*pacc = pci_alloc();
if (*pacc == NULL)
return NULL;
+ pci_filter_init(*pacc, &filter_nb_link);
+ filter_nb_link.domain = domain;
+ filter_nb_link.bus = bus;
+ filter_nb_link.slot = slot;
+ filter_nb_link.func = func;
+ filter_nb_link.vendor = vendor;
+ filter_nb_link.device = dev;
+
pci_init(*pacc);
pci_scan_bus(*pacc);
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index d1b3a361e526..4039854560d0 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -1,8 +1,12 @@
CC = $(CROSS_COMPILE)gcc
-BUILD_OUTPUT := $(PWD)
+BUILD_OUTPUT := $(CURDIR)
PREFIX := /usr
DESTDIR :=
+ifeq ("$(origin O)", "command line")
+ BUILD_OUTPUT := $(O)
+endif
+
turbostat : turbostat.c
CFLAGS += -Wall
CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index feea7ad9500b..05b8fc38dc8b 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -20,9 +20,11 @@ upon its completion.
The second method is to omit the command,
and turbostat displays statistics every 5 seconds.
The 5-second interval can be changed using the --interval option.
-
+.PP
Some information is not available on older processors.
.SS Options
+Options can be specified with a single or double '-', and only as much of the option
+name as is needed to disambiguate it from others. Note that options are case-sensitive.
\fB--Counter MSR#\fP shows the delta of the specified 64-bit MSR counter.
.PP
\fB--counter MSR#\fP shows the delta of the specified 32-bit MSR counter.
@@ -55,16 +57,20 @@ more than once may also enable internal turbostat debug information.
The \fBcommand\fP parameter forks \fBcommand\fP, and upon its exit,
displays the statistics gathered since it was forked.
.PP
-.SH FIELD DESCRIPTIONS
+.SH DEFAULT FIELD DESCRIPTIONS
.nf
-\fBPackage\fP processor package number.
-\fBCore\fP processor core number.
-\fBCPU\fP Linux CPU (logical processor) number.
-Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
+\fBCPU\fP Linux CPU (logical processor) number. On many systems the CPUs are not listed in numerical order: for efficiency, turbostat runs in topology order, so HT siblings appear together.
\fBAVG_MHz\fP number of cycles executed divided by time elapsed.
\fB%Busy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state.
\fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state).
\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval.
+.fi
+.PP
+.SH DEBUG FIELD DESCRIPTIONS
+.nf
+\fBPackage\fP processor package number.
+\fBCore\fP processor core number.
+Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology (HT).
\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.
\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
@@ -81,63 +87,76 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T
Without any parameters, turbostat displays statistics every 5 seconds.
(override interval with "-i sec" option, or specify a command
for turbostat to fork).
+.nf
+[root@hsw]# ./turbostat
+ CPU Avg_MHz %Busy Bzy_MHz TSC_MHz
+ - 488 12.51 3898 3498
+ 0 0 0.01 3885 3498
+ 4 3897 99.99 3898 3498
+ 1 0 0.00 3861 3498
+ 5 0 0.00 3882 3498
+ 2 1 0.02 3894 3498
+ 6 2 0.06 3898 3498
+ 3 0 0.00 3849 3498
+ 7 0 0.00 3877 3498
+
+.fi
+.SH DEBUG EXAMPLE
+The "--debug" option prints additional system information before measurements:
The first row of statistics is a summary for the entire system.
For residency % columns, the summary is a weighted average.
For Temperature columns, the summary is the column maximum.
For Watts columns, the summary is a system total.
Subsequent rows show per-CPU statistics.
-
-.nf
-[root@ivy]# ./turbostat
- Core CPU Avg_MHz %Busy Bzy_MHz TSC_MHz SMI CPU%c1 CPU%c3 CPU%c6 CPU%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
- - - 6 0.36 1596 3492 0 0.59 0.01 99.04 0.00 23 24 23.82 0.01 72.47 0.00 6.40 1.01 0.00
- 0 0 9 0.58 1596 3492 0 0.28 0.01 99.13 0.00 23 24 23.82 0.01 72.47 0.00 6.40 1.01 0.00
- 0 4 1 0.07 1596 3492 0 0.79
- 1 1 10 0.65 1596 3492 0 0.59 0.00 98.76 0.00 23
- 1 5 5 0.28 1596 3492 0 0.95
- 2 2 10 0.66 1596 3492 0 0.41 0.01 98.92 0.00 23
- 2 6 2 0.10 1597 3492 0 0.97
- 3 3 3 0.20 1596 3492 0 0.44 0.00 99.37 0.00 23
- 3 7 5 0.31 1596 3492 0 0.33
-.fi
-.SH DEBUG EXAMPLE
-The "--debug" option prints additional system information before measurements:
-
.nf
-turbostat version 4.0 10-Feb, 2015 - Len Brown <lenb@kernel.org>
-CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3a:9 (6:58:9)
+turbostat version 4.1 10-Feb, 2015 - Len Brown <lenb@kernel.org>
+CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3c:3 (6:60:3)
CPUID(6): APERF, DTS, PTM, EPB
-RAPL: 851 sec. Joule Counter Range, at 77 Watts
-cpu0: MSR_NHM_PLATFORM_INFO: 0x81010f0012300
-16 * 100 = 1600 MHz max efficiency
+RAPL: 3121 sec. Joule Counter Range, at 84 Watts
+cpu0: MSR_NHM_PLATFORM_INFO: 0x80838f3012300
+8 * 100 = 800 MHz max efficiency
35 * 100 = 3500 MHz TSC frequency
-cpu0: MSR_IA32_POWER_CTL: 0x0014005d (C1E auto-promotion: DISabled)
-cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6n)
+cpu0: MSR_IA32_POWER_CTL: 0x0004005d (C1E auto-promotion: DISabled)
+cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e000400 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, UNlocked: pkg-cstate-limit=0: pc0)
cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
37 * 100 = 3700 MHz max turbo 4 active cores
38 * 100 = 3800 MHz max turbo 3 active cores
39 * 100 = 3900 MHz max turbo 2 active cores
39 * 100 = 3900 MHz max turbo 1 active cores
cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced)
-cpu0: MSR_RAPL_POWER_UNIT: 0x000a1003 (0.125000 Watts, 0.000015 Joules, 0.000977 sec.)
-cpu0: MSR_PKG_POWER_INFO: 0x01e00268 (77 W TDP, RAPL 60 - 0 W, 0.000000 sec.)
-cpu0: MSR_PKG_POWER_LIMIT: 0x30000148268 (UNlocked)
-cpu0: PKG Limit #1: ENabled (77.000000 Watts, 1.000000 sec, clamp DISabled)
-cpu0: PKG Limit #2: DISabled (96.000000 Watts, 0.000977* sec, clamp DISabled)
+cpu0: MSR_CORE_PERF_LIMIT_REASONS, 0x31200000 (Active: ) (Logged: Auto-HWP, Amps, MultiCoreTurbo, Transitions, )
+cpu0: MSR_GFX_PERF_LIMIT_REASONS, 0x00000000 (Active: ) (Logged: )
+cpu0: MSR_RING_PERF_LIMIT_REASONS, 0x0d000000 (Active: ) (Logged: Amps, PkgPwrL1, PkgPwrL2, )
+cpu0: MSR_RAPL_POWER_UNIT: 0x000a0e03 (0.125000 Watts, 0.000061 Joules, 0.000977 sec.)
+cpu0: MSR_PKG_POWER_INFO: 0x000002a0 (84 W TDP, RAPL 0 - 0 W, 0.000000 sec.)
+cpu0: MSR_PKG_POWER_LIMIT: 0x428348001a82a0 (UNlocked)
+cpu0: PKG Limit #1: ENabled (84.000000 Watts, 8.000000 sec, clamp DISabled)
+cpu0: PKG Limit #2: ENabled (105.000000 Watts, 0.002441* sec, clamp DISabled)
cpu0: MSR_PP0_POLICY: 0
cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
cpu0: MSR_PP1_POLICY: 0
cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
-cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00691400 (105 C)
-cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884e0000 (27 C)
-cpu0: MSR_IA32_THERM_STATUS: 0x88580000 (17 C +/- 1)
-cpu1: MSR_IA32_THERM_STATUS: 0x885a0000 (15 C +/- 1)
-cpu2: MSR_IA32_THERM_STATUS: 0x88570000 (18 C +/- 1)
-cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1)
- ...
+cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00641400 (100 C)
+cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x88340800 (48 C)
+cpu0: MSR_IA32_THERM_STATUS: 0x88340000 (48 C +/- 1)
+cpu1: MSR_IA32_THERM_STATUS: 0x88440000 (32 C +/- 1)
+cpu2: MSR_IA32_THERM_STATUS: 0x88450000 (31 C +/- 1)
+cpu3: MSR_IA32_THERM_STATUS: 0x88490000 (27 C +/- 1)
+ Core CPU Avg_MHz %Busy Bzy_MHz TSC_MHz SMI CPU%c1 CPU%c3 CPU%c6 CPU%c7 CoreTmp PkgTmp PkgWatt CorWatt GFXWatt
+ - - 493 12.64 3898 3498 0 12.64 0.00 0.00 74.72 47 47 21.62 13.74 0.00
+ 0 0 4 0.11 3894 3498 0 99.89 0.00 0.00 0.00 47 47 21.62 13.74 0.00
+ 0 4 3897 99.98 3898 3498 0 0.02
+ 1 1 7 0.17 3887 3498 0 0.04 0.00 0.00 99.79 32
+ 1 5 0 0.00 3885 3498 0 0.21
+ 2 2 29 0.76 3895 3498 0 0.10 0.01 0.01 99.13 32
+ 2 6 2 0.06 3896 3498 0 0.80
+ 3 3 1 0.02 3832 3498 0 0.03 0.00 0.00 99.95 28
+ 3 7 0 0.00 3879 3498 0 0.04
+^C
+
.fi
The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
available at the minimum package voltage. The \fBTSC frequency\fP is the base
@@ -147,6 +166,9 @@ should be sustainable on all CPUs indefinitely, given nominal power and cooling.
The remaining rows show what maximum turbo frequency is possible
depending on the number of idle cores. Note that not all information is
available on all processors.
+.PP
+The --debug option adds additional columns to the measurement output, including CPU idle power-state residency and processor temperature sensor readings.
+See the field definitions above.
.SH FORK EXAMPLE
If turbostat is invoked with a command, it will fork that command
and output the statistics gathered when the command exits.
@@ -154,27 +176,23 @@ eg. Here a cycle soaker is run on 1 CPU (see %c0) for a few seconds
until ^C while the other CPUs are mostly idle:
.nf
-root@ivy: turbostat cat /dev/zero > /dev/null
+root@hsw: turbostat cat /dev/zero > /dev/null
^C
- Core CPU Avg_MHz %Busy Bzy_MHz TSC_MHz SMI CPU%c1 CPU%c3 CPU%c6 CPU%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
- - - 496 12.75 3886 3492 0 13.16 0.04 74.04 0.00 36 36 0.00 0.00 0.00 0.00 23.15 17.65 0.00
- 0 0 22 0.57 3830 3492 0 0.83 0.02 98.59 0.00 27 36 0.00 0.00 0.00 0.00 23.15 17.65 0.00
- 0 4 9 0.24 3829 3492 0 1.15
- 1 1 4 0.09 3783 3492 0 99.91 0.00 0.00 0.00 36
- 1 5 3880 99.82 3888 3492 0 0.18
- 2 2 17 0.44 3813 3492 0 0.77 0.04 98.75 0.00 28
- 2 6 12 0.32 3823 3492 0 0.89
- 3 3 16 0.43 3844 3492 0 0.63 0.11 98.84 0.00 30
- 3 7 4 0.11 3827 3492 0 0.94
-30.372243 sec
+ CPU Avg_MHz %Busy Bzy_MHz TSC_MHz
+ - 482 12.51 3854 3498
+ 0 0 0.01 1960 3498
+ 4 0 0.00 2128 3498
+ 1 0 0.00 3003 3498
+ 5 3854 99.98 3855 3498
+ 2 0 0.01 3504 3498
+ 6 3 0.08 3884 3498
+ 3 0 0.00 2553 3498
+ 7 0 0.00 2126 3498
+10.783983 sec
.fi
-Above the cycle soaker drives cpu5 up its 3.8 GHz turbo limit
-while the other processors are generally in various states of idle.
-
-Note that cpu1 and cpu5 are HT siblings within core1.
-As cpu5 is very busy, it prevents its sibling, cpu1,
-from entering a c-state deeper than c1.
+Above, the cycle soaker drives cpu5 up to its 3.9 GHz turbo limit.
+The first row shows the average MHz and %Busy across all the processors in the system.
Note that the Avg_MHz column reflects the total number of cycles executed
divided by the measurement interval. If the %Busy column is 100%,
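
The default columns documented above are simple functions of the APERF, MPERF and TSC deltas collected each interval. The sketch below is a simplified model of those relationships (the struct, its field names and the sample values are assumptions for illustration, not turbostat's internal code); with the values shown it reproduces a row much like the example output: 487 MHz average, 12.50 %Busy, 3898 MHz busy clock, 3498 MHz TSC.

#include <stdio.h>

/* Per-interval deltas of the APERF, MPERF and TSC counters. */
struct sample {
	unsigned long long aperf;	/* cycles actually executed */
	unsigned long long mperf;	/* reference cycles while in C0 */
	unsigned long long tsc;		/* reference cycles, always counting */
	double seconds;			/* length of the interval */
};

void print_columns(const struct sample *s)
{
	double avg_mhz  = s->aperf / s->seconds / 1e6;
	double busy_pct = 100.0 * s->mperf / s->tsc;
	double bzy_mhz  = 1e-6 * s->tsc / s->seconds * s->aperf / s->mperf;
	double tsc_mhz  = s->tsc / s->seconds / 1e6;

	/* Note that AVG_MHz == Bzy_MHz * %Busy / 100 by construction. */
	printf("%8.0f %8.2f %8.0f %8.0f\n", avg_mhz, busy_pct, bzy_mhz, tsc_mhz);
}

int main(void)
{
	/* Hypothetical one-second sample on a machine with a 3.5 GHz TSC. */
	struct sample s = { 487250000ull, 437250000ull, 3498000000ull, 1.0 };

	print_columns(&s);
	return 0;
}
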
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 2d089cac8580..bac98ca3d4ca 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -57,6 +57,7 @@ unsigned int do_pc3;
unsigned int do_pc6;
unsigned int do_pc7;
unsigned int do_c8_c9_c10;
+unsigned int do_skl_residency;
unsigned int do_slm_cstates;
unsigned int use_c1_residency_msr;
unsigned int has_aperf;
@@ -65,8 +66,6 @@ unsigned int units = 1000000; /* MHz etc */
unsigned int genuine_intel;
unsigned int has_invariant_tsc;
unsigned int do_nhm_platform_info;
-unsigned int do_nhm_turbo_ratio_limit;
-unsigned int do_ivt_turbo_ratio_limit;
unsigned int extra_msr_offset32;
unsigned int extra_msr_offset64;
unsigned int extra_delta_offset32;
@@ -84,11 +83,14 @@ unsigned int do_dts;
unsigned int do_ptm;
unsigned int tcc_activation_temp;
unsigned int tcc_activation_temp_override;
-double rapl_power_units, rapl_energy_units, rapl_time_units;
+double rapl_power_units, rapl_time_units;
+double rapl_dram_energy_units, rapl_energy_units;
double rapl_joule_counter_range;
unsigned int do_core_perf_limit_reasons;
unsigned int do_gfx_perf_limit_reasons;
unsigned int do_ring_perf_limit_reasons;
+unsigned int crystal_hz;
+unsigned long long tsc_hz;
#define RAPL_PKG (1 << 0)
/* 0x610 MSR_PKG_POWER_LIMIT */
@@ -101,18 +103,18 @@ unsigned int do_ring_perf_limit_reasons;
#define RAPL_DRAM (1 << 3)
/* 0x618 MSR_DRAM_POWER_LIMIT */
/* 0x619 MSR_DRAM_ENERGY_STATUS */
- /* 0x61c MSR_DRAM_POWER_INFO */
#define RAPL_DRAM_PERF_STATUS (1 << 4)
/* 0x61b MSR_DRAM_PERF_STATUS */
+#define RAPL_DRAM_POWER_INFO (1 << 5)
+ /* 0x61c MSR_DRAM_POWER_INFO */
-#define RAPL_CORES (1 << 5)
+#define RAPL_CORES (1 << 6)
/* 0x638 MSR_PP0_POWER_LIMIT */
/* 0x639 MSR_PP0_ENERGY_STATUS */
-#define RAPL_CORE_POLICY (1 << 6)
+#define RAPL_CORE_POLICY (1 << 7)
/* 0x63a MSR_PP0_POLICY */
-
-#define RAPL_GFX (1 << 7)
+#define RAPL_GFX (1 << 8)
/* 0x640 MSR_PP1_POWER_LIMIT */
/* 0x641 MSR_PP1_ENERGY_STATUS */
/* 0x642 MSR_PP1_POLICY */
@@ -159,6 +161,10 @@ struct pkg_data {
unsigned long long pc8;
unsigned long long pc9;
unsigned long long pc10;
+ unsigned long long pkg_wtd_core_c0;
+ unsigned long long pkg_any_core_c0;
+ unsigned long long pkg_any_gfxe_c0;
+ unsigned long long pkg_both_core_gfxe_c0;
unsigned int package_id;
unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
@@ -292,8 +298,7 @@ void print_header(void)
if (has_aperf)
outp += sprintf(outp, " Bzy_MHz");
outp += sprintf(outp, " TSC_MHz");
- if (do_smi)
- outp += sprintf(outp, " SMI");
+
if (extra_delta_offset32)
outp += sprintf(outp, " count 0x%03X", extra_delta_offset32);
if (extra_delta_offset64)
@@ -302,6 +307,13 @@ void print_header(void)
outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32);
if (extra_msr_offset64)
outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
+
+ if (!debug)
+ goto done;
+
+ if (do_smi)
+ outp += sprintf(outp, " SMI");
+
if (do_nhm_cstates)
outp += sprintf(outp, " CPU%%c1");
if (do_nhm_cstates && !do_slm_cstates)
@@ -316,6 +328,13 @@ void print_header(void)
if (do_ptm)
outp += sprintf(outp, " PkgTmp");
+ if (do_skl_residency) {
+ outp += sprintf(outp, " Totl%%C0");
+ outp += sprintf(outp, " Any%%C0");
+ outp += sprintf(outp, " GFX%%C0");
+ outp += sprintf(outp, " CPUGFX%%");
+ }
+
if (do_pc2)
outp += sprintf(outp, " Pkg%%pc2");
if (do_pc3)
@@ -359,6 +378,7 @@ void print_header(void)
outp += sprintf(outp, " time");
}
+ done:
outp += sprintf(outp, "\n");
}
@@ -396,6 +416,12 @@ int dump_counters(struct thread_data *t, struct core_data *c,
if (p) {
outp += sprintf(outp, "package: %d\n", p->package_id);
+
+ outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
+ outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
+ outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
+ outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
+
outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
if (do_pc3)
outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
@@ -487,10 +513,6 @@ int format_counters(struct thread_data *t, struct core_data *c,
/* TSC_MHz */
outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
- /* SMI */
- if (do_smi)
- outp += sprintf(outp, "%8d", t->smi_count);
-
/* delta */
if (extra_delta_offset32)
outp += sprintf(outp, " %11llu", t->extra_delta32);
@@ -506,6 +528,13 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (extra_msr_offset64)
outp += sprintf(outp, " 0x%016llx", t->extra_msr64);
+ if (!debug)
+ goto done;
+
+ /* SMI */
+ if (do_smi)
+ outp += sprintf(outp, "%8d", t->smi_count);
+
if (do_nhm_cstates) {
if (!skip_c1)
outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
@@ -531,9 +560,18 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
goto done;
+ /* PkgTmp */
if (do_ptm)
outp += sprintf(outp, "%8d", p->pkg_temp_c);
+ /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
+ if (do_skl_residency) {
+ outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
+ outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
+ outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
+ outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
+ }
+
if (do_pc2)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
if (do_pc3)
@@ -565,7 +603,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (do_rapl & RAPL_GFX)
outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
if (do_rapl & RAPL_DRAM)
- outp += sprintf(outp, fmt8, p->energy_dram * rapl_energy_units / interval_float);
+ outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
if (do_rapl & RAPL_PKG_PERF_STATUS)
outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
if (do_rapl & RAPL_DRAM_PERF_STATUS)
@@ -582,7 +620,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
p->energy_gfx * rapl_energy_units);
if (do_rapl & RAPL_DRAM)
outp += sprintf(outp, fmt8,
- p->energy_dram * rapl_energy_units);
+ p->energy_dram * rapl_dram_energy_units);
if (do_rapl & RAPL_PKG_PERF_STATUS)
outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
if (do_rapl & RAPL_DRAM_PERF_STATUS)
@@ -636,6 +674,13 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
void
delta_package(struct pkg_data *new, struct pkg_data *old)
{
+
+ if (do_skl_residency) {
+ old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
+ old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
+ old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
+ old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
+ }
old->pc2 = new->pc2 - old->pc2;
if (do_pc3)
old->pc3 = new->pc3 - old->pc3;
@@ -782,6 +827,11 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->c7 = 0;
c->core_temp_c = 0;
+ p->pkg_wtd_core_c0 = 0;
+ p->pkg_any_core_c0 = 0;
+ p->pkg_any_gfxe_c0 = 0;
+ p->pkg_both_core_gfxe_c0 = 0;
+
p->pc2 = 0;
if (do_pc3)
p->pc3 = 0;
@@ -826,6 +876,13 @@ int sum_counters(struct thread_data *t, struct core_data *c,
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
+ if (do_skl_residency) {
+ average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
+ average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
+ average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
+ average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
+ }
+
average.packages.pc2 += p->pc2;
if (do_pc3)
average.packages.pc3 += p->pc3;
@@ -873,6 +930,13 @@ void compute_average(struct thread_data *t, struct core_data *c,
average.cores.c6 /= topo.num_cores;
average.cores.c7 /= topo.num_cores;
+ if (do_skl_residency) {
+ average.packages.pkg_wtd_core_c0 /= topo.num_packages;
+ average.packages.pkg_any_core_c0 /= topo.num_packages;
+ average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
+ average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
+ }
+
average.packages.pc2 /= topo.num_packages;
if (do_pc3)
average.packages.pc3 /= topo.num_packages;
@@ -979,6 +1043,16 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
+ if (do_skl_residency) {
+ if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
+ return -10;
+ if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
+ return -11;
+ if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
+ return -12;
+ if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
+ return -13;
+ }
if (do_pc3)
if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
return -9;
@@ -1055,49 +1129,77 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
#define PCL_6R 9 /* PC6 Retention */
#define PCL__7 10 /* PC7 */
#define PCL_7S 11 /* PC7 Shrink */
-#define PCLUNL 12 /* Unlimited */
+#define PCL__8 12 /* PC8 */
+#define PCL__9 13 /* PC9 */
+#define PCLUNL 14 /* Unlimited */
int pkg_cstate_limit = PCLUKN;
char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
- "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "unlimited"};
+ "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
-int nhm_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL};
-int snb_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL};
-int hsw_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCLRSV, PCLUNL};
-int slv_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7};
-int amt_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7};
-int phi_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL};
+int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
-void print_verbose_header(void)
+static void
+dump_nhm_platform_info(void)
{
unsigned long long msr;
unsigned int ratio;
- if (!do_nhm_platform_info)
- return;
-
get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
ratio = (msr >> 40) & 0xFF;
- fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
+ fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
ratio, bclk, ratio * bclk);
ratio = (msr >> 8) & 0xFF;
- fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
+ fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
ratio, bclk, ratio * bclk);
get_msr(0, MSR_IA32_POWER_CTL, &msr);
fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
msr, msr & 0x2 ? "EN" : "DIS");
- if (!do_ivt_turbo_ratio_limit)
- goto print_nhm_turbo_ratio_limits;
+ return;
+}
+
+static void
+dump_hsw_turbo_ratio_limits(void)
+{
+ unsigned long long msr;
+ unsigned int ratio;
+
+ get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr);
+
+ fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
+
+ ratio = (msr >> 8) & 0xFF;
+ if (ratio)
+ fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
+ ratio, bclk, ratio * bclk);
+
+ ratio = (msr >> 0) & 0xFF;
+ if (ratio)
+ fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
+ ratio, bclk, ratio * bclk);
+ return;
+}
+
+static void
+dump_ivt_turbo_ratio_limits(void)
+{
+ unsigned long long msr;
+ unsigned int ratio;
- get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT, &msr);
+ get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr);
- fprintf(stderr, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
+ fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
ratio = (msr >> 56) & 0xFF;
if (ratio)
@@ -1138,30 +1240,18 @@ void print_verbose_header(void)
if (ratio)
fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
ratio, bclk, ratio * bclk);
+ return;
+}
-print_nhm_turbo_ratio_limits:
- get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
-
-#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
-#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
-
- fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
-
- fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
- (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
- (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
- (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
- (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
- (msr & (1 << 15)) ? "" : "UN",
- (unsigned int)msr & 7,
- pkg_cstate_limit_strings[pkg_cstate_limit]);
-
- if (!do_nhm_turbo_ratio_limit)
- return;
+static void
+dump_nhm_turbo_ratio_limits(void)
+{
+ unsigned long long msr;
+ unsigned int ratio;
- get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
+ get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr);
- fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
+ fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
ratio = (msr >> 56) & 0xFF;
if (ratio)
@@ -1202,7 +1292,30 @@ print_nhm_turbo_ratio_limits:
if (ratio)
fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
ratio, bclk, ratio * bclk);
+ return;
+}
+
+static void
+dump_nhm_cst_cfg(void)
+{
+ unsigned long long msr;
+
+ get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+
+#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
+#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
+ fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
+
+ fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
+ (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
+ (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
+ (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
+ (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
+ (msr & (1 << 15)) ? "" : "UN",
+ (unsigned int)msr & 7,
+ pkg_cstate_limit_strings[pkg_cstate_limit]);
+ return;
}
void free_all_buffers(void)
@@ -1483,7 +1596,8 @@ void check_dev_msr()
struct stat sb;
if (stat("/dev/cpu/0/msr", &sb))
- err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
+ if (system("/sbin/modprobe msr > /dev/null 2>&1"))
+ err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
}
void check_permissions()
@@ -1573,6 +1687,8 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
case 0x47: /* BDW */
case 0x4F: /* BDX */
case 0x56: /* BDX-DE */
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
pkg_cstate_limits = hsw_pkg_cstate_limits;
break;
case 0x37: /* BYT */
@@ -1590,7 +1706,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
}
get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
- pkg_cstate_limit = pkg_cstate_limits[msr & 0x7];
+ pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
return 1;
}
@@ -1615,12 +1731,49 @@ int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
switch (model) {
case 0x3E: /* IVB Xeon */
+ case 0x3F: /* HSW Xeon */
+ return 1;
+ default:
+ return 0;
+ }
+}
+int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+ if (!genuine_intel)
+ return 0;
+
+ if (family != 6)
+ return 0;
+
+ switch (model) {
+ case 0x3F: /* HSW Xeon */
return 1;
default:
return 0;
}
}
+static void
+dump_cstate_pstate_config_info(family, model)
+{
+ if (!do_nhm_platform_info)
+ return;
+
+ dump_nhm_platform_info();
+
+ if (has_hsw_turbo_ratio_limit(family, model))
+ dump_hsw_turbo_ratio_limits();
+
+ if (has_ivt_turbo_ratio_limit(family, model))
+ dump_ivt_turbo_ratio_limits();
+
+ if (has_nhm_turbo_ratio_limit(family, model))
+ dump_nhm_turbo_ratio_limits();
+
+ dump_nhm_cst_cfg();
+}
+
+
/*
* print_epb()
* Decode the ENERGY_PERF_BIAS MSR
@@ -1690,35 +1843,35 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
- (msr & 1 << 0) ? "PROCHOT, " : "",
- (msr & 1 << 1) ? "ThermStatus, " : "",
- (msr & 1 << 2) ? "bit2, " : "",
- (msr & 1 << 4) ? "Graphics, " : "",
- (msr & 1 << 5) ? "Auto-HWP, " : "",
- (msr & 1 << 6) ? "VR-Therm, " : "",
- (msr & 1 << 8) ? "Amps, " : "",
- (msr & 1 << 9) ? "CorePwr, " : "",
- (msr & 1 << 10) ? "PkgPwrL1, " : "",
- (msr & 1 << 11) ? "PkgPwrL2, " : "",
- (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
- (msr & 1 << 13) ? "Transitions, " : "",
+ (msr & 1 << 15) ? "bit15, " : "",
(msr & 1 << 14) ? "bit14, " : "",
- (msr & 1 << 15) ? "bit15, " : "");
+ (msr & 1 << 13) ? "Transitions, " : "",
+ (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
+ (msr & 1 << 11) ? "PkgPwrL2, " : "",
+ (msr & 1 << 10) ? "PkgPwrL1, " : "",
+ (msr & 1 << 9) ? "CorePwr, " : "",
+ (msr & 1 << 8) ? "Amps, " : "",
+ (msr & 1 << 6) ? "VR-Therm, " : "",
+ (msr & 1 << 5) ? "Auto-HWP, " : "",
+ (msr & 1 << 4) ? "Graphics, " : "",
+ (msr & 1 << 2) ? "bit2, " : "",
+ (msr & 1 << 1) ? "ThermStatus, " : "",
+ (msr & 1 << 0) ? "PROCHOT, " : "");
fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
- (msr & 1 << 16) ? "PROCHOT, " : "",
- (msr & 1 << 17) ? "ThermStatus, " : "",
- (msr & 1 << 18) ? "bit18, " : "",
- (msr & 1 << 20) ? "Graphics, " : "",
- (msr & 1 << 21) ? "Auto-HWP, " : "",
- (msr & 1 << 22) ? "VR-Therm, " : "",
- (msr & 1 << 24) ? "Amps, " : "",
- (msr & 1 << 25) ? "CorePwr, " : "",
- (msr & 1 << 26) ? "PkgPwrL1, " : "",
- (msr & 1 << 27) ? "PkgPwrL2, " : "",
- (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
- (msr & 1 << 29) ? "Transitions, " : "",
+ (msr & 1 << 31) ? "bit31, " : "",
(msr & 1 << 30) ? "bit30, " : "",
- (msr & 1 << 31) ? "bit31, " : "");
+ (msr & 1 << 29) ? "Transitions, " : "",
+ (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
+ (msr & 1 << 27) ? "PkgPwrL2, " : "",
+ (msr & 1 << 26) ? "PkgPwrL1, " : "",
+ (msr & 1 << 25) ? "CorePwr, " : "",
+ (msr & 1 << 24) ? "Amps, " : "",
+ (msr & 1 << 22) ? "VR-Therm, " : "",
+ (msr & 1 << 21) ? "Auto-HWP, " : "",
+ (msr & 1 << 20) ? "Graphics, " : "",
+ (msr & 1 << 18) ? "bit18, " : "",
+ (msr & 1 << 17) ? "ThermStatus, " : "",
+ (msr & 1 << 16) ? "PROCHOT, " : "");
}
if (do_gfx_perf_limit_reasons) {
@@ -1784,6 +1937,25 @@ double get_tdp(model)
}
}
+/*
+ * rapl_dram_energy_units_probe()
+ * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
+ */
+static double
+rapl_dram_energy_units_probe(int model, double rapl_energy_units)
+{
+ /* only called for genuine_intel, family 6 */
+
+ switch (model) {
+ case 0x3F: /* HSX */
+ case 0x4F: /* BDX */
+ case 0x56: /* BDX-DE */
+ return (rapl_dram_energy_units = 15.3 / 1000000);
+ default:
+ return (rapl_energy_units);
+ }
+}
+
/*
* rapl_probe()
@@ -1812,14 +1984,18 @@ void rapl_probe(unsigned int family, unsigned int model)
case 0x47: /* BDW */
do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
break;
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
+ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+ break;
case 0x3F: /* HSX */
case 0x4F: /* BDX */
case 0x56: /* BDX-DE */
- do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
break;
case 0x2D:
case 0x3E:
- do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
break;
case 0x37: /* BYT */
case 0x4D: /* AVN */
@@ -1839,6 +2015,8 @@ void rapl_probe(unsigned int family, unsigned int model)
else
rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
+ rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
+
time_unit = msr >> 16 & 0xF;
if (time_unit == 0)
time_unit = 0xA;
@@ -2009,19 +2187,18 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
((msr >> 48) & 1) ? "EN" : "DIS");
}
- if (do_rapl & RAPL_DRAM) {
+ if (do_rapl & RAPL_DRAM_POWER_INFO) {
if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
return -6;
-
fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
cpu, msr,
((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
-
-
+ }
+ if (do_rapl & RAPL_DRAM) {
if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
return -9;
fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
@@ -2090,6 +2267,8 @@ int has_snb_msrs(unsigned int family, unsigned int model)
case 0x47: /* BDW */
case 0x4F: /* BDX */
case 0x56: /* BDX-DE */
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
return 1;
}
return 0;
@@ -2110,11 +2289,35 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
switch (model) {
case 0x45: /* HSW */
case 0x3D: /* BDW */
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
return 1;
}
return 0;
}
+/*
+ * SKL adds support for additional MSRs:
+ *
+ * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
+ * MSR_PKG_ANY_CORE_C0_RES 0x00000659
+ * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
+ * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
+ */
+int has_skl_msrs(unsigned int family, unsigned int model)
+{
+ if (!genuine_intel)
+ return 0;
+
+ switch (model) {
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
+ return 1;
+ }
+ return 0;
+}
+
+
int is_slm(unsigned int family, unsigned int model)
{
@@ -2228,7 +2431,7 @@ guess:
return 0;
}
-void check_cpuid()
+void process_cpuid()
{
unsigned int eax, ebx, ecx, edx, max_level;
unsigned int fms, family, model, stepping;
@@ -2294,6 +2497,41 @@ void check_cpuid()
do_ptm ? "" : "No ",
has_epb ? "" : "No ");
+ if (max_level > 0x15) {
+ unsigned int eax_crystal;
+ unsigned int ebx_tsc;
+
+ /*
+ * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
+ */
+ eax_crystal = ebx_tsc = crystal_hz = edx = 0;
+ __get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx);
+
+ if (ebx_tsc != 0) {
+
+ if (debug && (ebx != 0))
+ fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
+ eax_crystal, ebx_tsc, crystal_hz);
+
+ if (crystal_hz == 0)
+ switch(model) {
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
+ crystal_hz = 24000000; /* 24 MHz */
+ break;
+ default:
+ crystal_hz = 0;
+ }
+
+ if (crystal_hz) {
+ tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
+ if (debug)
+ fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
+ tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal);
+ }
+ }
+ }
+
do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
do_snb_cstates = has_snb_msrs(family, model);
do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
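
The CPUID 0x15 block added above derives the TSC rate from the crystal clock: leaf 0x15 returns the TSC/crystal ratio as EBX/EAX and, when non-zero, the crystal frequency in ECX, with a 24 MHz fallback hard-coded for SKL. A standalone sketch of that arithmetic follows; the ratio values are made up for illustration.

#include <stdio.h>

/* tsc_hz = crystal_hz * (ebx / eax), per CPUID leaf 0x15. */
unsigned long long tsc_hz_from_crystal(unsigned int crystal_hz,
				       unsigned int ebx_tsc,
				       unsigned int eax_crystal)
{
	return (unsigned long long)crystal_hz * ebx_tsc / eax_crystal;
}

int main(void)
{
	/* Hypothetical Skylake-style values: 24 MHz crystal, ratio 292/2. */
	unsigned long long hz = tsc_hz_from_crystal(24000000, 292, 2);

	printf("TSC: %llu MHz\n", hz / 1000000);	/* prints "TSC: 3504 MHz" */
	return 0;
}
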
@@ -2301,18 +2539,19 @@ void check_cpuid()
do_pc6 = (pkg_cstate_limit >= PCL__6);
do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
do_c8_c9_c10 = has_hsw_msrs(family, model);
+ do_skl_residency = has_skl_msrs(family, model);
do_slm_cstates = is_slm(family, model);
bclk = discover_bclk(family, model);
- do_nhm_turbo_ratio_limit = do_nhm_platform_info && has_nhm_turbo_ratio_limit(family, model);
- do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model);
rapl_probe(family, model);
perf_limit_reasons_probe(family, model);
+ if (debug)
+ dump_cstate_pstate_config_info();
+
return;
}
-
void help()
{
fprintf(stderr,
@@ -2428,14 +2667,14 @@ void topology_probe()
if (debug > 1)
fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
max_core_id, topo.num_cores_per_pkg);
- if (!summary_only && topo.num_cores_per_pkg > 1)
+ if (debug && !summary_only && topo.num_cores_per_pkg > 1)
show_core = 1;
topo.num_packages = max_package_id + 1;
if (debug > 1)
fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
max_package_id, topo.num_packages);
- if (!summary_only && topo.num_packages > 1)
+ if (debug && !summary_only && topo.num_packages > 1)
show_pkg = 1;
topo.num_threads_per_core = max_siblings;
@@ -2550,14 +2789,11 @@ void turbostat_init()
{
check_dev_msr();
check_permissions();
- check_cpuid();
+ process_cpuid();
setup_all_buffers();
if (debug)
- print_verbose_header();
-
- if (debug)
for_all_cpus(print_epb, ODD_COUNTERS);
if (debug)
@@ -2634,7 +2870,7 @@ int get_and_dump_counters(void)
}
void print_version() {
- fprintf(stderr, "turbostat version 4.1 10-Feb, 2015"
+ fprintf(stderr, "turbostat version 4.5 2 Apr, 2015"
" - Len Brown <lenb@kernel.org>\n");
}
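
Among the turbostat.c changes above, rapl_dram_energy_units_probe() returns a fixed 15.3 microjoules per DRAM energy count on HSX/BDX/BDX-DE instead of the unit read from MSR_RAPL_POWER_UNIT. The sketch below decodes the sample MSR value 0x000a0e03 shown in the --debug output earlier in this patch and then applies that override; the bit layout follows the existing rapl_probe() code, while the standalone framing is an illustration.

#include <stdio.h>

int main(void)
{
	unsigned long long msr = 0x000a0e03;	/* MSR_RAPL_POWER_UNIT from the sample output */
	int is_hsx_or_bdx = 1;			/* pretend this is a HSX/BDX/BDX-DE part */

	double power_units  = 1.0 / (1 << (msr & 0xF));		/* 0.125 W */
	double energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));	/* ~0.000061 J */
	double time_units   = 1.0 / (1 << (msr >> 16 & 0xF));	/* ~0.000977 s */

	/* HSX/BDX DRAM counters tick in fixed 15.3 uJ units regardless of the MSR. */
	double dram_energy_units = is_hsx_or_bdx ? 15.3 / 1000000 : energy_units;

	printf("%f Watts, %f Joules, %f sec, DRAM %f Joules\n",
	       power_units, energy_units, time_units, dram_energy_units);
	return 0;
}
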
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 2958fe9a74e9..5ad042345ab9 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -12,37 +12,37 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR
export CFLAGS
-TARGETS = pmu copyloops mm tm primitives stringloops
+SUB_DIRS = pmu copyloops mm tm primitives stringloops vphn switch_endian
endif
-all: $(TARGETS)
+all: $(SUB_DIRS)
-$(TARGETS):
+$(SUB_DIRS):
$(MAKE) -k -C $@ all
include ../lib.mk
override define RUN_TESTS
- @for TARGET in $(TARGETS); do \
+ @for TARGET in $(SUB_DIRS); do \
$(MAKE) -C $$TARGET run_tests; \
done;
endef
override define INSTALL_RULE
- @for TARGET in $(TARGETS); do \
+ @for TARGET in $(SUB_DIRS); do \
$(MAKE) -C $$TARGET install; \
done;
endef
override define EMIT_TESTS
- @for TARGET in $(TARGETS); do \
+ @for TARGET in $(SUB_DIRS); do \
$(MAKE) -s -C $$TARGET emit_tests; \
done;
endef
clean:
- @for TARGET in $(TARGETS); do \
+ @for TARGET in $(SUB_DIRS); do \
$(MAKE) -C $$TARGET clean; \
done;
rm -f tags
@@ -50,4 +50,4 @@ clean:
tags:
find . -name '*.c' -o -name '*.h' | xargs ctags
-.PHONY: tags $(TARGETS)
+.PHONY: tags $(SUB_DIRS)
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
index c05023514ce8..384843ea0d40 100644
--- a/tools/testing/selftests/powerpc/copyloops/Makefile
+++ b/tools/testing/selftests/powerpc/copyloops/Makefile
@@ -2,6 +2,7 @@
CFLAGS += -m64
CFLAGS += -I$(CURDIR)
CFLAGS += -D SELFTEST
+CFLAGS += -maltivec
# Use our CFLAGS for the implicit .S rule
ASFLAGS = $(CFLAGS)
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
index d1dc37425510..50ae7d2091ce 100644
--- a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
+++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
@@ -4,39 +4,6 @@
#define r1 1
-#define vr0 0
-#define vr1 1
-#define vr2 2
-#define vr3 3
-#define vr4 4
-#define vr5 5
-#define vr6 6
-#define vr7 7
-#define vr8 8
-#define vr9 9
-#define vr10 10
-#define vr11 11
-#define vr12 12
-#define vr13 13
-#define vr14 14
-#define vr15 15
-#define vr16 16
-#define vr17 17
-#define vr18 18
-#define vr19 19
-#define vr20 20
-#define vr21 21
-#define vr22 22
-#define vr23 23
-#define vr24 24
-#define vr25 25
-#define vr26 26
-#define vr27 27
-#define vr28 28
-#define vr29 29
-#define vr30 30
-#define vr31 31
-
#define R14 r14
#define R15 r15
#define R16 r16
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 8ebc58a09311..f7997affd143 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -11,6 +11,10 @@
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <link.h>
+#include <sys/stat.h>
#include "subunit.h"
#include "utils.h"
@@ -112,3 +116,46 @@ int test_harness(int (test_function)(void), char *name)
return rc;
}
+
+static char auxv[4096];
+
+void *get_auxv_entry(int type)
+{
+ ElfW(auxv_t) *p;
+ void *result;
+ ssize_t num;
+ int fd;
+
+ fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd == -1) {
+ perror("open");
+ return NULL;
+ }
+
+ result = NULL;
+
+ num = read(fd, auxv, sizeof(auxv));
+ if (num < 0) {
+ perror("read");
+ goto out;
+ }
+
+ if (num > sizeof(auxv)) {
+ printf("Overflowed auxv buffer\n");
+ goto out;
+ }
+
+ p = (ElfW(auxv_t) *)auxv;
+
+ while (p->a_type != AT_NULL) {
+ if (p->a_type == type) {
+ result = (void *)p->a_un.a_val;
+ break;
+ }
+
+ p++;
+ }
+out:
+ close(fd);
+ return result;
+}
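
The get_auxv_entry() helper added to harness.c above parses /proc/self/auxv by hand, which works regardless of libc version. Where glibc 2.16+ can be assumed, the HTM check that tm-syscall.c performs later in this patch can also be written with getauxval(3); a hedged sketch (the fallback #defines are copied constants, not part of the selftest):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_HWCAP2
#define AT_HWCAP2 26			/* from <linux/auxvec.h> */
#endif
#ifndef PPC_FEATURE2_HTM
#define PPC_FEATURE2_HTM 0x40000000	/* from <asm/cputable.h> */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & PPC_FEATURE2_HTM)
		printf("Hardware transactional memory is available\n");
	else
		printf("No HTM support, test would be skipped\n");
	return 0;
}
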
diff --git a/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c b/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c
index 3d8e5b033e1d..49003674de4f 100644
--- a/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c
+++ b/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c
@@ -21,9 +21,13 @@ static int test_body(void)
* Typically the mmap will fail because no huge pages are
* allocated on the system. But if there are huge pages
* allocated the mmap will succeed. That's fine too, we just
- * munmap here before continuing.
+ * munmap here before continuing. munmap() length of
+ * MAP_HUGETLB memory must be hugepage aligned.
*/
- munmap(addr, SIZE);
+ if (munmap(addr, SIZE)) {
+ perror("munmap");
+ return 1;
+ }
}
p = mmap(addr, SIZE, PROT_READ | PROT_WRITE,
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 5a161175bbd4..a9099d9f8f39 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -26,7 +26,7 @@ override define EMIT_TESTS
$(MAKE) -s -C ebb emit_tests
endef
-DEFAULT_INSTALL := $(INSTALL_RULE)
+DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
override define INSTALL_RULE
$(DEFAULT_INSTALL_RULE)
$(MAKE) -C ebb install
diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c
index 9768dea37bf3..a07104c2afe6 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.c
+++ b/tools/testing/selftests/powerpc/pmu/lib.c
@@ -5,15 +5,10 @@
#define _GNU_SOURCE /* For CPU_ZERO etc. */
-#include <elf.h>
#include <errno.h>
-#include <fcntl.h>
-#include <link.h>
#include <sched.h>
#include <setjmp.h>
#include <stdlib.h>
-#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/wait.h>
#include "utils.h"
@@ -256,45 +251,3 @@ out:
return rc;
}
-static char auxv[4096];
-
-void *get_auxv_entry(int type)
-{
- ElfW(auxv_t) *p;
- void *result;
- ssize_t num;
- int fd;
-
- fd = open("/proc/self/auxv", O_RDONLY);
- if (fd == -1) {
- perror("open");
- return NULL;
- }
-
- result = NULL;
-
- num = read(fd, auxv, sizeof(auxv));
- if (num < 0) {
- perror("read");
- goto out;
- }
-
- if (num > sizeof(auxv)) {
- printf("Overflowed auxv buffer\n");
- goto out;
- }
-
- p = (ElfW(auxv_t) *)auxv;
-
- while (p->a_type != AT_NULL) {
- if (p->a_type == type) {
- result = (void *)p->a_un.a_val;
- break;
- }
-
- p++;
- }
-out:
- close(fd);
- return result;
-}
diff --git a/tools/testing/selftests/powerpc/pmu/lib.h b/tools/testing/selftests/powerpc/pmu/lib.h
index 0f0339c8a6f6..ca5d72ae3be6 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.h
+++ b/tools/testing/selftests/powerpc/pmu/lib.h
@@ -29,7 +29,6 @@ extern int notify_parent(union pipe write_pipe);
extern int notify_parent_of_error(union pipe write_pipe);
extern pid_t eat_cpu(int (test_function)(void));
extern bool require_paranoia_below(int level);
-extern void *get_auxv_entry(int type);
struct addr_range {
uint64_t first, last;
diff --git a/tools/testing/selftests/powerpc/switch_endian/.gitignore b/tools/testing/selftests/powerpc/switch_endian/.gitignore
new file mode 100644
index 000000000000..89e762eab676
--- /dev/null
+++ b/tools/testing/selftests/powerpc/switch_endian/.gitignore
@@ -0,0 +1,2 @@
+switch_endian_test
+check-reversed.S
diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile
new file mode 100644
index 000000000000..081473db22b7
--- /dev/null
+++ b/tools/testing/selftests/powerpc/switch_endian/Makefile
@@ -0,0 +1,24 @@
+CC := $(CROSS_COMPILE)gcc
+PROGS := switch_endian_test
+
+ASFLAGS += -O2 -Wall -g -nostdlib -m64
+
+all: $(PROGS)
+
+switch_endian_test: check-reversed.S
+
+check-reversed.o: check.o
+ $(CROSS_COMPILE)objcopy -j .text --reverse-bytes=4 -O binary $< $@
+
+check-reversed.S: check-reversed.o
+ hexdump -v -e '/1 ".byte 0x%02X\n"' $< > $@
+
+run_tests: all
+ @-for PROG in $(PROGS); do \
+ ./$$PROG; \
+ done;
+
+clean:
+ rm -f $(PROGS) *.o check-reversed.S
+
+.PHONY: all run_tests clean
diff --git a/tools/testing/selftests/powerpc/switch_endian/check.S b/tools/testing/selftests/powerpc/switch_endian/check.S
new file mode 100644
index 000000000000..e2484d2c24f4
--- /dev/null
+++ b/tools/testing/selftests/powerpc/switch_endian/check.S
@@ -0,0 +1,100 @@
+#include "common.h"
+
+/*
+ * Checks that registers contain what we expect, ie. they were not clobbered by
+ * the syscall.
+ *
+ * r15: pattern to check registers against.
+ *
+ * At the end r3 == 0 if everything's OK.
+ */
+ nop # guaranteed to be illegal in reverse-endian
+ mr r9,r15
+ cmpd r9,r3 # check r3
+ bne 1f
+ addi r9,r15,4 # check r4
+ cmpd r9,r4
+ bne 1f
+ lis r9,0x00FF # check CR
+ ori r9,r9,0xF000
+ mfcr r10
+ and r10,r10,r9
+ cmpw r9,r10
+ addi r9,r15,34
+ bne 1f
+ addi r9,r15,32 # check LR
+ mflr r10
+ cmpd r9,r10
+ bne 1f
+ addi r9,r15,5 # check r5
+ cmpd r9,r5
+ bne 1f
+ addi r9,r15,6 # check r6
+ cmpd r9,r6
+ bne 1f
+ addi r9,r15,7 # check r7
+ cmpd r9,r7
+ bne 1f
+ addi r9,r15,8 # check r8
+ cmpd r9,r8
+ bne 1f
+ addi r9,r15,13 # check r13
+ cmpd r9,r13
+ bne 1f
+ addi r9,r15,14 # check r14
+ cmpd r9,r14
+ bne 1f
+ addi r9,r15,16 # check r16
+ cmpd r9,r16
+ bne 1f
+ addi r9,r15,17 # check r17
+ cmpd r9,r17
+ bne 1f
+ addi r9,r15,18 # check r18
+ cmpd r9,r18
+ bne 1f
+ addi r9,r15,19 # check r19
+ cmpd r9,r19
+ bne 1f
+ addi r9,r15,20 # check r20
+ cmpd r9,r20
+ bne 1f
+ addi r9,r15,21 # check r21
+ cmpd r9,r21
+ bne 1f
+ addi r9,r15,22 # check r22
+ cmpd r9,r22
+ bne 1f
+ addi r9,r15,23 # check r23
+ cmpd r9,r23
+ bne 1f
+ addi r9,r15,24 # check r24
+ cmpd r9,r24
+ bne 1f
+ addi r9,r15,25 # check r25
+ cmpd r9,r25
+ bne 1f
+ addi r9,r15,26 # check r26
+ cmpd r9,r26
+ bne 1f
+ addi r9,r15,27 # check r27
+ cmpd r9,r27
+ bne 1f
+ addi r9,r15,28 # check r28
+ cmpd r9,r28
+ bne 1f
+ addi r9,r15,29 # check r29
+ cmpd r9,r29
+ bne 1f
+ addi r9,r15,30 # check r30
+ cmpd r9,r30
+ bne 1f
+ addi r9,r15,31 # check r31
+ cmpd r9,r31
+ bne 1f
+ b 2f
+1: mr r3, r9
+ li r0, __NR_exit
+ sc
+2: li r0, __NR_switch_endian
+ nop
diff --git a/tools/testing/selftests/powerpc/switch_endian/common.h b/tools/testing/selftests/powerpc/switch_endian/common.h
new file mode 100644
index 000000000000..69e399698c64
--- /dev/null
+++ b/tools/testing/selftests/powerpc/switch_endian/common.h
@@ -0,0 +1,6 @@
+#include <ppc-asm.h>
+#include <asm/unistd.h>
+
+#ifndef __NR_switch_endian
+#define __NR_switch_endian 363
+#endif
diff --git a/tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S b/tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S
new file mode 100644
index 000000000000..ef7c971abb67
--- /dev/null
+++ b/tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S
@@ -0,0 +1,81 @@
+#include "common.h"
+
+ .data
+ .balign 8
+message:
+ .ascii "success: switch_endian_test\n\0"
+
+ .section ".toc"
+ .balign 8
+pattern:
+ .llong 0x5555AAAA5555AAAA
+
+ .text
+FUNC_START(_start)
+ /* Load the pattern */
+ ld r15, pattern@TOC(%r2)
+
+ /* Setup CR, only CR2-CR4 are maintained */
+ lis r3, 0x00FF
+ ori r3, r3, 0xF000
+ mtcr r3
+
+ /* Load the pattern slightly modified into the registers */
+ mr r3, r15
+ addi r4, r15, 4
+
+ addi r5, r15, 32
+ mtlr r5
+
+ addi r5, r15, 5
+ addi r6, r15, 6
+ addi r7, r15, 7
+ addi r8, r15, 8
+
+ /* r9 - r12 are clobbered */
+
+ addi r13, r15, 13
+ addi r14, r15, 14
+
+ /* Skip r15 we're using it */
+
+ addi r16, r15, 16
+ addi r17, r15, 17
+ addi r18, r15, 18
+ addi r19, r15, 19
+ addi r20, r15, 20
+ addi r21, r15, 21
+ addi r22, r15, 22
+ addi r23, r15, 23
+ addi r24, r15, 24
+ addi r25, r15, 25
+ addi r26, r15, 26
+ addi r27, r15, 27
+ addi r28, r15, 28
+ addi r29, r15, 29
+ addi r30, r15, 30
+ addi r31, r15, 31
+
+ /*
+ * Call the syscall to switch endian.
+ * It clobbers r9-r12, XER, CTR and CR0-1,5-7.
+ */
+ li r0, __NR_switch_endian
+ sc
+
+#include "check-reversed.S"
+
+ /* Flip back, r0 already has the switch syscall number */
+ .long 0x02000044 /* sc */
+
+#include "check.S"
+
+ li r0, __NR_write
+ li r3, 1 /* stdout */
+ ld r4, message@got(%r2)
+ li r5, 28 /* strlen(message3) */
+ sc
+ li r0, __NR_exit
+ li r3, 0
+ sc
+ b .
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 33d02cc54a3e..2699635d2cd9 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -1 +1,2 @@
tm-resched-dscr
+tm-syscall
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 34f2ec634b40..6bff955e1d55 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -4,6 +4,9 @@ all: $(TEST_PROGS)
$(TEST_PROGS): ../harness.c
+tm-syscall: tm-syscall-asm.S
+tm-syscall: CFLAGS += -mhtm
+
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
new file mode 100644
index 000000000000..431f61ae2368
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
@@ -0,0 +1,27 @@
+#include <ppc-asm.h>
+#include <asm/unistd.h>
+
+ .text
+FUNC_START(getppid_tm_active)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ sc
+ tend.
+ blr
+1:
+ li r3, -1
+ blr
+
+FUNC_START(getppid_tm_suspended)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ tsuspend.
+ sc
+ tresume.
+ tend.
+ blr
+1:
+ li r3, -1
+ blr
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c
new file mode 100644
index 000000000000..3ed8d4b252fa
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015, Sam Bobroff, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Test the kernel's system call code to ensure that a system call
+ * made from within an active HTM transaction is aborted with the
+ * correct failure code.
+ * Conversely, ensure that a system call made from within a
+ * suspended transaction can succeed.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <asm/tm.h>
+#include <asm/cputable.h>
+#include <linux/auxvec.h>
+#include <sys/time.h>
+#include <stdlib.h>
+
+#include "utils.h"
+
+extern int getppid_tm_active(void);
+extern int getppid_tm_suspended(void);
+
+unsigned retries = 0;
+
+#define TEST_DURATION 10 /* seconds */
+#define TM_RETRIES 100
+
+long failure_code(void)
+{
+ return __builtin_get_texasru() >> 24;
+}
+
+bool failure_is_persistent(void)
+{
+ return (failure_code() & TM_CAUSE_PERSISTENT) == TM_CAUSE_PERSISTENT;
+}
+
+bool failure_is_syscall(void)
+{
+ return (failure_code() & TM_CAUSE_SYSCALL) == TM_CAUSE_SYSCALL;
+}
+
+pid_t getppid_tm(bool suspend)
+{
+ int i;
+ pid_t pid;
+
+ for (i = 0; i < TM_RETRIES; i++) {
+ if (suspend)
+ pid = getppid_tm_suspended();
+ else
+ pid = getppid_tm_active();
+
+ if (pid >= 0)
+ return pid;
+
+ if (failure_is_persistent()) {
+ if (failure_is_syscall())
+ return -1;
+
+ printf("Unexpected persistent transaction failure.\n");
+ printf("TEXASR 0x%016lx, TFIAR 0x%016lx.\n",
+ __builtin_get_texasr(), __builtin_get_tfiar());
+ exit(-1);
+ }
+
+ retries++;
+ }
+
+ printf("Exceeded limit of %d temporary transaction failures.\n", TM_RETRIES);
+ printf("TEXASR 0x%016lx, TFIAR 0x%016lx.\n",
+ __builtin_get_texasr(), __builtin_get_tfiar());
+
+ exit(-1);
+}
+
+int tm_syscall(void)
+{
+ unsigned count = 0;
+ struct timeval end, now;
+
+ SKIP_IF(!((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_HTM));
+ setbuf(stdout, NULL);
+
+ printf("Testing transactional syscalls for %d seconds...\n", TEST_DURATION);
+
+ gettimeofday(&end, NULL);
+ now.tv_sec = TEST_DURATION;
+ now.tv_usec = 0;
+ timeradd(&end, &now, &end);
+
+ for (count = 0; timercmp(&now, &end, <); count++) {
+ /*
+ * Test a syscall within a suspended transaction and verify
+ * that it succeeds.
+ */
+ FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+
+ /*
+ * Test a syscall within an active transaction and verify that
+ * it fails with the correct failure code.
+ */
+ FAIL_IF(getppid_tm(false) != -1); /* Should fail... */
+ FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+ FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+ gettimeofday(&now, 0);
+ }
+
+ printf("%d active and suspended transactions behaved correctly.\n", count);
+ printf("(There were %d transaction retries.)\n", retries);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(tm_syscall, "tm_syscall");
+}
diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/utils.h
index a93777ae0684..b7d41086bb0a 100644
--- a/tools/testing/selftests/powerpc/utils.h
+++ b/tools/testing/selftests/powerpc/utils.h
@@ -15,11 +15,12 @@ typedef signed long long s64;
/* Just for familiarity */
typedef uint32_t u32;
+typedef uint16_t u16;
typedef uint8_t u8;
int test_harness(int (test_function)(void), char *name);
-
+extern void *get_auxv_entry(int type);
/* Yes, this is evil */
#define FAIL_IF(x) \
diff --git a/tools/testing/selftests/powerpc/vphn/.gitignore b/tools/testing/selftests/powerpc/vphn/.gitignore
new file mode 100644
index 000000000000..7c04395010cb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vphn/.gitignore
@@ -0,0 +1 @@
+test-vphn
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile
new file mode 100644
index 000000000000..e539f775fd8f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vphn/Makefile
@@ -0,0 +1,15 @@
+PROG := test-vphn
+
+CFLAGS += -m64
+
+all: $(PROG)
+
+$(PROG): ../harness.c
+
+run_tests: all
+ ./$(PROG)
+
+clean:
+ rm -f $(PROG)
+
+.PHONY: all run_tests clean
diff --git a/tools/testing/selftests/powerpc/vphn/test-vphn.c b/tools/testing/selftests/powerpc/vphn/test-vphn.c
new file mode 100644
index 000000000000..5742f6876b25
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vphn/test-vphn.c
@@ -0,0 +1,410 @@
+#include <stdio.h>
+#include <byteswap.h>
+#include "utils.h"
+#include "subunit.h"
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define cpu_to_be32(x) bswap_32(x)
+#define be32_to_cpu(x) bswap_32(x)
+#define be16_to_cpup(x) bswap_16(*x)
+#define cpu_to_be64(x) bswap_64(x)
+#else
+#define cpu_to_be32(x) (x)
+#define be32_to_cpu(x) (x)
+#define be16_to_cpup(x) (*x)
+#define cpu_to_be64(x) (x)
+#endif
+
+#include "vphn.c"
+
+static struct test {
+ char *descr;
+ long input[VPHN_REGISTER_COUNT];
+ u32 expected[VPHN_ASSOC_BUFSIZE];
+} all_tests[] = {
+ {
+ "vphn: no data",
+ {
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ },
+ {
+ 0x00000000
+ }
+ },
+ {
+ "vphn: 1 x 16-bit value",
+ {
+ 0x8001ffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ },
+ {
+ 0x00000001,
+ 0x00000001
+ }
+ },
+ {
+ "vphn: 2 x 16-bit values",
+ {
+ 0x80018002ffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ },
+ {
+ 0x00000002,
+ 0x00000001,
+ 0x00000002
+ }
+ },
+ {
+ "vphn: 3 x 16-bit values",
+ {
+ 0x800180028003ffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ },
+ {
+ 0x00000003,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003
+ }
+ },
+ {
+ "vphn: 4 x 16-bit values",
+ {
+ 0x8001800280038004,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ },
+ {
+ 0x00000004,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004
+ }
+ },
+ {
+ /* Parsing the next 16-bit value out of the next 64-bit input
+ * value.
+ */
+ "vphn: 5 x 16-bit values",
+ {
+ 0x8001800280038004,
+ 0x8005ffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ },
+ {
+ 0x00000005,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005
+ }
+ },
+ {
+ /* Parse at most 6 x 64-bit input values */
+ "vphn: 24 x 16-bit values",
+ {
+ 0x8001800280038004,
+ 0x8005800680078008,
+ 0x8009800a800b800c,
+ 0x800d800e800f8010,
+ 0x8011801280138014,
+ 0x8015801680178018
+ },
+ {
+ 0x00000018,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005,
+ 0x00000006,
+ 0x00000007,
+ 0x00000008,
+ 0x00000009,
+ 0x0000000a,
+ 0x0000000b,
+ 0x0000000c,
+ 0x0000000d,
+ 0x0000000e,
+ 0x0000000f,
+ 0x00000010,
+ 0x00000011,
+ 0x00000012,
+ 0x00000013,
+ 0x00000014,
+ 0x00000015,
+ 0x00000016,
+ 0x00000017,
+ 0x00000018
+ }
+ },
+ {
+ "vphn: 1 x 32-bit value",
+ {
+ 0x00000001ffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff
+ },
+ {
+ 0x00000001,
+ 0x00000001
+ }
+ },
+ {
+ "vphn: 2 x 32-bit values",
+ {
+ 0x0000000100000002,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff
+ },
+ {
+ 0x00000002,
+ 0x00000001,
+ 0x00000002
+ }
+ },
+ {
+ /* Parsing the next 32-bit value out of the next 64-bit input
+ * value.
+ */
+ "vphn: 3 x 32-bit values",
+ {
+ 0x0000000100000002,
+ 0x00000003ffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff
+ },
+ {
+ 0x00000003,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003
+ }
+ },
+ {
+ /* Parse at most 6 x 64-bit input values */
+ "vphn: 12 x 32-bit values",
+ {
+ 0x0000000100000002,
+ 0x0000000300000004,
+ 0x0000000500000006,
+ 0x0000000700000008,
+ 0x000000090000000a,
+ 0x0000000b0000000c
+ },
+ {
+ 0x0000000c,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005,
+ 0x00000006,
+ 0x00000007,
+ 0x00000008,
+ 0x00000009,
+ 0x0000000a,
+ 0x0000000b,
+ 0x0000000c
+ }
+ },
+ {
+ "vphn: 16-bit value followed by 32-bit value",
+ {
+ 0x800100000002ffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff
+ },
+ {
+ 0x00000002,
+ 0x00000001,
+ 0x00000002
+ }
+ },
+ {
+ "vphn: 32-bit value followed by 16-bit value",
+ {
+ 0x000000018002ffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff
+ },
+ {
+ 0x00000002,
+ 0x00000001,
+ 0x00000002
+ }
+ },
+ {
+ /* Parse a 32-bit value split across two consecutive 64-bit
+ * input values.
+ */
+ "vphn: 16-bit value followed by 2 x 32-bit values",
+ {
+ 0x8001000000020000,
+ 0x0003ffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff
+ },
+ {
+ 0x00000003,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005
+ }
+ },
+ {
+ /* The lower bits in 0x0001ffff don't get mixed up with the
+ * 0xffff terminator.
+ */
+ "vphn: 32-bit value has all ones in 16 lower bits",
+ {
+ 0x0001ffff80028003,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff
+ },
+ {
+ 0x00000003,
+ 0x0001ffff,
+ 0x00000002,
+ 0x00000003
+ }
+ },
+ {
+ /* The following input doesn't follow the specification.
+ */
+ "vphn: last 32-bit value is truncated",
+ {
+ 0x0000000100000002,
+ 0x0000000300000004,
+ 0x0000000500000006,
+ 0x0000000700000008,
+ 0x000000090000000a,
+ 0x0000000b800c2bad
+ },
+ {
+ 0x0000000c,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005,
+ 0x00000006,
+ 0x00000007,
+ 0x00000008,
+ 0x00000009,
+ 0x0000000a,
+ 0x0000000b,
+ 0x0000000c
+ }
+ },
+ {
+ "vphn: garbage after terminator",
+ {
+ 0xffff2bad2bad2bad,
+ 0x2bad2bad2bad2bad,
+ 0x2bad2bad2bad2bad,
+ 0x2bad2bad2bad2bad,
+ 0x2bad2bad2bad2bad,
+ 0x2bad2bad2bad2bad
+ },
+ {
+ 0x00000000
+ }
+ },
+ {
+ NULL
+ }
+};
+
+static int test_one(struct test *test)
+{
+ __be32 output[VPHN_ASSOC_BUFSIZE] = { 0 };
+ int i, len;
+
+ vphn_unpack_associativity(test->input, output);
+
+ len = be32_to_cpu(output[0]);
+ if (len != test->expected[0]) {
+ printf("expected %d elements, got %d\n", test->expected[0],
+ len);
+ return 1;
+ }
+
+ for (i = 1; i < len; i++) {
+ u32 val = be32_to_cpu(output[i]);
+ if (val != test->expected[i]) {
+ printf("element #%d is 0x%x, should be 0x%x\n", i, val,
+ test->expected[i]);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int test_vphn(void)
+{
+ static struct test *test;
+
+ for (test = all_tests; test->descr; test++) {
+ int ret;
+
+ ret = test_one(test);
+ test_finish(test->descr, ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ return test_harness(test_vphn, "test-vphn");
+}
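
Taken together, the vectors above pin down the packing convention that vphn_unpack_associativity() must honour: the six 64-bit inputs are consumed as a stream of 16-bit fields, 0xffff terminates the stream, a field with the top bit set carries a single value in its low 15 bits, and a field with the top bit clear supplies the upper half of a 32-bit value that is completed by the following field (which is why 0x0001ffff is not mistaken for the terminator, and why a value may straddle two input words). The code under test is arch/powerpc/mm/vphn.c, pulled in through the symlink added below; the sketch that follows is only an editor's illustration of that convention, with the register count and output layout (a count followed by up to 24 values) assumed from the "24 x 16-bit values" case rather than taken from vphn.h.

/*
 * Editor's illustration only -- this is NOT arch/powerpc/mm/vphn.c.
 * Sizes are assumed from the "24 x 16-bit values" case above.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_REGS		6
#define NR_FIELDS	(NR_REGS * 4)		/* 16-bit fields per call */

static int unpack_sketch(const uint64_t *regs, uint32_t *out)
{
	uint16_t upper = 0;
	int have_upper = 0, count = 0, i;

	for (i = 0; i < NR_FIELDS; i++) {
		/* next 16-bit field, most significant field of a word first */
		uint16_t field = (regs[i / 4] >> (48 - 16 * (i % 4))) & 0xffff;

		if (have_upper) {
			/* low half of a 32-bit value, possibly from the next word */
			out[++count] = ((uint32_t)upper << 16) | field;
			have_upper = 0;
		} else if (field == 0xffff) {
			break;				/* terminator */
		} else if (field & 0x8000) {
			out[++count] = field & 0x7fff;	/* 16-bit value, flag stripped */
		} else {
			upper = field;			/* high half of a 32-bit value */
			have_upper = 1;
		}
	}
	out[0] = count;
	return count;
}

int main(void)
{
	/* the "32-bit value has all ones in 16 lower bits" vector from above */
	const uint64_t regs[NR_REGS] = {
		0x0001ffff80028003ULL, -1ULL, -1ULL, -1ULL, -1ULL, -1ULL,
	};
	uint32_t out[NR_FIELDS + 1] = { 0 };
	int i;

	unpack_sketch(regs, out);
	for (i = 0; i <= (int)out[0]; i++)
		printf("0x%08x\n", out[i]);	/* prints 3, 0x0001ffff, 2, 3 */
	return 0;
}
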
diff --git a/tools/testing/selftests/powerpc/vphn/vphn.c b/tools/testing/selftests/powerpc/vphn/vphn.c
new file mode 120000
index 000000000000..186b906e66d5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vphn/vphn.c
@@ -0,0 +1 @@
+../../../../../arch/powerpc/mm/vphn.c
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/vphn/vphn.h b/tools/testing/selftests/powerpc/vphn/vphn.h
new file mode 120000
index 000000000000..7131efe38c65
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vphn/vphn.h
@@ -0,0 +1 @@
+../../../../../arch/powerpc/mm/vphn.h
\ No newline at end of file
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 368d64ac779e..dd2812ceb0ba 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -310,7 +310,7 @@ function dump(first, pastlast)
cfr[jn] = cf[j] "." cfrep[cf[j]];
}
if (cpusr[jn] > ncpus && ncpus != 0)
- ovf = "(!)";
+ ovf = "-ovf";
else
ovf = "";
print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFcommon b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon
index d2d2a86139db..49701218dc62 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/CFcommon
+++ b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon
@@ -1,2 +1,3 @@
CONFIG_RCU_TORTURE_TEST=y
CONFIG_PRINTK_TIME=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
diff --git a/tools/testing/selftests/vm/hugetlbfstest.c b/tools/testing/selftests/vm/hugetlbfstest.c
index ea40ff8c2391..02e1072ec187 100644
--- a/tools/testing/selftests/vm/hugetlbfstest.c
+++ b/tools/testing/selftests/vm/hugetlbfstest.c
@@ -34,6 +34,7 @@ static void do_mmap(int fd, int extra_flags, int unmap)
int *p;
int flags = MAP_PRIVATE | MAP_POPULATE | extra_flags;
u64 before, after;
+ int ret;
before = read_rss();
p = mmap(NULL, length, PROT_READ | PROT_WRITE, flags, fd, 0);
@@ -44,7 +45,8 @@ static void do_mmap(int fd, int extra_flags, int unmap)
!"rss didn't grow as expected");
if (!unmap)
return;
- munmap(p, length);
+ ret = munmap(p, length);
+ assert(!ret || !"munmap returned an unexpected error");
after = read_rss();
assert(llabs(after - before) < 0x40000 ||
!"rss didn't shrink as expected");
diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
index ac56639dd4a9..addcd6fc1ecc 100644
--- a/tools/testing/selftests/vm/map_hugetlb.c
+++ b/tools/testing/selftests/vm/map_hugetlb.c
@@ -73,7 +73,11 @@ int main(void)
write_bytes(addr);
ret = read_bytes(addr);
- munmap(addr, LENGTH);
+ /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
+ if (munmap(addr, LENGTH)) {
+ perror("munmap");
+ exit(1);
+ }
return ret;
}
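
The new check codifies the rule stated in the comment it sits under: for memory mapped with MAP_HUGETLB, both the address and the length handed to munmap() must be multiples of the huge page size, so the return value is worth testing rather than ignoring. Below is a minimal stand-alone sketch of that rule; it is an editor's illustration, not part of the selftest, and it assumes a 2 MB huge page size and that at least one huge page has been reserved (e.g. via /proc/sys/vm/nr_hugepages).

/* Editor's illustration of the munmap() alignment rule noted above.
 * Assumes a 2 MB huge page size and that huge pages have been reserved. */
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000		/* fallback for older libc headers */
#endif

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	void *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");		/* most likely: no huge pages reserved */
		return 1;
	}

	/* The length is a whole huge page, so this munmap() is valid; an
	 * unaligned length is exactly what the comment above warns about. */
	if (munmap(p, HPAGE_SIZE)) {
		perror("munmap");
		return 1;
	}
	return 0;
}
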
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index f0a7918178dd..5bdb781163d1 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -1,48 +1,57 @@
-.PHONY: all all_32 all_64 check_build32 clean run_tests
+all:
-TARGETS_C_BOTHBITS := sigreturn
+include ../lib.mk
+
+.PHONY: all all_32 all_64 warn_32bit_failure clean
+
+TARGETS_C_BOTHBITS := sigreturn single_step_syscall
BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32)
BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
-UNAME_P := $(shell uname -p)
+UNAME_M := $(shell uname -m)
+CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
-# Always build 32-bit tests
+ifeq ($(CAN_BUILD_I386),1)
all: all_32
+TEST_PROGS += $(BINARIES_32)
+endif
-# If we're on a 64-bit host, build 64-bit tests as well
-ifeq ($(shell uname -p),x86_64)
+ifeq ($(CAN_BUILD_X86_64),1)
all: all_64
+TEST_PROGS += $(BINARIES_64)
endif
-all_32: check_build32 $(BINARIES_32)
+all_32: $(BINARIES_32)
all_64: $(BINARIES_64)
clean:
$(RM) $(BINARIES_32) $(BINARIES_64)
-run_tests:
- ./run_x86_tests.sh
-
$(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c
$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
$(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
$(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
-check_build32:
- @if ! $(CC) -m32 -o /dev/null trivial_32bit_program.c; then \
- echo "Warning: you seem to have a broken 32-bit build" 2>&1; \
- echo "environment. If you are using a Debian-like"; \
- echo " distribution, try:"; \
- echo ""; \
- echo " apt-get install gcc-multilib libc6-i386 libc6-dev-i386"; \
- echo ""; \
- echo "If you are using a Fedora-like distribution, try:"; \
- echo ""; \
- echo " yum install glibc-devel.*i686"; \
- exit 1; \
- fi
+# x86_64 users should be encouraged to install 32-bit libraries
+ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),01)
+all: warn_32bit_failure
+
+warn_32bit_failure:
+ @echo "Warning: you seem to have a broken 32-bit build" 2>&1; \
+ echo "environment. This will reduce test coverage of 64-bit" 2>&1; \
+ echo "kernels. If you are using a Debian-like distribution," 2>&1; \
+ echo "try:"; 2>&1; \
+ echo ""; \
+ echo " apt-get install gcc-multilib libc6-i386 libc6-dev-i386"; \
+ echo ""; \
+ echo "If you are using a Fedora-like distribution, try:"; \
+ echo ""; \
+ echo " yum install glibc-devel.*i686"; \
+ exit 0;
+endif
diff --git a/tools/testing/selftests/x86/check_cc.sh b/tools/testing/selftests/x86/check_cc.sh
new file mode 100755
index 000000000000..172d3293fb7b
--- /dev/null
+++ b/tools/testing/selftests/x86/check_cc.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# check_cc.sh - Helper to test userspace compilation support
+# Copyright (c) 2015 Andrew Lutomirski
+# GPL v2
+
+CC="$1"
+TESTPROG="$2"
+shift 2
+
+if "$CC" -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then
+ echo 1
+else
+ echo 0
+fi
+
+exit 0
diff --git a/tools/testing/selftests/x86/run_x86_tests.sh b/tools/testing/selftests/x86/run_x86_tests.sh
deleted file mode 100644
index 3d3ec65f3e7c..000000000000
--- a/tools/testing/selftests/x86/run_x86_tests.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# This is deliberately minimal. IMO kselftests should provide a standard
-# script here.
-./sigreturn_32 || exit 1
-
-if [[ "$uname -p" -eq "x86_64" ]]; then
- ./sigreturn_64 || exit 1
-fi
-
-exit 0
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
new file mode 100644
index 000000000000..50c26358e8b7
--- /dev/null
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -0,0 +1,181 @@
+/*
+ * single_step_syscall.c - single-steps various x86 syscalls
+ * Copyright (c) 2014-2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This is a very simple series of tests that makes system calls with
+ * the TF flag set. This exercises some nasty kernel code in the
+ * SYSENTER case: SYSENTER does not clear TF, so SYSENTER with TF set
+ * immediately issues #DB from CPL 0. This requires special handling in
+ * the kernel.
+ */
+
+#define _GNU_SOURCE
+
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+#include <asm/ldt.h>
+#include <err.h>
+#include <setjmp.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <sys/ptrace.h>
+#include <sys/user.h>
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+ int flags)
+{
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_SIGINFO | flags;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
+static volatile sig_atomic_t sig_traps;
+
+#ifdef __x86_64__
+# define REG_IP REG_RIP
+# define WIDTH "q"
+#else
+# define REG_IP REG_EIP
+# define WIDTH "l"
+#endif
+
+static unsigned long get_eflags(void)
+{
+ unsigned long eflags;
+ asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
+ return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+ asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
+ : : "rm" (eflags) : "flags");
+}
+
+#define X86_EFLAGS_TF (1UL << 8)
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+ ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+ if (get_eflags() & X86_EFLAGS_TF) {
+ set_eflags(get_eflags() & ~X86_EFLAGS_TF);
+ printf("[WARN]\tSIGTRAP handler had TF set\n");
+ _exit(1);
+ }
+
+ sig_traps++;
+
+ if (sig_traps == 10000 || sig_traps == 10001) {
+ printf("[WARN]\tHit %d SIGTRAPs with si_addr 0x%lx, ip 0x%lx\n",
+ (int)sig_traps,
+ (unsigned long)info->si_addr,
+ (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
+ }
+}
+
+static void check_result(void)
+{
+ unsigned long new_eflags = get_eflags();
+ set_eflags(new_eflags & ~X86_EFLAGS_TF);
+
+ if (!sig_traps) {
+ printf("[FAIL]\tNo SIGTRAP\n");
+ exit(1);
+ }
+
+ if (!(new_eflags & X86_EFLAGS_TF)) {
+ printf("[FAIL]\tTF was cleared\n");
+ exit(1);
+ }
+
+ printf("[OK]\tSurvived with TF set and %d traps\n", (int)sig_traps);
+ sig_traps = 0;
+}
+
+int main()
+{
+ int tmp;
+
+ sethandler(SIGTRAP, sigtrap, 0);
+
+ printf("[RUN]\tSet TF and check nop\n");
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ asm volatile ("nop");
+ check_result();
+
+#ifdef __x86_64__
+ printf("[RUN]\tSet TF and check syscall-less opportunistic sysret\n");
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ extern unsigned char post_nop[];
+ asm volatile ("pushf" WIDTH "\n\t"
+ "pop" WIDTH " %%r11\n\t"
+ "nop\n\t"
+ "post_nop:"
+ : : "c" (post_nop) : "r11");
+ check_result();
+#endif
+
+ printf("[RUN]\tSet TF and check int80\n");
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid));
+ check_result();
+
+ /*
+ * This test is particularly interesting if fast syscalls use
+ * SYSENTER: it triggers a nasty design flaw in SYSENTER.
+ * Specifically, SYSENTER does not clear TF, so either SYSENTER
+ * or the next instruction traps at CPL0. (Of course, Intel
+ * mostly forgot to document exactly what happens here.) So we
+ * get a CPL0 fault with usergs (on 64-bit kernels) and possibly
+ * no stack. The only sane way the kernel can possibly handle
+ * it is to clear TF on return from the #DB handler, but this
+ * happens way too early to set TF in the saved pt_regs, so the
+ * kernel has to do something clever to avoid losing track of
+ * the TF bit.
+ *
+ * Needless to say, we've had bugs in this area.
+ */
+ syscall(SYS_getpid); /* Force symbol binding without TF set. */
+ printf("[RUN]\tSet TF and check a fast syscall\n");
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ syscall(SYS_getpid);
+ check_result();
+
+ /* Now make sure that another fast syscall doesn't set TF again. */
+ printf("[RUN]\tFast syscall with TF cleared\n");
+ fflush(stdout); /* Force a syscall */
+ if (get_eflags() & X86_EFLAGS_TF) {
+ printf("[FAIL]\tTF is now set\n");
+ exit(1);
+ }
+ if (sig_traps) {
+ printf("[FAIL]\tGot SIGTRAP\n");
+ exit(1);
+ }
+ printf("[OK]\tNothing unexpected happened\n");
+
+ return 0;
+}
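
The mechanism the comments above describe, setting EFLAGS.TF from user space and watching the SIGTRAPs that single-stepping produces, is spread over the syscall variants in the test; stripped to its core it looks like the sketch below. This is an editor's illustration only, not part of the selftest, and it assumes (as the test's [WARN] path implies) that the kernel clears TF while the signal handler runs.

/* Editor's illustration: toggle EFLAGS.TF around a nop and count the traps. */
#include <signal.h>
#include <stdio.h>
#include <string.h>

#define X86_EFLAGS_TF (1UL << 8)

static volatile sig_atomic_t traps;

static void on_trap(int sig)
{
	(void)sig;
	traps++;		/* one SIGTRAP per single-stepped instruction */
}

static unsigned long get_eflags(void)
{
	unsigned long f;
	asm volatile ("pushf\n\tpop %0" : "=r" (f));
	return f;
}

static void set_eflags(unsigned long f)
{
	asm volatile ("push %0\n\tpopf" : : "r" (f) : "cc");
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_trap;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGTRAP, &sa, NULL))
		return 1;

	set_eflags(get_eflags() | X86_EFLAGS_TF);	/* start single-stepping */
	asm volatile ("nop");				/* traps after this insn */
	set_eflags(get_eflags() & ~X86_EFLAGS_TF);	/* stop single-stepping */

	printf("saw %d trap(s)\n", (int)traps);
	return traps ? 0 : 1;
}
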
diff --git a/tools/testing/selftests/x86/trivial_32bit_program.c b/tools/testing/selftests/x86/trivial_32bit_program.c
index 2e231beb0a39..fabdf0f51621 100644
--- a/tools/testing/selftests/x86/trivial_32bit_program.c
+++ b/tools/testing/selftests/x86/trivial_32bit_program.c
@@ -4,6 +4,10 @@
* GPL v2
*/
+#ifndef __i386__
+# error wrong architecture
+#endif
+
#include <stdio.h>
int main()
diff --git a/tools/testing/selftests/x86/trivial_64bit_program.c b/tools/testing/selftests/x86/trivial_64bit_program.c
new file mode 100644
index 000000000000..b994946c40fb
--- /dev/null
+++ b/tools/testing/selftests/x86/trivial_64bit_program.c
@@ -0,0 +1,18 @@
+/*
+ * Trivial program to check that we have a valid 64-bit build environment.
+ * Copyright (c) 2015 Andy Lutomirski
+ * GPL v2
+ */
+
+#ifndef __x86_64__
+# error wrong architecture
+#endif
+
+#include <stdio.h>
+
+int main()
+{
+ printf("\n");
+
+ return 0;
+}
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 0788621c8d76..2e83dd3655a2 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -12,10 +12,6 @@ TARGET=tmon
INSTALL_PROGRAM=install -m 755 -p
DEL_FILE=rm -f
-INSTALL_CONFIGFILE=install -m 644 -p
-CONFIG_FILE=
-CONFIG_PATH=
-
# Static builds might require -ltinfo, for instance
ifneq ($(findstring -static, $(LDFLAGS)),)
STATIC := --static
@@ -38,13 +34,9 @@ valgrind: tmon
install:
- mkdir -p $(INSTALL_ROOT)/$(BINDIR)
- $(INSTALL_PROGRAM) "$(TARGET)" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
- - mkdir -p $(INSTALL_ROOT)/$(CONFIG_PATH)
- - $(INSTALL_CONFIGFILE) "$(CONFIG_FILE)" "$(INSTALL_ROOT)/$(CONFIG_PATH)"
uninstall:
$(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
- $(CONFIG_FILE) "$(CONFIG_PATH)"
-
clean:
find . -name "*.o" | xargs $(DEL_FILE)
diff --git a/tools/vm/Makefile b/tools/vm/Makefile
index ac884b65a072..93aadaf7ff63 100644
--- a/tools/vm/Makefile
+++ b/tools/vm/Makefile
@@ -3,7 +3,7 @@
TARGETS=page-types slabinfo page_owner_sort
LIB_DIR = ../lib/api
-LIBS = $(LIB_DIR)/libapikfs.a
+LIBS = $(LIB_DIR)/libapi.a
CC = $(CROSS_COMPILE)gcc
CFLAGS = -Wall -Wextra -I../lib/
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 8d550ff14700..78fb8201014f 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1561,6 +1561,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
goto out;
}
+ if (irq_num >= kvm->arch.vgic.nr_irqs)
+ return -EINVAL;
+
vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
if (vcpu_id >= 0) {
/* kick the specified vcpu */
@@ -2141,7 +2144,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries,
int gsi)
{
- return gsi;
+ return 0;
}
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d3fc9399062a..90977418aeb6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -89,6 +89,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
static __read_mostly struct preempt_ops kvm_preempt_ops;
struct dentry *kvm_debugfs_dir;
+EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);